-rw-r--r-- .mailmap | 7
-rw-r--r-- CREDITS | 8
-rw-r--r-- Documentation/ABI/stable/sysfs-driver-mlxreg-io | 41
-rw-r--r-- Documentation/ABI/testing/sysfs-block | 12
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-i3c | 146
-rw-r--r-- Documentation/ABI/testing/sysfs-class-net-dsa | 2
-rw-r--r-- Documentation/ABI/testing/sysfs-devices-software_node | 10
-rw-r--r-- Documentation/ABI/testing/sysfs-devices-system-cpu | 7
-rw-r--r-- Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg | 499
-rw-r--r-- Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg | 695
-rw-r--r-- Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg | 741
-rw-r--r-- Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg | 834
-rw-r--r-- Documentation/RCU/Design/Data-Structures/Data-Structures.html | 173
-rw-r--r-- Documentation/RCU/Design/Data-Structures/blkd_task.svg | 676
-rw-r--r-- Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html | 6
-rw-r--r-- Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html | 2
-rw-r--r-- Documentation/RCU/Design/Requirements/Requirements.html | 206
-rw-r--r-- Documentation/RCU/checklist.txt | 49
-rw-r--r-- Documentation/RCU/stallwarn.txt | 7
-rw-r--r-- Documentation/RCU/whatisRCU.txt | 70
-rw-r--r-- Documentation/admin-guide/LSM/SELinux.rst | 2
-rw-r--r-- Documentation/admin-guide/cgroup-v2.rst | 8
-rw-r--r-- Documentation/admin-guide/kernel-parameters.txt | 106
-rw-r--r-- Documentation/admin-guide/l1tf.rst | 6
-rw-r--r-- Documentation/admin-guide/pm/cpuidle.rst | 631
-rw-r--r-- Documentation/admin-guide/pm/intel_pstate.rst | 10
-rw-r--r-- Documentation/admin-guide/pm/working-state.rst | 1
-rw-r--r-- Documentation/arm64/booting.txt | 8
-rw-r--r-- Documentation/arm64/cpu-feature-registers.txt | 8
-rw-r--r-- Documentation/arm64/elf_hwcaps.txt | 12
-rw-r--r-- Documentation/arm64/pointer-authentication.txt | 88
-rw-r--r-- Documentation/arm64/silicon-errata.txt | 2
-rw-r--r-- Documentation/block/biodoc.txt | 88
-rw-r--r-- Documentation/block/cfq-iosched.txt | 291
-rw-r--r-- Documentation/block/queue-sysfs.txt | 29
-rw-r--r-- Documentation/core-api/xarray.rst | 5
-rw-r--r-- Documentation/cpuidle/core.txt | 23
-rw-r--r-- Documentation/cpuidle/sysfs.txt | 98
-rw-r--r-- Documentation/crypto/api.rst | 9
-rw-r--r-- Documentation/crypto/architecture.rst | 31
-rw-r--r-- Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt | 37
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt | 9
-rw-r--r-- Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt | 13
-rw-r--r-- Documentation/devicetree/bindings/clock/clock-bindings.txt | 16
-rw-r--r-- Documentation/devicetree/bindings/clock/imx6q-clock.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/clock/imx7ulp-clock.txt | 104
-rw-r--r-- Documentation/devicetree/bindings/clock/imx8mq-clock.txt | 20
-rw-r--r-- Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt | 51
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,gcc.txt | 16
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,gpucc.txt | 22
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,lpasscc.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,rpmcc.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/clock/qcom,videocc.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/clock/qoriq-clock.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/clock/sun8i-de2.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/clock/sunxi-ccu.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt | 172
-rw-r--r-- Documentation/devicetree/bindings/crypto/arm-cryptocell.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/crypto/fsl-dcp.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/display/himax,hx8357d.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/display/msm/dsi.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/display/msm/gpu.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/display/msm/mdp4.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/display/panel/auo,g101evn010 | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt | 42
-rw-r--r-- Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt | 81
-rw-r--r-- Documentation/devicetree/bindings/display/renesas,du.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt | 30
-rw-r--r-- Documentation/devicetree/bindings/display/truly,nt35597.txt | 59
-rw-r--r-- Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt | 43
-rw-r--r-- Documentation/devicetree/bindings/i3c/i3c.txt | 138
-rw-r--r-- Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt | 41
-rw-r--r-- Documentation/devicetree/bindings/input/input-reset.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-ic.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt | 34
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt | 61
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/media/aspeed-video.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/media/cedrus.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/media/i2c/mt9m111.txt | 13
-rw-r--r-- Documentation/devicetree/bindings/media/i2c/sony,imx214.txt | 53
-rw-r--r-- Documentation/devicetree/bindings/media/qcom,venus.txt | 14
-rw-r--r-- Documentation/devicetree/bindings/media/rcar_vin.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/media/spi/sony-cxd2880.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/media/sun6i-csi.txt | 59
-rw-r--r-- Documentation/devicetree/bindings/memory-controllers/synopsys.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/mfd/axp20x.txt | 9
-rw-r--r-- Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/mtd/mtd-physmap.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/mtd/partitions/redboot-fis.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/net/broadcom-bluetooth.txt | 11
-rw-r--r-- Documentation/devicetree/bindings/net/can/fsl-flexcan.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/net/can/xilinx_can.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/net/dsa/ksz.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/net/icplus-ip101ag.txt | 19
-rw-r--r-- Documentation/devicetree/bindings/net/mediatek-dwmac.txt | 78
-rw-r--r-- Documentation/devicetree/bindings/net/renesas,ravb.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt | 74
-rw-r--r-- Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt | 31
-rw-r--r-- Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/regulator/act8945a-regulator.txt | 34
-rw-r--r-- Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt | 82
-rw-r--r-- Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt | 143
-rw-r--r-- Documentation/devicetree/bindings/regulator/regulator.txt | 12
-rw-r--r-- Documentation/devicetree/bindings/sound/ak4104.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/sound/ak4118.txt | 22
-rw-r--r-- Documentation/devicetree/bindings/sound/amlogic,axg-spdifin.txt | 22
-rw-r--r-- Documentation/devicetree/bindings/sound/audio-graph-card.txt | 205
-rw-r--r-- Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/sound/cs4270.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/sound/dmic.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/fsl-sai.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/sound/omap-mcpdm.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/sound/pcm3060.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/sound/qcom,q6asm.txt | 27
-rw-r--r-- Documentation/devicetree/bindings/sound/renesas,rsnd.txt | 395
-rw-r--r-- Documentation/devicetree/bindings/sound/rt5631.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/sound/rt5663.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/sound/simple-amplifier.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/sound/simple-card.txt | 137
-rw-r--r-- Documentation/devicetree/bindings/sound/simple-scu-card.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/xlnx,i2s.txt | 28
-rw-r--r-- Documentation/devicetree/bindings/spi/atmel-quadspi.txt (renamed from Documentation/devicetree/bindings/mtd/atmel-quadspi.txt) | 0
-rw-r--r-- Documentation/devicetree/bindings/spi/nuvoton,npcm-pspi.txt | 43
-rw-r--r-- Documentation/devicetree/bindings/spi/omap-spi.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/spi/sh-msiof.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-mt65xx.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-mxic.txt | 34
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-pxa2xx.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-rspi.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/spi/spi-uniphier.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/timer/rda,8810pl-timer.txt | 20
-rw-r--r-- Documentation/devicetree/bindings/timer/renesas,cmt.txt | 15
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.txt | 2
-rw-r--r-- Documentation/driver-api/i3c/device-driver-api.rst | 9
-rw-r--r-- Documentation/driver-api/i3c/index.rst | 11
-rw-r--r-- Documentation/driver-api/i3c/master-driver-api.rst | 9
-rw-r--r-- Documentation/driver-api/i3c/protocol.rst | 203
-rw-r--r-- Documentation/driver-api/index.rst | 1
-rw-r--r-- Documentation/driver-model/devres.txt | 1
-rw-r--r-- Documentation/features/vm/ioremap_prot/arch-support.txt | 2
-rw-r--r-- Documentation/gpu/amdgpu-dc.rst | 68
-rw-r--r-- Documentation/gpu/drivers.rst | 1
-rw-r--r-- Documentation/gpu/drm-kms-helpers.rst | 31
-rw-r--r-- Documentation/gpu/drm-kms.rst | 19
-rw-r--r-- Documentation/gpu/drm-mm.rst | 7
-rw-r--r-- Documentation/gpu/drm-uapi.rst | 3
-rw-r--r-- Documentation/gpu/todo.rst | 58
-rw-r--r-- Documentation/gpu/vkms.rst | 101
-rw-r--r-- Documentation/media/.gitignore | 2
-rw-r--r-- Documentation/media/Makefile | 2
-rw-r--r-- Documentation/media/audio.h.rst.exceptions | 2
-rw-r--r-- Documentation/media/ca.h.rst.exceptions | 2
-rw-r--r-- Documentation/media/cec-drivers/index.rst | 2
-rw-r--r-- Documentation/media/cec-drivers/pulse8-cec.rst | 2
-rw-r--r-- Documentation/media/cec.h.rst.exceptions | 2
-rw-r--r-- Documentation/media/conf.py | 2
-rw-r--r-- Documentation/media/conf_nitpick.py | 2
-rw-r--r-- Documentation/media/dmx.h.rst.exceptions | 2
-rw-r--r-- Documentation/media/dvb-drivers/avermedia.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/bt8xx.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/cards.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/ci.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/contributors.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/dvb-usb.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/faq.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/frontends.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/index.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/intro.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/lmedm04.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/opera-firmware.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/technisat.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/ttusb-dec.rst | 2
-rw-r--r-- Documentation/media/dvb-drivers/udev.rst | 2
-rw-r--r-- Documentation/media/frontend.h.rst.exceptions | 2
-rw-r--r-- Documentation/media/index.rst | 2
-rw-r--r-- Documentation/media/intro.rst | 2
-rw-r--r-- Documentation/media/kapi/cec-core.rst | 2
-rw-r--r-- Documentation/media/kapi/csi2.rst | 2
-rw-r--r-- Documentation/media/kapi/dtv-ca.rst | 2
-rw-r--r-- Documentation/media/kapi/dtv-common.rst | 2
-rw-r--r-- Documentation/media/kapi/dtv-core.rst | 2
-rw-r--r-- Documentation/media/kapi/dtv-demux.rst | 2
-rw-r--r-- Documentation/media/kapi/dtv-frontend.rst | 2
-rw-r--r-- Documentation/media/kapi/dtv-net.rst | 2
-rw-r--r-- Documentation/media/kapi/mc-core.rst | 2
-rw-r--r-- Documentation/media/kapi/rc-core.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-async.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-clocks.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-common.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-controls.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-core.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-dev.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-device.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-dv-timings.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-event.rst | 1
-rw-r--r-- Documentation/media/kapi/v4l2-fh.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-flash-led-class.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-fwnode.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-intro.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-mc.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-mediabus.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-mem2mem.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-rect.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-subdev.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-tuner.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-tveeprom.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-videobuf.rst | 2
-rw-r--r-- Documentation/media/kapi/v4l2-videobuf2.rst | 2
-rw-r--r-- Documentation/media/lirc.h.rst.exceptions | 2
-rw-r--r-- Documentation/media/media.h.rst.exceptions | 2
-rw-r--r-- Documentation/media/media_kapi.rst | 2
-rw-r--r-- Documentation/media/media_uapi.rst | 8
-rw-r--r-- Documentation/media/net.h.rst.exceptions | 2
-rw-r--r-- Documentation/media/typical_media_device.svg | 10
-rw-r--r-- Documentation/media/uapi/cec/cec-api.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-func-close.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-func-ioctl.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-func-open.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-func-poll.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-funcs.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-header.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-intro.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-ioc-dqevent.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-ioc-g-mode.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-ioc-receive.rst | 9
-rw-r--r-- Documentation/media/uapi/cec/cec-pin-error-inj.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-bilingual-channel-select.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-channel-select.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-clear-buffer.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-continue.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-fclose.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-fopen.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-fwrite.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-get-capabilities.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-get-status.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-pause.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-play.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-select-source.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-set-av-sync.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-set-bypass-mode.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-set-id.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-set-mixer.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-set-mute.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-set-streamtype.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio-stop.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio_data_types.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/audio_function_calls.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca-fclose.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca-fopen.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca-get-cap.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca-get-descr-info.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca-get-msg.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca-get-slot-info.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca-reset.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca-send-msg.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca-set-descr.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca_data_types.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/ca_function_calls.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/demux.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-add-pid.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-expbuf.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-fclose.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-fopen.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-fread.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-fwrite.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-get-pes-pids.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-get-stc.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-mmap.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-munmap.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-qbuf.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-querybuf.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-remove-pid.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-reqbufs.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-set-buffer-size.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-set-filter.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-set-pes-filter.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-start.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx-stop.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx_fcalls.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dmx_types.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dvb-fe-read-status.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dvb-frontend-event.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dvb-frontend-parameters.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dvbapi.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dvbproperty.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/dvbstb.svg | 27
-rw-r--r-- Documentation/media/uapi/dvb/examples.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-bandwidth-t.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-get-event.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-get-frontend.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-get-info.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-get-property.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-read-ber.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-read-signal-strength.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-read-snr.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-read-status.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-set-frontend.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-set-tone.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-set-voltage.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe-type-t.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/fe_property_parameters.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/frontend-header.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/frontend-property-cable-systems.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/frontend-property-satellite-systems.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/frontend-stat-properties.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/frontend.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/frontend_f_close.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/frontend_f_open.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/frontend_fcalls.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/frontend_legacy_api.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/headers.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/intro.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/legacy_dvb_apis.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/net-add-if.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/net-get-if.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/net-remove-if.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/net-types.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/net.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/query-dvb-frontend-info.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-clear-buffer.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-command.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-continue.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-fast-forward.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-fclose.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-fopen.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-freeze.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-fwrite.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-get-capabilities.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-get-event.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-get-frame-count.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-get-pts.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-get-size.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-get-status.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-play.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-select-source.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-set-blank.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-set-display-format.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-set-format.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-set-streamtype.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-slowmotion.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-stillpicture.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-stop.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video-try-command.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video_function_calls.rst | 9
-rw-r--r-- Documentation/media/uapi/dvb/video_types.rst | 9
-rw-r--r-- Documentation/media/uapi/fdl-appendix.rst | 9
-rw-r--r-- Documentation/media/uapi/gen-errors.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-controller-intro.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-controller-model.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-controller.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-func-close.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-func-ioctl.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-func-open.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-funcs.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-header.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-ioc-device-info.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-ioc-enum-links.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-ioc-g-topology.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst | 26
-rw-r--r-- Documentation/media/uapi/mediactl/media-ioc-setup-link.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/media-request-ioc-queue.rst | 26
-rw-r--r-- Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst | 26
-rw-r--r-- Documentation/media/uapi/mediactl/media-types.rst | 9
-rw-r--r-- Documentation/media/uapi/mediactl/request-api.rst | 26
-rw-r--r-- Documentation/media/uapi/mediactl/request-func-close.rst | 26
-rw-r--r-- Documentation/media/uapi/mediactl/request-func-ioctl.rst | 26
-rw-r--r-- Documentation/media/uapi/mediactl/request-func-poll.rst | 26
-rw-r--r-- Documentation/media/uapi/rc/keytable.c.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-dev-intro.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-dev.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-func.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-get-features.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-get-rec-mode.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-get-rec-resolution.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-get-send-mode.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-get-timeout.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-header.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-read.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-set-measure-carrier-mode.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-set-rec-carrier.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-set-rec-timeout.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-set-send-carrier.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-set-send-duty-cycle.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-set-transmitter-mask.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-set-wideband-receiver.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/lirc-write.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/rc-intro.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/rc-sysfs-nodes.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/rc-table-change.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/rc-tables.rst | 9
-rw-r--r-- Documentation/media/uapi/rc/remote_controllers.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/app-pri.rst | 11
-rw-r--r-- Documentation/media/uapi/v4l/async.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/audio.rst | 11
-rw-r--r-- Documentation/media/uapi/v4l/bayer.svg | 27
-rw-r--r-- Documentation/media/uapi/v4l/biblio.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/buffer.rst | 12
-rw-r--r-- Documentation/media/uapi/v4l/capture-example.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/capture.c.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/colorspaces-defs.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/colorspaces-details.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/colorspaces.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/common-defs.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/common.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/compat.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/constraints.svg | 27
-rw-r--r-- Documentation/media/uapi/v4l/control.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/crop.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/crop.svg | 10
-rw-r--r-- Documentation/media/uapi/v4l/depth-formats.rst | 10
-rw-r--r-- Documentation/media/uapi/v4l/dev-capture.rst | 11
-rw-r--r-- Documentation/media/uapi/v4l/dev-codec.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-effect.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-event.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-meta.rst | 42
-rw-r--r-- Documentation/media/uapi/v4l/dev-osd.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-output.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-overlay.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-radio.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-raw-vbi.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-rds.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-sdr.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-sliced-vbi.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-subdev.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dev-teletext.rst | 11
-rw-r--r-- Documentation/media/uapi/v4l/dev-touch.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/devices.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/diff-v4l.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dmabuf.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/dv-timings.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/extended-controls.rst | 25
-rw-r--r-- Documentation/media/uapi/v4l/field-order.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/fieldseq_bt.svg | 12
-rw-r--r-- Documentation/media/uapi/v4l/fieldseq_tb.svg | 12
-rw-r--r-- Documentation/media/uapi/v4l/format.rst | 11
-rw-r--r-- Documentation/media/uapi/v4l/func-close.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/func-ioctl.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/func-mmap.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/func-munmap.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/func-open.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/func-poll.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/func-read.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/func-select.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/func-write.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/hist-v4l2.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/hsv-formats.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/io.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/libv4l-introduction.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/libv4l.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/meta-formats.rst | 10
-rw-r--r-- Documentation/media/uapi/v4l/mmap.rst | 31
-rw-r--r-- Documentation/media/uapi/v4l/nv12mt.svg | 27
-rw-r--r-- Documentation/media/uapi/v4l/nv12mt_example.svg | 27
-rw-r--r-- Documentation/media/uapi/v4l/open.rst | 11
-rw-r--r-- Documentation/media/uapi/v4l/pipeline.dot | 2
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-cnf4.rst | 31
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-compressed.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-grey.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-indexed.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-intro.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-inzi.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-m420.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-meta-d4xx.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst | 178
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-meta-uvc.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgo.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-nv12.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-nv12m.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-nv12mt.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-nv16.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-nv16m.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-nv24.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-reserved.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-rgb.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-sdr-cs08.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-sdr-cs14le.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-sdr-cu08.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-sdr-cu16le.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-sdr-pcu16be.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-sdr-pcu18be.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-sdr-pcu20be.rst | 10
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-sdr-ru12le.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-srggb10-ipu3.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-srggb10.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-srggb10alaw8.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-srggb10dpcm8.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-srggb10p.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-srggb12.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-srggb12p.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-srggb14p.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-srggb16.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-srggb8.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-tch-td08.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-tch-td16.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-tch-tu08.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-uv8.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-uyvy.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-v4l2.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-vyuy.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-y10.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-y10b.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-y10p.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-y12.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-y12i.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-y16-be.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-y16.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-y41p.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-y8i.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-yuv410.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-yuv411p.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-yuv420.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-yuv420m.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-yuv422m.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-yuv422p.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-yuv444m.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-yuyv.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-yvyu.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt-z16.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/pixfmt.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/planar-apis.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/querycap.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/rw.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/sdr-formats.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/selection-api-configuration.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/selection-api-examples.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/selection-api-intro.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/selection-api-targets.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/selection-api-vs-crop-api.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/selection-api.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/selection.svg | 27
-rw-r--r-- Documentation/media/uapi/v4l/selections-common.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/standard.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/streaming-par.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/subdev-formats.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/subdev-image-processing-crop.svg | 10
-rw-r--r-- Documentation/media/uapi/v4l/subdev-image-processing-full.svg | 10
-rw-r--r-- Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg | 10
-rw-r--r-- Documentation/media/uapi/v4l/tch-formats.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/tuner.rst | 13
-rw-r--r-- Documentation/media/uapi/v4l/user-func.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/userp.rst | 17
-rw-r--r-- Documentation/media/uapi/v4l/v4l2-selection-flags.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/v4l2-selection-targets.rst | 16
-rw-r--r-- Documentation/media/uapi/v4l/v4l2.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/v4l2grab-example.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/v4l2grab.c.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vbi_525.svg | 12
-rw-r--r-- Documentation/media/uapi/v4l/vbi_625.svg | 12
-rw-r--r-- Documentation/media/uapi/v4l/vbi_hsync.svg | 12
-rw-r--r-- Documentation/media/uapi/v4l/video.rst | 13
-rw-r--r-- Documentation/media/uapi/v4l/videodev.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-create-bufs.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-cropcap.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-dqevent.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-enum-fmt.rst | 17
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-enumaudio.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-enumaudioout.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-enuminput.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-enumoutput.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-enumstd.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-expbuf.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-audio.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-audioout.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-crop.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-ctrl.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-edid.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-enc-index.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-fbuf.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-fmt.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-frequency.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-input.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-modulator.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-output.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-parm.rst | 12
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-priority.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-selection.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-std.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-g-tuner.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-log-status.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-overlay.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-prepare-buf.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-qbuf.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-querybuf.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-querycap.rst | 12
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-queryctrl.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-querystd.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-reqbufs.rst | 26
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-streamon.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst | 12
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/vidioc-subscribe-event.rst | 9
-rw-r--r-- Documentation/media/uapi/v4l/yuv-formats.rst | 9
-rw-r--r-- Documentation/media/v4l-drivers/au0828-cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/bttv-cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/bttv.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/cafe_ccic.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/cpia2.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/cx18.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/cx2341x.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/cx23885-cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/cx88-cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/cx88.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/davinci-vpbe.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/em28xx-cardlist.rst | 4
-rw-r--r-- Documentation/media/v4l-drivers/fimc.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/fourcc.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/gspca-cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/imx.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/index.rst | 3
-rw-r--r-- Documentation/media/v4l-drivers/ipu3.rst | 369
-rw-r--r-- Documentation/media/v4l-drivers/ivtv-cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/ivtv.rst | 1
-rw-r--r-- Documentation/media/v4l-drivers/max2175.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/meye.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/omap3isp.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/omap4_camera.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/philips.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/pvrusb2.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/pxa_camera.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/qcom_camss.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot | 2
-rw-r--r-- Documentation/media/v4l-drivers/qcom_camss_graph.dot | 2
-rw-r--r-- Documentation/media/v4l-drivers/radiotrack.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/rcar-fdp1.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/saa7134-cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/saa7134.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/saa7164-cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst | 4
-rw-r--r-- Documentation/media/v4l-drivers/si470x.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/si4713.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/si476x.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/soc-camera.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/tm6000-cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/tuner-cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/tuners.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/usbvision-cardlist.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/uvcvideo.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/v4l-with-ir.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/vivid.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/zoran.rst | 2
-rw-r--r-- Documentation/media/v4l-drivers/zr364xx.rst | 2
-rw-r--r-- Documentation/media/video.h.rst.exceptions | 2
-rw-r--r-- Documentation/media/videodev2.h.rst.exceptions | 4
-rw-r--r-- Documentation/networking/device_drivers/3com/3c509.txt (renamed from Documentation/networking/3c509.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/3com/vortex.txt (renamed from Documentation/networking/vortex.txt) | 2
-rw-r--r-- Documentation/networking/device_drivers/amazon/ena.txt (renamed from Documentation/networking/ena.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/chelsio/cxgb.txt (renamed from Documentation/networking/cxgb.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/cirrus/cs89x0.txt (renamed from Documentation/networking/cs89x0.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/davicom/dm9000.txt (renamed from Documentation/networking/dm9000.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/dec/de4x5.txt (renamed from Documentation/networking/de4x5.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/dec/dmfe.txt (renamed from Documentation/networking/dmfe.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/dlink/dl2k.txt (renamed from Documentation/networking/dl2k.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/freescale/dpaa.txt (renamed from Documentation/networking/dpaa.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst (renamed from Documentation/networking/dpaa2/dpio-driver.rst) | 4
-rw-r--r-- Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst (renamed from Documentation/networking/dpaa2/ethernet-driver.rst) | 2
-rw-r--r-- Documentation/networking/device_drivers/freescale/dpaa2/index.rst (renamed from Documentation/networking/dpaa2/index.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/freescale/dpaa2/overview.rst (renamed from Documentation/networking/dpaa2/overview.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/freescale/gianfar.txt (renamed from Documentation/networking/gianfar.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/e100.rst (renamed from Documentation/networking/e100.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/e1000.rst (renamed from Documentation/networking/e1000.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/e1000e.rst (renamed from Documentation/networking/e1000e.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/fm10k.rst (renamed from Documentation/networking/fm10k.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/i40e.rst (renamed from Documentation/networking/i40e.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/iavf.rst (renamed from Documentation/networking/iavf.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/ice.rst (renamed from Documentation/networking/ice.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/igb.rst (renamed from Documentation/networking/igb.rst) | 19
-rw-r--r-- Documentation/networking/device_drivers/intel/igbvf.rst (renamed from Documentation/networking/igbvf.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/ipw2100.txt (renamed from Documentation/networking/README.ipw2100) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/ipw2200.txt (renamed from Documentation/networking/README.ipw2200) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/ixgb.rst (renamed from Documentation/networking/ixgb.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/intel/ixgbe.rst (renamed from Documentation/networking/ixgbe.rst) | 13
-rw-r--r-- Documentation/networking/device_drivers/intel/ixgbevf.rst (renamed from Documentation/networking/ixgbevf.rst) | 0
-rw-r--r-- Documentation/networking/device_drivers/microsoft/netvsc.txt (renamed from Documentation/networking/netvsc.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/neterion/s2io.txt (renamed from Documentation/networking/s2io.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/neterion/vxge.txt (renamed from Documentation/networking/vxge.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx (renamed from Documentation/networking/LICENSE.qla3xxx) | 0
-rw-r--r-- Documentation/networking/device_drivers/qlogic/LICENSE.qlcnic (renamed from Documentation/networking/LICENSE.qlcnic) | 0
-rw-r--r-- Documentation/networking/device_drivers/qlogic/LICENSE.qlge (renamed from Documentation/networking/LICENSE.qlge) | 0
-rw-r--r-- Documentation/networking/device_drivers/qualcomm/rmnet.txt (renamed from Documentation/networking/rmnet.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/sb1000.txt (renamed from Documentation/networking/README.sb1000) | 0
-rw-r--r-- Documentation/networking/device_drivers/smsc/smc9.txt (renamed from Documentation/networking/smc9.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/stmicro/stmmac.txt (renamed from Documentation/networking/stmmac.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/ti/cpsw.txt (renamed from Documentation/networking/ti-cpsw.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/ti/tlan.txt (renamed from Documentation/networking/tlan.txt) | 0
-rw-r--r-- Documentation/networking/device_drivers/toshiba/spider_net.txt (renamed from Documentation/networking/spider_net.txt) | 0
-rw-r--r-- Documentation/networking/devlink-params.txt | 9
-rw-r--r-- Documentation/networking/index.rst | 1
-rw-r--r-- Documentation/networking/ip-sysctl.txt | 18
-rw-r--r-- Documentation/networking/netdev-features.txt | 2
-rw-r--r-- Documentation/networking/nf_conntrack-sysctl.txt | 11
-rw-r--r-- Documentation/networking/snmp_counter.rst | 1190
-rw-r--r-- Documentation/networking/vrf.txt | 22
-rw-r--r-- Documentation/networking/xfrm_device.txt | 7
-rw-r--r-- Documentation/perf/thunderx2-pmu.txt | 41
-rw-r--r-- Documentation/powerpc/firmware-assisted-dump.txt | 17
-rw-r--r-- Documentation/powerpc/isa-versions.rst | 74
-rw-r--r-- Documentation/scsi/scsi-parameters.txt | 5
-rw-r--r-- Documentation/sh/new-machine.txt | 8
-rw-r--r-- Documentation/userspace-api/spec_ctrl.rst | 9
-rw-r--r-- Documentation/virtual/kvm/api.txt | 136
-rw-r--r-- Documentation/vm/unevictable-lru.rst | 6
-rw-r--r-- Documentation/x86/boot.txt | 32
-rw-r--r-- Documentation/x86/resctrl_ui.txt (renamed from Documentation/x86/intel_rdt_ui.txt) | 9
-rw-r--r-- MAINTAINERS | 463
-rw-r--r-- Makefile | 24
-rw-r--r-- arch/alpha/kernel/setup.c | 1
-rw-r--r-- arch/alpha/mm/numa.c | 6
-rw-r--r-- arch/arc/Kconfig | 14
-rw-r--r-- arch/arc/Makefile | 2
-rw-r--r-- arch/arc/boot/dts/hsdk.dts | 15
-rw-r--r-- arch/arc/configs/axs101_defconfig | 2
-rw-r--r-- arch/arc/configs/axs103_defconfig | 1
-rw-r--r-- arch/arc/configs/axs103_smp_defconfig | 1
-rw-r--r-- arch/arc/configs/hsdk_defconfig | 4
-rw-r--r-- arch/arc/configs/nps_defconfig | 2
-rw-r--r-- arch/arc/configs/nsim_700_defconfig | 1
-rw-r--r-- arch/arc/configs/nsimosci_defconfig | 2
-rw-r--r-- arch/arc/configs/nsimosci_hs_defconfig | 1
-rw-r--r-- arch/arc/configs/nsimosci_hs_smp_defconfig | 1
-rw-r--r-- arch/arc/configs/tb10x_defconfig | 1
-rw-r--r-- arch/arc/configs/vdk_hs38_defconfig | 1
-rw-r--r-- arch/arc/configs/vdk_hs38_smp_defconfig | 1
-rw-r--r-- arch/arc/include/asm/cache.h | 2
-rw-r--r-- arch/arc/include/asm/io.h | 72
-rw-r--r-- arch/arc/kernel/setup.c | 9
-rw-r--r-- arch/arc/mm/cache.c | 20
-rw-r--r-- arch/arc/mm/fault.c | 2
-rw-r--r-- arch/arm/Kconfig | 15
-rw-r--r-- arch/arm/Makefile | 12
-rw-r--r-- arch/arm/boot/compressed/Makefile | 1
-rw-r--r-- arch/arm/boot/dts/am3517-evm.dts | 2
-rw-r--r-- arch/arm/boot/dts/am3517-som.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/arm-realview-pb1176.dts | 4
-rw-r--r-- arch/arm/boot/dts/arm-realview-pb11mp.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts | 2
-rw-r--r-- arch/arm/boot/dts/bcm2837-rpi-3-b.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx51-zii-rdu1.dts | 6
-rw-r--r-- arch/arm/boot/dts/imx7d-nitrogen7.dts | 9
-rw-r--r-- arch/arm/boot/dts/imx7d-pico.dtsi | 22
-rw-r--r-- arch/arm/boot/dts/logicpd-som-lv.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts | 2
-rw-r--r-- arch/arm/boot/dts/rk3288-veyron.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/sama5d2.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts | 4
-rw-r--r-- arch/arm/configs/davinci_all_defconfig | 5
-rw-r--r-- arch/arm/configs/omap1_defconfig | 2
-rw-r--r-- arch/arm/configs/omap2plus_defconfig | 14
-rw-r--r-- arch/arm/crypto/Kconfig | 16
-rw-r--r-- arch/arm/crypto/Makefile | 6
-rw-r--r-- arch/arm/crypto/aes-ce-glue.c | 1
-rw-r--r-- arch/arm/crypto/aes-cipher-core.S | 62
-rw-r--r-- arch/arm/crypto/chacha-neon-core.S (renamed from arch/arm/crypto/chacha20-neon-core.S) | 98
-rw-r--r-- arch/arm/crypto/chacha-neon-glue.c | 201
-rw-r--r-- arch/arm/crypto/chacha20-neon-glue.c | 127
-rw-r--r-- arch/arm/crypto/nh-neon-core.S | 116
-rw-r--r-- arch/arm/crypto/nhpoly1305-neon-glue.c | 77
-rw-r--r-- arch/arm/include/asm/kvm_asm.h | 4
-rw-r--r-- arch/arm/include/asm/kvm_host.h | 9
-rw-r--r-- arch/arm/include/asm/kvm_mmu.h | 61
-rw-r--r-- arch/arm/include/asm/module.h | 11
-rw-r--r-- arch/arm/include/asm/stackprotector.h | 12
-rw-r--r-- arch/arm/include/asm/stage2_pgtable.h | 8
-rw-r--r-- arch/arm/include/asm/thread_info.h | 3
-rw-r--r-- arch/arm/kernel/asm-offsets.c | 4
-rw-r--r-- arch/arm/kernel/ftrace.c | 17
-rw-r--r-- arch/arm/kernel/process.c | 6
-rw-r--r-- arch/arm/kvm/coproc.c | 4
-rw-r--r-- arch/arm/mach-davinci/board-dm365-evm.c | 4
-rw-r--r-- arch/arm/mach-davinci/da830.c | 4
-rw-r--r-- arch/arm/mach-davinci/da850.c | 4
-rw-r--r-- arch/arm/mach-davinci/devices-da8xx.c | 40
-rw-r--r-- arch/arm/mach-davinci/dm355.c | 32
-rw-r--r-- arch/arm/mach-davinci/dm365.c | 37
-rw-r--r-- arch/arm/mach-davinci/dm644x.c | 22
-rw-r--r-- arch/arm/mach-davinci/dm646x.c | 12
-rw-r--r-- arch/arm/mach-imx/cpuidle-imx6sx.c | 2
-rw-r--r-- arch/arm/mach-mmp/cputype.h | 6
-rw-r--r-- arch/arm/mach-omap1/Makefile | 2
-rw-r--r-- arch/arm/mach-omap1/board-ams-delta.c | 25
-rw-r--r-- arch/arm/mach-omap2/Makefile | 2
-rw-r--r-- arch/arm/mach-omap2/pdata-quirks.c | 4
-rw-r--r-- arch/arm/mach-omap2/prm44xx.c | 2
-rw-r--r-- arch/arm/mach-pxa/palm27x.h | 2
-rw-r--r-- arch/arm/mach-pxa/palmld.c | 12
-rw-r--r-- arch/arm/mach-s3c64xx/mach-crag6410-module.c | 17
-rw-r--r-- arch/arm/mm/cache-v7.S | 8
-rw-r--r-- arch/arm/mm/cache-v7m.S | 14
-rw-r--r-- arch/arm/mm/dma-mapping.c | 2
-rw-r--r-- arch/arm/mm/proc-macros.S | 10
-rw-r--r-- arch/arm/probes/kprobes/opt-arm.c | 2
-rw-r--r-- arch/arm64/Kconfig | 160
-rw-r--r-- arch/arm64/Makefile | 12
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-ap806.dtsi | 27
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts | 7
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts | 7
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt7622.dtsi | 10
-rw-r--r-- arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/qcom/sdm845-mtp.dts | 10
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts | 2
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi | 12
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi | 2
-rw-r--r-- arch/arm64/crypto/Kconfig | 7
-rw-r--r-- arch/arm64/crypto/Makefile | 7
-rw-r--r-- arch/arm64/crypto/chacha-neon-core.S (renamed from arch/arm64/crypto/chacha20-neon-core.S) | 484
-rw-r--r-- arch/arm64/crypto/chacha-neon-glue.c | 198
-rw-r--r-- arch/arm64/crypto/chacha20-neon-glue.c | 133
-rw-r--r-- arch/arm64/crypto/nh-neon-core.S | 103
-rw-r--r-- arch/arm64/crypto/nhpoly1305-neon-glue.c | 77
-rw-r--r-- arch/arm64/include/asm/Kbuild | 2
-rw-r--r-- arch/arm64/include/asm/acpi.h | 19
-rw-r--r-- arch/arm64/include/asm/asm-prototypes.h | 26
-rw-r--r-- arch/arm64/include/asm/assembler.h | 90
-rw-r--r-- arch/arm64/include/asm/atomic_ll_sc.h | 63
-rw-r--r-- arch/arm64/include/asm/atomic_lse.h | 48
-rw-r--r-- arch/arm64/include/asm/barrier.h | 4
-rw-r--r-- arch/arm64/include/asm/cmpxchg.h | 116
-rw-r--r-- arch/arm64/include/asm/cpucaps.h | 8
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 124
-rw-r--r-- arch/arm64/include/asm/cputype.h | 2
-rw-r--r-- arch/arm64/include/asm/elf.h | 4
-rw-r--r-- arch/arm64/include/asm/esr.h | 17
-rw-r--r-- arch/arm64/include/asm/ftrace.h | 14
-rw-r--r-- arch/arm64/include/asm/image.h | 59
-rw-r--r-- arch/arm64/include/asm/insn.h | 8
-rw-r--r-- arch/arm64/include/asm/io.h | 32
-rw-r--r-- arch/arm64/include/asm/kexec.h | 19
-rw-r--r-- arch/arm64/include/asm/kvm_arm.h | 9
-rw-r--r-- arch/arm64/include/asm/kvm_asm.h | 7
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 35
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 15
-rw-r--r-- arch/arm64/include/asm/kvm_hyp.h | 8
-rw-r--r-- arch/arm64/include/asm/kvm_mmu.h | 48
-rw-r--r-- arch/arm64/include/asm/memory.h | 30
-rw-r--r-- arch/arm64/include/asm/mmu_context.h | 5
-rw-r--r-- arch/arm64/include/asm/module.h | 44
-rw-r--r-- arch/arm64/include/asm/neon-intrinsics.h | 39
-rw-r--r-- arch/arm64/include/asm/percpu.h | 390
-rw-r--r-- arch/arm64/include/asm/perf_event.h | 170
-rw-r--r-- arch/arm64/include/asm/pgtable-hwdef.h | 16
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 31
-rw-r--r-- arch/arm64/include/asm/pointer_auth.h | 97
-rw-r--r-- arch/arm64/include/asm/preempt.h | 89
-rw-r--r-- arch/arm64/include/asm/processor.h | 36
-rw-r--r-- arch/arm64/include/asm/smp.h | 15
-rw-r--r-- arch/arm64/include/asm/stackprotector.h | 3
-rw-r--r-- arch/arm64/include/asm/stage2_pgtable.h | 16
-rw-r--r-- arch/arm64/include/asm/sysreg.h | 109
-rw-r--r-- arch/arm64/include/asm/thread_info.h | 13
-rw-r--r-- arch/arm64/include/asm/tlbflush.h | 19
-rw-r--r-- arch/arm64/include/asm/uaccess.h | 3
-rw-r--r-- arch/arm64/include/asm/xor.h | 73
-rw-r--r-- arch/arm64/include/uapi/asm/hwcap.h | 3
-rw-r--r-- arch/arm64/include/uapi/asm/ptrace.h | 7
-rw-r--r-- arch/arm64/kernel/Makefile | 6
-rw-r--r-- arch/arm64/kernel/arm64ksyms.c | 88
-rw-r--r-- arch/arm64/kernel/asm-offsets.c | 3
-rw-r--r-- arch/arm64/kernel/cpu-reset.S | 8
-rw-r--r-- arch/arm64/kernel/cpu_errata.c | 169
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 312
-rw-r--r-- arch/arm64/kernel/cpuinfo.c | 3
-rw-r--r-- arch/arm64/kernel/entry-ftrace.S | 55
-rw-r--r-- arch/arm64/kernel/entry.S | 12
-rw-r--r-- arch/arm64/kernel/ftrace.c | 19
-rw-r--r-- arch/arm64/kernel/head.S | 46
-rw-r--r-- arch/arm64/kernel/hibernate-asm.S | 1
-rw-r--r-- arch/arm64/kernel/hibernate.c | 2
-rw-r--r-- arch/arm64/kernel/image.h | 67
-rw-r--r-- arch/arm64/kernel/insn.c | 29
-rw-r--r-- arch/arm64/kernel/kexec_image.c | 130
-rw-r--r-- arch/arm64/kernel/machine_kexec.c | 12
-rw-r--r-- arch/arm64/kernel/machine_kexec_file.c | 224
-rw-r--r-- arch/arm64/kernel/module-plts.c | 135
-rw-r--r-- arch/arm64/kernel/module.c | 13
-rw-r--r-- arch/arm64/kernel/perf_callchain.c | 6
-rw-r--r-- arch/arm64/kernel/perf_event.c | 221
-rw-r--r-- arch/arm64/kernel/pointer_auth.c | 47
-rw-r--r-- arch/arm64/kernel/process.c | 6
-rw-r--r-- arch/arm64/kernel/ptrace.c | 38
-rw-r--r-- arch/arm64/kernel/relocate_kernel.S | 3
-rw-r--r-- arch/arm64/kernel/setup.c | 1
-rw-r--r-- arch/arm64/kernel/smccc-call.S | 4
-rw-r--r-- arch/arm64/kernel/smp.c | 7
-rw-r--r-- arch/arm64/kernel/vmlinux.lds.S | 9
-rw-r--r-- arch/arm64/kvm/debug.c | 21
-rw-r--r-- arch/arm64/kvm/handle_exit.c | 32
-rw-r--r-- arch/arm64/kvm/hyp/entry.S | 1
-rw-r--r-- arch/arm64/kvm/hyp/hyp-entry.S | 4
-rw-r--r-- arch/arm64/kvm/hyp/switch.c | 68
-rw-r--r-- arch/arm64/kvm/hyp/tlb.c | 71
-rw-r--r-- arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c | 12
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 20
-rw-r--r-- arch/arm64/kvm/sys_regs.h | 4
-rw-r--r-- arch/arm64/kvm/trace.h | 35
-rw-r--r-- arch/arm64/lib/Makefile | 6
-rw-r--r-- arch/arm64/lib/clear_page.S | 1
-rw-r--r-- arch/arm64/lib/clear_user.S | 2
-rw-r--r-- arch/arm64/lib/copy_from_user.S | 4
-rw-r--r-- arch/arm64/lib/copy_in_user.S | 4
-rw-r--r-- arch/arm64/lib/copy_page.S | 1
-rw-r--r-- arch/arm64/lib/copy_to_user.S | 4
-rw-r--r-- arch/arm64/lib/crc32.S | 54
-rw-r--r-- arch/arm64/lib/memchr.S | 1
-rw-r--r-- arch/arm64/lib/memcmp.S | 1
-rw-r--r-- arch/arm64/lib/memcpy.S | 2
-rw-r--r-- arch/arm64/lib/memmove.S | 2
-rw-r--r-- arch/arm64/lib/memset.S | 2
-rw-r--r-- arch/arm64/lib/strchr.S | 1
-rw-r--r-- arch/arm64/lib/strcmp.S | 1
-rw-r--r-- arch/arm64/lib/strlen.S | 1
-rw-r--r-- arch/arm64/lib/strncmp.S | 1
-rw-r--r-- arch/arm64/lib/strnlen.S | 1
-rw-r--r-- arch/arm64/lib/strrchr.S | 1
-rw-r--r-- arch/arm64/lib/tishift.S | 5
-rw-r--r-- arch/arm64/lib/xor-neon.c | 184
-rw-r--r-- arch/arm64/mm/cache.S | 3
-rw-r--r-- arch/arm64/mm/dma-mapping.c | 2
-rw-r--r-- arch/arm64/mm/fault.c | 2
-rw-r--r-- arch/arm64/mm/hugetlbpage.c | 33
-rw-r--r-- arch/arm64/mm/init.c | 20
-rw-r--r-- arch/arm64/mm/mmu.c | 35
-rw-r--r-- arch/arm64/mm/numa.c | 10
-rw-r--r-- arch/arm64/mm/pageattr.c | 21
-rw-r--r-- arch/arm64/mm/proc.S | 14
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 49
-rw-r--r-- arch/csky/include/asm/mmu_context.h | 4
-rw-r--r-- arch/ia64/Makefile | 3
-rw-r--r-- arch/ia64/include/asm/Kbuild | 1
-rw-r--r-- arch/ia64/include/asm/unistd.h | 4
-rw-r--r-- arch/ia64/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/ia64/include/uapi/asm/unistd.h | 332
-rw-r--r-- arch/ia64/kernel/entry.S | 331
-rw-r--r-- arch/ia64/kernel/syscalls/Makefile | 40
-rw-r--r-- arch/ia64/kernel/syscalls/syscall.tbl | 337
-rw-r--r-- arch/ia64/kernel/syscalls/syscallhdr.sh | 36
-rw-r--r-- arch/ia64/kernel/syscalls/syscalltbl.sh | 32
-rw-r--r-- arch/m68k/Kconfig.machine | 2
-rw-r--r-- arch/m68k/Makefile | 3
-rw-r--r-- arch/m68k/configs/amiga_defconfig | 103
-rw-r--r-- arch/m68k/configs/apollo_defconfig | 93
-rw-r--r-- arch/m68k/configs/atari_defconfig | 109
-rw-r--r-- arch/m68k/configs/bvme6000_defconfig | 89
-rw-r--r-- arch/m68k/configs/hp300_defconfig | 91
-rw-r--r-- arch/m68k/configs/mac_defconfig | 93
-rw-r--r-- arch/m68k/configs/multi_defconfig | 123
-rw-r--r-- arch/m68k/configs/mvme147_defconfig | 87
-rw-r--r-- arch/m68k/configs/mvme16x_defconfig | 89
-rw-r--r-- arch/m68k/configs/q40_defconfig | 89
-rw-r--r-- arch/m68k/configs/sun3_defconfig | 81
-rw-r--r-- arch/m68k/configs/sun3x_defconfig | 83
-rw-r--r-- arch/m68k/include/asm/Kbuild | 1
-rw-r--r-- arch/m68k/include/asm/raw_io.h | 39
-rw-r--r-- arch/m68k/include/asm/unistd.h | 3
-rw-r--r-- arch/m68k/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/m68k/include/uapi/asm/unistd.h | 385
-rw-r--r-- arch/m68k/kernel/setup_mm.c | 2
-rw-r--r-- arch/m68k/kernel/syscalls/Makefile | 38
-rw-r--r-- arch/m68k/kernel/syscalls/syscall.tbl | 389
-rw-r--r-- arch/m68k/kernel/syscalls/syscallhdr.sh | 36
-rw-r--r-- arch/m68k/kernel/syscalls/syscalltbl.sh | 32
-rw-r--r-- arch/m68k/kernel/syscalltable.S | 387
-rw-r--r-- arch/m68k/mm/motorola.c | 2
-rw-r--r-- arch/microblaze/Makefile | 25
-rw-r--r-- arch/microblaze/boot/Makefile | 23
-rw-r--r-- arch/microblaze/boot/dts/Makefile | 5
-rw-r--r-- arch/microblaze/include/asm/Kbuild | 1
-rw-r--r--arch/microblaze/include/asm/pgtable.h2
-rw-r--r--arch/microblaze/include/asm/unistd.h2
-rw-r--r--arch/microblaze/include/uapi/asm/Kbuild1
-rw-r--r--arch/microblaze/include/uapi/asm/unistd.h407
-rw-r--r--arch/microblaze/kernel/ftrace.c15
-rw-r--r--arch/microblaze/kernel/syscall_table.S406
-rw-r--r--arch/microblaze/kernel/syscalls/Makefile38
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl410
-rw-r--r--arch/microblaze/kernel/syscalls/syscallhdr.sh36
-rw-r--r--arch/microblaze/kernel/syscalls/syscalltbl.sh32
-rw-r--r--arch/mips/Kconfig65
-rw-r--r--arch/mips/Makefile5
-rw-r--r--arch/mips/boot/compressed/calc_vmlinuz_load_addr.c7
-rw-r--r--arch/mips/boot/dts/img/boston.dts6
-rw-r--r--arch/mips/boot/dts/mti/malta.dts5
-rw-r--r--arch/mips/cavium-octeon/csrc-octeon.c1
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-bootmem.c149
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c68
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c38
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-util.c90
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c39
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c91
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-l2c.c2
-rw-r--r--arch/mips/cavium-octeon/executive/octeon-model.c12
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c4
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c4
-rw-r--r--arch/mips/cavium-octeon/octeon-usb.c6
-rw-r--r--arch/mips/cavium-octeon/setup.c8
-rw-r--r--arch/mips/cavium-octeon/smp.c4
-rw-r--r--arch/mips/configs/ar7_defconfig44
-rw-r--r--arch/mips/configs/ath25_defconfig25
-rw-r--r--arch/mips/configs/ath79_defconfig33
-rw-r--r--arch/mips/configs/bcm47xx_defconfig11
-rw-r--r--arch/mips/configs/bcm63xx_defconfig37
-rw-r--r--arch/mips/configs/bigsur_defconfig65
-rw-r--r--arch/mips/configs/bmips_be_defconfig22
-rw-r--r--arch/mips/configs/bmips_stb_defconfig23
-rw-r--r--arch/mips/configs/capcella_defconfig24
-rw-r--r--arch/mips/configs/cavium_octeon_defconfig44
-rw-r--r--arch/mips/configs/ci20_defconfig27
-rw-r--r--arch/mips/configs/cobalt_defconfig8
-rw-r--r--arch/mips/configs/db1xxx_defconfig47
-rw-r--r--arch/mips/configs/decstation_64_defconfig227
-rw-r--r--arch/mips/configs/decstation_defconfig163
-rw-r--r--arch/mips/configs/decstation_r4k_defconfig224
-rw-r--r--arch/mips/configs/e55_defconfig8
-rw-r--r--arch/mips/configs/fuloong2e_defconfig79
-rw-r--r--arch/mips/configs/gcw0_defconfig12
-rw-r--r--arch/mips/configs/generic_defconfig26
-rw-r--r--arch/mips/configs/gpr_defconfig112
-rw-r--r--arch/mips/configs/ip22_defconfig76
-rw-r--r--arch/mips/configs/ip27_defconfig149
-rw-r--r--arch/mips/configs/ip28_defconfig26
-rw-r--r--arch/mips/configs/ip32_defconfig41
-rw-r--r--arch/mips/configs/jazz_defconfig62
-rw-r--r--arch/mips/configs/jmr3927_defconfig13
-rw-r--r--arch/mips/configs/lasat_defconfig24
-rw-r--r--arch/mips/configs/lemote2f_defconfig143
-rw-r--r--arch/mips/configs/loongson1b_defconfig15
-rw-r--r--arch/mips/configs/loongson1c_defconfig17
-rw-r--r--arch/mips/configs/loongson3_defconfig70
-rw-r--r--arch/mips/configs/malta_defconfig42
-rw-r--r--arch/mips/configs/malta_kvm_defconfig59
-rw-r--r--arch/mips/configs/malta_kvm_guest_defconfig48
-rw-r--r--arch/mips/configs/malta_qemu_32r6_defconfig22
-rw-r--r--arch/mips/configs/maltaaprp_defconfig25
-rw-r--r--arch/mips/configs/maltasmvp_defconfig30
-rw-r--r--arch/mips/configs/maltasmvp_eva_defconfig30
-rw-r--r--arch/mips/configs/maltaup_defconfig21
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig44
-rw-r--r--arch/mips/configs/markeins_defconfig35
-rw-r--r--arch/mips/configs/mips_paravirt_defconfig35
-rw-r--r--arch/mips/configs/mpc30x_defconfig7
-rw-r--r--arch/mips/configs/msp71xx_defconfig20
-rw-r--r--arch/mips/configs/mtx1_defconfig307
-rw-r--r--arch/mips/configs/nlm_xlp_defconfig112
-rw-r--r--arch/mips/configs/nlm_xlr_defconfig145
-rw-r--r--arch/mips/configs/omega2p_defconfig28
-rw-r--r--arch/mips/configs/pic32mzda_defconfig12
-rw-r--r--arch/mips/configs/pistachio_defconfig78
-rw-r--r--arch/mips/configs/pnx8335_stb225_defconfig27
-rw-r--r--arch/mips/configs/qi_lb60_defconfig23
-rw-r--r--arch/mips/configs/rb532_defconfig49
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig24
-rw-r--r--arch/mips/configs/rm200_defconfig79
-rw-r--r--arch/mips/configs/rt305x_defconfig45
-rw-r--r--arch/mips/configs/sb1250_swarm_defconfig36
-rw-r--r--arch/mips/configs/tb0219_defconfig32
-rw-r--r--arch/mips/configs/tb0226_defconfig17
-rw-r--r--arch/mips/configs/tb0287_defconfig29
-rw-r--r--arch/mips/configs/vocore2_defconfig28
-rw-r--r--arch/mips/configs/workpad_defconfig18
-rw-r--r--arch/mips/configs/xway_defconfig32
-rw-r--r--arch/mips/include/asm/Kbuild4
-rw-r--r--arch/mips/include/asm/atomic.h27
-rw-r--r--arch/mips/include/asm/bitops.h42
-rw-r--r--arch/mips/include/asm/cmpxchg.h6
-rw-r--r--arch/mips/include/asm/compiler.h24
-rw-r--r--arch/mips/include/asm/cpu-features.h60
-rw-r--r--arch/mips/include/asm/cpu-info.h2
-rw-r--r--arch/mips/include/asm/cpu.h3
-rw-r--r--arch/mips/include/asm/dsemul.h29
-rw-r--r--arch/mips/include/asm/edac.h3
-rw-r--r--arch/mips/include/asm/elf.h26
-rw-r--r--arch/mips/include/asm/fpu.h145
-rw-r--r--arch/mips/include/asm/fpu_emulator.h11
-rw-r--r--arch/mips/include/asm/futex.h14
-rw-r--r--arch/mips/include/asm/hazards.h6
-rw-r--r--arch/mips/include/asm/io.h22
-rw-r--r--arch/mips/include/asm/kvm_host.h11
-rw-r--r--arch/mips/include/asm/local.h12
-rw-r--r--arch/mips/include/asm/mach-loongson64/kernel-entry-init.h4
-rw-r--r--arch/mips/include/asm/mach-loongson64/mmzone.h1
-rw-r--r--arch/mips/include/asm/mach-rc32434/rb.h6
-rw-r--r--arch/mips/include/asm/mipsmtregs.h7
-rw-r--r--arch/mips/include/asm/mipsregs.h30
-rw-r--r--arch/mips/include/asm/mmu.h3
-rw-r--r--arch/mips/include/asm/mmu_context.h10
-rw-r--r--arch/mips/include/asm/mmzone.h13
-rw-r--r--arch/mips/include/asm/octeon/cvmx-agl-defs.h699
-rw-r--r--arch/mips/include/asm/octeon/cvmx-asxx-defs.h105
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootmem.h76
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ciu2-defs.h7060
-rw-r--r--arch/mips/include/asm/octeon/cvmx-dbg-defs.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-dpi-defs.h178
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fpa-defs.h247
-rw-r--r--arch/mips/include/asm/octeon/cvmx-gmxx-defs.h4672
-rw-r--r--arch/mips/include/asm/octeon/cvmx-gpio-defs.h116
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-rgmii.h17
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-sgmii.h17
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-util.h23
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-xaui.h16
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper.h36
-rw-r--r--arch/mips/include/asm/octeon/cvmx-iob-defs.h375
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ipd-defs.h538
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2t-defs.h6
-rw-r--r--arch/mips/include/asm/octeon/cvmx-led-defs.h78
-rw-r--r--arch/mips/include/asm/octeon/cvmx-lmcx-defs.h514
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mio-defs.h1197
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mixx-defs.h136
-rw-r--r--arch/mips/include/asm/octeon/cvmx-npei-defs.h295
-rw-r--r--arch/mips/include/asm/octeon/cvmx-npi-defs.h235
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pci-defs.h392
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pcsx-defs.h185
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h146
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pemx-defs.h144
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pescx-defs.h59
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pip-defs.h688
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pko-defs.h619
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pko.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pow-defs.h317
-rw-r--r--arch/mips/include/asm/octeon/cvmx-rnm-defs.h53
-rw-r--r--arch/mips/include/asm/octeon/cvmx-rst-defs.h28
-rw-r--r--arch/mips/include/asm/octeon/cvmx-smix-defs.h88
-rw-r--r--arch/mips/include/asm/octeon/cvmx-spxx-defs.h62
-rw-r--r--arch/mips/include/asm/octeon/cvmx-sriox-defs.h123
-rw-r--r--arch/mips/include/asm/octeon/cvmx-srxx-defs.h22
-rw-r--r--arch/mips/include/asm/octeon/cvmx-stxx-defs.h64
-rw-r--r--arch/mips/include/asm/octeon/cvmx-uctlx-defs.h89
-rw-r--r--arch/mips/include/asm/page.h1
-rw-r--r--arch/mips/include/asm/pgtable-64.h5
-rw-r--r--arch/mips/include/asm/pgtable.h6
-rw-r--r--arch/mips/include/asm/processor.h19
-rw-r--r--arch/mips/include/asm/r4kcache.h22
-rw-r--r--arch/mips/include/asm/stackframe.h3
-rw-r--r--arch/mips/include/asm/switch_to.h6
-rw-r--r--arch/mips/include/asm/syscall.h2
-rw-r--r--arch/mips/include/asm/uasm.h1
-rw-r--r--arch/mips/include/asm/unistd.h3
-rw-r--r--arch/mips/include/uapi/asm/Kbuild6
-rw-r--r--arch/mips/include/uapi/asm/inst.h3
-rw-r--r--arch/mips/include/uapi/asm/sgidefs.h8
-rw-r--r--arch/mips/include/uapi/asm/unistd.h1074
-rw-r--r--arch/mips/kernel/Makefile5
-rw-r--r--arch/mips/kernel/asm-offsets.c7
-rw-r--r--arch/mips/kernel/bmips_5xxx_init.S6
-rw-r--r--arch/mips/kernel/branch.c41
-rw-r--r--arch/mips/kernel/cpu-bugs64.c4
-rw-r--r--arch/mips/kernel/cpu-probe.c64
-rw-r--r--arch/mips/kernel/elf.c4
-rw-r--r--arch/mips/kernel/ftrace.c20
-rw-r--r--arch/mips/kernel/genex.S5
-rw-r--r--arch/mips/kernel/idle.c7
-rw-r--r--arch/mips/kernel/mips-mt.c11
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c39
-rw-r--r--arch/mips/kernel/process.c9
-rw-r--r--arch/mips/kernel/ptrace.c466
-rw-r--r--arch/mips/kernel/ptrace32.c33
-rw-r--r--arch/mips/kernel/r2300_fpu.S58
-rw-r--r--arch/mips/kernel/r4k_fpu.S144
-rw-r--r--arch/mips/kernel/scall32-o32.S391
-rw-r--r--arch/mips/kernel/scall64-64.S444
-rw-r--r--arch/mips/kernel/scall64-n32.S341
-rw-r--r--arch/mips/kernel/scall64-n64.S117
-rw-r--r--arch/mips/kernel/scall64-o32.S379
-rw-r--r--arch/mips/kernel/signal.c39
-rw-r--r--arch/mips/kernel/syscall.c6
-rw-r--r--arch/mips/kernel/syscalls/Makefile96
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl343
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl339
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl382
-rw-r--r--arch/mips/kernel/syscalls/syscallhdr.sh37
-rw-r--r--arch/mips/kernel/syscalls/syscallnr.sh28
-rw-r--r--arch/mips/kernel/syscalls/syscalltbl.sh36
-rw-r--r--arch/mips/kernel/traps.c124
-rw-r--r--arch/mips/kernel/unaligned.c40
-rw-r--r--arch/mips/kernel/vdso.c4
-rw-r--r--arch/mips/kernel/vmlinux.lds.S4
-rw-r--r--arch/mips/kernel/watch.c13
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/kvm/mips.c29
-rw-r--r--arch/mips/kvm/mmu.c3
-rw-r--r--arch/mips/loongson64/common/env.c3
-rw-r--r--arch/mips/loongson64/loongson-3/cop2-ex.c7
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c3
-rw-r--r--arch/mips/math-emu/cp1emu.c7
-rw-r--r--arch/mips/math-emu/dsemul.c38
-rw-r--r--arch/mips/math-emu/me-debugfs.c12
-rw-r--r--arch/mips/mm/c-r3k.c2
-rw-r--r--arch/mips/mm/c-r4k.c48
-rw-r--r--arch/mips/mm/tlbex.c1
-rw-r--r--arch/mips/mm/uasm-micromips.c1
-rw-r--r--arch/mips/mm/uasm-mips.c1
-rw-r--r--arch/mips/mm/uasm.c9
-rw-r--r--arch/mips/mti-malta/Makefile1
-rw-r--r--arch/mips/mti-malta/malta-display.c56
-rw-r--r--arch/mips/mti-malta/malta-init.c3
-rw-r--r--arch/mips/mti-malta/malta-setup.c2
-rw-r--r--arch/mips/mti-malta/malta-time.c2
-rw-r--r--arch/mips/net/bpf_jit.c18
-rw-r--r--arch/mips/net/ebpf_jit.c4
-rw-r--r--arch/mips/pci/fixup-sb1250.c53
-rw-r--r--arch/mips/pci/pci-rt3883.c6
-rw-r--r--arch/mips/ralink/mt7620.c2
-rw-r--r--arch/mips/rb532/devices.c12
-rw-r--r--arch/mips/sibyte/common/Makefile1
-rw-r--r--arch/mips/sibyte/common/dma.c14
-rw-r--r--arch/mips/vdso/Makefile1
-rw-r--r--arch/nds32/kernel/ftrace.c18
-rw-r--r--arch/parisc/Makefile10
-rw-r--r--arch/parisc/include/asm/Kbuild3
-rw-r--r--arch/parisc/include/asm/alternative.h4
-rw-r--r--arch/parisc/include/asm/unistd.h8
-rw-r--r--arch/parisc/include/uapi/asm/Kbuild2
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h379
-rw-r--r--arch/parisc/kernel/Makefile2
-rw-r--r--arch/parisc/kernel/alternative.c98
-rw-r--r--arch/parisc/kernel/ftrace.c17
-rw-r--r--arch/parisc/kernel/module.c14
-rw-r--r--arch/parisc/kernel/setup.c80
-rw-r--r--arch/parisc/kernel/syscall.S11
-rw-r--r--arch/parisc/kernel/syscall_table.S459
-rw-r--r--arch/parisc/kernel/syscalls/Makefile55
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl369
-rw-r--r--arch/parisc/kernel/syscalls/syscallhdr.sh36
-rw-r--r--arch/parisc/kernel/syscalls/syscalltbl.sh36
-rw-r--r--arch/powerpc/Kconfig15
-rw-r--r--arch/powerpc/Makefile32
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/boot/crt0.S4
-rw-r--r--arch/powerpc/boot/dts/bamboo.dts4
-rw-r--r--arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/b4si-post.dtsi15
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8641_hpcn.dts128
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8641_hpcn_36b.dts128
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8641si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/p1020rdb-pc.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-post.dtsi18
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/p3041si-post.dtsi18
-rw-r--r--arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080si-post.dtsi70
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi16
-rw-r--r--arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/p5040si-post.dtsi18
-rw-r--r--arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi47
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi30
-rw-r--r--arch/powerpc/boot/dts/fsl/t1023si-post.dtsi16
-rw-r--r--arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040si-post.dtsi44
-rw-r--r--arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/t2081si-post.dtsi22
-rw-r--r--arch/powerpc/boot/dts/fsl/t208xsi-pre.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240si-post.dtsi61
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi24
-rw-r--r--arch/powerpc/boot/dts/mpc832x_rdb.dts4
-rw-r--r--arch/powerpc/boot/serial.c3
-rw-r--r--arch/powerpc/configs/fsl-emb-nonhw.config1
-rw-r--r--arch/powerpc/configs/g5_defconfig1
-rw-r--r--arch/powerpc/configs/guest.config13
-rw-r--r--arch/powerpc/configs/maple_defconfig1
-rw-r--r--arch/powerpc/configs/pmac32_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig81
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig2
-rw-r--r--arch/powerpc/include/asm/Kbuild4
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h1
-rw-r--r--arch/powerpc/include/asm/book3s/32/hash.h1
-rw-r--r--arch/powerpc/include/asm/book3s/32/mmu-hash.h15
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgalloc.h40
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h46
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h9
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgalloc.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/cache.h2
-rw-r--r--arch/powerpc/include/asm/code-patching.h23
-rw-r--r--arch/powerpc/include/asm/cputable.h9
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h7
-rw-r--r--arch/powerpc/include/asm/fadump.h7
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h12
-rw-r--r--arch/powerpc/include/asm/hugetlb.h15
-rw-r--r--arch/powerpc/include/asm/hvcall.h1
-rw-r--r--arch/powerpc/include/asm/io.h13
-rw-r--r--arch/powerpc/include/asm/iommu.h17
-rw-r--r--arch/powerpc/include/asm/ipic.h1
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h23
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h18
-rw-r--r--arch/powerpc/include/asm/kvm_host.h5
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h10
-rw-r--r--arch/powerpc/include/asm/mmu.h49
-rw-r--r--arch/powerpc/include/asm/mmu_context.h32
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-40x.h (renamed from arch/powerpc/include/asm/mmu-40x.h)0
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-44x.h (renamed from arch/powerpc/include/asm/mmu-44x.h)3
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-8xx.h (renamed from arch/powerpc/include/asm/mmu-8xx.h)4
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu.h25
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgalloc.h27
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h15
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-40x.h2
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-8xx.h3
-rw-r--r--arch/powerpc/include/asm/nohash/64/mmu.h12
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgalloc.h5
-rw-r--r--arch/powerpc/include/asm/nohash/mmu-book3e.h (renamed from arch/powerpc/include/asm/mmu-book3e.h)0
-rw-r--r--arch/powerpc/include/asm/nohash/mmu.h11
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h4
-rw-r--r--arch/powerpc/include/asm/opal.h1
-rw-r--r--arch/powerpc/include/asm/page.h18
-rw-r--r--arch/powerpc/include/asm/page_32.h3
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h1
-rw-r--r--arch/powerpc/include/asm/pci.h4
-rw-r--r--arch/powerpc/include/asm/perf_event.h5
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h2
-rw-r--r--arch/powerpc/include/asm/pgtable-types.h4
-rw-r--r--arch/powerpc/include/asm/pgtable.h32
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h3
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h29
-rw-r--r--arch/powerpc/include/asm/reg.h4
-rw-r--r--arch/powerpc/include/asm/setup.h7
-rw-r--r--arch/powerpc/include/asm/sfp-machine.h92
-rw-r--r--arch/powerpc/include/asm/slice.h14
-rw-r--r--arch/powerpc/include/asm/syscall.h3
-rw-r--r--arch/powerpc/include/asm/systbl.h396
-rw-r--r--arch/powerpc/include/asm/time.h2
-rw-r--r--arch/powerpc/include/asm/tlb.h2
-rw-r--r--arch/powerpc/include/asm/uaccess.h2
-rw-r--r--arch/powerpc/include/asm/unistd.h3
-rw-r--r--arch/powerpc/include/uapi/asm/Kbuild3
-rw-r--r--arch/powerpc/include/uapi/asm/bpf_perf_event.h9
-rw-r--r--arch/powerpc/include/uapi/asm/perf_regs.h1
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h389
-rw-r--r--arch/powerpc/kernel/Makefile12
-rw-r--r--arch/powerpc/kernel/btext.c16
-rw-r--r--arch/powerpc/kernel/cacheinfo.c2
-rw-r--r--arch/powerpc/kernel/cpu_setup_6xx.S2
-rw-r--r--arch/powerpc/kernel/cpu_setup_fsl_booke.S2
-rw-r--r--arch/powerpc/kernel/cputable.c10
-rw-r--r--arch/powerpc/kernel/dma-iommu.c2
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c6
-rw-r--r--arch/powerpc/kernel/dma.c31
-rw-r--r--arch/powerpc/kernel/eeh.c20
-rw-r--r--arch/powerpc/kernel/eeh_driver.c2
-rw-r--r--arch/powerpc/kernel/eeh_event.c9
-rw-r--r--arch/powerpc/kernel/entry_32.S10
-rw-r--r--arch/powerpc/kernel/entry_64.S16
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S26
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S11
-rw-r--r--arch/powerpc/kernel/fadump.c154
-rw-r--r--arch/powerpc/kernel/head_32.S20
-rw-r--r--arch/powerpc/kernel/head_44x.S11
-rw-r--r--arch/powerpc/kernel/head_8xx.S369
-rw-r--r--arch/powerpc/kernel/head_booke.h6
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S15
-rw-r--r--arch/powerpc/kernel/iommu.c69
-rw-r--r--arch/powerpc/kernel/isa-bridge.c3
-rw-r--r--arch/powerpc/kernel/legacy_serial.c16
-rw-r--r--arch/powerpc/kernel/machine_kexec_file_64.c54
-rw-r--r--arch/powerpc/kernel/misc_32.S4
-rw-r--r--arch/powerpc/kernel/msi.c7
-rw-r--r--arch/powerpc/kernel/nvram_64.c3
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c11
-rw-r--r--arch/powerpc/kernel/pmc.c2
-rw-r--r--arch/powerpc/kernel/prom.c10
-rw-r--r--arch/powerpc/kernel/ptrace.c49
-rw-r--r--arch/powerpc/kernel/security.c29
-rw-r--r--arch/powerpc/kernel/setup-common.c7
-rw-r--r--arch/powerpc/kernel/setup_32.c6
-rw-r--r--arch/powerpc/kernel/signal_32.c50
-rw-r--r--arch/powerpc/kernel/signal_64.c71
-rw-r--r--arch/powerpc/kernel/syscalls/Makefile63
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl427
-rw-r--r--arch/powerpc/kernel/syscalls/syscallhdr.sh37
-rw-r--r--arch/powerpc/kernel/syscalls/syscalltbl.sh36
-rw-r--r--arch/powerpc/kernel/sysfs.c2
-rw-r--r--arch/powerpc/kernel/systbl.S40
-rw-r--r--arch/powerpc/kernel/systbl_chk.c60
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c17
-rw-r--r--arch/powerpc/kernel/traps.c3
-rw-r--r--arch/powerpc/kernel/vdso.c7
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S18
-rw-r--r--arch/powerpc/kvm/book3s.c8
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c12
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c160
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c18
-rw-r--r--arch/powerpc/kvm/book3s_hv.c96
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c190
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c2
-rw-r--r--arch/powerpc/kvm/book3s_pr.c4
-rw-r--r--arch/powerpc/kvm/book3s_xics.c12
-rw-r--r--arch/powerpc/kvm/book3s_xive.c12
-rw-r--r--arch/powerpc/kvm/booke.c3
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S4
-rw-r--r--arch/powerpc/kvm/e500.h2
-rw-r--r--arch/powerpc/kvm/e500_emulate.c7
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c3
-rw-r--r--arch/powerpc/kvm/powerpc.c47
-rw-r--r--arch/powerpc/lib/code-patching.c16
-rw-r--r--arch/powerpc/lib/feature-fixups.c27
-rw-r--r--arch/powerpc/mm/44x_mmu.c14
-rw-r--r--arch/powerpc/mm/8xx_mmu.c10
-rw-r--r--arch/powerpc/mm/Makefile11
-rw-r--r--arch/powerpc/mm/dma-noncoherent.c15
-rw-r--r--arch/powerpc/mm/dump_bats.c173
-rw-r--r--arch/powerpc/mm/dump_linuxpagetables-generic.c2
-rw-r--r--arch/powerpc/mm/dump_linuxpagetables.c1
-rw-r--r--arch/powerpc/mm/dump_sr.c64
-rw-r--r--arch/powerpc/mm/fault.c50
-rw-r--r--arch/powerpc/mm/hash_low_32.S33
-rw-r--r--arch/powerpc/mm/hugetlbpage.c42
-rw-r--r--arch/powerpc/mm/init-common.c56
-rw-r--r--arch/powerpc/mm/init_64.c19
-rw-r--r--arch/powerpc/mm/mem.c51
-rw-r--r--arch/powerpc/mm/mmu_context.c10
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c15
-rw-r--r--arch/powerpc/mm/mmu_context_iommu.c110
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c4
-rw-r--r--arch/powerpc/mm/mmu_decl.h2
-rw-r--r--arch/powerpc/mm/numa.c2
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c88
-rw-r--r--arch/powerpc/mm/pgtable-frag.c119
-rw-r--r--arch/powerpc/mm/pgtable.c26
-rw-r--r--arch/powerpc/mm/pgtable_32.c29
-rw-r--r--arch/powerpc/mm/pkeys.c25
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c51
-rw-r--r--arch/powerpc/mm/tlb_low_64e.S7
-rw-r--r--arch/powerpc/net/bpf_jit.h4
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c15
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c129
-rw-r--r--arch/powerpc/oprofile/Makefile2
-rw-r--r--arch/powerpc/oprofile/common.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c39
-rw-r--r--arch/powerpc/perf/imc-pmu.c6
-rw-r--r--arch/powerpc/perf/isa207-common.c58
-rw-r--r--arch/powerpc/perf/isa207-common.h9
-rw-r--r--arch/powerpc/perf/perf_regs.c7
-rw-r--r--arch/powerpc/perf/power9-pmu.c22
-rw-r--r--arch/powerpc/platforms/44x/warp.c6
-rw-r--r--arch/powerpc/platforms/4xx/ocm.c17
-rw-r--r--arch/powerpc/platforms/4xx/pci.c7
-rw-r--r--arch/powerpc/platforms/512x/Kconfig2
-rw-r--r--arch/powerpc/platforms/52xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/52xx/efika.c6
-rw-r--r--arch/powerpc/platforms/82xx/Kconfig4
-rw-r--r--arch/powerpc/platforms/83xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/83xx/misc.c17
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c10
-rw-r--r--arch/powerpc/platforms/85xx/qemu_e500.c9
-rw-r--r--arch/powerpc/platforms/85xx/t1042rdb_diu.c2
-rw-r--r--arch/powerpc/platforms/86xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_smp.c3
-rw-r--r--arch/powerpc/platforms/Kconfig8
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype21
-rw-r--r--arch/powerpc/platforms/amigaone/Kconfig2
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c6
-rw-r--r--arch/powerpc/platforms/cell/setup.c5
-rw-r--r--arch/powerpc/platforms/cell/spu_callbacks.c17
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c10
-rw-r--r--arch/powerpc/platforms/chrp/Kconfig2
-rw-r--r--arch/powerpc/platforms/chrp/pci.c4
-rw-r--r--arch/powerpc/platforms/chrp/setup.c10
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/maple/pci.c6
-rw-r--r--arch/powerpc/platforms/pasemi/dma_lib.c4
-rw-r--r--arch/powerpc/platforms/pasemi/pci.c66
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c86
-rw-r--r--arch/powerpc/platforms/powermac/cache.S4
-rw-r--r--arch/powerpc/platforms/powermac/feature.c14
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c11
-rw-r--r--arch/powerpc/platforms/powermac/pci.c27
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_base.c7
-rw-r--r--arch/powerpc/platforms/powermac/pic.c6
-rw-r--r--arch/powerpc/platforms/powermac/setup.c10
-rw-r--r--arch/powerpc/platforms/powermac/sleep.S4
-rw-r--r--arch/powerpc/platforms/powermac/smp.c3
-rw-r--r--arch/powerpc/platforms/powermac/udbg_adb.c2
-rw-r--r--arch/powerpc/platforms/powermac/udbg_scc.c2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c8
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c564
-rw-r--r--arch/powerpc/platforms/powernv/opal-power.c3
-rw-r--r--arch/powerpc/platforms/powernv/opal.c5
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda-tce.c5
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c265
-rw-r--r--arch/powerpc/platforms/powernv/pci.c49
-rw-r--r--arch/powerpc/platforms/powernv/pci.h36
-rw-r--r--arch/powerpc/platforms/powernv/vas-debug.c28
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig3
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c16
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c88
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c39
-rw-r--r--arch/powerpc/platforms/pseries/pci.c22
-rw-r--r--arch/powerpc/platforms/pseries/pmem.c8
-rw-r--r--arch/powerpc/platforms/pseries/setup.c12
-rw-r--r--arch/powerpc/platforms/pseries/vio.c27
-rw-r--r--arch/powerpc/sysdev/Makefile2
-rw-r--r--arch/powerpc/sysdev/fsl_rio.h2
-rw-r--r--arch/powerpc/sysdev/fsl_rmu.c4
-rw-r--r--arch/powerpc/sysdev/ipic.c28
-rw-r--r--arch/powerpc/sysdev/scom.c4
-rw-r--r--arch/powerpc/sysdev/xive/common.c2
-rwxr-xr-xarch/powerpc/tools/checkpatch.sh1
-rw-r--r--arch/powerpc/xmon/xmon.c33
-rw-r--r--arch/riscv/Kconfig58
-rw-r--r--arch/riscv/Kconfig.debug2
-rw-r--r--arch/riscv/configs/defconfig1
-rw-r--r--arch/riscv/include/asm/atomic.h9
-rw-r--r--arch/riscv/kernel/cacheinfo.c11
-rw-r--r--arch/riscv/kernel/cpu.c1
-rw-r--r--arch/riscv/kernel/cpufeature.c2
-rw-r--r--arch/riscv/kernel/ftrace.c15
-rw-r--r--arch/riscv/kernel/perf_event.c1
-rw-r--r--arch/riscv/kernel/setup.c28
-rw-r--r--arch/riscv/kernel/smpboot.c6
-rw-r--r--arch/riscv/kernel/time.c1
-rw-r--r--arch/riscv/lib/tishift.S59
-rw-r--r--arch/riscv/lib/udivdi3.S42
-rw-r--r--arch/s390/crypto/aes_s390.c2
-rw-r--r--arch/s390/include/asm/preempt.h2
-rw-r--r--arch/s390/kernel/ftrace.c13
-rw-r--r--arch/s390/kernel/machine_kexec_file.c10
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c35
-rw-r--r--arch/s390/mm/pgalloc.c3
-rw-r--r--arch/s390/net/bpf_jit_comp.c12
-rw-r--r--arch/sh/Makefile3
-rw-r--r--arch/sh/boards/mach-dreamcast/Makefile4
-rw-r--r--arch/sh/boards/mach-dreamcast/rtc.c45
-rw-r--r--arch/sh/boards/mach-dreamcast/setup.c1
-rw-r--r--arch/sh/boards/mach-sh03/Makefile3
-rw-r--r--arch/sh/boards/mach-sh03/rtc.c51
-rw-r--r--arch/sh/boards/mach-sh03/setup.c9
-rw-r--r--arch/sh/boards/of-generic.c8
-rw-r--r--arch/sh/configs/dreamcast_defconfig2
-rw-r--r--arch/sh/configs/sh03_defconfig2
-rw-r--r--arch/sh/include/asm/Kbuild1
-rw-r--r--arch/sh/include/asm/io.h1
-rw-r--r--arch/sh/include/asm/rtc.h3
-rw-r--r--arch/sh/include/asm/unistd.h2
-rw-r--r--arch/sh/include/mach-dreamcast/mach/sysasic.h1
-rw-r--r--arch/sh/include/uapi/asm/Kbuild1
-rw-r--r--arch/sh/include/uapi/asm/unistd_32.h4
-rw-r--r--arch/sh/include/uapi/asm/unistd_64.h4
-rw-r--r--arch/sh/kernel/ftrace.c16
-rw-r--r--arch/sh/kernel/syscalls/Makefile38
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl392
-rw-r--r--arch/sh/kernel/syscalls/syscallhdr.sh36
-rw-r--r--arch/sh/kernel/syscalls/syscalltbl.sh32
-rw-r--r--arch/sh/kernel/syscalls_32.S387
-rw-r--r--arch/sh/kernel/time.c74
-rw-r--r--arch/sparc/Makefile3
-rw-r--r--arch/sparc/crypto/aes_glue.c5
-rw-r--r--arch/sparc/crypto/camellia_glue.c5
-rw-r--r--arch/sparc/crypto/des_glue.c5
-rw-r--r--arch/sparc/include/asm/Kbuild4
-rw-r--r--arch/sparc/include/asm/floppy_64.h8
-rw-r--r--arch/sparc/include/asm/leon.h1
-rw-r--r--arch/sparc/include/asm/parport.h2
-rw-r--r--arch/sparc/include/asm/unistd.h18
-rw-r--r--arch/sparc/include/uapi/asm/Kbuild2
-rw-r--r--arch/sparc/include/uapi/asm/unistd.h426
-rw-r--r--arch/sparc/kernel/auxio_64.c11
-rw-r--r--arch/sparc/kernel/central.c2
-rw-r--r--arch/sparc/kernel/chmc.c8
-rw-r--r--arch/sparc/kernel/ftrace.c11
-rw-r--r--arch/sparc/kernel/iommu.c3
-rw-r--r--arch/sparc/kernel/irq_64.c2
-rw-r--r--arch/sparc/kernel/leon_kernel.c14
-rw-r--r--arch/sparc/kernel/of_device_32.c21
-rw-r--r--arch/sparc/kernel/of_device_64.c58
-rw-r--r--arch/sparc/kernel/of_device_common.c4
-rw-r--r--arch/sparc/kernel/pci.c44
-rw-r--r--arch/sparc/kernel/pci_sabre.c2
-rw-r--r--arch/sparc/kernel/power.c4
-rw-r--r--arch/sparc/kernel/process_32.c2
-rw-r--r--arch/sparc/kernel/prom_32.c44
-rw-r--r--arch/sparc/kernel/prom_64.c75
-rw-r--r--arch/sparc/kernel/prom_irqtrans.c20
-rw-r--r--arch/sparc/kernel/reboot.c3
-rw-r--r--arch/sparc/kernel/sbus.c4
-rw-r--r--arch/sparc/kernel/setup_32.c13
-rw-r--r--arch/sparc/kernel/setup_64.c4
-rw-r--r--arch/sparc/kernel/signal32.c1
-rw-r--r--arch/sparc/kernel/signal_32.c1
-rw-r--r--arch/sparc/kernel/signal_64.c1
-rw-r--r--arch/sparc/kernel/sun4d_irq.c14
-rw-r--r--arch/sparc/kernel/syscalls/Makefile55
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl409
-rw-r--r--arch/sparc/kernel/syscalls/syscallhdr.sh36
-rw-r--r--arch/sparc/kernel/syscalls/syscalltbl.sh36
-rw-r--r--arch/sparc/kernel/systbls_32.S81
-rw-r--r--arch/sparc/kernel/systbls_64.S157
-rw-r--r--arch/sparc/kernel/time_64.c16
-rw-r--r--arch/sparc/kernel/vio.c9
-rw-r--r--arch/sparc/net/bpf_jit_comp_32.c13
-rw-r--r--arch/sparc/net/bpf_jit_comp_64.c98
-rw-r--r--arch/sparc/oprofile/init.c2
-rw-r--r--arch/sparc/vdso/Makefile2
-rw-r--r--arch/x86/Kbuild2
-rw-r--r--arch/x86/Kconfig40
-rw-r--r--arch/x86/Makefile22
-rw-r--r--arch/x86/Makefile.um9
-rw-r--r--arch/x86/boot/boot.h2
-rw-r--r--arch/x86/boot/compressed/eboot.c65
-rw-r--r--arch/x86/boot/header.S6
-rw-r--r--arch/x86/boot/memory.c31
-rw-r--r--arch/x86/boot/tools/build.c1
-rw-r--r--arch/x86/crypto/Makefile18
-rw-r--r--arch/x86/crypto/aesni-intel_avx-x86_64.S2125
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c353
-rw-r--r--arch/x86/crypto/cast5_avx_glue.c2
-rw-r--r--arch/x86/crypto/cast6_avx_glue.c2
-rw-r--r--arch/x86/crypto/chacha-avx2-x86_64.S1025
-rw-r--r--arch/x86/crypto/chacha-avx512vl-x86_64.S836
-rw-r--r--arch/x86/crypto/chacha-ssse3-x86_64.S (renamed from arch/x86/crypto/chacha20-ssse3-x86_64.S)327
-rw-r--r--arch/x86/crypto/chacha20-avx2-x86_64.S448
-rw-r--r--arch/x86/crypto/chacha20_glue.c146
-rw-r--r--arch/x86/crypto/chacha_glue.c304
-rw-r--r--arch/x86/crypto/nh-avx2-x86_64.S157
-rw-r--r--arch/x86/crypto/nh-sse2-x86_64.S123
-rw-r--r--arch/x86/crypto/nhpoly1305-avx2-glue.c77
-rw-r--r--arch/x86/crypto/nhpoly1305-sse2-glue.c76
-rw-r--r--arch/x86/crypto/poly1305_glue.c20
-rw-r--r--arch/x86/entry/calling.h2
-rw-r--r--arch/x86/entry/common.c2
-rw-r--r--arch/x86/entry/entry_64.S4
-rw-r--r--arch/x86/entry/vdso/Makefile7
-rw-r--r--arch/x86/entry/vdso/vdso-layout.lds.S27
-rw-r--r--arch/x86/entry/vdso/vdso2c.c8
-rw-r--r--arch/x86/entry/vdso/vma.c2
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c2
-rw-r--r--arch/x86/events/core.c20
-rw-r--r--arch/x86/events/intel/bts.c2
-rw-r--r--arch/x86/events/intel/core.c70
-rw-r--r--arch/x86/events/intel/ds.c2
-rw-r--r--arch/x86/events/intel/p4.c2
-rw-r--r--arch/x86/events/intel/pt.c60
-rw-r--r--arch/x86/events/intel/pt.h58
-rw-r--r--arch/x86/events/perf_event.h13
-rw-r--r--arch/x86/hyperv/nested.c80
-rw-r--r--arch/x86/include/asm/alternative-asm.h20
-rw-r--r--arch/x86/include/asm/alternative.h13
-rw-r--r--arch/x86/include/asm/arch_hweight.h10
-rw-r--r--arch/x86/include/asm/asm.h53
-rw-r--r--arch/x86/include/asm/bootparam_utils.h1
-rw-r--r--arch/x86/include/asm/bug.h98
-rw-r--r--arch/x86/include/asm/cmpxchg.h2
-rw-r--r--arch/x86/include/asm/cpufeature.h82
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/crash.h1
-rw-r--r--arch/x86/include/asm/disabled-features.h8
-rw-r--r--arch/x86/include/asm/efi.h10
-rw-r--r--arch/x86/include/asm/fpu/api.h15
-rw-r--r--arch/x86/include/asm/fpu/internal.h5
-rw-r--r--arch/x86/include/asm/fsgsbase.h15
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h335
-rw-r--r--arch/x86/include/asm/intel_pt.h26
-rw-r--r--arch/x86/include/asm/irq.h7
-rw-r--r--arch/x86/include/asm/irq_work.h1
-rw-r--r--arch/x86/include/asm/jump_label.h72
-rw-r--r--arch/x86/include/asm/kvm_host.h28
-rw-r--r--arch/x86/include/asm/kvm_para.h1
-rw-r--r--arch/x86/include/asm/mshyperv.h15
-rw-r--r--arch/x86/include/asm/msr-index.h43
-rw-r--r--arch/x86/include/asm/nospec-branch.h27
-rw-r--r--arch/x86/include/asm/paravirt.h5
-rw-r--r--arch/x86/include/asm/paravirt_types.h56
-rw-r--r--arch/x86/include/asm/pci_x86.h7
-rw-r--r--arch/x86/include/asm/pgalloc.h27
-rw-r--r--arch/x86/include/asm/pgtable_64_types.h5
-rw-r--r--arch/x86/include/asm/pgtable_types.h8
-rw-r--r--arch/x86/include/asm/preempt.h3
-rw-r--r--arch/x86/include/asm/reboot.h1
-rw-r--r--arch/x86/include/asm/refcount.h81
-rw-r--r--arch/x86/include/asm/resctrl_sched.h (renamed from arch/x86/include/asm/intel_rdt_sched.h)28
-rw-r--r--arch/x86/include/asm/setup.h3
-rw-r--r--arch/x86/include/asm/sighandling.h5
-rw-r--r--arch/x86/include/asm/smp.h6
-rw-r--r--arch/x86/include/asm/spec-ctrl.h20
-rw-r--r--arch/x86/include/asm/svm.h7
-rw-r--r--arch/x86/include/asm/switch_to.h3
-rw-r--r--arch/x86/include/asm/thread_info.h28
-rw-r--r--arch/x86/include/asm/tlbflush.h8
-rw-r--r--arch/x86/include/asm/trace/exceptions.h1
-rw-r--r--arch/x86/include/asm/trace/hyperv.h14
-rw-r--r--arch/x86/include/asm/trace/irq_vectors.h1
-rw-r--r--arch/x86/include/asm/traps.h59
-rw-r--r--arch/x86/include/asm/tsc.h1
-rw-r--r--arch/x86/include/asm/vmx.h9
-rw-r--r--arch/x86/include/asm/x86_init.h2
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h7
-rw-r--r--arch/x86/kernel/acpi/boot.c4
-rw-r--r--arch/x86/kernel/amd_nb.c53
-rw-r--r--arch/x86/kernel/aperture_64.c25
-rw-r--r--arch/x86/kernel/apic/apic.c1
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c7
-rw-r--r--arch/x86/kernel/apic/vector.c1
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c4
-rw-r--r--arch/x86/kernel/asm-offsets.c3
-rw-r--r--arch/x86/kernel/check.c3
-rw-r--r--arch/x86/kernel/cpu/Makefile7
-rw-r--r--arch/x86/kernel/cpu/amd.c1
-rw-r--r--arch/x86/kernel/cpu/aperfmperf.c1
-rw-r--r--arch/x86/kernel/cpu/bugs.c546
-rw-r--r--arch/x86/kernel/cpu/cacheinfo.c1
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/cpu.h3
-rw-r--r--arch/x86/kernel/cpu/mce/Makefile (renamed from arch/x86/kernel/cpu/mcheck/Makefile)10
-rw-r--r--arch/x86/kernel/cpu/mce/amd.c (renamed from arch/x86/kernel/cpu/mcheck/mce_amd.c)26
-rw-r--r--arch/x86/kernel/cpu/mce/apei.c (renamed from arch/x86/kernel/cpu/mcheck/mce-apei.c)2
-rw-r--r--arch/x86/kernel/cpu/mce/core.c (renamed from arch/x86/kernel/cpu/mcheck/mce.c)6
-rw-r--r--arch/x86/kernel/cpu/mce/dev-mcelog.c (renamed from arch/x86/kernel/cpu/mcheck/dev-mcelog.c)4
-rw-r--r--arch/x86/kernel/cpu/mce/genpool.c (renamed from arch/x86/kernel/cpu/mcheck/mce-genpool.c)2
-rw-r--r--arch/x86/kernel/cpu/mce/inject.c (renamed from arch/x86/kernel/cpu/mcheck/mce-inject.c)2
-rw-r--r--arch/x86/kernel/cpu/mce/intel.c (renamed from arch/x86/kernel/cpu/mcheck/mce_intel.c)2
-rw-r--r--arch/x86/kernel/cpu/mce/internal.h (renamed from arch/x86/kernel/cpu/mcheck/mce-internal.h)3
-rw-r--r--arch/x86/kernel/cpu/mce/p5.c (renamed from arch/x86/kernel/cpu/mcheck/p5.c)2
-rw-r--r--arch/x86/kernel/cpu/mce/severity.c (renamed from arch/x86/kernel/cpu/mcheck/mce-severity.c)2
-rw-r--r--arch/x86/kernel/cpu/mce/therm_throt.c (renamed from arch/x86/kernel/cpu/mcheck/therm_throt.c)5
-rw-r--r--arch/x86/kernel/cpu/mce/threshold.c (renamed from arch/x86/kernel/cpu/mcheck/threshold.c)5
-rw-r--r--arch/x86/kernel/cpu/mce/winchip.c (renamed from arch/x86/kernel/cpu/mcheck/winchip.c)2
-rw-r--r--arch/x86/kernel/cpu/microcode/amd.c470
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/Makefile4
-rw-r--r--arch/x86/kernel/cpu/resctrl/core.c (renamed from arch/x86/kernel/cpu/intel_rdt.c)186
-rw-r--r--arch/x86/kernel/cpu/resctrl/ctrlmondata.c (renamed from arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c)111
-rw-r--r--arch/x86/kernel/cpu/resctrl/internal.h (renamed from arch/x86/kernel/cpu/intel_rdt.h)55
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c (renamed from arch/x86/kernel/cpu/intel_rdt_monitor.c)16
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock.c (renamed from arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c)40
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h (renamed from arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h)2
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c (renamed from arch/x86/kernel/cpu/intel_rdt_rdtgroup.c)61
-rw-r--r--arch/x86/kernel/cpu/scattered.c34
-rw-r--r--arch/x86/kernel/cpu/topology.c2
-rw-r--r--arch/x86/kernel/crash.c1
-rw-r--r--arch/x86/kernel/crash_dump_64.c2
-rw-r--r--arch/x86/kernel/devicetree.c1
-rw-r--r--arch/x86/kernel/fpu/core.c6
-rw-r--r--arch/x86/kernel/fpu/signal.c4
-rw-r--r--arch/x86/kernel/fpu/xstate.c6
-rw-r--r--arch/x86/kernel/ftrace.c15
-rw-r--r--arch/x86/kernel/head32.c1
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/head_64.S2
-rw-r--r--arch/x86/kernel/jailhouse.c1
-rw-r--r--arch/x86/kernel/kprobes/core.c10
-rw-r--r--arch/x86/kernel/kprobes/opt.c2
-rw-r--r--arch/x86/kernel/kvmclock.c15
-rw-r--r--arch/x86/kernel/macros.S16
-rw-r--r--arch/x86/kernel/process.c106
-rw-r--r--arch/x86/kernel/process.h39
-rw-r--r--arch/x86/kernel/process_32.c17
-rw-r--r--arch/x86/kernel/process_64.c120
-rw-r--r--arch/x86/kernel/ptrace.c9
-rw-r--r--arch/x86/kernel/quirks.c1
-rw-r--r--arch/x86/kernel/setup.c17
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/sysfb_efi.c3
-rw-r--r--arch/x86/kernel/tracepoint.c2
-rw-r--r--arch/x86/kvm/Makefile2
-rw-r--r--arch/x86/kvm/cpuid.c31
-rw-r--r--arch/x86/kvm/hyperv.c305
-rw-r--r--arch/x86/kvm/hyperv.h4
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h2
-rw-r--r--arch/x86/kvm/lapic.c12
-rw-r--r--arch/x86/kvm/mmu.c125
-rw-r--r--arch/x86/kvm/paging_tmpl.h3
-rw-r--r--arch/x86/kvm/svm.c116
-rw-r--r--arch/x86/kvm/trace.h10
-rw-r--r--arch/x86/kvm/vmx.c15218
-rw-r--r--arch/x86/kvm/vmx/capabilities.h343
-rw-r--r--arch/x86/kvm/vmx/evmcs.c (renamed from arch/x86/kvm/vmx_evmcs.h)78
-rw-r--r--arch/x86/kvm/vmx/evmcs.h202
-rw-r--r--arch/x86/kvm/vmx/nested.c5721
-rw-r--r--arch/x86/kvm/vmx/nested.h282
-rw-r--r--arch/x86/kvm/vmx/ops.h285
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c (renamed from arch/x86/kvm/pmu_intel.c)0
-rw-r--r--arch/x86/kvm/vmx/vmcs.h136
-rw-r--r--arch/x86/kvm/vmx/vmcs12.c157
-rw-r--r--arch/x86/kvm/vmx/vmcs12.h462
-rw-r--r--arch/x86/kvm/vmx/vmcs_shadow_fields.h (renamed from arch/x86/kvm/vmx_shadow_fields.h)0
-rw-r--r--arch/x86/kvm/vmx/vmenter.S57
-rw-r--r--arch/x86/kvm/vmx/vmx.c7935
-rw-r--r--arch/x86/kvm/vmx/vmx.h519
-rw-r--r--arch/x86/kvm/x86.c175
-rw-r--r--arch/x86/mm/debug_pagetables.c58
-rw-r--r--arch/x86/mm/dump_pagetables.c15
-rw-r--r--arch/x86/mm/fault.c244
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/x86/mm/init_64.c30
-rw-r--r--arch/x86/mm/mm_internal.h2
-rw-r--r--arch/x86/mm/pageattr-test.c31
-rw-r--r--arch/x86/mm/pageattr.c309
-rw-r--r--arch/x86/mm/pat.c13
-rw-r--r--arch/x86/mm/pkeys.c1
-rw-r--r--arch/x86/mm/tlb.c119
-rw-r--r--arch/x86/net/bpf_jit_comp.c2
-rw-r--r--arch/x86/pci/i386.c2
-rw-r--r--arch/x86/platform/ce4100/ce4100.c4
-rw-r--r--arch/x86/platform/efi/early_printk.c2
-rw-r--r--arch/x86/platform/efi/efi.c2
-rw-r--r--arch/x86/platform/efi/quirks.c41
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c2
-rw-r--r--arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c2
-rw-r--r--arch/x86/platform/intel/iosf_mbi.c2
-rw-r--r--arch/x86/platform/olpc/olpc-xo1-sci.c2
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c18
-rw-r--r--arch/x86/platform/pvh/Makefile5
-rw-r--r--arch/x86/platform/pvh/enlighten.c137
-rw-r--r--arch/x86/platform/pvh/head.S (renamed from arch/x86/xen/xen-pvh.S)0
-rw-r--r--arch/x86/platform/uv/uv_nmi.c2
-rw-r--r--arch/x86/um/vdso/Makefile2
-rw-r--r--arch/x86/xen/Kconfig3
-rw-r--r--arch/x86/xen/Makefile2
-rw-r--r--arch/x86/xen/enlighten.c78
-rw-r--r--arch/x86/xen/enlighten_pvh.c92
-rw-r--r--arch/x86/xen/mmu_pv.c11
-rw-r--r--arch/x86/xen/multicalls.c35
-rw-r--r--arch/x86/xen/setup.c8
-rw-r--r--arch/x86/xen/spinlock.c7
-rw-r--r--arch/x86/xen/xen-asm_64.S2
-rw-r--r--arch/xtensa/kernel/asm-offsets.c16
-rw-r--r--arch/xtensa/kernel/process.c5
-rw-r--r--arch/xtensa/kernel/ptrace.c42
-rw-r--r--block/Kconfig6
-rw-r--r--block/Kconfig.iosched61
-rw-r--r--block/Makefile5
-rw-r--r--block/bfq-cgroup.c6
-rw-r--r--block/bfq-iosched.c97
-rw-r--r--block/bfq-iosched.h51
-rw-r--r--block/bfq-wf2q.c5
-rw-r--r--block/bio-integrity.c2
-rw-r--r--block/bio.c205
-rw-r--r--block/blk-cgroup.c272
-rw-r--r--block/blk-core.c2066
-rw-r--r--block/blk-exec.c20
-rw-r--r--block/blk-flush.c188
-rw-r--r--block/blk-ioc.c54
-rw-r--r--block/blk-iolatency.c75
-rw-r--r--block/blk-merge.c55
-rw-r--r--block/blk-mq-cpumap.c19
-rw-r--r--block/blk-mq-debugfs.c147
-rw-r--r--block/blk-mq-debugfs.h17
-rw-r--r--block/blk-mq-pci.c10
-rw-r--r--block/blk-mq-rdma.c8
-rw-r--r--block/blk-mq-sched.c82
-rw-r--r--block/blk-mq-sched.h25
-rw-r--r--block/blk-mq-sysfs.c35
-rw-r--r--block/blk-mq-tag.c41
-rw-r--r--block/blk-mq-virtio.c8
-rw-r--r--block/blk-mq.c758
-rw-r--r--block/blk-mq.h70
-rw-r--r--block/blk-pm.c20
-rw-r--r--block/blk-pm.h6
-rw-r--r--block/blk-rq-qos.c154
-rw-r--r--block/blk-rq-qos.h96
-rw-r--r--block/blk-settings.c65
-rw-r--r--block/blk-softirq.c27
-rw-r--r--block/blk-stat.c4
-rw-r--r--block/blk-stat.h5
-rw-r--r--block/blk-sysfs.c107
-rw-r--r--block/blk-tag.c378
-rw-r--r--block/blk-throttle.c39
-rw-r--r--block/blk-timeout.c117
-rw-r--r--block/blk-wbt.c176
-rw-r--r--block/blk-zoned.c4
-rw-r--r--block/blk.h188
-rw-r--r--block/bounce.c3
-rw-r--r--block/bsg-lib.c146
-rw-r--r--block/bsg.c2
-rw-r--r--block/cfq-iosched.c4916
-rw-r--r--block/deadline-iosched.c560
-rw-r--r--block/elevator.c477
-rw-r--r--block/genhd.c63
-rw-r--r--block/kyber-iosched.c37
-rw-r--r--block/mq-deadline.c15
-rw-r--r--block/noop-iosched.c124
-rw-r--r--block/partition-generic.c18
-rw-r--r--crypto/Kconfig97
-rw-r--r--crypto/Makefile8
-rw-r--r--crypto/ablkcipher.c94
-rw-r--r--crypto/acompress.c10
-rw-r--r--crypto/adiantum.c664
-rw-r--r--crypto/aead.c14
-rw-r--r--crypto/aes_generic.c9
-rw-r--r--crypto/aes_ti.c18
-rw-r--r--crypto/ahash.c29
-rw-r--r--crypto/akcipher.c11
-rw-r--r--crypto/algapi.c247
-rw-r--r--crypto/blkcipher.c20
-rw-r--r--crypto/cbc.c6
-rw-r--r--crypto/cfb.c8
-rw-r--r--crypto/chacha20_generic.c137
-rw-r--r--crypto/chacha20poly1305.c12
-rw-r--r--crypto/chacha_generic.c217
-rw-r--r--crypto/cryptd.c4
-rw-r--r--crypto/crypto_user_base.c136
-rw-r--r--crypto/crypto_user_stat.c301
-rw-r--r--crypto/ctr.c2
-rw-r--r--crypto/ecc.c58
-rw-r--r--crypto/hash_info.c4
-rw-r--r--crypto/kpp.c10
-rw-r--r--crypto/lz4.c1
-rw-r--r--crypto/lz4hc.c1
-rw-r--r--crypto/nhpoly1305.c254
-rw-r--r--crypto/pcbc.c6
-rw-r--r--crypto/pcrypt.c4
-rw-r--r--crypto/poly1305_generic.c174
-rw-r--r--crypto/rng.c16
-rw-r--r--crypto/salsa20_generic.c2
-rw-r--r--crypto/scompress.c11
-rw-r--r--crypto/shash.c12
-rw-r--r--crypto/skcipher.c23
-rw-r--r--crypto/streebog_generic.c1140
-rw-r--r--crypto/tcrypt.c59
-rw-r--r--crypto/testmgr.c62
-rw-r--r--crypto/testmgr.h3220
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/Kconfig5
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/acpi_apd.c6
-rw-r--r--drivers/acpi/acpi_lpss.c7
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/acglobal.h4
-rw-r--r--drivers/acpi/acpica/achware.h9
-rw-r--r--drivers/acpi/acpica/acnamesp.h1
-rw-r--r--drivers/acpi/acpica/acstruct.h5
-rw-r--r--drivers/acpi/acpica/dbxface.c118
-rw-r--r--drivers/acpi/acpica/dsmethod.c14
-rw-r--r--drivers/acpi/acpica/dsobject.c11
-rw-r--r--drivers/acpi/acpica/dspkginit.c26
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswload.c6
-rw-r--r--drivers/acpi/acpica/dswload2.c15
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evhandler.c8
-rw-r--r--drivers/acpi/acpica/exconvrt.c49
-rw-r--r--drivers/acpi/acpica/excreate.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c6
-rw-r--r--drivers/acpi/acpica/exregion.c4
-rw-r--r--drivers/acpi/acpica/exserial.c19
-rw-r--r--drivers/acpi/acpica/exutils.c3
-rw-r--r--drivers/acpi/acpica/nsaccess.c23
-rw-r--r--drivers/acpi/acpica/nseval.c13
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c12
-rw-r--r--drivers/acpi/acpica/psloop.c10
-rw-r--r--drivers/acpi/acpica/psobject.c3
-rw-r--r--drivers/acpi/acpica/psparse.c15
-rw-r--r--drivers/acpi/acpica/psxface.c6
-rw-r--r--drivers/acpi/acpica/utglobal.c3
-rw-r--r--drivers/acpi/acpica/utmisc.c3
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/apei/einj.c12
-rw-r--r--drivers/acpi/apei/erst.c3
-rw-r--r--drivers/acpi/apei/ghes.c2
-rw-r--r--drivers/acpi/arm64/iort.c22
-rw-r--r--drivers/acpi/bus.c1
-rw-r--r--drivers/acpi/ec.c12
-rw-r--r--drivers/acpi/glue.c21
-rw-r--r--drivers/acpi/internal.h8
-rw-r--r--drivers/acpi/nfit/core.c2
-rw-r--r--drivers/acpi/osi.c7
-rw-r--r--drivers/acpi/osl.c2
-rw-r--r--drivers/acpi/reboot.c40
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/sleep.c16
-rw-r--r--drivers/acpi/spcr.c11
-rw-r--r--drivers/acpi/tables.c12
-rw-r--r--drivers/android/binder.c21
-rw-r--r--drivers/android/binder_alloc.c16
-rw-r--r--drivers/android/binder_alloc.h3
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/libata-eh.c4
-rw-r--r--drivers/ata/pata_palmld.c83
-rw-r--r--drivers/ata/pata_pxa.c1
-rw-r--r--drivers/ata/pata_rb532_cf.c45
-rw-r--r--drivers/ata/sata_highbank.c37
-rw-r--r--drivers/ata/sata_rcar.c4
-rw-r--r--drivers/atm/firestream.c4
-rw-r--r--drivers/atm/fore200e.c4
-rw-r--r--drivers/auxdisplay/charlcd.c3
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/core.c34
-rw-r--r--drivers/base/devres.c10
-rw-r--r--drivers/base/platform-msi.c6
-rw-r--r--drivers/base/platform.c1
-rw-r--r--drivers/base/power/domain.c343
-rw-r--r--drivers/base/power/runtime.c63
-rw-r--r--drivers/base/property.c513
-rw-r--r--drivers/base/regmap/regcache-rbtree.c12
-rw-r--r--drivers/base/regmap/regmap-debugfs.c12
-rw-r--r--drivers/base/regmap/regmap-irq.c142
-rw-r--r--drivers/base/swnode.c675
-rw-r--r--drivers/block/aoe/aoe.h4
-rw-r--r--drivers/block/aoe/aoeblk.c1
-rw-r--r--drivers/block/aoe/aoecmd.c27
-rw-r--r--drivers/block/aoe/aoedev.c11
-rw-r--r--drivers/block/aoe/aoemain.c2
-rw-r--r--drivers/block/ataflop.c26
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c2
-rw-r--r--drivers/block/floppy.c6
-rw-r--r--drivers/block/loop.c415
-rw-r--r--drivers/block/loop.h1
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c226
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h48
-rw-r--r--drivers/block/nbd.c3
-rw-r--r--drivers/block/null_blk.h1
-rw-r--r--drivers/block/null_blk_main.c21
-rw-r--r--drivers/block/null_blk_zoned.c27
-rw-r--r--drivers/block/paride/pd.c30
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/skd_main.c16
-rw-r--r--drivers/block/sunvdc.c153
-rw-r--r--drivers/block/sx8.c434
-rw-r--r--drivers/block/umem.c3
-rw-r--r--drivers/block/virtio_blk.c17
-rw-r--r--drivers/bluetooth/btbcm.c14
-rw-r--r--drivers/bluetooth/btusb.c80
-rw-r--r--drivers/bluetooth/hci_bcm.c134
-rw-r--r--drivers/bluetooth/hci_h5.c81
-rw-r--r--drivers/bluetooth/hci_intel.c20
-rw-r--r--drivers/bluetooth/hci_serdev.c3
-rw-r--r--drivers/char/hw_random/bcm2835-rng.c7
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2
-rw-r--r--drivers/char/random.c51
-rw-r--r--drivers/clk/Kconfig9
-rw-r--r--drivers/clk/Makefile3
-rw-r--r--drivers/clk/bcm/clk-bcm2835-aux.c13
-rw-r--r--drivers/clk/bcm/clk-bcm2835.c14
-rw-r--r--drivers/clk/clk-axm5516.c2
-rw-r--r--drivers/clk/clk-bd718x7.c123
-rw-r--r--drivers/clk/clk-bulk.c13
-rw-r--r--drivers/clk/clk-composite.c13
-rw-r--r--drivers/clk/clk-conf.c5
-rw-r--r--drivers/clk/clk-devres.c7
-rw-r--r--drivers/clk/clk-divider.c5
-rw-r--r--drivers/clk/clk-fixed-factor.c7
-rw-r--r--drivers/clk/clk-fixed-rate.c5
-rw-r--r--drivers/clk/clk-fractional-divider.c15
-rw-r--r--drivers/clk/clk-gate.c5
-rw-r--r--drivers/clk/clk-gpio.c5
-rw-r--r--drivers/clk/clk-hi655x.c4
-rw-r--r--drivers/clk/clk-max77686.c2
-rw-r--r--drivers/clk/clk-multiplier.c5
-rw-r--r--drivers/clk/clk-mux.c5
-rw-r--r--drivers/clk/clk-nomadik.c16
-rw-r--r--drivers/clk/clk-palmas.c2
-rw-r--r--drivers/clk/clk-qoriq.c11
-rw-r--r--drivers/clk/clk-rk808.c15
-rw-r--r--drivers/clk/clk-s2mps11.c2
-rw-r--r--drivers/clk/clk-stm32mp1.c2
-rw-r--r--drivers/clk/clk-twl6040.c5
-rw-r--r--drivers/clk/clk-versaclock5.c25
-rw-r--r--drivers/clk/clk.c47
-rw-r--r--drivers/clk/clk.h7
-rw-r--r--drivers/clk/h8300/clk-h8s2678.c2
-rw-r--r--drivers/clk/hisilicon/clk-hi3620.c2
-rw-r--r--drivers/clk/hisilicon/clk-hisi-phase.c2
-rw-r--r--drivers/clk/hisilicon/clk-hix5hd2.c2
-rw-r--r--drivers/clk/hisilicon/clkgate-separated.c2
-rw-r--r--drivers/clk/imgtec/clk-boston.c21
-rw-r--r--drivers/clk/imx/Kconfig22
-rw-r--r--drivers/clk/imx/Makefile19
-rw-r--r--drivers/clk/imx/clk-busy.c2
-rw-r--r--drivers/clk/imx/clk-composite-7ulp.c87
-rw-r--r--drivers/clk/imx/clk-composite-8m.c178
-rw-r--r--drivers/clk/imx/clk-divider-gate.c221
-rw-r--r--drivers/clk/imx/clk-fixup-mux.c2
-rw-r--r--drivers/clk/imx/clk-frac-pll.c232
-rw-r--r--drivers/clk/imx/clk-imx6q.c93
-rw-r--r--drivers/clk/imx/clk-imx6sl.c6
-rw-r--r--drivers/clk/imx/clk-imx7d.c3
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c249
-rw-r--r--drivers/clk/imx/clk-imx8mq.c589
-rw-r--r--drivers/clk/imx/clk-imx8qxp-lpcg.c216
-rw-r--r--drivers/clk/imx/clk-imx8qxp-lpcg.h102
-rw-r--r--drivers/clk/imx/clk-imx8qxp.c153
-rw-r--r--drivers/clk/imx/clk-lpcg-scu.c116
-rw-r--r--drivers/clk/imx/clk-pfdv2.c203
-rw-r--r--drivers/clk/imx/clk-pllv4.c184
-rw-r--r--drivers/clk/imx/clk-sccg-pll.c256
-rw-r--r--drivers/clk/imx/clk-scu.c270
-rw-r--r--drivers/clk/imx/clk-scu.h18
-rw-r--r--drivers/clk/imx/clk.c22
-rw-r--r--drivers/clk/imx/clk.h160
-rw-r--r--drivers/clk/loongson1/clk.c8
-rw-r--r--drivers/clk/mediatek/Kconfig23
-rw-r--r--drivers/clk/mediatek/Makefile3
-rw-r--r--drivers/clk/mediatek/clk-cpumux.c8
-rw-r--r--drivers/clk/mediatek/clk-mt7622.c4
-rw-r--r--drivers/clk/mediatek/clk-mt7629-eth.c159
-rw-r--r--drivers/clk/mediatek/clk-mt7629-hif.c156
-rw-r--r--drivers/clk/mediatek/clk-mt7629.c723
-rw-r--r--drivers/clk/meson/Makefile3
-rw-r--r--drivers/clk/meson/axg-audio.c83
-rw-r--r--drivers/clk/meson/clk-input.c44
-rw-r--r--drivers/clk/meson/clk-pll.c19
-rw-r--r--drivers/clk/meson/clk-regmap.c5
-rw-r--r--drivers/clk/meson/clk-regmap.h1
-rw-r--r--drivers/clk/meson/clkc.h11
-rw-r--r--drivers/clk/meson/gxbb.c779
-rw-r--r--drivers/clk/meson/gxbb.h26
-rw-r--r--drivers/clk/meson/meson8b.c1139
-rw-r--r--drivers/clk/meson/meson8b.h69
-rw-r--r--drivers/clk/meson/vid-pll-div.c91
-rw-r--r--drivers/clk/mmp/clk.c2
-rw-r--r--drivers/clk/mvebu/cp110-system-controller.c4
-rw-r--r--drivers/clk/pistachio/clk-pll.c8
-rw-r--r--drivers/clk/pxa/clk-pxa.c4
-rw-r--r--drivers/clk/qcom/Kconfig61
-rw-r--r--drivers/clk/qcom/Makefile2
-rw-r--r--drivers/clk/qcom/apcs-msm8916.c6
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c45
-rw-r--r--drivers/clk/qcom/common.c18
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c4
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c271
-rw-r--r--drivers/clk/qcom/gcc-qcs404.c2
-rw-r--r--drivers/clk/qcom/gcc-sdm845.c35
-rw-r--r--drivers/clk/qcom/gdsc.c6
-rw-r--r--drivers/clk/qcom/gpucc-sdm845.c252
-rw-r--r--drivers/clk/qcom/lpasscc-sdm845.c179
-rw-r--r--drivers/clk/renesas/clk-div6.c2
-rw-r--r--drivers/clk/renesas/clk-mstp.c4
-rw-r--r--drivers/clk/renesas/r7s9210-cpg-mssr.c9
-rw-r--r--drivers/clk/renesas/r8a774a1-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a77970-cpg-mssr.c5
-rw-r--r--drivers/clk/renesas/r8a77990-cpg-mssr.c4
-rw-r--r--drivers/clk/renesas/r8a77995-cpg-mssr.c15
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c8
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c57
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c2
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c13
-rw-r--r--drivers/clk/rockchip/clk-rk3328.c4
-rw-r--r--drivers/clk/samsung/clk-s3c2410-dclk.c2
-rw-r--r--drivers/clk/st/clk-flexgen.c2
-rw-r--r--drivers/clk/st/clkgen-fsyn.c4
-rw-r--r--drivers/clk/st/clkgen-pll.c2
-rw-r--r--drivers/clk/sunxi-ng/Kconfig6
-rw-r--r--drivers/clk/sunxi-ng/Makefile1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c48
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c10
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c43
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c71
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.h4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c11
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c541
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h34
-rw-r--r--drivers/clk/sunxi-ng/ccu_mp.c64
-rw-r--r--drivers/clk/sunxi-ng/ccu_nm.c18
-rw-r--r--drivers/clk/tegra/clk-audio-sync.c3
-rw-r--r--drivers/clk/tegra/clk-dfll.c12
-rw-r--r--drivers/clk/tegra/clk-pll.c7
-rw-r--r--drivers/clk/tegra/clk-tegra-audio.c7
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c3
-rw-r--r--drivers/clk/tegra/clk-tegra114.c9
-rw-r--r--drivers/clk/tegra/clk-tegra124.c9
-rw-r--r--drivers/clk/tegra/clk-tegra20.c46
-rw-r--r--drivers/clk/tegra/clk-tegra210.c9
-rw-r--r--drivers/clk/tegra/clk-tegra30.c15
-rw-r--r--drivers/clk/tegra/clk.h4
-rw-r--r--drivers/clk/ti/clkctrl.c2
-rw-r--r--drivers/clk/ti/dpll.c2
-rw-r--r--drivers/clk/ux500/u8500_of_clk.c10
-rw-r--r--drivers/clk/versatile/clk-sp810.c2
-rw-r--r--drivers/clk/zynqmp/clkc.c5
-rw-r--r--drivers/clocksource/Kconfig26
-rw-r--r--drivers/clocksource/Makefile11
-rw-r--r--drivers/clocksource/arc_timer.c22
-rw-r--r--drivers/clocksource/bcm2835_timer.c15
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c22
-rw-r--r--drivers/clocksource/meson6_timer.c178
-rw-r--r--drivers/clocksource/nomadik-mtu.c4
-rw-r--r--drivers/clocksource/timer-fttmr010.c73
-rw-r--r--drivers/clocksource/timer-imx-gpt.c21
-rw-r--r--drivers/clocksource/timer-imx-tpm.c139
-rw-r--r--drivers/clocksource/timer-integrator-ap.c25
-rw-r--r--drivers/clocksource/timer-meson6.c220
-rw-r--r--drivers/clocksource/timer-rda.c195
-rw-r--r--drivers/clocksource/timer-riscv.c (renamed from drivers/clocksource/riscv_timer.c)9
-rw-r--r--drivers/clocksource/timer-rockchip.c (renamed from drivers/clocksource/rockchip_timer.c)0
-rw-r--r--drivers/clocksource/timer-sun4i.c (renamed from drivers/clocksource/sun4i_timer.c)0
-rw-r--r--drivers/clocksource/timer-tegra20.c (renamed from drivers/clocksource/tegra20_timer.c)1
-rw-r--r--drivers/clocksource/timer-ti-dm.c1
-rw-r--r--drivers/clocksource/timer-vt8500.c2
-rw-r--r--drivers/cpufreq/Kconfig.arm11
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c3
-rw-r--r--drivers/cpufreq/cpufreq.c1
-rw-r--r--drivers/cpufreq/cpufreq_governor.c2
-rw-r--r--drivers/cpufreq/ia64-acpi-cpufreq.c5
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c17
-rw-r--r--drivers/cpufreq/intel_pstate.c30
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c4
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c1
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c17
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c308
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq-debugfs.c46
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c7
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c8
-rw-r--r--drivers/cpuidle/cpuidle.c32
-rw-r--r--drivers/cpuidle/cpuidle.h1
-rw-r--r--drivers/cpuidle/governor.c9
-rw-r--r--drivers/cpuidle/poll_state.c11
-rw-r--r--drivers/cpuidle/sysfs.c6
-rw-r--r--drivers/crypto/Kconfig4
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c3
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c2
-rw-r--r--drivers/crypto/bcm/cipher.c9
-rw-r--r--drivers/crypto/caam/caamalg.c266
-rw-r--r--drivers/crypto/caam/caamalg_desc.c139
-rw-r--r--drivers/crypto/caam/caamalg_desc.h5
-rw-r--r--drivers/crypto/caam/caamalg_qi.c37
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c156
-rw-r--r--drivers/crypto/caam/caamhash.c20
-rw-r--r--drivers/crypto/caam/caampkc.c10
-rw-r--r--drivers/crypto/caam/caamrng.c10
-rw-r--r--drivers/crypto/caam/compat.h2
-rw-r--r--drivers/crypto/caam/ctrl.c28
-rw-r--r--drivers/crypto/caam/desc.h28
-rw-r--r--drivers/crypto/caam/desc_constr.h7
-rw-r--r--drivers/crypto/caam/regs.h74
-rw-r--r--drivers/crypto/cavium/nitrox/Makefile5
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_aead.c364
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_algs.c456
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_common.h6
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_csr.h12
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_debugfs.c48
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_debugfs.h21
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_dev.h74
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.c114
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.h2
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.c92
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.h2
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c22
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_mbx.c204
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_mbx.h9
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_req.h326
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c302
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_skcipher.c498
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_sriov.c94
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-cmac.c4
-rw-r--r--drivers/crypto/ccree/cc_aead.c35
-rw-r--r--drivers/crypto/ccree/cc_cipher.c104
-rw-r--r--drivers/crypto/ccree/cc_crypto_ctx.h4
-rw-r--r--drivers/crypto/ccree/cc_driver.c50
-rw-r--r--drivers/crypto/ccree/cc_driver.h15
-rw-r--r--drivers/crypto/ccree/cc_hash.c189
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h30
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c418
-rw-r--r--drivers/crypto/chelsio/chcr_algo.h2
-rw-r--r--drivers/crypto/chelsio/chcr_core.c195
-rw-r--r--drivers/crypto/chelsio/chcr_core.h44
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h10
-rw-r--r--drivers/crypto/chelsio/chcr_ipsec.c187
-rw-r--r--drivers/crypto/chelsio/chtls/chtls.h7
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c81
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_io.c20
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c105
-rw-r--r--drivers/crypto/geode-aes.c2
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c8
-rw-r--r--drivers/crypto/ixp4xx_crypto.c5
-rw-r--r--drivers/crypto/mxc-scc.c12
-rw-r--r--drivers/crypto/mxs-dcp.c28
-rw-r--r--drivers/crypto/nx/nx-aes-ctr.c1
-rw-r--r--drivers/crypto/omap-aes.c3
-rw-r--r--drivers/crypto/omap-des.c1
-rw-r--r--drivers/crypto/picoxcell_crypto.c3
-rw-r--r--drivers/crypto/qce/ablkcipher.c1
-rw-r--r--drivers/crypto/qce/sha.c1
-rw-r--r--drivers/crypto/sahara.c1
-rw-r--r--drivers/crypto/talitos.c1
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c4
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c2
-rw-r--r--drivers/devfreq/devfreq.c153
-rw-r--r--drivers/dma-buf/dma-fence.c36
-rw-r--r--drivers/dma-buf/reservation.c189
-rw-r--r--drivers/dma/at_hdmac.c10
-rw-r--r--drivers/dma/dw/core.c6
-rw-r--r--drivers/dma/imx-sdma.c69
-rw-r--r--drivers/dma/ti/cppi41.c16
-rw-r--r--drivers/edac/Kconfig6
-rw-r--r--drivers/edac/e752x_edac.c4
-rw-r--r--drivers/edac/edac_mc.c9
-rw-r--r--drivers/edac/edac_mc_sysfs.c33
-rw-r--r--drivers/edac/fsl_ddr_edac.c8
-rw-r--r--drivers/edac/fsl_ddr_edac.h4
-rw-r--r--drivers/edac/i3000_edac.c4
-rw-r--r--drivers/edac/i5000_edac.c13
-rw-r--r--drivers/edac/i7core_edac.c2
-rw-r--r--drivers/edac/i82975x_edac.c21
-rw-r--r--drivers/edac/qcom_edac.c8
-rw-r--r--drivers/edac/skx_edac.c151
-rw-r--r--drivers/edac/synopsys_edac.c1204
-rw-r--r--drivers/firmware/efi/arm-runtime.c2
-rw-r--r--drivers/firmware/efi/efi-pstore.c4
-rw-r--r--drivers/firmware/efi/efi.c88
-rw-r--r--drivers/firmware/efi/libstub/Makefile5
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c4
-rw-r--r--drivers/firmware/efi/libstub/fdt.c30
-rw-r--r--drivers/firmware/efi/vars.c99
-rw-r--r--drivers/fsi/Kconfig1
-rw-r--r--drivers/fsi/fsi-scom.c1
-rw-r--r--drivers/gnss/sirf.c6
-rw-r--r--drivers/gpio/gpio-davinci.c2
-rw-r--r--drivers/gpio/gpio-max7301.c12
-rw-r--r--drivers/gpio/gpio-max77620.c96
-rw-r--r--drivers/gpio/gpio-mvebu.c6
-rw-r--r--drivers/gpio/gpio-omap.c64
-rw-r--r--drivers/gpio/gpiolib-acpi.c144
-rw-r--r--drivers/gpio/gpiolib-devres.c80
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpio/gpiolib.h6
-rw-r--r--drivers/gpu/drm/Makefile9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h156
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c121
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c313
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c117
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c472
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h243
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c245
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c282
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h98
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c142
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c278
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c373
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c413
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c173
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c239
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c747
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h130
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_regs.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c145
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c15
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c64
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c38
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c35
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c34
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c32
-rw-r--r--drivers/gpu/drm/amd/display/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c942
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h120
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c115
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c20
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h104
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c172
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c91
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c840
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c104
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c884
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h (renamed from drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h)105
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c947
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c77
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c91
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c334
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c375
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c257
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c96
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c203
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c98
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_event_log.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/compressor.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h (renamed from drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h)34
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h21
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c208
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h11
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c47
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h14
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_shared.h27
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c15
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/Makefile31
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c326
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h47
-rw-r--r--drivers/gpu/drm/amd/include/amd_acpi.h175
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h32
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h35
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h119
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c134
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c49
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c36
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c31
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c36
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c195
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7_common.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c78
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c49
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c1
-rw-r--r--drivers/gpu/drm/arc/arcpgu.h4
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c3
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c38
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c14
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c28
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c1
-rw-r--r--drivers/gpu/drm/ast/ast_main.c3
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c36
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c64
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c2
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c15
-rw-r--r--drivers/gpu/drm/bochs/bochs.h4
-rw-r--r--drivers/gpu/drm/bochs/bochs_hw.c30
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c20
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c65
-rw-r--r--drivers/gpu/drm/bridge/Kconfig1
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c12
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c247
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c6
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c114
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c2
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c64
-rw-r--r--drivers/gpu/drm/drm_atomic.c142
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c689
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c444
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c21
-rw-r--r--drivers/gpu/drm/drm_auth.c2
-rw-r--r--drivers/gpu/drm/drm_bufs.c3
-rw-r--r--drivers/gpu/drm/drm_client.c12
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c14
-rw-r--r--drivers/gpu/drm/drm_connector.c180
-rw-r--r--drivers/gpu/drm/drm_crtc.c33
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c115
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c334
-rw-r--r--drivers/gpu/drm/drm_debugfs.c89
-rw-r--r--drivers/gpu/drm/drm_dp_cec.c2
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c92
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c9
-rw-r--r--drivers/gpu/drm/drm_drv.c23
-rw-r--r--drivers/gpu/drm/drm_dsc.c228
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c43
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c8
-rw-r--r--drivers/gpu/drm/drm_fourcc.c79
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c13
-rw-r--r--drivers/gpu/drm/drm_gem.c109
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c86
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c2
-rw-r--r--drivers/gpu/drm/drm_global.c137
-rw-r--r--drivers/gpu/drm/drm_info.c137
-rw-r--r--drivers/gpu/drm/drm_internal.h7
-rw-r--r--drivers/gpu/drm/drm_ioctl.c10
-rw-r--r--drivers/gpu/drm/drm_lease.c40
-rw-r--r--drivers/gpu/drm/drm_memory.c10
-rw-r--r--drivers/gpu/drm/drm_mode_config.c12
-rw-r--r--drivers/gpu/drm/drm_mode_object.c3
-rw-r--r--drivers/gpu/drm/drm_modes.c4
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c15
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c6
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c16
-rw-r--r--drivers/gpu/drm/drm_pci.c5
-rw-r--r--drivers/gpu/drm/drm_plane.c39
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c331
-rw-r--r--drivers/gpu/drm/drm_prime.c118
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c8
-rw-r--r--drivers/gpu/drm/drm_syncobj.c80
-rw-r--r--drivers/gpu/drm/drm_sysfs.c10
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c9
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c5
-rw-r--r--drivers/gpu/drm/exynos/Kconfig5
-rw-r--r--drivers/gpu/drm/exynos/Makefile3
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c87
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c157
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c55
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c111
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h134
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c30
-rw-r--r--drivers/gpu/drm/exynos/regs-decon5433.h22
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h9
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c33
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c25
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h1
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c57
-rw-r--r--drivers/gpu/drm/i915/Makefile13
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c28
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c309
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c123
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h298
-rw-r--r--drivers/gpu/drm/i915/i915_fixed.h143
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c178
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c244
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h11
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c353
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h28
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c83
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bdw.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bdw.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bxt.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_bxt.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt2.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt2.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt3.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cflgt3.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_chv.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_chv.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cnl.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_cnl.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_glk.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_glk.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_hsw.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_hsw.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_icl.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_icl.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt2.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt2.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt3.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_kblgt3.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt2.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt2.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt3.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt3.h27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt4.c27
-rw-r--r--drivers/gpu/drm/i915/i915_oa_sklgt4.h27
-rw-r--r--drivers/gpu/drm/i915/i915_params.c9
-rw-r--r--drivers/gpu/drm/i915/i915_params.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c186
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c49
-rw-r--r--drivers/gpu/drm/i915/i915_query.c3
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h829
-rw-r--r--drivers/gpu/drm/i915/i915_request.c121
-rw-r--r--drivers/gpu/drm/i915/i915_request.h13
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c399
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h36
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c7
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h5
-rw-r--r--drivers/gpu/drm/i915/i915_syncmap.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c31
-rw-r--r--drivers/gpu/drm/i915/i915_timeline.h19
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h13
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c10
-rw-r--r--drivers/gpu/drm/i915/icl_dsi.c1337
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c119
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c184
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c37
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c91
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c6
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c39
-rw-r--r--drivers/gpu/drm/i915/intel_color.c3
-rw-r--r--drivers/gpu/drm/i915/intel_combo_phy.c254
-rw-r--r--drivers/gpu/drm/i915/intel_connector.c (renamed from drivers/gpu/drm/i915/intel_modes.c)129
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c11
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c162
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c666
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c77
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h52
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2181
-rw-r--r--drivers/gpu/drm/i915/intel_display.h58
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c1108
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c33
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c4
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c120
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.h8
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h274
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c128
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h35
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_vbt.c306
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c10
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c77
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c12
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c10
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c45
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h5
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c113
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h41
-rw-r--r--drivers/gpu/drm/i915/intel_guc_reg.h12
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c216
-rw-r--r--drivers/gpu/drm/i915/intel_hdcp.c214
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c237
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c67
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c7
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c404
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c347
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c67
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c158
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.h15
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c4
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c13
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c904
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c358
-rw-r--r--drivers/gpu/drm/i915/intel_quirks.c169
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c94
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h49
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c346
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c56
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c740
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c10
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uc_fw.h7
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c2
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h7
-rw-r--r--drivers/gpu/drm/i915/intel_vdsc.c1088
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.c991
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.h36
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c34
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c428
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.c44
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_reset.h15
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c199
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.h37
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c59
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c70
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_lrc.c566
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_workarounds.c247
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_engine.c2
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi.c190
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c5
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c11
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c10
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c12
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c10
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c18
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c10
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c11
-rw-r--r--drivers/gpu/drm/meson/Kconfig1
-rw-r--r--drivers/gpu/drm/meson/Makefile2
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.c7
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.h11
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c292
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c91
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h67
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c13
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c588
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.h14
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c199
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h3
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c127
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h2
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c133
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c54
-rw-r--r--drivers/gpu/drm/meson/meson_viu.h1
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c90
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c64
-rw-r--r--drivers/gpu/drm/msm/Kconfig4
-rw-r--r--drivers/gpu/drm/msm/Makefile11
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h298
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c492
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.h21
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c8
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c48
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c15
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c20
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h78
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c81
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h10
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c89
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h8
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c1165
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h430
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h19
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c77
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c159
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h21
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c45
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h16
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c169
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h29
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c402
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h68
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c2393
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h103
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c199
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h30
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h9
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c50
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c36
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h10
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c66
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h59
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c374
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h45
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c132
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c240
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h217
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h101
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c28
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h359
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c8
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c12
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c70
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c43
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c9
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c90
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c12
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c16
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c30
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c9
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c10
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c28
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c28
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c20
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c16
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c10
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c40
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c20
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c12
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c4
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c8
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c23
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c220
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h28
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c14
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c10
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c219
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c45
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c118
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c137
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_trace.h90
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_tracepoints.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gpummu.c123
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c5
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h3
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h6
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c18
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c14
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h16
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/atom.h7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base907c.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c61
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c44
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.h11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head507d.c19
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head907d.c18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c206
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/lut.c50
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/lut.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wimm.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.h18
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c133
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cla06f.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/clc36f.h19
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h11
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/memory.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h5
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c37
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c67
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vmm.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/disp.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c87
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c52
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c152
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c90
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c83
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c116
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c45
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c89
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c24
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c47
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c167
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c145
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c77
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c20
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c58
-rw-r--r--drivers/gpu/drm/panel/Kconfig25
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c3
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c330
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c264
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c7
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c184
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c675
-rw-r--r--drivers/gpu/drm/pl111/pl111_vexpress.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_dev.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c37
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c14
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h32
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c8
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c27
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h4
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c9
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c73
-rw-r--r--drivers/gpu/drm/radeon/r300.c4
-rw-r--r--drivers/gpu/drm/radeon/r420.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c65
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c8
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c30
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c68
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c21
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c23
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c1
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig2
-rw-r--r--drivers/gpu/drm/rockchip/Makefile2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c2
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c1076
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c1349
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c128
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_psr.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c7
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c10
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c168
-rw-r--r--drivers/gpu/drm/selftests/Makefile6
-rw-r--r--drivers/gpu/drm/selftests/drm_helper_selftests.h9
-rw-r--r--drivers/gpu/drm/selftests/drm_modeset_selftests.h34
-rw-r--r--drivers/gpu/drm/selftests/test-drm_damage_helper.c811
-rw-r--r--drivers/gpu/drm/selftests/test-drm_format.c280
-rw-r--r--drivers/gpu/drm/selftests/test-drm_framebuffer.c346
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.c32
-rw-r--r--drivers/gpu/drm/selftests/test-drm_modeset_common.h43
-rw-r--r--drivers/gpu/drm/selftests/test-drm_plane_helper.c (renamed from drivers/gpu/drm/selftests/test-drm-helper.c)38
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c4
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c4
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c1
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c6
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c3
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c1
-rw-r--r--drivers/gpu/drm/stm/drv.c13
-rw-r--r--drivers/gpu/drm/stm/ltdc.c45
-rw-r--r--drivers/gpu/drm/stm/ltdc.h5
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c106
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c25
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c113
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.h11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c15
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c29
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c83
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c45
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h14
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c201
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c57
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h80
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c52
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c49
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.h37
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.c47
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.h28
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c57
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.h25
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.c70
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.h68
-rw-r--r--drivers/gpu/drm/tegra/dc.c38
-rw-r--r--drivers/gpu/drm/tegra/drm.c1
-rw-r--r--drivers/gpu/drm/tegra/falcon.c14
-rw-r--r--drivers/gpu/drm/tegra/hub.c48
-rw-r--r--drivers/gpu/drm/tegra/hub.h3
-rw-r--r--drivers/gpu/drm/tegra/sor.c254
-rw-r--r--drivers/gpu/drm/tegra/sor.h68
-rw-r--r--drivers/gpu/drm/tegra/vic.c46
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c11
-rw-r--r--drivers/gpu/drm/tinydrm/Kconfig11
-rw-r--r--drivers/gpu/drm/tinydrm/Makefile1
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c72
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c6
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c6
-rw-r--r--drivers/gpu/drm/tinydrm/hx8357d.c270
-rw-r--r--drivers/gpu/drm/tinydrm/ili9225.c5
-rw-r--r--drivers/gpu/drm/tinydrm/ili9341.c4
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c6
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c14
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c7
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c5
-rw-r--r--drivers/gpu/drm/tinydrm/st7735r.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c73
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c14
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c14
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c4
-rw-r--r--drivers/gpu/drm/udl/udl_main.c7
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c46
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c15
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h37
-rw-r--r--drivers/gpu/drm/v3d/v3d_fence.c10
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c212
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c29
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h79
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c152
-rw-r--r--drivers/gpu/drm/v3d/v3d_trace.h121
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h6
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c375
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h8
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c19
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c12
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drm_bus.c31
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h43
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c39
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c137
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c56
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c26
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c46
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c65
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c129
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c22
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h3
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c26
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c593
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h150
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c360
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c562
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c68
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h37
-rw-r--r--drivers/gpu/drm/xen/Kconfig1
-rw-r--r--drivers/gpu/drm/xen/Makefile1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c65
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_shbuf.c414
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_shbuf.h64
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c5
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c1
-rw-r--r--drivers/gpu/host1x/Makefile3
-rw-r--r--drivers/gpu/host1x/dev.c13
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c7
-rw-r--r--drivers/gpu/host1x/hw/debug_hw_1x06.c7
-rw-r--r--drivers/gpu/host1x/hw/host1x07.c44
-rw-r--r--drivers/gpu/host1x/hw/host1x07.h26
-rw-r--r--drivers/gpu/host1x/hw/host1x07_hardware.h142
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x06_uclass.h2
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h32
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x07_uclass.h181
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x07_vm.h46
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c4
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c52
-rw-r--r--drivers/gpu/ipu-v3/ipu-ic.c52
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c1019
-rw-r--r--drivers/gpu/vga/vgaarb.c21
-rw-r--r--drivers/hid/hid-hyperv.c2
-rw-r--r--drivers/hid/hid-ids.h7
-rw-r--r--drivers/hid/hid-ite.c1
-rw-r--r--drivers/hid/hid-quirks.c2
-rw-r--r--drivers/hid/hid-sensor-custom.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c13
-rw-r--r--drivers/hv/Kconfig2
-rw-r--r--drivers/hv/channel.c8
-rw-r--r--drivers/hv/channel_mgmt.c189
-rw-r--r--drivers/hv/connection.c24
-rw-r--r--drivers/hv/hv.c2
-rw-r--r--drivers/hv/hyperv_vmbus.h75
-rw-r--r--drivers/hv/vmbus_drv.c20
-rw-r--r--drivers/hwmon/k10temp.c10
-rw-r--r--drivers/i2c/busses/i2c-axxia.c40
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c9
-rw-r--r--drivers/i2c/busses/i2c-rcar.c9
-rw-r--r--drivers/i2c/busses/i2c-scmi.c10
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c49
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c8
-rw-r--r--drivers/i2c/i2c-core-acpi.c64
-rw-r--r--drivers/i3c/Kconfig24
-rw-r--r--drivers/i3c/Makefile4
-rw-r--r--drivers/i3c/device.c233
-rw-r--r--drivers/i3c/internals.h26
-rw-r--r--drivers/i3c/master.c2659
-rw-r--r--drivers/i3c/master/Kconfig22
-rw-r--r--drivers/i3c/master/Makefile2
-rw-r--r--drivers/i3c/master/dw-i3c-master.c1216
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c1666
-rw-r--r--drivers/ide/ide-atapi.c27
-rw-r--r--drivers/ide/ide-cd.c179
-rw-r--r--drivers/ide/ide-devsets.c4
-rw-r--r--drivers/ide/ide-disk.c15
-rw-r--r--drivers/ide/ide-eh.c2
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/ide-io.c112
-rw-r--r--drivers/ide/ide-park.c8
-rw-r--r--drivers/ide/ide-pm.c46
-rw-r--r--drivers/ide/ide-probe.c69
-rw-r--r--drivers/ide/ide-proc.c15
-rw-r--r--drivers/ide/ide-tape.c2
-rw-r--r--drivers/ide/ide-taskfile.c2
-rw-r--r--drivers/ide/pmac.c3
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c5
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c5
-rw-r--r--drivers/iio/humidity/hid-sensor-humidity.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c16
-rw-r--r--drivers/iio/light/hid-sensor-als.c8
-rw-r--r--drivers/iio/light/hid-sensor-prox.c8
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c8
-rw-r--r--drivers/iio/magnetometer/st_magn_buffer.c12
-rw-r--r--drivers/iio/orientation/hid-sensor-incl-3d.c8
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c8
-rw-r--r--drivers/iio/temperature/hid-sensor-temperature.c3
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c9
-rw-r--r--drivers/infiniband/core/umem_odp.c34
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c8
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c3
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h2
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c7
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c128
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c8
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c9
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile4
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c12
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c4
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c15
-rw-r--r--drivers/infiniband/hw/mlx5/main.c288
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h26
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c350
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c24
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c49
-rw-r--r--drivers/infiniband/hw/mlx5/srq.h73
-rw-r--r--drivers/infiniband/hw/mlx5/srq_cmd.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/srq.c)302
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c8
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.h3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c6
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c7
-rw-r--r--drivers/input/joystick/xpad.c16
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c3
-rw-r--r--drivers/input/keyboard/matrix_keypad.c23
-rw-r--r--drivers/input/keyboard/omap4-keypad.c34
-rw-r--r--drivers/input/mouse/elan_i2c_core.c3
-rw-r--r--drivers/input/mouse/elantech.c18
-rw-r--r--drivers/input/mouse/synaptics.c4
-rw-r--r--drivers/input/serio/gscps2.c4
-rw-r--r--drivers/input/serio/hp_sdc.c4
-rw-r--r--drivers/input/serio/hyperv-keyboard.c2
-rw-r--r--drivers/input/touchscreen/migor_ts.c15
-rw-r--r--drivers/input/touchscreen/st1232.c12
-rw-r--r--drivers/irqchip/Kconfig15
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-bcm2835.c11
-rw-r--r--drivers/irqchip/irq-bcm2836.c11
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c2
-rw-r--r--drivers/irqchip/irq-gic-common.c12
-rw-r--r--drivers/irqchip/irq-gic-common.h3
-rw-r--r--drivers/irqchip/irq-gic-v3.c27
-rw-r--r--drivers/irqchip/irq-gic.c6
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c65
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c261
-rw-r--r--drivers/irqchip/irq-madera.c256
-rw-r--r--drivers/irqchip/irq-mscc-ocelot.c6
-rw-r--r--drivers/irqchip/irq-rda-intc.c107
-rw-r--r--drivers/irqchip/irq-renesas-h8s.c2
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c14
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c14
-rw-r--r--drivers/irqchip/irq-s3c24xx.c2
-rw-r--r--drivers/irqchip/irq-stm32-exti.c122
-rw-r--r--drivers/irqchip/irq-sun4i.c106
-rw-r--r--drivers/irqchip/irq-tango.c10
-rw-r--r--drivers/isdn/hardware/Kconfig2
-rw-r--r--drivers/isdn/hardware/Makefile1
-rw-r--r--drivers/isdn/hardware/eicon/Kconfig51
-rw-r--r--drivers/isdn/hardware/eicon/Makefile24
-rw-r--r--drivers/isdn/hardware/eicon/adapter.h18
-rw-r--r--drivers/isdn/hardware/eicon/capi20.h699
-rw-r--r--drivers/isdn/hardware/eicon/capidtmf.c685
-rw-r--r--drivers/isdn/hardware/eicon/capidtmf.h79
-rw-r--r--drivers/isdn/hardware/eicon/capifunc.c1219
-rw-r--r--drivers/isdn/hardware/eicon/capifunc.h40
-rw-r--r--drivers/isdn/hardware/eicon/capimain.c141
-rw-r--r--drivers/isdn/hardware/eicon/cardtype.h1098
-rw-r--r--drivers/isdn/hardware/eicon/cp_vers.h26
-rw-r--r--drivers/isdn/hardware/eicon/dadapter.c364
-rw-r--r--drivers/isdn/hardware/eicon/dadapter.h34
-rw-r--r--drivers/isdn/hardware/eicon/debug.c2128
-rw-r--r--drivers/isdn/hardware/eicon/debug_if.h88
-rw-r--r--drivers/isdn/hardware/eicon/debuglib.c156
-rw-r--r--drivers/isdn/hardware/eicon/debuglib.h322
-rw-r--r--drivers/isdn/hardware/eicon/dfifo.h54
-rw-r--r--drivers/isdn/hardware/eicon/di.c835
-rw-r--r--drivers/isdn/hardware/eicon/di.h118
-rw-r--r--drivers/isdn/hardware/eicon/di_dbg.h37
-rw-r--r--drivers/isdn/hardware/eicon/di_defs.h181
-rw-r--r--drivers/isdn/hardware/eicon/did_vers.h26
-rw-r--r--drivers/isdn/hardware/eicon/diddfunc.c115
-rw-r--r--drivers/isdn/hardware/eicon/diva.c666
-rw-r--r--drivers/isdn/hardware/eicon/diva.h33
-rw-r--r--drivers/isdn/hardware/eicon/diva_didd.c139
-rw-r--r--drivers/isdn/hardware/eicon/diva_dma.c94
-rw-r--r--drivers/isdn/hardware/eicon/diva_dma.h48
-rw-r--r--drivers/isdn/hardware/eicon/diva_pci.h20
-rw-r--r--drivers/isdn/hardware/eicon/divacapi.h1350
-rw-r--r--drivers/isdn/hardware/eicon/divamnt.c239
-rw-r--r--drivers/isdn/hardware/eicon/divasfunc.c237
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c562
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c848
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c412
-rw-r--r--drivers/isdn/hardware/eicon/divasync.h489
-rw-r--r--drivers/isdn/hardware/eicon/dqueue.c110
-rw-r--r--drivers/isdn/hardware/eicon/dqueue.h32
-rw-r--r--drivers/isdn/hardware/eicon/dsp_defs.h301
-rw-r--r--drivers/isdn/hardware/eicon/dsp_tst.h48
-rw-r--r--drivers/isdn/hardware/eicon/dspdids.h75
-rw-r--r--drivers/isdn/hardware/eicon/dsrv4bri.h40
-rw-r--r--drivers/isdn/hardware/eicon/dsrv_bri.h37
-rw-r--r--drivers/isdn/hardware/eicon/dsrv_pri.h38
-rw-r--r--drivers/isdn/hardware/eicon/entity.h29
-rw-r--r--drivers/isdn/hardware/eicon/helpers.h51
-rw-r--r--drivers/isdn/hardware/eicon/idifunc.c268
-rw-r--r--drivers/isdn/hardware/eicon/io.c852
-rw-r--r--drivers/isdn/hardware/eicon/io.h308
-rw-r--r--drivers/isdn/hardware/eicon/istream.c226
-rw-r--r--drivers/isdn/hardware/eicon/kst_ifc.h335
-rw-r--r--drivers/isdn/hardware/eicon/maintidi.c2194
-rw-r--r--drivers/isdn/hardware/eicon/maintidi.h171
-rw-r--r--drivers/isdn/hardware/eicon/man_defs.h133
-rw-r--r--drivers/isdn/hardware/eicon/mdm_msg.h346
-rw-r--r--drivers/isdn/hardware/eicon/message.c14954
-rw-r--r--drivers/isdn/hardware/eicon/mi_pc.h204
-rw-r--r--drivers/isdn/hardware/eicon/mntfunc.c370
-rw-r--r--drivers/isdn/hardware/eicon/os_4bri.c1132
-rw-r--r--drivers/isdn/hardware/eicon/os_4bri.h9
-rw-r--r--drivers/isdn/hardware/eicon/os_bri.c815
-rw-r--r--drivers/isdn/hardware/eicon/os_bri.h9
-rw-r--r--drivers/isdn/hardware/eicon/os_capi.h21
-rw-r--r--drivers/isdn/hardware/eicon/os_pri.c1053
-rw-r--r--drivers/isdn/hardware/eicon/os_pri.h9
-rw-r--r--drivers/isdn/hardware/eicon/pc.h738
-rw-r--r--drivers/isdn/hardware/eicon/pc_init.h267
-rw-r--r--drivers/isdn/hardware/eicon/pc_maint.h160
-rw-r--r--drivers/isdn/hardware/eicon/pkmaint.h43
-rw-r--r--drivers/isdn/hardware/eicon/platform.h369
-rw-r--r--drivers/isdn/hardware/eicon/pr_pc.h76
-rw-r--r--drivers/isdn/hardware/eicon/s_4bri.c510
-rw-r--r--drivers/isdn/hardware/eicon/s_bri.c191
-rw-r--r--drivers/isdn/hardware/eicon/s_pri.c205
-rw-r--r--drivers/isdn/hardware/eicon/sdp_hdr.h117
-rw-r--r--drivers/isdn/hardware/eicon/um_idi.c886
-rw-r--r--drivers/isdn/hardware/eicon/um_idi.h44
-rw-r--r--drivers/isdn/hardware/eicon/um_xdi.h69
-rw-r--r--drivers/isdn/hardware/eicon/xdi_adapter.h71
-rw-r--r--drivers/isdn/hardware/eicon/xdi_msg.h128
-rw-r--r--drivers/isdn/hardware/eicon/xdi_vers.h26
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c3
-rw-r--r--drivers/isdn/hisax/hfc_pci.c3
-rw-r--r--drivers/leds/led-triggers.c9
-rw-r--r--drivers/leds/leds-88pm860x.c2
-rw-r--r--drivers/leds/leds-gpio.c1
-rw-r--r--drivers/leds/leds-powernv.c12
-rw-r--r--drivers/leds/leds-pwm.c22
-rw-r--r--drivers/leds/trigger/Kconfig7
-rw-r--r--drivers/leds/trigger/Makefile1
-rw-r--r--drivers/leds/trigger/ledtrig-audio.c44
-rw-r--r--drivers/lightnvm/core.c25
-rw-r--r--drivers/lightnvm/pblk-core.c77
-rw-r--r--drivers/lightnvm/pblk-init.c103
-rw-r--r--drivers/lightnvm/pblk-map.c63
-rw-r--r--drivers/lightnvm/pblk-rb.c5
-rw-r--r--drivers/lightnvm/pblk-read.c66
-rw-r--r--drivers/lightnvm/pblk-recovery.c46
-rw-r--r--drivers/lightnvm/pblk-rl.c5
-rw-r--r--drivers/lightnvm/pblk-sysfs.c7
-rw-r--r--drivers/lightnvm/pblk-write.c64
-rw-r--r--drivers/lightnvm/pblk.h43
-rw-r--r--drivers/macintosh/ans-lcd.c2
-rw-r--r--drivers/macintosh/macio_asic.c35
-rw-r--r--drivers/macintosh/macio_sysfs.c18
-rw-r--r--drivers/macintosh/rack-meter.c13
-rw-r--r--drivers/macintosh/via-pmu.c4
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c14
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c9
-rw-r--r--drivers/macintosh/windfarm_smu_sensors.c15
-rw-r--r--drivers/md/bcache/bcache.h20
-rw-r--r--drivers/md/bcache/btree.c5
-rw-r--r--drivers/md/bcache/btree.h18
-rw-r--r--drivers/md/bcache/debug.c3
-rw-r--r--drivers/md/bcache/journal.c2
-rw-r--r--drivers/md/bcache/request.c6
-rw-r--r--drivers/md/bcache/super.c48
-rw-r--r--drivers/md/bcache/sysfs.c61
-rw-r--r--drivers/md/bcache/writeback.c30
-rw-r--r--drivers/md/bcache/writeback.h12
-rw-r--r--drivers/md/dm-cache-metadata.c4
-rw-r--r--drivers/md/dm-core.h5
-rw-r--r--drivers/md/dm-crypt.c2
-rw-r--r--drivers/md/dm-integrity.c2
-rw-r--r--drivers/md/dm-rq.c7
-rw-r--r--drivers/md/dm-table.c4
-rw-r--r--drivers/md/dm-thin.c72
-rw-r--r--drivers/md/dm-zoned-target.c122
-rw-r--r--drivers/md/dm.c81
-rw-r--r--drivers/md/md.c7
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/media/Kconfig13
-rw-r--r--drivers/media/cec/cec-adap.c34
-rw-r--r--drivers/media/cec/cec-core.c6
-rw-r--r--drivers/media/cec/cec-pin.c5
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c69
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c17
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c11
-rw-r--r--drivers/media/dvb-frontends/af9033.c12
-rw-r--r--drivers/media/dvb-frontends/dib0090.c32
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c7
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c8
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c106
-rw-r--r--drivers/media/dvb-frontends/lgdt3306a.c6
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c2
-rw-r--r--drivers/media/dvb-frontends/tda18271c2dd.c1
-rw-r--r--drivers/media/firewire/firedtv-avc.c6
-rw-r--r--drivers/media/firewire/firedtv.h6
-rw-r--r--drivers/media/i2c/Kconfig15
-rw-r--r--drivers/media/i2c/Makefile1
-rw-r--r--drivers/media/i2c/ad9389b.c2
-rw-r--r--drivers/media/i2c/adv7180.c15
-rw-r--r--drivers/media/i2c/adv7511.c4
-rw-r--r--drivers/media/i2c/adv7604.c70
-rw-r--r--drivers/media/i2c/adv7842.c6
-rw-r--r--drivers/media/i2c/imx214.c1118
-rw-r--r--drivers/media/i2c/imx258.c28
-rw-r--r--drivers/media/i2c/imx274.c9
-rw-r--r--drivers/media/i2c/imx319.c8
-rw-r--r--drivers/media/i2c/imx355.c8
-rw-r--r--drivers/media/i2c/mt9m111.c266
-rw-r--r--drivers/media/i2c/ov13858.c6
-rw-r--r--drivers/media/i2c/ov2640.c21
-rw-r--r--drivers/media/i2c/ov2680.c12
-rw-r--r--drivers/media/i2c/ov5640.c771
-rw-r--r--drivers/media/i2c/ov5645.c2
-rw-r--r--drivers/media/i2c/ov7670.c6
-rw-r--r--drivers/media/i2c/ov772x.c7
-rw-r--r--drivers/media/i2c/ov7740.c4
-rw-r--r--drivers/media/i2c/tc358743.c4
-rw-r--r--drivers/media/i2c/tda1997x.c4
-rw-r--r--drivers/media/i2c/tda7432.c4
-rw-r--r--drivers/media/i2c/ths8200.c2
-rw-r--r--drivers/media/i2c/tvp5150.c2
-rw-r--r--drivers/media/i2c/video-i2c.c300
-rw-r--r--drivers/media/media-device.c4
-rw-r--r--drivers/media/media-request.c3
-rw-r--r--drivers/media/pci/b2c2/flexcop-dma.c70
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c12
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c48
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c13
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c55
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c1
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c40
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h48
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c17
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c1
-rw-r--r--drivers/media/pci/saa7134/saa7134-core.c8
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c115
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c21
-rw-r--r--drivers/media/pci/saa7134/saa7134.h10
-rw-r--r--drivers/media/platform/Kconfig32
-rw-r--r--drivers/media/platform/Makefile5
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c31
-rw-r--r--drivers/media/platform/aspeed-video.c1729
-rw-r--r--drivers/media/platform/coda/coda-bit.c132
-rw-r--r--drivers/media/platform/coda/coda-common.c246
-rw-r--r--drivers/media/platform/coda/coda.h34
-rw-r--r--drivers/media/platform/coda/coda_regs.h2
-rw-r--r--drivers/media/platform/coda/trace.h10
-rw-r--r--drivers/media/platform/davinci/vpbe.c30
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c10
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c12
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c57
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h3
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c23
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.h6
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-errno.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-errno.h2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c130
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c12
-rw-r--r--drivers/media/platform/imx-pxp.c18
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c2
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c6
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c10
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h5
-rw-r--r--drivers/media/platform/mtk-vcodec/venc_drv_if.h2
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c23
-rw-r--r--drivers/media/platform/qcom/camss/camss.c2
-rw-r--r--drivers/media/platform/qcom/camss/camss.h1
-rw-r--r--drivers/media/platform/qcom/venus/core.c32
-rw-r--r--drivers/media/platform/qcom/venus/core.h6
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c235
-rw-r--r--drivers/media/platform/qcom/venus/firmware.h17
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c15
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h8
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c4
-rw-r--r--drivers/media/platform/qcom/venus/venc.c23
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c36
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c52
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c97
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c10
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c4
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c102
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c1
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c49
-rw-r--r--drivers/media/platform/seco-cec/Makefile1
-rw-r--r--drivers/media/platform/seco-cec/seco-cec.c796
-rw-r--r--drivers/media/platform/seco-cec/seco-cec.h141
-rw-r--r--drivers/media/platform/sh_vou.c2
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-hw.c2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/Kconfig9
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/Makefile3
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c913
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h135
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_reg.h196
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c679
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h38
-rw-r--r--drivers/media/platform/ti-vpe/cal.c4
-rw-r--r--drivers/media/platform/vicodec/codec-fwht.c84
-rw-r--r--drivers/media/platform/vicodec/codec-fwht.h15
-rw-r--r--drivers/media/platform/vicodec/codec-v4l2-fwht.c122
-rw-r--r--drivers/media/platform/vicodec/codec-v4l2-fwht.h3
-rw-r--r--drivers/media/platform/vicodec/vicodec-core.c157
-rw-r--r--drivers/media/platform/vim2m.c6
-rw-r--r--drivers/media/platform/vimc/vimc-common.c2
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c2
-rw-r--r--drivers/media/platform/vivid/vivid-core.c48
-rw-r--r--drivers/media/platform/vivid/vivid-core.h5
-rw-r--r--drivers/media/platform/vivid/vivid-ctrls.c16
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c56
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c5
-rw-r--r--drivers/media/platform/vivid/vivid-sdr-cap.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-cap.c6
-rw-r--r--drivers/media/platform/vivid/vivid-vbi-out.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c31
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.h2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.c20
-rw-r--r--drivers/media/platform/vivid/vivid-vid-out.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c2
-rw-r--r--drivers/media/platform/xilinx/Kconfig2
-rw-r--r--drivers/media/platform/xilinx/Makefile2
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c5
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.h5
-rw-r--r--drivers/media/platform/xilinx/xilinx-tpg.c7
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c7
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.h5
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c5
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.h5
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.c5
-rw-r--r--drivers/media/platform/xilinx/xilinx-vtc.h5
-rw-r--r--drivers/media/rc/Kconfig12
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/bpf-lirc.c24
-rw-r--r--drivers/media/rc/imon.c4
-rw-r--r--drivers/media/rc/imon_raw.c47
-rw-r--r--drivers/media/rc/keymaps/Makefile1
-rw-r--r--drivers/media/rc/keymaps/rc-xbox-dvd.c63
-rw-r--r--drivers/media/rc/mceusb.c9
-rw-r--r--drivers/media/rc/rc-main.c8
-rw-r--r--drivers/media/rc/xbox_remote.c306
-rw-r--r--drivers/media/spi/cxd2880-spi.c17
-rw-r--r--drivers/media/usb/au0828/au0828-video.c38
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c31
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c41
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c41
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig1
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c102
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c40
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/usb_urb.c5
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c2
-rw-r--r--drivers/media/usb/dvb-usb/friio-fe.c440
-rw-r--r--drivers/media/usb/dvb-usb/friio.c522
-rw-r--r--drivers/media/usb/dvb-usb/friio.h99
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c2
-rw-r--r--drivers/media/usb/gspca/gspca.c11
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c13
-rw-r--r--drivers/media/usb/siano/smsusb.c3
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c13
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c83
-rw-r--r--drivers/media/usb/uvc/uvc_isight.c6
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c110
-rw-r--r--drivers/media/usb/uvc/uvc_status.c12
-rw-r--r--drivers/media/usb/uvc/uvc_video.c274
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h69
-rw-r--r--drivers/media/v4l2-core/Kconfig1
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c20
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c12
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c129
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c66
-rw-r--r--drivers/memstick/core/ms_block.c109
-rw-r--r--drivers/memstick/core/ms_block.h1
-rw-r--r--drivers/memstick/core/mspro_block.c121
-rw-r--r--drivers/mfd/axp20x.c13
-rw-r--r--drivers/mfd/cros_ec_dev.c8
-rw-r--r--drivers/mfd/wm8994-core.c9
-rw-r--r--drivers/misc/cxl/pci.c4
-rw-r--r--drivers/misc/cxl/vphb.c12
-rw-r--r--drivers/misc/mic/scif/scif_rma.c2
-rw-r--r--drivers/misc/mic/vop/vop_main.c13
-rw-r--r--drivers/misc/ocxl/afu_irq.c1
-rw-r--r--drivers/misc/ocxl/config.c2
-rw-r--r--drivers/misc/ocxl/link.c25
-rw-r--r--drivers/mmc/core/block.c41
-rw-r--r--drivers/mmc/core/mmc.c24
-rw-r--r--drivers/mmc/core/queue.c110
-rw-r--r--drivers/mmc/core/queue.h4
-rw-r--r--drivers/mmc/host/omap.c11
-rw-r--r--drivers/mmc/host/omap_hsmmc.c12
-rw-r--r--drivers/mmc/host/sdhci-omap.c12
-rw-r--r--drivers/mmc/host/sdhci-tegra.c8
-rw-r--r--drivers/mmc/host/sdhci.c22
-rw-r--r--drivers/mtd/Kconfig52
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c6
-rw-r--r--drivers/mtd/devices/block2mtd.c2
-rw-r--r--drivers/mtd/devices/docg3.c16
-rw-r--r--drivers/mtd/devices/docg3.h11
-rw-r--r--drivers/mtd/maps/Kconfig37
-rw-r--r--drivers/mtd/maps/Makefile11
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c281
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c229
-rw-r--r--drivers/mtd/maps/physmap-core.c665
-rw-r--r--drivers/mtd/maps/physmap-gemini.c (renamed from drivers/mtd/maps/physmap_of_gemini.c)112
-rw-r--r--drivers/mtd/maps/physmap-gemini.h (renamed from drivers/mtd/maps/physmap_of_gemini.h)2
-rw-r--r--drivers/mtd/maps/physmap-versatile.c (renamed from drivers/mtd/maps/physmap_of_versatile.c)2
-rw-r--r--drivers/mtd/maps/physmap-versatile.h (renamed from drivers/mtd/maps/physmap_of_versatile.h)2
-rw-r--r--drivers/mtd/maps/physmap.c280
-rw-r--r--drivers/mtd/maps/physmap_of_core.c368
-rw-r--r--drivers/mtd/mtdblock.c2
-rw-r--r--drivers/mtd/mtdcore.c8
-rw-r--r--drivers/mtd/mtdpart.c16
-rw-r--r--drivers/mtd/mtdswap.c13
-rw-r--r--drivers/mtd/nand/bbt.c3
-rw-r--r--drivers/mtd/nand/raw/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c263
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c9
-rw-r--r--drivers/mtd/nand/raw/au1550nd.c2
-rw-r--r--drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c2
-rw-r--r--drivers/mtd/nand/raw/cafe_nand.c4
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c4
-rw-r--r--drivers/mtd/nand/raw/denali.c59
-rw-r--r--drivers/mtd/nand/raw/denali.h2
-rw-r--r--drivers/mtd/nand/raw/diskonchip.c4
-rw-r--r--drivers/mtd/nand/raw/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c2
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c303
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c29
-rw-r--r--drivers/mtd/nand/raw/hisi504_nand.c4
-rw-r--r--drivers/mtd/nand/raw/internals.h33
-rw-r--r--drivers/mtd/nand/raw/jz4740_nand.c8
-rw-r--r--drivers/mtd/nand/raw/jz4780_bch.c2
-rw-r--r--drivers/mtd/nand/raw/jz4780_nand.c2
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c2
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c2
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c51
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c4
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c4
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c16
-rw-r--r--drivers/mtd/nand/raw/nand_base.c769
-rw-r--r--drivers/mtd/nand/raw/nand_bbt.c285
-rw-r--r--drivers/mtd/nand/raw/nand_hynix.c8
-rw-r--r--drivers/mtd/nand/raw/nand_jedec.c2
-rw-r--r--drivers/mtd/nand/raw/nand_legacy.c35
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c7
-rw-r--r--drivers/mtd/nand/raw/nandsim.c19
-rw-r--r--drivers/mtd/nand/raw/ndfc.c2
-rw-r--r--drivers/mtd/nand/raw/omap2.c2
-rw-r--r--drivers/mtd/nand/raw/plat_nand.c2
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c2
-rw-r--r--drivers/mtd/nand/raw/r852.c30
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c7
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c21
-rw-r--r--drivers/mtd/nand/raw/sm_common.c2
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c6
-rw-r--r--drivers/mtd/nand/raw/tango_nand.c4
-rw-r--r--drivers/mtd/nand/raw/tegra_nand.c32
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c98
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c2
-rw-r--r--drivers/mtd/nand/spi/Makefile2
-rw-r--r--drivers/mtd/nand/spi/core.c2
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c148
-rw-r--r--drivers/mtd/nand/spi/toshiba.c137
-rw-r--r--drivers/mtd/nand/spi/winbond.c8
-rw-r--r--drivers/mtd/nftlmount.c39
-rw-r--r--drivers/mtd/parsers/Kconfig50
-rw-r--r--drivers/mtd/parsers/Makefile1
-rw-r--r--drivers/mtd/parsers/redboot.c (renamed from drivers/mtd/redboot.c)32
-rw-r--r--drivers/mtd/spi-nor/Kconfig9
-rw-r--r--drivers/mtd/spi-nor/Makefile1
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c1373
-rw-r--r--drivers/mtd/ubi/build.c2
-rw-r--r--drivers/mtd/ubi/kapi.c2
-rw-r--r--drivers/net/Kconfig8
-rw-r--r--drivers/net/appletalk/cops.c10
-rw-r--r--drivers/net/bonding/bond_3ad.c7
-rw-r--r--drivers/net/bonding/bond_alb.c9
-rw-r--r--drivers/net/bonding/bond_debugfs.c14
-rw-r--r--drivers/net/bonding/bond_main.c46
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/flexcan.c365
-rw-r--r--drivers/net/can/rcar/Kconfig1
-rw-r--r--drivers/net/can/rcar/Makefile1
-rw-r--r--drivers/net/can/rcar/rcar_can.c6
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c6
-rw-r--r--drivers/net/can/sja1000/Kconfig1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c65
-rw-r--r--drivers/net/can/usb/ucan.c2
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/can/xilinx_can.c36
-rw-r--r--drivers/net/dsa/bcm_sf2.c10
-rw-r--r--drivers/net/dsa/bcm_sf2.h3
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c497
-rw-r--r--drivers/net/dsa/microchip/Kconfig18
-rw-r--r--drivers/net/dsa/microchip/Makefile5
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c1316
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h (renamed from drivers/net/dsa/microchip/ksz_9477_reg.h)17
-rw-r--r--drivers/net/dsa/microchip/ksz9477_spi.c177
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c1200
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h214
-rw-r--r--drivers/net/dsa/microchip/ksz_priv.h247
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.c217
-rw-r--r--drivers/net/dsa/microchip/ksz_spi.h69
-rw-r--r--drivers/net/dsa/mt7530.c3
-rw-r--r--drivers/net/dsa/mv88e6060.c10
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c42
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c26
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.h2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c4
-rw-r--r--drivers/net/ethernet/3com/Kconfig5
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c5
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c41
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/mdio.c22
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c69
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c876
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.h36
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c55
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c166
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c109
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h48
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h135
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h58
-rw-r--r--drivers/net/ethernet/arc/emac_main.c3
-rw-r--r--drivers/net/ethernet/broadcom/b44.c12
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c83
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c76
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c623
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h115
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c206
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h614
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c32
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c108
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c1
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c65
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c105
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c50
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c2
-rw-r--r--drivers/net/ethernet/cavium/common/cavium_ptp.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c4
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c7
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c3
-rw-r--r--drivers/net/ethernet/chelsio/Kconfig3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c113
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c73
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c62
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h20
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c28
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/Kconfig4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c17
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c71
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c454
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h28
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c58
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c12
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c5
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c23
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c8
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c9
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig1
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c503
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h13
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c49
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h48
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c399
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c550
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h57
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h106
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c70
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c933
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h713
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c1556
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h79
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c1063
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h98
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c104
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c154
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c775
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h96
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c22
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c55
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c14
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c30
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c47
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.h6
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c9
-rw-r--r--drivers/net/ethernet/ibm/emac/emac.h2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c27
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h2
-rw-r--r--drivers/net/ethernet/intel/Kconfig25
-rw-r--r--drivers/net/ethernet/intel/e100.c14
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c59
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c25
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c27
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c64
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c20
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx_common.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c67
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c26
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c13
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c223
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c152
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c94
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c122
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c55
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c46
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c51
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c65
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c9
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h11
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c48
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c44
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c299
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c26
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c21
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c9
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c50
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c162
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h36
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h340
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h24
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c986
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h220
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c133
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c1410
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c1538
-rw-r--r--drivers/net/ethernet/marvell/skge.c14
-rw-r--r--drivers/net/ethernet/marvell/sky2.c20
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c162
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h110
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c169
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c634
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c222
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c242
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c112
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c489
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c775
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1013
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c1260
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c283
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c325
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c83
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c285
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c255
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h98
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c368
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h324
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c543
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h85
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c281
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c156
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c249
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c561
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c171
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h81
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c284
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c783
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h1
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c22
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c90
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c3
-rw-r--r--drivers/net/ethernet/neterion/Kconfig4
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile2
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/cls.c283
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/ctrl.c379
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c363
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.h208
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/qdisc.c850
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c72
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c164
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h51
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/lag_conf.c48
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c41
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h24
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c38
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c288
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c62
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c89
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h25
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c113
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c21
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h9
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c42
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c90
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c3
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c2
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c334
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h28
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c152
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c79
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c65
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h50
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c47
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h13
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c4
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c19
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c6
-rw-r--r--drivers/net/ethernet/qualcomm/qca_debug.c17
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c16
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.h1
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c5
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c9
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c5
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c395
-rw-r--r--drivers/net/ethernet/renesas/ravb.h1
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c23
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c60
-rw-r--r--drivers/net/ethernet/sfc/ef10.c7
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c5
-rw-r--r--drivers/net/ethernet/sis/sis190.c2
-rw-r--r--drivers/net/ethernet/smsc/Kconfig4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.h2
-rw-r--r--drivers/net/ethernet/socionext/netsec.c385
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c86
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c390
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c60
-rw-r--r--drivers/net/ethernet/sun/sunhme.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig3
-rw-r--r--drivers/net/ethernet/ti/cpmac.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c241
-rw-r--r--drivers/net/ethernet/ti/cpts.c32
-rw-r--r--drivers/net/ethernet/ti/cpts.h38
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c14
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c36
-rw-r--r--drivers/net/ethernet/ti/tlan.c4
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c29
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/fjes/fjes_debugfs.c14
-rw-r--r--drivers/net/geneve.c111
-rw-r--r--drivers/net/hamradio/6pack.c10
-rw-r--r--drivers/net/hamradio/mkiss.c8
-rw-r--r--drivers/net/hyperv/netvsc_drv.c19
-rw-r--r--drivers/net/ieee802154/at86rf230.c13
-rw-r--r--drivers/net/ieee802154/ca8210.c4
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c4
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c35
-rw-r--r--drivers/net/macvlan.c11
-rw-r--r--drivers/net/net_failover.c8
-rw-r--r--drivers/net/netdevsim/bpf.c68
-rw-r--r--drivers/net/netdevsim/ipsec.c7
-rw-r--r--drivers/net/phy/amd.c1
-rw-r--r--drivers/net/phy/aquantia.c15
-rw-r--r--drivers/net/phy/at803x.c3
-rw-r--r--drivers/net/phy/bcm63xx.c6
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/bcm87xx.c10
-rw-r--r--drivers/net/phy/broadcom.c16
-rw-r--r--drivers/net/phy/cicada.c2
-rw-r--r--drivers/net/phy/davicom.c4
-rw-r--r--drivers/net/phy/dp83640.c1
-rw-r--r--drivers/net/phy/dp83822.c1
-rw-r--r--drivers/net/phy/dp83848.c1
-rw-r--r--drivers/net/phy/dp83867.c1
-rw-r--r--drivers/net/phy/dp83tc811.c1
-rw-r--r--drivers/net/phy/fixed_phy.c43
-rw-r--r--drivers/net/phy/icplus.c145
-rw-r--r--drivers/net/phy/intel-xway.c10
-rw-r--r--drivers/net/phy/lxt.c6
-rw-r--r--drivers/net/phy/marvell.c97
-rw-r--r--drivers/net/phy/marvell10g.c37
-rw-r--r--drivers/net/phy/mdio-gpio.c7
-rw-r--r--drivers/net/phy/meson-gxl.c2
-rw-r--r--drivers/net/phy/micrel.c31
-rw-r--r--drivers/net/phy/microchip.c1
-rw-r--r--drivers/net/phy/microchip_t1.c1
-rw-r--r--drivers/net/phy/mscc.c61
-rw-r--r--drivers/net/phy/national.c1
-rw-r--r--drivers/net/phy/phy-c45.c12
-rw-r--r--drivers/net/phy/phy-core.c213
-rw-r--r--drivers/net/phy/phy.c490
-rw-r--r--drivers/net/phy/phy_device.c223
-rw-r--r--drivers/net/phy/phy_led_triggers.c15
-rw-r--r--drivers/net/phy/phylink.c19
-rw-r--r--drivers/net/phy/qsemi.c1
-rw-r--r--drivers/net/phy/realtek.c45
-rw-r--r--drivers/net/phy/sfp-bus.c2
-rw-r--r--drivers/net/phy/smsc.c7
-rw-r--r--drivers/net/phy/ste10Xp.c2
-rw-r--r--drivers/net/phy/uPD60620.c6
-rw-r--r--drivers/net/phy/vitesse.c21
-rw-r--r--drivers/net/ppp/ppp_async.c22
-rw-r--r--drivers/net/ppp/ppp_generic.c54
-rw-r--r--drivers/net/ppp/ppp_synctty.c9
-rw-r--r--drivers/net/ppp/pptp.c5
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/tap.c2
-rw-r--r--drivers/net/team/team.c4
-rw-r--r--drivers/net/tun.c106
-rw-r--r--drivers/net/usb/Kconfig11
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/aqc111.c1459
-rw-r--r--drivers/net/usb/aqc111.h232
-rw-r--r--drivers/net/usb/cdc_ether.c26
-rw-r--r--drivers/net/usb/hso.c18
-rw-r--r--drivers/net/usb/ipheth.c10
-rw-r--r--drivers/net/usb/lan78xx.c31
-rw-r--r--drivers/net/usb/qmi_wwan.c18
-rw-r--r--drivers/net/usb/r8152.c33
-rw-r--r--drivers/net/usb/smsc95xx.c55
-rw-r--r--drivers/net/veth.c2
-rw-r--r--drivers/net/virtio_net.c47
-rw-r--r--drivers/net/vrf.c30
-rw-r--r--drivers/net/vxlan.c443
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c8
-rw-r--r--drivers/net/wireless/Kconfig7
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig3
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c33
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c118
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c317
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c115
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c229
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h17
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c9
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h20
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c155
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h60
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c1
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.c61
-rw-r--r--drivers/net/wireless/ath/ath9k/dynack.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c1
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c104
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c257
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c225
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c5
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c92
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h4
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h21
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c56
-rw-r--r--drivers/net/wireless/broadcom/b43/Kconfig1
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_common.c47
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_common.h9
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_lp.c13
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_n.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c138
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c27
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c116
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c215
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h40
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c144
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c120
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h9
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h19
-rw-r--r--drivers/net/wireless/cisco/airo.c4
-rw-r--r--drivers/net/wireless/intel/ipw2x00/Kconfig10
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c10
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c7
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h401
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h239
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c706
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c231
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c290
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c526
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c124
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c139
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c296
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c13
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c2
-rw-r--r--drivers/net/wireless/intersil/orinoco/mic.c6
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c1
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_38xx.c1
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_ioctl.c1
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_dev.c1
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c277
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h4
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c96
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c62
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c18
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c80
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h60
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Makefile6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c87
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c137
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h1313
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mac.c197
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c100
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c898
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.h17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/trace.h312
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c)22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c (renamed from drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c)153
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c324
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c44
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c128
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c334
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Makefile6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c134
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c203
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c123
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c114
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c142
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c95
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c44
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c55
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c78
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c160
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h29
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Kconfig15
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Makefile9
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c21
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c282
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h20
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c211
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c1219
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h94
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h45
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/util.c18
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/util.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c4
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c4
-rw-r--r--drivers/net/wireless/st/cw1200/debug.c32
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c13
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c2
-rw-r--r--drivers/net/wireless/virt_wifi.c632
-rw-r--r--drivers/net/wireless/zydas/zd1201.c1
-rw-r--r--drivers/net/xen-netback/xenbus.c18
-rw-r--r--drivers/net/xen-netfront.c4
-rw-r--r--drivers/nvdimm/nd-core.h2
-rw-r--r--drivers/nvdimm/pfn_devs.c64
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvdimm/region_devs.c41
-rw-r--r--drivers/nvme/host/Kconfig15
-rw-r--r--drivers/nvme/host/Makefile3
-rw-r--r--drivers/nvme/host/core.c209
-rw-r--r--drivers/nvme/host/fabrics.c61
-rw-r--r--drivers/nvme/host/fabrics.h17
-rw-r--r--drivers/nvme/host/fc.c45
-rw-r--r--drivers/nvme/host/lightnvm.c33
-rw-r--r--drivers/nvme/host/multipath.c20
-rw-r--r--drivers/nvme/host/nvme.h27
-rw-r--r--drivers/nvme/host/pci.c518
-rw-r--r--drivers/nvme/host/rdma.c121
-rw-r--r--drivers/nvme/host/tcp.c2278
-rw-r--r--drivers/nvme/host/trace.c3
-rw-r--r--drivers/nvme/host/trace.h27
-rw-r--r--drivers/nvme/target/Kconfig10
-rw-r--r--drivers/nvme/target/Makefile2
-rw-r--r--drivers/nvme/target/admin-cmd.c146
-rw-r--r--drivers/nvme/target/configfs.c43
-rw-r--r--drivers/nvme/target/core.c220
-rw-r--r--drivers/nvme/target/discovery.c139
-rw-r--r--drivers/nvme/target/fabrics-cmd.c64
-rw-r--r--drivers/nvme/target/fc.c66
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c89
-rw-r--r--drivers/nvme/target/io-cmd-file.c165
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/nvme/target/nvmet.h68
-rw-r--r--drivers/nvme/target/rdma.c15
-rw-r--r--drivers/nvme/target/tcp.c1737
-rw-r--r--drivers/of/of_net.c39
-rw-r--r--drivers/of/pdt.c50
-rw-r--r--drivers/opp/core.c347
-rw-r--r--drivers/opp/of.c345
-rw-r--r--drivers/opp/opp.h26
-rw-r--r--drivers/opp/ti-opp-supply.c1
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c10
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c1
-rw-r--r--drivers/pci/msi.c23
-rw-r--r--drivers/pci/pci.c24
-rw-r--r--drivers/pci/pcie/aer.c2
-rw-r--r--drivers/pci/pcie/aspm.c2
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm_spe_pmu.c6
-rw-r--r--drivers/perf/thunderx2_pmu.c861
-rw-r--r--drivers/perf/xgene_pmu.c80
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c20
-rw-r--r--drivers/phy/socionext/Kconfig3
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660.c28
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c2
-rw-r--r--drivers/platform/mips/cpu_hwmon.c3
-rw-r--r--drivers/platform/x86/Kconfig21
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c3
-rw-r--r--drivers/platform/x86/asus-wmi.c3
-rw-r--r--drivers/platform/x86/dell-laptop.c29
-rw-r--r--drivers/platform/x86/huawei-wmi.c208
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c97
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/intel_atomisp2_pm.c69
-rw-r--r--drivers/platform/x86/intel_cht_int33fe.c20
-rw-r--r--drivers/platform/x86/intel_ips.c83
-rw-r--r--drivers/platform/x86/intel_pmc_core.c180
-rw-r--r--drivers/platform/x86/intel_pmc_core.h68
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c42
-rw-r--r--drivers/platform/x86/mlx-platform.c177
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c114
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c8
-rw-r--r--drivers/power/avs/smartreflex.c39
-rw-r--r--drivers/ptp/ptp_chardev.c55
-rw-r--r--drivers/ptp/ptp_clock.c10
-rw-r--r--drivers/pwm/Kconfig4
-rw-r--r--drivers/pwm/pwm-bcm2835.c5
-rw-r--r--drivers/pwm/pwm-clps711x.c13
-rw-r--r--drivers/pwm/pwm-imx.c194
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c3
-rw-r--r--drivers/regulator/88pm8607.c2
-rw-r--r--drivers/regulator/Kconfig10
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/act8945a-regulator.c205
-rw-r--r--drivers/regulator/arizona-ldo1.c3
-rw-r--r--drivers/regulator/as3711-regulator.c5
-rw-r--r--drivers/regulator/axp20x-regulator.c876
-rw-r--r--drivers/regulator/bd718x7-regulator.c33
-rw-r--r--drivers/regulator/bd9571mwv-regulator.c10
-rw-r--r--drivers/regulator/core.c1199
-rw-r--r--drivers/regulator/da9052-regulator.c2
-rw-r--r--drivers/regulator/da9210-regulator.c4
-rw-r--r--drivers/regulator/da9211-regulator.c6
-rw-r--r--drivers/regulator/dbx500-prcmu.c35
-rw-r--r--drivers/regulator/fixed.c6
-rw-r--r--drivers/regulator/internal.h2
-rw-r--r--drivers/regulator/lm363x-regulator.c8
-rw-r--r--drivers/regulator/lochnagar-regulator.c50
-rw-r--r--drivers/regulator/lp8788-ldo.c8
-rw-r--r--drivers/regulator/max77686-regulator.c29
-rw-r--r--drivers/regulator/max8952.c10
-rw-r--r--drivers/regulator/max8973-regulator.c8
-rw-r--r--drivers/regulator/max8997-regulator.c2
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c2
-rw-r--r--drivers/regulator/mcp16502.c552
-rw-r--r--drivers/regulator/of_regulator.c81
-rw-r--r--drivers/regulator/palmas-regulator.c5
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c2
-rw-r--r--drivers/regulator/s2mps11.c54
-rw-r--r--drivers/regulator/s5m8767.c11
-rw-r--r--drivers/regulator/stpmic1_regulator.c4
-rw-r--r--drivers/regulator/tps65090-regulator.c6
-rw-r--r--drivers/regulator/tps65910-regulator.c4
-rw-r--r--drivers/regulator/wm8350-regulator.c4
-rw-r--r--drivers/regulator/wm8994-regulator.c33
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c13
-rw-r--r--drivers/rtc/rtc-hid-sensor-time.c2
-rw-r--r--drivers/s390/block/dasd_ioctl.c22
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c6
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c10
-rw-r--r--drivers/s390/crypto/ap_bus.c8
-rw-r--r--drivers/s390/crypto/ap_bus.h1
-rw-r--r--drivers/s390/crypto/ap_queue.c15
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c1
-rw-r--r--drivers/s390/crypto/zcrypt_cex2c.c1
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c1
-rw-r--r--drivers/s390/net/qeth_core.h19
-rw-r--r--drivers/s390/net/qeth_core_main.c164
-rw-r--r--drivers/s390/net/qeth_core_mpc.c1
-rw-r--r--drivers/s390/net/qeth_core_mpc.h32
-rw-r--r--drivers/s390/net/qeth_l2_main.c47
-rw-r--r--drivers/s390/net/qeth_l3_main.c140
-rw-r--r--drivers/s390/virtio/virtio_ccw.c31
-rw-r--r--drivers/sbus/char/bbc_envctrl.c4
-rw-r--r--drivers/sbus/char/display7seg.c1
-rw-r--r--drivers/sbus/char/envctrl.c8
-rw-r--r--drivers/sbus/char/flash.c6
-rw-r--r--drivers/scsi/Kconfig12
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c8
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c8
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c3
-rw-r--r--drivers/scsi/cxlflash/main.c6
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c21
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c8
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c7
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c7
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c4
-rw-r--r--drivers/scsi/hosts.c29
-rw-r--r--drivers/scsi/libiscsi.c4
-rw-r--r--drivers/scsi/libsas/sas_ata.c5
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c1
-rw-r--r--drivers/scsi/osd/osd_initiator.c4
-rw-r--r--drivers/scsi/osst.c2
-rw-r--r--drivers/scsi/qedi/qedi_main.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c41
-rw-r--r--drivers/scsi/scsi.c5
-rw-r--r--drivers/scsi/scsi_debug.c3
-rw-r--r--drivers/scsi/scsi_error.c24
-rw-r--r--drivers/scsi/scsi_lib.c806
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_scan.c10
-rw-r--r--drivers/scsi/scsi_sysfs.c8
-rw-r--r--drivers/scsi/scsi_transport_fc.c71
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c7
-rw-r--r--drivers/scsi/scsi_transport_sas.c10
-rw-r--r--drivers/scsi/sd.c108
-rw-r--r--drivers/scsi/sd.h6
-rw-r--r--drivers/scsi/sd_zbc.c10
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c3
-rw-r--r--drivers/scsi/sr.c12
-rw-r--r--drivers/scsi/st.c2
-rw-r--r--drivers/scsi/storvsc_drv.c61
-rw-r--r--drivers/scsi/ufs/ufs_bsg.c4
-rw-r--r--drivers/scsi/virtio_scsi.c3
-rw-r--r--drivers/scsi/vmw_pvscsi.c4
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c68
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c96
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.h58
-rw-r--r--drivers/soc/fsl/qbman/qman.c35
-rw-r--r--drivers/soc/tegra/pmc.c2
-rw-r--r--drivers/spi/Kconfig24
-rw-r--r--drivers/spi/Makefile3
-rw-r--r--drivers/spi/atmel-quadspi.c (renamed from drivers/mtd/spi-nor/atmel-quadspi.c)528
-rw-r--r--drivers/spi/spi-at91-usart.c62
-rw-r--r--drivers/spi/spi-bcm2835.c496
-rw-r--r--drivers/spi/spi-bcm2835aux.c2
-rw-r--r--drivers/spi/spi-dw-mmio.c8
-rw-r--r--drivers/spi/spi-dw.c1
-rw-r--r--drivers/spi/spi-fsl-dspi.c4
-rw-r--r--drivers/spi/spi-fsl-lpspi.c202
-rw-r--r--drivers/spi/spi-geni-qcom.c64
-rw-r--r--drivers/spi/spi-gpio.c24
-rw-r--r--drivers/spi/spi-imx.c156
-rw-r--r--drivers/spi/spi-mem.c278
-rw-r--r--drivers/spi/spi-mt65xx.c16
-rw-r--r--drivers/spi/spi-mxic.c619
-rw-r--r--drivers/spi/spi-npcm-pspi.c495
-rw-r--r--drivers/spi/spi-omap2-mcspi.c37
-rw-r--r--drivers/spi/spi-pl022.c14
-rw-r--r--drivers/spi/spi-pxa2xx.c109
-rw-r--r--drivers/spi/spi-pxa2xx.h3
-rw-r--r--drivers/spi/spi-qcom-qspi.c8
-rw-r--r--drivers/spi/spi-rockchip.c579
-rw-r--r--drivers/spi/spi-rspi.c6
-rw-r--r--drivers/spi/spi-sh-msiof.c16
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c6
-rw-r--r--drivers/spi/spi.c84
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/comedi/comedi.h39
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c3
-rw-r--r--drivers/staging/fsl-dpaa2/ethsw/ethsw.c70
-rw-r--r--drivers/staging/media/Kconfig4
-rw-r--r--drivers/staging/media/Makefile2
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c5
-rw-r--r--drivers/staging/media/imx/imx-media-of.c2
-rw-r--r--drivers/staging/media/ipu3/Kconfig14
-rw-r--r--drivers/staging/media/ipu3/Makefile11
-rw-r--r--drivers/staging/media/ipu3/TODO34
-rw-r--r--drivers/staging/media/ipu3/include/intel-ipu3.h2785
-rw-r--r--drivers/staging/media/ipu3/ipu3-abi.h2011
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.c265
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.h188
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.c2943
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-params.h28
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-pool.c100
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-pool.h55
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c2391
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.h213
-rw-r--r--drivers/staging/media/ipu3/ipu3-dmamap.c270
-rw-r--r--drivers/staging/media/ipu3/ipu3-dmamap.h22
-rw-r--r--drivers/staging/media/ipu3/ipu3-mmu.c561
-rw-r--r--drivers/staging/media/ipu3/ipu3-mmu.h35
-rw-r--r--drivers/staging/media/ipu3/ipu3-tables.c9609
-rw-r--r--drivers/staging/media/ipu3/ipu3-tables.h66
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c1419
-rw-r--r--drivers/staging/media/ipu3/ipu3.c830
-rw-r--r--drivers/staging/media/ipu3/ipu3.h168
-rw-r--r--drivers/staging/media/rockchip/vpu/Kconfig13
-rw-r--r--drivers/staging/media/rockchip/vpu/Makefile10
-rw-r--r--drivers/staging/media/rockchip/vpu/TODO13
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c118
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c125
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h442
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c118
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c159
-rw-r--r--drivers/staging/media/rockchip/vpu/rk3399_vpu_regs.h600
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu.h232
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h29
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c537
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c670
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h58
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.c290
-rw-r--r--drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h14
-rw-r--r--drivers/staging/media/sunxi/cedrus/Kconfig1
-rw-r--r--drivers/staging/media/sunxi/cedrus/TODO5
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c45
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c11
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c41
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c5
-rw-r--r--drivers/staging/media/tegra-vde/tegra-vde.c222
-rw-r--r--drivers/staging/media/tegra-vde/trace.h93
-rw-r--r--drivers/staging/most/core.c2
-rw-r--r--drivers/staging/mt29f_spinand/Kconfig16
-rw-r--r--drivers/staging/mt29f_spinand/Makefile1
-rw-r--r--drivers/staging/mt29f_spinand/TODO13
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c980
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.h106
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c3
-rw-r--r--drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c3
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c2
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c2
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.c1
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.h2
-rw-r--r--drivers/staging/vboxvideo/vbox_ttm.c65
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c7
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c13
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c12
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/thermal/armada_thermal.c28
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c11
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c2
-rw-r--r--drivers/thermal/hisi_thermal.c4
-rw-r--r--drivers/thermal/st/stm_thermal.c12
-rw-r--r--drivers/thunderbolt/switch.c40
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c16
-rw-r--r--drivers/tty/serial/8250/8250_port.c29
-rw-r--r--drivers/tty/serial/kgdboc.c4
-rw-r--r--drivers/tty/serial/suncore.c1
-rw-r--r--drivers/tty/serial/sunsu.c31
-rw-r--r--drivers/tty/tty_audit.c13
-rw-r--r--drivers/tty/tty_io.c11
-rw-r--r--drivers/tty/tty_port.c3
-rw-r--r--drivers/uio/uio_hv_generic.c7
-rw-r--r--drivers/usb/core/hub.c5
-rw-r--r--drivers/usb/core/quirks.c7
-rw-r--r--drivers/usb/core/usb.c6
-rw-r--r--drivers/usb/dwc3/gadget.c5
-rw-r--r--drivers/usb/gadget/function/u_ether.c13
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c88
-rw-r--r--drivers/usb/host/hwa-hc.c2
-rw-r--r--drivers/usb/host/xhci-hub.c3
-rw-r--r--drivers/usb/host/xhci-pci.c4
-rw-r--r--drivers/usb/host/xhci.c42
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/appledisplay.c1
-rw-r--r--drivers/usb/serial/console.c2
-rw-r--r--drivers/usb/serial/option.c16
-rw-r--r--drivers/usb/storage/unusual_realtek.h10
-rw-r--r--drivers/usb/typec/tps6598x.c8
-rw-r--r--drivers/usb/wusbcore/crypto.c2
-rw-r--r--drivers/vfio/pci/Kconfig6
-rw-r--r--drivers/vfio/pci/Makefile1
-rw-r--r--drivers/vfio/pci/trace.h102
-rw-r--r--drivers/vfio/pci/vfio_pci.c42
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c482
-rw-r--r--drivers/vfio/pci/vfio_pci_private.h20
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c74
-rw-r--r--drivers/vhost/net.c64
-rw-r--r--drivers/vhost/vhost.c26
-rw-r--r--drivers/vhost/vsock.c79
-rw-r--r--drivers/video/backlight/pwm_bl.c41
-rw-r--r--drivers/video/hdmi.c511
-rw-r--r--drivers/virtio/virtio_ring.c1811
-rw-r--r--drivers/xen/Kconfig3
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/balloon.c65
-rw-r--r--drivers/xen/pvcalls-front.c4
-rw-r--r--drivers/xen/xen-front-pgdir-shbuf.c553
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c3
-rw-r--r--drivers/xen/xlate_mmu.c1
-rw-r--r--fs/afs/dir.c4
-rw-r--r--fs/afs/fs_probe.c39
-rw-r--r--fs/afs/inode.c18
-rw-r--r--fs/afs/internal.h9
-rw-r--r--fs/afs/misc.c52
-rw-r--r--fs/afs/rotate.c53
-rw-r--r--fs/afs/vl_probe.c45
-rw-r--r--fs/afs/vl_rotate.c50
-rw-r--r--fs/aio.c292
-rw-r--r--fs/block_dev.c50
-rw-r--r--fs/btrfs/backref.c13
-rw-r--r--fs/btrfs/btrfs_inode.h14
-rw-r--r--fs/btrfs/check-integrity.c24
-rw-r--r--fs/btrfs/compression.c26
-rw-r--r--fs/btrfs/ctree.c46
-rw-r--r--fs/btrfs/ctree.h263
-rw-r--r--fs/btrfs/delayed-ref.c61
-rw-r--r--fs/btrfs/delayed-ref.h3
-rw-r--r--fs/btrfs/dev-replace.c191
-rw-r--r--fs/btrfs/dev-replace.h8
-rw-r--r--fs/btrfs/disk-io.c128
-rw-r--r--fs/btrfs/disk-io.h10
-rw-r--r--fs/btrfs/extent-tree.c1254
-rw-r--r--fs/btrfs/extent_io.c414
-rw-r--r--fs/btrfs/extent_io.h66
-rw-r--r--fs/btrfs/extent_map.c3
-rw-r--r--fs/btrfs/extent_map.h21
-rw-r--r--fs/btrfs/file-item.c13
-rw-r--r--fs/btrfs/file.c53
-rw-r--r--fs/btrfs/free-space-tree.c15
-rw-r--r--fs/btrfs/inode.c665
-rw-r--r--fs/btrfs/ioctl.c643
-rw-r--r--fs/btrfs/lzo.c2
-rw-r--r--fs/btrfs/ordered-data.c30
-rw-r--r--fs/btrfs/ordered-data.h47
-rw-r--r--fs/btrfs/qgroup.c38
-rw-r--r--fs/btrfs/qgroup.h6
-rw-r--r--fs/btrfs/raid56.c2
-rw-r--r--fs/btrfs/reada.c16
-rw-r--r--fs/btrfs/ref-verify.c6
-rw-r--r--fs/btrfs/relocation.c51
-rw-r--r--fs/btrfs/scrub.c85
-rw-r--r--fs/btrfs/send.c17
-rw-r--r--fs/btrfs/super.c11
-rw-r--r--fs/btrfs/sysfs.c14
-rw-r--r--fs/btrfs/sysfs.h2
-rw-r--r--fs/btrfs/tests/btrfs-tests.c4
-rw-r--r--fs/btrfs/tests/extent-io-tests.c29
-rw-r--r--fs/btrfs/tests/inode-tests.c6
-rw-r--r--fs/btrfs/transaction.c93
-rw-r--r--fs/btrfs/transaction.h16
-rw-r--r--fs/btrfs/tree-checker.c14
-rw-r--r--fs/btrfs/tree-log.c44
-rw-r--r--fs/btrfs/tree-log.h2
-rw-r--r--fs/btrfs/volumes.c779
-rw-r--r--fs/btrfs/volumes.h25
-rw-r--r--fs/btrfs/xattr.c8
-rw-r--r--fs/buffer.c10
-rw-r--r--fs/cachefiles/namei.c8
-rw-r--r--fs/cachefiles/rdwr.c9
-rw-r--r--fs/cachefiles/xattr.c3
-rw-r--r--fs/ceph/super.c4
-rw-r--r--fs/ceph/super.h4
-rw-r--r--fs/cifs/Kconfig2
-rw-r--r--fs/cifs/dir.c2
-rw-r--r--fs/cifs/file.c35
-rw-r--r--fs/cifs/smb2inode.c16
-rw-r--r--fs/cifs/smb2ops.c23
-rw-r--r--fs/cifs/smb2proto.h3
-rw-r--r--fs/dax.c55
-rw-r--r--fs/direct-io.c8
-rw-r--r--fs/dlm/ast.c10
-rw-r--r--fs/dlm/lock.c17
-rw-r--r--fs/dlm/lockspace.c9
-rw-r--r--fs/dlm/member.c7
-rw-r--r--fs/dlm/memory.c9
-rw-r--r--fs/dlm/user.c5
-rw-r--r--fs/eventpoll.c52
-rw-r--r--fs/exec.c5
-rw-r--r--fs/exportfs/expfs.c3
-rw-r--r--fs/ext2/super.c13
-rw-r--r--fs/ext2/xattr.c5
-rw-r--r--fs/ext4/acl.c3
-rw-r--r--fs/ext4/ext4.h17
-rw-r--r--fs/ext4/ialloc.c2
-rw-r--r--fs/ext4/inline.c5
-rw-r--r--fs/ext4/inode.c63
-rw-r--r--fs/ext4/ioctl.c2
-rw-r--r--fs/ext4/migrate.c48
-rw-r--r--fs/ext4/namei.c4
-rw-r--r--fs/ext4/page-io.c2
-rw-r--r--fs/ext4/resize.c79
-rw-r--r--fs/ext4/super.c92
-rw-r--r--fs/ext4/xattr.c83
-rw-r--r--fs/file.c2
-rw-r--r--fs/fscache/object.c3
-rw-r--r--fs/fuse/dir.c26
-rw-r--r--fs/fuse/file.c64
-rw-r--r--fs/fuse/fuse_i.h4
-rw-r--r--fs/fuse/inode.c3
-rw-r--r--fs/gfs2/aops.c16
-rw-r--r--fs/gfs2/bmap.c10
-rw-r--r--fs/gfs2/file.c10
-rw-r--r--fs/gfs2/glock.c2
-rw-r--r--fs/gfs2/glock.h2
-rw-r--r--fs/gfs2/glops.c17
-rw-r--r--fs/gfs2/incore.h3
-rw-r--r--fs/gfs2/inode.c18
-rw-r--r--fs/gfs2/inode.h10
-rw-r--r--fs/gfs2/log.c5
-rw-r--r--fs/gfs2/log.h5
-rw-r--r--fs/gfs2/lops.c257
-rw-r--r--fs/gfs2/lops.h4
-rw-r--r--fs/gfs2/ops_fstype.c1
-rw-r--r--fs/gfs2/recovery.c178
-rw-r--r--fs/gfs2/recovery.h5
-rw-r--r--fs/gfs2/rgrp.c4
-rw-r--r--fs/gfs2/rgrp.h2
-rw-r--r--fs/gfs2/super.c1
-rw-r--r--fs/gfs2/trans.c8
-rw-r--r--fs/hfs/btree.c3
-rw-r--r--fs/hfsplus/btree.c3
-rw-r--r--fs/inode.c4
-rw-r--r--fs/iomap.c42
-rw-r--r--fs/jbd2/commit.c3
-rw-r--r--fs/jbd2/transaction.c45
-rw-r--r--fs/jffs2/super.c3
-rw-r--r--fs/lockd/svclock.c2
-rw-r--r--fs/locks.c344
-rw-r--r--fs/namei.c3
-rw-r--r--fs/nfs/direct.c9
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c6
-rw-r--r--fs/nfs/nfs4proc.c6
-rw-r--r--fs/nfsd/nfs4state.c6
-rw-r--r--fs/notify/fanotify/fanotify.c32
-rw-r--r--fs/notify/fanotify/fanotify_user.c12
-rw-r--r--fs/notify/fdinfo.c1
-rw-r--r--fs/notify/fsnotify.c2
-rw-r--r--fs/ocfs2/export.c2
-rw-r--r--fs/ocfs2/locks.c10
-rw-r--r--fs/ocfs2/move_extents.c47
-rw-r--r--fs/openpromfs/inode.c11
-rw-r--r--fs/overlayfs/dir.c14
-rw-r--r--fs/overlayfs/export.c6
-rw-r--r--fs/overlayfs/inode.c17
-rw-r--r--fs/proc/proc_sysctl.c13
-rw-r--r--fs/pstore/ftrace.c2
-rw-r--r--fs/pstore/inode.c51
-rw-r--r--fs/pstore/platform.c173
-rw-r--r--fs/pstore/ram.c78
-rw-r--r--fs/pstore/ram_core.c45
-rw-r--r--fs/quota/quota.c3
-rw-r--r--fs/read_write.c2
-rw-r--r--fs/select.c360
-rw-r--r--fs/splice.c7
-rw-r--r--fs/sysfs/file.c4
-rw-r--r--fs/sysv/inode.c2
-rw-r--r--fs/ubifs/Kconfig16
-rw-r--r--fs/ubifs/auth.c5
-rw-r--r--fs/ubifs/lpt.c12
-rw-r--r--fs/ubifs/replay.c72
-rw-r--r--fs/ubifs/sb.c13
-rw-r--r--fs/udf/inode.c6
-rw-r--r--fs/udf/super.c16
-rw-r--r--fs/udf/unicode.c14
-rw-r--r--fs/userfaultfd.c18
-rw-r--r--fs/xfs/libxfs/xfs_ag.c9
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c79
-rw-r--r--fs/xfs/libxfs/xfs_alloc.h4
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c6
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h4
-rw-r--r--fs/xfs/libxfs/xfs_btree.c2
-rw-r--r--fs/xfs/libxfs/xfs_defer.c67
-rw-r--r--fs/xfs/libxfs/xfs_defer.h37
-rw-r--r--fs/xfs/libxfs/xfs_format.h12
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c54
-rw-r--r--fs/xfs/libxfs/xfs_ialloc_btree.c7
-rw-r--r--fs/xfs/libxfs/xfs_refcount_btree.c6
-rw-r--r--fs/xfs/libxfs/xfs_rmap.c240
-rw-r--r--fs/xfs/libxfs/xfs_rmap.h54
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c6
-rw-r--r--fs/xfs/libxfs/xfs_symlink_remote.c14
-rw-r--r--fs/xfs/libxfs/xfs_types.c9
-rw-r--r--fs/xfs/libxfs/xfs_types.h22
-rw-r--r--fs/xfs/scrub/agheader.c25
-rw-r--r--fs/xfs/scrub/agheader_repair.c5
-rw-r--r--fs/xfs/scrub/alloc.c4
-rw-r--r--fs/xfs/scrub/btree.c45
-rw-r--r--fs/xfs/scrub/btree.h22
-rw-r--r--fs/xfs/scrub/common.c14
-rw-r--r--fs/xfs/scrub/common.h2
-rw-r--r--fs/xfs/scrub/ialloc.c64
-rw-r--r--fs/xfs/scrub/inode.c4
-rw-r--r--fs/xfs/scrub/refcount.c16
-rw-r--r--fs/xfs/scrub/repair.c54
-rw-r--r--fs/xfs/scrub/repair.h7
-rw-r--r--fs/xfs/scrub/rmap.c35
-rw-r--r--fs/xfs/scrub/scrub.h4
-rw-r--r--fs/xfs/scrub/trace.h131
-rw-r--r--fs/xfs/xfs_aops.h3
-rw-r--r--fs/xfs/xfs_bmap_util.c4
-rw-r--r--fs/xfs/xfs_extfree_item.c5
-rw-r--r--fs/xfs/xfs_fsops.c2
-rw-r--r--fs/xfs/xfs_inode.c16
-rw-r--r--fs/xfs/xfs_ioctl32.c58
-rw-r--r--fs/xfs/xfs_itable.c14
-rw-r--r--fs/xfs/xfs_log_recover.c8
-rw-r--r--fs/xfs/xfs_mount.c4
-rw-r--r--fs/xfs/xfs_mount.h11
-rw-r--r--fs/xfs/xfs_qm_bhv.c2
-rw-r--r--fs/xfs/xfs_reflink.c232
-rw-r--r--fs/xfs/xfs_rtalloc.c57
-rw-r--r--fs/xfs/xfs_super.c10
-rw-r--r--fs/xfs/xfs_symlink.c33
-rw-r--r--fs/xfs/xfs_trace.h51
-rw-r--r--fs/xfs/xfs_trans.h7
-rw-r--r--fs/xfs/xfs_trans_bmap.c11
-rw-r--r--fs/xfs/xfs_trans_extfree.c40
-rw-r--r--fs/xfs/xfs_trans_refcount.c11
-rw-r--r--fs/xfs/xfs_trans_rmap.c11
-rw-r--r--include/acpi/acoutput.h6
-rw-r--r--include/acpi/acpi_drivers.h7
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl.h1
-rw-r--r--include/acpi/actbl3.h23
-rw-r--r--include/acpi/actypes.h6
-rw-r--r--include/acpi/cppc_acpi.h3
-rw-r--r--include/acpi/platform/aclinux.h4
-rw-r--r--include/asm-generic/5level-fixup.h1
-rw-r--r--include/asm-generic/bug.h8
-rw-r--r--include/asm-generic/fixmap.h1
-rw-r--r--include/asm-generic/pgtable-nop4d-hack.h1
-rw-r--r--include/asm-generic/pgtable-nop4d.h1
-rw-r--r--include/asm-generic/pgtable-nopud.h1
-rw-r--r--include/asm-generic/pgtable.h56
-rw-r--r--include/crypto/acompress.h38
-rw-r--r--include/crypto/aead.h41
-rw-r--r--include/crypto/akcipher.h74
-rw-r--r--include/crypto/chacha.h54
-rw-r--r--include/crypto/chacha20.h27
-rw-r--r--include/crypto/hash.h32
-rw-r--r--include/crypto/hash_info.h1
-rw-r--r--include/crypto/internal/cryptouser.h9
-rw-r--r--include/crypto/internal/skcipher.h2
-rw-r--r--include/crypto/kpp.h48
-rw-r--r--include/crypto/nhpoly1305.h74
-rw-r--r--include/crypto/poly1305.h28
-rw-r--r--include/crypto/rng.h27
-rw-r--r--include/crypto/skcipher.h49
-rw-r--r--include/crypto/streebog.h34
-rw-r--r--include/drm/bridge/dw_hdmi.h1
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h14
-rw-r--r--include/drm/drmP.h7
-rw-r--r--include/drm/drm_atomic.h10
-rw-r--r--include/drm/drm_atomic_helper.h45
-rw-r--r--include/drm/drm_atomic_state_helper.h73
-rw-r--r--include/drm/drm_connector.h60
-rw-r--r--include/drm/drm_crtc.h9
-rw-r--r--include/drm/drm_crtc_helper.h6
-rw-r--r--include/drm/drm_damage_helper.h99
-rw-r--r--include/drm/drm_dp_helper.h98
-rw-r--r--include/drm/drm_dp_mst_helper.h6
-rw-r--r--include/drm/drm_drv.h14
-rw-r--r--include/drm/drm_dsc.h485
-rw-r--r--include/drm/drm_fb_cma_helper.h2
-rw-r--r--include/drm/drm_file.h14
-rw-r--r--include/drm/drm_fourcc.h89
-rw-r--r--include/drm/drm_framebuffer.h24
-rw-r--r--include/drm/drm_gem.h181
-rw-r--r--include/drm/drm_gem_cma_helper.h24
-rw-r--r--include/drm/drm_global.h53
-rw-r--r--include/drm/drm_hdcp.h212
-rw-r--r--include/drm/drm_mipi_dsi.h8
-rw-r--r--include/drm/drm_mode_config.h27
-rw-r--r--include/drm/drm_modeset_lock.h59
-rw-r--r--include/drm/drm_plane.h44
-rw-r--r--include/drm/drm_plane_helper.h35
-rw-r--r--include/drm/drm_prime.h4
-rw-r--r--include/drm/drm_property.h3
-rw-r--r--include/drm/drm_syncobj.h4
-rw-r--r--include/drm/drm_vblank.h8
-rw-r--r--include/drm/gpu_scheduler.h9
-rw-r--r--include/drm/i915_pciids.h21
-rw-r--r--include/drm/tinydrm/tinydrm.h35
-rw-r--r--include/drm/ttm/ttm_bo_driver.h23
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h4
-rw-r--r--include/drm/ttm/ttm_memory.h4
-rw-r--r--include/dt-bindings/clock/bcm2835-aux.h10
-rw-r--r--include/dt-bindings/clock/bcm2835.h10
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h18
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h4
-rw-r--r--include/dt-bindings/clock/imx7ulp-clock.h116
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h395
-rw-r--r--include/dt-bindings/clock/imx8qxp-clock.h289
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h4
-rw-r--r--include/dt-bindings/clock/mt7629-clk.h203
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h94
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h2
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sdm845.h24
-rw-r--r--include/dt-bindings/clock/qcom,lpass-sdm845.h15
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h4
-rw-r--r--include/dt-bindings/clock/r8a7795-cpg-mssr.h2
-rw-r--r--include/dt-bindings/clock/r8a7796-cpg-mssr.h2
-rw-r--r--include/dt-bindings/clock/r8a77995-cpg-mssr.h5
-rw-r--r--include/dt-bindings/clock/rk3328-cru.h3
-rw-r--r--include/dt-bindings/clock/sun8i-de2.h3
-rw-r--r--include/dt-bindings/clock/suniv-ccu-f1c100s.h70
-rw-r--r--include/dt-bindings/firmware/imx/rsrc.h559
-rw-r--r--include/dt-bindings/media/xilinx-vip.h5
-rw-r--r--include/dt-bindings/regulator/active-semi,8945a-regulator.h30
-rw-r--r--include/dt-bindings/reset/sun8i-de2.h1
-rw-r--r--include/dt-bindings/reset/suniv-ccu-f1c100s.h38
-rw-r--r--include/dt-bindings/sound/qcom,q6afe.h1
-rw-r--r--include/kvm/arm_arch_timer.h4
-rw-r--r--include/linux/acpi.h30
-rw-r--r--include/linux/adxl.h5
-rw-r--r--include/linux/audit.h8
-rw-r--r--include/linux/avf/virtchnl.h10
-rw-r--r--include/linux/bio.h29
-rw-r--r--include/linux/blk-cgroup.h227
-rw-r--r--include/linux/blk-mq-pci.h4
-rw-r--r--include/linux/blk-mq-rdma.h2
-rw-r--r--include/linux/blk-mq-virtio.h4
-rw-r--r--include/linux/blk-mq.h83
-rw-r--r--include/linux/blk_types.h24
-rw-r--r--include/linux/blkdev.h250
-rw-r--r--include/linux/bpf.h42
-rw-r--r--include/linux/bpf_verifier.h5
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/bsg-lib.h6
-rw-r--r--include/linux/btf.h20
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/clk-provider.h15
-rw-r--r--include/linux/clk/clk-conf.h5
-rw-r--r--include/linux/compat.h26
-rw-r--r--include/linux/compiler.h56
-rw-r--r--include/linux/compiler_attributes.h9
-rw-r--r--include/linux/compiler_types.h108
-rw-r--r--include/linux/cordic.h9
-rw-r--r--include/linux/cpufreq.h8
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cpuidle.h2
-rw-r--r--include/linux/crypto.h331
-rw-r--r--include/linux/dax.h14
-rw-r--r--include/linux/dell-led.h7
-rw-r--r--include/linux/devfreq.h13
-rw-r--r--include/linux/dma-fence.h1
-rw-r--r--include/linux/dma-mapping.h2
-rw-r--r--include/linux/edac.h6
-rw-r--r--include/linux/efi.h19
-rw-r--r--include/linux/elevator.h94
-rw-r--r--include/linux/energy_model.h187
-rw-r--r--include/linux/etherdevice.h1
-rw-r--r--include/linux/fanotify.h5
-rw-r--r--include/linux/filter.h36
-rw-r--r--include/linux/firmware/imx/sci.h1
-rw-r--r--include/linux/firmware/imx/svc/pm.h85
-rw-r--r--include/linux/firmware/imx/types.h552
-rw-r--r--include/linux/fs.h15
-rw-r--r--include/linux/fscache-cache.h3
-rw-r--r--include/linux/fsnotify.h61
-rw-r--r--include/linux/fsnotify_backend.h11
-rw-r--r--include/linux/ftrace.h7
-rw-r--r--include/linux/futex.h8
-rw-r--r--include/linux/genhd.h57
-rw-r--r--include/linux/gfp.h12
-rw-r--r--include/linux/gpio/consumer.h23
-rw-r--r--include/linux/hdmi.h24
-rw-r--r--include/linux/hid-sensor-hub.h4
-rw-r--r--include/linux/hrtimer.h5
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/i3c/ccc.h385
-rw-r--r--include/linux/i3c/device.h331
-rw-r--r--include/linux/i3c/master.h648
-rw-r--r--include/linux/ide.h14
-rw-r--r--include/linux/ieee80211.h32
-rw-r--r--include/linux/if_bridge.h12
-rw-r--r--include/linux/if_vlan.h53
-rw-r--r--include/linux/indirect_call_wrapper.h51
-rw-r--r--include/linux/init.h1
-rw-r--r--include/linux/interrupt.h19
-rw-r--r--include/linux/ioprio.h13
-rw-r--r--include/linux/irq.h6
-rw-r--r--include/linux/irq_sim.h2
-rw-r--r--include/linux/irqchip.h4
-rw-r--r--include/linux/irqchip/irq-madera.h132
-rw-r--r--include/linux/irqdomain.h6
-rw-r--r--include/linux/jbd2.h7
-rw-r--r--include/linux/kexec.h12
-rw-r--r--include/linux/kprobes.h6
-rw-r--r--include/linux/kvm_host.h12
-rw-r--r--include/linux/leds.h21
-rw-r--r--include/linux/lightnvm.h3
-rw-r--r--include/linux/linkage.h6
-rw-r--r--include/linux/linkmode.h9
-rw-r--r--include/linux/lockdep.h2
-rw-r--r--include/linux/mempolicy.h2
-rw-r--r--include/linux/mfd/axp20x.h4
-rw-r--r--include/linux/mfd/wm8994/pdata.h3
-rw-r--r--include/linux/mii.h121
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mlx5/cq.h12
-rw-r--r--include/linux/mlx5/device.h24
-rw-r--r--include/linux/mlx5/driver.h254
-rw-r--r--include/linux/mlx5/eq.h72
-rw-r--r--include/linux/mlx5/fs.h8
-rw-r--r--include/linux/mlx5/mlx5_ifc.h183
-rw-r--r--include/linux/mlx5/port.h3
-rw-r--r--include/linux/mlx5/qp.h5
-rw-r--r--include/linux/mlx5/srq.h72
-rw-r--r--include/linux/mlx5/transobj.h11
-rw-r--r--include/linux/mm_types.h5
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmzone.h8
-rw-r--r--include/linux/mod_devicetable.h19
-rw-r--r--include/linux/module.h11
-rw-r--r--include/linux/msi.h6
-rw-r--r--include/linux/mtd/cfi.h1
-rw-r--r--include/linux/mtd/mtd.h3
-rw-r--r--include/linux/mtd/rawnand.h158
-rw-r--r--include/linux/mtd/sh_flctl.h16
-rw-r--r--include/linux/mtd/spi-nor.h11
-rw-r--r--include/linux/mtd/spinand.h2
-rw-r--r--include/linux/netdevice.h76
-rw-r--r--include/linux/netfilter/ipset/ip_set.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_proto_gre.h15
-rw-r--r--include/linux/netfilter/nfnetlink.h12
-rw-r--r--include/linux/netfilter_bridge.h33
-rw-r--r--include/linux/netlink.h57
-rw-r--r--include/linux/nvme-fc-driver.h17
-rw-r--r--include/linux/nvme-tcp.h189
-rw-r--r--include/linux/nvme.h73
-rw-r--r--include/linux/objagg.h46
-rw-r--r--include/linux/of.h1
-rw-r--r--include/linux/of_fdt.h1
-rw-r--r--include/linux/of_net.h6
-rw-r--r--include/linux/pci.h4
-rw-r--r--include/linux/pci_ids.h5
-rw-r--r--include/linux/pe.h2
-rw-r--r--include/linux/percpu-rwsem.h2
-rw-r--r--include/linux/perf/arm_pmu.h4
-rw-r--r--include/linux/perf_event.h4
-rw-r--r--include/linux/phy.h114
-rw-r--r--include/linux/phy_fixed.h5
-rw-r--r--include/linux/phy_led_triggers.h2
-rw-r--r--include/linux/platform_data/davinci_asp.h1
-rw-r--r--include/linux/platform_data/gpio-davinci.h2
-rw-r--r--include/linux/platform_data/mdio-gpio.h14
-rw-r--r--include/linux/pm.h5
-rw-r--r--include/linux/pm_domain.h14
-rw-r--r--include/linux/pm_opp.h23
-rw-r--r--include/linux/pm_runtime.h6
-rw-r--r--include/linux/power/smartreflex.h10
-rw-r--r--include/linux/preempt.h3
-rw-r--r--include/linux/printk.h5
-rw-r--r--include/linux/property.h12
-rw-r--r--include/linux/psi.h3
-rw-r--r--include/linux/pstore.h39
-rw-r--r--include/linux/pstore_ram.h50
-rw-r--r--include/linux/ptp_clock_kernel.h33
-rw-r--r--include/linux/ptrace.h18
-rw-r--r--include/linux/pwm.h42
-rw-r--r--include/linux/qed/qed_if.h41
-rw-r--r--include/linux/rcupdate_wait.h17
-rw-r--r--include/linux/regmap.h41
-rw-r--r--include/linux/regulator/consumer.h2
-rw-r--r--include/linux/regulator/driver.h9
-rw-r--r--include/linux/regulator/machine.h3
-rw-r--r--include/linux/regulator/pfuze100.h3
-rw-r--r--include/linux/reservation.h12
-rw-r--r--include/linux/rhashtable.h34
-rw-r--r--include/linux/sbitmap.h89
-rw-r--r--include/linux/sched.h20
-rw-r--r--include/linux/sched/cpufreq.h6
-rw-r--r--include/linux/sched/isolation.h4
-rw-r--r--include/linux/sched/mm.h2
-rw-r--r--include/linux/sched/smt.h20
-rw-r--r--include/linux/sched/stat.h2
-rw-r--r--include/linux/sched/topology.h17
-rw-r--r--include/linux/sfp.h2
-rw-r--r--include/linux/signal.h4
-rw-r--r--include/linux/skbuff.h178
-rw-r--r--include/linux/skmsg.h9
-rw-r--r--include/linux/socket.h10
-rw-r--r--include/linux/spi/pxa2xx_spi.h1
-rw-r--r--include/linux/spi/spi-mem.h84
-rw-r--r--include/linux/spi/spi.h5
-rw-r--r--include/linux/srcu.h79
-rw-r--r--include/linux/srcutiny.h24
-rw-r--r--include/linux/srcutree.h8
-rw-r--r--include/linux/sunrpc/xdr.h1
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/linux/syscalls.h29
-rw-r--r--include/linux/sysfs.h8
-rw-r--r--include/linux/t10-pi.h9
-rw-r--r--include/linux/thinkpad_acpi.h16
-rw-r--r--include/linux/time32.h25
-rw-r--r--include/linux/timekeeping.h14
-rw-r--r--include/linux/timekeeping32.h15
-rw-r--r--include/linux/trace_events.h8
-rw-r--r--include/linux/tracehook.h4
-rw-r--r--include/linux/tracepoint.h8
-rw-r--r--include/linux/tty.h1
-rw-r--r--include/linux/types.h4
-rw-r--r--include/linux/udp.h26
-rw-r--r--include/linux/uio.h5
-rw-r--r--include/linux/usb.h4
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/linux/xarray.h54
-rw-r--r--include/media/cec.h1
-rw-r--r--include/media/davinci/vpbe.h4
-rw-r--r--include/media/media-request.h2
-rw-r--r--include/media/mpeg2-ctrls.h86
-rw-r--r--include/media/rc-map.h1
-rw-r--r--include/media/v4l2-common.h5
-rw-r--r--include/media/v4l2-ctrls.h6
-rw-r--r--include/media/v4l2-dev.h13
-rw-r--r--include/media/v4l2-ioctl.h33
-rw-r--r--include/media/v4l2-subdev.h6
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/net/act_api.h30
-rw-r--r--include/net/cfg80211.h282
-rw-r--r--include/net/devlink.h4
-rw-r--r--include/net/dsa.h3
-rw-r--r--include/net/flow.h2
-rw-r--r--include/net/flow_dissector.h6
-rw-r--r--include/net/gen_stats.h2
-rw-r--r--include/net/geneve.h6
-rw-r--r--include/net/gre.h13
-rw-r--r--include/net/icmp.h2
-rw-r--r--include/net/inet6_hashtables.h5
-rw-r--r--include/net/inet_common.h9
-rw-r--r--include/net/inet_hashtables.h25
-rw-r--r--include/net/inet_sock.h21
-rw-r--r--include/net/ip.h4
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_tunnels.h20
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/l3mdev.h22
-rw-r--r--include/net/mac80211.h25
-rw-r--r--include/net/neighbour.h56
-rw-r--r--include/net/netfilter/br_netfilter.h14
-rw-r--r--include/net/netfilter/ipv4/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/ipv6/nf_nat_masquerade.h2
-rw-r--r--include/net/netfilter/nf_conntrack.h5
-rw-r--r--include/net/netfilter/nf_conntrack_acct.h6
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h7
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h3
-rw-r--r--include/net/netfilter/nf_conntrack_timestamp.h13
-rw-r--r--include/net/netfilter/nf_flow_table.h4
-rw-r--r--include/net/netfilter/nf_nat_l3proto.h7
-rw-r--r--include/net/netfilter/nf_nat_l4proto.h78
-rw-r--r--include/net/netns/conntrack.h6
-rw-r--r--include/net/netns/ipv4.h3
-rw-r--r--include/net/netns/xfrm.h2
-rw-r--r--include/net/pkt_cls.h108
-rw-r--r--include/net/protocol.h9
-rw-r--r--include/net/raw.h14
-rw-r--r--include/net/rtnetlink.h3
-rw-r--r--include/net/sch_generic.h27
-rw-r--r--include/net/sctp/constants.h2
-rw-r--r--include/net/sctp/sctp.h9
-rw-r--r--include/net/sctp/sm.h4
-rw-r--r--include/net/sctp/structs.h12
-rw-r--r--include/net/sctp/ulpevent.h39
-rw-r--r--include/net/seg6.h1
-rw-r--r--include/net/sock.h45
-rw-r--r--include/net/switchdev.h106
-rw-r--r--include/net/tcp.h51
-rw-r--r--include/net/tls.h15
-rw-r--r--include/net/udp.h58
-rw-r--r--include/net/udp_tunnel.h10
-rw-r--r--include/net/vxlan.h17
-rw-r--r--include/net/xfrm.h47
-rw-r--r--include/scsi/scsi_cmnd.h6
-rw-r--r--include/scsi/scsi_dh.h2
-rw-r--r--include/scsi/scsi_driver.h3
-rw-r--r--include/scsi/scsi_host.h18
-rw-r--r--include/scsi/scsi_tcq.h14
-rw-r--r--include/soc/fsl/dpaa2-io.h4
-rw-r--r--include/soc/fsl/qman.h8
-rw-r--r--include/soc/tegra/pmc.h2
-rw-r--r--include/sound/compress_driver.h19
-rw-r--r--include/sound/hda_codec.h1
-rw-r--r--include/sound/hda_component.h11
-rw-r--r--include/sound/hdaudio.h14
-rw-r--r--include/sound/pcm_params.h4
-rw-r--r--include/sound/simple_card_utils.h6
-rw-r--r--include/sound/soc-acpi-intel-match.h1
-rw-r--r--include/sound/soc-acpi.h15
-rw-r--r--include/sound/soc.h16
-rw-r--r--include/trace/events/bcache.h27
-rw-r--r--include/trace/events/btrfs.h4
-rw-r--r--include/trace/events/ext4.h20
-rw-r--r--include/trace/events/filelock.h16
-rw-r--r--include/trace/events/net.h59
-rw-r--r--include/trace/events/objagg.h228
-rw-r--r--include/trace/events/sched.h12
-rw-r--r--include/uapi/asm-generic/Kbuild.asm1
-rw-r--r--include/uapi/asm-generic/unistd.h8
-rw-r--r--include/uapi/drm/amdgpu_drm.h6
-rw-r--r--include/uapi/drm/drm_fourcc.h15
-rw-r--r--include/uapi/drm/drm_mode.h19
-rw-r--r--include/uapi/drm/i915_drm.h8
-rw-r--r--include/uapi/drm/msm_drm.h25
-rw-r--r--include/uapi/drm/v3d_drm.h39
-rw-r--r--include/uapi/drm/virtgpu_drm.h13
-rw-r--r--include/uapi/linux/aio_abi.h2
-rw-r--r--include/uapi/linux/blkzoned.h4
-rw-r--r--include/uapi/linux/bpf.h236
-rw-r--r--include/uapi/linux/btf.h38
-rw-r--r--include/uapi/linux/btrfs.h1
-rw-r--r--include/uapi/linux/btrfs_tree.h1
-rw-r--r--include/uapi/linux/cryptouser.h102
-rw-r--r--include/uapi/linux/devlink.h5
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/ethtool.h4
-rw-r--r--include/uapi/linux/fanotify.h2
-rw-r--r--include/uapi/linux/hash_info.h2
-rw-r--r--include/uapi/linux/if_bridge.h21
-rw-r--r--include/uapi/linux/if_link.h19
-rw-r--r--include/uapi/linux/if_tun.h1
-rw-r--r--include/uapi/linux/if_tunnel.h20
-rw-r--r--include/uapi/linux/in.h10
-rw-r--r--include/uapi/linux/input-event-codes.h9
-rw-r--r--include/uapi/linux/kfd_ioctl.h26
-rw-r--r--include/uapi/linux/kvm.h19
-rw-r--r--include/uapi/linux/ncsi.h15
-rw-r--r--include/uapi/linux/neighbour.h1
-rw-r--r--include/uapi/linux/net_namespace.h2
-rw-r--r--include/uapi/linux/net_tstamp.h4
-rw-r--r--include/uapi/linux/netfilter.h4
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h19
-rw-r--r--include/uapi/linux/netfilter_decnet.h10
-rw-r--r--include/uapi/linux/netfilter_ipv4.h28
-rw-r--r--include/uapi/linux/netfilter_ipv6.h29
-rw-r--r--include/uapi/linux/netlink.h2
-rw-r--r--include/uapi/linux/nl80211.h458
-rw-r--r--include/uapi/linux/pkt_cls.h7
-rw-r--r--include/uapi/linux/pkt_sched.h30
-rw-r--r--include/uapi/linux/prctl.h9
-rw-r--r--include/uapi/linux/ptp_clock.h12
-rw-r--r--include/uapi/linux/sctp.h13
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/tcp.h1
-rw-r--r--include/uapi/linux/udp.h1
-rw-r--r--include/uapi/linux/v4l2-common.h28
-rw-r--r--include/uapi/linux/v4l2-controls.h68
-rw-r--r--include/uapi/linux/vfio.h42
-rw-r--r--include/uapi/linux/videodev2.h8
-rw-r--r--include/uapi/linux/virtio_config.h3
-rw-r--r--include/uapi/linux/virtio_gpu.h18
-rw-r--r--include/uapi/linux/virtio_ring.h52
-rw-r--r--include/uapi/sound/firewire.h20
-rw-r--r--include/video/imx-ipu-v3.h9
-rw-r--r--include/xen/balloon.h5
-rw-r--r--include/xen/interface/hvm/start_info.h63
-rw-r--r--include/xen/xen-front-pgdir-shbuf.h89
-rw-r--r--include/xen/xen.h3
-rw-r--r--init/Kconfig18
-rw-r--r--init/do_mounts_initrd.c3
-rw-r--r--init/initramfs.c28
-rw-r--r--init/main.c22
-rw-r--r--kernel/Makefile7
-rw-r--r--kernel/audit.c62
-rw-r--r--kernel/audit.h10
-rw-r--r--kernel/audit_fsnotify.c6
-rw-r--r--kernel/audit_tree.c498
-rw-r--r--kernel/audit_watch.c6
-rw-r--r--kernel/auditsc.c150
-rw-r--r--kernel/bpf/arraymap.c1
-rw-r--r--kernel/bpf/btf.c860
-rw-r--r--kernel/bpf/core.c253
-rw-r--r--kernel/bpf/cpumap.c2
-rw-r--r--kernel/bpf/hashtab.c13
-rw-r--r--kernel/bpf/local_storage.c90
-rw-r--r--kernel/bpf/lpm_trie.c60
-rw-r--r--kernel/bpf/offload.c76
-rw-r--r--kernel/bpf/queue_stack_maps.c16
-rw-r--r--kernel/bpf/syscall.c160
-rw-r--r--kernel/bpf/verifier.c704
-rw-r--r--kernel/cgroup/cgroup.c50
-rw-r--r--kernel/cpu.c15
-rw-r--r--kernel/dma/direct.c7
-rw-r--r--kernel/events/core.c4
-rw-r--r--kernel/events/hw_breakpoint.c2
-rw-r--r--kernel/events/uprobes.c14
-rw-r--r--kernel/fork.c9
-rw-r--r--kernel/futex.c276
-rw-r--r--kernel/futex_compat.c202
-rw-r--r--kernel/irq/affinity.c176
-rw-r--r--kernel/irq/chip.c2
-rw-r--r--kernel/irq/devres.c4
-rw-r--r--kernel/irq/ipi.c4
-rw-r--r--kernel/irq/irq_sim.c23
-rw-r--r--kernel/irq/irqdesc.c28
-rw-r--r--kernel/irq/irqdomain.c4
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/irq/matrix.c34
-rw-r--r--kernel/irq/msi.c8
-rw-r--r--kernel/irq/spurious.c6
-rw-r--r--kernel/kcov.c4
-rw-r--r--kernel/kexec_file.c70
-rw-r--r--kernel/kprobes.c77
-rw-r--r--kernel/livepatch/patch.c4
-rw-r--r--kernel/livepatch/transition.c4
-rw-r--r--kernel/locking/lockdep.c78
-rw-r--r--kernel/locking/mutex-debug.c4
-rw-r--r--kernel/module.c142
-rw-r--r--kernel/module_signing.c3
-rw-r--r--kernel/padata.c2
-rw-r--r--kernel/panic.c6
-rw-r--r--kernel/pid.c6
-rw-r--r--kernel/power/Kconfig15
-rw-r--r--kernel/power/Makefile2
-rw-r--r--kernel/power/energy_model.c201
-rw-r--r--kernel/power/main.c15
-rw-r--r--kernel/power/qos.c15
-rw-r--r--kernel/printk/printk.c131
-rw-r--r--kernel/ptrace.c10
-rw-r--r--kernel/rcu/rcu.h4
-rw-r--r--kernel/rcu/rcutorture.c389
-rw-r--r--kernel/rcu/srcutiny.c120
-rw-r--r--kernel/rcu/srcutree.c489
-rw-r--r--kernel/rcu/sync.c25
-rw-r--r--kernel/rcu/tree.c114
-rw-r--r--kernel/rcu/tree.h18
-rw-r--r--kernel/rcu/tree_exp.h10
-rw-r--r--kernel/rcu/tree_plugin.h81
-rw-r--r--kernel/rcu/update.c9
-rw-r--r--kernel/sched/core.c27
-rw-r--r--kernel/sched/cpufreq.c5
-rw-r--r--kernel/sched/cpufreq_schedutil.c95
-rw-r--r--kernel/sched/cputime.c2
-rw-r--r--kernel/sched/deadline.c25
-rw-r--r--kernel/sched/debug.c2
-rw-r--r--kernel/sched/fair.c403
-rw-r--r--kernel/sched/isolation.c14
-rw-r--r--kernel/sched/membarrier.c6
-rw-r--r--kernel/sched/psi.c30
-rw-r--r--kernel/sched/rt.c28
-rw-r--r--kernel/sched/sched.h101
-rw-r--r--kernel/sched/stats.h8
-rw-r--r--kernel/sched/topology.c231
-rw-r--r--kernel/signal.c143
-rw-r--r--kernel/stackleak.c6
-rw-r--r--kernel/sys.c8
-rw-r--r--kernel/sys_ni.c2
-rw-r--r--kernel/time/alarmtimer.c5
-rw-r--r--kernel/time/clockevents.c18
-rw-r--r--kernel/time/clocksource.c20
-rw-r--r--kernel/time/hrtimer.c19
-rw-r--r--kernel/time/itimer.c2
-rw-r--r--kernel/time/jiffies.c28
-rw-r--r--kernel/time/ntp.c11
-rw-r--r--kernel/time/posix-clock.c17
-rw-r--r--kernel/time/posix-stubs.c5
-rw-r--r--kernel/time/posix-timers.c30
-rw-r--r--kernel/time/sched_clock.c9
-rw-r--r--kernel/time/test_udelay.c10
-rw-r--r--kernel/time/tick-broadcast-hrtimer.c4
-rw-r--r--kernel/time/tick-broadcast.c6
-rw-r--r--kernel/time/tick-common.c6
-rw-r--r--kernel/time/tick-oneshot.c6
-rw-r--r--kernel/time/tick-sched.c5
-rw-r--r--kernel/time/time.c49
-rw-r--r--kernel/time/timeconst.bc2
-rw-r--r--kernel/time/timeconv.c1
-rw-r--r--kernel/time/timecounter.c17
-rw-r--r--kernel/time/timekeeping.c27
-rw-r--r--kernel/time/timekeeping_debug.c26
-rw-r--r--kernel/time/timer.c3
-rw-r--r--kernel/time/timer_list.c7
-rw-r--r--kernel/torture.c34
-rw-r--r--kernel/trace/blktrace.c4
-rw-r--r--kernel/trace/bpf_trace.c107
-rw-r--r--kernel/trace/ftrace.c32
-rw-r--r--kernel/trace/ring_buffer.c12
-rw-r--r--kernel/trace/trace.c10
-rw-r--r--kernel/trace/trace.h57
-rw-r--r--kernel/trace/trace_events_filter.c9
-rw-r--r--kernel/trace/trace_events_trigger.c6
-rw-r--r--kernel/trace/trace_functions_graph.c53
-rw-r--r--kernel/trace/trace_irqsoff.c2
-rw-r--r--kernel/trace/trace_kprobe.c2
-rw-r--r--kernel/trace/trace_sched_wakeup.c2
-rw-r--r--kernel/tracepoint.c4
-rw-r--r--kernel/workqueue.c8
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Kconfig.debug10
-rw-r--r--lib/Makefile4
-rw-r--r--lib/chacha.c (renamed from lib/chacha20.c)59
-rw-r--r--lib/cordic.c23
-rw-r--r--lib/debugobjects.c5
-rw-r--r--lib/gcd.c2
-rw-r--r--lib/iov_iter.c57
-rw-r--r--lib/objagg.c501
-rw-r--r--lib/percpu-refcount.c2
-rw-r--r--lib/radix-tree.c4
-rw-r--r--lib/raid6/Makefile15
-rw-r--r--lib/rhashtable.c8
-rw-r--r--lib/sbitmap.c170
-rw-r--r--lib/test_bpf.c14
-rw-r--r--lib/test_debug_virtual.c1
-rw-r--r--lib/test_hexdump.c2
-rw-r--r--lib/test_kmod.c1
-rw-r--r--lib/test_objagg.c836
-rw-r--r--lib/test_rhashtable.c32
-rw-r--r--lib/test_xarray.c155
-rw-r--r--lib/xarray.c8
-rw-r--r--mm/gup.c3
-rw-r--r--mm/huge_memory.c114
-rw-r--r--mm/hugetlb.c7
-rw-r--r--mm/khugepaged.c144
-rw-r--r--mm/memblock.c2
-rw-r--r--mm/memory-failure.c6
-rw-r--r--mm/mempolicy.c34
-rw-r--r--mm/mmap.c25
-rw-r--r--mm/mmu_gather.c2
-rw-r--r--mm/page_alloc.c23
-rw-r--r--mm/page_io.c9
-rw-r--r--mm/rmap.c13
-rw-r--r--mm/shmem.c51
-rw-r--r--mm/slab.c4
-rw-r--r--mm/slab_common.c6
-rw-r--r--mm/sparse.c16
-rw-r--r--mm/swap.c3
-rw-r--r--mm/truncate.c8
-rw-r--r--mm/userfaultfd.c62
-rw-r--r--mm/vmscan.c22
-rw-r--r--net/6lowpan/debugfs.c13
-rw-r--r--net/8021q/vlan.c101
-rw-r--r--net/8021q/vlan.h12
-rw-r--r--net/8021q/vlan_core.c128
-rw-r--r--net/8021q/vlan_dev.c2
-rw-r--r--net/Kconfig4
-rw-r--r--net/batman-adv/Kconfig10
-rw-r--r--net/batman-adv/bat_iv_ogm.c25
-rw-r--r--net/batman-adv/bat_v.c26
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c82
-rw-r--r--net/batman-adv/debugfs.c2
-rw-r--r--net/batman-adv/distributed-arp-table.c42
-rw-r--r--net/batman-adv/gateway_client.c3
-rw-r--r--net/batman-adv/hard-interface.c3
-rw-r--r--net/batman-adv/hash.c2
-rw-r--r--net/batman-adv/hash.h6
-rw-r--r--net/batman-adv/log.c60
-rw-r--r--net/batman-adv/main.c3
-rw-r--r--net/batman-adv/main.h3
-rw-r--r--net/batman-adv/multicast.c51
-rw-r--r--net/batman-adv/netlink.c24
-rw-r--r--net/batman-adv/trace.c2
-rw-r--r--net/batman-adv/trace.h6
-rw-r--r--net/batman-adv/translation-table.c41
-rw-r--r--net/batman-adv/types.h5
-rw-r--r--net/bluetooth/6lowpan.c2
-rw-r--r--net/bluetooth/hci_event.c6
-rw-r--r--net/bluetooth/hci_request.c2
-rw-r--r--net/bluetooth/l2cap_core.c12
-rw-r--r--net/bluetooth/rfcomm/core.c12
-rw-r--r--net/bluetooth/rfcomm/sock.c12
-rw-r--r--net/bluetooth/sco.c12
-rw-r--r--net/bluetooth/smp.c8
-rw-r--r--net/bpf/test_run.c36
-rw-r--r--net/bridge/br.c89
-rw-r--r--net/bridge/br_device.c11
-rw-r--r--net/bridge/br_fdb.c46
-rw-r--r--net/bridge/br_if.c23
-rw-r--r--net/bridge/br_input.c4
-rw-r--r--net/bridge/br_mdb.c126
-rw-r--r--net/bridge/br_multicast.c442
-rw-r--r--net/bridge/br_netfilter_hooks.c54
-rw-r--r--net/bridge/br_netfilter_ipv6.c4
-rw-r--r--net/bridge/br_netlink.c71
-rw-r--r--net/bridge/br_private.h81
-rw-r--r--net/bridge/br_switchdev.c5
-rw-r--r--net/bridge/br_sysfs_br.c36
-rw-r--r--net/bridge/br_sysfs_if.c3
-rw-r--r--net/bridge/br_vlan.c71
-rw-r--r--net/can/raw.c2
-rw-r--r--net/compat.c34
-rw-r--r--net/core/datagram.c204
-rw-r--r--net/core/dev.c217
-rw-r--r--net/core/dev_addr_lists.c100
-rw-r--r--net/core/dev_ioctl.c4
-rw-r--r--net/core/devlink.c5
-rw-r--r--net/core/filter.c485
-rw-r--r--net/core/flow_dissector.c9
-rw-r--r--net/core/gro_cells.c1
-rw-r--r--net/core/neighbour.c455
-rw-r--r--net/core/net-sysfs.c2
-rw-r--r--net/core/net_namespace.c162
-rw-r--r--net/core/netpoll.c6
-rw-r--r--net/core/rtnetlink.c520
-rw-r--r--net/core/skbuff.c313
-rw-r--r--net/core/skmsg.c28
-rw-r--r--net/core/sock.c14
-rw-r--r--net/core/sock_reuseport.c1
-rw-r--r--net/core/stream.c2
-rw-r--r--net/core/sysctl_net_core.c20
-rw-r--r--net/dccp/ipv4.c13
-rw-r--r--net/dccp/ipv6.c13
-rw-r--r--net/dccp/proto.c7
-rw-r--r--net/decnet/af_decnet.c4
-rw-r--r--net/dsa/Kconfig4
-rw-r--r--net/dsa/dsa.c8
-rw-r--r--net/dsa/dsa_priv.h2
-rw-r--r--net/dsa/master.c63
-rw-r--r--net/dsa/port.c3
-rw-r--r--net/dsa/slave.c86
-rw-r--r--net/dsa/tag_brcm.c2
-rw-r--r--net/dsa/tag_dsa.c1
-rw-r--r--net/dsa/tag_edsa.c1
-rw-r--r--net/dsa/tag_gswip.c1
-rw-r--r--net/dsa/tag_ksz.c117
-rw-r--r--net/dsa/tag_lan9303.c1
-rw-r--r--net/dsa/tag_mtk.c1
-rw-r--r--net/dsa/tag_qca.c1
-rw-r--r--net/dsa/tag_trailer.c1
-rw-r--r--net/ethernet/eth.c56
-rw-r--r--net/ieee802154/6lowpan/tx.c3
-rw-r--r--net/ieee802154/nl-phy.c2
-rw-r--r--net/ipv4/af_inet.c17
-rw-r--r--net/ipv4/devinet.c7
-rw-r--r--net/ipv4/esp4.c9
-rw-r--r--net/ipv4/esp4_offload.c15
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv4/fou.c75
-rw-r--r--net/ipv4/gre_demux.c9
-rw-r--r--net/ipv4/icmp.c6
-rw-r--r--net/ipv4/inet_connection_sock.c14
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/inet_hashtables.c117
-rw-r--r--net/ipv4/ip_forward.c8
-rw-r--r--net/ipv4/ip_fragment.c25
-rw-r--r--net/ipv4/ip_gre.c56
-rw-r--r--net/ipv4/ip_input.c77
-rw-r--r--net/ipv4/ip_output.c42
-rw-r--r--net/ipv4/ip_tunnel_core.c3
-rw-r--r--net/ipv4/ipconfig.c21
-rw-r--r--net/ipv4/ipip.c14
-rw-r--r--net/ipv4/ipmr.c19
-rw-r--r--net/ipv4/metrics.c26
-rw-r--r--net/ipv4/netfilter/Kconfig5
-rw-r--r--net/ipv4/netfilter/Makefile5
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c184
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c7
-rw-r--r--net/ipv4/netfilter/nf_nat_l3proto_ipv4.c43
-rw-r--r--net/ipv4/netfilter/nf_nat_masquerade_ipv4.c38
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_gre.c150
-rw-r--r--net/ipv4/netfilter/nf_nat_proto_icmp.c83
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nft_masq_ipv4.c4
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/protocol.c1
-rw-r--r--net/ipv4/raw.c33
-rw-r--r--net/ipv4/route.c3
-rw-r--r--net/ipv4/sysctl_net_ipv4.c11
-rw-r--r--net/ipv4/tcp.c17
-rw-r--r--net/ipv4/tcp_bbr.c15
-rw-r--r--net/ipv4/tcp_bpf.c32
-rw-r--r--net/ipv4/tcp_input.c94
-rw-r--r--net/ipv4/tcp_ipv4.c132
-rw-r--r--net/ipv4/tcp_offload.c6
-rw-r--r--net/ipv4/tcp_output.c79
-rw-r--r--net/ipv4/tcp_timer.c20
-rw-r--r--net/ipv4/tunnel4.c18
-rw-r--r--net/ipv4/udp.c266
-rw-r--r--net/ipv4/udp_impl.h2
-rw-r--r--net/ipv4/udp_offload.c122
-rw-r--r--net/ipv4/udp_tunnel.c18
-rw-r--r--net/ipv4/udplite.c4
-rw-r--r--net/ipv4/xfrm4_protocol.c18
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/anycast.c6
-rw-r--r--net/ipv6/datagram.c10
-rw-r--r--net/ipv6/esp6.c9
-rw-r--r--net/ipv6/esp6_offload.c15
-rw-r--r--net/ipv6/fou6.c74
-rw-r--r--net/ipv6/icmp.c4
-rw-r--r--net/ipv6/inet6_hashtables.c58
-rw-r--r--net/ipv6/ip6_gre.c24
-rw-r--r--net/ipv6/ip6_input.c67
-rw-r--r--net/ipv6/ip6_offload.c48
-rw-r--r--net/ipv6/ip6_output.c92
-rw-r--r--net/ipv6/ip6_tunnel.c1
-rw-r--r--net/ipv6/ip6_udp_tunnel.c19
-rw-r--r--net/ipv6/ip6_vti.c1
-rw-r--r--net/ipv6/ip6mr.c13
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/netfilter.c3
-rw-r--r--net/ipv6/netfilter/Makefile2
-rw-r--r--net/ipv6/netfilter/ip6t_MASQUERADE.c8
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c8
-rw-r--r--net/ipv6/netfilter/nf_nat_l3proto_ipv6.c43
-rw-r--r--net/ipv6/netfilter/nf_nat_masquerade_ipv6.c49
-rw-r--r--net/ipv6/netfilter/nf_nat_proto_icmpv6.c90
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c10
-rw-r--r--net/ipv6/netfilter/nft_masq_ipv6.c4
-rw-r--r--net/ipv6/raw.c7
-rw-r--r--net/ipv6/reassembly.c9
-rw-r--r--net/ipv6/route.c5
-rw-r--r--net/ipv6/seg6_iptunnel.c1
-rw-r--r--net/ipv6/tcp_ipv6.c16
-rw-r--r--net/ipv6/tcpv6_offload.c7
-rw-r--r--net/ipv6/tunnel6.c12
-rw-r--r--net/ipv6/udp.c320
-rw-r--r--net/ipv6/udp_impl.h4
-rw-r--r--net/ipv6/udp_offload.c13
-rw-r--r--net/ipv6/udplite.c5
-rw-r--r--net/ipv6/xfrm6_input.c8
-rw-r--r--net/ipv6/xfrm6_policy.c1
-rw-r--r--net/ipv6/xfrm6_protocol.c18
-rw-r--r--net/ipv6/xfrm6_tunnel.c3
-rw-r--r--net/iucv/af_iucv.c41
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l3mdev/l3mdev.c18
-rw-r--r--net/mac80211/Kconfig11
-rw-r--r--net/mac80211/cfg.c36
-rw-r--r--net/mac80211/debugfs_netdev.c3
-rw-r--r--net/mac80211/debugfs_sta.c14
-rw-r--r--net/mac80211/driver-ops.h34
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/iface.c15
-rw-r--r--net/mac80211/main.c6
-rw-r--r--net/mac80211/mesh.c8
-rw-r--r--net/mac80211/mesh.h3
-rw-r--r--net/mac80211/mesh_plink.c35
-rw-r--r--net/mac80211/mlme.c65
-rw-r--r--net/mac80211/rx.c42
-rw-r--r--net/mac80211/scan.c22
-rw-r--r--net/mac80211/sta_info.c11
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/status.c7
-rw-r--r--net/mac80211/trace.h18
-rw-r--r--net/mac80211/tx.c15
-rw-r--r--net/mac80211/util.c51
-rw-r--r--net/mac80211/wep.c4
-rw-r--r--net/ncsi/internal.h24
-rw-r--r--net/ncsi/ncsi-aen.c75
-rw-r--r--net/ncsi/ncsi-manage.c550
-rw-r--r--net/ncsi/ncsi-netlink.c233
-rw-r--r--net/ncsi/ncsi-pkt.h9
-rw-r--r--net/ncsi/ncsi-rsp.c43
-rw-r--r--net/netfilter/Kconfig15
-rw-r--r--net/netfilter/Makefile7
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c13
-rw-r--r--net/netfilter/ipset/ip_set_core.c170
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h4
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmac.c27
-rw-r--r--net/netfilter/ipset/ip_set_hash_mac.c10
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c3
-rw-r--r--net/netfilter/nf_conncount.c46
-rw-r--r--net/netfilter/nf_conntrack_acct.c89
-rw-r--r--net/netfilter/nf_conntrack_core.c28
-rw-r--r--net/netfilter/nf_conntrack_ecache.c66
-rw-r--r--net/netfilter/nf_conntrack_helper.c69
-rw-r--r--net/netfilter/nf_conntrack_netlink.c30
-rw-r--r--net/netfilter/nf_conntrack_proto.c21
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c56
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c18
-rw-r--r--net/netfilter/nf_conntrack_seqadj.c7
-rw-r--r--net/netfilter/nf_conntrack_standalone.c103
-rw-r--r--net/netfilter/nf_conntrack_timestamp.c70
-rw-r--r--net/netfilter/nf_flow_table_core.c42
-rw-r--r--net/netfilter/nf_log_common.c20
-rw-r--r--net/netfilter/nf_nat_core.c330
-rw-r--r--net/netfilter/nf_nat_proto.c343
-rw-r--r--net/netfilter/nf_nat_proto_common.c120
-rw-r--r--net/netfilter/nf_nat_proto_dccp.c82
-rw-r--r--net/netfilter/nf_nat_proto_sctp.c77
-rw-r--r--net/netfilter/nf_nat_proto_tcp.c85
-rw-r--r--net/netfilter/nf_nat_proto_udp.c130
-rw-r--r--net/netfilter/nf_nat_proto_unknown.c54
-rw-r--r--net/netfilter/nf_nat_sip.c39
-rw-r--r--net/netfilter/nf_queue.c50
-rw-r--r--net/netfilter/nf_tables_api.c157
-rw-r--r--net/netfilter/nf_tables_core.c2
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c15
-rw-r--r--net/netfilter/nfnetlink_log.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c28
-rw-r--r--net/netfilter/nft_compat.c3
-rw-r--r--net/netfilter/nft_flow_offload.c5
-rw-r--r--net/netfilter/nft_meta.c2
-rw-r--r--net/netfilter/nft_xfrm.c2
-rw-r--r--net/netfilter/xt_RATEEST.c10
-rw-r--r--net/netfilter/xt_hashlimit.c13
-rw-r--r--net/netfilter/xt_physdev.c2
-rw-r--r--net/netfilter/xt_policy.c2
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/openvswitch/actions.c13
-rw-r--r--net/openvswitch/conntrack.c2
-rw-r--r--net/openvswitch/flow.c6
-rw-r--r--net/openvswitch/flow.h2
-rw-r--r--net/openvswitch/flow_netlink.c22
-rw-r--r--net/openvswitch/vport-geneve.c2
-rw-r--r--net/openvswitch/vport-gre.c2
-rw-r--r--net/openvswitch/vport-netdev.c1
-rw-r--r--net/openvswitch/vport-vxlan.c2
-rw-r--r--net/packet/af_packet.c14
-rw-r--r--net/rds/message.c24
-rw-r--r--net/rds/rdma.c75
-rw-r--r--net/rds/rds.h23
-rw-r--r--net/rds/send.c61
-rw-r--r--net/rfkill/rfkill-gpio.c1
-rw-r--r--net/sched/act_api.c221
-rw-r--r--net/sched/act_police.c24
-rw-r--r--net/sched/act_tunnel_key.c25
-rw-r--r--net/sched/act_vlan.c2
-rw-r--r--net/sched/cls_api.c337
-rw-r--r--net/sched/cls_bpf.c4
-rw-r--r--net/sched/cls_flower.c198
-rw-r--r--net/sched/cls_matchall.c5
-rw-r--r--net/sched/cls_u32.c10
-rw-r--r--net/sched/sch_api.c101
-rw-r--r--net/sched/sch_etf.c79
-rw-r--r--net/sched/sch_fq.c28
-rw-r--r--net/sched/sch_generic.c8
-rw-r--r--net/sched/sch_gred.c375
-rw-r--r--net/sched/sch_mq.c18
-rw-r--r--net/sched/sch_netem.c92
-rw-r--r--net/sched/sch_prio.c47
-rw-r--r--net/sched/sch_red.c48
-rw-r--r--net/sctp/associola.c11
-rw-r--r--net/sctp/bind_addr.c28
-rw-r--r--net/sctp/chunk.c14
-rw-r--r--net/sctp/input.c134
-rw-r--r--net/sctp/ipv6.c8
-rw-r--r--net/sctp/output.c1
-rw-r--r--net/sctp/primitive.c2
-rw-r--r--net/sctp/sm_make_chunk.c3
-rw-r--r--net/sctp/sm_sideeffect.c12
-rw-r--r--net/sctp/sm_statetable.c2
-rw-r--r--net/sctp/socket.c177
-rw-r--r--net/sctp/stream_interleave.c46
-rw-r--r--net/sctp/ulpqueue.c8
-rw-r--r--net/smc/af_smc.c65
-rw-r--r--net/smc/smc.h4
-rw-r--r--net/smc/smc_clc.c33
-rw-r--r--net/smc/smc_clc.h3
-rw-r--r--net/smc/smc_core.c16
-rw-r--r--net/smc/smc_core.h6
-rw-r--r--net/smc/smc_llc.c57
-rw-r--r--net/smc/smc_llc.h2
-rw-r--r--net/socket.c62
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c4
-rw-r--r--net/sunrpc/clnt.c9
-rw-r--r--net/sunrpc/socklib.c2
-rw-r--r--net/sunrpc/xprt.c48
-rw-r--r--net/sunrpc/xprtsock.c91
-rw-r--r--net/switchdev/switchdev.c213
-rw-r--r--net/tipc/Makefile4
-rw-r--r--net/tipc/bearer.c9
-rw-r--r--net/tipc/bearer.h2
-rw-r--r--net/tipc/link.c220
-rw-r--r--net/tipc/link.h2
-rw-r--r--net/tipc/msg.h1
-rw-r--r--net/tipc/netlink_compat.c7
-rw-r--r--net/tipc/node.c103
-rw-r--r--net/tipc/node.h1
-rw-r--r--net/tipc/socket.c267
-rw-r--r--net/tipc/socket.h4
-rw-r--r--net/tipc/sysctl.c8
-rw-r--r--net/tipc/trace.c206
-rw-r--r--net/tipc/trace.h431
-rw-r--r--net/tipc/udp_media.c9
-rw-r--r--net/tls/tls_main.c58
-rw-r--r--net/tls/tls_sw.c64
-rw-r--r--net/vmw_vsock/af_vsock.c7
-rw-r--r--net/vmw_vsock/vmci_transport.c67
-rw-r--r--net/wireless/Makefile1
-rw-r--r--net/wireless/chan.c3
-rw-r--r--net/wireless/core.c48
-rw-r--r--net/wireless/core.h5
-rw-r--r--net/wireless/lib80211_crypt_ccmp.c2
-rw-r--r--net/wireless/lib80211_crypt_tkip.c4
-rw-r--r--net/wireless/lib80211_crypt_wep.c4
-rw-r--r--net/wireless/mlme.c4
-rw-r--r--net/wireless/nl80211.c312
-rw-r--r--net/wireless/nl80211.h32
-rw-r--r--net/wireless/pmsr.c590
-rw-r--r--net/wireless/rdev-ops.h25
-rw-r--r--net/wireless/scan.c2
-rw-r--r--net/wireless/sme.c8
-rw-r--r--net/wireless/trace.h92
-rw-r--r--net/wireless/util.c17
-rw-r--r--net/x25/af_x25.c18
-rw-r--r--net/x25/x25_in.c9
-rw-r--r--net/xdp/xsk.c16
-rw-r--r--net/xfrm/Kconfig1
-rw-r--r--net/xfrm/xfrm_device.c4
-rw-r--r--net/xfrm/xfrm_input.c83
-rw-r--r--net/xfrm/xfrm_interface.c2
-rw-r--r--net/xfrm/xfrm_output.c8
-rw-r--r--net/xfrm/xfrm_policy.c1270
-rw-r--r--net/xfrm/xfrm_state.c10
-rw-r--r--net/xfrm/xfrm_user.c4
-rw-r--r--samples/bpf/Makefile8
-rw-r--r--samples/bpf/bpf_load.c39
-rw-r--r--samples/bpf/xdp1_user.c27
-rw-r--r--samples/v4l/v4l2-pci-skeleton.c11
-rw-r--r--scripts/Kbuild.include4
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.gcc-plugins6
-rwxr-xr-xscripts/checkpatch.pl35
-rwxr-xr-xscripts/checkstack.pl4
-rwxr-xr-xscripts/checksyscalls.sh1
-rw-r--r--scripts/coccinelle/api/drm-get-put.cocci78
-rw-r--r--scripts/gcc-plugins/Kconfig4
-rw-r--r--scripts/gcc-plugins/arm_ssp_per_task_plugin.c103
-rw-r--r--scripts/gcc-plugins/stackleak_plugin.c8
-rw-r--r--scripts/mod/Makefile2
-rwxr-xr-xscripts/spdxcheck.py6
-rw-r--r--scripts/unifdef.c4
-rw-r--r--security/apparmor/apparmorfs.c2
-rw-r--r--security/apparmor/crypto.c2
-rw-r--r--security/commoncap.c1
-rw-r--r--security/inode.c6
-rw-r--r--security/integrity/evm/evm_crypto.c5
-rw-r--r--security/integrity/evm/evm_main.c5
-rw-r--r--security/integrity/evm/evm_posix_acl.c1
-rw-r--r--security/integrity/evm/evm_secfs.c2
-rw-r--r--security/integrity/iint.c2
-rw-r--r--security/integrity/ima/ima_api.c3
-rw-r--r--security/integrity/ima/ima_appraise.c2
-rw-r--r--security/integrity/ima/ima_fs.c2
-rw-r--r--security/integrity/ima/ima_init.c2
-rw-r--r--security/integrity/ima/ima_main.c5
-rw-r--r--security/integrity/ima/ima_policy.c12
-rw-r--r--security/integrity/ima/ima_queue.c1
-rw-r--r--security/keys/encrypted-keys/ecryptfs_format.c5
-rw-r--r--security/keys/encrypted-keys/encrypted.c4
-rw-r--r--security/keys/encrypted-keys/masterkey_trusted.c1
-rw-r--r--security/keys/gc.c1
-rw-r--r--security/keys/key.c2
-rw-r--r--security/keys/keyctl.c1
-rw-r--r--security/keys/keyctl_pkey.c2
-rw-r--r--security/keys/keyring.c2
-rw-r--r--security/keys/permission.c2
-rw-r--r--security/keys/proc.c1
-rw-r--r--security/keys/process_keys.c1
-rw-r--r--security/keys/request_key.c2
-rw-r--r--security/keys/request_key_auth.c1
-rw-r--r--security/keys/trusted.c6
-rw-r--r--security/keys/user_defined.c2
-rw-r--r--security/security.c2
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/selinux/include/security.h2
-rw-r--r--security/selinux/nlmsgtab.c13
-rw-r--r--security/selinux/ss/mls.c24
-rw-r--r--security/selinux/ss/mls.h3
-rw-r--r--security/selinux/ss/policydb.c61
-rw-r--r--security/selinux/ss/services.c222
-rw-r--r--security/selinux/ss/services.h2
-rw-r--r--security/selinux/ss/sidtab.c609
-rw-r--r--security/selinux/ss/sidtab.h96
-rw-r--r--security/selinux/xfrm.c4
-rw-r--r--security/tomoyo/util.c2
-rw-r--r--sound/aoa/fabrics/layout.c6
-rw-r--r--sound/aoa/soundbus/core.c4
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c6
-rw-r--r--sound/aoa/soundbus/sysfs.c34
-rw-r--r--sound/core/compress_offload.c18
-rw-r--r--sound/core/control.c171
-rw-r--r--sound/core/pcm.c2
-rw-r--r--sound/core/pcm_native.c14
-rw-r--r--sound/firewire/Kconfig2
-rw-r--r--sound/firewire/amdtp-stream-trace.h4
-rw-r--r--sound/firewire/amdtp-stream.c4
-rw-r--r--sound/firewire/bebob/bebob.c2
-rw-r--r--sound/firewire/fireface/Makefile3
-rw-r--r--sound/firewire/fireface/ff-pcm.c35
-rw-r--r--sound/firewire/fireface/ff-proc.c193
-rw-r--r--sound/firewire/fireface/ff-protocol-ff400.c341
-rw-r--r--sound/firewire/fireface/ff-protocol-ff800.c143
-rw-r--r--sound/firewire/fireface/ff-stream.c126
-rw-r--r--sound/firewire/fireface/ff-transaction.c157
-rw-r--r--sound/firewire/fireface/ff.c25
-rw-r--r--sound/firewire/fireface/ff.h42
-rw-r--r--sound/firewire/oxfw/oxfw.c8
-rw-r--r--sound/firewire/tascam/amdtp-tascam.c51
-rw-r--r--sound/firewire/tascam/tascam-hwdep.c115
-rw-r--r--sound/firewire/tascam/tascam.h9
-rw-r--r--sound/hda/hdac_bus.c7
-rw-r--r--sound/hda/hdac_component.c39
-rw-r--r--sound/hda/hdac_device.c17
-rw-r--r--sound/isa/wss/wss_lib.c2
-rw-r--r--sound/pci/ac97/ac97_codec.c2
-rw-r--r--sound/pci/asihpi/asihpi.c2
-rw-r--r--sound/pci/emu10k1/emufx.c5
-rw-r--r--sound/pci/hda/Kconfig62
-rw-r--r--sound/pci/hda/dell_wmi_helper.c48
-rw-r--r--sound/pci/hda/hda_codec.c16
-rw-r--r--sound/pci/hda/hda_controller.c11
-rw-r--r--sound/pci/hda/hda_controller.h8
-rw-r--r--sound/pci/hda/hda_generic.c31
-rw-r--r--sound/pci/hda/hda_generic.h2
-rw-r--r--sound/pci/hda/hda_intel.c280
-rw-r--r--sound/pci/hda/hda_jack.c56
-rw-r--r--sound/pci/hda/hda_jack.h12
-rw-r--r--sound/pci/hda/hda_tegra.c22
-rw-r--r--sound/pci/hda/patch_ca0132.c207
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_hdmi.c14
-rw-r--r--sound/pci/hda/patch_realtek.c246
-rw-r--r--sound/pci/hda/thinkpad_helper.c43
-rw-r--r--sound/pci/rme9652/hdsp.c10
-rw-r--r--sound/ppc/pmac.c4
-rw-r--r--sound/ppc/tumbler.c4
-rw-r--r--sound/soc/Kconfig4
-rw-r--r--sound/soc/Makefile4
-rw-r--r--sound/soc/amd/Kconfig6
-rw-r--r--sound/soc/amd/Makefile1
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c2
-rw-r--r--sound/soc/amd/acp-pcm-dma.c22
-rw-r--r--sound/soc/amd/acp.h2
-rw-r--r--sound/soc/amd/raven/Makefile6
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c777
-rw-r--r--sound/soc/amd/raven/acp3x.h58
-rw-r--r--sound/soc/amd/raven/chip_offset_byte.h639
-rw-r--r--sound/soc/amd/raven/pci-acp3x.c156
-rw-r--r--sound/soc/codecs/Kconfig6
-rw-r--r--sound/soc/codecs/Makefile2
-rw-r--r--sound/soc/codecs/ak4104.c22
-rw-r--r--sound/soc/codecs/ak4118.c438
-rw-r--r--sound/soc/codecs/ak4458.c2
-rw-r--r--sound/soc/codecs/ak5558.c19
-rw-r--r--sound/soc/codecs/cs4270.c23
-rw-r--r--sound/soc/codecs/dmic.c40
-rw-r--r--sound/soc/codecs/hdac_hda.c2
-rw-r--r--sound/soc/codecs/hdac_hdmi.c88
-rw-r--r--sound/soc/codecs/max98373.c35
-rw-r--r--sound/soc/codecs/max9867.c505
-rw-r--r--sound/soc/codecs/max9867.h41
-rw-r--r--sound/soc/codecs/nau8540.c2
-rw-r--r--sound/soc/codecs/nau8822.c26
-rw-r--r--sound/soc/codecs/nau8822.h9
-rw-r--r--sound/soc/codecs/nau8825.c4
-rw-r--r--sound/soc/codecs/pcm186x.h2
-rw-r--r--sound/soc/codecs/pcm3060.c36
-rw-r--r--sound/soc/codecs/pcm3060.h3
-rw-r--r--sound/soc/codecs/pcm3168a.c40
-rw-r--r--sound/soc/codecs/pcm512x.c121
-rw-r--r--sound/soc/codecs/pcm512x.h2
-rw-r--r--sound/soc/codecs/rt5660.c1
-rw-r--r--sound/soc/codecs/rt5663.c75
-rw-r--r--sound/soc/codecs/simple-amplifier.c4
-rw-r--r--sound/soc/codecs/tas6424.c2
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c2
-rw-r--r--sound/soc/codecs/tlv320aic3x.c10
-rw-r--r--sound/soc/codecs/tlv320dac33.c2
-rw-r--r--sound/soc/codecs/wm8998.c2
-rw-r--r--sound/soc/codecs/wm9705.c10
-rw-r--r--sound/soc/codecs/wm9712.c10
-rw-r--r--sound/soc/codecs/wm9713.c10
-rw-r--r--sound/soc/codecs/wm_adsp.c51
-rw-r--r--sound/soc/davinci/Kconfig106
-rw-r--r--sound/soc/davinci/Makefile16
-rw-r--r--sound/soc/fsl/Kconfig2
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c6
-rw-r--r--sound/soc/fsl/fsl_ssi_dbg.c14
-rw-r--r--sound/soc/generic/Kconfig4
-rw-r--r--sound/soc/generic/audio-graph-card.c465
-rw-r--r--sound/soc/generic/audio-graph-scu-card.c262
-rw-r--r--sound/soc/generic/simple-card-utils.c45
-rw-r--r--sound/soc/generic/simple-card.c402
-rw-r--r--sound/soc/generic/simple-scu-card.c264
-rw-r--r--sound/soc/intel/Kconfig91
-rw-r--r--sound/soc/intel/atom/sst/sst_acpi.c4
-rw-r--r--sound/soc/intel/atom/sst/sst_loader.c8
-rw-r--r--sound/soc/intel/atom/sst/sst_pvt.c4
-rw-r--r--sound/soc/intel/boards/Kconfig44
-rw-r--r--sound/soc/intel/boards/Makefile2
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c33
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c6
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c46
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5645.c6
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c2
-rw-r--r--sound/soc/intel/boards/glk_rt5682_max98357a.c2
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c14
-rw-r--r--sound/soc/intel/boards/kbl_rt5660.c543
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c14
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c10
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c22
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_max98357a.c14
-rw-r--r--sound/soc/intel/boards/skl_nau88l25_ssm4567.c14
-rw-r--r--sound/soc/intel/common/Makefile2
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-bxt-match.c36
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-icl-match.c32
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-kbl-match.c10
-rw-r--r--sound/soc/intel/skylake/skl-messages.c8
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c3
-rw-r--r--sound/soc/intel/skylake/skl-sst-ipc.c50
-rw-r--r--sound/soc/intel/skylake/skl.c224
-rw-r--r--sound/soc/intel/skylake/skl.h3
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c2
-rw-r--r--sound/soc/meson/Kconfig9
-rw-r--r--sound/soc/meson/Makefile2
-rw-r--r--sound/soc/meson/axg-fifo.h3
-rw-r--r--sound/soc/meson/axg-spdifin.c521
-rw-r--r--sound/soc/meson/axg-toddr.c15
-rw-r--r--sound/soc/omap/Kconfig129
-rw-r--r--sound/soc/omap/Makefile32
-rw-r--r--sound/soc/omap/am3517evm.c141
-rw-r--r--sound/soc/omap/mcbsp.c1104
-rw-r--r--sound/soc/pxa/Kconfig26
-rw-r--r--sound/soc/pxa/Makefile1
-rw-r--r--sound/soc/pxa/raumfeld.c318
-rw-r--r--sound/soc/qcom/Kconfig2
-rw-r--r--sound/soc/qcom/common.c9
-rw-r--r--sound/soc/qcom/lpass-platform.c2
-rw-r--r--sound/soc/qcom/qdsp6/q6afe-dai.c238
-rw-r--r--sound/soc/qcom/qdsp6/q6afe.c20
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c405
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c5
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c28
-rw-r--r--sound/soc/qcom/sdm845.c186
-rw-r--r--sound/soc/rockchip/rockchip_pcm.c1
-rw-r--r--sound/soc/sh/rcar/adg.c38
-rw-r--r--sound/soc/sh/rcar/cmd.c11
-rw-r--r--sound/soc/sh/rcar/core.c256
-rw-r--r--sound/soc/sh/rcar/ctu.c138
-rw-r--r--sound/soc/sh/rcar/dma.c84
-rw-r--r--sound/soc/sh/rcar/dvc.c21
-rw-r--r--sound/soc/sh/rcar/gen.c49
-rw-r--r--sound/soc/sh/rcar/mix.c3
-rw-r--r--sound/soc/sh/rcar/rsnd.h382
-rw-r--r--sound/soc/sh/rcar/src.c67
-rw-r--r--sound/soc/sh/rcar/ssi.c271
-rw-r--r--sound/soc/sh/rcar/ssiu.c300
-rw-r--r--sound/soc/soc-acpi.c10
-rw-r--r--sound/soc/soc-core.c14
-rw-r--r--sound/soc/stm/stm32_sai.c8
-rw-r--r--sound/soc/stm/stm32_sai_sub.c5
-rw-r--r--sound/soc/sunxi/Kconfig2
-rw-r--r--sound/soc/sunxi/sun50i-codec-analog.c2
-rw-r--r--sound/soc/sunxi/sun8i-codec.c12
-rw-r--r--sound/soc/ti/Kconfig209
-rw-r--r--sound/soc/ti/Makefile44
-rw-r--r--sound/soc/ti/ams-delta.c (renamed from sound/soc/omap/ams-delta.c)0
-rw-r--r--sound/soc/ti/davinci-evm.c (renamed from sound/soc/davinci/davinci-evm.c)4
-rw-r--r--sound/soc/ti/davinci-i2s.c (renamed from sound/soc/davinci/davinci-i2s.c)0
-rw-r--r--sound/soc/ti/davinci-i2s.h (renamed from sound/soc/davinci/davinci-i2s.h)0
-rw-r--r--sound/soc/ti/davinci-mcasp.c (renamed from sound/soc/davinci/davinci-mcasp.c)123
-rw-r--r--sound/soc/ti/davinci-mcasp.h (renamed from sound/soc/davinci/davinci-mcasp.h)30
-rw-r--r--sound/soc/ti/davinci-vcif.c (renamed from sound/soc/davinci/davinci-vcif.c)0
-rw-r--r--sound/soc/ti/edma-pcm.c (renamed from sound/soc/davinci/edma-pcm.c)0
-rw-r--r--sound/soc/ti/edma-pcm.h (renamed from sound/soc/davinci/edma-pcm.h)4
-rw-r--r--sound/soc/ti/n810.c (renamed from sound/soc/omap/n810.c)0
-rw-r--r--sound/soc/ti/omap-abe-twl6040.c (renamed from sound/soc/omap/omap-abe-twl6040.c)67
-rw-r--r--sound/soc/ti/omap-dmic.c (renamed from sound/soc/omap/omap-dmic.c)9
-rw-r--r--sound/soc/ti/omap-dmic.h (renamed from sound/soc/omap/omap-dmic.h)0
-rw-r--r--sound/soc/ti/omap-hdmi.c (renamed from sound/soc/omap/omap-hdmi-audio.c)0
-rw-r--r--sound/soc/ti/omap-mcbsp-priv.h (renamed from sound/soc/omap/mcbsp.h)126
-rw-r--r--sound/soc/ti/omap-mcbsp-st.c516
-rw-r--r--sound/soc/ti/omap-mcbsp.c (renamed from sound/soc/omap/omap-mcbsp.c)863
-rw-r--r--sound/soc/ti/omap-mcbsp.h (renamed from sound/soc/omap/omap-mcbsp.h)8
-rw-r--r--sound/soc/ti/omap-mcpdm.c (renamed from sound/soc/omap/omap-mcpdm.c)43
-rw-r--r--sound/soc/ti/omap-mcpdm.h (renamed from sound/soc/omap/omap-mcpdm.h)0
-rw-r--r--sound/soc/ti/omap-twl4030.c (renamed from sound/soc/omap/omap-twl4030.c)0
-rw-r--r--sound/soc/ti/omap3pandora.c (renamed from sound/soc/omap/omap3pandora.c)0
-rw-r--r--sound/soc/ti/osk5912.c (renamed from sound/soc/omap/osk5912.c)0
-rw-r--r--sound/soc/ti/rx51.c (renamed from sound/soc/omap/rx51.c)0
-rw-r--r--sound/soc/ti/sdma-pcm.c (renamed from sound/soc/omap/sdma-pcm.c)0
-rw-r--r--sound/soc/ti/sdma-pcm.h (renamed from sound/soc/omap/sdma-pcm.h)4
-rw-r--r--sound/soc/xilinx/Kconfig8
-rw-r--r--sound/soc/xilinx/Makefile2
-rw-r--r--sound/soc/xilinx/xlnx_i2s.c185
-rw-r--r--sound/sparc/cs4231.c14
-rw-r--r--sound/synth/emux/emux_hwdep.c7
-rw-r--r--sound/usb/card.c5
-rw-r--r--sound/usb/quirks-table.h10
-rw-r--r--sound/usb/quirks.c130
-rw-r--r--sound/x86/intel_hdmi_audio.c26
-rw-r--r--sound/xen/Kconfig1
-rw-r--r--sound/xen/Makefile1
-rw-r--r--sound/xen/xen_snd_front.c7
-rw-r--r--sound/xen/xen_snd_front.h4
-rw-r--r--sound/xen/xen_snd_front_alsa.c102
-rw-r--r--sound/xen/xen_snd_front_shbuf.c194
-rw-r--r--sound/xen/xen_snd_front_shbuf.h36
-rw-r--r--tools/arch/powerpc/include/uapi/asm/perf_regs.h1
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/arch/x86/include/uapi/asm/prctl.h17
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst70
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst8
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst185
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst13
-rw-r--r--tools/bpf/bpftool/Makefile15
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool132
-rw-r--r--tools/bpf/bpftool/btf_dumper.c263
-rw-r--r--tools/bpf/bpftool/cfg.c36
-rw-r--r--tools/bpf/bpftool/cfg.h38
-rw-r--r--tools/bpf/bpftool/cgroup.c2
-rw-r--r--tools/bpf/bpftool/common.c130
-rw-r--r--tools/bpf/bpftool/jit_disasm.c43
-rw-r--r--tools/bpf/bpftool/json_writer.c7
-rw-r--r--tools/bpf/bpftool/json_writer.h1
-rw-r--r--tools/bpf/bpftool/main.c47
-rw-r--r--tools/bpf/bpftool/main.h108
-rw-r--r--tools/bpf/bpftool/map.c216
-rw-r--r--tools/bpf/bpftool/map_perf_ring.c2
-rw-r--r--tools/bpf/bpftool/net.c2
-rw-r--r--tools/bpf/bpftool/netlink_dumper.c2
-rw-r--r--tools/bpf/bpftool/netlink_dumper.h2
-rw-r--r--tools/bpf/bpftool/perf.c2
-rw-r--r--tools/bpf/bpftool/prog.c417
-rw-r--r--tools/bpf/bpftool/tracelog.c166
-rw-r--r--tools/bpf/bpftool/xlated_dumper.c106
-rw-r--r--tools/bpf/bpftool/xlated_dumper.h48
-rw-r--r--tools/build/Makefile.feature8
-rw-r--r--tools/build/feature/Makefile18
-rw-r--r--tools/build/feature/test-all.c15
-rw-r--r--tools/build/feature/test-eventfd.c9
-rw-r--r--tools/build/feature/test-get_current_dir_name.c10
-rw-r--r--tools/build/feature/test-jvmti-cmlr.c11
-rw-r--r--tools/build/feature/test-jvmti.c1
-rw-r--r--tools/build/feature/test-libaio.c16
-rw-r--r--tools/build/feature/test-libopencsd.c8
-rw-r--r--tools/crypto/getstat.c72
-rw-r--r--tools/include/linux/err.h13
-rw-r--r--tools/include/linux/kernel.h2
-rw-r--r--tools/include/uapi/asm-generic/ioctls.h2
-rw-r--r--tools/include/uapi/asm-generic/unistd.h4
-rw-r--r--tools/include/uapi/drm/i915_drm.h22
-rw-r--r--tools/include/uapi/linux/bpf.h236
-rw-r--r--tools/include/uapi/linux/btf.h38
-rw-r--r--tools/include/uapi/linux/fadvise.h22
-rw-r--r--tools/include/uapi/linux/netlink.h2
-rw-r--r--tools/include/uapi/linux/pkt_cls.h612
-rw-r--r--tools/include/uapi/linux/prctl.h1
-rw-r--r--tools/include/uapi/linux/tc_act/tc_bpf.h37
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat2
-rw-r--r--tools/lib/bpf/Build2
-rw-r--r--tools/lib/bpf/Makefile38
-rw-r--r--tools/lib/bpf/README.rst139
-rw-r--r--tools/lib/bpf/bpf.c114
-rw-r--r--tools/lib/bpf/bpf.h37
-rw-r--r--tools/lib/bpf/bpf_prog_linfo.c249
-rw-r--r--tools/lib/bpf/btf.c393
-rw-r--r--tools/lib/bpf/btf.h58
-rw-r--r--tools/lib/bpf/libbpf.c635
-rw-r--r--tools/lib/bpf/libbpf.h42
-rw-r--r--tools/lib/bpf/libbpf.map126
-rw-r--r--tools/lib/bpf/libbpf_errno.c1
-rw-r--r--tools/lib/bpf/test_libbpf.cpp18
-rw-r--r--tools/lib/lockdep/include/liblockdep/common.h1
-rw-r--r--tools/lib/lockdep/include/liblockdep/mutex.h1
-rw-r--r--tools/lib/lockdep/include/liblockdep/rwlock.h6
-rw-r--r--tools/lib/lockdep/lockdep.c5
-rwxr-xr-xtools/lib/lockdep/run_tests.sh39
-rw-r--r--tools/lib/lockdep/tests/AA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBA.c3
-rw-r--r--tools/lib/lockdep/tests/ABBA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBA_2threads.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBCCA.c4
-rw-r--r--tools/lib/lockdep/tests/ABBCCA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBCCDDA.c5
-rw-r--r--tools/lib/lockdep/tests/ABBCCDDA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABCABC.c4
-rw-r--r--tools/lib/lockdep/tests/ABCABC.sh2
-rw-r--r--tools/lib/lockdep/tests/ABCDBCDA.c5
-rw-r--r--tools/lib/lockdep/tests/ABCDBCDA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABCDBDDA.c5
-rw-r--r--tools/lib/lockdep/tests/ABCDBDDA.sh2
-rw-r--r--tools/lib/lockdep/tests/WW.sh2
-rw-r--r--tools/lib/lockdep/tests/unlock_balance.c2
-rw-r--r--tools/lib/lockdep/tests/unlock_balance.sh2
-rw-r--r--tools/lib/subcmd/Makefile2
-rw-r--r--tools/lib/subcmd/parse-options.h4
-rw-r--r--tools/lib/traceevent/Makefile31
-rw-r--r--tools/lib/traceevent/event-parse-api.c8
-rw-r--r--tools/lib/traceevent/event-parse-local.h13
-rw-r--r--tools/lib/traceevent/event-parse.c235
-rw-r--r--tools/lib/traceevent/event-parse.h77
-rw-r--r--tools/lib/traceevent/libtraceevent.pc.template10
-rw-r--r--tools/lib/traceevent/parse-filter.c42
-rw-r--r--tools/lib/traceevent/plugin_function.c2
-rw-r--r--tools/lib/traceevent/plugin_hrtimer.c4
-rw-r--r--tools/lib/traceevent/plugin_kmem.c2
-rw-r--r--tools/lib/traceevent/plugin_kvm.c16
-rw-r--r--tools/lib/traceevent/plugin_mac80211.c4
-rw-r--r--tools/lib/traceevent/plugin_sched_switch.c4
-rw-r--r--tools/objtool/elf.c19
-rw-r--r--tools/perf/Documentation/perf-bench.txt10
-rw-r--r--tools/perf/Documentation/perf-config.txt38
-rw-r--r--tools/perf/Documentation/perf-list.txt2
-rw-r--r--tools/perf/Documentation/perf-record.txt5
-rw-r--r--tools/perf/Documentation/perf-report.txt10
-rw-r--r--tools/perf/Documentation/perf-script.txt2
-rw-r--r--tools/perf/Documentation/perf-stat.txt4
-rw-r--r--tools/perf/Documentation/perf-top.txt3
-rw-r--r--tools/perf/Documentation/perf-trace.txt6
-rw-r--r--tools/perf/Makefile.config25
-rw-r--r--tools/perf/Makefile.perf39
-rw-r--r--tools/perf/arch/arc/annotate/instructions.c9
-rw-r--r--tools/perf/arch/common.c21
-rw-r--r--tools/perf/arch/common.h1
-rw-r--r--tools/perf/arch/powerpc/include/perf_regs.h3
-rw-r--r--tools/perf/arch/powerpc/util/perf_regs.c1
-rw-r--r--tools/perf/arch/x86/tests/insn-x86.c2
-rw-r--r--tools/perf/arch/x86/util/header.c66
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c11
-rw-r--r--tools/perf/arch/x86/util/kvm-stat.c2
-rw-r--r--tools/perf/bench/Build3
-rw-r--r--tools/perf/bench/bench.h14
-rw-r--r--tools/perf/bench/epoll-ctl.c413
-rw-r--r--tools/perf/bench/epoll-wait.c540
-rw-r--r--tools/perf/bench/futex.h12
-rw-r--r--tools/perf/builtin-bench.c13
-rw-r--r--tools/perf/builtin-config.c7
-rw-r--r--tools/perf/builtin-help.c2
-rw-r--r--tools/perf/builtin-kvm.c6
-rw-r--r--tools/perf/builtin-record.c263
-rw-r--r--tools/perf/builtin-report.c26
-rw-r--r--tools/perf/builtin-script.c97
-rw-r--r--tools/perf/builtin-stat.c8
-rw-r--r--tools/perf/builtin-timechart.c4
-rw-r--r--tools/perf/builtin-top.c289
-rw-r--r--tools/perf/builtin-trace.c525
-rwxr-xr-xtools/perf/check-headers.sh2
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c41
-rw-r--r--tools/perf/include/bpf/bpf.h21
-rw-r--r--tools/perf/include/bpf/pid_filter.h21
-rw-r--r--tools/perf/include/bpf/stdio.h3
-rw-r--r--tools/perf/include/bpf/unistd.h10
-rw-r--r--tools/perf/jvmti/libjvmti.c12
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/cache.json10172
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json164
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json85
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json482
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/memory.json9909
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/other.json8908
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json969
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json117
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json255
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json285
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/cache.json30
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv3
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/cache.json4
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json2
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json12
-rw-r--r--tools/perf/tests/attr.c2
-rw-r--r--tools/perf/tests/attr.py2
-rw-r--r--tools/perf/tests/attr/base-record2
-rw-r--r--tools/perf/tests/bp_signal.c20
-rw-r--r--tools/perf/tests/builtin-test.c3
-rw-r--r--tools/perf/tests/code-reading.c2
-rw-r--r--tools/perf/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c4
-rw-r--r--tools/perf/tests/perf-record.c7
-rwxr-xr-xtools/perf/tests/shell/probe_vfs_getname.sh1
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh1
-rwxr-xr-xtools/perf/tests/shell/record+script_probe_vfs_getname.sh1
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh1
-rw-r--r--tools/perf/trace/beauty/Build2
-rw-r--r--tools/perf/trace/beauty/arch_prctl.c33
-rw-r--r--tools/perf/trace/beauty/beauty.h36
-rw-r--r--tools/perf/trace/beauty/clone.c7
-rw-r--r--tools/perf/trace/beauty/eventfd.c4
-rwxr-xr-xtools/perf/trace/beauty/fadvise.sh22
-rw-r--r--tools/perf/trace/beauty/fcntl.c22
-rw-r--r--tools/perf/trace/beauty/flock.c4
-rw-r--r--tools/perf/trace/beauty/futex_op.c8
-rw-r--r--tools/perf/trace/beauty/futex_val3.c3
-rw-r--r--tools/perf/trace/beauty/ioctl.c32
-rw-r--r--tools/perf/trace/beauty/kcmp.c8
-rw-r--r--tools/perf/trace/beauty/mmap.c22
-rwxr-xr-xtools/perf/trace/beauty/mmap_flags.sh4
-rw-r--r--tools/perf/trace/beauty/mode_t.c4
-rw-r--r--tools/perf/trace/beauty/mount_flags.c8
-rw-r--r--tools/perf/trace/beauty/msg_flags.c4
-rw-r--r--tools/perf/trace/beauty/open_flags.c13
-rw-r--r--tools/perf/trace/beauty/perf_event_open.c4
-rw-r--r--tools/perf/trace/beauty/pkey_alloc.c14
-rw-r--r--tools/perf/trace/beauty/prctl.c16
-rwxr-xr-xtools/perf/trace/beauty/rename_flags.sh15
-rw-r--r--tools/perf/trace/beauty/renameat.c19
-rw-r--r--tools/perf/trace/beauty/sched_policy.c6
-rw-r--r--tools/perf/trace/beauty/seccomp.c8
-rw-r--r--tools/perf/trace/beauty/signum.c4
-rw-r--r--tools/perf/trace/beauty/sockaddr.c4
-rw-r--r--tools/perf/trace/beauty/socket.c8
-rw-r--r--tools/perf/trace/beauty/socket_type.c4
-rw-r--r--tools/perf/trace/beauty/statx.c10
-rw-r--r--tools/perf/trace/beauty/waitid_options.c4
-rwxr-xr-xtools/perf/trace/beauty/x86_arch_prctl.sh26
-rw-r--r--tools/perf/ui/browsers/hists.c11
-rw-r--r--tools/perf/ui/tui/helpline.c2
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/annotate.c49
-rw-r--r--tools/perf/util/annotate.h5
-rw-r--r--tools/perf/util/auxtrace.c11
-rw-r--r--tools/perf/util/bpf-loader.c4
-rw-r--r--tools/perf/util/config.c8
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c102
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h29
-rw-r--r--tools/perf/util/cs-etm.c216
-rw-r--r--tools/perf/util/dso.c8
-rw-r--r--tools/perf/util/dso.h1
-rw-r--r--tools/perf/util/env.c2
-rw-r--r--tools/perf/util/event.c61
-rw-r--r--tools/perf/util/event.h8
-rw-r--r--tools/perf/util/evlist.c20
-rw-r--r--tools/perf/util/evlist.h8
-rw-r--r--tools/perf/util/evsel.c2
-rw-r--r--tools/perf/util/evsel.h4
-rw-r--r--tools/perf/util/evsel_fprintf.c1
-rw-r--r--tools/perf/util/get_current_dir_name.c18
-rw-r--r--tools/perf/util/header.c51
-rw-r--r--tools/perf/util/hist.c2
-rw-r--r--tools/perf/util/hist.h1
-rw-r--r--tools/perf/util/jitdump.c2
-rw-r--r--tools/perf/util/machine.c33
-rw-r--r--tools/perf/util/machine.h6
-rw-r--r--tools/perf/util/map.c89
-rw-r--r--tools/perf/util/map.h18
-rw-r--r--tools/perf/util/mmap.c152
-rw-r--r--tools/perf/util/mmap.h26
-rw-r--r--tools/perf/util/namespaces.c17
-rw-r--r--tools/perf/util/namespaces.h1
-rw-r--r--tools/perf/util/ordered-events.c44
-rw-r--r--tools/perf/util/ordered-events.h8
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/pmu.c47
-rw-r--r--tools/perf/util/probe-event.c4
-rw-r--r--tools/perf/util/probe-file.c2
-rw-r--r--tools/perf/util/python.c4
-rw-r--r--tools/perf/util/s390-cpumsf.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c6
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c24
-rw-r--r--tools/perf/util/session.c7
-rw-r--r--tools/perf/util/sort.c63
-rw-r--r--tools/perf/util/sort.h2
-rw-r--r--tools/perf/util/srccode.c186
-rw-r--r--tools/perf/util/srccode.h7
-rw-r--r--tools/perf/util/srcline.c28
-rw-r--r--tools/perf/util/srcline.h1
-rw-r--r--tools/perf/util/stat-display.c16
-rw-r--r--tools/perf/util/stat-shadow.c6
-rw-r--r--tools/perf/util/svghelper.c2
-rw-r--r--tools/perf/util/symbol.c26
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/thread.c2
-rw-r--r--tools/perf/util/thread.h6
-rw-r--r--tools/perf/util/top.c8
-rw-r--r--tools/perf/util/top.h10
-rw-r--r--tools/perf/util/trace-event-parse.c16
-rw-r--r--tools/perf/util/trace-event-read.c4
-rw-r--r--tools/perf/util/trace-event.c8
-rw-r--r--tools/perf/util/trace-event.h16
-rw-r--r--tools/perf/util/util.h4
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rw-r--r--tools/power/cpupower/Makefile6
-rw-r--r--tools/power/cpupower/cpupower-completion.sh128
-rwxr-xr-xtools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py4
-rw-r--r--tools/power/x86/turbostat/turbostat.c189
-rw-r--r--tools/testing/nvdimm/test/nfit.c35
-rw-r--r--tools/testing/radix-tree/Makefile1
-rw-r--r--tools/testing/radix-tree/main.c1
-rw-r--r--tools/testing/radix-tree/regression.h1
-rw-r--r--tools/testing/radix-tree/regression4.c79
-rw-r--r--tools/testing/selftests/Makefile2
-rw-r--r--tools/testing/selftests/android/Makefile2
-rw-r--r--tools/testing/selftests/bpf/.gitignore1
-rw-r--r--tools/testing/selftests/bpf/Makefile25
-rw-r--r--tools/testing/selftests/bpf/bpf_flow.c38
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h8
-rw-r--r--tools/testing/selftests/bpf/config1
-rw-r--r--tools/testing/selftests/bpf/connect4_prog.c45
-rw-r--r--tools/testing/selftests/bpf/connect6_prog.c58
-rw-r--r--tools/testing/selftests/bpf/netcnt_prog.c6
-rw-r--r--tools/testing/selftests/bpf/test_align.c4
-rw-r--r--tools/testing/selftests/bpf/test_btf.c2587
-rw-r--r--tools/testing/selftests/bpf/test_btf_haskv.c16
-rw-r--r--tools/testing/selftests/bpf/test_btf_nokv.c16
-rwxr-xr-xtools/testing/selftests/bpf/test_flow_dissector.sh2
-rwxr-xr-xtools/testing/selftests/bpf/test_libbpf.sh14
-rwxr-xr-xtools/testing/selftests/bpf/test_lirc_mode2.sh3
-rw-r--r--tools/testing/selftests/bpf/test_lirc_mode2_kern.c3
-rw-r--r--tools/testing/selftests/bpf/test_lirc_mode2_user.c65
-rw-r--r--tools/testing/selftests/bpf/test_map_in_map.c49
-rw-r--r--tools/testing/selftests/bpf/test_maps.c154
-rw-r--r--tools/testing/selftests/bpf/test_netcnt.c5
-rw-r--r--tools/testing/selftests/bpf/test_progs.c75
-rw-r--r--tools/testing/selftests/bpf/test_sk_lookup_kern.c18
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c28
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c127
-rw-r--r--tools/testing/selftests/bpf/test_sockmap_kern.h70
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify.h19
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_kern.c95
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c186
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c691
-rw-r--r--tools/testing/selftests/bpf/xdp_dummy.c13
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/extack.sh145
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/one_armed_router.sh259
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh565
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh259
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan.sh1103
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh309
-rw-r--r--tools/testing/selftests/futex/functional/Makefile1
-rw-r--r--tools/testing/selftests/gpio/Makefile6
-rw-r--r--tools/testing/selftests/kvm/Makefile5
-rw-r--r--tools/testing/selftests/kvm/clear_dirty_log_test.c2
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c165
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h8
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c18
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c67
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util_internal.h1
-rw-r--r--tools/testing/selftests/kvm/lib/ucall.c36
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c157
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c4
-rw-r--r--tools/testing/selftests/lib.mk8
-rw-r--r--tools/testing/selftests/net/.gitignore2
-rw-r--r--tools/testing/selftests/net/Makefile9
-rw-r--r--tools/testing/selftests/net/config14
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh44
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multicast.sh311
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_vid_1.sh135
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh786
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d_port_8472.sh10
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh860
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1q_port_8472.sh10
-rw-r--r--tools/testing/selftests/net/msg_zerocopy.c3
-rwxr-xr-xtools/testing/selftests/net/msg_zerocopy.sh2
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh377
-rw-r--r--tools/testing/selftests/net/reuseport_addr_any.c274
-rwxr-xr-xtools/testing/selftests/net/reuseport_addr_any.sh4
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh122
-rwxr-xr-xtools/testing/selftests/net/run_afpackettests10
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_fdb_changelink.sh29
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_under_vrf.sh129
-rw-r--r--tools/testing/selftests/net/txring_overwrite.c179
-rwxr-xr-xtools/testing/selftests/net/udpgro.sh182
-rwxr-xr-xtools/testing/selftests/net/udpgro_bench.sh95
-rwxr-xr-xtools/testing/selftests/net/udpgso_bench.sh5
-rw-r--r--tools/testing/selftests/net/udpgso_bench_rx.c156
-rw-r--r--tools/testing/selftests/net/udpgso_bench_tx.c22
-rwxr-xr-xtools/testing/selftests/net/xfrm_policy.sh302
-rw-r--r--tools/testing/selftests/netfilter/Makefile6
-rw-r--r--tools/testing/selftests/netfilter/config2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_trans_stress.sh78
-rw-r--r--tools/testing/selftests/networking/timestamping/Makefile6
-rw-r--r--tools/testing/selftests/networking/timestamping/config2
-rw-r--r--tools/testing/selftests/networking/timestamping/txtimestamp.c382
-rwxr-xr-xtools/testing/selftests/networking/timestamping/txtimestamp.sh57
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h10
-rw-r--r--tools/testing/selftests/powerpc/ptrace/core-pkey.c5
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c4
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c3
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c3
-rw-r--r--tools/testing/selftests/powerpc/tm/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c46
-rw-r--r--tools/testing/selftests/proc/proc-self-map-files-002.c9
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh8
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mkinitrd.sh136
-rw-r--r--tools/testing/selftests/rcutorture/bin/nolibc.h2197
-rw-r--r--tools/testing/selftests/rcutorture/doc/initrd.txt99
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h4
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c9
-rw-r--r--tools/testing/selftests/tc-testing/.gitignore3
-rw-r--r--tools/testing/selftests/tc-testing/TdcPlugin.py3
-rw-r--r--tools/testing/selftests/tc-testing/TdcResults.py132
-rw-r--r--tools/testing/selftests/tc-testing/bpf/Makefile1
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py22
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py133
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config.py2
-rw-r--r--tools/testing/selftests/vm/Makefile1
-rw-r--r--tools/virtio/linux/kernel.h4
-rw-r--r--virt/kvm/arm/arch_timer.c35
-rw-r--r--virt/kvm/arm/arm.c55
-rw-r--r--virt/kvm/arm/hyp/vgic-v3-sr.c6
-rw-r--r--virt/kvm/arm/mmio.c11
-rw-r--r--virt/kvm/arm/mmu.c390
-rw-r--r--virt/kvm/arm/trace.h18
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c44
-rw-r--r--virt/kvm/arm/vgic/vgic.c25
-rw-r--r--virt/kvm/async_pf.c2
-rw-r--r--virt/kvm/coalesced_mmio.c6
-rw-r--r--virt/kvm/kvm_main.c208
6951 files changed, 345236 insertions, 202267 deletions
diff --git a/.mailmap b/.mailmap
index 28fecafa6506..b4b0b0b768dd 100644
--- a/.mailmap
+++ b/.mailmap
@@ -36,9 +36,10 @@ Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
Ben Gardner <bgardner@wabtec.com>
Ben M Cahill <ben.m.cahill@intel.com>
Björn Steinbrink <B.Steinbrink@gmx.de>
-Boris Brezillon <boris.brezillon@bootlin.com> <boris.brezillon@free-electrons.com>
-Boris Brezillon <boris.brezillon@bootlin.com> <b.brezillon.dev@gmail.com>
-Boris Brezillon <boris.brezillon@bootlin.com> <b.brezillon@overkiz.com>
+Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
+Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@free-electrons.com>
+Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
+Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
Brian Avery <b.avery@hp.com>
Brian King <brking@us.ibm.com>
Christoph Hellwig <hch@lst.de>
diff --git a/CREDITS b/CREDITS
index c9273393fe14..7d397ee67524 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2541,6 +2541,10 @@ S: Ormond
S: Victoria 3163
S: Australia
+N: Eric Miao
+E: eric.y.miao@gmail.com
+D: MMP support
+
N: Pauline Middelink
E: middelin@polyware.nl
D: General low-level bug fixes, /proc fixes, identd support
@@ -4115,6 +4119,10 @@ S: 1507 145th Place SE #B5
S: Bellevue, Washington 98007
S: USA
+N: Haojian Zhuang
+E: haojian.zhuang@gmail.com
+D: MMP support
+
N: Richard Zidlicky
E: rz@linux-m68k.org, rdzidlic@geocities.com
W: http://www.geocities.com/rdzidlic
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
index d9d117d457e1..9b642669cb16 100644
--- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
@@ -12,7 +12,6 @@ Description: This file shows ASIC health status. The possible values are:
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
cpld1_version
cpld2_version
-
Date: June 2018
KernelVersion: 4.19
Contact: Vadim Pasternak <vadimp@mellanox.com>
@@ -21,6 +20,28 @@ Description: These files show with which CPLD versions have been burned
The files are read only.
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
+ cpld3_version
+
+Date: November 2018
+KernelVersion: 4.21
+Contact: Vadim Pasternak <vadimp@mellanox.com>
+Description: These files show which CPLD versions have been burned
+ on the LED board.
+
+ The files are read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
+ jtag_enable
+
+Date: November 2018
+KernelVersion: 4.21
+Contact: Vadim Pasternak <vadimp@mellanox.com>
+Description: This file enables and disables access to the JTAG domain.
+ By default, access to the JTAG domain is disabled.
+
+ The file is read/write.
+
What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/select_iio
Date: June 2018
KernelVersion: 4.19
@@ -76,3 +97,21 @@ Description: These files show the system reset cause, as following: power
reset cause.
The files are read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/
+ reset_comex_pwr_fail
+ reset_from_comex
+ reset_system
+ reset_voltmon_upgrade_fail
+
+Date: November 2018
+KernelVersion: 4.21
+Contact: Vadim Pasternak <vadimp@mellanox.com>
+Description: These files show the system reset cause, as follows: ComEx
+ power fail, reset from ComEx, system platform reset, and reset
+ due to voltage monitor device upgrade failure.
+ A value of 1 in a file means it is the reset cause; 0 otherwise.
+ Only one bit can be 1 at a time, representing only the most
+ recent reset cause.
+
+ The files are read only.
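
As a usage illustration for the reset-cause attributes added above (not part of the patch): the hwmon index in the path is not fixed, so a reader has to resolve the wildcard, for example with glob(3). A minimal C sketch, assuming only the mlxplat paths and attribute names documented in this file:

    /*
     * Illustration only (not part of the patch): report which of the
     * reset-cause files documented above is set. The hwmon suffix is
     * resolved with glob(3) because the index is not fixed.
     */
    #include <glob.h>
    #include <stdio.h>

    static const char *causes[] = {
        "reset_comex_pwr_fail", "reset_from_comex",
        "reset_system", "reset_voltmon_upgrade_fail",
    };

    int main(void)
    {
        char path[512];
        glob_t g;
        unsigned int i;

        if (glob("/sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*",
                 0, NULL, &g) != 0 || g.gl_pathc == 0)
            return 1;

        for (i = 0; i < sizeof(causes) / sizeof(causes[0]); i++) {
            FILE *f;
            int v = 0;

            snprintf(path, sizeof(path), "%s/%s", g.gl_pathv[0], causes[i]);
            f = fopen(path, "r");
            if (!f)
                continue; /* attribute absent on this platform */
            if (fscanf(f, "%d", &v) == 1 && v == 1)
                printf("last reset cause: %s\n", causes[i]);
            fclose(f);
        }
        globfree(&g);
        return 0;
    }

Since at most one attribute reads 1 at a time, the loop prints at most one cause.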
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index dea212db9df3..7710d4022b19 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -244,7 +244,7 @@ Description:
What: /sys/block/<disk>/queue/zoned
Date: September 2016
-Contact: Damien Le Moal <damien.lemoal@hgst.com>
+Contact: Damien Le Moal <damien.lemoal@wdc.com>
Description:
zoned indicates if the device is a zoned block device
and the zone model of the device if it is indeed zoned.
@@ -259,6 +259,14 @@ Description:
zone commands, they will be treated as regular block
devices and zoned will report "none".
+What: /sys/block/<disk>/queue/nr_zones
+Date: November 2018
+Contact: Damien Le Moal <damien.lemoal@wdc.com>
+Description:
+ nr_zones indicates the total number of zones of a zoned block
+ device ("host-aware" or "host-managed" zone model). For regular
+ block devices, the value is always 0.
+
What: /sys/block/<disk>/queue/chunk_sectors
Date: September 2016
Contact: Hannes Reinecke <hare@suse.com>
@@ -268,6 +276,6 @@ Description:
indicates the size in 512B sectors of the RAID volume
stripe segment. For a zoned block device, either
host-aware or host-managed, chunk_sectors indicates the
- size of 512B sectors of the zones of the device, with
+ size in 512B sectors of the zones of the device, with
the eventual exception of the last zone of the device
which may be smaller.
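
A companion sketch for the zoned/nr_zones attributes above (illustration only; "sda" is a placeholder disk name). Per the description, nr_zones is always 0 for regular block devices, so both reads are safe on any disk:

    /* Illustration only: print the zone model and zone count of a disk. */
    #include <stdio.h>

    static int read_attr(const char *disk, const char *attr,
                         char *buf, int len)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/block/%s/queue/%s", disk, attr);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (!fgets(buf, len, f)) {
            fclose(f);
            return -1;
        }
        fclose(f);
        return 0;
    }

    int main(void)
    {
        char zoned[32], nr_zones[32];

        if (read_attr("sda", "zoned", zoned, sizeof(zoned)) == 0 &&
            read_attr("sda", "nr_zones", nr_zones, sizeof(nr_zones)) == 0)
            printf("zone model: %s nr_zones: %s", zoned, nr_zones);
        return 0;
    }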
diff --git a/Documentation/ABI/testing/sysfs-bus-i3c b/Documentation/ABI/testing/sysfs-bus-i3c
new file mode 100644
index 000000000000..2f332ec36f82
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-i3c
@@ -0,0 +1,146 @@
+What: /sys/bus/i3c/devices/i3c-<bus-id>
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ An I3C bus. This directory will contain one sub-directory per
+ I3C device present on the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/current_master
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ Expose the master that owns the bus (<bus-id>-<master-pid>) at
+ the time this file is read. Note that bus ownership can change
+ over time, so there's no guarantee that when the read() call
+ returns, the value returned is still valid.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/mode
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ I3C bus mode. Can be "pure", "mixed-fast" or "mixed-slow". See
+ the I3C specification for a detailed description of what each
+ of these modes implies.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/i3c_scl_frequency
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ The frequency (expressed in Hz) of the SCL signal when
+ operating in I3C SDR mode.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/i2c_scl_frequency
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ The frequency (expressed in Hz) of the SCL signal when
+ operating in I2C mode.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/dynamic_address
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ Dynamic address assigned to the master controller. This
+ address may change if the bus is re-initialized.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/bcr
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ BCR stands for Bus Characteristics Register and expresses the
+ device capabilities in terms of speed, maximum read/write
+ length, etc. See the I3C specification for more details.
+ This entry describes the BCR of the master controller driving
+ the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/dcr
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ DCR stands for Device Characteristics Register and expresses the
+ device capabilities in terms of exposed features. See the I3C
+ specification for more details.
+ This entry describes the DCR of the master controller driving
+ the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/pid
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ PID stands for Provisional ID and is used to uniquely identify
+ a device on a bus. This PID contains information about the
+ vendor, the part and an instance ID so that several devices of
+ the same type can be connected on the same bus.
+ See the I3C specification for more details.
+ This entry describes the PID of the master controller driving
+ the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/hdrcap
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ Expose the HDR (High Data Rate) capabilities of a device.
+ Returns a list of supported HDR modes, each element separated
+ by a space. Modes can be "hdr-ddr", "hdr-tsp" and "hdr-tsl".
+ See the I3C specification for more details about these HDR
+ modes.
+ This entry describes the HDRCAP of the master controller
+ driving the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ An I3C device present on the I3C bus identified by <bus-id>. Note
+ that all devices are represented including the master driving
+ the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/dynamic_address
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ Dynamic address assigned to device <bus-id>-<device-pid>. This
+ address may change if the bus is re-initialized.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/bcr
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ BCR stands for Bus Characteristics Register and expresses the
+ device capabilities in terms of speed, maximum read/write
+ length, etc. See the I3C specification for more details.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/dcr
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ DCR stands for Device Characteristics Register and expresses the
+ device capabilities in terms of exposed features. See the I3C
+ specification for more details.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/pid
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ PID stands for Provisional ID and is used to uniquely identify
+ a device on a bus. This PID contains information about the
+ vendor, the part and an instance ID so that several devices of
+ the same type can be connected on the same bus.
+ See the I3C specification for more details.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/hdrcap
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ Expose the HDR (High Data Rate) capabilities of a device.
+ Returns a list of supported HDR modes, each element separated
+ by a space. Modes can be "hdr-ddr", "hdr-tsp" and "hdr-tsl".
+ See the I3C specification for more details about these HDR
+ modes.
+
+What: /sys/bus/i3c/devices/<bus-id>-<device-pid>
+KernelVersion: 5.0
+Contact: linux-i3c@vger.kernel.org
+Description:
+ These directories are just symbolic links to
+ /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>.
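
Since both bus directories (i3c-<bus-id>) and device directories
(<bus-id>-<device-pid>) expose a dynamic_address attribute, a simple tool
can walk /sys/bus/i3c/devices and print the address of every entry. The
following is a rough sketch under that assumption, not an interface
guarantee:

	/* i3c_list.c: sketch enumerating I3C sysfs entries and their addresses */
	#include <dirent.h>
	#include <stdio.h>

	int main(void)
	{
		DIR *d = opendir("/sys/bus/i3c/devices");
		struct dirent *e;
		char path[512];
		char addr[16];
		FILE *f;

		if (!d) {
			perror("/sys/bus/i3c/devices");
			return 1;
		}
		while ((e = readdir(d)) != NULL) {
			if (e->d_name[0] == '.')
				continue;
			/* buses (i3c-<bus-id>) and devices
			 * (<bus-id>-<device-pid>) both expose
			 * a dynamic_address attribute */
			snprintf(path, sizeof(path),
				 "/sys/bus/i3c/devices/%s/dynamic_address",
				 e->d_name);
			f = fopen(path, "r");
			if (!f)
				continue;
			if (fgets(addr, sizeof(addr), f))
				printf("%s: dynamic address %s", e->d_name, addr);
			fclose(f);
		}
		closedir(d);
		return 0;
	}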
diff --git a/Documentation/ABI/testing/sysfs-class-net-dsa b/Documentation/ABI/testing/sysfs-class-net-dsa
index f240221e071e..985d84c585c6 100644
--- a/Documentation/ABI/testing/sysfs-class-net-dsa
+++ b/Documentation/ABI/testing/sysfs-class-net-dsa
@@ -1,4 +1,4 @@
-What: /sys/class/net/<iface>/tagging
+What: /sys/class/net/<iface>/dsa/tagging
Date: August 2018
KernelVersion: 4.20
Contact: netdev@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-devices-software_node b/Documentation/ABI/testing/sysfs-devices-software_node
new file mode 100644
index 000000000000..85df37de359f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-software_node
@@ -0,0 +1,10 @@
+What: /sys/devices/.../software_node/
+Date: January 2019
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ This directory contains the details about the device that are
+ assigned in the kernel (i.e. in software), as opposed to the
+ firmware_node directory, which contains the details assigned to
+ the device in firmware. The main attributes in the directory
+ show the properties the device has and its relationship to
+ some of the other devices.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 73318225a368..9605dbd4b5b5 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -145,6 +145,8 @@ What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/name
/sys/devices/system/cpu/cpuX/cpuidle/stateN/power
/sys/devices/system/cpu/cpuX/cpuidle/stateN/time
/sys/devices/system/cpu/cpuX/cpuidle/stateN/usage
+ /sys/devices/system/cpu/cpuX/cpuidle/stateN/above
+ /sys/devices/system/cpu/cpuX/cpuidle/stateN/below
Date: September 2007
KernelVersion: v2.6.24
Contact: Linux power management list <linux-pm@vger.kernel.org>
@@ -166,6 +168,11 @@ Description:
usage: (RO) Number of times this state was entered (a count).
+ above: (RO) Number of times this state was entered, but the
+ observed CPU idle duration was too short for it (a count).
+
+ below: (RO) Number of times this state was entered, but the
+ observed CPU idle duration was too long for it (a count).
What: /sys/devices/system/cpu/cpuX/cpuidle/stateN/desc
Date: February 2008
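
Comparing the new above and below counters against usage gives a quick
measure of how often the cpuidle governor over- or under-estimated the idle
duration for a given state. A minimal sketch, assuming cpu0 and stopping at
the first missing state directory:

	/* idle_miss.c: sketch reporting cpuidle misprediction counts for cpu0 */
	#include <stdio.h>

	static int read_counter(int state, const char *name, unsigned long *val)
	{
		char path[128];
		FILE *f;
		int ok;

		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cpuidle/state%d/%s",
			 state, name);
		f = fopen(path, "r");
		if (!f)
			return -1;
		ok = (fscanf(f, "%lu", val) == 1) ? 0 : -1;
		fclose(f);
		return ok;
	}

	int main(void)
	{
		int state;

		/* cpu0 is an arbitrary choice; iteration ends when a
		 * stateN directory no longer exists */
		for (state = 0; ; state++) {
			unsigned long usage, above, below;

			if (read_counter(state, "usage", &usage) ||
			    read_counter(state, "above", &above) ||
			    read_counter(state, "below", &below))
				break;
			printf("state%d: usage=%lu above=%lu below=%lu\n",
			       state, usage, above, below);
		}
		return 0;
	}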
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg
deleted file mode 100644
index 9bbb1944f962..000000000000
--- a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg
+++ /dev/null
@@ -1,499 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
-
-<!-- CreationDate: Wed Dec 9 17:26:09 2015 -->
-
-<!-- Magnification: 2.000 -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="5.7in"
- height="6.6in"
- viewBox="-44 -44 6838 7888"
- id="svg2"
- version="1.1"
- inkscape:version="0.48.4 r9939"
- sodipodi:docname="BigTreeClassicRCUBH.fig">
- <metadata
- id="metadata110">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <defs
- id="defs108">
- <marker
- inkscape:stockid="Arrow1Mend"
- orient="auto"
- refY="0.0"
- refX="0.0"
- id="Arrow1Mend"
- style="overflow:visible;">
- <path
- id="path3868"
- d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
- style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
- transform="scale(0.4) rotate(180) translate(10,0)" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Mend"
- orient="auto"
- refY="0.0"
- refX="0.0"
- id="Arrow2Mend"
- style="overflow:visible;">
- <path
- id="path3886"
- style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
- d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
- transform="scale(0.6) rotate(180) translate(0,0)" />
- </marker>
- </defs>
- <sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="878"
- inkscape:window-height="1148"
- id="namedview106"
- showgrid="false"
- inkscape:zoom="1.3547758"
- inkscape:cx="256.5"
- inkscape:cy="297"
- inkscape:window-x="45"
- inkscape:window-y="24"
- inkscape:window-maximized="0"
- inkscape:current-layer="g4" />
- <g
- style="stroke-width:.025in; fill:none"
- id="g4">
- <!-- Line: box -->
- <rect
- x="450"
- y="0"
- width="6300"
- height="7350"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
- id="rect6" />
- <!-- Line: box -->
- <rect
- x="4950"
- y="4950"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect8" />
- <!-- Line: box -->
- <rect
- x="750"
- y="600"
- width="5700"
- height="3750"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
- id="rect10" />
- <!-- Line: box -->
- <rect
- x="0"
- y="450"
- width="6300"
- height="7350"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
- id="rect12" />
- <!-- Line: box -->
- <rect
- x="300"
- y="1050"
- width="5700"
- height="3750"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
- id="rect14" />
- <!-- Circle -->
- <circle
- cx="2850"
- cy="3900"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle16" />
- <!-- Circle -->
- <circle
- cx="3150"
- cy="3900"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle18" />
- <!-- Circle -->
- <circle
- cx="3450"
- cy="3900"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle20" />
- <!-- Circle -->
- <circle
- cx="1350"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle22" />
- <!-- Circle -->
- <circle
- cx="1650"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle24" />
- <!-- Circle -->
- <circle
- cx="1950"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle26" />
- <!-- Circle -->
- <circle
- cx="4350"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle28" />
- <!-- Circle -->
- <circle
- cx="4650"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle30" />
- <!-- Circle -->
- <circle
- cx="4950"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle32" />
- <!-- Line -->
- <polyline
- points="1350,3450 2350,2590 "
- style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline34" />
- <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
- <!-- Line -->
- <polyline
- points="4950,3450 3948,2590 "
- style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline38" />
- <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
- <!-- Line: box -->
- <rect
- x="750"
- y="3450"
- width="1800"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
- id="rect42" />
- <!-- Line -->
- <polyline
- points="2250,5400 2250,4414 "
- style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline44" />
- <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
- <!-- Line: box -->
- <rect
- x="1500"
- y="5400"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect48" />
- <!-- Line: box -->
- <rect
- x="300"
- y="6600"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect50" />
- <!-- Line: box -->
- <rect
- x="3750"
- y="3450"
- width="1800"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
- id="rect52" />
- <!-- Line: box -->
- <rect
- x="4500"
- y="5400"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect54" />
- <!-- Line: box -->
- <rect
- x="3300"
- y="6600"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect56" />
- <!-- Line: box -->
- <rect
- x="2250"
- y="1650"
- width="1800"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
- id="rect58" />
- <!-- Text -->
- <text
- xml:space="preserve"
- x="6450"
- y="300"
- fill="#000000"
- font-family="Helvetica"
- font-style="normal"
- font-weight="normal"
- font-size="192"
- text-anchor="end"
- id="text60">rcu_bh</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="3150"
- y="1950"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text62">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="3150"
- y="2250"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text64">rcu_node</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1650"
- y="3750"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text66">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1650"
- y="4050"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text68">rcu_node</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4650"
- y="4050"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text70">rcu_node</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4650"
- y="3750"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text72">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="2250"
- y="5700"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text74">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="2250"
- y="6000"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text76">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="6900"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text78">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="7200"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text80">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="5250"
- y="5700"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text82">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="5250"
- y="6000"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text84">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="6900"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text86">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="7200"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text88">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="450"
- y="1350"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="start"
- id="text90">struct rcu_state</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="6000"
- y="750"
- fill="#000000"
- font-family="Helvetica"
- font-style="normal"
- font-weight="normal"
- font-size="192"
- text-anchor="end"
- id="text92">rcu_sched</text>
- <!-- Line -->
- <polyline
- points="5250,5400 5250,4414 "
- style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline94" />
- <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
- <!-- Line -->
- <polyline
- points="4050,6600 4050,4414 "
- style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline98" />
- <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
- <!-- Line -->
- <polyline
- points="1050,6600 1050,4414 "
- style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline102" />
- <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
- </g>
-</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg
deleted file mode 100644
index 21ba7823479d..000000000000
--- a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg
+++ /dev/null
@@ -1,695 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
-
-<!-- CreationDate: Wed Dec 9 17:20:02 2015 -->
-
-<!-- Magnification: 2.000 -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="5.7in"
- height="8.6in"
- viewBox="-44 -44 6838 10288"
- id="svg2"
- version="1.1"
- inkscape:version="0.48.4 r9939"
- sodipodi:docname="BigTreeClassicRCUBHdyntick.fig">
- <metadata
- id="metadata166">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <defs
- id="defs164">
- <marker
- inkscape:stockid="Arrow1Mend"
- orient="auto"
- refY="0.0"
- refX="0.0"
- id="Arrow1Mend"
- style="overflow:visible;">
- <path
- id="path3924"
- d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
- style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
- transform="scale(0.4) rotate(180) translate(10,0)" />
- </marker>
- <marker
- inkscape:stockid="Arrow2Lend"
- orient="auto"
- refY="0.0"
- refX="0.0"
- id="Arrow2Lend"
- style="overflow:visible;">
- <path
- id="path3936"
- style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
- d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
- transform="scale(1.1) rotate(180) translate(1,0)" />
- </marker>
- </defs>
- <sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="845"
- inkscape:window-height="988"
- id="namedview162"
- showgrid="false"
- inkscape:zoom="1.0452196"
- inkscape:cx="256.5"
- inkscape:cy="387.00003"
- inkscape:window-x="356"
- inkscape:window-y="61"
- inkscape:window-maximized="0"
- inkscape:current-layer="g4" />
- <g
- style="stroke-width:.025in; fill:none"
- id="g4">
- <!-- Line: box -->
- <rect
- x="450"
- y="0"
- width="6300"
- height="7350"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
- id="rect6" />
- <!-- Line: box -->
- <rect
- x="4950"
- y="4950"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect8" />
- <!-- Line: box -->
- <rect
- x="750"
- y="600"
- width="5700"
- height="3750"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
- id="rect10" />
- <!-- Line -->
- <polyline
- points="5250,8100 5688,5912 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline12" />
- <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
- <polyline
- points="5714 6068 5704 5822 5598 6044 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline14" />
- <!-- Line -->
- <polyline
- points="4050,9300 4486,7262 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline16" />
- <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
- <polyline
- points="4514 7418 4506 7172 4396 7394 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline18" />
- <!-- Line -->
- <polyline
- points="1040,9300 1476,7262 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline20" />
- <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
- <polyline
- points="1504 7418 1496 7172 1386 7394 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline22" />
- <!-- Line -->
- <polyline
- points="2240,8100 2676,6062 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline24" />
- <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
- <polyline
- points="2704 6218 2696 5972 2586 6194 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline26" />
- <!-- Line: box -->
- <rect
- x="0"
- y="450"
- width="6300"
- height="7350"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
- id="rect28" />
- <!-- Line: box -->
- <rect
- x="300"
- y="1050"
- width="5700"
- height="3750"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
- id="rect30" />
- <!-- Line -->
- <polyline
- points="1350,3450 2350,2590 "
- style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline32" />
- <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
- <!-- Line -->
- <polyline
- points="4950,3450 3948,2590 "
- style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline36" />
- <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
- <!-- Line -->
- <polyline
- points="4050,6600 4050,4414 "
- style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline40" />
- <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
- <!-- Line -->
- <polyline
- points="1050,6600 1050,4414 "
- style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline44" />
- <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
- <!-- Line -->
- <polyline
- points="2250,5400 2250,4414 "
- style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline48" />
- <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
- <!-- Line -->
- <polyline
- points="2250,8100 2250,6364 "
- style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
- id="polyline52" />
- <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
- <!-- Line -->
- <polyline
- points="1050,9300 1050,7564 "
- style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
- id="polyline56" />
- <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
- <!-- Line -->
- <polyline
- points="4050,9300 4050,7564 "
- style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
- id="polyline60" />
- <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
- <!-- Line -->
- <polyline
- points="5250,8100 5250,6364 "
- style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
- id="polyline64" />
- <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
- <!-- Circle -->
- <circle
- cx="2850"
- cy="3900"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle68" />
- <!-- Circle -->
- <circle
- cx="3150"
- cy="3900"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle70" />
- <!-- Circle -->
- <circle
- cx="3450"
- cy="3900"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle72" />
- <!-- Circle -->
- <circle
- cx="1350"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle74" />
- <!-- Circle -->
- <circle
- cx="1650"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle76" />
- <!-- Circle -->
- <circle
- cx="1950"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle78" />
- <!-- Circle -->
- <circle
- cx="4350"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle80" />
- <!-- Circle -->
- <circle
- cx="4650"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle82" />
- <!-- Circle -->
- <circle
- cx="4950"
- cy="5100"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle84" />
- <!-- Line: box -->
- <rect
- x="750"
- y="3450"
- width="1800"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
- id="rect86" />
- <!-- Line: box -->
- <rect
- x="300"
- y="6600"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect88" />
- <!-- Line: box -->
- <rect
- x="3750"
- y="3450"
- width="1800"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
- id="rect90" />
- <!-- Line: box -->
- <rect
- x="4500"
- y="5400"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect92" />
- <!-- Line: box -->
- <rect
- x="3300"
- y="6600"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect94" />
- <!-- Line: box -->
- <rect
- x="2250"
- y="1650"
- width="1800"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
- id="rect96" />
- <!-- Line: box -->
- <rect
- x="0"
- y="9300"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect98" />
- <!-- Line: box -->
- <rect
- x="1350"
- y="8100"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect100" />
- <!-- Line: box -->
- <rect
- x="3000"
- y="9300"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect102" />
- <!-- Line: box -->
- <rect
- x="4350"
- y="8100"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect104" />
- <!-- Line: box -->
- <rect
- x="1500"
- y="5400"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect106" />
- <!-- Text -->
- <text
- xml:space="preserve"
- x="6450"
- y="300"
- fill="#000000"
- font-family="Helvetica"
- font-style="normal"
- font-weight="normal"
- font-size="192"
- text-anchor="end"
- id="text108">rcu_bh</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="3150"
- y="1950"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text110">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="3150"
- y="2250"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text112">rcu_node</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1650"
- y="3750"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text114">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1650"
- y="4050"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text116">rcu_node</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4650"
- y="4050"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text118">rcu_node</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4650"
- y="3750"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text120">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="2250"
- y="5700"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text122">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="2250"
- y="6000"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text124">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="6900"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text126">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="7200"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text128">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="5250"
- y="5700"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text130">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="5250"
- y="6000"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text132">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="6900"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text134">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="7200"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text136">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="450"
- y="1350"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="start"
- id="text138">struct rcu_state</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="9600"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text140">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="9900"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text142">rcu_dynticks</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="9600"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text144">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="9900"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text146">rcu_dynticks</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="2400"
- y="8400"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text148">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="2400"
- y="8700"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text150">rcu_dynticks</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="5400"
- y="8400"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text152">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="5400"
- y="8700"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text154">rcu_dynticks</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="6000"
- y="750"
- fill="#000000"
- font-family="Helvetica"
- font-style="normal"
- font-weight="normal"
- font-size="192"
- text-anchor="end"
- id="text156">rcu_sched</text>
- <!-- Line -->
- <polyline
- points="5250,5400 5250,4414 "
- style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline158" />
- <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
- </g>
-</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg
deleted file mode 100644
index 15adcac036c7..000000000000
--- a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg
+++ /dev/null
@@ -1,741 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
-
-<!-- CreationDate: Wed Dec 9 17:32:59 2015 -->
-
-<!-- Magnification: 2.000 -->
-
-<svg
- xmlns:dc="http://purl.org/dc/elements/1.1/"
- xmlns:cc="http://creativecommons.org/ns#"
- xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
- xmlns:svg="http://www.w3.org/2000/svg"
- xmlns="http://www.w3.org/2000/svg"
- xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
- xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="6.1in"
- height="8.9in"
- viewBox="-44 -44 7288 10738"
- id="svg2"
- version="1.1"
- inkscape:version="0.48.4 r9939"
- sodipodi:docname="BigTreePreemptRCUBHdyntick.fig">
- <metadata
- id="metadata182">
- <rdf:RDF>
- <cc:Work
- rdf:about="">
- <dc:format>image/svg+xml</dc:format>
- <dc:type
- rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
- <dc:title></dc:title>
- </cc:Work>
- </rdf:RDF>
- </metadata>
- <defs
- id="defs180">
- <marker
- inkscape:stockid="Arrow1Mend"
- orient="auto"
- refY="0.0"
- refX="0.0"
- id="Arrow1Mend"
- style="overflow:visible;">
- <path
- id="path3940"
- d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
- style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
- transform="scale(0.4) rotate(180) translate(10,0)" />
- </marker>
- </defs>
- <sodipodi:namedview
- pagecolor="#ffffff"
- bordercolor="#666666"
- borderopacity="1"
- objecttolerance="10"
- gridtolerance="10"
- guidetolerance="10"
- inkscape:pageopacity="0"
- inkscape:pageshadow="2"
- inkscape:window-width="874"
- inkscape:window-height="1148"
- id="namedview178"
- showgrid="false"
- inkscape:zoom="1.2097379"
- inkscape:cx="274.5"
- inkscape:cy="400.49997"
- inkscape:window-x="946"
- inkscape:window-y="24"
- inkscape:window-maximized="0"
- inkscape:current-layer="g4" />
- <g
- style="stroke-width:.025in; fill:none"
- id="g4">
- <!-- Line: box -->
- <rect
- x="900"
- y="0"
- width="6300"
- height="7350"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
- id="rect6" />
- <!-- Line: box -->
- <rect
- x="1200"
- y="600"
- width="5700"
- height="3750"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
- id="rect8" />
- <!-- Line: box -->
- <rect
- x="5400"
- y="4950"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect10" />
- <!-- Line: box -->
- <rect
- x="450"
- y="450"
- width="6300"
- height="7350"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
- id="rect12" />
- <!-- Line: box -->
- <rect
- x="750"
- y="1050"
- width="5700"
- height="3750"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
- id="rect14" />
- <!-- Line: box -->
- <rect
- x="4950"
- y="5400"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect16" />
- <!-- Line -->
- <polyline
- points="5250,8550 5688,6362 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline18" />
- <!-- Arrowhead on XXXpoint 5250 8550 - 5710 6240-->
- <polyline
- points="5714 6518 5704 6272 5598 6494 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline20" />
- <!-- Line -->
- <polyline
- points="4050,9750 4486,7712 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline22" />
- <!-- Arrowhead on XXXpoint 4050 9750 - 4512 7590-->
- <polyline
- points="4514 7868 4506 7622 4396 7844 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline24" />
- <!-- Line -->
- <polyline
- points="1040,9750 1476,7712 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline26" />
- <!-- Arrowhead on XXXpoint 1040 9750 - 1502 7590-->
- <polyline
- points="1504 7868 1496 7622 1386 7844 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline28" />
- <!-- Line -->
- <polyline
- points="2240,8550 2676,6512 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline30" />
- <!-- Arrowhead on XXXpoint 2240 8550 - 2702 6390-->
- <polyline
- points="2704 6668 2696 6422 2586 6644 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline32" />
- <!-- Line -->
- <polyline
- points="4050,9750 5682,6360 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline34" />
- <!-- Arrowhead on XXXpoint 4050 9750 - 5736 6246-->
- <polyline
- points="5672 6518 5722 6276 5562 6466 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline36" />
- <!-- Line -->
- <polyline
- points="1010,9750 2642,6360 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline38" />
- <!-- Arrowhead on XXXpoint 1010 9750 - 2696 6246-->
- <polyline
- points="2632 6518 2682 6276 2522 6466 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline40" />
- <!-- Line: box -->
- <rect
- x="0"
- y="900"
- width="6300"
- height="7350"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
- id="rect42" />
- <!-- Line: box -->
- <rect
- x="300"
- y="1500"
- width="5700"
- height="3750"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
- id="rect44" />
- <!-- Line -->
- <polyline
- points="1350,3900 2350,3040 "
- style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline46" />
- <!-- Arrowhead on XXXpoint 1350 3900 - 2444 2960-->
- <!-- Line -->
- <polyline
- points="4950,3900 3948,3040 "
- style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline50" />
- <!-- Arrowhead on XXXpoint 4950 3900 - 3854 2960-->
- <!-- Line -->
- <polyline
- points="4050,7050 4050,4864 "
- style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline54" />
- <!-- Arrowhead on XXXpoint 4050 7050 - 4050 4740-->
- <!-- Line -->
- <polyline
- points="1050,7050 1050,4864 "
- style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline58" />
- <!-- Arrowhead on XXXpoint 1050 7050 - 1050 4740-->
- <!-- Line -->
- <polyline
- points="2250,5850 2250,4864 "
- style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline62" />
- <!-- Arrowhead on XXXpoint 2250 5850 - 2250 4740-->
- <!-- Line -->
- <polyline
- points="2250,8550 2250,6814 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline66" />
- <!-- Arrowhead on XXXpoint 2250 8550 - 2250 6690-->
- <!-- Line -->
- <polyline
- points="1050,9750 1050,8014 "
- style="stroke:#00ff00;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline70" />
- <!-- Arrowhead on XXXpoint 1050 9750 - 1050 7890-->
- <!-- Line -->
- <polyline
- points="4050,9750 4050,8014 "
- style="stroke:#00ff00;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline74" />
- <!-- Arrowhead on XXXpoint 4050 9750 - 4050 7890-->
- <!-- Line -->
- <polyline
- points="5250,8550 5250,6814 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline78" />
- <!-- Arrowhead on XXXpoint 5250 8550 - 5250 6690-->
- <!-- Circle -->
- <circle
- cx="2850"
- cy="4350"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle82" />
- <!-- Circle -->
- <circle
- cx="3150"
- cy="4350"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle84" />
- <!-- Circle -->
- <circle
- cx="3450"
- cy="4350"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle86" />
- <!-- Circle -->
- <circle
- cx="1350"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle88" />
- <!-- Circle -->
- <circle
- cx="1650"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle90" />
- <!-- Circle -->
- <circle
- cx="1950"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle92" />
- <!-- Circle -->
- <circle
- cx="4350"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle94" />
- <!-- Circle -->
- <circle
- cx="4650"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle96" />
- <!-- Circle -->
- <circle
- cx="4950"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle98" />
- <!-- Line: box -->
- <rect
- x="750"
- y="3900"
- width="1800"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
- id="rect100" />
- <!-- Line: box -->
- <rect
- x="300"
- y="7050"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect102" />
- <!-- Line: box -->
- <rect
- x="3750"
- y="3900"
- width="1800"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
- id="rect104" />
- <!-- Line: box -->
- <rect
- x="4500"
- y="5850"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect106" />
- <!-- Line: box -->
- <rect
- x="3300"
- y="7050"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect108" />
- <!-- Line: box -->
- <rect
- x="2250"
- y="2100"
- width="1800"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
- id="rect110" />
- <!-- Line: box -->
- <rect
- x="0"
- y="9750"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect112" />
- <!-- Line: box -->
- <rect
- x="1350"
- y="8550"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect114" />
- <!-- Line: box -->
- <rect
- x="3000"
- y="9750"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect116" />
- <!-- Line: box -->
- <rect
- x="4350"
- y="8550"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect118" />
- <!-- Line: box -->
- <rect
- x="1500"
- y="5850"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect120" />
- <!-- Text -->
- <text
- xml:space="preserve"
- x="6450"
- y="750"
- fill="#000000"
- font-family="Helvetica"
- font-style="normal"
- font-weight="normal"
- font-size="192"
- text-anchor="end"
- id="text122">rcu_bh</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="3150"
- y="2400"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text124">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="3150"
- y="2700"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text126">rcu_node</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1650"
- y="4200"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text128">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1650"
- y="4500"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text130">rcu_node</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4650"
- y="4500"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text132">rcu_node</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4650"
- y="4200"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text134">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="2250"
- y="6150"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text136">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="2250"
- y="6450"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text138">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="7350"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text140">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="7650"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text142">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="5250"
- y="6150"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text144">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="5250"
- y="6450"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text146">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="7350"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text148">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="7650"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text150">rcu_data</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="450"
- y="1800"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="start"
- id="text152">struct rcu_state</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="10050"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text154">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="10350"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text156">rcu_dynticks</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="10050"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text158">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="10350"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text160">rcu_dynticks</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="2400"
- y="8850"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text162">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="2400"
- y="9150"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text164">rcu_dynticks</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="5400"
- y="8850"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text166">struct</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="5400"
- y="9150"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text168">rcu_dynticks</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="6900"
- y="300"
- fill="#000000"
- font-family="Helvetica"
- font-style="normal"
- font-weight="normal"
- font-size="192"
- text-anchor="end"
- id="text170">rcu_preempt</text>
- <!-- Text -->
- <text
- xml:space="preserve"
- x="6000"
- y="1200"
- fill="#000000"
- font-family="Helvetica"
- font-style="normal"
- font-weight="normal"
- font-size="192"
- text-anchor="end"
- id="text172">rcu_sched</text>
- <!-- Line -->
- <polyline
- points="5250,5850 5250,4864 "
- style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline174" />
- <!-- Arrowhead on XXXpoint 5250 5850 - 5250 4740-->
- </g>
-</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg
index bbc3801470d0..3a1a4f85dc3a 100644
--- a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg
+++ b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg
@@ -13,12 +13,12 @@
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
- width="7.4in"
- height="9.9in"
- viewBox="-44 -44 8938 11938"
+ width="7.4000001in"
+ height="7.9000001in"
+ viewBox="-44 -44 8938 9526.283"
id="svg2"
version="1.1"
- inkscape:version="0.48.4 r9939"
+ inkscape:version="0.92.2pre0 (973e216, 2017-07-25)"
sodipodi:docname="BigTreePreemptRCUBHdyntickCB.svg">
<metadata
id="metadata212">
@@ -37,15 +37,46 @@
<marker
inkscape:stockid="Arrow1Mend"
orient="auto"
- refY="0.0"
- refX="0.0"
+ refY="0"
+ refX="0"
+ id="marker1177"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path897"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Lend"
+ orient="auto"
+ refY="0"
+ refX="0"
+ id="Arrow1Lend"
+ style="overflow:visible"
+ inkscape:isstock="true">
+ <path
+ id="path891"
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill:#000000;fill-opacity:1;fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt;stroke-opacity:1"
+ transform="matrix(-0.8,0,0,-0.8,-10,0)"
+ inkscape:connector-curvature="0" />
+ </marker>
+ <marker
+ inkscape:stockid="Arrow1Mend"
+ orient="auto"
+ refY="0"
+ refX="0"
id="Arrow1Mend"
- style="overflow:visible;">
+ style="overflow:visible">
<path
id="path3970"
- d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
- style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
- transform="scale(0.4) rotate(180) translate(10,0)" />
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
</marker>
</defs>
<sodipodi:namedview
@@ -57,802 +88,575 @@
guidetolerance="10"
inkscape:pageopacity="0"
inkscape:pageshadow="2"
- inkscape:window-width="881"
- inkscape:window-height="1128"
+ inkscape:window-width="1920"
+ inkscape:window-height="1019"
id="namedview208"
showgrid="false"
inkscape:zoom="1.0195195"
- inkscape:cx="333"
- inkscape:cy="445.49997"
- inkscape:window-x="936"
- inkscape:window-y="24"
- inkscape:window-maximized="0"
+ inkscape:cx="166.25478"
+ inkscape:cy="362.18693"
+ inkscape:window-x="0"
+ inkscape:window-y="0"
+ inkscape:window-maximized="1"
inkscape:current-layer="g4" />
<g
- style="stroke-width:.025in; fill:none"
- id="g4">
+ style="fill:none;stroke-width:0.025in"
+ id="g4"
+ transform="translate(0,-2415.6743)">
<!-- Line: box -->
- <rect
- x="900"
- y="0"
- width="6300"
- height="7350"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
- id="rect6" />
<!-- Line: box -->
- <rect
- x="1200"
- y="600"
- width="5700"
- height="3750"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
- id="rect8" />
<!-- Line: box -->
- <rect
- x="5400"
- y="4950"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect10" />
<!-- Line: box -->
- <rect
- x="450"
- y="450"
- width="6300"
- height="7350"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
- id="rect12" />
<!-- Line: box -->
- <rect
- x="750"
- y="1050"
- width="5700"
- height="3750"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
- id="rect14" />
<!-- Line: box -->
- <rect
- x="4950"
- y="5400"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect16" />
<!-- Line -->
- <polyline
- points="5250,8550 5688,6362 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline18" />
<!-- Arrowhead on XXXpoint 5250 8550 - 5710 6240-->
<polyline
points="5714 6518 5704 6272 5598 6494 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline20" />
+ style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8"
+ id="polyline20"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Line -->
- <polyline
- points="4050,9750 4486,7712 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline22" />
<!-- Arrowhead on XXXpoint 4050 9750 - 4512 7590-->
<polyline
points="4514 7868 4506 7622 4396 7844 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline24" />
+ style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8"
+ id="polyline24"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Line -->
- <polyline
- points="1040,9750 1476,7712 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline26" />
<!-- Arrowhead on XXXpoint 1040 9750 - 1502 7590-->
<polyline
points="1504 7868 1496 7622 1386 7844 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline28" />
+ style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8"
+ id="polyline28"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Line -->
- <polyline
- points="2240,8550 2676,6512 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline30" />
<!-- Arrowhead on XXXpoint 2240 8550 - 2702 6390-->
<polyline
points="2704 6668 2696 6422 2586 6644 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline32" />
+ style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8"
+ id="polyline32"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Line -->
- <polyline
- points="4050,9600 5692,6062 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline34" />
<!-- Arrowhead on XXXpoint 4050 9600 - 5744 5948-->
<polyline
points="5682 6220 5730 5978 5574 6170 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline36" />
+ style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8"
+ id="polyline36"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Line -->
- <polyline
- points="1086,9600 2728,6062 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline38" />
<!-- Arrowhead on XXXpoint 1086 9600 - 2780 5948-->
<polyline
points="2718 6220 2766 5978 2610 6170 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline40" />
+ style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8"
+ id="polyline40"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Line: box -->
<rect
- x="0"
- y="900"
+ x="12.340758"
+ y="2442.5947"
width="6300"
- height="7350"
+ height="7045.3135"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+ style="fill:#ffffff;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect42" />
<!-- Line: box -->
<rect
- x="300"
- y="1500"
+ x="312.34076"
+ y="3017.7224"
width="5700"
- height="3750"
+ height="3594.5476"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+ style="fill:#ffff00;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect44" />
<!-- Line -->
<polyline
points="1350,3900 2350,3040 "
- style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline46" />
+ style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline46"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Arrowhead on XXXpoint 1350 3900 - 2444 2960-->
<!-- Line -->
<polyline
points="4950,3900 3948,3040 "
- style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline50" />
+ style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline50"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Arrowhead on XXXpoint 4950 3900 - 3854 2960-->
<!-- Line -->
<polyline
points="4050,7050 4050,4864 "
- style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline54" />
+ style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline54"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Arrowhead on XXXpoint 4050 7050 - 4050 4740-->
<!-- Line -->
<polyline
points="1050,7050 1050,4864 "
- style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline58" />
+ style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline58"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Arrowhead on XXXpoint 1050 7050 - 1050 4740-->
<!-- Line -->
<polyline
points="2250,5850 2250,4864 "
- style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline62" />
+ style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline62"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Arrowhead on XXXpoint 2250 5850 - 2250 4740-->
<!-- Line -->
- <polyline
- points="2250,8550 2250,6814 "
- style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline66" />
<!-- Arrowhead on XXXpoint 2250 8550 - 2250 6690-->
<!-- Line -->
- <polyline
- points="1050,9750 1050,8014 "
- style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline70" />
<!-- Arrowhead on XXXpoint 1050 9750 - 1050 7890-->
<!-- Line -->
- <polyline
- points="4050,9750 4050,8014 "
- style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline74" />
<!-- Arrowhead on XXXpoint 4050 9750 - 4050 7890-->
<!-- Line -->
- <polyline
- points="5250,8550 5250,6814 "
- style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline78" />
<!-- Arrowhead on XXXpoint 5250 8550 - 5250 6690-->
<!-- Line -->
- <polyline
- points="6000,6300 8048,7910 "
- style="stroke:#87cfff;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
- id="polyline82" />
<!-- Arrowhead on XXXpoint 6000 6300 - 8146 7986-->
<!-- Circle -->
- <circle
- cx="2850"
- cy="4350"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle86" />
+ <ellipse
+ cx="2862.3408"
+ cy="5749.5786"
+ style="fill:#000000;stroke:#000000;stroke-width:13.70675087"
+ id="circle86"
+ rx="76"
+ ry="72.849495" />
<!-- Circle -->
- <circle
- cx="3150"
- cy="4350"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle88" />
+ <ellipse
+ cx="3162.3408"
+ cy="5749.5786"
+ style="fill:#000000;stroke:#000000;stroke-width:13.70675087"
+ id="circle88"
+ rx="76"
+ ry="72.849495" />
<!-- Circle -->
- <circle
- cx="3450"
- cy="4350"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle90" />
+ <ellipse
+ cx="3462.3408"
+ cy="5749.5786"
+ style="fill:#000000;stroke:#000000;stroke-width:13.70675087"
+ id="circle90"
+ rx="76"
+ ry="72.849495" />
<!-- Circle -->
- <circle
- cx="1350"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle92" />
+ <ellipse
+ cx="1362.3407"
+ cy="6899.834"
+ style="fill:#000000;stroke:#000000;stroke-width:13.70675087"
+ id="circle92"
+ rx="76"
+ ry="72.849495" />
<!-- Circle -->
- <circle
- cx="1650"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle94" />
+ <ellipse
+ cx="1662.3407"
+ cy="6899.834"
+ style="fill:#000000;stroke:#000000;stroke-width:13.70675087"
+ id="circle94"
+ rx="76"
+ ry="72.849495" />
<!-- Circle -->
- <circle
- cx="1950"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle96" />
+ <ellipse
+ cx="1962.3407"
+ cy="6899.834"
+ style="fill:#000000;stroke:#000000;stroke-width:13.70675087"
+ id="circle96"
+ rx="76"
+ ry="72.849495" />
<!-- Circle -->
- <circle
- cx="4350"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle98" />
+ <ellipse
+ cx="4362.3408"
+ cy="6899.834"
+ style="fill:#000000;stroke:#000000;stroke-width:13.70675087"
+ id="circle98"
+ rx="76"
+ ry="72.849495" />
<!-- Circle -->
- <circle
- cx="4650"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle100" />
+ <ellipse
+ cx="4662.3408"
+ cy="6899.834"
+ style="fill:#000000;stroke:#000000;stroke-width:13.70675087"
+ id="circle100"
+ rx="76"
+ ry="72.849495" />
<!-- Circle -->
- <circle
- cx="4950"
- cy="5550"
- r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
- id="circle102" />
+ <ellipse
+ cx="4962.3408"
+ cy="6899.834"
+ style="fill:#000000;stroke:#000000;stroke-width:13.70675087"
+ id="circle102"
+ rx="76"
+ ry="72.849495" />
<!-- Line: box -->
<rect
- x="7350"
- y="7950"
+ x="6745.3027"
+ y="8146.0654"
width="1500"
- height="900"
+ height="862.69141"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+ style="stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect104" />
<!-- Line: box -->
<rect
- x="7350"
- y="9450"
+ x="6745.3027"
+ y="9583.8857"
width="1500"
- height="900"
+ height="862.69141"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+ style="stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect106" />
<!-- Line -->
<polyline
points="8100,8850 8100,9384 "
- style="stroke:#000000;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
- id="polyline108" />
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend)"
+ id="polyline108"
+ transform="matrix(1,0,0,0.95854605,-604.69715,525.62477)" />
<!-- Arrowhead on XXXpoint 8100 8850 - 8100 9510-->
<!-- Line: box -->
<rect
- x="7350"
- y="10950"
+ x="6745.3027"
+ y="11021.704"
width="1500"
- height="900"
+ height="862.69141"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+ style="stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect112" />
<!-- Line -->
<polyline
points="8100,10350 8100,10884 "
- style="stroke:#000000;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
- id="polyline114" />
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter;marker-end:url(#Arrow1Mend)"
+ id="polyline114"
+ transform="matrix(1,0,0,0.95854605,-604.69715,525.62477)" />
<!-- Arrowhead on XXXpoint 8100 10350 - 8100 11010-->
<!-- Line: box -->
<rect
- x="750"
- y="3900"
+ x="762.34076"
+ y="5318.2324"
width="1800"
- height="900"
+ height="862.69141"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+ style="fill:#ffbfbf;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect118" />
<!-- Line: box -->
<rect
- x="300"
- y="7050"
+ x="312.34076"
+ y="8337.6533"
width="1500"
- height="900"
+ height="862.69141"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+ style="fill:#87cfff;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect120" />
<!-- Line: box -->
<rect
- x="3750"
- y="3900"
+ x="3762.3408"
+ y="5318.2324"
width="1800"
- height="900"
+ height="862.69141"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+ style="fill:#ffbfbf;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect122" />
<!-- Line: box -->
<rect
- x="4500"
- y="5850"
+ x="4512.3408"
+ y="7187.3975"
width="1500"
- height="900"
+ height="862.69141"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+ style="fill:#87cfff;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect124" />
<!-- Line: box -->
<rect
- x="3300"
- y="7050"
+ x="3312.3408"
+ y="8337.6533"
width="1500"
- height="900"
+ height="862.69141"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+ style="fill:#87cfff;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect126" />
<!-- Line: box -->
<rect
- x="2250"
- y="2100"
+ x="2262.3408"
+ y="3592.8503"
width="1800"
- height="900"
+ height="862.69141"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+ style="fill:#ffbfbf;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect128" />
<!-- Line: box -->
- <rect
- x="0"
- y="9750"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect130" />
<!-- Line: box -->
- <rect
- x="1350"
- y="8550"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect132" />
<!-- Line: box -->
- <rect
- x="3000"
- y="9750"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect134" />
<!-- Line: box -->
- <rect
- x="4350"
- y="8550"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect136" />
<!-- Line: box -->
<rect
- x="1500"
- y="5850"
+ x="1512.3407"
+ y="7187.3975"
width="1500"
- height="900"
+ height="862.69141"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+ style="fill:#87cfff;stroke:#000000;stroke-width:29.37160873;stroke-linecap:butt;stroke-linejoin:miter"
id="rect138" />
<!-- Text -->
<text
xml:space="preserve"
- x="8100"
- y="8250"
- fill="#000000"
- font-family="Courier"
+ x="7338.3037"
+ y="8614.0625"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text140">struct</text>
+ id="text140"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="8100"
- y="8550"
- fill="#000000"
- font-family="Courier"
+ x="7338.3037"
+ y="8907.7783"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text142">rcu_head</text>
+ id="text142"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">rcu_head</text>
<!-- Text -->
<text
xml:space="preserve"
- x="8100"
- y="9750"
- fill="#000000"
- font-family="Courier"
+ x="7338.3037"
+ y="10082.644"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text144">struct</text>
+ id="text144"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="8100"
- y="10050"
- fill="#000000"
- font-family="Courier"
+ x="7338.3037"
+ y="10376.36"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text146">rcu_head</text>
+ id="text146"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">rcu_head</text>
<!-- Text -->
<text
xml:space="preserve"
- x="8100"
- y="11250"
- fill="#000000"
- font-family="Courier"
+ x="7338.3037"
+ y="11551.224"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text148">struct</text>
+ id="text148"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="8100"
- y="11550"
- fill="#000000"
- font-family="Courier"
+ x="7338.3037"
+ y="11844.94"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text150">rcu_head</text>
+ id="text150"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">rcu_head</text>
<!-- Text -->
<text
xml:space="preserve"
- x="6000"
- y="1200"
- fill="#000000"
- font-family="Helvetica"
+ x="5886.4043"
+ y="2788.5688"
font-style="normal"
font-weight="normal"
font-size="192"
- text-anchor="end"
- id="text152">rcu_sched</text>
+ id="text152"
+ style="font-style:normal;font-weight:normal;font-size:187.978302px;font-family:Helvetica;text-anchor:end;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">rcu_state</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="6450"
- y="750"
- fill="#000000"
- font-family="Helvetica"
- font-style="normal"
- font-weight="normal"
- font-size="192"
- text-anchor="end"
- id="text154">rcu_bh</text>
<!-- Text -->
<text
xml:space="preserve"
- x="3150"
- y="2400"
- fill="#000000"
- font-family="Courier"
+ x="3096.1016"
+ y="3963.4336"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text156">struct</text>
+ id="text156"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="3150"
- y="2700"
- fill="#000000"
- font-family="Courier"
+ x="3096.1016"
+ y="4257.1494"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text158">rcu_node</text>
+ id="text158"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">rcu_node</text>
<!-- Text -->
<text
xml:space="preserve"
- x="1650"
- y="4200"
- fill="#000000"
- font-family="Courier"
+ x="1627.5209"
+ y="5725.7305"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text160">struct</text>
+ id="text160"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="1650"
- y="4500"
- fill="#000000"
- font-family="Courier"
+ x="1627.5209"
+ y="6019.4463"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text162">rcu_node</text>
+ id="text162"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">rcu_node</text>
<!-- Text -->
<text
xml:space="preserve"
- x="4650"
- y="4500"
- fill="#000000"
- font-family="Courier"
+ x="4564.6821"
+ y="6019.4463"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text164">rcu_node</text>
+ id="text164"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">rcu_node</text>
<!-- Text -->
<text
xml:space="preserve"
- x="4650"
- y="4200"
- fill="#000000"
- font-family="Courier"
+ x="4564.6821"
+ y="5725.7305"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text166">struct</text>
+ id="text166"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="2250"
- y="6150"
- fill="#000000"
- font-family="Courier"
+ x="2214.9531"
+ y="7634.8848"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text168">struct</text>
+ id="text168"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="2250"
- y="6450"
- fill="#000000"
- font-family="Courier"
+ x="2214.9531"
+ y="7928.6011"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text170">rcu_data</text>
+ id="text170"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">rcu_data</text>
<!-- Text -->
<text
xml:space="preserve"
- x="1050"
- y="7350"
- fill="#000000"
- font-family="Courier"
+ x="1040.0886"
+ y="8809.749"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text172">struct</text>
+ id="text172"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="1050"
- y="7650"
- fill="#000000"
- font-family="Courier"
+ x="1040.0886"
+ y="9103.4648"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text174">rcu_data</text>
+ id="text174"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">rcu_data</text>
<!-- Text -->
<text
xml:space="preserve"
- x="5250"
- y="6150"
- fill="#000000"
- font-family="Courier"
+ x="5152.1138"
+ y="7634.8848"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text176">struct</text>
+ id="text176"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="5250"
- y="6450"
- fill="#000000"
- font-family="Courier"
+ x="5152.1138"
+ y="7928.6011"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text178">rcu_data</text>
+ id="text178"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">rcu_data</text>
<!-- Text -->
<text
xml:space="preserve"
- x="4050"
- y="7350"
- fill="#000000"
- font-family="Courier"
+ x="3977.2495"
+ y="8809.749"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text180">struct</text>
+ id="text180"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="4050"
- y="7650"
- fill="#000000"
- font-family="Courier"
+ x="3977.2495"
+ y="9103.4648"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text182">rcu_data</text>
+ id="text182"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:middle;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">rcu_data</text>
<!-- Text -->
<text
xml:space="preserve"
- x="450"
- y="1800"
- fill="#000000"
- font-family="Courier"
+ x="452.6564"
+ y="3376.0012"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="start"
- id="text184">struct rcu_state</text>
+ id="text184"
+ style="font-style:normal;font-weight:bold;font-size:187.978302px;font-family:Courier;text-anchor:start;fill:#000000;stroke-width:0.02447634in"
+ transform="scale(1.0213945,0.97905363)">struct rcu_state</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="10050"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text186">struct</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="10350"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text188">rcu_dynticks</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="10050"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text190">struct</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="10350"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text192">rcu_dynticks</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="2400"
- y="8850"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text194">struct</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="2400"
- y="9150"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text196">rcu_dynticks</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="5400"
- y="8850"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text198">struct</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="5400"
- y="9150"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text200">rcu_dynticks</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="6900"
- y="300"
- fill="#000000"
- font-family="Helvetica"
- font-style="normal"
- font-weight="normal"
- font-size="192"
- text-anchor="end"
- id="text202">rcu_preempt</text>
<!-- Line -->
<polyline
points="5250,5850 5250,4864 "
- style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline204" />
+ style="stroke:#00d1d1;stroke-width:29.99464035;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline204"
+ transform="matrix(1,0,0,0.95854605,12.340758,1579.9033)" />
<!-- Arrowhead on XXXpoint 5250 5850 - 5250 4740-->
+ <path
+ style="fill:none;stroke:#000000;stroke-width:34.24744034;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#marker1177)"
+ d="m 6000.1472,7564.2558 c 1498.5508,0 1498.5508,0 1498.5508,0 v 520.0252"
+ id="path886"
+ inkscape:connector-curvature="0" />
</g>
</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
index 1d2051c0c3fc..18f179807563 100644
--- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
+++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
@@ -23,8 +23,6 @@ to each other.
The <tt>rcu_segcblist</tt> Structure</a>
<li> <a href="#The rcu_data Structure">
The <tt>rcu_data</tt> Structure</a>
-<li> <a href="#The rcu_dynticks Structure">
- The <tt>rcu_dynticks</tt> Structure</a>
<li> <a href="#The rcu_head Structure">
The <tt>rcu_head</tt> Structure</a>
<li> <a href="#RCU-Specific Fields in the task_struct Structure">
@@ -127,9 +125,11 @@ CPUs, RCU would configure the <tt>rcu_node</tt> tree as follows:
</p><p>RCU currently permits up to a four-level tree, which on a 64-bit system
accommodates up to 4,194,304 CPUs, though only a mere 524,288 CPUs for
32-bit systems.
-On the other hand, you can set <tt>CONFIG_RCU_FANOUT</tt> to be
-as small as 2 if you wish, which would permit only 16 CPUs, which
-is useful for testing.
+On the other hand, you can set both <tt>CONFIG_RCU_FANOUT</tt> and
+<tt>CONFIG_RCU_FANOUT_LEAF</tt> to be as small as 2, which would result
+in a 16-CPU test using a four-level tree.
+This can be useful for testing large-system capabilities on small test
+machines.
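+
+</p><p>For example, such a test build might use a <tt>.config</tt>
+fragment along the following lines (an illustrative sketch;
+<tt>CONFIG_RCU_EXPERT</tt> must be enabled before the fanout options
+can be overridden, and the exact option set depends on kernel version):
+
+<pre>
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FANOUT=2
+CONFIG_RCU_FANOUT_LEAF=2
+</pre>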
</p><p>This multi-level combining tree allows us to get most of the
performance and scalability
@@ -154,44 +154,9 @@ on that root <tt>rcu_node</tt> structure remains acceptably low.
keeping lock contention under control at all tree levels regardless
of the level of loading on the system.
-</p><p>The Linux kernel actually supports multiple flavors of RCU
-running concurrently, so RCU builds separate data structures for each
-flavor.
-For example, for <tt>CONFIG_TREE_RCU=y</tt> kernels, RCU provides
-rcu_sched and rcu_bh, as shown below:
-
-</p><p><img src="BigTreeClassicRCUBH.svg" alt="BigTreeClassicRCUBH.svg" width="33%">
-
-</p><p>Energy efficiency is increasingly important, and for that
-reason the Linux kernel provides <tt>CONFIG_NO_HZ_IDLE</tt>, which
-turns off the scheduling-clock interrupts on idle CPUs, which in
-turn allows those CPUs to attain deeper sleep states and to consume
-less energy.
-CPUs whose scheduling-clock interrupts have been turned off are
-said to be in <i>dyntick-idle mode</i>.
-RCU must handle dyntick-idle CPUs specially
-because RCU would otherwise wake up each CPU on every grace period,
-which would defeat the whole purpose of <tt>CONFIG_NO_HZ_IDLE</tt>.
-RCU uses the <tt>rcu_dynticks</tt> structure to track
-which CPUs are in dyntick idle mode, as shown below:
-
-</p><p><img src="BigTreeClassicRCUBHdyntick.svg" alt="BigTreeClassicRCUBHdyntick.svg" width="33%">
-
-</p><p>However, if a CPU is in dyntick-idle mode, it is in that mode
-for all flavors of RCU.
-Therefore, a single <tt>rcu_dynticks</tt> structure is allocated per
-CPU, and all of a given CPU's <tt>rcu_data</tt> structures share
-that <tt>rcu_dynticks</tt>, as shown in the figure.
-
-</p><p>Kernels built with <tt>CONFIG_PREEMPT_RCU</tt> support
-rcu_preempt in addition to rcu_sched and rcu_bh, as shown below:
-
-</p><p><img src="BigTreePreemptRCUBHdyntick.svg" alt="BigTreePreemptRCUBHdyntick.svg" width="35%">
-
</p><p>RCU updaters wait for normal grace periods by registering
RCU callbacks, either directly via <tt>call_rcu()</tt> and
friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>),
-there being a separate interface per flavor of RCU)
or indirectly via <tt>synchronize_rcu()</tt> and friends.
RCU callbacks are represented by <tt>rcu_head</tt> structures,
which are queued on <tt>rcu_data</tt> structures while they are
@@ -214,9 +179,6 @@ its own synchronization:
<li> Each <tt>rcu_node</tt> structure has a spinlock.
<li> The fields in <tt>rcu_data</tt> are private to the corresponding
CPU, although a few can be read and written by other CPUs.
-<li> Similarly, the fields in <tt>rcu_dynticks</tt> are private
- to the corresponding CPU, although a few can be read by
- other CPUs.
</ol>
<p>It is important to note that different data structures can have
@@ -272,11 +234,6 @@ follows:
access to this information from the corresponding CPU.
Finally, this structure records past dyntick-idle state
for the corresponding CPU and also tracks statistics.
-<li> <tt>rcu_dynticks</tt>:
- This per-CPU structure tracks the current dyntick-idle
- state for the corresponding CPU.
- Unlike the other three structures, the <tt>rcu_dynticks</tt>
- structure is not replicated per RCU flavor.
<li> <tt>rcu_head</tt>:
This structure represents RCU callbacks, and is the
only structure allocated and managed by RCU users.
@@ -287,14 +244,14 @@ follows:
<p>If all you wanted from this article was a general notion of how
RCU's data structures are related, you are done.
Otherwise, each of the following sections give more details on
-the <tt>rcu_state</tt>, <tt>rcu_node</tt>, <tt>rcu_data</tt>,
-and <tt>rcu_dynticks</tt> data structures.
+the <tt>rcu_state</tt>, <tt>rcu_node</tt>, and <tt>rcu_data</tt> data
+structures.
<h3><a name="The rcu_state Structure">
The <tt>rcu_state</tt> Structure</a></h3>
<p>The <tt>rcu_state</tt> structure is the base structure that
-represents a flavor of RCU.
+represents the state of RCU in the system.
This structure forms the interconnection between the
<tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
tracks grace periods, contains the lock used to
@@ -389,7 +346,7 @@ sequence number.
The bottom two bits are the state of the current grace period,
which can be zero for not yet started or one for in progress.
In other words, if the bottom two bits of <tt>-&gt;gp_seq</tt> are
-zero, the corresponding flavor of RCU is idle.
+zero, then RCU is idle.
Any other value in the bottom two bits indicates that something is broken.
This field is protected by the root <tt>rcu_node</tt> structure's
<tt>-&gt;lock</tt> field.
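+
+<p>A rough sketch of this encoding, as reflected by the helpers in
+<tt>kernel/rcu/rcu.h</tt> (shown here slightly simplified):
+
+<pre>
+#define RCU_SEQ_CTR_SHIFT  2
+#define RCU_SEQ_STATE_MASK ((1 &lt;&lt; RCU_SEQ_CTR_SHIFT) - 1)
+
+/* Zero means idle, one means a grace period is in progress. */
+static inline int rcu_seq_state(unsigned long s)
+{
+	return s &amp; RCU_SEQ_STATE_MASK;
+}
+</pre>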
@@ -419,10 +376,10 @@ as follows:
grace period in jiffies.
It is protected by the root <tt>rcu_node</tt>'s <tt>-&gt;lock</tt>.
-<p>The <tt>-&gt;name</tt> field points to the name of the RCU flavor
-(for example, &ldquo;rcu_sched&rdquo;), and is constant.
-The <tt>-&gt;abbr</tt> field contains a one-character abbreviation,
-for example, &ldquo;s&rdquo; for RCU-sched.
+<p>The <tt>-&gt;name</tt> and <tt>-&gt;abbr</tt> fields distinguish
+between preemptible RCU (&ldquo;rcu_preempt&rdquo; and &ldquo;p&rdquo;)
+and non-preemptible RCU (&ldquo;rcu_sched&rdquo; and &ldquo;s&rdquo;).
+These fields are used for diagnostic and tracing purposes.
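+
+<p>As a sketch of how these are selected at build time (simplified from
+<tt>kernel/rcu/tree.c</tt>; the exact macro names are an assumption
+about this version of the code):
+
+<pre>
+#ifdef CONFIG_PREEMPT_RCU
+#define RCU_ABBR      'p'
+#define RCU_NAME_RAW  "rcu_preempt"
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+#define RCU_ABBR      's'
+#define RCU_NAME_RAW  "rcu_sched"
+#endif
+</pre>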
<h3><a name="The rcu_node Structure">
The <tt>rcu_node</tt> Structure</a></h3>
@@ -971,25 +928,31 @@ this <tt>rcu_segcblist</tt> structure, <i>not</i> the <tt>-&gt;head</tt>
pointer.
The reason for this is that all the ready-to-invoke callbacks
(that is, those in the <tt>RCU_DONE_TAIL</tt> segment) are extracted
-all at once at callback-invocation time.
+all at once at callback-invocation time (<tt>rcu_do_batch</tt>), so
+that <tt>-&gt;head</tt> may be set to NULL if no not-yet-done
+callbacks remain in the <tt>rcu_segcblist</tt>.
If callback invocation must be postponed, for example, because a
high-priority process just woke up on this CPU, then the remaining
-callbacks are placed back on the <tt>RCU_DONE_TAIL</tt> segment.
-Either way, the <tt>-&gt;len</tt> and <tt>-&gt;len_lazy</tt> counts
-are adjusted after the corresponding callbacks have been invoked, and so
-again it is the <tt>-&gt;len</tt> count that accurately reflects whether
-or not there are callbacks associated with this <tt>rcu_segcblist</tt>
-structure.
+callbacks are placed back on the <tt>RCU_DONE_TAIL</tt> segment and
+<tt>-&gt;head</tt> once again points to the start of the segment.
+In short, <tt>-&gt;head</tt> can briefly be <tt>NULL</tt> even though
+the CPU has had callbacks queued the entire time.
+Therefore, it is not appropriate to test the <tt>-&gt;head</tt> pointer
+for <tt>NULL</tt>.
+
+<p>In contrast, the <tt>-&gt;len</tt> and <tt>-&gt;len_lazy</tt> counts
+are adjusted only after the corresponding callbacks have been invoked.
+This means that the <tt>-&gt;len</tt> count is zero only if
+the <tt>rcu_segcblist</tt> structure really is devoid of callbacks.
Of course, off-CPU sampling of the <tt>-&gt;len</tt> count requires
-the use of appropriate synchronization, for example, memory barriers.
+careful use of appropriate synchronization, for example, memory barriers.
This synchronization can be a bit subtle, particularly in the case
of <tt>rcu_barrier()</tt>.
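+
+<p>A hypothetical helper illustrating the point: it is
+<tt>-&gt;len</tt>, not <tt>-&gt;head</tt>, that reliably indicates
+whether callbacks are present.
+
+<pre>
+/* Hypothetical, for illustration only; off-CPU callers still
+   need appropriate synchronization, as noted above. */
+static inline bool segcblist_has_cbs(struct rcu_segcblist *rsclp)
+{
+	return READ_ONCE(rsclp-&gt;len) != 0;
+}
+</pre>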
<h3><a name="The rcu_data Structure">
The <tt>rcu_data</tt> Structure</a></h3>
-<p>The <tt>rcu_data</tt> maintains the per-CPU state for the
-corresponding flavor of RCU.
+<p>The <tt>rcu_data</tt> structure maintains the per-CPU state for the
+RCU subsystem.
The fields in this structure may be accessed only from the corresponding
CPU (and from tracing) unless otherwise stated.
This structure is the
@@ -1015,30 +978,19 @@ as follows:
<pre>
1 int cpu;
- 2 struct rcu_state *rsp;
- 3 struct rcu_node *mynode;
- 4 struct rcu_dynticks *dynticks;
- 5 unsigned long grpmask;
- 6 bool beenonline;
+ 2 struct rcu_node *mynode;
+ 3 unsigned long grpmask;
+ 4 bool beenonline;
</pre>
<p>The <tt>-&gt;cpu</tt> field contains the number of the
-corresponding CPU, the <tt>-&gt;rsp</tt> pointer references
-the corresponding <tt>rcu_state</tt> structure (and is most frequently
-used to locate the name of the corresponding flavor of RCU for tracing),
-and the <tt>-&gt;mynode</tt> field references the corresponding
-<tt>rcu_node</tt> structure.
+corresponding CPU and the <tt>-&gt;mynode</tt> field references the
+corresponding <tt>rcu_node</tt> structure.
The <tt>-&gt;mynode</tt> is used to propagate quiescent states
up the combining tree.
-<p>The <tt>-&gt;dynticks</tt> pointer references the
-<tt>rcu_dynticks</tt> structure corresponding to this
-CPU.
-Recall that a single per-CPU instance of the <tt>rcu_dynticks</tt>
-structure is shared among all flavors of RCU.
-These first four fields are constant and therefore require not
-synchronization.
+These two fields are constant and therefore do not require synchronization.
-</p><p>The <tt>-&gt;grpmask</tt> field indicates the bit in
+<p>The <tt>-&gt;grpmask</tt> field indicates the bit in
the <tt>-&gt;mynode-&gt;qsmask</tt> corresponding to this
<tt>rcu_data</tt> structure, and is also used when propagating
quiescent states.
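+
+<p>As a sketch of how that bit is derived (the exact helper used for
+this computation has varied across kernel versions):
+
+<pre>
+/* One bit per CPU within the leaf rcu_node's -&gt;qsmask. */
+rdp-&gt;grpmask = 1UL &lt;&lt; (rdp-&gt;cpu - rdp-&gt;mynode-&gt;grplo);
+</pre>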
@@ -1057,12 +1009,12 @@ as follows:
3 bool cpu_no_qs;
4 bool core_needs_qs;
5 bool gpwrap;
- 6 unsigned long rcu_qs_ctr_snap;
</pre>
-<p>The <tt>-&gt;gp_seq</tt> and <tt>-&gt;gp_seq_needed</tt>
-fields are the counterparts of the fields of the same name
-in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
+<p>The <tt>-&gt;gp_seq</tt> field is the counterpart of the field of the same
+name in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures. The
+<tt>-&gt;gp_seq_needed</tt> field is the counterpart of the field of the same
+name in the <tt>rcu_node</tt> structure.
They may each lag up to one behind their <tt>rcu_node</tt>
counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
<tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
@@ -1103,10 +1055,6 @@ CPU has remained idle for so long that the
<tt>gp_seq</tt> counter is in danger of overflow, which
will cause the CPU to disregard the values of its counters on
its next exit from idle.
-Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
-cases where a given operation has resulted in a quiescent state
-for all flavors of RCU, for example, <tt>cond_resched()</tt>
-when RCU has indicated a need for quiescent states.
<h5>RCU Callback Handling</h5>
@@ -1179,26 +1127,22 @@ Finally, the <tt>-&gt;dynticks_fqs</tt> field is used to
count the number of times this CPU is determined to be in
dyntick-idle state, and is used for tracing and debugging purposes.
-<h3><a name="The rcu_dynticks Structure">
-The <tt>rcu_dynticks</tt> Structure</a></h3>
-
-<p>The <tt>rcu_dynticks</tt> maintains the per-CPU dyntick-idle state
-for the corresponding CPU.
-Unlike the other structures, <tt>rcu_dynticks</tt> is not
-replicated over the different flavors of RCU.
-The fields in this structure may be accessed only from the corresponding
-CPU (and from tracing) unless otherwise stated.
-Its fields are as follows:
+<p>
+This portion of the <tt>rcu_data</tt> structure is declared as follows:
<pre>
1 long dynticks_nesting;
2 long dynticks_nmi_nesting;
3 atomic_t dynticks;
4 bool rcu_need_heavy_qs;
- 5 unsigned long rcu_qs_ctr;
- 6 bool rcu_urgent_qs;
+ 5 bool rcu_urgent_qs;
</pre>
+<p>These fields in the <tt>rcu_data</tt> structure maintain the per-CPU dyntick-idle
+state for the corresponding CPU.
+The fields may be accessed only from the corresponding CPU (and from tracing)
+unless otherwise stated.
+
<p>The <tt>-&gt;dynticks_nesting</tt> field counts the
nesting depth of process execution, so that in normal circumstances
this counter has value zero or one.
@@ -1240,19 +1184,12 @@ it is willing to call for heavy-weight dyntick-counter operations.
This flag is checked by RCU's context-switch and <tt>cond_resched()</tt>
code, which provide a momentary idle sojourn in response.
-</p><p>The <tt>-&gt;rcu_qs_ctr</tt> field is used to record
-quiescent states from <tt>cond_resched()</tt>.
-Because <tt>cond_resched()</tt> can execute quite frequently, this
-must be quite lightweight, as in a non-atomic increment of this
-per-CPU field.
-
</p><p>Finally, the <tt>-&gt;rcu_urgent_qs</tt> field is used to record
-the fact that the RCU core code would really like to see a quiescent
-state from the corresponding CPU, with the various other fields indicating
-just how badly RCU wants this quiescent state.
-This flag is checked by RCU's context-switch and <tt>cond_resched()</tt>
-code, which, if nothing else, non-atomically increment <tt>-&gt;rcu_qs_ctr</tt>
-in response.
+the fact that the RCU core code would really like to see a quiescent state from
+the corresponding CPU, with the various other fields indicating just how badly
+RCU wants this quiescent state.
+This flag is checked by RCU's context-switch path
+(<tt>rcu_note_context_switch</tt>) and by the <tt>cond_resched()</tt> code.
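+
+<p>A simplified sketch of that check (not the literal kernel code;
+<tt>rcu_data</tt> here is the per-CPU variable of that name):
+
+<pre>
+if (unlikely(raw_cpu_read(rcu_data.rcu_urgent_qs))) {
+	/* ...report a quiescent state for this CPU... */
+}
+</pre>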
<table>
<tr><th>&nbsp;</th></tr>
@@ -1425,11 +1362,11 @@ the last part of the array, thus traversing only the leaf
<h3><a name="Summary">
Summary</a></h3>
-So each flavor of RCU is represented by an <tt>rcu_state</tt> structure,
+So the state of RCU is represented by an <tt>rcu_state</tt> structure,
which contains a combining tree of <tt>rcu_node</tt> and
<tt>rcu_data</tt> structures.
Finally, in <tt>CONFIG_NO_HZ_IDLE</tt> kernels, each CPU's dyntick-idle
-state is tracked by an <tt>rcu_dynticks</tt> structure.
+state is tracked by dynticks-related fields in the <tt>rcu_data</tt> structure.
If you made it this far, you are well prepared to read the code
walkthroughs in the other articles in this series.
diff --git a/Documentation/RCU/Design/Data-Structures/blkd_task.svg b/Documentation/RCU/Design/Data-Structures/blkd_task.svg
index 00e810bb8419..bed13e9ecab8 100644
--- a/Documentation/RCU/Design/Data-Structures/blkd_task.svg
+++ b/Documentation/RCU/Design/Data-Structures/blkd_task.svg
@@ -14,12 +14,12 @@
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="10.1in"
- height="8.6in"
- viewBox="-44 -44 12088 10288"
+ height="6.5999999in"
+ viewBox="-44 -44 12088 7895.4414"
id="svg2"
version="1.1"
- inkscape:version="0.48.4 r9939"
- sodipodi:docname="blkd_task.fig">
+ inkscape:version="0.92.2pre0 (973e216, 2017-07-25)"
+ sodipodi:docname="blkd_task.svg">
<metadata
id="metadata212">
<rdf:RDF>
@@ -37,15 +37,16 @@
<marker
inkscape:stockid="Arrow1Mend"
orient="auto"
- refY="0.0"
- refX="0.0"
+ refY="0"
+ refX="0"
id="Arrow1Mend"
- style="overflow:visible;">
+ style="overflow:visible">
<path
id="path3970"
- d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
- style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
- transform="scale(0.4) rotate(180) translate(10,0)" />
+ d="M 0,0 5,-5 -12.5,0 5,5 Z"
+ style="fill-rule:evenodd;stroke:#000000;stroke-width:1.00000003pt"
+ transform="matrix(-0.4,0,0,-0.4,-4,0)"
+ inkscape:connector-curvature="0" />
</marker>
</defs>
<sodipodi:namedview
@@ -57,787 +58,574 @@
guidetolerance="10"
inkscape:pageopacity="0"
inkscape:pageshadow="2"
- inkscape:window-width="1087"
- inkscape:window-height="1144"
+ inkscape:window-width="1920"
+ inkscape:window-height="1019"
id="namedview208"
showgrid="false"
inkscape:zoom="1.0495049"
- inkscape:cx="454.50003"
- inkscape:cy="387.00003"
- inkscape:window-x="833"
- inkscape:window-y="28"
- inkscape:window-maximized="0"
- inkscape:current-layer="g4" />
+ inkscape:cx="456.40569"
+ inkscape:cy="348.88682"
+ inkscape:window-x="0"
+ inkscape:window-y="0"
+ inkscape:window-maximized="1"
+ inkscape:current-layer="g4"
+ showguides="false" />
<g
- style="stroke-width:.025in; fill:none"
- id="g4">
+ style="fill:none;stroke-width:0.025in"
+ id="g4"
+ transform="translate(0,-2393.6637)">
<!-- Line: box -->
- <rect
- x="450"
- y="0"
- width="6300"
- height="7350"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
- id="rect6" />
<!-- Line: box -->
- <rect
- x="4950"
- y="4950"
- width="1500"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
- id="rect8" />
<!-- Line: box -->
- <rect
- x="750"
- y="600"
- width="5700"
- height="3750"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
- id="rect10" />
<!-- Line -->
- <polyline
- points="5250,8100 5688,5912 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline12" />
<!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
<polyline
points="5714 6068 5704 5822 5598 6044 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline14" />
+ style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8"
+ id="polyline14"
+ transform="translate(23.757862,2185.7233)" />
<!-- Line -->
- <polyline
- points="4050,9300 4486,7262 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline16" />
<!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
<polyline
points="4514 7418 4506 7172 4396 7394 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline18" />
+ style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8"
+ id="polyline18"
+ transform="translate(23.757862,2185.7233)" />
<!-- Line -->
- <polyline
- points="1040,9300 1476,7262 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline20" />
<!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
<polyline
points="1504 7418 1496 7172 1386 7394 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline22" />
+ style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8"
+ id="polyline22"
+ transform="translate(23.757862,2185.7233)" />
<!-- Line -->
- <polyline
- points="2240,8100 2676,6062 "
- style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
- id="polyline24" />
<!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
<polyline
points="2704 6218 2696 5972 2586 6194 "
- style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
- id="polyline26" />
+ style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8"
+ id="polyline26"
+ transform="translate(23.757862,2185.7233)" />
<!-- Line: box -->
<rect
- x="0"
- y="450"
+ x="23.757858"
+ y="2635.7231"
width="6300"
height="7350"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+ style="fill:#ffffff;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect28" />
<!-- Line: box -->
<rect
- x="300"
- y="1050"
+ x="323.75787"
+ y="3235.7231"
width="5700"
height="3750"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+ style="fill:#ffff00;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect30" />
<!-- Line -->
<polyline
points="1350,3450 2350,2590 "
- style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline32" />
+ style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline32"
+ transform="translate(23.757862,2185.7233)" />
<!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
<!-- Line -->
<polyline
points="4950,3450 3948,2590 "
- style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline36" />
+ style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline36"
+ transform="translate(23.757862,2185.7233)" />
<!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
<!-- Line -->
<polyline
points="4050,6600 4050,4414 "
- style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline40" />
+ style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline40"
+ transform="translate(23.757862,2185.7233)" />
<!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
<!-- Line -->
<polyline
points="1050,6600 1050,4414 "
- style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline44" />
+ style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline44"
+ transform="translate(23.757862,2185.7233)" />
<!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
<!-- Line -->
<polyline
points="2250,5400 2250,4414 "
- style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline48" />
+ style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline48"
+ transform="translate(23.757862,2185.7233)" />
<!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
<!-- Line -->
- <polyline
- points="2250,8100 2250,6364 "
- style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline52" />
<!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
<!-- Line -->
- <polyline
- points="1050,9300 1050,7564 "
- style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline56" />
<!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
<!-- Line -->
- <polyline
- points="4050,9300 4050,7564 "
- style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline60" />
<!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
<!-- Line -->
- <polyline
- points="5250,8100 5250,6364 "
- style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline64" />
<!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
<!-- Circle -->
<circle
- cx="2850"
- cy="3900"
+ cx="2873.7581"
+ cy="6085.7236"
r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
+ style="fill:#000000;stroke:#000000;stroke-width:14"
id="circle68" />
<!-- Circle -->
<circle
- cx="3150"
- cy="3900"
+ cx="3173.7581"
+ cy="6085.7236"
r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
+ style="fill:#000000;stroke:#000000;stroke-width:14"
id="circle70" />
<!-- Circle -->
<circle
- cx="3450"
- cy="3900"
+ cx="3473.7581"
+ cy="6085.7236"
r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
+ style="fill:#000000;stroke:#000000;stroke-width:14"
id="circle72" />
<!-- Circle -->
<circle
- cx="1350"
- cy="5100"
+ cx="1373.7578"
+ cy="7285.7236"
r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
+ style="fill:#000000;stroke:#000000;stroke-width:14"
id="circle74" />
<!-- Circle -->
<circle
- cx="1650"
- cy="5100"
+ cx="1673.7578"
+ cy="7285.7236"
r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
+ style="fill:#000000;stroke:#000000;stroke-width:14"
id="circle76" />
<!-- Circle -->
<circle
- cx="1950"
- cy="5100"
+ cx="1973.7578"
+ cy="7285.7236"
r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
+ style="fill:#000000;stroke:#000000;stroke-width:14"
id="circle78" />
<!-- Circle -->
<circle
- cx="4350"
- cy="5100"
+ cx="4373.7578"
+ cy="7285.7236"
r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
+ style="fill:#000000;stroke:#000000;stroke-width:14"
id="circle80" />
<!-- Circle -->
<circle
- cx="4650"
- cy="5100"
+ cx="4673.7578"
+ cy="7285.7236"
r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
+ style="fill:#000000;stroke:#000000;stroke-width:14"
id="circle82" />
<!-- Circle -->
<circle
- cx="4950"
- cy="5100"
+ cx="4973.7578"
+ cy="7285.7236"
r="76"
- style="fill:#000000;stroke:#000000;stroke-width:14;"
+ style="fill:#000000;stroke:#000000;stroke-width:14"
id="circle84" />
<!-- Line: box -->
<rect
- x="750"
- y="3450"
+ x="773.75781"
+ y="5635.7236"
width="1800"
height="900"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+ style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect86" />
<!-- Line: box -->
<rect
- x="300"
- y="6600"
+ x="323.75787"
+ y="8785.7227"
width="1500"
height="900"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+ style="fill:#87cfff;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect88" />
<!-- Line: box -->
<rect
- x="4500"
- y="5400"
+ x="4523.7578"
+ y="7585.7236"
width="1500"
height="900"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+ style="fill:#87cfff;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect90" />
<!-- Line: box -->
<rect
- x="3300"
- y="6600"
+ x="3323.7581"
+ y="8785.7227"
width="1500"
height="900"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+ style="fill:#87cfff;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect92" />
<!-- Line: box -->
<rect
- x="2250"
- y="1650"
+ x="2273.7581"
+ y="3835.7231"
width="1800"
height="900"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+ style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect94" />
<!-- Line: box -->
- <rect
- x="0"
- y="9300"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect96" />
<!-- Line: box -->
- <rect
- x="1350"
- y="8100"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect98" />
<!-- Line: box -->
- <rect
- x="3000"
- y="9300"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect100" />
<!-- Line: box -->
- <rect
- x="4350"
- y="8100"
- width="2100"
- height="900"
- rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
- id="rect102" />
<!-- Line: box -->
<rect
- x="1500"
- y="5400"
+ x="1523.7578"
+ y="7585.7236"
width="1500"
height="900"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+ style="fill:#87cfff;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect104" />
<!-- Line -->
<polygon
- points="5550,3450 7350,2850 7350,5100 5550,4350 5550,3450 "
- style="stroke:#000000;stroke-width:14; stroke-linejoin:miter; stroke-linecap:butt; stroke-dasharray:120 120;fill:#ffbfbf; "
- id="polygon106" />
+ points="7350,2850 7350,5100 5550,4350 5550,3450 "
+ style="fill:#ffbfbf;stroke:#000000;stroke-width:14;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:120, 120"
+ id="polygon106"
+ transform="translate(23.757862,2185.7233)" />
<!-- Line -->
<polyline
points="9300,3150 10734,3150 "
- style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline108" />
+ style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline108"
+ transform="translate(23.757862,2185.7233)" />
<!-- Arrowhead on XXXpoint 9300 3150 - 10860 3150-->
<!-- Line: box -->
<rect
- x="10800"
- y="2850"
+ x="10823.758"
+ y="5035.7236"
width="1200"
height="750"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect112" />
<!-- Line -->
<polyline
points="11400,3600 11400,4284 "
- style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline114" />
+ style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline114"
+ transform="translate(23.757862,2185.7233)" />
<!-- Arrowhead on XXXpoint 11400 3600 - 11400 4410-->
<!-- Line: box -->
<rect
- x="10800"
- y="4350"
+ x="10823.758"
+ y="6535.7236"
width="1200"
height="750"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect118" />
<!-- Line -->
<polyline
points="11400,5100 11400,5784 "
- style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline120" />
+ style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline120"
+ transform="translate(23.757862,2185.7233)" />
<!-- Arrowhead on XXXpoint 11400 5100 - 11400 5910-->
<!-- Line: box -->
<rect
- x="10800"
- y="5850"
+ x="10823.758"
+ y="8035.7236"
width="1200"
height="750"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+ style="stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect124" />
<!-- Line -->
<polyline
points="9300,3900 9900,3900 9900,4650 10734,4650 "
- style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline126" />
+ style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline126"
+ transform="translate(23.757862,2185.7233)" />
<!-- Arrowhead on XXXpoint 9900 4650 - 10860 4650-->
<!-- Line -->
<polyline
points="9300,4650 9600,4650 9600,6150 10734,6150 "
- style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline130" />
+ style="stroke:#000000;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline130"
+ transform="translate(23.757862,2185.7233)" />
<!-- Arrowhead on XXXpoint 9600 6150 - 10860 6150-->
<!-- Text -->
- <text
- xml:space="preserve"
- x="6450"
- y="300"
- fill="#000000"
- font-family="Helvetica"
- font-style="normal"
- font-weight="normal"
- font-size="192"
- text-anchor="end"
- id="text134">rcu_bh</text>
<!-- Text -->
<text
xml:space="preserve"
- x="3150"
- y="1950"
- fill="#000000"
- font-family="Courier"
+ x="3173.7581"
+ y="4135.7231"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text136">struct</text>
+ id="text136"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="3150"
- y="2250"
- fill="#000000"
- font-family="Courier"
+ x="3173.7581"
+ y="4435.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text138">rcu_node</text>
+ id="text138"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_node</text>
<!-- Text -->
<text
xml:space="preserve"
- x="1650"
- y="3750"
- fill="#000000"
- font-family="Courier"
+ x="1673.7578"
+ y="5935.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text140">struct</text>
+ id="text140"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="1650"
- y="4050"
- fill="#000000"
- font-family="Courier"
+ x="1673.7578"
+ y="6235.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text142">rcu_node</text>
+ id="text142"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_node</text>
<!-- Text -->
<text
xml:space="preserve"
- x="2250"
- y="5700"
- fill="#000000"
- font-family="Courier"
+ x="2273.7581"
+ y="7885.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text144">struct</text>
+ id="text144"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="2250"
- y="6000"
- fill="#000000"
- font-family="Courier"
+ x="2273.7581"
+ y="8185.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text146">rcu_data</text>
+ id="text146"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_data</text>
<!-- Text -->
<text
xml:space="preserve"
- x="1050"
- y="6900"
- fill="#000000"
- font-family="Courier"
+ x="1073.7578"
+ y="9085.7227"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text148">struct</text>
+ id="text148"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="1050"
- y="7200"
- fill="#000000"
- font-family="Courier"
+ x="1073.7578"
+ y="9385.7227"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text150">rcu_data</text>
+ id="text150"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_data</text>
<!-- Text -->
<text
xml:space="preserve"
- x="5250"
- y="5700"
- fill="#000000"
- font-family="Courier"
+ x="5273.7578"
+ y="7885.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text152">struct</text>
+ id="text152"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="5250"
- y="6000"
- fill="#000000"
- font-family="Courier"
+ x="5273.7578"
+ y="8185.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text154">rcu_data</text>
+ id="text154"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_data</text>
<!-- Text -->
<text
xml:space="preserve"
- x="4050"
- y="6900"
- fill="#000000"
- font-family="Courier"
+ x="4073.7578"
+ y="9085.7227"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text156">struct</text>
+ id="text156"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="4050"
- y="7200"
- fill="#000000"
- font-family="Courier"
+ x="4073.7578"
+ y="9385.7227"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text158">rcu_data</text>
+ id="text158"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_data</text>
<!-- Text -->
<text
xml:space="preserve"
- x="450"
- y="1350"
- fill="#000000"
- font-family="Courier"
+ x="473.75784"
+ y="3535.7231"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="start"
- id="text160">struct rcu_state</text>
+ id="text160"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:start;fill:#000000">struct rcu_state</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="9600"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text162">struct</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="1050"
- y="9900"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text164">rcu_dynticks</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="9600"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text166">struct</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="4050"
- y="9900"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text168">rcu_dynticks</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="2400"
- y="8400"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text170">struct</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="2400"
- y="8700"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text172">rcu_dynticks</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="5400"
- y="8400"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text174">struct</text>
<!-- Text -->
- <text
- xml:space="preserve"
- x="5400"
- y="8700"
- fill="#000000"
- font-family="Courier"
- font-style="normal"
- font-weight="bold"
- font-size="192"
- text-anchor="middle"
- id="text176">rcu_dynticks</text>
<!-- Text -->
<text
xml:space="preserve"
- x="6000"
- y="750"
- fill="#000000"
- font-family="Helvetica"
+ x="6023.7578"
+ y="2935.7231"
font-style="normal"
font-weight="normal"
font-size="192"
- text-anchor="end"
- id="text178">rcu_sched</text>
+ id="text178"
+ style="font-style:normal;font-weight:normal;font-size:192px;font-family:Helvetica;text-anchor:end;fill:#000000">rcu_state</text>
<!-- Text -->
<text
xml:space="preserve"
- x="11400"
- y="3300"
- fill="#000000"
- font-family="Helvetica"
+ x="11423.758"
+ y="5485.7236"
font-style="normal"
font-weight="normal"
font-size="216"
- text-anchor="middle"
- id="text180">T3</text>
+ id="text180"
+ style="font-style:normal;font-weight:normal;font-size:216px;font-family:Helvetica;text-anchor:middle;fill:#000000">T3</text>
<!-- Text -->
<text
xml:space="preserve"
- x="11400"
- y="4800"
- fill="#000000"
- font-family="Helvetica"
+ x="11423.758"
+ y="6985.7236"
font-style="normal"
font-weight="normal"
font-size="216"
- text-anchor="middle"
- id="text182">T2</text>
+ id="text182"
+ style="font-style:normal;font-weight:normal;font-size:216px;font-family:Helvetica;text-anchor:middle;fill:#000000">T2</text>
<!-- Text -->
<text
xml:space="preserve"
- x="11400"
- y="6300"
- fill="#000000"
- font-family="Helvetica"
+ x="11423.758"
+ y="8485.7227"
font-style="normal"
font-weight="normal"
font-size="216"
- text-anchor="middle"
- id="text184">T1</text>
+ id="text184"
+ style="font-style:normal;font-weight:normal;font-size:216px;font-family:Helvetica;text-anchor:middle;fill:#000000">T1</text>
<!-- Line -->
<polyline
points="5250,5400 5250,4414 "
- style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
- id="polyline186" />
+ style="stroke:#00d1d1;stroke-width:30.00057793;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+ id="polyline186"
+ transform="translate(23.757862,2185.7233)" />
<!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
<!-- Line: box -->
<rect
- x="3750"
- y="3450"
+ x="3773.7581"
+ y="5635.7236"
width="1800"
height="900"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+ style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect190" />
<!-- Line: box -->
<rect
- x="7350"
- y="2850"
+ x="7373.7578"
+ y="5035.7236"
width="1950"
height="750"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+ style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect192" />
<!-- Line: box -->
<rect
- x="7350"
- y="3600"
+ x="7373.7578"
+ y="5785.7236"
width="1950"
height="750"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+ style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect194" />
<!-- Line: box -->
<rect
- x="7350"
- y="4350"
+ x="7373.7578"
+ y="6535.7236"
width="1950"
height="750"
rx="0"
- style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+ style="fill:#ffbfbf;stroke:#000000;stroke-width:30;stroke-linecap:butt;stroke-linejoin:miter"
id="rect196" />
<!-- Text -->
<text
xml:space="preserve"
- x="4650"
- y="4050"
- fill="#000000"
- font-family="Courier"
+ x="4673.7578"
+ y="6235.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text198">rcu_node</text>
+ id="text198"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">rcu_node</text>
<!-- Text -->
<text
xml:space="preserve"
- x="4650"
- y="3750"
- fill="#000000"
- font-family="Courier"
+ x="4673.7578"
+ y="5935.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="middle"
- id="text200">struct</text>
+ id="text200"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:middle;fill:#000000">struct</text>
<!-- Text -->
<text
xml:space="preserve"
- x="7500"
- y="3300"
- fill="#000000"
- font-family="Courier"
+ x="7523.7578"
+ y="5485.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="start"
- id="text202">blkd_tasks</text>
+ id="text202"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:start;fill:#000000">blkd_tasks</text>
<!-- Text -->
<text
xml:space="preserve"
- x="7500"
- y="4050"
- fill="#000000"
- font-family="Courier"
+ x="7523.7578"
+ y="6235.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="start"
- id="text204">gp_tasks</text>
+ id="text204"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:start;fill:#000000">gp_tasks</text>
<!-- Text -->
<text
xml:space="preserve"
- x="7500"
- y="4800"
- fill="#000000"
- font-family="Courier"
+ x="7523.7578"
+ y="6985.7236"
font-style="normal"
font-weight="bold"
font-size="192"
- text-anchor="start"
- id="text206">exp_tasks</text>
+ id="text206"
+ style="font-style:normal;font-weight:bold;font-size:192px;font-family:Courier;text-anchor:start;fill:#000000">exp_tasks</text>
</g>
</svg>
diff --git a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
index e62c7c34a369..8e4f873b979f 100644
--- a/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
+++ b/Documentation/RCU/Design/Expedited-Grace-Periods/Expedited-Grace-Periods.html
@@ -160,9 +160,9 @@ was in flight.
If the CPU is idle, then <tt>sync_sched_exp_handler()</tt> reports
the quiescent state.
-<p>
-Otherwise, the handler invokes <tt>resched_cpu()</tt>, which forces
-a future context switch.
+<p> Otherwise, the handler forces a future context switch by setting the
+NEED_RESCHED flag in the current task's thread flags and updating the
+CPU's preempt counter.
At the time of the context switch, the CPU reports the quiescent state.
Should the CPU go offline first, it will report the quiescent state
at that time.
diff --git a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
index a346ce0116eb..e4d94fba6c89 100644
--- a/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
+++ b/Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.html
@@ -77,7 +77,7 @@ The key point is that the lock-acquisition functions, including
<tt>smp_mb__after_unlock_lock()</tt> immediately after successful
acquisition of the lock.
-<p>Therefore, for any given <tt>rcu_node</tt> struction, any access
+<p>Therefore, for any given <tt>rcu_node</tt> structure, any access
happening before one of the above lock-release functions will be seen
by all CPUs as happening before any access happening after a later
one of the above lock-acquisition functions.
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index 43c4e2f05f40..9fca73e03a98 100644
--- a/Documentation/RCU/Design/Requirements/Requirements.html
+++ b/Documentation/RCU/Design/Requirements/Requirements.html
@@ -900,8 +900,6 @@ Except where otherwise noted, these non-guarantees were premeditated.
Grace Periods Don't Partition Read-Side Critical Sections</a>
<li> <a href="#Read-Side Critical Sections Don't Partition Grace Periods">
Read-Side Critical Sections Don't Partition Grace Periods</a>
-<li> <a href="#Disabling Preemption Does Not Block Grace Periods">
- Disabling Preemption Does Not Block Grace Periods</a>
</ol>
<h3><a name="Readers Impose Minimal Ordering">Readers Impose Minimal Ordering</a></h3>
@@ -1259,54 +1257,6 @@ of RCU grace periods.
<tr><td>&nbsp;</td></tr>
</table>
-<h3><a name="Disabling Preemption Does Not Block Grace Periods">
-Disabling Preemption Does Not Block Grace Periods</a></h3>
-
-<p>
-There was a time when disabling preemption on any given CPU would block
-subsequent grace periods.
-However, this was an accident of implementation and is not a requirement.
-And in the current Linux-kernel implementation, disabling preemption
-on a given CPU in fact does not block grace periods, as Oleg Nesterov
-<a href="https://lkml.kernel.org/g/20150614193825.GA19582@redhat.com">demonstrated</a>.
-
-<p>
-If you need a preempt-disable region to block grace periods, you need to add
-<tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>, for example
-as follows:
-
-<blockquote>
-<pre>
- 1 preempt_disable();
- 2 rcu_read_lock();
- 3 do_something();
- 4 rcu_read_unlock();
- 5 preempt_enable();
- 6
- 7 /* Spinlocks implicitly disable preemption. */
- 8 spin_lock(&amp;mylock);
- 9 rcu_read_lock();
-10 do_something();
-11 rcu_read_unlock();
-12 spin_unlock(&amp;mylock);
-</pre>
-</blockquote>
-
-<p>
-In theory, you could enter the RCU read-side critical section first,
-but it is more efficient to keep the entire RCU read-side critical
-section contained in the preempt-disable region as shown above.
-Of course, RCU read-side critical sections that extend outside of
-preempt-disable regions will work correctly, but such critical sections
-can be preempted, which forces <tt>rcu_read_unlock()</tt> to do
-more work.
-And no, this is <i>not</i> an invitation to enclose all of your RCU
-read-side critical sections within preempt-disable regions, because
-doing so would degrade real-time response.
-
-<p>
-This non-requirement appeared with preemptible RCU.
-
<h2><a name="Parallelism Facts of Life">Parallelism Facts of Life</a></h2>
<p>
@@ -1381,6 +1331,7 @@ Classes of quality-of-implementation requirements are as follows:
<ol>
<li> <a href="#Specialization">Specialization</a>
<li> <a href="#Performance and Scalability">Performance and Scalability</a>
+<li> <a href="#Forward Progress">Forward Progress</a>
<li> <a href="#Composability">Composability</a>
<li> <a href="#Corner Cases">Corner Cases</a>
</ol>
@@ -1645,7 +1596,7 @@ used in place of <tt>synchronize_rcu()</tt> as follows:
16 struct foo *p;
17
18 spin_lock(&amp;gp_lock);
-19 p = rcu_dereference(gp);
+19 p = rcu_access_pointer(gp);
20 if (!p) {
21 spin_unlock(&amp;gp_lock);
22 return false;
@@ -1822,6 +1773,106 @@ so it is too early to tell whether they will stand the test of time.
RCU thus provides a range of tools to allow updaters to strike the
required tradeoff between latency, flexibility and CPU overhead.
+<h3><a name="Forward Progress">Forward Progress</a></h3>
+
+<p>
+In theory, delaying grace-period completion and callback invocation
+is harmless.
+In practice, not only are memory sizes finite but also callbacks sometimes
+do wakeups, and sufficiently deferred wakeups can be difficult
+to distinguish from system hangs.
+Therefore, RCU must provide a number of mechanisms to promote forward
+progress.
+
+<p>
+These mechanisms are not foolproof, nor can they be.
+For one simple example, an infinite loop in an RCU read-side critical
+section must by definition prevent later grace periods from ever completing.
+For a more involved example, consider a 64-CPU system built with
+<tt>CONFIG_RCU_NOCB_CPU=y</tt> and booted with <tt>rcu_nocbs=1-63</tt>,
+where CPUs&nbsp;1 through&nbsp;63 spin in tight loops that invoke
+<tt>call_rcu()</tt>.
+Even if these tight loops also contain calls to <tt>cond_resched()</tt>
+(thus allowing grace periods to complete), CPU&nbsp;0 simply will
+not be able to invoke callbacks as fast as the other 63 CPUs can
+register them, at least not until the system runs out of memory.
+In both of these examples, the Spiderman principle applies: With great
+power comes great responsibility.
+However, short of this level of abuse, RCU is required to
+ensure timely completion of grace periods and timely invocation of
+callbacks.
+
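+<p>
+For reference, a <tt>call_rcu()</tt> user of the kind discussed above
+registers callbacks roughly as follows (an illustrative sketch;
+<tt>struct foo</tt>, <tt>foo_reclaim()</tt>, and <tt>foo_remove()</tt>
+are hypothetical):
+
+<blockquote>
+<pre>
+ 1 struct foo {
+ 2   struct rcu_head rh;
+ 3   int data;
+ 4 };
+ 5
+ 6 /* Free the structure once a grace period has elapsed. */
+ 7 static void foo_reclaim(struct rcu_head *rhp)
+ 8 {
+ 9   kfree(container_of(rhp, struct foo, rh));
+10 }
+11
+12 /* Caller must already have unlinked p from all readers' view. */
+13 static void foo_remove(struct foo *p)
+14 {
+15   call_rcu(&amp;p->rh, foo_reclaim);
+16 }
+</pre>
+</blockquote>
+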
+<p>
+RCU takes the following steps to encourage timely completion of
+grace periods:
+
+<ol>
+<li> If a grace period fails to complete within 100&nbsp;milliseconds,
+ RCU causes future invocations of <tt>cond_resched()</tt> on
+ the holdout CPUs to provide an RCU quiescent state.
+ RCU also causes those CPUs' <tt>need_resched()</tt> invocations
+ to return <tt>true</tt>, but only after the corresponding CPU's
+	next scheduling-clock interrupt.
+<li> CPUs mentioned in the <tt>nohz_full</tt> kernel boot parameter
+ can run indefinitely in the kernel without scheduling-clock
+ interrupts, which defeats the above <tt>need_resched()</tt>
+	stratagem.
+ RCU will therefore invoke <tt>resched_cpu()</tt> on any
+ <tt>nohz_full</tt> CPUs still holding out after
+ 109&nbsp;milliseconds.
+<li> In kernels built with <tt>CONFIG_RCU_BOOST=y</tt>, if a given
+ task that has been preempted within an RCU read-side critical
+ section is holding out for more than 500&nbsp;milliseconds,
+ RCU will resort to priority boosting.
+<li> If a CPU is still holding out 10&nbsp;seconds into the grace
+ period, RCU will invoke <tt>resched_cpu()</tt> on it regardless
+ of its <tt>nohz_full</tt> state.
+</ol>
+
+<p>
+The above values are defaults for systems running with <tt>HZ=1000</tt>.
+They will vary as the value of <tt>HZ</tt> varies, and can also be
+changed using the relevant Kconfig options and kernel boot parameters.
+RCU currently does not do much sanity checking of these
+parameters, so please use caution when changing them.
+Note that these forward-progress measures are provided only for RCU,
+not for
+<a href="#Sleepable RCU">SRCU</a> or
+<a href="#Tasks RCU">Tasks RCU</a>.
+
+<p>
+RCU takes the following steps in <tt>call_rcu()</tt> to encourage timely
+invocation of callbacks when any given non-<tt>rcu_nocbs</tt> CPU has
+10,000 callbacks, or has 10,000 more callbacks than it had the last time
+encouragement was provided:
+
+<ol>
+<li> Starts a grace period, if one is not already in progress.
+<li> Forces immediate checking for quiescent states, rather than
+ waiting for three milliseconds to have elapsed since the
+ beginning of the grace period.
+<li> Immediately tags the CPU's callbacks with their grace period
+ completion numbers, rather than waiting for the <tt>RCU_SOFTIRQ</tt>
+ handler to get around to it.
+<li> Lifts callback-execution batch limits, which speeds up callback
+ invocation at the expense of degrading realtime response.
+</ol>
+
+<p>
+Again, these are default values when running at <tt>HZ=1000</tt>,
+and can be overridden.
+Again, these forward-progress measures are provided only for RCU,
+not for
+<a href="#Sleepable RCU">SRCU</a> or
+<a href="#Tasks RCU">Tasks RCU</a>.
+Even for RCU, callback-invocation forward progress for <tt>rcu_nocbs</tt>
+CPUs is much less well-developed, in part because workloads benefiting
+from <tt>rcu_nocbs</tt> CPUs tend to invoke <tt>call_rcu()</tt>
+relatively infrequently.
+If workloads emerge that need both <tt>rcu_nocbs</tt> CPUs and high
+<tt>call_rcu()</tt> invocation rates, then additional forward-progress
+work will be required.
+
<h3><a name="Composability">Composability</a></h3>
<p>
@@ -2272,7 +2323,7 @@ that meets this requirement.
Furthermore, NMI handlers can be interrupted by what appear to RCU
to be normal interrupts.
One way that this can happen is for code that directly invokes
-<tt>rcu_irq_enter()</tt> and </tt>rcu_irq_exit()</tt> to be called
+<tt>rcu_irq_enter()</tt> and <tt>rcu_irq_exit()</tt> to be called
from an NMI handler.
This astonishing fact of life prompted the current code structure,
which has <tt>rcu_irq_enter()</tt> invoking <tt>rcu_nmi_enter()</tt>
@@ -2294,7 +2345,7 @@ via <tt>del_timer_sync()</tt> or similar.
<p>
Unfortunately, there is no way to cancel an RCU callback;
once you invoke <tt>call_rcu()</tt>, the callback function is
-going to eventually be invoked, unless the system goes down first.
+eventually going to be invoked, unless the system goes down first.
Because it is normally considered socially irresponsible to crash the system
in response to a module unload request, we need some other way
to deal with in-flight RCU callbacks.
@@ -2424,23 +2475,37 @@ for context-switch-heavy <tt>CONFIG_NO_HZ_FULL=y</tt> workloads,
but there is room for further improvement.
<p>
-In the past, it was forbidden to disable interrupts across an
-<tt>rcu_read_unlock()</tt> unless that interrupt-disabled region
-of code also included the matching <tt>rcu_read_lock()</tt>.
-Violating this restriction could result in deadlocks involving the
-scheduler's runqueue and priority-inheritance spinlocks.
-This restriction was lifted when interrupt-disabled calls to
-<tt>rcu_read_unlock()</tt> started deferring the reporting of
-the resulting RCU-preempt quiescent state until the end of that
+It is forbidden to hold any of the scheduler's runqueue or priority-inheritance
+spinlocks across an <tt>rcu_read_unlock()</tt> unless interrupts have been
+disabled across the entire RCU read-side critical section, that is,
+up to and including the matching <tt>rcu_read_lock()</tt>.
+Violating this restriction can result in deadlocks involving these
+scheduler spinlocks.
+There was hope that this restriction might be lifted when interrupt-disabled
+calls to <tt>rcu_read_unlock()</tt> started deferring the reporting of
+the resulting RCU-preempt quiescent state until the end of the corresponding
interrupts-disabled region.
-This deferred reporting means that the scheduler's runqueue and
-priority-inheritance locks cannot be held while reporting an RCU-preempt
-quiescent state, which lifts the earlier restriction, at least from
-a deadlock perspective.
-Unfortunately, real-time systems using RCU priority boosting may
+Unfortunately, timely reporting of the corresponding quiescent state
+to expedited grace periods requires a call to <tt>raise_softirq()</tt>,
+which can acquire these scheduler spinlocks.
+In addition, real-time systems using RCU priority boosting
need this restriction to remain in effect because deferred
-quiescent-state reporting also defers deboosting, which in turn
-degrades real-time latencies.
+quiescent-state reporting would also defer deboosting, which in turn
+would degrade real-time latencies.
+
+<p>
+In theory, if a given RCU read-side critical section could be
+guaranteed to be less than one second in duration, holding a scheduler
+spinlock across that critical section's <tt>rcu_read_unlock()</tt>
+would require only that preemption be disabled across the entire
+RCU read-side critical section, not interrupts.
+Unfortunately, given the possibility of vCPU preemption, long-running
+interrupts, and so on, it is not possible in practice to guarantee
+that a given RCU read-side critical section will complete in less than
+one second.
+Therefore, as noted above, if scheduler spinlocks are held across
+a given call to <tt>rcu_read_unlock()</tt>, interrupts must be
+disabled across the entire RCU read-side critical section.
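+
+<p>
+For example (an illustrative fragment; <tt>gp</tt>, <tt>flags</tt>,
+<tt>do_something_with()</tt>, and <tt>sched_style_lock</tt> are
+hypothetical), such a critical section must be shaped as follows:
+
+<blockquote>
+<pre>
+ 1 local_irq_save(flags);  /* Interrupts off before rcu_read_lock(). */
+ 2 rcu_read_lock();
+ 3 p = rcu_dereference(gp);
+ 4 do_something_with(p);
+ 5 raw_spin_lock(&amp;sched_style_lock);
+ 6 rcu_read_unlock();  /* Safe: interrupts disabled throughout. */
+ 7 raw_spin_unlock(&amp;sched_style_lock);
+ 8 local_irq_restore(flags);
+</pre>
+</blockquote>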
<h3><a name="Tracing and RCU">Tracing and RCU</a></h3>
@@ -3233,6 +3298,11 @@ For example, RCU callback overhead might be charged back to the
originating <tt>call_rcu()</tt> instance, though probably not
in production kernels.
+<p>
+Additional work may be required to provide reasonable forward-progress
+guarantees under heavy load for grace periods and for callback
+invocation.
+
<h2><a name="Summary">Summary</a></h2>
<p>
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 49747717d905..6f469864d9f5 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -63,7 +63,7 @@ over a rather long period of time, but improvements are always welcome!
pointer must be covered by rcu_read_lock(), rcu_read_lock_bh(),
rcu_read_lock_sched(), or by the appropriate update-side lock.
Disabling of preemption can serve as rcu_read_lock_sched(), but
- is less readable.
+ is less readable and prevents lockdep from detecting locking issues.
Letting RCU-protected pointers "leak" out of an RCU read-side
critical section is every bit as bad as letting them leak out
@@ -285,11 +285,7 @@ over a rather long period of time, but improvements are always welcome!
here is that superuser already has lots of ways to crash
the machine.
- d. Use call_rcu_bh() rather than call_rcu(), in order to take
- advantage of call_rcu_bh()'s faster grace periods. (This
- is only a partial solution, though.)
-
- e. Periodically invoke synchronize_rcu(), permitting a limited
+ d. Periodically invoke synchronize_rcu(), permitting a limited
number of updates per grace period.
The same cautions apply to call_rcu_bh(), call_rcu_sched(),
@@ -324,37 +320,14 @@ over a rather long period of time, but improvements are always welcome!
will break Alpha, cause aggressive compilers to generate bad code,
and confuse people trying to read your code.
-11. Note that synchronize_rcu() -only- guarantees to wait until
- all currently executing rcu_read_lock()-protected RCU read-side
- critical sections complete. It does -not- necessarily guarantee
- that all currently running interrupts, NMIs, preempt_disable()
- code, or idle loops will complete. Therefore, if your
- read-side critical sections are protected by something other
- than rcu_read_lock(), do -not- use synchronize_rcu().
-
- Similarly, disabling preemption is not an acceptable substitute
- for rcu_read_lock(). Code that attempts to use preemption
- disabling where it should be using rcu_read_lock() will break
- in CONFIG_PREEMPT=y kernel builds.
-
- If you want to wait for interrupt handlers, NMI handlers, and
- code under the influence of preempt_disable(), you instead
- need to use synchronize_irq() or synchronize_sched().
-
- This same limitation also applies to synchronize_rcu_bh()
- and synchronize_srcu(), as well as to the asynchronous and
- expedited forms of the three primitives, namely call_rcu(),
- call_rcu_bh(), call_srcu(), synchronize_rcu_expedited(),
- synchronize_rcu_bh_expedited(), and synchronize_srcu_expedited().
-
-12. Any lock acquired by an RCU callback must be acquired elsewhere
+11. Any lock acquired by an RCU callback must be acquired elsewhere
with softirq disabled, e.g., via spin_lock_irqsave(),
spin_lock_bh(), etc. Failing to disable irq on a given
acquisition of that lock will result in deadlock as soon as
the RCU softirq handler happens to run your RCU callback while
interrupting that acquisition's critical section.
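
	For example (an illustrative sketch; "mylock" is hypothetical),
	process-context code would do:

		spin_lock_irqsave(&mylock, flags);
		/* ... updates ... */
		spin_unlock_irqrestore(&mylock, flags);

	so that an RCU callback running from the RCU softirq handler can
	safely acquire the same lock with plain spin_lock(&mylock)
	without risk of self-deadlock.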
-13. RCU callbacks can be and are executed in parallel. In many cases,
+12. RCU callbacks can be and are executed in parallel. In many cases,
the callback code simply wraps kfree(), so that this
is not an issue (or, more accurately, to the extent that it is
an issue, the memory-allocator locking handles it). However,
@@ -370,7 +343,7 @@ over a rather long period of time, but improvements are always welcome!
not the case, a self-spawning RCU callback would prevent the
victim CPU from ever going offline.)
-14. Unlike other forms of RCU, it -is- permissible to block in an
+13. Unlike other forms of RCU, it -is- permissible to block in an
SRCU read-side critical section (demarked by srcu_read_lock()
and srcu_read_unlock()), hence the "SRCU": "sleepable RCU".
Please note that if you don't need to sleep in read-side critical
@@ -414,7 +387,7 @@ over a rather long period of time, but improvements are always welcome!
Note that rcu_dereference() and rcu_assign_pointer() relate to
SRCU just as they do to other forms of RCU.
-15. The whole point of call_rcu(), synchronize_rcu(), and friends
+14. The whole point of call_rcu(), synchronize_rcu(), and friends
is to wait until all pre-existing readers have finished before
carrying out some otherwise-destructive operation. It is
therefore critically important to -first- remove any path
@@ -426,13 +399,13 @@ over a rather long period of time, but improvements are always welcome!
is the caller's responsibility to guarantee that any subsequent
readers will execute safely.
-16. The various RCU read-side primitives do -not- necessarily contain
+15. The various RCU read-side primitives do -not- necessarily contain
memory barriers. You should therefore plan for the CPU
and the compiler to freely reorder code into and out of RCU
read-side critical sections. It is the responsibility of the
RCU update-side primitives to deal with this.
-17. Use CONFIG_PROVE_LOCKING, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the
+16. Use CONFIG_PROVE_LOCKING, CONFIG_DEBUG_OBJECTS_RCU_HEAD, and the
__rcu sparse checks to validate your RCU code. These can help
find problems as follows:
@@ -455,7 +428,7 @@ over a rather long period of time, but improvements are always welcome!
These debugging aids can help you find problems that are
otherwise extremely difficult to spot.
-18. If you register a callback using call_rcu(), call_rcu_bh(),
+17. If you register a callback using call_rcu(), call_rcu_bh(),
call_rcu_sched(), or call_srcu(), and pass in a function defined
within a loadable module, then it is necessary to wait
all pending callbacks to be invoked after the last invocation
@@ -469,8 +442,8 @@ over a rather long period of time, but improvements are always welcome!
You instead need to use one of the barrier functions:
o call_rcu() -> rcu_barrier()
- o call_rcu_bh() -> rcu_barrier_bh()
- o call_rcu_sched() -> rcu_barrier_sched()
+ o call_rcu_bh() -> rcu_barrier()
+ o call_rcu_sched() -> rcu_barrier()
o call_srcu() -> srcu_barrier()
However, these barrier functions are absolutely -not- guaranteed
diff --git a/Documentation/RCU/stallwarn.txt b/Documentation/RCU/stallwarn.txt
index 491043fd976f..073dbc12d1ea 100644
--- a/Documentation/RCU/stallwarn.txt
+++ b/Documentation/RCU/stallwarn.txt
@@ -176,9 +176,8 @@ causing stalls, and that the stall was affecting RCU-sched. This message
will normally be followed by stack dumps for each CPU. Please note that
PREEMPT_RCU builds can be stalled by tasks as well as by CPUs, and that
the tasks will be indicated by PID, for example, "P3421". It is even
-possible for a rcu_preempt_state stall to be caused by both CPUs -and-
-tasks, in which case the offending CPUs and tasks will all be called
-out in the list.
+possible for an rcu_state stall to be caused by both CPUs -and- tasks,
+in which case the offending CPUs and tasks will all be called out in the list.
CPU 2's "(3 GPs behind)" indicates that this CPU has not interacted with
the RCU core for the past three grace periods. In contrast, CPU 16's "(0
@@ -206,7 +205,7 @@ handlers are no longer able to execute on this CPU. This can happen if
the stalled CPU is spinning with interrupts disabled, or, in -rt
kernels, if a high-priority process is starving RCU's softirq handler.
-The "fps=" shows the number of force-quiescent-state idle/offline
+The "fqs=" shows the number of force-quiescent-state idle/offline
detection passes that the grace-period kthread has made across this
CPU since the last time that this CPU noted the beginning of a grace
period.
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 86d82f7f3500..4a6854318b17 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -266,7 +266,7 @@ rcu_dereference()
unnecessary overhead on Alpha CPUs.
Note that the value returned by rcu_dereference() is valid
- only within the enclosing RCU read-side critical section.
+ only within the enclosing RCU read-side critical section [1].
For example, the following is -not- legal:
rcu_read_lock();
@@ -292,6 +292,19 @@ rcu_dereference()
typically used indirectly, via the _rcu list-manipulation
primitives, such as list_for_each_entry_rcu().
+ [1] The variant rcu_dereference_protected() can be used outside
+ of an RCU read-side critical section as long as the usage is
+ protected by locks acquired by the update-side code. This variant
+ avoids the lockdep warning that would happen when using (for
+ example) rcu_dereference() without rcu_read_lock() protection.
+ Using rcu_dereference_protected() also has the advantage
+ of permitting compiler optimizations that rcu_dereference()
+ must prohibit. The rcu_dereference_protected() variant takes
+ a lockdep expression to indicate which locks must be acquired
+ by the caller. If the indicated protection is not provided,
+ a lockdep splat is emitted. See RCU/Design/Requirements.html
+ and the API's code comments for more details and example usage.
+
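+	For example (an illustrative sketch; "gp" and "gp_lock" are
+	hypothetical):
+
+		p = rcu_dereference_protected(gp,
+					      lockdep_is_held(&gp_lock));
+
+	Here the caller asserts that it holds gp_lock, so no RCU
+	read-side critical section is needed.
+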
The following diagram shows how each API communicates among the
reader, updater, and reclaimer.
@@ -322,28 +335,27 @@ to their callers and (2) call_rcu() callbacks may be invoked. Efficient
implementations of the RCU infrastructure make heavy use of batching in
order to amortize their overhead over many uses of the corresponding APIs.
-There are no fewer than three RCU mechanisms in the Linux kernel; the
-diagram above shows the first one, which is by far the most commonly used.
-The rcu_dereference() and rcu_assign_pointer() primitives are used for
-all three mechanisms, but different defer and protect primitives are
-used as follows:
-
- Defer Protect
+There are at least three flavors of RCU usage in the Linux kernel. The diagram
+above shows the most common one. On the updater side, the rcu_assign_pointer(),
+synchronize_rcu() and call_rcu() primitives used are the same for all three
+flavors. However, for protection (on the reader side), the primitives used vary
+depending on the flavor:
-a. synchronize_rcu() rcu_read_lock() / rcu_read_unlock()
- call_rcu() rcu_dereference()
+a. rcu_read_lock() / rcu_read_unlock()
+ rcu_dereference()
-b. synchronize_rcu_bh() rcu_read_lock_bh() / rcu_read_unlock_bh()
- call_rcu_bh() rcu_dereference_bh()
+b. rcu_read_lock_bh() / rcu_read_unlock_bh()
+ local_bh_disable() / local_bh_enable()
+ rcu_dereference_bh()
-c. synchronize_sched() rcu_read_lock_sched() / rcu_read_unlock_sched()
- call_rcu_sched() preempt_disable() / preempt_enable()
- local_irq_save() / local_irq_restore()
- hardirq enter / hardirq exit
- NMI enter / NMI exit
- rcu_dereference_sched()
+c. rcu_read_lock_sched() / rcu_read_unlock_sched()
+ preempt_disable() / preempt_enable()
+ local_irq_save() / local_irq_restore()
+ hardirq enter / hardirq exit
+ NMI enter / NMI exit
+ rcu_dereference_sched()
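+
+For example, a flavor (b) reader looks roughly as follows (an
+illustrative sketch; "gp" is a hypothetical RCU-protected pointer):
+
+	rcu_read_lock_bh();
+	p = rcu_dereference_bh(gp);
+	if (p)
+		do_something_with(p);
+	rcu_read_unlock_bh();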
-These three mechanisms are used as follows:
+These three flavors are used as follows:
a. RCU applied to normal data structures.
@@ -867,18 +879,20 @@ RCU: Critical sections Grace period Barrier
bh: Critical sections Grace period Barrier
- rcu_read_lock_bh call_rcu_bh rcu_barrier_bh
- rcu_read_unlock_bh synchronize_rcu_bh
- rcu_dereference_bh synchronize_rcu_bh_expedited
+ rcu_read_lock_bh call_rcu rcu_barrier
+ rcu_read_unlock_bh synchronize_rcu
+ [local_bh_disable] synchronize_rcu_expedited
+ [and friends]
+ rcu_dereference_bh
rcu_dereference_bh_check
rcu_dereference_bh_protected
rcu_read_lock_bh_held
sched: Critical sections Grace period Barrier
- rcu_read_lock_sched synchronize_sched rcu_barrier_sched
- rcu_read_unlock_sched call_rcu_sched
- [preempt_disable] synchronize_sched_expedited
+ rcu_read_lock_sched call_rcu rcu_barrier
+ rcu_read_unlock_sched synchronize_rcu
+ [preempt_disable] synchronize_rcu_expedited
[and friends]
rcu_read_lock_sched_notrace
rcu_read_unlock_sched_notrace
@@ -890,8 +904,8 @@ sched: Critical sections Grace period Barrier
SRCU: Critical sections Grace period Barrier
- srcu_read_lock synchronize_srcu srcu_barrier
- srcu_read_unlock call_srcu
+ srcu_read_lock call_srcu srcu_barrier
+ srcu_read_unlock synchronize_srcu
srcu_dereference synchronize_srcu_expedited
srcu_dereference_check
srcu_read_lock_held
@@ -1034,7 +1048,7 @@ Answer: Just as PREEMPT_RT permits preemption of spinlock
spinlocks blocking while in RCU read-side critical
sections.
- Why the apparent inconsistency? Because it is it
+ Why the apparent inconsistency? Because it is
possible to use priority boosting to keep the RCU
grace periods short if need be (for example, if running
short of memory). In contrast, if blocking waiting
diff --git a/Documentation/admin-guide/LSM/SELinux.rst b/Documentation/admin-guide/LSM/SELinux.rst
index f722c9b4173a..520a1c2c6fd2 100644
--- a/Documentation/admin-guide/LSM/SELinux.rst
+++ b/Documentation/admin-guide/LSM/SELinux.rst
@@ -6,7 +6,7 @@ If you want to use SELinux, chances are you will want
to use the distro-provided policies, or install the
latest reference policy release from
- http://oss.tresys.com/projects/refpolicy
+ https://github.com/SELinuxProject/refpolicy
However, if you want to install a dummy policy for
testing, you can do using ``mdp`` provided under
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 476722b7b636..baf19bf28385 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1879,8 +1879,10 @@ following two functions.
wbc_init_bio(@wbc, @bio)
Should be called for each bio carrying writeback data and
- associates the bio with the inode's owner cgroup. Can be
- called anytime between bio allocation and submission.
+ associates the bio with the inode's owner cgroup and the
+ corresponding request queue. This must be called after
+ a queue (device) has been associated with the bio and
+ before submission.
wbc_account_io(@wbc, @page, @bytes)
Should be called for each data segment being written out.
@@ -1899,7 +1901,7 @@ the configuration, the bio may be executed at a lower priority and if
the writeback session is holding shared resources, e.g. a journal
entry, may lead to priority inversion. There is no one easy solution
for the problem. Filesystems can try to work around specific problem
-cases by skipping wbc_init_bio() or using bio_associate_blkcg()
+cases by skipping wbc_init_bio() and using bio_associate_blkg()
directly.
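
A minimal ordering sketch for wbc_init_bio() (illustrative only; "sb",
"wbc", and "nr_vecs" are hypothetical, and bio_set_dev() stands in for
whatever associates the bio with its device and request queue)::

	bio = bio_alloc(GFP_NOFS, nr_vecs);
	bio_set_dev(bio, sb->s_bdev);	/* associate the queue first */
	wbc_init_bio(wbc, bio);		/* then the owner cgroup */
	/* add the pages under writeback to the bio here */
	submit_bio(bio);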
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 19f4423e70d9..ff4daa780ae8 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -674,6 +674,9 @@
cpuidle.off=1 [CPU_IDLE]
disable the cpuidle sub-system
+ cpuidle.governor=
+ [CPU_IDLE] Name of the cpuidle governor to use.
+
cpufreq.off=1 [CPU_FREQ]
disable the cpufreq sub-system
@@ -856,7 +859,8 @@
causing system reset or hang due to sending
INIT from AP to BSP.
- disable_counter_freezing [HW]
+ perf_v4_pmi= [X86,INTEL]
+ Format: <bool>
Disable Intel PMU counter freezing feature.
The feature only exists starting from
Arch Perfmon v4 (Skylake and newer).
@@ -2095,6 +2099,9 @@
off
Disables hypervisor mitigations and doesn't
emit any warnings.
+ It also drops the swap size and available
+			RAM limit restrictions on both hypervisor and
+ bare metal.
Default is 'flush'.
@@ -2826,7 +2833,7 @@
check bypass). With this option data leaks are possible
in the system.
- nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
+ nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
(indirect branch prediction) vulnerability. System may
allow data leaks with this option, which is equivalent
to spectre_v2=off.
@@ -3504,6 +3511,10 @@
before loading.
See Documentation/blockdev/ramdisk.txt.
+ psi= [KNL] Enable or disable pressure stall information
+ tracking.
+ Format: <bool>
+
psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to
probe for; one of (bare|imps|exps|lifebook|any).
psmouse.rate= [HW,MOUSE] Set desired mouse report rate, in reports
@@ -3743,24 +3754,6 @@
in microseconds. The default of zero says
no holdoff.
- rcutorture.cbflood_inter_holdoff= [KNL]
- Set holdoff time (jiffies) between successive
- callback-flood tests.
-
- rcutorture.cbflood_intra_holdoff= [KNL]
- Set holdoff time (jiffies) between successive
- bursts of callbacks within a given callback-flood
- test.
-
- rcutorture.cbflood_n_burst= [KNL]
- Set the number of bursts making up a given
- callback-flood test. Set this to zero to
- disable callback-flood testing.
-
- rcutorture.cbflood_n_per_burst= [KNL]
- Set the number of callbacks to be registered
- in a given burst of a callback-flood test.
-
rcutorture.fqs_duration= [KNL]
Set duration of force_quiescent_state bursts
in microseconds.
@@ -3773,6 +3766,23 @@
Set wait time between force_quiescent_state bursts
in seconds.
+ rcutorture.fwd_progress= [KNL]
+ Enable RCU grace-period forward-progress testing
+ for the types of RCU supporting this notion.
+
+ rcutorture.fwd_progress_div= [KNL]
+ Specify the fraction of a CPU-stall-warning
+ period to do tight-loop forward-progress testing.
+
+ rcutorture.fwd_progress_holdoff= [KNL]
+ Number of seconds to wait between successive
+ forward-progress tests.
+
+ rcutorture.fwd_progress_need_resched= [KNL]
+ Enclose cond_resched() calls within checks for
+ need_resched() during tight-loop forward-progress
+ testing.
+
rcutorture.gp_cond= [KNL]
Use conditional/asynchronous update-side
primitives, if available.
@@ -4194,9 +4204,13 @@
spectre_v2= [X86] Control mitigation of Spectre variant 2
(indirect branch speculation) vulnerability.
+ The default operation protects the kernel from
+ user space attacks.
- on - unconditionally enable
- off - unconditionally disable
+ on - unconditionally enable, implies
+ spectre_v2_user=on
+ off - unconditionally disable, implies
+ spectre_v2_user=off
auto - kernel detects whether your CPU model is
vulnerable
@@ -4206,6 +4220,12 @@
CONFIG_RETPOLINE configuration option, and the
compiler with which the kernel was built.
+ Selecting 'on' will also enable the mitigation
+ against user space to user space task attacks.
+
+ Selecting 'off' will disable both the kernel and
+ the user space protections.
+
Specific mitigations can also be selected manually:
retpoline - replace indirect branches
@@ -4215,6 +4235,48 @@
Not specifying this option is equivalent to
spectre_v2=auto.
+ spectre_v2_user=
+ [X86] Control mitigation of Spectre variant 2
+ (indirect branch speculation) vulnerability between
+ user space tasks
+
+ on - Unconditionally enable mitigations. Is
+ enforced by spectre_v2=on
+
+ off - Unconditionally disable mitigations. Is
+ enforced by spectre_v2=off
+
+ prctl - Indirect branch speculation is enabled,
+ but mitigation can be enabled via prctl
+ per thread. The mitigation control state
+ is inherited on fork.
+
+ prctl,ibpb
+ - Like "prctl" above, but only STIBP is
+ controlled per thread. IBPB is issued
+ always when switching between different user
+ space processes.
+
+ seccomp
+ - Same as "prctl" above, but all seccomp
+ threads will enable the mitigation unless
+ they explicitly opt out.
+
+ seccomp,ibpb
+ - Like "seccomp" above, but only STIBP is
+ controlled per thread. IBPB is issued
+ always when switching between different
+ user space processes.
+
+ auto - Kernel selects the mitigation depending on
+ the available CPU features and vulnerability.
+
+ Default mitigation:
+ If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl"
+
+ Not specifying this option is equivalent to
+ spectre_v2_user=auto.
+
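+			Under the prctl-based modes above, a task can opt in
+			from user space roughly as follows (an illustrative
+			sketch using the speculation-control prctl):
+
+			  #include <sys/prctl.h>
+			  #include <linux/prctl.h>
+
+			  /* Disable indirect branch speculation for this task. */
+			  prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
+			        PR_SPEC_DISABLE, 0, 0);
+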
spec_store_bypass_disable=
[HW] Control Speculative Store Bypass (SSB) Disable mitigation
(Speculative Store Bypass vulnerability)
diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
index b85dd80510b0..9af977384168 100644
--- a/Documentation/admin-guide/l1tf.rst
+++ b/Documentation/admin-guide/l1tf.rst
@@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are:
off Disables hypervisor mitigations and doesn't emit any
warnings.
+ It also drops the swap size and available RAM limit restrictions
+ on both hypervisor and bare metal.
+
============ =============================================================
The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
@@ -576,7 +579,8 @@ Default mitigations
The kernel default mitigations for vulnerable processors are:
- PTE inversion to protect against malicious user space. This is done
- unconditionally and cannot be controlled.
+ unconditionally and cannot be controlled. The swap storage is limited
+ to ~16TB.
- L1D conditional flushing on VMENTER when EPT is enabled for
a guest.
diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst
new file mode 100644
index 000000000000..106379e2619f
--- /dev/null
+++ b/Documentation/admin-guide/pm/cpuidle.rst
@@ -0,0 +1,631 @@
+.. |struct cpuidle_state| replace:: :c:type:`struct cpuidle_state <cpuidle_state>`
+.. |cpufreq| replace:: :doc:`CPU Performance Scaling <cpufreq>`
+
+========================
+CPU Idle Time Management
+========================
+
+::
+
+ Copyright (c) 2018 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+Concepts
+========
+
+Modern processors are generally able to enter states in which the execution of
+a program is suspended and instructions belonging to it are not fetched from
+memory or executed. Those states are the *idle* states of the processor.
+
+Since part of the processor hardware is not used in idle states, entering them
+generally allows power drawn by the processor to be reduced and, in consequence,
+it is an opportunity to save energy.
+
+CPU idle time management is an energy-efficiency feature concerned with using
+the idle states of processors for this purpose.
+
+Logical CPUs
+------------
+
+CPU idle time management operates on CPUs as seen by the *CPU scheduler* (that
+is the part of the kernel responsible for the distribution of computational
+work in the system). In its view, CPUs are *logical* units. That is, they need
+not be separate physical entities and may just be interfaces appearing to
+software as individual single-core processors. In other words, a CPU is an
+entity which appears to be fetching instructions that belong to one sequence
+(program) from memory and executing them, but it need not work this way
+physically. Generally, three different cases can be considered here.
+
+First, if the whole processor can only follow one sequence of instructions (one
+program) at a time, it is a CPU. In that case, if the hardware is asked to
+enter an idle state, that applies to the processor as a whole.
+
+Second, if the processor is multi-core, each core in it is able to follow at
+least one program at a time. The cores need not be entirely independent of each
+other (for example, they may share caches), but still most of the time they
+work physically in parallel with each other, so if each of them executes only
+one program, those programs run mostly independently of each other at the same
+time. The entire cores are CPUs in that case and if the hardware is asked to
+enter an idle state, that applies to the core that asked for it in the first
+place, but it also may apply to a larger unit (say a "package" or a "cluster")
+that the core belongs to (in fact, it may apply to an entire hierarchy of larger
+units containing the core). Namely, if all of the cores in the larger unit
+except for one have been put into idle states at the "core level" and the
+remaining core asks the processor to enter an idle state, that may trigger it
+to put the whole larger unit into an idle state which also will affect the
+other cores in that unit.
+
+Finally, each core in a multi-core processor may be able to follow more than one
+program in the same time frame (that is, each core may be able to fetch
+instructions from multiple locations in memory and execute them concurrently,
+though not necessarily entirely in parallel with each other). In that case
+the cores present themselves to software as "bundles" each consisting of
+multiple individual single-core "processors", referred to as *hardware threads*
+(or hyper-threads specifically on Intel hardware), that each can follow one
+sequence of instructions. Then, the hardware threads are CPUs from the CPU idle
+time management perspective and if the processor is asked to enter an idle state
+by one of them, the hardware thread (or CPU) that asked for it is stopped, but
+nothing more happens, unless all of the other hardware threads within the same
+core also have asked the processor to enter an idle state. In that situation,
+the core may be put into an idle state individually or a larger unit containing
+it may be put into an idle state as a whole (if the other cores within the
+larger unit are in idle states already).
+
+Idle CPUs
+---------
+
+Logical CPUs, simply referred to as "CPUs" in what follows, are regarded as
+*idle* by the Linux kernel when there are no tasks to run on them except for the
+special "idle" task.
+
+Tasks are the CPU scheduler's representation of work. Each task consists of a
+sequence of instructions to execute, or code, data to be manipulated while
+running that code, and some context information that needs to be loaded into the
+processor every time the task's code is run by a CPU. The CPU scheduler
+distributes work by assigning tasks to run to the CPUs present in the system.
+
+Tasks can be in various states. In particular, they are *runnable* if there are
+no specific conditions preventing their code from being run by a CPU as long as
+there is a CPU available for that (for example, they are not waiting for any
+events to occur or similar). When a task becomes runnable, the CPU scheduler
+assigns it to one of the available CPUs to run and, if there are no other runnable
+tasks assigned to it, the CPU will load the given task's context and run its
+code (from the instruction following the last one executed so far, possibly by
+another CPU). [If there are multiple runnable tasks assigned to one CPU
+simultaneously, they will be subject to prioritization and time sharing in order
+to allow them to make some progress over time.]
+
+The special "idle" task becomes runnable if there are no other runnable tasks
+assigned to the given CPU and the CPU is then regarded as idle. In other words,
+in Linux idle CPUs run the code of the "idle" task called *the idle loop*. That
+code may cause the processor to be put into one of its idle states, if they are
+supported, in order to save energy, but if the processor does not support any
+idle states, or there is not enough time to spend in an idle state before the
+next wakeup event, or there are strict latency constraints preventing any of the
+available idle states from being used, the CPU will simply execute more or less
+useless instructions in a loop until it is assigned a new task to run.
+
+
+.. _idle-loop:
+
+The Idle Loop
+=============
+
+The idle loop code takes two major steps in every iteration. First, it
+calls into a code module referred to as the *governor* that belongs to the CPU
+idle time management subsystem called ``CPUIdle`` to select an idle state for
+the CPU to ask the hardware to enter. Second, it invokes another code module
+from the ``CPUIdle`` subsystem, called the *driver*, to actually ask the
+processor hardware to enter the idle state selected by the governor.
+
+The role of the governor is to find an idle state most suitable for the
+conditions at hand. For this purpose, idle states that the hardware can be
+asked to enter by logical CPUs are represented in an abstract way independent of
+the platform or the processor architecture and organized in a one-dimensional
+(linear) array. That array has to be prepared and supplied by the ``CPUIdle``
+driver matching the platform the kernel is running on at the initialization
+time. This allows ``CPUIdle`` governors to be independent of the underlying
+hardware and to work with any platforms that the Linux kernel can run on.
+
+Each idle state present in that array is characterized by two parameters to be
+taken into account by the governor, the *target residency* and the (worst-case)
+*exit latency*. The target residency is the minimum time the hardware must
+spend in the given state, including the time needed to enter it (which may be
+substantial), in order to save more energy than it would save by entering one of
+the shallower idle states instead. [The "depth" of an idle state roughly
+corresponds to the power drawn by the processor in that state.] The exit
+latency, in turn, is the maximum time it will take a CPU asking the processor
+hardware to enter an idle state to start executing the first instruction after a
+wakeup from that state. Note that in general the exit latency also must cover
+the time needed to enter the given state in case the wakeup occurs while the
+hardware is still entering it and the state must be entered completely before
+it can be exited in an orderly manner.
+
+There are two types of information that can influence the governor's decisions.
+First of all, the governor knows the time until the closest timer event. That
+time is known exactly, because the kernel programs timers and it knows exactly
+when they will trigger, and it is the maximum time the hardware that the given
+CPU depends on can spend in an idle state, including the time necessary to enter
+and exit it. However, the CPU may be woken up by a non-timer event at any time
+(in particular, before the closest timer triggers) and it generally is not known
+when that may happen. The governor can only see how much time the CPU actually
+was idle after it has been woken up (that time will be referred to as the *idle
+duration* from now on) and it can use that information somehow along with the
+time until the closest timer to estimate the idle duration in the future. How the
+governor uses that information depends on what algorithm is implemented by it
+and that is the primary reason for having more than one governor in the
+``CPUIdle`` subsystem.
+
+There are two ``CPUIdle`` governors available, ``menu`` and ``ladder``. Which
+of them is used depends on the configuration of the kernel and in particular on
+whether or not the scheduler tick can be `stopped by the idle
+loop <idle-cpus-and-tick_>`_. It is possible to change the governor at run time
+if the ``cpuidle_sysfs_switch`` command line parameter has been passed to the
+kernel, but that is not safe in general, so it should not be done on production
+systems (that may change in the future, though). The name of the ``CPUIdle``
+governor currently used by the kernel can be read from the
+:file:`current_governor_ro` (or :file:`current_governor` if
+``cpuidle_sysfs_switch`` is present in the kernel command line) file under
+:file:`/sys/devices/system/cpu/cpuidle/` in ``sysfs``.
+
+Which ``CPUIdle`` driver is used, on the other hand, usually depends on the
+platform the kernel is running on, but there are platforms with more than one
+matching driver. For example, there are two drivers that can work with the
+majority of Intel platforms, ``intel_idle`` and ``acpi_idle``, one with
+hardcoded idle states information and the other able to read that information
+from the system's ACPI tables, respectively. Still, even in those cases, the
+driver chosen at the system initialization time cannot be replaced later, so the
+decision on which one of them to use has to be made early (on Intel platforms
+the ``acpi_idle`` driver will be used if ``intel_idle`` is disabled for some
+reason or if it does not recognize the processor). The name of the ``CPUIdle``
+driver currently used by the kernel can be read from the :file:`current_driver`
+file under :file:`/sys/devices/system/cpu/cpuidle/` in ``sysfs``.
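+
+For example, the following minimal user space program (written for this
+document; only the file paths above come from the kernel, the rest is
+illustrative) prints both names::
+
+  #include <stdio.h>
+
+  /* Print one line from a sysfs attribute file. */
+  static void print_attr(const char *path)
+  {
+          char buf[64];
+          FILE *f = fopen(path, "r");
+
+          if (f && fgets(buf, sizeof(buf), f))
+                  printf("%s: %s", path, buf);
+          if (f)
+                  fclose(f);
+  }
+
+  int main(void)
+  {
+          print_attr("/sys/devices/system/cpu/cpuidle/current_governor_ro");
+          print_attr("/sys/devices/system/cpu/cpuidle/current_driver");
+          return 0;
+  }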
+
+
+.. _idle-cpus-and-tick:
+
+Idle CPUs and The Scheduler Tick
+================================
+
+The scheduler tick is a timer that triggers periodically in order to implement
+the time sharing strategy of the CPU scheduler. Of course, if there are
+multiple runnable tasks assigned to one CPU at the same time, the only way to
+allow them to make reasonable progress in a given time frame is to make them
+share the available CPU time. Namely, in rough approximation, each task is
+given a slice of the CPU time to run its code, subject to the scheduling class,
+prioritization and so on, and when that time slice is used up, the CPU should be
+switched over to running (the code of) another task. The currently running task
+may not want to give the CPU away voluntarily, however, and the scheduler tick
+is there to make the switch happen regardless. That is not the only role of the
+tick, but it is the primary reason for using it.
+
+The scheduler tick is problematic from the CPU idle time management perspective,
+because it triggers periodically and relatively often (depending on the kernel
+configuration, the length of the tick period is between 1 ms and 10 ms).
+Thus, if the tick is allowed to trigger on idle CPUs, it will not make sense
+for them to ask the hardware to enter idle states with target residencies above
+the tick period length. Moreover, in that case the idle duration of any CPU
+will never exceed the tick period length and the energy used for entering and
+exiting idle states due to the tick wakeups on idle CPUs will be wasted.
+
+Fortunately, it is not really necessary to allow the tick to trigger on idle
+CPUs, because (by definition) they have no tasks to run except for the special
+"idle" one. In other words, from the CPU scheduler perspective, the only user
+of the CPU time on them is the idle loop. Since the time of an idle CPU need
+not be shared between multiple runnable tasks, the primary reason for using the
+tick goes away if the given CPU is idle. Consequently, it is possible to stop
+the scheduler tick entirely on idle CPUs in principle, even though that may not
+always be worth the effort.
+
+Whether or not it makes sense to stop the scheduler tick in the idle loop
+depends on what is expected by the governor. First, if there is another
+(non-tick) timer due to trigger within the tick range, stopping the tick clearly
+would be a waste of time, even though the timer hardware may not need to be
+reprogrammed in that case. Second, if the governor is expecting a non-timer
+wakeup within the tick range, stopping the tick is not necessary and it may even
+be harmful. Namely, in that case the governor will select an idle state with
+the target residency within the time until the expected wakeup, so that state is
+going to be relatively shallow. The governor really cannot select a deep idle
+state then, as that would contradict its own expectation of a wakeup in short
+order. Now, if the wakeup really occurs shortly, stopping the tick would be a
+waste of time and in this case the timer hardware would need to be reprogrammed,
+which is expensive. On the other hand, if the tick is stopped and the wakeup
+does not occur any time soon, the hardware may spend an indefinite amount of
+time in the shallow idle state selected by the governor, which will be a waste
+of energy. Hence, if the governor is expecting a wakeup of any kind within the
+tick range, it is better to allow the tick to trigger. Otherwise, however, the
+governor will select a relatively deep idle state, so the tick should be stopped
+so that it does not wake up the CPU too early.
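+
+The essence of that trade-off can be condensed into a few lines (a sketch of
+the rule described above, not the kernel's implementation; all of the names
+are made up)::
+
+  #include <stdbool.h>
+  #include <stdint.h>
+
+  /* Decide whether the tick should be stopped before the CPU goes idle. */
+  static bool stop_tick_on_idle(uint64_t expected_wakeup_ns,
+                                uint64_t tick_period_ns)
+  {
+          /*
+           * A wakeup expected within the tick period implies a shallow idle
+           * state anyway, so keeping the tick running costs little and the
+           * timer hardware need not be reprogrammed.
+           */
+          return expected_wakeup_ns >= tick_period_ns;
+  }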
+
+In any case, the governor knows what it is expecting and the decision on whether
+or not to stop the scheduler tick belongs to it. Still, if the tick has been
+stopped already (in one of the previous iterations of the loop), it is better
+to leave it as is and the governor needs to take that into account.
+
+The kernel can be configured to disable stopping the scheduler tick in the idle
+loop altogether. That can be done through the build-time configuration of it
+(by unsetting the ``CONFIG_NO_HZ_IDLE`` configuration option) or by passing
+``nohz=off`` to it in the command line. In both cases, as the stopping of the
+scheduler tick is disabled, the governor's decisions regarding it are simply
+ignored by the idle loop code and the tick is never stopped.
+
+The systems that run kernels configured to allow the scheduler tick to be
+stopped on idle CPUs are referred to as *tickless* systems and they are
+generally regarded as more energy-efficient than the systems running kernels in
+which the tick cannot be stopped. If the given system is tickless, it will use
+the ``menu`` governor by default and if it is not tickless, the default
+``CPUIdle`` governor on it will be ``ladder``.
+
+
+The ``menu`` Governor
+=====================
+
+The ``menu`` governor is the default ``CPUIdle`` governor for tickless systems.
+It is quite complex, but the basic principle of its design is straightforward.
+Namely, when invoked to select an idle state for a CPU (i.e. an idle state that
+the CPU will ask the processor hardware to enter), it attempts to predict the
+idle duration and uses the predicted value for idle state selection.
+
+It first obtains the time until the closest timer event with the assumption
+that the scheduler tick will be stopped. That time, referred to as the *sleep
+length* in what follows, is the upper bound on the time before the next CPU
+wakeup. It is used to determine the sleep length range, which in turn is needed
+to get the sleep length correction factor.
+
+The ``menu`` governor maintains two arrays of sleep length correction factors.
+One of them is used when tasks previously running on the given CPU are waiting
+for some I/O operations to complete and the other one is used when that is not
+the case. Each array contains several correction factor values that correspond
+to different sleep length ranges organized so that each range represented in the
+array is approximately 10 times wider than the previous one.
+
+The correction factor for the given sleep length range (determined before
+selecting the idle state for the CPU) is updated after the CPU has been woken
+up and the closer the sleep length is to the observed idle duration, the closer
+to 1 the correction factor becomes (it must fall between 0 and 1 inclusive).
+The sleep length is multiplied by the correction factor for the range that it
+falls into to obtain the first approximation of the predicted idle duration.
+
+Next, the governor uses a simple pattern recognition algorithm to refine its
+idle duration prediction. Namely, it saves the last 8 observed idle duration
+values and, when predicting the idle duration next time, it computes the average
+and variance of them. If the variance is small (smaller than 400 square
+milliseconds) or it is small relative to the average (the average is greater
+than 6 times the standard deviation), the average is regarded as the "typical
+interval" value. Otherwise, the longest of the saved observed idle duration
+values is discarded and the computation is repeated for the remaining ones.
+Again, if the variance of them is small (in the above sense), the average is
+taken as the "typical interval" value and so on, until either the "typical
+interval" is determined or too many data points are disregarded, in which case
+the "typical interval" is assumed to equal "infinity" (the maximum unsigned
+integer value). The "typical interval" computed this way is compared with the
+sleep length multiplied by the correction factor and the minimum of the two is
+taken as the predicted idle duration.
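+
+A self-contained sketch of that procedure may look like the following
+(floating-point arithmetic and millisecond units are used here for clarity;
+the actual ``menu`` governor code uses integer microsecond arithmetic and
+slightly different cut-off conditions)::
+
+  #include <math.h>
+
+  #define NR_SAMPLES 8
+
+  /* Detect the "typical interval" among the last NR_SAMPLES idle durations. */
+  static double typical_interval(double sample[], int n)
+  {
+          while (n >= NR_SAMPLES / 2) {  /* give up if too many are dropped */
+                  double avg = 0.0, var = 0.0;
+                  int i, max_idx = 0;
+
+                  for (i = 0; i < n; i++)
+                          avg += sample[i] / n;
+                  for (i = 0; i < n; i++) {
+                          var += (sample[i] - avg) * (sample[i] - avg) / n;
+                          if (sample[i] > sample[max_idx])
+                                  max_idx = i;
+                  }
+                  /* small variance, absolutely or relative to the average */
+                  if (var < 400.0 || avg > 6.0 * sqrt(var))
+                          return avg;
+                  /* otherwise discard the longest sample and repeat */
+                  sample[max_idx] = sample[--n];
+          }
+          return INFINITY;
+  }
+
+  /* The prediction is the smaller of the corrected sleep length and the
+     typical interval. */
+  static double predicted_idle_duration(double sleep_length,
+                                        double correction, double typical)
+  {
+          double corrected = sleep_length * correction;  /* correction in [0, 1] */
+
+          return corrected < typical ? corrected : typical;
+  }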
+
+Then, the governor computes an extra latency limit to help "interactive"
+workloads. It uses the observation that if the exit latency of the selected
+idle state is comparable with the predicted idle duration, the total time spent
+in that state probably will be very short and the amount of energy to save by
+entering it will be relatively small, so likely it is better to avoid the
+overhead related to entering that state and exiting it. Thus selecting a
+shallower state is likely to be a better option then. The first approximation
+of the extra latency limit is the predicted idle duration itself, which
+additionally is divided by a value depending on the number of tasks that
+previously ran on the given CPU and are now waiting for I/O operations to
+complete. The result of that division is compared with the latency limit coming
+from the power management quality of service, or `PM QoS <cpu-pm-qos_>`_,
+framework and the minimum of the two is taken as the limit for the idle states'
+exit latency.
+
+Now, the governor is ready to walk the list of idle states and choose one of
+them. For this purpose, it compares the target residency of each state with
+the predicted idle duration and the exit latency of it with the computed latency
+limit. It selects the state with the target residency closest to the predicted
+idle duration, but still below it, and exit latency that does not exceed the
+limit.
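+
+In rough approximation (assuming that both parameters grow monotonically with
+the state index and ignoring states that may have been disabled), that
+selection can be sketched as follows::
+
+  struct state_params {
+          unsigned int target_residency_us;
+          unsigned int exit_latency_us;
+  };
+
+  /* States are ordered from the shallowest to the deepest one. */
+  static int select_state(const struct state_params *s, int count,
+                          unsigned int predicted_us, unsigned int limit_us)
+  {
+          int i, best = 0;
+
+          for (i = 0; i < count; i++) {
+                  if (s[i].target_residency_us > predicted_us ||
+                      s[i].exit_latency_us > limit_us)
+                          break;
+                  best = i;
+          }
+          return best;  /* deepest state satisfying both constraints */
+  }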
+
+In the final step the governor may still need to refine the idle state selection
+if it has not decided to `stop the scheduler tick <idle-cpus-and-tick_>`_. That
+happens if the idle duration predicted by it is less than the tick period and
+the tick has not been stopped already (in a previous iteration of the idle
+loop). Then, the sleep length used in the previous computations may not reflect
+the real time until the closest timer event and if it really is greater than
+that time, the governor may need to select a shallower state with a suitable
+target residency.
+
+
+.. _idle-states-representation:
+
+Representation of Idle States
+=============================
+
+For the CPU idle time management purposes all of the physical idle states
+supported by the processor have to be represented as a one-dimensional array of
+|struct cpuidle_state| objects each allowing an individual (logical) CPU to ask
+the processor hardware to enter an idle state of certain properties. If there
+is a hierarchy of units in the processor, one |struct cpuidle_state| object can
+cover a combination of idle states supported by the units at different levels of
+the hierarchy. In that case, the `target residency and exit latency parameters
+of it <idle-loop_>`_ must reflect the properties of the idle state at the
+deepest level (i.e. the idle state of the unit containing all of the other
+units).
+
+For example, take a processor with two cores in a larger unit referred to as
+a "module" and suppose that asking the hardware to enter a specific idle state
+(say "X") at the "core" level by one core will trigger the module to try to
+enter a specific idle state of its own (say "MX") if the other core is in idle
+state "X" already. In other words, asking for idle state "X" at the "core"
+level gives the hardware a license to go as deep as to idle state "MX" at the
+"module" level, but there is no guarantee that this is going to happen (the core
+asking for idle state "X" may just end up in that state by itself instead).
+Then, the target residency of the |struct cpuidle_state| object representing
+idle state "X" must reflect the minimum time to spend in idle state "MX" of
+the module (including the time needed to enter it), because that is the minimum
+time the CPU needs to be idle to save any energy in case the hardware enters
+that state. Analogously, the exit latency parameter of that object must cover
+the exit time of idle state "MX" of the module (and usually its entry time too),
+because that is the maximum delay between a wakeup signal and the time the CPU
+will start to execute the first new instruction (assuming that both cores in the
+module will always be ready to execute instructions as soon as the module
+becomes operational as a whole).
+
+There are processors without direct coordination between different levels of the
+hierarchy of units inside them, however. In those cases asking for an idle
+state at the "core" level does not automatically affect the "module" level, for
+example, in any way and the ``CPUIdle`` driver is responsible for the entire
+handling of the hierarchy. Then, the definition of the idle state objects is
+entirely up to the driver, but still the physical properties of the idle state
+that the processor hardware finally goes into must always follow the parameters
+used by the governor for idle state selection (for instance, the actual exit
+latency of that idle state must not exceed the exit latency parameter of the
+idle state object selected by the governor).
+
+In addition to the target residency and exit latency idle state parameters
+discussed above, the objects representing idle states each contain a few other
+parameters describing the idle state and a pointer to the function to run in
+order to ask the hardware to enter that state. Also, for each
+|struct cpuidle_state| object, there is a corresponding
+:c:type:`struct cpuidle_state_usage <cpuidle_state_usage>` one containing usage
+statistics of the given idle state. That information is exposed by the kernel
+via ``sysfs``.
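+
+For illustration, a hypothetical driver's array of idle state objects might be
+defined like this (the field names and the ``enter`` callback signature are
+those of |struct cpuidle_state|; the values and the ``my_*`` identifiers are
+made up)::
+
+  #include <linux/cpuidle.h>
+
+  static int my_enter(struct cpuidle_device *dev,
+                      struct cpuidle_driver *drv, int index);
+
+  static struct cpuidle_state my_states[] = {
+          {
+                  .name = "C1",
+                  .desc = "core clock gating",
+                  .exit_latency = 2,              /* microseconds */
+                  .target_residency = 4,          /* microseconds */
+                  .enter = my_enter,
+          },
+          {
+                  .name = "C2",
+                  .desc = "core power gating, module may follow",
+                  .exit_latency = 50,
+                  .target_residency = 150,
+                  .enter = my_enter,
+          },
+  };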
+
+For each CPU in the system, there is a :file:`/sys/devices/system/cpu/cpu<N>/cpuidle/`
+directory in ``sysfs``, where the number ``<N>`` is assigned to the given
+CPU at the initialization time. That directory contains a set of subdirectories
+called :file:`state0`, :file:`state1` and so on, up to the number of idle state
+objects defined for the given CPU minus one. Each of these directories
+corresponds to one idle state object and the larger the number in its name, the
+deeper the (effective) idle state represented by it. Each of them contains
+a number of files (attributes) representing the properties of the idle state
+object corresponding to it, as follows:
+
+``above``
+ Total number of times this idle state had been asked for, but the
+ observed idle duration was certainly too short to match its target
+ residency.
+
+``below``
+	Total number of times this idle state had been asked for, but certainly
+ a deeper idle state would have been a better match for the observed idle
+ duration.
+
+``desc``
+ Description of the idle state.
+
+``disable``
+ Whether or not this idle state is disabled.
+
+``latency``
+ Exit latency of the idle state in microseconds.
+
+``name``
+ Name of the idle state.
+
+``power``
+ Power drawn by hardware in this idle state in milliwatts (if specified,
+ 0 otherwise).
+
+``residency``
+ Target residency of the idle state in microseconds.
+
+``time``
+ Total time spent in this idle state by the given CPU (as measured by the
+ kernel) in microseconds.
+
+``usage``
+ Total number of times the hardware has been asked by the given CPU to
+ enter this idle state.
+
+The :file:`desc` and :file:`name` files both contain strings. The difference
+between them is that the name is expected to be more concise, while the
+description may be longer and it may contain white space or special characters.
+The other files listed above contain integer numbers.
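+
+For example, the following user space sketch (written for this document; it
+relies only on the attribute files listed above) walks the idle state objects
+of CPU0 and prints some of their parameters::
+
+  #include <stdio.h>
+  #include <string.h>
+
+  /* Read one attribute of the given idle state object of CPU0. */
+  static int read_attr(int state, const char *attr, char *buf, int len)
+  {
+          char path[128];
+          FILE *f;
+
+          snprintf(path, sizeof(path),
+                   "/sys/devices/system/cpu/cpu0/cpuidle/state%d/%s",
+                   state, attr);
+          f = fopen(path, "r");
+          if (!f)
+                  return -1;
+          if (!fgets(buf, len, f))
+                  buf[0] = '\0';
+          buf[strcspn(buf, "\n")] = '\0';
+          fclose(f);
+          return 0;
+  }
+
+  int main(void)
+  {
+          char name[64], lat[32], res[32];
+          int state;
+
+          for (state = 0; !read_attr(state, "name", name, sizeof(name));
+               state++) {
+                  read_attr(state, "latency", lat, sizeof(lat));
+                  read_attr(state, "residency", res, sizeof(res));
+                  printf("state%d (%s): exit latency %s us, residency %s us\n",
+                         state, name, lat, res);
+          }
+          return 0;
+  }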
+
+The :file:`disable` attribute is the only writeable one. If it contains 1, the
+given idle state is disabled for this particular CPU, which means that the
+governor will never select it for this particular CPU and the ``CPUIdle``
+driver will never ask the hardware to enter it for that CPU as a result.
+However, disabling an idle state for one CPU does not prevent it from being
+asked for by the other CPUs, so it must be disabled for all of them in order to
+never be asked for by any of them. [Note that, due to the way the ``ladder``
+governor is implemented, disabling an idle state prevents that governor from
+selecting any idle states deeper than the disabled one too.]
+
+If the :file:`disable` attribute contains 0, the given idle state is enabled for
+this particular CPU, but it still may be disabled for some or all of the other
+CPUs in the system at the same time. Writing 1 to it causes the idle state to
+be disabled for this particular CPU and writing 0 to it allows the governor to
+take it into consideration for the given CPU and the driver to ask for it,
+unless that state was disabled globally in the driver (in which case it cannot
+be used at all).
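+
+A sketch of a helper doing that for one idle state object on all CPUs might be
+(hypothetical code; writing to :file:`disable` requires sufficient
+privileges)::
+
+  #include <stdio.h>
+
+  /* Disable idle state object <state> for CPUs 0 to ncpus - 1. */
+  static void disable_state_everywhere(int state, int ncpus)
+  {
+          char path[128];
+          int cpu;
+
+          for (cpu = 0; cpu < ncpus; cpu++) {
+                  FILE *f;
+
+                  snprintf(path, sizeof(path),
+                           "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/disable",
+                           cpu, state);
+                  f = fopen(path, "w");
+                  if (f) {
+                          fputs("1", f);
+                          fclose(f);
+                  }
+          }
+  }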
+
+The :file:`power` attribute is not defined very well, especially for idle state
+objects representing combinations of idle states at different levels of the
+hierarchy of units in the processor, and it generally is hard to obtain idle
+state power numbers for complex hardware, so :file:`power` often contains 0 (not
+available) and if it contains a nonzero number, that number may not be very
+accurate and it should not be relied on for anything meaningful.
+
+The number in the :file:`time` file generally may be greater than the total time
+really spent by the given CPU in the given idle state, because it is measured by
+the kernel and it may not cover the cases in which the hardware refused to enter
+this idle state and entered a shallower one instead of it (or even it did not
+enter any idle state at all). The kernel can only measure the time span between
+asking the hardware to enter an idle state and the subsequent wakeup of the CPU
+and it cannot say what really happened in the meantime at the hardware level.
+Moreover, if the idle state object in question represents a combination of idle
+states at different levels of the hierarchy of units in the processor,
+the kernel can never say how deep the hardware went down the hierarchy in any
+particular case. For these reasons, the only reliable way to find out how
+much time has been spent by the hardware in different idle states supported by
+it is to use idle state residency counters in the hardware, if available.
+
+
+.. _cpu-pm-qos:
+
+Power Management Quality of Service for CPUs
+============================================
+
+The power management quality of service (PM QoS) framework in the Linux kernel
+allows kernel code and user space processes to set constraints on various
+energy-efficiency features of the kernel to prevent performance from dropping
+below a required level. The PM QoS constraints can be set globally, in
+predefined categories referred to as PM QoS classes, or against individual
+devices.
+
+CPU idle time management can be affected by PM QoS in two ways, through the
+global constraint in the ``PM_QOS_CPU_DMA_LATENCY`` class and through the
+resume latency constraints for individual CPUs. Kernel code (e.g. device
+drivers) can set both of them with the help of special internal interfaces
+provided by the PM QoS framework. User space can modify the former by opening
+the :file:`cpu_dma_latency` special device file under :file:`/dev/` and writing
+a binary value (interpreted as a signed 32-bit integer) to it. In turn, the
+resume latency constraint for a CPU can be modified by user space by writing a
+string (representing a signed 32-bit integer) to the
+:file:`power/pm_qos_resume_latency_us` file under
+:file:`/sys/devices/system/cpu/cpu<N>/` in ``sysfs``, where the CPU number
+``<N>`` is allocated at the system initialization time. In both cases negative
+values will be rejected and the written integer number will be interpreted as a
+requested PM QoS constraint in microseconds.
+
+The requested value is not automatically applied as a new constraint, however,
+as it may be less restrictive (greater in this particular case) than another
+constraint previously requested by someone else. For this reason, the PM QoS
+framework maintains a list of requests that have been made so far in each
+global class and for each device, aggregates them and applies the effective
+(minimum in this particular case) value as the new constraint.
+
+In fact, opening the :file:`cpu_dma_latency` special device file causes a new
+PM QoS request to be created and added to the priority list of requests in the
+``PM_QOS_CPU_DMA_LATENCY`` class and the file descriptor coming from the
+"open" operation represents that request. If that file descriptor is then
+used for writing, the number written to it will be associated with the PM QoS
+request represented by it as a new requested constraint value. Next, the
+priority list mechanism will be used to determine the new effective value of
+the entire list of requests and that effective value will be set as a new
+constraint. Thus setting a new requested constraint value will only change the
+real constraint if the effective "list" value is affected by it. In particular,
+for the ``PM_QOS_CPU_DMA_LATENCY`` class it only affects the real constraint if
+it is the minimum of the requested constraints in the list. The process holding
+a file descriptor obtained by opening the :file:`cpu_dma_latency` special device
+file controls the PM QoS request associated with that file descriptor, but it
+controls this particular PM QoS request only.
+
+Closing the :file:`cpu_dma_latency` special device file or, more precisely, the
+file descriptor obtained while opening it, causes the PM QoS request associated
+with that file descriptor to be removed from the ``PM_QOS_CPU_DMA_LATENCY``
+class priority list and destroyed. If that happens, the priority list mechanism
+will be used, again, to determine the new effective value for the whole list
+and that value will become the new real constraint.
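+
+Putting the above together, a user space program may request a latency
+constraint roughly as follows (a sketch; the 20 microseconds value is just an
+example)::
+
+  #include <fcntl.h>
+  #include <stdint.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          int32_t latency_us = 20;  /* requested constraint in microseconds */
+          int fd = open("/dev/cpu_dma_latency", O_RDWR);
+
+          if (fd < 0)
+                  return 1;
+          /* The written binary value becomes this request's constraint. */
+          if (write(fd, &latency_us, sizeof(latency_us)) != sizeof(latency_us))
+                  return 1;
+
+          /* ... run the latency-sensitive workload here ... */
+
+          close(fd);  /* removes the request from the class priority list */
+          return 0;
+  }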
+
+In turn, for each CPU there is only one resume latency PM QoS request
+associated with the :file:`power/pm_qos_resume_latency_us` file under
+:file:`/sys/devices/system/cpu/cpu<N>/` in ``sysfs`` and writing to it causes
+this single PM QoS request to be updated regardless of which user space
+process does that. In other words, this PM QoS request is shared by the entire
+user space, so access to the file associated with it needs to be arbitrated
+to avoid confusion. [Arguably, the only legitimate use of this mechanism in
+practice is to pin a process to the CPU in question and let it use the
+``sysfs`` interface to control the resume latency constraint for it.] It is
+still only a request, however. It is a member of a priority list used to
+determine the effective value to be set as the resume latency constraint for the
+CPU in question every time the list of requests is updated one way or another
+(there may be other requests coming from kernel code in that list).
+
+CPU idle time governors are expected to regard the minimum of the global
+effective ``PM_QOS_CPU_DMA_LATENCY`` class constraint and the effective
+resume latency constraint for the given CPU as the upper limit for the exit
+latency of the idle states they can select for that CPU. They should never
+select any idle states with exit latency beyond that limit.
+
+
+Idle States Control Via Kernel Command Line
+===========================================
+
+In addition to the ``sysfs`` interface allowing individual idle states to be
+`disabled for individual CPUs <idle-states-representation_>`_, there are kernel
+command line parameters affecting CPU idle time management.
+
+The ``cpuidle.off=1`` kernel command line option can be used to disable the
+CPU idle time management entirely. It does not prevent the idle loop from
+running on idle CPUs, but it prevents the CPU idle time governors and drivers
+from being invoked. If it is added to the kernel command line, the idle loop
+will ask the hardware to enter idle states on idle CPUs via the CPU architecture
+support code that is expected to provide a default mechanism for this purpose.
+That default mechanism usually is the least common denominator for all of the
+processors implementing the architecture (i.e. CPU instruction set) in question,
+however, so it is rather crude and not very energy-efficient. For this reason,
+it is not recommended for production use.
+
+The ``cpuidle.governor=`` kernel command line switch allows the ``CPUIdle``
+governor to be specified. It has to be appended with a string matching
+the name of an available governor (e.g. ``cpuidle.governor=menu``) and that
+governor will be used instead of the default one. It is possible to force
+the ``menu`` governor to be used on the systems that use the ``ladder`` governor
+by default this way, for example.
+
+The other kernel command line parameters controlling CPU idle time management
+described below are only relevant for the *x86* architecture and some of
+them affect Intel processors only.
+
+The *x86* architecture support code recognizes three kernel command line
+options related to CPU idle time management: ``idle=poll``, ``idle=halt``,
+and ``idle=nomwait``. The first two of them disable the ``acpi_idle`` and
+``intel_idle`` drivers altogether, which effectively causes the entire
+``CPUIdle`` subsystem to be disabled and makes the idle loop invoke the
+architecture support code to deal with idle CPUs. How it does that depends on
+which of the two parameters is added to the kernel command line. In the
+``idle=halt`` case, the architecture support code will use the ``HLT``
+instruction of the CPUs (which, as a rule, suspends the execution of the program
+and causes the hardware to attempt to enter the shallowest available idle state)
+for this purpose, and if ``idle=poll`` is used, idle CPUs will execute a
+more or less "lightweight" sequence of instructions in a tight loop. [Note
+that using ``idle=poll`` is somewhat drastic in many cases, as preventing idle
+CPUs from saving almost any energy at all may not be the only effect of it.
+For example, on Intel hardware it effectively prevents CPUs from using
+P-states (see |cpufreq|) that require any number of CPUs in a package to be
+idle, so it very well may hurt single-thread computations performance as well as
+energy-efficiency. Thus using it for performance reasons may not be a good idea
+at all.]
+
+The ``idle=nomwait`` option disables the ``intel_idle`` driver and causes
+``acpi_idle`` to be used (as long as all of the information needed by it is
+there in the system's ACPI tables), but it is not allowed to use the
+``MWAIT`` instruction of the CPUs to ask the hardware to enter idle states.
+
+In addition to the architecture-level kernel command line options affecting CPU
+idle time management, there are parameters affecting individual ``CPUIdle``
+drivers that can be passed to them via the kernel command line. Specifically,
+the ``intel_idle.max_cstate=<n>`` and ``processor.max_cstate=<n>`` parameters,
+where ``<n>`` is an idle state index also used in the name of the given
+state's directory in ``sysfs`` (see
+`Representation of Idle States <idle-states-representation_>`_), cause the
+``intel_idle`` and ``acpi_idle`` drivers, respectively, to discard all of the
+idle states deeper than idle state ``<n>``. In that case, they will never ask
+for any of those idle states or expose them to the governor. [The behavior of
+the two drivers is different for ``<n>`` equal to ``0``. Adding
+``intel_idle.max_cstate=0`` to the kernel command line disables the
+``intel_idle`` driver and allows ``acpi_idle`` to be used, whereas
+``processor.max_cstate=0`` is equivalent to ``processor.max_cstate=1``.
+Also, the ``acpi_idle`` driver is part of the ``processor`` kernel module that
+can be loaded separately and ``max_cstate=<n>`` can be passed to it as a module
+parameter when it is loaded.]
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index ac6f5c597a56..ec0f7c111f65 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -495,7 +495,15 @@ on the following rules, regardless of the current operation mode of the driver:
2. Each individual CPU is affected by its own per-policy limits (that is, it
cannot be requested to run faster than its own per-policy maximum and it
- cannot be requested to run slower than its own per-policy minimum).
+ cannot be requested to run slower than its own per-policy minimum). The
+   effective performance depends on whether the platform supports per-core
+   P-states, on whether hyper-threading is enabled, and on the current
+   performance requests from other CPUs. When the platform doesn't support
+   per-core P-states, the effective performance can exceed the policy limits
+   set on a CPU if other CPUs are requesting higher performance at that
+   moment. Even with per-core P-states support, when hyper-threading is
+   enabled, if a sibling CPU is requesting higher performance, the other
+   siblings will get higher performance than their policy limits.
3. The global and per-policy limits can be set independently.
diff --git a/Documentation/admin-guide/pm/working-state.rst b/Documentation/admin-guide/pm/working-state.rst
index fa01bf083dfe..b6cef9b5e961 100644
--- a/Documentation/admin-guide/pm/working-state.rst
+++ b/Documentation/admin-guide/pm/working-state.rst
@@ -5,5 +5,6 @@ Working-State Power Management
.. toctree::
:maxdepth: 2
+ cpuidle
cpufreq
intel_pstate
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 8d0df62c3fe0..8df9f4658d6f 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -205,6 +205,14 @@ Before jumping into the kernel, the following conditions must be met:
ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b0.
- The DT or ACPI tables must describe a GICv2 interrupt controller.
+ For CPUs with pointer authentication functionality:
+ - If EL3 is present:
+ SCR_EL3.APK (bit 16) must be initialised to 0b1
+ SCR_EL3.API (bit 17) must be initialised to 0b1
+ - If the kernel is entered at EL1:
+ HCR_EL2.APK (bit 40) must be initialised to 0b1
+ HCR_EL2.API (bit 41) must be initialised to 0b1
+
The requirements described above for CPU mode, caches, MMUs, architected
timers, coherency and system registers apply to all CPUs. All CPUs must
enter the kernel in the same exception level.
diff --git a/Documentation/arm64/cpu-feature-registers.txt b/Documentation/arm64/cpu-feature-registers.txt
index 7964f03846b1..d4b4dd1fe786 100644
--- a/Documentation/arm64/cpu-feature-registers.txt
+++ b/Documentation/arm64/cpu-feature-registers.txt
@@ -184,12 +184,20 @@ infrastructure:
x--------------------------------------------------x
| Name | bits | visible |
|--------------------------------------------------|
+ | GPI | [31-28] | y |
+ |--------------------------------------------------|
+ | GPA | [27-24] | y |
+ |--------------------------------------------------|
| LRCPC | [23-20] | y |
|--------------------------------------------------|
| FCMA | [19-16] | y |
|--------------------------------------------------|
| JSCVT | [15-12] | y |
|--------------------------------------------------|
+ | API | [11-8] | y |
+ |--------------------------------------------------|
+ | APA | [7-4] | y |
+ |--------------------------------------------------|
| DPB | [3-0] | y |
x--------------------------------------------------x
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
index ea819ae024dd..13d6691b37be 100644
--- a/Documentation/arm64/elf_hwcaps.txt
+++ b/Documentation/arm64/elf_hwcaps.txt
@@ -182,3 +182,15 @@ HWCAP_FLAGM
HWCAP_SSBS
Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010.
+
+HWCAP_PACA
+
+ Functionality implied by ID_AA64ISAR1_EL1.APA == 0b0001 or
+ ID_AA64ISAR1_EL1.API == 0b0001, as described by
+ Documentation/arm64/pointer-authentication.txt.
+
+HWCAP_PACG
+
+ Functionality implied by ID_AA64ISAR1_EL1.GPA == 0b0001 or
+ ID_AA64ISAR1_EL1.GPI == 0b0001, as described by
+ Documentation/arm64/pointer-authentication.txt.
diff --git a/Documentation/arm64/pointer-authentication.txt b/Documentation/arm64/pointer-authentication.txt
new file mode 100644
index 000000000000..a25cd21290e9
--- /dev/null
+++ b/Documentation/arm64/pointer-authentication.txt
@@ -0,0 +1,88 @@
+Pointer authentication in AArch64 Linux
+=======================================
+
+Author: Mark Rutland <mark.rutland@arm.com>
+Date: 2017-07-19
+
+This document briefly describes the provision of pointer authentication
+functionality in AArch64 Linux.
+
+
+Architecture overview
+---------------------
+
+The ARMv8.3 Pointer Authentication extension adds primitives that can be
+used to mitigate certain classes of attack where an attacker can corrupt
+the contents of some memory (e.g. the stack).
+
+The extension uses a Pointer Authentication Code (PAC) to determine
+whether pointers have been modified unexpectedly. A PAC is derived from
+a pointer, another value (such as the stack pointer), and a secret key
+held in system registers.
+
+The extension adds instructions to insert a valid PAC into a pointer,
+and to verify/remove the PAC from a pointer. The PAC occupies a number
+of high-order bits of the pointer, which varies dependent on the
+configured virtual address size and whether pointer tagging is in use.
+
+A subset of these instructions has been allocated from the HINT
+encoding space. In the absence of the extension (or when disabled),
+these instructions behave as NOPs. Applications and libraries using
+these instructions operate correctly regardless of the presence of the
+extension.
+
+The extension provides five separate keys to generate PACs - two for
+instruction addresses (APIAKey, APIBKey), two for data addresses
+(APDAKey, APDBKey), and one for generic authentication (APGAKey).
+
+
+Basic support
+-------------
+
+When CONFIG_ARM64_PTR_AUTH is selected, and relevant HW support is
+present, the kernel will assign random key values to each process at
+exec*() time. The keys are shared by all threads within the process, and
+are preserved across fork().
+
+Presence of address authentication functionality is advertised via
+HWCAP_PACA, and generic authentication functionality via HWCAP_PACG.
+
+The number of bits that the PAC occupies in a pointer is 55 minus the
+virtual address size configured by the kernel. For example, with a
+virtual address size of 48, the PAC is 7 bits wide.
+
+Recent versions of GCC can compile code with APIAKey-based return
+address protection when passed the -msign-return-address option. This
+uses instructions in the HINT space (unless -march=armv8.3-a or higher
+is also passed), and such code can run on systems without the pointer
+authentication extension.
+
+In addition to exec(), keys can also be reinitialized to random values
+using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY,
+PR_PAC_APIBKEY, PR_PAC_APDAKEY, PR_PAC_APDBKEY and PR_PAC_APGAKEY
+specifies which keys are to be reinitialized; specifying 0 means "all
+keys".
+
+
+Debugging
+---------
+
+When CONFIG_ARM64_PTR_AUTH is selected, and HW support for address
+authentication is present, the kernel will expose the position of TTBR0
+PAC bits in the NT_ARM_PAC_MASK regset (struct user_pac_mask), which
+userspace can acquire via PTRACE_GETREGSET.
+
+The regset is exposed only when HWCAP_PACA is set. Separate masks are
+exposed for data pointers and instruction pointers, as the set of PAC
+bits can vary between the two. Note that the masks apply to TTBR0
+addresses, and are not valid to apply to TTBR1 addresses (e.g. kernel
+pointers).
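+
+For illustration, a debugger that has already stopped a tracee may read the
+masks roughly as follows (a sketch; NT_ARM_PAC_MASK and struct user_pac_mask
+come from the UAPI headers):
+
+  #include <stdio.h>
+  #include <sys/types.h>
+  #include <sys/ptrace.h>
+  #include <sys/uio.h>
+  #include <elf.h>
+  #include <asm/ptrace.h>  /* struct user_pac_mask (arm64) */
+
+  /* The tracee <pid> is assumed to be ptrace-stopped already. */
+  static void show_pac_masks(pid_t pid)
+  {
+          struct user_pac_mask masks;
+          struct iovec iov = { .iov_base = &masks, .iov_len = sizeof(masks) };
+
+          if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_PAC_MASK, &iov) == 0)
+                  printf("data mask: %#llx, insn mask: %#llx\n",
+                         (unsigned long long)masks.data_mask,
+                         (unsigned long long)masks.insn_mask);
+  }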
+
+
+Virtualization
+--------------
+
+Pointer authentication is not currently supported in KVM guests. KVM
+will mask the feature bits from ID_AA64ISAR1_EL1, and attempted use of
+the feature will result in an UNDEFINED exception being injected into
+the guest.
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 76ccded8b74c..1f09d043d086 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -57,6 +57,8 @@ stable kernels.
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
+| ARM | Cortex-A76 | #1165522 | ARM64_ERRATUM_1165522 |
+| ARM | Cortex-A76 | #1286807 | ARM64_ERRATUM_1286807 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 207eca58efaa..ac18b488cb5e 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -65,7 +65,6 @@ Description of Contents:
3.2.3 I/O completion
3.2.4 Implications for drivers that do not interpret bios (don't handle
multiple segments)
- 3.2.5 Request command tagging
3.3 I/O submission
4. The I/O scheduler
5. Scalability related changes
@@ -708,93 +707,6 @@ is crossed on completion of a transfer. (The end*request* functions should
be used if only if the request has come down from block/bio path, not for
direct access requests which only specify rq->buffer without a valid rq->bio)
-3.2.5 Generic request command tagging
-
-3.2.5.1 Tag helpers
-
-Block now offers some simple generic functionality to help support command
-queueing (typically known as tagged command queueing), ie manage more than
-one outstanding command on a queue at any given time.
-
- blk_queue_init_tags(struct request_queue *q, int depth)
-
- Initialize internal command tagging structures for a maximum
- depth of 'depth'.
-
- blk_queue_free_tags((struct request_queue *q)
-
- Teardown tag info associated with the queue. This will be done
- automatically by block if blk_queue_cleanup() is called on a queue
- that is using tagging.
-
-The above are initialization and exit management, the main helpers during
-normal operations are:
-
- blk_queue_start_tag(struct request_queue *q, struct request *rq)
-
- Start tagged operation for this request. A free tag number between
- 0 and 'depth' is assigned to the request (rq->tag holds this number),
- and 'rq' is added to the internal tag management. If the maximum depth
- for this queue is already achieved (or if the tag wasn't started for
- some other reason), 1 is returned. Otherwise 0 is returned.
-
- blk_queue_end_tag(struct request_queue *q, struct request *rq)
-
- End tagged operation on this request. 'rq' is removed from the internal
- book keeping structures.
-
-To minimize struct request and queue overhead, the tag helpers utilize some
-of the same request members that are used for normal request queue management.
-This means that a request cannot both be an active tag and be on the queue
-list at the same time. blk_queue_start_tag() will remove the request, but
-the driver must remember to call blk_queue_end_tag() before signalling
-completion of the request to the block layer. This means ending tag
-operations before calling end_that_request_last()! For an example of a user
-of these helpers, see the IDE tagged command queueing support.
-
-3.2.5.2 Tag info
-
-Some block functions exist to query current tag status or to go from a
-tag number to the associated request. These are, in no particular order:
-
- blk_queue_tagged(q)
-
- Returns 1 if the queue 'q' is using tagging, 0 if not.
-
- blk_queue_tag_request(q, tag)
-
- Returns a pointer to the request associated with tag 'tag'.
-
- blk_queue_tag_depth(q)
-
- Return current queue depth.
-
- blk_queue_tag_queue(q)
-
- Returns 1 if the queue can accept a new queued command, 0 if we are
- at the maximum depth already.
-
- blk_queue_rq_tagged(rq)
-
- Returns 1 if the request 'rq' is tagged.
-
-3.2.5.2 Internal structure
-
-Internally, block manages tags in the blk_queue_tag structure:
-
- struct blk_queue_tag {
- struct request **tag_index; /* array or pointers to rq */
- unsigned long *tag_map; /* bitmap of free tags */
- struct list_head busy_list; /* fifo list of busy tags */
- int busy; /* queue depth */
- int max_depth; /* max queue depth */
- };
-
-Most of the above is simple and straight forward, however busy_list may need
-a bit of explaining. Normally we don't care too much about request ordering,
-but in the event of any barrier requests in the tag queue we need to ensure
-that requests are restarted in the order they were queue.
-
3.3 I/O Submission
The routine submit_bio() is used to submit a single io. Higher level i/o
diff --git a/Documentation/block/cfq-iosched.txt b/Documentation/block/cfq-iosched.txt
deleted file mode 100644
index 895bd3813115..000000000000
--- a/Documentation/block/cfq-iosched.txt
+++ /dev/null
@@ -1,291 +0,0 @@
-CFQ (Complete Fairness Queueing)
-===============================
-
-The main aim of CFQ scheduler is to provide a fair allocation of the disk
-I/O bandwidth for all the processes which requests an I/O operation.
-
-CFQ maintains the per process queue for the processes which request I/O
-operation(synchronous requests). In case of asynchronous requests, all the
-requests from all the processes are batched together according to their
-process's I/O priority.
-
-CFQ ioscheduler tunables
-========================
-
-slice_idle
-----------
-This specifies how long CFQ should idle for next request on certain cfq queues
-(for sequential workloads) and service trees (for random workloads) before
-queue is expired and CFQ selects next queue to dispatch from.
-
-By default slice_idle is a non-zero value. That means by default we idle on
-queues/service trees. This can be very helpful on highly seeky media like
-single spindle SATA/SAS disks where we can cut down on overall number of
-seeks and see improved throughput.
-
-Setting slice_idle to 0 will remove all the idling on queues/service tree
-level and one should see an overall improved throughput on faster storage
-devices like multiple SATA/SAS disks in hardware RAID configuration. The down
-side is that isolation provided from WRITES also goes down and notion of
-IO priority becomes weaker.
-
-So depending on storage and workload, it might be useful to set slice_idle=0.
-In general I think for SATA/SAS disks and software RAID of SATA/SAS disks
-keeping slice_idle enabled should be useful. For any configurations where
-there are multiple spindles behind single LUN (Host based hardware RAID
-controller or for storage arrays), setting slice_idle=0 might end up in better
-throughput and acceptable latencies.
-
-back_seek_max
--------------
-This specifies, given in Kbytes, the maximum "distance" for backward seeking.
-The distance is the amount of space from the current head location to the
-sectors that are backward in terms of distance.
-
-This parameter allows the scheduler to anticipate requests in the "backward"
-direction and consider them as being the "next" if they are within this
-distance from the current head location.
-
-back_seek_penalty
------------------
-This parameter is used to compute the cost of backward seeking. If the
-backward distance of request is just 1/back_seek_penalty from a "front"
-request, then the seeking cost of two requests is considered equivalent.
-
-So scheduler will not bias toward one or the other request (otherwise scheduler
-will bias toward front request). Default value of back_seek_penalty is 2.
-
-fifo_expire_async
------------------
-This parameter is used to set the timeout of asynchronous requests. Default
-value of this is 248ms.
-
-fifo_expire_sync
-----------------
-This parameter is used to set the timeout of synchronous requests. Default
-value of this is 124ms. In case to favor synchronous requests over asynchronous
-one, this value should be decreased relative to fifo_expire_async.
-
-group_idle
------------
-This parameter forces idling at the CFQ group level instead of CFQ
-queue level. This was introduced after a bottleneck was observed
-in higher end storage due to idle on sequential queue and allow dispatch
-from a single queue. The idea with this parameter is that it can be run with
-slice_idle=0 and group_idle=8, so that idling does not happen on individual
-queues in the group but happens overall on the group and thus still keeps the
-IO controller working.
-Not idling on individual queues in the group will dispatch requests from
-multiple queues in the group at the same time and achieve higher throughput
-on higher end storage.
-
-Default value for this parameter is 8ms.
-
-low_latency
------------
-This parameter is used to enable/disable the low latency mode of the CFQ
-scheduler. If enabled, CFQ tries to recompute the slice time for each process
-based on the target_latency set for the system. This favors fairness over
-throughput. Disabling low latency (setting it to 0) ignores target latency,
-allowing each process in the system to get a full time slice.
-
-By default low latency mode is enabled.
-
-target_latency
---------------
-This parameter is used to calculate the time slice for a process if cfq's
-latency mode is enabled. It will ensure that sync requests have an estimated
-latency. But if sequential workload is higher(e.g. sequential read),
-then to meet the latency constraints, throughput may decrease because of less
-time for each process to issue I/O request before the cfq queue is switched.
-
-Though this can be overcome by disabling the latency_mode, it may increase
-the read latency for some applications. This parameter allows for changing
-target_latency through the sysfs interface which can provide the balanced
-throughput and read latency.
-
-Default value for target_latency is 300ms.
-
-slice_async
------------
-This parameter is same as of slice_sync but for asynchronous queue. The
-default value is 40ms.
-
-slice_async_rq
---------------
-This parameter is used to limit the dispatching of asynchronous request to
-device request queue in queue's slice time. The maximum number of request that
-are allowed to be dispatched also depends upon the io priority. Default value
-for this is 2.
-
-slice_sync
-----------
-When a queue is selected for execution, the queues IO requests are only
-executed for a certain amount of time(time_slice) before switching to another
-queue. This parameter is used to calculate the time slice of synchronous
-queue.
-
-time_slice is computed using the below equation:-
-time_slice = slice_sync + (slice_sync/5 * (4 - prio)). To increase the
-time_slice of synchronous queue, increase the value of slice_sync. Default
-value is 100ms.
-
-quantum
--------
-This specifies the number of request dispatched to the device queue. In a
-queue's time slice, a request will not be dispatched if the number of request
-in the device exceeds this parameter. This parameter is used for synchronous
-request.
-
-In case of storage with several disk, this setting can limit the parallel
-processing of request. Therefore, increasing the value can improve the
-performance although this can cause the latency of some I/O to increase due
-to more number of requests.
-
-CFQ Group scheduling
-====================
-
-CFQ supports blkio cgroup and has "blkio." prefixed files in each
-blkio cgroup directory. It is weight-based and there are four knobs
-for configuration - weight[_device] and leaf_weight[_device].
-Internal cgroup nodes (the ones with children) can also have tasks in
-them, so the former two configure how much proportion the cgroup as a
-whole is entitled to at its parent's level while the latter two
-configure how much proportion the tasks in the cgroup have compared to
-its direct children.
-
-Another way to think about it is assuming that each internal node has
-an implicit leaf child node which hosts all the tasks whose weight is
-configured by leaf_weight[_device]. Let's assume a blkio hierarchy
-composed of five cgroups - root, A, B, AA and AB - with the following
-weights where the names represent the hierarchy.
-
- weight leaf_weight
- root : 125 125
- A : 500 750
- B : 250 500
- AA : 500 500
- AB : 1000 500
-
-root never has a parent making its weight is meaningless. For backward
-compatibility, weight is always kept in sync with leaf_weight. B, AA
-and AB have no child and thus its tasks have no children cgroup to
-compete with. They always get 100% of what the cgroup won at the
-parent level. Considering only the weights which matter, the hierarchy
-looks like the following.
-
- root
- / | \
- A B leaf
- 500 250 125
- / | \
- AA AB leaf
- 500 1000 750
-
-If all cgroups have active IOs and competing with each other, disk
-time will be distributed like the following.
-
-Distribution below root. The total active weight at this level is
-A:500 + B:250 + C:125 = 875.
-
- root-leaf : 125 / 875 =~ 14%
- A : 500 / 875 =~ 57%
- B(-leaf) : 250 / 875 =~ 28%
-
-A has children and further distributes its 57% among the children and
-the implicit leaf node. The total active weight at this level is
-AA:500 + AB:1000 + A-leaf:750 = 2250.
-
- A-leaf : ( 750 / 2250) * A =~ 19%
- AA(-leaf) : ( 500 / 2250) * A =~ 12%
- AB(-leaf) : (1000 / 2250) * A =~ 25%
-
-CFQ IOPS Mode for group scheduling
-===================================
-Basic CFQ design is to provide priority based time slices. Higher priority
-process gets bigger time slice and lower priority process gets smaller time
-slice. Measuring time becomes harder if storage is fast and supports NCQ and
-it would be better to dispatch multiple requests from multiple cfq queues in
-request queue at a time. In such scenario, it is not possible to measure time
-consumed by single queue accurately.
-
-What is possible though is to measure number of requests dispatched from a
-single queue and also allow dispatch from multiple cfq queue at the same time.
-This effectively becomes the fairness in terms of IOPS (IO operations per
-second).
-
-If one sets slice_idle=0 and if storage supports NCQ, CFQ internally switches
-to IOPS mode and starts providing fairness in terms of number of requests
-dispatched. Note that this mode switching takes effect only for group
-scheduling. For non-cgroup users nothing should change.
-
-CFQ IO scheduler Idling Theory
-===============================
-Idling on a queue is primarily about waiting for the next request to come
-on same queue after completion of a request. In this process CFQ will not
-dispatch requests from other cfq queues even if requests are pending there.
-
-The rationale behind idling is that it can cut down on number of seeks
-on rotational media. For example, if a process is doing dependent
-sequential reads (next read will come on only after completion of previous
-one), then not dispatching request from other queue should help as we
-did not move the disk head and kept on dispatching sequential IO from
-one queue.
-
-CFQ has following service trees and various queues are put on these trees.
-
- sync-idle sync-noidle async
-
-All cfq queues doing synchronous sequential IO go on to sync-idle tree.
-On this tree we idle on each queue individually.
-
-All synchronous non-sequential queues go on sync-noidle tree. Also any
-synchronous write request which is not marked with REQ_IDLE goes on this
-service tree. On this tree we do not idle on individual queues instead idle
-on the whole group of queues or the tree. So if there are 4 queues waiting
-for IO to dispatch we will idle only once last queue has dispatched the IO
-and there is no more IO on this service tree.
-
-All async writes go on async service tree. There is no idling on async
-queues.
-
-CFQ has some optimizations for SSDs: if it detects non-rotational media
-which can support a higher queue depth (multiple requests in flight at a
-time), then it cuts down on idling of individual queues and
-all the queues move to sync-noidle tree and only tree idle remains. This
-tree idling provides isolation with buffered write queues on async tree.
-
-FAQ
-===
-Q1. Why idle at all on queues not marked with REQ_IDLE?
-
-A1. We only do tree idle (all queues on sync-noidle tree) on queues not marked
- with REQ_IDLE. This helps in providing isolation with all the sync-idle
- queues. Otherwise, in the presence of many sequential readers, other
- synchronous IO might not get its fair share of the disk.
-
- For example, say there are 10 sequential readers doing IO and they get
- 100ms each. If a !REQ_IDLE request comes in, it will be scheduled
- roughly after 1 second. If after completion of the !REQ_IDLE request we
- do not idle, and a couple of milliseconds later another !REQ_IDLE
- request comes in, again it will be scheduled after 1 second. Repeat this
- and notice how a workload can lose its disk share and suffer due to
- multiple sequential readers.
-
- fsync can generate dependent IO where a bunch of data is written in the
- context of fsync, and later some journaling data is written. Journaling
- data comes in only after fsync has finished its IO (at least for ext4
- that seemed to be the case). Now if one decides not to idle on the fsync
- thread due to !REQ_IDLE, then the next journaling write will not get
- scheduled for another second. A process doing small fsyncs will suffer
- badly in the presence of multiple sequential readers.
-
- Hence doing tree idling on threads using the !REQ_IDLE flag on requests
- provides isolation from multiple sequential readers, while at the same
- time we do not idle on individual threads.
-
-Q2. When to specify REQ_IDLE?
-A2. I would think that whenever one is doing a synchronous write and
- expecting more writes to be dispatched from the same context soon, one
- should be able to specify REQ_IDLE on the writes, and that should work
- well for most cases.
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 2c1e67058fd3..39e286d7afc9 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -64,7 +64,7 @@ guess, the kernel will put the process issuing IO to sleep for an amount
of time, before entering a classic poll loop. This mode might be a
little slower than pure classic polling, but it will be more efficient.
If set to a value larger than 0, the kernel will put the process issuing
-IO to sleep for this amont of microseconds before entering classic
+IO to sleep for this amount of microseconds before entering classic
polling.
iostats (RW)
@@ -194,4 +194,31 @@ blk-throttle makes decision based on the samplings. Lower time means cgroups
have more smooth throughput, but higher CPU overhead. This exists only when
CONFIG_BLK_DEV_THROTTLING_LOW is enabled.
+zoned (RO)
+----------
+This indicates whether the device is a zoned block device and, if it is, the
+zone model of the device. The possible values indicated by zoned are
+"none" for regular block devices and "host-aware" or "host-managed" for zoned
+block devices. The characteristics of host-aware and host-managed zoned block
+devices are described in the ZBC (Zoned Block Commands) and ZAC
+(Zoned Device ATA Command Set) standards. These standards also define the
+"drive-managed" zone model. However, since drive-managed zoned block devices
+do not support zone commands, they will be treated as regular block devices
+and zoned will report "none".
+
+nr_zones (RO)
+-------------
+For zoned block devices (zoned attribute indicating "host-managed" or
+"host-aware"), this indicates the total number of zones of the device.
+This is always 0 for regular block devices.
+
+chunk_sectors (RO)
+------------------
+This has a different meaning depending on the type of the block device.
+For a RAID device (dm-raid), chunk_sectors indicates the size in 512B sectors
+of the RAID volume stripe segment. For a zoned block device, either host-aware
+or host-managed, chunk_sectors indicates the size in 512B sectors of the zones
+of the device, with the possible exception of the last zone of the device,
+which may be smaller.
+
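As a sketch, all three attributes read back as plain text, so they can be
inspected with a trivial program (the device name is a placeholder):

#include <stdio.h>

static void show(const char *attr)
{
	char path[128], buf[64];
	FILE *f;

	/* "sda" is a placeholder device name. */
	snprintf(path, sizeof(path), "/sys/block/sda/queue/%s", attr);
	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("%-13s: %s", attr, buf);
		fclose(f);
	}
}

int main(void)
{
	show("zoned");		/* "none", "host-aware" or "host-managed" */
	show("nr_zones");	/* 0 for regular block devices */
	show("chunk_sectors");	/* zone size in 512B sectors on zoned devices */
	return 0;
}
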
Jens Axboe <jens.axboe@oracle.com>, February 2009
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index dbe96cb5558e..6a6d67acaf69 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -187,6 +187,8 @@ Takes xa_lock internally:
* :c:func:`xa_erase_bh`
* :c:func:`xa_erase_irq`
* :c:func:`xa_cmpxchg`
+ * :c:func:`xa_cmpxchg_bh`
+ * :c:func:`xa_cmpxchg_irq`
* :c:func:`xa_store_range`
* :c:func:`xa_alloc`
* :c:func:`xa_alloc_bh`
@@ -263,7 +265,8 @@ using :c:func:`xa_lock_irqsave` in both the interrupt handler and process
context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock`
in the interrupt handler. Some of the more common patterns have helper
functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`,
-:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
+:c:func:`xa_erase_bh`, :c:func:`xa_erase_irq`, :c:func:`xa_cmpxchg_bh`
+and :c:func:`xa_cmpxchg_irq`.
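
For illustration, here is a kernel-style sketch of the new _bh variant;
the array and function names are invented, but the call signature matches
xa_cmpxchg:

#include <linux/errno.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(sessions);		/* illustrative array */

/*
 * Swap @old for @new at @id, but only if @old is still present.
 * The _bh variant takes xa_lock with softirqs disabled, so the same
 * array may also be modified safely from bottom-half context.
 */
static int session_replace(unsigned long id, void *old, void *new)
{
	void *cur = xa_cmpxchg_bh(&sessions, id, old, new, GFP_KERNEL);

	if (xa_is_err(cur))
		return xa_err(cur);
	return cur == old ? 0 : -EBUSY;
}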
Sometimes you need to protect access to the XArray with a mutex because
that lock sits above another mutex in the locking hierarchy. That does
diff --git a/Documentation/cpuidle/core.txt b/Documentation/cpuidle/core.txt
deleted file mode 100644
index 63ecc5dc9d8a..000000000000
--- a/Documentation/cpuidle/core.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
- Supporting multiple CPU idle levels in kernel
-
- cpuidle
-
-General Information:
-
-Various CPUs today support multiple idle levels that are differentiated
-by varying exit latencies and power consumption during idle.
-cpuidle is a generic in-kernel infrastructure that separates
-idle policy (governor) from idle mechanism (driver) and provides a
-standardized infrastructure to support independent development of
-governors and drivers.
-
-cpuidle resides under drivers/cpuidle.
-
-Boot options:
-"cpuidle_sysfs_switch"
-enables current_governor interface in /sys/devices/system/cpu/cpuidle/,
-which can be used to switch governors at run time. This boot option
-is meant for developer testing only. In normal usage, kernel picks the
-best governor based on governor ratings.
-SEE ALSO: sysfs.txt in this directory.
diff --git a/Documentation/cpuidle/sysfs.txt b/Documentation/cpuidle/sysfs.txt
deleted file mode 100644
index d1587f434e7b..000000000000
--- a/Documentation/cpuidle/sysfs.txt
+++ /dev/null
@@ -1,98 +0,0 @@
-
-
- Supporting multiple CPU idle levels in kernel
-
- cpuidle sysfs
-
-System global cpuidle related information and tunables are under
-/sys/devices/system/cpu/cpuidle
-
-The current interfaces in this directory have self-explanatory names:
-* current_driver
-* current_governor_ro
-
-With the cpuidle_sysfs_switch boot option (meant for developer testing),
-the following objects are visible instead.
-* current_driver
-* available_governors
-* current_governor
-In this case users can switch the governor at run time by writing
-to current_governor.
-
-
-Per-logical-CPU cpuidle information is under
-/sys/devices/system/cpu/cpuX/cpuidle
-for each online CPU X.
-
---------------------------------------------------------------------------------
-# ls -lR /sys/devices/system/cpu/cpu0/cpuidle/
-/sys/devices/system/cpu/cpu0/cpuidle/:
-total 0
-drwxr-xr-x 2 root root 0 Feb 8 10:42 state0
-drwxr-xr-x 2 root root 0 Feb 8 10:42 state1
-drwxr-xr-x 2 root root 0 Feb 8 10:42 state2
-drwxr-xr-x 2 root root 0 Feb 8 10:42 state3
-
-/sys/devices/system/cpu/cpu0/cpuidle/state0:
-total 0
--r--r--r-- 1 root root 4096 Feb 8 10:42 desc
--rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
--r--r--r-- 1 root root 4096 Feb 8 10:42 latency
--r--r--r-- 1 root root 4096 Feb 8 10:42 name
--r--r--r-- 1 root root 4096 Feb 8 10:42 power
--r--r--r-- 1 root root 4096 Feb 8 10:42 residency
--r--r--r-- 1 root root 4096 Feb 8 10:42 time
--r--r--r-- 1 root root 4096 Feb 8 10:42 usage
-
-/sys/devices/system/cpu/cpu0/cpuidle/state1:
-total 0
--r--r--r-- 1 root root 4096 Feb 8 10:42 desc
--rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
--r--r--r-- 1 root root 4096 Feb 8 10:42 latency
--r--r--r-- 1 root root 4096 Feb 8 10:42 name
--r--r--r-- 1 root root 4096 Feb 8 10:42 power
--r--r--r-- 1 root root 4096 Feb 8 10:42 residency
--r--r--r-- 1 root root 4096 Feb 8 10:42 time
--r--r--r-- 1 root root 4096 Feb 8 10:42 usage
-
-/sys/devices/system/cpu/cpu0/cpuidle/state2:
-total 0
--r--r--r-- 1 root root 4096 Feb 8 10:42 desc
--rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
--r--r--r-- 1 root root 4096 Feb 8 10:42 latency
--r--r--r-- 1 root root 4096 Feb 8 10:42 name
--r--r--r-- 1 root root 4096 Feb 8 10:42 power
--r--r--r-- 1 root root 4096 Feb 8 10:42 residency
--r--r--r-- 1 root root 4096 Feb 8 10:42 time
--r--r--r-- 1 root root 4096 Feb 8 10:42 usage
-
-/sys/devices/system/cpu/cpu0/cpuidle/state3:
-total 0
--r--r--r-- 1 root root 4096 Feb 8 10:42 desc
--rw-r--r-- 1 root root 4096 Feb 8 10:42 disable
--r--r--r-- 1 root root 4096 Feb 8 10:42 latency
--r--r--r-- 1 root root 4096 Feb 8 10:42 name
--r--r--r-- 1 root root 4096 Feb 8 10:42 power
--r--r--r-- 1 root root 4096 Feb 8 10:42 residency
--r--r--r-- 1 root root 4096 Feb 8 10:42 time
--r--r--r-- 1 root root 4096 Feb 8 10:42 usage
---------------------------------------------------------------------------------
-
-
-* desc : Small description about the idle state (string)
-* disable : Option to disable this idle state (bool) -> see note below
-* latency : Latency to exit out of this idle state (in microseconds)
-* residency : Time after which a state becomes more efficient than any
- shallower state (in microseconds)
-* name : Name of the idle state (string)
-* power : Power consumed while in this idle state (in milliwatts)
-* time : Total time spent in this idle state (in microseconds)
-* usage : Number of times this state was entered (count)
-
-Note:
-The behavior and the effect of the disable variable depends on the
-implementation of a particular governor. In the ladder governor, for
-example, it is not coherent, i.e. if one is disabling a light state,
-then all deeper states are disabled as well, but the disable variable
-does not reflect it. Likewise, if one enables a deep state but a lighter
-state still is disabled, then this has no effect.
diff --git a/Documentation/crypto/api.rst b/Documentation/crypto/api.rst
index 2e519193ab4a..b91b31736df8 100644
--- a/Documentation/crypto/api.rst
+++ b/Documentation/crypto/api.rst
@@ -1,15 +1,6 @@
Programming Interface
=====================
-Please note that the kernel crypto API contains the AEAD givcrypt API
-(crypto_aead_giv\* and aead_givcrypt\* function calls in
-include/crypto/aead.h). This API is obsolete and will be removed in the
-future. To obtain the functionality of an AEAD cipher with internal IV
-generation, use the IV generator as a regular cipher. For example,
-rfc4106(gcm(aes)) is the AEAD cipher with external IV generation and
-seqniv(rfc4106(gcm(aes))) implies that the kernel crypto API generates
-the IV. Different IV generators are available.
-
.. class:: toc-title
Table of contents
diff --git a/Documentation/crypto/architecture.rst b/Documentation/crypto/architecture.rst
index ca2d09b991f5..ee8ff0762d7f 100644
--- a/Documentation/crypto/architecture.rst
+++ b/Documentation/crypto/architecture.rst
@@ -157,10 +157,6 @@ applicable to a cipher, it is not displayed:
- rng for random number generator
- - givcipher for cipher with associated IV generator (see the geniv
- entry below for the specification of the IV generator type used by
- the cipher implementation)
-
- kpp for a Key-agreement Protocol Primitive (KPP) cipher such as
an ECDH or DH implementation
@@ -174,16 +170,7 @@ applicable to a cipher, it is not displayed:
- digestsize: output size of the message digest
-- geniv: IV generation type:
-
- - eseqiv for encrypted sequence number based IV generation
-
- - seqiv for sequence number based IV generation
-
- - chainiv for chain iv generation
-
- - <builtin> is a marker that the cipher implements IV generation and
- handling as it is specific to the given cipher
+- geniv: IV generator (obsolete)
Key Sizes
---------
@@ -218,10 +205,6 @@ the aforementioned cipher types:
- CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher
-- CRYPTO_ALG_TYPE_GIVCIPHER Asynchronous multi-block cipher packed
- together with an IV generator (see geniv field in the /proc/crypto
- listing for the known IV generators)
-
- CRYPTO_ALG_TYPE_KPP Key-agreement Protocol Primitive (KPP) such as
an ECDH or DH implementation
@@ -338,18 +321,14 @@ uses the API applicable to the cipher type specified for the block.
The following call sequence is applicable when the IPSEC layer triggers
an encryption operation with the esp_output function. During
-configuration, the administrator set up the use of rfc4106(gcm(aes)) as
-the cipher for ESP. The following call sequence is now depicted in the
-ASCII art above:
+configuration, the administrator set up the use of seqiv(rfc4106(gcm(aes)))
+as the cipher for ESP. The following call sequence is now depicted in
+the ASCII art above:
1. esp_output() invokes crypto_aead_encrypt() to trigger an
encryption operation of the AEAD cipher with IV generator.
- In case of GCM, the SEQIV implementation is registered as GIVCIPHER
- in crypto_rfc4106_alloc().
-
- The SEQIV performs its operation to generate an IV where the core
- function is seqiv_geniv().
+ The SEQIV generates the IV.
2. Now, SEQIV uses the AEAD API function calls to invoke the associated
AEAD cipher. In our case, during the instantiation of SEQIV, the
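
For reference, a consumer would reach the template named above through the
ordinary AEAD allocation call. This is only a hedged sketch; the function
and the surrounding code are invented for illustration, and only the
template string comes from the text:

#include <linux/err.h>
#include <crypto/aead.h>

/* Illustrative only: allocate the ESP cipher with kernel-side IV
 * generation, using the template string discussed above. */
static struct crypto_aead *esp_alloc_tfm(void)
{
	struct crypto_aead *tfm;

	tfm = crypto_alloc_aead("seqiv(rfc4106(gcm(aes)))", 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	/* For rfc4106, the key later passed to crypto_aead_setkey()
	 * carries a 4-byte salt appended to the AES key. */
	return tfm;
}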
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
index 46d0af1f0872..c20f38e56544 100644
--- a/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
@@ -58,19 +58,11 @@ This binding for the SCU power domain providers uses the generic power
domain binding[2].
Required properties:
-- compatible: Should be "fsl,scu-pd".
-- #address-cells: Should be 1.
-- #size-cells: Should be 0.
-
-Required properties for power domain sub nodes:
-- #power-domain-cells: Must be 0.
-
-Optional Properties:
-- reg: Resource ID of this power domain.
- No exist means uncontrollable by user.
+- compatible: Should be "fsl,imx8qxp-scu-pd".
+- #power-domain-cells: Must be 1. Contains the Resource ID used by
+ SCU commands.
See detailed Resource ID list from:
- include/dt-bindings/power/imx-rsrc.h
-- power-domains: phandle pointing to the parent power domain.
+ include/dt-bindings/firmware/imx/rsrc.h
Clock bindings based on SCU Message Protocol
------------------------------------------------------------
@@ -152,22 +144,9 @@ firmware {
...
};
- imx8qx-pm {
- compatible = "fsl,scu-pd";
- #address-cells = <1>;
- #size-cells = <0>;
-
- pd_dma: dma-power-domain {
- #power-domain-cells = <0>;
-
- pd_dma_lpuart0: dma-lpuart0@57 {
- reg = <SC_R_UART_0>;
- #power-domain-cells = <0>;
- power-domains = <&pd_dma>;
- };
- ...
- };
- ...
+ pd: imx8qx-pd {
+ compatible = "fsl,imx8qxp-scu-pd";
+ #power-domain-cells = <1>;
};
};
};
@@ -179,5 +158,5 @@ serial@5a060000 {
clocks = <&clk IMX8QXP_UART0_CLK>,
<&clk IMX8QXP_UART0_IPG_CLK>;
clock-names = "per", "ipg";
- power-domains = <&pd_dma_lpuart0>;
+ power-domains = <&pd IMX_SC_R_UART_0>;
};
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
index 4e4a3c0ab9ab..de4075413d91 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
@@ -11,6 +11,7 @@ Required Properties:
- "mediatek,mt6797-apmixedsys"
- "mediatek,mt7622-apmixedsys"
- "mediatek,mt7623-apmixedsys", "mediatek,mt2701-apmixedsys"
+ - "mediatek,mt7629-apmixedsys"
- "mediatek,mt8135-apmixedsys"
- "mediatek,mt8173-apmixedsys"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
index f17cfe64255d..6b7e8067e7aa 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
@@ -9,6 +9,7 @@ Required Properties:
- "mediatek,mt2701-ethsys", "syscon"
- "mediatek,mt7622-ethsys", "syscon"
- "mediatek,mt7623-ethsys", "mediatek,mt2701-ethsys", "syscon"
+ - "mediatek,mt7629-ethsys", "syscon"
- #clock-cells: Must be 1
- #reset-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
index 89f4272a1441..417bd83d1378 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
@@ -12,6 +12,7 @@ Required Properties:
- "mediatek,mt6797-infracfg", "syscon"
- "mediatek,mt7622-infracfg", "syscon"
- "mediatek,mt7623-infracfg", "mediatek,mt2701-infracfg", "syscon"
+ - "mediatek,mt7629-infracfg", "syscon"
- "mediatek,mt8135-infracfg", "syscon"
- "mediatek,mt8173-infracfg", "syscon"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt
index 7fe5dc6097a6..d179a61536f4 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pciesys.txt
@@ -7,6 +7,7 @@ Required Properties:
- compatible: Should be:
- "mediatek,mt7622-pciesys", "syscon"
+ - "mediatek,mt7629-pciesys", "syscon"
- #clock-cells: Must be 1
- #reset-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt
index 6755514deb80..4c7e478117a0 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt
@@ -11,6 +11,7 @@ Required Properties:
- "mediatek,mt2712-pericfg", "syscon"
- "mediatek,mt7622-pericfg", "syscon"
- "mediatek,mt7623-pericfg", "mediatek,mt2701-pericfg", "syscon"
+ - "mediatek,mt7629-pericfg", "syscon"
- "mediatek,mt8135-pericfg", "syscon"
- "mediatek,mt8173-pericfg", "syscon"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt
index d113b8e741f3..30cb645c0e54 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt
@@ -7,6 +7,7 @@ Required Properties:
- compatible: Should be:
- "mediatek,mt7622-sgmiisys", "syscon"
+ - "mediatek,mt7629-sgmiisys", "syscon"
- #clock-cells: Must be 1
The SGMIISYS controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt
index b8184da2508c..7cb02c930613 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ssusbsys.txt
@@ -7,6 +7,7 @@ Required Properties:
- compatible: Should be:
- "mediatek,mt7622-ssusbsys", "syscon"
+ - "mediatek,mt7629-ssusbsys", "syscon"
- #clock-cells: Must be 1
- #reset-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
index d849465b8c99..d160c2b4b6fe 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
@@ -11,6 +11,7 @@ Required Properties:
- "mediatek,mt6797-topckgen"
- "mediatek,mt7622-topckgen"
- "mediatek,mt7623-topckgen", "mediatek,mt2701-topckgen"
+ - "mediatek,mt7629-topckgen"
- "mediatek,mt8135-topckgen"
- "mediatek,mt8173-topckgen"
- #clock-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt b/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt
index 87dfb33fb3be..b9d533717dff 100644
--- a/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt
+++ b/Documentation/devicetree/bindings/bus/sun50i-de2-bus.txt
@@ -1,11 +1,14 @@
-Device tree bindings for Allwinner A64 DE2 bus
+Device tree bindings for Allwinner DE2/3 bus
The Allwinner A64 DE2 is on a special bus, which needs a SRAM region (SRAM C)
-to be claimed for enabling the access.
+to be claimed for enabling the access. The DE3 on the Allwinner H6 is in the
+same situation, and this binding also applies to it.
Required properties:
- - compatible: Should contain "allwinner,sun50i-a64-de2"
+ - compatible: Should be one of:
+ - "allwinner,sun50i-a64-de2"
+ - "allwinner,sun50i-h6-de3", "allwinner,sun50i-a64-de2"
- reg: A resource specifier for the register space
- #address-cells: Must be set to 1
- #size-cells: Must be set to 1
diff --git a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
index b455c5aa9139..4d94091c1d2d 100644
--- a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
+++ b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt
@@ -9,15 +9,13 @@ Required Properties:
- "amlogic,meson8-clkc" for Meson8 (S802) SoCs
- "amlogic,meson8b-clkc" for Meson8 (S805) SoCs
- "amlogic,meson8m2-clkc" for Meson8m2 (S812) SoCs
-- reg: it must be composed by two tuples:
- 0) physical base address of the xtal register and length of memory
- mapped region.
- 1) physical base address of the clock controller and length of memory
- mapped region.
-
- #clock-cells: should be 1.
- #reset-cells: should be 1.
+Parent node should have the following properties :
+- compatible: "amlogic,meson-hhi-sysctrl", "simple-mfd", "syscon"
+- reg: base address and size of the HHI system control register space.
+
Each clock is assigned an identifier and client nodes can use this identifier
to specify the clock which they consume. All available clocks are defined as
preprocessor macros in the dt-bindings/clock/meson8b-clkc.h header and can be
@@ -30,9 +28,8 @@ device tree sources).
Example: Clock controller node:
- clkc: clock-controller@c1104000 {
+ clkc: clock-controller {
compatible = "amlogic,meson8b-clkc";
- reg = <0xc1108000 0x4>, <0xc1104000 0x460>;
#clock-cells = <1>;
#reset-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/clock/clock-bindings.txt b/Documentation/devicetree/bindings/clock/clock-bindings.txt
index 2ec489eebe72..b646bbcf7f92 100644
--- a/Documentation/devicetree/bindings/clock/clock-bindings.txt
+++ b/Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -168,3 +168,19 @@ a shared clock is forbidden.
Configuration of common clocks, which affect multiple consumer devices can
be similarly specified in the clock provider node.
+
+==Protected clocks==
+
+Some platforms or firmware may not fully expose all the clocks to the OS, such
+as in situations where those clocks are used by drivers running in ARM secure
+execution levels. Such a configuration can be specified in the device tree with
+the protected-clocks property in the form of a clock specifier list. This
+property should only be specified in the node that provides the clocks being
+protected:
+
+ clock-controller@a000f000 {
+ compatible = "vendor,clk95;
+ reg = <0xa000f000 0x1000>
+ #clocks-cells = <1>;
+ ...
+ protected-clocks = <UART3_CLK>, <SPI5_CLK>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx6q-clock.txt b/Documentation/devicetree/bindings/clock/imx6q-clock.txt
index e1308346e00d..13d36d4c6991 100644
--- a/Documentation/devicetree/bindings/clock/imx6q-clock.txt
+++ b/Documentation/devicetree/bindings/clock/imx6q-clock.txt
@@ -13,6 +13,9 @@ Optional properties:
management IC (PMIC) triggered via PMIC_STBY_REQ signal.
Boards that are designed to initiate poweroff on PMIC_ON_REQ signal should
be using "syscon-poweroff" driver instead.
+- clocks: list of clock specifiers, must contain an entry for each entry
+ in clock-names
+- clock-names: valid names are "osc", "ckil", "ckih1", "anaclk1" and "anaclk2"
The clock consumer should specify the desired clock by having the clock
ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx6qdl-clock.h
diff --git a/Documentation/devicetree/bindings/clock/imx7ulp-clock.txt b/Documentation/devicetree/bindings/clock/imx7ulp-clock.txt
new file mode 100644
index 000000000000..a4f8cd478f92
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx7ulp-clock.txt
@@ -0,0 +1,104 @@
+* Clock bindings for Freescale i.MX7ULP
+
+i.MX7ULP Clock functions are under joint control of the System
+Clock Generation (SCG) modules, Peripheral Clock Control (PCC)
+modules, and Core Mode Controller (CMC) blocks.
+
+The clocking scheme provides clear separation between M4 domain
+and A7 domain. Except for a few clock sources shared between two
+domains, such as the System Oscillator clock, the Slow IRC (SIRC),
+and the Fast IRC clock (FIRCLK), clock sources and clock
+management are separated and contained within each domain.
+
+M4 clock management consists of SCG0, PCC0, PCC1, and CMC0 modules.
+A7 clock management consists of SCG1, PCC2, PCC3, and CMC1 modules.
+
+Note: this binding doc is only for A7 clock domain.
+
+System Clock Generation (SCG) modules:
+---------------------------------------------------------------------
+The System Clock Generation (SCG) is responsible for clock generation
+and distribution across this device. Functions performed by the SCG
+include: clock reference selection, generation of clock used to derive
+processor, system, peripheral bus and external memory interface clocks,
+source selection for peripheral clocks and control of power saving
+clock gating mode.
+
+Required properties:
+
+- compatible: Should be "fsl,imx7ulp-scg1".
+- reg : Should contain registers location and length.
+- #clock-cells: Should be <1>.
+- clocks: Should contain the fixed input clocks.
+- clock-names: Should contain the following clock names:
+ "rosc", "sosc", "sirc", "firc", "upll", "mpll".
+
+Peripheral Clock Control (PCC) modules:
+---------------------------------------------------------------------
+The Peripheral Clock Control (PCC) is responsible for clock selection,
+optional division and clock gating mode for peripherals in their
+respective power domain.
+
+Required properties:
+- compatible: Should be one of:
+ "fsl,imx7ulp-pcc2",
+ "fsl,imx7ulp-pcc3".
+- reg : Should contain registers location and length.
+- #clock-cells: Should be <1>.
+- clocks: Should contain the fixed input clocks.
+- clock-names: Should contain the following clock names:
+ "nic1_bus_clk", "nic1_clk", "ddr_clk", "apll_pfd2",
+ "apll_pfd1", "apll_pfd0", "upll", "sosc_bus_clk",
+ "mpll", "firc_bus_clk", "rosc", "spll_bus_clk";
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell.
+See include/dt-bindings/clock/imx7ulp-clock.h
+for the full list of i.MX7ULP clock IDs of each module.
+
+Examples:
+
+#include <dt-bindings/clock/imx7ulp-clock.h>
+
+scg1: scg1@403e0000 {
+ compatible = "fsl,imx7ulp-scg1;
+ reg = <0x403e0000 0x10000>;
+ clocks = <&rosc>, <&sosc>, <&sirc>,
+ <&firc>, <&upll>, <&mpll>;
+ clock-names = "rosc", "sosc", "sirc",
+ "firc", "upll", "mpll";
+ #clock-cells = <1>;
+};
+
+pcc2: pcc2@403f0000 {
+ compatible = "fsl,imx7ulp-pcc2";
+ reg = <0x403f0000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>,
+ <&scg1 IMX7ULP_CLK_NIC1_DIV>,
+ <&scg1 IMX7ULP_CLK_DDR_DIV>,
+ <&scg1 IMX7ULP_CLK_APLL_PFD2>,
+ <&scg1 IMX7ULP_CLK_APLL_PFD1>,
+ <&scg1 IMX7ULP_CLK_APLL_PFD0>,
+ <&scg1 IMX7ULP_CLK_UPLL>,
+ <&scg1 IMX7ULP_CLK_SOSC_BUS_CLK>,
+ <&scg1 IMX7ULP_CLK_MIPI_PLL>,
+ <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>,
+ <&scg1 IMX7ULP_CLK_ROSC>,
+ <&scg1 IMX7ULP_CLK_SPLL_BUS_CLK>;
+ clock-names = "nic1_bus_clk", "nic1_clk", "ddr_clk",
+ "apll_pfd2", "apll_pfd1", "apll_pfd0",
+ "upll", "sosc_bus_clk", "mpll",
+ "firc_bus_clk", "rosc", "spll_bus_clk";
+};
+
+usdhc1: usdhc@40380000 {
+ compatible = "fsl,imx7ulp-usdhc";
+ reg = <0x40380000 0x10000>;
+ interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>,
+ <&scg1 IMX7ULP_CLK_NIC1_DIV>,
+ <&pcc2 IMX7ULP_CLK_USDHC1>;
+ clock-names ="ipg", "ahb", "per";
+ bus-width = <4>;
+};
diff --git a/Documentation/devicetree/bindings/clock/imx8mq-clock.txt b/Documentation/devicetree/bindings/clock/imx8mq-clock.txt
new file mode 100644
index 000000000000..52de8263e012
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx8mq-clock.txt
@@ -0,0 +1,20 @@
+* Clock bindings for NXP i.MX8M Quad
+
+Required properties:
+- compatible: Should be "fsl,imx8mq-ccm"
+- reg: Address and length of the register set
+- #clock-cells: Should be <1>
+- clocks: list of clock specifiers, must contain an entry for each required
+ entry in clock-names
+- clock-names: should include the following entries:
+ - "ckil"
+ - "osc_25m"
+ - "osc_27m"
+ - "clk_ext1"
+ - "clk_ext2"
+ - "clk_ext3"
+ - "clk_ext4"
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell. See include/dt-bindings/clock/imx8mq-clock.h
+for the full list of i.MX8M Quad clock IDs.
diff --git a/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt b/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt
new file mode 100644
index 000000000000..965cfa42e025
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/imx8qxp-lpcg.txt
@@ -0,0 +1,51 @@
+* NXP i.MX8QXP LPCG (Low-Power Clock Gating) Clock bindings
+
+The Low-Power Clock Gate (LPCG) modules contain a local programming
+model to control the clock gates for the peripherals. An LPCG module
+is used to locally gate the clocks for the associated peripheral.
+
+Note:
+This level of clock gating is provided after the clocks are generated
+by the SCU resources and clock controls. Thus even if the clock is
+enabled by these control bits, it might still not be running based
+on the base resource.
+
+Required properties:
+- compatible: Should be one of:
+ "fsl,imx8qxp-lpcg-adma",
+ "fsl,imx8qxp-lpcg-conn",
+ "fsl,imx8qxp-lpcg-dc",
+ "fsl,imx8qxp-lpcg-dsp",
+ "fsl,imx8qxp-lpcg-gpu",
+ "fsl,imx8qxp-lpcg-hsio",
+ "fsl,imx8qxp-lpcg-img",
+ "fsl,imx8qxp-lpcg-lsio",
+ "fsl,imx8qxp-lpcg-vpu"
+- reg: Address and length of the register set
+- #clock-cells: Should be <1>
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell.
+See the full list of clock IDs from:
+include/dt-bindings/clock/imx8qxp-clock.h
+
+Examples:
+
+#include <dt-bindings/clock/imx8qxp-clock.h>
+
+conn_lpcg: clock-controller@5b200000 {
+ compatible = "fsl,imx8qxp-lpcg-conn";
+ reg = <0x5b200000 0xb0000>;
+ #clock-cells = <1>;
+};
+
+usdhc1: mmc@5b010000 {
+ compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b010000 0x10000>;
+ clocks = <&conn_lpcg IMX8QXP_CONN_LPCG_SDHC0_IPG_CLK>,
+ <&conn_lpcg IMX8QXP_CONN_LPCG_SDHC0_PER_CLK>,
+ <&conn_lpcg IMX8QXP_CONN_LPCG_SDHC0_HCLK>;
+ clock-names = "ipg", "per", "ahb";
+};
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 52d9345c9927..8661c3cd3ccf 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -35,6 +35,8 @@ be part of GCC and hence the TSENS properties can also be
part of the GCC/clock-controller node.
For more details on the TSENS properties please refer
Documentation/devicetree/bindings/thermal/qcom-tsens.txt
+- protected-clocks : Protected clock specifier list as per common clock
+ binding.
Example:
clock-controller@900000 {
@@ -55,3 +57,17 @@ Example of GCC with TSENS properties:
#reset-cells = <1>;
#thermal-sensor-cells = <1>;
};
+
+Example of GCC with protected-clocks properties:
+ clock-controller@100000 {
+ compatible = "qcom,gcc-sdm845";
+ reg = <0x100000 0x1f0000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ protected-clocks = <GCC_QSPI_CORE_CLK>,
+ <GCC_QSPI_CORE_CLK_SRC>,
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_LPASS_Q6_AXI_CLK>,
+ <GCC_LPASS_SWAY_CLK>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
new file mode 100644
index 000000000000..4e5215ef1acd
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
@@ -0,0 +1,22 @@
+Qualcomm Graphics Clock & Reset Controller Binding
+--------------------------------------------------
+
+Required properties :
+- compatible : shall contain "qcom,sdm845-gpucc"
+- reg : shall contain base register location and length
+- #clock-cells : from common clock binding, shall contain 1
+- #reset-cells : from common reset binding, shall contain 1
+- #power-domain-cells : from generic power domain binding, shall contain 1
+- clocks : shall contain the XO clock
+- clock-names : shall be "xo"
+
+Example:
+ gpucc: clock-controller@5090000 {
+ compatible = "qcom,sdm845-gpucc";
+ reg = <0x5090000 0x9000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,lpasscc.txt b/Documentation/devicetree/bindings/clock/qcom,lpasscc.txt
new file mode 100644
index 000000000000..b9e9787045b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,lpasscc.txt
@@ -0,0 +1,26 @@
+Qualcomm LPASS Clock Controller Binding
+-----------------------------------------------
+
+Required properties :
+- compatible : shall contain "qcom,sdm845-lpasscc"
+- #clock-cells : from common clock binding, shall contain 1.
+- reg : shall contain base register address and size,
+ in the order
+ Index-0 maps to LPASS_CC register region
+ Index-1 maps to LPASS_QDSP6SS register region
+
+Optional properties :
+- reg-names : register names of LPASS domain
+ "cc", "qdsp6ss".
+
+Example:
+
+The node below has to be defined in cases where the LPASS peripheral loader
+brings the subsystem out of reset.
+
+ lpasscc: clock-controller@17014000 {
+ compatible = "qcom,sdm845-lpasscc";
+ reg = <0x17014000 0x1f004>, <0x17300000 0x200>;
+ reg-names = "cc", "qdsp6ss";
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
index 4491d1c104aa..87b4949e9bc8 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
@@ -16,6 +16,7 @@ Required properties :
"qcom,rpmcc-msm8974", "qcom,rpmcc"
"qcom,rpmcc-apq8064", "qcom,rpmcc"
"qcom,rpmcc-msm8996", "qcom,rpmcc"
+ "qcom,rpmcc-qcs404", "qcom,rpmcc"
- #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/clock/qcom,videocc.txt b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
index e7c035afa778..8a8622c65c5a 100644
--- a/Documentation/devicetree/bindings/clock/qcom,videocc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,videocc.txt
@@ -6,8 +6,6 @@ Required properties :
- reg : shall contain base register location and length
- #clock-cells : from common clock binding, shall contain 1.
- #power-domain-cells : from generic power domain binding, shall contain 1.
-
-Optional properties :
- #reset-cells : from common reset binding, shall contain 1.
Example:
@@ -16,4 +14,5 @@ Example:
reg = <0xab00000 0x10000>;
#clock-cells = <1>;
#power-domain-cells = <1>;
+ #reset-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index 97f46adac85f..c655f28d5918 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -28,6 +28,12 @@ Required properties:
* "fsl,p4080-clockgen"
* "fsl,p5020-clockgen"
* "fsl,p5040-clockgen"
+ * "fsl,t1023-clockgen"
+ * "fsl,t1024-clockgen"
+ * "fsl,t1040-clockgen"
+ * "fsl,t1042-clockgen"
+ * "fsl,t2080-clockgen"
+ * "fsl,t2081-clockgen"
* "fsl,t4240-clockgen"
* "fsl,b4420-clockgen"
* "fsl,b4860-clockgen"
diff --git a/Documentation/devicetree/bindings/clock/sun8i-de2.txt b/Documentation/devicetree/bindings/clock/sun8i-de2.txt
index e94582e8b8a9..41a52c2acffd 100644
--- a/Documentation/devicetree/bindings/clock/sun8i-de2.txt
+++ b/Documentation/devicetree/bindings/clock/sun8i-de2.txt
@@ -1,5 +1,5 @@
-Allwinner Display Engine 2.0 Clock Control Binding
---------------------------------------------------
+Allwinner Display Engine 2.0/3.0 Clock Control Binding
+------------------------------------------------------
Required properties :
- compatible: must contain one of the following compatibles:
@@ -8,6 +8,7 @@ Required properties :
- "allwinner,sun8i-v3s-de2-clk"
- "allwinner,sun50i-a64-de2-clk"
- "allwinner,sun50i-h5-de2-clk"
+ - "allwinner,sun50i-h6-de3-clk"
- reg: Must contain the registers base address and length
- clocks: phandle to the clocks feeding the display engine subsystem.
diff --git a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
index 47d2e902ced4..e3bd88ae456b 100644
--- a/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi-ccu.txt
@@ -22,6 +22,7 @@ Required properties :
- "allwinner,sun50i-h5-ccu"
- "allwinner,sun50i-h6-ccu"
- "allwinner,sun50i-h6-r-ccu"
+ - "allwinner,suniv-f1c100s-ccu"
- "nextthing,gr8-ccu"
- reg: Must contain the registers base address and length
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt
new file mode 100644
index 000000000000..33856947c561
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt
@@ -0,0 +1,172 @@
+Qualcomm Technologies, Inc. CPUFREQ Bindings
+
+CPUFREQ HW is a hardware engine used by some Qualcomm Technologies, Inc. (QTI)
+SoCs to manage frequency in hardware. It is capable of controlling frequency
+for multiple clusters.
+
+Properties:
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,cpufreq-hw".
+
+- clocks
+ Usage: required
+ Value type: <phandle> From common clock binding.
+ Definition: clock handle for XO clock and GPLL0 clock.
+
+- clock-names
+ Usage: required
+ Value type: <string> From common clock binding.
+ Definition: must be "xo", "alternate".
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Addresses and sizes of the HW register bases for each
+ frequency domain.
+- reg-names
+ Usage: Optional
+ Value type: <string>
+ Definition: Frequency domain name i.e.
+ "freq-domain0", "freq-domain1".
+
+- #freq-domain-cells:
+ Usage: required.
+ Definition: Number of cells in a frequency domain specifier.
+
+* Property qcom,freq-domain
+Devices supporting freq-domain must set their "qcom,freq-domain" property with
+a phandle to the cpufreq_hw node followed by the domain ID (0/1) in the CPU DT
+node.
+
+
+Example:
+
+Example 1: Dual-cluster, Quad-core per cluster. CPUs within a cluster switch
+DCVS state together.
+
+/ {
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&L2_0>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&L2_100>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ next-level-cache = <&L2_200>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ next-level-cache = <&L2_300>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ next-level-cache = <&L2_400>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ next-level-cache = <&L2_500>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ next-level-cache = <&L2_600>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "qcom,kryo385";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ next-level-cache = <&L2_700>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+ };
+
+ soc {
+ cpufreq_hw: cpufreq@17d43000 {
+ compatible = "qcom,cpufreq-hw";
+ reg = <0x17d43000 0x1400>, <0x17d45800 0x1400>;
+ reg-names = "freq-domain0", "freq-domain1";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
+ clock-names = "xo", "alternate";
+
+ #freq-domain-cells = <1>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
index 999fb2a810f6..6130e6eb4af8 100644
--- a/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
+++ b/Documentation/devicetree/bindings/crypto/arm-cryptocell.txt
@@ -1,8 +1,12 @@
Arm TrustZone CryptoCell cryptographic engine
Required properties:
-- compatible: Should be one of: "arm,cryptocell-712-ree",
- "arm,cryptocell-710-ree" or "arm,cryptocell-630p-ree".
+- compatible: Should be one of -
+ "arm,cryptocell-713-ree"
+ "arm,cryptocell-703-ree"
+ "arm,cryptocell-712-ree"
+ "arm,cryptocell-710-ree"
+ "arm,cryptocell-630p-ree"
- reg: Base physical address of the engine and length of memory mapped region.
- interrupts: Interrupt number for the device.
diff --git a/Documentation/devicetree/bindings/crypto/fsl-dcp.txt b/Documentation/devicetree/bindings/crypto/fsl-dcp.txt
index 76a0b4e80e83..4e4d387e38a5 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-dcp.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-dcp.txt
@@ -6,6 +6,8 @@ Required properties:
- interrupts : Should contain MXS DCP interrupt numbers, VMI IRQ and DCP IRQ
must be supplied, optionally Secure IRQ can be present, but
is currently not implemented and not used.
+- clocks : Clock reference (only required on some SOCs: 6ull and 6sll).
+- clock-names : Must be "dcp".
Example:
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
index 057b81335775..c65fd7a7467c 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.txt
@@ -67,6 +67,8 @@ Required properties:
Optional properties:
- power-domains: Optional phandle to associated power domain as described in
the file ../power/power_domain.txt
+- amlogic,canvas: phandle to canvas provider node as described in the file
+ ../soc/amlogic/amlogic,canvas.txt
Required nodes:
diff --git a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
index 3aeb0ec06fd0..ba5469dd09f3 100644
--- a/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
+++ b/Documentation/devicetree/bindings/display/bridge/renesas,lvds.txt
@@ -13,6 +13,7 @@ Required properties:
- "renesas,r8a7793-lvds" for R8A7793 (R-Car M2-N) compatible LVDS encoders
- "renesas,r8a7795-lvds" for R8A7795 (R-Car H3) compatible LVDS encoders
- "renesas,r8a7796-lvds" for R8A7796 (R-Car M3-W) compatible LVDS encoders
+ - "renesas,r8a77965-lvds" for R8A77965 (R-Car M3-N) compatible LVDS encoders
- "renesas,r8a77970-lvds" for R8A77970 (R-Car V3M) compatible LVDS encoders
- "renesas,r8a77980-lvds" for R8A77980 (R-Car V3H) compatible LVDS encoders
- "renesas,r8a77990-lvds" for R8A77990 (R-Car E3) compatible LVDS encoders
diff --git a/Documentation/devicetree/bindings/display/himax,hx8357d.txt b/Documentation/devicetree/bindings/display/himax,hx8357d.txt
new file mode 100644
index 000000000000..e641f664763d
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/himax,hx8357d.txt
@@ -0,0 +1,26 @@
+Himax HX8357D display panels
+
+This binding is for display panels using a Himax HX8357D controller in SPI
+mode, such as the Adafruit 3.5" TFT for Raspberry Pi.
+
+Required properties:
+- compatible: "adafruit,yx350hv15", "himax,hx8357d"
+- dc-gpios: D/C pin
+- reg: address of the panel on the SPI bus
+
+The node for this driver must be a child node of a SPI controller, hence
+all mandatory properties described in ../spi/spi-bus.txt must be specified.
+
+Optional properties:
+- rotation: panel rotation in degrees counter clockwise (0,90,180,270)
+- backlight: phandle of the backlight device attached to the panel
+
+Example:
+ display@0{
+ compatible = "adafruit,yx350hv15", "himax,hx8357d";
+ reg = <0>;
+ spi-max-frequency = <32000000>;
+ dc-gpios = <&gpio0 25 GPIO_ACTIVE_HIGH>;
+ rotation = <90>;
+ backlight = <&backlight>;
+ };
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index dfc743219bd8..9ae946942720 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -106,6 +106,7 @@ Required properties:
- clocks: Phandles to device clocks. See [1] for details on clock bindings.
- clock-names: the following clocks are required:
* "iface"
+ * "ref" (only required for new DTS files/entries)
For 28nm HPM/LP, 28nm 8960 PHYs:
- vddio-supply: phandle to vdd-io regulator device node
For 20nm PHY:
diff --git a/Documentation/devicetree/bindings/display/msm/gpu.txt b/Documentation/devicetree/bindings/display/msm/gpu.txt
index 43fac0fe09bb..ac8df3b871f9 100644
--- a/Documentation/devicetree/bindings/display/msm/gpu.txt
+++ b/Documentation/devicetree/bindings/display/msm/gpu.txt
@@ -1,11 +1,13 @@
Qualcomm adreno/snapdragon GPU
Required properties:
-- compatible: "qcom,adreno-XYZ.W", "qcom,adreno"
+- compatible: "qcom,adreno-XYZ.W", "qcom,adreno" or
+ "amd,imageon-XYZ.W", "amd,imageon"
for example: "qcom,adreno-306.0", "qcom,adreno"
Note that you need to list the less specific "qcom,adreno" (since this
is what the device is matched on), in addition to the more specific
with the chip-id.
+ If "amd,imageon" is used, there should be no top level msm device.
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt signal from the gpu.
- clocks: device clocks
diff --git a/Documentation/devicetree/bindings/display/msm/mdp4.txt b/Documentation/devicetree/bindings/display/msm/mdp4.txt
index 3c341a15ccdc..b07eeb38f709 100644
--- a/Documentation/devicetree/bindings/display/msm/mdp4.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdp4.txt
@@ -38,6 +38,8 @@ Required properties:
Optional properties:
- clock-names: the following clocks are optional:
* "lut_clk"
+- qcom,lcdc-align-lsb: Boolean value indicating that LSB alignment should be
+ used for LCDC. This is only valid for 18bpp panels.
Example:
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g101evn010 b/Documentation/devicetree/bindings/display/panel/auo,g101evn010
new file mode 100644
index 000000000000..bc6a0c858e23
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/auo,g101evn010
@@ -0,0 +1,12 @@
+AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,g101evn010"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.txt b/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.txt
new file mode 100644
index 000000000000..35bc0c839f49
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.txt
@@ -0,0 +1,12 @@
+Banana Pi 7" (S070WV20-CT16) TFT LCD Panel
+
+Required properties:
+- compatible: should be "bananapi,s070wv20-ct16"
+- power-supply: see ./panel-common.txt
+
+Optional properties:
+- enable-gpios: see ./simple-panel.txt
+- backlight: see ./simple-panel.txt
+
+This binding is compatible with the simple-panel binding, which is specified
+in ./simple-panel.txt.
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
new file mode 100644
index 000000000000..057f7f3f6dbe
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
@@ -0,0 +1,12 @@
+CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
+
+Required properties:
+- compatible: should be "cdtech,s043wq26h-ct7"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
new file mode 100644
index 000000000000..505615dfa0df
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
@@ -0,0 +1,12 @@
+CDTech(H.K.) Electronics Limited 7" 800x480 color TFT-LCD panel
+
+Required properties:
+- compatible: should be "cdtech,s070wv95-ct16"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt b/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
new file mode 100644
index 000000000000..fbf5dcd15661
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
@@ -0,0 +1,12 @@
+DLC Display Co. DLC1010GIG 10.1" WXGA TFT LCD Panel
+
+Required properties:
+- compatible: should be "dlc,dlc1010gig"
+- power-supply: See simple-panel.txt
+
+Optional properties:
+- enable-gpios: See simple-panel.txt
+- backlight: See simple-panel.txt
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
new file mode 100644
index 000000000000..a89f9c830a85
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
@@ -0,0 +1,42 @@
+Binding for Olimex Ltd. LCD-OLinuXino bridge panel.
+
+This device can be used as a bridge between a host controller and LCD panels.
+Currently supported LCDs are:
+ - LCD-OLinuXino-4.3TS
+ - LCD-OLinuXino-5
+ - LCD-OLinuXino-7
+ - LCD-OLinuXino-10
+
+The panel itself contains:
+ - AT24C16C EEPROM holding panel identification and timing requirements
+ - AR1021 resistive touch screen controller (optional)
+ - FT5x6 capacitive touch screen controller (optional)
+ - GT911/GT928 capacitive touch screen controller (optional)
+
+The above chips share the same I2C bus. The EEPROM is factory preprogrammed with
+device information (id, serial, etc.) and timing requirements.
+
+Touchscreen bindings can be found in these files:
+ - input/touchscreen/goodix.txt
+ - input/touchscreen/edt-ft5x06.txt
+ - input/touchscreen/ar1021.txt
+
+Required properties:
+ - compatible: should be "olimex,lcd-olinuxino"
+ - reg: address of the configuration EEPROM, should be <0x50>
+ - power-supply: phandle of the regulator that provides the supply voltage
+
+Optional properties:
+ - enable-gpios: GPIO pin to enable or disable the panel
+ - backlight: phandle of the backlight device attached to the panel
+
+Example:
+&i2c2 {
+ panel@50 {
+ compatible = "olimex,lcd-olinuxino";
+ reg = <0x50>;
+ power-supply = <&reg_vcc5v0>;
+ enable-gpios = <&pio 7 8 GPIO_ACTIVE_HIGH>;
+ backlight = <&backlight>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt
new file mode 100644
index 000000000000..b94e366f451b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6d16d0.txt
@@ -0,0 +1,30 @@
+Samsung S6D16D0 4" 864x480 AMOLED panel
+
+Required properties:
+ - compatible: should be:
+ "samsung,s6d16d0",
+ - reg: the virtual channel number of a DSI peripheral
+ - vdd1-supply: I/O voltage supply
+ - reset-gpios: a GPIO spec for the reset pin (active low)
+
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in
+media/video-interfaces.txt. This node should describe the panel's video bus.
+
+Example:
+&dsi {
+ ...
+
+ panel@0 {
+ compatible = "samsung,s6d16d0";
+ reg = <0>;
+ vdd1-supply = <&foo>;
+ reset-gpios = <&foo_gpio 0 GPIO_ACTIVE_LOW>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt b/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
index f5e3c6f2095a..40f3d7c713bb 100644
--- a/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
+++ b/Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
@@ -1,47 +1,70 @@
TPO TPG110 Panel
================
-This binding builds on the DPI bindings, adding a few properties
-as a superset of a DPI. See panel-dpi.txt for the required DPI
-bindings.
+This panel driver is a component that acts as an intermediary
+between an RGB output and a variety of panels. The panel
+driver is strapped up in electronics to the desired resolution
+and other properties, and has a control interface over 3WIRE
+SPI. By talking to the TPG110 over SPI, the strapped properties
+can be discovered and the hardware is therefore mostly
+self-describing.
+
+ +--------+
+SPI -> | TPO | -> physical display
+RGB -> | TPG110 |
+ +--------+
+
+If some electrical strap or alternate resolution is desired,
+this can be set up by taking software control of the display
+over the SPI interface. The interface can also adjust
+for properties of the display such as gamma correction and
+certain electrical driving levels.
+
+The TPG110 does not know the physical dimensions of the panel
+connected, so this needs to be specified in the device tree.
+
+It requires a GPIO line for control of its reset line.
+
+The serial protocol has line names that resemble I2C, but the
+protocol is not I2C; it is 3WIRE SPI.
Required properties:
-- compatible : "tpo,tpg110"
+- compatible : one of:
+ "ste,nomadik-nhk15-display", "tpo,tpg110"
+ "tpo,tpg110"
- grestb-gpios : panel reset GPIO
-- scen-gpios : serial control enable GPIO
-- scl-gpios : serial control clock line GPIO
-- sda-gpios : serial control data line GPIO
+- width-mm : see display/panel/panel-common.txt
+- height-mm : see display/panel/panel-common.txt
+
+The device needs to be a child of an SPI bus, see
+spi/spi-bus.txt. The SPI child must set the following
+properties:
+- spi-3wire
+- spi-max-frequency = <3000000>;
+as these are characteristics of this device.
-Required nodes:
-- Video port for DPI input, see panel-dpi.txt
-- Panel timing for DPI setup, see panel-dpi.txt
+The device node can contain one 'port' child node with one child
+'endpoint' node, according to the bindings defined in
+media/video-interfaces.txt. This node should describe the panel's video bus.
Example
-------
-panel {
- compatible = "tpo,tpg110", "panel-dpi";
- grestb-gpios = <&stmpe_gpio44 5 GPIO_ACTIVE_LOW>;
- scen-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
- scl-gpios = <&gpio0 5 GPIO_ACTIVE_HIGH>;
- sda-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+panel: display@0 {
+ compatible = "tpo,tpg110";
+ reg = <0>;
+ spi-3wire;
+ /* 320 ns min period ~= 3 MHz */
+ spi-max-frequency = <3000000>;
+ /* Width and height from data sheet */
+ width-mm = <116>;
+ height-mm = <87>;
+ grestb-gpios = <&foo_gpio 5 GPIO_ACTIVE_LOW>;
backlight = <&bl>;
port {
nomadik_clcd_panel: endpoint {
- remote-endpoint = <&nomadik_clcd_pads>;
+ remote-endpoint = <&foo>;
};
};
-
- panel-timing {
- clock-frequency = <33200000>;
- hactive = <800>;
- hback-porch = <216>;
- hfront-porch = <40>;
- hsync-len = <1>;
- vactive = <480>;
- vback-porch = <35>;
- vfront-porch = <10>;
- vsync-len = <1>;
- };
};
diff --git a/Documentation/devicetree/bindings/display/renesas,du.txt b/Documentation/devicetree/bindings/display/renesas,du.txt
index 9de67be632d1..3c855d9f2719 100644
--- a/Documentation/devicetree/bindings/display/renesas,du.txt
+++ b/Documentation/devicetree/bindings/display/renesas,du.txt
@@ -4,7 +4,9 @@ Required Properties:
- compatible: must be one of the following.
- "renesas,du-r8a7743" for R8A7743 (RZ/G1M) compatible DU
+ - "renesas,du-r8a7744" for R8A7744 (RZ/G1N) compatible DU
- "renesas,du-r8a7745" for R8A7745 (RZ/G1E) compatible DU
+ - "renesas,du-r8a77470" for R8A77470 (RZ/G1C) compatible DU
- "renesas,du-r8a7779" for R8A7779 (R-Car H1) compatible DU
- "renesas,du-r8a7790" for R8A7790 (R-Car H2) compatible DU
- "renesas,du-r8a7791" for R8A7791 (R-Car M2-W) compatible DU
@@ -52,7 +54,9 @@ corresponding to each DU output.
Port0 Port1 Port2 Port3
-----------------------------------------------------------------------------
R8A7743 (RZ/G1M) DPAD 0 LVDS 0 - -
+ R8A7744 (RZ/G1N) DPAD 0 LVDS 0 - -
R8A7745 (RZ/G1E) DPAD 0 DPAD 1 - -
+ R8A77470 (RZ/G1C) DPAD 0 DPAD 1 LVDS 0 -
R8A7779 (R-Car H1) DPAD 0 DPAD 1 - -
R8A7790 (R-Car H2) DPAD 0 LVDS 0 LVDS 1 -
R8A7791 (R-Car M2-W) DPAD 0 LVDS 0 - -
diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
index adc94fc3c9f8..39143424a474 100644
--- a/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
+++ b/Documentation/devicetree/bindings/display/rockchip/dw_hdmi-rockchip.txt
@@ -13,6 +13,7 @@ Required properties:
- compatible: should be one of the following:
"rockchip,rk3288-dw-hdmi"
+ "rockchip,rk3328-dw-hdmi"
"rockchip,rk3399-dw-hdmi"
- reg: See dw_hdmi.txt.
- reg-io-width: See dw_hdmi.txt. Shall be 4.
@@ -34,6 +35,8 @@ Optional properties
- clock-names: May contain "cec" as defined in dw_hdmi.txt.
- clock-names: May contain "grf", power for grf io.
- clock-names: May contain "vpll", external clock for some hdmi phy.
+- phys: from general PHY binding: the phandle for the PHY device.
+- phy-names: Should be "hdmi" if phys references an external phy.
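+
+A minimal sketch of these two properties (the &hdmi_phy label is an
+assumption for illustration, not defined by this binding):
+
+	phys = <&hdmi_phy>;
+	phy-names = "hdmi";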
Example:
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index 7854fff4fc16..f426bdb42f18 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -79,6 +79,7 @@ Required properties:
- compatible: value must be one of:
* "allwinner,sun8i-a83t-dw-hdmi"
* "allwinner,sun50i-a64-dw-hdmi", "allwinner,sun8i-a83t-dw-hdmi"
+ * "allwinner,sun50i-h6-dw-hdmi"
- reg: base address and size of memory-mapped region
- reg-io-width: See dw_hdmi.txt. Shall be 1.
- interrupts: HDMI interrupt number
@@ -86,9 +87,14 @@ Required properties:
* iahb: the HDMI bus clock
* isfr: the HDMI register clock
* tmds: TMDS clock
+ * cec: HDMI CEC clock (H6 only)
+ * hdcp: HDCP clock (H6 only)
+ * hdcp-bus: HDCP bus clock (H6 only)
- clock-names: the clock names mentioned above
- - resets: phandle to the reset controller
- - reset-names: must be "ctrl"
+ - resets:
+ * ctrl: HDMI controller reset
+ * hdcp: HDCP reset (H6 only)
+ - reset-names: reset names mentioned above
- phys: phandle to the DWC HDMI PHY
- phy-names: must be "phy"
@@ -109,6 +115,7 @@ Required properties:
* allwinner,sun8i-h3-hdmi-phy
* allwinner,sun8i-r40-hdmi-phy
* allwinner,sun50i-a64-hdmi-phy
+ * allwinner,sun50i-h6-hdmi-phy
- reg: base address and size of memory-mapped region
- clocks: phandles to the clocks feeding the HDMI PHY
* bus: the HDMI PHY interface clock
@@ -158,6 +165,7 @@ Required properties:
* allwinner,sun9i-a80-tcon-tv
* "allwinner,sun50i-a64-tcon-lcd", "allwinner,sun8i-a83t-tcon-lcd"
* "allwinner,sun50i-a64-tcon-tv", "allwinner,sun8i-a83t-tcon-tv"
+ * "allwinner,sun50i-h6-tcon-tv", "allwinner,sun8i-r40-tcon-tv"
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the TCON.
@@ -220,24 +228,26 @@ It allows display pipeline to be configured in very different ways:
\ [3] TCON-TV1 [1] - TVE1/RGB
Note that both TCON TOP references refer to the same physical unit. Both mixers can be
-connected to any TCON.
+connected to any TCON. Not all TCON TOP variants support all features.
Required properties:
- compatible: value must be one of:
* allwinner,sun8i-r40-tcon-top
+ * allwinner,sun50i-h6-tcon-top
- reg: base address and size of the memory-mapped region.
- clocks: phandle to the clocks feeding the TCON TOP
* bus: TCON TOP interface clock
* tcon-tv0: TCON TV0 clock
- * tve0: TVE0 clock
- * tcon-tv1: TCON TV1 clock
- * tve1: TVE0 clock
- * dsi: MIPI DSI clock
+ * tve0: TVE0 clock (R40 only)
+ * tcon-tv1: TCON TV1 clock (R40 only)
+ * tve1: TVE1 clock (R40 only)
+ * dsi: MIPI DSI clock (R40 only)
- clock-names: clock name mentioned above
- resets: phandle to the reset line driving the TCON TOP
- #clock-cells : must contain 1
- clock-output-names: Names of clocks created for TCON TV0 channel clock,
- TCON TV1 channel clock and DSI channel clock, in that order.
+ TCON TV1 channel clock (R40 only) and DSI channel clock (R40 only), in
+ that order.
- ports: A ports node with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt. 6 ports should
@@ -381,6 +391,7 @@ Required properties:
* allwinner,sun8i-v3s-de2-mixer
* allwinner,sun50i-a64-de2-mixer-0
* allwinner,sun50i-a64-de2-mixer-1
+ * allwinner,sun50i-h6-de3-mixer-0
- reg: base address and size of the memory-mapped region.
- clocks: phandles to the clocks feeding the mixer
* bus: the mixer interface clock
@@ -415,9 +426,10 @@ Required properties:
* allwinner,sun8i-v3s-display-engine
* allwinner,sun9i-a80-display-engine
* allwinner,sun50i-a64-display-engine
+ * allwinner,sun50i-h6-display-engine
- allwinner,pipelines: list of phandle to the display engine
- frontends (DE 1.0) or mixers (DE 2.0) available.
+ frontends (DE 1.0) or mixers (DE 2.0/3.0) available.
Example:
diff --git a/Documentation/devicetree/bindings/display/truly,nt35597.txt b/Documentation/devicetree/bindings/display/truly,nt35597.txt
new file mode 100644
index 000000000000..f39c77ee36ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/truly,nt35597.txt
@@ -0,0 +1,59 @@
+Truly model NT35597 DSI display driver
+
+The Truly NT35597 is a generic display driver, currently only configured
+for use in the 2K display on the Qualcomm SDM845 MTP board.
+
+Required properties:
+- compatible: should be "truly,nt35597-2K-display"
+- vdda-supply: phandle of the regulator that provides the supply voltage
+ for the Power IC
+- vdispp-supply: phandle of the regulator that provides the supply voltage
+ for positive LCD bias
+- vdispn-supply: phandle of the regulator that provides the supply voltage
+ for negative LCD bias
+- reset-gpios: phandle of the gpio for the reset line (active low).
+ The drive strength should be 8mA; the gpio can be configured using mux,
+ pinctrl and pinctrl-names
+- mode-gpios: phandle of the gpio for choosing the mode of the display,
+ single DSI or dual DSI.
+ This should be low for dual DSI mode and high for single DSI mode
+- ports: This device has two video ports driven by two DSIs. Their connections
+ are modeled using the OF graph bindings specified in
+ Documentation/devicetree/bindings/graph.txt.
+ - port@0: DSI input port driven by master DSI
+ - port@1: DSI input port driven by secondary DSI
+
+Example:
+
+ dsi@ae94000 {
+ panel@0 {
+ compatible = "truly,nt35597-2K-display";
+ reg = <0>;
+ vdda-supply = <&pm8998_l14>;
+ vdispp-supply = <&lab_regulator>;
+ vdispn-supply = <&ibb_regulator>;
+ pinctrl-names = "default", "suspend";
+ pinctrl-0 = <&dpu_dsi_active>;
+ pinctrl-1 = <&dpu_dsi_suspend>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+ mode-gpios = <&tlmm 52 GPIO_ACTIVE_HIGH>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ panel0_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ panel1_in: endpoint {
+ remote-endpoint = <&dsi1_out>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt b/Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
new file mode 100644
index 000000000000..69da2115abdc
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/cdns,i3c-master.txt
@@ -0,0 +1,43 @@
+Bindings for Cadence I3C master block
+=====================================
+
+Required properties:
+--------------------
+- compatible: shall be "cdns,i3c-master"
+- clocks: shall reference the pclk and sysclk
+- clock-names: shall contain "pclk" and "sysclk"
+- interrupts: the interrupt line connected to this I3C master
+- reg: I3C master registers
+
+Mandatory properties defined by the generic binding (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details):
+
+- #address-cells: shall be set to 3
+- #size-cells: shall be set to 0
+
+Optional properties defined by the generic binding (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details):
+
+- i2c-scl-hz
+- i3c-scl-hz
+
+I3C devices connected on the bus follow the generic description (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details).
+
+Example:
+
+ i3c-master@d040000 {
+ compatible = "cdns,i3c-master";
+ clocks = <&coreclock>, <&i3csysclock>;
+ clock-names = "pclk", "sysclk";
+ interrupts = <3 0>;
+ reg = <0x0d040000 0x1000>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ i2c-scl-hz = <100000>;
+
+ nunchuk: nunchuk@52 {
+ compatible = "nintendo,nunchuk";
+ reg = <0x52 0x0 0x10>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i3c/i3c.txt b/Documentation/devicetree/bindings/i3c/i3c.txt
new file mode 100644
index 000000000000..ab729a0a86ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/i3c.txt
@@ -0,0 +1,138 @@
+Generic device tree bindings for I3C busses
+===========================================
+
+This document describes generic bindings that should be used to describe I3C
+busses in a device tree.
+
+Required properties
+-------------------
+
+- #address-cells - should be <3>. Read more about addresses below.
+- #size-cells - should be <0>.
+- compatible - name of the I3C master controller driving the I3C bus
+
+For other required properties e.g. to describe register sets,
+clocks, etc. check the binding documentation of the specific driver.
+The node describing an I3C bus should be named i3c-master.
+
+Optional properties
+-------------------
+
+These properties may not be supported by all I3C master drivers. Each I3C
+master binding should specify which of them are supported.
+
+- i3c-scl-hz: frequency of the SCL signal used for I3C transfers.
+ When undefined the core sets it to 12.5MHz.
+
+- i2c-scl-hz: frequency of the SCL signal used for I2C transfers.
+ When undefined, the core looks at LVR (Legacy Virtual Register)
+ values of I2C devices described in the device tree to determine
+ the maximum I2C frequency.
+
+I2C devices
+===========
+
+Each I2C device connected to the bus should be described in a subnode. All
+properties described in Documentation/devicetree/bindings/i2c/i2c.txt are
+valid here, but several new properties have been added.
+
+New constraint on existing properties:
+--------------------------------------
+- reg: contains 3 cells
+ + first cell : still encoding the I2C address
+
+ + second cell: shall be 0
+
+ + third cell: shall encode the I3C LVR (Legacy Virtual Register)
+ bit[31:8]: unused/ignored
+ bit[7:5]: I2C device index. Possible values
+ * 0: I2C device has a 50 ns spike filter
+ * 1: I2C device does not have a 50 ns spike filter but supports high
+ frequency on SCL
+ * 2: I2C device does not have a 50 ns spike filter and is not tolerant
+ to high frequencies
+ * 3-7: reserved
+
+ bit[4]: tells whether the device operates in FM (Fast Mode) or FM+ mode
+ * 0: FM+ mode
+ * 1: FM mode
+
+ bit[3:0]: device type
+ * 0-15: reserved
+
+The I2C node unit-address should always match the first cell of the reg
+property: <device-type>@<i2c-address>.
+
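+As a worked example, the nunchuk node in the example below uses
+reg = <0x52 0x0 0x10>: I2C address 0x52, a second cell of 0 as required,
+and an LVR of 0x10, i.e. bit[7:5] = 0 (50 ns spike filter present),
+bit[4] = 1 (FM mode) and bit[3:0] = 0.
+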
+I3C devices
+===========
+
+All I3C devices are supposed to support DAA (Dynamic Address Assignment), and
+are thus discoverable. So, by default, I3C devices do not have to be described
+in the device tree.
+This being said, one might want to attach extra resources to these devices,
+and those resources may have to be described in the device tree, which in turn
+means we have to describe I3C devices.
+
+Another use case for describing an I3C device in the device tree is when this
+I3C device has a static I2C address and we want to assign it a specific I3C
+dynamic address before the DAA takes place (so that other devices on the bus
+can't take this dynamic address).
+
+The I3C device should be named <device-type>@<static-i2c-address>,<i3c-pid>,
+where device-type is describing the type of device connected on the bus
+(gpio-controller, sensor, ...).
+
+Required properties
+-------------------
+- reg: contains 3 cells
+ + first cell : encodes the static I2C address. Should be 0 if the device does
+ not have one (0 is not a valid I2C address).
+
+ + second and third cells: should encode the ProvisionalID. The second cell
+ contains the manufacturer ID left-shifted by 1.
+ The third cell contains the ORing of the part ID
+ left-shifted by 16, the instance ID left-shifted
+ by 12 and the extra information. This encoding
+ follows the PID definition provided by the I3C
+ specification.
+
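+As a worked example (matching the sensor@68,39200144004 node in the example
+below), a device with manufacturer ID 0x1c9, part ID 0x14, instance ID 0x4
+and extra information 0x004 would encode its ProvisionalID cells as:
+
+	second cell: 0x1c9 << 1 = 0x392
+	third cell : (0x14 << 16) | (0x4 << 12) | 0x004 = 0x144004
+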
+Optional properties
+-------------------
+- assigned-address: dynamic address to be assigned to this device. This
+ property is only valid if the I3C device has a static
+ address (first cell of the reg property != 0).
+
+
+Example:
+
+ i3c-master@d040000 {
+ compatible = "cdns,i3c-master";
+ clocks = <&coreclock>, <&i3csysclock>;
+ clock-names = "pclk", "sysclk";
+ interrupts = <3 0>;
+ reg = <0x0d040000 0x1000>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ i2c-scl-hz = <100000>;
+
+ /* I2C device. */
+ nunchuk: nunchuk@52 {
+ compatible = "nintendo,nunchuk";
+ reg = <0x52 0x0 0x10>;
+ };
+
+ /* I3C device with a static I2C address. */
+ thermal_sensor: sensor@68,39200144004 {
+ reg = <0x68 0x392 0x144004>;
+ assigned-address = <0xa>;
+ };
+
+ /*
+ * I3C device without a static I2C address but requiring
+ * resources described in the DT.
+ */
+ sensor@0,39200154004 {
+ reg = <0x0 0x392 0x154004>;
+ clocks = <&clock_provider 0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
new file mode 100644
index 000000000000..5020eb71eb8d
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
@@ -0,0 +1,41 @@
+Bindings for Synopsys DesignWare I3C master block
+=================================================
+
+Required properties:
+--------------------
+- compatible: shall be "snps,dw-i3c-master-1.00a"
+- clocks: shall reference the core_clk
+- interrupts: the interrupt line connected to this I3C master
+- reg: Offset and length of I3C master registers
+
+Mandatory properties defined by the generic binding (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details):
+
+- #address-cells: shall be set to 3
+- #size-cells: shall be set to 0
+
+Optional properties defined by the generic binding (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details):
+
+- i2c-scl-hz
+- i3c-scl-hz
+
+I3C devices connected on the bus follow the generic description (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details).
+
+Example:
+
+ i3c-master@2000 {
+ compatible = "snps,dw-i3c-master-1.00a";
+ #address-cells = <3>;
+ #size-cells = <0>;
+ reg = <0x02000 0x1000>;
+ interrupts = <0>;
+ clocks = <&i3cclk>;
+
+ eeprom@57 {
+ compatible = "atmel,24c01";
+ reg = <0x57 0x0 0x10>;
+ pagesize = <0x8>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/input-reset.txt b/Documentation/devicetree/bindings/input/input-reset.txt
index 2bb2626fdb78..1ca6cc5ebf8e 100644
--- a/Documentation/devicetree/bindings/input/input-reset.txt
+++ b/Documentation/devicetree/bindings/input/input-reset.txt
@@ -12,7 +12,7 @@ The /chosen node should contain a 'linux,sysrq-reset-seq' child node to define
a set of keys.
Required property:
-sysrq-reset-seq: array of Linux keycodes, one keycode per cell.
+keyset: array of Linux keycodes, one keycode per cell.
Optional property:
timeout-ms: duration keys must be pressed together in milliseconds before
diff --git a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-ic.txt b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-ic.txt
index b290ca150d30..404352524c3a 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-ic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/allwinner,sun4i-ic.txt
@@ -2,7 +2,9 @@ Allwinner Sunxi Interrupt Controller
Required properties:
-- compatible : should be "allwinner,sun4i-a10-ic"
+- compatible : should be one of the following:
+ "allwinner,sun4i-a10-ic"
+ "allwinner,suniv-f1c100s-ic"
- reg : Specifies base physical address and size of the registers.
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 3ea78c4ef887..b83bb8249074 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -7,7 +7,9 @@ Interrupts (LPI).
Main node required properties:
-- compatible : should at least contain "arm,gic-v3".
+- compatible : should at least contain "arm,gic-v3" or either
+ "qcom,msm8996-gic-v3", "arm,gic-v3" for msm8996 SoCs
+ to address SoC specific bugs/quirks
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. Must be a single cell with a value of at least 3.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
new file mode 100644
index 000000000000..45790ce6f5b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
@@ -0,0 +1,34 @@
+Freescale IRQSTEER Interrupt multiplexer
+
+Required properties:
+
+- compatible: should be:
+ - "fsl,imx8m-irqsteer"
+ - "fsl,imx-irqsteer"
+- reg: Physical base address and size of registers.
+- interrupts: Should contain the parent interrupt line used to multiplex the
+ input interrupts.
+- clocks: Should contain one clock for entry in clock-names
+ see Documentation/devicetree/bindings/clock/clock-bindings.txt
+- clock-names:
+ - "ipg": main logic clock
+- interrupt-controller: Identifies the node as an interrupt controller.
+- #interrupt-cells: Specifies the number of cells needed to encode an
+ interrupt source. The value must be 1.
+- fsl,channel: The output channel that all input IRQs should be steered into.
+- fsl,irq-groups: Number of IRQ groups managed by this controller instance.
+ Each group manages 64 input interrupts.
+
+Example:
+
+ interrupt-controller@32e2d000 {
+ compatible = "fsl,imx8m-irqsteer", "fsl,imx-irqsteer";
+ reg = <0x32e2d000 0x1000>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_DISP_APB_ROOT>;
+ clock-names = "ipg";
+ fsl,channel = <0>;
+ fsl,irq-groups = <1>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt
new file mode 100644
index 000000000000..e0062aebf025
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/rda,8810pl-intc.txt
@@ -0,0 +1,61 @@
+RDA Micro RDA8810PL Interrupt Controller
+
+The interrupt controller in RDA8810PL SoC is a custom interrupt controller
+which supports up to 32 interrupts.
+
+Required properties:
+
+- compatible: Should be "rda,8810pl-intc".
+- reg: Specifies base physical address of the registers set.
+- interrupt-controller: Identifies the node as an interrupt controller.
+- #interrupt-cells: Specifies the number of cells needed to encode an
+ interrupt source. The value shall be 2.
+
+The interrupt sources are as follows:
+
+ID Name
+------------
+0: PULSE_DUMMY
+1: I2C
+2: NAND_NFSC
+3: SDMMC1
+4: SDMMC2
+5: SDMMC3
+6: SPI1
+7: SPI2
+8: SPI3
+9: UART1
+10: UART2
+11: UART3
+12: GPIO1
+13: GPIO2
+14: GPIO3
+15: KEYPAD
+16: TIMER
+17: TIMEROS
+18: COMREG0
+19: COMREG1
+20: USB
+21: DMC
+22: DMA
+23: CAMERA
+24: GOUDA
+25: GPU
+26: VPU_JPG
+27: VPU_HOST
+28: VOC
+29: AUIFC0
+30: AUIFC1
+31: L2CC
+
+Example:
+ apb@20800000 {
+ compatible = "simple-bus";
+ ...
+ intc: interrupt-controller@0 {
+ compatible = "rda,8810pl-intc";
+ reg = <0x0 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
index 6a36bf66d932..cd01b2292ec6 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
@@ -14,6 +14,10 @@ Required properties:
(only needed for exti controller with multiple exti under
same parent interrupt: st,stm32-exti and st,stm32h7-exti)
+Optional properties:
+
+- hwlocks: reference to a phandle of a hardware spinlock provider node.
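+
+For instance, assuming a hardware spinlock provider labeled hsem with
+#hwlock-cells = <1> (both are assumptions, not defined by this binding):
+
+	hwlocks = <&hsem 1>;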
+
Example:
exti: interrupt-controller@40013c00 {
diff --git a/Documentation/devicetree/bindings/media/aspeed-video.txt b/Documentation/devicetree/bindings/media/aspeed-video.txt
new file mode 100644
index 000000000000..78b464ae2672
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/aspeed-video.txt
@@ -0,0 +1,26 @@
+* Device tree bindings for Aspeed Video Engine
+
+The Video Engine (VE) embedded in the Aspeed AST2400 and AST2500 SOCs can
+capture and compress video data from digital or analog sources.
+
+Required properties:
+ - compatible: "aspeed,ast2400-video-engine" or
+ "aspeed,ast2500-video-engine"
+ - reg: contains the offset and length of the VE memory region
+ - clocks: clock specifiers for the syscon clocks associated with
+ the VE (ordering must match the clock-names property)
+ - clock-names: "vclk" and "eclk"
+ - resets: reset specifier for the syscon reset associated with
+ the VE
+ - interrupts: the interrupt associated with the VE on this platform
+
+Example:
+
+video-engine@1e700000 {
+ compatible = "aspeed,ast2500-video-engine";
+ reg = <0x1e700000 0x20000>;
+ clocks = <&syscon ASPEED_CLK_GATE_VCLK>, <&syscon ASPEED_CLK_GATE_ECLK>;
+ clock-names = "vclk", "eclk";
+ resets = <&syscon ASPEED_RESET_VIDEO>;
+ interrupts = <7>;
+};
diff --git a/Documentation/devicetree/bindings/media/cedrus.txt b/Documentation/devicetree/bindings/media/cedrus.txt
index a089a0c1ff05..b3c0635dcd0e 100644
--- a/Documentation/devicetree/bindings/media/cedrus.txt
+++ b/Documentation/devicetree/bindings/media/cedrus.txt
@@ -11,6 +11,8 @@ Required properties:
- "allwinner,sun7i-a20-video-engine"
- "allwinner,sun8i-a33-video-engine"
- "allwinner,sun8i-h3-video-engine"
+ - "allwinner,sun50i-a64-video-engine"
+ - "allwinner,sun50i-h5-video-engine"
- reg : register base and length of VE;
- clocks : list of clock specifiers, corresponding to entries in
the clock-names property;
diff --git a/Documentation/devicetree/bindings/media/i2c/mt9m111.txt b/Documentation/devicetree/bindings/media/i2c/mt9m111.txt
index 6b910036b57e..d0bed6fa901a 100644
--- a/Documentation/devicetree/bindings/media/i2c/mt9m111.txt
+++ b/Documentation/devicetree/bindings/media/i2c/mt9m111.txt
@@ -9,8 +9,14 @@ Required Properties:
- clocks: reference to the master clock.
- clock-names: shall be "mclk".
-For further reading on port node refer to
-Documentation/devicetree/bindings/media/video-interfaces.txt.
+The device node must contain one 'port' child node with one 'endpoint' child
+sub-node for its digital output video port, in accordance with the video
+interface bindings defined in:
+Documentation/devicetree/bindings/media/video-interfaces.txt
+
+Optional endpoint properties:
+- pclk-sample: For information see ../video-interfaces.txt. The value is set to
+ 0 if it isn't specified.
Example:
@@ -21,11 +27,10 @@ Example:
clocks = <&mclk>;
clock-names = "mclk";
- remote = <&pxa_camera>;
port {
mt9m111_1: endpoint {
- bus-width = <8>;
remote-endpoint = <&pxa_camera>;
+ pclk-sample = <1>;
};
};
};
diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx214.txt b/Documentation/devicetree/bindings/media/i2c/sony,imx214.txt
new file mode 100644
index 000000000000..f11f28a5fda4
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/sony,imx214.txt
@@ -0,0 +1,53 @@
+* Sony 1/3.06-Inch 13.13Mp CMOS Digital Image Sensor
+
+The Sony imx214 is a 1/3.06-inch CMOS active pixel digital image sensor with
+an active array size of 4224H x 3200V. It is programmable through an I2C
+interface.
+Image data is sent through MIPI CSI-2, through 2 or 4 lanes at a maximum
+throughput of 1.2Gbps/lane.
+
+
+Required Properties:
+- compatible: Shall be "sony,imx214".
+- reg: I2C bus address of the device. Depending on how the sensor is wired,
+ it shall be <0x10> or <0x1a>;
+- enable-gpios: GPIO descriptor for the enable pin.
+- vdddo-supply: Chip digital IO regulator (1.8V).
+- vdda-supply: Chip analog regulator (2.7V).
+- vddd-supply: Chip digital core regulator (1.12V).
+- clocks: Reference to the xclk clock.
+- clock-frequency: Frequency of the xclk clock.
+
+Optional Properties:
+- flash-leds: See ../video-interfaces.txt
+- lens-focus: See ../video-interfaces.txt
+
+The imx214 device node shall contain one 'port' child node with
+an 'endpoint' subnode. For further reading on port node refer to
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Required Properties on endpoint:
+- data-lanes: check ../video-interfaces.txt
+- link-frequencies: check ../video-interfaces.txt
+- remote-endpoint: check ../video-interfaces.txt
+
+Example:
+
+ camera-sensor@1a {
+ compatible = "sony,imx214";
+ reg = <0x1a>;
+ vdddo-supply = <&pm8994_lvs1>;
+ vddd-supply = <&camera_vddd_1v12>;
+ vdda-supply = <&pm8994_l17>;
+ lens-focus = <&ad5820>;
+ enable-gpios = <&msmgpio 25 GPIO_ACTIVE_HIGH>;
+ clocks = <&mmcc CAMSS_MCLK0_CLK>;
+ clock-frequency = <24000000>;
+ port {
+ imx214_ep: endpoint {
+ data-lanes = <1 2 3 4>;
+ link-frequencies = /bits/ 64 <480000000>;
+ remote-endpoint = <&csiphy0_ep>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/media/qcom,venus.txt b/Documentation/devicetree/bindings/media/qcom,venus.txt
index 00d0d1bf7647..b602c4c025e7 100644
--- a/Documentation/devicetree/bindings/media/qcom,venus.txt
+++ b/Documentation/devicetree/bindings/media/qcom,venus.txt
@@ -53,7 +53,8 @@
* Subnodes
The Venus video-codec node must contain two subnodes representing
-video-decoder and video-encoder.
+video-decoder and video-encoder, and one optional firmware subnode.
+The firmware subnode is needed when the platform does not have TrustZone.
Every of video-encoder or video-decoder subnode should have:
@@ -79,6 +80,13 @@ Every of video-encoder or video-decoder subnode should have:
power domain which is responsible for collapsing
and restoring power to the subcore.
+The firmware subnode must have:
+
+- iommus:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: A list of phandle and IOMMU specifier pairs.
+
* An Example
video-codec@1d00000 {
compatible = "qcom,msm8916-venus";
@@ -105,4 +113,8 @@ Every of video-encoder or video-decoder subnode should have:
clock-names = "core";
power-domains = <&mmcc VENUS_CORE1_GDSC>;
};
+
+ video-firmware {
+ iommus = <&apps_iommu 0x10b2 0x0>;
+ };
};
diff --git a/Documentation/devicetree/bindings/media/rcar_vin.txt b/Documentation/devicetree/bindings/media/rcar_vin.txt
index d329a4e8ac58..0dd84a183ca7 100644
--- a/Documentation/devicetree/bindings/media/rcar_vin.txt
+++ b/Documentation/devicetree/bindings/media/rcar_vin.txt
@@ -24,6 +24,8 @@ on Gen3 platforms to a CSI-2 receiver.
- "renesas,vin-r8a7796" for the R8A7796 device
- "renesas,vin-r8a77965" for the R8A77965 device
- "renesas,vin-r8a77970" for the R8A77970 device
+ - "renesas,vin-r8a77980" for the R8A77980 device
+ - "renesas,vin-r8a77990" for the R8A77990 device
- "renesas,vin-r8a77995" for the R8A77995 device
- "renesas,rcar-gen2-vin" for a generic R-Car Gen2 or RZ/G1 compatible
device.
diff --git a/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt b/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt
index 2d385b65b275..541d936b62e8 100644
--- a/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt
+++ b/Documentation/devicetree/bindings/media/renesas,rcar-csi2.txt
@@ -12,6 +12,8 @@ Mandatory properties
- "renesas,r8a7796-csi2" for the R8A7796 device.
- "renesas,r8a77965-csi2" for the R8A77965 device.
- "renesas,r8a77970-csi2" for the R8A77970 device.
+ - "renesas,r8a77980-csi2" for the R8A77980 device.
+ - "renesas,r8a77990-csi2" for the R8A77990 device.
- reg: the register base and size for the device registers
- interrupts: the interrupt for the device
diff --git a/Documentation/devicetree/bindings/media/spi/sony-cxd2880.txt b/Documentation/devicetree/bindings/media/spi/sony-cxd2880.txt
index fc5aa263abe5..98a72c0b3c64 100644
--- a/Documentation/devicetree/bindings/media/spi/sony-cxd2880.txt
+++ b/Documentation/devicetree/bindings/media/spi/sony-cxd2880.txt
@@ -5,6 +5,10 @@ Required properties:
- reg: SPI chip select number for the device.
- spi-max-frequency: Maximum bus speed, should be set to <55000000> (55MHz).
+Optional properties:
+- vcc-supply: Optional phandle to the vcc regulator to power the adapter,
+ as described in the file ../regulator/regulator.txt
+
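+For instance (the &vcc_3v3 regulator label is an assumption for
+illustration):
+
+	vcc-supply = <&vcc_3v3>;
+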
Example:
cxd2880@0 {
diff --git a/Documentation/devicetree/bindings/media/sun6i-csi.txt b/Documentation/devicetree/bindings/media/sun6i-csi.txt
new file mode 100644
index 000000000000..d4ab34f2240c
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/sun6i-csi.txt
@@ -0,0 +1,59 @@
+Allwinner V3s Camera Sensor Interface
+-------------------------------------
+
+The Allwinner V3s SoC features a CSI module (CSI1) with a parallel interface.
+
+Required properties:
+ - compatible: value must be one of:
+ * "allwinner,sun6i-a31-csi"
+ * "allwinner,sun8i-h3-csi", "allwinner,sun6i-a31-csi"
+ * "allwinner,sun8i-v3s-csi"
+ - reg: base address and size of the memory-mapped region.
+ - interrupts: interrupt associated to this IP
+ - clocks: phandles to the clocks feeding the CSI
+ * bus: the CSI interface clock
+ * mod: the CSI module clock
+ * ram: the CSI DRAM clock
+ - clock-names: the clock names mentioned above
+ - resets: phandles to the reset line driving the CSI
+
+The CSI node should contain one 'port' child node with one child 'endpoint'
+node, according to the bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Endpoint node properties for CSI
+---------------------------------
+See the video-interfaces.txt for a detailed description of these properties.
+- remote-endpoint : (required) a phandle to the bus receiver's endpoint
+ node
+- bus-width : (required) must be 8, 10, 12 or 16
+- pclk-sample : (optional) (default: sample on falling edge)
+- hsync-active : (required; parallel-only)
+- vsync-active : (required; parallel-only)
+
+Example:
+
+csi1: csi@1cb4000 {
+ compatible = "allwinner,sun8i-v3s-csi";
+ reg = <0x01cb4000 0x1000>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&ccu CLK_BUS_CSI>,
+ <&ccu CLK_CSI1_SCLK>,
+ <&ccu CLK_DRAM_CSI>;
+ clock-names = "bus", "mod", "ram";
+ resets = <&ccu RST_BUS_CSI>;
+
+ port {
+ /* Parallel bus endpoint */
+ csi1_ep: endpoint {
+ remote-endpoint = <&adv7611_ep>;
+ bus-width = <16>;
+
+ /* If hsync-active/vsync-active are missing,
+ embedded BT.656 sync is used */
+ hsync-active = <0>; /* Active low */
+ vsync-active = <0>; /* Active low */
+ pclk-sample = <1>; /* Rising */
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/memory-controllers/synopsys.txt b/Documentation/devicetree/bindings/memory-controllers/synopsys.txt
index a43d26d41e04..9d32762c47e1 100644
--- a/Documentation/devicetree/bindings/memory-controllers/synopsys.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/synopsys.txt
@@ -1,15 +1,32 @@
Binding for Synopsys IntelliDDR Multi Protocol Memory Controller
-This controller has an optional ECC support in half-bus width (16-bit)
-configuration. The ECC controller corrects one bit error and detects
-two bit errors.
+The ZynqMP DDR ECC controller has optional ECC support in 64-bit and 32-bit
+bus width configurations.
+
+The Zynq DDR ECC controller has optional ECC support in the half-bus width
+(16-bit) configuration.
+
+Both ECC controllers correct single bit ECC errors and detect double bit
+ECC errors.
Required properties:
- - compatible: Should be 'xlnx,zynq-ddrc-a05'
- - reg: Base address and size of the controllers memory area
+ - compatible: One of:
+ - 'xlnx,zynq-ddrc-a05' : Zynq DDR ECC controller
+ - 'xlnx,zynqmp-ddrc-2.40a' : ZynqMP DDR ECC controller
+ - reg: Should contain DDR controller registers location and length.
+
+Required properties for "xlnx,zynqmp-ddrc-2.40a":
+ - interrupts: Property with a value describing the interrupt number.
Example:
memory-controller@f8006000 {
compatible = "xlnx,zynq-ddrc-a05";
reg = <0xf8006000 0x1000>;
};
+
+ mc: memory-controller@fd070000 {
+ compatible = "xlnx,zynqmp-ddrc-2.40a";
+ reg = <0x0 0xfd070000 0x0 0x30000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 112 4>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/axp20x.txt b/Documentation/devicetree/bindings/mfd/axp20x.txt
index 188f0373d441..2af4ff95d6bc 100644
--- a/Documentation/devicetree/bindings/mfd/axp20x.txt
+++ b/Documentation/devicetree/bindings/mfd/axp20x.txt
@@ -32,6 +32,15 @@ Required properties:
- interrupt-controller: The PMIC has its own internal IRQs
- #interrupt-cells: Should be set to 1
+Supported common regulator properties, see ../regulator/regulator.txt for
+more information:
+- regulator-ramp-delay: sets the ramp up delay in uV/us
+ AXP20x/DCDC2: 1600, 800
+ AXP20x/LDO3: 1600, 800
+- regulator-soft-start: enable the output at the lowest possible voltage and
+ only then set the desired voltage
+ AXP20x/LDO3: software-based implementation
+
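+A minimal sketch of these properties (the regulator node names and their
+placement under the PMIC node are assumptions for illustration):
+
+	dcdc2 {
+		regulator-ramp-delay = <800>;
+	};
+
+	ldo3 {
+		regulator-soft-start;
+	};
+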
Optional properties:
- x-powers,dcdc-freq: defines the work frequency of DC-DC in KHz
AXP152/20X: range: 750-1875, Default: 1.5 MHz
diff --git a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
index 01fdc33a41d0..bb7e896cb644 100644
--- a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
+++ b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
@@ -10,7 +10,7 @@ such as network interfaces, crypto accelerator instances, L2 switches,
etc.
For an overview of the DPAA2 architecture and fsl-mc bus see:
-Documentation/networking/dpaa2/overview.rst
+Documentation/networking/device_drivers/freescale/dpaa2/overview.rst
As described in the above overview, all DPAA2 objects in a DPRC share the
same hardware "isolation context" and a 10-bit value called an ICID
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
index 232fa12e90ef..7df0dcaccb7d 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
@@ -29,6 +29,8 @@ file systems on embedded devices.
- use-advanced-sector-protection: boolean to enable support for the
advanced sector protection (Spansion: PPB - Persistent Protection
Bits) locking.
+ - addr-gpios : (optional) List of GPIO descriptors that will be used to
+ drive the most significant address lines. The order goes from LSB to MSB.
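+ For instance, with two bank-select GPIOs (the labels are assumptions):
+ addr-gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>, <&gpio0 5 GPIO_ACTIVE_HIGH>;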
For JEDEC compatible devices, the following additional properties
are defined:
diff --git a/Documentation/devicetree/bindings/mtd/partitions/redboot-fis.txt b/Documentation/devicetree/bindings/mtd/partitions/redboot-fis.txt
new file mode 100644
index 000000000000..fd0ebe4e3415
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/redboot-fis.txt
@@ -0,0 +1,27 @@
+RedBoot FLASH Image System (FIS) Partitions
+===========================================
+
+The FLASH Image System (FIS) directory is a flash description
+format closely associated with the RedBoot boot loader.
+
+It uses one single flash eraseblock in the flash to store an index of
+all images in the flash.
+
+This block size will vary depending on flash but is typically
+32 KB in size.
+
+Required properties:
+- compatible : (required) must be "redboot-fis"
+- fis-index-block : (required) an index to the eraseblock containing
+ the FIS directory on this device. On a flash memory with 32KB
+ eraseblocks, 0 means the first eraseblock at 0x00000000, 1 means the
+ second eraseblock at 0x00008000 and so on.
+
+Example:
+
+flash@0 {
+ partitions {
+ compatible = "redboot-fis";
+ fis-index-block = <0>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
index 4194ff7e6ee6..c26f4e11037c 100644
--- a/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
+++ b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt
@@ -10,6 +10,8 @@ device the slave device is attached to.
Required properties:
- compatible: should contain one of the following:
+ * "brcm,bcm20702a1"
+ * "brcm,bcm4330-bt"
* "brcm,bcm43438-bt"
Optional properties:
@@ -18,8 +20,13 @@ Optional properties:
- shutdown-gpios: GPIO specifier, used to enable the BT module
- device-wakeup-gpios: GPIO specifier, used to wakeup the controller
- host-wakeup-gpios: GPIO specifier, used to wakeup the host processor
- - clocks: clock specifier if external clock provided to the controller
- - clock-names: should be "extclk"
+ - clocks: 1 or 2 clocks as defined in clock-names below, in that order
+ - clock-names: names for clock inputs, matching the clocks given
+ - "extclk": deprecated, replaced by "txco"
+ - "txco": external reference clock (not a standalone crystal)
+ - "lpo": external low power 32.768 kHz clock
+ - vbat-supply: phandle to regulator supply for VBAT
+ - vddio-supply: phandle to regulator supply for VDDIO
Example:
diff --git a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
index bfc0c433654f..bc77477c6878 100644
--- a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
+++ b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
@@ -24,6 +24,14 @@ Optional properties:
if this property is present then controller is assumed to be big
endian.
+- fsl,stop-mode: register bits of stop mode control, the format is
+ <&gpr req_gpr req_bit ack_gpr ack_bit>.
+ gpr is the phandle to general purpose register node.
+ req_gpr is the gpr register offset of CAN stop request.
+ req_bit is the bit offset of CAN stop request.
+ ack_gpr is the gpr register offset of CAN stop acknowledge.
+ ack_bit is the bit offset of CAN stop acknowledge.
+
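+A sketch of the property as it might look on an i.MX6-style SoC (the
+register offsets and bit numbers are illustrative assumptions only):
+
+	fsl,stop-mode = <&gpr 0x34 28 0x10 17>;
+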
Example:
can@1c000 {
diff --git a/Documentation/devicetree/bindings/net/can/xilinx_can.txt b/Documentation/devicetree/bindings/net/can/xilinx_can.txt
index 060e2d46bad9..100cc40b8510 100644
--- a/Documentation/devicetree/bindings/net/can/xilinx_can.txt
+++ b/Documentation/devicetree/bindings/net/can/xilinx_can.txt
@@ -6,6 +6,7 @@ Required properties:
- "xlnx,zynq-can-1.0" for Zynq CAN controllers
- "xlnx,axi-can-1.00.a" for Axi CAN controllers
- "xlnx,canfd-1.0" for CAN FD controllers
+ - "xlnx,canfd-2.0" for CAN FD 2.0 controllers
- reg : Physical base address and size of the controller
registers map.
- interrupts : Property with a value describing the interrupt
diff --git a/Documentation/devicetree/bindings/net/dsa/ksz.txt b/Documentation/devicetree/bindings/net/dsa/ksz.txt
index ac145b885e95..0f407fb371ce 100644
--- a/Documentation/devicetree/bindings/net/dsa/ksz.txt
+++ b/Documentation/devicetree/bindings/net/dsa/ksz.txt
@@ -8,6 +8,10 @@ Required properties:
- "microchip,ksz9477"
- "microchip,ksz9897"
+Optional properties:
+
+- reset-gpios : Should be a gpio specifier for a reset line
+
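+For instance (the GPIO controller label is an assumption):
+
+	reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+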
See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of additional
required and optional properties.
diff --git a/Documentation/devicetree/bindings/net/icplus-ip101ag.txt b/Documentation/devicetree/bindings/net/icplus-ip101ag.txt
new file mode 100644
index 000000000000..a784592bbb15
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/icplus-ip101ag.txt
@@ -0,0 +1,19 @@
+IC Plus Corp. IP101A / IP101G Ethernet PHYs
+
+There are different models of the IP101G Ethernet PHY:
+- IP101GR (32-pin QFN package)
+- IP101G (die only, no package)
+- IP101GA (48-pin LQFP package)
+
+There are different models of the IP101A Ethernet PHY (which is the
+predecessor of the IP101G):
+- IP101A (48-pin LQFP package)
+- IP101AH (48-pin LQFP package)
+
+Optional properties for the IP101GR (32-pin QFN package):
+
+- icplus,select-rx-error:
+ pin 21 ("RXER/INTR_32") will output the receive error status.
+ Interrupts are not routed outside the PHY in this mode.
+- icplus,select-interrupt:
+ pin 21 ("RXER/INTR_32") will output the interrupt signal.
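+
+Example (a sketch; the MDIO bus layout and PHY address are assumptions):
+
+mdio {
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	ethernet-phy@1 {
+		reg = <1>;
+		icplus,select-interrupt;
+	};
+};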
diff --git a/Documentation/devicetree/bindings/net/mediatek-dwmac.txt b/Documentation/devicetree/bindings/net/mediatek-dwmac.txt
new file mode 100644
index 000000000000..8a08621a5b54
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mediatek-dwmac.txt
@@ -0,0 +1,78 @@
+MediaTek DWMAC glue layer controller
+
+This file documents platform glue layer for stmmac.
+Please see stmmac.txt for the other unchanged properties.
+
+The device node has following properties.
+
+Required properties:
+- compatible: Should be "mediatek,mt2712-gmac" for MT2712 SoC
+- reg: Address and length of the register set for the device
+- interrupts: Should contain the MAC interrupts
+- interrupt-names: Should contain a list of interrupt names corresponding to
+ the interrupts in the interrupts property, if available.
+ Should be "macirq" for the main MAC IRQ
+- clocks: Must contain a phandle for each entry in clock-names.
+- clock-names: The name of the clock listed in the clocks property. These are
+ "axi", "apb", "mac_main", "ptp_ref" for MT2712 SoC
+- mac-address: See ethernet.txt in the same directory
+- phy-mode: See ethernet.txt in the same directory
+- mediatek,pericfg: A phandle to the syscon node that controls the ethernet
+ interface and timing delay.
+
+Optional properties:
+- mediatek,tx-delay-ps: TX clock delay macro value. Default is 0.
+ It should be defined for RGMII/MII interface.
+- mediatek,rx-delay-ps: RX clock delay macro value. Default is 0.
+ It should be defined for RGMII/MII/RMII interface.
+Both delay properties must be a multiple of 170 for the RGMII interface,
+otherwise the value is rounded down. Valid range: 0 to 31*170.
+Both delay properties must be a multiple of 550 for the MII/RMII interface,
+otherwise the value is rounded down. Valid range: 0 to 31*550.
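+
+For instance, the mediatek,tx-delay-ps value of 1530 used in the example
+below is a valid RGMII value (9 * 170).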
+
+- mediatek,rmii-rxc: boolean property, if present indicates that the RMII
+ reference clock, which is from external PHYs, is connected to RXC pin
+ on MT2712 SoC.
+ Otherwise, is connected to TXC pin.
+- mediatek,txc-inverse: boolean property, if present indicates that
+ 1. the tx clock will be inverted in the MII/RGMII case,
+ 2. the tx clock inside the MAC will be inverted relative to the reference
+ clock coming from the external PHYs in the RMII case; this rarely
+ happens.
+- mediatek,rxc-inverse: boolean property, if present indicates that
+ 1. the rx clock will be inverted in the MII/RGMII case,
+ 2. the reference clock will be inverted when it arrives at the MAC in the
+ RMII case.
+- assigned-clocks: mac_main and ptp_ref clocks
+- assigned-clock-parents: parent clocks of the assigned clocks
+
+Example:
+ eth: ethernet@1101c000 {
+ compatible = "mediatek,mt2712-gmac";
+ reg = <0 0x1101c000 0 0x1300>;
+ interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "macirq";
+ phy-mode = "rgmii";
+ mac-address = [00 55 7b b5 7d f7];
+ clock-names = "axi",
+ "apb",
+ "mac_main",
+ "ptp_ref",
+ "ptp_top";
+ clocks = <&pericfg CLK_PERI_GMAC>,
+ <&pericfg CLK_PERI_GMAC_PCLK>,
+ <&topckgen CLK_TOP_ETHER_125M_SEL>,
+ <&topckgen CLK_TOP_ETHER_50M_SEL>;
+ assigned-clocks = <&topckgen CLK_TOP_ETHER_125M_SEL>,
+ <&topckgen CLK_TOP_ETHER_50M_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_ETHERPLL_125M>,
+ <&topckgen CLK_TOP_APLL1_D3>;
+ mediatek,pericfg = <&pericfg>;
+ mediatek,tx-delay-ps = <1530>;
+ mediatek,rx-delay-ps = <1530>;
+ mediatek,rmii-rxc;
+ mediatek,txc-inverse;
+ mediatek,rxc-inverse;
+ snps,txpbl = <32>;
+ snps,rxpbl = <32>;
+ snps,reset-gpio = <&pio 87 GPIO_ACTIVE_LOW>;
+ snps,reset-active-low;
+ };
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 3530256a879c..7ad36213093e 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -18,6 +18,7 @@ Required properties:
R-Car Gen2 and RZ/G1 devices.
- "renesas,etheravb-r8a774a1" for the R8A774A1 SoC.
+ - "renesas,etheravb-r8a774c0" for the R8A774C0 SoC.
- "renesas,etheravb-r8a7795" for the R8A7795 SoC.
- "renesas,etheravb-r8a7796" for the R8A7796 SoC.
- "renesas,etheravb-r8a77965" for the R8A77965 SoC.
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index 2196d1ab3c8c..ae661e65354e 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -21,10 +21,22 @@ can be provided per device.
SNOC based devices (i.e. wcn3990) uses compatible string "qcom,wcn3990-wifi".
-Optional properties:
- reg: Address and length of the register set for the device.
- reg-names: Must include the list of following reg names,
"membase"
+- interrupts: reference to the list of 17 interrupt numbers for the
+ "qcom,ipq4019-wifi" compatible target, or to the list of 12 interrupt
+ numbers for the "qcom,wcn3990-wifi" compatible target.
+ An interrupt-names entry must be provided per interrupt for the
+ "qcom,ath10k" and "qcom,ipq4019-wifi" compatible targets.
+
+- interrupt-names: Must include the entries for MSI interrupt
+ names ("msi0" to "msi15") and legacy interrupt
+ name ("legacy") for "qcom,ath10k", "qcom,ipq4019-wifi"
+ compatible targets.
+
+Optional properties:
- resets: Must contain an entry for each entry in reset-names.
See ../reset/reseti.txt for details.
- reset-names: Must include the list of following reset names,
@@ -37,12 +49,9 @@ Optional properties:
- clocks: List of clock specifiers, must contain an entry for each required
entry in clock-names.
- clock-names: Should contain the clock names "wifi_wcss_cmd", "wifi_wcss_ref",
- "wifi_wcss_rtc".
-- interrupts: List of interrupt lines. Must contain an entry
- for each entry in the interrupt-names property.
-- interrupt-names: Must include the entries for MSI interrupt
- names ("msi0" to "msi15") and legacy interrupt
- name ("legacy"),
+ "wifi_wcss_rtc" for "qcom,ipq4019-wifi" compatible target and
+ "cxo_ref_clk_pin" for "qcom,wcn3990-wifi"
+ compatible target.
- qcom,msi_addr: MSI interrupt address.
- qcom,msi_base: Base value to add before writing MSI data into
MSI address register.
@@ -55,14 +64,25 @@ Optional properties:
- qcom,ath10k-pre-calibration-data : pre calibration data as an array,
the length can vary between hw versions.
- <supply-name>-supply: handle to the regulator device tree node
- optional "supply-name" is "vdd-0.8-cx-mx".
+ optional "supply-name" are "vdd-0.8-cx-mx",
+ "vdd-1.8-xo", "vdd-1.3-rfa" and "vdd-3.3-ch0".
- memory-region:
Usage: optional
Value type: <phandle>
Definition: reference to the reserved-memory for the msa region
used by the wifi firmware running in Q6.
+- iommus:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: A list of phandle and IOMMU specifier pairs.
+- ext-fem-name:
+ Usage: Optional
+ Value type: string
+ Definition: Name of the external front end module used. Some valid
+ FEM names are, for example, "microsemi-lx5586", "sky85703-11"
+ and "sky85803".
-Example (to supply the calibration data alone):
+Example (to supply PCI based wifi block details):
In this example, the node is defined as child node of the PCI controller.
@@ -74,10 +94,10 @@ pci {
#address-cells = <3>;
device_type = "pci";
- ath10k@0,0 {
+ wifi@0,0 {
reg = <0 0 0 0 0>;
- device_type = "pci";
qcom,ath10k-calibration-data = [ 01 02 03 ... ];
+ ext-fem-name = "microsemi-lx5586";
};
};
};
@@ -138,21 +158,25 @@ wifi@18000000 {
compatible = "qcom,wcn3990-wifi";
reg = <0x18800000 0x800000>;
reg-names = "membase";
- clocks = <&clock_gcc clk_aggre2_noc_clk>;
- clock-names = "smmu_aggre2_noc_clk"
+ clocks = <&clock_gcc clk_rf_clk2_pin>;
+ clock-names = "cxo_ref_clk_pin";
interrupts =
- <0 130 0 /* CE0 */ >,
- <0 131 0 /* CE1 */ >,
- <0 132 0 /* CE2 */ >,
- <0 133 0 /* CE3 */ >,
- <0 134 0 /* CE4 */ >,
- <0 135 0 /* CE5 */ >,
- <0 136 0 /* CE6 */ >,
- <0 137 0 /* CE7 */ >,
- <0 138 0 /* CE8 */ >,
- <0 139 0 /* CE9 */ >,
- <0 140 0 /* CE10 */ >,
- <0 141 0 /* CE11 */ >;
+ <GIC_SPI 414 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 415 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>;
vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+ vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
memory-region = <&wifi_msa_mem>;
+ iommus = <&apps_smmu 0x0040 0x1>;
};
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
index adf20b2bdf71..fbc198d5dd39 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -40,24 +40,36 @@ Required properties:
"ref" for 19.2 MHz ref clk,
"com_aux" for phy common block aux clock,
"ref_aux" for phy reference aux clock,
+
+ For "qcom,ipq8074-qmp-pcie-phy": no clocks are listed.
For "qcom,msm8996-qmp-pcie-phy" must contain:
"aux", "cfg_ahb", "ref".
For "qcom,msm8996-qmp-usb3-phy" must contain:
"aux", "cfg_ahb", "ref".
- For "qcom,qmp-v3-usb3-phy" must contain:
+ For "qcom,sdm845-qmp-usb3-phy" must contain:
+ "aux", "cfg_ahb", "ref", "com_aux".
+ For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
"aux", "cfg_ahb", "ref", "com_aux".
+ For "qcom,sdm845-qmp-ufs-phy" must contain:
+ "ref", "ref_aux".
- resets: a list of phandles and reset controller specifier pairs,
one for each entry in reset-names.
- reset-names: "phy" for reset of phy block,
"common" for phy common block reset,
- "cfg" for phy's ahb cfg block reset (Optional).
+ "cfg" for phy's ahb cfg block reset.
+
+ For "qcom,ipq8074-qmp-pcie-phy" must contain:
+ "phy", "common".
For "qcom,msm8996-qmp-pcie-phy" must contain:
- "phy", "common", "cfg".
+ "phy", "common", "cfg".
For "qcom,msm8996-qmp-usb3-phy" must contain
- "phy", "common".
- For "qcom,ipq8074-qmp-pcie-phy" must contain:
- "phy", "common".
+ "phy", "common".
+ For "qcom,sdm845-qmp-usb3-phy" must contain:
+ "phy", "common".
+ For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
+ "phy", "common".
+ For "qcom,sdm845-qmp-ufs-phy": no resets are listed.
- vdda-phy-supply: Phandle to a regulator supply to PHY core block.
- vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
@@ -79,9 +91,10 @@ Required properties for child node:
- #phy-cells: must be 0
+Required properties for child nodes of PCIe and USB3 QMP PHYs:
- clocks: a list of phandles and clock-specifier pairs,
one for each entry in clock-names.
- - clock-names: Must contain following for pcie and usb qmp phys:
+ - clock-names: Must contain following:
"pipe<lane-number>" for pipe clock specific to each lane.
- clock-output-names: Name of the PHY clock that will be the parent for
the above pipe clock.
@@ -91,9 +104,11 @@ Required properties for child node:
(or)
"pcie20_phy1_pipe_clk"
+Required properties for child nodes of PHYs with a lane reset, i.e.:
+ "qcom,msm8996-qmp-pcie-phy"
- resets: a list of phandles and reset controller specifier pairs,
one for each entry in reset-names.
- - reset-names: Must contain following for pcie qmp phys:
+ - reset-names: Must contain following:
"lane<lane-number>" for reset specific to each lane.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
index 7f31fe7e2093..fbd6a4f943ce 100644
--- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
+++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
@@ -6,6 +6,7 @@ Required Properties:
- "renesas,pwm-r8a7744": for RZ/G1N
- "renesas,pwm-r8a7745": for RZ/G1E
- "renesas,pwm-r8a774a1": for RZ/G2M
+ - "renesas,pwm-r8a774c0": for RZ/G2E
- "renesas,pwm-r8a7778": for R-Car M1A
- "renesas,pwm-r8a7779": for R-Car H1
- "renesas,pwm-r8a7790": for R-Car H2
diff --git a/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt b/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt
index ac955dea00d1..4017527619ab 100644
--- a/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt
@@ -15,11 +15,17 @@ Optional input supply properties:
- inl67-supply: The input supply for REG_LDO3 and REG_LDO4
Any standard regulator properties can be used to configure the single regulator.
+regulator-initial-mode, regulator-allowed-modes and regulator-mode can be
+specified using the mode values from the
+dt-bindings/regulator/active-semi,8945a-regulator.h file.
The valid names for regulators are:
REG_DCDC1, REG_DCDC2, REG_DCDC3, REG_LDO1, REG_LDO2, REG_LDO3, REG_LDO4.
Example:
+
+#include <dt-bindings/regulator/active-semi,8945a-regulator.h>
+
pmic@5b {
compatible = "active-semi,act8945a";
reg = <0x5b>;
@@ -32,6 +38,18 @@ Example:
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <1350000>;
regulator-always-on;
+
+ regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_FIXED>,
+ <ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8945A_REGULATOR_MODE_FIXED>;
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-suspend-min-microvolt = <1400000>;
+ regulator-suspend-max-microvolt = <1400000>;
+ regulator-changeable-in-suspend;
+ regulator-mode = <ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ };
};
vdd_1v2_reg: REG_DCDC2 {
@@ -39,6 +57,14 @@ Example:
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <1300000>;
regulator-always-on;
+
+ regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_FIXED>,
+ <ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8945A_REGULATOR_MODE_FIXED>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
};
vdd_3v3_reg: REG_DCDC3 {
@@ -53,6 +79,14 @@ Example:
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2500000>;
regulator-always-on;
+
+ regulator-allowed-modes = <ACT8945A_REGULATOR_MODE_NORMAL>,
+ <ACT8945A_REGULATOR_MODE_LOWPOWER>;
+ regulator-initial-mode = <ACT8945A_REGULATOR_MODE_NORMAL>;
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
};
vdd_3v3_lp_reg: REG_LDO2 {
diff --git a/Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt b/Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
new file mode 100644
index 000000000000..91974e6ee251
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
@@ -0,0 +1,82 @@
+Cirrus Logic Lochnagar Audio Development Board
+
+Lochnagar is an evaluation and development board for Cirrus Logic
+Smart CODEC and Amp devices. It allows the connection of most Cirrus
+Logic devices on mini-cards, as well as allowing connection of
+various application processor systems to provide a full evaluation
+platform. Audio system topology, clocking and power can all be
+controlled through the Lochnagar, allowing the device under test
+to be used in a variety of possible use cases.
+
+This binding document describes the binding for the regulator portion
+of the driver.
+
+Also see these documents for generic binding information:
+ [1] Regulator: ../regulator/regulator.txt
+
+This binding must be part of the Lochnagar MFD binding:
+ [2] ../mfd/cirrus,lochnagar.txt
+
+Optional sub-nodes:
+
+ - VDDCORE : Initialisation data for the VDDCORE regulator, which
+ supplies the CODEC's digital core if it has no built-in regulator for
+ that purpose.
+ Required Properties:
+ - compatible : One of the following strings:
+ "cirrus,lochnagar2-vddcore"
+ - SYSVDD-supply: Primary power supply for the Lochnagar.
+
+ - MICVDD : Initialisation data for the MICVDD regulator, which
+ supplies the CODEC's MICVDD.
+ Required Properties:
+ - compatible : One of the following strings:
+ "cirrus,lochnagar2-micvdd"
+ - SYSVDD-supply: Primary power supply for the Lochnagar.
+
+ - MIC1VDD, MIC2VDD : Initialisation data for the MICxVDD supplies.
+ Required Properties:
+ - compatible : One of the following strings:
+ "cirrus,lochnagar2-mic1vdd", "cirrus,lochnagar2-mic2vdd"
+ Optional Properties:
+ - cirrus,micbias-input : A property selecting which of the CODEC
+ minicard micbias outputs should be used; valid values are 1 - 4.
+ - MICBIAS1-supply, MICBIAS2-supply: Regulator supplies for the
+ MICxVDD outputs, supplying the digital microphones, normally
+ supplied from the attached CODEC.
+
+ - VDD1V8 : Recommended fixed regulator for the VDD1V8 regulator,
+ which supplies the CODEC's analog and 1.8V digital supplies.
+ Required Properties:
+ - compatible : Should be set to "regulator-fixed"
+ - regulator-min-microvolt : Should be set to 1.8V
+ - regulator-max-microvolt : Should be set to 1.8V
+ - regulator-boot-on
+ - regulator-always-on
+ - vin-supply : Should be set to same supply as SYSVDD
+
+Example:
+
+lochnagar {
+ lochnagar-micvdd: MICVDD {
+ compatible = "cirrus,lochnagar2-micvdd";
+
+ SYSVDD-supply = <&wallvdd>;
+
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ lochnagar-vdd1v8: VDD1V8 {
+ compatible = "regulator-fixed";
+
+ regulator-name = "VDD1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+
+ vin-supply = <&wallvdd>;
+ };
+};
+
diff --git a/Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt b/Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
new file mode 100644
index 000000000000..b8f843fa6092
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
@@ -0,0 +1,143 @@
+MCP16502 PMIC
+
+Required properties:
+- compatible: "microchip,mcp16502"
+- reg: I2C slave address
+- lpm-gpios: GPIO for LPM pin. Note that this GPIO *must* remain high during
+ suspend-to-RAM, keeping the PMIC in HIBERNATE mode.
+- regulators: A node that houses a sub-node for each regulator within
+ the device. Each sub-node is identified using the node's
+ name. The content of each sub-node is defined by the
+ standard binding for regulators; see regulator.txt.
+
+Regulators of MCP16502 PMIC:
+1) VDD_IO - Buck (1.2 - 3.7 V)
+2) VDD_DDR - Buck (0.6 - 1.85 V)
+3) VDD_CORE - Buck (0.6 - 1.85 V)
+4) VDD_OTHER - Buck (0.6 - 1.85 V)
+5) LDO1 - LDO (1.2 - 3.7 V)
+6) LDO2 - LDO (1.2 - 3.7 V)
+
+Regulator modes:
+2 - FPWM: higher precision, higher consumption
+4 - AutoPFM: lower precision, lower consumption
+
+Each regulator is defined using the standard binding for regulators.
+
+Example:
+
+mcp16502@5b {
+ compatible = "microchip,mcp16502";
+ reg = <0x5b>;
+ status = "okay";
+ lpm-gpios = <&pioBU 7 GPIO_ACTIVE_HIGH>;
+
+ regulators {
+ VDD_IO {
+ regulator-name = "VDD_IO";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3700000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-mode = <4>;
+ };
+ };
+
+ VDD_DDR {
+ regulator-name = "VDD_DDR";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-on-in-suspend;
+ regulator-mode = <4>;
+ };
+ };
+
+ VDD_CORE {
+ regulator-name = "VDD_CORE";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-mode = <4>;
+ };
+ };
+
+ VDD_OTHER {
+ regulator-name = "VDD_OTHER";
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <1850000>;
+ regulator-initial-mode = <2>;
+ regulator-allowed-modes = <2>, <4>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ regulator-mode = <4>;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ regulator-mode = <4>;
+ };
+ };
+
+ LDO1 {
+ regulator-name = "LDO1";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3700000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ LDO2 {
+ regulator-name = "LDO2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3700000>;
+ regulator-always-on;
+
+ regulator-state-standby {
+ regulator-on-in-suspend;
+ };
+
+ regulator-state-mem {
+ regulator-off-in-suspend;
+ };
+ };
+
+ };
+};
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index a7cd36877bfe..0a3f087d5844 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -33,13 +33,16 @@ Optional properties:
decreases of any level. This is useful for regulators with exponential
voltage changes.
- regulator-soft-start: Enable soft start so that voltage ramps slowly
+- regulator-state-standby sub-root node for Standby mode
+ : equivalent to the standby Linux sleep state, which provides energy savings
+ with a relatively quick resume time.
- regulator-state-mem sub-root node for Suspend-to-RAM mode
: suspend to memory, the device goes to sleep, but all data stored in memory,
only some external interrupt can wake the device.
- regulator-state-disk sub-root node for Suspend-to-DISK mode
: suspend to disk, this state operates similarly to Suspend-to-RAM,
but includes a final step of writing memory contents to disk.
-- regulator-state-[mem/disk] node has following common properties:
+- regulator-state-[mem/disk/standby] node has following common properties:
- regulator-on-in-suspend: regulator should be on in suspend state.
- regulator-off-in-suspend: regulator should be off in suspend state.
- regulator-suspend-min-microvolt: minimum voltage may be set in
@@ -76,8 +79,11 @@ Optional properties:
- regulator-coupled-with: Regulators with which the regulator
is coupled. The linkage is 2-way - all coupled regulators should be linked
with each other. A regulator should not be coupled with its supplier.
-- regulator-coupled-max-spread: Max spread between voltages of coupled regulators
- in microvolts.
+- regulator-coupled-max-spread: Array of maximum spreads between voltages of
+ coupled regulators in microvolts; each value in the array corresponds to
+ the regulator at the same position in the regulator-coupled-with property
+ (see the sketch below).
+- regulator-max-step-microvolt: Maximum difference between current and target
+ voltages that can be changed safely in a single step.
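
For illustration, a minimal sketch of a node combining the array form of
regulator-coupled-max-spread, regulator-max-step-microvolt and a
regulator-state-standby sub-node; the labels, voltages and spread values
here are hypothetical, not taken from any real board:

	vdd_cpu: BUCK1 {
		regulator-min-microvolt = <700000>;
		regulator-max-microvolt = <1300000>;
		/* coupled with two rails; one spread entry per coupled regulator */
		regulator-coupled-with = <&vdd_gpu>, <&vdd_soc>;
		regulator-coupled-max-spread = <300000>, <100000>;
		/* never change the voltage by more than 50mV in one step */
		regulator-max-step-microvolt = <50000>;

		regulator-state-standby {
			regulator-on-in-suspend;
			regulator-suspend-min-microvolt = <900000>;
		};
	};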
Deprecated properties:
- regulator-compatible: If a regulator chip contains multiple
diff --git a/Documentation/devicetree/bindings/sound/ak4104.txt b/Documentation/devicetree/bindings/sound/ak4104.txt
index deca5e18f304..ae5f7f057dc3 100644
--- a/Documentation/devicetree/bindings/sound/ak4104.txt
+++ b/Documentation/devicetree/bindings/sound/ak4104.txt
@@ -12,8 +12,8 @@ Required properties:
Optional properties:
- - reset-gpio : a GPIO spec for the reset pin. If specified, it will be
- deasserted before communication to the device starts.
+ - reset-gpios : a GPIO spec for the reset pin. If specified, it will be
+ deasserted before communication to the device starts.
Example:
diff --git a/Documentation/devicetree/bindings/sound/ak4118.txt b/Documentation/devicetree/bindings/sound/ak4118.txt
new file mode 100644
index 000000000000..6e11a2f7404c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ak4118.txt
@@ -0,0 +1,22 @@
+AK4118 S/PDIF transceiver
+
+This device supports I2C mode.
+
+Required properties:
+
+- compatible : "asahi-kasei,ak4118"
+- reg : The I2C address of the device for I2C
+- reset-gpios: A GPIO specifier for the reset pin
+- irq-gpios: A GPIO specifier for the IRQ pin
+
+Example:
+
+&i2c {
+ ak4118: ak4118@13 {
+ #sound-dai-cells = <0>;
+ compatible = "asahi-kasei,ak4118";
+ reg = <0x13>;
+ reset-gpios = <&gpio 0 GPIO_ACTIVE_LOW>;
+ irq-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-spdifin.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-spdifin.txt
new file mode 100644
index 000000000000..2e6cb7d9b202
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-spdifin.txt
@@ -0,0 +1,22 @@
+* Amlogic Audio SPDIF Input
+
+Required properties:
+- compatible: 'amlogic,axg-spdifin'
+- interrupts: interrupt specifier for the spdif input.
+- clocks: list of clock phandles, one for each entry in clock-names.
+- clock-names: should contain the following:
+ * "pclk" : peripheral clock.
+ * "refclk" : spdif input reference clock
+- #sound-dai-cells: must be 0.
+
+Example on the A113 SoC:
+
+spdifin: audio-controller@400 {
+ compatible = "amlogic,axg-spdifin";
+ reg = <0x0 0x400 0x0 0x30>;
+ #sound-dai-cells = <0>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&clkc_audio AUD_CLKID_SPDIFIN>,
+ <&clkc_audio AUD_CLKID_SPDIFIN_CLK>;
+ clock-names = "pclk", "refclk";
+};
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-card.txt b/Documentation/devicetree/bindings/sound/audio-graph-card.txt
index 7e63e53a901c..269682619a70 100644
--- a/Documentation/devicetree/bindings/sound/audio-graph-card.txt
+++ b/Documentation/devicetree/bindings/sound/audio-graph-card.txt
@@ -32,7 +32,9 @@ Required properties:
Optional properties:
- pa-gpios: GPIO used to control external amplifier.
+-----------------------
Example: Single DAI case
+-----------------------
sound_card {
compatible = "audio-graph-card";
@@ -61,7 +63,9 @@ Example: Single DAI case
};
};
+-----------------------
Example: Multi DAI case
+-----------------------
sound-card {
compatible = "audio-graph-card";
@@ -130,3 +134,204 @@ Example: Multi DAI case
};
};
+
+-----------------------
+Example: Sampling Rate Conversion
+-----------------------
+
+ sound_card {
+ compatible = "audio-graph-card";
+
+ label = "sound-card";
+ prefix = "codec";
+ routing = "codec Playback", "DAI0 Playback",
+ "DAI0 Capture", "codec Capture";
+ convert-rate = <48000>;
+
+ dais = <&cpu_port>;
+ };
+
+ audio-codec {
+ ...
+ port {
+ codec_endpoint: endpoint {
+ remote-endpoint = <&cpu_endpoint>;
+ };
+ };
+ };
+
+ dai-controller {
+ ...
+ cpu_port: port {
+ cpu_endpoint: endpoint {
+ remote-endpoint = <&codec_endpoint>;
+
+ dai-format = "left_j";
+ ...
+ };
+ };
+ };
+
+-----------------------
+Example: 2 CPU 1 Codec (Mixing)
+-----------------------
+
+ sound_card {
+ compatible = "audio-graph-card";
+
+ label = "sound-card";
+ routing = "codec Playback", "DAI0 Playback",
+ "codec Playback", "DAI1 Playback",
+ "DAI0 Capture", "codec Capture";
+
+ dais = <&cpu_port>;
+ };
+
+ audio-codec {
+ ...
+
+ audio-graph-card,prefix = "codec";
+ audio-graph-card,convert-rate = <48000>;
+ port {
+ reg = <0>;
+ codec_endpoint0: endpoint@0 {
+ remote-endpoint = <&cpu_endpoint0>;
+ };
+ codec_endpoint1: endpoint@1 {
+ remote-endpoint = <&cpu_endpoint1>;
+ };
+ };
+ };
+
+ dai-controller {
+ ...
+ cpu_port: port {
+ cpu_endpoint0: endpoint@0 {
+ remote-endpoint = <&codec_endpoint0>;
+
+ dai-format = "left_j";
+ ...
+ };
+ cpu_endpoint1: endpoint@1 {
+ remote-endpoint = <&codec_endpoint1>;
+
+ dai-format = "left_j";
+ ...
+ };
+ };
+ };
+
+-----------------------
+Example: Multi DAI with DPCM
+-----------------------
+
+ CPU0 ------ ak4613
+ CPU1 ------ HDMI
+ CPU2 ------ PCM3168A-p /* DPCM 1ch/2ch */
+ CPU3 --/ /* DPCM 3ch/4ch */
+ CPU4 --/ /* DPCM 5ch/6ch */
+ CPU5 --/ /* DPCM 7ch/8ch */
+ CPU6 ------ PCM3168A-c
+
+ sound_card: sound {
+ compatible = "audio-graph-card";
+
+ label = "sound-card";
+
+ routing = "pcm3168a Playback", "DAI2 Playback",
+ "pcm3168a Playback", "DAI3 Playback",
+ "pcm3168a Playback", "DAI4 Playback",
+ "pcm3168a Playback", "DAI5 Playback";
+
+ dais = <&snd_port0 /* ak4613 */
+ &snd_port1 /* HDMI0 */
+ &snd_port2 /* pcm3168a playback */
+ &snd_port3 /* pcm3168a capture */
+ >;
+ };
+
+ ak4613: codec@10 {
+ ...
+ port {
+ ak4613_endpoint: endpoint {
+ remote-endpoint = <&rsnd_endpoint0>;
+ };
+ };
+ };
+
+ pcm3168a: audio-codec@44 {
+ ...
+ audio-graph-card,prefix = "pcm3168a";
+ audio-graph-card,convert-channels = <8>; /* TDM Split */
+ ports {
+ port@0 {
+ reg = <0>;
+ pcm3168a_endpoint_p1: endpoint@1 {
+ remote-endpoint = <&rsnd_endpoint2>;
+ ...
+ };
+ pcm3168a_endpoint_p2: endpoint@2 {
+ remote-endpoint = <&rsnd_endpoint3>;
+ ...
+ };
+ pcm3168a_endpoint_p3: endpoint@3 {
+ remote-endpoint = <&rsnd_endpoint4>;
+ ...
+ };
+ pcm3168a_endpoint_p4: endpoint@4 {
+ remote-endpoint = <&rsnd_endpoint5>;
+ ...
+ };
+ };
+ port@1 {
+ reg = <1>;
+ pcm3168a_endpoint_c: endpoint {
+ remote-endpoint = <&rsnd_endpoint6>;
+ ...
+ };
+ };
+ };
+ };
+
+ &sound {
+ ports {
+ snd_port0: port@0 {
+ rsnd_endpoint0: endpoint {
+ remote-endpoint = <&ak4613_endpoint>;
+ ...
+ };
+ };
+ snd_port1: port@1 {
+ rsnd_endpoint1: endpoint {
+ remote-endpoint = <&dw_hdmi0_snd_in>;
+ ...
+ };
+ };
+ snd_port2: port@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ rsnd_endpoint2: endpoint@2 {
+ remote-endpoint = <&pcm3168a_endpoint_p1>;
+ ...
+ };
+ rsnd_endpoint3: endpoint@3 {
+ remote-endpoint = <&pcm3168a_endpoint_p2>;
+ ...
+ };
+ rsnd_endpoint4: endpoint@4 {
+ remote-endpoint = <&pcm3168a_endpoint_p3>;
+ ...
+ };
+ rsnd_endpoint5: endpoint@5 {
+ remote-endpoint = <&pcm3168a_endpoint_p4>;
+ ...
+ };
+ };
+ snd_port3: port@6 {
+ rsnd_endpoint6: endpoint {
+ remote-endpoint = <&pcm3168a_endpoint_c>;
+ ...
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt b/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt
index 441dd6f29df1..62d42768a00b 100644
--- a/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt
+++ b/Documentation/devicetree/bindings/sound/audio-graph-scu-card.txt
@@ -77,11 +77,9 @@ Example 2. 2 CPU 1 Codec (Mixing)
compatible = "audio-graph-scu-card";
label = "sound-card";
- prefix = "codec";
routing = "codec Playback", "DAI0 Playback",
"codec Playback", "DAI1 Playback",
"DAI0 Capture", "codec Capture";
- convert-rate = <48000>;
dais = <&cpu_port0
&cpu_port1>;
@@ -90,6 +88,8 @@ Example 2. 2 CPU 1 Codec (Mixing)
audio-codec {
...
+ audio-graph-card,prefix = "codec";
+ audio-graph-card,convert-rate = <48000>;
port {
codec_endpoint0: endpoint {
remote-endpoint = <&cpu_endpoint0>;
diff --git a/Documentation/devicetree/bindings/sound/cs4270.txt b/Documentation/devicetree/bindings/sound/cs4270.txt
index 6b222f9b8ef5..c33770ec4c3c 100644
--- a/Documentation/devicetree/bindings/sound/cs4270.txt
+++ b/Documentation/devicetree/bindings/sound/cs4270.txt
@@ -10,8 +10,8 @@ Required properties:
Optional properties:
- - reset-gpio : a GPIO spec for the reset pin. If specified, it will be
- deasserted before communication to the codec starts.
+ - reset-gpios : a GPIO spec for the reset pin. If specified, it will be
+ deasserted before communication to the codec starts.
Example:
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
index 46bc9829c71a..b279b6072bd5 100644
--- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
+++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
@@ -30,6 +30,11 @@ Optional properties:
- ti,hwmods : Must be "mcasp<n>", n is controller instance starting 0
- tx-num-evt : FIFO levels.
- rx-num-evt : FIFO levels.
+- dismod : Specify the drive on the TX pin during inactive slots
+ 0 : 3-state
+ 2 : logic low
+ 3 : logic high
+ Defaults to 'logic low' when the property is not present
+ (see the sketch below).
- sram-size-playback : size of sram to be allocated during playback
- sram-size-capture : size of sram to be allocated during capture
- interrupts : Interrupt numbers for McASP
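
A sketch of how dismod slots into a McASP node; the unit address and the
other property values are illustrative only:

	mcasp0: mcasp@48038000 {
		compatible = "ti,am33xx-mcasp-audio";
		...
		op-mode = <0>;	/* McASP in I2S mode */
		tdm-slots = <2>;
		tx-num-evt = <32>;
		rx-num-evt = <32>;
		dismod = <2>;	/* drive TX pin to logic low in inactive slots */
	};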
diff --git a/Documentation/devicetree/bindings/sound/dmic.txt b/Documentation/devicetree/bindings/sound/dmic.txt
index e957b4136716..32e871037269 100644
--- a/Documentation/devicetree/bindings/sound/dmic.txt
+++ b/Documentation/devicetree/bindings/sound/dmic.txt
@@ -9,6 +9,7 @@ Optional properties:
- dmicen-gpios: GPIO specifier for dmic to control start and stop
- num-channels: Number of microphones on this DAI
- wakeup-delay-ms: Delay (in ms) after enabling the DMIC
+ - modeswitch-delay-ms: Delay (in ms) to complete DMIC mode switch
Example node:
@@ -17,4 +18,5 @@ Example node:
dmicen-gpios = <&gpio4 3 GPIO_ACTIVE_HIGH>;
num-channels = <1>;
wakeup-delay-ms = <50>;
+ modeswitch-delay-ms = <35>;
};
diff --git a/Documentation/devicetree/bindings/sound/fsl-sai.txt b/Documentation/devicetree/bindings/sound/fsl-sai.txt
index dd9e59738e08..2e726b983845 100644
--- a/Documentation/devicetree/bindings/sound/fsl-sai.txt
+++ b/Documentation/devicetree/bindings/sound/fsl-sai.txt
@@ -35,13 +35,13 @@ Required properties:
- fsl,sai-synchronous-rx: This is a boolean property. If present, indicating
that SAI will work in the synchronous mode (sync Tx
- with Rx) which means both the transimitter and the
+ with Rx) which means both the transmitter and the
receiver will send and receive data by following
receiver's bit clocks and frame sync clocks.
- fsl,sai-asynchronous: This is a boolean property. If present, indicating
that SAI will work in the asynchronous mode, which
- means both transimitter and receiver will send and
+ means both transmitter and receiver will send and
receive data by following their own bit clocks and
frame sync clocks separately.
@@ -58,8 +58,8 @@ Optional properties (for mx6ul):
Note:
- If both fsl,sai-asynchronous and fsl,sai-synchronous-rx are absent, the
default synchronous mode (sync Rx with Tx) will be used, which means both
- transimitter and receiver will send and receive data by following clocks
- of transimitter.
+ transmitter and receiver will send and receive data by following clocks
+ of transmitter.
- fsl,sai-asynchronous and fsl,sai-synchronous-rx are exclusive.
Example:
diff --git a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
index 5f4e68ca228c..ff98a0cb5b3f 100644
--- a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
+++ b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
@@ -7,6 +7,8 @@ Required properties:
<L3 interconnect address, size>;
- interrupts: Interrupt number for McPDM
- ti,hwmods: Name of the hwmod associated to the McPDM
+- clocks: phandle for the pdmclk provider, likely <&twl6040>
+- clock-names: Must be "pdmclk"
Example:
@@ -18,3 +20,11 @@ mcpdm: mcpdm@40132000 {
interrupt-parent = <&gic>;
ti,hwmods = "mcpdm";
};
+
+In the board DTS file, the pdmclk needs to be added:
+
+&mcpdm {
+ clocks = <&twl6040>;
+ clock-names = "pdmclk";
+ status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/sound/pcm3060.txt b/Documentation/devicetree/bindings/sound/pcm3060.txt
index 90fcb8523099..97de66932d44 100644
--- a/Documentation/devicetree/bindings/sound/pcm3060.txt
+++ b/Documentation/devicetree/bindings/sound/pcm3060.txt
@@ -9,9 +9,15 @@ Required properties:
- reg : the I2C address of the device for I2C, the chip select
number for SPI.
+Optional properties:
+
+- ti,out-single-ended: boolean; if present, the output is single-ended;
+ if not specified, the output is differential.
+
Examples:
pcm3060: pcm3060@46 {
compatible = "ti,pcm3060";
reg = <0x46>;
+ ti,out-single-ended;
};
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6asm.txt b/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
index f9c7bd8c1bc0..9f5378c51686 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6asm.txt
@@ -27,6 +27,28 @@ used by the apr service device.
Value type: <u32>
Definition: Must be 1
+== ASM DAI is a subnode of "dais" and represents a DAI; it includes board
+specific configuration of each DAI. Must contain the following properties.
+
+- reg
+ Usage: required
+ Value type: <u32>
+ Definition: Must be dai id
+
+- direction:
+ Usage: Required for Compress offload dais
+ Value type: <u32>
+ Definition: Specifies the direction of the dai stream
+ 0 for both tx and rx
+ 1 for only tx (Capture/Encode)
+ 2 for only rx (Playback/Decode)
+
+- is-compress-dai:
+ Usage: Required for Compress offload dais
+ Value type: <boolean>
+ Definition: present for Compress offload dais
+
+
= EXAMPLE
q6asm@7 {
@@ -35,5 +57,10 @@ q6asm@7 {
q6asmdai: dais {
compatible = "qcom,q6asm-dais";
#sound-dai-cells = <1>;
+ mm@0 {
+ reg = <0>;
+ direction = <2>;
+ is-compress-dai;
+ };
};
};
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index d92b705e7917..648d43e1b1e9 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -39,15 +39,7 @@ This is example of
Playback: [MEM] -> [SRC2] -> [DVC0] -> [SSIU0/SSI0] -> [codec]
Capture: [MEM] <- [DVC1] <- [SRC3] <- [SSIU1/SSI1] <- [codec]
- &rcar_sound {
- ...
- rcar_sound,dai {
- dai0 {
- playback = <&ssi0 &src2 &dvc0>;
- capture = <&ssi1 &src3 &dvc1>;
- };
- };
- };
+see "Example: simple sound card"
You can use below.
${LINUX}/arch/arm/boot/dts/r8a7790.dts can be good example.
@@ -83,29 +75,8 @@ SRC can convert [xx]Hz to [yy]Hz. Then, it has below 2 modes
** Asynchronous mode
------------------
-You need to use "simple-scu-audio-card" sound card for it.
-example)
-
- sound {
- compatible = "simple-scu-audio-card";
- ...
- /*
- * SRC Asynchronous mode setting
- * Playback:
- * All input data will be converted to 48kHz
- * Capture:
- * Inputed 48kHz data will be converted to
- * system specified Hz
- */
- simple-audio-card,convert-rate = <48000>;
- ...
- simple-audio-card,cpu {
- sound-dai = <&rcar_sound>;
- };
- simple-audio-card,codec {
- ...
- };
- };
+You need to use "simple-scu-audio-card" or "audio-graph-scu-card" for it.
+see "Example: simple sound card for Asynchronous mode"
------------------
** Synchronous mode
@@ -141,26 +112,8 @@ For more detail information, see below
${LINUX}/sound/soc/sh/rcar/ctu.c
- comment of header
-You need to use "simple-scu-audio-card" sound card for it.
-example)
-
- sound {
- compatible = "simple-scu-audio-card";
- ...
- /*
- * CTU setting
- * All input data will be converted to 2ch
- * as output data
- */
- simple-audio-card,convert-channels = <2>;
- ...
- simple-audio-card,cpu {
- sound-dai = <&rcar_sound>;
- };
- simple-audio-card,codec {
- ...
- };
- };
+You need to use "simple-scu-audio-card" or "audio-graph-scu-card" for it.
+see "Example: simple sound card for channel convert"
Ex) Exchange output channel
Input -> Output
@@ -190,42 +143,13 @@ and these sounds will be merged by MIX.
aplay -D plughw:0,0 xxxx.wav &
aplay -D plughw:0,1 yyyy.wav
-You need to use "simple-scu-audio-card" sound card for it.
+You need to use "simple-scu-audio-card" or "audio-graph-scu-card" for it.
Ex)
[MEM] -> [SRC1] -> [CTU02] -+-> [MIX0] -> [DVC0] -> [SSI0]
|
[MEM] -> [SRC2] -> [CTU03] -+
- sound {
- #address-cells = <1>;
- #size-cells = <0>;
-
- compatible = "simple-scu-audio-card";
- ...
- simple-audio-card,cpu@0 {
- reg = <0>;
- sound-dai = <&rcar_sound 0>;
- };
- simple-audio-card,cpu@1 {
- reg = <1>;
- sound-dai = <&rcar_sound 1>;
- };
- simple-audio-card,codec {
- ...
- };
- };
-
- &rcar_sound {
- ...
- rcar_sound,dai {
- dai0 {
- playback = <&src1 &ctu02 &mix0 &dvc0 &ssi0>;
- };
- dai1 {
- playback = <&src2 &ctu03 &mix0 &dvc0 &ssi0>;
- };
- };
- };
+see "Example: simple sound card for MIXer"
=============================================
* DVC (Digital Volume and Mute Function)
@@ -257,15 +181,31 @@ Volume Ramp
* SSIU (Serial Sound Interface Unit)
=============================================
-There is no DT settings for SSIU, because SSIU will be automatically
-selected via SSI.
SSIU can avoid some under/over run error, because it has some buffer.
But you can't use it if SSI was PIO mode.
-In DMA mode, you can select not to use SSIU by using "no-busif" on DT.
+In DMA mode, you can opt not to use SSIU by adding "no-busif" to the SSI node.
- &ssi0 {
- no-busif;
- };
+SSIU handles BUSIF, which is used for TDM Split mode.
+This driver assumes that an audio-graph card will be used.
+
+TDM Split mode merges 4 streams. The system exposes 4 sound interfaces,
+and these streams are merged by SSIU/SSI.
+
+ aplay -D plughw:0,0 xxxx.wav &
+ aplay -D plughw:0,1 xxxx.wav &
+ aplay -D plughw:0,2 xxxx.wav &
+ aplay -D plughw:0,3 xxxx.wav
+
+ 2ch 8ch
+ [MEM] -> [SSIU 30] -+-> [SSIU 3] --> [Codec]
+ 2ch |
+ [MEM] -> [SSIU 31] -+
+ 2ch |
+ [MEM] -> [SSIU 32] -+
+ 2ch |
+ [MEM] -> [SSIU 33] -+
+
+see "Example: simple sound card for TDM Split"
=============================================
* SSI (Serial Sound Interface)
@@ -304,14 +244,7 @@ This is example if SSI1 want to share WS pin with SSI0
You can use Multi-SSI.
This is example of SSI0/SSI1/SSI2 (= for 6ch)
- &rcar_sound {
- ...
- rcar_sound,dai {
- dai0 {
- playback = <&ssi0 &ssi1 &ssi2 &src0 &dvc0>;
- };
- };
- };
+see "Example: simple sound card for Multi channel"
** TDM-SSI
@@ -319,19 +252,7 @@ You can use TDM with SSI.
This is example of TDM 6ch.
Driver can automatically switch TDM <-> stereo mode in this case.
- rsnd_tdm: sound {
- compatible = "simple-audio-card";
- ...
- simple-audio-card,cpu {
- /* system can use TDM 6ch */
- dai-tdm-slot-num = <6>;
- sound-dai = <&rcar_sound>;
- };
- simple-audio-card,codec {
- ...
- };
- };
-
+see "Example: simple sound card for TDM"
=============================================
Required properties:
@@ -346,6 +267,7 @@ Required properties:
- "renesas,rcar_sound-r8a7744" (RZ/G1N)
- "renesas,rcar_sound-r8a7745" (RZ/G1E)
- "renesas,rcar_sound-r8a774a1" (RZ/G2M)
+ - "renesas,rcar_sound-r8a774c0" (RZ/G2E)
- "renesas,rcar_sound-r8a7778" (R-Car M1A)
- "renesas,rcar_sound-r8a7779" (R-Car H1)
- "renesas,rcar_sound-r8a7790" (R-Car H2)
@@ -356,6 +278,7 @@ Required properties:
- "renesas,rcar_sound-r8a7796" (R-Car M3-W)
- "renesas,rcar_sound-r8a77965" (R-Car M3-N)
- "renesas,rcar_sound-r8a77990" (R-Car E3)
+ - "renesas,rcar_sound-r8a77995" (R-Car D3)
- reg : Should contain the register physical address.
required register is
SRU/ADG/SSI if generation1
@@ -363,6 +286,9 @@ Required properties:
- rcar_sound,ssi : Should contain SSI feature.
The number of SSI subnode should be same as HW.
see below for detail.
+- rcar_sound,ssiu : Should contain SSIU feature.
+ The number of SSIU subnode should be same as HW.
+ see below for detail.
- rcar_sound,src : Should contain SRC feature.
The number of SRC subnode should be same as HW.
see below for detail.
@@ -402,8 +328,13 @@ SSI subnode properties:
- no-busif : BUSIF is not used in the [mem -> SSI] via DMA case
- dma : Should contain Audio DMAC entry
- dma-names : SSI case "rx" (=playback), "tx" (=capture)
+ Deprecated: see SSIU subnode properties
SSIU case "rxu" (=playback), "txu" (=capture)
+SSIU subnode properties:
+- dma : Should contain Audio DMAC entry
+- dma-names : "rx" (=playback), "tx" (=capture)
+
SRC subnode properties:
- dma : Should contain Audio DMAC entry
- dma-names : "rx" (=playback), "tx" (=capture)
@@ -532,56 +463,55 @@ rcar_sound: sound@ec500000 {
};
};
+ rcar_sound,ssiu {
+ ssiu00: ssiu-0 {
+ dmas = <&audma0 0x15>, <&audma1 0x16>;
+ dma-names = "rx", "tx";
+ };
+ ssiu01: ssiu-1 {
+ dmas = <&audma0 0x35>, <&audma1 0x36>;
+ dma-names = "rx", "tx";
+ };
+
+ ...
+
+ ssiu95: ssiu-49 {
+ dmas = <&audma0 0xA5>, <&audma1 0xA6>;
+ dma-names = "rx", "tx";
+ };
+ ssiu96: ssiu-50 {
+ dmas = <&audma0 0xA7>, <&audma1 0xA8>;
+ dma-names = "rx", "tx";
+ };
+ ssiu97: ssiu-51 {
+ dmas = <&audma0 0xA9>, <&audma1 0xAA>;
+ dma-names = "rx", "tx";
+ };
+ };
+
rcar_sound,ssi {
ssi0: ssi-0 {
interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x01>, <&audma1 0x02>;
+ dma-names = "rx", "tx";
};
ssi1: ssi-1 {
interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
- dma-names = "rx", "tx", "rxu", "txu";
- };
- ssi2: ssi-2 {
- interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
- dma-names = "rx", "tx", "rxu", "txu";
- };
- ssi3: ssi-3 {
- interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
- dma-names = "rx", "tx", "rxu", "txu";
- };
- ssi4: ssi-4 {
- interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
- dma-names = "rx", "tx", "rxu", "txu";
- };
- ssi5: ssi-5 {
- interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
- dma-names = "rx", "tx", "rxu", "txu";
- };
- ssi6: ssi-6 {
- interrupts = <0 376 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
- dma-names = "rx", "tx", "rxu", "txu";
- };
- ssi7: ssi-7 {
- interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x03>, <&audma1 0x04>;
+ dma-names = "rx", "tx";
};
+
+ ...
+
ssi8: ssi-8 {
interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x11>, <&audma1 0x12>;
+ dma-names = "rx", "tx";
};
ssi9: ssi-9 {
interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
- dma-names = "rx", "tx", "rxu", "txu";
+ dmas = <&audma0 0x13>, <&audma1 0x14>;
+ dma-names = "rx", "tx";
};
};
@@ -647,25 +577,174 @@ Example: simple sound card
};
=============================================
+Example: simple sound card for Asynchronous mode
+=============================================
+
+sound {
+ compatible = "simple-scu-audio-card";
+ ...
+ /*
+ * SRC Asynchronous mode setting
+ * Playback:
+ * All input data will be converted to 48kHz
+ * Capture:
+ * Input 48kHz data will be converted to
+ * system specified Hz
+ */
+ simple-audio-card,convert-rate = <48000>;
+ ...
+ simple-audio-card,cpu {
+ sound-dai = <&rcar_sound>;
+ };
+ simple-audio-card,codec {
+ ...
+ };
+};
+
+=============================================
+Example: simple sound card for channel convert
+=============================================
+
+sound {
+ compatible = "simple-scu-audio-card";
+ ...
+ /*
+ * CTU setting
+ * All input data will be converted to 2ch
+ * as output data
+ */
+ simple-audio-card,convert-channels = <2>;
+ ...
+ simple-audio-card,cpu {
+ sound-dai = <&rcar_sound>;
+ };
+ simple-audio-card,codec {
+ ...
+ };
+};
+
+=============================================
+Example: simple sound card for MIXer
+=============================================
+
+sound {
+ compatible = "simple-scu-audio-card";
+ ...
+ simple-audio-card,cpu@0 {
+ sound-dai = <&rcar_sound 0>;
+ };
+ simple-audio-card,cpu@1 {
+ sound-dai = <&rcar_sound 1>;
+ };
+ simple-audio-card,codec {
+ ...
+ };
+};
+
+&rcar_sound {
+ ...
+ rcar_sound,dai {
+ dai0 {
+ playback = <&src1 &ctu02 &mix0 &dvc0 &ssi0>;
+ };
+ dai1 {
+ playback = <&src2 &ctu03 &mix0 &dvc0 &ssi0>;
+ };
+ };
+};
+
+=============================================
Example: simple sound card for TDM
=============================================
- rsnd_tdm: sound {
- compatible = "simple-audio-card";
+rsnd_tdm: sound {
+ compatible = "simple-audio-card";
- simple-audio-card,format = "left_j";
- simple-audio-card,bitclock-master = <&sndcodec>;
- simple-audio-card,frame-master = <&sndcodec>;
+ simple-audio-card,format = "left_j";
+ simple-audio-card,bitclock-master = <&sndcodec>;
+ simple-audio-card,frame-master = <&sndcodec>;
- sndcpu: simple-audio-card,cpu {
- sound-dai = <&rcar_sound>;
- dai-tdm-slot-num = <6>;
+ sndcpu: simple-audio-card,cpu {
+ sound-dai = <&rcar_sound>;
+ dai-tdm-slot-num = <6>;
+ };
+
+ sndcodec: simple-audio-card,codec {
+ sound-dai = <&xxx>;
+ };
+};
+
+=============================================
+Example: simple sound card for TDM Split
+=============================================
+
+sound_card: sound {
+ compatible = "audio-graph-scu-card";
+ prefix = "xxxx";
+ routing = "xxxx Playback", "DAI0 Playback",
+ "xxxx Playback", "DAI1 Playback",
+ "xxxx Playback", "DAI2 Playback",
+ "xxxx Playback", "DAI3 Playback";
+ convert-channels = <8>; /* TDM Split */
+
+ dais = <&rsnd_port0 /* playback ch1/ch2 */
+ &rsnd_port1 /* playback ch3/ch4 */
+ &rsnd_port2 /* playback ch5/ch6 */
+ &rsnd_port3 /* playback ch7/ch8 */
+ >;
+};
+
+audio-codec {
+ ...
+ port {
+ codec_0: endpoint@1 {
+ remote-endpoint = <&rsnd_ep0>;
+ };
+ codec_1: endpoint@2 {
+ remote-endpoint = <&rsnd_ep1>;
+ };
+ codec_2: endpoint@3 {
+ remote-endpoint = <&rsnd_ep2>;
+ };
+ codec_3: endpoint@4 {
+ remote-endpoint = <&rsnd_ep3>;
};
+ };
+};
- sndcodec: simple-audio-card,codec {
- sound-dai = <&xxx>;
+&rcar_sound {
+ ...
+ ports {
+ rsnd_port0: port@0 {
+ rsnd_ep0: endpoint {
+ remote-endpoint = <&codec_0>;
+ ...
+ playback = <&ssiu30 &ssi3>;
+ };
+ };
+ rsnd_port1: port@1 {
+ rsnd_ep1: endpoint {
+ remote-endpoint = <&codec_1>;
+ ...
+ playback = <&ssiu31 &ssi3>;
+ };
+ };
+ rsnd_port2: port@2 {
+ rsnd_ep2: endpoint {
+ remote-endpoint = <&codec_2>;
+ ...
+ playback = <&ssiu32 &ssi3>;
+ };
+ };
+ rsnd_port3: port@3 {
+ rsnd_ep3: endpoint {
+ remote-endpoint = <&codec_3>;
+ ...
+ playback = <&ssiu33 &ssi3>;
+ };
};
};
+};
=============================================
Example: simple sound card for Multi channel
diff --git a/Documentation/devicetree/bindings/sound/rt5631.txt b/Documentation/devicetree/bindings/sound/rt5631.txt
index 92b986ca337b..56bc85232c49 100644
--- a/Documentation/devicetree/bindings/sound/rt5631.txt
+++ b/Documentation/devicetree/bindings/sound/rt5631.txt
@@ -35,14 +35,14 @@ Pins on the device (for linking into audio routes):
Example:
-alc5631: alc5631@1a {
+alc5631: audio-codec@1a {
compatible = "realtek,alc5631";
reg = <0x1a>;
};
or
-rt5631: rt5631@1a {
+rt5631: audio-codec@1a {
compatible = "realtek,rt5631";
reg = <0x1a>;
};
diff --git a/Documentation/devicetree/bindings/sound/rt5663.txt b/Documentation/devicetree/bindings/sound/rt5663.txt
index 23386446c63d..2a55e9133408 100644
--- a/Documentation/devicetree/bindings/sound/rt5663.txt
+++ b/Documentation/devicetree/bindings/sound/rt5663.txt
@@ -10,6 +10,10 @@ Required properties:
- interrupts : The CODEC's interrupt output.
+- avdd-supply: Power supply for AVDD, providing 1.8V.
+
+- cpvdd-supply: Power supply for CPVDD, providing 3.5V.
+
Optional properties:
- "realtek,dc_offset_l_manual"
@@ -51,4 +55,6 @@ rt5663: codec@12 {
compatible = "realtek,rt5663";
reg = <0x12>;
interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ avdd-supply = <&pp1800_a_alc5662>;
+ cpvdd-supply = <&pp3500_a_alc5662>;
};
diff --git a/Documentation/devicetree/bindings/sound/simple-amplifier.txt b/Documentation/devicetree/bindings/sound/simple-amplifier.txt
index 8647edae7af0..7182ac4f1e65 100644
--- a/Documentation/devicetree/bindings/sound/simple-amplifier.txt
+++ b/Documentation/devicetree/bindings/sound/simple-amplifier.txt
@@ -4,9 +4,14 @@ Required properties:
- compatible : "dioo,dio2125" or "simple-audio-amplifier"
- enable-gpios : the gpio connected to the enable pin of the simple amplifier
+Optional properties:
+- VCC-supply : power supply for the device, as covered
+ in Documentation/devicetree/bindings/regulator/regulator.txt
+
Example:
amp: analog-amplifier {
compatible = "simple-audio-amplifier";
+ VCC-supply = <&regulator>;
enable-gpios = <&gpio GPIOH_3 0>;
};
diff --git a/Documentation/devicetree/bindings/sound/simple-card.txt b/Documentation/devicetree/bindings/sound/simple-card.txt
index a4c72d09cd45..4629c8f8a6b6 100644
--- a/Documentation/devicetree/bindings/sound/simple-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-card.txt
@@ -95,7 +95,9 @@ Optional CPU/CODEC subnodes properties:
initialization. It is useful for some aCPUs with
fixed clocks.
+-------------------------------------------
Example 1 - single DAI link:
+-------------------------------------------
sound {
compatible = "simple-audio-card";
@@ -138,7 +140,9 @@ sh_fsi2: sh_fsi2@ec230000 {
interrupts = <0 146 0x4>;
};
+-------------------------------------------
Example 2 - many DAI links:
+-------------------------------------------
sound {
compatible = "simple-audio-card";
@@ -176,8 +180,10 @@ sound {
};
};
+-------------------------------------------
Example 3 - route audio from IMX6 SSI2 through TLV320DAC3100 codec
through TPA6130A2 amplifier to headphones:
+-------------------------------------------
&i2c0 {
codec: tlv320dac3100@18 {
@@ -210,3 +216,134 @@ sound {
clocks = ...
};
};
+
+-------------------------------------------
+Example 4. Sampling Rate Conversion
+-------------------------------------------
+
+sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,name = "rsnd-ak4643";
+ simple-audio-card,format = "left_j";
+ simple-audio-card,bitclock-master = <&sndcodec>;
+ simple-audio-card,frame-master = <&sndcodec>;
+
+ simple-audio-card,convert-rate = <48000>;
+
+ simple-audio-card,prefix = "ak4642";
+ simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
+ "DAI0 Capture", "ak4642 Capture";
+
+ sndcpu: simple-audio-card,cpu {
+ sound-dai = <&rcar_sound>;
+ };
+
+ sndcodec: simple-audio-card,codec {
+ sound-dai = <&ak4643>;
+ system-clock-frequency = <11289600>;
+ };
+};
+
+-------------------------------------------
+Example 5. 2 CPU 1 Codec (Mixing)
+-------------------------------------------
+sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,name = "rsnd-ak4643";
+ simple-audio-card,format = "left_j";
+ simple-audio-card,bitclock-master = <&dpcmcpu>;
+ simple-audio-card,frame-master = <&dpcmcpu>;
+
+ simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
+ "ak4642 Playback", "DAI1 Playback";
+
+ dpcmcpu: cpu@0 {
+ sound-dai = <&rcar_sound 0>;
+ };
+
+ cpu@1 {
+ sound-dai = <&rcar_sound 1>;
+ };
+
+ codec {
+ prefix = "ak4642";
+ sound-dai = <&ak4643>;
+ clocks = <&audio_clock>;
+ };
+};
+
+-------------------------------------------
+Example 6 - many DAI links with DPCM:
+-------------------------------------------
+
+CPU0 ------ ak4613
+CPU1 ------ PCM3168A-p /* DPCM 1ch/2ch */
+CPU2 --/ /* DPCM 3ch/4ch */
+CPU3 --/ /* DPCM 5ch/6ch */
+CPU4 --/ /* DPCM 7ch/8ch */
+CPU5 ------ PCM3168A-c
+
+sound {
+ compatible = "simple-audio-card";
+
+ simple-audio-card,routing =
+ "pcm3168a Playback", "DAI1 Playback",
+ "pcm3168a Playback", "DAI2 Playback",
+ "pcm3168a Playback", "DAI3 Playback",
+ "pcm3168a Playback", "DAI4 Playback";
+
+ simple-audio-card,dai-link@0 {
+ format = "left_j";
+ bitclock-master = <&sndcpu0>;
+ frame-master = <&sndcpu0>;
+
+ sndcpu0: cpu {
+ sound-dai = <&rcar_sound 0>;
+ };
+ codec {
+ sound-dai = <&ak4613>;
+ };
+ };
+ simple-audio-card,dai-link@1 {
+ format = "i2s";
+ bitclock-master = <&sndcpu1>;
+ frame-master = <&sndcpu1>;
+
+ convert-channels = <8>; /* TDM Split */
+
+ sndcpu1: cpu@0 {
+ sound-dai = <&rcar_sound 1>;
+ };
+ cpu@1 {
+ sound-dai = <&rcar_sound 2>;
+ };
+ cpu@2 {
+ sound-dai = <&rcar_sound 3>;
+ };
+ cpu@3 {
+ sound-dai = <&rcar_sound 4>;
+ };
+ codec {
+ mclk-fs = <512>;
+ prefix = "pcm3168a";
+ dai-tdm-slot-num = <8>;
+ sound-dai = <&pcm3168a 0>;
+ };
+ };
+ simple-audio-card,dai-link@2 {
+ format = "i2s";
+ bitclock-master = <&sndcpu2>;
+ frame-master = <&sndcpu2>;
+
+ sndcpu2: cpu {
+ sound-dai = <&rcar_sound 5>;
+ };
+ codec {
+ mclk-fs = <512>;
+ prefix = "pcm3168a";
+ sound-dai = <&pcm3168a 1>;
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/simple-scu-card.txt b/Documentation/devicetree/bindings/sound/simple-scu-card.txt
index 32f8dbce5241..3a2f71616cda 100644
--- a/Documentation/devicetree/bindings/sound/simple-scu-card.txt
+++ b/Documentation/devicetree/bindings/sound/simple-scu-card.txt
@@ -75,7 +75,6 @@ sound {
simple-audio-card,bitclock-master = <&dpcmcpu>;
simple-audio-card,frame-master = <&dpcmcpu>;
- simple-audio-card,prefix = "ak4642";
simple-audio-card,routing = "ak4642 Playback", "DAI0 Playback",
"ak4642 Playback", "DAI1 Playback";
@@ -88,6 +87,7 @@ sound {
};
codec {
+ prefix = "ak4642";
sound-dai = <&ak4643>;
clocks = <&audio_clock>;
};
diff --git a/Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt b/Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt
index 4f8ad0e04d20..056a098495cc 100644
--- a/Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt
+++ b/Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt
@@ -4,9 +4,11 @@ Required properties:
- compatible: must be one of the following compatibles:
- "allwinner,sun50i-a64-codec-analog"
- reg: must contain the registers location and length
+- cpvdd-supply: Regulator supply for the headphone amplifier
Example:
codec_analog: codec-analog@1f015c0 {
compatible = "allwinner,sun50i-a64-codec-analog";
reg = <0x01f015c0 0x4>;
+ cpvdd-supply = <&reg_eldo1>;
};
diff --git a/Documentation/devicetree/bindings/sound/xlnx,i2s.txt b/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
new file mode 100644
index 000000000000..5e7c7d5bb60a
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/xlnx,i2s.txt
@@ -0,0 +1,28 @@
+Device-Tree bindings for Xilinx I2S PL block
+
+The IP supports I2S based playback/capture audio
+
+Required property:
+ - compatible: "xlnx,i2s-transmitter-1.0" for playback and
+ "xlnx,i2s-receiver-1.0" for capture
+
+Required properties common to both I2S playback and capture:
+ - reg: Base address and size of the IP core instance.
+ - xlnx,dwidth: sample data width. Can be any of 16, 24.
+ - xlnx,num-channels: Number of I2S streams. Can be any of 1, 2, 3, 4.
+ supported channels = 2 * xlnx,num-channels
+
+Example:
+
+ i2s_receiver@a0080000 {
+ compatible = "xlnx,i2s-receiver-1.0";
+ reg = <0x0 0xa0080000 0x0 0x10000>;
+ xlnx,dwidth = <0x18>;
+ xlnx,num-channels = <1>;
+ };
+ i2s_transmitter@a0090000 {
+ compatible = "xlnx,i2s-transmitter-1.0";
+ reg = <0x0 0xa0090000 0x0 0x10000>;
+ xlnx,dwidth = <0x18>;
+ xlnx,num-channels = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt b/Documentation/devicetree/bindings/spi/atmel-quadspi.txt
index b93c1e2f25dd..b93c1e2f25dd 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-quadspi.txt
+++ b/Documentation/devicetree/bindings/spi/atmel-quadspi.txt
diff --git a/Documentation/devicetree/bindings/spi/nuvoton,npcm-pspi.txt b/Documentation/devicetree/bindings/spi/nuvoton,npcm-pspi.txt
new file mode 100644
index 000000000000..1fd9a4406a1d
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/nuvoton,npcm-pspi.txt
@@ -0,0 +1,43 @@
+Nuvoton NPCM Peripheral Serial Peripheral Interface (PSPI) controller driver
+
+The Nuvoton NPCM7xx SoC supports two PSPI channels.
+
+Required properties:
+ - compatible : "nuvoton,npcm750-pspi" for NPCM7XX BMC
+ - #address-cells : should be 1. see spi-bus.txt
+ - #size-cells : should be 0. see spi-bus.txt
+ - reg : specifies physical base address and size of the registers.
+ - interrupts : should contain the PSPI interrupt.
+ - clocks : phandle of PSPI reference clock.
+ - clock-names: Should be "clk_apb5".
+ - pinctrl-names : a pinctrl state named "default" must be defined.
+ - pinctrl-0 : phandle referencing pin configuration of the device.
+ - cs-gpios: Specifies the gpio pins to be used for chipselects.
+ See: Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Optional properties:
+- clock-frequency : Input clock frequency to the PSPI block in Hz.
+ Default is 25000000 Hz.
+
+Aliases:
+- All the SPI controller nodes should be represented in the "aliases" node
+ using the format 'spi{n}', where n is the correct controller number.
+
+Example:
+
+aliases {
+ spi0 = &spi0;
+};
+
+spi0: spi@f0200000 {
+ compatible = "nuvoton,npcm750-pspi";
+ reg = <0xf0200000 0x1000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pspi1_pins>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_APB5>;
+ clock-names = "clk_apb5";
+ cs-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+};
diff --git a/Documentation/devicetree/bindings/spi/omap-spi.txt b/Documentation/devicetree/bindings/spi/omap-spi.txt
index 2ba5f9c023ac..487208c256c0 100644
--- a/Documentation/devicetree/bindings/spi/omap-spi.txt
+++ b/Documentation/devicetree/bindings/spi/omap-spi.txt
@@ -2,6 +2,7 @@ OMAP2+ McSPI device
Required properties:
- compatible :
+ - "ti,am654-mcspi" for AM654.
- "ti,omap2-mcspi" for OMAP2 & OMAP3.
- "ti,omap4-mcspi" for OMAP4+.
- ti,spi-num-cs : Number of chipselect supported by the instance.
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index 4b836ad17b19..37cf69586d10 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -5,6 +5,7 @@ Required properties:
"renesas,msiof-r8a7744" (RZ/G1N)
"renesas,msiof-r8a7745" (RZ/G1E)
"renesas,msiof-r8a774a1" (RZ/G2M)
+ "renesas,msiof-r8a774c0" (RZ/G2E)
"renesas,msiof-r8a7790" (R-Car H2)
"renesas,msiof-r8a7791" (R-Car M2-W)
"renesas,msiof-r8a7792" (R-Car V2H)
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
index 8d178a4503cf..6cc3c6fe25a3 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-lpspi.txt
@@ -5,8 +5,11 @@ Required properties:
- "fsl,imx7ulp-spi" for LPSPI compatible with the one integrated on i.MX7ULP soc
- "fsl,imx8qxp-spi" for LPSPI compatible with the one integrated on i.MX8QXP soc
- reg : address and length of the lpspi master registers
+- interrupt-parent : phandle of the core interrupt controller
- interrupts : lpspi interrupt
- clocks : lpspi clock specifier
+- spi-slave : SPI slave mode support. In slave mode, add this empty property
+ without a value; in master mode, omit it.
Examples:
@@ -16,4 +19,5 @@ lpspi2: lpspi@40290000 {
interrupt-parent = <&intc>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7ULP_CLK_LPSPI2>;
+ spi-slave;
};
diff --git a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
index 236dcb0faf37..69c356767cf8 100644
--- a/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-mt65xx.txt
@@ -6,8 +6,10 @@ Required properties:
- mediatek,mt2712-spi: for mt2712 platforms
- mediatek,mt6589-spi: for mt6589 platforms
- mediatek,mt7622-spi: for mt7622 platforms
+ - "mediatek,mt7629-spi", "mediatek,mt7622-spi": for mt7629 platforms
- mediatek,mt8135-spi: for mt8135 platforms
- mediatek,mt8173-spi: for mt8173 platforms
+ - mediatek,mt8183-spi: for mt8183 platforms
- #address-cells: should be 1.
diff --git a/Documentation/devicetree/bindings/spi/spi-mxic.txt b/Documentation/devicetree/bindings/spi/spi-mxic.txt
new file mode 100644
index 000000000000..529f2dab2648
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-mxic.txt
@@ -0,0 +1,34 @@
+Macronix SPI controller Device Tree Bindings
+--------------------------------------------
+
+Required properties:
+- compatible: should be "mxicy,mx25f0a-spi"
+- #address-cells: should be 1
+- #size-cells: should be 0
+- reg: should contain 2 entries, one for the registers and one for the direct
+ mapping area
+- reg-names: should contain "regs" and "dirmap"
+- interrupts: interrupt line connected to the SPI controller
+- clock-names: should contain "ps_clk", "send_clk" and "send_dly_clk"
+- clocks: should contain 3 entries for the "ps_clk", "send_clk" and
+ "send_dly_clk" clocks
+
+Example:
+
+ spi@43c30000 {
+ compatible = "mxicy,mx25f0a-spi";
+ reg = <0x43c30000 0x10000>, <0xa0000000 0x20000000>;
+ reg-names = "regs", "dirmap";
+ clocks = <&clkwizard 0>, <&clkwizard 1>, <&clkc 18>;
+ clock-names = "send_clk", "send_dly_clk", "ps_clk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-pxa2xx.txt b/Documentation/devicetree/bindings/spi/spi-pxa2xx.txt
index 0335a9bd2e8a..e30e0c2a4bce 100644
--- a/Documentation/devicetree/bindings/spi/spi-pxa2xx.txt
+++ b/Documentation/devicetree/bindings/spi/spi-pxa2xx.txt
@@ -11,6 +11,9 @@ Required properties:
Optional properties:
- cs-gpios: list of GPIO chip selects. See the SPI bus bindings,
Documentation/devicetree/bindings/spi/spi-bus.txt
+- spi-slave: Empty property indicating the SPI controller is used in slave mode.
+- ready-gpios: GPIO used to signal a SPI master that the FIFO is filled
+ and the controller is ready to service a transfer. Only useful in slave
+ mode (see the sketch below).
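
A minimal slave-mode sketch; the compatible string is one already accepted
by this binding, while the unit address and GPIO line are hypothetical:

	spi@d4035000 {
		compatible = "marvell,mmp2-ssp";
		reg = <0xd4035000 0x1000>;
		...
		spi-slave;
		/* signal the remote SPI master that our FIFO is primed */
		ready-gpios = <&gpio 125 GPIO_ACTIVE_HIGH>;
	};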
Child nodes represent devices on the SPI bus
See ../spi/spi-bus.txt
diff --git a/Documentation/devicetree/bindings/spi/spi-rspi.txt b/Documentation/devicetree/bindings/spi/spi-rspi.txt
index fc97ad64fbf2..421722b93992 100644
--- a/Documentation/devicetree/bindings/spi/spi-rspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rspi.txt
@@ -15,6 +15,7 @@ Required properties:
- "renesas,qspi-r8a7743" (RZ/G1M)
- "renesas,qspi-r8a7744" (RZ/G1N)
- "renesas,qspi-r8a7745" (RZ/G1E)
+ - "renesas,qspi-r8a77470" (RZ/G1C)
- "renesas,qspi-r8a7790" (R-Car H2)
- "renesas,qspi-r8a7791" (R-Car M2-W)
- "renesas,qspi-r8a7792" (R-Car V2H)
diff --git a/Documentation/devicetree/bindings/spi/spi-uniphier.txt b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
index 504a4ecfc7b1..e1201573a29a 100644
--- a/Documentation/devicetree/bindings/spi/spi-uniphier.txt
+++ b/Documentation/devicetree/bindings/spi/spi-uniphier.txt
@@ -7,8 +7,11 @@ Required properties:
- reg: address and length of the spi master registers
- #address-cells: must be <1>, see spi-bus.txt
- #size-cells: must be <0>, see spi-bus.txt
- - clocks: A phandle to the clock for the device.
- - resets: A phandle to the reset control for the device.
+ - interrupts: a single interrupt specifier
+ - pinctrl-names: should be "default"
+ - pinctrl-0: pin control state for the default mode
+ - clocks: a phandle to the clock for the device
+ - resets: a phandle to the reset control for the device
Example:
@@ -17,6 +20,9 @@ spi0: spi@54006000 {
reg = <0x54006000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
+ interrupts = <0 39 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spi0>;
clocks = <&peri_clk 11>;
resets = <&peri_rst 11>;
};
diff --git a/Documentation/devicetree/bindings/timer/rda,8810pl-timer.txt b/Documentation/devicetree/bindings/timer/rda,8810pl-timer.txt
new file mode 100644
index 000000000000..4db542c9a0fd
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/rda,8810pl-timer.txt
@@ -0,0 +1,20 @@
+RDA Micro RDA8810PL Timer
+
+Required properties:
+- compatible : "rda,8810pl-timer"
+- reg : Offset and length of the register set for the device.
+- interrupts : Should contain two interrupts.
+- interrupt-names : Should be "hwtimer", "ostimer".
+
+Example:
+
+ apb@20900000 {
+ compatible = "simple-bus";
+ ...
+ timer@10000 {
+ compatible = "rda,8810pl-timer";
+ reg = <0x10000 0x1000>;
+ interrupts = <16 IRQ_TYPE_LEVEL_HIGH>,
+ <17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hwtimer", "ostimer";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
index 33992679a8bd..862a80f0380a 100644
--- a/Documentation/devicetree/bindings/timer/renesas,cmt.txt
+++ b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
@@ -28,6 +28,10 @@ Required Properties:
- "renesas,r8a7744-cmt1" for the 48-bit CMT1 device included in r8a7744.
- "renesas,r8a7745-cmt0" for the 32-bit CMT0 device included in r8a7745.
- "renesas,r8a7745-cmt1" for the 48-bit CMT1 device included in r8a7745.
+ - "renesas,r8a77470-cmt0" for the 32-bit CMT0 device included in r8a77470.
+ - "renesas,r8a77470-cmt1" for the 48-bit CMT1 device included in r8a77470.
+ - "renesas,r8a774a1-cmt0" for the 32-bit CMT0 device included in r8a774a1.
+ - "renesas,r8a774a1-cmt1" for the 48-bit CMT1 device included in r8a774a1.
- "renesas,r8a7790-cmt0" for the 32-bit CMT0 device included in r8a7790.
- "renesas,r8a7790-cmt1" for the 48-bit CMT1 device included in r8a7790.
- "renesas,r8a7791-cmt0" for the 32-bit CMT0 device included in r8a7791.
@@ -36,6 +40,8 @@ Required Properties:
- "renesas,r8a7793-cmt1" for the 48-bit CMT1 device included in r8a7793.
- "renesas,r8a7794-cmt0" for the 32-bit CMT0 device included in r8a7794.
- "renesas,r8a7794-cmt1" for the 48-bit CMT1 device included in r8a7794.
+ - "renesas,r8a7796-cmt0" for the 32-bit CMT0 device included in r8a7796.
+ - "renesas,r8a7796-cmt1" for the 48-bit CMT1 device included in r8a7796.
- "renesas,r8a77970-cmt0" for the 32-bit CMT0 device included in r8a77970.
- "renesas,r8a77970-cmt1" for the 48-bit CMT1 device included in r8a77970.
- "renesas,r8a77980-cmt0" for the 32-bit CMT0 device included in r8a77980.
@@ -47,9 +53,12 @@ Required Properties:
and RZ/G1.
These are fallbacks for r8a73a4, R-Car Gen2 and RZ/G1 entries
listed above.
- - "renesas,rcar-gen3-cmt0" for 32-bit CMT0 devices included in R-Car Gen3.
- - "renesas,rcar-gen3-cmt1" for 48-bit CMT1 devices included in R-Car Gen3.
- These are fallbacks for R-Car Gen3 entries listed above.
+ - "renesas,rcar-gen3-cmt0" for 32-bit CMT0 devices included in R-Car Gen3
+ and RZ/G2.
+ - "renesas,rcar-gen3-cmt1" for 48-bit CMT1 devices included in R-Car Gen3
+ and RZ/G2.
+ These are fallbacks for R-Car Gen3 and RZ/G2 entries listed
+ above (see the example sketch below).
- reg: base address and length of the registers block for the timer module.
- interrupts: interrupt-specifier for the timer, one per channel.
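
As a sketch of how an SoC-specific compatible pairs with its generation
fallback; the register address, interrupt and clock specifiers here are
illustrative, not copied from a real DTS:

	cmt0: timer@e60f0000 {
		compatible = "renesas,r8a7796-cmt0",
			     "renesas,rcar-gen3-cmt0";
		reg = <0 0xe60f0000 0 0x1004>;
		interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&cpg CPG_MOD 303>;
		clock-names = "fck";
	};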
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4b1a2a8fcc16..1c9de314197e 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -67,6 +67,7 @@ capella Capella Microsystems, Inc
cascoda Cascoda, Ltd.
cavium Cavium, Inc.
cdns Cadence Design Systems Inc.
+cdtech CDTech(H.K.) Electronics Limited
ceva Ceva, Inc.
chipidea Chipidea, Inc
chipone ChipOne
@@ -170,6 +171,7 @@ holtek Holtek Semiconductor, Inc.
hwacom HwaCom Systems Inc.
i2se I2SE GmbH
ibm International Business Machines (IBM)
+icplus IC Plus Corp.
idt Integrated Device Technologies, Inc.
ifi Ingenieurburo Fur Ic-Technologie (I/F/I)
ilitek ILI Technology Corporation (ILITEK)
diff --git a/Documentation/driver-api/i3c/device-driver-api.rst b/Documentation/driver-api/i3c/device-driver-api.rst
new file mode 100644
index 000000000000..85bc3381cd3e
--- /dev/null
+++ b/Documentation/driver-api/i3c/device-driver-api.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+I3C device driver API
+=====================
+
+.. kernel-doc:: include/linux/i3c/device.h
+
+.. kernel-doc:: drivers/i3c/device.c
diff --git a/Documentation/driver-api/i3c/index.rst b/Documentation/driver-api/i3c/index.rst
new file mode 100644
index 000000000000..783d6dad054b
--- /dev/null
+++ b/Documentation/driver-api/i3c/index.rst
@@ -0,0 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+I3C subsystem
+=============
+
+.. toctree::
+
+ protocol
+ device-driver-api
+ master-driver-api
diff --git a/Documentation/driver-api/i3c/master-driver-api.rst b/Documentation/driver-api/i3c/master-driver-api.rst
new file mode 100644
index 000000000000..332552b28358
--- /dev/null
+++ b/Documentation/driver-api/i3c/master-driver-api.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================
+I3C master controller driver API
+================================
+
+.. kernel-doc:: drivers/i3c/master.c
+
+.. kernel-doc:: include/linux/i3c/master.h
diff --git a/Documentation/driver-api/i3c/protocol.rst b/Documentation/driver-api/i3c/protocol.rst
new file mode 100644
index 000000000000..dae3b6d32c6b
--- /dev/null
+++ b/Documentation/driver-api/i3c/protocol.rst
@@ -0,0 +1,203 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+I3C protocol
+============
+
+Disclaimer
+==========
+
+This chapter will focus on aspects that matter to software developers. For
+everything hardware related (like how things are transmitted on the bus, how
+collisions are prevented, ...) please have a look at the I3C specification.
+
+This document is just a brief introduction to the I3C protocol and the concepts
+it brings to the table. If you need more information, please refer to the MIPI
+I3C specification, which can be downloaded from
+http://resources.mipi.org/mipi-i3c-v1-download.
+
+Introduction
+============
+
+I3C (pronounced 'eye-three-see') is a MIPI-standardized protocol designed
+to overcome I2C limitations (limited speed, external signals needed for
+interrupts, no automatic detection of the devices connected to the bus, ...)
+while remaining power-efficient.
+
+I3C Bus
+=======
+
+An I3C bus is made of several I3C devices and possibly some I2C devices as
+well, but let's focus on I3C devices for now.
+
+An I3C device on the I3C bus can have one of the following roles:
+
+* Master: the device is driving the bus. It's the one in charge of initiating
+ transactions or deciding who is allowed to talk on the bus (slave generated
+ events are possible in I3C, see below).
+* Slave: the device acts as a slave, and is not able to send frames to another
+ slave on the bus. The device can still send events to the master on
+ its own initiative if the master allows it.
+
+I3C is a multi-master protocol, so there might be several masters on a bus,
+though only one device can act as a master at a given time. In order to gain
+bus ownership, a master has to follow a specific procedure.
+
+Each device on the I3C bus has to be assigned a dynamic address to be able to
+communicate. Until this is done, the device should only respond to a limited
+set of commands. If it has a static address (also called legacy I2C address),
+the device can reply to I2C transfers.
+
+In addition to these per-device addresses, the protocol defines a broadcast
+address in order to address all devices on the bus.
+
+Once a dynamic address has been assigned to a device, this address will be used
+for any direct communication with the device. Note that even after being
+assigned a dynamic address, the device should still process broadcast messages.
+
+I3C Device discovery
+====================
+
+The I3C protocol defines a mechanism to automatically discover devices present
+on the bus, their capabilities and the functionalities they provide. In this
+regard I3C is closer to a discoverable bus like USB than it is to I2C or SPI.
+
+The discovery mechanism is called DAA (Dynamic Address Assignment), because it
+not only discovers devices but also assigns them a dynamic address.
+
+During DAA, each I3C device reports 3 important things (collected in the
+illustrative sketch after this list):
+
+* BCR: Bus Characteristic Register. This 8-bit register describes the device's
+ bus-related capabilities.
+* DCR: Device Characteristic Register. This 8-bit register describes the
+ functionalities provided by the device.
+* Provisional ID: A 48-bit unique identifier. On a given bus there should be no
+ Provisional ID collision, otherwise the discovery mechanism may fail.
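+
+Put together, the information a device reports during DAA can be summarized
+with a small C struct. This is purely illustrative; neither the struct name
+nor its layout is part of any kernel API::
+
+    #include <stdint.h>
+
+    struct i3c_daa_info {
+            uint8_t  bcr;  /* Bus Characteristic Register */
+            uint8_t  dcr;  /* Device Characteristic Register */
+            uint64_t pid;  /* 48-bit Provisional ID, stored in 64 bits */
+    };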
+
+I3C slave events
+================
+
+The I3C protocol allows slaves to generate events on their own, and thus allows
+them to take temporary control of the bus.
+
+This mechanism is called IBI, for In-Band Interrupt, and as the name says, it
+allows devices to generate interrupts without requiring an external signal.
+
+During DAA, each device on the bus is assigned a dynamic address, and this
+address also serves as a priority identifier to determine who wins if two
+devices generate an interrupt at the same moment on the bus (the lower the
+dynamic address, the higher the priority), as the sketch below illustrates.
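+
+A minimal sketch of the arbitration rule in C (illustrative only, not part of
+any kernel API)::
+
+    #include <stdint.h>
+
+    /* The device with the lower dynamic address wins IBI arbitration. */
+    static uint8_t i3c_ibi_winner(uint8_t addr_a, uint8_t addr_b)
+    {
+            return addr_a < addr_b ? addr_a : addr_b;
+    }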
+
+Masters are allowed to inhibit interrupts if they want to. This inhibition
+request can be broadcast (applies to all devices) or sent to a specific
+device.
+
+I3C Hot-Join
+============
+
+The Hot-Join mechanism is similar to USB hotplug. This mechanism allows
+slaves to join the bus after it has been initialized by the master.
+
+This covers the following use cases:
+
+* the device is not powered when the bus is probed
+* the device is hotplugged on the bus through an extension board
+
+This mechanism relies on slave events to inform the master that a new
+device has joined the bus and is waiting for a dynamic address.
+
+The master is then free to address the request as it wishes: ignore it or
+assign a dynamic address to the slave.
+
+I3C transfer types
+==================
+
+If you leave aside SMBus (which merely standardizes how to access registers
+exposed by I2C devices), I2C has only one transfer type.
+
+I3C defines 3 different classes of transfer in addition to I2C transfers, which
+are kept for backward compatibility with I2C devices.
+
+I3C CCC commands
+----------------
+
+CCC (Common Command Code) commands are meant to be used for anything that is
+related to bus management and all features that are common to a set of devices.
+
+CCC commands contain an 8-bit CCC ID describing the command that is executed.
+The MSB of this ID specifies whether this is a broadcast command (bit7 = 0) or a
+unicast one (bit7 = 1).
+
+The command ID can be followed by a payload. Depending on the command, this
+payload is either sent by the master sending the command (write CCC command),
+or sent by the slave receiving the command (read CCC command). Of course, read
+accesses only apply to unicast commands.
+Note that, when sending a CCC command to a specific device, the device address
+is passed in the first byte of the payload.
+
+The payload length is not explicitly passed on the bus, and should be extracted
+from the CCC ID.
+
+Note that vendors can use a dedicated range of CCC IDs for their own commands
+(0x61-0x7f and 0xe0-0xef).
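+
+The broadcast/unicast and vendor-range rules above map to simple bit tests.
+The helpers below are purely illustrative (not part of any kernel API)::
+
+    #include <stdbool.h>
+    #include <stdint.h>
+
+    /* bit7 == 0: broadcast command; bit7 == 1: unicast (direct) command. */
+    static bool i3c_ccc_is_broadcast(uint8_t ccc_id)
+    {
+            return !(ccc_id & 0x80);
+    }
+
+    /* Vendor-reserved CCC ID ranges: 0x61-0x7f and 0xe0-0xef. */
+    static bool i3c_ccc_is_vendor(uint8_t ccc_id)
+    {
+            return (ccc_id >= 0x61 && ccc_id <= 0x7f) ||
+                   (ccc_id >= 0xe0 && ccc_id <= 0xef);
+    }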
+
+I3C Private SDR transfers
+-------------------------
+
+Private SDR (Single Data Rate) transfers should be used for anything that is
+device specific and does not require high transfer speed.
+
+It is the equivalent of I2C transfers but in the I3C world. Each transfer is
+passed the device address (dynamic address assigned during DAA), a payload
+and a direction.
+
+The only difference with I2C is that the transfer is much faster (typical clock
+frequency is 12.5 MHz).
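+
+A minimal sketch of a private SDR read using the i3c_priv_xfer interface
+documented in the device driver API chapter (error handling trimmed; the
+function name is hypothetical)::
+
+    #include <linux/i3c/device.h>
+
+    static int demo_priv_read(struct i3c_device *dev, void *buf, u16 len)
+    {
+            struct i3c_priv_xfer xfer = {
+                    .rnw = true,      /* read transfer */
+                    .len = len,
+                    .data.in = buf,
+            };
+
+            /* The dynamic address is carried by the device handle; each
+             * transfer only specifies a payload and a direction. */
+            return i3c_device_do_priv_xfers(dev, &xfer, 1);
+    }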
+
+I3C HDR commands
+----------------
+
+HDR commands should be used for anything that is device specific and requires
+high transfer speed.
+
+The first thing attached to an HDR command is the HDR mode. There are currently
+3 different modes defined by the I3C specification (refer to the specification
+for more details):
+
+* HDR-DDR: Double Data Rate mode
+* HDR-TSP: Ternary Symbol Pure. Only usable on busses with no I2C devices
+* HDR-TSL: Ternary Symbol Legacy. Usable on busses with I2C devices
+
+When sending an HDR command, the whole bus has to enter HDR mode, which is done
+using a broadcast CCC command.
+Once the bus has entered a specific HDR mode, the master sends the HDR command.
+An HDR command is made of:
+
+* one 16-bit command word in big endian
+* N 16-bit data words in big endian
+
+Those words may be wrapped with specific preambles/post-ambles which depend on
+the chosen HDR mode and are not detailed here (see the specification for more
+details).
+
+The 16-bit command word is made of the following fields (see the packing
+sketch after this list):
+
+* bit[15]: direction bit, read is 1, write is 0
+* bit[14:8]: command code. Identifies the command being executed, the amount of
+ data words and their meaning
+* bit[7:1]: I3C address of the device this command is addressed to
+* bit[0]: reserved/parity-bit
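+
+A sketch of how such a command word could be packed in C (illustrative only,
+not a kernel API; the reserved/parity bit is left to the controller)::
+
+    #include <stdint.h>
+
+    static uint16_t i3c_hdr_cmd_word(uint8_t read, uint8_t code, uint8_t addr)
+    {
+            return ((uint16_t)(read & 0x1) << 15) |   /* bit[15]: direction */
+                   ((uint16_t)(code & 0x7f) << 8) |   /* bit[14:8]: command */
+                   ((uint16_t)(addr & 0x7f) << 1);    /* bit[7:1]: address */
+    }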
+
+Backward compatibility with I2C devices
+=======================================
+
+The I3C protocol has been designed to be backward compatible with I2C devices.
+This backward compatibility allows one to connect a mix of I2C and I3C devices
+on the same bus, though, for the bus to operate efficiently, I2C devices should
+be equipped with 50 ns spike filters.
+
+I2C devices can't be discovered like I3C ones and have to be statically
+declared. In order to let the master know what these devices are capable of
+(both in terms of bus-related limitations and functionalities), the software
+has to provide some information, which is done through the LVR (Legacy I2C
+Virtual Register).
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 909f991b4c0d..ab38ced66a44 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -33,6 +33,7 @@ available subsections can be seen below.
pci/index
spi
i2c
+ i3c/index
hsi
edac
scsi
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 43681ca0837f..fc4cc24dfb97 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -254,6 +254,7 @@ GPIO
devm_gpiod_get_index_optional()
devm_gpiod_get_optional()
devm_gpiod_put()
+ devm_gpiod_unhinge()
devm_gpiochip_add_data()
devm_gpiochip_remove()
devm_gpio_request()
diff --git a/Documentation/features/vm/ioremap_prot/arch-support.txt b/Documentation/features/vm/ioremap_prot/arch-support.txt
index 8527601a3739..326e4797bc65 100644
--- a/Documentation/features/vm/ioremap_prot/arch-support.txt
+++ b/Documentation/features/vm/ioremap_prot/arch-support.txt
@@ -16,7 +16,7 @@
| ia64: | TODO |
| m68k: | TODO |
| microblaze: | TODO |
- | mips: | TODO |
+ | mips: | ok |
| nds32: | TODO |
| nios2: | TODO |
| openrisc: | TODO |
diff --git a/Documentation/gpu/amdgpu-dc.rst b/Documentation/gpu/amdgpu-dc.rst
new file mode 100644
index 000000000000..cc89b0fc11df
--- /dev/null
+++ b/Documentation/gpu/amdgpu-dc.rst
@@ -0,0 +1,68 @@
+===================================
+drm/amd/display - Display Core (DC)
+===================================
+
+*placeholder - general description of supported platforms, what dc is, etc.*
+
+Because it is partially shared with other operating systems, the Display Core
+Driver is divided into two pieces.
+
+1. **Display Core (DC)** contains the OS-agnostic components. Things like
+ hardware programming and resource management are handled here.
+2. **Display Manager (DM)** contains the OS-dependent components. Hooks to the
+ amdgpu base driver and DRM are implemented here.
+
+It doesn't help that the entire package is frequently referred to as DC. But
+with the context in mind, it should be clear.
+
+When CONFIG_DRM_AMD_DC is enabled, DC will be initialized by default for
+supported ASICs. To force disable, set `amdgpu.dc=0` on the kernel command
+line. Likewise, to force enable on unsupported ASICs, set `amdgpu.dc=1`.
+
+To determine if DC is loaded, search dmesg for the following entry:
+
+``Display Core initialized with <version number here>``
+
+AMDgpu Display Manager
+======================
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+ :internal:
+
+Lifecycle
+---------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :doc: DM Lifecycle
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :functions: dm_hw_init dm_hw_fini
+
+Interrupts
+----------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :functions: register_hpd_handlers dm_crtc_high_irq dm_pflip_high_irq
+
+Atomic Implementation
+---------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :doc: atomic
+
+.. kernel-doc:: drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+ :functions: amdgpu_dm_atomic_check amdgpu_dm_atomic_commit_tail
+
+Display Core
+============
+
+**WIP**
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
index 7d2d3875ff1a..7c1672118a73 100644
--- a/Documentation/gpu/drivers.rst
+++ b/Documentation/gpu/drivers.rst
@@ -5,6 +5,7 @@ GPU Driver Documentation
.. toctree::
amdgpu
+ amdgpu-dc
i915
meson
pl111
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index f9cfcdcdf024..b422eb8edf16 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -59,19 +59,28 @@ Implementing Asynchronous Atomic Commit
.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
:doc: implementing nonblocking commit
+Helper Functions Reference
+--------------------------
+
+.. kernel-doc:: include/drm/drm_atomic_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+ :export:
+
Atomic State Reset and Initialization
-------------------------------------
-.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
:doc: atomic state reset and initialization
-Helper Functions Reference
---------------------------
+Atomic State Helper Reference
+-----------------------------
-.. kernel-doc:: include/drm/drm_atomic_helper.h
+.. kernel-doc:: include/drm/drm_atomic_state_helper.h
:internal:
-.. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
+.. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
:export:
Simple KMS Helper Reference
@@ -223,6 +232,18 @@ MIPI DSI Helper Functions Reference
.. kernel-doc:: drivers/gpu/drm/drm_mipi_dsi.c
:export:
+Display Stream Compression Helper Functions Reference
+=====================================================
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+ :doc: dsc helpers
+
+.. kernel-doc:: include/drm/drm_dsc.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_dsc.c
+ :export:
+
Output Probing Helper Functions Reference
=========================================
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 4b1501b4835b..75c882e09fee 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -554,6 +554,18 @@ Plane Composition Properties
.. kernel-doc:: drivers/gpu/drm/drm_blend.c
:export:
+FB_DAMAGE_CLIPS
+~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
+ :doc: overview
+
+.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
+ :export:
+
+.. kernel-doc:: include/drm/drm_damage_helper.h
+ :internal:
+
Color Management Properties
---------------------------
@@ -575,6 +587,13 @@ Explicit Fencing Properties
.. kernel-doc:: drivers/gpu/drm/drm_atomic_uapi.c
:doc: explicit fencing properties
+
+Variable Refresh Properties
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_connector.c
+ :doc: Variable refresh properties
+
Existing KMS Properties
-----------------------
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index e725e8449e72..54a696d961a7 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -72,16 +72,13 @@ object TTM to provide a pool for buffer object allocation by clients and
the kernel itself. The type of this object should be
TTM_GLOBAL_TTM_BO, and its size should be sizeof(struct
ttm_bo_global). Again, driver-specific init and release functions may
-be provided, likely eventually calling ttm_bo_global_init() and
-ttm_bo_global_release(), respectively. Also, like the previous
+be provided, likely eventually calling ttm_bo_global_ref_init() and
+ttm_bo_global_ref_release(), respectively. Also, like the previous
object, ttm_global_item_ref() is used to create an initial reference
count for the TTM, which will call your initialization function.
See the radeon_ttm.c file for an example of usage.
-.. kernel-doc:: drivers/gpu/drm/drm_global.c
- :export:
-
The Graphics Execution Manager (GEM)
====================================
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index a2214cc1f821..4b4bf2c5eac5 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -197,6 +197,9 @@ EPERM/EACCESS:
difference between EACCESS and EPERM.
ENODEV:
+ The device is not (yet) present or fully initialized.
+
+EOPNOTSUPP:
Feature (like PRIME, modesetting, GEM) is not supported by the driver.
ENXIO:
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 77c2b3c25565..14191b64446d 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -28,22 +28,16 @@ them, but also all the virtual ones used by KVM, so everyone qualifies).
Contact: Daniel Vetter, Thierry Reding, respective driver maintainers
-Switch from reference/unreference to get/put
---------------------------------------------
-
-For some reason DRM core uses ``reference``/``unreference`` suffixes for
-refcounting functions, but kernel uses ``get``/``put`` (e.g.
-``kref_get``/``put()``). It would be good to switch over for consistency, and
-it's shorter. Needs to be done in 3 steps for each pair of functions:
-* Create new ``get``/``put`` functions, define the old names as compatibility
- wrappers
-* Switch over each file/driver using a cocci-generated spatch.
-* Once all users of the old names are gone, remove them.
+Remove custom dumb_map_offset implementations
+---------------------------------------------
-This way drivers/patches in the progress of getting merged won't break.
+All GEM-based drivers should be using drm_gem_create_mmap_offset() instead.
+Audit each individual driver, make sure it'll work with the generic
+implementation (there are lots of outdated locking leftovers in various
+implementations), and then remove it.
-Contact: Daniel Vetter
+Contact: Daniel Vetter, respective driver maintainers
Convert existing KMS drivers to atomic modesetting
--------------------------------------------------
@@ -234,6 +228,34 @@ efficient.
Contact: Daniel Vetter
+Defaults for .gem_prime_import and export
+-----------------------------------------
+
+Most drivers don't need to set drm_driver->gem_prime_import and
+->gem_prime_export now that drm_gem_prime_import() and drm_gem_prime_export()
+are the default.
+
+struct drm_gem_object_funcs
+---------------------------
+
+GEM objects can now have a function table instead of having the callbacks on the
+DRM driver struct. This is now the preferred way and drivers can be moved over.
+
+Use DRM_MODESET_LOCK_ALL_* helpers instead of boilerplate
+---------------------------------------------------------
+
+For cases where drivers are attempting to grab the modeset locks with a local
+acquire context, replace the boilerplate code surrounding
+drm_modeset_lock_all_ctx() with DRM_MODESET_LOCK_ALL_BEGIN() and
+DRM_MODESET_LOCK_ALL_END().
+
+This should also be done for all places where drm_modeset_lock_all() is still
+used.
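+
+A rough sketch of the desired pattern, assuming the interruptible acquire flag
+and a struct drm_device pointer named dev (check include/drm/drm_modeset_lock.h
+for the exact macro arguments)::
+
+    struct drm_modeset_acquire_ctx ctx;
+    int ret;
+
+    DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
+    /* ... touch modeset state with all the locks held ... */
+    DRM_MODESET_LOCK_ALL_END(ctx, ret);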
+
+As a reference, take a look at the conversions already completed in drm core.
+
+Contact: Sean Paul, respective driver maintainers
+
Core refactorings
=================
@@ -339,6 +361,16 @@ Some of these date from the very introduction of KMS in 2008 ...
leftovers from older (never merged into upstream) KMS designs where modes
where set using their ID, including support to add/remove modes.
+- Make ->funcs and ->helper_private vtables optional. There's a bunch of empty
+ function tables in drivers, but before we can remove them we need to make sure
+ that all the users in helpers and drivers do correctly check for a NULL
+ vtable.
+
+- Clean up the various ->destroy callbacks. A lot of them just wrap the
+ drm_*_cleanup implementations and can be removed. Some tack a kfree() at the
+ end, for which we could add drm_*_cleanup_kfree(). And then there's the (for
+ historical reasons) misnamed drm_primary_helper_destroy() function.
+
Better Testing
==============
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
index 0a6ea6216e41..7dfc349a4508 100644
--- a/Documentation/gpu/vkms.rst
+++ b/Documentation/gpu/vkms.rst
@@ -10,8 +10,8 @@
TODO
====
-CRC API
--------
+CRC API Improvements
+--------------------
- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
@@ -22,3 +22,100 @@ CRC API
- Add igt test to check extreme alpha values i.e. fully opaque and fully
transparent (intermediate values are affected by hw-specific rounding modes).
+
+Vblank issues
+-------------
+
+Some IGT test cases are failing. Need to analyze why and fix the issues:
+
+- plain-flip-fb-recreate
+- plain-flip-ts-check
+- flip-vs-blocking-wf-vblank
+- plain-flip-fb-recreate-interruptible
+- flip-vs-wf_vblank-interruptible
+
+Runtime Configuration
+---------------------
+
+We want to be able to reconfigure the vkms instance without having to reload
+the module. Use/Test-cases:
+
+- Hotplug/hotremove connectors on the fly (to be able to test DP MST handling of
+ compositors).
+
+- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of
+ them first).
+
+- Change output configuration: Plug/unplug screens, change EDID, allow changing
+ the refresh rate.
+
+The currently proposed solution is to expose vkms configuration through
+configfs. All existing module options should be supported through configfs too.
+
+Add Plane Features
+------------------
+
+There's lots of plane features we could add support for:
+
+- Real overlay planes, not just cursor.
+
+- Full alpha blending on all planes.
+
+- Rotation, scaling.
+
+- Additional buffer formats, especially YUV formats for video like NV12.
+ Low/high bpp RGB formats would also be interesting.
+
+- Async updates (currently only possible on cursor plane using the legacy cursor
+ api).
+
+For all of these, we also want to review the igt test coverage and make sure all
+relevant igt testcases work on vkms.
+
+Writeback support
+-----------------
+
+Currently vkms only computes a CRC for each frame. Once we have additional plane
+features, we could write back the entire composited frame, and expose it as:
+
+- Writeback connector. This is useful for testing compositors if you don't have
+ hardware with writeback support.
+
+- As a v4l device. This is useful for debugging compositors on special vkms
+ configurations, so that developers see what's really going on.
+
+Prime Buffer Sharing
+--------------------
+
+We already have vgem, which is a gem driver for testing rendering, similar to
+how vkms is for testing the modeset side. Adding buffer sharing support to vkms
+allows us to test them together, to test synchronization and lots of other
+features. Also, this allows compositors to test whether they work correctly on
+SoC chips, where display and rendering are very often split between two
+drivers.
+
+Output Features
+---------------
+
+- Variable refresh rate/freesync support. This probably needs prime buffer
+ sharing support, so that we can use vgem fences to simulate rendering in
+ testing. Also needs support to specify the EDID.
+
+- Add support for link status, so that compositors can validate their runtime
+ fallbacks when e.g. a Display Port link goes bad.
+
+- All the hotplug handling described under "Runtime Configuration".
+
+Atomic Check using eBPF
+-----------------------
+
+Atomic drivers have lots of restrictions which are not exposed to userspace in
+any explicit form through e.g. possible property values. Userspace can only
+inquire about these limits through the atomic IOCTL, possibly using the
+TEST_ONLY flag. Trying to add configurable code for all these limits, to allow
+compositors to be tested against them, would be a rather futile exercise. Instead
+we could add support for eBPF to validate any kind of atomic state, and
+implement a library of different restrictions.
+
+This needs a bunch of features (plane compositing, multiple outputs, ...)
+enabled already to make sense.
diff --git a/Documentation/media/.gitignore b/Documentation/media/.gitignore
index 08b21de3ef94..53adc029061f 100644
--- a/Documentation/media/.gitignore
+++ b/Documentation/media/.gitignore
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
*.pdf
# Files generated from *.dot
uapi/v4l/pipeline.svg
diff --git a/Documentation/media/Makefile b/Documentation/media/Makefile
index 36166952d555..d75d70f191bc 100644
--- a/Documentation/media/Makefile
+++ b/Documentation/media/Makefile
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
# Rules to convert a .h file to inline RST documentation
SRC_DIR=$(srctree)/Documentation/media
diff --git a/Documentation/media/audio.h.rst.exceptions b/Documentation/media/audio.h.rst.exceptions
index 940458774cf6..cf6620477f73 100644
--- a/Documentation/media/audio.h.rst.exceptions
+++ b/Documentation/media/audio.h.rst.exceptions
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
# Ignore header name
ignore define _DVBAUDIO_H_
diff --git a/Documentation/media/ca.h.rst.exceptions b/Documentation/media/ca.h.rst.exceptions
index 553559cc6ad7..f6828238eb48 100644
--- a/Documentation/media/ca.h.rst.exceptions
+++ b/Documentation/media/ca.h.rst.exceptions
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
# Ignore header name
ignore define _DVBCA_H_
diff --git a/Documentation/media/cec-drivers/index.rst b/Documentation/media/cec-drivers/index.rst
index 7ef204823422..2b7fcaa4311b 100644
--- a/Documentation/media/cec-drivers/index.rst
+++ b/Documentation/media/cec-drivers/index.rst
@@ -1,4 +1,4 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. SPDX-License-Identifier: GPL-2.0
.. include:: <isonum.txt>
diff --git a/Documentation/media/cec-drivers/pulse8-cec.rst b/Documentation/media/cec-drivers/pulse8-cec.rst
index 99551c6a9bc5..356d08b519f3 100644
--- a/Documentation/media/cec-drivers/pulse8-cec.rst
+++ b/Documentation/media/cec-drivers/pulse8-cec.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Pulse-Eight CEC Adapter driver
==============================
diff --git a/Documentation/media/cec.h.rst.exceptions b/Documentation/media/cec.h.rst.exceptions
index d9fd092de6f8..014816d04b9e 100644
--- a/Documentation/media/cec.h.rst.exceptions
+++ b/Documentation/media/cec.h.rst.exceptions
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
# Ignore header name
ignore define _CEC_UAPI_H
diff --git a/Documentation/media/conf.py b/Documentation/media/conf.py
index bef927bc4659..1f194fcd2cae 100644
--- a/Documentation/media/conf.py
+++ b/Documentation/media/conf.py
@@ -1,5 +1,7 @@
# -*- coding: utf-8; mode: python -*-
+# SPDX-License-Identifier: GPL-2.0
+
project = 'Linux Media Subsystem Documentation'
tags.add("subproject")
diff --git a/Documentation/media/conf_nitpick.py b/Documentation/media/conf_nitpick.py
index 480d548af670..d0c50d75f518 100644
--- a/Documentation/media/conf_nitpick.py
+++ b/Documentation/media/conf_nitpick.py
@@ -1,5 +1,7 @@
# -*- coding: utf-8; mode: python -*-
+# SPDX-License-Identifier: GPL-2.0
+
project = 'Linux Media Subsystem Documentation'
# It is possible to run Sphinx in nickpick mode with:
diff --git a/Documentation/media/dmx.h.rst.exceptions b/Documentation/media/dmx.h.rst.exceptions
index a8c4239ed95b..afc14d384b83 100644
--- a/Documentation/media/dmx.h.rst.exceptions
+++ b/Documentation/media/dmx.h.rst.exceptions
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
# Ignore header name
ignore define _UAPI_DVBDMX_H_
diff --git a/Documentation/media/dvb-drivers/avermedia.rst b/Documentation/media/dvb-drivers/avermedia.rst
index 49cd9c935307..14f437ca38d3 100644
--- a/Documentation/media/dvb-drivers/avermedia.rst
+++ b/Documentation/media/dvb-drivers/avermedia.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
HOWTO: Get An Avermedia DVB-T working under Linux
-------------------------------------------------
diff --git a/Documentation/media/dvb-drivers/bt8xx.rst b/Documentation/media/dvb-drivers/bt8xx.rst
index e3e387bdf498..7936cd96fc8f 100644
--- a/Documentation/media/dvb-drivers/bt8xx.rst
+++ b/Documentation/media/dvb-drivers/bt8xx.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
How to get the bt8xx cards working
==================================
diff --git a/Documentation/media/dvb-drivers/cards.rst b/Documentation/media/dvb-drivers/cards.rst
index 177cbeb2b561..e2e30a56b450 100644
--- a/Documentation/media/dvb-drivers/cards.rst
+++ b/Documentation/media/dvb-drivers/cards.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Hardware supported by the linuxtv.org DVB drivers
=================================================
diff --git a/Documentation/media/dvb-drivers/ci.rst b/Documentation/media/dvb-drivers/ci.rst
index 87f3748c49b9..35f33f1f9e2a 100644
--- a/Documentation/media/dvb-drivers/ci.rst
+++ b/Documentation/media/dvb-drivers/ci.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Digital TV Conditional Access Interface (CI API)
================================================
diff --git a/Documentation/media/dvb-drivers/contributors.rst b/Documentation/media/dvb-drivers/contributors.rst
index 5949753008ae..f23b6e6faf46 100644
--- a/Documentation/media/dvb-drivers/contributors.rst
+++ b/Documentation/media/dvb-drivers/contributors.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Contributors
============
diff --git a/Documentation/media/dvb-drivers/dvb-usb.rst b/Documentation/media/dvb-drivers/dvb-usb.rst
index eec99cd07a30..6679191819aa 100644
--- a/Documentation/media/dvb-drivers/dvb-usb.rst
+++ b/Documentation/media/dvb-drivers/dvb-usb.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Idea behind the dvb-usb-framework
=================================
diff --git a/Documentation/media/dvb-drivers/faq.rst b/Documentation/media/dvb-drivers/faq.rst
index a8593d3792fa..52f153d18278 100644
--- a/Documentation/media/dvb-drivers/faq.rst
+++ b/Documentation/media/dvb-drivers/faq.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
FAQ
===
diff --git a/Documentation/media/dvb-drivers/frontends.rst b/Documentation/media/dvb-drivers/frontends.rst
index 1f5f57989196..7b8336ece681 100644
--- a/Documentation/media/dvb-drivers/frontends.rst
+++ b/Documentation/media/dvb-drivers/frontends.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
****************
Frontend drivers
****************
diff --git a/Documentation/media/dvb-drivers/index.rst b/Documentation/media/dvb-drivers/index.rst
index 314e127d82e3..9d3fce544f85 100644
--- a/Documentation/media/dvb-drivers/index.rst
+++ b/Documentation/media/dvb-drivers/index.rst
@@ -1,4 +1,4 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. SPDX-License-Identifier: GPL-2.0
.. include:: <isonum.txt>
diff --git a/Documentation/media/dvb-drivers/intro.rst b/Documentation/media/dvb-drivers/intro.rst
index d6eeb2708b9b..4e361bcc3ad4 100644
--- a/Documentation/media/dvb-drivers/intro.rst
+++ b/Documentation/media/dvb-drivers/intro.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Introduction
============
diff --git a/Documentation/media/dvb-drivers/lmedm04.rst b/Documentation/media/dvb-drivers/lmedm04.rst
index e8913d4481a0..a6ee33413748 100644
--- a/Documentation/media/dvb-drivers/lmedm04.rst
+++ b/Documentation/media/dvb-drivers/lmedm04.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Firmware files for lmedm04 cards
================================
diff --git a/Documentation/media/dvb-drivers/opera-firmware.rst b/Documentation/media/dvb-drivers/opera-firmware.rst
index 41236b43c124..fab3581551de 100644
--- a/Documentation/media/dvb-drivers/opera-firmware.rst
+++ b/Documentation/media/dvb-drivers/opera-firmware.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Opera firmware
==============
diff --git a/Documentation/media/dvb-drivers/technisat.rst b/Documentation/media/dvb-drivers/technisat.rst
index f80f4ecc1560..9eaa12366bbf 100644
--- a/Documentation/media/dvb-drivers/technisat.rst
+++ b/Documentation/media/dvb-drivers/technisat.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
How to set up the Technisat/B2C2 Flexcop devices
================================================
diff --git a/Documentation/media/dvb-drivers/ttusb-dec.rst b/Documentation/media/dvb-drivers/ttusb-dec.rst
index 84fc2199dc29..516bbab8a872 100644
--- a/Documentation/media/dvb-drivers/ttusb-dec.rst
+++ b/Documentation/media/dvb-drivers/ttusb-dec.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
TechnoTrend/Hauppauge DEC USB Driver
====================================
diff --git a/Documentation/media/dvb-drivers/udev.rst b/Documentation/media/dvb-drivers/udev.rst
index 7d7d5d82108a..ca6c9c226902 100644
--- a/Documentation/media/dvb-drivers/udev.rst
+++ b/Documentation/media/dvb-drivers/udev.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
UDEV rules for DVB
==================
diff --git a/Documentation/media/frontend.h.rst.exceptions b/Documentation/media/frontend.h.rst.exceptions
index f7c4df620a52..6283702c08c8 100644
--- a/Documentation/media/frontend.h.rst.exceptions
+++ b/Documentation/media/frontend.h.rst.exceptions
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
# Ignore header name
ignore define _DVBFRONTEND_H_
diff --git a/Documentation/media/index.rst b/Documentation/media/index.rst
index 1cf5316c8ff8..0a222fc1d7ca 100644
--- a/Documentation/media/index.rst
+++ b/Documentation/media/index.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Linux Media Subsystem Documentation
===================================
diff --git a/Documentation/media/intro.rst b/Documentation/media/intro.rst
index 9ce2e23a0236..4a6bd665b884 100644
--- a/Documentation/media/intro.rst
+++ b/Documentation/media/intro.rst
@@ -1,4 +1,4 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. SPDX-License-Identifier: GPL-2.0
============
Introduction
diff --git a/Documentation/media/kapi/cec-core.rst b/Documentation/media/kapi/cec-core.rst
index bca1d9d1d223..3ce26b7c2b2b 100644
--- a/Documentation/media/kapi/cec-core.rst
+++ b/Documentation/media/kapi/cec-core.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
CEC Kernel Support
==================
diff --git a/Documentation/media/kapi/csi2.rst b/Documentation/media/kapi/csi2.rst
index 0560100efca2..a7e75e2eba85 100644
--- a/Documentation/media/kapi/csi2.rst
+++ b/Documentation/media/kapi/csi2.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
MIPI CSI-2
==========
diff --git a/Documentation/media/kapi/dtv-ca.rst b/Documentation/media/kapi/dtv-ca.rst
index fded096b937c..8a09862b428b 100644
--- a/Documentation/media/kapi/dtv-ca.rst
+++ b/Documentation/media/kapi/dtv-ca.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Digital TV Conditional Access kABI
----------------------------------
diff --git a/Documentation/media/kapi/dtv-common.rst b/Documentation/media/kapi/dtv-common.rst
index 7a9574f03190..f8b2c4dc8170 100644
--- a/Documentation/media/kapi/dtv-common.rst
+++ b/Documentation/media/kapi/dtv-common.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Digital TV Common functions
---------------------------
diff --git a/Documentation/media/kapi/dtv-core.rst b/Documentation/media/kapi/dtv-core.rst
index bca743dc6b43..17454a2cf6b0 100644
--- a/Documentation/media/kapi/dtv-core.rst
+++ b/Documentation/media/kapi/dtv-core.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Digital TV (DVB) devices
------------------------
diff --git a/Documentation/media/kapi/dtv-demux.rst b/Documentation/media/kapi/dtv-demux.rst
index 24857133e4e8..c0ae5dec5328 100644
--- a/Documentation/media/kapi/dtv-demux.rst
+++ b/Documentation/media/kapi/dtv-demux.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Digital TV Demux kABI
---------------------
diff --git a/Documentation/media/kapi/dtv-frontend.rst b/Documentation/media/kapi/dtv-frontend.rst
index 472650cdb100..8ea64742c7ba 100644
--- a/Documentation/media/kapi/dtv-frontend.rst
+++ b/Documentation/media/kapi/dtv-frontend.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Digital TV Frontend kABI
------------------------
diff --git a/Documentation/media/kapi/dtv-net.rst b/Documentation/media/kapi/dtv-net.rst
index 158c7cbd7600..deb6bffe96bb 100644
--- a/Documentation/media/kapi/dtv-net.rst
+++ b/Documentation/media/kapi/dtv-net.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Digital TV Network kABI
-----------------------
diff --git a/Documentation/media/kapi/mc-core.rst b/Documentation/media/kapi/mc-core.rst
index 69362b3135c2..0bcfeadbc52d 100644
--- a/Documentation/media/kapi/mc-core.rst
+++ b/Documentation/media/kapi/mc-core.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Media Controller devices
------------------------
diff --git a/Documentation/media/kapi/rc-core.rst b/Documentation/media/kapi/rc-core.rst
index 4759f020d6b2..53f5e643b6e9 100644
--- a/Documentation/media/kapi/rc-core.rst
+++ b/Documentation/media/kapi/rc-core.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Remote Controller devices
-------------------------
diff --git a/Documentation/media/kapi/v4l2-async.rst b/Documentation/media/kapi/v4l2-async.rst
index 523ff9eb09a0..3422330b3b1f 100644
--- a/Documentation/media/kapi/v4l2-async.rst
+++ b/Documentation/media/kapi/v4l2-async.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 async kAPI
^^^^^^^^^^^^^^^
.. kernel-doc:: include/media/v4l2-async.h
diff --git a/Documentation/media/kapi/v4l2-clocks.rst b/Documentation/media/kapi/v4l2-clocks.rst
index b8a895860a8a..5c22eecab7ba 100644
--- a/Documentation/media/kapi/v4l2-clocks.rst
+++ b/Documentation/media/kapi/v4l2-clocks.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 clocks
-----------
diff --git a/Documentation/media/kapi/v4l2-common.rst b/Documentation/media/kapi/v4l2-common.rst
index 525d804871ff..b1e70eb56aa4 100644
--- a/Documentation/media/kapi/v4l2-common.rst
+++ b/Documentation/media/kapi/v4l2-common.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 common functions and data structures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/media/kapi/v4l2-controls.rst b/Documentation/media/kapi/v4l2-controls.rst
index 07a179eeb2fb..64ab99abf0b6 100644
--- a/Documentation/media/kapi/v4l2-controls.rst
+++ b/Documentation/media/kapi/v4l2-controls.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 Controls
=============
diff --git a/Documentation/media/kapi/v4l2-core.rst b/Documentation/media/kapi/v4l2-core.rst
index 5cf292037a48..0dcad7a23141 100644
--- a/Documentation/media/kapi/v4l2-core.rst
+++ b/Documentation/media/kapi/v4l2-core.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Video4Linux devices
-------------------
diff --git a/Documentation/media/kapi/v4l2-dev.rst b/Documentation/media/kapi/v4l2-dev.rst
index eb03ccc41c41..b359f1804bbe 100644
--- a/Documentation/media/kapi/v4l2-dev.rst
+++ b/Documentation/media/kapi/v4l2-dev.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Video device' s internal representation
=======================================
diff --git a/Documentation/media/kapi/v4l2-device.rst b/Documentation/media/kapi/v4l2-device.rst
index 6c58bbbaa66f..c4311f0421be 100644
--- a/Documentation/media/kapi/v4l2-device.rst
+++ b/Documentation/media/kapi/v4l2-device.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 device instance
--------------------
diff --git a/Documentation/media/kapi/v4l2-dv-timings.rst b/Documentation/media/kapi/v4l2-dv-timings.rst
index 55274329d229..b178f931518b 100644
--- a/Documentation/media/kapi/v4l2-dv-timings.rst
+++ b/Documentation/media/kapi/v4l2-dv-timings.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 DV Timings functions
^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/media/kapi/v4l2-event.rst b/Documentation/media/kapi/v4l2-event.rst
index 5c7e31224ddc..a4b7ae2b94d8 100644
--- a/Documentation/media/kapi/v4l2-event.rst
+++ b/Documentation/media/kapi/v4l2-event.rst
@@ -1,3 +1,4 @@
+.. SPDX-License-Identifier: GPL-2.0
V4L2 events
-----------
diff --git a/Documentation/media/kapi/v4l2-fh.rst b/Documentation/media/kapi/v4l2-fh.rst
index 3ee64adf4635..4c62b19af744 100644
--- a/Documentation/media/kapi/v4l2-fh.rst
+++ b/Documentation/media/kapi/v4l2-fh.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 File handlers
------------------
diff --git a/Documentation/media/kapi/v4l2-flash-led-class.rst b/Documentation/media/kapi/v4l2-flash-led-class.rst
index 20798bdac387..2aa6bed9b8db 100644
--- a/Documentation/media/kapi/v4l2-flash-led-class.rst
+++ b/Documentation/media/kapi/v4l2-flash-led-class.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 flash functions and data structures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/media/kapi/v4l2-fwnode.rst b/Documentation/media/kapi/v4l2-fwnode.rst
index 6c8bccdfeb25..e313b6cddcd0 100644
--- a/Documentation/media/kapi/v4l2-fwnode.rst
+++ b/Documentation/media/kapi/v4l2-fwnode.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 fwnode kAPI
^^^^^^^^^^^^^^^^
.. kernel-doc:: include/media/v4l2-fwnode.h
diff --git a/Documentation/media/kapi/v4l2-intro.rst b/Documentation/media/kapi/v4l2-intro.rst
index e614d8d4ca1c..cea3e263e48b 100644
--- a/Documentation/media/kapi/v4l2-intro.rst
+++ b/Documentation/media/kapi/v4l2-intro.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Introduction
------------
diff --git a/Documentation/media/kapi/v4l2-mc.rst b/Documentation/media/kapi/v4l2-mc.rst
index 8af347013490..0c352ac588b2 100644
--- a/Documentation/media/kapi/v4l2-mc.rst
+++ b/Documentation/media/kapi/v4l2-mc.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 Media Controller functions and data structures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/media/kapi/v4l2-mediabus.rst b/Documentation/media/kapi/v4l2-mediabus.rst
index e64131906d11..1f2254cba92d 100644
--- a/Documentation/media/kapi/v4l2-mediabus.rst
+++ b/Documentation/media/kapi/v4l2-mediabus.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 Media Bus functions and data structures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/media/kapi/v4l2-mem2mem.rst b/Documentation/media/kapi/v4l2-mem2mem.rst
index 5536b4a71e51..a43b31cc8261 100644
--- a/Documentation/media/kapi/v4l2-mem2mem.rst
+++ b/Documentation/media/kapi/v4l2-mem2mem.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 Memory to Memory functions and data structures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/media/kapi/v4l2-rect.rst b/Documentation/media/kapi/v4l2-rect.rst
index 8df5067ad57d..fc315cd84156 100644
--- a/Documentation/media/kapi/v4l2-rect.rst
+++ b/Documentation/media/kapi/v4l2-rect.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 rect helper functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/media/kapi/v4l2-subdev.rst b/Documentation/media/kapi/v4l2-subdev.rst
index 1280e05b662b..be4970909f40 100644
--- a/Documentation/media/kapi/v4l2-subdev.rst
+++ b/Documentation/media/kapi/v4l2-subdev.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
V4L2 sub-devices
----------------
diff --git a/Documentation/media/kapi/v4l2-tuner.rst b/Documentation/media/kapi/v4l2-tuner.rst
index 86e894639651..e6caa3321566 100644
--- a/Documentation/media/kapi/v4l2-tuner.rst
+++ b/Documentation/media/kapi/v4l2-tuner.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Tuner functions and data structures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/media/kapi/v4l2-tveeprom.rst b/Documentation/media/kapi/v4l2-tveeprom.rst
index 33422cb26aa7..43fb391edaba 100644
--- a/Documentation/media/kapi/v4l2-tveeprom.rst
+++ b/Documentation/media/kapi/v4l2-tveeprom.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Hauppauge TV EEPROM functions and data structures
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/Documentation/media/kapi/v4l2-videobuf.rst b/Documentation/media/kapi/v4l2-videobuf.rst
index 54adfd772d28..1a7756397b1a 100644
--- a/Documentation/media/kapi/v4l2-videobuf.rst
+++ b/Documentation/media/kapi/v4l2-videobuf.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
.. _vb_framework:
Videobuf Framework
diff --git a/Documentation/media/kapi/v4l2-videobuf2.rst b/Documentation/media/kapi/v4l2-videobuf2.rst
index 3c4cb1e7e05f..1044f64ff168 100644
--- a/Documentation/media/kapi/v4l2-videobuf2.rst
+++ b/Documentation/media/kapi/v4l2-videobuf2.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
.. _vb2_framework:
V4L2 videobuf2 functions and data structures
diff --git a/Documentation/media/lirc.h.rst.exceptions b/Documentation/media/lirc.h.rst.exceptions
index 984b61dc3f2e..379b9e7df5d0 100644
--- a/Documentation/media/lirc.h.rst.exceptions
+++ b/Documentation/media/lirc.h.rst.exceptions
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
# Ignore header name
ignore define _LINUX_LIRC_H
diff --git a/Documentation/media/media.h.rst.exceptions b/Documentation/media/media.h.rst.exceptions
index 684fe9c86dee..9b4c26502d95 100644
--- a/Documentation/media/media.h.rst.exceptions
+++ b/Documentation/media/media.h.rst.exceptions
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
# Ignore header name
ignore define __LINUX_MEDIA_H
diff --git a/Documentation/media/media_kapi.rst b/Documentation/media/media_kapi.rst
index 83da736fad72..1389998c90f7 100644
--- a/Documentation/media/media_kapi.rst
+++ b/Documentation/media/media_kapi.rst
@@ -1,4 +1,4 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. SPDX-License-Identifier: GPL-2.0
.. include:: <isonum.txt>
diff --git a/Documentation/media/media_uapi.rst b/Documentation/media/media_uapi.rst
index 28eb35a1f965..0753005c7bb4 100644
--- a/Documentation/media/media_uapi.rst
+++ b/Documentation/media/media_uapi.rst
@@ -1,4 +1,4 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. SPDX-License-Identifier: GPL-2.0
.. include:: <isonum.txt>
@@ -10,9 +10,9 @@ Linux Media Infrastructure userspace API
Permission is granted to copy, distribute and/or modify this document
under the terms of the GNU Free Documentation License, Version 1.1 or
-any later version published by the Free Software Foundation. A copy of
-the license is included in the chapter entitled "GNU Free Documentation
-License".
+any later version published by the Free Software Foundation, with no
+Invariant Sections. A copy of the license is included in the chapter
+entitled "GNU Free Documentation License".
.. only:: html
diff --git a/Documentation/media/net.h.rst.exceptions b/Documentation/media/net.h.rst.exceptions
index afe6bef91567..5159aa4bbbb9 100644
--- a/Documentation/media/net.h.rst.exceptions
+++ b/Documentation/media/net.h.rst.exceptions
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
# Ignore header name
ignore define _DVBNET_H_
diff --git a/Documentation/media/typical_media_device.svg b/Documentation/media/typical_media_device.svg
index d6fad90ec199..bfd5c7db3b00 100644
--- a/Documentation/media/typical_media_device.svg
+++ b/Documentation/media/typical_media_device.svg
@@ -1,4 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+
+ TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg id="svg2" width="235mm" height="179mm" clip-path="url(#a)" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" preserveAspectRatio="xMidYMid" version="1.2" viewBox="0 0 22648.239 17899.829" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"><metadata id="metadata1533"><rdf:RDF><cc:Work rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/><dc:title/></cc:Work></rdf:RDF></metadata><defs id="defs4"><clipPath id="a"><rect id="rect7" width="28000" height="21000"/></clipPath></defs><path id="path11" d="m10146 2636c-518.06 0-1035.1 515-1035.1 1031v4124c0 516 517.06 1032 1035.1 1032h8572.2c518.06 0 1036.1-516 1036.1-1032v-4124c0-516-518.06-1031-1036.1-1031h-8572.2z"
fill="#fcf" style=""/><path id="path15" d="m1505.5 13443c-293 0-585 292-585 585v2340c0 293 292 586 585 586h3275c293 0 586-293 586-586v-2340c0-293-293-585-586-585h-3275z" fill="#ffc" style=""/><path id="path19" d="m517.15 22.013c-461 0-922 461-922 922v11169c0 461 461 923 922 923h3692c461 0 922-462 922-923v-11169c0-461-461-922-922-922h-3692z" fill="#e6e6e6" style=""/><path id="path23" d="m2371.5 6438h-2260v-1086h4520v1086h-2260z" fill="#ff8080" style=""/><path id="path25" d="m2371.5 6438h-2260v-1086h4520v1086h-2260z" fill="none" stroke="#3465af" style=""/><text id="text27" class="TextShape" x="-2089.4541" y="-2163.9871" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan29" class="TextParagraph" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan31" class="TextPosition" x="489.5459" y="6111.0132" font-family="Serif, serif" font-size="493.88px"><tspan id="tspan33"
fill="#000000" font-family="Serif, serif" font-size="493.88px">Audio decoder</tspan></tspan></tspan></text>
diff --git a/Documentation/media/uapi/cec/cec-api.rst b/Documentation/media/uapi/cec/cec-api.rst
index 1e2cf498ba30..b614bf81aa20 100644
--- a/Documentation/media/uapi/cec/cec-api.rst
+++ b/Documentation/media/uapi/cec/cec-api.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. include:: <isonum.txt>
diff --git a/Documentation/media/uapi/cec/cec-func-close.rst b/Documentation/media/uapi/cec/cec-func-close.rst
index 334358dfa72e..e10d675546f8 100644
--- a/Documentation/media/uapi/cec/cec-func-close.rst
+++ b/Documentation/media/uapi/cec/cec-func-close.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _cec-func-close:
diff --git a/Documentation/media/uapi/cec/cec-func-ioctl.rst b/Documentation/media/uapi/cec/cec-func-ioctl.rst
index e2b6260b0086..c18d4ba5eb37 100644
--- a/Documentation/media/uapi/cec/cec-func-ioctl.rst
+++ b/Documentation/media/uapi/cec/cec-func-ioctl.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _cec-func-ioctl:
diff --git a/Documentation/media/uapi/cec/cec-func-open.rst b/Documentation/media/uapi/cec/cec-func-open.rst
index 5d6663a649bd..f235aa80155c 100644
--- a/Documentation/media/uapi/cec/cec-func-open.rst
+++ b/Documentation/media/uapi/cec/cec-func-open.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _cec-func-open:
diff --git a/Documentation/media/uapi/cec/cec-func-poll.rst b/Documentation/media/uapi/cec/cec-func-poll.rst
index c698c969635c..3f6c5b0effa3 100644
--- a/Documentation/media/uapi/cec/cec-func-poll.rst
+++ b/Documentation/media/uapi/cec/cec-func-poll.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _cec-func-poll:
diff --git a/Documentation/media/uapi/cec/cec-funcs.rst b/Documentation/media/uapi/cec/cec-funcs.rst
index 6d696cead5cb..620590b168c9 100644
--- a/Documentation/media/uapi/cec/cec-funcs.rst
+++ b/Documentation/media/uapi/cec/cec-funcs.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
.. _cec-user-func:
******************
diff --git a/Documentation/media/uapi/cec/cec-header.rst b/Documentation/media/uapi/cec/cec-header.rst
index d5a9a2828274..726f9766a130 100644
--- a/Documentation/media/uapi/cec/cec-header.rst
+++ b/Documentation/media/uapi/cec/cec-header.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _cec_header:
diff --git a/Documentation/media/uapi/cec/cec-intro.rst b/Documentation/media/uapi/cec/cec-intro.rst
index 07ee2b8f89d6..05088fcefe81 100644
--- a/Documentation/media/uapi/cec/cec-intro.rst
+++ b/Documentation/media/uapi/cec/cec-intro.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
.. _cec-intro:
Introduction
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
index 6c1f6efb822e..0c44f31a9b59 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-caps.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CEC_ADAP_G_CAPS:
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
index 84f431a022ad..26465094e3f1 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-log-addrs.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CEC_ADAP_LOG_ADDRS:
.. _CEC_ADAP_G_LOG_ADDRS:
diff --git a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
index 9e49d4be35d5..693be2f9bf2e 100644
--- a/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-adap-g-phys-addr.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CEC_ADAP_PHYS_ADDR:
.. _CEC_ADAP_G_PHYS_ADDR:
diff --git a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
index 8d5633e6ae04..46a1c99a595e 100644
--- a/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-dqevent.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CEC_DQEVENT:
diff --git a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
index 508e2e325683..c53bb5f73f0d 100644
--- a/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-g-mode.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CEC_MODE:
.. _CEC_G_MODE:
diff --git a/Documentation/media/uapi/cec/cec-ioc-receive.rst b/Documentation/media/uapi/cec/cec-ioc-receive.rst
index b25e48afaa08..c3a685ff05cb 100644
--- a/Documentation/media/uapi/cec/cec-ioc-receive.rst
+++ b/Documentation/media/uapi/cec/cec-ioc-receive.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CEC_TRANSMIT:
.. _CEC_RECEIVE:
diff --git a/Documentation/media/uapi/cec/cec-pin-error-inj.rst b/Documentation/media/uapi/cec/cec-pin-error-inj.rst
index 464b006dbe0a..725f8b1c9965 100644
--- a/Documentation/media/uapi/cec/cec-pin-error-inj.rst
+++ b/Documentation/media/uapi/cec/cec-pin-error-inj.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
CEC Pin Framework Error Injection
=================================
diff --git a/Documentation/media/uapi/dvb/audio-bilingual-channel-select.rst b/Documentation/media/uapi/dvb/audio-bilingual-channel-select.rst
index 1279bd21dbd0..ee2ee74dafa3 100644
--- a/Documentation/media/uapi/dvb/audio-bilingual-channel-select.rst
+++ b/Documentation/media/uapi/dvb/audio-bilingual-channel-select.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_BILINGUAL_CHANNEL_SELECT:
diff --git a/Documentation/media/uapi/dvb/audio-channel-select.rst b/Documentation/media/uapi/dvb/audio-channel-select.rst
index 8cab3d7abff5..ebb2f121c4c8 100644
--- a/Documentation/media/uapi/dvb/audio-channel-select.rst
+++ b/Documentation/media/uapi/dvb/audio-channel-select.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_CHANNEL_SELECT:
diff --git a/Documentation/media/uapi/dvb/audio-clear-buffer.rst b/Documentation/media/uapi/dvb/audio-clear-buffer.rst
index f6bed67cb070..c5b62cde18c8 100644
--- a/Documentation/media/uapi/dvb/audio-clear-buffer.rst
+++ b/Documentation/media/uapi/dvb/audio-clear-buffer.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_CLEAR_BUFFER:
diff --git a/Documentation/media/uapi/dvb/audio-continue.rst b/Documentation/media/uapi/dvb/audio-continue.rst
index ca587869306e..6bdc99e39e20 100644
--- a/Documentation/media/uapi/dvb/audio-continue.rst
+++ b/Documentation/media/uapi/dvb/audio-continue.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_CONTINUE:
diff --git a/Documentation/media/uapi/dvb/audio-fclose.rst b/Documentation/media/uapi/dvb/audio-fclose.rst
index 58d351a3af4b..1e4ad7a0325d 100644
--- a/Documentation/media/uapi/dvb/audio-fclose.rst
+++ b/Documentation/media/uapi/dvb/audio-fclose.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _audio_fclose:
diff --git a/Documentation/media/uapi/dvb/audio-fopen.rst b/Documentation/media/uapi/dvb/audio-fopen.rst
index 4a174640bf11..2cf4d83661f4 100644
--- a/Documentation/media/uapi/dvb/audio-fopen.rst
+++ b/Documentation/media/uapi/dvb/audio-fopen.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _audio_fopen:
diff --git a/Documentation/media/uapi/dvb/audio-fwrite.rst b/Documentation/media/uapi/dvb/audio-fwrite.rst
index 4980ae7953ef..6dc6bf6cbbc7 100644
--- a/Documentation/media/uapi/dvb/audio-fwrite.rst
+++ b/Documentation/media/uapi/dvb/audio-fwrite.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _audio_fwrite:
diff --git a/Documentation/media/uapi/dvb/audio-get-capabilities.rst b/Documentation/media/uapi/dvb/audio-get-capabilities.rst
index 0d867f189c22..4f1ec47e8ac2 100644
--- a/Documentation/media/uapi/dvb/audio-get-capabilities.rst
+++ b/Documentation/media/uapi/dvb/audio-get-capabilities.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_GET_CAPABILITIES:
diff --git a/Documentation/media/uapi/dvb/audio-get-status.rst b/Documentation/media/uapi/dvb/audio-get-status.rst
index 857b058325f1..30e4dd7fce6d 100644
--- a/Documentation/media/uapi/dvb/audio-get-status.rst
+++ b/Documentation/media/uapi/dvb/audio-get-status.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_GET_STATUS:
diff --git a/Documentation/media/uapi/dvb/audio-pause.rst b/Documentation/media/uapi/dvb/audio-pause.rst
index c7310dffbff2..4567ecd9e0a3 100644
--- a/Documentation/media/uapi/dvb/audio-pause.rst
+++ b/Documentation/media/uapi/dvb/audio-pause.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_PAUSE:
diff --git a/Documentation/media/uapi/dvb/audio-play.rst b/Documentation/media/uapi/dvb/audio-play.rst
index 943b5eec9f28..17acd4c411b8 100644
--- a/Documentation/media/uapi/dvb/audio-play.rst
+++ b/Documentation/media/uapi/dvb/audio-play.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_PLAY:
diff --git a/Documentation/media/uapi/dvb/audio-select-source.rst b/Documentation/media/uapi/dvb/audio-select-source.rst
index c0434a0bd324..c5ed6243b11c 100644
--- a/Documentation/media/uapi/dvb/audio-select-source.rst
+++ b/Documentation/media/uapi/dvb/audio-select-source.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_SELECT_SOURCE:
diff --git a/Documentation/media/uapi/dvb/audio-set-av-sync.rst b/Documentation/media/uapi/dvb/audio-set-av-sync.rst
index cf621f3a3037..c116d105fdea 100644
--- a/Documentation/media/uapi/dvb/audio-set-av-sync.rst
+++ b/Documentation/media/uapi/dvb/audio-set-av-sync.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_SET_AV_SYNC:
diff --git a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
index f0db1fbdb066..d537da90acf5 100644
--- a/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
+++ b/Documentation/media/uapi/dvb/audio-set-bypass-mode.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_SET_BYPASS_MODE:
diff --git a/Documentation/media/uapi/dvb/audio-set-id.rst b/Documentation/media/uapi/dvb/audio-set-id.rst
index 8b1081d24473..aeb6ace6cd1e 100644
--- a/Documentation/media/uapi/dvb/audio-set-id.rst
+++ b/Documentation/media/uapi/dvb/audio-set-id.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_SET_ID:
diff --git a/Documentation/media/uapi/dvb/audio-set-mixer.rst b/Documentation/media/uapi/dvb/audio-set-mixer.rst
index 248aab8c8909..60781aa88202 100644
--- a/Documentation/media/uapi/dvb/audio-set-mixer.rst
+++ b/Documentation/media/uapi/dvb/audio-set-mixer.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_SET_MIXER:
diff --git a/Documentation/media/uapi/dvb/audio-set-mute.rst b/Documentation/media/uapi/dvb/audio-set-mute.rst
index 0af105a8ddcc..4449f225e48c 100644
--- a/Documentation/media/uapi/dvb/audio-set-mute.rst
+++ b/Documentation/media/uapi/dvb/audio-set-mute.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_SET_MUTE:
diff --git a/Documentation/media/uapi/dvb/audio-set-streamtype.rst b/Documentation/media/uapi/dvb/audio-set-streamtype.rst
index 46c0362ac71d..d20c34fc7128 100644
--- a/Documentation/media/uapi/dvb/audio-set-streamtype.rst
+++ b/Documentation/media/uapi/dvb/audio-set-streamtype.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_SET_STREAMTYPE:
diff --git a/Documentation/media/uapi/dvb/audio-stop.rst b/Documentation/media/uapi/dvb/audio-stop.rst
index dd6c3b6826ec..1bba2e50c364 100644
--- a/Documentation/media/uapi/dvb/audio-stop.rst
+++ b/Documentation/media/uapi/dvb/audio-stop.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _AUDIO_STOP:
diff --git a/Documentation/media/uapi/dvb/audio.rst b/Documentation/media/uapi/dvb/audio.rst
index e9f9e589c486..ebc18fca76a4 100644
--- a/Documentation/media/uapi/dvb/audio.rst
+++ b/Documentation/media/uapi/dvb/audio.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dvb_audio:
diff --git a/Documentation/media/uapi/dvb/audio_data_types.rst b/Documentation/media/uapi/dvb/audio_data_types.rst
index 5bffa2c98a24..5b032fe13b9d 100644
--- a/Documentation/media/uapi/dvb/audio_data_types.rst
+++ b/Documentation/media/uapi/dvb/audio_data_types.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _audio_data_types:
diff --git a/Documentation/media/uapi/dvb/audio_function_calls.rst b/Documentation/media/uapi/dvb/audio_function_calls.rst
index 7dba16285dab..5478e78b085e 100644
--- a/Documentation/media/uapi/dvb/audio_function_calls.rst
+++ b/Documentation/media/uapi/dvb/audio_function_calls.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _audio_function_calls:
diff --git a/Documentation/media/uapi/dvb/ca-fclose.rst b/Documentation/media/uapi/dvb/ca-fclose.rst
index e84bbfcfa184..e273444ccc67 100644
--- a/Documentation/media/uapi/dvb/ca-fclose.rst
+++ b/Documentation/media/uapi/dvb/ca-fclose.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _ca_fclose:
diff --git a/Documentation/media/uapi/dvb/ca-fopen.rst b/Documentation/media/uapi/dvb/ca-fopen.rst
index 056c71b53a70..e11ebeae5693 100644
--- a/Documentation/media/uapi/dvb/ca-fopen.rst
+++ b/Documentation/media/uapi/dvb/ca-fopen.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _ca_fopen:
diff --git a/Documentation/media/uapi/dvb/ca-get-cap.rst b/Documentation/media/uapi/dvb/ca-get-cap.rst
index d2d5c1355396..9e4fb5186373 100644
--- a/Documentation/media/uapi/dvb/ca-get-cap.rst
+++ b/Documentation/media/uapi/dvb/ca-get-cap.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CA_GET_CAP:
diff --git a/Documentation/media/uapi/dvb/ca-get-descr-info.rst b/Documentation/media/uapi/dvb/ca-get-descr-info.rst
index e564fbb8d524..80ef43a339df 100644
--- a/Documentation/media/uapi/dvb/ca-get-descr-info.rst
+++ b/Documentation/media/uapi/dvb/ca-get-descr-info.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CA_GET_DESCR_INFO:
diff --git a/Documentation/media/uapi/dvb/ca-get-msg.rst b/Documentation/media/uapi/dvb/ca-get-msg.rst
index ceeda623ce93..bcb7955a0ddc 100644
--- a/Documentation/media/uapi/dvb/ca-get-msg.rst
+++ b/Documentation/media/uapi/dvb/ca-get-msg.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CA_GET_MSG:
diff --git a/Documentation/media/uapi/dvb/ca-get-slot-info.rst b/Documentation/media/uapi/dvb/ca-get-slot-info.rst
index 1a1d6f0c71b9..1ea5c497f2ea 100644
--- a/Documentation/media/uapi/dvb/ca-get-slot-info.rst
+++ b/Documentation/media/uapi/dvb/ca-get-slot-info.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CA_GET_SLOT_INFO:
diff --git a/Documentation/media/uapi/dvb/ca-reset.rst b/Documentation/media/uapi/dvb/ca-reset.rst
index 29788325f90e..29fda19984be 100644
--- a/Documentation/media/uapi/dvb/ca-reset.rst
+++ b/Documentation/media/uapi/dvb/ca-reset.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CA_RESET:
diff --git a/Documentation/media/uapi/dvb/ca-send-msg.rst b/Documentation/media/uapi/dvb/ca-send-msg.rst
index 9e91287b7bbc..5a3c4e8120c4 100644
--- a/Documentation/media/uapi/dvb/ca-send-msg.rst
+++ b/Documentation/media/uapi/dvb/ca-send-msg.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CA_SEND_MSG:
diff --git a/Documentation/media/uapi/dvb/ca-set-descr.rst b/Documentation/media/uapi/dvb/ca-set-descr.rst
index a6c47205ffd8..22c8b8f94c7e 100644
--- a/Documentation/media/uapi/dvb/ca-set-descr.rst
+++ b/Documentation/media/uapi/dvb/ca-set-descr.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _CA_SET_DESCR:
diff --git a/Documentation/media/uapi/dvb/ca.rst b/Documentation/media/uapi/dvb/ca.rst
index deac72d89e93..8796512c1378 100644
--- a/Documentation/media/uapi/dvb/ca.rst
+++ b/Documentation/media/uapi/dvb/ca.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dvb_ca:
diff --git a/Documentation/media/uapi/dvb/ca_data_types.rst b/Documentation/media/uapi/dvb/ca_data_types.rst
index ac7cbd76ddd5..834c8ab4c300 100644
--- a/Documentation/media/uapi/dvb/ca_data_types.rst
+++ b/Documentation/media/uapi/dvb/ca_data_types.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _ca_data_types:
diff --git a/Documentation/media/uapi/dvb/ca_function_calls.rst b/Documentation/media/uapi/dvb/ca_function_calls.rst
index 87d697851e82..6985bebd0661 100644
--- a/Documentation/media/uapi/dvb/ca_function_calls.rst
+++ b/Documentation/media/uapi/dvb/ca_function_calls.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _ca_function_calls:
diff --git a/Documentation/media/uapi/dvb/demux.rst b/Documentation/media/uapi/dvb/demux.rst
index 45c3d6405c46..d8c0ff4015fe 100644
--- a/Documentation/media/uapi/dvb/demux.rst
+++ b/Documentation/media/uapi/dvb/demux.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dvb_demux:
diff --git a/Documentation/media/uapi/dvb/dmx-add-pid.rst b/Documentation/media/uapi/dvb/dmx-add-pid.rst
index 4d5632dfb43e..f483268e4ede 100644
--- a/Documentation/media/uapi/dvb/dmx-add-pid.rst
+++ b/Documentation/media/uapi/dvb/dmx-add-pid.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _DMX_ADD_PID:
diff --git a/Documentation/media/uapi/dvb/dmx-expbuf.rst b/Documentation/media/uapi/dvb/dmx-expbuf.rst
index 2d96cfe891df..d7f0658f3db3 100644
--- a/Documentation/media/uapi/dvb/dmx-expbuf.rst
+++ b/Documentation/media/uapi/dvb/dmx-expbuf.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
.. _DMX_EXPBUF:
****************
diff --git a/Documentation/media/uapi/dvb/dmx-fclose.rst b/Documentation/media/uapi/dvb/dmx-fclose.rst
index 578e929f4bde..05ff32270274 100644
--- a/Documentation/media/uapi/dvb/dmx-fclose.rst
+++ b/Documentation/media/uapi/dvb/dmx-fclose.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dmx_fclose:
diff --git a/Documentation/media/uapi/dvb/dmx-fopen.rst b/Documentation/media/uapi/dvb/dmx-fopen.rst
index 55628a18ba67..2700a2fad68b 100644
--- a/Documentation/media/uapi/dvb/dmx-fopen.rst
+++ b/Documentation/media/uapi/dvb/dmx-fopen.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dmx_fopen:
diff --git a/Documentation/media/uapi/dvb/dmx-fread.rst b/Documentation/media/uapi/dvb/dmx-fread.rst
index 488bdc4ba178..292fa98f39ff 100644
--- a/Documentation/media/uapi/dvb/dmx-fread.rst
+++ b/Documentation/media/uapi/dvb/dmx-fread.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dmx_fread:
diff --git a/Documentation/media/uapi/dvb/dmx-fwrite.rst b/Documentation/media/uapi/dvb/dmx-fwrite.rst
index 519e5733e53b..bdd4d4743bd5 100644
--- a/Documentation/media/uapi/dvb/dmx-fwrite.rst
+++ b/Documentation/media/uapi/dvb/dmx-fwrite.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dmx_fwrite:
diff --git a/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst b/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst
index fbdbc12869d1..fcd3dc06c095 100644
--- a/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst
+++ b/Documentation/media/uapi/dvb/dmx-get-pes-pids.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _DMX_GET_PES_PIDS:
diff --git a/Documentation/media/uapi/dvb/dmx-get-stc.rst b/Documentation/media/uapi/dvb/dmx-get-stc.rst
index 604031f7904b..2c81595f470a 100644
--- a/Documentation/media/uapi/dvb/dmx-get-stc.rst
+++ b/Documentation/media/uapi/dvb/dmx-get-stc.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _DMX_GET_STC:
diff --git a/Documentation/media/uapi/dvb/dmx-mmap.rst b/Documentation/media/uapi/dvb/dmx-mmap.rst
index 15d107348b9f..34bb7766718f 100644
--- a/Documentation/media/uapi/dvb/dmx-mmap.rst
+++ b/Documentation/media/uapi/dvb/dmx-mmap.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
.. _dmx-mmap:
*****************
diff --git a/Documentation/media/uapi/dvb/dmx-munmap.rst b/Documentation/media/uapi/dvb/dmx-munmap.rst
index d77218732bb6..ef26b6f2b12b 100644
--- a/Documentation/media/uapi/dvb/dmx-munmap.rst
+++ b/Documentation/media/uapi/dvb/dmx-munmap.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
.. _dmx-munmap:
************
diff --git a/Documentation/media/uapi/dvb/dmx-qbuf.rst b/Documentation/media/uapi/dvb/dmx-qbuf.rst
index be5a4c6f1904..9a1d85147c25 100644
--- a/Documentation/media/uapi/dvb/dmx-qbuf.rst
+++ b/Documentation/media/uapi/dvb/dmx-qbuf.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
.. _DMX_QBUF:
*************************
diff --git a/Documentation/media/uapi/dvb/dmx-querybuf.rst b/Documentation/media/uapi/dvb/dmx-querybuf.rst
index 89481e24bb86..4cf36e821696 100644
--- a/Documentation/media/uapi/dvb/dmx-querybuf.rst
+++ b/Documentation/media/uapi/dvb/dmx-querybuf.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
.. _DMX_QUERYBUF:
******************
diff --git a/Documentation/media/uapi/dvb/dmx-remove-pid.rst b/Documentation/media/uapi/dvb/dmx-remove-pid.rst
index 456cc2ded2c0..be992f44f306 100644
--- a/Documentation/media/uapi/dvb/dmx-remove-pid.rst
+++ b/Documentation/media/uapi/dvb/dmx-remove-pid.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _DMX_REMOVE_PID:
diff --git a/Documentation/media/uapi/dvb/dmx-reqbufs.rst b/Documentation/media/uapi/dvb/dmx-reqbufs.rst
index 14b80d60bf35..b302785bf678 100644
--- a/Documentation/media/uapi/dvb/dmx-reqbufs.rst
+++ b/Documentation/media/uapi/dvb/dmx-reqbufs.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
.. _DMX_REQBUFS:
*****************
diff --git a/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst b/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst
index 74fd076a9b90..2dee0fb11f62 100644
--- a/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst
+++ b/Documentation/media/uapi/dvb/dmx-set-buffer-size.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _DMX_SET_BUFFER_SIZE:
diff --git a/Documentation/media/uapi/dvb/dmx-set-filter.rst b/Documentation/media/uapi/dvb/dmx-set-filter.rst
index 88594b8d3846..66afbb9f2fe4 100644
--- a/Documentation/media/uapi/dvb/dmx-set-filter.rst
+++ b/Documentation/media/uapi/dvb/dmx-set-filter.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _DMX_SET_FILTER:
diff --git a/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst b/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst
index d70e7bf96a41..dae5ab7878e5 100644
--- a/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst
+++ b/Documentation/media/uapi/dvb/dmx-set-pes-filter.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _DMX_SET_PES_FILTER:
diff --git a/Documentation/media/uapi/dvb/dmx-start.rst b/Documentation/media/uapi/dvb/dmx-start.rst
index 36700e775296..488289d02504 100644
--- a/Documentation/media/uapi/dvb/dmx-start.rst
+++ b/Documentation/media/uapi/dvb/dmx-start.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _DMX_START:
diff --git a/Documentation/media/uapi/dvb/dmx-stop.rst b/Documentation/media/uapi/dvb/dmx-stop.rst
index 6d9c927bcd5f..982384d12923 100644
--- a/Documentation/media/uapi/dvb/dmx-stop.rst
+++ b/Documentation/media/uapi/dvb/dmx-stop.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _DMX_STOP:
diff --git a/Documentation/media/uapi/dvb/dmx_fcalls.rst b/Documentation/media/uapi/dvb/dmx_fcalls.rst
index 4c391cf2554f..67312ab65f94 100644
--- a/Documentation/media/uapi/dvb/dmx_fcalls.rst
+++ b/Documentation/media/uapi/dvb/dmx_fcalls.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dmx_fcalls:
diff --git a/Documentation/media/uapi/dvb/dmx_types.rst b/Documentation/media/uapi/dvb/dmx_types.rst
index 2a023a4f516c..b5cf704199e5 100644
--- a/Documentation/media/uapi/dvb/dmx_types.rst
+++ b/Documentation/media/uapi/dvb/dmx_types.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dmx_types:
diff --git a/Documentation/media/uapi/dvb/dvb-fe-read-status.rst b/Documentation/media/uapi/dvb/dvb-fe-read-status.rst
index 212f032cad8b..172783b75fb7 100644
--- a/Documentation/media/uapi/dvb/dvb-fe-read-status.rst
+++ b/Documentation/media/uapi/dvb/dvb-fe-read-status.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dvb-fe-read-status:
diff --git a/Documentation/media/uapi/dvb/dvb-frontend-event.rst b/Documentation/media/uapi/dvb/dvb-frontend-event.rst
index 2088bc6cacd8..ad4af66040c7 100644
--- a/Documentation/media/uapi/dvb/dvb-frontend-event.rst
+++ b/Documentation/media/uapi/dvb/dvb-frontend-event.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. c:type:: dvb_frontend_event
diff --git a/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst b/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst
index b152166f8fa7..67c2a316019f 100644
--- a/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst
+++ b/Documentation/media/uapi/dvb/dvb-frontend-parameters.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. c:type:: dvb_frontend_parameters
diff --git a/Documentation/media/uapi/dvb/dvbapi.rst b/Documentation/media/uapi/dvb/dvbapi.rst
index 89ddca38626f..0fcc01f182f9 100644
--- a/Documentation/media/uapi/dvb/dvbapi.rst
+++ b/Documentation/media/uapi/dvb/dvbapi.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. include:: <isonum.txt>
diff --git a/Documentation/media/uapi/dvb/dvbproperty.rst b/Documentation/media/uapi/dvb/dvbproperty.rst
index 1a56c1724e59..371c72bb9419 100644
--- a/Documentation/media/uapi/dvb/dvbproperty.rst
+++ b/Documentation/media/uapi/dvb/dvbproperty.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _frontend-properties:
diff --git a/Documentation/media/uapi/dvb/dvbstb.svg b/Documentation/media/uapi/dvb/dvbstb.svg
index f6fe2f837373..c7672148d6ff 100644
--- a/Documentation/media/uapi/dvb/dvbstb.svg
+++ b/Documentation/media/uapi/dvb/dvbstb.svg
@@ -1,4 +1,31 @@
<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ This file is dual-licensed: you can use it either under the terms
+ of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+ dual licensing only applies to this file, and not this project as a
+ whole.
+
+ a) This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; version 2 of
+ the License.
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Or, alternatively,
+
+ b) Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+
+ TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg id="svg2" width="15.847cm" height="8.4187cm" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" preserveAspectRatio="xMidYMid" version="1.2" viewBox="0 0 23770.123 12628.122" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"><defs id="defs142"><marker id="Arrow1Lend" overflow="visible" orient="auto"><path id="path954" transform="matrix(-.8 0 0 -.8 -10 0)" d="m0 0 5-5-17.5 5 17.5 5z" fill-rule="evenodd" stroke="#000" stroke-width="1pt"/></marker><marker id="marker1243" overflow="visible" orient="auto"><path id="path1241" transform="matrix(-.8 0 0 -.8 -10 0)" d="m0 0 5-5-17.5 5 17.5 5z" fill-rule="evenodd" stroke="#000" stroke-width="1pt"/></marker></defs><metadata id="metadata519"><rdf:RDF><cc:Work
rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/><dc:title/></cc:Work></rdf:RDF></metadata><rect id="rect197" class="BoundingBox" x="5355.1" y="13.122" width="18403" height="9603" fill="none"/><path id="path199" d="m14556 9614.1h-9200v-9600h18400v9600z" fill="#fff"/><path id="path201" d="m14556 9614.1h-9200v-9600h18400v9600z" fill="none" stroke="#000"/><rect id="rect206" class="BoundingBox" x="13.122" y="4013.1" width="4544" height="2403" fill="none"/><path id="path208" d="m2285.1 6414.1h-2271v-2400h4541v2400z" fill="#fff"/><path id="path210" d="m2285.1 6414.1h-2271v-2400h4541v2400z" fill="none" stroke="#000"/><text id="text212" class="TextShape" x="-2443.8779" y="-4585.8779"><tspan id="tspan214" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan216" class="TextPosition"
x="1281.1219" y="5435.1221"><tspan id="tspan218" fill="#000000">Antena</tspan></tspan></tspan></text>
diff --git a/Documentation/media/uapi/dvb/examples.rst b/Documentation/media/uapi/dvb/examples.rst
index 16dd90fa9e94..eaa41bc8d173 100644
--- a/Documentation/media/uapi/dvb/examples.rst
+++ b/Documentation/media/uapi/dvb/examples.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dvb_examples:
diff --git a/Documentation/media/uapi/dvb/fe-bandwidth-t.rst b/Documentation/media/uapi/dvb/fe-bandwidth-t.rst
index 70256180e9b3..c3d7837b5f87 100644
--- a/Documentation/media/uapi/dvb/fe-bandwidth-t.rst
+++ b/Documentation/media/uapi/dvb/fe-bandwidth-t.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
******************
Frontend bandwidth
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst b/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst
index f220ee351e15..88fd2186ca4d 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-recv-slave-reply.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_DISEQC_RECV_SLAVE_REPLY:
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst b/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst
index 78476c1c7bf5..92929c2e75db 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-reset-overload.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_DISEQC_RESET_OVERLOAD:
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst b/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
index a7e05914efae..8af872d306aa 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-send-burst.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_DISEQC_SEND_BURST:
diff --git a/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst b/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst
index 6bd3994edfc2..30a48114153c 100644
--- a/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst
+++ b/Documentation/media/uapi/dvb/fe-diseqc-send-master-cmd.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_DISEQC_SEND_MASTER_CMD:
diff --git a/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst b/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst
index dcf2d20d460f..13811289971b 100644
--- a/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst
+++ b/Documentation/media/uapi/dvb/fe-dishnetwork-send-legacy-cmd.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_DISHNETWORK_SEND_LEGACY_CMD:
diff --git a/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst b/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst
index b20cb360fe37..32b7d140d80b 100644
--- a/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst
+++ b/Documentation/media/uapi/dvb/fe-enable-high-lnb-voltage.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_ENABLE_HIGH_LNB_VOLTAGE:
diff --git a/Documentation/media/uapi/dvb/fe-get-event.rst b/Documentation/media/uapi/dvb/fe-get-event.rst
index 505db94bf183..2573d5b9b636 100644
--- a/Documentation/media/uapi/dvb/fe-get-event.rst
+++ b/Documentation/media/uapi/dvb/fe-get-event.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_GET_EVENT:
diff --git a/Documentation/media/uapi/dvb/fe-get-frontend.rst b/Documentation/media/uapi/dvb/fe-get-frontend.rst
index 5db552cedd70..6cd5250d1832 100644
--- a/Documentation/media/uapi/dvb/fe-get-frontend.rst
+++ b/Documentation/media/uapi/dvb/fe-get-frontend.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_GET_FRONTEND:
diff --git a/Documentation/media/uapi/dvb/fe-get-info.rst b/Documentation/media/uapi/dvb/fe-get-info.rst
index 49307c0abfee..551e68b11528 100644
--- a/Documentation/media/uapi/dvb/fe-get-info.rst
+++ b/Documentation/media/uapi/dvb/fe-get-info.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_GET_INFO:
diff --git a/Documentation/media/uapi/dvb/fe-get-property.rst b/Documentation/media/uapi/dvb/fe-get-property.rst
index b69741d9cedf..99386c7461b3 100644
--- a/Documentation/media/uapi/dvb/fe-get-property.rst
+++ b/Documentation/media/uapi/dvb/fe-get-property.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_GET_PROPERTY:
diff --git a/Documentation/media/uapi/dvb/fe-read-ber.rst b/Documentation/media/uapi/dvb/fe-read-ber.rst
index 1e6a79567a4c..e579d648687e 100644
--- a/Documentation/media/uapi/dvb/fe-read-ber.rst
+++ b/Documentation/media/uapi/dvb/fe-read-ber.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_READ_BER:
diff --git a/Documentation/media/uapi/dvb/fe-read-signal-strength.rst b/Documentation/media/uapi/dvb/fe-read-signal-strength.rst
index 198f6dfb53a1..0a0c0c2ff207 100644
--- a/Documentation/media/uapi/dvb/fe-read-signal-strength.rst
+++ b/Documentation/media/uapi/dvb/fe-read-signal-strength.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_READ_SIGNAL_STRENGTH:
diff --git a/Documentation/media/uapi/dvb/fe-read-snr.rst b/Documentation/media/uapi/dvb/fe-read-snr.rst
index 6db22c043512..2a7a0d8f1fd5 100644
--- a/Documentation/media/uapi/dvb/fe-read-snr.rst
+++ b/Documentation/media/uapi/dvb/fe-read-snr.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_READ_SNR:
diff --git a/Documentation/media/uapi/dvb/fe-read-status.rst b/Documentation/media/uapi/dvb/fe-read-status.rst
index 4adb52f084ff..0dfc9fdf568f 100644
--- a/Documentation/media/uapi/dvb/fe-read-status.rst
+++ b/Documentation/media/uapi/dvb/fe-read-status.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_READ_STATUS:
diff --git a/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst b/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst
index f2c688bcacb3..19c532f750aa 100644
--- a/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst
+++ b/Documentation/media/uapi/dvb/fe-read-uncorrected-blocks.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_READ_UNCORRECTED_BLOCKS:
diff --git a/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst b/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst
index 3c4bc179b313..36e8913170e1 100644
--- a/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst
+++ b/Documentation/media/uapi/dvb/fe-set-frontend-tune-mode.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_SET_FRONTEND_TUNE_MODE:
diff --git a/Documentation/media/uapi/dvb/fe-set-frontend.rst b/Documentation/media/uapi/dvb/fe-set-frontend.rst
index 4f3dcf338254..23caae2588d2 100644
--- a/Documentation/media/uapi/dvb/fe-set-frontend.rst
+++ b/Documentation/media/uapi/dvb/fe-set-frontend.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_SET_FRONTEND:
diff --git a/Documentation/media/uapi/dvb/fe-set-tone.rst b/Documentation/media/uapi/dvb/fe-set-tone.rst
index 758efa11014c..fb605e8c9fc4 100644
--- a/Documentation/media/uapi/dvb/fe-set-tone.rst
+++ b/Documentation/media/uapi/dvb/fe-set-tone.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_SET_TONE:
diff --git a/Documentation/media/uapi/dvb/fe-set-voltage.rst b/Documentation/media/uapi/dvb/fe-set-voltage.rst
index 38d4485290a0..c81a8e6a59aa 100644
--- a/Documentation/media/uapi/dvb/fe-set-voltage.rst
+++ b/Documentation/media/uapi/dvb/fe-set-voltage.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _FE_SET_VOLTAGE:
diff --git a/Documentation/media/uapi/dvb/fe-type-t.rst b/Documentation/media/uapi/dvb/fe-type-t.rst
index dee32ae104d7..9720d2f7ba35 100644
--- a/Documentation/media/uapi/dvb/fe-type-t.rst
+++ b/Documentation/media/uapi/dvb/fe-type-t.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
*************
Frontend type
diff --git a/Documentation/media/uapi/dvb/fe_property_parameters.rst b/Documentation/media/uapi/dvb/fe_property_parameters.rst
index 3524dcae4604..2fd2954d8dae 100644
--- a/Documentation/media/uapi/dvb/fe_property_parameters.rst
+++ b/Documentation/media/uapi/dvb/fe_property_parameters.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _fe_property_parameters:
diff --git a/Documentation/media/uapi/dvb/frontend-header.rst b/Documentation/media/uapi/dvb/frontend-header.rst
index 8d8433cf1e12..635fb4251214 100644
--- a/Documentation/media/uapi/dvb/frontend-header.rst
+++ b/Documentation/media/uapi/dvb/frontend-header.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
Frontend uAPI data types
========================
diff --git a/Documentation/media/uapi/dvb/frontend-property-cable-systems.rst b/Documentation/media/uapi/dvb/frontend-property-cable-systems.rst
index bf2328627af5..97fbfc228c10 100644
--- a/Documentation/media/uapi/dvb/frontend-property-cable-systems.rst
+++ b/Documentation/media/uapi/dvb/frontend-property-cable-systems.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _frontend-property-cable-systems:
diff --git a/Documentation/media/uapi/dvb/frontend-property-satellite-systems.rst b/Documentation/media/uapi/dvb/frontend-property-satellite-systems.rst
index 2929e6999a7a..2bc880a3c826 100644
--- a/Documentation/media/uapi/dvb/frontend-property-satellite-systems.rst
+++ b/Documentation/media/uapi/dvb/frontend-property-satellite-systems.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _frontend-property-satellite-systems:
diff --git a/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst b/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst
index 0beb5cb3d729..c20af13297e5 100644
--- a/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst
+++ b/Documentation/media/uapi/dvb/frontend-property-terrestrial-systems.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _frontend-property-terrestrial-systems:
diff --git a/Documentation/media/uapi/dvb/frontend-stat-properties.rst b/Documentation/media/uapi/dvb/frontend-stat-properties.rst
index e73754fd0631..546464db04b5 100644
--- a/Documentation/media/uapi/dvb/frontend-stat-properties.rst
+++ b/Documentation/media/uapi/dvb/frontend-stat-properties.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _frontend-stat-properties:
diff --git a/Documentation/media/uapi/dvb/frontend.rst b/Documentation/media/uapi/dvb/frontend.rst
index 4967c48d46ce..7ff225dfe11c 100644
--- a/Documentation/media/uapi/dvb/frontend.rst
+++ b/Documentation/media/uapi/dvb/frontend.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dvb_frontend:
diff --git a/Documentation/media/uapi/dvb/frontend_f_close.rst b/Documentation/media/uapi/dvb/frontend_f_close.rst
index 67958d73cf34..af87c2a83719 100644
--- a/Documentation/media/uapi/dvb/frontend_f_close.rst
+++ b/Documentation/media/uapi/dvb/frontend_f_close.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _frontend_f_close:
diff --git a/Documentation/media/uapi/dvb/frontend_f_open.rst b/Documentation/media/uapi/dvb/frontend_f_open.rst
index 8e8cb466c24b..6a46ec5acf7b 100644
--- a/Documentation/media/uapi/dvb/frontend_f_open.rst
+++ b/Documentation/media/uapi/dvb/frontend_f_open.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _frontend_f_open:
diff --git a/Documentation/media/uapi/dvb/frontend_fcalls.rst b/Documentation/media/uapi/dvb/frontend_fcalls.rst
index b03f9cab6d5a..9b3586f538ea 100644
--- a/Documentation/media/uapi/dvb/frontend_fcalls.rst
+++ b/Documentation/media/uapi/dvb/frontend_fcalls.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _frontend_fcalls:
diff --git a/Documentation/media/uapi/dvb/frontend_legacy_api.rst b/Documentation/media/uapi/dvb/frontend_legacy_api.rst
index 759833d3eaa4..1ea749d09ca2 100644
--- a/Documentation/media/uapi/dvb/frontend_legacy_api.rst
+++ b/Documentation/media/uapi/dvb/frontend_legacy_api.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _frontend_legacy_types:
diff --git a/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst b/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst
index a4d5319cb76b..1567bc73855a 100644
--- a/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst
+++ b/Documentation/media/uapi/dvb/frontend_legacy_dvbv3_api.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _frontend_legacy_dvbv3_api:
diff --git a/Documentation/media/uapi/dvb/headers.rst b/Documentation/media/uapi/dvb/headers.rst
index c13fd537fbff..edeabd9e8e90 100644
--- a/Documentation/media/uapi/dvb/headers.rst
+++ b/Documentation/media/uapi/dvb/headers.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
****************************
Digital TV uAPI header files
****************************
diff --git a/Documentation/media/uapi/dvb/intro.rst b/Documentation/media/uapi/dvb/intro.rst
index 79b4d0e4e920..f1384616ac4e 100644
--- a/Documentation/media/uapi/dvb/intro.rst
+++ b/Documentation/media/uapi/dvb/intro.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dvb_introdution:
diff --git a/Documentation/media/uapi/dvb/legacy_dvb_apis.rst b/Documentation/media/uapi/dvb/legacy_dvb_apis.rst
index e1b2c9c7b620..a43b4c36d935 100644
--- a/Documentation/media/uapi/dvb/legacy_dvb_apis.rst
+++ b/Documentation/media/uapi/dvb/legacy_dvb_apis.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _legacy_dvb_apis:
diff --git a/Documentation/media/uapi/dvb/net-add-if.rst b/Documentation/media/uapi/dvb/net-add-if.rst
index 6749b70246c5..1188641b453e 100644
--- a/Documentation/media/uapi/dvb/net-add-if.rst
+++ b/Documentation/media/uapi/dvb/net-add-if.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _NET_ADD_IF:
diff --git a/Documentation/media/uapi/dvb/net-get-if.rst b/Documentation/media/uapi/dvb/net-get-if.rst
index 3733b34da9db..7c4ef4b9d6cc 100644
--- a/Documentation/media/uapi/dvb/net-get-if.rst
+++ b/Documentation/media/uapi/dvb/net-get-if.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _NET_GET_IF:
diff --git a/Documentation/media/uapi/dvb/net-remove-if.rst b/Documentation/media/uapi/dvb/net-remove-if.rst
index 4ebe07a6b79a..bf9a1602eeec 100644
--- a/Documentation/media/uapi/dvb/net-remove-if.rst
+++ b/Documentation/media/uapi/dvb/net-remove-if.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _NET_REMOVE_IF:
diff --git a/Documentation/media/uapi/dvb/net-types.rst b/Documentation/media/uapi/dvb/net-types.rst
index 8fa3292eaa42..9e16462a1ef4 100644
--- a/Documentation/media/uapi/dvb/net-types.rst
+++ b/Documentation/media/uapi/dvb/net-types.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _net_types:
diff --git a/Documentation/media/uapi/dvb/net.rst b/Documentation/media/uapi/dvb/net.rst
index e0cd4e402627..833daa381968 100644
--- a/Documentation/media/uapi/dvb/net.rst
+++ b/Documentation/media/uapi/dvb/net.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _net:
diff --git a/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst b/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst
index 51ec0b04b496..9a6badc1d295 100644
--- a/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst
+++ b/Documentation/media/uapi/dvb/query-dvb-frontend-info.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _query-dvb-frontend-info:
diff --git a/Documentation/media/uapi/dvb/video-clear-buffer.rst b/Documentation/media/uapi/dvb/video-clear-buffer.rst
index 2e51a78a69f1..5eb5546e8ce4 100644
--- a/Documentation/media/uapi/dvb/video-clear-buffer.rst
+++ b/Documentation/media/uapi/dvb/video-clear-buffer.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_CLEAR_BUFFER:
diff --git a/Documentation/media/uapi/dvb/video-command.rst b/Documentation/media/uapi/dvb/video-command.rst
index 536d0fdd8399..020b49645c6b 100644
--- a/Documentation/media/uapi/dvb/video-command.rst
+++ b/Documentation/media/uapi/dvb/video-command.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_COMMAND:
diff --git a/Documentation/media/uapi/dvb/video-continue.rst b/Documentation/media/uapi/dvb/video-continue.rst
index e65e600be632..2ae2067dfba8 100644
--- a/Documentation/media/uapi/dvb/video-continue.rst
+++ b/Documentation/media/uapi/dvb/video-continue.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_CONTINUE:
diff --git a/Documentation/media/uapi/dvb/video-fast-forward.rst b/Documentation/media/uapi/dvb/video-fast-forward.rst
index 70a53e110335..3f805f334ae1 100644
--- a/Documentation/media/uapi/dvb/video-fast-forward.rst
+++ b/Documentation/media/uapi/dvb/video-fast-forward.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_FAST_FORWARD:
diff --git a/Documentation/media/uapi/dvb/video-fclose.rst b/Documentation/media/uapi/dvb/video-fclose.rst
index 8a997ae6f6a7..3b0285b96a3c 100644
--- a/Documentation/media/uapi/dvb/video-fclose.rst
+++ b/Documentation/media/uapi/dvb/video-fclose.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _video_fclose:
diff --git a/Documentation/media/uapi/dvb/video-fopen.rst b/Documentation/media/uapi/dvb/video-fopen.rst
index 203a2c56f10a..7b2a8c750e6a 100644
--- a/Documentation/media/uapi/dvb/video-fopen.rst
+++ b/Documentation/media/uapi/dvb/video-fopen.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _video_fopen:
diff --git a/Documentation/media/uapi/dvb/video-freeze.rst b/Documentation/media/uapi/dvb/video-freeze.rst
index 5a28bdc8badd..6b31a4755d2c 100644
--- a/Documentation/media/uapi/dvb/video-freeze.rst
+++ b/Documentation/media/uapi/dvb/video-freeze.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_FREEZE:
diff --git a/Documentation/media/uapi/dvb/video-fwrite.rst b/Documentation/media/uapi/dvb/video-fwrite.rst
index cfe7c57dcfc7..eb35b79eb85c 100644
--- a/Documentation/media/uapi/dvb/video-fwrite.rst
+++ b/Documentation/media/uapi/dvb/video-fwrite.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _video_fwrite:
diff --git a/Documentation/media/uapi/dvb/video-get-capabilities.rst b/Documentation/media/uapi/dvb/video-get-capabilities.rst
index 6987f659a1ad..971fdab70e15 100644
--- a/Documentation/media/uapi/dvb/video-get-capabilities.rst
+++ b/Documentation/media/uapi/dvb/video-get-capabilities.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_GET_CAPABILITIES:
diff --git a/Documentation/media/uapi/dvb/video-get-event.rst b/Documentation/media/uapi/dvb/video-get-event.rst
index b4f53616db9a..def6c40db601 100644
--- a/Documentation/media/uapi/dvb/video-get-event.rst
+++ b/Documentation/media/uapi/dvb/video-get-event.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_GET_EVENT:
diff --git a/Documentation/media/uapi/dvb/video-get-frame-count.rst b/Documentation/media/uapi/dvb/video-get-frame-count.rst
index 0ffe22cd6108..ef35da7d4861 100644
--- a/Documentation/media/uapi/dvb/video-get-frame-count.rst
+++ b/Documentation/media/uapi/dvb/video-get-frame-count.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_GET_FRAME_COUNT:
diff --git a/Documentation/media/uapi/dvb/video-get-pts.rst b/Documentation/media/uapi/dvb/video-get-pts.rst
index c73f86f1d35b..86ceefff7834 100644
--- a/Documentation/media/uapi/dvb/video-get-pts.rst
+++ b/Documentation/media/uapi/dvb/video-get-pts.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_GET_PTS:
diff --git a/Documentation/media/uapi/dvb/video-get-size.rst b/Documentation/media/uapi/dvb/video-get-size.rst
index d077fe2305a0..cc92189d31fd 100644
--- a/Documentation/media/uapi/dvb/video-get-size.rst
+++ b/Documentation/media/uapi/dvb/video-get-size.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_GET_SIZE:
diff --git a/Documentation/media/uapi/dvb/video-get-status.rst b/Documentation/media/uapi/dvb/video-get-status.rst
index ed6ea19827a6..8bfcf8fc3e19 100644
--- a/Documentation/media/uapi/dvb/video-get-status.rst
+++ b/Documentation/media/uapi/dvb/video-get-status.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_GET_STATUS:
diff --git a/Documentation/media/uapi/dvb/video-play.rst b/Documentation/media/uapi/dvb/video-play.rst
index 2124120aec22..fb3f4f168814 100644
--- a/Documentation/media/uapi/dvb/video-play.rst
+++ b/Documentation/media/uapi/dvb/video-play.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_PLAY:
diff --git a/Documentation/media/uapi/dvb/video-select-source.rst b/Documentation/media/uapi/dvb/video-select-source.rst
index cde6542723ca..32cf025356dc 100644
--- a/Documentation/media/uapi/dvb/video-select-source.rst
+++ b/Documentation/media/uapi/dvb/video-select-source.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_SELECT_SOURCE:
diff --git a/Documentation/media/uapi/dvb/video-set-blank.rst b/Documentation/media/uapi/dvb/video-set-blank.rst
index 3858c69496a5..901c3c80f167 100644
--- a/Documentation/media/uapi/dvb/video-set-blank.rst
+++ b/Documentation/media/uapi/dvb/video-set-blank.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_SET_BLANK:
diff --git a/Documentation/media/uapi/dvb/video-set-display-format.rst b/Documentation/media/uapi/dvb/video-set-display-format.rst
index 2ef7401781be..ffdefa341207 100644
--- a/Documentation/media/uapi/dvb/video-set-display-format.rst
+++ b/Documentation/media/uapi/dvb/video-set-display-format.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_SET_DISPLAY_FORMAT:
diff --git a/Documentation/media/uapi/dvb/video-set-format.rst b/Documentation/media/uapi/dvb/video-set-format.rst
index 4239a4e365bb..63e60214ab37 100644
--- a/Documentation/media/uapi/dvb/video-set-format.rst
+++ b/Documentation/media/uapi/dvb/video-set-format.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_SET_FORMAT:
diff --git a/Documentation/media/uapi/dvb/video-set-streamtype.rst b/Documentation/media/uapi/dvb/video-set-streamtype.rst
index 02a3c2e4e67c..845486a6e049 100644
--- a/Documentation/media/uapi/dvb/video-set-streamtype.rst
+++ b/Documentation/media/uapi/dvb/video-set-streamtype.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_SET_STREAMTYPE:
diff --git a/Documentation/media/uapi/dvb/video-slowmotion.rst b/Documentation/media/uapi/dvb/video-slowmotion.rst
index bd3d1a4070d9..32c934aaf2ba 100644
--- a/Documentation/media/uapi/dvb/video-slowmotion.rst
+++ b/Documentation/media/uapi/dvb/video-slowmotion.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_SLOWMOTION:
diff --git a/Documentation/media/uapi/dvb/video-stillpicture.rst b/Documentation/media/uapi/dvb/video-stillpicture.rst
index 6f943f5e27bd..58035a7630e6 100644
--- a/Documentation/media/uapi/dvb/video-stillpicture.rst
+++ b/Documentation/media/uapi/dvb/video-stillpicture.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_STILLPICTURE:
diff --git a/Documentation/media/uapi/dvb/video-stop.rst b/Documentation/media/uapi/dvb/video-stop.rst
index 474309ad31c2..732ace05e34b 100644
--- a/Documentation/media/uapi/dvb/video-stop.rst
+++ b/Documentation/media/uapi/dvb/video-stop.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_STOP:
diff --git a/Documentation/media/uapi/dvb/video-try-command.rst b/Documentation/media/uapi/dvb/video-try-command.rst
index 008e6a9ab696..37ecf8e91eb8 100644
--- a/Documentation/media/uapi/dvb/video-try-command.rst
+++ b/Documentation/media/uapi/dvb/video-try-command.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDEO_TRY_COMMAND:
diff --git a/Documentation/media/uapi/dvb/video.rst b/Documentation/media/uapi/dvb/video.rst
index e7d68cd0cf23..6d72ed0e2b2d 100644
--- a/Documentation/media/uapi/dvb/video.rst
+++ b/Documentation/media/uapi/dvb/video.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _dvb_video:
diff --git a/Documentation/media/uapi/dvb/video_function_calls.rst b/Documentation/media/uapi/dvb/video_function_calls.rst
index a4222b6cd2d3..9e8e49e52b19 100644
--- a/Documentation/media/uapi/dvb/video_function_calls.rst
+++ b/Documentation/media/uapi/dvb/video_function_calls.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _video_function_calls:
diff --git a/Documentation/media/uapi/dvb/video_types.rst b/Documentation/media/uapi/dvb/video_types.rst
index a0942171596c..2ed8aad84003 100644
--- a/Documentation/media/uapi/dvb/video_types.rst
+++ b/Documentation/media/uapi/dvb/video_types.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _video_types:
diff --git a/Documentation/media/uapi/fdl-appendix.rst b/Documentation/media/uapi/fdl-appendix.rst
index fd475180fed8..f8dc85d3939c 100644
--- a/Documentation/media/uapi/fdl-appendix.rst
+++ b/Documentation/media/uapi/fdl-appendix.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _fdl:
diff --git a/Documentation/media/uapi/gen-errors.rst b/Documentation/media/uapi/gen-errors.rst
index 689d3b101ede..043c312dc06d 100644
--- a/Documentation/media/uapi/gen-errors.rst
+++ b/Documentation/media/uapi/gen-errors.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _gen_errors:
diff --git a/Documentation/media/uapi/mediactl/media-controller-intro.rst b/Documentation/media/uapi/mediactl/media-controller-intro.rst
index 3e776c0d8276..281c559c2f3c 100644
--- a/Documentation/media/uapi/mediactl/media-controller-intro.rst
+++ b/Documentation/media/uapi/mediactl/media-controller-intro.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media-controller-intro:
diff --git a/Documentation/media/uapi/mediactl/media-controller-model.rst b/Documentation/media/uapi/mediactl/media-controller-model.rst
index 558273cf9570..b6d5902b556d 100644
--- a/Documentation/media/uapi/mediactl/media-controller-model.rst
+++ b/Documentation/media/uapi/mediactl/media-controller-model.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media-controller-model:
diff --git a/Documentation/media/uapi/mediactl/media-controller.rst b/Documentation/media/uapi/mediactl/media-controller.rst
index 66aff38cd499..6e624f690331 100644
--- a/Documentation/media/uapi/mediactl/media-controller.rst
+++ b/Documentation/media/uapi/mediactl/media-controller.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. include:: <isonum.txt>
diff --git a/Documentation/media/uapi/mediactl/media-func-close.rst b/Documentation/media/uapi/mediactl/media-func-close.rst
index a8f5203afe4b..369ccd4dee56 100644
--- a/Documentation/media/uapi/mediactl/media-func-close.rst
+++ b/Documentation/media/uapi/mediactl/media-func-close.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media-func-close:
diff --git a/Documentation/media/uapi/mediactl/media-func-ioctl.rst b/Documentation/media/uapi/mediactl/media-func-ioctl.rst
index fe072b7c8765..9a990d6480f5 100644
--- a/Documentation/media/uapi/mediactl/media-func-ioctl.rst
+++ b/Documentation/media/uapi/mediactl/media-func-ioctl.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media-func-ioctl:
diff --git a/Documentation/media/uapi/mediactl/media-func-open.rst b/Documentation/media/uapi/mediactl/media-func-open.rst
index 32f53016a9e5..cd2f840ddf73 100644
--- a/Documentation/media/uapi/mediactl/media-func-open.rst
+++ b/Documentation/media/uapi/mediactl/media-func-open.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media-func-open:
diff --git a/Documentation/media/uapi/mediactl/media-funcs.rst b/Documentation/media/uapi/mediactl/media-funcs.rst
index 260f9dcadcde..87b65df8252a 100644
--- a/Documentation/media/uapi/mediactl/media-funcs.rst
+++ b/Documentation/media/uapi/mediactl/media-funcs.rst
@@ -1,3 +1,12 @@
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
+
.. _media-user-func:
******************
diff --git a/Documentation/media/uapi/mediactl/media-header.rst b/Documentation/media/uapi/mediactl/media-header.rst
index 96f7b0155e5a..1cb7c88aeff0 100644
--- a/Documentation/media/uapi/mediactl/media-header.rst
+++ b/Documentation/media/uapi/mediactl/media-header.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media_header:
diff --git a/Documentation/media/uapi/mediactl/media-ioc-device-info.rst b/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
index c6f224e404b7..f8038cfb708c 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-device-info.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media_ioc_device_info:
diff --git a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
index 02738640e34e..6218d9cbdd83 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-enum-entities.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media_ioc_enum_entities:
diff --git a/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst b/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
index b89aaae373df..a982f16e55a4 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-enum-links.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media_ioc_enum_links:
diff --git a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
index 4e1c59238371..0a7d76ac8ded 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-g-topology.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media_ioc_g_topology:
diff --git a/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst b/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst
index 0f8b31874002..6d4ca4ada2e0 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+.. modify it under the terms of the GNU General Public License as
+.. published by the Free Software Foundation version 2 of
+.. the License.
+..
+.. This file is distributed in the hope that it will be useful,
+.. but WITHOUT ANY WARRANTY; without even the implied warranty of
+.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.. GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _media_ioc_request_alloc:
diff --git a/Documentation/media/uapi/mediactl/media-ioc-setup-link.rst b/Documentation/media/uapi/mediactl/media-ioc-setup-link.rst
index e345e7dc9ad7..ae39dbbe48a0 100644
--- a/Documentation/media/uapi/mediactl/media-ioc-setup-link.rst
+++ b/Documentation/media/uapi/mediactl/media-ioc-setup-link.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media_ioc_setup_link:
diff --git a/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst b/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst
index 6dd2d7fea714..fc8458746d51 100644
--- a/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst
+++ b/Documentation/media/uapi/mediactl/media-request-ioc-queue.rst
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+.. modify it under the terms of the GNU General Public License as
+.. published by the Free Software Foundation version 2 of
+.. the License.
+..
+.. This file is distributed in the hope that it will be useful,
+.. but WITHOUT ANY WARRANTY; without even the implied warranty of
+.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.. GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _media_request_ioc_queue:
diff --git a/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst b/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst
index febe888494c8..61381e87665a 100644
--- a/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst
+++ b/Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+.. modify it under the terms of the GNU General Public License as
+.. published by the Free Software Foundation version 2 of
+.. the License.
+..
+.. This file is distributed in the hope that it will be useful,
+.. but WITHOUT ANY WARRANTY; without even the implied warranty of
+.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.. GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _media_request_ioc_reinit:
diff --git a/Documentation/media/uapi/mediactl/media-types.rst b/Documentation/media/uapi/mediactl/media-types.rst
index e4c57c8f4553..8627587b7075 100644
--- a/Documentation/media/uapi/mediactl/media-types.rst
+++ b/Documentation/media/uapi/mediactl/media-types.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _media-controller-types:
diff --git a/Documentation/media/uapi/mediactl/request-api.rst b/Documentation/media/uapi/mediactl/request-api.rst
index 5f4a23029c48..4b25ad03f45a 100644
--- a/Documentation/media/uapi/mediactl/request-api.rst
+++ b/Documentation/media/uapi/mediactl/request-api.rst
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+.. modify it under the terms of the GNU General Public License as
+.. published by the Free Software Foundation version 2 of
+.. the License.
+..
+.. This file is distributed in the hope that it will be useful,
+.. but WITHOUT ANY WARRANTY; without even the implied warranty of
+.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.. GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _media-request-api:
diff --git a/Documentation/media/uapi/mediactl/request-func-close.rst b/Documentation/media/uapi/mediactl/request-func-close.rst
index 098d7f2b9548..2cff7770558e 100644
--- a/Documentation/media/uapi/mediactl/request-func-close.rst
+++ b/Documentation/media/uapi/mediactl/request-func-close.rst
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+.. modify it under the terms of the GNU General Public License as
+.. published by the Free Software Foundation version 2 of
+.. the License.
+..
+.. This file is distributed in the hope that it will be useful,
+.. but WITHOUT ANY WARRANTY; without even the implied warranty of
+.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.. GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _request-func-close:
diff --git a/Documentation/media/uapi/mediactl/request-func-ioctl.rst b/Documentation/media/uapi/mediactl/request-func-ioctl.rst
index ff7b072a6999..de0781c61873 100644
--- a/Documentation/media/uapi/mediactl/request-func-ioctl.rst
+++ b/Documentation/media/uapi/mediactl/request-func-ioctl.rst
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+.. modify it under the terms of the GNU General Public License as
+.. published by the Free Software Foundation version 2 of
+.. the License.
+..
+.. This file is distributed in the hope that it will be useful,
+.. but WITHOUT ANY WARRANTY; without even the implied warranty of
+.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.. GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _request-func-ioctl:
diff --git a/Documentation/media/uapi/mediactl/request-func-poll.rst b/Documentation/media/uapi/mediactl/request-func-poll.rst
index 85191254f381..ebaf33e21873 100644
--- a/Documentation/media/uapi/mediactl/request-func-poll.rst
+++ b/Documentation/media/uapi/mediactl/request-func-poll.rst
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+.. modify it under the terms of the GNU General Public License as
+.. published by the Free Software Foundation version 2 of
+.. the License.
+..
+.. This file is distributed in the hope that it will be useful,
+.. but WITHOUT ANY WARRANTY; without even the implied warranty of
+.. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+.. GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
.. _request-func-poll:
diff --git a/Documentation/media/uapi/rc/keytable.c.rst b/Documentation/media/uapi/rc/keytable.c.rst
index 217237f93b37..46f98569e999 100644
--- a/Documentation/media/uapi/rc/keytable.c.rst
+++ b/Documentation/media/uapi/rc/keytable.c.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
file: uapi/v4l/keytable.c
=========================
diff --git a/Documentation/media/uapi/rc/lirc-dev-intro.rst b/Documentation/media/uapi/rc/lirc-dev-intro.rst
index 11516c8bff62..1a901d8e1797 100644
--- a/Documentation/media/uapi/rc/lirc-dev-intro.rst
+++ b/Documentation/media/uapi/rc/lirc-dev-intro.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_dev_intro:
diff --git a/Documentation/media/uapi/rc/lirc-dev.rst b/Documentation/media/uapi/rc/lirc-dev.rst
index 03cde25f5859..7058e0b2296a 100644
--- a/Documentation/media/uapi/rc/lirc-dev.rst
+++ b/Documentation/media/uapi/rc/lirc-dev.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_dev:
diff --git a/Documentation/media/uapi/rc/lirc-func.rst b/Documentation/media/uapi/rc/lirc-func.rst
index ddb4620de294..25058369f724 100644
--- a/Documentation/media/uapi/rc/lirc-func.rst
+++ b/Documentation/media/uapi/rc/lirc-func.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_func:
diff --git a/Documentation/media/uapi/rc/lirc-get-features.rst b/Documentation/media/uapi/rc/lirc-get-features.rst
index 889a8807037b..1d590df8164a 100644
--- a/Documentation/media/uapi/rc/lirc-get-features.rst
+++ b/Documentation/media/uapi/rc/lirc-get-features.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_get_features:
diff --git a/Documentation/media/uapi/rc/lirc-get-rec-mode.rst b/Documentation/media/uapi/rc/lirc-get-rec-mode.rst
index 2722118484fa..0a3e02aca80e 100644
--- a/Documentation/media/uapi/rc/lirc-get-rec-mode.rst
+++ b/Documentation/media/uapi/rc/lirc-get-rec-mode.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_get_rec_mode:
.. _lirc_set_rec_mode:
diff --git a/Documentation/media/uapi/rc/lirc-get-rec-resolution.rst b/Documentation/media/uapi/rc/lirc-get-rec-resolution.rst
index 6e016edc2bc4..f560b694ccf2 100644
--- a/Documentation/media/uapi/rc/lirc-get-rec-resolution.rst
+++ b/Documentation/media/uapi/rc/lirc-get-rec-resolution.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_get_rec_resolution:
diff --git a/Documentation/media/uapi/rc/lirc-get-send-mode.rst b/Documentation/media/uapi/rc/lirc-get-send-mode.rst
index c44e61a79ad1..4f440c697052 100644
--- a/Documentation/media/uapi/rc/lirc-get-send-mode.rst
+++ b/Documentation/media/uapi/rc/lirc-get-send-mode.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_get_send_mode:
.. _lirc_set_send_mode:
diff --git a/Documentation/media/uapi/rc/lirc-get-timeout.rst b/Documentation/media/uapi/rc/lirc-get-timeout.rst
index c94bc5dcaa8e..1de214529f27 100644
--- a/Documentation/media/uapi/rc/lirc-get-timeout.rst
+++ b/Documentation/media/uapi/rc/lirc-get-timeout.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_get_min_timeout:
.. _lirc_get_max_timeout:
diff --git a/Documentation/media/uapi/rc/lirc-header.rst b/Documentation/media/uapi/rc/lirc-header.rst
index 487fe00e5517..c9b4f33e1031 100644
--- a/Documentation/media/uapi/rc/lirc-header.rst
+++ b/Documentation/media/uapi/rc/lirc-header.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_header:
diff --git a/Documentation/media/uapi/rc/lirc-read.rst b/Documentation/media/uapi/rc/lirc-read.rst
index c024aaffb8ad..a8fedfaaf0ab 100644
--- a/Documentation/media/uapi/rc/lirc-read.rst
+++ b/Documentation/media/uapi/rc/lirc-read.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc-read:
diff --git a/Documentation/media/uapi/rc/lirc-set-measure-carrier-mode.rst b/Documentation/media/uapi/rc/lirc-set-measure-carrier-mode.rst
index 6307b5715595..c80acd85e369 100644
--- a/Documentation/media/uapi/rc/lirc-set-measure-carrier-mode.rst
+++ b/Documentation/media/uapi/rc/lirc-set-measure-carrier-mode.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_set_measure_carrier_mode:
diff --git a/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst b/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst
index a89246806c4b..443681d5cc10 100644
--- a/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst
+++ b/Documentation/media/uapi/rc/lirc-set-rec-carrier-range.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_set_rec_carrier_range:
diff --git a/Documentation/media/uapi/rc/lirc-set-rec-carrier.rst b/Documentation/media/uapi/rc/lirc-set-rec-carrier.rst
index a411c0330818..cbe1e48b2a4a 100644
--- a/Documentation/media/uapi/rc/lirc-set-rec-carrier.rst
+++ b/Documentation/media/uapi/rc/lirc-set-rec-carrier.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_set_rec_carrier:
diff --git a/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst b/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst
index 86353e602695..d06d69414c1e 100644
--- a/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst
+++ b/Documentation/media/uapi/rc/lirc-set-rec-timeout-reports.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_set_rec_timeout_reports:
diff --git a/Documentation/media/uapi/rc/lirc-set-rec-timeout.rst b/Documentation/media/uapi/rc/lirc-set-rec-timeout.rst
index a833a6a4c25a..163ac6065737 100644
--- a/Documentation/media/uapi/rc/lirc-set-rec-timeout.rst
+++ b/Documentation/media/uapi/rc/lirc-set-rec-timeout.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_set_rec_timeout:
.. _lirc_get_rec_timeout:
diff --git a/Documentation/media/uapi/rc/lirc-set-send-carrier.rst b/Documentation/media/uapi/rc/lirc-set-send-carrier.rst
index 42c8cfb42df5..cffc6c1e15cc 100644
--- a/Documentation/media/uapi/rc/lirc-set-send-carrier.rst
+++ b/Documentation/media/uapi/rc/lirc-set-send-carrier.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_set_send_carrier:
diff --git a/Documentation/media/uapi/rc/lirc-set-send-duty-cycle.rst b/Documentation/media/uapi/rc/lirc-set-send-duty-cycle.rst
index 20d07c2a37a5..08ab3d1a96cd 100644
--- a/Documentation/media/uapi/rc/lirc-set-send-duty-cycle.rst
+++ b/Documentation/media/uapi/rc/lirc-set-send-duty-cycle.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_set_send_duty_cycle:
diff --git a/Documentation/media/uapi/rc/lirc-set-transmitter-mask.rst b/Documentation/media/uapi/rc/lirc-set-transmitter-mask.rst
index 69b7ad8c2afb..889a739eaf0d 100644
--- a/Documentation/media/uapi/rc/lirc-set-transmitter-mask.rst
+++ b/Documentation/media/uapi/rc/lirc-set-transmitter-mask.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_set_transmitter_mask:
diff --git a/Documentation/media/uapi/rc/lirc-set-wideband-receiver.rst b/Documentation/media/uapi/rc/lirc-set-wideband-receiver.rst
index 0415c6a54f23..592715452fce 100644
--- a/Documentation/media/uapi/rc/lirc-set-wideband-receiver.rst
+++ b/Documentation/media/uapi/rc/lirc-set-wideband-receiver.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc_set_wideband_receiver:
diff --git a/Documentation/media/uapi/rc/lirc-write.rst b/Documentation/media/uapi/rc/lirc-write.rst
index d4566b0a2015..6adf5ddbac99 100644
--- a/Documentation/media/uapi/rc/lirc-write.rst
+++ b/Documentation/media/uapi/rc/lirc-write.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _lirc-write:
diff --git a/Documentation/media/uapi/rc/rc-intro.rst b/Documentation/media/uapi/rc/rc-intro.rst
index 3707c29d37ed..37c5f90c76e7 100644
--- a/Documentation/media/uapi/rc/rc-intro.rst
+++ b/Documentation/media/uapi/rc/rc-intro.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _Remote_controllers_Intro:
diff --git a/Documentation/media/uapi/rc/rc-sysfs-nodes.rst b/Documentation/media/uapi/rc/rc-sysfs-nodes.rst
index 2d01358d5504..b8e8319e3317 100644
--- a/Documentation/media/uapi/rc/rc-sysfs-nodes.rst
+++ b/Documentation/media/uapi/rc/rc-sysfs-nodes.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _remote_controllers_sysfs_nodes:
diff --git a/Documentation/media/uapi/rc/rc-table-change.rst b/Documentation/media/uapi/rc/rc-table-change.rst
index d604896bca87..4a2e601b89fb 100644
--- a/Documentation/media/uapi/rc/rc-table-change.rst
+++ b/Documentation/media/uapi/rc/rc-table-change.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _Remote_controllers_table_change:
diff --git a/Documentation/media/uapi/rc/rc-tables.rst b/Documentation/media/uapi/rc/rc-tables.rst
index c8ae9479f842..cb670d10998b 100644
--- a/Documentation/media/uapi/rc/rc-tables.rst
+++ b/Documentation/media/uapi/rc/rc-tables.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _Remote_controllers_tables:
diff --git a/Documentation/media/uapi/rc/remote_controllers.rst b/Documentation/media/uapi/rc/remote_controllers.rst
index 46a8acb82125..3051f7abe11d 100644
--- a/Documentation/media/uapi/rc/remote_controllers.rst
+++ b/Documentation/media/uapi/rc/remote_controllers.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. include:: <isonum.txt>
diff --git a/Documentation/media/uapi/v4l/app-pri.rst b/Documentation/media/uapi/v4l/app-pri.rst
index a8c41a7ec396..c25c1271b4f6 100644
--- a/Documentation/media/uapi/v4l/app-pri.rst
+++ b/Documentation/media/uapi/v4l/app-pri.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _app-pri:
@@ -8,7 +15,7 @@ Application Priority
When multiple applications share a device it may be desirable to assign
them different priorities. Contrary to the traditional "rm -rf /" school
-of thought a video recording application could for example block other
+of thought, a video recording application could for example block other
applications from changing video controls or switching the current TV
channel. Another objective is to permit low priority applications
working in background, which can be preempted by user controlled
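(Aside, not part of the patch: the paragraph above describes the V4L2 priority mechanism, which is easy to show concretely. A minimal sketch of a recording application claiming priority via the VIDIOC_S_PRIORITY ioctl; the device path /dev/video0 and the abbreviated error handling are assumptions.)

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
        int fd = open("/dev/video0", O_RDWR);
        enum v4l2_priority prio = V4L2_PRIORITY_RECORD;

        /* Claim record priority; the ioctl fails with EBUSY if another
         * file handle already holds a higher priority. */
        if (fd < 0 || ioctl(fd, VIDIOC_S_PRIORITY, &prio) < 0)
                return 1;

        /* From here on, lower-priority applications get EBUSY when they
         * try to change controls or switch the current channel. */
        return 0;
}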
diff --git a/Documentation/media/uapi/v4l/async.rst b/Documentation/media/uapi/v4l/async.rst
index 5affc0adb95b..be9539313f60 100644
--- a/Documentation/media/uapi/v4l/async.rst
+++ b/Documentation/media/uapi/v4l/async.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _async:
diff --git a/Documentation/media/uapi/v4l/audio.rst b/Documentation/media/uapi/v4l/audio.rst
index 5ec99a2809fe..4c7fdbc8a860 100644
--- a/Documentation/media/uapi/v4l/audio.rst
+++ b/Documentation/media/uapi/v4l/audio.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _audio:
@@ -31,7 +38,7 @@ outputs applications can enumerate them with the
:ref:`VIDIOC_ENUMAUDOUT <VIDIOC_ENUMAUDOUT>` ioctl, respectively.
The struct :c:type:`v4l2_audio` returned by the
:ref:`VIDIOC_ENUMAUDIO` ioctl also contains signal
-:status information applicable when the current audio input is queried.
+status information applicable when the current audio input is queried.
The :ref:`VIDIOC_G_AUDIO <VIDIOC_G_AUDIO>` and
:ref:`VIDIOC_G_AUDOUT <VIDIOC_G_AUDOUT>` ioctls report the current
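(Aside, not part of the patch: since the corrected sentence concerns VIDIOC_ENUMAUDIO, here is a minimal sketch of enumerating the audio inputs of an already-open V4L2 device; printing the name is illustrative only.)

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static void enum_audio_inputs(int fd)
{
        struct v4l2_audio audio;

        memset(&audio, 0, sizeof(audio));
        /* VIDIOC_ENUMAUDIO fails with EINVAL once the index runs past
         * the last audio input; each call fills in the input's name
         * and capability flags. */
        while (ioctl(fd, VIDIOC_ENUMAUDIO, &audio) == 0) {
                printf("audio input %u: %s\n", audio.index,
                       (const char *)audio.name);
                audio.index++;
        }
}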
diff --git a/Documentation/media/uapi/v4l/bayer.svg b/Documentation/media/uapi/v4l/bayer.svg
index c395113d1876..c5bf85103901 100644
--- a/Documentation/media/uapi/v4l/bayer.svg
+++ b/Documentation/media/uapi/v4l/bayer.svg
@@ -1,4 +1,31 @@
<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ This file is dual-licensed: you can use it either under the terms
+ of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+ dual licensing only applies to this file, and not this project as a
+ whole.
+
+ a) This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation version 2 of
+ the License.
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Or, alternatively,
+
+ b) Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+
+ TODO: replace it with GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg id="svg2" width="164.15mm" height="46.771mm" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" preserveAspectRatio="xMidYMid" version="1.2" viewBox="0 0 16415.333 4677.1107" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"><metadata id="metadata652"><rdf:RDF><cc:Work rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/><dc:title/></cc:Work></rdf:RDF></metadata><g id="g186" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id6"><rect id="rect189" class="BoundingBox" x="3299" y="3199" width="1303" height="1203" fill="none"/><path id="path191" d="m3950 4400h-650v-1200h1300v1200h-650z" fill="#00f"/><path id="path193" d="m3950
4400h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text195" class="TextShape"><tspan id="tspan197" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan199" class="TextPosition" x="3739" y="4021"><tspan id="tspan201" fill="#ffffff">B</tspan></tspan></tspan></text>
</g></g><g id="g203" class="com.sun.star.drawing.CustomShape" transform="translate(-3285.9 -3185.9)"><g id="id7"><rect id="rect206" class="BoundingBox" x="4599" y="3199" width="1303" height="1203" fill="none"/><path id="path208" d="m5250 4400h-650v-1200h1300v1200h-650z" fill="#0c0"/><path id="path210" d="m5250 4400h-650v-1200h1300v1200h-650z" fill="none" stroke="#3465a4"/><text id="text212" class="TextShape"><tspan id="tspan214" class="TextParagraph" font-family="sans-serif" font-size="635px" font-weight="400"><tspan id="tspan216" class="TextPosition" x="5003" y="4021"><tspan id="tspan218" fill="#ffffff">G</tspan></tspan></tspan></text>
diff --git a/Documentation/media/uapi/v4l/biblio.rst b/Documentation/media/uapi/v4l/biblio.rst
index 386d6cf83e9c..ec33768c055e 100644
--- a/Documentation/media/uapi/v4l/biblio.rst
+++ b/Documentation/media/uapi/v4l/biblio.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
**********
References
diff --git a/Documentation/media/uapi/v4l/buffer.rst b/Documentation/media/uapi/v4l/buffer.rst
index 2e266d32470a..86878bb0087f 100644
--- a/Documentation/media/uapi/v4l/buffer.rst
+++ b/Documentation/media/uapi/v4l/buffer.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _buffer:
@@ -465,6 +472,9 @@ enum v4l2_buf_type
* - ``V4L2_BUF_TYPE_META_CAPTURE``
- 13
- Buffer for metadata capture, see :ref:`metadata`.
+ * - ``V4L2_BUF_TYPE_META_OUTPUT``
+ - 14
+ - Buffer for metadata output, see :ref:`metadata`.
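(Aside, not part of the patch: the new table row documents V4L2_BUF_TYPE_META_OUTPUT. A minimal sketch of querying the metadata output format on a driver that supports it; the helper name is hypothetical.)

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Ask the driver which metadata format it expects on its output queue. */
static int get_meta_output_format(int fd, struct v4l2_format *fmt)
{
        memset(fmt, 0, sizeof(*fmt));
        fmt->type = V4L2_BUF_TYPE_META_OUTPUT;
        /* On success, fmt->fmt.meta.dataformat and fmt->fmt.meta.buffersize
         * describe the metadata layout and the per-buffer size. */
        return ioctl(fd, VIDIOC_G_FMT, fmt);
}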
diff --git a/Documentation/media/uapi/v4l/capture-example.rst b/Documentation/media/uapi/v4l/capture-example.rst
index ac1cd057e25b..130ca47ef796 100644
--- a/Documentation/media/uapi/v4l/capture-example.rst
+++ b/Documentation/media/uapi/v4l/capture-example.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _capture-example:
diff --git a/Documentation/media/uapi/v4l/capture.c.rst b/Documentation/media/uapi/v4l/capture.c.rst
index 56525a0fb2fa..b4652c2351f2 100644
--- a/Documentation/media/uapi/v4l/capture.c.rst
+++ b/Documentation/media/uapi/v4l/capture.c.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
file: media/v4l/capture.c
=========================
diff --git a/Documentation/media/uapi/v4l/colorspaces-defs.rst b/Documentation/media/uapi/v4l/colorspaces-defs.rst
index f24615544792..c4e8fc620379 100644
--- a/Documentation/media/uapi/v4l/colorspaces-defs.rst
+++ b/Documentation/media/uapi/v4l/colorspaces-defs.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
****************************
Defining Colorspaces in V4L2
diff --git a/Documentation/media/uapi/v4l/colorspaces-details.rst b/Documentation/media/uapi/v4l/colorspaces-details.rst
index 09fabf4cd412..8b0ba3668101 100644
--- a/Documentation/media/uapi/v4l/colorspaces-details.rst
+++ b/Documentation/media/uapi/v4l/colorspaces-details.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
********************************
Detailed Colorspace Descriptions
diff --git a/Documentation/media/uapi/v4l/colorspaces.rst b/Documentation/media/uapi/v4l/colorspaces.rst
index 322eb94c1d44..c5a560f0c13d 100644
--- a/Documentation/media/uapi/v4l/colorspaces.rst
+++ b/Documentation/media/uapi/v4l/colorspaces.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _colorspaces:
diff --git a/Documentation/media/uapi/v4l/common-defs.rst b/Documentation/media/uapi/v4l/common-defs.rst
index 39058216b630..504c6c93c9b0 100644
--- a/Documentation/media/uapi/v4l/common-defs.rst
+++ b/Documentation/media/uapi/v4l/common-defs.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _common-defs:
diff --git a/Documentation/media/uapi/v4l/common.rst b/Documentation/media/uapi/v4l/common.rst
index 5f93e71122ef..889f2f2632a1 100644
--- a/Documentation/media/uapi/v4l/common.rst
+++ b/Documentation/media/uapi/v4l/common.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _common:
diff --git a/Documentation/media/uapi/v4l/compat.rst b/Documentation/media/uapi/v4l/compat.rst
index 8b5e1cebd8f4..f35575a300b4 100644
--- a/Documentation/media/uapi/v4l/compat.rst
+++ b/Documentation/media/uapi/v4l/compat.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _compat:
diff --git a/Documentation/media/uapi/v4l/constraints.svg b/Documentation/media/uapi/v4l/constraints.svg
index 7e5d7185ca49..08f9f8b0985e 100644
--- a/Documentation/media/uapi/v4l/constraints.svg
+++ b/Documentation/media/uapi/v4l/constraints.svg
@@ -1,4 +1,31 @@
<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ This file is dual-licensed: you can use it either under the terms
+ of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+ dual licensing only applies to this file, and not this project as a
+ whole.
+
+ a) This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation version 2 of
+ the License.
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Or, alternatively,
+
+ b) Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+
+ TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg id="svg2" width="249.01mm" height="143.01mm" fill-rule="evenodd" stroke-linejoin="round" stroke-width="28.222" preserveAspectRatio="xMidYMid" version="1.2" viewBox="0 0 24900.998 14300.999" xml:space="preserve" xmlns="http://www.w3.org/2000/svg" xmlns:cc="http://creativecommons.org/ns#" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"><metadata id="metadata325"><rdf:RDF><cc:Work rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/><dc:title/></cc:Work></rdf:RDF></metadata><defs id="defs4" class="ClipPathGroup"><marker id="marker6261" overflow="visible" orient="auto"><path id="path6263" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#f00" fill-rule="evenodd" stroke="#f00" stroke-width="1pt"/></marker><marker id="marker6125" overflow="visible"
orient="auto"><path id="path6127" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#f00" fill-rule="evenodd" stroke="#f00" stroke-width="1pt"/></marker><marker id="marker6001" overflow="visible" orient="auto"><path id="path6003" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#f00" fill-rule="evenodd" stroke="#f00" stroke-width="1pt"/></marker><marker id="marker5693" overflow="visible" orient="auto"><path id="path5695" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#f00" fill-rule="evenodd" stroke="#f00" stroke-width="1pt"/></marker><marker id="marker5575" overflow="visible" orient="auto"><path id="path5577" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#000080" fill-rule="evenodd" stroke="#000080" stroke-width="1pt"/></marker><marker id="marker5469" overflow="visible"
orient="auto"><path id="path5471" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#000080" fill-rule="evenodd" stroke="#000080" stroke-width="1pt"/></marker><marker id="marker5259" overflow="visible" orient="auto"><path id="path5261" transform="matrix(-.4 0 0 -.4 -4 0)" d="m0 0 5-5-17.5 5 17.5 5-5-5z" fill="#000080" fill-rule="evenodd" stroke="#000080" stroke-width="1pt"/></marker><marker id="Arrow2Mend" overflow="visible" orient="auto"><path id="path4241" transform="scale(-.6)" d="m8.7186 4.0337-10.926-4.0177 10.926-4.0177c-1.7455 2.3721-1.7354 5.6175-6e-7 8.0354z" fill="#000080" fill-rule="evenodd" stroke="#000080" stroke-linejoin="round" stroke-width=".625"/></marker></defs><g id="g204" class="com.sun.star.drawing.CustomShape" transform="translate(-1350,-3250)"><g id="id6"><rect id="rect207" class="BoundingBox" x="1350" y="3250" width="24901" height="14301"
diff --git a/Documentation/media/uapi/v4l/control.rst b/Documentation/media/uapi/v4l/control.rst
index c1e6adbe83d7..0d46526b5935 100644
--- a/Documentation/media/uapi/v4l/control.rst
+++ b/Documentation/media/uapi/v4l/control.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _control:
diff --git a/Documentation/media/uapi/v4l/crop.rst b/Documentation/media/uapi/v4l/crop.rst
index 45e8a895a320..ada7c22e6291 100644
--- a/Documentation/media/uapi/v4l/crop.rst
+++ b/Documentation/media/uapi/v4l/crop.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _crop:
diff --git a/Documentation/media/uapi/v4l/crop.svg b/Documentation/media/uapi/v4l/crop.svg
index 3878fe4c49e9..32d72598d135 100644
--- a/Documentation/media/uapi/v4l/crop.svg
+++ b/Documentation/media/uapi/v4l/crop.svg
@@ -1,6 +1,14 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<!--
+ Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+ TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
diff --git a/Documentation/media/uapi/v4l/depth-formats.rst b/Documentation/media/uapi/v4l/depth-formats.rst
index d1641e9687a6..1bfd0b82cb85 100644
--- a/Documentation/media/uapi/v4l/depth-formats.rst
+++ b/Documentation/media/uapi/v4l/depth-formats.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _depth-formats:
@@ -14,3 +21,4 @@ Depth data provides distance to points, mapped onto the image plane
pixfmt-inzi
pixfmt-z16
+ pixfmt-cnf4
diff --git a/Documentation/media/uapi/v4l/dev-capture.rst b/Documentation/media/uapi/v4l/dev-capture.rst
index 4218742ab5d9..134e22b32338 100644
--- a/Documentation/media/uapi/v4l/dev-capture.rst
+++ b/Documentation/media/uapi/v4l/dev-capture.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _capture:
@@ -99,6 +106,6 @@ requests and always returns default parameters as :ref:`VIDIOC_G_FMT <VIDIOC_G_F
Reading Images
==============
-A video capture device may support the ::ref:`read() function <func-read>`
+A video capture device may support the :ref:`read() function <func-read>`
and/or streaming (:ref:`memory mapping <func-mmap>` or
:ref:`user pointer <userp>`) I/O. See :ref:`io` for details.
diff --git a/Documentation/media/uapi/v4l/dev-codec.rst b/Documentation/media/uapi/v4l/dev-codec.rst
index c61e938bd8dc..b5e017c17834 100644
--- a/Documentation/media/uapi/v4l/dev-codec.rst
+++ b/Documentation/media/uapi/v4l/dev-codec.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _codec:
diff --git a/Documentation/media/uapi/v4l/dev-effect.rst b/Documentation/media/uapi/v4l/dev-effect.rst
index b946cc9e1064..b165e2c20910 100644
--- a/Documentation/media/uapi/v4l/dev-effect.rst
+++ b/Documentation/media/uapi/v4l/dev-effect.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _effect:
diff --git a/Documentation/media/uapi/v4l/dev-event.rst b/Documentation/media/uapi/v4l/dev-event.rst
index a06ec4d65359..6029101fe1d7 100644
--- a/Documentation/media/uapi/v4l/dev-event.rst
+++ b/Documentation/media/uapi/v4l/dev-event.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _event:
diff --git a/Documentation/media/uapi/v4l/dev-meta.rst b/Documentation/media/uapi/v4l/dev-meta.rst
index b65dc078abeb..c5dbe882be65 100644
--- a/Documentation/media/uapi/v4l/dev-meta.rst
+++ b/Documentation/media/uapi/v4l/dev-meta.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _metadata:
@@ -7,21 +14,27 @@ Metadata Interface
******************
Metadata refers to any non-image data that supplements video frames with
-additional information. This may include statistics computed over the image
-or frame capture parameters supplied by the image source. This interface is
-intended for transfer of metadata to userspace and control of that operation.
+additional information. This may include statistics computed over the image,
+frame capture parameters supplied by the image source, or device-specific
+parameters that specify how the device processes images. This interface is
+intended for the transfer of metadata between userspace and the hardware, and
+for control of that operation.
-The metadata interface is implemented on video capture device nodes. The device
-can be dedicated to metadata or can implement both video and metadata capture
-as specified in its reported capabilities.
+The metadata interface is implemented on video device nodes. The device can be
+dedicated to metadata or can support both video and metadata as specified in its
+reported capabilities.
Querying Capabilities
=====================
-Device nodes supporting the metadata interface set the ``V4L2_CAP_META_CAPTURE``
-flag in the ``device_caps`` field of the
+Device nodes supporting the metadata capture interface set the
+``V4L2_CAP_META_CAPTURE`` flag in the ``device_caps`` field of the
:c:type:`v4l2_capability` structure returned by the :c:func:`VIDIOC_QUERYCAP`
-ioctl. That flag means the device can capture metadata to memory.
+ioctl. That flag means the device can capture metadata to memory. Similarly,
+device nodes supporting the metadata output interface set the
+``V4L2_CAP_META_OUTPUT`` flag in the ``device_caps`` field of the
+:c:type:`v4l2_capability` structure. That flag means the device can read
+metadata from memory.
At least one of the read/write or streaming I/O methods must be supported.
@@ -35,10 +48,11 @@ to the basic :ref:`format` ioctls, the :c:func:`VIDIOC_ENUM_FMT` ioctl must be
supported as well.
To use the :ref:`format` ioctls applications set the ``type`` field of the
-:c:type:`v4l2_format` structure to ``V4L2_BUF_TYPE_META_CAPTURE`` and use the
-:c:type:`v4l2_meta_format` ``meta`` member of the ``fmt`` union as needed per
-the desired operation. Both drivers and applications must set the remainder of
-the :c:type:`v4l2_format` structure to 0.
+:c:type:`v4l2_format` structure to ``V4L2_BUF_TYPE_META_CAPTURE`` or to
+``V4L2_BUF_TYPE_META_OUTPUT`` and use the :c:type:`v4l2_meta_format` ``meta``
+member of the ``fmt`` union as needed per the desired operation. Both drivers
+and applications must set the remainder of the :c:type:`v4l2_format` structure
+to 0.
.. c:type:: v4l2_meta_format
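
A minimal sketch of the capability check and format negotiation described
above, for the capture direction. The device path and the printed fields are
illustrative assumptions, not part of the interface description:

.. code-block:: c

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        struct v4l2_capability cap;
        struct v4l2_format fmt;
        int fd = open("/dev/video0", O_RDWR);   /* assumed device node */

        if (fd < 0)
            return 1;

        /* The device must report the metadata capture capability. */
        if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0 ||
            !(cap.device_caps & V4L2_CAP_META_CAPTURE)) {
            close(fd);
            return 1;
        }

        /* Applications must zero the remainder of v4l2_format. */
        memset(&fmt, 0, sizeof(fmt));
        fmt.type = V4L2_BUF_TYPE_META_CAPTURE;

        if (ioctl(fd, VIDIOC_G_FMT, &fmt) == 0)
            printf("meta dataformat %08x, buffersize %u\n",
                   fmt.fmt.meta.dataformat, fmt.fmt.meta.buffersize);

        close(fd);
        return 0;
    }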
diff --git a/Documentation/media/uapi/v4l/dev-osd.rst b/Documentation/media/uapi/v4l/dev-osd.rst
index 71da85ed7e4b..d3ad67da6386 100644
--- a/Documentation/media/uapi/v4l/dev-osd.rst
+++ b/Documentation/media/uapi/v4l/dev-osd.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _osd:
diff --git a/Documentation/media/uapi/v4l/dev-output.rst b/Documentation/media/uapi/v4l/dev-output.rst
index 342eb4931f5c..3fe1b39696ed 100644
--- a/Documentation/media/uapi/v4l/dev-output.rst
+++ b/Documentation/media/uapi/v4l/dev-output.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _output:
diff --git a/Documentation/media/uapi/v4l/dev-overlay.rst b/Documentation/media/uapi/v4l/dev-overlay.rst
index 9be14b55e305..b91b3837d4e7 100644
--- a/Documentation/media/uapi/v4l/dev-overlay.rst
+++ b/Documentation/media/uapi/v4l/dev-overlay.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _overlay:
diff --git a/Documentation/media/uapi/v4l/dev-radio.rst b/Documentation/media/uapi/v4l/dev-radio.rst
index 2b5b836574eb..133eb0e788c2 100644
--- a/Documentation/media/uapi/v4l/dev-radio.rst
+++ b/Documentation/media/uapi/v4l/dev-radio.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _radio:
diff --git a/Documentation/media/uapi/v4l/dev-raw-vbi.rst b/Documentation/media/uapi/v4l/dev-raw-vbi.rst
index 2e6878b624f6..d6a707f0b24f 100644
--- a/Documentation/media/uapi/v4l/dev-raw-vbi.rst
+++ b/Documentation/media/uapi/v4l/dev-raw-vbi.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _raw-vbi:
diff --git a/Documentation/media/uapi/v4l/dev-rds.rst b/Documentation/media/uapi/v4l/dev-rds.rst
index 9c4e39dd66bd..624d6f95b842 100644
--- a/Documentation/media/uapi/v4l/dev-rds.rst
+++ b/Documentation/media/uapi/v4l/dev-rds.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _rds:
diff --git a/Documentation/media/uapi/v4l/dev-sdr.rst b/Documentation/media/uapi/v4l/dev-sdr.rst
index b3e828d8cb1f..75595c58cb5b 100644
--- a/Documentation/media/uapi/v4l/dev-sdr.rst
+++ b/Documentation/media/uapi/v4l/dev-sdr.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _sdr:
diff --git a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
index d311a6866b3b..0aa6cb8a272b 100644
--- a/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
+++ b/Documentation/media/uapi/v4l/dev-sliced-vbi.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _sliced:
diff --git a/Documentation/media/uapi/v4l/dev-subdev.rst b/Documentation/media/uapi/v4l/dev-subdev.rst
index d20d945803a7..2c2768c7343b 100644
--- a/Documentation/media/uapi/v4l/dev-subdev.rst
+++ b/Documentation/media/uapi/v4l/dev-subdev.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _subdev:
diff --git a/Documentation/media/uapi/v4l/dev-teletext.rst b/Documentation/media/uapi/v4l/dev-teletext.rst
index 2648f6b37ea3..35e8c4b35458 100644
--- a/Documentation/media/uapi/v4l/dev-teletext.rst
+++ b/Documentation/media/uapi/v4l/dev-teletext.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _ttx:
@@ -10,7 +17,7 @@ This interface was aimed at devices receiving and demodulating Teletext
data [:ref:`ets300706`, :ref:`itu653`], evaluating the Teletext
packages and storing formatted pages in cache memory. Such devices are
usually implemented as microcontrollers with serial interface
-(I:sup:`2`\ C) and could be found on old TV cards, dedicated Teletext
+(I\ :sup:`2`\ C) and could be found on old TV cards, dedicated Teletext
decoding cards and home-brew devices connected to the PC parallel port.
The Teletext API was designed by Martin Buck. It was defined in the
diff --git a/Documentation/media/uapi/v4l/dev-touch.rst b/Documentation/media/uapi/v4l/dev-touch.rst
index 98797f255ce0..356f01385221 100644
--- a/Documentation/media/uapi/v4l/dev-touch.rst
+++ b/Documentation/media/uapi/v4l/dev-touch.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _touch:
diff --git a/Documentation/media/uapi/v4l/devices.rst b/Documentation/media/uapi/v4l/devices.rst
index fb7f8c26cf09..5dbe9d13b6e6 100644
--- a/Documentation/media/uapi/v4l/devices.rst
+++ b/Documentation/media/uapi/v4l/devices.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _devices:
diff --git a/Documentation/media/uapi/v4l/diff-v4l.rst b/Documentation/media/uapi/v4l/diff-v4l.rst
index 8209eeb63dd2..dd6739e8a5b2 100644
--- a/Documentation/media/uapi/v4l/diff-v4l.rst
+++ b/Documentation/media/uapi/v4l/diff-v4l.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _diff-v4l:
diff --git a/Documentation/media/uapi/v4l/dmabuf.rst b/Documentation/media/uapi/v4l/dmabuf.rst
index 4e980a7e9c9c..bb8fd943b14e 100644
--- a/Documentation/media/uapi/v4l/dmabuf.rst
+++ b/Documentation/media/uapi/v4l/dmabuf.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _dmabuf:
diff --git a/Documentation/media/uapi/v4l/dv-timings.rst b/Documentation/media/uapi/v4l/dv-timings.rst
index 415a0c4e2ccb..b3c69ca559e2 100644
--- a/Documentation/media/uapi/v4l/dv-timings.rst
+++ b/Documentation/media/uapi/v4l/dv-timings.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _dv-timings:
diff --git a/Documentation/media/uapi/v4l/extended-controls.rst b/Documentation/media/uapi/v4l/extended-controls.rst
index 65a1d873196b..c471408d9bf9 100644
--- a/Documentation/media/uapi/v4l/extended-controls.rst
+++ b/Documentation/media/uapi/v4l/extended-controls.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _extended-controls:
@@ -1110,10 +1117,16 @@ enum v4l2_mpeg_video_h264_loop_filter_mode -
``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA (integer)``
Loop filter alpha coefficient, defined in the H264 standard.
+ This value corresponds to the slice_alpha_c0_offset_div2 slice header
+ field, and should be in the range of -6 to +6, inclusive. The actual alpha
+ offset FilterOffsetA is twice this value.
Applicable to the H264 encoder.
``V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA (integer)``
Loop filter beta coefficient, defined in the H264 standard.
+ This corresponds to the slice_beta_offset_div2 slice header field, and
+ should be in the range of -6 to +6, inclusive. The actual beta offset
+ FilterOffsetB is twice this value.
Applicable to the H264 encoder.
.. _v4l2-mpeg-video-h264-entropy-mode:
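
Since FilterOffsetA and FilterOffsetB are twice the control values, a short
configuration sketch may help. This is illustrative only; the helper name is
hypothetical:

.. code-block:: c

    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Hypothetical helper: program both loop filter offsets. A value of
     * -3 yields an effective FilterOffsetA/B of -6, per the "twice this
     * value" rule above. The valid range for both controls is -6 to +6. */
    int set_h264_loop_filter(int fd, int alpha_div2, int beta_div2)
    {
        struct v4l2_ext_control ctrls[2] = {
            { .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA,
              .value = alpha_div2 },
            { .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA,
              .value = beta_div2 },
        };
        struct v4l2_ext_controls arg = {
            .ctrl_class = V4L2_CTRL_CLASS_MPEG,
            .count = 2,
            .controls = ctrls,
        };

        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &arg);
    }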
@@ -1505,6 +1518,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
configuring a stateless hardware decoding pipeline for MPEG-2.
The bitstream parameters are defined according to :ref:`mpeg2part2`.
+ .. note::
+
+ This compound control is not yet part of the public kernel API and
+ it is expected to change.
+
.. c:type:: v4l2_ctrl_mpeg2_slice_params
.. cssclass:: longtable
@@ -1625,6 +1643,11 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type -
Specifies quantization matrices (as extracted from the bitstream) for the
associated MPEG-2 slice data.
+ .. note::
+
+ This compound control is not yet part of the public kernel API and
+ it is expected to change.
+
.. c:type:: v4l2_ctrl_mpeg2_quantization
.. cssclass:: longtable
diff --git a/Documentation/media/uapi/v4l/field-order.rst b/Documentation/media/uapi/v4l/field-order.rst
index 5f3f82cbfa34..8415268d439c 100644
--- a/Documentation/media/uapi/v4l/field-order.rst
+++ b/Documentation/media/uapi/v4l/field-order.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _field-order:
diff --git a/Documentation/media/uapi/v4l/fieldseq_bt.svg b/Documentation/media/uapi/v4l/fieldseq_bt.svg
index 909d758f8543..1dab1cd1b6de 100644
--- a/Documentation/media/uapi/v4l/fieldseq_bt.svg
+++ b/Documentation/media/uapi/v4l/fieldseq_bt.svg
@@ -1,6 +1,14 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<!--
+ Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+ TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
@@ -2610,4 +2618,4 @@
sodipodi:role="line"
y="-328.99481"
x="10.054964 14.17972 18.766451 20.597849 25.18458 29.771311 34.358047 38.944778 41.238144 43.531509 48.118244 50.865334 53.158699 55.452068 57.283459 61.870193 63.701588 68.288322">v4l2_buffer.field:</tspan></text>
-</g></svg> \ No newline at end of file
+</g></svg>
diff --git a/Documentation/media/uapi/v4l/fieldseq_tb.svg b/Documentation/media/uapi/v4l/fieldseq_tb.svg
index 7c74344e770f..041071e43f9b 100644
--- a/Documentation/media/uapi/v4l/fieldseq_tb.svg
+++ b/Documentation/media/uapi/v4l/fieldseq_tb.svg
@@ -1,6 +1,14 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<!--
+ Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+ TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
@@ -2607,4 +2615,4 @@
y="-311.9397"
x="10.05469 15.55712 20.143852 24.730585 29.317318 33.904053 38.944508 41.237877 46.740307 51.327042 57.283192 61.869926 66.910378 73.328506 95.0867 100.58913 105.17586 109.7626 114.34933 118.93606 123.97652 126.26987 131.77232 136.35905 142.3152 146.90193 152.40436 158.82249 163.86295 168.9034 175.32153 197.12534 202.62778 207.21451 211.80124 216.38797 220.9747 226.01515 228.30853 233.81096 238.39769 244.35384 248.94058 253.98103 260.39917 282.15695 287.65936 292.24609 296.83282 301.41956 306.00629 311.04675 313.34012 318.84256 323.42929 329.38544 333.97217 339.47461 345.89273 350.9332 355.97363 362.39175 384.19559 389.698 394.28473 398.87149 403.45822 408.04495 413.08539 415.37875 420.8812 425.46793 431.42407 436.0108 441.05127 447.46939 469.2276 474.73001 479.31674 483.90347 488.49023 493.07697 498.1174 500.41077 505.91321 510.49994 516.45612 521.04285 526.54523 532.96338
538.00385 543.04431 549.4624">V4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOMV4L2_FIELD_TOPV4L2_FIELD_BOTTOM</tspan></text>
-</g></svg> \ No newline at end of file
+</g></svg>
diff --git a/Documentation/media/uapi/v4l/format.rst b/Documentation/media/uapi/v4l/format.rst
index 3e3efb0e349e..9cdb296333b8 100644
--- a/Documentation/media/uapi/v4l/format.rst
+++ b/Documentation/media/uapi/v4l/format.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _format:
@@ -12,7 +19,7 @@ Data Format Negotiation
Different devices exchange different kinds of data with applications,
for example video images, raw or sliced VBI data, RDS datagrams. Even
-within one kind many different formats are possible, in particular an
+within one kind many different formats are possible, in particular there is an
abundance of image formats. Although drivers must provide a default and
the selection persists across closing and reopening a device,
applications should always negotiate a data format before engaging in
diff --git a/Documentation/media/uapi/v4l/func-close.rst b/Documentation/media/uapi/v4l/func-close.rst
index e85a6744eb91..1a56811b827e 100644
--- a/Documentation/media/uapi/v4l/func-close.rst
+++ b/Documentation/media/uapi/v4l/func-close.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _func-close:
diff --git a/Documentation/media/uapi/v4l/func-ioctl.rst b/Documentation/media/uapi/v4l/func-ioctl.rst
index ebfbe92f0478..e7a8cf62752e 100644
--- a/Documentation/media/uapi/v4l/func-ioctl.rst
+++ b/Documentation/media/uapi/v4l/func-ioctl.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _func-ioctl:
diff --git a/Documentation/media/uapi/v4l/func-mmap.rst b/Documentation/media/uapi/v4l/func-mmap.rst
index 6d2ce539bd72..75985d80788a 100644
--- a/Documentation/media/uapi/v4l/func-mmap.rst
+++ b/Documentation/media/uapi/v4l/func-mmap.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _func-mmap:
diff --git a/Documentation/media/uapi/v4l/func-munmap.rst b/Documentation/media/uapi/v4l/func-munmap.rst
index c2f4043d7d2b..0d472d86a036 100644
--- a/Documentation/media/uapi/v4l/func-munmap.rst
+++ b/Documentation/media/uapi/v4l/func-munmap.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _func-munmap:
diff --git a/Documentation/media/uapi/v4l/func-open.rst b/Documentation/media/uapi/v4l/func-open.rst
index deea34cc778b..a3d149ce6635 100644
--- a/Documentation/media/uapi/v4l/func-open.rst
+++ b/Documentation/media/uapi/v4l/func-open.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _func-open:
diff --git a/Documentation/media/uapi/v4l/func-poll.rst b/Documentation/media/uapi/v4l/func-poll.rst
index 967fe8920729..4c579ed31358 100644
--- a/Documentation/media/uapi/v4l/func-poll.rst
+++ b/Documentation/media/uapi/v4l/func-poll.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _func-poll:
diff --git a/Documentation/media/uapi/v4l/func-read.rst b/Documentation/media/uapi/v4l/func-read.rst
index ae38c2d59d49..14aca4d5e8fd 100644
--- a/Documentation/media/uapi/v4l/func-read.rst
+++ b/Documentation/media/uapi/v4l/func-read.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _func-read:
diff --git a/Documentation/media/uapi/v4l/func-select.rst b/Documentation/media/uapi/v4l/func-select.rst
index 002dedba2666..af5f1e31c0fb 100644
--- a/Documentation/media/uapi/v4l/func-select.rst
+++ b/Documentation/media/uapi/v4l/func-select.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _func-select:
diff --git a/Documentation/media/uapi/v4l/func-write.rst b/Documentation/media/uapi/v4l/func-write.rst
index 938f33f85455..865129c726ad 100644
--- a/Documentation/media/uapi/v4l/func-write.rst
+++ b/Documentation/media/uapi/v4l/func-write.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _func-write:
diff --git a/Documentation/media/uapi/v4l/hist-v4l2.rst b/Documentation/media/uapi/v4l/hist-v4l2.rst
index 058b5db95c32..7d8e9efbeb1e 100644
--- a/Documentation/media/uapi/v4l/hist-v4l2.rst
+++ b/Documentation/media/uapi/v4l/hist-v4l2.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _hist-v4l2:
diff --git a/Documentation/media/uapi/v4l/hsv-formats.rst b/Documentation/media/uapi/v4l/hsv-formats.rst
index f0f2615eaa95..f52f8ba131f0 100644
--- a/Documentation/media/uapi/v4l/hsv-formats.rst
+++ b/Documentation/media/uapi/v4l/hsv-formats.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _hsv-formats:
diff --git a/Documentation/media/uapi/v4l/io.rst b/Documentation/media/uapi/v4l/io.rst
index 94b38a10ee65..049a2530d3a2 100644
--- a/Documentation/media/uapi/v4l/io.rst
+++ b/Documentation/media/uapi/v4l/io.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _io:
diff --git a/Documentation/media/uapi/v4l/libv4l-introduction.rst b/Documentation/media/uapi/v4l/libv4l-introduction.rst
index ccc3c4d2fc0f..1b206d380d4b 100644
--- a/Documentation/media/uapi/v4l/libv4l-introduction.rst
+++ b/Documentation/media/uapi/v4l/libv4l-introduction.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _libv4l-introduction:
diff --git a/Documentation/media/uapi/v4l/libv4l.rst b/Documentation/media/uapi/v4l/libv4l.rst
index 332c1d42688b..d114fbf1ffa6 100644
--- a/Documentation/media/uapi/v4l/libv4l.rst
+++ b/Documentation/media/uapi/v4l/libv4l.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _libv4l:
diff --git a/Documentation/media/uapi/v4l/meta-formats.rst b/Documentation/media/uapi/v4l/meta-formats.rst
index cf971d5ad9ea..5f956fa784b7 100644
--- a/Documentation/media/uapi/v4l/meta-formats.rst
+++ b/Documentation/media/uapi/v4l/meta-formats.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _meta-formats:
@@ -12,6 +19,7 @@ These formats are used for the :ref:`metadata` interface only.
.. toctree::
:maxdepth: 1
+ pixfmt-meta-intel-ipu3
pixfmt-meta-d4xx
pixfmt-meta-uvc
pixfmt-meta-vsp1-hgo
diff --git a/Documentation/media/uapi/v4l/mmap.rst b/Documentation/media/uapi/v4l/mmap.rst
index 670596c1a4f7..c47708bf2c87 100644
--- a/Documentation/media/uapi/v4l/mmap.rst
+++ b/Documentation/media/uapi/v4l/mmap.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _mmap:
@@ -231,17 +238,17 @@ up the output is started with :ref:`VIDIOC_STREAMON <VIDIOC_STREAMON>`.
In the write loop, when the application runs out of free buffers, it
must wait until an empty buffer can be dequeued and reused.
-To enqueue and dequeue a buffer applications use the :ref:`VIDIOC_QBUF`
-and :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl. The status of a buffer
-being mapped, enqueued, full or empty can be determined at any time
-using the :ref:`VIDIOC_QUERYBUF` ioctl. Two methods exist to suspend
-execution of the application until one or more buffers can be dequeued.
-By default :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` blocks when no buffer is
-in the outgoing queue. When the ``O_NONBLOCK`` flag was given to the
-:ref:`open() <func-open>` function, :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>`
-returns immediately with an ``EAGAIN`` error code when no buffer is
-available. The :ref:`select() <func-select>` or :ref:`poll()
-<func-poll>` functions are always available.
+To enqueue and dequeue a buffer applications use the
+:ref:`VIDIOC_QBUF <VIDIOC_QBUF>` and :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>`
+ioctl. The status of a buffer being mapped, enqueued, full or empty can
+be determined at any time using the :ref:`VIDIOC_QUERYBUF` ioctl. Two
+methods exist to suspend execution of the application until one or more
+buffers can be dequeued. By default :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>`
+blocks when no buffer is in the outgoing queue. When the ``O_NONBLOCK``
+flag was given to the :ref:`open() <func-open>` function,
+:ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` returns immediately with an ``EAGAIN``
+error code when no buffer is available. The :ref:`select() <func-select>`
+or :ref:`poll() <func-poll>` functions are always available.
To start and stop capturing or output applications call the
:ref:`VIDIOC_STREAMON <VIDIOC_STREAMON>` and :ref:`VIDIOC_STREAMOFF
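
The dequeue behaviour described above can be sketched as follows, assuming
the file handle was opened with ``O_NONBLOCK`` and streaming is already on:

.. code-block:: c

    #include <errno.h>
    #include <poll.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int dequeue_one(int fd, struct v4l2_buffer *buf)
    {
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        memset(buf, 0, sizeof(*buf));
        buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        buf->memory = V4L2_MEMORY_MMAP;

        for (;;) {
            if (ioctl(fd, VIDIOC_DQBUF, buf) == 0)
                return 0;        /* buf->index identifies the buffer */
            if (errno != EAGAIN)
                return -1;       /* real error, not just an empty queue */
            /* Outgoing queue empty: wait until a buffer is ready. */
            if (poll(&pfd, 1, -1) < 0)
                return -1;
        }
    }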
diff --git a/Documentation/media/uapi/v4l/nv12mt.svg b/Documentation/media/uapi/v4l/nv12mt.svg
index 65d05606c04c..067d8fb34ba2 100644
--- a/Documentation/media/uapi/v4l/nv12mt.svg
+++ b/Documentation/media/uapi/v4l/nv12mt.svg
@@ -1,4 +1,31 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+ This file is dual-licensed: you can use it either under the terms
+ of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+ dual licensing only applies to this file, and not this project as a
+ whole.
+
+ a) This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation version 2 of
+ the License.
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Or, alternatively,
+
+ b) Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+
+ TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
diff --git a/Documentation/media/uapi/v4l/nv12mt_example.svg b/Documentation/media/uapi/v4l/nv12mt_example.svg
index fc51fe8fda8b..70c3200fdb32 100644
--- a/Documentation/media/uapi/v4l/nv12mt_example.svg
+++ b/Documentation/media/uapi/v4l/nv12mt_example.svg
@@ -1,4 +1,31 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+ This file is dual-licensed: you can use it either under the terms
+ of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+ dual licensing only applies to this file, and not this project as a
+ whole.
+
+ a) This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation version 2 of
+ the License.
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Or, alternatively,
+
+ b) Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+
+ TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
diff --git a/Documentation/media/uapi/v4l/open.rst b/Documentation/media/uapi/v4l/open.rst
index afd116edb40d..42fad5001c5c 100644
--- a/Documentation/media/uapi/v4l/open.rst
+++ b/Documentation/media/uapi/v4l/open.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _open:
@@ -53,7 +60,7 @@ ranges. These ranges are listed in :ref:`devices`.
The creation of character special files (with mknod) is a privileged
operation and devices cannot be opened by major and minor number. That
-means applications cannot *reliable* scan for loaded or installed
+means applications cannot *reliably* scan for loaded or installed
drivers. The user must enter a device name, or the application can try
the conventional device names.
diff --git a/Documentation/media/uapi/v4l/pipeline.dot b/Documentation/media/uapi/v4l/pipeline.dot
index 02d7fcf12b26..8c53ce719a14 100644
--- a/Documentation/media/uapi/v4l/pipeline.dot
+++ b/Documentation/media/uapi/v4l/pipeline.dot
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
digraph board {
rankdir=TB
colorscheme=x11
diff --git a/Documentation/media/uapi/v4l/pixfmt-cnf4.rst b/Documentation/media/uapi/v4l/pixfmt-cnf4.rst
new file mode 100644
index 000000000000..8f469290c304
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-cnf4.rst
@@ -0,0 +1,31 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _V4L2-PIX-FMT-CNF4:
+
+******************************
+V4L2_PIX_FMT_CNF4 ('CNF4')
+******************************
+
+Depth sensor confidence information, packed as a 4-bit-per-pixel array
+
+Description
+===========
+
+Proprietary format used by Intel RealSense depth cameras, containing depth
+confidence information in the range 0-15, with 0 indicating that the sensor
+was unable to resolve any signal and 15 indicating the maximum level of
+confidence for the specific sensor (actual error margins vary per sensor).
+
+Every two consecutive pixels are packed into a single byte.
+Bits 0-3 of byte n hold the confidence value of depth pixel 2*n;
+bits 4-7 hold the confidence value of depth pixel 2*n+1.
+
+**Bit-packed representation.**
+
+.. flat-table::
+ :header-rows: 0
+ :stub-columns: 0
+ :widths: 64 64
+
+ * - Y'\ :sub:`01[3:0]`\ (bits 7--4) Y'\ :sub:`00[3:0]`\ (bits 3--0)
+ - Y'\ :sub:`03[3:0]`\ (bits 7--4) Y'\ :sub:`02[3:0]`\ (bits 3--0)
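
A sketch of unpacking this layout (illustrative, not part of the format
definition):

.. code-block:: c

    #include <stddef.h>
    #include <stdint.h>

    /* Expand CNF4 into one confidence byte per pixel: pixel 2*n sits in
     * bits 0-3 of byte n, pixel 2*n+1 in bits 4-7. npixels assumed even. */
    static void unpack_cnf4(const uint8_t *packed, uint8_t *conf,
                            size_t npixels)
    {
        for (size_t n = 0; n < npixels / 2; n++) {
            conf[2 * n]     = packed[n] & 0x0f;   /* low nibble  */
            conf[2 * n + 1] = packed[n] >> 4;     /* high nibble */
        }
    }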
diff --git a/Documentation/media/uapi/v4l/pixfmt-compressed.rst b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
index ba0f6c49d9bf..e4c5e456df59 100644
--- a/Documentation/media/uapi/v4l/pixfmt-compressed.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-compressed.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
******************
Compressed Formats
diff --git a/Documentation/media/uapi/v4l/pixfmt-grey.rst b/Documentation/media/uapi/v4l/pixfmt-grey.rst
index dad813819d3e..3a8156164d39 100644
--- a/Documentation/media/uapi/v4l/pixfmt-grey.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-grey.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-GREY:
diff --git a/Documentation/media/uapi/v4l/pixfmt-indexed.rst b/Documentation/media/uapi/v4l/pixfmt-indexed.rst
index 6edac54dad74..4538b425a046 100644
--- a/Documentation/media/uapi/v4l/pixfmt-indexed.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-indexed.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _pixfmt-indexed:
diff --git a/Documentation/media/uapi/v4l/pixfmt-intro.rst b/Documentation/media/uapi/v4l/pixfmt-intro.rst
index 4bc116aa8193..ca0a6e0d8959 100644
--- a/Documentation/media/uapi/v4l/pixfmt-intro.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-intro.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
**********************
Standard Image Formats
diff --git a/Documentation/media/uapi/v4l/pixfmt-inzi.rst b/Documentation/media/uapi/v4l/pixfmt-inzi.rst
index 75272f80bc8a..af2940d844ff 100644
--- a/Documentation/media/uapi/v4l/pixfmt-inzi.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-inzi.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-INZI:
diff --git a/Documentation/media/uapi/v4l/pixfmt-m420.rst b/Documentation/media/uapi/v4l/pixfmt-m420.rst
index 6703f4079c3e..c2bae959bf51 100644
--- a/Documentation/media/uapi/v4l/pixfmt-m420.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-m420.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-M420:
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-d4xx.rst b/Documentation/media/uapi/v4l/pixfmt-meta-d4xx.rst
index 63bf1a2c9116..862e1f327150 100644
--- a/Documentation/media/uapi/v4l/pixfmt-meta-d4xx.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-d4xx.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-meta-fmt-d4xx:
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst b/Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst
new file mode 100644
index 000000000000..dc871006b41a
--- /dev/null
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst
@@ -0,0 +1,178 @@
+.. -*- coding: utf-8; mode: rst -*-
+
+.. _v4l2-meta-fmt-params:
+.. _v4l2-meta-fmt-stat-3a:
+
+******************************************************************
+V4L2_META_FMT_IPU3_PARAMS ('ip3p'), V4L2_META_FMT_IPU3_3A ('ip3s')
+******************************************************************
+
+.. c:type:: ipu3_uapi_stats_3a
+
+3A statistics
+=============
+
+For IPU3 ImgU, the 3A statistics accelerators collect different statistics
+over an input Bayer frame. These statistics, defined in struct
+:c:type:`ipu3_uapi_stats_3a`, are obtained from the "ipu3-imgu 3a stat"
+metadata capture video node and are then passed to user space for analysis
+through the :c:type:`v4l2_meta_format` interface.
+
+The statistics collected are AWB (Auto-white balance) RGBS (Red, Green, Blue and
+Saturation measure) cells, AWB filter response, AF (Auto-focus) filter response,
+and AE (Auto-exposure) histogram.
+
+struct :c:type:`ipu3_uapi_4a_config` stores the configurable parameters for
+all of the above.
+
+.. code-block:: c
+
+ struct ipu3_uapi_stats_3a {
+ struct ipu3_uapi_awb_raw_buffer awb_raw_buffer;
+ struct ipu3_uapi_ae_raw_buffer_aligned ae_raw_buffer[IPU3_UAPI_MAX_STRIPES];
+ struct ipu3_uapi_af_raw_buffer af_raw_buffer;
+ struct ipu3_uapi_awb_fr_raw_buffer awb_fr_raw_buffer;
+ struct ipu3_uapi_4a_config stats_4a_config;
+ __u32 ae_join_buffers;
+ __u8 padding[28];
+ struct ipu3_uapi_stats_3a_bubble_info_per_stripe stats_3a_bubble_per_stripe;
+ struct ipu3_uapi_ff_status stats_3a_status;
+ };
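+
+A minimal usage sketch (error handling omitted; an open ``fd`` on the
+statistics node, buffer setup via ``VIDIOC_REQBUFS``/``VIDIOC_QBUF``, and
+the mmap'ed ``buf_start[]`` array are assumed to exist already):
+
+.. code-block:: c
+
+    struct v4l2_buffer buf = {
+        .type = V4L2_BUF_TYPE_META_CAPTURE,
+        .memory = V4L2_MEMORY_MMAP,
+    };
+
+    /* Dequeue one filled buffer from the "ipu3-imgu 3a stat" node and
+     * interpret its payload as the 3A statistics. */
+    ioctl(fd, VIDIOC_DQBUF, &buf);
+    struct ipu3_uapi_stats_3a *stats = buf_start[buf.index];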
+
+.. c:type:: ipu3_uapi_params
+
+Pipeline parameters
+===================
+
+The IPU3 pipeline has a number of image processing stages, each of which
+takes a set of parameters as input. The major stages of the pipeline are
+shown here:
+
+Raw pixels -> Bayer Downscaling -> Optical Black Correction ->
+Linearization -> Lens Shading Correction -> White Balance / Exposure /
+Focus Apply -> Bayer Noise Reduction -> ANR -> Demosaicing -> Color
+Correction Matrix -> Gamma correction -> Color Space Conversion ->
+Chroma Down Scaling -> Chromatic Noise Reduction -> Total Color
+Correction -> XNR3 -> TNR -> DDR
+
+The table below presents a description of the above algorithms.
+
+======================== =======================================================
+Name Description
+======================== =======================================================
+Optical Black Correction Optical Black Correction block subtracts a pre-defined
+ value from the respective pixel values to obtain better
+ image quality.
+ Defined in :c:type:`ipu3_uapi_obgrid_param`.
+Linearization This block uses linearization parameters to
+ correct non-linear sensor effects. The lookup
+ table is defined in
+ :c:type:`ipu3_uapi_isp_lin_vmem_params`.
+SHD Lens shading correction is used to correct spatial
+ non-uniformity of the pixel response due to optical
+ lens shading. This is done by applying a different gain
+ for each pixel. The gain, black level, etc. are
+ configured in :c:type:`ipu3_uapi_shd_config_static`.
+BNR Bayer noise reduction block removes image noise by
+ applying a bilateral filter.
+ See :c:type:`ipu3_uapi_bnr_static_config` for details.
+ANR Advanced Noise Reduction is a block-based algorithm
+ that performs noise reduction in the Bayer domain. The
+ convolution matrix etc can be found in
+ :c:type:`ipu3_uapi_anr_config`.
+Demosaicing Demosaicing converts raw sensor data in Bayer format
+ into an RGB (Red, Green, Blue) representation and also
+ outputs an estimate of the Y channel for subsequent
+ stream processing by the firmware. The struct is
+ defined as :c:type:`ipu3_uapi_dm_config`. (TODO)
+Color Correction Color correction transforms the sensor-specific
+ color space to the standard "sRGB" color space. This
+ is done by applying the 3x3 matrix defined in
+ :c:type:`ipu3_uapi_ccm_mat_config`.
+Gamma correction Gamma correction :c:type:`ipu3_uapi_gamma_config` is a
+ basic non-linear tone mapping correction that is
+ applied per pixel for each pixel component.
+CSC Color space conversion transforms each pixel from the
+ RGB primary representation to a YUV (Y: luminance,
+ UV: chrominance) representation. This is done by
+ applying a 3x3 matrix defined in
+ :c:type:`ipu3_uapi_csc_mat_config`.
+CDS Chroma down sampling
+ After the CSC is performed, chroma down sampling
+ reduces the UV plane by a factor of 2 in each
+ direction for YUV 4:2:0, using a 4x2 configurable
+ filter :c:type:`ipu3_uapi_cds_params`.
+CHNR Chroma noise reduction
+ This block processes only the chrominance pixels and
+ performs noise reduction by cleaning the high
+ frequency noise.
+ See struct :c:type:`ipu3_uapi_yuvp1_chnr_config`.
+TCC Total color correction as defined in struct
+ :c:type:`ipu3_uapi_yuvp2_tcc_static_config`.
+XNR3 eXtreme Noise Reduction V3 is the third revision of
+ noise reduction algorithm used to improve image
+ quality. This removes the low frequency noise in the
+ captured image. Two related structs are defined:
+ :c:type:`ipu3_uapi_isp_xnr3_params` for ISP data memory
+ and :c:type:`ipu3_uapi_isp_xnr3_vmem_params` for vector
+ memory.
+TNR Temporal Noise Reduction block compares successive
+ frames in time to remove anomalies / noise in pixel
+ values. :c:type:`ipu3_uapi_isp_tnr3_vmem_params` and
+ :c:type:`ipu3_uapi_isp_tnr3_params` are defined for ISP
+ vector and data memory respectively.
+======================== =======================================================
+
+A few stages of the pipeline are executed by firmware running on the ISP
+processor, while many others use a set of fixed hardware blocks, also
+called the accelerator cluster (ACC), to crunch pixel data and produce
+statistics.
+
+User space selects the ACC parameters of individual algorithms, as defined
+by :c:type:`ipu3_uapi_acc_param`, to be applied through struct
+:c:type:`ipu3_uapi_flags` embedded in the :c:type:`ipu3_uapi_params`
+structure. For parameters that user space leaves disabled, the
+corresponding structs are ignored by the driver and the existing
+configuration of the algorithm is preserved.
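+
+As a sketch (the flag member name here is illustrative; see
+drivers/staging/media/ipu3/include/intel-ipu3.h for the authoritative
+names), enabling a single algorithm could look like:
+
+.. code-block:: c
+
+    struct ipu3_uapi_params params = { 0 };
+
+    /* Apply only the optical black grid configuration; all other
+     * parameter structs stay disabled and are ignored by the driver. */
+    params.use.obgrid_param = 1;
+    params.obgrid_param = my_obgrid_cfg;  /* hypothetical local config */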
+
+Both the 3A statistics and the pipeline parameters described here are
+closely tied to the underlying camera sub-system (CSS) APIs. They are
+usually consumed and produced by dedicated user space libraries that make
+up the tuning tools, freeing developers from low-level hardware and
+algorithm details.
+
+It should be noted that IPU3 DMA operations require the addresses of all
+data structures (both input and output) to be aligned on 32-byte
+boundaries.
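+
+A tiny sketch of checking this constraint on a candidate buffer address:
+
+.. code-block:: c
+
+    #include <stdint.h>
+
+    /* IPU3 DMA requires 32-byte alignment for parameter and statistics
+     * buffers; returns non-zero when the address satisfies it. */
+    static int ipu3_addr_is_aligned(const void *p)
+    {
+        return ((uintptr_t)p & 31) == 0;
+    }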
+
+The :c:type:`ipu3_uapi_params` metadata is sent to the "ipu3-imgu
+parameters" video node in ``V4L2_BUF_TYPE_META_CAPTURE`` format.
+
+.. code-block:: c
+
+ struct ipu3_uapi_params {
+ /* Flags which of the settings below are to be applied */
+ struct ipu3_uapi_flags use;
+
+ /* Accelerator cluster parameters */
+ struct ipu3_uapi_acc_param acc_param;
+
+ /* ISP vector address space parameters */
+ struct ipu3_uapi_isp_lin_vmem_params lin_vmem_params;
+ struct ipu3_uapi_isp_tnr3_vmem_params tnr3_vmem_params;
+ struct ipu3_uapi_isp_xnr3_vmem_params xnr3_vmem_params;
+
+ /* ISP data memory (DMEM) parameters */
+ struct ipu3_uapi_isp_tnr3_params tnr3_dmem_params;
+ struct ipu3_uapi_isp_xnr3_params xnr3_dmem_params;
+
+ /* Optical black level compensation */
+ struct ipu3_uapi_obgrid_param obgrid_param;
+ };
+
+Intel IPU3 ImgU uAPI data types
+===============================
+
+.. kernel-doc:: drivers/staging/media/ipu3/include/intel-ipu3.h
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-uvc.rst b/Documentation/media/uapi/v4l/pixfmt-meta-uvc.rst
index b5165dc090c2..481e4e0e6e1d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-meta-uvc.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-uvc.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-meta-fmt-uvc:
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgo.rst b/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgo.rst
index 67796594fd48..f7a861696281 100644
--- a/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgo.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgo.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-meta-fmt-vsp1-hgo:
diff --git a/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst b/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst
index fb9f79466319..2ebccdcca95d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-meta-vsp1-hgt.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-meta-fmt-vsp1-hgt:
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12.rst b/Documentation/media/uapi/v4l/pixfmt-nv12.rst
index 2776b41377d5..b8c021b07fd2 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv12.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv12.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-NV12:
.. _V4L2-PIX-FMT-NV21:
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12m.rst b/Documentation/media/uapi/v4l/pixfmt-nv12m.rst
index c1a2779f604c..9b2c5c21280a 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv12m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv12m.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-NV12M:
.. _v4l2-pix-fmt-nv12mt-16x16:
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst b/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst
index 172a3825604e..2092725de33c 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv12mt.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-NV12MT:
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv16.rst b/Documentation/media/uapi/v4l/pixfmt-nv16.rst
index f0fdad3006cf..5ec4b7fa8f04 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv16.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-NV16:
.. _V4L2-PIX-FMT-NV61:
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv16m.rst b/Documentation/media/uapi/v4l/pixfmt-nv16m.rst
index c45f036763e7..4a63bcf18b70 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv16m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv16m.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-NV16M:
.. _v4l2-pix-fmt-nv61m:
diff --git a/Documentation/media/uapi/v4l/pixfmt-nv24.rst b/Documentation/media/uapi/v4l/pixfmt-nv24.rst
index bda973e86227..13fc6fe1a3d6 100644
--- a/Documentation/media/uapi/v4l/pixfmt-nv24.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-nv24.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-NV24:
.. _V4L2-PIX-FMT-NV42:
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst
index 8edf65c80660..38b1895a509f 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-hsv.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _packed-hsv:
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
index 4938d9655a41..6b3781c04dd5 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-rgb.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _packed-rgb:
diff --git a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
index d7644b411ccc..f53e8f57a003 100644
--- a/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-packed-yuv.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _packed-yuv:
diff --git a/Documentation/media/uapi/v4l/pixfmt-reserved.rst b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
index 0c399858bda2..b2cd155e691b 100644
--- a/Documentation/media/uapi/v4l/pixfmt-reserved.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-reserved.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _pixfmt-reserved:
diff --git a/Documentation/media/uapi/v4l/pixfmt-rgb.rst b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
index 1f9a7e3a07c9..48ab80024835 100644
--- a/Documentation/media/uapi/v4l/pixfmt-rgb.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-rgb.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _pixfmt-rgb:
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-cs08.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-cs08.rst
index 179894f6f8fb..e7a89fe7e117 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-cs08.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-cs08.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-sdr-fmt-cs8:
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-cs14le.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-cs14le.rst
index 5cf7d387447c..d10d56f0e63a 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-cs14le.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-cs14le.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-SDR-FMT-CS14LE:
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-cu08.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-cu08.rst
index fd915b7629b7..f37df90f5a21 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-cu08.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-cu08.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-sdr-fmt-cu8:
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-cu16le.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-cu16le.rst
index 8922f5b35457..237998fb5f9f 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-cu16le.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-cu16le.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-SDR-FMT-CU16LE:
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-pcu16be.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu16be.rst
index 2de1b1a0f517..df078dcfd18d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-pcu16be.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu16be.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-SDR-FMT-PCU16BE:
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-pcu18be.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu18be.rst
index da8b26bf6b95..a1ea63db9230 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-pcu18be.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu18be.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-SDR-FMT-PCU18BE:
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-pcu20be.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu20be.rst
index 5499eed39477..11a05ea60e26 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-pcu20be.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-pcu20be.rst
@@ -1,4 +1,12 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+
.. _V4L2-SDR-FMT-PCU20BE:
******************************
diff --git a/Documentation/media/uapi/v4l/pixfmt-sdr-ru12le.rst b/Documentation/media/uapi/v4l/pixfmt-sdr-ru12le.rst
index 5e383382802f..3c2c9f75fc5e 100644
--- a/Documentation/media/uapi/v4l/pixfmt-sdr-ru12le.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-sdr-ru12le.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-SDR-FMT-RU12LE:
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10-ipu3.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10-ipu3.rst
index 99cde5077519..75279f0fdad8 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb10-ipu3.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb10-ipu3.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-pix-fmt-ipu3-sbggr10:
.. _v4l2-pix-fmt-ipu3-sgbrg10:
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10.rst
index af2538ce34e5..cab7fbb1f2fe 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb10.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb10.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-SRGGB10:
.. _v4l2-pix-fmt-sbggr10:
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10alaw8.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10alaw8.rst
index c44e093514de..5bb58764b532 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb10alaw8.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb10alaw8.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-SBGGR10ALAW8:
.. _v4l2-pix-fmt-sgbrg10alaw8:
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10dpcm8.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10dpcm8.rst
index 5e041d02eff0..cbc9c0a52ab4 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb10dpcm8.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb10dpcm8.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-SBGGR10DPCM8:
.. _v4l2-pix-fmt-sgbrg10dpcm8:
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
index d9e07a4b8b31..cdb70ac26126 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb10p.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-SRGGB10P:
.. _v4l2-pix-fmt-sbggr10p:
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb12.rst b/Documentation/media/uapi/v4l/pixfmt-srggb12.rst
index 15041e568a0a..6fb6a937e6ad 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb12.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb12.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-SRGGB12:
.. _v4l2-pix-fmt-sbggr12:
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst
index 59918a7913fe..01413be12916 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb12p.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-SRGGB12P:
.. _v4l2-pix-fmt-sbggr12p:
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst b/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
index 88d20c0e4282..b583531c2853 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb14p.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-SRGGB14P:
.. _v4l2-pix-fmt-sbggr14p:
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb16.rst b/Documentation/media/uapi/v4l/pixfmt-srggb16.rst
index d407b2b2050f..36527c49eaf7 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb16.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-SRGGB16:
.. _v4l2-pix-fmt-sbggr16:
diff --git a/Documentation/media/uapi/v4l/pixfmt-srggb8.rst b/Documentation/media/uapi/v4l/pixfmt-srggb8.rst
index 5ac25a634d30..f5233c1e2314 100644
--- a/Documentation/media/uapi/v4l/pixfmt-srggb8.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-srggb8.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-SRGGB8:
.. _v4l2-pix-fmt-sbggr8:
diff --git a/Documentation/media/uapi/v4l/pixfmt-tch-td08.rst b/Documentation/media/uapi/v4l/pixfmt-tch-td08.rst
index 07834cd1249e..b7d3d6ccebc5 100644
--- a/Documentation/media/uapi/v4l/pixfmt-tch-td08.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-tch-td08.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-TCH-FMT-DELTA-TD08:
diff --git a/Documentation/media/uapi/v4l/pixfmt-tch-td16.rst b/Documentation/media/uapi/v4l/pixfmt-tch-td16.rst
index 29ebcf40a989..4031b175257c 100644
--- a/Documentation/media/uapi/v4l/pixfmt-tch-td16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-tch-td16.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-TCH-FMT-DELTA-TD16:
diff --git a/Documentation/media/uapi/v4l/pixfmt-tch-tu08.rst b/Documentation/media/uapi/v4l/pixfmt-tch-tu08.rst
index e7fb7ddd191b..2d447475aaa7 100644
--- a/Documentation/media/uapi/v4l/pixfmt-tch-tu08.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-tch-tu08.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-TCH-FMT-TU08:
diff --git a/Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst b/Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst
index 1588fcc3f1e7..8278543be99a 100644
--- a/Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-tch-tu16.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-TCH-FMT-TU16:
diff --git a/Documentation/media/uapi/v4l/pixfmt-uv8.rst b/Documentation/media/uapi/v4l/pixfmt-uv8.rst
index c449231b51bb..6008c898305d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-uv8.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-uv8.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-UV8:
diff --git a/Documentation/media/uapi/v4l/pixfmt-uyvy.rst b/Documentation/media/uapi/v4l/pixfmt-uyvy.rst
index ecdc2d94c209..72da2639d37e 100644
--- a/Documentation/media/uapi/v4l/pixfmt-uyvy.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-uyvy.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-UYVY:
diff --git a/Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst b/Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst
index ef52f637d8e9..7f82dad9013a 100644
--- a/Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-v4l2-mplane.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
******************************
Multi-planar format structures
diff --git a/Documentation/media/uapi/v4l/pixfmt-v4l2.rst b/Documentation/media/uapi/v4l/pixfmt-v4l2.rst
index 826f2305da01..71eebfc6d853 100644
--- a/Documentation/media/uapi/v4l/pixfmt-v4l2.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-v4l2.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
******************************
Single-planar format structure
diff --git a/Documentation/media/uapi/v4l/pixfmt-vyuy.rst b/Documentation/media/uapi/v4l/pixfmt-vyuy.rst
index 670c339c1714..39b99707cd99 100644
--- a/Documentation/media/uapi/v4l/pixfmt-vyuy.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-vyuy.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-VYUY:
diff --git a/Documentation/media/uapi/v4l/pixfmt-y10.rst b/Documentation/media/uapi/v4l/pixfmt-y10.rst
index 89e22899cd81..63277686764a 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y10.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y10.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-Y10:
diff --git a/Documentation/media/uapi/v4l/pixfmt-y10b.rst b/Documentation/media/uapi/v4l/pixfmt-y10b.rst
index 9feddf3ae07b..49c4dd432413 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y10b.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y10b.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-Y10BPACK:
diff --git a/Documentation/media/uapi/v4l/pixfmt-y10p.rst b/Documentation/media/uapi/v4l/pixfmt-y10p.rst
index 13b571306915..7893642faee3 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y10p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y10p.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-Y10P:
diff --git a/Documentation/media/uapi/v4l/pixfmt-y12.rst b/Documentation/media/uapi/v4l/pixfmt-y12.rst
index 0f230713290b..33a943b4996a 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y12.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y12.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-Y12:
diff --git a/Documentation/media/uapi/v4l/pixfmt-y12i.rst b/Documentation/media/uapi/v4l/pixfmt-y12i.rst
index bb39a2463564..1d4a14e1ec6e 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y12i.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y12i.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-Y12I:
diff --git a/Documentation/media/uapi/v4l/pixfmt-y16-be.rst b/Documentation/media/uapi/v4l/pixfmt-y16-be.rst
index 54ce35ef84b7..1e72bfe2d557 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y16-be.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y16-be.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-Y16-BE:
diff --git a/Documentation/media/uapi/v4l/pixfmt-y16.rst b/Documentation/media/uapi/v4l/pixfmt-y16.rst
index bcbd52de3aca..f77d900db131 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y16.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-Y16:
diff --git a/Documentation/media/uapi/v4l/pixfmt-y41p.rst b/Documentation/media/uapi/v4l/pixfmt-y41p.rst
index e1fe548807a4..829c68afd8d7 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y41p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y41p.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-Y41P:
diff --git a/Documentation/media/uapi/v4l/pixfmt-y8i.rst b/Documentation/media/uapi/v4l/pixfmt-y8i.rst
index fd8ed23dd342..2c88ed90522d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-y8i.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-y8i.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-Y8I:
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv410.rst b/Documentation/media/uapi/v4l/pixfmt-yuv410.rst
index b51a0d1c6108..ebb72a5c7ceb 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv410.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv410.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-YVU410:
.. _v4l2-pix-fmt-yuv410:
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst b/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst
index 2582341972db..83ddaa3f8dfb 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv411p.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-YUV411P:
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv420.rst b/Documentation/media/uapi/v4l/pixfmt-yuv420.rst
index a9b85c4b1dbc..f4f6f792a23e 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv420.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv420.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-YVU420:
.. _V4L2-PIX-FMT-YUV420:
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst
index 32c68c33f2b1..c29b30c6445a 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv420m.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-YUV420M:
.. _v4l2-pix-fmt-yvu420m:
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst
index 9e7028c4967c..737fd94a9ae9 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv422m.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-YUV422M:
.. _v4l2-pix-fmt-yvu422m:
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst b/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst
index a96f836c7fa5..7cebb6ebb621 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv422p.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-YUV422P:
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst b/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst
index 8605bfaee112..8f14ca378816 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuv444m.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-YUV444M:
.. _v4l2-pix-fmt-yvu444m:
diff --git a/Documentation/media/uapi/v4l/pixfmt-yuyv.rst b/Documentation/media/uapi/v4l/pixfmt-yuyv.rst
index 53e876d053fb..d86d7f086c41 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yuyv.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yuyv.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-YUYV:
diff --git a/Documentation/media/uapi/v4l/pixfmt-yvyu.rst b/Documentation/media/uapi/v4l/pixfmt-yvyu.rst
index b9c31746e565..656a830fed02 100644
--- a/Documentation/media/uapi/v4l/pixfmt-yvyu.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-yvyu.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-YVYU:
diff --git a/Documentation/media/uapi/v4l/pixfmt-z16.rst b/Documentation/media/uapi/v4l/pixfmt-z16.rst
index eb713a9bccae..eccf235bf02d 100644
--- a/Documentation/media/uapi/v4l/pixfmt-z16.rst
+++ b/Documentation/media/uapi/v4l/pixfmt-z16.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _V4L2-PIX-FMT-Z16:
diff --git a/Documentation/media/uapi/v4l/pixfmt.rst b/Documentation/media/uapi/v4l/pixfmt.rst
index 2aa449e2da67..29be001796db 100644
--- a/Documentation/media/uapi/v4l/pixfmt.rst
+++ b/Documentation/media/uapi/v4l/pixfmt.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _pixfmt:
diff --git a/Documentation/media/uapi/v4l/planar-apis.rst b/Documentation/media/uapi/v4l/planar-apis.rst
index 4e059fb44153..a422dc9d592c 100644
--- a/Documentation/media/uapi/v4l/planar-apis.rst
+++ b/Documentation/media/uapi/v4l/planar-apis.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _planar-apis:
diff --git a/Documentation/media/uapi/v4l/querycap.rst b/Documentation/media/uapi/v4l/querycap.rst
index c19cce7a816f..8d01ef52f780 100644
--- a/Documentation/media/uapi/v4l/querycap.rst
+++ b/Documentation/media/uapi/v4l/querycap.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _querycap:
diff --git a/Documentation/media/uapi/v4l/rw.rst b/Documentation/media/uapi/v4l/rw.rst
index 91596c0cc2f3..6e498fcf32c4 100644
--- a/Documentation/media/uapi/v4l/rw.rst
+++ b/Documentation/media/uapi/v4l/rw.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _rw:
diff --git a/Documentation/media/uapi/v4l/sdr-formats.rst b/Documentation/media/uapi/v4l/sdr-formats.rst
index 2037f5bad727..f452f5574ebb 100644
--- a/Documentation/media/uapi/v4l/sdr-formats.rst
+++ b/Documentation/media/uapi/v4l/sdr-formats.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _sdr-formats:
diff --git a/Documentation/media/uapi/v4l/selection-api-configuration.rst b/Documentation/media/uapi/v4l/selection-api-configuration.rst
index 0a4ddc2d71db..6e0c98c37067 100644
--- a/Documentation/media/uapi/v4l/selection-api-configuration.rst
+++ b/Documentation/media/uapi/v4l/selection-api-configuration.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
*************
Configuration
diff --git a/Documentation/media/uapi/v4l/selection-api-examples.rst b/Documentation/media/uapi/v4l/selection-api-examples.rst
index 67e0e9aed9e8..bb288b06cc17 100644
--- a/Documentation/media/uapi/v4l/selection-api-examples.rst
+++ b/Documentation/media/uapi/v4l/selection-api-examples.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
********
Examples
diff --git a/Documentation/media/uapi/v4l/selection-api-intro.rst b/Documentation/media/uapi/v4l/selection-api-intro.rst
index 09ca93f91bf7..0faed02d0226 100644
--- a/Documentation/media/uapi/v4l/selection-api-intro.rst
+++ b/Documentation/media/uapi/v4l/selection-api-intro.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
************
Introduction
diff --git a/Documentation/media/uapi/v4l/selection-api-targets.rst b/Documentation/media/uapi/v4l/selection-api-targets.rst
index bf7e76dfbdf9..83d633bcbd6f 100644
--- a/Documentation/media/uapi/v4l/selection-api-targets.rst
+++ b/Documentation/media/uapi/v4l/selection-api-targets.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
*****************
Selection targets
diff --git a/Documentation/media/uapi/v4l/selection-api-vs-crop-api.rst b/Documentation/media/uapi/v4l/selection-api-vs-crop-api.rst
index e7455fb1e572..79b3abca341a 100644
--- a/Documentation/media/uapi/v4l/selection-api-vs-crop-api.rst
+++ b/Documentation/media/uapi/v4l/selection-api-vs-crop-api.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _selection-vs-crop:
diff --git a/Documentation/media/uapi/v4l/selection-api.rst b/Documentation/media/uapi/v4l/selection-api.rst
index 390233f704a3..5386004e87cf 100644
--- a/Documentation/media/uapi/v4l/selection-api.rst
+++ b/Documentation/media/uapi/v4l/selection-api.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _selection-api:
diff --git a/Documentation/media/uapi/v4l/selection.svg b/Documentation/media/uapi/v4l/selection.svg
index 911062bd2844..59d2bec9b278 100644
--- a/Documentation/media/uapi/v4l/selection.svg
+++ b/Documentation/media/uapi/v4l/selection.svg
@@ -1,4 +1,31 @@
<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ This file is dual-licensed: you can use it either under the terms
+ of the GPL 2.0 or the GFDL 1.1+ license, at your option. Note that this
+ dual licensing only applies to this file, and not this project as a
+ whole.
+
+ a) This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation version 2 of
+ the License.
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Or, alternatively,
+
+ b) Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+
+ TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg enable-background="new" version="1" viewBox="0 0 4226.3 1686.8" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<pattern id="ig" xlink:href="#ka" patternTransform="matrix(5.4432 0 0 10.1 1722.4 161.06)"/>
diff --git a/Documentation/media/uapi/v4l/selections-common.rst b/Documentation/media/uapi/v4l/selections-common.rst
index 69dbce4e6e47..28b32db280f2 100644
--- a/Documentation/media/uapi/v4l/selections-common.rst
+++ b/Documentation/media/uapi/v4l/selections-common.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-selections-common:
diff --git a/Documentation/media/uapi/v4l/standard.rst b/Documentation/media/uapi/v4l/standard.rst
index 75a14895aed7..bf8959b72988 100644
--- a/Documentation/media/uapi/v4l/standard.rst
+++ b/Documentation/media/uapi/v4l/standard.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _standard:
diff --git a/Documentation/media/uapi/v4l/streaming-par.rst b/Documentation/media/uapi/v4l/streaming-par.rst
index f9b93c53f75c..425bd0ff1477 100644
--- a/Documentation/media/uapi/v4l/streaming-par.rst
+++ b/Documentation/media/uapi/v4l/streaming-par.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _streaming-par:
diff --git a/Documentation/media/uapi/v4l/subdev-formats.rst b/Documentation/media/uapi/v4l/subdev-formats.rst
index 8e73fcfc6900..ff4b2a972fd2 100644
--- a/Documentation/media/uapi/v4l/subdev-formats.rst
+++ b/Documentation/media/uapi/v4l/subdev-formats.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-mbus-format:
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg b/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
index ee1df49f83e8..59321e09929d 100644
--- a/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-crop.svg
@@ -1,4 +1,14 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+ Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+
+ TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-full.svg b/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
index c10d222b9ea9..e739c54fbbfb 100644
--- a/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-full.svg
@@ -1,4 +1,14 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+ Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+
+ TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
diff --git a/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg b/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
index 3cb68bf9fc04..401d1456958c 100644
--- a/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
+++ b/Documentation/media/uapi/v4l/subdev-image-processing-scaling-multi-source.svg
@@ -1,4 +1,14 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!--
+ Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+
+ TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
diff --git a/Documentation/media/uapi/v4l/tch-formats.rst b/Documentation/media/uapi/v4l/tch-formats.rst
index dbaabf33a5b8..429c1010149d 100644
--- a/Documentation/media/uapi/v4l/tch-formats.rst
+++ b/Documentation/media/uapi/v4l/tch-formats.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _tch-formats:
diff --git a/Documentation/media/uapi/v4l/tuner.rst b/Documentation/media/uapi/v4l/tuner.rst
index ad117b068831..601dc535199c 100644
--- a/Documentation/media/uapi/v4l/tuner.rst
+++ b/Documentation/media/uapi/v4l/tuner.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _tuner:
@@ -31,7 +38,7 @@ current video or radio input is queried.
.. note::
:ref:`VIDIOC_S_TUNER <VIDIOC_G_TUNER>` does not switch the
- current tuner, when there is more than one at all. The tuner is solely
+ current tuner, when there is more than one. The tuner is solely
determined by the current video input. Drivers must support both ioctls
and set the ``V4L2_CAP_TUNER`` flag in the struct :c:type:`v4l2_capability`
returned by the :ref:`VIDIOC_QUERYCAP` ioctl when the
@@ -41,7 +48,7 @@ current video or radio input is queried.
Modulators
==========
-Video output devices can have one or more modulators, uh, modulating a
+Video output devices can have one or more modulators that modulate a
video signal for radiation or connection to the antenna input of a TV
set or video recorder. Each modulator is associated with one or more
video outputs, depending on the number of RF connectors on the
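The note in the hunks above (VIDIOC_S_TUNER never switches tuners; the current
video input determines the tuner) can be shown with a minimal, hypothetical C
sketch. The open file descriptor and input index are illustrative assumptions,
not part of this patch:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Look up the tuner belonging to a given video input. The input
 * names its tuner via struct v4l2_input.tuner; no ioctl switches
 * tuners directly. */
static int tuner_of_input(int fd, __u32 input)
{
        struct v4l2_input inp;
        struct v4l2_tuner tuner;

        memset(&inp, 0, sizeof(inp));
        inp.index = input;
        if (ioctl(fd, VIDIOC_ENUMINPUT, &inp) < 0)
                return -1;

        memset(&tuner, 0, sizeof(tuner));
        tuner.index = inp.tuner;        /* determined by the input */
        return ioctl(fd, VIDIOC_G_TUNER, &tuner);
}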
diff --git a/Documentation/media/uapi/v4l/user-func.rst b/Documentation/media/uapi/v4l/user-func.rst
index 3e0413b83a33..ca0ef21d77fe 100644
--- a/Documentation/media/uapi/v4l/user-func.rst
+++ b/Documentation/media/uapi/v4l/user-func.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _user-func:
diff --git a/Documentation/media/uapi/v4l/userp.rst b/Documentation/media/uapi/v4l/userp.rst
index dc2893a60d65..b19da8655452 100644
--- a/Documentation/media/uapi/v4l/userp.rst
+++ b/Documentation/media/uapi/v4l/userp.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _userp:
@@ -62,9 +69,9 @@ memory pages at any time between the completion of the DMA and this
ioctl. The memory is also unlocked when
:ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` is called,
:ref:`VIDIOC_REQBUFS`, or when the device is closed.
-Applications must take care not to free buffers without dequeuing. For
-once, the buffers remain locked until further, wasting physical memory.
-Second the driver will not be notified when the memory is returned to
+Applications must take care not to free buffers without dequeuing.
+Firstly, the buffers remain locked for longer, wasting physical memory.
+Secondly, the driver will not be notified when the memory is returned to
the application's free list and subsequently reused for other purposes,
possibly completing the requested DMA and overwriting valuable data.
@@ -90,7 +97,7 @@ To start and stop capturing or output applications call the
.. note::
- ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` removes all buffers from
+ :ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>` removes all buffers from
both queues and unlocks all buffers as a side effect. Since there is no
notion of doing anything "now" on a multitasking system, if an
application needs to synchronize with another event it should examine
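The dequeue-before-free rule corrected in the hunk above lends itself to a
short illustration. The following is a minimal, hypothetical C sketch (not
part of the patch); it assumes an already-open capture fd, a single
V4L2_MEMORY_USERPTR buffer, and blocking I/O:

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* One user-pointer buffer through a full cycle. The pages behind
 * mem are locked from VIDIOC_QBUF until the buffer is dequeued or
 * the stream is stopped; freeing earlier risks the DMA completing
 * into reused memory, as the text above warns. */
static int userptr_cycle(int fd, size_t len)
{
        enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        struct v4l2_requestbuffers req;
        struct v4l2_buffer buf;
        void *mem = malloc(len);

        if (!mem)
                return -1;

        memset(&req, 0, sizeof(req));
        req.count = 1;
        req.type = type;
        req.memory = V4L2_MEMORY_USERPTR;
        if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
                goto out;

        memset(&buf, 0, sizeof(buf));
        buf.type = type;
        buf.memory = V4L2_MEMORY_USERPTR;
        buf.index = 0;
        buf.m.userptr = (unsigned long)mem;
        buf.length = len;
        if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
                goto out;
        if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
                goto off;

        /* Blocks until the driver has filled the buffer. */
        ioctl(fd, VIDIOC_DQBUF, &buf);
off:
        /* VIDIOC_STREAMOFF removes and unlocks all buffers as a
         * side effect, so the memory may be returned afterwards. */
        ioctl(fd, VIDIOC_STREAMOFF, &type);
out:
        free(mem);
        return 0;
}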
diff --git a/Documentation/media/uapi/v4l/v4l2-selection-flags.rst b/Documentation/media/uapi/v4l/v4l2-selection-flags.rst
index 1f9a03851d0f..cc8f2a2b7cba 100644
--- a/Documentation/media/uapi/v4l/v4l2-selection-flags.rst
+++ b/Documentation/media/uapi/v4l/v4l2-selection-flags.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-selection-flags:
diff --git a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
index 87433ec76c6b..f74f239b0510 100644
--- a/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
+++ b/Documentation/media/uapi/v4l/v4l2-selection-targets.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2-selection-targets:
@@ -42,12 +49,7 @@ of the two interfaces they are used.
* - ``V4L2_SEL_TGT_NATIVE_SIZE``
- 0x0003
- The native size of the device, e.g. a sensor's pixel array.
- ``left`` and ``top`` fields are zero for this target. Setting the
- native size will generally only make sense for memory to memory
- devices where the software can create a canvas of a given size in
- which for example a video frame can be composed. In that case
- V4L2_SEL_TGT_NATIVE_SIZE can be used to configure the size of
- that canvas.
+ ``left`` and ``top`` fields are zero for this target.
- Yes
- Yes
* - ``V4L2_SEL_TGT_COMPOSE``
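As a hypothetical illustration of the V4L2_SEL_TGT_NATIVE_SIZE row above (the
fd and the capture buffer type are assumptions, not taken from the patch):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Read the native size rectangle; per the table above, left and top
 * are zero for this target, so only width/height carry information. */
static int get_native_size(int fd, struct v4l2_rect *out)
{
        struct v4l2_selection sel;

        memset(&sel, 0, sizeof(sel));
        sel.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        sel.target = V4L2_SEL_TGT_NATIVE_SIZE;
        if (ioctl(fd, VIDIOC_G_SELECTION, &sel) < 0)
                return -1;
        *out = sel.r;
        return 0;
}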
diff --git a/Documentation/media/uapi/v4l/v4l2.rst b/Documentation/media/uapi/v4l/v4l2.rst
index b89e5621ae69..004ec00db6bd 100644
--- a/Documentation/media/uapi/v4l/v4l2.rst
+++ b/Documentation/media/uapi/v4l/v4l2.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. include:: <isonum.txt>
.. _v4l2spec:
diff --git a/Documentation/media/uapi/v4l/v4l2grab-example.rst b/Documentation/media/uapi/v4l/v4l2grab-example.rst
index c240f0513bee..2a0cfd4429c1 100644
--- a/Documentation/media/uapi/v4l/v4l2grab-example.rst
+++ b/Documentation/media/uapi/v4l/v4l2grab-example.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _v4l2grab-example:
diff --git a/Documentation/media/uapi/v4l/v4l2grab.c.rst b/Documentation/media/uapi/v4l/v4l2grab.c.rst
index f0d0ab6abd41..e76c5fb7bd19 100644
--- a/Documentation/media/uapi/v4l/v4l2grab.c.rst
+++ b/Documentation/media/uapi/v4l/v4l2grab.c.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
file: media/v4l/v4l2grab.c
==========================
diff --git a/Documentation/media/uapi/v4l/vbi_525.svg b/Documentation/media/uapi/v4l/vbi_525.svg
index 643aec8d0ba2..6cd5def22b1f 100644
--- a/Documentation/media/uapi/v4l/vbi_525.svg
+++ b/Documentation/media/uapi/v4l/vbi_525.svg
@@ -1,6 +1,14 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<!--
+ Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+ TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
@@ -810,4 +818,4 @@
sodipodi:role="line"
y="-3648.6809"
x="3528.1047 3569.0876 3610.0703 3651.0532 3671.5447 3692.0361 3708.3999 3749.3826 3765.7463">2nd field</tspan></text>
-</g></svg> \ No newline at end of file
+</g></svg>
diff --git a/Documentation/media/uapi/v4l/vbi_625.svg b/Documentation/media/uapi/v4l/vbi_625.svg
index 9b18243c0a06..7aaae5ec4878 100644
--- a/Documentation/media/uapi/v4l/vbi_625.svg
+++ b/Documentation/media/uapi/v4l/vbi_625.svg
@@ -1,6 +1,14 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<!--
+ Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+ TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
@@ -859,4 +867,4 @@
y="-5054.106"
sodipodi:role="line"
id="tspan4129">24</tspan></text>
-</g></svg> \ No newline at end of file
+</g></svg>
diff --git a/Documentation/media/uapi/v4l/vbi_hsync.svg b/Documentation/media/uapi/v4l/vbi_hsync.svg
index e17ff8314e7b..f8e979ada7e3 100644
--- a/Documentation/media/uapi/v4l/vbi_hsync.svg
+++ b/Documentation/media/uapi/v4l/vbi_hsync.svg
@@ -1,6 +1,14 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Created with Inkscape (http://www.inkscape.org/) -->
+<!--
+ Permission is granted to copy, distribute and/or modify this
+ document under the terms of the GNU Free Documentation License,
+ Version 1.1 or any later version published by the Free Software
+ Foundation, with no Invariant Sections, no Front-Cover Texts
+ and no Back-Cover Texts. A copy of the license is included at
+ Documentation/media/uapi/fdl-appendix.rst.
+ TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
+-->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
@@ -310,4 +318,4 @@
sodipodi:role="line"
y="-395.66284"
x="438.29504 457.96585 469.55164 474.17761 479.97049 491.55627 497.34915 508.93494 520.52069 530.93958 542.52533">White Level</tspan></text>
-</g></svg> \ No newline at end of file
+</g></svg>
diff --git a/Documentation/media/uapi/v4l/video.rst b/Documentation/media/uapi/v4l/video.rst
index d2bc06b064ad..69603b5efbb5 100644
--- a/Documentation/media/uapi/v4l/video.rst
+++ b/Documentation/media/uapi/v4l/video.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _video:
@@ -7,7 +14,7 @@ Video Inputs and Outputs
************************
Video inputs and outputs are physical connectors of a device. These can
-be for example RF connectors (antenna/cable), CVBS a.k.a. Composite
+be for example: RF connectors (antenna/cable), CVBS a.k.a. Composite
Video, S-Video and RGB connectors. Camera sensors are also considered to
be a video input. Video and VBI capture devices have inputs. Video and
VBI output devices have outputs, at least one each. Radio devices have
@@ -19,7 +26,7 @@ outputs applications can enumerate them with the
:ref:`VIDIOC_ENUMOUTPUT` ioctl, respectively. The
struct :c:type:`v4l2_input` returned by the
:ref:`VIDIOC_ENUMINPUT` ioctl also contains signal
-:status information applicable when the current video input is queried.
+status information applicable when the current video input is queried.
The :ref:`VIDIOC_G_INPUT <VIDIOC_G_INPUT>` and
:ref:`VIDIOC_G_OUTPUT <VIDIOC_G_OUTPUT>` ioctls return the index of
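The enumeration scheme described in the hunks above can be sketched in a few
lines of hypothetical C; the fd is an assumption, and the status bits are only
meaningful for the currently selected input, as the corrected text notes:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* List all video inputs, marking the currently selected one. */
static void list_inputs(int fd)
{
        struct v4l2_input input;
        int current = -1;
        __u32 i;

        ioctl(fd, VIDIOC_G_INPUT, &current);

        for (i = 0; ; i++) {
                memset(&input, 0, sizeof(input));
                input.index = i;
                if (ioctl(fd, VIDIOC_ENUMINPUT, &input) < 0)
                        break;          /* EINVAL: no more inputs */
                printf("%u: %s%s\n", input.index, (char *)input.name,
                       i == (__u32)current ? " (current)" : "");
        }
}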
diff --git a/Documentation/media/uapi/v4l/videodev.rst b/Documentation/media/uapi/v4l/videodev.rst
index b9ee4672d639..fa3d3398930a 100644
--- a/Documentation/media/uapi/v4l/videodev.rst
+++ b/Documentation/media/uapi/v4l/videodev.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _videodev:
diff --git a/Documentation/media/uapi/v4l/vidioc-create-bufs.rst b/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
index eadf6f757fbf..bd08e4f77ae4 100644
--- a/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
+++ b/Documentation/media/uapi/v4l/vidioc-create-bufs.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_CREATE_BUFS:
diff --git a/Documentation/media/uapi/v4l/vidioc-cropcap.rst b/Documentation/media/uapi/v4l/vidioc-cropcap.rst
index 0a7b8287fd38..019d3d3a0e0d 100644
--- a/Documentation/media/uapi/v4l/vidioc-cropcap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-cropcap.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_CROPCAP:
diff --git a/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst b/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst
index 7709852282c2..a1cf20181cf1 100644
--- a/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dbg-g-chip-info.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_DBG_G_CHIP_INFO:
diff --git a/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst b/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst
index f4e8dd5f7889..29e1d4fc4f52 100644
--- a/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dbg-g-register.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_DBG_G_REGISTER:
diff --git a/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst b/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
index 85c916b0ce07..ccf83b05afa7 100644
--- a/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-decoder-cmd.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_DECODER_CMD:
diff --git a/Documentation/media/uapi/v4l/vidioc-dqevent.rst b/Documentation/media/uapi/v4l/vidioc-dqevent.rst
index 04416b6943c0..dea9c0cc00ab 100644
--- a/Documentation/media/uapi/v4l/vidioc-dqevent.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dqevent.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_DQEVENT:
diff --git a/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst b/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst
index 63ead6b7a115..e62d45d37072 100644
--- a/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-dv-timings-cap.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_DV_TIMINGS_CAP:
diff --git a/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst b/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst
index 5ae8c933b1b9..c313ca8b8cb5 100644
--- a/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-encoder-cmd.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_ENCODER_CMD:
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst
index 63dca65f49e4..0b286e19b46b 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-dv-timings.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_ENUM_DV_TIMINGS:
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
index 019c513df217..822d6730e7d2 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_ENUM_FMT:
@@ -64,8 +71,12 @@ one until ``EINVAL`` is returned.
are valid here: ``V4L2_BUF_TYPE_VIDEO_CAPTURE``,
``V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE``,
``V4L2_BUF_TYPE_VIDEO_OUTPUT``,
- ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE`` and
- ``V4L2_BUF_TYPE_VIDEO_OVERLAY``. See :c:type:`v4l2_buf_type`.
+ ``V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE``,
+ ``V4L2_BUF_TYPE_VIDEO_OVERLAY``,
+ ``V4L2_BUF_TYPE_SDR_CAPTURE``,
+ ``V4L2_BUF_TYPE_SDR_OUTPUT`` and
+ ``V4L2_BUF_TYPE_META_CAPTURE``.
+ See :c:type:`v4l2_buf_type`.
* - __u32
- ``flags``
- See :ref:`fmtdesc-flags`
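A minimal, hypothetical C sketch of the enumeration loop this ioctl expects,
using one of the buffer types listed in the hunk above (the fd is an
assumption):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Walk the format list of one buffer type by incrementing the index
 * until the driver answers EINVAL. */
static void list_formats(int fd, enum v4l2_buf_type type)
{
        struct v4l2_fmtdesc fmt;
        __u32 i;

        for (i = 0; ; i++) {
                memset(&fmt, 0, sizeof(fmt));
                fmt.index = i;
                fmt.type = type;
                if (ioctl(fd, VIDIOC_ENUM_FMT, &fmt) < 0)
                        break;          /* EINVAL: past the last format */
                printf("0x%08x: %s\n", fmt.pixelformat,
                       (char *)fmt.description);
        }
}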
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst b/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst
index fea7dc3c879d..2c69f26b165d 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-frameintervals.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_ENUM_FRAMEINTERVALS:
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst b/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst
index 6de117f163e0..cf31f548826f 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-framesizes.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_ENUM_FRAMESIZES:
diff --git a/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst b/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst
index 195cf45f3c32..0e97c09afe0e 100644
--- a/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enum-freq-bands.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_ENUM_FREQ_BANDS:
diff --git a/Documentation/media/uapi/v4l/vidioc-enumaudio.rst b/Documentation/media/uapi/v4l/vidioc-enumaudio.rst
index 8e5193e8696f..ee0c336c8721 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumaudio.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumaudio.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_ENUMAUDIO:
diff --git a/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst b/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst
index 6d2b4f6e78b0..3a8882214d62 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumaudioout.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_ENUMAUDOUT:
diff --git a/Documentation/media/uapi/v4l/vidioc-enuminput.rst b/Documentation/media/uapi/v4l/vidioc-enuminput.rst
index 0350069a56c5..a0e4c4413121 100644
--- a/Documentation/media/uapi/v4l/vidioc-enuminput.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enuminput.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_ENUMINPUT:
diff --git a/Documentation/media/uapi/v4l/vidioc-enumoutput.rst b/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
index 697dcd186ae3..0fea81f60541 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumoutput.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_ENUMOUTPUT:
diff --git a/Documentation/media/uapi/v4l/vidioc-enumstd.rst b/Documentation/media/uapi/v4l/vidioc-enumstd.rst
index 2644a62acd4b..1603b1b3b6e8 100644
--- a/Documentation/media/uapi/v4l/vidioc-enumstd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-enumstd.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_ENUMSTD:
diff --git a/Documentation/media/uapi/v4l/vidioc-expbuf.rst b/Documentation/media/uapi/v4l/vidioc-expbuf.rst
index 226e83eb28a9..4bd8cd79754c 100644
--- a/Documentation/media/uapi/v4l/vidioc-expbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-expbuf.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_EXPBUF:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-audio.rst b/Documentation/media/uapi/v4l/vidioc-g-audio.rst
index 290851f99386..7af4fe478ba4 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-audio.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-audio.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_AUDIO:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-audioout.rst b/Documentation/media/uapi/v4l/vidioc-g-audioout.rst
index 1c98af33ee70..c6ea0396a96a 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-audioout.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-audioout.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_AUDOUT:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-crop.rst b/Documentation/media/uapi/v4l/vidioc-g-crop.rst
index b95ba6743cbd..1eff59dc5f35 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-crop.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-crop.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_CROP:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst b/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst
index 299b9aabbac2..8493b52adbb2 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-ctrl.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_CTRL:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
index 35cba2c8d459..5712bd48e687 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-dv-timings.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_DV_TIMINGS:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-edid.rst b/Documentation/media/uapi/v4l/vidioc-g-edid.rst
index acab90f06e5a..e55b349a0c7e 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-edid.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-edid.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_EDID:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst b/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst
index 9dfe64fc21a4..e285a1f14cdf 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-enc-index.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_ENC_INDEX:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
index d9930fe776cf..13dc1a986249 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-ext-ctrls.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_EXT_CTRLS:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
index fc73bf0f6052..7b6179627803 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fbuf.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_FBUF:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
index 9ea494a8faca..e35a9caff652 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_FMT:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-frequency.rst b/Documentation/media/uapi/v4l/vidioc-g-frequency.rst
index c1cccb144660..cc30bae3dd6e 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-frequency.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-frequency.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_FREQUENCY:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-input.rst b/Documentation/media/uapi/v4l/vidioc-g-input.rst
index 1dcef44eef02..76b7d487466e 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-input.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-input.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_INPUT:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst b/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst
index a1773ea9543e..5480277ab327 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-jpegcomp.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_JPEGCOMP:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-modulator.rst b/Documentation/media/uapi/v4l/vidioc-g-modulator.rst
index a47b6a15cfbe..2c33a8bdcc47 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-modulator.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-modulator.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_MODULATOR:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-output.rst b/Documentation/media/uapi/v4l/vidioc-g-output.rst
index 3e0093f66834..69542d78977b 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-output.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-output.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_OUTPUT:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-parm.rst b/Documentation/media/uapi/v4l/vidioc-g-parm.rst
index e831fa5512f0..0d2593176c90 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-parm.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-parm.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_PARM:
@@ -42,6 +49,9 @@ side. This is especially useful when using the :ref:`read() <func-read>` or
:ref:`write() <func-write>`, which are not augmented by timestamps or sequence
counters, and to avoid unnecessary data copying.
+Changing the frame interval shall never change the format. Changing the
+format, on the other hand, may change the frame interval.
+
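+As a minimal sketch (assuming an already opened capture device ``fd`` and
+omitting error handling), an application changing only the frame interval
+could do::
+
+    struct v4l2_streamparm parm = {0};
+
+    parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    parm.parm.capture.timeperframe.numerator = 1;    /* request 30 fps */
+    parm.parm.capture.timeperframe.denominator = 30;
+    ioctl(fd, VIDIOC_S_PARM, &parm);
+    /* The format negotiated earlier with VIDIOC_S_FMT must stay unchanged. */
+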
Further these ioctls can be used to determine the number of buffers used
internally by a driver in read/write mode. For implications see the
section discussing the :ref:`read() <func-read>` function.
diff --git a/Documentation/media/uapi/v4l/vidioc-g-priority.rst b/Documentation/media/uapi/v4l/vidioc-g-priority.rst
index c28996b4a45c..244b4dbe9df3 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-priority.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-priority.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_PRIORITY:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
index f1d9df029e0d..7d8ef7ac8e27 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-selection.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-selection.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_SELECTION:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst b/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst
index a9633cae76c5..388b826d44b3 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-sliced-vbi-cap.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_SLICED_VBI_CAP:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-std.rst b/Documentation/media/uapi/v4l/vidioc-g-std.rst
index 8d94f0404df2..e633e42e3910 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-std.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-std.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_STD:
diff --git a/Documentation/media/uapi/v4l/vidioc-g-tuner.rst b/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
index acdd15901a51..82d23b8bd195 100644
--- a/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
+++ b/Documentation/media/uapi/v4l/vidioc-g-tuner.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_G_TUNER:
diff --git a/Documentation/media/uapi/v4l/vidioc-log-status.rst b/Documentation/media/uapi/v4l/vidioc-log-status.rst
index bbeb7b5f516b..16bb5509ad66 100644
--- a/Documentation/media/uapi/v4l/vidioc-log-status.rst
+++ b/Documentation/media/uapi/v4l/vidioc-log-status.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_LOG_STATUS:
diff --git a/Documentation/media/uapi/v4l/vidioc-overlay.rst b/Documentation/media/uapi/v4l/vidioc-overlay.rst
index 1383e3db25fc..fc5a86e8c1f2 100644
--- a/Documentation/media/uapi/v4l/vidioc-overlay.rst
+++ b/Documentation/media/uapi/v4l/vidioc-overlay.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_OVERLAY:
diff --git a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
index 49f9f4c181de..60986710967b 100644
--- a/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-prepare-buf.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_PREPARE_BUF:
diff --git a/Documentation/media/uapi/v4l/vidioc-qbuf.rst b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
index 753b3b5946b1..3259168a7358 100644
--- a/Documentation/media/uapi/v4l/vidioc-qbuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-qbuf.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_QBUF:
diff --git a/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst b/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst
index 6c82eafd28bb..e9b055395382 100644
--- a/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst
+++ b/Documentation/media/uapi/v4l/vidioc-query-dv-timings.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_QUERY_DV_TIMINGS:
diff --git a/Documentation/media/uapi/v4l/vidioc-querybuf.rst b/Documentation/media/uapi/v4l/vidioc-querybuf.rst
index dd54747fabc9..7da60b24e8b6 100644
--- a/Documentation/media/uapi/v4l/vidioc-querybuf.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querybuf.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_QUERYBUF:
diff --git a/Documentation/media/uapi/v4l/vidioc-querycap.rst b/Documentation/media/uapi/v4l/vidioc-querycap.rst
index 66fb1b3d6e6e..5f9930195d62 100644
--- a/Documentation/media/uapi/v4l/vidioc-querycap.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querycap.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_QUERYCAP:
@@ -251,6 +258,9 @@ specification the ioctl returns an ``EINVAL`` error code.
* - ``V4L2_CAP_STREAMING``
- 0x04000000
- The device supports the :ref:`streaming <mmap>` I/O method.
+ * - ``V4L2_CAP_META_OUTPUT``
+ - 0x08000000
+ - The device supports the :ref:`metadata` output interface.
* - ``V4L2_CAP_TOUCH``
- 0x10000000
- This is a touch device.
diff --git a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
index 258f5813f281..f824162d0ea9 100644
--- a/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
+++ b/Documentation/media/uapi/v4l/vidioc-queryctrl.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_QUERYCTRL:
diff --git a/Documentation/media/uapi/v4l/vidioc-querystd.rst b/Documentation/media/uapi/v4l/vidioc-querystd.rst
index a8385cc74818..d8cf28274cfc 100644
--- a/Documentation/media/uapi/v4l/vidioc-querystd.rst
+++ b/Documentation/media/uapi/v4l/vidioc-querystd.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_QUERYSTD:
diff --git a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
index d4bbbb0c60e8..d7faef10e39b 100644
--- a/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
+++ b/Documentation/media/uapi/v4l/vidioc-reqbufs.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_REQBUFS:
@@ -59,9 +66,14 @@ When the I/O method is not supported the ioctl returns an ``EINVAL`` error
code.
Applications can call :ref:`VIDIOC_REQBUFS` again to change the number of
-buffers, however this cannot succeed when any buffers are still mapped.
-A ``count`` value of zero frees all buffers, after aborting or finishing
-any DMA in progress, an implicit
+buffers. Note that if any buffers are still mapped or exported via DMABUF,
+then :ref:`VIDIOC_REQBUFS` can only succeed if the
+``V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS`` capability is set. Otherwise
+:ref:`VIDIOC_REQBUFS` will return the ``EBUSY`` error code.
+If ``V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS`` is set, then these buffers are
+orphaned and will be freed when they are unmapped or when the exported DMABUF
+fds are closed. A ``count`` value of zero frees or orphans all buffers, after
+aborting or finishing any DMA in progress, an implicit
:ref:`VIDIOC_STREAMOFF <VIDIOC_STREAMON>`.
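+
+As a hedged sketch (assuming an already opened ``fd``, MMAP streaming I/O and
+no error handling), freeing or orphaning all buffers could look like::
+
+    struct v4l2_requestbuffers req = {0};
+
+    req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    req.memory = V4L2_MEMORY_MMAP;
+    req.count = 8;
+    ioctl(fd, VIDIOC_REQBUFS, &req);   /* allocate; fills req.capabilities */
+
+    if (!(req.capabilities & V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS)) {
+            /* munmap() every mapping first, or count = 0 fails with EBUSY */
+    }
+
+    req.count = 0;                     /* frees, or orphans, all buffers */
+    ioctl(fd, VIDIOC_REQBUFS, &req);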
@@ -112,6 +124,7 @@ any DMA in progress, an implicit
.. _V4L2-BUF-CAP-SUPPORTS-USERPTR:
.. _V4L2-BUF-CAP-SUPPORTS-DMABUF:
.. _V4L2-BUF-CAP-SUPPORTS-REQUESTS:
+.. _V4L2-BUF-CAP-SUPPORTS-ORPHANED-BUFS:
.. cssclass:: longtable
@@ -132,6 +145,11 @@ any DMA in progress, an implicit
* - ``V4L2_BUF_CAP_SUPPORTS_REQUESTS``
- 0x00000008
- This buffer type supports :ref:`requests <media-request-api>`.
+ * - ``V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS``
+ - 0x00000010
+ - The kernel allows calling :ref:`VIDIOC_REQBUFS` while buffers are still
+ mapped or exported via DMABUF. These orphaned buffers will be freed
+ when they are unmapped or when the exported DMABUF fds are closed.
Return Value
============
diff --git a/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst b/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst
index b318cb8e1df3..4daec97651f2 100644
--- a/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst
+++ b/Documentation/media/uapi/v4l/vidioc-s-hw-freq-seek.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_S_HW_FREQ_SEEK:
diff --git a/Documentation/media/uapi/v4l/vidioc-streamon.rst b/Documentation/media/uapi/v4l/vidioc-streamon.rst
index e851a6961b78..2b5528ec9f89 100644
--- a/Documentation/media/uapi/v4l/vidioc-streamon.rst
+++ b/Documentation/media/uapi/v4l/vidioc-streamon.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_STREAMON:
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst
index 1bfe3865dcc2..6b4bf9ef5606 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-interval.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_SUBDEV_ENUM_FRAME_INTERVAL:
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst
index 33fdc3ac9316..253b128b194e 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-frame-size.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_SUBDEV_ENUM_FRAME_SIZE:
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst b/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst
index 4e4291798e4b..fefe4d7349ee 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-enum-mbus-code.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_SUBDEV_ENUM_MBUS_CODE:
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst
index 69b2ae8e7c15..632ee053accc 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-crop.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_SUBDEV_G_CROP:
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst
index 81c5d331af9a..472577bd1745 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-fmt.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_SUBDEV_G_FMT:
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst
index 5af0a7179941..4b1b4bc78bfe 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-frame-interval.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_SUBDEV_G_FRAME_INTERVAL:
@@ -63,6 +70,9 @@ doesn't match the device capabilities. They must instead modify the
interval to match what the hardware can provide. The modified interval
should be as close as possible to the original request.
+Changing the frame interval shall never change the format. Changing the
+format, on the other hand, may change the frame interval.
+
Sub-devices that support the frame interval ioctls should implement them
on a single pad only. Their behaviour when supported on multiple pads of
the same sub-device is not defined.
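+
+As an illustrative sketch (sub-device node already open as ``fd``, return
+values unchecked), requesting an interval and reading back what the driver
+actually applied could look like::
+
+    struct v4l2_subdev_frame_interval fi = {0};
+
+    fi.pad = 0;
+    fi.interval.numerator = 1;         /* ask for 30 frames per second */
+    fi.interval.denominator = 30;
+    ioctl(fd, VIDIOC_SUBDEV_S_FRAME_INTERVAL, &fi);
+    /* fi.interval now holds the interval actually applied by the driver,
+       as close as possible to the request; the pad format is untouched. */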
diff --git a/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst b/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst
index b1d3dbbef42a..fc73d27e6d74 100644
--- a/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subdev-g-selection.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_SUBDEV_G_SELECTION:
diff --git a/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst b/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst
index b521efa53ceb..a2d3454555ba 100644
--- a/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst
+++ b/Documentation/media/uapi/v4l/vidioc-subscribe-event.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _VIDIOC_SUBSCRIBE_EVENT:
.. _VIDIOC_UNSUBSCRIBE_EVENT:
diff --git a/Documentation/media/uapi/v4l/yuv-formats.rst b/Documentation/media/uapi/v4l/yuv-formats.rst
index 9ab0592d08da..867470e5f9e1 100644
--- a/Documentation/media/uapi/v4l/yuv-formats.rst
+++ b/Documentation/media/uapi/v4l/yuv-formats.rst
@@ -1,4 +1,11 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. Permission is granted to copy, distribute and/or modify this
+.. document under the terms of the GNU Free Documentation License,
+.. Version 1.1 or any later version published by the Free Software
+.. Foundation, with no Invariant Sections, no Front-Cover Texts
+.. and no Back-Cover Texts. A copy of the license is included at
+.. Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it with GFDL-1.1-or-later WITH no-invariant-sections
.. _yuv-formats:
diff --git a/Documentation/media/v4l-drivers/au0828-cardlist.rst b/Documentation/media/v4l-drivers/au0828-cardlist.rst
index bb87b7b36a83..aaaadc934e7a 100644
--- a/Documentation/media/v4l-drivers/au0828-cardlist.rst
+++ b/Documentation/media/v4l-drivers/au0828-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
AU0828 cards list
=================
diff --git a/Documentation/media/v4l-drivers/bttv-cardlist.rst b/Documentation/media/v4l-drivers/bttv-cardlist.rst
index 8da27b924e01..f5806856b5a1 100644
--- a/Documentation/media/v4l-drivers/bttv-cardlist.rst
+++ b/Documentation/media/v4l-drivers/bttv-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
BTTV cards list
===============
diff --git a/Documentation/media/v4l-drivers/bttv.rst b/Documentation/media/v4l-drivers/bttv.rst
index 5f35e2fb5afa..d72a0f8fd267 100644
--- a/Documentation/media/v4l-drivers/bttv.rst
+++ b/Documentation/media/v4l-drivers/bttv.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The bttv driver
===============
diff --git a/Documentation/media/v4l-drivers/cafe_ccic.rst b/Documentation/media/v4l-drivers/cafe_ccic.rst
index 94f0f58ebe37..ff7fbce1342a 100644
--- a/Documentation/media/v4l-drivers/cafe_ccic.rst
+++ b/Documentation/media/v4l-drivers/cafe_ccic.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The cafe_ccic driver
====================
diff --git a/Documentation/media/v4l-drivers/cardlist.rst b/Documentation/media/v4l-drivers/cardlist.rst
index 8a0728d20684..14249f47fbc2 100644
--- a/Documentation/media/v4l-drivers/cardlist.rst
+++ b/Documentation/media/v4l-drivers/cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Cards List
==========
diff --git a/Documentation/media/v4l-drivers/cpia2.rst b/Documentation/media/v4l-drivers/cpia2.rst
index b5125016cfcb..a86baa1c83f1 100644
--- a/Documentation/media/v4l-drivers/cpia2.rst
+++ b/Documentation/media/v4l-drivers/cpia2.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The cpia2 driver
================
diff --git a/Documentation/media/v4l-drivers/cx18.rst b/Documentation/media/v4l-drivers/cx18.rst
index afa03f65b01c..16895a734bae 100644
--- a/Documentation/media/v4l-drivers/cx18.rst
+++ b/Documentation/media/v4l-drivers/cx18.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The cx18 driver
===============
diff --git a/Documentation/media/v4l-drivers/cx2341x.rst b/Documentation/media/v4l-drivers/cx2341x.rst
index e06d07ebdecd..8ca37deb56b6 100644
--- a/Documentation/media/v4l-drivers/cx2341x.rst
+++ b/Documentation/media/v4l-drivers/cx2341x.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The cx2341x driver
==================
diff --git a/Documentation/media/v4l-drivers/cx23885-cardlist.rst b/Documentation/media/v4l-drivers/cx23885-cardlist.rst
index 8c24df8e0423..ddff8da98eeb 100644
--- a/Documentation/media/v4l-drivers/cx23885-cardlist.rst
+++ b/Documentation/media/v4l-drivers/cx23885-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
cx23885 cards list
==================
diff --git a/Documentation/media/v4l-drivers/cx88-cardlist.rst b/Documentation/media/v4l-drivers/cx88-cardlist.rst
index 21648b8c2e83..56ee08028106 100644
--- a/Documentation/media/v4l-drivers/cx88-cardlist.rst
+++ b/Documentation/media/v4l-drivers/cx88-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
CX88 cards list
===============
diff --git a/Documentation/media/v4l-drivers/cx88.rst b/Documentation/media/v4l-drivers/cx88.rst
index d8f3a014726a..698c73ea2e36 100644
--- a/Documentation/media/v4l-drivers/cx88.rst
+++ b/Documentation/media/v4l-drivers/cx88.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The cx88 driver
===============
diff --git a/Documentation/media/v4l-drivers/davinci-vpbe.rst b/Documentation/media/v4l-drivers/davinci-vpbe.rst
index b545fe001919..0fde433e5c71 100644
--- a/Documentation/media/v4l-drivers/davinci-vpbe.rst
+++ b/Documentation/media/v4l-drivers/davinci-vpbe.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The VPBE V4L2 driver design
===========================
diff --git a/Documentation/media/v4l-drivers/em28xx-cardlist.rst b/Documentation/media/v4l-drivers/em28xx-cardlist.rst
index dfe882ca945f..2956cbdc28e0 100644
--- a/Documentation/media/v4l-drivers/em28xx-cardlist.rst
+++ b/Documentation/media/v4l-drivers/em28xx-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
EM28xx cards list
=================
@@ -233,7 +235,7 @@ EM28xx cards list
- em2882
- eb1a:e323
* - 55
- - Terratec Cinnergy Hybrid T USB XS (em2882)
+ - Terratec Cinergy Hybrid T USB XS (em2882)
- em2882
- 0ccd:005e, 0ccd:0042
* - 56
diff --git a/Documentation/media/v4l-drivers/fimc.rst b/Documentation/media/v4l-drivers/fimc.rst
index 3adc19bcf039..74585ba48b7f 100644
--- a/Documentation/media/v4l-drivers/fimc.rst
+++ b/Documentation/media/v4l-drivers/fimc.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
.. include:: <isonum.txt>
The Samsung S5P/EXYNOS4 FIMC driver
diff --git a/Documentation/media/v4l-drivers/fourcc.rst b/Documentation/media/v4l-drivers/fourcc.rst
index 9c82106e8a26..d3482c40da62 100644
--- a/Documentation/media/v4l-drivers/fourcc.rst
+++ b/Documentation/media/v4l-drivers/fourcc.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Guidelines for Video4Linux pixel format 4CCs
============================================
diff --git a/Documentation/media/v4l-drivers/gspca-cardlist.rst b/Documentation/media/v4l-drivers/gspca-cardlist.rst
index e18d87e80d78..adda933616f1 100644
--- a/Documentation/media/v4l-drivers/gspca-cardlist.rst
+++ b/Documentation/media/v4l-drivers/gspca-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The gspca cards list
====================
diff --git a/Documentation/media/v4l-drivers/imx.rst b/Documentation/media/v4l-drivers/imx.rst
index 65d3d15eb159..6922dde4a82b 100644
--- a/Documentation/media/v4l-drivers/imx.rst
+++ b/Documentation/media/v4l-drivers/imx.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
i.MX Video Capture Driver
=========================
diff --git a/Documentation/media/v4l-drivers/index.rst b/Documentation/media/v4l-drivers/index.rst
index 679238e786a7..f28570ec9e42 100644
--- a/Documentation/media/v4l-drivers/index.rst
+++ b/Documentation/media/v4l-drivers/index.rst
@@ -1,4 +1,4 @@
-.. -*- coding: utf-8; mode: rst -*-
+.. SPDX-License-Identifier: GPL-2.0
.. include:: <isonum.txt>
@@ -44,6 +44,7 @@ For more details see the file COPYING in the source distribution of Linux.
davinci-vpbe
fimc
imx
+ ipu3
ivtv
max2175
meye
diff --git a/Documentation/media/v4l-drivers/ipu3.rst b/Documentation/media/v4l-drivers/ipu3.rst
new file mode 100644
index 000000000000..f89b51dafadd
--- /dev/null
+++ b/Documentation/media/v4l-drivers/ipu3.rst
@@ -0,0 +1,369 @@
+.. include:: <isonum.txt>
+
+===============================================================
+Intel Image Processing Unit 3 (IPU3) Imaging Unit (ImgU) driver
+===============================================================
+
+Copyright |copy| 2018 Intel Corporation
+
+Introduction
+============
+
+This file documents the Intel IPU3 (3rd generation Image Processing Unit)
+Imaging Unit drivers located under drivers/media/pci/intel/ipu3 (CIO2) as well
+as under drivers/staging/media/ipu3 (ImgU).
+
+The Intel IPU3 found in certain Kaby Lake (as well as certain Sky Lake)
+platforms (U/Y processor lines) is made up of two parts, namely the Imaging
+Unit (ImgU) and the CIO2 device (MIPI CSI-2 receiver).
+
+The CIO2 device receives the raw Bayer data from the sensors and outputs the
+frames in a format that is specific to the IPU3 (for consumption by the IPU3
+ImgU). The CIO2 driver is available as drivers/media/pci/intel/ipu3/ipu3-cio2*
+and is enabled through the CONFIG_VIDEO_IPU3_CIO2 config option.
+
+The Imaging Unit (ImgU) is responsible for processing images captured
+by the IPU3 CIO2 device. The ImgU driver sources can be found under
+drivers/staging/media/ipu3 directory. The driver is enabled through the
+CONFIG_VIDEO_IPU3_IMGU config option.
+
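+As an example, a kernel configuration fragment enabling both drivers as
+modules might look like this sketch::
+
+    CONFIG_VIDEO_IPU3_CIO2=m
+    CONFIG_VIDEO_IPU3_IMGU=m
+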
+The two driver modules are named ipu3_csi2 and ipu3_imgu, respectively.
+
+The drivers have been tested on Kaby Lake platforms (U/Y processor lines).
+
+Both of the drivers implement V4L2, Media Controller and V4L2 sub-device
+interfaces. The IPU3 CIO2 driver supports camera sensors connected to the CIO2
+MIPI CSI-2 interfaces through V4L2 sub-device sensor drivers.
+
+CIO2
+====
+
+The CIO2 is represented as a single V4L2 subdev, which provides a V4L2 subdev
+interface to the user space. There is a video node for each CSI-2 receiver,
+with a single media controller interface for the entire device.
+
+The CIO2 contains four independent capture channels, each with its own MIPI
+CSI-2 receiver and DMA engine. Each channel is modelled as a V4L2 sub-device
+exposed to userspace as a V4L2 sub-device node and has two pads:
+
+.. tabularcolumns:: |p{0.8cm}|p{4.0cm}|p{4.0cm}|
+
+.. flat-table::
+
+ * - pad
+ - direction
+ - purpose
+
+ * - 0
+ - sink
+ - MIPI CSI-2 input, connected to the sensor subdev
+
+ * - 1
+ - source
+ - Raw video capture, connected to the V4L2 video interface
+
+The V4L2 video interfaces model the DMA engines. They are exposed to userspace
+as V4L2 video device nodes.
+
+Capturing frames in raw Bayer format
+------------------------------------
+
+The CIO2 MIPI CSI2 receiver is used to capture frames (in packed raw Bayer
+format) from the raw sensors connected to the CSI2 ports. The captured frames
+are used as input to the ImgU driver.
+
+Image processing using the IPU3 ImgU requires tools such as raw2pnm [#f1]_
+and yavta [#f2]_ due to the following unique requirements and/or features
+specific to the IPU3:
+
+- The IPU3 CSI2 receiver outputs the captured frames from the sensor in
+  packed raw Bayer format that is specific to the IPU3.
+
+- Multiple video nodes have to be operated simultaneously.
+
+Let us take the example of the ov5670 sensor connected to CSI2 port 0, for a
+2592x1944 image capture.
+
+Using the media controller APIs (media-ctl [#f3]_), the ov5670 sensor is
+configured to send frames in packed raw Bayer format to the IPU3 CSI2
+receiver::
+
+    # This example assumes /dev/media0 as the CIO2 media device
+    export MDEV=/dev/media0
+
+    # and that the ov5670 sensor is connected to i2c bus 10 with address 0x36
+    export SDEV=$(media-ctl -d $MDEV -e "ov5670 10-0036")
+
+    # Establish the link for the media devices
+    media-ctl -d $MDEV -l "'ov5670 10-0036':0 -> 'ipu3-csi2 0':0[1]"
+
+    # Set the format for the media devices
+    media-ctl -d $MDEV -V "'ov5670 10-0036':0 [fmt:SGRBG10/2592x1944]"
+    media-ctl -d $MDEV -V "'ipu3-csi2 0':0 [fmt:SGRBG10/2592x1944]"
+    media-ctl -d $MDEV -V "'ipu3-csi2 0':1 [fmt:SGRBG10/2592x1944]"
+
+Once the media pipeline is configured, the desired sensor-specific settings
+(such as exposure and gain) can be set using the yavta tool, e.g.::
+
+    yavta -w 0x009e0903 444 $SDEV
+    yavta -w 0x009e0913 1024 $SDEV
+    yavta -w 0x009e0911 2046 $SDEV
+
+Once the desired sensor settings are applied, frame captures can be done as
+below, e.g.::
+
+    yavta --data-prefix -u -c10 -n5 -I -s2592x1944 --file=/tmp/frame-#.bin \
+          -f IPU3_SGRBG10 $(media-ctl -d $MDEV -e "ipu3-cio2 0")
+
+With the above command, 10 frames are captured at 2592x1944 resolution in the
+sGRBG10 format and output as the IPU3_SGRBG10 format.
+
+The captured frames are available as /tmp/frame-#.bin files.
+
+ImgU
+====
+
+The ImgU is represented as two V4L2 subdevs, each of which provides a V4L2
+subdev interface to the user space.
+
+Each V4L2 subdev represents a pipe, which can support a maximum of 2 streams.
+This helps to support advanced camera features like Continuous View Finder
+(CVF) and Snapshot During Video (SDV).
+
+The ImgU contains two independent pipes, each modelled as a V4L2 sub-device
+exposed to userspace as a V4L2 sub-device node.
+
+Each pipe has two sink pads and three source pads for the following purpose:
+
+.. tabularcolumns:: |p{0.8cm}|p{4.0cm}|p{4.0cm}|
+
+.. flat-table::
+
+ * - pad
+ - direction
+ - purpose
+
+ * - 0
+ - sink
+ - Input raw video stream
+
+ * - 1
+ - sink
+ - Processing parameters
+
+ * - 2
+ - source
+ - Output processed video stream
+
+ * - 3
+ - source
+ - Output viewfinder video stream
+
+ * - 4
+ - source
+ - 3A statistics
+
+Each pad is connected to a corresponding V4L2 video interface, exposed to
+userspace as a V4L2 video device node.
+
+Device operation
+----------------
+
+With the ImgU, once the input video node ("ipu3-imgu 0/1":0, in
+<entity>:<pad-number> format) is queued with a buffer (in packed raw Bayer
+format), the ImgU starts processing the buffer and produces the video output
+in YUV format and the statistics output on the respective output nodes. The
+driver is expected to have buffers ready for all of the parameter, output and
+statistics nodes when the input video node is queued with a buffer.
+
+At a minimum, all of input, main output, 3A statistics and viewfinder
+video nodes should be enabled for IPU3 to start image processing.
+
+Each ImgU V4L2 subdev has the following set of video nodes.
+
+input, output and viewfinder video nodes
+----------------------------------------
+
+The frames (in packed raw Bayer format specific to the IPU3) received by the
+input video node are processed by the IPU3 Imaging Unit and output to two
+video nodes, each targeting a different purpose (main output and viewfinder
+output).
+
+Details on the Bayer format specific to the IPU3 can be found in
+:ref:`v4l2-pix-fmt-ipu3-sbggr10`.
+
+The driver supports V4L2 Video Capture Interface as defined at :ref:`devices`.
+
+Only the multi-planar API is supported. More details can be found at
+:ref:`planar-apis`.
+
+Parameters video node
+---------------------
+
+The parameters video node receives the ImgU algorithm parameters that are used
+to configure how the ImgU algorithms process the image.
+
+Details on processing parameters specific to the IPU3 can be found in
+:ref:`v4l2-meta-fmt-params`.
+
+3A statistics video node
+------------------------
+
+3A statistics video node is used by the ImgU driver to output the 3A (auto
+focus, auto exposure and auto white balance) statistics for the frames that are
+being processed by the ImgU to user space applications. User space applications
+can use this statistics data to compute the desired algorithm parameters for
+the ImgU.
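+
+Conceptually, the feedback loop looks like the sketch below; the helper names
+are hypothetical placeholders, and the actual statistics and parameters
+layouts are defined in the IPU3 uapi header [#f5]_::
+
+    for (;;) {
+            /* dequeue a filled buffer from the 3A statistics video node */
+            dequeue_stats(stats_fd, &stats);
+            /* derive new algorithm parameters in user space */
+            compute_params(&stats, &params);
+            /* queue the parameters buffer to the parameters video node */
+            queue_params(params_fd, &params);
+    }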
+
+Configuring the Intel IPU3
+==========================
+
+The IPU3 ImgU pipelines can be configured using the Media Controller, defined at
+:ref:`media_controller`.
+
+Firmware binary selection
+-------------------------
+
+The firmware binary is selected using the V4L2_CID_INTEL_IPU3_MODE control,
+currently defined in drivers/staging/media/ipu3/include/intel-ipu3.h. "VIDEO"
+and "STILL" modes are available.
+
+Processing the image in raw Bayer format
+----------------------------------------
+
+Configuring ImgU V4L2 subdev for image processing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The ImgU V4L2 subdevs have to be configured with the media controller APIs to
+have all the video nodes set up correctly.
+
+Let us take "ipu3-imgu 0" subdev as an example.
+
+media-ctl -d $MDEV -r
+
+media-ctl -d $MDEV -l "ipu3-imgu 0 input":0 -> "ipu3-imgu 0":0[1]
+
+media-ctl -d $MDEV -l "ipu3-imgu 0":2 -> "ipu3-imgu 0 output":0[1]
+
+media-ctl -d $MDEV -l "ipu3-imgu 0":3 -> "ipu3-imgu 0 viewfinder":0[1]
+
+media-ctl -d $MDEV -l "ipu3-imgu 0":4 -> "ipu3-imgu 0 3a stat":0[1]
+
+Also the pipe mode of the corresponding V4L2 subdev should be set as desired
+(e.g. 0 for video mode or 1 for still mode) through the control id 0x009819a1
+as below::
+
+    yavta -w "0x009819A1 1" /dev/v4l-subdev7
+
+RAW Bayer frames go through the following ImgU pipeline HW blocks to have the
+processed image output to the DDR memory::
+
+    RAW Bayer frame -> Input Feeder -> Bayer Down Scaling (BDS) ->
+    Geometric Distortion Correction (GDC) -> DDR
+
+The ImgU V4L2 subdev has to be configured with the supported resolutions in all
+the above HW blocks, for a given input resolution.
+
+For a given supported resolution for an input frame, the Input Feeder, Bayer
+Down Scaling and GDC blocks should be configured with the supported resolutions.
+This information can be obtained by looking at the following IPU3 ImgU
+configuration table.
+
+https://chromium.googlesource.com/chromiumos/overlays/board-overlays/+/master
+
+Under baseboard-poppy/media-libs/cros-camera-hal-configs-poppy/files/gcss
+directory, graph_settings_ov5670.xml can be used as an example.
+
+The following steps prepare the ImgU pipeline for the image processing.
+
+1. The ImgU V4L2 subdev data format should be set by using the
+   VIDIOC_SUBDEV_S_FMT on pad 0, using the GDC width and height obtained
+   above.
+
+2. The ImgU V4L2 subdev cropping should be set by using the
+   VIDIOC_SUBDEV_S_SELECTION on pad 0, with V4L2_SEL_TGT_CROP as the target,
+   using the input feeder height and width.
+
+3. The ImgU V4L2 subdev composing should be set by using the
+   VIDIOC_SUBDEV_S_SELECTION on pad 0, with V4L2_SEL_TGT_COMPOSE as the
+   target, using the BDS height and width.
+
+For the ov5670 example, for an input frame with a resolution of 2592x1944
+(which is input to the ImgU subdev pad 0), the corresponding resolutions
+for input feeder, BDS and GDC are 2592x1944, 2592x1944 and 2560x1920
+respectively.
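+
+As a sketch of these three steps (ImgU subdev node open as ``fd``; the width
+and height values come from the configuration table above; error handling
+omitted)::
+
+    struct v4l2_subdev_format fmt = {0};
+    struct v4l2_subdev_selection sel = {0};
+
+    fmt.pad = 0;
+    fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+    fmt.format.width = 2560;                  /* GDC width */
+    fmt.format.height = 1920;                 /* GDC height */
+    ioctl(fd, VIDIOC_SUBDEV_S_FMT, &fmt);
+
+    sel.pad = 0;
+    sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+    sel.target = V4L2_SEL_TGT_CROP;
+    sel.r.width = 2592;                       /* input feeder width */
+    sel.r.height = 1944;                      /* input feeder height */
+    ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &sel);
+
+    sel.target = V4L2_SEL_TGT_COMPOSE;
+    sel.r.width = 2592;                       /* BDS width */
+    sel.r.height = 1944;                      /* BDS height */
+    ioctl(fd, VIDIOC_SUBDEV_S_SELECTION, &sel);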
+
+Once this is done, the received raw Bayer frames can be input to the ImgU
+V4L2 subdev as below, using the open source application v4l2n [#f1]_.
+
+For an image captured with 2592x1944 [#f4]_ resolution, with the desired
+output resolution as 2560x1920 and the viewfinder resolution as 2560x1920,
+the following v4l2n command can be used. This helps process the raw Bayer
+frames and produces the desired results for the main output image and the
+viewfinder output, in NV12 format::
+
+    v4l2n --pipe=4 --load=/tmp/frame-#.bin --open=/dev/video4 \
+          --fmt=type:VIDEO_OUTPUT_MPLANE,width=2592,height=1944,pixelformat=0X47337069 \
+          --reqbufs=type:VIDEO_OUTPUT_MPLANE,count:1 --pipe=1 --output=/tmp/frames.out \
+          --open=/dev/video5 \
+          --fmt=type:VIDEO_CAPTURE_MPLANE,width=2560,height=1920,pixelformat=NV12 \
+          --reqbufs=type:VIDEO_CAPTURE_MPLANE,count:1 --pipe=2 --output=/tmp/frames.vf \
+          --open=/dev/video6 \
+          --fmt=type:VIDEO_CAPTURE_MPLANE,width=2560,height=1920,pixelformat=NV12 \
+          --reqbufs=type:VIDEO_CAPTURE_MPLANE,count:1 --pipe=3 --open=/dev/video7 \
+          --output=/tmp/frames.3A --fmt=type:META_CAPTURE,? \
+          --reqbufs=count:1,type:META_CAPTURE --pipe=1,2,3,4 --stream=5
+
+where /dev/video4, /dev/video5, /dev/video6 and /dev/video7 devices point to
+input, output, viewfinder and 3A statistics video nodes respectively.
+
+Converting the raw Bayer image into YUV domain
+----------------------------------------------
+
+The processed images after the above step can be converted to the YUV domain
+as below.
+
+Main output frames
+~~~~~~~~~~~~~~~~~~
+
+::
+
+    raw2pnm -x2560 -y1920 -fNV12 /tmp/frames.out /tmp/frames.out.ppm
+
+where 2560x1920 is the output resolution, NV12 is the video format, followed
+by the input frame and the output PNM file.
+
+Viewfinder output frames
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+::
+
+    raw2pnm -x2560 -y1920 -fNV12 /tmp/frames.vf /tmp/frames.vf.ppm
+
+where 2560x1920 is the output resolution, NV12 is the video format, followed
+by the input frame and the output PNM file.
+
+Example user space code for IPU3
+================================
+
+User space code that configures and uses the IPU3 is available at:
+
+https://chromium.googlesource.com/chromiumos/platform/arc-camera/+/master/
+
+The source can be located under the hal/intel directory.
+
+References
+==========
+
+.. [#f5] include/uapi/linux/intel-ipu3.h
+
+.. [#f1] https://github.com/intel/nvt
+
+.. [#f2] http://git.ideasonboard.org/yavta.git
+
+.. [#f3] http://git.ideasonboard.org/?p=media-ctl.git;a=summary
+
+.. [#f4] ImgU limitation requires an additional 16x16 for all input resolutions
diff --git a/Documentation/media/v4l-drivers/ivtv-cardlist.rst b/Documentation/media/v4l-drivers/ivtv-cardlist.rst
index 022dca80c2c8..c34a9ebc9ac2 100644
--- a/Documentation/media/v4l-drivers/ivtv-cardlist.rst
+++ b/Documentation/media/v4l-drivers/ivtv-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
IVTV cards list
===============
diff --git a/Documentation/media/v4l-drivers/ivtv.rst b/Documentation/media/v4l-drivers/ivtv.rst
index 3ba464c4f9bf..7b8775d20214 100644
--- a/Documentation/media/v4l-drivers/ivtv.rst
+++ b/Documentation/media/v4l-drivers/ivtv.rst
@@ -1,3 +1,4 @@
+.. SPDX-License-Identifier: GPL-2.0
The ivtv driver
===============
diff --git a/Documentation/media/v4l-drivers/max2175.rst b/Documentation/media/v4l-drivers/max2175.rst
index b1a4c89fd869..a5e35059d98d 100644
--- a/Documentation/media/v4l-drivers/max2175.rst
+++ b/Documentation/media/v4l-drivers/max2175.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Maxim Integrated MAX2175 RF to bits tuner driver
================================================
diff --git a/Documentation/media/v4l-drivers/meye.rst b/Documentation/media/v4l-drivers/meye.rst
index cfaba6021850..a572996cdbf6 100644
--- a/Documentation/media/v4l-drivers/meye.rst
+++ b/Documentation/media/v4l-drivers/meye.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
.. include:: <isonum.txt>
Vaio Picturebook Motion Eye Camera Driver
diff --git a/Documentation/media/v4l-drivers/omap3isp.rst b/Documentation/media/v4l-drivers/omap3isp.rst
index 336e58feaee2..8974c444e3a1 100644
--- a/Documentation/media/v4l-drivers/omap3isp.rst
+++ b/Documentation/media/v4l-drivers/omap3isp.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
.. include:: <isonum.txt>
OMAP 3 Image Signal Processor (ISP) driver
diff --git a/Documentation/media/v4l-drivers/omap4_camera.rst b/Documentation/media/v4l-drivers/omap4_camera.rst
index 54b427b28e5f..24db4222d36d 100644
--- a/Documentation/media/v4l-drivers/omap4_camera.rst
+++ b/Documentation/media/v4l-drivers/omap4_camera.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
OMAP4 ISS Driver
================
diff --git a/Documentation/media/v4l-drivers/philips.rst b/Documentation/media/v4l-drivers/philips.rst
index 4f68947e6a13..e2840be10d08 100644
--- a/Documentation/media/v4l-drivers/philips.rst
+++ b/Documentation/media/v4l-drivers/philips.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Philips webcams (pwc driver)
============================
diff --git a/Documentation/media/v4l-drivers/pvrusb2.rst b/Documentation/media/v4l-drivers/pvrusb2.rst
index dc0e72d94b1a..83bfaa531ea8 100644
--- a/Documentation/media/v4l-drivers/pvrusb2.rst
+++ b/Documentation/media/v4l-drivers/pvrusb2.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The pvrusb2 driver
==================
diff --git a/Documentation/media/v4l-drivers/pxa_camera.rst b/Documentation/media/v4l-drivers/pxa_camera.rst
index 554f91b04e70..e4fbca755e1a 100644
--- a/Documentation/media/v4l-drivers/pxa_camera.rst
+++ b/Documentation/media/v4l-drivers/pxa_camera.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
PXA-Camera Host Driver
======================
diff --git a/Documentation/media/v4l-drivers/qcom_camss.rst b/Documentation/media/v4l-drivers/qcom_camss.rst
index f27c8df20b2b..6b15385b12b3 100644
--- a/Documentation/media/v4l-drivers/qcom_camss.rst
+++ b/Documentation/media/v4l-drivers/qcom_camss.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
.. include:: <isonum.txt>
Qualcomm Camera Subsystem driver
diff --git a/Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot b/Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot
index de34f0a7afdc..7ed243b41b67 100644
--- a/Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot
+++ b/Documentation/media/v4l-drivers/qcom_camss_8x96_graph.dot
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
digraph board {
rankdir=TB
n00000001 [label="{{<port0> 0} | msm_csiphy0\n/dev/v4l-subdev0 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
diff --git a/Documentation/media/v4l-drivers/qcom_camss_graph.dot b/Documentation/media/v4l-drivers/qcom_camss_graph.dot
index 827fc7112c1e..ef7dca92fd0b 100644
--- a/Documentation/media/v4l-drivers/qcom_camss_graph.dot
+++ b/Documentation/media/v4l-drivers/qcom_camss_graph.dot
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
digraph board {
rankdir=TB
n00000001 [label="{{<port0> 0} | msm_csiphy0\n/dev/v4l-subdev0 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
diff --git a/Documentation/media/v4l-drivers/radiotrack.rst b/Documentation/media/v4l-drivers/radiotrack.rst
index 2f6325ebfd16..a85cb6205db8 100644
--- a/Documentation/media/v4l-drivers/radiotrack.rst
+++ b/Documentation/media/v4l-drivers/radiotrack.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The Radiotrack radio driver
===========================
diff --git a/Documentation/media/v4l-drivers/rcar-fdp1.rst b/Documentation/media/v4l-drivers/rcar-fdp1.rst
index a59b1e8e3e9c..88b0edcf9046 100644
--- a/Documentation/media/v4l-drivers/rcar-fdp1.rst
+++ b/Documentation/media/v4l-drivers/rcar-fdp1.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Renesas R-Car Fine Display Processor (FDP1) Driver
==================================================
diff --git a/Documentation/media/v4l-drivers/saa7134-cardlist.rst b/Documentation/media/v4l-drivers/saa7134-cardlist.rst
index 6e4c35cbaabf..afb0e2fb52b0 100644
--- a/Documentation/media/v4l-drivers/saa7134-cardlist.rst
+++ b/Documentation/media/v4l-drivers/saa7134-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
SAA7134 cards list
==================
diff --git a/Documentation/media/v4l-drivers/saa7134.rst b/Documentation/media/v4l-drivers/saa7134.rst
index 36b2ee9e0fdc..15d06facdbc1 100644
--- a/Documentation/media/v4l-drivers/saa7134.rst
+++ b/Documentation/media/v4l-drivers/saa7134.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The saa7134 driver
==================
diff --git a/Documentation/media/v4l-drivers/saa7164-cardlist.rst b/Documentation/media/v4l-drivers/saa7164-cardlist.rst
index e28382ba82e6..e8f36e084537 100644
--- a/Documentation/media/v4l-drivers/saa7164-cardlist.rst
+++ b/Documentation/media/v4l-drivers/saa7164-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
SAA7164 cards list
==================
diff --git a/Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst b/Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst
index e40ffea7708c..822fcb8368ae 100644
--- a/Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst
+++ b/Documentation/media/v4l-drivers/sh_mobile_ceu_camera.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Cropping and Scaling algorithm, used in the sh_mobile_ceu_camera driver
=======================================================================
@@ -114,7 +116,7 @@ window:
S_CROP
------
-The API at http://v4l2spec.bytesex.org/spec/x1904.htm says:
+The :ref:`V4L2 crop API <crop-scale>` says:
"...specification does not define an origin or units. However by convention
drivers should horizontally count unscaled samples relative to 0H."
diff --git a/Documentation/media/v4l-drivers/si470x.rst b/Documentation/media/v4l-drivers/si470x.rst
index 955d8ca159fe..d53bf5f95200 100644
--- a/Documentation/media/v4l-drivers/si470x.rst
+++ b/Documentation/media/v4l-drivers/si470x.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
.. include:: <isonum.txt>
The Silicon Labs Si470x FM Radio Receivers driver
diff --git a/Documentation/media/v4l-drivers/si4713.rst b/Documentation/media/v4l-drivers/si4713.rst
index 3022e7cfe9a8..be8e6b49b7b4 100644
--- a/Documentation/media/v4l-drivers/si4713.rst
+++ b/Documentation/media/v4l-drivers/si4713.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
.. include:: <isonum.txt>
The Silicon Labs Si4713 FM Radio Transmitter Driver
diff --git a/Documentation/media/v4l-drivers/si476x.rst b/Documentation/media/v4l-drivers/si476x.rst
index 677512566f15..87062301d6a1 100644
--- a/Documentation/media/v4l-drivers/si476x.rst
+++ b/Documentation/media/v4l-drivers/si476x.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
.. include:: <isonum.txt>
diff --git a/Documentation/media/v4l-drivers/soc-camera.rst b/Documentation/media/v4l-drivers/soc-camera.rst
index 79d09e423700..7c39711aebf8 100644
--- a/Documentation/media/v4l-drivers/soc-camera.rst
+++ b/Documentation/media/v4l-drivers/soc-camera.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The Soc-Camera Drivers
======================
diff --git a/Documentation/media/v4l-drivers/tm6000-cardlist.rst b/Documentation/media/v4l-drivers/tm6000-cardlist.rst
index 6bd083544457..6d2769c0f4d8 100644
--- a/Documentation/media/v4l-drivers/tm6000-cardlist.rst
+++ b/Documentation/media/v4l-drivers/tm6000-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
TM6000 cards list
=================
diff --git a/Documentation/media/v4l-drivers/tuner-cardlist.rst b/Documentation/media/v4l-drivers/tuner-cardlist.rst
index 276dd90e0c59..362617c59c5d 100644
--- a/Documentation/media/v4l-drivers/tuner-cardlist.rst
+++ b/Documentation/media/v4l-drivers/tuner-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Tuner cards list
================
diff --git a/Documentation/media/v4l-drivers/tuners.rst b/Documentation/media/v4l-drivers/tuners.rst
index c3e8a1cf64a6..7509be888909 100644
--- a/Documentation/media/v4l-drivers/tuners.rst
+++ b/Documentation/media/v4l-drivers/tuners.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Tuner drivers
=============
diff --git a/Documentation/media/v4l-drivers/usbvision-cardlist.rst b/Documentation/media/v4l-drivers/usbvision-cardlist.rst
index 5a8ffbfc204e..6aee115ee6e2 100644
--- a/Documentation/media/v4l-drivers/usbvision-cardlist.rst
+++ b/Documentation/media/v4l-drivers/usbvision-cardlist.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
USBvision cards list
====================
diff --git a/Documentation/media/v4l-drivers/uvcvideo.rst b/Documentation/media/v4l-drivers/uvcvideo.rst
index d68b3d59a4b5..e5fd8fad333c 100644
--- a/Documentation/media/v4l-drivers/uvcvideo.rst
+++ b/Documentation/media/v4l-drivers/uvcvideo.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The Linux USB Video Class (UVC) driver
======================================
diff --git a/Documentation/media/v4l-drivers/v4l-with-ir.rst b/Documentation/media/v4l-drivers/v4l-with-ir.rst
index 613e1e79fc96..ce23c8a7bc93 100644
--- a/Documentation/media/v4l-drivers/v4l-with-ir.rst
+++ b/Documentation/media/v4l-drivers/v4l-with-ir.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Infrared remote control support in video4linux drivers
======================================================
diff --git a/Documentation/media/v4l-drivers/vivid.rst b/Documentation/media/v4l-drivers/vivid.rst
index 089595ce11c5..edb6f33e029c 100644
--- a/Documentation/media/v4l-drivers/vivid.rst
+++ b/Documentation/media/v4l-drivers/vivid.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The Virtual Video Test Driver (vivid)
=====================================
diff --git a/Documentation/media/v4l-drivers/zoran.rst b/Documentation/media/v4l-drivers/zoran.rst
index c3a0f7bc2c7b..d2724a863d1d 100644
--- a/Documentation/media/v4l-drivers/zoran.rst
+++ b/Documentation/media/v4l-drivers/zoran.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
The Zoran driver
================
diff --git a/Documentation/media/v4l-drivers/zr364xx.rst b/Documentation/media/v4l-drivers/zr364xx.rst
index 3d193f01d8bb..ec8acb3e98fc 100644
--- a/Documentation/media/v4l-drivers/zr364xx.rst
+++ b/Documentation/media/v4l-drivers/zr364xx.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
Zoran 364xx based USB webcam module
===================================
diff --git a/Documentation/media/video.h.rst.exceptions b/Documentation/media/video.h.rst.exceptions
index 371cdbd7d062..ea9de59ad8b7 100644
--- a/Documentation/media/video.h.rst.exceptions
+++ b/Documentation/media/video.h.rst.exceptions
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
# Ignore header name
ignore define _UAPI_DVBVIDEO_H_
diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions
index 1ec425a7c364..64d348e67df9 100644
--- a/Documentation/media/videodev2.h.rst.exceptions
+++ b/Documentation/media/videodev2.h.rst.exceptions
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
# Ignore header name
ignore define _UAPI__LINUX_VIDEODEV2_H
@@ -28,6 +30,7 @@ replace symbol V4L2_FIELD_TOP :c:type:`v4l2_field`
# Documented enum v4l2_buf_type
replace symbol V4L2_BUF_TYPE_META_CAPTURE :c:type:`v4l2_buf_type`
+replace symbol V4L2_BUF_TYPE_META_OUTPUT :c:type:`v4l2_buf_type`
replace symbol V4L2_BUF_TYPE_SDR_CAPTURE :c:type:`v4l2_buf_type`
replace symbol V4L2_BUF_TYPE_SDR_OUTPUT :c:type:`v4l2_buf_type`
replace symbol V4L2_BUF_TYPE_SLICED_VBI_CAPTURE :c:type:`v4l2_buf_type`
@@ -161,6 +164,7 @@ replace define V4L2_CAP_META_CAPTURE device-capabilities
replace define V4L2_CAP_READWRITE device-capabilities
replace define V4L2_CAP_ASYNCIO device-capabilities
replace define V4L2_CAP_STREAMING device-capabilities
+replace define V4L2_CAP_META_OUTPUT device-capabilities
replace define V4L2_CAP_DEVICE_CAPS device-capabilities
replace define V4L2_CAP_TOUCH device-capabilities
diff --git a/Documentation/networking/3c509.txt b/Documentation/networking/device_drivers/3com/3c509.txt
index fbf722e15ac3..fbf722e15ac3 100644
--- a/Documentation/networking/3c509.txt
+++ b/Documentation/networking/device_drivers/3com/3c509.txt
diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/device_drivers/3com/vortex.txt
index ad3dead052a4..587f3fcfbcae 100644
--- a/Documentation/networking/vortex.txt
+++ b/Documentation/networking/device_drivers/3com/vortex.txt
@@ -1,4 +1,4 @@
-Documentation/networking/vortex.txt
+Documentation/networking/device_drivers/3com/vortex.txt
Andrew Morton
30 April 2000
diff --git a/Documentation/networking/ena.txt b/Documentation/networking/device_drivers/amazon/ena.txt
index 2b4b6f57e549..2b4b6f57e549 100644
--- a/Documentation/networking/ena.txt
+++ b/Documentation/networking/device_drivers/amazon/ena.txt
diff --git a/Documentation/networking/cxgb.txt b/Documentation/networking/device_drivers/chelsio/cxgb.txt
index 20a887615c4a..20a887615c4a 100644
--- a/Documentation/networking/cxgb.txt
+++ b/Documentation/networking/device_drivers/chelsio/cxgb.txt
diff --git a/Documentation/networking/cs89x0.txt b/Documentation/networking/device_drivers/cirrus/cs89x0.txt
index 0e190180eec8..0e190180eec8 100644
--- a/Documentation/networking/cs89x0.txt
+++ b/Documentation/networking/device_drivers/cirrus/cs89x0.txt
diff --git a/Documentation/networking/dm9000.txt b/Documentation/networking/device_drivers/davicom/dm9000.txt
index 5552e2e575c5..5552e2e575c5 100644
--- a/Documentation/networking/dm9000.txt
+++ b/Documentation/networking/device_drivers/davicom/dm9000.txt
diff --git a/Documentation/networking/de4x5.txt b/Documentation/networking/device_drivers/dec/de4x5.txt
index c8e4ca9b2c3e..c8e4ca9b2c3e 100644
--- a/Documentation/networking/de4x5.txt
+++ b/Documentation/networking/device_drivers/dec/de4x5.txt
diff --git a/Documentation/networking/dmfe.txt b/Documentation/networking/device_drivers/dec/dmfe.txt
index 25320bf19c86..25320bf19c86 100644
--- a/Documentation/networking/dmfe.txt
+++ b/Documentation/networking/device_drivers/dec/dmfe.txt
diff --git a/Documentation/networking/dl2k.txt b/Documentation/networking/device_drivers/dlink/dl2k.txt
index cba74f7a3abc..cba74f7a3abc 100644
--- a/Documentation/networking/dl2k.txt
+++ b/Documentation/networking/device_drivers/dlink/dl2k.txt
diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/device_drivers/freescale/dpaa.txt
index f88194f71c54..f88194f71c54 100644
--- a/Documentation/networking/dpaa.txt
+++ b/Documentation/networking/device_drivers/freescale/dpaa.txt
diff --git a/Documentation/networking/dpaa2/dpio-driver.rst b/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst
index 13588104161b..a188466b6698 100644
--- a/Documentation/networking/dpaa2/dpio-driver.rst
+++ b/Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst
@@ -19,8 +19,8 @@ pool management for network interfaces.
This document provides an overview the Linux DPIO driver, its
subcomponents, and its APIs.
-See Documentation/networking/dpaa2/overview.rst for a general overview of DPAA2
-and the general DPAA2 driver architecture in Linux.
+See Documentation/networking/device_drivers/freescale/dpaa2/overview.rst for
+a general overview of DPAA2 and the general DPAA2 driver architecture in Linux.
Driver Overview
---------------
diff --git a/Documentation/networking/dpaa2/ethernet-driver.rst b/Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst
index 90ec940749e8..cb4c9a0c5a17 100644
--- a/Documentation/networking/dpaa2/ethernet-driver.rst
+++ b/Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst
@@ -33,7 +33,7 @@ hardware resources, like queues, do not have a corresponding MC object and
are treated as internal resources of other objects.
For a more detailed description of the DPAA2 architecture and its object
-abstractions see *Documentation/networking/dpaa2/overview.rst*.
+abstractions see *Documentation/networking/device_drivers/freescale/dpaa2/overview.rst*.
Each Linux net device is built on top of a Datapath Network Interface (DPNI)
object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
diff --git a/Documentation/networking/dpaa2/index.rst b/Documentation/networking/device_drivers/freescale/dpaa2/index.rst
index 67bd87fe6c53..67bd87fe6c53 100644
--- a/Documentation/networking/dpaa2/index.rst
+++ b/Documentation/networking/device_drivers/freescale/dpaa2/index.rst
diff --git a/Documentation/networking/dpaa2/overview.rst b/Documentation/networking/device_drivers/freescale/dpaa2/overview.rst
index d638b5a8aadd..d638b5a8aadd 100644
--- a/Documentation/networking/dpaa2/overview.rst
+++ b/Documentation/networking/device_drivers/freescale/dpaa2/overview.rst
diff --git a/Documentation/networking/gianfar.txt b/Documentation/networking/device_drivers/freescale/gianfar.txt
index ba1daea7f2e4..ba1daea7f2e4 100644
--- a/Documentation/networking/gianfar.txt
+++ b/Documentation/networking/device_drivers/freescale/gianfar.txt
diff --git a/Documentation/networking/e100.rst b/Documentation/networking/device_drivers/intel/e100.rst
index 5e2839b4ec92..5e2839b4ec92 100644
--- a/Documentation/networking/e100.rst
+++ b/Documentation/networking/device_drivers/intel/e100.rst
diff --git a/Documentation/networking/e1000.rst b/Documentation/networking/device_drivers/intel/e1000.rst
index 6379d4d20771..6379d4d20771 100644
--- a/Documentation/networking/e1000.rst
+++ b/Documentation/networking/device_drivers/intel/e1000.rst
diff --git a/Documentation/networking/e1000e.rst b/Documentation/networking/device_drivers/intel/e1000e.rst
index 33554e5416c5..33554e5416c5 100644
--- a/Documentation/networking/e1000e.rst
+++ b/Documentation/networking/device_drivers/intel/e1000e.rst
diff --git a/Documentation/networking/fm10k.rst b/Documentation/networking/device_drivers/intel/fm10k.rst
index bf5e5942f28d..bf5e5942f28d 100644
--- a/Documentation/networking/fm10k.rst
+++ b/Documentation/networking/device_drivers/intel/fm10k.rst
diff --git a/Documentation/networking/i40e.rst b/Documentation/networking/device_drivers/intel/i40e.rst
index 0cc16c525d10..0cc16c525d10 100644
--- a/Documentation/networking/i40e.rst
+++ b/Documentation/networking/device_drivers/intel/i40e.rst
diff --git a/Documentation/networking/iavf.rst b/Documentation/networking/device_drivers/intel/iavf.rst
index f8b42b64eb28..f8b42b64eb28 100644
--- a/Documentation/networking/iavf.rst
+++ b/Documentation/networking/device_drivers/intel/iavf.rst
diff --git a/Documentation/networking/ice.rst b/Documentation/networking/device_drivers/intel/ice.rst
index 4d118b827bbb..4d118b827bbb 100644
--- a/Documentation/networking/ice.rst
+++ b/Documentation/networking/device_drivers/intel/ice.rst
diff --git a/Documentation/networking/igb.rst b/Documentation/networking/device_drivers/intel/igb.rst
index ba16b86d5593..e87a4a72ea2d 100644
--- a/Documentation/networking/igb.rst
+++ b/Documentation/networking/device_drivers/intel/igb.rst
@@ -177,6 +177,25 @@ rate limit using the IProute2 tool. Download the latest version of the
IProute2 tool from Sourceforge if your version does not have all the features
you require.
+Credit Based Shaper (Qav Mode)
+------------------------------
+When enabling the CBS qdisc in the hardware offload mode, traffic shaping
+using the CBS algorithm (described in IEEE 802.1Q-2018 Section 8.6.8.2 and
+discussed in Annex L) will run in the i210 controller, so it's more accurate
+and uses less CPU.
+
+When using offloaded CBS, and the traffic rate obeys the configured rate
+(doesn't go above it), CBS should have little to no effect on the latency.
+
+The offloaded version of the algorithm has some limits, caused by how the idle
+slope is expressed in the adapter's registers. It can only represent idle
+slopes in 16.38431 kbps units, which means that if an idle slope of 2576 kbps
+is requested, the controller will be configured to use an idle slope of
+~2589 kbps, because the driver rounds the value up. For more details, see the
+comments on :c:func:`igb_config_tx_modes()`.
+
+NOTE: This feature is exclusive to i210 models.
+
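+A back-of-the-envelope illustration of that rounding (a sketch in python;
+the 16.38431 kbps unit is taken from the paragraph above, and the round-up
+behavior from the driver comments)::
+
+    import math
+
+    UNIT_KBPS = 16.38431          # idle slope register granularity
+    requested = 2576              # requested idle slope, in kbps
+
+    units = math.ceil(requested / UNIT_KBPS)   # the driver rounds up
+    print(units * UNIT_KBPS)                   # ~2588.7, i.e. ~2589 kbps
+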
Support
=======
diff --git a/Documentation/networking/igbvf.rst b/Documentation/networking/device_drivers/intel/igbvf.rst
index a8a9ffa4f8d3..a8a9ffa4f8d3 100644
--- a/Documentation/networking/igbvf.rst
+++ b/Documentation/networking/device_drivers/intel/igbvf.rst
diff --git a/Documentation/networking/README.ipw2100 b/Documentation/networking/device_drivers/intel/ipw2100.txt
index 6f85e1d06031..6f85e1d06031 100644
--- a/Documentation/networking/README.ipw2100
+++ b/Documentation/networking/device_drivers/intel/ipw2100.txt
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/device_drivers/intel/ipw2200.txt
index b7658bed4906..b7658bed4906 100644
--- a/Documentation/networking/README.ipw2200
+++ b/Documentation/networking/device_drivers/intel/ipw2200.txt
diff --git a/Documentation/networking/ixgb.rst b/Documentation/networking/device_drivers/intel/ixgb.rst
index 8bd80e27843d..8bd80e27843d 100644
--- a/Documentation/networking/ixgb.rst
+++ b/Documentation/networking/device_drivers/intel/ixgb.rst
diff --git a/Documentation/networking/ixgbe.rst b/Documentation/networking/device_drivers/intel/ixgbe.rst
index 725fc697fd8f..86d887a63606 100644
--- a/Documentation/networking/ixgbe.rst
+++ b/Documentation/networking/device_drivers/intel/ixgbe.rst
@@ -501,6 +501,19 @@ NOTE: This feature can be disabled for a specific Virtual Function (VF)::
ip link set <pf dev> vf <vf id> spoofchk {off|on}
+IPsec Offload
+-------------
+The ixgbe driver supports IPsec Hardware Offload. When creating Security
+Associations with "ip xfrm ..." the 'offload' tag option can be used to
+register the IPsec SA with the driver in order to get higher throughput
+for the secure communications.
+
+The offload is also supported for ixgbe's VFs, but the VF must be set as
+'trusted' and the support must be enabled with::
+
+ ethtool --set-priv-flags eth<x> vf-ipsec on
+ ip link set eth<x> vf <y> trust on
+
Known Issues/Troubleshooting
============================
diff --git a/Documentation/networking/ixgbevf.rst b/Documentation/networking/device_drivers/intel/ixgbevf.rst
index 56cde6366c2f..56cde6366c2f 100644
--- a/Documentation/networking/ixgbevf.rst
+++ b/Documentation/networking/device_drivers/intel/ixgbevf.rst
diff --git a/Documentation/networking/netvsc.txt b/Documentation/networking/device_drivers/microsoft/netvsc.txt
index 3bfa635bbbd5..3bfa635bbbd5 100644
--- a/Documentation/networking/netvsc.txt
+++ b/Documentation/networking/device_drivers/microsoft/netvsc.txt
diff --git a/Documentation/networking/s2io.txt b/Documentation/networking/device_drivers/neterion/s2io.txt
index 0362a42f7cf4..0362a42f7cf4 100644
--- a/Documentation/networking/s2io.txt
+++ b/Documentation/networking/device_drivers/neterion/s2io.txt
diff --git a/Documentation/networking/vxge.txt b/Documentation/networking/device_drivers/neterion/vxge.txt
index abfec245f97c..abfec245f97c 100644
--- a/Documentation/networking/vxge.txt
+++ b/Documentation/networking/device_drivers/neterion/vxge.txt
diff --git a/Documentation/networking/LICENSE.qla3xxx b/Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
index 2f2077e34d81..2f2077e34d81 100644
--- a/Documentation/networking/LICENSE.qla3xxx
+++ b/Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
diff --git a/Documentation/networking/LICENSE.qlcnic b/Documentation/networking/device_drivers/qlogic/LICENSE.qlcnic
index 2ae3b64983ab..2ae3b64983ab 100644
--- a/Documentation/networking/LICENSE.qlcnic
+++ b/Documentation/networking/device_drivers/qlogic/LICENSE.qlcnic
diff --git a/Documentation/networking/LICENSE.qlge b/Documentation/networking/device_drivers/qlogic/LICENSE.qlge
index ce64e4d15b21..ce64e4d15b21 100644
--- a/Documentation/networking/LICENSE.qlge
+++ b/Documentation/networking/device_drivers/qlogic/LICENSE.qlge
diff --git a/Documentation/networking/rmnet.txt b/Documentation/networking/device_drivers/qualcomm/rmnet.txt
index 6b341eaf2062..6b341eaf2062 100644
--- a/Documentation/networking/rmnet.txt
+++ b/Documentation/networking/device_drivers/qualcomm/rmnet.txt
diff --git a/Documentation/networking/README.sb1000 b/Documentation/networking/device_drivers/sb1000.txt
index f92c2aac56a9..f92c2aac56a9 100644
--- a/Documentation/networking/README.sb1000
+++ b/Documentation/networking/device_drivers/sb1000.txt
diff --git a/Documentation/networking/smc9.txt b/Documentation/networking/device_drivers/smsc/smc9.txt
index d1e15074e43d..d1e15074e43d 100644
--- a/Documentation/networking/smc9.txt
+++ b/Documentation/networking/device_drivers/smsc/smc9.txt
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/device_drivers/stmicro/stmmac.txt
index 2bb07078f535..2bb07078f535 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/device_drivers/stmicro/stmmac.txt
diff --git a/Documentation/networking/ti-cpsw.txt b/Documentation/networking/device_drivers/ti/cpsw.txt
index d4d4c0751a09..d4d4c0751a09 100644
--- a/Documentation/networking/ti-cpsw.txt
+++ b/Documentation/networking/device_drivers/ti/cpsw.txt
diff --git a/Documentation/networking/tlan.txt b/Documentation/networking/device_drivers/ti/tlan.txt
index 34550dfcef74..34550dfcef74 100644
--- a/Documentation/networking/tlan.txt
+++ b/Documentation/networking/device_drivers/ti/tlan.txt
diff --git a/Documentation/networking/spider_net.txt b/Documentation/networking/device_drivers/toshiba/spider_net.txt
index b0b75f8463b3..b0b75f8463b3 100644
--- a/Documentation/networking/spider_net.txt
+++ b/Documentation/networking/device_drivers/toshiba/spider_net.txt
diff --git a/Documentation/networking/devlink-params.txt b/Documentation/networking/devlink-params.txt
index ae444ffe73ac..2d26434ddcf8 100644
--- a/Documentation/networking/devlink-params.txt
+++ b/Documentation/networking/devlink-params.txt
@@ -40,3 +40,12 @@ msix_vec_per_pf_min [DEVICE, GENERIC]
for the device initialization. Value is same across all
physical functions (PFs) in the device.
Type: u32
+
+fw_load_policy [DEVICE, GENERIC]
+ Controls the device's firmware loading policy.
+ Valid values:
+ * DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER (0)
+ Load firmware version preferred by the driver.
+ * DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH (1)
+ Load firmware currently stored in flash.
+ Type: u8
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index bd89dae8d578..6a47629ef8ed 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -31,6 +31,7 @@ Contents:
net_failover
alias
bridge
+ snmp_counter
.. only:: subproject
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 32b21571adfe..acdfb5d2bcaa 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -108,8 +108,8 @@ neigh/default/gc_thresh2 - INTEGER
Default: 512
neigh/default/gc_thresh3 - INTEGER
- Maximum number of neighbor entries allowed. Increase this
- when using large numbers of interfaces and when communicating
+ Maximum number of non-PERMANENT neighbor entries allowed. Increase
+ this when using large numbers of interfaces and when communicating
with large numbers of directly-connected peers.
Default: 1024
@@ -370,6 +370,7 @@ tcp_l3mdev_accept - BOOLEAN
derived from the listen socket to be bound to the L3 domain in
which the packets originated. Only valid when the kernel was
compiled with CONFIG_NET_L3_MASTER_DEV.
+ Default: 0 (disabled)
tcp_low_latency - BOOLEAN
This is a legacy option, it has no effect anymore.
@@ -758,7 +759,7 @@ tcp_limit_output_bytes - INTEGER
flows, for typical pfifo_fast qdiscs. tcp_limit_output_bytes
limits the number of bytes on qdisc or device to reduce artificial
RTT/cwnd and reduce bufferbloat.
- Default: 262144
+ Default: 1048576 (16 * 65536)
tcp_challenge_ack_limit - INTEGER
Limits number of Challenge ACK sent per second, as recommended
@@ -773,6 +774,7 @@ udp_l3mdev_accept - BOOLEAN
being received regardless of the L3 domain in which they
originated. Only valid when the kernel was compiled with
CONFIG_NET_L3_MASTER_DEV.
+ Default: 0 (disabled)
udp_mem - vector of 3 INTEGERs: min, pressure, max
Number of pages allowed for queueing by all UDP sockets.
@@ -799,6 +801,16 @@ udp_wmem_min - INTEGER
total pages of UDP sockets exceed udp_mem pressure. The unit is byte.
Default: 4K
+RAW variables:
+
+raw_l3mdev_accept - BOOLEAN
+ Enabling this option allows a "global" bound socket to work
+ across L3 master domains (e.g., VRFs) with packets capable of
+ being received regardless of the L3 domain in which they
+ originated. Only valid when the kernel was compiled with
+ CONFIG_NET_L3_MASTER_DEV.
+ Default: 1 (enabled)
+
CIPSOv4 Variables:
cipso_cache_enable - BOOLEAN
diff --git a/Documentation/networking/netdev-features.txt b/Documentation/networking/netdev-features.txt
index c4a54c162547..58dd1c1e3c65 100644
--- a/Documentation/networking/netdev-features.txt
+++ b/Documentation/networking/netdev-features.txt
@@ -115,7 +115,7 @@ set, be it TCPv4 (when NETIF_F_TSO is enabled) or TCPv6 (NETIF_F_TSO6).
* Transmit UDP segmentation offload
-NETIF_F_GSO_UDP_GSO_L4 accepts a single UDP header with a payload that exceeds
+NETIF_F_GSO_UDP_L4 accepts a single UDP header with a payload that exceeds
gso_size. On segmentation, it segments the payload on gso_size boundaries and
replicates the network and UDP headers (fixing up the last one if less than
gso_size).
diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt
index 1669dc2419fd..f75c2ce6e136 100644
--- a/Documentation/networking/nf_conntrack-sysctl.txt
+++ b/Documentation/networking/nf_conntrack-sysctl.txt
@@ -157,7 +157,16 @@ nf_conntrack_udp_timeout - INTEGER (seconds)
default 30
nf_conntrack_udp_timeout_stream - INTEGER (seconds)
- default 180
+ default 120
This extended timeout will be used in case there is an UDP stream
detected.
+
+nf_conntrack_gre_timeout - INTEGER (seconds)
+ default 30
+
+nf_conntrack_gre_timeout_stream - INTEGER (seconds)
+ default 180
+
+ This extended timeout will be used in case there is a GRE stream
+ detected.
diff --git a/Documentation/networking/snmp_counter.rst b/Documentation/networking/snmp_counter.rst
new file mode 100644
index 000000000000..f8eb77ddbd44
--- /dev/null
+++ b/Documentation/networking/snmp_counter.rst
@@ -0,0 +1,1190 @@
+============
+SNMP counter
+============
+
+This document explains the meaning of SNMP counters.
+
+General IPv4 counters
+=====================
+All layer 4 packets and ICMP packets will change these counters, but
+these counters won't be changed by layer 2 packets (such as STP) or
+ARP packets.
+
+* IpInReceives
+Defined in `RFC1213 ipInReceives`_
+
+.. _RFC1213 ipInReceives: https://tools.ietf.org/html/rfc1213#page-26
+
+The number of packets received by the IP layer. It is increased at the
+beginning of the ip_rcv function and is always updated together with
+IpExtInOctets. It is increased even if the packet is dropped
+later (e.g. because the IP header is invalid or the checksum is
+wrong). It indicates the number of aggregated segments after
+GRO/LRO.
+
+* IpInDelivers
+Defined in `RFC1213 ipInDelivers`_
+
+.. _RFC1213 ipInDelivers: https://tools.ietf.org/html/rfc1213#page-28
+
+The number of packets delivered to the upper layer protocols, e.g. TCP,
+UDP, ICMP and so on. If no one listens on a raw socket, only packets of
+kernel-supported protocols will be delivered; if someone listens on a raw
+socket, all valid IP packets will be delivered.
+
+* IpOutRequests
+Defined in `RFC1213 ipOutRequests`_
+
+.. _RFC1213 ipOutRequests: https://tools.ietf.org/html/rfc1213#page-28
+
+The number of packets sent via the IP layer, for both unicast and
+multicast packets. It is always updated together with
+IpExtOutOctets.
+
+* IpExtInOctets and IpExtOutOctets
+These are Linux kernel extensions with no RFC definitions. Please note,
+RFC1213 indeed defines ifInOctets and ifOutOctets, but they
+are different things: ifInOctets and ifOutOctets include the MAC
+layer header size, while IpExtInOctets and IpExtOutOctets don't; they
+only include the IP layer header and the IP layer data.
+
+* IpExtInNoECTPkts, IpExtInECT1Pkts, IpExtInECT0Pkts, IpExtInCEPkts
+They indicate the number of the four kinds of ECN IP packets; please
+refer to `Explicit Congestion Notification`_ for more details.
+
+.. _Explicit Congestion Notification: https://tools.ietf.org/html/rfc3168#page-6
+
+These 4 counters calculate how many packets were received per ECN
+status. They count the real frame number regardless of LRO/GRO. So
+for the same GRO-aggregated traffic, you might find that IpInReceives
+counts 1, but IpExtInNoECTPkts counts 2 or more.
+
+* IpInHdrErrors
+Defined in `RFC1213 ipInHdrErrors`_. It indicates packets dropped
+due to IP header errors. It might happen in both the IP input
+and the IP forward paths.
+
+.. _RFC1213 ipInHdrErrors: https://tools.ietf.org/html/rfc1213#page-27
+
+* IpInAddrErrors
+Defined in `RFC1213 ipInAddrErrors`_. It will be increased in two
+scenarios: (1) The IP address is invalid. (2) The destination IP
+address is not a local address and IP forwarding is not enabled.
+
+.. _RFC1213 ipInAddrErrors: https://tools.ietf.org/html/rfc1213#page-27
+
+* IpExtInNoRoutes
+This counter means the packet was dropped because the IP stack could
+not find a route for it in the route table. It might happen when IP
+forwarding is enabled, the destination IP address is not a local
+address, and there is no route for the destination IP
+address.
+
+* IpInUnknownProtos
+Defined in `RFC1213 ipInUnknownProtos`_. It will be increased if the
+layer 4 protocol is unsupported by the kernel. If an application is
+using a raw socket, the kernel will always deliver the packet to the
+raw socket and this counter won't be increased.
+
+.. _RFC1213 ipInUnknownProtos: https://tools.ietf.org/html/rfc1213#page-27
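+
+For illustration, a raw packet with an unassigned protocol number can be
+sent from one host to another (a hedged sketch: it needs root, protocol
+number 200 is just an arbitrary unassigned value, and it assumes nothing
+on the receiving host listens on a raw socket for that protocol)::
+
+    import socket
+
+    # run on nstat-a; nstat-b should then see IpInUnknownProtos
+    # increase, since no raw socket there is bound to protocol 200
+    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, 200)
+    s.sendto(b'payload', ('nstat-b', 0))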
+
+* IpExtInTruncatedPkts
+For an IPv4 packet, it means the actual data size is smaller than the
+"Total Length" field in the IPv4 header.
+
+* IpInDiscards
+Defined in `RFC1213 ipInDiscards`_. It indicates the packet was dropped
+in the IP receiving path due to kernel internal reasons (e.g. not
+enough memory).
+
+.. _RFC1213 ipInDiscards: https://tools.ietf.org/html/rfc1213#page-28
+
+* IpOutDiscards
+Defined in `RFC1213 ipOutDiscards`_. It indicates the packet was
+dropped in the IP sending path due to kernel internal reasons.
+
+.. _RFC1213 ipOutDiscards: https://tools.ietf.org/html/rfc1213#page-28
+
+* IpOutNoRoutes
+Defined in `RFC1213 ipOutNoRoutes`_. It indicates the packet was
+dropped in the IP sending path because no route was found for it.
+
+.. _RFC1213 ipOutNoRoutes: https://tools.ietf.org/html/rfc1213#page-29
+
+ICMP counters
+=============
+* IcmpInMsgs and IcmpOutMsgs
+Defined by `RFC1213 icmpInMsgs`_ and `RFC1213 icmpOutMsgs`_
+
+.. _RFC1213 icmpInMsgs: https://tools.ietf.org/html/rfc1213#page-41
+.. _RFC1213 icmpOutMsgs: https://tools.ietf.org/html/rfc1213#page-43
+
+As mentioned in RFC1213, these two counters include errors; they
+would be increased even if the ICMP packet has an invalid type. The
+ICMP output path will check the header of a raw socket, so
+IcmpOutMsgs would still be updated if the IP header is constructed by
+a userspace program.
+
+* ICMP named types
+| These counters include most of common ICMP types, they are:
+| IcmpInDestUnreachs: `RFC1213 icmpInDestUnreachs`_
+| IcmpInTimeExcds: `RFC1213 icmpInTimeExcds`_
+| IcmpInParmProbs: `RFC1213 icmpInParmProbs`_
+| IcmpInSrcQuenchs: `RFC1213 icmpInSrcQuenchs`_
+| IcmpInRedirects: `RFC1213 icmpInRedirects`_
+| IcmpInEchos: `RFC1213 icmpInEchos`_
+| IcmpInEchoReps: `RFC1213 icmpInEchoReps`_
+| IcmpInTimestamps: `RFC1213 icmpInTimestamps`_
+| IcmpInTimestampReps: `RFC1213 icmpInTimestampReps`_
+| IcmpInAddrMasks: `RFC1213 icmpInAddrMasks`_
+| IcmpInAddrMaskReps: `RFC1213 icmpInAddrMaskReps`_
+| IcmpOutDestUnreachs: `RFC1213 icmpOutDestUnreachs`_
+| IcmpOutTimeExcds: `RFC1213 icmpOutTimeExcds`_
+| IcmpOutParmProbs: `RFC1213 icmpOutParmProbs`_
+| IcmpOutSrcQuenchs: `RFC1213 icmpOutSrcQuenchs`_
+| IcmpOutRedirects: `RFC1213 icmpOutRedirects`_
+| IcmpOutEchos: `RFC1213 icmpOutEchos`_
+| IcmpOutEchoReps: `RFC1213 icmpOutEchoReps`_
+| IcmpOutTimestamps: `RFC1213 icmpOutTimestamps`_
+| IcmpOutTimestampReps: `RFC1213 icmpOutTimestampReps`_
+| IcmpOutAddrMasks: `RFC1213 icmpOutAddrMasks`_
+| IcmpOutAddrMaskReps: `RFC1213 icmpOutAddrMaskReps`_
+
+.. _RFC1213 icmpInDestUnreachs: https://tools.ietf.org/html/rfc1213#page-41
+.. _RFC1213 icmpInTimeExcds: https://tools.ietf.org/html/rfc1213#page-41
+.. _RFC1213 icmpInParmProbs: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInSrcQuenchs: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInRedirects: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInEchos: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInEchoReps: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInTimestamps: https://tools.ietf.org/html/rfc1213#page-42
+.. _RFC1213 icmpInTimestampReps: https://tools.ietf.org/html/rfc1213#page-43
+.. _RFC1213 icmpInAddrMasks: https://tools.ietf.org/html/rfc1213#page-43
+.. _RFC1213 icmpInAddrMaskReps: https://tools.ietf.org/html/rfc1213#page-43
+
+.. _RFC1213 icmpOutDestUnreachs: https://tools.ietf.org/html/rfc1213#page-44
+.. _RFC1213 icmpOutTimeExcds: https://tools.ietf.org/html/rfc1213#page-44
+.. _RFC1213 icmpOutParmProbs: https://tools.ietf.org/html/rfc1213#page-44
+.. _RFC1213 icmpOutSrcQuenchs: https://tools.ietf.org/html/rfc1213#page-44
+.. _RFC1213 icmpOutRedirects: https://tools.ietf.org/html/rfc1213#page-44
+.. _RFC1213 icmpOutEchos: https://tools.ietf.org/html/rfc1213#page-45
+.. _RFC1213 icmpOutEchoReps: https://tools.ietf.org/html/rfc1213#page-45
+.. _RFC1213 icmpOutTimestamps: https://tools.ietf.org/html/rfc1213#page-45
+.. _RFC1213 icmpOutTimestampReps: https://tools.ietf.org/html/rfc1213#page-45
+.. _RFC1213 icmpOutAddrMasks: https://tools.ietf.org/html/rfc1213#page-45
+.. _RFC1213 icmpOutAddrMaskReps: https://tools.ietf.org/html/rfc1213#page-46
+
+Every ICMP type has two counters: 'In' and 'Out'. E.g., for the ICMP
+Echo packet, they are IcmpInEchos and IcmpOutEchos. Their meanings are
+straightforward: the 'In' counter means the kernel received such a
+packet and the 'Out' counter means the kernel sent such a packet.
+
+* ICMP numeric types
+They are IcmpMsgInType[N] and IcmpMsgOutType[N], where [N] indicates the
+ICMP type number. These counters track all kinds of ICMP packets. The
+ICMP type number definition could be found in the `ICMP parameters`_
+document.
+
+.. _ICMP parameters: https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xhtml
+
+For example, if the Linux kernel sends an ICMP Echo packet, the
+IcmpMsgOutType8 would increase 1. And if kernel gets an ICMP Echo Reply
+packet, IcmpMsgInType0 would increase 1.
+
+* IcmpInCsumErrors
+This counter indicates that the checksum of the ICMP packet is
+wrong. The kernel verifies the checksum after updating IcmpInMsgs and
+before updating IcmpMsgInType[N]. If a packet has a bad checksum,
+IcmpInMsgs would be updated but none of the IcmpMsgInType[N] would be
+updated.
+
+* IcmpInErrors and IcmpOutErrors
+Defined by `RFC1213 icmpInErrors`_ and `RFC1213 icmpOutErrors`_
+
+.. _RFC1213 icmpInErrors: https://tools.ietf.org/html/rfc1213#page-41
+.. _RFC1213 icmpOutErrors: https://tools.ietf.org/html/rfc1213#page-43
+
+When an error occurs in the ICMP packet handler path, these two
+counters would be updated. The receiving packet path uses IcmpInErrors
+and the sending packet path uses IcmpOutErrors. When IcmpInCsumErrors
+is increased, IcmpInErrors would always be increased too.
+
+relationship of the ICMP counters
+---------------------------------
+The sum of IcmpMsgOutType[N] is always equal to IcmpOutMsgs, as they
+are updated at the same time. The sum of IcmpMsgInType[N] plus
+IcmpInErrors should be equal or larger than IcmpInMsgs. When the kernel
+receives an ICMP packet, it follows the logic below:
+
+1. increase IcmpInMsgs
+2. if it has any error, update IcmpInErrors and finish the process
+3. update IcmpMsgInType[N]
+4. handle the packet depending on the type; if it has any error, update
+   IcmpInErrors and finish the process
+
+So if all errors occur in step (2), IcmpInMsgs should be equal to the
+sum of IcmpMsgInType[N] plus IcmpInErrors. If all errors occur in
+step (4), IcmpInMsgs should be equal to the sum of
+IcmpMsgInType[N]. If the errors occur in both step (2) and step (4),
+IcmpInMsgs should be less than the sum of IcmpMsgInType[N] plus
+IcmpInErrors.
+
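+As a quick sanity check of these relationships, the counters can be read
+with nstat and compared by a short python script (a minimal sketch; the
+nstat flags and the two-column output format are assumptions based on
+iproute2's nstat)::
+
+    import subprocess
+
+    # -a: absolute values, -s: don't update history, -z: include zeros
+    out = subprocess.check_output(['nstat', '-asz'], text=True)
+    counters = {}
+    for line in out.splitlines():
+        fields = line.split()
+        if line.startswith('#') or len(fields) < 2:
+            continue
+        try:
+            counters[fields[0]] = int(fields[1])
+        except ValueError:
+            continue
+    in_types = sum(v for k, v in counters.items()
+                   if k.startswith('IcmpMsgInType'))
+    # sum of IcmpMsgInType[N] plus IcmpInErrors >= IcmpInMsgs
+    assert (in_types + counters.get('IcmpInErrors', 0)
+            >= counters.get('IcmpInMsgs', 0))
+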
+General TCP counters
+====================
+* TcpInSegs
+Defined in `RFC1213 tcpInSegs`_
+
+.. _RFC1213 tcpInSegs: https://tools.ietf.org/html/rfc1213#page-48
+
+The number of packets received by the TCP layer. As mentioned in
+RFC1213, it includes the packets received in error, such as checksum
+error, invalid TCP header and so on. Only one error won't be included:
+if the layer 2 destination address is not the NIC's layer 2
+address. It might happen if the packet is a multicast or broadcast
+packet, or the NIC is in promiscuous mode. In these situations, the
+packets would be delivered to the TCP layer, but the TCP layer will discard
+these packets before increasing TcpInSegs. The TcpInSegs counter
+isn't aware of GRO. So if two packets are merged by GRO, the TcpInSegs
+counter would only increase 1.
+
+* TcpOutSegs
+Defined in `RFC1213 tcpOutSegs`_
+
+.. _RFC1213 tcpOutSegs: https://tools.ietf.org/html/rfc1213#page-48
+
+The number of packets sent by the TCP layer. As mentioned in RFC1213,
+it excludes the retransmitted packets. But it includes the SYN, ACK
+and RST packets. Unlike TcpInSegs, TcpOutSegs is aware of
+GSO, so if a packet is split into 2 by GSO, TcpOutSegs will
+increase 2.
+
+* TcpActiveOpens
+Defined in `RFC1213 tcpActiveOpens`_
+
+.. _RFC1213 tcpActiveOpens: https://tools.ietf.org/html/rfc1213#page-47
+
+It means the TCP layer sends a SYN and comes into the SYN-SENT
+state. Every time TcpActiveOpens increases 1, TcpOutSegs should always
+increase 1.
+
+* TcpPassiveOpens
+Defined in `RFC1213 tcpPassiveOpens`_
+
+.. _RFC1213 tcpPassiveOpens: https://tools.ietf.org/html/rfc1213#page-47
+
+It means the TCP layer receives a SYN, replies with a SYN+ACK, and comes
+into the SYN-RCVD state.
+
+* TcpExtTCPRcvCoalesce
+When packets are received by the TCP layer and are not read by the
+application, the TCP layer will try to merge them. This counter
+indicates how many packets are merged in such situations. If GRO is
+enabled, lots of packets would be merged by GRO; these packets
+wouldn't be counted in TcpExtTCPRcvCoalesce.
+
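+A simple way to observe it is to leave received data unread (a minimal
+sketch over loopback, where GRO normally doesn't interfere; the exact
+count depends on timing)::
+
+    import socket
+
+    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    srv.bind(('127.0.0.1', 9000))
+    srv.listen(1)
+    cli = socket.create_connection(('127.0.0.1', 9000))
+    cli.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+    peer, _ = srv.accept()      # accepted, but never read from
+    for _ in range(1000):
+        cli.send(b'x')          # small segments pile up and get merged
+
+Afterwards, 'nstat | grep -i coalesce' should show a non-zero
+TcpExtTCPRcvCoalesce on most systems.
+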
+* TcpExtTCPAutoCorking
+When sending packets, the TCP layer will try to merge small packets into
+a bigger one. This counter increases 1 for every packet merged in such
+situations. Please refer to the LWN article for more details:
+https://lwn.net/Articles/576263/
+
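+Auto corking only merges a write while earlier data is still queued in
+the qdisc or device, so a tight loop of tiny writes is a simple way to
+trigger it (a hedged sketch; it assumes something is listening on
+nstat-b port 9000, like the nc server used in the examples below, and
+how many writes get merged depends on the NIC and qdisc state)::
+
+    import socket
+
+    s = socket.create_connection(('nstat-b', 9000))
+    for _ in range(10000):
+        s.send(b'x')            # back-to-back small writes
+    s.close()
+
+Afterwards, check 'nstat | grep -i autocork' on the sending host.
+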
+* TcpExtTCPOrigDataSent
+This counter is explained by `kernel commit f19c29e3e391`_; I pasted the
+explanation below::
+
+ TCPOrigDataSent: number of outgoing packets with original data (excluding
+ retransmission but including data-in-SYN). This counter is different from
+ TcpOutSegs because TcpOutSegs also tracks pure ACKs. TCPOrigDataSent is
+ more useful to track the TCP retransmission rate.
+
+* TCPSynRetrans
+This counter is explained by `kernel commit f19c29e3e391`_; I pasted the
+explanation below::
+
+ TCPSynRetrans: number of SYN and SYN/ACK retransmits to break down
+ retransmissions into SYN, fast-retransmits, timeout retransmits, etc.
+
+* TCPFastOpenActiveFail
+This counter is explained by `kernel commit f19c29e3e391`_; I pasted the
+explanation below::
+
+ TCPFastOpenActiveFail: Fast Open attempts (SYN/data) failed because
+ the remote does not accept it or the attempts timed out.
+
+.. _kernel commit f19c29e3e391: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=f19c29e3e391a66a273e9afebaf01917245148cd
+
+* TcpExtListenOverflows and TcpExtListenDrops
+When kernel receives a SYN from a client, and if the TCP accept queue
+is full, kernel will drop the SYN and add 1 to TcpExtListenOverflows.
+At the same time kernel will also add 1 to TcpExtListenDrops. When a
+TCP socket is in LISTEN state, and kernel need to drop a packet,
+kernel would always add 1 to TcpExtListenDrops. So increase
+TcpExtListenOverflows would let TcpExtListenDrops increasing at the
+same time, but TcpExtListenDrops would also increase without
+TcpExtListenOverflows increasing, e.g. a memory allocation fail would
+also let TcpExtListenDrops increase.
+
+Note: The above explanation is based on kernel 4.10 or above. On
+an older kernel, the TCP stack has different behavior when the TCP accept
+queue is full: it won't drop the SYN, it will complete the 3-way
+handshake instead. As the accept queue is full, the TCP stack will
+keep the socket in the TCP half-open queue. While it is in the
+half-open queue, the TCP stack will send SYN+ACK on an exponential backoff
+timer. After the client replies ACK, the TCP stack checks whether the accept
+queue is still full. If it is not full, the socket is moved to the accept
+queue; if it is full, the socket is kept in the half-open queue, and the
+next time the client replies ACK, this socket will get another chance to
+move to the accept queue.
+
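+The overflow case can be reproduced locally with a listening socket
+that never calls accept (a minimal sketch; the exact counter values
+depend on the SYN retry behavior of the running kernel)::
+
+    import socket
+
+    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    srv.bind(('127.0.0.1', 9000))
+    srv.listen(1)               # tiny accept queue, accept() never called
+
+    clients = []
+    for _ in range(10):
+        c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        c.settimeout(2)
+        try:
+            c.connect(('127.0.0.1', 9000))  # queue full: SYN gets dropped
+        except socket.timeout:
+            pass
+        clients.append(c)
+
+Then 'nstat | grep -i listen' should show TcpExtListenOverflows and
+TcpExtListenDrops increasing.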
+
+TCP Fast Path
+=============
+When the kernel receives a TCP packet, it has two paths to handle the
+packet: one is the fast path, the other is the slow path. The comment in
+the kernel code provides a good explanation of them; I pasted it below::
+
+ It is split into a fast path and a slow path. The fast path is
+ disabled when:
+
+ - A zero window was announced from us
+ - zero window probing
+ is only handled properly on the slow path.
+ - Out of order segments arrived.
+ - Urgent data is expected.
+ - There is no buffer space left
+ - Unexpected TCP flags/window values/header lengths are received
+ (detected by checking the TCP header against pred_flags)
+ - Data is sent in both directions. The fast path only supports pure senders
+ or pure receivers (this means either the sequence number or the ack
+ value must stay constant)
+ - Unexpected TCP option.
+
+The kernel will try to use the fast path unless any of the above
+conditions are satisfied. If the packets are out of order, the kernel
+will handle them in the slow path, which means the performance might not
+be very good. The kernel would also come into the slow path if "Delayed
+ACK" is used, because when using "Delayed ACK", the data is sent in both
+directions. When the TCP window scale option is not used, the kernel will
+try to enable the fast path immediately when the connection comes into the
+established state, but if the TCP window scale option is used, the kernel
+will disable the fast path at first, and try to enable it after the kernel
+receives packets.
+
+* TcpExtTCPPureAcks and TcpExtTCPHPAcks
+If a packet sets the ACK flag and has no data, it is a pure ACK packet. If
+the kernel handles it in the fast path, TcpExtTCPHPAcks will increase 1;
+if the kernel handles it in the slow path, TcpExtTCPPureAcks will
+increase 1.
+
+* TcpExtTCPHPHits
+If a TCP packet has data (which means it is not a pure ACK packet),
+and this packet is handled in the fast path, TcpExtTCPHPHits will
+increase 1.
+
+
+TCP abort
+=========
+
+
+* TcpExtTCPAbortOnData
+It means the TCP layer has data in flight but needs to close the
+connection, so the TCP layer sends an RST to the other side, indicating
+that the connection was not closed very gracefully. An easy way to
+increase this counter is using the SO_LINGER option. Please refer to
+the SO_LINGER section of the `socket man page`_:
+
+.. _socket man page: http://man7.org/linux/man-pages/man7/socket.7.html
+
+By default, when an application closes a connection, the close function
+will return immediately and the kernel will try to send the in-flight data
+asynchronously. If you use the SO_LINGER option, set l_onoff to 1, and
+l_linger to a positive number, the close function won't return
+immediately; it waits until the in-flight data is acked by the other side,
+and the max wait time is l_linger seconds. If you set l_onoff to 1 and
+l_linger to 0, when the application closes a connection, the kernel will
+send an RST immediately and increase the TcpExtTCPAbortOnData counter.
+
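+A minimal python sketch of the zero-linger case (it assumes a server is
+listening on nstat-b port 9000, e.g. the nc server from the examples
+below)::
+
+    import socket
+    import struct
+
+    s = socket.create_connection(('nstat-b', 9000))
+    # l_onoff=1, l_linger=0: close() sends an RST instead of a FIN and
+    # TcpExtTCPAbortOnData increases 1
+    s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
+                 struct.pack('ii', 1, 0))
+    s.close()
+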
+* TcpExtTCPAbortOnClose
+This counter means the application has unread data in the TCP layer when
+the application wants to close the TCP connection. In such a situation,
+the kernel will send an RST to the other side of the TCP connection.
+
+* TcpExtTCPAbortOnMemory
+When an application closes a TCP connection, the kernel still needs to
+track the connection to let it complete the TCP disconnect process. E.g.
+an app calls the close method of a socket, and the kernel sends a FIN to
+the other side of the connection. The app then has no relationship with
+the socket any more, but the kernel needs to keep the socket; this socket
+becomes an orphan socket. The kernel waits for the reply of the other
+side, and would finally come to the TIME_WAIT state. When the kernel has
+not enough memory to keep the orphan socket, the kernel would send an RST
+to the other side and delete the socket; in such a situation, the kernel
+will add 1 to TcpExtTCPAbortOnMemory. Two conditions would trigger
+TcpExtTCPAbortOnMemory:
+
+1. the memory used by the TCP protocol is higher than the third value of
+tcp_mem. Please refer to the tcp_mem section in the `TCP man page`_:
+
+.. _TCP man page: http://man7.org/linux/man-pages/man7/tcp.7.html
+
+2. the orphan socket count is higher than net.ipv4.tcp_max_orphans
+
+
+* TcpExtTCPAbortOnTimeout
+This counter will increase when any of the TCP timers expire. In such
+a situation, the kernel won't send an RST; it just gives up the
+connection.
+
+* TcpExtTCPAbortOnLinger
+When a TCP connection comes into the FIN_WAIT_2 state, instead of waiting
+for the FIN packet from the other side, the kernel could send an RST and
+delete the socket immediately. This is not the default behavior of the
+Linux kernel TCP stack. By configuring the TCP_LINGER2 socket option,
+you could let the kernel follow this behavior.
+
+* TcpExtTCPAbortFailed
+The kernel TCP layer will send an RST if the conditions in
+the `RFC2525 2.17 section`_ are satisfied. If an internal error occurs
+during this process, TcpExtTCPAbortFailed will be increased.
+
+.. _RFC2525 2.17 section: https://tools.ietf.org/html/rfc2525#page-50
+
+TCP Hybrid Slow Start
+=====================
+The Hybrid Slow Start algorithm is an enhancement of the traditional
+TCP congestion window Slow Start algorithm. It uses two pieces of
+information to detect whether the max bandwidth of the TCP path is
+approached: the ACK train length and the increase in packet delay. For
+detailed information, please refer to the `Hybrid Slow Start paper`_.
+When either the ACK train length or the packet delay hits a specific
+threshold, the congestion control algorithm will come into the
+Congestion Avoidance state. As of v4.20, two congestion control
+algorithms use Hybrid Slow Start: cubic (the default congestion control
+algorithm) and cdg. Four snmp counters relate to the Hybrid Slow Start
+algorithm.
+
+.. _Hybrid Slow Start paper: https://pdfs.semanticscholar.org/25e9/ef3f03315782c7f1cbcd31b587857adae7d1.pdf
+
+* TcpExtTCPHystartTrainDetect
+How many times the ACK train length threshold is detected
+
+* TcpExtTCPHystartTrainCwnd
+The sum of CWND detected by ACK train length. Dividing this value by
+TcpExtTCPHystartTrainDetect gives the average CWND detected by the
+ACK train length.
+
+* TcpExtTCPHystartDelayDetect
+How many times the packet delay threshold is detected.
+
+* TcpExtTCPHystartDelayCwnd
+The sum of CWND detected by packet delay. Dividing this value by
+TcpExtTCPHystartDelayDetect gives the average CWND detected by the
+packet delay.
+
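+For example, with hypothetical values TcpExtTCPHystartTrainDetect=5,
+TcpExtTCPHystartTrainCwnd=160, TcpExtTCPHystartDelayDetect=3 and
+TcpExtTCPHystartDelayCwnd=90 (numbers made up for illustration), the
+averages work out as::
+
+    train_detect, train_cwnd = 5, 160
+    delay_detect, delay_cwnd = 3, 90
+
+    print(train_cwnd / train_detect)   # 32.0, avg CWND per ACK train exit
+    print(delay_cwnd / delay_detect)   # 30.0, avg CWND per delay exit
+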
+TCP retransmission and congestion control
+=========================================
+The TCP protocol has two retransmission mechanisms: SACK and fast
+recovery. They are exclusive of each other: when SACK is enabled,
+the kernel TCP stack uses SACK; otherwise, the kernel uses fast
+recovery. SACK is a TCP option, which is defined in `RFC2018`_;
+fast recovery is defined in `RFC6582`_ and is also called
+'Reno'.
+
+The TCP congestion control is a big and complex topic. To understand
+the related snmp counter, we need to know the states of the congestion
+control state machine. There are 5 states: Open, Disorder, CWR,
+Recovery and Loss. For details about these states, please refer page 5
+and page 6 of this document:
+https://pdfs.semanticscholar.org/0e9c/968d09ab2e53e24c4dca5b2d67c7f7140f8e.pdf
+
+.. _RFC2018: https://tools.ietf.org/html/rfc2018
+.. _RFC6582: https://tools.ietf.org/html/rfc6582
+
+* TcpExtTCPRenoRecovery and TcpExtTCPSackRecovery
+When the congestion control comes into the Recovery state, if SACK is
+used, TcpExtTCPSackRecovery increases 1; if SACK is not used,
+TcpExtTCPRenoRecovery increases 1. These two counters mean the TCP
+stack begins to retransmit the lost packets.
+
+* TcpExtTCPSACKReneging
+A packet was acknowledged by SACK, but the receiver has dropped this
+packet, so the sender needs to retransmit it. In this
+situation, the sender adds 1 to TcpExtTCPSACKReneging. A receiver
+could drop a packet which has been acknowledged by SACK; although it is
+unusual, it is allowed by the TCP protocol. The sender doesn't really
+know what happened on the receiver side. The sender just waits until
+the RTO expires for this packet, then the sender assumes this packet
+has been dropped by the receiver.
+
+* TcpExtTCPRenoReorder
+The reorder packet is detected by fast recovery. It would only be used
+if SACK is disabled. The fast recovery algorithm detects reordering by
+the duplicate ACK number. E.g., if retransmission is triggered, and
+the original retransmitted packet is not lost but just out of
+order, the receiver would acknowledge multiple times: once for the
+retransmitted packet, and again on the arrival of the original out of
+order packet. Thus the sender would find more ACKs than expected,
+and the sender knows reordering has occurred.
+
+* TcpExtTCPTSReorder
+The reorder packet is detected when a hole is filled. E.g., assume the
+sender sends packets 1,2,3,4,5, and the receiving order is
+1,2,4,5,3. When the sender receives the ACK of packet 3 (which will
+fill the hole), two conditions will let TcpExtTCPTSReorder increase
+1: (1) if packet 3 has not been retransmitted yet. (2) if packet 3
+has been retransmitted but the timestamp of packet 3's ACK is earlier
+than the retransmission timestamp.
+
+* TcpExtTCPSACKReorder
+The reorder packet detected by SACK. SACK has two methods to
+detect reordering: (1) a DSACK is received by the sender. It means the
+sender sent the same packet more than once, and the only reason
+is that the sender believed an out of order packet was lost so it sent
+the packet again. (2) Assume packets 1,2,3,4,5 are sent by the sender,
+and the sender has received SACKs for packets 2 and 5; now the sender
+receives the SACK for packet 4 and the sender hasn't retransmitted the
+packet yet, so the sender knows packet 4 is out of order. The kernel
+TCP stack will increase TcpExtTCPSACKReorder for both of the
+above scenarios.
+
+
+DSACK
+=====
+The DSACK is defined in `RFC2883`_. The receiver uses DSACK to report
+duplicate packets to the sender. There are two kinds of
+duplications: (1) a packet which has been acknowledged is a
+duplicate. (2) an out of order packet is a duplicate. The TCP stack
+counts these two kinds of duplications on both the receiver side and
+the sender side.
+
+.. _RFC2883: https://tools.ietf.org/html/rfc2883
+
+* TcpExtTCPDSACKOldSent
+The TCP stack receives a duplicate packet which has been acked, so it
+sends a DSACK to the sender.
+
+* TcpExtTCPDSACKOfoSent
+The TCP stack receives an out of order duplicate packet, so it sends a
+DSACK to the sender.
+
+* TcpExtTCPDSACKRecv
+The TCP stack receives a DSACK, which indicates that an acknowledged
+duplicate packet was received.
+
+* TcpExtTCPDSACKOfoRecv
+The TCP stack receives a DSACK, which indicates that an out of order
+duplicate packet was received.
+
+examples
+========
+
+ping test
+---------
+Run the ping command against the public dns server 8.8.8.8::
+
+ nstatuser@nstat-a:~$ ping 8.8.8.8 -c 1
+ PING 8.8.8.8 (8.8.8.8) 56(84) bytes of data.
+ 64 bytes from 8.8.8.8: icmp_seq=1 ttl=119 time=17.8 ms
+
+ --- 8.8.8.8 ping statistics ---
+ 1 packets transmitted, 1 received, 0% packet loss, time 0ms
+ rtt min/avg/max/mdev = 17.875/17.875/17.875/0.000 ms
+
+The nstat result::
+
+ nstatuser@nstat-a:~$ nstat
+ #kernel
+ IpInReceives 1 0.0
+ IpInDelivers 1 0.0
+ IpOutRequests 1 0.0
+ IcmpInMsgs 1 0.0
+ IcmpInEchoReps 1 0.0
+ IcmpOutMsgs 1 0.0
+ IcmpOutEchos 1 0.0
+ IcmpMsgInType0 1 0.0
+ IcmpMsgOutType8 1 0.0
+ IpExtInOctets 84 0.0
+ IpExtOutOctets 84 0.0
+ IpExtInNoECTPkts 1 0.0
+
+The Linux server sent an ICMP Echo packet, so IpOutRequests,
+IcmpOutMsgs, IcmpOutEchos and IcmpMsgOutType8 were increased 1. The
+server got an ICMP Echo Reply from 8.8.8.8, so IpInReceives, IcmpInMsgs,
+IcmpInEchoReps and IcmpMsgInType0 were increased 1. The ICMP Echo Reply
+was passed to the ICMP layer via the IP layer, so IpInDelivers was
+increased 1. The default ping data size is 56, so an ICMP Echo packet
+and its corresponding Echo Reply packet are constructed by:
+
+* 14 bytes MAC header
+* 20 bytes IP header
+* 8 bytes ICMP header
+* 56 bytes data (default value of the ping command)
+
+So the IpExtInOctets and IpExtOutOctets are 20+8+56=84.
+
+tcp 3-way handshake
+-------------------
+On server side, we run::
+
+ nstatuser@nstat-b:~$ nc -lknv 0.0.0.0 9000
+ Listening on [0.0.0.0] (family 0, port 9000)
+
+On client side, we run::
+
+ nstatuser@nstat-a:~$ nc -nv 192.168.122.251 9000
+ Connection to 192.168.122.251 9000 port [tcp/*] succeeded!
+
+The server listened on TCP port 9000, the client connected to it, and
+they completed the 3-way handshake.
+
+On server side, we can find below nstat output::
+
+ nstatuser@nstat-b:~$ nstat | grep -i tcp
+ TcpPassiveOpens 1 0.0
+ TcpInSegs 2 0.0
+ TcpOutSegs 1 0.0
+ TcpExtTCPPureAcks 1 0.0
+
+On client side, we can find below nstat output::
+
+ nstatuser@nstat-a:~$ nstat | grep -i tcp
+ TcpActiveOpens 1 0.0
+ TcpInSegs 1 0.0
+ TcpOutSegs 2 0.0
+
+When the server received the first SYN, it replied with a SYN+ACK and came
+into the SYN-RCVD state, so TcpPassiveOpens increased 1. The server
+received SYN, sent SYN+ACK, and received ACK, so the server sent 1 packet
+and received 2 packets; TcpInSegs increased 2 and TcpOutSegs increased 1.
+The last ACK of the 3-way handshake is a pure ACK without data, so
+TcpExtTCPPureAcks increased 1.
+
+When the client sent SYN, the client came into the SYN-SENT state, so
+TcpActiveOpens increased 1; the client sent SYN, received SYN+ACK, and
+sent ACK, so the client sent 2 packets and received 1 packet; TcpInSegs
+increased 1 and TcpOutSegs increased 2.
+
+TCP normal traffic
+------------------
+Run nc on server::
+
+ nstatuser@nstat-b:~$ nc -lkv 0.0.0.0 9000
+ Listening on [0.0.0.0] (family 0, port 9000)
+
+Run nc on client::
+
+ nstatuser@nstat-a:~$ nc -v nstat-b 9000
+ Connection to nstat-b 9000 port [tcp/*] succeeded!
+
+Input a string in the nc client ('hello' in our example)::
+
+ nstatuser@nstat-a:~$ nc -v nstat-b 9000
+ Connection to nstat-b 9000 port [tcp/*] succeeded!
+ hello
+
+The client side nstat output::
+
+ nstatuser@nstat-a:~$ nstat
+ #kernel
+ IpInReceives 1 0.0
+ IpInDelivers 1 0.0
+ IpOutRequests 1 0.0
+ TcpInSegs 1 0.0
+ TcpOutSegs 1 0.0
+ TcpExtTCPPureAcks 1 0.0
+ TcpExtTCPOrigDataSent 1 0.0
+ IpExtInOctets 52 0.0
+ IpExtOutOctets 58 0.0
+ IpExtInNoECTPkts 1 0.0
+
+The server side nstat output::
+
+ nstatuser@nstat-b:~$ nstat
+ #kernel
+ IpInReceives 1 0.0
+ IpInDelivers 1 0.0
+ IpOutRequests 1 0.0
+ TcpInSegs 1 0.0
+ TcpOutSegs 1 0.0
+ IpExtInOctets 58 0.0
+ IpExtOutOctets 52 0.0
+ IpExtInNoECTPkts 1 0.0
+
+Input a string in the nc client side again ('world' in our example)::
+
+ nstatuser@nstat-a:~$ nc -v nstat-b 9000
+ Connection to nstat-b 9000 port [tcp/*] succeeded!
+ hello
+ world
+
+Client side nstat output::
+
+ nstatuser@nstat-a:~$ nstat
+ #kernel
+ IpInReceives 1 0.0
+ IpInDelivers 1 0.0
+ IpOutRequests 1 0.0
+ TcpInSegs 1 0.0
+ TcpOutSegs 1 0.0
+ TcpExtTCPHPAcks 1 0.0
+ TcpExtTCPOrigDataSent 1 0.0
+ IpExtInOctets 52 0.0
+ IpExtOutOctets 58 0.0
+ IpExtInNoECTPkts 1 0.0
+
+
+Server side nstat output::
+
+ nstatuser@nstat-b:~$ nstat
+ #kernel
+ IpInReceives 1 0.0
+ IpInDelivers 1 0.0
+ IpOutRequests 1 0.0
+ TcpInSegs 1 0.0
+ TcpOutSegs 1 0.0
+ TcpExtTCPHPHits 1 0.0
+ IpExtInOctets 58 0.0
+ IpExtOutOctets 52 0.0
+ IpExtInNoECTPkts 1 0.0
+
+Comparing the first client-side nstat and the second client-side nstat,
+we can find one difference: the first one had a 'TcpExtTCPPureAcks',
+but the second one had a 'TcpExtTCPHPAcks'. The first server-side
+nstat and the second server-side nstat had a difference too: the
+second server-side nstat had a TcpExtTCPHPHits, but the first
+server-side nstat didn't have it. The network traffic patterns were
+exactly the same: the client sent a packet to the server, and the server
+replied with an ACK. But the kernel handled them in different ways. When
+the TCP window scale option is not used, the kernel will try to enable the
+fast path immediately when the connection comes into the established
+state, but if the TCP window scale option is used, the kernel will disable
+the fast path at first, and try to enable it after the kernel receives
+packets. We can use the 'ss' command to verify whether the window
+scale option is used, e.g. run the below command on either server or
+client::
+
+ nstatuser@nstat-a:~$ ss -o state established -i '( dport = :9000 or sport = :9000 )'
+ Netid Recv-Q Send-Q Local Address:Port Peer Address:Port
+ tcp 0 0 192.168.122.250:40654 192.168.122.251:9000
+ ts sack cubic wscale:7,7 rto:204 rtt:0.98/0.49 mss:1448 pmtu:1500 rcvmss:536 advmss:1448 cwnd:10 bytes_acked:1 segs_out:2 segs_in:1 send 118.2Mbps lastsnd:46572 lastrcv:46572 lastack:46572 pacing_rate 236.4Mbps rcv_space:29200 rcv_ssthresh:29200 minrtt:0.98
+
+The 'wscale:7,7' means both the server and the client set the window
+scale option to 7. Now we can explain the nstat output in our test:
+
+In the first client-side nstat output, the client sent a packet and the
+server replied with an ACK; when the kernel handled this ACK, the fast
+path was not enabled, so the ACK was counted into 'TcpExtTCPPureAcks'.
+
+In the second client-side nstat output, the client sent a packet again
+and received another ACK from the server; this time the fast path was
+enabled, and the ACK qualified for the fast path, so it was handled by
+the fast path and counted into TcpExtTCPHPAcks.
+
+In the first server-side nstat output, the fast path was not enabled,
+so there was no 'TcpExtTCPHPHits'.
+
+In the second server-side nstat output, the fast path was enabled,
+and the packet received from the client qualified for the fast path,
+so it was counted into 'TcpExtTCPHPHits'.
+
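+If you prefer a scriptable version of the test above, a minimal Python
+equivalent might look like the sketch below (the host name 'nstat-b'
+and port 9000 are taken from the earlier nc examples, and the nc server
+is assumed to be still listening; run nstat on the client after each
+send to observe TcpExtTCPPureAcks first and TcpExtTCPHPAcks second)::
+
+  import socket
+  import time
+
+  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+  s.connect(('nstat-b', 9000))  # same server as in the nc example
+  s.send(b'hello\n')  # the ACK for this send should show up in TcpExtTCPPureAcks
+  time.sleep(5)       # leave time to run nstat on the client
+  s.send(b'world\n')  # the ACK for this send should show up in TcpExtTCPHPAcks
+  time.sleep(5)
+  s.close()
+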
+TcpExtTCPAbortOnClose
+---------------------
+On the server side, we run the Python script below::
+
+ import socket
+ import time
+
+ port = 9000
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.bind(('0.0.0.0', port))
+ s.listen(1)
+ sock, addr = s.accept()
+ while True:
+ time.sleep(9999999)
+
+This Python script listens on port 9000 but doesn't read anything from
+the connection.
+
+On the client side, we send the string "hello" by nc::
+
+ nstatuser@nstat-a:~$ echo "hello" | nc nstat-b 9000
+
+Then we come back to the server side. The server has received the "hello"
+packet, and the TCP layer has acked it, but the application hasn't read
+it yet. We type Ctrl-C to terminate the server script. Then we can
+find that TcpExtTCPAbortOnClose increased by 1 on the server side::
+
+ nstatuser@nstat-b:~$ nstat | grep -i abort
+ TcpExtTCPAbortOnClose 1 0.0
+
+If we run tcpdump on the server side, we can see that the server sent an
+RST after we typed Ctrl-C.
+
+TcpExtTCPAbortOnMemory and TcpExtTCPAbortOnTimeout
+--------------------------------------------------
+Below is an example which lets the orphan socket count exceed
+net.ipv4.tcp_max_orphans.
+Change tcp_max_orphans to a smaller value on the client::
+
+ sudo bash -c "echo 10 > /proc/sys/net/ipv4/tcp_max_orphans"
+
+Client code (create 64 connections to the server)::
+
+ nstatuser@nstat-a:~$ cat client_orphan.py
+ import socket
+ import time
+
+ server = 'nstat-b' # server address
+ port = 9000
+
+ count = 64
+
+ connection_list = []
+
+ for i in range(count):
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((server, port))
+ connection_list.append(s)
+ print("connection_count: %d" % len(connection_list))
+
+ while True:
+ time.sleep(99999)
+
+Server code (accept 64 connections from the client)::
+
+ nstatuser@nstat-b:~$ cat server_orphan.py
+ import socket
+ import time
+
+ port = 9000
+ count = 64
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.bind(('0.0.0.0', port))
+ s.listen(count)
+ connection_list = []
+ while True:
+ sock, addr = s.accept()
+ connection_list.append((sock, addr))
+ print("connection_count: %d" % len(connection_list))
+
+Run the Python scripts on the server and the client.
+
+On server::
+
+ python3 server_orphan.py
+
+On client::
+
+ python3 client_orphan.py
+
+Run iptables on server::
+
+ sudo iptables -A INPUT -i ens3 -p tcp --destination-port 9000 -j DROP
+
+Type Ctrl-C on the client to stop client_orphan.py.
+
+Check TcpExtTCPAbortOnMemory on client::
+
+ nstatuser@nstat-a:~$ nstat | grep -i abort
+ TcpExtTCPAbortOnMemory 54 0.0
+
+Check the orphan socket count on the client::
+
+ nstatuser@nstat-a:~$ ss -s
+ Total: 131 (kernel 0)
+ TCP: 14 (estab 1, closed 0, orphaned 10, synrecv 0, timewait 0/0), ports 0
+
+ Transport Total IP IPv6
+ * 0 - -
+ RAW 1 0 1
+ UDP 1 1 0
+ TCP 14 13 1
+ INET 16 14 2
+ FRAG 0 0 0
+
+The explanation of the test: after running server_orphan.py and
+client_orphan.py, we set up 64 connections between the server and the
+client. After running the iptables command, the server drops all packets
+from the client. When we type Ctrl-C on client_orphan.py, the client's
+system tries to close these connections, and before they are closed
+gracefully, they become orphan sockets. As the iptables rule on the
+server blocks packets from the client, the server won't receive a FIN
+from the client, so all connections on the client get stuck in the
+FIN_WAIT_1 state and stay orphan sockets until they time out. We have
+echoed 10 to /proc/sys/net/ipv4/tcp_max_orphans, so the client system
+only keeps 10 orphan sockets; for all other orphan sockets, the client
+system sends an RST and deletes them. We had 64 connections, so the
+'ss -s' command shows the system has 10 orphan sockets, and the
+value of TcpExtTCPAbortOnMemory was 54.
+
+An additional explanation about the orphan socket count: you can find
+the exact orphan socket count with the 'ss -s' command, but when the
+kernel decides whether to increase TcpExtTCPAbortOnMemory and send an
+RST, it doesn't always check the exact orphan socket count. To improve
+performance, the kernel checks an approximate count first; only if the
+approximate count is more than tcp_max_orphans does the kernel check
+the exact count. So if the approximate count is less than
+tcp_max_orphans but the exact count is more than tcp_max_orphans, you
+would find that TcpExtTCPAbortOnMemory is not increased at all. If
+tcp_max_orphans is large enough, this won't occur, but if you decrease
+tcp_max_orphans to a small value as in our test, you might see it.
+That is why, in our test, the client set up 64 connections although
+tcp_max_orphans is 10. If the client only set up 11 connections, we
+couldn't observe the change of TcpExtTCPAbortOnMemory.
+
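+The decision can be sketched roughly as follows (an illustration in
+Python of the logic described above; the real check lives inside the
+kernel's TCP code, not in Python)::
+
+  # Illustrative sketch only, not kernel code.
+  def should_abort_on_memory(approx_count, exact_count, tcp_max_orphans):
+      # A cheap approximate count is consulted first ...
+      if approx_count <= tcp_max_orphans:
+          return False  # no RST, TcpExtTCPAbortOnMemory unchanged
+      # ... and the exact count only when the approximation is over the limit.
+      return exact_count > tcp_max_orphans
+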
+Continuing the previous test, we wait for several minutes. Because the
+iptables rule on the server blocked the traffic, the server wouldn't
+receive a FIN, and all of the client's orphan sockets would eventually
+time out in the FIN_WAIT_1 state. After waiting a few minutes, we can
+find 10 timeouts on the client::
+
+ nstatuser@nstat-a:~$ nstat | grep -i abort
+ TcpExtTCPAbortOnTimeout 10 0.0
+
+TcpExtTCPAbortOnLinger
+----------------------
+The server side code::
+
+ nstatuser@nstat-b:~$ cat server_linger.py
+ import socket
+ import time
+
+ port = 9000
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.bind(('0.0.0.0', port))
+ s.listen(1)
+ sock, addr = s.accept()
+ while True:
+ time.sleep(9999999)
+
+The client side code::
+
+ nstatuser@nstat-a:~$ cat client_linger.py
+ import socket
+ import struct
+
+ server = 'nstat-b' # server address
+ port = 9000
+
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ # SO_LINGER with l_onoff=1, l_linger=10: close() lingers up to 10 seconds
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 10))
+ # TCP_LINGER2 set negative: don't wait in FIN-WAIT-2, reset instead
+ s.setsockopt(socket.SOL_TCP, socket.TCP_LINGER2, struct.pack('i', -1))
+ s.connect((server, port))
+ s.close()
+
+Run server_linger.py on server::
+
+ nstatuser@nstat-b:~$ python3 server_linger.py
+
+Run client_linger.py on client::
+
+ nstatuser@nstat-a:~$ python3 client_linger.py
+
+After running client_linger.py, check the output of nstat::
+
+ nstatuser@nstat-a:~$ nstat | grep -i abort
+ TcpExtTCPAbortOnLinger 1 0.0
+
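+client_linger.py sets TCP_LINGER2 to a negative value, so when it closes
+the connection the kernel aborts it with an RST instead of waiting in
+the FIN-WAIT-2 state, and the abort is counted in TcpExtTCPAbortOnLinger.
+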
+TcpExtTCPRcvCoalesce
+--------------------
+On the server, we run a program which listens on TCP port 9000 but
+doesn't read any data::
+
+ import socket
+ import time
+ port = 9000
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.bind(('0.0.0.0', port))
+ s.listen(1)
+ sock, addr = s.accept()
+ while True:
+ time.sleep(9999999)
+
+Save the above code as server_coalesce.py, and run::
+
+ python3 server_coalesce.py
+
+On the client, save the code below as client_coalesce.py::
+
+ import socket
+ server = 'nstat-b'
+ port = 9000
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.connect((server, port))
+
+Run::
+
+ nstatuser@nstat-a:~$ python3 -i client_coalesce.py
+
+We use '-i' to enter the interactive mode, then send a packet::
+
+ >>> s.send(b'foo')
+ 3
+
+Send a packet again::
+
+ >>> s.send(b'bar')
+ 3
+
+On the server, run nstat::
+
+ ubuntu@nstat-b:~$ nstat
+ #kernel
+ IpInReceives 2 0.0
+ IpInDelivers 2 0.0
+ IpOutRequests 2 0.0
+ TcpInSegs 2 0.0
+ TcpOutSegs 2 0.0
+ TcpExtTCPRcvCoalesce 1 0.0
+ IpExtInOctets 110 0.0
+ IpExtOutOctets 104 0.0
+ IpExtInNoECTPkts 2 0.0
+
+The client sent two packets, and the server didn't read any data. When
+the second packet arrived at the server, the first packet was still in
+the receive queue. So the TCP layer merged the two packets, and we can
+find that TcpExtTCPRcvCoalesce increased by 1.
+
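+A non-interactive variant of the client (a sketch under the same
+assumptions as above) sends the two segments back to back; since the
+server never reads, the second segment is likely to arrive while the
+first is still in the receive queue and be coalesced with it::
+
+  import socket
+
+  s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+  s.connect(('nstat-b', 9000))  # server_coalesce.py must be running
+  s.send(b'foo')  # first segment queues in the server's receive queue
+  s.send(b'bar')  # second segment may be coalesced with the first
+  s.close()
+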
+TcpExtListenOverflows and TcpExtListenDrops
+-------------------------------------------
+On server, run the nc command, listen on port 9000::
+
+ nstatuser@nstat-b:~$ nc -lkv 0.0.0.0 9000
+ Listening on [0.0.0.0] (family 0, port 9000)
+
+On the client, run 3 nc commands in different terminals::
+
+ nstatuser@nstat-a:~$ nc -v nstat-b 9000
+ Connection to nstat-b 9000 port [tcp/*] succeeded!
+
+The nc command only accepts 1 connection, and the accept queue length
+is 1. In the current Linux implementation, setting the queue length to
+n means the actual queue length is n+1. Now we create 3 connections:
+1 is accepted by nc and 2 sit in the accept queue, so the accept queue
+is full.
+
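+The n+1 behaviour can also be observed with a small self-contained
+Python sketch (an illustration only; it uses the loopback address and
+a hypothetical port 9001, and the exact numbers depend on the kernel
+version)::
+
+  import socket
+
+  # Listener with backlog 1: the kernel lets backlog+1 connections
+  # complete before it starts dropping SYNs (nothing calls accept()).
+  srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+  srv.bind(('127.0.0.1', 9001))
+  srv.listen(1)
+
+  clients = []
+  for i in range(4):
+      c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+      c.settimeout(2)  # don't wait for all the SYN retries
+      try:
+          c.connect(('127.0.0.1', 9001))
+          print('connection %d succeeded' % (i + 1))
+      except socket.timeout:
+          print('connection %d timed out (accept queue full)' % (i + 1))
+      clients.append(c)
+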
+Before running the 4th nc, we clean the nstat history on the server::
+
+ nstatuser@nstat-b:~$ nstat -n
+
+Run the 4th nc on the client::
+
+ nstatuser@nstat-a:~$ nc -v nstat-b 9000
+
+If the nc server is running on kernel 4.10 or a higher version, you
+won't see the "Connection to ... succeeded!" string, because the kernel
+will drop the SYN if the accept queue is full. If the nc server is running
+on an old kernel, you would see that the connection succeeded, because
+the kernel would complete the 3-way handshake and keep the socket in the
+half-open queue. I did the test on kernel 4.15. Below is the nstat
+output on the server::
+
+ nstatuser@nstat-b:~$ nstat
+ #kernel
+ IpInReceives 4 0.0
+ IpInDelivers 4 0.0
+ TcpInSegs 4 0.0
+ TcpExtListenOverflows 4 0.0
+ TcpExtListenDrops 4 0.0
+ IpExtInOctets 240 0.0
+ IpExtInNoECTPkts 4 0.0
+
+Both TcpExtListenOverflows and TcpExtListenDrops were 4. If the time
+between the 4th nc and the nstat run had been longer, the values of
+TcpExtListenOverflows and TcpExtListenDrops would be larger, because
+the SYN of the 4th nc was dropped and the client kept retrying.
+
+IpInAddrErrors, IpExtInNoRoutes and IpOutNoRoutes
+-------------------------------------------------
+server A IP address: 192.168.122.250
+server B IP address: 192.168.122.251
+Prepare on server A: add a route to 8.8.8.8 via server B::
+
+ $ sudo ip route add 8.8.8.8/32 via 192.168.122.251
+
+Prepare on server B: disable send_redirects for all interfaces::
+
+ $ sudo sysctl -w net.ipv4.conf.all.send_redirects=0
+ $ sudo sysctl -w net.ipv4.conf.ens3.send_redirects=0
+ $ sudo sysctl -w net.ipv4.conf.lo.send_redirects=0
+ $ sudo sysctl -w net.ipv4.conf.default.send_redirects=0
+
+We want to let server A send a packet to 8.8.8.8 and route the packet
+to server B. When server B receives such a packet, it might send an
+ICMP Redirect message to server A; setting send_redirects to 0 disables
+this behavior.
+
+First, generate IpInAddrErrors. On server B, we disable IP forwarding::
+
+ $ sudo sysctl -w net.ipv4.conf.all.forwarding=0
+
+On server A, we send packets to 8.8.8.8::
+
+ $ nc -v 8.8.8.8 53
+
+On server B, we check the output of nstat::
+
+ $ nstat
+ #kernel
+ IpInReceives 3 0.0
+ IpInAddrErrors 3 0.0
+ IpExtInOctets 180 0.0
+ IpExtInNoECTPkts 3 0.0
+
+As we have let server A route 8.8.8.8 via server B, and we disabled IP
+forwarding on server B, server A sent packets to server B, and server B
+dropped the packets and increased IpInAddrErrors. As the nc command
+re-sends the SYN packet if it doesn't receive a SYN+ACK, we can find
+multiple IpInAddrErrors.
+
+Second, generate IpExtInNoRoutes. On server B, we enable IP
+forwarding::
+
+ $ sudo sysctl -w net.ipv4.conf.all.forwarding=1
+
+Check the route table of server B and remove the default route::
+
+ $ ip route show
+ default via 192.168.122.1 dev ens3 proto static
+ 192.168.122.0/24 dev ens3 proto kernel scope link src 192.168.122.251
+ $ sudo ip route delete default via 192.168.122.1 dev ens3 proto static
+
+On server A, we contact 8.8.8.8 again::
+
+ $ nc -v 8.8.8.8 53
+ nc: connect to 8.8.8.8 port 53 (tcp) failed: Network is unreachable
+
+On server B, run nstat::
+
+ $ nstat
+ #kernel
+ IpInReceives 1 0.0
+ IpOutRequests 1 0.0
+ IcmpOutMsgs 1 0.0
+ IcmpOutDestUnreachs 1 0.0
+ IcmpMsgOutType3 1 0.0
+ IpExtInNoRoutes 1 0.0
+ IpExtInOctets 60 0.0
+ IpExtOutOctets 88 0.0
+ IpExtInNoECTPkts 1 0.0
+
+We enabled IP forwarding on server B, so when server B received a packet
+whose destination IP address was 8.8.8.8, server B tried to forward this
+packet. But we had deleted the default route, so there was no route for
+8.8.8.8; server B therefore increased IpExtInNoRoutes and sent an "ICMP
+Destination Unreachable" message to server A.
+
+Third, generate IpOutNoRoutes. Run the ping command on server B::
+
+ $ ping -c 1 8.8.8.8
+ connect: Network is unreachable
+
+Run nstat on server B::
+
+ $ nstat
+ #kernel
+ IpOutNoRoutes 1 0.0
+
+We have deleted the default route on server B. Server B couldn't find
+a route for the 8.8.8.8 IP address, so server B increased
+IpOutNoRoutes.
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
index 8ff7b4c8f91b..a5f103b083a0 100644
--- a/Documentation/networking/vrf.txt
+++ b/Documentation/networking/vrf.txt
@@ -103,19 +103,33 @@ VRF device:
or to specify the output device using cmsg and IP_PKTINFO.
+By default the scope of the port bindings for unbound sockets is
+limited to the default VRF. That is, they will not be matched by packets
+arriving on interfaces enslaved to an l3mdev, and processes may bind to
+the same port if they bind to an l3mdev.
+
TCP & UDP services running in the default VRF context (ie., not bound
to any VRF device) can work across all VRF domains by enabling the
tcp_l3mdev_accept and udp_l3mdev_accept sysctl options:
+
sysctl -w net.ipv4.tcp_l3mdev_accept=1
sysctl -w net.ipv4.udp_l3mdev_accept=1
+These options are disabled by default so that a socket in a VRF is only
+selected for packets in that VRF. There is a similar option for RAW
+sockets, which is enabled by default for reasons of backwards compatibility.
+This makes it possible to specify the output device with cmsg and
+IP_PKTINFO while using a socket not bound to the corresponding VRF. It
+allows e.g. older ping implementations to specify the device without
+being executed in the VRF. This option can be disabled so that packets received in a VRF
+context are only handled by a raw socket bound to the VRF, and packets in the
+default VRF are only handled by a socket not bound to any VRF:
+
+ sysctl -w net.ipv4.raw_l3mdev_accept=0
+
netfilter rules on the VRF device can be used to limit access to services
running in the default VRF context as well.
-The default VRF does not have limited scope with respect to port bindings.
-That is, if a process does a wildcard bind to a port in the default VRF it
-owns the port across all VRF domains within the network namespace.
-
################################################################################
Using iproute2 for VRFs
diff --git a/Documentation/networking/xfrm_device.txt b/Documentation/networking/xfrm_device.txt
index 267f55b5f54a..a1c904dc70dc 100644
--- a/Documentation/networking/xfrm_device.txt
+++ b/Documentation/networking/xfrm_device.txt
@@ -111,9 +111,10 @@ the stack in xfrm_input().
xfrm_state_hold(xs);
store the state information into the skb
- skb->sp = secpath_dup(skb->sp);
- skb->sp->xvec[skb->sp->len++] = xs;
- skb->sp->olen++;
+ sp = secpath_set(skb);
+ if (!sp) return;
+ sp->xvec[sp->len++] = xs;
+ sp->olen++;
indicate the success and/or error status of the offload
xo = xfrm_offload(skb);
diff --git a/Documentation/perf/thunderx2-pmu.txt b/Documentation/perf/thunderx2-pmu.txt
new file mode 100644
index 000000000000..dffc57143736
--- /dev/null
+++ b/Documentation/perf/thunderx2-pmu.txt
@@ -0,0 +1,41 @@
+Cavium ThunderX2 SoC Performance Monitoring Unit (PMU UNCORE)
+=============================================================
+
+The ThunderX2 SoC PMU consists of independent, system-wide, per-socket
+PMUs such as the Level 3 Cache (L3C) and DDR4 Memory Controller (DMC).
+
+The DMC has 8 interleaved channels and the L3C has 16 interleaved tiles.
+Events are counted for the default channel (i.e. channel 0) and prorated
+to the total number of channels/tiles.
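+For example, with the DMC's 8 interleaved channels, an event count
+observed on channel 0 is multiplied by 8 to estimate the socket-wide
+total.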
+
+The DMC and L3C support up to 4 counters. Counters are independently
+programmable and can be started and stopped individually. Each counter
+can be set to a different event. Counters are 32-bit and do not support
+an overflow interrupt; they are read every 2 seconds.
+
+PMU UNCORE (perf) driver:
+
+The thunderx2_pmu driver registers per-socket perf PMUs for the DMC and
+L3C devices. Each PMU can be used to count up to 4 events
+simultaneously. The PMUs provide a description of their available events
+and configuration options under sysfs, see
+/sys/devices/uncore_<l3c_S/dmc_S/>; S is the socket id.
+
+The driver does not support sampling, therefore "perf record" will not
+work. Per-task perf sessions are also not supported.
+
+Examples:
+
+# perf stat -a -e uncore_dmc_0/cnt_cycles/ sleep 1
+
+# perf stat -a -e \
+uncore_dmc_0/cnt_cycles/,\
+uncore_dmc_0/data_transfers/,\
+uncore_dmc_0/read_txns/,\
+uncore_dmc_0/write_txns/ sleep 1
+
+# perf stat -a -e \
+uncore_l3c_0/read_request/,\
+uncore_l3c_0/read_hit/,\
+uncore_l3c_0/inv_request/,\
+uncore_l3c_0/inv_hit/ sleep 1
diff --git a/Documentation/powerpc/firmware-assisted-dump.txt b/Documentation/powerpc/firmware-assisted-dump.txt
index bdd344aa18d9..18c5feef2577 100644
--- a/Documentation/powerpc/firmware-assisted-dump.txt
+++ b/Documentation/powerpc/firmware-assisted-dump.txt
@@ -113,7 +113,15 @@ header, is usually reserved at an offset greater than boot memory
size (see Fig. 1). This area is *not* released: this region will
be kept permanently reserved, so that it can act as a receptacle
for a copy of the boot memory content in addition to CPU state
-and HPTE region, in the case a crash does occur.
+and HPTE region, in the case a crash does occur. Since this reserved
+memory area is used only after a system crash, there is no point in
+blocking this significant chunk of memory from the production kernel.
+Hence, the implementation uses the Linux kernel's Contiguous Memory
+Allocator (CMA) for memory reservation if CMA is configured for the kernel.
+With CMA reservation this memory will be available for applications to
+use, while the kernel is prevented from using it. With this, fadump will
+still be able to capture all of the kernel memory and most of the user
+space memory, except the user pages that were present in the CMA region.
o Memory Reservation during first kernel
@@ -162,6 +170,9 @@ How to enable firmware-assisted dump (fadump):
1. Set config option CONFIG_FA_DUMP=y and build kernel.
2. Boot into linux kernel with 'fadump=on' kernel cmdline option.
+ By default, fadump reserved memory will be initialized as a CMA area.
+ Alternatively, the user can boot the Linux kernel with 'fadump=nocma'
+ to prevent fadump from using CMA.
3. Optionally, user can also set 'crashkernel=' kernel cmdline
to specify size of the memory to reserve for boot memory dump
preservation.
@@ -172,6 +183,10 @@ NOTE: 1. 'fadump_reserve_mem=' parameter has been deprecated. Instead
2. If firmware-assisted dump fails to reserve memory then it
will fallback to existing kdump mechanism if 'crashkernel='
option is set at kernel cmdline.
+ 3. If the user wants to capture all of user space memory and is OK
+ with the reserved memory not being available to the production
+ system, then the 'fadump=nocma' kernel parameter can be used to
+ fall back to the old behaviour.
Sysfs/debugfs files:
------------
diff --git a/Documentation/powerpc/isa-versions.rst b/Documentation/powerpc/isa-versions.rst
new file mode 100644
index 000000000000..812e20cc898c
--- /dev/null
+++ b/Documentation/powerpc/isa-versions.rst
@@ -0,0 +1,74 @@
+CPU to ISA Version Mapping
+==========================
+
+Mapping of some CPU versions to relevant ISA versions.
+
+========= ====================
+CPU Architecture version
+========= ====================
+Power9 Power ISA v3.0B
+Power8 Power ISA v2.07
+Power7 Power ISA v2.06
+Power6 Power ISA v2.05
+PA6T Power ISA v2.04
+Cell PPU - Power ISA v2.02 with some minor exceptions
+ - Plus Altivec/VMX ~= 2.03
+Power5++ Power ISA v2.04 (no VMX)
+Power5+ Power ISA v2.03
+Power5 - PowerPC User Instruction Set Architecture Book I v2.02
+ - PowerPC Virtual Environment Architecture Book II v2.02
+ - PowerPC Operating Environment Architecture Book III v2.02
+PPC970 - PowerPC User Instruction Set Architecture Book I v2.01
+ - PowerPC Virtual Environment Architecture Book II v2.01
+ - PowerPC Operating Environment Architecture Book III v2.01
+ - Plus Altivec/VMX ~= 2.03
+========= ====================
+
+
+Key Features
+------------
+
+========== ==================
+CPU VMX (aka. Altivec)
+========== ==================
+Power9 Yes
+Power8 Yes
+Power7 Yes
+Power6 Yes
+PA6T Yes
+Cell PPU Yes
+Power5++ No
+Power5+ No
+Power5 No
+PPC970 Yes
+========== ==================
+
+========== ====
+CPU VSX
+========== ====
+Power9 Yes
+Power8 Yes
+Power7 Yes
+Power6 No
+PA6T No
+Cell PPU No
+Power5++ No
+Power5+ No
+Power5 No
+PPC970 No
+========== ====
+
+========== ====================
+CPU Transactional Memory
+========== ====================
+Power9 Yes (* see transactional_memory.txt)
+Power8 Yes
+Power7 No
+Power6 No
+PA6T No
+Cell PPU No
+Power5++ No
+Power5+ No
+Power5 No
+PPC970 No
+========== ====================
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt
index 92999d4e0cb8..25a4b4cf04a6 100644
--- a/Documentation/scsi/scsi-parameters.txt
+++ b/Documentation/scsi/scsi-parameters.txt
@@ -97,11 +97,6 @@ parameters may be changed at runtime by the command
allowing boot to proceed. none ignores them, expecting
user space to do the scan.
- scsi_mod.use_blk_mq=
- [SCSI] use blk-mq I/O path by default
- See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig.
- Format: <y/n>
-
sim710= [SCSI,HW]
See header of drivers/scsi/sim710.c.
diff --git a/Documentation/sh/new-machine.txt b/Documentation/sh/new-machine.txt
index f0354164cb0e..e0961a66130b 100644
--- a/Documentation/sh/new-machine.txt
+++ b/Documentation/sh/new-machine.txt
@@ -116,7 +116,6 @@ might look something like:
* arch/sh/boards/vapor/setup.c - Setup code for imaginary board
*/
#include <linux/init.h>
-#include <asm/rtc.h> /* for board_time_init() */
const char *get_system_type(void)
{
@@ -132,13 +131,6 @@ int __init platform_setup(void)
* this board.
*/
- /*
- * Presume all FooTech boards have the same broken timer,
- * and also presume that we've defined foo_timer_init to
- * do something useful.
- */
- board_time_init = foo_timer_init;
-
/* Start-up imaginary PCI ... */
/* And whatever else ... */
diff --git a/Documentation/userspace-api/spec_ctrl.rst b/Documentation/userspace-api/spec_ctrl.rst
index 32f3d55c54b7..c4dbe6f7cdae 100644
--- a/Documentation/userspace-api/spec_ctrl.rst
+++ b/Documentation/userspace-api/spec_ctrl.rst
@@ -92,3 +92,12 @@ Speculation misfeature controls
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
* prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
+
+- PR_SPEC_INDIRECT_BRANCH: Indirect Branch Speculation in User Processes
+ (Mitigate Spectre V2 style attacks against user processes)
+
+ Invocations:
+ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index cd209f7730af..356156f5c52d 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -305,6 +305,9 @@ the address space for which you want to return the dirty bitmap.
They must be less than the value that KVM_CHECK_EXTENSION returns for
the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+The bits in the dirty bitmap are cleared before the ioctl returns, unless
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is enabled. For more information,
+see the description of the capability.
4.9 KVM_SET_MEMORY_ALIAS
@@ -1129,10 +1132,15 @@ documentation when it pops into existence).
4.37 KVM_ENABLE_CAP
-Capability: KVM_CAP_ENABLE_CAP, KVM_CAP_ENABLE_CAP_VM
-Architectures: x86 (only KVM_CAP_ENABLE_CAP_VM),
- mips (only KVM_CAP_ENABLE_CAP), ppc, s390
-Type: vcpu ioctl, vm ioctl (with KVM_CAP_ENABLE_CAP_VM)
+Capability: KVM_CAP_ENABLE_CAP
+Architectures: mips, ppc, s390
+Type: vcpu ioctl
+Parameters: struct kvm_enable_cap (in)
+Returns: 0 on success; -1 on error
+
+Capability: KVM_CAP_ENABLE_CAP_VM
+Architectures: all
+Type: vm ioctl
Parameters: struct kvm_enable_cap (in)
Returns: 0 on success; -1 on error
@@ -3753,6 +3761,102 @@ Coalesced pio is based on coalesced mmio. There is little difference
between coalesced mmio and pio except that coalesced pio records accesses
to I/O ports.
+4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl)
+
+Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_clear_dirty_log (in)
+Returns: 0 on success, -1 on error
+
+/* for KVM_CLEAR_DIRTY_LOG */
+struct kvm_clear_dirty_log {
+ __u32 slot;
+ __u32 num_pages;
+ __u64 first_page;
+ union {
+ void __user *dirty_bitmap; /* one bit per page */
+ __u64 padding;
+ };
+};
+
+The ioctl clears the dirty status of pages in a memory slot, according to
+the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap
+field. Bit 0 of the bitmap corresponds to page "first_page" in the
+memory slot, and num_pages is the size in bits of the input bitmap.
+Both first_page and num_pages must be a multiple of 64. For each bit
+that is set in the input bitmap, the corresponding page is marked "clean"
+in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
+(for example via write-protection, or by clearing the dirty bit in
+a page table entry).
+
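+As an illustrative example, clearing the dirty status of pages 128-191
+of a slot would use first_page = 128, num_pages = 64, and a bitmap whose
+low 64 bits select the pages to be cleared.
+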
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specify
+the address space for which you want to clear the dirty bitmap.
+They must be less than the value that KVM_CHECK_EXTENSION returns for
+the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+
+This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
+is enabled; for more information, see the description of the capability.
+However, it can always be used as long as KVM_CHECK_EXTENSION confirms
+that KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is present.
+
+4.118 KVM_GET_SUPPORTED_HV_CPUID
+
+Capability: KVM_CAP_HYPERV_CPUID
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_cpuid2 (in/out)
+Returns: 0 on success, -1 on error
+
+struct kvm_cpuid2 {
+ __u32 nent;
+ __u32 padding;
+ struct kvm_cpuid_entry2 entries[0];
+};
+
+struct kvm_cpuid_entry2 {
+ __u32 function;
+ __u32 index;
+ __u32 flags;
+ __u32 eax;
+ __u32 ebx;
+ __u32 ecx;
+ __u32 edx;
+ __u32 padding[3];
+};
+
+This ioctl returns x86 cpuid features leaves related to Hyper-V emulation in
+KVM. Userspace can use the information returned by this ioctl to construct
+cpuid information presented to guests consuming Hyper-V enlightenments (e.g.
+Windows or Hyper-V guests).
+
+CPUID feature leaves returned by this ioctl are defined by Hyper-V Top Level
+Functional Specification (TLFS). These leaves can't be obtained with
+KVM_GET_SUPPORTED_CPUID ioctl because some of them intersect with KVM feature
+leaves (0x40000000, 0x40000001).
+
+Currently, the following list of CPUID leaves are returned:
+ HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS
+ HYPERV_CPUID_INTERFACE
+ HYPERV_CPUID_VERSION
+ HYPERV_CPUID_FEATURES
+ HYPERV_CPUID_ENLIGHTMENT_INFO
+ HYPERV_CPUID_IMPLEMENT_LIMITS
+ HYPERV_CPUID_NESTED_FEATURES
+
+HYPERV_CPUID_NESTED_FEATURES leaf is only exposed when Enlightened VMCS was
+enabled on the corresponding vCPU (KVM_CAP_HYPERV_ENLIGHTENED_VMCS).
+
+Userspace invokes KVM_GET_SUPPORTED_HV_CPUID by passing a kvm_cpuid2 structure
+with the 'nent' field indicating the number of entries in the variable-size
+array 'entries'. If the number of entries is too low to describe all Hyper-V
+feature leaves, an error (E2BIG) is returned. If the number is greater than
+or equal to the number of Hyper-V feature leaves, the 'nent' field is
+adjusted to the number of valid entries in the 'entries' array, which is then filled.
+
+'index' and 'flags' fields in 'struct kvm_cpuid_entry2' are currently
+reserved; userspace should not expect to get any particular value there.
+
5. The kvm_run structure
------------------------
@@ -4647,6 +4751,30 @@ and injected exceptions.
* For the new DR6 bits, note that bit 16 is set iff the #DB exception
will clear DR6.RTM.
+7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT
+
+Architectures: all
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability enabled, KVM_GET_DIRTY_LOG will not automatically
+clear and write-protect all pages that are returned as dirty.
+Rather, userspace will have to do this operation separately using
+KVM_CLEAR_DIRTY_LOG.
+
+At the cost of a slightly more complicated operation, this provides better
+scalability and responsiveness for two reasons. First, the
+KVM_CLEAR_DIRTY_LOG ioctl can operate at 64-page granularity rather
+than requiring a full memslot to be synced; this ensures that KVM does not
+take spinlocks for an extended period of time. Second, in some cases a
+large amount of time can pass between a call to KVM_GET_DIRTY_LOG and
+userspace actually using the data in the page. Pages can be modified
+during this time, which is inefficient for both the guest and userspace:
+the guest will incur a higher penalty due to write protection faults,
+while userspace can see false reports of dirty pages. Manual reprotection
+helps reduce this time, improving guest performance and reducing the
+number of dirty log false positives.
+
+
8. Other capabilities.
----------------------
diff --git a/Documentation/vm/unevictable-lru.rst b/Documentation/vm/unevictable-lru.rst
index fdd84cb8d511..b8e29f977f2d 100644
--- a/Documentation/vm/unevictable-lru.rst
+++ b/Documentation/vm/unevictable-lru.rst
@@ -143,7 +143,7 @@ using a number of wrapper functions:
Query the address space, and return true if it is completely
unevictable.
-These are currently used in two places in the kernel:
+These are currently used in three places in the kernel:
(1) By ramfs to mark the address spaces of its inodes when they are created,
and this mark remains for the life of the inode.
@@ -154,6 +154,10 @@ These are currently used in two places in the kernel:
swapped out; the application must touch the pages manually if it wants to
ensure they're in memory.
+ (3) By the i915 driver to mark pinned address space until it's unpinned. The
+ amount of unevictable memory marked by i915 driver is roughly the bounded
+ object size in debugfs/dri/0/i915_gem_objects.
+
Detecting Unevictable Pages
---------------------------
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 7727db8f94bc..5e9b826b5f62 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -61,18 +61,6 @@ Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields
to struct boot_params for loading bzImage and ramdisk
above 4G in 64bit.
-Protocol 2.13: (Kernel 3.14) Support 32- and 64-bit flags being set in
- xloadflags to support booting a 64-bit kernel from 32-bit
- EFI
-
-Protocol 2.14: (Kernel 4.20) Added acpi_rsdp_addr holding the physical
- address of the ACPI RSDP table.
- The bootloader updates version with:
- 0x8000 | min(kernel-version, bootloader-version)
- kernel-version being the protocol version supported by
- the kernel and bootloader-version the protocol version
- supported by the bootloader.
-
**** MEMORY LAYOUT
The traditional memory map for the kernel loader, used for Image or
@@ -209,7 +197,6 @@ Offset Proto Name Meaning
0258/8 2.10+ pref_address Preferred loading address
0260/4 2.10+ init_size Linear memory required during initialization
0264/4 2.11+ handover_offset Offset of handover entry point
-0268/8 2.14+ acpi_rsdp_addr Physical address of RSDP table
(1) For backwards compatibility, if the setup_sects field contains 0, the
real value is 4.
@@ -322,7 +309,7 @@ Protocol: 2.00+
Contains the magic number "HdrS" (0x53726448).
Field name: version
-Type: modify
+Type: read
Offset/size: 0x206/2
Protocol: 2.00+
@@ -330,12 +317,6 @@ Protocol: 2.00+
e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
10.17.
- Up to protocol version 2.13 this information is only read by the
- bootloader. From protocol version 2.14 onwards the bootloader will
- write the used protocol version or-ed with 0x8000 to the field. The
- used protocol version will be the minimum of the supported protocol
- versions of the bootloader and the kernel.
-
Field name: realmode_swtch
Type: modify (optional)
Offset/size: 0x208/4
@@ -763,17 +744,6 @@ Offset/size: 0x264/4
See EFI HANDOVER PROTOCOL below for more details.
-Field name: acpi_rsdp_addr
-Type: write
-Offset/size: 0x268/8
-Protocol: 2.14+
-
- This field can be set by the boot loader to tell the kernel the
- physical address of the ACPI RSDP table.
-
- A value of 0 indicates the kernel should fall back to the standard
- methods to locate the RSDP.
-
**** THE IMAGE CHECKSUM
diff --git a/Documentation/x86/intel_rdt_ui.txt b/Documentation/x86/resctrl_ui.txt
index 52b10945ff75..d9aed8303984 100644
--- a/Documentation/x86/intel_rdt_ui.txt
+++ b/Documentation/x86/resctrl_ui.txt
@@ -1,4 +1,7 @@
-User Interface for Resource Allocation in Intel Resource Director Technology
+User Interface for Resource Control feature
+
+Intel refers to this feature as Intel Resource Director Technology (Intel(R) RDT).
+AMD refers to this feature as AMD Platform Quality of Service (AMD QoS).
Copyright (C) 2016 Intel Corporation
@@ -6,8 +9,8 @@ Fenghua Yu <fenghua.yu@intel.com>
Tony Luck <tony.luck@intel.com>
Vikas Shivappa <vikas.shivappa@intel.com>
-This feature is enabled by the CONFIG_INTEL_RDT Kconfig and the
-X86 /proc/cpuinfo flag bits:
+This feature is enabled by the CONFIG_RESCTRL and the X86 /proc/cpuinfo
+flag bits:
RDT (Resource Director Technology) Allocation - "rdt_a"
CAT (Cache Allocation Technology) - "cat_l3", "cat_l2"
CDP (Code and Data Prioritization ) - "cdp_l3", "cdp_l2"
diff --git a/MAINTAINERS b/MAINTAINERS
index 380e43f585d3..7a9804a891fd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -140,7 +140,7 @@ Maintainers List (try to look for most precise areas first)
M: Steffen Klassert <klassert@kernel.org>
L: netdev@vger.kernel.org
S: Odd Fixes
-F: Documentation/networking/vortex.txt
+F: Documentation/networking/device_drivers/3com/vortex.txt
F: drivers/net/ethernet/3com/3c59x.c
3CR990 NETWORK DRIVER
@@ -740,7 +740,7 @@ R: Saeed Bishara <saeedb@amazon.com>
R: Zorik Machulsky <zorik@amazon.com>
L: netdev@vger.kernel.org
S: Supported
-F: Documentation/networking/ena.txt
+F: Documentation/networking/device_drivers/amazon/ena.txt
F: drivers/net/ethernet/amazon/
AMD CRYPTOGRAPHIC COPROCESSOR (CCP) DRIVER
@@ -1310,6 +1310,13 @@ F: drivers/pinctrl/meson/
F: drivers/mmc/host/meson*
N: meson
+ARM/Amlogic Meson SoC Sound Drivers
+M: Jerome Brunet <jbrunet@baylibre.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: sound/soc/meson/
+F: Documentation/devicetree/bindings/sound/amlogic*
+
ARM/Annapurna Labs ALPINE ARCHITECTURE
M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
M: Antoine Tenart <antoine.tenart@bootlin.com>
@@ -1472,6 +1479,7 @@ F: drivers/clk/sirf/
F: drivers/clocksource/timer-prima2.c
F: drivers/clocksource/timer-atlas7.c
N: [^a-z]sirf
+X: drivers/gnss
ARM/EBSA110 MACHINE SUPPORT
M: Russell King <linux@armlinux.org.uk>
@@ -1738,13 +1746,17 @@ ARM/Mediatek SoC support
M: Matthias Brugger <matthias.bgg@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+W: https://mtk.bcnfs.org/
+C: irc://chat.freenode.net/linux-mediatek
S: Maintained
F: arch/arm/boot/dts/mt6*
F: arch/arm/boot/dts/mt7*
F: arch/arm/boot/dts/mt8*
F: arch/arm/mach-mediatek/
F: arch/arm64/boot/dts/mediatek/
+F: drivers/soc/mediatek/
N: mtk
+N: mt[678]
K: mediatek
ARM/Mediatek USB3 PHY DRIVER
@@ -1923,7 +1935,6 @@ ARM/QUALCOMM SUPPORT
M: Andy Gross <andy.gross@linaro.org>
M: David Brown <david.brown@linaro.org>
L: linux-arm-msm@vger.kernel.org
-L: linux-soc@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/soc/qcom/
F: arch/arm/boot/dts/qcom-*.dts
@@ -2063,7 +2074,6 @@ M: Andrzej Hajda <a.hajda@samsung.com>
L: linux-arm-kernel@lists.infradead.org
L: linux-media@vger.kernel.org
S: Maintained
-F: arch/arm/plat-samsung/s5p-dev-mfc.c
F: drivers/media/platform/s5p-mfc/
ARM/SHMOBILE ARM ARCHITECTURE
@@ -2415,6 +2425,14 @@ S: Maintained
F: Documentation/hwmon/asc7621
F: drivers/hwmon/asc7621.c
+ASPEED VIDEO ENGINE DRIVER
+M: Eddie James <eajames@linux.ibm.com>
+L: linux-media@vger.kernel.org
+L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/media/platform/aspeed-video.c
+F: Documentation/devicetree/bindings/media/aspeed-video.txt
+
ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
M: Corentin Chary <corentin.chary@gmail.com>
L: acpi4asus-user@lists.sourceforge.net
@@ -2491,7 +2509,7 @@ F: drivers/net/wireless/ath/*
ATHEROS ATH5K WIRELESS DRIVER
M: Jiri Slaby <jirislaby@gmail.com>
M: Nick Kossifidis <mickflemm@gmail.com>
-M: "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
+M: Luis Chamberlain <mcgrof@kernel.org>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/ath5k
S: Maintained
@@ -2629,6 +2647,13 @@ S: Maintained
F: Documentation/devicetree/bindings/sound/axentia,*
F: sound/soc/atmel/tse850-pcm5142.c
+AXXIA I2C CONTROLLER
+M: Krzysztof Adamski <krzysztof.adamski@nokia.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-axxia.txt
+F: drivers/i2c/busses/i2c-axxia.c
+
AZ6007 DVB DRIVER
M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
@@ -2801,7 +2826,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
S: Supported
-F: arch/x86/net/bpf_jit*
+F: arch/*/net/*
F: Documentation/networking/filter.txt
F: Documentation/bpf/
F: include/linux/bpf*
@@ -2821,6 +2846,67 @@ F: tools/bpf/
F: tools/lib/bpf/
F: tools/testing/selftests/bpf/
+BPF JIT for ARM
+M: Shubham Bansal <illusionist.neo@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/arm/net/
+
+BPF JIT for ARM64
+M: Daniel Borkmann <daniel@iogearbox.net>
+M: Alexei Starovoitov <ast@kernel.org>
+M: Zi Shen Lim <zlim.lnx@gmail.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: arch/arm64/net/
+
+BPF JIT for MIPS (32-BIT AND 64-BIT)
+M: Paul Burton <paul.burton@mips.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/mips/net/
+
+BPF JIT for NFP NICs
+M: Jakub Kicinski <jakub.kicinski@netronome.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/netronome/nfp/bpf/
+
+BPF JIT for POWERPC (32-BIT AND 64-BIT)
+M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
+M: Sandipan Das <sandipan@linux.ibm.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/powerpc/net/
+
+BPF JIT for S390
+M: Martin Schwidefsky <schwidefsky@de.ibm.com>
+M: Heiko Carstens <heiko.carstens@de.ibm.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/s390/net/
+X: arch/s390/net/pnet.c
+
+BPF JIT for SPARC (32-BIT AND 64-BIT)
+M: David S. Miller <davem@davemloft.net>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/sparc/net/
+
+BPF JIT for X86 32-BIT
+M: Wang YanQing <udknight@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: arch/x86/net/bpf_jit_comp32.c
+
+BPF JIT for X86 64-BIT
+M: Alexei Starovoitov <ast@kernel.org>
+M: Daniel Borkmann <daniel@iogearbox.net>
+L: netdev@vger.kernel.org
+S: Supported
+F: arch/x86/net/
+X: arch/x86/net/bpf_jit_comp32.c
+
BROADCOM B44 10/100 ETHERNET DRIVER
M: Michael Chan <michael.chan@broadcom.com>
L: netdev@vger.kernel.org
@@ -2861,7 +2947,7 @@ F: drivers/staging/vc04_services
BROADCOM BCM47XX MIPS ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
M: Rafał Miłecki <zajec5@gmail.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/mips/brcm/
F: arch/mips/bcm47xx/*
@@ -2870,7 +2956,6 @@ F: arch/mips/include/asm/mach-bcm47xx/*
BROADCOM BCM5301X ARM ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
M: Rafał Miłecki <zajec5@gmail.com>
-M: Jon Mason <jonmason@broadcom.com>
M: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org
S: Maintained
@@ -2925,7 +3010,7 @@ F: drivers/cpufreq/bmips-cpufreq.c
BROADCOM BMIPS MIPS ARCHITECTURE
M: Kevin Cernekee <cernekee@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
T: git git://github.com/broadcom/stblinux.git
S: Maintained
F: arch/mips/bmips/*
@@ -3016,7 +3101,6 @@ F: drivers/net/ethernet/broadcom/genet/
BROADCOM IPROC ARM ARCHITECTURE
M: Ray Jui <rjui@broadcom.com>
M: Scott Branden <sbranden@broadcom.com>
-M: Jon Mason <jonmason@broadcom.com>
M: bcm-kernel-feedback-list@broadcom.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
T: git git://github.com/broadcom/cygnus-linux.git
@@ -3063,7 +3147,7 @@ F: include/uapi/rdma/bnxt_re-abi.h
BROADCOM NVRAM DRIVER
M: Rafał Miłecki <zajec5@gmail.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: drivers/firmware/broadcom/*
@@ -3213,11 +3297,16 @@ S: Maintained
F: sound/pci/oxygen/
C-SKY ARCHITECTURE
-M: Guo Ren <ren_guo@c-sky.com>
+M: Guo Ren <guoren@kernel.org>
T: git https://github.com/c-sky/csky-linux.git
S: Supported
F: arch/csky/
F: Documentation/devicetree/bindings/csky/
+F: drivers/irqchip/irq-csky-*
+F: Documentation/devicetree/bindings/interrupt-controller/csky,*
+F: drivers/clocksource/timer-gx6605s.c
+F: drivers/clocksource/timer-mp-csky.c
+F: Documentation/devicetree/bindings/timer/csky,*
K: csky
N: csky
@@ -3395,6 +3484,7 @@ F: include/linux/spi/cc2520.h
F: Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
CCREE ARM TRUSTZONE CRYPTOCELL REE DRIVER
+M: Yael Chemla <yael.chemla@foss.arm.com>
M: Gilad Ben-Yossef <gilad@benyossef.com>
L: linux-crypto@vger.kernel.org
S: Supported
@@ -3611,8 +3701,10 @@ W: https://github.com/CirrusLogic/linux-drivers/wiki
S: Supported
F: Documentation/devicetree/bindings/mfd/madera.txt
F: Documentation/devicetree/bindings/pinctrl/cirrus,madera-pinctrl.txt
+F: include/linux/irqchip/irq-madera*
F: include/linux/mfd/madera/*
F: drivers/gpio/gpio-madera*
+F: drivers/irqchip/irq-madera*
F: drivers/mfd/madera*
F: drivers/mfd/cs47l*
F: drivers/pinctrl/cirrus/*
@@ -3920,13 +4012,20 @@ T: git git://linuxtv.org/media_tree.git
W: http://linuxtv.org
S: Odd Fixes
F: drivers/media/i2c/cs3308.c
-F: drivers/media/i2c/cs3308.h
CS5535 Audio ALSA driver
M: Jaya Kumar <jayakumar.alsa@gmail.com>
S: Maintained
F: sound/pci/cs5535audio/
+CSI DRIVERS FOR ALLWINNER V3s
+M: Yong Deng <yong.deng@magewell.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/platform/sunxi/sun6i-csi/
+F: Documentation/devicetree/bindings/media/sun6i-csi.txt
+
CW1200 WLAN driver
M: Solomon Peachy <pizza@shaftnet.org>
S: Maintained
@@ -3951,7 +4050,7 @@ T: git git://linuxtv.org/media_tree.git
W: https://linuxtv.org
S: Maintained
F: drivers/media/common/cx2341x*
-F: include/media/cx2341x*
+F: include/media/drv-intf/cx2341x.h
CX24120 MEDIA DRIVER
M: Jemma Denson <jdenson@gmail.com>
@@ -3982,7 +4081,7 @@ S: Maintained
F: drivers/media/dvb-frontends/cxd2820r*
CXGB3 ETHERNET DRIVER (CXGB3)
-M: Santosh Raspatur <santosh@chelsio.com>
+M: Arjun Vynipadath <arjun@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -4011,7 +4110,7 @@ S: Supported
F: drivers/crypto/chelsio
CXGB4 ETHERNET DRIVER (CXGB4)
-M: Ganesh Goudar <ganeshgr@chelsio.com>
+M: Arjun Vynipadath <arjun@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -4040,7 +4139,7 @@ S: Supported
F: drivers/net/ethernet/chelsio/cxgb4vf/
CXL (IBM Coherent Accelerator Processor Interface CAPI) DRIVER
-M: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+M: Frederic Barrat <fbarrat@linux.ibm.com>
M: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
@@ -4052,9 +4151,9 @@ F: Documentation/powerpc/cxl.txt
F: Documentation/ABI/testing/sysfs-class-cxl
CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
-M: Manoj N. Kumar <manoj@linux.vnet.ibm.com>
-M: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
-M: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
+M: Manoj N. Kumar <manoj@linux.ibm.com>
+M: Matthew R. Ochs <mrochs@linux.ibm.com>
+M: Uma Krishnan <ukrishn@linux.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/cxlflash/
@@ -4126,7 +4225,7 @@ F: net/ax25/sysctl_net_ax25.c
DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
L: netdev@vger.kernel.org
S: Orphan
-F: Documentation/networking/dmfe.txt
+F: Documentation/networking/device_drivers/dec/dmfe.txt
F: drivers/net/ethernet/dec/tulip/dmfe.c
DC390/AM53C974 SCSI driver
@@ -4165,7 +4264,7 @@ F: net/decnet/
DECSTATION PLATFORM SUPPORT
M: "Maciej W. Rozycki" <macro@linux-mips.org>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
W: http://www.linux-mips.org/wiki/DECstation
S: Maintained
F: arch/mips/dec/
@@ -4662,6 +4761,13 @@ S: Maintained
F: drivers/gpu/drm/tinydrm/ili9225.c
F: Documentation/devicetree/bindings/display/ilitek,ili9225.txt
+DRM DRIVER FOR HX8357D PANELS
+M: Eric Anholt <eric@anholt.net>
+T: git git://anongit.freedesktop.org/drm/drm-misc
+S: Maintained
+F: drivers/gpu/drm/tinydrm/hx8357d.c
+F: Documentation/devicetree/bindings/display/himax,hx8357d.txt
+
DRM DRIVER FOR INTEL I810 VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/i810/
@@ -4703,6 +4809,12 @@ S: Supported
F: drivers/gpu/drm/nouveau/
F: include/uapi/drm/nouveau_drm.h
+DRM DRIVER FOR OLIMEX LCD-OLINUXINO PANELS
+M: Stefan Mavrodiev <stefan@olimex.com>
+S: Maintained
+F: drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+F: Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
+
DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
M: Noralf Trønnes <noralf@tronnes.org>
S: Maintained
@@ -4768,10 +4880,8 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVER FOR VMWARE VIRTUAL GPU
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
-M: Sinclair Yeh <syeh@vmware.com>
M: Thomas Hellstrom <thellstrom@vmware.com>
L: dri-devel@lists.freedesktop.org
-T: git git://people.freedesktop.org/~syeh/repos_linux
T: git git://people.freedesktop.org/~thomash/linux
S: Supported
F: drivers/gpu/drm/vmwgfx/
@@ -4779,6 +4889,7 @@ F: include/uapi/drm/vmwgfx_drm.h
DRM DRIVERS
M: David Airlie <airlied@linux.ie>
+M: Daniel Vetter <daniel@ffwll.ch>
L: dri-devel@lists.freedesktop.org
T: git git://anongit.freedesktop.org/drm/drm
B: https://bugs.freedesktop.org/
@@ -4828,7 +4939,7 @@ F: Documentation/gpu/meson.rst
T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVERS FOR ATMEL HLCDC
-M: Boris Brezillon <boris.brezillon@bootlin.com>
+M: Boris Brezillon <bbrezillon@kernel.org>
L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/atmel-hlcdc/
@@ -5256,7 +5367,7 @@ EDAC-CAVIUM OCTEON
M: Ralf Baechle <ralf@linux-mips.org>
M: David Daney <david.daney@cavium.com>
L: linux-edac@vger.kernel.org
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Supported
F: drivers/edac/octeon_edac*
@@ -5337,7 +5448,6 @@ S: Maintained
F: drivers/edac/i82443bxgx_edac.c
EDAC-I82975X
-M: Ranganathan Desikan <ravi@jetztechnologies.com>
M: "Arvind R." <arvino55@gmail.com>
L: linux-edac@vger.kernel.org
S: Maintained
@@ -5435,7 +5545,7 @@ S: Orphan
F: fs/efs/
EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M: Douglas Miller <dougmill@linux.vnet.ibm.com>
+M: Douglas Miller <dougmill@linux.ibm.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ibm/ehea/
@@ -5549,6 +5659,7 @@ F: include/linux/of_net.h
F: include/linux/phy.h
F: include/linux/phy_fixed.h
F: include/linux/platform_data/mdio-bcm-unimac.h
+F: include/linux/platform_data/mdio-gpio.h
F: include/trace/events/mdio.h
F: include/uapi/linux/mdio.h
F: include/uapi/linux/mii.h
@@ -5573,7 +5684,7 @@ F: Documentation/filesystems/ext4/ext4.rst
F: fs/ext4/
Extended Verification Module (EVM)
-M: Mimi Zohar <zohar@linux.vnet.ibm.com>
+M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org
S: Supported
F: security/integrity/evm/
@@ -5774,7 +5885,7 @@ F: include/uapi/linux/firewire*.h
F: tools/firewire/
FIRMWARE LOADER (request_firmware)
-M: Luis R. Rodriguez <mcgrof@kernel.org>
+M: Luis Chamberlain <mcgrof@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/firmware_class/
@@ -5783,7 +5894,7 @@ F: include/linux/firmware.h
FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card)
M: Joshua Morris <josh.h.morris@us.ibm.com>
-M: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
+M: Philip Kelleher <pjk1939@linux.ibm.com>
S: Maintained
F: drivers/block/rsxx/
@@ -6050,7 +6161,7 @@ F: include/linux/fscrypt*.h
F: Documentation/filesystems/fscrypt.rst
FSI-ATTACHED I2C DRIVER
-M: Eddie James <eajames@linux.vnet.ibm.com>
+M: Eddie James <eajames@linux.ibm.com>
L: linux-i2c@vger.kernel.org
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
S: Maintained
@@ -6226,8 +6337,7 @@ S: Supported
F: drivers/uio/uio_pci_generic.c
GENWQE (IBM Generic Workqueue Card)
-M: Frank Haverkamp <haver@linux.vnet.ibm.com>
-M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
+M: Frank Haverkamp <haver@linux.ibm.com>
S: Supported
F: drivers/misc/genwqe/
@@ -6258,6 +6368,7 @@ F: include/uapi/linux/gigaset_dev.h
GNSS SUBSYSTEM
M: Johan Hovold <johan@kernel.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git
S: Maintained
F: Documentation/ABI/testing/sysfs-class-gnss
F: Documentation/devicetree/bindings/gnss/
@@ -6836,9 +6947,11 @@ Hyper-V CORE AND DRIVERS
M: "K. Y. Srinivasan" <kys@microsoft.com>
M: Haiyang Zhang <haiyangz@microsoft.com>
M: Stephen Hemminger <sthemmin@microsoft.com>
+M: Sasha Levin <sashal@kernel.org>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
L: devel@linuxdriverproject.org
-S: Maintained
-F: Documentation/networking/netvsc.txt
+S: Supported
+F: Documentation/networking/device_drivers/microsoft/netvsc.txt
F: arch/x86/include/asm/mshyperv.h
F: arch/x86/include/asm/trace/hyperv.h
F: arch/x86/include/asm/hyperv-tlfs.h
@@ -6998,6 +7111,24 @@ L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/i2c-stub.c
+I3C SUBSYSTEM
+M: Boris Brezillon <bbrezillon@kernel.org>
+L: linux-i3c@lists.infradead.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/i3c/linux.git
+S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-i3c
+F: Documentation/devicetree/bindings/i3c/
+F: Documentation/driver-api/i3c
+F: drivers/i3c/
+F: include/linux/i3c/
+F: include/dt-bindings/i3c/
+
+I3C DRIVER FOR SYNOPSYS DESIGNWARE
+M: Vitor Soares <vitor.soares@synopsys.com>
+S: Maintained
+F: Documentation/devicetree/bindings/i3c/snps,dw-i3c-master.txt
+F: drivers/i3c/master/dw*
+
IA64 (Itanium) PLATFORM
M: Tony Luck <tony.luck@intel.com>
M: Fenghua Yu <fenghua.yu@intel.com>
@@ -7017,8 +7148,9 @@ F: crypto/842.c
F: lib/842/
IBM Power in-Nest Crypto Acceleration
-M: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
-M: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+M: Breno Leitão <leitao@debian.org>
+M: Nayna Jain <nayna@linux.ibm.com>
+M: Paulo Flabiano Smorigo <pfsmorigo@gmail.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: drivers/crypto/nx/Makefile
@@ -7035,8 +7167,8 @@ S: Supported
F: drivers/scsi/ipr.*
IBM Power SRIOV Virtual NIC Device Driver
-M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
-M: John Allen <jallen@linux.vnet.ibm.com>
+M: Thomas Falcon <tlfalcon@linux.ibm.com>
+M: John Allen <jallen@linux.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmvnic.*
@@ -7051,41 +7183,40 @@ F: arch/powerpc/include/asm/vas.h
F: arch/powerpc/include/uapi/asm/vas.h
IBM Power Virtual Ethernet Device Driver
-M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
+M: Thomas Falcon <tlfalcon@linux.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmveth.*
IBM Power Virtual FC Device Drivers
-M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+M: Tyrel Datwyler <tyreld@linux.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/ibmvfc*
IBM Power Virtual Management Channel Driver
-M: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
-M: Steven Royer <seroyer@linux.vnet.ibm.com>
+M: Steven Royer <seroyer@linux.ibm.com>
S: Supported
F: drivers/misc/ibmvmc.*
IBM Power Virtual SCSI Device Drivers
-M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+M: Tyrel Datwyler <tyreld@linux.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/ibmvscsi*
F: include/scsi/viosrp.h
IBM Power Virtual SCSI Device Target Driver
-M: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
-M: Michael Cyr <mikecyr@linux.vnet.ibm.com>
+M: Michael Cyr <mikecyr@linux.ibm.com>
L: linux-scsi@vger.kernel.org
L: target-devel@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi_tgt/
IBM Power VMX Cryptographic instructions
-M: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
-M: Paulo Flabiano Smorigo <pfsmorigo@linux.vnet.ibm.com>
+M: Breno Leitão <leitao@debian.org>
+M: Nayna Jain <nayna@linux.ibm.com>
+M: Paulo Flabiano Smorigo <pfsmorigo@gmail.com>
L: linux-crypto@vger.kernel.org
S: Supported
F: drivers/crypto/vmx/Makefile
@@ -7362,7 +7493,7 @@ S: Maintained
L: linux-crypto@vger.kernel.org
INTEGRITY MEASUREMENT ARCHITECTURE (IMA)
-M: Mimi Zohar <zohar@linux.vnet.ibm.com>
+M: Mimi Zohar <zohar@linux.ibm.com>
M: Dmitry Kasatkin <dmitry.kasatkin@gmail.com>
L: linux-integrity@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/zohar/linux-integrity.git
@@ -7422,18 +7553,18 @@ Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
S: Supported
-F: Documentation/networking/e100.rst
-F: Documentation/networking/e1000.rst
-F: Documentation/networking/e1000e.rst
-F: Documentation/networking/fm10k.rst
-F: Documentation/networking/igb.rst
-F: Documentation/networking/igbvf.rst
-F: Documentation/networking/ixgb.rst
-F: Documentation/networking/ixgbe.rst
-F: Documentation/networking/ixgbevf.rst
-F: Documentation/networking/i40e.rst
-F: Documentation/networking/iavf.rst
-F: Documentation/networking/ice.rst
+F: Documentation/networking/device_drivers/intel/e100.rst
+F: Documentation/networking/device_drivers/intel/e1000.rst
+F: Documentation/networking/device_drivers/intel/e1000e.rst
+F: Documentation/networking/device_drivers/intel/fm10k.rst
+F: Documentation/networking/device_drivers/intel/igb.rst
+F: Documentation/networking/device_drivers/intel/igbvf.rst
+F: Documentation/networking/device_drivers/intel/ixgb.rst
+F: Documentation/networking/device_drivers/intel/ixgbe.rst
+F: Documentation/networking/device_drivers/intel/ixgbevf.rst
+F: Documentation/networking/device_drivers/intel/i40e.rst
+F: Documentation/networking/device_drivers/intel/iavf.rst
+F: Documentation/networking/device_drivers/intel/ice.rst
F: drivers/net/ethernet/intel/
F: drivers/net/ethernet/intel/*/
F: include/linux/avf/virtchnl.h
@@ -7523,6 +7654,14 @@ S: Maintained
F: drivers/media/pci/intel/ipu3/
F: Documentation/media/uapi/v4l/pixfmt-srggb10-ipu3.rst
+INTEL IPU3 CSI-2 IMGU DRIVER
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/staging/media/ipu3/
+F: Documentation/media/uapi/v4l/pixfmt-meta-intel-ipu3.rst
+F: Documentation/media/v4l-drivers/ipu3.rst
+
INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
M: Krzysztof Halasa <khalasa@piap.pl>
S: Maintained
@@ -7607,8 +7746,8 @@ INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
M: Stanislav Yakovlev <stas.yakovlev@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
-F: Documentation/networking/README.ipw2100
-F: Documentation/networking/README.ipw2200
+F: Documentation/networking/device_drivers/intel/ipw2100.txt
+F: Documentation/networking/device_drivers/intel/ipw2200.txt
F: drivers/net/wireless/intel/ipw2x00/
INTEL PSTATE DRIVER
@@ -7703,7 +7842,7 @@ F: Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
IOC3 ETHERNET DRIVER
M: Ralf Baechle <ralf@linux-mips.org>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/sgi/ioc3-eth.c
@@ -7868,13 +8007,6 @@ F: include/linux/isdn/
F: include/uapi/linux/isdn.h
F: include/uapi/linux/isdn/
-ISDN SUBSYSTEM (Eicon active card driver)
-M: Armin Schindler <mac@melware.de>
-L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
-W: http://www.melware.de
-S: Maintained
-F: drivers/isdn/hardware/eicon/
-
IT87 HARDWARE MONITORING DRIVER
M: Jean Delvare <jdelvare@suse.com>
L: linux-hwmon@vger.kernel.org
@@ -7963,9 +8095,8 @@ S: Maintained
F: drivers/media/platform/rcar_jpu.c
JSM Neo PCI based serial card
-M: Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
L: linux-serial@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/tty/serial/jsm/
K10TEMP HARDWARE MONITORING DRIVER
@@ -8074,7 +8205,7 @@ F: tools/testing/selftests/
F: Documentation/dev-tools/kselftest*
KERNEL USERMODE HELPER
-M: "Luis R. Rodriguez" <mcgrof@kernel.org>
+M: Luis Chamberlain <mcgrof@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: kernel/umh.c
@@ -8131,7 +8262,7 @@ F: arch/arm64/kvm/
KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
M: James Hogan <jhogan@kernel.org>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Supported
F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/include/asm/kvm*
@@ -8171,6 +8302,7 @@ W: http://www.linux-kvm.org
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
S: Supported
F: arch/x86/kvm/
+F: arch/x86/kvm/*/
F: arch/x86/include/uapi/asm/kvm*
F: arch/x86/include/asm/kvm*
F: arch/x86/include/asm/pvclock-abi.h
@@ -8195,7 +8327,7 @@ F: include/uapi/linux/kexec.h
F: kernel/kexec*
KEYS-ENCRYPTED
-M: Mimi Zohar <zohar@linux.vnet.ibm.com>
+M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported
@@ -8204,9 +8336,9 @@ F: include/keys/encrypted-type.h
F: security/keys/encrypted-keys/
KEYS-TRUSTED
-M: James Bottomley <jejb@linux.vnet.ibm.com>
+M: James Bottomley <jejb@linux.ibm.com>
M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
-M: Mimi Zohar <zohar@linux.vnet.ibm.com>
+M: Mimi Zohar <zohar@linux.ibm.com>
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
S: Supported
@@ -8250,7 +8382,7 @@ F: mm/kmemleak.c
F: mm/kmemleak-test.c
KMOD KERNEL MODULE LOADER - USERMODE HELPER
-M: "Luis R. Rodriguez" <mcgrof@kernel.org>
+M: Luis Chamberlain <mcgrof@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: kernel/kmod.c
@@ -8259,7 +8391,7 @@ F: lib/test_kmod.c
F: tools/testing/selftests/kmod/
KPROBES
-M: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <mhiramat@kernel.org>
@@ -8304,7 +8436,7 @@ F: drivers/net/dsa/lantiq_gswip.c
LANTIQ MIPS ARCHITECTURE
M: John Crispin <john@phrozen.org>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/lantiq
F: drivers/soc/lantiq
@@ -8615,7 +8747,7 @@ M: Nicholas Piggin <npiggin@gmail.com>
M: David Howells <dhowells@redhat.com>
M: Jade Alglave <j.alglave@ucl.ac.uk>
M: Luc Maranget <luc.maranget@inria.fr>
-M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M: "Paul E. McKenney" <paulmck@linux.ibm.com>
R: Akira Yokosawa <akiyks@gmail.com>
R: Daniel Lustig <dlustig@nvidia.com>
L: linux-kernel@vger.kernel.org
@@ -8867,13 +8999,13 @@ S: Maintained
MARDUK (CREATOR CI40) DEVICE TREE SUPPORT
M: Rahul Bedarkar <rahulbedarkar89@gmail.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/boot/dts/img/pistachio_marduk.dts
MARVELL 88E6XXX ETHERNET SWITCH FABRIC DRIVER
M: Andrew Lunn <andrew@lunn.ch>
-M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+M: Vivien Didelot <vivien.didelot@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/dsa/mv88e6xxx/
@@ -8890,7 +9022,7 @@ F: include/uapi/drm/armada_drm.h
F: Documentation/devicetree/bindings/display/armada/
MARVELL CRYPTO DRIVER
-M: Boris Brezillon <boris.brezillon@bootlin.com>
+M: Boris Brezillon <bbrezillon@kernel.org>
M: Arnaud Ebalard <arno@natisbad.org>
F: drivers/crypto/marvell/
S: Maintained
@@ -9378,6 +9510,13 @@ F: drivers/media/platform/mtk-vpu/
F: Documentation/devicetree/bindings/media/mediatek-vcodec.txt
F: Documentation/devicetree/bindings/media/mediatek-vpu.txt
+MEDIATEK MT76 WIRELESS LAN DRIVER
+M: Felix Fietkau <nbd@nbd.name>
+M: Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+L: linux-wireless@vger.kernel.org
+S: Maintained
+F: drivers/net/wireless/mediatek/mt76/
+
MEDIATEK MT7601U WIRELESS LAN DRIVER
M: Jakub Kicinski <kubakici@wp.pl>
L: linux-wireless@vger.kernel.org
@@ -9573,7 +9712,7 @@ F: drivers/platform/x86/mlx-platform.c
MEMBARRIER SUPPORT
M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M: "Paul E. McKenney" <paulmck@linux.ibm.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: kernel/sched/membarrier.c
@@ -9594,7 +9733,7 @@ F: mm/
MEMORY TECHNOLOGY DEVICES (MTD)
M: David Woodhouse <dwmw2@infradead.org>
M: Brian Norris <computersforpeace@gmail.com>
-M: Boris Brezillon <boris.brezillon@bootlin.com>
+M: Boris Brezillon <bbrezillon@kernel.org>
M: Marek Vasut <marek.vasut@gmail.com>
M: Richard Weinberger <richard@nod.at>
L: linux-mtd@lists.infradead.org
@@ -9695,14 +9834,14 @@ L: linux-media@vger.kernel.org
S: Supported
F: drivers/media/platform/atmel/atmel-isc.c
F: drivers/media/platform/atmel/atmel-isc-regs.h
-F: devicetree/bindings/media/atmel-isc.txt
+F: Documentation/devicetree/bindings/media/atmel-isc.txt
MICROCHIP ISI DRIVER
M: Eugen Hristev <eugen.hristev@microchip.com>
L: linux-media@vger.kernel.org
S: Supported
F: drivers/media/platform/atmel/atmel-isi.c
-F: include/media/atmel-isi.h
+F: drivers/media/platform/atmel/atmel-isi.h
MICROCHIP AT91 USART MFD DRIVER
M: Radu Pirea <radu_nicolae.pirea@upb.ro>
@@ -9748,6 +9887,13 @@ M: Ludovic Desroches <ludovic.desroches@microchip.com>
S: Maintained
F: drivers/mmc/host/atmel-mci.c
+MICROCHIP MCP16502 PMIC DRIVER
+M: Andrei Stefanescu <andrei.stefanescu@microchip.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt
+F: drivers/regulator/mcp16502.c
+
MICROCHIP MCP3911 ADC DRIVER
M: Marcus Folkesson <marcus.folkesson@gmail.com>
M: Kent Gustavsson <kent@minoris.se>
@@ -9826,7 +9972,7 @@ F: drivers/dma/at_xdmac.c
MICROSEMI MIPS SOCS
M: Alexandre Belloni <alexandre.belloni@bootlin.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/generic/board-ocelot.c
F: arch/mips/configs/generic/board-ocelot.config
@@ -9847,6 +9993,7 @@ F: Documentation/scsi/smartpqi.txt
MICROSEMI ETHERNET SWITCH DRIVER
M: Alexandre Belloni <alexandre.belloni@bootlin.com>
+M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/mscc/
@@ -9866,7 +10013,7 @@ MIPS
M: Ralf Baechle <ralf@linux-mips.org>
M: Paul Burton <paul.burton@mips.com>
M: James Hogan <jhogan@kernel.org>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
W: http://www.linux-mips.org/
T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
@@ -9879,7 +10026,7 @@ F: drivers/platform/mips/
MIPS BOSTON DEVELOPMENT BOARD
M: Paul Burton <paul.burton@mips.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/clock/img,boston-clock.txt
F: arch/mips/boot/dts/img/boston.dts
@@ -9889,7 +10036,7 @@ F: include/dt-bindings/clock/boston-clock.h
MIPS GENERIC PLATFORM
M: Paul Burton <paul.burton@mips.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/power/mti,mips-cpc.txt
F: arch/mips/generic/
@@ -9897,7 +10044,7 @@ F: arch/mips/tools/generic-board-config.sh
MIPS/LOONGSON1 ARCHITECTURE
M: Keguang Zhang <keguang.zhang@gmail.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/loongson32/
F: arch/mips/include/asm/mach-loongson32/
@@ -9906,7 +10053,7 @@ F: drivers/*/*/*loongson1*
MIPS/LOONGSON2 ARCHITECTURE
M: Jiaxun Yang <jiaxun.yang@flygoat.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/loongson64/fuloong-2e/
F: arch/mips/loongson64/lemote-2f/
@@ -9916,7 +10063,7 @@ F: drivers/*/*/*loongson2*
MIPS/LOONGSON3 ARCHITECTURE
M: Huacai Chen <chenhc@lemote.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/loongson64/
F: arch/mips/include/asm/mach-loongson64/
@@ -9926,7 +10073,7 @@ F: drivers/*/*/*loongson3*
MIPS RINT INSTRUCTION EMULATION
M: Aleksandar Markovic <aleksandar.markovic@mips.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Supported
F: arch/mips/math-emu/sp_rint.c
F: arch/mips/math-emu/dp_rint.c
@@ -9940,12 +10087,9 @@ S: Odd Fixes
F: drivers/media/radio/radio-miropcm20*
MMP SUPPORT
-M: Eric Miao <eric.y.miao@gmail.com>
-M: Haojian Zhuang <haojian.zhuang@gmail.com>
+R: Lubomir Rintel <lkundrak@v3.sk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T: git git://github.com/hzhuang1/linux.git
-T: git git://git.linaro.org/people/ycmiao/pxa-linux.git
-S: Maintained
+S: Odd Fixes
F: arch/arm/boot/dts/mmp*
F: arch/arm/mach-mmp/
@@ -10184,7 +10328,7 @@ S: Supported
F: drivers/net/ethernet/myricom/myri10ge/
NAND FLASH SUBSYSTEM
-M: Boris Brezillon <boris.brezillon@bootlin.com>
+M: Boris Brezillon <bbrezillon@kernel.org>
M: Miquel Raynal <miquel.raynal@bootlin.com>
R: Richard Weinberger <richard@nod.at>
L: linux-mtd@lists.infradead.org
@@ -10261,8 +10405,8 @@ NETERION 10GbE DRIVERS (s2io/vxge)
M: Jon Mason <jdmason@kudzu.us>
L: netdev@vger.kernel.org
S: Supported
-F: Documentation/networking/s2io.txt
-F: Documentation/networking/vxge.txt
+F: Documentation/networking/device_drivers/neterion/s2io.txt
+F: Documentation/networking/device_drivers/neterion/vxge.txt
F: drivers/net/ethernet/neterion/
NETFILTER
@@ -10351,7 +10495,7 @@ F: drivers/net/wireless/
NETWORKING [DSA]
M: Andrew Lunn <andrew@lunn.ch>
-M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+M: Vivien Didelot <vivien.didelot@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
S: Maintained
F: Documentation/devicetree/bindings/net/dsa/
@@ -10704,6 +10848,14 @@ L: linux-nfc@lists.01.org (moderated for non-subscribers)
S: Supported
F: drivers/nfc/nxp-nci
+OBJAGG
+M: Jiri Pirko <jiri@mellanox.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: lib/objagg.c
+F: lib/test_objagg.c
+F: include/linux/objagg.h
+
OBJTOOL
M: Josh Poimboeuf <jpoimboe@redhat.com>
M: Peter Zijlstra <peterz@infradead.org>
@@ -10711,7 +10863,7 @@ S: Supported
F: tools/objtool/
OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER
-M: Frederic Barrat <fbarrat@linux.vnet.ibm.com>
+M: Frederic Barrat <fbarrat@linux.ibm.com>
M: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
@@ -10728,7 +10880,10 @@ M: Jarkko Nikula <jarkko.nikula@bitmer.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: linux-omap@vger.kernel.org
S: Maintained
-F: sound/soc/omap/
+F: sound/soc/ti/omap*
+F: sound/soc/ti/rx51.c
+F: sound/soc/ti/n810.c
+F: sound/soc/ti/sdma-pcm.*
OMAP CLOCK FRAMEWORK SUPPORT
M: Paul Walmsley <paul@pwsan.com>
@@ -10911,7 +11066,7 @@ F: include/linux/platform_data/i2c-omap.h
ONION OMEGA2+ BOARD
M: Harvey Hunt <harveyhuntnexus@gmail.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/boot/dts/ralink/omega2p.dts
@@ -11820,7 +11975,7 @@ F: drivers/pinctrl/spear/
PISTACHIO SOC SUPPORT
M: James Hartley <james.hartley@sondrel.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Odd Fixes
F: arch/mips/pistachio/
F: arch/mips/include/asm/mach-pistachio/
@@ -12000,7 +12155,7 @@ F: kernel/printk/
F: include/linux/printk.h
PRISM54 WIRELESS DRIVER
-M: "Luis R. Rodriguez" <mcgrof@gmail.com>
+M: Luis Chamberlain <mcgrof@kernel.org>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/p54
S: Obsolete
@@ -12014,9 +12169,10 @@ S: Maintained
F: fs/proc/
F: include/linux/proc_fs.h
F: tools/testing/selftests/proc/
+F: Documentation/filesystems/proc.txt
PROC SYSCTL
-M: "Luis R. Rodriguez" <mcgrof@kernel.org>
+M: Luis Chamberlain <mcgrof@kernel.org>
M: Kees Cook <keescook@chromium.org>
L: linux-kernel@vger.kernel.org
L: linux-fsdevel@vger.kernel.org
@@ -12263,7 +12419,7 @@ QLOGIC QLA3XXX NETWORK DRIVER
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
-F: Documentation/networking/LICENSE.qla3xxx
+F: Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLA4XXX iSCSI DRIVER
@@ -12315,7 +12471,7 @@ L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/bus/fsl-mc/
F: Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
-F: Documentation/networking/dpaa2/overview.rst
+F: Documentation/networking/device_drivers/freescale/dpaa2/overview.rst
QT1010 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
@@ -12343,7 +12499,7 @@ S: Supported
F: drivers/net/wireless/ath/ath9k/
QUALCOMM CAMERA SUBSYSTEM DRIVER
-M: Todor Tomov <todor.tomov@linaro.org>
+M: Todor Tomov <todor.too@gmail.com>
L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/media/qcom,camss.txt
@@ -12479,7 +12635,7 @@ F: drivers/media/usb/rainshadow-cec/*
RALINK MIPS ARCHITECTURE
M: John Crispin <john@phrozen.org>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/ralink
@@ -12499,7 +12655,7 @@ F: drivers/block/brd.c
RANCHU VIRTUAL BOARD FOR MIPS
M: Miodrag Dinic <miodrag.dinic@mips.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Supported
F: arch/mips/generic/board-ranchu.c
F: arch/mips/configs/generic/board-ranchu.config
@@ -12521,7 +12677,7 @@ S: Orphan
F: drivers/net/wireless/ray*
RCUTORTURE TEST FRAMEWORK
-M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
@@ -12563,16 +12719,17 @@ M: Fenghua Yu <fenghua.yu@intel.com>
M: Reinette Chatre <reinette.chatre@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported
-F: arch/x86/kernel/cpu/intel_rdt*
-F: arch/x86/include/asm/intel_rdt_sched.h
-F: Documentation/x86/intel_rdt*
+F: arch/x86/kernel/cpu/resctrl/
+F: arch/x86/include/asm/resctrl_sched.h
+F: Documentation/x86/resctrl*
READ-COPY UPDATE (RCU)
-M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
R: Lai Jiangshan <jiangshanlai@gmail.com>
+R: Joel Fernandes <joel@joelfernandes.org>
L: linux-kernel@vger.kernel.org
W: http://www.rdrop.com/users/paulmck/RCU/
S: Supported
@@ -12708,7 +12865,7 @@ F: include/linux/reset-controller.h
RESTARTABLE SEQUENCES SUPPORT
M: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
M: Peter Zijlstra <peterz@infradead.org>
-M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Boqun Feng <boqun.feng@gmail.com>
L: linux-kernel@vger.kernel.org
S: Supported
@@ -12776,6 +12933,13 @@ S: Maintained
F: drivers/media/platform/rockchip/rga/
F: Documentation/devicetree/bindings/media/rockchip-rga.txt
+ROCKCHIP VPU CODEC DRIVER
+M: Ezequiel Garcia <ezequiel@collabora.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/staging/media/platform/rockchip/vpu/
+F: Documentation/devicetree/bindings/media/rockchip-vpu.txt
+
ROCKER DRIVER
M: Jiri Pirko <jiri@resnulli.us>
L: netdev@vger.kernel.org
@@ -13025,7 +13189,7 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/common/saa7146/
F: drivers/media/pci/saa7146/
-F: include/media/saa7146*
+F: include/media/drv-intf/saa7146*
SAMSUNG AUDIO (ASoC) DRIVERS
M: Krzysztof Kozlowski <krzk@kernel.org>
@@ -13233,7 +13397,7 @@ F: drivers/scsi/sg.c
F: include/scsi/sg.h
SCSI SUBSYSTEM
-M: "James E.J. Bottomley" <jejb@linux.vnet.ibm.com>
+M: "James E.J. Bottomley" <jejb@linux.ibm.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
M: "Martin K. Petersen" <martin.petersen@oracle.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
@@ -13291,6 +13455,12 @@ L: sdricohcs-devel@lists.sourceforge.net (subscribers-only)
S: Maintained
F: drivers/mmc/host/sdricoh_cs.c
+SECO BOARDS CEC DRIVER
+M: Ettore Chimenti <ek5.chimenti@gmail.com>
+S: Maintained
+F: drivers/media/platform/seco-cec/seco-cec.c
+F: drivers/media/platform/seco-cec/seco-cec.h
+
SECURE COMPUTING
M: Kees Cook <keescook@chromium.org>
R: Andy Lutomirski <luto@amacapital.net>
@@ -13668,7 +13838,7 @@ F: mm/sl?b*
SLEEPABLE READ-COPY UPDATE (SRCU)
M: Lai Jiangshan <jiangshanlai@gmail.com>
-M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
R: Steven Rostedt <rostedt@goodmis.org>
R: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
@@ -13824,6 +13994,13 @@ F: drivers/md/raid*
F: include/linux/raid/
F: include/uapi/linux/raid/
+SOCIONEXT (SNI) AVE NETWORK DRIVER
+M: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/socionext/sni_ave.c
+F: Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
+
SOCIONEXT (SNI) NETSEC NETWORK DRIVER
M: Jassi Brar <jaswinder.singh@linaro.org>
L: netdev@vger.kernel.org
@@ -13857,6 +14034,14 @@ S: Maintained
F: drivers/ssb/
F: include/linux/ssb/
+SONY IMX214 SENSOR DRIVER
+M: Ricardo Ribalda <ricardo.ribalda@gmail.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/imx214.c
+F: Documentation/devicetree/bindings/media/i2c/sony,imx214.txt
+
SONY IMX258 SENSOR DRIVER
M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
@@ -13949,6 +14134,7 @@ S: Supported
F: Documentation/devicetree/bindings/sound/
F: Documentation/sound/soc/
F: sound/soc/
+F: include/dt-bindings/sound/
F: include/sound/soc*
SOUNDWIRE SUBSYSTEM
@@ -14047,7 +14233,7 @@ SPIDERNET NETWORK DRIVER for CELL
M: Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
L: netdev@vger.kernel.org
S: Supported
-F: Documentation/networking/spider_net.txt
+F: Documentation/networking/device_drivers/toshiba/spider_net.txt
F: drivers/net/ethernet/toshiba/spider_net*
SPMI SUBSYSTEM
@@ -14763,6 +14949,12 @@ F: Documentation/devicetree/bindings/clock/ti,sci-clk.txt
F: drivers/clk/keystone/sci-clk.c
F: drivers/reset/reset-ti-sci.c
+Texas Instruments ASoC drivers
+M: Peter Ujfalusi <peter.ujfalusi@ti.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: sound/soc/ti/
+
THANKO'S RAREMONO AM/FM/SW RADIO RECEIVER USB DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -15037,7 +15229,7 @@ M: Samuel Chessman <chessman@tux.org>
L: tlan-devel@lists.sourceforge.net (subscribers-only)
W: http://sourceforge.net/projects/tlan/
S: Maintained
-F: Documentation/networking/tlan.txt
+F: Documentation/networking/device_drivers/ti/tlan.txt
F: drivers/net/ethernet/ti/tlan.*
TM6000 VIDEO4LINUX DRIVER
@@ -15091,7 +15283,7 @@ F: drivers/platform/x86/topstar-laptop.c
TORTURE-TEST MODULES
M: Davidlohr Bueso <dave@stgolabs.net>
-M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M: "Paul E. McKenney" <paulmck@linux.ibm.com>
M: Josh Triplett <josh@joshtriplett.org>
L: linux-kernel@vger.kernel.org
S: Supported
@@ -15234,7 +15426,7 @@ F: arch/um/os-Linux/drivers/
TURBOCHANNEL SUBSYSTEM
M: "Maciej W. Rozycki" <macro@linux-mips.org>
M: Ralf Baechle <ralf@linux-mips.org>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
Q: http://patchwork.linux-mips.org/project/linux-mips/list/
S: Maintained
F: drivers/tc/
@@ -16055,7 +16247,7 @@ F: drivers/net/vmxnet3/
VOCORE VOCORE2 BOARD
M: Harvey Hunt <harveyhuntnexus@gmail.com>
-L: linux-mips@linux-mips.org
+L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/boot/dts/ralink/vocore2.dts
@@ -16350,6 +16542,12 @@ F: include/linux/idr.h
F: include/linux/xarray.h
F: tools/testing/radix-tree
+XBOX DVD IR REMOTE
+M: Benjamin Valentin <benpicco@googlemail.com>
+S: Maintained
+F: drivers/media/rc/xbox_remote.c
+F: drivers/media/rc/keymaps/rc-xbox-dvd.c
+
XC2028/3028 TUNER DRIVER
M: Mauro Carvalho Chehab <mchehab@kernel.org>
L: linux-media@vger.kernel.org
@@ -16396,6 +16594,7 @@ L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
S: Supported
F: arch/x86/xen/
+F: arch/x86/platform/pvh/
F: drivers/*/xen-*front.c
F: drivers/xen/
F: arch/x86/include/asm/xen/
diff --git a/Makefile b/Makefile
index 0ce4e29ee342..7a2a9a175756 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 20
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION =
NAME = Shy Crocodile
# *DOCUMENTATION*
@@ -962,11 +962,6 @@ ifdef CONFIG_STACK_VALIDATION
ifeq ($(has_libelf),1)
objtool_target := tools/objtool FORCE
else
- ifdef CONFIG_UNWINDER_ORC
- $(error "Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
- else
- $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
- endif
SKIP_STACK_VALIDATION := 1
export SKIP_STACK_VALIDATION
endif
@@ -1081,7 +1076,7 @@ scripts: scripts_basic scripts_dtc asm-generic gcc-plugins $(autoksyms_h)
# version.h and scripts_basic is processed / created.
# Listed in dependency order
-PHONY += prepare archprepare macroprepare prepare0 prepare1 prepare2 prepare3
+PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
# prepare3 is used to check if we are building in a separate output directory,
# and if so do:
@@ -1104,9 +1099,7 @@ prepare2: prepare3 outputmakefile asm-generic
prepare1: prepare2 $(version_h) $(autoksyms_h) include/generated/utsrelease.h
$(cmd_crmodverdir)
-macroprepare: prepare1 archmacros
-
-archprepare: archheaders archscripts macroprepare scripts_basic
+archprepare: archheaders archscripts prepare1 scripts_basic
prepare0: archprepare gcc-plugins
$(Q)$(MAKE) $(build)=.
@@ -1125,6 +1118,14 @@ uapi-asm-generic:
PHONY += prepare-objtool
prepare-objtool: $(objtool_target)
+ifeq ($(SKIP_STACK_VALIDATION),1)
+ifdef CONFIG_UNWINDER_ORC
+ @echo "error: Cannot generate ORC metadata for CONFIG_UNWINDER_ORC=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+ @false
+else
+ @echo "warning: Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+endif
+endif
# Generate some files
# ---------------------------------------------------------------------------
@@ -1174,9 +1175,6 @@ archheaders:
PHONY += archscripts
archscripts:
-PHONY += archmacros
-archmacros:
-
PHONY += __headers
__headers: $(version_h) scripts_basic uapi-asm-generic archheaders archscripts
$(Q)$(MAKE) $(build)=scripts build_unifdef
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index a37fd990bd55..4b5b1b244f86 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -634,6 +634,7 @@ setup_arch(char **cmdline_p)
/* Find our memory. */
setup_memory(kernel_end);
+ memblock_set_bottom_up(true);
/* First guess at cpu cache sizes. Do this before init_arch. */
determine_cpu_caches(cpu->type);
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 74846553e3f1..d0b73371e985 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -144,14 +144,14 @@ setup_memory_node(int nid, void *kernel_end)
if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
panic("kernel loaded out of ram");
+ memblock_add(PFN_PHYS(node_min_pfn),
+ (node_max_pfn - node_min_pfn) << PAGE_SHIFT);
+
/* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
Note that we round this down, not up - node memory
has much larger alignment than 8Mb, so it's safe. */
node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
- memblock_add(PFN_PHYS(node_min_pfn),
- (node_max_pfn - node_min_pfn) << PAGE_SHIFT);
-
NODE_DATA(nid)->node_start_pfn = node_min_pfn;
NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;
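The reordering in this hunk matters because the round-down that follows strips the low PFNs off the node start; calling memblock_add() first registers the full range before the alignment is applied. A minimal user-space sketch of the same round-down idiom (the MAX_ORDER value of 11 below is an assumed example for illustration, not taken from this diff):

#include <stdio.h>

#define MAX_ORDER 11 /* assumed example value */

/* Round a pfn down to a 2^(MAX_ORDER-1) page boundary, as the hunk does. */
static unsigned long round_down_pfn(unsigned long pfn)
{
	return pfn & ~((1UL << (MAX_ORDER - 1)) - 1);
}

int main(void)
{
	unsigned long node_min_pfn = 0x12345;

	/* With MAX_ORDER == 11 the low 10 bits are cleared: 0x12345 -> 0x12000 */
	printf("aligned start: 0x%lx\n", round_down_pfn(node_min_pfn));
	return 0;
}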
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 7deaabeb531a..ca8e26d045a9 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -24,6 +24,7 @@ config ARC
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
select GENERIC_PENDING_IRQ if SMP
+ select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
@@ -107,7 +108,7 @@ endmenu
choice
prompt "ARC Instruction Set"
- default ISA_ARCOMPACT
+ default ISA_ARCV2
config ISA_ARCOMPACT
bool "ARCompact ISA"
@@ -174,13 +175,11 @@ endchoice
config CPU_BIG_ENDIAN
bool "Enable Big Endian Mode"
- default n
help
Build kernel for Big Endian Mode of ARC CPU
config SMP
bool "Symmetric Multi-Processing"
- default n
select ARC_MCIP if ISA_ARCV2
help
This enables support for systems with more than one CPU.
@@ -252,7 +251,6 @@ config ARC_CACHE_PAGES
config ARC_CACHE_VIPT_ALIASING
bool "Support VIPT Aliasing D$"
depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
- default n
endif #ARC_CACHE
@@ -260,7 +258,6 @@ config ARC_HAS_ICCM
bool "Use ICCM"
help
Single Cycle RAMS to store Fast Path Code
- default n
config ARC_ICCM_SZ
int "ICCM Size in KB"
@@ -271,7 +268,6 @@ config ARC_HAS_DCCM
bool "Use DCCM"
help
Single Cycle RAMS to store Fast Path Data
- default n
config ARC_DCCM_SZ
int "DCCM Size in KB"
@@ -364,13 +360,11 @@ if ISA_ARCOMPACT
config ARC_COMPACT_IRQ_LEVELS
bool "Setup Timer IRQ as high Priority"
- default n
# if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
depends on !SMP
config ARC_FPU_SAVE_RESTORE
bool "Enable FPU state persistence across context switch"
- default n
help
Double Precision Floating Point unit had dedicated regs which
need to be saved/restored across context-switch.
@@ -451,7 +445,6 @@ config HIGHMEM
config ARC_HAS_PAE40
bool "Support for the 40-bit Physical Address Extension"
- default n
depends on ISA_ARCV2
select HIGHMEM
select PHYS_ADDR_T_64BIT
@@ -494,7 +487,6 @@ config HZ
config ARC_METAWARE_HLINK
bool "Support for Metaware debugger assisted Host access"
- default n
help
This options allows a Linux userland apps to directly access
host file system (open/creat/read/write etc) with help from
@@ -522,13 +514,11 @@ config ARC_DW2_UNWIND
config ARC_DBG_TLB_PARANOIA
bool "Paranoia Checks in Low Level TLB Handlers"
- default n
endif
config ARC_UBOOT_SUPPORT
bool "Support uboot arg Handling"
- default n
help
ARC Linux by default checks for uboot provided args as pointers to
external cmdline or DTB. This however breaks in absence of uboot,
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index c64c505d966c..df00578c279d 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -6,7 +6,7 @@
# published by the Free Software Foundation.
#
-KBUILD_DEFCONFIG := nsim_700_defconfig
+KBUILD_DEFCONFIG := nsim_hs_defconfig
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index ef149f59929a..43f17b51ee89 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -222,6 +222,21 @@
bus-width = <4>;
dma-coherent;
};
+
+ gpio: gpio@3000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x3000 0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio_port_a: gpio-controller@0 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <24>;
+ reg = <0>;
+ };
+ };
};
memory@80000000 {
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 41bc08be6a3b..020d4493edfd 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -14,6 +14,7 @@ CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
@@ -95,6 +96,7 @@ CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 1e1c4a8011b5..666314fffc60 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -94,6 +94,7 @@ CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 6b0c0cfd5c30..429832b8560b 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -97,6 +97,7 @@ CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 1dec2b4bc5e6..87b23b7fb781 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -45,6 +45,9 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_DWAPB=y
# CONFIG_HWMON is not set
CONFIG_DRM=y
# CONFIG_DRM_FBDEV_EMULATION is not set
@@ -65,6 +68,7 @@ CONFIG_EXT3_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index 31ba224bbfb4..6e84060e7c90 100644
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -15,6 +15,7 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
@@ -73,6 +74,7 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_ROOT_NFS=y
CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 8e0b8b134cd9..219c2a65294b 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_LBDAF is not set
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index f14eeff7d308..35dfc6491a09 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_LBDAF is not set
@@ -66,5 +67,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 025298a48305..1638e5bc9672 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -65,5 +65,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index df7b77b13b82..11cfbdb0f441 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -76,6 +76,7 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FTRACE=y
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index a7f65313f84a..e71ade3cf9c8 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -19,6 +19,7 @@ CONFIG_KALLSYMS_ALL=y
# CONFIG_AIO is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index db47c3541f15..1e59a2e9c602 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -85,6 +85,7 @@ CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index a8ac5e917d9a..b5c3f6c54b03 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -90,6 +90,7 @@ CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index ff7d3232764a..f393b663413e 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -113,7 +113,9 @@ extern unsigned long perip_base, perip_end;
/* IO coherency related Auxiliary registers */
#define ARC_REG_IO_COH_ENABLE 0x500
+#define ARC_IO_COH_ENABLE_BIT BIT(0)
#define ARC_REG_IO_COH_PARTIAL 0x501
+#define ARC_IO_COH_PARTIAL_BIT BIT(0)
#define ARC_REG_IO_COH_AP0_BASE 0x508
#define ARC_REG_IO_COH_AP0_SIZE 0x509
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index c22b181e8206..2f39d9b3886e 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
+#include <asm/unaligned.h>
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
@@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
return w;
}
+/*
+ * {read,write}s{b,w,l}() repeatedly access the same IO address in
+ * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
+ * @count times
+ */
+#define __raw_readsx(t,f) \
+static inline void __raw_reads##f(const volatile void __iomem *addr, \
+ void *ptr, unsigned int count) \
+{ \
+ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+ u##t *buf = ptr; \
+ \
+ if (!count) \
+ return; \
+ \
+ /* Some ARC CPUs don't support unaligned accesses */ \
+ if (is_aligned) { \
+ do { \
+ u##t x = __raw_read##f(addr); \
+ *buf++ = x; \
+ } while (--count); \
+ } else { \
+ do { \
+ u##t x = __raw_read##f(addr); \
+ put_unaligned(x, buf++); \
+ } while (--count); \
+ } \
+}
+
+#define __raw_readsb __raw_readsb
+__raw_readsx(8, b)
+#define __raw_readsw __raw_readsw
+__raw_readsx(16, w)
+#define __raw_readsl __raw_readsl
+__raw_readsx(32, l)
+
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
@@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
}
+#define __raw_writesx(t,f) \
+static inline void __raw_writes##f(volatile void __iomem *addr, \
+ const void *ptr, unsigned int count) \
+{ \
+ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+ const u##t *buf = ptr; \
+ \
+ if (!count) \
+ return; \
+ \
+ /* Some ARC CPUs don't support unaligned accesses */ \
+ if (is_aligned) { \
+ do { \
+ __raw_write##f(*buf++, addr); \
+ } while (--count); \
+ } else { \
+ do { \
+ __raw_write##f(get_unaligned(buf++), addr); \
+ } while (--count); \
+ } \
+}
+
+#define __raw_writesb __raw_writesb
+__raw_writesx(8, b)
+#define __raw_writesw __raw_writesw
+__raw_writesx(16, w)
+#define __raw_writesl __raw_writesl
+__raw_writesx(32, l)
+
/*
* MMIO can also get buffered/optimized in micro-arch, so barriers needed
* Based on ARM model for the typical use case
@@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); })
+#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); })
+#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
+#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); })
+#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); })
+#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); })
/*
* Relaxed API for drivers which can handle barrier ordering themselves
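The __raw_readsx()/__raw_writesx() macros above stamp out one repeated accessor per access width, splitting the aligned and unaligned buffer paths. A stand-alone C sketch of the same pattern for the 16-bit read case (mmio_read16() is a hypothetical stand-in for __raw_readw(), and the memcpy() plays the role of put_unaligned()):

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for __raw_readw(): one 16-bit read of a fixed
 * device register. A real implementation would be a volatile MMIO load. */
static uint16_t mmio_read16(const volatile void *addr)
{
	return *(const volatile uint16_t *)addr;
}

/* Repeatedly read the same register into a buffer, as __raw_readsw()
 * does: direct stores when the buffer is 16-bit aligned, byte-wise
 * copies otherwise, so CPUs without unaligned-access support don't
 * fault. */
static void mmio_read16_rep(const volatile void *addr, void *ptr,
			    unsigned int count)
{
	int is_aligned = ((uintptr_t)ptr % sizeof(uint16_t)) == 0;
	uint16_t *buf = ptr;

	if (!count)
		return;

	if (is_aligned) {
		do {
			*buf++ = mmio_read16(addr);
		} while (--count);
	} else {
		do {
			uint16_t x = mmio_read16(addr);

			memcpy(buf++, &x, sizeof(x)); /* put_unaligned() */
		} while (--count);
	}
}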
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index b2cae79a25d7..eea8c5ce6335 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -243,7 +243,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core;
- int i, n = 0;
+ int i, n = 0, ua = 0;
FIX_PTR(cpu);
@@ -263,10 +263,13 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
- n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
+#ifdef __ARC_UNALIGNED__
+ ua = 1;
+#endif
+ n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
- IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
+ IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
if (i)
n += scnprintf(buf + n, len - n, "\n\t\t: ");
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index e188bb3ede53..4135abec3fb0 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -1145,6 +1145,20 @@ noinline void __init arc_ioc_setup(void)
unsigned int ioc_base, mem_sz;
/*
+ * If IOC was already enabled (due to bootloader), it technically needs to
+ * be reconfigured with an aperture base/size corresponding to the Linux
+ * memory map, which will certainly differ from uboot's. But disabling and
+ * re-enabling the IOC while DMA might be active is tricky business.
+ * To avoid random memory issues later, just panic here and ask the user to
+ * upgrade the bootloader to one which doesn't enable the IOC.
+ */
+ if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
+ panic("IOC already enabled, please upgrade bootloader!\n");
+
+ if (!ioc_enable)
+ return;
+
+ /*
* As for today we don't support both IOC and ZONE_HIGHMEM enabled
* simultaneously. This happens because as of today IOC aperture covers
* only ZONE_NORMAL (low mem) and any dma transactions outside this
@@ -1187,8 +1201,8 @@ noinline void __init arc_ioc_setup(void)
panic("IOC Aperture start must be aligned to the size of the aperture");
write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
- write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
- write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+ write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
+ write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);
/* Re-enable L1 dcache */
__dc_enable();
@@ -1265,7 +1279,7 @@ void __init arc_cache_init_master(void)
if (is_isa_arcv2() && l2_line_sz && !slc_enable)
arc_slc_disable();
- if (is_isa_arcv2() && ioc_enable)
+ if (is_isa_arcv2() && ioc_exists)
arc_ioc_setup();
if (is_isa_arcv2() && l2_line_sz && slc_enable) {
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index c9da6102eb4f..e2d9fc3fea01 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -66,7 +66,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- int si_code;
+ int si_code = 0;
int ret;
vm_fault_t fault;
int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 586fc30b23bd..2196aac0e45c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1810,6 +1810,21 @@ config XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
+config STACKPROTECTOR_PER_TASK
+ bool "Use a unique stack canary value for each task"
+ depends on GCC_PLUGINS && STACKPROTECTOR && SMP && !XIP_DEFLATED_DATA
+ select GCC_PLUGIN_ARM_SSP_PER_TASK
+ default y
+ help
+ Due to the fact that GCC uses an ordinary symbol reference from
+ which to load the value of the stack canary, this value can only
+ change at reboot time on SMP systems, and all tasks running in the
+ kernel's address space are forced to use the same canary value for
+ the entire duration that the system is up.
+
+ Enable this option to switch to a different method that uses a
+ different canary value for each task.
+
endmenu
menu "Boot options"
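The help text above explains the weakness of the stock scheme: GCC loads the canary through one ordinary symbol reference, so on SMP every task shares a single value for the whole uptime. A compressed C sketch of the global-vs-per-task distinction (the struct and names are illustrative, not the plugin's actual mechanics):

#include <stdint.h>
#include <stdlib.h>

/* Classic scheme: one global canary, fixed at boot, shared by all tasks. */
uintptr_t __stack_chk_guard_global;

/* Per-task scheme: each task carries its own canary in its control block,
 * so leaking one task's canary does not compromise any other task. */
struct task {
	uintptr_t stack_canary;
	/* ... other per-task state ... */
};

static void task_init(struct task *t)
{
	/* rand() is a placeholder; the kernel uses a strong entropy source. */
	t->stack_canary = (uintptr_t)rand() ^ ((uintptr_t)rand() << 16);
}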
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 05a91d8b89f3..0436002d5091 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -303,6 +303,18 @@ else
KBUILD_IMAGE := $(boot)/zImage
endif
+ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
+prepare: stack_protector_prepare
+stack_protector_prepare: prepare0
+ $(eval KBUILD_CFLAGS += \
+ -fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell \
+ awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\
+ include/generated/asm-offsets.h) \
+ -fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \
+ awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\
+ include/generated/asm-offsets.h))
+endif
+
all: $(notdir $(KBUILD_IMAGE))
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 1f5a5ffe7fcf..01bf2585a0fa 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -101,6 +101,7 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S \
$(libfdt) $(libfdt_hdrs) hyp-stub.S
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+KBUILD_CFLAGS += $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
index d4d33cd7adad..1e2bb68231ad 100644
--- a/arch/arm/boot/dts/am3517-evm.dts
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -228,7 +228,7 @@
vmmc-supply = <&vmmc_fixed>;
bus-width = <4>;
wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
- cd-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio_127 */
+ cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio_127 */
};
&mmc3 {
diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi
index dae6e458e59f..b1c988eed87c 100644
--- a/arch/arm/boot/dts/am3517-som.dtsi
+++ b/arch/arm/boot/dts/am3517-som.dtsi
@@ -163,7 +163,7 @@
compatible = "ti,wl1271";
reg = <2>;
interrupt-parent = <&gpio6>;
- interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; /* gpio_170 */
+ interrupts = <10 IRQ_TYPE_EDGE_RISING>; /* gpio_170 */
ref-clock-frequency = <26000000>;
tcxo-clock-frequency = <26000000>;
};
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
index f2a1d25eb6cf..83e0fbc4a1a1 100644
--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
@@ -45,7 +45,7 @@
};
/* The voltage to the MMC card is hardwired at 3.3V */
- vmmc: fixedregulator@0 {
+ vmmc: regulator-vmmc {
compatible = "regulator-fixed";
regulator-name = "vmmc";
regulator-min-microvolt = <3300000>;
@@ -53,7 +53,7 @@
regulator-boot-on;
};
- veth: fixedregulator@0 {
+ veth: regulator-veth {
compatible = "regulator-fixed";
regulator-name = "veth";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts
index 7f9cbdf33a51..2f6aa24a0b67 100644
--- a/arch/arm/boot/dts/arm-realview-pb11mp.dts
+++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts
@@ -145,7 +145,7 @@
};
/* The voltage to the MMC card is hardwired at 3.3V */
- vmmc: fixedregulator@0 {
+ vmmc: regulator-vmmc {
compatible = "regulator-fixed";
regulator-name = "vmmc";
regulator-min-microvolt = <3300000>;
@@ -153,7 +153,7 @@
regulator-boot-on;
};
- veth: fixedregulator@0 {
+ veth: regulator-veth {
compatible = "regulator-fixed";
regulator-name = "veth";
regulator-min-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
index 4adb85e66be3..93762244be7f 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
@@ -31,7 +31,7 @@
wifi_pwrseq: wifi-pwrseq {
compatible = "mmc-pwrseq-simple";
- reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
index c318bcbc6ba7..89e6fd547c75 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
@@ -26,7 +26,7 @@
wifi_pwrseq: wifi-pwrseq {
compatible = "mmc-pwrseq-simple";
- reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
index e45a15ceb94b..69d753cac89a 100644
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
@@ -492,12 +492,6 @@
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
- eeprom@50 {
- compatible = "atmel,24c04";
- pagesize = <16>;
- reg = <0x50>;
- };
-
hpa1: amp@60 {
compatible = "ti,tpa6130a2";
reg = <0x60>;
diff --git a/arch/arm/boot/dts/imx7d-nitrogen7.dts b/arch/arm/boot/dts/imx7d-nitrogen7.dts
index d8aac4a2d02a..177d21fdeb28 100644
--- a/arch/arm/boot/dts/imx7d-nitrogen7.dts
+++ b/arch/arm/boot/dts/imx7d-nitrogen7.dts
@@ -86,13 +86,17 @@
compatible = "regulator-fixed";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
- clock-names = "slow";
regulator-name = "reg_wlan";
startup-delay-us = <70000>;
gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
enable-active-high;
};
+
+ usdhc2_pwrseq: usdhc2_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+ clock-names = "ext_clock";
+ };
};
&adc1 {
@@ -375,6 +379,7 @@
bus-width = <4>;
non-removable;
vmmc-supply = <&reg_wlan>;
+ mmc-pwrseq = <&usdhc2_pwrseq>;
cap-power-off-card;
keep-power-in-suspend;
status = "okay";
diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
index 21973eb55671..f27b3849d3ff 100644
--- a/arch/arm/boot/dts/imx7d-pico.dtsi
+++ b/arch/arm/boot/dts/imx7d-pico.dtsi
@@ -100,6 +100,19 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
+
+ usdhc2_pwrseq: usdhc2_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+ clock-names = "ext_clock";
+ };
+};
+
+&clks {
+ assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>,
+ <&clks IMX7D_CLKO2_ROOT_DIV>;
+ assigned-clock-parents = <&clks IMX7D_CKIL>;
+ assigned-clock-rates = <0>, <32768>;
};
&i2c4 {
@@ -199,12 +212,13 @@
&usdhc2 { /* Wifi SDIO */
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usdhc2>;
+ pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>;
no-1-8-v;
non-removable;
keep-power-in-suspend;
wakeup-source;
vmmc-supply = <&reg_ap6212>;
+ mmc-pwrseq = <&usdhc2_pwrseq>;
status = "okay";
};
@@ -301,6 +315,12 @@
};
&iomuxc_lpsr {
+ pinctrl_wifi_clk: wificlkgrp {
+ fsl,pins = <
+ MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2 0x7d
+ >;
+ };
+
pinctrl_wdog: wdoggrp {
fsl,pins = <
MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B 0x74
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
index ac343330d0c8..98b682a8080c 100644
--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
@@ -129,7 +129,7 @@
};
&mmc3 {
- interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
+ interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
pinctrl-names = "default";
vmmc-supply = <&wl12xx_vmmc>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
index 9d5d53fbe9c0..c39cf2ca54da 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
@@ -35,7 +35,7 @@
* jumpering combinations for the long run.
*/
&mmc3 {
- interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
+ interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>;
pinctrl-names = "default";
vmmc-supply = <&wl12xx_vmmc>;
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi
index 2075120cfc4d..d8bf939a3aff 100644
--- a/arch/arm/boot/dts/rk3288-veyron.dtsi
+++ b/arch/arm/boot/dts/rk3288-veyron.dtsi
@@ -10,7 +10,11 @@
#include "rk3288.dtsi"
/ {
- memory@0 {
+ /*
+ * The default coreboot on veyron devices ignores memory@0 nodes
+ * and would instead create another memory node.
+ */
+ memory {
device_type = "memory";
reg = <0x0 0x0 0x0 0x80000000>;
};
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 843052f14f1c..dd0dda6ed44b 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -314,7 +314,7 @@
0x1 0x0 0x60000000 0x10000000
0x2 0x0 0x70000000 0x10000000
0x3 0x0 0x80000000 0x10000000>;
- clocks = <&mck>;
+ clocks = <&h32ck>;
status = "disabled";
nand_controller: nand-controller {
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
index b828677f331d..ffafe9720b35 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2.dts
@@ -245,6 +245,8 @@
regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <2800000>;
regulator-name = "vddio-csi0";
+ regulator-soft-start;
+ regulator-ramp-delay = <1600>;
};
&reg_ldo4 {
diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
index 742d2946b08b..583a5a01642f 100644
--- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
@@ -314,8 +314,8 @@
&reg_dldo3 {
regulator-always-on;
- regulator-min-microvolt = <2500000>;
- regulator-max-microvolt = <2500000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-name = "vcc-pd";
};
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 07b2eadac3dd..207962a656a2 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -167,8 +167,9 @@ CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_SOC=m
-CONFIG_SND_EDMA_SOC=m
-CONFIG_SND_DA850_SOC_EVM=m
+CONFIG_SND_SOC_TLV320AIC3X=m
+CONFIG_SND_SOC_DAVINCI_MCASP=m
+CONFIG_SND_SOC_DAVINCI_EVM=m
CONFIG_SND_SIMPLE_CARD=m
CONFIG_HID=m
CONFIG_HID_A4TECH=m
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 72f4bc83f467..cfc00b0961ec 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -175,8 +175,6 @@ CONFIG_SND_PCM_OSS=y
# CONFIG_SND_VERBOSE_PROCFS is not set
CONFIG_SND_DUMMY=y
CONFIG_SND_USB_AUDIO=y
-CONFIG_SND_SOC=y
-CONFIG_SND_OMAP_SOC=y
# CONFIG_USB_HID is not set
CONFIG_USB=y
CONFIG_USB_PHY=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 6491419b1dad..2274e45623f9 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -381,13 +381,13 @@ CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DEBUG=y
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_SOC=m
-CONFIG_SND_EDMA_SOC=m
-CONFIG_SND_AM33XX_SOC_EVM=m
-CONFIG_SND_OMAP_SOC=m
-CONFIG_SND_OMAP_SOC_HDMI_AUDIO=m
-CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m
-CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=m
-CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
+CONFIG_SND_SOC_TLV320AIC3X=m
+CONFIG_SND_SOC_DAVINCI_MCASP=m
+CONFIG_SND_SOC_NOKIA_RX51=m
+CONFIG_SND_SOC_OMAP_HDMI=m
+CONFIG_SND_SOC_OMAP_ABE_TWL6040=m
+CONFIG_SND_SOC_OMAP3_PANDORA=m
+CONFIG_SND_SOC_OMAP3_TWL4030=m
CONFIG_SND_SOC_CPCAP=m
CONFIG_SND_SIMPLE_CARD=m
CONFIG_SND_AUDIO_GRAPH_CARD=m
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index ef0c7feea6e2..a95322b59799 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -69,6 +69,15 @@ config CRYPTO_AES_ARM
help
Use optimized AES assembler routines for ARM platforms.
+ On ARM processors without the Crypto Extensions, this is the
+ fastest AES implementation for single blocks. For multiple
+ blocks, the NEON bit-sliced implementation is usually faster.
+
+ This implementation may be vulnerable to cache timing attacks,
+ since it uses lookup tables. However, as countermeasures it
+ disables IRQs and preloads the tables; it is hoped this makes
+ such attacks very difficult.
+
config CRYPTO_AES_ARM_BS
tristate "Bit sliced AES using NEON instructions"
depends on KERNEL_MODE_NEON
@@ -117,9 +126,14 @@ config CRYPTO_CRC32_ARM_CE
select CRYPTO_HASH
config CRYPTO_CHACHA20_NEON
- tristate "NEON accelerated ChaCha20 symmetric cipher"
+ tristate "NEON accelerated ChaCha stream cipher algorithms"
depends on KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_CHACHA20
+config CRYPTO_NHPOLY1305_NEON
+ tristate "NEON accelerated NHPoly1305 hash function (for Adiantum)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_NHPOLY1305
+
endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index bd5bceef0605..b65d6bfab8e6 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -9,7 +9,8 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
-obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
+obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
+obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -52,7 +53,8 @@ aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
-chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
+chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o
+nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
ifdef REGENERATE_ARM_CRYPTO
quiet_cmd_perl = PERL $@
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index d0a9cec73707..5affb8482379 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -10,7 +10,6 @@
#include <asm/hwcap.h>
#include <asm/neon.h>
-#include <asm/hwcap.h>
#include <crypto/aes.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
diff --git a/arch/arm/crypto/aes-cipher-core.S b/arch/arm/crypto/aes-cipher-core.S
index 184d6c2d15d5..f2d67c095e59 100644
--- a/arch/arm/crypto/aes-cipher-core.S
+++ b/arch/arm/crypto/aes-cipher-core.S
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
#include <asm/cache.h>
.text
@@ -41,7 +42,7 @@
.endif
.endm
- .macro __hround, out0, out1, in0, in1, in2, in3, t3, t4, enc, sz, op
+ .macro __hround, out0, out1, in0, in1, in2, in3, t3, t4, enc, sz, op, oldcpsr
__select \out0, \in0, 0
__select t0, \in1, 1
__load \out0, \out0, 0, \sz, \op
@@ -73,6 +74,14 @@
__load t0, t0, 3, \sz, \op
__load \t4, \t4, 3, \sz, \op
+ .ifnb \oldcpsr
+ /*
+ * This is the final round and we're done with all data-dependent table
+ * lookups, so we can safely re-enable interrupts.
+ */
+ restore_irqs \oldcpsr
+ .endif
+
eor \out1, \out1, t1, ror #24
eor \out0, \out0, t2, ror #16
ldm rk!, {t1, t2}
@@ -83,14 +92,14 @@
eor \out1, \out1, t2
.endm
- .macro fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op
+ .macro fround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op, oldcpsr
__hround \out0, \out1, \in0, \in1, \in2, \in3, \out2, \out3, 1, \sz, \op
- __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op
+ __hround \out2, \out3, \in2, \in3, \in0, \in1, \in1, \in2, 1, \sz, \op, \oldcpsr
.endm
- .macro iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op
+ .macro iround, out0, out1, out2, out3, in0, in1, in2, in3, sz=2, op, oldcpsr
__hround \out0, \out1, \in0, \in3, \in2, \in1, \out2, \out3, 0, \sz, \op
- __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op
+ __hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op, \oldcpsr
.endm
.macro __rev, out, in
@@ -118,13 +127,14 @@
.macro do_crypt, round, ttab, ltab, bsz
push {r3-r11, lr}
+ // Load keys first, to reduce latency in case they're not cached yet.
+ ldm rk!, {r8-r11}
+
ldr r4, [in]
ldr r5, [in, #4]
ldr r6, [in, #8]
ldr r7, [in, #12]
- ldm rk!, {r8-r11}
-
#ifdef CONFIG_CPU_BIG_ENDIAN
__rev r4, r4
__rev r5, r5
@@ -138,6 +148,25 @@
eor r7, r7, r11
__adrl ttab, \ttab
+ /*
+ * Disable interrupts and prefetch the 1024-byte 'ft' or 'it' table into
+ * L1 cache, assuming cacheline size >= 32. This is a hardening measure
+ * intended to make cache-timing attacks more difficult. They may not
+ * be fully prevented, however; see the paper
+ * https://cr.yp.to/antiforgery/cachetiming-20050414.pdf
+ * ("Cache-timing attacks on AES") for a discussion of the many
+ * difficulties involved in writing truly constant-time AES software.
+ */
+ save_and_disable_irqs t0
+ .set i, 0
+ .rept 1024 / 128
+ ldr r8, [ttab, #i + 0]
+ ldr r9, [ttab, #i + 32]
+ ldr r10, [ttab, #i + 64]
+ ldr r11, [ttab, #i + 96]
+ .set i, i + 128
+ .endr
+ push {t0} // oldcpsr
tst rounds, #2
bne 1f
@@ -151,8 +180,21 @@
\round r4, r5, r6, r7, r8, r9, r10, r11
b 0b
-2: __adrl ttab, \ltab
- \round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b
+2: .ifb \ltab
+ add ttab, ttab, #1
+ .else
+ __adrl ttab, \ltab
+ // Prefetch inverse S-box for final round; see explanation above
+ .set i, 0
+ .rept 256 / 64
+ ldr t0, [ttab, #i + 0]
+ ldr t1, [ttab, #i + 32]
+ .set i, i + 64
+ .endr
+ .endif
+
+ pop {rounds} // oldcpsr
+ \round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b, rounds
#ifdef CONFIG_CPU_BIG_ENDIAN
__rev r4, r4
@@ -175,7 +217,7 @@
.endm
ENTRY(__aes_arm_encrypt)
- do_crypt fround, crypto_ft_tab, crypto_ft_tab + 1, 2
+ do_crypt fround, crypto_ft_tab,, 2
ENDPROC(__aes_arm_encrypt)
.align 5
diff --git a/arch/arm/crypto/chacha20-neon-core.S b/arch/arm/crypto/chacha-neon-core.S
index 50e7b9896818..eb22926d4912 100644
--- a/arch/arm/crypto/chacha20-neon-core.S
+++ b/arch/arm/crypto/chacha-neon-core.S
@@ -1,5 +1,5 @@
/*
- * ChaCha20 256-bit cipher algorithm, RFC7539, ARM NEON functions
+ * ChaCha/XChaCha NEON helper functions
*
* Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
@@ -27,9 +27,9 @@
* (d) vtbl.8 + vtbl.8 (multiple of 8 bits rotations only,
* needs index vector)
*
- * ChaCha20 has 16, 12, 8, and 7-bit rotations. For the 12 and 7-bit
- * rotations, the only choices are (a) and (b). We use (a) since it takes
- * two-thirds the cycles of (b) on both Cortex-A7 and Cortex-A53.
+ * ChaCha has 16, 12, 8, and 7-bit rotations. For the 12 and 7-bit rotations,
+ * the only choices are (a) and (b). We use (a) since it takes two-thirds the
+ * cycles of (b) on both Cortex-A7 and Cortex-A53.
*
* For the 16-bit rotation, we use vrev32.16 since it's consistently fastest
* and doesn't need a temporary register.
@@ -52,30 +52,20 @@
.fpu neon
.align 5
-ENTRY(chacha20_block_xor_neon)
- // r0: Input state matrix, s
- // r1: 1 data block output, o
- // r2: 1 data block input, i
-
- //
- // This function encrypts one ChaCha20 block by loading the state matrix
- // in four NEON registers. It performs matrix operation on four words in
- // parallel, but requireds shuffling to rearrange the words after each
- // round.
- //
-
- // x0..3 = s0..3
- add ip, r0, #0x20
- vld1.32 {q0-q1}, [r0]
- vld1.32 {q2-q3}, [ip]
-
- vmov q8, q0
- vmov q9, q1
- vmov q10, q2
- vmov q11, q3
+/*
+ * chacha_permute - permute one block
+ *
+ * Permute one 64-byte block where the state matrix is stored in the four NEON
+ * registers q0-q3. It performs matrix operations on four words in parallel,
+ * but requires shuffling to rearrange the words after each round.
+ *
+ * The round count is given in r3.
+ *
+ * Clobbers: r3, ip, q4-q5
+ */
+chacha_permute:
adr ip, .Lrol8_table
- mov r3, #10
vld1.8 {d10}, [ip, :64]
.Ldoubleround:
@@ -139,9 +129,31 @@ ENTRY(chacha20_block_xor_neon)
// x3 = shuffle32(x3, MASK(0, 3, 2, 1))
vext.8 q3, q3, q3, #4
- subs r3, r3, #1
+ subs r3, r3, #2
bne .Ldoubleround
+ bx lr
+ENDPROC(chacha_permute)
+
+ENTRY(chacha_block_xor_neon)
+ // r0: Input state matrix, s
+ // r1: 1 data block output, o
+ // r2: 1 data block input, i
+ // r3: nrounds
+ push {lr}
+
+ // x0..3 = s0..3
+ add ip, r0, #0x20
+ vld1.32 {q0-q1}, [r0]
+ vld1.32 {q2-q3}, [ip]
+
+ vmov q8, q0
+ vmov q9, q1
+ vmov q10, q2
+ vmov q11, q3
+
+ bl chacha_permute
+
add ip, r2, #0x20
vld1.8 {q4-q5}, [r2]
vld1.8 {q6-q7}, [ip]
@@ -166,15 +178,33 @@ ENTRY(chacha20_block_xor_neon)
vst1.8 {q0-q1}, [r1]
vst1.8 {q2-q3}, [ip]
- bx lr
-ENDPROC(chacha20_block_xor_neon)
+ pop {pc}
+ENDPROC(chacha_block_xor_neon)
+
+ENTRY(hchacha_block_neon)
+ // r0: Input state matrix, s
+ // r1: output (8 32-bit words)
+ // r2: nrounds
+ push {lr}
+
+ vld1.32 {q0-q1}, [r0]!
+ vld1.32 {q2-q3}, [r0]
+
+ mov r3, r2
+ bl chacha_permute
+
+ vst1.32 {q0}, [r1]!
+ vst1.32 {q3}, [r1]
+
+ pop {pc}
+ENDPROC(hchacha_block_neon)
.align 4
.Lctrinc: .word 0, 1, 2, 3
.Lrol8_table: .byte 3, 0, 1, 2, 7, 4, 5, 6
.align 5
-ENTRY(chacha20_4block_xor_neon)
+ENTRY(chacha_4block_xor_neon)
push {r4-r5}
mov r4, sp // preserve the stack pointer
sub ip, sp, #0x20 // allocate a 32 byte buffer
@@ -184,9 +214,10 @@ ENTRY(chacha20_4block_xor_neon)
// r0: Input state matrix, s
// r1: 4 data blocks output, o
// r2: 4 data blocks input, i
+ // r3: nrounds
//
- // This function encrypts four consecutive ChaCha20 blocks by loading
+ // This function encrypts four consecutive ChaCha blocks by loading
// the state matrix in NEON registers four times. The algorithm performs
// each operation on the corresponding word of each state matrix, hence
// requires no word shuffling. The words are re-interleaved before the
@@ -219,7 +250,6 @@ ENTRY(chacha20_4block_xor_neon)
vdup.32 q0, d0[0]
adr ip, .Lrol8_table
- mov r3, #10
b 1f
.Ldoubleround4:
@@ -417,7 +447,7 @@ ENTRY(chacha20_4block_xor_neon)
vsri.u32 q5, q8, #25
vsri.u32 q6, q9, #25
- subs r3, r3, #1
+ subs r3, r3, #2
bne .Ldoubleround4
// x0..7[0-3] are in q0-q7, x10..15[0-3] are in q10-q15.
@@ -527,4 +557,4 @@ ENTRY(chacha20_4block_xor_neon)
pop {r4-r5}
bx lr
-ENDPROC(chacha20_4block_xor_neon)
+ENDPROC(chacha_4block_xor_neon)
diff --git a/arch/arm/crypto/chacha-neon-glue.c b/arch/arm/crypto/chacha-neon-glue.c
new file mode 100644
index 000000000000..9d6fda81986d
--- /dev/null
+++ b/arch/arm/crypto/chacha-neon-glue.c
@@ -0,0 +1,201 @@
+/*
+ * ARM NEON accelerated ChaCha and XChaCha stream ciphers,
+ * including ChaCha20 (RFC7539)
+ *
+ * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on:
+ * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
+ int nrounds);
+asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src,
+ int nrounds);
+asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
+
+static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ u8 buf[CHACHA_BLOCK_SIZE];
+
+ while (bytes >= CHACHA_BLOCK_SIZE * 4) {
+ chacha_4block_xor_neon(state, dst, src, nrounds);
+ bytes -= CHACHA_BLOCK_SIZE * 4;
+ src += CHACHA_BLOCK_SIZE * 4;
+ dst += CHACHA_BLOCK_SIZE * 4;
+ state[12] += 4;
+ }
+ while (bytes >= CHACHA_BLOCK_SIZE) {
+ chacha_block_xor_neon(state, dst, src, nrounds);
+ bytes -= CHACHA_BLOCK_SIZE;
+ src += CHACHA_BLOCK_SIZE;
+ dst += CHACHA_BLOCK_SIZE;
+ state[12]++;
+ }
+ if (bytes) {
+ memcpy(buf, src, bytes);
+ chacha_block_xor_neon(state, buf, buf, nrounds);
+ memcpy(dst, buf, bytes);
+ }
+}
+
+static int chacha_neon_stream_xor(struct skcipher_request *req,
+ struct chacha_ctx *ctx, u8 *iv)
+{
+ struct skcipher_walk walk;
+ u32 state[16];
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ crypto_chacha_init(state, ctx, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ kernel_neon_begin();
+ chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, ctx->nrounds);
+ kernel_neon_end();
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+static int chacha_neon(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ return crypto_chacha_crypt(req);
+
+ return chacha_neon_stream_xor(req, ctx, req->iv);
+}
+
+static int xchacha_neon(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ u32 state[16];
+ u8 real_iv[16];
+
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ return crypto_xchacha_crypt(req);
+
+ crypto_chacha_init(state, ctx, req->iv);
+
+ kernel_neon_begin();
+ hchacha_block_neon(state, subctx.key, ctx->nrounds);
+ kernel_neon_end();
+ subctx.nrounds = ctx->nrounds;
+
+ memcpy(&real_iv[0], req->iv + 24, 8);
+ memcpy(&real_iv[8], req->iv + 16, 8);
+ return chacha_neon_stream_xor(req, &subctx, real_iv);
+}
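Schematically, xchacha_neon() above implements the standard XChaCha construction (offsets taken from the code above, notation informal):

	subkey    = HChaCha(key, iv[0..15])     /* hchacha_block_neon */
	chacha_iv = iv[24..31] || iv[16..23]    /* 'real_iv': block counter,
	                                           then the remaining nonce */
	output    = ChaCha(subkey, chacha_iv) XOR input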
+
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .walksize = 4 * CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha20_setkey,
+ .encrypt = chacha_neon,
+ .decrypt = chacha_neon,
+ }, {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .walksize = 4 * CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha20_setkey,
+ .encrypt = xchacha_neon,
+ .decrypt = xchacha_neon,
+ }, {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .walksize = 4 * CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha12_setkey,
+ .encrypt = xchacha_neon,
+ .decrypt = xchacha_neon,
+ }
+};
+
+static int __init chacha_simd_mod_init(void)
+{
+ if (!(elf_hwcap & HWCAP_NEON))
+ return -ENODEV;
+
+ return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit chacha_simd_mod_fini(void)
+{
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+module_init(chacha_simd_mod_init);
+module_exit(chacha_simd_mod_fini);
+
+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-neon");
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-neon");
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-neon");
diff --git a/arch/arm/crypto/chacha20-neon-glue.c b/arch/arm/crypto/chacha20-neon-glue.c
deleted file mode 100644
index 59a7be08e80c..000000000000
--- a/arch/arm/crypto/chacha20-neon-glue.c
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * ChaCha20 256-bit cipher algorithm, RFC7539, ARM NEON functions
- *
- * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Based on:
- * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
- *
- * Copyright (C) 2015 Martin Willi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/chacha20.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-asmlinkage void chacha20_block_xor_neon(u32 *state, u8 *dst, const u8 *src);
-asmlinkage void chacha20_4block_xor_neon(u32 *state, u8 *dst, const u8 *src);
-
-static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes)
-{
- u8 buf[CHACHA20_BLOCK_SIZE];
-
- while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
- chacha20_4block_xor_neon(state, dst, src);
- bytes -= CHACHA20_BLOCK_SIZE * 4;
- src += CHACHA20_BLOCK_SIZE * 4;
- dst += CHACHA20_BLOCK_SIZE * 4;
- state[12] += 4;
- }
- while (bytes >= CHACHA20_BLOCK_SIZE) {
- chacha20_block_xor_neon(state, dst, src);
- bytes -= CHACHA20_BLOCK_SIZE;
- src += CHACHA20_BLOCK_SIZE;
- dst += CHACHA20_BLOCK_SIZE;
- state[12]++;
- }
- if (bytes) {
- memcpy(buf, src, bytes);
- chacha20_block_xor_neon(state, buf, buf);
- memcpy(dst, buf, bytes);
- }
-}
-
-static int chacha20_neon(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- if (req->cryptlen <= CHACHA20_BLOCK_SIZE || !may_use_simd())
- return crypto_chacha20_crypt(req);
-
- err = skcipher_walk_virt(&walk, req, true);
-
- crypto_chacha20_init(state, ctx, walk.iv);
-
- kernel_neon_begin();
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- chacha20_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
- kernel_neon_end();
-
- return err;
-}
-
-static struct skcipher_alg alg = {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha20_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA20_KEY_SIZE,
- .max_keysize = CHACHA20_KEY_SIZE,
- .ivsize = CHACHA20_IV_SIZE,
- .chunksize = CHACHA20_BLOCK_SIZE,
- .walksize = 4 * CHACHA20_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
- .encrypt = chacha20_neon,
- .decrypt = chacha20_neon,
-};
-
-static int __init chacha20_simd_mod_init(void)
-{
- if (!(elf_hwcap & HWCAP_NEON))
- return -ENODEV;
-
- return crypto_register_skcipher(&alg);
-}
-
-static void __exit chacha20_simd_mod_fini(void)
-{
- crypto_unregister_skcipher(&alg);
-}
-
-module_init(chacha20_simd_mod_init);
-module_exit(chacha20_simd_mod_fini);
-
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
diff --git a/arch/arm/crypto/nh-neon-core.S b/arch/arm/crypto/nh-neon-core.S
new file mode 100644
index 000000000000..434d80ab531c
--- /dev/null
+++ b/arch/arm/crypto/nh-neon-core.S
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NH - ε-almost-universal hash function, NEON accelerated version
+ *
+ * Copyright 2018 Google LLC
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+
+#include <linux/linkage.h>
+
+ .text
+ .fpu neon
+
+ KEY .req r0
+ MESSAGE .req r1
+ MESSAGE_LEN .req r2
+ HASH .req r3
+
+ PASS0_SUMS .req q0
+ PASS0_SUM_A .req d0
+ PASS0_SUM_B .req d1
+ PASS1_SUMS .req q1
+ PASS1_SUM_A .req d2
+ PASS1_SUM_B .req d3
+ PASS2_SUMS .req q2
+ PASS2_SUM_A .req d4
+ PASS2_SUM_B .req d5
+ PASS3_SUMS .req q3
+ PASS3_SUM_A .req d6
+ PASS3_SUM_B .req d7
+ K0 .req q4
+ K1 .req q5
+ K2 .req q6
+ K3 .req q7
+ T0 .req q8
+ T0_L .req d16
+ T0_H .req d17
+ T1 .req q9
+ T1_L .req d18
+ T1_H .req d19
+ T2 .req q10
+ T2_L .req d20
+ T2_H .req d21
+ T3 .req q11
+ T3_L .req d22
+ T3_H .req d23
+
+.macro _nh_stride k0, k1, k2, k3
+
+ // Load next message stride
+ vld1.8 {T3}, [MESSAGE]!
+
+ // Load next key stride
+ vld1.32 {\k3}, [KEY]!
+
+ // Add message words to key words
+ vadd.u32 T0, T3, \k0
+ vadd.u32 T1, T3, \k1
+ vadd.u32 T2, T3, \k2
+ vadd.u32 T3, T3, \k3
+
+ // Multiply 32x32 => 64 and accumulate
+ vmlal.u32 PASS0_SUMS, T0_L, T0_H
+ vmlal.u32 PASS1_SUMS, T1_L, T1_H
+ vmlal.u32 PASS2_SUMS, T2_L, T2_H
+ vmlal.u32 PASS3_SUMS, T3_L, T3_H
+.endm
+
+/*
+ * void nh_neon(const u32 *key, const u8 *message, size_t message_len,
+ * u8 hash[NH_HASH_BYTES])
+ *
+ * It's guaranteed that message_len % 16 == 0.
+ */
+ENTRY(nh_neon)
+
+ vld1.32 {K0,K1}, [KEY]!
+ vmov.u64 PASS0_SUMS, #0
+ vmov.u64 PASS1_SUMS, #0
+ vld1.32 {K2}, [KEY]!
+ vmov.u64 PASS2_SUMS, #0
+ vmov.u64 PASS3_SUMS, #0
+
+ subs MESSAGE_LEN, MESSAGE_LEN, #64
+ blt .Lloop4_done
+.Lloop4:
+ _nh_stride K0, K1, K2, K3
+ _nh_stride K1, K2, K3, K0
+ _nh_stride K2, K3, K0, K1
+ _nh_stride K3, K0, K1, K2
+ subs MESSAGE_LEN, MESSAGE_LEN, #64
+ bge .Lloop4
+
+.Lloop4_done:
+ ands MESSAGE_LEN, MESSAGE_LEN, #63
+ beq .Ldone
+ _nh_stride K0, K1, K2, K3
+
+ subs MESSAGE_LEN, MESSAGE_LEN, #16
+ beq .Ldone
+ _nh_stride K1, K2, K3, K0
+
+ subs MESSAGE_LEN, MESSAGE_LEN, #16
+ beq .Ldone
+ _nh_stride K2, K3, K0, K1
+
+.Ldone:
+ // Sum the accumulators for each pass, then store the sums to 'hash'
+ vadd.u64 T0_L, PASS0_SUM_A, PASS0_SUM_B
+ vadd.u64 T0_H, PASS1_SUM_A, PASS1_SUM_B
+ vadd.u64 T1_L, PASS2_SUM_A, PASS2_SUM_B
+ vadd.u64 T1_H, PASS3_SUM_A, PASS3_SUM_B
+ vst1.8 {T0-T1}, [HASH]
+ bx lr
+ENDPROC(nh_neon)
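The arithmetic that _nh_stride vectorises can be spelled out in C. The sketch below mirrors one 16-byte message stride across the four passes (reference only; the names are illustrative, and the generic fallback lives in the core NHPoly1305 code):

	static void nh_stride_ref(const u32 *key, const u32 *m, u64 sums[4])
	{
		int pass;

		for (pass = 0; pass < 4; pass++) {
			const u32 *k = key + 4 * pass;

			/* two 32x32->64 products per pass, accumulated */
			sums[pass] += (u64)(u32)(m[0] + k[0]) * (u32)(m[2] + k[2]);
			sums[pass] += (u64)(u32)(m[1] + k[1]) * (u32)(m[3] + k[3]);
		}
	}

Successive strides rotate through the key, as the K0-K3 register rotation in the .Lloop4 body above shows.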
diff --git a/arch/arm/crypto/nhpoly1305-neon-glue.c b/arch/arm/crypto/nhpoly1305-neon-glue.c
new file mode 100644
index 000000000000..49aae87cb2bc
--- /dev/null
+++ b/arch/arm/crypto/nhpoly1305-neon-glue.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
+ * (NEON accelerated version)
+ *
+ * Copyright 2018 Google LLC
+ */
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <crypto/internal/hash.h>
+#include <crypto/nhpoly1305.h>
+#include <linux/module.h>
+
+asmlinkage void nh_neon(const u32 *key, const u8 *message, size_t message_len,
+ u8 hash[NH_HASH_BYTES]);
+
+/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */
+static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
+ __le64 hash[NH_NUM_PASSES])
+{
+ nh_neon(key, message, message_len, (u8 *)hash);
+}
+
+static int nhpoly1305_neon_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ if (srclen < 64 || !may_use_simd())
+ return crypto_nhpoly1305_update(desc, src, srclen);
+
+ do {
+ unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+
+ kernel_neon_begin();
+ crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
+ kernel_neon_end();
+ src += n;
+ srclen -= n;
+ } while (srclen);
+ return 0;
+}
+
+static struct shash_alg nhpoly1305_alg = {
+ .base.cra_name = "nhpoly1305",
+ .base.cra_driver_name = "nhpoly1305-neon",
+ .base.cra_priority = 200,
+ .base.cra_ctxsize = sizeof(struct nhpoly1305_key),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .init = crypto_nhpoly1305_init,
+ .update = nhpoly1305_neon_update,
+ .final = crypto_nhpoly1305_final,
+ .setkey = crypto_nhpoly1305_setkey,
+ .descsize = sizeof(struct nhpoly1305_state),
+};
+
+static int __init nhpoly1305_mod_init(void)
+{
+ if (!(elf_hwcap & HWCAP_NEON))
+ return -ENODEV;
+
+ return crypto_register_shash(&nhpoly1305_alg);
+}
+
+static void __exit nhpoly1305_mod_exit(void)
+{
+ crypto_unregister_shash(&nhpoly1305_alg);
+}
+
+module_init(nhpoly1305_mod_init);
+module_exit(nhpoly1305_mod_exit);
+
+MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (NEON-accelerated)");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("nhpoly1305");
+MODULE_ALIAS_CRYPTO("nhpoly1305-neon");
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 231e87ad45d5..35491af87985 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -23,6 +23,10 @@
#define ARM_EXIT_WITH_ABORT_BIT 31
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
+#define ARM_EXCEPTION_IS_TRAP(x) \
+ (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_PREF_ABORT || \
+ ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_DATA_ABORT || \
+ ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_HVC)
#define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
#define ARM_EXCEPTION_RESET 0
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 5ca5d9af0c26..ca56537b61bc 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -225,7 +225,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
@@ -285,7 +285,7 @@ void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
-static inline bool kvm_arch_check_sve_has_vhe(void) { return true; }
+static inline bool kvm_arch_requires_vhe(void) { return false; }
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
@@ -296,11 +296,6 @@ static inline void kvm_arm_init_debug(void) {}
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
-static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
-{
- return false;
-}
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 1098ffc3d54b..3a875fc1b63c 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -82,6 +82,67 @@ void kvm_clear_hyp_idmap(void);
#define kvm_mk_pud(pmdp) __pud(__pa(pmdp) | PMD_TYPE_TABLE)
#define kvm_mk_pgd(pudp) ({ BUILD_BUG(); 0; })
+#define kvm_pfn_pte(pfn, prot) pfn_pte(pfn, prot)
+#define kvm_pfn_pmd(pfn, prot) pfn_pmd(pfn, prot)
+#define kvm_pfn_pud(pfn, prot) (__pud(0))
+
+#define kvm_pud_pfn(pud) ({ WARN_ON(1); 0; })
+
+
+#define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd)
+/* No support for pud hugepages */
+#define kvm_pud_mkhuge(pud) ({ WARN_ON(1); pud; })
+
+/*
+ * The following kvm_*pud*() functions are provided strictly to allow
+ * sharing code with arm64. They should never be called in practice.
+ */
+static inline void kvm_set_s2pud_readonly(pud_t *pud)
+{
+ WARN_ON(1);
+}
+
+static inline bool kvm_s2pud_readonly(pud_t *pud)
+{
+ WARN_ON(1);
+ return false;
+}
+
+static inline void kvm_set_pud(pud_t *pud, pud_t new_pud)
+{
+ WARN_ON(1);
+}
+
+static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
+{
+ WARN_ON(1);
+ return pud;
+}
+
+static inline pud_t kvm_s2pud_mkexec(pud_t pud)
+{
+ WARN_ON(1);
+ return pud;
+}
+
+static inline bool kvm_s2pud_exec(pud_t *pud)
+{
+ WARN_ON(1);
+ return false;
+}
+
+static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
+{
+ BUG();
+ return pud;
+}
+
+static inline bool kvm_s2pud_young(pud_t pud)
+{
+ WARN_ON(1);
+ return false;
+}
+
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
pte_val(pte) |= L_PTE_S2_RDWR;
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 9e81b7c498d8..182163b55546 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -61,4 +61,15 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val);
MODULE_ARCH_VERMAGIC_ARMTHUMB \
MODULE_ARCH_VERMAGIC_P2V
+#ifdef CONFIG_THUMB2_KERNEL
+#define HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
+static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
+{
+ if (ELF_ST_TYPE(sym->st_info) == STT_FUNC)
+ return sym->st_value & ~1;
+
+ return sym->st_value;
+}
+#endif
+
#endif /* _ASM_ARM_MODULE_H */
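A worked example of the Thumb-2 quirk handled above (addresses illustrative): a Thumb function placed at 0xc0101234 gets an ELF symbol value of 0xc0101235, with bit 0 encoding the Thumb state, so:

	Elf_Sym sym = { .st_value = 0xc0101235, .st_info = STT_FUNC };

	kallsyms_symbol_value(&sym);	/* returns 0xc0101234 */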
diff --git a/arch/arm/include/asm/stackprotector.h b/arch/arm/include/asm/stackprotector.h
index ef5f7b69443e..72a20c3a0a90 100644
--- a/arch/arm/include/asm/stackprotector.h
+++ b/arch/arm/include/asm/stackprotector.h
@@ -6,8 +6,10 @@
* the stack frame and verifying that it hasn't been overwritten when
* returning from the function. The pattern is called stack canary
* and gcc expects it to be defined by a global variable called
- * "__stack_chk_guard" on ARM. This unfortunately means that on SMP
- * we cannot have a different canary value per task.
+ * "__stack_chk_guard" on ARM. This prevents SMP systems from using a
+ * different value for each task unless we enable a GCC plugin that
+ * replaces these symbol references with references to each task's own
+ * value.
*/
#ifndef _ASM_STACKPROTECTOR_H
@@ -16,6 +18,8 @@
#include <linux/random.h>
#include <linux/version.h>
+#include <asm/thread_info.h>
+
extern unsigned long __stack_chk_guard;
/*
@@ -33,7 +37,11 @@ static __always_inline void boot_init_stack_canary(void)
canary ^= LINUX_VERSION_CODE;
current->stack_canary = canary;
+#ifndef CONFIG_STACKPROTECTOR_PER_TASK
__stack_chk_guard = current->stack_canary;
+#else
+ current_thread_info()->stack_canary = current->stack_canary;
+#endif
}
#endif /* _ASM_STACKPROTECTOR_H */
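For context, the canary handling itself is emitted by the compiler; conceptually every protected function behaves like the following sketch (illustrative only, where the guard reference is either the global __stack_chk_guard or, with the per-task plugin, each task's own value):

	void some_function(void)
	{
		unsigned long canary = __stack_chk_guard;	/* prologue */

		/* ... body with on-stack buffers ... */

		if (canary != __stack_chk_guard)		/* epilogue */
			__stack_chk_fail();	/* overwrite detected */
	}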
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index f6a7ea805232..c4b1d4fb1797 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -68,4 +68,12 @@ stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
#define stage2_pud_table_empty(kvm, pudp) false
+static inline bool kvm_stage2_has_pud(struct kvm *kvm)
+{
+ return false;
+}
+
+#define S2_PMD_MASK PMD_MASK
+#define S2_PMD_SIZE PMD_SIZE
+
#endif /* __ARM_S2_PGTABLE_H_ */
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 8f55dc520a3e..286eb61c632b 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -53,6 +53,9 @@ struct thread_info {
struct task_struct *task; /* main task structure */
__u32 cpu; /* cpu */
__u32 cpu_domain; /* cpu domain */
+#ifdef CONFIG_STACKPROTECTOR_PER_TASK
+ unsigned long stack_canary;
+#endif
struct cpu_context_save cpu_context; /* cpu context */
__u32 syscall; /* syscall number */
__u8 used_cp[16]; /* thread used copro */
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 3968d6c22455..28b27104ac0c 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -79,6 +79,10 @@ int main(void)
#ifdef CONFIG_CRUNCH
DEFINE(TI_CRUNCH_STATE, offsetof(struct thread_info, crunchstate));
#endif
+#ifdef CONFIG_STACKPROTECTOR_PER_TASK
+ DEFINE(TI_STACK_CANARY, offsetof(struct thread_info, stack_canary));
+#endif
+ DEFINE(THREAD_SZ_ORDER, THREAD_SIZE_ORDER);
BLANK();
DEFINE(S_R0, offsetof(struct pt_regs, ARM_r0));
DEFINE(S_R1, offsetof(struct pt_regs, ARM_r1));
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 0142fcfcc3d3..bda949fd84e8 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -183,9 +183,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
{
unsigned long return_hooker = (unsigned long) &return_to_handler;
- struct ftrace_graph_ent trace;
unsigned long old;
- int err;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
@@ -193,21 +191,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
old = *parent;
*parent = return_hooker;
- trace.func = self_addr;
- trace.depth = current->curr_ret_stack + 1;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
+ if (function_graph_enter(old, self_addr, frame_pointer, NULL))
*parent = old;
- return;
- }
-
- err = ftrace_push_return_trace(old, self_addr, &trace.depth,
- frame_pointer, NULL);
- if (err == -EBUSY) {
- *parent = old;
- return;
- }
}
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 82ab015bf42b..16601d1442d1 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -39,7 +39,7 @@
#include <asm/tls.h>
#include <asm/vdso.h>
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
@@ -267,6 +267,10 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
thread_notify(THREAD_NOTIFY_COPY, thread);
+#ifdef CONFIG_STACKPROTECTOR_PER_TASK
+ thread->stack_canary = p->stack_canary;
+#endif
+
return 0;
}
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index cb094e55dc5f..222c1635bc7a 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -602,8 +602,8 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
}
} else {
/* If access function fails, it should complain. */
- kvm_err("Unsupported guest CP15 access at: %08lx\n",
- *vcpu_pc(vcpu));
+ kvm_err("Unsupported guest CP15 access at: %08lx [%08lx]\n",
+ *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
print_cp_instr(params);
kvm_inject_undefined(vcpu);
}
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 8143756ff38b..09e439d4abf5 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -794,9 +794,9 @@ static __init void dm365_evm_init(void)
/* maybe setup mmc1/etc ... _after_ mmc0 */
evm_init_cpld();
-#ifdef CONFIG_SND_DM365_AIC3X_CODEC
+#ifdef CONFIG_SND_SOC_DM365_AIC3X_CODEC
dm365_init_asp();
-#elif defined(CONFIG_SND_DM365_VOICE_CODEC)
+#elif defined(CONFIG_SND_SOC_DM365_VOICE_CODEC)
dm365_init_vc();
#endif
dm365_init_rtc();
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index 0bc5bd2665df..2cc9fe4c3a91 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -759,7 +759,9 @@ static struct davinci_id da830_ids[] = {
};
static struct davinci_gpio_platform_data da830_gpio_platform_data = {
- .ngpio = 128,
+ .no_auto_base = true,
+ .base = 0,
+ .ngpio = 128,
};
int __init da830_register_gpio(void)
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 4528bbf0c861..e7b78df2bfef 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -719,7 +719,9 @@ int __init da850_register_vpif_capture(struct vpif_capture_config
}
static struct davinci_gpio_platform_data da850_gpio_platform_data = {
- .ngpio = 144,
+ .no_auto_base = true,
+ .base = 0,
+ .ngpio = 144,
};
int __init da850_register_gpio(void)
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 1fd3619f6a09..cf78da5ab054 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -701,6 +701,46 @@ static struct resource da8xx_gpio_resources[] = {
},
{ /* interrupt */
.start = IRQ_DA8XX_GPIO0,
+ .end = IRQ_DA8XX_GPIO0,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DA8XX_GPIO1,
+ .end = IRQ_DA8XX_GPIO1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DA8XX_GPIO2,
+ .end = IRQ_DA8XX_GPIO2,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DA8XX_GPIO3,
+ .end = IRQ_DA8XX_GPIO3,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DA8XX_GPIO4,
+ .end = IRQ_DA8XX_GPIO4,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DA8XX_GPIO5,
+ .end = IRQ_DA8XX_GPIO5,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DA8XX_GPIO6,
+ .end = IRQ_DA8XX_GPIO6,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DA8XX_GPIO7,
+ .end = IRQ_DA8XX_GPIO7,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DA8XX_GPIO8,
.end = IRQ_DA8XX_GPIO8,
.flags = IORESOURCE_IRQ,
},
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 9f7d38d12c88..4c6e0bef4509 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -548,12 +548,44 @@ static struct resource dm355_gpio_resources[] = {
},
{ /* interrupt */
.start = IRQ_DM355_GPIOBNK0,
+ .end = IRQ_DM355_GPIOBNK0,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM355_GPIOBNK1,
+ .end = IRQ_DM355_GPIOBNK1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM355_GPIOBNK2,
+ .end = IRQ_DM355_GPIOBNK2,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM355_GPIOBNK3,
+ .end = IRQ_DM355_GPIOBNK3,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM355_GPIOBNK4,
+ .end = IRQ_DM355_GPIOBNK4,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM355_GPIOBNK5,
+ .end = IRQ_DM355_GPIOBNK5,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM355_GPIOBNK6,
.end = IRQ_DM355_GPIOBNK6,
.flags = IORESOURCE_IRQ,
},
};
static struct davinci_gpio_platform_data dm355_gpio_platform_data = {
+ .no_auto_base = true,
+ .base = 0,
.ngpio = 104,
};
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index abcf2a5ed89b..01fb2b0c82de 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -267,12 +267,49 @@ static struct resource dm365_gpio_resources[] = {
},
{ /* interrupt */
.start = IRQ_DM365_GPIO0,
+ .end = IRQ_DM365_GPIO0,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM365_GPIO1,
+ .end = IRQ_DM365_GPIO1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM365_GPIO2,
+ .end = IRQ_DM365_GPIO2,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM365_GPIO3,
+ .end = IRQ_DM365_GPIO3,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM365_GPIO4,
+ .end = IRQ_DM365_GPIO4,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM365_GPIO5,
+ .end = IRQ_DM365_GPIO5,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM365_GPIO6,
+ .end = IRQ_DM365_GPIO6,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM365_GPIO7,
.end = IRQ_DM365_GPIO7,
.flags = IORESOURCE_IRQ,
},
};
static struct davinci_gpio_platform_data dm365_gpio_platform_data = {
+ .no_auto_base = true,
+ .base = 0,
.ngpio = 104,
.gpio_unbanked = 8,
};
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 0720da7809a6..38f92b7d413e 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -492,12 +492,34 @@ static struct resource dm644_gpio_resources[] = {
},
{ /* interrupt */
.start = IRQ_GPIOBNK0,
+ .end = IRQ_GPIOBNK0,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_GPIOBNK1,
+ .end = IRQ_GPIOBNK1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_GPIOBNK2,
+ .end = IRQ_GPIOBNK2,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_GPIOBNK3,
+ .end = IRQ_GPIOBNK3,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_GPIOBNK4,
.end = IRQ_GPIOBNK4,
.flags = IORESOURCE_IRQ,
},
};
static struct davinci_gpio_platform_data dm644_gpio_platform_data = {
+ .no_auto_base = true,
+ .base = 0,
.ngpio = 71,
};
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 6bd2ed069d0d..7dc54b2a610f 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -442,12 +442,24 @@ static struct resource dm646x_gpio_resources[] = {
},
{ /* interrupt */
.start = IRQ_DM646X_GPIOBNK0,
+ .end = IRQ_DM646X_GPIOBNK0,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM646X_GPIOBNK1,
+ .end = IRQ_DM646X_GPIOBNK1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = IRQ_DM646X_GPIOBNK2,
.end = IRQ_DM646X_GPIOBNK2,
.flags = IORESOURCE_IRQ,
},
};
static struct davinci_gpio_platform_data dm646x_gpio_platform_data = {
+ .no_auto_base = true,
+ .base = 0,
.ngpio = 43,
};
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 243a108a940b..fd0053e47a15 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -110,7 +110,7 @@ int __init imx6sx_cpuidle_init(void)
* except for power up sw2iso which need to be
* larger than LDO ramp up time.
*/
- imx_gpc_set_arm_power_up_timing(2, 1);
+ imx_gpc_set_arm_power_up_timing(0xf, 1);
imx_gpc_set_arm_power_down_timing(1, 1);
return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
diff --git a/arch/arm/mach-mmp/cputype.h b/arch/arm/mach-mmp/cputype.h
index 446edaeb78a7..a96abcf521b4 100644
--- a/arch/arm/mach-mmp/cputype.h
+++ b/arch/arm/mach-mmp/cputype.h
@@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void)
#define cpu_is_pxa910() (0)
#endif
-#ifdef CONFIG_CPU_MMP2
+#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
static inline int cpu_is_mmp2(void)
{
- return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+ return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
+ (((mmp_chip_id & 0xfff) == 0x410) ||
+ ((mmp_chip_id & 0xfff) == 0x610));
}
#else
#define cpu_is_mmp2() (0)
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index e8ccf51c6f29..a7e9c6d19fb5 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -8,7 +8,7 @@ obj-y := io.o id.o sram-init.o sram.o time.o irq.o mux.o flash.o \
serial.o devices.o dma.o fb.o
obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o timer.o
-ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),)
+ifneq ($(CONFIG_SND_SOC_OMAP_MCBSP),)
obj-y += mcbsp.o
endif
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 3d191fd52910..691a8da13fac 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -296,23 +296,13 @@ struct modem_private_data {
static struct modem_private_data modem_priv;
-static struct resource ams_delta_nand_resources[] = {
- [0] = {
- .start = OMAP1_MPUIO_BASE,
- .end = OMAP1_MPUIO_BASE +
- OMAP_MPUIO_IO_CNTL + sizeof(u32) - 1,
- .flags = IORESOURCE_MEM,
- },
-};
-
static struct platform_device ams_delta_nand_device = {
.name = "ams-delta-nand",
.id = -1,
- .num_resources = ARRAY_SIZE(ams_delta_nand_resources),
- .resource = ams_delta_nand_resources,
};
-#define OMAP_GPIO_LABEL "gpio-0-15"
+#define OMAP_GPIO_LABEL "gpio-0-15"
+#define OMAP_MPUIO_LABEL "mpuio"
static struct gpiod_lookup_table ams_delta_nand_gpio_table = {
.table = {
@@ -324,6 +314,14 @@ static struct gpiod_lookup_table ams_delta_nand_gpio_table = {
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_NWE, "nwe", 0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_ALE, "ale", 0),
GPIO_LOOKUP(LATCH2_LABEL, LATCH2_PIN_NAND_CLE, "cle", 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 0, "data", 0, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 1, "data", 1, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 2, "data", 2, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 3, "data", 3, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 4, "data", 4, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 5, "data", 5, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 6, "data", 6, 0),
+ GPIO_LOOKUP_IDX(OMAP_MPUIO_LABEL, 7, "data", 7, 0),
{ },
},
};
@@ -750,6 +748,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
struct modem_private_data *priv = port->private_data;
int ret;
+ if (!priv)
+ return;
+
if (IS_ERR(priv->regulator))
return;
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 01377c292db4..899c60fac159 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_SOC_OMAP5) += $(hwmod-common) $(secure-common)
obj-$(CONFIG_SOC_AM43XX) += $(hwmod-common) $(secure-common)
obj-$(CONFIG_SOC_DRA7XX) += $(hwmod-common) $(secure-common)
-ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),)
+ifneq ($(CONFIG_SND_SOC_OMAP_MCBSP),)
obj-y += mcbsp.o
endif
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 9fec5f84bf77..8a5b6ed4ec36 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -524,7 +524,7 @@ void omap_auxdata_legacy_init(struct device *dev)
dev->platform_data = &twl_gpio_auxdata;
}
-#if IS_ENABLED(CONFIG_SND_OMAP_SOC_MCBSP)
+#if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP)
static struct omap_mcbsp_platform_data mcbsp_pdata;
static void __init omap3_mcbsp_init(void)
{
@@ -572,7 +572,7 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0",
&am35xx_emac_pdata),
/* McBSP modules with sidetone core */
-#if IS_ENABLED(CONFIG_SND_OMAP_SOC_MCBSP)
+#if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP)
OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49022000, "49022000.mcbsp", &mcbsp_pdata),
OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49024000, "49024000.mcbsp", &mcbsp_pdata),
#endif
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 7b95729e8359..38a1be6c3694 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -351,7 +351,7 @@ static void omap44xx_prm_reconfigure_io_chain(void)
* to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
* omap44xx_prm_reconfigure_io_chain() must be called. No return value.
*/
-static void __init omap44xx_prm_enable_io_wakeup(void)
+static void omap44xx_prm_enable_io_wakeup(void)
{
s32 inst = omap4_prmst_get_prm_dev_inst();
diff --git a/arch/arm/mach-pxa/palm27x.h b/arch/arm/mach-pxa/palm27x.h
index d4eac3d6ffb5..3316ed2016f3 100644
--- a/arch/arm/mach-pxa/palm27x.h
+++ b/arch/arm/mach-pxa/palm27x.h
@@ -12,6 +12,8 @@
#ifndef __INCLUDE_MACH_PALM27X__
#define __INCLUDE_MACH_PALM27X__
+#include <linux/gpio/machine.h>
+
#if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
extern void __init palm27x_mmc_init(int detect, int ro, int power,
int power_inverted);
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c
index 980f2847f5b5..a37ceec22903 100644
--- a/arch/arm/mach-pxa/palmld.c
+++ b/arch/arm/mach-pxa/palmld.c
@@ -288,8 +288,20 @@ static struct platform_device palmld_ide_device = {
.id = -1,
};
+static struct gpiod_lookup_table palmld_ide_gpio_table = {
+ .dev_id = "pata_palmld",
+ .table = {
+ GPIO_LOOKUP("gpio-pxa", GPIO_NR_PALMLD_IDE_PWEN,
+ "power", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("gpio-pxa", GPIO_NR_PALMLD_IDE_RESET,
+ "reset", GPIO_ACTIVE_LOW),
+ { },
+ },
+};
+
static void __init palmld_ide_init(void)
{
+ gpiod_add_lookup_table(&palmld_ide_gpio_table);
platform_device_register(&palmld_ide_device);
}
#else
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 5aa472892465..76c4855a03bc 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -194,8 +194,8 @@ static struct wm8994_pdata wm8994_pdata = {
0x3, /* IRQ out, active high, CMOS */
},
.ldo = {
- { .enable = S3C64XX_GPN(6), .init_data = &wm8994_ldo1, },
- { .enable = S3C64XX_GPN(4), .init_data = &wm8994_ldo2, },
+ { .init_data = &wm8994_ldo1, },
+ { .init_data = &wm8994_ldo2, },
},
};
@@ -203,6 +203,18 @@ static const struct i2c_board_info wm1277_devs[] = {
{ I2C_BOARD_INFO("wm8958", 0x1a), /* WM8958 is the superset */
.platform_data = &wm8994_pdata,
.irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2,
+ .dev_name = "wm8958",
+ },
+};
+
+static struct gpiod_lookup_table wm8994_gpiod_table = {
+ .dev_id = "i2c-wm8958", /* I2C device name */
+ .table = {
+ GPIO_LOOKUP("GPION", 6,
+ "wlf,ldo1ena", GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP("GPION", 4,
+ "wlf,ldo2ena", GPIO_ACTIVE_HIGH),
+ { },
},
};
@@ -381,6 +393,7 @@ static int wlf_gf_module_probe(struct i2c_client *i2c,
gpiod_add_lookup_table(&wm5102_reva_gpiod_table);
gpiod_add_lookup_table(&wm5102_gpiod_table);
+ gpiod_add_lookup_table(&wm8994_gpiod_table);
if (i < ARRAY_SIZE(gf_mods)) {
dev_info(&i2c->dev, "%s revision %d\n",
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 215df435bfb9..2149b47a0c5a 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -360,14 +360,16 @@ v7_dma_inv_range:
ALT_UP(W(nop))
#endif
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
+ addne r0, r0, r2
tst r1, r3
bic r1, r1, r3
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
-1:
- mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
- add r0, r0, r2
cmp r0, r1
+1:
+ mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line
+ addlo r0, r0, r2
+ cmplo r0, r1
blo 1b
dsb st
ret lr
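A concrete case the new predicated loop handles: invalidating 8 bytes at 0x1008 with 32-byte cache lines. Both range ends fall in line 0x1000, which the boundary code cleans and invalidates; the old sequence would then re-invalidate that same line in the loop body without cleaning it first, so a write landing in the parts of the line outside the buffer between the two operations could be silently discarded. The addne/cmp/mcrlo sequence instead steps past the already-handled line and skips the loop entirely for such small ranges.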
diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S
index 788486e830d3..32aa2a2aa260 100644
--- a/arch/arm/mm/cache-v7m.S
+++ b/arch/arm/mm/cache-v7m.S
@@ -73,9 +73,11 @@
/*
* dcimvac: Invalidate data cache line by MVA to PoC
*/
-.macro dcimvac, rt, tmp
- v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
+.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+.macro dcimvac\c, rt, tmp
+ v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
.endm
+.endr
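Each pass of the .irp loop stamps out one condition-code variant of the macro; the 'ne' iteration, for example, effectively defines:

	.macro dcimvacne, rt, tmp
		v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, ne
	.endm

which is what allows the invalidate loop below to use the predicated dcimvaclo form.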
/*
* dccmvau: Clean data cache line by MVA to PoU
@@ -369,14 +371,16 @@ v7m_dma_inv_range:
tst r0, r3
bic r0, r0, r3
dccimvacne r0, r3
+ addne r0, r0, r2
subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac
tst r1, r3
bic r1, r1, r3
dccimvacne r1, r3
-1:
- dcimvac r0, r3
- add r0, r0, r2
cmp r0, r1
+1:
+ dcimvaclo r0, r3
+ addlo r0, r0, r2
+ cmplo r0, r1
blo 1b
dsb st
ret lr
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 2cfb17bad1e6..f1e2922e447c 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -822,7 +822,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
- int ret;
+ int ret = -ENXIO;
unsigned long nr_vma_pages = vma_pages(vma);
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
unsigned long pfn = dma_to_pfn(dev, dma_addr);
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 81d0efb055c6..19516fbc2c55 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -274,6 +274,13 @@
.endm
.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
+/*
+ * If we are building for big.Little with branch predictor hardening,
+ * we need the processor function tables to remain available after boot.
+ */
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+ .section ".rodata"
+#endif
.type \name\()_processor_functions, #object
.align 2
ENTRY(\name\()_processor_functions)
@@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions)
.endif
.size \name\()_processor_functions, . - \name\()_processor_functions
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+ .previous
+#endif
.endm
.macro define_cache_functions name:req
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index b2aa9b32bff2..2c118a6ab358 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
}
/* Copy arch-dep-instance from template. */
- memcpy(code, &optprobe_template_entry,
+ memcpy(code, (unsigned char *)optprobe_template_entry,
TMPL_END_IDX * sizeof(kprobe_opcode_t));
/* Adjust buffer according to instruction. */
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2092080240b0..ff9291872372 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -5,7 +5,7 @@ config ARM64
select ACPI_GTDT if ACPI
select ACPI_IORT if ACPI
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
- select ACPI_MCFG if ACPI
+ select ACPI_MCFG if (ACPI && PCI)
select ACPI_SPCR_TABLE if ACPI
select ACPI_PPTT if ACPI
select ARCH_CLOCKSOURCE_DATA
@@ -162,7 +162,7 @@ config ARM64
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
- select PCI_ECAM if ACPI
+ select PCI_ECAM if (ACPI && PCI)
select POWER_RESET
select POWER_SUPPLY
select REFCOUNT_FULL
@@ -260,6 +260,9 @@ config ZONE_DMA32
config HAVE_GENERIC_GUP
def_bool y
+config ARCH_ENABLE_MEMORY_HOTPLUG
+ def_bool y
+
config SMP
def_bool y
@@ -273,7 +276,7 @@ config PGTABLE_LEVELS
int
default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36
default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
- default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48
+ default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52)
default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47
default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48
@@ -312,9 +315,13 @@ menu "Kernel Features"
menu "ARM errata workarounds via the alternatives framework"
+config ARM64_WORKAROUND_CLEAN_CACHE
+ def_bool n
+
config ARM64_ERRATUM_826319
bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted"
default y
+ select ARM64_WORKAROUND_CLEAN_CACHE
help
This option adds an alternative code sequence to work around ARM
erratum 826319 on Cortex-A53 parts up to r0p2 with an AMBA 4 ACE or
@@ -336,6 +343,7 @@ config ARM64_ERRATUM_826319
config ARM64_ERRATUM_827319
bool "Cortex-A53: 827319: Data cache clean instructions might cause overlapping transactions to the interconnect"
default y
+ select ARM64_WORKAROUND_CLEAN_CACHE
help
This option adds an alternative code sequence to work around ARM
erratum 827319 on Cortex-A53 parts up to r0p2 with an AMBA 5 CHI
@@ -357,6 +365,7 @@ config ARM64_ERRATUM_827319
config ARM64_ERRATUM_824069
bool "Cortex-A53: 824069: Cache line might not be marked as clean after a CleanShared snoop"
default y
+ select ARM64_WORKAROUND_CLEAN_CACHE
help
This option adds an alternative code sequence to work around ARM
erratum 824069 on Cortex-A53 parts up to r0p2 when it is connected
@@ -379,6 +388,7 @@ config ARM64_ERRATUM_824069
config ARM64_ERRATUM_819472
bool "Cortex-A53: 819472: Store exclusive instructions might cause data corruption"
default y
+ select ARM64_WORKAROUND_CLEAN_CACHE
help
This option adds an alternative code sequence to work around ARM
erratum 819472 on Cortex-A53 parts up to r0p1 with an L2 cache
@@ -496,6 +506,36 @@ config ARM64_ERRATUM_1188873
If unsure, say Y.
+config ARM64_ERRATUM_1165522
+ bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
+ default y
+ help
+ This option adds a workaround for ARM Cortex-A76 erratum 1165522.
+
+ Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could end up with
+ corrupted TLBs by speculating an AT instruction during a guest
+ context switch.
+
+ If unsure, say Y.
+
+config ARM64_ERRATUM_1286807
+ bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
+ default y
+ select ARM64_WORKAROUND_REPEAT_TLBI
+ help
+ This option adds a workaround for ARM Cortex-A76 erratum 1286807.
+
+ On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual
+ address for a cacheable mapping of a location is being
+ accessed by a core while another core is remapping the virtual
+ address to a new physical page using the recommended
+ break-before-make sequence, then under very rare circumstances
+ TLBI+DSB completes before a read using the translation being
+ invalidated has been observed by other observers. The
+ workaround repeats the TLBI+DSB operation.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -565,9 +605,16 @@ config QCOM_FALKOR_ERRATUM_1003
is unchanged. Work around the erratum by invalidating the walk cache
entries for the trampoline before entering the kernel proper.
+config ARM64_WORKAROUND_REPEAT_TLBI
+ bool
+ help
+ Enable the repeat TLBI workaround for Falkor erratum 1009 and
+ Cortex-A76 erratum 1286807.
+
config QCOM_FALKOR_ERRATUM_1009
bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
default y
+ select ARM64_WORKAROUND_REPEAT_TLBI
help
On Falkor v1, the CPU may prematurely complete a DSB following a
TLBI xxIS invalidate maintenance operation. Repeat the TLBI operation
@@ -674,15 +721,43 @@ config ARM64_VA_BITS_47
config ARM64_VA_BITS_48
bool "48-bit"
+config ARM64_USER_VA_BITS_52
+ bool "52-bit (user)"
+ depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN)
+ help
+ Enable 52-bit virtual addressing for userspace when explicitly
+ requested via a hint to mmap(). The kernel will continue to
+ use 48-bit virtual addresses for its own mappings.
+
+ NOTE: Enabling 52-bit virtual addressing in conjunction with
+ ARMv8.3 Pointer Authentication will result in the PAC being
+ reduced from 7 bits to 3 bits, which may have a significant
+ impact on its susceptibility to brute-force attacks.
+
+ If unsure, select 48-bit virtual addressing instead.
+
endchoice
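As a userspace illustration of the opt-in described above (a sketch; the exact hint value is arbitrary as long as it lies above the 48-bit range):

	#include <sys/mman.h>

	/* Request a VA above 2^48; on 52-bit-capable hardware with this
	 * option enabled, the kernel may place the mapping there. */
	void *p = mmap((void *)(1UL << 50), size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);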
+config ARM64_FORCE_52BIT
+ bool "Force 52-bit virtual addresses for userspace"
+ depends on ARM64_USER_VA_BITS_52 && EXPERT
+ help
+ For systems with 52-bit userspace VAs enabled, the kernel will attempt
+ to maintain compatibility with older software by providing 48-bit VAs
+ unless a hint is supplied to mmap.
+
+ This configuration option disables the 48-bit compatibility logic, and
+ forces all userspace addresses to be 52-bit on HW that supports it. One
+ should only enable this configuration option for stress testing userspace
+ memory management code. If unsure, say N here.
+
config ARM64_VA_BITS
int
default 36 if ARM64_VA_BITS_36
default 39 if ARM64_VA_BITS_39
default 42 if ARM64_VA_BITS_42
default 47 if ARM64_VA_BITS_47
- default 48 if ARM64_VA_BITS_48
+ default 48 if ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52
choice
prompt "Physical address space size"
@@ -857,6 +932,39 @@ config KEXEC
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
+config KEXEC_FILE
+ bool "kexec file based system call"
+ select KEXEC_CORE
+ help
+ This is the new version of the kexec system call. This system
+ call is file based and takes file descriptors for the kernel and
+ initramfs as its arguments, as opposed to the list of segments
+ accepted by the previous system call.
+
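A minimal userspace sketch of the file-based call (paths and command line are examples; glibc has no wrapper, so raw syscall(2) is used, and the length argument must count the trailing NUL):

	#include <fcntl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int kfd = open("/boot/Image", O_RDONLY);
	int ifd = open("/boot/initrd.img", O_RDONLY);
	const char cmdline[] = "console=ttyAMA0 root=/dev/vda";

	long ret = syscall(__NR_kexec_file_load, kfd, ifd,
			   sizeof(cmdline), cmdline, 0UL);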
+config KEXEC_VERIFY_SIG
+ bool "Verify kernel signature during kexec_file_load() syscall"
+ depends on KEXEC_FILE
+ help
+ Select this option to verify the signature of the loaded kernel
+ image. If configured, any attempt to load an image without a
+ valid signature will fail.
+
+ In addition to that option, you need to enable signature
+ verification for the corresponding kernel image type being
+ loaded in order for this to work.
+
+config KEXEC_IMAGE_VERIFY_SIG
+ bool "Enable Image signature verification support"
+ default y
+ depends on KEXEC_VERIFY_SIG
+ depends on EFI && SIGNED_PE_FILE_VERIFICATION
+ help
+ Enable Image signature verification support.
+
+comment "Support for PE file signature verification disabled"
+ depends on KEXEC_VERIFY_SIG
+ depends on !EFI || !SIGNED_PE_FILE_VERIFICATION
+
config CRASH_DUMP
bool "Build kdump crash kernel"
help
@@ -957,6 +1065,20 @@ config ARM64_SSBD
If unsure, say Y.
+config RODATA_FULL_DEFAULT_ENABLED
+ bool "Apply r/o permissions of VM areas also to their linear aliases"
+ default y
+ help
+ Apply read-only attributes of VM areas to the linear alias of
+ the backing pages as well. This prevents code or read-only data
+ from being modified (inadvertently or intentionally) via another
+ mapping of the same memory page. This additional enhancement can
+ be turned off at runtime by passing rodata=[off|on] (and turned on
+	  with rodata=full if this option is set to 'n').
+
+ This requires the linear region to be mapped down to pages,
+ which may adversely affect performance in some cases.
+
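For reference, the runtime control mentioned above is a kernel command-line parameter; assuming this option is built in ('y'), the settings behave as follows:

rodata=full    # the default with this option 'y': also protect the linear alias
rodata=on      # protect only the kernel mappings; leave the linear alias writable
rodata=off     # no read-only protection at all (debugging only)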
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
@@ -1162,6 +1284,29 @@ config ARM64_CNP
endmenu
+menu "ARMv8.3 architectural features"
+
+config ARM64_PTR_AUTH
+ bool "Enable support for pointer authentication"
+ default y
+ help
+ Pointer authentication (part of the ARMv8.3 Extensions) provides
+ instructions for signing and authenticating pointers against secret
+ keys, which can be used to mitigate Return Oriented Programming (ROP)
+ and other attacks.
+
+ This option enables these instructions at EL0 (i.e. for userspace).
+
+ Choosing this option will cause the kernel to initialise secret keys
+ for each process at exec() time, with these keys being
+ context-switched along with the process.
+
+ The feature is detected at runtime. If the feature is not present in
+ hardware it will not be advertised to userspace nor will it be
+ enabled.
+
+endmenu
+
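Since the help text says the feature is only advertised to userspace when present, a small detection sketch (not part of this diff) may help; the HWCAP_PACA/HWCAP_PACG bit values below follow the arm64 uapi header and are stated here as assumptions:

/* sketch: detect pointer authentication support from userspace */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_PACA
#define HWCAP_PACA	(1UL << 30)	/* address authentication */
#endif
#ifndef HWCAP_PACG
#define HWCAP_PACG	(1UL << 31)	/* generic authentication */
#endif

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("PAC address keys: %s\n", (hwcap & HWCAP_PACA) ? "yes" : "no");
	printf("PAC generic key:  %s\n", (hwcap & HWCAP_PACG) ? "yes" : "no");
	return 0;
}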
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
@@ -1246,6 +1391,13 @@ config RANDOMIZE_MODULE_REGION_FULL
a limited range that contains the [_stext, _etext] interval of the
core kernel, so branch relocations are always in range.
+config CC_HAVE_STACKPROTECTOR_SYSREG
+ def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)
+
+config STACKPROTECTOR_PER_TASK
+ def_bool y
+ depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_SYSREG
+
endmenu
menu "Boot options"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 6cb9fc7e9382..398bdb81a900 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -18,7 +18,7 @@ ifeq ($(CONFIG_RELOCATABLE), y)
# Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
# for relative relocs, since this leads to better Image compression
# with the relocation offsets always being zero.
-LDFLAGS_vmlinux += -pie -shared -Bsymbolic \
+LDFLAGS_vmlinux += -shared -Bsymbolic -z notext -z norelro \
$(call ld-option, --no-apply-dynamic-relocs)
endif
@@ -56,6 +56,16 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
+ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
+prepare: stack_protector_prepare
+stack_protector_prepare: prepare0
+ $(eval KBUILD_CFLAGS += -mstack-protector-guard=sysreg \
+ -mstack-protector-guard-reg=sp_el0 \
+ -mstack-protector-guard-offset=$(shell \
+ awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
+ include/generated/asm-offsets.h))
+endif
+
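To illustrate what the flags above achieve (a sketch, not part of this diff): instead of comparing against the global __stack_chk_guard, each function prologue/epilogue loads the canary from *(sp_el0 + TSK_STACK_CANARY). In the arm64 kernel sp_el0 holds the current task_struct pointer, so every task gets its own canary; TSK_STACK_CANARY is the constant the awk invocation above extracts from the generated asm-offsets.h.

/* roughly what the compiler-emitted guard load amounts to */
static inline unsigned long per_task_canary(void)
{
	unsigned long tsk;

	/* sp_el0 holds the current task_struct pointer in kernel mode */
	asm ("mrs %0, sp_el0" : "=r" (tsk));
	/* TSK_STACK_CANARY comes from include/generated/asm-offsets.h */
	return *(unsigned long *)(tsk + TSK_STACK_CANARY);
}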
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
index 64632c873888..01ea662afba8 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
@@ -20,28 +20,24 @@
compatible = "arm,cortex-a72", "arm,armv8";
reg = <0x000>;
enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
};
cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a72", "arm,armv8";
reg = <0x001>;
enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
};
cpu2: cpu@100 {
device_type = "cpu";
compatible = "arm,cortex-a72", "arm,armv8";
reg = <0x100>;
enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
};
cpu3: cpu@101 {
device_type = "cpu";
compatible = "arm,cortex-a72", "arm,armv8";
reg = <0x101>;
enable-method = "psci";
- cpu-idle-states = <&CPU_SLEEP_0>;
};
};
};
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
index 073610ac0a53..7d94c1fa592a 100644
--- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi
@@ -28,33 +28,6 @@
method = "smc";
};
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- idle_states {
- entry_method = "arm,pcsi";
-
- CPU_SLEEP_0: cpu-sleep-0 {
- compatible = "arm,idle-state";
- local-timer-stop;
- arm,psci-suspend-param = <0x0010000>;
- entry-latency-us = <80>;
- exit-latency-us = <160>;
- min-residency-us = <320>;
- };
-
- CLUSTER_SLEEP_0: cluster-sleep-0 {
- compatible = "arm,idle-state";
- local-timer-stop;
- arm,psci-suspend-param = <0x1010000>;
- entry-latency-us = <500>;
- exit-latency-us = <1000>;
- min-residency-us = <2500>;
- };
- };
- };
-
ap806 {
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
index 5d6005c9b097..710c5c3d87d3 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
@@ -16,8 +16,13 @@
model = "Bananapi BPI-R64";
compatible = "bananapi,bpi-r64", "mediatek,mt7622";
+ aliases {
+ serial0 = &uart0;
+ };
+
chosen {
- bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+ stdout-path = "serial0:115200n8";
+ bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
};
cpus {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index dcad0869b84c..3f783348c66a 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -17,8 +17,13 @@
model = "MediaTek MT7622 RFB1 board";
compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
+ aliases {
+ serial0 = &uart0;
+ };
+
chosen {
- bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+ stdout-path = "serial0:115200n8";
+ bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
};
cpus {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index fe0c875f1d95..14a1028ca3a6 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -227,16 +227,6 @@
#reset-cells = <1>;
};
- timer: timer@10004000 {
- compatible = "mediatek,mt7622-timer",
- "mediatek,mt6577-timer";
- reg = <0 0x10004000 0 0x80>;
- interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&infracfg CLK_INFRA_APXGPT_PD>,
- <&topckgen CLK_TOP_RTC>;
- clock-names = "system-clk", "rtc-clk";
- };
-
scpsys: scpsys@10006000 {
compatible = "mediatek,mt7622-scpsys",
"syscon";
diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
index b4276da1fb0d..11fd1fe8bdb5 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
@@ -241,3 +241,7 @@
};
};
};
+
+&tlmm {
+ gpio-reserved-ranges = <0 4>, <81 4>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
index eedfaf8922e2..b3def0358177 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts
@@ -343,6 +343,12 @@
};
};
+&gcc {
+ protected-clocks = <GCC_QSPI_CORE_CLK>,
+ <GCC_QSPI_CORE_CLK_SRC>,
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+};
+
&i2c10 {
status = "okay";
clock-frequency = <400000>;
@@ -352,6 +358,10 @@
status = "okay";
};
+&tlmm {
+ gpio-reserved-ranges = <0 4>, <81 4>;
+};
+
&uart9 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index 2dceeea29b83..1e6a71066c16 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -153,7 +153,7 @@
};
&pcie0 {
- ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_LOW>;
+ ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>;
num-lanes = <4>;
pinctrl-names = "default";
pinctrl-0 = <&pcie_clkreqn_cpm>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
index 6c8c4ab044aa..56abbb08c133 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
@@ -57,18 +57,6 @@
regulator-always-on;
vin-supply = <&vcc_sys>;
};
-
- vdd_log: vdd-log {
- compatible = "pwm-regulator";
- pwms = <&pwm2 0 25000 0>;
- regulator-name = "vdd_log";
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1400000>;
- regulator-always-on;
- regulator-boot-on;
- vin-supply = <&vcc_sys>;
- };
-
};
&cpu_l0 {
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index affc3c309353..8d7b47f9dfbf 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -36,7 +36,7 @@
wkup_uart0: serial@42300000 {
compatible = "ti,am654-uart";
- reg = <0x00 0x42300000 0x00 0x100>;
+ reg = <0x42300000 0x100>;
reg-shift = <2>;
reg-io-width = <4>;
interrupts = <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index a5606823ed4d..d9a523ecdd83 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -101,11 +101,16 @@ config CRYPTO_AES_ARM64_NEON_BLK
select CRYPTO_SIMD
config CRYPTO_CHACHA20_NEON
- tristate "NEON accelerated ChaCha20 symmetric cipher"
+ tristate "ChaCha20, XChaCha20, and XChaCha12 stream ciphers using NEON instructions"
depends on KERNEL_MODE_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_CHACHA20
+config CRYPTO_NHPOLY1305_NEON
+ tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)"
+ depends on KERNEL_MODE_NEON
+ select CRYPTO_NHPOLY1305
+
config CRYPTO_AES_ARM64_BS
tristate "AES in ECB/CBC/CTR/XTS modes using bit-sliced NEON algorithm"
depends on KERNEL_MODE_NEON
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index f476fede09ba..a4ffd9fe3265 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -50,8 +50,11 @@ sha256-arm64-y := sha256-glue.o sha256-core.o
obj-$(CONFIG_CRYPTO_SHA512_ARM64) += sha512-arm64.o
sha512-arm64-y := sha512-glue.o sha512-core.o
-obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
-chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
+obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o
+chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o
+
+obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o
+nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o
aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
diff --git a/arch/arm64/crypto/chacha20-neon-core.S b/arch/arm64/crypto/chacha-neon-core.S
index 13c85e272c2a..021bb9e9784b 100644
--- a/arch/arm64/crypto/chacha20-neon-core.S
+++ b/arch/arm64/crypto/chacha-neon-core.S
@@ -1,13 +1,13 @@
/*
- * ChaCha20 256-bit cipher algorithm, RFC7539, arm64 NEON functions
+ * ChaCha/XChaCha NEON helper functions
*
- * Copyright (C) 2016 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2016-2018 Linaro, Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * Based on:
+ * Originally based on:
* ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions
*
* Copyright (C) 2015 Martin Willi
@@ -19,29 +19,27 @@
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/cache.h>
.text
.align 6
-ENTRY(chacha20_block_xor_neon)
- // x0: Input state matrix, s
- // x1: 1 data block output, o
- // x2: 1 data block input, i
-
- //
- // This function encrypts one ChaCha20 block by loading the state matrix
- // in four NEON registers. It performs matrix operation on four words in
- // parallel, but requires shuffling to rearrange the words after each
- // round.
- //
-
- // x0..3 = s0..3
- adr x3, ROT8
- ld1 {v0.4s-v3.4s}, [x0]
- ld1 {v8.4s-v11.4s}, [x0]
- ld1 {v12.4s}, [x3]
+/*
+ * chacha_permute - permute one block
+ *
+ * Permute one 64-byte block where the state matrix is stored in the four NEON
+ * registers v0-v3. It performs matrix operations on four words in parallel,
+ * but requires shuffling to rearrange the words after each round.
+ *
+ * The round count is given in w3.
+ *
+ * Clobbers: w3, x10, v4, v12
+ */
+chacha_permute:
- mov x3, #10
+ adr_l x10, ROT8
+ ld1 {v12.4s}, [x10]
.Ldoubleround:
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
@@ -102,9 +100,27 @@ ENTRY(chacha20_block_xor_neon)
// x3 = shuffle32(x3, MASK(0, 3, 2, 1))
ext v3.16b, v3.16b, v3.16b, #4
- subs x3, x3, #1
+ subs w3, w3, #2
b.ne .Ldoubleround
+ ret
+ENDPROC(chacha_permute)
+
+ENTRY(chacha_block_xor_neon)
+ // x0: Input state matrix, s
+ // x1: 1 data block output, o
+ // x2: 1 data block input, i
+ // w3: nrounds
+
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+
+ // x0..3 = s0..3
+ ld1 {v0.4s-v3.4s}, [x0]
+ ld1 {v8.4s-v11.4s}, [x0]
+
+ bl chacha_permute
+
ld1 {v4.16b-v7.16b}, [x2]
// o0 = i0 ^ (x0 + s0)
@@ -125,71 +141,156 @@ ENTRY(chacha20_block_xor_neon)
st1 {v0.16b-v3.16b}, [x1]
+ ldp x29, x30, [sp], #16
ret
-ENDPROC(chacha20_block_xor_neon)
+ENDPROC(chacha_block_xor_neon)
+
+ENTRY(hchacha_block_neon)
+ // x0: Input state matrix, s
+ // x1: output (8 32-bit words)
+ // w2: nrounds
+
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+
+ ld1 {v0.4s-v3.4s}, [x0]
+
+ mov w3, w2
+ bl chacha_permute
+
+ st1 {v0.16b}, [x1], #16
+ st1 {v3.16b}, [x1]
+
+ ldp x29, x30, [sp], #16
+ ret
+ENDPROC(hchacha_block_neon)
+
+ a0 .req w12
+ a1 .req w13
+ a2 .req w14
+ a3 .req w15
+ a4 .req w16
+ a5 .req w17
+ a6 .req w19
+ a7 .req w20
+ a8 .req w21
+ a9 .req w22
+ a10 .req w23
+ a11 .req w24
+ a12 .req w25
+ a13 .req w26
+ a14 .req w27
+ a15 .req w28
.align 6
-ENTRY(chacha20_4block_xor_neon)
+ENTRY(chacha_4block_xor_neon)
+ frame_push 10
+
// x0: Input state matrix, s
// x1: 4 data blocks output, o
// x2: 4 data blocks input, i
+ // w3: nrounds
+ // x4: byte count
+
+ adr_l x10, .Lpermute
+ and x5, x4, #63
+ add x10, x10, x5
+ add x11, x10, #64
//
- // This function encrypts four consecutive ChaCha20 blocks by loading
+ // This function encrypts four consecutive ChaCha blocks by loading
// the state matrix in NEON registers four times. The algorithm performs
// each operation on the corresponding word of each state matrix, hence
// requires no word shuffling. For final XORing step we transpose the
// matrix by interleaving 32- and then 64-bit words, which allows us to
// do XOR in NEON registers.
//
- adr x3, CTRINC // ... and ROT8
- ld1 {v30.4s-v31.4s}, [x3]
+ // At the same time, a fifth block is encrypted in parallel using
+	// scalar registers.
+ //
+ adr_l x9, CTRINC // ... and ROT8
+ ld1 {v30.4s-v31.4s}, [x9]
// x0..15[0-3] = s0..3[0..3]
- mov x4, x0
- ld4r { v0.4s- v3.4s}, [x4], #16
- ld4r { v4.4s- v7.4s}, [x4], #16
- ld4r { v8.4s-v11.4s}, [x4], #16
- ld4r {v12.4s-v15.4s}, [x4]
-
- // x12 += counter values 0-3
+ add x8, x0, #16
+ ld4r { v0.4s- v3.4s}, [x0]
+ ld4r { v4.4s- v7.4s}, [x8], #16
+ ld4r { v8.4s-v11.4s}, [x8], #16
+ ld4r {v12.4s-v15.4s}, [x8]
+
+ mov a0, v0.s[0]
+ mov a1, v1.s[0]
+ mov a2, v2.s[0]
+ mov a3, v3.s[0]
+ mov a4, v4.s[0]
+ mov a5, v5.s[0]
+ mov a6, v6.s[0]
+ mov a7, v7.s[0]
+ mov a8, v8.s[0]
+ mov a9, v9.s[0]
+ mov a10, v10.s[0]
+ mov a11, v11.s[0]
+ mov a12, v12.s[0]
+ mov a13, v13.s[0]
+ mov a14, v14.s[0]
+ mov a15, v15.s[0]
+
+ // x12 += counter values 1-4
add v12.4s, v12.4s, v30.4s
- mov x3, #10
-
.Ldoubleround4:
// x0 += x4, x12 = rotl32(x12 ^ x0, 16)
// x1 += x5, x13 = rotl32(x13 ^ x1, 16)
// x2 += x6, x14 = rotl32(x14 ^ x2, 16)
// x3 += x7, x15 = rotl32(x15 ^ x3, 16)
add v0.4s, v0.4s, v4.4s
+ add a0, a0, a4
add v1.4s, v1.4s, v5.4s
+ add a1, a1, a5
add v2.4s, v2.4s, v6.4s
+ add a2, a2, a6
add v3.4s, v3.4s, v7.4s
+ add a3, a3, a7
eor v12.16b, v12.16b, v0.16b
+ eor a12, a12, a0
eor v13.16b, v13.16b, v1.16b
+ eor a13, a13, a1
eor v14.16b, v14.16b, v2.16b
+ eor a14, a14, a2
eor v15.16b, v15.16b, v3.16b
+ eor a15, a15, a3
rev32 v12.8h, v12.8h
+ ror a12, a12, #16
rev32 v13.8h, v13.8h
+ ror a13, a13, #16
rev32 v14.8h, v14.8h
+ ror a14, a14, #16
rev32 v15.8h, v15.8h
+ ror a15, a15, #16
// x8 += x12, x4 = rotl32(x4 ^ x8, 12)
// x9 += x13, x5 = rotl32(x5 ^ x9, 12)
// x10 += x14, x6 = rotl32(x6 ^ x10, 12)
// x11 += x15, x7 = rotl32(x7 ^ x11, 12)
add v8.4s, v8.4s, v12.4s
+ add a8, a8, a12
add v9.4s, v9.4s, v13.4s
+ add a9, a9, a13
add v10.4s, v10.4s, v14.4s
+ add a10, a10, a14
add v11.4s, v11.4s, v15.4s
+ add a11, a11, a15
eor v16.16b, v4.16b, v8.16b
+ eor a4, a4, a8
eor v17.16b, v5.16b, v9.16b
+ eor a5, a5, a9
eor v18.16b, v6.16b, v10.16b
+ eor a6, a6, a10
eor v19.16b, v7.16b, v11.16b
+ eor a7, a7, a11
shl v4.4s, v16.4s, #12
shl v5.4s, v17.4s, #12
@@ -197,42 +298,66 @@ ENTRY(chacha20_4block_xor_neon)
shl v7.4s, v19.4s, #12
sri v4.4s, v16.4s, #20
+ ror a4, a4, #20
sri v5.4s, v17.4s, #20
+ ror a5, a5, #20
sri v6.4s, v18.4s, #20
+ ror a6, a6, #20
sri v7.4s, v19.4s, #20
+ ror a7, a7, #20
// x0 += x4, x12 = rotl32(x12 ^ x0, 8)
// x1 += x5, x13 = rotl32(x13 ^ x1, 8)
// x2 += x6, x14 = rotl32(x14 ^ x2, 8)
// x3 += x7, x15 = rotl32(x15 ^ x3, 8)
add v0.4s, v0.4s, v4.4s
+ add a0, a0, a4
add v1.4s, v1.4s, v5.4s
+ add a1, a1, a5
add v2.4s, v2.4s, v6.4s
+ add a2, a2, a6
add v3.4s, v3.4s, v7.4s
+ add a3, a3, a7
eor v12.16b, v12.16b, v0.16b
+ eor a12, a12, a0
eor v13.16b, v13.16b, v1.16b
+ eor a13, a13, a1
eor v14.16b, v14.16b, v2.16b
+ eor a14, a14, a2
eor v15.16b, v15.16b, v3.16b
+ eor a15, a15, a3
tbl v12.16b, {v12.16b}, v31.16b
+ ror a12, a12, #24
tbl v13.16b, {v13.16b}, v31.16b
+ ror a13, a13, #24
tbl v14.16b, {v14.16b}, v31.16b
+ ror a14, a14, #24
tbl v15.16b, {v15.16b}, v31.16b
+ ror a15, a15, #24
// x8 += x12, x4 = rotl32(x4 ^ x8, 7)
// x9 += x13, x5 = rotl32(x5 ^ x9, 7)
// x10 += x14, x6 = rotl32(x6 ^ x10, 7)
// x11 += x15, x7 = rotl32(x7 ^ x11, 7)
add v8.4s, v8.4s, v12.4s
+ add a8, a8, a12
add v9.4s, v9.4s, v13.4s
+ add a9, a9, a13
add v10.4s, v10.4s, v14.4s
+ add a10, a10, a14
add v11.4s, v11.4s, v15.4s
+ add a11, a11, a15
eor v16.16b, v4.16b, v8.16b
+ eor a4, a4, a8
eor v17.16b, v5.16b, v9.16b
+ eor a5, a5, a9
eor v18.16b, v6.16b, v10.16b
+ eor a6, a6, a10
eor v19.16b, v7.16b, v11.16b
+ eor a7, a7, a11
shl v4.4s, v16.4s, #7
shl v5.4s, v17.4s, #7
@@ -240,42 +365,66 @@ ENTRY(chacha20_4block_xor_neon)
shl v7.4s, v19.4s, #7
sri v4.4s, v16.4s, #25
+ ror a4, a4, #25
sri v5.4s, v17.4s, #25
+ ror a5, a5, #25
sri v6.4s, v18.4s, #25
+ ror a6, a6, #25
sri v7.4s, v19.4s, #25
+ ror a7, a7, #25
// x0 += x5, x15 = rotl32(x15 ^ x0, 16)
// x1 += x6, x12 = rotl32(x12 ^ x1, 16)
// x2 += x7, x13 = rotl32(x13 ^ x2, 16)
// x3 += x4, x14 = rotl32(x14 ^ x3, 16)
add v0.4s, v0.4s, v5.4s
+ add a0, a0, a5
add v1.4s, v1.4s, v6.4s
+ add a1, a1, a6
add v2.4s, v2.4s, v7.4s
+ add a2, a2, a7
add v3.4s, v3.4s, v4.4s
+ add a3, a3, a4
eor v15.16b, v15.16b, v0.16b
+ eor a15, a15, a0
eor v12.16b, v12.16b, v1.16b
+ eor a12, a12, a1
eor v13.16b, v13.16b, v2.16b
+ eor a13, a13, a2
eor v14.16b, v14.16b, v3.16b
+ eor a14, a14, a3
rev32 v15.8h, v15.8h
+ ror a15, a15, #16
rev32 v12.8h, v12.8h
+ ror a12, a12, #16
rev32 v13.8h, v13.8h
+ ror a13, a13, #16
rev32 v14.8h, v14.8h
+ ror a14, a14, #16
// x10 += x15, x5 = rotl32(x5 ^ x10, 12)
// x11 += x12, x6 = rotl32(x6 ^ x11, 12)
// x8 += x13, x7 = rotl32(x7 ^ x8, 12)
// x9 += x14, x4 = rotl32(x4 ^ x9, 12)
add v10.4s, v10.4s, v15.4s
+ add a10, a10, a15
add v11.4s, v11.4s, v12.4s
+ add a11, a11, a12
add v8.4s, v8.4s, v13.4s
+ add a8, a8, a13
add v9.4s, v9.4s, v14.4s
+ add a9, a9, a14
eor v16.16b, v5.16b, v10.16b
+ eor a5, a5, a10
eor v17.16b, v6.16b, v11.16b
+ eor a6, a6, a11
eor v18.16b, v7.16b, v8.16b
+ eor a7, a7, a8
eor v19.16b, v4.16b, v9.16b
+ eor a4, a4, a9
shl v5.4s, v16.4s, #12
shl v6.4s, v17.4s, #12
@@ -283,42 +432,66 @@ ENTRY(chacha20_4block_xor_neon)
shl v4.4s, v19.4s, #12
sri v5.4s, v16.4s, #20
+ ror a5, a5, #20
sri v6.4s, v17.4s, #20
+ ror a6, a6, #20
sri v7.4s, v18.4s, #20
+ ror a7, a7, #20
sri v4.4s, v19.4s, #20
+ ror a4, a4, #20
// x0 += x5, x15 = rotl32(x15 ^ x0, 8)
// x1 += x6, x12 = rotl32(x12 ^ x1, 8)
// x2 += x7, x13 = rotl32(x13 ^ x2, 8)
// x3 += x4, x14 = rotl32(x14 ^ x3, 8)
add v0.4s, v0.4s, v5.4s
+ add a0, a0, a5
add v1.4s, v1.4s, v6.4s
+ add a1, a1, a6
add v2.4s, v2.4s, v7.4s
+ add a2, a2, a7
add v3.4s, v3.4s, v4.4s
+ add a3, a3, a4
eor v15.16b, v15.16b, v0.16b
+ eor a15, a15, a0
eor v12.16b, v12.16b, v1.16b
+ eor a12, a12, a1
eor v13.16b, v13.16b, v2.16b
+ eor a13, a13, a2
eor v14.16b, v14.16b, v3.16b
+ eor a14, a14, a3
tbl v15.16b, {v15.16b}, v31.16b
+ ror a15, a15, #24
tbl v12.16b, {v12.16b}, v31.16b
+ ror a12, a12, #24
tbl v13.16b, {v13.16b}, v31.16b
+ ror a13, a13, #24
tbl v14.16b, {v14.16b}, v31.16b
+ ror a14, a14, #24
// x10 += x15, x5 = rotl32(x5 ^ x10, 7)
// x11 += x12, x6 = rotl32(x6 ^ x11, 7)
// x8 += x13, x7 = rotl32(x7 ^ x8, 7)
// x9 += x14, x4 = rotl32(x4 ^ x9, 7)
add v10.4s, v10.4s, v15.4s
+ add a10, a10, a15
add v11.4s, v11.4s, v12.4s
+ add a11, a11, a12
add v8.4s, v8.4s, v13.4s
+ add a8, a8, a13
add v9.4s, v9.4s, v14.4s
+ add a9, a9, a14
eor v16.16b, v5.16b, v10.16b
+ eor a5, a5, a10
eor v17.16b, v6.16b, v11.16b
+ eor a6, a6, a11
eor v18.16b, v7.16b, v8.16b
+ eor a7, a7, a8
eor v19.16b, v4.16b, v9.16b
+ eor a4, a4, a9
shl v5.4s, v16.4s, #7
shl v6.4s, v17.4s, #7
@@ -326,11 +499,15 @@ ENTRY(chacha20_4block_xor_neon)
shl v4.4s, v19.4s, #7
sri v5.4s, v16.4s, #25
+ ror a5, a5, #25
sri v6.4s, v17.4s, #25
+ ror a6, a6, #25
sri v7.4s, v18.4s, #25
+ ror a7, a7, #25
sri v4.4s, v19.4s, #25
+ ror a4, a4, #25
- subs x3, x3, #1
+ subs w3, w3, #2
b.ne .Ldoubleround4
ld4r {v16.4s-v19.4s}, [x0], #16
@@ -344,9 +521,17 @@ ENTRY(chacha20_4block_xor_neon)
// x2[0-3] += s0[2]
// x3[0-3] += s0[3]
add v0.4s, v0.4s, v16.4s
+ mov w6, v16.s[0]
+ mov w7, v17.s[0]
add v1.4s, v1.4s, v17.4s
+ mov w8, v18.s[0]
+ mov w9, v19.s[0]
add v2.4s, v2.4s, v18.4s
+ add a0, a0, w6
+ add a1, a1, w7
add v3.4s, v3.4s, v19.4s
+ add a2, a2, w8
+ add a3, a3, w9
ld4r {v24.4s-v27.4s}, [x0], #16
ld4r {v28.4s-v31.4s}, [x0]
@@ -356,95 +541,304 @@ ENTRY(chacha20_4block_xor_neon)
// x6[0-3] += s1[2]
// x7[0-3] += s1[3]
add v4.4s, v4.4s, v20.4s
+ mov w6, v20.s[0]
+ mov w7, v21.s[0]
add v5.4s, v5.4s, v21.4s
+ mov w8, v22.s[0]
+ mov w9, v23.s[0]
add v6.4s, v6.4s, v22.4s
+ add a4, a4, w6
+ add a5, a5, w7
add v7.4s, v7.4s, v23.4s
+ add a6, a6, w8
+ add a7, a7, w9
// x8[0-3] += s2[0]
// x9[0-3] += s2[1]
// x10[0-3] += s2[2]
// x11[0-3] += s2[3]
add v8.4s, v8.4s, v24.4s
+ mov w6, v24.s[0]
+ mov w7, v25.s[0]
add v9.4s, v9.4s, v25.4s
+ mov w8, v26.s[0]
+ mov w9, v27.s[0]
add v10.4s, v10.4s, v26.4s
+ add a8, a8, w6
+ add a9, a9, w7
add v11.4s, v11.4s, v27.4s
+ add a10, a10, w8
+ add a11, a11, w9
// x12[0-3] += s3[0]
// x13[0-3] += s3[1]
// x14[0-3] += s3[2]
// x15[0-3] += s3[3]
add v12.4s, v12.4s, v28.4s
+ mov w6, v28.s[0]
+ mov w7, v29.s[0]
add v13.4s, v13.4s, v29.4s
+ mov w8, v30.s[0]
+ mov w9, v31.s[0]
add v14.4s, v14.4s, v30.4s
+ add a12, a12, w6
+ add a13, a13, w7
add v15.4s, v15.4s, v31.4s
+ add a14, a14, w8
+ add a15, a15, w9
// interleave 32-bit words in state n, n+1
+ ldp w6, w7, [x2], #64
zip1 v16.4s, v0.4s, v1.4s
+ ldp w8, w9, [x2, #-56]
+ eor a0, a0, w6
zip2 v17.4s, v0.4s, v1.4s
+ eor a1, a1, w7
zip1 v18.4s, v2.4s, v3.4s
+ eor a2, a2, w8
zip2 v19.4s, v2.4s, v3.4s
+ eor a3, a3, w9
+ ldp w6, w7, [x2, #-48]
zip1 v20.4s, v4.4s, v5.4s
+ ldp w8, w9, [x2, #-40]
+ eor a4, a4, w6
zip2 v21.4s, v4.4s, v5.4s
+ eor a5, a5, w7
zip1 v22.4s, v6.4s, v7.4s
+ eor a6, a6, w8
zip2 v23.4s, v6.4s, v7.4s
+ eor a7, a7, w9
+ ldp w6, w7, [x2, #-32]
zip1 v24.4s, v8.4s, v9.4s
+ ldp w8, w9, [x2, #-24]
+ eor a8, a8, w6
zip2 v25.4s, v8.4s, v9.4s
+ eor a9, a9, w7
zip1 v26.4s, v10.4s, v11.4s
+ eor a10, a10, w8
zip2 v27.4s, v10.4s, v11.4s
+ eor a11, a11, w9
+ ldp w6, w7, [x2, #-16]
zip1 v28.4s, v12.4s, v13.4s
+ ldp w8, w9, [x2, #-8]
+ eor a12, a12, w6
zip2 v29.4s, v12.4s, v13.4s
+ eor a13, a13, w7
zip1 v30.4s, v14.4s, v15.4s
+ eor a14, a14, w8
zip2 v31.4s, v14.4s, v15.4s
+ eor a15, a15, w9
+
+ mov x3, #64
+ subs x5, x4, #128
+ add x6, x5, x2
+ csel x3, x3, xzr, ge
+ csel x2, x2, x6, ge
// interleave 64-bit words in state n, n+2
zip1 v0.2d, v16.2d, v18.2d
zip2 v4.2d, v16.2d, v18.2d
+ stp a0, a1, [x1], #64
zip1 v8.2d, v17.2d, v19.2d
zip2 v12.2d, v17.2d, v19.2d
- ld1 {v16.16b-v19.16b}, [x2], #64
+ stp a2, a3, [x1, #-56]
+ ld1 {v16.16b-v19.16b}, [x2], x3
+
+ subs x6, x4, #192
+ ccmp x3, xzr, #4, lt
+ add x7, x6, x2
+ csel x3, x3, xzr, eq
+ csel x2, x2, x7, eq
zip1 v1.2d, v20.2d, v22.2d
zip2 v5.2d, v20.2d, v22.2d
+ stp a4, a5, [x1, #-48]
zip1 v9.2d, v21.2d, v23.2d
zip2 v13.2d, v21.2d, v23.2d
- ld1 {v20.16b-v23.16b}, [x2], #64
+ stp a6, a7, [x1, #-40]
+ ld1 {v20.16b-v23.16b}, [x2], x3
+
+ subs x7, x4, #256
+ ccmp x3, xzr, #4, lt
+ add x8, x7, x2
+ csel x3, x3, xzr, eq
+ csel x2, x2, x8, eq
zip1 v2.2d, v24.2d, v26.2d
zip2 v6.2d, v24.2d, v26.2d
+ stp a8, a9, [x1, #-32]
zip1 v10.2d, v25.2d, v27.2d
zip2 v14.2d, v25.2d, v27.2d
- ld1 {v24.16b-v27.16b}, [x2], #64
+ stp a10, a11, [x1, #-24]
+ ld1 {v24.16b-v27.16b}, [x2], x3
+
+ subs x8, x4, #320
+ ccmp x3, xzr, #4, lt
+ add x9, x8, x2
+ csel x2, x2, x9, eq
zip1 v3.2d, v28.2d, v30.2d
zip2 v7.2d, v28.2d, v30.2d
+ stp a12, a13, [x1, #-16]
zip1 v11.2d, v29.2d, v31.2d
zip2 v15.2d, v29.2d, v31.2d
+ stp a14, a15, [x1, #-8]
ld1 {v28.16b-v31.16b}, [x2]
// xor with corresponding input, write to output
+ tbnz x5, #63, 0f
eor v16.16b, v16.16b, v0.16b
eor v17.16b, v17.16b, v1.16b
eor v18.16b, v18.16b, v2.16b
eor v19.16b, v19.16b, v3.16b
+ st1 {v16.16b-v19.16b}, [x1], #64
+ cbz x5, .Lout
+
+ tbnz x6, #63, 1f
eor v20.16b, v20.16b, v4.16b
eor v21.16b, v21.16b, v5.16b
- st1 {v16.16b-v19.16b}, [x1], #64
eor v22.16b, v22.16b, v6.16b
eor v23.16b, v23.16b, v7.16b
+ st1 {v20.16b-v23.16b}, [x1], #64
+ cbz x6, .Lout
+
+ tbnz x7, #63, 2f
eor v24.16b, v24.16b, v8.16b
eor v25.16b, v25.16b, v9.16b
- st1 {v20.16b-v23.16b}, [x1], #64
eor v26.16b, v26.16b, v10.16b
eor v27.16b, v27.16b, v11.16b
- eor v28.16b, v28.16b, v12.16b
st1 {v24.16b-v27.16b}, [x1], #64
+ cbz x7, .Lout
+
+ tbnz x8, #63, 3f
+ eor v28.16b, v28.16b, v12.16b
eor v29.16b, v29.16b, v13.16b
eor v30.16b, v30.16b, v14.16b
eor v31.16b, v31.16b, v15.16b
st1 {v28.16b-v31.16b}, [x1]
+.Lout: frame_pop
ret
-ENDPROC(chacha20_4block_xor_neon)
-CTRINC: .word 0, 1, 2, 3
+ // fewer than 128 bytes of in/output
+0: ld1 {v8.16b}, [x10]
+ ld1 {v9.16b}, [x11]
+ movi v10.16b, #16
+ sub x2, x1, #64
+ add x1, x1, x5
+ ld1 {v16.16b-v19.16b}, [x2]
+ tbl v4.16b, {v0.16b-v3.16b}, v8.16b
+ tbx v20.16b, {v16.16b-v19.16b}, v9.16b
+ add v8.16b, v8.16b, v10.16b
+ add v9.16b, v9.16b, v10.16b
+ tbl v5.16b, {v0.16b-v3.16b}, v8.16b
+ tbx v21.16b, {v16.16b-v19.16b}, v9.16b
+ add v8.16b, v8.16b, v10.16b
+ add v9.16b, v9.16b, v10.16b
+ tbl v6.16b, {v0.16b-v3.16b}, v8.16b
+ tbx v22.16b, {v16.16b-v19.16b}, v9.16b
+ add v8.16b, v8.16b, v10.16b
+ add v9.16b, v9.16b, v10.16b
+ tbl v7.16b, {v0.16b-v3.16b}, v8.16b
+ tbx v23.16b, {v16.16b-v19.16b}, v9.16b
+
+ eor v20.16b, v20.16b, v4.16b
+ eor v21.16b, v21.16b, v5.16b
+ eor v22.16b, v22.16b, v6.16b
+ eor v23.16b, v23.16b, v7.16b
+ st1 {v20.16b-v23.16b}, [x1]
+ b .Lout
+
+ // fewer than 192 bytes of in/output
+1: ld1 {v8.16b}, [x10]
+ ld1 {v9.16b}, [x11]
+ movi v10.16b, #16
+ add x1, x1, x6
+ tbl v0.16b, {v4.16b-v7.16b}, v8.16b
+ tbx v20.16b, {v16.16b-v19.16b}, v9.16b
+ add v8.16b, v8.16b, v10.16b
+ add v9.16b, v9.16b, v10.16b
+ tbl v1.16b, {v4.16b-v7.16b}, v8.16b
+ tbx v21.16b, {v16.16b-v19.16b}, v9.16b
+ add v8.16b, v8.16b, v10.16b
+ add v9.16b, v9.16b, v10.16b
+ tbl v2.16b, {v4.16b-v7.16b}, v8.16b
+ tbx v22.16b, {v16.16b-v19.16b}, v9.16b
+ add v8.16b, v8.16b, v10.16b
+ add v9.16b, v9.16b, v10.16b
+ tbl v3.16b, {v4.16b-v7.16b}, v8.16b
+ tbx v23.16b, {v16.16b-v19.16b}, v9.16b
+
+ eor v20.16b, v20.16b, v0.16b
+ eor v21.16b, v21.16b, v1.16b
+ eor v22.16b, v22.16b, v2.16b
+ eor v23.16b, v23.16b, v3.16b
+ st1 {v20.16b-v23.16b}, [x1]
+ b .Lout
+
+ // fewer than 256 bytes of in/output
+2: ld1 {v4.16b}, [x10]
+ ld1 {v5.16b}, [x11]
+ movi v6.16b, #16
+ add x1, x1, x7
+ tbl v0.16b, {v8.16b-v11.16b}, v4.16b
+ tbx v24.16b, {v20.16b-v23.16b}, v5.16b
+ add v4.16b, v4.16b, v6.16b
+ add v5.16b, v5.16b, v6.16b
+ tbl v1.16b, {v8.16b-v11.16b}, v4.16b
+ tbx v25.16b, {v20.16b-v23.16b}, v5.16b
+ add v4.16b, v4.16b, v6.16b
+ add v5.16b, v5.16b, v6.16b
+ tbl v2.16b, {v8.16b-v11.16b}, v4.16b
+ tbx v26.16b, {v20.16b-v23.16b}, v5.16b
+ add v4.16b, v4.16b, v6.16b
+ add v5.16b, v5.16b, v6.16b
+ tbl v3.16b, {v8.16b-v11.16b}, v4.16b
+ tbx v27.16b, {v20.16b-v23.16b}, v5.16b
+
+ eor v24.16b, v24.16b, v0.16b
+ eor v25.16b, v25.16b, v1.16b
+ eor v26.16b, v26.16b, v2.16b
+ eor v27.16b, v27.16b, v3.16b
+ st1 {v24.16b-v27.16b}, [x1]
+ b .Lout
+
+ // fewer than 320 bytes of in/output
+3: ld1 {v4.16b}, [x10]
+ ld1 {v5.16b}, [x11]
+ movi v6.16b, #16
+ add x1, x1, x8
+ tbl v0.16b, {v12.16b-v15.16b}, v4.16b
+ tbx v28.16b, {v24.16b-v27.16b}, v5.16b
+ add v4.16b, v4.16b, v6.16b
+ add v5.16b, v5.16b, v6.16b
+ tbl v1.16b, {v12.16b-v15.16b}, v4.16b
+ tbx v29.16b, {v24.16b-v27.16b}, v5.16b
+ add v4.16b, v4.16b, v6.16b
+ add v5.16b, v5.16b, v6.16b
+ tbl v2.16b, {v12.16b-v15.16b}, v4.16b
+ tbx v30.16b, {v24.16b-v27.16b}, v5.16b
+ add v4.16b, v4.16b, v6.16b
+ add v5.16b, v5.16b, v6.16b
+ tbl v3.16b, {v12.16b-v15.16b}, v4.16b
+ tbx v31.16b, {v24.16b-v27.16b}, v5.16b
+
+ eor v28.16b, v28.16b, v0.16b
+ eor v29.16b, v29.16b, v1.16b
+ eor v30.16b, v30.16b, v2.16b
+ eor v31.16b, v31.16b, v3.16b
+ st1 {v28.16b-v31.16b}, [x1]
+ b .Lout
+ENDPROC(chacha_4block_xor_neon)
+
+ .section ".rodata", "a", %progbits
+ .align L1_CACHE_SHIFT
+.Lpermute:
+ .set .Li, 0
+ .rept 192
+ .byte (.Li - 64)
+ .set .Li, .Li + 1
+ .endr
+
+CTRINC: .word 1, 2, 3, 4
ROT8: .word 0x02010003, 0x06050407, 0x0a09080b, 0x0e0d0c0f
diff --git a/arch/arm64/crypto/chacha-neon-glue.c b/arch/arm64/crypto/chacha-neon-glue.c
new file mode 100644
index 000000000000..bece1d85bd81
--- /dev/null
+++ b/arch/arm64/crypto/chacha-neon-glue.c
@@ -0,0 +1,198 @@
+/*
+ * ARM NEON accelerated ChaCha and XChaCha stream ciphers,
+ * including ChaCha20 (RFC7539)
+ *
+ * Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on:
+ * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+
+asmlinkage void chacha_block_xor_neon(u32 *state, u8 *dst, const u8 *src,
+ int nrounds);
+asmlinkage void chacha_4block_xor_neon(u32 *state, u8 *dst, const u8 *src,
+ int nrounds, int bytes);
+asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds);
+
+static void chacha_doneon(u32 *state, u8 *dst, const u8 *src,
+ int bytes, int nrounds)
+{
+ while (bytes > 0) {
+ int l = min(bytes, CHACHA_BLOCK_SIZE * 5);
+
+ if (l <= CHACHA_BLOCK_SIZE) {
+ u8 buf[CHACHA_BLOCK_SIZE];
+
+ memcpy(buf, src, l);
+ chacha_block_xor_neon(state, buf, buf, nrounds);
+ memcpy(dst, buf, l);
+ state[12] += 1;
+ break;
+ }
+ chacha_4block_xor_neon(state, dst, src, nrounds, l);
+ bytes -= CHACHA_BLOCK_SIZE * 5;
+ src += CHACHA_BLOCK_SIZE * 5;
+ dst += CHACHA_BLOCK_SIZE * 5;
+ state[12] += 5;
+ }
+}
+
+static int chacha_neon_stream_xor(struct skcipher_request *req,
+ struct chacha_ctx *ctx, u8 *iv)
+{
+ struct skcipher_walk walk;
+ u32 state[16];
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ crypto_chacha_init(state, ctx, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = rounddown(nbytes, walk.stride);
+
+ kernel_neon_begin();
+ chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, ctx->nrounds);
+ kernel_neon_end();
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+static int chacha_neon(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ return crypto_chacha_crypt(req);
+
+ return chacha_neon_stream_xor(req, ctx, req->iv);
+}
+
+static int xchacha_neon(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ u32 state[16];
+ u8 real_iv[16];
+
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+ return crypto_xchacha_crypt(req);
+
+ crypto_chacha_init(state, ctx, req->iv);
+
+ kernel_neon_begin();
+ hchacha_block_neon(state, subctx.key, ctx->nrounds);
+ kernel_neon_end();
+ subctx.nrounds = ctx->nrounds;
+
+ memcpy(&real_iv[0], req->iv + 24, 8);
+ memcpy(&real_iv[8], req->iv + 16, 8);
+ return chacha_neon_stream_xor(req, &subctx, real_iv);
+}
+
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .walksize = 5 * CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha20_setkey,
+ .encrypt = chacha_neon,
+ .decrypt = chacha_neon,
+ }, {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .walksize = 5 * CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha20_setkey,
+ .encrypt = xchacha_neon,
+ .decrypt = xchacha_neon,
+ }, {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-neon",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .walksize = 5 * CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha12_setkey,
+ .encrypt = xchacha_neon,
+ .decrypt = xchacha_neon,
+ }
+};
+
+static int __init chacha_simd_mod_init(void)
+{
+ if (!(elf_hwcap & HWCAP_ASIMD))
+ return -ENODEV;
+
+ return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit chacha_simd_mod_fini(void)
+{
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+module_init(chacha_simd_mod_init);
+module_exit(chacha_simd_mod_fini);
+
+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-neon");
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-neon");
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-neon");
diff --git a/arch/arm64/crypto/chacha20-neon-glue.c b/arch/arm64/crypto/chacha20-neon-glue.c
deleted file mode 100644
index 727579c93ded..000000000000
--- a/arch/arm64/crypto/chacha20-neon-glue.c
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * ChaCha20 256-bit cipher algorithm, RFC7539, arm64 NEON functions
- *
- * Copyright (C) 2016 - 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Based on:
- * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
- *
- * Copyright (C) 2015 Martin Willi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/chacha20.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-
-asmlinkage void chacha20_block_xor_neon(u32 *state, u8 *dst, const u8 *src);
-asmlinkage void chacha20_4block_xor_neon(u32 *state, u8 *dst, const u8 *src);
-
-static void chacha20_doneon(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes)
-{
- u8 buf[CHACHA20_BLOCK_SIZE];
-
- while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
- kernel_neon_begin();
- chacha20_4block_xor_neon(state, dst, src);
- kernel_neon_end();
- bytes -= CHACHA20_BLOCK_SIZE * 4;
- src += CHACHA20_BLOCK_SIZE * 4;
- dst += CHACHA20_BLOCK_SIZE * 4;
- state[12] += 4;
- }
-
- if (!bytes)
- return;
-
- kernel_neon_begin();
- while (bytes >= CHACHA20_BLOCK_SIZE) {
- chacha20_block_xor_neon(state, dst, src);
- bytes -= CHACHA20_BLOCK_SIZE;
- src += CHACHA20_BLOCK_SIZE;
- dst += CHACHA20_BLOCK_SIZE;
- state[12]++;
- }
- if (bytes) {
- memcpy(buf, src, bytes);
- chacha20_block_xor_neon(state, buf, buf);
- memcpy(dst, buf, bytes);
- }
- kernel_neon_end();
-}
-
-static int chacha20_neon(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- if (!may_use_simd() || req->cryptlen <= CHACHA20_BLOCK_SIZE)
- return crypto_chacha20_crypt(req);
-
- err = skcipher_walk_virt(&walk, req, false);
-
- crypto_chacha20_init(state, ctx, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- chacha20_doneon(state, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-
-static struct skcipher_alg alg = {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha20_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA20_KEY_SIZE,
- .max_keysize = CHACHA20_KEY_SIZE,
- .ivsize = CHACHA20_IV_SIZE,
- .chunksize = CHACHA20_BLOCK_SIZE,
- .walksize = 4 * CHACHA20_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
- .encrypt = chacha20_neon,
- .decrypt = chacha20_neon,
-};
-
-static int __init chacha20_simd_mod_init(void)
-{
- if (!(elf_hwcap & HWCAP_ASIMD))
- return -ENODEV;
-
- return crypto_register_skcipher(&alg);
-}
-
-static void __exit chacha20_simd_mod_fini(void)
-{
- crypto_unregister_skcipher(&alg);
-}
-
-module_init(chacha20_simd_mod_init);
-module_exit(chacha20_simd_mod_fini);
-
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS_CRYPTO("chacha20");
diff --git a/arch/arm64/crypto/nh-neon-core.S b/arch/arm64/crypto/nh-neon-core.S
new file mode 100644
index 000000000000..e05570c38de7
--- /dev/null
+++ b/arch/arm64/crypto/nh-neon-core.S
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NH - ε-almost-universal hash function, ARM64 NEON accelerated version
+ *
+ * Copyright 2018 Google LLC
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+
+#include <linux/linkage.h>
+
+ KEY .req x0
+ MESSAGE .req x1
+ MESSAGE_LEN .req x2
+ HASH .req x3
+
+ PASS0_SUMS .req v0
+ PASS1_SUMS .req v1
+ PASS2_SUMS .req v2
+ PASS3_SUMS .req v3
+ K0 .req v4
+ K1 .req v5
+ K2 .req v6
+ K3 .req v7
+ T0 .req v8
+ T1 .req v9
+ T2 .req v10
+ T3 .req v11
+ T4 .req v12
+ T5 .req v13
+ T6 .req v14
+ T7 .req v15
+
+.macro _nh_stride k0, k1, k2, k3
+
+ // Load next message stride
+ ld1 {T3.16b}, [MESSAGE], #16
+
+ // Load next key stride
+ ld1 {\k3\().4s}, [KEY], #16
+
+ // Add message words to key words
+ add T0.4s, T3.4s, \k0\().4s
+ add T1.4s, T3.4s, \k1\().4s
+ add T2.4s, T3.4s, \k2\().4s
+ add T3.4s, T3.4s, \k3\().4s
+
+ // Multiply 32x32 => 64 and accumulate
+ mov T4.d[0], T0.d[1]
+ mov T5.d[0], T1.d[1]
+ mov T6.d[0], T2.d[1]
+ mov T7.d[0], T3.d[1]
+ umlal PASS0_SUMS.2d, T0.2s, T4.2s
+ umlal PASS1_SUMS.2d, T1.2s, T5.2s
+ umlal PASS2_SUMS.2d, T2.2s, T6.2s
+ umlal PASS3_SUMS.2d, T3.2s, T7.2s
+.endm
+
+/*
+ * void nh_neon(const u32 *key, const u8 *message, size_t message_len,
+ * u8 hash[NH_HASH_BYTES])
+ *
+ * It's guaranteed that message_len % 16 == 0.
+ */
+ENTRY(nh_neon)
+
+ ld1 {K0.4s,K1.4s}, [KEY], #32
+ movi PASS0_SUMS.2d, #0
+ movi PASS1_SUMS.2d, #0
+ ld1 {K2.4s}, [KEY], #16
+ movi PASS2_SUMS.2d, #0
+ movi PASS3_SUMS.2d, #0
+
+ subs MESSAGE_LEN, MESSAGE_LEN, #64
+ blt .Lloop4_done
+.Lloop4:
+ _nh_stride K0, K1, K2, K3
+ _nh_stride K1, K2, K3, K0
+ _nh_stride K2, K3, K0, K1
+ _nh_stride K3, K0, K1, K2
+ subs MESSAGE_LEN, MESSAGE_LEN, #64
+ bge .Lloop4
+
+.Lloop4_done:
+ ands MESSAGE_LEN, MESSAGE_LEN, #63
+ beq .Ldone
+ _nh_stride K0, K1, K2, K3
+
+ subs MESSAGE_LEN, MESSAGE_LEN, #16
+ beq .Ldone
+ _nh_stride K1, K2, K3, K0
+
+ subs MESSAGE_LEN, MESSAGE_LEN, #16
+ beq .Ldone
+ _nh_stride K2, K3, K0, K1
+
+.Ldone:
+ // Sum the accumulators for each pass, then store the sums to 'hash'
+ addp T0.2d, PASS0_SUMS.2d, PASS1_SUMS.2d
+ addp T1.2d, PASS2_SUMS.2d, PASS3_SUMS.2d
+ st1 {T0.16b,T1.16b}, [HASH]
+ ret
+ENDPROC(nh_neon)
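As a reading aid (not part of this diff), the pass structure implemented by _nh_stride above can be rendered roughly in C, modelled on the kernel's generic NH fallback; a little-endian host and message_len % 16 == 0 are assumed, as in the assembly:

#include <stdint.h>
#include <string.h>

#define NH_NUM_PASSES	4

static void nh_ref(const uint32_t *key, const uint8_t *message,
		   size_t message_len, uint64_t sums[NH_NUM_PASSES])
{
	memset(sums, 0, NH_NUM_PASSES * sizeof(sums[0]));

	while (message_len) {
		uint32_t m[4];
		int p;

		memcpy(m, message, 16);		/* one 16-byte stride */
		for (p = 0; p < NH_NUM_PASSES; p++) {
			const uint32_t *k = key + 4 * p;

			/* 32x32 -> 64 multiply-accumulate, as in the umlals */
			sums[p] += (uint64_t)(uint32_t)(m[0] + k[0]) *
					     (uint32_t)(m[2] + k[2]);
			sums[p] += (uint64_t)(uint32_t)(m[1] + k[1]) *
					     (uint32_t)(m[3] + k[3]);
		}
		key += 4;	/* key strides for the four passes overlap */
		message += 16;
		message_len -= 16;
	}
}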
diff --git a/arch/arm64/crypto/nhpoly1305-neon-glue.c b/arch/arm64/crypto/nhpoly1305-neon-glue.c
new file mode 100644
index 000000000000..22cc32ac9448
--- /dev/null
+++ b/arch/arm64/crypto/nhpoly1305-neon-glue.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
+ * (ARM64 NEON accelerated version)
+ *
+ * Copyright 2018 Google LLC
+ */
+
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <crypto/internal/hash.h>
+#include <crypto/nhpoly1305.h>
+#include <linux/module.h>
+
+asmlinkage void nh_neon(const u32 *key, const u8 *message, size_t message_len,
+ u8 hash[NH_HASH_BYTES]);
+
+/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */
+static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
+ __le64 hash[NH_NUM_PASSES])
+{
+ nh_neon(key, message, message_len, (u8 *)hash);
+}
+
+static int nhpoly1305_neon_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ if (srclen < 64 || !may_use_simd())
+ return crypto_nhpoly1305_update(desc, src, srclen);
+
+ do {
+ unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+
+ kernel_neon_begin();
+ crypto_nhpoly1305_update_helper(desc, src, n, _nh_neon);
+ kernel_neon_end();
+ src += n;
+ srclen -= n;
+ } while (srclen);
+ return 0;
+}
+
+static struct shash_alg nhpoly1305_alg = {
+ .base.cra_name = "nhpoly1305",
+ .base.cra_driver_name = "nhpoly1305-neon",
+ .base.cra_priority = 200,
+ .base.cra_ctxsize = sizeof(struct nhpoly1305_key),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .init = crypto_nhpoly1305_init,
+ .update = nhpoly1305_neon_update,
+ .final = crypto_nhpoly1305_final,
+ .setkey = crypto_nhpoly1305_setkey,
+ .descsize = sizeof(struct nhpoly1305_state),
+};
+
+static int __init nhpoly1305_mod_init(void)
+{
+ if (!(elf_hwcap & HWCAP_ASIMD))
+ return -ENODEV;
+
+ return crypto_register_shash(&nhpoly1305_alg);
+}
+
+static void __exit nhpoly1305_mod_exit(void)
+{
+ crypto_unregister_shash(&nhpoly1305_alg);
+}
+
+module_init(nhpoly1305_mod_init);
+module_exit(nhpoly1305_mod_exit);
+
+MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (NEON-accelerated)");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("nhpoly1305");
+MODULE_ALIAS_CRYPTO("nhpoly1305-neon");
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 6cd5d77b6b44..1e17ea5c372b 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -14,7 +14,6 @@ generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += msi.h
-generic-y += preempt.h
generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += rwsem.h
@@ -27,4 +26,3 @@ generic-y += trace_clock.h
generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
-generic-y += xor.h
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 709208dfdc8b..2def77ec14be 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -22,12 +22,23 @@
#include <asm/tlbflush.h>
/* Macros for consistency checks of the GICC subtable of MADT */
-#define ACPI_MADT_GICC_LENGTH \
- (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
+
+/*
+ * The MADT GICC minimum length refers to the MADT GICC structure table
+ * length as defined in the earliest ACPI version supported on arm64,
+ * i.e. ACPI 5.1.
+ *
+ * The efficiency_class member was added to struct
+ * acpi_madt_generic_interrupt to represent the MADT GICC structure
+ * "Processor Power Efficiency Class" field, introduced in ACPI 6.0; its
+ * offset is therefore used to delimit the minimum length of the MADT
+ * GICC structure.
+ */
+#define ACPI_MADT_GICC_MIN_LENGTH ACPI_OFFSET( \
+ struct acpi_madt_generic_interrupt, efficiency_class)
#define BAD_MADT_GICC_ENTRY(entry, end) \
- (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
- (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
+ (!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \
+ (unsigned long)(entry) + (entry)->header.length > (end))
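A brief note on the effect (illustrative, not part of this diff): entries from ACPI revisions newer than 5.1 can legitimately be longer than the minimum, so the check now accepts any length >= the 5.1 size while still rejecting truncated entries or entries overrunning the table:

/* sketch: validating one GICC entry while walking the MADT */
static bool gicc_entry_ok(struct acpi_madt_generic_interrupt *gicc,
			  unsigned long table_end)
{
	return !BAD_MADT_GICC_ENTRY(gicc, table_end);
}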
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
diff --git a/arch/arm64/include/asm/asm-prototypes.h b/arch/arm64/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..2173ad32d550
--- /dev/null
+++ b/arch/arm64/include/asm/asm-prototypes.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PROTOTYPES_H
+#define __ASM_PROTOTYPES_H
+/*
+ * CONFIG_MODVERSIONS requires a C declaration to generate the appropriate CRC
+ * for each symbol. Since commit:
+ *
+ * 4efca4ed05cbdfd1 ("kbuild: modversions for EXPORT_SYMBOL() for asm")
+ *
+ * ... kbuild will automatically pick these up from <asm/asm-prototypes.h> and
+ * feed this to genksyms when building assembly files.
+ */
+#include <linux/arm-smccc.h>
+
+#include <asm/ftrace.h>
+#include <asm/page.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+
+#include <asm-generic/asm-prototypes.h>
+
+long long __ashlti3(long long a, int b);
+long long __ashrti3(long long a, int b);
+long long __lshrti3(long long a, int b);
+
+#endif /* __ASM_PROTOTYPES_H */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 6142402c2eb4..4feb6119c3c9 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -23,6 +23,8 @@
#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H
+#include <asm-generic/export.h>
+
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
@@ -123,6 +125,19 @@
.endm
/*
+ * Speculation barrier
+ */
+ .macro sb
+alternative_if_not ARM64_HAS_SB
+ dsb nsh
+ isb
+alternative_else
+ SB_BARRIER_INSN
+ nop
+alternative_endif
+ .endm
+
+/*
* Sanitise a 64-bit bounded index wrt speculation, returning zero if out
* of bounds.
*/
@@ -342,11 +357,10 @@ alternative_endif
.endm
/*
- * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
+ * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
*/
- .macro tcr_set_idmap_t0sz, valreg, tmpreg
- ldr_l \tmpreg, idmap_t0sz
- bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+ .macro tcr_set_t0sz, valreg, t0sz
+ bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
.endm
/*
@@ -377,27 +391,33 @@ alternative_endif
* size: size of the region
* Corrupts: kaddr, size, tmp1, tmp2
*/
+ .macro __dcache_op_workaround_clean_cache, op, kaddr
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+ dc \op, \kaddr
+alternative_else
+ dc civac, \kaddr
+alternative_endif
+ .endm
+
.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
dcache_line_size \tmp1, \tmp2
add \size, \kaddr, \size
sub \tmp2, \tmp1, #1
bic \kaddr, \kaddr, \tmp2
9998:
- .if (\op == cvau || \op == cvac)
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
- dc \op, \kaddr
-alternative_else
- dc civac, \kaddr
-alternative_endif
- .elseif (\op == cvap)
-alternative_if ARM64_HAS_DCPOP
- sys 3, c7, c12, 1, \kaddr // dc cvap
-alternative_else
- dc cvac, \kaddr
-alternative_endif
+ .ifc \op, cvau
+ __dcache_op_workaround_clean_cache \op, \kaddr
+ .else
+ .ifc \op, cvac
+ __dcache_op_workaround_clean_cache \op, \kaddr
+ .else
+ .ifc \op, cvap
+ sys 3, c7, c12, 1, \kaddr // dc cvap
.else
dc \op, \kaddr
.endif
+ .endif
+ .endif
add \kaddr, \kaddr, \tmp1
cmp \kaddr, \size
b.lo 9998b
@@ -477,6 +497,13 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
#else
#define NOKPROBE(x)
#endif
+
+#ifdef CONFIG_KASAN
+#define EXPORT_SYMBOL_NOKASAN(name)
+#else
+#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
+#endif
+
/*
* Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a
@@ -516,6 +543,29 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.endm
/*
+ * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
+ * orr is used as it can cover the immediate value (and is idempotent).
+ * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
+ * ttbr: Value of ttbr to set, modified.
+ */
+ .macro offset_ttbr1, ttbr
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
+ orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
+#endif
+ .endm
+
+/*
+ * Perform the reverse of offset_ttbr1.
+ * bic is used as it can cover the immediate value and, in future, won't need
+ * to be nop'ed out when dealing with 52-bit kernel VAs.
+ */
+ .macro restore_ttbr1, ttbr
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
+ bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
+#endif
+ .endm
+
+/*
* Arrange a physical address in a TTBR register, taking care of 52-bit
* addresses.
*
@@ -672,11 +722,9 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.macro if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
get_thread_info x0
- ldr w1, [x0, #TSK_TI_PREEMPT]
- ldr x0, [x0, #TSK_TI_FLAGS]
- cmp w1, #PREEMPT_DISABLE_OFFSET
- csel x0, x0, xzr, eq
- tbnz x0, #TIF_NEED_RESCHED, .Lyield_\@ // needs rescheduling?
+ ldr x0, [x0, #TSK_TI_PREEMPT]
+ sub x0, x0, #PREEMPT_DISABLE_OFFSET
+ cbz x0, .Lyield_\@
/* fall through to endif_yield_neon */
.subsection 1
.Lyield_\@ :
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index f5a2d09afb38..af7b99005453 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -248,48 +248,57 @@ __LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
-#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl) \
-__LL_SC_INLINE unsigned long \
-__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
- unsigned long old, \
- unsigned long new)) \
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
+__LL_SC_INLINE u##sz \
+__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
+ unsigned long old, \
+ u##sz new)) \
{ \
- unsigned long tmp, oldval; \
+ unsigned long tmp; \
+ u##sz oldval; \
+ \
+ /* \
+ * Sub-word sizes require explicit casting so that the compare \
+ * part of the cmpxchg doesn't end up interpreting non-zero \
+ * upper bits of the register containing "old". \
+ */ \
+ if (sz < 32) \
+ old = (u##sz)old; \
\
asm volatile( \
" prfm pstl1strm, %[v]\n" \
- "1: ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n" \
+ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
" eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
" cbnz %" #w "[tmp], 2f\n" \
- " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
+ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
" cbnz %w[tmp], 1b\n" \
" " #mb "\n" \
"2:" \
: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
- [v] "+Q" (*(unsigned long *)ptr) \
- : [old] "Lr" (old), [new] "r" (new) \
+ [v] "+Q" (*(u##sz *)ptr) \
+ : [old] "Kr" (old), [new] "r" (new) \
: cl); \
\
return oldval; \
} \
-__LL_SC_EXPORT(__cmpxchg_case_##name);
+__LL_SC_EXPORT(__cmpxchg_case_##name##sz);
-__CMPXCHG_CASE(w, b, 1, , , , )
-__CMPXCHG_CASE(w, h, 2, , , , )
-__CMPXCHG_CASE(w, , 4, , , , )
-__CMPXCHG_CASE( , , 8, , , , )
-__CMPXCHG_CASE(w, b, acq_1, , a, , "memory")
-__CMPXCHG_CASE(w, h, acq_2, , a, , "memory")
-__CMPXCHG_CASE(w, , acq_4, , a, , "memory")
-__CMPXCHG_CASE( , , acq_8, , a, , "memory")
-__CMPXCHG_CASE(w, b, rel_1, , , l, "memory")
-__CMPXCHG_CASE(w, h, rel_2, , , l, "memory")
-__CMPXCHG_CASE(w, , rel_4, , , l, "memory")
-__CMPXCHG_CASE( , , rel_8, , , l, "memory")
-__CMPXCHG_CASE(w, b, mb_1, dmb ish, , l, "memory")
-__CMPXCHG_CASE(w, h, mb_2, dmb ish, , l, "memory")
-__CMPXCHG_CASE(w, , mb_4, dmb ish, , l, "memory")
-__CMPXCHG_CASE( , , mb_8, dmb ish, , l, "memory")
+__CMPXCHG_CASE(w, b, , 8, , , , )
+__CMPXCHG_CASE(w, h, , 16, , , , )
+__CMPXCHG_CASE(w, , , 32, , , , )
+__CMPXCHG_CASE( , , , 64, , , , )
+__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory")
+__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory")
+__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory")
+__CMPXCHG_CASE( , , acq_, 64, , a, , "memory")
+__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory")
+__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory")
+__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory")
+__CMPXCHG_CASE( , , rel_, 64, , , l, "memory")
+__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory")
+__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory")
+__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory")
+__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory")
#undef __CMPXCHG_CASE
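To make the casting comment above concrete, a small userspace sketch (not part of this diff): the exclusive byte load zero-extends, so stale bits 8-31 in the register holding "old" would make the eor-based comparison fail even when the bytes match, unless old is first narrowed to the operand size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old = 0x123456aaULL;	/* bits 8-31 carry stale data */
	uint8_t mem = 0xaa;		/* the byte in memory does match */

	/* without narrowing: spurious mismatch, the cmpxchg would bail out */
	printf("unnarrowed: %s\n", ((uint64_t)mem ^ old) ? "mismatch" : "match");
	/* after old = (u8)old, as the macro now does: correct match */
	printf("narrowed:   %s\n", ((uint64_t)mem ^ (uint8_t)old) ? "mismatch" : "match");
	return 0;
}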
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index f9b0b09153e0..a424355240c5 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -446,22 +446,22 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)
-#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
-static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
- unsigned long old, \
- unsigned long new) \
+#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
+static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \
+ u##sz old, \
+ u##sz new) \
{ \
register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
- register unsigned long x1 asm ("x1") = old; \
- register unsigned long x2 asm ("x2") = new; \
+ register u##sz x1 asm ("x1") = old; \
+ register u##sz x2 asm ("x2") = new; \
\
asm volatile(ARM64_LSE_ATOMIC_INSN( \
/* LL/SC */ \
- __LL_SC_CMPXCHG(name) \
+ __LL_SC_CMPXCHG(name##sz) \
__nops(2), \
/* LSE atomics */ \
" mov " #w "30, %" #w "[old]\n" \
- " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
+ " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \
" mov %" #w "[ret], " #w "30") \
: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
: [old] "r" (x1), [new] "r" (x2) \
@@ -470,22 +470,22 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
return x0; \
}
-__CMPXCHG_CASE(w, b, 1, )
-__CMPXCHG_CASE(w, h, 2, )
-__CMPXCHG_CASE(w, , 4, )
-__CMPXCHG_CASE(x, , 8, )
-__CMPXCHG_CASE(w, b, acq_1, a, "memory")
-__CMPXCHG_CASE(w, h, acq_2, a, "memory")
-__CMPXCHG_CASE(w, , acq_4, a, "memory")
-__CMPXCHG_CASE(x, , acq_8, a, "memory")
-__CMPXCHG_CASE(w, b, rel_1, l, "memory")
-__CMPXCHG_CASE(w, h, rel_2, l, "memory")
-__CMPXCHG_CASE(w, , rel_4, l, "memory")
-__CMPXCHG_CASE(x, , rel_8, l, "memory")
-__CMPXCHG_CASE(w, b, mb_1, al, "memory")
-__CMPXCHG_CASE(w, h, mb_2, al, "memory")
-__CMPXCHG_CASE(w, , mb_4, al, "memory")
-__CMPXCHG_CASE(x, , mb_8, al, "memory")
+__CMPXCHG_CASE(w, b, , 8, )
+__CMPXCHG_CASE(w, h, , 16, )
+__CMPXCHG_CASE(w, , , 32, )
+__CMPXCHG_CASE(x, , , 64, )
+__CMPXCHG_CASE(w, b, acq_, 8, a, "memory")
+__CMPXCHG_CASE(w, h, acq_, 16, a, "memory")
+__CMPXCHG_CASE(w, , acq_, 32, a, "memory")
+__CMPXCHG_CASE(x, , acq_, 64, a, "memory")
+__CMPXCHG_CASE(w, b, rel_, 8, l, "memory")
+__CMPXCHG_CASE(w, h, rel_, 16, l, "memory")
+__CMPXCHG_CASE(w, , rel_, 32, l, "memory")
+__CMPXCHG_CASE(x, , rel_, 64, l, "memory")
+__CMPXCHG_CASE(w, b, mb_, 8, al, "memory")
+__CMPXCHG_CASE(w, h, mb_, 16, al, "memory")
+__CMPXCHG_CASE(w, , mb_, 32, al, "memory")
+__CMPXCHG_CASE(x, , mb_, 64, al, "memory")
#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 822a9192c551..f66bb04fdf2d 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -34,6 +34,10 @@
#define psb_csync() asm volatile("hint #17" : : : "memory")
#define csdb() asm volatile("hint #20" : : : "memory")
+#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
+ SB_BARRIER_INSN"nop\n", \
+ ARM64_HAS_SB))
+
#define mb() dsb(sy)
#define rmb() dsb(ld)
#define wmb() dsb(st)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 3b0938281541..3f9376f1c409 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -30,46 +30,46 @@
* barrier case is generated as release+dmb for the former and
* acquire+release for the latter.
*/
-#define __XCHG_CASE(w, sz, name, mb, nop_lse, acq, acq_lse, rel, cl) \
-static inline unsigned long __xchg_case_##name(unsigned long x, \
- volatile void *ptr) \
-{ \
- unsigned long ret, tmp; \
- \
- asm volatile(ARM64_LSE_ATOMIC_INSN( \
- /* LL/SC */ \
- " prfm pstl1strm, %2\n" \
- "1: ld" #acq "xr" #sz "\t%" #w "0, %2\n" \
- " st" #rel "xr" #sz "\t%w1, %" #w "3, %2\n" \
- " cbnz %w1, 1b\n" \
- " " #mb, \
- /* LSE atomics */ \
- " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
- __nops(3) \
- " " #nop_lse) \
- : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
- : "r" (x) \
- : cl); \
- \
- return ret; \
+#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl) \
+static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr) \
+{ \
+ u##sz ret; \
+ unsigned long tmp; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " prfm pstl1strm, %2\n" \
+ "1: ld" #acq "xr" #sfx "\t%" #w "0, %2\n" \
+ " st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n" \
+ " cbnz %w1, 1b\n" \
+ " " #mb, \
+ /* LSE atomics */ \
+ " swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n" \
+ __nops(3) \
+ " " #nop_lse) \
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr) \
+ : "r" (x) \
+ : cl); \
+ \
+ return ret; \
}
-__XCHG_CASE(w, b, 1, , , , , , )
-__XCHG_CASE(w, h, 2, , , , , , )
-__XCHG_CASE(w, , 4, , , , , , )
-__XCHG_CASE( , , 8, , , , , , )
-__XCHG_CASE(w, b, acq_1, , , a, a, , "memory")
-__XCHG_CASE(w, h, acq_2, , , a, a, , "memory")
-__XCHG_CASE(w, , acq_4, , , a, a, , "memory")
-__XCHG_CASE( , , acq_8, , , a, a, , "memory")
-__XCHG_CASE(w, b, rel_1, , , , , l, "memory")
-__XCHG_CASE(w, h, rel_2, , , , , l, "memory")
-__XCHG_CASE(w, , rel_4, , , , , l, "memory")
-__XCHG_CASE( , , rel_8, , , , , l, "memory")
-__XCHG_CASE(w, b, mb_1, dmb ish, nop, , a, l, "memory")
-__XCHG_CASE(w, h, mb_2, dmb ish, nop, , a, l, "memory")
-__XCHG_CASE(w, , mb_4, dmb ish, nop, , a, l, "memory")
-__XCHG_CASE( , , mb_8, dmb ish, nop, , a, l, "memory")
+__XCHG_CASE(w, b, , 8, , , , , , )
+__XCHG_CASE(w, h, , 16, , , , , , )
+__XCHG_CASE(w, , , 32, , , , , , )
+__XCHG_CASE( , , , 64, , , , , , )
+__XCHG_CASE(w, b, acq_, 8, , , a, a, , "memory")
+__XCHG_CASE(w, h, acq_, 16, , , a, a, , "memory")
+__XCHG_CASE(w, , acq_, 32, , , a, a, , "memory")
+__XCHG_CASE( , , acq_, 64, , , a, a, , "memory")
+__XCHG_CASE(w, b, rel_, 8, , , , , l, "memory")
+__XCHG_CASE(w, h, rel_, 16, , , , , l, "memory")
+__XCHG_CASE(w, , rel_, 32, , , , , l, "memory")
+__XCHG_CASE( , , rel_, 64, , , , , l, "memory")
+__XCHG_CASE(w, b, mb_, 8, dmb ish, nop, , a, l, "memory")
+__XCHG_CASE(w, h, mb_, 16, dmb ish, nop, , a, l, "memory")
+__XCHG_CASE(w, , mb_, 32, dmb ish, nop, , a, l, "memory")
+__XCHG_CASE( , , mb_, 64, dmb ish, nop, , a, l, "memory")
#undef __XCHG_CASE
@@ -80,13 +80,13 @@ static inline unsigned long __xchg##sfx(unsigned long x, \
{ \
switch (size) { \
case 1: \
- return __xchg_case##sfx##_1(x, ptr); \
+ return __xchg_case##sfx##_8(x, ptr); \
case 2: \
- return __xchg_case##sfx##_2(x, ptr); \
+ return __xchg_case##sfx##_16(x, ptr); \
case 4: \
- return __xchg_case##sfx##_4(x, ptr); \
+ return __xchg_case##sfx##_32(x, ptr); \
case 8: \
- return __xchg_case##sfx##_8(x, ptr); \
+ return __xchg_case##sfx##_64(x, ptr); \
default: \
BUILD_BUG(); \
} \
@@ -123,13 +123,13 @@ static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
{ \
switch (size) { \
case 1: \
- return __cmpxchg_case##sfx##_1(ptr, (u8)old, new); \
+ return __cmpxchg_case##sfx##_8(ptr, old, new); \
case 2: \
- return __cmpxchg_case##sfx##_2(ptr, (u16)old, new); \
+ return __cmpxchg_case##sfx##_16(ptr, old, new); \
case 4: \
- return __cmpxchg_case##sfx##_4(ptr, old, new); \
+ return __cmpxchg_case##sfx##_32(ptr, old, new); \
case 8: \
- return __cmpxchg_case##sfx##_8(ptr, old, new); \
+ return __cmpxchg_case##sfx##_64(ptr, old, new); \
default: \
BUILD_BUG(); \
} \
@@ -197,16 +197,16 @@ __CMPXCHG_GEN(_mb)
__ret; \
})
-#define __CMPWAIT_CASE(w, sz, name) \
-static inline void __cmpwait_case_##name(volatile void *ptr, \
- unsigned long val) \
+#define __CMPWAIT_CASE(w, sfx, sz) \
+static inline void __cmpwait_case_##sz(volatile void *ptr, \
+ unsigned long val) \
{ \
unsigned long tmp; \
\
asm volatile( \
" sevl\n" \
" wfe\n" \
- " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \
+ " ldxr" #sfx "\t%" #w "[tmp], %[v]\n" \
" eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
" cbnz %" #w "[tmp], 1f\n" \
" wfe\n" \
@@ -215,10 +215,10 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \
: [val] "r" (val)); \
}
-__CMPWAIT_CASE(w, b, 1);
-__CMPWAIT_CASE(w, h, 2);
-__CMPWAIT_CASE(w, , 4);
-__CMPWAIT_CASE( , , 8);
+__CMPWAIT_CASE(w, b, 8);
+__CMPWAIT_CASE(w, h, 16);
+__CMPWAIT_CASE(w, , 32);
+__CMPWAIT_CASE( , , 64);
#undef __CMPWAIT_CASE
@@ -229,13 +229,13 @@ static inline void __cmpwait##sfx(volatile void *ptr, \
{ \
switch (size) { \
case 1: \
- return __cmpwait_case##sfx##_1(ptr, (u8)val); \
+ return __cmpwait_case##sfx##_8(ptr, (u8)val); \
case 2: \
- return __cmpwait_case##sfx##_2(ptr, (u16)val); \
+ return __cmpwait_case##sfx##_16(ptr, (u16)val); \
case 4: \
- return __cmpwait_case##sfx##_4(ptr, val); \
+ return __cmpwait_case##sfx##_32(ptr, val); \
case 8: \
- return __cmpwait_case##sfx##_8(ptr, val); \
+ return __cmpwait_case##sfx##_64(ptr, val); \
default: \
BUILD_BUG(); \
} \
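
To make the new naming concrete: callers still select a case by sizeof(), but the helpers are now named by bit width and typed u##sz throughout, so sub-word values are no longer widened through unsigned long. A minimal sketch (illustrative only; example_xchg16 is an invented name):

	/* Illustration only, not part of the patch. */
	static inline u16 example_xchg16(u16 *p, u16 v)
	{
		/* sizeof(*p) == 2, so __xchg_mb() dispatches to __xchg_case_mb_16() */
		return xchg(p, v);	/* returns the previous value of *p */
	}
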
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 6e2d254c09eb..82e9099834ae 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -54,7 +54,13 @@
#define ARM64_HAS_CRC32 33
#define ARM64_SSBS 34
#define ARM64_WORKAROUND_1188873 35
+#define ARM64_HAS_SB 36
+#define ARM64_WORKAROUND_1165522 37
+#define ARM64_HAS_ADDRESS_AUTH_ARCH 38
+#define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39
+#define ARM64_HAS_GENERIC_AUTH_ARCH 40
+#define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
-#define ARM64_NCAPS 36
+#define ARM64_NCAPS 42
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 7e2ec64aa414..dfcfba725d72 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -321,19 +321,20 @@ struct arm64_cpu_capabilities {
bool sign;
unsigned long hwcap;
};
- /*
- * A list of "matches/cpu_enable" pair for the same
- * "capability" of the same "type" as described by the parent.
- * Only matches(), cpu_enable() and fields relevant to these
- * methods are significant in the list. The cpu_enable is
- * invoked only if the corresponding entry "matches()".
- * However, if a cpu_enable() method is associated
- * with multiple matches(), care should be taken that either
- * the match criteria are mutually exclusive, or that the
- * method is robust against being called multiple times.
- */
- const struct arm64_cpu_capabilities *match_list;
};
+
+ /*
+ * An optional list of "matches/cpu_enable" pairs for the same
+ * "capability" of the same "type" as described by the parent.
+ * Only matches(), cpu_enable() and fields relevant to these
+ * methods are significant in the list. The cpu_enable is
+ * invoked only if the corresponding entry "matches()".
+ * However, if a cpu_enable() method is associated
+ * with multiple matches(), care should be taken that either
+ * the match criteria are mutually exclusive, or that the
+ * method is robust against being called multiple times.
+ */
+ const struct arm64_cpu_capabilities *match_list;
};
static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
@@ -353,10 +354,46 @@ cpucap_late_cpu_permitted(const struct arm64_cpu_capabilities *cap)
return !!(cap->type & ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU);
}
+/*
+ * Generic helper for handling capabilities with multiple (match, enable) pairs
+ * of callbacks, sharing the same capability bit.
+ * Iterate over each entry to see if at least one matches.
+ */
+static inline bool
+cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ const struct arm64_cpu_capabilities *caps;
+
+ for (caps = entry->match_list; caps->matches; caps++)
+ if (caps->matches(caps, scope))
+ return true;
+
+ return false;
+}
+
+/*
+ * Take appropriate action for all matching entries in the shared capability
+ * entry.
+ */
+static inline void
+cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
+{
+ const struct arm64_cpu_capabilities *caps;
+
+ for (caps = entry->match_list; caps->matches; caps++)
+ if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
+ caps->cpu_enable)
+ caps->cpu_enable(caps);
+}
+
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;
+#define for_each_available_cap(cap) \
+ for_each_set_bit(cap, cpu_hwcaps, ARM64_NCAPS)
+
bool this_cpu_has_cap(unsigned int cap);
static inline bool cpu_have_feature(unsigned int num)
@@ -473,7 +510,6 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);
-
u64 read_sanitised_ftr_reg(u32 id);
static inline bool cpu_supports_mixed_endian_el0(void)
@@ -486,11 +522,59 @@ static inline bool system_supports_32bit_el0(void)
return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}
+static inline bool system_supports_4kb_granule(void)
+{
+ u64 mmfr0;
+ u32 val;
+
+ mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ val = cpuid_feature_extract_unsigned_field(mmfr0,
+ ID_AA64MMFR0_TGRAN4_SHIFT);
+
+ return val == ID_AA64MMFR0_TGRAN4_SUPPORTED;
+}
+
+static inline bool system_supports_64kb_granule(void)
+{
+ u64 mmfr0;
+ u32 val;
+
+ mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ val = cpuid_feature_extract_unsigned_field(mmfr0,
+ ID_AA64MMFR0_TGRAN64_SHIFT);
+
+ return val == ID_AA64MMFR0_TGRAN64_SUPPORTED;
+}
+
+static inline bool system_supports_16kb_granule(void)
+{
+ u64 mmfr0;
+ u32 val;
+
+ mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ val = cpuid_feature_extract_unsigned_field(mmfr0,
+ ID_AA64MMFR0_TGRAN16_SHIFT);
+
+ return val == ID_AA64MMFR0_TGRAN16_SUPPORTED;
+}
+
static inline bool system_supports_mixed_endian_el0(void)
{
return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}
+static inline bool system_supports_mixed_endian(void)
+{
+ u64 mmfr0;
+ u32 val;
+
+ mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ val = cpuid_feature_extract_unsigned_field(mmfr0,
+ ID_AA64MMFR0_BIGENDEL_SHIFT);
+
+ return val == 0x1;
+}
+
static inline bool system_supports_fpsimd(void)
{
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
@@ -514,6 +598,20 @@ static inline bool system_supports_cnp(void)
cpus_have_const_cap(ARM64_HAS_CNP);
}
+static inline bool system_supports_address_auth(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
+ (cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
+ cpus_have_const_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF));
+}
+
+static inline bool system_supports_generic_auth(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_PTR_AUTH) &&
+ (cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
+ cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF));
+}
+
#define ARM64_SSBD_UNKNOWN -1
#define ARM64_SSBD_FORCE_DISABLE 0
#define ARM64_SSBD_KERNEL 1
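
A sketch of how the match_list plumbing above is meant to be used; has_auth_arch, has_auth_impdef and enable_auth are invented names for illustration, not functions from the patch:

	static const struct arm64_cpu_capabilities auth_match_list[] = {
		{ .matches = has_auth_arch,   .cpu_enable = enable_auth },
		{ .matches = has_auth_impdef, .cpu_enable = enable_auth },
		{},	/* terminator: a NULL .matches ends the walk */
	};

	static const struct arm64_cpu_capabilities example_cap = {
		.capability	= ARM64_HAS_ADDRESS_AUTH_ARCH,	/* shared bit */
		.matches	= cpucap_multi_entry_cap_matches,
		.cpu_enable	= cpucap_multi_entry_cap_cpu_enable,
		.match_list	= auth_match_list,
	};

The parent entry's matches() reports success if any list entry matches; cpu_enable() then runs only for the entries that match on the local CPU.
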
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 12f93e4d2452..951ed1a4e5c9 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -151,6 +151,8 @@ struct midr_range {
.rv_max = MIDR_CPU_VAR_REV(v_max, r_max), \
}
+#define MIDR_REV_RANGE(m, v, r_min, r_max) MIDR_RANGE(m, v, r_min, v, r_max)
+#define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r)
#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 433b9554c6a1..6adc1a90e7e6 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -117,7 +117,11 @@
* 64-bit, this is above 4GB to leave the entire 32-bit address
* space open for things that want to use the area for 32-bit pointers.
*/
+#ifdef CONFIG_ARM64_FORCE_52BIT
#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+#else
+#define ELF_ET_DYN_BASE (2 * DEFAULT_MAP_WINDOW_64 / 3)
+#endif /* CONFIG_ARM64_FORCE_52BIT */
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 676de2ec1762..52233f00d53d 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -29,23 +29,24 @@
#define ESR_ELx_EC_CP14_MR (0x05)
#define ESR_ELx_EC_CP14_LS (0x06)
#define ESR_ELx_EC_FP_ASIMD (0x07)
-#define ESR_ELx_EC_CP10_ID (0x08)
-/* Unallocated EC: 0x09 - 0x0B */
+#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
+#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
+/* Unallocated EC: 0x0A - 0x0B */
#define ESR_ELx_EC_CP14_64 (0x0C)
/* Unallocated EC: 0x0d */
#define ESR_ELx_EC_ILL (0x0E)
/* Unallocated EC: 0x0F - 0x10 */
#define ESR_ELx_EC_SVC32 (0x11)
-#define ESR_ELx_EC_HVC32 (0x12)
-#define ESR_ELx_EC_SMC32 (0x13)
+#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
+#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
/* Unallocated EC: 0x14 */
#define ESR_ELx_EC_SVC64 (0x15)
-#define ESR_ELx_EC_HVC64 (0x16)
-#define ESR_ELx_EC_SMC64 (0x17)
+#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
+#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
#define ESR_ELx_EC_SYS64 (0x18)
#define ESR_ELx_EC_SVE (0x19)
/* Unallocated EC: 0x1A - 0x1E */
-#define ESR_ELx_EC_IMP_DEF (0x1f)
+#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
#define ESR_ELx_EC_PC_ALIGN (0x22)
@@ -68,7 +69,7 @@
/* Unallocated EC: 0x36 - 0x37 */
#define ESR_ELx_EC_BKPT32 (0x38)
/* Unallocated EC: 0x39 */
-#define ESR_ELx_EC_VECTOR32 (0x3A)
+#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
/* Unallocated EC: 0x3B */
#define ESR_ELx_EC_BRK64 (0x3C)
/* Unallocated EC: 0x3D - 0x3F */
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index caa955f10e19..15a6587e12f9 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -13,6 +13,7 @@
#include <asm/insn.h>
+#define HAVE_FUNCTION_GRAPH_FP_TEST
#define MCOUNT_ADDR ((unsigned long)_mcount)
#define MCOUNT_INSN_SIZE AARCH64_INSN_SIZE
@@ -56,6 +57,19 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
return is_compat_task();
}
+
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+ const char *name)
+{
+ /*
+ * Since all syscall functions have the __arm64_ prefix, we must skip it.
+ * However, as described above, compat syscalls are deliberately
+ * ignored, so the __arm64_compat_ prefix is of no concern here.
+ */
+ return !strcmp(sym + 8, name);
+}
#endif /* ifndef __ASSEMBLY__ */
#endif /* __ASM_FTRACE_H */
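
The hard-coded 8 in arch_syscall_match_sym_name() is simply the length of the skipped prefix; spelled out:

	/*
	 * sizeof("__arm64_") - 1 == 8, so for example:
	 *
	 *   arch_syscall_match_sym_name("__arm64_sys_read", "sys_read")
	 *     == !strcmp("__arm64_sys_read" + 8, "sys_read")
	 *     == !strcmp("sys_read", "sys_read")
	 *     == true
	 */
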
diff --git a/arch/arm64/include/asm/image.h b/arch/arm64/include/asm/image.h
new file mode 100644
index 000000000000..e2c27a2278e9
--- /dev/null
+++ b/arch/arm64/include/asm/image.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_IMAGE_H
+#define __ASM_IMAGE_H
+
+#define ARM64_IMAGE_MAGIC "ARM\x64"
+
+#define ARM64_IMAGE_FLAG_BE_SHIFT 0
+#define ARM64_IMAGE_FLAG_PAGE_SIZE_SHIFT (ARM64_IMAGE_FLAG_BE_SHIFT + 1)
+#define ARM64_IMAGE_FLAG_PHYS_BASE_SHIFT \
+ (ARM64_IMAGE_FLAG_PAGE_SIZE_SHIFT + 2)
+#define ARM64_IMAGE_FLAG_BE_MASK 0x1
+#define ARM64_IMAGE_FLAG_PAGE_SIZE_MASK 0x3
+#define ARM64_IMAGE_FLAG_PHYS_BASE_MASK 0x1
+
+#define ARM64_IMAGE_FLAG_LE 0
+#define ARM64_IMAGE_FLAG_BE 1
+#define ARM64_IMAGE_FLAG_PAGE_SIZE_4K 1
+#define ARM64_IMAGE_FLAG_PAGE_SIZE_16K 2
+#define ARM64_IMAGE_FLAG_PAGE_SIZE_64K 3
+#define ARM64_IMAGE_FLAG_PHYS_BASE 1
+
+#ifndef __ASSEMBLY__
+
+#define arm64_image_flag_field(flags, field) \
+ (((flags) >> field##_SHIFT) & field##_MASK)
+
+/*
+ * struct arm64_image_header - arm64 kernel image header
+ * See Documentation/arm64/booting.txt for details
+ *
+ * @code0: Executable code, or
+ * @mz_header alternatively used for part of MZ header
+ * @code1: Executable code
+ * @text_offset: Image load offset
+ * @image_size: Effective Image size
+ * @flags: kernel flags
+ * @res2, @res3, @res4: reserved
+ * @magic: Magic number
+ * @res5: reserved, or
+ * @pe_header: alternatively used for PE COFF offset
+ */
+
+struct arm64_image_header {
+ __le32 code0;
+ __le32 code1;
+ __le64 text_offset;
+ __le64 image_size;
+ __le64 flags;
+ __le64 res2;
+ __le64 res3;
+ __le64 res4;
+ __le32 magic;
+ __le32 res5;
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_IMAGE_H */
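
A short sketch of decoding the flags field with the accessor above; example_image_is_4k is an invented name, and the fields are little-endian per booting.txt:

	static inline bool example_image_is_4k(const struct arm64_image_header *h)
	{
		u64 flags = le64_to_cpu(h->flags);

		/* bits [2:1] carry the page size: 1 == 4K, 2 == 16K, 3 == 64K */
		return arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_PAGE_SIZE)
				== ARM64_IMAGE_FLAG_PAGE_SIZE_4K;
	}
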
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index c6802dea6cab..9c01f04db64d 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -261,6 +261,11 @@ enum aarch64_insn_prfm_policy {
AARCH64_INSN_PRFM_POLICY_STRM,
};
+enum aarch64_insn_adr_type {
+ AARCH64_INSN_ADR_TYPE_ADRP,
+ AARCH64_INSN_ADR_TYPE_ADR,
+};
+
#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
{ return (code & (mask)) == (val); } \
@@ -393,6 +398,9 @@ u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
enum aarch64_insn_adsb_type type);
+u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_adr_type type);
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int immr, int imms,
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 9f8b915af3a7..ee723835c1f4 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -104,7 +104,23 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
}
/* IO barriers */
-#define __iormb() rmb()
+#define __iormb(v) \
+({ \
+ unsigned long tmp; \
+ \
+ rmb(); \
+ \
+ /* \
+ * Create a dummy control dependency from the IO read to any \
+ * later instructions. This ensures that a subsequent call to \
+ * udelay() will be ordered due to the ISB in get_cycles(). \
+ */ \
+ asm volatile("eor %0, %1, %1\n" \
+ "cbnz %0, ." \
+ : "=r" (tmp) : "r" ((unsigned long)(v)) \
+ : "memory"); \
+})
+
#define __iowmb() wmb()
#define mmiowb() do { } while (0)
@@ -129,10 +145,10 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* following Normal memory access. Writes are ordered relative to any prior
* Normal memory access.
*/
-#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
-#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
-#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
-#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; })
+#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
+#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
@@ -183,9 +199,9 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
/*
* io{read,write}{16,32,64}be() macros
*/
-#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
-#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
-#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(); __v; })
+#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; })
+#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; })
+#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; })
#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
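
The reason __iormb() now takes the value read: the eor/cbnz pair creates a control dependency on that value, pinning later instructions behind the MMIO access. A sketch of the pattern this protects (example_toggle_reset is an invented name):

	static void example_toggle_reset(void __iomem *reg)
	{
		u32 v = readl(reg);	/* readl() feeds __v into __iormb() */

		/* ordered after the read, via the ISB in get_cycles() */
		udelay(10);
		writel(v | 1, reg);
	}
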
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index e17f0529a882..67e4cb75d1fd 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -93,6 +93,25 @@ static inline void crash_prepare_suspend(void) {}
static inline void crash_post_resume(void) {}
#endif
+#ifdef CONFIG_KEXEC_FILE
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+ void *dtb;
+ unsigned long dtb_mem;
+};
+
+extern const struct kexec_file_ops kexec_image_ops;
+
+struct kimage;
+
+extern int arch_kimage_file_post_load_cleanup(struct kimage *image);
+extern int load_other_segments(struct kimage *image,
+ unsigned long kernel_load_addr, unsigned long kernel_size,
+ char *initrd, unsigned long initrd_len,
+ char *cmdline);
+#endif
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 6f602af5263c..7f9d2bfcf82e 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -24,6 +24,8 @@
/* Hyp Configuration Register (HCR) bits */
#define HCR_FWB (UL(1) << 46)
+#define HCR_API (UL(1) << 41)
+#define HCR_APK (UL(1) << 40)
#define HCR_TEA (UL(1) << 37)
#define HCR_TERR (UL(1) << 36)
#define HCR_TLOR (UL(1) << 35)
@@ -87,6 +89,7 @@
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
HCR_FMO | HCR_IMO)
#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
/* TCR_EL2 Registers bits */
@@ -104,7 +107,7 @@
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
/* VTCR_EL2 Registers bits */
-#define VTCR_EL2_RES1 (1 << 31)
+#define VTCR_EL2_RES1 (1U << 31)
#define VTCR_EL2_HD (1 << 22)
#define VTCR_EL2_HA (1 << 21)
#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT
@@ -320,10 +323,6 @@
#define PAR_TO_HPFAR(par) \
(((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8)
-#define kvm_arm_exception_type \
- {0, "IRQ" }, \
- {1, "TRAP" }
-
#define ECN(x) { ESR_ELx_EC_##x, #x }
#define kvm_arm_exception_class \
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index aea01a09eb94..f5b79e995f40 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -25,6 +25,7 @@
#define ARM_EXIT_WITH_SERROR_BIT 31
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
+#define ARM_EXCEPTION_IS_TRAP(x) (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IRQ 0
@@ -34,6 +35,12 @@
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
+#define kvm_arm_exception_type \
+ {ARM_EXCEPTION_IRQ, "IRQ" }, \
+ {ARM_EXCEPTION_EL1_SERROR, "SERROR" }, \
+ {ARM_EXCEPTION_TRAP, "TRAP" }, \
+ {ARM_EXCEPTION_HYP_GONE, "HYP_GONE" }
+
#ifndef __ASSEMBLY__
#include <linux/mm.h>
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 21247870def7..506386a3edde 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -24,6 +24,7 @@
#include <linux/kvm_host.h>
+#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
@@ -147,14 +148,6 @@ static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
return true;
}
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
-{
- if (vcpu_mode_is_32bit(vcpu))
- kvm_skip_instr32(vcpu, is_wide_instr);
- else
- *vcpu_pc(vcpu) += 4;
-}
-
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
@@ -424,4 +417,30 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
return data; /* Leave LE untouched */
}
+static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+ if (vcpu_mode_is_32bit(vcpu))
+ kvm_skip_instr32(vcpu, is_wide_instr);
+ else
+ *vcpu_pc(vcpu) += 4;
+
+ /* advance the singlestep state machine */
+ *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+}
+
+/*
+ * Skip an instruction which has been emulated at hyp while most guest sysregs
+ * are live.
+ */
+static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+{
+ *vcpu_pc(vcpu) = read_sysreg_el2(elr);
+ vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
+
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
+ write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
+ write_sysreg_el2(*vcpu_pc(vcpu), elr);
+}
+
#endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 52fbc823ff8c..7732d0ba4e60 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -319,7 +319,7 @@ struct kvm_vcpu_arch {
*/
#define __vcpu_sys_reg(v,r) ((v)->arch.ctxt.sys_regs[(r)])
-u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg);
+u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
/*
@@ -360,7 +360,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -422,7 +422,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
}
}
-static inline bool kvm_arch_check_sve_has_vhe(void)
+static inline bool kvm_arch_requires_vhe(void)
{
/*
* The Arm architecture specifies that implementation of SVE
@@ -430,9 +430,13 @@ static inline bool kvm_arch_check_sve_has_vhe(void)
* relies on this when SVE is present:
*/
if (system_supports_sve())
- return has_vhe();
- else
return true;
+
+ /* Some implementations have defects that confine them to VHE */
+ if (cpus_have_cap(ARM64_WORKAROUND_1165522))
+ return true;
+
+ return false;
}
static inline void kvm_arch_hardware_unsetup(void) {}
@@ -445,7 +449,6 @@ void kvm_arm_init_debug(void);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
-bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 23aca66767f9..a80a7ef57325 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -20,6 +20,7 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
+#include <asm/alternative.h>
#include <asm/sysreg.h>
#define __hyp_text __section(.hyp.text) notrace
@@ -163,6 +164,13 @@ static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
{
write_sysreg(kvm->arch.vtcr, vtcr_el2);
write_sysreg(kvm->arch.vttbr, vttbr_el2);
+
+ /*
+ * ARM erratum 1165522 requires the actual execution of the above
+ * before we can switch to the EL1/EL0 translation regime used by
+ * the guest.
+ */
+ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
}
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 658657367f2f..8af4b1befa42 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -184,6 +184,17 @@ void kvm_clear_hyp_idmap(void);
#define kvm_mk_pgd(pudp) \
__pgd(__phys_to_pgd_val(__pa(pudp)) | PUD_TYPE_TABLE)
+#define kvm_set_pud(pudp, pud) set_pud(pudp, pud)
+
+#define kvm_pfn_pte(pfn, prot) pfn_pte(pfn, prot)
+#define kvm_pfn_pmd(pfn, prot) pfn_pmd(pfn, prot)
+#define kvm_pfn_pud(pfn, prot) pfn_pud(pfn, prot)
+
+#define kvm_pud_pfn(pud) pud_pfn(pud)
+
+#define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd)
+#define kvm_pud_mkhuge(pud) pud_mkhuge(pud)
+
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
{
pte_val(pte) |= PTE_S2_RDWR;
@@ -196,6 +207,12 @@ static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
return pmd;
}
+static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
+{
+ pud_val(pud) |= PUD_S2_RDWR;
+ return pud;
+}
+
static inline pte_t kvm_s2pte_mkexec(pte_t pte)
{
pte_val(pte) &= ~PTE_S2_XN;
@@ -208,6 +225,12 @@ static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
return pmd;
}
+static inline pud_t kvm_s2pud_mkexec(pud_t pud)
+{
+ pud_val(pud) &= ~PUD_S2_XN;
+ return pud;
+}
+
static inline void kvm_set_s2pte_readonly(pte_t *ptep)
{
pteval_t old_pteval, pteval;
@@ -246,6 +269,31 @@ static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}
+static inline void kvm_set_s2pud_readonly(pud_t *pudp)
+{
+ kvm_set_s2pte_readonly((pte_t *)pudp);
+}
+
+static inline bool kvm_s2pud_readonly(pud_t *pudp)
+{
+ return kvm_s2pte_readonly((pte_t *)pudp);
+}
+
+static inline bool kvm_s2pud_exec(pud_t *pudp)
+{
+ return !(READ_ONCE(pud_val(*pudp)) & PUD_S2_XN);
+}
+
+static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
+{
+ return pud_mkyoung(pud);
+}
+
+static inline bool kvm_s2pud_young(pud_t pud)
+{
+ return pud_young(pud);
+}
+
#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
#ifdef __PAGETABLE_PMD_FOLDED
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index b96442960aea..0385752bd079 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -35,15 +35,6 @@
#define PCI_IO_SIZE SZ_16M
/*
- * Log2 of the upper bound of the size of a struct page. Used for sizing
- * the vmemmap region only, does not affect actual memory footprint.
- * We don't use sizeof(struct page) directly since taking its size here
- * requires its definition to be available at this point in the inclusion
- * chain, and it may not be a power of 2 in the first place.
- */
-#define STRUCT_PAGE_MAX_SHIFT 6
-
-/*
* VMEMMAP_SIZE - allows the whole linear region to be covered by
* a struct page array
*/
@@ -62,8 +53,11 @@
#define PAGE_OFFSET (UL(0xffffffffffffffff) - \
(UL(1) << (VA_BITS - 1)) + 1)
#define KIMAGE_VADDR (MODULES_END)
+#define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE)
+#define BPF_JIT_REGION_SIZE (SZ_128M)
+#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
-#define MODULES_VADDR (VA_START + KASAN_SHADOW_SIZE)
+#define MODULES_VADDR (BPF_JIT_REGION_END)
#define MODULES_VSIZE (SZ_128M)
#define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE)
#define PCI_IO_END (VMEMMAP_START - SZ_2M)
@@ -73,15 +67,26 @@
#define KERNEL_START _text
#define KERNEL_END _end
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
+#define MAX_USER_VA_BITS 52
+#else
+#define MAX_USER_VA_BITS VA_BITS
+#endif
+
/*
* KASAN requires 1/8th of the kernel virtual address space for the shadow
* region. KASAN can bloat the stack significantly, so double the (minimum)
- * stack size when KASAN is in use.
+ * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
+ * on.
*/
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_THREAD_SHIFT 2
+#else
#define KASAN_THREAD_SHIFT 1
+#endif /* CONFIG_KASAN_EXTRA */
#else
#define KASAN_SHADOW_SIZE (0)
#define KASAN_THREAD_SHIFT 0
@@ -196,6 +201,9 @@ static inline unsigned long kaslr_offset(void)
return kimage_vaddr - KIMAGE_VADDR;
}
+/* the actual number of bits in a user virtual address */
+extern u64 vabits_user;
+
/*
* Allow all memory at the discovery stage. We will clip it later.
*/
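
Taken together, the BPF_JIT_REGION_* and MODULES_* definitions above give the following low-to-high layout at the bottom of the kernel VA space (sketch):

	/*
	 *   VA_START + KASAN_SHADOW_SIZE == BPF_JIT_REGION_START
	 *	... 128M reserved for BPF JIT images ...
	 *   BPF_JIT_REGION_END == MODULES_VADDR
	 *	... 128M for modules ...
	 *   MODULES_END == KIMAGE_VADDR
	 */
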
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 1e58bf58c22b..2da3e478fd8f 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -35,6 +35,8 @@
#include <asm/sysreg.h>
#include <asm/tlbflush.h>
+extern bool rodata_full;
+
static inline void contextidr_thread_switch(struct task_struct *next)
{
if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
@@ -72,6 +74,9 @@ extern u64 idmap_ptrs_per_pgd;
static inline bool __cpu_uses_extended_idmap(void)
{
+ if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52))
+ return false;
+
return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
}
diff --git a/arch/arm64/include/asm/module.h b/arch/arm64/include/asm/module.h
index 97d0ef12e2ff..905e1bb0e7bd 100644
--- a/arch/arm64/include/asm/module.h
+++ b/arch/arm64/include/asm/module.h
@@ -22,7 +22,7 @@
#ifdef CONFIG_ARM64_MODULE_PLTS
struct mod_plt_sec {
- struct elf64_shdr *plt;
+ int plt_shndx;
int plt_num_entries;
int plt_max_entries;
};
@@ -36,10 +36,12 @@ struct mod_arch_specific {
};
#endif
-u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
+u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
+ void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym);
-u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val);
+u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
+ void *loc, u64 val);
#ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base;
@@ -56,39 +58,19 @@ struct plt_entry {
* is exactly what we are dealing with here, we are free to use x16
* as a scratch register in the PLT veneers.
*/
- __le32 mov0; /* movn x16, #0x.... */
- __le32 mov1; /* movk x16, #0x...., lsl #16 */
- __le32 mov2; /* movk x16, #0x...., lsl #32 */
+ __le32 adrp; /* adrp x16, .... */
+ __le32 add; /* add x16, x16, #0x.... */
__le32 br; /* br x16 */
};
-static inline struct plt_entry get_plt_entry(u64 val)
+static inline bool is_forbidden_offset_for_adrp(void *place)
{
- /*
- * MOVK/MOVN/MOVZ opcode:
- * +--------+------------+--------+-----------+-------------+---------+
- * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
- * +--------+------------+--------+-----------+-------------+---------+
- *
- * Rd := 0x10 (x16)
- * hw := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
- * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
- * sf := 1 (64-bit variant)
- */
- return (struct plt_entry){
- cpu_to_le32(0x92800010 | (((~val ) & 0xffff)) << 5),
- cpu_to_le32(0xf2a00010 | ((( val >> 16) & 0xffff)) << 5),
- cpu_to_le32(0xf2c00010 | ((( val >> 32) & 0xffff)) << 5),
- cpu_to_le32(0xd61f0200)
- };
+ return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
+ cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
+ ((u64)place & 0xfff) >= 0xff8;
}
-static inline bool plt_entries_equal(const struct plt_entry *a,
- const struct plt_entry *b)
-{
- return a->mov0 == b->mov0 &&
- a->mov1 == b->mov1 &&
- a->mov2 == b->mov2;
-}
+struct plt_entry get_plt_entry(u64 dst, void *pc);
+bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
#endif /* __ASM_MODULE_H */
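
The test in is_forbidden_offset_for_adrp() encodes the erratum 843419 trigger window; spelled out:

	/*
	 *   ((u64)place & 0xfff) >= 0xff8
	 *
	 * is true exactly for page offsets 0xff8 and 0xffc, the last two
	 * 4-byte instruction slots of a 4KB page, which is where affected
	 * cores may mis-execute an ADRP.
	 */
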
diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h
new file mode 100644
index 000000000000..2ba6c6b9541f
--- /dev/null
+++ b/arch/arm64/include/asm/neon-intrinsics.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2018 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_NEON_INTRINSICS_H
+#define __ASM_NEON_INTRINSICS_H
+
+#include <asm-generic/int-ll64.h>
+
+/*
+ * In the kernel, u64/s64 are [un]signed long long, not [un]signed long.
+ * So by redefining these macros to the former, we can force gcc-stdint.h
+ * to define uint64_t / int64_t in a compatible manner.
+ */
+
+#ifdef __INT64_TYPE__
+#undef __INT64_TYPE__
+#define __INT64_TYPE__ long long
+#endif
+
+#ifdef __UINT64_TYPE__
+#undef __UINT64_TYPE__
+#define __UINT64_TYPE__ unsigned long long
+#endif
+
+/*
+ * genksyms chokes on the ARM NEON intrinsics system header, but we
+ * don't export anything it defines anyway, so just disregard it when
+ * genksyms executes.
+ */
+#ifndef __GENKSYMS__
+#include <arm_neon.h>
+#endif
+
+#endif /* __ASM_NEON_INTRINSICS_H */
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 21a81b59a0cc..6b81dd8cee01 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -48,263 +48,193 @@ static inline unsigned long __my_cpu_offset(void)
}
#define __my_cpu_offset __my_cpu_offset()
-#define PERCPU_OP(op, asm_op) \
-static inline unsigned long __percpu_##op(void *ptr, \
- unsigned long val, int size) \
+#define PERCPU_RW_OPS(sz) \
+static inline unsigned long __percpu_read_##sz(void *ptr) \
{ \
- unsigned long loop, ret; \
+ return READ_ONCE(*(u##sz *)ptr); \
+} \
\
- switch (size) { \
- case 1: \
- asm ("//__per_cpu_" #op "_1\n" \
- "1: ldxrb %w[ret], %[ptr]\n" \
- #asm_op " %w[ret], %w[ret], %w[val]\n" \
- " stxrb %w[loop], %w[ret], %[ptr]\n" \
- " cbnz %w[loop], 1b" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u8 *)ptr) \
- : [val] "Ir" (val)); \
- break; \
- case 2: \
- asm ("//__per_cpu_" #op "_2\n" \
- "1: ldxrh %w[ret], %[ptr]\n" \
- #asm_op " %w[ret], %w[ret], %w[val]\n" \
- " stxrh %w[loop], %w[ret], %[ptr]\n" \
- " cbnz %w[loop], 1b" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u16 *)ptr) \
- : [val] "Ir" (val)); \
- break; \
- case 4: \
- asm ("//__per_cpu_" #op "_4\n" \
- "1: ldxr %w[ret], %[ptr]\n" \
- #asm_op " %w[ret], %w[ret], %w[val]\n" \
- " stxr %w[loop], %w[ret], %[ptr]\n" \
- " cbnz %w[loop], 1b" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u32 *)ptr) \
- : [val] "Ir" (val)); \
- break; \
- case 8: \
- asm ("//__per_cpu_" #op "_8\n" \
- "1: ldxr %[ret], %[ptr]\n" \
- #asm_op " %[ret], %[ret], %[val]\n" \
- " stxr %w[loop], %[ret], %[ptr]\n" \
- " cbnz %w[loop], 1b" \
- : [loop] "=&r" (loop), [ret] "=&r" (ret), \
- [ptr] "+Q"(*(u64 *)ptr) \
- : [val] "Ir" (val)); \
- break; \
- default: \
- ret = 0; \
- BUILD_BUG(); \
- } \
- \
- return ret; \
-}
-
-PERCPU_OP(add, add)
-PERCPU_OP(and, and)
-PERCPU_OP(or, orr)
-#undef PERCPU_OP
-
-static inline unsigned long __percpu_read(void *ptr, int size)
-{
- unsigned long ret;
-
- switch (size) {
- case 1:
- ret = READ_ONCE(*(u8 *)ptr);
- break;
- case 2:
- ret = READ_ONCE(*(u16 *)ptr);
- break;
- case 4:
- ret = READ_ONCE(*(u32 *)ptr);
- break;
- case 8:
- ret = READ_ONCE(*(u64 *)ptr);
- break;
- default:
- ret = 0;
- BUILD_BUG();
- }
-
- return ret;
+static inline void __percpu_write_##sz(void *ptr, unsigned long val) \
+{ \
+ WRITE_ONCE(*(u##sz *)ptr, (u##sz)val); \
}
-static inline void __percpu_write(void *ptr, unsigned long val, int size)
-{
- switch (size) {
- case 1:
- WRITE_ONCE(*(u8 *)ptr, (u8)val);
- break;
- case 2:
- WRITE_ONCE(*(u16 *)ptr, (u16)val);
- break;
- case 4:
- WRITE_ONCE(*(u32 *)ptr, (u32)val);
- break;
- case 8:
- WRITE_ONCE(*(u64 *)ptr, (u64)val);
- break;
- default:
- BUILD_BUG();
- }
+#define __PERCPU_OP_CASE(w, sfx, name, sz, op_llsc, op_lse) \
+static inline void \
+__percpu_##name##_case_##sz(void *ptr, unsigned long val) \
+{ \
+ unsigned int loop; \
+ u##sz tmp; \
+ \
+ asm volatile (ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ "1: ldxr" #sfx "\t%" #w "[tmp], %[ptr]\n" \
+ #op_llsc "\t%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \
+ " stxr" #sfx "\t%w[loop], %" #w "[tmp], %[ptr]\n" \
+ " cbnz %w[loop], 1b", \
+ /* LSE atomics */ \
+ #op_lse "\t%" #w "[val], %[ptr]\n" \
+ __nops(3)) \
+ : [loop] "=&r" (loop), [tmp] "=&r" (tmp), \
+ [ptr] "+Q"(*(u##sz *)ptr) \
+ : [val] "r" ((u##sz)(val))); \
}
-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
- int size)
-{
- unsigned long ret, loop;
-
- switch (size) {
- case 1:
- asm ("//__percpu_xchg_1\n"
- "1: ldxrb %w[ret], %[ptr]\n"
- " stxrb %w[loop], %w[val], %[ptr]\n"
- " cbnz %w[loop], 1b"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u8 *)ptr)
- : [val] "r" (val));
- break;
- case 2:
- asm ("//__percpu_xchg_2\n"
- "1: ldxrh %w[ret], %[ptr]\n"
- " stxrh %w[loop], %w[val], %[ptr]\n"
- " cbnz %w[loop], 1b"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u16 *)ptr)
- : [val] "r" (val));
- break;
- case 4:
- asm ("//__percpu_xchg_4\n"
- "1: ldxr %w[ret], %[ptr]\n"
- " stxr %w[loop], %w[val], %[ptr]\n"
- " cbnz %w[loop], 1b"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u32 *)ptr)
- : [val] "r" (val));
- break;
- case 8:
- asm ("//__percpu_xchg_8\n"
- "1: ldxr %[ret], %[ptr]\n"
- " stxr %w[loop], %[val], %[ptr]\n"
- " cbnz %w[loop], 1b"
- : [loop] "=&r"(loop), [ret] "=&r"(ret),
- [ptr] "+Q"(*(u64 *)ptr)
- : [val] "r" (val));
- break;
- default:
- ret = 0;
- BUILD_BUG();
- }
-
- return ret;
+#define __PERCPU_RET_OP_CASE(w, sfx, name, sz, op_llsc, op_lse) \
+static inline u##sz \
+__percpu_##name##_return_case_##sz(void *ptr, unsigned long val) \
+{ \
+ unsigned int loop; \
+ u##sz ret; \
+ \
+ asm volatile (ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ "1: ldxr" #sfx "\t%" #w "[ret], %[ptr]\n" \
+ #op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n" \
+ " stxr" #sfx "\t%w[loop], %" #w "[ret], %[ptr]\n" \
+ " cbnz %w[loop], 1b", \
+ /* LSE atomics */ \
+ #op_lse "\t%" #w "[val], %" #w "[ret], %[ptr]\n" \
+ #op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n" \
+ __nops(2)) \
+ : [loop] "=&r" (loop), [ret] "=&r" (ret), \
+ [ptr] "+Q"(*(u##sz *)ptr) \
+ : [val] "r" ((u##sz)(val))); \
+ \
+ return ret; \
}
-/* this_cpu_cmpxchg */
-#define _protect_cmpxchg_local(pcp, o, n) \
-({ \
- typeof(*raw_cpu_ptr(&(pcp))) __ret; \
- preempt_disable(); \
- __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
- preempt_enable(); \
- __ret; \
-})
-
-#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define PERCPU_OP(name, op_llsc, op_lse) \
+ __PERCPU_OP_CASE(w, b, name, 8, op_llsc, op_lse) \
+ __PERCPU_OP_CASE(w, h, name, 16, op_llsc, op_lse) \
+ __PERCPU_OP_CASE(w, , name, 32, op_llsc, op_lse) \
+ __PERCPU_OP_CASE( , , name, 64, op_llsc, op_lse)
+
+#define PERCPU_RET_OP(name, op_llsc, op_lse) \
+ __PERCPU_RET_OP_CASE(w, b, name, 8, op_llsc, op_lse) \
+ __PERCPU_RET_OP_CASE(w, h, name, 16, op_llsc, op_lse) \
+ __PERCPU_RET_OP_CASE(w, , name, 32, op_llsc, op_lse) \
+ __PERCPU_RET_OP_CASE( , , name, 64, op_llsc, op_lse)
+
+PERCPU_RW_OPS(8)
+PERCPU_RW_OPS(16)
+PERCPU_RW_OPS(32)
+PERCPU_RW_OPS(64)
+PERCPU_OP(add, add, stadd)
+PERCPU_OP(andnot, bic, stclr)
+PERCPU_OP(or, orr, stset)
+PERCPU_RET_OP(add, add, ldadd)
+
+#undef PERCPU_RW_OPS
+#undef __PERCPU_OP_CASE
+#undef __PERCPU_RET_OP_CASE
+#undef PERCPU_OP
+#undef PERCPU_RET_OP
+/*
+ * It would be nice to avoid the conditional call into the scheduler when
+ * re-enabling preemption for preemptible kernels, but doing that in a way
+ * which builds inside a module would mean messing directly with the preempt
+ * count. If you do this, peterz and tglx will hunt you down.
+ */
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
- preempt_disable(); \
+ preempt_disable_notrace(); \
__ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
raw_cpu_ptr(&(ptr2)), \
o1, o2, n1, n2); \
- preempt_enable(); \
+ preempt_enable_notrace(); \
__ret; \
})
-#define _percpu_read(pcp) \
+#define _pcp_protect(op, pcp, ...) \
({ \
- typeof(pcp) __retval; \
preempt_disable_notrace(); \
- __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
- sizeof(pcp)); \
+ op(raw_cpu_ptr(&(pcp)), __VA_ARGS__); \
preempt_enable_notrace(); \
- __retval; \
})
-#define _percpu_write(pcp, val) \
-do { \
+#define _pcp_protect_return(op, pcp, args...) \
+({ \
+ typeof(pcp) __retval; \
preempt_disable_notrace(); \
- __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \
- sizeof(pcp)); \
+ __retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args); \
preempt_enable_notrace(); \
-} while(0) \
-
-#define _pcp_protect(operation, pcp, val) \
-({ \
- typeof(pcp) __retval; \
- preempt_disable(); \
- __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
- (val), sizeof(pcp)); \
- preempt_enable(); \
- __retval; \
+ __retval; \
})
-#define _percpu_add(pcp, val) \
- _pcp_protect(__percpu_add, pcp, val)
-
-#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
-
-#define _percpu_and(pcp, val) \
- _pcp_protect(__percpu_and, pcp, val)
-
-#define _percpu_or(pcp, val) \
- _pcp_protect(__percpu_or, pcp, val)
-
-#define _percpu_xchg(pcp, val) (typeof(pcp)) \
- _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
-
-#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
-#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
-#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
-#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
-
-#define this_cpu_add_return_1(pcp, val) _percpu_add_return(pcp, val)
-#define this_cpu_add_return_2(pcp, val) _percpu_add_return(pcp, val)
-#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
-#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)
-
-#define this_cpu_and_1(pcp, val) _percpu_and(pcp, val)
-#define this_cpu_and_2(pcp, val) _percpu_and(pcp, val)
-#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
-#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)
-
-#define this_cpu_or_1(pcp, val) _percpu_or(pcp, val)
-#define this_cpu_or_2(pcp, val) _percpu_or(pcp, val)
-#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
-#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
-
-#define this_cpu_read_1(pcp) _percpu_read(pcp)
-#define this_cpu_read_2(pcp) _percpu_read(pcp)
-#define this_cpu_read_4(pcp) _percpu_read(pcp)
-#define this_cpu_read_8(pcp) _percpu_read(pcp)
-
-#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
-#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
-
-#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
-#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
-#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
-#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
+#define this_cpu_read_1(pcp) \
+ _pcp_protect_return(__percpu_read_8, pcp)
+#define this_cpu_read_2(pcp) \
+ _pcp_protect_return(__percpu_read_16, pcp)
+#define this_cpu_read_4(pcp) \
+ _pcp_protect_return(__percpu_read_32, pcp)
+#define this_cpu_read_8(pcp) \
+ _pcp_protect_return(__percpu_read_64, pcp)
+
+#define this_cpu_write_1(pcp, val) \
+ _pcp_protect(__percpu_write_8, pcp, (unsigned long)val)
+#define this_cpu_write_2(pcp, val) \
+ _pcp_protect(__percpu_write_16, pcp, (unsigned long)val)
+#define this_cpu_write_4(pcp, val) \
+ _pcp_protect(__percpu_write_32, pcp, (unsigned long)val)
+#define this_cpu_write_8(pcp, val) \
+ _pcp_protect(__percpu_write_64, pcp, (unsigned long)val)
+
+#define this_cpu_add_1(pcp, val) \
+ _pcp_protect(__percpu_add_case_8, pcp, val)
+#define this_cpu_add_2(pcp, val) \
+ _pcp_protect(__percpu_add_case_16, pcp, val)
+#define this_cpu_add_4(pcp, val) \
+ _pcp_protect(__percpu_add_case_32, pcp, val)
+#define this_cpu_add_8(pcp, val) \
+ _pcp_protect(__percpu_add_case_64, pcp, val)
+
+#define this_cpu_add_return_1(pcp, val) \
+ _pcp_protect_return(__percpu_add_return_case_8, pcp, val)
+#define this_cpu_add_return_2(pcp, val) \
+ _pcp_protect_return(__percpu_add_return_case_16, pcp, val)
+#define this_cpu_add_return_4(pcp, val) \
+ _pcp_protect_return(__percpu_add_return_case_32, pcp, val)
+#define this_cpu_add_return_8(pcp, val) \
+ _pcp_protect_return(__percpu_add_return_case_64, pcp, val)
+
+#define this_cpu_and_1(pcp, val) \
+ _pcp_protect(__percpu_andnot_case_8, pcp, ~val)
+#define this_cpu_and_2(pcp, val) \
+ _pcp_protect(__percpu_andnot_case_16, pcp, ~val)
+#define this_cpu_and_4(pcp, val) \
+ _pcp_protect(__percpu_andnot_case_32, pcp, ~val)
+#define this_cpu_and_8(pcp, val) \
+ _pcp_protect(__percpu_andnot_case_64, pcp, ~val)
+
+#define this_cpu_or_1(pcp, val) \
+ _pcp_protect(__percpu_or_case_8, pcp, val)
+#define this_cpu_or_2(pcp, val) \
+ _pcp_protect(__percpu_or_case_16, pcp, val)
+#define this_cpu_or_4(pcp, val) \
+ _pcp_protect(__percpu_or_case_32, pcp, val)
+#define this_cpu_or_8(pcp, val) \
+ _pcp_protect(__percpu_or_case_64, pcp, val)
+
+#define this_cpu_xchg_1(pcp, val) \
+ _pcp_protect_return(xchg_relaxed, pcp, val)
+#define this_cpu_xchg_2(pcp, val) \
+ _pcp_protect_return(xchg_relaxed, pcp, val)
+#define this_cpu_xchg_4(pcp, val) \
+ _pcp_protect_return(xchg_relaxed, pcp, val)
+#define this_cpu_xchg_8(pcp, val) \
+ _pcp_protect_return(xchg_relaxed, pcp, val)
+
+#define this_cpu_cmpxchg_1(pcp, o, n) \
+ _pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+#define this_cpu_cmpxchg_2(pcp, o, n) \
+ _pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+#define this_cpu_cmpxchg_4(pcp, o, n) \
+ _pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
+#define this_cpu_cmpxchg_8(pcp, o, n) \
+ _pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
#include <asm-generic/percpu.h>
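
One subtlety worth noting: there is no LSE atomic AND store, so this_cpu_and() is built from an andnot primitive (BIC for LL/SC, STCLR for LSE) with the mask complemented at the call site:

	/*
	 *   x & mask == x & ~(~mask) == bic(x, ~mask)
	 *
	 * so this_cpu_and_N(pcp, val) calls __percpu_andnot_case_N(pcp, ~val),
	 * and STCLR clears exactly the bits that are set in ~val.
	 */
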
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index f9ccc36d3dc3..c593761ba61c 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -24,6 +24,160 @@
#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
/*
+ * Common architectural and microarchitectural event numbers.
+ */
+#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x01
+#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x02
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
+#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x05
+#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x06
+#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x07
+#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
+#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09
+#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x0A
+#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x0B
+#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x0C
+#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x0D
+#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x0E
+#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x0F
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
+#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
+#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
+#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13
+#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x14
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x15
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x16
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x17
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x18
+#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19
+#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x1A
+#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
+#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x1C
+#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D
+#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E
+#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F
+#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20
+#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21
+#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22
+#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23
+#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24
+#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25
+#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26
+#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27
+#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B
+#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C
+#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D
+#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E
+#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F
+#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30
+#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x31
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x32
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x33
+#define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x34
+#define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x35
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x36
+#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x37
+#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x38
+
+/* Statistical profiling extension microarchitectural events */
+#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000
+#define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001
+#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002
+#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003
+
+/* ARMv8 recommended implementation defined event types */
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x41
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x42
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x43
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x44
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x45
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x46
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x47
+#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x48
+
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x4C
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x4D
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x4E
+#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x4F
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x50
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x51
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x52
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x53
+
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x56
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x57
+#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x58
+
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x5C
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x5D
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x5E
+#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x5F
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x60
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x61
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x62
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x63
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x64
+#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x65
+#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x66
+#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x67
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x68
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x69
+#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x6A
+
+#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x6C
+#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x6D
+#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x6E
+#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x6F
+#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x70
+#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x71
+#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x72
+#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x73
+#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x74
+#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x75
+#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x76
+#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x77
+#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x78
+#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x79
+#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x7A
+
+#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x7C
+#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x7D
+#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x7E
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x81
+#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x82
+#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x83
+#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x84
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x86
+#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x87
+#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x88
+
+#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x8A
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x8B
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x8C
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x8D
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x8E
+#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x8F
+#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x90
+#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x91
+
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0xA0
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0xA1
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0xA2
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0xA3
+
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0xA6
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0xA7
+#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0xA8
+
+/*
* Per-CPU PMCR: config reg
*/
#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
@@ -50,21 +204,11 @@
#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
/*
- * PMUv3 event types: required events
- */
-#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
-#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
-#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
-
-/*
* Event filters for PMUv3
*/
-#define ARMV8_PMU_EXCLUDE_EL1 (1 << 31)
-#define ARMV8_PMU_EXCLUDE_EL0 (1 << 30)
-#define ARMV8_PMU_INCLUDE_EL2 (1 << 27)
+#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
+#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
+#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
/*
* PMUSERENR: user enable reg
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 1d7d8da2ef9b..22bb3ae514f5 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -80,7 +80,7 @@
#define PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - CONFIG_PGTABLE_LEVELS)
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
+#define PTRS_PER_PGD (1 << (MAX_USER_VA_BITS - PGDIR_SHIFT))
/*
* Section address mask and size definitions.
@@ -193,6 +193,10 @@
#define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
#define PMD_S2_XN (_AT(pmdval_t, 2) << 53) /* XN[1:0] */
+#define PUD_S2_RDONLY (_AT(pudval_t, 1) << 6) /* HAP[2:1] */
+#define PUD_S2_RDWR (_AT(pudval_t, 3) << 6) /* HAP[2:1] */
+#define PUD_S2_XN (_AT(pudval_t, 2) << 53) /* XN[1:0] */
+
/*
* Memory Attribute override for Stage-2 (MemAttr[3:0])
*/
@@ -224,6 +228,8 @@
#define TCR_TxSZ_WIDTH 6
#define TCR_T0SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET)
+#define TCR_EPD0_SHIFT 7
+#define TCR_EPD0_MASK (UL(1) << TCR_EPD0_SHIFT)
#define TCR_IRGN0_SHIFT 8
#define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NC (UL(0) << TCR_IRGN0_SHIFT)
@@ -231,6 +237,8 @@
#define TCR_IRGN0_WT (UL(2) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBnWA (UL(3) << TCR_IRGN0_SHIFT)
+#define TCR_EPD1_SHIFT 23
+#define TCR_EPD1_MASK (UL(1) << TCR_EPD1_SHIFT)
#define TCR_IRGN1_SHIFT 24
#define TCR_IRGN1_MASK (UL(3) << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_NC (UL(0) << TCR_IRGN1_SHIFT)
@@ -306,4 +314,10 @@
#define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2)
#endif
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
+/* Must be at least 64-byte aligned to prevent corruption of the TTBR */
+#define TTBR1_BADDR_4852_OFFSET (((UL(1) << (52 - PGDIR_SHIFT)) - \
+ (UL(1) << (48 - PGDIR_SHIFT))) * 8)
+#endif
+
#endif
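
The TTBR1 offset above is easier to check with concrete numbers. A worked example, assuming the 64K-page / 3-level configuration that 52-bit VA support targets (PGDIR_SHIFT = 42):

/*
 * pgd entries for a 52-bit VA: 1 << (52 - 42) = 1024
 * pgd entries for a 48-bit VA: 1 << (48 - 42) =   64
 * TTBR1_BADDR_4852_OFFSET = (1024 - 64) * 8    = 7680 bytes
 *
 * i.e. TTBR1_EL1 is wound back so that 48-bit kernel addresses
 * index the tail of a pgd that was sized for 52 bits.
 */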
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 50b1ef8584c0..de70c1eabf33 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -22,6 +22,7 @@
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>
+#include <asm/tlbflush.h>
/*
* VMALLOC range.
@@ -314,6 +315,11 @@ static inline pte_t pud_pte(pud_t pud)
return __pte(pud_val(pud));
}
+static inline pud_t pte_pud(pte_t pte)
+{
+ return __pud(pte_val(pte));
+}
+
static inline pmd_t pud_pmd(pud_t pud)
{
return __pmd(pud_val(pud));
@@ -381,8 +387,12 @@ static inline int pmd_protnone(pmd_t pmd)
#define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+#define pud_young(pud) pte_young(pud_pte(pud))
+#define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud)))
#define pud_write(pud) pte_write(pud_pte(pud))
+#define pud_mkhuge(pud) (__pud(pud_val(pud) & ~PUD_TABLE_BIT))
+
#define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud))
#define __phys_to_pud_val(phys) __phys_to_pte_val(phys)
#define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT)
@@ -685,6 +695,27 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
return __ptep_test_and_clear_young(ptep);
}
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ int young = ptep_test_and_clear_young(vma, address, ptep);
+
+ if (young) {
+ /*
+ * We can elide the trailing DSB here since the worst that can
+ * happen is that a CPU continues to use the young entry in its
+ * TLB and we mistakenly reclaim the associated page. The
+ * window for such an event is bounded by the next
+ * context-switch, which provides a DSB to complete the TLB
+ * invalidation.
+ */
+ flush_tlb_page_nosync(vma, address);
+ }
+
+ return young;
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
new file mode 100644
index 000000000000..15d49515efdd
--- /dev/null
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __ASM_POINTER_AUTH_H
+#define __ASM_POINTER_AUTH_H
+
+#include <linux/bitops.h>
+#include <linux/random.h>
+
+#include <asm/cpufeature.h>
+#include <asm/memory.h>
+#include <asm/sysreg.h>
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+/*
+ * Each key is a 128-bit quantity which is split across a pair of 64-bit
+ * registers (Lo and Hi).
+ */
+struct ptrauth_key {
+ unsigned long lo, hi;
+};
+
+/*
+ * We give each process its own keys, which are shared by all threads. The keys
+ * are inherited upon fork(), and reinitialised upon exec*().
+ */
+struct ptrauth_keys {
+ struct ptrauth_key apia;
+ struct ptrauth_key apib;
+ struct ptrauth_key apda;
+ struct ptrauth_key apdb;
+ struct ptrauth_key apga;
+};
+
+static inline void ptrauth_keys_init(struct ptrauth_keys *keys)
+{
+ if (system_supports_address_auth()) {
+ get_random_bytes(&keys->apia, sizeof(keys->apia));
+ get_random_bytes(&keys->apib, sizeof(keys->apib));
+ get_random_bytes(&keys->apda, sizeof(keys->apda));
+ get_random_bytes(&keys->apdb, sizeof(keys->apdb));
+ }
+
+ if (system_supports_generic_auth())
+ get_random_bytes(&keys->apga, sizeof(keys->apga));
+}
+
+#define __ptrauth_key_install(k, v) \
+do { \
+ struct ptrauth_key __pki_v = (v); \
+ write_sysreg_s(__pki_v.lo, SYS_ ## k ## KEYLO_EL1); \
+ write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \
+} while (0)
+
+static inline void ptrauth_keys_switch(struct ptrauth_keys *keys)
+{
+ if (system_supports_address_auth()) {
+ __ptrauth_key_install(APIA, keys->apia);
+ __ptrauth_key_install(APIB, keys->apib);
+ __ptrauth_key_install(APDA, keys->apda);
+ __ptrauth_key_install(APDB, keys->apdb);
+ }
+
+ if (system_supports_generic_auth())
+ __ptrauth_key_install(APGA, keys->apga);
+}
+
+extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
+
+/*
+ * The EL0 pointer bits used by a pointer authentication code.
+ * This relies on TBI0 being enabled; otherwise bits 63:56 would also apply.
+ */
+#define ptrauth_user_pac_mask() GENMASK(54, vabits_user)
+
+/* Only valid for EL0 TTBR0 instruction pointers */
+static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
+{
+ return ptr & ~ptrauth_user_pac_mask();
+}
+
+#define ptrauth_thread_init_user(tsk) \
+do { \
+ struct task_struct *__ptiu_tsk = (tsk); \
+ ptrauth_keys_init(&__ptiu_tsk->thread.keys_user); \
+ ptrauth_keys_switch(&__ptiu_tsk->thread.keys_user); \
+} while (0)
+
+#define ptrauth_thread_switch(tsk) \
+ ptrauth_keys_switch(&(tsk)->thread.keys_user)
+
+#else /* CONFIG_ARM64_PTR_AUTH */
+#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
+#define ptrauth_strip_insn_pac(lr) (lr)
+#define ptrauth_thread_init_user(tsk)
+#define ptrauth_thread_switch(tsk)
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#endif /* __ASM_POINTER_AUTH_H */
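
The net effect of ptrauth_user_pac_mask() and ptrauth_strip_insn_pac() can be sketched in plain C. A minimal, standalone illustration, assuming vabits_user == 48 and TBI0 enabled, so the PAC occupies bits 54:48 (bit 55 selects the TTBR and is never part of the PAC):

#include <stdint.h>
#include <stdio.h>

/* stand-in for the kernel's GENMASK(54, 48) */
#define PAC_MASK ((((uint64_t)1 << (54 - 48 + 1)) - 1) << 48)

int main(void)
{
	uint64_t ptr = 0x0000aabbccdde000ULL;        /* user VA */
	uint64_t signed_ptr = ptr | (0x35ULL << 48); /* PAC inserted */

	/* ptrauth_strip_insn_pac() equivalent */
	printf("stripped: %#018llx\n",
	       (unsigned long long)(signed_ptr & ~PAC_MASK));
	return 0;
}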
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
new file mode 100644
index 000000000000..d49951647014
--- /dev/null
+++ b/arch/arm64/include/asm/preempt.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_PREEMPT_H
+#define __ASM_PREEMPT_H
+
+#include <linux/thread_info.h>
+
+#define PREEMPT_NEED_RESCHED BIT(32)
+#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
+
+static inline int preempt_count(void)
+{
+ return READ_ONCE(current_thread_info()->preempt.count);
+}
+
+static inline void preempt_count_set(u64 pc)
+{
+ /* Preserve existing value of PREEMPT_NEED_RESCHED */
+ WRITE_ONCE(current_thread_info()->preempt.count, pc);
+}
+
+#define init_task_preempt_count(p) do { \
+ task_thread_info(p)->preempt_count = FORK_PREEMPT_COUNT; \
+} while (0)
+
+#define init_idle_preempt_count(p, cpu) do { \
+ task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+} while (0)
+
+static inline void set_preempt_need_resched(void)
+{
+ current_thread_info()->preempt.need_resched = 0;
+}
+
+static inline void clear_preempt_need_resched(void)
+{
+ current_thread_info()->preempt.need_resched = 1;
+}
+
+static inline bool test_preempt_need_resched(void)
+{
+ return !current_thread_info()->preempt.need_resched;
+}
+
+static inline void __preempt_count_add(int val)
+{
+ u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+ pc += val;
+ WRITE_ONCE(current_thread_info()->preempt.count, pc);
+}
+
+static inline void __preempt_count_sub(int val)
+{
+ u32 pc = READ_ONCE(current_thread_info()->preempt.count);
+ pc -= val;
+ WRITE_ONCE(current_thread_info()->preempt.count, pc);
+}
+
+static inline bool __preempt_count_dec_and_test(void)
+{
+ struct thread_info *ti = current_thread_info();
+ u64 pc = READ_ONCE(ti->preempt_count);
+
+ /* Update only the count field, leaving need_resched unchanged */
+ WRITE_ONCE(ti->preempt.count, --pc);
+
+ /*
+ * If we wrote back all zeroes, then we're preemptible and in
+ * need of a reschedule. Otherwise, we need to reload the
+ * preempt_count in case the need_resched flag was cleared by an
+ * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
+ * pair.
+ */
+ return !pc || !READ_ONCE(ti->preempt_count);
+}
+
+static inline bool should_resched(int preempt_offset)
+{
+ u64 pc = READ_ONCE(current_thread_info()->preempt_count);
+ return pc == preempt_offset;
+}
+
+#ifdef CONFIG_PREEMPT
+void preempt_schedule(void);
+#define __preempt_schedule() preempt_schedule()
+void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
+#endif /* CONFIG_PREEMPT */
+
+#endif /* __ASM_PREEMPT_H */
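
The layout that makes this work: PREEMPT_NEED_RESCHED is bit 32, immediately above the 32-bit count, and it is stored inverted (0 means a reschedule is wanted). A 64-bit load of preempt_count is therefore zero exactly when the count has reached zero and a reschedule is pending, which is what __preempt_count_dec_and_test() exploits. A little-endian sketch of the union:

#include <stdint.h>
#include <stdio.h>

union preempt {
	uint64_t preempt_count;        /* 0 => preemptible + resched needed */
	struct {
		uint32_t count;        /* preemption nesting depth */
		uint32_t need_resched; /* inverted: 0 == resched needed */
	};
};

int main(void)
{
	union preempt p = { .preempt_count = (uint64_t)1 << 32 }; /* PREEMPT_ENABLED */

	p.need_resched = 0; /* set_preempt_need_resched()        */
	p.count += 1;       /* preempt_disable()                 */
	p.count -= 1;       /* preempt_enable()...               */
	/* ...which can now test both halves with one 64-bit load */
	printf("resched? %s\n", p.preempt_count == 0 ? "yes" : "no");
	return 0;
}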
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 6b0d4dff5012..f1a7ab18faf3 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -19,10 +19,8 @@
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H
-#define TASK_SIZE_64 (UL(1) << VA_BITS)
-
-#define KERNEL_DS UL(-1)
-#define USER_DS (TASK_SIZE_64 - 1)
+#define KERNEL_DS UL(-1)
+#define USER_DS ((UL(1) << MAX_USER_VA_BITS) - 1)
/*
* On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
@@ -46,6 +44,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
+#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
#include <asm/types.h>
@@ -53,19 +52,31 @@
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
*/
+
+#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS)
+#define TASK_SIZE_64 (UL(1) << vabits_user)
+
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 UL(0x100000000)
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
+#define DEFAULT_MAP_WINDOW (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64)
#else
#define TASK_SIZE TASK_SIZE_64
+#define DEFAULT_MAP_WINDOW DEFAULT_MAP_WINDOW_64
#endif /* CONFIG_COMPAT */
+#ifdef CONFIG_ARM64_FORCE_52BIT
+#define STACK_TOP_MAX TASK_SIZE_64
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 4))
+#else
+#define STACK_TOP_MAX DEFAULT_MAP_WINDOW_64
+#define TASK_UNMAPPED_BASE (PAGE_ALIGN(DEFAULT_MAP_WINDOW / 4))
+#endif /* CONFIG_ARM64_FORCE_52BIT */
-#define STACK_TOP_MAX TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE 0xffff0000
#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
@@ -74,6 +85,15 @@
#define STACK_TOP STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
+#ifndef CONFIG_ARM64_FORCE_52BIT
+#define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\
+ DEFAULT_MAP_WINDOW)
+
+#define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? \
+ base + TASK_SIZE - DEFAULT_MAP_WINDOW :\
+ base)
+#endif /* CONFIG_ARM64_FORCE_52BIT */
+
extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
@@ -127,6 +147,9 @@ struct thread_struct {
unsigned long fault_address; /* fault info */
unsigned long fault_code; /* ESR_EL1 value */
struct debug_info debug; /* debugging */
+#ifdef CONFIG_ARM64_PTR_AUTH
+ struct ptrauth_keys keys_user;
+#endif
};
static inline void arch_thread_struct_whitelist(unsigned long *offset,
@@ -270,6 +293,9 @@ extern void __init minsigstksz_setup(void);
#define SVE_SET_VL(arg) sve_set_current_vl(arg)
#define SVE_GET_VL() sve_get_current_vl()
+/* PR_PAC_RESET_KEYS prctl */
+#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg)
+
/*
* For CONFIG_GCC_PLUGIN_STACKLEAK
*
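
Because DEFAULT_MAP_WINDOW stays at 48 bits, arch_get_mmap_end() only returns the full 52-bit TASK_SIZE when the caller passes an address hint above the default window, so existing binaries keep 48-bit pointers. A hedged userspace sketch of opting in (the hint value is illustrative; any address above 2^48 will do):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *hint = (void *)(1UL << 50); /* above the 48-bit window */
	void *p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p != MAP_FAILED)
		printf("mapped at %p\n", p);
	return 0;
}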
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index f82b447bd34f..1895561839a9 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -17,15 +17,20 @@
#define __ASM_SMP_H
/* Values for secondary_data.status */
+#define CPU_STUCK_REASON_SHIFT (8)
+#define CPU_BOOT_STATUS_MASK ((1U << CPU_STUCK_REASON_SHIFT) - 1)
-#define CPU_MMU_OFF (-1)
-#define CPU_BOOT_SUCCESS (0)
+#define CPU_MMU_OFF (-1)
+#define CPU_BOOT_SUCCESS (0)
/* The cpu invoked ops->cpu_die, synchronise it with cpu_kill */
-#define CPU_KILL_ME (1)
+#define CPU_KILL_ME (1)
/* The cpu couldn't die gracefully and is looping in the kernel */
-#define CPU_STUCK_IN_KERNEL (2)
+#define CPU_STUCK_IN_KERNEL (2)
/* Fatal system error detected by secondary CPU, crash the system */
-#define CPU_PANIC_KERNEL (3)
+#define CPU_PANIC_KERNEL (3)
+
+#define CPU_STUCK_REASON_52_BIT_VA (1U << CPU_STUCK_REASON_SHIFT)
+#define CPU_STUCK_REASON_NO_GRAN (2U << CPU_STUCK_REASON_SHIFT)
#ifndef __ASSEMBLY__
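
With this encoding, secondary_data.status carries the classic status in the low byte and an optional reason in the bits above it, so one word reports both what went wrong and why. A decoding sketch (illustrative only, mirroring how the boot path might consume these values):

long status = CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA;

if ((status & CPU_BOOT_STATUS_MASK) == CPU_STUCK_IN_KERNEL) {
	if (status & CPU_STUCK_REASON_52_BIT_VA)
		pr_crit("CPU stuck: no 52-bit VA support\n");
	if (status & CPU_STUCK_REASON_NO_GRAN)
		pr_crit("CPU stuck: page-size granule not supported\n");
}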
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
index 58d15be11c4d..5884a2b02827 100644
--- a/arch/arm64/include/asm/stackprotector.h
+++ b/arch/arm64/include/asm/stackprotector.h
@@ -34,7 +34,8 @@ static __always_inline void boot_init_stack_canary(void)
canary &= CANARY_MASK;
current->stack_canary = canary;
- __stack_chk_guard = current->stack_canary;
+ if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK))
+ __stack_chk_guard = current->stack_canary;
}
#endif /* _ASM_STACKPROTECTOR_H */
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
index d352f6df8d2c..5412fa40825e 100644
--- a/arch/arm64/include/asm/stage2_pgtable.h
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -30,16 +30,14 @@
#define pt_levels_pgdir_shift(lvls) ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))
/*
- * The hardware supports concatenation of up to 16 tables at stage2 entry level
- * and we use the feature whenever possible.
+ * The hardware supports concatenation of up to 16 tables at stage2 entry
+ * level and we use the feature whenever possible, which means we resolve 4
+ * additional bits of address at the entry level.
*
- * Now, the minimum number of bits resolved at any level is (PAGE_SHIFT - 3).
- * On arm64, the smallest PAGE_SIZE supported is 4k, which means
- * (PAGE_SHIFT - 3) > 4 holds for all page sizes.
- * This implies, the total number of page table levels at stage2 expected
- * by the hardware is actually the number of levels required for (IPA_SHIFT - 4)
- * in normal translations(e.g, stage1), since we cannot have another level in
- * the range (IPA_SHIFT, IPA_SHIFT - 4).
+ * This implies that the total number of page table levels expected by the
+ * hardware for IPA_SHIFT at stage2 can be calculated using the same logic
+ * as for the (non-collapsible) stage1 page tables, but for (IPA_SHIFT - 4).
*/
#define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr)
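
A worked instance of the comment above, assuming 4K pages (PAGE_SHIFT = 12, so each level resolves 9 bits):

/*
 * 40-bit IPA at stage1: ARM64_HW_PGTABLE_LEVELS(40) = (40 - 4) / 9 = 4 levels
 * at stage2, 16-way concatenation resolves 4 extra top bits, so:
 * stage2_pgtable_levels(40) = ARM64_HW_PGTABLE_LEVELS(40 - 4)
 *                           = (36 - 4) / 9 = 3 levels
 */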
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 842fb9572661..72dc4c011014 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,6 +20,7 @@
#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H
+#include <linux/const.h>
#include <linux/stringify.h>
/*
@@ -104,6 +105,11 @@
#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift))
#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
+#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
+ __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
+
+#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
+
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
@@ -183,6 +189,19 @@
#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
+#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0)
+#define SYS_APIAKEYHI_EL1 sys_reg(3, 0, 2, 1, 1)
+#define SYS_APIBKEYLO_EL1 sys_reg(3, 0, 2, 1, 2)
+#define SYS_APIBKEYHI_EL1 sys_reg(3, 0, 2, 1, 3)
+
+#define SYS_APDAKEYLO_EL1 sys_reg(3, 0, 2, 2, 0)
+#define SYS_APDAKEYHI_EL1 sys_reg(3, 0, 2, 2, 1)
+#define SYS_APDBKEYLO_EL1 sys_reg(3, 0, 2, 2, 2)
+#define SYS_APDBKEYHI_EL1 sys_reg(3, 0, 2, 2, 3)
+
+#define SYS_APGAKEYLO_EL1 sys_reg(3, 0, 2, 3, 0)
+#define SYS_APGAKEYHI_EL1 sys_reg(3, 0, 2, 3, 1)
+
#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0)
@@ -431,27 +450,31 @@
#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
/* Common SCTLR_ELx flags. */
-#define SCTLR_ELx_DSSBS (1UL << 44)
-#define SCTLR_ELx_EE (1 << 25)
-#define SCTLR_ELx_IESB (1 << 21)
-#define SCTLR_ELx_WXN (1 << 19)
-#define SCTLR_ELx_I (1 << 12)
-#define SCTLR_ELx_SA (1 << 3)
-#define SCTLR_ELx_C (1 << 2)
-#define SCTLR_ELx_A (1 << 1)
-#define SCTLR_ELx_M 1
+#define SCTLR_ELx_DSSBS (_BITUL(44))
+#define SCTLR_ELx_ENIA (_BITUL(31))
+#define SCTLR_ELx_ENIB (_BITUL(30))
+#define SCTLR_ELx_ENDA (_BITUL(27))
+#define SCTLR_ELx_EE (_BITUL(25))
+#define SCTLR_ELx_IESB (_BITUL(21))
+#define SCTLR_ELx_WXN (_BITUL(19))
+#define SCTLR_ELx_ENDB (_BITUL(13))
+#define SCTLR_ELx_I (_BITUL(12))
+#define SCTLR_ELx_SA (_BITUL(3))
+#define SCTLR_ELx_C (_BITUL(2))
+#define SCTLR_ELx_A (_BITUL(1))
+#define SCTLR_ELx_M (_BITUL(0))
#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB)
/* SCTLR_EL2 specific flags. */
-#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
- (1 << 18) | (1 << 22) | (1 << 23) | (1 << 28) | \
- (1 << 29))
-#define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \
- (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
- (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
- (1 << 27) | (1 << 30) | (1 << 31) | \
+#define SCTLR_EL2_RES1 ((_BITUL(4)) | (_BITUL(5)) | (_BITUL(11)) | (_BITUL(16)) | \
+ (_BITUL(18)) | (_BITUL(22)) | (_BITUL(23)) | (_BITUL(28)) | \
+ (_BITUL(29)))
+#define SCTLR_EL2_RES0 ((_BITUL(6)) | (_BITUL(7)) | (_BITUL(8)) | (_BITUL(9)) | \
+ (_BITUL(10)) | (_BITUL(13)) | (_BITUL(14)) | (_BITUL(15)) | \
+ (_BITUL(17)) | (_BITUL(20)) | (_BITUL(24)) | (_BITUL(26)) | \
+ (_BITUL(27)) | (_BITUL(30)) | (_BITUL(31)) | \
(0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -473,23 +496,23 @@
#endif
/* SCTLR_EL1 specific flags. */
-#define SCTLR_EL1_UCI (1 << 26)
-#define SCTLR_EL1_E0E (1 << 24)
-#define SCTLR_EL1_SPAN (1 << 23)
-#define SCTLR_EL1_NTWE (1 << 18)
-#define SCTLR_EL1_NTWI (1 << 16)
-#define SCTLR_EL1_UCT (1 << 15)
-#define SCTLR_EL1_DZE (1 << 14)
-#define SCTLR_EL1_UMA (1 << 9)
-#define SCTLR_EL1_SED (1 << 8)
-#define SCTLR_EL1_ITD (1 << 7)
-#define SCTLR_EL1_CP15BEN (1 << 5)
-#define SCTLR_EL1_SA0 (1 << 4)
-
-#define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
- (1 << 29))
-#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
- (1 << 27) | (1 << 30) | (1 << 31) | \
+#define SCTLR_EL1_UCI (_BITUL(26))
+#define SCTLR_EL1_E0E (_BITUL(24))
+#define SCTLR_EL1_SPAN (_BITUL(23))
+#define SCTLR_EL1_NTWE (_BITUL(18))
+#define SCTLR_EL1_NTWI (_BITUL(16))
+#define SCTLR_EL1_UCT (_BITUL(15))
+#define SCTLR_EL1_DZE (_BITUL(14))
+#define SCTLR_EL1_UMA (_BITUL(9))
+#define SCTLR_EL1_SED (_BITUL(8))
+#define SCTLR_EL1_ITD (_BITUL(7))
+#define SCTLR_EL1_CP15BEN (_BITUL(5))
+#define SCTLR_EL1_SA0 (_BITUL(4))
+
+#define SCTLR_EL1_RES1 ((_BITUL(11)) | (_BITUL(20)) | (_BITUL(22)) | (_BITUL(28)) | \
+ (_BITUL(29)))
+#define SCTLR_EL1_RES0 ((_BITUL(6)) | (_BITUL(10)) | (_BITUL(13)) | (_BITUL(17)) | \
+ (_BITUL(27)) | (_BITUL(30)) | (_BITUL(31)) | \
(0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -528,11 +551,25 @@
#define ID_AA64ISAR0_AES_SHIFT 4
/* id_aa64isar1 */
+#define ID_AA64ISAR1_SB_SHIFT 36
+#define ID_AA64ISAR1_GPI_SHIFT 28
+#define ID_AA64ISAR1_GPA_SHIFT 24
#define ID_AA64ISAR1_LRCPC_SHIFT 20
#define ID_AA64ISAR1_FCMA_SHIFT 16
#define ID_AA64ISAR1_JSCVT_SHIFT 12
+#define ID_AA64ISAR1_API_SHIFT 8
+#define ID_AA64ISAR1_APA_SHIFT 4
#define ID_AA64ISAR1_DPB_SHIFT 0
+#define ID_AA64ISAR1_APA_NI 0x0
+#define ID_AA64ISAR1_APA_ARCHITECTED 0x1
+#define ID_AA64ISAR1_API_NI 0x0
+#define ID_AA64ISAR1_API_IMP_DEF 0x1
+#define ID_AA64ISAR1_GPA_NI 0x0
+#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1
+#define ID_AA64ISAR1_GPI_NI 0x0
+#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
+
/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60
#define ID_AA64PFR0_CSV2_SHIFT 56
@@ -676,13 +713,13 @@
#define ZCR_ELx_LEN_SIZE 9
#define ZCR_ELx_LEN_MASK 0x1ff
-#define CPACR_EL1_ZEN_EL1EN (1 << 16) /* enable EL1 access */
-#define CPACR_EL1_ZEN_EL0EN (1 << 17) /* enable EL0 access, if EL1EN set */
+#define CPACR_EL1_ZEN_EL1EN (_BITUL(16)) /* enable EL1 access */
+#define CPACR_EL1_ZEN_EL0EN (_BITUL(17)) /* enable EL0 access, if EL1EN set */
#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
-#define SYS_MPIDR_SAFE_VAL (1UL << 31)
+#define SYS_MPIDR_SAFE_VAL (_BITUL(31))
#ifdef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index cb2c10a8f0a8..bbca68b54732 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -42,7 +42,18 @@ struct thread_info {
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
- int preempt_count; /* 0 => preemptable, <0 => bug */
+ union {
+ u64 preempt_count; /* 0 => preemptible, <0 => bug */
+ struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ u32 need_resched;
+ u32 count;
+#else
+ u32 count;
+ u32 need_resched;
+#endif
+ } preempt;
+ };
};
#define thread_saved_pc(tsk) \
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index c3c0387aee18..3a1870228946 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -21,6 +21,7 @@
#ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>
@@ -41,14 +42,14 @@
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op, \
ARM64_WORKAROUND_REPEAT_TLBI, \
- CONFIG_QCOM_FALKOR_ERRATUM_1009) \
+ CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
: : )
#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \
ALTERNATIVE("nop\n nop", \
"dsb ish\n tlbi " #op ", %0", \
ARM64_WORKAROUND_REPEAT_TLBI, \
- CONFIG_QCOM_FALKOR_ERRATUM_1009) \
+ CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \
: : "r" (arg))
#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)
@@ -164,14 +165,20 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
dsb(ish);
}
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long uaddr)
+static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
+ unsigned long uaddr)
{
unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
dsb(ishst);
__tlbi(vale1is, addr);
__tlbi_user(vale1is, addr);
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+ unsigned long uaddr)
+{
+ flush_tlb_page_nosync(vma, uaddr);
dsb(ish);
}
@@ -179,7 +186,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
* This is meant to avoid soft lock-ups on large TLB flushing ranges and not
* necessarily a performance improvement.
*/
-#define MAX_TLBI_OPS 1024UL
+#define MAX_TLBI_OPS PTRS_PER_PTE
static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
@@ -188,7 +195,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long asid = ASID(vma->vm_mm);
unsigned long addr;
- if ((end - start) > (MAX_TLBI_OPS * stride)) {
+ if ((end - start) >= (MAX_TLBI_OPS * stride)) {
flush_tlb_mm(vma->vm_mm);
return;
}
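
Tying MAX_TLBI_OPS to PTRS_PER_PTE scales the cutoff with page size instead of fixing it at 1024. Worked through for 4K pages:

/*
 * PTRS_PER_PTE = 1 << (PAGE_SHIFT - 3) = 1 << 9 = 512
 * with a 4K stride: 512 * 4K = 2 MiB, so ranges of 2 MiB or more
 * take the flush_tlb_mm() path instead of issuing per-page TLBIs.
 */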
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 07c34087bd5e..fad33f5fde47 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -45,8 +45,7 @@ static inline void set_fs(mm_segment_t fs)
* Prevent a mispredicted conditional call to set_fs from forwarding
* the wrong address limit to access_ok under speculation.
*/
- dsb(nsh);
- isb();
+ spec_bar();
/* On user-mode return, check fs is correct */
set_thread_flag(TIF_FSCHECK);
diff --git a/arch/arm64/include/asm/xor.h b/arch/arm64/include/asm/xor.h
new file mode 100644
index 000000000000..856386ad076c
--- /dev/null
+++ b/arch/arm64/include/asm/xor.h
@@ -0,0 +1,73 @@
+/*
+ * arch/arm64/include/asm/xor.h
+ *
+ * Authors: Jackie Liu <liuyun01@kylinos.cn>
+ * Copyright (C) 2018, Tianjin KYLIN Information Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/hardirq.h>
+#include <asm-generic/xor.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+extern struct xor_block_template const xor_block_inner_neon;
+
+static void
+xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ kernel_neon_begin();
+ xor_block_inner_neon.do_2(bytes, p1, p2);
+ kernel_neon_end();
+}
+
+static void
+xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ kernel_neon_begin();
+ xor_block_inner_neon.do_3(bytes, p1, p2, p3);
+ kernel_neon_end();
+}
+
+static void
+xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ kernel_neon_begin();
+ xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4);
+ kernel_neon_end();
+}
+
+static void
+xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ kernel_neon_begin();
+ xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5);
+ kernel_neon_end();
+}
+
+static struct xor_block_template xor_block_arm64 = {
+ .name = "arm64_neon",
+ .do_2 = xor_neon_2,
+ .do_3 = xor_neon_3,
+ .do_4 = xor_neon_4,
+ .do_5 = xor_neon_5
+};
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_32regs); \
+ if (cpu_has_neon()) { \
+ xor_speed(&xor_block_arm64);\
+ } \
+ } while (0)
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
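
Nothing calls these templates directly; the generic xor layer benchmarks each registered template via xor_speed() at boot and routes xor_blocks() to the winner. A hedged sketch of a consumer, assuming the generic <linux/raid/xor.h> API:

#include <linux/raid/xor.h>

/* XOR two 4 KiB sources into dest; the fastest template
 * (possibly arm64_neon above) was selected at calibration time. */
static void xor_two(unsigned long *dest, unsigned long *s1, unsigned long *s2)
{
	void *srcs[2] = { s1, s2 };

	xor_blocks(2, 4096, dest, srcs);
}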
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 2bcd6e4f3474..5f0750c2199c 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -49,5 +49,8 @@
#define HWCAP_ILRCPC (1 << 26)
#define HWCAP_FLAGM (1 << 27)
#define HWCAP_SSBS (1 << 28)
+#define HWCAP_SB (1 << 29)
+#define HWCAP_PACA (1 << 30)
+#define HWCAP_PACG (1UL << 31)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index a36227fdb084..c2f249bcd829 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -229,6 +229,13 @@ struct user_sve_header {
SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \
: SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags))
+/* pointer authentication masks (NT_ARM_PAC_MASK) */
+
+struct user_pac_mask {
+ __u64 data_mask;
+ __u64 insn_mask;
+};
+
#endif /* __ASSEMBLY__ */
#endif /* _UAPI__ASM_PTRACE_H */
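
struct user_pac_mask is exported through the NT_ARM_PAC_MASK regset, so debuggers can strip PACs from recorded pointers. A minimal ptrace sketch, assuming a stopped tracee `pid`; the fallback define mirrors this series' uapi elf.h update:

#include <elf.h>
#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h> /* struct user_pac_mask */

#ifndef NT_ARM_PAC_MASK
#define NT_ARM_PAC_MASK 0x406
#endif

/* strip the PAC from a return address recorded in the tracee */
static uint64_t strip_pac(pid_t pid, uint64_t recorded_lr)
{
	struct user_pac_mask masks;
	struct iovec iov = { .iov_base = &masks, .iov_len = sizeof(masks) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_ARM_PAC_MASK, &iov))
		return recorded_lr; /* kernel or CPU without PAC support */

	return recorded_lr & ~masks.insn_mask;
}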
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 4c8b13bede80..df08d735b21d 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -30,7 +30,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
-arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
+arm64-obj-$(CONFIG_MODULES) += module.o
arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
@@ -49,14 +49,16 @@ arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o
arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
-arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
+arm64-obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
cpu-reset.o
+arm64-obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
arm64-obj-$(CONFIG_CRASH_CORE) += crash_core.o
arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
+arm64-obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
deleted file mode 100644
index 72f63a59b008..000000000000
--- a/arch/arm64/kernel/arm64ksyms.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Based on arch/arm/kernel/armksyms.c
- *
- * Copyright (C) 2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/cryptohash.h>
-#include <linux/delay.h>
-#include <linux/in6.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/arm-smccc.h>
-#include <linux/kprobes.h>
-
-#include <asm/checksum.h>
-
-EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(clear_page);
-
- /* user mem (segment) */
-EXPORT_SYMBOL(__arch_copy_from_user);
-EXPORT_SYMBOL(__arch_copy_to_user);
-EXPORT_SYMBOL(__arch_clear_user);
-EXPORT_SYMBOL(__arch_copy_in_user);
-
- /* physical memory */
-EXPORT_SYMBOL(memstart_addr);
-
- /* string / mem functions */
-#ifndef CONFIG_KASAN
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(memcmp);
-EXPORT_SYMBOL(memchr);
-#endif
-
-EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(__memset);
-EXPORT_SYMBOL(__memcpy);
-EXPORT_SYMBOL(__memmove);
-
- /* atomic bitops */
-EXPORT_SYMBOL(set_bit);
-EXPORT_SYMBOL(test_and_set_bit);
-EXPORT_SYMBOL(clear_bit);
-EXPORT_SYMBOL(test_and_clear_bit);
-EXPORT_SYMBOL(change_bit);
-EXPORT_SYMBOL(test_and_change_bit);
-
-#ifdef CONFIG_FUNCTION_TRACER
-EXPORT_SYMBOL(_mcount);
-NOKPROBE_SYMBOL(_mcount);
-#endif
-
- /* arm-smccc */
-EXPORT_SYMBOL(__arm_smccc_smc);
-EXPORT_SYMBOL(__arm_smccc_hvc);
-
- /* tishift.S */
-extern long long __ashlti3(long long a, int b);
-EXPORT_SYMBOL(__ashlti3);
-extern long long __ashrti3(long long a, int b);
-EXPORT_SYMBOL(__ashrti3);
-extern long long __lshrti3(long long a, int b);
-EXPORT_SYMBOL(__lshrti3);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 323aeb5f2fe6..65b8afc84466 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -46,6 +46,9 @@ int main(void)
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
#endif
DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
+#ifdef CONFIG_STACKPROTECTOR
+ DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
+#endif
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
BLANK();
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 8021b46c9743..a2be30275a73 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -22,11 +22,11 @@
* __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
* cpu_soft_restart.
*
- * @el2_switch: Flag to indicate a swich to EL2 is needed.
+ * @el2_switch: Flag to indicate a switch to EL2 is needed.
* @entry: Location to jump to for soft reset.
- * arg0: First argument passed to @entry.
- * arg1: Second argument passed to @entry.
- * arg2: Third argument passed to @entry.
+ * arg0: First argument passed to @entry. (relocation list)
+ * arg1: Second argument passed to @entry. (physical kernel entry)
+ * arg2: Third argument passed to @entry. (physical dtb address)
*
* Put the CPU into the same state as it would be if it had been reset, and
* branch to what would be the reset vector. It must be executed with the
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index a509e35132d2..09ac548c9d44 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -135,7 +135,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
- static DEFINE_SPINLOCK(bp_lock);
+ static DEFINE_RAW_SPINLOCK(bp_lock);
int cpu, slot = -1;
/*
@@ -147,7 +147,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
return;
}
- spin_lock(&bp_lock);
+ raw_spin_lock(&bp_lock);
for_each_possible_cpu(cpu) {
if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
@@ -163,7 +163,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
__this_cpu_write(bp_hardening_data.fn, fn);
- spin_unlock(&bp_lock);
+ raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start NULL
@@ -507,38 +507,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
CAP_MIDR_RANGE_LIST(midr_list)
-/*
- * Generic helper for handling capabilties with multiple (match,enable) pairs
- * of call backs, sharing the same capability bit.
- * Iterate over each entry to see if at least one matches.
- */
-static bool __maybe_unused
-multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
-{
- const struct arm64_cpu_capabilities *caps;
-
- for (caps = entry->match_list; caps->matches; caps++)
- if (caps->matches(caps, scope))
- return true;
-
- return false;
-}
-
-/*
- * Take appropriate action for all matching entries in the shared capability
- * entry.
- */
-static void __maybe_unused
-multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
-{
- const struct arm64_cpu_capabilities *caps;
-
- for (caps = entry->match_list; caps->matches; caps++)
- if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
- caps->cpu_enable)
- caps->cpu_enable(caps);
-}
-
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
/*
@@ -570,24 +538,77 @@ static const struct midr_range arm64_harden_el2_vectors[] = {
#endif
-const struct arm64_cpu_capabilities arm64_errata[] = {
+#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
+
+static const struct midr_range arm64_repeat_tlbi_cpus[] = {
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
+ MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1286807
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+#endif
+ {},
+};
+
+#endif
+
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+static const struct midr_range cavium_erratum_27456_cpus[] = {
+ /* Cavium ThunderX, T88 pass 1.x - 2.1 */
+ MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
+ /* Cavium ThunderX, T81 pass 1.0 */
+ MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
+ {},
+};
+#endif
+
+#ifdef CONFIG_CAVIUM_ERRATUM_30115
+static const struct midr_range cavium_erratum_30115_cpus[] = {
+ /* Cavium ThunderX, T88 pass 1.x - 2.2 */
+ MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
+ /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
+ MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
+ /* Cavium ThunderX, T83 pass 1.0 */
+ MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
+ {},
+};
+#endif
+
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
+static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
+ {
+ ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
+ },
+ {
+ .midr_range.model = MIDR_QCOM_KRYO,
+ .matches = is_kryo_midr,
+ },
+ {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+static const struct midr_range workaround_clean_cache[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
defined(CONFIG_ARM64_ERRATUM_827319) || \
defined(CONFIG_ARM64_ERRATUM_824069)
- {
- /* Cortex-A53 r0p[012] */
- .desc = "ARM errata 826319, 827319, 824069",
- .capability = ARM64_WORKAROUND_CLEAN_CACHE,
- ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
- .cpu_enable = cpu_enable_cache_maint_trap,
- },
+ /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
+ MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_819472
+ /* Cortex-A53 r0p[01] : ARM errata 819472 */
+ MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
+#endif
+ {},
+};
#endif
-#ifdef CONFIG_ARM64_ERRATUM_819472
+
+const struct arm64_cpu_capabilities arm64_errata[] = {
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
- /* Cortex-A53 r0p[01] */
- .desc = "ARM errata 819472",
+ .desc = "ARM errata 826319, 827319, 824069, 819472",
.capability = ARM64_WORKAROUND_CLEAN_CACHE,
- ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
+ ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
.cpu_enable = cpu_enable_cache_maint_trap,
},
#endif
@@ -638,40 +659,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
{
- /* Cavium ThunderX, T88 pass 1.x - 2.1 */
- .desc = "Cavium erratum 27456",
- .capability = ARM64_WORKAROUND_CAVIUM_27456,
- ERRATA_MIDR_RANGE(MIDR_THUNDERX,
- 0, 0,
- 1, 1),
- },
- {
- /* Cavium ThunderX, T81 pass 1.0 */
.desc = "Cavium erratum 27456",
.capability = ARM64_WORKAROUND_CAVIUM_27456,
- ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
+ ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
{
- /* Cavium ThunderX, T88 pass 1.x - 2.2 */
.desc = "Cavium erratum 30115",
.capability = ARM64_WORKAROUND_CAVIUM_30115,
- ERRATA_MIDR_RANGE(MIDR_THUNDERX,
- 0, 0,
- 1, 2),
- },
- {
- /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
- .desc = "Cavium erratum 30115",
- .capability = ARM64_WORKAROUND_CAVIUM_30115,
- ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
- },
- {
- /* Cavium ThunderX, T83 pass 1.0 */
- .desc = "Cavium erratum 30115",
- .capability = ARM64_WORKAROUND_CAVIUM_30115,
- ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
+ ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
},
#endif
{
@@ -683,23 +680,17 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
{
- .desc = "Qualcomm Technologies Falkor erratum 1003",
+ .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
- ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
- },
- {
- .desc = "Qualcomm Technologies Kryo erratum 1003",
- .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
- .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
- .midr_range.model = MIDR_QCOM_KRYO,
- .matches = is_kryo_midr,
+ .matches = cpucap_multi_entry_cap_matches,
+ .match_list = qcom_erratum_1003_list,
},
#endif
-#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
+#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
{
- .desc = "Qualcomm Technologies Falkor erratum 1009",
+ .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
.capability = ARM64_WORKAROUND_REPEAT_TLBI,
- ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
+ ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
@@ -740,6 +731,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1165522
+ {
+ /* Cortex-A76 r0p0 to r2p0 */
+ .desc = "ARM erratum 1165522",
+ .capability = ARM64_WORKAROUND_1165522,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index aec5ecb85737..4f272399de89 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -52,6 +52,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
+static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
/*
* Flag to indicate if we have computed the system wide
@@ -141,9 +142,18 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
};
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
+ FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -518,6 +528,29 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
}
extern const struct arm64_cpu_capabilities arm64_errata[];
+static const struct arm64_cpu_capabilities arm64_features[];
+
+static void __init
+init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
+{
+ for (; caps->matches; caps++) {
+ if (WARN(caps->capability >= ARM64_NCAPS,
+ "Invalid capability %d\n", caps->capability))
+ continue;
+ if (WARN(cpu_hwcaps_ptrs[caps->capability],
+ "Duplicate entry for capability %d\n",
+ caps->capability))
+ continue;
+ cpu_hwcaps_ptrs[caps->capability] = caps;
+ }
+}
+
+static void __init init_cpu_hwcaps_indirect_list(void)
+{
+ init_cpu_hwcaps_indirect_list_from_array(arm64_features);
+ init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
+}
+
static void __init setup_boot_cpu_capabilities(void);
void __init init_cpu_features(struct cpuinfo_arm64 *info)
@@ -564,6 +597,12 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
}
/*
+ * Initialize the indirect array of CPU hwcaps capabilities pointers
+ * before we handle the boot CPU below.
+ */
+ init_cpu_hwcaps_indirect_list();
+
+ /*
* Detect and enable early CPU capabilities based on the boot CPU,
* after we have initialised the CPU feature infrastructure.
*/
@@ -915,6 +954,12 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
static const struct midr_range kpti_safe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
{ /* sentinel */ }
};
char const *str = "command line option";
@@ -1145,6 +1190,14 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
}
#endif /* CONFIG_ARM64_RAS_EXTN */
+#ifdef CONFIG_ARM64_PTR_AUTH
+static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
+{
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
+ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
+}
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -1368,22 +1421,115 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_cnp,
},
#endif
+ {
+ .desc = "Speculation barrier (SB)",
+ .capability = ARM64_HAS_SB,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .field_pos = ID_AA64ISAR1_SB_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 1,
+ },
+#ifdef CONFIG_ARM64_PTR_AUTH
+ {
+ .desc = "Address authentication (architected algorithm)",
+ .capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_APA_SHIFT,
+ .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_address_auth,
+ },
+ {
+ .desc = "Address authentication (IMP DEF algorithm)",
+ .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_API_SHIFT,
+ .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
+ .matches = has_cpuid_feature,
+ .cpu_enable = cpu_enable_address_auth,
+ },
+ {
+ .desc = "Generic authentication (architected algorithm)",
+ .capability = ARM64_HAS_GENERIC_AUTH_ARCH,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_GPA_SHIFT,
+ .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
+ .matches = has_cpuid_feature,
+ },
+ {
+ .desc = "Generic authentication (IMP DEF algorithm)",
+ .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .sys_reg = SYS_ID_AA64ISAR1_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64ISAR1_GPI_SHIFT,
+ .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
+ .matches = has_cpuid_feature,
+ },
+#endif /* CONFIG_ARM64_PTR_AUTH */
{},
};
-#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
- { \
- .desc = #cap, \
- .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
- .matches = has_cpuid_feature, \
- .sys_reg = reg, \
- .field_pos = field, \
- .sign = s, \
- .min_field_value = min_value, \
- .hwcap_type = cap_type, \
- .hwcap = cap, \
+#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
+ .matches = has_cpuid_feature, \
+ .sys_reg = reg, \
+ .field_pos = field, \
+ .sign = s, \
+ .min_field_value = min_value,
+
+#define __HWCAP_CAP(name, cap_type, cap) \
+ .desc = name, \
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
+ .hwcap_type = cap_type, \
+ .hwcap = cap, \
+
+#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ HWCAP_CPUID_MATCH(reg, field, s, min_value) \
}
+#define HWCAP_MULTI_CAP(list, cap_type, cap) \
+ { \
+ __HWCAP_CAP(#cap, cap_type, cap) \
+ .matches = cpucap_multi_entry_cap_matches, \
+ .match_list = list, \
+ }
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
+ },
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
+ },
+ {},
+};
+
+static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
+ },
+ {
+ HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
+ FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
+ },
+ {},
+};
+#endif
+
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
@@ -1409,11 +1555,16 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
+ HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
#ifdef CONFIG_ARM64_SVE
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
#endif
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
+#ifdef CONFIG_ARM64_PTR_AUTH
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, HWCAP_PACA),
+ HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, HWCAP_PACG),
+#endif
{},
};
@@ -1482,52 +1633,46 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
cap_set_elf_hwcap(hwcaps);
}
-/*
- * Check if the current CPU has a given feature capability.
- * Should be called from non-preemptible context.
- */
-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
- unsigned int cap)
+static void update_cpu_capabilities(u16 scope_mask)
{
+ int i;
const struct arm64_cpu_capabilities *caps;
- if (WARN_ON(preemptible()))
- return false;
-
- for (caps = cap_array; caps->matches; caps++)
- if (caps->capability == cap)
- return caps->matches(caps, SCOPE_LOCAL_CPU);
-
- return false;
-}
-
-static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
- u16 scope_mask, const char *info)
-{
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
- for (; caps->matches; caps++) {
- if (!(caps->type & scope_mask) ||
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask) ||
+ cpus_have_cap(caps->capability) ||
!caps->matches(caps, cpucap_default_scope(caps)))
continue;
- if (!cpus_have_cap(caps->capability) && caps->desc)
- pr_info("%s %s\n", info, caps->desc);
+ if (caps->desc)
+ pr_info("detected: %s\n", caps->desc);
cpus_set_cap(caps->capability);
}
}
-static void update_cpu_capabilities(u16 scope_mask)
+/*
+ * Enable all the available capabilities on this CPU. The capabilities
+ * with BOOT_CPU scope are handled separately and hence skipped here.
+ */
+static int cpu_enable_non_boot_scope_capabilities(void *__unused)
{
- __update_cpu_capabilities(arm64_errata, scope_mask,
- "enabling workaround for");
- __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
-}
+ int i;
+ u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
-static int __enable_cpu_capability(void *arg)
-{
- const struct arm64_cpu_capabilities *cap = arg;
+ for_each_available_cap(i) {
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
+
+ if (WARN_ON(!cap))
+ continue;
- cap->cpu_enable(cap);
+ if (!(cap->type & non_boot_scope))
+ continue;
+
+ if (cap->cpu_enable)
+ cap->cpu_enable(cap);
+ }
return 0;
}
@@ -1535,21 +1680,29 @@ static int __enable_cpu_capability(void *arg)
* Run through the enabled capabilities and enable() it on all active
* CPUs
*/
-static void __init
-__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
- u16 scope_mask)
+static void __init enable_cpu_capabilities(u16 scope_mask)
{
+ int i;
+ const struct arm64_cpu_capabilities *caps;
+ bool boot_scope;
+
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
- for (; caps->matches; caps++) {
- unsigned int num = caps->capability;
+ boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
- if (!(caps->type & scope_mask) || !cpus_have_cap(num))
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ unsigned int num;
+
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask))
+ continue;
+ num = caps->capability;
+ if (!cpus_have_cap(num))
continue;
/* Ensure cpus_have_const_cap(num) works */
static_branch_enable(&cpu_hwcap_keys[num]);
- if (caps->cpu_enable) {
+ if (boot_scope && caps->cpu_enable)
/*
* Capabilities with SCOPE_BOOT_CPU scope are finalised
* before any secondary CPU boots. Thus, each secondary
@@ -1558,25 +1711,19 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
* the boot CPU, for which the capability must be
* enabled here. This approach avoids costly
* stop_machine() calls for this case.
- *
- * Otherwise, use stop_machine() as it schedules the
- * work allowing us to modify PSTATE, instead of
- * on_each_cpu() which uses an IPI, giving us a PSTATE
- * that disappears when we return.
*/
- if (scope_mask & SCOPE_BOOT_CPU)
- caps->cpu_enable(caps);
- else
- stop_machine(__enable_cpu_capability,
- (void *)caps, cpu_online_mask);
- }
+ caps->cpu_enable(caps);
}
-}
-static void __init enable_cpu_capabilities(u16 scope_mask)
-{
- __enable_cpu_capabilities(arm64_errata, scope_mask);
- __enable_cpu_capabilities(arm64_features, scope_mask);
+ /*
+ * For all non-boot scope capabilities, use stop_machine()
+ * as it schedules the work allowing us to modify PSTATE,
+ * instead of on_each_cpu() which uses an IPI, giving us a
+ * PSTATE that disappears when we return.
+ */
+ if (!boot_scope)
+ stop_machine(cpu_enable_non_boot_scope_capabilities,
+ NULL, cpu_online_mask);
}
/*
@@ -1586,16 +1733,17 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
*
* Returns "false" on conflicts.
*/
-static bool
-__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
- u16 scope_mask)
+static bool verify_local_cpu_caps(u16 scope_mask)
{
+ int i;
bool cpu_has_cap, system_has_cap;
+ const struct arm64_cpu_capabilities *caps;
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
- for (; caps->matches; caps++) {
- if (!(caps->type & scope_mask))
+ for (i = 0; i < ARM64_NCAPS; i++) {
+ caps = cpu_hwcaps_ptrs[i];
+ if (!caps || !(caps->type & scope_mask))
continue;
cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
@@ -1626,7 +1774,7 @@ __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
}
}
- if (caps->matches) {
+ if (i < ARM64_NCAPS) {
pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
smp_processor_id(), caps->capability,
caps->desc, system_has_cap, cpu_has_cap);
@@ -1636,12 +1784,6 @@ __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
return true;
}
-static bool verify_local_cpu_caps(u16 scope_mask)
-{
- return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
- __verify_local_cpu_caps(arm64_features, scope_mask);
-}
-
/*
* Check for CPU features that are used in early boot
* based on the Boot CPU value.
@@ -1750,12 +1892,16 @@ static void __init mark_const_caps_ready(void)
static_branch_enable(&arm64_const_caps_ready);
}
-extern const struct arm64_cpu_capabilities arm64_errata[];
-
-bool this_cpu_has_cap(unsigned int cap)
+bool this_cpu_has_cap(unsigned int n)
{
- return (__this_cpu_has_cap(arm64_features, cap) ||
- __this_cpu_has_cap(arm64_errata, cap));
+ if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
+ const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
+
+ if (cap)
+ return cap->matches(cap, SCOPE_LOCAL_CPU);
+ }
+
+ return false;
}
static void __init setup_system_capabilities(void)
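
From userspace the new capabilities show up as HWCAP bits in the auxiliary vector. A minimal probe, assuming a libc with getauxval() (the fallback define mirrors the uapi hwcap.h above):

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_PACA
#define HWCAP_PACA (1UL << 30)
#endif

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("address auth: %s\n", (hwcap & HWCAP_PACA) ? "yes" : "no");
	return 0;
}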
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index bcc2831399cb..ca0685f33900 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -82,6 +82,9 @@ static const char *const hwcap_str[] = {
"ilrcpc",
"flagm",
"ssbs",
+ "sb",
+ "paca",
+ "pacg",
NULL
};
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 1175f5827ae1..81b8eb5c4633 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -79,7 +79,6 @@
.macro mcount_get_lr reg
ldr \reg, [x29]
ldr \reg, [\reg, #8]
- mcount_adjust_addr \reg, \reg
.endm
.macro mcount_get_lr_addr reg
@@ -121,6 +120,8 @@ skip_ftrace_call: // }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
mcount_exit
ENDPROC(_mcount)
+EXPORT_SYMBOL(_mcount)
+NOKPROBE(_mcount)
#else /* CONFIG_DYNAMIC_FTRACE */
/*
@@ -132,6 +133,8 @@ ENDPROC(_mcount)
ENTRY(_mcount)
ret
ENDPROC(_mcount)
+EXPORT_SYMBOL(_mcount)
+NOKPROBE(_mcount)
/*
* void ftrace_caller(unsigned long return_address)
@@ -148,14 +151,12 @@ ENTRY(ftrace_caller)
mcount_get_pc0 x0 // function's pc
mcount_get_lr x1 // function's lr
- .global ftrace_call
-ftrace_call: // tracer(pc, lr);
+GLOBAL(ftrace_call) // tracer(pc, lr);
nop // This will be replaced with "bl xxx"
// where xxx can be any kind of tracer.
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .global ftrace_graph_call
-ftrace_graph_call: // ftrace_graph_caller();
+GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
nop // If enabled, this will be replaced
// "b ftrace_graph_caller"
#endif
@@ -169,24 +170,6 @@ ENTRY(ftrace_stub)
ENDPROC(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- /* save return value regs*/
- .macro save_return_regs
- sub sp, sp, #64
- stp x0, x1, [sp]
- stp x2, x3, [sp, #16]
- stp x4, x5, [sp, #32]
- stp x6, x7, [sp, #48]
- .endm
-
- /* restore return value regs*/
- .macro restore_return_regs
- ldp x0, x1, [sp]
- ldp x2, x3, [sp, #16]
- ldp x4, x5, [sp, #32]
- ldp x6, x7, [sp, #48]
- add sp, sp, #64
- .endm
-
/*
* void ftrace_graph_caller(void)
*
@@ -197,10 +180,10 @@ ENDPROC(ftrace_stub)
* and run return_to_handler() later on its exit.
*/
ENTRY(ftrace_graph_caller)
- mcount_get_lr_addr x0 // pointer to function's saved lr
- mcount_get_pc x1 // function's pc
+ mcount_get_pc x0 // function's pc
+ mcount_get_lr_addr x1 // pointer to function's saved lr
mcount_get_parent_fp x2 // parent's fp
- bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp)
+ bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)
mcount_exit
ENDPROC(ftrace_graph_caller)
@@ -209,15 +192,27 @@ ENDPROC(ftrace_graph_caller)
* void return_to_handler(void)
*
* Run ftrace_return_to_handler() before going back to parent.
- * @fp is checked against the value passed by ftrace_graph_caller()
- * only when HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
+ * @fp is checked against the value passed by ftrace_graph_caller().
*/
ENTRY(return_to_handler)
- save_return_regs
+ /* save return value regs */
+ sub sp, sp, #64
+ stp x0, x1, [sp]
+ stp x2, x3, [sp, #16]
+ stp x4, x5, [sp, #32]
+ stp x6, x7, [sp, #48]
+
mov x0, x29 // parent's fp
bl ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
mov x30, x0 // restore the original return address
- restore_return_regs
+
+ /* restore return value regs */
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp, #32]
+ ldp x6, x7, [sp, #48]
+ add sp, sp, #64
+
ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 039144ecbcb2..763f03dc4d9e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -344,10 +344,6 @@ alternative_else_nop_endif
ldp x28, x29, [sp, #16 * 14]
ldr lr, [sp, #S_LR]
add sp, sp, #S_FRAME_SIZE // restore sp
- /*
- * ARCH_HAS_MEMBARRIER_SYNC_CORE rely on eret context synchronization
- * when returning from IPI handler, and when returning to user-space.
- */
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
@@ -363,6 +359,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
.else
eret
.endif
+ sb
.endm
.macro irq_stack_entry
@@ -622,10 +619,8 @@ el1_irq:
irq_handler
#ifdef CONFIG_PREEMPT
- ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
- cbnz w24, 1f // preempt count != 0
- ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
+ ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
+ cbnz x24, 1f // preempt count != 0
bl el1_preempt
1:
#endif
@@ -1006,6 +1001,7 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
mrs x30, far_el1
.endif
eret
+ sb
.endm
.align 11
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 50986e388d2b..c1f30f854fb3 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -104,7 +104,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* is added in the future, but for now, the pr_err() below
* deals with a theoretical issue only.
*/
- trampoline = get_plt_entry(addr);
+ trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
if (!plt_entries_equal(mod->arch.ftrace_trampoline,
&trampoline)) {
if (!plt_entries_equal(mod->arch.ftrace_trampoline,
@@ -211,13 +211,11 @@ int __init ftrace_dyn_arch_init(void)
*
* Note that @frame_pointer is used only for sanity check later.
*/
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer)
{
unsigned long return_hooker = (unsigned long)&return_to_handler;
unsigned long old;
- struct ftrace_graph_ent trace;
- int err;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
@@ -229,18 +227,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
*/
old = *parent;
- trace.func = self_addr;
- trace.depth = current->curr_ret_stack + 1;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace))
- return;
-
- err = ftrace_push_return_trace(old, self_addr, &trace.depth,
- frame_pointer, NULL);
- if (err == -EBUSY)
- return;
- else
+ if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
*parent = return_hooker;
}
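For reference, a sketch of the logic that the new function_graph_enter() call consolidates, reconstructed from the open-coded sequence removed above; the actual generic helper (in kernel/trace) differs in detail around depth accounting:

	static int function_graph_enter_sketch(unsigned long ret, unsigned long func,
					       unsigned long fp, unsigned long *retp)
	{
		struct ftrace_graph_ent trace;

		trace.func = func;
		trace.depth = current->curr_ret_stack + 1;

		/* Only trace if the calling function expects to */
		if (!ftrace_graph_entry(&trace))
			return -EBUSY;

		return ftrace_push_return_trace(ret, func, &trace.depth, fp, retp);
	}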
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 4471f570a295..c7213674cb24 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -31,6 +31,7 @@
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
+#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
@@ -91,7 +92,7 @@ _head:
.quad 0 // reserved
.quad 0 // reserved
.quad 0 // reserved
- .ascii "ARM\x64" // Magic number
+ .ascii ARM64_IMAGE_MAGIC // Magic number
#ifdef CONFIG_EFI
.long pe_header - _head // Offset to the PE header.
@@ -318,6 +319,19 @@ __create_page_tables:
adrp x0, idmap_pg_dir
adrp x3, __idmap_text_start // __pa(__idmap_text_start)
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
+ mrs_s x6, SYS_ID_AA64MMFR2_EL1
+ and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ mov x5, #52
+ cbnz x6, 1f
+#endif
+ mov x5, #VA_BITS
+1:
+ adr_l x6, vabits_user
+ str x5, [x6]
+ dmb sy
+ dc ivac, x6 // Invalidate potentially stale cache line
+
/*
* VA_BITS may be too small to allow for an ID mapping to be created
* that covers system RAM if that is located sufficiently high in the
@@ -496,10 +510,9 @@ ENTRY(el2_setup)
#endif
/* Hyp configuration. */
- mov x0, #HCR_RW // 64-bit EL1
+ mov_q x0, HCR_HOST_NVHE_FLAGS
cbz x2, set_hcr
- orr x0, x0, #HCR_TGE // Enable Host Extensions
- orr x0, x0, #HCR_E2H
+ mov_q x0, HCR_HOST_VHE_FLAGS
set_hcr:
msr hcr_el2, x0
isb
@@ -707,6 +720,7 @@ secondary_startup:
/*
* Common entry point for secondary CPUs.
*/
+ bl __cpu_secondary_check52bitva
bl __cpu_setup // initialise processor
adrp x1, swapper_pg_dir
bl __enable_mmu
@@ -769,6 +783,7 @@ ENTRY(__enable_mmu)
phys_to_ttbr x1, x1
phys_to_ttbr x2, x2
msr ttbr0_el1, x2 // load TTBR0
+ offset_ttbr1 x1
msr ttbr1_el1, x1 // load TTBR1
isb
msr sctlr_el1, x0
@@ -784,9 +799,30 @@ ENTRY(__enable_mmu)
ret
ENDPROC(__enable_mmu)
+ENTRY(__cpu_secondary_check52bitva)
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
+ ldr_l x0, vabits_user
+ cmp x0, #52
+ b.ne 2f
+
+ mrs_s x0, SYS_ID_AA64MMFR2_EL1
+ and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
+ cbnz x0, 2f
+
+ update_early_cpu_boot_status \
+ CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
+1: wfe
+ wfi
+ b 1b
+
+#endif
+2: ret
+ENDPROC(__cpu_secondary_check52bitva)
+
__no_granule_support:
/* Indicate that this CPU can't boot and is stuck in the kernel */
- update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
+ update_early_cpu_boot_status \
+ CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
wfe
wfi
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index dd14ab8c9f72..fe36d85c60bd 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -40,6 +40,7 @@
tlbi vmalle1
dsb nsh
phys_to_ttbr \tmp, \page_table
+ offset_ttbr1 \tmp
msr ttbr1_el1, \tmp
isb
.endm
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 6b2686d54411..29cdc99688f3 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -214,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
}
memcpy((void *)dst, src_start, length);
- flush_icache_range(dst, dst + length);
+ __flush_icache_range(dst, dst + length);
pgdp = pgd_offset_raw(allocator(mask), dst_addr);
if (pgd_none(READ_ONCE(*pgdp))) {
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index a820ed07fb80..33f14e484040 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -15,13 +15,15 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __ASM_IMAGE_H
-#define __ASM_IMAGE_H
+#ifndef __ARM64_KERNEL_IMAGE_H
+#define __ARM64_KERNEL_IMAGE_H
#ifndef LINKER_SCRIPT
#error This file should only be included in vmlinux.lds.S
#endif
+#include <asm/image.h>
+
/*
* There aren't any ELF relocations we can use to endian-swap values known only
* at link time (e.g. the subtraction of two symbol addresses), so we must get
@@ -47,19 +49,22 @@
sym##_lo32 = DATA_LE32((data) & 0xffffffff); \
sym##_hi32 = DATA_LE32((data) >> 32)
+#define __HEAD_FLAG(field) (__HEAD_FLAG_##field << \
+ ARM64_IMAGE_FLAG_##field##_SHIFT)
+
#ifdef CONFIG_CPU_BIG_ENDIAN
-#define __HEAD_FLAG_BE 1
+#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_BE
#else
-#define __HEAD_FLAG_BE 0
+#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_LE
#endif
#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
#define __HEAD_FLAG_PHYS_BASE 1
-#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \
- (__HEAD_FLAG_PAGE_SIZE << 1) | \
- (__HEAD_FLAG_PHYS_BASE << 3))
+#define __HEAD_FLAGS (__HEAD_FLAG(BE) | \
+ __HEAD_FLAG(PAGE_SIZE) | \
+ __HEAD_FLAG(PHYS_BASE))
/*
* These will output as part of the Image header, which should be little-endian
@@ -76,16 +81,6 @@
__efistub_stext_offset = stext - _text;
/*
- * Prevent the symbol aliases below from being emitted into the kallsyms
- * table, by forcing them to be absolute symbols (which are conveniently
- * ignored by scripts/kallsyms) rather than section relative symbols.
- * The distinction is only relevant for partial linking, and only for symbols
- * that are defined within a section declaration (which is not the case for
- * the definitions below) so the resulting values will be identical.
- */
-#define KALLSYMS_HIDE(sym) ABSOLUTE(sym)
-
-/*
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
* isolate it from the kernel proper. The following symbols are legally
* accessed by the stub, so provide some aliases to make them accessible.
@@ -94,29 +89,29 @@ __efistub_stext_offset = stext - _text;
* linked at. The routines below are all implemented in assembler in a
* position independent manner
*/
-__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp);
-__efistub_memchr = KALLSYMS_HIDE(__pi_memchr);
-__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
-__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
-__efistub_memset = KALLSYMS_HIDE(__pi_memset);
-__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
-__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen);
-__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
-__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
-__efistub_strrchr = KALLSYMS_HIDE(__pi_strrchr);
-__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
+__efistub_memcmp = __pi_memcmp;
+__efistub_memchr = __pi_memchr;
+__efistub_memcpy = __pi_memcpy;
+__efistub_memmove = __pi_memmove;
+__efistub_memset = __pi_memset;
+__efistub_strlen = __pi_strlen;
+__efistub_strnlen = __pi_strnlen;
+__efistub_strcmp = __pi_strcmp;
+__efistub_strncmp = __pi_strncmp;
+__efistub_strrchr = __pi_strrchr;
+__efistub___flush_dcache_area = __pi___flush_dcache_area;
#ifdef CONFIG_KASAN
-__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy);
-__efistub___memmove = KALLSYMS_HIDE(__pi_memmove);
-__efistub___memset = KALLSYMS_HIDE(__pi_memset);
+__efistub___memcpy = __pi_memcpy;
+__efistub___memmove = __pi_memmove;
+__efistub___memset = __pi_memset;
#endif
-__efistub__text = KALLSYMS_HIDE(_text);
-__efistub__end = KALLSYMS_HIDE(_end);
-__efistub__edata = KALLSYMS_HIDE(_edata);
-__efistub_screen_info = KALLSYMS_HIDE(screen_info);
+__efistub__text = _text;
+__efistub__end = _end;
+__efistub__edata = _edata;
+__efistub_screen_info = screen_info;
#endif
-#endif /* __ASM_IMAGE_H */
+#endif /* __ARM64_KERNEL_IMAGE_H */
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 2b3413549734..7820a4a688fa 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -1239,6 +1239,35 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
+u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_register reg,
+ enum aarch64_insn_adr_type type)
+{
+ u32 insn;
+ s32 offset;
+
+ switch (type) {
+ case AARCH64_INSN_ADR_TYPE_ADR:
+ insn = aarch64_insn_get_adr_value();
+ offset = addr - pc;
+ break;
+ case AARCH64_INSN_ADR_TYPE_ADRP:
+ insn = aarch64_insn_get_adrp_value();
+ offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
+ break;
+ default:
+ pr_err("%s: unknown adr encoding %d\n", __func__, type);
+ return AARCH64_BREAK_FAULT;
+ }
+
+ if (offset < -SZ_1M || offset >= SZ_1M)
+ return AARCH64_BREAK_FAULT;
+
+ insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
+}
+
/*
* Decode the imm field of a branch, and return the byte offset as a
* signed value (so it can be used when computing a new branch
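A standalone demonstration of the immediate computation in aarch64_insn_gen_adr() above, using arbitrary example addresses; ADRP's 21-bit signed immediate counts 4 KiB pages, giving +/- 4 GiB of reach:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SZ_4K 0x1000UL
	#define SZ_1M 0x100000L
	#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		uint64_t pc   = 0xffff000010081234ULL;	/* arbitrary */
		uint64_t addr = 0xffff000010123000ULL;	/* arbitrary */

		/* ADRP is page-relative: pages from page_of(pc) to addr */
		int64_t offset = (int64_t)(addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;

		/* 21-bit signed immediate: +/- 1M pages, i.e. +/- 4 GiB */
		assert(offset >= -SZ_1M && offset < SZ_1M);
		printf("imm21 = %lld pages\n", (long long)offset);
		return 0;
	}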
diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c
new file mode 100644
index 000000000000..07bf740bea91
--- /dev/null
+++ b/arch/arm64/kernel/kexec_image.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kexec image loader
+ *
+ * Copyright (C) 2018 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ */
+
+#define pr_fmt(fmt) "kexec_file(Image): " fmt
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/pe.h>
+#include <linux/string.h>
+#include <linux/verification.h>
+#include <asm/byteorder.h>
+#include <asm/cpufeature.h>
+#include <asm/image.h>
+#include <asm/memory.h>
+
+static int image_probe(const char *kernel_buf, unsigned long kernel_len)
+{
+ const struct arm64_image_header *h =
+ (const struct arm64_image_header *)(kernel_buf);
+
+ if (!h || (kernel_len < sizeof(*h)))
+ return -EINVAL;
+
+ if (memcmp(&h->magic, ARM64_IMAGE_MAGIC, sizeof(h->magic)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void *image_load(struct kimage *image,
+ char *kernel, unsigned long kernel_len,
+ char *initrd, unsigned long initrd_len,
+ char *cmdline, unsigned long cmdline_len)
+{
+ struct arm64_image_header *h;
+ u64 flags, value;
+ bool be_image, be_kernel;
+ struct kexec_buf kbuf;
+ unsigned long text_offset;
+ struct kexec_segment *kernel_segment;
+ int ret;
+
+ /* We don't support crash kernels yet. */
+ if (image->type == KEXEC_TYPE_CRASH)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /*
+ * We require a kernel with an unambiguous Image header. Per
+ * Documentation/arm64/booting.txt, this is the case when image_size
+ * is non-zero (practically speaking, since v3.17).
+ */
+ h = (struct arm64_image_header *)kernel;
+ if (!h->image_size)
+ return ERR_PTR(-EINVAL);
+
+ /* Check cpu features */
+ flags = le64_to_cpu(h->flags);
+ be_image = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_BE);
+ be_kernel = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+ if ((be_image != be_kernel) && !system_supports_mixed_endian())
+ return ERR_PTR(-EINVAL);
+
+ value = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_PAGE_SIZE);
+ if (((value == ARM64_IMAGE_FLAG_PAGE_SIZE_4K) &&
+ !system_supports_4kb_granule()) ||
+ ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_64K) &&
+ !system_supports_64kb_granule()) ||
+ ((value == ARM64_IMAGE_FLAG_PAGE_SIZE_16K) &&
+ !system_supports_16kb_granule()))
+ return ERR_PTR(-EINVAL);
+
+ /* Load the kernel */
+ kbuf.image = image;
+ kbuf.buf_min = 0;
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = false;
+
+ kbuf.buffer = kernel;
+ kbuf.bufsz = kernel_len;
+ kbuf.mem = 0;
+ kbuf.memsz = le64_to_cpu(h->image_size);
+ text_offset = le64_to_cpu(h->text_offset);
+ kbuf.buf_align = MIN_KIMG_ALIGN;
+
+ /* Adjust kernel segment with TEXT_OFFSET */
+ kbuf.memsz += text_offset;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ return ERR_PTR(ret);
+
+ kernel_segment = &image->segment[image->nr_segments - 1];
+ kernel_segment->mem += text_offset;
+ kernel_segment->memsz -= text_offset;
+ image->start = kernel_segment->mem;
+
+ pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kernel_segment->mem, kbuf.bufsz,
+ kernel_segment->memsz);
+
+ /* Load additional data */
+ ret = load_other_segments(image,
+ kernel_segment->mem, kernel_segment->memsz,
+ initrd, initrd_len, cmdline);
+
+ return ERR_PTR(ret);
+}
+
+#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
+static int image_verify_sig(const char *kernel, unsigned long kernel_len)
+{
+ return verify_pefile_signature(kernel, kernel_len, NULL,
+ VERIFYING_KEXEC_PE_SIGNATURE);
+}
+#endif
+
+const struct kexec_file_ops kexec_image_ops = {
+ .probe = image_probe,
+ .load = image_load,
+#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
+ .verify_sig = image_verify_sig,
+#endif
+};
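As a hypothetical userspace cross-check of image_probe(), the following program validates the magic at byte offset 56 of an Image file and prints the two header fields image_load() consumes (the struct mirrors arm64_image_header; assumes a little-endian host):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	struct image_header {
		uint32_t code0, code1;
		uint64_t text_offset;		/* offset 8  */
		uint64_t image_size;		/* offset 16 */
		uint64_t flags;			/* offset 24 */
		uint64_t res2, res3, res4;
		uint32_t magic;			/* offset 56: "ARM\x64" */
		uint32_t res5;
	};

	int main(int argc, char **argv)
	{
		struct image_header h;
		FILE *f;

		if (argc < 2 || !(f = fopen(argv[1], "rb")))
			return 1;
		if (fread(&h, sizeof(h), 1, f) != 1)
			return 1;
		if (memcmp(&h.magic, "ARM\x64", 4))
			return 2;		/* not an arm64 Image */
		printf("text_offset=%#llx image_size=%#llx\n",
		       (unsigned long long)h.text_offset,
		       (unsigned long long)h.image_size);
		return 0;
	}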
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 922add8adb74..aa9c94113700 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -212,9 +212,17 @@ void machine_kexec(struct kimage *kimage)
* uses physical addressing to relocate the new image to its final
* position and transfers control to the image entry point when the
* relocation is complete.
+ * In the kexec case, kimage->start points to purgatory, assuming that
+ * the kernel entry and dtb addresses are embedded in purgatory by
+ * userspace (kexec-tools).
+ * In the kexec_file case, the kernel starts directly without purgatory.
*/
-
- cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, 0);
+ cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
+#ifdef CONFIG_KEXEC_FILE
+ kimage->arch.dtb_mem);
+#else
+ 0);
+#endif
BUG(); /* Should never get here. */
}
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
new file mode 100644
index 000000000000..10e33860e47a
--- /dev/null
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * kexec_file for arm64
+ *
+ * Copyright (C) 2018 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ *
+ * Most code is derived from arm64 port of kexec-tools
+ */
+
+#define pr_fmt(fmt) "kexec_file: " fmt
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/libfdt.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <asm/byteorder.h>
+
+/* relevant device tree properties */
+#define FDT_PROP_INITRD_START "linux,initrd-start"
+#define FDT_PROP_INITRD_END "linux,initrd-end"
+#define FDT_PROP_BOOTARGS "bootargs"
+#define FDT_PROP_KASLR_SEED "kaslr-seed"
+
+const struct kexec_file_ops * const kexec_file_loaders[] = {
+ &kexec_image_ops,
+ NULL
+};
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ vfree(image->arch.dtb);
+ image->arch.dtb = NULL;
+
+ return kexec_image_post_load_cleanup_default(image);
+}
+
+static int setup_dtb(struct kimage *image,
+ unsigned long initrd_load_addr, unsigned long initrd_len,
+ char *cmdline, void *dtb)
+{
+ int off, ret;
+
+ ret = fdt_path_offset(dtb, "/chosen");
+ if (ret < 0)
+ goto out;
+
+ off = ret;
+
+ /* add bootargs */
+ if (cmdline) {
+ ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline);
+ if (ret)
+ goto out;
+ } else {
+ ret = fdt_delprop(dtb, off, FDT_PROP_BOOTARGS);
+ if (ret && (ret != -FDT_ERR_NOTFOUND))
+ goto out;
+ }
+
+ /* add initrd-* */
+ if (initrd_load_addr) {
+ ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_START,
+ initrd_load_addr);
+ if (ret)
+ goto out;
+
+ ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_END,
+ initrd_load_addr + initrd_len);
+ if (ret)
+ goto out;
+ } else {
+ ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_START);
+ if (ret && (ret != -FDT_ERR_NOTFOUND))
+ goto out;
+
+ ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_END);
+ if (ret && (ret != -FDT_ERR_NOTFOUND))
+ goto out;
+ }
+
+ /* add kaslr-seed */
+ ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED);
+ if (ret && (ret != -FDT_ERR_NOTFOUND))
+ goto out;
+
+ if (rng_is_initialized()) {
+ u64 seed = get_random_u64();
+ ret = fdt_setprop_u64(dtb, off, FDT_PROP_KASLR_SEED, seed);
+ if (ret)
+ goto out;
+ } else {
+ pr_notice("RNG is not initialised: omitting \"%s\" property\n",
+ FDT_PROP_KASLR_SEED);
+ }
+
+out:
+ if (ret)
+ return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL;
+
+ return 0;
+}
+
+/*
+ * More space needed so that we can add initrd, bootargs and kaslr-seed.
+ */
+#define DTB_EXTRA_SPACE 0x1000
+
+static int create_dtb(struct kimage *image,
+ unsigned long initrd_load_addr, unsigned long initrd_len,
+ char *cmdline, void **dtb)
+{
+ void *buf;
+ size_t buf_size;
+ int ret;
+
+ buf_size = fdt_totalsize(initial_boot_params)
+ + strlen(cmdline) + DTB_EXTRA_SPACE;
+
+ for (;;) {
+ buf = vmalloc(buf_size);
+ if (!buf)
+ return -ENOMEM;
+
+ /* duplicate a device tree blob */
+ ret = fdt_open_into(initial_boot_params, buf, buf_size);
+ if (ret)
+ return -EINVAL;
+
+ ret = setup_dtb(image, initrd_load_addr, initrd_len,
+ cmdline, buf);
+ if (ret) {
+ vfree(buf);
+ if (ret == -ENOMEM) {
+ /* unlikely, but just in case */
+ buf_size += DTB_EXTRA_SPACE;
+ continue;
+ } else {
+ return ret;
+ }
+ }
+
+ /* trim it */
+ fdt_pack(buf);
+ *dtb = buf;
+
+ return 0;
+ }
+}
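The grow/edit/trim sequence above can be reproduced against stock libfdt from userspace; a hypothetical sketch (invented helper name, error unwinding trimmed for brevity):

	#include <stdlib.h>
	#include <libfdt.h>

	void *edit_bootargs(const void *orig, const char *cmdline)
	{
		size_t sz = fdt_totalsize(orig) + 0x1000;	/* headroom */
		void *copy = malloc(sz);
		int node;

		/* copy and grow the blob in one step */
		if (!copy || fdt_open_into(orig, copy, sz))
			return NULL;
		node = fdt_path_offset(copy, "/chosen");
		if (node < 0 ||
		    fdt_setprop_string(copy, node, "bootargs", cmdline))
			return NULL;
		fdt_pack(copy);			/* trim back to minimal size */
		return copy;
	}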
+
+int load_other_segments(struct kimage *image,
+ unsigned long kernel_load_addr,
+ unsigned long kernel_size,
+ char *initrd, unsigned long initrd_len,
+ char *cmdline)
+{
+ struct kexec_buf kbuf;
+ void *dtb = NULL;
+ unsigned long initrd_load_addr = 0, dtb_len;
+ int ret = 0;
+
+ kbuf.image = image;
+ /* don't allocate anything below the kernel */
+ kbuf.buf_min = kernel_load_addr + kernel_size;
+
+ /* load initrd */
+ if (initrd) {
+ kbuf.buffer = initrd;
+ kbuf.bufsz = initrd_len;
+ kbuf.mem = 0;
+ kbuf.memsz = initrd_len;
+ kbuf.buf_align = 0;
+ /* within a 1GB-aligned window of up to 32GB in size */
+ kbuf.buf_max = round_down(kernel_load_addr, SZ_1G)
+ + (unsigned long)SZ_1G * 32;
+ kbuf.top_down = false;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ goto out_err;
+ initrd_load_addr = kbuf.mem;
+
+ pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ initrd_load_addr, initrd_len, initrd_len);
+ }
+
+ /* load dtb */
+ ret = create_dtb(image, initrd_load_addr, initrd_len, cmdline, &dtb);
+ if (ret) {
+ pr_err("Preparing for new dtb failed\n");
+ goto out_err;
+ }
+
+ dtb_len = fdt_totalsize(dtb);
+ kbuf.buffer = dtb;
+ kbuf.bufsz = dtb_len;
+ kbuf.mem = 0;
+ kbuf.memsz = dtb_len;
+ /* must not cross a 2MB boundary */
+ kbuf.buf_align = SZ_2M;
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = true;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret)
+ goto out_err;
+ image->arch.dtb = dtb;
+ image->arch.dtb_mem = kbuf.mem;
+
+ pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kbuf.mem, dtb_len, dtb_len);
+
+ return 0;
+
+out_err:
+ vfree(dtb);
+ return ret;
+}
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index f0690c2ca3e0..255941394941 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -11,31 +11,91 @@
#include <linux/module.h>
#include <linux/sort.h>
+static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
+ enum aarch64_insn_register reg)
+{
+ u32 adrp, add;
+
+ adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
+ add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_ADSB_ADD);
+
+ return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
+}
+
+struct plt_entry get_plt_entry(u64 dst, void *pc)
+{
+ struct plt_entry plt;
+ static u32 br;
+
+ if (!br)
+ br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
+ AARCH64_INSN_BRANCH_NOLINK);
+
+ plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
+ plt.br = cpu_to_le32(br);
+
+ return plt;
+}
+
+bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b)
+{
+ u64 p, q;
+
+ /*
+ * Check whether both entries refer to the same target:
+ * do the cheapest checks first.
+ * If the 'add' or 'br' opcodes are different, then the target
+ * cannot be the same.
+ */
+ if (a->add != b->add || a->br != b->br)
+ return false;
+
+ p = ALIGN_DOWN((u64)a, SZ_4K);
+ q = ALIGN_DOWN((u64)b, SZ_4K);
+
+ /*
+ * If the 'adrp' opcodes are the same then we just need to check
+ * that they refer to the same 4k region.
+ */
+ if (a->adrp == b->adrp && p == q)
+ return true;
+
+ return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
+ (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
+}
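A toy numeric check of the rule encoded above: two PC-relative ADRP entries at different addresses are equivalent when page-of-entry plus the page-scaled immediate agree (all values arbitrary):

	#include <assert.h>
	#include <stdint.h>

	#define PAGE(x) ((x) & ~0xfffULL)

	int main(void)
	{
		uint64_t a = 0xffff000011001000ULL, imm_a = 0x10; /* pages */
		uint64_t b = 0xffff000011003000ULL, imm_b = 0x0e;

		/* both resolve to 0xffff000011011000 */
		assert(PAGE(a) + (imm_a << 12) == PAGE(b) + (imm_b << 12));
		return 0;
	}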
+
static bool in_init(const struct module *mod, void *loc)
{
return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
}
-u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
+u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
+ void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym)
{
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init;
- struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
+ struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
int i = pltsec->plt_num_entries;
+ int j = i - 1;
u64 val = sym->st_value + rela->r_addend;
- plt[i] = get_plt_entry(val);
+ if (is_forbidden_offset_for_adrp(&plt[i].adrp))
+ i++;
+
+ plt[i] = get_plt_entry(val, &plt[i]);
/*
* Check if the entry we just created is a duplicate. Given that the
* relocations are sorted, this will be the last entry we allocated
* (if one exists).
*/
- if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
- return (u64)&plt[i - 1];
+ if (j >= 0 && plt_entries_equal(plt + i, plt + j))
+ return (u64)&plt[j];
- pltsec->plt_num_entries++;
+ pltsec->plt_num_entries += i - j;
if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
return 0;
@@ -43,41 +103,31 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
}
#ifdef CONFIG_ARM64_ERRATUM_843419
-u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val)
+u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
+ void *loc, u64 val)
{
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init;
- struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
+ struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
int i = pltsec->plt_num_entries++;
- u32 mov0, mov1, mov2, br;
+ u32 br;
int rd;
if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
return 0;
+ if (is_forbidden_offset_for_adrp(&plt[i].adrp))
+ i = pltsec->plt_num_entries++;
+
/* get the destination register of the ADRP instruction */
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
le32_to_cpup((__le32 *)loc));
- /* generate the veneer instructions */
- mov0 = aarch64_insn_gen_movewide(rd, (u16)~val, 0,
- AARCH64_INSN_VARIANT_64BIT,
- AARCH64_INSN_MOVEWIDE_INVERSE);
- mov1 = aarch64_insn_gen_movewide(rd, (u16)(val >> 16), 16,
- AARCH64_INSN_VARIANT_64BIT,
- AARCH64_INSN_MOVEWIDE_KEEP);
- mov2 = aarch64_insn_gen_movewide(rd, (u16)(val >> 32), 32,
- AARCH64_INSN_VARIANT_64BIT,
- AARCH64_INSN_MOVEWIDE_KEEP);
br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
AARCH64_INSN_BRANCH_NOLINK);
- plt[i] = (struct plt_entry){
- cpu_to_le32(mov0),
- cpu_to_le32(mov1),
- cpu_to_le32(mov2),
- cpu_to_le32(br)
- };
+ plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
+ plt[i].br = cpu_to_le32(br);
return (u64)&plt[i];
}
@@ -193,6 +243,15 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
break;
}
}
+
+ if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
+ cpus_have_const_cap(ARM64_WORKAROUND_843419))
+ /*
+ * Add some slack so we can skip PLT slots that may trigger
+ * the erratum due to the placement of the ADRP instruction.
+ */
+ ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));
+
return ret;
}
@@ -202,7 +261,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
unsigned long core_plts = 0;
unsigned long init_plts = 0;
Elf64_Sym *syms = NULL;
- Elf_Shdr *tramp = NULL;
+ Elf_Shdr *pltsec, *tramp = NULL;
int i;
/*
@@ -211,9 +270,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
*/
for (i = 0; i < ehdr->e_shnum; i++) {
if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
- mod->arch.core.plt = sechdrs + i;
+ mod->arch.core.plt_shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
- mod->arch.init.plt = sechdrs + i;
+ mod->arch.init.plt_shndx = i;
else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
!strcmp(secstrings + sechdrs[i].sh_name,
".text.ftrace_trampoline"))
@@ -222,7 +281,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
syms = (Elf64_Sym *)sechdrs[i].sh_addr;
}
- if (!mod->arch.core.plt || !mod->arch.init.plt) {
+ if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
pr_err("%s: module PLT section(s) missing\n", mod->name);
return -ENOEXEC;
}
@@ -254,17 +313,19 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
sechdrs[i].sh_info, dstsec);
}
- mod->arch.core.plt->sh_type = SHT_NOBITS;
- mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.core.plt->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
+ pltsec = sechdrs + mod->arch.core.plt_shndx;
+ pltsec->sh_type = SHT_NOBITS;
+ pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ pltsec->sh_addralign = L1_CACHE_BYTES;
+ pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
mod->arch.core.plt_num_entries = 0;
mod->arch.core.plt_max_entries = core_plts;
- mod->arch.init.plt->sh_type = SHT_NOBITS;
- mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
- mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
- mod->arch.init.plt->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
+ pltsec = sechdrs + mod->arch.init.plt_shndx;
+ pltsec->sh_type = SHT_NOBITS;
+ pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ pltsec->sh_addralign = L1_CACHE_BYTES;
+ pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
mod->arch.init.plt_num_entries = 0;
mod->arch.init.plt_max_entries = init_plts;
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f0f27aeefb73..f713e2fc4d75 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -198,13 +198,12 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
return 0;
}
-static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
+static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
+ __le32 *place, u64 val)
{
u32 insn;
- if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
- !cpus_have_const_cap(ARM64_WORKAROUND_843419) ||
- ((u64)place & 0xfff) < 0xff8)
+ if (!is_forbidden_offset_for_adrp(place))
return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
AARCH64_INSN_IMM_ADR);
@@ -215,7 +214,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
insn &= ~BIT(31);
} else {
/* out of range for ADR -> emit a veneer */
- val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
+ val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
if (!val)
return -ENOEXEC;
insn = aarch64_insn_gen_branch_imm((u64)place, val,
@@ -368,7 +367,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
- ovf = reloc_insn_adrp(me, loc, val);
+ ovf = reloc_insn_adrp(me, sechdrs, loc, val);
if (ovf && ovf != -ERANGE)
return ovf;
break;
@@ -413,7 +412,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
ovf == -ERANGE) {
- val = module_emit_plt_entry(me, loc, &rel[i], sym);
+ val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
if (!val)
return -ENOEXEC;
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index bcafd7dcfe8b..94754f07f67a 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -18,6 +18,7 @@
#include <linux/perf_event.h>
#include <linux/uaccess.h>
+#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
struct frame_tail {
@@ -35,6 +36,7 @@ user_backtrace(struct frame_tail __user *tail,
{
struct frame_tail buftail;
unsigned long err;
+ unsigned long lr;
/* Also check accessibility of one struct frame_tail beyond */
if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
@@ -47,7 +49,9 @@ user_backtrace(struct frame_tail __user *tail,
if (err)
return NULL;
- perf_callchain_store(entry, buftail.lr);
+ lr = ptrauth_strip_insn_pac(buftail.lr);
+
+ perf_callchain_store(entry, lr);
/*
* Frame pointers should strictly progress back up the stack
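A standalone sketch of what the strip amounts to, assuming 48-bit user VAs: the PAC sits in bits 54 down to the VA width (bit 55 selects the TTBR), so clearing that field recovers the raw link register value:

	#include <assert.h>
	#include <stdint.h>

	#define VA_BITS 48	/* assumed configuration */
	/* bits [54 : VA_BITS] hold the PAC for a user pointer */
	#define PAC_MASK (((1ULL << (55 - VA_BITS)) - 1) << VA_BITS)

	static uint64_t strip_pac(uint64_t ptr)
	{
		return ptr & ~PAC_MASK;
	}

	int main(void)
	{
		uint64_t va = 0x0000123456789abcULL;	/* plain 48-bit VA */

		/* a signed copy with an arbitrary PAC strips back to va */
		assert(strip_pac(va | (0x2bULL << VA_BITS)) == va);
		return 0;
	}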
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index e213f8e867f6..1620a371b1f5 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1,5 +1,5 @@
/*
- * PMU support
+ * ARMv8 PMUv3 Performance Events handling code.
*
* Copyright (C) 2012 ARM Limited
* Author: Will Deacon <will.deacon@arm.com>
@@ -30,149 +30,6 @@
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
-/*
- * ARMv8 PMUv3 Performance Events handling code.
- * Common event types (some are defined in asm/perf_event.h).
- */
-
-/* At least one of the following is required. */
-#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
-#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
-
-/* Common architectural events. */
-#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x06
-#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x07
-#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09
-#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x0A
-#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x0B
-#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x0C
-#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x0D
-#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x0E
-#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x0F
-#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x1C
-#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E
-#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21
-
-/* Common microarchitectural events. */
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x01
-#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x02
-#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x05
-#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13
-#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x14
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x15
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x16
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x17
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x18
-#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19
-#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x1A
-#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D
-#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F
-#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20
-#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22
-#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23
-#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24
-#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25
-#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27
-#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B
-#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C
-#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D
-#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E
-#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F
-#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30
-
-/* ARMv8 recommended implementation defined event types */
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x41
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x42
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x43
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x44
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x45
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x46
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x47
-#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x48
-
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x4C
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x4D
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x4E
-#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x4F
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x50
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x51
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x52
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x53
-
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x56
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x57
-#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x58
-
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x5C
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x5D
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x5E
-#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x5F
-
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x60
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x61
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x62
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x63
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x64
-#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x65
-
-#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x66
-#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x67
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x68
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x69
-#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x6A
-
-#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x6C
-#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x6D
-#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x6E
-#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x6F
-#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x70
-#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x71
-#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x72
-#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x73
-#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x74
-#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x75
-#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x76
-#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x77
-#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x78
-#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x79
-#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x7A
-
-#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x7C
-#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x7D
-#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x7E
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x81
-#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x82
-#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x83
-#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x84
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x86
-#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x87
-#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x88
-
-#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x8A
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x8B
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x8C
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x8D
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x8E
-#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x8F
-#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x90
-#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x91
-
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0xA0
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0xA1
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0xA2
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0xA3
-
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0xA6
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0xA7
-#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0xA8
-
/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
@@ -183,12 +40,10 @@
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED
-/* PMUv3 HW events mapping. */
-
/*
* ARMv8 Architectural defined events, not all of these may
- * be supported on any given implementation. Undefined events will
- * be disabled at run-time.
+ * be supported on any given implementation. Unsupported events will
+ * be disabled at run-time based on the PMCEID registers.
*/
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
@@ -210,8 +65,6 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
- [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
- [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
@@ -224,8 +77,6 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
- [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -370,6 +221,18 @@ ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
+ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS);
+ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE);
+ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS);
+ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK);
+ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK);
+ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD);
+ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD);
+ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD);
+ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP);
+ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED);
+ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE);
+ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION);
static struct attribute *armv8_pmuv3_event_attrs[] = {
&armv8_event_attr_sw_incr.attr.attr,
@@ -420,6 +283,18 @@ static struct attribute *armv8_pmuv3_event_attrs[] = {
&armv8_event_attr_l2i_tlb_refill.attr.attr,
&armv8_event_attr_l2d_tlb.attr.attr,
&armv8_event_attr_l2i_tlb.attr.attr,
+ &armv8_event_attr_remote_access.attr.attr,
+ &armv8_event_attr_ll_cache.attr.attr,
+ &armv8_event_attr_ll_cache_miss.attr.attr,
+ &armv8_event_attr_dtlb_walk.attr.attr,
+ &armv8_event_attr_itlb_walk.attr.attr,
+ &armv8_event_attr_ll_cache_rd.attr.attr,
+ &armv8_event_attr_ll_cache_miss_rd.attr.attr,
+ &armv8_event_attr_remote_access_rd.attr.attr,
+ &armv8_event_attr_sample_pop.attr.attr,
+ &armv8_event_attr_sample_feed.attr.attr,
+ &armv8_event_attr_sample_filtrate.attr.attr,
+ &armv8_event_attr_sample_collision.attr.attr,
NULL,
};
@@ -434,7 +309,13 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj,
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
- if (test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
+ if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
+ test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
+ return attr->mode;
+
+ pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
+ if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
+ test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
return attr->mode;
return 0;
@@ -1009,7 +890,7 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
if (armv8pmu_event_is_64bit(event))
event->hw.flags |= ARMPMU_EVT_64BIT;
- /* Onl expose micro/arch events supported by this PMU */
+ /* Only expose micro/arch events supported by this PMU */
if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
&& test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
return hw_event_id;
@@ -1061,6 +942,7 @@ static void __armv8pmu_probe_pmu(void *info)
struct armv8pmu_probe_info *probe = info;
struct arm_pmu *cpu_pmu = probe->pmu;
u64 dfr0;
+ u64 pmceid_raw[2];
u32 pmceid[2];
int pmuver;
@@ -1079,11 +961,17 @@ static void __armv8pmu_probe_pmu(void *info)
/* Add the CPU cycles counter */
cpu_pmu->num_events += 1;
- pmceid[0] = read_sysreg(pmceid0_el0);
- pmceid[1] = read_sysreg(pmceid1_el0);
+ pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
+ pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);
bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+
+ pmceid[0] = pmceid_raw[0] >> 32;
+ pmceid[1] = pmceid_raw[1] >> 32;
+
+ bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
+ pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
}
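The extra bitmap works because PMCEID0/1_EL0 are 64-bit registers: the low halves enumerate the common events (0x0000-0x003f) and the high halves the ARMv8.1 extended range based at 0x4000. A standalone sketch of the decoding (register values would come from hardware):

	#include <stdbool.h>
	#include <stdint.h>

	static bool event_supported(uint64_t pmceid0, uint64_t pmceid1,
				    unsigned int ev)
	{
		if (ev < 0x40)		/* common events 0x00-0x3f */
			return ((ev < 0x20 ? pmceid0 : pmceid1)
				>> (ev % 0x20)) & 1;
		if (ev >= 0x4000 && ev < 0x4040) {	/* extended IDs */
			unsigned int bit = ev - 0x4000;
			uint64_t reg = (bit < 0x20 ? pmceid0 : pmceid1) >> 32;

			return (reg >> (bit % 0x20)) & 1;
		}
		return false;
	}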
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
@@ -1109,16 +997,16 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
if (ret)
return ret;
- cpu_pmu->handle_irq = armv8pmu_handle_irq,
- cpu_pmu->enable = armv8pmu_enable_event,
- cpu_pmu->disable = armv8pmu_disable_event,
- cpu_pmu->read_counter = armv8pmu_read_counter,
- cpu_pmu->write_counter = armv8pmu_write_counter,
- cpu_pmu->get_event_idx = armv8pmu_get_event_idx,
- cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx,
- cpu_pmu->start = armv8pmu_start,
- cpu_pmu->stop = armv8pmu_stop,
- cpu_pmu->reset = armv8pmu_reset,
+ cpu_pmu->handle_irq = armv8pmu_handle_irq;
+ cpu_pmu->enable = armv8pmu_enable_event;
+ cpu_pmu->disable = armv8pmu_disable_event;
+ cpu_pmu->read_counter = armv8pmu_read_counter;
+ cpu_pmu->write_counter = armv8pmu_write_counter;
+ cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
+ cpu_pmu->start = armv8pmu_start;
+ cpu_pmu->stop = armv8pmu_stop;
+ cpu_pmu->reset = armv8pmu_reset;
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
cpu_pmu->filter_match = armv8pmu_filter_match;
@@ -1274,6 +1162,7 @@ static struct platform_driver armv8_pmu_driver = {
.driver = {
.name = ARMV8_PMU_PDEV_NAME,
.of_match_table = armv8_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
},
.probe = armv8_pmu_device_probe,
};
diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
new file mode 100644
index 000000000000..c507b584259d
--- /dev/null
+++ b/arch/arm64/kernel/pointer_auth.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/errno.h>
+#include <linux/prctl.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <asm/cpufeature.h>
+#include <asm/pointer_auth.h>
+
+int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
+{
+ struct ptrauth_keys *keys = &tsk->thread.keys_user;
+ unsigned long addr_key_mask = PR_PAC_APIAKEY | PR_PAC_APIBKEY |
+ PR_PAC_APDAKEY | PR_PAC_APDBKEY;
+ unsigned long key_mask = addr_key_mask | PR_PAC_APGAKEY;
+
+ if (!system_supports_address_auth() && !system_supports_generic_auth())
+ return -EINVAL;
+
+ if (!arg) {
+ ptrauth_keys_init(keys);
+ ptrauth_keys_switch(keys);
+ return 0;
+ }
+
+ if (arg & ~key_mask)
+ return -EINVAL;
+
+ if (((arg & addr_key_mask) && !system_supports_address_auth()) ||
+ ((arg & PR_PAC_APGAKEY) && !system_supports_generic_auth()))
+ return -EINVAL;
+
+ if (arg & PR_PAC_APIAKEY)
+ get_random_bytes(&keys->apia, sizeof(keys->apia));
+ if (arg & PR_PAC_APIBKEY)
+ get_random_bytes(&keys->apib, sizeof(keys->apib));
+ if (arg & PR_PAC_APDAKEY)
+ get_random_bytes(&keys->apda, sizeof(keys->apda));
+ if (arg & PR_PAC_APDBKEY)
+ get_random_bytes(&keys->apdb, sizeof(keys->apdb));
+ if (arg & PR_PAC_APGAKEY)
+ get_random_bytes(&keys->apga, sizeof(keys->apga));
+
+ ptrauth_keys_switch(keys);
+
+ return 0;
+}
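A hypothetical userspace caller of the prctl implemented above; the constants are from <linux/prctl.h> and are repeated only in case older toolchain headers lack them:

	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_PAC_RESET_KEYS
	#define PR_PAC_RESET_KEYS	54
	#define PR_PAC_APIAKEY		(1UL << 0)
	#endif

	int main(void)
	{
		/* arg == 0: regenerate every key this task has */
		if (prctl(PR_PAC_RESET_KEYS, 0, 0, 0, 0))
			perror("reset all keys");

		/* regenerate just the primary instruction key */
		if (prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0))
			perror("reset APIA key");
		return 0;
	}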
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index d9a4c2d6dd8b..e0a443730e04 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -57,9 +57,10 @@
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
+#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
@@ -429,6 +430,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
contextidr_thread_switch(next);
entry_task_switch(next);
uao_thread_switch(next);
+ ptrauth_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -496,4 +498,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
void arch_setup_new_exec(void)
{
current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+
+ ptrauth_thread_init_user(current);
}
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 1710a2d01669..9dce33b0e260 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -46,6 +46,7 @@
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pgtable.h>
+#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
@@ -956,6 +957,30 @@ out:
#endif /* CONFIG_ARM64_SVE */
+#ifdef CONFIG_ARM64_PTR_AUTH
+static int pac_mask_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ /*
+ * The PAC bits can differ across data and instruction pointers
+ * depending on TCR_EL1.TBID*, which we may make use of in future, so
+ * we expose separate masks.
+ */
+ unsigned long mask = ptrauth_user_pac_mask();
+ struct user_pac_mask uregs = {
+ .data_mask = mask,
+ .insn_mask = mask,
+ };
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1);
+}
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
enum aarch64_regset {
REGSET_GPR,
REGSET_FPR,
@@ -968,6 +993,9 @@ enum aarch64_regset {
#ifdef CONFIG_ARM64_SVE
REGSET_SVE,
#endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+ REGSET_PAC_MASK,
+#endif
};
static const struct user_regset aarch64_regsets[] = {
@@ -1037,6 +1065,16 @@ static const struct user_regset aarch64_regsets[] = {
.get_size = sve_get_size,
},
#endif
+#ifdef CONFIG_ARM64_PTR_AUTH
+ [REGSET_PAC_MASK] = {
+ .core_note_type = NT_ARM_PAC_MASK,
+ .n = sizeof(struct user_pac_mask) / sizeof(u64),
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .get = pac_mask_get,
+ /* this cannot be set dynamically */
+ },
+#endif
};
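A hypothetical debugger-side read of the new regset via PTRACE_GETREGSET; NT_ARM_PAC_MASK and the two-u64 layout mirror the kernel definitions above:

	#include <stdio.h>
	#include <stdint.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	#ifndef NT_ARM_PAC_MASK
	#define NT_ARM_PAC_MASK 0x406
	#endif

	struct user_pac_mask {
		uint64_t data_mask;
		uint64_t insn_mask;
	};

	int read_pac_masks(pid_t pid)	/* pid must already be traced */
	{
		struct user_pac_mask masks;
		struct iovec iov = {
			.iov_base = &masks,
			.iov_len  = sizeof(masks),
		};

		/* fails with EINVAL if address auth is unsupported */
		if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_PAC_MASK, &iov))
			return -1;
		printf("data %#llx insn %#llx\n",
		       (unsigned long long)masks.data_mask,
		       (unsigned long long)masks.insn_mask);
		return 0;
	}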
static const struct user_regset_view user_aarch64_view = {
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
index f407e422a720..95fd94209aae 100644
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -32,6 +32,7 @@
ENTRY(arm64_relocate_new_kernel)
/* Setup the list loop variables. */
+ mov x18, x2 /* x18 = dtb address */
mov x17, x1 /* x17 = kimage_start */
mov x16, x0 /* x16 = kimage_head */
raw_dcache_line_size x15, x0 /* x15 = dcache line size */
@@ -107,7 +108,7 @@ ENTRY(arm64_relocate_new_kernel)
isb
/* Start new image. */
- mov x0, xzr
+ mov x0, x18
mov x1, xzr
mov x2, xzr
mov x3, xzr
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f4fc1e0544b7..4b0e1231625c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -388,6 +388,7 @@ static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
offset, KIMAGE_VADDR);
+ pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
} else {
pr_emerg("Kernel Offset: disabled\n");
}
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index 62522342e1e4..184332286a81 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -13,7 +13,9 @@
*/
#include <linux/linkage.h>
#include <linux/arm-smccc.h>
+
#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
.macro SMCCC instr
.cfi_startproc
@@ -40,6 +42,7 @@
ENTRY(__arm_smccc_smc)
SMCCC smc
ENDPROC(__arm_smccc_smc)
+EXPORT_SYMBOL(__arm_smccc_smc)
/*
* void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -50,3 +53,4 @@ ENDPROC(__arm_smccc_smc)
ENTRY(__arm_smccc_hvc)
SMCCC hvc
ENDPROC(__arm_smccc_hvc)
+EXPORT_SYMBOL(__arm_smccc_hvc)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 96b8f2f51ab2..1598d6f7200a 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -141,6 +141,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
}
} else {
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
+ return ret;
}
secondary_data.task = NULL;
@@ -151,7 +152,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
if (status == CPU_MMU_OFF)
status = READ_ONCE(__early_cpu_boot_status);
- switch (status) {
+ switch (status & CPU_BOOT_STATUS_MASK) {
default:
pr_err("CPU%u: failed in unknown state : 0x%lx\n",
cpu, status);
@@ -165,6 +166,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
case CPU_STUCK_IN_KERNEL:
pr_crit("CPU%u: is stuck in kernel\n", cpu);
+ if (status & CPU_STUCK_REASON_52_BIT_VA)
+ pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
+ if (status & CPU_STUCK_REASON_NO_GRAN)
+ pr_crit("CPU%u: does not support %luK granule \n", cpu, PAGE_SIZE / SZ_1K);
cpus_stuck_in_kernel++;
break;
case CPU_PANIC_KERNEL:
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 03b00007553d..7fa008374907 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -99,7 +99,8 @@ SECTIONS
*(.discard)
*(.discard.*)
*(.interp .dynamic)
- *(.dynsym .dynstr .hash)
+ *(.dynsym .dynstr .hash .gnu.hash)
+ *(.eh_frame)
}
. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -192,12 +193,12 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES)
- .rela : ALIGN(8) {
+ .rela.dyn : ALIGN(8) {
*(.rela .rela*)
}
- __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
- __rela_size = SIZEOF(.rela);
+ __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
+ __rela_size = SIZEOF(.rela.dyn);
. = ALIGN(SEGMENT_ALIGN);
__initdata_end = .;
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 00d422336a45..f39801e4136c 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -236,24 +236,3 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
}
}
}
-
-
-/*
- * After successfully emulating an instruction, we might want to
- * return to user space with a KVM_EXIT_DEBUG. We can only do this
- * once the emulation is complete, though, so for userspace emulations
- * we have to wait until we have re-entered KVM before calling this
- * helper.
- *
- * Return true (and set exit_reason) to return to userspace or false
- * if no further action is required.
- */
-bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
- return true;
- }
- return false;
-}
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 35a81bebd02b..0b7983442071 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -173,6 +173,23 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+/*
+ * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
+ * a NOP).
+ */
+static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ /*
+ * We don't currently support ptrauth in a guest, and we mask the ID
+ * registers to prevent well-behaved guests from trying to make use of
+ * it.
+ *
+ * Inject an UNDEF, as if the feature really isn't present.
+ */
+ kvm_inject_undefined(vcpu);
+ return 1;
+}
+
static exit_handle_fn arm_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
@@ -195,6 +212,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug,
[ESR_ELx_EC_BRK64] = kvm_handle_guest_debug,
[ESR_ELx_EC_FP_ASIMD] = handle_no_fpsimd,
+ [ESR_ELx_EC_PAC] = kvm_handle_ptrauth,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
@@ -229,13 +247,6 @@ static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
handled = exit_handler(vcpu, run);
}
- /*
- * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
- * structure if we need to return to userspace.
- */
- if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
- handled = 0;
-
return handled;
}
@@ -269,12 +280,7 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
case ARM_EXCEPTION_IRQ:
return 1;
case ARM_EXCEPTION_EL1_SERROR:
- /* We may still need to return for single-step */
- if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
- && kvm_arm_handle_step_debug(vcpu, run))
- return 0;
- else
- return 1;
+ return 1;
case ARM_EXCEPTION_TRAP:
return handle_trap_exceptions(vcpu, run);
case ARM_EXCEPTION_HYP_GONE:
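For context, the handler table above is indexed directly by the ESR exception class; a minimal sketch of the dispatch (the real lookup lives in kvm_get_exit_handler()):

	static exit_handle_fn lookup(u32 esr)
	{
		u8 ec = esr >> ESR_ELx_EC_SHIFT;	/* exception class, ESR[31:26] */

		/* unknown classes fall through to kvm_handle_unknown_ec */
		return arm_exit_handlers[ec];
	}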
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index fad1e164fe48..675fdc186e3b 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -83,6 +83,7 @@ ENTRY(__guest_enter)
// Do not touch any register after this!
eret
+ sb
ENDPROC(__guest_enter)
ENTRY(__guest_exit)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index b1f14f736962..73c1b483ec39 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -96,6 +96,7 @@ el1_sync: // Guest trapped into EL2
do_el2_call
eret
+ sb
el1_hvc_guest:
/*
@@ -146,6 +147,7 @@ wa_epilogue:
mov x0, xzr
add sp, sp, #16
eret
+ sb
el1_trap:
get_vcpu_ptr x1, x0
@@ -199,6 +201,7 @@ el2_error:
b.ne __hyp_panic
mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
eret
+ sb
ENTRY(__hyp_do_panic)
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -207,6 +210,7 @@ ENTRY(__hyp_do_panic)
ldr lr, =panic
msr elr_el2, lr
eret
+ sb
ENDPROC(__hyp_do_panic)
ENTRY(__hyp_panic)
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 7cc175c88a37..b0b1478094b4 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -143,6 +143,14 @@ static void deactivate_traps_vhe(void)
{
extern char vectors[]; /* kernel exception vectors */
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+
+ /*
+ * ARM erratum 1165522 requires the actual execution of the above
+ * before we can switch to the EL2/EL0 translation regime used by
+ * the host.
+ */
+ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
+
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);
}
@@ -157,7 +165,7 @@ static void __hyp_text __deactivate_traps_nvhe(void)
mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
write_sysreg(mdcr_el2, mdcr_el2);
- write_sysreg(HCR_RW, hcr_el2);
+ write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}
@@ -305,33 +313,6 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
return true;
}
-/* Skip an instruction which has been emulated. Returns true if
- * execution can continue or false if we need to exit hyp mode because
- * single-step was in effect.
- */
-static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
-{
- *vcpu_pc(vcpu) = read_sysreg_el2(elr);
-
- if (vcpu_mode_is_32bit(vcpu)) {
- vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
- kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
- } else {
- *vcpu_pc(vcpu) += 4;
- }
-
- write_sysreg_el2(*vcpu_pc(vcpu), elr);
-
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- vcpu->arch.fault.esr_el2 =
- (ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
- return false;
- } else {
- return true;
- }
-}
-
static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
{
struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state;
@@ -420,20 +401,12 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (valid) {
int ret = __vgic_v2_perform_cpuif_access(vcpu);
- if (ret == 1 && __skip_instr(vcpu))
+ if (ret == 1)
return true;
- if (ret == -1) {
- /* Promote an illegal access to an
- * SError. If we would be returning
- * due to single-step clear the SS
- * bit so handle_exit knows what to
- * do after dealing with the error.
- */
- if (!__skip_instr(vcpu))
- *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+ /* Promote an illegal access to an SError. */
+ if (ret == -1)
*exit_code = ARM_EXCEPTION_EL1_SERROR;
- }
goto exit;
}
@@ -444,7 +417,7 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
int ret = __vgic_v3_perform_cpuif_access(vcpu);
- if (ret == 1 && __skip_instr(vcpu))
+ if (ret == 1)
return true;
}
@@ -499,8 +472,19 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_save_host_state_vhe(host_ctxt);
- __activate_traps(vcpu);
+ /*
+ * ARM erratum 1165522 requires us to configure both stage 1 and
+ * stage 2 translation for the guest context before we clear
+ * HCR_EL2.TGE.
+ *
+ * We have already configured the guest's stage 1 translation in
+ * kvm_vcpu_load_sysregs above. We must now call __activate_vm
+ * before __activate_traps, because __activate_vm configures
+ * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
+ * (among other things).
+ */
__activate_vm(vcpu->kvm);
+ __activate_traps(vcpu);
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
@@ -545,8 +529,8 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
__sysreg_save_state_nvhe(host_ctxt);
- __activate_traps(vcpu);
__activate_vm(kern_hyp_va(vcpu->kvm));
+ __activate_traps(vcpu);
__hyp_vgic_restore_state(vcpu);
__timer_enable_traps(vcpu);
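Both run loops above now call the helpers in the same order; a condensed sketch of the constraint the new comment spells out:

	__activate_vm(vcpu->kvm);	/* bring up stage 2 (VTTBR_EL2) first */
	__activate_traps(vcpu);		/* only then clear HCR_EL2.TGE */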
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 4dbd9c69a96d..76c30866069e 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -15,20 +15,54 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/irqflags.h>
+
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+struct tlb_inv_context {
+ unsigned long flags;
+ u64 tcr;
+ u64 sctlr;
+};
+
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
u64 val;
+ local_irq_save(cxt->flags);
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+ /*
+ * For CPUs that are affected by ARM erratum 1165522, we
+ * cannot trust stage-1 to be in a correct state at that
+ * point. Since we do not want to force a full load of the
+ * vcpu state, we prevent the EL1 page-table walker from
+ * allocating new TLBs. This is done by setting the EPD bits
+ * in the TCR_EL1 register. We also need to prevent it from
+ * allocating IPA->PA walks, so we enable the S1 MMU...
+ */
+ val = cxt->tcr = read_sysreg_el1(tcr);
+ val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
+ write_sysreg_el1(val, tcr);
+ val = cxt->sctlr = read_sysreg_el1(sctlr);
+ val |= SCTLR_ELx_M;
+ write_sysreg_el1(val, sctlr);
+ }
+
/*
* With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
* most TLB operations target EL2/EL0. In order to affect the
* guest TLBs (EL1/EL0), we need to change one of these two
* bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
* let's flip TGE before executing the TLB operation.
+ *
+ * ARM erratum 1165522 requires some special handling (again),
+ * as we need to make sure both stages of translation are in
+ * place before clearing TGE. __load_guest_stage2() already
+ * has an ISB in order to deal with this.
*/
__load_guest_stage2(kvm);
val = read_sysreg(hcr_el2);
@@ -37,7 +71,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
isb();
}
-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
__load_guest_stage2(kvm);
isb();
@@ -48,7 +83,8 @@ static hyp_alternate_select(__tlb_switch_to_guest,
__tlb_switch_to_guest_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
/*
* We're done with the TLB operation, let's restore the host's
@@ -56,9 +92,19 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
*/
write_sysreg(0, vttbr_el2);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+ isb();
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+ /* Restore the registers to what they were */
+ write_sysreg_el1(cxt->tcr, tcr);
+ write_sysreg_el1(cxt->sctlr, sctlr);
+ }
+
+ local_irq_restore(cxt->flags);
}
-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
write_sysreg(0, vttbr_el2);
}
@@ -70,11 +116,13 @@ static hyp_alternate_select(__tlb_switch_to_host,
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
+ struct tlb_inv_context cxt;
+
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &cxt);
/*
* We could do so much better if we had the VA as well.
@@ -117,36 +165,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
if (!has_vhe() && icache_is_vpipt())
__flush_icache_all();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
+ struct tlb_inv_context cxt;
+
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &cxt);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+ struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &cxt);
__tlbi(vmalle1);
dsb(nsh);
isb();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, &cxt);
}
void __hyp_text __kvm_flush_vm_context(void)
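Each flush helper above now follows the same guard shape, with the saved state travelling on the caller's stack; roughly:

	struct tlb_inv_context cxt;

	__tlb_switch_to_guest()(kvm, &cxt);	/* saves IRQ flags, plus TCR/SCTLR
						 * on erratum-1165522 parts */
	/* ... TLBI plus barriers under the guest's VMID ... */
	__tlb_switch_to_host()(kvm, &cxt);	/* restores exactly what was saved */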
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 215c7c0eb3b0..9cbdd034a563 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -41,7 +41,7 @@ static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
* Returns:
* 1: GICV access successfully performed
* 0: Not a GICV access
- * -1: Illegal GICV access
+ * -1: Illegal GICV access successfully performed
*/
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
@@ -61,12 +61,16 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
return 0;
/* Reject anything but a 32bit access */
- if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
+ if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) {
+ __kvm_skip_instr(vcpu);
return -1;
+ }
/* Not aligned? Don't bother */
- if (fault_ipa & 3)
+ if (fault_ipa & 3) {
+ __kvm_skip_instr(vcpu);
return -1;
+ }
rd = kvm_vcpu_dabt_get_rd(vcpu);
addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
@@ -88,5 +92,7 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
vcpu_set_reg(vcpu, rd, data);
}
+ __kvm_skip_instr(vcpu);
+
return 1;
}
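With the proxy now retiring the trapped instruction itself in every completed case, the caller only acts on the return code; the contract, as used by fixup_guest_exit() in the switch.c hunks earlier in this patch:

	int ret = __vgic_v2_perform_cpuif_access(vcpu);

	if (ret == 1)		/* emulated; PC already advanced */
		return true;
	if (ret == -1)		/* emulated as an error */
		*exit_code = ARM_EXCEPTION_EL1_SERROR;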
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 22fbbdbece3c..e3e37228ae4e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -76,7 +76,7 @@ static bool write_to_read_only(struct kvm_vcpu *vcpu,
return false;
}
-u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg)
+u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
if (!vcpu->arch.sysregs_loaded_on_cpu)
goto immediate_read;
@@ -1040,6 +1040,14 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
kvm_debug("SVE unsupported for guests, suppressing\n");
val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
+ } else if (id == SYS_ID_AA64ISAR1_EL1) {
+ const u64 ptrauth_mask = (0xfUL << ID_AA64ISAR1_APA_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_API_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
+ (0xfUL << ID_AA64ISAR1_GPI_SHIFT);
+ if (val & ptrauth_mask)
+ kvm_debug("ptrauth unsupported for guests, suppressing\n");
+ val &= ~ptrauth_mask;
} else if (id == SYS_ID_AA64MMFR1_EL1) {
if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
kvm_debug("LORegions unsupported for guests, suppressing\n");
@@ -1850,6 +1858,8 @@ static void perform_access(struct kvm_vcpu *vcpu,
struct sys_reg_params *params,
const struct sys_reg_desc *r)
{
+ trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);
+
/*
* Not having an accessor means that we have configured a trap
* that we don't know how to handle. This certainly qualifies
@@ -1912,8 +1922,8 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
WARN_ON(1);
}
- kvm_err("Unsupported guest CP%d access at: %08lx\n",
- cp, *vcpu_pc(vcpu));
+ kvm_err("Unsupported guest CP%d access at: %08lx [%08lx]\n",
+ cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
print_sys_reg_instr(params);
kvm_inject_undefined(vcpu);
}
@@ -2063,8 +2073,8 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
if (likely(r)) {
perform_access(vcpu, params, r);
} else {
- kvm_err("Unsupported guest sys_reg access at: %lx\n",
- *vcpu_pc(vcpu));
+ kvm_err("Unsupported guest sys_reg access at: %lx [%08lx]\n",
+ *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
print_sys_reg_instr(params);
kvm_inject_undefined(vcpu);
}
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index cd710f8b63e0..3b1bc7f01d0b 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -35,6 +35,9 @@ struct sys_reg_params {
};
struct sys_reg_desc {
+ /* Sysreg string for debug */
+ const char *name;
+
/* MRS/MSR instruction which accesses it. */
u8 Op0;
u8 Op1;
@@ -130,6 +133,7 @@ const struct sys_reg_desc *find_reg_by_id(u64 id,
#define Op2(_x) .Op2 = _x
#define SYS_DESC(reg) \
+ .name = #reg, \
Op0(sys_reg_Op0(reg)), Op1(sys_reg_Op1(reg)), \
CRn(sys_reg_CRn(reg)), CRm(sys_reg_CRm(reg)), \
Op2(sys_reg_Op2(reg))
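Since SYS_DESC() stringifies its argument, every descriptor built with it picks up a printable name for free; for example, the existing SCTLR_EL1 entry in sys_regs.c now effectively reads:

	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	/* ... with .name = "SYS_SCTLR_EL1" filled in for the trace event below */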
diff --git a/arch/arm64/kvm/trace.h b/arch/arm64/kvm/trace.h
index 3b82fb1ddd09..eab91ad0effb 100644
--- a/arch/arm64/kvm/trace.h
+++ b/arch/arm64/kvm/trace.h
@@ -3,6 +3,7 @@
#define _TRACE_ARM64_KVM_H
#include <linux/tracepoint.h>
+#include "sys_regs.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
@@ -152,6 +153,40 @@ TRACE_EVENT(kvm_handle_sys_reg,
TP_printk("HSR 0x%08lx", __entry->hsr)
);
+TRACE_EVENT(kvm_sys_access,
+ TP_PROTO(unsigned long vcpu_pc, struct sys_reg_params *params, const struct sys_reg_desc *reg),
+ TP_ARGS(vcpu_pc, params, reg),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, vcpu_pc)
+ __field(bool, is_write)
+ __field(const char *, name)
+ __field(u8, Op0)
+ __field(u8, Op1)
+ __field(u8, CRn)
+ __field(u8, CRm)
+ __field(u8, Op2)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->is_write = params->is_write;
+ __entry->name = reg->name;
+ __entry->Op0 = reg->Op0;
+ __entry->Op0 = reg->Op0;
+ __entry->Op1 = reg->Op1;
+ __entry->CRn = reg->CRn;
+ __entry->CRm = reg->CRm;
+ __entry->Op2 = reg->Op2;
+ ),
+
+ TP_printk("PC: %lx %s (%d,%d,%d,%d,%d) %s",
+ __entry->vcpu_pc, __entry->name ?: "UNKN",
+ __entry->Op0, __entry->Op1, __entry->CRn,
+ __entry->CRm, __entry->Op2,
+ __entry->is_write ? "write" : "read")
+);
+
TRACE_EVENT(kvm_set_guest_debug,
TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
TP_ARGS(vcpu, guest_debug),
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 69ff9887f724..5540a1638baf 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -5,6 +5,12 @@ lib-y := clear_user.o delay.o copy_from_user.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
strchr.o strrchr.o tishift.o
+ifeq ($(CONFIG_KERNEL_MODE_NEON), y)
+obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
+CFLAGS_REMOVE_xor-neon.o += -mgeneral-regs-only
+CFLAGS_xor-neon.o += -ffreestanding
+endif
+
# Tell the compiler to treat all general purpose registers (with the
# exception of the IP registers, which are already handled by the caller
# in case of a PLT) as callee-saved, which allows for efficient runtime
diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S
index ef08e905e35b..6d13b0d64ad5 100644
--- a/arch/arm64/lib/clear_page.S
+++ b/arch/arm64/lib/clear_page.S
@@ -37,3 +37,4 @@ ENTRY(clear_page)
b.ne 1b
ret
ENDPROC(clear_page)
+EXPORT_SYMBOL(clear_page)
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 21ba0b29621b..feb225bd4b80 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -18,6 +18,7 @@
#include <linux/linkage.h>
#include <asm/asm-uaccess.h>
+#include <asm/assembler.h>
.text
@@ -53,6 +54,7 @@ uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
uaccess_disable_not_uao x2, x3
ret
ENDPROC(__arch_clear_user)
+EXPORT_SYMBOL(__arch_clear_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 20305d485046..dea6c762d52f 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -16,8 +16,9 @@
#include <linux/linkage.h>
-#include <asm/cache.h>
#include <asm/asm-uaccess.h>
+#include <asm/assembler.h>
+#include <asm/cache.h>
/*
* Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -71,6 +72,7 @@ ENTRY(__arch_copy_from_user)
mov x0, #0 // Nothing to copy
ret
ENDPROC(__arch_copy_from_user)
+EXPORT_SYMBOL(__arch_copy_from_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 54b75deb1d16..a84227fbf716 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -18,8 +18,9 @@
#include <linux/linkage.h>
-#include <asm/cache.h>
#include <asm/asm-uaccess.h>
+#include <asm/assembler.h>
+#include <asm/cache.h>
/*
* Copy from user space to user space (alignment handled by the hardware)
@@ -73,6 +74,7 @@ ENTRY(__arch_copy_in_user)
mov x0, #0
ret
ENDPROC(__arch_copy_in_user)
+EXPORT_SYMBOL(__arch_copy_in_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
index 076c43715e64..98313e24a987 100644
--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S
@@ -87,3 +87,4 @@ alternative_else_nop_endif
ret
ENDPROC(copy_page)
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index fda6172d6b88..ef44c7ca3ffb 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -16,8 +16,9 @@
#include <linux/linkage.h>
-#include <asm/cache.h>
#include <asm/asm-uaccess.h>
+#include <asm/assembler.h>
+#include <asm/cache.h>
/*
* Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -70,6 +71,7 @@ ENTRY(__arch_copy_to_user)
mov x0, #0
ret
ENDPROC(__arch_copy_to_user)
+EXPORT_SYMBOL(__arch_copy_to_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S
index 5bc1e85b4e1c..f132f2a7522e 100644
--- a/arch/arm64/lib/crc32.S
+++ b/arch/arm64/lib/crc32.S
@@ -15,15 +15,59 @@
.cpu generic+crc
.macro __crc32, c
-0: subs x2, x2, #16
- b.mi 8f
- ldp x3, x4, [x1], #16
+ cmp x2, #16
+ b.lt 8f // less than 16 bytes
+
+ and x7, x2, #0x1f
+ and x2, x2, #~0x1f
+ cbz x7, 32f // multiple of 32 bytes
+
+ and x8, x7, #0xf
+ ldp x3, x4, [x1]
+ add x8, x8, x1
+ add x1, x1, x7
+ ldp x5, x6, [x8]
CPU_BE( rev x3, x3 )
CPU_BE( rev x4, x4 )
+CPU_BE( rev x5, x5 )
+CPU_BE( rev x6, x6 )
+
+ tst x7, #8
+ crc32\c\()x w8, w0, x3
+ csel x3, x3, x4, eq
+ csel w0, w0, w8, eq
+ tst x7, #4
+ lsr x4, x3, #32
+ crc32\c\()w w8, w0, w3
+ csel x3, x3, x4, eq
+ csel w0, w0, w8, eq
+ tst x7, #2
+ lsr w4, w3, #16
+ crc32\c\()h w8, w0, w3
+ csel w3, w3, w4, eq
+ csel w0, w0, w8, eq
+ tst x7, #1
+ crc32\c\()b w8, w0, w3
+ csel w0, w0, w8, eq
+ tst x7, #16
+ crc32\c\()x w8, w0, x5
+ crc32\c\()x w8, w8, x6
+ csel w0, w0, w8, eq
+ cbz x2, 0f
+
+32: ldp x3, x4, [x1], #32
+ sub x2, x2, #32
+ ldp x5, x6, [x1, #-16]
+CPU_BE( rev x3, x3 )
+CPU_BE( rev x4, x4 )
+CPU_BE( rev x5, x5 )
+CPU_BE( rev x6, x6 )
crc32\c\()x w0, w0, x3
crc32\c\()x w0, w0, x4
- b.ne 0b
- ret
+ crc32\c\()x w0, w0, x5
+ crc32\c\()x w0, w0, x6
+ cbnz x2, 32b
+0: ret
8: tbz x2, #3, 4f
ldr x3, [x1], #8
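A C model of the new head handling for lengths of at least 16 bytes (shorter inputs still take the old byte-group tail at label 8; the crc32x/w/h/b and getNN helpers below are hypothetical stand-ins for the CRC32 instructions and little-endian loads):

	head = len & 0x1f;	/* 0..31 bytes, folded up front */
	len &= ~0x1f;		/* main loop then runs in 32-byte steps */

	if (head & 8)  { crc = crc32x(crc, get64(p)); p += 8; }
	if (head & 4)  { crc = crc32w(crc, get32(p)); p += 4; }
	if (head & 2)  { crc = crc32h(crc, get16(p)); p += 2; }
	if (head & 1)  { crc = crc32b(crc, *p);       p += 1; }
	if (head & 16) { crc = crc32x(crc, get64(p));
			 crc = crc32x(crc, get64(p + 8)); p += 16; }

The assembly performs the same selection branchlessly with csel, which is why a tst/csel ladder replaces the old back-branch.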
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
index 0f164a4baf52..f146b7ecd28f 100644
--- a/arch/arm64/lib/memchr.S
+++ b/arch/arm64/lib/memchr.S
@@ -42,3 +42,4 @@ WEAK(memchr)
2: mov x0, #0
ret
ENDPIPROC(memchr)
+EXPORT_SYMBOL_NOKASAN(memchr)
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
index fb295f52e9f8..e2e629b09049 100644
--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S
@@ -256,3 +256,4 @@ CPU_LE( rev data2, data2 )
mov result, #0
ret
ENDPIPROC(memcmp)
+EXPORT_SYMBOL_NOKASAN(memcmp)
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index 67613937711f..b4f82888ed60 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -74,4 +74,6 @@ ENTRY(memcpy)
#include "copy_template.S"
ret
ENDPIPROC(memcpy)
+EXPORT_SYMBOL(memcpy)
ENDPROC(__memcpy)
+EXPORT_SYMBOL(__memcpy)
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
index a5a4459013b1..ef12f719d99d 100644
--- a/arch/arm64/lib/memmove.S
+++ b/arch/arm64/lib/memmove.S
@@ -197,4 +197,6 @@ ENTRY(memmove)
b.ne .Ltail63
ret
ENDPIPROC(memmove)
+EXPORT_SYMBOL(memmove)
ENDPROC(__memmove)
+EXPORT_SYMBOL(__memmove)
diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
index f2670a9f218c..a79cf118d6d0 100644
--- a/arch/arm64/lib/memset.S
+++ b/arch/arm64/lib/memset.S
@@ -216,4 +216,6 @@ ENTRY(memset)
b.ne .Ltail_maybe_long
ret
ENDPIPROC(memset)
+EXPORT_SYMBOL(memset)
ENDPROC(__memset)
+EXPORT_SYMBOL(__memset)
diff --git a/arch/arm64/lib/strchr.S b/arch/arm64/lib/strchr.S
index 7c83091d1bcd..b179421f46c7 100644
--- a/arch/arm64/lib/strchr.S
+++ b/arch/arm64/lib/strchr.S
@@ -40,3 +40,4 @@ WEAK(strchr)
csel x0, x0, xzr, eq
ret
ENDPROC(strchr)
+EXPORT_SYMBOL_NOKASAN(strchr)
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 7d5d15398bfb..c306c7b88574 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -232,3 +232,4 @@ CPU_BE( orr syndrome, diff, has_nul )
sub result, data1, data2, lsr #56
ret
ENDPIPROC(strcmp)
+EXPORT_SYMBOL_NOKASAN(strcmp)
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index 8e0b14205dcb..2a0240937416 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -124,3 +124,4 @@ CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
csel data2, data2, data2a, le
b .Lrealigned
ENDPIPROC(strlen)
+EXPORT_SYMBOL_NOKASAN(strlen)
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index 66bd145935d9..c5d567afb039 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -308,3 +308,4 @@ CPU_BE( orr syndrome, diff, has_nul )
mov result, #0
ret
ENDPIPROC(strncmp)
+EXPORT_SYMBOL_NOKASAN(strncmp)
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
index 355be04441fe..e21e536d420e 100644
--- a/arch/arm64/lib/strnlen.S
+++ b/arch/arm64/lib/strnlen.S
@@ -169,3 +169,4 @@ CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */
mov len, limit
ret
ENDPIPROC(strnlen)
+EXPORT_SYMBOL_NOKASAN(strnlen)
diff --git a/arch/arm64/lib/strrchr.S b/arch/arm64/lib/strrchr.S
index ea84924d5990..47e1593016dc 100644
--- a/arch/arm64/lib/strrchr.S
+++ b/arch/arm64/lib/strrchr.S
@@ -41,3 +41,4 @@ WEAK(strrchr)
2: mov x0, x3
ret
ENDPIPROC(strrchr)
+EXPORT_SYMBOL_NOKASAN(strrchr)
diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
index 0fdff97794de..047622536535 100644
--- a/arch/arm64/lib/tishift.S
+++ b/arch/arm64/lib/tishift.S
@@ -5,6 +5,8 @@
#include <linux/linkage.h>
+#include <asm/assembler.h>
+
ENTRY(__ashlti3)
cbz x2, 1f
mov x3, #64
@@ -25,6 +27,7 @@ ENTRY(__ashlti3)
mov x0, x2
ret
ENDPROC(__ashlti3)
+EXPORT_SYMBOL(__ashlti3)
ENTRY(__ashrti3)
cbz x2, 1f
@@ -46,6 +49,7 @@ ENTRY(__ashrti3)
mov x1, x2
ret
ENDPROC(__ashrti3)
+EXPORT_SYMBOL(__ashrti3)
ENTRY(__lshrti3)
cbz x2, 1f
@@ -67,3 +71,4 @@ ENTRY(__lshrti3)
mov x1, x2
ret
ENDPROC(__lshrti3)
+EXPORT_SYMBOL(__lshrti3)
diff --git a/arch/arm64/lib/xor-neon.c b/arch/arm64/lib/xor-neon.c
new file mode 100644
index 000000000000..131c60c27dff
--- /dev/null
+++ b/arch/arm64/lib/xor-neon.c
@@ -0,0 +1,184 @@
+/*
+ * arch/arm64/lib/xor-neon.c
+ *
+ * Authors: Jackie Liu <liuyun01@kylinos.cn>
+ * Copyright (C) 2018, Tianjin KYLIN Information Technology Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/raid/xor.h>
+#include <linux/module.h>
+#include <asm/neon-intrinsics.h>
+
+void xor_arm64_neon_2(unsigned long bytes, unsigned long *p1,
+ unsigned long *p2)
+{
+ uint64_t *dp1 = (uint64_t *)p1;
+ uint64_t *dp2 = (uint64_t *)p2;
+
+ register uint64x2_t v0, v1, v2, v3;
+ long lines = bytes / (sizeof(uint64x2_t) * 4);
+
+ do {
+ /* p1 ^= p2 */
+ v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
+ v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
+ v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
+ v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));
+
+ /* store */
+ vst1q_u64(dp1 + 0, v0);
+ vst1q_u64(dp1 + 2, v1);
+ vst1q_u64(dp1 + 4, v2);
+ vst1q_u64(dp1 + 6, v3);
+
+ dp1 += 8;
+ dp2 += 8;
+ } while (--lines > 0);
+}
+
+void xor_arm64_neon_3(unsigned long bytes, unsigned long *p1,
+ unsigned long *p2, unsigned long *p3)
+{
+ uint64_t *dp1 = (uint64_t *)p1;
+ uint64_t *dp2 = (uint64_t *)p2;
+ uint64_t *dp3 = (uint64_t *)p3;
+
+ register uint64x2_t v0, v1, v2, v3;
+ long lines = bytes / (sizeof(uint64x2_t) * 4);
+
+ do {
+ /* p1 ^= p2 */
+ v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
+ v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
+ v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
+ v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));
+
+ /* p1 ^= p3 */
+ v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
+ v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
+ v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
+ v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));
+
+ /* store */
+ vst1q_u64(dp1 + 0, v0);
+ vst1q_u64(dp1 + 2, v1);
+ vst1q_u64(dp1 + 4, v2);
+ vst1q_u64(dp1 + 6, v3);
+
+ dp1 += 8;
+ dp2 += 8;
+ dp3 += 8;
+ } while (--lines > 0);
+}
+
+void xor_arm64_neon_4(unsigned long bytes, unsigned long *p1,
+ unsigned long *p2, unsigned long *p3, unsigned long *p4)
+{
+ uint64_t *dp1 = (uint64_t *)p1;
+ uint64_t *dp2 = (uint64_t *)p2;
+ uint64_t *dp3 = (uint64_t *)p3;
+ uint64_t *dp4 = (uint64_t *)p4;
+
+ register uint64x2_t v0, v1, v2, v3;
+ long lines = bytes / (sizeof(uint64x2_t) * 4);
+
+ do {
+ /* p1 ^= p2 */
+ v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
+ v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
+ v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
+ v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));
+
+ /* p1 ^= p3 */
+ v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
+ v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
+ v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
+ v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));
+
+ /* p1 ^= p4 */
+ v0 = veorq_u64(v0, vld1q_u64(dp4 + 0));
+ v1 = veorq_u64(v1, vld1q_u64(dp4 + 2));
+ v2 = veorq_u64(v2, vld1q_u64(dp4 + 4));
+ v3 = veorq_u64(v3, vld1q_u64(dp4 + 6));
+
+ /* store */
+ vst1q_u64(dp1 + 0, v0);
+ vst1q_u64(dp1 + 2, v1);
+ vst1q_u64(dp1 + 4, v2);
+ vst1q_u64(dp1 + 6, v3);
+
+ dp1 += 8;
+ dp2 += 8;
+ dp3 += 8;
+ dp4 += 8;
+ } while (--lines > 0);
+}
+
+void xor_arm64_neon_5(unsigned long bytes, unsigned long *p1,
+ unsigned long *p2, unsigned long *p3,
+ unsigned long *p4, unsigned long *p5)
+{
+ uint64_t *dp1 = (uint64_t *)p1;
+ uint64_t *dp2 = (uint64_t *)p2;
+ uint64_t *dp3 = (uint64_t *)p3;
+ uint64_t *dp4 = (uint64_t *)p4;
+ uint64_t *dp5 = (uint64_t *)p5;
+
+ register uint64x2_t v0, v1, v2, v3;
+ long lines = bytes / (sizeof(uint64x2_t) * 4);
+
+ do {
+ /* p1 ^= p2 */
+ v0 = veorq_u64(vld1q_u64(dp1 + 0), vld1q_u64(dp2 + 0));
+ v1 = veorq_u64(vld1q_u64(dp1 + 2), vld1q_u64(dp2 + 2));
+ v2 = veorq_u64(vld1q_u64(dp1 + 4), vld1q_u64(dp2 + 4));
+ v3 = veorq_u64(vld1q_u64(dp1 + 6), vld1q_u64(dp2 + 6));
+
+ /* p1 ^= p3 */
+ v0 = veorq_u64(v0, vld1q_u64(dp3 + 0));
+ v1 = veorq_u64(v1, vld1q_u64(dp3 + 2));
+ v2 = veorq_u64(v2, vld1q_u64(dp3 + 4));
+ v3 = veorq_u64(v3, vld1q_u64(dp3 + 6));
+
+ /* p1 ^= p4 */
+ v0 = veorq_u64(v0, vld1q_u64(dp4 + 0));
+ v1 = veorq_u64(v1, vld1q_u64(dp4 + 2));
+ v2 = veorq_u64(v2, vld1q_u64(dp4 + 4));
+ v3 = veorq_u64(v3, vld1q_u64(dp4 + 6));
+
+ /* p1 ^= p5 */
+ v0 = veorq_u64(v0, vld1q_u64(dp5 + 0));
+ v1 = veorq_u64(v1, vld1q_u64(dp5 + 2));
+ v2 = veorq_u64(v2, vld1q_u64(dp5 + 4));
+ v3 = veorq_u64(v3, vld1q_u64(dp5 + 6));
+
+ /* store */
+ vst1q_u64(dp1 + 0, v0);
+ vst1q_u64(dp1 + 2, v1);
+ vst1q_u64(dp1 + 4, v2);
+ vst1q_u64(dp1 + 6, v3);
+
+ dp1 += 8;
+ dp2 += 8;
+ dp3 += 8;
+ dp4 += 8;
+ dp5 += 8;
+ } while (--lines > 0);
+}
+
+struct xor_block_template const xor_block_inner_neon = {
+ .name = "__inner_neon__",
+ .do_2 = xor_arm64_neon_2,
+ .do_3 = xor_arm64_neon_3,
+ .do_4 = xor_arm64_neon_4,
+ .do_5 = xor_arm64_neon_5,
+};
+EXPORT_SYMBOL(xor_block_inner_neon);
+
+MODULE_AUTHOR("Jackie Liu <liuyun01@kylinos.cn>");
+MODULE_DESCRIPTION("ARMv8 XOR Extensions");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 0c22ede52f90..a194fd0e837f 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -212,6 +212,9 @@ ENDPROC(__dma_clean_area)
* - size - size in question
*/
ENTRY(__clean_dcache_area_pop)
+ alternative_if_not ARM64_HAS_DCPOP
+ b __clean_dcache_area_poc
+ alternative_else_nop_endif
dcache_by_line_op cvap, sy, x0, x1, x2, x3
ret
ENDPIPROC(__clean_dcache_area_pop)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 95eda81e3f2d..fb0908456a1f 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -173,9 +173,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
prot,
__builtin_return_address(0));
if (addr) {
- memset(addr, 0, size);
if (!coherent)
__dma_flush_area(page_to_virt(page), iosize);
+ memset(addr, 0, size);
} else {
iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
dma_release_from_contiguous(dev, page,
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7d9571f4ae3d..5fe6d2e40e9b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -160,7 +160,7 @@ void show_pte(unsigned long addr)
pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
- VA_BITS, mm->pgd);
+ mm == &init_mm ? VA_BITS : (int) vabits_user, mm->pgd);
pgdp = pgd_offset(mm, addr);
pgd = READ_ONCE(*pgdp);
pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index f58ea503ad01..28cbc22d7e30 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -429,6 +429,27 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
}
+static void __init add_huge_page_size(unsigned long size)
+{
+ if (size_to_hstate(size))
+ return;
+
+ hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
+}
+
+static int __init hugetlbpage_init(void)
+{
+#ifdef CONFIG_ARM64_4K_PAGES
+ add_huge_page_size(PUD_SIZE);
+#endif
+ add_huge_page_size(PMD_SIZE * CONT_PMDS);
+ add_huge_page_size(PMD_SIZE);
+ add_huge_page_size(PAGE_SIZE * CONT_PTES);
+
+ return 0;
+}
+arch_initcall(hugetlbpage_init);
+
static __init int setup_hugepagesz(char *opt)
{
unsigned long ps = memparse(opt, &opt);
@@ -440,7 +461,7 @@ static __init int setup_hugepagesz(char *opt)
case PMD_SIZE * CONT_PMDS:
case PMD_SIZE:
case PAGE_SIZE * CONT_PTES:
- hugetlb_add_hstate(ilog2(ps) - PAGE_SHIFT);
+ add_huge_page_size(ps);
return 1;
}
@@ -449,13 +470,3 @@ static __init int setup_hugepagesz(char *opt)
return 0;
}
__setup("hugepagesz=", setup_hugepagesz);
-
-#ifdef CONFIG_ARM64_64K_PAGES
-static __init int add_default_hugepagesz(void)
-{
- if (size_to_hstate(CONT_PTES * PAGE_SIZE) == NULL)
- hugetlb_add_hstate(CONT_PTE_SHIFT);
- return 0;
-}
-arch_initcall(add_default_hugepagesz);
-#endif
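With the initcall above, the standard sizes are registered even when no hugepagesz= option is given; an explicit option still works, e.g. on a 4K-granule kernel (where the accepted sizes work out to 1G, 32M, 2M and 64K):

	hugepagesz=32M hugepages=64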
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 9b432d9fcada..cbba537ba3d2 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -59,6 +59,8 @@
* that cannot be mistaken for a real physical address.
*/
s64 memstart_addr __ro_after_init = -1;
+EXPORT_SYMBOL(memstart_addr);
+
phys_addr_t arm64_dma_phys_limit __ro_after_init;
#ifdef CONFIG_BLK_DEV_INITRD
@@ -289,6 +291,14 @@ int pfn_valid(unsigned long pfn)
if ((addr >> PAGE_SHIFT) != pfn)
return 0;
+
+#ifdef CONFIG_SPARSEMEM
+ if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
+ return 0;
+
+ if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn))))
+ return 0;
+#endif
return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
@@ -607,15 +617,7 @@ void __init mem_init(void)
* detected at build time already.
*/
#ifdef CONFIG_COMPAT
- BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
-#endif
-
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
- /*
- * Make sure we chose the upper bound of sizeof(struct page)
- * correctly when sizing the VMEMMAP array.
- */
- BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
+ BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index d1d6601b385d..da513a1facf4 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -52,6 +52,8 @@
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
+u64 vabits_user __ro_after_init;
+EXPORT_SYMBOL(vabits_user);
u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);
@@ -451,7 +453,7 @@ static void __init map_mem(pgd_t *pgdp)
struct memblock_region *reg;
int flags = 0;
- if (debug_pagealloc_enabled())
+ if (rodata_full || debug_pagealloc_enabled())
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
@@ -552,7 +554,19 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
static int __init parse_rodata(char *arg)
{
- return strtobool(arg, &rodata_enabled);
+ int ret = strtobool(arg, &rodata_enabled);
+ if (!ret) {
+ rodata_full = false;
+ return 0;
+ }
+
+ /* permit 'full' in addition to boolean options */
+ if (strcmp(arg, "full"))
+ return -EINVAL;
+
+ rodata_enabled = true;
+ rodata_full = true;
+ return 0;
}
early_param("rodata", parse_rodata);
@@ -1032,3 +1046,20 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
pmd_free(NULL, table);
return 1;
}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
+ bool want_memblock)
+{
+ int flags = 0;
+
+ if (rodata_full || debug_pagealloc_enabled())
+ flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+
+ __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
+ size, PAGE_KERNEL, pgd_pgtable_alloc, flags);
+
+ return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
+ altmap, want_memblock);
+}
+#endif
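The parser above keeps the boolean spellings strtobool() already understood and adds one token, so the accepted command-line forms are:

	rodata=off	# rodata_enabled = false, rodata_full = false
	rodata=on	# rodata_enabled = true,  rodata_full = false
	rodata=full	# rodata_enabled = true,  rodata_full = true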
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 27a31efd9e8e..ae34e3a1cef1 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -466,3 +466,13 @@ void __init arm64_numa_init(void)
numa_init(dummy_numa_init);
}
+
+/*
+ * We hope that we will be hotplugging memory on nodes we already know about,
+ * such that acpi_get_node() succeeds and we never fall back to this...
+ */
+int memory_add_physaddr_to_nid(u64 addr)
+{
+ pr_warn("Unknown node for memory at 0x%llx, assuming node 0\n", addr);
+ return 0;
+}
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index a56359373d8b..6cd645edcf35 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -25,6 +25,8 @@ struct page_change_data {
pgprot_t clear_mask;
};
+bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
+
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
void *data)
{
@@ -64,6 +66,7 @@ static int change_memory_common(unsigned long addr, int numpages,
unsigned long size = PAGE_SIZE*numpages;
unsigned long end = start + size;
struct vm_struct *area;
+ int i;
if (!PAGE_ALIGNED(addr)) {
start &= PAGE_MASK;
@@ -93,6 +96,24 @@ static int change_memory_common(unsigned long addr, int numpages,
if (!numpages)
return 0;
+ /*
+ * If we are manipulating read-only permissions, apply the same
+ * change to the linear mapping of the pages that back this VM area.
+ */
+ if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
+ pgprot_val(clear_mask) == PTE_RDONLY)) {
+ for (i = 0; i < area->nr_pages; i++) {
+ __change_memory_common((u64)page_address(area->pages[i]),
+ PAGE_SIZE, set_mask, clear_mask);
+ }
+ }
+
+ /*
+ * Get rid of potentially aliasing lazily unmapped vm areas that may
+ * have permissions set that deviate from the ones we are setting here.
+ */
+ vm_unmap_aliases();
+
return __change_memory_common(start, size, set_mask, clear_mask);
}
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 2c75b0b903ae..e05b3ce1db6b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -182,6 +182,7 @@ ENDPROC(cpu_do_switch_mm)
.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
adrp \tmp1, empty_zero_page
phys_to_ttbr \tmp2, \tmp1
+ offset_ttbr1 \tmp2
msr ttbr1_el1, \tmp2
isb
tlbi vmalle1
@@ -200,6 +201,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
__idmap_cpu_set_reserved_ttbr1 x1, x3
+ offset_ttbr1 x0
msr ttbr1_el1, x0
isb
@@ -254,6 +256,7 @@ ENTRY(idmap_kpti_install_ng_mappings)
pte .req x16
mrs swapper_ttb, ttbr1_el1
+ restore_ttbr1 swapper_ttb
adr flag_ptr, __idmap_kpti_flag
cbnz cpu, __idmap_kpti_secondary
@@ -373,6 +376,7 @@ __idmap_kpti_secondary:
cbnz w18, 1b
/* All done, act like nothing happened */
+ offset_ttbr1 swapper_ttb
msr ttbr1_el1, swapper_ttb
isb
ret
@@ -446,7 +450,15 @@ ENTRY(__cpu_setup)
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
TCR_TBI0 | TCR_A1
- tcr_set_idmap_t0sz x10, x9
+
+#ifdef CONFIG_ARM64_USER_VA_BITS_52
+ ldr_l x9, vabits_user
+ sub x9, xzr, x9
+ add x9, x9, #64
+#else
+ ldr_l x9, idmap_t0sz
+#endif
+ tcr_set_t0sz x10, x9
/*
* Set the IPS bits in TCR_EL1.
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index a6fdaea07c63..1542df00b23c 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -134,10 +134,9 @@ static inline void emit_a64_mov_i64(const int reg, const u64 val,
}
/*
- * This is an unoptimized 64 immediate emission used for BPF to BPF call
- * addresses. It will always do a full 64 bit decomposition as otherwise
- * more complexity in the last extra pass is required since we previously
- * reserved 4 instructions for the address.
+ * Kernel addresses in the vmalloc space use at most 48 bits, and the
+ * remaining bits are guaranteed to be 0x1. So we can compose the address
+ * with a fixed length movn/movk/movk sequence.
*/
static inline void emit_addr_mov_i64(const int reg, const u64 val,
struct jit_ctx *ctx)
@@ -145,8 +144,8 @@ static inline void emit_addr_mov_i64(const int reg, const u64 val,
u64 tmp = val;
int shift = 0;
- emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
- for (;shift < 48;) {
+ emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx);
+ while (shift < 32) {
tmp >>= 16;
shift += 16;
emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
@@ -351,7 +350,8 @@ static void build_epilogue(struct jit_ctx *ctx)
* >0 - successfully JITed a 16-byte eBPF instruction.
* <0 - failed to JIT.
*/
-static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ bool extra_pass)
{
const u8 code = insn->code;
const u8 dst = bpf2a64[insn->dst_reg];
@@ -625,12 +625,15 @@ emit_cond_jmp:
case BPF_JMP | BPF_CALL:
{
const u8 r0 = bpf2a64[BPF_REG_0];
- const u64 func = (u64)__bpf_call_base + imm;
+ bool func_addr_fixed;
+ u64 func_addr;
+ int ret;
- if (ctx->prog->is_func)
- emit_addr_mov_i64(tmp, func, ctx);
- else
- emit_a64_mov_i64(tmp, func, ctx);
+ ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
+ &func_addr, &func_addr_fixed);
+ if (ret < 0)
+ return ret;
+ emit_addr_mov_i64(tmp, func_addr, ctx);
emit(A64_BLR(tmp), ctx);
emit(A64_MOV(1, r0, A64_R(0)), ctx);
break;
@@ -753,7 +756,7 @@ emit_cond_jmp:
return 0;
}
-static int build_body(struct jit_ctx *ctx)
+static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
const struct bpf_prog *prog = ctx->prog;
int i;
@@ -762,7 +765,7 @@ static int build_body(struct jit_ctx *ctx)
const struct bpf_insn *insn = &prog->insnsi[i];
int ret;
- ret = build_insn(insn, ctx);
+ ret = build_insn(insn, ctx, extra_pass);
if (ret > 0) {
i++;
if (ctx->image == NULL)
@@ -858,7 +861,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* 1. Initial fake pass to compute ctx->idx. */
/* Fake pass to fill in ctx->offset. */
- if (build_body(&ctx)) {
+ if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
goto out_off;
}
@@ -888,7 +891,7 @@ skip_init_ctx:
build_prologue(&ctx, was_classic);
- if (build_body(&ctx)) {
+ if (build_body(&ctx, extra_pass)) {
bpf_jit_binary_free(header);
prog = orig_prog;
goto out_off;
@@ -929,6 +932,7 @@ skip_init_ctx:
prog->jited_len = image_size;
if (!prog->is_func || extra_pass) {
+ bpf_prog_fill_jited_linfo(prog, ctx.offset);
out_off:
kfree(ctx.offset);
kfree(jit_data);
@@ -940,3 +944,16 @@ out:
tmp : orig_prog);
return prog;
}
+
+void *bpf_jit_alloc_exec(unsigned long size)
+{
+ return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
+ BPF_JIT_REGION_END, GFP_KERNEL,
+ PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+}
+
+void bpf_jit_free_exec(void *addr)
+{
+ return vfree(addr);
+}
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
index c410aa4fff1a..b2905c0485a7 100644
--- a/arch/csky/include/asm/mmu_context.h
+++ b/arch/csky/include/asm/mmu_context.h
@@ -16,7 +16,7 @@
static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
{
- pgd &= ~(1<<31);
+ pgd -= PAGE_OFFSET;
pgd += PHYS_OFFSET;
pgd |= 1;
setup_pgd(pgd, kernel);
@@ -29,7 +29,7 @@ static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
static inline unsigned long tlb_get_pgd(void)
{
- return ((get_pgd()|(1<<31)) - PHYS_OFFSET) & ~1;
+ return ((get_pgd() - PHYS_OFFSET) & ~1) + PAGE_OFFSET;
}
#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
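The two helpers above are now inverses of a single linear map instead of hard-wiring the kernel base at bit 31; in plain C:

	phys = virt - PAGE_OFFSET + PHYS_OFFSET;	/* tlbmiss_handler_setup_pgd() */
	virt = phys - PHYS_OFFSET + PAGE_OFFSET;	/* tlb_get_pgd() */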
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 45f59808b842..320d86f192ee 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -80,6 +80,9 @@ unwcheck: vmlinux
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
+archheaders:
+ $(Q)$(MAKE) $(build)=arch/ia64/kernel/syscalls all
+
CLEAN_FILES += vmlinux.gz bootloader
boot: lib/lib.a vmlinux
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 557bbc8ba9f5..43e21fe3499c 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,3 +1,4 @@
+generated-y += syscall_table.h
generic-y += compat.h
generic-y += exec.h
generic-y += irq_work.h
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 49e34db2529c..0b08ebd2dfde 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -10,9 +10,7 @@
#include <uapi/asm/unistd.h>
-
-
-#define NR_syscalls 326 /* length of syscall table */
+#define NR_syscalls __NR_syscalls /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index 3982e673e967..ccce0ea65e05 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -1,6 +1,7 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generated-y += unistd_64.h
generic-y += bpf_perf_event.h
generic-y += ipcbuf.h
generic-y += kvm_para.h
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 5fe71d4a43de..b2513922dcb5 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -11,334 +11,10 @@
#include <asm/break.h>
-#define __BREAK_SYSCALL __IA64_BREAK_SYSCALL
+#define __BREAK_SYSCALL __IA64_BREAK_SYSCALL
-#define __NR_ni_syscall 1024
-#define __NR_exit 1025
-#define __NR_read 1026
-#define __NR_write 1027
-#define __NR_open 1028
-#define __NR_close 1029
-#define __NR_creat 1030
-#define __NR_link 1031
-#define __NR_unlink 1032
-#define __NR_execve 1033
-#define __NR_chdir 1034
-#define __NR_fchdir 1035
-#define __NR_utimes 1036
-#define __NR_mknod 1037
-#define __NR_chmod 1038
-#define __NR_chown 1039
-#define __NR_lseek 1040
-#define __NR_getpid 1041
-#define __NR_getppid 1042
-#define __NR_mount 1043
-#define __NR_umount 1044
-#define __NR_setuid 1045
-#define __NR_getuid 1046
-#define __NR_geteuid 1047
-#define __NR_ptrace 1048
-#define __NR_access 1049
-#define __NR_sync 1050
-#define __NR_fsync 1051
-#define __NR_fdatasync 1052
-#define __NR_kill 1053
-#define __NR_rename 1054
-#define __NR_mkdir 1055
-#define __NR_rmdir 1056
-#define __NR_dup 1057
-#define __NR_pipe 1058
-#define __NR_times 1059
-#define __NR_brk 1060
-#define __NR_setgid 1061
-#define __NR_getgid 1062
-#define __NR_getegid 1063
-#define __NR_acct 1064
-#define __NR_ioctl 1065
-#define __NR_fcntl 1066
-#define __NR_umask 1067
-#define __NR_chroot 1068
-#define __NR_ustat 1069
-#define __NR_dup2 1070
-#define __NR_setreuid 1071
-#define __NR_setregid 1072
-#define __NR_getresuid 1073
-#define __NR_setresuid 1074
-#define __NR_getresgid 1075
-#define __NR_setresgid 1076
-#define __NR_getgroups 1077
-#define __NR_setgroups 1078
-#define __NR_getpgid 1079
-#define __NR_setpgid 1080
-#define __NR_setsid 1081
-#define __NR_getsid 1082
-#define __NR_sethostname 1083
-#define __NR_setrlimit 1084
-#define __NR_getrlimit 1085
-#define __NR_getrusage 1086
-#define __NR_gettimeofday 1087
-#define __NR_settimeofday 1088
-#define __NR_select 1089
-#define __NR_poll 1090
-#define __NR_symlink 1091
-#define __NR_readlink 1092
-#define __NR_uselib 1093
-#define __NR_swapon 1094
-#define __NR_swapoff 1095
-#define __NR_reboot 1096
-#define __NR_truncate 1097
-#define __NR_ftruncate 1098
-#define __NR_fchmod 1099
-#define __NR_fchown 1100
-#define __NR_getpriority 1101
-#define __NR_setpriority 1102
-#define __NR_statfs 1103
-#define __NR_fstatfs 1104
-#define __NR_gettid 1105
-#define __NR_semget 1106
-#define __NR_semop 1107
-#define __NR_semctl 1108
-#define __NR_msgget 1109
-#define __NR_msgsnd 1110
-#define __NR_msgrcv 1111
-#define __NR_msgctl 1112
-#define __NR_shmget 1113
-#define __NR_shmat 1114
-#define __NR_shmdt 1115
-#define __NR_shmctl 1116
-/* also known as klogctl() in GNU libc: */
-#define __NR_syslog 1117
-#define __NR_setitimer 1118
-#define __NR_getitimer 1119
-/* 1120 was __NR_old_stat */
-/* 1121 was __NR_old_lstat */
-/* 1122 was __NR_old_fstat */
-#define __NR_vhangup 1123
-#define __NR_lchown 1124
-#define __NR_remap_file_pages 1125
-#define __NR_wait4 1126
-#define __NR_sysinfo 1127
-#define __NR_clone 1128
-#define __NR_setdomainname 1129
-#define __NR_uname 1130
-#define __NR_adjtimex 1131
-/* 1132 was __NR_create_module */
-#define __NR_init_module 1133
-#define __NR_delete_module 1134
-/* 1135 was __NR_get_kernel_syms */
-/* 1136 was __NR_query_module */
-#define __NR_quotactl 1137
-#define __NR_bdflush 1138
-#define __NR_sysfs 1139
-#define __NR_personality 1140
-#define __NR_afs_syscall 1141
-#define __NR_setfsuid 1142
-#define __NR_setfsgid 1143
-#define __NR_getdents 1144
-#define __NR_flock 1145
-#define __NR_readv 1146
-#define __NR_writev 1147
-#define __NR_pread64 1148
-#define __NR_pwrite64 1149
-#define __NR__sysctl 1150
-#define __NR_mmap 1151
-#define __NR_munmap 1152
-#define __NR_mlock 1153
-#define __NR_mlockall 1154
-#define __NR_mprotect 1155
-#define __NR_mremap 1156
-#define __NR_msync 1157
-#define __NR_munlock 1158
-#define __NR_munlockall 1159
-#define __NR_sched_getparam 1160
-#define __NR_sched_setparam 1161
-#define __NR_sched_getscheduler 1162
-#define __NR_sched_setscheduler 1163
-#define __NR_sched_yield 1164
-#define __NR_sched_get_priority_max 1165
-#define __NR_sched_get_priority_min 1166
-#define __NR_sched_rr_get_interval 1167
-#define __NR_nanosleep 1168
-#define __NR_nfsservctl 1169
-#define __NR_prctl 1170
-/* 1171 is reserved for backwards compatibility with old __NR_getpagesize */
-#define __NR_mmap2 1172
-#define __NR_pciconfig_read 1173
-#define __NR_pciconfig_write 1174
-#define __NR_perfmonctl 1175
-#define __NR_sigaltstack 1176
-#define __NR_rt_sigaction 1177
-#define __NR_rt_sigpending 1178
-#define __NR_rt_sigprocmask 1179
-#define __NR_rt_sigqueueinfo 1180
-#define __NR_rt_sigreturn 1181
-#define __NR_rt_sigsuspend 1182
-#define __NR_rt_sigtimedwait 1183
-#define __NR_getcwd 1184
-#define __NR_capget 1185
-#define __NR_capset 1186
-#define __NR_sendfile 1187
-#define __NR_getpmsg 1188
-#define __NR_putpmsg 1189
-#define __NR_socket 1190
-#define __NR_bind 1191
-#define __NR_connect 1192
-#define __NR_listen 1193
-#define __NR_accept 1194
-#define __NR_getsockname 1195
-#define __NR_getpeername 1196
-#define __NR_socketpair 1197
-#define __NR_send 1198
-#define __NR_sendto 1199
-#define __NR_recv 1200
-#define __NR_recvfrom 1201
-#define __NR_shutdown 1202
-#define __NR_setsockopt 1203
-#define __NR_getsockopt 1204
-#define __NR_sendmsg 1205
-#define __NR_recvmsg 1206
-#define __NR_pivot_root 1207
-#define __NR_mincore 1208
-#define __NR_madvise 1209
-#define __NR_stat 1210
-#define __NR_lstat 1211
-#define __NR_fstat 1212
-#define __NR_clone2 1213
-#define __NR_getdents64 1214
-#define __NR_getunwind 1215
-#define __NR_readahead 1216
-#define __NR_setxattr 1217
-#define __NR_lsetxattr 1218
-#define __NR_fsetxattr 1219
-#define __NR_getxattr 1220
-#define __NR_lgetxattr 1221
-#define __NR_fgetxattr 1222
-#define __NR_listxattr 1223
-#define __NR_llistxattr 1224
-#define __NR_flistxattr 1225
-#define __NR_removexattr 1226
-#define __NR_lremovexattr 1227
-#define __NR_fremovexattr 1228
-#define __NR_tkill 1229
-#define __NR_futex 1230
-#define __NR_sched_setaffinity 1231
-#define __NR_sched_getaffinity 1232
-#define __NR_set_tid_address 1233
-#define __NR_fadvise64 1234
-#define __NR_tgkill 1235
-#define __NR_exit_group 1236
-#define __NR_lookup_dcookie 1237
-#define __NR_io_setup 1238
-#define __NR_io_destroy 1239
-#define __NR_io_getevents 1240
-#define __NR_io_submit 1241
-#define __NR_io_cancel 1242
-#define __NR_epoll_create 1243
-#define __NR_epoll_ctl 1244
-#define __NR_epoll_wait 1245
-#define __NR_restart_syscall 1246
-#define __NR_semtimedop 1247
-#define __NR_timer_create 1248
-#define __NR_timer_settime 1249
-#define __NR_timer_gettime 1250
-#define __NR_timer_getoverrun 1251
-#define __NR_timer_delete 1252
-#define __NR_clock_settime 1253
-#define __NR_clock_gettime 1254
-#define __NR_clock_getres 1255
-#define __NR_clock_nanosleep 1256
-#define __NR_fstatfs64 1257
-#define __NR_statfs64 1258
-#define __NR_mbind 1259
-#define __NR_get_mempolicy 1260
-#define __NR_set_mempolicy 1261
-#define __NR_mq_open 1262
-#define __NR_mq_unlink 1263
-#define __NR_mq_timedsend 1264
-#define __NR_mq_timedreceive 1265
-#define __NR_mq_notify 1266
-#define __NR_mq_getsetattr 1267
-#define __NR_kexec_load 1268
-#define __NR_vserver 1269
-#define __NR_waitid 1270
-#define __NR_add_key 1271
-#define __NR_request_key 1272
-#define __NR_keyctl 1273
-#define __NR_ioprio_set 1274
-#define __NR_ioprio_get 1275
-#define __NR_move_pages 1276
-#define __NR_inotify_init 1277
-#define __NR_inotify_add_watch 1278
-#define __NR_inotify_rm_watch 1279
-#define __NR_migrate_pages 1280
-#define __NR_openat 1281
-#define __NR_mkdirat 1282
-#define __NR_mknodat 1283
-#define __NR_fchownat 1284
-#define __NR_futimesat 1285
-#define __NR_newfstatat 1286
-#define __NR_unlinkat 1287
-#define __NR_renameat 1288
-#define __NR_linkat 1289
-#define __NR_symlinkat 1290
-#define __NR_readlinkat 1291
-#define __NR_fchmodat 1292
-#define __NR_faccessat 1293
-#define __NR_pselect6 1294
-#define __NR_ppoll 1295
-#define __NR_unshare 1296
-#define __NR_splice 1297
-#define __NR_set_robust_list 1298
-#define __NR_get_robust_list 1299
-#define __NR_sync_file_range 1300
-#define __NR_tee 1301
-#define __NR_vmsplice 1302
-#define __NR_fallocate 1303
-#define __NR_getcpu 1304
-#define __NR_epoll_pwait 1305
-#define __NR_utimensat 1306
-#define __NR_signalfd 1307
-#define __NR_timerfd 1308
-#define __NR_eventfd 1309
-#define __NR_timerfd_create 1310
-#define __NR_timerfd_settime 1311
-#define __NR_timerfd_gettime 1312
-#define __NR_signalfd4 1313
-#define __NR_eventfd2 1314
-#define __NR_epoll_create1 1315
-#define __NR_dup3 1316
-#define __NR_pipe2 1317
-#define __NR_inotify_init1 1318
-#define __NR_preadv 1319
-#define __NR_pwritev 1320
-#define __NR_rt_tgsigqueueinfo 1321
-#define __NR_recvmmsg 1322
-#define __NR_fanotify_init 1323
-#define __NR_fanotify_mark 1324
-#define __NR_prlimit64 1325
-#define __NR_name_to_handle_at 1326
-#define __NR_open_by_handle_at 1327
-#define __NR_clock_adjtime 1328
-#define __NR_syncfs 1329
-#define __NR_setns 1330
-#define __NR_sendmmsg 1331
-#define __NR_process_vm_readv 1332
-#define __NR_process_vm_writev 1333
-#define __NR_accept4 1334
-#define __NR_finit_module 1335
-#define __NR_sched_setattr 1336
-#define __NR_sched_getattr 1337
-#define __NR_renameat2 1338
-#define __NR_getrandom 1339
-#define __NR_memfd_create 1340
-#define __NR_bpf 1341
-#define __NR_execveat 1342
-#define __NR_userfaultfd 1343
-#define __NR_membarrier 1344
-#define __NR_kcmp 1345
-#define __NR_mlock2 1346
-#define __NR_copy_file_range 1347
-#define __NR_preadv2 1348
-#define __NR_pwritev2 1349
+#define __NR_Linux 1024
+
+#include <asm/unistd_64.h>
#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 68362b30ea47..a9992be5718b 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1426,335 +1426,10 @@ END(ftrace_stub)
#endif /* CONFIG_FUNCTION_TRACER */
+#define __SYSCALL(nr, entry, nargs) data8 entry
.rodata
.align 8
.globl sys_call_table
sys_call_table:
- data8 sys_ni_syscall // This must be sys_ni_syscall! See ivt.S.
- data8 sys_exit // 1025
- data8 sys_read
- data8 sys_write
- data8 sys_open
- data8 sys_close
- data8 sys_creat // 1030
- data8 sys_link
- data8 sys_unlink
- data8 ia64_execve
- data8 sys_chdir
- data8 sys_fchdir // 1035
- data8 sys_utimes
- data8 sys_mknod
- data8 sys_chmod
- data8 sys_chown
- data8 sys_lseek // 1040
- data8 sys_getpid
- data8 sys_getppid
- data8 sys_mount
- data8 sys_umount
- data8 sys_setuid // 1045
- data8 sys_getuid
- data8 sys_geteuid
- data8 sys_ptrace
- data8 sys_access
- data8 sys_sync // 1050
- data8 sys_fsync
- data8 sys_fdatasync
- data8 sys_kill
- data8 sys_rename
- data8 sys_mkdir // 1055
- data8 sys_rmdir
- data8 sys_dup
- data8 sys_ia64_pipe
- data8 sys_times
- data8 ia64_brk // 1060
- data8 sys_setgid
- data8 sys_getgid
- data8 sys_getegid
- data8 sys_acct
- data8 sys_ioctl // 1065
- data8 sys_fcntl
- data8 sys_umask
- data8 sys_chroot
- data8 sys_ustat
- data8 sys_dup2 // 1070
- data8 sys_setreuid
- data8 sys_setregid
- data8 sys_getresuid
- data8 sys_setresuid
- data8 sys_getresgid // 1075
- data8 sys_setresgid
- data8 sys_getgroups
- data8 sys_setgroups
- data8 sys_getpgid
- data8 sys_setpgid // 1080
- data8 sys_setsid
- data8 sys_getsid
- data8 sys_sethostname
- data8 sys_setrlimit
- data8 sys_getrlimit // 1085
- data8 sys_getrusage
- data8 sys_gettimeofday
- data8 sys_settimeofday
- data8 sys_select
- data8 sys_poll // 1090
- data8 sys_symlink
- data8 sys_readlink
- data8 sys_uselib
- data8 sys_swapon
- data8 sys_swapoff // 1095
- data8 sys_reboot
- data8 sys_truncate
- data8 sys_ftruncate
- data8 sys_fchmod
- data8 sys_fchown // 1100
- data8 ia64_getpriority
- data8 sys_setpriority
- data8 sys_statfs
- data8 sys_fstatfs
- data8 sys_gettid // 1105
- data8 sys_semget
- data8 sys_semop
- data8 sys_semctl
- data8 sys_msgget
- data8 sys_msgsnd // 1110
- data8 sys_msgrcv
- data8 sys_msgctl
- data8 sys_shmget
- data8 sys_shmat
- data8 sys_shmdt // 1115
- data8 sys_shmctl
- data8 sys_syslog
- data8 sys_setitimer
- data8 sys_getitimer
- data8 sys_ni_syscall // 1120 /* was: ia64_oldstat */
- data8 sys_ni_syscall /* was: ia64_oldlstat */
- data8 sys_ni_syscall /* was: ia64_oldfstat */
- data8 sys_vhangup
- data8 sys_lchown
- data8 sys_remap_file_pages // 1125
- data8 sys_wait4
- data8 sys_sysinfo
- data8 sys_clone
- data8 sys_setdomainname
- data8 sys_newuname // 1130
- data8 sys_adjtimex
- data8 sys_ni_syscall /* was: ia64_create_module */
- data8 sys_init_module
- data8 sys_delete_module
- data8 sys_ni_syscall // 1135 /* was: sys_get_kernel_syms */
- data8 sys_ni_syscall /* was: sys_query_module */
- data8 sys_quotactl
- data8 sys_bdflush
- data8 sys_sysfs
- data8 sys_personality // 1140
- data8 sys_ni_syscall // sys_afs_syscall
- data8 sys_setfsuid
- data8 sys_setfsgid
- data8 sys_getdents
- data8 sys_flock // 1145
- data8 sys_readv
- data8 sys_writev
- data8 sys_pread64
- data8 sys_pwrite64
- data8 sys_sysctl // 1150
- data8 sys_mmap
- data8 sys_munmap
- data8 sys_mlock
- data8 sys_mlockall
- data8 sys_mprotect // 1155
- data8 ia64_mremap
- data8 sys_msync
- data8 sys_munlock
- data8 sys_munlockall
- data8 sys_sched_getparam // 1160
- data8 sys_sched_setparam
- data8 sys_sched_getscheduler
- data8 sys_sched_setscheduler
- data8 sys_sched_yield
- data8 sys_sched_get_priority_max // 1165
- data8 sys_sched_get_priority_min
- data8 sys_sched_rr_get_interval
- data8 sys_nanosleep
- data8 sys_ni_syscall // old nfsservctl
- data8 sys_prctl // 1170
- data8 sys_getpagesize
- data8 sys_mmap2
- data8 sys_pciconfig_read
- data8 sys_pciconfig_write
- data8 sys_perfmonctl // 1175
- data8 sys_sigaltstack
- data8 sys_rt_sigaction
- data8 sys_rt_sigpending
- data8 sys_rt_sigprocmask
- data8 sys_rt_sigqueueinfo // 1180
- data8 sys_rt_sigreturn
- data8 sys_rt_sigsuspend
- data8 sys_rt_sigtimedwait
- data8 sys_getcwd
- data8 sys_capget // 1185
- data8 sys_capset
- data8 sys_sendfile64
- data8 sys_ni_syscall // sys_getpmsg (STREAMS)
- data8 sys_ni_syscall // sys_putpmsg (STREAMS)
- data8 sys_socket // 1190
- data8 sys_bind
- data8 sys_connect
- data8 sys_listen
- data8 sys_accept
- data8 sys_getsockname // 1195
- data8 sys_getpeername
- data8 sys_socketpair
- data8 sys_send
- data8 sys_sendto
- data8 sys_recv // 1200
- data8 sys_recvfrom
- data8 sys_shutdown
- data8 sys_setsockopt
- data8 sys_getsockopt
- data8 sys_sendmsg // 1205
- data8 sys_recvmsg
- data8 sys_pivot_root
- data8 sys_mincore
- data8 sys_madvise
- data8 sys_newstat // 1210
- data8 sys_newlstat
- data8 sys_newfstat
- data8 sys_clone2
- data8 sys_getdents64
- data8 sys_getunwind // 1215
- data8 sys_readahead
- data8 sys_setxattr
- data8 sys_lsetxattr
- data8 sys_fsetxattr
- data8 sys_getxattr // 1220
- data8 sys_lgetxattr
- data8 sys_fgetxattr
- data8 sys_listxattr
- data8 sys_llistxattr
- data8 sys_flistxattr // 1225
- data8 sys_removexattr
- data8 sys_lremovexattr
- data8 sys_fremovexattr
- data8 sys_tkill
- data8 sys_futex // 1230
- data8 sys_sched_setaffinity
- data8 sys_sched_getaffinity
- data8 sys_set_tid_address
- data8 sys_fadvise64_64
- data8 sys_tgkill // 1235
- data8 sys_exit_group
- data8 sys_lookup_dcookie
- data8 sys_io_setup
- data8 sys_io_destroy
- data8 sys_io_getevents // 1240
- data8 sys_io_submit
- data8 sys_io_cancel
- data8 sys_epoll_create
- data8 sys_epoll_ctl
- data8 sys_epoll_wait // 1245
- data8 sys_restart_syscall
- data8 sys_semtimedop
- data8 sys_timer_create
- data8 sys_timer_settime
- data8 sys_timer_gettime // 1250
- data8 sys_timer_getoverrun
- data8 sys_timer_delete
- data8 sys_clock_settime
- data8 sys_clock_gettime
- data8 sys_clock_getres // 1255
- data8 sys_clock_nanosleep
- data8 sys_fstatfs64
- data8 sys_statfs64
- data8 sys_mbind
- data8 sys_get_mempolicy // 1260
- data8 sys_set_mempolicy
- data8 sys_mq_open
- data8 sys_mq_unlink
- data8 sys_mq_timedsend
- data8 sys_mq_timedreceive // 1265
- data8 sys_mq_notify
- data8 sys_mq_getsetattr
- data8 sys_kexec_load
- data8 sys_ni_syscall // reserved for vserver
- data8 sys_waitid // 1270
- data8 sys_add_key
- data8 sys_request_key
- data8 sys_keyctl
- data8 sys_ioprio_set
- data8 sys_ioprio_get // 1275
- data8 sys_move_pages
- data8 sys_inotify_init
- data8 sys_inotify_add_watch
- data8 sys_inotify_rm_watch
- data8 sys_migrate_pages // 1280
- data8 sys_openat
- data8 sys_mkdirat
- data8 sys_mknodat
- data8 sys_fchownat
- data8 sys_futimesat // 1285
- data8 sys_newfstatat
- data8 sys_unlinkat
- data8 sys_renameat
- data8 sys_linkat
- data8 sys_symlinkat // 1290
- data8 sys_readlinkat
- data8 sys_fchmodat
- data8 sys_faccessat
- data8 sys_pselect6
- data8 sys_ppoll // 1295
- data8 sys_unshare
- data8 sys_splice
- data8 sys_set_robust_list
- data8 sys_get_robust_list
- data8 sys_sync_file_range // 1300
- data8 sys_tee
- data8 sys_vmsplice
- data8 sys_fallocate
- data8 sys_getcpu
- data8 sys_epoll_pwait // 1305
- data8 sys_utimensat
- data8 sys_signalfd
- data8 sys_ni_syscall
- data8 sys_eventfd
- data8 sys_timerfd_create // 1310
- data8 sys_timerfd_settime
- data8 sys_timerfd_gettime
- data8 sys_signalfd4
- data8 sys_eventfd2
- data8 sys_epoll_create1 // 1315
- data8 sys_dup3
- data8 sys_pipe2
- data8 sys_inotify_init1
- data8 sys_preadv
- data8 sys_pwritev // 1320
- data8 sys_rt_tgsigqueueinfo
- data8 sys_recvmmsg
- data8 sys_fanotify_init
- data8 sys_fanotify_mark
- data8 sys_prlimit64 // 1325
- data8 sys_name_to_handle_at
- data8 sys_open_by_handle_at
- data8 sys_clock_adjtime
- data8 sys_syncfs
- data8 sys_setns // 1330
- data8 sys_sendmmsg
- data8 sys_process_vm_readv
- data8 sys_process_vm_writev
- data8 sys_accept4
- data8 sys_finit_module // 1335
- data8 sys_sched_setattr
- data8 sys_sched_getattr
- data8 sys_renameat2
- data8 sys_getrandom
- data8 sys_memfd_create // 1340
- data8 sys_bpf
- data8 sys_execveat
- data8 sys_userfaultfd
- data8 sys_membarrier
- data8 sys_kcmp // 1345
- data8 sys_mlock2
- data8 sys_copy_file_range
- data8 sys_preadv2
- data8 sys_pwritev2
-
- .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
+#include <asm/syscall_table.h>
+#undef __SYSCALL
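The generated asm/syscall_table.h is nothing but a list of __SYSCALL(nr, entry, nargs) invocations, so the local #define above turns each one back into the data8 directive the old table spelled out by hand. A stand-alone sketch of that expansion, using a hypothetical temp file run through the C preprocessor the same way entry.S is:

  cat > /tmp/frag.S <<'EOF'
  #define __SYSCALL(nr, entry, nargs) data8 entry
  __SYSCALL(1024, sys_ni_syscall, )
  __SYSCALL(1025, sys_exit, )
  #undef __SYSCALL
  EOF
  cpp -P /tmp/frag.S
  # Prints:
  #   data8 sys_ni_syscall
  #   data8 sys_exit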
diff --git a/arch/ia64/kernel/syscalls/Makefile b/arch/ia64/kernel/syscalls/Makefile
new file mode 100644
index 000000000000..813a58cba39c
--- /dev/null
+++ b/arch/ia64/kernel/syscalls/Makefile
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
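+# The _dummy assignment runs at Makefile parse time and creates the
+# directories for the generated headers before any rule needs them.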
+_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
+ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+syscall := $(srctree)/$(src)/syscall.tbl
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+ '$(syshdr_abis_$(basetarget))' \
+ '$(syshdr_pfx_$(basetarget))' \
+ '$(syshdr_offset_$(basetarget))'
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
+ '$(systbl_abis_$(basetarget))' \
+ '$(systbl_abi_$(basetarget))' \
+ '$(systbl_offset_$(basetarget))'
+
+syshdr_offset_unistd_64 := __NR_Linux
+$(uapi)/unistd_64.h: $(syscall) $(syshdr)
+ $(call if_changed,syshdr)
+
+systbl_offset_syscall_table := 1024
+$(kapi)/syscall_table.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_64.h
+kapisyshdr-y += syscall_table.h
+
+targets += $(uapisyshdr-y) $(kapisyshdr-y)
+
+PHONY += all
+all: $(addprefix $(uapi)/,$(uapisyshdr-y))
+all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+ @:
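Roughly the commands the two if_changed rules above expand to when the architecture's archheaders target runs (a sketch; $(CONFIG_SHELL) and the exact generated paths depend on the build tree):

  sh arch/ia64/kernel/syscalls/syscallhdr.sh \
      arch/ia64/kernel/syscalls/syscall.tbl \
      arch/ia64/include/generated/uapi/asm/unistd_64.h \
      '' '' __NR_Linux
  sh arch/ia64/kernel/syscalls/syscalltbl.sh \
      arch/ia64/kernel/syscalls/syscall.tbl \
      arch/ia64/include/generated/asm/syscall_table.h \
      '' '' 1024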
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
new file mode 100644
index 000000000000..b22203b40bfe
--- /dev/null
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -0,0 +1,337 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# Linux system call numbers and entry vectors for ia64
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# Adding 1024 to <number> gives the actual system call number
+#
+# The <abi> is always "common" for this file
+#
+0 common ni_syscall sys_ni_syscall
+1 common exit sys_exit
+2 common read sys_read
+3 common write sys_write
+4 common open sys_open
+5 common close sys_close
+6 common creat sys_creat
+7 common link sys_link
+8 common unlink sys_unlink
+9 common execve ia64_execve
+10 common chdir sys_chdir
+11 common fchdir sys_fchdir
+12 common utimes sys_utimes
+13 common mknod sys_mknod
+14 common chmod sys_chmod
+15 common chown sys_chown
+16 common lseek sys_lseek
+17 common getpid sys_getpid
+18 common getppid sys_getppid
+19 common mount sys_mount
+20 common umount sys_umount
+21 common setuid sys_setuid
+22 common getuid sys_getuid
+23 common geteuid sys_geteuid
+24 common ptrace sys_ptrace
+25 common access sys_access
+26 common sync sys_sync
+27 common fsync sys_fsync
+28 common fdatasync sys_fdatasync
+29 common kill sys_kill
+30 common rename sys_rename
+31 common mkdir sys_mkdir
+32 common rmdir sys_rmdir
+33 common dup sys_dup
+34 common pipe sys_ia64_pipe
+35 common times sys_times
+36 common brk ia64_brk
+37 common setgid sys_setgid
+38 common getgid sys_getgid
+39 common getegid sys_getegid
+40 common acct sys_acct
+41 common ioctl sys_ioctl
+42 common fcntl sys_fcntl
+43 common umask sys_umask
+44 common chroot sys_chroot
+45 common ustat sys_ustat
+46 common dup2 sys_dup2
+47 common setreuid sys_setreuid
+48 common setregid sys_setregid
+49 common getresuid sys_getresuid
+50 common setresuid sys_setresuid
+51 common getresgid sys_getresgid
+52 common setresgid sys_setresgid
+53 common getgroups sys_getgroups
+54 common setgroups sys_setgroups
+55 common getpgid sys_getpgid
+56 common setpgid sys_setpgid
+57 common setsid sys_setsid
+58 common getsid sys_getsid
+59 common sethostname sys_sethostname
+60 common setrlimit sys_setrlimit
+61 common getrlimit sys_getrlimit
+62 common getrusage sys_getrusage
+63 common gettimeofday sys_gettimeofday
+64 common settimeofday sys_settimeofday
+65 common select sys_select
+66 common poll sys_poll
+67 common symlink sys_symlink
+68 common readlink sys_readlink
+69 common uselib sys_uselib
+70 common swapon sys_swapon
+71 common swapoff sys_swapoff
+72 common reboot sys_reboot
+73 common truncate sys_truncate
+74 common ftruncate sys_ftruncate
+75 common fchmod sys_fchmod
+76 common fchown sys_fchown
+77 common getpriority ia64_getpriority
+78 common setpriority sys_setpriority
+79 common statfs sys_statfs
+80 common fstatfs sys_fstatfs
+81 common gettid sys_gettid
+82 common semget sys_semget
+83 common semop sys_semop
+84 common semctl sys_semctl
+85 common msgget sys_msgget
+86 common msgsnd sys_msgsnd
+87 common msgrcv sys_msgrcv
+88 common msgctl sys_msgctl
+89 common shmget sys_shmget
+90 common shmat sys_shmat
+91 common shmdt sys_shmdt
+92 common shmctl sys_shmctl
+93 common syslog sys_syslog
+94 common setitimer sys_setitimer
+95 common getitimer sys_getitimer
+# 1120 was old_stat
+# 1121 was old_lstat
+# 1122 was old_fstat
+99 common vhangup sys_vhangup
+100 common lchown sys_lchown
+101 common remap_file_pages sys_remap_file_pages
+102 common wait4 sys_wait4
+103 common sysinfo sys_sysinfo
+104 common clone sys_clone
+105 common setdomainname sys_setdomainname
+106 common uname sys_newuname
+107 common adjtimex sys_adjtimex
+# 1132 was create_module
+109 common init_module sys_init_module
+110 common delete_module sys_delete_module
+# 1135 was get_kernel_syms
+# 1136 was query_module
+113 common quotactl sys_quotactl
+114 common bdflush sys_bdflush
+115 common sysfs sys_sysfs
+116 common personality sys_personality
+117 common afs_syscall sys_ni_syscall
+118 common setfsuid sys_setfsuid
+119 common setfsgid sys_setfsgid
+120 common getdents sys_getdents
+121 common flock sys_flock
+122 common readv sys_readv
+123 common writev sys_writev
+124 common pread64 sys_pread64
+125 common pwrite64 sys_pwrite64
+126 common _sysctl sys_sysctl
+127 common mmap sys_mmap
+128 common munmap sys_munmap
+129 common mlock sys_mlock
+130 common mlockall sys_mlockall
+131 common mprotect sys_mprotect
+132 common mremap ia64_mremap
+133 common msync sys_msync
+134 common munlock sys_munlock
+135 common munlockall sys_munlockall
+136 common sched_getparam sys_sched_getparam
+137 common sched_setparam sys_sched_setparam
+138 common sched_getscheduler sys_sched_getscheduler
+139 common sched_setscheduler sys_sched_setscheduler
+140 common sched_yield sys_sched_yield
+141 common sched_get_priority_max sys_sched_get_priority_max
+142 common sched_get_priority_min sys_sched_get_priority_min
+143 common sched_rr_get_interval sys_sched_rr_get_interval
+144 common nanosleep sys_nanosleep
+145 common nfsservctl sys_ni_syscall
+146 common prctl sys_prctl
+147 common old_getpagesize sys_getpagesize
+148 common mmap2 sys_mmap2
+149 common pciconfig_read sys_pciconfig_read
+150 common pciconfig_write sys_pciconfig_write
+151 common perfmonctl sys_perfmonctl
+152 common sigaltstack sys_sigaltstack
+153 common rt_sigaction sys_rt_sigaction
+154 common rt_sigpending sys_rt_sigpending
+155 common rt_sigprocmask sys_rt_sigprocmask
+156 common rt_sigqueueinfo sys_rt_sigqueueinfo
+157 common rt_sigreturn sys_rt_sigreturn
+158 common rt_sigsuspend sys_rt_sigsuspend
+159 common rt_sigtimedwait sys_rt_sigtimedwait
+160 common getcwd sys_getcwd
+161 common capget sys_capget
+162 common capset sys_capset
+163 common sendfile sys_sendfile64
+164 common getpmsg sys_ni_syscall
+165 common putpmsg sys_ni_syscall
+166 common socket sys_socket
+167 common bind sys_bind
+168 common connect sys_connect
+169 common listen sys_listen
+170 common accept sys_accept
+171 common getsockname sys_getsockname
+172 common getpeername sys_getpeername
+173 common socketpair sys_socketpair
+174 common send sys_send
+175 common sendto sys_sendto
+176 common recv sys_recv
+177 common recvfrom sys_recvfrom
+178 common shutdown sys_shutdown
+179 common setsockopt sys_setsockopt
+180 common getsockopt sys_getsockopt
+181 common sendmsg sys_sendmsg
+182 common recvmsg sys_recvmsg
+183 common pivot_root sys_pivot_root
+184 common mincore sys_mincore
+185 common madvise sys_madvise
+186 common stat sys_newstat
+187 common lstat sys_newlstat
+188 common fstat sys_newfstat
+189 common clone2 sys_clone2
+190 common getdents64 sys_getdents64
+191 common getunwind sys_getunwind
+192 common readahead sys_readahead
+193 common setxattr sys_setxattr
+194 common lsetxattr sys_lsetxattr
+195 common fsetxattr sys_fsetxattr
+196 common getxattr sys_getxattr
+197 common lgetxattr sys_lgetxattr
+198 common fgetxattr sys_fgetxattr
+199 common listxattr sys_listxattr
+200 common llistxattr sys_llistxattr
+201 common flistxattr sys_flistxattr
+202 common removexattr sys_removexattr
+203 common lremovexattr sys_lremovexattr
+204 common fremovexattr sys_fremovexattr
+205 common tkill sys_tkill
+206 common futex sys_futex
+207 common sched_setaffinity sys_sched_setaffinity
+208 common sched_getaffinity sys_sched_getaffinity
+209 common set_tid_address sys_set_tid_address
+210 common fadvise64 sys_fadvise64_64
+211 common tgkill sys_tgkill
+212 common exit_group sys_exit_group
+213 common lookup_dcookie sys_lookup_dcookie
+214 common io_setup sys_io_setup
+215 common io_destroy sys_io_destroy
+216 common io_getevents sys_io_getevents
+217 common io_submit sys_io_submit
+218 common io_cancel sys_io_cancel
+219 common epoll_create sys_epoll_create
+220 common epoll_ctl sys_epoll_ctl
+221 common epoll_wait sys_epoll_wait
+222 common restart_syscall sys_restart_syscall
+223 common semtimedop sys_semtimedop
+224 common timer_create sys_timer_create
+225 common timer_settime sys_timer_settime
+226 common timer_gettime sys_timer_gettime
+227 common timer_getoverrun sys_timer_getoverrun
+228 common timer_delete sys_timer_delete
+229 common clock_settime sys_clock_settime
+230 common clock_gettime sys_clock_gettime
+231 common clock_getres sys_clock_getres
+232 common clock_nanosleep sys_clock_nanosleep
+233 common fstatfs64 sys_fstatfs64
+234 common statfs64 sys_statfs64
+235 common mbind sys_mbind
+236 common get_mempolicy sys_get_mempolicy
+237 common set_mempolicy sys_set_mempolicy
+238 common mq_open sys_mq_open
+239 common mq_unlink sys_mq_unlink
+240 common mq_timedsend sys_mq_timedsend
+241 common mq_timedreceive sys_mq_timedreceive
+242 common mq_notify sys_mq_notify
+243 common mq_getsetattr sys_mq_getsetattr
+244 common kexec_load sys_kexec_load
+245 common vserver sys_ni_syscall
+246 common waitid sys_waitid
+247 common add_key sys_add_key
+248 common request_key sys_request_key
+249 common keyctl sys_keyctl
+250 common ioprio_set sys_ioprio_set
+251 common ioprio_get sys_ioprio_get
+252 common move_pages sys_move_pages
+253 common inotify_init sys_inotify_init
+254 common inotify_add_watch sys_inotify_add_watch
+255 common inotify_rm_watch sys_inotify_rm_watch
+256 common migrate_pages sys_migrate_pages
+257 common openat sys_openat
+258 common mkdirat sys_mkdirat
+259 common mknodat sys_mknodat
+260 common fchownat sys_fchownat
+261 common futimesat sys_futimesat
+262 common newfstatat sys_newfstatat
+263 common unlinkat sys_unlinkat
+264 common renameat sys_renameat
+265 common linkat sys_linkat
+266 common symlinkat sys_symlinkat
+267 common readlinkat sys_readlinkat
+268 common fchmodat sys_fchmodat
+269 common faccessat sys_faccessat
+270 common pselect6 sys_pselect6
+271 common ppoll sys_ppoll
+272 common unshare sys_unshare
+273 common splice sys_splice
+274 common set_robust_list sys_set_robust_list
+275 common get_robust_list sys_get_robust_list
+276 common sync_file_range sys_sync_file_range
+277 common tee sys_tee
+278 common vmsplice sys_vmsplice
+279 common fallocate sys_fallocate
+280 common getcpu sys_getcpu
+281 common epoll_pwait sys_epoll_pwait
+282 common utimensat sys_utimensat
+283 common signalfd sys_signalfd
+284 common timerfd sys_ni_syscall
+285 common eventfd sys_eventfd
+286 common timerfd_create sys_timerfd_create
+287 common timerfd_settime sys_timerfd_settime
+288 common timerfd_gettime sys_timerfd_gettime
+289 common signalfd4 sys_signalfd4
+290 common eventfd2 sys_eventfd2
+291 common epoll_create1 sys_epoll_create1
+292 common dup3 sys_dup3
+293 common pipe2 sys_pipe2
+294 common inotify_init1 sys_inotify_init1
+295 common preadv sys_preadv
+296 common pwritev sys_pwritev
+297 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+298 common recvmmsg sys_recvmmsg
+299 common fanotify_init sys_fanotify_init
+300 common fanotify_mark sys_fanotify_mark
+301 common prlimit64 sys_prlimit64
+302 common name_to_handle_at sys_name_to_handle_at
+303 common open_by_handle_at sys_open_by_handle_at
+304 common clock_adjtime sys_clock_adjtime
+305 common syncfs sys_syncfs
+306 common setns sys_setns
+307 common sendmmsg sys_sendmmsg
+308 common process_vm_readv sys_process_vm_readv
+309 common process_vm_writev sys_process_vm_writev
+310 common accept4 sys_accept4
+311 common finit_module sys_finit_module
+312 common sched_setattr sys_sched_setattr
+313 common sched_getattr sys_sched_getattr
+314 common renameat2 sys_renameat2
+315 common getrandom sys_getrandom
+316 common memfd_create sys_memfd_create
+317 common bpf sys_bpf
+318 common execveat sys_execveat
+319 common userfaultfd sys_userfaultfd
+320 common membarrier sys_membarrier
+321 common kcmp sys_kcmp
+322 common mlock2 sys_mlock2
+323 common copy_file_range sys_copy_file_range
+324 common preadv2 sys_preadv2
+325 common pwritev2 sys_pwritev2
diff --git a/arch/ia64/kernel/syscalls/syscallhdr.sh b/arch/ia64/kernel/syscalls/syscallhdr.sh
new file mode 100644
index 000000000000..0c2d2c748565
--- /dev/null
+++ b/arch/ia64/kernel/syscalls/syscallhdr.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
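+# Build the include guard from the output file name,
+# e.g. unistd_64.h -> _UAPI_ASM_IA64_UNISTD_64_H.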
+fileguard=_UAPI_ASM_IA64_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry ; do
+ if [ -z "$offset" ]; then
+ printf "#define __NR_%s%s\t%s\n" \
+ "${prefix}" "${name}" "${nr}"
+ else
+ printf "#define __NR_%s%s\t(%s + %s)\n" \
+ "${prefix}" "${name}" "${offset}" "${nr}"
+ fi
+ nxt=$((nr+1))
+ done
+
+ printf "\n"
+ printf "#ifdef __KERNEL__\n"
+ printf "#define __NR_syscalls\t%s\n" "${nxt}"
+ printf "#endif\n"
+ printf "\n"
+ printf "#endif /* %s */" "${fileguard}"
+) > "$out"
diff --git a/arch/ia64/kernel/syscalls/syscalltbl.sh b/arch/ia64/kernel/syscalls/syscalltbl.sh
new file mode 100644
index 000000000000..85d78d9309ad
--- /dev/null
+++ b/arch/ia64/kernel/syscalls/syscalltbl.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+my_abi="$4"
+offset="$5"
+
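+# Pad any gap between the previous entry (t_nxt) and this one (t_nr)
+# with sys_ni_syscall, then emit the real entry, so slots stay dense.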
+emit() {
+ t_nxt="$1"
+ t_nr="$2"
+ t_entry="$3"
+
+ while [ $t_nxt -lt $t_nr ]; do
+ printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+ t_nxt=$((t_nxt+1))
+ done
+ printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+}
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ nxt=0
+ if [ -z "$offset" ]; then
+ offset=0
+ fi
+
+ while read nr abi name entry ; do
+ emit $((nxt+offset)) $((nr+offset)) $entry
+ nxt=$((nr+1))
+ done
+) > "$out"
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 64a641467736..328ba83d735b 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -317,7 +317,6 @@ config UBOOT
help
If you say Y here, the kernel will try to collect command-line
parameters from the initial u-boot stack.
- default n
config 4KSTACKS
bool "Use 4Kb for kernel stacks instead of 8Kb"
@@ -395,7 +394,6 @@ comment "ROM configuration"
config ROM
bool "Specify ROM linker regions"
- default n
help
Define a ROM region for the linker script. This creates a kernel
that can be stored in flash, with possibly the text, and data
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 997c9f20ea0f..f00ca53f8c14 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -154,5 +154,8 @@ endif
archclean:
rm -f vmlinux.gz vmlinux.bz2
+archheaders:
+ $(Q)$(MAKE) $(build)=arch/m68k/kernel/syscalls all
+
install:
sh $(srctree)/arch/m68k/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 85904b73e261..bfd4648e76e3 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -12,6 +12,20 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
+CONFIG_M68020=y
+CONFIG_M68030=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_AMIGA=y
+CONFIG_ZORRO=y
+CONFIG_AMIGA_PCMCIA=y
+CONFIG_ZORRO_NAMES=y
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+CONFIG_AMIGA_BUILTIN_SERIAL=y
+CONFIG_SERIAL_CONSOLE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -28,22 +42,12 @@ CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
-CONFIG_KEXEC=y
-CONFIG_BOOTINFO_PROC=y
-CONFIG_M68020=y
-CONFIG_M68030=y
-CONFIG_M68040=y
-CONFIG_M68060=y
-CONFIG_AMIGA=y
-CONFIG_ZORRO=y
-CONFIG_AMIGA_PCMCIA=y
-CONFIG_ZORRO_NAMES=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -112,6 +116,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -119,7 +124,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -197,7 +205,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -227,7 +234,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -297,7 +303,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -337,6 +342,7 @@ CONFIG_BLK_DEV_GAYLE=y
CONFIG_BLK_DEV_BUDDHA=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -400,8 +406,10 @@ CONFIG_A2065=y
CONFIG_ARIADNE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -487,10 +495,6 @@ CONFIG_RTC_DRV_RP5C01=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
-CONFIG_AMIGA_BUILTIN_SERIAL=y
-CONFIG_SERIAL_CONSOLE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -588,31 +592,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -630,6 +609,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -639,7 +619,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -672,5 +651,35 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 9b3818bbb68b..81112af1e478 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -12,6 +12,15 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
+CONFIG_M68020=y
+CONFIG_M68030=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_APOLLO=y
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -29,19 +38,12 @@ CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
-CONFIG_KEXEC=y
-CONFIG_BOOTINFO_PROC=y
-CONFIG_M68020=y
-CONFIG_M68030=y
-CONFIG_M68040=y
-CONFIG_M68060=y
-CONFIG_APOLLO=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -110,6 +112,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -117,7 +120,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -195,7 +201,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -225,7 +230,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -295,7 +299,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -324,6 +327,7 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -378,8 +382,10 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
@@ -446,8 +452,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -545,31 +549,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -587,6 +566,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -596,7 +576,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -629,5 +608,35 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 769677809945..6d4b6023a2f0 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -12,6 +12,23 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
+CONFIG_M68020=y
+CONFIG_M68030=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_ATARI=y
+CONFIG_ATARI_ROM_ISA=y
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+CONFIG_NATFEAT=y
+CONFIG_NFBLOCK=y
+CONFIG_NFCON=y
+CONFIG_NFETH=y
+CONFIG_ATARI_ETHERNAT=y
+CONFIG_ATARI_ETHERNEC=y
+CONFIG_ATARI_DSP56K=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -28,20 +45,12 @@ CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
-CONFIG_KEXEC=y
-CONFIG_BOOTINFO_PROC=y
-CONFIG_M68020=y
-CONFIG_M68030=y
-CONFIG_M68040=y
-CONFIG_M68060=y
-CONFIG_ATARI=y
-CONFIG_ATARI_ROM_ISA=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -110,6 +119,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -117,7 +127,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -195,7 +208,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -225,7 +237,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -295,7 +306,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -332,6 +342,7 @@ CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_FALCON_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -388,8 +399,10 @@ CONFIG_VETH=m
CONFIG_ATARILANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
@@ -460,15 +473,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
-CONFIG_NATFEAT=y
-CONFIG_NFBLOCK=y
-CONFIG_NFCON=y
-CONFIG_NFETH=y
-CONFIG_ATARI_ETHERNAT=y
-CONFIG_ATARI_ETHERNEC=y
-CONFIG_ATARI_DSP56K=m
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -566,31 +570,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -608,6 +587,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -617,7 +597,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -650,5 +629,35 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 7dd264ddf2ea..3306dff09d3c 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -12,6 +12,13 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_VME=y
+CONFIG_BVME6000=y
+CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -28,18 +35,12 @@ CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
-CONFIG_KEXEC=y
-CONFIG_BOOTINFO_PROC=y
-CONFIG_M68040=y
-CONFIG_M68060=y
-CONFIG_VME=y
-CONFIG_BVME6000=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -108,6 +109,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -115,7 +117,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +198,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -223,7 +227,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -293,7 +296,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -322,6 +324,7 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -377,8 +380,10 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
@@ -439,7 +444,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -537,31 +541,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -579,6 +558,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -588,7 +568,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -621,5 +600,35 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 515f7439c755..c15e15b68d39 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -12,6 +12,14 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
+CONFIG_M68020=y
+CONFIG_M68030=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_HP300=y
+CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -29,19 +37,12 @@ CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
-CONFIG_KEXEC=y
-CONFIG_BOOTINFO_PROC=y
-CONFIG_M68020=y
-CONFIG_M68030=y
-CONFIG_M68040=y
-CONFIG_M68060=y
-CONFIG_HP300=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -110,6 +111,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -117,7 +119,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -195,7 +200,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -225,7 +229,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -295,7 +298,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -324,6 +326,7 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -379,8 +382,10 @@ CONFIG_VETH=m
CONFIG_HPLANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
@@ -449,7 +454,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -547,31 +551,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -589,6 +568,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -598,7 +578,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -631,5 +610,35 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 8e1038ceb407..1a0ce0d11267 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -12,6 +12,14 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
+CONFIG_M68020=y
+CONFIG_M68030=y
+CONFIG_M68040=y
+CONFIG_M68KFPU_EMU=y
+CONFIG_MAC=y
+CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -28,19 +36,12 @@ CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
-CONFIG_KEXEC=y
-CONFIG_BOOTINFO_PROC=y
-CONFIG_M68020=y
-CONFIG_M68030=y
-CONFIG_M68040=y
-CONFIG_M68KFPU_EMU=y
-CONFIG_MAC=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -109,6 +110,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -116,7 +118,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -194,7 +199,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -224,7 +228,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -297,7 +300,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -331,6 +333,7 @@ CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_MAC_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -366,8 +369,8 @@ CONFIG_TCM_PSCSI=m
CONFIG_ADB=y
CONFIG_ADB_MACII=y
CONFIG_ADB_IOP=y
-CONFIG_ADB_PMU=y
CONFIG_ADB_CUDA=y
+CONFIG_ADB_PMU=y
CONFIG_INPUT_ADBHID=y
CONFIG_MAC_EMUMOUSEBTN=y
CONFIG_NETDEVICES=y
@@ -395,8 +398,10 @@ CONFIG_VETH=m
CONFIG_MACMACE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -471,7 +476,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -569,31 +573,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -611,6 +590,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -620,7 +600,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -653,5 +632,35 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 62c8aaa15cc7..9758839b74bd 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -12,18 +12,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-# CONFIG_EFI_PARTITION is not set
-CONFIG_IOSCHED_DEADLINE=m
-CONFIG_MQ_IOSCHED_DEADLINE=m
-CONFIG_MQ_IOSCHED_KYBER=m
-CONFIG_IOSCHED_BFQ=m
CONFIG_KEXEC=y
CONFIG_BOOTINFO_PROC=y
CONFIG_M68020=y
@@ -45,12 +33,35 @@ CONFIG_ZORRO=y
CONFIG_AMIGA_PCMCIA=y
CONFIG_ATARI_ROM_ISA=y
CONFIG_ZORRO_NAMES=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
+CONFIG_NATFEAT=y
+CONFIG_NFBLOCK=y
+CONFIG_NFCON=y
+CONFIG_NFETH=y
+CONFIG_ATARI_ETHERNAT=y
+CONFIG_ATARI_ETHERNEC=y
+CONFIG_ATARI_DSP56K=m
+CONFIG_AMIGA_BUILTIN_SERIAL=y
+CONFIG_SERIAL_CONSOLE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_MQ_IOSCHED_KYBER=m
+CONFIG_IOSCHED_BFQ=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -119,6 +130,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -126,7 +138,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -204,7 +219,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -234,7 +248,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -307,7 +320,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -354,6 +366,7 @@ CONFIG_BLK_DEV_MAC_IDE=y
CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -400,8 +413,8 @@ CONFIG_TCM_PSCSI=m
CONFIG_ADB=y
CONFIG_ADB_MACII=y
CONFIG_ADB_IOP=y
-CONFIG_ADB_PMU=y
CONFIG_ADB_CUDA=y
+CONFIG_ADB_PMU=y
CONFIG_INPUT_ADBHID=y
CONFIG_MAC_EMUMOUSEBTN=y
CONFIG_NETDEVICES=y
@@ -436,8 +449,10 @@ CONFIG_SUN3LANCE=y
CONFIG_MACMACE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
CONFIG_MAC89x0=y
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -543,17 +558,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
-CONFIG_NATFEAT=y
-CONFIG_NFBLOCK=y
-CONFIG_NFCON=y
-CONFIG_NFETH=y
-CONFIG_ATARI_ETHERNAT=y
-CONFIG_ATARI_ETHERNEC=y
-CONFIG_ATARI_DSP56K=m
-CONFIG_AMIGA_BUILTIN_SERIAL=y
-CONFIG_SERIAL_CONSOLE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -651,31 +655,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -693,6 +672,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -702,7 +682,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -735,5 +714,35 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 733973f91297..f5526731ccdb 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -12,6 +12,12 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
+CONFIG_M68030=y
+CONFIG_VME=y
+CONFIG_MVME147=y
+CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -28,17 +34,12 @@ CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
-CONFIG_KEXEC=y
-CONFIG_BOOTINFO_PROC=y
-CONFIG_M68030=y
-CONFIG_VME=y
-CONFIG_MVME147=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -107,6 +108,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -114,7 +116,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -192,7 +197,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -222,7 +226,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -292,7 +295,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -321,6 +323,7 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -377,8 +380,10 @@ CONFIG_VETH=m
CONFIG_MVME147_NET=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
@@ -439,7 +444,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -537,31 +541,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -579,6 +558,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -588,7 +568,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -621,5 +600,35 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index fee30cc9ac16..5db58ff4b107 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -12,6 +12,13 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_VME=y
+CONFIG_MVME16x=y
+CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -28,18 +35,12 @@ CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
-CONFIG_KEXEC=y
-CONFIG_BOOTINFO_PROC=y
-CONFIG_M68040=y
-CONFIG_M68060=y
-CONFIG_VME=y
-CONFIG_MVME16x=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -108,6 +109,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -115,7 +117,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +198,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -223,7 +227,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -293,7 +296,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -322,6 +324,7 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -377,8 +380,10 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMAZON is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
@@ -439,7 +444,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -537,31 +541,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -579,6 +558,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -588,7 +568,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -621,5 +600,35 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index eebf9c9088e7..b645230da128 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -12,6 +12,13 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
+CONFIG_M68040=y
+CONFIG_M68060=y
+CONFIG_Q40=y
+CONFIG_HEARTBEAT=y
+CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -29,17 +36,12 @@ CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
-CONFIG_KEXEC=y
-CONFIG_BOOTINFO_PROC=y
-CONFIG_M68040=y
-CONFIG_M68060=y
-CONFIG_Q40=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -108,6 +110,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -115,7 +118,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -193,7 +199,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -223,7 +228,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -293,7 +297,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -329,6 +332,7 @@ CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -385,8 +389,10 @@ CONFIG_VETH=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
@@ -461,8 +467,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_HEARTBEAT=y
-CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -560,31 +564,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -602,6 +581,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -611,7 +591,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -644,5 +623,35 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index dabc54318c09..4afe2100947e 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -12,6 +12,10 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
+CONFIG_SUN3=y
+CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -28,15 +32,12 @@ CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
-CONFIG_KEXEC=y
-CONFIG_BOOTINFO_PROC=y
-CONFIG_SUN3=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -105,6 +106,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -112,7 +114,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -190,7 +195,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -220,7 +224,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -290,7 +293,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -319,6 +321,7 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -375,7 +378,9 @@ CONFIG_VETH=m
CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
@@ -441,7 +446,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -539,30 +543,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -580,6 +560,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -589,7 +570,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -622,5 +602,34 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 0d9a5c2a311a..bd22893d0dc3 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -12,6 +12,10 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_USERFAULTFD=y
CONFIG_SLAB=y
+CONFIG_KEXEC=y
+CONFIG_BOOTINFO_PROC=y
+CONFIG_SUN3X=y
+CONFIG_PROC_HARDWARE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -28,15 +32,12 @@ CONFIG_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_DEADLINE=m
CONFIG_MQ_IOSCHED_KYBER=m
CONFIG_IOSCHED_BFQ=m
-CONFIG_KEXEC=y
-CONFIG_BOOTINFO_PROC=y
-CONFIG_SUN3X=y
-# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
-CONFIG_ZPOOL=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
+# CONFIG_COMPACTION is not set
+CONFIG_CLEANCACHE=y
+CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -105,6 +106,7 @@ CONFIG_NFT_LIMIT=m
CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
@@ -112,7 +114,10 @@ CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
CONFIG_NFT_DUP_NETDEV=m
CONFIG_NFT_FWD_NETDEV=m
CONFIG_NFT_FIB_NETDEV=m
@@ -190,7 +195,6 @@ CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NFT_DUP_IPV4=m
CONFIG_NFT_FIB_IPV4=m
@@ -220,7 +224,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_NFT_MASQ_IPV6=m
@@ -290,7 +293,6 @@ CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
# CONFIG_BATMAN_ADV_BATMAN_V is not set
-CONFIG_BATMAN_ADV_DAT=y
CONFIG_BATMAN_ADV_NC=y
CONFIG_BATMAN_ADV_MCAST=y
CONFIG_NETLINK_DIAG=m
@@ -319,6 +321,7 @@ CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
@@ -375,8 +378,10 @@ CONFIG_VETH=m
CONFIG_SUN3LANCE=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
@@ -441,7 +446,6 @@ CONFIG_RTC_DRV_GENERIC=m
# CONFIG_VIRTIO_MENU is not set
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_DAX=m
-CONFIG_PROC_HARDWARE=y
CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_JFS_FS=m
@@ -539,31 +543,6 @@ CONFIG_NLS_MAC_INUIT=m
CONFIG_NLS_MAC_ROMANIAN=m
CONFIG_NLS_MAC_TURKISH=m
CONFIG_DLM=m
-# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_WW_MUTEX_SELFTEST=m
-CONFIG_TEST_LIST_SORT=m
-CONFIG_TEST_SORT=m
-CONFIG_ATOMIC64_SELFTEST=m
-CONFIG_ASYNC_RAID6_TEST=m
-CONFIG_TEST_HEXDUMP=m
-CONFIG_TEST_STRING_HELPERS=m
-CONFIG_TEST_KSTRTOX=m
-CONFIG_TEST_PRINTF=m
-CONFIG_TEST_BITMAP=m
-CONFIG_TEST_UUID=m
-CONFIG_TEST_OVERFLOW=m
-CONFIG_TEST_RHASHTABLE=m
-CONFIG_TEST_HASH=m
-CONFIG_TEST_USER_COPY=m
-CONFIG_TEST_BPF=m
-CONFIG_FIND_BIT_BENCHMARK=m
-CONFIG_TEST_FIRMWARE=m
-CONFIG_TEST_SYSCTL=m
-CONFIG_TEST_UDELAY=m
-CONFIG_TEST_STATIC_KEYS=m
-CONFIG_TEST_KMOD=m
-CONFIG_EARLY_PRINTK=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_HARDENED_USERCOPY=y
CONFIG_CRYPTO_RSA=m
@@ -581,6 +560,7 @@ CONFIG_CRYPTO_MORUS640=m
CONFIG_CRYPTO_MORUS1280=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
@@ -590,7 +570,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
@@ -623,5 +602,35 @@ CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC32_SELFTEST=m
+CONFIG_CRC64=m
CONFIG_XZ_DEC_TEST=m
CONFIG_STRING_SELFTEST=m
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_WW_MUTEX_SELFTEST=m
+CONFIG_TEST_LIST_SORT=m
+CONFIG_TEST_SORT=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_ASYNC_RAID6_TEST=m
+CONFIG_TEST_HEXDUMP=m
+CONFIG_TEST_STRING_HELPERS=m
+CONFIG_TEST_KSTRTOX=m
+CONFIG_TEST_PRINTF=m
+CONFIG_TEST_BITMAP=m
+CONFIG_TEST_BITFIELD=m
+CONFIG_TEST_UUID=m
+CONFIG_TEST_XARRAY=m
+CONFIG_TEST_OVERFLOW=m
+CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
+CONFIG_TEST_IDA=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_TEST_BPF=m
+CONFIG_FIND_BIT_BENCHMARK=m
+CONFIG_TEST_FIRMWARE=m
+CONFIG_TEST_SYSCTL=m
+CONFIG_TEST_UDELAY=m
+CONFIG_TEST_STATIC_KEYS=m
+CONFIG_TEST_KMOD=m
+CONFIG_TEST_MEMCAT_P=m
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index a4b8d3331a9e..9f1dd26903e3 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,3 +1,4 @@
+generated-y += syscall_table.h
generic-y += barrier.h
generic-y += compat.h
generic-y += device.h
diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h
index 85761255dde5..8a6dc6e5a279 100644
--- a/arch/m68k/include/asm/raw_io.h
+++ b/arch/m68k/include/asm/raw_io.h
@@ -107,12 +107,43 @@ static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len
}
static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
- unsigned int len)
+ unsigned int nr)
{
- unsigned int i;
+ unsigned int tmp;

- for (i = 0; i < len; i++)
- out_8(port, *buf++);
+ if (nr & 15) {
+ tmp = (nr & 15) - 1;
+ asm volatile (
+ "1: moveb %0@+,%2@; dbra %1,1b"
+ : "=a" (buf), "=d" (tmp)
+ : "a" (port), "0" (buf),
+ "1" (tmp));
+ }
+ if (nr >> 4) {
+ tmp = (nr >> 4) - 1;
+ asm volatile (
+ "1: "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "moveb %0@+,%2@; "
+ "dbra %1,1b"
+ : "=a" (buf), "=d" (tmp)
+ : "a" (port), "0" (buf),
+ "1" (tmp));
+ }
}
static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
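
The new raw_outsb() drops the simple byte loop in favour of the unrolled dbra loops this file already uses for helpers such as raw_insw() above: up to 15 leftover bytes are moved one at a time, then the remaining count is handled in blocks of 16 moveb instructions, so the dbra loop overhead is paid once per 16 bytes rather than once per byte (the "0"/"1" input constraints alias the output operands, letting the asm advance buf and the counter in place). A plain-C model of the control flow, as a sketch only, not kernel code:

	static void outsb_model(volatile unsigned char *port,
				const unsigned char *buf, unsigned int nr)
	{
		unsigned int i, j;

		for (i = 0; i < (nr & 15); i++)		/* remainder, 0..15 bytes */
			*port = *buf++;
		for (i = 0; i < (nr >> 4); i++)		/* then blocks of 16 */
			for (j = 0; j < 16; j++)
				*port = *buf++;
	}
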
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index e680031bda7b..49d5de18646b 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,8 +4,7 @@
#include <uapi/asm/unistd.h>
-
-#define NR_syscalls 380
+#define NR_syscalls __NR_syscalls
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
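
With the table-driven machinery added below, the hardcoded count disappears: syscallhdr.sh writes __NR_syscalls into the generated unistd_32.h, so the kernel-side NR_syscalls now tracks syscall.tbl automatically instead of needing a manual bump for every new syscall. The assumed resolution chain, sketched:

	#include <uapi/asm/unistd.h>		/* pulls in generated asm/unistd_32.h */
	#define NR_syscalls __NR_syscalls	/* 380 for this table (379 + 1) */
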
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild
index c2e26a44c482..b6452910d7e1 100644
--- a/arch/m68k/include/uapi/asm/Kbuild
+++ b/arch/m68k/include/uapi/asm/Kbuild
@@ -1,6 +1,7 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generated-y += unistd_32.h
generic-y += auxvec.h
generic-y += bitsperlong.h
generic-y += bpf_perf_event.h
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index de3054f8a681..cdbd090d44a2 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -2,389 +2,6 @@
#ifndef _UAPI_ASM_M68K_UNISTD_H_
#define _UAPI_ASM_M68K_UNISTD_H_
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_chown 16
-/*#define __NR_break 17*/
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
-/*#define __NR_stty 31*/
-/*#define __NR_gtty 32*/
-#define __NR_access 33
-#define __NR_nice 34
-/*#define __NR_ftime 35*/
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-/*#define __NR_prof 44*/
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
-/*#define __NR_lock 53*/
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-/*#define __NR_mpx 56*/
-#define __NR_setpgid 57
-/*#define __NR_ulimit 58*/
-/*#define __NR_oldolduname 59*/
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_select 82
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-/*#define __NR_profil 98*/
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-/*#define __NR_ioperm 101*/
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-/*#define __NR_olduname 109*/
-/*#define __NR_iopl 110*/ /* not supported */
-#define __NR_vhangup 111
-/*#define __NR_idle 112*/ /* Obsolete */
-/*#define __NR_vm86 113*/ /* not supported */
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_cacheflush 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
-#define __NR_create_module 127
-#define __NR_init_module 128
-#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-/*#define __NR_afs_syscall 137*/ /* Syscall for Andrew File System */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
-#define __NR_getpagesize 166
-#define __NR_query_module 167
-#define __NR_poll 168
-#define __NR_nfsservctl 169
-#define __NR_setresgid 170
-#define __NR_getresgid 171
-#define __NR_prctl 172
-#define __NR_rt_sigreturn 173
-#define __NR_rt_sigaction 174
-#define __NR_rt_sigprocmask 175
-#define __NR_rt_sigpending 176
-#define __NR_rt_sigtimedwait 177
-#define __NR_rt_sigqueueinfo 178
-#define __NR_rt_sigsuspend 179
-#define __NR_pread64 180
-#define __NR_pwrite64 181
-#define __NR_lchown 182
-#define __NR_getcwd 183
-#define __NR_capget 184
-#define __NR_capset 185
-#define __NR_sigaltstack 186
-#define __NR_sendfile 187
-#define __NR_getpmsg 188 /* some people actually want streams */
-#define __NR_putpmsg 189 /* some people actually want streams */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#define __NR_chown32 198
-#define __NR_getuid32 199
-#define __NR_getgid32 200
-#define __NR_geteuid32 201
-#define __NR_getegid32 202
-#define __NR_setreuid32 203
-#define __NR_setregid32 204
-#define __NR_getgroups32 205
-#define __NR_setgroups32 206
-#define __NR_fchown32 207
-#define __NR_setresuid32 208
-#define __NR_getresuid32 209
-#define __NR_setresgid32 210
-#define __NR_getresgid32 211
-#define __NR_lchown32 212
-#define __NR_setuid32 213
-#define __NR_setgid32 214
-#define __NR_setfsuid32 215
-#define __NR_setfsgid32 216
-#define __NR_pivot_root 217
-/* 218*/
-/* 219*/
-#define __NR_getdents64 220
-#define __NR_gettid 221
-#define __NR_tkill 222
-#define __NR_setxattr 223
-#define __NR_lsetxattr 224
-#define __NR_fsetxattr 225
-#define __NR_getxattr 226
-#define __NR_lgetxattr 227
-#define __NR_fgetxattr 228
-#define __NR_listxattr 229
-#define __NR_llistxattr 230
-#define __NR_flistxattr 231
-#define __NR_removexattr 232
-#define __NR_lremovexattr 233
-#define __NR_fremovexattr 234
-#define __NR_futex 235
-#define __NR_sendfile64 236
-#define __NR_mincore 237
-#define __NR_madvise 238
-#define __NR_fcntl64 239
-#define __NR_readahead 240
-#define __NR_io_setup 241
-#define __NR_io_destroy 242
-#define __NR_io_getevents 243
-#define __NR_io_submit 244
-#define __NR_io_cancel 245
-#define __NR_fadvise64 246
-#define __NR_exit_group 247
-#define __NR_lookup_dcookie 248
-#define __NR_epoll_create 249
-#define __NR_epoll_ctl 250
-#define __NR_epoll_wait 251
-#define __NR_remap_file_pages 252
-#define __NR_set_tid_address 253
-#define __NR_timer_create 254
-#define __NR_timer_settime 255
-#define __NR_timer_gettime 256
-#define __NR_timer_getoverrun 257
-#define __NR_timer_delete 258
-#define __NR_clock_settime 259
-#define __NR_clock_gettime 260
-#define __NR_clock_getres 261
-#define __NR_clock_nanosleep 262
-#define __NR_statfs64 263
-#define __NR_fstatfs64 264
-#define __NR_tgkill 265
-#define __NR_utimes 266
-#define __NR_fadvise64_64 267
-#define __NR_mbind 268
-#define __NR_get_mempolicy 269
-#define __NR_set_mempolicy 270
-#define __NR_mq_open 271
-#define __NR_mq_unlink 272
-#define __NR_mq_timedsend 273
-#define __NR_mq_timedreceive 274
-#define __NR_mq_notify 275
-#define __NR_mq_getsetattr 276
-#define __NR_waitid 277
-/*#define __NR_vserver 278*/
-#define __NR_add_key 279
-#define __NR_request_key 280
-#define __NR_keyctl 281
-#define __NR_ioprio_set 282
-#define __NR_ioprio_get 283
-#define __NR_inotify_init 284
-#define __NR_inotify_add_watch 285
-#define __NR_inotify_rm_watch 286
-#define __NR_migrate_pages 287
-#define __NR_openat 288
-#define __NR_mkdirat 289
-#define __NR_mknodat 290
-#define __NR_fchownat 291
-#define __NR_futimesat 292
-#define __NR_fstatat64 293
-#define __NR_unlinkat 294
-#define __NR_renameat 295
-#define __NR_linkat 296
-#define __NR_symlinkat 297
-#define __NR_readlinkat 298
-#define __NR_fchmodat 299
-#define __NR_faccessat 300
-#define __NR_pselect6 301
-#define __NR_ppoll 302
-#define __NR_unshare 303
-#define __NR_set_robust_list 304
-#define __NR_get_robust_list 305
-#define __NR_splice 306
-#define __NR_sync_file_range 307
-#define __NR_tee 308
-#define __NR_vmsplice 309
-#define __NR_move_pages 310
-#define __NR_sched_setaffinity 311
-#define __NR_sched_getaffinity 312
-#define __NR_kexec_load 313
-#define __NR_getcpu 314
-#define __NR_epoll_pwait 315
-#define __NR_utimensat 316
-#define __NR_signalfd 317
-#define __NR_timerfd_create 318
-#define __NR_eventfd 319
-#define __NR_fallocate 320
-#define __NR_timerfd_settime 321
-#define __NR_timerfd_gettime 322
-#define __NR_signalfd4 323
-#define __NR_eventfd2 324
-#define __NR_epoll_create1 325
-#define __NR_dup3 326
-#define __NR_pipe2 327
-#define __NR_inotify_init1 328
-#define __NR_preadv 329
-#define __NR_pwritev 330
-#define __NR_rt_tgsigqueueinfo 331
-#define __NR_perf_event_open 332
-#define __NR_get_thread_area 333
-#define __NR_set_thread_area 334
-#define __NR_atomic_cmpxchg_32 335
-#define __NR_atomic_barrier 336
-#define __NR_fanotify_init 337
-#define __NR_fanotify_mark 338
-#define __NR_prlimit64 339
-#define __NR_name_to_handle_at 340
-#define __NR_open_by_handle_at 341
-#define __NR_clock_adjtime 342
-#define __NR_syncfs 343
-#define __NR_setns 344
-#define __NR_process_vm_readv 345
-#define __NR_process_vm_writev 346
-#define __NR_kcmp 347
-#define __NR_finit_module 348
-#define __NR_sched_setattr 349
-#define __NR_sched_getattr 350
-#define __NR_renameat2 351
-#define __NR_getrandom 352
-#define __NR_memfd_create 353
-#define __NR_bpf 354
-#define __NR_execveat 355
-#define __NR_socket 356
-#define __NR_socketpair 357
-#define __NR_bind 358
-#define __NR_connect 359
-#define __NR_listen 360
-#define __NR_accept4 361
-#define __NR_getsockopt 362
-#define __NR_setsockopt 363
-#define __NR_getsockname 364
-#define __NR_getpeername 365
-#define __NR_sendto 366
-#define __NR_sendmsg 367
-#define __NR_recvfrom 368
-#define __NR_recvmsg 369
-#define __NR_shutdown 370
-#define __NR_recvmmsg 371
-#define __NR_sendmmsg 372
-#define __NR_userfaultfd 373
-#define __NR_membarrier 374
-#define __NR_mlock2 375
-#define __NR_copy_file_range 376
-#define __NR_preadv2 377
-#define __NR_pwritev2 378
-#define __NR_statx 379
+#include <asm/unistd_32.h>
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
index a1a3eaeaf58c..ad0195cbe042 100644
--- a/arch/m68k/kernel/setup_mm.c
+++ b/arch/m68k/kernel/setup_mm.c
@@ -164,8 +164,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
be32_to_cpu(m->addr);
m68k_memory[m68k_num_memory].size =
be32_to_cpu(m->size);
- memblock_add(m68k_memory[m68k_num_memory].addr,
- m68k_memory[m68k_num_memory].size);
m68k_num_memory++;
} else
pr_warn("%s: too many memory chunks\n",
diff --git a/arch/m68k/kernel/syscalls/Makefile b/arch/m68k/kernel/syscalls/Makefile
new file mode 100644
index 000000000000..659faefdcb1d
--- /dev/null
+++ b/arch/m68k/kernel/syscalls/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
+ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+syscall := $(srctree)/$(src)/syscall.tbl
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+ '$(syshdr_abis_$(basetarget))' \
+ '$(syshdr_pfx_$(basetarget))' \
+ '$(syshdr_offset_$(basetarget))'
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
+ '$(systbl_abis_$(basetarget))' \
+ '$(systbl_abi_$(basetarget))' \
+ '$(systbl_offset_$(basetarget))'
+
+$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+ $(call if_changed,syshdr)
+
+$(kapi)/syscall_table.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_32.h
+kapisyshdr-y += syscall_table.h
+
+targets += $(uapisyshdr-y) $(kapisyshdr-y)
+
+PHONY += all
+all: $(addprefix $(uapi)/,$(uapisyshdr-y))
+all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+ @:
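
This Makefile turns the single syscall.tbl into two headers under include/generated/: a uapi one carrying the __NR_ numbers and a kernel-internal one carrying the table entries; if_changed keeps them from being rewritten unless the table or a script changes. The assumed consumers, per the rest of this patch:

	/* in arch/m68k/include/uapi/asm/unistd.h (further below) */
	#include <asm/unistd_32.h>	/* __NR_* numbers + __NR_syscalls */

	/* in arch/m68k/kernel/syscalltable.S, presumably, after its removals */
	#include <asm/syscall_table.h>	/* __SYSCALL() rows */
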
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
new file mode 100644
index 000000000000..1a95c4a1bc0d
--- /dev/null
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -0,0 +1,389 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for m68k
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "common" for this file
+#
+0 common restart_syscall sys_restart_syscall
+1 common exit sys_exit
+2 common fork __sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open
+6 common close sys_close
+7 common waitpid sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve
+12 common chdir sys_chdir
+13 common time sys_time
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common chown sys_chown16
+# 17 was break
+18 common oldstat sys_stat
+19 common lseek sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount
+22 common umount sys_oldumount
+23 common setuid sys_setuid16
+24 common getuid sys_getuid16
+25 common stime sys_stime
+26 common ptrace sys_ptrace
+27 common alarm sys_alarm
+28 common oldfstat sys_fstat
+29 common pause sys_pause
+30 common utime sys_utime
+# 31 was stty
+# 32 was gtty
+33 common access sys_access
+34 common nice sys_nice
+# 35 was ftime
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times
+# 44 was prof
+45 common brk sys_brk
+46 common setgid sys_setgid16
+47 common getgid sys_getgid16
+48 common signal sys_signal
+49 common geteuid sys_geteuid16
+50 common getegid sys_getegid16
+51 common acct sys_acct
+52 common umount2 sys_umount
+# 53 was lock
+54 common ioctl sys_ioctl
+55 common fcntl sys_fcntl
+# 56 was mpx
+57 common setpgid sys_setpgid
+# 58 was ulimit
+# 59 was oldolduname
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common sigaction sys_sigaction
+68 common sgetmask sys_sgetmask
+69 common ssetmask sys_ssetmask
+70 common setreuid sys_setreuid16
+71 common setregid sys_setregid16
+72 common sigsuspend sys_sigsuspend
+73 common sigpending sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit
+76 common getrlimit sys_old_getrlimit
+77 common getrusage sys_getrusage
+78 common gettimeofday sys_gettimeofday
+79 common settimeofday sys_settimeofday
+80 common getgroups sys_getgroups16
+81 common setgroups sys_setgroups16
+82 common select sys_old_select
+83 common symlink sys_symlink
+84 common oldlstat sys_lstat
+85 common readlink sys_readlink
+86 common uselib sys_uselib
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+89 common readdir sys_old_readdir
+90 common mmap sys_old_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate
+93 common ftruncate sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown16
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+# 98 was profil
+99 common statfs sys_statfs
+100 common fstatfs sys_fstatfs
+# 101 was ioperm
+102 common socketcall sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer
+105 common getitimer sys_getitimer
+106 common stat sys_newstat
+107 common lstat sys_newlstat
+108 common fstat sys_newfstat
+# 109 was olduname
+# 110 was iopl
+111 common vhangup sys_vhangup
+# 112 was idle
+# 113 was vm86
+114 common wait4 sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo
+117 common ipc sys_ipc
+118 common fsync sys_fsync
+119 common sigreturn sys_sigreturn
+120 common clone __sys_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+123 common cacheflush sys_cacheflush
+124 common adjtimex sys_adjtimex
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask
+127 common create_module sys_ni_syscall
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+130 common get_kernel_syms sys_ni_syscall
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_bdflush
+135 common sysfs sys_sysfs
+136 common personality sys_personality
+# 137 was afs_syscall
+138 common setfsuid sys_setfsuid16
+139 common setfsgid sys_setfsgid16
+140 common _llseek sys_llseek
+141 common getdents sys_getdents
+142 common _newselect sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv
+146 common writev sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_sysctl
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval
+162 common nanosleep sys_nanosleep
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid16
+165 common getresuid sys_getresuid16
+166 common getpagesize sys_getpagesize
+167 common query_module sys_ni_syscall
+168 common poll sys_poll
+169 common nfsservctl sys_ni_syscall
+170 common setresgid sys_setresgid16
+171 common getresgid sys_getresgid16
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn
+174 common rt_sigaction sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend
+180 common pread64 sys_pread64
+181 common pwrite64 sys_pwrite64
+182 common lchown sys_lchown16
+183 common getcwd sys_getcwd
+184 common capget sys_capget
+185 common capset sys_capset
+186 common sigaltstack sys_sigaltstack
+187 common sendfile sys_sendfile
+188 common getpmsg sys_ni_syscall
+189 common putpmsg sys_ni_syscall
+190 common vfork __sys_vfork
+191 common ugetrlimit sys_getrlimit
+192 common mmap2 sys_mmap2
+193 common truncate64 sys_truncate64
+194 common ftruncate64 sys_ftruncate64
+195 common stat64 sys_stat64
+196 common lstat64 sys_lstat64
+197 common fstat64 sys_fstat64
+198 common chown32 sys_chown
+199 common getuid32 sys_getuid
+200 common getgid32 sys_getgid
+201 common geteuid32 sys_geteuid
+202 common getegid32 sys_getegid
+203 common setreuid32 sys_setreuid
+204 common setregid32 sys_setregid
+205 common getgroups32 sys_getgroups
+206 common setgroups32 sys_setgroups
+207 common fchown32 sys_fchown
+208 common setresuid32 sys_setresuid
+209 common getresuid32 sys_getresuid
+210 common setresgid32 sys_setresgid
+211 common getresgid32 sys_getresgid
+212 common lchown32 sys_lchown
+213 common setuid32 sys_setuid
+214 common setgid32 sys_setgid
+215 common setfsuid32 sys_setfsuid
+216 common setfsgid32 sys_setfsgid
+217 common pivot_root sys_pivot_root
+# 218 is reserved
+# 219 is reserved
+220 common getdents64 sys_getdents64
+221 common gettid sys_gettid
+222 common tkill sys_tkill
+223 common setxattr sys_setxattr
+224 common lsetxattr sys_lsetxattr
+225 common fsetxattr sys_fsetxattr
+226 common getxattr sys_getxattr
+227 common lgetxattr sys_lgetxattr
+228 common fgetxattr sys_fgetxattr
+229 common listxattr sys_listxattr
+230 common llistxattr sys_llistxattr
+231 common flistxattr sys_flistxattr
+232 common removexattr sys_removexattr
+233 common lremovexattr sys_lremovexattr
+234 common fremovexattr sys_fremovexattr
+235 common futex sys_futex
+236 common sendfile64 sys_sendfile64
+237 common mincore sys_mincore
+238 common madvise sys_madvise
+239 common fcntl64 sys_fcntl64
+240 common readahead sys_readahead
+241 common io_setup sys_io_setup
+242 common io_destroy sys_io_destroy
+243 common io_getevents sys_io_getevents
+244 common io_submit sys_io_submit
+245 common io_cancel sys_io_cancel
+246 common fadvise64 sys_fadvise64
+247 common exit_group sys_exit_group
+248 common lookup_dcookie sys_lookup_dcookie
+249 common epoll_create sys_epoll_create
+250 common epoll_ctl sys_epoll_ctl
+251 common epoll_wait sys_epoll_wait
+252 common remap_file_pages sys_remap_file_pages
+253 common set_tid_address sys_set_tid_address
+254 common timer_create sys_timer_create
+255 common timer_settime sys_timer_settime
+256 common timer_gettime sys_timer_gettime
+257 common timer_getoverrun sys_timer_getoverrun
+258 common timer_delete sys_timer_delete
+259 common clock_settime sys_clock_settime
+260 common clock_gettime sys_clock_gettime
+261 common clock_getres sys_clock_getres
+262 common clock_nanosleep sys_clock_nanosleep
+263 common statfs64 sys_statfs64
+264 common fstatfs64 sys_fstatfs64
+265 common tgkill sys_tgkill
+266 common utimes sys_utimes
+267 common fadvise64_64 sys_fadvise64_64
+268 common mbind sys_mbind
+269 common get_mempolicy sys_get_mempolicy
+270 common set_mempolicy sys_set_mempolicy
+271 common mq_open sys_mq_open
+272 common mq_unlink sys_mq_unlink
+273 common mq_timedsend sys_mq_timedsend
+274 common mq_timedreceive sys_mq_timedreceive
+275 common mq_notify sys_mq_notify
+276 common mq_getsetattr sys_mq_getsetattr
+277 common waitid sys_waitid
+# 278 was vserver
+279 common add_key sys_add_key
+280 common request_key sys_request_key
+281 common keyctl sys_keyctl
+282 common ioprio_set sys_ioprio_set
+283 common ioprio_get sys_ioprio_get
+284 common inotify_init sys_inotify_init
+285 common inotify_add_watch sys_inotify_add_watch
+286 common inotify_rm_watch sys_inotify_rm_watch
+287 common migrate_pages sys_migrate_pages
+288 common openat sys_openat
+289 common mkdirat sys_mkdirat
+290 common mknodat sys_mknodat
+291 common fchownat sys_fchownat
+292 common futimesat sys_futimesat
+293 common fstatat64 sys_fstatat64
+294 common unlinkat sys_unlinkat
+295 common renameat sys_renameat
+296 common linkat sys_linkat
+297 common symlinkat sys_symlinkat
+298 common readlinkat sys_readlinkat
+299 common fchmodat sys_fchmodat
+300 common faccessat sys_faccessat
+301 common pselect6 sys_pselect6
+302 common ppoll sys_ppoll
+303 common unshare sys_unshare
+304 common set_robust_list sys_set_robust_list
+305 common get_robust_list sys_get_robust_list
+306 common splice sys_splice
+307 common sync_file_range sys_sync_file_range
+308 common tee sys_tee
+309 common vmsplice sys_vmsplice
+310 common move_pages sys_move_pages
+311 common sched_setaffinity sys_sched_setaffinity
+312 common sched_getaffinity sys_sched_getaffinity
+313 common kexec_load sys_kexec_load
+314 common getcpu sys_getcpu
+315 common epoll_pwait sys_epoll_pwait
+316 common utimensat sys_utimensat
+317 common signalfd sys_signalfd
+318 common timerfd_create sys_timerfd_create
+319 common eventfd sys_eventfd
+320 common fallocate sys_fallocate
+321 common timerfd_settime sys_timerfd_settime
+322 common timerfd_gettime sys_timerfd_gettime
+323 common signalfd4 sys_signalfd4
+324 common eventfd2 sys_eventfd2
+325 common epoll_create1 sys_epoll_create1
+326 common dup3 sys_dup3
+327 common pipe2 sys_pipe2
+328 common inotify_init1 sys_inotify_init1
+329 common preadv sys_preadv
+330 common pwritev sys_pwritev
+331 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+332 common perf_event_open sys_perf_event_open
+333 common get_thread_area sys_get_thread_area
+334 common set_thread_area sys_set_thread_area
+335 common atomic_cmpxchg_32 sys_atomic_cmpxchg_32
+336 common atomic_barrier sys_atomic_barrier
+337 common fanotify_init sys_fanotify_init
+338 common fanotify_mark sys_fanotify_mark
+339 common prlimit64 sys_prlimit64
+340 common name_to_handle_at sys_name_to_handle_at
+341 common open_by_handle_at sys_open_by_handle_at
+342 common clock_adjtime sys_clock_adjtime
+343 common syncfs sys_syncfs
+344 common setns sys_setns
+345 common process_vm_readv sys_process_vm_readv
+346 common process_vm_writev sys_process_vm_writev
+347 common kcmp sys_kcmp
+348 common finit_module sys_finit_module
+349 common sched_setattr sys_sched_setattr
+350 common sched_getattr sys_sched_getattr
+351 common renameat2 sys_renameat2
+352 common getrandom sys_getrandom
+353 common memfd_create sys_memfd_create
+354 common bpf sys_bpf
+355 common execveat sys_execveat
+356 common socket sys_socket
+357 common socketpair sys_socketpair
+358 common bind sys_bind
+359 common connect sys_connect
+360 common listen sys_listen
+361 common accept4 sys_accept4
+362 common getsockopt sys_getsockopt
+363 common setsockopt sys_setsockopt
+364 common getsockname sys_getsockname
+365 common getpeername sys_getpeername
+366 common sendto sys_sendto
+367 common sendmsg sys_sendmsg
+368 common recvfrom sys_recvfrom
+369 common recvmsg sys_recvmsg
+370 common shutdown sys_shutdown
+371 common recvmmsg sys_recvmmsg
+372 common sendmmsg sys_sendmmsg
+373 common userfaultfd sys_userfaultfd
+374 common membarrier sys_membarrier
+375 common mlock2 sys_mlock2
+376 common copy_file_range sys_copy_file_range
+377 common preadv2 sys_preadv2
+378 common pwritev2 sys_pwritev2
+379 common statx sys_statx
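
Each row feeds both generators: the <name> column becomes a __NR_ define in the uapi header, the <entry point> column becomes a slot in the kernel table, and commented-out numbers leave a hole that the table generator plugs with sys_ni_syscall. A sketch of the expected output for one row and one hole:

	/* "16 common chown sys_chown16" */
	#define __NR_chown 16			/* in unistd_32.h */
	__SYSCALL(16, sys_chown16, )		/* in syscall_table.h */

	/* "# 17 was break": no uapi define, but the slot stays occupied */
	__SYSCALL(17, sys_ni_syscall, )
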
diff --git a/arch/m68k/kernel/syscalls/syscallhdr.sh b/arch/m68k/kernel/syscalls/syscallhdr.sh
new file mode 100644
index 000000000000..6f357d68ef44
--- /dev/null
+++ b/arch/m68k/kernel/syscalls/syscallhdr.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_M68K_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry ; do
+ if [ -z "$offset" ]; then
+ printf "#define __NR_%s%s\t%s\n" \
+ "${prefix}" "${name}" "${nr}"
+ else
+ printf "#define __NR_%s%s\t(%s + %s)\n" \
+ "${prefix}" "${name}" "${offset}" "${nr}"
+ fi
+ nxt=$((nr+1))
+ done
+
+ printf "\n"
+ printf "#ifdef __KERNEL__\n"
+ printf "#define __NR_syscalls\t%s\n" "${nxt}"
+ printf "#endif\n"
+ printf "\n"
+ printf "#endif /* %s */\n" "${fileguard}"
+) > "$out"
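[Editor's note] The script filters the table for the requested ABIs, sorts numerically, and emits one #define per row; nxt tracks the last number read so that the kernel-only __NR_syscalls constant comes out as highest number + 1. A minimal sketch of the generated m68k header (guard name derived from the output filename by the sed expression above; spacing illustrative):

    #ifndef _UAPI_ASM_M68K_UNISTD_32_H
    #define _UAPI_ASM_M68K_UNISTD_32_H

    #define __NR_restart_syscall 0
    #define __NR_exit 1
    /* ... one line per table row ... */
    #define __NR_statx 379

    #ifdef __KERNEL__
    #define __NR_syscalls 380
    #endif

    #endif /* _UAPI_ASM_M68K_UNISTD_32_H */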
diff --git a/arch/m68k/kernel/syscalls/syscalltbl.sh b/arch/m68k/kernel/syscalls/syscalltbl.sh
new file mode 100644
index 000000000000..85d78d9309ad
--- /dev/null
+++ b/arch/m68k/kernel/syscalls/syscalltbl.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+my_abi="$4"
+offset="$5"
+
+emit() {
+ t_nxt="$1"
+ t_nr="$2"
+ t_entry="$3"
+
+ while [ $t_nxt -lt $t_nr ]; do
+ printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+ t_nxt=$((t_nxt+1))
+ done
+ printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+}
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ nxt=0
+ if [ -z "$offset" ]; then
+ offset=0
+ fi
+
+ while read nr abi name entry ; do
+ emit $((nxt+offset)) $((nr+offset)) $entry
+ nxt=$((nr+1))
+ done
+) > "$out"
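[Editor's note] The emit() helper is what makes sparse tables safe: whenever the number read jumps past the next expected one, the gap is padded with sys_ni_syscall entries, so unassigned numbers still occupy a slot (and fail with -ENOSYS) instead of shifting every later syscall down. A sketch of the effect for a table that skips 222 and 223, as the microblaze table later in this patch does:

    __SYSCALL(221, sys_fcntl64, )
    __SYSCALL(222, sys_ni_syscall, )   /* padded by emit() */
    __SYSCALL(223, sys_ni_syscall, )   /* padded by emit() */
    __SYSCALL(224, sys_gettid, )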
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 2c8402e75f62..d329cc7b481c 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -15,389 +15,12 @@
#include <linux/linkage.h>
#ifndef CONFIG_MMU
-#define sys_mmap2 sys_mmap_pgoff
+#define sys_mmap2 sys_mmap_pgoff
#endif
-.section .rodata
+#define __SYSCALL(nr, entry, nargs) .long entry
+ .section .rodata
ALIGN
ENTRY(sys_call_table)
- .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
- .long sys_exit
- .long __sys_fork
- .long sys_read
- .long sys_write
- .long sys_open /* 5 */
- .long sys_close
- .long sys_waitpid
- .long sys_creat
- .long sys_link
- .long sys_unlink /* 10 */
- .long sys_execve
- .long sys_chdir
- .long sys_time
- .long sys_mknod
- .long sys_chmod /* 15 */
- .long sys_chown16
- .long sys_ni_syscall /* old break syscall holder */
- .long sys_stat
- .long sys_lseek
- .long sys_getpid /* 20 */
- .long sys_mount
- .long sys_oldumount
- .long sys_setuid16
- .long sys_getuid16
- .long sys_stime /* 25 */
- .long sys_ptrace
- .long sys_alarm
- .long sys_fstat
- .long sys_pause
- .long sys_utime /* 30 */
- .long sys_ni_syscall /* old stty syscall holder */
- .long sys_ni_syscall /* old gtty syscall holder */
- .long sys_access
- .long sys_nice
- .long sys_ni_syscall /* 35 - old ftime syscall holder */
- .long sys_sync
- .long sys_kill
- .long sys_rename
- .long sys_mkdir
- .long sys_rmdir /* 40 */
- .long sys_dup
- .long sys_pipe
- .long sys_times
- .long sys_ni_syscall /* old prof syscall holder */
- .long sys_brk /* 45 */
- .long sys_setgid16
- .long sys_getgid16
- .long sys_signal
- .long sys_geteuid16
- .long sys_getegid16 /* 50 */
- .long sys_acct
- .long sys_umount /* recycled never used phys() */
- .long sys_ni_syscall /* old lock syscall holder */
- .long sys_ioctl
- .long sys_fcntl /* 55 */
- .long sys_ni_syscall /* old mpx syscall holder */
- .long sys_setpgid
- .long sys_ni_syscall /* old ulimit syscall holder */
- .long sys_ni_syscall
- .long sys_umask /* 60 */
- .long sys_chroot
- .long sys_ustat
- .long sys_dup2
- .long sys_getppid
- .long sys_getpgrp /* 65 */
- .long sys_setsid
- .long sys_sigaction
- .long sys_sgetmask
- .long sys_ssetmask
- .long sys_setreuid16 /* 70 */
- .long sys_setregid16
- .long sys_sigsuspend
- .long sys_sigpending
- .long sys_sethostname
- .long sys_setrlimit /* 75 */
- .long sys_old_getrlimit
- .long sys_getrusage
- .long sys_gettimeofday
- .long sys_settimeofday
- .long sys_getgroups16 /* 80 */
- .long sys_setgroups16
- .long sys_old_select
- .long sys_symlink
- .long sys_lstat
- .long sys_readlink /* 85 */
- .long sys_uselib
- .long sys_swapon
- .long sys_reboot
- .long sys_old_readdir
- .long sys_old_mmap /* 90 */
- .long sys_munmap
- .long sys_truncate
- .long sys_ftruncate
- .long sys_fchmod
- .long sys_fchown16 /* 95 */
- .long sys_getpriority
- .long sys_setpriority
- .long sys_ni_syscall /* old profil syscall holder */
- .long sys_statfs
- .long sys_fstatfs /* 100 */
- .long sys_ni_syscall /* ioperm for i386 */
- .long sys_socketcall
- .long sys_syslog
- .long sys_setitimer
- .long sys_getitimer /* 105 */
- .long sys_newstat
- .long sys_newlstat
- .long sys_newfstat
- .long sys_ni_syscall
- .long sys_ni_syscall /* 110 - iopl for i386 */
- .long sys_vhangup
- .long sys_ni_syscall /* obsolete idle() syscall */
- .long sys_ni_syscall /* vm86old for i386 */
- .long sys_wait4
- .long sys_swapoff /* 115 */
- .long sys_sysinfo
- .long sys_ipc
- .long sys_fsync
- .long sys_sigreturn
- .long __sys_clone /* 120 */
- .long sys_setdomainname
- .long sys_newuname
- .long sys_cacheflush /* modify_ldt for i386 */
- .long sys_adjtimex
- .long sys_mprotect /* 125 */
- .long sys_sigprocmask
- .long sys_ni_syscall /* old "create_module" */
- .long sys_init_module
- .long sys_delete_module
- .long sys_ni_syscall /* 130 - old "get_kernel_syms" */
- .long sys_quotactl
- .long sys_getpgid
- .long sys_fchdir
- .long sys_bdflush
- .long sys_sysfs /* 135 */
- .long sys_personality
- .long sys_ni_syscall /* for afs_syscall */
- .long sys_setfsuid16
- .long sys_setfsgid16
- .long sys_llseek /* 140 */
- .long sys_getdents
- .long sys_select
- .long sys_flock
- .long sys_msync
- .long sys_readv /* 145 */
- .long sys_writev
- .long sys_getsid
- .long sys_fdatasync
- .long sys_sysctl
- .long sys_mlock /* 150 */
- .long sys_munlock
- .long sys_mlockall
- .long sys_munlockall
- .long sys_sched_setparam
- .long sys_sched_getparam /* 155 */
- .long sys_sched_setscheduler
- .long sys_sched_getscheduler
- .long sys_sched_yield
- .long sys_sched_get_priority_max
- .long sys_sched_get_priority_min /* 160 */
- .long sys_sched_rr_get_interval
- .long sys_nanosleep
- .long sys_mremap
- .long sys_setresuid16
- .long sys_getresuid16 /* 165 */
- .long sys_getpagesize
- .long sys_ni_syscall /* old "query_module" */
- .long sys_poll
- .long sys_ni_syscall /* old nfsservctl */
- .long sys_setresgid16 /* 170 */
- .long sys_getresgid16
- .long sys_prctl
- .long sys_rt_sigreturn
- .long sys_rt_sigaction
- .long sys_rt_sigprocmask /* 175 */
- .long sys_rt_sigpending
- .long sys_rt_sigtimedwait
- .long sys_rt_sigqueueinfo
- .long sys_rt_sigsuspend
- .long sys_pread64 /* 180 */
- .long sys_pwrite64
- .long sys_lchown16
- .long sys_getcwd
- .long sys_capget
- .long sys_capset /* 185 */
- .long sys_sigaltstack
- .long sys_sendfile
- .long sys_ni_syscall /* streams1 */
- .long sys_ni_syscall /* streams2 */
- .long __sys_vfork /* 190 */
- .long sys_getrlimit
- .long sys_mmap2
- .long sys_truncate64
- .long sys_ftruncate64
- .long sys_stat64 /* 195 */
- .long sys_lstat64
- .long sys_fstat64
- .long sys_chown
- .long sys_getuid
- .long sys_getgid /* 200 */
- .long sys_geteuid
- .long sys_getegid
- .long sys_setreuid
- .long sys_setregid
- .long sys_getgroups /* 205 */
- .long sys_setgroups
- .long sys_fchown
- .long sys_setresuid
- .long sys_getresuid
- .long sys_setresgid /* 210 */
- .long sys_getresgid
- .long sys_lchown
- .long sys_setuid
- .long sys_setgid
- .long sys_setfsuid /* 215 */
- .long sys_setfsgid
- .long sys_pivot_root
- .long sys_ni_syscall
- .long sys_ni_syscall
- .long sys_getdents64 /* 220 */
- .long sys_gettid
- .long sys_tkill
- .long sys_setxattr
- .long sys_lsetxattr
- .long sys_fsetxattr /* 225 */
- .long sys_getxattr
- .long sys_lgetxattr
- .long sys_fgetxattr
- .long sys_listxattr
- .long sys_llistxattr /* 230 */
- .long sys_flistxattr
- .long sys_removexattr
- .long sys_lremovexattr
- .long sys_fremovexattr
- .long sys_futex /* 235 */
- .long sys_sendfile64
- .long sys_mincore
- .long sys_madvise
- .long sys_fcntl64
- .long sys_readahead /* 240 */
- .long sys_io_setup
- .long sys_io_destroy
- .long sys_io_getevents
- .long sys_io_submit
- .long sys_io_cancel /* 245 */
- .long sys_fadvise64
- .long sys_exit_group
- .long sys_lookup_dcookie
- .long sys_epoll_create
- .long sys_epoll_ctl /* 250 */
- .long sys_epoll_wait
- .long sys_remap_file_pages
- .long sys_set_tid_address
- .long sys_timer_create
- .long sys_timer_settime /* 255 */
- .long sys_timer_gettime
- .long sys_timer_getoverrun
- .long sys_timer_delete
- .long sys_clock_settime
- .long sys_clock_gettime /* 260 */
- .long sys_clock_getres
- .long sys_clock_nanosleep
- .long sys_statfs64
- .long sys_fstatfs64
- .long sys_tgkill /* 265 */
- .long sys_utimes
- .long sys_fadvise64_64
- .long sys_mbind
- .long sys_get_mempolicy
- .long sys_set_mempolicy /* 270 */
- .long sys_mq_open
- .long sys_mq_unlink
- .long sys_mq_timedsend
- .long sys_mq_timedreceive
- .long sys_mq_notify /* 275 */
- .long sys_mq_getsetattr
- .long sys_waitid
- .long sys_ni_syscall /* for sys_vserver */
- .long sys_add_key
- .long sys_request_key /* 280 */
- .long sys_keyctl
- .long sys_ioprio_set
- .long sys_ioprio_get
- .long sys_inotify_init
- .long sys_inotify_add_watch /* 285 */
- .long sys_inotify_rm_watch
- .long sys_migrate_pages
- .long sys_openat
- .long sys_mkdirat
- .long sys_mknodat /* 290 */
- .long sys_fchownat
- .long sys_futimesat
- .long sys_fstatat64
- .long sys_unlinkat
- .long sys_renameat /* 295 */
- .long sys_linkat
- .long sys_symlinkat
- .long sys_readlinkat
- .long sys_fchmodat
- .long sys_faccessat /* 300 */
- .long sys_pselect6
- .long sys_ppoll
- .long sys_unshare
- .long sys_set_robust_list
- .long sys_get_robust_list /* 305 */
- .long sys_splice
- .long sys_sync_file_range
- .long sys_tee
- .long sys_vmsplice
- .long sys_move_pages /* 310 */
- .long sys_sched_setaffinity
- .long sys_sched_getaffinity
- .long sys_kexec_load
- .long sys_getcpu
- .long sys_epoll_pwait /* 315 */
- .long sys_utimensat
- .long sys_signalfd
- .long sys_timerfd_create
- .long sys_eventfd
- .long sys_fallocate /* 320 */
- .long sys_timerfd_settime
- .long sys_timerfd_gettime
- .long sys_signalfd4
- .long sys_eventfd2
- .long sys_epoll_create1 /* 325 */
- .long sys_dup3
- .long sys_pipe2
- .long sys_inotify_init1
- .long sys_preadv
- .long sys_pwritev /* 330 */
- .long sys_rt_tgsigqueueinfo
- .long sys_perf_event_open
- .long sys_get_thread_area
- .long sys_set_thread_area
- .long sys_atomic_cmpxchg_32 /* 335 */
- .long sys_atomic_barrier
- .long sys_fanotify_init
- .long sys_fanotify_mark
- .long sys_prlimit64
- .long sys_name_to_handle_at /* 340 */
- .long sys_open_by_handle_at
- .long sys_clock_adjtime
- .long sys_syncfs
- .long sys_setns
- .long sys_process_vm_readv /* 345 */
- .long sys_process_vm_writev
- .long sys_kcmp
- .long sys_finit_module
- .long sys_sched_setattr
- .long sys_sched_getattr /* 350 */
- .long sys_renameat2
- .long sys_getrandom
- .long sys_memfd_create
- .long sys_bpf
- .long sys_execveat /* 355 */
- .long sys_socket
- .long sys_socketpair
- .long sys_bind
- .long sys_connect
- .long sys_listen /* 360 */
- .long sys_accept4
- .long sys_getsockopt
- .long sys_setsockopt
- .long sys_getsockname
- .long sys_getpeername /* 365 */
- .long sys_sendto
- .long sys_sendmsg
- .long sys_recvfrom
- .long sys_recvmsg
- .long sys_shutdown /* 370 */
- .long sys_recvmmsg
- .long sys_sendmmsg
- .long sys_userfaultfd
- .long sys_membarrier
- .long sys_mlock2 /* 375 */
- .long sys_copy_file_range
- .long sys_preadv2
- .long sys_pwritev2
- .long sys_statx
+#include <asm/syscall_table.h>
+#undef __SYSCALL
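[Editor's note] The replacement uses the classic X-macro include trick: the generated asm/syscall_table.h contains nothing but __SYSCALL(nr, entry, nargs) invocations, and each consumer defines __SYSCALL to suit itself before including the file; here it expands to one .long pointer per entry. The same header could be reused from C, e.g. (a hypothetical consumer, not part of this patch):

    /* Hypothetical: build a name table for debugging from the same header. */
    #define __SYSCALL(nr, entry, nargs) [nr] = #entry,
    static const char *const syscall_names[] = {
    #include <asm/syscall_table.h>
    };
    #undef __SYSCALL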
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 7497cf30bf1c..3f3d0bf36091 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -228,6 +228,7 @@ void __init paging_init(void)
min_addr = m68k_memory[0].addr;
max_addr = min_addr + m68k_memory[0].size;
+ memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
for (i = 1; i < m68k_num_memory;) {
if (m68k_memory[i].addr < min_addr) {
printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
@@ -238,6 +239,7 @@ void __init paging_init(void)
(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
continue;
}
+ memblock_add(m68k_memory[i].addr, m68k_memory[i].size);
addr = m68k_memory[i].addr + m68k_memory[i].size;
if (addr > max_addr)
max_addr = addr;
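[Editor's note] The two added memblock_add() calls register each valid m68k memory chunk with the memblock early allocator as paging_init() walks them; without this, memblock has no memory to hand out during early boot. The call takes just a physical base and a size, e.g. (illustrative values only):

    /* Register a physical RAM range with the early allocator: */
    memblock_add(0x00000000, 64 * 1024 * 1024);   /* a hypothetical 64 MiB chunk */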
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 0823d291fbeb..7b340a35b194 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -79,21 +79,30 @@ all: linux.bin
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
-linux.bin linux.bin.gz linux.bin.ub: vmlinux
- $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+archheaders:
+ $(Q)$(MAKE) $(build)=arch/microblaze/kernel/syscalls all
-simpleImage.%: vmlinux
+PHONY += linux.bin linux.bin.gz linux.bin.ub
+linux.bin.ub linux.bin.gz: linux.bin
+linux.bin: vmlinux
+linux.bin linux.bin.gz linux.bin.ub:
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+ @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')'
+
+PHONY += simpleImage.$(DTB)
+simpleImage.$(DTB): vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(addprefix $(boot)/$@., ub unstrip strip)
+ @echo 'Kernel: $(boot)/$@ is ready' ' (#'`cat .version`')'
define archhelp
echo '* linux.bin - Create raw binary'
echo ' linux.bin.gz - Create compressed raw binary'
echo ' linux.bin.ub - Create U-Boot wrapped raw binary'
- echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
- echo ' - stripped elf with fdt blob'
- echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
- echo ' *_defconfig - Select default config from arch/microblaze/configs'
- echo ''
+ echo ' simpleImage.<dt> - Create the following images with <dt>.dtb linked in'
+ echo ' simpleImage.<dt> : raw image'
+ echo ' simpleImage.<dt>.ub : raw image with U-Boot header'
+ echo ' simpleImage.<dt>.unstrip: ELF (identical to vmlinux)'
+ echo ' simpleImage.<dt>.strip : stripped ELF'
echo ' Targets with <dt> embed a device tree blob inside the image'
echo '   These targets support boards with firmware that does not'
echo ' support passing a device tree directly. Replace <dt> with the'
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 600e5a198bd2..cff570a71946 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -3,38 +3,33 @@
# arch/microblaze/boot/Makefile
#
-targets := linux.bin linux.bin.gz linux.bin.ub simpleImage.%
+targets := linux.bin linux.bin.gz linux.bin.ub simpleImage.*
OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary
$(obj)/linux.bin: vmlinux FORCE
$(call if_changed,objcopy)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
$(obj)/linux.bin.ub: $(obj)/linux.bin FORCE
$(call if_changed,uimage)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
$(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
$(call if_changed,gzip)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
-
-quiet_cmd_cp = CP $< $@$2
- cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
quiet_cmd_strip = STRIP $< $@$2
cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
-K _fdt_start $< -o $@$2
UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR)
-UIMAGE_IN = $@
-UIMAGE_OUT = $@.ub
-$(obj)/simpleImage.%: vmlinux FORCE
- $(call if_changed,cp,.unstrip)
+$(obj)/simpleImage.$(DTB): vmlinux FORCE
$(call if_changed,objcopy)
+
+$(obj)/simpleImage.$(DTB).ub: $(obj)/simpleImage.$(DTB) FORCE
$(call if_changed,uimage)
- $(call if_changed,strip,.strip)
- @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')'
-clean-files += simpleImage.*.unstrip linux.bin.ub
+$(obj)/simpleImage.$(DTB).unstrip: vmlinux FORCE
+ $(call if_changed,shipped)
+
+$(obj)/simpleImage.$(DTB).strip: vmlinux FORCE
+ $(call if_changed,strip)
diff --git a/arch/microblaze/boot/dts/Makefile b/arch/microblaze/boot/dts/Makefile
index c7324e74f9ef..ef00dd30d19a 100644
--- a/arch/microblaze/boot/dts/Makefile
+++ b/arch/microblaze/boot/dts/Makefile
@@ -12,12 +12,9 @@ $(obj)/linked_dtb.o: $(obj)/system.dtb
# Generate system.dtb from $(DTB).dtb
ifneq ($(DTB),system)
$(obj)/system.dtb: $(obj)/$(DTB).dtb
- $(call if_changed,cp)
+ $(call if_changed,shipped)
endif
endif
-quiet_cmd_cp = CP $< $@$2
- cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
-
# Rule to build device tree blobs
DTC_FLAGS := -p 1024
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 569ba9e670c1..9c7d1d25bf3d 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,3 +1,4 @@
+generated-y += syscall_table.h
generic-y += barrier.h
generic-y += bitops.h
generic-y += bug.h
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index e14b6621c933..142d3f004848 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -200,7 +200,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
* is cleared in the TLB miss handler before the TLB entry is loaded.
* - All other bits of the PTE are loaded into TLBLO without
*   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
- * software PTE bits. We actually use use bits 21, 24, 25, and
+ * software PTE bits. We actually use bits 21, 24, 25, and
* 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
* PRESENT.
*/
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index f42c40f5001b..9b7c2c4eaf12 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,4 @@
#endif /* __ASSEMBLY__ */
-#define __NR_syscalls 401
-
#endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild
index 2c6a6bffea32..b6656d930a0e 100644
--- a/arch/microblaze/include/uapi/asm/Kbuild
+++ b/arch/microblaze/include/uapi/asm/Kbuild
@@ -1,6 +1,7 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generated-y += unistd_32.h
generic-y += bitsperlong.h
generic-y += bpf_perf_event.h
generic-y += errno.h
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index 7a9f16a76413..3f2d7cb6836c 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -11,411 +11,6 @@
#ifndef _UAPI_ASM_MICROBLAZE_UNISTD_H
#define _UAPI_ASM_MICROBLAZE_UNISTD_H
-#define __NR_restart_syscall 0 /* ok */
-#define __NR_exit 1 /* ok */
-#define __NR_fork 2 /* not for no MMU - weird */
-#define __NR_read 3 /* ok */
-#define __NR_write 4 /* ok */
-#define __NR_open 5 /* openat */
-#define __NR_close 6 /* ok */
-#define __NR_waitpid 7 /* waitid */
-#define __NR_creat 8 /* openat */
-#define __NR_link 9 /* linkat */
-#define __NR_unlink 10 /* unlinkat */
-#define __NR_execve 11 /* ok */
-#define __NR_chdir 12 /* ok */
-#define __NR_time 13 /* obsolete -> sys_gettimeofday */
-#define __NR_mknod 14 /* mknodat */
-#define __NR_chmod 15 /* fchmodat */
-#define __NR_lchown 16 /* ok */
-#define __NR_break 17 /* don't know */
-#define __NR_oldstat 18 /* remove */
-#define __NR_lseek 19 /* ok */
-#define __NR_getpid 20 /* ok */
-#define __NR_mount 21 /* ok */
-#define __NR_umount 22 /* ok */ /* use only umount2 */
-#define __NR_setuid 23 /* ok */
-#define __NR_getuid 24 /* ok */
-#define __NR_stime 25 /* obsolete -> sys_settimeofday */
-#define __NR_ptrace 26 /* ok */
-#define __NR_alarm 27 /* obsolete -> sys_setitimer */
-#define __NR_oldfstat 28 /* remove */
-#define __NR_pause 29 /* obsolete -> sys_rt_sigtimedwait */
-#define __NR_utime 30 /* obsolete -> sys_utimesat */
-#define __NR_stty 31 /* remove */
-#define __NR_gtty 32 /* remove */
-#define __NR_access 33 /* faccessat */
-/* can be implemented by sys_setpriority */
-#define __NR_nice 34
-#define __NR_ftime 35 /* remove */
-#define __NR_sync 36 /* ok */
-#define __NR_kill 37 /* ok */
-#define __NR_rename 38 /* renameat */
-#define __NR_mkdir 39 /* mkdirat */
-#define __NR_rmdir 40 /* unlinkat */
-#define __NR_dup 41 /* ok */
-#define __NR_pipe 42 /* ok */
-#define __NR_times 43 /* ok */
-#define __NR_prof 44 /* remove */
-#define __NR_brk 45 /* ok -mmu, nommu specific */
-#define __NR_setgid 46 /* ok */
-#define __NR_getgid 47 /* ok */
-#define __NR_signal 48 /* obsolete -> sys_rt_sigaction */
-#define __NR_geteuid 49 /* ok */
-#define __NR_getegid 50 /* ok */
-#define __NR_acct 51 /* add it and then I can disable it */
-#define __NR_umount2 52 /* remove */
-#define __NR_lock 53 /* remove */
-#define __NR_ioctl 54 /* ok */
-#define __NR_fcntl 55 /* ok -> 64bit version*/
-#define __NR_mpx 56 /* remove */
-#define __NR_setpgid 57 /* ok */
-#define __NR_ulimit 58 /* remove */
-#define __NR_oldolduname 59 /* remove */
-#define __NR_umask 60 /* ok */
-#define __NR_chroot 61 /* ok */
-#define __NR_ustat 62 /* obsolete -> statfs64 */
-#define __NR_dup2 63 /* ok */
-#define __NR_getppid 64 /* ok */
-#define __NR_getpgrp 65 /* obsolete -> sys_getpgid */
-#define __NR_setsid 66 /* ok */
-#define __NR_sigaction 67 /* obsolete -> rt_sigaction */
-#define __NR_sgetmask 68 /* obsolete -> sys_rt_sigprocmask */
-#define __NR_ssetmask 69 /* obsolete ->sys_rt_sigprocmask */
-#define __NR_setreuid 70 /* ok */
-#define __NR_setregid 71 /* ok */
-#define __NR_sigsuspend 72 /* obsolete -> rt_sigsuspend */
-#define __NR_sigpending 73 /* obsolete -> sys_rt_sigpending */
-#define __NR_sethostname 74 /* ok */
-#define __NR_setrlimit 75 /* ok */
-#define __NR_getrlimit 76 /* ok Back compatible 2G limited rlimit */
-#define __NR_getrusage 77 /* ok */
-#define __NR_gettimeofday 78 /* ok */
-#define __NR_settimeofday 79 /* ok */
-#define __NR_getgroups 80 /* ok */
-#define __NR_setgroups 81 /* ok */
-#define __NR_select 82 /* obsolete -> sys_pselect6 */
-#define __NR_symlink 83 /* symlinkat */
-#define __NR_oldlstat 84 /* remove */
-#define __NR_readlink 85 /* obsolete -> sys_readlinkat */
-#define __NR_uselib 86 /* remove */
-#define __NR_swapon 87 /* ok */
-#define __NR_reboot 88 /* ok */
-#define __NR_readdir 89 /* remove ? */
-#define __NR_mmap 90 /* obsolete -> sys_mmap2 */
-#define __NR_munmap 91 /* ok - mmu and nommu */
-#define __NR_truncate 92 /* ok or truncate64 */
-#define __NR_ftruncate 93 /* ok or ftruncate64 */
-#define __NR_fchmod 94 /* ok */
-#define __NR_fchown 95 /* ok */
-#define __NR_getpriority 96 /* ok */
-#define __NR_setpriority 97 /* ok */
-#define __NR_profil 98 /* remove */
-#define __NR_statfs 99 /* ok or statfs64 */
-#define __NR_fstatfs 100 /* ok or fstatfs64 */
-#define __NR_ioperm 101 /* remove */
-#define __NR_socketcall 102 /* remove */
-#define __NR_syslog 103 /* ok */
-#define __NR_setitimer 104 /* ok */
-#define __NR_getitimer 105 /* ok */
-#define __NR_stat 106 /* remove */
-#define __NR_lstat 107 /* remove */
-#define __NR_fstat 108 /* remove */
-#define __NR_olduname 109 /* remove */
-#define __NR_iopl 110 /* remove */
-#define __NR_vhangup 111 /* ok */
-#define __NR_idle 112 /* remove */
-#define __NR_vm86old 113 /* remove */
-#define __NR_wait4 114 /* obsolete -> waitid */
-#define __NR_swapoff 115 /* ok */
-#define __NR_sysinfo 116 /* ok */
-#define __NR_ipc 117 /* remove - direct call */
-#define __NR_fsync 118 /* ok */
-#define __NR_sigreturn 119 /* obsolete -> sys_rt_sigreturn */
-#define __NR_clone 120 /* ok */
-#define __NR_setdomainname 121 /* ok */
-#define __NR_uname 122 /* remove */
-#define __NR_modify_ldt 123 /* remove */
-#define __NR_adjtimex 124 /* ok */
-#define __NR_mprotect 125 /* remove */
-#define __NR_sigprocmask 126 /* obsolete -> sys_rt_sigprocmask */
-#define __NR_create_module 127 /* remove */
-#define __NR_init_module 128 /* ok */
-#define __NR_delete_module 129 /* ok */
-#define __NR_get_kernel_syms 130 /* remove */
-#define __NR_quotactl 131 /* ok */
-#define __NR_getpgid 132 /* ok */
-#define __NR_fchdir 133 /* ok */
-#define __NR_bdflush 134 /* remove */
-#define __NR_sysfs 135 /* needed for busybox */
-#define __NR_personality 136 /* ok */
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_setfsuid 138 /* ok */
-#define __NR_setfsgid 139 /* ok */
-#define __NR__llseek 140 /* remove only lseek */
-#define __NR_getdents 141 /* ok or getdents64 */
-#define __NR__newselect 142 /* remove */
-#define __NR_flock 143 /* ok */
-#define __NR_msync 144 /* remove */
-#define __NR_readv 145 /* ok */
-#define __NR_writev 146 /* ok */
-#define __NR_getsid 147 /* ok */
-#define __NR_fdatasync 148 /* ok */
-#define __NR__sysctl 149 /* remove */
-#define __NR_mlock 150 /* ok - nommu or mmu */
-#define __NR_munlock 151 /* ok - nommu or mmu */
-#define __NR_mlockall 152 /* ok - nommu or mmu */
-#define __NR_munlockall 153 /* ok - nommu or mmu */
-#define __NR_sched_setparam 154 /* ok */
-#define __NR_sched_getparam 155 /* ok */
-#define __NR_sched_setscheduler 156 /* ok */
-#define __NR_sched_getscheduler 157 /* ok */
-#define __NR_sched_yield 158 /* ok */
-#define __NR_sched_get_priority_max 159 /* ok */
-#define __NR_sched_get_priority_min 160 /* ok */
-#define __NR_sched_rr_get_interval 161 /* ok */
-#define __NR_nanosleep 162 /* ok */
-#define __NR_mremap 163 /* ok - nommu or mmu */
-#define __NR_setresuid 164 /* ok */
-#define __NR_getresuid 165 /* ok */
-#define __NR_vm86 166 /* remove */
-#define __NR_query_module 167 /* ok */
-#define __NR_poll 168 /* obsolete -> sys_ppoll */
-#define __NR_nfsservctl 169 /* ok */
-#define __NR_setresgid 170 /* ok */
-#define __NR_getresgid 171 /* ok */
-#define __NR_prctl 172 /* ok */
-#define __NR_rt_sigreturn 173 /* ok */
-#define __NR_rt_sigaction 174 /* ok */
-#define __NR_rt_sigprocmask 175 /* ok */
-#define __NR_rt_sigpending 176 /* ok */
-#define __NR_rt_sigtimedwait 177 /* ok */
-#define __NR_rt_sigqueueinfo 178 /* ok */
-#define __NR_rt_sigsuspend 179 /* ok */
-#define __NR_pread64 180 /* ok */
-#define __NR_pwrite64 181 /* ok */
-#define __NR_chown 182 /* obsolete -> fchownat */
-#define __NR_getcwd 183 /* ok */
-#define __NR_capget 184 /* ok */
-#define __NR_capset 185 /* ok */
-#define __NR_sigaltstack 186 /* remove */
-#define __NR_sendfile 187 /* ok -> exist 64bit version*/
-#define __NR_getpmsg 188 /* remove */
-/* remove - some people actually want streams */
-#define __NR_putpmsg 189
-/* for noMMU - group with clone -> maybe remove */
-#define __NR_vfork 190
-#define __NR_ugetrlimit 191 /* remove - SuS compliant getrlimit */
-#define __NR_mmap2 192 /* ok */
-#define __NR_truncate64 193 /* ok */
-#define __NR_ftruncate64 194 /* ok */
-#define __NR_stat64 195 /* remove _ARCH_WANT_STAT64 */
-#define __NR_lstat64 196 /* remove _ARCH_WANT_STAT64 */
-#define __NR_fstat64 197 /* remove _ARCH_WANT_STAT64 */
-#define __NR_lchown32 198 /* ok - without 32 */
-#define __NR_getuid32 199 /* ok - without 32 */
-#define __NR_getgid32 200 /* ok - without 32 */
-#define __NR_geteuid32 201 /* ok - without 32 */
-#define __NR_getegid32 202 /* ok - without 32 */
-#define __NR_setreuid32 203 /* ok - without 32 */
-#define __NR_setregid32 204 /* ok - without 32 */
-#define __NR_getgroups32 205 /* ok - without 32 */
-#define __NR_setgroups32 206 /* ok - without 32 */
-#define __NR_fchown32 207 /* ok - without 32 */
-#define __NR_setresuid32 208 /* ok - without 32 */
-#define __NR_getresuid32 209 /* ok - without 32 */
-#define __NR_setresgid32 210 /* ok - without 32 */
-#define __NR_getresgid32 211 /* ok - without 32 */
-#define __NR_chown32 212 /* ok - without 32 -obsolete -> fchownat */
-#define __NR_setuid32 213 /* ok - without 32 */
-#define __NR_setgid32 214 /* ok - without 32 */
-#define __NR_setfsuid32 215 /* ok - without 32 */
-#define __NR_setfsgid32 216 /* ok - without 32 */
-#define __NR_pivot_root 217 /* ok */
-#define __NR_mincore 218 /* ok */
-#define __NR_madvise 219 /* ok */
-#define __NR_getdents64 220 /* ok */
-#define __NR_fcntl64 221 /* ok */
-/* 223 is unused */
-#define __NR_gettid 224 /* ok */
-#define __NR_readahead 225 /* ok */
-#define __NR_setxattr 226 /* ok */
-#define __NR_lsetxattr 227 /* ok */
-#define __NR_fsetxattr 228 /* ok */
-#define __NR_getxattr 229 /* ok */
-#define __NR_lgetxattr 230 /* ok */
-#define __NR_fgetxattr 231 /* ok */
-#define __NR_listxattr 232 /* ok */
-#define __NR_llistxattr 233 /* ok */
-#define __NR_flistxattr 234 /* ok */
-#define __NR_removexattr 235 /* ok */
-#define __NR_lremovexattr 236 /* ok */
-#define __NR_fremovexattr 237 /* ok */
-#define __NR_tkill 238 /* ok */
-#define __NR_sendfile64 239 /* ok */
-#define __NR_futex 240 /* ok */
-#define __NR_sched_setaffinity 241 /* ok */
-#define __NR_sched_getaffinity 242 /* ok */
-#define __NR_set_thread_area 243 /* remove */
-#define __NR_get_thread_area 244 /* remove */
-#define __NR_io_setup 245 /* ok */
-#define __NR_io_destroy 246 /* ok */
-#define __NR_io_getevents 247 /* ok */
-#define __NR_io_submit 248 /* ok */
-#define __NR_io_cancel 249 /* ok */
-#define __NR_fadvise64 250 /* remove -> sys_fadvise64_64 */
-/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
-#define __NR_exit_group 252 /* ok */
-#define __NR_lookup_dcookie 253 /* ok */
-#define __NR_epoll_create 254 /* ok */
-#define __NR_epoll_ctl 255 /* ok */
-#define __NR_epoll_wait 256 /* obsolete -> sys_epoll_pwait */
-#define __NR_remap_file_pages 257 /* only for mmu */
-#define __NR_set_tid_address 258 /* ok */
-#define __NR_timer_create 259 /* ok */
-#define __NR_timer_settime (__NR_timer_create+1) /* 260 */ /* ok */
-#define __NR_timer_gettime (__NR_timer_create+2) /* 261 */ /* ok */
-#define __NR_timer_getoverrun (__NR_timer_create+3) /* 262 */ /* ok */
-#define __NR_timer_delete (__NR_timer_create+4) /* 263 */ /* ok */
-#define __NR_clock_settime (__NR_timer_create+5) /* 264 */ /* ok */
-#define __NR_clock_gettime (__NR_timer_create+6) /* 265 */ /* ok */
-#define __NR_clock_getres (__NR_timer_create+7) /* 266 */ /* ok */
-#define __NR_clock_nanosleep (__NR_timer_create+8) /* 267 */ /* ok */
-#define __NR_statfs64 268 /* ok */
-#define __NR_fstatfs64 269 /* ok */
-#define __NR_tgkill 270 /* ok */
-#define __NR_utimes 271 /* obsolete -> sys_futimesat */
-#define __NR_fadvise64_64 272 /* ok */
-#define __NR_vserver 273 /* ok */
-#define __NR_mbind 274 /* only for mmu */
-#define __NR_get_mempolicy 275 /* only for mmu */
-#define __NR_set_mempolicy 276 /* only for mmu */
-#define __NR_mq_open 277 /* ok */
-#define __NR_mq_unlink (__NR_mq_open+1) /* 278 */ /* ok */
-#define __NR_mq_timedsend (__NR_mq_open+2) /* 279 */ /* ok */
-#define __NR_mq_timedreceive (__NR_mq_open+3) /* 280 */ /* ok */
-#define __NR_mq_notify (__NR_mq_open+4) /* 281 */ /* ok */
-#define __NR_mq_getsetattr (__NR_mq_open+5) /* 282 */ /* ok */
-#define __NR_kexec_load 283 /* ok */
-#define __NR_waitid 284 /* ok */
-/* #define __NR_sys_setaltroot 285 */
-#define __NR_add_key 286 /* ok */
-#define __NR_request_key 287 /* ok */
-#define __NR_keyctl 288 /* ok */
-#define __NR_ioprio_set 289 /* ok */
-#define __NR_ioprio_get 290 /* ok */
-#define __NR_inotify_init 291 /* ok */
-#define __NR_inotify_add_watch 292 /* ok */
-#define __NR_inotify_rm_watch 293 /* ok */
-#define __NR_migrate_pages 294 /* mmu */
-#define __NR_openat 295 /* ok */
-#define __NR_mkdirat 296 /* ok */
-#define __NR_mknodat 297 /* ok */
-#define __NR_fchownat 298 /* ok */
-#define __NR_futimesat 299 /* obsolete -> sys_utimesat */
-#define __NR_fstatat64 300 /* stat64 */
-#define __NR_unlinkat 301 /* ok */
-#define __NR_renameat 302 /* ok */
-#define __NR_linkat 303 /* ok */
-#define __NR_symlinkat 304 /* ok */
-#define __NR_readlinkat 305 /* ok */
-#define __NR_fchmodat 306 /* ok */
-#define __NR_faccessat 307 /* ok */
-#define __NR_pselect6 308 /* ok */
-#define __NR_ppoll 309 /* ok */
-#define __NR_unshare 310 /* ok */
-#define __NR_set_robust_list 311 /* ok */
-#define __NR_get_robust_list 312 /* ok */
-#define __NR_splice 313 /* ok */
-#define __NR_sync_file_range 314 /* ok */
-#define __NR_tee 315 /* ok */
-#define __NR_vmsplice 316 /* ok */
-#define __NR_move_pages 317 /* mmu */
-#define __NR_getcpu 318 /* ok */
-#define __NR_epoll_pwait 319 /* ok */
-#define __NR_utimensat 320 /* ok */
-#define __NR_signalfd 321 /* ok */
-#define __NR_timerfd_create 322 /* ok */
-#define __NR_eventfd 323 /* ok */
-#define __NR_fallocate 324 /* ok */
-#define __NR_semtimedop 325 /* ok - semaphore group */
-#define __NR_timerfd_settime 326 /* ok */
-#define __NR_timerfd_gettime 327 /* ok */
-/* sysv ipc syscalls */
-#define __NR_semctl 328 /* ok */
-#define __NR_semget 329 /* ok */
-#define __NR_semop 330 /* ok */
-#define __NR_msgctl 331 /* ok */
-#define __NR_msgget 332 /* ok */
-#define __NR_msgrcv 333 /* ok */
-#define __NR_msgsnd 334 /* ok */
-#define __NR_shmat 335 /* ok */
-#define __NR_shmctl 336 /* ok */
-#define __NR_shmdt 337 /* ok */
-#define __NR_shmget 338 /* ok */
-
-
-#define __NR_signalfd4 339 /* new */
-#define __NR_eventfd2 340 /* new */
-#define __NR_epoll_create1 341 /* new */
-#define __NR_dup3 342 /* new */
-#define __NR_pipe2 343 /* new */
-#define __NR_inotify_init1 344 /* new */
-#define __NR_socket 345 /* new */
-#define __NR_socketpair 346 /* new */
-#define __NR_bind 347 /* new */
-#define __NR_listen 348 /* new */
-#define __NR_accept 349 /* new */
-#define __NR_connect 350 /* new */
-#define __NR_getsockname 351 /* new */
-#define __NR_getpeername 352 /* new */
-#define __NR_sendto 353 /* new */
-#define __NR_send 354 /* new */
-#define __NR_recvfrom 355 /* new */
-#define __NR_recv 356 /* new */
-#define __NR_setsockopt 357 /* new */
-#define __NR_getsockopt 358 /* new */
-#define __NR_shutdown 359 /* new */
-#define __NR_sendmsg 360 /* new */
-#define __NR_recvmsg 361 /* new */
-#define __NR_accept4 362 /* new */
-#define __NR_preadv 363 /* new */
-#define __NR_pwritev 364 /* new */
-#define __NR_rt_tgsigqueueinfo 365 /* new */
-#define __NR_perf_event_open 366 /* new */
-#define __NR_recvmmsg 367 /* new */
-#define __NR_fanotify_init 368
-#define __NR_fanotify_mark 369
-#define __NR_prlimit64 370
-#define __NR_name_to_handle_at 371
-#define __NR_open_by_handle_at 372
-#define __NR_clock_adjtime 373
-#define __NR_syncfs 374
-#define __NR_setns 375
-#define __NR_sendmmsg 376
-#define __NR_process_vm_readv 377
-#define __NR_process_vm_writev 378
-#define __NR_kcmp 379
-#define __NR_finit_module 380
-#define __NR_sched_setattr 381
-#define __NR_sched_getattr 382
-#define __NR_renameat2 383
-#define __NR_seccomp 384
-#define __NR_getrandom 385
-#define __NR_memfd_create 386
-#define __NR_bpf 387
-#define __NR_execveat 388
-#define __NR_userfaultfd 389
-#define __NR_membarrier 390
-#define __NR_mlock2 391
-#define __NR_copy_file_range 392
-#define __NR_preadv2 393
-#define __NR_pwritev2 394
-#define __NR_pkey_mprotect 395
-#define __NR_pkey_alloc 396
-#define __NR_pkey_free 397
-#define __NR_statx 398
-#define __NR_io_pgetevents 399
-#define __NR_rseq 400
+#include <asm/unistd_32.h>
#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index d57563c58a26..224eea40e1ee 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -22,8 +22,7 @@
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
unsigned long old;
- int faulted, err;
- struct ftrace_graph_ent trace;
+ int faulted;
unsigned long return_hooker = (unsigned long)
&return_to_handler;
@@ -63,18 +62,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return;
}
- err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL);
- if (err == -EBUSY) {
+ if (function_graph_enter(old, self_addr, 0, NULL))
*parent = old;
- return;
- }
-
- trace.func = self_addr;
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
- *parent = old;
- }
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
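[Editor's note] This hunk is part of a tree-wide cleanup: the open-coded ftrace_push_return_trace() call plus the manual ftrace_graph_entry() check (with its curr_ret_stack rollback on rejection) is folded into the single helper function_graph_enter(), which returns nonzero when the diversion should be undone. The per-arch pattern reduces to:

    /* If the graph tracer rejects this function entry, restore the
       original return address instead of diverting to return_to_handler. */
    if (function_graph_enter(old, self_addr, 0, NULL))
        *parent = old;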
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 6ab650593792..ce006646f741 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -1,404 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+
+#define __SYSCALL(nr, entry, nargs) .long entry
ENTRY(sys_call_table)
- .long sys_restart_syscall /* 0 - old "setup()" system call,
- * used for restarting */
- .long sys_exit
- .long sys_fork
- .long sys_read
- .long sys_write
- .long sys_open /* 5 */
- .long sys_close
- .long sys_waitpid
- .long sys_creat
- .long sys_link
- .long sys_unlink /* 10 */
- .long sys_execve
- .long sys_chdir
- .long sys_time
- .long sys_mknod
- .long sys_chmod /* 15 */
- .long sys_lchown
- .long sys_ni_syscall /* old break syscall holder */
- .long sys_ni_syscall /* old stat */
- .long sys_lseek
- .long sys_getpid /* 20 */
- .long sys_mount
- .long sys_oldumount
- .long sys_setuid
- .long sys_getuid
- .long sys_stime /* 25 */
- .long sys_ptrace
- .long sys_alarm
- .long sys_ni_syscall /* oldfstat */
- .long sys_pause
- .long sys_utime /* 30 */
- .long sys_ni_syscall /* old stty syscall holder */
- .long sys_ni_syscall /* old gtty syscall holder */
- .long sys_access
- .long sys_nice
- .long sys_ni_syscall /* 35 - old ftime syscall holder */
- .long sys_sync
- .long sys_kill
- .long sys_rename
- .long sys_mkdir
- .long sys_rmdir /* 40 */
- .long sys_dup
- .long sys_pipe
- .long sys_times
- .long sys_ni_syscall /* old prof syscall holder */
- .long sys_brk /* 45 */
- .long sys_setgid
- .long sys_getgid
- .long sys_signal
- .long sys_geteuid
- .long sys_getegid /* 50 */
- .long sys_acct
- .long sys_umount /* recycled never used phys() */
- .long sys_ni_syscall /* old lock syscall holder */
- .long sys_ioctl
- .long sys_fcntl /* 55 */
- .long sys_ni_syscall /* old mpx syscall holder */
- .long sys_setpgid
- .long sys_ni_syscall /* old ulimit syscall holder */
- .long sys_ni_syscall /* olduname */
- .long sys_umask /* 60 */
- .long sys_chroot
- .long sys_ustat
- .long sys_dup2
- .long sys_getppid
- .long sys_getpgrp /* 65 */
- .long sys_setsid
- .long sys_ni_syscall /* sys_sigaction */
- .long sys_sgetmask
- .long sys_ssetmask
- .long sys_setreuid /* 70 */
- .long sys_setregid
- .long sys_ni_syscall /* sys_sigsuspend_wrapper */
- .long sys_sigpending
- .long sys_sethostname
- .long sys_setrlimit /* 75 */
- .long sys_ni_syscall /* old_getrlimit */
- .long sys_getrusage
- .long sys_gettimeofday
- .long sys_settimeofday
- .long sys_getgroups /* 80 */
- .long sys_setgroups
- .long sys_ni_syscall /* old_select */
- .long sys_symlink
- .long sys_ni_syscall /* oldlstat */
- .long sys_readlink /* 85 */
- .long sys_uselib
- .long sys_swapon
- .long sys_reboot
- .long sys_ni_syscall /* old_readdir */
- .long sys_mmap /* 90 */ /* old_mmap */
- .long sys_munmap
- .long sys_truncate
- .long sys_ftruncate
- .long sys_fchmod
- .long sys_fchown /* 95 */
- .long sys_getpriority
- .long sys_setpriority
- .long sys_ni_syscall /* old profil syscall holder */
- .long sys_statfs
- .long sys_fstatfs /* 100 */
- .long sys_ni_syscall /* ioperm */
- .long sys_socketcall
- .long sys_syslog /* operation with system console */
- .long sys_setitimer
- .long sys_getitimer /* 105 */
- .long sys_newstat
- .long sys_newlstat
- .long sys_newfstat
- .long sys_ni_syscall /* uname */
- .long sys_ni_syscall /* 110 */ /* iopl */
- .long sys_vhangup
- .long sys_ni_syscall /* old "idle" system call */
- .long sys_ni_syscall /* old sys_vm86old */
- .long sys_wait4
- .long sys_swapoff /* 115 */
- .long sys_sysinfo
- .long sys_ni_syscall /* old sys_ipc */
- .long sys_fsync
- .long sys_ni_syscall /* sys_sigreturn_wrapper */
- .long sys_clone /* 120 */
- .long sys_setdomainname
- .long sys_newuname
- .long sys_ni_syscall /* modify_ldt */
- .long sys_adjtimex
- .long sys_mprotect /* 125: sys_mprotect */
- .long sys_sigprocmask
- .long sys_ni_syscall /* old "create_module" */
- .long sys_init_module
- .long sys_delete_module
- .long sys_ni_syscall /* 130: old "get_kernel_syms" */
- .long sys_quotactl
- .long sys_getpgid
- .long sys_fchdir
- .long sys_bdflush
- .long sys_sysfs /* 135 */
- .long sys_personality
- .long sys_ni_syscall /* reserved for afs_syscall */
- .long sys_setfsuid
- .long sys_setfsgid
- .long sys_llseek /* 140 */
- .long sys_getdents
- .long sys_select
- .long sys_flock
- .long sys_msync
- .long sys_readv /* 145 */
- .long sys_writev
- .long sys_getsid
- .long sys_fdatasync
- .long sys_sysctl
- .long sys_mlock /* 150: sys_mlock */
- .long sys_munlock
- .long sys_mlockall
- .long sys_munlockall
- .long sys_sched_setparam
- .long sys_sched_getparam /* 155 */
- .long sys_sched_setscheduler
- .long sys_sched_getscheduler
- .long sys_sched_yield
- .long sys_sched_get_priority_max
- .long sys_sched_get_priority_min /* 160 */
- .long sys_sched_rr_get_interval
- .long sys_nanosleep
- .long sys_mremap
- .long sys_setresuid
- .long sys_getresuid /* 165 */
- .long sys_ni_syscall /* sys_vm86 */
- .long sys_ni_syscall /* Old sys_query_module */
- .long sys_poll
- .long sys_ni_syscall /* old nfsservctl */
- .long sys_setresgid /* 170 */
- .long sys_getresgid
- .long sys_prctl
- .long sys_rt_sigreturn_wrapper
- .long sys_rt_sigaction
- .long sys_rt_sigprocmask /* 175 */
- .long sys_rt_sigpending
- .long sys_rt_sigtimedwait
- .long sys_rt_sigqueueinfo
- .long sys_rt_sigsuspend
- .long sys_pread64 /* 180 */
- .long sys_pwrite64
- .long sys_chown
- .long sys_getcwd
- .long sys_capget
- .long sys_capset /* 185 */
- .long sys_ni_syscall /* sigaltstack */
- .long sys_sendfile
- .long sys_ni_syscall /* reserved for streams1 */
- .long sys_ni_syscall /* reserved for streams2 */
- .long sys_vfork /* 190 */
- .long sys_getrlimit
- .long sys_mmap2
- .long sys_truncate64
- .long sys_ftruncate64
- .long sys_stat64 /* 195 */
- .long sys_lstat64
- .long sys_fstat64
- .long sys_lchown
- .long sys_getuid
- .long sys_getgid /* 200 */
- .long sys_geteuid
- .long sys_getegid
- .long sys_setreuid
- .long sys_setregid
- .long sys_getgroups /* 205 */
- .long sys_setgroups
- .long sys_fchown
- .long sys_setresuid
- .long sys_getresuid
- .long sys_setresgid /* 210 */
- .long sys_getresgid
- .long sys_chown
- .long sys_setuid
- .long sys_setgid
- .long sys_setfsuid /* 215 */
- .long sys_setfsgid
- .long sys_pivot_root
- .long sys_mincore
- .long sys_madvise
- .long sys_getdents64 /* 220 */
- .long sys_fcntl64
- .long sys_ni_syscall /* reserved for TUX */
- .long sys_ni_syscall
- .long sys_gettid
- .long sys_readahead /* 225 */
- .long sys_setxattr
- .long sys_lsetxattr
- .long sys_fsetxattr
- .long sys_getxattr
- .long sys_lgetxattr /* 230 */
- .long sys_fgetxattr
- .long sys_listxattr
- .long sys_llistxattr
- .long sys_flistxattr
- .long sys_removexattr /* 235 */
- .long sys_lremovexattr
- .long sys_fremovexattr
- .long sys_tkill
- .long sys_sendfile64
- .long sys_futex /* 240 */
- .long sys_sched_setaffinity
- .long sys_sched_getaffinity
- .long sys_ni_syscall /* set_thread_area */
- .long sys_ni_syscall /* get_thread_area */
- .long sys_io_setup /* 245 */
- .long sys_io_destroy
- .long sys_io_getevents
- .long sys_io_submit
- .long sys_io_cancel
- .long sys_fadvise64 /* 250 */
- .long sys_ni_syscall
- .long sys_exit_group
- .long sys_lookup_dcookie
- .long sys_epoll_create
- .long sys_epoll_ctl /* 255 */
- .long sys_epoll_wait
- .long sys_remap_file_pages
- .long sys_set_tid_address
- .long sys_timer_create
- .long sys_timer_settime /* 260 */
- .long sys_timer_gettime
- .long sys_timer_getoverrun
- .long sys_timer_delete
- .long sys_clock_settime
- .long sys_clock_gettime /* 265 */
- .long sys_clock_getres
- .long sys_clock_nanosleep
- .long sys_statfs64
- .long sys_fstatfs64
- .long sys_tgkill /* 270 */
- .long sys_utimes
- .long sys_fadvise64_64
- .long sys_ni_syscall /* sys_vserver */
- .long sys_mbind
- .long sys_get_mempolicy
- .long sys_set_mempolicy
- .long sys_mq_open
- .long sys_mq_unlink
- .long sys_mq_timedsend
- .long sys_mq_timedreceive /* 280 */
- .long sys_mq_notify
- .long sys_mq_getsetattr
- .long sys_kexec_load
- .long sys_waitid
- .long sys_ni_syscall /* 285 */ /* available */
- .long sys_add_key
- .long sys_request_key
- .long sys_keyctl
- .long sys_ioprio_set
- .long sys_ioprio_get /* 290 */
- .long sys_inotify_init
- .long sys_inotify_add_watch
- .long sys_inotify_rm_watch
- .long sys_ni_syscall /* sys_migrate_pages */
- .long sys_openat /* 295 */
- .long sys_mkdirat
- .long sys_mknodat
- .long sys_fchownat
- .long sys_futimesat
- .long sys_fstatat64 /* 300 */
- .long sys_unlinkat
- .long sys_renameat
- .long sys_linkat
- .long sys_symlinkat
- .long sys_readlinkat /* 305 */
- .long sys_fchmodat
- .long sys_faccessat
- .long sys_pselect6
- .long sys_ppoll
- .long sys_unshare /* 310 */
- .long sys_set_robust_list
- .long sys_get_robust_list
- .long sys_splice
- .long sys_sync_file_range
- .long sys_tee /* 315 */
- .long sys_vmsplice
- .long sys_move_pages
- .long sys_getcpu
- .long sys_epoll_pwait
- .long sys_utimensat /* 320 */
- .long sys_signalfd
- .long sys_timerfd_create
- .long sys_eventfd
- .long sys_fallocate
- .long sys_semtimedop /* 325 */
- .long sys_timerfd_settime
- .long sys_timerfd_gettime
- .long sys_semctl
- .long sys_semget
- .long sys_semop /* 330 */
- .long sys_msgctl
- .long sys_msgget
- .long sys_msgrcv
- .long sys_msgsnd
- .long sys_shmat /* 335 */
- .long sys_shmctl
- .long sys_shmdt
- .long sys_shmget
- .long sys_signalfd4 /* new syscall */
- .long sys_eventfd2 /* 340 */
- .long sys_epoll_create1
- .long sys_dup3
- .long sys_pipe2
- .long sys_inotify_init1
- .long sys_socket /* 345 */
- .long sys_socketpair
- .long sys_bind
- .long sys_listen
- .long sys_accept
- .long sys_connect /* 350 */
- .long sys_getsockname
- .long sys_getpeername
- .long sys_sendto
- .long sys_send
- .long sys_recvfrom /* 355 */
- .long sys_recv
- .long sys_setsockopt
- .long sys_getsockopt
- .long sys_shutdown
- .long sys_sendmsg /* 360 */
- .long sys_recvmsg
- .long sys_accept4
- .long sys_preadv
- .long sys_pwritev
- .long sys_rt_tgsigqueueinfo /* 365 */
- .long sys_perf_event_open
- .long sys_recvmmsg
- .long sys_fanotify_init
- .long sys_fanotify_mark
- .long sys_prlimit64 /* 370 */
- .long sys_name_to_handle_at
- .long sys_open_by_handle_at
- .long sys_clock_adjtime
- .long sys_syncfs
- .long sys_setns /* 375 */
- .long sys_sendmmsg
- .long sys_process_vm_readv
- .long sys_process_vm_writev
- .long sys_kcmp
- .long sys_finit_module /* 380 */
- .long sys_sched_setattr
- .long sys_sched_getattr
- .long sys_renameat2
- .long sys_seccomp
- .long sys_getrandom /* 385 */
- .long sys_memfd_create
- .long sys_bpf
- .long sys_execveat
- .long sys_userfaultfd
- .long sys_membarrier /* 390 */
- .long sys_mlock2
- .long sys_copy_file_range
- .long sys_preadv2
- .long sys_pwritev2
- .long sys_pkey_mprotect /* 395 */
- .long sys_pkey_alloc
- .long sys_pkey_free
- .long sys_statx
- .long sys_io_pgetevents
- .long sys_rseq
+#include <asm/syscall_table.h>
+#undef __SYSCALL
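[Editor's note] As on m68k, every number the table maps to sys_ni_syscall (removed, reserved, or never-implemented slots) still gets a real table entry; sys_ni_syscall is the kernel's generic "not implemented" stub, whose definition in kernel/sys_ni.c is essentially:

    asmlinkage long sys_ni_syscall(void)
    {
            return -ENOSYS;
    }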
diff --git a/arch/microblaze/kernel/syscalls/Makefile b/arch/microblaze/kernel/syscalls/Makefile
new file mode 100644
index 000000000000..659faefdcb1d
--- /dev/null
+++ b/arch/microblaze/kernel/syscalls/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
+ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+syscall := $(srctree)/$(src)/syscall.tbl
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+ '$(syshdr_abis_$(basetarget))' \
+ '$(syshdr_pfx_$(basetarget))' \
+ '$(syshdr_offset_$(basetarget))'
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
+ '$(systbl_abis_$(basetarget))' \
+ '$(systbl_abi_$(basetarget))' \
+ '$(systbl_offset_$(basetarget))'
+
+$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+ $(call if_changed,syshdr)
+
+$(kapi)/syscall_table.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_32.h
+kapisyshdr-y += syscall_table.h
+
+targets += $(uapisyshdr-y) $(kapisyshdr-y)
+
+PHONY += all
+all: $(addprefix $(uapi)/,$(uapisyshdr-y))
+all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+ @:
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
new file mode 100644
index 000000000000..a24d09e937dd
--- /dev/null
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -0,0 +1,410 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for microblaze
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "common" for this file
+#
+0 common restart_syscall sys_restart_syscall
+1 common exit sys_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open
+6 common close sys_close
+7 common waitpid sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve
+12 common chdir sys_chdir
+13 common time sys_time
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown
+17 common break sys_ni_syscall
+18 common oldstat sys_ni_syscall
+19 common lseek sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount
+22 common umount sys_oldumount
+23 common setuid sys_setuid
+24 common getuid sys_getuid
+25 common stime sys_stime
+26 common ptrace sys_ptrace
+27 common alarm sys_alarm
+28 common oldfstat sys_ni_syscall
+29 common pause sys_pause
+30 common utime sys_utime
+31 common stty sys_ni_syscall
+32 common gtty sys_ni_syscall
+33 common access sys_access
+34 common nice sys_nice
+35 common ftime sys_ni_syscall
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times
+44 common prof sys_ni_syscall
+45 common brk sys_brk
+46 common setgid sys_setgid
+47 common getgid sys_getgid
+48 common signal sys_signal
+49 common geteuid sys_geteuid
+50 common getegid sys_getegid
+51 common acct sys_acct
+52 common umount2 sys_umount
+53 common lock sys_ni_syscall
+54 common ioctl sys_ioctl
+55 common fcntl sys_fcntl
+56 common mpx sys_ni_syscall
+57 common setpgid sys_setpgid
+58 common ulimit sys_ni_syscall
+59 common oldolduname sys_ni_syscall
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common sigaction sys_ni_syscall
+68 common sgetmask sys_sgetmask
+69 common ssetmask sys_ssetmask
+70 common setreuid sys_setreuid
+71 common setregid sys_setregid
+72 common sigsuspend sys_ni_syscall
+73 common sigpending sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit
+76 common getrlimit sys_ni_syscall
+77 common getrusage sys_getrusage
+78 common gettimeofday sys_gettimeofday
+79 common settimeofday sys_settimeofday
+80 common getgroups sys_getgroups
+81 common setgroups sys_setgroups
+82 common select sys_ni_syscall
+83 common symlink sys_symlink
+84 common oldlstat sys_ni_syscall
+85 common readlink sys_readlink
+86 common uselib sys_uselib
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+89 common readdir sys_ni_syscall
+90 common mmap sys_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate
+93 common ftruncate sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+98 common profil sys_ni_syscall
+99 common statfs sys_statfs
+100 common fstatfs sys_fstatfs
+101 common ioperm sys_ni_syscall
+102 common socketcall sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer
+105 common getitimer sys_getitimer
+106 common stat sys_newstat
+107 common lstat sys_newlstat
+108 common fstat sys_newfstat
+109 common olduname sys_ni_syscall
+110 common iopl sys_ni_syscall
+111 common vhangup sys_vhangup
+112 common idle sys_ni_syscall
+113 common vm86old sys_ni_syscall
+114 common wait4 sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo
+117 common ipc sys_ni_syscall
+118 common fsync sys_fsync
+119 common sigreturn sys_ni_syscall
+120 common clone sys_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+123 common modify_ldt sys_ni_syscall
+124 common adjtimex sys_adjtimex
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask
+127 common create_module sys_ni_syscall
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+130 common get_kernel_syms sys_ni_syscall
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_bdflush
+135 common sysfs sys_sysfs
+136 common personality sys_personality
+137 common afs_syscall sys_ni_syscall
+138 common setfsuid sys_setfsuid
+139 common setfsgid sys_setfsgid
+140 common _llseek sys_llseek
+141 common getdents sys_getdents
+142 common _newselect sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv
+146 common writev sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_sysctl
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval
+162 common nanosleep sys_nanosleep
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid
+165 common getresuid sys_getresuid
+166 common vm86 sys_ni_syscall
+167 common query_module sys_ni_syscall
+168 common poll sys_poll
+169 common nfsservctl sys_ni_syscall
+170 common setresgid sys_setresgid
+171 common getresgid sys_getresgid
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn_wrapper
+174 common rt_sigaction sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend
+180 common pread64 sys_pread64
+181 common pwrite64 sys_pwrite64
+182 common chown sys_chown
+183 common getcwd sys_getcwd
+184 common capget sys_capget
+185 common capset sys_capset
+186 common sigaltstack sys_ni_syscall
+187 common sendfile sys_sendfile
+188 common getpmsg sys_ni_syscall
+189 common putpmsg sys_ni_syscall
+190 common vfork sys_vfork
+191 common ugetrlimit sys_getrlimit
+192 common mmap2 sys_mmap2
+193 common truncate64 sys_truncate64
+194 common ftruncate64 sys_ftruncate64
+195 common stat64 sys_stat64
+196 common lstat64 sys_lstat64
+197 common fstat64 sys_fstat64
+198 common lchown32 sys_lchown
+199 common getuid32 sys_getuid
+200 common getgid32 sys_getgid
+201 common geteuid32 sys_geteuid
+202 common getegid32 sys_getegid
+203 common setreuid32 sys_setreuid
+204 common setregid32 sys_setregid
+205 common getgroups32 sys_getgroups
+206 common setgroups32 sys_setgroups
+207 common fchown32 sys_fchown
+208 common setresuid32 sys_setresuid
+209 common getresuid32 sys_getresuid
+210 common setresgid32 sys_setresgid
+211 common getresgid32 sys_getresgid
+212 common chown32 sys_chown
+213 common setuid32 sys_setuid
+214 common setgid32 sys_setgid
+215 common setfsuid32 sys_setfsuid
+216 common setfsgid32 sys_setfsgid
+217 common pivot_root sys_pivot_root
+218 common mincore sys_mincore
+219 common madvise sys_madvise
+220 common getdents64 sys_getdents64
+221 common fcntl64 sys_fcntl64
+# 222 is reserved for TUX
+# 223 is unused
+224 common gettid sys_gettid
+225 common readahead sys_readahead
+226 common setxattr sys_setxattr
+227 common lsetxattr sys_lsetxattr
+228 common fsetxattr sys_fsetxattr
+229 common getxattr sys_getxattr
+230 common lgetxattr sys_lgetxattr
+231 common fgetxattr sys_fgetxattr
+232 common listxattr sys_listxattr
+233 common llistxattr sys_llistxattr
+234 common flistxattr sys_flistxattr
+235 common removexattr sys_removexattr
+236 common lremovexattr sys_lremovexattr
+237 common fremovexattr sys_fremovexattr
+238 common tkill sys_tkill
+239 common sendfile64 sys_sendfile64
+240 common futex sys_futex
+241 common sched_setaffinity sys_sched_setaffinity
+242 common sched_getaffinity sys_sched_getaffinity
+243 common set_thread_area sys_ni_syscall
+244 common get_thread_area sys_ni_syscall
+245 common io_setup sys_io_setup
+246 common io_destroy sys_io_destroy
+247 common io_getevents sys_io_getevents
+248 common io_submit sys_io_submit
+249 common io_cancel sys_io_cancel
+250 common fadvise64 sys_fadvise64
+# 251 is available for reuse (was briefly sys_set_zone_reclaim)
+252 common exit_group sys_exit_group
+253 common lookup_dcookie sys_lookup_dcookie
+254 common epoll_create sys_epoll_create
+255 common epoll_ctl sys_epoll_ctl
+256 common epoll_wait sys_epoll_wait
+257 common remap_file_pages sys_remap_file_pages
+258 common set_tid_address sys_set_tid_address
+259 common timer_create sys_timer_create
+260 common timer_settime sys_timer_settime
+261 common timer_gettime sys_timer_gettime
+262 common timer_getoverrun sys_timer_getoverrun
+263 common timer_delete sys_timer_delete
+264 common clock_settime sys_clock_settime
+265 common clock_gettime sys_clock_gettime
+266 common clock_getres sys_clock_getres
+267 common clock_nanosleep sys_clock_nanosleep
+268 common statfs64 sys_statfs64
+269 common fstatfs64 sys_fstatfs64
+270 common tgkill sys_tgkill
+271 common utimes sys_utimes
+272 common fadvise64_64 sys_fadvise64_64
+273 common vserver sys_ni_syscall
+274 common mbind sys_mbind
+275 common get_mempolicy sys_get_mempolicy
+276 common set_mempolicy sys_set_mempolicy
+277 common mq_open sys_mq_open
+278 common mq_unlink sys_mq_unlink
+279 common mq_timedsend sys_mq_timedsend
+280 common mq_timedreceive sys_mq_timedreceive
+281 common mq_notify sys_mq_notify
+282 common mq_getsetattr sys_mq_getsetattr
+283 common kexec_load sys_kexec_load
+284 common waitid sys_waitid
+# 285 was setaltroot
+286 common add_key sys_add_key
+287 common request_key sys_request_key
+288 common keyctl sys_keyctl
+289 common ioprio_set sys_ioprio_set
+290 common ioprio_get sys_ioprio_get
+291 common inotify_init sys_inotify_init
+292 common inotify_add_watch sys_inotify_add_watch
+293 common inotify_rm_watch sys_inotify_rm_watch
+294 common migrate_pages sys_ni_syscall
+295 common openat sys_openat
+296 common mkdirat sys_mkdirat
+297 common mknodat sys_mknodat
+298 common fchownat sys_fchownat
+299 common futimesat sys_futimesat
+300 common fstatat64 sys_fstatat64
+301 common unlinkat sys_unlinkat
+302 common renameat sys_renameat
+303 common linkat sys_linkat
+304 common symlinkat sys_symlinkat
+305 common readlinkat sys_readlinkat
+306 common fchmodat sys_fchmodat
+307 common faccessat sys_faccessat
+308 common pselect6 sys_pselect6
+309 common ppoll sys_ppoll
+310 common unshare sys_unshare
+311 common set_robust_list sys_set_robust_list
+312 common get_robust_list sys_get_robust_list
+313 common splice sys_splice
+314 common sync_file_range sys_sync_file_range
+315 common tee sys_tee
+316 common vmsplice sys_vmsplice
+317 common move_pages sys_move_pages
+318 common getcpu sys_getcpu
+319 common epoll_pwait sys_epoll_pwait
+320 common utimensat sys_utimensat
+321 common signalfd sys_signalfd
+322 common timerfd_create sys_timerfd_create
+323 common eventfd sys_eventfd
+324 common fallocate sys_fallocate
+325 common semtimedop sys_semtimedop
+326 common timerfd_settime sys_timerfd_settime
+327 common timerfd_gettime sys_timerfd_gettime
+328 common semctl sys_semctl
+329 common semget sys_semget
+330 common semop sys_semop
+331 common msgctl sys_msgctl
+332 common msgget sys_msgget
+333 common msgrcv sys_msgrcv
+334 common msgsnd sys_msgsnd
+335 common shmat sys_shmat
+336 common shmctl sys_shmctl
+337 common shmdt sys_shmdt
+338 common shmget sys_shmget
+339 common signalfd4 sys_signalfd4
+340 common eventfd2 sys_eventfd2
+341 common epoll_create1 sys_epoll_create1
+342 common dup3 sys_dup3
+343 common pipe2 sys_pipe2
+344 common inotify_init1 sys_inotify_init1
+345 common socket sys_socket
+346 common socketpair sys_socketpair
+347 common bind sys_bind
+348 common listen sys_listen
+349 common accept sys_accept
+350 common connect sys_connect
+351 common getsockname sys_getsockname
+352 common getpeername sys_getpeername
+353 common sendto sys_sendto
+354 common send sys_send
+355 common recvfrom sys_recvfrom
+356 common recv sys_recv
+357 common setsockopt sys_setsockopt
+358 common getsockopt sys_getsockopt
+359 common shutdown sys_shutdown
+360 common sendmsg sys_sendmsg
+361 common recvmsg sys_recvmsg
+362 common accept4 sys_accept4
+363 common preadv sys_preadv
+364 common pwritev sys_pwritev
+365 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+366 common perf_event_open sys_perf_event_open
+367 common recvmmsg sys_recvmmsg
+368 common fanotify_init sys_fanotify_init
+369 common fanotify_mark sys_fanotify_mark
+370 common prlimit64 sys_prlimit64
+371 common name_to_handle_at sys_name_to_handle_at
+372 common open_by_handle_at sys_open_by_handle_at
+373 common clock_adjtime sys_clock_adjtime
+374 common syncfs sys_syncfs
+375 common setns sys_setns
+376 common sendmmsg sys_sendmmsg
+377 common process_vm_readv sys_process_vm_readv
+378 common process_vm_writev sys_process_vm_writev
+379 common kcmp sys_kcmp
+380 common finit_module sys_finit_module
+381 common sched_setattr sys_sched_setattr
+382 common sched_getattr sys_sched_getattr
+383 common renameat2 sys_renameat2
+384 common seccomp sys_seccomp
+385 common getrandom sys_getrandom
+386 common memfd_create sys_memfd_create
+387 common bpf sys_bpf
+388 common execveat sys_execveat
+389 common userfaultfd sys_userfaultfd
+390 common membarrier sys_membarrier
+391 common mlock2 sys_mlock2
+392 common copy_file_range sys_copy_file_range
+393 common preadv2 sys_preadv2
+394 common pwritev2 sys_pwritev2
+395 common pkey_mprotect sys_pkey_mprotect
+396 common pkey_alloc sys_pkey_alloc
+397 common pkey_free sys_pkey_free
+398 common statx sys_statx
+399 common io_pgetevents sys_io_pgetevents
+400 common rseq sys_rseq
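The table format is <number> <abi> <name> <entry point>, and the two generator scripts that follow consume it directly. Rows whose entry point is sys_ni_syscall (vm86, query_module, nfsservctl and friends above) keep their numbers reserved while routing to the kernel's generic "not implemented" stub, which simply fails with ENOSYS; a minimal sketch of that stub (the real one lives in kernel/sys_ni.c):

    /* Every table slot that must exist but has no handler resolves
     * here and reports "function not implemented" to userland. */
    asmlinkage long sys_ni_syscall(void)
    {
            return -ENOSYS;
    }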
diff --git a/arch/microblaze/kernel/syscalls/syscallhdr.sh b/arch/microblaze/kernel/syscalls/syscallhdr.sh
new file mode 100644
index 000000000000..2e9062a926a3
--- /dev/null
+++ b/arch/microblaze/kernel/syscalls/syscallhdr.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_MICROBLAZE_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry ; do
+ if [ -z "$offset" ]; then
+ printf "#define __NR_%s%s\t%s\n" \
+ "${prefix}" "${name}" "${nr}"
+ else
+ printf "#define __NR_%s%s\t(%s + %s)\n" \
+ "${prefix}" "${name}" "${offset}" "${nr}"
+ fi
+ nxt=$((nr+1))
+ done
+
+ printf "\n"
+ printf "#ifdef __KERNEL__\n"
+ printf "#define __NR_syscalls\t%s\n" "${nxt}"
+ printf "#endif\n"
+ printf "\n"
+ printf "#endif /* %s */" "${fileguard}"
+) > "$out"
diff --git a/arch/microblaze/kernel/syscalls/syscalltbl.sh b/arch/microblaze/kernel/syscalls/syscalltbl.sh
new file mode 100644
index 000000000000..85d78d9309ad
--- /dev/null
+++ b/arch/microblaze/kernel/syscalls/syscalltbl.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+my_abi="$4"
+offset="$5"
+
+emit() {
+ t_nxt="$1"
+ t_nr="$2"
+ t_entry="$3"
+
+ while [ $t_nxt -lt $t_nr ]; do
+ printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+ t_nxt=$((t_nxt+1))
+ done
+ printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+}
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ nxt=0
+ if [ -z "$offset" ]; then
+ offset=0
+ fi
+
+ while read nr abi name entry ; do
+ emit $((nxt+offset)) $((nr+offset)) $entry
+ nxt=$((nr+1))
+ done
+) > "$out"
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2993aa9842c0..63183a8454d6 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2,11 +2,12 @@
config MIPS
bool
default y
- select ARCH_BINFMT_ELF_STATE
+ select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
select ARCH_CLOCKSOURCE_DATA
select ARCH_DISCARD_MEMBLOCK
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_SUPPORTS_UPROBES
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
@@ -15,6 +16,7 @@ config MIPS
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
+ select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1)
select CPU_PM if CPU_IDLE
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS
@@ -55,10 +57,12 @@ config MIPS
select HAVE_FUNCTION_TRACER
select HAVE_GENERIC_DMA_COHERENT
select HAVE_IDE
+ select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KPROBES
select HAVE_KRETPROBES
+ select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
@@ -493,22 +497,23 @@ config MIPS_MALTA
select BOOT_RAW
select BUILTIN_DTB
select CEVT_R4K
- select CSRC_R4K
select CLKSRC_MIPS_GIC
select COMMON_CLK
+ select CSRC_R4K
select DMA_MAYBE_COHERENT
select GENERIC_ISA_DMA
select HAVE_PCSPKR_PLATFORM
- select IRQ_MIPS_CPU
- select MIPS_GIC
select HW_HAS_PCI
select I8253
select I8259
+ select IRQ_MIPS_CPU
+ select LIBFDT
select MIPS_BONITO64
select MIPS_CPU_SCACHE
+ select MIPS_GIC
select MIPS_L1_CACHE_SHIFT_6
- select PCI_GT64XXX_PCI0
select MIPS_MSC
+ select PCI_GT64XXX_PCI0
select SMP_UP if SMP
select SWAP_IO_SPACE
select SYS_HAS_CPU_MIPS32_R1
@@ -527,19 +532,16 @@ config MIPS_MALTA
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_MICROMIPS
+ select SYS_SUPPORTS_MIPS16
select SYS_SUPPORTS_MIPS_CMP
select SYS_SUPPORTS_MIPS_CPS
- select SYS_SUPPORTS_MIPS16
select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_RELOCATABLE
select SYS_SUPPORTS_SMARTMIPS
select SYS_SUPPORTS_VPE_LOADER
select SYS_SUPPORTS_ZBOOT
- select SYS_SUPPORTS_RELOCATABLE
select USE_OF
- select LIBFDT
select ZONE_DMA32 if 64BIT
- select BUILTIN_DTB
- select LIBFDT
help
This enables support for the MIPS Technologies Malta evaluation
board.
@@ -793,6 +795,7 @@ config SIBYTE_SWARM
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_LITTLESUR
bool "Sibyte BCM91250C2-LittleSur"
@@ -804,6 +807,7 @@ config SIBYTE_LITTLESUR
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select ZONE_DMA32 if 64BIT
config SIBYTE_SENTOSA
bool "Sibyte BCM91250E-Sentosa"
@@ -813,6 +817,7 @@ config SIBYTE_SENTOSA
select SYS_HAS_CPU_SB1
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_BIGSUR
bool "Sibyte BCM91480B-BigSur"
@@ -825,6 +830,7 @@ config SIBYTE_BIGSUR
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SNI_RM
bool "SNI RM200/300/400"
@@ -2031,7 +2037,7 @@ config CPU_MIPS64
default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
#
-# These two indicate the revision of the architecture, either Release 1 or Release 2
+# These indicate the revision of the architecture
#
config CPU_MIPSR1
bool
@@ -2052,6 +2058,16 @@ config CPU_MIPSR6
select MIPS_CRC_SUPPORT
select MIPS_SPRAM
+config TARGET_ISA_REV
+ int
+ default 1 if CPU_MIPSR1
+ default 2 if CPU_MIPSR2
+ default 6 if CPU_MIPSR6
+ default 0
+ help
+ Reflects the ISA revision being targeted by the kernel build. This
+ is effectively the Kconfig equivalent of MIPS_ISA_REV.
+
config EVA
bool
@@ -2253,9 +2269,30 @@ config CPU_GENERIC_DUMP_TLB
bool
default y if !(CPU_R3000 || CPU_R8000 || CPU_TX39XX)
+config MIPS_FP_SUPPORT
+ bool "Floating Point support" if EXPERT
+ default y
+ help
+ Select y to include support for floating point in the kernel,
+ including initialization of FPU hardware, FP context save & restore
+ and emulation of an FPU where necessary. Without this support, any
+ userland program attempting to use floating point instructions will
+ receive a SIGILL.
+
+ If you know that your userland will not attempt to use floating point
+ instructions, then you can say n here to shrink the kernel a little.
+
+ If unsure, say y.
+
+config CPU_R2300_FPU
+ bool
+ depends on MIPS_FP_SUPPORT
+ default y if CPU_R3000 || CPU_TX39XX
+
config CPU_R4K_FPU
bool
- default y if !(CPU_R3000 || CPU_TX39XX)
+ depends on MIPS_FP_SUPPORT
+ default y if !CPU_R2300_FPU
config CPU_R4K_CACHE_TLB
bool
@@ -2307,6 +2344,7 @@ config MIPS_MT_FPAFF
config MIPSR2_TO_R6_EMULATOR
bool "MIPS R2-to-R6 emulator"
depends on CPU_MIPSR6
+ depends on MIPS_FP_SUPPORT
default y
help
Choose this option if you want to run non-R6 MIPS userland code.
@@ -2454,6 +2492,7 @@ endchoice
config CPU_HAS_MSA
bool "Support for the MIPS SIMD Architecture"
depends on CPU_SUPPORTS_MSA
+ depends on MIPS_FP_SUPPORT
depends on 64BIT || MIPS_O32_FP64_SUPPORT
help
MIPS SIMD Architecture (MSA) introduces 128 bit wide vector registers
@@ -2901,7 +2940,7 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
config MIPS_O32_FP64_SUPPORT
- bool "Support for O32 binaries using 64-bit FP"
+ bool "Support for O32 binaries using 64-bit FP" if !CPU_MIPSR6
depends on 32BIT || MIPS32_O32
help
When this is enabled, the kernel will support use of 64-bit floating
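The new TARGET_ISA_REV symbol exposes the targeted ISA revision to Kconfig logic, mirroring the C-level MIPS_ISA_REV macro; the "select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1)" line above relies on it because the CLZ/CLO instructions only arrived with MIPS32/MIPS64 release 1. A hedged C sketch of the equivalent compile-time gating (the asm/isa-rev.h location and the helper name are assumptions for illustration):

    #include <asm/isa-rev.h>        /* assumed to define MIPS_ISA_REV */

    static inline int cpu_has_fast_ffs(void)
    {
            /* Pre-release-1 CPUs (ISA revision 0) lack CLZ/CLO, so
             * ffs()/fls() fall back to slower generic code. */
            return MIPS_ISA_REV >= 1;
    }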
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 68410490e12f..5b174c3d0de3 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -319,7 +319,7 @@ OBJCOPYFLAGS += --remove-section=.reginfo
head-y := arch/mips/kernel/head.o
libs-y += arch/mips/lib/
-libs-y += arch/mips/math-emu/
+libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/mips/math-emu/
# See arch/mips/Kbuild for content of core part of the kernel
core-y += arch/mips/
@@ -430,6 +430,9 @@ archclean:
$(Q)$(MAKE) $(clean)=arch/mips/boot/tools
$(Q)$(MAKE) $(clean)=arch/mips/lasat
+archheaders:
+ $(Q)$(MAKE) $(build)=arch/mips/kernel/syscalls all
+
define archhelp
echo ' install - install kernel into $(INSTALL_PATH)'
echo ' vmlinux.ecoff - ECOFF boot image'
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 37fe58c19a90..542c3ede9722 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -13,6 +13,7 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
+#include "../../../../include/linux/sizes.h"
int main(int argc, char *argv[])
{
@@ -45,11 +46,11 @@ int main(int argc, char *argv[])
vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
/*
- * Align with 16 bytes: "greater than that used for any standard data
- * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
+ * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
+ * which may be as large as 64KB depending on the kernel configuration.
*/
- vmlinuz_load_addr += (16 - vmlinux_size % 16);
+ vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
printf("0x%llx\n", vmlinuz_load_addr);
diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
index 65af3f6ba81c..84328afa3a55 100644
--- a/arch/mips/boot/dts/img/boston.dts
+++ b/arch/mips/boot/dts/img/boston.dts
@@ -141,6 +141,12 @@
#size-cells = <2>;
#interrupt-cells = <1>;
+ eg20t_phub@2,0,0 {
+ compatible = "pci8086,8801";
+ reg = <0x00020000 0 0 0 0>;
+ intel,eg20t-prefetch = <0>;
+ };
+
eg20t_mac@2,0,1 {
compatible = "pci8086,8802";
reg = <0x00020100 0 0 0 0>;
diff --git a/arch/mips/boot/dts/mti/malta.dts b/arch/mips/boot/dts/mti/malta.dts
index 9944e716eac8..f03279b1cde7 100644
--- a/arch/mips/boot/dts/mti/malta.dts
+++ b/arch/mips/boot/dts/mti/malta.dts
@@ -87,6 +87,11 @@
reg = <0x1f000000 0x1000>;
native-endian;
+ lcd@410 {
+ compatible = "mti,malta-lcd";
+ offset = <0x410>;
+ };
+
reboot {
compatible = "syscon-reboot";
regmap = <&fpga_regs>;
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index 39f153fe0022..124817609ce0 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -7,6 +7,7 @@
* Copyright (C) 2009, 2012 Cavium, Inc.
*/
#include <linux/clocksource.h>
+#include <linux/sched/clock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/smp.h>
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index 94d97ebfa036..ba8f82a29a81 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -122,8 +122,21 @@ static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
}
-void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
- uint64_t min_addr, uint64_t max_addr)
+/**
+ * Allocate a block of memory, within a specified address range, from
+ * the free list that the bootloader passed to the application. This
+ * is an allocate-only algorithm, so freeing memory is not possible.
+ * Allocation fails if the request cannot be satisfied within the
+ * requested range.
+ *
+ * @size: Size in bytes of the block to allocate
+ * @alignment: Alignment required - must be a power of 2
+ * @min_addr: Minimum address of the range
+ * @max_addr: Maximum address of the range
+ * Returns a pointer to the block of memory, or NULL on error
+ */
+static void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
+ uint64_t min_addr, uint64_t max_addr)
{
int64_t address;
address =
@@ -142,47 +155,6 @@ void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
address + size);
}
-void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
-{
- return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
-}
-
-void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr,
- uint64_t max_addr, uint64_t align,
- char *name,
- void (*init) (void *))
-{
- int64_t addr;
- void *ptr;
- uint64_t named_block_desc_addr;
-
- named_block_desc_addr = (uint64_t)
- cvmx_bootmem_phy_named_block_find(name,
- (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING);
-
- if (named_block_desc_addr) {
- addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr,
- base_addr);
- return cvmx_phys_to_ptr(addr);
- }
-
- addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
- align, name,
- (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING);
-
- if (addr < 0)
- return NULL;
- ptr = cvmx_phys_to_ptr(addr);
-
- if (init)
- init(ptr);
- else
- memset(ptr, 0, size);
-
- return ptr;
-}
-EXPORT_SYMBOL(cvmx_bootmem_alloc_named_range_once);
-
void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
uint64_t max_addr, uint64_t align,
char *name)
@@ -197,30 +169,12 @@ void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr,
return NULL;
}
-void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address,
- char *name)
-{
- return cvmx_bootmem_alloc_named_range(size, address, address + size,
- 0, name);
-}
-
void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name)
{
return cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name);
}
EXPORT_SYMBOL(cvmx_bootmem_alloc_named);
-int cvmx_bootmem_free_named(char *name)
-{
- return cvmx_bootmem_phy_named_block_free(name, 0);
-}
-
-struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
-{
- return cvmx_bootmem_phy_named_block_find(name, 0);
-}
-EXPORT_SYMBOL(cvmx_bootmem_find_named_block);
-
void cvmx_bootmem_lock(void)
{
cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
@@ -603,7 +557,20 @@ bootmem_free_done:
}
-struct cvmx_bootmem_named_block_desc *
+/**
+ * Find a named memory block.
+ * Also used to find an unused entry in the named block table.
+ *
+ * @name: Name of the memory block to find. If a NULL pointer is
+ * given, an unused descriptor is returned, if available.
+ *
+ * @flags: Flags to control allocation options.
+ *
+ * Returns a pointer to the memory block descriptor, or NULL if not
+ * found. If NULL is returned when @name is NULL, then no memory
+ * block descriptors are available.
+ */
+static struct cvmx_bootmem_named_block_desc *
cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
{
unsigned int i;
@@ -655,7 +622,58 @@ struct cvmx_bootmem_named_block_desc *
return NULL;
}
-int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
+void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr,
+ uint64_t max_addr, uint64_t align,
+ char *name,
+ void (*init) (void *))
+{
+ int64_t addr;
+ void *ptr;
+ uint64_t named_block_desc_addr;
+
+ named_block_desc_addr = (uint64_t)
+ cvmx_bootmem_phy_named_block_find(name,
+ (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ if (named_block_desc_addr) {
+ addr = CVMX_BOOTMEM_NAMED_GET_FIELD(named_block_desc_addr,
+ base_addr);
+ return cvmx_phys_to_ptr(addr);
+ }
+
+ addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr,
+ align, name,
+ (uint32_t)CVMX_BOOTMEM_FLAG_NO_LOCKING);
+
+ if (addr < 0)
+ return NULL;
+ ptr = cvmx_phys_to_ptr(addr);
+
+ if (init)
+ init(ptr);
+ else
+ memset(ptr, 0, size);
+
+ return ptr;
+}
+EXPORT_SYMBOL(cvmx_bootmem_alloc_named_range_once);
+
+struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
+{
+ return cvmx_bootmem_phy_named_block_find(name, 0);
+}
+EXPORT_SYMBOL(cvmx_bootmem_find_named_block);
+
+/**
+ * Frees a named block.
+ *
+ * @name: name of block to free
+ * @flags: flags for passing options
+ *
+ * Returns 0 on failure
+ * 1 on success
+ */
+static int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
{
struct cvmx_bootmem_named_block_desc *named_block_ptr;
@@ -699,6 +717,11 @@ int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
return named_block_ptr != NULL; /* 0 on failure, 1 on success */
}
+int cvmx_bootmem_free_named(char *name)
+{
+ return cvmx_bootmem_phy_named_block_free(name, 0);
+}
+
int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
uint64_t max_addr,
uint64_t alignment,
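The relocated cvmx_bootmem_alloc_named_range_once() above gives find-or-create semantics: an existing named block is returned as-is, otherwise a new one is allocated and either handed to the init callback or zero-filled. A usage sketch with a hypothetical name and size, passing 0 for both bounds as the removed cvmx_bootmem_alloc() wrapper did:

    /* Find or create a 64KB named boot-time region; with init == NULL
     * a freshly allocated block is zeroed before being returned. */
    void *stats = cvmx_bootmem_alloc_named_range_once(
                    0x10000,        /* size */
                    0, 0,           /* min_addr/max_addr: whole memory */
                    128,            /* align: must be a power of 2 */
                    "app_stats",    /* hypothetical block name */
                    NULL);          /* init callback */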
diff --git a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
index 8241fc6aa17d..3839feba68f2 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
@@ -266,7 +266,7 @@ int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
} else {
union cvmx_pko_mem_debug8 debug8;
debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
- return debug8.cn58xx.doorbell;
+ return debug8.cn50xx.doorbell;
}
case CVMX_CMD_QUEUE_ZIP:
case CVMX_CMD_QUEUE_DFA:
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
index b8898e2b8a6f..e812ed9a03bb 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -449,71 +449,3 @@ int __cvmx_helper_rgmii_link_set(int ipd_port,
return result;
}
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
- int enable_external)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- int original_enable;
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- union cvmx_asxx_prt_loop asxx_prt_loop;
-
- /* Read the current enable state and save it */
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- original_enable = gmx_cfg.s.en;
- /* Force port to be disabled */
- gmx_cfg.s.en = 0;
- if (enable_internal) {
- /* Force speed if we're doing internal loopback */
- gmx_cfg.s.duplex = 1;
- gmx_cfg.s.slottime = 1;
- gmx_cfg.s.speed = 1;
- cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
- }
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-
- /* Set the loopback bits */
- asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
- if (enable_internal)
- asxx_prt_loop.s.int_loop |= 1 << index;
- else
- asxx_prt_loop.s.int_loop &= ~(1 << index);
- if (enable_external)
- asxx_prt_loop.s.ext_loop |= 1 << index;
- else
- asxx_prt_loop.s.ext_loop &= ~(1 << index);
- cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
-
- /* Force enables in internal loopback */
- if (enable_internal) {
- uint64_t tmp;
- tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
- cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface),
- (1 << index) | tmp);
- tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
- (1 << index) | tmp);
- original_enable = 1;
- }
-
- /* Restore the enable state */
- gmx_cfg.s.en = original_enable;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
- return 0;
-}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
index a176358c5a21..f6ebf63dc84c 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
@@ -513,41 +513,3 @@ int __cvmx_helper_sgmii_link_set(int ipd_port,
return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index,
link_info);
}
-
-/**
- * Configure a port for internal and/or external loopback. Internal
- * loopback causes packets sent by the port to be received by
- * Octeon. External loopback causes packets received from the wire to
- * sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal,
- int enable_external)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
- union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
-
- pcsx_mrx_control_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
- pcsx_mrx_control_reg.s.loopbck1 = enable_internal;
- cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
- pcsx_mrx_control_reg.u64);
-
- pcsx_miscx_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
- pcsx_miscx_ctl_reg.s.loopbck2 = enable_external;
- cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
- pcsx_miscx_ctl_reg.u64);
-
- __cvmx_helper_sgmii_hardware_init_link(interface, index);
- return 0;
-}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
index b45b2975746d..53b912745dbd 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
@@ -81,93 +81,6 @@ const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t
}
/**
- * Debug routine to dump the packet structure to the console
- *
- * @work: Work queue entry containing the packet to dump
- * Returns
- */
-int cvmx_helper_dump_packet(cvmx_wqe_t *work)
-{
- uint64_t count;
- uint64_t remaining_bytes;
- union cvmx_buf_ptr buffer_ptr;
- uint64_t start_of_buffer;
- uint8_t *data_address;
- uint8_t *end_of_data;
-
- cvmx_dprintf("Packet Length: %u\n", work->word1.len);
- cvmx_dprintf(" Input Port: %u\n", cvmx_wqe_get_port(work));
- cvmx_dprintf(" QoS: %u\n", cvmx_wqe_get_qos(work));
- cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);
-
- if (work->word2.s.bufs == 0) {
- union cvmx_ipd_wqe_fpa_queue wqe_pool;
- wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE);
- buffer_ptr.u64 = 0;
- buffer_ptr.s.pool = wqe_pool.s.wqe_pool;
- buffer_ptr.s.size = 128;
- buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
- if (likely(!work->word2.s.not_IP)) {
- union cvmx_pip_ip_offset pip_ip_offset;
- pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET);
- buffer_ptr.s.addr +=
- (pip_ip_offset.s.offset << 3) -
- work->word2.s.ip_offset;
- buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
- } else {
- /*
- * WARNING: This code assumes that the packet
- * is not RAW. If it was, we would use
- * PIP_GBL_CFG[RAW_SHF] instead of
- * PIP_GBL_CFG[NIP_SHF].
- */
- union cvmx_pip_gbl_cfg pip_gbl_cfg;
- pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG);
- buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
- }
- } else
- buffer_ptr = work->packet_ptr;
- remaining_bytes = work->word1.len;
-
- while (remaining_bytes) {
- start_of_buffer =
- ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
- cvmx_dprintf(" Buffer Start:%llx\n",
- (unsigned long long)start_of_buffer);
- cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
- cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
- cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
- cvmx_dprintf(" Buffer Data: %llx\n",
- (unsigned long long)buffer_ptr.s.addr);
- cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);
-
- cvmx_dprintf("\t\t");
- data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr);
- end_of_data = data_address + buffer_ptr.s.size;
- count = 0;
- while (data_address < end_of_data) {
- if (remaining_bytes == 0)
- break;
- else
- remaining_bytes--;
- cvmx_dprintf("%02x", (unsigned int)*data_address);
- data_address++;
- if (remaining_bytes && (count == 7)) {
- cvmx_dprintf("\n\t\t");
- count = 0;
- } else
- count++;
- }
- cvmx_dprintf("\n");
-
- if (remaining_bytes)
- buffer_ptr = *(union cvmx_buf_ptr *)
- cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
- }
- return 0;
-}
-
-/**
* Setup Random Early Drop on a specific input queue
*
* @queue: Input queue to setup RED on (0-7)
@@ -179,7 +92,8 @@ int cvmx_helper_dump_packet(cvmx_wqe_t *work)
* than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
-int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
+static int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
+ int drop_thresh)
{
union cvmx_ipd_qosx_red_marks red_marks;
union cvmx_ipd_red_quex_param red_param;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index 2bb6912a580d..93a498d05184 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -319,42 +319,3 @@ int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
/* Bring the link up */
return __cvmx_helper_xaui_enable(interface);
}
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- union cvmx_pcsxx_control1_reg pcsxx_control1_reg;
- union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback;
-
- /* Set the internal loop */
- pcsxx_control1_reg.u64 =
- cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
- pcsxx_control1_reg.s.loopbck1 = enable_internal;
- cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface),
- pcsxx_control1_reg.u64);
-
- /* Set the external loop */
- gmxx_xaui_ext_loopback.u64 =
- cvmx_read_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
- gmxx_xaui_ext_loopback.s.en = enable_external;
- cvmx_write_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface),
- gmxx_xaui_ext_loopback.u64);
-
- /* Take the link through a reset */
- return __cvmx_helper_xaui_enable(interface);
-}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 6c79e8a16a26..a76bbcc30f95 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -46,26 +46,6 @@
#include <asm/octeon/cvmx-smix-defs.h>
#include <asm/octeon/cvmx-asxx-defs.h>
-/**
- * cvmx_override_pko_queue_priority(int ipd_port, uint64_t
- * priorities[16]) is a function pointer. It is meant to allow
- * customization of the PKO queue priorities based on the port
- * number. Users should set this pointer to a function before
- * calling any cvmx-helper operations.
- */
-void (*cvmx_override_pko_queue_priority) (int pko_port,
- uint64_t priorities[16]);
-
-/**
- * cvmx_override_ipd_port_setup(int ipd_port) is a function
- * pointer. It is meant to allow customization of the IPD port
- * setup before packet input/output comes online. It is called
- * after cvmx-helper does the default IPD configuration, but
- * before IPD is enabled. Users should set this pointer to a
- * function before calling any cvmx-helper operations.
- */
-void (*cvmx_override_ipd_port_setup) (int ipd_port);
-
/* Port count per interface */
static int interface_port_count[9];
@@ -238,7 +218,7 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
- switch (mode.cn63xx.mode) {
+ switch (mode.cn61xx.mode) {
case 0:
return CVMX_HELPER_INTERFACE_MODE_SGMII;
case 1:
@@ -362,7 +342,7 @@ cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- switch (mode.cn56xx.mode) {
+ switch (mode.cn52xx.mode) {
case 0:
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
case 1:
@@ -436,10 +416,6 @@ static int __cvmx_helper_port_setup_ipd(int ipd_port)
cvmx_pip_config_port(ipd_port, port_config, tag_config);
- /* Give the user a chance to override our setting for each port */
- if (cvmx_override_ipd_port_setup)
- cvmx_override_ipd_port_setup(ipd_port);
-
return 0;
}
@@ -663,13 +639,6 @@ static int __cvmx_helper_interface_setup_pko(int interface)
int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
int num_ports = interface_port_count[interface];
while (num_ports--) {
- /*
- * Give the user a chance to override the per queue
- * priorities.
- */
- if (cvmx_override_pko_queue_priority)
- cvmx_override_pko_queue_priority(ipd_port, priorities);
-
cvmx_pko_config_port(ipd_port,
cvmx_pko_get_base_queue_per_core(ipd_port,
0),
@@ -818,7 +787,7 @@ static int __cvmx_helper_packet_hardware_enable(int interface)
* Returns 0 on success
* !0 on failure
*/
-int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
+static int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
{
#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES \
(CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP)
@@ -1239,57 +1208,3 @@ int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
return result;
}
EXPORT_SYMBOL_GPL(cvmx_helper_link_set);
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
- int enable_external)
-{
- int result = -1;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
-
- if (index >= cvmx_helper_ports_on_interface(interface))
- return -1;
-
- switch (cvmx_helper_interface_get_mode(interface)) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- break;
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- result =
- __cvmx_helper_xaui_configure_loopback(ipd_port,
- enable_internal,
- enable_external);
- break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- result =
- __cvmx_helper_rgmii_configure_loopback(ipd_port,
- enable_internal,
- enable_external);
- break;
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- result =
- __cvmx_helper_sgmii_configure_loopback(ipd_port,
- enable_internal,
- enable_external);
- break;
- }
- return result;
-}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
index fa327ec891cd..d23f46736dd6 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
@@ -84,7 +84,7 @@ void __cvmx_interrupt_gmxx_enable(int interface)
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
if (mode.s.en) {
- switch (mode.cn56xx.mode) {
+ switch (mode.cn52xx.mode) {
case 1: /* XAUI */
num_ports = 1;
break;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index f091c9b70603..83df0a963a8b 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -44,7 +44,7 @@
* if multiple applications or operating systems are running, then it
* is up to the user program to coordinate between them.
*/
-cvmx_spinlock_t cvmx_l2c_spinlock;
+static cvmx_spinlock_t cvmx_l2c_spinlock;
int cvmx_l2c_get_core_way_partition(uint32_t core)
{
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
index 341052387b49..657dbad9644e 100644
--- a/arch/mips/cavium-octeon/executive/octeon-model.c
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -305,7 +305,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
if (fus_dat3.s.nozip)
suffix = "SCP";
- if (fus_dat3.cn56xx.bar2_en)
+ if (fus_dat3.cn38xx.bar2_en)
suffix = "NSPB2";
}
if (l2d_fus3)
@@ -344,7 +344,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
suffix = "CP";
else if (fus_dat2.cn63xx.dorm_crypto)
suffix = "DAP";
- else if (fus_dat3.cn63xx.nozip)
+ else if (fus_dat3.cn61xx.nozip)
suffix = "SCP";
else
suffix = "AAP";
@@ -359,18 +359,18 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
suffix = "CP";
else if (fus_dat2.cn66xx.dorm_crypto)
suffix = "DAP";
- else if (fus_dat3.cn66xx.nozip)
+ else if (fus_dat3.cn61xx.nozip)
suffix = "SCP";
else
suffix = "AAP";
break;
case 0x91: /* CN68XX */
family = "68";
- if (fus_dat2.cn68xx.nocrypto && fus_dat3.cn68xx.nozip)
+ if (fus_dat2.cn68xx.nocrypto && fus_dat3.cn61xx.nozip)
suffix = "CP";
else if (fus_dat2.cn68xx.dorm_crypto)
suffix = "DAP";
- else if (fus_dat3.cn68xx.nozip)
+ else if (fus_dat3.cn61xx.nozip)
suffix = "SCP";
else if (fus_dat2.cn68xx.nocrypto)
suffix = "SP";
@@ -379,7 +379,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
break;
case 0x94: /* CNF71XX */
family = "F71";
- if (fus_dat3.cnf71xx.nozip)
+ if (fus_dat3.cn61xx.nozip)
suffix = "SCP";
else
suffix = "AAP";
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index cc1d8525e651..f97be32bf699 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -2483,8 +2483,8 @@ void octeon_irq_ciu3_mask_ack(struct irq_data *data)
}
#ifdef CONFIG_SMP
-int octeon_irq_ciu3_set_affinity(struct irq_data *data,
- const struct cpumask *dest, bool force)
+static int octeon_irq_ciu3_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force)
{
union cvmx_ciu3_iscx_ctl isc_ctl;
union cvmx_ciu3_iscx_w1c isc_w1c;
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 807cadaf554e..1f9ba60f7375 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -440,7 +440,7 @@ out:
}
device_initcall(octeon_rng_device_init);
-const struct of_device_id octeon_ids[] __initconst = {
+static const struct of_device_id octeon_ids[] __initconst = {
{ .compatible = "simple-bus", },
{ .compatible = "cavium,octeon-6335-uctl", },
{ .compatible = "cavium,octeon-5750-usbn", },
@@ -501,7 +501,7 @@ static void __init octeon_fdt_set_phy(int eth, int phy_addr)
if (phy_addr >= 256 && alt_phy > 0) {
const struct fdt_property *phy_prop;
struct fdt_property *alt_prop;
- u32 phy_handle_name;
+ fdt32_t phy_handle_name;
/* Use the alt phy node instead.*/
phy_prop = fdt_get_property(initial_boot_params, eth, "phy-handle", NULL);
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
index bfdfaf32d2c4..1f730ded5224 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -253,17 +253,17 @@ static int dwc3_octeon_config_power(struct device *dev, u64 base)
&& gpio <= 31) {
gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio));
gpio_bit.s.tx_oe = 1;
- gpio_bit.cn73xx.output_sel = (index == 0 ? 0x14 : 0x15);
+ gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x15);
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64);
} else if (gpio <= 15) {
gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(gpio));
gpio_bit.s.tx_oe = 1;
- gpio_bit.cn70xx.output_sel = (index == 0 ? 0x14 : 0x19);
+ gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x19);
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(gpio), gpio_bit.u64);
} else {
gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_XBIT_CFGX(gpio));
gpio_bit.s.tx_oe = 1;
- gpio_bit.cn70xx.output_sel = (index == 0 ? 0x14 : 0x19);
+ gpio_bit.s.output_sel = (index == 0 ? 0x14 : 0x19);
cvmx_write_csr(CVMX_GPIO_XBIT_CFGX(gpio), gpio_bit.u64);
}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index dfb95cffef3e..2c79ab52977a 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -36,7 +36,9 @@
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
+#include <asm/fw/fw.h>
#include <asm/setup.h>
+#include <asm/prom.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
@@ -72,7 +74,7 @@ static unsigned long long reserve_low_mem;
DEFINE_SEMAPHORE(octeon_bootbus_sem);
EXPORT_SYMBOL(octeon_bootbus_sem);
-struct octeon_boot_descriptor *octeon_boot_desc_ptr;
+static struct octeon_boot_descriptor *octeon_boot_desc_ptr;
struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);
@@ -351,7 +353,7 @@ EXPORT_SYMBOL(octeon_get_io_clock_rate);
*
* @s: String to write
*/
-void octeon_write_lcd(const char *s)
+static void octeon_write_lcd(const char *s)
{
if (octeon_bootinfo->led_display_base_addr) {
void __iomem *lcd_address =
@@ -373,7 +375,7 @@ void octeon_write_lcd(const char *s)
*
* Returns uart (0 or 1)
*/
-int octeon_get_boot_uart(void)
+static int octeon_get_boot_uart(void)
{
return (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
1 : 0;
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 39f2a2ec1286..076db9a06b5e 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -284,7 +284,7 @@ static void octeon_smp_finish(void)
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU. */
-DEFINE_PER_CPU(int, cpu_state);
+static DEFINE_PER_CPU(int, cpu_state);
static int octeon_cpu_disable(void)
{
@@ -413,7 +413,7 @@ late_initcall(register_cavium_notifier);
#endif /* CONFIG_HOTPLUG_CPU */
-const struct plat_smp_ops octeon_smp_ops = {
+static const struct plat_smp_ops octeon_smp_ops = {
.send_ipi_single = octeon_send_ipi_single,
.send_ipi_mask = octeon_send_ipi_mask,
.init_secondary = octeon_init_secondary,
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
index 5651f4d8f45c..9fbfb6e5c7d2 100644
--- a/arch/mips/configs/ar7_defconfig
+++ b/arch/mips/configs/ar7_defconfig
@@ -1,29 +1,27 @@
-CONFIG_AR7=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_TINY_RCU=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_LZMA=y
CONFIG_EXPERT=y
-# CONFIG_KALLSYMS is not set
# CONFIG_ELF_CORE is not set
-# CONFIG_PCSPKR_PLATFORM is not set
+# CONFIG_KALLSYMS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
+CONFIG_AR7=y
+CONFIG_HZ_100=y
+CONFIG_KEXEC=y
+# CONFIG_SECCOMP is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BSD_DISKLABEL=y
# CONFIG_IOSCHED_CFQ is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -35,7 +33,6 @@ CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_MROUTE=y
-CONFIG_ARPD=y
CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
@@ -59,13 +56,9 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_RAW=m
CONFIG_ATM=m
@@ -79,8 +72,6 @@ CONFIG_NET_ACT_POLICE=y
CONFIG_HAMRADIO=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
-CONFIG_MAC80211_RC_PID=y
-CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
@@ -91,25 +82,22 @@ CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
CONFIG_NETDEVICES=y
-CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_CPMAC=y
+CONFIG_FIXED_PHY=y
CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPPOE=m
+CONFIG_PPP_MULTILINK=y
CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
@@ -131,13 +119,9 @@ CONFIG_JFFS2_FS=y
CONFIG_JFFS2_SUMMARY=y
CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_SQUASHFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
+# CONFIG_CRYPTO_HW is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
-CONFIG_CRYPTO=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/ath25_defconfig b/arch/mips/configs/ath25_defconfig
index b8d48038e74f..5dd6b1939e9c 100644
--- a/arch/mips/configs/ath25_defconfig
+++ b/arch/mips/configs/ath25_defconfig
@@ -1,11 +1,6 @@
-CONFIG_ATH25=y
-# CONFIG_COMPACTION is not set
-CONFIG_HZ_100=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
-# CONFIG_FHANDLE is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
@@ -14,16 +9,21 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_FHANDLE is not set
# CONFIG_AIO is not set
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
+CONFIG_ATH25=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
-# CONFIG_SUSPEND is not set
+# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -75,7 +75,6 @@ CONFIG_INPUT=m
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
@@ -104,15 +103,15 @@ CONFIG_SQUASHFS_FILE_DIRECT=y
CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
# CONFIG_SQUASHFS_ZLIB is not set
CONFIG_SQUASHFS_XZ=y
-CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_DEBUG_FS=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_FTRACE is not set
# CONFIG_XZ_DEC_X86 is not set
# CONFIG_XZ_DEC_POWERPC is not set
# CONFIG_XZ_DEC_IA64 is not set
# CONFIG_XZ_DEC_ARM is not set
# CONFIG_XZ_DEC_ARMTHUMB is not set
# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 951c4231bdb8..4e4ec779f182 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -1,30 +1,29 @@
-CONFIG_ATH79=y
-CONFIG_ATH79_MACH_AP121=y
-CONFIG_ATH79_MACH_AP136=y
-CONFIG_ATH79_MACH_AP81=y
-CONFIG_ATH79_MACH_DB120=y
-CONFIG_ATH79_MACH_PB44=y
-CONFIG_ATH79_MACH_UBNT_XM=y
-CONFIG_HZ_100=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
-CONFIG_RD_LZMA=y
-# CONFIG_KALLSYMS is not set
# CONFIG_AIO is not set
+# CONFIG_KALLSYMS is not set
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
+CONFIG_ATH79=y
+CONFIG_ATH79_MACH_AP121=y
+CONFIG_ATH79_MACH_AP136=y
+CONFIG_ATH79_MACH_AP81=y
+CONFIG_ATH79_MACH_DB120=y
+CONFIG_ATH79_MACH_PB44=y
+CONFIG_ATH79_MACH_UBNT_XM=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+# CONFIG_SUSPEND is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_PCI=y
-# CONFIG_SUSPEND is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -52,12 +51,9 @@ CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_SPI_NOR=y
CONFIG_NETDEVICES=y
-# CONFIG_NET_PACKET_ENGINE is not set
-CONFIG_ATH_COMMON=m
CONFIG_ATH9K=m
CONFIG_ATH9K_AHB=y
CONFIG_INPUT=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO_POLLED=m
# CONFIG_INPUT_MOUSE is not set
@@ -65,7 +61,6 @@ CONFIG_INPUT_MISC=y
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
@@ -98,11 +93,9 @@ CONFIG_LEDS_GPIO=y
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_CRC_ITU_T=m
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_FTRACE is not set
-CONFIG_CRYPTO=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC_ITU_T=m
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index ba800a892384..249f5285e343 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -1,16 +1,15 @@
-CONFIG_BCM47XX=y
CONFIG_SYSVIPC=y
CONFIG_HIGH_RES_TIMERS=y
-CONFIG_UIDGID_STRICT_TYPE_CHECKS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
+CONFIG_BCM47XX=y
+CONFIG_PCI=y
+# CONFIG_SUSPEND is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
-CONFIG_PCI=y
-# CONFIG_SUSPEND is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -47,8 +46,6 @@ CONFIG_MTD_NAND_BCM47XXNFLASH=y
CONFIG_NETDEVICES=y
CONFIG_B44=y
CONFIG_TIGON3=y
-CONFIG_BGMAC=y
-CONFIG_ATH_CARDS=y
CONFIG_ATH5K=y
CONFIG_B43=y
CONFIG_B43LEGACY=y
@@ -73,6 +70,7 @@ CONFIG_USB_HCD_BCMA=y
CONFIG_USB_HCD_SSB=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_CRC32_SARWATE=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_REDUCED=y
@@ -81,4 +79,3 @@ CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200"
-CONFIG_CRC32_SARWATE=y
diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig
index 131b350f014f..d22fe62adad3 100644
--- a/arch/mips/configs/bcm63xx_defconfig
+++ b/arch/mips/configs/bcm63xx_defconfig
@@ -1,16 +1,7 @@
-CONFIG_BCM63XX=y
-CONFIG_BCM63XX_CPU_6338=y
-CONFIG_BCM63XX_CPU_6345=y
-CONFIG_BCM63XX_CPU_6348=y
-CONFIG_BCM63XX_CPU_6358=y
-CONFIG_NO_HZ=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
-CONFIG_TINY_RCU=y
-CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_NO_HZ=y
CONFIG_EXPERT=y
-# CONFIG_PCSPKR_PLATFORM is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
@@ -20,12 +11,18 @@ CONFIG_EXPERT=y
# CONFIG_AIO is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
+CONFIG_BCM63XX=y
+CONFIG_BCM63XX_CPU_6338=y
+CONFIG_BCM63XX_CPU_6345=y
+CONFIG_BCM63XX_CPU_6348=y
+CONFIG_BCM63XX_CPU_6358=y
+# CONFIG_SECCOMP is not set
CONFIG_PCI=y
CONFIG_PCCARD=y
CONFIG_PCMCIA_BCM63XX=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_INET=y
@@ -37,7 +34,6 @@ CONFIG_INET=y
CONFIG_CFG80211=y
CONFIG_NL80211_TESTMODE=y
CONFIG_MAC80211=y
-CONFIG_MAC80211_LEDS=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
@@ -49,18 +45,16 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
# CONFIG_BLK_DEV is not set
CONFIG_NETDEVICES=y
-CONFIG_BCM63XX_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_BCM63XX_ENET=y
+CONFIG_BCM63XX_PHY=y
CONFIG_B43=y
# CONFIG_B43_PHY_LP is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
+# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_BCM63XX=y
CONFIG_SERIAL_BCM63XX_CONSOLE=y
-# CONFIG_UNIX98_PTYS is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_VGA_ARB is not set
@@ -68,16 +62,11 @@ CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_OHCI_HCD=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_GPIO=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
# CONFIG_FILE_LOCKING is not set
# CONFIG_DNOTIFY is not set
CONFIG_PROC_KCORE=y
# CONFIG_NETWORK_FILESYSTEMS is not set
+# CONFIG_CRYPTO_HW is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200"
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index 5e73fe755be6..597bc0aa2653 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -1,45 +1,37 @@
-CONFIG_SIBYTE_BIGSUR=y
-CONFIG_64BIT=y
-CONFIG_SMP=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_1000=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
-CONFIG_RELAY=y
CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
CONFIG_USER_NS=y
-CONFIG_PID_NS=y
-CONFIG_NET_NS=y
+CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_SIBYTE_BIGSUR=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_HZ_1000=y
CONFIG_PCI=y
CONFIG_PCI_DEBUG=y
-CONFIG_MIPS32_COMPAT=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
CONFIG_PM=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -94,7 +86,6 @@ CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_DCCP=m
-CONFIG_SCTP_HMAC_SHA1=y
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
@@ -134,20 +125,18 @@ CONFIG_PATA_SIL680=y
CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_SB1250_MAC=y
CONFIG_CHELSIO_T3=m
CONFIG_NETXEN_NIC=m
CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
@@ -168,13 +157,10 @@ CONFIG_EXT2_FS=m
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
CONFIG_EXT3_FS=m
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
# CONFIG_PRINT_QUOTA_WARNING is not set
@@ -192,10 +178,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_RPCSEC_GSS_KRB5=m
-CONFIG_RPCSEC_GSS_SPKM3=m
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_CODEPAGE_775=m
@@ -234,13 +217,6 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_SPINLOCK_SLEEP=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_LIST=y
CONFIG_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@@ -265,7 +241,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -283,3 +258,7 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
CONFIG_CRC_T10DIF=m
CONFIG_CRC7=m
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_LIST=y
diff --git a/arch/mips/configs/bmips_be_defconfig b/arch/mips/configs/bmips_be_defconfig
index a7072a14d396..8a91f0101134 100644
--- a/arch/mips/configs/bmips_be_defconfig
+++ b/arch/mips/configs/bmips_be_defconfig
@@ -1,17 +1,16 @@
-CONFIG_BMIPS_GENERIC=y
-CONFIG_HIGHMEM=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=4
-# CONFIG_SECCOMP is not set
-CONFIG_MIPS_O32_FP64_SUPPORT=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_NO_HZ=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_GZIP=y
CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
+CONFIG_BMIPS_GENERIC=y
+CONFIG_HIGHMEM=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_O32_FP64_SUPPORT=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
@@ -32,8 +31,6 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_PRINTK_TIME=y
-CONFIG_BRCMSTB_GISB_ARB=y
CONFIG_MTD=y
CONFIG_MTD_BCM63XX_PARTS=y
CONFIG_MTD_CFI=y
@@ -50,14 +47,12 @@ CONFIG_USB_USBNET=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_BCM63XX=y
CONFIG_SERIAL_BCM63XX_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_POWER_SUPPLY=y
CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_BRCMSTB=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_SUPPLY=y
# CONFIG_HWMON is not set
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
@@ -79,8 +74,9 @@ CONFIG_CIFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon"
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/bmips_stb_defconfig b/arch/mips/configs/bmips_stb_defconfig
index 47aecb8750e6..39adcca46bb0 100644
--- a/arch/mips/configs/bmips_stb_defconfig
+++ b/arch/mips/configs/bmips_stb_defconfig
@@ -1,10 +1,3 @@
-CONFIG_BMIPS_GENERIC=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_HIGHMEM=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=4
-# CONFIG_SECCOMP is not set
-CONFIG_MIPS_O32_FP64_SUPPORT=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_NO_HZ=y
@@ -12,9 +5,13 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
+CONFIG_BMIPS_GENERIC=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_HIGHMEM=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_O32_FP64_SUPPORT=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -23,6 +20,9 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
CONFIG_BMIPS_CPUFREQ=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=y
@@ -61,7 +61,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_BRCMSTB=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_POWER_SUPPLY=y
# CONFIG_HWMON is not set
@@ -86,9 +85,9 @@ CONFIG_CIFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
+# CONFIG_CRYPTO_HW is not set
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon"
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/capcella_defconfig b/arch/mips/configs/capcella_defconfig
index bd80b5c852dd..7bf8971af53b 100644
--- a/arch/mips/configs/capcella_defconfig
+++ b/arch/mips/configs/capcella_defconfig
@@ -1,10 +1,9 @@
-CONFIG_MACH_VR41XX=y
-CONFIG_ZAO_CAPCELLA=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_ZAO_CAPCELLA=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
@@ -34,18 +33,15 @@ CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_PATA_LEGACY=y
CONFIG_NETDEVICES=y
+CONFIG_8139TOO=y
CONFIG_PHYLIB=m
-CONFIG_MARVELL_PHY=m
+CONFIG_CICADA_PHY=m
CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
CONFIG_SMSC_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_8139TOO=y
-# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_VITESSE_PHY=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -67,9 +63,6 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="mem=32M console=ttyVR0,38400"
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_LRW=m
@@ -77,7 +70,6 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -95,3 +87,5 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="mem=32M console=ttyVR0,38400"
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index c52d0efacd14..d7abb648b8a0 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -1,13 +1,6 @@
-CONFIG_CAVIUM_OCTEON_SOC=y
-CONFIG_CAVIUM_CN63XXP1=y
-CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2
-CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=32
-CONFIG_HZ_100=y
-CONFIG_PREEMPT=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_PREEMPT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
@@ -17,14 +10,21 @@ CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_CAVIUM_OCTEON_SOC=y
+CONFIG_CAVIUM_CN63XXP1=y
+CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE=2
+CONFIG_OCTEON_ILM=m
+CONFIG_SMP=y
+CONFIG_NR_CPUS=32
+CONFIG_HZ_100=y
CONFIG_PCI=y
CONFIG_PCI_MSI=y
-CONFIG_MIPS32_COMPAT=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -42,7 +42,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
-CONFIG_IPV6=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FW_LOADER is not set
@@ -52,7 +51,6 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_SLRAM=y
-CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
@@ -74,7 +72,6 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -84,10 +81,9 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
@@ -99,9 +95,9 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_TOSHIBA is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_MARVELL_PHY=y
-CONFIG_BROADCOM_PHY=y
CONFIG_BCM87XX_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_MARVELL_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -111,7 +107,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
CONFIG_SERIAL_8250_DW=y
-# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_OCTEON=y
CONFIG_SPI=y
@@ -159,10 +154,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_INFO=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_CRYPTO_CBC=y
@@ -172,4 +163,7 @@ CONFIG_CRYPTO_SHA1_OCTEON=m
CONFIG_CRYPTO_SHA256_OCTEON=m
CONFIG_CRYPTO_SHA512_OCTEON=m
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index 030ff9c205fb..412800d5d7e0 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -1,18 +1,10 @@
-CONFIG_MACH_INGENIC=y
-CONFIG_JZ4780_CI20=y
-CONFIG_HIGHMEM=y
-# CONFIG_COMPACTION is not set
-CONFIG_CMA=y
-CONFIG_HZ_100=y
-CONFIG_PREEMPT=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_XZ=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -20,7 +12,6 @@ CONFIG_CGROUPS=y
CONFIG_MEMCG=y
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
-CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_NAMESPACES=y
@@ -32,8 +23,15 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_MACH_INGENIC=y
+CONFIG_JZ4780_CI20=y
+CONFIG_HIGHMEM=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
# CONFIG_SUSPEND is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+CONFIG_CMA=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -59,7 +57,6 @@ CONFIG_MTD_UBI=y
CONFIG_MTD_UBI_FASTMAP=y
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
CONFIG_DM9000=y
CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL=y
@@ -76,13 +73,11 @@ CONFIG_DM9000_FORCE_SIMPLE_PHY_POLL=y
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_LEGACY_PTY_COUNT=2
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=5
@@ -95,7 +90,6 @@ CONFIG_I2C_JZ4780=y
CONFIG_SPI=y
CONFIG_SPI_GPIO=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_INGENIC=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_JZ4740_WDT=y
@@ -166,9 +160,6 @@ CONFIG_DEBUG_INFO=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_PANIC_TIMEOUT=10
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/mips/configs/cobalt_defconfig b/arch/mips/configs/cobalt_defconfig
index a9066f300665..20c62841827f 100644
--- a/arch/mips/configs/cobalt_defconfig
+++ b/arch/mips/configs/cobalt_defconfig
@@ -1,9 +1,8 @@
-CONFIG_MIPS_COBALT=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
+CONFIG_MIPS_COBALT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -17,7 +16,6 @@ CONFIG_INET=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
-CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
@@ -28,11 +26,9 @@ CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_PATA_VIA=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
CONFIG_NET_TULIP=y
CONFIG_DE2104X=y
CONFIG_TULIP=y
-# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -72,10 +68,8 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CONFIGFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
CONFIG_NFSD_V3_ACL=y
-CONFIG_CRC16=y
CONFIG_LIBCRC32C=y
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
index 0108bb9f1e37..34633b7611cb 100644
--- a/arch/mips/configs/db1xxx_defconfig
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -1,41 +1,36 @@
-CONFIG_MIPS_ALCHEMY=y
-CONFIG_MIPS_DB1XXX=y
-CONFIG_CMA=y
-CONFIG_CMA_DEBUG=y
-CONFIG_HZ_100=y
CONFIG_LOCALVERSION="-db1xxx"
CONFIG_KERNEL_XZ=y
CONFIG_DEFAULT_HOSTNAME="db1xxx"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_CGROUPS=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
-CONFIG_BLK_DEV_BSGLIB=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_DEFAULT_NOOP=y
+CONFIG_MIPS_ALCHEMY=y
+CONFIG_HZ_100=y
CONFIG_PCI=y
-CONFIG_PCI_REALLOC_ENABLE_AUTO=y
CONFIG_PCCARD=y
CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
-CONFIG_PM=y
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_DEFAULT_NOOP=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=y
@@ -78,13 +73,6 @@ CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
CONFIG_IPV6_PIMSM_V2=y
CONFIG_BRIDGE=y
CONFIG_NETLINK_DIAG=y
-CONFIG_IRDA=y
-CONFIG_IRLAN=y
-CONFIG_IRCOMM=y
-CONFIG_IRDA_ULTRA=y
-CONFIG_IRDA_CACHE_LAST_LSAP=y
-CONFIG_IRDA_FAST_RR=y
-CONFIG_AU1000_FIR=y
CONFIG_BT=y
CONFIG_BT_RFCOMM=y
CONFIG_BT_RFCOMM_TTY=y
@@ -116,7 +104,6 @@ CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_ATA=y
CONFIG_PATA_HPT37X=y
CONFIG_PATA_HPT3X2N=y
@@ -155,9 +142,9 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_SOUND=y
CONFIG_SND=y
-CONFIG_SND_SEQUENCER=y
CONFIG_SND_HRTIMER=y
CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=y
CONFIG_SND_AC97_POWER_SAVE=y
CONFIG_SND_AC97_POWER_SAVE_DEFAULT=1
CONFIG_SND_SOC=y
@@ -180,7 +167,6 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
-CONFIG_MMC_CLKGATE=y
CONFIG_SDIO_UART=y
CONFIG_MMC_AU1X=y
CONFIG_NEW_LEDS=y
@@ -188,12 +174,13 @@ CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AU1XXX=y
-CONFIG_FIRMWARE_MEMMAP=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_XFS_FS=y
CONFIG_XFS_POSIX_ACL=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
CONFIG_FANOTIFY=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
@@ -211,8 +198,6 @@ CONFIG_SQUASHFS_FILE_DIRECT=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
-CONFIG_F2FS_FS=y
-CONFIG_F2FS_FS_SECURITY=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
@@ -232,7 +217,6 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_2=y
CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_SECURITYFS=y
CONFIG_CRYPTO_USER=y
CONFIG_CRYPTO_CRYPTD=y
@@ -241,3 +225,4 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=y
CONFIG_CRC32_SLICEBY4=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig
new file mode 100644
index 000000000000..85f1955b4b00
--- /dev/null
+++ b/arch/mips/configs/decstation_64_defconfig
@@ -0,0 +1,227 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EXPERT=y
+# CONFIG_SGETMASK_SYSCALL is not set
+# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_SYSCALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MACH_DECSTATION=y
+CONFIG_64BIT=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_TC=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_IP_SCTP=m
+CONFIG_VLAN_8021Q=m
+CONFIG_DECNET=m
+CONFIG_DECNET_ROUTER=y
+# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_MTD_MS02NV=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+CONFIG_DECLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_FDDI=y
+CONFIG_DEFZA=y
+CONFIG_DEFXX=y
+# CONFIG_WLAN is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_LKKBD=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_MOUSE_VSXXXAA=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_TGA=y
+CONFIG_FB_PMAG_AA=y
+CONFIG_FB_PMAG_BA=y
+CONFIG_FB_PMAGB_B=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE_COLUMNS=160
+CONFIG_DUMMY_CONSOLE_ROWS=64
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_MANDATORY_FILE_LOCKING is not set
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index e149f78901f8..0c86ed86266a 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -1,17 +1,26 @@
-CONFIG_MACH_DECSTATION=y
-CONFIG_CPU_R3000=y
CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=15
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_HOTPLUG is not set
+# CONFIG_SGETMASK_SYSCALL is not set
+# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_SYSCALL=y
+# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
+CONFIG_MACH_DECSTATION=y
+CONFIG_CPU_R3000=y
+CONFIG_TC=y
+# CONFIG_SUSPEND is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_TC=y
-CONFIG_PM=y
+# CONFIG_LBDAF is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -39,37 +48,92 @@ CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETWORK_SECMARK=y
+CONFIG_IP_SCTP=m
CONFIG_VLAN_8021Q=m
-CONFIG_CONNECTOR=m
+CONFIG_DECNET=m
+CONFIG_DECNET_ROUTER=y
+# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_MTD_MS02NV=m
CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_RAM=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_SPI_ATTRS=m
-CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
CONFIG_DECLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_FDDI=y
-CONFIG_DEFXX=m
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_SERIAL_DZ is not set
+CONFIG_DEFZA=y
+CONFIG_DEFXX=y
+# CONFIG_WLAN is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_LKKBD=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_MOUSE_VSXXXAA=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_FB=y
+CONFIG_FB_TGA=y
+CONFIG_FB_PMAG_AA=y
CONFIG_FB_PMAG_BA=y
CONFIG_FB_PMAGB_B=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE_COLUMNS=160
+CONFIG_DUMMY_CONSOLE_ROWS=64
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -77,30 +141,60 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_FUSE_FS=m
+# CONFIG_MANDATORY_FILE_LOCKING is not set
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CONFIGFS_FS=y
CONFIG_UFS_FS=y
CONFIG_UFS_FS_WRITE=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_SWAP=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_OSF_PARTITION=y
-CONFIG_DLM=m
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_ECB=m
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -112,6 +206,19 @@ CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
new file mode 100644
index 000000000000..0e54ab2680ce
--- /dev/null
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -0,0 +1,224 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_EXPERT=y
+# CONFIG_SGETMASK_SYSCALL is not set
+# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_SYSCALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MACH_DECSTATION=y
+CONFIG_TC=y
+# CONFIG_SUSPEND is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_LBDAF is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+# CONFIG_EFI_PARTITION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_IP_SCTP=m
+CONFIG_VLAN_8021Q=m
+CONFIG_DECNET=m
+CONFIG_DECNET_ROUTER=y
+# CONFIG_WIRELESS is not set
+# CONFIG_UEVENT_HELPER is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=m
+CONFIG_MTD_BLOCK=m
+CONFIG_MTD_BLOCK_RO=m
+CONFIG_MTD_MS02NV=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+CONFIG_DECLANCE=y
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_FDDI=y
+CONFIG_DEFZA=y
+CONFIG_DEFXX=y
+# CONFIG_WLAN is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_LKKBD=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_MOUSE_VSXXXAA=y
+# CONFIG_SERIAL_DZ is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_TGA=y
+CONFIG_FB_PMAG_AA=y
+CONFIG_FB_PMAG_BA=y
+CONFIG_FB_PMAGB_B=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE_COLUMNS=160
+CONFIG_DUMMY_CONSOLE_ROWS=64
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+# CONFIG_MANDATORY_FILE_LOCKING is not set
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_FS_WRITE=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_SWAP=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_UTF8=m
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/e55_defconfig b/arch/mips/configs/e55_defconfig
index c3ac0209457c..fd82b858a8f0 100644
--- a/arch/mips/configs/e55_defconfig
+++ b/arch/mips/configs/e55_defconfig
@@ -1,11 +1,9 @@
-CONFIG_MACH_VR41XX=y
-CONFIG_CASIO_E55=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_HOTPLUG is not set
CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_CASIO_E55=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
@@ -16,7 +14,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_PATA_LEGACY=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -38,4 +35,3 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyVR0,19200 ide0=0x1f0,0x3f6,40 mem=8M"
-# CONFIG_CRC32 is not set
diff --git a/arch/mips/configs/fuloong2e_defconfig b/arch/mips/configs/fuloong2e_defconfig
index 499f51498ecb..8bcb61a6ec15 100644
--- a/arch/mips/configs/fuloong2e_defconfig
+++ b/arch/mips/configs/fuloong2e_defconfig
@@ -1,39 +1,33 @@
-CONFIG_MACH_LOONGSON64=y
-CONFIG_64BIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_LOCALVERSION="-fuloong2e"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
-CONFIG_PID_NS=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_PCSPKR_PLATFORM is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MACH_LOONGSON64=y
CONFIG_PCI=y
-CONFIG_BINFMT_MISC=y
-CONFIG_MIPS32_COMPAT=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
-CONFIG_PM=y
# CONFIG_SUSPEND is not set
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION="/dev/sda3"
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -42,14 +36,11 @@ CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
@@ -78,13 +69,11 @@ CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
@@ -120,32 +109,30 @@ CONFIG_PATA_VIA=y
CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=y
CONFIG_NETDEVICES=y
+CONFIG_NET_FC=y
CONFIG_MACVLAN=m
CONFIG_VETH=m
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
CONFIG_PHYLIB=m
-CONFIG_MARVELL_PHY=m
+CONFIG_CICADA_PHY=m
CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_8139TOO=y
-# CONFIG_8139TOO_PIO is not set
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
CONFIG_SLIP=m
CONFIG_SLIP_COMPRESSED=y
CONFIG_SLIP_SMART=y
CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NET_FC=y
CONFIG_INPUT_FF_MEMLESS=y
CONFIG_MOUSE_SERIAL=y
CONFIG_SERIAL_8250=y
@@ -153,7 +140,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
CONFIG_HW_RANDOM=y
-CONFIG_RTC=y
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_VIAPRO=m
@@ -167,9 +153,6 @@ CONFIG_SOUND=y
CONFIG_SND=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_VIA82XX=m
CONFIG_HIDRAW=y
# CONFIG_USB_HID is not set
@@ -183,7 +166,6 @@ CONFIG_USB_WUSB_CBAF=m
CONFIG_USB_C67X00_HCD=m
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_ISP1760=m
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=m
CONFIG_USB_R8A66597_HCD=m
@@ -194,16 +176,13 @@ CONFIG_USB_TMC=m
CONFIG_USB_STORAGE=y
CONFIG_USB_STORAGE_ONETOUCH=y
CONFIG_USB_STORAGE_CYPRESS_ATACB=y
+CONFIG_USB_ISP1760=m
CONFIG_USB_SEVSEG=m
CONFIG_USB_ISIGHTFW=m
CONFIG_UIO=m
CONFIG_UIO_CIF=m
CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XIP=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_EXT4_FS=m
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_REISERFS_FS=m
@@ -223,33 +202,22 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_OMFS_FS=m
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFSD=m
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
-CONFIG_SMB_NLS_REMOTE="cp936"
CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_CIFS_DEBUG2=y
-CONFIG_CIFS_EXPERIMENTAL=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_936=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_DEBUG_FS=y
-CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_AUTHENC=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_PCBC=m
@@ -266,3 +234,4 @@ CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC7=m
+# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/mips/configs/gcw0_defconfig b/arch/mips/configs/gcw0_defconfig
index 99ac1fa3b35f..a3e3eb3c5a8b 100644
--- a/arch/mips/configs/gcw0_defconfig
+++ b/arch/mips/configs/gcw0_defconfig
@@ -1,14 +1,14 @@
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_EMBEDDED=y
CONFIG_MACH_INGENIC=y
CONFIG_JZ4770_GCW0=y
CONFIG_HIGHMEM=y
-# CONFIG_BOUNCE is not set
-CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_SECCOMP is not set
-CONFIG_NO_HZ_IDLE=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_EMBEDDED=y
-# CONFIG_BLK_DEV_BSG is not set
# CONFIG_SUSPEND is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BOUNCE is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/mips/configs/generic_defconfig b/arch/mips/configs/generic_defconfig
index 684c9dcba126..7c138dab87df 100644
--- a/arch/mips/configs/generic_defconfig
+++ b/arch/mips/configs/generic_defconfig
@@ -1,10 +1,3 @@
-CONFIG_MIPS_GENERIC=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_MIPS_CPS=y
-CONFIG_CPU_HAS_MSA=y
-CONFIG_HIGHMEM=y
-CONFIG_NR_CPUS=16
-CONFIG_MIPS_O32_FP64_SUPPORT=y
CONFIG_SYSVIPC=y
CONFIG_NO_HZ_IDLE=y
CONFIG_IKCONFIG=y
@@ -28,7 +21,11 @@ CONFIG_USERFAULTFD=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
-CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_MIPS_CPS=y
+CONFIG_HIGHMEM=y
+CONFIG_NR_CPUS=16
+CONFIG_MIPS_O32_FP64_SUPPORT=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_TRIM_UNUSED_KSYMS=y
@@ -43,7 +40,6 @@ CONFIG_NETFILTER=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_SCSI=y
-# CONFIG_SERIO is not set
CONFIG_HW_RANDOM=y
# CONFIG_HWMON is not set
CONFIG_MFD_SYSCON=y
@@ -79,6 +75,12 @@ CONFIG_NFS_V4=y
CONFIG_NFS_V4_1=y
CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_REDUCED=y
@@ -87,9 +89,3 @@ CONFIG_DEBUG_FS=y
# CONFIG_FTRACE is not set
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon"
-# CONFIG_XZ_DEC_X86 is not set
-# CONFIG_XZ_DEC_POWERPC is not set
-# CONFIG_XZ_DEC_IA64 is not set
-# CONFIG_XZ_DEC_ARM is not set
-# CONFIG_XZ_DEC_ARMTHUMB is not set
-# CONFIG_XZ_DEC_SPARC is not set
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index 55438fc9991e..9d9af5f923c3 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -1,22 +1,21 @@
-CONFIG_MIPS_ALCHEMY=y
-CONFIG_MIPS_GPR=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
+CONFIG_MIPS_ALCHEMY=y
+CONFIG_MIPS_GPR=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
+CONFIG_PARTITION_ADVANCED=y
CONFIG_BINFMT_MISC=m
CONFIG_NET=y
CONFIG_PACKET=y
@@ -36,7 +35,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
@@ -59,13 +57,11 @@ CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
@@ -93,7 +89,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
CONFIG_IP_DCCP=m
CONFIG_IP_SCTP=m
CONFIG_TIPC=m
@@ -106,14 +101,12 @@ CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_DECNET=m
CONFIG_LLC2=m
-CONFIG_IPX=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_X25=m
CONFIG_LAPB=m
-CONFIG_WAN_ROUTER=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
@@ -173,26 +166,50 @@ CONFIG_TIFM_CORE=m
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=m
CONFIG_SCSI_FC_ATTRS=m
CONFIG_SCSI_ISCSI_ATTRS=m
CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_MARVELL_PHY=m
+CONFIG_NET_FC=y
+CONFIG_NETCONSOLE=m
+CONFIG_ATM_TCP=m
+CONFIG_ATM_LANAI=m
+CONFIG_ATM_ENI=m
+CONFIG_ATM_FIRESTREAM=m
+CONFIG_ATM_ZATM=m
+CONFIG_ATM_NICSTAR=m
+CONFIG_ATM_IDT77252=m
+CONFIG_ATM_AMBASSADOR=m
+CONFIG_ATM_HORIZON=m
+CONFIG_ATM_IA=m
+CONFIG_ATM_FORE200E=m
+CONFIG_ATM_HE=m
+CONFIG_ATM_HE_USE_SUNI=y
+CONFIG_MIPS_AU1X00_ENET=y
+CONFIG_CICADA_PHY=m
CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
CONFIG_SMSC_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_MIPS_AU1X00_ENET=y
-CONFIG_ATH_COMMON=y
+CONFIG_VITESSE_PHY=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
CONFIG_ATH_DEBUG=y
CONFIG_ATH5K=y
CONFIG_ATH5K_DEBUG=y
@@ -212,41 +229,8 @@ CONFIG_DSCC4=m
CONFIG_DSCC4_PCISYNC=y
CONFIG_DSCC4_PCI_RST=y
CONFIG_DLCI=m
-CONFIG_WAN_ROUTER_DRIVERS=m
-CONFIG_CYCLADES_SYNC=m
-CONFIG_CYCLOMX_X25=y
CONFIG_LAPBETHER=m
CONFIG_X25_ASY=m
-CONFIG_ATM_TCP=m
-CONFIG_ATM_LANAI=m
-CONFIG_ATM_ENI=m
-CONFIG_ATM_FIRESTREAM=m
-CONFIG_ATM_ZATM=m
-CONFIG_ATM_NICSTAR=m
-CONFIG_ATM_IDT77252=m
-CONFIG_ATM_AMBASSADOR=m
-CONFIG_ATM_HORIZON=m
-CONFIG_ATM_IA=m
-CONFIG_ATM_FORE200E=m
-CONFIG_ATM_HE=m
-CONFIG_ATM_HE_USE_SUNI=y
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_PPPOATM=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NET_FC=y
-CONFIG_NETCONSOLE=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -258,7 +242,6 @@ CONFIG_HW_RANDOM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_GPIO=y
-CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_SENSORS_LM83=y
CONFIG_WATCHDOG=y
@@ -283,7 +266,6 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=m
CONFIG_USB_SERIAL=y
-CONFIG_USB_EZUSB=y
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_SIERRAWIRELESS=y
CONFIG_LEDS_GPIO=y
@@ -304,26 +286,16 @@ CONFIG_JFFS2_FS=y
CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_JFFS2_RUBIN=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs rw ip=auto"
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -336,3 +308,7 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs rw ip=auto"
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 7ddfb4ef9479..ff40fbc2f439 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -1,35 +1,28 @@
-CONFIG_SGI_IP22=y
-CONFIG_ARC_CONSOLE=y
-CONFIG_CPU_R5000=y
+CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_1000=y
CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_RELAY=y
CONFIG_NAMESPACES=y
-CONFIG_UTS_NS=y
-CONFIG_IPC_NS=y
CONFIG_USER_NS=y
-CONFIG_PID_NS=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_RELAY=y
CONFIG_EXPERT=y
-# CONFIG_HOTPLUG is not set
-# CONFIG_PCSPKR_PLATFORM is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
+CONFIG_SGI_IP22=y
+CONFIG_ARC_CONSOLE=y
+CONFIG_CPU_R5000=y
+CONFIG_HZ_1000=y
+# CONFIG_SUSPEND is not set
+CONFIG_PM=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
CONFIG_BINFMT_MISC=m
-CONFIG_PM=y
-# CONFIG_SUSPEND is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -62,12 +55,9 @@ CONFIG_IPV6_MROUTE=y
CONFIG_IPV6_PIMSM_V2=y
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -77,7 +67,6 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
@@ -136,21 +125,12 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
-CONFIG_IP_VS_FTP=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -159,8 +139,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
@@ -222,23 +200,22 @@ CONFIG_SCSI_SPI_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_SGIWD93_SCSI=y
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
-CONFIG_MACVLAN=m
+CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
+CONFIG_MACVLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
+CONFIG_SGISEEQ=y
+CONFIG_SMC91X=m
+CONFIG_MDIO_BITBANG=m
CONFIG_PHYLIB=m
-CONFIG_MARVELL_PHY=m
+CONFIG_CICADA_PHY=m
CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
CONFIG_REALTEK_PHY=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=m
-CONFIG_SGISEEQ=y
CONFIG_HOSTAP=m
CONFIG_INPUT_MOUSEDEV=m
CONFIG_MOUSE_PS2=m
@@ -261,7 +238,6 @@ CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_VGA16 is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_HIDRAW=y
-CONFIG_HID_PID=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
CONFIG_RTC_DRV_DS1286=y
@@ -269,9 +245,6 @@ CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=m
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
CONFIG_QUOTA=y
@@ -294,18 +267,13 @@ CONFIG_MINIX_FS=m
CONFIG_OMFS_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
CONFIG_NFSD_V3_ACL=y
-CONFIG_RPCSEC_GSS_KRB5=m
-CONFIG_SMB_FS=m
-CONFIG_SMB_NLS_DEFAULT=y
CONFIG_CIFS=m
CONFIG_CIFS_UPCALL=y
CONFIG_CODA_FS=m
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_CODEPAGE_775=m
@@ -344,13 +312,8 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_KEYS=y
-CONFIG_CRYPTO_FIPS=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
@@ -358,13 +321,10 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -382,4 +342,4 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_T10DIF=m
-CONFIG_CRC32=m
+CONFIG_DEBUG_MEMORY_INIT=y
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 91a9c13e2c82..81c47e18131b 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -1,32 +1,28 @@
-CONFIG_SGI_IP27=y
-CONFIG_NUMA=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
-CONFIG_SMP=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_1000=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
CONFIG_RELAY=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_SGI_IP27=y
+CONFIG_NUMA=y
+CONFIG_SMP=y
+CONFIG_HZ_1000=y
CONFIG_PCI=y
-CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
-CONFIG_MIPS32_COMPAT=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
CONFIG_PM=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -41,7 +37,6 @@ CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
@@ -95,12 +90,10 @@ CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_CFG80211=m
CONFIG_MAC80211=m
-CONFIG_MAC80211_RC_PID=y
CONFIG_RFKILL=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_OSD=m
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_SCSI=y
@@ -115,7 +108,6 @@ CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_SPI_ATTRS=y
CONFIG_SCSI_FC_ATTRS=y
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
CONFIG_SCSI_CXGB3_ISCSI=m
CONFIG_SCSI_BNX2_ISCSI=m
CONFIG_BE2ISCSI=m
@@ -160,69 +152,56 @@ CONFIG_DM_UEVENT=y
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_VETH=m
-CONFIG_PHYLIB=y
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_ICPLUS_PHY=m
-CONFIG_REALTEK_PHY=m
-CONFIG_NATIONAL_PHY=m
-CONFIG_STE10XP=m
-CONFIG_LSI_ET1011C_PHY=m
-CONFIG_MDIO_BITBANG=m
-CONFIG_NET_ETHERNET=y
-CONFIG_AX88796=m
-CONFIG_AX88796_93CX6=y
-CONFIG_SGI_IOC3_ETH=y
-CONFIG_SMC91X=m
-CONFIG_ETHOC=m
-CONFIG_SMSC911X=m
-CONFIG_DNET=m
-CONFIG_B44=m
-CONFIG_KS8851_MLL=m
CONFIG_ATL2=m
-CONFIG_E1000E=m
-CONFIG_IP1000=m
-CONFIG_IGB=m
-CONFIG_IGBVF=m
-CONFIG_VIA_VELOCITY=m
-CONFIG_QLA3XXX=m
CONFIG_ATL1E=m
CONFIG_ATL1C=m
-CONFIG_JME=m
+CONFIG_B44=m
+CONFIG_BNX2X=m
CONFIG_ENIC=m
+CONFIG_DNET=m
+CONFIG_BE2NET=m
+CONFIG_E1000E=m
+CONFIG_IGB=m
+CONFIG_IGBVF=m
CONFIG_IXGBE=m
+CONFIG_JME=m
+CONFIG_MLX4_EN=m
+# CONFIG_MLX4_DEBUG is not set
+CONFIG_KS8851_MLL=m
CONFIG_VXGE=m
+CONFIG_AX88796=m
+CONFIG_AX88796_93CX6=y
+CONFIG_ETHOC=m
+CONFIG_QLA3XXX=m
CONFIG_NETXEN_NIC=m
+CONFIG_SFC=m
+CONFIG_SGI_IOC3_ETH=y
+CONFIG_SMC91X=m
+CONFIG_SMSC911X=m
CONFIG_NIU=m
-CONFIG_MLX4_EN=m
-# CONFIG_MLX4_DEBUG is not set
CONFIG_TEHUTI=m
-CONFIG_BNX2X=m
-CONFIG_SFC=m
-CONFIG_BE2NET=m
-CONFIG_LIBERTAS_THINFIRM=m
-CONFIG_ATMEL=m
-CONFIG_PCI_ATMEL=m
-CONFIG_PRISM54=m
-CONFIG_RTL8180=m
+CONFIG_VIA_VELOCITY=m
+CONFIG_PHYLIB=y
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_NATIONAL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_STE10XP=m
+CONFIG_VITESSE_PHY=m
CONFIG_ADM8211=m
-CONFIG_MWL8K=m
-CONFIG_ATH_COMMON=m
CONFIG_ATH5K=m
CONFIG_ATH9K=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
CONFIG_B43=m
CONFIG_B43LEGACY=m
# CONFIG_B43LEGACY_DEBUG is not set
-CONFIG_HOSTAP=m
-CONFIG_HOSTAP_FIRMWARE=y
-CONFIG_HOSTAP_FIRMWARE_NVRAM=y
-CONFIG_HOSTAP_PLX=m
-CONFIG_HOSTAP_PCI=m
CONFIG_IPW2100=m
CONFIG_IPW2100_MONITOR=y
CONFIG_IPW2100_DEBUG=y
@@ -231,12 +210,14 @@ CONFIG_IPW2200_MONITOR=y
CONFIG_IPW2200_PROMISCUOUS=y
CONFIG_IPW2200_QOS=y
CONFIG_IPW2200_DEBUG=y
-CONFIG_IWLWIFI=m
-CONFIG_IWLAGN=m
-CONFIG_IWL4965=y
-CONFIG_IWL5000=y
+CONFIG_IWL4965=m
CONFIG_IWL3945=m
-CONFIG_LIBERTAS=m
+CONFIG_IWLWIFI=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
CONFIG_HERMES=m
# CONFIG_HERMES_CACHE_FW_ON_INIT is not set
CONFIG_PLX_HERMES=m
@@ -244,13 +225,18 @@ CONFIG_TMD_HERMES=m
CONFIG_NORTEL_HERMES=m
CONFIG_P54_COMMON=m
CONFIG_P54_PCI=m
+CONFIG_PRISM54=m
+CONFIG_LIBERTAS=m
+CONFIG_LIBERTAS_THINFIRM=m
+CONFIG_MWL8K=m
CONFIG_RT2X00=m
CONFIG_RT2400PCI=m
CONFIG_RT2500PCI=m
CONFIG_RT61PCI=m
CONFIG_RT2800PCI=m
-CONFIG_WL12XX=m
+CONFIG_RTL8180=m
CONFIG_WL1251=m
+CONFIG_WL12XX=m
# CONFIG_INPUT is not set
CONFIG_SERIO_LIBPS2=m
CONFIG_SERIO_RAW=m
@@ -262,7 +248,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_HW_RANDOM_TIMERIOMEM=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_ALI1535=m
@@ -285,7 +270,6 @@ CONFIG_I2C_SIMTEC=m
CONFIG_I2C_PARPORT_LIGHT=m
CONFIG_I2C_TAOS_EVM=m
CONFIG_I2C_STUB=m
-CONFIG_PPS=m
# CONFIG_HWMON is not set
CONFIG_THERMAL=m
CONFIG_MFD_PCF50633=m
@@ -310,12 +294,8 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
CONFIG_XFS_FS=m
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
@@ -334,17 +314,8 @@ CONFIG_SQUASHFS=m
CONFIG_OMFS_FS=m
CONFIG_EXOFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_RPCSEC_GSS_KRB5=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_DLM=m
-CONFIG_KEYS=y
CONFIG_SECURITYFS=y
-CONFIG_CRYPTO_FIPS=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -357,7 +328,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -374,5 +344,4 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRYPTO_DEV_HIFN_795X=m
CONFIG_CRC_T10DIF=m
diff --git a/arch/mips/configs/ip28_defconfig b/arch/mips/configs/ip28_defconfig
index d0a4c2cfacf8..0921ef38e9fb 100644
--- a/arch/mips/configs/ip28_defconfig
+++ b/arch/mips/configs/ip28_defconfig
@@ -1,26 +1,24 @@
-CONFIG_SGI_IP28=y
-CONFIG_ARC_CONSOLE=y
-CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_SYSVIPC=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_HOTPLUG is not set
CONFIG_SLAB=y
+CONFIG_SGI_IP28=y
+CONFIG_ARC_CONSOLE=y
+CONFIG_EISA=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+# CONFIG_SUSPEND is not set
+CONFIG_PM=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_EISA=y
-CONFIG_MIPS32_COMPAT=y
-CONFIG_MIPS32_O32=y
-CONFIG_MIPS32_N32=y
-CONFIG_PM=y
-# CONFIG_SUSPEND is not set
+CONFIG_PARTITION_ADVANCED=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -43,7 +41,6 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SGIWD93_SCSI=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
-CONFIG_NET_ETHERNET=y
CONFIG_SGISEEQ=y
# CONFIG_MOUSE_PS2_ALPS is not set
# CONFIG_MOUSE_PS2_SYNAPTICS is not set
@@ -65,11 +62,8 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_CRYPTO_MANAGER=y
# CONFIG_CRYPTO_HW is not set
-# CONFIG_CRC32 is not set
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/ip32_defconfig b/arch/mips/configs/ip32_defconfig
index ebff297328ae..8f6d8af2e3c0 100644
--- a/arch/mips/configs/ip32_defconfig
+++ b/arch/mips/configs/ip32_defconfig
@@ -1,26 +1,25 @@
-CONFIG_SGI_IP32=y
-# CONFIG_SECCOMP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
CONFIG_AUDIT=y
+CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_RELAY=y
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
+CONFIG_SGI_IP32=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
CONFIG_BINFMT_MISC=y
-CONFIG_MIPS32_COMPAT=y
-CONFIG_MIPS32_O32=y
-CONFIG_MIPS32_N32=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -33,7 +32,6 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
@@ -56,24 +54,20 @@ CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_SAS_LIBSAS=y
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
CONFIG_SCSI_AIC7XXX=y
CONFIG_AIC7XXX_RESET_DELAY_MS=15000
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_SGI_O2MACE_ETH=y
+CONFIG_DUMMY=m
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
CONFIG_TULIP_MMIO=y
+CONFIG_SGI_O2MACE_ETH=y
CONFIG_INPUT_EVDEV=m
CONFIG_SERIO_MACEPS2=y
CONFIG_SERIO_RAW=y
@@ -87,9 +81,6 @@ CONFIG_FIRMWARE_EDID=y
CONFIG_FB_GBE=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -100,7 +91,6 @@ CONFIG_RTC_CLASS=y
# CONFIG_RTC_INTF_SYSFS is not set
# CONFIG_RTC_INTF_PROC is not set
CONFIG_RTC_DRV_DS1685_FAMILY=y
-CONFIG_RTC_DRV_DS1685=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -124,13 +114,10 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CONFIGFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_SGI_PARTITION=y
CONFIG_NLS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_737=m
@@ -170,7 +157,6 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-CONFIG_MAGIC_SYSRQ=y
CONFIG_KEYS=y
CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_CBC=y
@@ -186,7 +172,6 @@ CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_TGR192=y
CONFIG_CRYPTO_WP512=y
-CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_ANUBIS=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_BLOWFISH=y
@@ -200,7 +185,9 @@ CONFIG_CRYPTO_SERPENT=y
CONFIG_CRYPTO_TEA=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_DEFLATE=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=y
CONFIG_CRC_T10DIF=y
CONFIG_LIBCRC32C=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_MAGIC_SYSRQ=y
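
One way to check that a reshuffled defconfig like the one above is behavior-neutral is to expand the old and new versions into full .config files and compare them; a sketch using the in-tree diffconfig helper (the git stash steps stand in for whatever method restores the old file):

    git stash                                    # restore the old defconfig
    make ARCH=mips ip32_defconfig && cp .config config.old
    git stash pop                                # back to the updated defconfig
    make ARCH=mips ip32_defconfig
    scripts/diffconfig config.old .config        # a pure regeneration prints nothing
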
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index 9ad1c94376c8..328d4dfeb4cb 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -1,22 +1,20 @@
-CONFIG_MACH_JAZZ=y
-CONFIG_OLIVETTI_M700=y
-CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_SLAB=y
+CONFIG_MACH_JAZZ=y
+CONFIG_OLIVETTI_M700=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
CONFIG_BINFMT_MISC=m
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=m
CONFIG_UNIX=y
@@ -25,8 +23,6 @@ CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -41,7 +37,6 @@ CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
@@ -83,20 +78,12 @@ CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -105,7 +92,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -140,7 +126,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
CONFIG_BRIDGE=m
CONFIG_DECNET=m
CONFIG_NET_SCHED=y
@@ -230,24 +215,20 @@ CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
CONFIG_TUN=m
+CONFIG_MIPS_JAZZ_SONIC=y
+CONFIG_NE2000=m
CONFIG_PHYLIB=m
-CONFIG_MARVELL_PHY=m
+CONFIG_CICADA_PHY=m
CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
CONFIG_SMSC_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_MIPS_JAZZ_SONIC=y
-CONFIG_NET_ISA=y
-CONFIG_NE2000=m
-CONFIG_NET_PCI=y
+CONFIG_VITESSE_PHY=m
CONFIG_PLIP=m
CONFIG_INPUT_FF_MEMLESS=m
CONFIG_SERIO_PARKBD=m
@@ -297,25 +278,11 @@ CONFIG_ROMFS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_RPCSEC_GSS_KRB5=m
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=m
CONFIG_CIFS=m
-CONFIG_NCP_FS=m
-CONFIG_NCPFS_PACKET_SIGNING=y
-CONFIG_NCPFS_IOCTL_LOCKING=y
-CONFIG_NCPFS_STRONG=y
-CONFIG_NCPFS_NFS_NS=y
-CONFIG_NCPFS_OS2_NS=y
-CONFIG_NCPFS_SMALLDOS=y
-CONFIG_NCPFS_NLS=y
-CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
CONFIG_AFS_FS=m
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_CODEPAGE_775=m
@@ -354,21 +321,14 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST6=m
diff --git a/arch/mips/configs/jmr3927_defconfig b/arch/mips/configs/jmr3927_defconfig
index af12281a5c33..24b96faf9b4e 100644
--- a/arch/mips/configs/jmr3927_defconfig
+++ b/arch/mips/configs/jmr3927_defconfig
@@ -1,13 +1,10 @@
-CONFIG_MACH_TX39XX=y
-CONFIG_TOSHIBA_JMR3927=y
-# CONFIG_SECCOMP is not set
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_EXPERT=y
-# CONFIG_HOTPLUG is not set
-# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
+CONFIG_MACH_TX39XX=y
+CONFIG_TOSHIBA_JMR3927=y
+# CONFIG_SECCOMP is not set
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -27,16 +24,14 @@ CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
CONFIG_TC35815=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_UNIX98_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_TXX9_CONSOLE=y
CONFIG_SERIAL_TXX9_STDSERIAL=y
-# CONFIG_UNIX98_PTYS is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
diff --git a/arch/mips/configs/lasat_defconfig b/arch/mips/configs/lasat_defconfig
index 947a35c7c46c..c66ca3785655 100644
--- a/arch/mips/configs/lasat_defconfig
+++ b/arch/mips/configs/lasat_defconfig
@@ -1,25 +1,23 @@
-CONFIG_LASAT=y
-CONFIG_PICVUE=y
-CONFIG_PICVUE_PROC=y
-CONFIG_DS1603=y
-CONFIG_LASAT_SYSCTL=y
-CONFIG_HZ_1000=y
-# CONFIG_SECCOMP is not set
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_KALLSYMS is not set
-# CONFIG_HOTPLUG is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
# CONFIG_TIMERFD is not set
# CONFIG_EVENTFD is not set
+# CONFIG_KALLSYMS is not set
CONFIG_SLAB=y
+CONFIG_LASAT=y
+CONFIG_PICVUE=y
+CONFIG_PICVUE_PROC=y
+CONFIG_DS1603=y
+CONFIG_LASAT_SYSCTL=y
+CONFIG_HZ_1000=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -39,10 +37,7 @@ CONFIG_PATA_CMD64X=y
CONFIG_ATA_GENERIC=y
CONFIG_PATA_LEGACY=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
CONFIG_PCNET32=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_SERIO_RAW=y
@@ -55,7 +50,6 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index 02be95c1b712..300127b0f5b7 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -1,48 +1,33 @@
-CONFIG_MACH_LOONGSON64=y
-CONFIG_LEMOTE_MACH2F=y
-CONFIG_CS5536_MFGPT=y
-CONFIG_64BIT=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
-CONFIG_KEXEC=y
-# CONFIG_SECCOMP is not set
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_AUDIT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_PROFILING=y
+CONFIG_MACH_LOONGSON64=y
+CONFIG_LEMOTE_MACH2F=y
+CONFIG_KEXEC=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_STD_PARTITION="/dev/hda3"
CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_IOSCHED_DEADLINE=m
-CONFIG_PCI=y
CONFIG_BINFMT_MISC=m
-CONFIG_MIPS32_COMPAT=y
-CONFIG_MIPS32_O32=y
-CONFIG_MIPS32_N32=y
-CONFIG_PM=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_STD_PARTITION="/dev/hda3"
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_STAT=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=m
-CONFIG_CPU_FREQ_GOV_USERSPACE=m
-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
-CONFIG_LOONGSON2_CPUFREQ=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -55,11 +40,9 @@ CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-CONFIG_ARPD=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
@@ -76,7 +59,6 @@ CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
-CONFIG_IPX=m
CONFIG_NET_SCHED=y
CONFIG_NET_EMATCH=y
CONFIG_NET_CLS_ACT=y
@@ -91,8 +73,6 @@ CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIVHCI=m
CONFIG_CFG80211=m
-CONFIG_LIB80211=m
-CONFIG_LIB80211_DEBUG=y
CONFIG_MAC80211=m
CONFIG_MAC80211_LEDS=y
CONFIG_RFKILL=m
@@ -130,18 +110,14 @@ CONFIG_DM_DELAY=m
CONFIG_DM_UEVENT=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=m
CONFIG_TUN=m
CONFIG_VETH=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
CONFIG_8139TOO=y
# CONFIG_8139TOO_PIO is not set
CONFIG_R8169=y
-CONFIG_R8169_VLAN=y
CONFIG_USB_USBNET=m
CONFIG_USB_NET_CDC_EEM=m
-CONFIG_NETCONSOLE=m
-CONFIG_NETCONSOLE_DYNAMIC=y
CONFIG_INPUT_POLLDEV=m
CONFIG_INPUT_EVDEV=y
# CONFIG_MOUSE_PS2_ALPS is not set
@@ -149,6 +125,7 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_MOUSE_PS2_TRACKPOINT is not set
CONFIG_MOUSE_APPLETOUCH=m
# CONFIG_SERIO_SERPORT is not set
+CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=m
# CONFIG_SERIAL_8250_PCI is not set
@@ -156,50 +133,10 @@ CONFIG_SERIAL_8250_NR_UARTS=16
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_FOURPORT=y
-CONFIG_LEGACY_PTY_COUNT=16
CONFIG_HW_RANDOM=y
-CONFIG_RTC=y
CONFIG_GPIO_LOONGSON=y
CONFIG_THERMAL=y
CONFIG_MEDIA_SUPPORT=m
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_HELPER_CHIPS_AUTO=y
-CONFIG_VIDEO_VIVI=m
-CONFIG_USB_VIDEO_CLASS=m
-CONFIG_USB_M5602=m
-CONFIG_USB_STV06XX=m
-CONFIG_USB_GSPCA_CONEX=m
-CONFIG_USB_GSPCA_ETOMS=m
-CONFIG_USB_GSPCA_FINEPIX=m
-CONFIG_USB_GSPCA_MARS=m
-CONFIG_USB_GSPCA_MR97310A=m
-CONFIG_USB_GSPCA_OV519=m
-CONFIG_USB_GSPCA_OV534=m
-CONFIG_USB_GSPCA_PAC207=m
-CONFIG_USB_GSPCA_PAC7311=m
-CONFIG_USB_GSPCA_SN9C20X=m
-CONFIG_USB_GSPCA_SONIXB=m
-CONFIG_USB_GSPCA_SONIXJ=m
-CONFIG_USB_GSPCA_SPCA500=m
-CONFIG_USB_GSPCA_SPCA501=m
-CONFIG_USB_GSPCA_SPCA505=m
-CONFIG_USB_GSPCA_SPCA506=m
-CONFIG_USB_GSPCA_SPCA508=m
-CONFIG_USB_GSPCA_SPCA561=m
-CONFIG_USB_GSPCA_SQ905=m
-CONFIG_USB_GSPCA_SQ905C=m
-CONFIG_USB_GSPCA_STK014=m
-CONFIG_USB_GSPCA_SUNPLUS=m
-CONFIG_USB_GSPCA_T613=m
-CONFIG_USB_GSPCA_TV8532=m
-CONFIG_USB_GSPCA_VC032X=m
-CONFIG_USB_GSPCA_ZC3XX=m
-CONFIG_USB_ET61X251=m
-CONFIG_USB_SN9C102=m
-CONFIG_USB_ZR364XX=m
-CONFIG_USB_STKWEBCAM=m
-CONFIG_USB_S2255=m
-# CONFIG_RADIO_ADAPTERS is not set
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
@@ -214,27 +151,14 @@ CONFIG_BACKLIGHT_GENERIC=m
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_6x11=y
-CONFIG_FONT_7x14=y
-CONFIG_FONT_PEARL_8x8=y
-CONFIG_FONT_ACORN_8x8=y
-CONFIG_FONT_MINI_4x6=y
-CONFIG_FONT_SUN8x16=y
-CONFIG_FONT_SUN12x22=y
-CONFIG_FONT_10x18=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
CONFIG_SOUND=m
CONFIG_SND=m
+CONFIG_SND_HRTIMER=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
-CONFIG_SND_HRTIMER=m
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_SERIAL_U16550=m
@@ -247,7 +171,6 @@ CONFIG_SND_USB_AUDIO=m
CONFIG_SND_USB_CAIAQ=m
CONFIG_SND_USB_CAIAQ_INPUT=y
CONFIG_HIDRAW=y
-CONFIG_USB_HIDDEV=y
CONFIG_HID_A4TECH=m
CONFIG_HID_APPLE=m
CONFIG_HID_BELKIN=m
@@ -283,6 +206,7 @@ CONFIG_THRUSTMASTER_FF=y
CONFIG_HID_WACOM=m
CONFIG_HID_ZEROPLUS=m
CONFIG_ZEROPLUS_FF=y
+CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_DYNAMIC_MINORS=y
CONFIG_USB_OTG_WHITELIST=y
@@ -292,8 +216,6 @@ CONFIG_USB_EHCI_ROOT_HUB_TT=y
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=m
-CONFIG_USB_WHCI_HCD=m
-CONFIG_USB_HWA_HCD=m
CONFIG_USB_ACM=m
CONFIG_USB_PRINTER=m
CONFIG_USB_WDM=m
@@ -309,18 +231,13 @@ CONFIG_USB_STORAGE_ALAUDA=m
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_M66592=y
CONFIG_MMC=m
CONFIG_LEDS_CLASS=y
CONFIG_STAGING=y
-# CONFIG_STAGING_EXCLUDE_BUILD is not set
-CONFIG_FB_SM7XX=y
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_PROC_INFO=y
CONFIG_REISERFS_FS_XATTR=y
@@ -349,7 +266,6 @@ CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_EMBEDDED=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
@@ -393,32 +309,19 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=y
-CONFIG_PRINTK_TIME=y
-CONFIG_FRAME_WARN=1024
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_DEBUG_FS=y
-CONFIG_KEYS=y
-CONFIG_CRYPTO_FIPS=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_HMAC=m
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA1=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -435,4 +338,16 @@ CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_T10DIF=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_6x11=y
+CONFIG_FONT_7x14=y
+CONFIG_FONT_PEARL_8x8=y
+CONFIG_FONT_ACORN_8x8=y
+CONFIG_FONT_MINI_4x6=y
+CONFIG_FONT_10x18=y
+CONFIG_FONT_SUN8x16=y
+CONFIG_FONT_SUN12x22=y
+CONFIG_PRINTK_TIME=y
+CONFIG_FRAME_WARN=1024
+CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/mips/configs/loongson1b_defconfig b/arch/mips/configs/loongson1b_defconfig
index 914c867887bd..b064d68a5424 100644
--- a/arch/mips/configs/loongson1b_defconfig
+++ b/arch/mips/configs/loongson1b_defconfig
@@ -1,10 +1,8 @@
-CONFIG_MACH_LOONGSON32=y
-CONFIG_PREEMPT=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_XZ=y
CONFIG_SYSVIPC=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
@@ -15,13 +13,15 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EXPERT=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
+CONFIG_MACH_LOONGSON32=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-# CONFIG_SUSPEND is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -43,7 +43,6 @@ CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_LOONGSON1=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_SCSI=m
@@ -67,7 +66,6 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_LEGACY_PTY_COUNT=8
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
@@ -116,8 +114,9 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
+# CONFIG_CRYPTO_ECHAINIV is not set
+# CONFIG_CRYPTO_HW is not set
CONFIG_DYNAMIC_DEBUG=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
@@ -125,5 +124,3 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
# CONFIG_EARLY_PRINTK is not set
-# CONFIG_CRYPTO_ECHAINIV is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/loongson1c_defconfig b/arch/mips/configs/loongson1c_defconfig
index 68e42eff908e..5d76559b56cd 100644
--- a/arch/mips/configs/loongson1c_defconfig
+++ b/arch/mips/configs/loongson1c_defconfig
@@ -1,11 +1,8 @@
-CONFIG_MACH_LOONGSON32=y
-CONFIG_LOONGSON1_LS1C=y
-CONFIG_PREEMPT=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_XZ=y
CONFIG_SYSVIPC=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
@@ -16,13 +13,16 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EXPERT=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
+CONFIG_MACH_LOONGSON32=y
+CONFIG_LOONGSON1_LS1C=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-# CONFIG_SUSPEND is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -44,7 +44,6 @@ CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_LOONGSON1=y
CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_SCSI=m
@@ -68,7 +67,6 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_LEGACY_PTY_COUNT=8
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
@@ -117,8 +115,9 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
+# CONFIG_CRYPTO_ECHAINIV is not set
+# CONFIG_CRYPTO_HW is not set
CONFIG_DYNAMIC_DEBUG=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
@@ -126,5 +125,3 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
# CONFIG_EARLY_PRINTK is not set
-# CONFIG_CRYPTO_ECHAINIV is not set
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 324dfee23dfb..1322adb705c8 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -1,15 +1,3 @@
-CONFIG_MACH_LOONGSON64=y
-CONFIG_SWIOTLB=y
-CONFIG_LOONGSON_MACH3X=y
-CONFIG_CPU_LOONGSON3=y
-CONFIG_64BIT=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_KSM=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=4
-CONFIG_HZ_256=y
-CONFIG_PREEMPT=y
-CONFIG_KEXEC=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_LZMA=y
CONFIG_SYSVIPC=y
@@ -17,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -24,40 +13,38 @@ CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_CPUSETS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
+CONFIG_CPUSETS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
+CONFIG_MACH_LOONGSON64=y
+CONFIG_LOONGSON_MACH3X=y
+CONFIG_SMP=y
+CONFIG_HZ_256=y
+CONFIG_KEXEC=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+# CONFIG_PCIEAER is not set
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
-CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IOSCHED_DEADLINE=m
CONFIG_CFQ_GROUP_IOSCHED=y
-CONFIG_PCI=y
-CONFIG_HT_PCI=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_HOTPLUG_PCI_PCIE=y
-# CONFIG_PCIEAER is not set
-CONFIG_PCIEASPM_PERFORMANCE=y
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_SHPC=m
CONFIG_BINFMT_MISC=m
-CONFIG_MIPS32_COMPAT=y
-CONFIG_MIPS32_O32=y
-CONFIG_MIPS32_N32=y
-CONFIG_PM=y
+CONFIG_KSM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -123,7 +110,6 @@ CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=m
@@ -164,7 +150,6 @@ CONFIG_TUN=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
@@ -173,14 +158,13 @@ CONFIG_TUN=m
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_I825XX is not set
CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_IGB=y
CONFIG_IXGB=y
CONFIG_IXGBE=y
-# CONFIG_NET_VENDOR_I825XX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -188,12 +172,11 @@ CONFIG_IXGBE=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_RDC is not set
CONFIG_8139CP=m
CONFIG_8139TOO=m
CONFIG_R8169=y
-# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
@@ -215,7 +198,6 @@ CONFIG_PPPOE=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-CONFIG_ATH_CARDS=m
CONFIG_ATH9K=m
CONFIG_HOSTAP=m
CONFIG_INPUT_POLLDEV=m
@@ -296,9 +278,6 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
CONFIG_QUOTA=y
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_AUTOFS4_FS=y
@@ -327,13 +306,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_936=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_UTF8=y
-CONFIG_PRINTK_TIME=y
-CONFIG_FRAME_WARN=1024
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_MAGIC_SYSRQ=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_FTRACE is not set
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
CONFIG_SECURITY_NETWORK=y
@@ -345,7 +317,6 @@ CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -357,3 +328,10 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
+CONFIG_PRINTK_TIME=y
+CONFIG_FRAME_WARN=1024
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 81058295d35f..0ee5e677662e 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -1,9 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_NR_CPUS=8
-CONFIG_HZ_100=y
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -13,11 +7,17 @@ CONFIG_RELAY=y
CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -58,8 +58,6 @@ CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -124,7 +122,6 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -139,7 +136,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
@@ -291,26 +287,26 @@ CONFIG_CHELSIO_T3=m
CONFIG_AX88796=m
CONFIG_NETXEN_NIC=m
CONFIG_TC35815=m
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
CONFIG_ATMEL=m
CONFIG_PCI_ATMEL=m
-CONFIG_PRISM54=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
CONFIG_HOSTAP=m
CONFIG_HOSTAP_FIRMWARE=y
CONFIG_HOSTAP_FIRMWARE_NVRAM=y
CONFIG_HOSTAP_PLX=m
CONFIG_HOSTAP_PCI=m
-CONFIG_IPW2100=m
-CONFIG_IPW2100_MONITOR=y
+CONFIG_PRISM54=m
CONFIG_LIBERTAS=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_MOUSE_PS2_ELANTECH=y
@@ -331,7 +327,6 @@ CONFIG_UIO=m
CONFIG_UIO_CIF=m
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
CONFIG_REISERFS_FS=m
CONFIG_REISERFS_PROC_INFO=y
CONFIG_REISERFS_FS_XATTR=y
@@ -411,14 +406,12 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -432,4 +425,3 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 5c10cddc39d3..041bffac043b 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -1,9 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_NR_CPUS=8
-CONFIG_HZ_100=y
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -14,11 +8,21 @@ CONFIG_EXPERT=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_PCI=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
+CONFIG_VHOST_NET=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -59,8 +63,6 @@ CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -125,7 +127,6 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -140,7 +141,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
@@ -174,7 +174,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_SCTP=m
CONFIG_BRIDGE=m
@@ -219,8 +218,6 @@ CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_CLS_IND=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
-CONFIG_MAC80211_RC_PID=y
-CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_MAC80211_MESH=y
CONFIG_RFKILL=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -254,7 +251,6 @@ CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
@@ -297,32 +293,31 @@ CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
-CONFIG_VHOST_NET=m
CONFIG_PCNET32=y
CONFIG_CHELSIO_T3=m
CONFIG_AX88796=m
CONFIG_NETXEN_NIC=m
CONFIG_TC35815=m
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
CONFIG_ATMEL=m
CONFIG_PCI_ATMEL=m
-CONFIG_PRISM54=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
CONFIG_HOSTAP=m
CONFIG_HOSTAP_FIRMWARE=y
CONFIG_HOSTAP_FIRMWARE_NVRAM=y
CONFIG_HOSTAP_PLX=m
CONFIG_HOSTAP_PCI=m
-CONFIG_IPW2100=m
-CONFIG_IPW2100_MONITOR=y
+CONFIG_PRISM54=m
CONFIG_LIBERTAS=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
@@ -422,16 +417,12 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_ENABLE_DEFAULT_TRACERS=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -445,9 +436,5 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=m
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=m
-CONFIG_KVM_MIPS_DYN_TRANS=y
-CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_ENABLE_DEFAULT_TRACERS=y
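
The virtualization options that moved to the top of this file (CONFIG_VIRTUALIZATION, CONFIG_KVM, CONFIG_VHOST_NET) can also be toggled on an existing .config without hand-editing, using the in-tree scripts/config helper; a minimal sketch:

    ./scripts/config --file .config \
        --enable VIRTUALIZATION --module KVM --module VHOST_NET
    make ARCH=mips olddefconfig    # re-resolve dependencies after the edits

scripts/config accepts symbol names with or without the CONFIG_ prefix, and olddefconfig fills in any newly exposed options with their defaults.
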
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index bb694f5065f1..511065e62182 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -1,10 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_KVM_GUEST=y
-CONFIG_PAGE_SIZE_16KB=y
-# CONFIG_MIPS_MT_SMP is not set
-CONFIG_HZ_100=y
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -15,11 +8,18 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_KVM_GUEST=y
+CONFIG_PAGE_SIZE_16KB=y
+# CONFIG_MIPS_MT_SMP is not set
+CONFIG_HZ_100=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -60,8 +60,6 @@ CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -126,7 +124,6 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -141,7 +138,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
@@ -175,7 +171,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_SCTP=m
CONFIG_BRIDGE=m
@@ -220,8 +215,6 @@ CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_CLS_IND=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
-CONFIG_MAC80211_RC_PID=y
-CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_MAC80211_MESH=y
CONFIG_RFKILL=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -256,7 +249,6 @@ CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
@@ -305,26 +297,26 @@ CONFIG_CHELSIO_T3=m
CONFIG_AX88796=m
CONFIG_NETXEN_NIC=m
CONFIG_TC35815=m
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
CONFIG_ATMEL=m
CONFIG_PCI_ATMEL=m
-CONFIG_PRISM54=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
CONFIG_HOSTAP=m
CONFIG_HOSTAP_FIRMWARE=y
CONFIG_HOSTAP_FIRMWARE_NVRAM=y
CONFIG_HOSTAP_PLX=m
CONFIG_HOSTAP_PCI=m
-CONFIG_IPW2100=m
-CONFIG_IPW2100_MONITOR=y
+CONFIG_PRISM54=m
CONFIG_LIBERTAS=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
@@ -426,14 +418,12 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -447,5 +437,3 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=m
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index 5b5306b80576..299088043164 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -1,8 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R6=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_HZ_100=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
@@ -13,12 +8,17 @@ CONFIG_LOG_BUF_SHIFT=15
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R6=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -100,7 +100,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -110,10 +109,9 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
@@ -157,7 +155,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
@@ -175,12 +172,9 @@ CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -191,5 +185,5 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index 85543599448f..2b4b3a24f637 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -1,9 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_MIPS_VPE_LOADER=y
-CONFIG_MIPS_VPE_APSP_API=y
-CONFIG_HZ_100=y
CONFIG_LOCALVERSION="aprp"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -14,12 +8,19 @@ CONFIG_LOG_BUF_SHIFT=15
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_VPE_LOADER=y
+CONFIG_MIPS_VPE_APSP_API=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_100=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -101,7 +102,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -111,10 +111,9 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
@@ -157,7 +156,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
@@ -175,12 +173,9 @@ CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -191,5 +186,5 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index 067bb84ac916..425ddfd7cd78 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -1,11 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_SCHED_SMT=y
-CONFIG_MIPS_CPS=y
-CONFIG_NR_CPUS=8
-CONFIG_HZ_100=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
@@ -16,12 +8,20 @@ CONFIG_LOG_BUF_SHIFT=15
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CPS=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -101,7 +101,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -111,10 +110,9 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
@@ -159,9 +157,6 @@ CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
@@ -179,12 +174,9 @@ CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -195,5 +187,5 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index dfc78c3172a3..8beaa7ba1e52 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -1,12 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_CPU_MIPS32_3_5_FEATURES=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_SCHED_SMT=y
-CONFIG_MIPS_CPS=y
-CONFIG_NR_CPUS=8
-CONFIG_HZ_100=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
@@ -17,12 +8,21 @@ CONFIG_LOG_BUF_SHIFT=15
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPS32_3_5_FEATURES=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CPS=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -104,7 +104,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -114,10 +113,9 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
@@ -161,7 +159,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
@@ -179,12 +176,9 @@ CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -195,5 +189,5 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 50a2288c69f8..6e8b95ceb54a 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -1,7 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_HZ_100=y
CONFIG_LOCALVERSION="up"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@@ -13,12 +9,17 @@ CONFIG_LOG_BUF_SHIFT=15
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_100=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -100,7 +101,6 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -110,10 +110,9 @@ CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
@@ -156,7 +155,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
@@ -174,12 +172,9 @@ CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
-# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -190,5 +185,5 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 99a19cf5f9ba..6c026db96ff9 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -1,10 +1,3 @@
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_CPU_MIPS32_R5_FEATURES=y
-CONFIG_CPU_MIPS32_R5_XPA=y
-CONFIG_PAGE_SIZE_16KB=y
-CONFIG_HZ_100=y
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -16,11 +9,19 @@ CONFIG_RELAY=y
CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPS32_R5_FEATURES=y
+CONFIG_CPU_MIPS32_R5_XPA=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_100=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -61,8 +62,6 @@ CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -125,7 +124,6 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -140,7 +138,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
@@ -300,26 +297,26 @@ CONFIG_CHELSIO_T3=m
CONFIG_AX88796=m
CONFIG_NETXEN_NIC=m
CONFIG_TC35815=m
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
CONFIG_BROADCOM_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
CONFIG_REALTEK_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
CONFIG_ATMEL=m
CONFIG_PCI_ATMEL=m
-CONFIG_PRISM54=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
CONFIG_HOSTAP=m
CONFIG_HOSTAP_FIRMWARE=y
CONFIG_HOSTAP_FIRMWARE_NVRAM=y
CONFIG_HOSTAP_PLX=m
CONFIG_HOSTAP_PCI=m
-CONFIG_IPW2100=m
-CONFIG_IPW2100_MONITOR=y
+CONFIG_PRISM54=m
CONFIG_LIBERTAS=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_MOUSE_PS2_ELANTECH=y
@@ -425,7 +422,6 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
@@ -439,5 +435,3 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=m
diff --git a/arch/mips/configs/markeins_defconfig b/arch/mips/configs/markeins_defconfig
index 43ce6576ab1c..ae93a94f8c71 100644
--- a/arch/mips/configs/markeins_defconfig
+++ b/arch/mips/configs/markeins_defconfig
@@ -1,21 +1,19 @@
-CONFIG_NEC_MARKEINS=y
-CONFIG_HZ_1000=y
-CONFIG_PREEMPT=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_PREEMPT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
+CONFIG_NEC_MARKEINS=y
+CONFIG_HZ_1000=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
-CONFIG_PCI=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -82,20 +80,12 @@ CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -104,7 +94,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -134,23 +123,18 @@ CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_SCAN_ASYNC=y
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
CONFIG_SCSI_AIC94XX=m
# CONFIG_AIC94XX_DEBUG is not set
CONFIG_NETDEVICES=y
CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_NET_PCI=y
+CONFIG_CHELSIO_T3=m
CONFIG_NATSEMI=y
CONFIG_QLA3XXX=m
-CONFIG_CHELSIO_T3=m
CONFIG_NETXEN_NIC=m
CONFIG_PPP=m
+CONFIG_PPP_DEFLATE=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -182,20 +166,15 @@ CONFIG_JFFS2_FS=y
CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_SMB_FS=m
CONFIG_NLS_DEFAULT=""
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0,115200 mem=192m ip=bootp root=/dev/nfs rw"
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -203,3 +182,5 @@ CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttyS0,115200 mem=192m ip=bootp root=/dev/nfs rw"
diff --git a/arch/mips/configs/mips_paravirt_defconfig b/arch/mips/configs/mips_paravirt_defconfig
index accf0db1dc6f..8dc5d96a08de 100644
--- a/arch/mips/configs/mips_paravirt_defconfig
+++ b/arch/mips/configs/mips_paravirt_defconfig
@@ -1,11 +1,5 @@
-CONFIG_MIPS_PARAVIRT=y
-CONFIG_CPU_MIPS64_R2=y
-CONFIG_64BIT=y
-CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_SMP=y
-CONFIG_HZ_1000=y
-CONFIG_PREEMPT=y
CONFIG_SYSVIPC=y
+CONFIG_PREEMPT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_IKCONFIG=y
@@ -15,13 +9,18 @@ CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_MIPS_PARAVIRT=y
+CONFIG_CPU_MIPS64_R2=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_HZ_1000=y
CONFIG_PCI=y
-CONFIG_MIPS32_COMPAT=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -39,7 +38,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
-CONFIG_IPV6=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
@@ -58,9 +56,9 @@ CONFIG_VIRTIO_NET=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PHYLIB=y
-CONFIG_MARVELL_PHY=y
-CONFIG_BROADCOM_PHY=y
CONFIG_BCM87XX_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_MARVELL_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -90,13 +88,12 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_DES=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/mips/configs/mpc30x_defconfig b/arch/mips/configs/mpc30x_defconfig
index 3486b034f726..d4e038802510 100644
--- a/arch/mips/configs/mpc30x_defconfig
+++ b/arch/mips/configs/mpc30x_defconfig
@@ -1,11 +1,10 @@
-CONFIG_MACH_VR41XX=y
-CONFIG_VICTOR_MPC30X=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_VICTOR_MPC30X=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
@@ -31,7 +30,6 @@ CONFIG_ATA=y
CONFIG_PATA_LEGACY=y
CONFIG_NETDEVICES=y
CONFIG_USB_PEGASUS=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -53,4 +51,3 @@ CONFIG_CONFIGFS_FS=m
CONFIG_NFS_FS=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="mem=32M console=ttyVR0,19200 ide0=0x170,0x376,73"
-# CONFIG_CRC32 is not set
diff --git a/arch/mips/configs/msp71xx_defconfig b/arch/mips/configs/msp71xx_defconfig
index 3c8c16b10732..0fdc03fda12e 100644
--- a/arch/mips/configs/msp71xx_defconfig
+++ b/arch/mips/configs/msp71xx_defconfig
@@ -1,21 +1,21 @@
-CONFIG_PMC_MSP=y
-CONFIG_PMC_MSP7120_GW=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_PREEMPT=y
CONFIG_LOCALVERSION="-pmc"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_PREEMPT=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_SHMEM is not set
CONFIG_SLAB=y
+CONFIG_PMC_MSP=y
+CONFIG_PMC_MSP7120_GW=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_NR_CPUS=2
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_PCI=y
CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
@@ -47,18 +47,15 @@ CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_PPP=y
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -80,6 +77,3 @@ CONFIG_SQUASHFS_EMBEDDED=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_CRYPTO_NULL=y
-CONFIG_CRYPTO_AES=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index c3d0d0a6e044..16bef819fe98 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -1,31 +1,45 @@
-CONFIG_MIPS_ALCHEMY=y
-CONFIG_MIPS_MTX1=y
-CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_AUDIT=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
+CONFIG_MIPS_ALCHEMY=y
+CONFIG_MIPS_MTX1=y
CONFIG_PCI=y
CONFIG_PCCARD=m
CONFIG_YENTA=m
CONFIG_PD6729=m
CONFIG_I82092=m
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ACORN_PARTITION=y
+CONFIG_ACORN_PARTITION_ICS=y
+CONFIG_ACORN_PARTITION_RISCIX=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_SGI_PARTITION=y
+CONFIG_ULTRIX_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
CONFIG_BINFMT_MISC=m
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=m
CONFIG_UNIX=y
@@ -38,8 +52,6 @@ CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -57,7 +69,6 @@ CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
@@ -81,13 +92,11 @@ CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
@@ -128,7 +137,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
CONFIG_IP_DCCP=m
CONFIG_IP_SCTP=m
CONFIG_TIPC=m
@@ -141,14 +149,12 @@ CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_DECNET=m
CONFIG_LLC2=m
-CONFIG_IPX=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_X25=m
CONFIG_LAPB=m
-CONFIG_WAN_ROUTER=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
@@ -191,30 +197,6 @@ CONFIG_BPQETHER=m
CONFIG_BAYCOM_SER_FDX=m
CONFIG_BAYCOM_SER_HDX=m
CONFIG_YAM=m
-CONFIG_IRDA=m
-CONFIG_IRLAN=m
-CONFIG_IRNET=m
-CONFIG_IRCOMM=m
-CONFIG_IRDA_ULTRA=y
-CONFIG_IRDA_CACHE_LAST_LSAP=y
-CONFIG_IRDA_FAST_RR=y
-CONFIG_IRDA_DEBUG=y
-CONFIG_IRTTY_SIR=m
-CONFIG_DONGLE=y
-CONFIG_ESI_DONGLE=m
-CONFIG_ACTISYS_DONGLE=m
-CONFIG_TEKRAM_DONGLE=m
-CONFIG_LITELINK_DONGLE=m
-CONFIG_MA600_DONGLE=m
-CONFIG_GIRBIL_DONGLE=m
-CONFIG_MCP2120_DONGLE=m
-CONFIG_OLD_BELKIN_DONGLE=m
-CONFIG_ACT200L_DONGLE=m
-CONFIG_USB_IRDA=m
-CONFIG_SIGMATEL_FIR=m
-CONFIG_TOSHIBA_FIR=m
-CONFIG_VLSI_FIR=m
-CONFIG_MCS_FIR=m
CONFIG_BT=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
@@ -231,7 +213,6 @@ CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIDTL1=m
CONFIG_BT_HCIBT3C=m
CONFIG_BT_HCIBLUECARD=m
-CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
CONFIG_CONNECTOR=m
CONFIG_MTD=y
@@ -248,18 +229,18 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=m
CONFIG_SCSI_FC_ATTRS=m
CONFIG_SCSI_ISCSI_ATTRS=m
CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
+CONFIG_NET_FC=y
+CONFIG_NETCONSOLE=m
CONFIG_TUN=m
CONFIG_ARCNET=m
CONFIG_ARCNET_1201=m
@@ -271,20 +252,33 @@ CONFIG_ARCNET_COM90xxIO=m
CONFIG_ARCNET_RIM_I=m
CONFIG_ARCNET_COM20020=m
CONFIG_ARCNET_COM20020_PCI=m
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_HAPPYMEAL=m
-CONFIG_SUNGEM=m
-CONFIG_CASSINI=m
-CONFIG_NET_VENDOR_3COM=y
+CONFIG_ARCNET_COM20020_CS=m
+CONFIG_ATM_TCP=m
+CONFIG_ATM_LANAI=m
+CONFIG_ATM_ENI=m
+CONFIG_ATM_FIRESTREAM=m
+CONFIG_ATM_ZATM=m
+CONFIG_ATM_NICSTAR=m
+CONFIG_ATM_IDT77252=m
+CONFIG_ATM_AMBASSADOR=m
+CONFIG_ATM_HORIZON=m
+CONFIG_ATM_IA=m
+CONFIG_ATM_FORE200E=m
+CONFIG_ATM_HE=m
+CONFIG_ATM_HE_USE_SUNI=y
+CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_3C589=m
CONFIG_VORTEX=m
CONFIG_TYPHOON=m
+CONFIG_ADAPTEC_STARFIRE=m
+CONFIG_ACENIC=m
+CONFIG_AMD8111_ETH=m
+CONFIG_PCNET32=m
+CONFIG_PCMCIA_NMCLAN=m
+CONFIG_B44=m
+CONFIG_BNX2=m
+CONFIG_TIGON3=m
+CONFIG_CHELSIO_T1=m
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
@@ -293,49 +287,69 @@ CONFIG_WINBOND_840=m
CONFIG_DM9102=m
CONFIG_ULI526X=m
CONFIG_PCMCIA_XIRCOM=m
+CONFIG_DL2K=m
+CONFIG_SUNDANCE=m
+CONFIG_PCMCIA_FMVJ18X=m
CONFIG_HP100=m
-CONFIG_NET_PCI=y
-CONFIG_PCNET32=m
-CONFIG_AMD8111_ETH=m
-CONFIG_ADAPTEC_STARFIRE=m
-CONFIG_B44=m
-CONFIG_FORCEDETH=m
CONFIG_E100=m
+CONFIG_E1000=m
+CONFIG_IXGB=m
+CONFIG_SKGE=m
+CONFIG_SKY2=m
+CONFIG_MYRI10GE=m
CONFIG_FEALNX=m
CONFIG_NATSEMI=m
+CONFIG_NS83820=m
+CONFIG_S2IO=m
+CONFIG_PCMCIA_AXNET=m
CONFIG_NE2K_PCI=m
+CONFIG_PCMCIA_PCNET=m
+CONFIG_FORCEDETH=m
+CONFIG_HAMACHI=m
+CONFIG_YELLOWFIN=m
+CONFIG_QLA3XXX=m
CONFIG_8139CP=m
CONFIG_8139TOO=m
# CONFIG_8139TOO_PIO is not set
CONFIG_8139TOO_8129=y
+CONFIG_R8169=m
CONFIG_SIS900=m
+CONFIG_SIS190=m
+CONFIG_PCMCIA_SMC91C92=m
CONFIG_EPIC100=m
-CONFIG_SUNDANCE=m
+CONFIG_HAPPYMEAL=m
+CONFIG_SUNGEM=m
+CONFIG_CASSINI=m
CONFIG_TLAN=m
CONFIG_VIA_RHINE=m
-CONFIG_ACENIC=m
-CONFIG_DL2K=m
-CONFIG_E1000=m
-CONFIG_NS83820=m
-CONFIG_HAMACHI=m
-CONFIG_YELLOWFIN=m
-CONFIG_R8169=m
-CONFIG_R8169_VLAN=y
-CONFIG_SIS190=m
-CONFIG_SKGE=m
-CONFIG_SKY2=m
CONFIG_VIA_VELOCITY=m
-CONFIG_TIGON3=m
-CONFIG_BNX2=m
-CONFIG_QLA3XXX=m
-CONFIG_CHELSIO_T1=m
-CONFIG_IXGB=m
-CONFIG_S2IO=m
-CONFIG_MYRI10GE=m
-CONFIG_IBMOL=m
-CONFIG_IBMLS=m
-CONFIG_TMSPCI=m
-CONFIG_ABYSS=m
+CONFIG_PCMCIA_XIRC2PS=m
+CONFIG_FDDI=y
+CONFIG_DEFXX=m
+CONFIG_SKFP=m
+CONFIG_HIPPI=y
+CONFIG_ROADRUNNER=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
@@ -349,16 +363,6 @@ CONFIG_USB_ALI_M5632=y
CONFIG_USB_AN2720=y
CONFIG_USB_EPSON2888=y
CONFIG_USB_SIERRA_NET=m
-CONFIG_NET_PCMCIA=y
-CONFIG_PCMCIA_3C589=m
-CONFIG_PCMCIA_3C574=m
-CONFIG_PCMCIA_FMVJ18X=m
-CONFIG_PCMCIA_PCNET=m
-CONFIG_PCMCIA_NMCLAN=m
-CONFIG_PCMCIA_SMC91C92=m
-CONFIG_PCMCIA_XIRC2PS=m
-CONFIG_PCMCIA_AXNET=m
-CONFIG_ARCNET_COM20020_CS=m
CONFIG_WAN=y
CONFIG_LANMEDIA=m
CONFIG_HDLC=m
@@ -375,46 +379,8 @@ CONFIG_DSCC4=m
CONFIG_DSCC4_PCISYNC=y
CONFIG_DSCC4_PCI_RST=y
CONFIG_DLCI=m
-CONFIG_WAN_ROUTER_DRIVERS=m
-CONFIG_CYCLADES_SYNC=m
-CONFIG_CYCLOMX_X25=y
CONFIG_LAPBETHER=m
CONFIG_X25_ASY=m
-CONFIG_ATM_TCP=m
-CONFIG_ATM_LANAI=m
-CONFIG_ATM_ENI=m
-CONFIG_ATM_FIRESTREAM=m
-CONFIG_ATM_ZATM=m
-CONFIG_ATM_NICSTAR=m
-CONFIG_ATM_IDT77252=m
-CONFIG_ATM_AMBASSADOR=m
-CONFIG_ATM_HORIZON=m
-CONFIG_ATM_IA=m
-CONFIG_ATM_FORE200E=m
-CONFIG_ATM_HE=m
-CONFIG_ATM_HE_USE_SUNI=y
-CONFIG_FDDI=y
-CONFIG_DEFXX=m
-CONFIG_SKFP=m
-CONFIG_HIPPI=y
-CONFIG_ROADRUNNER=m
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_PPPOATM=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
-CONFIG_NET_FC=y
-CONFIG_NETCONSOLE=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
@@ -440,7 +406,6 @@ CONFIG_HW_RANDOM=y
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_GPIO=m
-CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_SENSORS_ADM1021=m
CONFIG_SENSORS_ADM1025=m
@@ -453,6 +418,7 @@ CONFIG_SENSORS_F71805F=m
CONFIG_SENSORS_GL518SM=m
CONFIG_SENSORS_GL520SM=m
CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_MAX1619=m
CONFIG_SENSORS_LM63=m
CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM77=m
@@ -463,7 +429,6 @@ CONFIG_SENSORS_LM85=m
CONFIG_SENSORS_LM87=m
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_LM92=m
-CONFIG_SENSORS_MAX1619=m
CONFIG_SENSORS_PC87360=m
CONFIG_SENSORS_PCF8591=m
CONFIG_SENSORS_SIS5595=m
@@ -491,23 +456,17 @@ CONFIG_SOUND=m
CONFIG_SND=m
CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_DUMMY=m
CONFIG_SND_VIRMIDI=m
CONFIG_SND_MTPAV=m
CONFIG_SND_SERIAL_U16550=m
CONFIG_SND_MPU401=m
CONFIG_SND_AD1889=m
-CONFIG_SND_ALS300=m
-CONFIG_SND_ALI5451=m
CONFIG_SND_ATIIXP=m
CONFIG_SND_ATIIXP_MODEM=m
CONFIG_SND_AU8810=m
CONFIG_SND_AU8820=m
CONFIG_SND_AU8830=m
-CONFIG_SND_AZT3328=m
CONFIG_SND_BT87X=m
CONFIG_SND_CA0106=m
CONFIG_SND_CMIPCI=m
@@ -525,22 +484,15 @@ CONFIG_SND_ECHO3G=m
CONFIG_SND_INDIGO=m
CONFIG_SND_INDIGOIO=m
CONFIG_SND_INDIGODJ=m
-CONFIG_SND_EMU10K1=m
-CONFIG_SND_EMU10K1X=m
CONFIG_SND_ENS1370=m
CONFIG_SND_ENS1371=m
-CONFIG_SND_ES1938=m
-CONFIG_SND_ES1968=m
CONFIG_SND_FM801=m
-CONFIG_SND_HDA_INTEL=m
CONFIG_SND_HDSP=m
CONFIG_SND_HDSPM=m
-CONFIG_SND_ICE1712=m
CONFIG_SND_ICE1724=m
CONFIG_SND_INTEL8X0=m
CONFIG_SND_INTEL8X0M=m
CONFIG_SND_KORG1212=m
-CONFIG_SND_MAESTRO3=m
CONFIG_SND_MIXART=m
CONFIG_SND_NM256=m
CONFIG_SND_PCXHR=m
@@ -548,16 +500,14 @@ CONFIG_SND_RIPTIDE=m
CONFIG_SND_RME32=m
CONFIG_SND_RME96=m
CONFIG_SND_RME9652=m
-CONFIG_SND_SONICVIBES=m
-CONFIG_SND_TRIDENT=m
CONFIG_SND_VIA82XX=m
CONFIG_SND_VIA82XX_MODEM=m
CONFIG_SND_VX222=m
CONFIG_SND_YMFPCI=m
+CONFIG_SND_HDA_INTEL=m
CONFIG_SND_USB_AUDIO=m
CONFIG_SND_VXPOCKET=m
CONFIG_SND_PDAUDIOCF=m
-CONFIG_SOUND_PRIME=m
CONFIG_USB_HIDDEV=y
CONFIG_USB_KBD=m
CONFIG_USB_MOUSE=m
@@ -566,7 +516,7 @@ CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD_PLATFORM=m
CONFIG_USB_UHCI_HCD=m
CONFIG_USB_U132_HCD=m
CONFIG_USB_SL811_HCD=m
@@ -595,7 +545,6 @@ CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
CONFIG_USB_SERIAL_CYPRESS_M8=m
CONFIG_USB_SERIAL_EMPEG=m
CONFIG_USB_SERIAL_FTDI_SIO=m
-CONFIG_USB_SERIAL_FUNSOFT=m
CONFIG_USB_SERIAL_VISOR=m
CONFIG_USB_SERIAL_IPAQ=m
CONFIG_USB_SERIAL_IR=m
@@ -612,7 +561,6 @@ CONFIG_USB_SERIAL_MOS7720=m
CONFIG_USB_SERIAL_MOS7840=m
CONFIG_USB_SERIAL_NAVMAN=m
CONFIG_USB_SERIAL_PL2303=m
-CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_TI=m
@@ -641,7 +589,6 @@ CONFIG_USB_CXACRU=m
CONFIG_USB_UEAGLEATM=m
CONFIG_USB_XUSBATM=m
CONFIG_USB_GADGET=m
-CONFIG_USB_GADGET_NET2280=y
CONFIG_USB_ZERO=m
CONFIG_USB_ETH=m
CONFIG_USB_GADGETFS=m
@@ -677,7 +624,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=m
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
CONFIG_QUOTA=y
@@ -692,48 +638,18 @@ CONFIG_VFAT_FS=m
CONFIG_NTFS_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=m
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_XATTR=y
CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_CRAMFS=y
CONFIG_SQUASHFS=y
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFSD=m
CONFIG_NFSD_V4=y
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=m
CONFIG_CIFS=m
-CONFIG_NCP_FS=m
-CONFIG_NCPFS_PACKET_SIGNING=y
-CONFIG_NCPFS_IOCTL_LOCKING=y
-CONFIG_NCPFS_STRONG=y
-CONFIG_NCPFS_NFS_NS=y
-CONFIG_NCPFS_OS2_NS=y
-CONFIG_NCPFS_NLS=y
-CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
CONFIG_AFS_FS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_ACORN_PARTITION=y
-CONFIG_ACORN_PARTITION_ICS=y
-CONFIG_ACORN_PARTITION_RISCIX=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_ATARI_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_LDM_PARTITION=y
-CONFIG_SGI_PARTITION=y
-CONFIG_ULTRIX_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="cp437"
CONFIG_NLS_CODEPAGE_437=m
@@ -774,18 +690,11 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -795,3 +704,5 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index e8e1dd8e0e99..72a211d2d556 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -1,32 +1,35 @@
-CONFIG_NLM_XLP_BOARD=y
-CONFIG_64BIT=y
-CONFIG_PAGE_SIZE_16KB=y
-# CONFIG_HW_PERF_EVENTS is not set
-CONFIG_KSM=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
-CONFIG_SMP=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_CGROUPS=y
CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_NLM_XLP_BOARD=y
+CONFIG_64BIT=y
+CONFIG_PAGE_SIZE_16KB=y
+# CONFIG_HW_PERF_EVENTS is not set
+CONFIG_SMP=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_PCI_STUB=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
@@ -49,19 +52,11 @@ CONFIG_SGI_PARTITION=y
CONFIG_ULTRIX_PARTITION=y
CONFIG_SUN_PARTITION=y
CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
CONFIG_SYSV68_PARTITION=y
-CONFIG_PCI=y
-CONFIG_PCI_DEBUG=y
-CONFIG_PCI_REALLOC_ENABLE_AUTO=y
-CONFIG_PCI_STUB=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
-CONFIG_MIPS32_COMPAT=y
-CONFIG_MIPS32_O32=y
-CONFIG_MIPS32_N32=y
-CONFIG_PM=y
-CONFIG_PM_DEBUG=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -93,7 +88,6 @@ CONFIG_TCP_CONG_VENO=m
CONFIG_TCP_CONG_YEAH=m
CONFIG_TCP_CONG_ILLINOIS=m
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
@@ -104,12 +98,10 @@ CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_SIT=m
CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_NETLABEL=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -120,7 +112,6 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
@@ -180,18 +171,12 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
-CONFIG_IP_VS_FTP=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -201,8 +186,6 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
@@ -238,7 +221,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
CONFIG_RDS=m
@@ -254,14 +236,12 @@ CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_DECNET=m
CONFIG_LLC2=m
-CONFIG_IPX=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_X25=m
CONFIG_LAPB=m
-CONFIG_WAN_ROUTER=m
CONFIG_PHONET=m
CONFIG_IEEE802154=m
CONFIG_NET_SCHED=y
@@ -324,7 +304,6 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_CDROM_PKTCDVD=y
@@ -335,7 +314,6 @@ CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
@@ -343,7 +321,6 @@ CONFIG_SCSI_SPI_ATTRS=m
CONFIG_SCSI_SAS_LIBSAS=m
CONFIG_SCSI_SRP_ATTRS=m
CONFIG_ISCSI_TCP=m
-CONFIG_LIBFCOE=m
CONFIG_SCSI_DEBUG=m
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
@@ -368,10 +345,9 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
-CONFIG_E1000E=y
# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_E1000E=y
CONFIG_SKY2=y
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -379,10 +355,9 @@ CONFIG_SKY2=y
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
@@ -394,7 +369,6 @@ CONFIG_SKY2=y
# CONFIG_NET_VENDOR_TOSHIBA is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_EVBUG=m
# CONFIG_INPUT_KEYBOARD is not set
@@ -403,12 +377,9 @@ CONFIG_SERIO_SERPORT=m
CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_RAW=m
CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_N_HDLC=m
-# CONFIG_DEVKMEM is not set
-CONFIG_STALDRV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=48
@@ -430,7 +401,6 @@ CONFIG_THERMAL=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_UIO=y
-CONFIG_UIO_PDRV=m
CONFIG_UIO_PDRV_GENIRQ=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
@@ -440,9 +410,6 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
CONFIG_GFS2_FS=m
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
@@ -487,7 +454,7 @@ CONFIG_UFS_FS=m
CONFIG_EXOFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_FSCACHE=y
CONFIG_NFSD=m
CONFIG_NFSD_V3_ACL=y
@@ -498,14 +465,6 @@ CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_CIFS_DFS_UPCALL=y
-CONFIG_NCP_FS=m
-CONFIG_NCPFS_PACKET_SIGNING=y
-CONFIG_NCPFS_IOCTL_LOCKING=y
-CONFIG_NCPFS_STRONG=y
-CONFIG_NCPFS_NFS_NS=y
-CONFIG_NCPFS_OS2_NS=y
-CONFIG_NCPFS_NLS=y
-CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
CONFIG_AFS_FS=m
CONFIG_NLS=y
@@ -547,19 +506,6 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
-CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_FRAME_WARN=1024
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_SCHED_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_KGDB=y
CONFIG_SECURITY=y
CONFIG_LSM_MMAP_MIN_ADDR=0
CONFIG_SECURITY_SELINUX=y
@@ -568,10 +514,8 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_SECURITY_SMACK=y
CONFIG_SECURITY_TOMOYO=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
@@ -585,8 +529,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -602,5 +544,15 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_CCITT=m
CONFIG_CRC7=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_KGDB=y
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index c4477a4d40c1..4ecb157e56d4 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -1,47 +1,60 @@
-CONFIG_NLM_XLR_BOARD=y
-CONFIG_HIGHMEM=y
-CONFIG_KSM=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
-CONFIG_SMP=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_KEXEC=y
-CONFIG_CROSS_COMPILE=""
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_AUDIT=y
CONFIG_NAMESPACES=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_INITRAMFS_COMPRESSION_GZIP=y
CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
# CONFIG_ELF_CORE is not set
+CONFIG_KALLSYMS_ALL=y
# CONFIG_PERF_EVENTS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_NLM_XLR_BOARD=y
+CONFIG_HIGHMEM=y
+CONFIG_SMP=y
+CONFIG_KEXEC=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_BLK_DEV_INTEGRITY=y
-CONFIG_PCI=y
-CONFIG_PCI_MSI=y
-CONFIG_PCI_DEBUG=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ACORN_PARTITION=y
+CONFIG_ACORN_PARTITION_ICS=y
+CONFIG_ACORN_PARTITION_RISCIX=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_ATARI_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_LDM_PARTITION=y
+CONFIG_SGI_PARTITION=y
+CONFIG_ULTRIX_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_SYSV68_PARTITION=y
CONFIG_BINFMT_MISC=m
-CONFIG_PM=y
-CONFIG_PM_DEBUG=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -73,7 +86,6 @@ CONFIG_TCP_CONG_VENO=m
CONFIG_TCP_CONG_YEAH=m
CONFIG_TCP_CONG_ILLINOIS=m
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
@@ -84,12 +96,10 @@ CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_SIT=m
CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_NETLABEL=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -100,7 +110,6 @@ CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
@@ -160,19 +169,12 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
-CONFIG_IP_VS_FTP=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -182,8 +184,6 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
@@ -219,7 +219,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
CONFIG_RDS=m
@@ -235,14 +234,12 @@ CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_DECNET=m
CONFIG_LLC2=m
-CONFIG_IPX=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_X25=m
CONFIG_LAPB=m
-CONFIG_WAN_ROUTER=m
CONFIG_PHONET=m
CONFIG_IEEE802154=m
CONFIG_NET_SCHED=y
@@ -295,7 +292,6 @@ CONFIG_CONNECTOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_OSD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_CDROM_PKTCDVD=y
@@ -307,7 +303,6 @@ CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
@@ -315,7 +310,6 @@ CONFIG_SCSI_SPI_ATTRS=m
CONFIG_SCSI_SAS_LIBSAS=m
CONFIG_SCSI_SRP_ATTRS=m
CONFIG_ISCSI_TCP=m
-CONFIG_LIBFCOE=m
CONFIG_SCSI_DEBUG=m
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
@@ -327,7 +321,6 @@ CONFIG_SCSI_OSD_ULD=m
CONFIG_NETDEVICES=y
CONFIG_E1000E=y
CONFIG_SKY2=y
-# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_EVBUG=m
# CONFIG_INPUT_KEYBOARD is not set
@@ -336,12 +329,9 @@ CONFIG_SERIO_SERPORT=m
CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_RAW=m
CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_N_HDLC=m
-# CONFIG_DEVKMEM is not set
-CONFIG_STALDRV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=48
@@ -354,13 +344,12 @@ CONFIG_HW_RANDOM_TIMERIOMEM=m
CONFIG_RAW_DRIVER=m
CONFIG_I2C=y
CONFIG_I2C_XLR=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_DS1374=y
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1374=y
CONFIG_UIO=y
-CONFIG_UIO_PDRV=m
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
@@ -369,11 +358,7 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_EXT4_FS_SECURITY=y
CONFIG_GFS2_FS=m
-CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
@@ -420,9 +405,8 @@ CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_EXOFS_FS=m
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
+CONFIG_NFS_V4=m
CONFIG_NFS_FSCACHE=y
CONFIG_NFSD=m
CONFIG_NFSD_V3_ACL=y
@@ -433,35 +417,8 @@ CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_CIFS_DFS_UPCALL=y
-CONFIG_NCP_FS=m
-CONFIG_NCPFS_PACKET_SIGNING=y
-CONFIG_NCPFS_IOCTL_LOCKING=y
-CONFIG_NCPFS_STRONG=y
-CONFIG_NCPFS_NFS_NS=y
-CONFIG_NCPFS_OS2_NS=y
-CONFIG_NCPFS_NLS=y
-CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
CONFIG_AFS_FS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_ACORN_PARTITION=y
-CONFIG_ACORN_PARTITION_ICS=y
-CONFIG_ACORN_PARTITION_RISCIX=y
-CONFIG_OSF_PARTITION=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_ATARI_PARTITION=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_LDM_PARTITION=y
-CONFIG_SGI_PARTITION=y
-CONFIG_ULTRIX_PARTITION=y
-CONFIG_SUN_PARTITION=y
-CONFIG_KARMA_PARTITION=y
-CONFIG_EFI_PARTITION=y
-CONFIG_SYSV68_PARTITION=y
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="cp437"
CONFIG_NLS_CODEPAGE_437=m
@@ -501,20 +458,7 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
-CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_SCHED_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_KGDB=y
CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
CONFIG_LSM_MMAP_MIN_ADDR=0
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
@@ -522,10 +466,8 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_SECURITY_SMACK=y
CONFIG_SECURITY_TOMOYO=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
@@ -539,8 +481,6 @@ CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -556,5 +496,14 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
-CONFIG_CRC_CCITT=m
CONFIG_CRC7=m
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_KGDB=y
diff --git a/arch/mips/configs/omega2p_defconfig b/arch/mips/configs/omega2p_defconfig
index e2731c3cc7e7..0649b8f06b7c 100644
--- a/arch/mips/configs/omega2p_defconfig
+++ b/arch/mips/configs/omega2p_defconfig
@@ -1,17 +1,9 @@
-CONFIG_RALINK=y
-CONFIG_SOC_MT7620=y
-CONFIG_DTB_OMEGA2P=y
-CONFIG_CPU_MIPS32_R2=y
-# CONFIG_COMPACTION is not set
-CONFIG_HZ_100=y
-CONFIG_PREEMPT=y
-# CONFIG_SECCOMP is not set
-CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -30,8 +22,16 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_RALINK=y
+CONFIG_SOC_MT7620=y
+CONFIG_DTB_OMEGA2P=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y
# CONFIG_SUSPEND is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -113,6 +113,10 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_KOI8_R=y
CONFIG_NLS_KOI8_U=y
CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRC16=y
+CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_STRIP_ASM_SYMS=y
@@ -123,7 +127,3 @@ CONFIG_PANIC_TIMEOUT=10
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_STACKTRACE=y
# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_LZO=y
-CONFIG_CRC16=y
-CONFIG_XZ_DEC=y
diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
index 41190c2036e6..63fe2da1b37f 100644
--- a/arch/mips/configs/pic32mzda_defconfig
+++ b/arch/mips/configs/pic32mzda_defconfig
@@ -1,11 +1,7 @@
-CONFIG_MACH_PIC32=y
-CONFIG_DTB_PIC32_MZDA_SK=y
-CONFIG_HZ_100=y
-CONFIG_PREEMPT_VOLUNTARY=y
-# CONFIG_SECCOMP is not set
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -14,6 +10,11 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
+CONFIG_MACH_PIC32=y
+CONFIG_DTB_PIC32_MZDA_SK=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -23,7 +24,6 @@ CONFIG_BLK_DEV_BSGLIB=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_SGI_PARTITION=y
CONFIG_BINFMT_MISC=m
-# CONFIG_SUSPEND is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_ALLOW_DEV_COREDUMP is not set
diff --git a/arch/mips/configs/pistachio_defconfig b/arch/mips/configs/pistachio_defconfig
index b22a3cf149b6..2f08d071ada6 100644
--- a/arch/mips/configs/pistachio_defconfig
+++ b/arch/mips/configs/pistachio_defconfig
@@ -1,23 +1,16 @@
-CONFIG_MACH_PISTACHIO=y
-CONFIG_MIPS_MT_SMP=y
-CONFIG_MIPS_CPS=y
-# CONFIG_COMPACTION is not set
-CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
-CONFIG_ZSMALLOC=y
-CONFIG_NR_CPUS=4
-CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="localhost"
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_IKCONFIG=m
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_CGROUPS=y
-CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_SCHED=y
CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_FREEZER=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
@@ -29,14 +22,20 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
+CONFIG_MACH_PISTACHIO=y
+CONFIG_MIPS_CPS=y
+CONFIG_NR_CPUS=4
CONFIG_PM_DEBUG=y
CONFIG_PM_ADVANCED_DEBUG=y
CONFIG_CPU_IDLE=y
# CONFIG_MIPS_CPS_CPUIDLE is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_COMPACTION is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_ZSMALLOC=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -66,7 +65,6 @@ CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_HTCP is not set
CONFIG_TCP_CONG_LP=m
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_XFRM_MODE_TRANSPORT=m
@@ -89,13 +87,11 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
CONFIG_NETFILTER_XT_MATCH_DSCP=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_NF_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=y
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
CONFIG_IP_NF_MANGLE=y
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
@@ -167,15 +163,14 @@ CONFIG_USB_NET_SMSC95XX=m
CONFIG_USB_NET_MCS7830=m
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
-CONFIG_LIBERTAS_THINFIRM=m
-CONFIG_USB_NET_RNDIS_WLAN=m
-CONFIG_MAC80211_HWSIM=m
CONFIG_HOSTAP=m
CONFIG_HOSTAP_FIRMWARE=y
CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_LIBERTAS_THINFIRM=m
CONFIG_RT2X00=m
CONFIG_RT2800USB=m
-# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_MAC80211_HWSIM=m
+CONFIG_USB_NET_RNDIS_WLAN=m
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
@@ -183,7 +178,6 @@ CONFIG_KEYBOARD_GPIO=y
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
@@ -204,13 +198,10 @@ CONFIG_GPIO_SYSFS=y
CONFIG_POWER_SUPPLY=y
CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_CORE=y
CONFIG_IMGPDC_WDT=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_GPIO=y
-CONFIG_MEDIA_SUPPORT=y
CONFIG_RC_CORE=y
-# CONFIG_RC_DECODERS is not set
CONFIG_RC_DEVICES=y
CONFIG_IR_IMG=y
CONFIG_IR_IMG_NEC=y
@@ -220,8 +211,7 @@ CONFIG_IR_IMG_SHARP=y
CONFIG_IR_IMG_SANYO=y
CONFIG_IR_IMG_RC5=y
CONFIG_IR_IMG_RC6=y
-# CONFIG_DVB_TUNER_DIB0070 is not set
-# CONFIG_DVB_TUNER_DIB0090 is not set
+CONFIG_MEDIA_SUPPORT=y
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -229,10 +219,10 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_SOUND=y
CONFIG_SND=y
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_HRTIMER=m
CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
# CONFIG_SND_SPI is not set
CONFIG_SND_USB_AUDIO=m
CONFIG_USB=y
@@ -300,27 +290,9 @@ CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
-CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_SCHEDSTATS=y
-CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_CREDENTIALS=y
-CONFIG_FUNCTION_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_LKDTM=y
-CONFIG_TEST_UDELAY=m
-CONFIG_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_YAMA=y
-CONFIG_DEFAULT_SECURITY_DAC=y
CONFIG_CRYPTO_AUTHENC=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_SHA1=y
@@ -328,9 +300,19 @@ CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=m
CONFIG_CRC7=m
-CONFIG_LIBCRC32C=m
# CONFIG_XZ_DEC_X86 is not set
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=y
+CONFIG_TEST_UDELAY=m
diff --git a/arch/mips/configs/pnx8335_stb225_defconfig b/arch/mips/configs/pnx8335_stb225_defconfig
index e73cdb08fc6e..aa0b169800e0 100644
--- a/arch/mips/configs/pnx8335_stb225_defconfig
+++ b/arch/mips/configs/pnx8335_stb225_defconfig
@@ -1,23 +1,21 @@
-CONFIG_NXP_STB225=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_128=y
-CONFIG_PREEMPT_VOLUNTARY=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_EXPERT=y
CONFIG_SLAB=y
+CONFIG_NXP_STB225=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_HZ_128=y
+# CONFIG_SECCOMP is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -42,17 +40,14 @@ CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=m
CONFIG_INPUT_EVBUG=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_VT_CONSOLE is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_PNX8XXX=y
CONFIG_SERIAL_PNX8XXX_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -61,12 +56,9 @@ CONFIG_FB=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_SOUND=m
CONFIG_SND=m
-CONFIG_SND_SEQUENCER=m
-CONFIG_SND_MIXER_OSS=m
-CONFIG_SND_PCM_OSS=m
-CONFIG_SND_SEQUENCER_OSS=y
CONFIG_SND_VERBOSE_PRINTK=y
CONFIG_SND_DEBUG=y
+CONFIG_SND_SEQUENCER=m
CONFIG_EXT2_FS=m
# CONFIG_DNOTIFY is not set
CONFIG_MSDOS_FS=m
@@ -75,7 +67,6 @@ CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
diff --git a/arch/mips/configs/qi_lb60_defconfig b/arch/mips/configs/qi_lb60_defconfig
index d8b7211a7b0f..7671fe6a8042 100644
--- a/arch/mips/configs/qi_lb60_defconfig
+++ b/arch/mips/configs/qi_lb60_defconfig
@@ -1,11 +1,7 @@
-CONFIG_MACH_INGENIC=y
-# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_HZ_100=y
-CONFIG_PREEMPT=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_PREEMPT=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
@@ -13,6 +9,9 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
+CONFIG_MACH_INGENIC=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -20,6 +19,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_EFI_PARTITION is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -49,7 +49,6 @@ CONFIG_MTD_NAND_JZ4740=y
CONFIG_MTD_UBI=y
CONFIG_NETDEVICES=y
# CONFIG_WLAN is not set
-# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
@@ -58,7 +57,6 @@ CONFIG_KEYBOARD_MATRIX=y
CONFIG_INPUT_MISC=y
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=2
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_DMA is not set
@@ -109,7 +107,6 @@ CONFIG_USB_GADGET_DEBUG=y
CONFIG_USB_ETH=y
# CONFIG_USB_ETH_RNDIS is not set
CONFIG_MMC=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_JZ4740=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_JZ4740=y
@@ -119,8 +116,6 @@ CONFIG_PWM=y
CONFIG_PWM_JZ4740=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
# CONFIG_DNOTIFY is not set
CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
@@ -171,6 +166,8 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_KOI8_R=y
CONFIG_NLS_KOI8_U=y
CONFIG_NLS_UTF8=y
+CONFIG_FONTS=y
+CONFIG_FONT_SUN8x16=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_STRIP_ASM_SYMS=y
@@ -181,7 +178,3 @@ CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_FTRACE is not set
CONFIG_KGDB=y
-CONFIG_RUNTIME_DEBUG=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_FONTS=y
-CONFIG_FONT_SUN8x16=y
diff --git a/arch/mips/configs/rb532_defconfig b/arch/mips/configs/rb532_defconfig
index 6fa56c6e53f5..7befe05fd813 100644
--- a/arch/mips/configs/rb532_defconfig
+++ b/arch/mips/configs/rb532_defconfig
@@ -1,29 +1,30 @@
-CONFIG_MIKROTIK_RB532=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_100=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_TINY_RCU=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-# CONFIG_KALLSYMS is not set
# CONFIG_ELF_CORE is not set
+# CONFIG_KALLSYMS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_PCI_QUIRKS is not set
CONFIG_SLAB=y
+CONFIG_MIKROTIK_RB532=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
+# CONFIG_PCI_QUIRKS is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -33,7 +34,6 @@ CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_ARPD=y
CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
@@ -70,13 +70,9 @@ CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_SCTP=m
CONFIG_NETFILTER_XT_MATCH_STATE=y
CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_FILTER=y
CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_NF_NAT=y
-CONFIG_IP_NF_TARGET_MASQUERADE=y
CONFIG_IP_NF_MANGLE=y
CONFIG_IP_NF_RAW=m
CONFIG_BRIDGE=y
@@ -122,31 +118,27 @@ CONFIG_ATA=y
CONFIG_PATA_RB532=y
CONFIG_NETDEVICES=y
CONFIG_IFB=m
-CONFIG_NET_ETHERNET=y
CONFIG_KORINA=y
-CONFIG_NET_PCI=y
CONFIG_VIA_RHINE=y
-CONFIG_ATMEL=m
CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=m
-# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_PPP_ASYNC=m
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_MISC=y
CONFIG_INPUT_RB532_BUTTON=y
# CONFIG_SERIO is not set
# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=2
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_LEGACY_PTYS is not set
CONFIG_HW_RANDOM=y
CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
@@ -171,13 +163,8 @@ CONFIG_JFFS2_FS=y
CONFIG_JFFS2_SUMMARY=y
CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_SQUASHFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_CRYPTO=y
CONFIG_CRYPTO_TEST=m
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC16=m
-CONFIG_LIBCRC32C=m
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index fb195e29e449..50a2c9ad583f 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -1,27 +1,24 @@
-CONFIG_MACH_TX49XX=y
-CONFIG_TOSHIBA_RBTX4927=y
-CONFIG_TOSHIBA_RBTX4938=y
-CONFIG_TOSHIBA_RBTX4939=y
-CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP=y
+CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
-# CONFIG_SECCOMP is not set
-CONFIG_SYSVIPC=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-# CONFIG_HOTPLUG is not set
-# CONFIG_PCSPKR_PLATFORM is not set
# CONFIG_EPOLL is not set
CONFIG_SLAB=y
+CONFIG_MACH_TX49XX=y
+CONFIG_TOSHIBA_RBTX4927=y
+CONFIG_TOSHIBA_RBTX4938=y
+CONFIG_TOSHIBA_RBTX4939=y
+CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -52,10 +49,8 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_IDE_TX4938=y
CONFIG_BLK_DEV_IDE_TX4939=y
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
CONFIG_NE2000=y
-CONFIG_NET_PCI=y
+CONFIG_SMC91X=y
CONFIG_TC35815=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
@@ -99,7 +94,6 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_JFFS2_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 5f71aa598b06..0f4b09f8a0ee 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -1,24 +1,23 @@
-CONFIG_SNI_RM=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_ARC_CONSOLE=y
-CONFIG_HZ_1000=y
-CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_RELAY=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
+CONFIG_SNI_RM=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_ARC_CONSOLE=y
+CONFIG_HZ_1000=y
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
-CONFIG_PCI=y
+CONFIG_PARTITION_ADVANCED=y
CONFIG_BINFMT_MISC=m
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=m
CONFIG_UNIX=y
@@ -27,8 +26,6 @@ CONFIG_NET_KEY_MIGRATE=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
@@ -48,7 +45,6 @@ CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETWORK_SECMARK=y
CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
@@ -92,20 +88,12 @@ CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
@@ -114,7 +102,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -149,7 +136,6 @@ CONFIG_BRIDGE_EBT_MARK_T=m
CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_ULOG=m
CONFIG_BRIDGE=m
CONFIG_DECNET=m
CONFIG_NET_SCHED=y
@@ -222,7 +208,6 @@ CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_FC_ATTRS=y
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
CONFIG_ISCSI_TCP=m
CONFIG_SCSI_AIC94XX=m
# CONFIG_AIC94XX_DEBUG is not set
@@ -247,34 +232,30 @@ CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
CONFIG_BONDING=m
+CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
CONFIG_TUN=m
-CONFIG_PHYLIB=m
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_ISA=y
-CONFIG_NE2000=m
-CONFIG_NET_PCI=y
CONFIG_PCNET32=y
-CONFIG_VIA_VELOCITY=m
-CONFIG_QLA3XXX=m
CONFIG_CHELSIO_T3=m
+CONFIG_NE2000=m
+CONFIG_QLA3XXX=m
CONFIG_NETXEN_NIC=m
+CONFIG_VIA_VELOCITY=m
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_PLIP=m
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
CONFIG_USB_USBNET=m
# CONFIG_USB_NET_CDC_SUBSET is not set
-CONFIG_PLIP=m
CONFIG_INPUT_FF_MEMLESS=m
CONFIG_SERIO_PARKBD=m
CONFIG_SERIO_RAW=m
@@ -329,7 +310,6 @@ CONFIG_USB_SERIAL_KLSI=m
CONFIG_USB_SERIAL_KOBIL_SCT=m
CONFIG_USB_SERIAL_MCT_U232=m
CONFIG_USB_SERIAL_PL2303=m
-CONFIG_USB_SERIAL_HP4X=m
CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SAFE_PADDED=y
CONFIG_USB_SERIAL_CYBERJACK=m
@@ -377,25 +357,11 @@ CONFIG_ROMFS_FS=m
CONFIG_SYSV_FS=m
CONFIG_UFS_FS=m
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
-CONFIG_RPCSEC_GSS_KRB5=m
-CONFIG_RPCSEC_GSS_SPKM3=m
-CONFIG_SMB_FS=m
CONFIG_CIFS=m
-CONFIG_NCP_FS=m
-CONFIG_NCPFS_PACKET_SIGNING=y
-CONFIG_NCPFS_IOCTL_LOCKING=y
-CONFIG_NCPFS_STRONG=y
-CONFIG_NCPFS_NFS_NS=y
-CONFIG_NCPFS_OS2_NS=y
-CONFIG_NCPFS_SMALLDOS=y
-CONFIG_NCPFS_NLS=y
-CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
CONFIG_AFS_FS=m
-CONFIG_PARTITION_ADVANCED=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_737=m
CONFIG_NLS_CODEPAGE_775=m
@@ -434,21 +400,14 @@ CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
CONFIG_NLS_UTF8=m
-CONFIG_DLM=m
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST6=m
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
index dbe6a4639d05..0392e38010e6 100644
--- a/arch/mips/configs/rt305x_defconfig
+++ b/arch/mips/configs/rt305x_defconfig
@@ -1,32 +1,29 @@
-CONFIG_RALINK=y
-CONFIG_DTB_RT305X_EVAL=y
-CONFIG_CPU_MIPS32_R2=y
-# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
-CONFIG_HZ_100=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
-CONFIG_INITRAMFS_ROOT_UID=1000
-CONFIG_INITRAMFS_ROOT_GID=1000
# CONFIG_RD_GZIP is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_KALLSYMS_ALL=y
# CONFIG_AIO is not set
+CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
+CONFIG_RALINK=y
+CONFIG_DTB_RT305X_EVAL=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_SUSPEND is not set
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_COREDUMP is not set
-# CONFIG_SUSPEND is not set
+# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -38,7 +35,6 @@ CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
-CONFIG_ARPD=y
CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
@@ -63,8 +59,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
@@ -100,14 +94,12 @@ CONFIG_PPP_ASYNC=m
CONFIG_ISDN=y
CONFIG_INPUT=m
CONFIG_INPUT_POLLDEV=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_MISC=y
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
@@ -142,17 +134,7 @@ CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_SQUASHFS=y
# CONFIG_SQUASHFS_ZLIB is not set
CONFIG_SQUASHFS_XZ=y
-CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_DEBUG_FS=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_FTRACE is not set
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CRYPTO_MANAGER=m
CONFIG_CRYPTO_ARC4=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_ITU_T=m
CONFIG_CRC32_SARWATE=y
# CONFIG_XZ_DEC_X86 is not set
@@ -161,4 +143,11 @@ CONFIG_CRC32_SARWATE=y
# CONFIG_XZ_DEC_ARM is not set
# CONFIG_XZ_DEC_ARMTHUMB is not set
# CONFIG_XZ_DEC_SPARC is not set
-CONFIG_AVERAGE=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
diff --git a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig
index 1edd8430ad61..ad8981666ee4 100644
--- a/arch/mips/configs/sb1250_swarm_defconfig
+++ b/arch/mips/configs/sb1250_swarm_defconfig
@@ -1,30 +1,29 @@
-CONFIG_SIBYTE_SWARM=y
-CONFIG_CPU_SB1_PASS_2_2=y
-CONFIG_64BIT=y
-CONFIG_SMP=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_HZ_1000=y
CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_CGROUPS=y
CONFIG_CPUSETS=y
# CONFIG_PROC_PID_CPUSET is not set
CONFIG_CGROUP_CPUACCT=y
-CONFIG_RELAY=y
CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
+CONFIG_SIBYTE_SWARM=y
+CONFIG_CPU_SB1_PASS_2_2=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_1000=y
+CONFIG_PCI=y
+CONFIG_MIPS32_O32=y
+CONFIG_PM=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_PCI=y
-CONFIG_MIPS32_COMPAT=y
-CONFIG_MIPS32_O32=y
-CONFIG_PM=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -43,8 +42,6 @@ CONFIG_TCP_MD5SIG=y
CONFIG_NETWORK_SECMARK=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
-CONFIG_MAC80211_RC_PID=y
-CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_RFKILL=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_FW_LOADER=m
@@ -60,10 +57,8 @@ CONFIG_BLK_DEV_IDETAPE=y
CONFIG_RAID_ATTRS=m
CONFIG_NETDEVICES=y
CONFIG_MACVLAN=m
-CONFIG_BROADCOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
CONFIG_SB1250_MAC=y
+CONFIG_BROADCOM_PHY=y
# CONFIG_INPUT is not set
CONFIG_SERIO_RAW=m
# CONFIG_VT is not set
@@ -81,15 +76,9 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_DLM=m
-CONFIG_KEYS=y
-CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
-CONFIG_CRYPTO_CCM=m
-CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CBC=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@@ -98,7 +87,6 @@ CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
diff --git a/arch/mips/configs/tb0219_defconfig b/arch/mips/configs/tb0219_defconfig
index 4041597e3170..f0a11a72307e 100644
--- a/arch/mips/configs/tb0219_defconfig
+++ b/arch/mips/configs/tb0219_defconfig
@@ -1,12 +1,9 @@
-CONFIG_MACH_VR41XX=y
-CONFIG_TANBAC_TB0219=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_TANBAC_TB0219=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
@@ -25,7 +22,6 @@ CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
@@ -33,33 +29,26 @@ CONFIG_SYN_COOKIES=y
# CONFIG_IPV6 is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_FW_LOADER=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_XIP=y
CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=m
-CONFIG_MARVELL_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
CONFIG_8139TOO=y
+CONFIG_R8169=y
CONFIG_VIA_RHINE=y
CONFIG_VIA_RHINE_MMIO=y
-CONFIG_R8169=y
CONFIG_VIA_VELOCITY=y
-# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_CICADA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_VITESSE_PHY=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_VR41XX=y
CONFIG_SERIAL_VR41XX_CONSOLE=y
# CONFIG_HW_RANDOM is not set
@@ -82,7 +71,6 @@ CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CRAMFS=m
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
diff --git a/arch/mips/configs/tb0226_defconfig b/arch/mips/configs/tb0226_defconfig
index 565f0441c50d..025e45656359 100644
--- a/arch/mips/configs/tb0226_defconfig
+++ b/arch/mips/configs/tb0226_defconfig
@@ -1,18 +1,14 @@
-CONFIG_MACH_VR41XX=y
-CONFIG_TANBAC_TB0226=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_TANBAC_TB0226=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -34,28 +30,21 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_XIP=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_SAS_LIBSAS=m
-# CONFIG_SCSI_SAS_LIBSAS_DEBUG is not set
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
CONFIG_E100=y
CONFIG_USB_CATC=m
CONFIG_USB_KAWETH=m
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_VR41XX=y
CONFIG_SERIAL_VR41XX_CONSOLE=y
# CONFIG_HW_RANDOM is not set
@@ -77,10 +66,8 @@ CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CRAMFS=m
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="cca=3 mem=32M console=ttyVR0,115200"
-CONFIG_CRC32=m
diff --git a/arch/mips/configs/tb0287_defconfig b/arch/mips/configs/tb0287_defconfig
index a702be602fb9..68490248e3f1 100644
--- a/arch/mips/configs/tb0287_defconfig
+++ b/arch/mips/configs/tb0287_defconfig
@@ -1,12 +1,8 @@
-CONFIG_MACH_VR41XX=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
@@ -25,7 +21,6 @@ CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
@@ -36,36 +31,23 @@ CONFIG_TCP_CONG_CUBIC=m
# CONFIG_IPV6 is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_FW_LOADER=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_XIP=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SCAN_ASYNC=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=y
CONFIG_PATA_SIL680=y
-CONFIG_IEEE1394=m
-CONFIG_IEEE1394_OHCI1394=m
-CONFIG_IEEE1394_SBP2=m
-CONFIG_IEEE1394_ETH1394=m
-CONFIG_IEEE1394_RAWIO=m
-CONFIG_IEEE1394_VIDEO1394=m
-CONFIG_IEEE1394_DV1394=m
CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
CONFIG_8139TOO=y
+CONFIG_R8169=y
CONFIG_VIA_RHINE=y
CONFIG_VIA_RHINE_MMIO=y
-CONFIG_R8169=y
CONFIG_VIA_VELOCITY=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_VR41XX=y
CONFIG_SERIAL_VR41XX_CONSOLE=y
# CONFIG_HW_RANDOM is not set
@@ -76,9 +58,6 @@ CONFIG_FB=y
CONFIG_FB_SM501=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
CONFIG_USB=m
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
@@ -97,9 +76,11 @@ CONFIG_TMPFS_POSIX_ACL=y
CONFIG_CRAMFS=m
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
+CONFIG_FONTS=y
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="cca=3 mem=64M console=ttyVR0,115200 ip=any root=/dev/nfs"
diff --git a/arch/mips/configs/vocore2_defconfig b/arch/mips/configs/vocore2_defconfig
index 9121e4194a63..ded3dce911d5 100644
--- a/arch/mips/configs/vocore2_defconfig
+++ b/arch/mips/configs/vocore2_defconfig
@@ -1,17 +1,9 @@
-CONFIG_RALINK=y
-CONFIG_SOC_MT7620=y
-CONFIG_DTB_VOCORE2=y
-CONFIG_CPU_MIPS32_R2=y
-# CONFIG_COMPACTION is not set
-CONFIG_HZ_100=y
-CONFIG_PREEMPT=y
-# CONFIG_SECCOMP is not set
-CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -30,8 +22,16 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_RALINK=y
+CONFIG_SOC_MT7620=y
+CONFIG_DTB_VOCORE2=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER=y
# CONFIG_SUSPEND is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -113,6 +113,10 @@ CONFIG_NLS_ISO8859_15=y
CONFIG_NLS_KOI8_R=y
CONFIG_NLS_KOI8_U=y
CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRC16=y
+CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_STRIP_ASM_SYMS=y
@@ -123,7 +127,3 @@ CONFIG_PANIC_TIMEOUT=10
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_STACKTRACE=y
# CONFIG_FTRACE is not set
-CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_LZO=y
-CONFIG_CRC16=y
-CONFIG_XZ_DEC=y
diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig
index a84eac409c9c..891a5f77305d 100644
--- a/arch/mips/configs/workpad_defconfig
+++ b/arch/mips/configs/workpad_defconfig
@@ -1,18 +1,17 @@
-CONFIG_MACH_VR41XX=y
-CONFIG_IBM_WORKPAD=y
CONFIG_SYSVIPC=y
CONFIG_LOG_BUF_SHIFT=14
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
CONFIG_SLAB=y
+CONFIG_MACH_VR41XX=y
+CONFIG_IBM_WORKPAD=y
+CONFIG_PCCARD=y
+CONFIG_PCMCIA_VRC4171=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PCCARD=y
-CONFIG_PCMCIA_VRC4171=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -31,16 +30,14 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_IDECS=m
CONFIG_IDE_GENERIC=y
CONFIG_NETDEVICES=y
-CONFIG_NET_PCMCIA=y
-CONFIG_PCMCIA_3C589=m
CONFIG_PCMCIA_3C574=m
+CONFIG_PCMCIA_3C589=m
+CONFIG_PCMCIA_NMCLAN=m
CONFIG_PCMCIA_FMVJ18X=m
+CONFIG_PCMCIA_AXNET=m
CONFIG_PCMCIA_PCNET=m
-CONFIG_PCMCIA_NMCLAN=m
CONFIG_PCMCIA_SMC91C92=m
CONFIG_PCMCIA_XIRC2PS=m
-CONFIG_PCMCIA_AXNET=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@@ -62,7 +59,6 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
CONFIG_CMDLINE_BOOL=y
diff --git a/arch/mips/configs/xway_defconfig b/arch/mips/configs/xway_defconfig
index fa750d501c11..c3cac29e8414 100644
--- a/arch/mips/configs/xway_defconfig
+++ b/arch/mips/configs/xway_defconfig
@@ -1,13 +1,3 @@
-CONFIG_LANTIQ=y
-CONFIG_PCI_LANTIQ=y
-CONFIG_XRX200_PHY_FW=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_MIPS_MT_SMP=y
-CONFIG_MIPS_VPE_LOADER=y
-# CONFIG_COMPACTION is not set
-CONFIG_NR_CPUS=2
-CONFIG_HZ_100=y
-# CONFIG_SECCOMP is not set
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -15,19 +5,28 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-CONFIG_KALLSYMS_ALL=y
# CONFIG_AIO is not set
+CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
+CONFIG_LANTIQ=y
+CONFIG_PCI_LANTIQ=y
+CONFIG_XRX200_PHY_FW=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_MIPS_VPE_LOADER=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_PCI=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
-CONFIG_PCI=y
# CONFIG_COREDUMP is not set
+# CONFIG_COMPACTION is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -63,7 +62,6 @@ CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
@@ -102,14 +100,12 @@ CONFIG_PPP_ASYNC=m
CONFIG_ISDN=y
CONFIG_INPUT=m
CONFIG_INPUT_POLLDEV=m
-# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_MISC=y
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
@@ -150,6 +146,9 @@ CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_SQUASHFS=y
# CONFIG_SQUASHFS_ZLIB is not set
CONFIG_SQUASHFS_XZ=y
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32_SARWATE=y
CONFIG_PRINTK_TIME=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_STRIP_ASM_SYMS=y
@@ -158,6 +157,3 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_FTRACE is not set
CONFIG_CMDLINE_BOOL=y
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRC_ITU_T=m
-CONFIG_CRC32_SARWATE=y
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 9a81e72119da..f15d5db5dd67 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,4 +1,8 @@
# MIPS headers
+generated-y += syscall_table_32_o32.h
+generated-y += syscall_table_64_n32.h
+generated-y += syscall_table_64_n64.h
+generated-y += syscall_table_64_o32.h
generic-$(CONFIG_GENERIC_CSUM) += checksum.h
generic-y += current.h
generic-y += device.h
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index d4ea7a5b60cf..e8fbfd419151 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -59,12 +59,13 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
int temp; \
\
__asm__ __volatile__( \
+ " .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
"1: ll %0, %1 # atomic_" #op " \n" \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
"\t" __scbeqz " %0, 1b \n" \
- " .set mips0 \n" \
+ " .set pop \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} else { \
@@ -85,13 +86,14 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
int temp; \
\
__asm__ __volatile__( \
+ " .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
"1: ll %1, %2 # atomic_" #op "_return \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
"\t" __scbeqz " %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
- " .set mips0 \n" \
+ " .set pop \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
@@ -117,12 +119,13 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
int temp; \
\
__asm__ __volatile__( \
+ " .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
"1: ll %1, %2 # atomic_fetch_" #op " \n" \
" " #asm_op " %0, %1, %3 \n" \
" sc %0, %2 \n" \
"\t" __scbeqz " %0, 1b \n" \
- " .set mips0 \n" \
+ " .set pop \n" \
" move %0, %1 \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
@@ -188,17 +191,19 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
int temp;
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_LEVEL" \n"
"1: ll %1, %2 # atomic_sub_if_positive\n"
- " .set mips0 \n"
+ " .set pop \n"
" subu %0, %1, %3 \n"
" move %1, %0 \n"
" bltz %0, 1f \n"
+ " .set push \n"
" .set "MIPS_ISA_LEVEL" \n"
" sc %1, %2 \n"
"\t" __scbeqz " %1, 1b \n"
"1: \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter)
: "Ir" (i));
@@ -252,12 +257,13 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
long temp; \
\
__asm__ __volatile__( \
+ " .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
"1: lld %0, %1 # atomic64_" #op " \n" \
" " #asm_op " %0, %2 \n" \
" scd %0, %1 \n" \
"\t" __scbeqz " %0, 1b \n" \
- " .set mips0 \n" \
+ " .set pop \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} else { \
@@ -278,13 +284,14 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
long temp; \
\
__asm__ __volatile__( \
+ " .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
"1: lld %1, %2 # atomic64_" #op "_return\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
"\t" __scbeqz " %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
- " .set mips0 \n" \
+ " .set pop \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
@@ -310,13 +317,14 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
long temp; \
\
__asm__ __volatile__( \
+ " .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
"1: lld %1, %2 # atomic64_fetch_" #op "\n" \
" " #asm_op " %0, %1, %3 \n" \
" scd %0, %2 \n" \
"\t" __scbeqz " %0, 1b \n" \
" move %0, %1 \n" \
- " .set mips0 \n" \
+ " .set pop \n" \
: "=&r" (result), "=&r" (temp), \
"+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
@@ -382,6 +390,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
long temp;
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_LEVEL" \n"
"1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
@@ -390,7 +399,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
" scd %1, %2 \n"
"\t" __scbeqz " %1, 1b \n"
"1: \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter)
: "Ir" (i));
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index da1b8718861e..f2a840fb6a9a 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -58,12 +58,13 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
+ " .set push \n"
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # set_bit \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
@@ -80,11 +81,12 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # set_bit \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
@@ -110,12 +112,13 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
+ " .set push \n"
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # clear_bit \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (~(1UL << bit)));
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
@@ -132,11 +135,12 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # clear_bit \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (~(1UL << bit)));
} while (unlikely(!temp));
@@ -176,12 +180,13 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
unsigned long temp;
__asm__ __volatile__(
+ " .set push \n"
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # change_bit \n"
" xor %0, %2 \n"
" " __SC "%0, %1 \n"
" beqzl %0, 1b \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (1UL << bit));
} else if (kernel_uses_llsc) {
@@ -190,11 +195,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
do {
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # change_bit \n"
" xor %0, %2 \n"
" " __SC "%0, %1 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
@@ -223,13 +229,14 @@ static inline int test_and_set_bit(unsigned long nr,
unsigned long temp;
__asm__ __volatile__(
+ " .set push \n"
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
@@ -239,11 +246,12 @@ static inline int test_and_set_bit(unsigned long nr,
do {
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
@@ -277,13 +285,14 @@ static inline int test_and_set_bit_lock(unsigned long nr,
unsigned long temp;
__asm__ __volatile__(
+ " .set push \n"
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
@@ -293,11 +302,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
do {
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
@@ -332,6 +342,7 @@ static inline int test_and_clear_bit(unsigned long nr,
unsigned long temp;
__asm__ __volatile__(
+ " .set push \n"
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
@@ -339,7 +350,7 @@ static inline int test_and_clear_bit(unsigned long nr,
" " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
@@ -365,12 +376,13 @@ static inline int test_and_clear_bit(unsigned long nr,
do {
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
" " __SC "%2, %1 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
@@ -406,13 +418,14 @@ static inline int test_and_change_bit(unsigned long nr,
unsigned long temp;
__asm__ __volatile__(
+ " .set push \n"
" .set arch=r4000 \n"
"1: " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
@@ -422,11 +435,12 @@ static inline int test_and_change_bit(unsigned long nr,
do {
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " __SC "\t%2, %1 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 89e9fb7976fe..638de0c25249 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -47,9 +47,10 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
+ " .set push \n" \
" .set " MIPS_ISA_ARCH_LEVEL " \n" \
"1: " ld " %0, %2 # __xchg_asm \n" \
- " .set mips0 \n" \
+ " .set pop \n" \
" move $1, %z3 \n" \
" .set " MIPS_ISA_ARCH_LEVEL " \n" \
" " st " $1, %1 \n" \
@@ -117,10 +118,11 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
+ " .set push \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
"1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
- " .set mips0 \n" \
+ " .set pop \n" \
" move $1, %z4 \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
" " st " $1, %1 \n" \
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index cc2eb1b06050..f77e99f1722e 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -43,28 +43,16 @@
#undef barrier_before_unreachable
#define barrier_before_unreachable() asm volatile(".insn")
-#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
-#define GCC_IMM_ASM() "n"
-#define GCC_REG_ACCUM "$0"
+#if !defined(CONFIG_CC_IS_GCC) || \
+ (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
+# define GCC_OFF_SMALL_ASM() "ZC"
+#elif defined(CONFIG_CPU_MICROMIPS)
+# error "microMIPS compilation unsupported with GCC older than 4.9"
#else
-#define GCC_IMM_ASM() "rn"
-#define GCC_REG_ACCUM "accum"
+# define GCC_OFF_SMALL_ASM() "R"
#endif
#ifdef CONFIG_CPU_MIPSR6
-/* All MIPS R6 toolchains support the ZC constrain */
-#define GCC_OFF_SMALL_ASM() "ZC"
-#else
-#ifndef CONFIG_CPU_MICROMIPS
-#define GCC_OFF_SMALL_ASM() "R"
-#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
-#define GCC_OFF_SMALL_ASM() "ZC"
-#else
-#error "microMIPS compilation unsupported with GCC older than 4.9"
-#endif /* CONFIG_CPU_MICROMIPS */
-#endif /* CONFIG_CPU_MIPSR6 */
-
-#ifdef CONFIG_CPU_MIPSR6
#define MIPS_ISA_LEVEL "mips64r6"
#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
#define MIPS_ISA_LEVEL_RAW mips64r6
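
[Annotation] An aside on the constraint selected here: GCC_OFF_SMALL_ASM() supplies the constraint string for the ll/sc memory operands throughout this series. "ZC" limits the operand to an offset those instructions can actually encode on the configured ISA (9 bits on microMIPS and R6, 16 bits elsewhere) and is understood by clang and by GCC 4.9 or newer; "R" is the older, looser fallback. A hedged sketch of how the macro is consumed, mirroring the set_bit() style seen below:

/* Sketch; assumes <asm/compiler.h> is in scope so GCC_OFF_SMALL_ASM()
 * resolves to "ZC" or "R" per the selection above. */
static inline void sketch_set_flag(unsigned long *m, unsigned long bit)
{
	unsigned long temp;

	__asm__ __volatile__(
	"	.set	push		\n"
	"	.set	mips32r2	\n"
	"1:	ll	%0, %1		\n"
	"	or	%0, %2		\n"
	"	sc	%0, %1		\n"
	"	beqz	%0, 1b		\n"
	"	.set	pop		\n"
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
	: "ir" (1UL << bit));
}
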
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 0edba3e75747..701e525641b8 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -15,6 +15,7 @@
#include <cpu-feature-overrides.h>
#define __ase(ase) (cpu_data[0].ases & (ase))
+#define __isa(isa) (cpu_data[0].isa_level & (isa))
#define __opt(opt) (cpu_data[0].options & (opt))
/*
@@ -53,6 +54,18 @@
#define __isa_lt_and_opt(isa, opt) ((MIPS_ISA_REV < (isa)) && __opt(opt))
/*
+ * Similarly allow for ISA level checks that take into account knowledge of the
+ * ISA targeted by the kernel build, provided by MIPS_ISA_REV.
+ */
+#define __isa_ge_and_flag(isa, flag) ((MIPS_ISA_REV >= (isa)) && __isa(flag))
+#define __isa_ge_or_flag(isa, flag) ((MIPS_ISA_REV >= (isa)) || __isa(flag))
+#define __isa_lt_and_flag(isa, flag) ((MIPS_ISA_REV < (isa)) && __isa(flag))
+#define __isa_range(ge, lt) \
+ ((MIPS_ISA_REV >= (ge)) && (MIPS_ISA_REV < (lt)))
+#define __isa_range_or_flag(ge, lt, flag) \
+ (__isa_range(ge, lt) || ((MIPS_ISA_REV < (lt)) && __isa(flag)))
+
+/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
* This is true for all known MIPS systems.
*/
@@ -115,10 +128,15 @@
#endif
/* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
#ifndef cpu_has_fpu
-#define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU)
-#define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU)
+# ifdef CONFIG_MIPS_FP_SUPPORT
+# define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU)
+# define raw_cpu_has_fpu (raw_current_cpu_data.options & MIPS_CPU_FPU)
+# else
+# define cpu_has_fpu 0
+# define raw_cpu_has_fpu 0
+# endif
#else
-#define raw_cpu_has_fpu cpu_has_fpu
+# define raw_cpu_has_fpu cpu_has_fpu
#endif
#ifndef cpu_has_32fpr
#define cpu_has_32fpr __isa_ge_or_opt(1, MIPS_CPU_32FPR)
@@ -195,7 +213,9 @@
#endif
#ifndef cpu_has_mmips
-# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
+# if defined(__mips_micromips)
+# define cpu_has_mmips 1
+# elif defined(CONFIG_SYS_SUPPORTS_MICROMIPS)
# define cpu_has_mmips __opt(MIPS_CPU_MICROMIPS)
# else
# define cpu_has_mmips 0
@@ -246,48 +266,38 @@
#endif
#endif
-/* __builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r */
-#if !((defined(cpu_has_mips32r1) && cpu_has_mips32r1) || \
- (defined(cpu_has_mips32r2) && cpu_has_mips32r2) || \
- (defined(cpu_has_mips32r6) && cpu_has_mips32r6) || \
- (defined(cpu_has_mips64r1) && cpu_has_mips64r1) || \
- (defined(cpu_has_mips64r2) && cpu_has_mips64r2) || \
- (defined(cpu_has_mips64r6) && cpu_has_mips64r6))
-#define CPU_NO_EFFICIENT_FFS 1
-#endif
-
#ifndef cpu_has_mips_1
-# define cpu_has_mips_1 (!cpu_has_mips_r6)
+# define cpu_has_mips_1 (MIPS_ISA_REV < 6)
#endif
#ifndef cpu_has_mips_2
-# define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II)
+# define cpu_has_mips_2 __isa_lt_and_flag(6, MIPS_CPU_ISA_II)
#endif
#ifndef cpu_has_mips_3
-# define cpu_has_mips_3 (cpu_data[0].isa_level & MIPS_CPU_ISA_III)
+# define cpu_has_mips_3 __isa_lt_and_flag(6, MIPS_CPU_ISA_III)
#endif
#ifndef cpu_has_mips_4
-# define cpu_has_mips_4 (cpu_data[0].isa_level & MIPS_CPU_ISA_IV)
+# define cpu_has_mips_4 __isa_lt_and_flag(6, MIPS_CPU_ISA_IV)
#endif
#ifndef cpu_has_mips_5
-# define cpu_has_mips_5 (cpu_data[0].isa_level & MIPS_CPU_ISA_V)
+# define cpu_has_mips_5 __isa_lt_and_flag(6, MIPS_CPU_ISA_V)
#endif
#ifndef cpu_has_mips32r1
-# define cpu_has_mips32r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R1)
+# define cpu_has_mips32r1 __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M32R1)
#endif
#ifndef cpu_has_mips32r2
-# define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
+# define cpu_has_mips32r2 __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M32R2)
#endif
#ifndef cpu_has_mips32r6
-# define cpu_has_mips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
+# define cpu_has_mips32r6 __isa_ge_or_flag(6, MIPS_CPU_ISA_M32R6)
#endif
#ifndef cpu_has_mips64r1
-# define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
+# define cpu_has_mips64r1 __isa_range_or_flag(1, 6, MIPS_CPU_ISA_M64R1)
#endif
#ifndef cpu_has_mips64r2
-# define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
+# define cpu_has_mips64r2 __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M64R2)
#endif
#ifndef cpu_has_mips64r6
-# define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
+# define cpu_has_mips64r6 __isa_ge_and_flag(6, MIPS_CPU_ISA_M64R6)
#endif
/*
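
[Annotation] The new __isa_*_flag() helpers above let the cpu_has_mips* macros collapse to compile-time constants whenever MIPS_ISA_REV, the ISA the kernel itself is built for, already decides the answer, so the runtime isa_level test only survives where it is genuinely needed. A worked expansion under the assumption of a MIPS32r2 kernel build, with hypothetical stand-in values, runnable as plain userspace C:

#include <stdio.h>

/* Stand-ins for illustration; the real values live in the asm headers. */
#define MIPS_ISA_REV		2		/* kernel built for r2 */
#define MIPS_CPU_ISA_M32R2	0x00000040
static unsigned int isa_level;			/* would come from CPU probing */
#define __isa(flag)		(isa_level & (flag))

#define __isa_range(ge, lt) \
	((MIPS_ISA_REV >= (ge)) && (MIPS_ISA_REV < (lt)))
#define __isa_range_or_flag(ge, lt, flag) \
	(__isa_range(ge, lt) || ((MIPS_ISA_REV < (lt)) && __isa(flag)))

int main(void)
{
	/* __isa_range(2, 6) is (2 >= 2) && (2 < 6) == 1, so the whole
	 * expression folds to the constant 1 and isa_level is never read. */
	printf("cpu_has_mips32r2 = %d\n",
	       __isa_range_or_flag(2, 6, MIPS_CPU_ISA_M32R2));
	return 0;
}
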
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index a41059d47d31..ed7ffe4e63a3 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -50,7 +50,7 @@ struct guest_info {
#define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
struct cpuinfo_mips {
- unsigned long asid_cache;
+ u64 asid_cache;
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
unsigned long asid_mask;
#endif
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index dacbdb84516a..532b49b1dbb3 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -248,8 +248,9 @@
#define PRID_REV_LOONGSON3A_R1 0x0005
#define PRID_REV_LOONGSON3B_R1 0x0006
#define PRID_REV_LOONGSON3B_R2 0x0007
-#define PRID_REV_LOONGSON3A_R2 0x0008
+#define PRID_REV_LOONGSON3A_R2_0 0x0008
#define PRID_REV_LOONGSON3A_R3_0 0x0009
+#define PRID_REV_LOONGSON3A_R2_1 0x000c
#define PRID_REV_LOONGSON3A_R3_1 0x000d
/*
diff --git a/arch/mips/include/asm/dsemul.h b/arch/mips/include/asm/dsemul.h
index b47a97527673..6d5b781ad518 100644
--- a/arch/mips/include/asm/dsemul.h
+++ b/arch/mips/include/asm/dsemul.h
@@ -52,7 +52,14 @@ extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
*
* Return: True if an emulation frame was returned from, else false.
*/
+#ifdef CONFIG_MIPS_FP_SUPPORT
extern bool do_dsemulret(struct pt_regs *xcp);
+#else
+static inline bool do_dsemulret(struct pt_regs *xcp)
+{
+ return false;
+}
+#endif
/**
* dsemul_thread_cleanup() - Cleanup thread 'emulation' frame
@@ -63,8 +70,14 @@ extern bool do_dsemulret(struct pt_regs *xcp);
*
* Return: True if a frame was freed, else false.
*/
+#ifdef CONFIG_MIPS_FP_SUPPORT
extern bool dsemul_thread_cleanup(struct task_struct *tsk);
-
+#else
+static inline bool dsemul_thread_cleanup(struct task_struct *tsk)
+{
+ return false;
+}
+#endif
/**
* dsemul_thread_rollback() - Rollback from an 'emulation' frame
* @regs: User thread register context.
@@ -77,7 +90,14 @@ extern bool dsemul_thread_cleanup(struct task_struct *tsk);
*
* Return: True if a frame was exited, else false.
*/
+#ifdef CONFIG_MIPS_FP_SUPPORT
extern bool dsemul_thread_rollback(struct pt_regs *regs);
+#else
+static inline bool dsemul_thread_rollback(struct pt_regs *regs)
+{
+ return false;
+}
+#endif
/**
* dsemul_mm_cleanup() - Cleanup per-mm delay slot 'emulation' state
@@ -87,6 +107,13 @@ extern bool dsemul_thread_rollback(struct pt_regs *regs);
* for delay slot 'emulation' book-keeping is freed. This is to be called
* before @mm is freed in order to avoid memory leaks.
*/
+#ifdef CONFIG_MIPS_FP_SUPPORT
extern void dsemul_mm_cleanup(struct mm_struct *mm);
+#else
+static inline void dsemul_mm_cleanup(struct mm_struct *mm)
+{
+ /* no-op */
+}
+#endif
#endif /* __MIPS_ASM_DSEMUL_H__ */
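
[Annotation] Each function here follows the same config-stub idiom: the extern declaration stands when CONFIG_MIPS_FP_SUPPORT=y, and a static inline stub with a constant result replaces it otherwise, so call sites compile unchanged either way. A hypothetical caller, sketched in kernel context (assuming <asm/dsemul.h> and <asm/ptrace.h>) to show why the stubs return false:

/* Invented call site, for illustration only: with
 * CONFIG_MIPS_FP_SUPPORT=n, do_dsemulret() is the stub returning a
 * constant false, the branch is statically dead, and the compiler
 * drops it entirely. */
static void sketch_exception_return(struct pt_regs *regs)
{
	if (do_dsemulret(regs))
		return;	/* resumed from a delay slot emulation frame */

	/* ... ordinary exception return path ... */
}
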
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h
index fc467767329b..c5d147744423 100644
--- a/arch/mips/include/asm/edac.h
+++ b/arch/mips/include/asm/edac.h
@@ -21,12 +21,13 @@ static inline void edac_atomic_scrub(void *va, u32 size)
*/
__asm__ __volatile__ (
+ " .set push \n"
" .set mips2 \n"
"1: ll %0, %1 # edac_atomic_scrub \n"
" addu %0, $0 \n"
" sc %0, %1 \n"
" beqz %0, 1b \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
: GCC_OFF_SMALL_ASM() (*virt_addr));
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 0eb1a75be105..f8f44b1a6cbb 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -481,6 +481,8 @@ struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
struct arch_elf_state {
int nan_2008;
int fp_abi;
@@ -497,19 +499,35 @@ struct arch_elf_state {
.overall_fp_mode = -1, \
}
-/* Whether to accept legacy-NaN and 2008-NaN user binaries. */
-extern bool mips_use_nan_legacy;
-extern bool mips_use_nan_2008;
-
extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
bool is_interp, struct arch_elf_state *state);
extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
struct arch_elf_state *state);
+/* Whether to accept legacy-NaN and 2008-NaN user binaries. */
+extern bool mips_use_nan_legacy;
+extern bool mips_use_nan_2008;
+
extern void mips_set_personality_nan(struct arch_elf_state *state);
extern void mips_set_personality_fp(struct arch_elf_state *state);
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+struct arch_elf_state;
+
+static inline void mips_set_personality_nan(struct arch_elf_state *state)
+{
+ /* no-op */
+}
+
+static inline void mips_set_personality_fp(struct arch_elf_state *state)
+{
+ /* no-op */
+}
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
+
#define elf_read_implies_exec(ex, stk) mips_elf_read_implies_exec(&(ex), stk)
extern int mips_elf_read_implies_exec(void *elf_ex, int exstack);
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index a2813fe381cf..42bc2bbbd3d7 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -30,13 +30,6 @@
#include <asm/mips_mt.h>
#endif
-struct sigcontext;
-struct sigcontext32;
-
-extern void _init_fpu(unsigned int);
-extern void _save_fp(struct task_struct *);
-extern void _restore_fp(struct task_struct *);
-
/*
* This enum specifies a mode in which we want the FPU to operate, for cores
* which implement the Status.FR bit. Note that the bottom bit of the value
@@ -51,6 +44,11 @@ enum fpu_mode {
#define FPU_FR_MASK 0x1
};
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+extern void _save_fp(struct task_struct *);
+extern void _restore_fp(struct task_struct *);
+
#define __disable_fpu() \
do { \
clear_c0_status(ST0_CU1); \
@@ -198,42 +196,36 @@ static inline void lose_fpu(int save)
preempt_enable();
}
-static inline int init_fpu(void)
+/**
+ * init_fp_ctx() - Initialize task FP context
+ * @target: The task whose FP context should be initialized.
+ *
+ * Initializes the FP context of the target task to sane default values if that
+ * target task does not already have valid FP context. Once the context has
+ * been initialized, the task will be marked as having used FP & thus having
+ * valid FP context.
+ *
+ * Returns: true if context is initialized, else false.
+ */
+static inline bool init_fp_ctx(struct task_struct *target)
{
- unsigned int fcr31 = current->thread.fpu.fcr31;
- int ret = 0;
+ /* If FP has been used then the target already has context */
+ if (tsk_used_math(target))
+ return false;
- if (cpu_has_fpu) {
- unsigned int config5;
-
- ret = __own_fpu();
- if (ret)
- return ret;
+ /* Begin with data registers set to all 1s... */
+ memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
- if (!cpu_has_fre) {
- _init_fpu(fcr31);
+ /* FCSR has been preset by `mips_set_personality_nan'. */
- return 0;
- }
-
- /*
- * Ensure FRE is clear whilst running _init_fpu, since
- * single precision FP instructions are used. If FRE
- * was set then we'll just end up initialising all 32
- * 64b registers.
- */
- config5 = clear_c0_config5(MIPS_CONF5_FRE);
- enable_fpu_hazard();
+ /*
+ * Record that the target has "used" math, such that the context
+ * just initialised, and any modifications made by the caller,
+ * aren't discarded.
+ */
+ set_stopped_child_used_math(target);
- _init_fpu(fcr31);
-
- /* Restore FRE */
- write_c0_config5(config5);
- enable_fpu_hazard();
- } else
- fpu_emulator_init_fpu();
-
- return ret;
+ return true;
}
static inline void save_fp(struct task_struct *tsk)
@@ -260,4 +252,81 @@ static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
return tsk->thread.fpu.fpr;
}
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+/*
+ * When FP support is disabled we provide only a minimal set of stub functions
+ * to avoid callers needing to care too much about CONFIG_MIPS_FP_SUPPORT.
+ */
+
+static inline int __enable_fpu(enum fpu_mode mode)
+{
+ return SIGILL;
+}
+
+static inline void __disable_fpu(void)
+{
+ /* no-op */
+}
+
+
+static inline int is_fpu_owner(void)
+{
+ return 0;
+}
+
+static inline void clear_fpu_owner(void)
+{
+ /* no-op */
+}
+
+static inline int own_fpu_inatomic(int restore)
+{
+ return SIGILL;
+}
+
+static inline int own_fpu(int restore)
+{
+ return SIGILL;
+}
+
+static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
+{
+ /* no-op */
+}
+
+static inline void lose_fpu(int save)
+{
+ /* no-op */
+}
+
+static inline bool init_fp_ctx(struct task_struct *target)
+{
+ return false;
+}
+
+/*
+ * The following functions should only be called in paths where we know that FP
+ * support is enabled, typically a path where own_fpu() or __enable_fpu() have
+ * returned successfully. When CONFIG_MIPS_FP_SUPPORT=n it is known at compile
+ * time that this should never happen, so calls to these functions should be
+ * optimized away & never actually be emitted.
+ */
+
+extern void save_fp(struct task_struct *tsk)
+ __compiletime_error("save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
+
+extern void _save_fp(struct task_struct *)
+ __compiletime_error("_save_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
+
+extern void restore_fp(struct task_struct *tsk)
+ __compiletime_error("restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
+
+extern void _restore_fp(struct task_struct *)
+ __compiletime_error("_restore_fp() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
+
+extern union fpureg *get_fpu_regs(struct task_struct *tsk)
+ __compiletime_error("get_fpu_regs() should not be called when CONFIG_MIPS_FP_SUPPORT=n");
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
#endif /* _ASM_FPU_H */
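
[Annotation] The save_fp()/restore_fp() declarations above rely on __compiletime_error(), which under gcc becomes __attribute__((error(...))): the diagnostic fires only if a call to the function survives dead code elimination, i.e. only if FP code is actually reachable in a CONFIG_MIPS_FP_SUPPORT=n build. A standalone sketch of the mechanism, with invented names:

/* Build with: gcc -O2 -c sketch.c */
#define sketch_compiletime_error(msg) __attribute__((error(msg)))

extern void sketch_save_fp(void)
	sketch_compiletime_error("sketch_save_fp() called in a no-FP build");

void ok(void)
{
	if (0)			/* statically dead: folded away, no error */
		sketch_save_fp();
}

void bad(void)
{
	sketch_save_fp();	/* reachable: gcc -O2 fails the build here */
}
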
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index b36097d3cbf4..7e233055f7b4 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -188,17 +188,6 @@ int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc);
-#define SIGNALLING_NAN 0x7ff800007ff80000LL
-
-static inline void fpu_emulator_init_fpu(void)
-{
- struct task_struct *t = current;
- int i;
-
- for (i = 0; i < 32; i++)
- set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
-}
-
/*
* Mask the FCSR Cause bits according to the Enable bits, observing
* that Unimplemented is always enabled.
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index a9e61ea54ca9..8eff134b3a43 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -24,9 +24,10 @@
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
+ " .set push \n" \
" .set arch=r4000 \n" \
"1: ll %1, %4 # __futex_atomic_op \n" \
- " .set mips0 \n" \
+ " .set pop \n" \
" " insn " \n" \
" .set arch=r4000 \n" \
"2: sc $1, %2 \n" \
@@ -35,7 +36,6 @@
"3: \n" \
" .insn \n" \
" .set pop \n" \
- " .set mips0 \n" \
" .section .fixup,\"ax\" \n" \
"4: li %0, %6 \n" \
" j 3b \n" \
@@ -53,9 +53,10 @@
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
+ " .set push \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
"1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
- " .set mips0 \n" \
+ " .set pop \n" \
" " insn " \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
"2: "user_sc("$1", "%2")" \n" \
@@ -64,7 +65,6 @@
"3: \n" \
" .insn \n" \
" .set pop \n" \
- " .set mips0 \n" \
" .section .fixup,\"ax\" \n" \
"4: li %0, %6 \n" \
" j 3b \n" \
@@ -137,10 +137,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
+ " .set push \n"
" .set arch=r4000 \n"
"1: ll %1, %3 \n"
" bne %1, %z4, 3f \n"
- " .set mips0 \n"
+ " .set pop \n"
" move $1, %z5 \n"
" .set arch=r4000 \n"
"2: sc $1, %2 \n"
@@ -166,10 +167,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
"1: "user_ll("%1", "%3")" \n"
" bne %1, %z4, 3f \n"
- " .set mips0 \n"
+ " .set pop \n"
" move $1, %z5 \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
"2: "user_sc("$1", "%2")" \n"
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index e0fecf206f2c..0fa27446869a 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -66,10 +66,11 @@ do { \
unsigned long tmp; \
\
__asm__ __volatile__( \
+ " .set push \n" \
" .set "MIPS_ISA_LEVEL" \n" \
" dla %0, 1f \n" \
" jr.hb %0 \n" \
- " .set mips0 \n" \
+ " .set pop \n" \
"1: \n" \
: "=r" (tmp)); \
} while (0)
@@ -141,10 +142,11 @@ do { \
unsigned long tmp; \
\
__asm__ __volatile__( \
+ " .set push \n" \
" .set mips64r2 \n" \
" dla %0, 1f \n" \
" jr.hb %0 \n" \
- " .set mips0 \n" \
+ " .set pop \n" \
"1: \n" \
: "=r" (tmp)); \
} while (0)
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 266257d56fb6..845fbbc7a2e3 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -218,6 +218,18 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
}
/*
+ * ioremap_prot - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
+
+ * ioremap_prot gives the caller control over cache coherency attributes (CCA)
+ */
+static inline void __iomem *ioremap_prot(phys_addr_t offset,
+ unsigned long size, unsigned long prot_val) {
+ return __ioremap_mode(offset, size, prot_val & _CACHE_MASK);
+}
+
+/*
* ioremap - map bus memory into CPU space
* @offset: bus address of the memory
* @size: size of the resource to map
@@ -342,13 +354,14 @@ static inline void pfx##write##bwlq(type val, \
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
- ".set arch=r4000" "\t\t# __writeq""\n\t" \
+ ".set push" "\t\t# __writeq""\n\t" \
+ ".set arch=r4000" "\n\t" \
"dsll32 %L0, %L0, 0" "\n\t" \
"dsrl32 %L0, %L0, 0" "\n\t" \
"dsll32 %M0, %M0, 0" "\n\t" \
"or %L0, %L0, %M0" "\n\t" \
"sd %L0, %2" "\n\t" \
- ".set mips0" "\n" \
+ ".set pop" "\n" \
: "=r" (__tmp) \
: "0" (__val), "m" (*__mem)); \
if (irq) \
@@ -375,11 +388,12 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
- ".set arch=r4000" "\t\t# __readq" "\n\t" \
+ ".set push" "\t\t# __readq" "\n\t" \
+ ".set arch=r4000" "\n\t" \
"ld %L0, %1" "\n\t" \
"dsra32 %M0, %L0, 0" "\n\t" \
"sll %L0, %L0, 0" "\n\t" \
- ".set mips0" "\n" \
+ ".set pop" "\n" \
: "=r" (__val) \
: "m" (*__mem)); \
if (irq) \
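
[Annotation] A usage sketch for the new helper; the driver, the addresses and the CCA choice are all hypothetical. Where plain ioremap() always maps uncached, ioremap_prot() lets the caller pass a MIPS cache coherency attribute, which the _CACHE_MASK above extracts:

/* Sketch; kernel context, assumes <asm/io.h> and a platform that
 * defines _CACHE_UNCACHED_ACCELERATED in <asm/pgtable-bits.h>. */
static void __iomem *sketch_map_fb(phys_addr_t base, unsigned long size)
{
	/* Map a hypothetical framebuffer with the write-gathering CCA
	 * rather than the default plain-uncached mapping. */
	return ioremap_prot(base, size, _CACHE_UNCACHED_ACCELERATED);
}
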
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 2c1c53d12179..d2abd98471e8 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -411,11 +411,12 @@ static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+m" (*reg)
: "r" (val));
} while (unlikely(!temp));
@@ -427,11 +428,12 @@ static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+m" (*reg)
: "r" (~val));
} while (unlikely(!temp));
@@ -444,12 +446,13 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" and %0, %2 \n"
" or %0, %3 \n"
" " __SC "%0, %1 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (temp), "+m" (*reg)
: "r" (~change), "r" (val & change));
} while (unlikely(!temp));
@@ -933,7 +936,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index ac8264eca1e9..02783e141c32 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -35,13 +35,14 @@ static __inline__ long local_add_return(long i, local_t * l)
unsigned long temp;
__asm__ __volatile__(
+ " .set push \n"
" .set arch=r4000 \n"
"1:" __LL "%1, %2 # local_add_return \n"
" addu %0, %1, %3 \n"
__SC "%0, %2 \n"
" beqzl %0, 1b \n"
" addu %0, %1, %3 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter)
: "memory");
@@ -49,13 +50,14 @@ static __inline__ long local_add_return(long i, local_t * l)
unsigned long temp;
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
"1:" __LL "%1, %2 # local_add_return \n"
" addu %0, %1, %3 \n"
__SC "%0, %2 \n"
" beqz %0, 1b \n"
" addu %0, %1, %3 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter)
: "memory");
@@ -80,13 +82,14 @@ static __inline__ long local_sub_return(long i, local_t * l)
unsigned long temp;
__asm__ __volatile__(
+ " .set push \n"
" .set arch=r4000 \n"
"1:" __LL "%1, %2 # local_sub_return \n"
" subu %0, %1, %3 \n"
__SC "%0, %2 \n"
" beqzl %0, 1b \n"
" subu %0, %1, %3 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter)
: "memory");
@@ -94,13 +97,14 @@ static __inline__ long local_sub_return(long i, local_t * l)
unsigned long temp;
__asm__ __volatile__(
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
"1:" __LL "%1, %2 # local_sub_return \n"
" subu %0, %1, %3 \n"
__SC "%0, %2 \n"
" beqz %0, 1b \n"
" subu %0, %1, %3 \n"
- " .set mips0 \n"
+ " .set pop \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter)
: "memory");
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
index cbac603ced19..b5e288a12dfe 100644
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -31,7 +31,7 @@
/* Enable STFill Buffer */
mfc0 t0, CP0_PRID
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
- slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2)
+ slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0)
bnez t0, 1f
mfc0 t0, CP0_CONFIG6
or t0, 0x100
@@ -60,7 +60,7 @@
/* Enable STFill Buffer */
mfc0 t0, CP0_PRID
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
- slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2)
+ slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2_0)
bnez t0, 1f
mfc0 t0, CP0_CONFIG6
or t0, 0x100
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index c9f7e231e66b..59c8b11c090e 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -21,6 +21,7 @@
#define NODE3_ADDRSPACE_OFFSET 0x300000000000UL
#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
+#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)
#define LEVELS_PER_SLICE 128
diff --git a/arch/mips/include/asm/mach-rc32434/rb.h b/arch/mips/include/asm/mach-rc32434/rb.h
index aac8ce8902e7..5dfd4d66d6fc 100644
--- a/arch/mips/include/asm/mach-rc32434/rb.h
+++ b/arch/mips/include/asm/mach-rc32434/rb.h
@@ -71,12 +71,6 @@ struct korina_device {
struct net_device *dev;
};
-struct cf_device {
- int gpio_pin;
- void *dev;
- struct gendisk *gd;
-};
-
struct mpmc_device {
unsigned char state;
spinlock_t lock;
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 212336b7c0f4..be4cf9d477be 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -255,12 +255,12 @@ static inline unsigned int dmt(void)
static inline void __raw_emt(void)
{
__asm__ __volatile__(
+ " .set push \n"
" .set noreorder \n"
" .set mips32r2 \n"
" .word 0x41600be1 # emt \n"
" ehb \n"
- " .set mips0 \n"
- " .set reorder");
+ " .set pop");
}
/* enable multi-threaded execution if previous suggested it should be.
@@ -277,9 +277,10 @@ static inline void emt(int previous)
static inline void ehb(void)
{
__asm__ __volatile__(
+ " .set push \n"
" .set mips32r2 \n"
" ehb \n"
- " .set mips0 \n");
+ " .set pop \n");
}
#define mftc0(rt,sel) \
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 341a02c92985..402b80af91aa 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -1345,9 +1345,10 @@ do { \
: "=r" (__res)); \
else \
__asm__ vol( \
+ ".set\tpush\n\t" \
".set\tmips32\n\t" \
"mfc0\t%0, " #source ", " #sel "\n\t" \
- ".set\tmips0\n\t" \
+ ".set\tpop\n\t" \
: "=r" (__res)); \
__res; \
})
@@ -1358,15 +1359,17 @@ do { \
__res = __read_64bit_c0_split(source, sel, vol); \
else if (sel == 0) \
__asm__ vol( \
+ ".set\tpush\n\t" \
".set\tmips3\n\t" \
"dmfc0\t%0, " #source "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: "=r" (__res)); \
else \
__asm__ vol( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
"dmfc0\t%0, " #source ", " #sel "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: "=r" (__res)); \
__res; \
})
@@ -1391,9 +1394,10 @@ do { \
: : "Jr" ((unsigned int)(value))); \
else \
__asm__ __volatile__( \
+ ".set\tpush\n\t" \
".set\tmips32\n\t" \
"mtc0\t%z0, " #register ", " #sel "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: : "Jr" ((unsigned int)(value))); \
} while (0)
@@ -1403,15 +1407,17 @@ do { \
__write_64bit_c0_split(register, sel, value); \
else if (sel == 0) \
__asm__ __volatile__( \
+ ".set\tpush\n\t" \
".set\tmips3\n\t" \
"dmtc0\t%z0, " #register "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: : "Jr" (value)); \
else \
__asm__ __volatile__( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
"dmtc0\t%z0, " #register ", " #sel "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: : "Jr" (value)); \
} while (0)
@@ -1463,19 +1469,21 @@ do { \
local_irq_save(__flags); \
if (sel == 0) \
__asm__ vol( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
"dmfc0\t%L0, " #source "\n\t" \
"dsra\t%M0, %L0, 32\n\t" \
"sll\t%L0, %L0, 0\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: "=r" (__val)); \
else \
__asm__ vol( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
"dmfc0\t%L0, " #source ", " #sel "\n\t" \
"dsra\t%M0, %L0, 32\n\t" \
"sll\t%L0, %L0, 0\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: "=r" (__val)); \
local_irq_restore(__flags); \
\
@@ -1498,23 +1506,25 @@ do { \
: "+r" (__tmp)); \
else if (sel == 0) \
__asm__ __volatile__( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
"dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
"dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: "+r" (__tmp)); \
else \
__asm__ __volatile__( \
+ ".set\tpush\n\t" \
".set\tmips64\n\t" \
"dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
"dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc0\t%L0, " #source ", " #sel "\n\t" \
- ".set\tmips0" \
+ ".set\tpop" \
: "+r" (__tmp)); \
local_irq_restore(__flags); \
} while (0)
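These split read/write macros run on 32-bit kernels where a 64-bit value occupies a register pair; the %L0/%M0 operand modifiers name the low and high registers of operand 0. Expanded by hand, a 64-bit coprocessor-0 read looks roughly like the sketch below (same assumptions as the macros; EntryLo0 chosen as an arbitrary example register):

    static inline unsigned long long read_c0_reg64_sketch(void)
    {
    	unsigned long long val;

    	__asm__ __volatile__(
    	"	.set	push			\n"
    	"	.set	mips64			\n" /* allow doubleword insns  */
    	"	dmfc0	%L0, $2			\n" /* 64-bit read (EntryLo0)  */
    	"	dsra	%M0, %L0, 32		\n" /* high half into %M0      */
    	"	sll	%L0, %L0, 0		\n" /* sign-extend low 32 bits */
    	"	.set	pop			\n"
    	: "=r" (val));

    	return val;
    }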
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 0740be7d5d4a..88a108ce62c1 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -7,9 +7,8 @@
#include <linux/wait.h>
typedef struct {
- unsigned long asid[NR_CPUS];
+ u64 asid[NR_CPUS];
void *vdso;
- atomic_t fp_mode_switching;
/* lock to be held whilst modifying fp_bd_emupage_allocmap */
spinlock_t bd_emupage_lock;
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 94414561de0e..a589585be21b 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -76,14 +76,14 @@ extern unsigned long pgd_current[];
* All unused by hardware upper bits will be considered
* as a software asid extension.
*/
-static unsigned long asid_version_mask(unsigned int cpu)
+static inline u64 asid_version_mask(unsigned int cpu)
{
unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
- return ~(asid_mask | (asid_mask - 1));
+ return ~(u64)(asid_mask | (asid_mask - 1));
}
-static unsigned long asid_first_version(unsigned int cpu)
+static inline u64 asid_first_version(unsigned int cpu)
{
return ~asid_version_mask(cpu) + 1;
}
@@ -102,14 +102,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
- unsigned long asid = asid_cache(cpu);
+ u64 asid = asid_cache(cpu);
if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
if (cpu_has_vtag_icache)
flush_icache_all();
local_flush_tlb_all(); /* start new asid cycle */
- if (!asid) /* fix version if needed */
- asid = asid_first_version(cpu);
}
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
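Together with the mmu.h change above, this widens the ASID cache to 64 bits: the hardware ASID sits in the low bits and everything above asid_version_mask() is a software version number. A 64-bit counter cannot plausibly wrap back to zero within a machine's lifetime, which is why the old `if (!asid)` fix-up is deleted rather than converted. The remaining roll-over logic, as plain C (sketch; assumes an ASID increment of 1 and a contiguous low-bit hardware mask):

    #include <stdint.h>

    static uint64_t next_asid(uint64_t cache, uint64_t hw_mask,
    			  void (*flush_all_tlbs)(void))
    {
    	cache += 1;			/* cpu_asid_inc() assumed == 1 */
    	if (!(cache & hw_mask))		/* hw ASIDs exhausted...       */
    		flush_all_tlbs();	/* ...start a new version      */
    	return cache;
    }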
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
index f085fba41da5..b826b8473e95 100644
--- a/arch/mips/include/asm/mmzone.h
+++ b/arch/mips/include/asm/mmzone.h
@@ -7,7 +7,18 @@
#define _ASM_MMZONE_H_
#include <asm/page.h>
-#include <mmzone.h>
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+# include <mmzone.h>
+#endif
+
+#ifndef pa_to_nid
+#define pa_to_nid(addr) 0
+#endif
+
+#ifndef nid_to_addrbase
+#define nid_to_addrbase(nid) 0
+#endif
#ifdef CONFIG_DISCONTIGMEM
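With these fallbacks the node helpers are unconditionally available, so callers need no #ifdef of their own; on single-node configurations both macros collapse to constants the compiler folds away. E.g. (sketch):

    /* valid on any config after this change */
    static inline int nid_of_pa(unsigned long pa)
    {
    	return pa_to_nid(pa);	/* constant 0 unless the platform
    				   mmzone.h overrides it */
    }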
diff --git a/arch/mips/include/asm/octeon/cvmx-agl-defs.h b/arch/mips/include/asm/octeon/cvmx-agl-defs.h
index 542ee09510b3..3635ab384447 100644
--- a/arch/mips/include/asm/octeon/cvmx-agl-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-agl-defs.h
@@ -171,7 +171,6 @@ union cvmx_agl_gmx_bad_reg {
uint64_t reserved_38_63:26;
#endif
} cn52xx;
- struct cvmx_agl_gmx_bad_reg_cn52xx cn52xxp1;
struct cvmx_agl_gmx_bad_reg_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_35_63:29;
@@ -199,13 +198,6 @@ union cvmx_agl_gmx_bad_reg {
uint64_t reserved_35_63:29;
#endif
} cn56xx;
- struct cvmx_agl_gmx_bad_reg_cn56xx cn56xxp1;
- struct cvmx_agl_gmx_bad_reg_s cn61xx;
- struct cvmx_agl_gmx_bad_reg_s cn63xx;
- struct cvmx_agl_gmx_bad_reg_s cn63xxp1;
- struct cvmx_agl_gmx_bad_reg_s cn66xx;
- struct cvmx_agl_gmx_bad_reg_s cn68xx;
- struct cvmx_agl_gmx_bad_reg_s cn68xxp1;
};
union cvmx_agl_gmx_bist {
@@ -228,15 +220,6 @@ union cvmx_agl_gmx_bist {
uint64_t reserved_10_63:54;
#endif
} cn52xx;
- struct cvmx_agl_gmx_bist_cn52xx cn52xxp1;
- struct cvmx_agl_gmx_bist_cn52xx cn56xx;
- struct cvmx_agl_gmx_bist_cn52xx cn56xxp1;
- struct cvmx_agl_gmx_bist_s cn61xx;
- struct cvmx_agl_gmx_bist_s cn63xx;
- struct cvmx_agl_gmx_bist_s cn63xxp1;
- struct cvmx_agl_gmx_bist_s cn66xx;
- struct cvmx_agl_gmx_bist_s cn68xx;
- struct cvmx_agl_gmx_bist_s cn68xxp1;
};
union cvmx_agl_gmx_drv_ctl {
@@ -270,8 +253,6 @@ union cvmx_agl_gmx_drv_ctl {
uint64_t reserved_49_63:15;
#endif
} s;
- struct cvmx_agl_gmx_drv_ctl_s cn52xx;
- struct cvmx_agl_gmx_drv_ctl_s cn52xxp1;
struct cvmx_agl_gmx_drv_ctl_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
@@ -289,7 +270,6 @@ union cvmx_agl_gmx_drv_ctl {
uint64_t reserved_17_63:47;
#endif
} cn56xx;
- struct cvmx_agl_gmx_drv_ctl_cn56xx cn56xxp1;
};
union cvmx_agl_gmx_inf_mode {
@@ -305,10 +285,6 @@ union cvmx_agl_gmx_inf_mode {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_agl_gmx_inf_mode_s cn52xx;
- struct cvmx_agl_gmx_inf_mode_s cn52xxp1;
- struct cvmx_agl_gmx_inf_mode_s cn56xx;
- struct cvmx_agl_gmx_inf_mode_s cn56xxp1;
};
union cvmx_agl_gmx_prtx_cfg {
@@ -363,15 +339,6 @@ union cvmx_agl_gmx_prtx_cfg {
uint64_t reserved_6_63:58;
#endif
} cn52xx;
- struct cvmx_agl_gmx_prtx_cfg_cn52xx cn52xxp1;
- struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xx;
- struct cvmx_agl_gmx_prtx_cfg_cn52xx cn56xxp1;
- struct cvmx_agl_gmx_prtx_cfg_s cn61xx;
- struct cvmx_agl_gmx_prtx_cfg_s cn63xx;
- struct cvmx_agl_gmx_prtx_cfg_s cn63xxp1;
- struct cvmx_agl_gmx_prtx_cfg_s cn66xx;
- struct cvmx_agl_gmx_prtx_cfg_s cn68xx;
- struct cvmx_agl_gmx_prtx_cfg_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam0 {
@@ -383,16 +350,6 @@ union cvmx_agl_gmx_rxx_adr_cam0 {
uint64_t adr:64;
#endif
} s;
- struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xx;
- struct cvmx_agl_gmx_rxx_adr_cam0_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xx;
- struct cvmx_agl_gmx_rxx_adr_cam0_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam0_s cn61xx;
- struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xx;
- struct cvmx_agl_gmx_rxx_adr_cam0_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam0_s cn66xx;
- struct cvmx_agl_gmx_rxx_adr_cam0_s cn68xx;
- struct cvmx_agl_gmx_rxx_adr_cam0_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam1 {
@@ -404,16 +361,6 @@ union cvmx_agl_gmx_rxx_adr_cam1 {
uint64_t adr:64;
#endif
} s;
- struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xx;
- struct cvmx_agl_gmx_rxx_adr_cam1_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xx;
- struct cvmx_agl_gmx_rxx_adr_cam1_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam1_s cn61xx;
- struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xx;
- struct cvmx_agl_gmx_rxx_adr_cam1_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam1_s cn66xx;
- struct cvmx_agl_gmx_rxx_adr_cam1_s cn68xx;
- struct cvmx_agl_gmx_rxx_adr_cam1_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam2 {
@@ -425,16 +372,6 @@ union cvmx_agl_gmx_rxx_adr_cam2 {
uint64_t adr:64;
#endif
} s;
- struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xx;
- struct cvmx_agl_gmx_rxx_adr_cam2_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xx;
- struct cvmx_agl_gmx_rxx_adr_cam2_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam2_s cn61xx;
- struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xx;
- struct cvmx_agl_gmx_rxx_adr_cam2_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam2_s cn66xx;
- struct cvmx_agl_gmx_rxx_adr_cam2_s cn68xx;
- struct cvmx_agl_gmx_rxx_adr_cam2_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam3 {
@@ -446,16 +383,6 @@ union cvmx_agl_gmx_rxx_adr_cam3 {
uint64_t adr:64;
#endif
} s;
- struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xx;
- struct cvmx_agl_gmx_rxx_adr_cam3_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xx;
- struct cvmx_agl_gmx_rxx_adr_cam3_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam3_s cn61xx;
- struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xx;
- struct cvmx_agl_gmx_rxx_adr_cam3_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam3_s cn66xx;
- struct cvmx_agl_gmx_rxx_adr_cam3_s cn68xx;
- struct cvmx_agl_gmx_rxx_adr_cam3_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam4 {
@@ -467,16 +394,6 @@ union cvmx_agl_gmx_rxx_adr_cam4 {
uint64_t adr:64;
#endif
} s;
- struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xx;
- struct cvmx_agl_gmx_rxx_adr_cam4_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xx;
- struct cvmx_agl_gmx_rxx_adr_cam4_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam4_s cn61xx;
- struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xx;
- struct cvmx_agl_gmx_rxx_adr_cam4_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam4_s cn66xx;
- struct cvmx_agl_gmx_rxx_adr_cam4_s cn68xx;
- struct cvmx_agl_gmx_rxx_adr_cam4_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam5 {
@@ -488,16 +405,6 @@ union cvmx_agl_gmx_rxx_adr_cam5 {
uint64_t adr:64;
#endif
} s;
- struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xx;
- struct cvmx_agl_gmx_rxx_adr_cam5_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xx;
- struct cvmx_agl_gmx_rxx_adr_cam5_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam5_s cn61xx;
- struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xx;
- struct cvmx_agl_gmx_rxx_adr_cam5_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam5_s cn66xx;
- struct cvmx_agl_gmx_rxx_adr_cam5_s cn68xx;
- struct cvmx_agl_gmx_rxx_adr_cam5_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_adr_cam_en {
@@ -511,16 +418,6 @@ union cvmx_agl_gmx_rxx_adr_cam_en {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xx;
- struct cvmx_agl_gmx_rxx_adr_cam_en_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xx;
- struct cvmx_agl_gmx_rxx_adr_cam_en_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam_en_s cn61xx;
- struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xx;
- struct cvmx_agl_gmx_rxx_adr_cam_en_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_adr_cam_en_s cn66xx;
- struct cvmx_agl_gmx_rxx_adr_cam_en_s cn68xx;
- struct cvmx_agl_gmx_rxx_adr_cam_en_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_adr_ctl {
@@ -538,16 +435,6 @@ union cvmx_agl_gmx_rxx_adr_ctl {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xx;
- struct cvmx_agl_gmx_rxx_adr_ctl_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xx;
- struct cvmx_agl_gmx_rxx_adr_ctl_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_adr_ctl_s cn61xx;
- struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xx;
- struct cvmx_agl_gmx_rxx_adr_ctl_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_adr_ctl_s cn66xx;
- struct cvmx_agl_gmx_rxx_adr_ctl_s cn68xx;
- struct cvmx_agl_gmx_rxx_adr_ctl_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_decision {
@@ -561,16 +448,6 @@ union cvmx_agl_gmx_rxx_decision {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_agl_gmx_rxx_decision_s cn52xx;
- struct cvmx_agl_gmx_rxx_decision_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_decision_s cn56xx;
- struct cvmx_agl_gmx_rxx_decision_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_decision_s cn61xx;
- struct cvmx_agl_gmx_rxx_decision_s cn63xx;
- struct cvmx_agl_gmx_rxx_decision_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_decision_s cn66xx;
- struct cvmx_agl_gmx_rxx_decision_s cn68xx;
- struct cvmx_agl_gmx_rxx_decision_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_frm_chk {
@@ -627,15 +504,6 @@ union cvmx_agl_gmx_rxx_frm_chk {
uint64_t reserved_9_63:55;
#endif
} cn52xx;
- struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn52xxp1;
- struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xx;
- struct cvmx_agl_gmx_rxx_frm_chk_cn52xx cn56xxp1;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn61xx;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn63xx;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn66xx;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn68xx;
- struct cvmx_agl_gmx_rxx_frm_chk_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_frm_ctl {
@@ -700,15 +568,6 @@ union cvmx_agl_gmx_rxx_frm_ctl {
uint64_t reserved_10_63:54;
#endif
} cn52xx;
- struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn52xxp1;
- struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xx;
- struct cvmx_agl_gmx_rxx_frm_ctl_cn52xx cn56xxp1;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn61xx;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xx;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn66xx;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn68xx;
- struct cvmx_agl_gmx_rxx_frm_ctl_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_frm_max {
@@ -722,16 +581,6 @@ union cvmx_agl_gmx_rxx_frm_max {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_agl_gmx_rxx_frm_max_s cn52xx;
- struct cvmx_agl_gmx_rxx_frm_max_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_frm_max_s cn56xx;
- struct cvmx_agl_gmx_rxx_frm_max_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_frm_max_s cn61xx;
- struct cvmx_agl_gmx_rxx_frm_max_s cn63xx;
- struct cvmx_agl_gmx_rxx_frm_max_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_frm_max_s cn66xx;
- struct cvmx_agl_gmx_rxx_frm_max_s cn68xx;
- struct cvmx_agl_gmx_rxx_frm_max_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_frm_min {
@@ -745,16 +594,6 @@ union cvmx_agl_gmx_rxx_frm_min {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_agl_gmx_rxx_frm_min_s cn52xx;
- struct cvmx_agl_gmx_rxx_frm_min_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_frm_min_s cn56xx;
- struct cvmx_agl_gmx_rxx_frm_min_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_frm_min_s cn61xx;
- struct cvmx_agl_gmx_rxx_frm_min_s cn63xx;
- struct cvmx_agl_gmx_rxx_frm_min_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_frm_min_s cn66xx;
- struct cvmx_agl_gmx_rxx_frm_min_s cn68xx;
- struct cvmx_agl_gmx_rxx_frm_min_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_ifg {
@@ -768,16 +607,6 @@ union cvmx_agl_gmx_rxx_ifg {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_agl_gmx_rxx_ifg_s cn52xx;
- struct cvmx_agl_gmx_rxx_ifg_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_ifg_s cn56xx;
- struct cvmx_agl_gmx_rxx_ifg_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_ifg_s cn61xx;
- struct cvmx_agl_gmx_rxx_ifg_s cn63xx;
- struct cvmx_agl_gmx_rxx_ifg_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_ifg_s cn66xx;
- struct cvmx_agl_gmx_rxx_ifg_s cn68xx;
- struct cvmx_agl_gmx_rxx_ifg_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_int_en {
@@ -872,15 +701,6 @@ union cvmx_agl_gmx_rxx_int_en {
uint64_t reserved_20_63:44;
#endif
} cn52xx;
- struct cvmx_agl_gmx_rxx_int_en_cn52xx cn52xxp1;
- struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xx;
- struct cvmx_agl_gmx_rxx_int_en_cn52xx cn56xxp1;
- struct cvmx_agl_gmx_rxx_int_en_s cn61xx;
- struct cvmx_agl_gmx_rxx_int_en_s cn63xx;
- struct cvmx_agl_gmx_rxx_int_en_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_int_en_s cn66xx;
- struct cvmx_agl_gmx_rxx_int_en_s cn68xx;
- struct cvmx_agl_gmx_rxx_int_en_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_int_reg {
@@ -975,15 +795,6 @@ union cvmx_agl_gmx_rxx_int_reg {
uint64_t reserved_20_63:44;
#endif
} cn52xx;
- struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn52xxp1;
- struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xx;
- struct cvmx_agl_gmx_rxx_int_reg_cn52xx cn56xxp1;
- struct cvmx_agl_gmx_rxx_int_reg_s cn61xx;
- struct cvmx_agl_gmx_rxx_int_reg_s cn63xx;
- struct cvmx_agl_gmx_rxx_int_reg_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_int_reg_s cn66xx;
- struct cvmx_agl_gmx_rxx_int_reg_s cn68xx;
- struct cvmx_agl_gmx_rxx_int_reg_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_jabber {
@@ -997,16 +808,6 @@ union cvmx_agl_gmx_rxx_jabber {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_agl_gmx_rxx_jabber_s cn52xx;
- struct cvmx_agl_gmx_rxx_jabber_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_jabber_s cn56xx;
- struct cvmx_agl_gmx_rxx_jabber_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_jabber_s cn61xx;
- struct cvmx_agl_gmx_rxx_jabber_s cn63xx;
- struct cvmx_agl_gmx_rxx_jabber_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_jabber_s cn66xx;
- struct cvmx_agl_gmx_rxx_jabber_s cn68xx;
- struct cvmx_agl_gmx_rxx_jabber_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_pause_drop_time {
@@ -1020,16 +821,6 @@ union cvmx_agl_gmx_rxx_pause_drop_time {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xx;
- struct cvmx_agl_gmx_rxx_pause_drop_time_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xx;
- struct cvmx_agl_gmx_rxx_pause_drop_time_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_pause_drop_time_s cn61xx;
- struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xx;
- struct cvmx_agl_gmx_rxx_pause_drop_time_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_pause_drop_time_s cn66xx;
- struct cvmx_agl_gmx_rxx_pause_drop_time_s cn68xx;
- struct cvmx_agl_gmx_rxx_pause_drop_time_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_rx_inbnd {
@@ -1047,12 +838,6 @@ union cvmx_agl_gmx_rxx_rx_inbnd {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_agl_gmx_rxx_rx_inbnd_s cn61xx;
- struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xx;
- struct cvmx_agl_gmx_rxx_rx_inbnd_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_rx_inbnd_s cn66xx;
- struct cvmx_agl_gmx_rxx_rx_inbnd_s cn68xx;
- struct cvmx_agl_gmx_rxx_rx_inbnd_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_stats_ctl {
@@ -1066,16 +851,6 @@ union cvmx_agl_gmx_rxx_stats_ctl {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xx;
- struct cvmx_agl_gmx_rxx_stats_ctl_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xx;
- struct cvmx_agl_gmx_rxx_stats_ctl_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_stats_ctl_s cn61xx;
- struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xx;
- struct cvmx_agl_gmx_rxx_stats_ctl_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_stats_ctl_s cn66xx;
- struct cvmx_agl_gmx_rxx_stats_ctl_s cn68xx;
- struct cvmx_agl_gmx_rxx_stats_ctl_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs {
@@ -1089,16 +864,6 @@ union cvmx_agl_gmx_rxx_stats_octs {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_agl_gmx_rxx_stats_octs_s cn52xx;
- struct cvmx_agl_gmx_rxx_stats_octs_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_s cn56xx;
- struct cvmx_agl_gmx_rxx_stats_octs_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_s cn61xx;
- struct cvmx_agl_gmx_rxx_stats_octs_s cn63xx;
- struct cvmx_agl_gmx_rxx_stats_octs_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_s cn66xx;
- struct cvmx_agl_gmx_rxx_stats_octs_s cn68xx;
- struct cvmx_agl_gmx_rxx_stats_octs_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs_ctl {
@@ -1112,16 +877,6 @@ union cvmx_agl_gmx_rxx_stats_octs_ctl {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xx;
- struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xx;
- struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn61xx;
- struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xx;
- struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn66xx;
- struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn68xx;
- struct cvmx_agl_gmx_rxx_stats_octs_ctl_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs_dmac {
@@ -1135,16 +890,6 @@ union cvmx_agl_gmx_rxx_stats_octs_dmac {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xx;
- struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xx;
- struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn61xx;
- struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xx;
- struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn66xx;
- struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn68xx;
- struct cvmx_agl_gmx_rxx_stats_octs_dmac_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_stats_octs_drp {
@@ -1158,16 +903,6 @@ union cvmx_agl_gmx_rxx_stats_octs_drp {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xx;
- struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xx;
- struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn61xx;
- struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xx;
- struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn66xx;
- struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn68xx;
- struct cvmx_agl_gmx_rxx_stats_octs_drp_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts {
@@ -1181,16 +916,6 @@ union cvmx_agl_gmx_rxx_stats_pkts {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_s cn61xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_s cn66xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_s cn68xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_bad {
@@ -1204,16 +929,6 @@ union cvmx_agl_gmx_rxx_stats_pkts_bad {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn61xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn66xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn68xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_bad_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_ctl {
@@ -1227,16 +942,6 @@ union cvmx_agl_gmx_rxx_stats_pkts_ctl {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn61xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn66xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn68xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_ctl_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_dmac {
@@ -1250,16 +955,6 @@ union cvmx_agl_gmx_rxx_stats_pkts_dmac {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn61xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn66xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn68xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_dmac_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_stats_pkts_drp {
@@ -1273,16 +968,6 @@ union cvmx_agl_gmx_rxx_stats_pkts_drp {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn61xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn66xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn68xx;
- struct cvmx_agl_gmx_rxx_stats_pkts_drp_s cn68xxp1;
};
union cvmx_agl_gmx_rxx_udd_skp {
@@ -1300,16 +985,6 @@ union cvmx_agl_gmx_rxx_udd_skp {
uint64_t reserved_9_63:55;
#endif
} s;
- struct cvmx_agl_gmx_rxx_udd_skp_s cn52xx;
- struct cvmx_agl_gmx_rxx_udd_skp_s cn52xxp1;
- struct cvmx_agl_gmx_rxx_udd_skp_s cn56xx;
- struct cvmx_agl_gmx_rxx_udd_skp_s cn56xxp1;
- struct cvmx_agl_gmx_rxx_udd_skp_s cn61xx;
- struct cvmx_agl_gmx_rxx_udd_skp_s cn63xx;
- struct cvmx_agl_gmx_rxx_udd_skp_s cn63xxp1;
- struct cvmx_agl_gmx_rxx_udd_skp_s cn66xx;
- struct cvmx_agl_gmx_rxx_udd_skp_s cn68xx;
- struct cvmx_agl_gmx_rxx_udd_skp_s cn68xxp1;
};
union cvmx_agl_gmx_rx_bp_dropx {
@@ -1323,16 +998,6 @@ union cvmx_agl_gmx_rx_bp_dropx {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_agl_gmx_rx_bp_dropx_s cn52xx;
- struct cvmx_agl_gmx_rx_bp_dropx_s cn52xxp1;
- struct cvmx_agl_gmx_rx_bp_dropx_s cn56xx;
- struct cvmx_agl_gmx_rx_bp_dropx_s cn56xxp1;
- struct cvmx_agl_gmx_rx_bp_dropx_s cn61xx;
- struct cvmx_agl_gmx_rx_bp_dropx_s cn63xx;
- struct cvmx_agl_gmx_rx_bp_dropx_s cn63xxp1;
- struct cvmx_agl_gmx_rx_bp_dropx_s cn66xx;
- struct cvmx_agl_gmx_rx_bp_dropx_s cn68xx;
- struct cvmx_agl_gmx_rx_bp_dropx_s cn68xxp1;
};
union cvmx_agl_gmx_rx_bp_offx {
@@ -1346,16 +1011,6 @@ union cvmx_agl_gmx_rx_bp_offx {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_agl_gmx_rx_bp_offx_s cn52xx;
- struct cvmx_agl_gmx_rx_bp_offx_s cn52xxp1;
- struct cvmx_agl_gmx_rx_bp_offx_s cn56xx;
- struct cvmx_agl_gmx_rx_bp_offx_s cn56xxp1;
- struct cvmx_agl_gmx_rx_bp_offx_s cn61xx;
- struct cvmx_agl_gmx_rx_bp_offx_s cn63xx;
- struct cvmx_agl_gmx_rx_bp_offx_s cn63xxp1;
- struct cvmx_agl_gmx_rx_bp_offx_s cn66xx;
- struct cvmx_agl_gmx_rx_bp_offx_s cn68xx;
- struct cvmx_agl_gmx_rx_bp_offx_s cn68xxp1;
};
union cvmx_agl_gmx_rx_bp_onx {
@@ -1369,16 +1024,6 @@ union cvmx_agl_gmx_rx_bp_onx {
uint64_t reserved_9_63:55;
#endif
} s;
- struct cvmx_agl_gmx_rx_bp_onx_s cn52xx;
- struct cvmx_agl_gmx_rx_bp_onx_s cn52xxp1;
- struct cvmx_agl_gmx_rx_bp_onx_s cn56xx;
- struct cvmx_agl_gmx_rx_bp_onx_s cn56xxp1;
- struct cvmx_agl_gmx_rx_bp_onx_s cn61xx;
- struct cvmx_agl_gmx_rx_bp_onx_s cn63xx;
- struct cvmx_agl_gmx_rx_bp_onx_s cn63xxp1;
- struct cvmx_agl_gmx_rx_bp_onx_s cn66xx;
- struct cvmx_agl_gmx_rx_bp_onx_s cn68xx;
- struct cvmx_agl_gmx_rx_bp_onx_s cn68xxp1;
};
union cvmx_agl_gmx_rx_prt_info {
@@ -1396,8 +1041,6 @@ union cvmx_agl_gmx_rx_prt_info {
uint64_t reserved_18_63:46;
#endif
} s;
- struct cvmx_agl_gmx_rx_prt_info_s cn52xx;
- struct cvmx_agl_gmx_rx_prt_info_s cn52xxp1;
struct cvmx_agl_gmx_rx_prt_info_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
@@ -1411,13 +1054,6 @@ union cvmx_agl_gmx_rx_prt_info {
uint64_t reserved_17_63:47;
#endif
} cn56xx;
- struct cvmx_agl_gmx_rx_prt_info_cn56xx cn56xxp1;
- struct cvmx_agl_gmx_rx_prt_info_s cn61xx;
- struct cvmx_agl_gmx_rx_prt_info_s cn63xx;
- struct cvmx_agl_gmx_rx_prt_info_s cn63xxp1;
- struct cvmx_agl_gmx_rx_prt_info_s cn66xx;
- struct cvmx_agl_gmx_rx_prt_info_s cn68xx;
- struct cvmx_agl_gmx_rx_prt_info_s cn68xxp1;
};
union cvmx_agl_gmx_rx_tx_status {
@@ -1435,8 +1071,6 @@ union cvmx_agl_gmx_rx_tx_status {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_agl_gmx_rx_tx_status_s cn52xx;
- struct cvmx_agl_gmx_rx_tx_status_s cn52xxp1;
struct cvmx_agl_gmx_rx_tx_status_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_5_63:59;
@@ -1450,13 +1084,6 @@ union cvmx_agl_gmx_rx_tx_status {
uint64_t reserved_5_63:59;
#endif
} cn56xx;
- struct cvmx_agl_gmx_rx_tx_status_cn56xx cn56xxp1;
- struct cvmx_agl_gmx_rx_tx_status_s cn61xx;
- struct cvmx_agl_gmx_rx_tx_status_s cn63xx;
- struct cvmx_agl_gmx_rx_tx_status_s cn63xxp1;
- struct cvmx_agl_gmx_rx_tx_status_s cn66xx;
- struct cvmx_agl_gmx_rx_tx_status_s cn68xx;
- struct cvmx_agl_gmx_rx_tx_status_s cn68xxp1;
};
union cvmx_agl_gmx_smacx {
@@ -1470,16 +1097,6 @@ union cvmx_agl_gmx_smacx {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_agl_gmx_smacx_s cn52xx;
- struct cvmx_agl_gmx_smacx_s cn52xxp1;
- struct cvmx_agl_gmx_smacx_s cn56xx;
- struct cvmx_agl_gmx_smacx_s cn56xxp1;
- struct cvmx_agl_gmx_smacx_s cn61xx;
- struct cvmx_agl_gmx_smacx_s cn63xx;
- struct cvmx_agl_gmx_smacx_s cn63xxp1;
- struct cvmx_agl_gmx_smacx_s cn66xx;
- struct cvmx_agl_gmx_smacx_s cn68xx;
- struct cvmx_agl_gmx_smacx_s cn68xxp1;
};
union cvmx_agl_gmx_stat_bp {
@@ -1495,16 +1112,6 @@ union cvmx_agl_gmx_stat_bp {
uint64_t reserved_17_63:47;
#endif
} s;
- struct cvmx_agl_gmx_stat_bp_s cn52xx;
- struct cvmx_agl_gmx_stat_bp_s cn52xxp1;
- struct cvmx_agl_gmx_stat_bp_s cn56xx;
- struct cvmx_agl_gmx_stat_bp_s cn56xxp1;
- struct cvmx_agl_gmx_stat_bp_s cn61xx;
- struct cvmx_agl_gmx_stat_bp_s cn63xx;
- struct cvmx_agl_gmx_stat_bp_s cn63xxp1;
- struct cvmx_agl_gmx_stat_bp_s cn66xx;
- struct cvmx_agl_gmx_stat_bp_s cn68xx;
- struct cvmx_agl_gmx_stat_bp_s cn68xxp1;
};
union cvmx_agl_gmx_txx_append {
@@ -1524,16 +1131,6 @@ union cvmx_agl_gmx_txx_append {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_agl_gmx_txx_append_s cn52xx;
- struct cvmx_agl_gmx_txx_append_s cn52xxp1;
- struct cvmx_agl_gmx_txx_append_s cn56xx;
- struct cvmx_agl_gmx_txx_append_s cn56xxp1;
- struct cvmx_agl_gmx_txx_append_s cn61xx;
- struct cvmx_agl_gmx_txx_append_s cn63xx;
- struct cvmx_agl_gmx_txx_append_s cn63xxp1;
- struct cvmx_agl_gmx_txx_append_s cn66xx;
- struct cvmx_agl_gmx_txx_append_s cn68xx;
- struct cvmx_agl_gmx_txx_append_s cn68xxp1;
};
union cvmx_agl_gmx_txx_clk {
@@ -1547,12 +1144,6 @@ union cvmx_agl_gmx_txx_clk {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_agl_gmx_txx_clk_s cn61xx;
- struct cvmx_agl_gmx_txx_clk_s cn63xx;
- struct cvmx_agl_gmx_txx_clk_s cn63xxp1;
- struct cvmx_agl_gmx_txx_clk_s cn66xx;
- struct cvmx_agl_gmx_txx_clk_s cn68xx;
- struct cvmx_agl_gmx_txx_clk_s cn68xxp1;
};
union cvmx_agl_gmx_txx_ctl {
@@ -1568,16 +1159,6 @@ union cvmx_agl_gmx_txx_ctl {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_agl_gmx_txx_ctl_s cn52xx;
- struct cvmx_agl_gmx_txx_ctl_s cn52xxp1;
- struct cvmx_agl_gmx_txx_ctl_s cn56xx;
- struct cvmx_agl_gmx_txx_ctl_s cn56xxp1;
- struct cvmx_agl_gmx_txx_ctl_s cn61xx;
- struct cvmx_agl_gmx_txx_ctl_s cn63xx;
- struct cvmx_agl_gmx_txx_ctl_s cn63xxp1;
- struct cvmx_agl_gmx_txx_ctl_s cn66xx;
- struct cvmx_agl_gmx_txx_ctl_s cn68xx;
- struct cvmx_agl_gmx_txx_ctl_s cn68xxp1;
};
union cvmx_agl_gmx_txx_min_pkt {
@@ -1591,16 +1172,6 @@ union cvmx_agl_gmx_txx_min_pkt {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_agl_gmx_txx_min_pkt_s cn52xx;
- struct cvmx_agl_gmx_txx_min_pkt_s cn52xxp1;
- struct cvmx_agl_gmx_txx_min_pkt_s cn56xx;
- struct cvmx_agl_gmx_txx_min_pkt_s cn56xxp1;
- struct cvmx_agl_gmx_txx_min_pkt_s cn61xx;
- struct cvmx_agl_gmx_txx_min_pkt_s cn63xx;
- struct cvmx_agl_gmx_txx_min_pkt_s cn63xxp1;
- struct cvmx_agl_gmx_txx_min_pkt_s cn66xx;
- struct cvmx_agl_gmx_txx_min_pkt_s cn68xx;
- struct cvmx_agl_gmx_txx_min_pkt_s cn68xxp1;
};
union cvmx_agl_gmx_txx_pause_pkt_interval {
@@ -1614,16 +1185,6 @@ union cvmx_agl_gmx_txx_pause_pkt_interval {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xx;
- struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn52xxp1;
- struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xx;
- struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn56xxp1;
- struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn61xx;
- struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xx;
- struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn63xxp1;
- struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn66xx;
- struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn68xx;
- struct cvmx_agl_gmx_txx_pause_pkt_interval_s cn68xxp1;
};
union cvmx_agl_gmx_txx_pause_pkt_time {
@@ -1637,16 +1198,6 @@ union cvmx_agl_gmx_txx_pause_pkt_time {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xx;
- struct cvmx_agl_gmx_txx_pause_pkt_time_s cn52xxp1;
- struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xx;
- struct cvmx_agl_gmx_txx_pause_pkt_time_s cn56xxp1;
- struct cvmx_agl_gmx_txx_pause_pkt_time_s cn61xx;
- struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xx;
- struct cvmx_agl_gmx_txx_pause_pkt_time_s cn63xxp1;
- struct cvmx_agl_gmx_txx_pause_pkt_time_s cn66xx;
- struct cvmx_agl_gmx_txx_pause_pkt_time_s cn68xx;
- struct cvmx_agl_gmx_txx_pause_pkt_time_s cn68xxp1;
};
union cvmx_agl_gmx_txx_pause_togo {
@@ -1660,16 +1211,6 @@ union cvmx_agl_gmx_txx_pause_togo {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_agl_gmx_txx_pause_togo_s cn52xx;
- struct cvmx_agl_gmx_txx_pause_togo_s cn52xxp1;
- struct cvmx_agl_gmx_txx_pause_togo_s cn56xx;
- struct cvmx_agl_gmx_txx_pause_togo_s cn56xxp1;
- struct cvmx_agl_gmx_txx_pause_togo_s cn61xx;
- struct cvmx_agl_gmx_txx_pause_togo_s cn63xx;
- struct cvmx_agl_gmx_txx_pause_togo_s cn63xxp1;
- struct cvmx_agl_gmx_txx_pause_togo_s cn66xx;
- struct cvmx_agl_gmx_txx_pause_togo_s cn68xx;
- struct cvmx_agl_gmx_txx_pause_togo_s cn68xxp1;
};
union cvmx_agl_gmx_txx_pause_zero {
@@ -1683,16 +1224,6 @@ union cvmx_agl_gmx_txx_pause_zero {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_agl_gmx_txx_pause_zero_s cn52xx;
- struct cvmx_agl_gmx_txx_pause_zero_s cn52xxp1;
- struct cvmx_agl_gmx_txx_pause_zero_s cn56xx;
- struct cvmx_agl_gmx_txx_pause_zero_s cn56xxp1;
- struct cvmx_agl_gmx_txx_pause_zero_s cn61xx;
- struct cvmx_agl_gmx_txx_pause_zero_s cn63xx;
- struct cvmx_agl_gmx_txx_pause_zero_s cn63xxp1;
- struct cvmx_agl_gmx_txx_pause_zero_s cn66xx;
- struct cvmx_agl_gmx_txx_pause_zero_s cn68xx;
- struct cvmx_agl_gmx_txx_pause_zero_s cn68xxp1;
};
union cvmx_agl_gmx_txx_soft_pause {
@@ -1706,16 +1237,6 @@ union cvmx_agl_gmx_txx_soft_pause {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_agl_gmx_txx_soft_pause_s cn52xx;
- struct cvmx_agl_gmx_txx_soft_pause_s cn52xxp1;
- struct cvmx_agl_gmx_txx_soft_pause_s cn56xx;
- struct cvmx_agl_gmx_txx_soft_pause_s cn56xxp1;
- struct cvmx_agl_gmx_txx_soft_pause_s cn61xx;
- struct cvmx_agl_gmx_txx_soft_pause_s cn63xx;
- struct cvmx_agl_gmx_txx_soft_pause_s cn63xxp1;
- struct cvmx_agl_gmx_txx_soft_pause_s cn66xx;
- struct cvmx_agl_gmx_txx_soft_pause_s cn68xx;
- struct cvmx_agl_gmx_txx_soft_pause_s cn68xxp1;
};
union cvmx_agl_gmx_txx_stat0 {
@@ -1729,16 +1250,6 @@ union cvmx_agl_gmx_txx_stat0 {
uint64_t xsdef:32;
#endif
} s;
- struct cvmx_agl_gmx_txx_stat0_s cn52xx;
- struct cvmx_agl_gmx_txx_stat0_s cn52xxp1;
- struct cvmx_agl_gmx_txx_stat0_s cn56xx;
- struct cvmx_agl_gmx_txx_stat0_s cn56xxp1;
- struct cvmx_agl_gmx_txx_stat0_s cn61xx;
- struct cvmx_agl_gmx_txx_stat0_s cn63xx;
- struct cvmx_agl_gmx_txx_stat0_s cn63xxp1;
- struct cvmx_agl_gmx_txx_stat0_s cn66xx;
- struct cvmx_agl_gmx_txx_stat0_s cn68xx;
- struct cvmx_agl_gmx_txx_stat0_s cn68xxp1;
};
union cvmx_agl_gmx_txx_stat1 {
@@ -1752,16 +1263,6 @@ union cvmx_agl_gmx_txx_stat1 {
uint64_t scol:32;
#endif
} s;
- struct cvmx_agl_gmx_txx_stat1_s cn52xx;
- struct cvmx_agl_gmx_txx_stat1_s cn52xxp1;
- struct cvmx_agl_gmx_txx_stat1_s cn56xx;
- struct cvmx_agl_gmx_txx_stat1_s cn56xxp1;
- struct cvmx_agl_gmx_txx_stat1_s cn61xx;
- struct cvmx_agl_gmx_txx_stat1_s cn63xx;
- struct cvmx_agl_gmx_txx_stat1_s cn63xxp1;
- struct cvmx_agl_gmx_txx_stat1_s cn66xx;
- struct cvmx_agl_gmx_txx_stat1_s cn68xx;
- struct cvmx_agl_gmx_txx_stat1_s cn68xxp1;
};
union cvmx_agl_gmx_txx_stat2 {
@@ -1775,16 +1276,6 @@ union cvmx_agl_gmx_txx_stat2 {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_agl_gmx_txx_stat2_s cn52xx;
- struct cvmx_agl_gmx_txx_stat2_s cn52xxp1;
- struct cvmx_agl_gmx_txx_stat2_s cn56xx;
- struct cvmx_agl_gmx_txx_stat2_s cn56xxp1;
- struct cvmx_agl_gmx_txx_stat2_s cn61xx;
- struct cvmx_agl_gmx_txx_stat2_s cn63xx;
- struct cvmx_agl_gmx_txx_stat2_s cn63xxp1;
- struct cvmx_agl_gmx_txx_stat2_s cn66xx;
- struct cvmx_agl_gmx_txx_stat2_s cn68xx;
- struct cvmx_agl_gmx_txx_stat2_s cn68xxp1;
};
union cvmx_agl_gmx_txx_stat3 {
@@ -1798,16 +1289,6 @@ union cvmx_agl_gmx_txx_stat3 {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_agl_gmx_txx_stat3_s cn52xx;
- struct cvmx_agl_gmx_txx_stat3_s cn52xxp1;
- struct cvmx_agl_gmx_txx_stat3_s cn56xx;
- struct cvmx_agl_gmx_txx_stat3_s cn56xxp1;
- struct cvmx_agl_gmx_txx_stat3_s cn61xx;
- struct cvmx_agl_gmx_txx_stat3_s cn63xx;
- struct cvmx_agl_gmx_txx_stat3_s cn63xxp1;
- struct cvmx_agl_gmx_txx_stat3_s cn66xx;
- struct cvmx_agl_gmx_txx_stat3_s cn68xx;
- struct cvmx_agl_gmx_txx_stat3_s cn68xxp1;
};
union cvmx_agl_gmx_txx_stat4 {
@@ -1821,16 +1302,6 @@ union cvmx_agl_gmx_txx_stat4 {
uint64_t hist1:32;
#endif
} s;
- struct cvmx_agl_gmx_txx_stat4_s cn52xx;
- struct cvmx_agl_gmx_txx_stat4_s cn52xxp1;
- struct cvmx_agl_gmx_txx_stat4_s cn56xx;
- struct cvmx_agl_gmx_txx_stat4_s cn56xxp1;
- struct cvmx_agl_gmx_txx_stat4_s cn61xx;
- struct cvmx_agl_gmx_txx_stat4_s cn63xx;
- struct cvmx_agl_gmx_txx_stat4_s cn63xxp1;
- struct cvmx_agl_gmx_txx_stat4_s cn66xx;
- struct cvmx_agl_gmx_txx_stat4_s cn68xx;
- struct cvmx_agl_gmx_txx_stat4_s cn68xxp1;
};
union cvmx_agl_gmx_txx_stat5 {
@@ -1844,16 +1315,6 @@ union cvmx_agl_gmx_txx_stat5 {
uint64_t hist3:32;
#endif
} s;
- struct cvmx_agl_gmx_txx_stat5_s cn52xx;
- struct cvmx_agl_gmx_txx_stat5_s cn52xxp1;
- struct cvmx_agl_gmx_txx_stat5_s cn56xx;
- struct cvmx_agl_gmx_txx_stat5_s cn56xxp1;
- struct cvmx_agl_gmx_txx_stat5_s cn61xx;
- struct cvmx_agl_gmx_txx_stat5_s cn63xx;
- struct cvmx_agl_gmx_txx_stat5_s cn63xxp1;
- struct cvmx_agl_gmx_txx_stat5_s cn66xx;
- struct cvmx_agl_gmx_txx_stat5_s cn68xx;
- struct cvmx_agl_gmx_txx_stat5_s cn68xxp1;
};
union cvmx_agl_gmx_txx_stat6 {
@@ -1867,16 +1328,6 @@ union cvmx_agl_gmx_txx_stat6 {
uint64_t hist5:32;
#endif
} s;
- struct cvmx_agl_gmx_txx_stat6_s cn52xx;
- struct cvmx_agl_gmx_txx_stat6_s cn52xxp1;
- struct cvmx_agl_gmx_txx_stat6_s cn56xx;
- struct cvmx_agl_gmx_txx_stat6_s cn56xxp1;
- struct cvmx_agl_gmx_txx_stat6_s cn61xx;
- struct cvmx_agl_gmx_txx_stat6_s cn63xx;
- struct cvmx_agl_gmx_txx_stat6_s cn63xxp1;
- struct cvmx_agl_gmx_txx_stat6_s cn66xx;
- struct cvmx_agl_gmx_txx_stat6_s cn68xx;
- struct cvmx_agl_gmx_txx_stat6_s cn68xxp1;
};
union cvmx_agl_gmx_txx_stat7 {
@@ -1890,16 +1341,6 @@ union cvmx_agl_gmx_txx_stat7 {
uint64_t hist7:32;
#endif
} s;
- struct cvmx_agl_gmx_txx_stat7_s cn52xx;
- struct cvmx_agl_gmx_txx_stat7_s cn52xxp1;
- struct cvmx_agl_gmx_txx_stat7_s cn56xx;
- struct cvmx_agl_gmx_txx_stat7_s cn56xxp1;
- struct cvmx_agl_gmx_txx_stat7_s cn61xx;
- struct cvmx_agl_gmx_txx_stat7_s cn63xx;
- struct cvmx_agl_gmx_txx_stat7_s cn63xxp1;
- struct cvmx_agl_gmx_txx_stat7_s cn66xx;
- struct cvmx_agl_gmx_txx_stat7_s cn68xx;
- struct cvmx_agl_gmx_txx_stat7_s cn68xxp1;
};
union cvmx_agl_gmx_txx_stat8 {
@@ -1913,16 +1354,6 @@ union cvmx_agl_gmx_txx_stat8 {
uint64_t mcst:32;
#endif
} s;
- struct cvmx_agl_gmx_txx_stat8_s cn52xx;
- struct cvmx_agl_gmx_txx_stat8_s cn52xxp1;
- struct cvmx_agl_gmx_txx_stat8_s cn56xx;
- struct cvmx_agl_gmx_txx_stat8_s cn56xxp1;
- struct cvmx_agl_gmx_txx_stat8_s cn61xx;
- struct cvmx_agl_gmx_txx_stat8_s cn63xx;
- struct cvmx_agl_gmx_txx_stat8_s cn63xxp1;
- struct cvmx_agl_gmx_txx_stat8_s cn66xx;
- struct cvmx_agl_gmx_txx_stat8_s cn68xx;
- struct cvmx_agl_gmx_txx_stat8_s cn68xxp1;
};
union cvmx_agl_gmx_txx_stat9 {
@@ -1936,16 +1367,6 @@ union cvmx_agl_gmx_txx_stat9 {
uint64_t undflw:32;
#endif
} s;
- struct cvmx_agl_gmx_txx_stat9_s cn52xx;
- struct cvmx_agl_gmx_txx_stat9_s cn52xxp1;
- struct cvmx_agl_gmx_txx_stat9_s cn56xx;
- struct cvmx_agl_gmx_txx_stat9_s cn56xxp1;
- struct cvmx_agl_gmx_txx_stat9_s cn61xx;
- struct cvmx_agl_gmx_txx_stat9_s cn63xx;
- struct cvmx_agl_gmx_txx_stat9_s cn63xxp1;
- struct cvmx_agl_gmx_txx_stat9_s cn66xx;
- struct cvmx_agl_gmx_txx_stat9_s cn68xx;
- struct cvmx_agl_gmx_txx_stat9_s cn68xxp1;
};
union cvmx_agl_gmx_txx_stats_ctl {
@@ -1959,16 +1380,6 @@ union cvmx_agl_gmx_txx_stats_ctl {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_agl_gmx_txx_stats_ctl_s cn52xx;
- struct cvmx_agl_gmx_txx_stats_ctl_s cn52xxp1;
- struct cvmx_agl_gmx_txx_stats_ctl_s cn56xx;
- struct cvmx_agl_gmx_txx_stats_ctl_s cn56xxp1;
- struct cvmx_agl_gmx_txx_stats_ctl_s cn61xx;
- struct cvmx_agl_gmx_txx_stats_ctl_s cn63xx;
- struct cvmx_agl_gmx_txx_stats_ctl_s cn63xxp1;
- struct cvmx_agl_gmx_txx_stats_ctl_s cn66xx;
- struct cvmx_agl_gmx_txx_stats_ctl_s cn68xx;
- struct cvmx_agl_gmx_txx_stats_ctl_s cn68xxp1;
};
union cvmx_agl_gmx_txx_thresh {
@@ -1982,16 +1393,6 @@ union cvmx_agl_gmx_txx_thresh {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_agl_gmx_txx_thresh_s cn52xx;
- struct cvmx_agl_gmx_txx_thresh_s cn52xxp1;
- struct cvmx_agl_gmx_txx_thresh_s cn56xx;
- struct cvmx_agl_gmx_txx_thresh_s cn56xxp1;
- struct cvmx_agl_gmx_txx_thresh_s cn61xx;
- struct cvmx_agl_gmx_txx_thresh_s cn63xx;
- struct cvmx_agl_gmx_txx_thresh_s cn63xxp1;
- struct cvmx_agl_gmx_txx_thresh_s cn66xx;
- struct cvmx_agl_gmx_txx_thresh_s cn68xx;
- struct cvmx_agl_gmx_txx_thresh_s cn68xxp1;
};
union cvmx_agl_gmx_tx_bp {
@@ -2005,8 +1406,6 @@ union cvmx_agl_gmx_tx_bp {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_agl_gmx_tx_bp_s cn52xx;
- struct cvmx_agl_gmx_tx_bp_s cn52xxp1;
struct cvmx_agl_gmx_tx_bp_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
@@ -2016,13 +1415,6 @@ union cvmx_agl_gmx_tx_bp {
uint64_t reserved_1_63:63;
#endif
} cn56xx;
- struct cvmx_agl_gmx_tx_bp_cn56xx cn56xxp1;
- struct cvmx_agl_gmx_tx_bp_s cn61xx;
- struct cvmx_agl_gmx_tx_bp_s cn63xx;
- struct cvmx_agl_gmx_tx_bp_s cn63xxp1;
- struct cvmx_agl_gmx_tx_bp_s cn66xx;
- struct cvmx_agl_gmx_tx_bp_s cn68xx;
- struct cvmx_agl_gmx_tx_bp_s cn68xxp1;
};
union cvmx_agl_gmx_tx_col_attempt {
@@ -2036,16 +1428,6 @@ union cvmx_agl_gmx_tx_col_attempt {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_agl_gmx_tx_col_attempt_s cn52xx;
- struct cvmx_agl_gmx_tx_col_attempt_s cn52xxp1;
- struct cvmx_agl_gmx_tx_col_attempt_s cn56xx;
- struct cvmx_agl_gmx_tx_col_attempt_s cn56xxp1;
- struct cvmx_agl_gmx_tx_col_attempt_s cn61xx;
- struct cvmx_agl_gmx_tx_col_attempt_s cn63xx;
- struct cvmx_agl_gmx_tx_col_attempt_s cn63xxp1;
- struct cvmx_agl_gmx_tx_col_attempt_s cn66xx;
- struct cvmx_agl_gmx_tx_col_attempt_s cn68xx;
- struct cvmx_agl_gmx_tx_col_attempt_s cn68xxp1;
};
union cvmx_agl_gmx_tx_ifg {
@@ -2061,16 +1443,6 @@ union cvmx_agl_gmx_tx_ifg {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_agl_gmx_tx_ifg_s cn52xx;
- struct cvmx_agl_gmx_tx_ifg_s cn52xxp1;
- struct cvmx_agl_gmx_tx_ifg_s cn56xx;
- struct cvmx_agl_gmx_tx_ifg_s cn56xxp1;
- struct cvmx_agl_gmx_tx_ifg_s cn61xx;
- struct cvmx_agl_gmx_tx_ifg_s cn63xx;
- struct cvmx_agl_gmx_tx_ifg_s cn63xxp1;
- struct cvmx_agl_gmx_tx_ifg_s cn66xx;
- struct cvmx_agl_gmx_tx_ifg_s cn68xx;
- struct cvmx_agl_gmx_tx_ifg_s cn68xxp1;
};
union cvmx_agl_gmx_tx_int_en {
@@ -2129,7 +1501,6 @@ union cvmx_agl_gmx_tx_int_en {
uint64_t reserved_18_63:46;
#endif
} cn52xx;
- struct cvmx_agl_gmx_tx_int_en_cn52xx cn52xxp1;
struct cvmx_agl_gmx_tx_int_en_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
@@ -2155,13 +1526,6 @@ union cvmx_agl_gmx_tx_int_en {
uint64_t reserved_17_63:47;
#endif
} cn56xx;
- struct cvmx_agl_gmx_tx_int_en_cn56xx cn56xxp1;
- struct cvmx_agl_gmx_tx_int_en_s cn61xx;
- struct cvmx_agl_gmx_tx_int_en_s cn63xx;
- struct cvmx_agl_gmx_tx_int_en_s cn63xxp1;
- struct cvmx_agl_gmx_tx_int_en_s cn66xx;
- struct cvmx_agl_gmx_tx_int_en_s cn68xx;
- struct cvmx_agl_gmx_tx_int_en_s cn68xxp1;
};
union cvmx_agl_gmx_tx_int_reg {
@@ -2220,7 +1584,6 @@ union cvmx_agl_gmx_tx_int_reg {
uint64_t reserved_18_63:46;
#endif
} cn52xx;
- struct cvmx_agl_gmx_tx_int_reg_cn52xx cn52xxp1;
struct cvmx_agl_gmx_tx_int_reg_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
@@ -2246,13 +1609,6 @@ union cvmx_agl_gmx_tx_int_reg {
uint64_t reserved_17_63:47;
#endif
} cn56xx;
- struct cvmx_agl_gmx_tx_int_reg_cn56xx cn56xxp1;
- struct cvmx_agl_gmx_tx_int_reg_s cn61xx;
- struct cvmx_agl_gmx_tx_int_reg_s cn63xx;
- struct cvmx_agl_gmx_tx_int_reg_s cn63xxp1;
- struct cvmx_agl_gmx_tx_int_reg_s cn66xx;
- struct cvmx_agl_gmx_tx_int_reg_s cn68xx;
- struct cvmx_agl_gmx_tx_int_reg_s cn68xxp1;
};
union cvmx_agl_gmx_tx_jam {
@@ -2266,16 +1622,6 @@ union cvmx_agl_gmx_tx_jam {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_agl_gmx_tx_jam_s cn52xx;
- struct cvmx_agl_gmx_tx_jam_s cn52xxp1;
- struct cvmx_agl_gmx_tx_jam_s cn56xx;
- struct cvmx_agl_gmx_tx_jam_s cn56xxp1;
- struct cvmx_agl_gmx_tx_jam_s cn61xx;
- struct cvmx_agl_gmx_tx_jam_s cn63xx;
- struct cvmx_agl_gmx_tx_jam_s cn63xxp1;
- struct cvmx_agl_gmx_tx_jam_s cn66xx;
- struct cvmx_agl_gmx_tx_jam_s cn68xx;
- struct cvmx_agl_gmx_tx_jam_s cn68xxp1;
};
union cvmx_agl_gmx_tx_lfsr {
@@ -2289,16 +1635,6 @@ union cvmx_agl_gmx_tx_lfsr {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_agl_gmx_tx_lfsr_s cn52xx;
- struct cvmx_agl_gmx_tx_lfsr_s cn52xxp1;
- struct cvmx_agl_gmx_tx_lfsr_s cn56xx;
- struct cvmx_agl_gmx_tx_lfsr_s cn56xxp1;
- struct cvmx_agl_gmx_tx_lfsr_s cn61xx;
- struct cvmx_agl_gmx_tx_lfsr_s cn63xx;
- struct cvmx_agl_gmx_tx_lfsr_s cn63xxp1;
- struct cvmx_agl_gmx_tx_lfsr_s cn66xx;
- struct cvmx_agl_gmx_tx_lfsr_s cn68xx;
- struct cvmx_agl_gmx_tx_lfsr_s cn68xxp1;
};
union cvmx_agl_gmx_tx_ovr_bp {
@@ -2320,8 +1656,6 @@ union cvmx_agl_gmx_tx_ovr_bp {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_agl_gmx_tx_ovr_bp_s cn52xx;
- struct cvmx_agl_gmx_tx_ovr_bp_s cn52xxp1;
struct cvmx_agl_gmx_tx_ovr_bp_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
@@ -2339,13 +1673,6 @@ union cvmx_agl_gmx_tx_ovr_bp {
uint64_t reserved_9_63:55;
#endif
} cn56xx;
- struct cvmx_agl_gmx_tx_ovr_bp_cn56xx cn56xxp1;
- struct cvmx_agl_gmx_tx_ovr_bp_s cn61xx;
- struct cvmx_agl_gmx_tx_ovr_bp_s cn63xx;
- struct cvmx_agl_gmx_tx_ovr_bp_s cn63xxp1;
- struct cvmx_agl_gmx_tx_ovr_bp_s cn66xx;
- struct cvmx_agl_gmx_tx_ovr_bp_s cn68xx;
- struct cvmx_agl_gmx_tx_ovr_bp_s cn68xxp1;
};
union cvmx_agl_gmx_tx_pause_pkt_dmac {
@@ -2359,16 +1686,6 @@ union cvmx_agl_gmx_tx_pause_pkt_dmac {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xx;
- struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn52xxp1;
- struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xx;
- struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn56xxp1;
- struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn61xx;
- struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xx;
- struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn63xxp1;
- struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn66xx;
- struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn68xx;
- struct cvmx_agl_gmx_tx_pause_pkt_dmac_s cn68xxp1;
};
union cvmx_agl_gmx_tx_pause_pkt_type {
@@ -2382,16 +1699,6 @@ union cvmx_agl_gmx_tx_pause_pkt_type {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xx;
- struct cvmx_agl_gmx_tx_pause_pkt_type_s cn52xxp1;
- struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xx;
- struct cvmx_agl_gmx_tx_pause_pkt_type_s cn56xxp1;
- struct cvmx_agl_gmx_tx_pause_pkt_type_s cn61xx;
- struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xx;
- struct cvmx_agl_gmx_tx_pause_pkt_type_s cn63xxp1;
- struct cvmx_agl_gmx_tx_pause_pkt_type_s cn66xx;
- struct cvmx_agl_gmx_tx_pause_pkt_type_s cn68xx;
- struct cvmx_agl_gmx_tx_pause_pkt_type_s cn68xxp1;
};
union cvmx_agl_prtx_ctl {
@@ -2447,12 +1754,6 @@ union cvmx_agl_prtx_ctl {
uint64_t drv_byp:1;
#endif
} s;
- struct cvmx_agl_prtx_ctl_s cn61xx;
- struct cvmx_agl_prtx_ctl_s cn63xx;
- struct cvmx_agl_prtx_ctl_s cn63xxp1;
- struct cvmx_agl_prtx_ctl_s cn66xx;
- struct cvmx_agl_prtx_ctl_s cn68xx;
- struct cvmx_agl_prtx_ctl_s cn68xxp1;
};
#endif
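The pattern deleted throughout this file (and in cvmx-asxx-defs.h below) is the auto-generated one in which a CSR union carried one per-chip member for every OCTEON model even when that member was a byte-for-byte alias of the common `s` layout; only members whose layout actually differs (e.g. the cn56xx variants kept above) survive. A toy reduction of the shape (self-contained sketch, not a real OCTEON register):

    #include <stdint.h>

    union example_csr {
    	uint64_t u64;
    	struct example_csr_s {		/* layout shared by all chips */
    		uint64_t enable:1;	/* bit 0 (little-endian order) */
    		uint64_t reserved_1_63:63;
    	} s;
    	/* formerly also: identical cn52xx, cn56xx, ... aliases of s */
    };

    static void set_enable(union example_csr *reg)
    {
    	reg->s.enable = 1;	/* callers now always go through .s */
    }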
diff --git a/arch/mips/include/asm/octeon/cvmx-asxx-defs.h b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
index 1eef155979f3..70f4a5729581 100644
--- a/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-asxx-defs.h
@@ -68,9 +68,6 @@ union cvmx_asxx_gmii_rx_clk_set {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_asxx_gmii_rx_clk_set_s cn30xx;
- struct cvmx_asxx_gmii_rx_clk_set_s cn31xx;
- struct cvmx_asxx_gmii_rx_clk_set_s cn50xx;
};
union cvmx_asxx_gmii_rx_dat_set {
@@ -84,9 +81,6 @@ union cvmx_asxx_gmii_rx_dat_set {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_asxx_gmii_rx_dat_set_s cn30xx;
- struct cvmx_asxx_gmii_rx_dat_set_s cn31xx;
- struct cvmx_asxx_gmii_rx_dat_set_s cn50xx;
};
union cvmx_asxx_int_en {
@@ -121,12 +115,6 @@ union cvmx_asxx_int_en {
uint64_t reserved_11_63:53;
#endif
} cn30xx;
- struct cvmx_asxx_int_en_cn30xx cn31xx;
- struct cvmx_asxx_int_en_s cn38xx;
- struct cvmx_asxx_int_en_s cn38xxp2;
- struct cvmx_asxx_int_en_cn30xx cn50xx;
- struct cvmx_asxx_int_en_s cn58xx;
- struct cvmx_asxx_int_en_s cn58xxp1;
};
union cvmx_asxx_int_reg {
@@ -161,12 +149,6 @@ union cvmx_asxx_int_reg {
uint64_t reserved_11_63:53;
#endif
} cn30xx;
- struct cvmx_asxx_int_reg_cn30xx cn31xx;
- struct cvmx_asxx_int_reg_s cn38xx;
- struct cvmx_asxx_int_reg_s cn38xxp2;
- struct cvmx_asxx_int_reg_cn30xx cn50xx;
- struct cvmx_asxx_int_reg_s cn58xx;
- struct cvmx_asxx_int_reg_s cn58xxp1;
};
union cvmx_asxx_mii_rx_dat_set {
@@ -180,8 +162,6 @@ union cvmx_asxx_mii_rx_dat_set {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_asxx_mii_rx_dat_set_s cn30xx;
- struct cvmx_asxx_mii_rx_dat_set_s cn50xx;
};
union cvmx_asxx_prt_loop {
@@ -210,12 +190,6 @@ union cvmx_asxx_prt_loop {
uint64_t reserved_7_63:57;
#endif
} cn30xx;
- struct cvmx_asxx_prt_loop_cn30xx cn31xx;
- struct cvmx_asxx_prt_loop_s cn38xx;
- struct cvmx_asxx_prt_loop_s cn38xxp2;
- struct cvmx_asxx_prt_loop_cn30xx cn50xx;
- struct cvmx_asxx_prt_loop_s cn58xx;
- struct cvmx_asxx_prt_loop_s cn58xxp1;
};
union cvmx_asxx_rld_bypass {
@@ -229,10 +203,6 @@ union cvmx_asxx_rld_bypass {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_asxx_rld_bypass_s cn38xx;
- struct cvmx_asxx_rld_bypass_s cn38xxp2;
- struct cvmx_asxx_rld_bypass_s cn58xx;
- struct cvmx_asxx_rld_bypass_s cn58xxp1;
};
union cvmx_asxx_rld_bypass_setting {
@@ -246,10 +216,6 @@ union cvmx_asxx_rld_bypass_setting {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_asxx_rld_bypass_setting_s cn38xx;
- struct cvmx_asxx_rld_bypass_setting_s cn38xxp2;
- struct cvmx_asxx_rld_bypass_setting_s cn58xx;
- struct cvmx_asxx_rld_bypass_setting_s cn58xxp1;
};
union cvmx_asxx_rld_comp {
@@ -276,9 +242,6 @@ union cvmx_asxx_rld_comp {
uint64_t reserved_8_63:56;
#endif
} cn38xx;
- struct cvmx_asxx_rld_comp_cn38xx cn38xxp2;
- struct cvmx_asxx_rld_comp_s cn58xx;
- struct cvmx_asxx_rld_comp_s cn58xxp1;
};
union cvmx_asxx_rld_data_drv {
@@ -294,10 +257,6 @@ union cvmx_asxx_rld_data_drv {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_asxx_rld_data_drv_s cn38xx;
- struct cvmx_asxx_rld_data_drv_s cn38xxp2;
- struct cvmx_asxx_rld_data_drv_s cn58xx;
- struct cvmx_asxx_rld_data_drv_s cn58xxp1;
};
union cvmx_asxx_rld_fcram_mode {
@@ -311,8 +270,6 @@ union cvmx_asxx_rld_fcram_mode {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_asxx_rld_fcram_mode_s cn38xx;
- struct cvmx_asxx_rld_fcram_mode_s cn38xxp2;
};
union cvmx_asxx_rld_nctl_strong {
@@ -326,10 +283,6 @@ union cvmx_asxx_rld_nctl_strong {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_asxx_rld_nctl_strong_s cn38xx;
- struct cvmx_asxx_rld_nctl_strong_s cn38xxp2;
- struct cvmx_asxx_rld_nctl_strong_s cn58xx;
- struct cvmx_asxx_rld_nctl_strong_s cn58xxp1;
};
union cvmx_asxx_rld_nctl_weak {
@@ -343,10 +296,6 @@ union cvmx_asxx_rld_nctl_weak {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_asxx_rld_nctl_weak_s cn38xx;
- struct cvmx_asxx_rld_nctl_weak_s cn38xxp2;
- struct cvmx_asxx_rld_nctl_weak_s cn58xx;
- struct cvmx_asxx_rld_nctl_weak_s cn58xxp1;
};
union cvmx_asxx_rld_pctl_strong {
@@ -360,10 +309,6 @@ union cvmx_asxx_rld_pctl_strong {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_asxx_rld_pctl_strong_s cn38xx;
- struct cvmx_asxx_rld_pctl_strong_s cn38xxp2;
- struct cvmx_asxx_rld_pctl_strong_s cn58xx;
- struct cvmx_asxx_rld_pctl_strong_s cn58xxp1;
};
union cvmx_asxx_rld_pctl_weak {
@@ -377,10 +322,6 @@ union cvmx_asxx_rld_pctl_weak {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_asxx_rld_pctl_weak_s cn38xx;
- struct cvmx_asxx_rld_pctl_weak_s cn38xxp2;
- struct cvmx_asxx_rld_pctl_weak_s cn58xx;
- struct cvmx_asxx_rld_pctl_weak_s cn58xxp1;
};
union cvmx_asxx_rld_setting {
@@ -411,9 +352,6 @@ union cvmx_asxx_rld_setting {
uint64_t reserved_5_63:59;
#endif
} cn38xx;
- struct cvmx_asxx_rld_setting_cn38xx cn38xxp2;
- struct cvmx_asxx_rld_setting_s cn58xx;
- struct cvmx_asxx_rld_setting_s cn58xxp1;
};
union cvmx_asxx_rx_clk_setx {
@@ -427,13 +365,6 @@ union cvmx_asxx_rx_clk_setx {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_asxx_rx_clk_setx_s cn30xx;
- struct cvmx_asxx_rx_clk_setx_s cn31xx;
- struct cvmx_asxx_rx_clk_setx_s cn38xx;
- struct cvmx_asxx_rx_clk_setx_s cn38xxp2;
- struct cvmx_asxx_rx_clk_setx_s cn50xx;
- struct cvmx_asxx_rx_clk_setx_s cn58xx;
- struct cvmx_asxx_rx_clk_setx_s cn58xxp1;
};
union cvmx_asxx_rx_prt_en {
@@ -456,12 +387,6 @@ union cvmx_asxx_rx_prt_en {
uint64_t reserved_3_63:61;
#endif
} cn30xx;
- struct cvmx_asxx_rx_prt_en_cn30xx cn31xx;
- struct cvmx_asxx_rx_prt_en_s cn38xx;
- struct cvmx_asxx_rx_prt_en_s cn38xxp2;
- struct cvmx_asxx_rx_prt_en_cn30xx cn50xx;
- struct cvmx_asxx_rx_prt_en_s cn58xx;
- struct cvmx_asxx_rx_prt_en_s cn58xxp1;
};
union cvmx_asxx_rx_wol {
@@ -477,8 +402,6 @@ union cvmx_asxx_rx_wol {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_asxx_rx_wol_s cn38xx;
- struct cvmx_asxx_rx_wol_s cn38xxp2;
};
union cvmx_asxx_rx_wol_msk {
@@ -490,8 +413,6 @@ union cvmx_asxx_rx_wol_msk {
uint64_t msk:64;
#endif
} s;
- struct cvmx_asxx_rx_wol_msk_s cn38xx;
- struct cvmx_asxx_rx_wol_msk_s cn38xxp2;
};
union cvmx_asxx_rx_wol_powok {
@@ -505,8 +426,6 @@ union cvmx_asxx_rx_wol_powok {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_asxx_rx_wol_powok_s cn38xx;
- struct cvmx_asxx_rx_wol_powok_s cn38xxp2;
};
union cvmx_asxx_rx_wol_sig {
@@ -520,8 +439,6 @@ union cvmx_asxx_rx_wol_sig {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_asxx_rx_wol_sig_s cn38xx;
- struct cvmx_asxx_rx_wol_sig_s cn38xxp2;
};
union cvmx_asxx_tx_clk_setx {
@@ -535,13 +452,6 @@ union cvmx_asxx_tx_clk_setx {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_asxx_tx_clk_setx_s cn30xx;
- struct cvmx_asxx_tx_clk_setx_s cn31xx;
- struct cvmx_asxx_tx_clk_setx_s cn38xx;
- struct cvmx_asxx_tx_clk_setx_s cn38xxp2;
- struct cvmx_asxx_tx_clk_setx_s cn50xx;
- struct cvmx_asxx_tx_clk_setx_s cn58xx;
- struct cvmx_asxx_tx_clk_setx_s cn58xxp1;
};
union cvmx_asxx_tx_comp_byp {
@@ -566,7 +476,6 @@ union cvmx_asxx_tx_comp_byp {
uint64_t reserved_9_63:55;
#endif
} cn30xx;
- struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx;
struct cvmx_asxx_tx_comp_byp_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
@@ -578,7 +487,6 @@ union cvmx_asxx_tx_comp_byp {
uint64_t reserved_8_63:56;
#endif
} cn38xx;
- struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2;
struct cvmx_asxx_tx_comp_byp_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
@@ -609,7 +517,6 @@ union cvmx_asxx_tx_comp_byp {
uint64_t reserved_13_63:51;
#endif
} cn58xx;
- struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1;
};
union cvmx_asxx_tx_hi_waterx {
@@ -632,12 +539,6 @@ union cvmx_asxx_tx_hi_waterx {
uint64_t reserved_3_63:61;
#endif
} cn30xx;
- struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx;
- struct cvmx_asxx_tx_hi_waterx_s cn38xx;
- struct cvmx_asxx_tx_hi_waterx_s cn38xxp2;
- struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx;
- struct cvmx_asxx_tx_hi_waterx_s cn58xx;
- struct cvmx_asxx_tx_hi_waterx_s cn58xxp1;
};
union cvmx_asxx_tx_prt_en {
@@ -660,12 +561,6 @@ union cvmx_asxx_tx_prt_en {
uint64_t reserved_3_63:61;
#endif
} cn30xx;
- struct cvmx_asxx_tx_prt_en_cn30xx cn31xx;
- struct cvmx_asxx_tx_prt_en_s cn38xx;
- struct cvmx_asxx_tx_prt_en_s cn38xxp2;
- struct cvmx_asxx_tx_prt_en_cn30xx cn50xx;
- struct cvmx_asxx_tx_prt_en_s cn58xx;
- struct cvmx_asxx_tx_prt_en_s cn58xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h
index 72d2e403a6e4..689a82cac740 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootmem.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h
@@ -146,18 +146,6 @@ struct cvmx_bootmem_desc {
extern int cvmx_bootmem_init(void *mem_desc_ptr);
/**
- * Allocate a block of memory from the free list that was passed
- * to the application by the bootloader.
- * This is an allocate-only algorithm, so freeing memory is not possible.
- *
- * @size: Size in bytes of block to allocate
- * @alignment: Alignment required - must be power of 2
- *
- * Returns pointer to block of memory, NULL on error
- */
-extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment);
-
-/**
* Allocate a block of memory from the free list that was
* passed to the application by the bootloader at a specific
* address. This is an allocate-only algorithm, so
@@ -174,22 +162,6 @@ extern void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
uint64_t alignment);
/**
- * Allocate a block of memory from the free list that was
- * passed to the application by the bootloader within a specified
- * address range. This is an allocate-only algorithm, so
- * freeing memory is not possible. Allocation will fail if
- * memory cannot be allocated in the requested range.
- *
- * @size: Size in bytes of block to allocate
- * @min_addr: defines the minimum address of the range
- * @max_addr: defines the maximum address of the range
- * @alignment: Alignment required - must be power of 2
- * Returns pointer to block of memory, NULL on error
- */
-extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
- uint64_t min_addr, uint64_t max_addr);
-
-/**
* Frees a previously allocated named bootmem block.
*
* @name: name of block to free
@@ -214,27 +186,6 @@ extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment,
char *name);
-
-
-/**
- * Allocate a block of memory from the free list that was passed
- * to the application by the bootloader, and assign it a name in the
- * global named block table. (part of the cvmx_bootmem_descriptor_t structure)
- * Named blocks can later be freed.
- *
- * @size: Size in bytes of block to allocate
- * @address: Physical address to allocate memory at. If this
- * memory is not available, the allocation fails.
- * @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN
- * bytes
- *
- * Returns a pointer to block of memory, NULL on error
- */
-extern void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address,
- char *name);
-
-
-
/**
* Allocate a block of memory from a specific range of the free list
* that was passed to the application by the bootloader, and assign it
@@ -351,33 +302,6 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
char *name, uint32_t flags);
/**
- * Finds a named memory block by name.
- * Also used for finding an unused entry in the named block table.
- *
- * @name: Name of memory block to find. If NULL pointer given, then
- * finds unused descriptor, if available.
- *
- * @flags: Flags to control options for the allocation.
- *
- * Returns Pointer to memory block descriptor, NULL if not found.
- * If NULL returned when name parameter is NULL, then no memory
- * block descriptors are available.
- */
-struct cvmx_bootmem_named_block_desc *
-cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
-
-/**
- * Frees a named block.
- *
- * @name: name of block to free
- * @flags: flags for passing options
- *
- * Returns 0 on failure
- * 1 on success
- */
-int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags);
-
-/**
* Frees a block to the bootmem allocator list. This must
* be used with care, as the size provided must match the size
* of the block that was allocated, or the list will become
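
The declarations removed in this file are the anonymous allocate-only entry points (cvmx_bootmem_alloc, cvmx_bootmem_alloc_range, cvmx_bootmem_alloc_named_address) and the low-level named-block find/free helpers; the named-block interface kept by these hunks still covers both allocation and freeing. A minimal sketch, assuming the retained cvmx_bootmem_alloc_named() and cvmx_bootmem_free_named() declarations from this header; the block name and sizes are made up for illustration:

    #include <asm/octeon/cvmx-bootmem.h>

    /* Grab a named bootmem block.  Unlike the removed anonymous
     * allocators, a named block is recorded in the named-block
     * table and can later be freed by name.
     */
    static void *example_named_bootmem(void)
    {
            /* 64 KiB, 128-byte aligned */
            void *buf = cvmx_bootmem_alloc_named(0x10000, 128, "example-blk");

            if (!buf)
                    return NULL;    /* no suitable free range */

            /* ... use buf; when done:
             * cvmx_bootmem_free_named("example-blk");
             */
            return buf;
    }
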
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu2-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu2-defs.h
index 148bc9a0085d..5babd88d4110 100644
--- a/arch/mips/include/asm/octeon/cvmx-ciu2-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-ciu2-defs.h
@@ -28,7081 +28,21 @@
#ifndef __CVMX_CIU2_DEFS_H__
#define __CVMX_CIU2_DEFS_H__
-#define CVMX_CIU2_ACK_IOX_INT(block_id) (CVMX_ADD_IO_SEG(0x00010701080C0800ull) + ((block_id) & 1) * 0x200000ull)
#define CVMX_CIU2_ACK_PPX_IP2(block_id) (CVMX_ADD_IO_SEG(0x00010701000C0000ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_ACK_PPX_IP3(block_id) (CVMX_ADD_IO_SEG(0x00010701000C0200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_ACK_PPX_IP4(block_id) (CVMX_ADD_IO_SEG(0x00010701000C0400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108097800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B7800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A7800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070108094800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B4800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A4800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070108098800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B8800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A8800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070108095800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B5800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A5800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108093800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B3800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A3800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070108096800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B6800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A6800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070108092800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B2800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A2800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070108091800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B1800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A1800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070108090800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701080B0800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_IOX_INT_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701080A0800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100097000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B7000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A7000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100094000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B4000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A4000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100098000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B8000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A8000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100095000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B5000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A5000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100093000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B3000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A3000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100096000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B6000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A6000ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_EN_PPX_IP2_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100092000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B2000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A2000ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_EN_PPX_IP2_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100091000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B1000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP2_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A1000ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_EN_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100090000ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B0000ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A0000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100097200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B7200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A7200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100094200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B4200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A4200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100098200ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B8200ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A8200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100095200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B5200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A5200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100093200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B3200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A3200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100096200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B6200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A6200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100092200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B2200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A2200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100091200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B1200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A1200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100090200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B0200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP3_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A0200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100097400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_GPIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B7400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_GPIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A7400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100094400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_IO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B4400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_IO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A4400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100098400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_MBOX_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B8400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_MBOX_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A8400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100095400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_MEM_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B5400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_MEM_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A5400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100093400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_MIO_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B3400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_MIO_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A3400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100096400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_PKT_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B6400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_PKT_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A6400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100092400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_RML_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B2400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_RML_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A2400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100091400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_WDOG_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B1400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_WDOG_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A1400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100090400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_WRKQ_W1C(block_id) (CVMX_ADD_IO_SEG(0x00010701000B0400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_EN_PPX_IP4_WRKQ_W1S(block_id) (CVMX_ADD_IO_SEG(0x00010701000A0400ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_INTR_CIU_READY (CVMX_ADD_IO_SEG(0x0001070100102008ull))
-#define CVMX_CIU2_INTR_RAM_ECC_CTL (CVMX_ADD_IO_SEG(0x0001070100102010ull))
-#define CVMX_CIU2_INTR_RAM_ECC_ST (CVMX_ADD_IO_SEG(0x0001070100102018ull))
-#define CVMX_CIU2_INTR_SLOWDOWN (CVMX_ADD_IO_SEG(0x0001070100102000ull))
-#define CVMX_CIU2_MSIRED_PPX_IP2(block_id) (CVMX_ADD_IO_SEG(0x00010701000C1000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_MSIRED_PPX_IP3(block_id) (CVMX_ADD_IO_SEG(0x00010701000C1200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_MSIRED_PPX_IP4(block_id) (CVMX_ADD_IO_SEG(0x00010701000C1400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_MSI_RCVX(offset) (CVMX_ADD_IO_SEG(0x00010701000C2000ull) + ((offset) & 255) * 8)
-#define CVMX_CIU2_MSI_SELX(offset) (CVMX_ADD_IO_SEG(0x00010701000C3000ull) + ((offset) & 255) * 8)
-#define CVMX_CIU2_RAW_IOX_INT_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108047800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_RAW_IOX_INT_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070108044800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_RAW_IOX_INT_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070108045800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_RAW_IOX_INT_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108043800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_RAW_IOX_INT_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070108046800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_RAW_IOX_INT_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070108042800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_RAW_IOX_INT_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070108041800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_RAW_IOX_INT_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070108040800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP2_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100047000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP2_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100044000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP2_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100045000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP2_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100043000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP2_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100046000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP2_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100042000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP2_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100041000ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_RAW_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100040000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP3_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100047200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP3_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100044200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP3_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100045200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP3_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100043200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP3_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100046200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP3_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100042200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP3_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100041200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP3_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100040200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP4_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100047400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP4_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100044400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP4_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100045400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP4_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100043400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP4_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100046400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP4_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100042400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP4_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100041400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_RAW_PPX_IP4_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100040400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_IOX_INT_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108087800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_SRC_IOX_INT_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070108084800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_SRC_IOX_INT_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070108088800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_SRC_IOX_INT_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070108085800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_SRC_IOX_INT_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070108083800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_SRC_IOX_INT_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070108086800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_SRC_IOX_INT_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070108082800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_SRC_IOX_INT_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070108081800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_SRC_IOX_INT_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070108080800ull) + ((block_id) & 1) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP2_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100087000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP2_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100084000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP2_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100088000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP2_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100085000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP2_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100083000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP2_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100086000ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_SRC_PPX_IP2_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100082000ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_SRC_PPX_IP2_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100081000ull) + ((block_id) & 31) * 0x200000ull)
#define CVMX_CIU2_SRC_PPX_IP2_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100080000ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP3_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100087200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP3_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100084200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP3_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100088200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP3_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100085200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP3_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100083200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP3_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100086200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP3_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100082200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP3_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100081200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP3_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100080200ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP4_GPIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100087400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP4_IO(block_id) (CVMX_ADD_IO_SEG(0x0001070100084400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP4_MBOX(block_id) (CVMX_ADD_IO_SEG(0x0001070100088400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP4_MEM(block_id) (CVMX_ADD_IO_SEG(0x0001070100085400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP4_MIO(block_id) (CVMX_ADD_IO_SEG(0x0001070100083400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP4_PKT(block_id) (CVMX_ADD_IO_SEG(0x0001070100086400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP4_RML(block_id) (CVMX_ADD_IO_SEG(0x0001070100082400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP4_WDOG(block_id) (CVMX_ADD_IO_SEG(0x0001070100081400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SRC_PPX_IP4_WRKQ(block_id) (CVMX_ADD_IO_SEG(0x0001070100080400ull) + ((block_id) & 31) * 0x200000ull)
-#define CVMX_CIU2_SUM_IOX_INT(offset) (CVMX_ADD_IO_SEG(0x0001070100000800ull) + ((offset) & 1) * 8)
#define CVMX_CIU2_SUM_PPX_IP2(offset) (CVMX_ADD_IO_SEG(0x0001070100000000ull) + ((offset) & 31) * 8)
#define CVMX_CIU2_SUM_PPX_IP3(offset) (CVMX_ADD_IO_SEG(0x0001070100000200ull) + ((offset) & 31) * 8)
-#define CVMX_CIU2_SUM_PPX_IP4(offset) (CVMX_ADD_IO_SEG(0x0001070100000400ull) + ((offset) & 31) * 8)
-
-union cvmx_ciu2_ack_iox_int {
- uint64_t u64;
- struct cvmx_ciu2_ack_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t ack:1;
-#else
- uint64_t ack:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu2_ack_iox_int_s cn68xx;
- struct cvmx_ciu2_ack_iox_int_s cn68xxp1;
-};
-
-union cvmx_ciu2_ack_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu2_ack_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t ack:1;
-#else
- uint64_t ack:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu2_ack_ppx_ip2_s cn68xx;
- struct cvmx_ciu2_ack_ppx_ip2_s cn68xxp1;
-};
-
-union cvmx_ciu2_ack_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu2_ack_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t ack:1;
-#else
- uint64_t ack:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu2_ack_ppx_ip3_s cn68xx;
- struct cvmx_ciu2_ack_ppx_ip3_s cn68xxp1;
-};
-
-union cvmx_ciu2_ack_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu2_ack_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t ack:1;
-#else
- uint64_t ack:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu2_ack_ppx_ip4_s cn68xx;
- struct cvmx_ciu2_ack_ppx_ip4_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_gpio {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_gpio_s cn68xx;
- struct cvmx_ciu2_en_iox_int_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_gpio_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_gpio_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_gpio_w1c_s cn68xx;
- struct cvmx_ciu2_en_iox_int_gpio_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_gpio_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_gpio_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_gpio_w1s_s cn68xx;
- struct cvmx_ciu2_en_iox_int_gpio_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_io {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_io_s cn68xx;
- struct cvmx_ciu2_en_iox_int_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_io_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_io_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_io_w1c_s cn68xx;
- struct cvmx_ciu2_en_iox_int_io_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_io_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_io_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_io_w1s_s cn68xx;
- struct cvmx_ciu2_en_iox_int_io_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_mbox {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_mbox_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_mbox_s cn68xx;
- struct cvmx_ciu2_en_iox_int_mbox_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_mbox_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_mbox_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_mbox_w1c_s cn68xx;
- struct cvmx_ciu2_en_iox_int_mbox_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_mbox_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_mbox_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_mbox_w1s_s cn68xx;
- struct cvmx_ciu2_en_iox_int_mbox_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_mem {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_mem_s cn68xx;
- struct cvmx_ciu2_en_iox_int_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_mem_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_mem_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_mem_w1c_s cn68xx;
- struct cvmx_ciu2_en_iox_int_mem_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_mem_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_mem_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_mem_w1s_s cn68xx;
- struct cvmx_ciu2_en_iox_int_mem_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_mio {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_mio_s cn68xx;
- struct cvmx_ciu2_en_iox_int_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_mio_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_mio_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_mio_w1c_s cn68xx;
- struct cvmx_ciu2_en_iox_int_mio_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_mio_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_mio_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_mio_w1s_s cn68xx;
- struct cvmx_ciu2_en_iox_int_mio_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_pkt {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_pkt_s cn68xx;
- struct cvmx_ciu2_en_iox_int_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_pkt_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_pkt_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_pkt_w1c_s cn68xx;
- struct cvmx_ciu2_en_iox_int_pkt_w1c_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_pkt_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_pkt_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_pkt_w1s_s cn68xx;
- struct cvmx_ciu2_en_iox_int_pkt_w1s_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_rml {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_rml_s cn68xx;
- struct cvmx_ciu2_en_iox_int_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_rml_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_rml_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_rml_w1c_s cn68xx;
- struct cvmx_ciu2_en_iox_int_rml_w1c_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_rml_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_rml_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_rml_w1s_s cn68xx;
- struct cvmx_ciu2_en_iox_int_rml_w1s_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_wdog {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_wdog_s cn68xx;
- struct cvmx_ciu2_en_iox_int_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_wdog_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_wdog_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_wdog_w1c_s cn68xx;
- struct cvmx_ciu2_en_iox_int_wdog_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_wdog_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_wdog_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_wdog_w1s_s cn68xx;
- struct cvmx_ciu2_en_iox_int_wdog_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_wrkq_s cn68xx;
- struct cvmx_ciu2_en_iox_int_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_wrkq_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_wrkq_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_wrkq_w1c_s cn68xx;
- struct cvmx_ciu2_en_iox_int_wrkq_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_iox_int_wrkq_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_iox_int_wrkq_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_iox_int_wrkq_w1s_s cn68xx;
- struct cvmx_ciu2_en_iox_int_wrkq_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_gpio {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_gpio_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_gpio_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_gpio_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_gpio_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_gpio_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_gpio_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_gpio_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_gpio_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_gpio_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_io {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_io_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_io_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_io_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_io_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_io_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_io_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_io_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_io_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_io_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_mbox {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_mbox_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_mbox_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_mbox_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_mbox_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_mbox_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_mbox_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_mbox_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_mbox_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_mbox_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_mbox_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_mbox_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_mem {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_mem_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_mem_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_mem_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_mem_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_mem_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_mem_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_mem_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_mem_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_mem_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_mio {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_mio_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_mio_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_mio_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_mio_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_mio_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_mio_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_mio_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_mio_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_mio_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_pkt {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_pkt_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_pkt_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_pkt_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_pkt_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_pkt_w1c_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_pkt_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_pkt_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_pkt_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_pkt_w1s_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_rml {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_rml_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_rml_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_rml_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_rml_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_rml_w1c_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_rml_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_rml_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_rml_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_rml_w1s_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_wdog {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_wdog_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_wdog_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_wdog_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_wdog_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_wdog_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_wdog_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_wdog_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_wdog_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_wdog_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_wrkq_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_wrkq_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_wrkq_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_wrkq_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_wrkq_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip2_wrkq_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip2_wrkq_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip2_wrkq_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip2_wrkq_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_gpio {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_gpio_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_gpio_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_gpio_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_gpio_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_gpio_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_gpio_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_gpio_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_gpio_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_gpio_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_io {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_io_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_io_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_io_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_io_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_io_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_io_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_io_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_io_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_io_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_mbox {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_mbox_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_mbox_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_mbox_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_mbox_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_mbox_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_mbox_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_mbox_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_mbox_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_mbox_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_mbox_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_mbox_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_mem {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_mem_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_mem_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_mem_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_mem_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_mem_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_mem_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_mem_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_mem_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_mem_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_mio {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_mio_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_mio_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_mio_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_mio_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_mio_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_mio_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_mio_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_mio_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_mio_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_pkt {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_pkt_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_pkt_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_pkt_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_pkt_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_pkt_w1c_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_pkt_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_pkt_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_pkt_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_pkt_w1s_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_rml {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_rml_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_rml_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_rml_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_rml_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_rml_w1c_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_rml_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_rml_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_rml_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_rml_w1s_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_wdog {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_wdog_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_wdog_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_wdog_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_wdog_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_wdog_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_wdog_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_wdog_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_wdog_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_wdog_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_wrkq_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_wrkq_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_wrkq_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_wrkq_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_wrkq_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip3_wrkq_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip3_wrkq_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip3_wrkq_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip3_wrkq_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_gpio {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_gpio_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_gpio_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_gpio_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_gpio_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_gpio_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_gpio_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_gpio_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_gpio_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_gpio_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_io {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_io_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_io_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_io_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_io_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_io_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_io_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_io_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_io_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_io_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_mbox {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_mbox_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_mbox_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_mbox_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_mbox_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_mbox_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_mbox_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_mbox_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_mbox_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_mbox_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_mbox_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_mbox_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_mem {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_mem_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_mem_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_mem_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_mem_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_mem_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_mem_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_mem_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_mem_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_mem_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_mio {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_mio_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_mio_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_mio_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_mio_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_mio_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_mio_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_mio_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_mio_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_mio_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_pkt {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_pkt_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_pkt_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_pkt_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_pkt_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_pkt_w1c_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_pkt_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_pkt_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_pkt_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_pkt_w1s_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_rml {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_rml_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_rml_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_rml_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_rml_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_rml_w1c_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_rml_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_rml_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_rml_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_rml_w1s_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_wdog {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_wdog_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_wdog_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_wdog_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_wdog_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_wdog_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_wdog_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_wdog_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_wdog_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_wdog_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_wrkq_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_wrkq_w1c {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_wrkq_w1c_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_wrkq_w1c_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_wrkq_w1c_s cn68xxp1;
-};
-
-union cvmx_ciu2_en_ppx_ip4_wrkq_w1s {
- uint64_t u64;
- struct cvmx_ciu2_en_ppx_ip4_wrkq_w1s_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_en_ppx_ip4_wrkq_w1s_s cn68xx;
- struct cvmx_ciu2_en_ppx_ip4_wrkq_w1s_s cn68xxp1;
-};
-
-union cvmx_ciu2_intr_ciu_ready {
- uint64_t u64;
- struct cvmx_ciu2_intr_ciu_ready_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t ready:1;
-#else
- uint64_t ready:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu2_intr_ciu_ready_s cn68xx;
- struct cvmx_ciu2_intr_ciu_ready_s cn68xxp1;
-};
-
-union cvmx_ciu2_intr_ram_ecc_ctl {
- uint64_t u64;
- struct cvmx_ciu2_intr_ram_ecc_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t flip_synd:2;
- uint64_t ecc_ena:1;
-#else
- uint64_t ecc_ena:1;
- uint64_t flip_synd:2;
- uint64_t reserved_3_63:61;
-#endif
- } s;
- struct cvmx_ciu2_intr_ram_ecc_ctl_s cn68xx;
- struct cvmx_ciu2_intr_ram_ecc_ctl_s cn68xxp1;
-};
-
-union cvmx_ciu2_intr_ram_ecc_st {
- uint64_t u64;
- struct cvmx_ciu2_intr_ram_ecc_st_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_23_63:41;
- uint64_t addr:7;
- uint64_t reserved_13_15:3;
- uint64_t syndrom:9;
- uint64_t reserved_2_3:2;
- uint64_t dbe:1;
- uint64_t sbe:1;
-#else
- uint64_t sbe:1;
- uint64_t dbe:1;
- uint64_t reserved_2_3:2;
- uint64_t syndrom:9;
- uint64_t reserved_13_15:3;
- uint64_t addr:7;
- uint64_t reserved_23_63:41;
-#endif
- } s;
- struct cvmx_ciu2_intr_ram_ecc_st_s cn68xx;
- struct cvmx_ciu2_intr_ram_ecc_st_s cn68xxp1;
-};
-
-union cvmx_ciu2_intr_slowdown {
- uint64_t u64;
- struct cvmx_ciu2_intr_slowdown_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t ctl:3;
-#else
- uint64_t ctl:3;
- uint64_t reserved_3_63:61;
-#endif
- } s;
- struct cvmx_ciu2_intr_slowdown_s cn68xx;
- struct cvmx_ciu2_intr_slowdown_s cn68xxp1;
-};
-
-union cvmx_ciu2_msi_rcvx {
- uint64_t u64;
- struct cvmx_ciu2_msi_rcvx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t msi_rcv:1;
-#else
- uint64_t msi_rcv:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_ciu2_msi_rcvx_s cn68xx;
- struct cvmx_ciu2_msi_rcvx_s cn68xxp1;
-};
-
-union cvmx_ciu2_msi_selx {
- uint64_t u64;
- struct cvmx_ciu2_msi_selx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_13_63:51;
- uint64_t pp_num:5;
- uint64_t reserved_6_7:2;
- uint64_t ip_num:2;
- uint64_t reserved_1_3:3;
- uint64_t en:1;
-#else
- uint64_t en:1;
- uint64_t reserved_1_3:3;
- uint64_t ip_num:2;
- uint64_t reserved_6_7:2;
- uint64_t pp_num:5;
- uint64_t reserved_13_63:51;
-#endif
- } s;
- struct cvmx_ciu2_msi_selx_s cn68xx;
- struct cvmx_ciu2_msi_selx_s cn68xxp1;
-};
-
-union cvmx_ciu2_msired_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu2_msired_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_21_63:43;
- uint64_t intr:1;
- uint64_t reserved_17_19:3;
- uint64_t newint:1;
- uint64_t reserved_8_15:8;
- uint64_t msi_num:8;
-#else
- uint64_t msi_num:8;
- uint64_t reserved_8_15:8;
- uint64_t newint:1;
- uint64_t reserved_17_19:3;
- uint64_t intr:1;
- uint64_t reserved_21_63:43;
-#endif
- } s;
- struct cvmx_ciu2_msired_ppx_ip2_s cn68xx;
- struct cvmx_ciu2_msired_ppx_ip2_s cn68xxp1;
-};
-
-union cvmx_ciu2_msired_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu2_msired_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_21_63:43;
- uint64_t intr:1;
- uint64_t reserved_17_19:3;
- uint64_t newint:1;
- uint64_t reserved_8_15:8;
- uint64_t msi_num:8;
-#else
- uint64_t msi_num:8;
- uint64_t reserved_8_15:8;
- uint64_t newint:1;
- uint64_t reserved_17_19:3;
- uint64_t intr:1;
- uint64_t reserved_21_63:43;
-#endif
- } s;
- struct cvmx_ciu2_msired_ppx_ip3_s cn68xx;
- struct cvmx_ciu2_msired_ppx_ip3_s cn68xxp1;
-};
-
-union cvmx_ciu2_msired_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu2_msired_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_21_63:43;
- uint64_t intr:1;
- uint64_t reserved_17_19:3;
- uint64_t newint:1;
- uint64_t reserved_8_15:8;
- uint64_t msi_num:8;
-#else
- uint64_t msi_num:8;
- uint64_t reserved_8_15:8;
- uint64_t newint:1;
- uint64_t reserved_17_19:3;
- uint64_t intr:1;
- uint64_t reserved_21_63:43;
-#endif
- } s;
- struct cvmx_ciu2_msired_ppx_ip4_s cn68xx;
- struct cvmx_ciu2_msired_ppx_ip4_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_iox_int_gpio {
- uint64_t u64;
- struct cvmx_ciu2_raw_iox_int_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_raw_iox_int_gpio_s cn68xx;
- struct cvmx_ciu2_raw_iox_int_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_iox_int_io {
- uint64_t u64;
- struct cvmx_ciu2_raw_iox_int_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_raw_iox_int_io_s cn68xx;
- struct cvmx_ciu2_raw_iox_int_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_iox_int_mem {
- uint64_t u64;
- struct cvmx_ciu2_raw_iox_int_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_raw_iox_int_mem_s cn68xx;
- struct cvmx_ciu2_raw_iox_int_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_iox_int_mio {
- uint64_t u64;
- struct cvmx_ciu2_raw_iox_int_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_raw_iox_int_mio_s cn68xx;
- struct cvmx_ciu2_raw_iox_int_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_iox_int_pkt {
- uint64_t u64;
- struct cvmx_ciu2_raw_iox_int_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_raw_iox_int_pkt_s cn68xx;
- struct cvmx_ciu2_raw_iox_int_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_raw_iox_int_rml {
- uint64_t u64;
- struct cvmx_ciu2_raw_iox_int_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_raw_iox_int_rml_s cn68xx;
- struct cvmx_ciu2_raw_iox_int_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_raw_iox_int_wdog {
- uint64_t u64;
- struct cvmx_ciu2_raw_iox_int_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_raw_iox_int_wdog_s cn68xx;
- struct cvmx_ciu2_raw_iox_int_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_iox_int_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_raw_iox_int_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_raw_iox_int_wrkq_s cn68xx;
- struct cvmx_ciu2_raw_iox_int_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip2_gpio {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip2_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip2_gpio_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip2_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip2_io {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip2_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip2_io_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip2_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip2_mem {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip2_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip2_mem_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip2_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip2_mio {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip2_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip2_mio_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip2_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip2_pkt {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip2_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip2_pkt_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip2_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip2_rml {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip2_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip2_rml_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip2_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip2_wdog {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip2_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip2_wdog_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip2_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip2_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip2_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip2_wrkq_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip2_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip3_gpio {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip3_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip3_gpio_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip3_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip3_io {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip3_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip3_io_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip3_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip3_mem {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip3_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip3_mem_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip3_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip3_mio {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip3_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip3_mio_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip3_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip3_pkt {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip3_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip3_pkt_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip3_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip3_rml {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip3_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip3_rml_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip3_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip3_wdog {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip3_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip3_wdog_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip3_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip3_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip3_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip3_wrkq_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip3_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip4_gpio {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip4_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip4_gpio_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip4_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip4_io {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip4_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip4_io_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip4_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip4_mem {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip4_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip4_mem_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip4_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip4_mio {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip4_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip4_mio_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip4_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip4_pkt {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip4_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip4_pkt_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip4_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip4_rml {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip4_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip4_rml_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip4_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip4_wdog {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip4_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip4_wdog_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip4_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_raw_ppx_ip4_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_raw_ppx_ip4_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_raw_ppx_ip4_wrkq_s cn68xx;
- struct cvmx_ciu2_raw_ppx_ip4_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_iox_int_gpio {
- uint64_t u64;
- struct cvmx_ciu2_src_iox_int_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_src_iox_int_gpio_s cn68xx;
- struct cvmx_ciu2_src_iox_int_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_iox_int_io {
- uint64_t u64;
- struct cvmx_ciu2_src_iox_int_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_src_iox_int_io_s cn68xx;
- struct cvmx_ciu2_src_iox_int_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_iox_int_mbox {
- uint64_t u64;
- struct cvmx_ciu2_src_iox_int_mbox_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_src_iox_int_mbox_s cn68xx;
- struct cvmx_ciu2_src_iox_int_mbox_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_iox_int_mem {
- uint64_t u64;
- struct cvmx_ciu2_src_iox_int_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_src_iox_int_mem_s cn68xx;
- struct cvmx_ciu2_src_iox_int_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_iox_int_mio {
- uint64_t u64;
- struct cvmx_ciu2_src_iox_int_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_src_iox_int_mio_s cn68xx;
- struct cvmx_ciu2_src_iox_int_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_iox_int_pkt {
- uint64_t u64;
- struct cvmx_ciu2_src_iox_int_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_src_iox_int_pkt_s cn68xx;
- struct cvmx_ciu2_src_iox_int_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_src_iox_int_rml {
- uint64_t u64;
- struct cvmx_ciu2_src_iox_int_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_src_iox_int_rml_s cn68xx;
- struct cvmx_ciu2_src_iox_int_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_src_iox_int_wdog {
- uint64_t u64;
- struct cvmx_ciu2_src_iox_int_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_src_iox_int_wdog_s cn68xx;
- struct cvmx_ciu2_src_iox_int_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_iox_int_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_src_iox_int_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_src_iox_int_wrkq_s cn68xx;
- struct cvmx_ciu2_src_iox_int_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip2_gpio {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip2_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip2_gpio_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip2_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip2_io {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip2_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip2_io_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip2_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip2_mbox {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip2_mbox_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip2_mbox_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip2_mbox_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip2_mem {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip2_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip2_mem_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip2_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip2_mio {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip2_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip2_mio_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip2_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip2_pkt {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip2_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip2_pkt_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip2_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip2_rml {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip2_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip2_rml_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip2_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip2_wdog {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip2_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip2_wdog_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip2_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip2_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip2_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip2_wrkq_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip2_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip3_gpio {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip3_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip3_gpio_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip3_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip3_io {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip3_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip3_io_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip3_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip3_mbox {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip3_mbox_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip3_mbox_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip3_mbox_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip3_mem {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip3_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip3_mem_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip3_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip3_mio {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip3_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip3_mio_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip3_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip3_pkt {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip3_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip3_pkt_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip3_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip3_rml {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip3_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip3_rml_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip3_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip3_wdog {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip3_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip3_wdog_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip3_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip3_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip3_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip3_wrkq_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip3_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip4_gpio {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip4_gpio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t gpio:16;
-#else
- uint64_t gpio:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip4_gpio_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip4_gpio_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip4_io {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip4_io_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_34_63:30;
- uint64_t pem:2;
- uint64_t reserved_18_31:14;
- uint64_t pci_inta:2;
- uint64_t reserved_13_15:3;
- uint64_t msired:1;
- uint64_t pci_msi:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_intr:4;
-#else
- uint64_t pci_intr:4;
- uint64_t reserved_4_7:4;
- uint64_t pci_msi:4;
- uint64_t msired:1;
- uint64_t reserved_13_15:3;
- uint64_t pci_inta:2;
- uint64_t reserved_18_31:14;
- uint64_t pem:2;
- uint64_t reserved_34_63:30;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip4_io_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip4_io_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip4_mbox {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip4_mbox_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t mbox:4;
-#else
- uint64_t mbox:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip4_mbox_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip4_mbox_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip4_mem {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip4_mem_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t lmc:4;
-#else
- uint64_t lmc:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip4_mem_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip4_mem_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip4_mio {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip4_mio_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t rst:1;
- uint64_t reserved_49_62:14;
- uint64_t ptp:1;
- uint64_t reserved_45_47:3;
- uint64_t usb_hci:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_uctl:1;
- uint64_t reserved_38_39:2;
- uint64_t uart:2;
- uint64_t reserved_34_35:2;
- uint64_t twsi:2;
- uint64_t reserved_19_31:13;
- uint64_t bootdma:1;
- uint64_t mio:1;
- uint64_t nand:1;
- uint64_t reserved_12_15:4;
- uint64_t timer:4;
- uint64_t reserved_3_7:5;
- uint64_t ipd_drp:1;
- uint64_t ssoiq:1;
- uint64_t ipdppthr:1;
-#else
- uint64_t ipdppthr:1;
- uint64_t ssoiq:1;
- uint64_t ipd_drp:1;
- uint64_t reserved_3_7:5;
- uint64_t timer:4;
- uint64_t reserved_12_15:4;
- uint64_t nand:1;
- uint64_t mio:1;
- uint64_t bootdma:1;
- uint64_t reserved_19_31:13;
- uint64_t twsi:2;
- uint64_t reserved_34_35:2;
- uint64_t uart:2;
- uint64_t reserved_38_39:2;
- uint64_t usb_uctl:1;
- uint64_t reserved_41_43:3;
- uint64_t usb_hci:1;
- uint64_t reserved_45_47:3;
- uint64_t ptp:1;
- uint64_t reserved_49_62:14;
- uint64_t rst:1;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip4_mio_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip4_mio_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip4_pkt {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip4_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_54_63:10;
- uint64_t ilk_drp:2;
- uint64_t reserved_49_51:3;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_51:3;
- uint64_t ilk_drp:2;
- uint64_t reserved_54_63:10;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip4_pkt_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip4_pkt_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_49_63:15;
- uint64_t ilk:1;
- uint64_t reserved_41_47:7;
- uint64_t mii:1;
- uint64_t reserved_33_39:7;
- uint64_t agl:1;
- uint64_t reserved_13_31:19;
- uint64_t gmx_drp:5;
- uint64_t reserved_5_7:3;
- uint64_t agx:5;
-#else
- uint64_t agx:5;
- uint64_t reserved_5_7:3;
- uint64_t gmx_drp:5;
- uint64_t reserved_13_31:19;
- uint64_t agl:1;
- uint64_t reserved_33_39:7;
- uint64_t mii:1;
- uint64_t reserved_41_47:7;
- uint64_t ilk:1;
- uint64_t reserved_49_63:15;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip4_rml {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip4_rml_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_37_39:3;
- uint64_t dpi_dma:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_35:2;
- uint64_t dpi_dma:1;
- uint64_t reserved_37_39:3;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip4_rml_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip4_rml_cn68xxp1 {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_56_63:8;
- uint64_t trace:4;
- uint64_t reserved_49_51:3;
- uint64_t l2c:1;
- uint64_t reserved_41_47:7;
- uint64_t dfa:1;
- uint64_t reserved_34_39:6;
- uint64_t dpi:1;
- uint64_t sli:1;
- uint64_t reserved_31_31:1;
- uint64_t key:1;
- uint64_t rad:1;
- uint64_t tim:1;
- uint64_t reserved_25_27:3;
- uint64_t zip:1;
- uint64_t reserved_17_23:7;
- uint64_t sso:1;
- uint64_t reserved_8_15:8;
- uint64_t pko:1;
- uint64_t pip:1;
- uint64_t ipd:1;
- uint64_t fpa:1;
- uint64_t reserved_1_3:3;
- uint64_t iob:1;
-#else
- uint64_t iob:1;
- uint64_t reserved_1_3:3;
- uint64_t fpa:1;
- uint64_t ipd:1;
- uint64_t pip:1;
- uint64_t pko:1;
- uint64_t reserved_8_15:8;
- uint64_t sso:1;
- uint64_t reserved_17_23:7;
- uint64_t zip:1;
- uint64_t reserved_25_27:3;
- uint64_t tim:1;
- uint64_t rad:1;
- uint64_t key:1;
- uint64_t reserved_31_31:1;
- uint64_t sli:1;
- uint64_t dpi:1;
- uint64_t reserved_34_39:6;
- uint64_t dfa:1;
- uint64_t reserved_41_47:7;
- uint64_t l2c:1;
- uint64_t reserved_49_51:3;
- uint64_t trace:4;
- uint64_t reserved_56_63:8;
-#endif
- } cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip4_wdog {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip4_wdog_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t wdog:32;
-#else
- uint64_t wdog:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip4_wdog_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip4_wdog_s cn68xxp1;
-};
-
-union cvmx_ciu2_src_ppx_ip4_wrkq {
- uint64_t u64;
- struct cvmx_ciu2_src_ppx_ip4_wrkq_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t workq:64;
-#else
- uint64_t workq:64;
-#endif
- } s;
- struct cvmx_ciu2_src_ppx_ip4_wrkq_s cn68xx;
- struct cvmx_ciu2_src_ppx_ip4_wrkq_s cn68xxp1;
-};
-
-union cvmx_ciu2_sum_iox_int {
- uint64_t u64;
- struct cvmx_ciu2_sum_iox_int_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t mbox:4;
- uint64_t reserved_8_59:52;
- uint64_t gpio:1;
- uint64_t pkt:1;
- uint64_t mem:1;
- uint64_t io:1;
- uint64_t mio:1;
- uint64_t rml:1;
- uint64_t wdog:1;
- uint64_t workq:1;
-#else
- uint64_t workq:1;
- uint64_t wdog:1;
- uint64_t rml:1;
- uint64_t mio:1;
- uint64_t io:1;
- uint64_t mem:1;
- uint64_t pkt:1;
- uint64_t gpio:1;
- uint64_t reserved_8_59:52;
- uint64_t mbox:4;
-#endif
- } s;
- struct cvmx_ciu2_sum_iox_int_s cn68xx;
- struct cvmx_ciu2_sum_iox_int_s cn68xxp1;
-};
-
-union cvmx_ciu2_sum_ppx_ip2 {
- uint64_t u64;
- struct cvmx_ciu2_sum_ppx_ip2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t mbox:4;
- uint64_t reserved_8_59:52;
- uint64_t gpio:1;
- uint64_t pkt:1;
- uint64_t mem:1;
- uint64_t io:1;
- uint64_t mio:1;
- uint64_t rml:1;
- uint64_t wdog:1;
- uint64_t workq:1;
-#else
- uint64_t workq:1;
- uint64_t wdog:1;
- uint64_t rml:1;
- uint64_t mio:1;
- uint64_t io:1;
- uint64_t mem:1;
- uint64_t pkt:1;
- uint64_t gpio:1;
- uint64_t reserved_8_59:52;
- uint64_t mbox:4;
-#endif
- } s;
- struct cvmx_ciu2_sum_ppx_ip2_s cn68xx;
- struct cvmx_ciu2_sum_ppx_ip2_s cn68xxp1;
-};
-
-union cvmx_ciu2_sum_ppx_ip3 {
- uint64_t u64;
- struct cvmx_ciu2_sum_ppx_ip3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t mbox:4;
- uint64_t reserved_8_59:52;
- uint64_t gpio:1;
- uint64_t pkt:1;
- uint64_t mem:1;
- uint64_t io:1;
- uint64_t mio:1;
- uint64_t rml:1;
- uint64_t wdog:1;
- uint64_t workq:1;
-#else
- uint64_t workq:1;
- uint64_t wdog:1;
- uint64_t rml:1;
- uint64_t mio:1;
- uint64_t io:1;
- uint64_t mem:1;
- uint64_t pkt:1;
- uint64_t gpio:1;
- uint64_t reserved_8_59:52;
- uint64_t mbox:4;
-#endif
- } s;
- struct cvmx_ciu2_sum_ppx_ip3_s cn68xx;
- struct cvmx_ciu2_sum_ppx_ip3_s cn68xxp1;
-};
-
-union cvmx_ciu2_sum_ppx_ip4 {
- uint64_t u64;
- struct cvmx_ciu2_sum_ppx_ip4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t mbox:4;
- uint64_t reserved_8_59:52;
- uint64_t gpio:1;
- uint64_t pkt:1;
- uint64_t mem:1;
- uint64_t io:1;
- uint64_t mio:1;
- uint64_t rml:1;
- uint64_t wdog:1;
- uint64_t workq:1;
-#else
- uint64_t workq:1;
- uint64_t wdog:1;
- uint64_t rml:1;
- uint64_t mio:1;
- uint64_t io:1;
- uint64_t mem:1;
- uint64_t pkt:1;
- uint64_t gpio:1;
- uint64_t reserved_8_59:52;
- uint64_t mbox:4;
-#endif
- } s;
- struct cvmx_ciu2_sum_ppx_ip4_s cn68xx;
- struct cvmx_ciu2_sum_ppx_ip4_s cn68xxp1;
-};
#endif
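
The block above closes cvmx-ciu2-defs.h: every CIU2 register keeps its canonical `_s` layout while the per-chip members (cn68xx, cn68xxp1) that merely aliased it are deleted. For readers outside the cvmx convention, the stand-alone sketch below shows how such a union is typically consumed: read the raw 64-bit CSR into .u64, then decode fields by name. The trimmed union and the register value are illustrative stand-ins, not the kernel's definitions, and bitfield allocation order is compiler-defined — which is exactly why the real headers carry a __BIG_ENDIAN_BITFIELD split.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the cvmx union-over-bitfield pattern; widths
 * follow cvmx_ciu2_sum_ppx_ip2 above, little-endian allocation assumed. */
union demo_ciu2_sum {
	uint64_t u64;
	struct {
		uint64_t workq:1;		/* bit 0 */
		uint64_t wdog:1;		/* bit 1 */
		uint64_t rml:1;			/* bit 2 */
		uint64_t mio:1;			/* bit 3 */
		uint64_t io:1;			/* bit 4 */
		uint64_t mem:1;			/* bit 5 */
		uint64_t pkt:1;			/* bit 6 */
		uint64_t gpio:1;		/* bit 7 */
		uint64_t reserved_8_59:52;
		uint64_t mbox:4;		/* bits 60..63 */
	} s;
};

int main(void)
{
	union demo_ciu2_sum sum;

	sum.u64 = 0xA000000000000042ull;	/* made-up CSR value */

	/* Field access replaces open-coded shifts and masks. */
	printf("wdog=%u pkt=%u mbox=0x%x\n",
	       (unsigned)sum.s.wdog, (unsigned)sum.s.pkt,
	       (unsigned)sum.s.mbox);
	return 0;
}
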
diff --git a/arch/mips/include/asm/octeon/cvmx-dbg-defs.h b/arch/mips/include/asm/octeon/cvmx-dbg-defs.h
index 40799cdae695..828d07d87f03 100644
--- a/arch/mips/include/asm/octeon/cvmx-dbg-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-dbg-defs.h
@@ -62,7 +62,6 @@ union cvmx_dbg_data {
uint64_t reserved_31_63:33;
#endif
} cn30xx;
- struct cvmx_dbg_data_cn30xx cn31xx;
struct cvmx_dbg_data_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
@@ -82,8 +81,6 @@ union cvmx_dbg_data {
uint64_t reserved_29_63:35;
#endif
} cn38xx;
- struct cvmx_dbg_data_cn38xx cn38xxp2;
- struct cvmx_dbg_data_cn30xx cn50xx;
struct cvmx_dbg_data_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
@@ -99,7 +96,6 @@ union cvmx_dbg_data {
uint64_t reserved_29_63:35;
#endif
} cn58xx;
- struct cvmx_dbg_data_cn58xx cn58xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-dpi-defs.h b/arch/mips/include/asm/octeon/cvmx-dpi-defs.h
index dd5b0428de35..e8613e1f6930 100644
--- a/arch/mips/include/asm/octeon/cvmx-dpi-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-dpi-defs.h
@@ -89,7 +89,6 @@ union cvmx_dpi_bist_status {
uint64_t reserved_47_63:17;
#endif
} s;
- struct cvmx_dpi_bist_status_s cn61xx;
struct cvmx_dpi_bist_status_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_45_63:19;
@@ -108,10 +107,6 @@ union cvmx_dpi_bist_status {
uint64_t reserved_37_63:27;
#endif
} cn63xxp1;
- struct cvmx_dpi_bist_status_s cn66xx;
- struct cvmx_dpi_bist_status_cn63xx cn68xx;
- struct cvmx_dpi_bist_status_cn63xx cn68xxp1;
- struct cvmx_dpi_bist_status_s cnf71xx;
};
union cvmx_dpi_ctl {
@@ -136,12 +131,6 @@ union cvmx_dpi_ctl {
uint64_t reserved_1_63:63;
#endif
} cn61xx;
- struct cvmx_dpi_ctl_s cn63xx;
- struct cvmx_dpi_ctl_s cn63xxp1;
- struct cvmx_dpi_ctl_s cn66xx;
- struct cvmx_dpi_ctl_s cn68xx;
- struct cvmx_dpi_ctl_s cn68xxp1;
- struct cvmx_dpi_ctl_cn61xx cnf71xx;
};
union cvmx_dpi_dmax_counts {
@@ -157,13 +146,6 @@ union cvmx_dpi_dmax_counts {
uint64_t reserved_39_63:25;
#endif
} s;
- struct cvmx_dpi_dmax_counts_s cn61xx;
- struct cvmx_dpi_dmax_counts_s cn63xx;
- struct cvmx_dpi_dmax_counts_s cn63xxp1;
- struct cvmx_dpi_dmax_counts_s cn66xx;
- struct cvmx_dpi_dmax_counts_s cn68xx;
- struct cvmx_dpi_dmax_counts_s cn68xxp1;
- struct cvmx_dpi_dmax_counts_s cnf71xx;
};
union cvmx_dpi_dmax_dbell {
@@ -177,13 +159,6 @@ union cvmx_dpi_dmax_dbell {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_dpi_dmax_dbell_s cn61xx;
- struct cvmx_dpi_dmax_dbell_s cn63xx;
- struct cvmx_dpi_dmax_dbell_s cn63xxp1;
- struct cvmx_dpi_dmax_dbell_s cn66xx;
- struct cvmx_dpi_dmax_dbell_s cn68xx;
- struct cvmx_dpi_dmax_dbell_s cn68xxp1;
- struct cvmx_dpi_dmax_dbell_s cnf71xx;
};
union cvmx_dpi_dmax_err_rsp_status {
@@ -197,11 +172,6 @@ union cvmx_dpi_dmax_err_rsp_status {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_dpi_dmax_err_rsp_status_s cn61xx;
- struct cvmx_dpi_dmax_err_rsp_status_s cn66xx;
- struct cvmx_dpi_dmax_err_rsp_status_s cn68xx;
- struct cvmx_dpi_dmax_err_rsp_status_s cn68xxp1;
- struct cvmx_dpi_dmax_err_rsp_status_s cnf71xx;
};
union cvmx_dpi_dmax_ibuff_saddr {
@@ -242,12 +212,6 @@ union cvmx_dpi_dmax_ibuff_saddr {
uint64_t reserved_62_63:2;
#endif
} cn61xx;
- struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cn63xx;
- struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cn63xxp1;
- struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cn66xx;
- struct cvmx_dpi_dmax_ibuff_saddr_s cn68xx;
- struct cvmx_dpi_dmax_ibuff_saddr_s cn68xxp1;
- struct cvmx_dpi_dmax_ibuff_saddr_cn61xx cnf71xx;
};
union cvmx_dpi_dmax_iflight {
@@ -261,11 +225,6 @@ union cvmx_dpi_dmax_iflight {
uint64_t reserved_3_63:61;
#endif
} s;
- struct cvmx_dpi_dmax_iflight_s cn61xx;
- struct cvmx_dpi_dmax_iflight_s cn66xx;
- struct cvmx_dpi_dmax_iflight_s cn68xx;
- struct cvmx_dpi_dmax_iflight_s cn68xxp1;
- struct cvmx_dpi_dmax_iflight_s cnf71xx;
};
union cvmx_dpi_dmax_naddr {
@@ -288,12 +247,6 @@ union cvmx_dpi_dmax_naddr {
uint64_t reserved_36_63:28;
#endif
} cn61xx;
- struct cvmx_dpi_dmax_naddr_cn61xx cn63xx;
- struct cvmx_dpi_dmax_naddr_cn61xx cn63xxp1;
- struct cvmx_dpi_dmax_naddr_cn61xx cn66xx;
- struct cvmx_dpi_dmax_naddr_s cn68xx;
- struct cvmx_dpi_dmax_naddr_s cn68xxp1;
- struct cvmx_dpi_dmax_naddr_cn61xx cnf71xx;
};
union cvmx_dpi_dmax_reqbnk0 {
@@ -305,13 +258,6 @@ union cvmx_dpi_dmax_reqbnk0 {
uint64_t state:64;
#endif
} s;
- struct cvmx_dpi_dmax_reqbnk0_s cn61xx;
- struct cvmx_dpi_dmax_reqbnk0_s cn63xx;
- struct cvmx_dpi_dmax_reqbnk0_s cn63xxp1;
- struct cvmx_dpi_dmax_reqbnk0_s cn66xx;
- struct cvmx_dpi_dmax_reqbnk0_s cn68xx;
- struct cvmx_dpi_dmax_reqbnk0_s cn68xxp1;
- struct cvmx_dpi_dmax_reqbnk0_s cnf71xx;
};
union cvmx_dpi_dmax_reqbnk1 {
@@ -323,13 +269,6 @@ union cvmx_dpi_dmax_reqbnk1 {
uint64_t state:64;
#endif
} s;
- struct cvmx_dpi_dmax_reqbnk1_s cn61xx;
- struct cvmx_dpi_dmax_reqbnk1_s cn63xx;
- struct cvmx_dpi_dmax_reqbnk1_s cn63xxp1;
- struct cvmx_dpi_dmax_reqbnk1_s cn66xx;
- struct cvmx_dpi_dmax_reqbnk1_s cn68xx;
- struct cvmx_dpi_dmax_reqbnk1_s cn68xxp1;
- struct cvmx_dpi_dmax_reqbnk1_s cnf71xx;
};
union cvmx_dpi_dma_control {
@@ -379,7 +318,6 @@ union cvmx_dpi_dma_control {
uint64_t reserved_62_63:2;
#endif
} s;
- struct cvmx_dpi_dma_control_s cn61xx;
struct cvmx_dpi_dma_control_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_61_63:3;
@@ -462,10 +400,6 @@ union cvmx_dpi_dma_control {
uint64_t reserved_59_63:5;
#endif
} cn63xxp1;
- struct cvmx_dpi_dma_control_cn63xx cn66xx;
- struct cvmx_dpi_dma_control_s cn68xx;
- struct cvmx_dpi_dma_control_cn63xx cn68xxp1;
- struct cvmx_dpi_dma_control_s cnf71xx;
};
union cvmx_dpi_dma_engx_en {
@@ -479,13 +413,6 @@ union cvmx_dpi_dma_engx_en {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_dpi_dma_engx_en_s cn61xx;
- struct cvmx_dpi_dma_engx_en_s cn63xx;
- struct cvmx_dpi_dma_engx_en_s cn63xxp1;
- struct cvmx_dpi_dma_engx_en_s cn66xx;
- struct cvmx_dpi_dma_engx_en_s cn68xx;
- struct cvmx_dpi_dma_engx_en_s cn68xxp1;
- struct cvmx_dpi_dma_engx_en_s cnf71xx;
};
union cvmx_dpi_dma_ppx_cnt {
@@ -499,9 +426,6 @@ union cvmx_dpi_dma_ppx_cnt {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_dpi_dma_ppx_cnt_s cn61xx;
- struct cvmx_dpi_dma_ppx_cnt_s cn68xx;
- struct cvmx_dpi_dma_ppx_cnt_s cnf71xx;
};
union cvmx_dpi_engx_buf {
@@ -521,7 +445,6 @@ union cvmx_dpi_engx_buf {
uint64_t reserved_37_63:27;
#endif
} s;
- struct cvmx_dpi_engx_buf_s cn61xx;
struct cvmx_dpi_engx_buf_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
@@ -533,11 +456,6 @@ union cvmx_dpi_engx_buf {
uint64_t reserved_8_63:56;
#endif
} cn63xx;
- struct cvmx_dpi_engx_buf_cn63xx cn63xxp1;
- struct cvmx_dpi_engx_buf_s cn66xx;
- struct cvmx_dpi_engx_buf_s cn68xx;
- struct cvmx_dpi_engx_buf_s cn68xxp1;
- struct cvmx_dpi_engx_buf_s cnf71xx;
};
union cvmx_dpi_info_reg {
@@ -557,8 +475,6 @@ union cvmx_dpi_info_reg {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_dpi_info_reg_s cn61xx;
- struct cvmx_dpi_info_reg_s cn63xx;
struct cvmx_dpi_info_reg_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
@@ -570,10 +486,6 @@ union cvmx_dpi_info_reg {
uint64_t reserved_2_63:62;
#endif
} cn63xxp1;
- struct cvmx_dpi_info_reg_s cn66xx;
- struct cvmx_dpi_info_reg_s cn68xx;
- struct cvmx_dpi_info_reg_s cn68xxp1;
- struct cvmx_dpi_info_reg_s cnf71xx;
};
union cvmx_dpi_int_en {
@@ -617,7 +529,6 @@ union cvmx_dpi_int_en {
uint64_t reserved_28_63:36;
#endif
} s;
- struct cvmx_dpi_int_en_s cn61xx;
struct cvmx_dpi_int_en_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_26_63:38;
@@ -653,11 +564,6 @@ union cvmx_dpi_int_en {
uint64_t reserved_26_63:38;
#endif
} cn63xx;
- struct cvmx_dpi_int_en_cn63xx cn63xxp1;
- struct cvmx_dpi_int_en_s cn66xx;
- struct cvmx_dpi_int_en_cn63xx cn68xx;
- struct cvmx_dpi_int_en_cn63xx cn68xxp1;
- struct cvmx_dpi_int_en_s cnf71xx;
};
union cvmx_dpi_int_reg {
@@ -701,7 +607,6 @@ union cvmx_dpi_int_reg {
uint64_t reserved_28_63:36;
#endif
} s;
- struct cvmx_dpi_int_reg_s cn61xx;
struct cvmx_dpi_int_reg_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_26_63:38;
@@ -737,11 +642,6 @@ union cvmx_dpi_int_reg {
uint64_t reserved_26_63:38;
#endif
} cn63xx;
- struct cvmx_dpi_int_reg_cn63xx cn63xxp1;
- struct cvmx_dpi_int_reg_s cn66xx;
- struct cvmx_dpi_int_reg_cn63xx cn68xx;
- struct cvmx_dpi_int_reg_cn63xx cn68xxp1;
- struct cvmx_dpi_int_reg_s cnf71xx;
};
union cvmx_dpi_ncbx_cfg {
@@ -755,10 +655,6 @@ union cvmx_dpi_ncbx_cfg {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_dpi_ncbx_cfg_s cn61xx;
- struct cvmx_dpi_ncbx_cfg_s cn66xx;
- struct cvmx_dpi_ncbx_cfg_s cn68xx;
- struct cvmx_dpi_ncbx_cfg_s cnf71xx;
};
union cvmx_dpi_pint_info {
@@ -776,13 +672,6 @@ union cvmx_dpi_pint_info {
uint64_t reserved_14_63:50;
#endif
} s;
- struct cvmx_dpi_pint_info_s cn61xx;
- struct cvmx_dpi_pint_info_s cn63xx;
- struct cvmx_dpi_pint_info_s cn63xxp1;
- struct cvmx_dpi_pint_info_s cn66xx;
- struct cvmx_dpi_pint_info_s cn68xx;
- struct cvmx_dpi_pint_info_s cn68xxp1;
- struct cvmx_dpi_pint_info_s cnf71xx;
};
union cvmx_dpi_pkt_err_rsp {
@@ -796,13 +685,6 @@ union cvmx_dpi_pkt_err_rsp {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_dpi_pkt_err_rsp_s cn61xx;
- struct cvmx_dpi_pkt_err_rsp_s cn63xx;
- struct cvmx_dpi_pkt_err_rsp_s cn63xxp1;
- struct cvmx_dpi_pkt_err_rsp_s cn66xx;
- struct cvmx_dpi_pkt_err_rsp_s cn68xx;
- struct cvmx_dpi_pkt_err_rsp_s cn68xxp1;
- struct cvmx_dpi_pkt_err_rsp_s cnf71xx;
};
union cvmx_dpi_req_err_rsp {
@@ -816,13 +698,6 @@ union cvmx_dpi_req_err_rsp {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_dpi_req_err_rsp_s cn61xx;
- struct cvmx_dpi_req_err_rsp_s cn63xx;
- struct cvmx_dpi_req_err_rsp_s cn63xxp1;
- struct cvmx_dpi_req_err_rsp_s cn66xx;
- struct cvmx_dpi_req_err_rsp_s cn68xx;
- struct cvmx_dpi_req_err_rsp_s cn68xxp1;
- struct cvmx_dpi_req_err_rsp_s cnf71xx;
};
union cvmx_dpi_req_err_rsp_en {
@@ -836,13 +711,6 @@ union cvmx_dpi_req_err_rsp_en {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_dpi_req_err_rsp_en_s cn61xx;
- struct cvmx_dpi_req_err_rsp_en_s cn63xx;
- struct cvmx_dpi_req_err_rsp_en_s cn63xxp1;
- struct cvmx_dpi_req_err_rsp_en_s cn66xx;
- struct cvmx_dpi_req_err_rsp_en_s cn68xx;
- struct cvmx_dpi_req_err_rsp_en_s cn68xxp1;
- struct cvmx_dpi_req_err_rsp_en_s cnf71xx;
};
union cvmx_dpi_req_err_rst {
@@ -856,13 +724,6 @@ union cvmx_dpi_req_err_rst {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_dpi_req_err_rst_s cn61xx;
- struct cvmx_dpi_req_err_rst_s cn63xx;
- struct cvmx_dpi_req_err_rst_s cn63xxp1;
- struct cvmx_dpi_req_err_rst_s cn66xx;
- struct cvmx_dpi_req_err_rst_s cn68xx;
- struct cvmx_dpi_req_err_rst_s cn68xxp1;
- struct cvmx_dpi_req_err_rst_s cnf71xx;
};
union cvmx_dpi_req_err_rst_en {
@@ -876,13 +737,6 @@ union cvmx_dpi_req_err_rst_en {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_dpi_req_err_rst_en_s cn61xx;
- struct cvmx_dpi_req_err_rst_en_s cn63xx;
- struct cvmx_dpi_req_err_rst_en_s cn63xxp1;
- struct cvmx_dpi_req_err_rst_en_s cn66xx;
- struct cvmx_dpi_req_err_rst_en_s cn68xx;
- struct cvmx_dpi_req_err_rst_en_s cn68xxp1;
- struct cvmx_dpi_req_err_rst_en_s cnf71xx;
};
union cvmx_dpi_req_err_skip_comp {
@@ -900,11 +754,6 @@ union cvmx_dpi_req_err_skip_comp {
uint64_t reserved_24_63:40;
#endif
} s;
- struct cvmx_dpi_req_err_skip_comp_s cn61xx;
- struct cvmx_dpi_req_err_skip_comp_s cn66xx;
- struct cvmx_dpi_req_err_skip_comp_s cn68xx;
- struct cvmx_dpi_req_err_skip_comp_s cn68xxp1;
- struct cvmx_dpi_req_err_skip_comp_s cnf71xx;
};
union cvmx_dpi_req_gbl_en {
@@ -918,13 +767,6 @@ union cvmx_dpi_req_gbl_en {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_dpi_req_gbl_en_s cn61xx;
- struct cvmx_dpi_req_gbl_en_s cn63xx;
- struct cvmx_dpi_req_gbl_en_s cn63xxp1;
- struct cvmx_dpi_req_gbl_en_s cn66xx;
- struct cvmx_dpi_req_gbl_en_s cn68xx;
- struct cvmx_dpi_req_gbl_en_s cn68xxp1;
- struct cvmx_dpi_req_gbl_en_s cnf71xx;
};
union cvmx_dpi_sli_prtx_cfg {
@@ -960,7 +802,6 @@ union cvmx_dpi_sli_prtx_cfg {
uint64_t reserved_25_63:39;
#endif
} s;
- struct cvmx_dpi_sli_prtx_cfg_s cn61xx;
struct cvmx_dpi_sli_prtx_cfg_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_25_63:39;
@@ -994,11 +835,6 @@ union cvmx_dpi_sli_prtx_cfg {
uint64_t reserved_25_63:39;
#endif
} cn63xx;
- struct cvmx_dpi_sli_prtx_cfg_cn63xx cn63xxp1;
- struct cvmx_dpi_sli_prtx_cfg_s cn66xx;
- struct cvmx_dpi_sli_prtx_cfg_cn63xx cn68xx;
- struct cvmx_dpi_sli_prtx_cfg_cn63xx cn68xxp1;
- struct cvmx_dpi_sli_prtx_cfg_s cnf71xx;
};
union cvmx_dpi_sli_prtx_err {
@@ -1012,13 +848,6 @@ union cvmx_dpi_sli_prtx_err {
uint64_t addr:61;
#endif
} s;
- struct cvmx_dpi_sli_prtx_err_s cn61xx;
- struct cvmx_dpi_sli_prtx_err_s cn63xx;
- struct cvmx_dpi_sli_prtx_err_s cn63xxp1;
- struct cvmx_dpi_sli_prtx_err_s cn66xx;
- struct cvmx_dpi_sli_prtx_err_s cn68xx;
- struct cvmx_dpi_sli_prtx_err_s cn68xxp1;
- struct cvmx_dpi_sli_prtx_err_s cnf71xx;
};
union cvmx_dpi_sli_prtx_err_info {
@@ -1040,13 +869,6 @@ union cvmx_dpi_sli_prtx_err_info {
uint64_t reserved_9_63:55;
#endif
} s;
- struct cvmx_dpi_sli_prtx_err_info_s cn61xx;
- struct cvmx_dpi_sli_prtx_err_info_s cn63xx;
- struct cvmx_dpi_sli_prtx_err_info_s cn63xxp1;
- struct cvmx_dpi_sli_prtx_err_info_s cn66xx;
- struct cvmx_dpi_sli_prtx_err_info_s cn68xx;
- struct cvmx_dpi_sli_prtx_err_info_s cn68xxp1;
- struct cvmx_dpi_sli_prtx_err_info_s cnf71xx;
};
#endif
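
The cvmx-dpi-defs.h hunks above follow the same rule: only union members that repeat an existing view verbatim are removed. Because every member of these unions overlays the same 64-bit word, deleting alias members can never change a union's size or the layout of the surviving views. A minimal compile-time check of that property, using an illustrative two-view union rather than a kernel type:

#include <stdint.h>

/* Two views of one 64-bit word, mimicking a trimmed cvmx register union. */
union demo_reg {
	uint64_t u64;
	struct {
		uint64_t en:8;
		uint64_t reserved_8_63:56;
	} s;
};

/* Union members overlap, so dropping alias members (the per-chip cnXXxx
 * fields deleted above) leaves size and member offsets untouched. */
_Static_assert(sizeof(union demo_reg) == sizeof(uint64_t),
	       "register unions stay one 64-bit word");

int main(void) { return 0; }
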
diff --git a/arch/mips/include/asm/octeon/cvmx-fpa-defs.h b/arch/mips/include/asm/octeon/cvmx-fpa-defs.h
index 887ff8e1f715..322943f7c4b6 100644
--- a/arch/mips/include/asm/octeon/cvmx-fpa-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-fpa-defs.h
@@ -81,11 +81,6 @@ union cvmx_fpa_addr_range_error {
uint64_t reserved_38_63:26;
#endif
} s;
- struct cvmx_fpa_addr_range_error_s cn61xx;
- struct cvmx_fpa_addr_range_error_s cn66xx;
- struct cvmx_fpa_addr_range_error_s cn68xx;
- struct cvmx_fpa_addr_range_error_s cn68xxp1;
- struct cvmx_fpa_addr_range_error_s cnf71xx;
};
union cvmx_fpa_bist_status {
@@ -107,24 +102,6 @@ union cvmx_fpa_bist_status {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_fpa_bist_status_s cn30xx;
- struct cvmx_fpa_bist_status_s cn31xx;
- struct cvmx_fpa_bist_status_s cn38xx;
- struct cvmx_fpa_bist_status_s cn38xxp2;
- struct cvmx_fpa_bist_status_s cn50xx;
- struct cvmx_fpa_bist_status_s cn52xx;
- struct cvmx_fpa_bist_status_s cn52xxp1;
- struct cvmx_fpa_bist_status_s cn56xx;
- struct cvmx_fpa_bist_status_s cn56xxp1;
- struct cvmx_fpa_bist_status_s cn58xx;
- struct cvmx_fpa_bist_status_s cn58xxp1;
- struct cvmx_fpa_bist_status_s cn61xx;
- struct cvmx_fpa_bist_status_s cn63xx;
- struct cvmx_fpa_bist_status_s cn63xxp1;
- struct cvmx_fpa_bist_status_s cn66xx;
- struct cvmx_fpa_bist_status_s cn68xx;
- struct cvmx_fpa_bist_status_s cn68xxp1;
- struct cvmx_fpa_bist_status_s cnf71xx;
};
union cvmx_fpa_ctl_status {
@@ -173,23 +150,6 @@ union cvmx_fpa_ctl_status {
uint64_t reserved_18_63:46;
#endif
} cn30xx;
- struct cvmx_fpa_ctl_status_cn30xx cn31xx;
- struct cvmx_fpa_ctl_status_cn30xx cn38xx;
- struct cvmx_fpa_ctl_status_cn30xx cn38xxp2;
- struct cvmx_fpa_ctl_status_cn30xx cn50xx;
- struct cvmx_fpa_ctl_status_cn30xx cn52xx;
- struct cvmx_fpa_ctl_status_cn30xx cn52xxp1;
- struct cvmx_fpa_ctl_status_cn30xx cn56xx;
- struct cvmx_fpa_ctl_status_cn30xx cn56xxp1;
- struct cvmx_fpa_ctl_status_cn30xx cn58xx;
- struct cvmx_fpa_ctl_status_cn30xx cn58xxp1;
- struct cvmx_fpa_ctl_status_s cn61xx;
- struct cvmx_fpa_ctl_status_s cn63xx;
- struct cvmx_fpa_ctl_status_cn30xx cn63xxp1;
- struct cvmx_fpa_ctl_status_s cn66xx;
- struct cvmx_fpa_ctl_status_s cn68xx;
- struct cvmx_fpa_ctl_status_s cn68xxp1;
- struct cvmx_fpa_ctl_status_s cnf71xx;
};
union cvmx_fpa_fpfx_marks {
@@ -205,19 +165,6 @@ union cvmx_fpa_fpfx_marks {
uint64_t reserved_22_63:42;
#endif
} s;
- struct cvmx_fpa_fpfx_marks_s cn38xx;
- struct cvmx_fpa_fpfx_marks_s cn38xxp2;
- struct cvmx_fpa_fpfx_marks_s cn56xx;
- struct cvmx_fpa_fpfx_marks_s cn56xxp1;
- struct cvmx_fpa_fpfx_marks_s cn58xx;
- struct cvmx_fpa_fpfx_marks_s cn58xxp1;
- struct cvmx_fpa_fpfx_marks_s cn61xx;
- struct cvmx_fpa_fpfx_marks_s cn63xx;
- struct cvmx_fpa_fpfx_marks_s cn63xxp1;
- struct cvmx_fpa_fpfx_marks_s cn66xx;
- struct cvmx_fpa_fpfx_marks_s cn68xx;
- struct cvmx_fpa_fpfx_marks_s cn68xxp1;
- struct cvmx_fpa_fpfx_marks_s cnf71xx;
};
union cvmx_fpa_fpfx_size {
@@ -231,19 +178,6 @@ union cvmx_fpa_fpfx_size {
uint64_t reserved_11_63:53;
#endif
} s;
- struct cvmx_fpa_fpfx_size_s cn38xx;
- struct cvmx_fpa_fpfx_size_s cn38xxp2;
- struct cvmx_fpa_fpfx_size_s cn56xx;
- struct cvmx_fpa_fpfx_size_s cn56xxp1;
- struct cvmx_fpa_fpfx_size_s cn58xx;
- struct cvmx_fpa_fpfx_size_s cn58xxp1;
- struct cvmx_fpa_fpfx_size_s cn61xx;
- struct cvmx_fpa_fpfx_size_s cn63xx;
- struct cvmx_fpa_fpfx_size_s cn63xxp1;
- struct cvmx_fpa_fpfx_size_s cn66xx;
- struct cvmx_fpa_fpfx_size_s cn68xx;
- struct cvmx_fpa_fpfx_size_s cn68xxp1;
- struct cvmx_fpa_fpfx_size_s cnf71xx;
};
union cvmx_fpa_fpf0_marks {
@@ -259,19 +193,6 @@ union cvmx_fpa_fpf0_marks {
uint64_t reserved_24_63:40;
#endif
} s;
- struct cvmx_fpa_fpf0_marks_s cn38xx;
- struct cvmx_fpa_fpf0_marks_s cn38xxp2;
- struct cvmx_fpa_fpf0_marks_s cn56xx;
- struct cvmx_fpa_fpf0_marks_s cn56xxp1;
- struct cvmx_fpa_fpf0_marks_s cn58xx;
- struct cvmx_fpa_fpf0_marks_s cn58xxp1;
- struct cvmx_fpa_fpf0_marks_s cn61xx;
- struct cvmx_fpa_fpf0_marks_s cn63xx;
- struct cvmx_fpa_fpf0_marks_s cn63xxp1;
- struct cvmx_fpa_fpf0_marks_s cn66xx;
- struct cvmx_fpa_fpf0_marks_s cn68xx;
- struct cvmx_fpa_fpf0_marks_s cn68xxp1;
- struct cvmx_fpa_fpf0_marks_s cnf71xx;
};
union cvmx_fpa_fpf0_size {
@@ -285,19 +206,6 @@ union cvmx_fpa_fpf0_size {
uint64_t reserved_12_63:52;
#endif
} s;
- struct cvmx_fpa_fpf0_size_s cn38xx;
- struct cvmx_fpa_fpf0_size_s cn38xxp2;
- struct cvmx_fpa_fpf0_size_s cn56xx;
- struct cvmx_fpa_fpf0_size_s cn56xxp1;
- struct cvmx_fpa_fpf0_size_s cn58xx;
- struct cvmx_fpa_fpf0_size_s cn58xxp1;
- struct cvmx_fpa_fpf0_size_s cn61xx;
- struct cvmx_fpa_fpf0_size_s cn63xx;
- struct cvmx_fpa_fpf0_size_s cn63xxp1;
- struct cvmx_fpa_fpf0_size_s cn66xx;
- struct cvmx_fpa_fpf0_size_s cn68xx;
- struct cvmx_fpa_fpf0_size_s cn68xxp1;
- struct cvmx_fpa_fpf0_size_s cnf71xx;
};
union cvmx_fpa_fpf8_marks {
@@ -313,8 +221,6 @@ union cvmx_fpa_fpf8_marks {
uint64_t reserved_22_63:42;
#endif
} s;
- struct cvmx_fpa_fpf8_marks_s cn68xx;
- struct cvmx_fpa_fpf8_marks_s cn68xxp1;
};
union cvmx_fpa_fpf8_size {
@@ -328,8 +234,6 @@ union cvmx_fpa_fpf8_size {
uint64_t reserved_12_63:52;
#endif
} s;
- struct cvmx_fpa_fpf8_size_s cn68xx;
- struct cvmx_fpa_fpf8_size_s cn68xxp1;
};
union cvmx_fpa_int_enb {
@@ -496,16 +400,6 @@ union cvmx_fpa_int_enb {
uint64_t reserved_28_63:36;
#endif
} cn30xx;
- struct cvmx_fpa_int_enb_cn30xx cn31xx;
- struct cvmx_fpa_int_enb_cn30xx cn38xx;
- struct cvmx_fpa_int_enb_cn30xx cn38xxp2;
- struct cvmx_fpa_int_enb_cn30xx cn50xx;
- struct cvmx_fpa_int_enb_cn30xx cn52xx;
- struct cvmx_fpa_int_enb_cn30xx cn52xxp1;
- struct cvmx_fpa_int_enb_cn30xx cn56xx;
- struct cvmx_fpa_int_enb_cn30xx cn56xxp1;
- struct cvmx_fpa_int_enb_cn30xx cn58xx;
- struct cvmx_fpa_int_enb_cn30xx cn58xxp1;
struct cvmx_fpa_int_enb_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_50_63:14;
@@ -700,8 +594,6 @@ union cvmx_fpa_int_enb {
uint64_t reserved_44_63:20;
#endif
} cn63xx;
- struct cvmx_fpa_int_enb_cn30xx cn63xxp1;
- struct cvmx_fpa_int_enb_cn61xx cn66xx;
struct cvmx_fpa_int_enb_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_50_63:14;
@@ -809,8 +701,6 @@ union cvmx_fpa_int_enb {
uint64_t reserved_50_63:14;
#endif
} cn68xx;
- struct cvmx_fpa_int_enb_cn68xx cn68xxp1;
- struct cvmx_fpa_int_enb_cn61xx cnf71xx;
};
union cvmx_fpa_int_sum {
@@ -985,16 +875,6 @@ union cvmx_fpa_int_sum {
uint64_t reserved_28_63:36;
#endif
} cn30xx;
- struct cvmx_fpa_int_sum_cn30xx cn31xx;
- struct cvmx_fpa_int_sum_cn30xx cn38xx;
- struct cvmx_fpa_int_sum_cn30xx cn38xxp2;
- struct cvmx_fpa_int_sum_cn30xx cn50xx;
- struct cvmx_fpa_int_sum_cn30xx cn52xx;
- struct cvmx_fpa_int_sum_cn30xx cn52xxp1;
- struct cvmx_fpa_int_sum_cn30xx cn56xx;
- struct cvmx_fpa_int_sum_cn30xx cn56xxp1;
- struct cvmx_fpa_int_sum_cn30xx cn58xx;
- struct cvmx_fpa_int_sum_cn30xx cn58xxp1;
struct cvmx_fpa_int_sum_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_50_63:14;
@@ -1189,11 +1069,6 @@ union cvmx_fpa_int_sum {
uint64_t reserved_44_63:20;
#endif
} cn63xx;
- struct cvmx_fpa_int_sum_cn30xx cn63xxp1;
- struct cvmx_fpa_int_sum_cn61xx cn66xx;
- struct cvmx_fpa_int_sum_s cn68xx;
- struct cvmx_fpa_int_sum_s cn68xxp1;
- struct cvmx_fpa_int_sum_cn61xx cnf71xx;
};
union cvmx_fpa_packet_threshold {
@@ -1207,12 +1082,6 @@ union cvmx_fpa_packet_threshold {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_fpa_packet_threshold_s cn61xx;
- struct cvmx_fpa_packet_threshold_s cn63xx;
- struct cvmx_fpa_packet_threshold_s cn66xx;
- struct cvmx_fpa_packet_threshold_s cn68xx;
- struct cvmx_fpa_packet_threshold_s cn68xxp1;
- struct cvmx_fpa_packet_threshold_s cnf71xx;
};
union cvmx_fpa_poolx_end_addr {
@@ -1226,11 +1095,6 @@ union cvmx_fpa_poolx_end_addr {
uint64_t reserved_33_63:31;
#endif
} s;
- struct cvmx_fpa_poolx_end_addr_s cn61xx;
- struct cvmx_fpa_poolx_end_addr_s cn66xx;
- struct cvmx_fpa_poolx_end_addr_s cn68xx;
- struct cvmx_fpa_poolx_end_addr_s cn68xxp1;
- struct cvmx_fpa_poolx_end_addr_s cnf71xx;
};
union cvmx_fpa_poolx_start_addr {
@@ -1244,11 +1108,6 @@ union cvmx_fpa_poolx_start_addr {
uint64_t reserved_33_63:31;
#endif
} s;
- struct cvmx_fpa_poolx_start_addr_s cn61xx;
- struct cvmx_fpa_poolx_start_addr_s cn66xx;
- struct cvmx_fpa_poolx_start_addr_s cn68xx;
- struct cvmx_fpa_poolx_start_addr_s cn68xxp1;
- struct cvmx_fpa_poolx_start_addr_s cnf71xx;
};
union cvmx_fpa_poolx_threshold {
@@ -1271,11 +1130,6 @@ union cvmx_fpa_poolx_threshold {
uint64_t reserved_29_63:35;
#endif
} cn61xx;
- struct cvmx_fpa_poolx_threshold_cn61xx cn63xx;
- struct cvmx_fpa_poolx_threshold_cn61xx cn66xx;
- struct cvmx_fpa_poolx_threshold_s cn68xx;
- struct cvmx_fpa_poolx_threshold_s cn68xxp1;
- struct cvmx_fpa_poolx_threshold_cn61xx cnf71xx;
};
union cvmx_fpa_quex_available {
@@ -1298,23 +1152,6 @@ union cvmx_fpa_quex_available {
uint64_t reserved_29_63:35;
#endif
} cn30xx;
- struct cvmx_fpa_quex_available_cn30xx cn31xx;
- struct cvmx_fpa_quex_available_cn30xx cn38xx;
- struct cvmx_fpa_quex_available_cn30xx cn38xxp2;
- struct cvmx_fpa_quex_available_cn30xx cn50xx;
- struct cvmx_fpa_quex_available_cn30xx cn52xx;
- struct cvmx_fpa_quex_available_cn30xx cn52xxp1;
- struct cvmx_fpa_quex_available_cn30xx cn56xx;
- struct cvmx_fpa_quex_available_cn30xx cn56xxp1;
- struct cvmx_fpa_quex_available_cn30xx cn58xx;
- struct cvmx_fpa_quex_available_cn30xx cn58xxp1;
- struct cvmx_fpa_quex_available_cn30xx cn61xx;
- struct cvmx_fpa_quex_available_cn30xx cn63xx;
- struct cvmx_fpa_quex_available_cn30xx cn63xxp1;
- struct cvmx_fpa_quex_available_cn30xx cn66xx;
- struct cvmx_fpa_quex_available_s cn68xx;
- struct cvmx_fpa_quex_available_s cn68xxp1;
- struct cvmx_fpa_quex_available_cn30xx cnf71xx;
};
union cvmx_fpa_quex_page_index {
@@ -1328,24 +1165,6 @@ union cvmx_fpa_quex_page_index {
uint64_t reserved_25_63:39;
#endif
} s;
- struct cvmx_fpa_quex_page_index_s cn30xx;
- struct cvmx_fpa_quex_page_index_s cn31xx;
- struct cvmx_fpa_quex_page_index_s cn38xx;
- struct cvmx_fpa_quex_page_index_s cn38xxp2;
- struct cvmx_fpa_quex_page_index_s cn50xx;
- struct cvmx_fpa_quex_page_index_s cn52xx;
- struct cvmx_fpa_quex_page_index_s cn52xxp1;
- struct cvmx_fpa_quex_page_index_s cn56xx;
- struct cvmx_fpa_quex_page_index_s cn56xxp1;
- struct cvmx_fpa_quex_page_index_s cn58xx;
- struct cvmx_fpa_quex_page_index_s cn58xxp1;
- struct cvmx_fpa_quex_page_index_s cn61xx;
- struct cvmx_fpa_quex_page_index_s cn63xx;
- struct cvmx_fpa_quex_page_index_s cn63xxp1;
- struct cvmx_fpa_quex_page_index_s cn66xx;
- struct cvmx_fpa_quex_page_index_s cn68xx;
- struct cvmx_fpa_quex_page_index_s cn68xxp1;
- struct cvmx_fpa_quex_page_index_s cnf71xx;
};
union cvmx_fpa_que8_page_index {
@@ -1359,8 +1178,6 @@ union cvmx_fpa_que8_page_index {
uint64_t reserved_25_63:39;
#endif
} s;
- struct cvmx_fpa_que8_page_index_s cn68xx;
- struct cvmx_fpa_que8_page_index_s cn68xxp1;
};
union cvmx_fpa_que_act {
@@ -1376,24 +1193,6 @@ union cvmx_fpa_que_act {
uint64_t reserved_29_63:35;
#endif
} s;
- struct cvmx_fpa_que_act_s cn30xx;
- struct cvmx_fpa_que_act_s cn31xx;
- struct cvmx_fpa_que_act_s cn38xx;
- struct cvmx_fpa_que_act_s cn38xxp2;
- struct cvmx_fpa_que_act_s cn50xx;
- struct cvmx_fpa_que_act_s cn52xx;
- struct cvmx_fpa_que_act_s cn52xxp1;
- struct cvmx_fpa_que_act_s cn56xx;
- struct cvmx_fpa_que_act_s cn56xxp1;
- struct cvmx_fpa_que_act_s cn58xx;
- struct cvmx_fpa_que_act_s cn58xxp1;
- struct cvmx_fpa_que_act_s cn61xx;
- struct cvmx_fpa_que_act_s cn63xx;
- struct cvmx_fpa_que_act_s cn63xxp1;
- struct cvmx_fpa_que_act_s cn66xx;
- struct cvmx_fpa_que_act_s cn68xx;
- struct cvmx_fpa_que_act_s cn68xxp1;
- struct cvmx_fpa_que_act_s cnf71xx;
};
union cvmx_fpa_que_exp {
@@ -1409,24 +1208,6 @@ union cvmx_fpa_que_exp {
uint64_t reserved_29_63:35;
#endif
} s;
- struct cvmx_fpa_que_exp_s cn30xx;
- struct cvmx_fpa_que_exp_s cn31xx;
- struct cvmx_fpa_que_exp_s cn38xx;
- struct cvmx_fpa_que_exp_s cn38xxp2;
- struct cvmx_fpa_que_exp_s cn50xx;
- struct cvmx_fpa_que_exp_s cn52xx;
- struct cvmx_fpa_que_exp_s cn52xxp1;
- struct cvmx_fpa_que_exp_s cn56xx;
- struct cvmx_fpa_que_exp_s cn56xxp1;
- struct cvmx_fpa_que_exp_s cn58xx;
- struct cvmx_fpa_que_exp_s cn58xxp1;
- struct cvmx_fpa_que_exp_s cn61xx;
- struct cvmx_fpa_que_exp_s cn63xx;
- struct cvmx_fpa_que_exp_s cn63xxp1;
- struct cvmx_fpa_que_exp_s cn66xx;
- struct cvmx_fpa_que_exp_s cn68xx;
- struct cvmx_fpa_que_exp_s cn68xxp1;
- struct cvmx_fpa_que_exp_s cnf71xx;
};
union cvmx_fpa_wart_ctl {
@@ -1440,17 +1221,6 @@ union cvmx_fpa_wart_ctl {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_fpa_wart_ctl_s cn30xx;
- struct cvmx_fpa_wart_ctl_s cn31xx;
- struct cvmx_fpa_wart_ctl_s cn38xx;
- struct cvmx_fpa_wart_ctl_s cn38xxp2;
- struct cvmx_fpa_wart_ctl_s cn50xx;
- struct cvmx_fpa_wart_ctl_s cn52xx;
- struct cvmx_fpa_wart_ctl_s cn52xxp1;
- struct cvmx_fpa_wart_ctl_s cn56xx;
- struct cvmx_fpa_wart_ctl_s cn56xxp1;
- struct cvmx_fpa_wart_ctl_s cn58xx;
- struct cvmx_fpa_wart_ctl_s cn58xxp1;
};
union cvmx_fpa_wart_status {
@@ -1464,17 +1234,6 @@ union cvmx_fpa_wart_status {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_fpa_wart_status_s cn30xx;
- struct cvmx_fpa_wart_status_s cn31xx;
- struct cvmx_fpa_wart_status_s cn38xx;
- struct cvmx_fpa_wart_status_s cn38xxp2;
- struct cvmx_fpa_wart_status_s cn50xx;
- struct cvmx_fpa_wart_status_s cn52xx;
- struct cvmx_fpa_wart_status_s cn52xxp1;
- struct cvmx_fpa_wart_status_s cn56xx;
- struct cvmx_fpa_wart_status_s cn56xxp1;
- struct cvmx_fpa_wart_status_s cn58xx;
- struct cvmx_fpa_wart_status_s cn58xxp1;
};
union cvmx_fpa_wqe_threshold {
@@ -1488,12 +1247,6 @@ union cvmx_fpa_wqe_threshold {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_fpa_wqe_threshold_s cn61xx;
- struct cvmx_fpa_wqe_threshold_s cn63xx;
- struct cvmx_fpa_wqe_threshold_s cn66xx;
- struct cvmx_fpa_wqe_threshold_s cn68xx;
- struct cvmx_fpa_wqe_threshold_s cn68xxp1;
- struct cvmx_fpa_wqe_threshold_s cnf71xx;
};
#endif
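
The cvmx-gmxx-defs.h diff that follows prunes address helpers down to the family cases whose arithmetic actually differs: most switch branches returned the same base-plus-stride value, and only CN68XX uses a tighter 0x1000000 block stride. A stand-alone sketch of that computation, with DEMO_ADD_IO_SEG and the enum standing in (as assumptions for the demo) for the kernel's CVMX_ADD_IO_SEG and cvmx_get_octeon_family():

#include <stdint.h>
#include <stdio.h>

/* Pass-through stand-in; the real CVMX_ADD_IO_SEG() ORs in the Octeon
 * I/O segment bits, which doesn't matter for showing the arithmetic. */
#define DEMO_ADD_IO_SEG(addr) (addr)

enum demo_family { DEMO_CN61XX, DEMO_CN68XX };	/* hypothetical selector */

/* Shape of the removed CVMX_GMXX_BAD_REG(): one base for every family,
 * but CN68XX packs GMX blocks at a 0x1000000 stride instead of 0x8000000. */
static uint64_t demo_gmxx_bad_reg(enum demo_family fam, unsigned long block_id)
{
	uint64_t stride = (fam == DEMO_CN68XX) ? 0x1000000ull : 0x8000000ull;

	return DEMO_ADD_IO_SEG(0x0001180008000518ull + block_id * stride);
}

int main(void)
{
	printf("cn61xx blk1: 0x%016llx\n",
	       (unsigned long long)demo_gmxx_bad_reg(DEMO_CN61XX, 1));
	printf("cn68xx blk1: 0x%016llx\n",
	       (unsigned long long)demo_gmxx_bad_reg(DEMO_CN68XX, 1));
	return 0;
}
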
diff --git a/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
index 80e4f8358b81..bdba676f1f2c 100644
--- a/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-gmxx-defs.h
@@ -28,82 +28,9 @@
#ifndef __CVMX_GMXX_DEFS_H__
#define __CVMX_GMXX_DEFS_H__
-static inline uint64_t CVMX_GMXX_BAD_REG(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000518ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000518ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000518ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000518ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_BIST(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000400ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000400ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000400ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000400ull) + (block_id) * 0x8000000ull;
-}
-
-#define CVMX_GMXX_BPID_MAPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 15) + ((block_id) & 7) * 0x200000ull) * 8)
-#define CVMX_GMXX_BPID_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180008000700ull) + ((block_id) & 7) * 0x1000000ull)
-static inline uint64_t CVMX_GMXX_CLK_EN(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x00011800080007F0ull) + (block_id) * 0x8000000ull;
-}
-
-#define CVMX_GMXX_EBP_DIS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000608ull) + ((block_id) & 7) * 0x1000000ull)
-#define CVMX_GMXX_EBP_MSK(block_id) (CVMX_ADD_IO_SEG(0x0001180008000600ull) + ((block_id) & 7) * 0x1000000ull)
static inline uint64_t CVMX_GMXX_HG2_CONTROL(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000550ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000550ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000550ull) + (block_id) * 0x1000000ull;
}
@@ -113,82 +40,15 @@ static inline uint64_t CVMX_GMXX_HG2_CONTROL(unsigned long block_id)
static inline uint64_t CVMX_GMXX_INF_MODE(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800080007F8ull) + (block_id) * 0x8000000ull;
}
-static inline uint64_t CVMX_GMXX_NXA_ADR(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000510ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000510ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000510ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000510ull) + (block_id) * 0x8000000ull;
-}
-
-#define CVMX_GMXX_PIPE_STATUS(block_id) (CVMX_ADD_IO_SEG(0x0001180008000760ull) + ((block_id) & 7) * 0x1000000ull)
-static inline uint64_t CVMX_GMXX_PRTX_CBFC_CTL(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000580ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000580ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000580ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000580ull) + (block_id) * 0x8000000ull;
-}
-
static inline uint64_t CVMX_GMXX_PRTX_CFG(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -197,23 +57,9 @@ static inline uint64_t CVMX_GMXX_PRTX_CFG(unsigned long offset, unsigned long bl
return CVMX_ADD_IO_SEG(0x0001180008000010ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
}
-#define CVMX_GMXX_RXAUI_CTL(block_id) (CVMX_ADD_IO_SEG(0x0001180008000740ull) + ((block_id) & 7) * 0x1000000ull)
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000180ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -225,19 +71,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM0(unsigned long offset, unsigned lon
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000188ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -249,19 +82,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM1(unsigned long offset, unsigned lon
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000190ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -273,19 +93,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM2(unsigned long offset, unsigned lon
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000198ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -297,19 +104,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM3(unsigned long offset, unsigned lon
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800080001A0ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -321,19 +115,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM4(unsigned long offset, unsigned lon
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -342,37 +123,9 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM5(unsigned long offset, unsigned lon
return CVMX_ADD_IO_SEG(0x00011800080001A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
}
-static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_ALL_EN(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000110ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000110ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000110ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000110ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000108ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -384,20 +137,6 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CAM_EN(unsigned long offset, unsigned l
static inline uint64_t CVMX_GMXX_RXX_ADR_CTL(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -406,73 +145,9 @@ static inline uint64_t CVMX_GMXX_RXX_ADR_CTL(unsigned long offset, unsigned long
return CVMX_ADD_IO_SEG(0x0001180008000100ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
}
-static inline uint64_t CVMX_GMXX_RXX_DECISION(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000040ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_FRM_CHK(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000020ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
static inline uint64_t CVMX_GMXX_RXX_FRM_CTL(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000018ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -483,48 +158,10 @@ static inline uint64_t CVMX_GMXX_RXX_FRM_CTL(unsigned long offset, unsigned long
#define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000030ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
#define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000028ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
-static inline uint64_t CVMX_GMXX_RXX_IFG(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000058ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
static inline uint64_t CVMX_GMXX_RXX_INT_EN(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000008ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -536,20 +173,6 @@ static inline uint64_t CVMX_GMXX_RXX_INT_EN(unsigned long offset, unsigned long
static inline uint64_t CVMX_GMXX_RXX_INT_REG(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000000ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -561,20 +184,6 @@ static inline uint64_t CVMX_GMXX_RXX_INT_REG(unsigned long offset, unsigned long
static inline uint64_t CVMX_GMXX_RXX_JABBER(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -583,471 +192,20 @@ static inline uint64_t CVMX_GMXX_RXX_JABBER(unsigned long offset, unsigned long
return CVMX_ADD_IO_SEG(0x0001180008000038ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
}
-static inline uint64_t CVMX_GMXX_RXX_PAUSE_DROP_TIME(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000068ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000060ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
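/*
 * Note (added for clarity): fixed-layout registers such as
 * CVMX_GMXX_RXX_FRM_MAX/MIN and CVMX_GMXX_RXX_RX_INBND are plain macros
 * that mask their arguments ((offset) & 3, (block_id) & 1), whereas the
 * family-dependent registers use inline functions that switch on
 * cvmx_get_octeon_family() and apply no masking.
 */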
-static inline uint64_t CVMX_GMXX_RXX_STATS_CTL(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000050ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000088ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_CTL(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000098ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DMAC(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x00011800080000A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_OCTS_DRP(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x00011800080000B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000080ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_BAD(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x00011800080000C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_CTL(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000090ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DMAC(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x00011800080000A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_STATS_PKTS_DRP(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x00011800080000B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RXX_UDD_SKP(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000048ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_RX_BP_DROPX(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x0ull) * 8;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x200000ull) * 8;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000420ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
-}
-
-static inline uint64_t CVMX_GMXX_RX_BP_OFFX(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x0ull) * 8;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x200000ull) * 8;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000460ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
-}
-
-static inline uint64_t CVMX_GMXX_RX_BP_ONX(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x0ull) * 8;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x200000ull) * 8;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000440ull) + ((offset) + (block_id) * 0x1000000ull) * 8;
-}
-
-static inline uint64_t CVMX_GMXX_RX_HG2_STATUS(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000548ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000548ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000548ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000548ull) + (block_id) * 0x8000000ull;
-}
-
-#define CVMX_GMXX_RX_PASS_EN(block_id) (CVMX_ADD_IO_SEG(0x00011800080005F8ull) + ((block_id) & 1) * 0x8000000ull)
-#define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000600ull) + (((offset) & 15) + ((block_id) & 1) * 0x1000000ull) * 8)
static inline uint64_t CVMX_GMXX_RX_PRTS(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000410ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000410ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000410ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x0001180008000410ull) + (block_id) * 0x8000000ull;
}
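/*
 * Added note: per-interface registers such as CVMX_GMXX_RX_PRTS take only
 * a block_id; as the switch above shows, the interface stride for these is
 * 0x8000000 bytes on most families and 0x1000000 on CN68XX.
 */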
-static inline uint64_t CVMX_GMXX_RX_PRT_INFO(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x00011800080004E8ull) + (block_id) * 0x8000000ull;
-}
-
-#define CVMX_GMXX_RX_TX_STATUS(block_id) (CVMX_ADD_IO_SEG(0x00011800080007E8ull))
-static inline uint64_t CVMX_GMXX_RX_XAUI_BAD_COL(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000538ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000538ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000538ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000538ull) + (block_id) * 0x8000000ull;
-}
-
static inline uint64_t CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000530ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000530ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000530ull) + (block_id) * 0x1000000ull;
}
@@ -1057,20 +215,6 @@ static inline uint64_t CVMX_GMXX_RX_XAUI_CTL(unsigned long block_id)
static inline uint64_t CVMX_GMXX_SMACX(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -1079,97 +223,9 @@ static inline uint64_t CVMX_GMXX_SMACX(unsigned long offset, unsigned long block
return CVMX_ADD_IO_SEG(0x0001180008000230ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
}
-static inline uint64_t CVMX_GMXX_SOFT_BIST(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x00011800080007E8ull) + (block_id) * 0x1000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_STAT_BP(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000520ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000520ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000520ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000520ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_TB_REG(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x00011800080007E0ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_APPEND(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000218ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
static inline uint64_t CVMX_GMXX_TXX_BURST(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -1178,58 +234,10 @@ static inline uint64_t CVMX_GMXX_TXX_BURST(unsigned long offset, unsigned long b
return CVMX_ADD_IO_SEG(0x0001180008000228ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
}
-static inline uint64_t CVMX_GMXX_TXX_CBFC_XOFF(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x00011800080005A0ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_CBFC_XON(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x00011800080005C0ull) + (block_id) * 0x8000000ull;
-}
-
#define CVMX_GMXX_TXX_CLK(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000208ull) + (((offset) & 3) + ((block_id) & 1) * 0x10000ull) * 2048)
static inline uint64_t CVMX_GMXX_TXX_CTL(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -1238,48 +246,9 @@ static inline uint64_t CVMX_GMXX_TXX_CTL(unsigned long offset, unsigned long blo
return CVMX_ADD_IO_SEG(0x0001180008000270ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
}
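/*
 * Minimal usage sketch (illustrative, not from this header): the accessors
 * above only compute a CSR address; actual accesses go through the
 * cvmx_read_csr()/cvmx_write_csr() helpers declared in cvmx.h, e.g.:
 *
 *     uint64_t ctl = cvmx_read_csr(CVMX_GMXX_TXX_CTL(port, interface));
 *     cvmx_write_csr(CVMX_GMXX_TXX_CTL(port, interface), ctl);
 *
 * where "port" and "interface" are placeholder variable names chosen here
 * for illustration.
 */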
-static inline uint64_t CVMX_GMXX_TXX_MIN_PKT(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000240ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000248ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -1291,20 +260,6 @@ static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(unsigned long offset, un
static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -1313,92 +268,9 @@ static inline uint64_t CVMX_GMXX_TXX_PAUSE_PKT_TIME(unsigned long offset, unsign
return CVMX_ADD_IO_SEG(0x0001180008000238ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
}
-static inline uint64_t CVMX_GMXX_TXX_PAUSE_TOGO(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000258ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_PAUSE_ZERO(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000260ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-#define CVMX_GMXX_TXX_PIPE(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000310ull) + (((offset) & 3) + ((block_id) & 7) * 0x2000ull) * 2048)
-static inline uint64_t CVMX_GMXX_TXX_SGMII_CTL(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000300ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000300ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000300ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000300ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000300ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
static inline uint64_t CVMX_GMXX_TXX_SLOT(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -1407,323 +279,9 @@ static inline uint64_t CVMX_GMXX_TXX_SLOT(unsigned long offset, unsigned long bl
return CVMX_ADD_IO_SEG(0x0001180008000220ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
}
-static inline uint64_t CVMX_GMXX_TXX_SOFT_PAUSE(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000250ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_STAT0(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000280ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_STAT1(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000288ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_STAT2(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000290ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_STAT3(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000298ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_STAT4(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x00011800080002A0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_STAT5(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x00011800080002A8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_STAT6(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x00011800080002B0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_STAT7(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x00011800080002B8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_STAT8(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x00011800080002C0ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_STAT9(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x00011800080002C8ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
-static inline uint64_t CVMX_GMXX_TXX_STATS_CTL(unsigned long offset, unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x0ull) * 2048;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x2000ull) * 2048;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000268ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
-}
-
static inline uint64_t CVMX_GMXX_TXX_THRESH(unsigned long offset, unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x0ull) * 2048;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
@@ -1732,145 +290,9 @@ static inline uint64_t CVMX_GMXX_TXX_THRESH(unsigned long offset, unsigned long
return CVMX_ADD_IO_SEG(0x0001180008000210ull) + ((offset) + (block_id) * 0x10000ull) * 2048;
}
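/*
 * Editorial note: every CVMX_GMXX_TXX_* helper above and below computes the
 * same shape of address -- a fixed register base plus
 * (offset + block_id * stride) * 2048, where `offset` selects the port and
 * `block_id` the GMX interface.  Only the per-family stride differs: 0x0 on
 * CN31XX (a single interface), 0x2000 on CN68XX, and 0x10000 everywhere
 * else, which is why most switch cases return an identical expression.
 * A minimal sketch of that shared shape (the helper name is illustrative,
 * not part of this header):
 *
 *   static inline uint64_t gmxx_txx_reg(uint64_t base, unsigned long offset,
 *                                       unsigned long block_id, uint64_t stride)
 *   {
 *           return CVMX_ADD_IO_SEG(base) + (offset + block_id * stride) * 2048;
 *   }
 *
 *   For example, CVMX_GMXX_TXX_STAT0(port, iface) on CN68XX equals
 *   gmxx_txx_reg(0x0001180008000280ull, port, iface, 0x2000ull).
 */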
-static inline uint64_t CVMX_GMXX_TX_BP(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x00011800080004D0ull) + (block_id) * 0x8000000ull;
-}
-
-#define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000780ull) + (((offset) & 1) + ((block_id) & 0) * 0x0ull) * 8)
-static inline uint64_t CVMX_GMXX_TX_COL_ATTEMPT(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000498ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000498ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000498ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000498ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_TX_CORRUPT(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x00011800080004D8ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_TX_HG2_REG1(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000558ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000558ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000558ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000558ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_TX_HG2_REG2(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000560ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000560ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000560ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000560ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_TX_IFG(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000488ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000488ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000488ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000488ull) + (block_id) * 0x8000000ull;
-}
-
static inline uint64_t CVMX_GMXX_TX_INT_EN(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000508ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000508ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000508ull) + (block_id) * 0x1000000ull;
}
@@ -1880,151 +302,24 @@ static inline uint64_t CVMX_GMXX_TX_INT_EN(unsigned long block_id)
static inline uint64_t CVMX_GMXX_TX_INT_REG(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000500ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000500ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000500ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x0001180008000500ull) + (block_id) * 0x8000000ull;
}
-static inline uint64_t CVMX_GMXX_TX_JAM(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000490ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000490ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000490ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000490ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_TX_LFSR(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x00011800080004F8ull) + (block_id) * 0x8000000ull;
-}
-
static inline uint64_t CVMX_GMXX_TX_OVR_BP(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x00011800080004C8ull) + (block_id) * 0x8000000ull;
}
-static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_DMAC(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x00011800080004A0ull) + (block_id) * 0x8000000ull;
-}
-
-static inline uint64_t CVMX_GMXX_TX_PAUSE_PKT_TYPE(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x00011800080004A8ull) + (block_id) * 0x8000000ull;
-}
-
static inline uint64_t CVMX_GMXX_TX_PRTS(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CN30XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN50XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN31XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000480ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN38XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN58XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000480ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000480ull) + (block_id) * 0x1000000ull;
}
@@ -2032,286 +327,19 @@ static inline uint64_t CVMX_GMXX_TX_PRTS(unsigned long block_id)
}
#define CVMX_GMXX_TX_SPI_CTL(block_id) (CVMX_ADD_IO_SEG(0x00011800080004C0ull) + ((block_id) & 1) * 0x8000000ull)
-#define CVMX_GMXX_TX_SPI_DRAIN(block_id) (CVMX_ADD_IO_SEG(0x00011800080004E0ull) + ((block_id) & 1) * 0x8000000ull)
#define CVMX_GMXX_TX_SPI_MAX(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B0ull) + ((block_id) & 1) * 0x8000000ull)
-#define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001180008000680ull) + (((offset) & 31) + ((block_id) & 1) * 0x1000000ull) * 8)
#define CVMX_GMXX_TX_SPI_THRESH(block_id) (CVMX_ADD_IO_SEG(0x00011800080004B8ull) + ((block_id) & 1) * 0x8000000ull)
static inline uint64_t CVMX_GMXX_TX_XAUI_CTL(unsigned long block_id)
{
switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000528ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000528ull) + (block_id) * 0x8000000ull;
case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
return CVMX_ADD_IO_SEG(0x0001180008000528ull) + (block_id) * 0x1000000ull;
}
return CVMX_ADD_IO_SEG(0x0001180008000528ull) + (block_id) * 0x8000000ull;
}
-static inline uint64_t CVMX_GMXX_XAUI_EXT_LOOPBACK(unsigned long block_id)
-{
- switch (cvmx_get_octeon_family()) {
- case OCTEON_CNF71XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN52XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN63XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN56XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN66XX & OCTEON_FAMILY_MASK:
- case OCTEON_CN61XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x8000000ull;
- case OCTEON_CN68XX & OCTEON_FAMILY_MASK:
- return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x1000000ull;
- }
- return CVMX_ADD_IO_SEG(0x0001180008000540ull) + (block_id) * 0x8000000ull;
-}
-
void __cvmx_interrupt_gmxx_enable(int interface);
-union cvmx_gmxx_bad_reg {
- uint64_t u64;
- struct cvmx_gmxx_bad_reg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_31_63:33;
- uint64_t inb_nxa:4;
- uint64_t statovr:1;
- uint64_t loststat:4;
- uint64_t reserved_18_21:4;
- uint64_t out_ovr:16;
- uint64_t ncb_ovr:1;
- uint64_t out_col:1;
-#else
- uint64_t out_col:1;
- uint64_t ncb_ovr:1;
- uint64_t out_ovr:16;
- uint64_t reserved_18_21:4;
- uint64_t loststat:4;
- uint64_t statovr:1;
- uint64_t inb_nxa:4;
- uint64_t reserved_31_63:33;
-#endif
- } s;
- struct cvmx_gmxx_bad_reg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_31_63:33;
- uint64_t inb_nxa:4;
- uint64_t statovr:1;
- uint64_t reserved_25_25:1;
- uint64_t loststat:3;
- uint64_t reserved_5_21:17;
- uint64_t out_ovr:3;
- uint64_t reserved_0_1:2;
-#else
- uint64_t reserved_0_1:2;
- uint64_t out_ovr:3;
- uint64_t reserved_5_21:17;
- uint64_t loststat:3;
- uint64_t reserved_25_25:1;
- uint64_t statovr:1;
- uint64_t inb_nxa:4;
- uint64_t reserved_31_63:33;
-#endif
- } cn30xx;
- struct cvmx_gmxx_bad_reg_cn30xx cn31xx;
- struct cvmx_gmxx_bad_reg_s cn38xx;
- struct cvmx_gmxx_bad_reg_s cn38xxp2;
- struct cvmx_gmxx_bad_reg_cn30xx cn50xx;
- struct cvmx_gmxx_bad_reg_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_31_63:33;
- uint64_t inb_nxa:4;
- uint64_t statovr:1;
- uint64_t loststat:4;
- uint64_t reserved_6_21:16;
- uint64_t out_ovr:4;
- uint64_t reserved_0_1:2;
-#else
- uint64_t reserved_0_1:2;
- uint64_t out_ovr:4;
- uint64_t reserved_6_21:16;
- uint64_t loststat:4;
- uint64_t statovr:1;
- uint64_t inb_nxa:4;
- uint64_t reserved_31_63:33;
-#endif
- } cn52xx;
- struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1;
- struct cvmx_gmxx_bad_reg_cn52xx cn56xx;
- struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1;
- struct cvmx_gmxx_bad_reg_s cn58xx;
- struct cvmx_gmxx_bad_reg_s cn58xxp1;
- struct cvmx_gmxx_bad_reg_cn52xx cn61xx;
- struct cvmx_gmxx_bad_reg_cn52xx cn63xx;
- struct cvmx_gmxx_bad_reg_cn52xx cn63xxp1;
- struct cvmx_gmxx_bad_reg_cn52xx cn66xx;
- struct cvmx_gmxx_bad_reg_cn52xx cn68xx;
- struct cvmx_gmxx_bad_reg_cn52xx cn68xxp1;
- struct cvmx_gmxx_bad_reg_cn52xx cnf71xx;
-};
-
-union cvmx_gmxx_bist {
- uint64_t u64;
- struct cvmx_gmxx_bist_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_25_63:39;
- uint64_t status:25;
-#else
- uint64_t status:25;
- uint64_t reserved_25_63:39;
-#endif
- } s;
- struct cvmx_gmxx_bist_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t status:10;
-#else
- uint64_t status:10;
- uint64_t reserved_10_63:54;
-#endif
- } cn30xx;
- struct cvmx_gmxx_bist_cn30xx cn31xx;
- struct cvmx_gmxx_bist_cn30xx cn38xx;
- struct cvmx_gmxx_bist_cn30xx cn38xxp2;
- struct cvmx_gmxx_bist_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_12_63:52;
- uint64_t status:12;
-#else
- uint64_t status:12;
- uint64_t reserved_12_63:52;
-#endif
- } cn50xx;
- struct cvmx_gmxx_bist_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t status:16;
-#else
- uint64_t status:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn52xx;
- struct cvmx_gmxx_bist_cn52xx cn52xxp1;
- struct cvmx_gmxx_bist_cn52xx cn56xx;
- struct cvmx_gmxx_bist_cn52xx cn56xxp1;
- struct cvmx_gmxx_bist_cn58xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t status:17;
-#else
- uint64_t status:17;
- uint64_t reserved_17_63:47;
-#endif
- } cn58xx;
- struct cvmx_gmxx_bist_cn58xx cn58xxp1;
- struct cvmx_gmxx_bist_s cn61xx;
- struct cvmx_gmxx_bist_s cn63xx;
- struct cvmx_gmxx_bist_s cn63xxp1;
- struct cvmx_gmxx_bist_s cn66xx;
- struct cvmx_gmxx_bist_s cn68xx;
- struct cvmx_gmxx_bist_s cn68xxp1;
- struct cvmx_gmxx_bist_s cnf71xx;
-};
-
-union cvmx_gmxx_bpid_mapx {
- uint64_t u64;
- struct cvmx_gmxx_bpid_mapx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t status:1;
- uint64_t reserved_9_15:7;
- uint64_t val:1;
- uint64_t reserved_6_7:2;
- uint64_t bpid:6;
-#else
- uint64_t bpid:6;
- uint64_t reserved_6_7:2;
- uint64_t val:1;
- uint64_t reserved_9_15:7;
- uint64_t status:1;
- uint64_t reserved_17_63:47;
-#endif
- } s;
- struct cvmx_gmxx_bpid_mapx_s cn68xx;
- struct cvmx_gmxx_bpid_mapx_s cn68xxp1;
-};
-
-union cvmx_gmxx_bpid_msk {
- uint64_t u64;
- struct cvmx_gmxx_bpid_msk_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
- uint64_t msk_or:16;
- uint64_t reserved_16_31:16;
- uint64_t msk_and:16;
-#else
- uint64_t msk_and:16;
- uint64_t reserved_16_31:16;
- uint64_t msk_or:16;
- uint64_t reserved_48_63:16;
-#endif
- } s;
- struct cvmx_gmxx_bpid_msk_s cn68xx;
- struct cvmx_gmxx_bpid_msk_s cn68xxp1;
-};
-
-union cvmx_gmxx_clk_en {
- uint64_t u64;
- struct cvmx_gmxx_clk_en_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t clk_en:1;
-#else
- uint64_t clk_en:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_gmxx_clk_en_s cn52xx;
- struct cvmx_gmxx_clk_en_s cn52xxp1;
- struct cvmx_gmxx_clk_en_s cn56xx;
- struct cvmx_gmxx_clk_en_s cn56xxp1;
- struct cvmx_gmxx_clk_en_s cn61xx;
- struct cvmx_gmxx_clk_en_s cn63xx;
- struct cvmx_gmxx_clk_en_s cn63xxp1;
- struct cvmx_gmxx_clk_en_s cn66xx;
- struct cvmx_gmxx_clk_en_s cn68xx;
- struct cvmx_gmxx_clk_en_s cn68xxp1;
- struct cvmx_gmxx_clk_en_s cnf71xx;
-};
-
-union cvmx_gmxx_ebp_dis {
- uint64_t u64;
- struct cvmx_gmxx_ebp_dis_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t dis:16;
-#else
- uint64_t dis:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_ebp_dis_s cn68xx;
- struct cvmx_gmxx_ebp_dis_s cn68xxp1;
-};
-
-union cvmx_gmxx_ebp_msk {
- uint64_t u64;
- struct cvmx_gmxx_ebp_msk_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t msk:16;
-#else
- uint64_t msk:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_ebp_msk_s cn68xx;
- struct cvmx_gmxx_ebp_msk_s cn68xxp1;
-};
-
union cvmx_gmxx_hg2_control {
uint64_t u64;
struct cvmx_gmxx_hg2_control_s {
@@ -2329,16 +357,6 @@ union cvmx_gmxx_hg2_control {
uint64_t reserved_19_63:45;
#endif
} s;
- struct cvmx_gmxx_hg2_control_s cn52xx;
- struct cvmx_gmxx_hg2_control_s cn52xxp1;
- struct cvmx_gmxx_hg2_control_s cn56xx;
- struct cvmx_gmxx_hg2_control_s cn61xx;
- struct cvmx_gmxx_hg2_control_s cn63xx;
- struct cvmx_gmxx_hg2_control_s cn63xxp1;
- struct cvmx_gmxx_hg2_control_s cn66xx;
- struct cvmx_gmxx_hg2_control_s cn68xx;
- struct cvmx_gmxx_hg2_control_s cn68xxp1;
- struct cvmx_gmxx_hg2_control_s cnf71xx;
};
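/*
 * Editorial note: each register in this header is described as a union of
 * the raw 64-bit CSR value (`u64`) and one or more bitfield layouts.  Every
 * field list is written twice, mirrored under #ifdef __BIG_ENDIAN_BITFIELD,
 * so a field name selects the same hardware bits whichever way the compiler
 * allocates bitfields; the per-chip members (cn52xx, cn68xx, ...) merely
 * alias whichever layout that chip implements.
 */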
union cvmx_gmxx_inf_mode {
@@ -2392,9 +410,6 @@ union cvmx_gmxx_inf_mode {
uint64_t reserved_2_63:62;
#endif
} cn31xx;
- struct cvmx_gmxx_inf_mode_cn31xx cn38xx;
- struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2;
- struct cvmx_gmxx_inf_mode_cn30xx cn50xx;
struct cvmx_gmxx_inf_mode_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
@@ -2414,11 +429,6 @@ union cvmx_gmxx_inf_mode {
uint64_t reserved_10_63:54;
#endif
} cn52xx;
- struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1;
- struct cvmx_gmxx_inf_mode_cn52xx cn56xx;
- struct cvmx_gmxx_inf_mode_cn52xx cn56xxp1;
- struct cvmx_gmxx_inf_mode_cn31xx cn58xx;
- struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1;
struct cvmx_gmxx_inf_mode_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -2438,8 +448,6 @@ union cvmx_gmxx_inf_mode {
uint64_t reserved_12_63:52;
#endif
} cn61xx;
- struct cvmx_gmxx_inf_mode_cn61xx cn63xx;
- struct cvmx_gmxx_inf_mode_cn61xx cn63xxp1;
struct cvmx_gmxx_inf_mode_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -2482,108 +490,6 @@ union cvmx_gmxx_inf_mode {
uint64_t reserved_12_63:52;
#endif
} cn68xx;
- struct cvmx_gmxx_inf_mode_cn68xx cn68xxp1;
- struct cvmx_gmxx_inf_mode_cn61xx cnf71xx;
-};
-
-union cvmx_gmxx_nxa_adr {
- uint64_t u64;
- struct cvmx_gmxx_nxa_adr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_23_63:41;
- uint64_t pipe:7;
- uint64_t reserved_6_15:10;
- uint64_t prt:6;
-#else
- uint64_t prt:6;
- uint64_t reserved_6_15:10;
- uint64_t pipe:7;
- uint64_t reserved_23_63:41;
-#endif
- } s;
- struct cvmx_gmxx_nxa_adr_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t prt:6;
-#else
- uint64_t prt:6;
- uint64_t reserved_6_63:58;
-#endif
- } cn30xx;
- struct cvmx_gmxx_nxa_adr_cn30xx cn31xx;
- struct cvmx_gmxx_nxa_adr_cn30xx cn38xx;
- struct cvmx_gmxx_nxa_adr_cn30xx cn38xxp2;
- struct cvmx_gmxx_nxa_adr_cn30xx cn50xx;
- struct cvmx_gmxx_nxa_adr_cn30xx cn52xx;
- struct cvmx_gmxx_nxa_adr_cn30xx cn52xxp1;
- struct cvmx_gmxx_nxa_adr_cn30xx cn56xx;
- struct cvmx_gmxx_nxa_adr_cn30xx cn56xxp1;
- struct cvmx_gmxx_nxa_adr_cn30xx cn58xx;
- struct cvmx_gmxx_nxa_adr_cn30xx cn58xxp1;
- struct cvmx_gmxx_nxa_adr_cn30xx cn61xx;
- struct cvmx_gmxx_nxa_adr_cn30xx cn63xx;
- struct cvmx_gmxx_nxa_adr_cn30xx cn63xxp1;
- struct cvmx_gmxx_nxa_adr_cn30xx cn66xx;
- struct cvmx_gmxx_nxa_adr_s cn68xx;
- struct cvmx_gmxx_nxa_adr_s cn68xxp1;
- struct cvmx_gmxx_nxa_adr_cn30xx cnf71xx;
-};
-
-union cvmx_gmxx_pipe_status {
- uint64_t u64;
- struct cvmx_gmxx_pipe_status_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t ovr:4;
- uint64_t reserved_12_15:4;
- uint64_t bp:4;
- uint64_t reserved_4_7:4;
- uint64_t stop:4;
-#else
- uint64_t stop:4;
- uint64_t reserved_4_7:4;
- uint64_t bp:4;
- uint64_t reserved_12_15:4;
- uint64_t ovr:4;
- uint64_t reserved_20_63:44;
-#endif
- } s;
- struct cvmx_gmxx_pipe_status_s cn68xx;
- struct cvmx_gmxx_pipe_status_s cn68xxp1;
-};
-
-union cvmx_gmxx_prtx_cbfc_ctl {
- uint64_t u64;
- struct cvmx_gmxx_prtx_cbfc_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t phys_en:16;
- uint64_t logl_en:16;
- uint64_t phys_bp:16;
- uint64_t reserved_4_15:12;
- uint64_t bck_en:1;
- uint64_t drp_en:1;
- uint64_t tx_en:1;
- uint64_t rx_en:1;
-#else
- uint64_t rx_en:1;
- uint64_t tx_en:1;
- uint64_t drp_en:1;
- uint64_t bck_en:1;
- uint64_t reserved_4_15:12;
- uint64_t phys_bp:16;
- uint64_t logl_en:16;
- uint64_t phys_en:16;
-#endif
- } s;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn61xx;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn63xx;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn63xxp1;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn66xx;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn68xx;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn68xxp1;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cnf71xx;
};
union cvmx_gmxx_prtx_cfg {
@@ -2632,10 +538,6 @@ union cvmx_gmxx_prtx_cfg {
uint64_t reserved_4_63:60;
#endif
} cn30xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx;
struct cvmx_gmxx_prtx_cfg_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_14_63:50;
@@ -2661,240 +563,6 @@ union cvmx_gmxx_prtx_cfg {
uint64_t reserved_14_63:50;
#endif
} cn52xx;
- struct cvmx_gmxx_prtx_cfg_cn52xx cn52xxp1;
- struct cvmx_gmxx_prtx_cfg_cn52xx cn56xx;
- struct cvmx_gmxx_prtx_cfg_cn52xx cn56xxp1;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1;
- struct cvmx_gmxx_prtx_cfg_cn52xx cn61xx;
- struct cvmx_gmxx_prtx_cfg_cn52xx cn63xx;
- struct cvmx_gmxx_prtx_cfg_cn52xx cn63xxp1;
- struct cvmx_gmxx_prtx_cfg_cn52xx cn66xx;
- struct cvmx_gmxx_prtx_cfg_s cn68xx;
- struct cvmx_gmxx_prtx_cfg_s cn68xxp1;
- struct cvmx_gmxx_prtx_cfg_cn52xx cnf71xx;
-};
-
-union cvmx_gmxx_rxx_adr_cam0 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t adr:64;
-#else
- uint64_t adr:64;
-#endif
- } s;
- struct cvmx_gmxx_rxx_adr_cam0_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam0_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam0_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam0_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn58xxp1;
- struct cvmx_gmxx_rxx_adr_cam0_s cn61xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn63xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn63xxp1;
- struct cvmx_gmxx_rxx_adr_cam0_s cn66xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn68xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn68xxp1;
- struct cvmx_gmxx_rxx_adr_cam0_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_adr_cam1 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t adr:64;
-#else
- uint64_t adr:64;
-#endif
- } s;
- struct cvmx_gmxx_rxx_adr_cam1_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam1_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam1_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam1_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn58xxp1;
- struct cvmx_gmxx_rxx_adr_cam1_s cn61xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn63xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn63xxp1;
- struct cvmx_gmxx_rxx_adr_cam1_s cn66xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn68xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn68xxp1;
- struct cvmx_gmxx_rxx_adr_cam1_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_adr_cam2 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t adr:64;
-#else
- uint64_t adr:64;
-#endif
- } s;
- struct cvmx_gmxx_rxx_adr_cam2_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam2_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam2_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam2_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn58xxp1;
- struct cvmx_gmxx_rxx_adr_cam2_s cn61xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn63xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn63xxp1;
- struct cvmx_gmxx_rxx_adr_cam2_s cn66xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn68xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn68xxp1;
- struct cvmx_gmxx_rxx_adr_cam2_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_adr_cam3 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t adr:64;
-#else
- uint64_t adr:64;
-#endif
- } s;
- struct cvmx_gmxx_rxx_adr_cam3_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam3_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam3_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam3_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn58xxp1;
- struct cvmx_gmxx_rxx_adr_cam3_s cn61xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn63xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn63xxp1;
- struct cvmx_gmxx_rxx_adr_cam3_s cn66xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn68xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn68xxp1;
- struct cvmx_gmxx_rxx_adr_cam3_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_adr_cam4 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t adr:64;
-#else
- uint64_t adr:64;
-#endif
- } s;
- struct cvmx_gmxx_rxx_adr_cam4_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam4_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam4_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam4_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn58xxp1;
- struct cvmx_gmxx_rxx_adr_cam4_s cn61xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn63xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn63xxp1;
- struct cvmx_gmxx_rxx_adr_cam4_s cn66xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn68xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn68xxp1;
- struct cvmx_gmxx_rxx_adr_cam4_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_adr_cam5 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam5_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t adr:64;
-#else
- uint64_t adr:64;
-#endif
- } s;
- struct cvmx_gmxx_rxx_adr_cam5_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam5_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam5_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam5_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn58xxp1;
- struct cvmx_gmxx_rxx_adr_cam5_s cn61xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn63xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn63xxp1;
- struct cvmx_gmxx_rxx_adr_cam5_s cn66xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn68xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn68xxp1;
- struct cvmx_gmxx_rxx_adr_cam5_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_adr_cam_all_en {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam_all_en_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t en:32;
-#else
- uint64_t en:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_gmxx_rxx_adr_cam_all_en_s cn61xx;
- struct cvmx_gmxx_rxx_adr_cam_all_en_s cn66xx;
- struct cvmx_gmxx_rxx_adr_cam_all_en_s cn68xx;
- struct cvmx_gmxx_rxx_adr_cam_all_en_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_adr_cam_en {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam_en_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t en:8;
-#else
- uint64_t en:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn58xxp1;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn61xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn63xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn63xxp1;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn66xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn68xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn68xxp1;
- struct cvmx_gmxx_rxx_adr_cam_en_s cnf71xx;
};
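/*
 * Editorial note: these unions are normally used in a read-modify-write
 * sequence through the raw view.  A minimal sketch, assuming the
 * cvmx_read_csr()/cvmx_write_csr() accessors from cvmx.h, the
 * CVMX_GMXX_PRTX_CFG() address helper defined earlier in this header, and
 * field names as in the `s` layout of this union:
 *
 *   union cvmx_gmxx_prtx_cfg cfg;
 *
 *   cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, iface));
 *   cfg.s.en = 1;                     // enable the port via the `s` view
 *   cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, iface), cfg.u64);
 */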
union cvmx_gmxx_rxx_adr_ctl {
@@ -2912,174 +580,6 @@ union cvmx_gmxx_rxx_adr_ctl {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_gmxx_rxx_adr_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn58xxp1;
- struct cvmx_gmxx_rxx_adr_ctl_s cn61xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn63xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn63xxp1;
- struct cvmx_gmxx_rxx_adr_ctl_s cn66xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn68xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn68xxp1;
- struct cvmx_gmxx_rxx_adr_ctl_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_decision {
- uint64_t u64;
- struct cvmx_gmxx_rxx_decision_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t cnt:5;
-#else
- uint64_t cnt:5;
- uint64_t reserved_5_63:59;
-#endif
- } s;
- struct cvmx_gmxx_rxx_decision_s cn30xx;
- struct cvmx_gmxx_rxx_decision_s cn31xx;
- struct cvmx_gmxx_rxx_decision_s cn38xx;
- struct cvmx_gmxx_rxx_decision_s cn38xxp2;
- struct cvmx_gmxx_rxx_decision_s cn50xx;
- struct cvmx_gmxx_rxx_decision_s cn52xx;
- struct cvmx_gmxx_rxx_decision_s cn52xxp1;
- struct cvmx_gmxx_rxx_decision_s cn56xx;
- struct cvmx_gmxx_rxx_decision_s cn56xxp1;
- struct cvmx_gmxx_rxx_decision_s cn58xx;
- struct cvmx_gmxx_rxx_decision_s cn58xxp1;
- struct cvmx_gmxx_rxx_decision_s cn61xx;
- struct cvmx_gmxx_rxx_decision_s cn63xx;
- struct cvmx_gmxx_rxx_decision_s cn63xxp1;
- struct cvmx_gmxx_rxx_decision_s cn66xx;
- struct cvmx_gmxx_rxx_decision_s cn68xx;
- struct cvmx_gmxx_rxx_decision_s cn68xxp1;
- struct cvmx_gmxx_rxx_decision_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_frm_chk {
- uint64_t u64;
- struct cvmx_gmxx_rxx_frm_chk_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
-#else
- uint64_t minerr:1;
- uint64_t carext:1;
- uint64_t maxerr:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t alnerr:1;
- uint64_t lenerr:1;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t niberr:1;
- uint64_t reserved_10_63:54;
-#endif
- } s;
- struct cvmx_gmxx_rxx_frm_chk_s cn30xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn31xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn38xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn38xxp2;
- struct cvmx_gmxx_rxx_frm_chk_cn50xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_6_6:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
-#else
- uint64_t reserved_0_0:1;
- uint64_t carext:1;
- uint64_t reserved_2_2:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t alnerr:1;
- uint64_t reserved_6_6:1;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t niberr:1;
- uint64_t reserved_10_63:54;
-#endif
- } cn50xx;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
-#else
- uint64_t reserved_0_0:1;
- uint64_t carext:1;
- uint64_t reserved_2_2:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t reserved_5_6:2;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t reserved_9_63:55;
-#endif
- } cn52xx;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx cn52xxp1;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xx;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xxp1;
- struct cvmx_gmxx_rxx_frm_chk_s cn58xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn58xxp1;
- struct cvmx_gmxx_rxx_frm_chk_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t minerr:1;
-#else
- uint64_t minerr:1;
- uint64_t carext:1;
- uint64_t reserved_2_2:1;
- uint64_t jabber:1;
- uint64_t fcserr:1;
- uint64_t reserved_5_6:2;
- uint64_t rcverr:1;
- uint64_t skperr:1;
- uint64_t reserved_9_63:55;
-#endif
- } cn61xx;
- struct cvmx_gmxx_rxx_frm_chk_cn61xx cn63xx;
- struct cvmx_gmxx_rxx_frm_chk_cn61xx cn63xxp1;
- struct cvmx_gmxx_rxx_frm_chk_cn61xx cn66xx;
- struct cvmx_gmxx_rxx_frm_chk_cn61xx cn68xx;
- struct cvmx_gmxx_rxx_frm_chk_cn61xx cn68xxp1;
- struct cvmx_gmxx_rxx_frm_chk_cn61xx cnf71xx;
};
union cvmx_gmxx_rxx_frm_ctl {
@@ -3165,8 +665,6 @@ union cvmx_gmxx_rxx_frm_ctl {
uint64_t reserved_8_63:56;
#endif
} cn31xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2;
struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
@@ -3194,9 +692,6 @@ union cvmx_gmxx_rxx_frm_ctl {
uint64_t reserved_11_63:53;
#endif
} cn50xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx;
struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
@@ -3251,7 +746,6 @@ union cvmx_gmxx_rxx_frm_ctl {
uint64_t reserved_11_63:53;
#endif
} cn58xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1;
struct cvmx_gmxx_rxx_frm_ctl_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_13_63:51;
@@ -3283,12 +777,6 @@ union cvmx_gmxx_rxx_frm_ctl {
uint64_t reserved_13_63:51;
#endif
} cn61xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn63xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn63xxp1;
- struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn66xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn68xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn61xx cn68xxp1;
- struct cvmx_gmxx_rxx_frm_ctl_cn61xx cnf71xx;
};
union cvmx_gmxx_rxx_frm_max {
@@ -3302,12 +790,6 @@ union cvmx_gmxx_rxx_frm_max {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_gmxx_rxx_frm_max_s cn30xx;
- struct cvmx_gmxx_rxx_frm_max_s cn31xx;
- struct cvmx_gmxx_rxx_frm_max_s cn38xx;
- struct cvmx_gmxx_rxx_frm_max_s cn38xxp2;
- struct cvmx_gmxx_rxx_frm_max_s cn58xx;
- struct cvmx_gmxx_rxx_frm_max_s cn58xxp1;
};
union cvmx_gmxx_rxx_frm_min {
@@ -3321,43 +803,6 @@ union cvmx_gmxx_rxx_frm_min {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_gmxx_rxx_frm_min_s cn30xx;
- struct cvmx_gmxx_rxx_frm_min_s cn31xx;
- struct cvmx_gmxx_rxx_frm_min_s cn38xx;
- struct cvmx_gmxx_rxx_frm_min_s cn38xxp2;
- struct cvmx_gmxx_rxx_frm_min_s cn58xx;
- struct cvmx_gmxx_rxx_frm_min_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_ifg {
- uint64_t u64;
- struct cvmx_gmxx_rxx_ifg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t ifg:4;
-#else
- uint64_t ifg:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_gmxx_rxx_ifg_s cn30xx;
- struct cvmx_gmxx_rxx_ifg_s cn31xx;
- struct cvmx_gmxx_rxx_ifg_s cn38xx;
- struct cvmx_gmxx_rxx_ifg_s cn38xxp2;
- struct cvmx_gmxx_rxx_ifg_s cn50xx;
- struct cvmx_gmxx_rxx_ifg_s cn52xx;
- struct cvmx_gmxx_rxx_ifg_s cn52xxp1;
- struct cvmx_gmxx_rxx_ifg_s cn56xx;
- struct cvmx_gmxx_rxx_ifg_s cn56xxp1;
- struct cvmx_gmxx_rxx_ifg_s cn58xx;
- struct cvmx_gmxx_rxx_ifg_s cn58xxp1;
- struct cvmx_gmxx_rxx_ifg_s cn61xx;
- struct cvmx_gmxx_rxx_ifg_s cn63xx;
- struct cvmx_gmxx_rxx_ifg_s cn63xxp1;
- struct cvmx_gmxx_rxx_ifg_s cn66xx;
- struct cvmx_gmxx_rxx_ifg_s cn68xx;
- struct cvmx_gmxx_rxx_ifg_s cn68xxp1;
- struct cvmx_gmxx_rxx_ifg_s cnf71xx;
};
union cvmx_gmxx_rxx_int_en {
@@ -3472,9 +917,6 @@ union cvmx_gmxx_rxx_int_en {
uint64_t reserved_19_63:45;
#endif
} cn30xx;
- struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx;
- struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx;
- struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2;
struct cvmx_gmxx_rxx_int_en_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -3581,8 +1023,6 @@ union cvmx_gmxx_rxx_int_en {
uint64_t reserved_29_63:35;
#endif
} cn52xx;
- struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1;
- struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx;
struct cvmx_gmxx_rxx_int_en_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_27_63:37;
@@ -3685,7 +1125,6 @@ union cvmx_gmxx_rxx_int_en {
uint64_t reserved_20_63:44;
#endif
} cn58xx;
- struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1;
struct cvmx_gmxx_rxx_int_en_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
@@ -3745,12 +1184,6 @@ union cvmx_gmxx_rxx_int_en {
uint64_t reserved_29_63:35;
#endif
} cn61xx;
- struct cvmx_gmxx_rxx_int_en_cn61xx cn63xx;
- struct cvmx_gmxx_rxx_int_en_cn61xx cn63xxp1;
- struct cvmx_gmxx_rxx_int_en_cn61xx cn66xx;
- struct cvmx_gmxx_rxx_int_en_cn61xx cn68xx;
- struct cvmx_gmxx_rxx_int_en_cn61xx cn68xxp1;
- struct cvmx_gmxx_rxx_int_en_cn61xx cnf71xx;
};
union cvmx_gmxx_rxx_int_reg {
@@ -3865,9 +1298,6 @@ union cvmx_gmxx_rxx_int_reg {
uint64_t reserved_19_63:45;
#endif
} cn30xx;
- struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx;
- struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx;
- struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2;
struct cvmx_gmxx_rxx_int_reg_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -3974,8 +1404,6 @@ union cvmx_gmxx_rxx_int_reg {
uint64_t reserved_29_63:35;
#endif
} cn52xx;
- struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1;
- struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx;
struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_27_63:37;
@@ -4078,7 +1506,6 @@ union cvmx_gmxx_rxx_int_reg {
uint64_t reserved_20_63:44;
#endif
} cn58xx;
- struct cvmx_gmxx_rxx_int_reg_cn58xx cn58xxp1;
struct cvmx_gmxx_rxx_int_reg_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
@@ -4138,12 +1565,6 @@ union cvmx_gmxx_rxx_int_reg {
uint64_t reserved_29_63:35;
#endif
} cn61xx;
- struct cvmx_gmxx_rxx_int_reg_cn61xx cn63xx;
- struct cvmx_gmxx_rxx_int_reg_cn61xx cn63xxp1;
- struct cvmx_gmxx_rxx_int_reg_cn61xx cn66xx;
- struct cvmx_gmxx_rxx_int_reg_cn61xx cn68xx;
- struct cvmx_gmxx_rxx_int_reg_cn61xx cn68xxp1;
- struct cvmx_gmxx_rxx_int_reg_cn61xx cnf71xx;
};
union cvmx_gmxx_rxx_jabber {
@@ -4157,51 +1578,6 @@ union cvmx_gmxx_rxx_jabber {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_gmxx_rxx_jabber_s cn30xx;
- struct cvmx_gmxx_rxx_jabber_s cn31xx;
- struct cvmx_gmxx_rxx_jabber_s cn38xx;
- struct cvmx_gmxx_rxx_jabber_s cn38xxp2;
- struct cvmx_gmxx_rxx_jabber_s cn50xx;
- struct cvmx_gmxx_rxx_jabber_s cn52xx;
- struct cvmx_gmxx_rxx_jabber_s cn52xxp1;
- struct cvmx_gmxx_rxx_jabber_s cn56xx;
- struct cvmx_gmxx_rxx_jabber_s cn56xxp1;
- struct cvmx_gmxx_rxx_jabber_s cn58xx;
- struct cvmx_gmxx_rxx_jabber_s cn58xxp1;
- struct cvmx_gmxx_rxx_jabber_s cn61xx;
- struct cvmx_gmxx_rxx_jabber_s cn63xx;
- struct cvmx_gmxx_rxx_jabber_s cn63xxp1;
- struct cvmx_gmxx_rxx_jabber_s cn66xx;
- struct cvmx_gmxx_rxx_jabber_s cn68xx;
- struct cvmx_gmxx_rxx_jabber_s cn68xxp1;
- struct cvmx_gmxx_rxx_jabber_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_pause_drop_time {
- uint64_t u64;
- struct cvmx_gmxx_rxx_pause_drop_time_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t status:16;
-#else
- uint64_t status:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn61xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn63xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn63xxp1;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn66xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn68xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn68xxp1;
- struct cvmx_gmxx_rxx_pause_drop_time_s cnf71xx;
};
union cvmx_gmxx_rxx_rx_inbnd {
@@ -4219,588 +1595,6 @@ union cvmx_gmxx_rxx_rx_inbnd {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn30xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn31xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn38xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn38xxp2;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn50xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn58xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t rd_clr:1;
-#else
- uint64_t rd_clr:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_gmxx_rxx_stats_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn58xxp1;
- struct cvmx_gmxx_rxx_stats_ctl_s cn61xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn63xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn63xxp1;
- struct cvmx_gmxx_rxx_stats_ctl_s cn66xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn68xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn68xxp1;
- struct cvmx_gmxx_rxx_stats_ctl_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_stats_octs {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
-#else
- uint64_t cnt:48;
- uint64_t reserved_48_63:16;
-#endif
- } s;
- struct cvmx_gmxx_rxx_stats_octs_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn58xxp1;
- struct cvmx_gmxx_rxx_stats_octs_s cn61xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn63xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn63xxp1;
- struct cvmx_gmxx_rxx_stats_octs_s cn66xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn68xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn68xxp1;
- struct cvmx_gmxx_rxx_stats_octs_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_stats_octs_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
-#else
- uint64_t cnt:48;
- uint64_t reserved_48_63:16;
-#endif
- } s;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn61xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn63xxp1;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn66xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn68xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn68xxp1;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_stats_octs_dmac {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
-#else
- uint64_t cnt:48;
- uint64_t reserved_48_63:16;
-#endif
- } s;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn61xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn63xxp1;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn66xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn68xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn68xxp1;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_stats_octs_drp {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_drp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
-#else
- uint64_t cnt:48;
- uint64_t reserved_48_63:16;
-#endif
- } s;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn61xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn63xxp1;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn66xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn68xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn68xxp1;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_stats_pkts {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn58xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_s cn61xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn63xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn63xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_s cn66xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn68xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn68xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_bad {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn61xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn63xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn66xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn68xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn68xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn61xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn63xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn66xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn68xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn68xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_dmac {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn61xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn63xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn66xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn68xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn68xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_drp {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
-#else
- uint64_t cnt:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn61xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn63xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn66xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn68xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn68xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cnf71xx;
-};
-
-union cvmx_gmxx_rxx_udd_skp {
- uint64_t u64;
- struct cvmx_gmxx_rxx_udd_skp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t fcssel:1;
- uint64_t reserved_7_7:1;
- uint64_t len:7;
-#else
- uint64_t len:7;
- uint64_t reserved_7_7:1;
- uint64_t fcssel:1;
- uint64_t reserved_9_63:55;
-#endif
- } s;
- struct cvmx_gmxx_rxx_udd_skp_s cn30xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn31xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn38xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn38xxp2;
- struct cvmx_gmxx_rxx_udd_skp_s cn50xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn52xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn52xxp1;
- struct cvmx_gmxx_rxx_udd_skp_s cn56xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn56xxp1;
- struct cvmx_gmxx_rxx_udd_skp_s cn58xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn58xxp1;
- struct cvmx_gmxx_rxx_udd_skp_s cn61xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn63xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn63xxp1;
- struct cvmx_gmxx_rxx_udd_skp_s cn66xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn68xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn68xxp1;
- struct cvmx_gmxx_rxx_udd_skp_s cnf71xx;
-};
-
-union cvmx_gmxx_rx_bp_dropx {
- uint64_t u64;
- struct cvmx_gmxx_rx_bp_dropx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t mark:6;
-#else
- uint64_t mark:6;
- uint64_t reserved_6_63:58;
-#endif
- } s;
- struct cvmx_gmxx_rx_bp_dropx_s cn30xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn31xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn38xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn38xxp2;
- struct cvmx_gmxx_rx_bp_dropx_s cn50xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn52xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn52xxp1;
- struct cvmx_gmxx_rx_bp_dropx_s cn56xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn56xxp1;
- struct cvmx_gmxx_rx_bp_dropx_s cn58xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn58xxp1;
- struct cvmx_gmxx_rx_bp_dropx_s cn61xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn63xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn63xxp1;
- struct cvmx_gmxx_rx_bp_dropx_s cn66xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn68xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn68xxp1;
- struct cvmx_gmxx_rx_bp_dropx_s cnf71xx;
-};
-
-union cvmx_gmxx_rx_bp_offx {
- uint64_t u64;
- struct cvmx_gmxx_rx_bp_offx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t mark:6;
-#else
- uint64_t mark:6;
- uint64_t reserved_6_63:58;
-#endif
- } s;
- struct cvmx_gmxx_rx_bp_offx_s cn30xx;
- struct cvmx_gmxx_rx_bp_offx_s cn31xx;
- struct cvmx_gmxx_rx_bp_offx_s cn38xx;
- struct cvmx_gmxx_rx_bp_offx_s cn38xxp2;
- struct cvmx_gmxx_rx_bp_offx_s cn50xx;
- struct cvmx_gmxx_rx_bp_offx_s cn52xx;
- struct cvmx_gmxx_rx_bp_offx_s cn52xxp1;
- struct cvmx_gmxx_rx_bp_offx_s cn56xx;
- struct cvmx_gmxx_rx_bp_offx_s cn56xxp1;
- struct cvmx_gmxx_rx_bp_offx_s cn58xx;
- struct cvmx_gmxx_rx_bp_offx_s cn58xxp1;
- struct cvmx_gmxx_rx_bp_offx_s cn61xx;
- struct cvmx_gmxx_rx_bp_offx_s cn63xx;
- struct cvmx_gmxx_rx_bp_offx_s cn63xxp1;
- struct cvmx_gmxx_rx_bp_offx_s cn66xx;
- struct cvmx_gmxx_rx_bp_offx_s cn68xx;
- struct cvmx_gmxx_rx_bp_offx_s cn68xxp1;
- struct cvmx_gmxx_rx_bp_offx_s cnf71xx;
-};
-
-union cvmx_gmxx_rx_bp_onx {
- uint64_t u64;
- struct cvmx_gmxx_rx_bp_onx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_11_63:53;
- uint64_t mark:11;
-#else
- uint64_t mark:11;
- uint64_t reserved_11_63:53;
-#endif
- } s;
- struct cvmx_gmxx_rx_bp_onx_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_9_63:55;
- uint64_t mark:9;
-#else
- uint64_t mark:9;
- uint64_t reserved_9_63:55;
-#endif
- } cn30xx;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn31xx;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn38xx;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn38xxp2;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn50xx;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn52xx;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn52xxp1;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn56xx;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn56xxp1;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn58xx;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn58xxp1;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn61xx;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn63xx;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn63xxp1;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cn66xx;
- struct cvmx_gmxx_rx_bp_onx_s cn68xx;
- struct cvmx_gmxx_rx_bp_onx_s cn68xxp1;
- struct cvmx_gmxx_rx_bp_onx_cn30xx cnf71xx;
-};
-
-union cvmx_gmxx_rx_hg2_status {
- uint64_t u64;
- struct cvmx_gmxx_rx_hg2_status_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
- uint64_t phtim2go:16;
- uint64_t xof:16;
- uint64_t lgtim2go:16;
-#else
- uint64_t lgtim2go:16;
- uint64_t xof:16;
- uint64_t phtim2go:16;
- uint64_t reserved_48_63:16;
-#endif
- } s;
- struct cvmx_gmxx_rx_hg2_status_s cn52xx;
- struct cvmx_gmxx_rx_hg2_status_s cn52xxp1;
- struct cvmx_gmxx_rx_hg2_status_s cn56xx;
- struct cvmx_gmxx_rx_hg2_status_s cn61xx;
- struct cvmx_gmxx_rx_hg2_status_s cn63xx;
- struct cvmx_gmxx_rx_hg2_status_s cn63xxp1;
- struct cvmx_gmxx_rx_hg2_status_s cn66xx;
- struct cvmx_gmxx_rx_hg2_status_s cn68xx;
- struct cvmx_gmxx_rx_hg2_status_s cn68xxp1;
- struct cvmx_gmxx_rx_hg2_status_s cnf71xx;
-};
-
-union cvmx_gmxx_rx_pass_en {
- uint64_t u64;
- struct cvmx_gmxx_rx_pass_en_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t en:16;
-#else
- uint64_t en:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_rx_pass_en_s cn38xx;
- struct cvmx_gmxx_rx_pass_en_s cn38xxp2;
- struct cvmx_gmxx_rx_pass_en_s cn58xx;
- struct cvmx_gmxx_rx_pass_en_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_pass_mapx {
- uint64_t u64;
- struct cvmx_gmxx_rx_pass_mapx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t dprt:4;
-#else
- uint64_t dprt:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_gmxx_rx_pass_mapx_s cn38xx;
- struct cvmx_gmxx_rx_pass_mapx_s cn38xxp2;
- struct cvmx_gmxx_rx_pass_mapx_s cn58xx;
- struct cvmx_gmxx_rx_pass_mapx_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_prt_info {
- uint64_t u64;
- struct cvmx_gmxx_rx_prt_info_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t drop:16;
- uint64_t commit:16;
-#else
- uint64_t commit:16;
- uint64_t drop:16;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_gmxx_rx_prt_info_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_19_63:45;
- uint64_t drop:3;
- uint64_t reserved_3_15:13;
- uint64_t commit:3;
-#else
- uint64_t commit:3;
- uint64_t reserved_3_15:13;
- uint64_t drop:3;
- uint64_t reserved_19_63:45;
-#endif
- } cn30xx;
- struct cvmx_gmxx_rx_prt_info_cn30xx cn31xx;
- struct cvmx_gmxx_rx_prt_info_s cn38xx;
- struct cvmx_gmxx_rx_prt_info_cn30xx cn50xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_20_63:44;
- uint64_t drop:4;
- uint64_t reserved_4_15:12;
- uint64_t commit:4;
-#else
- uint64_t commit:4;
- uint64_t reserved_4_15:12;
- uint64_t drop:4;
- uint64_t reserved_20_63:44;
-#endif
- } cn52xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn52xxp1;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn56xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn56xxp1;
- struct cvmx_gmxx_rx_prt_info_s cn58xx;
- struct cvmx_gmxx_rx_prt_info_s cn58xxp1;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn61xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn63xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn63xxp1;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn66xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn68xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn68xxp1;
- struct cvmx_gmxx_rx_prt_info_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_18_63:46;
- uint64_t drop:2;
- uint64_t reserved_2_15:14;
- uint64_t commit:2;
-#else
- uint64_t commit:2;
- uint64_t reserved_2_15:14;
- uint64_t drop:2;
- uint64_t reserved_18_63:46;
-#endif
- } cnf71xx;
};
union cvmx_gmxx_rx_prts {
@@ -4814,74 +1608,6 @@ union cvmx_gmxx_rx_prts {
uint64_t reserved_3_63:61;
#endif
} s;
- struct cvmx_gmxx_rx_prts_s cn30xx;
- struct cvmx_gmxx_rx_prts_s cn31xx;
- struct cvmx_gmxx_rx_prts_s cn38xx;
- struct cvmx_gmxx_rx_prts_s cn38xxp2;
- struct cvmx_gmxx_rx_prts_s cn50xx;
- struct cvmx_gmxx_rx_prts_s cn52xx;
- struct cvmx_gmxx_rx_prts_s cn52xxp1;
- struct cvmx_gmxx_rx_prts_s cn56xx;
- struct cvmx_gmxx_rx_prts_s cn56xxp1;
- struct cvmx_gmxx_rx_prts_s cn58xx;
- struct cvmx_gmxx_rx_prts_s cn58xxp1;
- struct cvmx_gmxx_rx_prts_s cn61xx;
- struct cvmx_gmxx_rx_prts_s cn63xx;
- struct cvmx_gmxx_rx_prts_s cn63xxp1;
- struct cvmx_gmxx_rx_prts_s cn66xx;
- struct cvmx_gmxx_rx_prts_s cn68xx;
- struct cvmx_gmxx_rx_prts_s cn68xxp1;
- struct cvmx_gmxx_rx_prts_s cnf71xx;
-};
-
-union cvmx_gmxx_rx_tx_status {
- uint64_t u64;
- struct cvmx_gmxx_rx_tx_status_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_7_63:57;
- uint64_t tx:3;
- uint64_t reserved_3_3:1;
- uint64_t rx:3;
-#else
- uint64_t rx:3;
- uint64_t reserved_3_3:1;
- uint64_t tx:3;
- uint64_t reserved_7_63:57;
-#endif
- } s;
- struct cvmx_gmxx_rx_tx_status_s cn30xx;
- struct cvmx_gmxx_rx_tx_status_s cn31xx;
- struct cvmx_gmxx_rx_tx_status_s cn50xx;
-};
-
-union cvmx_gmxx_rx_xaui_bad_col {
- uint64_t u64;
- struct cvmx_gmxx_rx_xaui_bad_col_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_40_63:24;
- uint64_t val:1;
- uint64_t state:3;
- uint64_t lane_rxc:4;
- uint64_t lane_rxd:32;
-#else
- uint64_t lane_rxd:32;
- uint64_t lane_rxc:4;
- uint64_t state:3;
- uint64_t val:1;
- uint64_t reserved_40_63:24;
-#endif
- } s;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn52xx;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn52xxp1;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn56xx;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn56xxp1;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn61xx;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn63xx;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn63xxp1;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn66xx;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn68xx;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn68xxp1;
- struct cvmx_gmxx_rx_xaui_bad_col_s cnf71xx;
};
union cvmx_gmxx_rx_xaui_ctl {
@@ -4895,913 +1621,6 @@ union cvmx_gmxx_rx_xaui_ctl {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_gmxx_rx_xaui_ctl_s cn52xx;
- struct cvmx_gmxx_rx_xaui_ctl_s cn52xxp1;
- struct cvmx_gmxx_rx_xaui_ctl_s cn56xx;
- struct cvmx_gmxx_rx_xaui_ctl_s cn56xxp1;
- struct cvmx_gmxx_rx_xaui_ctl_s cn61xx;
- struct cvmx_gmxx_rx_xaui_ctl_s cn63xx;
- struct cvmx_gmxx_rx_xaui_ctl_s cn63xxp1;
- struct cvmx_gmxx_rx_xaui_ctl_s cn66xx;
- struct cvmx_gmxx_rx_xaui_ctl_s cn68xx;
- struct cvmx_gmxx_rx_xaui_ctl_s cn68xxp1;
- struct cvmx_gmxx_rx_xaui_ctl_s cnf71xx;
-};
-
-union cvmx_gmxx_rxaui_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxaui_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t disparity:1;
-#else
- uint64_t disparity:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_gmxx_rxaui_ctl_s cn68xx;
- struct cvmx_gmxx_rxaui_ctl_s cn68xxp1;
-};
-
-union cvmx_gmxx_smacx {
- uint64_t u64;
- struct cvmx_gmxx_smacx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
- uint64_t smac:48;
-#else
- uint64_t smac:48;
- uint64_t reserved_48_63:16;
-#endif
- } s;
- struct cvmx_gmxx_smacx_s cn30xx;
- struct cvmx_gmxx_smacx_s cn31xx;
- struct cvmx_gmxx_smacx_s cn38xx;
- struct cvmx_gmxx_smacx_s cn38xxp2;
- struct cvmx_gmxx_smacx_s cn50xx;
- struct cvmx_gmxx_smacx_s cn52xx;
- struct cvmx_gmxx_smacx_s cn52xxp1;
- struct cvmx_gmxx_smacx_s cn56xx;
- struct cvmx_gmxx_smacx_s cn56xxp1;
- struct cvmx_gmxx_smacx_s cn58xx;
- struct cvmx_gmxx_smacx_s cn58xxp1;
- struct cvmx_gmxx_smacx_s cn61xx;
- struct cvmx_gmxx_smacx_s cn63xx;
- struct cvmx_gmxx_smacx_s cn63xxp1;
- struct cvmx_gmxx_smacx_s cn66xx;
- struct cvmx_gmxx_smacx_s cn68xx;
- struct cvmx_gmxx_smacx_s cn68xxp1;
- struct cvmx_gmxx_smacx_s cnf71xx;
-};
-
-union cvmx_gmxx_soft_bist {
- uint64_t u64;
- struct cvmx_gmxx_soft_bist_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t start_bist:1;
- uint64_t clear_bist:1;
-#else
- uint64_t clear_bist:1;
- uint64_t start_bist:1;
- uint64_t reserved_2_63:62;
-#endif
- } s;
- struct cvmx_gmxx_soft_bist_s cn63xx;
- struct cvmx_gmxx_soft_bist_s cn63xxp1;
- struct cvmx_gmxx_soft_bist_s cn66xx;
- struct cvmx_gmxx_soft_bist_s cn68xx;
- struct cvmx_gmxx_soft_bist_s cn68xxp1;
-};
-
-union cvmx_gmxx_stat_bp {
- uint64_t u64;
- struct cvmx_gmxx_stat_bp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t bp:1;
- uint64_t cnt:16;
-#else
- uint64_t cnt:16;
- uint64_t bp:1;
- uint64_t reserved_17_63:47;
-#endif
- } s;
- struct cvmx_gmxx_stat_bp_s cn30xx;
- struct cvmx_gmxx_stat_bp_s cn31xx;
- struct cvmx_gmxx_stat_bp_s cn38xx;
- struct cvmx_gmxx_stat_bp_s cn38xxp2;
- struct cvmx_gmxx_stat_bp_s cn50xx;
- struct cvmx_gmxx_stat_bp_s cn52xx;
- struct cvmx_gmxx_stat_bp_s cn52xxp1;
- struct cvmx_gmxx_stat_bp_s cn56xx;
- struct cvmx_gmxx_stat_bp_s cn56xxp1;
- struct cvmx_gmxx_stat_bp_s cn58xx;
- struct cvmx_gmxx_stat_bp_s cn58xxp1;
- struct cvmx_gmxx_stat_bp_s cn61xx;
- struct cvmx_gmxx_stat_bp_s cn63xx;
- struct cvmx_gmxx_stat_bp_s cn63xxp1;
- struct cvmx_gmxx_stat_bp_s cn66xx;
- struct cvmx_gmxx_stat_bp_s cn68xx;
- struct cvmx_gmxx_stat_bp_s cn68xxp1;
- struct cvmx_gmxx_stat_bp_s cnf71xx;
-};
-
-union cvmx_gmxx_tb_reg {
- uint64_t u64;
- struct cvmx_gmxx_tb_reg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t wr_magic:1;
-#else
- uint64_t wr_magic:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_gmxx_tb_reg_s cn61xx;
- struct cvmx_gmxx_tb_reg_s cn66xx;
- struct cvmx_gmxx_tb_reg_s cn68xx;
- struct cvmx_gmxx_tb_reg_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_append {
- uint64_t u64;
- struct cvmx_gmxx_txx_append_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t force_fcs:1;
- uint64_t fcs:1;
- uint64_t pad:1;
- uint64_t preamble:1;
-#else
- uint64_t preamble:1;
- uint64_t pad:1;
- uint64_t fcs:1;
- uint64_t force_fcs:1;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_gmxx_txx_append_s cn30xx;
- struct cvmx_gmxx_txx_append_s cn31xx;
- struct cvmx_gmxx_txx_append_s cn38xx;
- struct cvmx_gmxx_txx_append_s cn38xxp2;
- struct cvmx_gmxx_txx_append_s cn50xx;
- struct cvmx_gmxx_txx_append_s cn52xx;
- struct cvmx_gmxx_txx_append_s cn52xxp1;
- struct cvmx_gmxx_txx_append_s cn56xx;
- struct cvmx_gmxx_txx_append_s cn56xxp1;
- struct cvmx_gmxx_txx_append_s cn58xx;
- struct cvmx_gmxx_txx_append_s cn58xxp1;
- struct cvmx_gmxx_txx_append_s cn61xx;
- struct cvmx_gmxx_txx_append_s cn63xx;
- struct cvmx_gmxx_txx_append_s cn63xxp1;
- struct cvmx_gmxx_txx_append_s cn66xx;
- struct cvmx_gmxx_txx_append_s cn68xx;
- struct cvmx_gmxx_txx_append_s cn68xxp1;
- struct cvmx_gmxx_txx_append_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_burst {
- uint64_t u64;
- struct cvmx_gmxx_txx_burst_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t burst:16;
-#else
- uint64_t burst:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_txx_burst_s cn30xx;
- struct cvmx_gmxx_txx_burst_s cn31xx;
- struct cvmx_gmxx_txx_burst_s cn38xx;
- struct cvmx_gmxx_txx_burst_s cn38xxp2;
- struct cvmx_gmxx_txx_burst_s cn50xx;
- struct cvmx_gmxx_txx_burst_s cn52xx;
- struct cvmx_gmxx_txx_burst_s cn52xxp1;
- struct cvmx_gmxx_txx_burst_s cn56xx;
- struct cvmx_gmxx_txx_burst_s cn56xxp1;
- struct cvmx_gmxx_txx_burst_s cn58xx;
- struct cvmx_gmxx_txx_burst_s cn58xxp1;
- struct cvmx_gmxx_txx_burst_s cn61xx;
- struct cvmx_gmxx_txx_burst_s cn63xx;
- struct cvmx_gmxx_txx_burst_s cn63xxp1;
- struct cvmx_gmxx_txx_burst_s cn66xx;
- struct cvmx_gmxx_txx_burst_s cn68xx;
- struct cvmx_gmxx_txx_burst_s cn68xxp1;
- struct cvmx_gmxx_txx_burst_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_cbfc_xoff {
- uint64_t u64;
- struct cvmx_gmxx_txx_cbfc_xoff_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t xoff:16;
-#else
- uint64_t xoff:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn52xx;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn56xx;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn61xx;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn63xx;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn63xxp1;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn66xx;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn68xx;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn68xxp1;
- struct cvmx_gmxx_txx_cbfc_xoff_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_cbfc_xon {
- uint64_t u64;
- struct cvmx_gmxx_txx_cbfc_xon_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t xon:16;
-#else
- uint64_t xon:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_txx_cbfc_xon_s cn52xx;
- struct cvmx_gmxx_txx_cbfc_xon_s cn56xx;
- struct cvmx_gmxx_txx_cbfc_xon_s cn61xx;
- struct cvmx_gmxx_txx_cbfc_xon_s cn63xx;
- struct cvmx_gmxx_txx_cbfc_xon_s cn63xxp1;
- struct cvmx_gmxx_txx_cbfc_xon_s cn66xx;
- struct cvmx_gmxx_txx_cbfc_xon_s cn68xx;
- struct cvmx_gmxx_txx_cbfc_xon_s cn68xxp1;
- struct cvmx_gmxx_txx_cbfc_xon_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_clk {
- uint64_t u64;
- struct cvmx_gmxx_txx_clk_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_6_63:58;
- uint64_t clk_cnt:6;
-#else
- uint64_t clk_cnt:6;
- uint64_t reserved_6_63:58;
-#endif
- } s;
- struct cvmx_gmxx_txx_clk_s cn30xx;
- struct cvmx_gmxx_txx_clk_s cn31xx;
- struct cvmx_gmxx_txx_clk_s cn38xx;
- struct cvmx_gmxx_txx_clk_s cn38xxp2;
- struct cvmx_gmxx_txx_clk_s cn50xx;
- struct cvmx_gmxx_txx_clk_s cn58xx;
- struct cvmx_gmxx_txx_clk_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_ctl {
- uint64_t u64;
- struct cvmx_gmxx_txx_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t xsdef_en:1;
- uint64_t xscol_en:1;
-#else
- uint64_t xscol_en:1;
- uint64_t xsdef_en:1;
- uint64_t reserved_2_63:62;
-#endif
- } s;
- struct cvmx_gmxx_txx_ctl_s cn30xx;
- struct cvmx_gmxx_txx_ctl_s cn31xx;
- struct cvmx_gmxx_txx_ctl_s cn38xx;
- struct cvmx_gmxx_txx_ctl_s cn38xxp2;
- struct cvmx_gmxx_txx_ctl_s cn50xx;
- struct cvmx_gmxx_txx_ctl_s cn52xx;
- struct cvmx_gmxx_txx_ctl_s cn52xxp1;
- struct cvmx_gmxx_txx_ctl_s cn56xx;
- struct cvmx_gmxx_txx_ctl_s cn56xxp1;
- struct cvmx_gmxx_txx_ctl_s cn58xx;
- struct cvmx_gmxx_txx_ctl_s cn58xxp1;
- struct cvmx_gmxx_txx_ctl_s cn61xx;
- struct cvmx_gmxx_txx_ctl_s cn63xx;
- struct cvmx_gmxx_txx_ctl_s cn63xxp1;
- struct cvmx_gmxx_txx_ctl_s cn66xx;
- struct cvmx_gmxx_txx_ctl_s cn68xx;
- struct cvmx_gmxx_txx_ctl_s cn68xxp1;
- struct cvmx_gmxx_txx_ctl_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_min_pkt {
- uint64_t u64;
- struct cvmx_gmxx_txx_min_pkt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t min_size:8;
-#else
- uint64_t min_size:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_gmxx_txx_min_pkt_s cn30xx;
- struct cvmx_gmxx_txx_min_pkt_s cn31xx;
- struct cvmx_gmxx_txx_min_pkt_s cn38xx;
- struct cvmx_gmxx_txx_min_pkt_s cn38xxp2;
- struct cvmx_gmxx_txx_min_pkt_s cn50xx;
- struct cvmx_gmxx_txx_min_pkt_s cn52xx;
- struct cvmx_gmxx_txx_min_pkt_s cn52xxp1;
- struct cvmx_gmxx_txx_min_pkt_s cn56xx;
- struct cvmx_gmxx_txx_min_pkt_s cn56xxp1;
- struct cvmx_gmxx_txx_min_pkt_s cn58xx;
- struct cvmx_gmxx_txx_min_pkt_s cn58xxp1;
- struct cvmx_gmxx_txx_min_pkt_s cn61xx;
- struct cvmx_gmxx_txx_min_pkt_s cn63xx;
- struct cvmx_gmxx_txx_min_pkt_s cn63xxp1;
- struct cvmx_gmxx_txx_min_pkt_s cn66xx;
- struct cvmx_gmxx_txx_min_pkt_s cn68xx;
- struct cvmx_gmxx_txx_min_pkt_s cn68xxp1;
- struct cvmx_gmxx_txx_min_pkt_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_pause_pkt_interval {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_pkt_interval_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t interval:16;
-#else
- uint64_t interval:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn61xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn63xxp1;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn66xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn68xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn68xxp1;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_pause_pkt_time {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_pkt_time_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t time:16;
-#else
- uint64_t time:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn61xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn63xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn63xxp1;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn66xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn68xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn68xxp1;
- struct cvmx_gmxx_txx_pause_pkt_time_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_pause_togo {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_togo_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t msg_time:16;
- uint64_t time:16;
-#else
- uint64_t time:16;
- uint64_t msg_time:16;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_gmxx_txx_pause_togo_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t time:16;
-#else
- uint64_t time:16;
- uint64_t reserved_16_63:48;
-#endif
- } cn30xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx;
- struct cvmx_gmxx_txx_pause_togo_s cn52xx;
- struct cvmx_gmxx_txx_pause_togo_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_togo_s cn56xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1;
- struct cvmx_gmxx_txx_pause_togo_s cn61xx;
- struct cvmx_gmxx_txx_pause_togo_s cn63xx;
- struct cvmx_gmxx_txx_pause_togo_s cn63xxp1;
- struct cvmx_gmxx_txx_pause_togo_s cn66xx;
- struct cvmx_gmxx_txx_pause_togo_s cn68xx;
- struct cvmx_gmxx_txx_pause_togo_s cn68xxp1;
- struct cvmx_gmxx_txx_pause_togo_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_pause_zero {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_zero_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t send:1;
-#else
- uint64_t send:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_gmxx_txx_pause_zero_s cn30xx;
- struct cvmx_gmxx_txx_pause_zero_s cn31xx;
- struct cvmx_gmxx_txx_pause_zero_s cn38xx;
- struct cvmx_gmxx_txx_pause_zero_s cn38xxp2;
- struct cvmx_gmxx_txx_pause_zero_s cn50xx;
- struct cvmx_gmxx_txx_pause_zero_s cn52xx;
- struct cvmx_gmxx_txx_pause_zero_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_zero_s cn56xx;
- struct cvmx_gmxx_txx_pause_zero_s cn56xxp1;
- struct cvmx_gmxx_txx_pause_zero_s cn58xx;
- struct cvmx_gmxx_txx_pause_zero_s cn58xxp1;
- struct cvmx_gmxx_txx_pause_zero_s cn61xx;
- struct cvmx_gmxx_txx_pause_zero_s cn63xx;
- struct cvmx_gmxx_txx_pause_zero_s cn63xxp1;
- struct cvmx_gmxx_txx_pause_zero_s cn66xx;
- struct cvmx_gmxx_txx_pause_zero_s cn68xx;
- struct cvmx_gmxx_txx_pause_zero_s cn68xxp1;
- struct cvmx_gmxx_txx_pause_zero_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_pipe {
- uint64_t u64;
- struct cvmx_gmxx_txx_pipe_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_33_63:31;
- uint64_t ign_bp:1;
- uint64_t reserved_21_31:11;
- uint64_t nump:5;
- uint64_t reserved_7_15:9;
- uint64_t base:7;
-#else
- uint64_t base:7;
- uint64_t reserved_7_15:9;
- uint64_t nump:5;
- uint64_t reserved_21_31:11;
- uint64_t ign_bp:1;
- uint64_t reserved_33_63:31;
-#endif
- } s;
- struct cvmx_gmxx_txx_pipe_s cn68xx;
- struct cvmx_gmxx_txx_pipe_s cn68xxp1;
-};
-
-union cvmx_gmxx_txx_sgmii_ctl {
- uint64_t u64;
- struct cvmx_gmxx_txx_sgmii_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t align:1;
-#else
- uint64_t align:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn52xx;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn52xxp1;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn56xx;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn56xxp1;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn61xx;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn63xx;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn63xxp1;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn66xx;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn68xx;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn68xxp1;
- struct cvmx_gmxx_txx_sgmii_ctl_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_slot {
- uint64_t u64;
- struct cvmx_gmxx_txx_slot_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_10_63:54;
- uint64_t slot:10;
-#else
- uint64_t slot:10;
- uint64_t reserved_10_63:54;
-#endif
- } s;
- struct cvmx_gmxx_txx_slot_s cn30xx;
- struct cvmx_gmxx_txx_slot_s cn31xx;
- struct cvmx_gmxx_txx_slot_s cn38xx;
- struct cvmx_gmxx_txx_slot_s cn38xxp2;
- struct cvmx_gmxx_txx_slot_s cn50xx;
- struct cvmx_gmxx_txx_slot_s cn52xx;
- struct cvmx_gmxx_txx_slot_s cn52xxp1;
- struct cvmx_gmxx_txx_slot_s cn56xx;
- struct cvmx_gmxx_txx_slot_s cn56xxp1;
- struct cvmx_gmxx_txx_slot_s cn58xx;
- struct cvmx_gmxx_txx_slot_s cn58xxp1;
- struct cvmx_gmxx_txx_slot_s cn61xx;
- struct cvmx_gmxx_txx_slot_s cn63xx;
- struct cvmx_gmxx_txx_slot_s cn63xxp1;
- struct cvmx_gmxx_txx_slot_s cn66xx;
- struct cvmx_gmxx_txx_slot_s cn68xx;
- struct cvmx_gmxx_txx_slot_s cn68xxp1;
- struct cvmx_gmxx_txx_slot_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_soft_pause {
- uint64_t u64;
- struct cvmx_gmxx_txx_soft_pause_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t time:16;
-#else
- uint64_t time:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_txx_soft_pause_s cn30xx;
- struct cvmx_gmxx_txx_soft_pause_s cn31xx;
- struct cvmx_gmxx_txx_soft_pause_s cn38xx;
- struct cvmx_gmxx_txx_soft_pause_s cn38xxp2;
- struct cvmx_gmxx_txx_soft_pause_s cn50xx;
- struct cvmx_gmxx_txx_soft_pause_s cn52xx;
- struct cvmx_gmxx_txx_soft_pause_s cn52xxp1;
- struct cvmx_gmxx_txx_soft_pause_s cn56xx;
- struct cvmx_gmxx_txx_soft_pause_s cn56xxp1;
- struct cvmx_gmxx_txx_soft_pause_s cn58xx;
- struct cvmx_gmxx_txx_soft_pause_s cn58xxp1;
- struct cvmx_gmxx_txx_soft_pause_s cn61xx;
- struct cvmx_gmxx_txx_soft_pause_s cn63xx;
- struct cvmx_gmxx_txx_soft_pause_s cn63xxp1;
- struct cvmx_gmxx_txx_soft_pause_s cn66xx;
- struct cvmx_gmxx_txx_soft_pause_s cn68xx;
- struct cvmx_gmxx_txx_soft_pause_s cn68xxp1;
- struct cvmx_gmxx_txx_soft_pause_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_stat0 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat0_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t xsdef:32;
- uint64_t xscol:32;
-#else
- uint64_t xscol:32;
- uint64_t xsdef:32;
-#endif
- } s;
- struct cvmx_gmxx_txx_stat0_s cn30xx;
- struct cvmx_gmxx_txx_stat0_s cn31xx;
- struct cvmx_gmxx_txx_stat0_s cn38xx;
- struct cvmx_gmxx_txx_stat0_s cn38xxp2;
- struct cvmx_gmxx_txx_stat0_s cn50xx;
- struct cvmx_gmxx_txx_stat0_s cn52xx;
- struct cvmx_gmxx_txx_stat0_s cn52xxp1;
- struct cvmx_gmxx_txx_stat0_s cn56xx;
- struct cvmx_gmxx_txx_stat0_s cn56xxp1;
- struct cvmx_gmxx_txx_stat0_s cn58xx;
- struct cvmx_gmxx_txx_stat0_s cn58xxp1;
- struct cvmx_gmxx_txx_stat0_s cn61xx;
- struct cvmx_gmxx_txx_stat0_s cn63xx;
- struct cvmx_gmxx_txx_stat0_s cn63xxp1;
- struct cvmx_gmxx_txx_stat0_s cn66xx;
- struct cvmx_gmxx_txx_stat0_s cn68xx;
- struct cvmx_gmxx_txx_stat0_s cn68xxp1;
- struct cvmx_gmxx_txx_stat0_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_stat1 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t scol:32;
- uint64_t mcol:32;
-#else
- uint64_t mcol:32;
- uint64_t scol:32;
-#endif
- } s;
- struct cvmx_gmxx_txx_stat1_s cn30xx;
- struct cvmx_gmxx_txx_stat1_s cn31xx;
- struct cvmx_gmxx_txx_stat1_s cn38xx;
- struct cvmx_gmxx_txx_stat1_s cn38xxp2;
- struct cvmx_gmxx_txx_stat1_s cn50xx;
- struct cvmx_gmxx_txx_stat1_s cn52xx;
- struct cvmx_gmxx_txx_stat1_s cn52xxp1;
- struct cvmx_gmxx_txx_stat1_s cn56xx;
- struct cvmx_gmxx_txx_stat1_s cn56xxp1;
- struct cvmx_gmxx_txx_stat1_s cn58xx;
- struct cvmx_gmxx_txx_stat1_s cn58xxp1;
- struct cvmx_gmxx_txx_stat1_s cn61xx;
- struct cvmx_gmxx_txx_stat1_s cn63xx;
- struct cvmx_gmxx_txx_stat1_s cn63xxp1;
- struct cvmx_gmxx_txx_stat1_s cn66xx;
- struct cvmx_gmxx_txx_stat1_s cn68xx;
- struct cvmx_gmxx_txx_stat1_s cn68xxp1;
- struct cvmx_gmxx_txx_stat1_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_stat2 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
- uint64_t octs:48;
-#else
- uint64_t octs:48;
- uint64_t reserved_48_63:16;
-#endif
- } s;
- struct cvmx_gmxx_txx_stat2_s cn30xx;
- struct cvmx_gmxx_txx_stat2_s cn31xx;
- struct cvmx_gmxx_txx_stat2_s cn38xx;
- struct cvmx_gmxx_txx_stat2_s cn38xxp2;
- struct cvmx_gmxx_txx_stat2_s cn50xx;
- struct cvmx_gmxx_txx_stat2_s cn52xx;
- struct cvmx_gmxx_txx_stat2_s cn52xxp1;
- struct cvmx_gmxx_txx_stat2_s cn56xx;
- struct cvmx_gmxx_txx_stat2_s cn56xxp1;
- struct cvmx_gmxx_txx_stat2_s cn58xx;
- struct cvmx_gmxx_txx_stat2_s cn58xxp1;
- struct cvmx_gmxx_txx_stat2_s cn61xx;
- struct cvmx_gmxx_txx_stat2_s cn63xx;
- struct cvmx_gmxx_txx_stat2_s cn63xxp1;
- struct cvmx_gmxx_txx_stat2_s cn66xx;
- struct cvmx_gmxx_txx_stat2_s cn68xx;
- struct cvmx_gmxx_txx_stat2_s cn68xxp1;
- struct cvmx_gmxx_txx_stat2_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_stat3 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat3_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_32_63:32;
- uint64_t pkts:32;
-#else
- uint64_t pkts:32;
- uint64_t reserved_32_63:32;
-#endif
- } s;
- struct cvmx_gmxx_txx_stat3_s cn30xx;
- struct cvmx_gmxx_txx_stat3_s cn31xx;
- struct cvmx_gmxx_txx_stat3_s cn38xx;
- struct cvmx_gmxx_txx_stat3_s cn38xxp2;
- struct cvmx_gmxx_txx_stat3_s cn50xx;
- struct cvmx_gmxx_txx_stat3_s cn52xx;
- struct cvmx_gmxx_txx_stat3_s cn52xxp1;
- struct cvmx_gmxx_txx_stat3_s cn56xx;
- struct cvmx_gmxx_txx_stat3_s cn56xxp1;
- struct cvmx_gmxx_txx_stat3_s cn58xx;
- struct cvmx_gmxx_txx_stat3_s cn58xxp1;
- struct cvmx_gmxx_txx_stat3_s cn61xx;
- struct cvmx_gmxx_txx_stat3_s cn63xx;
- struct cvmx_gmxx_txx_stat3_s cn63xxp1;
- struct cvmx_gmxx_txx_stat3_s cn66xx;
- struct cvmx_gmxx_txx_stat3_s cn68xx;
- struct cvmx_gmxx_txx_stat3_s cn68xxp1;
- struct cvmx_gmxx_txx_stat3_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_stat4 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat4_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t hist1:32;
- uint64_t hist0:32;
-#else
- uint64_t hist0:32;
- uint64_t hist1:32;
-#endif
- } s;
- struct cvmx_gmxx_txx_stat4_s cn30xx;
- struct cvmx_gmxx_txx_stat4_s cn31xx;
- struct cvmx_gmxx_txx_stat4_s cn38xx;
- struct cvmx_gmxx_txx_stat4_s cn38xxp2;
- struct cvmx_gmxx_txx_stat4_s cn50xx;
- struct cvmx_gmxx_txx_stat4_s cn52xx;
- struct cvmx_gmxx_txx_stat4_s cn52xxp1;
- struct cvmx_gmxx_txx_stat4_s cn56xx;
- struct cvmx_gmxx_txx_stat4_s cn56xxp1;
- struct cvmx_gmxx_txx_stat4_s cn58xx;
- struct cvmx_gmxx_txx_stat4_s cn58xxp1;
- struct cvmx_gmxx_txx_stat4_s cn61xx;
- struct cvmx_gmxx_txx_stat4_s cn63xx;
- struct cvmx_gmxx_txx_stat4_s cn63xxp1;
- struct cvmx_gmxx_txx_stat4_s cn66xx;
- struct cvmx_gmxx_txx_stat4_s cn68xx;
- struct cvmx_gmxx_txx_stat4_s cn68xxp1;
- struct cvmx_gmxx_txx_stat4_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_stat5 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat5_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t hist3:32;
- uint64_t hist2:32;
-#else
- uint64_t hist2:32;
- uint64_t hist3:32;
-#endif
- } s;
- struct cvmx_gmxx_txx_stat5_s cn30xx;
- struct cvmx_gmxx_txx_stat5_s cn31xx;
- struct cvmx_gmxx_txx_stat5_s cn38xx;
- struct cvmx_gmxx_txx_stat5_s cn38xxp2;
- struct cvmx_gmxx_txx_stat5_s cn50xx;
- struct cvmx_gmxx_txx_stat5_s cn52xx;
- struct cvmx_gmxx_txx_stat5_s cn52xxp1;
- struct cvmx_gmxx_txx_stat5_s cn56xx;
- struct cvmx_gmxx_txx_stat5_s cn56xxp1;
- struct cvmx_gmxx_txx_stat5_s cn58xx;
- struct cvmx_gmxx_txx_stat5_s cn58xxp1;
- struct cvmx_gmxx_txx_stat5_s cn61xx;
- struct cvmx_gmxx_txx_stat5_s cn63xx;
- struct cvmx_gmxx_txx_stat5_s cn63xxp1;
- struct cvmx_gmxx_txx_stat5_s cn66xx;
- struct cvmx_gmxx_txx_stat5_s cn68xx;
- struct cvmx_gmxx_txx_stat5_s cn68xxp1;
- struct cvmx_gmxx_txx_stat5_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_stat6 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat6_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t hist5:32;
- uint64_t hist4:32;
-#else
- uint64_t hist4:32;
- uint64_t hist5:32;
-#endif
- } s;
- struct cvmx_gmxx_txx_stat6_s cn30xx;
- struct cvmx_gmxx_txx_stat6_s cn31xx;
- struct cvmx_gmxx_txx_stat6_s cn38xx;
- struct cvmx_gmxx_txx_stat6_s cn38xxp2;
- struct cvmx_gmxx_txx_stat6_s cn50xx;
- struct cvmx_gmxx_txx_stat6_s cn52xx;
- struct cvmx_gmxx_txx_stat6_s cn52xxp1;
- struct cvmx_gmxx_txx_stat6_s cn56xx;
- struct cvmx_gmxx_txx_stat6_s cn56xxp1;
- struct cvmx_gmxx_txx_stat6_s cn58xx;
- struct cvmx_gmxx_txx_stat6_s cn58xxp1;
- struct cvmx_gmxx_txx_stat6_s cn61xx;
- struct cvmx_gmxx_txx_stat6_s cn63xx;
- struct cvmx_gmxx_txx_stat6_s cn63xxp1;
- struct cvmx_gmxx_txx_stat6_s cn66xx;
- struct cvmx_gmxx_txx_stat6_s cn68xx;
- struct cvmx_gmxx_txx_stat6_s cn68xxp1;
- struct cvmx_gmxx_txx_stat6_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_stat7 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat7_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t hist7:32;
- uint64_t hist6:32;
-#else
- uint64_t hist6:32;
- uint64_t hist7:32;
-#endif
- } s;
- struct cvmx_gmxx_txx_stat7_s cn30xx;
- struct cvmx_gmxx_txx_stat7_s cn31xx;
- struct cvmx_gmxx_txx_stat7_s cn38xx;
- struct cvmx_gmxx_txx_stat7_s cn38xxp2;
- struct cvmx_gmxx_txx_stat7_s cn50xx;
- struct cvmx_gmxx_txx_stat7_s cn52xx;
- struct cvmx_gmxx_txx_stat7_s cn52xxp1;
- struct cvmx_gmxx_txx_stat7_s cn56xx;
- struct cvmx_gmxx_txx_stat7_s cn56xxp1;
- struct cvmx_gmxx_txx_stat7_s cn58xx;
- struct cvmx_gmxx_txx_stat7_s cn58xxp1;
- struct cvmx_gmxx_txx_stat7_s cn61xx;
- struct cvmx_gmxx_txx_stat7_s cn63xx;
- struct cvmx_gmxx_txx_stat7_s cn63xxp1;
- struct cvmx_gmxx_txx_stat7_s cn66xx;
- struct cvmx_gmxx_txx_stat7_s cn68xx;
- struct cvmx_gmxx_txx_stat7_s cn68xxp1;
- struct cvmx_gmxx_txx_stat7_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_stat8 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat8_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t mcst:32;
- uint64_t bcst:32;
-#else
- uint64_t bcst:32;
- uint64_t mcst:32;
-#endif
- } s;
- struct cvmx_gmxx_txx_stat8_s cn30xx;
- struct cvmx_gmxx_txx_stat8_s cn31xx;
- struct cvmx_gmxx_txx_stat8_s cn38xx;
- struct cvmx_gmxx_txx_stat8_s cn38xxp2;
- struct cvmx_gmxx_txx_stat8_s cn50xx;
- struct cvmx_gmxx_txx_stat8_s cn52xx;
- struct cvmx_gmxx_txx_stat8_s cn52xxp1;
- struct cvmx_gmxx_txx_stat8_s cn56xx;
- struct cvmx_gmxx_txx_stat8_s cn56xxp1;
- struct cvmx_gmxx_txx_stat8_s cn58xx;
- struct cvmx_gmxx_txx_stat8_s cn58xxp1;
- struct cvmx_gmxx_txx_stat8_s cn61xx;
- struct cvmx_gmxx_txx_stat8_s cn63xx;
- struct cvmx_gmxx_txx_stat8_s cn63xxp1;
- struct cvmx_gmxx_txx_stat8_s cn66xx;
- struct cvmx_gmxx_txx_stat8_s cn68xx;
- struct cvmx_gmxx_txx_stat8_s cn68xxp1;
- struct cvmx_gmxx_txx_stat8_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_stat9 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat9_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t undflw:32;
- uint64_t ctl:32;
-#else
- uint64_t ctl:32;
- uint64_t undflw:32;
-#endif
- } s;
- struct cvmx_gmxx_txx_stat9_s cn30xx;
- struct cvmx_gmxx_txx_stat9_s cn31xx;
- struct cvmx_gmxx_txx_stat9_s cn38xx;
- struct cvmx_gmxx_txx_stat9_s cn38xxp2;
- struct cvmx_gmxx_txx_stat9_s cn50xx;
- struct cvmx_gmxx_txx_stat9_s cn52xx;
- struct cvmx_gmxx_txx_stat9_s cn52xxp1;
- struct cvmx_gmxx_txx_stat9_s cn56xx;
- struct cvmx_gmxx_txx_stat9_s cn56xxp1;
- struct cvmx_gmxx_txx_stat9_s cn58xx;
- struct cvmx_gmxx_txx_stat9_s cn58xxp1;
- struct cvmx_gmxx_txx_stat9_s cn61xx;
- struct cvmx_gmxx_txx_stat9_s cn63xx;
- struct cvmx_gmxx_txx_stat9_s cn63xxp1;
- struct cvmx_gmxx_txx_stat9_s cn66xx;
- struct cvmx_gmxx_txx_stat9_s cn68xx;
- struct cvmx_gmxx_txx_stat9_s cn68xxp1;
- struct cvmx_gmxx_txx_stat9_s cnf71xx;
-};
-
-union cvmx_gmxx_txx_stats_ctl {
- uint64_t u64;
- struct cvmx_gmxx_txx_stats_ctl_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t rd_clr:1;
-#else
- uint64_t rd_clr:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_gmxx_txx_stats_ctl_s cn30xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn31xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn38xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn38xxp2;
- struct cvmx_gmxx_txx_stats_ctl_s cn50xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn52xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn52xxp1;
- struct cvmx_gmxx_txx_stats_ctl_s cn56xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn56xxp1;
- struct cvmx_gmxx_txx_stats_ctl_s cn58xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn58xxp1;
- struct cvmx_gmxx_txx_stats_ctl_s cn61xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn63xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn63xxp1;
- struct cvmx_gmxx_txx_stats_ctl_s cn66xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn68xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn68xxp1;
- struct cvmx_gmxx_txx_stats_ctl_s cnf71xx;
};
union cvmx_gmxx_txx_thresh {
@@ -5824,7 +1643,6 @@ union cvmx_gmxx_txx_thresh {
uint64_t reserved_7_63:57;
#endif
} cn30xx;
- struct cvmx_gmxx_txx_thresh_cn30xx cn31xx;
struct cvmx_gmxx_txx_thresh_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
@@ -5834,240 +1652,6 @@ union cvmx_gmxx_txx_thresh {
uint64_t reserved_9_63:55;
#endif
} cn38xx;
- struct cvmx_gmxx_txx_thresh_cn38xx cn38xxp2;
- struct cvmx_gmxx_txx_thresh_cn30xx cn50xx;
- struct cvmx_gmxx_txx_thresh_cn38xx cn52xx;
- struct cvmx_gmxx_txx_thresh_cn38xx cn52xxp1;
- struct cvmx_gmxx_txx_thresh_cn38xx cn56xx;
- struct cvmx_gmxx_txx_thresh_cn38xx cn56xxp1;
- struct cvmx_gmxx_txx_thresh_cn38xx cn58xx;
- struct cvmx_gmxx_txx_thresh_cn38xx cn58xxp1;
- struct cvmx_gmxx_txx_thresh_cn38xx cn61xx;
- struct cvmx_gmxx_txx_thresh_cn38xx cn63xx;
- struct cvmx_gmxx_txx_thresh_cn38xx cn63xxp1;
- struct cvmx_gmxx_txx_thresh_cn38xx cn66xx;
- struct cvmx_gmxx_txx_thresh_s cn68xx;
- struct cvmx_gmxx_txx_thresh_s cn68xxp1;
- struct cvmx_gmxx_txx_thresh_cn38xx cnf71xx;
-};
-
-union cvmx_gmxx_tx_bp {
- uint64_t u64;
- struct cvmx_gmxx_tx_bp_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t bp:4;
-#else
- uint64_t bp:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_gmxx_tx_bp_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t bp:3;
-#else
- uint64_t bp:3;
- uint64_t reserved_3_63:61;
-#endif
- } cn30xx;
- struct cvmx_gmxx_tx_bp_cn30xx cn31xx;
- struct cvmx_gmxx_tx_bp_s cn38xx;
- struct cvmx_gmxx_tx_bp_s cn38xxp2;
- struct cvmx_gmxx_tx_bp_cn30xx cn50xx;
- struct cvmx_gmxx_tx_bp_s cn52xx;
- struct cvmx_gmxx_tx_bp_s cn52xxp1;
- struct cvmx_gmxx_tx_bp_s cn56xx;
- struct cvmx_gmxx_tx_bp_s cn56xxp1;
- struct cvmx_gmxx_tx_bp_s cn58xx;
- struct cvmx_gmxx_tx_bp_s cn58xxp1;
- struct cvmx_gmxx_tx_bp_s cn61xx;
- struct cvmx_gmxx_tx_bp_s cn63xx;
- struct cvmx_gmxx_tx_bp_s cn63xxp1;
- struct cvmx_gmxx_tx_bp_s cn66xx;
- struct cvmx_gmxx_tx_bp_s cn68xx;
- struct cvmx_gmxx_tx_bp_s cn68xxp1;
- struct cvmx_gmxx_tx_bp_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t bp:2;
-#else
- uint64_t bp:2;
- uint64_t reserved_2_63:62;
-#endif
- } cnf71xx;
-};
-
-union cvmx_gmxx_tx_clk_mskx {
- uint64_t u64;
- struct cvmx_gmxx_tx_clk_mskx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_1_63:63;
- uint64_t msk:1;
-#else
- uint64_t msk:1;
- uint64_t reserved_1_63:63;
-#endif
- } s;
- struct cvmx_gmxx_tx_clk_mskx_s cn30xx;
- struct cvmx_gmxx_tx_clk_mskx_s cn50xx;
-};
-
-union cvmx_gmxx_tx_col_attempt {
- uint64_t u64;
- struct cvmx_gmxx_tx_col_attempt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t limit:5;
-#else
- uint64_t limit:5;
- uint64_t reserved_5_63:59;
-#endif
- } s;
- struct cvmx_gmxx_tx_col_attempt_s cn30xx;
- struct cvmx_gmxx_tx_col_attempt_s cn31xx;
- struct cvmx_gmxx_tx_col_attempt_s cn38xx;
- struct cvmx_gmxx_tx_col_attempt_s cn38xxp2;
- struct cvmx_gmxx_tx_col_attempt_s cn50xx;
- struct cvmx_gmxx_tx_col_attempt_s cn52xx;
- struct cvmx_gmxx_tx_col_attempt_s cn52xxp1;
- struct cvmx_gmxx_tx_col_attempt_s cn56xx;
- struct cvmx_gmxx_tx_col_attempt_s cn56xxp1;
- struct cvmx_gmxx_tx_col_attempt_s cn58xx;
- struct cvmx_gmxx_tx_col_attempt_s cn58xxp1;
- struct cvmx_gmxx_tx_col_attempt_s cn61xx;
- struct cvmx_gmxx_tx_col_attempt_s cn63xx;
- struct cvmx_gmxx_tx_col_attempt_s cn63xxp1;
- struct cvmx_gmxx_tx_col_attempt_s cn66xx;
- struct cvmx_gmxx_tx_col_attempt_s cn68xx;
- struct cvmx_gmxx_tx_col_attempt_s cn68xxp1;
- struct cvmx_gmxx_tx_col_attempt_s cnf71xx;
-};
-
-union cvmx_gmxx_tx_corrupt {
- uint64_t u64;
- struct cvmx_gmxx_tx_corrupt_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_4_63:60;
- uint64_t corrupt:4;
-#else
- uint64_t corrupt:4;
- uint64_t reserved_4_63:60;
-#endif
- } s;
- struct cvmx_gmxx_tx_corrupt_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_3_63:61;
- uint64_t corrupt:3;
-#else
- uint64_t corrupt:3;
- uint64_t reserved_3_63:61;
-#endif
- } cn30xx;
- struct cvmx_gmxx_tx_corrupt_cn30xx cn31xx;
- struct cvmx_gmxx_tx_corrupt_s cn38xx;
- struct cvmx_gmxx_tx_corrupt_s cn38xxp2;
- struct cvmx_gmxx_tx_corrupt_cn30xx cn50xx;
- struct cvmx_gmxx_tx_corrupt_s cn52xx;
- struct cvmx_gmxx_tx_corrupt_s cn52xxp1;
- struct cvmx_gmxx_tx_corrupt_s cn56xx;
- struct cvmx_gmxx_tx_corrupt_s cn56xxp1;
- struct cvmx_gmxx_tx_corrupt_s cn58xx;
- struct cvmx_gmxx_tx_corrupt_s cn58xxp1;
- struct cvmx_gmxx_tx_corrupt_s cn61xx;
- struct cvmx_gmxx_tx_corrupt_s cn63xx;
- struct cvmx_gmxx_tx_corrupt_s cn63xxp1;
- struct cvmx_gmxx_tx_corrupt_s cn66xx;
- struct cvmx_gmxx_tx_corrupt_s cn68xx;
- struct cvmx_gmxx_tx_corrupt_s cn68xxp1;
- struct cvmx_gmxx_tx_corrupt_cnf71xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_2_63:62;
- uint64_t corrupt:2;
-#else
- uint64_t corrupt:2;
- uint64_t reserved_2_63:62;
-#endif
- } cnf71xx;
-};
-
-union cvmx_gmxx_tx_hg2_reg1 {
- uint64_t u64;
- struct cvmx_gmxx_tx_hg2_reg1_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t tx_xof:16;
-#else
- uint64_t tx_xof:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_tx_hg2_reg1_s cn52xx;
- struct cvmx_gmxx_tx_hg2_reg1_s cn52xxp1;
- struct cvmx_gmxx_tx_hg2_reg1_s cn56xx;
- struct cvmx_gmxx_tx_hg2_reg1_s cn61xx;
- struct cvmx_gmxx_tx_hg2_reg1_s cn63xx;
- struct cvmx_gmxx_tx_hg2_reg1_s cn63xxp1;
- struct cvmx_gmxx_tx_hg2_reg1_s cn66xx;
- struct cvmx_gmxx_tx_hg2_reg1_s cn68xx;
- struct cvmx_gmxx_tx_hg2_reg1_s cn68xxp1;
- struct cvmx_gmxx_tx_hg2_reg1_s cnf71xx;
-};
-
-union cvmx_gmxx_tx_hg2_reg2 {
- uint64_t u64;
- struct cvmx_gmxx_tx_hg2_reg2_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t tx_xon:16;
-#else
- uint64_t tx_xon:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_tx_hg2_reg2_s cn52xx;
- struct cvmx_gmxx_tx_hg2_reg2_s cn52xxp1;
- struct cvmx_gmxx_tx_hg2_reg2_s cn56xx;
- struct cvmx_gmxx_tx_hg2_reg2_s cn61xx;
- struct cvmx_gmxx_tx_hg2_reg2_s cn63xx;
- struct cvmx_gmxx_tx_hg2_reg2_s cn63xxp1;
- struct cvmx_gmxx_tx_hg2_reg2_s cn66xx;
- struct cvmx_gmxx_tx_hg2_reg2_s cn68xx;
- struct cvmx_gmxx_tx_hg2_reg2_s cn68xxp1;
- struct cvmx_gmxx_tx_hg2_reg2_s cnf71xx;
-};
-
-union cvmx_gmxx_tx_ifg {
- uint64_t u64;
- struct cvmx_gmxx_tx_ifg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t ifg2:4;
- uint64_t ifg1:4;
-#else
- uint64_t ifg1:4;
- uint64_t ifg2:4;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_gmxx_tx_ifg_s cn30xx;
- struct cvmx_gmxx_tx_ifg_s cn31xx;
- struct cvmx_gmxx_tx_ifg_s cn38xx;
- struct cvmx_gmxx_tx_ifg_s cn38xxp2;
- struct cvmx_gmxx_tx_ifg_s cn50xx;
- struct cvmx_gmxx_tx_ifg_s cn52xx;
- struct cvmx_gmxx_tx_ifg_s cn52xxp1;
- struct cvmx_gmxx_tx_ifg_s cn56xx;
- struct cvmx_gmxx_tx_ifg_s cn56xxp1;
- struct cvmx_gmxx_tx_ifg_s cn58xx;
- struct cvmx_gmxx_tx_ifg_s cn58xxp1;
- struct cvmx_gmxx_tx_ifg_s cn61xx;
- struct cvmx_gmxx_tx_ifg_s cn63xx;
- struct cvmx_gmxx_tx_ifg_s cn63xxp1;
- struct cvmx_gmxx_tx_ifg_s cn66xx;
- struct cvmx_gmxx_tx_ifg_s cn68xx;
- struct cvmx_gmxx_tx_ifg_s cn68xxp1;
- struct cvmx_gmxx_tx_ifg_s cnf71xx;
};
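Where a register's layout differs between chip models, callers pick the matching model-specific view of the union rather than `s`; a hedged sketch of that idiom for the threshold register above, assuming the OCTEON_IS_MODEL() predicate from octeon-model.h and the CVMX_GMXX_TXX_THRESH() address macro from this header:

/* Sketch under the stated assumptions: cnt is a 7-bit field in the
 * cn30xx layout and a 9-bit field in the cn38xx-style layout. */
static void gmx_set_tx_thresh(int index, int interface, unsigned int cnt)
{
	union cvmx_gmxx_txx_thresh thresh;

	thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(index, interface));
	if (OCTEON_IS_MODEL(OCTEON_CN30XX))
		thresh.cn30xx.cnt = cnt;
	else
		thresh.cn38xx.cnt = cnt;
	cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface), thresh.u64);
}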
union cvmx_gmxx_tx_int_en {
@@ -6183,7 +1767,6 @@ union cvmx_gmxx_tx_int_en {
uint64_t reserved_16_63:48;
#endif
} cn38xxp2;
- struct cvmx_gmxx_tx_int_en_cn30xx cn50xx;
struct cvmx_gmxx_tx_int_en_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -6205,12 +1788,6 @@ union cvmx_gmxx_tx_int_en {
uint64_t reserved_20_63:44;
#endif
} cn52xx;
- struct cvmx_gmxx_tx_int_en_cn52xx cn52xxp1;
- struct cvmx_gmxx_tx_int_en_cn52xx cn56xx;
- struct cvmx_gmxx_tx_int_en_cn52xx cn56xxp1;
- struct cvmx_gmxx_tx_int_en_cn38xx cn58xx;
- struct cvmx_gmxx_tx_int_en_cn38xx cn58xxp1;
- struct cvmx_gmxx_tx_int_en_s cn61xx;
struct cvmx_gmxx_tx_int_en_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63:40;
@@ -6234,8 +1811,6 @@ union cvmx_gmxx_tx_int_en {
uint64_t reserved_24_63:40;
#endif
} cn63xx;
- struct cvmx_gmxx_tx_int_en_cn63xx cn63xxp1;
- struct cvmx_gmxx_tx_int_en_s cn66xx;
struct cvmx_gmxx_tx_int_en_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_25_63:39;
@@ -6261,7 +1836,6 @@ union cvmx_gmxx_tx_int_en {
uint64_t reserved_25_63:39;
#endif
} cn68xx;
- struct cvmx_gmxx_tx_int_en_cn68xx cn68xxp1;
struct cvmx_gmxx_tx_int_en_cnf71xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_25_63:39;
@@ -6410,7 +1984,6 @@ union cvmx_gmxx_tx_int_reg {
uint64_t reserved_16_63:48;
#endif
} cn38xxp2;
- struct cvmx_gmxx_tx_int_reg_cn30xx cn50xx;
struct cvmx_gmxx_tx_int_reg_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -6432,12 +2005,6 @@ union cvmx_gmxx_tx_int_reg {
uint64_t reserved_20_63:44;
#endif
} cn52xx;
- struct cvmx_gmxx_tx_int_reg_cn52xx cn52xxp1;
- struct cvmx_gmxx_tx_int_reg_cn52xx cn56xx;
- struct cvmx_gmxx_tx_int_reg_cn52xx cn56xxp1;
- struct cvmx_gmxx_tx_int_reg_cn38xx cn58xx;
- struct cvmx_gmxx_tx_int_reg_cn38xx cn58xxp1;
- struct cvmx_gmxx_tx_int_reg_s cn61xx;
struct cvmx_gmxx_tx_int_reg_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63:40;
@@ -6461,8 +2028,6 @@ union cvmx_gmxx_tx_int_reg {
uint64_t reserved_24_63:40;
#endif
} cn63xx;
- struct cvmx_gmxx_tx_int_reg_cn63xx cn63xxp1;
- struct cvmx_gmxx_tx_int_reg_s cn66xx;
struct cvmx_gmxx_tx_int_reg_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_25_63:39;
@@ -6488,7 +2053,6 @@ union cvmx_gmxx_tx_int_reg {
uint64_t reserved_25_63:39;
#endif
} cn68xx;
- struct cvmx_gmxx_tx_int_reg_cn68xx cn68xxp1;
struct cvmx_gmxx_tx_int_reg_cnf71xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_25_63:39;
@@ -6524,68 +2088,6 @@ union cvmx_gmxx_tx_int_reg {
} cnf71xx;
};
-union cvmx_gmxx_tx_jam {
- uint64_t u64;
- struct cvmx_gmxx_tx_jam_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t jam:8;
-#else
- uint64_t jam:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_gmxx_tx_jam_s cn30xx;
- struct cvmx_gmxx_tx_jam_s cn31xx;
- struct cvmx_gmxx_tx_jam_s cn38xx;
- struct cvmx_gmxx_tx_jam_s cn38xxp2;
- struct cvmx_gmxx_tx_jam_s cn50xx;
- struct cvmx_gmxx_tx_jam_s cn52xx;
- struct cvmx_gmxx_tx_jam_s cn52xxp1;
- struct cvmx_gmxx_tx_jam_s cn56xx;
- struct cvmx_gmxx_tx_jam_s cn56xxp1;
- struct cvmx_gmxx_tx_jam_s cn58xx;
- struct cvmx_gmxx_tx_jam_s cn58xxp1;
- struct cvmx_gmxx_tx_jam_s cn61xx;
- struct cvmx_gmxx_tx_jam_s cn63xx;
- struct cvmx_gmxx_tx_jam_s cn63xxp1;
- struct cvmx_gmxx_tx_jam_s cn66xx;
- struct cvmx_gmxx_tx_jam_s cn68xx;
- struct cvmx_gmxx_tx_jam_s cn68xxp1;
- struct cvmx_gmxx_tx_jam_s cnf71xx;
-};
-
-union cvmx_gmxx_tx_lfsr {
- uint64_t u64;
- struct cvmx_gmxx_tx_lfsr_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t lfsr:16;
-#else
- uint64_t lfsr:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_tx_lfsr_s cn30xx;
- struct cvmx_gmxx_tx_lfsr_s cn31xx;
- struct cvmx_gmxx_tx_lfsr_s cn38xx;
- struct cvmx_gmxx_tx_lfsr_s cn38xxp2;
- struct cvmx_gmxx_tx_lfsr_s cn50xx;
- struct cvmx_gmxx_tx_lfsr_s cn52xx;
- struct cvmx_gmxx_tx_lfsr_s cn52xxp1;
- struct cvmx_gmxx_tx_lfsr_s cn56xx;
- struct cvmx_gmxx_tx_lfsr_s cn56xxp1;
- struct cvmx_gmxx_tx_lfsr_s cn58xx;
- struct cvmx_gmxx_tx_lfsr_s cn58xxp1;
- struct cvmx_gmxx_tx_lfsr_s cn61xx;
- struct cvmx_gmxx_tx_lfsr_s cn63xx;
- struct cvmx_gmxx_tx_lfsr_s cn63xxp1;
- struct cvmx_gmxx_tx_lfsr_s cn66xx;
- struct cvmx_gmxx_tx_lfsr_s cn68xx;
- struct cvmx_gmxx_tx_lfsr_s cn68xxp1;
- struct cvmx_gmxx_tx_lfsr_s cnf71xx;
-};
-
union cvmx_gmxx_tx_ovr_bp {
uint64_t u64;
struct cvmx_gmxx_tx_ovr_bp_s {
@@ -6622,7 +2124,6 @@ union cvmx_gmxx_tx_ovr_bp {
uint64_t reserved_11_63:53;
#endif
} cn30xx;
- struct cvmx_gmxx_tx_ovr_bp_cn30xx cn31xx;
struct cvmx_gmxx_tx_ovr_bp_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -6636,20 +2137,6 @@ union cvmx_gmxx_tx_ovr_bp {
uint64_t reserved_12_63:52;
#endif
} cn38xx;
- struct cvmx_gmxx_tx_ovr_bp_cn38xx cn38xxp2;
- struct cvmx_gmxx_tx_ovr_bp_cn30xx cn50xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn52xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn52xxp1;
- struct cvmx_gmxx_tx_ovr_bp_s cn56xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn56xxp1;
- struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xx;
- struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xxp1;
- struct cvmx_gmxx_tx_ovr_bp_s cn61xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn63xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn63xxp1;
- struct cvmx_gmxx_tx_ovr_bp_s cn66xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn68xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn68xxp1;
struct cvmx_gmxx_tx_ovr_bp_cnf71xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_48_63:16;
@@ -6673,68 +2160,6 @@ union cvmx_gmxx_tx_ovr_bp {
} cnf71xx;
};
-union cvmx_gmxx_tx_pause_pkt_dmac {
- uint64_t u64;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
- uint64_t dmac:48;
-#else
- uint64_t dmac:48;
- uint64_t reserved_48_63:16;
-#endif
- } s;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn30xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn31xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xxp2;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn50xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xxp1;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xxp1;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xxp1;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn61xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn63xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn63xxp1;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn66xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn68xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn68xxp1;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cnf71xx;
-};
-
-union cvmx_gmxx_tx_pause_pkt_type {
- uint64_t u64;
- struct cvmx_gmxx_tx_pause_pkt_type_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t type:16;
-#else
- uint64_t type:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn30xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn31xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn38xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn38xxp2;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn50xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn52xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn52xxp1;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn56xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn56xxp1;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn58xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn58xxp1;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn61xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn63xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn63xxp1;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn66xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn68xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn68xxp1;
- struct cvmx_gmxx_tx_pause_pkt_type_s cnf71xx;
-};
-
union cvmx_gmxx_tx_prts {
uint64_t u64;
struct cvmx_gmxx_tx_prts_s {
@@ -6746,24 +2171,6 @@ union cvmx_gmxx_tx_prts {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_gmxx_tx_prts_s cn30xx;
- struct cvmx_gmxx_tx_prts_s cn31xx;
- struct cvmx_gmxx_tx_prts_s cn38xx;
- struct cvmx_gmxx_tx_prts_s cn38xxp2;
- struct cvmx_gmxx_tx_prts_s cn50xx;
- struct cvmx_gmxx_tx_prts_s cn52xx;
- struct cvmx_gmxx_tx_prts_s cn52xxp1;
- struct cvmx_gmxx_tx_prts_s cn56xx;
- struct cvmx_gmxx_tx_prts_s cn56xxp1;
- struct cvmx_gmxx_tx_prts_s cn58xx;
- struct cvmx_gmxx_tx_prts_s cn58xxp1;
- struct cvmx_gmxx_tx_prts_s cn61xx;
- struct cvmx_gmxx_tx_prts_s cn63xx;
- struct cvmx_gmxx_tx_prts_s cn63xxp1;
- struct cvmx_gmxx_tx_prts_s cn66xx;
- struct cvmx_gmxx_tx_prts_s cn68xx;
- struct cvmx_gmxx_tx_prts_s cn68xxp1;
- struct cvmx_gmxx_tx_prts_s cnf71xx;
};
union cvmx_gmxx_tx_spi_ctl {
@@ -6779,26 +2186,6 @@ union cvmx_gmxx_tx_spi_ctl {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_gmxx_tx_spi_ctl_s cn38xx;
- struct cvmx_gmxx_tx_spi_ctl_s cn38xxp2;
- struct cvmx_gmxx_tx_spi_ctl_s cn58xx;
- struct cvmx_gmxx_tx_spi_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_drain {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_drain_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t drain:16;
-#else
- uint64_t drain:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_tx_spi_drain_s cn38xx;
- struct cvmx_gmxx_tx_spi_drain_s cn58xx;
- struct cvmx_gmxx_tx_spi_drain_s cn58xxp1;
};
union cvmx_gmxx_tx_spi_max {
@@ -6827,24 +2214,6 @@ union cvmx_gmxx_tx_spi_max {
uint64_t reserved_16_63:48;
#endif
} cn38xx;
- struct cvmx_gmxx_tx_spi_max_cn38xx cn38xxp2;
- struct cvmx_gmxx_tx_spi_max_s cn58xx;
- struct cvmx_gmxx_tx_spi_max_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_roundx {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_roundx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_16_63:48;
- uint64_t round:16;
-#else
- uint64_t round:16;
- uint64_t reserved_16_63:48;
-#endif
- } s;
- struct cvmx_gmxx_tx_spi_roundx_s cn58xx;
- struct cvmx_gmxx_tx_spi_roundx_s cn58xxp1;
};
union cvmx_gmxx_tx_spi_thresh {
@@ -6858,10 +2227,6 @@ union cvmx_gmxx_tx_spi_thresh {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_gmxx_tx_spi_thresh_s cn38xx;
- struct cvmx_gmxx_tx_spi_thresh_s cn38xxp2;
- struct cvmx_gmxx_tx_spi_thresh_s cn58xx;
- struct cvmx_gmxx_tx_spi_thresh_s cn58xxp1;
};
union cvmx_gmxx_tx_xaui_ctl {
@@ -6889,43 +2254,6 @@ union cvmx_gmxx_tx_xaui_ctl {
uint64_t reserved_11_63:53;
#endif
} s;
- struct cvmx_gmxx_tx_xaui_ctl_s cn52xx;
- struct cvmx_gmxx_tx_xaui_ctl_s cn52xxp1;
- struct cvmx_gmxx_tx_xaui_ctl_s cn56xx;
- struct cvmx_gmxx_tx_xaui_ctl_s cn56xxp1;
- struct cvmx_gmxx_tx_xaui_ctl_s cn61xx;
- struct cvmx_gmxx_tx_xaui_ctl_s cn63xx;
- struct cvmx_gmxx_tx_xaui_ctl_s cn63xxp1;
- struct cvmx_gmxx_tx_xaui_ctl_s cn66xx;
- struct cvmx_gmxx_tx_xaui_ctl_s cn68xx;
- struct cvmx_gmxx_tx_xaui_ctl_s cn68xxp1;
- struct cvmx_gmxx_tx_xaui_ctl_s cnf71xx;
-};
-
-union cvmx_gmxx_xaui_ext_loopback {
- uint64_t u64;
- struct cvmx_gmxx_xaui_ext_loopback_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_5_63:59;
- uint64_t en:1;
- uint64_t thresh:4;
-#else
- uint64_t thresh:4;
- uint64_t en:1;
- uint64_t reserved_5_63:59;
-#endif
- } s;
- struct cvmx_gmxx_xaui_ext_loopback_s cn52xx;
- struct cvmx_gmxx_xaui_ext_loopback_s cn52xxp1;
- struct cvmx_gmxx_xaui_ext_loopback_s cn56xx;
- struct cvmx_gmxx_xaui_ext_loopback_s cn56xxp1;
- struct cvmx_gmxx_xaui_ext_loopback_s cn61xx;
- struct cvmx_gmxx_xaui_ext_loopback_s cn63xx;
- struct cvmx_gmxx_xaui_ext_loopback_s cn63xxp1;
- struct cvmx_gmxx_xaui_ext_loopback_s cn66xx;
- struct cvmx_gmxx_xaui_ext_loopback_s cn68xx;
- struct cvmx_gmxx_xaui_ext_loopback_s cn68xxp1;
- struct cvmx_gmxx_xaui_ext_loopback_s cnf71xx;
};
#endif
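
The unions deleted above all follow one pattern: a raw u64 view plus bitfield views whose member order flips under __BIG_ENDIAN_BITFIELD (a kernel byteorder macro) so each field lands on the same CSR bits on either endianness; the per-chip aliases being removed were just duplicate names for those views. A minimal, self-contained sketch of that idiom, using a hypothetical register modeled on the removed cvmx_gmxx_tx_ifg layout (compiles and runs in userspace, where the LE branch is taken):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical CSR with two 4-bit fields in bits [7:0]. */
	union ifg_like {
		uint64_t u64;
		struct {
	#ifdef __BIG_ENDIAN_BITFIELD
			uint64_t reserved_8_63:56;	/* declared first on BE */
			uint64_t ifg2:4;
			uint64_t ifg1:4;
	#else
			uint64_t ifg1:4;		/* declared first on LE */
			uint64_t ifg2:4;
			uint64_t reserved_8_63:56;
	#endif
		} s;
	};

	int main(void)
	{
		union ifg_like reg = { .u64 = 0 };

		reg.s.ifg1 = 0x4;	/* bits [3:0] of the register */
		reg.s.ifg2 = 0x8;	/* bits [7:4] of the register */
		printf("raw = 0x%016" PRIx64 "\n", reg.u64);	/* 0x...0084 */
		return 0;
	}
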
diff --git a/arch/mips/include/asm/octeon/cvmx-gpio-defs.h b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
index 8123b8209369..5420fa667a9c 100644
--- a/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-gpio-defs.h
@@ -90,10 +90,6 @@ union cvmx_gpio_bit_cfgx {
uint64_t reserved_12_63:52;
#endif
} cn30xx;
- struct cvmx_gpio_bit_cfgx_cn30xx cn31xx;
- struct cvmx_gpio_bit_cfgx_cn30xx cn38xx;
- struct cvmx_gpio_bit_cfgx_cn30xx cn38xxp2;
- struct cvmx_gpio_bit_cfgx_cn30xx cn50xx;
struct cvmx_gpio_bit_cfgx_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_15_63:49;
@@ -117,20 +113,6 @@ union cvmx_gpio_bit_cfgx {
uint64_t reserved_15_63:49;
#endif
} cn52xx;
- struct cvmx_gpio_bit_cfgx_cn52xx cn52xxp1;
- struct cvmx_gpio_bit_cfgx_cn52xx cn56xx;
- struct cvmx_gpio_bit_cfgx_cn52xx cn56xxp1;
- struct cvmx_gpio_bit_cfgx_cn30xx cn58xx;
- struct cvmx_gpio_bit_cfgx_cn30xx cn58xxp1;
- struct cvmx_gpio_bit_cfgx_s cn61xx;
- struct cvmx_gpio_bit_cfgx_s cn63xx;
- struct cvmx_gpio_bit_cfgx_s cn63xxp1;
- struct cvmx_gpio_bit_cfgx_s cn66xx;
- struct cvmx_gpio_bit_cfgx_s cn68xx;
- struct cvmx_gpio_bit_cfgx_s cn68xxp1;
- struct cvmx_gpio_bit_cfgx_s cn70xx;
- struct cvmx_gpio_bit_cfgx_s cn73xx;
- struct cvmx_gpio_bit_cfgx_s cnf71xx;
};
union cvmx_gpio_boot_ena {
@@ -146,9 +128,6 @@ union cvmx_gpio_boot_ena {
uint64_t reserved_12_63:52;
#endif
} s;
- struct cvmx_gpio_boot_ena_s cn30xx;
- struct cvmx_gpio_boot_ena_s cn31xx;
- struct cvmx_gpio_boot_ena_s cn50xx;
};
union cvmx_gpio_clk_genx {
@@ -162,17 +141,6 @@ union cvmx_gpio_clk_genx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_gpio_clk_genx_s cn52xx;
- struct cvmx_gpio_clk_genx_s cn52xxp1;
- struct cvmx_gpio_clk_genx_s cn56xx;
- struct cvmx_gpio_clk_genx_s cn56xxp1;
- struct cvmx_gpio_clk_genx_s cn61xx;
- struct cvmx_gpio_clk_genx_s cn63xx;
- struct cvmx_gpio_clk_genx_s cn63xxp1;
- struct cvmx_gpio_clk_genx_s cn66xx;
- struct cvmx_gpio_clk_genx_s cn68xx;
- struct cvmx_gpio_clk_genx_s cn68xxp1;
- struct cvmx_gpio_clk_genx_s cnf71xx;
};
union cvmx_gpio_clk_qlmx {
@@ -218,11 +186,6 @@ union cvmx_gpio_clk_qlmx {
uint64_t reserved_3_63:61;
#endif
} cn63xx;
- struct cvmx_gpio_clk_qlmx_cn63xx cn63xxp1;
- struct cvmx_gpio_clk_qlmx_cn61xx cn66xx;
- struct cvmx_gpio_clk_qlmx_s cn68xx;
- struct cvmx_gpio_clk_qlmx_s cn68xxp1;
- struct cvmx_gpio_clk_qlmx_cn61xx cnf71xx;
};
union cvmx_gpio_dbg_ena {
@@ -236,9 +199,6 @@ union cvmx_gpio_dbg_ena {
uint64_t reserved_21_63:43;
#endif
} s;
- struct cvmx_gpio_dbg_ena_s cn30xx;
- struct cvmx_gpio_dbg_ena_s cn31xx;
- struct cvmx_gpio_dbg_ena_s cn50xx;
};
union cvmx_gpio_int_clr {
@@ -252,24 +212,6 @@ union cvmx_gpio_int_clr {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_gpio_int_clr_s cn30xx;
- struct cvmx_gpio_int_clr_s cn31xx;
- struct cvmx_gpio_int_clr_s cn38xx;
- struct cvmx_gpio_int_clr_s cn38xxp2;
- struct cvmx_gpio_int_clr_s cn50xx;
- struct cvmx_gpio_int_clr_s cn52xx;
- struct cvmx_gpio_int_clr_s cn52xxp1;
- struct cvmx_gpio_int_clr_s cn56xx;
- struct cvmx_gpio_int_clr_s cn56xxp1;
- struct cvmx_gpio_int_clr_s cn58xx;
- struct cvmx_gpio_int_clr_s cn58xxp1;
- struct cvmx_gpio_int_clr_s cn61xx;
- struct cvmx_gpio_int_clr_s cn63xx;
- struct cvmx_gpio_int_clr_s cn63xxp1;
- struct cvmx_gpio_int_clr_s cn66xx;
- struct cvmx_gpio_int_clr_s cn68xx;
- struct cvmx_gpio_int_clr_s cn68xxp1;
- struct cvmx_gpio_int_clr_s cnf71xx;
};
union cvmx_gpio_multi_cast {
@@ -283,8 +225,6 @@ union cvmx_gpio_multi_cast {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_gpio_multi_cast_s cn61xx;
- struct cvmx_gpio_multi_cast_s cnf71xx;
};
union cvmx_gpio_pin_ena {
@@ -302,7 +242,6 @@ union cvmx_gpio_pin_ena {
uint64_t reserved_20_63:44;
#endif
} s;
- struct cvmx_gpio_pin_ena_s cn66xx;
};
union cvmx_gpio_rx_dat {
@@ -316,8 +255,6 @@ union cvmx_gpio_rx_dat {
uint64_t reserved_24_63:40;
#endif
} s;
- struct cvmx_gpio_rx_dat_s cn30xx;
- struct cvmx_gpio_rx_dat_s cn31xx;
struct cvmx_gpio_rx_dat_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
@@ -327,14 +264,6 @@ union cvmx_gpio_rx_dat {
uint64_t reserved_16_63:48;
#endif
} cn38xx;
- struct cvmx_gpio_rx_dat_cn38xx cn38xxp2;
- struct cvmx_gpio_rx_dat_s cn50xx;
- struct cvmx_gpio_rx_dat_cn38xx cn52xx;
- struct cvmx_gpio_rx_dat_cn38xx cn52xxp1;
- struct cvmx_gpio_rx_dat_cn38xx cn56xx;
- struct cvmx_gpio_rx_dat_cn38xx cn56xxp1;
- struct cvmx_gpio_rx_dat_cn38xx cn58xx;
- struct cvmx_gpio_rx_dat_cn38xx cn58xxp1;
struct cvmx_gpio_rx_dat_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -344,12 +273,6 @@ union cvmx_gpio_rx_dat {
uint64_t reserved_20_63:44;
#endif
} cn61xx;
- struct cvmx_gpio_rx_dat_cn38xx cn63xx;
- struct cvmx_gpio_rx_dat_cn38xx cn63xxp1;
- struct cvmx_gpio_rx_dat_cn61xx cn66xx;
- struct cvmx_gpio_rx_dat_cn38xx cn68xx;
- struct cvmx_gpio_rx_dat_cn38xx cn68xxp1;
- struct cvmx_gpio_rx_dat_cn61xx cnf71xx;
};
union cvmx_gpio_tim_ctl {
@@ -363,8 +286,6 @@ union cvmx_gpio_tim_ctl {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_gpio_tim_ctl_s cn68xx;
- struct cvmx_gpio_tim_ctl_s cn68xxp1;
};
union cvmx_gpio_tx_clr {
@@ -378,8 +299,6 @@ union cvmx_gpio_tx_clr {
uint64_t reserved_24_63:40;
#endif
} s;
- struct cvmx_gpio_tx_clr_s cn30xx;
- struct cvmx_gpio_tx_clr_s cn31xx;
struct cvmx_gpio_tx_clr_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
@@ -389,14 +308,6 @@ union cvmx_gpio_tx_clr {
uint64_t reserved_16_63:48;
#endif
} cn38xx;
- struct cvmx_gpio_tx_clr_cn38xx cn38xxp2;
- struct cvmx_gpio_tx_clr_s cn50xx;
- struct cvmx_gpio_tx_clr_cn38xx cn52xx;
- struct cvmx_gpio_tx_clr_cn38xx cn52xxp1;
- struct cvmx_gpio_tx_clr_cn38xx cn56xx;
- struct cvmx_gpio_tx_clr_cn38xx cn56xxp1;
- struct cvmx_gpio_tx_clr_cn38xx cn58xx;
- struct cvmx_gpio_tx_clr_cn38xx cn58xxp1;
struct cvmx_gpio_tx_clr_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -406,12 +317,6 @@ union cvmx_gpio_tx_clr {
uint64_t reserved_20_63:44;
#endif
} cn61xx;
- struct cvmx_gpio_tx_clr_cn38xx cn63xx;
- struct cvmx_gpio_tx_clr_cn38xx cn63xxp1;
- struct cvmx_gpio_tx_clr_cn61xx cn66xx;
- struct cvmx_gpio_tx_clr_cn38xx cn68xx;
- struct cvmx_gpio_tx_clr_cn38xx cn68xxp1;
- struct cvmx_gpio_tx_clr_cn61xx cnf71xx;
};
union cvmx_gpio_tx_set {
@@ -425,8 +330,6 @@ union cvmx_gpio_tx_set {
uint64_t reserved_24_63:40;
#endif
} s;
- struct cvmx_gpio_tx_set_s cn30xx;
- struct cvmx_gpio_tx_set_s cn31xx;
struct cvmx_gpio_tx_set_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
@@ -436,14 +339,6 @@ union cvmx_gpio_tx_set {
uint64_t reserved_16_63:48;
#endif
} cn38xx;
- struct cvmx_gpio_tx_set_cn38xx cn38xxp2;
- struct cvmx_gpio_tx_set_s cn50xx;
- struct cvmx_gpio_tx_set_cn38xx cn52xx;
- struct cvmx_gpio_tx_set_cn38xx cn52xxp1;
- struct cvmx_gpio_tx_set_cn38xx cn56xx;
- struct cvmx_gpio_tx_set_cn38xx cn56xxp1;
- struct cvmx_gpio_tx_set_cn38xx cn58xx;
- struct cvmx_gpio_tx_set_cn38xx cn58xxp1;
struct cvmx_gpio_tx_set_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -453,12 +348,6 @@ union cvmx_gpio_tx_set {
uint64_t reserved_20_63:44;
#endif
} cn61xx;
- struct cvmx_gpio_tx_set_cn38xx cn63xx;
- struct cvmx_gpio_tx_set_cn38xx cn63xxp1;
- struct cvmx_gpio_tx_set_cn61xx cn66xx;
- struct cvmx_gpio_tx_set_cn38xx cn68xx;
- struct cvmx_gpio_tx_set_cn38xx cn68xxp1;
- struct cvmx_gpio_tx_set_cn61xx cnf71xx;
};
union cvmx_gpio_xbit_cfgx {
@@ -505,11 +394,6 @@ union cvmx_gpio_xbit_cfgx {
uint64_t reserved_12_63:52;
#endif
} cn30xx;
- struct cvmx_gpio_xbit_cfgx_cn30xx cn31xx;
- struct cvmx_gpio_xbit_cfgx_cn30xx cn50xx;
- struct cvmx_gpio_xbit_cfgx_s cn61xx;
- struct cvmx_gpio_xbit_cfgx_s cn66xx;
- struct cvmx_gpio_xbit_cfgx_s cnf71xx;
};
#endif
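
After the cleanup only the generic .s view (plus the chip variants whose layouts genuinely differ) remains, and that is the view callers use. A minimal sketch of the surviving access pattern, assuming the CVMX_GPIO_RX_DAT address macro defined elsewhere in this header, the cvmx_read_csr() accessor from <asm/octeon/cvmx.h>, and the dat field of the .s layout (elided by the hunk above):

	#include <asm/octeon/cvmx.h>
	#include <asm/octeon/cvmx-gpio-defs.h>

	/* Sample the GPIO input pins through the generic .s layout. */
	static uint64_t gpio_sample_pins(void)
	{
		union cvmx_gpio_rx_dat rx_dat;

		rx_dat.u64 = cvmx_read_csr(CVMX_GPIO_RX_DAT);
		return rx_dat.s.dat;	/* one bit per pin */
	}
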
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
index f7a95d7de140..ac42b5066bd9 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
@@ -90,21 +90,4 @@ extern cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port);
extern int __cvmx_helper_rgmii_link_set(int ipd_port,
cvmx_helper_link_info_t link_info);
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_rgmii_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external);
-
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
index 63fd21335e4b..3a54dea58c0a 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
@@ -84,21 +84,4 @@ extern cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port);
extern int __cvmx_helper_sgmii_link_set(int ipd_port,
cvmx_helper_link_info_t link_info);
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_sgmii_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external);
-
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-util.h b/arch/mips/include/asm/octeon/cvmx-helper-util.h
index f446f212bbd4..e9a97e7ee604 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-util.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-util.h
@@ -45,29 +45,6 @@ extern const char
*cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode);
/**
- * Debug routine to dump the packet structure to the console
- *
- * @work: Work queue entry containing the packet to dump
- * Returns
- */
-extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
-
-/**
- * Setup Random Early Drop on a specific input queue
- *
- * @queue: Input queue to setup RED on (0-7)
- * @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
- * @drop_thresh:
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
- * Returns Zero on success. Negative on failure
- */
-extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
- int drop_thresh);
-
-/**
* Setup Random Early Drop to automatically begin dropping packets.
*
* @pass_thresh:
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-xaui.h b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
index f8ce53f6f28f..51f45b495680 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
@@ -84,20 +84,4 @@ extern cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port);
extern int __cvmx_helper_xaui_link_set(int ipd_port,
cvmx_helper_link_info_t link_info);
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external);
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-helper.h b/arch/mips/include/asm/octeon/cvmx-helper.h
index 0ed87cb67e7f..ba0e76f578e0 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper.h
@@ -71,26 +71,6 @@ typedef union {
#include <asm/octeon/cvmx-helper-xaui.h>
/**
- * cvmx_override_pko_queue_priority(int ipd_port, uint64_t
- * priorities[16]) is a function pointer. It is meant to allow
- * customization of the PKO queue priorities based on the port
- * number. Users should set this pointer to a function before
- * calling any cvmx-helper operations.
- */
-extern void (*cvmx_override_pko_queue_priority) (int pko_port,
- uint64_t priorities[16]);
-
-/**
- * cvmx_override_ipd_port_setup(int ipd_port) is a function
- * pointer. It is meant to allow customization of the IPD port
- * setup before packet input/output comes online. It is called
- * after cvmx-helper does the default IPD configuration, but
- * before IPD is enabled. Users should set this pointer to a
- * function before calling any cvmx-helper operations.
- */
-extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
-
-/**
* This function enables the IPD and also enables the packet interfaces.
* The packet interfaces (RGMII and SPI) must be enabled after the
* IPD. This should be called by the user program after any additional
@@ -195,20 +175,4 @@ extern int cvmx_helper_link_set(int ipd_port,
extern int cvmx_helper_interface_probe(int interface);
extern int cvmx_helper_interface_enumerate(int interface);
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
- int enable_external);
-
#endif /* __CVMX_HELPER_H__ */
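
With the loopback helper and the override function pointers gone, what this header still exports is the probe/enumerate and link get/set API kept above. A hedged sketch of how a caller typically uses that surviving surface (field names per the cvmx_helper_link_info_t union declared earlier in this header; the resync policy shown is illustrative, not taken from the patch):

	#include <asm/octeon/cvmx-helper.h>

	/* Re-program the MAC whenever the PHY reports an active link. */
	static void sync_port_link(int ipd_port)
	{
		cvmx_helper_link_info_t link = cvmx_helper_link_get(ipd_port);

		if (link.s.link_up)
			cvmx_helper_link_set(ipd_port, link);
	}
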
diff --git a/arch/mips/include/asm/octeon/cvmx-iob-defs.h b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
index 7936f816e93e..989b67bbac5b 100644
--- a/arch/mips/include/asm/octeon/cvmx-iob-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-iob-defs.h
@@ -119,16 +119,6 @@ union cvmx_iob_bist_status {
uint64_t reserved_18_63:46;
#endif
} cn30xx;
- struct cvmx_iob_bist_status_cn30xx cn31xx;
- struct cvmx_iob_bist_status_cn30xx cn38xx;
- struct cvmx_iob_bist_status_cn30xx cn38xxp2;
- struct cvmx_iob_bist_status_cn30xx cn50xx;
- struct cvmx_iob_bist_status_cn30xx cn52xx;
- struct cvmx_iob_bist_status_cn30xx cn52xxp1;
- struct cvmx_iob_bist_status_cn30xx cn56xx;
- struct cvmx_iob_bist_status_cn30xx cn56xxp1;
- struct cvmx_iob_bist_status_cn30xx cn58xx;
- struct cvmx_iob_bist_status_cn30xx cn58xxp1;
struct cvmx_iob_bist_status_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_23_63:41;
@@ -182,9 +172,6 @@ union cvmx_iob_bist_status {
uint64_t reserved_23_63:41;
#endif
} cn61xx;
- struct cvmx_iob_bist_status_cn61xx cn63xx;
- struct cvmx_iob_bist_status_cn61xx cn63xxp1;
- struct cvmx_iob_bist_status_cn61xx cn66xx;
struct cvmx_iob_bist_status_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
@@ -228,8 +215,6 @@ union cvmx_iob_bist_status {
uint64_t reserved_18_63:46;
#endif
} cn68xx;
- struct cvmx_iob_bist_status_cn68xx cn68xxp1;
- struct cvmx_iob_bist_status_cn61xx cnf71xx;
};
union cvmx_iob_ctl_status {
@@ -274,10 +259,6 @@ union cvmx_iob_ctl_status {
uint64_t reserved_5_63:59;
#endif
} cn30xx;
- struct cvmx_iob_ctl_status_cn30xx cn31xx;
- struct cvmx_iob_ctl_status_cn30xx cn38xx;
- struct cvmx_iob_ctl_status_cn30xx cn38xxp2;
- struct cvmx_iob_ctl_status_cn30xx cn50xx;
struct cvmx_iob_ctl_status_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
@@ -297,11 +278,6 @@ union cvmx_iob_ctl_status {
uint64_t reserved_6_63:58;
#endif
} cn52xx;
- struct cvmx_iob_ctl_status_cn30xx cn52xxp1;
- struct cvmx_iob_ctl_status_cn30xx cn56xx;
- struct cvmx_iob_ctl_status_cn30xx cn56xxp1;
- struct cvmx_iob_ctl_status_cn30xx cn58xx;
- struct cvmx_iob_ctl_status_cn30xx cn58xxp1;
struct cvmx_iob_ctl_status_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
@@ -346,8 +322,6 @@ union cvmx_iob_ctl_status {
uint64_t reserved_10_63:54;
#endif
} cn63xx;
- struct cvmx_iob_ctl_status_cn63xx cn63xxp1;
- struct cvmx_iob_ctl_status_cn61xx cn66xx;
struct cvmx_iob_ctl_status_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
@@ -371,8 +345,6 @@ union cvmx_iob_ctl_status {
uint64_t reserved_11_63:53;
#endif
} cn68xx;
- struct cvmx_iob_ctl_status_cn68xx cn68xxp1;
- struct cvmx_iob_ctl_status_cn61xx cnf71xx;
};
union cvmx_iob_dwb_pri_cnt {
@@ -388,19 +360,6 @@ union cvmx_iob_dwb_pri_cnt {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_iob_dwb_pri_cnt_s cn38xx;
- struct cvmx_iob_dwb_pri_cnt_s cn38xxp2;
- struct cvmx_iob_dwb_pri_cnt_s cn52xx;
- struct cvmx_iob_dwb_pri_cnt_s cn52xxp1;
- struct cvmx_iob_dwb_pri_cnt_s cn56xx;
- struct cvmx_iob_dwb_pri_cnt_s cn56xxp1;
- struct cvmx_iob_dwb_pri_cnt_s cn58xx;
- struct cvmx_iob_dwb_pri_cnt_s cn58xxp1;
- struct cvmx_iob_dwb_pri_cnt_s cn61xx;
- struct cvmx_iob_dwb_pri_cnt_s cn63xx;
- struct cvmx_iob_dwb_pri_cnt_s cn63xxp1;
- struct cvmx_iob_dwb_pri_cnt_s cn66xx;
- struct cvmx_iob_dwb_pri_cnt_s cnf71xx;
};
union cvmx_iob_fau_timeout {
@@ -416,24 +375,6 @@ union cvmx_iob_fau_timeout {
uint64_t reserved_13_63:51;
#endif
} s;
- struct cvmx_iob_fau_timeout_s cn30xx;
- struct cvmx_iob_fau_timeout_s cn31xx;
- struct cvmx_iob_fau_timeout_s cn38xx;
- struct cvmx_iob_fau_timeout_s cn38xxp2;
- struct cvmx_iob_fau_timeout_s cn50xx;
- struct cvmx_iob_fau_timeout_s cn52xx;
- struct cvmx_iob_fau_timeout_s cn52xxp1;
- struct cvmx_iob_fau_timeout_s cn56xx;
- struct cvmx_iob_fau_timeout_s cn56xxp1;
- struct cvmx_iob_fau_timeout_s cn58xx;
- struct cvmx_iob_fau_timeout_s cn58xxp1;
- struct cvmx_iob_fau_timeout_s cn61xx;
- struct cvmx_iob_fau_timeout_s cn63xx;
- struct cvmx_iob_fau_timeout_s cn63xxp1;
- struct cvmx_iob_fau_timeout_s cn66xx;
- struct cvmx_iob_fau_timeout_s cn68xx;
- struct cvmx_iob_fau_timeout_s cn68xxp1;
- struct cvmx_iob_fau_timeout_s cnf71xx;
};
union cvmx_iob_i2c_pri_cnt {
@@ -449,19 +390,6 @@ union cvmx_iob_i2c_pri_cnt {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_iob_i2c_pri_cnt_s cn38xx;
- struct cvmx_iob_i2c_pri_cnt_s cn38xxp2;
- struct cvmx_iob_i2c_pri_cnt_s cn52xx;
- struct cvmx_iob_i2c_pri_cnt_s cn52xxp1;
- struct cvmx_iob_i2c_pri_cnt_s cn56xx;
- struct cvmx_iob_i2c_pri_cnt_s cn56xxp1;
- struct cvmx_iob_i2c_pri_cnt_s cn58xx;
- struct cvmx_iob_i2c_pri_cnt_s cn58xxp1;
- struct cvmx_iob_i2c_pri_cnt_s cn61xx;
- struct cvmx_iob_i2c_pri_cnt_s cn63xx;
- struct cvmx_iob_i2c_pri_cnt_s cn63xxp1;
- struct cvmx_iob_i2c_pri_cnt_s cn66xx;
- struct cvmx_iob_i2c_pri_cnt_s cnf71xx;
};
union cvmx_iob_inb_control_match {
@@ -481,24 +409,6 @@ union cvmx_iob_inb_control_match {
uint64_t reserved_29_63:35;
#endif
} s;
- struct cvmx_iob_inb_control_match_s cn30xx;
- struct cvmx_iob_inb_control_match_s cn31xx;
- struct cvmx_iob_inb_control_match_s cn38xx;
- struct cvmx_iob_inb_control_match_s cn38xxp2;
- struct cvmx_iob_inb_control_match_s cn50xx;
- struct cvmx_iob_inb_control_match_s cn52xx;
- struct cvmx_iob_inb_control_match_s cn52xxp1;
- struct cvmx_iob_inb_control_match_s cn56xx;
- struct cvmx_iob_inb_control_match_s cn56xxp1;
- struct cvmx_iob_inb_control_match_s cn58xx;
- struct cvmx_iob_inb_control_match_s cn58xxp1;
- struct cvmx_iob_inb_control_match_s cn61xx;
- struct cvmx_iob_inb_control_match_s cn63xx;
- struct cvmx_iob_inb_control_match_s cn63xxp1;
- struct cvmx_iob_inb_control_match_s cn66xx;
- struct cvmx_iob_inb_control_match_s cn68xx;
- struct cvmx_iob_inb_control_match_s cn68xxp1;
- struct cvmx_iob_inb_control_match_s cnf71xx;
};
union cvmx_iob_inb_control_match_enb {
@@ -518,24 +428,6 @@ union cvmx_iob_inb_control_match_enb {
uint64_t reserved_29_63:35;
#endif
} s;
- struct cvmx_iob_inb_control_match_enb_s cn30xx;
- struct cvmx_iob_inb_control_match_enb_s cn31xx;
- struct cvmx_iob_inb_control_match_enb_s cn38xx;
- struct cvmx_iob_inb_control_match_enb_s cn38xxp2;
- struct cvmx_iob_inb_control_match_enb_s cn50xx;
- struct cvmx_iob_inb_control_match_enb_s cn52xx;
- struct cvmx_iob_inb_control_match_enb_s cn52xxp1;
- struct cvmx_iob_inb_control_match_enb_s cn56xx;
- struct cvmx_iob_inb_control_match_enb_s cn56xxp1;
- struct cvmx_iob_inb_control_match_enb_s cn58xx;
- struct cvmx_iob_inb_control_match_enb_s cn58xxp1;
- struct cvmx_iob_inb_control_match_enb_s cn61xx;
- struct cvmx_iob_inb_control_match_enb_s cn63xx;
- struct cvmx_iob_inb_control_match_enb_s cn63xxp1;
- struct cvmx_iob_inb_control_match_enb_s cn66xx;
- struct cvmx_iob_inb_control_match_enb_s cn68xx;
- struct cvmx_iob_inb_control_match_enb_s cn68xxp1;
- struct cvmx_iob_inb_control_match_enb_s cnf71xx;
};
union cvmx_iob_inb_data_match {
@@ -547,24 +439,6 @@ union cvmx_iob_inb_data_match {
uint64_t data:64;
#endif
} s;
- struct cvmx_iob_inb_data_match_s cn30xx;
- struct cvmx_iob_inb_data_match_s cn31xx;
- struct cvmx_iob_inb_data_match_s cn38xx;
- struct cvmx_iob_inb_data_match_s cn38xxp2;
- struct cvmx_iob_inb_data_match_s cn50xx;
- struct cvmx_iob_inb_data_match_s cn52xx;
- struct cvmx_iob_inb_data_match_s cn52xxp1;
- struct cvmx_iob_inb_data_match_s cn56xx;
- struct cvmx_iob_inb_data_match_s cn56xxp1;
- struct cvmx_iob_inb_data_match_s cn58xx;
- struct cvmx_iob_inb_data_match_s cn58xxp1;
- struct cvmx_iob_inb_data_match_s cn61xx;
- struct cvmx_iob_inb_data_match_s cn63xx;
- struct cvmx_iob_inb_data_match_s cn63xxp1;
- struct cvmx_iob_inb_data_match_s cn66xx;
- struct cvmx_iob_inb_data_match_s cn68xx;
- struct cvmx_iob_inb_data_match_s cn68xxp1;
- struct cvmx_iob_inb_data_match_s cnf71xx;
};
union cvmx_iob_inb_data_match_enb {
@@ -576,24 +450,6 @@ union cvmx_iob_inb_data_match_enb {
uint64_t data:64;
#endif
} s;
- struct cvmx_iob_inb_data_match_enb_s cn30xx;
- struct cvmx_iob_inb_data_match_enb_s cn31xx;
- struct cvmx_iob_inb_data_match_enb_s cn38xx;
- struct cvmx_iob_inb_data_match_enb_s cn38xxp2;
- struct cvmx_iob_inb_data_match_enb_s cn50xx;
- struct cvmx_iob_inb_data_match_enb_s cn52xx;
- struct cvmx_iob_inb_data_match_enb_s cn52xxp1;
- struct cvmx_iob_inb_data_match_enb_s cn56xx;
- struct cvmx_iob_inb_data_match_enb_s cn56xxp1;
- struct cvmx_iob_inb_data_match_enb_s cn58xx;
- struct cvmx_iob_inb_data_match_enb_s cn58xxp1;
- struct cvmx_iob_inb_data_match_enb_s cn61xx;
- struct cvmx_iob_inb_data_match_enb_s cn63xx;
- struct cvmx_iob_inb_data_match_enb_s cn63xxp1;
- struct cvmx_iob_inb_data_match_enb_s cn66xx;
- struct cvmx_iob_inb_data_match_enb_s cn68xx;
- struct cvmx_iob_inb_data_match_enb_s cn68xxp1;
- struct cvmx_iob_inb_data_match_enb_s cnf71xx;
};
union cvmx_iob_int_enb {
@@ -632,20 +488,6 @@ union cvmx_iob_int_enb {
uint64_t reserved_4_63:60;
#endif
} cn30xx;
- struct cvmx_iob_int_enb_cn30xx cn31xx;
- struct cvmx_iob_int_enb_cn30xx cn38xx;
- struct cvmx_iob_int_enb_cn30xx cn38xxp2;
- struct cvmx_iob_int_enb_s cn50xx;
- struct cvmx_iob_int_enb_s cn52xx;
- struct cvmx_iob_int_enb_s cn52xxp1;
- struct cvmx_iob_int_enb_s cn56xx;
- struct cvmx_iob_int_enb_s cn56xxp1;
- struct cvmx_iob_int_enb_s cn58xx;
- struct cvmx_iob_int_enb_s cn58xxp1;
- struct cvmx_iob_int_enb_s cn61xx;
- struct cvmx_iob_int_enb_s cn63xx;
- struct cvmx_iob_int_enb_s cn63xxp1;
- struct cvmx_iob_int_enb_s cn66xx;
struct cvmx_iob_int_enb_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_0_63:64;
@@ -653,8 +495,6 @@ union cvmx_iob_int_enb {
uint64_t reserved_0_63:64;
#endif
} cn68xx;
- struct cvmx_iob_int_enb_cn68xx cn68xxp1;
- struct cvmx_iob_int_enb_s cnf71xx;
};
union cvmx_iob_int_sum {
@@ -693,20 +533,6 @@ union cvmx_iob_int_sum {
uint64_t reserved_4_63:60;
#endif
} cn30xx;
- struct cvmx_iob_int_sum_cn30xx cn31xx;
- struct cvmx_iob_int_sum_cn30xx cn38xx;
- struct cvmx_iob_int_sum_cn30xx cn38xxp2;
- struct cvmx_iob_int_sum_s cn50xx;
- struct cvmx_iob_int_sum_s cn52xx;
- struct cvmx_iob_int_sum_s cn52xxp1;
- struct cvmx_iob_int_sum_s cn56xx;
- struct cvmx_iob_int_sum_s cn56xxp1;
- struct cvmx_iob_int_sum_s cn58xx;
- struct cvmx_iob_int_sum_s cn58xxp1;
- struct cvmx_iob_int_sum_s cn61xx;
- struct cvmx_iob_int_sum_s cn63xx;
- struct cvmx_iob_int_sum_s cn63xxp1;
- struct cvmx_iob_int_sum_s cn66xx;
struct cvmx_iob_int_sum_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_0_63:64;
@@ -714,8 +540,6 @@ union cvmx_iob_int_sum {
uint64_t reserved_0_63:64;
#endif
} cn68xx;
- struct cvmx_iob_int_sum_cn68xx cn68xxp1;
- struct cvmx_iob_int_sum_s cnf71xx;
};
union cvmx_iob_n2c_l2c_pri_cnt {
@@ -731,19 +555,6 @@ union cvmx_iob_n2c_l2c_pri_cnt {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xx;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn38xxp2;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xx;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn52xxp1;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xx;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn56xxp1;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xx;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn58xxp1;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn61xx;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xx;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn63xxp1;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cn66xx;
- struct cvmx_iob_n2c_l2c_pri_cnt_s cnf71xx;
};
union cvmx_iob_n2c_rsp_pri_cnt {
@@ -759,19 +570,6 @@ union cvmx_iob_n2c_rsp_pri_cnt {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xx;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn38xxp2;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xx;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn52xxp1;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xx;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn56xxp1;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xx;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn58xxp1;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn61xx;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xx;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn63xxp1;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cn66xx;
- struct cvmx_iob_n2c_rsp_pri_cnt_s cnf71xx;
};
union cvmx_iob_outb_com_pri_cnt {
@@ -787,21 +585,6 @@ union cvmx_iob_outb_com_pri_cnt {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_iob_outb_com_pri_cnt_s cn38xx;
- struct cvmx_iob_outb_com_pri_cnt_s cn38xxp2;
- struct cvmx_iob_outb_com_pri_cnt_s cn52xx;
- struct cvmx_iob_outb_com_pri_cnt_s cn52xxp1;
- struct cvmx_iob_outb_com_pri_cnt_s cn56xx;
- struct cvmx_iob_outb_com_pri_cnt_s cn56xxp1;
- struct cvmx_iob_outb_com_pri_cnt_s cn58xx;
- struct cvmx_iob_outb_com_pri_cnt_s cn58xxp1;
- struct cvmx_iob_outb_com_pri_cnt_s cn61xx;
- struct cvmx_iob_outb_com_pri_cnt_s cn63xx;
- struct cvmx_iob_outb_com_pri_cnt_s cn63xxp1;
- struct cvmx_iob_outb_com_pri_cnt_s cn66xx;
- struct cvmx_iob_outb_com_pri_cnt_s cn68xx;
- struct cvmx_iob_outb_com_pri_cnt_s cn68xxp1;
- struct cvmx_iob_outb_com_pri_cnt_s cnf71xx;
};
union cvmx_iob_outb_control_match {
@@ -821,24 +604,6 @@ union cvmx_iob_outb_control_match {
uint64_t reserved_26_63:38;
#endif
} s;
- struct cvmx_iob_outb_control_match_s cn30xx;
- struct cvmx_iob_outb_control_match_s cn31xx;
- struct cvmx_iob_outb_control_match_s cn38xx;
- struct cvmx_iob_outb_control_match_s cn38xxp2;
- struct cvmx_iob_outb_control_match_s cn50xx;
- struct cvmx_iob_outb_control_match_s cn52xx;
- struct cvmx_iob_outb_control_match_s cn52xxp1;
- struct cvmx_iob_outb_control_match_s cn56xx;
- struct cvmx_iob_outb_control_match_s cn56xxp1;
- struct cvmx_iob_outb_control_match_s cn58xx;
- struct cvmx_iob_outb_control_match_s cn58xxp1;
- struct cvmx_iob_outb_control_match_s cn61xx;
- struct cvmx_iob_outb_control_match_s cn63xx;
- struct cvmx_iob_outb_control_match_s cn63xxp1;
- struct cvmx_iob_outb_control_match_s cn66xx;
- struct cvmx_iob_outb_control_match_s cn68xx;
- struct cvmx_iob_outb_control_match_s cn68xxp1;
- struct cvmx_iob_outb_control_match_s cnf71xx;
};
union cvmx_iob_outb_control_match_enb {
@@ -858,24 +623,6 @@ union cvmx_iob_outb_control_match_enb {
uint64_t reserved_26_63:38;
#endif
} s;
- struct cvmx_iob_outb_control_match_enb_s cn30xx;
- struct cvmx_iob_outb_control_match_enb_s cn31xx;
- struct cvmx_iob_outb_control_match_enb_s cn38xx;
- struct cvmx_iob_outb_control_match_enb_s cn38xxp2;
- struct cvmx_iob_outb_control_match_enb_s cn50xx;
- struct cvmx_iob_outb_control_match_enb_s cn52xx;
- struct cvmx_iob_outb_control_match_enb_s cn52xxp1;
- struct cvmx_iob_outb_control_match_enb_s cn56xx;
- struct cvmx_iob_outb_control_match_enb_s cn56xxp1;
- struct cvmx_iob_outb_control_match_enb_s cn58xx;
- struct cvmx_iob_outb_control_match_enb_s cn58xxp1;
- struct cvmx_iob_outb_control_match_enb_s cn61xx;
- struct cvmx_iob_outb_control_match_enb_s cn63xx;
- struct cvmx_iob_outb_control_match_enb_s cn63xxp1;
- struct cvmx_iob_outb_control_match_enb_s cn66xx;
- struct cvmx_iob_outb_control_match_enb_s cn68xx;
- struct cvmx_iob_outb_control_match_enb_s cn68xxp1;
- struct cvmx_iob_outb_control_match_enb_s cnf71xx;
};
union cvmx_iob_outb_data_match {
@@ -887,24 +634,6 @@ union cvmx_iob_outb_data_match {
uint64_t data:64;
#endif
} s;
- struct cvmx_iob_outb_data_match_s cn30xx;
- struct cvmx_iob_outb_data_match_s cn31xx;
- struct cvmx_iob_outb_data_match_s cn38xx;
- struct cvmx_iob_outb_data_match_s cn38xxp2;
- struct cvmx_iob_outb_data_match_s cn50xx;
- struct cvmx_iob_outb_data_match_s cn52xx;
- struct cvmx_iob_outb_data_match_s cn52xxp1;
- struct cvmx_iob_outb_data_match_s cn56xx;
- struct cvmx_iob_outb_data_match_s cn56xxp1;
- struct cvmx_iob_outb_data_match_s cn58xx;
- struct cvmx_iob_outb_data_match_s cn58xxp1;
- struct cvmx_iob_outb_data_match_s cn61xx;
- struct cvmx_iob_outb_data_match_s cn63xx;
- struct cvmx_iob_outb_data_match_s cn63xxp1;
- struct cvmx_iob_outb_data_match_s cn66xx;
- struct cvmx_iob_outb_data_match_s cn68xx;
- struct cvmx_iob_outb_data_match_s cn68xxp1;
- struct cvmx_iob_outb_data_match_s cnf71xx;
};
union cvmx_iob_outb_data_match_enb {
@@ -916,24 +645,6 @@ union cvmx_iob_outb_data_match_enb {
uint64_t data:64;
#endif
} s;
- struct cvmx_iob_outb_data_match_enb_s cn30xx;
- struct cvmx_iob_outb_data_match_enb_s cn31xx;
- struct cvmx_iob_outb_data_match_enb_s cn38xx;
- struct cvmx_iob_outb_data_match_enb_s cn38xxp2;
- struct cvmx_iob_outb_data_match_enb_s cn50xx;
- struct cvmx_iob_outb_data_match_enb_s cn52xx;
- struct cvmx_iob_outb_data_match_enb_s cn52xxp1;
- struct cvmx_iob_outb_data_match_enb_s cn56xx;
- struct cvmx_iob_outb_data_match_enb_s cn56xxp1;
- struct cvmx_iob_outb_data_match_enb_s cn58xx;
- struct cvmx_iob_outb_data_match_enb_s cn58xxp1;
- struct cvmx_iob_outb_data_match_enb_s cn61xx;
- struct cvmx_iob_outb_data_match_enb_s cn63xx;
- struct cvmx_iob_outb_data_match_enb_s cn63xxp1;
- struct cvmx_iob_outb_data_match_enb_s cn66xx;
- struct cvmx_iob_outb_data_match_enb_s cn68xx;
- struct cvmx_iob_outb_data_match_enb_s cn68xxp1;
- struct cvmx_iob_outb_data_match_enb_s cnf71xx;
};
union cvmx_iob_outb_fpa_pri_cnt {
@@ -949,21 +660,6 @@ union cvmx_iob_outb_fpa_pri_cnt {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn38xx;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn38xxp2;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn52xx;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn52xxp1;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn56xx;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn56xxp1;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn58xx;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn58xxp1;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn61xx;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn63xx;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn63xxp1;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn66xx;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn68xx;
- struct cvmx_iob_outb_fpa_pri_cnt_s cn68xxp1;
- struct cvmx_iob_outb_fpa_pri_cnt_s cnf71xx;
};
union cvmx_iob_outb_req_pri_cnt {
@@ -979,21 +675,6 @@ union cvmx_iob_outb_req_pri_cnt {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_iob_outb_req_pri_cnt_s cn38xx;
- struct cvmx_iob_outb_req_pri_cnt_s cn38xxp2;
- struct cvmx_iob_outb_req_pri_cnt_s cn52xx;
- struct cvmx_iob_outb_req_pri_cnt_s cn52xxp1;
- struct cvmx_iob_outb_req_pri_cnt_s cn56xx;
- struct cvmx_iob_outb_req_pri_cnt_s cn56xxp1;
- struct cvmx_iob_outb_req_pri_cnt_s cn58xx;
- struct cvmx_iob_outb_req_pri_cnt_s cn58xxp1;
- struct cvmx_iob_outb_req_pri_cnt_s cn61xx;
- struct cvmx_iob_outb_req_pri_cnt_s cn63xx;
- struct cvmx_iob_outb_req_pri_cnt_s cn63xxp1;
- struct cvmx_iob_outb_req_pri_cnt_s cn66xx;
- struct cvmx_iob_outb_req_pri_cnt_s cn68xx;
- struct cvmx_iob_outb_req_pri_cnt_s cn68xxp1;
- struct cvmx_iob_outb_req_pri_cnt_s cnf71xx;
};
union cvmx_iob_p2c_req_pri_cnt {
@@ -1009,19 +690,6 @@ union cvmx_iob_p2c_req_pri_cnt {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_iob_p2c_req_pri_cnt_s cn38xx;
- struct cvmx_iob_p2c_req_pri_cnt_s cn38xxp2;
- struct cvmx_iob_p2c_req_pri_cnt_s cn52xx;
- struct cvmx_iob_p2c_req_pri_cnt_s cn52xxp1;
- struct cvmx_iob_p2c_req_pri_cnt_s cn56xx;
- struct cvmx_iob_p2c_req_pri_cnt_s cn56xxp1;
- struct cvmx_iob_p2c_req_pri_cnt_s cn58xx;
- struct cvmx_iob_p2c_req_pri_cnt_s cn58xxp1;
- struct cvmx_iob_p2c_req_pri_cnt_s cn61xx;
- struct cvmx_iob_p2c_req_pri_cnt_s cn63xx;
- struct cvmx_iob_p2c_req_pri_cnt_s cn63xxp1;
- struct cvmx_iob_p2c_req_pri_cnt_s cn66xx;
- struct cvmx_iob_p2c_req_pri_cnt_s cnf71xx;
};
union cvmx_iob_pkt_err {
@@ -1046,21 +714,6 @@ union cvmx_iob_pkt_err {
uint64_t reserved_6_63:58;
#endif
} cn30xx;
- struct cvmx_iob_pkt_err_cn30xx cn31xx;
- struct cvmx_iob_pkt_err_cn30xx cn38xx;
- struct cvmx_iob_pkt_err_cn30xx cn38xxp2;
- struct cvmx_iob_pkt_err_cn30xx cn50xx;
- struct cvmx_iob_pkt_err_cn30xx cn52xx;
- struct cvmx_iob_pkt_err_cn30xx cn52xxp1;
- struct cvmx_iob_pkt_err_cn30xx cn56xx;
- struct cvmx_iob_pkt_err_cn30xx cn56xxp1;
- struct cvmx_iob_pkt_err_cn30xx cn58xx;
- struct cvmx_iob_pkt_err_cn30xx cn58xxp1;
- struct cvmx_iob_pkt_err_s cn61xx;
- struct cvmx_iob_pkt_err_s cn63xx;
- struct cvmx_iob_pkt_err_s cn63xxp1;
- struct cvmx_iob_pkt_err_s cn66xx;
- struct cvmx_iob_pkt_err_s cnf71xx;
};
union cvmx_iob_to_cmb_credits {
@@ -1089,10 +742,6 @@ union cvmx_iob_to_cmb_credits {
uint64_t reserved_9_63:55;
#endif
} cn52xx;
- struct cvmx_iob_to_cmb_credits_cn52xx cn61xx;
- struct cvmx_iob_to_cmb_credits_cn52xx cn63xx;
- struct cvmx_iob_to_cmb_credits_cn52xx cn63xxp1;
- struct cvmx_iob_to_cmb_credits_cn52xx cn66xx;
struct cvmx_iob_to_cmb_credits_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
@@ -1106,8 +755,6 @@ union cvmx_iob_to_cmb_credits {
uint64_t reserved_9_63:55;
#endif
} cn68xx;
- struct cvmx_iob_to_cmb_credits_cn68xx cn68xxp1;
- struct cvmx_iob_to_cmb_credits_cn52xx cnf71xx;
};
union cvmx_iob_to_ncb_did_00_credits {
@@ -1121,8 +768,6 @@ union cvmx_iob_to_ncb_did_00_credits {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_iob_to_ncb_did_00_credits_s cn68xx;
- struct cvmx_iob_to_ncb_did_00_credits_s cn68xxp1;
};
union cvmx_iob_to_ncb_did_111_credits {
@@ -1136,8 +781,6 @@ union cvmx_iob_to_ncb_did_111_credits {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_iob_to_ncb_did_111_credits_s cn68xx;
- struct cvmx_iob_to_ncb_did_111_credits_s cn68xxp1;
};
union cvmx_iob_to_ncb_did_223_credits {
@@ -1151,8 +794,6 @@ union cvmx_iob_to_ncb_did_223_credits {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_iob_to_ncb_did_223_credits_s cn68xx;
- struct cvmx_iob_to_ncb_did_223_credits_s cn68xxp1;
};
union cvmx_iob_to_ncb_did_24_credits {
@@ -1166,8 +807,6 @@ union cvmx_iob_to_ncb_did_24_credits {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_iob_to_ncb_did_24_credits_s cn68xx;
- struct cvmx_iob_to_ncb_did_24_credits_s cn68xxp1;
};
union cvmx_iob_to_ncb_did_32_credits {
@@ -1181,8 +820,6 @@ union cvmx_iob_to_ncb_did_32_credits {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_iob_to_ncb_did_32_credits_s cn68xx;
- struct cvmx_iob_to_ncb_did_32_credits_s cn68xxp1;
};
union cvmx_iob_to_ncb_did_40_credits {
@@ -1196,8 +833,6 @@ union cvmx_iob_to_ncb_did_40_credits {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_iob_to_ncb_did_40_credits_s cn68xx;
- struct cvmx_iob_to_ncb_did_40_credits_s cn68xxp1;
};
union cvmx_iob_to_ncb_did_55_credits {
@@ -1211,8 +846,6 @@ union cvmx_iob_to_ncb_did_55_credits {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_iob_to_ncb_did_55_credits_s cn68xx;
- struct cvmx_iob_to_ncb_did_55_credits_s cn68xxp1;
};
union cvmx_iob_to_ncb_did_64_credits {
@@ -1226,8 +859,6 @@ union cvmx_iob_to_ncb_did_64_credits {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_iob_to_ncb_did_64_credits_s cn68xx;
- struct cvmx_iob_to_ncb_did_64_credits_s cn68xxp1;
};
union cvmx_iob_to_ncb_did_79_credits {
@@ -1241,8 +872,6 @@ union cvmx_iob_to_ncb_did_79_credits {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_iob_to_ncb_did_79_credits_s cn68xx;
- struct cvmx_iob_to_ncb_did_79_credits_s cn68xxp1;
};
union cvmx_iob_to_ncb_did_96_credits {
@@ -1256,8 +885,6 @@ union cvmx_iob_to_ncb_did_96_credits {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_iob_to_ncb_did_96_credits_s cn68xx;
- struct cvmx_iob_to_ncb_did_96_credits_s cn68xxp1;
};
union cvmx_iob_to_ncb_did_98_credits {
@@ -1271,8 +898,6 @@ union cvmx_iob_to_ncb_did_98_credits {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_iob_to_ncb_did_98_credits_s cn68xx;
- struct cvmx_iob_to_ncb_did_98_credits_s cn68xxp1;
};
#endif
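
The IOB unions are trimmed the same way: only the per-chip aliases go, while the .s layouts the kernel actually programs stay. As a concrete example, the FAU timeout register kept above is written through its generic view during helper setup; a sketch of that pattern, assuming the CVMX_IOB_FAU_TIMEOUT address macro from this header and the tout_val/tout_enb fields of the .s layout (not shown in the hunk, which only keeps the reserved_13_63 line):

	#include <asm/octeon/cvmx.h>
	#include <asm/octeon/cvmx-iob-defs.h>

	/* Program the Fetch-and-Add unit timeout via the generic .s view. */
	static void iob_set_fau_timeout(void)
	{
		union cvmx_iob_fau_timeout fau_timeout;

		fau_timeout.u64 = 0;
		fau_timeout.s.tout_val = 0xfff;	/* maximum timeout count */
		fau_timeout.s.tout_enb = 0;	/* timeout checking disabled */
		cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
	}
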
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd-defs.h b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
index 1193f73bb74a..c0a4ac7b41fb 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd-defs.h
@@ -108,24 +108,6 @@ union cvmx_ipd_1st_mbuff_skip {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_ipd_1st_mbuff_skip_s cn30xx;
- struct cvmx_ipd_1st_mbuff_skip_s cn31xx;
- struct cvmx_ipd_1st_mbuff_skip_s cn38xx;
- struct cvmx_ipd_1st_mbuff_skip_s cn38xxp2;
- struct cvmx_ipd_1st_mbuff_skip_s cn50xx;
- struct cvmx_ipd_1st_mbuff_skip_s cn52xx;
- struct cvmx_ipd_1st_mbuff_skip_s cn52xxp1;
- struct cvmx_ipd_1st_mbuff_skip_s cn56xx;
- struct cvmx_ipd_1st_mbuff_skip_s cn56xxp1;
- struct cvmx_ipd_1st_mbuff_skip_s cn58xx;
- struct cvmx_ipd_1st_mbuff_skip_s cn58xxp1;
- struct cvmx_ipd_1st_mbuff_skip_s cn61xx;
- struct cvmx_ipd_1st_mbuff_skip_s cn63xx;
- struct cvmx_ipd_1st_mbuff_skip_s cn63xxp1;
- struct cvmx_ipd_1st_mbuff_skip_s cn66xx;
- struct cvmx_ipd_1st_mbuff_skip_s cn68xx;
- struct cvmx_ipd_1st_mbuff_skip_s cn68xxp1;
- struct cvmx_ipd_1st_mbuff_skip_s cnf71xx;
};
union cvmx_ipd_1st_next_ptr_back {
@@ -139,24 +121,6 @@ union cvmx_ipd_1st_next_ptr_back {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_ipd_1st_next_ptr_back_s cn30xx;
- struct cvmx_ipd_1st_next_ptr_back_s cn31xx;
- struct cvmx_ipd_1st_next_ptr_back_s cn38xx;
- struct cvmx_ipd_1st_next_ptr_back_s cn38xxp2;
- struct cvmx_ipd_1st_next_ptr_back_s cn50xx;
- struct cvmx_ipd_1st_next_ptr_back_s cn52xx;
- struct cvmx_ipd_1st_next_ptr_back_s cn52xxp1;
- struct cvmx_ipd_1st_next_ptr_back_s cn56xx;
- struct cvmx_ipd_1st_next_ptr_back_s cn56xxp1;
- struct cvmx_ipd_1st_next_ptr_back_s cn58xx;
- struct cvmx_ipd_1st_next_ptr_back_s cn58xxp1;
- struct cvmx_ipd_1st_next_ptr_back_s cn61xx;
- struct cvmx_ipd_1st_next_ptr_back_s cn63xx;
- struct cvmx_ipd_1st_next_ptr_back_s cn63xxp1;
- struct cvmx_ipd_1st_next_ptr_back_s cn66xx;
- struct cvmx_ipd_1st_next_ptr_back_s cn68xx;
- struct cvmx_ipd_1st_next_ptr_back_s cn68xxp1;
- struct cvmx_ipd_1st_next_ptr_back_s cnf71xx;
};
union cvmx_ipd_2nd_next_ptr_back {
@@ -170,24 +134,6 @@ union cvmx_ipd_2nd_next_ptr_back {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_ipd_2nd_next_ptr_back_s cn30xx;
- struct cvmx_ipd_2nd_next_ptr_back_s cn31xx;
- struct cvmx_ipd_2nd_next_ptr_back_s cn38xx;
- struct cvmx_ipd_2nd_next_ptr_back_s cn38xxp2;
- struct cvmx_ipd_2nd_next_ptr_back_s cn50xx;
- struct cvmx_ipd_2nd_next_ptr_back_s cn52xx;
- struct cvmx_ipd_2nd_next_ptr_back_s cn52xxp1;
- struct cvmx_ipd_2nd_next_ptr_back_s cn56xx;
- struct cvmx_ipd_2nd_next_ptr_back_s cn56xxp1;
- struct cvmx_ipd_2nd_next_ptr_back_s cn58xx;
- struct cvmx_ipd_2nd_next_ptr_back_s cn58xxp1;
- struct cvmx_ipd_2nd_next_ptr_back_s cn61xx;
- struct cvmx_ipd_2nd_next_ptr_back_s cn63xx;
- struct cvmx_ipd_2nd_next_ptr_back_s cn63xxp1;
- struct cvmx_ipd_2nd_next_ptr_back_s cn66xx;
- struct cvmx_ipd_2nd_next_ptr_back_s cn68xx;
- struct cvmx_ipd_2nd_next_ptr_back_s cn68xxp1;
- struct cvmx_ipd_2nd_next_ptr_back_s cnf71xx;
};
union cvmx_ipd_bist_status {
@@ -284,10 +230,6 @@ union cvmx_ipd_bist_status {
uint64_t reserved_16_63:48;
#endif
} cn30xx;
- struct cvmx_ipd_bist_status_cn30xx cn31xx;
- struct cvmx_ipd_bist_status_cn30xx cn38xx;
- struct cvmx_ipd_bist_status_cn30xx cn38xxp2;
- struct cvmx_ipd_bist_status_cn30xx cn50xx;
struct cvmx_ipd_bist_status_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63:46;
@@ -331,18 +273,6 @@ union cvmx_ipd_bist_status {
uint64_t reserved_18_63:46;
#endif
} cn52xx;
- struct cvmx_ipd_bist_status_cn52xx cn52xxp1;
- struct cvmx_ipd_bist_status_cn52xx cn56xx;
- struct cvmx_ipd_bist_status_cn52xx cn56xxp1;
- struct cvmx_ipd_bist_status_cn30xx cn58xx;
- struct cvmx_ipd_bist_status_cn30xx cn58xxp1;
- struct cvmx_ipd_bist_status_cn52xx cn61xx;
- struct cvmx_ipd_bist_status_cn52xx cn63xx;
- struct cvmx_ipd_bist_status_cn52xx cn63xxp1;
- struct cvmx_ipd_bist_status_cn52xx cn66xx;
- struct cvmx_ipd_bist_status_s cn68xx;
- struct cvmx_ipd_bist_status_s cn68xxp1;
- struct cvmx_ipd_bist_status_cn52xx cnf71xx;
};
union cvmx_ipd_bp_prt_red_end {
@@ -365,10 +295,6 @@ union cvmx_ipd_bp_prt_red_end {
uint64_t reserved_36_63:28;
#endif
} cn30xx;
- struct cvmx_ipd_bp_prt_red_end_cn30xx cn31xx;
- struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xx;
- struct cvmx_ipd_bp_prt_red_end_cn30xx cn38xxp2;
- struct cvmx_ipd_bp_prt_red_end_cn30xx cn50xx;
struct cvmx_ipd_bp_prt_red_end_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
@@ -378,12 +304,6 @@ union cvmx_ipd_bp_prt_red_end {
uint64_t reserved_40_63:24;
#endif
} cn52xx;
- struct cvmx_ipd_bp_prt_red_end_cn52xx cn52xxp1;
- struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xx;
- struct cvmx_ipd_bp_prt_red_end_cn52xx cn56xxp1;
- struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xx;
- struct cvmx_ipd_bp_prt_red_end_cn30xx cn58xxp1;
- struct cvmx_ipd_bp_prt_red_end_s cn61xx;
struct cvmx_ipd_bp_prt_red_end_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_44_63:20;
@@ -393,9 +313,6 @@ union cvmx_ipd_bp_prt_red_end {
uint64_t reserved_44_63:20;
#endif
} cn63xx;
- struct cvmx_ipd_bp_prt_red_end_cn63xx cn63xxp1;
- struct cvmx_ipd_bp_prt_red_end_s cn66xx;
- struct cvmx_ipd_bp_prt_red_end_s cnf71xx;
};
union cvmx_ipd_bpidx_mbuf_th {
@@ -411,8 +328,6 @@ union cvmx_ipd_bpidx_mbuf_th {
uint64_t reserved_18_63:46;
#endif
} s;
- struct cvmx_ipd_bpidx_mbuf_th_s cn68xx;
- struct cvmx_ipd_bpidx_mbuf_th_s cn68xxp1;
};
union cvmx_ipd_bpid_bp_counterx {
@@ -426,8 +341,6 @@ union cvmx_ipd_bpid_bp_counterx {
uint64_t reserved_25_63:39;
#endif
} s;
- struct cvmx_ipd_bpid_bp_counterx_s cn68xx;
- struct cvmx_ipd_bpid_bp_counterx_s cn68xxp1;
};
union cvmx_ipd_clk_count {
@@ -439,24 +352,6 @@ union cvmx_ipd_clk_count {
uint64_t clk_cnt:64;
#endif
} s;
- struct cvmx_ipd_clk_count_s cn30xx;
- struct cvmx_ipd_clk_count_s cn31xx;
- struct cvmx_ipd_clk_count_s cn38xx;
- struct cvmx_ipd_clk_count_s cn38xxp2;
- struct cvmx_ipd_clk_count_s cn50xx;
- struct cvmx_ipd_clk_count_s cn52xx;
- struct cvmx_ipd_clk_count_s cn52xxp1;
- struct cvmx_ipd_clk_count_s cn56xx;
- struct cvmx_ipd_clk_count_s cn56xxp1;
- struct cvmx_ipd_clk_count_s cn58xx;
- struct cvmx_ipd_clk_count_s cn58xxp1;
- struct cvmx_ipd_clk_count_s cn61xx;
- struct cvmx_ipd_clk_count_s cn63xx;
- struct cvmx_ipd_clk_count_s cn63xxp1;
- struct cvmx_ipd_clk_count_s cn66xx;
- struct cvmx_ipd_clk_count_s cn68xx;
- struct cvmx_ipd_clk_count_s cn68xxp1;
- struct cvmx_ipd_clk_count_s cnf71xx;
};
union cvmx_ipd_credits {
@@ -472,8 +367,6 @@ union cvmx_ipd_credits {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_ipd_credits_s cn68xx;
- struct cvmx_ipd_credits_s cn68xxp1;
};
union cvmx_ipd_ctl_status {
@@ -544,8 +437,6 @@ union cvmx_ipd_ctl_status {
uint64_t reserved_10_63:54;
#endif
} cn30xx;
- struct cvmx_ipd_ctl_status_cn30xx cn31xx;
- struct cvmx_ipd_ctl_status_cn30xx cn38xx;
struct cvmx_ipd_ctl_status_cn38xxp2 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
@@ -604,10 +495,6 @@ union cvmx_ipd_ctl_status {
uint64_t reserved_15_63:49;
#endif
} cn50xx;
- struct cvmx_ipd_ctl_status_cn50xx cn52xx;
- struct cvmx_ipd_ctl_status_cn50xx cn52xxp1;
- struct cvmx_ipd_ctl_status_cn50xx cn56xx;
- struct cvmx_ipd_ctl_status_cn50xx cn56xxp1;
struct cvmx_ipd_ctl_status_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -637,9 +524,6 @@ union cvmx_ipd_ctl_status {
uint64_t reserved_12_63:52;
#endif
} cn58xx;
- struct cvmx_ipd_ctl_status_cn58xx cn58xxp1;
- struct cvmx_ipd_ctl_status_s cn61xx;
- struct cvmx_ipd_ctl_status_s cn63xx;
struct cvmx_ipd_ctl_status_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63:48;
@@ -677,10 +561,6 @@ union cvmx_ipd_ctl_status {
uint64_t reserved_16_63:48;
#endif
} cn63xxp1;
- struct cvmx_ipd_ctl_status_s cn66xx;
- struct cvmx_ipd_ctl_status_s cn68xx;
- struct cvmx_ipd_ctl_status_s cn68xxp1;
- struct cvmx_ipd_ctl_status_s cnf71xx;
};
union cvmx_ipd_ecc_ctl {
@@ -700,8 +580,6 @@ union cvmx_ipd_ecc_ctl {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_ipd_ecc_ctl_s cn68xx;
- struct cvmx_ipd_ecc_ctl_s cn68xxp1;
};
union cvmx_ipd_free_ptr_fifo_ctl {
@@ -723,8 +601,6 @@ union cvmx_ipd_free_ptr_fifo_ctl {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_ipd_free_ptr_fifo_ctl_s cn68xx;
- struct cvmx_ipd_free_ptr_fifo_ctl_s cn68xxp1;
};
union cvmx_ipd_free_ptr_value {
@@ -738,8 +614,6 @@ union cvmx_ipd_free_ptr_value {
uint64_t reserved_33_63:31;
#endif
} s;
- struct cvmx_ipd_free_ptr_value_s cn68xx;
- struct cvmx_ipd_free_ptr_value_s cn68xxp1;
};
union cvmx_ipd_hold_ptr_fifo_ctl {
@@ -761,8 +635,6 @@ union cvmx_ipd_hold_ptr_fifo_ctl {
uint64_t reserved_43_63:21;
#endif
} s;
- struct cvmx_ipd_hold_ptr_fifo_ctl_s cn68xx;
- struct cvmx_ipd_hold_ptr_fifo_ctl_s cn68xxp1;
};
union cvmx_ipd_int_enb {
@@ -837,7 +709,6 @@ union cvmx_ipd_int_enb {
uint64_t reserved_5_63:59;
#endif
} cn30xx;
- struct cvmx_ipd_int_enb_cn30xx cn31xx;
struct cvmx_ipd_int_enb_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
@@ -865,8 +736,6 @@ union cvmx_ipd_int_enb {
uint64_t reserved_10_63:54;
#endif
} cn38xx;
- struct cvmx_ipd_int_enb_cn30xx cn38xxp2;
- struct cvmx_ipd_int_enb_cn38xx cn50xx;
struct cvmx_ipd_int_enb_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -898,18 +767,6 @@ union cvmx_ipd_int_enb {
uint64_t reserved_12_63:52;
#endif
} cn52xx;
- struct cvmx_ipd_int_enb_cn52xx cn52xxp1;
- struct cvmx_ipd_int_enb_cn52xx cn56xx;
- struct cvmx_ipd_int_enb_cn52xx cn56xxp1;
- struct cvmx_ipd_int_enb_cn38xx cn58xx;
- struct cvmx_ipd_int_enb_cn38xx cn58xxp1;
- struct cvmx_ipd_int_enb_cn52xx cn61xx;
- struct cvmx_ipd_int_enb_cn52xx cn63xx;
- struct cvmx_ipd_int_enb_cn52xx cn63xxp1;
- struct cvmx_ipd_int_enb_cn52xx cn66xx;
- struct cvmx_ipd_int_enb_s cn68xx;
- struct cvmx_ipd_int_enb_s cn68xxp1;
- struct cvmx_ipd_int_enb_cn52xx cnf71xx;
};
union cvmx_ipd_int_sum {
@@ -984,7 +841,6 @@ union cvmx_ipd_int_sum {
uint64_t reserved_5_63:59;
#endif
} cn30xx;
- struct cvmx_ipd_int_sum_cn30xx cn31xx;
struct cvmx_ipd_int_sum_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
@@ -1012,8 +868,6 @@ union cvmx_ipd_int_sum {
uint64_t reserved_10_63:54;
#endif
} cn38xx;
- struct cvmx_ipd_int_sum_cn30xx cn38xxp2;
- struct cvmx_ipd_int_sum_cn38xx cn50xx;
struct cvmx_ipd_int_sum_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -1045,18 +899,6 @@ union cvmx_ipd_int_sum {
uint64_t reserved_12_63:52;
#endif
} cn52xx;
- struct cvmx_ipd_int_sum_cn52xx cn52xxp1;
- struct cvmx_ipd_int_sum_cn52xx cn56xx;
- struct cvmx_ipd_int_sum_cn52xx cn56xxp1;
- struct cvmx_ipd_int_sum_cn38xx cn58xx;
- struct cvmx_ipd_int_sum_cn38xx cn58xxp1;
- struct cvmx_ipd_int_sum_cn52xx cn61xx;
- struct cvmx_ipd_int_sum_cn52xx cn63xx;
- struct cvmx_ipd_int_sum_cn52xx cn63xxp1;
- struct cvmx_ipd_int_sum_cn52xx cn66xx;
- struct cvmx_ipd_int_sum_s cn68xx;
- struct cvmx_ipd_int_sum_s cn68xxp1;
- struct cvmx_ipd_int_sum_cn52xx cnf71xx;
};
union cvmx_ipd_next_pkt_ptr {
@@ -1070,8 +912,6 @@ union cvmx_ipd_next_pkt_ptr {
uint64_t reserved_33_63:31;
#endif
} s;
- struct cvmx_ipd_next_pkt_ptr_s cn68xx;
- struct cvmx_ipd_next_pkt_ptr_s cn68xxp1;
};
union cvmx_ipd_next_wqe_ptr {
@@ -1085,8 +925,6 @@ union cvmx_ipd_next_wqe_ptr {
uint64_t reserved_33_63:31;
#endif
} s;
- struct cvmx_ipd_next_wqe_ptr_s cn68xx;
- struct cvmx_ipd_next_wqe_ptr_s cn68xxp1;
};
union cvmx_ipd_not_1st_mbuff_skip {
@@ -1100,24 +938,6 @@ union cvmx_ipd_not_1st_mbuff_skip {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn30xx;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn31xx;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn38xx;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn38xxp2;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn50xx;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn52xx;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn52xxp1;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn56xx;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn56xxp1;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn58xx;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn58xxp1;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn61xx;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn63xx;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn63xxp1;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn66xx;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn68xx;
- struct cvmx_ipd_not_1st_mbuff_skip_s cn68xxp1;
- struct cvmx_ipd_not_1st_mbuff_skip_s cnf71xx;
};
union cvmx_ipd_on_bp_drop_pktx {
@@ -1129,8 +949,6 @@ union cvmx_ipd_on_bp_drop_pktx {
uint64_t prt_enb:64;
#endif
} s;
- struct cvmx_ipd_on_bp_drop_pktx_s cn68xx;
- struct cvmx_ipd_on_bp_drop_pktx_s cn68xxp1;
};
union cvmx_ipd_packet_mbuff_size {
@@ -1144,24 +962,6 @@ union cvmx_ipd_packet_mbuff_size {
uint64_t reserved_12_63:52;
#endif
} s;
- struct cvmx_ipd_packet_mbuff_size_s cn30xx;
- struct cvmx_ipd_packet_mbuff_size_s cn31xx;
- struct cvmx_ipd_packet_mbuff_size_s cn38xx;
- struct cvmx_ipd_packet_mbuff_size_s cn38xxp2;
- struct cvmx_ipd_packet_mbuff_size_s cn50xx;
- struct cvmx_ipd_packet_mbuff_size_s cn52xx;
- struct cvmx_ipd_packet_mbuff_size_s cn52xxp1;
- struct cvmx_ipd_packet_mbuff_size_s cn56xx;
- struct cvmx_ipd_packet_mbuff_size_s cn56xxp1;
- struct cvmx_ipd_packet_mbuff_size_s cn58xx;
- struct cvmx_ipd_packet_mbuff_size_s cn58xxp1;
- struct cvmx_ipd_packet_mbuff_size_s cn61xx;
- struct cvmx_ipd_packet_mbuff_size_s cn63xx;
- struct cvmx_ipd_packet_mbuff_size_s cn63xxp1;
- struct cvmx_ipd_packet_mbuff_size_s cn66xx;
- struct cvmx_ipd_packet_mbuff_size_s cn68xx;
- struct cvmx_ipd_packet_mbuff_size_s cn68xxp1;
- struct cvmx_ipd_packet_mbuff_size_s cnf71xx;
};
union cvmx_ipd_pkt_err {
@@ -1175,8 +975,6 @@ union cvmx_ipd_pkt_err {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_ipd_pkt_err_s cn68xx;
- struct cvmx_ipd_pkt_err_s cn68xxp1;
};
union cvmx_ipd_pkt_ptr_valid {
@@ -1190,21 +988,6 @@ union cvmx_ipd_pkt_ptr_valid {
uint64_t reserved_29_63:35;
#endif
} s;
- struct cvmx_ipd_pkt_ptr_valid_s cn30xx;
- struct cvmx_ipd_pkt_ptr_valid_s cn31xx;
- struct cvmx_ipd_pkt_ptr_valid_s cn38xx;
- struct cvmx_ipd_pkt_ptr_valid_s cn50xx;
- struct cvmx_ipd_pkt_ptr_valid_s cn52xx;
- struct cvmx_ipd_pkt_ptr_valid_s cn52xxp1;
- struct cvmx_ipd_pkt_ptr_valid_s cn56xx;
- struct cvmx_ipd_pkt_ptr_valid_s cn56xxp1;
- struct cvmx_ipd_pkt_ptr_valid_s cn58xx;
- struct cvmx_ipd_pkt_ptr_valid_s cn58xxp1;
- struct cvmx_ipd_pkt_ptr_valid_s cn61xx;
- struct cvmx_ipd_pkt_ptr_valid_s cn63xx;
- struct cvmx_ipd_pkt_ptr_valid_s cn63xxp1;
- struct cvmx_ipd_pkt_ptr_valid_s cn66xx;
- struct cvmx_ipd_pkt_ptr_valid_s cnf71xx;
};
union cvmx_ipd_portx_bp_page_cnt {
@@ -1220,22 +1003,6 @@ union cvmx_ipd_portx_bp_page_cnt {
uint64_t reserved_18_63:46;
#endif
} s;
- struct cvmx_ipd_portx_bp_page_cnt_s cn30xx;
- struct cvmx_ipd_portx_bp_page_cnt_s cn31xx;
- struct cvmx_ipd_portx_bp_page_cnt_s cn38xx;
- struct cvmx_ipd_portx_bp_page_cnt_s cn38xxp2;
- struct cvmx_ipd_portx_bp_page_cnt_s cn50xx;
- struct cvmx_ipd_portx_bp_page_cnt_s cn52xx;
- struct cvmx_ipd_portx_bp_page_cnt_s cn52xxp1;
- struct cvmx_ipd_portx_bp_page_cnt_s cn56xx;
- struct cvmx_ipd_portx_bp_page_cnt_s cn56xxp1;
- struct cvmx_ipd_portx_bp_page_cnt_s cn58xx;
- struct cvmx_ipd_portx_bp_page_cnt_s cn58xxp1;
- struct cvmx_ipd_portx_bp_page_cnt_s cn61xx;
- struct cvmx_ipd_portx_bp_page_cnt_s cn63xx;
- struct cvmx_ipd_portx_bp_page_cnt_s cn63xxp1;
- struct cvmx_ipd_portx_bp_page_cnt_s cn66xx;
- struct cvmx_ipd_portx_bp_page_cnt_s cnf71xx;
};
union cvmx_ipd_portx_bp_page_cnt2 {
@@ -1251,15 +1018,6 @@ union cvmx_ipd_portx_bp_page_cnt2 {
uint64_t reserved_18_63:46;
#endif
} s;
- struct cvmx_ipd_portx_bp_page_cnt2_s cn52xx;
- struct cvmx_ipd_portx_bp_page_cnt2_s cn52xxp1;
- struct cvmx_ipd_portx_bp_page_cnt2_s cn56xx;
- struct cvmx_ipd_portx_bp_page_cnt2_s cn56xxp1;
- struct cvmx_ipd_portx_bp_page_cnt2_s cn61xx;
- struct cvmx_ipd_portx_bp_page_cnt2_s cn63xx;
- struct cvmx_ipd_portx_bp_page_cnt2_s cn63xxp1;
- struct cvmx_ipd_portx_bp_page_cnt2_s cn66xx;
- struct cvmx_ipd_portx_bp_page_cnt2_s cnf71xx;
};
union cvmx_ipd_portx_bp_page_cnt3 {
@@ -1275,11 +1033,6 @@ union cvmx_ipd_portx_bp_page_cnt3 {
uint64_t reserved_18_63:46;
#endif
} s;
- struct cvmx_ipd_portx_bp_page_cnt3_s cn61xx;
- struct cvmx_ipd_portx_bp_page_cnt3_s cn63xx;
- struct cvmx_ipd_portx_bp_page_cnt3_s cn63xxp1;
- struct cvmx_ipd_portx_bp_page_cnt3_s cn66xx;
- struct cvmx_ipd_portx_bp_page_cnt3_s cnf71xx;
};
union cvmx_ipd_port_bp_counters2_pairx {
@@ -1293,15 +1046,6 @@ union cvmx_ipd_port_bp_counters2_pairx {
uint64_t reserved_25_63:39;
#endif
} s;
- struct cvmx_ipd_port_bp_counters2_pairx_s cn52xx;
- struct cvmx_ipd_port_bp_counters2_pairx_s cn52xxp1;
- struct cvmx_ipd_port_bp_counters2_pairx_s cn56xx;
- struct cvmx_ipd_port_bp_counters2_pairx_s cn56xxp1;
- struct cvmx_ipd_port_bp_counters2_pairx_s cn61xx;
- struct cvmx_ipd_port_bp_counters2_pairx_s cn63xx;
- struct cvmx_ipd_port_bp_counters2_pairx_s cn63xxp1;
- struct cvmx_ipd_port_bp_counters2_pairx_s cn66xx;
- struct cvmx_ipd_port_bp_counters2_pairx_s cnf71xx;
};
union cvmx_ipd_port_bp_counters3_pairx {
@@ -1315,11 +1059,6 @@ union cvmx_ipd_port_bp_counters3_pairx {
uint64_t reserved_25_63:39;
#endif
} s;
- struct cvmx_ipd_port_bp_counters3_pairx_s cn61xx;
- struct cvmx_ipd_port_bp_counters3_pairx_s cn63xx;
- struct cvmx_ipd_port_bp_counters3_pairx_s cn63xxp1;
- struct cvmx_ipd_port_bp_counters3_pairx_s cn66xx;
- struct cvmx_ipd_port_bp_counters3_pairx_s cnf71xx;
};
union cvmx_ipd_port_bp_counters4_pairx {
@@ -1333,9 +1072,6 @@ union cvmx_ipd_port_bp_counters4_pairx {
uint64_t reserved_25_63:39;
#endif
} s;
- struct cvmx_ipd_port_bp_counters4_pairx_s cn61xx;
- struct cvmx_ipd_port_bp_counters4_pairx_s cn66xx;
- struct cvmx_ipd_port_bp_counters4_pairx_s cnf71xx;
};
union cvmx_ipd_port_bp_counters_pairx {
@@ -1349,22 +1085,6 @@ union cvmx_ipd_port_bp_counters_pairx {
uint64_t reserved_25_63:39;
#endif
} s;
- struct cvmx_ipd_port_bp_counters_pairx_s cn30xx;
- struct cvmx_ipd_port_bp_counters_pairx_s cn31xx;
- struct cvmx_ipd_port_bp_counters_pairx_s cn38xx;
- struct cvmx_ipd_port_bp_counters_pairx_s cn38xxp2;
- struct cvmx_ipd_port_bp_counters_pairx_s cn50xx;
- struct cvmx_ipd_port_bp_counters_pairx_s cn52xx;
- struct cvmx_ipd_port_bp_counters_pairx_s cn52xxp1;
- struct cvmx_ipd_port_bp_counters_pairx_s cn56xx;
- struct cvmx_ipd_port_bp_counters_pairx_s cn56xxp1;
- struct cvmx_ipd_port_bp_counters_pairx_s cn58xx;
- struct cvmx_ipd_port_bp_counters_pairx_s cn58xxp1;
- struct cvmx_ipd_port_bp_counters_pairx_s cn61xx;
- struct cvmx_ipd_port_bp_counters_pairx_s cn63xx;
- struct cvmx_ipd_port_bp_counters_pairx_s cn63xxp1;
- struct cvmx_ipd_port_bp_counters_pairx_s cn66xx;
- struct cvmx_ipd_port_bp_counters_pairx_s cnf71xx;
};
union cvmx_ipd_port_ptr_fifo_ctl {
@@ -1384,8 +1104,6 @@ union cvmx_ipd_port_ptr_fifo_ctl {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_ipd_port_ptr_fifo_ctl_s cn68xx;
- struct cvmx_ipd_port_ptr_fifo_ctl_s cn68xxp1;
};
union cvmx_ipd_port_qos_x_cnt {
@@ -1399,17 +1117,6 @@ union cvmx_ipd_port_qos_x_cnt {
uint64_t wmark:32;
#endif
} s;
- struct cvmx_ipd_port_qos_x_cnt_s cn52xx;
- struct cvmx_ipd_port_qos_x_cnt_s cn52xxp1;
- struct cvmx_ipd_port_qos_x_cnt_s cn56xx;
- struct cvmx_ipd_port_qos_x_cnt_s cn56xxp1;
- struct cvmx_ipd_port_qos_x_cnt_s cn61xx;
- struct cvmx_ipd_port_qos_x_cnt_s cn63xx;
- struct cvmx_ipd_port_qos_x_cnt_s cn63xxp1;
- struct cvmx_ipd_port_qos_x_cnt_s cn66xx;
- struct cvmx_ipd_port_qos_x_cnt_s cn68xx;
- struct cvmx_ipd_port_qos_x_cnt_s cn68xxp1;
- struct cvmx_ipd_port_qos_x_cnt_s cnf71xx;
};
union cvmx_ipd_port_qos_intx {
@@ -1421,17 +1128,6 @@ union cvmx_ipd_port_qos_intx {
uint64_t intr:64;
#endif
} s;
- struct cvmx_ipd_port_qos_intx_s cn52xx;
- struct cvmx_ipd_port_qos_intx_s cn52xxp1;
- struct cvmx_ipd_port_qos_intx_s cn56xx;
- struct cvmx_ipd_port_qos_intx_s cn56xxp1;
- struct cvmx_ipd_port_qos_intx_s cn61xx;
- struct cvmx_ipd_port_qos_intx_s cn63xx;
- struct cvmx_ipd_port_qos_intx_s cn63xxp1;
- struct cvmx_ipd_port_qos_intx_s cn66xx;
- struct cvmx_ipd_port_qos_intx_s cn68xx;
- struct cvmx_ipd_port_qos_intx_s cn68xxp1;
- struct cvmx_ipd_port_qos_intx_s cnf71xx;
};
union cvmx_ipd_port_qos_int_enbx {
@@ -1443,17 +1139,6 @@ union cvmx_ipd_port_qos_int_enbx {
uint64_t enb:64;
#endif
} s;
- struct cvmx_ipd_port_qos_int_enbx_s cn52xx;
- struct cvmx_ipd_port_qos_int_enbx_s cn52xxp1;
- struct cvmx_ipd_port_qos_int_enbx_s cn56xx;
- struct cvmx_ipd_port_qos_int_enbx_s cn56xxp1;
- struct cvmx_ipd_port_qos_int_enbx_s cn61xx;
- struct cvmx_ipd_port_qos_int_enbx_s cn63xx;
- struct cvmx_ipd_port_qos_int_enbx_s cn63xxp1;
- struct cvmx_ipd_port_qos_int_enbx_s cn66xx;
- struct cvmx_ipd_port_qos_int_enbx_s cn68xx;
- struct cvmx_ipd_port_qos_int_enbx_s cn68xxp1;
- struct cvmx_ipd_port_qos_int_enbx_s cnf71xx;
};
union cvmx_ipd_port_sopx {
@@ -1465,8 +1150,6 @@ union cvmx_ipd_port_sopx {
uint64_t sop:64;
#endif
} s;
- struct cvmx_ipd_port_sopx_s cn68xx;
- struct cvmx_ipd_port_sopx_s cn68xxp1;
};
union cvmx_ipd_prc_hold_ptr_fifo_ctl {
@@ -1488,21 +1171,6 @@ union cvmx_ipd_prc_hold_ptr_fifo_ctl {
uint64_t reserved_39_63:25;
#endif
} s;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn30xx;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn31xx;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn38xx;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn50xx;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xx;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn52xxp1;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xx;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn56xxp1;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xx;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn58xxp1;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn61xx;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xx;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn63xxp1;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cn66xx;
- struct cvmx_ipd_prc_hold_ptr_fifo_ctl_s cnf71xx;
};
union cvmx_ipd_prc_port_ptr_fifo_ctl {
@@ -1522,21 +1190,6 @@ union cvmx_ipd_prc_port_ptr_fifo_ctl {
uint64_t reserved_44_63:20;
#endif
} s;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn30xx;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn31xx;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn38xx;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn50xx;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xx;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn52xxp1;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xx;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn56xxp1;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xx;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn58xxp1;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn61xx;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xx;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn63xxp1;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cn66xx;
- struct cvmx_ipd_prc_port_ptr_fifo_ctl_s cnf71xx;
};
union cvmx_ipd_ptr_count {
@@ -1558,24 +1211,6 @@ union cvmx_ipd_ptr_count {
uint64_t reserved_19_63:45;
#endif
} s;
- struct cvmx_ipd_ptr_count_s cn30xx;
- struct cvmx_ipd_ptr_count_s cn31xx;
- struct cvmx_ipd_ptr_count_s cn38xx;
- struct cvmx_ipd_ptr_count_s cn38xxp2;
- struct cvmx_ipd_ptr_count_s cn50xx;
- struct cvmx_ipd_ptr_count_s cn52xx;
- struct cvmx_ipd_ptr_count_s cn52xxp1;
- struct cvmx_ipd_ptr_count_s cn56xx;
- struct cvmx_ipd_ptr_count_s cn56xxp1;
- struct cvmx_ipd_ptr_count_s cn58xx;
- struct cvmx_ipd_ptr_count_s cn58xxp1;
- struct cvmx_ipd_ptr_count_s cn61xx;
- struct cvmx_ipd_ptr_count_s cn63xx;
- struct cvmx_ipd_ptr_count_s cn63xxp1;
- struct cvmx_ipd_ptr_count_s cn66xx;
- struct cvmx_ipd_ptr_count_s cn68xx;
- struct cvmx_ipd_ptr_count_s cn68xxp1;
- struct cvmx_ipd_ptr_count_s cnf71xx;
};
union cvmx_ipd_pwp_ptr_fifo_ctl {
@@ -1599,21 +1234,6 @@ union cvmx_ipd_pwp_ptr_fifo_ctl {
uint64_t reserved_61_63:3;
#endif
} s;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn30xx;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn31xx;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn38xx;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn50xx;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xx;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn52xxp1;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xx;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn56xxp1;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xx;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn58xxp1;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn61xx;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xx;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn63xxp1;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cn66xx;
- struct cvmx_ipd_pwp_ptr_fifo_ctl_s cnf71xx;
};
union cvmx_ipd_qosx_red_marks {
@@ -1627,24 +1247,6 @@ union cvmx_ipd_qosx_red_marks {
uint64_t drop:32;
#endif
} s;
- struct cvmx_ipd_qosx_red_marks_s cn30xx;
- struct cvmx_ipd_qosx_red_marks_s cn31xx;
- struct cvmx_ipd_qosx_red_marks_s cn38xx;
- struct cvmx_ipd_qosx_red_marks_s cn38xxp2;
- struct cvmx_ipd_qosx_red_marks_s cn50xx;
- struct cvmx_ipd_qosx_red_marks_s cn52xx;
- struct cvmx_ipd_qosx_red_marks_s cn52xxp1;
- struct cvmx_ipd_qosx_red_marks_s cn56xx;
- struct cvmx_ipd_qosx_red_marks_s cn56xxp1;
- struct cvmx_ipd_qosx_red_marks_s cn58xx;
- struct cvmx_ipd_qosx_red_marks_s cn58xxp1;
- struct cvmx_ipd_qosx_red_marks_s cn61xx;
- struct cvmx_ipd_qosx_red_marks_s cn63xx;
- struct cvmx_ipd_qosx_red_marks_s cn63xxp1;
- struct cvmx_ipd_qosx_red_marks_s cn66xx;
- struct cvmx_ipd_qosx_red_marks_s cn68xx;
- struct cvmx_ipd_qosx_red_marks_s cn68xxp1;
- struct cvmx_ipd_qosx_red_marks_s cnf71xx;
};
union cvmx_ipd_que0_free_page_cnt {
@@ -1658,24 +1260,6 @@ union cvmx_ipd_que0_free_page_cnt {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_ipd_que0_free_page_cnt_s cn30xx;
- struct cvmx_ipd_que0_free_page_cnt_s cn31xx;
- struct cvmx_ipd_que0_free_page_cnt_s cn38xx;
- struct cvmx_ipd_que0_free_page_cnt_s cn38xxp2;
- struct cvmx_ipd_que0_free_page_cnt_s cn50xx;
- struct cvmx_ipd_que0_free_page_cnt_s cn52xx;
- struct cvmx_ipd_que0_free_page_cnt_s cn52xxp1;
- struct cvmx_ipd_que0_free_page_cnt_s cn56xx;
- struct cvmx_ipd_que0_free_page_cnt_s cn56xxp1;
- struct cvmx_ipd_que0_free_page_cnt_s cn58xx;
- struct cvmx_ipd_que0_free_page_cnt_s cn58xxp1;
- struct cvmx_ipd_que0_free_page_cnt_s cn61xx;
- struct cvmx_ipd_que0_free_page_cnt_s cn63xx;
- struct cvmx_ipd_que0_free_page_cnt_s cn63xxp1;
- struct cvmx_ipd_que0_free_page_cnt_s cn66xx;
- struct cvmx_ipd_que0_free_page_cnt_s cn68xx;
- struct cvmx_ipd_que0_free_page_cnt_s cn68xxp1;
- struct cvmx_ipd_que0_free_page_cnt_s cnf71xx;
};
union cvmx_ipd_red_bpid_enablex {
@@ -1687,8 +1271,6 @@ union cvmx_ipd_red_bpid_enablex {
uint64_t prt_enb:64;
#endif
} s;
- struct cvmx_ipd_red_bpid_enablex_s cn68xx;
- struct cvmx_ipd_red_bpid_enablex_s cn68xxp1;
};
union cvmx_ipd_red_delay {
@@ -1704,8 +1286,6 @@ union cvmx_ipd_red_delay {
uint64_t reserved_28_63:36;
#endif
} s;
- struct cvmx_ipd_red_delay_s cn68xx;
- struct cvmx_ipd_red_delay_s cn68xxp1;
};
union cvmx_ipd_red_port_enable {
@@ -1721,22 +1301,6 @@ union cvmx_ipd_red_port_enable {
uint64_t prb_dly:14;
#endif
} s;
- struct cvmx_ipd_red_port_enable_s cn30xx;
- struct cvmx_ipd_red_port_enable_s cn31xx;
- struct cvmx_ipd_red_port_enable_s cn38xx;
- struct cvmx_ipd_red_port_enable_s cn38xxp2;
- struct cvmx_ipd_red_port_enable_s cn50xx;
- struct cvmx_ipd_red_port_enable_s cn52xx;
- struct cvmx_ipd_red_port_enable_s cn52xxp1;
- struct cvmx_ipd_red_port_enable_s cn56xx;
- struct cvmx_ipd_red_port_enable_s cn56xxp1;
- struct cvmx_ipd_red_port_enable_s cn58xx;
- struct cvmx_ipd_red_port_enable_s cn58xxp1;
- struct cvmx_ipd_red_port_enable_s cn61xx;
- struct cvmx_ipd_red_port_enable_s cn63xx;
- struct cvmx_ipd_red_port_enable_s cn63xxp1;
- struct cvmx_ipd_red_port_enable_s cn66xx;
- struct cvmx_ipd_red_port_enable_s cnf71xx;
};
union cvmx_ipd_red_port_enable2 {
@@ -1759,10 +1323,6 @@ union cvmx_ipd_red_port_enable2 {
uint64_t reserved_4_63:60;
#endif
} cn52xx;
- struct cvmx_ipd_red_port_enable2_cn52xx cn52xxp1;
- struct cvmx_ipd_red_port_enable2_cn52xx cn56xx;
- struct cvmx_ipd_red_port_enable2_cn52xx cn56xxp1;
- struct cvmx_ipd_red_port_enable2_s cn61xx;
struct cvmx_ipd_red_port_enable2_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
@@ -1772,9 +1332,6 @@ union cvmx_ipd_red_port_enable2 {
uint64_t reserved_8_63:56;
#endif
} cn63xx;
- struct cvmx_ipd_red_port_enable2_cn63xx cn63xxp1;
- struct cvmx_ipd_red_port_enable2_s cn66xx;
- struct cvmx_ipd_red_port_enable2_s cnf71xx;
};
union cvmx_ipd_red_quex_param {
@@ -1794,24 +1351,6 @@ union cvmx_ipd_red_quex_param {
uint64_t reserved_49_63:15;
#endif
} s;
- struct cvmx_ipd_red_quex_param_s cn30xx;
- struct cvmx_ipd_red_quex_param_s cn31xx;
- struct cvmx_ipd_red_quex_param_s cn38xx;
- struct cvmx_ipd_red_quex_param_s cn38xxp2;
- struct cvmx_ipd_red_quex_param_s cn50xx;
- struct cvmx_ipd_red_quex_param_s cn52xx;
- struct cvmx_ipd_red_quex_param_s cn52xxp1;
- struct cvmx_ipd_red_quex_param_s cn56xx;
- struct cvmx_ipd_red_quex_param_s cn56xxp1;
- struct cvmx_ipd_red_quex_param_s cn58xx;
- struct cvmx_ipd_red_quex_param_s cn58xxp1;
- struct cvmx_ipd_red_quex_param_s cn61xx;
- struct cvmx_ipd_red_quex_param_s cn63xx;
- struct cvmx_ipd_red_quex_param_s cn63xxp1;
- struct cvmx_ipd_red_quex_param_s cn66xx;
- struct cvmx_ipd_red_quex_param_s cn68xx;
- struct cvmx_ipd_red_quex_param_s cn68xxp1;
- struct cvmx_ipd_red_quex_param_s cnf71xx;
};
union cvmx_ipd_req_wgt {
@@ -1837,7 +1376,6 @@ union cvmx_ipd_req_wgt {
uint64_t wgt7:8;
#endif
} s;
- struct cvmx_ipd_req_wgt_s cn68xx;
};
union cvmx_ipd_sub_port_bp_page_cnt {
@@ -1853,24 +1391,6 @@ union cvmx_ipd_sub_port_bp_page_cnt {
uint64_t reserved_31_63:33;
#endif
} s;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn30xx;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn31xx;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xx;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn38xxp2;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn50xx;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xx;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn52xxp1;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xx;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn56xxp1;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xx;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn58xxp1;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn61xx;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xx;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn63xxp1;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn66xx;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn68xx;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cn68xxp1;
- struct cvmx_ipd_sub_port_bp_page_cnt_s cnf71xx;
};
union cvmx_ipd_sub_port_fcs {
@@ -1897,7 +1417,6 @@ union cvmx_ipd_sub_port_fcs {
uint64_t reserved_3_63:61;
#endif
} cn30xx;
- struct cvmx_ipd_sub_port_fcs_cn30xx cn31xx;
struct cvmx_ipd_sub_port_fcs_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
@@ -1907,19 +1426,6 @@ union cvmx_ipd_sub_port_fcs {
uint64_t reserved_32_63:32;
#endif
} cn38xx;
- struct cvmx_ipd_sub_port_fcs_cn38xx cn38xxp2;
- struct cvmx_ipd_sub_port_fcs_cn30xx cn50xx;
- struct cvmx_ipd_sub_port_fcs_s cn52xx;
- struct cvmx_ipd_sub_port_fcs_s cn52xxp1;
- struct cvmx_ipd_sub_port_fcs_s cn56xx;
- struct cvmx_ipd_sub_port_fcs_s cn56xxp1;
- struct cvmx_ipd_sub_port_fcs_cn38xx cn58xx;
- struct cvmx_ipd_sub_port_fcs_cn38xx cn58xxp1;
- struct cvmx_ipd_sub_port_fcs_s cn61xx;
- struct cvmx_ipd_sub_port_fcs_s cn63xx;
- struct cvmx_ipd_sub_port_fcs_s cn63xxp1;
- struct cvmx_ipd_sub_port_fcs_s cn66xx;
- struct cvmx_ipd_sub_port_fcs_s cnf71xx;
};
union cvmx_ipd_sub_port_qos_cnt {
@@ -1935,17 +1441,6 @@ union cvmx_ipd_sub_port_qos_cnt {
uint64_t reserved_41_63:23;
#endif
} s;
- struct cvmx_ipd_sub_port_qos_cnt_s cn52xx;
- struct cvmx_ipd_sub_port_qos_cnt_s cn52xxp1;
- struct cvmx_ipd_sub_port_qos_cnt_s cn56xx;
- struct cvmx_ipd_sub_port_qos_cnt_s cn56xxp1;
- struct cvmx_ipd_sub_port_qos_cnt_s cn61xx;
- struct cvmx_ipd_sub_port_qos_cnt_s cn63xx;
- struct cvmx_ipd_sub_port_qos_cnt_s cn63xxp1;
- struct cvmx_ipd_sub_port_qos_cnt_s cn66xx;
- struct cvmx_ipd_sub_port_qos_cnt_s cn68xx;
- struct cvmx_ipd_sub_port_qos_cnt_s cn68xxp1;
- struct cvmx_ipd_sub_port_qos_cnt_s cnf71xx;
};
union cvmx_ipd_wqe_fpa_queue {
@@ -1959,24 +1454,6 @@ union cvmx_ipd_wqe_fpa_queue {
uint64_t reserved_3_63:61;
#endif
} s;
- struct cvmx_ipd_wqe_fpa_queue_s cn30xx;
- struct cvmx_ipd_wqe_fpa_queue_s cn31xx;
- struct cvmx_ipd_wqe_fpa_queue_s cn38xx;
- struct cvmx_ipd_wqe_fpa_queue_s cn38xxp2;
- struct cvmx_ipd_wqe_fpa_queue_s cn50xx;
- struct cvmx_ipd_wqe_fpa_queue_s cn52xx;
- struct cvmx_ipd_wqe_fpa_queue_s cn52xxp1;
- struct cvmx_ipd_wqe_fpa_queue_s cn56xx;
- struct cvmx_ipd_wqe_fpa_queue_s cn56xxp1;
- struct cvmx_ipd_wqe_fpa_queue_s cn58xx;
- struct cvmx_ipd_wqe_fpa_queue_s cn58xxp1;
- struct cvmx_ipd_wqe_fpa_queue_s cn61xx;
- struct cvmx_ipd_wqe_fpa_queue_s cn63xx;
- struct cvmx_ipd_wqe_fpa_queue_s cn63xxp1;
- struct cvmx_ipd_wqe_fpa_queue_s cn66xx;
- struct cvmx_ipd_wqe_fpa_queue_s cn68xx;
- struct cvmx_ipd_wqe_fpa_queue_s cn68xxp1;
- struct cvmx_ipd_wqe_fpa_queue_s cnf71xx;
};
union cvmx_ipd_wqe_ptr_valid {
@@ -1990,21 +1467,6 @@ union cvmx_ipd_wqe_ptr_valid {
uint64_t reserved_29_63:35;
#endif
} s;
- struct cvmx_ipd_wqe_ptr_valid_s cn30xx;
- struct cvmx_ipd_wqe_ptr_valid_s cn31xx;
- struct cvmx_ipd_wqe_ptr_valid_s cn38xx;
- struct cvmx_ipd_wqe_ptr_valid_s cn50xx;
- struct cvmx_ipd_wqe_ptr_valid_s cn52xx;
- struct cvmx_ipd_wqe_ptr_valid_s cn52xxp1;
- struct cvmx_ipd_wqe_ptr_valid_s cn56xx;
- struct cvmx_ipd_wqe_ptr_valid_s cn56xxp1;
- struct cvmx_ipd_wqe_ptr_valid_s cn58xx;
- struct cvmx_ipd_wqe_ptr_valid_s cn58xxp1;
- struct cvmx_ipd_wqe_ptr_valid_s cn61xx;
- struct cvmx_ipd_wqe_ptr_valid_s cn63xx;
- struct cvmx_ipd_wqe_ptr_valid_s cn63xxp1;
- struct cvmx_ipd_wqe_ptr_valid_s cn66xx;
- struct cvmx_ipd_wqe_ptr_valid_s cnf71xx;
};
#endif
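Every member removed from cvmx-ipd-defs.h above follows the same pattern: the per-model view (cn30xx, cn38xx, ...) was declared with a layout identical to another view already in the union, so callers can read the register through the surviving view instead. A minimal sketch of the access pattern, assuming the usual Octeon CSR helpers (cvmx_read_csr() and the CVMX_IPD_CLK_COUNT address macro; verify both against your tree):

	/* Hedged sketch, not part of the patch: read the IPD clock
	 * counter and pick the field out of the generic "s" view. */
	union cvmx_ipd_clk_count clk;

	clk.u64 = cvmx_read_csr(CVMX_IPD_CLK_COUNT);

	/* Before this patch, clk.cn38xx.clk_cnt named the same bits;
	 * the deleted members were exact aliases of "s". */
	uint64_t cycles = clk.s.clk_cnt;

Because every view overlays the one u64 word, dropping unused aliases changes neither the union's size nor any field offset.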
diff --git a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
index fe50671fd1bb..06ea13251448 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2t-defs.h
@@ -104,7 +104,6 @@ union cvmx_l2t_err {
__BITFIELD_FIELD(uint64_t ecc_ena:1,
;)))))))))))))
} cn38xx;
- struct cvmx_l2t_err_cn38xx cn38xxp2;
struct cvmx_l2t_err_cn50xx {
__BITFIELD_FIELD(uint64_t reserved_28_63:36,
__BITFIELD_FIELD(uint64_t lck_intena2:1,
@@ -139,11 +138,6 @@ union cvmx_l2t_err {
__BITFIELD_FIELD(uint64_t ecc_ena:1,
;))))))))))))))
} cn52xx;
- struct cvmx_l2t_err_cn52xx cn52xxp1;
- struct cvmx_l2t_err_s cn56xx;
- struct cvmx_l2t_err_s cn56xxp1;
- struct cvmx_l2t_err_s cn58xx;
- struct cvmx_l2t_err_s cn58xxp1;
};
#endif
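Unlike the paired #ifdef __BIG_ENDIAN_BITFIELD blocks in the other headers, cvmx-l2t-defs.h declares its layouts with the __BITFIELD_FIELD() helper from arch/mips/include/asm/bitfield.h, which is why the hunks above end in long runs of closing parentheses: each field is one nested invocation, and the trailing ";)))..." terminates them all. A sketch of how a two-field layout expands:

	/* __BITFIELD_FIELD(field, more) emits the fields in declaration
	 * order on big-endian and in reverse order on little-endian,
	 * so one declaration serves both bitfield layouts: */
	struct example {
		__BITFIELD_FIELD(uint64_t reserved_1_63:63,
		__BITFIELD_FIELD(uint64_t ena:1,
		;))
	};
	/* big-endian:    uint64_t reserved_1_63:63; uint64_t ena:1;
	 * little-endian: uint64_t ena:1; uint64_t reserved_1_63:63; */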
diff --git a/arch/mips/include/asm/octeon/cvmx-led-defs.h b/arch/mips/include/asm/octeon/cvmx-led-defs.h
index d36d42b8307b..0237907522cb 100644
--- a/arch/mips/include/asm/octeon/cvmx-led-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-led-defs.h
@@ -53,12 +53,6 @@ union cvmx_led_blink {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_led_blink_s cn38xx;
- struct cvmx_led_blink_s cn38xxp2;
- struct cvmx_led_blink_s cn56xx;
- struct cvmx_led_blink_s cn56xxp1;
- struct cvmx_led_blink_s cn58xx;
- struct cvmx_led_blink_s cn58xxp1;
};
union cvmx_led_clk_phase {
@@ -72,12 +66,6 @@ union cvmx_led_clk_phase {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_led_clk_phase_s cn38xx;
- struct cvmx_led_clk_phase_s cn38xxp2;
- struct cvmx_led_clk_phase_s cn56xx;
- struct cvmx_led_clk_phase_s cn56xxp1;
- struct cvmx_led_clk_phase_s cn58xx;
- struct cvmx_led_clk_phase_s cn58xxp1;
};
union cvmx_led_cylon {
@@ -91,12 +79,6 @@ union cvmx_led_cylon {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_led_cylon_s cn38xx;
- struct cvmx_led_cylon_s cn38xxp2;
- struct cvmx_led_cylon_s cn56xx;
- struct cvmx_led_cylon_s cn56xxp1;
- struct cvmx_led_cylon_s cn58xx;
- struct cvmx_led_cylon_s cn58xxp1;
};
union cvmx_led_dbg {
@@ -110,12 +92,6 @@ union cvmx_led_dbg {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_led_dbg_s cn38xx;
- struct cvmx_led_dbg_s cn38xxp2;
- struct cvmx_led_dbg_s cn56xx;
- struct cvmx_led_dbg_s cn56xxp1;
- struct cvmx_led_dbg_s cn58xx;
- struct cvmx_led_dbg_s cn58xxp1;
};
union cvmx_led_en {
@@ -129,12 +105,6 @@ union cvmx_led_en {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_led_en_s cn38xx;
- struct cvmx_led_en_s cn38xxp2;
- struct cvmx_led_en_s cn56xx;
- struct cvmx_led_en_s cn56xxp1;
- struct cvmx_led_en_s cn58xx;
- struct cvmx_led_en_s cn58xxp1;
};
union cvmx_led_polarity {
@@ -148,12 +118,6 @@ union cvmx_led_polarity {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_led_polarity_s cn38xx;
- struct cvmx_led_polarity_s cn38xxp2;
- struct cvmx_led_polarity_s cn56xx;
- struct cvmx_led_polarity_s cn56xxp1;
- struct cvmx_led_polarity_s cn58xx;
- struct cvmx_led_polarity_s cn58xxp1;
};
union cvmx_led_prt {
@@ -167,12 +131,6 @@ union cvmx_led_prt {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_led_prt_s cn38xx;
- struct cvmx_led_prt_s cn38xxp2;
- struct cvmx_led_prt_s cn56xx;
- struct cvmx_led_prt_s cn56xxp1;
- struct cvmx_led_prt_s cn58xx;
- struct cvmx_led_prt_s cn58xxp1;
};
union cvmx_led_prt_fmt {
@@ -186,12 +144,6 @@ union cvmx_led_prt_fmt {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_led_prt_fmt_s cn38xx;
- struct cvmx_led_prt_fmt_s cn38xxp2;
- struct cvmx_led_prt_fmt_s cn56xx;
- struct cvmx_led_prt_fmt_s cn56xxp1;
- struct cvmx_led_prt_fmt_s cn58xx;
- struct cvmx_led_prt_fmt_s cn58xxp1;
};
union cvmx_led_prt_statusx {
@@ -205,12 +157,6 @@ union cvmx_led_prt_statusx {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_led_prt_statusx_s cn38xx;
- struct cvmx_led_prt_statusx_s cn38xxp2;
- struct cvmx_led_prt_statusx_s cn56xx;
- struct cvmx_led_prt_statusx_s cn56xxp1;
- struct cvmx_led_prt_statusx_s cn58xx;
- struct cvmx_led_prt_statusx_s cn58xxp1;
};
union cvmx_led_udd_cntx {
@@ -224,12 +170,6 @@ union cvmx_led_udd_cntx {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_led_udd_cntx_s cn38xx;
- struct cvmx_led_udd_cntx_s cn38xxp2;
- struct cvmx_led_udd_cntx_s cn56xx;
- struct cvmx_led_udd_cntx_s cn56xxp1;
- struct cvmx_led_udd_cntx_s cn58xx;
- struct cvmx_led_udd_cntx_s cn58xxp1;
};
union cvmx_led_udd_datx {
@@ -243,12 +183,6 @@ union cvmx_led_udd_datx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_led_udd_datx_s cn38xx;
- struct cvmx_led_udd_datx_s cn38xxp2;
- struct cvmx_led_udd_datx_s cn56xx;
- struct cvmx_led_udd_datx_s cn56xxp1;
- struct cvmx_led_udd_datx_s cn58xx;
- struct cvmx_led_udd_datx_s cn58xxp1;
};
union cvmx_led_udd_dat_clrx {
@@ -262,12 +196,6 @@ union cvmx_led_udd_dat_clrx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_led_udd_dat_clrx_s cn38xx;
- struct cvmx_led_udd_dat_clrx_s cn38xxp2;
- struct cvmx_led_udd_dat_clrx_s cn56xx;
- struct cvmx_led_udd_dat_clrx_s cn56xxp1;
- struct cvmx_led_udd_dat_clrx_s cn58xx;
- struct cvmx_led_udd_dat_clrx_s cn58xxp1;
};
union cvmx_led_udd_dat_setx {
@@ -281,12 +209,6 @@ union cvmx_led_udd_dat_setx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_led_udd_dat_setx_s cn38xx;
- struct cvmx_led_udd_dat_setx_s cn38xxp2;
- struct cvmx_led_udd_dat_setx_s cn56xx;
- struct cvmx_led_udd_dat_setx_s cn56xxp1;
- struct cvmx_led_udd_dat_setx_s cn58xx;
- struct cvmx_led_udd_dat_setx_s cn58xxp1;
};
#endif
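The cvmx-led-defs.h unions trimmed above are all plain 64-bit CSR overlays, so the deletions are layout-neutral by construction. If a tree wanted to make that explicit, a compile-time check along these lines would do (a hedged sketch, not something this patch adds):

	#include <stdint.h>

	/* Every cvmx_led_* union wraps exactly one 64-bit register;
	 * removing unused per-model views must not change that. */
	_Static_assert(sizeof(union cvmx_led_blink) == sizeof(uint64_t),
		       "LED CSR unions must stay 8 bytes");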
diff --git a/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h b/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h
index 36f510721141..4167a4c7a28d 100644
--- a/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-lmcx-defs.h
@@ -189,11 +189,6 @@ union cvmx_lmcx_bist_ctl {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_lmcx_bist_ctl_s cn50xx;
- struct cvmx_lmcx_bist_ctl_s cn52xx;
- struct cvmx_lmcx_bist_ctl_s cn52xxp1;
- struct cvmx_lmcx_bist_ctl_s cn56xx;
- struct cvmx_lmcx_bist_ctl_s cn56xxp1;
};
union cvmx_lmcx_bist_result {
@@ -236,10 +231,6 @@ union cvmx_lmcx_bist_result {
uint64_t reserved_9_63:55;
#endif
} cn50xx;
- struct cvmx_lmcx_bist_result_s cn52xx;
- struct cvmx_lmcx_bist_result_s cn52xxp1;
- struct cvmx_lmcx_bist_result_s cn56xx;
- struct cvmx_lmcx_bist_result_s cn56xxp1;
};
union cvmx_lmcx_char_ctl {
@@ -263,7 +254,6 @@ union cvmx_lmcx_char_ctl {
uint64_t reserved_44_63:20;
#endif
} s;
- struct cvmx_lmcx_char_ctl_s cn61xx;
struct cvmx_lmcx_char_ctl_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_42_63:22;
@@ -279,11 +269,6 @@ union cvmx_lmcx_char_ctl {
uint64_t reserved_42_63:22;
#endif
} cn63xx;
- struct cvmx_lmcx_char_ctl_cn63xx cn63xxp1;
- struct cvmx_lmcx_char_ctl_s cn66xx;
- struct cvmx_lmcx_char_ctl_s cn68xx;
- struct cvmx_lmcx_char_ctl_cn63xx cn68xxp1;
- struct cvmx_lmcx_char_ctl_s cnf71xx;
};
union cvmx_lmcx_char_mask0 {
@@ -295,13 +280,6 @@ union cvmx_lmcx_char_mask0 {
uint64_t mask:64;
#endif
} s;
- struct cvmx_lmcx_char_mask0_s cn61xx;
- struct cvmx_lmcx_char_mask0_s cn63xx;
- struct cvmx_lmcx_char_mask0_s cn63xxp1;
- struct cvmx_lmcx_char_mask0_s cn66xx;
- struct cvmx_lmcx_char_mask0_s cn68xx;
- struct cvmx_lmcx_char_mask0_s cn68xxp1;
- struct cvmx_lmcx_char_mask0_s cnf71xx;
};
union cvmx_lmcx_char_mask1 {
@@ -315,13 +293,6 @@ union cvmx_lmcx_char_mask1 {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_lmcx_char_mask1_s cn61xx;
- struct cvmx_lmcx_char_mask1_s cn63xx;
- struct cvmx_lmcx_char_mask1_s cn63xxp1;
- struct cvmx_lmcx_char_mask1_s cn66xx;
- struct cvmx_lmcx_char_mask1_s cn68xx;
- struct cvmx_lmcx_char_mask1_s cn68xxp1;
- struct cvmx_lmcx_char_mask1_s cnf71xx;
};
union cvmx_lmcx_char_mask2 {
@@ -333,13 +304,6 @@ union cvmx_lmcx_char_mask2 {
uint64_t mask:64;
#endif
} s;
- struct cvmx_lmcx_char_mask2_s cn61xx;
- struct cvmx_lmcx_char_mask2_s cn63xx;
- struct cvmx_lmcx_char_mask2_s cn63xxp1;
- struct cvmx_lmcx_char_mask2_s cn66xx;
- struct cvmx_lmcx_char_mask2_s cn68xx;
- struct cvmx_lmcx_char_mask2_s cn68xxp1;
- struct cvmx_lmcx_char_mask2_s cnf71xx;
};
union cvmx_lmcx_char_mask3 {
@@ -353,13 +317,6 @@ union cvmx_lmcx_char_mask3 {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_lmcx_char_mask3_s cn61xx;
- struct cvmx_lmcx_char_mask3_s cn63xx;
- struct cvmx_lmcx_char_mask3_s cn63xxp1;
- struct cvmx_lmcx_char_mask3_s cn66xx;
- struct cvmx_lmcx_char_mask3_s cn68xx;
- struct cvmx_lmcx_char_mask3_s cn68xxp1;
- struct cvmx_lmcx_char_mask3_s cnf71xx;
};
union cvmx_lmcx_char_mask4 {
@@ -393,13 +350,6 @@ union cvmx_lmcx_char_mask4 {
uint64_t reserved_33_63:31;
#endif
} s;
- struct cvmx_lmcx_char_mask4_s cn61xx;
- struct cvmx_lmcx_char_mask4_s cn63xx;
- struct cvmx_lmcx_char_mask4_s cn63xxp1;
- struct cvmx_lmcx_char_mask4_s cn66xx;
- struct cvmx_lmcx_char_mask4_s cn68xx;
- struct cvmx_lmcx_char_mask4_s cn68xxp1;
- struct cvmx_lmcx_char_mask4_s cnf71xx;
};
union cvmx_lmcx_comp_ctl {
@@ -448,9 +398,6 @@ union cvmx_lmcx_comp_ctl {
uint64_t reserved_32_63:32;
#endif
} cn30xx;
- struct cvmx_lmcx_comp_ctl_cn30xx cn31xx;
- struct cvmx_lmcx_comp_ctl_cn30xx cn38xx;
- struct cvmx_lmcx_comp_ctl_cn30xx cn38xxp2;
struct cvmx_lmcx_comp_ctl_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
@@ -470,11 +417,6 @@ union cvmx_lmcx_comp_ctl {
uint64_t reserved_32_63:32;
#endif
} cn50xx;
- struct cvmx_lmcx_comp_ctl_cn50xx cn52xx;
- struct cvmx_lmcx_comp_ctl_cn50xx cn52xxp1;
- struct cvmx_lmcx_comp_ctl_cn50xx cn56xx;
- struct cvmx_lmcx_comp_ctl_cn50xx cn56xxp1;
- struct cvmx_lmcx_comp_ctl_cn50xx cn58xx;
struct cvmx_lmcx_comp_ctl_cn58xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
@@ -525,13 +467,6 @@ union cvmx_lmcx_comp_ctl2 {
uint64_t reserved_34_63:30;
#endif
} s;
- struct cvmx_lmcx_comp_ctl2_s cn61xx;
- struct cvmx_lmcx_comp_ctl2_s cn63xx;
- struct cvmx_lmcx_comp_ctl2_s cn63xxp1;
- struct cvmx_lmcx_comp_ctl2_s cn66xx;
- struct cvmx_lmcx_comp_ctl2_s cn68xx;
- struct cvmx_lmcx_comp_ctl2_s cn68xxp1;
- struct cvmx_lmcx_comp_ctl2_s cnf71xx;
};
union cvmx_lmcx_config {
@@ -587,7 +522,6 @@ union cvmx_lmcx_config {
uint64_t reserved_61_63:3;
#endif
} s;
- struct cvmx_lmcx_config_s cn61xx;
struct cvmx_lmcx_config_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_59_63:5;
@@ -723,9 +657,6 @@ union cvmx_lmcx_config {
uint64_t reserved_60_63:4;
#endif
} cn66xx;
- struct cvmx_lmcx_config_cn63xx cn68xx;
- struct cvmx_lmcx_config_cn63xx cn68xxp1;
- struct cvmx_lmcx_config_s cnf71xx;
};
union cvmx_lmcx_control {
@@ -787,7 +718,6 @@ union cvmx_lmcx_control {
uint64_t scramble_ena:1;
#endif
} s;
- struct cvmx_lmcx_control_s cn61xx;
struct cvmx_lmcx_control_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63:40;
@@ -833,7 +763,6 @@ union cvmx_lmcx_control {
uint64_t reserved_24_63:40;
#endif
} cn63xx;
- struct cvmx_lmcx_control_cn63xx cn63xxp1;
struct cvmx_lmcx_control_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t scramble_ena:1;
@@ -938,8 +867,6 @@ union cvmx_lmcx_control {
uint64_t reserved_63_63:1;
#endif
} cn68xx;
- struct cvmx_lmcx_control_cn68xx cn68xxp1;
- struct cvmx_lmcx_control_cn66xx cnf71xx;
};
union cvmx_lmcx_ctl {
@@ -1032,7 +959,6 @@ union cvmx_lmcx_ctl {
uint64_t reserved_32_63:32;
#endif
} cn30xx;
- struct cvmx_lmcx_ctl_cn30xx cn31xx;
struct cvmx_lmcx_ctl_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
@@ -1076,7 +1002,6 @@ union cvmx_lmcx_ctl {
uint64_t reserved_32_63:32;
#endif
} cn38xx;
- struct cvmx_lmcx_ctl_cn38xx cn38xxp2;
struct cvmx_lmcx_ctl_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
@@ -1165,9 +1090,6 @@ union cvmx_lmcx_ctl {
uint64_t reserved_32_63:32;
#endif
} cn52xx;
- struct cvmx_lmcx_ctl_cn52xx cn52xxp1;
- struct cvmx_lmcx_ctl_cn52xx cn56xx;
- struct cvmx_lmcx_ctl_cn52xx cn56xxp1;
struct cvmx_lmcx_ctl_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
@@ -1211,7 +1133,6 @@ union cvmx_lmcx_ctl {
uint64_t reserved_32_63:32;
#endif
} cn58xx;
- struct cvmx_lmcx_ctl_cn58xx cn58xxp1;
};
union cvmx_lmcx_ctl1 {
@@ -1284,9 +1205,6 @@ union cvmx_lmcx_ctl1 {
uint64_t reserved_21_63:43;
#endif
} cn52xx;
- struct cvmx_lmcx_ctl1_cn52xx cn52xxp1;
- struct cvmx_lmcx_ctl1_cn52xx cn56xx;
- struct cvmx_lmcx_ctl1_cn52xx cn56xxp1;
struct cvmx_lmcx_ctl1_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
@@ -1300,7 +1218,6 @@ union cvmx_lmcx_ctl1 {
uint64_t reserved_10_63:54;
#endif
} cn58xx;
- struct cvmx_lmcx_ctl1_cn58xx cn58xxp1;
};
union cvmx_lmcx_dclk_cnt {
@@ -1312,13 +1229,6 @@ union cvmx_lmcx_dclk_cnt {
uint64_t dclkcnt:64;
#endif
} s;
- struct cvmx_lmcx_dclk_cnt_s cn61xx;
- struct cvmx_lmcx_dclk_cnt_s cn63xx;
- struct cvmx_lmcx_dclk_cnt_s cn63xxp1;
- struct cvmx_lmcx_dclk_cnt_s cn66xx;
- struct cvmx_lmcx_dclk_cnt_s cn68xx;
- struct cvmx_lmcx_dclk_cnt_s cn68xxp1;
- struct cvmx_lmcx_dclk_cnt_s cnf71xx;
};
union cvmx_lmcx_dclk_cnt_hi {
@@ -1332,17 +1242,6 @@ union cvmx_lmcx_dclk_cnt_hi {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_dclk_cnt_hi_s cn30xx;
- struct cvmx_lmcx_dclk_cnt_hi_s cn31xx;
- struct cvmx_lmcx_dclk_cnt_hi_s cn38xx;
- struct cvmx_lmcx_dclk_cnt_hi_s cn38xxp2;
- struct cvmx_lmcx_dclk_cnt_hi_s cn50xx;
- struct cvmx_lmcx_dclk_cnt_hi_s cn52xx;
- struct cvmx_lmcx_dclk_cnt_hi_s cn52xxp1;
- struct cvmx_lmcx_dclk_cnt_hi_s cn56xx;
- struct cvmx_lmcx_dclk_cnt_hi_s cn56xxp1;
- struct cvmx_lmcx_dclk_cnt_hi_s cn58xx;
- struct cvmx_lmcx_dclk_cnt_hi_s cn58xxp1;
};
union cvmx_lmcx_dclk_cnt_lo {
@@ -1356,17 +1255,6 @@ union cvmx_lmcx_dclk_cnt_lo {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_dclk_cnt_lo_s cn30xx;
- struct cvmx_lmcx_dclk_cnt_lo_s cn31xx;
- struct cvmx_lmcx_dclk_cnt_lo_s cn38xx;
- struct cvmx_lmcx_dclk_cnt_lo_s cn38xxp2;
- struct cvmx_lmcx_dclk_cnt_lo_s cn50xx;
- struct cvmx_lmcx_dclk_cnt_lo_s cn52xx;
- struct cvmx_lmcx_dclk_cnt_lo_s cn52xxp1;
- struct cvmx_lmcx_dclk_cnt_lo_s cn56xx;
- struct cvmx_lmcx_dclk_cnt_lo_s cn56xxp1;
- struct cvmx_lmcx_dclk_cnt_lo_s cn58xx;
- struct cvmx_lmcx_dclk_cnt_lo_s cn58xxp1;
};
union cvmx_lmcx_dclk_ctl {
@@ -1386,8 +1274,6 @@ union cvmx_lmcx_dclk_ctl {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_lmcx_dclk_ctl_s cn56xx;
- struct cvmx_lmcx_dclk_ctl_s cn56xxp1;
};
union cvmx_lmcx_ddr2_ctl {
@@ -1474,16 +1360,6 @@ union cvmx_lmcx_ddr2_ctl {
uint64_t reserved_32_63:32;
#endif
} cn30xx;
- struct cvmx_lmcx_ddr2_ctl_cn30xx cn31xx;
- struct cvmx_lmcx_ddr2_ctl_s cn38xx;
- struct cvmx_lmcx_ddr2_ctl_s cn38xxp2;
- struct cvmx_lmcx_ddr2_ctl_s cn50xx;
- struct cvmx_lmcx_ddr2_ctl_s cn52xx;
- struct cvmx_lmcx_ddr2_ctl_s cn52xxp1;
- struct cvmx_lmcx_ddr2_ctl_s cn56xx;
- struct cvmx_lmcx_ddr2_ctl_s cn56xxp1;
- struct cvmx_lmcx_ddr2_ctl_s cn58xx;
- struct cvmx_lmcx_ddr2_ctl_s cn58xxp1;
};
union cvmx_lmcx_ddr_pll_ctl {
@@ -1515,13 +1391,6 @@ union cvmx_lmcx_ddr_pll_ctl {
uint64_t reserved_27_63:37;
#endif
} s;
- struct cvmx_lmcx_ddr_pll_ctl_s cn61xx;
- struct cvmx_lmcx_ddr_pll_ctl_s cn63xx;
- struct cvmx_lmcx_ddr_pll_ctl_s cn63xxp1;
- struct cvmx_lmcx_ddr_pll_ctl_s cn66xx;
- struct cvmx_lmcx_ddr_pll_ctl_s cn68xx;
- struct cvmx_lmcx_ddr_pll_ctl_s cn68xxp1;
- struct cvmx_lmcx_ddr_pll_ctl_s cnf71xx;
};
union cvmx_lmcx_delay_cfg {
@@ -1539,7 +1408,6 @@ union cvmx_lmcx_delay_cfg {
uint64_t reserved_15_63:49;
#endif
} s;
- struct cvmx_lmcx_delay_cfg_s cn30xx;
struct cvmx_lmcx_delay_cfg_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_14_63:50;
@@ -1557,13 +1425,6 @@ union cvmx_lmcx_delay_cfg {
uint64_t reserved_14_63:50;
#endif
} cn38xx;
- struct cvmx_lmcx_delay_cfg_cn38xx cn50xx;
- struct cvmx_lmcx_delay_cfg_cn38xx cn52xx;
- struct cvmx_lmcx_delay_cfg_cn38xx cn52xxp1;
- struct cvmx_lmcx_delay_cfg_cn38xx cn56xx;
- struct cvmx_lmcx_delay_cfg_cn38xx cn56xxp1;
- struct cvmx_lmcx_delay_cfg_cn38xx cn58xx;
- struct cvmx_lmcx_delay_cfg_cn38xx cn58xxp1;
};
union cvmx_lmcx_dimmx_params {
@@ -1605,13 +1466,6 @@ union cvmx_lmcx_dimmx_params {
uint64_t rc15:4;
#endif
} s;
- struct cvmx_lmcx_dimmx_params_s cn61xx;
- struct cvmx_lmcx_dimmx_params_s cn63xx;
- struct cvmx_lmcx_dimmx_params_s cn63xxp1;
- struct cvmx_lmcx_dimmx_params_s cn66xx;
- struct cvmx_lmcx_dimmx_params_s cn68xx;
- struct cvmx_lmcx_dimmx_params_s cn68xxp1;
- struct cvmx_lmcx_dimmx_params_s cnf71xx;
};
union cvmx_lmcx_dimm_ctl {
@@ -1631,13 +1485,6 @@ union cvmx_lmcx_dimm_ctl {
uint64_t reserved_46_63:18;
#endif
} s;
- struct cvmx_lmcx_dimm_ctl_s cn61xx;
- struct cvmx_lmcx_dimm_ctl_s cn63xx;
- struct cvmx_lmcx_dimm_ctl_s cn63xxp1;
- struct cvmx_lmcx_dimm_ctl_s cn66xx;
- struct cvmx_lmcx_dimm_ctl_s cn68xx;
- struct cvmx_lmcx_dimm_ctl_s cn68xxp1;
- struct cvmx_lmcx_dimm_ctl_s cnf71xx;
};
union cvmx_lmcx_dll_ctl {
@@ -1657,10 +1504,6 @@ union cvmx_lmcx_dll_ctl {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_lmcx_dll_ctl_s cn52xx;
- struct cvmx_lmcx_dll_ctl_s cn52xxp1;
- struct cvmx_lmcx_dll_ctl_s cn56xx;
- struct cvmx_lmcx_dll_ctl_s cn56xxp1;
};
union cvmx_lmcx_dll_ctl2 {
@@ -1684,7 +1527,6 @@ union cvmx_lmcx_dll_ctl2 {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_lmcx_dll_ctl2_s cn61xx;
struct cvmx_lmcx_dll_ctl2_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_15_63:49;
@@ -1702,11 +1544,6 @@ union cvmx_lmcx_dll_ctl2 {
uint64_t reserved_15_63:49;
#endif
} cn63xx;
- struct cvmx_lmcx_dll_ctl2_cn63xx cn63xxp1;
- struct cvmx_lmcx_dll_ctl2_cn63xx cn66xx;
- struct cvmx_lmcx_dll_ctl2_s cn68xx;
- struct cvmx_lmcx_dll_ctl2_s cn68xxp1;
- struct cvmx_lmcx_dll_ctl2_s cnf71xx;
};
union cvmx_lmcx_dll_ctl3 {
@@ -1748,7 +1585,6 @@ union cvmx_lmcx_dll_ctl3 {
uint64_t reserved_41_63:23;
#endif
} s;
- struct cvmx_lmcx_dll_ctl3_s cn61xx;
struct cvmx_lmcx_dll_ctl3_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
@@ -1776,11 +1612,6 @@ union cvmx_lmcx_dll_ctl3 {
uint64_t reserved_29_63:35;
#endif
} cn63xx;
- struct cvmx_lmcx_dll_ctl3_cn63xx cn63xxp1;
- struct cvmx_lmcx_dll_ctl3_cn63xx cn66xx;
- struct cvmx_lmcx_dll_ctl3_s cn68xx;
- struct cvmx_lmcx_dll_ctl3_s cn68xxp1;
- struct cvmx_lmcx_dll_ctl3_s cnf71xx;
};
union cvmx_lmcx_dual_memcfg {
@@ -1800,13 +1631,6 @@ union cvmx_lmcx_dual_memcfg {
uint64_t reserved_20_63:44;
#endif
} s;
- struct cvmx_lmcx_dual_memcfg_s cn50xx;
- struct cvmx_lmcx_dual_memcfg_s cn52xx;
- struct cvmx_lmcx_dual_memcfg_s cn52xxp1;
- struct cvmx_lmcx_dual_memcfg_s cn56xx;
- struct cvmx_lmcx_dual_memcfg_s cn56xxp1;
- struct cvmx_lmcx_dual_memcfg_s cn58xx;
- struct cvmx_lmcx_dual_memcfg_s cn58xxp1;
struct cvmx_lmcx_dual_memcfg_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_19_63:45;
@@ -1820,12 +1644,6 @@ union cvmx_lmcx_dual_memcfg {
uint64_t reserved_19_63:45;
#endif
} cn61xx;
- struct cvmx_lmcx_dual_memcfg_cn61xx cn63xx;
- struct cvmx_lmcx_dual_memcfg_cn61xx cn63xxp1;
- struct cvmx_lmcx_dual_memcfg_cn61xx cn66xx;
- struct cvmx_lmcx_dual_memcfg_cn61xx cn68xx;
- struct cvmx_lmcx_dual_memcfg_cn61xx cn68xxp1;
- struct cvmx_lmcx_dual_memcfg_cn61xx cnf71xx;
};
union cvmx_lmcx_ecc_synd {
@@ -1845,24 +1663,6 @@ union cvmx_lmcx_ecc_synd {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_ecc_synd_s cn30xx;
- struct cvmx_lmcx_ecc_synd_s cn31xx;
- struct cvmx_lmcx_ecc_synd_s cn38xx;
- struct cvmx_lmcx_ecc_synd_s cn38xxp2;
- struct cvmx_lmcx_ecc_synd_s cn50xx;
- struct cvmx_lmcx_ecc_synd_s cn52xx;
- struct cvmx_lmcx_ecc_synd_s cn52xxp1;
- struct cvmx_lmcx_ecc_synd_s cn56xx;
- struct cvmx_lmcx_ecc_synd_s cn56xxp1;
- struct cvmx_lmcx_ecc_synd_s cn58xx;
- struct cvmx_lmcx_ecc_synd_s cn58xxp1;
- struct cvmx_lmcx_ecc_synd_s cn61xx;
- struct cvmx_lmcx_ecc_synd_s cn63xx;
- struct cvmx_lmcx_ecc_synd_s cn63xxp1;
- struct cvmx_lmcx_ecc_synd_s cn66xx;
- struct cvmx_lmcx_ecc_synd_s cn68xx;
- struct cvmx_lmcx_ecc_synd_s cn68xxp1;
- struct cvmx_lmcx_ecc_synd_s cnf71xx;
};
union cvmx_lmcx_fadr {
@@ -1891,16 +1691,6 @@ union cvmx_lmcx_fadr {
uint64_t reserved_32_63:32;
#endif
} cn30xx;
- struct cvmx_lmcx_fadr_cn30xx cn31xx;
- struct cvmx_lmcx_fadr_cn30xx cn38xx;
- struct cvmx_lmcx_fadr_cn30xx cn38xxp2;
- struct cvmx_lmcx_fadr_cn30xx cn50xx;
- struct cvmx_lmcx_fadr_cn30xx cn52xx;
- struct cvmx_lmcx_fadr_cn30xx cn52xxp1;
- struct cvmx_lmcx_fadr_cn30xx cn56xx;
- struct cvmx_lmcx_fadr_cn30xx cn56xxp1;
- struct cvmx_lmcx_fadr_cn30xx cn58xx;
- struct cvmx_lmcx_fadr_cn30xx cn58xxp1;
struct cvmx_lmcx_fadr_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_36_63:28;
@@ -1918,12 +1708,6 @@ union cvmx_lmcx_fadr {
uint64_t reserved_36_63:28;
#endif
} cn61xx;
- struct cvmx_lmcx_fadr_cn61xx cn63xx;
- struct cvmx_lmcx_fadr_cn61xx cn63xxp1;
- struct cvmx_lmcx_fadr_cn61xx cn66xx;
- struct cvmx_lmcx_fadr_cn61xx cn68xx;
- struct cvmx_lmcx_fadr_cn61xx cn68xxp1;
- struct cvmx_lmcx_fadr_cn61xx cnf71xx;
};
union cvmx_lmcx_ifb_cnt {
@@ -1935,13 +1719,6 @@ union cvmx_lmcx_ifb_cnt {
uint64_t ifbcnt:64;
#endif
} s;
- struct cvmx_lmcx_ifb_cnt_s cn61xx;
- struct cvmx_lmcx_ifb_cnt_s cn63xx;
- struct cvmx_lmcx_ifb_cnt_s cn63xxp1;
- struct cvmx_lmcx_ifb_cnt_s cn66xx;
- struct cvmx_lmcx_ifb_cnt_s cn68xx;
- struct cvmx_lmcx_ifb_cnt_s cn68xxp1;
- struct cvmx_lmcx_ifb_cnt_s cnf71xx;
};
union cvmx_lmcx_ifb_cnt_hi {
@@ -1955,17 +1732,6 @@ union cvmx_lmcx_ifb_cnt_hi {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_ifb_cnt_hi_s cn30xx;
- struct cvmx_lmcx_ifb_cnt_hi_s cn31xx;
- struct cvmx_lmcx_ifb_cnt_hi_s cn38xx;
- struct cvmx_lmcx_ifb_cnt_hi_s cn38xxp2;
- struct cvmx_lmcx_ifb_cnt_hi_s cn50xx;
- struct cvmx_lmcx_ifb_cnt_hi_s cn52xx;
- struct cvmx_lmcx_ifb_cnt_hi_s cn52xxp1;
- struct cvmx_lmcx_ifb_cnt_hi_s cn56xx;
- struct cvmx_lmcx_ifb_cnt_hi_s cn56xxp1;
- struct cvmx_lmcx_ifb_cnt_hi_s cn58xx;
- struct cvmx_lmcx_ifb_cnt_hi_s cn58xxp1;
};
union cvmx_lmcx_ifb_cnt_lo {
@@ -1979,17 +1745,6 @@ union cvmx_lmcx_ifb_cnt_lo {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_ifb_cnt_lo_s cn30xx;
- struct cvmx_lmcx_ifb_cnt_lo_s cn31xx;
- struct cvmx_lmcx_ifb_cnt_lo_s cn38xx;
- struct cvmx_lmcx_ifb_cnt_lo_s cn38xxp2;
- struct cvmx_lmcx_ifb_cnt_lo_s cn50xx;
- struct cvmx_lmcx_ifb_cnt_lo_s cn52xx;
- struct cvmx_lmcx_ifb_cnt_lo_s cn52xxp1;
- struct cvmx_lmcx_ifb_cnt_lo_s cn56xx;
- struct cvmx_lmcx_ifb_cnt_lo_s cn56xxp1;
- struct cvmx_lmcx_ifb_cnt_lo_s cn58xx;
- struct cvmx_lmcx_ifb_cnt_lo_s cn58xxp1;
};
union cvmx_lmcx_int {
@@ -2007,13 +1762,6 @@ union cvmx_lmcx_int {
uint64_t reserved_9_63:55;
#endif
} s;
- struct cvmx_lmcx_int_s cn61xx;
- struct cvmx_lmcx_int_s cn63xx;
- struct cvmx_lmcx_int_s cn63xxp1;
- struct cvmx_lmcx_int_s cn66xx;
- struct cvmx_lmcx_int_s cn68xx;
- struct cvmx_lmcx_int_s cn68xxp1;
- struct cvmx_lmcx_int_s cnf71xx;
};
union cvmx_lmcx_int_en {
@@ -2031,13 +1779,6 @@ union cvmx_lmcx_int_en {
uint64_t reserved_3_63:61;
#endif
} s;
- struct cvmx_lmcx_int_en_s cn61xx;
- struct cvmx_lmcx_int_en_s cn63xx;
- struct cvmx_lmcx_int_en_s cn63xxp1;
- struct cvmx_lmcx_int_en_s cn66xx;
- struct cvmx_lmcx_int_en_s cn68xx;
- struct cvmx_lmcx_int_en_s cn68xxp1;
- struct cvmx_lmcx_int_en_s cnf71xx;
};
union cvmx_lmcx_mem_cfg0 {
@@ -2075,17 +1816,6 @@ union cvmx_lmcx_mem_cfg0 {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_mem_cfg0_s cn30xx;
- struct cvmx_lmcx_mem_cfg0_s cn31xx;
- struct cvmx_lmcx_mem_cfg0_s cn38xx;
- struct cvmx_lmcx_mem_cfg0_s cn38xxp2;
- struct cvmx_lmcx_mem_cfg0_s cn50xx;
- struct cvmx_lmcx_mem_cfg0_s cn52xx;
- struct cvmx_lmcx_mem_cfg0_s cn52xxp1;
- struct cvmx_lmcx_mem_cfg0_s cn56xx;
- struct cvmx_lmcx_mem_cfg0_s cn56xxp1;
- struct cvmx_lmcx_mem_cfg0_s cn58xx;
- struct cvmx_lmcx_mem_cfg0_s cn58xxp1;
};
union cvmx_lmcx_mem_cfg1 {
@@ -2115,8 +1845,6 @@ union cvmx_lmcx_mem_cfg1 {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_mem_cfg1_s cn30xx;
- struct cvmx_lmcx_mem_cfg1_s cn31xx;
struct cvmx_lmcx_mem_cfg1_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_31_63:33;
@@ -2140,14 +1868,6 @@ union cvmx_lmcx_mem_cfg1 {
uint64_t reserved_31_63:33;
#endif
} cn38xx;
- struct cvmx_lmcx_mem_cfg1_cn38xx cn38xxp2;
- struct cvmx_lmcx_mem_cfg1_s cn50xx;
- struct cvmx_lmcx_mem_cfg1_cn38xx cn52xx;
- struct cvmx_lmcx_mem_cfg1_cn38xx cn52xxp1;
- struct cvmx_lmcx_mem_cfg1_cn38xx cn56xx;
- struct cvmx_lmcx_mem_cfg1_cn38xx cn56xxp1;
- struct cvmx_lmcx_mem_cfg1_cn38xx cn58xx;
- struct cvmx_lmcx_mem_cfg1_cn38xx cn58xxp1;
};
union cvmx_lmcx_modereg_params0 {
@@ -2189,13 +1909,6 @@ union cvmx_lmcx_modereg_params0 {
uint64_t reserved_25_63:39;
#endif
} s;
- struct cvmx_lmcx_modereg_params0_s cn61xx;
- struct cvmx_lmcx_modereg_params0_s cn63xx;
- struct cvmx_lmcx_modereg_params0_s cn63xxp1;
- struct cvmx_lmcx_modereg_params0_s cn66xx;
- struct cvmx_lmcx_modereg_params0_s cn68xx;
- struct cvmx_lmcx_modereg_params0_s cn68xxp1;
- struct cvmx_lmcx_modereg_params0_s cnf71xx;
};
union cvmx_lmcx_modereg_params1 {
@@ -2255,13 +1968,6 @@ union cvmx_lmcx_modereg_params1 {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_lmcx_modereg_params1_s cn61xx;
- struct cvmx_lmcx_modereg_params1_s cn63xx;
- struct cvmx_lmcx_modereg_params1_s cn63xxp1;
- struct cvmx_lmcx_modereg_params1_s cn66xx;
- struct cvmx_lmcx_modereg_params1_s cn68xx;
- struct cvmx_lmcx_modereg_params1_s cn68xxp1;
- struct cvmx_lmcx_modereg_params1_s cnf71xx;
};
union cvmx_lmcx_nxm {
@@ -2300,15 +2006,6 @@ union cvmx_lmcx_nxm {
uint64_t reserved_8_63:56;
#endif
} cn52xx;
- struct cvmx_lmcx_nxm_cn52xx cn56xx;
- struct cvmx_lmcx_nxm_cn52xx cn58xx;
- struct cvmx_lmcx_nxm_s cn61xx;
- struct cvmx_lmcx_nxm_s cn63xx;
- struct cvmx_lmcx_nxm_s cn63xxp1;
- struct cvmx_lmcx_nxm_s cn66xx;
- struct cvmx_lmcx_nxm_s cn68xx;
- struct cvmx_lmcx_nxm_s cn68xxp1;
- struct cvmx_lmcx_nxm_s cnf71xx;
};
union cvmx_lmcx_ops_cnt {
@@ -2320,13 +2017,6 @@ union cvmx_lmcx_ops_cnt {
uint64_t opscnt:64;
#endif
} s;
- struct cvmx_lmcx_ops_cnt_s cn61xx;
- struct cvmx_lmcx_ops_cnt_s cn63xx;
- struct cvmx_lmcx_ops_cnt_s cn63xxp1;
- struct cvmx_lmcx_ops_cnt_s cn66xx;
- struct cvmx_lmcx_ops_cnt_s cn68xx;
- struct cvmx_lmcx_ops_cnt_s cn68xxp1;
- struct cvmx_lmcx_ops_cnt_s cnf71xx;
};
union cvmx_lmcx_ops_cnt_hi {
@@ -2340,17 +2030,6 @@ union cvmx_lmcx_ops_cnt_hi {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_ops_cnt_hi_s cn30xx;
- struct cvmx_lmcx_ops_cnt_hi_s cn31xx;
- struct cvmx_lmcx_ops_cnt_hi_s cn38xx;
- struct cvmx_lmcx_ops_cnt_hi_s cn38xxp2;
- struct cvmx_lmcx_ops_cnt_hi_s cn50xx;
- struct cvmx_lmcx_ops_cnt_hi_s cn52xx;
- struct cvmx_lmcx_ops_cnt_hi_s cn52xxp1;
- struct cvmx_lmcx_ops_cnt_hi_s cn56xx;
- struct cvmx_lmcx_ops_cnt_hi_s cn56xxp1;
- struct cvmx_lmcx_ops_cnt_hi_s cn58xx;
- struct cvmx_lmcx_ops_cnt_hi_s cn58xxp1;
};
union cvmx_lmcx_ops_cnt_lo {
@@ -2364,17 +2043,6 @@ union cvmx_lmcx_ops_cnt_lo {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_ops_cnt_lo_s cn30xx;
- struct cvmx_lmcx_ops_cnt_lo_s cn31xx;
- struct cvmx_lmcx_ops_cnt_lo_s cn38xx;
- struct cvmx_lmcx_ops_cnt_lo_s cn38xxp2;
- struct cvmx_lmcx_ops_cnt_lo_s cn50xx;
- struct cvmx_lmcx_ops_cnt_lo_s cn52xx;
- struct cvmx_lmcx_ops_cnt_lo_s cn52xxp1;
- struct cvmx_lmcx_ops_cnt_lo_s cn56xx;
- struct cvmx_lmcx_ops_cnt_lo_s cn56xxp1;
- struct cvmx_lmcx_ops_cnt_lo_s cn58xx;
- struct cvmx_lmcx_ops_cnt_lo_s cn58xxp1;
};
union cvmx_lmcx_phy_ctl {
@@ -2404,8 +2072,6 @@ union cvmx_lmcx_phy_ctl {
uint64_t reserved_15_63:49;
#endif
} s;
- struct cvmx_lmcx_phy_ctl_s cn61xx;
- struct cvmx_lmcx_phy_ctl_s cn63xx;
struct cvmx_lmcx_phy_ctl_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_14_63:50;
@@ -2429,10 +2095,6 @@ union cvmx_lmcx_phy_ctl {
uint64_t reserved_14_63:50;
#endif
} cn63xxp1;
- struct cvmx_lmcx_phy_ctl_s cn66xx;
- struct cvmx_lmcx_phy_ctl_s cn68xx;
- struct cvmx_lmcx_phy_ctl_s cn68xxp1;
- struct cvmx_lmcx_phy_ctl_s cnf71xx;
};
union cvmx_lmcx_pll_bwctl {
@@ -2448,10 +2110,6 @@ union cvmx_lmcx_pll_bwctl {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_lmcx_pll_bwctl_s cn30xx;
- struct cvmx_lmcx_pll_bwctl_s cn31xx;
- struct cvmx_lmcx_pll_bwctl_s cn38xx;
- struct cvmx_lmcx_pll_bwctl_s cn38xxp2;
};
union cvmx_lmcx_pll_ctl {
@@ -2520,9 +2178,6 @@ union cvmx_lmcx_pll_ctl {
uint64_t reserved_29_63:35;
#endif
} cn50xx;
- struct cvmx_lmcx_pll_ctl_s cn52xx;
- struct cvmx_lmcx_pll_ctl_s cn52xxp1;
- struct cvmx_lmcx_pll_ctl_cn50xx cn56xx;
struct cvmx_lmcx_pll_ctl_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
@@ -2552,8 +2207,6 @@ union cvmx_lmcx_pll_ctl {
uint64_t reserved_28_63:36;
#endif
} cn56xxp1;
- struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xx;
- struct cvmx_lmcx_pll_ctl_cn56xxp1 cn58xxp1;
};
union cvmx_lmcx_pll_status {
@@ -2575,12 +2228,6 @@ union cvmx_lmcx_pll_status {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_pll_status_s cn50xx;
- struct cvmx_lmcx_pll_status_s cn52xx;
- struct cvmx_lmcx_pll_status_s cn52xxp1;
- struct cvmx_lmcx_pll_status_s cn56xx;
- struct cvmx_lmcx_pll_status_s cn56xxp1;
- struct cvmx_lmcx_pll_status_s cn58xx;
struct cvmx_lmcx_pll_status_cn58xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
@@ -2615,10 +2262,6 @@ union cvmx_lmcx_read_level_ctl {
uint64_t reserved_44_63:20;
#endif
} s;
- struct cvmx_lmcx_read_level_ctl_s cn52xx;
- struct cvmx_lmcx_read_level_ctl_s cn52xxp1;
- struct cvmx_lmcx_read_level_ctl_s cn56xx;
- struct cvmx_lmcx_read_level_ctl_s cn56xxp1;
};
union cvmx_lmcx_read_level_dbg {
@@ -2636,10 +2279,6 @@ union cvmx_lmcx_read_level_dbg {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_read_level_dbg_s cn52xx;
- struct cvmx_lmcx_read_level_dbg_s cn52xxp1;
- struct cvmx_lmcx_read_level_dbg_s cn56xx;
- struct cvmx_lmcx_read_level_dbg_s cn56xxp1;
};
union cvmx_lmcx_read_level_rankx {
@@ -2671,10 +2310,6 @@ union cvmx_lmcx_read_level_rankx {
uint64_t reserved_38_63:26;
#endif
} s;
- struct cvmx_lmcx_read_level_rankx_s cn52xx;
- struct cvmx_lmcx_read_level_rankx_s cn52xxp1;
- struct cvmx_lmcx_read_level_rankx_s cn56xx;
- struct cvmx_lmcx_read_level_rankx_s cn56xxp1;
};
union cvmx_lmcx_reset_ctl {
@@ -2694,13 +2329,6 @@ union cvmx_lmcx_reset_ctl {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_lmcx_reset_ctl_s cn61xx;
- struct cvmx_lmcx_reset_ctl_s cn63xx;
- struct cvmx_lmcx_reset_ctl_s cn63xxp1;
- struct cvmx_lmcx_reset_ctl_s cn66xx;
- struct cvmx_lmcx_reset_ctl_s cn68xx;
- struct cvmx_lmcx_reset_ctl_s cn68xxp1;
- struct cvmx_lmcx_reset_ctl_s cnf71xx;
};
union cvmx_lmcx_rlevel_ctl {
@@ -2730,8 +2358,6 @@ union cvmx_lmcx_rlevel_ctl {
uint64_t reserved_22_63:42;
#endif
} s;
- struct cvmx_lmcx_rlevel_ctl_s cn61xx;
- struct cvmx_lmcx_rlevel_ctl_s cn63xx;
struct cvmx_lmcx_rlevel_ctl_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
@@ -2745,10 +2371,6 @@ union cvmx_lmcx_rlevel_ctl {
uint64_t reserved_9_63:55;
#endif
} cn63xxp1;
- struct cvmx_lmcx_rlevel_ctl_s cn66xx;
- struct cvmx_lmcx_rlevel_ctl_s cn68xx;
- struct cvmx_lmcx_rlevel_ctl_s cn68xxp1;
- struct cvmx_lmcx_rlevel_ctl_s cnf71xx;
};
union cvmx_lmcx_rlevel_dbg {
@@ -2760,13 +2382,6 @@ union cvmx_lmcx_rlevel_dbg {
uint64_t bitmask:64;
#endif
} s;
- struct cvmx_lmcx_rlevel_dbg_s cn61xx;
- struct cvmx_lmcx_rlevel_dbg_s cn63xx;
- struct cvmx_lmcx_rlevel_dbg_s cn63xxp1;
- struct cvmx_lmcx_rlevel_dbg_s cn66xx;
- struct cvmx_lmcx_rlevel_dbg_s cn68xx;
- struct cvmx_lmcx_rlevel_dbg_s cn68xxp1;
- struct cvmx_lmcx_rlevel_dbg_s cnf71xx;
};
union cvmx_lmcx_rlevel_rankx {
@@ -2798,13 +2413,6 @@ union cvmx_lmcx_rlevel_rankx {
uint64_t reserved_56_63:8;
#endif
} s;
- struct cvmx_lmcx_rlevel_rankx_s cn61xx;
- struct cvmx_lmcx_rlevel_rankx_s cn63xx;
- struct cvmx_lmcx_rlevel_rankx_s cn63xxp1;
- struct cvmx_lmcx_rlevel_rankx_s cn66xx;
- struct cvmx_lmcx_rlevel_rankx_s cn68xx;
- struct cvmx_lmcx_rlevel_rankx_s cn68xxp1;
- struct cvmx_lmcx_rlevel_rankx_s cnf71xx;
};
union cvmx_lmcx_rodt_comp_ctl {
@@ -2826,13 +2434,6 @@ union cvmx_lmcx_rodt_comp_ctl {
uint64_t reserved_17_63:47;
#endif
} s;
- struct cvmx_lmcx_rodt_comp_ctl_s cn50xx;
- struct cvmx_lmcx_rodt_comp_ctl_s cn52xx;
- struct cvmx_lmcx_rodt_comp_ctl_s cn52xxp1;
- struct cvmx_lmcx_rodt_comp_ctl_s cn56xx;
- struct cvmx_lmcx_rodt_comp_ctl_s cn56xxp1;
- struct cvmx_lmcx_rodt_comp_ctl_s cn58xx;
- struct cvmx_lmcx_rodt_comp_ctl_s cn58xxp1;
};
union cvmx_lmcx_rodt_ctl {
@@ -2860,17 +2461,6 @@ union cvmx_lmcx_rodt_ctl {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_rodt_ctl_s cn30xx;
- struct cvmx_lmcx_rodt_ctl_s cn31xx;
- struct cvmx_lmcx_rodt_ctl_s cn38xx;
- struct cvmx_lmcx_rodt_ctl_s cn38xxp2;
- struct cvmx_lmcx_rodt_ctl_s cn50xx;
- struct cvmx_lmcx_rodt_ctl_s cn52xx;
- struct cvmx_lmcx_rodt_ctl_s cn52xxp1;
- struct cvmx_lmcx_rodt_ctl_s cn56xx;
- struct cvmx_lmcx_rodt_ctl_s cn56xxp1;
- struct cvmx_lmcx_rodt_ctl_s cn58xx;
- struct cvmx_lmcx_rodt_ctl_s cn58xxp1;
};
union cvmx_lmcx_rodt_mask {
@@ -2896,13 +2486,6 @@ union cvmx_lmcx_rodt_mask {
uint64_t rodt_d3_r1:8;
#endif
} s;
- struct cvmx_lmcx_rodt_mask_s cn61xx;
- struct cvmx_lmcx_rodt_mask_s cn63xx;
- struct cvmx_lmcx_rodt_mask_s cn63xxp1;
- struct cvmx_lmcx_rodt_mask_s cn66xx;
- struct cvmx_lmcx_rodt_mask_s cn68xx;
- struct cvmx_lmcx_rodt_mask_s cn68xxp1;
- struct cvmx_lmcx_rodt_mask_s cnf71xx;
};
union cvmx_lmcx_scramble_cfg0 {
@@ -2914,9 +2497,6 @@ union cvmx_lmcx_scramble_cfg0 {
uint64_t key:64;
#endif
} s;
- struct cvmx_lmcx_scramble_cfg0_s cn61xx;
- struct cvmx_lmcx_scramble_cfg0_s cn66xx;
- struct cvmx_lmcx_scramble_cfg0_s cnf71xx;
};
union cvmx_lmcx_scramble_cfg1 {
@@ -2928,9 +2508,6 @@ union cvmx_lmcx_scramble_cfg1 {
uint64_t key:64;
#endif
} s;
- struct cvmx_lmcx_scramble_cfg1_s cn61xx;
- struct cvmx_lmcx_scramble_cfg1_s cn66xx;
- struct cvmx_lmcx_scramble_cfg1_s cnf71xx;
};
union cvmx_lmcx_scrambled_fadr {
@@ -2952,9 +2529,6 @@ union cvmx_lmcx_scrambled_fadr {
uint64_t reserved_36_63:28;
#endif
} s;
- struct cvmx_lmcx_scrambled_fadr_s cn61xx;
- struct cvmx_lmcx_scrambled_fadr_s cn66xx;
- struct cvmx_lmcx_scrambled_fadr_s cnf71xx;
};
union cvmx_lmcx_slot_ctl0 {
@@ -2974,13 +2548,6 @@ union cvmx_lmcx_slot_ctl0 {
uint64_t reserved_24_63:40;
#endif
} s;
- struct cvmx_lmcx_slot_ctl0_s cn61xx;
- struct cvmx_lmcx_slot_ctl0_s cn63xx;
- struct cvmx_lmcx_slot_ctl0_s cn63xxp1;
- struct cvmx_lmcx_slot_ctl0_s cn66xx;
- struct cvmx_lmcx_slot_ctl0_s cn68xx;
- struct cvmx_lmcx_slot_ctl0_s cn68xxp1;
- struct cvmx_lmcx_slot_ctl0_s cnf71xx;
};
union cvmx_lmcx_slot_ctl1 {
@@ -3000,13 +2567,6 @@ union cvmx_lmcx_slot_ctl1 {
uint64_t reserved_24_63:40;
#endif
} s;
- struct cvmx_lmcx_slot_ctl1_s cn61xx;
- struct cvmx_lmcx_slot_ctl1_s cn63xx;
- struct cvmx_lmcx_slot_ctl1_s cn63xxp1;
- struct cvmx_lmcx_slot_ctl1_s cn66xx;
- struct cvmx_lmcx_slot_ctl1_s cn68xx;
- struct cvmx_lmcx_slot_ctl1_s cn68xxp1;
- struct cvmx_lmcx_slot_ctl1_s cnf71xx;
};
union cvmx_lmcx_slot_ctl2 {
@@ -3026,13 +2586,6 @@ union cvmx_lmcx_slot_ctl2 {
uint64_t reserved_24_63:40;
#endif
} s;
- struct cvmx_lmcx_slot_ctl2_s cn61xx;
- struct cvmx_lmcx_slot_ctl2_s cn63xx;
- struct cvmx_lmcx_slot_ctl2_s cn63xxp1;
- struct cvmx_lmcx_slot_ctl2_s cn66xx;
- struct cvmx_lmcx_slot_ctl2_s cn68xx;
- struct cvmx_lmcx_slot_ctl2_s cn68xxp1;
- struct cvmx_lmcx_slot_ctl2_s cnf71xx;
};
union cvmx_lmcx_timing_params0 {
@@ -3095,7 +2648,6 @@ union cvmx_lmcx_timing_params0 {
uint64_t reserved_47_63:17;
#endif
} cn61xx;
- struct cvmx_lmcx_timing_params0_cn61xx cn63xx;
struct cvmx_lmcx_timing_params0_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_46_63:18;
@@ -3123,10 +2675,6 @@ union cvmx_lmcx_timing_params0 {
uint64_t reserved_46_63:18;
#endif
} cn63xxp1;
- struct cvmx_lmcx_timing_params0_cn61xx cn66xx;
- struct cvmx_lmcx_timing_params0_cn61xx cn68xx;
- struct cvmx_lmcx_timing_params0_cn61xx cn68xxp1;
- struct cvmx_lmcx_timing_params0_cn61xx cnf71xx;
};
union cvmx_lmcx_timing_params1 {
@@ -3162,8 +2710,6 @@ union cvmx_lmcx_timing_params1 {
uint64_t reserved_47_63:17;
#endif
} s;
- struct cvmx_lmcx_timing_params1_s cn61xx;
- struct cvmx_lmcx_timing_params1_s cn63xx;
struct cvmx_lmcx_timing_params1_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_46_63:18;
@@ -3193,10 +2739,6 @@ union cvmx_lmcx_timing_params1 {
uint64_t reserved_46_63:18;
#endif
} cn63xxp1;
- struct cvmx_lmcx_timing_params1_s cn66xx;
- struct cvmx_lmcx_timing_params1_s cn68xx;
- struct cvmx_lmcx_timing_params1_s cn68xxp1;
- struct cvmx_lmcx_timing_params1_s cnf71xx;
};
union cvmx_lmcx_tro_ctl {
@@ -3212,13 +2754,6 @@ union cvmx_lmcx_tro_ctl {
uint64_t reserved_33_63:31;
#endif
} s;
- struct cvmx_lmcx_tro_ctl_s cn61xx;
- struct cvmx_lmcx_tro_ctl_s cn63xx;
- struct cvmx_lmcx_tro_ctl_s cn63xxp1;
- struct cvmx_lmcx_tro_ctl_s cn66xx;
- struct cvmx_lmcx_tro_ctl_s cn68xx;
- struct cvmx_lmcx_tro_ctl_s cn68xxp1;
- struct cvmx_lmcx_tro_ctl_s cnf71xx;
};
union cvmx_lmcx_tro_stat {
@@ -3232,13 +2767,6 @@ union cvmx_lmcx_tro_stat {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_tro_stat_s cn61xx;
- struct cvmx_lmcx_tro_stat_s cn63xx;
- struct cvmx_lmcx_tro_stat_s cn63xxp1;
- struct cvmx_lmcx_tro_stat_s cn66xx;
- struct cvmx_lmcx_tro_stat_s cn68xx;
- struct cvmx_lmcx_tro_stat_s cn68xxp1;
- struct cvmx_lmcx_tro_stat_s cnf71xx;
};
union cvmx_lmcx_wlevel_ctl {
@@ -3260,8 +2788,6 @@ union cvmx_lmcx_wlevel_ctl {
uint64_t reserved_22_63:42;
#endif
} s;
- struct cvmx_lmcx_wlevel_ctl_s cn61xx;
- struct cvmx_lmcx_wlevel_ctl_s cn63xx;
struct cvmx_lmcx_wlevel_ctl_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
@@ -3273,10 +2799,6 @@ union cvmx_lmcx_wlevel_ctl {
uint64_t reserved_10_63:54;
#endif
} cn63xxp1;
- struct cvmx_lmcx_wlevel_ctl_s cn66xx;
- struct cvmx_lmcx_wlevel_ctl_s cn68xx;
- struct cvmx_lmcx_wlevel_ctl_s cn68xxp1;
- struct cvmx_lmcx_wlevel_ctl_s cnf71xx;
};
union cvmx_lmcx_wlevel_dbg {
@@ -3292,13 +2814,6 @@ union cvmx_lmcx_wlevel_dbg {
uint64_t reserved_12_63:52;
#endif
} s;
- struct cvmx_lmcx_wlevel_dbg_s cn61xx;
- struct cvmx_lmcx_wlevel_dbg_s cn63xx;
- struct cvmx_lmcx_wlevel_dbg_s cn63xxp1;
- struct cvmx_lmcx_wlevel_dbg_s cn66xx;
- struct cvmx_lmcx_wlevel_dbg_s cn68xx;
- struct cvmx_lmcx_wlevel_dbg_s cn68xxp1;
- struct cvmx_lmcx_wlevel_dbg_s cnf71xx;
};
union cvmx_lmcx_wlevel_rankx {
@@ -3330,13 +2845,6 @@ union cvmx_lmcx_wlevel_rankx {
uint64_t reserved_47_63:17;
#endif
} s;
- struct cvmx_lmcx_wlevel_rankx_s cn61xx;
- struct cvmx_lmcx_wlevel_rankx_s cn63xx;
- struct cvmx_lmcx_wlevel_rankx_s cn63xxp1;
- struct cvmx_lmcx_wlevel_rankx_s cn66xx;
- struct cvmx_lmcx_wlevel_rankx_s cn68xx;
- struct cvmx_lmcx_wlevel_rankx_s cn68xxp1;
- struct cvmx_lmcx_wlevel_rankx_s cnf71xx;
};
union cvmx_lmcx_wodt_ctl0 {
@@ -3363,7 +2871,6 @@ union cvmx_lmcx_wodt_ctl0 {
uint64_t reserved_32_63:32;
#endif
} cn30xx;
- struct cvmx_lmcx_wodt_ctl0_cn30xx cn31xx;
struct cvmx_lmcx_wodt_ctl0_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
@@ -3387,14 +2894,6 @@ union cvmx_lmcx_wodt_ctl0 {
uint64_t reserved_32_63:32;
#endif
} cn38xx;
- struct cvmx_lmcx_wodt_ctl0_cn38xx cn38xxp2;
- struct cvmx_lmcx_wodt_ctl0_cn38xx cn50xx;
- struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xx;
- struct cvmx_lmcx_wodt_ctl0_cn30xx cn52xxp1;
- struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xx;
- struct cvmx_lmcx_wodt_ctl0_cn30xx cn56xxp1;
- struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xx;
- struct cvmx_lmcx_wodt_ctl0_cn38xx cn58xxp1;
};
union cvmx_lmcx_wodt_ctl1 {
@@ -3414,12 +2913,6 @@ union cvmx_lmcx_wodt_ctl1 {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_lmcx_wodt_ctl1_s cn30xx;
- struct cvmx_lmcx_wodt_ctl1_s cn31xx;
- struct cvmx_lmcx_wodt_ctl1_s cn52xx;
- struct cvmx_lmcx_wodt_ctl1_s cn52xxp1;
- struct cvmx_lmcx_wodt_ctl1_s cn56xx;
- struct cvmx_lmcx_wodt_ctl1_s cn56xxp1;
};
union cvmx_lmcx_wodt_mask {
@@ -3445,13 +2938,6 @@ union cvmx_lmcx_wodt_mask {
uint64_t wodt_d3_r1:8;
#endif
} s;
- struct cvmx_lmcx_wodt_mask_s cn61xx;
- struct cvmx_lmcx_wodt_mask_s cn63xx;
- struct cvmx_lmcx_wodt_mask_s cn63xxp1;
- struct cvmx_lmcx_wodt_mask_s cn66xx;
- struct cvmx_lmcx_wodt_mask_s cn68xx;
- struct cvmx_lmcx_wodt_mask_s cn68xxp1;
- struct cvmx_lmcx_wodt_mask_s cnf71xx;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-mio-defs.h b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
index 5196c04eee41..4ad95d040bb1 100644
--- a/arch/mips/include/asm/octeon/cvmx-mio-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
@@ -188,7 +188,6 @@ union cvmx_mio_boot_bist_stat {
uint64_t reserved_4_63:60;
#endif
} cn30xx;
- struct cvmx_mio_boot_bist_stat_cn30xx cn31xx;
struct cvmx_mio_boot_bist_stat_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_3_63:61;
@@ -202,7 +201,6 @@ union cvmx_mio_boot_bist_stat {
uint64_t reserved_3_63:61;
#endif
} cn38xx;
- struct cvmx_mio_boot_bist_stat_cn38xx cn38xxp2;
struct cvmx_mio_boot_bist_stat_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_6_63:58;
@@ -254,10 +252,6 @@ union cvmx_mio_boot_bist_stat {
uint64_t reserved_4_63:60;
#endif
} cn52xxp1;
- struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xx;
- struct cvmx_mio_boot_bist_stat_cn52xxp1 cn56xxp1;
- struct cvmx_mio_boot_bist_stat_cn38xx cn58xx;
- struct cvmx_mio_boot_bist_stat_cn38xx cn58xxp1;
struct cvmx_mio_boot_bist_stat_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -276,7 +270,6 @@ union cvmx_mio_boot_bist_stat {
uint64_t reserved_9_63:55;
#endif
} cn63xx;
- struct cvmx_mio_boot_bist_stat_cn63xx cn63xxp1;
struct cvmx_mio_boot_bist_stat_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
@@ -286,9 +279,6 @@ union cvmx_mio_boot_bist_stat {
uint64_t reserved_10_63:54;
#endif
} cn66xx;
- struct cvmx_mio_boot_bist_stat_cn66xx cn68xx;
- struct cvmx_mio_boot_bist_stat_cn66xx cn68xxp1;
- struct cvmx_mio_boot_bist_stat_cn61xx cnf71xx;
};
union cvmx_mio_boot_comp {
@@ -311,10 +301,6 @@ union cvmx_mio_boot_comp {
uint64_t reserved_10_63:54;
#endif
} cn50xx;
- struct cvmx_mio_boot_comp_cn50xx cn52xx;
- struct cvmx_mio_boot_comp_cn50xx cn52xxp1;
- struct cvmx_mio_boot_comp_cn50xx cn56xx;
- struct cvmx_mio_boot_comp_cn50xx cn56xxp1;
struct cvmx_mio_boot_comp_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -326,12 +312,6 @@ union cvmx_mio_boot_comp {
uint64_t reserved_12_63:52;
#endif
} cn61xx;
- struct cvmx_mio_boot_comp_cn61xx cn63xx;
- struct cvmx_mio_boot_comp_cn61xx cn63xxp1;
- struct cvmx_mio_boot_comp_cn61xx cn66xx;
- struct cvmx_mio_boot_comp_cn61xx cn68xx;
- struct cvmx_mio_boot_comp_cn61xx cn68xxp1;
- struct cvmx_mio_boot_comp_cn61xx cnf71xx;
};
union cvmx_mio_boot_dma_cfgx {
@@ -361,17 +341,6 @@ union cvmx_mio_boot_dma_cfgx {
uint64_t en:1;
#endif
} s;
- struct cvmx_mio_boot_dma_cfgx_s cn52xx;
- struct cvmx_mio_boot_dma_cfgx_s cn52xxp1;
- struct cvmx_mio_boot_dma_cfgx_s cn56xx;
- struct cvmx_mio_boot_dma_cfgx_s cn56xxp1;
- struct cvmx_mio_boot_dma_cfgx_s cn61xx;
- struct cvmx_mio_boot_dma_cfgx_s cn63xx;
- struct cvmx_mio_boot_dma_cfgx_s cn63xxp1;
- struct cvmx_mio_boot_dma_cfgx_s cn66xx;
- struct cvmx_mio_boot_dma_cfgx_s cn68xx;
- struct cvmx_mio_boot_dma_cfgx_s cn68xxp1;
- struct cvmx_mio_boot_dma_cfgx_s cnf71xx;
};
union cvmx_mio_boot_dma_intx {
@@ -387,17 +356,6 @@ union cvmx_mio_boot_dma_intx {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_mio_boot_dma_intx_s cn52xx;
- struct cvmx_mio_boot_dma_intx_s cn52xxp1;
- struct cvmx_mio_boot_dma_intx_s cn56xx;
- struct cvmx_mio_boot_dma_intx_s cn56xxp1;
- struct cvmx_mio_boot_dma_intx_s cn61xx;
- struct cvmx_mio_boot_dma_intx_s cn63xx;
- struct cvmx_mio_boot_dma_intx_s cn63xxp1;
- struct cvmx_mio_boot_dma_intx_s cn66xx;
- struct cvmx_mio_boot_dma_intx_s cn68xx;
- struct cvmx_mio_boot_dma_intx_s cn68xxp1;
- struct cvmx_mio_boot_dma_intx_s cnf71xx;
};
union cvmx_mio_boot_dma_int_enx {
@@ -413,17 +371,6 @@ union cvmx_mio_boot_dma_int_enx {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_mio_boot_dma_int_enx_s cn52xx;
- struct cvmx_mio_boot_dma_int_enx_s cn52xxp1;
- struct cvmx_mio_boot_dma_int_enx_s cn56xx;
- struct cvmx_mio_boot_dma_int_enx_s cn56xxp1;
- struct cvmx_mio_boot_dma_int_enx_s cn61xx;
- struct cvmx_mio_boot_dma_int_enx_s cn63xx;
- struct cvmx_mio_boot_dma_int_enx_s cn63xxp1;
- struct cvmx_mio_boot_dma_int_enx_s cn66xx;
- struct cvmx_mio_boot_dma_int_enx_s cn68xx;
- struct cvmx_mio_boot_dma_int_enx_s cn68xxp1;
- struct cvmx_mio_boot_dma_int_enx_s cnf71xx;
};
union cvmx_mio_boot_dma_timx {
@@ -463,17 +410,6 @@ union cvmx_mio_boot_dma_timx {
uint64_t dmack_pi:1;
#endif
} s;
- struct cvmx_mio_boot_dma_timx_s cn52xx;
- struct cvmx_mio_boot_dma_timx_s cn52xxp1;
- struct cvmx_mio_boot_dma_timx_s cn56xx;
- struct cvmx_mio_boot_dma_timx_s cn56xxp1;
- struct cvmx_mio_boot_dma_timx_s cn61xx;
- struct cvmx_mio_boot_dma_timx_s cn63xx;
- struct cvmx_mio_boot_dma_timx_s cn63xxp1;
- struct cvmx_mio_boot_dma_timx_s cn66xx;
- struct cvmx_mio_boot_dma_timx_s cn68xx;
- struct cvmx_mio_boot_dma_timx_s cn68xxp1;
- struct cvmx_mio_boot_dma_timx_s cnf71xx;
};
union cvmx_mio_boot_err {
@@ -489,24 +425,6 @@ union cvmx_mio_boot_err {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_mio_boot_err_s cn30xx;
- struct cvmx_mio_boot_err_s cn31xx;
- struct cvmx_mio_boot_err_s cn38xx;
- struct cvmx_mio_boot_err_s cn38xxp2;
- struct cvmx_mio_boot_err_s cn50xx;
- struct cvmx_mio_boot_err_s cn52xx;
- struct cvmx_mio_boot_err_s cn52xxp1;
- struct cvmx_mio_boot_err_s cn56xx;
- struct cvmx_mio_boot_err_s cn56xxp1;
- struct cvmx_mio_boot_err_s cn58xx;
- struct cvmx_mio_boot_err_s cn58xxp1;
- struct cvmx_mio_boot_err_s cn61xx;
- struct cvmx_mio_boot_err_s cn63xx;
- struct cvmx_mio_boot_err_s cn63xxp1;
- struct cvmx_mio_boot_err_s cn66xx;
- struct cvmx_mio_boot_err_s cn68xx;
- struct cvmx_mio_boot_err_s cn68xxp1;
- struct cvmx_mio_boot_err_s cnf71xx;
};
union cvmx_mio_boot_int {
@@ -522,24 +440,6 @@ union cvmx_mio_boot_int {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_mio_boot_int_s cn30xx;
- struct cvmx_mio_boot_int_s cn31xx;
- struct cvmx_mio_boot_int_s cn38xx;
- struct cvmx_mio_boot_int_s cn38xxp2;
- struct cvmx_mio_boot_int_s cn50xx;
- struct cvmx_mio_boot_int_s cn52xx;
- struct cvmx_mio_boot_int_s cn52xxp1;
- struct cvmx_mio_boot_int_s cn56xx;
- struct cvmx_mio_boot_int_s cn56xxp1;
- struct cvmx_mio_boot_int_s cn58xx;
- struct cvmx_mio_boot_int_s cn58xxp1;
- struct cvmx_mio_boot_int_s cn61xx;
- struct cvmx_mio_boot_int_s cn63xx;
- struct cvmx_mio_boot_int_s cn63xxp1;
- struct cvmx_mio_boot_int_s cn66xx;
- struct cvmx_mio_boot_int_s cn68xx;
- struct cvmx_mio_boot_int_s cn68xxp1;
- struct cvmx_mio_boot_int_s cnf71xx;
};
union cvmx_mio_boot_loc_adr {
@@ -555,24 +455,6 @@ union cvmx_mio_boot_loc_adr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_boot_loc_adr_s cn30xx;
- struct cvmx_mio_boot_loc_adr_s cn31xx;
- struct cvmx_mio_boot_loc_adr_s cn38xx;
- struct cvmx_mio_boot_loc_adr_s cn38xxp2;
- struct cvmx_mio_boot_loc_adr_s cn50xx;
- struct cvmx_mio_boot_loc_adr_s cn52xx;
- struct cvmx_mio_boot_loc_adr_s cn52xxp1;
- struct cvmx_mio_boot_loc_adr_s cn56xx;
- struct cvmx_mio_boot_loc_adr_s cn56xxp1;
- struct cvmx_mio_boot_loc_adr_s cn58xx;
- struct cvmx_mio_boot_loc_adr_s cn58xxp1;
- struct cvmx_mio_boot_loc_adr_s cn61xx;
- struct cvmx_mio_boot_loc_adr_s cn63xx;
- struct cvmx_mio_boot_loc_adr_s cn63xxp1;
- struct cvmx_mio_boot_loc_adr_s cn66xx;
- struct cvmx_mio_boot_loc_adr_s cn68xx;
- struct cvmx_mio_boot_loc_adr_s cn68xxp1;
- struct cvmx_mio_boot_loc_adr_s cnf71xx;
};
union cvmx_mio_boot_loc_cfgx {
@@ -592,24 +474,6 @@ union cvmx_mio_boot_loc_cfgx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_mio_boot_loc_cfgx_s cn30xx;
- struct cvmx_mio_boot_loc_cfgx_s cn31xx;
- struct cvmx_mio_boot_loc_cfgx_s cn38xx;
- struct cvmx_mio_boot_loc_cfgx_s cn38xxp2;
- struct cvmx_mio_boot_loc_cfgx_s cn50xx;
- struct cvmx_mio_boot_loc_cfgx_s cn52xx;
- struct cvmx_mio_boot_loc_cfgx_s cn52xxp1;
- struct cvmx_mio_boot_loc_cfgx_s cn56xx;
- struct cvmx_mio_boot_loc_cfgx_s cn56xxp1;
- struct cvmx_mio_boot_loc_cfgx_s cn58xx;
- struct cvmx_mio_boot_loc_cfgx_s cn58xxp1;
- struct cvmx_mio_boot_loc_cfgx_s cn61xx;
- struct cvmx_mio_boot_loc_cfgx_s cn63xx;
- struct cvmx_mio_boot_loc_cfgx_s cn63xxp1;
- struct cvmx_mio_boot_loc_cfgx_s cn66xx;
- struct cvmx_mio_boot_loc_cfgx_s cn68xx;
- struct cvmx_mio_boot_loc_cfgx_s cn68xxp1;
- struct cvmx_mio_boot_loc_cfgx_s cnf71xx;
};
union cvmx_mio_boot_loc_dat {
@@ -621,24 +485,6 @@ union cvmx_mio_boot_loc_dat {
uint64_t data:64;
#endif
} s;
- struct cvmx_mio_boot_loc_dat_s cn30xx;
- struct cvmx_mio_boot_loc_dat_s cn31xx;
- struct cvmx_mio_boot_loc_dat_s cn38xx;
- struct cvmx_mio_boot_loc_dat_s cn38xxp2;
- struct cvmx_mio_boot_loc_dat_s cn50xx;
- struct cvmx_mio_boot_loc_dat_s cn52xx;
- struct cvmx_mio_boot_loc_dat_s cn52xxp1;
- struct cvmx_mio_boot_loc_dat_s cn56xx;
- struct cvmx_mio_boot_loc_dat_s cn56xxp1;
- struct cvmx_mio_boot_loc_dat_s cn58xx;
- struct cvmx_mio_boot_loc_dat_s cn58xxp1;
- struct cvmx_mio_boot_loc_dat_s cn61xx;
- struct cvmx_mio_boot_loc_dat_s cn63xx;
- struct cvmx_mio_boot_loc_dat_s cn63xxp1;
- struct cvmx_mio_boot_loc_dat_s cn66xx;
- struct cvmx_mio_boot_loc_dat_s cn68xx;
- struct cvmx_mio_boot_loc_dat_s cn68xxp1;
- struct cvmx_mio_boot_loc_dat_s cnf71xx;
};
union cvmx_mio_boot_pin_defs {
@@ -737,12 +583,6 @@ union cvmx_mio_boot_pin_defs {
uint64_t reserved_32_63:32;
#endif
} cn61xx;
- struct cvmx_mio_boot_pin_defs_cn52xx cn63xx;
- struct cvmx_mio_boot_pin_defs_cn52xx cn63xxp1;
- struct cvmx_mio_boot_pin_defs_cn52xx cn66xx;
- struct cvmx_mio_boot_pin_defs_cn52xx cn68xx;
- struct cvmx_mio_boot_pin_defs_cn52xx cn68xxp1;
- struct cvmx_mio_boot_pin_defs_cn61xx cnf71xx;
};
union cvmx_mio_boot_reg_cfgx {
@@ -803,7 +643,6 @@ union cvmx_mio_boot_reg_cfgx {
uint64_t reserved_37_63:27;
#endif
} cn30xx;
- struct cvmx_mio_boot_reg_cfgx_cn30xx cn31xx;
struct cvmx_mio_boot_reg_cfgx_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
@@ -821,7 +660,6 @@ union cvmx_mio_boot_reg_cfgx {
uint64_t reserved_32_63:32;
#endif
} cn38xx;
- struct cvmx_mio_boot_reg_cfgx_cn38xx cn38xxp2;
struct cvmx_mio_boot_reg_cfgx_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_42_63:22;
@@ -851,19 +689,6 @@ union cvmx_mio_boot_reg_cfgx {
uint64_t reserved_42_63:22;
#endif
} cn50xx;
- struct cvmx_mio_boot_reg_cfgx_s cn52xx;
- struct cvmx_mio_boot_reg_cfgx_s cn52xxp1;
- struct cvmx_mio_boot_reg_cfgx_s cn56xx;
- struct cvmx_mio_boot_reg_cfgx_s cn56xxp1;
- struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xx;
- struct cvmx_mio_boot_reg_cfgx_cn30xx cn58xxp1;
- struct cvmx_mio_boot_reg_cfgx_s cn61xx;
- struct cvmx_mio_boot_reg_cfgx_s cn63xx;
- struct cvmx_mio_boot_reg_cfgx_s cn63xxp1;
- struct cvmx_mio_boot_reg_cfgx_s cn66xx;
- struct cvmx_mio_boot_reg_cfgx_s cn68xx;
- struct cvmx_mio_boot_reg_cfgx_s cn68xxp1;
- struct cvmx_mio_boot_reg_cfgx_s cnf71xx;
};
union cvmx_mio_boot_reg_timx {
@@ -899,8 +724,6 @@ union cvmx_mio_boot_reg_timx {
uint64_t pagem:1;
#endif
} s;
- struct cvmx_mio_boot_reg_timx_s cn30xx;
- struct cvmx_mio_boot_reg_timx_s cn31xx;
struct cvmx_mio_boot_reg_timx_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t pagem:1;
@@ -932,21 +755,6 @@ union cvmx_mio_boot_reg_timx {
uint64_t pagem:1;
#endif
} cn38xx;
- struct cvmx_mio_boot_reg_timx_cn38xx cn38xxp2;
- struct cvmx_mio_boot_reg_timx_s cn50xx;
- struct cvmx_mio_boot_reg_timx_s cn52xx;
- struct cvmx_mio_boot_reg_timx_s cn52xxp1;
- struct cvmx_mio_boot_reg_timx_s cn56xx;
- struct cvmx_mio_boot_reg_timx_s cn56xxp1;
- struct cvmx_mio_boot_reg_timx_s cn58xx;
- struct cvmx_mio_boot_reg_timx_s cn58xxp1;
- struct cvmx_mio_boot_reg_timx_s cn61xx;
- struct cvmx_mio_boot_reg_timx_s cn63xx;
- struct cvmx_mio_boot_reg_timx_s cn63xxp1;
- struct cvmx_mio_boot_reg_timx_s cn66xx;
- struct cvmx_mio_boot_reg_timx_s cn68xx;
- struct cvmx_mio_boot_reg_timx_s cn68xxp1;
- struct cvmx_mio_boot_reg_timx_s cnf71xx;
};
union cvmx_mio_boot_thr {
@@ -981,23 +789,6 @@ union cvmx_mio_boot_thr {
uint64_t reserved_14_63:50;
#endif
} cn30xx;
- struct cvmx_mio_boot_thr_cn30xx cn31xx;
- struct cvmx_mio_boot_thr_cn30xx cn38xx;
- struct cvmx_mio_boot_thr_cn30xx cn38xxp2;
- struct cvmx_mio_boot_thr_cn30xx cn50xx;
- struct cvmx_mio_boot_thr_s cn52xx;
- struct cvmx_mio_boot_thr_s cn52xxp1;
- struct cvmx_mio_boot_thr_s cn56xx;
- struct cvmx_mio_boot_thr_s cn56xxp1;
- struct cvmx_mio_boot_thr_cn30xx cn58xx;
- struct cvmx_mio_boot_thr_cn30xx cn58xxp1;
- struct cvmx_mio_boot_thr_s cn61xx;
- struct cvmx_mio_boot_thr_s cn63xx;
- struct cvmx_mio_boot_thr_s cn63xxp1;
- struct cvmx_mio_boot_thr_s cn66xx;
- struct cvmx_mio_boot_thr_s cn68xx;
- struct cvmx_mio_boot_thr_s cn68xxp1;
- struct cvmx_mio_boot_thr_s cnf71xx;
};
union cvmx_mio_emm_buf_dat {
@@ -1009,8 +800,6 @@ union cvmx_mio_emm_buf_dat {
uint64_t dat:64;
#endif
} s;
- struct cvmx_mio_emm_buf_dat_s cn61xx;
- struct cvmx_mio_emm_buf_dat_s cnf71xx;
};
union cvmx_mio_emm_buf_idx {
@@ -1030,8 +819,6 @@ union cvmx_mio_emm_buf_idx {
uint64_t reserved_17_63:47;
#endif
} s;
- struct cvmx_mio_emm_buf_idx_s cn61xx;
- struct cvmx_mio_emm_buf_idx_s cnf71xx;
};
union cvmx_mio_emm_cfg {
@@ -1049,8 +836,6 @@ union cvmx_mio_emm_cfg {
uint64_t reserved_17_63:47;
#endif
} s;
- struct cvmx_mio_emm_cfg_s cn61xx;
- struct cvmx_mio_emm_cfg_s cnf71xx;
};
union cvmx_mio_emm_cmd {
@@ -1082,8 +867,6 @@ union cvmx_mio_emm_cmd {
uint64_t reserved_62_63:2;
#endif
} s;
- struct cvmx_mio_emm_cmd_s cn61xx;
- struct cvmx_mio_emm_cmd_s cnf71xx;
};
union cvmx_mio_emm_dma {
@@ -1115,8 +898,6 @@ union cvmx_mio_emm_dma {
uint64_t reserved_62_63:2;
#endif
} s;
- struct cvmx_mio_emm_dma_s cn61xx;
- struct cvmx_mio_emm_dma_s cnf71xx;
};
union cvmx_mio_emm_int {
@@ -1142,8 +923,6 @@ union cvmx_mio_emm_int {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_mio_emm_int_s cn61xx;
- struct cvmx_mio_emm_int_s cnf71xx;
};
union cvmx_mio_emm_int_en {
@@ -1169,8 +948,6 @@ union cvmx_mio_emm_int_en {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_mio_emm_int_en_s cn61xx;
- struct cvmx_mio_emm_int_en_s cnf71xx;
};
union cvmx_mio_emm_modex {
@@ -1196,8 +973,6 @@ union cvmx_mio_emm_modex {
uint64_t reserved_49_63:15;
#endif
} s;
- struct cvmx_mio_emm_modex_s cn61xx;
- struct cvmx_mio_emm_modex_s cnf71xx;
};
union cvmx_mio_emm_rca {
@@ -1211,8 +986,6 @@ union cvmx_mio_emm_rca {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_mio_emm_rca_s cn61xx;
- struct cvmx_mio_emm_rca_s cnf71xx;
};
union cvmx_mio_emm_rsp_hi {
@@ -1224,8 +997,6 @@ union cvmx_mio_emm_rsp_hi {
uint64_t dat:64;
#endif
} s;
- struct cvmx_mio_emm_rsp_hi_s cn61xx;
- struct cvmx_mio_emm_rsp_hi_s cnf71xx;
};
union cvmx_mio_emm_rsp_lo {
@@ -1237,8 +1008,6 @@ union cvmx_mio_emm_rsp_lo {
uint64_t dat:64;
#endif
} s;
- struct cvmx_mio_emm_rsp_lo_s cn61xx;
- struct cvmx_mio_emm_rsp_lo_s cnf71xx;
};
union cvmx_mio_emm_rsp_sts {
@@ -1298,8 +1067,6 @@ union cvmx_mio_emm_rsp_sts {
uint64_t reserved_62_63:2;
#endif
} s;
- struct cvmx_mio_emm_rsp_sts_s cn61xx;
- struct cvmx_mio_emm_rsp_sts_s cnf71xx;
};
union cvmx_mio_emm_sample {
@@ -1317,8 +1084,6 @@ union cvmx_mio_emm_sample {
uint64_t reserved_26_63:38;
#endif
} s;
- struct cvmx_mio_emm_sample_s cn61xx;
- struct cvmx_mio_emm_sample_s cnf71xx;
};
union cvmx_mio_emm_sts_mask {
@@ -1332,8 +1097,6 @@ union cvmx_mio_emm_sts_mask {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_mio_emm_sts_mask_s cn61xx;
- struct cvmx_mio_emm_sts_mask_s cnf71xx;
};
union cvmx_mio_emm_switch {
@@ -1371,8 +1134,6 @@ union cvmx_mio_emm_switch {
uint64_t reserved_62_63:2;
#endif
} s;
- struct cvmx_mio_emm_switch_s cn61xx;
- struct cvmx_mio_emm_switch_s cnf71xx;
};
union cvmx_mio_emm_wdog {
@@ -1386,8 +1147,6 @@ union cvmx_mio_emm_wdog {
uint64_t reserved_26_63:38;
#endif
} s;
- struct cvmx_mio_emm_wdog_s cn61xx;
- struct cvmx_mio_emm_wdog_s cnf71xx;
};
union cvmx_mio_fus_bnk_datx {
@@ -1399,20 +1158,6 @@ union cvmx_mio_fus_bnk_datx {
uint64_t dat:64;
#endif
} s;
- struct cvmx_mio_fus_bnk_datx_s cn50xx;
- struct cvmx_mio_fus_bnk_datx_s cn52xx;
- struct cvmx_mio_fus_bnk_datx_s cn52xxp1;
- struct cvmx_mio_fus_bnk_datx_s cn56xx;
- struct cvmx_mio_fus_bnk_datx_s cn56xxp1;
- struct cvmx_mio_fus_bnk_datx_s cn58xx;
- struct cvmx_mio_fus_bnk_datx_s cn58xxp1;
- struct cvmx_mio_fus_bnk_datx_s cn61xx;
- struct cvmx_mio_fus_bnk_datx_s cn63xx;
- struct cvmx_mio_fus_bnk_datx_s cn63xxp1;
- struct cvmx_mio_fus_bnk_datx_s cn66xx;
- struct cvmx_mio_fus_bnk_datx_s cn68xx;
- struct cvmx_mio_fus_bnk_datx_s cn68xxp1;
- struct cvmx_mio_fus_bnk_datx_s cnf71xx;
};
union cvmx_mio_fus_dat0 {
@@ -1426,24 +1171,6 @@ union cvmx_mio_fus_dat0 {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_mio_fus_dat0_s cn30xx;
- struct cvmx_mio_fus_dat0_s cn31xx;
- struct cvmx_mio_fus_dat0_s cn38xx;
- struct cvmx_mio_fus_dat0_s cn38xxp2;
- struct cvmx_mio_fus_dat0_s cn50xx;
- struct cvmx_mio_fus_dat0_s cn52xx;
- struct cvmx_mio_fus_dat0_s cn52xxp1;
- struct cvmx_mio_fus_dat0_s cn56xx;
- struct cvmx_mio_fus_dat0_s cn56xxp1;
- struct cvmx_mio_fus_dat0_s cn58xx;
- struct cvmx_mio_fus_dat0_s cn58xxp1;
- struct cvmx_mio_fus_dat0_s cn61xx;
- struct cvmx_mio_fus_dat0_s cn63xx;
- struct cvmx_mio_fus_dat0_s cn63xxp1;
- struct cvmx_mio_fus_dat0_s cn66xx;
- struct cvmx_mio_fus_dat0_s cn68xx;
- struct cvmx_mio_fus_dat0_s cn68xxp1;
- struct cvmx_mio_fus_dat0_s cnf71xx;
};
union cvmx_mio_fus_dat1 {
@@ -1457,24 +1184,6 @@ union cvmx_mio_fus_dat1 {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_mio_fus_dat1_s cn30xx;
- struct cvmx_mio_fus_dat1_s cn31xx;
- struct cvmx_mio_fus_dat1_s cn38xx;
- struct cvmx_mio_fus_dat1_s cn38xxp2;
- struct cvmx_mio_fus_dat1_s cn50xx;
- struct cvmx_mio_fus_dat1_s cn52xx;
- struct cvmx_mio_fus_dat1_s cn52xxp1;
- struct cvmx_mio_fus_dat1_s cn56xx;
- struct cvmx_mio_fus_dat1_s cn56xxp1;
- struct cvmx_mio_fus_dat1_s cn58xx;
- struct cvmx_mio_fus_dat1_s cn58xxp1;
- struct cvmx_mio_fus_dat1_s cn61xx;
- struct cvmx_mio_fus_dat1_s cn63xx;
- struct cvmx_mio_fus_dat1_s cn63xxp1;
- struct cvmx_mio_fus_dat1_s cn66xx;
- struct cvmx_mio_fus_dat1_s cn68xx;
- struct cvmx_mio_fus_dat1_s cn68xxp1;
- struct cvmx_mio_fus_dat1_s cnf71xx;
};
union cvmx_mio_fus_dat2 {
@@ -1591,7 +1300,6 @@ union cvmx_mio_fus_dat2 {
uint64_t reserved_29_63:35;
#endif
} cn38xx;
- struct cvmx_mio_fus_dat2_cn38xx cn38xxp2;
struct cvmx_mio_fus_dat2_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
@@ -1654,7 +1362,6 @@ union cvmx_mio_fus_dat2 {
uint64_t reserved_34_63:30;
#endif
} cn52xx;
- struct cvmx_mio_fus_dat2_cn52xx cn52xxp1;
struct cvmx_mio_fus_dat2_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_34_63:30;
@@ -1686,7 +1393,6 @@ union cvmx_mio_fus_dat2 {
uint64_t reserved_34_63:30;
#endif
} cn56xx;
- struct cvmx_mio_fus_dat2_cn56xx cn56xxp1;
struct cvmx_mio_fus_dat2_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_30_63:34;
@@ -1710,7 +1416,6 @@ union cvmx_mio_fus_dat2 {
uint64_t reserved_30_63:34;
#endif
} cn58xx;
- struct cvmx_mio_fus_dat2_cn58xx cn58xxp1;
struct cvmx_mio_fus_dat2_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_48_63:16;
@@ -1775,7 +1480,6 @@ union cvmx_mio_fus_dat2 {
uint64_t reserved_35_63:29;
#endif
} cn63xx;
- struct cvmx_mio_fus_dat2_cn63xx cn63xxp1;
struct cvmx_mio_fus_dat2_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_48_63:16;
@@ -1840,7 +1544,6 @@ union cvmx_mio_fus_dat2 {
uint64_t reserved_37_63:27;
#endif
} cn68xx;
- struct cvmx_mio_fus_dat2_cn68xx cn68xxp1;
struct cvmx_mio_fus_dat2_cn70xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_48_63:16;
@@ -1874,7 +1577,6 @@ union cvmx_mio_fus_dat2 {
uint64_t reserved_48_63:16;
#endif
} cn70xx;
- struct cvmx_mio_fus_dat2_cn70xx cn70xxp1;
struct cvmx_mio_fus_dat2_cn73xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_59_63:5;
@@ -1986,8 +1688,6 @@ union cvmx_mio_fus_dat2 {
uint64_t reserved_59_63:5;
#endif
} cn78xxp2;
- struct cvmx_mio_fus_dat2_cn61xx cnf71xx;
- struct cvmx_mio_fus_dat2_cn73xx cnf75xx;
};
union cvmx_mio_fus_dat3 {
@@ -2115,13 +1815,6 @@ union cvmx_mio_fus_dat3 {
uint64_t reserved_29_63:35;
#endif
} cn38xxp2;
- struct cvmx_mio_fus_dat3_cn38xx cn50xx;
- struct cvmx_mio_fus_dat3_cn38xx cn52xx;
- struct cvmx_mio_fus_dat3_cn38xx cn52xxp1;
- struct cvmx_mio_fus_dat3_cn38xx cn56xx;
- struct cvmx_mio_fus_dat3_cn38xx cn56xxp1;
- struct cvmx_mio_fus_dat3_cn38xx cn58xx;
- struct cvmx_mio_fus_dat3_cn38xx cn58xxp1;
struct cvmx_mio_fus_dat3_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_58_63:6;
@@ -2163,11 +1856,6 @@ union cvmx_mio_fus_dat3 {
uint64_t reserved_58_63:6;
#endif
} cn61xx;
- struct cvmx_mio_fus_dat3_cn61xx cn63xx;
- struct cvmx_mio_fus_dat3_cn61xx cn63xxp1;
- struct cvmx_mio_fus_dat3_cn61xx cn66xx;
- struct cvmx_mio_fus_dat3_cn61xx cn68xx;
- struct cvmx_mio_fus_dat3_cn61xx cn68xxp1;
struct cvmx_mio_fus_dat3_cn70xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t ema0:6;
@@ -2352,8 +2040,6 @@ union cvmx_mio_fus_dat3 {
uint64_t ema0:6;
#endif
} cn78xx;
- struct cvmx_mio_fus_dat3_cn73xx cn78xxp2;
- struct cvmx_mio_fus_dat3_cn61xx cnf71xx;
struct cvmx_mio_fus_dat3_cnf75xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t ema0:6;
@@ -2418,11 +2104,6 @@ union cvmx_mio_fus_ema {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_mio_fus_ema_s cn50xx;
- struct cvmx_mio_fus_ema_s cn52xx;
- struct cvmx_mio_fus_ema_s cn52xxp1;
- struct cvmx_mio_fus_ema_s cn56xx;
- struct cvmx_mio_fus_ema_s cn56xxp1;
struct cvmx_mio_fus_ema_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
@@ -2432,14 +2113,6 @@ union cvmx_mio_fus_ema {
uint64_t reserved_2_63:62;
#endif
} cn58xx;
- struct cvmx_mio_fus_ema_cn58xx cn58xxp1;
- struct cvmx_mio_fus_ema_s cn61xx;
- struct cvmx_mio_fus_ema_s cn63xx;
- struct cvmx_mio_fus_ema_s cn63xxp1;
- struct cvmx_mio_fus_ema_s cn66xx;
- struct cvmx_mio_fus_ema_s cn68xx;
- struct cvmx_mio_fus_ema_s cn68xxp1;
- struct cvmx_mio_fus_ema_s cnf71xx;
};
union cvmx_mio_fus_pdf {
@@ -2451,19 +2124,6 @@ union cvmx_mio_fus_pdf {
uint64_t pdf:64;
#endif
} s;
- struct cvmx_mio_fus_pdf_s cn50xx;
- struct cvmx_mio_fus_pdf_s cn52xx;
- struct cvmx_mio_fus_pdf_s cn52xxp1;
- struct cvmx_mio_fus_pdf_s cn56xx;
- struct cvmx_mio_fus_pdf_s cn56xxp1;
- struct cvmx_mio_fus_pdf_s cn58xx;
- struct cvmx_mio_fus_pdf_s cn61xx;
- struct cvmx_mio_fus_pdf_s cn63xx;
- struct cvmx_mio_fus_pdf_s cn63xxp1;
- struct cvmx_mio_fus_pdf_s cn66xx;
- struct cvmx_mio_fus_pdf_s cn68xx;
- struct cvmx_mio_fus_pdf_s cn68xxp1;
- struct cvmx_mio_fus_pdf_s cnf71xx;
};
union cvmx_mio_fus_pll {
@@ -2504,12 +2164,6 @@ union cvmx_mio_fus_pll {
uint64_t reserved_2_63:62;
#endif
} cn50xx;
- struct cvmx_mio_fus_pll_cn50xx cn52xx;
- struct cvmx_mio_fus_pll_cn50xx cn52xxp1;
- struct cvmx_mio_fus_pll_cn50xx cn56xx;
- struct cvmx_mio_fus_pll_cn50xx cn56xxp1;
- struct cvmx_mio_fus_pll_cn50xx cn58xx;
- struct cvmx_mio_fus_pll_cn50xx cn58xxp1;
struct cvmx_mio_fus_pll_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_8_63:56;
@@ -2529,12 +2183,6 @@ union cvmx_mio_fus_pll {
uint64_t reserved_8_63:56;
#endif
} cn61xx;
- struct cvmx_mio_fus_pll_cn61xx cn63xx;
- struct cvmx_mio_fus_pll_cn61xx cn63xxp1;
- struct cvmx_mio_fus_pll_cn61xx cn66xx;
- struct cvmx_mio_fus_pll_s cn68xx;
- struct cvmx_mio_fus_pll_s cn68xxp1;
- struct cvmx_mio_fus_pll_cn61xx cnf71xx;
};
union cvmx_mio_fus_prog {
@@ -2559,23 +2207,6 @@ union cvmx_mio_fus_prog {
uint64_t reserved_1_63:63;
#endif
} cn30xx;
- struct cvmx_mio_fus_prog_cn30xx cn31xx;
- struct cvmx_mio_fus_prog_cn30xx cn38xx;
- struct cvmx_mio_fus_prog_cn30xx cn38xxp2;
- struct cvmx_mio_fus_prog_cn30xx cn50xx;
- struct cvmx_mio_fus_prog_cn30xx cn52xx;
- struct cvmx_mio_fus_prog_cn30xx cn52xxp1;
- struct cvmx_mio_fus_prog_cn30xx cn56xx;
- struct cvmx_mio_fus_prog_cn30xx cn56xxp1;
- struct cvmx_mio_fus_prog_cn30xx cn58xx;
- struct cvmx_mio_fus_prog_cn30xx cn58xxp1;
- struct cvmx_mio_fus_prog_s cn61xx;
- struct cvmx_mio_fus_prog_s cn63xx;
- struct cvmx_mio_fus_prog_s cn63xxp1;
- struct cvmx_mio_fus_prog_s cn66xx;
- struct cvmx_mio_fus_prog_s cn68xx;
- struct cvmx_mio_fus_prog_s cn68xxp1;
- struct cvmx_mio_fus_prog_s cnf71xx;
};
union cvmx_mio_fus_prog_times {
@@ -2614,12 +2245,6 @@ union cvmx_mio_fus_prog_times {
uint64_t reserved_33_63:31;
#endif
} cn50xx;
- struct cvmx_mio_fus_prog_times_cn50xx cn52xx;
- struct cvmx_mio_fus_prog_times_cn50xx cn52xxp1;
- struct cvmx_mio_fus_prog_times_cn50xx cn56xx;
- struct cvmx_mio_fus_prog_times_cn50xx cn56xxp1;
- struct cvmx_mio_fus_prog_times_cn50xx cn58xx;
- struct cvmx_mio_fus_prog_times_cn50xx cn58xxp1;
struct cvmx_mio_fus_prog_times_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_35_63:29;
@@ -2641,12 +2266,6 @@ union cvmx_mio_fus_prog_times {
uint64_t reserved_35_63:29;
#endif
} cn61xx;
- struct cvmx_mio_fus_prog_times_cn61xx cn63xx;
- struct cvmx_mio_fus_prog_times_cn61xx cn63xxp1;
- struct cvmx_mio_fus_prog_times_cn61xx cn66xx;
- struct cvmx_mio_fus_prog_times_cn61xx cn68xx;
- struct cvmx_mio_fus_prog_times_cn61xx cn68xxp1;
- struct cvmx_mio_fus_prog_times_cn61xx cnf71xx;
};
union cvmx_mio_fus_rcmd {
@@ -2691,23 +2310,6 @@ union cvmx_mio_fus_rcmd {
uint64_t reserved_24_63:40;
#endif
} cn30xx;
- struct cvmx_mio_fus_rcmd_cn30xx cn31xx;
- struct cvmx_mio_fus_rcmd_cn30xx cn38xx;
- struct cvmx_mio_fus_rcmd_cn30xx cn38xxp2;
- struct cvmx_mio_fus_rcmd_cn30xx cn50xx;
- struct cvmx_mio_fus_rcmd_s cn52xx;
- struct cvmx_mio_fus_rcmd_s cn52xxp1;
- struct cvmx_mio_fus_rcmd_s cn56xx;
- struct cvmx_mio_fus_rcmd_s cn56xxp1;
- struct cvmx_mio_fus_rcmd_cn30xx cn58xx;
- struct cvmx_mio_fus_rcmd_cn30xx cn58xxp1;
- struct cvmx_mio_fus_rcmd_s cn61xx;
- struct cvmx_mio_fus_rcmd_s cn63xx;
- struct cvmx_mio_fus_rcmd_s cn63xxp1;
- struct cvmx_mio_fus_rcmd_s cn66xx;
- struct cvmx_mio_fus_rcmd_s cn68xx;
- struct cvmx_mio_fus_rcmd_s cn68xxp1;
- struct cvmx_mio_fus_rcmd_s cnf71xx;
};
union cvmx_mio_fus_read_times {
@@ -2729,13 +2331,6 @@ union cvmx_mio_fus_read_times {
uint64_t reserved_26_63:38;
#endif
} s;
- struct cvmx_mio_fus_read_times_s cn61xx;
- struct cvmx_mio_fus_read_times_s cn63xx;
- struct cvmx_mio_fus_read_times_s cn63xxp1;
- struct cvmx_mio_fus_read_times_s cn66xx;
- struct cvmx_mio_fus_read_times_s cn68xx;
- struct cvmx_mio_fus_read_times_s cn68xxp1;
- struct cvmx_mio_fus_read_times_s cnf71xx;
};
union cvmx_mio_fus_repair_res0 {
@@ -2755,13 +2350,6 @@ union cvmx_mio_fus_repair_res0 {
uint64_t reserved_55_63:9;
#endif
} s;
- struct cvmx_mio_fus_repair_res0_s cn61xx;
- struct cvmx_mio_fus_repair_res0_s cn63xx;
- struct cvmx_mio_fus_repair_res0_s cn63xxp1;
- struct cvmx_mio_fus_repair_res0_s cn66xx;
- struct cvmx_mio_fus_repair_res0_s cn68xx;
- struct cvmx_mio_fus_repair_res0_s cn68xxp1;
- struct cvmx_mio_fus_repair_res0_s cnf71xx;
};
union cvmx_mio_fus_repair_res1 {
@@ -2779,13 +2367,6 @@ union cvmx_mio_fus_repair_res1 {
uint64_t reserved_54_63:10;
#endif
} s;
- struct cvmx_mio_fus_repair_res1_s cn61xx;
- struct cvmx_mio_fus_repair_res1_s cn63xx;
- struct cvmx_mio_fus_repair_res1_s cn63xxp1;
- struct cvmx_mio_fus_repair_res1_s cn66xx;
- struct cvmx_mio_fus_repair_res1_s cn68xx;
- struct cvmx_mio_fus_repair_res1_s cn68xxp1;
- struct cvmx_mio_fus_repair_res1_s cnf71xx;
};
union cvmx_mio_fus_repair_res2 {
@@ -2799,13 +2380,6 @@ union cvmx_mio_fus_repair_res2 {
uint64_t reserved_18_63:46;
#endif
} s;
- struct cvmx_mio_fus_repair_res2_s cn61xx;
- struct cvmx_mio_fus_repair_res2_s cn63xx;
- struct cvmx_mio_fus_repair_res2_s cn63xxp1;
- struct cvmx_mio_fus_repair_res2_s cn66xx;
- struct cvmx_mio_fus_repair_res2_s cn68xx;
- struct cvmx_mio_fus_repair_res2_s cn68xxp1;
- struct cvmx_mio_fus_repair_res2_s cnf71xx;
};
union cvmx_mio_fus_spr_repair_res {
@@ -2823,23 +2397,6 @@ union cvmx_mio_fus_spr_repair_res {
uint64_t reserved_42_63:22;
#endif
} s;
- struct cvmx_mio_fus_spr_repair_res_s cn30xx;
- struct cvmx_mio_fus_spr_repair_res_s cn31xx;
- struct cvmx_mio_fus_spr_repair_res_s cn38xx;
- struct cvmx_mio_fus_spr_repair_res_s cn50xx;
- struct cvmx_mio_fus_spr_repair_res_s cn52xx;
- struct cvmx_mio_fus_spr_repair_res_s cn52xxp1;
- struct cvmx_mio_fus_spr_repair_res_s cn56xx;
- struct cvmx_mio_fus_spr_repair_res_s cn56xxp1;
- struct cvmx_mio_fus_spr_repair_res_s cn58xx;
- struct cvmx_mio_fus_spr_repair_res_s cn58xxp1;
- struct cvmx_mio_fus_spr_repair_res_s cn61xx;
- struct cvmx_mio_fus_spr_repair_res_s cn63xx;
- struct cvmx_mio_fus_spr_repair_res_s cn63xxp1;
- struct cvmx_mio_fus_spr_repair_res_s cn66xx;
- struct cvmx_mio_fus_spr_repair_res_s cn68xx;
- struct cvmx_mio_fus_spr_repair_res_s cn68xxp1;
- struct cvmx_mio_fus_spr_repair_res_s cnf71xx;
};
union cvmx_mio_fus_spr_repair_sum {
@@ -2853,23 +2410,6 @@ union cvmx_mio_fus_spr_repair_sum {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_fus_spr_repair_sum_s cn30xx;
- struct cvmx_mio_fus_spr_repair_sum_s cn31xx;
- struct cvmx_mio_fus_spr_repair_sum_s cn38xx;
- struct cvmx_mio_fus_spr_repair_sum_s cn50xx;
- struct cvmx_mio_fus_spr_repair_sum_s cn52xx;
- struct cvmx_mio_fus_spr_repair_sum_s cn52xxp1;
- struct cvmx_mio_fus_spr_repair_sum_s cn56xx;
- struct cvmx_mio_fus_spr_repair_sum_s cn56xxp1;
- struct cvmx_mio_fus_spr_repair_sum_s cn58xx;
- struct cvmx_mio_fus_spr_repair_sum_s cn58xxp1;
- struct cvmx_mio_fus_spr_repair_sum_s cn61xx;
- struct cvmx_mio_fus_spr_repair_sum_s cn63xx;
- struct cvmx_mio_fus_spr_repair_sum_s cn63xxp1;
- struct cvmx_mio_fus_spr_repair_sum_s cn66xx;
- struct cvmx_mio_fus_spr_repair_sum_s cn68xx;
- struct cvmx_mio_fus_spr_repair_sum_s cn68xxp1;
- struct cvmx_mio_fus_spr_repair_sum_s cnf71xx;
};
union cvmx_mio_fus_tgg {
@@ -2883,9 +2423,6 @@ union cvmx_mio_fus_tgg {
uint64_t val:1;
#endif
} s;
- struct cvmx_mio_fus_tgg_s cn61xx;
- struct cvmx_mio_fus_tgg_s cn66xx;
- struct cvmx_mio_fus_tgg_s cnf71xx;
};
union cvmx_mio_fus_unlock {
@@ -2899,8 +2436,6 @@ union cvmx_mio_fus_unlock {
uint64_t reserved_24_63:40;
#endif
} s;
- struct cvmx_mio_fus_unlock_s cn30xx;
- struct cvmx_mio_fus_unlock_s cn31xx;
};
union cvmx_mio_fus_wadr {
@@ -2914,10 +2449,6 @@ union cvmx_mio_fus_wadr {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_mio_fus_wadr_s cn30xx;
- struct cvmx_mio_fus_wadr_s cn31xx;
- struct cvmx_mio_fus_wadr_s cn38xx;
- struct cvmx_mio_fus_wadr_s cn38xxp2;
struct cvmx_mio_fus_wadr_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
@@ -2936,11 +2467,6 @@ union cvmx_mio_fus_wadr {
uint64_t reserved_3_63:61;
#endif
} cn52xx;
- struct cvmx_mio_fus_wadr_cn52xx cn52xxp1;
- struct cvmx_mio_fus_wadr_cn52xx cn56xx;
- struct cvmx_mio_fus_wadr_cn52xx cn56xxp1;
- struct cvmx_mio_fus_wadr_cn50xx cn58xx;
- struct cvmx_mio_fus_wadr_cn50xx cn58xxp1;
struct cvmx_mio_fus_wadr_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63:60;
@@ -2950,12 +2476,6 @@ union cvmx_mio_fus_wadr {
uint64_t reserved_4_63:60;
#endif
} cn61xx;
- struct cvmx_mio_fus_wadr_cn61xx cn63xx;
- struct cvmx_mio_fus_wadr_cn61xx cn63xxp1;
- struct cvmx_mio_fus_wadr_cn61xx cn66xx;
- struct cvmx_mio_fus_wadr_cn61xx cn68xx;
- struct cvmx_mio_fus_wadr_cn61xx cn68xxp1;
- struct cvmx_mio_fus_wadr_cn61xx cnf71xx;
};
union cvmx_mio_gpio_comp {
@@ -2971,13 +2491,6 @@ union cvmx_mio_gpio_comp {
uint64_t reserved_12_63:52;
#endif
} s;
- struct cvmx_mio_gpio_comp_s cn61xx;
- struct cvmx_mio_gpio_comp_s cn63xx;
- struct cvmx_mio_gpio_comp_s cn63xxp1;
- struct cvmx_mio_gpio_comp_s cn66xx;
- struct cvmx_mio_gpio_comp_s cn68xx;
- struct cvmx_mio_gpio_comp_s cn68xxp1;
- struct cvmx_mio_gpio_comp_s cnf71xx;
};
union cvmx_mio_ndf_dma_cfg {
@@ -3007,14 +2520,6 @@ union cvmx_mio_ndf_dma_cfg {
uint64_t en:1;
#endif
} s;
- struct cvmx_mio_ndf_dma_cfg_s cn52xx;
- struct cvmx_mio_ndf_dma_cfg_s cn61xx;
- struct cvmx_mio_ndf_dma_cfg_s cn63xx;
- struct cvmx_mio_ndf_dma_cfg_s cn63xxp1;
- struct cvmx_mio_ndf_dma_cfg_s cn66xx;
- struct cvmx_mio_ndf_dma_cfg_s cn68xx;
- struct cvmx_mio_ndf_dma_cfg_s cn68xxp1;
- struct cvmx_mio_ndf_dma_cfg_s cnf71xx;
};
union cvmx_mio_ndf_dma_int {
@@ -3028,14 +2533,6 @@ union cvmx_mio_ndf_dma_int {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_ndf_dma_int_s cn52xx;
- struct cvmx_mio_ndf_dma_int_s cn61xx;
- struct cvmx_mio_ndf_dma_int_s cn63xx;
- struct cvmx_mio_ndf_dma_int_s cn63xxp1;
- struct cvmx_mio_ndf_dma_int_s cn66xx;
- struct cvmx_mio_ndf_dma_int_s cn68xx;
- struct cvmx_mio_ndf_dma_int_s cn68xxp1;
- struct cvmx_mio_ndf_dma_int_s cnf71xx;
};
union cvmx_mio_ndf_dma_int_en {
@@ -3049,14 +2546,6 @@ union cvmx_mio_ndf_dma_int_en {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_ndf_dma_int_en_s cn52xx;
- struct cvmx_mio_ndf_dma_int_en_s cn61xx;
- struct cvmx_mio_ndf_dma_int_en_s cn63xx;
- struct cvmx_mio_ndf_dma_int_en_s cn63xxp1;
- struct cvmx_mio_ndf_dma_int_en_s cn66xx;
- struct cvmx_mio_ndf_dma_int_en_s cn68xx;
- struct cvmx_mio_ndf_dma_int_en_s cn68xxp1;
- struct cvmx_mio_ndf_dma_int_en_s cnf71xx;
};
union cvmx_mio_pll_ctl {
@@ -3070,8 +2559,6 @@ union cvmx_mio_pll_ctl {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_mio_pll_ctl_s cn30xx;
- struct cvmx_mio_pll_ctl_s cn31xx;
};
union cvmx_mio_pll_setting {
@@ -3085,8 +2572,6 @@ union cvmx_mio_pll_setting {
uint64_t reserved_17_63:47;
#endif
} s;
- struct cvmx_mio_pll_setting_s cn30xx;
- struct cvmx_mio_pll_setting_s cn31xx;
};
union cvmx_mio_ptp_ckout_hi_incr {
@@ -3100,10 +2585,6 @@ union cvmx_mio_ptp_ckout_hi_incr {
uint64_t nanosec:32;
#endif
} s;
- struct cvmx_mio_ptp_ckout_hi_incr_s cn61xx;
- struct cvmx_mio_ptp_ckout_hi_incr_s cn66xx;
- struct cvmx_mio_ptp_ckout_hi_incr_s cn68xx;
- struct cvmx_mio_ptp_ckout_hi_incr_s cnf71xx;
};
union cvmx_mio_ptp_ckout_lo_incr {
@@ -3117,10 +2598,6 @@ union cvmx_mio_ptp_ckout_lo_incr {
uint64_t nanosec:32;
#endif
} s;
- struct cvmx_mio_ptp_ckout_lo_incr_s cn61xx;
- struct cvmx_mio_ptp_ckout_lo_incr_s cn66xx;
- struct cvmx_mio_ptp_ckout_lo_incr_s cn68xx;
- struct cvmx_mio_ptp_ckout_lo_incr_s cnf71xx;
};
union cvmx_mio_ptp_ckout_thresh_hi {
@@ -3132,10 +2609,6 @@ union cvmx_mio_ptp_ckout_thresh_hi {
uint64_t nanosec:64;
#endif
} s;
- struct cvmx_mio_ptp_ckout_thresh_hi_s cn61xx;
- struct cvmx_mio_ptp_ckout_thresh_hi_s cn66xx;
- struct cvmx_mio_ptp_ckout_thresh_hi_s cn68xx;
- struct cvmx_mio_ptp_ckout_thresh_hi_s cnf71xx;
};
union cvmx_mio_ptp_ckout_thresh_lo {
@@ -3149,10 +2622,6 @@ union cvmx_mio_ptp_ckout_thresh_lo {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_mio_ptp_ckout_thresh_lo_s cn61xx;
- struct cvmx_mio_ptp_ckout_thresh_lo_s cn66xx;
- struct cvmx_mio_ptp_ckout_thresh_lo_s cn68xx;
- struct cvmx_mio_ptp_ckout_thresh_lo_s cnf71xx;
};
union cvmx_mio_ptp_clock_cfg {
@@ -3202,7 +2671,6 @@ union cvmx_mio_ptp_clock_cfg {
uint64_t reserved_42_63:22;
#endif
} s;
- struct cvmx_mio_ptp_clock_cfg_s cn61xx;
struct cvmx_mio_ptp_clock_cfg_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63:40;
@@ -3228,7 +2696,6 @@ union cvmx_mio_ptp_clock_cfg {
uint64_t reserved_24_63:40;
#endif
} cn63xx;
- struct cvmx_mio_ptp_clock_cfg_cn63xx cn63xxp1;
struct cvmx_mio_ptp_clock_cfg_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
@@ -3270,9 +2737,6 @@ union cvmx_mio_ptp_clock_cfg {
uint64_t reserved_40_63:24;
#endif
} cn66xx;
- struct cvmx_mio_ptp_clock_cfg_s cn68xx;
- struct cvmx_mio_ptp_clock_cfg_cn63xx cn68xxp1;
- struct cvmx_mio_ptp_clock_cfg_s cnf71xx;
};
union cvmx_mio_ptp_clock_comp {
@@ -3286,13 +2750,6 @@ union cvmx_mio_ptp_clock_comp {
uint64_t nanosec:32;
#endif
} s;
- struct cvmx_mio_ptp_clock_comp_s cn61xx;
- struct cvmx_mio_ptp_clock_comp_s cn63xx;
- struct cvmx_mio_ptp_clock_comp_s cn63xxp1;
- struct cvmx_mio_ptp_clock_comp_s cn66xx;
- struct cvmx_mio_ptp_clock_comp_s cn68xx;
- struct cvmx_mio_ptp_clock_comp_s cn68xxp1;
- struct cvmx_mio_ptp_clock_comp_s cnf71xx;
};
union cvmx_mio_ptp_clock_hi {
@@ -3304,13 +2761,6 @@ union cvmx_mio_ptp_clock_hi {
uint64_t nanosec:64;
#endif
} s;
- struct cvmx_mio_ptp_clock_hi_s cn61xx;
- struct cvmx_mio_ptp_clock_hi_s cn63xx;
- struct cvmx_mio_ptp_clock_hi_s cn63xxp1;
- struct cvmx_mio_ptp_clock_hi_s cn66xx;
- struct cvmx_mio_ptp_clock_hi_s cn68xx;
- struct cvmx_mio_ptp_clock_hi_s cn68xxp1;
- struct cvmx_mio_ptp_clock_hi_s cnf71xx;
};
union cvmx_mio_ptp_clock_lo {
@@ -3324,13 +2774,6 @@ union cvmx_mio_ptp_clock_lo {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_mio_ptp_clock_lo_s cn61xx;
- struct cvmx_mio_ptp_clock_lo_s cn63xx;
- struct cvmx_mio_ptp_clock_lo_s cn63xxp1;
- struct cvmx_mio_ptp_clock_lo_s cn66xx;
- struct cvmx_mio_ptp_clock_lo_s cn68xx;
- struct cvmx_mio_ptp_clock_lo_s cn68xxp1;
- struct cvmx_mio_ptp_clock_lo_s cnf71xx;
};
union cvmx_mio_ptp_evt_cnt {
@@ -3342,13 +2785,6 @@ union cvmx_mio_ptp_evt_cnt {
uint64_t cntr:64;
#endif
} s;
- struct cvmx_mio_ptp_evt_cnt_s cn61xx;
- struct cvmx_mio_ptp_evt_cnt_s cn63xx;
- struct cvmx_mio_ptp_evt_cnt_s cn63xxp1;
- struct cvmx_mio_ptp_evt_cnt_s cn66xx;
- struct cvmx_mio_ptp_evt_cnt_s cn68xx;
- struct cvmx_mio_ptp_evt_cnt_s cn68xxp1;
- struct cvmx_mio_ptp_evt_cnt_s cnf71xx;
};
union cvmx_mio_ptp_phy_1pps_in {
@@ -3362,7 +2798,6 @@ union cvmx_mio_ptp_phy_1pps_in {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_mio_ptp_phy_1pps_in_s cnf71xx;
};
union cvmx_mio_ptp_pps_hi_incr {
@@ -3376,10 +2811,6 @@ union cvmx_mio_ptp_pps_hi_incr {
uint64_t nanosec:32;
#endif
} s;
- struct cvmx_mio_ptp_pps_hi_incr_s cn61xx;
- struct cvmx_mio_ptp_pps_hi_incr_s cn66xx;
- struct cvmx_mio_ptp_pps_hi_incr_s cn68xx;
- struct cvmx_mio_ptp_pps_hi_incr_s cnf71xx;
};
union cvmx_mio_ptp_pps_lo_incr {
@@ -3393,10 +2824,6 @@ union cvmx_mio_ptp_pps_lo_incr {
uint64_t nanosec:32;
#endif
} s;
- struct cvmx_mio_ptp_pps_lo_incr_s cn61xx;
- struct cvmx_mio_ptp_pps_lo_incr_s cn66xx;
- struct cvmx_mio_ptp_pps_lo_incr_s cn68xx;
- struct cvmx_mio_ptp_pps_lo_incr_s cnf71xx;
};
union cvmx_mio_ptp_pps_thresh_hi {
@@ -3408,10 +2835,6 @@ union cvmx_mio_ptp_pps_thresh_hi {
uint64_t nanosec:64;
#endif
} s;
- struct cvmx_mio_ptp_pps_thresh_hi_s cn61xx;
- struct cvmx_mio_ptp_pps_thresh_hi_s cn66xx;
- struct cvmx_mio_ptp_pps_thresh_hi_s cn68xx;
- struct cvmx_mio_ptp_pps_thresh_hi_s cnf71xx;
};
union cvmx_mio_ptp_pps_thresh_lo {
@@ -3425,10 +2848,6 @@ union cvmx_mio_ptp_pps_thresh_lo {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_mio_ptp_pps_thresh_lo_s cn61xx;
- struct cvmx_mio_ptp_pps_thresh_lo_s cn66xx;
- struct cvmx_mio_ptp_pps_thresh_lo_s cn68xx;
- struct cvmx_mio_ptp_pps_thresh_lo_s cnf71xx;
};
union cvmx_mio_ptp_timestamp {
@@ -3440,13 +2859,6 @@ union cvmx_mio_ptp_timestamp {
uint64_t nanosec:64;
#endif
} s;
- struct cvmx_mio_ptp_timestamp_s cn61xx;
- struct cvmx_mio_ptp_timestamp_s cn63xx;
- struct cvmx_mio_ptp_timestamp_s cn63xxp1;
- struct cvmx_mio_ptp_timestamp_s cn66xx;
- struct cvmx_mio_ptp_timestamp_s cn68xx;
- struct cvmx_mio_ptp_timestamp_s cn68xxp1;
- struct cvmx_mio_ptp_timestamp_s cnf71xx;
};
union cvmx_mio_qlmx_cfg {
@@ -3511,8 +2923,6 @@ union cvmx_mio_qlmx_cfg {
uint64_t reserved_12_63:52;
#endif
} cn68xx;
- struct cvmx_mio_qlmx_cfg_cn68xx cn68xxp1;
- struct cvmx_mio_qlmx_cfg_cn61xx cnf71xx;
};
union cvmx_mio_rst_boot {
@@ -3622,7 +3032,6 @@ union cvmx_mio_rst_boot {
uint64_t reserved_36_63:28;
#endif
} cn63xx;
- struct cvmx_mio_rst_boot_cn63xx cn63xxp1;
struct cvmx_mio_rst_boot_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t chipkill:1;
@@ -3718,7 +3127,6 @@ union cvmx_mio_rst_boot {
uint64_t reserved_44_63:20;
#endif
} cn68xxp1;
- struct cvmx_mio_rst_boot_cn61xx cnf71xx;
};
union cvmx_mio_rst_cfg {
@@ -3751,7 +3159,6 @@ union cvmx_mio_rst_cfg {
uint64_t bist_delay:58;
#endif
} cn61xx;
- struct cvmx_mio_rst_cfg_cn61xx cn63xx;
struct cvmx_mio_rst_cfg_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t bist_delay:58;
@@ -3765,7 +3172,6 @@ union cvmx_mio_rst_cfg {
uint64_t bist_delay:58;
#endif
} cn63xxp1;
- struct cvmx_mio_rst_cfg_cn61xx cn66xx;
struct cvmx_mio_rst_cfg_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t bist_delay:56;
@@ -3781,8 +3187,6 @@ union cvmx_mio_rst_cfg {
uint64_t bist_delay:56;
#endif
} cn68xx;
- struct cvmx_mio_rst_cfg_cn68xx cn68xxp1;
- struct cvmx_mio_rst_cfg_cn61xx cnf71xx;
};
union cvmx_mio_rst_ckill {
@@ -3796,9 +3200,6 @@ union cvmx_mio_rst_ckill {
uint64_t reserved_47_63:17;
#endif
} s;
- struct cvmx_mio_rst_ckill_s cn61xx;
- struct cvmx_mio_rst_ckill_s cn66xx;
- struct cvmx_mio_rst_ckill_s cnf71xx;
};
union cvmx_mio_rst_cntlx {
@@ -3834,7 +3235,6 @@ union cvmx_mio_rst_cntlx {
uint64_t reserved_13_63:51;
#endif
} s;
- struct cvmx_mio_rst_cntlx_s cn61xx;
struct cvmx_mio_rst_cntlx_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
@@ -3860,8 +3260,6 @@ union cvmx_mio_rst_cntlx {
uint64_t reserved_10_63:54;
#endif
} cn66xx;
- struct cvmx_mio_rst_cntlx_cn66xx cn68xx;
- struct cvmx_mio_rst_cntlx_s cnf71xx;
};
union cvmx_mio_rst_ctlx {
@@ -3897,7 +3295,6 @@ union cvmx_mio_rst_ctlx {
uint64_t reserved_13_63:51;
#endif
} s;
- struct cvmx_mio_rst_ctlx_s cn61xx;
struct cvmx_mio_rst_ctlx_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
@@ -3946,10 +3343,6 @@ union cvmx_mio_rst_ctlx {
uint64_t reserved_9_63:55;
#endif
} cn63xxp1;
- struct cvmx_mio_rst_ctlx_cn63xx cn66xx;
- struct cvmx_mio_rst_ctlx_cn63xx cn68xx;
- struct cvmx_mio_rst_ctlx_cn63xx cn68xxp1;
- struct cvmx_mio_rst_ctlx_s cnf71xx;
};
union cvmx_mio_rst_delay {
@@ -3965,13 +3358,6 @@ union cvmx_mio_rst_delay {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_mio_rst_delay_s cn61xx;
- struct cvmx_mio_rst_delay_s cn63xx;
- struct cvmx_mio_rst_delay_s cn63xxp1;
- struct cvmx_mio_rst_delay_s cn66xx;
- struct cvmx_mio_rst_delay_s cn68xx;
- struct cvmx_mio_rst_delay_s cn68xxp1;
- struct cvmx_mio_rst_delay_s cnf71xx;
};
union cvmx_mio_rst_int {
@@ -4014,12 +3400,6 @@ union cvmx_mio_rst_int {
uint64_t reserved_10_63:54;
#endif
} cn61xx;
- struct cvmx_mio_rst_int_cn61xx cn63xx;
- struct cvmx_mio_rst_int_cn61xx cn63xxp1;
- struct cvmx_mio_rst_int_s cn66xx;
- struct cvmx_mio_rst_int_cn61xx cn68xx;
- struct cvmx_mio_rst_int_cn61xx cn68xxp1;
- struct cvmx_mio_rst_int_cn61xx cnf71xx;
};
union cvmx_mio_rst_int_en {
@@ -4062,12 +3442,6 @@ union cvmx_mio_rst_int_en {
uint64_t reserved_10_63:54;
#endif
} cn61xx;
- struct cvmx_mio_rst_int_en_cn61xx cn63xx;
- struct cvmx_mio_rst_int_en_cn61xx cn63xxp1;
- struct cvmx_mio_rst_int_en_s cn66xx;
- struct cvmx_mio_rst_int_en_cn61xx cn68xx;
- struct cvmx_mio_rst_int_en_cn61xx cn68xxp1;
- struct cvmx_mio_rst_int_en_cn61xx cnf71xx;
};
union cvmx_mio_twsx_int {
@@ -4103,9 +3477,6 @@ union cvmx_mio_twsx_int {
uint64_t reserved_12_63:52;
#endif
} s;
- struct cvmx_mio_twsx_int_s cn30xx;
- struct cvmx_mio_twsx_int_s cn31xx;
- struct cvmx_mio_twsx_int_s cn38xx;
struct cvmx_mio_twsx_int_cn38xxp2 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_7_63:57;
@@ -4127,20 +3498,6 @@ union cvmx_mio_twsx_int {
uint64_t reserved_7_63:57;
#endif
} cn38xxp2;
- struct cvmx_mio_twsx_int_s cn50xx;
- struct cvmx_mio_twsx_int_s cn52xx;
- struct cvmx_mio_twsx_int_s cn52xxp1;
- struct cvmx_mio_twsx_int_s cn56xx;
- struct cvmx_mio_twsx_int_s cn56xxp1;
- struct cvmx_mio_twsx_int_s cn58xx;
- struct cvmx_mio_twsx_int_s cn58xxp1;
- struct cvmx_mio_twsx_int_s cn61xx;
- struct cvmx_mio_twsx_int_s cn63xx;
- struct cvmx_mio_twsx_int_s cn63xxp1;
- struct cvmx_mio_twsx_int_s cn66xx;
- struct cvmx_mio_twsx_int_s cn68xx;
- struct cvmx_mio_twsx_int_s cn68xxp1;
- struct cvmx_mio_twsx_int_s cnf71xx;
};
union cvmx_mio_twsx_sw_twsi {
@@ -4174,24 +3531,6 @@ union cvmx_mio_twsx_sw_twsi {
uint64_t v:1;
#endif
} s;
- struct cvmx_mio_twsx_sw_twsi_s cn30xx;
- struct cvmx_mio_twsx_sw_twsi_s cn31xx;
- struct cvmx_mio_twsx_sw_twsi_s cn38xx;
- struct cvmx_mio_twsx_sw_twsi_s cn38xxp2;
- struct cvmx_mio_twsx_sw_twsi_s cn50xx;
- struct cvmx_mio_twsx_sw_twsi_s cn52xx;
- struct cvmx_mio_twsx_sw_twsi_s cn52xxp1;
- struct cvmx_mio_twsx_sw_twsi_s cn56xx;
- struct cvmx_mio_twsx_sw_twsi_s cn56xxp1;
- struct cvmx_mio_twsx_sw_twsi_s cn58xx;
- struct cvmx_mio_twsx_sw_twsi_s cn58xxp1;
- struct cvmx_mio_twsx_sw_twsi_s cn61xx;
- struct cvmx_mio_twsx_sw_twsi_s cn63xx;
- struct cvmx_mio_twsx_sw_twsi_s cn63xxp1;
- struct cvmx_mio_twsx_sw_twsi_s cn66xx;
- struct cvmx_mio_twsx_sw_twsi_s cn68xx;
- struct cvmx_mio_twsx_sw_twsi_s cn68xxp1;
- struct cvmx_mio_twsx_sw_twsi_s cnf71xx;
};
union cvmx_mio_twsx_sw_twsi_ext {
@@ -4207,24 +3546,6 @@ union cvmx_mio_twsx_sw_twsi_ext {
uint64_t reserved_40_63:24;
#endif
} s;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn30xx;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn31xx;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn38xx;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn38xxp2;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn50xx;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn52xx;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn52xxp1;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn56xx;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn56xxp1;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn58xx;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn58xxp1;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn61xx;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn63xx;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn63xxp1;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn66xx;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn68xx;
- struct cvmx_mio_twsx_sw_twsi_ext_s cn68xxp1;
- struct cvmx_mio_twsx_sw_twsi_ext_s cnf71xx;
};
union cvmx_mio_twsx_twsi_sw {
@@ -4240,24 +3561,6 @@ union cvmx_mio_twsx_twsi_sw {
uint64_t v:2;
#endif
} s;
- struct cvmx_mio_twsx_twsi_sw_s cn30xx;
- struct cvmx_mio_twsx_twsi_sw_s cn31xx;
- struct cvmx_mio_twsx_twsi_sw_s cn38xx;
- struct cvmx_mio_twsx_twsi_sw_s cn38xxp2;
- struct cvmx_mio_twsx_twsi_sw_s cn50xx;
- struct cvmx_mio_twsx_twsi_sw_s cn52xx;
- struct cvmx_mio_twsx_twsi_sw_s cn52xxp1;
- struct cvmx_mio_twsx_twsi_sw_s cn56xx;
- struct cvmx_mio_twsx_twsi_sw_s cn56xxp1;
- struct cvmx_mio_twsx_twsi_sw_s cn58xx;
- struct cvmx_mio_twsx_twsi_sw_s cn58xxp1;
- struct cvmx_mio_twsx_twsi_sw_s cn61xx;
- struct cvmx_mio_twsx_twsi_sw_s cn63xx;
- struct cvmx_mio_twsx_twsi_sw_s cn63xxp1;
- struct cvmx_mio_twsx_twsi_sw_s cn66xx;
- struct cvmx_mio_twsx_twsi_sw_s cn68xx;
- struct cvmx_mio_twsx_twsi_sw_s cn68xxp1;
- struct cvmx_mio_twsx_twsi_sw_s cnf71xx;
};
union cvmx_mio_uartx_dlh {
@@ -4271,24 +3574,6 @@ union cvmx_mio_uartx_dlh {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_dlh_s cn30xx;
- struct cvmx_mio_uartx_dlh_s cn31xx;
- struct cvmx_mio_uartx_dlh_s cn38xx;
- struct cvmx_mio_uartx_dlh_s cn38xxp2;
- struct cvmx_mio_uartx_dlh_s cn50xx;
- struct cvmx_mio_uartx_dlh_s cn52xx;
- struct cvmx_mio_uartx_dlh_s cn52xxp1;
- struct cvmx_mio_uartx_dlh_s cn56xx;
- struct cvmx_mio_uartx_dlh_s cn56xxp1;
- struct cvmx_mio_uartx_dlh_s cn58xx;
- struct cvmx_mio_uartx_dlh_s cn58xxp1;
- struct cvmx_mio_uartx_dlh_s cn61xx;
- struct cvmx_mio_uartx_dlh_s cn63xx;
- struct cvmx_mio_uartx_dlh_s cn63xxp1;
- struct cvmx_mio_uartx_dlh_s cn66xx;
- struct cvmx_mio_uartx_dlh_s cn68xx;
- struct cvmx_mio_uartx_dlh_s cn68xxp1;
- struct cvmx_mio_uartx_dlh_s cnf71xx;
};
union cvmx_mio_uartx_dll {
@@ -4302,24 +3587,6 @@ union cvmx_mio_uartx_dll {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_dll_s cn30xx;
- struct cvmx_mio_uartx_dll_s cn31xx;
- struct cvmx_mio_uartx_dll_s cn38xx;
- struct cvmx_mio_uartx_dll_s cn38xxp2;
- struct cvmx_mio_uartx_dll_s cn50xx;
- struct cvmx_mio_uartx_dll_s cn52xx;
- struct cvmx_mio_uartx_dll_s cn52xxp1;
- struct cvmx_mio_uartx_dll_s cn56xx;
- struct cvmx_mio_uartx_dll_s cn56xxp1;
- struct cvmx_mio_uartx_dll_s cn58xx;
- struct cvmx_mio_uartx_dll_s cn58xxp1;
- struct cvmx_mio_uartx_dll_s cn61xx;
- struct cvmx_mio_uartx_dll_s cn63xx;
- struct cvmx_mio_uartx_dll_s cn63xxp1;
- struct cvmx_mio_uartx_dll_s cn66xx;
- struct cvmx_mio_uartx_dll_s cn68xx;
- struct cvmx_mio_uartx_dll_s cn68xxp1;
- struct cvmx_mio_uartx_dll_s cnf71xx;
};
union cvmx_mio_uartx_far {
@@ -4333,24 +3600,6 @@ union cvmx_mio_uartx_far {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_uartx_far_s cn30xx;
- struct cvmx_mio_uartx_far_s cn31xx;
- struct cvmx_mio_uartx_far_s cn38xx;
- struct cvmx_mio_uartx_far_s cn38xxp2;
- struct cvmx_mio_uartx_far_s cn50xx;
- struct cvmx_mio_uartx_far_s cn52xx;
- struct cvmx_mio_uartx_far_s cn52xxp1;
- struct cvmx_mio_uartx_far_s cn56xx;
- struct cvmx_mio_uartx_far_s cn56xxp1;
- struct cvmx_mio_uartx_far_s cn58xx;
- struct cvmx_mio_uartx_far_s cn58xxp1;
- struct cvmx_mio_uartx_far_s cn61xx;
- struct cvmx_mio_uartx_far_s cn63xx;
- struct cvmx_mio_uartx_far_s cn63xxp1;
- struct cvmx_mio_uartx_far_s cn66xx;
- struct cvmx_mio_uartx_far_s cn68xx;
- struct cvmx_mio_uartx_far_s cn68xxp1;
- struct cvmx_mio_uartx_far_s cnf71xx;
};
union cvmx_mio_uartx_fcr {
@@ -4374,24 +3623,6 @@ union cvmx_mio_uartx_fcr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_fcr_s cn30xx;
- struct cvmx_mio_uartx_fcr_s cn31xx;
- struct cvmx_mio_uartx_fcr_s cn38xx;
- struct cvmx_mio_uartx_fcr_s cn38xxp2;
- struct cvmx_mio_uartx_fcr_s cn50xx;
- struct cvmx_mio_uartx_fcr_s cn52xx;
- struct cvmx_mio_uartx_fcr_s cn52xxp1;
- struct cvmx_mio_uartx_fcr_s cn56xx;
- struct cvmx_mio_uartx_fcr_s cn56xxp1;
- struct cvmx_mio_uartx_fcr_s cn58xx;
- struct cvmx_mio_uartx_fcr_s cn58xxp1;
- struct cvmx_mio_uartx_fcr_s cn61xx;
- struct cvmx_mio_uartx_fcr_s cn63xx;
- struct cvmx_mio_uartx_fcr_s cn63xxp1;
- struct cvmx_mio_uartx_fcr_s cn66xx;
- struct cvmx_mio_uartx_fcr_s cn68xx;
- struct cvmx_mio_uartx_fcr_s cn68xxp1;
- struct cvmx_mio_uartx_fcr_s cnf71xx;
};
union cvmx_mio_uartx_htx {
@@ -4405,24 +3636,6 @@ union cvmx_mio_uartx_htx {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_uartx_htx_s cn30xx;
- struct cvmx_mio_uartx_htx_s cn31xx;
- struct cvmx_mio_uartx_htx_s cn38xx;
- struct cvmx_mio_uartx_htx_s cn38xxp2;
- struct cvmx_mio_uartx_htx_s cn50xx;
- struct cvmx_mio_uartx_htx_s cn52xx;
- struct cvmx_mio_uartx_htx_s cn52xxp1;
- struct cvmx_mio_uartx_htx_s cn56xx;
- struct cvmx_mio_uartx_htx_s cn56xxp1;
- struct cvmx_mio_uartx_htx_s cn58xx;
- struct cvmx_mio_uartx_htx_s cn58xxp1;
- struct cvmx_mio_uartx_htx_s cn61xx;
- struct cvmx_mio_uartx_htx_s cn63xx;
- struct cvmx_mio_uartx_htx_s cn63xxp1;
- struct cvmx_mio_uartx_htx_s cn66xx;
- struct cvmx_mio_uartx_htx_s cn68xx;
- struct cvmx_mio_uartx_htx_s cn68xxp1;
- struct cvmx_mio_uartx_htx_s cnf71xx;
};
union cvmx_mio_uartx_ier {
@@ -4446,24 +3659,6 @@ union cvmx_mio_uartx_ier {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_ier_s cn30xx;
- struct cvmx_mio_uartx_ier_s cn31xx;
- struct cvmx_mio_uartx_ier_s cn38xx;
- struct cvmx_mio_uartx_ier_s cn38xxp2;
- struct cvmx_mio_uartx_ier_s cn50xx;
- struct cvmx_mio_uartx_ier_s cn52xx;
- struct cvmx_mio_uartx_ier_s cn52xxp1;
- struct cvmx_mio_uartx_ier_s cn56xx;
- struct cvmx_mio_uartx_ier_s cn56xxp1;
- struct cvmx_mio_uartx_ier_s cn58xx;
- struct cvmx_mio_uartx_ier_s cn58xxp1;
- struct cvmx_mio_uartx_ier_s cn61xx;
- struct cvmx_mio_uartx_ier_s cn63xx;
- struct cvmx_mio_uartx_ier_s cn63xxp1;
- struct cvmx_mio_uartx_ier_s cn66xx;
- struct cvmx_mio_uartx_ier_s cn68xx;
- struct cvmx_mio_uartx_ier_s cn68xxp1;
- struct cvmx_mio_uartx_ier_s cnf71xx;
};
union cvmx_mio_uartx_iir {
@@ -4481,24 +3676,6 @@ union cvmx_mio_uartx_iir {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_iir_s cn30xx;
- struct cvmx_mio_uartx_iir_s cn31xx;
- struct cvmx_mio_uartx_iir_s cn38xx;
- struct cvmx_mio_uartx_iir_s cn38xxp2;
- struct cvmx_mio_uartx_iir_s cn50xx;
- struct cvmx_mio_uartx_iir_s cn52xx;
- struct cvmx_mio_uartx_iir_s cn52xxp1;
- struct cvmx_mio_uartx_iir_s cn56xx;
- struct cvmx_mio_uartx_iir_s cn56xxp1;
- struct cvmx_mio_uartx_iir_s cn58xx;
- struct cvmx_mio_uartx_iir_s cn58xxp1;
- struct cvmx_mio_uartx_iir_s cn61xx;
- struct cvmx_mio_uartx_iir_s cn63xx;
- struct cvmx_mio_uartx_iir_s cn63xxp1;
- struct cvmx_mio_uartx_iir_s cn66xx;
- struct cvmx_mio_uartx_iir_s cn68xx;
- struct cvmx_mio_uartx_iir_s cn68xxp1;
- struct cvmx_mio_uartx_iir_s cnf71xx;
};
union cvmx_mio_uartx_lcr {
@@ -4524,24 +3701,6 @@ union cvmx_mio_uartx_lcr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_lcr_s cn30xx;
- struct cvmx_mio_uartx_lcr_s cn31xx;
- struct cvmx_mio_uartx_lcr_s cn38xx;
- struct cvmx_mio_uartx_lcr_s cn38xxp2;
- struct cvmx_mio_uartx_lcr_s cn50xx;
- struct cvmx_mio_uartx_lcr_s cn52xx;
- struct cvmx_mio_uartx_lcr_s cn52xxp1;
- struct cvmx_mio_uartx_lcr_s cn56xx;
- struct cvmx_mio_uartx_lcr_s cn56xxp1;
- struct cvmx_mio_uartx_lcr_s cn58xx;
- struct cvmx_mio_uartx_lcr_s cn58xxp1;
- struct cvmx_mio_uartx_lcr_s cn61xx;
- struct cvmx_mio_uartx_lcr_s cn63xx;
- struct cvmx_mio_uartx_lcr_s cn63xxp1;
- struct cvmx_mio_uartx_lcr_s cn66xx;
- struct cvmx_mio_uartx_lcr_s cn68xx;
- struct cvmx_mio_uartx_lcr_s cn68xxp1;
- struct cvmx_mio_uartx_lcr_s cnf71xx;
};
union cvmx_mio_uartx_lsr {
@@ -4569,24 +3728,6 @@ union cvmx_mio_uartx_lsr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_lsr_s cn30xx;
- struct cvmx_mio_uartx_lsr_s cn31xx;
- struct cvmx_mio_uartx_lsr_s cn38xx;
- struct cvmx_mio_uartx_lsr_s cn38xxp2;
- struct cvmx_mio_uartx_lsr_s cn50xx;
- struct cvmx_mio_uartx_lsr_s cn52xx;
- struct cvmx_mio_uartx_lsr_s cn52xxp1;
- struct cvmx_mio_uartx_lsr_s cn56xx;
- struct cvmx_mio_uartx_lsr_s cn56xxp1;
- struct cvmx_mio_uartx_lsr_s cn58xx;
- struct cvmx_mio_uartx_lsr_s cn58xxp1;
- struct cvmx_mio_uartx_lsr_s cn61xx;
- struct cvmx_mio_uartx_lsr_s cn63xx;
- struct cvmx_mio_uartx_lsr_s cn63xxp1;
- struct cvmx_mio_uartx_lsr_s cn66xx;
- struct cvmx_mio_uartx_lsr_s cn68xx;
- struct cvmx_mio_uartx_lsr_s cn68xxp1;
- struct cvmx_mio_uartx_lsr_s cnf71xx;
};
union cvmx_mio_uartx_mcr {
@@ -4610,24 +3751,6 @@ union cvmx_mio_uartx_mcr {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_mio_uartx_mcr_s cn30xx;
- struct cvmx_mio_uartx_mcr_s cn31xx;
- struct cvmx_mio_uartx_mcr_s cn38xx;
- struct cvmx_mio_uartx_mcr_s cn38xxp2;
- struct cvmx_mio_uartx_mcr_s cn50xx;
- struct cvmx_mio_uartx_mcr_s cn52xx;
- struct cvmx_mio_uartx_mcr_s cn52xxp1;
- struct cvmx_mio_uartx_mcr_s cn56xx;
- struct cvmx_mio_uartx_mcr_s cn56xxp1;
- struct cvmx_mio_uartx_mcr_s cn58xx;
- struct cvmx_mio_uartx_mcr_s cn58xxp1;
- struct cvmx_mio_uartx_mcr_s cn61xx;
- struct cvmx_mio_uartx_mcr_s cn63xx;
- struct cvmx_mio_uartx_mcr_s cn63xxp1;
- struct cvmx_mio_uartx_mcr_s cn66xx;
- struct cvmx_mio_uartx_mcr_s cn68xx;
- struct cvmx_mio_uartx_mcr_s cn68xxp1;
- struct cvmx_mio_uartx_mcr_s cnf71xx;
};
union cvmx_mio_uartx_msr {
@@ -4655,24 +3778,6 @@ union cvmx_mio_uartx_msr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_msr_s cn30xx;
- struct cvmx_mio_uartx_msr_s cn31xx;
- struct cvmx_mio_uartx_msr_s cn38xx;
- struct cvmx_mio_uartx_msr_s cn38xxp2;
- struct cvmx_mio_uartx_msr_s cn50xx;
- struct cvmx_mio_uartx_msr_s cn52xx;
- struct cvmx_mio_uartx_msr_s cn52xxp1;
- struct cvmx_mio_uartx_msr_s cn56xx;
- struct cvmx_mio_uartx_msr_s cn56xxp1;
- struct cvmx_mio_uartx_msr_s cn58xx;
- struct cvmx_mio_uartx_msr_s cn58xxp1;
- struct cvmx_mio_uartx_msr_s cn61xx;
- struct cvmx_mio_uartx_msr_s cn63xx;
- struct cvmx_mio_uartx_msr_s cn63xxp1;
- struct cvmx_mio_uartx_msr_s cn66xx;
- struct cvmx_mio_uartx_msr_s cn68xx;
- struct cvmx_mio_uartx_msr_s cn68xxp1;
- struct cvmx_mio_uartx_msr_s cnf71xx;
};
union cvmx_mio_uartx_rbr {
@@ -4686,24 +3791,6 @@ union cvmx_mio_uartx_rbr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_rbr_s cn30xx;
- struct cvmx_mio_uartx_rbr_s cn31xx;
- struct cvmx_mio_uartx_rbr_s cn38xx;
- struct cvmx_mio_uartx_rbr_s cn38xxp2;
- struct cvmx_mio_uartx_rbr_s cn50xx;
- struct cvmx_mio_uartx_rbr_s cn52xx;
- struct cvmx_mio_uartx_rbr_s cn52xxp1;
- struct cvmx_mio_uartx_rbr_s cn56xx;
- struct cvmx_mio_uartx_rbr_s cn56xxp1;
- struct cvmx_mio_uartx_rbr_s cn58xx;
- struct cvmx_mio_uartx_rbr_s cn58xxp1;
- struct cvmx_mio_uartx_rbr_s cn61xx;
- struct cvmx_mio_uartx_rbr_s cn63xx;
- struct cvmx_mio_uartx_rbr_s cn63xxp1;
- struct cvmx_mio_uartx_rbr_s cn66xx;
- struct cvmx_mio_uartx_rbr_s cn68xx;
- struct cvmx_mio_uartx_rbr_s cn68xxp1;
- struct cvmx_mio_uartx_rbr_s cnf71xx;
};
union cvmx_mio_uartx_rfl {
@@ -4717,24 +3804,6 @@ union cvmx_mio_uartx_rfl {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_mio_uartx_rfl_s cn30xx;
- struct cvmx_mio_uartx_rfl_s cn31xx;
- struct cvmx_mio_uartx_rfl_s cn38xx;
- struct cvmx_mio_uartx_rfl_s cn38xxp2;
- struct cvmx_mio_uartx_rfl_s cn50xx;
- struct cvmx_mio_uartx_rfl_s cn52xx;
- struct cvmx_mio_uartx_rfl_s cn52xxp1;
- struct cvmx_mio_uartx_rfl_s cn56xx;
- struct cvmx_mio_uartx_rfl_s cn56xxp1;
- struct cvmx_mio_uartx_rfl_s cn58xx;
- struct cvmx_mio_uartx_rfl_s cn58xxp1;
- struct cvmx_mio_uartx_rfl_s cn61xx;
- struct cvmx_mio_uartx_rfl_s cn63xx;
- struct cvmx_mio_uartx_rfl_s cn63xxp1;
- struct cvmx_mio_uartx_rfl_s cn66xx;
- struct cvmx_mio_uartx_rfl_s cn68xx;
- struct cvmx_mio_uartx_rfl_s cn68xxp1;
- struct cvmx_mio_uartx_rfl_s cnf71xx;
};
union cvmx_mio_uartx_rfw {
@@ -4752,24 +3821,6 @@ union cvmx_mio_uartx_rfw {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_mio_uartx_rfw_s cn30xx;
- struct cvmx_mio_uartx_rfw_s cn31xx;
- struct cvmx_mio_uartx_rfw_s cn38xx;
- struct cvmx_mio_uartx_rfw_s cn38xxp2;
- struct cvmx_mio_uartx_rfw_s cn50xx;
- struct cvmx_mio_uartx_rfw_s cn52xx;
- struct cvmx_mio_uartx_rfw_s cn52xxp1;
- struct cvmx_mio_uartx_rfw_s cn56xx;
- struct cvmx_mio_uartx_rfw_s cn56xxp1;
- struct cvmx_mio_uartx_rfw_s cn58xx;
- struct cvmx_mio_uartx_rfw_s cn58xxp1;
- struct cvmx_mio_uartx_rfw_s cn61xx;
- struct cvmx_mio_uartx_rfw_s cn63xx;
- struct cvmx_mio_uartx_rfw_s cn63xxp1;
- struct cvmx_mio_uartx_rfw_s cn66xx;
- struct cvmx_mio_uartx_rfw_s cn68xx;
- struct cvmx_mio_uartx_rfw_s cn68xxp1;
- struct cvmx_mio_uartx_rfw_s cnf71xx;
};
union cvmx_mio_uartx_sbcr {
@@ -4783,24 +3834,6 @@ union cvmx_mio_uartx_sbcr {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_uartx_sbcr_s cn30xx;
- struct cvmx_mio_uartx_sbcr_s cn31xx;
- struct cvmx_mio_uartx_sbcr_s cn38xx;
- struct cvmx_mio_uartx_sbcr_s cn38xxp2;
- struct cvmx_mio_uartx_sbcr_s cn50xx;
- struct cvmx_mio_uartx_sbcr_s cn52xx;
- struct cvmx_mio_uartx_sbcr_s cn52xxp1;
- struct cvmx_mio_uartx_sbcr_s cn56xx;
- struct cvmx_mio_uartx_sbcr_s cn56xxp1;
- struct cvmx_mio_uartx_sbcr_s cn58xx;
- struct cvmx_mio_uartx_sbcr_s cn58xxp1;
- struct cvmx_mio_uartx_sbcr_s cn61xx;
- struct cvmx_mio_uartx_sbcr_s cn63xx;
- struct cvmx_mio_uartx_sbcr_s cn63xxp1;
- struct cvmx_mio_uartx_sbcr_s cn66xx;
- struct cvmx_mio_uartx_sbcr_s cn68xx;
- struct cvmx_mio_uartx_sbcr_s cn68xxp1;
- struct cvmx_mio_uartx_sbcr_s cnf71xx;
};
union cvmx_mio_uartx_scr {
@@ -4814,24 +3847,6 @@ union cvmx_mio_uartx_scr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_scr_s cn30xx;
- struct cvmx_mio_uartx_scr_s cn31xx;
- struct cvmx_mio_uartx_scr_s cn38xx;
- struct cvmx_mio_uartx_scr_s cn38xxp2;
- struct cvmx_mio_uartx_scr_s cn50xx;
- struct cvmx_mio_uartx_scr_s cn52xx;
- struct cvmx_mio_uartx_scr_s cn52xxp1;
- struct cvmx_mio_uartx_scr_s cn56xx;
- struct cvmx_mio_uartx_scr_s cn56xxp1;
- struct cvmx_mio_uartx_scr_s cn58xx;
- struct cvmx_mio_uartx_scr_s cn58xxp1;
- struct cvmx_mio_uartx_scr_s cn61xx;
- struct cvmx_mio_uartx_scr_s cn63xx;
- struct cvmx_mio_uartx_scr_s cn63xxp1;
- struct cvmx_mio_uartx_scr_s cn66xx;
- struct cvmx_mio_uartx_scr_s cn68xx;
- struct cvmx_mio_uartx_scr_s cn68xxp1;
- struct cvmx_mio_uartx_scr_s cnf71xx;
};
union cvmx_mio_uartx_sfe {
@@ -4845,24 +3860,6 @@ union cvmx_mio_uartx_sfe {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_uartx_sfe_s cn30xx;
- struct cvmx_mio_uartx_sfe_s cn31xx;
- struct cvmx_mio_uartx_sfe_s cn38xx;
- struct cvmx_mio_uartx_sfe_s cn38xxp2;
- struct cvmx_mio_uartx_sfe_s cn50xx;
- struct cvmx_mio_uartx_sfe_s cn52xx;
- struct cvmx_mio_uartx_sfe_s cn52xxp1;
- struct cvmx_mio_uartx_sfe_s cn56xx;
- struct cvmx_mio_uartx_sfe_s cn56xxp1;
- struct cvmx_mio_uartx_sfe_s cn58xx;
- struct cvmx_mio_uartx_sfe_s cn58xxp1;
- struct cvmx_mio_uartx_sfe_s cn61xx;
- struct cvmx_mio_uartx_sfe_s cn63xx;
- struct cvmx_mio_uartx_sfe_s cn63xxp1;
- struct cvmx_mio_uartx_sfe_s cn66xx;
- struct cvmx_mio_uartx_sfe_s cn68xx;
- struct cvmx_mio_uartx_sfe_s cn68xxp1;
- struct cvmx_mio_uartx_sfe_s cnf71xx;
};
union cvmx_mio_uartx_srr {
@@ -4880,24 +3877,6 @@ union cvmx_mio_uartx_srr {
uint64_t reserved_3_63:61;
#endif
} s;
- struct cvmx_mio_uartx_srr_s cn30xx;
- struct cvmx_mio_uartx_srr_s cn31xx;
- struct cvmx_mio_uartx_srr_s cn38xx;
- struct cvmx_mio_uartx_srr_s cn38xxp2;
- struct cvmx_mio_uartx_srr_s cn50xx;
- struct cvmx_mio_uartx_srr_s cn52xx;
- struct cvmx_mio_uartx_srr_s cn52xxp1;
- struct cvmx_mio_uartx_srr_s cn56xx;
- struct cvmx_mio_uartx_srr_s cn56xxp1;
- struct cvmx_mio_uartx_srr_s cn58xx;
- struct cvmx_mio_uartx_srr_s cn58xxp1;
- struct cvmx_mio_uartx_srr_s cn61xx;
- struct cvmx_mio_uartx_srr_s cn63xx;
- struct cvmx_mio_uartx_srr_s cn63xxp1;
- struct cvmx_mio_uartx_srr_s cn66xx;
- struct cvmx_mio_uartx_srr_s cn68xx;
- struct cvmx_mio_uartx_srr_s cn68xxp1;
- struct cvmx_mio_uartx_srr_s cnf71xx;
};
union cvmx_mio_uartx_srt {
@@ -4911,24 +3890,6 @@ union cvmx_mio_uartx_srt {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_mio_uartx_srt_s cn30xx;
- struct cvmx_mio_uartx_srt_s cn31xx;
- struct cvmx_mio_uartx_srt_s cn38xx;
- struct cvmx_mio_uartx_srt_s cn38xxp2;
- struct cvmx_mio_uartx_srt_s cn50xx;
- struct cvmx_mio_uartx_srt_s cn52xx;
- struct cvmx_mio_uartx_srt_s cn52xxp1;
- struct cvmx_mio_uartx_srt_s cn56xx;
- struct cvmx_mio_uartx_srt_s cn56xxp1;
- struct cvmx_mio_uartx_srt_s cn58xx;
- struct cvmx_mio_uartx_srt_s cn58xxp1;
- struct cvmx_mio_uartx_srt_s cn61xx;
- struct cvmx_mio_uartx_srt_s cn63xx;
- struct cvmx_mio_uartx_srt_s cn63xxp1;
- struct cvmx_mio_uartx_srt_s cn66xx;
- struct cvmx_mio_uartx_srt_s cn68xx;
- struct cvmx_mio_uartx_srt_s cn68xxp1;
- struct cvmx_mio_uartx_srt_s cnf71xx;
};
union cvmx_mio_uartx_srts {
@@ -4942,24 +3903,6 @@ union cvmx_mio_uartx_srts {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_uartx_srts_s cn30xx;
- struct cvmx_mio_uartx_srts_s cn31xx;
- struct cvmx_mio_uartx_srts_s cn38xx;
- struct cvmx_mio_uartx_srts_s cn38xxp2;
- struct cvmx_mio_uartx_srts_s cn50xx;
- struct cvmx_mio_uartx_srts_s cn52xx;
- struct cvmx_mio_uartx_srts_s cn52xxp1;
- struct cvmx_mio_uartx_srts_s cn56xx;
- struct cvmx_mio_uartx_srts_s cn56xxp1;
- struct cvmx_mio_uartx_srts_s cn58xx;
- struct cvmx_mio_uartx_srts_s cn58xxp1;
- struct cvmx_mio_uartx_srts_s cn61xx;
- struct cvmx_mio_uartx_srts_s cn63xx;
- struct cvmx_mio_uartx_srts_s cn63xxp1;
- struct cvmx_mio_uartx_srts_s cn66xx;
- struct cvmx_mio_uartx_srts_s cn68xx;
- struct cvmx_mio_uartx_srts_s cn68xxp1;
- struct cvmx_mio_uartx_srts_s cnf71xx;
};
union cvmx_mio_uartx_stt {
@@ -4973,24 +3916,6 @@ union cvmx_mio_uartx_stt {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_mio_uartx_stt_s cn30xx;
- struct cvmx_mio_uartx_stt_s cn31xx;
- struct cvmx_mio_uartx_stt_s cn38xx;
- struct cvmx_mio_uartx_stt_s cn38xxp2;
- struct cvmx_mio_uartx_stt_s cn50xx;
- struct cvmx_mio_uartx_stt_s cn52xx;
- struct cvmx_mio_uartx_stt_s cn52xxp1;
- struct cvmx_mio_uartx_stt_s cn56xx;
- struct cvmx_mio_uartx_stt_s cn56xxp1;
- struct cvmx_mio_uartx_stt_s cn58xx;
- struct cvmx_mio_uartx_stt_s cn58xxp1;
- struct cvmx_mio_uartx_stt_s cn61xx;
- struct cvmx_mio_uartx_stt_s cn63xx;
- struct cvmx_mio_uartx_stt_s cn63xxp1;
- struct cvmx_mio_uartx_stt_s cn66xx;
- struct cvmx_mio_uartx_stt_s cn68xx;
- struct cvmx_mio_uartx_stt_s cn68xxp1;
- struct cvmx_mio_uartx_stt_s cnf71xx;
};
union cvmx_mio_uartx_tfl {
@@ -5004,24 +3929,6 @@ union cvmx_mio_uartx_tfl {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_mio_uartx_tfl_s cn30xx;
- struct cvmx_mio_uartx_tfl_s cn31xx;
- struct cvmx_mio_uartx_tfl_s cn38xx;
- struct cvmx_mio_uartx_tfl_s cn38xxp2;
- struct cvmx_mio_uartx_tfl_s cn50xx;
- struct cvmx_mio_uartx_tfl_s cn52xx;
- struct cvmx_mio_uartx_tfl_s cn52xxp1;
- struct cvmx_mio_uartx_tfl_s cn56xx;
- struct cvmx_mio_uartx_tfl_s cn56xxp1;
- struct cvmx_mio_uartx_tfl_s cn58xx;
- struct cvmx_mio_uartx_tfl_s cn58xxp1;
- struct cvmx_mio_uartx_tfl_s cn61xx;
- struct cvmx_mio_uartx_tfl_s cn63xx;
- struct cvmx_mio_uartx_tfl_s cn63xxp1;
- struct cvmx_mio_uartx_tfl_s cn66xx;
- struct cvmx_mio_uartx_tfl_s cn68xx;
- struct cvmx_mio_uartx_tfl_s cn68xxp1;
- struct cvmx_mio_uartx_tfl_s cnf71xx;
};
union cvmx_mio_uartx_tfr {
@@ -5035,24 +3942,6 @@ union cvmx_mio_uartx_tfr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_tfr_s cn30xx;
- struct cvmx_mio_uartx_tfr_s cn31xx;
- struct cvmx_mio_uartx_tfr_s cn38xx;
- struct cvmx_mio_uartx_tfr_s cn38xxp2;
- struct cvmx_mio_uartx_tfr_s cn50xx;
- struct cvmx_mio_uartx_tfr_s cn52xx;
- struct cvmx_mio_uartx_tfr_s cn52xxp1;
- struct cvmx_mio_uartx_tfr_s cn56xx;
- struct cvmx_mio_uartx_tfr_s cn56xxp1;
- struct cvmx_mio_uartx_tfr_s cn58xx;
- struct cvmx_mio_uartx_tfr_s cn58xxp1;
- struct cvmx_mio_uartx_tfr_s cn61xx;
- struct cvmx_mio_uartx_tfr_s cn63xx;
- struct cvmx_mio_uartx_tfr_s cn63xxp1;
- struct cvmx_mio_uartx_tfr_s cn66xx;
- struct cvmx_mio_uartx_tfr_s cn68xx;
- struct cvmx_mio_uartx_tfr_s cn68xxp1;
- struct cvmx_mio_uartx_tfr_s cnf71xx;
};
union cvmx_mio_uartx_thr {
@@ -5066,24 +3955,6 @@ union cvmx_mio_uartx_thr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uartx_thr_s cn30xx;
- struct cvmx_mio_uartx_thr_s cn31xx;
- struct cvmx_mio_uartx_thr_s cn38xx;
- struct cvmx_mio_uartx_thr_s cn38xxp2;
- struct cvmx_mio_uartx_thr_s cn50xx;
- struct cvmx_mio_uartx_thr_s cn52xx;
- struct cvmx_mio_uartx_thr_s cn52xxp1;
- struct cvmx_mio_uartx_thr_s cn56xx;
- struct cvmx_mio_uartx_thr_s cn56xxp1;
- struct cvmx_mio_uartx_thr_s cn58xx;
- struct cvmx_mio_uartx_thr_s cn58xxp1;
- struct cvmx_mio_uartx_thr_s cn61xx;
- struct cvmx_mio_uartx_thr_s cn63xx;
- struct cvmx_mio_uartx_thr_s cn63xxp1;
- struct cvmx_mio_uartx_thr_s cn66xx;
- struct cvmx_mio_uartx_thr_s cn68xx;
- struct cvmx_mio_uartx_thr_s cn68xxp1;
- struct cvmx_mio_uartx_thr_s cnf71xx;
};
union cvmx_mio_uartx_usr {
@@ -5105,24 +3976,6 @@ union cvmx_mio_uartx_usr {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_mio_uartx_usr_s cn30xx;
- struct cvmx_mio_uartx_usr_s cn31xx;
- struct cvmx_mio_uartx_usr_s cn38xx;
- struct cvmx_mio_uartx_usr_s cn38xxp2;
- struct cvmx_mio_uartx_usr_s cn50xx;
- struct cvmx_mio_uartx_usr_s cn52xx;
- struct cvmx_mio_uartx_usr_s cn52xxp1;
- struct cvmx_mio_uartx_usr_s cn56xx;
- struct cvmx_mio_uartx_usr_s cn56xxp1;
- struct cvmx_mio_uartx_usr_s cn58xx;
- struct cvmx_mio_uartx_usr_s cn58xxp1;
- struct cvmx_mio_uartx_usr_s cn61xx;
- struct cvmx_mio_uartx_usr_s cn63xx;
- struct cvmx_mio_uartx_usr_s cn63xxp1;
- struct cvmx_mio_uartx_usr_s cn66xx;
- struct cvmx_mio_uartx_usr_s cn68xx;
- struct cvmx_mio_uartx_usr_s cn68xxp1;
- struct cvmx_mio_uartx_usr_s cnf71xx;
};
union cvmx_mio_uart2_dlh {
@@ -5136,8 +3989,6 @@ union cvmx_mio_uart2_dlh {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_dlh_s cn52xx;
- struct cvmx_mio_uart2_dlh_s cn52xxp1;
};
union cvmx_mio_uart2_dll {
@@ -5151,8 +4002,6 @@ union cvmx_mio_uart2_dll {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_dll_s cn52xx;
- struct cvmx_mio_uart2_dll_s cn52xxp1;
};
union cvmx_mio_uart2_far {
@@ -5166,8 +4015,6 @@ union cvmx_mio_uart2_far {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_uart2_far_s cn52xx;
- struct cvmx_mio_uart2_far_s cn52xxp1;
};
union cvmx_mio_uart2_fcr {
@@ -5191,8 +4038,6 @@ union cvmx_mio_uart2_fcr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_fcr_s cn52xx;
- struct cvmx_mio_uart2_fcr_s cn52xxp1;
};
union cvmx_mio_uart2_htx {
@@ -5206,8 +4051,6 @@ union cvmx_mio_uart2_htx {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_uart2_htx_s cn52xx;
- struct cvmx_mio_uart2_htx_s cn52xxp1;
};
union cvmx_mio_uart2_ier {
@@ -5231,8 +4074,6 @@ union cvmx_mio_uart2_ier {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_ier_s cn52xx;
- struct cvmx_mio_uart2_ier_s cn52xxp1;
};
union cvmx_mio_uart2_iir {
@@ -5250,8 +4091,6 @@ union cvmx_mio_uart2_iir {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_iir_s cn52xx;
- struct cvmx_mio_uart2_iir_s cn52xxp1;
};
union cvmx_mio_uart2_lcr {
@@ -5277,8 +4116,6 @@ union cvmx_mio_uart2_lcr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_lcr_s cn52xx;
- struct cvmx_mio_uart2_lcr_s cn52xxp1;
};
union cvmx_mio_uart2_lsr {
@@ -5306,8 +4143,6 @@ union cvmx_mio_uart2_lsr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_lsr_s cn52xx;
- struct cvmx_mio_uart2_lsr_s cn52xxp1;
};
union cvmx_mio_uart2_mcr {
@@ -5331,8 +4166,6 @@ union cvmx_mio_uart2_mcr {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_mio_uart2_mcr_s cn52xx;
- struct cvmx_mio_uart2_mcr_s cn52xxp1;
};
union cvmx_mio_uart2_msr {
@@ -5360,8 +4193,6 @@ union cvmx_mio_uart2_msr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_msr_s cn52xx;
- struct cvmx_mio_uart2_msr_s cn52xxp1;
};
union cvmx_mio_uart2_rbr {
@@ -5375,8 +4206,6 @@ union cvmx_mio_uart2_rbr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_rbr_s cn52xx;
- struct cvmx_mio_uart2_rbr_s cn52xxp1;
};
union cvmx_mio_uart2_rfl {
@@ -5390,8 +4219,6 @@ union cvmx_mio_uart2_rfl {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_mio_uart2_rfl_s cn52xx;
- struct cvmx_mio_uart2_rfl_s cn52xxp1;
};
union cvmx_mio_uart2_rfw {
@@ -5409,8 +4236,6 @@ union cvmx_mio_uart2_rfw {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_mio_uart2_rfw_s cn52xx;
- struct cvmx_mio_uart2_rfw_s cn52xxp1;
};
union cvmx_mio_uart2_sbcr {
@@ -5424,8 +4249,6 @@ union cvmx_mio_uart2_sbcr {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_uart2_sbcr_s cn52xx;
- struct cvmx_mio_uart2_sbcr_s cn52xxp1;
};
union cvmx_mio_uart2_scr {
@@ -5439,8 +4262,6 @@ union cvmx_mio_uart2_scr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_scr_s cn52xx;
- struct cvmx_mio_uart2_scr_s cn52xxp1;
};
union cvmx_mio_uart2_sfe {
@@ -5454,8 +4275,6 @@ union cvmx_mio_uart2_sfe {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_uart2_sfe_s cn52xx;
- struct cvmx_mio_uart2_sfe_s cn52xxp1;
};
union cvmx_mio_uart2_srr {
@@ -5473,8 +4292,6 @@ union cvmx_mio_uart2_srr {
uint64_t reserved_3_63:61;
#endif
} s;
- struct cvmx_mio_uart2_srr_s cn52xx;
- struct cvmx_mio_uart2_srr_s cn52xxp1;
};
union cvmx_mio_uart2_srt {
@@ -5488,8 +4305,6 @@ union cvmx_mio_uart2_srt {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_mio_uart2_srt_s cn52xx;
- struct cvmx_mio_uart2_srt_s cn52xxp1;
};
union cvmx_mio_uart2_srts {
@@ -5503,8 +4318,6 @@ union cvmx_mio_uart2_srts {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_mio_uart2_srts_s cn52xx;
- struct cvmx_mio_uart2_srts_s cn52xxp1;
};
union cvmx_mio_uart2_stt {
@@ -5518,8 +4331,6 @@ union cvmx_mio_uart2_stt {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_mio_uart2_stt_s cn52xx;
- struct cvmx_mio_uart2_stt_s cn52xxp1;
};
union cvmx_mio_uart2_tfl {
@@ -5533,8 +4344,6 @@ union cvmx_mio_uart2_tfl {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_mio_uart2_tfl_s cn52xx;
- struct cvmx_mio_uart2_tfl_s cn52xxp1;
};
union cvmx_mio_uart2_tfr {
@@ -5548,8 +4357,6 @@ union cvmx_mio_uart2_tfr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_tfr_s cn52xx;
- struct cvmx_mio_uart2_tfr_s cn52xxp1;
};
union cvmx_mio_uart2_thr {
@@ -5563,8 +4370,6 @@ union cvmx_mio_uart2_thr {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_mio_uart2_thr_s cn52xx;
- struct cvmx_mio_uart2_thr_s cn52xxp1;
};
union cvmx_mio_uart2_usr {
@@ -5586,8 +4391,6 @@ union cvmx_mio_uart2_usr {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_mio_uart2_usr_s cn52xx;
- struct cvmx_mio_uart2_usr_s cn52xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-mixx-defs.h b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
index 3155e6019dc8..cd60d43e809a 100644
--- a/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-mixx-defs.h
@@ -80,15 +80,6 @@ union cvmx_mixx_bist {
uint64_t reserved_4_63:60;
#endif
} cn52xx;
- struct cvmx_mixx_bist_cn52xx cn52xxp1;
- struct cvmx_mixx_bist_cn52xx cn56xx;
- struct cvmx_mixx_bist_cn52xx cn56xxp1;
- struct cvmx_mixx_bist_s cn61xx;
- struct cvmx_mixx_bist_s cn63xx;
- struct cvmx_mixx_bist_s cn63xxp1;
- struct cvmx_mixx_bist_s cn66xx;
- struct cvmx_mixx_bist_s cn68xx;
- struct cvmx_mixx_bist_s cn68xxp1;
};
union cvmx_mixx_ctl {
@@ -137,15 +128,6 @@ union cvmx_mixx_ctl {
uint64_t reserved_8_63:56;
#endif
} cn52xx;
- struct cvmx_mixx_ctl_cn52xx cn52xxp1;
- struct cvmx_mixx_ctl_cn52xx cn56xx;
- struct cvmx_mixx_ctl_cn52xx cn56xxp1;
- struct cvmx_mixx_ctl_s cn61xx;
- struct cvmx_mixx_ctl_s cn63xx;
- struct cvmx_mixx_ctl_s cn63xxp1;
- struct cvmx_mixx_ctl_s cn66xx;
- struct cvmx_mixx_ctl_s cn68xx;
- struct cvmx_mixx_ctl_s cn68xxp1;
};
union cvmx_mixx_intena {
@@ -194,15 +176,6 @@ union cvmx_mixx_intena {
uint64_t reserved_7_63:57;
#endif
} cn52xx;
- struct cvmx_mixx_intena_cn52xx cn52xxp1;
- struct cvmx_mixx_intena_cn52xx cn56xx;
- struct cvmx_mixx_intena_cn52xx cn56xxp1;
- struct cvmx_mixx_intena_s cn61xx;
- struct cvmx_mixx_intena_s cn63xx;
- struct cvmx_mixx_intena_s cn63xxp1;
- struct cvmx_mixx_intena_s cn66xx;
- struct cvmx_mixx_intena_s cn68xx;
- struct cvmx_mixx_intena_s cn68xxp1;
};
union cvmx_mixx_ircnt {
@@ -216,16 +189,6 @@ union cvmx_mixx_ircnt {
uint64_t reserved_20_63:44;
#endif
} s;
- struct cvmx_mixx_ircnt_s cn52xx;
- struct cvmx_mixx_ircnt_s cn52xxp1;
- struct cvmx_mixx_ircnt_s cn56xx;
- struct cvmx_mixx_ircnt_s cn56xxp1;
- struct cvmx_mixx_ircnt_s cn61xx;
- struct cvmx_mixx_ircnt_s cn63xx;
- struct cvmx_mixx_ircnt_s cn63xxp1;
- struct cvmx_mixx_ircnt_s cn66xx;
- struct cvmx_mixx_ircnt_s cn68xx;
- struct cvmx_mixx_ircnt_s cn68xxp1;
};
union cvmx_mixx_irhwm {
@@ -241,16 +204,6 @@ union cvmx_mixx_irhwm {
uint64_t reserved_40_63:24;
#endif
} s;
- struct cvmx_mixx_irhwm_s cn52xx;
- struct cvmx_mixx_irhwm_s cn52xxp1;
- struct cvmx_mixx_irhwm_s cn56xx;
- struct cvmx_mixx_irhwm_s cn56xxp1;
- struct cvmx_mixx_irhwm_s cn61xx;
- struct cvmx_mixx_irhwm_s cn63xx;
- struct cvmx_mixx_irhwm_s cn63xxp1;
- struct cvmx_mixx_irhwm_s cn66xx;
- struct cvmx_mixx_irhwm_s cn68xx;
- struct cvmx_mixx_irhwm_s cn68xxp1;
};
union cvmx_mixx_iring1 {
@@ -283,15 +236,6 @@ union cvmx_mixx_iring1 {
uint64_t reserved_60_63:4;
#endif
} cn52xx;
- struct cvmx_mixx_iring1_cn52xx cn52xxp1;
- struct cvmx_mixx_iring1_cn52xx cn56xx;
- struct cvmx_mixx_iring1_cn52xx cn56xxp1;
- struct cvmx_mixx_iring1_s cn61xx;
- struct cvmx_mixx_iring1_s cn63xx;
- struct cvmx_mixx_iring1_s cn63xxp1;
- struct cvmx_mixx_iring1_s cn66xx;
- struct cvmx_mixx_iring1_s cn68xx;
- struct cvmx_mixx_iring1_s cn68xxp1;
};
union cvmx_mixx_iring2 {
@@ -309,16 +253,6 @@ union cvmx_mixx_iring2 {
uint64_t reserved_52_63:12;
#endif
} s;
- struct cvmx_mixx_iring2_s cn52xx;
- struct cvmx_mixx_iring2_s cn52xxp1;
- struct cvmx_mixx_iring2_s cn56xx;
- struct cvmx_mixx_iring2_s cn56xxp1;
- struct cvmx_mixx_iring2_s cn61xx;
- struct cvmx_mixx_iring2_s cn63xx;
- struct cvmx_mixx_iring2_s cn63xxp1;
- struct cvmx_mixx_iring2_s cn66xx;
- struct cvmx_mixx_iring2_s cn68xx;
- struct cvmx_mixx_iring2_s cn68xxp1;
};
union cvmx_mixx_isr {
@@ -367,15 +301,6 @@ union cvmx_mixx_isr {
uint64_t reserved_7_63:57;
#endif
} cn52xx;
- struct cvmx_mixx_isr_cn52xx cn52xxp1;
- struct cvmx_mixx_isr_cn52xx cn56xx;
- struct cvmx_mixx_isr_cn52xx cn56xxp1;
- struct cvmx_mixx_isr_s cn61xx;
- struct cvmx_mixx_isr_s cn63xx;
- struct cvmx_mixx_isr_s cn63xxp1;
- struct cvmx_mixx_isr_s cn66xx;
- struct cvmx_mixx_isr_s cn68xx;
- struct cvmx_mixx_isr_s cn68xxp1;
};
union cvmx_mixx_orcnt {
@@ -389,16 +314,6 @@ union cvmx_mixx_orcnt {
uint64_t reserved_20_63:44;
#endif
} s;
- struct cvmx_mixx_orcnt_s cn52xx;
- struct cvmx_mixx_orcnt_s cn52xxp1;
- struct cvmx_mixx_orcnt_s cn56xx;
- struct cvmx_mixx_orcnt_s cn56xxp1;
- struct cvmx_mixx_orcnt_s cn61xx;
- struct cvmx_mixx_orcnt_s cn63xx;
- struct cvmx_mixx_orcnt_s cn63xxp1;
- struct cvmx_mixx_orcnt_s cn66xx;
- struct cvmx_mixx_orcnt_s cn68xx;
- struct cvmx_mixx_orcnt_s cn68xxp1;
};
union cvmx_mixx_orhwm {
@@ -412,16 +327,6 @@ union cvmx_mixx_orhwm {
uint64_t reserved_20_63:44;
#endif
} s;
- struct cvmx_mixx_orhwm_s cn52xx;
- struct cvmx_mixx_orhwm_s cn52xxp1;
- struct cvmx_mixx_orhwm_s cn56xx;
- struct cvmx_mixx_orhwm_s cn56xxp1;
- struct cvmx_mixx_orhwm_s cn61xx;
- struct cvmx_mixx_orhwm_s cn63xx;
- struct cvmx_mixx_orhwm_s cn63xxp1;
- struct cvmx_mixx_orhwm_s cn66xx;
- struct cvmx_mixx_orhwm_s cn68xx;
- struct cvmx_mixx_orhwm_s cn68xxp1;
};
union cvmx_mixx_oring1 {
@@ -454,15 +359,6 @@ union cvmx_mixx_oring1 {
uint64_t reserved_60_63:4;
#endif
} cn52xx;
- struct cvmx_mixx_oring1_cn52xx cn52xxp1;
- struct cvmx_mixx_oring1_cn52xx cn56xx;
- struct cvmx_mixx_oring1_cn52xx cn56xxp1;
- struct cvmx_mixx_oring1_s cn61xx;
- struct cvmx_mixx_oring1_s cn63xx;
- struct cvmx_mixx_oring1_s cn63xxp1;
- struct cvmx_mixx_oring1_s cn66xx;
- struct cvmx_mixx_oring1_s cn68xx;
- struct cvmx_mixx_oring1_s cn68xxp1;
};
union cvmx_mixx_oring2 {
@@ -480,16 +376,6 @@ union cvmx_mixx_oring2 {
uint64_t reserved_52_63:12;
#endif
} s;
- struct cvmx_mixx_oring2_s cn52xx;
- struct cvmx_mixx_oring2_s cn52xxp1;
- struct cvmx_mixx_oring2_s cn56xx;
- struct cvmx_mixx_oring2_s cn56xxp1;
- struct cvmx_mixx_oring2_s cn61xx;
- struct cvmx_mixx_oring2_s cn63xx;
- struct cvmx_mixx_oring2_s cn63xxp1;
- struct cvmx_mixx_oring2_s cn66xx;
- struct cvmx_mixx_oring2_s cn68xx;
- struct cvmx_mixx_oring2_s cn68xxp1;
};
union cvmx_mixx_remcnt {
@@ -507,16 +393,6 @@ union cvmx_mixx_remcnt {
uint64_t reserved_52_63:12;
#endif
} s;
- struct cvmx_mixx_remcnt_s cn52xx;
- struct cvmx_mixx_remcnt_s cn52xxp1;
- struct cvmx_mixx_remcnt_s cn56xx;
- struct cvmx_mixx_remcnt_s cn56xxp1;
- struct cvmx_mixx_remcnt_s cn61xx;
- struct cvmx_mixx_remcnt_s cn63xx;
- struct cvmx_mixx_remcnt_s cn63xxp1;
- struct cvmx_mixx_remcnt_s cn66xx;
- struct cvmx_mixx_remcnt_s cn68xx;
- struct cvmx_mixx_remcnt_s cn68xxp1;
};
union cvmx_mixx_tsctl {
@@ -538,12 +414,6 @@ union cvmx_mixx_tsctl {
uint64_t reserved_21_63:43;
#endif
} s;
- struct cvmx_mixx_tsctl_s cn61xx;
- struct cvmx_mixx_tsctl_s cn63xx;
- struct cvmx_mixx_tsctl_s cn63xxp1;
- struct cvmx_mixx_tsctl_s cn66xx;
- struct cvmx_mixx_tsctl_s cn68xx;
- struct cvmx_mixx_tsctl_s cn68xxp1;
};
union cvmx_mixx_tstamp {
@@ -555,12 +425,6 @@ union cvmx_mixx_tstamp {
uint64_t tstamp:64;
#endif
} s;
- struct cvmx_mixx_tstamp_s cn61xx;
- struct cvmx_mixx_tstamp_s cn63xx;
- struct cvmx_mixx_tstamp_s cn63xxp1;
- struct cvmx_mixx_tstamp_s cn66xx;
- struct cvmx_mixx_tstamp_s cn68xx;
- struct cvmx_mixx_tstamp_s cn68xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-npei-defs.h b/arch/mips/include/asm/octeon/cvmx-npei-defs.h
index 58114d414356..6a51b1ef8c9b 100644
--- a/arch/mips/include/asm/octeon/cvmx-npei-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-npei-defs.h
@@ -154,10 +154,6 @@ union cvmx_npei_bar1_indexx {
uint32_t reserved_18_31:14;
#endif
} s;
- struct cvmx_npei_bar1_indexx_s cn52xx;
- struct cvmx_npei_bar1_indexx_s cn52xxp1;
- struct cvmx_npei_bar1_indexx_s cn56xx;
- struct cvmx_npei_bar1_indexx_s cn56xxp1;
};
union cvmx_npei_bist_status {
@@ -485,7 +481,6 @@ union cvmx_npei_bist_status {
uint64_t reserved_46_63:18;
#endif
} cn52xxp1;
- struct cvmx_npei_bist_status_cn52xx cn56xx;
struct cvmx_npei_bist_status_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_58_63:6;
@@ -648,8 +643,6 @@ union cvmx_npei_bist_status2 {
uint64_t reserved_14_63:50;
#endif
} s;
- struct cvmx_npei_bist_status2_s cn52xx;
- struct cvmx_npei_bist_status2_s cn56xx;
};
union cvmx_npei_ctl_port0 {
@@ -693,10 +686,6 @@ union cvmx_npei_ctl_port0 {
uint64_t reserved_21_63:43;
#endif
} s;
- struct cvmx_npei_ctl_port0_s cn52xx;
- struct cvmx_npei_ctl_port0_s cn52xxp1;
- struct cvmx_npei_ctl_port0_s cn56xx;
- struct cvmx_npei_ctl_port0_s cn56xxp1;
};
union cvmx_npei_ctl_port1 {
@@ -740,10 +729,6 @@ union cvmx_npei_ctl_port1 {
uint64_t reserved_21_63:43;
#endif
} s;
- struct cvmx_npei_ctl_port1_s cn52xx;
- struct cvmx_npei_ctl_port1_s cn52xxp1;
- struct cvmx_npei_ctl_port1_s cn56xx;
- struct cvmx_npei_ctl_port1_s cn56xxp1;
};
union cvmx_npei_ctl_status {
@@ -773,7 +758,6 @@ union cvmx_npei_ctl_status {
uint64_t reserved_44_63:20;
#endif
} s;
- struct cvmx_npei_ctl_status_s cn52xx;
struct cvmx_npei_ctl_status_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_44_63:20;
@@ -799,7 +783,6 @@ union cvmx_npei_ctl_status {
uint64_t reserved_44_63:20;
#endif
} cn52xxp1;
- struct cvmx_npei_ctl_status_s cn56xx;
struct cvmx_npei_ctl_status_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_15_63:49;
@@ -848,10 +831,6 @@ union cvmx_npei_ctl_status2 {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_npei_ctl_status2_s cn52xx;
- struct cvmx_npei_ctl_status2_s cn52xxp1;
- struct cvmx_npei_ctl_status2_s cn56xx;
- struct cvmx_npei_ctl_status2_s cn56xxp1;
};
union cvmx_npei_data_out_cnt {
@@ -871,10 +850,6 @@ union cvmx_npei_data_out_cnt {
uint64_t reserved_44_63:20;
#endif
} s;
- struct cvmx_npei_data_out_cnt_s cn52xx;
- struct cvmx_npei_data_out_cnt_s cn52xxp1;
- struct cvmx_npei_data_out_cnt_s cn56xx;
- struct cvmx_npei_data_out_cnt_s cn56xxp1;
};
union cvmx_npei_dbg_data {
@@ -919,7 +894,6 @@ union cvmx_npei_dbg_data {
uint64_t reserved_29_63:35;
#endif
} cn52xx;
- struct cvmx_npei_dbg_data_cn52xx cn52xxp1;
struct cvmx_npei_dbg_data_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
@@ -941,7 +915,6 @@ union cvmx_npei_dbg_data {
uint64_t reserved_29_63:35;
#endif
} cn56xx;
- struct cvmx_npei_dbg_data_cn56xx cn56xxp1;
};
union cvmx_npei_dbg_select {
@@ -955,10 +928,6 @@ union cvmx_npei_dbg_select {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_npei_dbg_select_s cn52xx;
- struct cvmx_npei_dbg_select_s cn52xxp1;
- struct cvmx_npei_dbg_select_s cn56xx;
- struct cvmx_npei_dbg_select_s cn56xxp1;
};
union cvmx_npei_dmax_counts {
@@ -974,10 +943,6 @@ union cvmx_npei_dmax_counts {
uint64_t reserved_39_63:25;
#endif
} s;
- struct cvmx_npei_dmax_counts_s cn52xx;
- struct cvmx_npei_dmax_counts_s cn52xxp1;
- struct cvmx_npei_dmax_counts_s cn56xx;
- struct cvmx_npei_dmax_counts_s cn56xxp1;
};
union cvmx_npei_dmax_dbell {
@@ -991,10 +956,6 @@ union cvmx_npei_dmax_dbell {
uint32_t reserved_16_31:16;
#endif
} s;
- struct cvmx_npei_dmax_dbell_s cn52xx;
- struct cvmx_npei_dmax_dbell_s cn52xxp1;
- struct cvmx_npei_dmax_dbell_s cn56xx;
- struct cvmx_npei_dmax_dbell_s cn56xxp1;
};
union cvmx_npei_dmax_ibuff_saddr {
@@ -1012,7 +973,6 @@ union cvmx_npei_dmax_ibuff_saddr {
uint64_t reserved_37_63:27;
#endif
} s;
- struct cvmx_npei_dmax_ibuff_saddr_s cn52xx;
struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_36_63:28;
@@ -1024,8 +984,6 @@ union cvmx_npei_dmax_ibuff_saddr {
uint64_t reserved_36_63:28;
#endif
} cn52xxp1;
- struct cvmx_npei_dmax_ibuff_saddr_s cn56xx;
- struct cvmx_npei_dmax_ibuff_saddr_cn52xxp1 cn56xxp1;
};
union cvmx_npei_dmax_naddr {
@@ -1039,10 +997,6 @@ union cvmx_npei_dmax_naddr {
uint64_t reserved_36_63:28;
#endif
} s;
- struct cvmx_npei_dmax_naddr_s cn52xx;
- struct cvmx_npei_dmax_naddr_s cn52xxp1;
- struct cvmx_npei_dmax_naddr_s cn56xx;
- struct cvmx_npei_dmax_naddr_s cn56xxp1;
};
union cvmx_npei_dma0_int_level {
@@ -1056,10 +1010,6 @@ union cvmx_npei_dma0_int_level {
uint64_t time:32;
#endif
} s;
- struct cvmx_npei_dma0_int_level_s cn52xx;
- struct cvmx_npei_dma0_int_level_s cn52xxp1;
- struct cvmx_npei_dma0_int_level_s cn56xx;
- struct cvmx_npei_dma0_int_level_s cn56xxp1;
};
union cvmx_npei_dma1_int_level {
@@ -1073,10 +1023,6 @@ union cvmx_npei_dma1_int_level {
uint64_t time:32;
#endif
} s;
- struct cvmx_npei_dma1_int_level_s cn52xx;
- struct cvmx_npei_dma1_int_level_s cn52xxp1;
- struct cvmx_npei_dma1_int_level_s cn56xx;
- struct cvmx_npei_dma1_int_level_s cn56xxp1;
};
union cvmx_npei_dma_cnts {
@@ -1090,10 +1036,6 @@ union cvmx_npei_dma_cnts {
uint64_t dma1:32;
#endif
} s;
- struct cvmx_npei_dma_cnts_s cn52xx;
- struct cvmx_npei_dma_cnts_s cn52xxp1;
- struct cvmx_npei_dma_cnts_s cn56xx;
- struct cvmx_npei_dma_cnts_s cn56xxp1;
};
union cvmx_npei_dma_control {
@@ -1137,7 +1079,6 @@ union cvmx_npei_dma_control {
uint64_t reserved_40_63:24;
#endif
} s;
- struct cvmx_npei_dma_control_s cn52xx;
struct cvmx_npei_dma_control_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_38_63:26;
@@ -1173,7 +1114,6 @@ union cvmx_npei_dma_control {
uint64_t reserved_38_63:26;
#endif
} cn52xxp1;
- struct cvmx_npei_dma_control_s cn56xx;
struct cvmx_npei_dma_control_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_39_63:25;
@@ -1250,8 +1190,6 @@ union cvmx_npei_dma_pcie_req_num {
uint64_t dma_arb:1;
#endif
} s;
- struct cvmx_npei_dma_pcie_req_num_s cn52xx;
- struct cvmx_npei_dma_pcie_req_num_s cn56xx;
};
union cvmx_npei_dma_state1 {
@@ -1273,7 +1211,6 @@ union cvmx_npei_dma_state1 {
uint64_t reserved_40_63:24;
#endif
} s;
- struct cvmx_npei_dma_state1_s cn52xx;
};
union cvmx_npei_dma_state1_p1 {
@@ -1332,7 +1269,6 @@ union cvmx_npei_dma_state1_p1 {
uint64_t reserved_60_63:4;
#endif
} cn52xxp1;
- struct cvmx_npei_dma_state1_p1_s cn56xxp1;
};
union cvmx_npei_dma_state2 {
@@ -1354,7 +1290,6 @@ union cvmx_npei_dma_state2 {
uint64_t reserved_28_63:36;
#endif
} s;
- struct cvmx_npei_dma_state2_s cn52xx;
};
union cvmx_npei_dma_state2_p1 {
@@ -1393,7 +1328,6 @@ union cvmx_npei_dma_state2_p1 {
uint64_t reserved_45_63:19;
#endif
} cn52xxp1;
- struct cvmx_npei_dma_state2_p1_s cn56xxp1;
};
union cvmx_npei_dma_state3_p1 {
@@ -1413,8 +1347,6 @@ union cvmx_npei_dma_state3_p1 {
uint64_t reserved_60_63:4;
#endif
} s;
- struct cvmx_npei_dma_state3_p1_s cn52xxp1;
- struct cvmx_npei_dma_state3_p1_s cn56xxp1;
};
union cvmx_npei_dma_state4_p1 {
@@ -1434,8 +1366,6 @@ union cvmx_npei_dma_state4_p1 {
uint64_t reserved_52_63:12;
#endif
} s;
- struct cvmx_npei_dma_state4_p1_s cn52xxp1;
- struct cvmx_npei_dma_state4_p1_s cn56xxp1;
};
union cvmx_npei_dma_state5_p1 {
@@ -1451,7 +1381,6 @@ union cvmx_npei_dma_state5_p1 {
uint64_t reserved_28_63:36;
#endif
} s;
- struct cvmx_npei_dma_state5_p1_s cn56xxp1;
};
union cvmx_npei_int_a_enb {
@@ -1483,7 +1412,6 @@ union cvmx_npei_int_a_enb {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_npei_int_a_enb_s cn52xx;
struct cvmx_npei_int_a_enb_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
@@ -1495,7 +1423,6 @@ union cvmx_npei_int_a_enb {
uint64_t reserved_2_63:62;
#endif
} cn52xxp1;
- struct cvmx_npei_int_a_enb_s cn56xx;
};
union cvmx_npei_int_a_enb2 {
@@ -1527,7 +1454,6 @@ union cvmx_npei_int_a_enb2 {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_npei_int_a_enb2_s cn52xx;
struct cvmx_npei_int_a_enb2_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
@@ -1539,7 +1465,6 @@ union cvmx_npei_int_a_enb2 {
uint64_t reserved_2_63:62;
#endif
} cn52xxp1;
- struct cvmx_npei_int_a_enb2_s cn56xx;
};
union cvmx_npei_int_a_sum {
@@ -1571,7 +1496,6 @@ union cvmx_npei_int_a_sum {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_npei_int_a_sum_s cn52xx;
struct cvmx_npei_int_a_sum_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
@@ -1583,7 +1507,6 @@ union cvmx_npei_int_a_sum {
uint64_t reserved_2_63:62;
#endif
} cn52xxp1;
- struct cvmx_npei_int_a_sum_s cn56xx;
};
union cvmx_npei_int_enb {
@@ -1721,7 +1644,6 @@ union cvmx_npei_int_enb {
uint64_t mio_inta:1;
#endif
} s;
- struct cvmx_npei_int_enb_s cn52xx;
struct cvmx_npei_int_enb_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t mio_inta:1;
@@ -1855,7 +1777,6 @@ union cvmx_npei_int_enb {
uint64_t mio_inta:1;
#endif
} cn52xxp1;
- struct cvmx_npei_int_enb_s cn56xx;
struct cvmx_npei_int_enb_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t mio_inta:1;
@@ -2122,7 +2043,6 @@ union cvmx_npei_int_enb2 {
uint64_t reserved_62_63:2;
#endif
} s;
- struct cvmx_npei_int_enb2_s cn52xx;
struct cvmx_npei_int_enb2_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_62_63:2;
@@ -2254,7 +2174,6 @@ union cvmx_npei_int_enb2 {
uint64_t reserved_62_63:2;
#endif
} cn52xxp1;
- struct cvmx_npei_int_enb2_s cn56xx;
struct cvmx_npei_int_enb2_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_61_63:3;
@@ -2399,9 +2318,6 @@ union cvmx_npei_int_info {
uint64_t reserved_12_63:52;
#endif
} s;
- struct cvmx_npei_int_info_s cn52xx;
- struct cvmx_npei_int_info_s cn56xx;
- struct cvmx_npei_int_info_s cn56xxp1;
};
union cvmx_npei_int_sum {
@@ -2539,7 +2455,6 @@ union cvmx_npei_int_sum {
uint64_t mio_inta:1;
#endif
} s;
- struct cvmx_npei_int_sum_s cn52xx;
struct cvmx_npei_int_sum_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t mio_inta:1;
@@ -2667,7 +2582,6 @@ union cvmx_npei_int_sum {
uint64_t mio_inta:1;
#endif
} cn52xxp1;
- struct cvmx_npei_int_sum_s cn56xx;
struct cvmx_npei_int_sum_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t mio_inta:1;
@@ -2924,9 +2838,6 @@ union cvmx_npei_int_sum2 {
uint64_t mio_inta:1;
#endif
} s;
- struct cvmx_npei_int_sum2_s cn52xx;
- struct cvmx_npei_int_sum2_s cn52xxp1;
- struct cvmx_npei_int_sum2_s cn56xx;
};
union cvmx_npei_last_win_rdata0 {
@@ -2938,10 +2849,6 @@ union cvmx_npei_last_win_rdata0 {
uint64_t data:64;
#endif
} s;
- struct cvmx_npei_last_win_rdata0_s cn52xx;
- struct cvmx_npei_last_win_rdata0_s cn52xxp1;
- struct cvmx_npei_last_win_rdata0_s cn56xx;
- struct cvmx_npei_last_win_rdata0_s cn56xxp1;
};
union cvmx_npei_last_win_rdata1 {
@@ -2953,10 +2860,6 @@ union cvmx_npei_last_win_rdata1 {
uint64_t data:64;
#endif
} s;
- struct cvmx_npei_last_win_rdata1_s cn52xx;
- struct cvmx_npei_last_win_rdata1_s cn52xxp1;
- struct cvmx_npei_last_win_rdata1_s cn56xx;
- struct cvmx_npei_last_win_rdata1_s cn56xxp1;
};
union cvmx_npei_mem_access_ctl {
@@ -2972,10 +2875,6 @@ union cvmx_npei_mem_access_ctl {
uint64_t reserved_14_63:50;
#endif
} s;
- struct cvmx_npei_mem_access_ctl_s cn52xx;
- struct cvmx_npei_mem_access_ctl_s cn52xxp1;
- struct cvmx_npei_mem_access_ctl_s cn56xx;
- struct cvmx_npei_mem_access_ctl_s cn56xxp1;
};
union cvmx_npei_mem_access_subidx {
@@ -3007,10 +2906,6 @@ union cvmx_npei_mem_access_subidx {
uint64_t reserved_42_63:22;
#endif
} s;
- struct cvmx_npei_mem_access_subidx_s cn52xx;
- struct cvmx_npei_mem_access_subidx_s cn52xxp1;
- struct cvmx_npei_mem_access_subidx_s cn56xx;
- struct cvmx_npei_mem_access_subidx_s cn56xxp1;
};
union cvmx_npei_msi_enb0 {
@@ -3022,10 +2917,6 @@ union cvmx_npei_msi_enb0 {
uint64_t enb:64;
#endif
} s;
- struct cvmx_npei_msi_enb0_s cn52xx;
- struct cvmx_npei_msi_enb0_s cn52xxp1;
- struct cvmx_npei_msi_enb0_s cn56xx;
- struct cvmx_npei_msi_enb0_s cn56xxp1;
};
union cvmx_npei_msi_enb1 {
@@ -3037,10 +2928,6 @@ union cvmx_npei_msi_enb1 {
uint64_t enb:64;
#endif
} s;
- struct cvmx_npei_msi_enb1_s cn52xx;
- struct cvmx_npei_msi_enb1_s cn52xxp1;
- struct cvmx_npei_msi_enb1_s cn56xx;
- struct cvmx_npei_msi_enb1_s cn56xxp1;
};
union cvmx_npei_msi_enb2 {
@@ -3052,10 +2939,6 @@ union cvmx_npei_msi_enb2 {
uint64_t enb:64;
#endif
} s;
- struct cvmx_npei_msi_enb2_s cn52xx;
- struct cvmx_npei_msi_enb2_s cn52xxp1;
- struct cvmx_npei_msi_enb2_s cn56xx;
- struct cvmx_npei_msi_enb2_s cn56xxp1;
};
union cvmx_npei_msi_enb3 {
@@ -3067,10 +2950,6 @@ union cvmx_npei_msi_enb3 {
uint64_t enb:64;
#endif
} s;
- struct cvmx_npei_msi_enb3_s cn52xx;
- struct cvmx_npei_msi_enb3_s cn52xxp1;
- struct cvmx_npei_msi_enb3_s cn56xx;
- struct cvmx_npei_msi_enb3_s cn56xxp1;
};
union cvmx_npei_msi_rcv0 {
@@ -3082,10 +2961,6 @@ union cvmx_npei_msi_rcv0 {
uint64_t intr:64;
#endif
} s;
- struct cvmx_npei_msi_rcv0_s cn52xx;
- struct cvmx_npei_msi_rcv0_s cn52xxp1;
- struct cvmx_npei_msi_rcv0_s cn56xx;
- struct cvmx_npei_msi_rcv0_s cn56xxp1;
};
union cvmx_npei_msi_rcv1 {
@@ -3097,10 +2972,6 @@ union cvmx_npei_msi_rcv1 {
uint64_t intr:64;
#endif
} s;
- struct cvmx_npei_msi_rcv1_s cn52xx;
- struct cvmx_npei_msi_rcv1_s cn52xxp1;
- struct cvmx_npei_msi_rcv1_s cn56xx;
- struct cvmx_npei_msi_rcv1_s cn56xxp1;
};
union cvmx_npei_msi_rcv2 {
@@ -3112,10 +2983,6 @@ union cvmx_npei_msi_rcv2 {
uint64_t intr:64;
#endif
} s;
- struct cvmx_npei_msi_rcv2_s cn52xx;
- struct cvmx_npei_msi_rcv2_s cn52xxp1;
- struct cvmx_npei_msi_rcv2_s cn56xx;
- struct cvmx_npei_msi_rcv2_s cn56xxp1;
};
union cvmx_npei_msi_rcv3 {
@@ -3127,10 +2994,6 @@ union cvmx_npei_msi_rcv3 {
uint64_t intr:64;
#endif
} s;
- struct cvmx_npei_msi_rcv3_s cn52xx;
- struct cvmx_npei_msi_rcv3_s cn52xxp1;
- struct cvmx_npei_msi_rcv3_s cn56xx;
- struct cvmx_npei_msi_rcv3_s cn56xxp1;
};
union cvmx_npei_msi_rd_map {
@@ -3146,10 +3009,6 @@ union cvmx_npei_msi_rd_map {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_npei_msi_rd_map_s cn52xx;
- struct cvmx_npei_msi_rd_map_s cn52xxp1;
- struct cvmx_npei_msi_rd_map_s cn56xx;
- struct cvmx_npei_msi_rd_map_s cn56xxp1;
};
union cvmx_npei_msi_w1c_enb0 {
@@ -3161,8 +3020,6 @@ union cvmx_npei_msi_w1c_enb0 {
uint64_t clr:64;
#endif
} s;
- struct cvmx_npei_msi_w1c_enb0_s cn52xx;
- struct cvmx_npei_msi_w1c_enb0_s cn56xx;
};
union cvmx_npei_msi_w1c_enb1 {
@@ -3174,8 +3031,6 @@ union cvmx_npei_msi_w1c_enb1 {
uint64_t clr:64;
#endif
} s;
- struct cvmx_npei_msi_w1c_enb1_s cn52xx;
- struct cvmx_npei_msi_w1c_enb1_s cn56xx;
};
union cvmx_npei_msi_w1c_enb2 {
@@ -3187,8 +3042,6 @@ union cvmx_npei_msi_w1c_enb2 {
uint64_t clr:64;
#endif
} s;
- struct cvmx_npei_msi_w1c_enb2_s cn52xx;
- struct cvmx_npei_msi_w1c_enb2_s cn56xx;
};
union cvmx_npei_msi_w1c_enb3 {
@@ -3200,8 +3053,6 @@ union cvmx_npei_msi_w1c_enb3 {
uint64_t clr:64;
#endif
} s;
- struct cvmx_npei_msi_w1c_enb3_s cn52xx;
- struct cvmx_npei_msi_w1c_enb3_s cn56xx;
};
union cvmx_npei_msi_w1s_enb0 {
@@ -3213,8 +3064,6 @@ union cvmx_npei_msi_w1s_enb0 {
uint64_t set:64;
#endif
} s;
- struct cvmx_npei_msi_w1s_enb0_s cn52xx;
- struct cvmx_npei_msi_w1s_enb0_s cn56xx;
};
union cvmx_npei_msi_w1s_enb1 {
@@ -3226,8 +3075,6 @@ union cvmx_npei_msi_w1s_enb1 {
uint64_t set:64;
#endif
} s;
- struct cvmx_npei_msi_w1s_enb1_s cn52xx;
- struct cvmx_npei_msi_w1s_enb1_s cn56xx;
};
union cvmx_npei_msi_w1s_enb2 {
@@ -3239,8 +3086,6 @@ union cvmx_npei_msi_w1s_enb2 {
uint64_t set:64;
#endif
} s;
- struct cvmx_npei_msi_w1s_enb2_s cn52xx;
- struct cvmx_npei_msi_w1s_enb2_s cn56xx;
};
union cvmx_npei_msi_w1s_enb3 {
@@ -3252,8 +3097,6 @@ union cvmx_npei_msi_w1s_enb3 {
uint64_t set:64;
#endif
} s;
- struct cvmx_npei_msi_w1s_enb3_s cn52xx;
- struct cvmx_npei_msi_w1s_enb3_s cn56xx;
};
union cvmx_npei_msi_wr_map {
@@ -3269,10 +3112,6 @@ union cvmx_npei_msi_wr_map {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_npei_msi_wr_map_s cn52xx;
- struct cvmx_npei_msi_wr_map_s cn52xxp1;
- struct cvmx_npei_msi_wr_map_s cn56xx;
- struct cvmx_npei_msi_wr_map_s cn56xxp1;
};
union cvmx_npei_pcie_credit_cnt {
@@ -3296,8 +3135,6 @@ union cvmx_npei_pcie_credit_cnt {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_npei_pcie_credit_cnt_s cn52xx;
- struct cvmx_npei_pcie_credit_cnt_s cn56xx;
};
union cvmx_npei_pcie_msi_rcv {
@@ -3311,10 +3148,6 @@ union cvmx_npei_pcie_msi_rcv {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_npei_pcie_msi_rcv_s cn52xx;
- struct cvmx_npei_pcie_msi_rcv_s cn52xxp1;
- struct cvmx_npei_pcie_msi_rcv_s cn56xx;
- struct cvmx_npei_pcie_msi_rcv_s cn56xxp1;
};
union cvmx_npei_pcie_msi_rcv_b1 {
@@ -3330,10 +3163,6 @@ union cvmx_npei_pcie_msi_rcv_b1 {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_npei_pcie_msi_rcv_b1_s cn52xx;
- struct cvmx_npei_pcie_msi_rcv_b1_s cn52xxp1;
- struct cvmx_npei_pcie_msi_rcv_b1_s cn56xx;
- struct cvmx_npei_pcie_msi_rcv_b1_s cn56xxp1;
};
union cvmx_npei_pcie_msi_rcv_b2 {
@@ -3349,10 +3178,6 @@ union cvmx_npei_pcie_msi_rcv_b2 {
uint64_t reserved_24_63:40;
#endif
} s;
- struct cvmx_npei_pcie_msi_rcv_b2_s cn52xx;
- struct cvmx_npei_pcie_msi_rcv_b2_s cn52xxp1;
- struct cvmx_npei_pcie_msi_rcv_b2_s cn56xx;
- struct cvmx_npei_pcie_msi_rcv_b2_s cn56xxp1;
};
union cvmx_npei_pcie_msi_rcv_b3 {
@@ -3368,10 +3193,6 @@ union cvmx_npei_pcie_msi_rcv_b3 {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pcie_msi_rcv_b3_s cn52xx;
- struct cvmx_npei_pcie_msi_rcv_b3_s cn52xxp1;
- struct cvmx_npei_pcie_msi_rcv_b3_s cn56xx;
- struct cvmx_npei_pcie_msi_rcv_b3_s cn56xxp1;
};
union cvmx_npei_pktx_cnts {
@@ -3387,8 +3208,6 @@ union cvmx_npei_pktx_cnts {
uint64_t reserved_54_63:10;
#endif
} s;
- struct cvmx_npei_pktx_cnts_s cn52xx;
- struct cvmx_npei_pktx_cnts_s cn56xx;
};
union cvmx_npei_pktx_in_bp {
@@ -3402,8 +3221,6 @@ union cvmx_npei_pktx_in_bp {
uint64_t wmark:32;
#endif
} s;
- struct cvmx_npei_pktx_in_bp_s cn52xx;
- struct cvmx_npei_pktx_in_bp_s cn56xx;
};
union cvmx_npei_pktx_instr_baddr {
@@ -3417,8 +3234,6 @@ union cvmx_npei_pktx_instr_baddr {
uint64_t addr:61;
#endif
} s;
- struct cvmx_npei_pktx_instr_baddr_s cn52xx;
- struct cvmx_npei_pktx_instr_baddr_s cn56xx;
};
union cvmx_npei_pktx_instr_baoff_dbell {
@@ -3432,8 +3247,6 @@ union cvmx_npei_pktx_instr_baoff_dbell {
uint64_t aoff:32;
#endif
} s;
- struct cvmx_npei_pktx_instr_baoff_dbell_s cn52xx;
- struct cvmx_npei_pktx_instr_baoff_dbell_s cn56xx;
};
union cvmx_npei_pktx_instr_fifo_rsize {
@@ -3453,8 +3266,6 @@ union cvmx_npei_pktx_instr_fifo_rsize {
uint64_t max:9;
#endif
} s;
- struct cvmx_npei_pktx_instr_fifo_rsize_s cn52xx;
- struct cvmx_npei_pktx_instr_fifo_rsize_s cn56xx;
};
union cvmx_npei_pktx_instr_header {
@@ -3490,8 +3301,6 @@ union cvmx_npei_pktx_instr_header {
uint64_t reserved_44_63:20;
#endif
} s;
- struct cvmx_npei_pktx_instr_header_s cn52xx;
- struct cvmx_npei_pktx_instr_header_s cn56xx;
};
union cvmx_npei_pktx_slist_baddr {
@@ -3505,8 +3314,6 @@ union cvmx_npei_pktx_slist_baddr {
uint64_t addr:60;
#endif
} s;
- struct cvmx_npei_pktx_slist_baddr_s cn52xx;
- struct cvmx_npei_pktx_slist_baddr_s cn56xx;
};
union cvmx_npei_pktx_slist_baoff_dbell {
@@ -3520,8 +3327,6 @@ union cvmx_npei_pktx_slist_baoff_dbell {
uint64_t aoff:32;
#endif
} s;
- struct cvmx_npei_pktx_slist_baoff_dbell_s cn52xx;
- struct cvmx_npei_pktx_slist_baoff_dbell_s cn56xx;
};
union cvmx_npei_pktx_slist_fifo_rsize {
@@ -3535,8 +3340,6 @@ union cvmx_npei_pktx_slist_fifo_rsize {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pktx_slist_fifo_rsize_s cn52xx;
- struct cvmx_npei_pktx_slist_fifo_rsize_s cn56xx;
};
union cvmx_npei_pkt_cnt_int {
@@ -3550,8 +3353,6 @@ union cvmx_npei_pkt_cnt_int {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_cnt_int_s cn52xx;
- struct cvmx_npei_pkt_cnt_int_s cn56xx;
};
union cvmx_npei_pkt_cnt_int_enb {
@@ -3565,8 +3366,6 @@ union cvmx_npei_pkt_cnt_int_enb {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_cnt_int_enb_s cn52xx;
- struct cvmx_npei_pkt_cnt_int_enb_s cn56xx;
};
union cvmx_npei_pkt_data_out_es {
@@ -3578,8 +3377,6 @@ union cvmx_npei_pkt_data_out_es {
uint64_t es:64;
#endif
} s;
- struct cvmx_npei_pkt_data_out_es_s cn52xx;
- struct cvmx_npei_pkt_data_out_es_s cn56xx;
};
union cvmx_npei_pkt_data_out_ns {
@@ -3593,8 +3390,6 @@ union cvmx_npei_pkt_data_out_ns {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_data_out_ns_s cn52xx;
- struct cvmx_npei_pkt_data_out_ns_s cn56xx;
};
union cvmx_npei_pkt_data_out_ror {
@@ -3608,8 +3403,6 @@ union cvmx_npei_pkt_data_out_ror {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_data_out_ror_s cn52xx;
- struct cvmx_npei_pkt_data_out_ror_s cn56xx;
};
union cvmx_npei_pkt_dpaddr {
@@ -3623,8 +3416,6 @@ union cvmx_npei_pkt_dpaddr {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_dpaddr_s cn52xx;
- struct cvmx_npei_pkt_dpaddr_s cn56xx;
};
union cvmx_npei_pkt_in_bp {
@@ -3638,8 +3429,6 @@ union cvmx_npei_pkt_in_bp {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_in_bp_s cn52xx;
- struct cvmx_npei_pkt_in_bp_s cn56xx;
};
union cvmx_npei_pkt_in_donex_cnts {
@@ -3653,8 +3442,6 @@ union cvmx_npei_pkt_in_donex_cnts {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_in_donex_cnts_s cn52xx;
- struct cvmx_npei_pkt_in_donex_cnts_s cn56xx;
};
union cvmx_npei_pkt_in_instr_counts {
@@ -3668,8 +3455,6 @@ union cvmx_npei_pkt_in_instr_counts {
uint64_t wr_cnt:32;
#endif
} s;
- struct cvmx_npei_pkt_in_instr_counts_s cn52xx;
- struct cvmx_npei_pkt_in_instr_counts_s cn56xx;
};
union cvmx_npei_pkt_in_pcie_port {
@@ -3681,8 +3466,6 @@ union cvmx_npei_pkt_in_pcie_port {
uint64_t pp:64;
#endif
} s;
- struct cvmx_npei_pkt_in_pcie_port_s cn52xx;
- struct cvmx_npei_pkt_in_pcie_port_s cn56xx;
};
union cvmx_npei_pkt_input_control {
@@ -3712,8 +3495,6 @@ union cvmx_npei_pkt_input_control {
uint64_t reserved_23_63:41;
#endif
} s;
- struct cvmx_npei_pkt_input_control_s cn52xx;
- struct cvmx_npei_pkt_input_control_s cn56xx;
};
union cvmx_npei_pkt_instr_enb {
@@ -3727,8 +3508,6 @@ union cvmx_npei_pkt_instr_enb {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_instr_enb_s cn52xx;
- struct cvmx_npei_pkt_instr_enb_s cn56xx;
};
union cvmx_npei_pkt_instr_rd_size {
@@ -3740,8 +3519,6 @@ union cvmx_npei_pkt_instr_rd_size {
uint64_t rdsize:64;
#endif
} s;
- struct cvmx_npei_pkt_instr_rd_size_s cn52xx;
- struct cvmx_npei_pkt_instr_rd_size_s cn56xx;
};
union cvmx_npei_pkt_instr_size {
@@ -3755,8 +3532,6 @@ union cvmx_npei_pkt_instr_size {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_instr_size_s cn52xx;
- struct cvmx_npei_pkt_instr_size_s cn56xx;
};
union cvmx_npei_pkt_int_levels {
@@ -3772,8 +3547,6 @@ union cvmx_npei_pkt_int_levels {
uint64_t reserved_54_63:10;
#endif
} s;
- struct cvmx_npei_pkt_int_levels_s cn52xx;
- struct cvmx_npei_pkt_int_levels_s cn56xx;
};
union cvmx_npei_pkt_iptr {
@@ -3787,8 +3560,6 @@ union cvmx_npei_pkt_iptr {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_iptr_s cn52xx;
- struct cvmx_npei_pkt_iptr_s cn56xx;
};
union cvmx_npei_pkt_out_bmode {
@@ -3802,8 +3573,6 @@ union cvmx_npei_pkt_out_bmode {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_out_bmode_s cn52xx;
- struct cvmx_npei_pkt_out_bmode_s cn56xx;
};
union cvmx_npei_pkt_out_enb {
@@ -3817,8 +3586,6 @@ union cvmx_npei_pkt_out_enb {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_out_enb_s cn52xx;
- struct cvmx_npei_pkt_out_enb_s cn56xx;
};
union cvmx_npei_pkt_output_wmark {
@@ -3832,8 +3599,6 @@ union cvmx_npei_pkt_output_wmark {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_output_wmark_s cn52xx;
- struct cvmx_npei_pkt_output_wmark_s cn56xx;
};
union cvmx_npei_pkt_pcie_port {
@@ -3845,8 +3610,6 @@ union cvmx_npei_pkt_pcie_port {
uint64_t pp:64;
#endif
} s;
- struct cvmx_npei_pkt_pcie_port_s cn52xx;
- struct cvmx_npei_pkt_pcie_port_s cn56xx;
};
union cvmx_npei_pkt_port_in_rst {
@@ -3860,8 +3623,6 @@ union cvmx_npei_pkt_port_in_rst {
uint64_t in_rst:32;
#endif
} s;
- struct cvmx_npei_pkt_port_in_rst_s cn52xx;
- struct cvmx_npei_pkt_port_in_rst_s cn56xx;
};
union cvmx_npei_pkt_slist_es {
@@ -3873,8 +3634,6 @@ union cvmx_npei_pkt_slist_es {
uint64_t es:64;
#endif
} s;
- struct cvmx_npei_pkt_slist_es_s cn52xx;
- struct cvmx_npei_pkt_slist_es_s cn56xx;
};
union cvmx_npei_pkt_slist_id_size {
@@ -3890,8 +3649,6 @@ union cvmx_npei_pkt_slist_id_size {
uint64_t reserved_23_63:41;
#endif
} s;
- struct cvmx_npei_pkt_slist_id_size_s cn52xx;
- struct cvmx_npei_pkt_slist_id_size_s cn56xx;
};
union cvmx_npei_pkt_slist_ns {
@@ -3905,8 +3662,6 @@ union cvmx_npei_pkt_slist_ns {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_slist_ns_s cn52xx;
- struct cvmx_npei_pkt_slist_ns_s cn56xx;
};
union cvmx_npei_pkt_slist_ror {
@@ -3920,8 +3675,6 @@ union cvmx_npei_pkt_slist_ror {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_slist_ror_s cn52xx;
- struct cvmx_npei_pkt_slist_ror_s cn56xx;
};
union cvmx_npei_pkt_time_int {
@@ -3935,8 +3688,6 @@ union cvmx_npei_pkt_time_int {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_time_int_s cn52xx;
- struct cvmx_npei_pkt_time_int_s cn56xx;
};
union cvmx_npei_pkt_time_int_enb {
@@ -3950,8 +3701,6 @@ union cvmx_npei_pkt_time_int_enb {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_pkt_time_int_enb_s cn52xx;
- struct cvmx_npei_pkt_time_int_enb_s cn56xx;
};
union cvmx_npei_rsl_int_blocks {
@@ -4019,10 +3768,6 @@ union cvmx_npei_rsl_int_blocks {
uint64_t reserved_31_63:33;
#endif
} s;
- struct cvmx_npei_rsl_int_blocks_s cn52xx;
- struct cvmx_npei_rsl_int_blocks_s cn52xxp1;
- struct cvmx_npei_rsl_int_blocks_s cn56xx;
- struct cvmx_npei_rsl_int_blocks_s cn56xxp1;
};
union cvmx_npei_scratch_1 {
@@ -4034,10 +3779,6 @@ union cvmx_npei_scratch_1 {
uint64_t data:64;
#endif
} s;
- struct cvmx_npei_scratch_1_s cn52xx;
- struct cvmx_npei_scratch_1_s cn52xxp1;
- struct cvmx_npei_scratch_1_s cn56xx;
- struct cvmx_npei_scratch_1_s cn56xxp1;
};
union cvmx_npei_state1 {
@@ -4055,10 +3796,6 @@ union cvmx_npei_state1 {
uint64_t cpl1:12;
#endif
} s;
- struct cvmx_npei_state1_s cn52xx;
- struct cvmx_npei_state1_s cn52xxp1;
- struct cvmx_npei_state1_s cn56xx;
- struct cvmx_npei_state1_s cn56xxp1;
};
union cvmx_npei_state2 {
@@ -4082,10 +3819,6 @@ union cvmx_npei_state2 {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_npei_state2_s cn52xx;
- struct cvmx_npei_state2_s cn52xxp1;
- struct cvmx_npei_state2_s cn56xx;
- struct cvmx_npei_state2_s cn56xxp1;
};
union cvmx_npei_state3 {
@@ -4105,10 +3838,6 @@ union cvmx_npei_state3 {
uint64_t reserved_56_63:8;
#endif
} s;
- struct cvmx_npei_state3_s cn52xx;
- struct cvmx_npei_state3_s cn52xxp1;
- struct cvmx_npei_state3_s cn56xx;
- struct cvmx_npei_state3_s cn56xxp1;
};
union cvmx_npei_win_rd_addr {
@@ -4126,10 +3855,6 @@ union cvmx_npei_win_rd_addr {
uint64_t reserved_51_63:13;
#endif
} s;
- struct cvmx_npei_win_rd_addr_s cn52xx;
- struct cvmx_npei_win_rd_addr_s cn52xxp1;
- struct cvmx_npei_win_rd_addr_s cn56xx;
- struct cvmx_npei_win_rd_addr_s cn56xxp1;
};
union cvmx_npei_win_rd_data {
@@ -4141,10 +3866,6 @@ union cvmx_npei_win_rd_data {
uint64_t rd_data:64;
#endif
} s;
- struct cvmx_npei_win_rd_data_s cn52xx;
- struct cvmx_npei_win_rd_data_s cn52xxp1;
- struct cvmx_npei_win_rd_data_s cn56xx;
- struct cvmx_npei_win_rd_data_s cn56xxp1;
};
union cvmx_npei_win_wr_addr {
@@ -4162,10 +3883,6 @@ union cvmx_npei_win_wr_addr {
uint64_t reserved_49_63:15;
#endif
} s;
- struct cvmx_npei_win_wr_addr_s cn52xx;
- struct cvmx_npei_win_wr_addr_s cn52xxp1;
- struct cvmx_npei_win_wr_addr_s cn56xx;
- struct cvmx_npei_win_wr_addr_s cn56xxp1;
};
union cvmx_npei_win_wr_data {
@@ -4177,10 +3894,6 @@ union cvmx_npei_win_wr_data {
uint64_t wr_data:64;
#endif
} s;
- struct cvmx_npei_win_wr_data_s cn52xx;
- struct cvmx_npei_win_wr_data_s cn52xxp1;
- struct cvmx_npei_win_wr_data_s cn56xx;
- struct cvmx_npei_win_wr_data_s cn56xxp1;
};
union cvmx_npei_win_wr_mask {
@@ -4194,10 +3907,6 @@ union cvmx_npei_win_wr_mask {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_npei_win_wr_mask_s cn52xx;
- struct cvmx_npei_win_wr_mask_s cn52xxp1;
- struct cvmx_npei_win_wr_mask_s cn56xx;
- struct cvmx_npei_win_wr_mask_s cn56xxp1;
};
union cvmx_npei_window_ctl {
@@ -4211,10 +3920,6 @@ union cvmx_npei_window_ctl {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npei_window_ctl_s cn52xx;
- struct cvmx_npei_window_ctl_s cn52xxp1;
- struct cvmx_npei_window_ctl_s cn56xx;
- struct cvmx_npei_window_ctl_s cn56xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-npi-defs.h b/arch/mips/include/asm/octeon/cvmx-npi-defs.h
index 129bb250e534..ba4967fda480 100644
--- a/arch/mips/include/asm/octeon/cvmx-npi-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-npi-defs.h
@@ -160,13 +160,6 @@ union cvmx_npi_base_addr_inputx {
uint64_t baddr:61;
#endif
} s;
- struct cvmx_npi_base_addr_inputx_s cn30xx;
- struct cvmx_npi_base_addr_inputx_s cn31xx;
- struct cvmx_npi_base_addr_inputx_s cn38xx;
- struct cvmx_npi_base_addr_inputx_s cn38xxp2;
- struct cvmx_npi_base_addr_inputx_s cn50xx;
- struct cvmx_npi_base_addr_inputx_s cn58xx;
- struct cvmx_npi_base_addr_inputx_s cn58xxp1;
};
union cvmx_npi_base_addr_outputx {
@@ -180,13 +173,6 @@ union cvmx_npi_base_addr_outputx {
uint64_t baddr:61;
#endif
} s;
- struct cvmx_npi_base_addr_outputx_s cn30xx;
- struct cvmx_npi_base_addr_outputx_s cn31xx;
- struct cvmx_npi_base_addr_outputx_s cn38xx;
- struct cvmx_npi_base_addr_outputx_s cn38xxp2;
- struct cvmx_npi_base_addr_outputx_s cn50xx;
- struct cvmx_npi_base_addr_outputx_s cn58xx;
- struct cvmx_npi_base_addr_outputx_s cn58xxp1;
};
union cvmx_npi_bist_status {
@@ -281,9 +267,6 @@ union cvmx_npi_bist_status {
uint64_t reserved_20_63:44;
#endif
} cn30xx;
- struct cvmx_npi_bist_status_s cn31xx;
- struct cvmx_npi_bist_status_s cn38xx;
- struct cvmx_npi_bist_status_s cn38xxp2;
struct cvmx_npi_bist_status_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -329,8 +312,6 @@ union cvmx_npi_bist_status {
uint64_t reserved_20_63:44;
#endif
} cn50xx;
- struct cvmx_npi_bist_status_s cn58xx;
- struct cvmx_npi_bist_status_s cn58xxp1;
};
union cvmx_npi_buff_size_outputx {
@@ -346,13 +327,6 @@ union cvmx_npi_buff_size_outputx {
uint64_t reserved_23_63:41;
#endif
} s;
- struct cvmx_npi_buff_size_outputx_s cn30xx;
- struct cvmx_npi_buff_size_outputx_s cn31xx;
- struct cvmx_npi_buff_size_outputx_s cn38xx;
- struct cvmx_npi_buff_size_outputx_s cn38xxp2;
- struct cvmx_npi_buff_size_outputx_s cn50xx;
- struct cvmx_npi_buff_size_outputx_s cn58xx;
- struct cvmx_npi_buff_size_outputx_s cn58xxp1;
};
union cvmx_npi_comp_ctl {
@@ -368,9 +342,6 @@ union cvmx_npi_comp_ctl {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_npi_comp_ctl_s cn50xx;
- struct cvmx_npi_comp_ctl_s cn58xx;
- struct cvmx_npi_comp_ctl_s cn58xxp1;
};
union cvmx_npi_ctl_status {
@@ -498,11 +469,6 @@ union cvmx_npi_ctl_status {
uint64_t reserved_63_63:1;
#endif
} cn31xx;
- struct cvmx_npi_ctl_status_s cn38xx;
- struct cvmx_npi_ctl_status_s cn38xxp2;
- struct cvmx_npi_ctl_status_cn31xx cn50xx;
- struct cvmx_npi_ctl_status_s cn58xx;
- struct cvmx_npi_ctl_status_s cn58xxp1;
};
union cvmx_npi_dbg_select {
@@ -516,13 +482,6 @@ union cvmx_npi_dbg_select {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_npi_dbg_select_s cn30xx;
- struct cvmx_npi_dbg_select_s cn31xx;
- struct cvmx_npi_dbg_select_s cn38xx;
- struct cvmx_npi_dbg_select_s cn38xxp2;
- struct cvmx_npi_dbg_select_s cn50xx;
- struct cvmx_npi_dbg_select_s cn58xx;
- struct cvmx_npi_dbg_select_s cn58xxp1;
};
union cvmx_npi_dma_control {
@@ -558,13 +517,6 @@ union cvmx_npi_dma_control {
uint64_t reserved_36_63:28;
#endif
} s;
- struct cvmx_npi_dma_control_s cn30xx;
- struct cvmx_npi_dma_control_s cn31xx;
- struct cvmx_npi_dma_control_s cn38xx;
- struct cvmx_npi_dma_control_s cn38xxp2;
- struct cvmx_npi_dma_control_s cn50xx;
- struct cvmx_npi_dma_control_s cn58xx;
- struct cvmx_npi_dma_control_s cn58xxp1;
};
union cvmx_npi_dma_highp_counts {
@@ -580,13 +532,6 @@ union cvmx_npi_dma_highp_counts {
uint64_t reserved_39_63:25;
#endif
} s;
- struct cvmx_npi_dma_highp_counts_s cn30xx;
- struct cvmx_npi_dma_highp_counts_s cn31xx;
- struct cvmx_npi_dma_highp_counts_s cn38xx;
- struct cvmx_npi_dma_highp_counts_s cn38xxp2;
- struct cvmx_npi_dma_highp_counts_s cn50xx;
- struct cvmx_npi_dma_highp_counts_s cn58xx;
- struct cvmx_npi_dma_highp_counts_s cn58xxp1;
};
union cvmx_npi_dma_highp_naddr {
@@ -602,13 +547,6 @@ union cvmx_npi_dma_highp_naddr {
uint64_t reserved_40_63:24;
#endif
} s;
- struct cvmx_npi_dma_highp_naddr_s cn30xx;
- struct cvmx_npi_dma_highp_naddr_s cn31xx;
- struct cvmx_npi_dma_highp_naddr_s cn38xx;
- struct cvmx_npi_dma_highp_naddr_s cn38xxp2;
- struct cvmx_npi_dma_highp_naddr_s cn50xx;
- struct cvmx_npi_dma_highp_naddr_s cn58xx;
- struct cvmx_npi_dma_highp_naddr_s cn58xxp1;
};
union cvmx_npi_dma_lowp_counts {
@@ -624,13 +562,6 @@ union cvmx_npi_dma_lowp_counts {
uint64_t reserved_39_63:25;
#endif
} s;
- struct cvmx_npi_dma_lowp_counts_s cn30xx;
- struct cvmx_npi_dma_lowp_counts_s cn31xx;
- struct cvmx_npi_dma_lowp_counts_s cn38xx;
- struct cvmx_npi_dma_lowp_counts_s cn38xxp2;
- struct cvmx_npi_dma_lowp_counts_s cn50xx;
- struct cvmx_npi_dma_lowp_counts_s cn58xx;
- struct cvmx_npi_dma_lowp_counts_s cn58xxp1;
};
union cvmx_npi_dma_lowp_naddr {
@@ -646,13 +577,6 @@ union cvmx_npi_dma_lowp_naddr {
uint64_t reserved_40_63:24;
#endif
} s;
- struct cvmx_npi_dma_lowp_naddr_s cn30xx;
- struct cvmx_npi_dma_lowp_naddr_s cn31xx;
- struct cvmx_npi_dma_lowp_naddr_s cn38xx;
- struct cvmx_npi_dma_lowp_naddr_s cn38xxp2;
- struct cvmx_npi_dma_lowp_naddr_s cn50xx;
- struct cvmx_npi_dma_lowp_naddr_s cn58xx;
- struct cvmx_npi_dma_lowp_naddr_s cn58xxp1;
};
union cvmx_npi_highp_dbell {
@@ -666,13 +590,6 @@ union cvmx_npi_highp_dbell {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_npi_highp_dbell_s cn30xx;
- struct cvmx_npi_highp_dbell_s cn31xx;
- struct cvmx_npi_highp_dbell_s cn38xx;
- struct cvmx_npi_highp_dbell_s cn38xxp2;
- struct cvmx_npi_highp_dbell_s cn50xx;
- struct cvmx_npi_highp_dbell_s cn58xx;
- struct cvmx_npi_highp_dbell_s cn58xxp1;
};
union cvmx_npi_highp_ibuff_saddr {
@@ -686,13 +603,6 @@ union cvmx_npi_highp_ibuff_saddr {
uint64_t reserved_36_63:28;
#endif
} s;
- struct cvmx_npi_highp_ibuff_saddr_s cn30xx;
- struct cvmx_npi_highp_ibuff_saddr_s cn31xx;
- struct cvmx_npi_highp_ibuff_saddr_s cn38xx;
- struct cvmx_npi_highp_ibuff_saddr_s cn38xxp2;
- struct cvmx_npi_highp_ibuff_saddr_s cn50xx;
- struct cvmx_npi_highp_ibuff_saddr_s cn58xx;
- struct cvmx_npi_highp_ibuff_saddr_s cn58xxp1;
};
union cvmx_npi_input_control {
@@ -745,12 +655,6 @@ union cvmx_npi_input_control {
uint64_t reserved_22_63:42;
#endif
} cn30xx;
- struct cvmx_npi_input_control_cn30xx cn31xx;
- struct cvmx_npi_input_control_s cn38xx;
- struct cvmx_npi_input_control_cn30xx cn38xxp2;
- struct cvmx_npi_input_control_s cn50xx;
- struct cvmx_npi_input_control_s cn58xx;
- struct cvmx_npi_input_control_s cn58xxp1;
};
union cvmx_npi_int_enb {
@@ -1094,7 +998,6 @@ union cvmx_npi_int_enb {
uint64_t reserved_62_63:2;
#endif
} cn31xx;
- struct cvmx_npi_int_enb_s cn38xx;
struct cvmx_npi_int_enb_cn38xxp2 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_42_63:22;
@@ -1186,9 +1089,6 @@ union cvmx_npi_int_enb {
uint64_t reserved_42_63:22;
#endif
} cn38xxp2;
- struct cvmx_npi_int_enb_cn31xx cn50xx;
- struct cvmx_npi_int_enb_s cn58xx;
- struct cvmx_npi_int_enb_s cn58xxp1;
};
union cvmx_npi_int_sum {
@@ -1532,7 +1432,6 @@ union cvmx_npi_int_sum {
uint64_t reserved_62_63:2;
#endif
} cn31xx;
- struct cvmx_npi_int_sum_s cn38xx;
struct cvmx_npi_int_sum_cn38xxp2 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_42_63:22;
@@ -1624,9 +1523,6 @@ union cvmx_npi_int_sum {
uint64_t reserved_42_63:22;
#endif
} cn38xxp2;
- struct cvmx_npi_int_sum_cn31xx cn50xx;
- struct cvmx_npi_int_sum_s cn58xx;
- struct cvmx_npi_int_sum_s cn58xxp1;
};
union cvmx_npi_lowp_dbell {
@@ -1640,13 +1536,6 @@ union cvmx_npi_lowp_dbell {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_npi_lowp_dbell_s cn30xx;
- struct cvmx_npi_lowp_dbell_s cn31xx;
- struct cvmx_npi_lowp_dbell_s cn38xx;
- struct cvmx_npi_lowp_dbell_s cn38xxp2;
- struct cvmx_npi_lowp_dbell_s cn50xx;
- struct cvmx_npi_lowp_dbell_s cn58xx;
- struct cvmx_npi_lowp_dbell_s cn58xxp1;
};
union cvmx_npi_lowp_ibuff_saddr {
@@ -1660,13 +1549,6 @@ union cvmx_npi_lowp_ibuff_saddr {
uint64_t reserved_36_63:28;
#endif
} s;
- struct cvmx_npi_lowp_ibuff_saddr_s cn30xx;
- struct cvmx_npi_lowp_ibuff_saddr_s cn31xx;
- struct cvmx_npi_lowp_ibuff_saddr_s cn38xx;
- struct cvmx_npi_lowp_ibuff_saddr_s cn38xxp2;
- struct cvmx_npi_lowp_ibuff_saddr_s cn50xx;
- struct cvmx_npi_lowp_ibuff_saddr_s cn58xx;
- struct cvmx_npi_lowp_ibuff_saddr_s cn58xxp1;
};
union cvmx_npi_mem_access_subidx {
@@ -1696,7 +1578,6 @@ union cvmx_npi_mem_access_subidx {
uint64_t reserved_38_63:26;
#endif
} s;
- struct cvmx_npi_mem_access_subidx_s cn30xx;
struct cvmx_npi_mem_access_subidx_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_36_63:28;
@@ -1718,11 +1599,6 @@ union cvmx_npi_mem_access_subidx {
uint64_t reserved_36_63:28;
#endif
} cn31xx;
- struct cvmx_npi_mem_access_subidx_s cn38xx;
- struct cvmx_npi_mem_access_subidx_cn31xx cn38xxp2;
- struct cvmx_npi_mem_access_subidx_s cn50xx;
- struct cvmx_npi_mem_access_subidx_s cn58xx;
- struct cvmx_npi_mem_access_subidx_s cn58xxp1;
};
union cvmx_npi_msi_rcv {
@@ -1734,13 +1610,6 @@ union cvmx_npi_msi_rcv {
uint64_t int_vec:64;
#endif
} s;
- struct cvmx_npi_msi_rcv_s cn30xx;
- struct cvmx_npi_msi_rcv_s cn31xx;
- struct cvmx_npi_msi_rcv_s cn38xx;
- struct cvmx_npi_msi_rcv_s cn38xxp2;
- struct cvmx_npi_msi_rcv_s cn50xx;
- struct cvmx_npi_msi_rcv_s cn58xx;
- struct cvmx_npi_msi_rcv_s cn58xxp1;
};
union cvmx_npi_num_desc_outputx {
@@ -1754,13 +1623,6 @@ union cvmx_npi_num_desc_outputx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npi_num_desc_outputx_s cn30xx;
- struct cvmx_npi_num_desc_outputx_s cn31xx;
- struct cvmx_npi_num_desc_outputx_s cn38xx;
- struct cvmx_npi_num_desc_outputx_s cn38xxp2;
- struct cvmx_npi_num_desc_outputx_s cn50xx;
- struct cvmx_npi_num_desc_outputx_s cn58xx;
- struct cvmx_npi_num_desc_outputx_s cn58xxp1;
};
union cvmx_npi_output_control {
@@ -1932,7 +1794,6 @@ union cvmx_npi_output_control {
uint64_t reserved_46_63:18;
#endif
} cn31xx;
- struct cvmx_npi_output_control_s cn38xx;
struct cvmx_npi_output_control_cn38xxp2 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_48_63:16;
@@ -2069,8 +1930,6 @@ union cvmx_npi_output_control {
uint64_t reserved_49_63:15;
#endif
} cn50xx;
- struct cvmx_npi_output_control_s cn58xx;
- struct cvmx_npi_output_control_s cn58xxp1;
};
union cvmx_npi_px_dbpair_addr {
@@ -2086,13 +1945,6 @@ union cvmx_npi_px_dbpair_addr {
uint64_t reserved_63_63:1;
#endif
} s;
- struct cvmx_npi_px_dbpair_addr_s cn30xx;
- struct cvmx_npi_px_dbpair_addr_s cn31xx;
- struct cvmx_npi_px_dbpair_addr_s cn38xx;
- struct cvmx_npi_px_dbpair_addr_s cn38xxp2;
- struct cvmx_npi_px_dbpair_addr_s cn50xx;
- struct cvmx_npi_px_dbpair_addr_s cn58xx;
- struct cvmx_npi_px_dbpair_addr_s cn58xxp1;
};
union cvmx_npi_px_instr_addr {
@@ -2106,13 +1958,6 @@ union cvmx_npi_px_instr_addr {
uint64_t state:3;
#endif
} s;
- struct cvmx_npi_px_instr_addr_s cn30xx;
- struct cvmx_npi_px_instr_addr_s cn31xx;
- struct cvmx_npi_px_instr_addr_s cn38xx;
- struct cvmx_npi_px_instr_addr_s cn38xxp2;
- struct cvmx_npi_px_instr_addr_s cn50xx;
- struct cvmx_npi_px_instr_addr_s cn58xx;
- struct cvmx_npi_px_instr_addr_s cn58xxp1;
};
union cvmx_npi_px_instr_cnts {
@@ -2128,13 +1973,6 @@ union cvmx_npi_px_instr_cnts {
uint64_t reserved_38_63:26;
#endif
} s;
- struct cvmx_npi_px_instr_cnts_s cn30xx;
- struct cvmx_npi_px_instr_cnts_s cn31xx;
- struct cvmx_npi_px_instr_cnts_s cn38xx;
- struct cvmx_npi_px_instr_cnts_s cn38xxp2;
- struct cvmx_npi_px_instr_cnts_s cn50xx;
- struct cvmx_npi_px_instr_cnts_s cn58xx;
- struct cvmx_npi_px_instr_cnts_s cn58xxp1;
};
union cvmx_npi_px_pair_cnts {
@@ -2150,13 +1988,6 @@ union cvmx_npi_px_pair_cnts {
uint64_t reserved_37_63:27;
#endif
} s;
- struct cvmx_npi_px_pair_cnts_s cn30xx;
- struct cvmx_npi_px_pair_cnts_s cn31xx;
- struct cvmx_npi_px_pair_cnts_s cn38xx;
- struct cvmx_npi_px_pair_cnts_s cn38xxp2;
- struct cvmx_npi_px_pair_cnts_s cn50xx;
- struct cvmx_npi_px_pair_cnts_s cn58xx;
- struct cvmx_npi_px_pair_cnts_s cn58xxp1;
};
union cvmx_npi_pci_burst_size {
@@ -2172,13 +2003,6 @@ union cvmx_npi_pci_burst_size {
uint64_t reserved_14_63:50;
#endif
} s;
- struct cvmx_npi_pci_burst_size_s cn30xx;
- struct cvmx_npi_pci_burst_size_s cn31xx;
- struct cvmx_npi_pci_burst_size_s cn38xx;
- struct cvmx_npi_pci_burst_size_s cn38xxp2;
- struct cvmx_npi_pci_burst_size_s cn50xx;
- struct cvmx_npi_pci_burst_size_s cn58xx;
- struct cvmx_npi_pci_burst_size_s cn58xxp1;
};
union cvmx_npi_pci_int_arb_cfg {
@@ -2215,12 +2039,6 @@ union cvmx_npi_pci_int_arb_cfg {
uint64_t reserved_5_63:59;
#endif
} cn30xx;
- struct cvmx_npi_pci_int_arb_cfg_cn30xx cn31xx;
- struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xx;
- struct cvmx_npi_pci_int_arb_cfg_cn30xx cn38xxp2;
- struct cvmx_npi_pci_int_arb_cfg_s cn50xx;
- struct cvmx_npi_pci_int_arb_cfg_s cn58xx;
- struct cvmx_npi_pci_int_arb_cfg_s cn58xxp1;
};
union cvmx_npi_pci_read_cmd {
@@ -2234,13 +2052,6 @@ union cvmx_npi_pci_read_cmd {
uint64_t reserved_11_63:53;
#endif
} s;
- struct cvmx_npi_pci_read_cmd_s cn30xx;
- struct cvmx_npi_pci_read_cmd_s cn31xx;
- struct cvmx_npi_pci_read_cmd_s cn38xx;
- struct cvmx_npi_pci_read_cmd_s cn38xxp2;
- struct cvmx_npi_pci_read_cmd_s cn50xx;
- struct cvmx_npi_pci_read_cmd_s cn58xx;
- struct cvmx_npi_pci_read_cmd_s cn58xxp1;
};
union cvmx_npi_port32_instr_hdr {
@@ -2276,13 +2087,6 @@ union cvmx_npi_port32_instr_hdr {
uint64_t reserved_44_63:20;
#endif
} s;
- struct cvmx_npi_port32_instr_hdr_s cn30xx;
- struct cvmx_npi_port32_instr_hdr_s cn31xx;
- struct cvmx_npi_port32_instr_hdr_s cn38xx;
- struct cvmx_npi_port32_instr_hdr_s cn38xxp2;
- struct cvmx_npi_port32_instr_hdr_s cn50xx;
- struct cvmx_npi_port32_instr_hdr_s cn58xx;
- struct cvmx_npi_port32_instr_hdr_s cn58xxp1;
};
union cvmx_npi_port33_instr_hdr {
@@ -2318,12 +2122,6 @@ union cvmx_npi_port33_instr_hdr {
uint64_t reserved_44_63:20;
#endif
} s;
- struct cvmx_npi_port33_instr_hdr_s cn31xx;
- struct cvmx_npi_port33_instr_hdr_s cn38xx;
- struct cvmx_npi_port33_instr_hdr_s cn38xxp2;
- struct cvmx_npi_port33_instr_hdr_s cn50xx;
- struct cvmx_npi_port33_instr_hdr_s cn58xx;
- struct cvmx_npi_port33_instr_hdr_s cn58xxp1;
};
union cvmx_npi_port34_instr_hdr {
@@ -2359,10 +2157,6 @@ union cvmx_npi_port34_instr_hdr {
uint64_t reserved_44_63:20;
#endif
} s;
- struct cvmx_npi_port34_instr_hdr_s cn38xx;
- struct cvmx_npi_port34_instr_hdr_s cn38xxp2;
- struct cvmx_npi_port34_instr_hdr_s cn58xx;
- struct cvmx_npi_port34_instr_hdr_s cn58xxp1;
};
union cvmx_npi_port35_instr_hdr {
@@ -2398,10 +2192,6 @@ union cvmx_npi_port35_instr_hdr {
uint64_t reserved_44_63:20;
#endif
} s;
- struct cvmx_npi_port35_instr_hdr_s cn38xx;
- struct cvmx_npi_port35_instr_hdr_s cn38xxp2;
- struct cvmx_npi_port35_instr_hdr_s cn58xx;
- struct cvmx_npi_port35_instr_hdr_s cn58xxp1;
};
union cvmx_npi_port_bp_control {
@@ -2417,13 +2207,6 @@ union cvmx_npi_port_bp_control {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_npi_port_bp_control_s cn30xx;
- struct cvmx_npi_port_bp_control_s cn31xx;
- struct cvmx_npi_port_bp_control_s cn38xx;
- struct cvmx_npi_port_bp_control_s cn38xxp2;
- struct cvmx_npi_port_bp_control_s cn50xx;
- struct cvmx_npi_port_bp_control_s cn58xx;
- struct cvmx_npi_port_bp_control_s cn58xxp1;
};
union cvmx_npi_rsl_int_blocks {
@@ -2566,7 +2349,6 @@ union cvmx_npi_rsl_int_blocks {
uint64_t reserved_32_63:32;
#endif
} cn30xx;
- struct cvmx_npi_rsl_int_blocks_cn30xx cn31xx;
struct cvmx_npi_rsl_int_blocks_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63:32;
@@ -2638,7 +2420,6 @@ union cvmx_npi_rsl_int_blocks {
uint64_t reserved_32_63:32;
#endif
} cn38xx;
- struct cvmx_npi_rsl_int_blocks_cn38xx cn38xxp2;
struct cvmx_npi_rsl_int_blocks_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_31_63:33;
@@ -2702,8 +2483,6 @@ union cvmx_npi_rsl_int_blocks {
uint64_t reserved_31_63:33;
#endif
} cn50xx;
- struct cvmx_npi_rsl_int_blocks_cn38xx cn58xx;
- struct cvmx_npi_rsl_int_blocks_cn38xx cn58xxp1;
};
union cvmx_npi_size_inputx {
@@ -2717,13 +2496,6 @@ union cvmx_npi_size_inputx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npi_size_inputx_s cn30xx;
- struct cvmx_npi_size_inputx_s cn31xx;
- struct cvmx_npi_size_inputx_s cn38xx;
- struct cvmx_npi_size_inputx_s cn38xxp2;
- struct cvmx_npi_size_inputx_s cn50xx;
- struct cvmx_npi_size_inputx_s cn58xx;
- struct cvmx_npi_size_inputx_s cn58xxp1;
};
union cvmx_npi_win_read_to {
@@ -2737,13 +2509,6 @@ union cvmx_npi_win_read_to {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_npi_win_read_to_s cn30xx;
- struct cvmx_npi_win_read_to_s cn31xx;
- struct cvmx_npi_win_read_to_s cn38xx;
- struct cvmx_npi_win_read_to_s cn38xxp2;
- struct cvmx_npi_win_read_to_s cn50xx;
- struct cvmx_npi_win_read_to_s cn58xx;
- struct cvmx_npi_win_read_to_s cn58xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pci-defs.h b/arch/mips/include/asm/octeon/cvmx-pci-defs.h
index 25d603f18298..be56b693b53b 100644
--- a/arch/mips/include/asm/octeon/cvmx-pci-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pci-defs.h
@@ -131,13 +131,6 @@ union cvmx_pci_bar1_indexx {
uint32_t reserved_18_31:14;
#endif
} s;
- struct cvmx_pci_bar1_indexx_s cn30xx;
- struct cvmx_pci_bar1_indexx_s cn31xx;
- struct cvmx_pci_bar1_indexx_s cn38xx;
- struct cvmx_pci_bar1_indexx_s cn38xxp2;
- struct cvmx_pci_bar1_indexx_s cn50xx;
- struct cvmx_pci_bar1_indexx_s cn58xx;
- struct cvmx_pci_bar1_indexx_s cn58xxp1;
};
union cvmx_pci_bist_reg {
@@ -169,7 +162,6 @@ union cvmx_pci_bist_reg {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_pci_bist_reg_s cn50xx;
};
union cvmx_pci_cfg00 {
@@ -183,13 +175,6 @@ union cvmx_pci_cfg00 {
uint32_t devid:16;
#endif
} s;
- struct cvmx_pci_cfg00_s cn30xx;
- struct cvmx_pci_cfg00_s cn31xx;
- struct cvmx_pci_cfg00_s cn38xx;
- struct cvmx_pci_cfg00_s cn38xxp2;
- struct cvmx_pci_cfg00_s cn50xx;
- struct cvmx_pci_cfg00_s cn58xx;
- struct cvmx_pci_cfg00_s cn58xxp1;
};
union cvmx_pci_cfg01 {
@@ -247,13 +232,6 @@ union cvmx_pci_cfg01 {
uint32_t dpe:1;
#endif
} s;
- struct cvmx_pci_cfg01_s cn30xx;
- struct cvmx_pci_cfg01_s cn31xx;
- struct cvmx_pci_cfg01_s cn38xx;
- struct cvmx_pci_cfg01_s cn38xxp2;
- struct cvmx_pci_cfg01_s cn50xx;
- struct cvmx_pci_cfg01_s cn58xx;
- struct cvmx_pci_cfg01_s cn58xxp1;
};
union cvmx_pci_cfg02 {
@@ -267,13 +245,6 @@ union cvmx_pci_cfg02 {
uint32_t cc:24;
#endif
} s;
- struct cvmx_pci_cfg02_s cn30xx;
- struct cvmx_pci_cfg02_s cn31xx;
- struct cvmx_pci_cfg02_s cn38xx;
- struct cvmx_pci_cfg02_s cn38xxp2;
- struct cvmx_pci_cfg02_s cn50xx;
- struct cvmx_pci_cfg02_s cn58xx;
- struct cvmx_pci_cfg02_s cn58xxp1;
};
union cvmx_pci_cfg03 {
@@ -297,13 +268,6 @@ union cvmx_pci_cfg03 {
uint32_t bcap:1;
#endif
} s;
- struct cvmx_pci_cfg03_s cn30xx;
- struct cvmx_pci_cfg03_s cn31xx;
- struct cvmx_pci_cfg03_s cn38xx;
- struct cvmx_pci_cfg03_s cn38xxp2;
- struct cvmx_pci_cfg03_s cn50xx;
- struct cvmx_pci_cfg03_s cn58xx;
- struct cvmx_pci_cfg03_s cn58xxp1;
};
union cvmx_pci_cfg04 {
@@ -323,13 +287,6 @@ union cvmx_pci_cfg04 {
uint32_t lbase:20;
#endif
} s;
- struct cvmx_pci_cfg04_s cn30xx;
- struct cvmx_pci_cfg04_s cn31xx;
- struct cvmx_pci_cfg04_s cn38xx;
- struct cvmx_pci_cfg04_s cn38xxp2;
- struct cvmx_pci_cfg04_s cn50xx;
- struct cvmx_pci_cfg04_s cn58xx;
- struct cvmx_pci_cfg04_s cn58xxp1;
};
union cvmx_pci_cfg05 {
@@ -341,13 +298,6 @@ union cvmx_pci_cfg05 {
uint32_t hbase:32;
#endif
} s;
- struct cvmx_pci_cfg05_s cn30xx;
- struct cvmx_pci_cfg05_s cn31xx;
- struct cvmx_pci_cfg05_s cn38xx;
- struct cvmx_pci_cfg05_s cn38xxp2;
- struct cvmx_pci_cfg05_s cn50xx;
- struct cvmx_pci_cfg05_s cn58xx;
- struct cvmx_pci_cfg05_s cn58xxp1;
};
union cvmx_pci_cfg06 {
@@ -367,13 +317,6 @@ union cvmx_pci_cfg06 {
uint32_t lbase:5;
#endif
} s;
- struct cvmx_pci_cfg06_s cn30xx;
- struct cvmx_pci_cfg06_s cn31xx;
- struct cvmx_pci_cfg06_s cn38xx;
- struct cvmx_pci_cfg06_s cn38xxp2;
- struct cvmx_pci_cfg06_s cn50xx;
- struct cvmx_pci_cfg06_s cn58xx;
- struct cvmx_pci_cfg06_s cn58xxp1;
};
union cvmx_pci_cfg07 {
@@ -385,13 +328,6 @@ union cvmx_pci_cfg07 {
uint32_t hbase:32;
#endif
} s;
- struct cvmx_pci_cfg07_s cn30xx;
- struct cvmx_pci_cfg07_s cn31xx;
- struct cvmx_pci_cfg07_s cn38xx;
- struct cvmx_pci_cfg07_s cn38xxp2;
- struct cvmx_pci_cfg07_s cn50xx;
- struct cvmx_pci_cfg07_s cn58xx;
- struct cvmx_pci_cfg07_s cn58xxp1;
};
union cvmx_pci_cfg08 {
@@ -409,13 +345,6 @@ union cvmx_pci_cfg08 {
uint32_t lbasez:28;
#endif
} s;
- struct cvmx_pci_cfg08_s cn30xx;
- struct cvmx_pci_cfg08_s cn31xx;
- struct cvmx_pci_cfg08_s cn38xx;
- struct cvmx_pci_cfg08_s cn38xxp2;
- struct cvmx_pci_cfg08_s cn50xx;
- struct cvmx_pci_cfg08_s cn58xx;
- struct cvmx_pci_cfg08_s cn58xxp1;
};
union cvmx_pci_cfg09 {
@@ -429,13 +358,6 @@ union cvmx_pci_cfg09 {
uint32_t hbase:25;
#endif
} s;
- struct cvmx_pci_cfg09_s cn30xx;
- struct cvmx_pci_cfg09_s cn31xx;
- struct cvmx_pci_cfg09_s cn38xx;
- struct cvmx_pci_cfg09_s cn38xxp2;
- struct cvmx_pci_cfg09_s cn50xx;
- struct cvmx_pci_cfg09_s cn58xx;
- struct cvmx_pci_cfg09_s cn58xxp1;
};
union cvmx_pci_cfg10 {
@@ -447,13 +369,6 @@ union cvmx_pci_cfg10 {
uint32_t cisp:32;
#endif
} s;
- struct cvmx_pci_cfg10_s cn30xx;
- struct cvmx_pci_cfg10_s cn31xx;
- struct cvmx_pci_cfg10_s cn38xx;
- struct cvmx_pci_cfg10_s cn38xxp2;
- struct cvmx_pci_cfg10_s cn50xx;
- struct cvmx_pci_cfg10_s cn58xx;
- struct cvmx_pci_cfg10_s cn58xxp1;
};
union cvmx_pci_cfg11 {
@@ -467,13 +382,6 @@ union cvmx_pci_cfg11 {
uint32_t ssid:16;
#endif
} s;
- struct cvmx_pci_cfg11_s cn30xx;
- struct cvmx_pci_cfg11_s cn31xx;
- struct cvmx_pci_cfg11_s cn38xx;
- struct cvmx_pci_cfg11_s cn38xxp2;
- struct cvmx_pci_cfg11_s cn50xx;
- struct cvmx_pci_cfg11_s cn58xx;
- struct cvmx_pci_cfg11_s cn58xxp1;
};
union cvmx_pci_cfg12 {
@@ -491,13 +399,6 @@ union cvmx_pci_cfg12 {
uint32_t erbar:16;
#endif
} s;
- struct cvmx_pci_cfg12_s cn30xx;
- struct cvmx_pci_cfg12_s cn31xx;
- struct cvmx_pci_cfg12_s cn38xx;
- struct cvmx_pci_cfg12_s cn38xxp2;
- struct cvmx_pci_cfg12_s cn50xx;
- struct cvmx_pci_cfg12_s cn58xx;
- struct cvmx_pci_cfg12_s cn58xxp1;
};
union cvmx_pci_cfg13 {
@@ -511,13 +412,6 @@ union cvmx_pci_cfg13 {
uint32_t reserved_8_31:24;
#endif
} s;
- struct cvmx_pci_cfg13_s cn30xx;
- struct cvmx_pci_cfg13_s cn31xx;
- struct cvmx_pci_cfg13_s cn38xx;
- struct cvmx_pci_cfg13_s cn38xxp2;
- struct cvmx_pci_cfg13_s cn50xx;
- struct cvmx_pci_cfg13_s cn58xx;
- struct cvmx_pci_cfg13_s cn58xxp1;
};
union cvmx_pci_cfg15 {
@@ -535,13 +429,6 @@ union cvmx_pci_cfg15 {
uint32_t ml:8;
#endif
} s;
- struct cvmx_pci_cfg15_s cn30xx;
- struct cvmx_pci_cfg15_s cn31xx;
- struct cvmx_pci_cfg15_s cn38xx;
- struct cvmx_pci_cfg15_s cn38xxp2;
- struct cvmx_pci_cfg15_s cn50xx;
- struct cvmx_pci_cfg15_s cn58xx;
- struct cvmx_pci_cfg15_s cn58xxp1;
};
union cvmx_pci_cfg16 {
@@ -583,13 +470,6 @@ union cvmx_pci_cfg16 {
uint32_t trdnpr:1;
#endif
} s;
- struct cvmx_pci_cfg16_s cn30xx;
- struct cvmx_pci_cfg16_s cn31xx;
- struct cvmx_pci_cfg16_s cn38xx;
- struct cvmx_pci_cfg16_s cn38xxp2;
- struct cvmx_pci_cfg16_s cn50xx;
- struct cvmx_pci_cfg16_s cn58xx;
- struct cvmx_pci_cfg16_s cn58xxp1;
};
union cvmx_pci_cfg17 {
@@ -601,13 +481,6 @@ union cvmx_pci_cfg17 {
uint32_t tscme:32;
#endif
} s;
- struct cvmx_pci_cfg17_s cn30xx;
- struct cvmx_pci_cfg17_s cn31xx;
- struct cvmx_pci_cfg17_s cn38xx;
- struct cvmx_pci_cfg17_s cn38xxp2;
- struct cvmx_pci_cfg17_s cn50xx;
- struct cvmx_pci_cfg17_s cn58xx;
- struct cvmx_pci_cfg17_s cn58xxp1;
};
union cvmx_pci_cfg18 {
@@ -619,13 +492,6 @@ union cvmx_pci_cfg18 {
uint32_t tdsrps:32;
#endif
} s;
- struct cvmx_pci_cfg18_s cn30xx;
- struct cvmx_pci_cfg18_s cn31xx;
- struct cvmx_pci_cfg18_s cn38xx;
- struct cvmx_pci_cfg18_s cn38xxp2;
- struct cvmx_pci_cfg18_s cn50xx;
- struct cvmx_pci_cfg18_s cn58xx;
- struct cvmx_pci_cfg18_s cn58xxp1;
};
union cvmx_pci_cfg19 {
@@ -671,13 +537,6 @@ union cvmx_pci_cfg19 {
uint32_t mrbcm:1;
#endif
} s;
- struct cvmx_pci_cfg19_s cn30xx;
- struct cvmx_pci_cfg19_s cn31xx;
- struct cvmx_pci_cfg19_s cn38xx;
- struct cvmx_pci_cfg19_s cn38xxp2;
- struct cvmx_pci_cfg19_s cn50xx;
- struct cvmx_pci_cfg19_s cn58xx;
- struct cvmx_pci_cfg19_s cn58xxp1;
};
union cvmx_pci_cfg20 {
@@ -689,13 +548,6 @@ union cvmx_pci_cfg20 {
uint32_t mdsp:32;
#endif
} s;
- struct cvmx_pci_cfg20_s cn30xx;
- struct cvmx_pci_cfg20_s cn31xx;
- struct cvmx_pci_cfg20_s cn38xx;
- struct cvmx_pci_cfg20_s cn38xxp2;
- struct cvmx_pci_cfg20_s cn50xx;
- struct cvmx_pci_cfg20_s cn58xx;
- struct cvmx_pci_cfg20_s cn58xxp1;
};
union cvmx_pci_cfg21 {
@@ -707,13 +559,6 @@ union cvmx_pci_cfg21 {
uint32_t scmre:32;
#endif
} s;
- struct cvmx_pci_cfg21_s cn30xx;
- struct cvmx_pci_cfg21_s cn31xx;
- struct cvmx_pci_cfg21_s cn38xx;
- struct cvmx_pci_cfg21_s cn38xxp2;
- struct cvmx_pci_cfg21_s cn50xx;
- struct cvmx_pci_cfg21_s cn58xx;
- struct cvmx_pci_cfg21_s cn58xxp1;
};
union cvmx_pci_cfg22 {
@@ -737,13 +582,6 @@ union cvmx_pci_cfg22 {
uint32_t mac:7;
#endif
} s;
- struct cvmx_pci_cfg22_s cn30xx;
- struct cvmx_pci_cfg22_s cn31xx;
- struct cvmx_pci_cfg22_s cn38xx;
- struct cvmx_pci_cfg22_s cn38xxp2;
- struct cvmx_pci_cfg22_s cn50xx;
- struct cvmx_pci_cfg22_s cn58xx;
- struct cvmx_pci_cfg22_s cn58xxp1;
};
union cvmx_pci_cfg56 {
@@ -767,13 +605,6 @@ union cvmx_pci_cfg56 {
uint32_t reserved_23_31:9;
#endif
} s;
- struct cvmx_pci_cfg56_s cn30xx;
- struct cvmx_pci_cfg56_s cn31xx;
- struct cvmx_pci_cfg56_s cn38xx;
- struct cvmx_pci_cfg56_s cn38xxp2;
- struct cvmx_pci_cfg56_s cn50xx;
- struct cvmx_pci_cfg56_s cn58xx;
- struct cvmx_pci_cfg56_s cn58xxp1;
};
union cvmx_pci_cfg57 {
@@ -809,13 +640,6 @@ union cvmx_pci_cfg57 {
uint32_t reserved_30_31:2;
#endif
} s;
- struct cvmx_pci_cfg57_s cn30xx;
- struct cvmx_pci_cfg57_s cn31xx;
- struct cvmx_pci_cfg57_s cn38xx;
- struct cvmx_pci_cfg57_s cn38xxp2;
- struct cvmx_pci_cfg57_s cn50xx;
- struct cvmx_pci_cfg57_s cn58xx;
- struct cvmx_pci_cfg57_s cn58xxp1;
};
union cvmx_pci_cfg58 {
@@ -845,13 +669,6 @@ union cvmx_pci_cfg58 {
uint32_t pmes:5;
#endif
} s;
- struct cvmx_pci_cfg58_s cn30xx;
- struct cvmx_pci_cfg58_s cn31xx;
- struct cvmx_pci_cfg58_s cn38xx;
- struct cvmx_pci_cfg58_s cn38xxp2;
- struct cvmx_pci_cfg58_s cn50xx;
- struct cvmx_pci_cfg58_s cn58xx;
- struct cvmx_pci_cfg58_s cn58xxp1;
};
union cvmx_pci_cfg59 {
@@ -881,13 +698,6 @@ union cvmx_pci_cfg59 {
uint32_t pmdia:8;
#endif
} s;
- struct cvmx_pci_cfg59_s cn30xx;
- struct cvmx_pci_cfg59_s cn31xx;
- struct cvmx_pci_cfg59_s cn38xx;
- struct cvmx_pci_cfg59_s cn38xxp2;
- struct cvmx_pci_cfg59_s cn50xx;
- struct cvmx_pci_cfg59_s cn58xx;
- struct cvmx_pci_cfg59_s cn58xxp1;
};
union cvmx_pci_cfg60 {
@@ -911,13 +721,6 @@ union cvmx_pci_cfg60 {
uint32_t reserved_24_31:8;
#endif
} s;
- struct cvmx_pci_cfg60_s cn30xx;
- struct cvmx_pci_cfg60_s cn31xx;
- struct cvmx_pci_cfg60_s cn38xx;
- struct cvmx_pci_cfg60_s cn38xxp2;
- struct cvmx_pci_cfg60_s cn50xx;
- struct cvmx_pci_cfg60_s cn58xx;
- struct cvmx_pci_cfg60_s cn58xxp1;
};
union cvmx_pci_cfg61 {
@@ -931,13 +734,6 @@ union cvmx_pci_cfg61 {
uint32_t msi31t2:30;
#endif
} s;
- struct cvmx_pci_cfg61_s cn30xx;
- struct cvmx_pci_cfg61_s cn31xx;
- struct cvmx_pci_cfg61_s cn38xx;
- struct cvmx_pci_cfg61_s cn38xxp2;
- struct cvmx_pci_cfg61_s cn50xx;
- struct cvmx_pci_cfg61_s cn58xx;
- struct cvmx_pci_cfg61_s cn58xxp1;
};
union cvmx_pci_cfg62 {
@@ -949,13 +745,6 @@ union cvmx_pci_cfg62 {
uint32_t msi:32;
#endif
} s;
- struct cvmx_pci_cfg62_s cn30xx;
- struct cvmx_pci_cfg62_s cn31xx;
- struct cvmx_pci_cfg62_s cn38xx;
- struct cvmx_pci_cfg62_s cn38xxp2;
- struct cvmx_pci_cfg62_s cn50xx;
- struct cvmx_pci_cfg62_s cn58xx;
- struct cvmx_pci_cfg62_s cn58xxp1;
};
union cvmx_pci_cfg63 {
@@ -969,13 +758,6 @@ union cvmx_pci_cfg63 {
uint32_t reserved_16_31:16;
#endif
} s;
- struct cvmx_pci_cfg63_s cn30xx;
- struct cvmx_pci_cfg63_s cn31xx;
- struct cvmx_pci_cfg63_s cn38xx;
- struct cvmx_pci_cfg63_s cn38xxp2;
- struct cvmx_pci_cfg63_s cn50xx;
- struct cvmx_pci_cfg63_s cn58xx;
- struct cvmx_pci_cfg63_s cn58xxp1;
};
union cvmx_pci_cnt_reg {
@@ -997,9 +779,6 @@ union cvmx_pci_cnt_reg {
uint64_t reserved_38_63:26;
#endif
} s;
- struct cvmx_pci_cnt_reg_s cn50xx;
- struct cvmx_pci_cnt_reg_s cn58xx;
- struct cvmx_pci_cnt_reg_s cn58xxp1;
};
union cvmx_pci_ctl_status_2 {
@@ -1053,7 +832,6 @@ union cvmx_pci_ctl_status_2 {
uint32_t reserved_29_31:3;
#endif
} s;
- struct cvmx_pci_ctl_status_2_s cn30xx;
struct cvmx_pci_ctl_status_2_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t reserved_20_31:12;
@@ -1091,11 +869,6 @@ union cvmx_pci_ctl_status_2 {
uint32_t reserved_20_31:12;
#endif
} cn31xx;
- struct cvmx_pci_ctl_status_2_s cn38xx;
- struct cvmx_pci_ctl_status_2_cn31xx cn38xxp2;
- struct cvmx_pci_ctl_status_2_s cn50xx;
- struct cvmx_pci_ctl_status_2_s cn58xx;
- struct cvmx_pci_ctl_status_2_s cn58xxp1;
};
union cvmx_pci_dbellx {
@@ -1109,13 +882,6 @@ union cvmx_pci_dbellx {
uint32_t reserved_16_31:16;
#endif
} s;
- struct cvmx_pci_dbellx_s cn30xx;
- struct cvmx_pci_dbellx_s cn31xx;
- struct cvmx_pci_dbellx_s cn38xx;
- struct cvmx_pci_dbellx_s cn38xxp2;
- struct cvmx_pci_dbellx_s cn50xx;
- struct cvmx_pci_dbellx_s cn58xx;
- struct cvmx_pci_dbellx_s cn58xxp1;
};
union cvmx_pci_dma_cntx {
@@ -1127,13 +893,6 @@ union cvmx_pci_dma_cntx {
uint32_t dma_cnt:32;
#endif
} s;
- struct cvmx_pci_dma_cntx_s cn30xx;
- struct cvmx_pci_dma_cntx_s cn31xx;
- struct cvmx_pci_dma_cntx_s cn38xx;
- struct cvmx_pci_dma_cntx_s cn38xxp2;
- struct cvmx_pci_dma_cntx_s cn50xx;
- struct cvmx_pci_dma_cntx_s cn58xx;
- struct cvmx_pci_dma_cntx_s cn58xxp1;
};
union cvmx_pci_dma_int_levx {
@@ -1145,13 +904,6 @@ union cvmx_pci_dma_int_levx {
uint32_t pkt_cnt:32;
#endif
} s;
- struct cvmx_pci_dma_int_levx_s cn30xx;
- struct cvmx_pci_dma_int_levx_s cn31xx;
- struct cvmx_pci_dma_int_levx_s cn38xx;
- struct cvmx_pci_dma_int_levx_s cn38xxp2;
- struct cvmx_pci_dma_int_levx_s cn50xx;
- struct cvmx_pci_dma_int_levx_s cn58xx;
- struct cvmx_pci_dma_int_levx_s cn58xxp1;
};
union cvmx_pci_dma_timex {
@@ -1163,13 +915,6 @@ union cvmx_pci_dma_timex {
uint32_t dma_time:32;
#endif
} s;
- struct cvmx_pci_dma_timex_s cn30xx;
- struct cvmx_pci_dma_timex_s cn31xx;
- struct cvmx_pci_dma_timex_s cn38xx;
- struct cvmx_pci_dma_timex_s cn38xxp2;
- struct cvmx_pci_dma_timex_s cn50xx;
- struct cvmx_pci_dma_timex_s cn58xx;
- struct cvmx_pci_dma_timex_s cn58xxp1;
};
union cvmx_pci_instr_countx {
@@ -1181,13 +926,6 @@ union cvmx_pci_instr_countx {
uint32_t icnt:32;
#endif
} s;
- struct cvmx_pci_instr_countx_s cn30xx;
- struct cvmx_pci_instr_countx_s cn31xx;
- struct cvmx_pci_instr_countx_s cn38xx;
- struct cvmx_pci_instr_countx_s cn38xxp2;
- struct cvmx_pci_instr_countx_s cn50xx;
- struct cvmx_pci_instr_countx_s cn58xx;
- struct cvmx_pci_instr_countx_s cn58xxp1;
};
union cvmx_pci_int_enb {
@@ -1405,11 +1143,6 @@ union cvmx_pci_int_enb {
uint64_t reserved_34_63:30;
#endif
} cn31xx;
- struct cvmx_pci_int_enb_s cn38xx;
- struct cvmx_pci_int_enb_s cn38xxp2;
- struct cvmx_pci_int_enb_cn31xx cn50xx;
- struct cvmx_pci_int_enb_s cn58xx;
- struct cvmx_pci_int_enb_s cn58xxp1;
};
union cvmx_pci_int_enb2 {
@@ -1627,11 +1360,6 @@ union cvmx_pci_int_enb2 {
uint64_t reserved_34_63:30;
#endif
} cn31xx;
- struct cvmx_pci_int_enb2_s cn38xx;
- struct cvmx_pci_int_enb2_s cn38xxp2;
- struct cvmx_pci_int_enb2_cn31xx cn50xx;
- struct cvmx_pci_int_enb2_s cn58xx;
- struct cvmx_pci_int_enb2_s cn58xxp1;
};
union cvmx_pci_int_sum {
@@ -1849,11 +1577,6 @@ union cvmx_pci_int_sum {
uint64_t reserved_34_63:30;
#endif
} cn31xx;
- struct cvmx_pci_int_sum_s cn38xx;
- struct cvmx_pci_int_sum_s cn38xxp2;
- struct cvmx_pci_int_sum_cn31xx cn50xx;
- struct cvmx_pci_int_sum_s cn58xx;
- struct cvmx_pci_int_sum_s cn58xxp1;
};
union cvmx_pci_int_sum2 {
@@ -2071,11 +1794,6 @@ union cvmx_pci_int_sum2 {
uint64_t reserved_34_63:30;
#endif
} cn31xx;
- struct cvmx_pci_int_sum2_s cn38xx;
- struct cvmx_pci_int_sum2_s cn38xxp2;
- struct cvmx_pci_int_sum2_cn31xx cn50xx;
- struct cvmx_pci_int_sum2_s cn58xx;
- struct cvmx_pci_int_sum2_s cn58xxp1;
};
union cvmx_pci_msi_rcv {
@@ -2089,13 +1807,6 @@ union cvmx_pci_msi_rcv {
uint32_t reserved_6_31:26;
#endif
} s;
- struct cvmx_pci_msi_rcv_s cn30xx;
- struct cvmx_pci_msi_rcv_s cn31xx;
- struct cvmx_pci_msi_rcv_s cn38xx;
- struct cvmx_pci_msi_rcv_s cn38xxp2;
- struct cvmx_pci_msi_rcv_s cn50xx;
- struct cvmx_pci_msi_rcv_s cn58xx;
- struct cvmx_pci_msi_rcv_s cn58xxp1;
};
union cvmx_pci_pkt_creditsx {
@@ -2109,13 +1820,6 @@ union cvmx_pci_pkt_creditsx {
uint32_t pkt_cnt:16;
#endif
} s;
- struct cvmx_pci_pkt_creditsx_s cn30xx;
- struct cvmx_pci_pkt_creditsx_s cn31xx;
- struct cvmx_pci_pkt_creditsx_s cn38xx;
- struct cvmx_pci_pkt_creditsx_s cn38xxp2;
- struct cvmx_pci_pkt_creditsx_s cn50xx;
- struct cvmx_pci_pkt_creditsx_s cn58xx;
- struct cvmx_pci_pkt_creditsx_s cn58xxp1;
};
union cvmx_pci_pkts_sentx {
@@ -2127,13 +1831,6 @@ union cvmx_pci_pkts_sentx {
uint32_t pkt_cnt:32;
#endif
} s;
- struct cvmx_pci_pkts_sentx_s cn30xx;
- struct cvmx_pci_pkts_sentx_s cn31xx;
- struct cvmx_pci_pkts_sentx_s cn38xx;
- struct cvmx_pci_pkts_sentx_s cn38xxp2;
- struct cvmx_pci_pkts_sentx_s cn50xx;
- struct cvmx_pci_pkts_sentx_s cn58xx;
- struct cvmx_pci_pkts_sentx_s cn58xxp1;
};
union cvmx_pci_pkts_sent_int_levx {
@@ -2145,13 +1842,6 @@ union cvmx_pci_pkts_sent_int_levx {
uint32_t pkt_cnt:32;
#endif
} s;
- struct cvmx_pci_pkts_sent_int_levx_s cn30xx;
- struct cvmx_pci_pkts_sent_int_levx_s cn31xx;
- struct cvmx_pci_pkts_sent_int_levx_s cn38xx;
- struct cvmx_pci_pkts_sent_int_levx_s cn38xxp2;
- struct cvmx_pci_pkts_sent_int_levx_s cn50xx;
- struct cvmx_pci_pkts_sent_int_levx_s cn58xx;
- struct cvmx_pci_pkts_sent_int_levx_s cn58xxp1;
};
union cvmx_pci_pkts_sent_timex {
@@ -2163,13 +1853,6 @@ union cvmx_pci_pkts_sent_timex {
uint32_t pkt_time:32;
#endif
} s;
- struct cvmx_pci_pkts_sent_timex_s cn30xx;
- struct cvmx_pci_pkts_sent_timex_s cn31xx;
- struct cvmx_pci_pkts_sent_timex_s cn38xx;
- struct cvmx_pci_pkts_sent_timex_s cn38xxp2;
- struct cvmx_pci_pkts_sent_timex_s cn50xx;
- struct cvmx_pci_pkts_sent_timex_s cn58xx;
- struct cvmx_pci_pkts_sent_timex_s cn58xxp1;
};
union cvmx_pci_read_cmd_6 {
@@ -2185,13 +1868,6 @@ union cvmx_pci_read_cmd_6 {
uint32_t reserved_9_31:23;
#endif
} s;
- struct cvmx_pci_read_cmd_6_s cn30xx;
- struct cvmx_pci_read_cmd_6_s cn31xx;
- struct cvmx_pci_read_cmd_6_s cn38xx;
- struct cvmx_pci_read_cmd_6_s cn38xxp2;
- struct cvmx_pci_read_cmd_6_s cn50xx;
- struct cvmx_pci_read_cmd_6_s cn58xx;
- struct cvmx_pci_read_cmd_6_s cn58xxp1;
};
union cvmx_pci_read_cmd_c {
@@ -2207,13 +1883,6 @@ union cvmx_pci_read_cmd_c {
uint32_t reserved_9_31:23;
#endif
} s;
- struct cvmx_pci_read_cmd_c_s cn30xx;
- struct cvmx_pci_read_cmd_c_s cn31xx;
- struct cvmx_pci_read_cmd_c_s cn38xx;
- struct cvmx_pci_read_cmd_c_s cn38xxp2;
- struct cvmx_pci_read_cmd_c_s cn50xx;
- struct cvmx_pci_read_cmd_c_s cn58xx;
- struct cvmx_pci_read_cmd_c_s cn58xxp1;
};
union cvmx_pci_read_cmd_e {
@@ -2229,13 +1898,6 @@ union cvmx_pci_read_cmd_e {
uint32_t reserved_9_31:23;
#endif
} s;
- struct cvmx_pci_read_cmd_e_s cn30xx;
- struct cvmx_pci_read_cmd_e_s cn31xx;
- struct cvmx_pci_read_cmd_e_s cn38xx;
- struct cvmx_pci_read_cmd_e_s cn38xxp2;
- struct cvmx_pci_read_cmd_e_s cn50xx;
- struct cvmx_pci_read_cmd_e_s cn58xx;
- struct cvmx_pci_read_cmd_e_s cn58xxp1;
};
union cvmx_pci_read_timeout {
@@ -2251,13 +1913,6 @@ union cvmx_pci_read_timeout {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pci_read_timeout_s cn30xx;
- struct cvmx_pci_read_timeout_s cn31xx;
- struct cvmx_pci_read_timeout_s cn38xx;
- struct cvmx_pci_read_timeout_s cn38xxp2;
- struct cvmx_pci_read_timeout_s cn50xx;
- struct cvmx_pci_read_timeout_s cn58xx;
- struct cvmx_pci_read_timeout_s cn58xxp1;
};
union cvmx_pci_scm_reg {
@@ -2271,13 +1926,6 @@ union cvmx_pci_scm_reg {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pci_scm_reg_s cn30xx;
- struct cvmx_pci_scm_reg_s cn31xx;
- struct cvmx_pci_scm_reg_s cn38xx;
- struct cvmx_pci_scm_reg_s cn38xxp2;
- struct cvmx_pci_scm_reg_s cn50xx;
- struct cvmx_pci_scm_reg_s cn58xx;
- struct cvmx_pci_scm_reg_s cn58xxp1;
};
union cvmx_pci_tsr_reg {
@@ -2291,13 +1939,6 @@ union cvmx_pci_tsr_reg {
uint64_t reserved_36_63:28;
#endif
} s;
- struct cvmx_pci_tsr_reg_s cn30xx;
- struct cvmx_pci_tsr_reg_s cn31xx;
- struct cvmx_pci_tsr_reg_s cn38xx;
- struct cvmx_pci_tsr_reg_s cn38xxp2;
- struct cvmx_pci_tsr_reg_s cn50xx;
- struct cvmx_pci_tsr_reg_s cn58xx;
- struct cvmx_pci_tsr_reg_s cn58xxp1;
};
union cvmx_pci_win_rd_addr {
@@ -2326,7 +1967,6 @@ union cvmx_pci_win_rd_addr {
uint64_t reserved_49_63:15;
#endif
} cn30xx;
- struct cvmx_pci_win_rd_addr_cn30xx cn31xx;
struct cvmx_pci_win_rd_addr_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_49_63:15;
@@ -2340,10 +1980,6 @@ union cvmx_pci_win_rd_addr {
uint64_t reserved_49_63:15;
#endif
} cn38xx;
- struct cvmx_pci_win_rd_addr_cn38xx cn38xxp2;
- struct cvmx_pci_win_rd_addr_cn30xx cn50xx;
- struct cvmx_pci_win_rd_addr_cn38xx cn58xx;
- struct cvmx_pci_win_rd_addr_cn38xx cn58xxp1;
};
union cvmx_pci_win_rd_data {
@@ -2355,13 +1991,6 @@ union cvmx_pci_win_rd_data {
uint64_t rd_data:64;
#endif
} s;
- struct cvmx_pci_win_rd_data_s cn30xx;
- struct cvmx_pci_win_rd_data_s cn31xx;
- struct cvmx_pci_win_rd_data_s cn38xx;
- struct cvmx_pci_win_rd_data_s cn38xxp2;
- struct cvmx_pci_win_rd_data_s cn50xx;
- struct cvmx_pci_win_rd_data_s cn58xx;
- struct cvmx_pci_win_rd_data_s cn58xxp1;
};
union cvmx_pci_win_wr_addr {
@@ -2379,13 +2008,6 @@ union cvmx_pci_win_wr_addr {
uint64_t reserved_49_63:15;
#endif
} s;
- struct cvmx_pci_win_wr_addr_s cn30xx;
- struct cvmx_pci_win_wr_addr_s cn31xx;
- struct cvmx_pci_win_wr_addr_s cn38xx;
- struct cvmx_pci_win_wr_addr_s cn38xxp2;
- struct cvmx_pci_win_wr_addr_s cn50xx;
- struct cvmx_pci_win_wr_addr_s cn58xx;
- struct cvmx_pci_win_wr_addr_s cn58xxp1;
};
union cvmx_pci_win_wr_data {
@@ -2397,13 +2019,6 @@ union cvmx_pci_win_wr_data {
uint64_t wr_data:64;
#endif
} s;
- struct cvmx_pci_win_wr_data_s cn30xx;
- struct cvmx_pci_win_wr_data_s cn31xx;
- struct cvmx_pci_win_wr_data_s cn38xx;
- struct cvmx_pci_win_wr_data_s cn38xxp2;
- struct cvmx_pci_win_wr_data_s cn50xx;
- struct cvmx_pci_win_wr_data_s cn58xx;
- struct cvmx_pci_win_wr_data_s cn58xxp1;
};
union cvmx_pci_win_wr_mask {
@@ -2417,13 +2032,6 @@ union cvmx_pci_win_wr_mask {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_pci_win_wr_mask_s cn30xx;
- struct cvmx_pci_win_wr_mask_s cn31xx;
- struct cvmx_pci_win_wr_mask_s cn38xx;
- struct cvmx_pci_win_wr_mask_s cn38xxp2;
- struct cvmx_pci_win_wr_mask_s cn50xx;
- struct cvmx_pci_win_wr_mask_s cn58xx;
- struct cvmx_pci_win_wr_mask_s cn58xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
index 39da7f9d7b3f..5f013269a89d 100644
--- a/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pcsx-defs.h
@@ -361,17 +361,6 @@ union cvmx_pcsx_anx_adv_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsx_anx_adv_reg_s cn52xx;
- struct cvmx_pcsx_anx_adv_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_adv_reg_s cn56xx;
- struct cvmx_pcsx_anx_adv_reg_s cn56xxp1;
- struct cvmx_pcsx_anx_adv_reg_s cn61xx;
- struct cvmx_pcsx_anx_adv_reg_s cn63xx;
- struct cvmx_pcsx_anx_adv_reg_s cn63xxp1;
- struct cvmx_pcsx_anx_adv_reg_s cn66xx;
- struct cvmx_pcsx_anx_adv_reg_s cn68xx;
- struct cvmx_pcsx_anx_adv_reg_s cn68xxp1;
- struct cvmx_pcsx_anx_adv_reg_s cnf71xx;
};
union cvmx_pcsx_anx_ext_st_reg {
@@ -393,17 +382,6 @@ union cvmx_pcsx_anx_ext_st_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsx_anx_ext_st_reg_s cn52xx;
- struct cvmx_pcsx_anx_ext_st_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_ext_st_reg_s cn56xx;
- struct cvmx_pcsx_anx_ext_st_reg_s cn56xxp1;
- struct cvmx_pcsx_anx_ext_st_reg_s cn61xx;
- struct cvmx_pcsx_anx_ext_st_reg_s cn63xx;
- struct cvmx_pcsx_anx_ext_st_reg_s cn63xxp1;
- struct cvmx_pcsx_anx_ext_st_reg_s cn66xx;
- struct cvmx_pcsx_anx_ext_st_reg_s cn68xx;
- struct cvmx_pcsx_anx_ext_st_reg_s cn68xxp1;
- struct cvmx_pcsx_anx_ext_st_reg_s cnf71xx;
};
union cvmx_pcsx_anx_lp_abil_reg {
@@ -431,17 +409,6 @@ union cvmx_pcsx_anx_lp_abil_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn52xx;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn56xx;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn56xxp1;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn61xx;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn63xx;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn63xxp1;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn66xx;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn68xx;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn68xxp1;
- struct cvmx_pcsx_anx_lp_abil_reg_s cnf71xx;
};
union cvmx_pcsx_anx_results_reg {
@@ -463,17 +430,6 @@ union cvmx_pcsx_anx_results_reg {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_pcsx_anx_results_reg_s cn52xx;
- struct cvmx_pcsx_anx_results_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_results_reg_s cn56xx;
- struct cvmx_pcsx_anx_results_reg_s cn56xxp1;
- struct cvmx_pcsx_anx_results_reg_s cn61xx;
- struct cvmx_pcsx_anx_results_reg_s cn63xx;
- struct cvmx_pcsx_anx_results_reg_s cn63xxp1;
- struct cvmx_pcsx_anx_results_reg_s cn66xx;
- struct cvmx_pcsx_anx_results_reg_s cn68xx;
- struct cvmx_pcsx_anx_results_reg_s cn68xxp1;
- struct cvmx_pcsx_anx_results_reg_s cnf71xx;
};
union cvmx_pcsx_intx_en_reg {
@@ -542,16 +498,6 @@ union cvmx_pcsx_intx_en_reg {
uint64_t reserved_12_63:52;
#endif
} cn52xx;
- struct cvmx_pcsx_intx_en_reg_cn52xx cn52xxp1;
- struct cvmx_pcsx_intx_en_reg_cn52xx cn56xx;
- struct cvmx_pcsx_intx_en_reg_cn52xx cn56xxp1;
- struct cvmx_pcsx_intx_en_reg_s cn61xx;
- struct cvmx_pcsx_intx_en_reg_s cn63xx;
- struct cvmx_pcsx_intx_en_reg_s cn63xxp1;
- struct cvmx_pcsx_intx_en_reg_s cn66xx;
- struct cvmx_pcsx_intx_en_reg_s cn68xx;
- struct cvmx_pcsx_intx_en_reg_s cn68xxp1;
- struct cvmx_pcsx_intx_en_reg_s cnf71xx;
};
union cvmx_pcsx_intx_reg {
@@ -620,16 +566,6 @@ union cvmx_pcsx_intx_reg {
uint64_t reserved_12_63:52;
#endif
} cn52xx;
- struct cvmx_pcsx_intx_reg_cn52xx cn52xxp1;
- struct cvmx_pcsx_intx_reg_cn52xx cn56xx;
- struct cvmx_pcsx_intx_reg_cn52xx cn56xxp1;
- struct cvmx_pcsx_intx_reg_s cn61xx;
- struct cvmx_pcsx_intx_reg_s cn63xx;
- struct cvmx_pcsx_intx_reg_s cn63xxp1;
- struct cvmx_pcsx_intx_reg_s cn66xx;
- struct cvmx_pcsx_intx_reg_s cn68xx;
- struct cvmx_pcsx_intx_reg_s cn68xxp1;
- struct cvmx_pcsx_intx_reg_s cnf71xx;
};
union cvmx_pcsx_linkx_timer_count_reg {
@@ -643,17 +579,6 @@ union cvmx_pcsx_linkx_timer_count_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn52xx;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn52xxp1;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn56xx;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn56xxp1;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn61xx;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn63xx;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn63xxp1;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn66xx;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn68xx;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn68xxp1;
- struct cvmx_pcsx_linkx_timer_count_reg_s cnf71xx;
};
union cvmx_pcsx_log_anlx_reg {
@@ -671,17 +596,6 @@ union cvmx_pcsx_log_anlx_reg {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_pcsx_log_anlx_reg_s cn52xx;
- struct cvmx_pcsx_log_anlx_reg_s cn52xxp1;
- struct cvmx_pcsx_log_anlx_reg_s cn56xx;
- struct cvmx_pcsx_log_anlx_reg_s cn56xxp1;
- struct cvmx_pcsx_log_anlx_reg_s cn61xx;
- struct cvmx_pcsx_log_anlx_reg_s cn63xx;
- struct cvmx_pcsx_log_anlx_reg_s cn63xxp1;
- struct cvmx_pcsx_log_anlx_reg_s cn66xx;
- struct cvmx_pcsx_log_anlx_reg_s cn68xx;
- struct cvmx_pcsx_log_anlx_reg_s cn68xxp1;
- struct cvmx_pcsx_log_anlx_reg_s cnf71xx;
};
union cvmx_pcsx_miscx_ctl_reg {
@@ -707,17 +621,6 @@ union cvmx_pcsx_miscx_ctl_reg {
uint64_t reserved_13_63:51;
#endif
} s;
- struct cvmx_pcsx_miscx_ctl_reg_s cn52xx;
- struct cvmx_pcsx_miscx_ctl_reg_s cn52xxp1;
- struct cvmx_pcsx_miscx_ctl_reg_s cn56xx;
- struct cvmx_pcsx_miscx_ctl_reg_s cn56xxp1;
- struct cvmx_pcsx_miscx_ctl_reg_s cn61xx;
- struct cvmx_pcsx_miscx_ctl_reg_s cn63xx;
- struct cvmx_pcsx_miscx_ctl_reg_s cn63xxp1;
- struct cvmx_pcsx_miscx_ctl_reg_s cn66xx;
- struct cvmx_pcsx_miscx_ctl_reg_s cn68xx;
- struct cvmx_pcsx_miscx_ctl_reg_s cn68xxp1;
- struct cvmx_pcsx_miscx_ctl_reg_s cnf71xx;
};
union cvmx_pcsx_mrx_control_reg {
@@ -753,17 +656,6 @@ union cvmx_pcsx_mrx_control_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsx_mrx_control_reg_s cn52xx;
- struct cvmx_pcsx_mrx_control_reg_s cn52xxp1;
- struct cvmx_pcsx_mrx_control_reg_s cn56xx;
- struct cvmx_pcsx_mrx_control_reg_s cn56xxp1;
- struct cvmx_pcsx_mrx_control_reg_s cn61xx;
- struct cvmx_pcsx_mrx_control_reg_s cn63xx;
- struct cvmx_pcsx_mrx_control_reg_s cn63xxp1;
- struct cvmx_pcsx_mrx_control_reg_s cn66xx;
- struct cvmx_pcsx_mrx_control_reg_s cn68xx;
- struct cvmx_pcsx_mrx_control_reg_s cn68xxp1;
- struct cvmx_pcsx_mrx_control_reg_s cnf71xx;
};
union cvmx_pcsx_mrx_status_reg {
@@ -807,17 +699,6 @@ union cvmx_pcsx_mrx_status_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsx_mrx_status_reg_s cn52xx;
- struct cvmx_pcsx_mrx_status_reg_s cn52xxp1;
- struct cvmx_pcsx_mrx_status_reg_s cn56xx;
- struct cvmx_pcsx_mrx_status_reg_s cn56xxp1;
- struct cvmx_pcsx_mrx_status_reg_s cn61xx;
- struct cvmx_pcsx_mrx_status_reg_s cn63xx;
- struct cvmx_pcsx_mrx_status_reg_s cn63xxp1;
- struct cvmx_pcsx_mrx_status_reg_s cn66xx;
- struct cvmx_pcsx_mrx_status_reg_s cn68xx;
- struct cvmx_pcsx_mrx_status_reg_s cn68xxp1;
- struct cvmx_pcsx_mrx_status_reg_s cnf71xx;
};
union cvmx_pcsx_rxx_states_reg {
@@ -841,17 +722,6 @@ union cvmx_pcsx_rxx_states_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsx_rxx_states_reg_s cn52xx;
- struct cvmx_pcsx_rxx_states_reg_s cn52xxp1;
- struct cvmx_pcsx_rxx_states_reg_s cn56xx;
- struct cvmx_pcsx_rxx_states_reg_s cn56xxp1;
- struct cvmx_pcsx_rxx_states_reg_s cn61xx;
- struct cvmx_pcsx_rxx_states_reg_s cn63xx;
- struct cvmx_pcsx_rxx_states_reg_s cn63xxp1;
- struct cvmx_pcsx_rxx_states_reg_s cn66xx;
- struct cvmx_pcsx_rxx_states_reg_s cn68xx;
- struct cvmx_pcsx_rxx_states_reg_s cn68xxp1;
- struct cvmx_pcsx_rxx_states_reg_s cnf71xx;
};
union cvmx_pcsx_rxx_sync_reg {
@@ -867,17 +737,6 @@ union cvmx_pcsx_rxx_sync_reg {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_pcsx_rxx_sync_reg_s cn52xx;
- struct cvmx_pcsx_rxx_sync_reg_s cn52xxp1;
- struct cvmx_pcsx_rxx_sync_reg_s cn56xx;
- struct cvmx_pcsx_rxx_sync_reg_s cn56xxp1;
- struct cvmx_pcsx_rxx_sync_reg_s cn61xx;
- struct cvmx_pcsx_rxx_sync_reg_s cn63xx;
- struct cvmx_pcsx_rxx_sync_reg_s cn63xxp1;
- struct cvmx_pcsx_rxx_sync_reg_s cn66xx;
- struct cvmx_pcsx_rxx_sync_reg_s cn68xx;
- struct cvmx_pcsx_rxx_sync_reg_s cn68xxp1;
- struct cvmx_pcsx_rxx_sync_reg_s cnf71xx;
};
union cvmx_pcsx_sgmx_an_adv_reg {
@@ -903,17 +762,6 @@ union cvmx_pcsx_sgmx_an_adv_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xx;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xxp1;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xx;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xxp1;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn61xx;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn63xx;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn63xxp1;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn66xx;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn68xx;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn68xxp1;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cnf71xx;
};
union cvmx_pcsx_sgmx_lp_adv_reg {
@@ -937,17 +785,6 @@ union cvmx_pcsx_sgmx_lp_adv_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xx;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xxp1;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xx;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xxp1;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn61xx;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn63xx;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn63xxp1;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn66xx;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn68xx;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn68xxp1;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cnf71xx;
};
union cvmx_pcsx_txx_states_reg {
@@ -965,17 +802,6 @@ union cvmx_pcsx_txx_states_reg {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_pcsx_txx_states_reg_s cn52xx;
- struct cvmx_pcsx_txx_states_reg_s cn52xxp1;
- struct cvmx_pcsx_txx_states_reg_s cn56xx;
- struct cvmx_pcsx_txx_states_reg_s cn56xxp1;
- struct cvmx_pcsx_txx_states_reg_s cn61xx;
- struct cvmx_pcsx_txx_states_reg_s cn63xx;
- struct cvmx_pcsx_txx_states_reg_s cn63xxp1;
- struct cvmx_pcsx_txx_states_reg_s cn66xx;
- struct cvmx_pcsx_txx_states_reg_s cn68xx;
- struct cvmx_pcsx_txx_states_reg_s cn68xxp1;
- struct cvmx_pcsx_txx_states_reg_s cnf71xx;
};
union cvmx_pcsx_tx_rxx_polarity_reg {
@@ -995,17 +821,6 @@ union cvmx_pcsx_tx_rxx_polarity_reg {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xx;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xxp1;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xx;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xxp1;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn61xx;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn63xx;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn63xxp1;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn66xx;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn68xx;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn68xxp1;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cnf71xx;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
index 847dd9dca6ea..b353775eeeb6 100644
--- a/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pcsxx-defs.h
@@ -293,16 +293,6 @@ union cvmx_pcsxx_10gbx_status_reg {
uint64_t reserved_13_63:51;
#endif
} s;
- struct cvmx_pcsxx_10gbx_status_reg_s cn52xx;
- struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1;
- struct cvmx_pcsxx_10gbx_status_reg_s cn56xx;
- struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1;
- struct cvmx_pcsxx_10gbx_status_reg_s cn61xx;
- struct cvmx_pcsxx_10gbx_status_reg_s cn63xx;
- struct cvmx_pcsxx_10gbx_status_reg_s cn63xxp1;
- struct cvmx_pcsxx_10gbx_status_reg_s cn66xx;
- struct cvmx_pcsxx_10gbx_status_reg_s cn68xx;
- struct cvmx_pcsxx_10gbx_status_reg_s cn68xxp1;
};
union cvmx_pcsxx_bist_status_reg {
@@ -316,16 +306,6 @@ union cvmx_pcsxx_bist_status_reg {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_pcsxx_bist_status_reg_s cn52xx;
- struct cvmx_pcsxx_bist_status_reg_s cn52xxp1;
- struct cvmx_pcsxx_bist_status_reg_s cn56xx;
- struct cvmx_pcsxx_bist_status_reg_s cn56xxp1;
- struct cvmx_pcsxx_bist_status_reg_s cn61xx;
- struct cvmx_pcsxx_bist_status_reg_s cn63xx;
- struct cvmx_pcsxx_bist_status_reg_s cn63xxp1;
- struct cvmx_pcsxx_bist_status_reg_s cn66xx;
- struct cvmx_pcsxx_bist_status_reg_s cn68xx;
- struct cvmx_pcsxx_bist_status_reg_s cn68xxp1;
};
union cvmx_pcsxx_bit_lock_status_reg {
@@ -345,16 +325,6 @@ union cvmx_pcsxx_bit_lock_status_reg {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn61xx;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn63xx;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn63xxp1;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn66xx;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn68xx;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn68xxp1;
};
union cvmx_pcsxx_control1_reg {
@@ -384,16 +354,6 @@ union cvmx_pcsxx_control1_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsxx_control1_reg_s cn52xx;
- struct cvmx_pcsxx_control1_reg_s cn52xxp1;
- struct cvmx_pcsxx_control1_reg_s cn56xx;
- struct cvmx_pcsxx_control1_reg_s cn56xxp1;
- struct cvmx_pcsxx_control1_reg_s cn61xx;
- struct cvmx_pcsxx_control1_reg_s cn63xx;
- struct cvmx_pcsxx_control1_reg_s cn63xxp1;
- struct cvmx_pcsxx_control1_reg_s cn66xx;
- struct cvmx_pcsxx_control1_reg_s cn68xx;
- struct cvmx_pcsxx_control1_reg_s cn68xxp1;
};
union cvmx_pcsxx_control2_reg {
@@ -407,16 +367,6 @@ union cvmx_pcsxx_control2_reg {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_pcsxx_control2_reg_s cn52xx;
- struct cvmx_pcsxx_control2_reg_s cn52xxp1;
- struct cvmx_pcsxx_control2_reg_s cn56xx;
- struct cvmx_pcsxx_control2_reg_s cn56xxp1;
- struct cvmx_pcsxx_control2_reg_s cn61xx;
- struct cvmx_pcsxx_control2_reg_s cn63xx;
- struct cvmx_pcsxx_control2_reg_s cn63xxp1;
- struct cvmx_pcsxx_control2_reg_s cn66xx;
- struct cvmx_pcsxx_control2_reg_s cn68xx;
- struct cvmx_pcsxx_control2_reg_s cn68xxp1;
};
union cvmx_pcsxx_int_en_reg {
@@ -461,15 +411,6 @@ union cvmx_pcsxx_int_en_reg {
uint64_t reserved_6_63:58;
#endif
} cn52xx;
- struct cvmx_pcsxx_int_en_reg_cn52xx cn52xxp1;
- struct cvmx_pcsxx_int_en_reg_cn52xx cn56xx;
- struct cvmx_pcsxx_int_en_reg_cn52xx cn56xxp1;
- struct cvmx_pcsxx_int_en_reg_s cn61xx;
- struct cvmx_pcsxx_int_en_reg_s cn63xx;
- struct cvmx_pcsxx_int_en_reg_s cn63xxp1;
- struct cvmx_pcsxx_int_en_reg_s cn66xx;
- struct cvmx_pcsxx_int_en_reg_s cn68xx;
- struct cvmx_pcsxx_int_en_reg_s cn68xxp1;
};
union cvmx_pcsxx_int_reg {
@@ -514,15 +455,6 @@ union cvmx_pcsxx_int_reg {
uint64_t reserved_6_63:58;
#endif
} cn52xx;
- struct cvmx_pcsxx_int_reg_cn52xx cn52xxp1;
- struct cvmx_pcsxx_int_reg_cn52xx cn56xx;
- struct cvmx_pcsxx_int_reg_cn52xx cn56xxp1;
- struct cvmx_pcsxx_int_reg_s cn61xx;
- struct cvmx_pcsxx_int_reg_s cn63xx;
- struct cvmx_pcsxx_int_reg_s cn63xxp1;
- struct cvmx_pcsxx_int_reg_s cn66xx;
- struct cvmx_pcsxx_int_reg_s cn68xx;
- struct cvmx_pcsxx_int_reg_s cn68xxp1;
};
union cvmx_pcsxx_log_anl_reg {
@@ -544,16 +476,6 @@ union cvmx_pcsxx_log_anl_reg {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_pcsxx_log_anl_reg_s cn52xx;
- struct cvmx_pcsxx_log_anl_reg_s cn52xxp1;
- struct cvmx_pcsxx_log_anl_reg_s cn56xx;
- struct cvmx_pcsxx_log_anl_reg_s cn56xxp1;
- struct cvmx_pcsxx_log_anl_reg_s cn61xx;
- struct cvmx_pcsxx_log_anl_reg_s cn63xx;
- struct cvmx_pcsxx_log_anl_reg_s cn63xxp1;
- struct cvmx_pcsxx_log_anl_reg_s cn66xx;
- struct cvmx_pcsxx_log_anl_reg_s cn68xx;
- struct cvmx_pcsxx_log_anl_reg_s cn68xxp1;
};
union cvmx_pcsxx_misc_ctl_reg {
@@ -573,16 +495,6 @@ union cvmx_pcsxx_misc_ctl_reg {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_pcsxx_misc_ctl_reg_s cn52xx;
- struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1;
- struct cvmx_pcsxx_misc_ctl_reg_s cn56xx;
- struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1;
- struct cvmx_pcsxx_misc_ctl_reg_s cn61xx;
- struct cvmx_pcsxx_misc_ctl_reg_s cn63xx;
- struct cvmx_pcsxx_misc_ctl_reg_s cn63xxp1;
- struct cvmx_pcsxx_misc_ctl_reg_s cn66xx;
- struct cvmx_pcsxx_misc_ctl_reg_s cn68xx;
- struct cvmx_pcsxx_misc_ctl_reg_s cn68xxp1;
};
union cvmx_pcsxx_rx_sync_states_reg {
@@ -602,16 +514,6 @@ union cvmx_pcsxx_rx_sync_states_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn61xx;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn63xx;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn63xxp1;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn66xx;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn68xx;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn68xxp1;
};
union cvmx_pcsxx_spd_abil_reg {
@@ -627,16 +529,6 @@ union cvmx_pcsxx_spd_abil_reg {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_pcsxx_spd_abil_reg_s cn52xx;
- struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1;
- struct cvmx_pcsxx_spd_abil_reg_s cn56xx;
- struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1;
- struct cvmx_pcsxx_spd_abil_reg_s cn61xx;
- struct cvmx_pcsxx_spd_abil_reg_s cn63xx;
- struct cvmx_pcsxx_spd_abil_reg_s cn63xxp1;
- struct cvmx_pcsxx_spd_abil_reg_s cn66xx;
- struct cvmx_pcsxx_spd_abil_reg_s cn68xx;
- struct cvmx_pcsxx_spd_abil_reg_s cn68xxp1;
};
union cvmx_pcsxx_status1_reg {
@@ -658,16 +550,6 @@ union cvmx_pcsxx_status1_reg {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_pcsxx_status1_reg_s cn52xx;
- struct cvmx_pcsxx_status1_reg_s cn52xxp1;
- struct cvmx_pcsxx_status1_reg_s cn56xx;
- struct cvmx_pcsxx_status1_reg_s cn56xxp1;
- struct cvmx_pcsxx_status1_reg_s cn61xx;
- struct cvmx_pcsxx_status1_reg_s cn63xx;
- struct cvmx_pcsxx_status1_reg_s cn63xxp1;
- struct cvmx_pcsxx_status1_reg_s cn66xx;
- struct cvmx_pcsxx_status1_reg_s cn68xx;
- struct cvmx_pcsxx_status1_reg_s cn68xxp1;
};
union cvmx_pcsxx_status2_reg {
@@ -695,16 +577,6 @@ union cvmx_pcsxx_status2_reg {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pcsxx_status2_reg_s cn52xx;
- struct cvmx_pcsxx_status2_reg_s cn52xxp1;
- struct cvmx_pcsxx_status2_reg_s cn56xx;
- struct cvmx_pcsxx_status2_reg_s cn56xxp1;
- struct cvmx_pcsxx_status2_reg_s cn61xx;
- struct cvmx_pcsxx_status2_reg_s cn63xx;
- struct cvmx_pcsxx_status2_reg_s cn63xxp1;
- struct cvmx_pcsxx_status2_reg_s cn66xx;
- struct cvmx_pcsxx_status2_reg_s cn68xx;
- struct cvmx_pcsxx_status2_reg_s cn68xxp1;
};
union cvmx_pcsxx_tx_rx_polarity_reg {
@@ -724,7 +596,6 @@ union cvmx_pcsxx_tx_rx_polarity_reg {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx;
struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_2_63:62;
@@ -736,14 +607,6 @@ union cvmx_pcsxx_tx_rx_polarity_reg {
uint64_t reserved_2_63:62;
#endif
} cn52xxp1;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx;
- struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn61xx;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xx;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn63xxp1;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn66xx;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xx;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn68xxp1;
};
union cvmx_pcsxx_tx_rx_states_reg {
@@ -773,7 +636,6 @@ union cvmx_pcsxx_tx_rx_states_reg {
uint64_t reserved_14_63:50;
#endif
} s;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx;
struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_13_63:51;
@@ -797,14 +659,6 @@ union cvmx_pcsxx_tx_rx_states_reg {
uint64_t reserved_13_63:51;
#endif
} cn52xxp1;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx;
- struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn61xx;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn63xx;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn63xxp1;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn66xx;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn68xx;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn68xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pemx-defs.h b/arch/mips/include/asm/octeon/cvmx-pemx-defs.h
index 50a916f892fa..d2d6dba938e9 100644
--- a/arch/mips/include/asm/octeon/cvmx-pemx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pemx-defs.h
@@ -68,13 +68,6 @@ union cvmx_pemx_bar1_indexx {
uint64_t reserved_20_63:44;
#endif
} s;
- struct cvmx_pemx_bar1_indexx_s cn61xx;
- struct cvmx_pemx_bar1_indexx_s cn63xx;
- struct cvmx_pemx_bar1_indexx_s cn63xxp1;
- struct cvmx_pemx_bar1_indexx_s cn66xx;
- struct cvmx_pemx_bar1_indexx_s cn68xx;
- struct cvmx_pemx_bar1_indexx_s cn68xxp1;
- struct cvmx_pemx_bar1_indexx_s cnf71xx;
};
union cvmx_pemx_bar2_mask {
@@ -90,11 +83,6 @@ union cvmx_pemx_bar2_mask {
uint64_t reserved_38_63:26;
#endif
} s;
- struct cvmx_pemx_bar2_mask_s cn61xx;
- struct cvmx_pemx_bar2_mask_s cn66xx;
- struct cvmx_pemx_bar2_mask_s cn68xx;
- struct cvmx_pemx_bar2_mask_s cn68xxp1;
- struct cvmx_pemx_bar2_mask_s cnf71xx;
};
union cvmx_pemx_bar_ctl {
@@ -114,13 +102,6 @@ union cvmx_pemx_bar_ctl {
uint64_t reserved_7_63:57;
#endif
} s;
- struct cvmx_pemx_bar_ctl_s cn61xx;
- struct cvmx_pemx_bar_ctl_s cn63xx;
- struct cvmx_pemx_bar_ctl_s cn63xxp1;
- struct cvmx_pemx_bar_ctl_s cn66xx;
- struct cvmx_pemx_bar_ctl_s cn68xx;
- struct cvmx_pemx_bar_ctl_s cn68xxp1;
- struct cvmx_pemx_bar_ctl_s cnf71xx;
};
union cvmx_pemx_bist_status {
@@ -148,13 +129,6 @@ union cvmx_pemx_bist_status {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_pemx_bist_status_s cn61xx;
- struct cvmx_pemx_bist_status_s cn63xx;
- struct cvmx_pemx_bist_status_s cn63xxp1;
- struct cvmx_pemx_bist_status_s cn66xx;
- struct cvmx_pemx_bist_status_s cn68xx;
- struct cvmx_pemx_bist_status_s cn68xxp1;
- struct cvmx_pemx_bist_status_s cnf71xx;
};
union cvmx_pemx_bist_status2 {
@@ -186,13 +160,6 @@ union cvmx_pemx_bist_status2 {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_pemx_bist_status2_s cn61xx;
- struct cvmx_pemx_bist_status2_s cn63xx;
- struct cvmx_pemx_bist_status2_s cn63xxp1;
- struct cvmx_pemx_bist_status2_s cn66xx;
- struct cvmx_pemx_bist_status2_s cn68xx;
- struct cvmx_pemx_bist_status2_s cn68xxp1;
- struct cvmx_pemx_bist_status2_s cnf71xx;
};
union cvmx_pemx_cfg_rd {
@@ -206,13 +173,6 @@ union cvmx_pemx_cfg_rd {
uint64_t data:32;
#endif
} s;
- struct cvmx_pemx_cfg_rd_s cn61xx;
- struct cvmx_pemx_cfg_rd_s cn63xx;
- struct cvmx_pemx_cfg_rd_s cn63xxp1;
- struct cvmx_pemx_cfg_rd_s cn66xx;
- struct cvmx_pemx_cfg_rd_s cn68xx;
- struct cvmx_pemx_cfg_rd_s cn68xxp1;
- struct cvmx_pemx_cfg_rd_s cnf71xx;
};
union cvmx_pemx_cfg_wr {
@@ -226,13 +186,6 @@ union cvmx_pemx_cfg_wr {
uint64_t data:32;
#endif
} s;
- struct cvmx_pemx_cfg_wr_s cn61xx;
- struct cvmx_pemx_cfg_wr_s cn63xx;
- struct cvmx_pemx_cfg_wr_s cn63xxp1;
- struct cvmx_pemx_cfg_wr_s cn66xx;
- struct cvmx_pemx_cfg_wr_s cn68xx;
- struct cvmx_pemx_cfg_wr_s cn68xxp1;
- struct cvmx_pemx_cfg_wr_s cnf71xx;
};
union cvmx_pemx_cpl_lut_valid {
@@ -246,13 +199,6 @@ union cvmx_pemx_cpl_lut_valid {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pemx_cpl_lut_valid_s cn61xx;
- struct cvmx_pemx_cpl_lut_valid_s cn63xx;
- struct cvmx_pemx_cpl_lut_valid_s cn63xxp1;
- struct cvmx_pemx_cpl_lut_valid_s cn66xx;
- struct cvmx_pemx_cpl_lut_valid_s cn68xx;
- struct cvmx_pemx_cpl_lut_valid_s cn68xxp1;
- struct cvmx_pemx_cpl_lut_valid_s cnf71xx;
};
union cvmx_pemx_ctl_status {
@@ -298,13 +244,6 @@ union cvmx_pemx_ctl_status {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_pemx_ctl_status_s cn61xx;
- struct cvmx_pemx_ctl_status_s cn63xx;
- struct cvmx_pemx_ctl_status_s cn63xxp1;
- struct cvmx_pemx_ctl_status_s cn66xx;
- struct cvmx_pemx_ctl_status_s cn68xx;
- struct cvmx_pemx_ctl_status_s cn68xxp1;
- struct cvmx_pemx_ctl_status_s cnf71xx;
};
union cvmx_pemx_dbg_info {
@@ -378,13 +317,6 @@ union cvmx_pemx_dbg_info {
uint64_t reserved_31_63:33;
#endif
} s;
- struct cvmx_pemx_dbg_info_s cn61xx;
- struct cvmx_pemx_dbg_info_s cn63xx;
- struct cvmx_pemx_dbg_info_s cn63xxp1;
- struct cvmx_pemx_dbg_info_s cn66xx;
- struct cvmx_pemx_dbg_info_s cn68xx;
- struct cvmx_pemx_dbg_info_s cn68xxp1;
- struct cvmx_pemx_dbg_info_s cnf71xx;
};
union cvmx_pemx_dbg_info_en {
@@ -458,13 +390,6 @@ union cvmx_pemx_dbg_info_en {
uint64_t reserved_31_63:33;
#endif
} s;
- struct cvmx_pemx_dbg_info_en_s cn61xx;
- struct cvmx_pemx_dbg_info_en_s cn63xx;
- struct cvmx_pemx_dbg_info_en_s cn63xxp1;
- struct cvmx_pemx_dbg_info_en_s cn66xx;
- struct cvmx_pemx_dbg_info_en_s cn68xx;
- struct cvmx_pemx_dbg_info_en_s cn68xxp1;
- struct cvmx_pemx_dbg_info_en_s cnf71xx;
};
union cvmx_pemx_diag_status {
@@ -484,13 +409,6 @@ union cvmx_pemx_diag_status {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_pemx_diag_status_s cn61xx;
- struct cvmx_pemx_diag_status_s cn63xx;
- struct cvmx_pemx_diag_status_s cn63xxp1;
- struct cvmx_pemx_diag_status_s cn66xx;
- struct cvmx_pemx_diag_status_s cn68xx;
- struct cvmx_pemx_diag_status_s cn68xxp1;
- struct cvmx_pemx_diag_status_s cnf71xx;
};
union cvmx_pemx_inb_read_credits {
@@ -504,10 +422,6 @@ union cvmx_pemx_inb_read_credits {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_pemx_inb_read_credits_s cn61xx;
- struct cvmx_pemx_inb_read_credits_s cn66xx;
- struct cvmx_pemx_inb_read_credits_s cn68xx;
- struct cvmx_pemx_inb_read_credits_s cnf71xx;
};
union cvmx_pemx_int_enb {
@@ -547,13 +461,6 @@ union cvmx_pemx_int_enb {
uint64_t reserved_14_63:50;
#endif
} s;
- struct cvmx_pemx_int_enb_s cn61xx;
- struct cvmx_pemx_int_enb_s cn63xx;
- struct cvmx_pemx_int_enb_s cn63xxp1;
- struct cvmx_pemx_int_enb_s cn66xx;
- struct cvmx_pemx_int_enb_s cn68xx;
- struct cvmx_pemx_int_enb_s cn68xxp1;
- struct cvmx_pemx_int_enb_s cnf71xx;
};
union cvmx_pemx_int_enb_int {
@@ -593,13 +500,6 @@ union cvmx_pemx_int_enb_int {
uint64_t reserved_14_63:50;
#endif
} s;
- struct cvmx_pemx_int_enb_int_s cn61xx;
- struct cvmx_pemx_int_enb_int_s cn63xx;
- struct cvmx_pemx_int_enb_int_s cn63xxp1;
- struct cvmx_pemx_int_enb_int_s cn66xx;
- struct cvmx_pemx_int_enb_int_s cn68xx;
- struct cvmx_pemx_int_enb_int_s cn68xxp1;
- struct cvmx_pemx_int_enb_int_s cnf71xx;
};
union cvmx_pemx_int_sum {
@@ -639,13 +539,6 @@ union cvmx_pemx_int_sum {
uint64_t reserved_14_63:50;
#endif
} s;
- struct cvmx_pemx_int_sum_s cn61xx;
- struct cvmx_pemx_int_sum_s cn63xx;
- struct cvmx_pemx_int_sum_s cn63xxp1;
- struct cvmx_pemx_int_sum_s cn66xx;
- struct cvmx_pemx_int_sum_s cn68xx;
- struct cvmx_pemx_int_sum_s cn68xxp1;
- struct cvmx_pemx_int_sum_s cnf71xx;
};
union cvmx_pemx_p2n_bar0_start {
@@ -659,13 +552,6 @@ union cvmx_pemx_p2n_bar0_start {
uint64_t addr:50;
#endif
} s;
- struct cvmx_pemx_p2n_bar0_start_s cn61xx;
- struct cvmx_pemx_p2n_bar0_start_s cn63xx;
- struct cvmx_pemx_p2n_bar0_start_s cn63xxp1;
- struct cvmx_pemx_p2n_bar0_start_s cn66xx;
- struct cvmx_pemx_p2n_bar0_start_s cn68xx;
- struct cvmx_pemx_p2n_bar0_start_s cn68xxp1;
- struct cvmx_pemx_p2n_bar0_start_s cnf71xx;
};
union cvmx_pemx_p2n_bar1_start {
@@ -679,13 +565,6 @@ union cvmx_pemx_p2n_bar1_start {
uint64_t addr:38;
#endif
} s;
- struct cvmx_pemx_p2n_bar1_start_s cn61xx;
- struct cvmx_pemx_p2n_bar1_start_s cn63xx;
- struct cvmx_pemx_p2n_bar1_start_s cn63xxp1;
- struct cvmx_pemx_p2n_bar1_start_s cn66xx;
- struct cvmx_pemx_p2n_bar1_start_s cn68xx;
- struct cvmx_pemx_p2n_bar1_start_s cn68xxp1;
- struct cvmx_pemx_p2n_bar1_start_s cnf71xx;
};
union cvmx_pemx_p2n_bar2_start {
@@ -699,13 +578,6 @@ union cvmx_pemx_p2n_bar2_start {
uint64_t addr:23;
#endif
} s;
- struct cvmx_pemx_p2n_bar2_start_s cn61xx;
- struct cvmx_pemx_p2n_bar2_start_s cn63xx;
- struct cvmx_pemx_p2n_bar2_start_s cn63xxp1;
- struct cvmx_pemx_p2n_bar2_start_s cn66xx;
- struct cvmx_pemx_p2n_bar2_start_s cn68xx;
- struct cvmx_pemx_p2n_bar2_start_s cn68xxp1;
- struct cvmx_pemx_p2n_bar2_start_s cnf71xx;
};
union cvmx_pemx_p2p_barx_end {
@@ -719,11 +591,6 @@ union cvmx_pemx_p2p_barx_end {
uint64_t addr:52;
#endif
} s;
- struct cvmx_pemx_p2p_barx_end_s cn63xx;
- struct cvmx_pemx_p2p_barx_end_s cn63xxp1;
- struct cvmx_pemx_p2p_barx_end_s cn66xx;
- struct cvmx_pemx_p2p_barx_end_s cn68xx;
- struct cvmx_pemx_p2p_barx_end_s cn68xxp1;
};
union cvmx_pemx_p2p_barx_start {
@@ -737,11 +604,6 @@ union cvmx_pemx_p2p_barx_start {
uint64_t addr:52;
#endif
} s;
- struct cvmx_pemx_p2p_barx_start_s cn63xx;
- struct cvmx_pemx_p2p_barx_start_s cn63xxp1;
- struct cvmx_pemx_p2p_barx_start_s cn66xx;
- struct cvmx_pemx_p2p_barx_start_s cn68xx;
- struct cvmx_pemx_p2p_barx_start_s cn68xxp1;
};
union cvmx_pemx_tlp_credits {
@@ -784,12 +646,6 @@ union cvmx_pemx_tlp_credits {
uint64_t reserved_56_63:8;
#endif
} cn61xx;
- struct cvmx_pemx_tlp_credits_s cn63xx;
- struct cvmx_pemx_tlp_credits_s cn63xxp1;
- struct cvmx_pemx_tlp_credits_s cn66xx;
- struct cvmx_pemx_tlp_credits_s cn68xx;
- struct cvmx_pemx_tlp_credits_s cn68xxp1;
- struct cvmx_pemx_tlp_credits_cn61xx cnf71xx;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pescx-defs.h b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
index 59b3dc565442..66561082529e 100644
--- a/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pescx-defs.h
@@ -80,7 +80,6 @@ union cvmx_pescx_bist_status {
uint64_t reserved_13_63:51;
#endif
} s;
- struct cvmx_pescx_bist_status_s cn52xx;
struct cvmx_pescx_bist_status_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -112,8 +111,6 @@ union cvmx_pescx_bist_status {
uint64_t reserved_12_63:52;
#endif
} cn52xxp1;
- struct cvmx_pescx_bist_status_s cn56xx;
- struct cvmx_pescx_bist_status_cn52xxp1 cn56xxp1;
};
union cvmx_pescx_bist_status2 {
@@ -153,10 +150,6 @@ union cvmx_pescx_bist_status2 {
uint64_t reserved_14_63:50;
#endif
} s;
- struct cvmx_pescx_bist_status2_s cn52xx;
- struct cvmx_pescx_bist_status2_s cn52xxp1;
- struct cvmx_pescx_bist_status2_s cn56xx;
- struct cvmx_pescx_bist_status2_s cn56xxp1;
};
union cvmx_pescx_cfg_rd {
@@ -170,10 +163,6 @@ union cvmx_pescx_cfg_rd {
uint64_t data:32;
#endif
} s;
- struct cvmx_pescx_cfg_rd_s cn52xx;
- struct cvmx_pescx_cfg_rd_s cn52xxp1;
- struct cvmx_pescx_cfg_rd_s cn56xx;
- struct cvmx_pescx_cfg_rd_s cn56xxp1;
};
union cvmx_pescx_cfg_wr {
@@ -187,10 +176,6 @@ union cvmx_pescx_cfg_wr {
uint64_t data:32;
#endif
} s;
- struct cvmx_pescx_cfg_wr_s cn52xx;
- struct cvmx_pescx_cfg_wr_s cn52xxp1;
- struct cvmx_pescx_cfg_wr_s cn56xx;
- struct cvmx_pescx_cfg_wr_s cn56xxp1;
};
union cvmx_pescx_cpl_lut_valid {
@@ -204,10 +189,6 @@ union cvmx_pescx_cpl_lut_valid {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pescx_cpl_lut_valid_s cn52xx;
- struct cvmx_pescx_cpl_lut_valid_s cn52xxp1;
- struct cvmx_pescx_cpl_lut_valid_s cn56xx;
- struct cvmx_pescx_cpl_lut_valid_s cn56xxp1;
};
union cvmx_pescx_ctl_status {
@@ -249,8 +230,6 @@ union cvmx_pescx_ctl_status {
uint64_t reserved_28_63:36;
#endif
} s;
- struct cvmx_pescx_ctl_status_s cn52xx;
- struct cvmx_pescx_ctl_status_s cn52xxp1;
struct cvmx_pescx_ctl_status_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
@@ -288,7 +267,6 @@ union cvmx_pescx_ctl_status {
uint64_t reserved_28_63:36;
#endif
} cn56xx;
- struct cvmx_pescx_ctl_status_cn56xx cn56xxp1;
};
union cvmx_pescx_ctl_status2 {
@@ -304,7 +282,6 @@ union cvmx_pescx_ctl_status2 {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_pescx_ctl_status2_s cn52xx;
struct cvmx_pescx_ctl_status2_cn52xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63:63;
@@ -314,8 +291,6 @@ union cvmx_pescx_ctl_status2 {
uint64_t reserved_1_63:63;
#endif
} cn52xxp1;
- struct cvmx_pescx_ctl_status2_s cn56xx;
- struct cvmx_pescx_ctl_status2_cn52xxp1 cn56xxp1;
};
union cvmx_pescx_dbg_info {
@@ -389,10 +364,6 @@ union cvmx_pescx_dbg_info {
uint64_t reserved_31_63:33;
#endif
} s;
- struct cvmx_pescx_dbg_info_s cn52xx;
- struct cvmx_pescx_dbg_info_s cn52xxp1;
- struct cvmx_pescx_dbg_info_s cn56xx;
- struct cvmx_pescx_dbg_info_s cn56xxp1;
};
union cvmx_pescx_dbg_info_en {
@@ -466,10 +437,6 @@ union cvmx_pescx_dbg_info_en {
uint64_t reserved_31_63:33;
#endif
} s;
- struct cvmx_pescx_dbg_info_en_s cn52xx;
- struct cvmx_pescx_dbg_info_en_s cn52xxp1;
- struct cvmx_pescx_dbg_info_en_s cn56xx;
- struct cvmx_pescx_dbg_info_en_s cn56xxp1;
};
union cvmx_pescx_diag_status {
@@ -489,10 +456,6 @@ union cvmx_pescx_diag_status {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_pescx_diag_status_s cn52xx;
- struct cvmx_pescx_diag_status_s cn52xxp1;
- struct cvmx_pescx_diag_status_s cn56xx;
- struct cvmx_pescx_diag_status_s cn56xxp1;
};
union cvmx_pescx_p2n_bar0_start {
@@ -506,10 +469,6 @@ union cvmx_pescx_p2n_bar0_start {
uint64_t addr:50;
#endif
} s;
- struct cvmx_pescx_p2n_bar0_start_s cn52xx;
- struct cvmx_pescx_p2n_bar0_start_s cn52xxp1;
- struct cvmx_pescx_p2n_bar0_start_s cn56xx;
- struct cvmx_pescx_p2n_bar0_start_s cn56xxp1;
};
union cvmx_pescx_p2n_bar1_start {
@@ -523,10 +482,6 @@ union cvmx_pescx_p2n_bar1_start {
uint64_t addr:38;
#endif
} s;
- struct cvmx_pescx_p2n_bar1_start_s cn52xx;
- struct cvmx_pescx_p2n_bar1_start_s cn52xxp1;
- struct cvmx_pescx_p2n_bar1_start_s cn56xx;
- struct cvmx_pescx_p2n_bar1_start_s cn56xxp1;
};
union cvmx_pescx_p2n_bar2_start {
@@ -540,10 +495,6 @@ union cvmx_pescx_p2n_bar2_start {
uint64_t addr:25;
#endif
} s;
- struct cvmx_pescx_p2n_bar2_start_s cn52xx;
- struct cvmx_pescx_p2n_bar2_start_s cn52xxp1;
- struct cvmx_pescx_p2n_bar2_start_s cn56xx;
- struct cvmx_pescx_p2n_bar2_start_s cn56xxp1;
};
union cvmx_pescx_p2p_barx_end {
@@ -557,10 +508,6 @@ union cvmx_pescx_p2p_barx_end {
uint64_t addr:52;
#endif
} s;
- struct cvmx_pescx_p2p_barx_end_s cn52xx;
- struct cvmx_pescx_p2p_barx_end_s cn52xxp1;
- struct cvmx_pescx_p2p_barx_end_s cn56xx;
- struct cvmx_pescx_p2p_barx_end_s cn56xxp1;
};
union cvmx_pescx_p2p_barx_start {
@@ -574,10 +521,6 @@ union cvmx_pescx_p2p_barx_start {
uint64_t addr:52;
#endif
} s;
- struct cvmx_pescx_p2p_barx_start_s cn52xx;
- struct cvmx_pescx_p2p_barx_start_s cn52xxp1;
- struct cvmx_pescx_p2p_barx_start_s cn56xx;
- struct cvmx_pescx_p2p_barx_start_s cn56xxp1;
};
union cvmx_pescx_tlp_credits {
@@ -631,8 +574,6 @@ union cvmx_pescx_tlp_credits {
uint64_t reserved_38_63:26;
#endif
} cn52xxp1;
- struct cvmx_pescx_tlp_credits_cn52xx cn56xx;
- struct cvmx_pescx_tlp_credits_cn52xxp1 cn56xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pip-defs.h b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
index e975c7d2e485..e42f411bd2de 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
@@ -160,10 +160,6 @@ union cvmx_pip_alt_skip_cfgx {
uint64_t reserved_57_63:7;
#endif
} s;
- struct cvmx_pip_alt_skip_cfgx_s cn61xx;
- struct cvmx_pip_alt_skip_cfgx_s cn66xx;
- struct cvmx_pip_alt_skip_cfgx_s cn68xx;
- struct cvmx_pip_alt_skip_cfgx_s cnf71xx;
};
union cvmx_pip_bck_prs {
@@ -183,19 +179,6 @@ union cvmx_pip_bck_prs {
uint64_t bckprs:1;
#endif
} s;
- struct cvmx_pip_bck_prs_s cn38xx;
- struct cvmx_pip_bck_prs_s cn38xxp2;
- struct cvmx_pip_bck_prs_s cn56xx;
- struct cvmx_pip_bck_prs_s cn56xxp1;
- struct cvmx_pip_bck_prs_s cn58xx;
- struct cvmx_pip_bck_prs_s cn58xxp1;
- struct cvmx_pip_bck_prs_s cn61xx;
- struct cvmx_pip_bck_prs_s cn63xx;
- struct cvmx_pip_bck_prs_s cn63xxp1;
- struct cvmx_pip_bck_prs_s cn66xx;
- struct cvmx_pip_bck_prs_s cn68xx;
- struct cvmx_pip_bck_prs_s cn68xxp1;
- struct cvmx_pip_bck_prs_s cnf71xx;
};
union cvmx_pip_bist_status {
@@ -218,9 +201,6 @@ union cvmx_pip_bist_status {
uint64_t reserved_18_63:46;
#endif
} cn30xx;
- struct cvmx_pip_bist_status_cn30xx cn31xx;
- struct cvmx_pip_bist_status_cn30xx cn38xx;
- struct cvmx_pip_bist_status_cn30xx cn38xxp2;
struct cvmx_pip_bist_status_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63:47;
@@ -230,12 +210,6 @@ union cvmx_pip_bist_status {
uint64_t reserved_17_63:47;
#endif
} cn50xx;
- struct cvmx_pip_bist_status_cn30xx cn52xx;
- struct cvmx_pip_bist_status_cn30xx cn52xxp1;
- struct cvmx_pip_bist_status_cn30xx cn56xx;
- struct cvmx_pip_bist_status_cn30xx cn56xxp1;
- struct cvmx_pip_bist_status_cn30xx cn58xx;
- struct cvmx_pip_bist_status_cn30xx cn58xxp1;
struct cvmx_pip_bist_status_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -245,12 +219,6 @@ union cvmx_pip_bist_status {
uint64_t reserved_20_63:44;
#endif
} cn61xx;
- struct cvmx_pip_bist_status_cn30xx cn63xx;
- struct cvmx_pip_bist_status_cn30xx cn63xxp1;
- struct cvmx_pip_bist_status_cn61xx cn66xx;
- struct cvmx_pip_bist_status_s cn68xx;
- struct cvmx_pip_bist_status_cn61xx cn68xxp1;
- struct cvmx_pip_bist_status_cn61xx cnf71xx;
};
union cvmx_pip_bsel_ext_cfgx {
@@ -274,9 +242,6 @@ union cvmx_pip_bsel_ext_cfgx {
uint64_t reserved_56_63:8;
#endif
} s;
- struct cvmx_pip_bsel_ext_cfgx_s cn61xx;
- struct cvmx_pip_bsel_ext_cfgx_s cn68xx;
- struct cvmx_pip_bsel_ext_cfgx_s cnf71xx;
};
union cvmx_pip_bsel_ext_posx {
@@ -318,9 +283,6 @@ union cvmx_pip_bsel_ext_posx {
uint64_t pos7_val:1;
#endif
} s;
- struct cvmx_pip_bsel_ext_posx_s cn61xx;
- struct cvmx_pip_bsel_ext_posx_s cn68xx;
- struct cvmx_pip_bsel_ext_posx_s cnf71xx;
};
union cvmx_pip_bsel_tbl_entx {
@@ -383,8 +345,6 @@ union cvmx_pip_bsel_tbl_entx {
uint64_t tag_en:1;
#endif
} cn61xx;
- struct cvmx_pip_bsel_tbl_entx_s cn68xx;
- struct cvmx_pip_bsel_tbl_entx_cn61xx cnf71xx;
};
union cvmx_pip_clken {
@@ -398,13 +358,6 @@ union cvmx_pip_clken {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_pip_clken_s cn61xx;
- struct cvmx_pip_clken_s cn63xx;
- struct cvmx_pip_clken_s cn63xxp1;
- struct cvmx_pip_clken_s cn66xx;
- struct cvmx_pip_clken_s cn68xx;
- struct cvmx_pip_clken_s cn68xxp1;
- struct cvmx_pip_clken_s cnf71xx;
};
union cvmx_pip_crc_ctlx {
@@ -420,10 +373,6 @@ union cvmx_pip_crc_ctlx {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_pip_crc_ctlx_s cn38xx;
- struct cvmx_pip_crc_ctlx_s cn38xxp2;
- struct cvmx_pip_crc_ctlx_s cn58xx;
- struct cvmx_pip_crc_ctlx_s cn58xxp1;
};
union cvmx_pip_crc_ivx {
@@ -437,10 +386,6 @@ union cvmx_pip_crc_ivx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pip_crc_ivx_s cn38xx;
- struct cvmx_pip_crc_ivx_s cn38xxp2;
- struct cvmx_pip_crc_ivx_s cn58xx;
- struct cvmx_pip_crc_ivx_s cn58xxp1;
};
union cvmx_pip_dec_ipsecx {
@@ -458,24 +403,6 @@ union cvmx_pip_dec_ipsecx {
uint64_t reserved_18_63:46;
#endif
} s;
- struct cvmx_pip_dec_ipsecx_s cn30xx;
- struct cvmx_pip_dec_ipsecx_s cn31xx;
- struct cvmx_pip_dec_ipsecx_s cn38xx;
- struct cvmx_pip_dec_ipsecx_s cn38xxp2;
- struct cvmx_pip_dec_ipsecx_s cn50xx;
- struct cvmx_pip_dec_ipsecx_s cn52xx;
- struct cvmx_pip_dec_ipsecx_s cn52xxp1;
- struct cvmx_pip_dec_ipsecx_s cn56xx;
- struct cvmx_pip_dec_ipsecx_s cn56xxp1;
- struct cvmx_pip_dec_ipsecx_s cn58xx;
- struct cvmx_pip_dec_ipsecx_s cn58xxp1;
- struct cvmx_pip_dec_ipsecx_s cn61xx;
- struct cvmx_pip_dec_ipsecx_s cn63xx;
- struct cvmx_pip_dec_ipsecx_s cn63xxp1;
- struct cvmx_pip_dec_ipsecx_s cn66xx;
- struct cvmx_pip_dec_ipsecx_s cn68xx;
- struct cvmx_pip_dec_ipsecx_s cn68xxp1;
- struct cvmx_pip_dec_ipsecx_s cnf71xx;
};
union cvmx_pip_dsa_src_grp {
@@ -517,16 +444,6 @@ union cvmx_pip_dsa_src_grp {
uint64_t map15:4;
#endif
} s;
- struct cvmx_pip_dsa_src_grp_s cn52xx;
- struct cvmx_pip_dsa_src_grp_s cn52xxp1;
- struct cvmx_pip_dsa_src_grp_s cn56xx;
- struct cvmx_pip_dsa_src_grp_s cn61xx;
- struct cvmx_pip_dsa_src_grp_s cn63xx;
- struct cvmx_pip_dsa_src_grp_s cn63xxp1;
- struct cvmx_pip_dsa_src_grp_s cn66xx;
- struct cvmx_pip_dsa_src_grp_s cn68xx;
- struct cvmx_pip_dsa_src_grp_s cn68xxp1;
- struct cvmx_pip_dsa_src_grp_s cnf71xx;
};
union cvmx_pip_dsa_vid_grp {
@@ -568,16 +485,6 @@ union cvmx_pip_dsa_vid_grp {
uint64_t map15:4;
#endif
} s;
- struct cvmx_pip_dsa_vid_grp_s cn52xx;
- struct cvmx_pip_dsa_vid_grp_s cn52xxp1;
- struct cvmx_pip_dsa_vid_grp_s cn56xx;
- struct cvmx_pip_dsa_vid_grp_s cn61xx;
- struct cvmx_pip_dsa_vid_grp_s cn63xx;
- struct cvmx_pip_dsa_vid_grp_s cn63xxp1;
- struct cvmx_pip_dsa_vid_grp_s cn66xx;
- struct cvmx_pip_dsa_vid_grp_s cn68xx;
- struct cvmx_pip_dsa_vid_grp_s cn68xxp1;
- struct cvmx_pip_dsa_vid_grp_s cnf71xx;
};
union cvmx_pip_frm_len_chkx {
@@ -593,18 +500,6 @@ union cvmx_pip_frm_len_chkx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pip_frm_len_chkx_s cn50xx;
- struct cvmx_pip_frm_len_chkx_s cn52xx;
- struct cvmx_pip_frm_len_chkx_s cn52xxp1;
- struct cvmx_pip_frm_len_chkx_s cn56xx;
- struct cvmx_pip_frm_len_chkx_s cn56xxp1;
- struct cvmx_pip_frm_len_chkx_s cn61xx;
- struct cvmx_pip_frm_len_chkx_s cn63xx;
- struct cvmx_pip_frm_len_chkx_s cn63xxp1;
- struct cvmx_pip_frm_len_chkx_s cn66xx;
- struct cvmx_pip_frm_len_chkx_s cn68xx;
- struct cvmx_pip_frm_len_chkx_s cn68xxp1;
- struct cvmx_pip_frm_len_chkx_s cnf71xx;
};
union cvmx_pip_gbl_cfg {
@@ -630,24 +525,6 @@ union cvmx_pip_gbl_cfg {
uint64_t reserved_19_63:45;
#endif
} s;
- struct cvmx_pip_gbl_cfg_s cn30xx;
- struct cvmx_pip_gbl_cfg_s cn31xx;
- struct cvmx_pip_gbl_cfg_s cn38xx;
- struct cvmx_pip_gbl_cfg_s cn38xxp2;
- struct cvmx_pip_gbl_cfg_s cn50xx;
- struct cvmx_pip_gbl_cfg_s cn52xx;
- struct cvmx_pip_gbl_cfg_s cn52xxp1;
- struct cvmx_pip_gbl_cfg_s cn56xx;
- struct cvmx_pip_gbl_cfg_s cn56xxp1;
- struct cvmx_pip_gbl_cfg_s cn58xx;
- struct cvmx_pip_gbl_cfg_s cn58xxp1;
- struct cvmx_pip_gbl_cfg_s cn61xx;
- struct cvmx_pip_gbl_cfg_s cn63xx;
- struct cvmx_pip_gbl_cfg_s cn63xxp1;
- struct cvmx_pip_gbl_cfg_s cn66xx;
- struct cvmx_pip_gbl_cfg_s cn68xx;
- struct cvmx_pip_gbl_cfg_s cn68xxp1;
- struct cvmx_pip_gbl_cfg_s cnf71xx;
};
union cvmx_pip_gbl_ctl {
@@ -742,10 +619,6 @@ union cvmx_pip_gbl_ctl {
uint64_t reserved_17_63:47;
#endif
} cn30xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn31xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn38xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn38xxp2;
- struct cvmx_pip_gbl_ctl_cn30xx cn50xx;
struct cvmx_pip_gbl_ctl_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_27_63:37;
@@ -795,8 +668,6 @@ union cvmx_pip_gbl_ctl {
uint64_t reserved_27_63:37;
#endif
} cn52xx;
- struct cvmx_pip_gbl_ctl_cn52xx cn52xxp1;
- struct cvmx_pip_gbl_ctl_cn52xx cn56xx;
struct cvmx_pip_gbl_ctl_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_21_63:43;
@@ -838,8 +709,6 @@ union cvmx_pip_gbl_ctl {
uint64_t reserved_21_63:43;
#endif
} cn56xxp1;
- struct cvmx_pip_gbl_ctl_cn30xx cn58xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn58xxp1;
struct cvmx_pip_gbl_ctl_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
@@ -891,9 +760,6 @@ union cvmx_pip_gbl_ctl {
uint64_t reserved_28_63:36;
#endif
} cn61xx;
- struct cvmx_pip_gbl_ctl_cn61xx cn63xx;
- struct cvmx_pip_gbl_ctl_cn61xx cn63xxp1;
- struct cvmx_pip_gbl_ctl_cn61xx cn66xx;
struct cvmx_pip_gbl_ctl_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
@@ -990,7 +856,6 @@ union cvmx_pip_gbl_ctl {
uint64_t reserved_28_63:36;
#endif
} cn68xxp1;
- struct cvmx_pip_gbl_ctl_cn61xx cnf71xx;
};
union cvmx_pip_hg_pri_qos {
@@ -1012,14 +877,6 @@ union cvmx_pip_hg_pri_qos {
uint64_t reserved_13_63:51;
#endif
} s;
- struct cvmx_pip_hg_pri_qos_s cn52xx;
- struct cvmx_pip_hg_pri_qos_s cn52xxp1;
- struct cvmx_pip_hg_pri_qos_s cn56xx;
- struct cvmx_pip_hg_pri_qos_s cn61xx;
- struct cvmx_pip_hg_pri_qos_s cn63xx;
- struct cvmx_pip_hg_pri_qos_s cn63xxp1;
- struct cvmx_pip_hg_pri_qos_s cn66xx;
- struct cvmx_pip_hg_pri_qos_s cnf71xx;
};
union cvmx_pip_int_en {
@@ -1082,9 +939,6 @@ union cvmx_pip_int_en {
uint64_t reserved_9_63:55;
#endif
} cn30xx;
- struct cvmx_pip_int_en_cn30xx cn31xx;
- struct cvmx_pip_int_en_cn30xx cn38xx;
- struct cvmx_pip_int_en_cn30xx cn38xxp2;
struct cvmx_pip_int_en_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -1149,8 +1003,6 @@ union cvmx_pip_int_en {
uint64_t reserved_13_63:51;
#endif
} cn52xx;
- struct cvmx_pip_int_en_cn52xx cn52xxp1;
- struct cvmx_pip_int_en_s cn56xx;
struct cvmx_pip_int_en_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -1211,14 +1063,6 @@ union cvmx_pip_int_en {
uint64_t reserved_13_63:51;
#endif
} cn58xx;
- struct cvmx_pip_int_en_cn30xx cn58xxp1;
- struct cvmx_pip_int_en_s cn61xx;
- struct cvmx_pip_int_en_s cn63xx;
- struct cvmx_pip_int_en_s cn63xxp1;
- struct cvmx_pip_int_en_s cn66xx;
- struct cvmx_pip_int_en_s cn68xx;
- struct cvmx_pip_int_en_s cn68xxp1;
- struct cvmx_pip_int_en_s cnf71xx;
};
union cvmx_pip_int_reg {
@@ -1281,9 +1125,6 @@ union cvmx_pip_int_reg {
uint64_t reserved_9_63:55;
#endif
} cn30xx;
- struct cvmx_pip_int_reg_cn30xx cn31xx;
- struct cvmx_pip_int_reg_cn30xx cn38xx;
- struct cvmx_pip_int_reg_cn30xx cn38xxp2;
struct cvmx_pip_int_reg_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -1348,8 +1189,6 @@ union cvmx_pip_int_reg {
uint64_t reserved_13_63:51;
#endif
} cn52xx;
- struct cvmx_pip_int_reg_cn52xx cn52xxp1;
- struct cvmx_pip_int_reg_s cn56xx;
struct cvmx_pip_int_reg_cn56xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63:52;
@@ -1410,14 +1249,6 @@ union cvmx_pip_int_reg {
uint64_t reserved_13_63:51;
#endif
} cn58xx;
- struct cvmx_pip_int_reg_cn30xx cn58xxp1;
- struct cvmx_pip_int_reg_s cn61xx;
- struct cvmx_pip_int_reg_s cn63xx;
- struct cvmx_pip_int_reg_s cn63xxp1;
- struct cvmx_pip_int_reg_s cn66xx;
- struct cvmx_pip_int_reg_s cn68xx;
- struct cvmx_pip_int_reg_s cn68xxp1;
- struct cvmx_pip_int_reg_s cnf71xx;
};
union cvmx_pip_ip_offset {
@@ -1431,24 +1262,6 @@ union cvmx_pip_ip_offset {
uint64_t reserved_3_63:61;
#endif
} s;
- struct cvmx_pip_ip_offset_s cn30xx;
- struct cvmx_pip_ip_offset_s cn31xx;
- struct cvmx_pip_ip_offset_s cn38xx;
- struct cvmx_pip_ip_offset_s cn38xxp2;
- struct cvmx_pip_ip_offset_s cn50xx;
- struct cvmx_pip_ip_offset_s cn52xx;
- struct cvmx_pip_ip_offset_s cn52xxp1;
- struct cvmx_pip_ip_offset_s cn56xx;
- struct cvmx_pip_ip_offset_s cn56xxp1;
- struct cvmx_pip_ip_offset_s cn58xx;
- struct cvmx_pip_ip_offset_s cn58xxp1;
- struct cvmx_pip_ip_offset_s cn61xx;
- struct cvmx_pip_ip_offset_s cn63xx;
- struct cvmx_pip_ip_offset_s cn63xxp1;
- struct cvmx_pip_ip_offset_s cn66xx;
- struct cvmx_pip_ip_offset_s cn68xx;
- struct cvmx_pip_ip_offset_s cn68xxp1;
- struct cvmx_pip_ip_offset_s cnf71xx;
};
union cvmx_pip_pri_tblx {
@@ -1488,8 +1301,6 @@ union cvmx_pip_pri_tblx {
uint64_t diff2_padd:8;
#endif
} s;
- struct cvmx_pip_pri_tblx_s cn68xx;
- struct cvmx_pip_pri_tblx_s cn68xxp1;
};
union cvmx_pip_prt_cfgx {
@@ -1596,7 +1407,6 @@ union cvmx_pip_prt_cfgx {
uint64_t reserved_37_63:27;
#endif
} cn30xx;
- struct cvmx_pip_prt_cfgx_cn30xx cn31xx;
struct cvmx_pip_prt_cfgx_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_37_63:27;
@@ -1638,7 +1448,6 @@ union cvmx_pip_prt_cfgx {
uint64_t reserved_37_63:27;
#endif
} cn38xx;
- struct cvmx_pip_prt_cfgx_cn38xx cn38xxp2;
struct cvmx_pip_prt_cfgx_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_53_63:11;
@@ -1759,9 +1568,6 @@ union cvmx_pip_prt_cfgx {
uint64_t reserved_53_63:11;
#endif
} cn52xx;
- struct cvmx_pip_prt_cfgx_cn52xx cn52xxp1;
- struct cvmx_pip_prt_cfgx_cn52xx cn56xx;
- struct cvmx_pip_prt_cfgx_cn50xx cn56xxp1;
struct cvmx_pip_prt_cfgx_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_37_63:27;
@@ -1805,11 +1611,6 @@ union cvmx_pip_prt_cfgx {
uint64_t reserved_37_63:27;
#endif
} cn58xx;
- struct cvmx_pip_prt_cfgx_cn58xx cn58xxp1;
- struct cvmx_pip_prt_cfgx_cn52xx cn61xx;
- struct cvmx_pip_prt_cfgx_cn52xx cn63xx;
- struct cvmx_pip_prt_cfgx_cn52xx cn63xxp1;
- struct cvmx_pip_prt_cfgx_cn52xx cn66xx;
struct cvmx_pip_prt_cfgx_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_55_63:9;
@@ -1875,8 +1676,6 @@ union cvmx_pip_prt_cfgx {
uint64_t reserved_55_63:9;
#endif
} cn68xx;
- struct cvmx_pip_prt_cfgx_cn68xx cn68xxp1;
- struct cvmx_pip_prt_cfgx_cn52xx cnf71xx;
};
union cvmx_pip_prt_cfgbx {
@@ -1938,7 +1737,6 @@ union cvmx_pip_prt_cfgbx {
uint64_t reserved_39_63:25;
#endif
} cn66xx;
- struct cvmx_pip_prt_cfgbx_s cn68xx;
struct cvmx_pip_prt_cfgbx_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63:40;
@@ -1952,7 +1750,6 @@ union cvmx_pip_prt_cfgbx {
uint64_t reserved_24_63:40;
#endif
} cn68xxp1;
- struct cvmx_pip_prt_cfgbx_cn61xx cnf71xx;
};
union cvmx_pip_prt_tagx {
@@ -2083,9 +1880,6 @@ union cvmx_pip_prt_tagx {
uint64_t reserved_40_63:24;
#endif
} cn30xx;
- struct cvmx_pip_prt_tagx_cn30xx cn31xx;
- struct cvmx_pip_prt_tagx_cn30xx cn38xx;
- struct cvmx_pip_prt_tagx_cn30xx cn38xxp2;
struct cvmx_pip_prt_tagx_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_40_63:24;
@@ -2141,19 +1935,6 @@ union cvmx_pip_prt_tagx {
uint64_t reserved_40_63:24;
#endif
} cn50xx;
- struct cvmx_pip_prt_tagx_cn50xx cn52xx;
- struct cvmx_pip_prt_tagx_cn50xx cn52xxp1;
- struct cvmx_pip_prt_tagx_cn50xx cn56xx;
- struct cvmx_pip_prt_tagx_cn50xx cn56xxp1;
- struct cvmx_pip_prt_tagx_cn30xx cn58xx;
- struct cvmx_pip_prt_tagx_cn30xx cn58xxp1;
- struct cvmx_pip_prt_tagx_cn50xx cn61xx;
- struct cvmx_pip_prt_tagx_cn50xx cn63xx;
- struct cvmx_pip_prt_tagx_cn50xx cn63xxp1;
- struct cvmx_pip_prt_tagx_cn50xx cn66xx;
- struct cvmx_pip_prt_tagx_s cn68xx;
- struct cvmx_pip_prt_tagx_s cn68xxp1;
- struct cvmx_pip_prt_tagx_cn50xx cnf71xx;
};
union cvmx_pip_qos_diffx {
@@ -2167,22 +1948,6 @@ union cvmx_pip_qos_diffx {
uint64_t reserved_3_63:61;
#endif
} s;
- struct cvmx_pip_qos_diffx_s cn30xx;
- struct cvmx_pip_qos_diffx_s cn31xx;
- struct cvmx_pip_qos_diffx_s cn38xx;
- struct cvmx_pip_qos_diffx_s cn38xxp2;
- struct cvmx_pip_qos_diffx_s cn50xx;
- struct cvmx_pip_qos_diffx_s cn52xx;
- struct cvmx_pip_qos_diffx_s cn52xxp1;
- struct cvmx_pip_qos_diffx_s cn56xx;
- struct cvmx_pip_qos_diffx_s cn56xxp1;
- struct cvmx_pip_qos_diffx_s cn58xx;
- struct cvmx_pip_qos_diffx_s cn58xxp1;
- struct cvmx_pip_qos_diffx_s cn61xx;
- struct cvmx_pip_qos_diffx_s cn63xx;
- struct cvmx_pip_qos_diffx_s cn63xxp1;
- struct cvmx_pip_qos_diffx_s cn66xx;
- struct cvmx_pip_qos_diffx_s cnf71xx;
};
union cvmx_pip_qos_vlanx {
@@ -2209,21 +1974,6 @@ union cvmx_pip_qos_vlanx {
uint64_t reserved_3_63:61;
#endif
} cn30xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn31xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn38xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn38xxp2;
- struct cvmx_pip_qos_vlanx_cn30xx cn50xx;
- struct cvmx_pip_qos_vlanx_s cn52xx;
- struct cvmx_pip_qos_vlanx_s cn52xxp1;
- struct cvmx_pip_qos_vlanx_s cn56xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn56xxp1;
- struct cvmx_pip_qos_vlanx_cn30xx cn58xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn58xxp1;
- struct cvmx_pip_qos_vlanx_s cn61xx;
- struct cvmx_pip_qos_vlanx_s cn63xx;
- struct cvmx_pip_qos_vlanx_s cn63xxp1;
- struct cvmx_pip_qos_vlanx_s cn66xx;
- struct cvmx_pip_qos_vlanx_s cnf71xx;
};
union cvmx_pip_qos_watchx {
@@ -2274,9 +2024,6 @@ union cvmx_pip_qos_watchx {
uint64_t reserved_48_63:16;
#endif
} cn30xx;
- struct cvmx_pip_qos_watchx_cn30xx cn31xx;
- struct cvmx_pip_qos_watchx_cn30xx cn38xx;
- struct cvmx_pip_qos_watchx_cn30xx cn38xxp2;
struct cvmx_pip_qos_watchx_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_48_63:16;
@@ -2300,19 +2047,6 @@ union cvmx_pip_qos_watchx {
uint64_t reserved_48_63:16;
#endif
} cn50xx;
- struct cvmx_pip_qos_watchx_cn50xx cn52xx;
- struct cvmx_pip_qos_watchx_cn50xx cn52xxp1;
- struct cvmx_pip_qos_watchx_cn50xx cn56xx;
- struct cvmx_pip_qos_watchx_cn50xx cn56xxp1;
- struct cvmx_pip_qos_watchx_cn30xx cn58xx;
- struct cvmx_pip_qos_watchx_cn30xx cn58xxp1;
- struct cvmx_pip_qos_watchx_cn50xx cn61xx;
- struct cvmx_pip_qos_watchx_cn50xx cn63xx;
- struct cvmx_pip_qos_watchx_cn50xx cn63xxp1;
- struct cvmx_pip_qos_watchx_cn50xx cn66xx;
- struct cvmx_pip_qos_watchx_s cn68xx;
- struct cvmx_pip_qos_watchx_s cn68xxp1;
- struct cvmx_pip_qos_watchx_cn50xx cnf71xx;
};
union cvmx_pip_raw_word {
@@ -2326,24 +2060,6 @@ union cvmx_pip_raw_word {
uint64_t reserved_56_63:8;
#endif
} s;
- struct cvmx_pip_raw_word_s cn30xx;
- struct cvmx_pip_raw_word_s cn31xx;
- struct cvmx_pip_raw_word_s cn38xx;
- struct cvmx_pip_raw_word_s cn38xxp2;
- struct cvmx_pip_raw_word_s cn50xx;
- struct cvmx_pip_raw_word_s cn52xx;
- struct cvmx_pip_raw_word_s cn52xxp1;
- struct cvmx_pip_raw_word_s cn56xx;
- struct cvmx_pip_raw_word_s cn56xxp1;
- struct cvmx_pip_raw_word_s cn58xx;
- struct cvmx_pip_raw_word_s cn58xxp1;
- struct cvmx_pip_raw_word_s cn61xx;
- struct cvmx_pip_raw_word_s cn63xx;
- struct cvmx_pip_raw_word_s cn63xxp1;
- struct cvmx_pip_raw_word_s cn66xx;
- struct cvmx_pip_raw_word_s cn68xx;
- struct cvmx_pip_raw_word_s cn68xxp1;
- struct cvmx_pip_raw_word_s cnf71xx;
};
union cvmx_pip_sft_rst {
@@ -2357,23 +2073,6 @@ union cvmx_pip_sft_rst {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_pip_sft_rst_s cn30xx;
- struct cvmx_pip_sft_rst_s cn31xx;
- struct cvmx_pip_sft_rst_s cn38xx;
- struct cvmx_pip_sft_rst_s cn50xx;
- struct cvmx_pip_sft_rst_s cn52xx;
- struct cvmx_pip_sft_rst_s cn52xxp1;
- struct cvmx_pip_sft_rst_s cn56xx;
- struct cvmx_pip_sft_rst_s cn56xxp1;
- struct cvmx_pip_sft_rst_s cn58xx;
- struct cvmx_pip_sft_rst_s cn58xxp1;
- struct cvmx_pip_sft_rst_s cn61xx;
- struct cvmx_pip_sft_rst_s cn63xx;
- struct cvmx_pip_sft_rst_s cn63xxp1;
- struct cvmx_pip_sft_rst_s cn66xx;
- struct cvmx_pip_sft_rst_s cn68xx;
- struct cvmx_pip_sft_rst_s cn68xxp1;
- struct cvmx_pip_sft_rst_s cnf71xx;
};
union cvmx_pip_stat0_x {
@@ -2387,8 +2086,6 @@ union cvmx_pip_stat0_x {
uint64_t drp_pkts:32;
#endif
} s;
- struct cvmx_pip_stat0_x_s cn68xx;
- struct cvmx_pip_stat0_x_s cn68xxp1;
};
union cvmx_pip_stat0_prtx {
@@ -2402,22 +2099,6 @@ union cvmx_pip_stat0_prtx {
uint64_t drp_pkts:32;
#endif
} s;
- struct cvmx_pip_stat0_prtx_s cn30xx;
- struct cvmx_pip_stat0_prtx_s cn31xx;
- struct cvmx_pip_stat0_prtx_s cn38xx;
- struct cvmx_pip_stat0_prtx_s cn38xxp2;
- struct cvmx_pip_stat0_prtx_s cn50xx;
- struct cvmx_pip_stat0_prtx_s cn52xx;
- struct cvmx_pip_stat0_prtx_s cn52xxp1;
- struct cvmx_pip_stat0_prtx_s cn56xx;
- struct cvmx_pip_stat0_prtx_s cn56xxp1;
- struct cvmx_pip_stat0_prtx_s cn58xx;
- struct cvmx_pip_stat0_prtx_s cn58xxp1;
- struct cvmx_pip_stat0_prtx_s cn61xx;
- struct cvmx_pip_stat0_prtx_s cn63xx;
- struct cvmx_pip_stat0_prtx_s cn63xxp1;
- struct cvmx_pip_stat0_prtx_s cn66xx;
- struct cvmx_pip_stat0_prtx_s cnf71xx;
};
union cvmx_pip_stat10_x {
@@ -2431,8 +2112,6 @@ union cvmx_pip_stat10_x {
uint64_t bcast:32;
#endif
} s;
- struct cvmx_pip_stat10_x_s cn68xx;
- struct cvmx_pip_stat10_x_s cn68xxp1;
};
union cvmx_pip_stat10_prtx {
@@ -2446,15 +2125,6 @@ union cvmx_pip_stat10_prtx {
uint64_t bcast:32;
#endif
} s;
- struct cvmx_pip_stat10_prtx_s cn52xx;
- struct cvmx_pip_stat10_prtx_s cn52xxp1;
- struct cvmx_pip_stat10_prtx_s cn56xx;
- struct cvmx_pip_stat10_prtx_s cn56xxp1;
- struct cvmx_pip_stat10_prtx_s cn61xx;
- struct cvmx_pip_stat10_prtx_s cn63xx;
- struct cvmx_pip_stat10_prtx_s cn63xxp1;
- struct cvmx_pip_stat10_prtx_s cn66xx;
- struct cvmx_pip_stat10_prtx_s cnf71xx;
};
union cvmx_pip_stat11_x {
@@ -2468,8 +2138,6 @@ union cvmx_pip_stat11_x {
uint64_t bcast:32;
#endif
} s;
- struct cvmx_pip_stat11_x_s cn68xx;
- struct cvmx_pip_stat11_x_s cn68xxp1;
};
union cvmx_pip_stat11_prtx {
@@ -2483,15 +2151,6 @@ union cvmx_pip_stat11_prtx {
uint64_t bcast:32;
#endif
} s;
- struct cvmx_pip_stat11_prtx_s cn52xx;
- struct cvmx_pip_stat11_prtx_s cn52xxp1;
- struct cvmx_pip_stat11_prtx_s cn56xx;
- struct cvmx_pip_stat11_prtx_s cn56xxp1;
- struct cvmx_pip_stat11_prtx_s cn61xx;
- struct cvmx_pip_stat11_prtx_s cn63xx;
- struct cvmx_pip_stat11_prtx_s cn63xxp1;
- struct cvmx_pip_stat11_prtx_s cn66xx;
- struct cvmx_pip_stat11_prtx_s cnf71xx;
};
union cvmx_pip_stat1_x {
@@ -2505,8 +2164,6 @@ union cvmx_pip_stat1_x {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_pip_stat1_x_s cn68xx;
- struct cvmx_pip_stat1_x_s cn68xxp1;
};
union cvmx_pip_stat1_prtx {
@@ -2520,22 +2177,6 @@ union cvmx_pip_stat1_prtx {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_pip_stat1_prtx_s cn30xx;
- struct cvmx_pip_stat1_prtx_s cn31xx;
- struct cvmx_pip_stat1_prtx_s cn38xx;
- struct cvmx_pip_stat1_prtx_s cn38xxp2;
- struct cvmx_pip_stat1_prtx_s cn50xx;
- struct cvmx_pip_stat1_prtx_s cn52xx;
- struct cvmx_pip_stat1_prtx_s cn52xxp1;
- struct cvmx_pip_stat1_prtx_s cn56xx;
- struct cvmx_pip_stat1_prtx_s cn56xxp1;
- struct cvmx_pip_stat1_prtx_s cn58xx;
- struct cvmx_pip_stat1_prtx_s cn58xxp1;
- struct cvmx_pip_stat1_prtx_s cn61xx;
- struct cvmx_pip_stat1_prtx_s cn63xx;
- struct cvmx_pip_stat1_prtx_s cn63xxp1;
- struct cvmx_pip_stat1_prtx_s cn66xx;
- struct cvmx_pip_stat1_prtx_s cnf71xx;
};
union cvmx_pip_stat2_x {
@@ -2549,8 +2190,6 @@ union cvmx_pip_stat2_x {
uint64_t pkts:32;
#endif
} s;
- struct cvmx_pip_stat2_x_s cn68xx;
- struct cvmx_pip_stat2_x_s cn68xxp1;
};
union cvmx_pip_stat2_prtx {
@@ -2564,22 +2203,6 @@ union cvmx_pip_stat2_prtx {
uint64_t pkts:32;
#endif
} s;
- struct cvmx_pip_stat2_prtx_s cn30xx;
- struct cvmx_pip_stat2_prtx_s cn31xx;
- struct cvmx_pip_stat2_prtx_s cn38xx;
- struct cvmx_pip_stat2_prtx_s cn38xxp2;
- struct cvmx_pip_stat2_prtx_s cn50xx;
- struct cvmx_pip_stat2_prtx_s cn52xx;
- struct cvmx_pip_stat2_prtx_s cn52xxp1;
- struct cvmx_pip_stat2_prtx_s cn56xx;
- struct cvmx_pip_stat2_prtx_s cn56xxp1;
- struct cvmx_pip_stat2_prtx_s cn58xx;
- struct cvmx_pip_stat2_prtx_s cn58xxp1;
- struct cvmx_pip_stat2_prtx_s cn61xx;
- struct cvmx_pip_stat2_prtx_s cn63xx;
- struct cvmx_pip_stat2_prtx_s cn63xxp1;
- struct cvmx_pip_stat2_prtx_s cn66xx;
- struct cvmx_pip_stat2_prtx_s cnf71xx;
};
union cvmx_pip_stat3_x {
@@ -2593,8 +2216,6 @@ union cvmx_pip_stat3_x {
uint64_t bcst:32;
#endif
} s;
- struct cvmx_pip_stat3_x_s cn68xx;
- struct cvmx_pip_stat3_x_s cn68xxp1;
};
union cvmx_pip_stat3_prtx {
@@ -2608,22 +2229,6 @@ union cvmx_pip_stat3_prtx {
uint64_t bcst:32;
#endif
} s;
- struct cvmx_pip_stat3_prtx_s cn30xx;
- struct cvmx_pip_stat3_prtx_s cn31xx;
- struct cvmx_pip_stat3_prtx_s cn38xx;
- struct cvmx_pip_stat3_prtx_s cn38xxp2;
- struct cvmx_pip_stat3_prtx_s cn50xx;
- struct cvmx_pip_stat3_prtx_s cn52xx;
- struct cvmx_pip_stat3_prtx_s cn52xxp1;
- struct cvmx_pip_stat3_prtx_s cn56xx;
- struct cvmx_pip_stat3_prtx_s cn56xxp1;
- struct cvmx_pip_stat3_prtx_s cn58xx;
- struct cvmx_pip_stat3_prtx_s cn58xxp1;
- struct cvmx_pip_stat3_prtx_s cn61xx;
- struct cvmx_pip_stat3_prtx_s cn63xx;
- struct cvmx_pip_stat3_prtx_s cn63xxp1;
- struct cvmx_pip_stat3_prtx_s cn66xx;
- struct cvmx_pip_stat3_prtx_s cnf71xx;
};
union cvmx_pip_stat4_x {
@@ -2637,8 +2242,6 @@ union cvmx_pip_stat4_x {
uint64_t h65to127:32;
#endif
} s;
- struct cvmx_pip_stat4_x_s cn68xx;
- struct cvmx_pip_stat4_x_s cn68xxp1;
};
union cvmx_pip_stat4_prtx {
@@ -2652,22 +2255,6 @@ union cvmx_pip_stat4_prtx {
uint64_t h65to127:32;
#endif
} s;
- struct cvmx_pip_stat4_prtx_s cn30xx;
- struct cvmx_pip_stat4_prtx_s cn31xx;
- struct cvmx_pip_stat4_prtx_s cn38xx;
- struct cvmx_pip_stat4_prtx_s cn38xxp2;
- struct cvmx_pip_stat4_prtx_s cn50xx;
- struct cvmx_pip_stat4_prtx_s cn52xx;
- struct cvmx_pip_stat4_prtx_s cn52xxp1;
- struct cvmx_pip_stat4_prtx_s cn56xx;
- struct cvmx_pip_stat4_prtx_s cn56xxp1;
- struct cvmx_pip_stat4_prtx_s cn58xx;
- struct cvmx_pip_stat4_prtx_s cn58xxp1;
- struct cvmx_pip_stat4_prtx_s cn61xx;
- struct cvmx_pip_stat4_prtx_s cn63xx;
- struct cvmx_pip_stat4_prtx_s cn63xxp1;
- struct cvmx_pip_stat4_prtx_s cn66xx;
- struct cvmx_pip_stat4_prtx_s cnf71xx;
};
union cvmx_pip_stat5_x {
@@ -2681,8 +2268,6 @@ union cvmx_pip_stat5_x {
uint64_t h256to511:32;
#endif
} s;
- struct cvmx_pip_stat5_x_s cn68xx;
- struct cvmx_pip_stat5_x_s cn68xxp1;
};
union cvmx_pip_stat5_prtx {
@@ -2696,22 +2281,6 @@ union cvmx_pip_stat5_prtx {
uint64_t h256to511:32;
#endif
} s;
- struct cvmx_pip_stat5_prtx_s cn30xx;
- struct cvmx_pip_stat5_prtx_s cn31xx;
- struct cvmx_pip_stat5_prtx_s cn38xx;
- struct cvmx_pip_stat5_prtx_s cn38xxp2;
- struct cvmx_pip_stat5_prtx_s cn50xx;
- struct cvmx_pip_stat5_prtx_s cn52xx;
- struct cvmx_pip_stat5_prtx_s cn52xxp1;
- struct cvmx_pip_stat5_prtx_s cn56xx;
- struct cvmx_pip_stat5_prtx_s cn56xxp1;
- struct cvmx_pip_stat5_prtx_s cn58xx;
- struct cvmx_pip_stat5_prtx_s cn58xxp1;
- struct cvmx_pip_stat5_prtx_s cn61xx;
- struct cvmx_pip_stat5_prtx_s cn63xx;
- struct cvmx_pip_stat5_prtx_s cn63xxp1;
- struct cvmx_pip_stat5_prtx_s cn66xx;
- struct cvmx_pip_stat5_prtx_s cnf71xx;
};
union cvmx_pip_stat6_x {
@@ -2725,8 +2294,6 @@ union cvmx_pip_stat6_x {
uint64_t h1024to1518:32;
#endif
} s;
- struct cvmx_pip_stat6_x_s cn68xx;
- struct cvmx_pip_stat6_x_s cn68xxp1;
};
union cvmx_pip_stat6_prtx {
@@ -2740,22 +2307,6 @@ union cvmx_pip_stat6_prtx {
uint64_t h1024to1518:32;
#endif
} s;
- struct cvmx_pip_stat6_prtx_s cn30xx;
- struct cvmx_pip_stat6_prtx_s cn31xx;
- struct cvmx_pip_stat6_prtx_s cn38xx;
- struct cvmx_pip_stat6_prtx_s cn38xxp2;
- struct cvmx_pip_stat6_prtx_s cn50xx;
- struct cvmx_pip_stat6_prtx_s cn52xx;
- struct cvmx_pip_stat6_prtx_s cn52xxp1;
- struct cvmx_pip_stat6_prtx_s cn56xx;
- struct cvmx_pip_stat6_prtx_s cn56xxp1;
- struct cvmx_pip_stat6_prtx_s cn58xx;
- struct cvmx_pip_stat6_prtx_s cn58xxp1;
- struct cvmx_pip_stat6_prtx_s cn61xx;
- struct cvmx_pip_stat6_prtx_s cn63xx;
- struct cvmx_pip_stat6_prtx_s cn63xxp1;
- struct cvmx_pip_stat6_prtx_s cn66xx;
- struct cvmx_pip_stat6_prtx_s cnf71xx;
};
union cvmx_pip_stat7_x {
@@ -2769,8 +2320,6 @@ union cvmx_pip_stat7_x {
uint64_t fcs:32;
#endif
} s;
- struct cvmx_pip_stat7_x_s cn68xx;
- struct cvmx_pip_stat7_x_s cn68xxp1;
};
union cvmx_pip_stat7_prtx {
@@ -2784,22 +2333,6 @@ union cvmx_pip_stat7_prtx {
uint64_t fcs:32;
#endif
} s;
- struct cvmx_pip_stat7_prtx_s cn30xx;
- struct cvmx_pip_stat7_prtx_s cn31xx;
- struct cvmx_pip_stat7_prtx_s cn38xx;
- struct cvmx_pip_stat7_prtx_s cn38xxp2;
- struct cvmx_pip_stat7_prtx_s cn50xx;
- struct cvmx_pip_stat7_prtx_s cn52xx;
- struct cvmx_pip_stat7_prtx_s cn52xxp1;
- struct cvmx_pip_stat7_prtx_s cn56xx;
- struct cvmx_pip_stat7_prtx_s cn56xxp1;
- struct cvmx_pip_stat7_prtx_s cn58xx;
- struct cvmx_pip_stat7_prtx_s cn58xxp1;
- struct cvmx_pip_stat7_prtx_s cn61xx;
- struct cvmx_pip_stat7_prtx_s cn63xx;
- struct cvmx_pip_stat7_prtx_s cn63xxp1;
- struct cvmx_pip_stat7_prtx_s cn66xx;
- struct cvmx_pip_stat7_prtx_s cnf71xx;
};
union cvmx_pip_stat8_x {
@@ -2813,8 +2346,6 @@ union cvmx_pip_stat8_x {
uint64_t frag:32;
#endif
} s;
- struct cvmx_pip_stat8_x_s cn68xx;
- struct cvmx_pip_stat8_x_s cn68xxp1;
};
union cvmx_pip_stat8_prtx {
@@ -2828,22 +2359,6 @@ union cvmx_pip_stat8_prtx {
uint64_t frag:32;
#endif
} s;
- struct cvmx_pip_stat8_prtx_s cn30xx;
- struct cvmx_pip_stat8_prtx_s cn31xx;
- struct cvmx_pip_stat8_prtx_s cn38xx;
- struct cvmx_pip_stat8_prtx_s cn38xxp2;
- struct cvmx_pip_stat8_prtx_s cn50xx;
- struct cvmx_pip_stat8_prtx_s cn52xx;
- struct cvmx_pip_stat8_prtx_s cn52xxp1;
- struct cvmx_pip_stat8_prtx_s cn56xx;
- struct cvmx_pip_stat8_prtx_s cn56xxp1;
- struct cvmx_pip_stat8_prtx_s cn58xx;
- struct cvmx_pip_stat8_prtx_s cn58xxp1;
- struct cvmx_pip_stat8_prtx_s cn61xx;
- struct cvmx_pip_stat8_prtx_s cn63xx;
- struct cvmx_pip_stat8_prtx_s cn63xxp1;
- struct cvmx_pip_stat8_prtx_s cn66xx;
- struct cvmx_pip_stat8_prtx_s cnf71xx;
};
union cvmx_pip_stat9_x {
@@ -2857,8 +2372,6 @@ union cvmx_pip_stat9_x {
uint64_t jabber:32;
#endif
} s;
- struct cvmx_pip_stat9_x_s cn68xx;
- struct cvmx_pip_stat9_x_s cn68xxp1;
};
union cvmx_pip_stat9_prtx {
@@ -2872,22 +2385,6 @@ union cvmx_pip_stat9_prtx {
uint64_t jabber:32;
#endif
} s;
- struct cvmx_pip_stat9_prtx_s cn30xx;
- struct cvmx_pip_stat9_prtx_s cn31xx;
- struct cvmx_pip_stat9_prtx_s cn38xx;
- struct cvmx_pip_stat9_prtx_s cn38xxp2;
- struct cvmx_pip_stat9_prtx_s cn50xx;
- struct cvmx_pip_stat9_prtx_s cn52xx;
- struct cvmx_pip_stat9_prtx_s cn52xxp1;
- struct cvmx_pip_stat9_prtx_s cn56xx;
- struct cvmx_pip_stat9_prtx_s cn56xxp1;
- struct cvmx_pip_stat9_prtx_s cn58xx;
- struct cvmx_pip_stat9_prtx_s cn58xxp1;
- struct cvmx_pip_stat9_prtx_s cn61xx;
- struct cvmx_pip_stat9_prtx_s cn63xx;
- struct cvmx_pip_stat9_prtx_s cn63xxp1;
- struct cvmx_pip_stat9_prtx_s cn66xx;
- struct cvmx_pip_stat9_prtx_s cnf71xx;
};
union cvmx_pip_stat_ctl {
@@ -2914,23 +2411,6 @@ union cvmx_pip_stat_ctl {
uint64_t reserved_1_63:63;
#endif
} cn30xx;
- struct cvmx_pip_stat_ctl_cn30xx cn31xx;
- struct cvmx_pip_stat_ctl_cn30xx cn38xx;
- struct cvmx_pip_stat_ctl_cn30xx cn38xxp2;
- struct cvmx_pip_stat_ctl_cn30xx cn50xx;
- struct cvmx_pip_stat_ctl_cn30xx cn52xx;
- struct cvmx_pip_stat_ctl_cn30xx cn52xxp1;
- struct cvmx_pip_stat_ctl_cn30xx cn56xx;
- struct cvmx_pip_stat_ctl_cn30xx cn56xxp1;
- struct cvmx_pip_stat_ctl_cn30xx cn58xx;
- struct cvmx_pip_stat_ctl_cn30xx cn58xxp1;
- struct cvmx_pip_stat_ctl_cn30xx cn61xx;
- struct cvmx_pip_stat_ctl_cn30xx cn63xx;
- struct cvmx_pip_stat_ctl_cn30xx cn63xxp1;
- struct cvmx_pip_stat_ctl_cn30xx cn66xx;
- struct cvmx_pip_stat_ctl_s cn68xx;
- struct cvmx_pip_stat_ctl_s cn68xxp1;
- struct cvmx_pip_stat_ctl_cn30xx cnf71xx;
};
union cvmx_pip_stat_inb_errsx {
@@ -2944,22 +2424,6 @@ union cvmx_pip_stat_inb_errsx {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pip_stat_inb_errsx_s cn30xx;
- struct cvmx_pip_stat_inb_errsx_s cn31xx;
- struct cvmx_pip_stat_inb_errsx_s cn38xx;
- struct cvmx_pip_stat_inb_errsx_s cn38xxp2;
- struct cvmx_pip_stat_inb_errsx_s cn50xx;
- struct cvmx_pip_stat_inb_errsx_s cn52xx;
- struct cvmx_pip_stat_inb_errsx_s cn52xxp1;
- struct cvmx_pip_stat_inb_errsx_s cn56xx;
- struct cvmx_pip_stat_inb_errsx_s cn56xxp1;
- struct cvmx_pip_stat_inb_errsx_s cn58xx;
- struct cvmx_pip_stat_inb_errsx_s cn58xxp1;
- struct cvmx_pip_stat_inb_errsx_s cn61xx;
- struct cvmx_pip_stat_inb_errsx_s cn63xx;
- struct cvmx_pip_stat_inb_errsx_s cn63xxp1;
- struct cvmx_pip_stat_inb_errsx_s cn66xx;
- struct cvmx_pip_stat_inb_errsx_s cnf71xx;
};
union cvmx_pip_stat_inb_errs_pkndx {
@@ -2973,8 +2437,6 @@ union cvmx_pip_stat_inb_errs_pkndx {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pip_stat_inb_errs_pkndx_s cn68xx;
- struct cvmx_pip_stat_inb_errs_pkndx_s cn68xxp1;
};
union cvmx_pip_stat_inb_octsx {
@@ -2988,22 +2450,6 @@ union cvmx_pip_stat_inb_octsx {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_pip_stat_inb_octsx_s cn30xx;
- struct cvmx_pip_stat_inb_octsx_s cn31xx;
- struct cvmx_pip_stat_inb_octsx_s cn38xx;
- struct cvmx_pip_stat_inb_octsx_s cn38xxp2;
- struct cvmx_pip_stat_inb_octsx_s cn50xx;
- struct cvmx_pip_stat_inb_octsx_s cn52xx;
- struct cvmx_pip_stat_inb_octsx_s cn52xxp1;
- struct cvmx_pip_stat_inb_octsx_s cn56xx;
- struct cvmx_pip_stat_inb_octsx_s cn56xxp1;
- struct cvmx_pip_stat_inb_octsx_s cn58xx;
- struct cvmx_pip_stat_inb_octsx_s cn58xxp1;
- struct cvmx_pip_stat_inb_octsx_s cn61xx;
- struct cvmx_pip_stat_inb_octsx_s cn63xx;
- struct cvmx_pip_stat_inb_octsx_s cn63xxp1;
- struct cvmx_pip_stat_inb_octsx_s cn66xx;
- struct cvmx_pip_stat_inb_octsx_s cnf71xx;
};
union cvmx_pip_stat_inb_octs_pkndx {
@@ -3017,8 +2463,6 @@ union cvmx_pip_stat_inb_octs_pkndx {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_pip_stat_inb_octs_pkndx_s cn68xx;
- struct cvmx_pip_stat_inb_octs_pkndx_s cn68xxp1;
};
union cvmx_pip_stat_inb_pktsx {
@@ -3032,22 +2476,6 @@ union cvmx_pip_stat_inb_pktsx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pip_stat_inb_pktsx_s cn30xx;
- struct cvmx_pip_stat_inb_pktsx_s cn31xx;
- struct cvmx_pip_stat_inb_pktsx_s cn38xx;
- struct cvmx_pip_stat_inb_pktsx_s cn38xxp2;
- struct cvmx_pip_stat_inb_pktsx_s cn50xx;
- struct cvmx_pip_stat_inb_pktsx_s cn52xx;
- struct cvmx_pip_stat_inb_pktsx_s cn52xxp1;
- struct cvmx_pip_stat_inb_pktsx_s cn56xx;
- struct cvmx_pip_stat_inb_pktsx_s cn56xxp1;
- struct cvmx_pip_stat_inb_pktsx_s cn58xx;
- struct cvmx_pip_stat_inb_pktsx_s cn58xxp1;
- struct cvmx_pip_stat_inb_pktsx_s cn61xx;
- struct cvmx_pip_stat_inb_pktsx_s cn63xx;
- struct cvmx_pip_stat_inb_pktsx_s cn63xxp1;
- struct cvmx_pip_stat_inb_pktsx_s cn66xx;
- struct cvmx_pip_stat_inb_pktsx_s cnf71xx;
};
union cvmx_pip_stat_inb_pkts_pkndx {
@@ -3061,8 +2489,6 @@ union cvmx_pip_stat_inb_pkts_pkndx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pip_stat_inb_pkts_pkndx_s cn68xx;
- struct cvmx_pip_stat_inb_pkts_pkndx_s cn68xxp1;
};
union cvmx_pip_sub_pkind_fcsx {
@@ -3074,8 +2500,6 @@ union cvmx_pip_sub_pkind_fcsx {
uint64_t port_bit:64;
#endif
} s;
- struct cvmx_pip_sub_pkind_fcsx_s cn68xx;
- struct cvmx_pip_sub_pkind_fcsx_s cn68xxp1;
};
union cvmx_pip_tag_incx {
@@ -3089,24 +2513,6 @@ union cvmx_pip_tag_incx {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_pip_tag_incx_s cn30xx;
- struct cvmx_pip_tag_incx_s cn31xx;
- struct cvmx_pip_tag_incx_s cn38xx;
- struct cvmx_pip_tag_incx_s cn38xxp2;
- struct cvmx_pip_tag_incx_s cn50xx;
- struct cvmx_pip_tag_incx_s cn52xx;
- struct cvmx_pip_tag_incx_s cn52xxp1;
- struct cvmx_pip_tag_incx_s cn56xx;
- struct cvmx_pip_tag_incx_s cn56xxp1;
- struct cvmx_pip_tag_incx_s cn58xx;
- struct cvmx_pip_tag_incx_s cn58xxp1;
- struct cvmx_pip_tag_incx_s cn61xx;
- struct cvmx_pip_tag_incx_s cn63xx;
- struct cvmx_pip_tag_incx_s cn63xxp1;
- struct cvmx_pip_tag_incx_s cn66xx;
- struct cvmx_pip_tag_incx_s cn68xx;
- struct cvmx_pip_tag_incx_s cn68xxp1;
- struct cvmx_pip_tag_incx_s cnf71xx;
};
union cvmx_pip_tag_mask {
@@ -3120,24 +2526,6 @@ union cvmx_pip_tag_mask {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pip_tag_mask_s cn30xx;
- struct cvmx_pip_tag_mask_s cn31xx;
- struct cvmx_pip_tag_mask_s cn38xx;
- struct cvmx_pip_tag_mask_s cn38xxp2;
- struct cvmx_pip_tag_mask_s cn50xx;
- struct cvmx_pip_tag_mask_s cn52xx;
- struct cvmx_pip_tag_mask_s cn52xxp1;
- struct cvmx_pip_tag_mask_s cn56xx;
- struct cvmx_pip_tag_mask_s cn56xxp1;
- struct cvmx_pip_tag_mask_s cn58xx;
- struct cvmx_pip_tag_mask_s cn58xxp1;
- struct cvmx_pip_tag_mask_s cn61xx;
- struct cvmx_pip_tag_mask_s cn63xx;
- struct cvmx_pip_tag_mask_s cn63xxp1;
- struct cvmx_pip_tag_mask_s cn66xx;
- struct cvmx_pip_tag_mask_s cn68xx;
- struct cvmx_pip_tag_mask_s cn68xxp1;
- struct cvmx_pip_tag_mask_s cnf71xx;
};
union cvmx_pip_tag_secret {
@@ -3153,24 +2541,6 @@ union cvmx_pip_tag_secret {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pip_tag_secret_s cn30xx;
- struct cvmx_pip_tag_secret_s cn31xx;
- struct cvmx_pip_tag_secret_s cn38xx;
- struct cvmx_pip_tag_secret_s cn38xxp2;
- struct cvmx_pip_tag_secret_s cn50xx;
- struct cvmx_pip_tag_secret_s cn52xx;
- struct cvmx_pip_tag_secret_s cn52xxp1;
- struct cvmx_pip_tag_secret_s cn56xx;
- struct cvmx_pip_tag_secret_s cn56xxp1;
- struct cvmx_pip_tag_secret_s cn58xx;
- struct cvmx_pip_tag_secret_s cn58xxp1;
- struct cvmx_pip_tag_secret_s cn61xx;
- struct cvmx_pip_tag_secret_s cn63xx;
- struct cvmx_pip_tag_secret_s cn63xxp1;
- struct cvmx_pip_tag_secret_s cn66xx;
- struct cvmx_pip_tag_secret_s cn68xx;
- struct cvmx_pip_tag_secret_s cn68xxp1;
- struct cvmx_pip_tag_secret_s cnf71xx;
};
union cvmx_pip_todo_entry {
@@ -3186,24 +2556,6 @@ union cvmx_pip_todo_entry {
uint64_t val:1;
#endif
} s;
- struct cvmx_pip_todo_entry_s cn30xx;
- struct cvmx_pip_todo_entry_s cn31xx;
- struct cvmx_pip_todo_entry_s cn38xx;
- struct cvmx_pip_todo_entry_s cn38xxp2;
- struct cvmx_pip_todo_entry_s cn50xx;
- struct cvmx_pip_todo_entry_s cn52xx;
- struct cvmx_pip_todo_entry_s cn52xxp1;
- struct cvmx_pip_todo_entry_s cn56xx;
- struct cvmx_pip_todo_entry_s cn56xxp1;
- struct cvmx_pip_todo_entry_s cn58xx;
- struct cvmx_pip_todo_entry_s cn58xxp1;
- struct cvmx_pip_todo_entry_s cn61xx;
- struct cvmx_pip_todo_entry_s cn63xx;
- struct cvmx_pip_todo_entry_s cn63xxp1;
- struct cvmx_pip_todo_entry_s cn66xx;
- struct cvmx_pip_todo_entry_s cn68xx;
- struct cvmx_pip_todo_entry_s cn68xxp1;
- struct cvmx_pip_todo_entry_s cnf71xx;
};
union cvmx_pip_vlan_etypesx {
@@ -3221,10 +2573,6 @@ union cvmx_pip_vlan_etypesx {
uint64_t type3:16;
#endif
} s;
- struct cvmx_pip_vlan_etypesx_s cn61xx;
- struct cvmx_pip_vlan_etypesx_s cn66xx;
- struct cvmx_pip_vlan_etypesx_s cn68xx;
- struct cvmx_pip_vlan_etypesx_s cnf71xx;
};
union cvmx_pip_xstat0_prtx {
@@ -3238,9 +2586,6 @@ union cvmx_pip_xstat0_prtx {
uint64_t drp_pkts:32;
#endif
} s;
- struct cvmx_pip_xstat0_prtx_s cn63xx;
- struct cvmx_pip_xstat0_prtx_s cn63xxp1;
- struct cvmx_pip_xstat0_prtx_s cn66xx;
};
union cvmx_pip_xstat10_prtx {
@@ -3254,9 +2599,6 @@ union cvmx_pip_xstat10_prtx {
uint64_t bcast:32;
#endif
} s;
- struct cvmx_pip_xstat10_prtx_s cn63xx;
- struct cvmx_pip_xstat10_prtx_s cn63xxp1;
- struct cvmx_pip_xstat10_prtx_s cn66xx;
};
union cvmx_pip_xstat11_prtx {
@@ -3270,9 +2612,6 @@ union cvmx_pip_xstat11_prtx {
uint64_t bcast:32;
#endif
} s;
- struct cvmx_pip_xstat11_prtx_s cn63xx;
- struct cvmx_pip_xstat11_prtx_s cn63xxp1;
- struct cvmx_pip_xstat11_prtx_s cn66xx;
};
union cvmx_pip_xstat1_prtx {
@@ -3286,9 +2625,6 @@ union cvmx_pip_xstat1_prtx {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_pip_xstat1_prtx_s cn63xx;
- struct cvmx_pip_xstat1_prtx_s cn63xxp1;
- struct cvmx_pip_xstat1_prtx_s cn66xx;
};
union cvmx_pip_xstat2_prtx {
@@ -3302,9 +2638,6 @@ union cvmx_pip_xstat2_prtx {
uint64_t pkts:32;
#endif
} s;
- struct cvmx_pip_xstat2_prtx_s cn63xx;
- struct cvmx_pip_xstat2_prtx_s cn63xxp1;
- struct cvmx_pip_xstat2_prtx_s cn66xx;
};
union cvmx_pip_xstat3_prtx {
@@ -3318,9 +2651,6 @@ union cvmx_pip_xstat3_prtx {
uint64_t bcst:32;
#endif
} s;
- struct cvmx_pip_xstat3_prtx_s cn63xx;
- struct cvmx_pip_xstat3_prtx_s cn63xxp1;
- struct cvmx_pip_xstat3_prtx_s cn66xx;
};
union cvmx_pip_xstat4_prtx {
@@ -3334,9 +2664,6 @@ union cvmx_pip_xstat4_prtx {
uint64_t h65to127:32;
#endif
} s;
- struct cvmx_pip_xstat4_prtx_s cn63xx;
- struct cvmx_pip_xstat4_prtx_s cn63xxp1;
- struct cvmx_pip_xstat4_prtx_s cn66xx;
};
union cvmx_pip_xstat5_prtx {
@@ -3350,9 +2677,6 @@ union cvmx_pip_xstat5_prtx {
uint64_t h256to511:32;
#endif
} s;
- struct cvmx_pip_xstat5_prtx_s cn63xx;
- struct cvmx_pip_xstat5_prtx_s cn63xxp1;
- struct cvmx_pip_xstat5_prtx_s cn66xx;
};
union cvmx_pip_xstat6_prtx {
@@ -3366,9 +2690,6 @@ union cvmx_pip_xstat6_prtx {
uint64_t h1024to1518:32;
#endif
} s;
- struct cvmx_pip_xstat6_prtx_s cn63xx;
- struct cvmx_pip_xstat6_prtx_s cn63xxp1;
- struct cvmx_pip_xstat6_prtx_s cn66xx;
};
union cvmx_pip_xstat7_prtx {
@@ -3382,9 +2703,6 @@ union cvmx_pip_xstat7_prtx {
uint64_t fcs:32;
#endif
} s;
- struct cvmx_pip_xstat7_prtx_s cn63xx;
- struct cvmx_pip_xstat7_prtx_s cn63xxp1;
- struct cvmx_pip_xstat7_prtx_s cn66xx;
};
union cvmx_pip_xstat8_prtx {
@@ -3398,9 +2716,6 @@ union cvmx_pip_xstat8_prtx {
uint64_t frag:32;
#endif
} s;
- struct cvmx_pip_xstat8_prtx_s cn63xx;
- struct cvmx_pip_xstat8_prtx_s cn63xxp1;
- struct cvmx_pip_xstat8_prtx_s cn66xx;
};
union cvmx_pip_xstat9_prtx {
@@ -3414,9 +2729,6 @@ union cvmx_pip_xstat9_prtx {
uint64_t jabber:32;
#endif
} s;
- struct cvmx_pip_xstat9_prtx_s cn63xx;
- struct cvmx_pip_xstat9_prtx_s cn63xxp1;
- struct cvmx_pip_xstat9_prtx_s cn66xx;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pko-defs.h b/arch/mips/include/asm/octeon/cvmx-pko-defs.h
index 87c3b970cad4..7e14c0d328f1 100644
--- a/arch/mips/include/asm/octeon/cvmx-pko-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pko-defs.h
@@ -97,24 +97,6 @@ union cvmx_pko_mem_count0 {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pko_mem_count0_s cn30xx;
- struct cvmx_pko_mem_count0_s cn31xx;
- struct cvmx_pko_mem_count0_s cn38xx;
- struct cvmx_pko_mem_count0_s cn38xxp2;
- struct cvmx_pko_mem_count0_s cn50xx;
- struct cvmx_pko_mem_count0_s cn52xx;
- struct cvmx_pko_mem_count0_s cn52xxp1;
- struct cvmx_pko_mem_count0_s cn56xx;
- struct cvmx_pko_mem_count0_s cn56xxp1;
- struct cvmx_pko_mem_count0_s cn58xx;
- struct cvmx_pko_mem_count0_s cn58xxp1;
- struct cvmx_pko_mem_count0_s cn61xx;
- struct cvmx_pko_mem_count0_s cn63xx;
- struct cvmx_pko_mem_count0_s cn63xxp1;
- struct cvmx_pko_mem_count0_s cn66xx;
- struct cvmx_pko_mem_count0_s cn68xx;
- struct cvmx_pko_mem_count0_s cn68xxp1;
- struct cvmx_pko_mem_count0_s cnf71xx;
};
union cvmx_pko_mem_count1 {
@@ -128,24 +110,6 @@ union cvmx_pko_mem_count1 {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_pko_mem_count1_s cn30xx;
- struct cvmx_pko_mem_count1_s cn31xx;
- struct cvmx_pko_mem_count1_s cn38xx;
- struct cvmx_pko_mem_count1_s cn38xxp2;
- struct cvmx_pko_mem_count1_s cn50xx;
- struct cvmx_pko_mem_count1_s cn52xx;
- struct cvmx_pko_mem_count1_s cn52xxp1;
- struct cvmx_pko_mem_count1_s cn56xx;
- struct cvmx_pko_mem_count1_s cn56xxp1;
- struct cvmx_pko_mem_count1_s cn58xx;
- struct cvmx_pko_mem_count1_s cn58xxp1;
- struct cvmx_pko_mem_count1_s cn61xx;
- struct cvmx_pko_mem_count1_s cn63xx;
- struct cvmx_pko_mem_count1_s cn63xxp1;
- struct cvmx_pko_mem_count1_s cn66xx;
- struct cvmx_pko_mem_count1_s cn68xx;
- struct cvmx_pko_mem_count1_s cn68xxp1;
- struct cvmx_pko_mem_count1_s cnf71xx;
};
union cvmx_pko_mem_debug0 {
@@ -163,24 +127,6 @@ union cvmx_pko_mem_debug0 {
uint64_t fau:28;
#endif
} s;
- struct cvmx_pko_mem_debug0_s cn30xx;
- struct cvmx_pko_mem_debug0_s cn31xx;
- struct cvmx_pko_mem_debug0_s cn38xx;
- struct cvmx_pko_mem_debug0_s cn38xxp2;
- struct cvmx_pko_mem_debug0_s cn50xx;
- struct cvmx_pko_mem_debug0_s cn52xx;
- struct cvmx_pko_mem_debug0_s cn52xxp1;
- struct cvmx_pko_mem_debug0_s cn56xx;
- struct cvmx_pko_mem_debug0_s cn56xxp1;
- struct cvmx_pko_mem_debug0_s cn58xx;
- struct cvmx_pko_mem_debug0_s cn58xxp1;
- struct cvmx_pko_mem_debug0_s cn61xx;
- struct cvmx_pko_mem_debug0_s cn63xx;
- struct cvmx_pko_mem_debug0_s cn63xxp1;
- struct cvmx_pko_mem_debug0_s cn66xx;
- struct cvmx_pko_mem_debug0_s cn68xx;
- struct cvmx_pko_mem_debug0_s cn68xxp1;
- struct cvmx_pko_mem_debug0_s cnf71xx;
};
union cvmx_pko_mem_debug1 {
@@ -200,24 +146,6 @@ union cvmx_pko_mem_debug1 {
uint64_t i:1;
#endif
} s;
- struct cvmx_pko_mem_debug1_s cn30xx;
- struct cvmx_pko_mem_debug1_s cn31xx;
- struct cvmx_pko_mem_debug1_s cn38xx;
- struct cvmx_pko_mem_debug1_s cn38xxp2;
- struct cvmx_pko_mem_debug1_s cn50xx;
- struct cvmx_pko_mem_debug1_s cn52xx;
- struct cvmx_pko_mem_debug1_s cn52xxp1;
- struct cvmx_pko_mem_debug1_s cn56xx;
- struct cvmx_pko_mem_debug1_s cn56xxp1;
- struct cvmx_pko_mem_debug1_s cn58xx;
- struct cvmx_pko_mem_debug1_s cn58xxp1;
- struct cvmx_pko_mem_debug1_s cn61xx;
- struct cvmx_pko_mem_debug1_s cn63xx;
- struct cvmx_pko_mem_debug1_s cn63xxp1;
- struct cvmx_pko_mem_debug1_s cn66xx;
- struct cvmx_pko_mem_debug1_s cn68xx;
- struct cvmx_pko_mem_debug1_s cn68xxp1;
- struct cvmx_pko_mem_debug1_s cnf71xx;
};
union cvmx_pko_mem_debug10 {
@@ -242,9 +170,6 @@ union cvmx_pko_mem_debug10 {
uint64_t fau:28;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug10_cn30xx cn31xx;
- struct cvmx_pko_mem_debug10_cn30xx cn38xx;
- struct cvmx_pko_mem_debug10_cn30xx cn38xxp2;
struct cvmx_pko_mem_debug10_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_49_63:15;
@@ -258,19 +183,6 @@ union cvmx_pko_mem_debug10 {
uint64_t reserved_49_63:15;
#endif
} cn50xx;
- struct cvmx_pko_mem_debug10_cn50xx cn52xx;
- struct cvmx_pko_mem_debug10_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug10_cn50xx cn56xx;
- struct cvmx_pko_mem_debug10_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug10_cn50xx cn58xx;
- struct cvmx_pko_mem_debug10_cn50xx cn58xxp1;
- struct cvmx_pko_mem_debug10_cn50xx cn61xx;
- struct cvmx_pko_mem_debug10_cn50xx cn63xx;
- struct cvmx_pko_mem_debug10_cn50xx cn63xxp1;
- struct cvmx_pko_mem_debug10_cn50xx cn66xx;
- struct cvmx_pko_mem_debug10_cn50xx cn68xx;
- struct cvmx_pko_mem_debug10_cn50xx cn68xxp1;
- struct cvmx_pko_mem_debug10_cn50xx cnf71xx;
};
union cvmx_pko_mem_debug11 {
@@ -305,9 +217,6 @@ union cvmx_pko_mem_debug11 {
uint64_t i:1;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug11_cn30xx cn31xx;
- struct cvmx_pko_mem_debug11_cn30xx cn38xx;
- struct cvmx_pko_mem_debug11_cn30xx cn38xxp2;
struct cvmx_pko_mem_debug11_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_23_63:41;
@@ -329,19 +238,6 @@ union cvmx_pko_mem_debug11 {
uint64_t reserved_23_63:41;
#endif
} cn50xx;
- struct cvmx_pko_mem_debug11_cn50xx cn52xx;
- struct cvmx_pko_mem_debug11_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug11_cn50xx cn56xx;
- struct cvmx_pko_mem_debug11_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug11_cn50xx cn58xx;
- struct cvmx_pko_mem_debug11_cn50xx cn58xxp1;
- struct cvmx_pko_mem_debug11_cn50xx cn61xx;
- struct cvmx_pko_mem_debug11_cn50xx cn63xx;
- struct cvmx_pko_mem_debug11_cn50xx cn63xxp1;
- struct cvmx_pko_mem_debug11_cn50xx cn66xx;
- struct cvmx_pko_mem_debug11_cn50xx cn68xx;
- struct cvmx_pko_mem_debug11_cn50xx cn68xxp1;
- struct cvmx_pko_mem_debug11_cn50xx cnf71xx;
};
union cvmx_pko_mem_debug12 {
@@ -360,9 +256,6 @@ union cvmx_pko_mem_debug12 {
uint64_t data:64;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug12_cn30xx cn31xx;
- struct cvmx_pko_mem_debug12_cn30xx cn38xx;
- struct cvmx_pko_mem_debug12_cn30xx cn38xxp2;
struct cvmx_pko_mem_debug12_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t fau:28;
@@ -376,16 +269,6 @@ union cvmx_pko_mem_debug12 {
uint64_t fau:28;
#endif
} cn50xx;
- struct cvmx_pko_mem_debug12_cn50xx cn52xx;
- struct cvmx_pko_mem_debug12_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug12_cn50xx cn56xx;
- struct cvmx_pko_mem_debug12_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug12_cn50xx cn58xx;
- struct cvmx_pko_mem_debug12_cn50xx cn58xxp1;
- struct cvmx_pko_mem_debug12_cn50xx cn61xx;
- struct cvmx_pko_mem_debug12_cn50xx cn63xx;
- struct cvmx_pko_mem_debug12_cn50xx cn63xxp1;
- struct cvmx_pko_mem_debug12_cn50xx cn66xx;
struct cvmx_pko_mem_debug12_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t state:64;
@@ -393,8 +276,6 @@ union cvmx_pko_mem_debug12 {
uint64_t state:64;
#endif
} cn68xx;
- struct cvmx_pko_mem_debug12_cn68xx cn68xxp1;
- struct cvmx_pko_mem_debug12_cn50xx cnf71xx;
};
union cvmx_pko_mem_debug13 {
@@ -419,9 +300,6 @@ union cvmx_pko_mem_debug13 {
uint64_t reserved_51_63:13;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug13_cn30xx cn31xx;
- struct cvmx_pko_mem_debug13_cn30xx cn38xx;
- struct cvmx_pko_mem_debug13_cn30xx cn38xxp2;
struct cvmx_pko_mem_debug13_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t i:1;
@@ -437,16 +315,6 @@ union cvmx_pko_mem_debug13 {
uint64_t i:1;
#endif
} cn50xx;
- struct cvmx_pko_mem_debug13_cn50xx cn52xx;
- struct cvmx_pko_mem_debug13_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug13_cn50xx cn56xx;
- struct cvmx_pko_mem_debug13_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug13_cn50xx cn58xx;
- struct cvmx_pko_mem_debug13_cn50xx cn58xxp1;
- struct cvmx_pko_mem_debug13_cn50xx cn61xx;
- struct cvmx_pko_mem_debug13_cn50xx cn63xx;
- struct cvmx_pko_mem_debug13_cn50xx cn63xxp1;
- struct cvmx_pko_mem_debug13_cn50xx cn66xx;
struct cvmx_pko_mem_debug13_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t state:64;
@@ -454,8 +322,6 @@ union cvmx_pko_mem_debug13 {
uint64_t state:64;
#endif
} cn68xx;
- struct cvmx_pko_mem_debug13_cn68xx cn68xxp1;
- struct cvmx_pko_mem_debug13_cn50xx cnf71xx;
};
union cvmx_pko_mem_debug14 {
@@ -476,9 +342,6 @@ union cvmx_pko_mem_debug14 {
uint64_t reserved_17_63:47;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug14_cn30xx cn31xx;
- struct cvmx_pko_mem_debug14_cn30xx cn38xx;
- struct cvmx_pko_mem_debug14_cn30xx cn38xxp2;
struct cvmx_pko_mem_debug14_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t data:64;
@@ -486,14 +349,6 @@ union cvmx_pko_mem_debug14 {
uint64_t data:64;
#endif
} cn52xx;
- struct cvmx_pko_mem_debug14_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug14_cn52xx cn56xx;
- struct cvmx_pko_mem_debug14_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug14_cn52xx cn61xx;
- struct cvmx_pko_mem_debug14_cn52xx cn63xx;
- struct cvmx_pko_mem_debug14_cn52xx cn63xxp1;
- struct cvmx_pko_mem_debug14_cn52xx cn66xx;
- struct cvmx_pko_mem_debug14_cn52xx cnf71xx;
};
union cvmx_pko_mem_debug2 {
@@ -513,24 +368,6 @@ union cvmx_pko_mem_debug2 {
uint64_t i:1;
#endif
} s;
- struct cvmx_pko_mem_debug2_s cn30xx;
- struct cvmx_pko_mem_debug2_s cn31xx;
- struct cvmx_pko_mem_debug2_s cn38xx;
- struct cvmx_pko_mem_debug2_s cn38xxp2;
- struct cvmx_pko_mem_debug2_s cn50xx;
- struct cvmx_pko_mem_debug2_s cn52xx;
- struct cvmx_pko_mem_debug2_s cn52xxp1;
- struct cvmx_pko_mem_debug2_s cn56xx;
- struct cvmx_pko_mem_debug2_s cn56xxp1;
- struct cvmx_pko_mem_debug2_s cn58xx;
- struct cvmx_pko_mem_debug2_s cn58xxp1;
- struct cvmx_pko_mem_debug2_s cn61xx;
- struct cvmx_pko_mem_debug2_s cn63xx;
- struct cvmx_pko_mem_debug2_s cn63xxp1;
- struct cvmx_pko_mem_debug2_s cn66xx;
- struct cvmx_pko_mem_debug2_s cn68xx;
- struct cvmx_pko_mem_debug2_s cn68xxp1;
- struct cvmx_pko_mem_debug2_s cnf71xx;
};
union cvmx_pko_mem_debug3 {
@@ -557,9 +394,6 @@ union cvmx_pko_mem_debug3 {
uint64_t i:1;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug3_cn30xx cn31xx;
- struct cvmx_pko_mem_debug3_cn30xx cn38xx;
- struct cvmx_pko_mem_debug3_cn30xx cn38xxp2;
struct cvmx_pko_mem_debug3_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t data:64;
@@ -567,19 +401,6 @@ union cvmx_pko_mem_debug3 {
uint64_t data:64;
#endif
} cn50xx;
- struct cvmx_pko_mem_debug3_cn50xx cn52xx;
- struct cvmx_pko_mem_debug3_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug3_cn50xx cn56xx;
- struct cvmx_pko_mem_debug3_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug3_cn50xx cn58xx;
- struct cvmx_pko_mem_debug3_cn50xx cn58xxp1;
- struct cvmx_pko_mem_debug3_cn50xx cn61xx;
- struct cvmx_pko_mem_debug3_cn50xx cn63xx;
- struct cvmx_pko_mem_debug3_cn50xx cn63xxp1;
- struct cvmx_pko_mem_debug3_cn50xx cn66xx;
- struct cvmx_pko_mem_debug3_cn50xx cn68xx;
- struct cvmx_pko_mem_debug3_cn50xx cn68xxp1;
- struct cvmx_pko_mem_debug3_cn50xx cnf71xx;
};
union cvmx_pko_mem_debug4 {
@@ -598,9 +419,6 @@ union cvmx_pko_mem_debug4 {
uint64_t data:64;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug4_cn30xx cn31xx;
- struct cvmx_pko_mem_debug4_cn30xx cn38xx;
- struct cvmx_pko_mem_debug4_cn30xx cn38xxp2;
struct cvmx_pko_mem_debug4_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t cmnd_segs:3;
@@ -673,18 +491,6 @@ union cvmx_pko_mem_debug4 {
uint64_t curr_siz:8;
#endif
} cn52xx;
- struct cvmx_pko_mem_debug4_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug4_cn52xx cn56xx;
- struct cvmx_pko_mem_debug4_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug4_cn50xx cn58xx;
- struct cvmx_pko_mem_debug4_cn50xx cn58xxp1;
- struct cvmx_pko_mem_debug4_cn52xx cn61xx;
- struct cvmx_pko_mem_debug4_cn52xx cn63xx;
- struct cvmx_pko_mem_debug4_cn52xx cn63xxp1;
- struct cvmx_pko_mem_debug4_cn52xx cn66xx;
- struct cvmx_pko_mem_debug4_cn52xx cn68xx;
- struct cvmx_pko_mem_debug4_cn52xx cn68xxp1;
- struct cvmx_pko_mem_debug4_cn52xx cnf71xx;
};
union cvmx_pko_mem_debug5 {
@@ -739,9 +545,6 @@ union cvmx_pko_mem_debug5 {
uint64_t dwri_mod:1;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug5_cn30xx cn31xx;
- struct cvmx_pko_mem_debug5_cn30xx cn38xx;
- struct cvmx_pko_mem_debug5_cn30xx cn38xxp2;
struct cvmx_pko_mem_debug5_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t curr_ptr:29;
@@ -768,11 +571,6 @@ union cvmx_pko_mem_debug5 {
uint64_t reserved_54_63:10;
#endif
} cn52xx;
- struct cvmx_pko_mem_debug5_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug5_cn52xx cn56xx;
- struct cvmx_pko_mem_debug5_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug5_cn50xx cn58xx;
- struct cvmx_pko_mem_debug5_cn50xx cn58xxp1;
struct cvmx_pko_mem_debug5_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_56_63:8;
@@ -790,9 +588,6 @@ union cvmx_pko_mem_debug5 {
uint64_t reserved_56_63:8;
#endif
} cn61xx;
- struct cvmx_pko_mem_debug5_cn61xx cn63xx;
- struct cvmx_pko_mem_debug5_cn61xx cn63xxp1;
- struct cvmx_pko_mem_debug5_cn61xx cn66xx;
struct cvmx_pko_mem_debug5_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_57_63:7;
@@ -812,8 +607,6 @@ union cvmx_pko_mem_debug5 {
uint64_t reserved_57_63:7;
#endif
} cn68xx;
- struct cvmx_pko_mem_debug5_cn68xx cn68xxp1;
- struct cvmx_pko_mem_debug5_cn61xx cnf71xx;
};
union cvmx_pko_mem_debug6 {
@@ -866,9 +659,6 @@ union cvmx_pko_mem_debug6 {
uint64_t reserved_11_63:53;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug6_cn30xx cn31xx;
- struct cvmx_pko_mem_debug6_cn30xx cn38xx;
- struct cvmx_pko_mem_debug6_cn30xx cn38xxp2;
struct cvmx_pko_mem_debug6_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
@@ -909,18 +699,6 @@ union cvmx_pko_mem_debug6 {
uint64_t reserved_37_63:27;
#endif
} cn52xx;
- struct cvmx_pko_mem_debug6_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug6_cn52xx cn56xx;
- struct cvmx_pko_mem_debug6_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug6_cn50xx cn58xx;
- struct cvmx_pko_mem_debug6_cn50xx cn58xxp1;
- struct cvmx_pko_mem_debug6_cn52xx cn61xx;
- struct cvmx_pko_mem_debug6_cn52xx cn63xx;
- struct cvmx_pko_mem_debug6_cn52xx cn63xxp1;
- struct cvmx_pko_mem_debug6_cn52xx cn66xx;
- struct cvmx_pko_mem_debug6_cn52xx cn68xx;
- struct cvmx_pko_mem_debug6_cn52xx cn68xxp1;
- struct cvmx_pko_mem_debug6_cn52xx cnf71xx;
};
union cvmx_pko_mem_debug7 {
@@ -945,9 +723,6 @@ union cvmx_pko_mem_debug7 {
uint64_t reserved_58_63:6;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug7_cn30xx cn31xx;
- struct cvmx_pko_mem_debug7_cn30xx cn38xx;
- struct cvmx_pko_mem_debug7_cn30xx cn38xxp2;
struct cvmx_pko_mem_debug7_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t qos:5;
@@ -965,16 +740,6 @@ union cvmx_pko_mem_debug7 {
uint64_t qos:5;
#endif
} cn50xx;
- struct cvmx_pko_mem_debug7_cn50xx cn52xx;
- struct cvmx_pko_mem_debug7_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug7_cn50xx cn56xx;
- struct cvmx_pko_mem_debug7_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug7_cn50xx cn58xx;
- struct cvmx_pko_mem_debug7_cn50xx cn58xxp1;
- struct cvmx_pko_mem_debug7_cn50xx cn61xx;
- struct cvmx_pko_mem_debug7_cn50xx cn63xx;
- struct cvmx_pko_mem_debug7_cn50xx cn63xxp1;
- struct cvmx_pko_mem_debug7_cn50xx cn66xx;
struct cvmx_pko_mem_debug7_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t qos:3;
@@ -992,8 +757,6 @@ union cvmx_pko_mem_debug7 {
uint64_t qos:3;
#endif
} cn68xx;
- struct cvmx_pko_mem_debug7_cn68xx cn68xxp1;
- struct cvmx_pko_mem_debug7_cn50xx cnf71xx;
};
union cvmx_pko_mem_debug8 {
@@ -1028,9 +791,6 @@ union cvmx_pko_mem_debug8 {
uint64_t qos:5;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug8_cn30xx cn31xx;
- struct cvmx_pko_mem_debug8_cn30xx cn38xx;
- struct cvmx_pko_mem_debug8_cn30xx cn38xxp2;
struct cvmx_pko_mem_debug8_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
@@ -1073,11 +833,6 @@ union cvmx_pko_mem_debug8 {
uint64_t reserved_29_63:35;
#endif
} cn52xx;
- struct cvmx_pko_mem_debug8_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug8_cn52xx cn56xx;
- struct cvmx_pko_mem_debug8_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug8_cn50xx cn58xx;
- struct cvmx_pko_mem_debug8_cn50xx cn58xxp1;
struct cvmx_pko_mem_debug8_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_42_63:22;
@@ -1107,9 +862,6 @@ union cvmx_pko_mem_debug8 {
uint64_t reserved_42_63:22;
#endif
} cn61xx;
- struct cvmx_pko_mem_debug8_cn52xx cn63xx;
- struct cvmx_pko_mem_debug8_cn52xx cn63xxp1;
- struct cvmx_pko_mem_debug8_cn61xx cn66xx;
struct cvmx_pko_mem_debug8_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_37_63:27;
@@ -1133,8 +885,6 @@ union cvmx_pko_mem_debug8 {
uint64_t reserved_37_63:27;
#endif
} cn68xx;
- struct cvmx_pko_mem_debug8_cn68xx cn68xxp1;
- struct cvmx_pko_mem_debug8_cn61xx cnf71xx;
};
union cvmx_pko_mem_debug9 {
@@ -1167,7 +917,6 @@ union cvmx_pko_mem_debug9 {
uint64_t reserved_28_63:36;
#endif
} cn30xx;
- struct cvmx_pko_mem_debug9_cn30xx cn31xx;
struct cvmx_pko_mem_debug9_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
@@ -1187,7 +936,6 @@ union cvmx_pko_mem_debug9 {
uint64_t reserved_28_63:36;
#endif
} cn38xx;
- struct cvmx_pko_mem_debug9_cn38xx cn38xxp2;
struct cvmx_pko_mem_debug9_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_49_63:15;
@@ -1201,19 +949,6 @@ union cvmx_pko_mem_debug9 {
uint64_t reserved_49_63:15;
#endif
} cn50xx;
- struct cvmx_pko_mem_debug9_cn50xx cn52xx;
- struct cvmx_pko_mem_debug9_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug9_cn50xx cn56xx;
- struct cvmx_pko_mem_debug9_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug9_cn50xx cn58xx;
- struct cvmx_pko_mem_debug9_cn50xx cn58xxp1;
- struct cvmx_pko_mem_debug9_cn50xx cn61xx;
- struct cvmx_pko_mem_debug9_cn50xx cn63xx;
- struct cvmx_pko_mem_debug9_cn50xx cn63xxp1;
- struct cvmx_pko_mem_debug9_cn50xx cn66xx;
- struct cvmx_pko_mem_debug9_cn50xx cn68xx;
- struct cvmx_pko_mem_debug9_cn50xx cn68xxp1;
- struct cvmx_pko_mem_debug9_cn50xx cnf71xx;
};
union cvmx_pko_mem_iport_ptrs {
@@ -1249,8 +984,6 @@ union cvmx_pko_mem_iport_ptrs {
uint64_t reserved_63_63:1;
#endif
} s;
- struct cvmx_pko_mem_iport_ptrs_s cn68xx;
- struct cvmx_pko_mem_iport_ptrs_s cn68xxp1;
};
union cvmx_pko_mem_iport_qos {
@@ -1272,8 +1005,6 @@ union cvmx_pko_mem_iport_qos {
uint64_t reserved_61_63:3;
#endif
} s;
- struct cvmx_pko_mem_iport_qos_s cn68xx;
- struct cvmx_pko_mem_iport_qos_s cn68xxp1;
};
union cvmx_pko_mem_iqueue_ptrs {
@@ -1303,8 +1034,6 @@ union cvmx_pko_mem_iqueue_ptrs {
uint64_t s_tail:1;
#endif
} s;
- struct cvmx_pko_mem_iqueue_ptrs_s cn68xx;
- struct cvmx_pko_mem_iqueue_ptrs_s cn68xxp1;
};
union cvmx_pko_mem_iqueue_qos {
@@ -1324,8 +1053,6 @@ union cvmx_pko_mem_iqueue_qos {
uint64_t reserved_61_63:3;
#endif
} s;
- struct cvmx_pko_mem_iqueue_qos_s cn68xx;
- struct cvmx_pko_mem_iqueue_qos_s cn68xxp1;
};
union cvmx_pko_mem_port_ptrs {
@@ -1349,15 +1076,6 @@ union cvmx_pko_mem_port_ptrs {
uint64_t reserved_62_63:2;
#endif
} s;
- struct cvmx_pko_mem_port_ptrs_s cn52xx;
- struct cvmx_pko_mem_port_ptrs_s cn52xxp1;
- struct cvmx_pko_mem_port_ptrs_s cn56xx;
- struct cvmx_pko_mem_port_ptrs_s cn56xxp1;
- struct cvmx_pko_mem_port_ptrs_s cn61xx;
- struct cvmx_pko_mem_port_ptrs_s cn63xx;
- struct cvmx_pko_mem_port_ptrs_s cn63xxp1;
- struct cvmx_pko_mem_port_ptrs_s cn66xx;
- struct cvmx_pko_mem_port_ptrs_s cnf71xx;
};
union cvmx_pko_mem_port_qos {
@@ -1377,15 +1095,6 @@ union cvmx_pko_mem_port_qos {
uint64_t reserved_61_63:3;
#endif
} s;
- struct cvmx_pko_mem_port_qos_s cn52xx;
- struct cvmx_pko_mem_port_qos_s cn52xxp1;
- struct cvmx_pko_mem_port_qos_s cn56xx;
- struct cvmx_pko_mem_port_qos_s cn56xxp1;
- struct cvmx_pko_mem_port_qos_s cn61xx;
- struct cvmx_pko_mem_port_qos_s cn63xx;
- struct cvmx_pko_mem_port_qos_s cn63xxp1;
- struct cvmx_pko_mem_port_qos_s cn66xx;
- struct cvmx_pko_mem_port_qos_s cnf71xx;
};
union cvmx_pko_mem_port_rate0 {
@@ -1420,16 +1129,6 @@ union cvmx_pko_mem_port_rate0 {
uint64_t reserved_51_63:13;
#endif
} cn52xx;
- struct cvmx_pko_mem_port_rate0_cn52xx cn52xxp1;
- struct cvmx_pko_mem_port_rate0_cn52xx cn56xx;
- struct cvmx_pko_mem_port_rate0_cn52xx cn56xxp1;
- struct cvmx_pko_mem_port_rate0_cn52xx cn61xx;
- struct cvmx_pko_mem_port_rate0_cn52xx cn63xx;
- struct cvmx_pko_mem_port_rate0_cn52xx cn63xxp1;
- struct cvmx_pko_mem_port_rate0_cn52xx cn66xx;
- struct cvmx_pko_mem_port_rate0_s cn68xx;
- struct cvmx_pko_mem_port_rate0_s cn68xxp1;
- struct cvmx_pko_mem_port_rate0_cn52xx cnf71xx;
};
union cvmx_pko_mem_port_rate1 {
@@ -1460,16 +1159,6 @@ union cvmx_pko_mem_port_rate1 {
uint64_t reserved_32_63:32;
#endif
} cn52xx;
- struct cvmx_pko_mem_port_rate1_cn52xx cn52xxp1;
- struct cvmx_pko_mem_port_rate1_cn52xx cn56xx;
- struct cvmx_pko_mem_port_rate1_cn52xx cn56xxp1;
- struct cvmx_pko_mem_port_rate1_cn52xx cn61xx;
- struct cvmx_pko_mem_port_rate1_cn52xx cn63xx;
- struct cvmx_pko_mem_port_rate1_cn52xx cn63xxp1;
- struct cvmx_pko_mem_port_rate1_cn52xx cn66xx;
- struct cvmx_pko_mem_port_rate1_s cn68xx;
- struct cvmx_pko_mem_port_rate1_s cn68xxp1;
- struct cvmx_pko_mem_port_rate1_cn52xx cnf71xx;
};
union cvmx_pko_mem_queue_ptrs {
@@ -1497,22 +1186,6 @@ union cvmx_pko_mem_queue_ptrs {
uint64_t s_tail:1;
#endif
} s;
- struct cvmx_pko_mem_queue_ptrs_s cn30xx;
- struct cvmx_pko_mem_queue_ptrs_s cn31xx;
- struct cvmx_pko_mem_queue_ptrs_s cn38xx;
- struct cvmx_pko_mem_queue_ptrs_s cn38xxp2;
- struct cvmx_pko_mem_queue_ptrs_s cn50xx;
- struct cvmx_pko_mem_queue_ptrs_s cn52xx;
- struct cvmx_pko_mem_queue_ptrs_s cn52xxp1;
- struct cvmx_pko_mem_queue_ptrs_s cn56xx;
- struct cvmx_pko_mem_queue_ptrs_s cn56xxp1;
- struct cvmx_pko_mem_queue_ptrs_s cn58xx;
- struct cvmx_pko_mem_queue_ptrs_s cn58xxp1;
- struct cvmx_pko_mem_queue_ptrs_s cn61xx;
- struct cvmx_pko_mem_queue_ptrs_s cn63xx;
- struct cvmx_pko_mem_queue_ptrs_s cn63xxp1;
- struct cvmx_pko_mem_queue_ptrs_s cn66xx;
- struct cvmx_pko_mem_queue_ptrs_s cnf71xx;
};
union cvmx_pko_mem_queue_qos {
@@ -1532,22 +1205,6 @@ union cvmx_pko_mem_queue_qos {
uint64_t reserved_61_63:3;
#endif
} s;
- struct cvmx_pko_mem_queue_qos_s cn30xx;
- struct cvmx_pko_mem_queue_qos_s cn31xx;
- struct cvmx_pko_mem_queue_qos_s cn38xx;
- struct cvmx_pko_mem_queue_qos_s cn38xxp2;
- struct cvmx_pko_mem_queue_qos_s cn50xx;
- struct cvmx_pko_mem_queue_qos_s cn52xx;
- struct cvmx_pko_mem_queue_qos_s cn52xxp1;
- struct cvmx_pko_mem_queue_qos_s cn56xx;
- struct cvmx_pko_mem_queue_qos_s cn56xxp1;
- struct cvmx_pko_mem_queue_qos_s cn58xx;
- struct cvmx_pko_mem_queue_qos_s cn58xxp1;
- struct cvmx_pko_mem_queue_qos_s cn61xx;
- struct cvmx_pko_mem_queue_qos_s cn63xx;
- struct cvmx_pko_mem_queue_qos_s cn63xxp1;
- struct cvmx_pko_mem_queue_qos_s cn66xx;
- struct cvmx_pko_mem_queue_qos_s cnf71xx;
};
union cvmx_pko_mem_throttle_int {
@@ -1569,8 +1226,6 @@ union cvmx_pko_mem_throttle_int {
uint64_t reserved_47_63:17;
#endif
} s;
- struct cvmx_pko_mem_throttle_int_s cn68xx;
- struct cvmx_pko_mem_throttle_int_s cn68xxp1;
};
union cvmx_pko_mem_throttle_pipe {
@@ -1592,8 +1247,6 @@ union cvmx_pko_mem_throttle_pipe {
uint64_t reserved_47_63:17;
#endif
} s;
- struct cvmx_pko_mem_throttle_pipe_s cn68xx;
- struct cvmx_pko_mem_throttle_pipe_s cn68xxp1;
};
union cvmx_pko_reg_bist_result {
@@ -1636,9 +1289,6 @@ union cvmx_pko_reg_bist_result {
uint64_t reserved_27_63:37;
#endif
} cn30xx;
- struct cvmx_pko_reg_bist_result_cn30xx cn31xx;
- struct cvmx_pko_reg_bist_result_cn30xx cn38xx;
- struct cvmx_pko_reg_bist_result_cn30xx cn38xxp2;
struct cvmx_pko_reg_bist_result_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_33_63:31;
@@ -1711,15 +1361,6 @@ union cvmx_pko_reg_bist_result {
uint64_t reserved_35_63:29;
#endif
} cn52xx;
- struct cvmx_pko_reg_bist_result_cn52xx cn52xxp1;
- struct cvmx_pko_reg_bist_result_cn52xx cn56xx;
- struct cvmx_pko_reg_bist_result_cn52xx cn56xxp1;
- struct cvmx_pko_reg_bist_result_cn50xx cn58xx;
- struct cvmx_pko_reg_bist_result_cn50xx cn58xxp1;
- struct cvmx_pko_reg_bist_result_cn52xx cn61xx;
- struct cvmx_pko_reg_bist_result_cn52xx cn63xx;
- struct cvmx_pko_reg_bist_result_cn52xx cn63xxp1;
- struct cvmx_pko_reg_bist_result_cn52xx cn66xx;
struct cvmx_pko_reg_bist_result_cn68xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_36_63:28;
@@ -1808,7 +1449,6 @@ union cvmx_pko_reg_bist_result {
uint64_t reserved_35_63:29;
#endif
} cn68xxp1;
- struct cvmx_pko_reg_bist_result_cn52xx cnf71xx;
};
union cvmx_pko_reg_cmd_buf {
@@ -1826,24 +1466,6 @@ union cvmx_pko_reg_cmd_buf {
uint64_t reserved_23_63:41;
#endif
} s;
- struct cvmx_pko_reg_cmd_buf_s cn30xx;
- struct cvmx_pko_reg_cmd_buf_s cn31xx;
- struct cvmx_pko_reg_cmd_buf_s cn38xx;
- struct cvmx_pko_reg_cmd_buf_s cn38xxp2;
- struct cvmx_pko_reg_cmd_buf_s cn50xx;
- struct cvmx_pko_reg_cmd_buf_s cn52xx;
- struct cvmx_pko_reg_cmd_buf_s cn52xxp1;
- struct cvmx_pko_reg_cmd_buf_s cn56xx;
- struct cvmx_pko_reg_cmd_buf_s cn56xxp1;
- struct cvmx_pko_reg_cmd_buf_s cn58xx;
- struct cvmx_pko_reg_cmd_buf_s cn58xxp1;
- struct cvmx_pko_reg_cmd_buf_s cn61xx;
- struct cvmx_pko_reg_cmd_buf_s cn63xx;
- struct cvmx_pko_reg_cmd_buf_s cn63xxp1;
- struct cvmx_pko_reg_cmd_buf_s cn66xx;
- struct cvmx_pko_reg_cmd_buf_s cn68xx;
- struct cvmx_pko_reg_cmd_buf_s cn68xxp1;
- struct cvmx_pko_reg_cmd_buf_s cnf71xx;
};
union cvmx_pko_reg_crc_ctlx {
@@ -1859,10 +1481,6 @@ union cvmx_pko_reg_crc_ctlx {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_pko_reg_crc_ctlx_s cn38xx;
- struct cvmx_pko_reg_crc_ctlx_s cn38xxp2;
- struct cvmx_pko_reg_crc_ctlx_s cn58xx;
- struct cvmx_pko_reg_crc_ctlx_s cn58xxp1;
};
union cvmx_pko_reg_crc_enable {
@@ -1876,10 +1494,6 @@ union cvmx_pko_reg_crc_enable {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pko_reg_crc_enable_s cn38xx;
- struct cvmx_pko_reg_crc_enable_s cn38xxp2;
- struct cvmx_pko_reg_crc_enable_s cn58xx;
- struct cvmx_pko_reg_crc_enable_s cn58xxp1;
};
union cvmx_pko_reg_crc_ivx {
@@ -1893,10 +1507,6 @@ union cvmx_pko_reg_crc_ivx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pko_reg_crc_ivx_s cn38xx;
- struct cvmx_pko_reg_crc_ivx_s cn38xxp2;
- struct cvmx_pko_reg_crc_ivx_s cn58xx;
- struct cvmx_pko_reg_crc_ivx_s cn58xxp1;
};
union cvmx_pko_reg_debug0 {
@@ -1917,23 +1527,6 @@ union cvmx_pko_reg_debug0 {
uint64_t reserved_17_63:47;
#endif
} cn30xx;
- struct cvmx_pko_reg_debug0_cn30xx cn31xx;
- struct cvmx_pko_reg_debug0_cn30xx cn38xx;
- struct cvmx_pko_reg_debug0_cn30xx cn38xxp2;
- struct cvmx_pko_reg_debug0_s cn50xx;
- struct cvmx_pko_reg_debug0_s cn52xx;
- struct cvmx_pko_reg_debug0_s cn52xxp1;
- struct cvmx_pko_reg_debug0_s cn56xx;
- struct cvmx_pko_reg_debug0_s cn56xxp1;
- struct cvmx_pko_reg_debug0_s cn58xx;
- struct cvmx_pko_reg_debug0_s cn58xxp1;
- struct cvmx_pko_reg_debug0_s cn61xx;
- struct cvmx_pko_reg_debug0_s cn63xx;
- struct cvmx_pko_reg_debug0_s cn63xxp1;
- struct cvmx_pko_reg_debug0_s cn66xx;
- struct cvmx_pko_reg_debug0_s cn68xx;
- struct cvmx_pko_reg_debug0_s cn68xxp1;
- struct cvmx_pko_reg_debug0_s cnf71xx;
};
union cvmx_pko_reg_debug1 {
@@ -1945,20 +1538,6 @@ union cvmx_pko_reg_debug1 {
uint64_t asserts:64;
#endif
} s;
- struct cvmx_pko_reg_debug1_s cn50xx;
- struct cvmx_pko_reg_debug1_s cn52xx;
- struct cvmx_pko_reg_debug1_s cn52xxp1;
- struct cvmx_pko_reg_debug1_s cn56xx;
- struct cvmx_pko_reg_debug1_s cn56xxp1;
- struct cvmx_pko_reg_debug1_s cn58xx;
- struct cvmx_pko_reg_debug1_s cn58xxp1;
- struct cvmx_pko_reg_debug1_s cn61xx;
- struct cvmx_pko_reg_debug1_s cn63xx;
- struct cvmx_pko_reg_debug1_s cn63xxp1;
- struct cvmx_pko_reg_debug1_s cn66xx;
- struct cvmx_pko_reg_debug1_s cn68xx;
- struct cvmx_pko_reg_debug1_s cn68xxp1;
- struct cvmx_pko_reg_debug1_s cnf71xx;
};
union cvmx_pko_reg_debug2 {
@@ -1970,20 +1549,6 @@ union cvmx_pko_reg_debug2 {
uint64_t asserts:64;
#endif
} s;
- struct cvmx_pko_reg_debug2_s cn50xx;
- struct cvmx_pko_reg_debug2_s cn52xx;
- struct cvmx_pko_reg_debug2_s cn52xxp1;
- struct cvmx_pko_reg_debug2_s cn56xx;
- struct cvmx_pko_reg_debug2_s cn56xxp1;
- struct cvmx_pko_reg_debug2_s cn58xx;
- struct cvmx_pko_reg_debug2_s cn58xxp1;
- struct cvmx_pko_reg_debug2_s cn61xx;
- struct cvmx_pko_reg_debug2_s cn63xx;
- struct cvmx_pko_reg_debug2_s cn63xxp1;
- struct cvmx_pko_reg_debug2_s cn66xx;
- struct cvmx_pko_reg_debug2_s cn68xx;
- struct cvmx_pko_reg_debug2_s cn68xxp1;
- struct cvmx_pko_reg_debug2_s cnf71xx;
};
union cvmx_pko_reg_debug3 {
@@ -1995,20 +1560,6 @@ union cvmx_pko_reg_debug3 {
uint64_t asserts:64;
#endif
} s;
- struct cvmx_pko_reg_debug3_s cn50xx;
- struct cvmx_pko_reg_debug3_s cn52xx;
- struct cvmx_pko_reg_debug3_s cn52xxp1;
- struct cvmx_pko_reg_debug3_s cn56xx;
- struct cvmx_pko_reg_debug3_s cn56xxp1;
- struct cvmx_pko_reg_debug3_s cn58xx;
- struct cvmx_pko_reg_debug3_s cn58xxp1;
- struct cvmx_pko_reg_debug3_s cn61xx;
- struct cvmx_pko_reg_debug3_s cn63xx;
- struct cvmx_pko_reg_debug3_s cn63xxp1;
- struct cvmx_pko_reg_debug3_s cn66xx;
- struct cvmx_pko_reg_debug3_s cn68xx;
- struct cvmx_pko_reg_debug3_s cn68xxp1;
- struct cvmx_pko_reg_debug3_s cnf71xx;
};
union cvmx_pko_reg_debug4 {
@@ -2020,8 +1571,6 @@ union cvmx_pko_reg_debug4 {
uint64_t asserts:64;
#endif
} s;
- struct cvmx_pko_reg_debug4_s cn68xx;
- struct cvmx_pko_reg_debug4_s cn68xxp1;
};
union cvmx_pko_reg_engine_inflight {
@@ -2090,9 +1639,6 @@ union cvmx_pko_reg_engine_inflight {
uint64_t reserved_40_63:24;
#endif
} cn52xx;
- struct cvmx_pko_reg_engine_inflight_cn52xx cn52xxp1;
- struct cvmx_pko_reg_engine_inflight_cn52xx cn56xx;
- struct cvmx_pko_reg_engine_inflight_cn52xx cn56xxp1;
struct cvmx_pko_reg_engine_inflight_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_56_63:8;
@@ -2159,11 +1705,6 @@ union cvmx_pko_reg_engine_inflight {
uint64_t reserved_48_63:16;
#endif
} cn63xx;
- struct cvmx_pko_reg_engine_inflight_cn63xx cn63xxp1;
- struct cvmx_pko_reg_engine_inflight_cn61xx cn66xx;
- struct cvmx_pko_reg_engine_inflight_s cn68xx;
- struct cvmx_pko_reg_engine_inflight_s cn68xxp1;
- struct cvmx_pko_reg_engine_inflight_cn61xx cnf71xx;
};
union cvmx_pko_reg_engine_inflight1 {
@@ -2183,8 +1724,6 @@ union cvmx_pko_reg_engine_inflight1 {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pko_reg_engine_inflight1_s cn68xx;
- struct cvmx_pko_reg_engine_inflight1_s cn68xxp1;
};
union cvmx_pko_reg_engine_storagex {
@@ -2226,8 +1765,6 @@ union cvmx_pko_reg_engine_storagex {
uint64_t engine15:4;
#endif
} s;
- struct cvmx_pko_reg_engine_storagex_s cn68xx;
- struct cvmx_pko_reg_engine_storagex_s cn68xxp1;
};
union cvmx_pko_reg_engine_thresh {
@@ -2250,9 +1787,6 @@ union cvmx_pko_reg_engine_thresh {
uint64_t reserved_10_63:54;
#endif
} cn52xx;
- struct cvmx_pko_reg_engine_thresh_cn52xx cn52xxp1;
- struct cvmx_pko_reg_engine_thresh_cn52xx cn56xx;
- struct cvmx_pko_reg_engine_thresh_cn52xx cn56xxp1;
struct cvmx_pko_reg_engine_thresh_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_14_63:50;
@@ -2271,11 +1805,6 @@ union cvmx_pko_reg_engine_thresh {
uint64_t reserved_12_63:52;
#endif
} cn63xx;
- struct cvmx_pko_reg_engine_thresh_cn63xx cn63xxp1;
- struct cvmx_pko_reg_engine_thresh_cn61xx cn66xx;
- struct cvmx_pko_reg_engine_thresh_s cn68xx;
- struct cvmx_pko_reg_engine_thresh_s cn68xxp1;
- struct cvmx_pko_reg_engine_thresh_cn61xx cnf71xx;
};
union cvmx_pko_reg_error {
@@ -2306,9 +1835,6 @@ union cvmx_pko_reg_error {
uint64_t reserved_2_63:62;
#endif
} cn30xx;
- struct cvmx_pko_reg_error_cn30xx cn31xx;
- struct cvmx_pko_reg_error_cn30xx cn38xx;
- struct cvmx_pko_reg_error_cn30xx cn38xxp2;
struct cvmx_pko_reg_error_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_3_63:61;
@@ -2322,19 +1848,6 @@ union cvmx_pko_reg_error {
uint64_t reserved_3_63:61;
#endif
} cn50xx;
- struct cvmx_pko_reg_error_cn50xx cn52xx;
- struct cvmx_pko_reg_error_cn50xx cn52xxp1;
- struct cvmx_pko_reg_error_cn50xx cn56xx;
- struct cvmx_pko_reg_error_cn50xx cn56xxp1;
- struct cvmx_pko_reg_error_cn50xx cn58xx;
- struct cvmx_pko_reg_error_cn50xx cn58xxp1;
- struct cvmx_pko_reg_error_cn50xx cn61xx;
- struct cvmx_pko_reg_error_cn50xx cn63xx;
- struct cvmx_pko_reg_error_cn50xx cn63xxp1;
- struct cvmx_pko_reg_error_cn50xx cn66xx;
- struct cvmx_pko_reg_error_s cn68xx;
- struct cvmx_pko_reg_error_s cn68xxp1;
- struct cvmx_pko_reg_error_cn50xx cnf71xx;
};
union cvmx_pko_reg_flags {
@@ -2379,16 +1892,6 @@ union cvmx_pko_reg_flags {
uint64_t reserved_4_63:60;
#endif
} cn30xx;
- struct cvmx_pko_reg_flags_cn30xx cn31xx;
- struct cvmx_pko_reg_flags_cn30xx cn38xx;
- struct cvmx_pko_reg_flags_cn30xx cn38xxp2;
- struct cvmx_pko_reg_flags_cn30xx cn50xx;
- struct cvmx_pko_reg_flags_cn30xx cn52xx;
- struct cvmx_pko_reg_flags_cn30xx cn52xxp1;
- struct cvmx_pko_reg_flags_cn30xx cn56xx;
- struct cvmx_pko_reg_flags_cn30xx cn56xxp1;
- struct cvmx_pko_reg_flags_cn30xx cn58xx;
- struct cvmx_pko_reg_flags_cn30xx cn58xxp1;
struct cvmx_pko_reg_flags_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
@@ -2410,10 +1913,6 @@ union cvmx_pko_reg_flags {
uint64_t reserved_9_63:55;
#endif
} cn61xx;
- struct cvmx_pko_reg_flags_cn30xx cn63xx;
- struct cvmx_pko_reg_flags_cn30xx cn63xxp1;
- struct cvmx_pko_reg_flags_cn61xx cn66xx;
- struct cvmx_pko_reg_flags_s cn68xx;
struct cvmx_pko_reg_flags_cn68xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_7_63:57;
@@ -2435,7 +1934,6 @@ union cvmx_pko_reg_flags {
uint64_t reserved_7_63:57;
#endif
} cn68xxp1;
- struct cvmx_pko_reg_flags_cn61xx cnf71xx;
};
union cvmx_pko_reg_gmx_port_mode {
@@ -2451,22 +1949,6 @@ union cvmx_pko_reg_gmx_port_mode {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_pko_reg_gmx_port_mode_s cn30xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn31xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn38xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn38xxp2;
- struct cvmx_pko_reg_gmx_port_mode_s cn50xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn52xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn52xxp1;
- struct cvmx_pko_reg_gmx_port_mode_s cn56xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn56xxp1;
- struct cvmx_pko_reg_gmx_port_mode_s cn58xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn58xxp1;
- struct cvmx_pko_reg_gmx_port_mode_s cn61xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn63xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn63xxp1;
- struct cvmx_pko_reg_gmx_port_mode_s cn66xx;
- struct cvmx_pko_reg_gmx_port_mode_s cnf71xx;
};
union cvmx_pko_reg_int_mask {
@@ -2497,9 +1979,6 @@ union cvmx_pko_reg_int_mask {
uint64_t reserved_2_63:62;
#endif
} cn30xx;
- struct cvmx_pko_reg_int_mask_cn30xx cn31xx;
- struct cvmx_pko_reg_int_mask_cn30xx cn38xx;
- struct cvmx_pko_reg_int_mask_cn30xx cn38xxp2;
struct cvmx_pko_reg_int_mask_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_3_63:61;
@@ -2513,19 +1992,6 @@ union cvmx_pko_reg_int_mask {
uint64_t reserved_3_63:61;
#endif
} cn50xx;
- struct cvmx_pko_reg_int_mask_cn50xx cn52xx;
- struct cvmx_pko_reg_int_mask_cn50xx cn52xxp1;
- struct cvmx_pko_reg_int_mask_cn50xx cn56xx;
- struct cvmx_pko_reg_int_mask_cn50xx cn56xxp1;
- struct cvmx_pko_reg_int_mask_cn50xx cn58xx;
- struct cvmx_pko_reg_int_mask_cn50xx cn58xxp1;
- struct cvmx_pko_reg_int_mask_cn50xx cn61xx;
- struct cvmx_pko_reg_int_mask_cn50xx cn63xx;
- struct cvmx_pko_reg_int_mask_cn50xx cn63xxp1;
- struct cvmx_pko_reg_int_mask_cn50xx cn66xx;
- struct cvmx_pko_reg_int_mask_s cn68xx;
- struct cvmx_pko_reg_int_mask_s cn68xxp1;
- struct cvmx_pko_reg_int_mask_cn50xx cnf71xx;
};
union cvmx_pko_reg_loopback_bpid {
@@ -2569,8 +2035,6 @@ union cvmx_pko_reg_loopback_bpid {
uint64_t reserved_59_63:5;
#endif
} s;
- struct cvmx_pko_reg_loopback_bpid_s cn68xx;
- struct cvmx_pko_reg_loopback_bpid_s cn68xxp1;
};
union cvmx_pko_reg_loopback_pkind {
@@ -2614,8 +2078,6 @@ union cvmx_pko_reg_loopback_pkind {
uint64_t reserved_59_63:5;
#endif
} s;
- struct cvmx_pko_reg_loopback_pkind_s cn68xx;
- struct cvmx_pko_reg_loopback_pkind_s cn68xxp1;
};
union cvmx_pko_reg_min_pkt {
@@ -2641,8 +2103,6 @@ union cvmx_pko_reg_min_pkt {
uint64_t size7:8;
#endif
} s;
- struct cvmx_pko_reg_min_pkt_s cn68xx;
- struct cvmx_pko_reg_min_pkt_s cn68xxp1;
};
union cvmx_pko_reg_preempt {
@@ -2656,17 +2116,6 @@ union cvmx_pko_reg_preempt {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pko_reg_preempt_s cn52xx;
- struct cvmx_pko_reg_preempt_s cn52xxp1;
- struct cvmx_pko_reg_preempt_s cn56xx;
- struct cvmx_pko_reg_preempt_s cn56xxp1;
- struct cvmx_pko_reg_preempt_s cn61xx;
- struct cvmx_pko_reg_preempt_s cn63xx;
- struct cvmx_pko_reg_preempt_s cn63xxp1;
- struct cvmx_pko_reg_preempt_s cn66xx;
- struct cvmx_pko_reg_preempt_s cn68xx;
- struct cvmx_pko_reg_preempt_s cn68xxp1;
- struct cvmx_pko_reg_preempt_s cnf71xx;
};
union cvmx_pko_reg_queue_mode {
@@ -2680,24 +2129,6 @@ union cvmx_pko_reg_queue_mode {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_pko_reg_queue_mode_s cn30xx;
- struct cvmx_pko_reg_queue_mode_s cn31xx;
- struct cvmx_pko_reg_queue_mode_s cn38xx;
- struct cvmx_pko_reg_queue_mode_s cn38xxp2;
- struct cvmx_pko_reg_queue_mode_s cn50xx;
- struct cvmx_pko_reg_queue_mode_s cn52xx;
- struct cvmx_pko_reg_queue_mode_s cn52xxp1;
- struct cvmx_pko_reg_queue_mode_s cn56xx;
- struct cvmx_pko_reg_queue_mode_s cn56xxp1;
- struct cvmx_pko_reg_queue_mode_s cn58xx;
- struct cvmx_pko_reg_queue_mode_s cn58xxp1;
- struct cvmx_pko_reg_queue_mode_s cn61xx;
- struct cvmx_pko_reg_queue_mode_s cn63xx;
- struct cvmx_pko_reg_queue_mode_s cn63xxp1;
- struct cvmx_pko_reg_queue_mode_s cn66xx;
- struct cvmx_pko_reg_queue_mode_s cn68xx;
- struct cvmx_pko_reg_queue_mode_s cn68xxp1;
- struct cvmx_pko_reg_queue_mode_s cnf71xx;
};
union cvmx_pko_reg_queue_preempt {
@@ -2713,17 +2144,6 @@ union cvmx_pko_reg_queue_preempt {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_pko_reg_queue_preempt_s cn52xx;
- struct cvmx_pko_reg_queue_preempt_s cn52xxp1;
- struct cvmx_pko_reg_queue_preempt_s cn56xx;
- struct cvmx_pko_reg_queue_preempt_s cn56xxp1;
- struct cvmx_pko_reg_queue_preempt_s cn61xx;
- struct cvmx_pko_reg_queue_preempt_s cn63xx;
- struct cvmx_pko_reg_queue_preempt_s cn63xxp1;
- struct cvmx_pko_reg_queue_preempt_s cn66xx;
- struct cvmx_pko_reg_queue_preempt_s cn68xx;
- struct cvmx_pko_reg_queue_preempt_s cn68xxp1;
- struct cvmx_pko_reg_queue_preempt_s cnf71xx;
};
union cvmx_pko_reg_queue_ptrs1 {
@@ -2739,18 +2159,6 @@ union cvmx_pko_reg_queue_ptrs1 {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_pko_reg_queue_ptrs1_s cn50xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn52xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn52xxp1;
- struct cvmx_pko_reg_queue_ptrs1_s cn56xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn56xxp1;
- struct cvmx_pko_reg_queue_ptrs1_s cn58xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn58xxp1;
- struct cvmx_pko_reg_queue_ptrs1_s cn61xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn63xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn63xxp1;
- struct cvmx_pko_reg_queue_ptrs1_s cn66xx;
- struct cvmx_pko_reg_queue_ptrs1_s cnf71xx;
};
union cvmx_pko_reg_read_idx {
@@ -2766,24 +2174,6 @@ union cvmx_pko_reg_read_idx {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_pko_reg_read_idx_s cn30xx;
- struct cvmx_pko_reg_read_idx_s cn31xx;
- struct cvmx_pko_reg_read_idx_s cn38xx;
- struct cvmx_pko_reg_read_idx_s cn38xxp2;
- struct cvmx_pko_reg_read_idx_s cn50xx;
- struct cvmx_pko_reg_read_idx_s cn52xx;
- struct cvmx_pko_reg_read_idx_s cn52xxp1;
- struct cvmx_pko_reg_read_idx_s cn56xx;
- struct cvmx_pko_reg_read_idx_s cn56xxp1;
- struct cvmx_pko_reg_read_idx_s cn58xx;
- struct cvmx_pko_reg_read_idx_s cn58xxp1;
- struct cvmx_pko_reg_read_idx_s cn61xx;
- struct cvmx_pko_reg_read_idx_s cn63xx;
- struct cvmx_pko_reg_read_idx_s cn63xxp1;
- struct cvmx_pko_reg_read_idx_s cn66xx;
- struct cvmx_pko_reg_read_idx_s cn68xx;
- struct cvmx_pko_reg_read_idx_s cn68xxp1;
- struct cvmx_pko_reg_read_idx_s cnf71xx;
};
union cvmx_pko_reg_throttle {
@@ -2797,8 +2187,6 @@ union cvmx_pko_reg_throttle {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pko_reg_throttle_s cn68xx;
- struct cvmx_pko_reg_throttle_s cn68xxp1;
};
union cvmx_pko_reg_timestamp {
@@ -2812,13 +2200,6 @@ union cvmx_pko_reg_timestamp {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_pko_reg_timestamp_s cn61xx;
- struct cvmx_pko_reg_timestamp_s cn63xx;
- struct cvmx_pko_reg_timestamp_s cn63xxp1;
- struct cvmx_pko_reg_timestamp_s cn66xx;
- struct cvmx_pko_reg_timestamp_s cn68xx;
- struct cvmx_pko_reg_timestamp_s cn68xxp1;
- struct cvmx_pko_reg_timestamp_s cnf71xx;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h
index 5f47f76ed510..20eb9c46a75a 100644
--- a/arch/mips/include/asm/octeon/cvmx-pko.h
+++ b/arch/mips/include/asm/octeon/cvmx-pko.h
@@ -611,7 +611,7 @@ static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
- status->doorbell = debug8.cn58xx.doorbell;
+ status->doorbell = debug8.cn50xx.doorbell;
}
}
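This cvmx-pko.h hunk is the one place the cleanup reaches executable code: cvmx_pko_mem_debug8 declared its cn58xx member as struct cvmx_pko_mem_debug8_cn50xx cn58xx; (deleted above), so debug8.cn58xx.doorbell and debug8.cn50xx.doorbell name exactly the same bits of the same union. Switching the accessor to the surviving cn50xx member keeps cvmx_pko_get_port_status() bit-for-bit unchanged.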
diff --git a/arch/mips/include/asm/octeon/cvmx-pow-defs.h b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
index 6a3db4b068ff..474dd544314b 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow-defs.h
@@ -160,8 +160,6 @@ union cvmx_pow_bist_stat {
uint64_t reserved_32_63:32;
#endif
} cn38xx;
- struct cvmx_pow_bist_stat_cn38xx cn38xxp2;
- struct cvmx_pow_bist_stat_cn31xx cn50xx;
struct cvmx_pow_bist_stat_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -191,7 +189,6 @@ union cvmx_pow_bist_stat {
uint64_t reserved_20_63:44;
#endif
} cn52xx;
- struct cvmx_pow_bist_stat_cn52xx cn52xxp1;
struct cvmx_pow_bist_stat_cn56xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
@@ -223,9 +220,6 @@ union cvmx_pow_bist_stat {
uint64_t reserved_28_63:36;
#endif
} cn56xx;
- struct cvmx_pow_bist_stat_cn56xx cn56xxp1;
- struct cvmx_pow_bist_stat_cn38xx cn58xx;
- struct cvmx_pow_bist_stat_cn38xx cn58xxp1;
struct cvmx_pow_bist_stat_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63:44;
@@ -276,7 +270,6 @@ union cvmx_pow_bist_stat {
uint64_t reserved_22_63:42;
#endif
} cn63xx;
- struct cvmx_pow_bist_stat_cn63xx cn63xxp1;
struct cvmx_pow_bist_stat_cn66xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_26_63:38;
@@ -302,7 +295,6 @@ union cvmx_pow_bist_stat {
uint64_t reserved_26_63:38;
#endif
} cn66xx;
- struct cvmx_pow_bist_stat_cn61xx cnf71xx;
};
union cvmx_pow_ds_pc {
@@ -316,22 +308,6 @@ union cvmx_pow_ds_pc {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pow_ds_pc_s cn30xx;
- struct cvmx_pow_ds_pc_s cn31xx;
- struct cvmx_pow_ds_pc_s cn38xx;
- struct cvmx_pow_ds_pc_s cn38xxp2;
- struct cvmx_pow_ds_pc_s cn50xx;
- struct cvmx_pow_ds_pc_s cn52xx;
- struct cvmx_pow_ds_pc_s cn52xxp1;
- struct cvmx_pow_ds_pc_s cn56xx;
- struct cvmx_pow_ds_pc_s cn56xxp1;
- struct cvmx_pow_ds_pc_s cn58xx;
- struct cvmx_pow_ds_pc_s cn58xxp1;
- struct cvmx_pow_ds_pc_s cn61xx;
- struct cvmx_pow_ds_pc_s cn63xx;
- struct cvmx_pow_ds_pc_s cn63xxp1;
- struct cvmx_pow_ds_pc_s cn66xx;
- struct cvmx_pow_ds_pc_s cnf71xx;
};
union cvmx_pow_ecc_err {
@@ -367,7 +343,6 @@ union cvmx_pow_ecc_err {
uint64_t reserved_45_63:19;
#endif
} s;
- struct cvmx_pow_ecc_err_s cn30xx;
struct cvmx_pow_ecc_err_cn31xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_14_63:50;
@@ -391,20 +366,6 @@ union cvmx_pow_ecc_err {
uint64_t reserved_14_63:50;
#endif
} cn31xx;
- struct cvmx_pow_ecc_err_s cn38xx;
- struct cvmx_pow_ecc_err_cn31xx cn38xxp2;
- struct cvmx_pow_ecc_err_s cn50xx;
- struct cvmx_pow_ecc_err_s cn52xx;
- struct cvmx_pow_ecc_err_s cn52xxp1;
- struct cvmx_pow_ecc_err_s cn56xx;
- struct cvmx_pow_ecc_err_s cn56xxp1;
- struct cvmx_pow_ecc_err_s cn58xx;
- struct cvmx_pow_ecc_err_s cn58xxp1;
- struct cvmx_pow_ecc_err_s cn61xx;
- struct cvmx_pow_ecc_err_s cn63xx;
- struct cvmx_pow_ecc_err_s cn63xxp1;
- struct cvmx_pow_ecc_err_s cn66xx;
- struct cvmx_pow_ecc_err_s cnf71xx;
};
union cvmx_pow_int_ctl {
@@ -420,22 +381,6 @@ union cvmx_pow_int_ctl {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_pow_int_ctl_s cn30xx;
- struct cvmx_pow_int_ctl_s cn31xx;
- struct cvmx_pow_int_ctl_s cn38xx;
- struct cvmx_pow_int_ctl_s cn38xxp2;
- struct cvmx_pow_int_ctl_s cn50xx;
- struct cvmx_pow_int_ctl_s cn52xx;
- struct cvmx_pow_int_ctl_s cn52xxp1;
- struct cvmx_pow_int_ctl_s cn56xx;
- struct cvmx_pow_int_ctl_s cn56xxp1;
- struct cvmx_pow_int_ctl_s cn58xx;
- struct cvmx_pow_int_ctl_s cn58xxp1;
- struct cvmx_pow_int_ctl_s cn61xx;
- struct cvmx_pow_int_ctl_s cn63xx;
- struct cvmx_pow_int_ctl_s cn63xxp1;
- struct cvmx_pow_int_ctl_s cn66xx;
- struct cvmx_pow_int_ctl_s cnf71xx;
};
union cvmx_pow_iq_cntx {
@@ -449,22 +394,6 @@ union cvmx_pow_iq_cntx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pow_iq_cntx_s cn30xx;
- struct cvmx_pow_iq_cntx_s cn31xx;
- struct cvmx_pow_iq_cntx_s cn38xx;
- struct cvmx_pow_iq_cntx_s cn38xxp2;
- struct cvmx_pow_iq_cntx_s cn50xx;
- struct cvmx_pow_iq_cntx_s cn52xx;
- struct cvmx_pow_iq_cntx_s cn52xxp1;
- struct cvmx_pow_iq_cntx_s cn56xx;
- struct cvmx_pow_iq_cntx_s cn56xxp1;
- struct cvmx_pow_iq_cntx_s cn58xx;
- struct cvmx_pow_iq_cntx_s cn58xxp1;
- struct cvmx_pow_iq_cntx_s cn61xx;
- struct cvmx_pow_iq_cntx_s cn63xx;
- struct cvmx_pow_iq_cntx_s cn63xxp1;
- struct cvmx_pow_iq_cntx_s cn66xx;
- struct cvmx_pow_iq_cntx_s cnf71xx;
};
union cvmx_pow_iq_com_cnt {
@@ -478,22 +407,6 @@ union cvmx_pow_iq_com_cnt {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pow_iq_com_cnt_s cn30xx;
- struct cvmx_pow_iq_com_cnt_s cn31xx;
- struct cvmx_pow_iq_com_cnt_s cn38xx;
- struct cvmx_pow_iq_com_cnt_s cn38xxp2;
- struct cvmx_pow_iq_com_cnt_s cn50xx;
- struct cvmx_pow_iq_com_cnt_s cn52xx;
- struct cvmx_pow_iq_com_cnt_s cn52xxp1;
- struct cvmx_pow_iq_com_cnt_s cn56xx;
- struct cvmx_pow_iq_com_cnt_s cn56xxp1;
- struct cvmx_pow_iq_com_cnt_s cn58xx;
- struct cvmx_pow_iq_com_cnt_s cn58xxp1;
- struct cvmx_pow_iq_com_cnt_s cn61xx;
- struct cvmx_pow_iq_com_cnt_s cn63xx;
- struct cvmx_pow_iq_com_cnt_s cn63xxp1;
- struct cvmx_pow_iq_com_cnt_s cn66xx;
- struct cvmx_pow_iq_com_cnt_s cnf71xx;
};
union cvmx_pow_iq_int {
@@ -507,15 +420,6 @@ union cvmx_pow_iq_int {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_pow_iq_int_s cn52xx;
- struct cvmx_pow_iq_int_s cn52xxp1;
- struct cvmx_pow_iq_int_s cn56xx;
- struct cvmx_pow_iq_int_s cn56xxp1;
- struct cvmx_pow_iq_int_s cn61xx;
- struct cvmx_pow_iq_int_s cn63xx;
- struct cvmx_pow_iq_int_s cn63xxp1;
- struct cvmx_pow_iq_int_s cn66xx;
- struct cvmx_pow_iq_int_s cnf71xx;
};
union cvmx_pow_iq_int_en {
@@ -529,15 +433,6 @@ union cvmx_pow_iq_int_en {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_pow_iq_int_en_s cn52xx;
- struct cvmx_pow_iq_int_en_s cn52xxp1;
- struct cvmx_pow_iq_int_en_s cn56xx;
- struct cvmx_pow_iq_int_en_s cn56xxp1;
- struct cvmx_pow_iq_int_en_s cn61xx;
- struct cvmx_pow_iq_int_en_s cn63xx;
- struct cvmx_pow_iq_int_en_s cn63xxp1;
- struct cvmx_pow_iq_int_en_s cn66xx;
- struct cvmx_pow_iq_int_en_s cnf71xx;
};
union cvmx_pow_iq_thrx {
@@ -551,15 +446,6 @@ union cvmx_pow_iq_thrx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pow_iq_thrx_s cn52xx;
- struct cvmx_pow_iq_thrx_s cn52xxp1;
- struct cvmx_pow_iq_thrx_s cn56xx;
- struct cvmx_pow_iq_thrx_s cn56xxp1;
- struct cvmx_pow_iq_thrx_s cn61xx;
- struct cvmx_pow_iq_thrx_s cn63xx;
- struct cvmx_pow_iq_thrx_s cn63xxp1;
- struct cvmx_pow_iq_thrx_s cn66xx;
- struct cvmx_pow_iq_thrx_s cnf71xx;
};
union cvmx_pow_nos_cnt {
@@ -591,9 +477,6 @@ union cvmx_pow_nos_cnt {
uint64_t reserved_9_63:55;
#endif
} cn31xx;
- struct cvmx_pow_nos_cnt_s cn38xx;
- struct cvmx_pow_nos_cnt_s cn38xxp2;
- struct cvmx_pow_nos_cnt_cn31xx cn50xx;
struct cvmx_pow_nos_cnt_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63:54;
@@ -603,12 +486,6 @@ union cvmx_pow_nos_cnt {
uint64_t reserved_10_63:54;
#endif
} cn52xx;
- struct cvmx_pow_nos_cnt_cn52xx cn52xxp1;
- struct cvmx_pow_nos_cnt_s cn56xx;
- struct cvmx_pow_nos_cnt_s cn56xxp1;
- struct cvmx_pow_nos_cnt_s cn58xx;
- struct cvmx_pow_nos_cnt_s cn58xxp1;
- struct cvmx_pow_nos_cnt_cn52xx cn61xx;
struct cvmx_pow_nos_cnt_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
@@ -618,9 +495,6 @@ union cvmx_pow_nos_cnt {
uint64_t reserved_11_63:53;
#endif
} cn63xx;
- struct cvmx_pow_nos_cnt_cn63xx cn63xxp1;
- struct cvmx_pow_nos_cnt_cn63xx cn66xx;
- struct cvmx_pow_nos_cnt_cn52xx cnf71xx;
};
union cvmx_pow_nw_tim {
@@ -634,22 +508,6 @@ union cvmx_pow_nw_tim {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_pow_nw_tim_s cn30xx;
- struct cvmx_pow_nw_tim_s cn31xx;
- struct cvmx_pow_nw_tim_s cn38xx;
- struct cvmx_pow_nw_tim_s cn38xxp2;
- struct cvmx_pow_nw_tim_s cn50xx;
- struct cvmx_pow_nw_tim_s cn52xx;
- struct cvmx_pow_nw_tim_s cn52xxp1;
- struct cvmx_pow_nw_tim_s cn56xx;
- struct cvmx_pow_nw_tim_s cn56xxp1;
- struct cvmx_pow_nw_tim_s cn58xx;
- struct cvmx_pow_nw_tim_s cn58xxp1;
- struct cvmx_pow_nw_tim_s cn61xx;
- struct cvmx_pow_nw_tim_s cn63xx;
- struct cvmx_pow_nw_tim_s cn63xxp1;
- struct cvmx_pow_nw_tim_s cn66xx;
- struct cvmx_pow_nw_tim_s cnf71xx;
};
union cvmx_pow_pf_rst_msk {
@@ -663,18 +521,6 @@ union cvmx_pow_pf_rst_msk {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_pow_pf_rst_msk_s cn50xx;
- struct cvmx_pow_pf_rst_msk_s cn52xx;
- struct cvmx_pow_pf_rst_msk_s cn52xxp1;
- struct cvmx_pow_pf_rst_msk_s cn56xx;
- struct cvmx_pow_pf_rst_msk_s cn56xxp1;
- struct cvmx_pow_pf_rst_msk_s cn58xx;
- struct cvmx_pow_pf_rst_msk_s cn58xxp1;
- struct cvmx_pow_pf_rst_msk_s cn61xx;
- struct cvmx_pow_pf_rst_msk_s cn63xx;
- struct cvmx_pow_pf_rst_msk_s cn63xxp1;
- struct cvmx_pow_pf_rst_msk_s cn66xx;
- struct cvmx_pow_pf_rst_msk_s cnf71xx;
};
union cvmx_pow_pp_grp_mskx {
@@ -713,21 +559,6 @@ union cvmx_pow_pp_grp_mskx {
uint64_t reserved_16_63:48;
#endif
} cn30xx;
- struct cvmx_pow_pp_grp_mskx_cn30xx cn31xx;
- struct cvmx_pow_pp_grp_mskx_cn30xx cn38xx;
- struct cvmx_pow_pp_grp_mskx_cn30xx cn38xxp2;
- struct cvmx_pow_pp_grp_mskx_s cn50xx;
- struct cvmx_pow_pp_grp_mskx_s cn52xx;
- struct cvmx_pow_pp_grp_mskx_s cn52xxp1;
- struct cvmx_pow_pp_grp_mskx_s cn56xx;
- struct cvmx_pow_pp_grp_mskx_s cn56xxp1;
- struct cvmx_pow_pp_grp_mskx_s cn58xx;
- struct cvmx_pow_pp_grp_mskx_s cn58xxp1;
- struct cvmx_pow_pp_grp_mskx_s cn61xx;
- struct cvmx_pow_pp_grp_mskx_s cn63xx;
- struct cvmx_pow_pp_grp_mskx_s cn63xxp1;
- struct cvmx_pow_pp_grp_mskx_s cn66xx;
- struct cvmx_pow_pp_grp_mskx_s cnf71xx;
};
union cvmx_pow_qos_rndx {
@@ -747,22 +578,6 @@ union cvmx_pow_qos_rndx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pow_qos_rndx_s cn30xx;
- struct cvmx_pow_qos_rndx_s cn31xx;
- struct cvmx_pow_qos_rndx_s cn38xx;
- struct cvmx_pow_qos_rndx_s cn38xxp2;
- struct cvmx_pow_qos_rndx_s cn50xx;
- struct cvmx_pow_qos_rndx_s cn52xx;
- struct cvmx_pow_qos_rndx_s cn52xxp1;
- struct cvmx_pow_qos_rndx_s cn56xx;
- struct cvmx_pow_qos_rndx_s cn56xxp1;
- struct cvmx_pow_qos_rndx_s cn58xx;
- struct cvmx_pow_qos_rndx_s cn58xxp1;
- struct cvmx_pow_qos_rndx_s cn61xx;
- struct cvmx_pow_qos_rndx_s cn63xx;
- struct cvmx_pow_qos_rndx_s cn63xxp1;
- struct cvmx_pow_qos_rndx_s cn66xx;
- struct cvmx_pow_qos_rndx_s cnf71xx;
};
union cvmx_pow_qos_thrx {
@@ -838,9 +653,6 @@ union cvmx_pow_qos_thrx {
uint64_t reserved_57_63:7;
#endif
} cn31xx;
- struct cvmx_pow_qos_thrx_s cn38xx;
- struct cvmx_pow_qos_thrx_s cn38xxp2;
- struct cvmx_pow_qos_thrx_cn31xx cn50xx;
struct cvmx_pow_qos_thrx_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_58_63:6;
@@ -866,12 +678,6 @@ union cvmx_pow_qos_thrx {
uint64_t reserved_58_63:6;
#endif
} cn52xx;
- struct cvmx_pow_qos_thrx_cn52xx cn52xxp1;
- struct cvmx_pow_qos_thrx_s cn56xx;
- struct cvmx_pow_qos_thrx_s cn56xxp1;
- struct cvmx_pow_qos_thrx_s cn58xx;
- struct cvmx_pow_qos_thrx_s cn58xxp1;
- struct cvmx_pow_qos_thrx_cn52xx cn61xx;
struct cvmx_pow_qos_thrx_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_59_63:5;
@@ -897,9 +703,6 @@ union cvmx_pow_qos_thrx {
uint64_t reserved_59_63:5;
#endif
} cn63xx;
- struct cvmx_pow_qos_thrx_cn63xx cn63xxp1;
- struct cvmx_pow_qos_thrx_cn63xx cn66xx;
- struct cvmx_pow_qos_thrx_cn52xx cnf71xx;
};
union cvmx_pow_ts_pc {
@@ -913,22 +716,6 @@ union cvmx_pow_ts_pc {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pow_ts_pc_s cn30xx;
- struct cvmx_pow_ts_pc_s cn31xx;
- struct cvmx_pow_ts_pc_s cn38xx;
- struct cvmx_pow_ts_pc_s cn38xxp2;
- struct cvmx_pow_ts_pc_s cn50xx;
- struct cvmx_pow_ts_pc_s cn52xx;
- struct cvmx_pow_ts_pc_s cn52xxp1;
- struct cvmx_pow_ts_pc_s cn56xx;
- struct cvmx_pow_ts_pc_s cn56xxp1;
- struct cvmx_pow_ts_pc_s cn58xx;
- struct cvmx_pow_ts_pc_s cn58xxp1;
- struct cvmx_pow_ts_pc_s cn61xx;
- struct cvmx_pow_ts_pc_s cn63xx;
- struct cvmx_pow_ts_pc_s cn63xxp1;
- struct cvmx_pow_ts_pc_s cn66xx;
- struct cvmx_pow_ts_pc_s cnf71xx;
};
union cvmx_pow_wa_com_pc {
@@ -942,22 +729,6 @@ union cvmx_pow_wa_com_pc {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pow_wa_com_pc_s cn30xx;
- struct cvmx_pow_wa_com_pc_s cn31xx;
- struct cvmx_pow_wa_com_pc_s cn38xx;
- struct cvmx_pow_wa_com_pc_s cn38xxp2;
- struct cvmx_pow_wa_com_pc_s cn50xx;
- struct cvmx_pow_wa_com_pc_s cn52xx;
- struct cvmx_pow_wa_com_pc_s cn52xxp1;
- struct cvmx_pow_wa_com_pc_s cn56xx;
- struct cvmx_pow_wa_com_pc_s cn56xxp1;
- struct cvmx_pow_wa_com_pc_s cn58xx;
- struct cvmx_pow_wa_com_pc_s cn58xxp1;
- struct cvmx_pow_wa_com_pc_s cn61xx;
- struct cvmx_pow_wa_com_pc_s cn63xx;
- struct cvmx_pow_wa_com_pc_s cn63xxp1;
- struct cvmx_pow_wa_com_pc_s cn66xx;
- struct cvmx_pow_wa_com_pc_s cnf71xx;
};
union cvmx_pow_wa_pcx {
@@ -971,22 +742,6 @@ union cvmx_pow_wa_pcx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pow_wa_pcx_s cn30xx;
- struct cvmx_pow_wa_pcx_s cn31xx;
- struct cvmx_pow_wa_pcx_s cn38xx;
- struct cvmx_pow_wa_pcx_s cn38xxp2;
- struct cvmx_pow_wa_pcx_s cn50xx;
- struct cvmx_pow_wa_pcx_s cn52xx;
- struct cvmx_pow_wa_pcx_s cn52xxp1;
- struct cvmx_pow_wa_pcx_s cn56xx;
- struct cvmx_pow_wa_pcx_s cn56xxp1;
- struct cvmx_pow_wa_pcx_s cn58xx;
- struct cvmx_pow_wa_pcx_s cn58xxp1;
- struct cvmx_pow_wa_pcx_s cn61xx;
- struct cvmx_pow_wa_pcx_s cn63xx;
- struct cvmx_pow_wa_pcx_s cn63xxp1;
- struct cvmx_pow_wa_pcx_s cn66xx;
- struct cvmx_pow_wa_pcx_s cnf71xx;
};
union cvmx_pow_wq_int {
@@ -1002,22 +757,6 @@ union cvmx_pow_wq_int {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pow_wq_int_s cn30xx;
- struct cvmx_pow_wq_int_s cn31xx;
- struct cvmx_pow_wq_int_s cn38xx;
- struct cvmx_pow_wq_int_s cn38xxp2;
- struct cvmx_pow_wq_int_s cn50xx;
- struct cvmx_pow_wq_int_s cn52xx;
- struct cvmx_pow_wq_int_s cn52xxp1;
- struct cvmx_pow_wq_int_s cn56xx;
- struct cvmx_pow_wq_int_s cn56xxp1;
- struct cvmx_pow_wq_int_s cn58xx;
- struct cvmx_pow_wq_int_s cn58xxp1;
- struct cvmx_pow_wq_int_s cn61xx;
- struct cvmx_pow_wq_int_s cn63xx;
- struct cvmx_pow_wq_int_s cn63xxp1;
- struct cvmx_pow_wq_int_s cn66xx;
- struct cvmx_pow_wq_int_s cnf71xx;
};
union cvmx_pow_wq_int_cntx {
@@ -1069,9 +808,6 @@ union cvmx_pow_wq_int_cntx {
uint64_t reserved_28_63:36;
#endif
} cn31xx;
- struct cvmx_pow_wq_int_cntx_s cn38xx;
- struct cvmx_pow_wq_int_cntx_s cn38xxp2;
- struct cvmx_pow_wq_int_cntx_cn31xx cn50xx;
struct cvmx_pow_wq_int_cntx_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
@@ -1089,12 +825,6 @@ union cvmx_pow_wq_int_cntx {
uint64_t reserved_28_63:36;
#endif
} cn52xx;
- struct cvmx_pow_wq_int_cntx_cn52xx cn52xxp1;
- struct cvmx_pow_wq_int_cntx_s cn56xx;
- struct cvmx_pow_wq_int_cntx_s cn56xxp1;
- struct cvmx_pow_wq_int_cntx_s cn58xx;
- struct cvmx_pow_wq_int_cntx_s cn58xxp1;
- struct cvmx_pow_wq_int_cntx_cn52xx cn61xx;
struct cvmx_pow_wq_int_cntx_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_28_63:36;
@@ -1112,9 +842,6 @@ union cvmx_pow_wq_int_cntx {
uint64_t reserved_28_63:36;
#endif
} cn63xx;
- struct cvmx_pow_wq_int_cntx_cn63xx cn63xxp1;
- struct cvmx_pow_wq_int_cntx_cn63xx cn66xx;
- struct cvmx_pow_wq_int_cntx_cn52xx cnf71xx;
};
union cvmx_pow_wq_int_pc {
@@ -1134,22 +861,6 @@ union cvmx_pow_wq_int_pc {
uint64_t reserved_60_63:4;
#endif
} s;
- struct cvmx_pow_wq_int_pc_s cn30xx;
- struct cvmx_pow_wq_int_pc_s cn31xx;
- struct cvmx_pow_wq_int_pc_s cn38xx;
- struct cvmx_pow_wq_int_pc_s cn38xxp2;
- struct cvmx_pow_wq_int_pc_s cn50xx;
- struct cvmx_pow_wq_int_pc_s cn52xx;
- struct cvmx_pow_wq_int_pc_s cn52xxp1;
- struct cvmx_pow_wq_int_pc_s cn56xx;
- struct cvmx_pow_wq_int_pc_s cn56xxp1;
- struct cvmx_pow_wq_int_pc_s cn58xx;
- struct cvmx_pow_wq_int_pc_s cn58xxp1;
- struct cvmx_pow_wq_int_pc_s cn61xx;
- struct cvmx_pow_wq_int_pc_s cn63xx;
- struct cvmx_pow_wq_int_pc_s cn63xxp1;
- struct cvmx_pow_wq_int_pc_s cn66xx;
- struct cvmx_pow_wq_int_pc_s cnf71xx;
};
union cvmx_pow_wq_int_thrx {
@@ -1211,9 +922,6 @@ union cvmx_pow_wq_int_thrx {
uint64_t reserved_29_63:35;
#endif
} cn31xx;
- struct cvmx_pow_wq_int_thrx_s cn38xx;
- struct cvmx_pow_wq_int_thrx_s cn38xxp2;
- struct cvmx_pow_wq_int_thrx_cn31xx cn50xx;
struct cvmx_pow_wq_int_thrx_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
@@ -1233,12 +941,6 @@ union cvmx_pow_wq_int_thrx {
uint64_t reserved_29_63:35;
#endif
} cn52xx;
- struct cvmx_pow_wq_int_thrx_cn52xx cn52xxp1;
- struct cvmx_pow_wq_int_thrx_s cn56xx;
- struct cvmx_pow_wq_int_thrx_s cn56xxp1;
- struct cvmx_pow_wq_int_thrx_s cn58xx;
- struct cvmx_pow_wq_int_thrx_s cn58xxp1;
- struct cvmx_pow_wq_int_thrx_cn52xx cn61xx;
struct cvmx_pow_wq_int_thrx_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63:35;
@@ -1258,9 +960,6 @@ union cvmx_pow_wq_int_thrx {
uint64_t reserved_29_63:35;
#endif
} cn63xx;
- struct cvmx_pow_wq_int_thrx_cn63xx cn63xxp1;
- struct cvmx_pow_wq_int_thrx_cn63xx cn66xx;
- struct cvmx_pow_wq_int_thrx_cn52xx cnf71xx;
};
union cvmx_pow_ws_pcx {
@@ -1274,22 +973,6 @@ union cvmx_pow_ws_pcx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_pow_ws_pcx_s cn30xx;
- struct cvmx_pow_ws_pcx_s cn31xx;
- struct cvmx_pow_ws_pcx_s cn38xx;
- struct cvmx_pow_ws_pcx_s cn38xxp2;
- struct cvmx_pow_ws_pcx_s cn50xx;
- struct cvmx_pow_ws_pcx_s cn52xx;
- struct cvmx_pow_ws_pcx_s cn52xxp1;
- struct cvmx_pow_ws_pcx_s cn56xx;
- struct cvmx_pow_ws_pcx_s cn56xxp1;
- struct cvmx_pow_ws_pcx_s cn58xx;
- struct cvmx_pow_ws_pcx_s cn58xxp1;
- struct cvmx_pow_ws_pcx_s cn61xx;
- struct cvmx_pow_ws_pcx_s cn63xx;
- struct cvmx_pow_ws_pcx_s cn63xxp1;
- struct cvmx_pow_ws_pcx_s cn66xx;
- struct cvmx_pow_ws_pcx_s cnf71xx;
};
union cvmx_sso_wq_int_thrx {
diff --git a/arch/mips/include/asm/octeon/cvmx-rnm-defs.h b/arch/mips/include/asm/octeon/cvmx-rnm-defs.h
index 87d6f92a548a..94295d2fe22e 100644
--- a/arch/mips/include/asm/octeon/cvmx-rnm-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-rnm-defs.h
@@ -47,24 +47,6 @@ union cvmx_rnm_bist_status {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_rnm_bist_status_s cn30xx;
- struct cvmx_rnm_bist_status_s cn31xx;
- struct cvmx_rnm_bist_status_s cn38xx;
- struct cvmx_rnm_bist_status_s cn38xxp2;
- struct cvmx_rnm_bist_status_s cn50xx;
- struct cvmx_rnm_bist_status_s cn52xx;
- struct cvmx_rnm_bist_status_s cn52xxp1;
- struct cvmx_rnm_bist_status_s cn56xx;
- struct cvmx_rnm_bist_status_s cn56xxp1;
- struct cvmx_rnm_bist_status_s cn58xx;
- struct cvmx_rnm_bist_status_s cn58xxp1;
- struct cvmx_rnm_bist_status_s cn61xx;
- struct cvmx_rnm_bist_status_s cn63xx;
- struct cvmx_rnm_bist_status_s cn63xxp1;
- struct cvmx_rnm_bist_status_s cn66xx;
- struct cvmx_rnm_bist_status_s cn68xx;
- struct cvmx_rnm_bist_status_s cn68xxp1;
- struct cvmx_rnm_bist_status_s cnf71xx;
};
union cvmx_rnm_ctl_status {
@@ -109,9 +91,6 @@ union cvmx_rnm_ctl_status {
uint64_t reserved_4_63:60;
#endif
} cn30xx;
- struct cvmx_rnm_ctl_status_cn30xx cn31xx;
- struct cvmx_rnm_ctl_status_cn30xx cn38xx;
- struct cvmx_rnm_ctl_status_cn30xx cn38xxp2;
struct cvmx_rnm_ctl_status_cn50xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_9_63:55;
@@ -131,13 +110,6 @@ union cvmx_rnm_ctl_status {
uint64_t reserved_9_63:55;
#endif
} cn50xx;
- struct cvmx_rnm_ctl_status_cn50xx cn52xx;
- struct cvmx_rnm_ctl_status_cn50xx cn52xxp1;
- struct cvmx_rnm_ctl_status_cn50xx cn56xx;
- struct cvmx_rnm_ctl_status_cn50xx cn56xxp1;
- struct cvmx_rnm_ctl_status_cn50xx cn58xx;
- struct cvmx_rnm_ctl_status_cn50xx cn58xxp1;
- struct cvmx_rnm_ctl_status_s cn61xx;
struct cvmx_rnm_ctl_status_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63:53;
@@ -161,11 +133,6 @@ union cvmx_rnm_ctl_status {
uint64_t reserved_11_63:53;
#endif
} cn63xx;
- struct cvmx_rnm_ctl_status_cn63xx cn63xxp1;
- struct cvmx_rnm_ctl_status_s cn66xx;
- struct cvmx_rnm_ctl_status_cn63xx cn68xx;
- struct cvmx_rnm_ctl_status_cn63xx cn68xxp1;
- struct cvmx_rnm_ctl_status_s cnf71xx;
};
union cvmx_rnm_eer_dbg {
@@ -177,13 +144,6 @@ union cvmx_rnm_eer_dbg {
uint64_t dat:64;
#endif
} s;
- struct cvmx_rnm_eer_dbg_s cn61xx;
- struct cvmx_rnm_eer_dbg_s cn63xx;
- struct cvmx_rnm_eer_dbg_s cn63xxp1;
- struct cvmx_rnm_eer_dbg_s cn66xx;
- struct cvmx_rnm_eer_dbg_s cn68xx;
- struct cvmx_rnm_eer_dbg_s cn68xxp1;
- struct cvmx_rnm_eer_dbg_s cnf71xx;
};
union cvmx_rnm_eer_key {
@@ -195,13 +155,6 @@ union cvmx_rnm_eer_key {
uint64_t key:64;
#endif
} s;
- struct cvmx_rnm_eer_key_s cn61xx;
- struct cvmx_rnm_eer_key_s cn63xx;
- struct cvmx_rnm_eer_key_s cn63xxp1;
- struct cvmx_rnm_eer_key_s cn66xx;
- struct cvmx_rnm_eer_key_s cn68xx;
- struct cvmx_rnm_eer_key_s cn68xxp1;
- struct cvmx_rnm_eer_key_s cnf71xx;
};
union cvmx_rnm_serial_num {
@@ -213,12 +166,6 @@ union cvmx_rnm_serial_num {
uint64_t dat:64;
#endif
} s;
- struct cvmx_rnm_serial_num_s cn61xx;
- struct cvmx_rnm_serial_num_s cn63xx;
- struct cvmx_rnm_serial_num_s cn66xx;
- struct cvmx_rnm_serial_num_s cn68xx;
- struct cvmx_rnm_serial_num_s cn68xxp1;
- struct cvmx_rnm_serial_num_s cnf71xx;
};
#endif
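
The removals above and throughout these cvmx-*-defs.h files follow one pattern: per-chip union members whose struct was byte-for-byte identical to an existing view are dropped, since nothing dereferences them. Callers read registers through the generic `s` view, so the chip aliases were dead weight. A minimal sketch of the surviving access pattern, using the RNM control register as the example (field names per the remaining `s` layout):

	union cvmx_rnm_ctl_status ctl;

	ctl.u64 = cvmx_read_csr(CVMX_RNM_CTL_STATUS);
	/* ctl.cn61xx.rng_en was an alias of ctl.s.rng_en; only the */
	/* generic view is kept, so all chips read the same way:    */
	if (ctl.s.rng_en)
		pr_info("Octeon RNG enabled\n");
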
diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
index 0c9c3e74d4ae..accc9977d9cd 100644
--- a/arch/mips/include/asm/octeon/cvmx-rst-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
@@ -80,9 +80,6 @@ union cvmx_rst_boot {
uint64_t chipkill:1;
#endif
} s;
- struct cvmx_rst_boot_s cn70xx;
- struct cvmx_rst_boot_s cn70xxp1;
- struct cvmx_rst_boot_s cn78xx;
};
union cvmx_rst_cfg {
@@ -102,9 +99,6 @@ union cvmx_rst_cfg {
uint64_t bist_delay:58;
#endif
} s;
- struct cvmx_rst_cfg_s cn70xx;
- struct cvmx_rst_cfg_s cn70xxp1;
- struct cvmx_rst_cfg_s cn78xx;
};
union cvmx_rst_ckill {
@@ -118,9 +112,6 @@ union cvmx_rst_ckill {
uint64_t reserved_47_63:17;
#endif
} s;
- struct cvmx_rst_ckill_s cn70xx;
- struct cvmx_rst_ckill_s cn70xxp1;
- struct cvmx_rst_ckill_s cn78xx;
};
union cvmx_rst_ctlx {
@@ -150,9 +141,6 @@ union cvmx_rst_ctlx {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_rst_ctlx_s cn70xx;
- struct cvmx_rst_ctlx_s cn70xxp1;
- struct cvmx_rst_ctlx_s cn78xx;
};
union cvmx_rst_delay {
@@ -168,9 +156,6 @@ union cvmx_rst_delay {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_rst_delay_s cn70xx;
- struct cvmx_rst_delay_s cn70xxp1;
- struct cvmx_rst_delay_s cn78xx;
};
union cvmx_rst_eco {
@@ -184,7 +169,6 @@ union cvmx_rst_eco {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_rst_eco_s cn78xx;
};
union cvmx_rst_int {
@@ -215,8 +199,6 @@ union cvmx_rst_int {
uint64_t reserved_11_63:53;
#endif
} cn70xx;
- struct cvmx_rst_int_cn70xx cn70xxp1;
- struct cvmx_rst_int_s cn78xx;
};
union cvmx_rst_ocx {
@@ -230,7 +212,6 @@ union cvmx_rst_ocx {
uint64_t reserved_3_63:61;
#endif
} s;
- struct cvmx_rst_ocx_s cn78xx;
};
union cvmx_rst_power_dbg {
@@ -244,7 +225,6 @@ union cvmx_rst_power_dbg {
uint64_t reserved_3_63:61;
#endif
} s;
- struct cvmx_rst_power_dbg_s cn78xx;
};
union cvmx_rst_pp_power {
@@ -267,8 +247,6 @@ union cvmx_rst_pp_power {
uint64_t reserved_4_63:60;
#endif
} cn70xx;
- struct cvmx_rst_pp_power_cn70xx cn70xxp1;
- struct cvmx_rst_pp_power_s cn78xx;
};
union cvmx_rst_soft_prstx {
@@ -282,9 +260,6 @@ union cvmx_rst_soft_prstx {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_rst_soft_prstx_s cn70xx;
- struct cvmx_rst_soft_prstx_s cn70xxp1;
- struct cvmx_rst_soft_prstx_s cn78xx;
};
union cvmx_rst_soft_rst {
@@ -298,9 +273,6 @@ union cvmx_rst_soft_rst {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_rst_soft_rst_s cn70xx;
- struct cvmx_rst_soft_rst_s cn70xxp1;
- struct cvmx_rst_soft_rst_s cn78xx;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-smix-defs.h b/arch/mips/include/asm/octeon/cvmx-smix-defs.h
index 8a278e6ddba9..7a928230b0c0 100644
--- a/arch/mips/include/asm/octeon/cvmx-smix-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-smix-defs.h
@@ -186,23 +186,6 @@ union cvmx_smix_clk {
uint64_t reserved_21_63:43;
#endif
} cn30xx;
- struct cvmx_smix_clk_cn30xx cn31xx;
- struct cvmx_smix_clk_cn30xx cn38xx;
- struct cvmx_smix_clk_cn30xx cn38xxp2;
- struct cvmx_smix_clk_s cn50xx;
- struct cvmx_smix_clk_s cn52xx;
- struct cvmx_smix_clk_s cn52xxp1;
- struct cvmx_smix_clk_s cn56xx;
- struct cvmx_smix_clk_s cn56xxp1;
- struct cvmx_smix_clk_cn30xx cn58xx;
- struct cvmx_smix_clk_cn30xx cn58xxp1;
- struct cvmx_smix_clk_s cn61xx;
- struct cvmx_smix_clk_s cn63xx;
- struct cvmx_smix_clk_s cn63xxp1;
- struct cvmx_smix_clk_s cn66xx;
- struct cvmx_smix_clk_s cn68xx;
- struct cvmx_smix_clk_s cn68xxp1;
- struct cvmx_smix_clk_s cnf71xx;
};
union cvmx_smix_cmd {
@@ -241,23 +224,6 @@ union cvmx_smix_cmd {
uint64_t reserved_17_63:47;
#endif
} cn30xx;
- struct cvmx_smix_cmd_cn30xx cn31xx;
- struct cvmx_smix_cmd_cn30xx cn38xx;
- struct cvmx_smix_cmd_cn30xx cn38xxp2;
- struct cvmx_smix_cmd_s cn50xx;
- struct cvmx_smix_cmd_s cn52xx;
- struct cvmx_smix_cmd_s cn52xxp1;
- struct cvmx_smix_cmd_s cn56xx;
- struct cvmx_smix_cmd_s cn56xxp1;
- struct cvmx_smix_cmd_cn30xx cn58xx;
- struct cvmx_smix_cmd_cn30xx cn58xxp1;
- struct cvmx_smix_cmd_s cn61xx;
- struct cvmx_smix_cmd_s cn63xx;
- struct cvmx_smix_cmd_s cn63xxp1;
- struct cvmx_smix_cmd_s cn66xx;
- struct cvmx_smix_cmd_s cn68xx;
- struct cvmx_smix_cmd_s cn68xxp1;
- struct cvmx_smix_cmd_s cnf71xx;
};
union cvmx_smix_en {
@@ -271,24 +237,6 @@ union cvmx_smix_en {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_smix_en_s cn30xx;
- struct cvmx_smix_en_s cn31xx;
- struct cvmx_smix_en_s cn38xx;
- struct cvmx_smix_en_s cn38xxp2;
- struct cvmx_smix_en_s cn50xx;
- struct cvmx_smix_en_s cn52xx;
- struct cvmx_smix_en_s cn52xxp1;
- struct cvmx_smix_en_s cn56xx;
- struct cvmx_smix_en_s cn56xxp1;
- struct cvmx_smix_en_s cn58xx;
- struct cvmx_smix_en_s cn58xxp1;
- struct cvmx_smix_en_s cn61xx;
- struct cvmx_smix_en_s cn63xx;
- struct cvmx_smix_en_s cn63xxp1;
- struct cvmx_smix_en_s cn66xx;
- struct cvmx_smix_en_s cn68xx;
- struct cvmx_smix_en_s cn68xxp1;
- struct cvmx_smix_en_s cnf71xx;
};
union cvmx_smix_rd_dat {
@@ -306,24 +254,6 @@ union cvmx_smix_rd_dat {
uint64_t reserved_18_63:46;
#endif
} s;
- struct cvmx_smix_rd_dat_s cn30xx;
- struct cvmx_smix_rd_dat_s cn31xx;
- struct cvmx_smix_rd_dat_s cn38xx;
- struct cvmx_smix_rd_dat_s cn38xxp2;
- struct cvmx_smix_rd_dat_s cn50xx;
- struct cvmx_smix_rd_dat_s cn52xx;
- struct cvmx_smix_rd_dat_s cn52xxp1;
- struct cvmx_smix_rd_dat_s cn56xx;
- struct cvmx_smix_rd_dat_s cn56xxp1;
- struct cvmx_smix_rd_dat_s cn58xx;
- struct cvmx_smix_rd_dat_s cn58xxp1;
- struct cvmx_smix_rd_dat_s cn61xx;
- struct cvmx_smix_rd_dat_s cn63xx;
- struct cvmx_smix_rd_dat_s cn63xxp1;
- struct cvmx_smix_rd_dat_s cn66xx;
- struct cvmx_smix_rd_dat_s cn68xx;
- struct cvmx_smix_rd_dat_s cn68xxp1;
- struct cvmx_smix_rd_dat_s cnf71xx;
};
union cvmx_smix_wr_dat {
@@ -341,24 +271,6 @@ union cvmx_smix_wr_dat {
uint64_t reserved_18_63:46;
#endif
} s;
- struct cvmx_smix_wr_dat_s cn30xx;
- struct cvmx_smix_wr_dat_s cn31xx;
- struct cvmx_smix_wr_dat_s cn38xx;
- struct cvmx_smix_wr_dat_s cn38xxp2;
- struct cvmx_smix_wr_dat_s cn50xx;
- struct cvmx_smix_wr_dat_s cn52xx;
- struct cvmx_smix_wr_dat_s cn52xxp1;
- struct cvmx_smix_wr_dat_s cn56xx;
- struct cvmx_smix_wr_dat_s cn56xxp1;
- struct cvmx_smix_wr_dat_s cn58xx;
- struct cvmx_smix_wr_dat_s cn58xxp1;
- struct cvmx_smix_wr_dat_s cn61xx;
- struct cvmx_smix_wr_dat_s cn63xx;
- struct cvmx_smix_wr_dat_s cn63xxp1;
- struct cvmx_smix_wr_dat_s cn66xx;
- struct cvmx_smix_wr_dat_s cn68xx;
- struct cvmx_smix_wr_dat_s cn68xxp1;
- struct cvmx_smix_wr_dat_s cnf71xx;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-spxx-defs.h b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
index f4c4e8051160..8471ed2dea51 100644
--- a/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-spxx-defs.h
@@ -58,10 +58,6 @@ union cvmx_spxx_bckprs_cnt {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_spxx_bckprs_cnt_s cn38xx;
- struct cvmx_spxx_bckprs_cnt_s cn38xxp2;
- struct cvmx_spxx_bckprs_cnt_s cn58xx;
- struct cvmx_spxx_bckprs_cnt_s cn58xxp1;
};
union cvmx_spxx_bist_stat {
@@ -79,10 +75,6 @@ union cvmx_spxx_bist_stat {
uint64_t reserved_3_63:61;
#endif
} s;
- struct cvmx_spxx_bist_stat_s cn38xx;
- struct cvmx_spxx_bist_stat_s cn38xxp2;
- struct cvmx_spxx_bist_stat_s cn58xx;
- struct cvmx_spxx_bist_stat_s cn58xxp1;
};
union cvmx_spxx_clk_ctl {
@@ -114,10 +106,6 @@ union cvmx_spxx_clk_ctl {
uint64_t reserved_17_63:47;
#endif
} s;
- struct cvmx_spxx_clk_ctl_s cn38xx;
- struct cvmx_spxx_clk_ctl_s cn38xxp2;
- struct cvmx_spxx_clk_ctl_s cn58xx;
- struct cvmx_spxx_clk_ctl_s cn58xxp1;
};
union cvmx_spxx_clk_stat {
@@ -145,10 +133,6 @@ union cvmx_spxx_clk_stat {
uint64_t reserved_11_63:53;
#endif
} s;
- struct cvmx_spxx_clk_stat_s cn38xx;
- struct cvmx_spxx_clk_stat_s cn38xxp2;
- struct cvmx_spxx_clk_stat_s cn58xx;
- struct cvmx_spxx_clk_stat_s cn58xxp1;
};
union cvmx_spxx_dbg_deskew_ctl {
@@ -190,10 +174,6 @@ union cvmx_spxx_dbg_deskew_ctl {
uint64_t reserved_30_63:34;
#endif
} s;
- struct cvmx_spxx_dbg_deskew_ctl_s cn38xx;
- struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2;
- struct cvmx_spxx_dbg_deskew_ctl_s cn58xx;
- struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1;
};
union cvmx_spxx_dbg_deskew_state {
@@ -213,10 +193,6 @@ union cvmx_spxx_dbg_deskew_state {
uint64_t reserved_9_63:55;
#endif
} s;
- struct cvmx_spxx_dbg_deskew_state_s cn38xx;
- struct cvmx_spxx_dbg_deskew_state_s cn38xxp2;
- struct cvmx_spxx_dbg_deskew_state_s cn58xx;
- struct cvmx_spxx_dbg_deskew_state_s cn58xxp1;
};
union cvmx_spxx_drv_ctl {
@@ -241,7 +217,6 @@ union cvmx_spxx_drv_ctl {
uint64_t reserved_16_63:48;
#endif
} cn38xx;
- struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2;
struct cvmx_spxx_drv_ctl_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63:40;
@@ -257,7 +232,6 @@ union cvmx_spxx_drv_ctl {
uint64_t reserved_24_63:40;
#endif
} cn58xx;
- struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1;
};
union cvmx_spxx_err_ctl {
@@ -279,10 +253,6 @@ union cvmx_spxx_err_ctl {
uint64_t reserved_9_63:55;
#endif
} s;
- struct cvmx_spxx_err_ctl_s cn38xx;
- struct cvmx_spxx_err_ctl_s cn38xxp2;
- struct cvmx_spxx_err_ctl_s cn58xx;
- struct cvmx_spxx_err_ctl_s cn58xxp1;
};
union cvmx_spxx_int_dat {
@@ -304,10 +274,6 @@ union cvmx_spxx_int_dat {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_spxx_int_dat_s cn38xx;
- struct cvmx_spxx_int_dat_s cn38xxp2;
- struct cvmx_spxx_int_dat_s cn58xx;
- struct cvmx_spxx_int_dat_s cn58xxp1;
};
union cvmx_spxx_int_msk {
@@ -341,10 +307,6 @@ union cvmx_spxx_int_msk {
uint64_t reserved_12_63:52;
#endif
} s;
- struct cvmx_spxx_int_msk_s cn38xx;
- struct cvmx_spxx_int_msk_s cn38xxp2;
- struct cvmx_spxx_int_msk_s cn58xx;
- struct cvmx_spxx_int_msk_s cn58xxp1;
};
union cvmx_spxx_int_reg {
@@ -382,10 +344,6 @@ union cvmx_spxx_int_reg {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_spxx_int_reg_s cn38xx;
- struct cvmx_spxx_int_reg_s cn38xxp2;
- struct cvmx_spxx_int_reg_s cn58xx;
- struct cvmx_spxx_int_reg_s cn58xxp1;
};
union cvmx_spxx_int_sync {
@@ -419,10 +377,6 @@ union cvmx_spxx_int_sync {
uint64_t reserved_12_63:52;
#endif
} s;
- struct cvmx_spxx_int_sync_s cn38xx;
- struct cvmx_spxx_int_sync_s cn38xxp2;
- struct cvmx_spxx_int_sync_s cn58xx;
- struct cvmx_spxx_int_sync_s cn58xxp1;
};
union cvmx_spxx_tpa_acc {
@@ -436,10 +390,6 @@ union cvmx_spxx_tpa_acc {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_spxx_tpa_acc_s cn38xx;
- struct cvmx_spxx_tpa_acc_s cn38xxp2;
- struct cvmx_spxx_tpa_acc_s cn58xx;
- struct cvmx_spxx_tpa_acc_s cn58xxp1;
};
union cvmx_spxx_tpa_max {
@@ -453,10 +403,6 @@ union cvmx_spxx_tpa_max {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_spxx_tpa_max_s cn38xx;
- struct cvmx_spxx_tpa_max_s cn38xxp2;
- struct cvmx_spxx_tpa_max_s cn58xx;
- struct cvmx_spxx_tpa_max_s cn58xxp1;
};
union cvmx_spxx_tpa_sel {
@@ -470,10 +416,6 @@ union cvmx_spxx_tpa_sel {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_spxx_tpa_sel_s cn38xx;
- struct cvmx_spxx_tpa_sel_s cn38xxp2;
- struct cvmx_spxx_tpa_sel_s cn58xx;
- struct cvmx_spxx_tpa_sel_s cn58xxp1;
};
union cvmx_spxx_trn4_ctl {
@@ -499,10 +441,6 @@ union cvmx_spxx_trn4_ctl {
uint64_t reserved_13_63:51;
#endif
} s;
- struct cvmx_spxx_trn4_ctl_s cn38xx;
- struct cvmx_spxx_trn4_ctl_s cn38xxp2;
- struct cvmx_spxx_trn4_ctl_s cn58xx;
- struct cvmx_spxx_trn4_ctl_s cn58xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-sriox-defs.h b/arch/mips/include/asm/octeon/cvmx-sriox-defs.h
index 5140f2d2ad1c..34d0fadb5eb3 100644
--- a/arch/mips/include/asm/octeon/cvmx-sriox-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-sriox-defs.h
@@ -112,8 +112,6 @@ union cvmx_sriox_acc_ctrl {
uint64_t reserved_3_63:61;
#endif
} cn63xx;
- struct cvmx_sriox_acc_ctrl_cn63xx cn63xxp1;
- struct cvmx_sriox_acc_ctrl_s cn66xx;
};
union cvmx_sriox_asmbly_id {
@@ -129,9 +127,6 @@ union cvmx_sriox_asmbly_id {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_sriox_asmbly_id_s cn63xx;
- struct cvmx_sriox_asmbly_id_s cn63xxp1;
- struct cvmx_sriox_asmbly_id_s cn66xx;
};
union cvmx_sriox_asmbly_info {
@@ -147,9 +142,6 @@ union cvmx_sriox_asmbly_info {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_sriox_asmbly_info_s cn63xx;
- struct cvmx_sriox_asmbly_info_s cn63xxp1;
- struct cvmx_sriox_asmbly_info_s cn66xx;
};
union cvmx_sriox_bell_resp_ctrl {
@@ -169,9 +161,6 @@ union cvmx_sriox_bell_resp_ctrl {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_sriox_bell_resp_ctrl_s cn63xx;
- struct cvmx_sriox_bell_resp_ctrl_s cn63xxp1;
- struct cvmx_sriox_bell_resp_ctrl_s cn66xx;
};
union cvmx_sriox_bist_status {
@@ -305,7 +294,6 @@ union cvmx_sriox_bist_status {
uint64_t reserved_44_63:20;
#endif
} cn63xxp1;
- struct cvmx_sriox_bist_status_s cn66xx;
};
union cvmx_sriox_imsg_ctrl {
@@ -343,9 +331,6 @@ union cvmx_sriox_imsg_ctrl {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_sriox_imsg_ctrl_s cn63xx;
- struct cvmx_sriox_imsg_ctrl_s cn63xxp1;
- struct cvmx_sriox_imsg_ctrl_s cn66xx;
};
union cvmx_sriox_imsg_inst_hdrx {
@@ -383,9 +368,6 @@ union cvmx_sriox_imsg_inst_hdrx {
uint64_t r:1;
#endif
} s;
- struct cvmx_sriox_imsg_inst_hdrx_s cn63xx;
- struct cvmx_sriox_imsg_inst_hdrx_s cn63xxp1;
- struct cvmx_sriox_imsg_inst_hdrx_s cn66xx;
};
union cvmx_sriox_imsg_qos_grpx {
@@ -443,9 +425,6 @@ union cvmx_sriox_imsg_qos_grpx {
uint64_t reserved_63_63:1;
#endif
} s;
- struct cvmx_sriox_imsg_qos_grpx_s cn63xx;
- struct cvmx_sriox_imsg_qos_grpx_s cn63xxp1;
- struct cvmx_sriox_imsg_qos_grpx_s cn66xx;
};
union cvmx_sriox_imsg_statusx {
@@ -503,9 +482,6 @@ union cvmx_sriox_imsg_statusx {
uint64_t val1:1;
#endif
} s;
- struct cvmx_sriox_imsg_statusx_s cn63xx;
- struct cvmx_sriox_imsg_statusx_s cn63xxp1;
- struct cvmx_sriox_imsg_statusx_s cn66xx;
};
union cvmx_sriox_imsg_vport_thr {
@@ -541,9 +517,6 @@ union cvmx_sriox_imsg_vport_thr {
uint64_t reserved_54_63:10;
#endif
} s;
- struct cvmx_sriox_imsg_vport_thr_s cn63xx;
- struct cvmx_sriox_imsg_vport_thr_s cn63xxp1;
- struct cvmx_sriox_imsg_vport_thr_s cn66xx;
};
union cvmx_sriox_imsg_vport_thr2 {
@@ -563,7 +536,6 @@ union cvmx_sriox_imsg_vport_thr2 {
uint64_t reserved_46_63:18;
#endif
} s;
- struct cvmx_sriox_imsg_vport_thr2_s cn66xx;
};
union cvmx_sriox_int2_enable {
@@ -577,8 +549,6 @@ union cvmx_sriox_int2_enable {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_sriox_int2_enable_s cn63xx;
- struct cvmx_sriox_int2_enable_s cn66xx;
};
union cvmx_sriox_int2_reg {
@@ -596,8 +566,6 @@ union cvmx_sriox_int2_reg {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_sriox_int2_reg_s cn63xx;
- struct cvmx_sriox_int2_reg_s cn66xx;
};
union cvmx_sriox_int_enable {
@@ -663,7 +631,6 @@ union cvmx_sriox_int_enable {
uint64_t reserved_27_63:37;
#endif
} s;
- struct cvmx_sriox_int_enable_s cn63xx;
struct cvmx_sriox_int_enable_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_22_63:42;
@@ -715,7 +682,6 @@ union cvmx_sriox_int_enable {
uint64_t reserved_22_63:42;
#endif
} cn63xxp1;
- struct cvmx_sriox_int_enable_s cn66xx;
};
union cvmx_sriox_int_info0 {
@@ -743,9 +709,6 @@ union cvmx_sriox_int_info0 {
uint64_t cmd:4;
#endif
} s;
- struct cvmx_sriox_int_info0_s cn63xx;
- struct cvmx_sriox_int_info0_s cn63xxp1;
- struct cvmx_sriox_int_info0_s cn66xx;
};
union cvmx_sriox_int_info1 {
@@ -757,9 +720,6 @@ union cvmx_sriox_int_info1 {
uint64_t info1:64;
#endif
} s;
- struct cvmx_sriox_int_info1_s cn63xx;
- struct cvmx_sriox_int_info1_s cn63xxp1;
- struct cvmx_sriox_int_info1_s cn66xx;
};
union cvmx_sriox_int_info2 {
@@ -791,9 +751,6 @@ union cvmx_sriox_int_info2 {
uint64_t prio:2;
#endif
} s;
- struct cvmx_sriox_int_info2_s cn63xx;
- struct cvmx_sriox_int_info2_s cn63xxp1;
- struct cvmx_sriox_int_info2_s cn66xx;
};
union cvmx_sriox_int_info3 {
@@ -813,9 +770,6 @@ union cvmx_sriox_int_info3 {
uint64_t prio:2;
#endif
} s;
- struct cvmx_sriox_int_info3_s cn63xx;
- struct cvmx_sriox_int_info3_s cn63xxp1;
- struct cvmx_sriox_int_info3_s cn66xx;
};
union cvmx_sriox_int_reg {
@@ -885,7 +839,6 @@ union cvmx_sriox_int_reg {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_sriox_int_reg_s cn63xx;
struct cvmx_sriox_int_reg_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_22_63:42;
@@ -937,7 +890,6 @@ union cvmx_sriox_int_reg {
uint64_t reserved_22_63:42;
#endif
} cn63xxp1;
- struct cvmx_sriox_int_reg_s cn66xx;
};
union cvmx_sriox_ip_feature {
@@ -990,8 +942,6 @@ union cvmx_sriox_ip_feature {
uint64_t ops:32;
#endif
} cn63xx;
- struct cvmx_sriox_ip_feature_cn63xx cn63xxp1;
- struct cvmx_sriox_ip_feature_s cn66xx;
};
union cvmx_sriox_mac_buffers {
@@ -1021,8 +971,6 @@ union cvmx_sriox_mac_buffers {
uint64_t reserved_56_63:8;
#endif
} s;
- struct cvmx_sriox_mac_buffers_s cn63xx;
- struct cvmx_sriox_mac_buffers_s cn66xx;
};
union cvmx_sriox_maint_op {
@@ -1044,9 +992,6 @@ union cvmx_sriox_maint_op {
uint64_t wr_data:32;
#endif
} s;
- struct cvmx_sriox_maint_op_s cn63xx;
- struct cvmx_sriox_maint_op_s cn63xxp1;
- struct cvmx_sriox_maint_op_s cn66xx;
};
union cvmx_sriox_maint_rd_data {
@@ -1062,9 +1007,6 @@ union cvmx_sriox_maint_rd_data {
uint64_t reserved_33_63:31;
#endif
} s;
- struct cvmx_sriox_maint_rd_data_s cn63xx;
- struct cvmx_sriox_maint_rd_data_s cn63xxp1;
- struct cvmx_sriox_maint_rd_data_s cn66xx;
};
union cvmx_sriox_mce_tx_ctl {
@@ -1078,9 +1020,6 @@ union cvmx_sriox_mce_tx_ctl {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_sriox_mce_tx_ctl_s cn63xx;
- struct cvmx_sriox_mce_tx_ctl_s cn63xxp1;
- struct cvmx_sriox_mce_tx_ctl_s cn66xx;
};
union cvmx_sriox_mem_op_ctrl {
@@ -1106,9 +1045,6 @@ union cvmx_sriox_mem_op_ctrl {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_sriox_mem_op_ctrl_s cn63xx;
- struct cvmx_sriox_mem_op_ctrl_s cn63xxp1;
- struct cvmx_sriox_mem_op_ctrl_s cn66xx;
};
union cvmx_sriox_omsg_ctrlx {
@@ -1140,7 +1076,6 @@ union cvmx_sriox_omsg_ctrlx {
uint64_t testmode:1;
#endif
} s;
- struct cvmx_sriox_omsg_ctrlx_s cn63xx;
struct cvmx_sriox_omsg_ctrlx_cn63xxp1 {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t testmode:1;
@@ -1166,7 +1101,6 @@ union cvmx_sriox_omsg_ctrlx {
uint64_t testmode:1;
#endif
} cn63xxp1;
- struct cvmx_sriox_omsg_ctrlx_s cn66xx;
};
union cvmx_sriox_omsg_done_countsx {
@@ -1182,8 +1116,6 @@ union cvmx_sriox_omsg_done_countsx {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_sriox_omsg_done_countsx_s cn63xx;
- struct cvmx_sriox_omsg_done_countsx_s cn66xx;
};
union cvmx_sriox_omsg_fmp_mrx {
@@ -1225,9 +1157,6 @@ union cvmx_sriox_omsg_fmp_mrx {
uint64_t reserved_15_63:49;
#endif
} s;
- struct cvmx_sriox_omsg_fmp_mrx_s cn63xx;
- struct cvmx_sriox_omsg_fmp_mrx_s cn63xxp1;
- struct cvmx_sriox_omsg_fmp_mrx_s cn66xx;
};
union cvmx_sriox_omsg_nmp_mrx {
@@ -1269,9 +1198,6 @@ union cvmx_sriox_omsg_nmp_mrx {
uint64_t reserved_15_63:49;
#endif
} s;
- struct cvmx_sriox_omsg_nmp_mrx_s cn63xx;
- struct cvmx_sriox_omsg_nmp_mrx_s cn63xxp1;
- struct cvmx_sriox_omsg_nmp_mrx_s cn66xx;
};
union cvmx_sriox_omsg_portx {
@@ -1302,8 +1228,6 @@ union cvmx_sriox_omsg_portx {
uint64_t reserved_32_63:32;
#endif
} cn63xx;
- struct cvmx_sriox_omsg_portx_cn63xx cn63xxp1;
- struct cvmx_sriox_omsg_portx_s cn66xx;
};
union cvmx_sriox_omsg_silo_thr {
@@ -1317,8 +1241,6 @@ union cvmx_sriox_omsg_silo_thr {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_sriox_omsg_silo_thr_s cn63xx;
- struct cvmx_sriox_omsg_silo_thr_s cn66xx;
};
union cvmx_sriox_omsg_sp_mrx {
@@ -1362,9 +1284,6 @@ union cvmx_sriox_omsg_sp_mrx {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_sriox_omsg_sp_mrx_s cn63xx;
- struct cvmx_sriox_omsg_sp_mrx_s cn63xxp1;
- struct cvmx_sriox_omsg_sp_mrx_s cn66xx;
};
union cvmx_sriox_priox_in_use {
@@ -1380,8 +1299,6 @@ union cvmx_sriox_priox_in_use {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_sriox_priox_in_use_s cn63xx;
- struct cvmx_sriox_priox_in_use_s cn66xx;
};
union cvmx_sriox_rx_bell {
@@ -1409,9 +1326,6 @@ union cvmx_sriox_rx_bell {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_sriox_rx_bell_s cn63xx;
- struct cvmx_sriox_rx_bell_s cn63xxp1;
- struct cvmx_sriox_rx_bell_s cn66xx;
};
union cvmx_sriox_rx_bell_seq {
@@ -1427,9 +1341,6 @@ union cvmx_sriox_rx_bell_seq {
uint64_t reserved_40_63:24;
#endif
} s;
- struct cvmx_sriox_rx_bell_seq_s cn63xx;
- struct cvmx_sriox_rx_bell_seq_s cn63xxp1;
- struct cvmx_sriox_rx_bell_seq_s cn66xx;
};
union cvmx_sriox_rx_status {
@@ -1457,9 +1368,6 @@ union cvmx_sriox_rx_status {
uint64_t rtn_pr3:8;
#endif
} s;
- struct cvmx_sriox_rx_status_s cn63xx;
- struct cvmx_sriox_rx_status_s cn63xxp1;
- struct cvmx_sriox_rx_status_s cn66xx;
};
union cvmx_sriox_s2m_typex {
@@ -1491,9 +1399,6 @@ union cvmx_sriox_s2m_typex {
uint64_t reserved_19_63:45;
#endif
} s;
- struct cvmx_sriox_s2m_typex_s cn63xx;
- struct cvmx_sriox_s2m_typex_s cn63xxp1;
- struct cvmx_sriox_s2m_typex_s cn66xx;
};
union cvmx_sriox_seq {
@@ -1507,9 +1412,6 @@ union cvmx_sriox_seq {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_sriox_seq_s cn63xx;
- struct cvmx_sriox_seq_s cn63xxp1;
- struct cvmx_sriox_seq_s cn66xx;
};
union cvmx_sriox_status_reg {
@@ -1525,9 +1427,6 @@ union cvmx_sriox_status_reg {
uint64_t reserved_2_63:62;
#endif
} s;
- struct cvmx_sriox_status_reg_s cn63xx;
- struct cvmx_sriox_status_reg_s cn63xxp1;
- struct cvmx_sriox_status_reg_s cn66xx;
};
union cvmx_sriox_tag_ctrl {
@@ -1549,9 +1448,6 @@ union cvmx_sriox_tag_ctrl {
uint64_t reserved_17_63:47;
#endif
} s;
- struct cvmx_sriox_tag_ctrl_s cn63xx;
- struct cvmx_sriox_tag_ctrl_s cn63xxp1;
- struct cvmx_sriox_tag_ctrl_s cn66xx;
};
union cvmx_sriox_tlp_credits {
@@ -1573,9 +1469,6 @@ union cvmx_sriox_tlp_credits {
uint64_t reserved_28_63:36;
#endif
} s;
- struct cvmx_sriox_tlp_credits_s cn63xx;
- struct cvmx_sriox_tlp_credits_s cn63xxp1;
- struct cvmx_sriox_tlp_credits_s cn66xx;
};
union cvmx_sriox_tx_bell {
@@ -1605,9 +1498,6 @@ union cvmx_sriox_tx_bell {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_sriox_tx_bell_s cn63xx;
- struct cvmx_sriox_tx_bell_s cn63xxp1;
- struct cvmx_sriox_tx_bell_s cn66xx;
};
union cvmx_sriox_tx_bell_info {
@@ -1639,9 +1529,6 @@ union cvmx_sriox_tx_bell_info {
uint64_t reserved_48_63:16;
#endif
} s;
- struct cvmx_sriox_tx_bell_info_s cn63xx;
- struct cvmx_sriox_tx_bell_info_s cn63xxp1;
- struct cvmx_sriox_tx_bell_info_s cn66xx;
};
union cvmx_sriox_tx_ctrl {
@@ -1675,9 +1562,6 @@ union cvmx_sriox_tx_ctrl {
uint64_t reserved_53_63:11;
#endif
} s;
- struct cvmx_sriox_tx_ctrl_s cn63xx;
- struct cvmx_sriox_tx_ctrl_s cn63xxp1;
- struct cvmx_sriox_tx_ctrl_s cn66xx;
};
union cvmx_sriox_tx_emphasis {
@@ -1691,8 +1575,6 @@ union cvmx_sriox_tx_emphasis {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_sriox_tx_emphasis_s cn63xx;
- struct cvmx_sriox_tx_emphasis_s cn66xx;
};
union cvmx_sriox_tx_status {
@@ -1712,9 +1594,6 @@ union cvmx_sriox_tx_status {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_sriox_tx_status_s cn63xx;
- struct cvmx_sriox_tx_status_s cn63xxp1;
- struct cvmx_sriox_tx_status_s cn66xx;
};
union cvmx_sriox_wr_done_counts {
@@ -1730,8 +1609,6 @@ union cvmx_sriox_wr_done_counts {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_sriox_wr_done_counts_s cn63xx;
- struct cvmx_sriox_wr_done_counts_s cn66xx;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-srxx-defs.h b/arch/mips/include/asm/octeon/cvmx-srxx-defs.h
index c98e625cd4ed..76b2a42f53aa 100644
--- a/arch/mips/include/asm/octeon/cvmx-srxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-srxx-defs.h
@@ -52,10 +52,6 @@ union cvmx_srxx_com_ctl {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_srxx_com_ctl_s cn38xx;
- struct cvmx_srxx_com_ctl_s cn38xxp2;
- struct cvmx_srxx_com_ctl_s cn58xx;
- struct cvmx_srxx_com_ctl_s cn58xxp1;
};
union cvmx_srxx_ign_rx_full {
@@ -69,10 +65,6 @@ union cvmx_srxx_ign_rx_full {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_srxx_ign_rx_full_s cn38xx;
- struct cvmx_srxx_ign_rx_full_s cn38xxp2;
- struct cvmx_srxx_ign_rx_full_s cn58xx;
- struct cvmx_srxx_ign_rx_full_s cn58xxp1;
};
union cvmx_srxx_spi4_calx {
@@ -94,10 +86,6 @@ union cvmx_srxx_spi4_calx {
uint64_t reserved_17_63:47;
#endif
} s;
- struct cvmx_srxx_spi4_calx_s cn38xx;
- struct cvmx_srxx_spi4_calx_s cn38xxp2;
- struct cvmx_srxx_spi4_calx_s cn58xx;
- struct cvmx_srxx_spi4_calx_s cn58xxp1;
};
union cvmx_srxx_spi4_stat {
@@ -115,10 +103,6 @@ union cvmx_srxx_spi4_stat {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_srxx_spi4_stat_s cn38xx;
- struct cvmx_srxx_spi4_stat_s cn38xxp2;
- struct cvmx_srxx_spi4_stat_s cn58xx;
- struct cvmx_srxx_spi4_stat_s cn58xxp1;
};
union cvmx_srxx_sw_tick_ctl {
@@ -140,9 +124,6 @@ union cvmx_srxx_sw_tick_ctl {
uint64_t reserved_14_63:50;
#endif
} s;
- struct cvmx_srxx_sw_tick_ctl_s cn38xx;
- struct cvmx_srxx_sw_tick_ctl_s cn58xx;
- struct cvmx_srxx_sw_tick_ctl_s cn58xxp1;
};
union cvmx_srxx_sw_tick_dat {
@@ -154,9 +135,6 @@ union cvmx_srxx_sw_tick_dat {
uint64_t dat:64;
#endif
} s;
- struct cvmx_srxx_sw_tick_dat_s cn38xx;
- struct cvmx_srxx_sw_tick_dat_s cn58xx;
- struct cvmx_srxx_sw_tick_dat_s cn58xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-stxx-defs.h b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
index 3c409a854d91..f49d82145c57 100644
--- a/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-stxx-defs.h
@@ -64,10 +64,6 @@ union cvmx_stxx_arb_ctl {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_stxx_arb_ctl_s cn38xx;
- struct cvmx_stxx_arb_ctl_s cn38xxp2;
- struct cvmx_stxx_arb_ctl_s cn58xx;
- struct cvmx_stxx_arb_ctl_s cn58xxp1;
};
union cvmx_stxx_bckprs_cnt {
@@ -81,10 +77,6 @@ union cvmx_stxx_bckprs_cnt {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_stxx_bckprs_cnt_s cn38xx;
- struct cvmx_stxx_bckprs_cnt_s cn38xxp2;
- struct cvmx_stxx_bckprs_cnt_s cn58xx;
- struct cvmx_stxx_bckprs_cnt_s cn58xxp1;
};
union cvmx_stxx_com_ctl {
@@ -102,10 +94,6 @@ union cvmx_stxx_com_ctl {
uint64_t reserved_4_63:60;
#endif
} s;
- struct cvmx_stxx_com_ctl_s cn38xx;
- struct cvmx_stxx_com_ctl_s cn38xxp2;
- struct cvmx_stxx_com_ctl_s cn58xx;
- struct cvmx_stxx_com_ctl_s cn58xxp1;
};
union cvmx_stxx_dip_cnt {
@@ -121,10 +109,6 @@ union cvmx_stxx_dip_cnt {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_stxx_dip_cnt_s cn38xx;
- struct cvmx_stxx_dip_cnt_s cn38xxp2;
- struct cvmx_stxx_dip_cnt_s cn58xx;
- struct cvmx_stxx_dip_cnt_s cn58xxp1;
};
union cvmx_stxx_ign_cal {
@@ -138,10 +122,6 @@ union cvmx_stxx_ign_cal {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_stxx_ign_cal_s cn38xx;
- struct cvmx_stxx_ign_cal_s cn38xxp2;
- struct cvmx_stxx_ign_cal_s cn58xx;
- struct cvmx_stxx_ign_cal_s cn58xxp1;
};
union cvmx_stxx_int_msk {
@@ -169,10 +149,6 @@ union cvmx_stxx_int_msk {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_stxx_int_msk_s cn38xx;
- struct cvmx_stxx_int_msk_s cn38xxp2;
- struct cvmx_stxx_int_msk_s cn58xx;
- struct cvmx_stxx_int_msk_s cn58xxp1;
};
union cvmx_stxx_int_reg {
@@ -202,10 +178,6 @@ union cvmx_stxx_int_reg {
uint64_t reserved_9_63:55;
#endif
} s;
- struct cvmx_stxx_int_reg_s cn38xx;
- struct cvmx_stxx_int_reg_s cn38xxp2;
- struct cvmx_stxx_int_reg_s cn58xx;
- struct cvmx_stxx_int_reg_s cn58xxp1;
};
union cvmx_stxx_int_sync {
@@ -233,10 +205,6 @@ union cvmx_stxx_int_sync {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_stxx_int_sync_s cn38xx;
- struct cvmx_stxx_int_sync_s cn38xxp2;
- struct cvmx_stxx_int_sync_s cn58xx;
- struct cvmx_stxx_int_sync_s cn58xxp1;
};
union cvmx_stxx_min_bst {
@@ -250,10 +218,6 @@ union cvmx_stxx_min_bst {
uint64_t reserved_9_63:55;
#endif
} s;
- struct cvmx_stxx_min_bst_s cn38xx;
- struct cvmx_stxx_min_bst_s cn38xxp2;
- struct cvmx_stxx_min_bst_s cn58xx;
- struct cvmx_stxx_min_bst_s cn58xxp1;
};
union cvmx_stxx_spi4_calx {
@@ -275,10 +239,6 @@ union cvmx_stxx_spi4_calx {
uint64_t reserved_17_63:47;
#endif
} s;
- struct cvmx_stxx_spi4_calx_s cn38xx;
- struct cvmx_stxx_spi4_calx_s cn38xxp2;
- struct cvmx_stxx_spi4_calx_s cn58xx;
- struct cvmx_stxx_spi4_calx_s cn58xxp1;
};
union cvmx_stxx_spi4_dat {
@@ -294,10 +254,6 @@ union cvmx_stxx_spi4_dat {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_stxx_spi4_dat_s cn38xx;
- struct cvmx_stxx_spi4_dat_s cn38xxp2;
- struct cvmx_stxx_spi4_dat_s cn58xx;
- struct cvmx_stxx_spi4_dat_s cn58xxp1;
};
union cvmx_stxx_spi4_stat {
@@ -315,10 +271,6 @@ union cvmx_stxx_spi4_stat {
uint64_t reserved_16_63:48;
#endif
} s;
- struct cvmx_stxx_spi4_stat_s cn38xx;
- struct cvmx_stxx_spi4_stat_s cn38xxp2;
- struct cvmx_stxx_spi4_stat_s cn58xx;
- struct cvmx_stxx_spi4_stat_s cn58xxp1;
};
union cvmx_stxx_stat_bytes_hi {
@@ -332,10 +284,6 @@ union cvmx_stxx_stat_bytes_hi {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_stxx_stat_bytes_hi_s cn38xx;
- struct cvmx_stxx_stat_bytes_hi_s cn38xxp2;
- struct cvmx_stxx_stat_bytes_hi_s cn58xx;
- struct cvmx_stxx_stat_bytes_hi_s cn58xxp1;
};
union cvmx_stxx_stat_bytes_lo {
@@ -349,10 +297,6 @@ union cvmx_stxx_stat_bytes_lo {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_stxx_stat_bytes_lo_s cn38xx;
- struct cvmx_stxx_stat_bytes_lo_s cn38xxp2;
- struct cvmx_stxx_stat_bytes_lo_s cn58xx;
- struct cvmx_stxx_stat_bytes_lo_s cn58xxp1;
};
union cvmx_stxx_stat_ctl {
@@ -368,10 +312,6 @@ union cvmx_stxx_stat_ctl {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_stxx_stat_ctl_s cn38xx;
- struct cvmx_stxx_stat_ctl_s cn38xxp2;
- struct cvmx_stxx_stat_ctl_s cn58xx;
- struct cvmx_stxx_stat_ctl_s cn58xxp1;
};
union cvmx_stxx_stat_pkt_xmt {
@@ -385,10 +325,6 @@ union cvmx_stxx_stat_pkt_xmt {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_stxx_stat_pkt_xmt_s cn38xx;
- struct cvmx_stxx_stat_pkt_xmt_s cn38xxp2;
- struct cvmx_stxx_stat_pkt_xmt_s cn58xx;
- struct cvmx_stxx_stat_pkt_xmt_s cn58xxp1;
};
#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h b/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h
index bc5b80c6bbe2..6cf2280166dd 100644
--- a/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-uctlx-defs.h
@@ -63,13 +63,6 @@ union cvmx_uctlx_bist_status {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_uctlx_bist_status_s cn61xx;
- struct cvmx_uctlx_bist_status_s cn63xx;
- struct cvmx_uctlx_bist_status_s cn63xxp1;
- struct cvmx_uctlx_bist_status_s cn66xx;
- struct cvmx_uctlx_bist_status_s cn68xx;
- struct cvmx_uctlx_bist_status_s cn68xxp1;
- struct cvmx_uctlx_bist_status_s cnf71xx;
};
union cvmx_uctlx_clk_rst_ctl {
@@ -121,13 +114,6 @@ union cvmx_uctlx_clk_rst_ctl {
uint64_t reserved_25_63:39;
#endif
} s;
- struct cvmx_uctlx_clk_rst_ctl_s cn61xx;
- struct cvmx_uctlx_clk_rst_ctl_s cn63xx;
- struct cvmx_uctlx_clk_rst_ctl_s cn63xxp1;
- struct cvmx_uctlx_clk_rst_ctl_s cn66xx;
- struct cvmx_uctlx_clk_rst_ctl_s cn68xx;
- struct cvmx_uctlx_clk_rst_ctl_s cn68xxp1;
- struct cvmx_uctlx_clk_rst_ctl_s cnf71xx;
};
union cvmx_uctlx_ehci_ctl {
@@ -161,13 +147,6 @@ union cvmx_uctlx_ehci_ctl {
uint64_t reserved_20_63:44;
#endif
} s;
- struct cvmx_uctlx_ehci_ctl_s cn61xx;
- struct cvmx_uctlx_ehci_ctl_s cn63xx;
- struct cvmx_uctlx_ehci_ctl_s cn63xxp1;
- struct cvmx_uctlx_ehci_ctl_s cn66xx;
- struct cvmx_uctlx_ehci_ctl_s cn68xx;
- struct cvmx_uctlx_ehci_ctl_s cn68xxp1;
- struct cvmx_uctlx_ehci_ctl_s cnf71xx;
};
union cvmx_uctlx_ehci_fla {
@@ -181,13 +160,6 @@ union cvmx_uctlx_ehci_fla {
uint64_t reserved_6_63:58;
#endif
} s;
- struct cvmx_uctlx_ehci_fla_s cn61xx;
- struct cvmx_uctlx_ehci_fla_s cn63xx;
- struct cvmx_uctlx_ehci_fla_s cn63xxp1;
- struct cvmx_uctlx_ehci_fla_s cn66xx;
- struct cvmx_uctlx_ehci_fla_s cn68xx;
- struct cvmx_uctlx_ehci_fla_s cn68xxp1;
- struct cvmx_uctlx_ehci_fla_s cnf71xx;
};
union cvmx_uctlx_erto_ctl {
@@ -203,13 +175,6 @@ union cvmx_uctlx_erto_ctl {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_uctlx_erto_ctl_s cn61xx;
- struct cvmx_uctlx_erto_ctl_s cn63xx;
- struct cvmx_uctlx_erto_ctl_s cn63xxp1;
- struct cvmx_uctlx_erto_ctl_s cn66xx;
- struct cvmx_uctlx_erto_ctl_s cn68xx;
- struct cvmx_uctlx_erto_ctl_s cn68xxp1;
- struct cvmx_uctlx_erto_ctl_s cnf71xx;
};
union cvmx_uctlx_if_ena {
@@ -223,13 +188,6 @@ union cvmx_uctlx_if_ena {
uint64_t reserved_1_63:63;
#endif
} s;
- struct cvmx_uctlx_if_ena_s cn61xx;
- struct cvmx_uctlx_if_ena_s cn63xx;
- struct cvmx_uctlx_if_ena_s cn63xxp1;
- struct cvmx_uctlx_if_ena_s cn66xx;
- struct cvmx_uctlx_if_ena_s cn68xx;
- struct cvmx_uctlx_if_ena_s cn68xxp1;
- struct cvmx_uctlx_if_ena_s cnf71xx;
};
union cvmx_uctlx_int_ena {
@@ -257,13 +215,6 @@ union cvmx_uctlx_int_ena {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_uctlx_int_ena_s cn61xx;
- struct cvmx_uctlx_int_ena_s cn63xx;
- struct cvmx_uctlx_int_ena_s cn63xxp1;
- struct cvmx_uctlx_int_ena_s cn66xx;
- struct cvmx_uctlx_int_ena_s cn68xx;
- struct cvmx_uctlx_int_ena_s cn68xxp1;
- struct cvmx_uctlx_int_ena_s cnf71xx;
};
union cvmx_uctlx_int_reg {
@@ -291,13 +242,6 @@ union cvmx_uctlx_int_reg {
uint64_t reserved_8_63:56;
#endif
} s;
- struct cvmx_uctlx_int_reg_s cn61xx;
- struct cvmx_uctlx_int_reg_s cn63xx;
- struct cvmx_uctlx_int_reg_s cn63xxp1;
- struct cvmx_uctlx_int_reg_s cn66xx;
- struct cvmx_uctlx_int_reg_s cn68xx;
- struct cvmx_uctlx_int_reg_s cn68xxp1;
- struct cvmx_uctlx_int_reg_s cnf71xx;
};
union cvmx_uctlx_ohci_ctl {
@@ -329,13 +273,6 @@ union cvmx_uctlx_ohci_ctl {
uint64_t reserved_19_63:45;
#endif
} s;
- struct cvmx_uctlx_ohci_ctl_s cn61xx;
- struct cvmx_uctlx_ohci_ctl_s cn63xx;
- struct cvmx_uctlx_ohci_ctl_s cn63xxp1;
- struct cvmx_uctlx_ohci_ctl_s cn66xx;
- struct cvmx_uctlx_ohci_ctl_s cn68xx;
- struct cvmx_uctlx_ohci_ctl_s cn68xxp1;
- struct cvmx_uctlx_ohci_ctl_s cnf71xx;
};
union cvmx_uctlx_orto_ctl {
@@ -351,13 +288,6 @@ union cvmx_uctlx_orto_ctl {
uint64_t reserved_32_63:32;
#endif
} s;
- struct cvmx_uctlx_orto_ctl_s cn61xx;
- struct cvmx_uctlx_orto_ctl_s cn63xx;
- struct cvmx_uctlx_orto_ctl_s cn63xxp1;
- struct cvmx_uctlx_orto_ctl_s cn66xx;
- struct cvmx_uctlx_orto_ctl_s cn68xx;
- struct cvmx_uctlx_orto_ctl_s cn68xxp1;
- struct cvmx_uctlx_orto_ctl_s cnf71xx;
};
union cvmx_uctlx_ppaf_wm {
@@ -371,11 +301,6 @@ union cvmx_uctlx_ppaf_wm {
uint64_t reserved_5_63:59;
#endif
} s;
- struct cvmx_uctlx_ppaf_wm_s cn61xx;
- struct cvmx_uctlx_ppaf_wm_s cn63xx;
- struct cvmx_uctlx_ppaf_wm_s cn63xxp1;
- struct cvmx_uctlx_ppaf_wm_s cn66xx;
- struct cvmx_uctlx_ppaf_wm_s cnf71xx;
};
union cvmx_uctlx_uphy_ctl_status {
@@ -407,13 +332,6 @@ union cvmx_uctlx_uphy_ctl_status {
uint64_t reserved_10_63:54;
#endif
} s;
- struct cvmx_uctlx_uphy_ctl_status_s cn61xx;
- struct cvmx_uctlx_uphy_ctl_status_s cn63xx;
- struct cvmx_uctlx_uphy_ctl_status_s cn63xxp1;
- struct cvmx_uctlx_uphy_ctl_status_s cn66xx;
- struct cvmx_uctlx_uphy_ctl_status_s cn68xx;
- struct cvmx_uctlx_uphy_ctl_status_s cn68xxp1;
- struct cvmx_uctlx_uphy_ctl_status_s cnf71xx;
};
union cvmx_uctlx_uphy_portx_ctl_status {
@@ -463,13 +381,6 @@ union cvmx_uctlx_uphy_portx_ctl_status {
uint64_t reserved_43_63:21;
#endif
} s;
- struct cvmx_uctlx_uphy_portx_ctl_status_s cn61xx;
- struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xx;
- struct cvmx_uctlx_uphy_portx_ctl_status_s cn63xxp1;
- struct cvmx_uctlx_uphy_portx_ctl_status_s cn66xx;
- struct cvmx_uctlx_uphy_portx_ctl_status_s cn68xx;
- struct cvmx_uctlx_uphy_portx_ctl_status_s cn68xxp1;
- struct cvmx_uctlx_uphy_portx_ctl_status_s cnf71xx;
};
#endif
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index e8cc328fce2d..6b31c93b5eaa 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -154,6 +154,7 @@ typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
+#define pte_pgprot(x) __pgprot(pte_val(x) & ~_PFN_MASK)
/*
 * On R4000-style MMUs where a TLB entry is mapping an adjacent even / odd
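
The new pte_pgprot() recovers the protection bits of a mapping by masking the physical frame number out of the pte. A hedged sketch of the intended use, with `page` and `other_pfn` as stand-in values:

	pte_t pte = mk_pte(page, PAGE_KERNEL);	/* pfn | protection bits */
	pgprot_t prot = pte_pgprot(pte);	/* protection bits alone */

	/* The recovered pgprot can seed a mapping of a different frame: */
	pte_t copy = pfn_pte(other_pfn, prot);
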
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 0036ea0c7173..93a9dce31f25 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -265,6 +265,11 @@ static inline int pmd_bad(pmd_t pmd)
static inline int pmd_present(pmd_t pmd)
{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return pmd_val(pmd) & _PAGE_PRESENT;
+#endif
+
return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}
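
The pmd_present() change handles huge-page leaf entries: a pmd carrying _PAGE_HUGE never points at a page table, so comparing it against invalid_pte_table says nothing about it, and a huge pmd whose _PAGE_PRESENT bit is clear (e.g. a transiently invalidated THP entry) would otherwise wrongly read as present. The decision the patch encodes, restated as a standalone sketch:

	static inline int pmd_present_sketch(pmd_t pmd)
	{
		/* Leaf (huge) pmd: the PRESENT bit alone is authoritative. */
		if (pmd_val(pmd) & _PAGE_HUGE)
			return pmd_val(pmd) & _PAGE_PRESENT;

		/* Table pmd: present iff it points at a real page table. */
		return pmd_val(pmd) != (unsigned long)invalid_pte_table;
	}
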
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 129e0328367f..57933fc8fd98 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -214,8 +214,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__ (
- " .set arch=r4000 \n"
" .set push \n"
+ " .set arch=r4000 \n"
" .set noreorder \n"
"1:" __LL "%[tmp], %[buddy] \n"
" bnez %[tmp], 2f \n"
@@ -225,13 +225,12 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
" nop \n"
"2: \n"
" .set pop \n"
- " .set mips0 \n"
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global));
} else if (kernel_uses_llsc) {
__asm__ __volatile__ (
- " .set "MIPS_ISA_ARCH_LEVEL" \n"
" .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" .set noreorder \n"
"1:" __LL "%[tmp], %[buddy] \n"
" bnez %[tmp], 2f \n"
@@ -241,7 +240,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
" nop \n"
"2: \n"
" .set pop \n"
- " .set mips0 \n"
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global));
}
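
Both asm blocks had mismatched assembler-option scoping: `.set arch=r4000` was issued before `.set push`, so the override leaked into the saved state, and `.set mips0` restores the command-line ISA rather than whatever was in effect on entry. Bracketing the override with push/pop fixes both. The corrected shape, reduced to a hedged stand-alone fragment (`tmp` and `p` are placeholders):

	__asm__ __volatile__(
	"	.set	push			\n" /* save assembler options */
	"	.set	arch=r4000		\n" /* scoped ISA override    */
	"	ll	%[tmp], %[mem]		\n"
	"	.set	pop			\n" /* restore saved options, not .set mips0 */
	: [tmp] "=&r" (tmp), [mem] "+m" (*p));
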
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index ce3ed4d17813..aca909bd7841 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -255,8 +255,10 @@ struct thread_struct {
/* Saved cp0 stuff. */
unsigned long cp0_status;
+#ifdef CONFIG_MIPS_FP_SUPPORT
/* Saved fpu/fpu emulator stuff. */
struct mips_fpu_struct fpu FPU_ALIGN;
+#endif
/* Assigned branch delay slot 'emulation' frame */
atomic_t bd_emu_frame;
/* PC of the branch from a branch delay slot 'emulation' */
@@ -299,6 +301,17 @@ struct thread_struct {
#define FPAFF_INIT
#endif /* CONFIG_MIPS_MT_FPAFF */
+#ifdef CONFIG_MIPS_FP_SUPPORT
+# define FPU_INIT \
+ .fpu = { \
+ .fpr = {{{0,},},}, \
+ .fcr31 = 0, \
+ .msacsr = 0, \
+ },
+#else
+# define FPU_INIT
+#endif
+
#define INIT_THREAD { \
/* \
* Saved main processor registers \
@@ -321,11 +334,7 @@ struct thread_struct {
/* \
* Saved FPU/FPU emulator stuff \
*/ \
- .fpu = { \
- .fpr = {{{0,},},}, \
- .fcr31 = 0, \
- .msacsr = 0, \
- }, \
+ FPU_INIT \
/* \
* FPU affinity state (null if not FPAFF) \
*/ \
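
With CONFIG_MIPS_FP_SUPPORT=n the fpu member no longer exists, so its designated initializer inside INIT_THREAD has to disappear with it. The FPU_INIT macro, expanding either to the initializer or to nothing, is the usual trick; the same pattern in miniature, with hypothetical names:

	#ifdef CONFIG_FEATURE_X
	# define X_INIT	.x = { 0 },	/* member exists: initialize it */
	#else
	# define X_INIT			/* member compiled out: expands to nothing */
	#endif

	#define INIT_OBJ { .a = 1, X_INIT .b = 2, }
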
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index d19b2d65336b..7f4a32d3345a 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -20,6 +20,7 @@
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
+#include <asm/mmzone.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */
extern void (*r4k_blast_dcache)(void);
@@ -674,4 +675,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
+/* Currently, this is very specific to Loongson-3 */
+#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
+static inline void blast_##pfx##cache##lsize##_node(long node) \
+{ \
+ unsigned long start = CAC_BASE | nid_to_addrbase(node); \
+ unsigned long end = start + current_cpu_data.desc.waysize; \
+ unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
+ unsigned long ws_end = current_cpu_data.desc.ways << \
+ current_cpu_data.desc.waybit; \
+ unsigned long ws, addr; \
+ \
+ for (ws = 0; ws < ws_end; ws += ws_inc) \
+ for (addr = start; addr < end; addr += lsize * 32) \
+ cache##lsize##_unroll32(addr|ws, indexop); \
+}
+
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
+
#endif /* _ASM_R4KCACHE_H */
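
Each __BUILD_BLAST_CACHE_NODE() instantiation emits an index-op loop over one node's secondary cache: the base address comes from nid_to_addrbase(node), and the loop walks ways x waysize in line-size steps, so no per-address hit ops are needed. Usage is a single call per node, picking the variant that matches the detected scache line size:

	/* Writeback+invalidate the whole L2 of NUMA node 1 by index, */
	/* assuming a 32-byte scache line was detected at boot:       */
	blast_scache32_node(1);
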
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 2161357cc68f..4d6ad907ae54 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -427,9 +427,10 @@
#ifdef CONFIG_CPU_MIPSR6
eretnc
#else
+ .set push
.set arch=r4000
eret
- .set mips0
+ .set pop
#endif
.endm
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index e610473d61b8..0f813bb753c6 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -84,7 +84,8 @@ do { \
* Check FCSR for any unmasked exceptions pending set with `ptrace',
* clear them and send a signal.
*/
-#define __sanitize_fcr31(next) \
+#ifdef CONFIG_MIPS_FP_SUPPORT
+# define __sanitize_fcr31(next) \
do { \
unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
void __user *pc; \
@@ -95,6 +96,9 @@ do { \
force_fcr31_sig(fcr31, pc, next); \
} \
} while (0)
+#else
+# define __sanitize_fcr31(next)
+#endif
/*
* For newly created kernel threads switch_to() will return to
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 0170602a1e4e..6cf8ffb5367e 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -73,7 +73,7 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
#ifdef CONFIG_64BIT
case 4: case 5: case 6: case 7:
#ifdef CONFIG_MIPS32_O32
- if (test_thread_flag(TIF_32BIT_REGS))
+ if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
return get_user(*arg, (int *)usp + n);
else
#endif
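
The one-liner fixes a wrong-task bug: test_thread_flag() always inspects current, but mips_get_syscall_arg() is handed an arbitrary task (a ptrace tracee, for instance) whose ABI may differ from the caller's. The two forms side by side:

	/* current-task form -- wrong when 'task' may not be current: */
	if (test_thread_flag(TIF_32BIT_REGS)) ...

	/* target-task form -- reads the flags of the task in question: */
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) ...
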
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 59dae37f6b8d..b1990dd75f27 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -157,6 +157,7 @@ Ip_u2u1s3(_slti);
Ip_u2u1s3(_sltiu);
Ip_u3u1u2(_sltu);
Ip_u2u1u3(_sra);
+Ip_u3u2u1(_srav);
Ip_u2u1u3(_srl);
Ip_u3u2u1(_srlv);
Ip_u3u1u2(_subu);
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index c68b8ae3efcb..b23d74a601b3 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -13,6 +13,9 @@
#define _ASM_UNISTD_H
#include <uapi/asm/unistd.h>
+#include <asm/unistd_nr_n32.h>
+#include <asm/unistd_nr_n64.h>
+#include <asm/unistd_nr_o32.h>
#ifdef CONFIG_MIPS32_N32
#define NR_syscalls (__NR_N32_Linux + __NR_N32_Linux_syscalls)
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index 7a4becd8963a..ed4bd032f456 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -1,5 +1,11 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generated-y += unistd_n32.h
+generated-y += unistd_n64.h
+generated-y += unistd_o32.h
+generated-y += unistd_nr_n32.h
+generated-y += unistd_nr_n64.h
+generated-y += unistd_nr_o32.h
generic-y += bpf_perf_event.h
generic-y += ipcbuf.h
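
These generated-y entries mark the switch to scripted syscall-table generation: the per-ABI __NR_* macros and the *_Linux_syscalls counts now come out of headers built from syscall .tbl files, which is what allows the hand-maintained #define blocks below to be deleted wholesale. Illustrative lines in the usual .tbl format (numbers are relative; the o32 base of 4000 is applied at header-generation time):

	# <number> <abi> <name>    <native entry>   <compat entry>
	0	o32	syscall	sys_syscall	sys32_syscall
	1	o32	exit	sys_exit
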
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index c05dcf5ab414..40fbb5dd66df 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -369,8 +369,9 @@ enum mm_32a_minor_op {
mm_ext_op = 0x02c,
mm_pool32axf_op = 0x03c,
mm_srl32_op = 0x040,
+ mm_srlv32_op = 0x050,
mm_sra_op = 0x080,
- mm_srlv32_op = 0x090,
+ mm_srav_op = 0x090,
mm_rotr_op = 0x0c0,
mm_lwxs_op = 0x118,
mm_addu32_op = 0x150,
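
This opcode-table fix pairs with the new Ip_u3u2u1(_srav) declaration in uasm.h above: minor opcode 0x090 in the microMIPS pool32a table is actually srav (arithmetic shift by register), and srlv lives at 0x050, so the old table would have mislabeled emitted srlv instructions. A hedged emission sketch, assuming uasm's usual (buffer, rd, rt, rs) ordering for variable-shift ops:

	u32 *p = buf;

	/* dst = (s32)dst >> (src & 31), sign-extending the result */
	uasm_i_srav(&p, dst, dst, src);
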
diff --git a/arch/mips/include/uapi/asm/sgidefs.h b/arch/mips/include/uapi/asm/sgidefs.h
index 26143e3b7c26..69c3de90c536 100644
--- a/arch/mips/include/uapi/asm/sgidefs.h
+++ b/arch/mips/include/uapi/asm/sgidefs.h
@@ -12,14 +12,6 @@
#define __ASM_SGIDEFS_H
/*
- * Using a Linux compiler for building Linux seems logic but not to
- * everybody.
- */
-#ifndef __linux__
-#error Use a Linux compiler or give up.
-#endif
-
-/*
* Definitions for the ISA levels
*
* With the introduction of MIPS32 / MIPS64 instruction sets definitions
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index f25dd1d83fb7..4abe387549ad 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -17,1085 +17,23 @@
#if _MIPS_SIM == _MIPS_SIM_ABI32
-/*
- * Linux o32 style syscalls are in the range from 4000 to 4999.
- */
-#define __NR_Linux 4000
-#define __NR_syscall (__NR_Linux + 0)
-#define __NR_exit (__NR_Linux + 1)
-#define __NR_fork (__NR_Linux + 2)
-#define __NR_read (__NR_Linux + 3)
-#define __NR_write (__NR_Linux + 4)
-#define __NR_open (__NR_Linux + 5)
-#define __NR_close (__NR_Linux + 6)
-#define __NR_waitpid (__NR_Linux + 7)
-#define __NR_creat (__NR_Linux + 8)
-#define __NR_link (__NR_Linux + 9)
-#define __NR_unlink (__NR_Linux + 10)
-#define __NR_execve (__NR_Linux + 11)
-#define __NR_chdir (__NR_Linux + 12)
-#define __NR_time (__NR_Linux + 13)
-#define __NR_mknod (__NR_Linux + 14)
-#define __NR_chmod (__NR_Linux + 15)
-#define __NR_lchown (__NR_Linux + 16)
-#define __NR_break (__NR_Linux + 17)
-#define __NR_unused18 (__NR_Linux + 18)
-#define __NR_lseek (__NR_Linux + 19)
-#define __NR_getpid (__NR_Linux + 20)
-#define __NR_mount (__NR_Linux + 21)
-#define __NR_umount (__NR_Linux + 22)
-#define __NR_setuid (__NR_Linux + 23)
-#define __NR_getuid (__NR_Linux + 24)
-#define __NR_stime (__NR_Linux + 25)
-#define __NR_ptrace (__NR_Linux + 26)
-#define __NR_alarm (__NR_Linux + 27)
-#define __NR_unused28 (__NR_Linux + 28)
-#define __NR_pause (__NR_Linux + 29)
-#define __NR_utime (__NR_Linux + 30)
-#define __NR_stty (__NR_Linux + 31)
-#define __NR_gtty (__NR_Linux + 32)
-#define __NR_access (__NR_Linux + 33)
-#define __NR_nice (__NR_Linux + 34)
-#define __NR_ftime (__NR_Linux + 35)
-#define __NR_sync (__NR_Linux + 36)
-#define __NR_kill (__NR_Linux + 37)
-#define __NR_rename (__NR_Linux + 38)
-#define __NR_mkdir (__NR_Linux + 39)
-#define __NR_rmdir (__NR_Linux + 40)
-#define __NR_dup (__NR_Linux + 41)
-#define __NR_pipe (__NR_Linux + 42)
-#define __NR_times (__NR_Linux + 43)
-#define __NR_prof (__NR_Linux + 44)
-#define __NR_brk (__NR_Linux + 45)
-#define __NR_setgid (__NR_Linux + 46)
-#define __NR_getgid (__NR_Linux + 47)
-#define __NR_signal (__NR_Linux + 48)
-#define __NR_geteuid (__NR_Linux + 49)
-#define __NR_getegid (__NR_Linux + 50)
-#define __NR_acct (__NR_Linux + 51)
-#define __NR_umount2 (__NR_Linux + 52)
-#define __NR_lock (__NR_Linux + 53)
-#define __NR_ioctl (__NR_Linux + 54)
-#define __NR_fcntl (__NR_Linux + 55)
-#define __NR_mpx (__NR_Linux + 56)
-#define __NR_setpgid (__NR_Linux + 57)
-#define __NR_ulimit (__NR_Linux + 58)
-#define __NR_unused59 (__NR_Linux + 59)
-#define __NR_umask (__NR_Linux + 60)
-#define __NR_chroot (__NR_Linux + 61)
-#define __NR_ustat (__NR_Linux + 62)
-#define __NR_dup2 (__NR_Linux + 63)
-#define __NR_getppid (__NR_Linux + 64)
-#define __NR_getpgrp (__NR_Linux + 65)
-#define __NR_setsid (__NR_Linux + 66)
-#define __NR_sigaction (__NR_Linux + 67)
-#define __NR_sgetmask (__NR_Linux + 68)
-#define __NR_ssetmask (__NR_Linux + 69)
-#define __NR_setreuid (__NR_Linux + 70)
-#define __NR_setregid (__NR_Linux + 71)
-#define __NR_sigsuspend (__NR_Linux + 72)
-#define __NR_sigpending (__NR_Linux + 73)
-#define __NR_sethostname (__NR_Linux + 74)
-#define __NR_setrlimit (__NR_Linux + 75)
-#define __NR_getrlimit (__NR_Linux + 76)
-#define __NR_getrusage (__NR_Linux + 77)
-#define __NR_gettimeofday (__NR_Linux + 78)
-#define __NR_settimeofday (__NR_Linux + 79)
-#define __NR_getgroups (__NR_Linux + 80)
-#define __NR_setgroups (__NR_Linux + 81)
-#define __NR_reserved82 (__NR_Linux + 82)
-#define __NR_symlink (__NR_Linux + 83)
-#define __NR_unused84 (__NR_Linux + 84)
-#define __NR_readlink (__NR_Linux + 85)
-#define __NR_uselib (__NR_Linux + 86)
-#define __NR_swapon (__NR_Linux + 87)
-#define __NR_reboot (__NR_Linux + 88)
-#define __NR_readdir (__NR_Linux + 89)
-#define __NR_mmap (__NR_Linux + 90)
-#define __NR_munmap (__NR_Linux + 91)
-#define __NR_truncate (__NR_Linux + 92)
-#define __NR_ftruncate (__NR_Linux + 93)
-#define __NR_fchmod (__NR_Linux + 94)
-#define __NR_fchown (__NR_Linux + 95)
-#define __NR_getpriority (__NR_Linux + 96)
-#define __NR_setpriority (__NR_Linux + 97)
-#define __NR_profil (__NR_Linux + 98)
-#define __NR_statfs (__NR_Linux + 99)
-#define __NR_fstatfs (__NR_Linux + 100)
-#define __NR_ioperm (__NR_Linux + 101)
-#define __NR_socketcall (__NR_Linux + 102)
-#define __NR_syslog (__NR_Linux + 103)
-#define __NR_setitimer (__NR_Linux + 104)
-#define __NR_getitimer (__NR_Linux + 105)
-#define __NR_stat (__NR_Linux + 106)
-#define __NR_lstat (__NR_Linux + 107)
-#define __NR_fstat (__NR_Linux + 108)
-#define __NR_unused109 (__NR_Linux + 109)
-#define __NR_iopl (__NR_Linux + 110)
-#define __NR_vhangup (__NR_Linux + 111)
-#define __NR_idle (__NR_Linux + 112)
-#define __NR_vm86 (__NR_Linux + 113)
-#define __NR_wait4 (__NR_Linux + 114)
-#define __NR_swapoff (__NR_Linux + 115)
-#define __NR_sysinfo (__NR_Linux + 116)
-#define __NR_ipc (__NR_Linux + 117)
-#define __NR_fsync (__NR_Linux + 118)
-#define __NR_sigreturn (__NR_Linux + 119)
-#define __NR_clone (__NR_Linux + 120)
-#define __NR_setdomainname (__NR_Linux + 121)
-#define __NR_uname (__NR_Linux + 122)
-#define __NR_modify_ldt (__NR_Linux + 123)
-#define __NR_adjtimex (__NR_Linux + 124)
-#define __NR_mprotect (__NR_Linux + 125)
-#define __NR_sigprocmask (__NR_Linux + 126)
-#define __NR_create_module (__NR_Linux + 127)
-#define __NR_init_module (__NR_Linux + 128)
-#define __NR_delete_module (__NR_Linux + 129)
-#define __NR_get_kernel_syms (__NR_Linux + 130)
-#define __NR_quotactl (__NR_Linux + 131)
-#define __NR_getpgid (__NR_Linux + 132)
-#define __NR_fchdir (__NR_Linux + 133)
-#define __NR_bdflush (__NR_Linux + 134)
-#define __NR_sysfs (__NR_Linux + 135)
-#define __NR_personality (__NR_Linux + 136)
-#define __NR_afs_syscall (__NR_Linux + 137) /* Syscall for Andrew File System */
-#define __NR_setfsuid (__NR_Linux + 138)
-#define __NR_setfsgid (__NR_Linux + 139)
-#define __NR__llseek (__NR_Linux + 140)
-#define __NR_getdents (__NR_Linux + 141)
-#define __NR__newselect (__NR_Linux + 142)
-#define __NR_flock (__NR_Linux + 143)
-#define __NR_msync (__NR_Linux + 144)
-#define __NR_readv (__NR_Linux + 145)
-#define __NR_writev (__NR_Linux + 146)
-#define __NR_cacheflush (__NR_Linux + 147)
-#define __NR_cachectl (__NR_Linux + 148)
-#define __NR_sysmips (__NR_Linux + 149)
-#define __NR_unused150 (__NR_Linux + 150)
-#define __NR_getsid (__NR_Linux + 151)
-#define __NR_fdatasync (__NR_Linux + 152)
-#define __NR__sysctl (__NR_Linux + 153)
-#define __NR_mlock (__NR_Linux + 154)
-#define __NR_munlock (__NR_Linux + 155)
-#define __NR_mlockall (__NR_Linux + 156)
-#define __NR_munlockall (__NR_Linux + 157)
-#define __NR_sched_setparam (__NR_Linux + 158)
-#define __NR_sched_getparam (__NR_Linux + 159)
-#define __NR_sched_setscheduler (__NR_Linux + 160)
-#define __NR_sched_getscheduler (__NR_Linux + 161)
-#define __NR_sched_yield (__NR_Linux + 162)
-#define __NR_sched_get_priority_max (__NR_Linux + 163)
-#define __NR_sched_get_priority_min (__NR_Linux + 164)
-#define __NR_sched_rr_get_interval (__NR_Linux + 165)
-#define __NR_nanosleep (__NR_Linux + 166)
-#define __NR_mremap (__NR_Linux + 167)
-#define __NR_accept (__NR_Linux + 168)
-#define __NR_bind (__NR_Linux + 169)
-#define __NR_connect (__NR_Linux + 170)
-#define __NR_getpeername (__NR_Linux + 171)
-#define __NR_getsockname (__NR_Linux + 172)
-#define __NR_getsockopt (__NR_Linux + 173)
-#define __NR_listen (__NR_Linux + 174)
-#define __NR_recv (__NR_Linux + 175)
-#define __NR_recvfrom (__NR_Linux + 176)
-#define __NR_recvmsg (__NR_Linux + 177)
-#define __NR_send (__NR_Linux + 178)
-#define __NR_sendmsg (__NR_Linux + 179)
-#define __NR_sendto (__NR_Linux + 180)
-#define __NR_setsockopt (__NR_Linux + 181)
-#define __NR_shutdown (__NR_Linux + 182)
-#define __NR_socket (__NR_Linux + 183)
-#define __NR_socketpair (__NR_Linux + 184)
-#define __NR_setresuid (__NR_Linux + 185)
-#define __NR_getresuid (__NR_Linux + 186)
-#define __NR_query_module (__NR_Linux + 187)
-#define __NR_poll (__NR_Linux + 188)
-#define __NR_nfsservctl (__NR_Linux + 189)
-#define __NR_setresgid (__NR_Linux + 190)
-#define __NR_getresgid (__NR_Linux + 191)
-#define __NR_prctl (__NR_Linux + 192)
-#define __NR_rt_sigreturn (__NR_Linux + 193)
-#define __NR_rt_sigaction (__NR_Linux + 194)
-#define __NR_rt_sigprocmask (__NR_Linux + 195)
-#define __NR_rt_sigpending (__NR_Linux + 196)
-#define __NR_rt_sigtimedwait (__NR_Linux + 197)
-#define __NR_rt_sigqueueinfo (__NR_Linux + 198)
-#define __NR_rt_sigsuspend (__NR_Linux + 199)
-#define __NR_pread64 (__NR_Linux + 200)
-#define __NR_pwrite64 (__NR_Linux + 201)
-#define __NR_chown (__NR_Linux + 202)
-#define __NR_getcwd (__NR_Linux + 203)
-#define __NR_capget (__NR_Linux + 204)
-#define __NR_capset (__NR_Linux + 205)
-#define __NR_sigaltstack (__NR_Linux + 206)
-#define __NR_sendfile (__NR_Linux + 207)
-#define __NR_getpmsg (__NR_Linux + 208)
-#define __NR_putpmsg (__NR_Linux + 209)
-#define __NR_mmap2 (__NR_Linux + 210)
-#define __NR_truncate64 (__NR_Linux + 211)
-#define __NR_ftruncate64 (__NR_Linux + 212)
-#define __NR_stat64 (__NR_Linux + 213)
-#define __NR_lstat64 (__NR_Linux + 214)
-#define __NR_fstat64 (__NR_Linux + 215)
-#define __NR_pivot_root (__NR_Linux + 216)
-#define __NR_mincore (__NR_Linux + 217)
-#define __NR_madvise (__NR_Linux + 218)
-#define __NR_getdents64 (__NR_Linux + 219)
-#define __NR_fcntl64 (__NR_Linux + 220)
-#define __NR_reserved221 (__NR_Linux + 221)
-#define __NR_gettid (__NR_Linux + 222)
-#define __NR_readahead (__NR_Linux + 223)
-#define __NR_setxattr (__NR_Linux + 224)
-#define __NR_lsetxattr (__NR_Linux + 225)
-#define __NR_fsetxattr (__NR_Linux + 226)
-#define __NR_getxattr (__NR_Linux + 227)
-#define __NR_lgetxattr (__NR_Linux + 228)
-#define __NR_fgetxattr (__NR_Linux + 229)
-#define __NR_listxattr (__NR_Linux + 230)
-#define __NR_llistxattr (__NR_Linux + 231)
-#define __NR_flistxattr (__NR_Linux + 232)
-#define __NR_removexattr (__NR_Linux + 233)
-#define __NR_lremovexattr (__NR_Linux + 234)
-#define __NR_fremovexattr (__NR_Linux + 235)
-#define __NR_tkill (__NR_Linux + 236)
-#define __NR_sendfile64 (__NR_Linux + 237)
-#define __NR_futex (__NR_Linux + 238)
-#define __NR_sched_setaffinity (__NR_Linux + 239)
-#define __NR_sched_getaffinity (__NR_Linux + 240)
-#define __NR_io_setup (__NR_Linux + 241)
-#define __NR_io_destroy (__NR_Linux + 242)
-#define __NR_io_getevents (__NR_Linux + 243)
-#define __NR_io_submit (__NR_Linux + 244)
-#define __NR_io_cancel (__NR_Linux + 245)
-#define __NR_exit_group (__NR_Linux + 246)
-#define __NR_lookup_dcookie (__NR_Linux + 247)
-#define __NR_epoll_create (__NR_Linux + 248)
-#define __NR_epoll_ctl (__NR_Linux + 249)
-#define __NR_epoll_wait (__NR_Linux + 250)
-#define __NR_remap_file_pages (__NR_Linux + 251)
-#define __NR_set_tid_address (__NR_Linux + 252)
-#define __NR_restart_syscall (__NR_Linux + 253)
-#define __NR_fadvise64 (__NR_Linux + 254)
-#define __NR_statfs64 (__NR_Linux + 255)
-#define __NR_fstatfs64 (__NR_Linux + 256)
-#define __NR_timer_create (__NR_Linux + 257)
-#define __NR_timer_settime (__NR_Linux + 258)
-#define __NR_timer_gettime (__NR_Linux + 259)
-#define __NR_timer_getoverrun (__NR_Linux + 260)
-#define __NR_timer_delete (__NR_Linux + 261)
-#define __NR_clock_settime (__NR_Linux + 262)
-#define __NR_clock_gettime (__NR_Linux + 263)
-#define __NR_clock_getres (__NR_Linux + 264)
-#define __NR_clock_nanosleep (__NR_Linux + 265)
-#define __NR_tgkill (__NR_Linux + 266)
-#define __NR_utimes (__NR_Linux + 267)
-#define __NR_mbind (__NR_Linux + 268)
-#define __NR_get_mempolicy (__NR_Linux + 269)
-#define __NR_set_mempolicy (__NR_Linux + 270)
-#define __NR_mq_open (__NR_Linux + 271)
-#define __NR_mq_unlink (__NR_Linux + 272)
-#define __NR_mq_timedsend (__NR_Linux + 273)
-#define __NR_mq_timedreceive (__NR_Linux + 274)
-#define __NR_mq_notify (__NR_Linux + 275)
-#define __NR_mq_getsetattr (__NR_Linux + 276)
-#define __NR_vserver (__NR_Linux + 277)
-#define __NR_waitid (__NR_Linux + 278)
-/* #define __NR_sys_setaltroot (__NR_Linux + 279) */
-#define __NR_add_key (__NR_Linux + 280)
-#define __NR_request_key (__NR_Linux + 281)
-#define __NR_keyctl (__NR_Linux + 282)
-#define __NR_set_thread_area (__NR_Linux + 283)
-#define __NR_inotify_init (__NR_Linux + 284)
-#define __NR_inotify_add_watch (__NR_Linux + 285)
-#define __NR_inotify_rm_watch (__NR_Linux + 286)
-#define __NR_migrate_pages (__NR_Linux + 287)
-#define __NR_openat (__NR_Linux + 288)
-#define __NR_mkdirat (__NR_Linux + 289)
-#define __NR_mknodat (__NR_Linux + 290)
-#define __NR_fchownat (__NR_Linux + 291)
-#define __NR_futimesat (__NR_Linux + 292)
-#define __NR_fstatat64 (__NR_Linux + 293)
-#define __NR_unlinkat (__NR_Linux + 294)
-#define __NR_renameat (__NR_Linux + 295)
-#define __NR_linkat (__NR_Linux + 296)
-#define __NR_symlinkat (__NR_Linux + 297)
-#define __NR_readlinkat (__NR_Linux + 298)
-#define __NR_fchmodat (__NR_Linux + 299)
-#define __NR_faccessat (__NR_Linux + 300)
-#define __NR_pselect6 (__NR_Linux + 301)
-#define __NR_ppoll (__NR_Linux + 302)
-#define __NR_unshare (__NR_Linux + 303)
-#define __NR_splice (__NR_Linux + 304)
-#define __NR_sync_file_range (__NR_Linux + 305)
-#define __NR_tee (__NR_Linux + 306)
-#define __NR_vmsplice (__NR_Linux + 307)
-#define __NR_move_pages (__NR_Linux + 308)
-#define __NR_set_robust_list (__NR_Linux + 309)
-#define __NR_get_robust_list (__NR_Linux + 310)
-#define __NR_kexec_load (__NR_Linux + 311)
-#define __NR_getcpu (__NR_Linux + 312)
-#define __NR_epoll_pwait (__NR_Linux + 313)
-#define __NR_ioprio_set (__NR_Linux + 314)
-#define __NR_ioprio_get (__NR_Linux + 315)
-#define __NR_utimensat (__NR_Linux + 316)
-#define __NR_signalfd (__NR_Linux + 317)
-#define __NR_timerfd (__NR_Linux + 318)
-#define __NR_eventfd (__NR_Linux + 319)
-#define __NR_fallocate (__NR_Linux + 320)
-#define __NR_timerfd_create (__NR_Linux + 321)
-#define __NR_timerfd_gettime (__NR_Linux + 322)
-#define __NR_timerfd_settime (__NR_Linux + 323)
-#define __NR_signalfd4 (__NR_Linux + 324)
-#define __NR_eventfd2 (__NR_Linux + 325)
-#define __NR_epoll_create1 (__NR_Linux + 326)
-#define __NR_dup3 (__NR_Linux + 327)
-#define __NR_pipe2 (__NR_Linux + 328)
-#define __NR_inotify_init1 (__NR_Linux + 329)
-#define __NR_preadv (__NR_Linux + 330)
-#define __NR_pwritev (__NR_Linux + 331)
-#define __NR_rt_tgsigqueueinfo (__NR_Linux + 332)
-#define __NR_perf_event_open (__NR_Linux + 333)
-#define __NR_accept4 (__NR_Linux + 334)
-#define __NR_recvmmsg (__NR_Linux + 335)
-#define __NR_fanotify_init (__NR_Linux + 336)
-#define __NR_fanotify_mark (__NR_Linux + 337)
-#define __NR_prlimit64 (__NR_Linux + 338)
-#define __NR_name_to_handle_at (__NR_Linux + 339)
-#define __NR_open_by_handle_at (__NR_Linux + 340)
-#define __NR_clock_adjtime (__NR_Linux + 341)
-#define __NR_syncfs (__NR_Linux + 342)
-#define __NR_sendmmsg (__NR_Linux + 343)
-#define __NR_setns (__NR_Linux + 344)
-#define __NR_process_vm_readv (__NR_Linux + 345)
-#define __NR_process_vm_writev (__NR_Linux + 346)
-#define __NR_kcmp (__NR_Linux + 347)
-#define __NR_finit_module (__NR_Linux + 348)
-#define __NR_sched_setattr (__NR_Linux + 349)
-#define __NR_sched_getattr (__NR_Linux + 350)
-#define __NR_renameat2 (__NR_Linux + 351)
-#define __NR_seccomp (__NR_Linux + 352)
-#define __NR_getrandom (__NR_Linux + 353)
-#define __NR_memfd_create (__NR_Linux + 354)
-#define __NR_bpf (__NR_Linux + 355)
-#define __NR_execveat (__NR_Linux + 356)
-#define __NR_userfaultfd (__NR_Linux + 357)
-#define __NR_membarrier (__NR_Linux + 358)
-#define __NR_mlock2 (__NR_Linux + 359)
-#define __NR_copy_file_range (__NR_Linux + 360)
-#define __NR_preadv2 (__NR_Linux + 361)
-#define __NR_pwritev2 (__NR_Linux + 362)
-#define __NR_pkey_mprotect (__NR_Linux + 363)
-#define __NR_pkey_alloc (__NR_Linux + 364)
-#define __NR_pkey_free (__NR_Linux + 365)
-#define __NR_statx (__NR_Linux + 366)
-#define __NR_rseq (__NR_Linux + 367)
-#define __NR_io_pgetevents (__NR_Linux + 368)
-
-
-/*
- * Offset of the last Linux o32 flavoured syscall
- */
-#define __NR_Linux_syscalls 368
+#define __NR_Linux 4000
+#include <asm/unistd_o32.h>
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
-#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 368
-
#if _MIPS_SIM == _MIPS_SIM_ABI64
-/*
- * Linux 64-bit syscalls are in the range from 5000 to 5999.
- */
-#define __NR_Linux 5000
-#define __NR_read (__NR_Linux + 0)
-#define __NR_write (__NR_Linux + 1)
-#define __NR_open (__NR_Linux + 2)
-#define __NR_close (__NR_Linux + 3)
-#define __NR_stat (__NR_Linux + 4)
-#define __NR_fstat (__NR_Linux + 5)
-#define __NR_lstat (__NR_Linux + 6)
-#define __NR_poll (__NR_Linux + 7)
-#define __NR_lseek (__NR_Linux + 8)
-#define __NR_mmap (__NR_Linux + 9)
-#define __NR_mprotect (__NR_Linux + 10)
-#define __NR_munmap (__NR_Linux + 11)
-#define __NR_brk (__NR_Linux + 12)
-#define __NR_rt_sigaction (__NR_Linux + 13)
-#define __NR_rt_sigprocmask (__NR_Linux + 14)
-#define __NR_ioctl (__NR_Linux + 15)
-#define __NR_pread64 (__NR_Linux + 16)
-#define __NR_pwrite64 (__NR_Linux + 17)
-#define __NR_readv (__NR_Linux + 18)
-#define __NR_writev (__NR_Linux + 19)
-#define __NR_access (__NR_Linux + 20)
-#define __NR_pipe (__NR_Linux + 21)
-#define __NR__newselect (__NR_Linux + 22)
-#define __NR_sched_yield (__NR_Linux + 23)
-#define __NR_mremap (__NR_Linux + 24)
-#define __NR_msync (__NR_Linux + 25)
-#define __NR_mincore (__NR_Linux + 26)
-#define __NR_madvise (__NR_Linux + 27)
-#define __NR_shmget (__NR_Linux + 28)
-#define __NR_shmat (__NR_Linux + 29)
-#define __NR_shmctl (__NR_Linux + 30)
-#define __NR_dup (__NR_Linux + 31)
-#define __NR_dup2 (__NR_Linux + 32)
-#define __NR_pause (__NR_Linux + 33)
-#define __NR_nanosleep (__NR_Linux + 34)
-#define __NR_getitimer (__NR_Linux + 35)
-#define __NR_setitimer (__NR_Linux + 36)
-#define __NR_alarm (__NR_Linux + 37)
-#define __NR_getpid (__NR_Linux + 38)
-#define __NR_sendfile (__NR_Linux + 39)
-#define __NR_socket (__NR_Linux + 40)
-#define __NR_connect (__NR_Linux + 41)
-#define __NR_accept (__NR_Linux + 42)
-#define __NR_sendto (__NR_Linux + 43)
-#define __NR_recvfrom (__NR_Linux + 44)
-#define __NR_sendmsg (__NR_Linux + 45)
-#define __NR_recvmsg (__NR_Linux + 46)
-#define __NR_shutdown (__NR_Linux + 47)
-#define __NR_bind (__NR_Linux + 48)
-#define __NR_listen (__NR_Linux + 49)
-#define __NR_getsockname (__NR_Linux + 50)
-#define __NR_getpeername (__NR_Linux + 51)
-#define __NR_socketpair (__NR_Linux + 52)
-#define __NR_setsockopt (__NR_Linux + 53)
-#define __NR_getsockopt (__NR_Linux + 54)
-#define __NR_clone (__NR_Linux + 55)
-#define __NR_fork (__NR_Linux + 56)
-#define __NR_execve (__NR_Linux + 57)
-#define __NR_exit (__NR_Linux + 58)
-#define __NR_wait4 (__NR_Linux + 59)
-#define __NR_kill (__NR_Linux + 60)
-#define __NR_uname (__NR_Linux + 61)
-#define __NR_semget (__NR_Linux + 62)
-#define __NR_semop (__NR_Linux + 63)
-#define __NR_semctl (__NR_Linux + 64)
-#define __NR_shmdt (__NR_Linux + 65)
-#define __NR_msgget (__NR_Linux + 66)
-#define __NR_msgsnd (__NR_Linux + 67)
-#define __NR_msgrcv (__NR_Linux + 68)
-#define __NR_msgctl (__NR_Linux + 69)
-#define __NR_fcntl (__NR_Linux + 70)
-#define __NR_flock (__NR_Linux + 71)
-#define __NR_fsync (__NR_Linux + 72)
-#define __NR_fdatasync (__NR_Linux + 73)
-#define __NR_truncate (__NR_Linux + 74)
-#define __NR_ftruncate (__NR_Linux + 75)
-#define __NR_getdents (__NR_Linux + 76)
-#define __NR_getcwd (__NR_Linux + 77)
-#define __NR_chdir (__NR_Linux + 78)
-#define __NR_fchdir (__NR_Linux + 79)
-#define __NR_rename (__NR_Linux + 80)
-#define __NR_mkdir (__NR_Linux + 81)
-#define __NR_rmdir (__NR_Linux + 82)
-#define __NR_creat (__NR_Linux + 83)
-#define __NR_link (__NR_Linux + 84)
-#define __NR_unlink (__NR_Linux + 85)
-#define __NR_symlink (__NR_Linux + 86)
-#define __NR_readlink (__NR_Linux + 87)
-#define __NR_chmod (__NR_Linux + 88)
-#define __NR_fchmod (__NR_Linux + 89)
-#define __NR_chown (__NR_Linux + 90)
-#define __NR_fchown (__NR_Linux + 91)
-#define __NR_lchown (__NR_Linux + 92)
-#define __NR_umask (__NR_Linux + 93)
-#define __NR_gettimeofday (__NR_Linux + 94)
-#define __NR_getrlimit (__NR_Linux + 95)
-#define __NR_getrusage (__NR_Linux + 96)
-#define __NR_sysinfo (__NR_Linux + 97)
-#define __NR_times (__NR_Linux + 98)
-#define __NR_ptrace (__NR_Linux + 99)
-#define __NR_getuid (__NR_Linux + 100)
-#define __NR_syslog (__NR_Linux + 101)
-#define __NR_getgid (__NR_Linux + 102)
-#define __NR_setuid (__NR_Linux + 103)
-#define __NR_setgid (__NR_Linux + 104)
-#define __NR_geteuid (__NR_Linux + 105)
-#define __NR_getegid (__NR_Linux + 106)
-#define __NR_setpgid (__NR_Linux + 107)
-#define __NR_getppid (__NR_Linux + 108)
-#define __NR_getpgrp (__NR_Linux + 109)
-#define __NR_setsid (__NR_Linux + 110)
-#define __NR_setreuid (__NR_Linux + 111)
-#define __NR_setregid (__NR_Linux + 112)
-#define __NR_getgroups (__NR_Linux + 113)
-#define __NR_setgroups (__NR_Linux + 114)
-#define __NR_setresuid (__NR_Linux + 115)
-#define __NR_getresuid (__NR_Linux + 116)
-#define __NR_setresgid (__NR_Linux + 117)
-#define __NR_getresgid (__NR_Linux + 118)
-#define __NR_getpgid (__NR_Linux + 119)
-#define __NR_setfsuid (__NR_Linux + 120)
-#define __NR_setfsgid (__NR_Linux + 121)
-#define __NR_getsid (__NR_Linux + 122)
-#define __NR_capget (__NR_Linux + 123)
-#define __NR_capset (__NR_Linux + 124)
-#define __NR_rt_sigpending (__NR_Linux + 125)
-#define __NR_rt_sigtimedwait (__NR_Linux + 126)
-#define __NR_rt_sigqueueinfo (__NR_Linux + 127)
-#define __NR_rt_sigsuspend (__NR_Linux + 128)
-#define __NR_sigaltstack (__NR_Linux + 129)
-#define __NR_utime (__NR_Linux + 130)
-#define __NR_mknod (__NR_Linux + 131)
-#define __NR_personality (__NR_Linux + 132)
-#define __NR_ustat (__NR_Linux + 133)
-#define __NR_statfs (__NR_Linux + 134)
-#define __NR_fstatfs (__NR_Linux + 135)
-#define __NR_sysfs (__NR_Linux + 136)
-#define __NR_getpriority (__NR_Linux + 137)
-#define __NR_setpriority (__NR_Linux + 138)
-#define __NR_sched_setparam (__NR_Linux + 139)
-#define __NR_sched_getparam (__NR_Linux + 140)
-#define __NR_sched_setscheduler (__NR_Linux + 141)
-#define __NR_sched_getscheduler (__NR_Linux + 142)
-#define __NR_sched_get_priority_max (__NR_Linux + 143)
-#define __NR_sched_get_priority_min (__NR_Linux + 144)
-#define __NR_sched_rr_get_interval (__NR_Linux + 145)
-#define __NR_mlock (__NR_Linux + 146)
-#define __NR_munlock (__NR_Linux + 147)
-#define __NR_mlockall (__NR_Linux + 148)
-#define __NR_munlockall (__NR_Linux + 149)
-#define __NR_vhangup (__NR_Linux + 150)
-#define __NR_pivot_root (__NR_Linux + 151)
-#define __NR__sysctl (__NR_Linux + 152)
-#define __NR_prctl (__NR_Linux + 153)
-#define __NR_adjtimex (__NR_Linux + 154)
-#define __NR_setrlimit (__NR_Linux + 155)
-#define __NR_chroot (__NR_Linux + 156)
-#define __NR_sync (__NR_Linux + 157)
-#define __NR_acct (__NR_Linux + 158)
-#define __NR_settimeofday (__NR_Linux + 159)
-#define __NR_mount (__NR_Linux + 160)
-#define __NR_umount2 (__NR_Linux + 161)
-#define __NR_swapon (__NR_Linux + 162)
-#define __NR_swapoff (__NR_Linux + 163)
-#define __NR_reboot (__NR_Linux + 164)
-#define __NR_sethostname (__NR_Linux + 165)
-#define __NR_setdomainname (__NR_Linux + 166)
-#define __NR_create_module (__NR_Linux + 167)
-#define __NR_init_module (__NR_Linux + 168)
-#define __NR_delete_module (__NR_Linux + 169)
-#define __NR_get_kernel_syms (__NR_Linux + 170)
-#define __NR_query_module (__NR_Linux + 171)
-#define __NR_quotactl (__NR_Linux + 172)
-#define __NR_nfsservctl (__NR_Linux + 173)
-#define __NR_getpmsg (__NR_Linux + 174)
-#define __NR_putpmsg (__NR_Linux + 175)
-#define __NR_afs_syscall (__NR_Linux + 176)
-#define __NR_reserved177 (__NR_Linux + 177)
-#define __NR_gettid (__NR_Linux + 178)
-#define __NR_readahead (__NR_Linux + 179)
-#define __NR_setxattr (__NR_Linux + 180)
-#define __NR_lsetxattr (__NR_Linux + 181)
-#define __NR_fsetxattr (__NR_Linux + 182)
-#define __NR_getxattr (__NR_Linux + 183)
-#define __NR_lgetxattr (__NR_Linux + 184)
-#define __NR_fgetxattr (__NR_Linux + 185)
-#define __NR_listxattr (__NR_Linux + 186)
-#define __NR_llistxattr (__NR_Linux + 187)
-#define __NR_flistxattr (__NR_Linux + 188)
-#define __NR_removexattr (__NR_Linux + 189)
-#define __NR_lremovexattr (__NR_Linux + 190)
-#define __NR_fremovexattr (__NR_Linux + 191)
-#define __NR_tkill (__NR_Linux + 192)
-#define __NR_reserved193 (__NR_Linux + 193)
-#define __NR_futex (__NR_Linux + 194)
-#define __NR_sched_setaffinity (__NR_Linux + 195)
-#define __NR_sched_getaffinity (__NR_Linux + 196)
-#define __NR_cacheflush (__NR_Linux + 197)
-#define __NR_cachectl (__NR_Linux + 198)
-#define __NR_sysmips (__NR_Linux + 199)
-#define __NR_io_setup (__NR_Linux + 200)
-#define __NR_io_destroy (__NR_Linux + 201)
-#define __NR_io_getevents (__NR_Linux + 202)
-#define __NR_io_submit (__NR_Linux + 203)
-#define __NR_io_cancel (__NR_Linux + 204)
-#define __NR_exit_group (__NR_Linux + 205)
-#define __NR_lookup_dcookie (__NR_Linux + 206)
-#define __NR_epoll_create (__NR_Linux + 207)
-#define __NR_epoll_ctl (__NR_Linux + 208)
-#define __NR_epoll_wait (__NR_Linux + 209)
-#define __NR_remap_file_pages (__NR_Linux + 210)
-#define __NR_rt_sigreturn (__NR_Linux + 211)
-#define __NR_set_tid_address (__NR_Linux + 212)
-#define __NR_restart_syscall (__NR_Linux + 213)
-#define __NR_semtimedop (__NR_Linux + 214)
-#define __NR_fadvise64 (__NR_Linux + 215)
-#define __NR_timer_create (__NR_Linux + 216)
-#define __NR_timer_settime (__NR_Linux + 217)
-#define __NR_timer_gettime (__NR_Linux + 218)
-#define __NR_timer_getoverrun (__NR_Linux + 219)
-#define __NR_timer_delete (__NR_Linux + 220)
-#define __NR_clock_settime (__NR_Linux + 221)
-#define __NR_clock_gettime (__NR_Linux + 222)
-#define __NR_clock_getres (__NR_Linux + 223)
-#define __NR_clock_nanosleep (__NR_Linux + 224)
-#define __NR_tgkill (__NR_Linux + 225)
-#define __NR_utimes (__NR_Linux + 226)
-#define __NR_mbind (__NR_Linux + 227)
-#define __NR_get_mempolicy (__NR_Linux + 228)
-#define __NR_set_mempolicy (__NR_Linux + 229)
-#define __NR_mq_open (__NR_Linux + 230)
-#define __NR_mq_unlink (__NR_Linux + 231)
-#define __NR_mq_timedsend (__NR_Linux + 232)
-#define __NR_mq_timedreceive (__NR_Linux + 233)
-#define __NR_mq_notify (__NR_Linux + 234)
-#define __NR_mq_getsetattr (__NR_Linux + 235)
-#define __NR_vserver (__NR_Linux + 236)
-#define __NR_waitid (__NR_Linux + 237)
-/* #define __NR_sys_setaltroot (__NR_Linux + 238) */
-#define __NR_add_key (__NR_Linux + 239)
-#define __NR_request_key (__NR_Linux + 240)
-#define __NR_keyctl (__NR_Linux + 241)
-#define __NR_set_thread_area (__NR_Linux + 242)
-#define __NR_inotify_init (__NR_Linux + 243)
-#define __NR_inotify_add_watch (__NR_Linux + 244)
-#define __NR_inotify_rm_watch (__NR_Linux + 245)
-#define __NR_migrate_pages (__NR_Linux + 246)
-#define __NR_openat (__NR_Linux + 247)
-#define __NR_mkdirat (__NR_Linux + 248)
-#define __NR_mknodat (__NR_Linux + 249)
-#define __NR_fchownat (__NR_Linux + 250)
-#define __NR_futimesat (__NR_Linux + 251)
-#define __NR_newfstatat (__NR_Linux + 252)
-#define __NR_unlinkat (__NR_Linux + 253)
-#define __NR_renameat (__NR_Linux + 254)
-#define __NR_linkat (__NR_Linux + 255)
-#define __NR_symlinkat (__NR_Linux + 256)
-#define __NR_readlinkat (__NR_Linux + 257)
-#define __NR_fchmodat (__NR_Linux + 258)
-#define __NR_faccessat (__NR_Linux + 259)
-#define __NR_pselect6 (__NR_Linux + 260)
-#define __NR_ppoll (__NR_Linux + 261)
-#define __NR_unshare (__NR_Linux + 262)
-#define __NR_splice (__NR_Linux + 263)
-#define __NR_sync_file_range (__NR_Linux + 264)
-#define __NR_tee (__NR_Linux + 265)
-#define __NR_vmsplice (__NR_Linux + 266)
-#define __NR_move_pages (__NR_Linux + 267)
-#define __NR_set_robust_list (__NR_Linux + 268)
-#define __NR_get_robust_list (__NR_Linux + 269)
-#define __NR_kexec_load (__NR_Linux + 270)
-#define __NR_getcpu (__NR_Linux + 271)
-#define __NR_epoll_pwait (__NR_Linux + 272)
-#define __NR_ioprio_set (__NR_Linux + 273)
-#define __NR_ioprio_get (__NR_Linux + 274)
-#define __NR_utimensat (__NR_Linux + 275)
-#define __NR_signalfd (__NR_Linux + 276)
-#define __NR_timerfd (__NR_Linux + 277)
-#define __NR_eventfd (__NR_Linux + 278)
-#define __NR_fallocate (__NR_Linux + 279)
-#define __NR_timerfd_create (__NR_Linux + 280)
-#define __NR_timerfd_gettime (__NR_Linux + 281)
-#define __NR_timerfd_settime (__NR_Linux + 282)
-#define __NR_signalfd4 (__NR_Linux + 283)
-#define __NR_eventfd2 (__NR_Linux + 284)
-#define __NR_epoll_create1 (__NR_Linux + 285)
-#define __NR_dup3 (__NR_Linux + 286)
-#define __NR_pipe2 (__NR_Linux + 287)
-#define __NR_inotify_init1 (__NR_Linux + 288)
-#define __NR_preadv (__NR_Linux + 289)
-#define __NR_pwritev (__NR_Linux + 290)
-#define __NR_rt_tgsigqueueinfo (__NR_Linux + 291)
-#define __NR_perf_event_open (__NR_Linux + 292)
-#define __NR_accept4 (__NR_Linux + 293)
-#define __NR_recvmmsg (__NR_Linux + 294)
-#define __NR_fanotify_init (__NR_Linux + 295)
-#define __NR_fanotify_mark (__NR_Linux + 296)
-#define __NR_prlimit64 (__NR_Linux + 297)
-#define __NR_name_to_handle_at (__NR_Linux + 298)
-#define __NR_open_by_handle_at (__NR_Linux + 299)
-#define __NR_clock_adjtime (__NR_Linux + 300)
-#define __NR_syncfs (__NR_Linux + 301)
-#define __NR_sendmmsg (__NR_Linux + 302)
-#define __NR_setns (__NR_Linux + 303)
-#define __NR_process_vm_readv (__NR_Linux + 304)
-#define __NR_process_vm_writev (__NR_Linux + 305)
-#define __NR_kcmp (__NR_Linux + 306)
-#define __NR_finit_module (__NR_Linux + 307)
-#define __NR_getdents64 (__NR_Linux + 308)
-#define __NR_sched_setattr (__NR_Linux + 309)
-#define __NR_sched_getattr (__NR_Linux + 310)
-#define __NR_renameat2 (__NR_Linux + 311)
-#define __NR_seccomp (__NR_Linux + 312)
-#define __NR_getrandom (__NR_Linux + 313)
-#define __NR_memfd_create (__NR_Linux + 314)
-#define __NR_bpf (__NR_Linux + 315)
-#define __NR_execveat (__NR_Linux + 316)
-#define __NR_userfaultfd (__NR_Linux + 317)
-#define __NR_membarrier (__NR_Linux + 318)
-#define __NR_mlock2 (__NR_Linux + 319)
-#define __NR_copy_file_range (__NR_Linux + 320)
-#define __NR_preadv2 (__NR_Linux + 321)
-#define __NR_pwritev2 (__NR_Linux + 322)
-#define __NR_pkey_mprotect (__NR_Linux + 323)
-#define __NR_pkey_alloc (__NR_Linux + 324)
-#define __NR_pkey_free (__NR_Linux + 325)
-#define __NR_statx (__NR_Linux + 326)
-#define __NR_rseq (__NR_Linux + 327)
-#define __NR_io_pgetevents (__NR_Linux + 328)
-
-/*
- * Offset of the last Linux 64-bit flavoured syscall
- */
-#define __NR_Linux_syscalls 328
+#define __NR_Linux 5000
+#include <asm/unistd_n64.h>
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
-#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 328
-
#if _MIPS_SIM == _MIPS_SIM_NABI32
-/*
- * Linux N32 syscalls are in the range from 6000 to 6999.
- */
-#define __NR_Linux 6000
-#define __NR_read (__NR_Linux + 0)
-#define __NR_write (__NR_Linux + 1)
-#define __NR_open (__NR_Linux + 2)
-#define __NR_close (__NR_Linux + 3)
-#define __NR_stat (__NR_Linux + 4)
-#define __NR_fstat (__NR_Linux + 5)
-#define __NR_lstat (__NR_Linux + 6)
-#define __NR_poll (__NR_Linux + 7)
-#define __NR_lseek (__NR_Linux + 8)
-#define __NR_mmap (__NR_Linux + 9)
-#define __NR_mprotect (__NR_Linux + 10)
-#define __NR_munmap (__NR_Linux + 11)
-#define __NR_brk (__NR_Linux + 12)
-#define __NR_rt_sigaction (__NR_Linux + 13)
-#define __NR_rt_sigprocmask (__NR_Linux + 14)
-#define __NR_ioctl (__NR_Linux + 15)
-#define __NR_pread64 (__NR_Linux + 16)
-#define __NR_pwrite64 (__NR_Linux + 17)
-#define __NR_readv (__NR_Linux + 18)
-#define __NR_writev (__NR_Linux + 19)
-#define __NR_access (__NR_Linux + 20)
-#define __NR_pipe (__NR_Linux + 21)
-#define __NR__newselect (__NR_Linux + 22)
-#define __NR_sched_yield (__NR_Linux + 23)
-#define __NR_mremap (__NR_Linux + 24)
-#define __NR_msync (__NR_Linux + 25)
-#define __NR_mincore (__NR_Linux + 26)
-#define __NR_madvise (__NR_Linux + 27)
-#define __NR_shmget (__NR_Linux + 28)
-#define __NR_shmat (__NR_Linux + 29)
-#define __NR_shmctl (__NR_Linux + 30)
-#define __NR_dup (__NR_Linux + 31)
-#define __NR_dup2 (__NR_Linux + 32)
-#define __NR_pause (__NR_Linux + 33)
-#define __NR_nanosleep (__NR_Linux + 34)
-#define __NR_getitimer (__NR_Linux + 35)
-#define __NR_setitimer (__NR_Linux + 36)
-#define __NR_alarm (__NR_Linux + 37)
-#define __NR_getpid (__NR_Linux + 38)
-#define __NR_sendfile (__NR_Linux + 39)
-#define __NR_socket (__NR_Linux + 40)
-#define __NR_connect (__NR_Linux + 41)
-#define __NR_accept (__NR_Linux + 42)
-#define __NR_sendto (__NR_Linux + 43)
-#define __NR_recvfrom (__NR_Linux + 44)
-#define __NR_sendmsg (__NR_Linux + 45)
-#define __NR_recvmsg (__NR_Linux + 46)
-#define __NR_shutdown (__NR_Linux + 47)
-#define __NR_bind (__NR_Linux + 48)
-#define __NR_listen (__NR_Linux + 49)
-#define __NR_getsockname (__NR_Linux + 50)
-#define __NR_getpeername (__NR_Linux + 51)
-#define __NR_socketpair (__NR_Linux + 52)
-#define __NR_setsockopt (__NR_Linux + 53)
-#define __NR_getsockopt (__NR_Linux + 54)
-#define __NR_clone (__NR_Linux + 55)
-#define __NR_fork (__NR_Linux + 56)
-#define __NR_execve (__NR_Linux + 57)
-#define __NR_exit (__NR_Linux + 58)
-#define __NR_wait4 (__NR_Linux + 59)
-#define __NR_kill (__NR_Linux + 60)
-#define __NR_uname (__NR_Linux + 61)
-#define __NR_semget (__NR_Linux + 62)
-#define __NR_semop (__NR_Linux + 63)
-#define __NR_semctl (__NR_Linux + 64)
-#define __NR_shmdt (__NR_Linux + 65)
-#define __NR_msgget (__NR_Linux + 66)
-#define __NR_msgsnd (__NR_Linux + 67)
-#define __NR_msgrcv (__NR_Linux + 68)
-#define __NR_msgctl (__NR_Linux + 69)
-#define __NR_fcntl (__NR_Linux + 70)
-#define __NR_flock (__NR_Linux + 71)
-#define __NR_fsync (__NR_Linux + 72)
-#define __NR_fdatasync (__NR_Linux + 73)
-#define __NR_truncate (__NR_Linux + 74)
-#define __NR_ftruncate (__NR_Linux + 75)
-#define __NR_getdents (__NR_Linux + 76)
-#define __NR_getcwd (__NR_Linux + 77)
-#define __NR_chdir (__NR_Linux + 78)
-#define __NR_fchdir (__NR_Linux + 79)
-#define __NR_rename (__NR_Linux + 80)
-#define __NR_mkdir (__NR_Linux + 81)
-#define __NR_rmdir (__NR_Linux + 82)
-#define __NR_creat (__NR_Linux + 83)
-#define __NR_link (__NR_Linux + 84)
-#define __NR_unlink (__NR_Linux + 85)
-#define __NR_symlink (__NR_Linux + 86)
-#define __NR_readlink (__NR_Linux + 87)
-#define __NR_chmod (__NR_Linux + 88)
-#define __NR_fchmod (__NR_Linux + 89)
-#define __NR_chown (__NR_Linux + 90)
-#define __NR_fchown (__NR_Linux + 91)
-#define __NR_lchown (__NR_Linux + 92)
-#define __NR_umask (__NR_Linux + 93)
-#define __NR_gettimeofday (__NR_Linux + 94)
-#define __NR_getrlimit (__NR_Linux + 95)
-#define __NR_getrusage (__NR_Linux + 96)
-#define __NR_sysinfo (__NR_Linux + 97)
-#define __NR_times (__NR_Linux + 98)
-#define __NR_ptrace (__NR_Linux + 99)
-#define __NR_getuid (__NR_Linux + 100)
-#define __NR_syslog (__NR_Linux + 101)
-#define __NR_getgid (__NR_Linux + 102)
-#define __NR_setuid (__NR_Linux + 103)
-#define __NR_setgid (__NR_Linux + 104)
-#define __NR_geteuid (__NR_Linux + 105)
-#define __NR_getegid (__NR_Linux + 106)
-#define __NR_setpgid (__NR_Linux + 107)
-#define __NR_getppid (__NR_Linux + 108)
-#define __NR_getpgrp (__NR_Linux + 109)
-#define __NR_setsid (__NR_Linux + 110)
-#define __NR_setreuid (__NR_Linux + 111)
-#define __NR_setregid (__NR_Linux + 112)
-#define __NR_getgroups (__NR_Linux + 113)
-#define __NR_setgroups (__NR_Linux + 114)
-#define __NR_setresuid (__NR_Linux + 115)
-#define __NR_getresuid (__NR_Linux + 116)
-#define __NR_setresgid (__NR_Linux + 117)
-#define __NR_getresgid (__NR_Linux + 118)
-#define __NR_getpgid (__NR_Linux + 119)
-#define __NR_setfsuid (__NR_Linux + 120)
-#define __NR_setfsgid (__NR_Linux + 121)
-#define __NR_getsid (__NR_Linux + 122)
-#define __NR_capget (__NR_Linux + 123)
-#define __NR_capset (__NR_Linux + 124)
-#define __NR_rt_sigpending (__NR_Linux + 125)
-#define __NR_rt_sigtimedwait (__NR_Linux + 126)
-#define __NR_rt_sigqueueinfo (__NR_Linux + 127)
-#define __NR_rt_sigsuspend (__NR_Linux + 128)
-#define __NR_sigaltstack (__NR_Linux + 129)
-#define __NR_utime (__NR_Linux + 130)
-#define __NR_mknod (__NR_Linux + 131)
-#define __NR_personality (__NR_Linux + 132)
-#define __NR_ustat (__NR_Linux + 133)
-#define __NR_statfs (__NR_Linux + 134)
-#define __NR_fstatfs (__NR_Linux + 135)
-#define __NR_sysfs (__NR_Linux + 136)
-#define __NR_getpriority (__NR_Linux + 137)
-#define __NR_setpriority (__NR_Linux + 138)
-#define __NR_sched_setparam (__NR_Linux + 139)
-#define __NR_sched_getparam (__NR_Linux + 140)
-#define __NR_sched_setscheduler (__NR_Linux + 141)
-#define __NR_sched_getscheduler (__NR_Linux + 142)
-#define __NR_sched_get_priority_max (__NR_Linux + 143)
-#define __NR_sched_get_priority_min (__NR_Linux + 144)
-#define __NR_sched_rr_get_interval (__NR_Linux + 145)
-#define __NR_mlock (__NR_Linux + 146)
-#define __NR_munlock (__NR_Linux + 147)
-#define __NR_mlockall (__NR_Linux + 148)
-#define __NR_munlockall (__NR_Linux + 149)
-#define __NR_vhangup (__NR_Linux + 150)
-#define __NR_pivot_root (__NR_Linux + 151)
-#define __NR__sysctl (__NR_Linux + 152)
-#define __NR_prctl (__NR_Linux + 153)
-#define __NR_adjtimex (__NR_Linux + 154)
-#define __NR_setrlimit (__NR_Linux + 155)
-#define __NR_chroot (__NR_Linux + 156)
-#define __NR_sync (__NR_Linux + 157)
-#define __NR_acct (__NR_Linux + 158)
-#define __NR_settimeofday (__NR_Linux + 159)
-#define __NR_mount (__NR_Linux + 160)
-#define __NR_umount2 (__NR_Linux + 161)
-#define __NR_swapon (__NR_Linux + 162)
-#define __NR_swapoff (__NR_Linux + 163)
-#define __NR_reboot (__NR_Linux + 164)
-#define __NR_sethostname (__NR_Linux + 165)
-#define __NR_setdomainname (__NR_Linux + 166)
-#define __NR_create_module (__NR_Linux + 167)
-#define __NR_init_module (__NR_Linux + 168)
-#define __NR_delete_module (__NR_Linux + 169)
-#define __NR_get_kernel_syms (__NR_Linux + 170)
-#define __NR_query_module (__NR_Linux + 171)
-#define __NR_quotactl (__NR_Linux + 172)
-#define __NR_nfsservctl (__NR_Linux + 173)
-#define __NR_getpmsg (__NR_Linux + 174)
-#define __NR_putpmsg (__NR_Linux + 175)
-#define __NR_afs_syscall (__NR_Linux + 176)
-#define __NR_reserved177 (__NR_Linux + 177)
-#define __NR_gettid (__NR_Linux + 178)
-#define __NR_readahead (__NR_Linux + 179)
-#define __NR_setxattr (__NR_Linux + 180)
-#define __NR_lsetxattr (__NR_Linux + 181)
-#define __NR_fsetxattr (__NR_Linux + 182)
-#define __NR_getxattr (__NR_Linux + 183)
-#define __NR_lgetxattr (__NR_Linux + 184)
-#define __NR_fgetxattr (__NR_Linux + 185)
-#define __NR_listxattr (__NR_Linux + 186)
-#define __NR_llistxattr (__NR_Linux + 187)
-#define __NR_flistxattr (__NR_Linux + 188)
-#define __NR_removexattr (__NR_Linux + 189)
-#define __NR_lremovexattr (__NR_Linux + 190)
-#define __NR_fremovexattr (__NR_Linux + 191)
-#define __NR_tkill (__NR_Linux + 192)
-#define __NR_reserved193 (__NR_Linux + 193)
-#define __NR_futex (__NR_Linux + 194)
-#define __NR_sched_setaffinity (__NR_Linux + 195)
-#define __NR_sched_getaffinity (__NR_Linux + 196)
-#define __NR_cacheflush (__NR_Linux + 197)
-#define __NR_cachectl (__NR_Linux + 198)
-#define __NR_sysmips (__NR_Linux + 199)
-#define __NR_io_setup (__NR_Linux + 200)
-#define __NR_io_destroy (__NR_Linux + 201)
-#define __NR_io_getevents (__NR_Linux + 202)
-#define __NR_io_submit (__NR_Linux + 203)
-#define __NR_io_cancel (__NR_Linux + 204)
-#define __NR_exit_group (__NR_Linux + 205)
-#define __NR_lookup_dcookie (__NR_Linux + 206)
-#define __NR_epoll_create (__NR_Linux + 207)
-#define __NR_epoll_ctl (__NR_Linux + 208)
-#define __NR_epoll_wait (__NR_Linux + 209)
-#define __NR_remap_file_pages (__NR_Linux + 210)
-#define __NR_rt_sigreturn (__NR_Linux + 211)
-#define __NR_fcntl64 (__NR_Linux + 212)
-#define __NR_set_tid_address (__NR_Linux + 213)
-#define __NR_restart_syscall (__NR_Linux + 214)
-#define __NR_semtimedop (__NR_Linux + 215)
-#define __NR_fadvise64 (__NR_Linux + 216)
-#define __NR_statfs64 (__NR_Linux + 217)
-#define __NR_fstatfs64 (__NR_Linux + 218)
-#define __NR_sendfile64 (__NR_Linux + 219)
-#define __NR_timer_create (__NR_Linux + 220)
-#define __NR_timer_settime (__NR_Linux + 221)
-#define __NR_timer_gettime (__NR_Linux + 222)
-#define __NR_timer_getoverrun (__NR_Linux + 223)
-#define __NR_timer_delete (__NR_Linux + 224)
-#define __NR_clock_settime (__NR_Linux + 225)
-#define __NR_clock_gettime (__NR_Linux + 226)
-#define __NR_clock_getres (__NR_Linux + 227)
-#define __NR_clock_nanosleep (__NR_Linux + 228)
-#define __NR_tgkill (__NR_Linux + 229)
-#define __NR_utimes (__NR_Linux + 230)
-#define __NR_mbind (__NR_Linux + 231)
-#define __NR_get_mempolicy (__NR_Linux + 232)
-#define __NR_set_mempolicy (__NR_Linux + 233)
-#define __NR_mq_open (__NR_Linux + 234)
-#define __NR_mq_unlink (__NR_Linux + 235)
-#define __NR_mq_timedsend (__NR_Linux + 236)
-#define __NR_mq_timedreceive (__NR_Linux + 237)
-#define __NR_mq_notify (__NR_Linux + 238)
-#define __NR_mq_getsetattr (__NR_Linux + 239)
-#define __NR_vserver (__NR_Linux + 240)
-#define __NR_waitid (__NR_Linux + 241)
-/* #define __NR_sys_setaltroot (__NR_Linux + 242) */
-#define __NR_add_key (__NR_Linux + 243)
-#define __NR_request_key (__NR_Linux + 244)
-#define __NR_keyctl (__NR_Linux + 245)
-#define __NR_set_thread_area (__NR_Linux + 246)
-#define __NR_inotify_init (__NR_Linux + 247)
-#define __NR_inotify_add_watch (__NR_Linux + 248)
-#define __NR_inotify_rm_watch (__NR_Linux + 249)
-#define __NR_migrate_pages (__NR_Linux + 250)
-#define __NR_openat (__NR_Linux + 251)
-#define __NR_mkdirat (__NR_Linux + 252)
-#define __NR_mknodat (__NR_Linux + 253)
-#define __NR_fchownat (__NR_Linux + 254)
-#define __NR_futimesat (__NR_Linux + 255)
-#define __NR_newfstatat (__NR_Linux + 256)
-#define __NR_unlinkat (__NR_Linux + 257)
-#define __NR_renameat (__NR_Linux + 258)
-#define __NR_linkat (__NR_Linux + 259)
-#define __NR_symlinkat (__NR_Linux + 260)
-#define __NR_readlinkat (__NR_Linux + 261)
-#define __NR_fchmodat (__NR_Linux + 262)
-#define __NR_faccessat (__NR_Linux + 263)
-#define __NR_pselect6 (__NR_Linux + 264)
-#define __NR_ppoll (__NR_Linux + 265)
-#define __NR_unshare (__NR_Linux + 266)
-#define __NR_splice (__NR_Linux + 267)
-#define __NR_sync_file_range (__NR_Linux + 268)
-#define __NR_tee (__NR_Linux + 269)
-#define __NR_vmsplice (__NR_Linux + 270)
-#define __NR_move_pages (__NR_Linux + 271)
-#define __NR_set_robust_list (__NR_Linux + 272)
-#define __NR_get_robust_list (__NR_Linux + 273)
-#define __NR_kexec_load (__NR_Linux + 274)
-#define __NR_getcpu (__NR_Linux + 275)
-#define __NR_epoll_pwait (__NR_Linux + 276)
-#define __NR_ioprio_set (__NR_Linux + 277)
-#define __NR_ioprio_get (__NR_Linux + 278)
-#define __NR_utimensat (__NR_Linux + 279)
-#define __NR_signalfd (__NR_Linux + 280)
-#define __NR_timerfd (__NR_Linux + 281)
-#define __NR_eventfd (__NR_Linux + 282)
-#define __NR_fallocate (__NR_Linux + 283)
-#define __NR_timerfd_create (__NR_Linux + 284)
-#define __NR_timerfd_gettime (__NR_Linux + 285)
-#define __NR_timerfd_settime (__NR_Linux + 286)
-#define __NR_signalfd4 (__NR_Linux + 287)
-#define __NR_eventfd2 (__NR_Linux + 288)
-#define __NR_epoll_create1 (__NR_Linux + 289)
-#define __NR_dup3 (__NR_Linux + 290)
-#define __NR_pipe2 (__NR_Linux + 291)
-#define __NR_inotify_init1 (__NR_Linux + 292)
-#define __NR_preadv (__NR_Linux + 293)
-#define __NR_pwritev (__NR_Linux + 294)
-#define __NR_rt_tgsigqueueinfo (__NR_Linux + 295)
-#define __NR_perf_event_open (__NR_Linux + 296)
-#define __NR_accept4 (__NR_Linux + 297)
-#define __NR_recvmmsg (__NR_Linux + 298)
-#define __NR_getdents64 (__NR_Linux + 299)
-#define __NR_fanotify_init (__NR_Linux + 300)
-#define __NR_fanotify_mark (__NR_Linux + 301)
-#define __NR_prlimit64 (__NR_Linux + 302)
-#define __NR_name_to_handle_at (__NR_Linux + 303)
-#define __NR_open_by_handle_at (__NR_Linux + 304)
-#define __NR_clock_adjtime (__NR_Linux + 305)
-#define __NR_syncfs (__NR_Linux + 306)
-#define __NR_sendmmsg (__NR_Linux + 307)
-#define __NR_setns (__NR_Linux + 308)
-#define __NR_process_vm_readv (__NR_Linux + 309)
-#define __NR_process_vm_writev (__NR_Linux + 310)
-#define __NR_kcmp (__NR_Linux + 311)
-#define __NR_finit_module (__NR_Linux + 312)
-#define __NR_sched_setattr (__NR_Linux + 313)
-#define __NR_sched_getattr (__NR_Linux + 314)
-#define __NR_renameat2 (__NR_Linux + 315)
-#define __NR_seccomp (__NR_Linux + 316)
-#define __NR_getrandom (__NR_Linux + 317)
-#define __NR_memfd_create (__NR_Linux + 318)
-#define __NR_bpf (__NR_Linux + 319)
-#define __NR_execveat (__NR_Linux + 320)
-#define __NR_userfaultfd (__NR_Linux + 321)
-#define __NR_membarrier (__NR_Linux + 322)
-#define __NR_mlock2 (__NR_Linux + 323)
-#define __NR_copy_file_range (__NR_Linux + 324)
-#define __NR_preadv2 (__NR_Linux + 325)
-#define __NR_pwritev2 (__NR_Linux + 326)
-#define __NR_pkey_mprotect (__NR_Linux + 327)
-#define __NR_pkey_alloc (__NR_Linux + 328)
-#define __NR_pkey_free (__NR_Linux + 329)
-#define __NR_statx (__NR_Linux + 330)
-#define __NR_rseq (__NR_Linux + 331)
-#define __NR_io_pgetevents (__NR_Linux + 332)
-
-/*
- * Offset of the last N32 flavoured syscall
- */
-#define __NR_Linux_syscalls 332
+#define __NR_Linux 6000
+#include <asm/unistd_n32.h>
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
-#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 332
-
#endif /* _UAPI_ASM_UNISTD_H */
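A note on the refactor above: the three hand-maintained per-ABI syscall
tables collapse into a `#define __NR_Linux <base>` plus an include of a
per-ABI header generated at build time from syscall table files, so the
numbers visible to userspace are unchanged. The `__NR_O32_Linux`-style
base and count macros disappear from this file and presumably move into
the generated headers as well. An illustrative sketch of what a
generated <asm/unistd_o32.h> provides (not the literal file):

    /* Offsets are relative to the __NR_Linux base defined just before
     * the #include, so o32 still gets 4000 + 3 = 4003 for read(2). */
    #define __NR_read		(__NR_Linux + 3)
    #define __NR_write		(__NR_Linux + 4)
    /* ... */
    #define __NR_io_pgetevents	(__NR_Linux + 368)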
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 210c2802cf4d..89b07ea8d249 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -42,9 +42,8 @@ sw-$(CONFIG_CPU_TX39XX) := r2300_switch.o
sw-$(CONFIG_CPU_CAVIUM_OCTEON) := octeon_switch.o
obj-y += $(sw-y)
+obj-$(CONFIG_CPU_R2300_FPU) += r2300_fpu.o
obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o
-obj-$(CONFIG_CPU_R3000) += r2300_fpu.o
-obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP_UP) += smp-up.o
@@ -72,7 +71,7 @@ obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_32BIT) += scall32-o32.o
-obj-$(CONFIG_64BIT) += scall64-64.o
+obj-$(CONFIG_64BIT) += scall64-n64.o
obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o signal_o32.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index cbe4742d2fff..aebfda81120a 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -123,7 +123,6 @@ void output_thread_defines(void)
OFFSET(THREAD_REG31, task_struct, thread.reg31);
OFFSET(THREAD_STATUS, task_struct,
thread.cp0_status);
- OFFSET(THREAD_FPU, task_struct, thread.fpu);
OFFSET(THREAD_BVADDR, task_struct, \
thread.cp0_badvaddr);
@@ -135,8 +134,11 @@ void output_thread_defines(void)
BLANK();
}
+#ifdef CONFIG_MIPS_FP_SUPPORT
void output_thread_fpu_defines(void)
{
+ OFFSET(THREAD_FPU, task_struct, thread.fpu);
+
OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
@@ -174,6 +176,7 @@ void output_thread_fpu_defines(void)
OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
BLANK();
}
+#endif
void output_mm_defines(void)
{
@@ -341,6 +344,7 @@ void output_pm_defines(void)
}
#endif
+#ifdef CONFIG_MIPS_FP_SUPPORT
void output_kvm_defines(void)
{
COMMENT(" KVM/MIPS Specific offsets. ");
@@ -382,6 +386,7 @@ void output_kvm_defines(void)
OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
BLANK();
}
+#endif
#ifdef CONFIG_MIPS_CPS
void output_cps_defines(void)
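Context for the asm-offsets.c change: this file is never linked into the
kernel; it is compiled to assembly, and the marker strings it emits are
scraped into generated #defines (THREAD_FPU, THREAD_FPR0, ...) that
assembly sources include. Guarding output_thread_fpu_defines() and
output_kvm_defines() with CONFIG_MIPS_FP_SUPPORT therefore removes the
FPU offsets from the generated header entirely, turning any leftover
assembly user into a build error rather than silent breakage. A minimal
sketch of the underlying kbuild trick (see include/linux/kbuild.h for
the authoritative form):

    /* Emits ".ascii "->THREAD_FPU <value> ..."" into the .s output;
     * a sed script later rewrites each marker into a #define. */
    #define DEFINE(sym, val) \
    	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
    #define OFFSET(sym, str, mem) \
    	DEFINE(sym, offsetof(struct str, mem))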
diff --git a/arch/mips/kernel/bmips_5xxx_init.S b/arch/mips/kernel/bmips_5xxx_init.S
index adaa82e00f2b..9e422d186a17 100644
--- a/arch/mips/kernel/bmips_5xxx_init.S
+++ b/arch/mips/kernel/bmips_5xxx_init.S
@@ -632,12 +632,6 @@ core_init:
bal set_zephyr
nop
-#if ENABLE_FPU==1
- /* initialize the Floating point unit (both TPs) */
- bal init_fpu
- nop
-#endif
-
/* set low latency memory bus */
li a0, 1
bal set_llmb
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index e48f6c0a9e4a..180ad081afcf 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -58,9 +58,6 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc)
{
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
- int bc_false = 0;
- unsigned int fcr31;
- unsigned int bit;
if (!cpu_has_mmips)
return 0;
@@ -139,8 +136,13 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
+#ifdef CONFIG_MIPS_FP_SUPPORT
case mm_bc2f_op:
- case mm_bc1f_op:
+ case mm_bc1f_op: {
+ int bc_false = 0;
+ unsigned int fcr31;
+ unsigned int bit;
+
bc_false = 1;
/* Fall through */
case mm_bc2t_op:
@@ -167,6 +169,8 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.pc_inc + dec_insn.next_pc_inc;
return 1;
}
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+ }
break;
case mm_pool16c_op:
switch (insn.mm_i_format.rt) {
@@ -416,8 +420,8 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
int __compute_return_epc_for_insn(struct pt_regs *regs,
union mips_instruction insn)
{
- unsigned int bit, fcr31, dspcontrol, reg;
long epc = regs->cp0_epc;
+ unsigned int dspcontrol;
int ret = 0;
switch (insn.i_format.opcode) {
@@ -447,6 +451,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bltzl_op:
if (NO_R6EMU)
goto sigill_r2r6;
+ /* fall through */
case bltz_op:
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -460,6 +465,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bgezl_op:
if (NO_R6EMU)
goto sigill_r2r6;
+ /* fall through */
case bgez_op:
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -555,6 +561,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case jalx_op:
case jal_op:
regs->regs[31] = regs->cp0_epc + 8;
+ /* fall through */
case j_op:
epc += 4;
epc >>= 28;
@@ -571,6 +578,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case beql_op:
if (NO_R6EMU)
goto sigill_r2r6;
+ /* fall through */
case beq_op:
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt]) {
@@ -585,6 +593,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bnel_op:
if (NO_R6EMU)
goto sigill_r2r6;
+ /* fall through */
case bne_op:
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt]) {
@@ -599,6 +608,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case blezl_op: /* not really i_format */
if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r2r6;
+ /* fall through */
case blez_op:
/*
* Compact branches for R6 for the
@@ -634,6 +644,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
case bgtzl_op:
if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r2r6;
+ /* fall through */
case bgtz_op:
/*
* Compact branches for R6 for the
@@ -667,23 +678,18 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
regs->cp0_epc = epc;
break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
/*
* And now the FPA/cp1 branch instructions.
*/
- case cop1_op:
+ case cop1_op: {
+ unsigned int bit, fcr31, reg;
+
if (cpu_has_mips_r6 &&
((insn.i_format.rs == bc1eqz_op) ||
(insn.i_format.rs == bc1nez_op))) {
- if (!used_math()) { /* First time FPU user */
- ret = init_fpu();
- if (ret && NO_R6EMU) {
- ret = -ret;
- break;
- }
- ret = 0;
- set_used_math();
- }
- lose_fpu(1); /* Save FPU state for the emulator. */
+ if (!init_fp_ctx(current))
+ lose_fpu(1);
reg = insn.i_format.rt;
bit = get_fpr32(&current->thread.fpu.fpr[reg], 0) & 0x1;
if (insn.i_format.rs == bc1eqz_op)
@@ -736,6 +742,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
}
break;
}
+ }
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
#ifdef CONFIG_CPU_CAVIUM_OCTEON
case lwc2_op: /* This is bbit0 on Octeon */
if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
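Two patterns worth noting in the branch.c hunks above. The new
`/* fall through */` comments mark intentional switch fall-through for
-Wimplicit-fallthrough. And the open-coded "first FPU use" sequence is
replaced by a shared init_fp_ctx() helper: judging from the call site,
it returns nonzero when it freshly initialised FP context (memory
already holds the right values) and zero when context already existed,
in which case lose_fpu(1) must spill the live registers for the
emulator. A hedged reconstruction of the helper, mirroring the void
ptrace-local version removed later in this patch:

    static inline int init_fp_ctx(struct task_struct *target)
    {
    	/* If FP has been used then the target already has context. */
    	if (tsk_used_math(target))
    		return 0;

    	/* Fresh context: data registers all 1s; FCSR was preset by
    	 * mips_set_personality_nan(). */
    	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
    	set_stopped_child_used_math(target);
    	return 1;
    }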
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index c9e8622b5a16..bada74af7641 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -39,7 +39,7 @@ static inline void align_mod(const int align, const int mod)
".endr\n\t"
".set pop"
:
- : GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod));
+ : "n"(align), "n"(mod));
}
static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
@@ -92,7 +92,7 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
".set pop"
: "=&r" (lv1), "=r" (lw)
: "r" (m1), "r" (m2), "r" (s), "I" (0)
- : "hi", "lo", GCC_REG_ACCUM);
+ : "hi", "lo", "$0");
/* We have to use single integers for m1 and m2 and a double
* one for p to be sure the mulsidi3 gcc's RTL multiplication
* instruction has the workaround applied. Older versions of
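The cpu-bugs64.c change hardcodes the modern inline-asm spellings that
the GCC_IMM_ASM()/GCC_REG_ACCUM compatibility macros used to select for
long-unsupported compilers: "n" is the constraint that only matches
integer constants known at compile time, and "$0" names the accumulator
clobber. A tiny illustration of the "n" constraint (hypothetical
function, for illustration only):

    static inline void align_code(void)
    {
    	/* "n" requires a compile-time integer constant operand;
    	 * a runtime variable here would fail to build. */
    	asm volatile(".align	%0" : : "n"(5));	/* 2^5 = 32 bytes */
    }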
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index d535fc706a8b..95b18a194f53 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -36,6 +36,8 @@
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
/*
* Get the FPU Implementation/Revision.
*/
@@ -58,19 +60,6 @@ static inline int __cpu_has_fpu(void)
return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE;
}
-static inline unsigned long cpu_get_msa_id(void)
-{
- unsigned long status, msa_id;
-
- status = read_c0_status();
- __enable_fpu(FPU_64BIT);
- enable_msa();
- msa_id = read_msa_ir();
- disable_msa();
- write_c0_status(status);
- return msa_id;
-}
-
/*
* Determine the FCSR mask for FPU hardware.
*/
@@ -326,6 +315,45 @@ static int __init fpu_disable(char *s)
__setup("nofpu", fpu_disable);
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+#define mips_fpu_disabled 1
+
+static inline unsigned long cpu_get_fpu_id(void)
+{
+ return FPIR_IMP_NONE;
+}
+
+static inline int __cpu_has_fpu(void)
+{
+ return 0;
+}
+
+static void cpu_set_fpu_opts(struct cpuinfo_mips *c)
+{
+ /* no-op */
+}
+
+static void cpu_set_nofpu_opts(struct cpuinfo_mips *c)
+{
+ /* no-op */
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+static inline unsigned long cpu_get_msa_id(void)
+{
+ unsigned long status, msa_id;
+
+ status = read_c0_status();
+ __enable_fpu(FPU_64BIT);
+ enable_msa();
+ msa_id = read_msa_ir();
+ disable_msa();
+ write_c0_status(status);
+ return msa_id;
+}
+
static int mips_dsp_disabled;
static int __init dsp_disable(char *s)
@@ -489,12 +517,16 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
switch (isa) {
case MIPS_CPU_ISA_M64R2:
c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2;
+ /* fall through */
case MIPS_CPU_ISA_M64R1:
c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1;
+ /* fall through */
case MIPS_CPU_ISA_V:
c->isa_level |= MIPS_CPU_ISA_V;
+ /* fall through */
case MIPS_CPU_ISA_IV:
c->isa_level |= MIPS_CPU_ISA_IV;
+ /* fall through */
case MIPS_CPU_ISA_III:
c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
break;
@@ -502,14 +534,17 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
/* R6 incompatible with everything else */
case MIPS_CPU_ISA_M64R6:
c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6;
+ /* fall through */
case MIPS_CPU_ISA_M32R6:
c->isa_level |= MIPS_CPU_ISA_M32R6;
/* Break here so we don't add incompatible ISAs */
break;
case MIPS_CPU_ISA_M32R2:
c->isa_level |= MIPS_CPU_ISA_M32R2;
+ /* fall through */
case MIPS_CPU_ISA_M32R1:
c->isa_level |= MIPS_CPU_ISA_M32R1;
+ /* fall through */
case MIPS_CPU_ISA_II:
c->isa_level |= MIPS_CPU_ISA_II;
break;
@@ -1843,7 +1878,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
switch (c->processor_id & PRID_IMP_MASK) {
case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
switch (c->processor_id & PRID_REV_MASK) {
- case PRID_REV_LOONGSON3A_R2:
+ case PRID_REV_LOONGSON3A_R2_0:
+ case PRID_REV_LOONGSON3A_R2_1:
c->cputype = CPU_LOONGSON3;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
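With CONFIG_MIPS_FP_SUPPORT disabled, cpu-probe.c swaps the real FPU
probing for constant stubs so shared callers compile unchanged while the
optimizer deletes the FPU paths; `#define mips_fpu_disabled 1` turns the
"nofpu" decision into a compile-time constant. Hypothetical caller
shape (the stub names are from the hunk above, the surrounding function
is illustrative only):

    static void decide_fpu(struct cpuinfo_mips *c)
    {
    	if (mips_fpu_disabled)		/* folds to 1 when FP is off */
    		cpu_set_nofpu_opts(c);	/* no-op stub */
    	else
    		cpu_set_fpu_opts(c);	/* dead code, not emitted */
    }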
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index 731325a61a78..72056d54a2b8 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -16,6 +16,8 @@
#include <asm/cpu-features.h>
#include <asm/cpu-info.h>
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
/* Whether to accept legacy-NaN and 2008-NaN user binaries. */
bool mips_use_nan_legacy;
bool mips_use_nan_2008;
@@ -326,6 +328,8 @@ void mips_set_personality_nan(struct arch_elf_state *state)
}
}
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
int mips_elf_read_implies_exec(void *elf_ex, int exstack)
{
if (exstack != EXSTACK_DISABLE_X) {
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 7f3dfdbc3657..2ea0ec95efe9 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -322,7 +322,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
unsigned long fp)
{
unsigned long old_parent_ra;
- struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)
&return_to_handler;
int faulted, insns;
@@ -369,12 +368,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
if (unlikely(faulted))
goto out;
- if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp,
- NULL) == -EBUSY) {
- *parent_ra_addr = old_parent_ra;
- return;
- }
-
/*
* Get the recorded ip of the current mcount calling site in the
* __mcount_loc section, which will be used to filter the function
@@ -382,13 +375,10 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
*/
insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
- trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
+ self_ra -= (MCOUNT_INSN_SIZE * insns);
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
+ if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
*parent_ra_addr = old_parent_ra;
- }
return;
out:
ftrace_graph_stop();
@@ -410,13 +400,13 @@ unsigned long __init arch_syscall_addr(int nr)
unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
- if (nr >= __NR_N32_Linux && nr <= __NR_N32_Linux + __NR_N32_Linux_syscalls)
+ if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
- if (nr >= __NR_64_Linux && nr <= __NR_64_Linux + __NR_64_Linux_syscalls)
+ if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
- if (nr >= __NR_O32_Linux && nr <= __NR_O32_Linux + __NR_O32_Linux_syscalls)
+ if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif
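Two independent ftrace.c fixes above. First, prepare_ftrace_return()
moves to the consolidated function_graph_enter() entry point, which does
the return-stack push and the ftrace_graph_entry() filtering itself and
returns nonzero on rejection, so the open-coded curr_ret_stack
bookkeeping goes away. Second, the arch_syscall_addr() bounds change
from `<=` to `<` because the generated headers redefine
`__NR_*_Linux_syscalls` as a syscall count rather than the offset of the
last syscall. Worked through for o32 (a count of 369 is assumed from
the table above, which ends at io_pgetevents = 368):

    /* Old: last-offset semantics  -> nr <= 4000 + 368, i.e. nr <= 4368
     * New: count semantics (369)  -> nr <  4000 + 369, i.e. nr <= 4368
     * Same accepted range; only the meaning of the constant moved. */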
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 6c257b52f57f..398b905b027d 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -553,7 +553,9 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER ov ov sti silent /* #12 */
BUILD_HANDLER tr tr sti silent /* #13 */
BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */
+#ifdef CONFIG_MIPS_FP_SUPPORT
BUILD_HANDLER fpe fpe fpe silent /* #15 */
+#endif
BUILD_HANDLER ftlb ftlb none silent /* #16 */
BUILD_HANDLER msa msa sti silent /* #21 */
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
@@ -650,9 +652,10 @@ isrdhwr:
ori k1, _THREAD_MASK
xori k1, _THREAD_MASK
LONG_L v1, TI_TP_VALUE(k1)
+ .set push
.set arch=r4000
eret
- .set mips0
+ .set pop
#endif
.set pop
END(handle_ri_rdhwr)
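The genex.S fix brackets the `arch=r4000` override with
`.set push` / `.set pop`, which save and restore the assembler's whole
option state, instead of `.set mips0`, which resets the ISA to the
command-line baseline and can silently drop other `.set` options in
effect around the sequence. The au1k_wait() change in the next file is
the same idiom applied from C inline assembly; roughly:

    /* Override the ISA for a few instructions, then restore whatever
     * .set state was in effect before (placeholder body): */
    asm volatile("	.set	push\n"
    	     "	.set	arch=r4000\n"
    	     "	sync\n"
    	     "	.set	pop\n");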
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 046846999efd..695f55477503 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -101,7 +101,8 @@ static void __cpuidle au1k_wait(void)
unsigned long c0status = read_c0_status() | 1; /* irqs on */
__asm__(
- " .set arch=r4000 \n"
+ " .set push \n"
+ " .set arch=r4000 \n"
" cache 0x14, 0(%0) \n"
" cache 0x14, 32(%0) \n"
" sync \n"
@@ -111,7 +112,7 @@ static void __cpuidle au1k_wait(void)
" nop \n"
" nop \n"
" nop \n"
- " .set mips0 \n"
+ " .set pop \n"
: : "r" (au1k_wait), "r" (c0status));
}
@@ -183,7 +184,7 @@ void __init check_wait(void)
cpu_wait = r4k_wait;
break;
case CPU_LOONGSON3:
- if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
+ if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
cpu_wait = r4k_wait;
break;
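The PRID_REV_LOONGSON3A_R2 -> PRID_REV_LOONGSON3A_R2_0/_R2_1 rename
(also visible in cpu-probe.c above) splits one revision macro into two
steppings; the `>=` comparison here presumably keeps working because
_R2_0 carries the old macro's value:

    /* Assumed relationship, for illustration only:
     *   PRID_REV_LOONGSON3A_R2_0 == old PRID_REV_LOONGSON3A_R2
     *   PRID_REV_LOONGSON3A_R2_1 == a later stepping, higher PRID rev
     * so ">= PRID_REV_LOONGSON3A_R2_0" matches both and anything newer. */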
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 9f85b98d24ac..d5f7362e8c24 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -119,19 +119,11 @@ void mips_mt_regdump(unsigned long mvpctl)
local_irq_restore(flags);
}
-static int mt_opt_norps;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;
-static int __init rps_disable(char *s)
-{
- mt_opt_norps = 1;
- return 1;
-}
-__setup("norps", rps_disable);
-
static int __init rpsctl_set(char *str)
{
get_option(&str, &mt_opt_rpsctl);
@@ -169,9 +161,6 @@ void mips_mt_set_cpuoptions(void)
unsigned int oconfig7 = read_c0_config7();
unsigned int nconfig7 = oconfig7;
- if (mt_opt_norps) {
- printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
- }
if (mt_opt_rpsctl >= 0) {
printk("34K return prediction stack override set to %d.\n",
mt_opt_rpsctl);
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index eb18b186e858..cb22a558431e 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1174,13 +1174,6 @@ repeat:
fpu_emul:
regs->regs[31] = r31;
regs->cp0_epc = epc;
- if (!used_math()) { /* First time FPU user. */
- preempt_disable();
- err = init_fpu();
- preempt_enable();
- set_used_math();
- }
- lose_fpu(1); /* Save FPU state for the emulator. */
err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
@@ -2242,7 +2235,7 @@ fpu_emul:
#ifdef CONFIG_DEBUG_FS
-static int mipsr2_stats_show(struct seq_file *s, void *unused)
+static int mipsr2_emul_show(struct seq_file *s, void *unused)
{
seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
@@ -2308,9 +2301,9 @@ static int mipsr2_stats_show(struct seq_file *s, void *unused)
return 0;
}
-static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
+static int mipsr2_clear_show(struct seq_file *s, void *unused)
{
- mipsr2_stats_show(s, unused);
+ mipsr2_emul_show(s, unused);
__this_cpu_write((mipsr2emustats).movs, 0);
__this_cpu_write((mipsr2bdemustats).movs, 0);
@@ -2353,30 +2346,8 @@ static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
return 0;
}
-static int mipsr2_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mipsr2_stats_show, inode->i_private);
-}
-
-static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mipsr2_stats_clear_show, inode->i_private);
-}
-
-static const struct file_operations mipsr2_emul_fops = {
- .open = mipsr2_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations mipsr2_clear_fops = {
- .open = mipsr2_stats_clear_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
+DEFINE_SHOW_ATTRIBUTE(mipsr2_emul);
+DEFINE_SHOW_ATTRIBUTE(mipsr2_clear);
static int __init mipsr2_init_debugfs(void)
{
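DEFINE_SHOW_ATTRIBUTE(name) is the seq_file helper that generates
exactly the boilerplate deleted above from a name##_show() function,
which is why the two show routines are first renamed to the
mipsr2_emul/mipsr2_clear stems. Roughly what the first expansion
produces (see include/linux/seq_file.h for the authoritative
definition):

    static int mipsr2_emul_open(struct inode *inode, struct file *file)
    {
    	return single_open(file, mipsr2_emul_show, inode->i_private);
    }

    static const struct file_operations mipsr2_emul_fops = {
    	.owner   = THIS_MODULE,
    	.open    = mipsr2_emul_open,
    	.read    = seq_read,
    	.llseek  = seq_lseek,
    	.release = single_release,
    };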
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d4f7fd4550e1..6829a064aac8 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -737,10 +737,9 @@ static long prepare_for_fp_mode_switch(void *unused)
/*
* This is icky, but we use this to simply ensure that all CPUs have
* context switched, regardless of whether they were previously running
- * kernel or user code. This ensures that no CPU currently has its FPU
- * enabled, or is about to attempt to enable it through any path other
- * than enable_restore_fp_context() which will wait appropriately for
- * fp_mode_switching to be zero.
+ * kernel or user code. This ensures that no CPU that a mode-switching
+ * program may execute on keeps its FPU enabled (& in the old mode)
+ * throughout the mode switch.
*/
return 0;
}
@@ -829,8 +828,6 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
put_online_cpus();
- wake_up_var(&task->mm->context.fp_mode_switching);
-
return 0;
}
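The dropped wake_up_var() pairs with the rewritten comment: instead of
making late-arriving threads sleep until fp_mode_switching drops to
zero, the switch simply forces a context switch on every CPU and lets
the next FP use restore context in the new mode. A rough outline of the
resulting flow (a summary, not literal code):

    /* 1. update the task's FP mode state;
     * 2. for each online CPU, work_on_cpu(cpu,
     *    prepare_for_fp_mode_switch, NULL) -- running the stub work
     *    item forces a context switch on that CPU;
     * 3. return: no CPU still runs with the FPU enabled in the old
     *    mode, and nobody sleeps, so there is nothing to wake. */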
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index e5ba56c01ee0..ea54575255ea 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -50,25 +50,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
-static void init_fp_ctx(struct task_struct *target)
-{
- /* If FP has been used then the target already has context */
- if (tsk_used_math(target))
- return;
-
- /* Begin with data registers set to all 1s... */
- memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
-
- /* FCSR has been preset by `mips_set_personality_nan'. */
-
- /*
- * Record that the target has "used" math, such that the context
- * just initialised, and any modifications made by the caller,
- * aren't discarded.
- */
- set_stopped_child_used_math(target);
-}
-
/*
* Called by kernel/ptrace.c when detaching..
*
@@ -81,21 +62,6 @@ void ptrace_disable(struct task_struct *child)
}
/*
- * Poke at FCSR according to its mask. Set the Cause bits even
- * if a corresponding Enable bit is set. This will be noticed at
- * the time the thread is switched to and SIGFPE thrown accordingly.
- */
-static void ptrace_setfcr31(struct task_struct *child, u32 value)
-{
- u32 fcr31;
- u32 mask;
-
- fcr31 = child->thread.fpu.fcr31;
- mask = boot_cpu_data.fpu_msk31;
- child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
-}
-
-/*
* Read a general register set. We always use the 64-bit format, even
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
@@ -151,55 +117,6 @@ int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
return 0;
}
-int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
-{
- int i;
-
- if (!access_ok(VERIFY_WRITE, data, 33 * 8))
- return -EIO;
-
- if (tsk_used_math(child)) {
- union fpureg *fregs = get_fpu_regs(child);
- for (i = 0; i < 32; i++)
- __put_user(get_fpr64(&fregs[i], 0),
- i + (__u64 __user *)data);
- } else {
- for (i = 0; i < 32; i++)
- __put_user((__u64) -1, i + (__u64 __user *) data);
- }
-
- __put_user(child->thread.fpu.fcr31, data + 64);
- __put_user(boot_cpu_data.fpu_id, data + 65);
-
- return 0;
-}
-
-int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
-{
- union fpureg *fregs;
- u64 fpr_val;
- u32 value;
- int i;
-
- if (!access_ok(VERIFY_READ, data, 33 * 8))
- return -EIO;
-
- init_fp_ctx(child);
- fregs = get_fpu_regs(child);
-
- for (i = 0; i < 32; i++) {
- __get_user(fpr_val, i + (__u64 __user *)data);
- set_fpr64(&fregs[i], 0, fpr_val);
- }
-
- __get_user(value, data + 64);
- ptrace_setfcr31(child, value);
-
- /* FIR may not be written. */
-
- return 0;
-}
-
int ptrace_get_watch_regs(struct task_struct *child,
struct pt_watch_regs __user *addr)
{
@@ -420,6 +337,73 @@ static int gpr64_set(struct task_struct *target,
#endif /* CONFIG_64BIT */
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+/*
+ * Poke at FCSR according to its mask. Set the Cause bits even
+ * if a corresponding Enable bit is set. This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
+ */
+static void ptrace_setfcr31(struct task_struct *child, u32 value)
+{
+ u32 fcr31;
+ u32 mask;
+
+ fcr31 = child->thread.fpu.fcr31;
+ mask = boot_cpu_data.fpu_msk31;
+ child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
+}
+
+int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
+{
+ int i;
+
+ if (!access_ok(VERIFY_WRITE, data, 33 * 8))
+ return -EIO;
+
+ if (tsk_used_math(child)) {
+ union fpureg *fregs = get_fpu_regs(child);
+ for (i = 0; i < 32; i++)
+ __put_user(get_fpr64(&fregs[i], 0),
+ i + (__u64 __user *)data);
+ } else {
+ for (i = 0; i < 32; i++)
+ __put_user((__u64) -1, i + (__u64 __user *) data);
+ }
+
+ __put_user(child->thread.fpu.fcr31, data + 64);
+ __put_user(boot_cpu_data.fpu_id, data + 65);
+
+ return 0;
+}
+
+int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
+{
+ union fpureg *fregs;
+ u64 fpr_val;
+ u32 value;
+ int i;
+
+ if (!access_ok(VERIFY_READ, data, 33 * 8))
+ return -EIO;
+
+ init_fp_ctx(child);
+ fregs = get_fpu_regs(child);
+
+ for (i = 0; i < 32; i++) {
+ __get_user(fpr_val, i + (__u64 __user *)data);
+ set_fpr64(&fregs[i], 0, fpr_val);
+ }
+
+ __get_user(value, data + 64);
+ ptrace_setfcr31(child, value);
+
+ /* FIR may not be written. */
+
+ return 0;
+}
+
/*
* Copy the floating-point context to the supplied NT_PRFPREG buffer,
* !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
@@ -590,6 +574,178 @@ static int fpr_set(struct task_struct *target,
return err;
}
+/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
+static int fp_mode_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int fp_mode;
+
+ fp_mode = mips_get_process_fp_mode(target);
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+ sizeof(fp_mode));
+}
+
+/*
+ * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
+ *
+ * We optimize for the case where `count % sizeof(int) == 0', which
+ * is supposed to have been guaranteed by the kernel before calling
+ * us, e.g. in `ptrace_regset'. We enforce that requirement, so
+ * that we can safely avoid preinitializing temporaries for partial
+ * mode writes.
+ */
+static int fp_mode_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int fp_mode;
+ int err;
+
+ BUG_ON(count % sizeof(int));
+
+ if (pos + count > sizeof(fp_mode))
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+ sizeof(fp_mode));
+ if (err)
+ return err;
+
+ if (count > 0)
+ err = mips_set_process_fp_mode(target, fp_mode);
+
+ return err;
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+struct msa_control_regs {
+ unsigned int fir;
+ unsigned int fcsr;
+ unsigned int msair;
+ unsigned int msacsr;
+};
+
+static int copy_pad_fprs(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int *ppos, unsigned int *pcount,
+ void **pkbuf, void __user **pubuf,
+ unsigned int live_sz)
+{
+ int i, j, start, start_pad, err;
+ unsigned long long fill = ~0ull;
+ unsigned int cp_sz, pad_sz;
+
+ cp_sz = min(regset->size, live_sz);
+ pad_sz = regset->size - cp_sz;
+ WARN_ON(pad_sz % sizeof(fill));
+
+ i = start = err = 0;
+ for (; i < NUM_FPU_REGS; i++, start += regset->size) {
+ err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
+ &target->thread.fpu.fpr[i],
+ start, start + cp_sz);
+
+ start_pad = start + cp_sz;
+ for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
+ err |= user_regset_copyout(ppos, pcount, pkbuf, pubuf,
+ &fill, start_pad,
+ start_pad + sizeof(fill));
+ start_pad += sizeof(fill);
+ }
+ }
+
+ return err;
+}
+
+static int msa_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const unsigned int wr_size = NUM_FPU_REGS * regset->size;
+ const struct msa_control_regs ctrl_regs = {
+ .fir = boot_cpu_data.fpu_id,
+ .fcsr = target->thread.fpu.fcr31,
+ .msair = boot_cpu_data.msa_id,
+ .msacsr = target->thread.fpu.msacsr,
+ };
+ int err;
+
+ if (!tsk_used_math(target)) {
+ /* The task hasn't used FP or MSA, fill with 0xff */
+ err = copy_pad_fprs(target, regset, &pos, &count,
+ &kbuf, &ubuf, 0);
+ } else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
+ /* Copy scalar FP context, fill the rest with 0xff */
+ err = copy_pad_fprs(target, regset, &pos, &count,
+ &kbuf, &ubuf, 8);
+ } else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
+ /* Trivially copy the vector registers */
+ err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fpr,
+ 0, wr_size);
+ } else {
+ /* Copy as much context as possible, fill the rest with 0xff */
+ err = copy_pad_fprs(target, regset, &pos, &count,
+ &kbuf, &ubuf,
+ sizeof(target->thread.fpu.fpr[0]));
+ }
+
+ err |= user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ &ctrl_regs, wr_size,
+ wr_size + sizeof(ctrl_regs));
+ return err;
+}
+
+static int msa_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const unsigned int wr_size = NUM_FPU_REGS * regset->size;
+ struct msa_control_regs ctrl_regs;
+ unsigned int cp_sz;
+ int i, err, start;
+
+ init_fp_ctx(target);
+
+ if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
+ /* Trivially copy the vector registers */
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fpr,
+ 0, wr_size);
+ } else {
+ /* Copy as much context as possible */
+ cp_sz = min_t(unsigned int, regset->size,
+ sizeof(target->thread.fpu.fpr[0]));
+
+ i = start = err = 0;
+ for (; i < NUM_FPU_REGS; i++, start += regset->size) {
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fpr[i],
+ start, start + cp_sz);
+ }
+ }
+
+ if (!err)
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
+ wr_size, wr_size + sizeof(ctrl_regs));
+ if (!err) {
+ target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
+ target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
+ }
+
+ return err;
+}
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
/*
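
Note: the new NT_MIPS_MSA regset exposes 32 vector registers plus a control block (FIR, FCSR, MSAIR, MSACSR). copy_pad_fprs() handles threads whose live context is narrower than the 16-byte regset slots by filling the tail of each slot with 0xff, so never-used FP/MSA state reads back as all-ones rather than stale data. A userspace-side sketch of how one slot is composed:

    #include <stdint.h>
    #include <string.h>

    /* live_sz is 0 (FP/MSA never used), 8 (scalar FP only) or 16
     * (full vector context), mirroring the msa_get() branches. */
    static void build_msa_slot(uint8_t out[16], const uint8_t *live,
                               unsigned int live_sz)
    {
        memcpy(out, live, live_sz);                 /* valid state  */
        memset(out + live_sz, 0xff, 16 - live_sz);  /* ~0 fill      */
    }
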
@@ -759,57 +915,16 @@ static int dsp_active(struct task_struct *target,
return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
}
-/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
-static int fp_mode_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int fp_mode;
-
- fp_mode = mips_get_process_fp_mode(target);
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
- sizeof(fp_mode));
-}
-
-/*
- * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
- *
- * We optimize for the case where `count % sizeof(int) == 0', which
- * is supposed to have been guaranteed by the kernel before calling
- * us, e.g. in `ptrace_regset'. We enforce that requirement, so
- * that we can safely avoid preinitializing temporaries for partial
- * mode writes.
- */
-static int fp_mode_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int fp_mode;
- int err;
-
- BUG_ON(count % sizeof(int));
-
- if (pos + count > sizeof(fp_mode))
- return -EIO;
-
- err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
- sizeof(fp_mode));
- if (err)
- return err;
-
- if (count > 0)
- err = mips_set_process_fp_mode(target, fp_mode);
-
- return err;
-}
-
enum mips_regset {
REGSET_GPR,
- REGSET_FPR,
REGSET_DSP,
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ REGSET_FPR,
REGSET_FP_MODE,
+#endif
+#ifdef CONFIG_CPU_HAS_MSA
+ REGSET_MSA,
+#endif
};
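
Note: the enum can be compacted by #ifdef without breaking the regset arrays below because those arrays use designated initializers; a kernel built without FP or MSA support simply gets a smaller, hole-free table. A minimal standalone sketch of the pattern (config symbol assumed):

    enum example_regset {
        EX_GPR,
    #ifdef CONFIG_EXAMPLE_FP    /* assumed config symbol */
        EX_FPR,
    #endif
    };

    static const int example_table[] = {
        [EX_GPR] = 1,
    #ifdef CONFIG_EXAMPLE_FP
        [EX_FPR] = 2,           /* slot exists only when enumerator does */
    #endif
    };
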
struct pt_regs_offset {
@@ -907,14 +1022,6 @@ static const struct user_regset mips_regsets[] = {
.get = gpr32_get,
.set = gpr32_set,
},
- [REGSET_FPR] = {
- .core_note_type = NT_PRFPREG,
- .n = ELF_NFPREG,
- .size = sizeof(elf_fpreg_t),
- .align = sizeof(elf_fpreg_t),
- .get = fpr_get,
- .set = fpr_set,
- },
[REGSET_DSP] = {
.core_note_type = NT_MIPS_DSP,
.n = NUM_DSP_REGS + 1,
@@ -924,6 +1031,15 @@ static const struct user_regset mips_regsets[] = {
.set = dsp32_set,
.active = dsp_active,
},
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .get = fpr_get,
+ .set = fpr_set,
+ },
[REGSET_FP_MODE] = {
.core_note_type = NT_MIPS_FP_MODE,
.n = 1,
@@ -932,6 +1048,17 @@ static const struct user_regset mips_regsets[] = {
.get = fp_mode_get,
.set = fp_mode_set,
},
+#endif
+#ifdef CONFIG_CPU_HAS_MSA
+ [REGSET_MSA] = {
+ .core_note_type = NT_MIPS_MSA,
+ .n = NUM_FPU_REGS + 1,
+ .size = 16,
+ .align = 16,
+ .get = msa_get,
+ .set = msa_set,
+ },
+#endif
};
static const struct user_regset_view user_mips_view = {
@@ -955,14 +1082,6 @@ static const struct user_regset mips64_regsets[] = {
.get = gpr64_get,
.set = gpr64_set,
},
- [REGSET_FPR] = {
- .core_note_type = NT_PRFPREG,
- .n = ELF_NFPREG,
- .size = sizeof(elf_fpreg_t),
- .align = sizeof(elf_fpreg_t),
- .get = fpr_get,
- .set = fpr_set,
- },
[REGSET_DSP] = {
.core_note_type = NT_MIPS_DSP,
.n = NUM_DSP_REGS + 1,
@@ -972,6 +1091,7 @@ static const struct user_regset mips64_regsets[] = {
.set = dsp64_set,
.active = dsp_active,
},
+#ifdef CONFIG_MIPS_FP_SUPPORT
[REGSET_FP_MODE] = {
.core_note_type = NT_MIPS_FP_MODE,
.n = 1,
@@ -980,6 +1100,25 @@ static const struct user_regset mips64_regsets[] = {
.get = fp_mode_get,
.set = fp_mode_set,
},
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .get = fpr_get,
+ .set = fpr_set,
+ },
+#endif
+#ifdef CONFIG_CPU_HAS_MSA
+ [REGSET_MSA] = {
+ .core_note_type = NT_MIPS_MSA,
+ .n = NUM_FPU_REGS + 1,
+ .size = 16,
+ .align = 16,
+ .get = msa_get,
+ .set = msa_set,
+ },
+#endif
};
static const struct user_regset_view user_mips64_view = {
@@ -1040,7 +1179,6 @@ long arch_ptrace(struct task_struct *child, long request,
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
- union fpureg *fregs;
unsigned long tmp = 0;
regs = task_pt_regs(child);
@@ -1050,7 +1188,10 @@ long arch_ptrace(struct task_struct *child, long request,
case 0 ... 31:
tmp = regs->regs[addr];
break;
- case FPR_BASE ... FPR_BASE + 31:
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case FPR_BASE ... FPR_BASE + 31: {
+ union fpureg *fregs;
+
if (!tsk_used_math(child)) {
/* FP not yet used */
tmp = -1;
@@ -1072,6 +1213,15 @@ long arch_ptrace(struct task_struct *child, long request,
#endif
tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
break;
+ }
+ case FPC_CSR:
+ tmp = child->thread.fpu.fcr31;
+ break;
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = boot_cpu_data.fpu_id;
+ break;
+#endif
case PC:
tmp = regs->cp0_epc;
break;
@@ -1092,13 +1242,6 @@ long arch_ptrace(struct task_struct *child, long request,
tmp = regs->acx;
break;
#endif
- case FPC_CSR:
- tmp = child->thread.fpu.fcr31;
- break;
- case FPC_EIR:
- /* implementation / version register */
- tmp = boot_cpu_data.fpu_id;
- break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -1149,6 +1292,7 @@ long arch_ptrace(struct task_struct *child, long request,
mips_syscall_is_indirect(child, regs))
mips_syscall_update_nr(child, regs);
break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);
@@ -1168,6 +1312,11 @@ long arch_ptrace(struct task_struct *child, long request,
set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
+ case FPC_CSR:
+ init_fp_ctx(child);
+ ptrace_setfcr31(child, data);
+ break;
+#endif
case PC:
regs->cp0_epc = data;
break;
@@ -1182,10 +1331,6 @@ long arch_ptrace(struct task_struct *child, long request,
regs->acx = data;
break;
#endif
- case FPC_CSR:
- init_fp_ctx(child);
- ptrace_setfcr31(child, data);
- break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -1221,6 +1366,7 @@ long arch_ptrace(struct task_struct *child, long request,
ret = ptrace_setregs(child, datavp);
break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
case PTRACE_GETFPREGS:
ret = ptrace_getfpregs(child, datavp);
break;
@@ -1228,7 +1374,7 @@ long arch_ptrace(struct task_struct *child, long request,
case PTRACE_SETFPREGS:
ret = ptrace_setfpregs(child, datavp);
break;
-
+#endif
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value, datalp);
break;
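
Note: with the regset registered in both views, a tracer reaches the MSA state through PTRACE_GETREGSET. A minimal sketch (the NT_MIPS_MSA fallback value follows the kernel's elf.h numbering but is an assumption here; the buffer size is n = 33 slots of 16 bytes, from the regset definition above):

    #include <elf.h>
    #include <stdint.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    #ifndef NT_MIPS_MSA
    #define NT_MIPS_MSA 0x802   /* assumed if the libc elf.h lacks it */
    #endif

    /* Fetch 32 vector registers + the 16-byte control block from a
     * stopped tracee. */
    static long read_msa(pid_t pid, uint8_t buf[33 * 16])
    {
        struct iovec iov = { .iov_base = buf, .iov_len = 33 * 16 };
        return ptrace(PTRACE_GETREGSET, pid, NT_MIPS_MSA, &iov);
    }
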
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index bc348d44d151..2525eca9c962 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -82,7 +82,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
- union fpureg *fregs;
unsigned int tmp;
regs = task_pt_regs(child);
@@ -92,7 +91,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case 0 ... 31:
tmp = regs->regs[addr];
break;
- case FPR_BASE ... FPR_BASE + 31:
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case FPR_BASE ... FPR_BASE + 31: {
+ union fpureg *fregs;
+
if (!tsk_used_math(child)) {
/* FP not yet used */
tmp = -1;
@@ -111,6 +113,15 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
}
tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
break;
+ }
+ case FPC_CSR:
+ tmp = child->thread.fpu.fcr31;
+ break;
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = boot_cpu_data.fpu_id;
+ break;
+#endif /* CONFIG_MIPS_FP_SUPPORT */
case PC:
tmp = regs->cp0_epc;
break;
@@ -126,13 +137,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case MMLO:
tmp = regs->lo;
break;
- case FPC_CSR:
- tmp = child->thread.fpu.fcr31;
- break;
- case FPC_EIR:
- /* implementation / version register */
- tmp = boot_cpu_data.fpu_id;
- break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -203,6 +207,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
mips_syscall_is_indirect(child, regs))
mips_syscall_update_nr(child, regs);
break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
case FPR_BASE ... FPR_BASE + 31: {
union fpureg *fregs = get_fpu_regs(child);
@@ -225,6 +230,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
+ case FPC_CSR:
+ child->thread.fpu.fcr31 = data;
+ break;
+#endif /* CONFIG_MIPS_FP_SUPPORT */
case PC:
regs->cp0_epc = data;
break;
@@ -234,9 +243,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case MMLO:
regs->lo = data;
break;
- case FPC_CSR:
- child->thread.fpu.fcr31 = data;
- break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -274,6 +280,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
(struct user_pt_regs __user *) (__u64) data);
break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
case PTRACE_GETFPREGS:
ret = ptrace_getfpregs(child, (__u32 __user *) (__u64) data);
break;
@@ -281,7 +288,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case PTRACE_SETFPREGS:
ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data);
break;
-
+#endif
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
(unsigned int __user *) (unsigned long) data);
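
Note: the compat (32-bit tracer on a 64-bit kernel) paths gain the same CONFIG_MIPS_FP_SUPPORT gating; unlike the native POKEUSR path, the compat FPC_CSR write still assigns fcr31 directly rather than going through ptrace_setfcr31(), preserving the pre-existing behaviour. A sketch of the legacy interface from the tracer side (glibc spells the request PTRACE_PEEKUSER; FPC_CSR comes from the MIPS uapi asm/ptrace.h):

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>     /* FPC_CSR, on a MIPS target */

    static long peek_fcsr(pid_t pid)
    {
        /* returns fcr31, or -1 with errno set */
        return ptrace(PTRACE_PEEKUSER, pid, FPC_CSR, 0);
    }
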
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index 3062ba66c563..12e58053544f 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -52,64 +52,6 @@ LEAF(_restore_fp)
jr ra
END(_restore_fp)
-/*
- * Load the FPU with signalling NANS. This bit pattern we're using has
- * the property that no matter whether considered as single or as double
- * precision represents signaling NANS.
- *
- * The value to initialize fcr31 to comes in $a0.
- */
-
- .set push
- SET_HARDFLOAT
-
-LEAF(_init_fpu)
- mfc0 t0, CP0_STATUS
- li t1, ST0_CU1
- or t0, t1
- mtc0 t0, CP0_STATUS
-
- ctc1 a0, fcr31
-
- li t0, -1
-
- mtc1 t0, $f0
- mtc1 t0, $f1
- mtc1 t0, $f2
- mtc1 t0, $f3
- mtc1 t0, $f4
- mtc1 t0, $f5
- mtc1 t0, $f6
- mtc1 t0, $f7
- mtc1 t0, $f8
- mtc1 t0, $f9
- mtc1 t0, $f10
- mtc1 t0, $f11
- mtc1 t0, $f12
- mtc1 t0, $f13
- mtc1 t0, $f14
- mtc1 t0, $f15
- mtc1 t0, $f16
- mtc1 t0, $f17
- mtc1 t0, $f18
- mtc1 t0, $f19
- mtc1 t0, $f20
- mtc1 t0, $f21
- mtc1 t0, $f22
- mtc1 t0, $f23
- mtc1 t0, $f24
- mtc1 t0, $f25
- mtc1 t0, $f26
- mtc1 t0, $f27
- mtc1 t0, $f28
- mtc1 t0, $f29
- mtc1 t0, $f30
- mtc1 t0, $f31
- jr ra
- END(_init_fpu)
-
- .set pop
-
.set noreorder
/**
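
Note: the removed _init_fpu filled every FPR with an all-ones pattern, which under the legacy (pre-IEEE-754-2008) MIPS NaN encoding is a signalling NaN whether read as single or double precision; the replacement is not part of this hunk. A standalone sketch verifying the pattern is at least a NaN (signalling-ness depends on the FPU's NaN mode):

    #include <stdint.h>
    #include <string.h>

    static int all_ones_is_nan(void)
    {
        uint64_t pattern = ~0ull;
        double d;
        memcpy(&d, &pattern, sizeof(d));
        return d != d;      /* NaN compares unequal to itself */
    }
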
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 8e3a6020c613..59be5c812aa2 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -86,150 +86,6 @@ LEAF(_init_msa_upper)
#endif
-/*
- * Load the FPU with signalling NANS. This bit pattern we're using has
- * the property that no matter whether considered as single or as double
- * precision represents signaling NANS.
- *
- * The value to initialize fcr31 to comes in $a0.
- */
-
- .set push
- SET_HARDFLOAT
-
-LEAF(_init_fpu)
- mfc0 t0, CP0_STATUS
- li t1, ST0_CU1
- or t0, t1
- mtc0 t0, CP0_STATUS
- enable_fpu_hazard
-
- ctc1 a0, fcr31
-
- li t1, -1 # SNaN
-
-#ifdef CONFIG_64BIT
- sll t0, t0, 5
- bgez t0, 1f # 16 / 32 register mode?
-
- dmtc1 t1, $f1
- dmtc1 t1, $f3
- dmtc1 t1, $f5
- dmtc1 t1, $f7
- dmtc1 t1, $f9
- dmtc1 t1, $f11
- dmtc1 t1, $f13
- dmtc1 t1, $f15
- dmtc1 t1, $f17
- dmtc1 t1, $f19
- dmtc1 t1, $f21
- dmtc1 t1, $f23
- dmtc1 t1, $f25
- dmtc1 t1, $f27
- dmtc1 t1, $f29
- dmtc1 t1, $f31
-1:
-#endif
-
-#ifdef CONFIG_CPU_MIPS32
- mtc1 t1, $f0
- mtc1 t1, $f1
- mtc1 t1, $f2
- mtc1 t1, $f3
- mtc1 t1, $f4
- mtc1 t1, $f5
- mtc1 t1, $f6
- mtc1 t1, $f7
- mtc1 t1, $f8
- mtc1 t1, $f9
- mtc1 t1, $f10
- mtc1 t1, $f11
- mtc1 t1, $f12
- mtc1 t1, $f13
- mtc1 t1, $f14
- mtc1 t1, $f15
- mtc1 t1, $f16
- mtc1 t1, $f17
- mtc1 t1, $f18
- mtc1 t1, $f19
- mtc1 t1, $f20
- mtc1 t1, $f21
- mtc1 t1, $f22
- mtc1 t1, $f23
- mtc1 t1, $f24
- mtc1 t1, $f25
- mtc1 t1, $f26
- mtc1 t1, $f27
- mtc1 t1, $f28
- mtc1 t1, $f29
- mtc1 t1, $f30
- mtc1 t1, $f31
-
-#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
- .set push
- .set MIPS_ISA_LEVEL_RAW
- .set fp=64
- sll t0, t0, 5 # is Status.FR set?
- bgez t0, 1f # no: skip setting upper 32b
-
- mthc1 t1, $f0
- mthc1 t1, $f1
- mthc1 t1, $f2
- mthc1 t1, $f3
- mthc1 t1, $f4
- mthc1 t1, $f5
- mthc1 t1, $f6
- mthc1 t1, $f7
- mthc1 t1, $f8
- mthc1 t1, $f9
- mthc1 t1, $f10
- mthc1 t1, $f11
- mthc1 t1, $f12
- mthc1 t1, $f13
- mthc1 t1, $f14
- mthc1 t1, $f15
- mthc1 t1, $f16
- mthc1 t1, $f17
- mthc1 t1, $f18
- mthc1 t1, $f19
- mthc1 t1, $f20
- mthc1 t1, $f21
- mthc1 t1, $f22
- mthc1 t1, $f23
- mthc1 t1, $f24
- mthc1 t1, $f25
- mthc1 t1, $f26
- mthc1 t1, $f27
- mthc1 t1, $f28
- mthc1 t1, $f29
- mthc1 t1, $f30
- mthc1 t1, $f31
-1: .set pop
-#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
-#else
- .set MIPS_ISA_ARCH_LEVEL_RAW
- dmtc1 t1, $f0
- dmtc1 t1, $f2
- dmtc1 t1, $f4
- dmtc1 t1, $f6
- dmtc1 t1, $f8
- dmtc1 t1, $f10
- dmtc1 t1, $f12
- dmtc1 t1, $f14
- dmtc1 t1, $f16
- dmtc1 t1, $f18
- dmtc1 t1, $f20
- dmtc1 t1, $f22
- dmtc1 t1, $f24
- dmtc1 t1, $f26
- dmtc1 t1, $f28
- dmtc1 t1, $f30
-#endif
- jr ra
- END(_init_fpu)
-
- .set pop /* SET_HARDFLOAT */
-
.set noreorder
/**
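
Note: the r4k variant of the removed _init_fpu additionally had to cope with the Status.FR mode bit (bit 26; the `sll t0, t0, 5` sequences move it into the sign bit so bgez can test it). With FR=0 only even-numbered registers exist as independent 64-bit FPRs, so the odd-register and upper-half (mthc1) initialisation is skipped. A sketch of the register pairing the code was handling:

    /* FR=1: $f0..$f31 are 32 independent 64-bit FPRs.
     * FR=0: even/odd pairs alias one 64-bit value, so doubles live in
     * even registers only. */
    static int double_base_reg(int logical, int fr)
    {
        return fr ? logical : (logical & ~1);
    }
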
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 91d3c8c46097..d9434cd0f568 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -22,9 +22,6 @@
#include <asm/war.h>
#include <asm/asm-offsets.h>
-/* Highest syscall used of any syscall flavour */
-#define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls
-
.align 5
NESTED(handle_sys, PT_SIZE, sp)
.set noat
@@ -89,7 +86,7 @@ loads_done:
bnez t0, syscall_trace_entry # -> yes
syscall_common:
subu v0, v0, __NR_O32_Linux # check syscall number
- sltiu t0, v0, __NR_O32_Linux_syscalls + 1
+ sltiu t0, v0, __NR_O32_Linux_syscalls
beqz t0, illegal_syscall
sll t0, v0, 2
@@ -185,7 +182,7 @@ illegal_syscall:
LEAF(sys_syscall)
subu t0, a0, __NR_O32_Linux # check syscall number
- sltiu v0, t0, __NR_O32_Linux_syscalls + 1
+ sltiu v0, t0, __NR_O32_Linux_syscalls
beqz t0, einval # do not recurse
sll t1, t0, 2
beqz v0, einval
@@ -208,248 +205,6 @@ einval: li v0, -ENOSYS
jr ra
END(sys_syscall)
- .align 2
- .type sys_call_table, @object
-EXPORT(sys_call_table)
- PTR sys_syscall /* 4000 */
- PTR sys_exit
- PTR __sys_fork
- PTR sys_read
- PTR sys_write
- PTR sys_open /* 4005 */
- PTR sys_close
- PTR sys_waitpid
- PTR sys_creat
- PTR sys_link
- PTR sys_unlink /* 4010 */
- PTR sys_execve
- PTR sys_chdir
- PTR sys_time
- PTR sys_mknod
- PTR sys_chmod /* 4015 */
- PTR sys_lchown
- PTR sys_ni_syscall
- PTR sys_ni_syscall /* was sys_stat */
- PTR sys_lseek
- PTR sys_getpid /* 4020 */
- PTR sys_mount
- PTR sys_oldumount
- PTR sys_setuid
- PTR sys_getuid
- PTR sys_stime /* 4025 */
- PTR sys_ptrace
- PTR sys_alarm
- PTR sys_ni_syscall /* was sys_fstat */
- PTR sys_pause
- PTR sys_utime /* 4030 */
- PTR sys_ni_syscall
- PTR sys_ni_syscall
- PTR sys_access
- PTR sys_nice
- PTR sys_ni_syscall /* 4035 */
- PTR sys_sync
- PTR sys_kill
- PTR sys_rename
- PTR sys_mkdir
- PTR sys_rmdir /* 4040 */
- PTR sys_dup
- PTR sysm_pipe
- PTR sys_times
- PTR sys_ni_syscall
- PTR sys_brk /* 4045 */
- PTR sys_setgid
- PTR sys_getgid
- PTR sys_ni_syscall /* was signal(2) */
- PTR sys_geteuid
- PTR sys_getegid /* 4050 */
- PTR sys_acct
- PTR sys_umount
- PTR sys_ni_syscall
- PTR sys_ioctl
- PTR sys_fcntl /* 4055 */
- PTR sys_ni_syscall
- PTR sys_setpgid
- PTR sys_ni_syscall
- PTR sys_olduname
- PTR sys_umask /* 4060 */
- PTR sys_chroot
- PTR sys_ustat
- PTR sys_dup2
- PTR sys_getppid
- PTR sys_getpgrp /* 4065 */
- PTR sys_setsid
- PTR sys_sigaction
- PTR sys_sgetmask
- PTR sys_ssetmask
- PTR sys_setreuid /* 4070 */
- PTR sys_setregid
- PTR sys_sigsuspend
- PTR sys_sigpending
- PTR sys_sethostname
- PTR sys_setrlimit /* 4075 */
- PTR sys_getrlimit
- PTR sys_getrusage
- PTR sys_gettimeofday
- PTR sys_settimeofday
- PTR sys_getgroups /* 4080 */
- PTR sys_setgroups
- PTR sys_ni_syscall /* old_select */
- PTR sys_symlink
- PTR sys_ni_syscall /* was sys_lstat */
- PTR sys_readlink /* 4085 */
- PTR sys_uselib
- PTR sys_swapon
- PTR sys_reboot
- PTR sys_old_readdir
- PTR sys_mips_mmap /* 4090 */
- PTR sys_munmap
- PTR sys_truncate
- PTR sys_ftruncate
- PTR sys_fchmod
- PTR sys_fchown /* 4095 */
- PTR sys_getpriority
- PTR sys_setpriority
- PTR sys_ni_syscall
- PTR sys_statfs
- PTR sys_fstatfs /* 4100 */
- PTR sys_ni_syscall /* was ioperm(2) */
- PTR sys_socketcall
- PTR sys_syslog
- PTR sys_setitimer
- PTR sys_getitimer /* 4105 */
- PTR sys_newstat
- PTR sys_newlstat
- PTR sys_newfstat
- PTR sys_uname
- PTR sys_ni_syscall /* 4110 was iopl(2) */
- PTR sys_vhangup
- PTR sys_ni_syscall /* was sys_idle() */
- PTR sys_ni_syscall /* was sys_vm86 */
- PTR sys_wait4
- PTR sys_swapoff /* 4115 */
- PTR sys_sysinfo
- PTR sys_ipc
- PTR sys_fsync
- PTR sys_sigreturn
- PTR __sys_clone /* 4120 */
- PTR sys_setdomainname
- PTR sys_newuname
- PTR sys_ni_syscall /* sys_modify_ldt */
- PTR sys_adjtimex
- PTR sys_mprotect /* 4125 */
- PTR sys_sigprocmask
- PTR sys_ni_syscall /* was create_module */
- PTR sys_init_module
- PTR sys_delete_module
- PTR sys_ni_syscall /* 4130 was get_kernel_syms */
- PTR sys_quotactl
- PTR sys_getpgid
- PTR sys_fchdir
- PTR sys_bdflush
- PTR sys_sysfs /* 4135 */
- PTR sys_personality
- PTR sys_ni_syscall /* for afs_syscall */
- PTR sys_setfsuid
- PTR sys_setfsgid
- PTR sys_llseek /* 4140 */
- PTR sys_getdents
- PTR sys_select
- PTR sys_flock
- PTR sys_msync
- PTR sys_readv /* 4145 */
- PTR sys_writev
- PTR sys_cacheflush
- PTR sys_cachectl
- PTR __sys_sysmips
- PTR sys_ni_syscall /* 4150 */
- PTR sys_getsid
- PTR sys_fdatasync
- PTR sys_sysctl
- PTR sys_mlock
- PTR sys_munlock /* 4155 */
- PTR sys_mlockall
- PTR sys_munlockall
- PTR sys_sched_setparam
- PTR sys_sched_getparam
- PTR sys_sched_setscheduler /* 4160 */
- PTR sys_sched_getscheduler
- PTR sys_sched_yield
- PTR sys_sched_get_priority_max
- PTR sys_sched_get_priority_min
- PTR sys_sched_rr_get_interval /* 4165 */
- PTR sys_nanosleep
- PTR sys_mremap
- PTR sys_accept
- PTR sys_bind
- PTR sys_connect /* 4170 */
- PTR sys_getpeername
- PTR sys_getsockname
- PTR sys_getsockopt
- PTR sys_listen
- PTR sys_recv /* 4175 */
- PTR sys_recvfrom
- PTR sys_recvmsg
- PTR sys_send
- PTR sys_sendmsg
- PTR sys_sendto /* 4180 */
- PTR sys_setsockopt
- PTR sys_shutdown
- PTR sys_socket
- PTR sys_socketpair
- PTR sys_setresuid /* 4185 */
- PTR sys_getresuid
- PTR sys_ni_syscall /* was sys_query_module */
- PTR sys_poll
- PTR sys_ni_syscall /* was nfsservctl */
- PTR sys_setresgid /* 4190 */
- PTR sys_getresgid
- PTR sys_prctl
- PTR sys_rt_sigreturn
- PTR sys_rt_sigaction
- PTR sys_rt_sigprocmask /* 4195 */
- PTR sys_rt_sigpending
- PTR sys_rt_sigtimedwait
- PTR sys_rt_sigqueueinfo
- PTR sys_rt_sigsuspend
- PTR sys_pread64 /* 4200 */
- PTR sys_pwrite64
- PTR sys_chown
- PTR sys_getcwd
- PTR sys_capget
- PTR sys_capset /* 4205 */
- PTR sys_sigaltstack
- PTR sys_sendfile
- PTR sys_ni_syscall
- PTR sys_ni_syscall
- PTR sys_mips_mmap2 /* 4210 */
- PTR sys_truncate64
- PTR sys_ftruncate64
- PTR sys_stat64
- PTR sys_lstat64
- PTR sys_fstat64 /* 4215 */
- PTR sys_pivot_root
- PTR sys_mincore
- PTR sys_madvise
- PTR sys_getdents64
- PTR sys_fcntl64 /* 4220 */
- PTR sys_ni_syscall
- PTR sys_gettid
- PTR sys_readahead
- PTR sys_setxattr
- PTR sys_lsetxattr /* 4225 */
- PTR sys_fsetxattr
- PTR sys_getxattr
- PTR sys_lgetxattr
- PTR sys_fgetxattr
- PTR sys_listxattr /* 4230 */
- PTR sys_llistxattr
- PTR sys_flistxattr
- PTR sys_removexattr
- PTR sys_lremovexattr
- PTR sys_fremovexattr /* 4235 */
- PTR sys_tkill
- PTR sys_sendfile64
- PTR sys_futex
#ifdef CONFIG_MIPS_MT_FPAFF
/*
* For FPU affinity scheduling on MIPS MT processors, we need to
@@ -458,137 +213,13 @@ EXPORT(sys_call_table)
* these hooks for the 32-bit kernel - there is no MIPS64 MT processor
* atm.
*/
- PTR mipsmt_sys_sched_setaffinity
- PTR mipsmt_sys_sched_getaffinity
-#else
- PTR sys_sched_setaffinity
- PTR sys_sched_getaffinity /* 4240 */
+#define sys_sched_setaffinity mipsmt_sys_sched_setaffinity
+#define sys_sched_getaffinity mipsmt_sys_sched_getaffinity
#endif /* CONFIG_MIPS_MT_FPAFF */
- PTR sys_io_setup
- PTR sys_io_destroy
- PTR sys_io_getevents
- PTR sys_io_submit
- PTR sys_io_cancel /* 4245 */
- PTR sys_exit_group
- PTR sys_lookup_dcookie
- PTR sys_epoll_create
- PTR sys_epoll_ctl
- PTR sys_epoll_wait /* 4250 */
- PTR sys_remap_file_pages
- PTR sys_set_tid_address
- PTR sys_restart_syscall
- PTR sys_fadvise64_64
- PTR sys_statfs64 /* 4255 */
- PTR sys_fstatfs64
- PTR sys_timer_create
- PTR sys_timer_settime
- PTR sys_timer_gettime
- PTR sys_timer_getoverrun /* 4260 */
- PTR sys_timer_delete
- PTR sys_clock_settime
- PTR sys_clock_gettime
- PTR sys_clock_getres
- PTR sys_clock_nanosleep /* 4265 */
- PTR sys_tgkill
- PTR sys_utimes
- PTR sys_mbind
- PTR sys_get_mempolicy
- PTR sys_set_mempolicy /* 4270 */
- PTR sys_mq_open
- PTR sys_mq_unlink
- PTR sys_mq_timedsend
- PTR sys_mq_timedreceive
- PTR sys_mq_notify /* 4275 */
- PTR sys_mq_getsetattr
- PTR sys_ni_syscall /* sys_vserver */
- PTR sys_waitid
- PTR sys_ni_syscall /* available, was setaltroot */
- PTR sys_add_key /* 4280 */
- PTR sys_request_key
- PTR sys_keyctl
- PTR sys_set_thread_area
- PTR sys_inotify_init
- PTR sys_inotify_add_watch /* 4285 */
- PTR sys_inotify_rm_watch
- PTR sys_migrate_pages
- PTR sys_openat
- PTR sys_mkdirat
- PTR sys_mknodat /* 4290 */
- PTR sys_fchownat
- PTR sys_futimesat
- PTR sys_fstatat64
- PTR sys_unlinkat
- PTR sys_renameat /* 4295 */
- PTR sys_linkat
- PTR sys_symlinkat
- PTR sys_readlinkat
- PTR sys_fchmodat
- PTR sys_faccessat /* 4300 */
- PTR sys_pselect6
- PTR sys_ppoll
- PTR sys_unshare
- PTR sys_splice
- PTR sys_sync_file_range /* 4305 */
- PTR sys_tee
- PTR sys_vmsplice
- PTR sys_move_pages
- PTR sys_set_robust_list
- PTR sys_get_robust_list /* 4310 */
- PTR sys_kexec_load
- PTR sys_getcpu
- PTR sys_epoll_pwait
- PTR sys_ioprio_set
- PTR sys_ioprio_get /* 4315 */
- PTR sys_utimensat
- PTR sys_signalfd
- PTR sys_ni_syscall /* was timerfd */
- PTR sys_eventfd
- PTR sys_fallocate /* 4320 */
- PTR sys_timerfd_create
- PTR sys_timerfd_gettime
- PTR sys_timerfd_settime
- PTR sys_signalfd4
- PTR sys_eventfd2 /* 4325 */
- PTR sys_epoll_create1
- PTR sys_dup3
- PTR sys_pipe2
- PTR sys_inotify_init1
- PTR sys_preadv /* 4330 */
- PTR sys_pwritev
- PTR sys_rt_tgsigqueueinfo
- PTR sys_perf_event_open
- PTR sys_accept4
- PTR sys_recvmmsg /* 4335 */
- PTR sys_fanotify_init
- PTR sys_fanotify_mark
- PTR sys_prlimit64
- PTR sys_name_to_handle_at
- PTR sys_open_by_handle_at /* 4340 */
- PTR sys_clock_adjtime
- PTR sys_syncfs
- PTR sys_sendmmsg
- PTR sys_setns
- PTR sys_process_vm_readv /* 4345 */
- PTR sys_process_vm_writev
- PTR sys_kcmp
- PTR sys_finit_module
- PTR sys_sched_setattr
- PTR sys_sched_getattr /* 4350 */
- PTR sys_renameat2
- PTR sys_seccomp
- PTR sys_getrandom
- PTR sys_memfd_create
- PTR sys_bpf /* 4355 */
- PTR sys_execveat
- PTR sys_userfaultfd
- PTR sys_membarrier
- PTR sys_mlock2
- PTR sys_copy_file_range /* 4360 */
- PTR sys_preadv2
- PTR sys_pwritev2
- PTR sys_pkey_mprotect
- PTR sys_pkey_alloc
- PTR sys_pkey_free /* 4365 */
- PTR sys_statx
- PTR sys_rseq
- PTR sys_io_pgetevents
+
+#define __SYSCALL(nr, entry, nargs) PTR entry
+ .align 2
+ .type sys_call_table, @object
+EXPORT(sys_call_table)
+#include <asm/syscall_table_32_o32.h>
+#undef __SYSCALL
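
Note: the ~370 hand-maintained PTR lines are now generated; judging by the include name, the source of truth is a syscall_o32.tbl description under arch/mips/kernel/syscalls/ (path assumed). The range checks drop their `+ 1` because the regenerated __NR_O32_Linux_syscalls evidently holds the table size rather than the highest syscall offset. The CONFIG_MIPS_MT_FPAFF hunk swaps the two affinity entries for their MT-aware wrappers by #define before the table is expanded. Roughly (generated contents assumed for illustration):

    #define __SYSCALL(nr, entry, nargs) PTR entry
    /* generated header supplies lines such as:          expands to:  */
    __SYSCALL(4000, sys_syscall, 8)                   /* PTR sys_syscall */
    __SYSCALL(4001, sys_exit, 1)                      /* PTR sys_exit    */
    #undef __SYSCALL
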
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
deleted file mode 100644
index 358d9599983d..000000000000
--- a/arch/mips/kernel/scall64-64.S
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
- * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
- * Copyright (C) 2001 MIPS Technologies, Inc.
- */
-#include <linux/errno.h>
-#include <asm/asm.h>
-#include <asm/asmmacro.h>
-#include <asm/irqflags.h>
-#include <asm/mipsregs.h>
-#include <asm/regdef.h>
-#include <asm/stackframe.h>
-#include <asm/asm-offsets.h>
-#include <asm/sysmips.h>
-#include <asm/thread_info.h>
-#include <asm/unistd.h>
-#include <asm/war.h>
-
-#ifndef CONFIG_BINFMT_ELF32
-/* Neither O32 nor N32, so define handle_sys here */
-#define handle_sys64 handle_sys
-#endif
-
- .align 5
-NESTED(handle_sys64, PT_SIZE, sp)
-#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
- /*
- * When 32-bit compatibility is configured scall_o32.S
- * already did this.
- */
- .set noat
- SAVE_SOME
- TRACE_IRQS_ON_RELOAD
- STI
- .set at
-#endif
-
-#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
- ld t1, PT_EPC(sp) # skip syscall on return
- daddiu t1, 4 # skip to next instruction
- sd t1, PT_EPC(sp)
-#endif
-
- sd a3, PT_R26(sp) # save a3 for syscall restarting
-
- li t1, _TIF_WORK_SYSCALL_ENTRY
- LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
- and t0, t1, t0
- bnez t0, syscall_trace_entry
-
-syscall_common:
- dsubu t2, v0, __NR_64_Linux
- sltiu t0, t2, __NR_64_Linux_syscalls + 1
- beqz t0, illegal_syscall
-
- dsll t0, t2, 3 # offset into table
- dla t2, sys_call_table
- daddu t0, t2, t0
- ld t2, (t0) # syscall routine
- beqz t2, illegal_syscall
-
- jalr t2 # Do The Real Thing (TM)
-
- li t0, -EMAXERRNO - 1 # error?
- sltu t0, t0, v0
- sd t0, PT_R7(sp) # set error flag
- beqz t0, 1f
-
- ld t1, PT_R2(sp) # syscall number
- dnegu v0 # error
- sd t1, PT_R0(sp) # save it for syscall restarting
-1: sd v0, PT_R2(sp) # result
-
-n64_syscall_exit:
- j syscall_exit_partial
-
-/* ------------------------------------------------------------------------ */
-
-syscall_trace_entry:
- SAVE_STATIC
- move a0, sp
- move a1, v0
- jal syscall_trace_enter
-
- bltz v0, 1f # seccomp failed? Skip syscall
-
- RESTORE_STATIC
- ld v0, PT_R2(sp) # Restore syscall (maybe modified)
- ld a0, PT_R4(sp) # Restore argument registers
- ld a1, PT_R5(sp)
- ld a2, PT_R6(sp)
- ld a3, PT_R7(sp)
- ld a4, PT_R8(sp)
- ld a5, PT_R9(sp)
- j syscall_common
-
-1: j syscall_exit
-
-illegal_syscall:
- /* This also isn't a 64-bit syscall, throw an error. */
- li v0, ENOSYS # error
- sd v0, PT_R2(sp)
- li t0, 1 # set error flag
- sd t0, PT_R7(sp)
- j n64_syscall_exit
- END(handle_sys64)
-
- .align 3
- .type sys_call_table, @object
-EXPORT(sys_call_table)
- PTR sys_read /* 5000 */
- PTR sys_write
- PTR sys_open
- PTR sys_close
- PTR sys_newstat
- PTR sys_newfstat /* 5005 */
- PTR sys_newlstat
- PTR sys_poll
- PTR sys_lseek
- PTR sys_mips_mmap
- PTR sys_mprotect /* 5010 */
- PTR sys_munmap
- PTR sys_brk
- PTR sys_rt_sigaction
- PTR sys_rt_sigprocmask
- PTR sys_ioctl /* 5015 */
- PTR sys_pread64
- PTR sys_pwrite64
- PTR sys_readv
- PTR sys_writev
- PTR sys_access /* 5020 */
- PTR sysm_pipe
- PTR sys_select
- PTR sys_sched_yield
- PTR sys_mremap
- PTR sys_msync /* 5025 */
- PTR sys_mincore
- PTR sys_madvise
- PTR sys_shmget
- PTR sys_shmat
- PTR sys_shmctl /* 5030 */
- PTR sys_dup
- PTR sys_dup2
- PTR sys_pause
- PTR sys_nanosleep
- PTR sys_getitimer /* 5035 */
- PTR sys_setitimer
- PTR sys_alarm
- PTR sys_getpid
- PTR sys_sendfile64
- PTR sys_socket /* 5040 */
- PTR sys_connect
- PTR sys_accept
- PTR sys_sendto
- PTR sys_recvfrom
- PTR sys_sendmsg /* 5045 */
- PTR sys_recvmsg
- PTR sys_shutdown
- PTR sys_bind
- PTR sys_listen
- PTR sys_getsockname /* 5050 */
- PTR sys_getpeername
- PTR sys_socketpair
- PTR sys_setsockopt
- PTR sys_getsockopt
- PTR __sys_clone /* 5055 */
- PTR __sys_fork
- PTR sys_execve
- PTR sys_exit
- PTR sys_wait4
- PTR sys_kill /* 5060 */
- PTR sys_newuname
- PTR sys_semget
- PTR sys_semop
- PTR sys_semctl
- PTR sys_shmdt /* 5065 */
- PTR sys_msgget
- PTR sys_msgsnd
- PTR sys_msgrcv
- PTR sys_msgctl
- PTR sys_fcntl /* 5070 */
- PTR sys_flock
- PTR sys_fsync
- PTR sys_fdatasync
- PTR sys_truncate
- PTR sys_ftruncate /* 5075 */
- PTR sys_getdents
- PTR sys_getcwd
- PTR sys_chdir
- PTR sys_fchdir
- PTR sys_rename /* 5080 */
- PTR sys_mkdir
- PTR sys_rmdir
- PTR sys_creat
- PTR sys_link
- PTR sys_unlink /* 5085 */
- PTR sys_symlink
- PTR sys_readlink
- PTR sys_chmod
- PTR sys_fchmod
- PTR sys_chown /* 5090 */
- PTR sys_fchown
- PTR sys_lchown
- PTR sys_umask
- PTR sys_gettimeofday
- PTR sys_getrlimit /* 5095 */
- PTR sys_getrusage
- PTR sys_sysinfo
- PTR sys_times
- PTR sys_ptrace
- PTR sys_getuid /* 5100 */
- PTR sys_syslog
- PTR sys_getgid
- PTR sys_setuid
- PTR sys_setgid
- PTR sys_geteuid /* 5105 */
- PTR sys_getegid
- PTR sys_setpgid
- PTR sys_getppid
- PTR sys_getpgrp
- PTR sys_setsid /* 5110 */
- PTR sys_setreuid
- PTR sys_setregid
- PTR sys_getgroups
- PTR sys_setgroups
- PTR sys_setresuid /* 5115 */
- PTR sys_getresuid
- PTR sys_setresgid
- PTR sys_getresgid
- PTR sys_getpgid
- PTR sys_setfsuid /* 5120 */
- PTR sys_setfsgid
- PTR sys_getsid
- PTR sys_capget
- PTR sys_capset
- PTR sys_rt_sigpending /* 5125 */
- PTR sys_rt_sigtimedwait
- PTR sys_rt_sigqueueinfo
- PTR sys_rt_sigsuspend
- PTR sys_sigaltstack
- PTR sys_utime /* 5130 */
- PTR sys_mknod
- PTR sys_personality
- PTR sys_ustat
- PTR sys_statfs
- PTR sys_fstatfs /* 5135 */
- PTR sys_sysfs
- PTR sys_getpriority
- PTR sys_setpriority
- PTR sys_sched_setparam
- PTR sys_sched_getparam /* 5140 */
- PTR sys_sched_setscheduler
- PTR sys_sched_getscheduler
- PTR sys_sched_get_priority_max
- PTR sys_sched_get_priority_min
- PTR sys_sched_rr_get_interval /* 5145 */
- PTR sys_mlock
- PTR sys_munlock
- PTR sys_mlockall
- PTR sys_munlockall
- PTR sys_vhangup /* 5150 */
- PTR sys_pivot_root
- PTR sys_sysctl
- PTR sys_prctl
- PTR sys_adjtimex
- PTR sys_setrlimit /* 5155 */
- PTR sys_chroot
- PTR sys_sync
- PTR sys_acct
- PTR sys_settimeofday
- PTR sys_mount /* 5160 */
- PTR sys_umount
- PTR sys_swapon
- PTR sys_swapoff
- PTR sys_reboot
- PTR sys_sethostname /* 5165 */
- PTR sys_setdomainname
- PTR sys_ni_syscall /* was create_module */
- PTR sys_init_module
- PTR sys_delete_module
- PTR sys_ni_syscall /* 5170, was get_kernel_syms */
- PTR sys_ni_syscall /* was query_module */
- PTR sys_quotactl
- PTR sys_ni_syscall /* was nfsservctl */
- PTR sys_ni_syscall /* res. for getpmsg */
- PTR sys_ni_syscall /* 5175 for putpmsg */
- PTR sys_ni_syscall /* res. for afs_syscall */
- PTR sys_ni_syscall /* res. for security */
- PTR sys_gettid
- PTR sys_readahead
- PTR sys_setxattr /* 5180 */
- PTR sys_lsetxattr
- PTR sys_fsetxattr
- PTR sys_getxattr
- PTR sys_lgetxattr
- PTR sys_fgetxattr /* 5185 */
- PTR sys_listxattr
- PTR sys_llistxattr
- PTR sys_flistxattr
- PTR sys_removexattr
- PTR sys_lremovexattr /* 5190 */
- PTR sys_fremovexattr
- PTR sys_tkill
- PTR sys_ni_syscall
- PTR sys_futex
- PTR sys_sched_setaffinity /* 5195 */
- PTR sys_sched_getaffinity
- PTR sys_cacheflush
- PTR sys_cachectl
- PTR __sys_sysmips
- PTR sys_io_setup /* 5200 */
- PTR sys_io_destroy
- PTR sys_io_getevents
- PTR sys_io_submit
- PTR sys_io_cancel
- PTR sys_exit_group /* 5205 */
- PTR sys_lookup_dcookie
- PTR sys_epoll_create
- PTR sys_epoll_ctl
- PTR sys_epoll_wait
- PTR sys_remap_file_pages /* 5210 */
- PTR sys_rt_sigreturn
- PTR sys_set_tid_address
- PTR sys_restart_syscall
- PTR sys_semtimedop
- PTR sys_fadvise64_64 /* 5215 */
- PTR sys_timer_create
- PTR sys_timer_settime
- PTR sys_timer_gettime
- PTR sys_timer_getoverrun
- PTR sys_timer_delete /* 5220 */
- PTR sys_clock_settime
- PTR sys_clock_gettime
- PTR sys_clock_getres
- PTR sys_clock_nanosleep
- PTR sys_tgkill /* 5225 */
- PTR sys_utimes
- PTR sys_mbind
- PTR sys_get_mempolicy
- PTR sys_set_mempolicy
- PTR sys_mq_open /* 5230 */
- PTR sys_mq_unlink
- PTR sys_mq_timedsend
- PTR sys_mq_timedreceive
- PTR sys_mq_notify
- PTR sys_mq_getsetattr /* 5235 */
- PTR sys_ni_syscall /* sys_vserver */
- PTR sys_waitid
- PTR sys_ni_syscall /* available, was setaltroot */
- PTR sys_add_key
- PTR sys_request_key /* 5240 */
- PTR sys_keyctl
- PTR sys_set_thread_area
- PTR sys_inotify_init
- PTR sys_inotify_add_watch
- PTR sys_inotify_rm_watch /* 5245 */
- PTR sys_migrate_pages
- PTR sys_openat
- PTR sys_mkdirat
- PTR sys_mknodat
- PTR sys_fchownat /* 5250 */
- PTR sys_futimesat
- PTR sys_newfstatat
- PTR sys_unlinkat
- PTR sys_renameat
- PTR sys_linkat /* 5255 */
- PTR sys_symlinkat
- PTR sys_readlinkat
- PTR sys_fchmodat
- PTR sys_faccessat
- PTR sys_pselect6 /* 5260 */
- PTR sys_ppoll
- PTR sys_unshare
- PTR sys_splice
- PTR sys_sync_file_range
- PTR sys_tee /* 5265 */
- PTR sys_vmsplice
- PTR sys_move_pages
- PTR sys_set_robust_list
- PTR sys_get_robust_list
- PTR sys_kexec_load /* 5270 */
- PTR sys_getcpu
- PTR sys_epoll_pwait
- PTR sys_ioprio_set
- PTR sys_ioprio_get
- PTR sys_utimensat /* 5275 */
- PTR sys_signalfd
- PTR sys_ni_syscall /* was timerfd */
- PTR sys_eventfd
- PTR sys_fallocate
- PTR sys_timerfd_create /* 5280 */
- PTR sys_timerfd_gettime
- PTR sys_timerfd_settime
- PTR sys_signalfd4
- PTR sys_eventfd2
- PTR sys_epoll_create1 /* 5285 */
- PTR sys_dup3
- PTR sys_pipe2
- PTR sys_inotify_init1
- PTR sys_preadv
- PTR sys_pwritev /* 5290 */
- PTR sys_rt_tgsigqueueinfo
- PTR sys_perf_event_open
- PTR sys_accept4
- PTR sys_recvmmsg
- PTR sys_fanotify_init /* 5295 */
- PTR sys_fanotify_mark
- PTR sys_prlimit64
- PTR sys_name_to_handle_at
- PTR sys_open_by_handle_at
- PTR sys_clock_adjtime /* 5300 */
- PTR sys_syncfs
- PTR sys_sendmmsg
- PTR sys_setns
- PTR sys_process_vm_readv
- PTR sys_process_vm_writev /* 5305 */
- PTR sys_kcmp
- PTR sys_finit_module
- PTR sys_getdents64
- PTR sys_sched_setattr
- PTR sys_sched_getattr /* 5310 */
- PTR sys_renameat2
- PTR sys_seccomp
- PTR sys_getrandom
- PTR sys_memfd_create
- PTR sys_bpf /* 5315 */
- PTR sys_execveat
- PTR sys_userfaultfd
- PTR sys_membarrier
- PTR sys_mlock2
- PTR sys_copy_file_range /* 5320 */
- PTR sys_preadv2
- PTR sys_pwritev2
- PTR sys_pkey_mprotect
- PTR sys_pkey_alloc
- PTR sys_pkey_free /* 5325 */
- PTR sys_statx
- PTR sys_rseq
- PTR sys_io_pgetevents
- .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index c65eaacc1abf..c761ddfed9e6 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -33,7 +33,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
#endif
dsubu t0, v0, __NR_N32_Linux # check syscall number
- sltiu t0, t0, __NR_N32_Linux_syscalls + 1
+ sltiu t0, t0, __NR_N32_Linux_syscalls
#ifndef CONFIG_MIPS32_O32
ld t1, PT_EPC(sp) # skip syscall on return
@@ -87,7 +87,7 @@ n32_syscall_trace_entry:
ld a5, PT_R9(sp)
dsubu t2, v0, __NR_N32_Linux # check (new) syscall number
- sltiu t0, t2, __NR_N32_Linux_syscalls + 1
+ sltiu t0, t2, __NR_N32_Linux_syscalls
beqz t0, not_n32_scall
j syscall_common
@@ -101,339 +101,8 @@ not_n32_scall:
END(handle_sysn32)
+#define __SYSCALL(nr, entry, nargs) PTR entry
.type sysn32_call_table, @object
EXPORT(sysn32_call_table)
- PTR sys_read /* 6000 */
- PTR sys_write
- PTR sys_open
- PTR sys_close
- PTR sys_newstat
- PTR sys_newfstat /* 6005 */
- PTR sys_newlstat
- PTR sys_poll
- PTR sys_lseek
- PTR sys_mips_mmap
- PTR sys_mprotect /* 6010 */
- PTR sys_munmap
- PTR sys_brk
- PTR compat_sys_rt_sigaction
- PTR compat_sys_rt_sigprocmask
- PTR compat_sys_ioctl /* 6015 */
- PTR sys_pread64
- PTR sys_pwrite64
- PTR compat_sys_readv
- PTR compat_sys_writev
- PTR sys_access /* 6020 */
- PTR sysm_pipe
- PTR compat_sys_select
- PTR sys_sched_yield
- PTR sys_mremap
- PTR sys_msync /* 6025 */
- PTR sys_mincore
- PTR sys_madvise
- PTR sys_shmget
- PTR sys_shmat
- PTR compat_sys_shmctl /* 6030 */
- PTR sys_dup
- PTR sys_dup2
- PTR sys_pause
- PTR compat_sys_nanosleep
- PTR compat_sys_getitimer /* 6035 */
- PTR compat_sys_setitimer
- PTR sys_alarm
- PTR sys_getpid
- PTR compat_sys_sendfile
- PTR sys_socket /* 6040 */
- PTR sys_connect
- PTR sys_accept
- PTR sys_sendto
- PTR compat_sys_recvfrom
- PTR compat_sys_sendmsg /* 6045 */
- PTR compat_sys_recvmsg
- PTR sys_shutdown
- PTR sys_bind
- PTR sys_listen
- PTR sys_getsockname /* 6050 */
- PTR sys_getpeername
- PTR sys_socketpair
- PTR compat_sys_setsockopt
- PTR compat_sys_getsockopt
- PTR __sys_clone /* 6055 */
- PTR __sys_fork
- PTR compat_sys_execve
- PTR sys_exit
- PTR compat_sys_wait4
- PTR sys_kill /* 6060 */
- PTR sys_newuname
- PTR sys_semget
- PTR sys_semop
- PTR compat_sys_semctl
- PTR sys_shmdt /* 6065 */
- PTR sys_msgget
- PTR compat_sys_msgsnd
- PTR compat_sys_msgrcv
- PTR compat_sys_msgctl
- PTR compat_sys_fcntl /* 6070 */
- PTR sys_flock
- PTR sys_fsync
- PTR sys_fdatasync
- PTR sys_truncate
- PTR sys_ftruncate /* 6075 */
- PTR compat_sys_getdents
- PTR sys_getcwd
- PTR sys_chdir
- PTR sys_fchdir
- PTR sys_rename /* 6080 */
- PTR sys_mkdir
- PTR sys_rmdir
- PTR sys_creat
- PTR sys_link
- PTR sys_unlink /* 6085 */
- PTR sys_symlink
- PTR sys_readlink
- PTR sys_chmod
- PTR sys_fchmod
- PTR sys_chown /* 6090 */
- PTR sys_fchown
- PTR sys_lchown
- PTR sys_umask
- PTR compat_sys_gettimeofday
- PTR compat_sys_getrlimit /* 6095 */
- PTR compat_sys_getrusage
- PTR compat_sys_sysinfo
- PTR compat_sys_times
- PTR compat_sys_ptrace
- PTR sys_getuid /* 6100 */
- PTR sys_syslog
- PTR sys_getgid
- PTR sys_setuid
- PTR sys_setgid
- PTR sys_geteuid /* 6105 */
- PTR sys_getegid
- PTR sys_setpgid
- PTR sys_getppid
- PTR sys_getpgrp
- PTR sys_setsid /* 6110 */
- PTR sys_setreuid
- PTR sys_setregid
- PTR sys_getgroups
- PTR sys_setgroups
- PTR sys_setresuid /* 6115 */
- PTR sys_getresuid
- PTR sys_setresgid
- PTR sys_getresgid
- PTR sys_getpgid
- PTR sys_setfsuid /* 6120 */
- PTR sys_setfsgid
- PTR sys_getsid
- PTR sys_capget
- PTR sys_capset
- PTR compat_sys_rt_sigpending /* 6125 */
- PTR compat_sys_rt_sigtimedwait
- PTR compat_sys_rt_sigqueueinfo
- PTR compat_sys_rt_sigsuspend
- PTR compat_sys_sigaltstack
- PTR compat_sys_utime /* 6130 */
- PTR sys_mknod
- PTR sys_32_personality
- PTR compat_sys_ustat
- PTR compat_sys_statfs
- PTR compat_sys_fstatfs /* 6135 */
- PTR sys_sysfs
- PTR sys_getpriority
- PTR sys_setpriority
- PTR sys_sched_setparam
- PTR sys_sched_getparam /* 6140 */
- PTR sys_sched_setscheduler
- PTR sys_sched_getscheduler
- PTR sys_sched_get_priority_max
- PTR sys_sched_get_priority_min
- PTR compat_sys_sched_rr_get_interval /* 6145 */
- PTR sys_mlock
- PTR sys_munlock
- PTR sys_mlockall
- PTR sys_munlockall
- PTR sys_vhangup /* 6150 */
- PTR sys_pivot_root
- PTR compat_sys_sysctl
- PTR sys_prctl
- PTR compat_sys_adjtimex
- PTR compat_sys_setrlimit /* 6155 */
- PTR sys_chroot
- PTR sys_sync
- PTR sys_acct
- PTR compat_sys_settimeofday
- PTR compat_sys_mount /* 6160 */
- PTR sys_umount
- PTR sys_swapon
- PTR sys_swapoff
- PTR sys_reboot
- PTR sys_sethostname /* 6165 */
- PTR sys_setdomainname
- PTR sys_ni_syscall /* was create_module */
- PTR sys_init_module
- PTR sys_delete_module
- PTR sys_ni_syscall /* 6170, was get_kernel_syms */
- PTR sys_ni_syscall /* was query_module */
- PTR sys_quotactl
- PTR sys_ni_syscall /* was nfsservctl */
- PTR sys_ni_syscall /* res. for getpmsg */
- PTR sys_ni_syscall /* 6175 for putpmsg */
- PTR sys_ni_syscall /* res. for afs_syscall */
- PTR sys_ni_syscall /* res. for security */
- PTR sys_gettid
- PTR sys_readahead
- PTR sys_setxattr /* 6180 */
- PTR sys_lsetxattr
- PTR sys_fsetxattr
- PTR sys_getxattr
- PTR sys_lgetxattr
- PTR sys_fgetxattr /* 6185 */
- PTR sys_listxattr
- PTR sys_llistxattr
- PTR sys_flistxattr
- PTR sys_removexattr
- PTR sys_lremovexattr /* 6190 */
- PTR sys_fremovexattr
- PTR sys_tkill
- PTR sys_ni_syscall
- PTR compat_sys_futex
- PTR compat_sys_sched_setaffinity /* 6195 */
- PTR compat_sys_sched_getaffinity
- PTR sys_cacheflush
- PTR sys_cachectl
- PTR __sys_sysmips
- PTR compat_sys_io_setup /* 6200 */
- PTR sys_io_destroy
- PTR compat_sys_io_getevents
- PTR compat_sys_io_submit
- PTR sys_io_cancel
- PTR sys_exit_group /* 6205 */
- PTR sys_lookup_dcookie
- PTR sys_epoll_create
- PTR sys_epoll_ctl
- PTR sys_epoll_wait
- PTR sys_remap_file_pages /* 6210 */
- PTR sysn32_rt_sigreturn
- PTR compat_sys_fcntl64
- PTR sys_set_tid_address
- PTR sys_restart_syscall
- PTR compat_sys_semtimedop /* 6215 */
- PTR sys_fadvise64_64
- PTR compat_sys_statfs64
- PTR compat_sys_fstatfs64
- PTR sys_sendfile64
- PTR compat_sys_timer_create /* 6220 */
- PTR compat_sys_timer_settime
- PTR compat_sys_timer_gettime
- PTR sys_timer_getoverrun
- PTR sys_timer_delete
- PTR compat_sys_clock_settime /* 6225 */
- PTR compat_sys_clock_gettime
- PTR compat_sys_clock_getres
- PTR compat_sys_clock_nanosleep
- PTR sys_tgkill
- PTR compat_sys_utimes /* 6230 */
- PTR compat_sys_mbind
- PTR compat_sys_get_mempolicy
- PTR compat_sys_set_mempolicy
- PTR compat_sys_mq_open
- PTR sys_mq_unlink /* 6235 */
- PTR compat_sys_mq_timedsend
- PTR compat_sys_mq_timedreceive
- PTR compat_sys_mq_notify
- PTR compat_sys_mq_getsetattr
- PTR sys_ni_syscall /* 6240, sys_vserver */
- PTR compat_sys_waitid
- PTR sys_ni_syscall /* available, was setaltroot */
- PTR sys_add_key
- PTR sys_request_key
- PTR compat_sys_keyctl /* 6245 */
- PTR sys_set_thread_area
- PTR sys_inotify_init
- PTR sys_inotify_add_watch
- PTR sys_inotify_rm_watch
- PTR compat_sys_migrate_pages /* 6250 */
- PTR sys_openat
- PTR sys_mkdirat
- PTR sys_mknodat
- PTR sys_fchownat
- PTR compat_sys_futimesat /* 6255 */
- PTR sys_newfstatat
- PTR sys_unlinkat
- PTR sys_renameat
- PTR sys_linkat
- PTR sys_symlinkat /* 6260 */
- PTR sys_readlinkat
- PTR sys_fchmodat
- PTR sys_faccessat
- PTR compat_sys_pselect6
- PTR compat_sys_ppoll /* 6265 */
- PTR sys_unshare
- PTR sys_splice
- PTR sys_sync_file_range
- PTR sys_tee
- PTR compat_sys_vmsplice /* 6270 */
- PTR compat_sys_move_pages
- PTR compat_sys_set_robust_list
- PTR compat_sys_get_robust_list
- PTR compat_sys_kexec_load
- PTR sys_getcpu /* 6275 */
- PTR compat_sys_epoll_pwait
- PTR sys_ioprio_set
- PTR sys_ioprio_get
- PTR compat_sys_utimensat
- PTR compat_sys_signalfd /* 6280 */
- PTR sys_ni_syscall /* was timerfd */
- PTR sys_eventfd
- PTR sys_fallocate
- PTR sys_timerfd_create
- PTR compat_sys_timerfd_gettime /* 6285 */
- PTR compat_sys_timerfd_settime
- PTR compat_sys_signalfd4
- PTR sys_eventfd2
- PTR sys_epoll_create1
- PTR sys_dup3 /* 6290 */
- PTR sys_pipe2
- PTR sys_inotify_init1
- PTR compat_sys_preadv
- PTR compat_sys_pwritev
- PTR compat_sys_rt_tgsigqueueinfo /* 6295 */
- PTR sys_perf_event_open
- PTR sys_accept4
- PTR compat_sys_recvmmsg
- PTR sys_getdents64
- PTR sys_fanotify_init /* 6300 */
- PTR sys_fanotify_mark
- PTR sys_prlimit64
- PTR sys_name_to_handle_at
- PTR sys_open_by_handle_at
- PTR compat_sys_clock_adjtime /* 6305 */
- PTR sys_syncfs
- PTR compat_sys_sendmmsg
- PTR sys_setns
- PTR compat_sys_process_vm_readv
- PTR compat_sys_process_vm_writev /* 6310 */
- PTR sys_kcmp
- PTR sys_finit_module
- PTR sys_sched_setattr
- PTR sys_sched_getattr
- PTR sys_renameat2 /* 6315 */
- PTR sys_seccomp
- PTR sys_getrandom
- PTR sys_memfd_create
- PTR sys_bpf
- PTR compat_sys_execveat /* 6320 */
- PTR sys_userfaultfd
- PTR sys_membarrier
- PTR sys_mlock2
- PTR sys_copy_file_range
- PTR compat_sys_preadv2 /* 6325 */
- PTR compat_sys_pwritev2
- PTR sys_pkey_mprotect
- PTR sys_pkey_alloc
- PTR sys_pkey_free
- PTR sys_statx /* 6330 */
- PTR sys_rseq
- PTR compat_sys_io_pgetevents
- .size sysn32_call_table,.-sysn32_call_table
+#include <asm/syscall_table_64_n32.h>
+#undef __SYSCALL
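
Note: as with O32, the N32 table body moves to a generated header; the compat_sys_* entry points survive because the per-ABI tbl file names the handler for each syscall directly. Illustrative lines, format assumed from the kernel's syscall.tbl convention:

    # number  abi   name            entry point
    0         n32   read            sys_read
    13        n32   rt_sigaction    compat_sys_rt_sigaction
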
diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S
new file mode 100644
index 000000000000..727fb8a1b0eb
--- /dev/null
+++ b/arch/mips/kernel/scall64-n64.S
@@ -0,0 +1,117 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/irqflags.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+#include <asm/sysmips.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <asm/war.h>
+
+#ifndef CONFIG_BINFMT_ELF32
+/* Neither O32 nor N32, so define handle_sys here */
+#define handle_sys64 handle_sys
+#endif
+
+ .align 5
+NESTED(handle_sys64, PT_SIZE, sp)
+#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
+ /*
+ * When 32-bit compatibility is configured scall_o32.S
+ * already did this.
+ */
+ .set noat
+ SAVE_SOME
+ TRACE_IRQS_ON_RELOAD
+ STI
+ .set at
+#endif
+
+#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
+ ld t1, PT_EPC(sp) # skip syscall on return
+ daddiu t1, 4 # skip to next instruction
+ sd t1, PT_EPC(sp)
+#endif
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, syscall_trace_entry
+
+syscall_common:
+ dsubu t2, v0, __NR_64_Linux
+ sltiu t0, t2, __NR_64_Linux_syscalls
+ beqz t0, illegal_syscall
+
+ dsll t0, t2, 3 # offset into table
+ dla t2, sys_call_table
+ daddu t0, t2, t0
+ ld t2, (t0) # syscall routine
+ beqz t2, illegal_syscall
+
+ jalr t2 # Do The Real Thing (TM)
+
+ li t0, -EMAXERRNO - 1 # error?
+ sltu t0, t0, v0
+ sd t0, PT_R7(sp) # set error flag
+ beqz t0, 1f
+
+ ld t1, PT_R2(sp) # syscall number
+ dnegu v0 # error
+ sd t1, PT_R0(sp) # save it for syscall restarting
+1: sd v0, PT_R2(sp) # result
+
+n64_syscall_exit:
+ j syscall_exit_partial
+
+/* ------------------------------------------------------------------------ */
+
+syscall_trace_entry:
+ SAVE_STATIC
+ move a0, sp
+ move a1, v0
+ jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+ RESTORE_STATIC
+ ld v0, PT_R2(sp) # Restore syscall (maybe modified)
+ ld a0, PT_R4(sp) # Restore argument registers
+ ld a1, PT_R5(sp)
+ ld a2, PT_R6(sp)
+ ld a3, PT_R7(sp)
+ ld a4, PT_R8(sp)
+ ld a5, PT_R9(sp)
+ j syscall_common
+
+1: j syscall_exit
+
+illegal_syscall:
+ /* This also isn't a 64-bit syscall, throw an error. */
+ li v0, ENOSYS # error
+ sd v0, PT_R2(sp)
+ li t0, 1 # set error flag
+ sd t0, PT_R7(sp)
+ j n64_syscall_exit
+ END(handle_sys64)
+
+#define __SYSCALL(nr, entry, nargs) PTR entry
+ .align 3
+ .type sys_call_table, @object
+EXPORT(sys_call_table)
+#include <asm/syscall_table_64_n64.h>
+#undef __SYSCALL
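
Note: the new file carries over the n64 handler unchanged from the deleted scall64-64.S; only the range check and the generated table differ. The `li t0, -EMAXERRNO - 1; sltu t0, t0, v0` sequence flags return values in the top errno range as failures, with a3 (PT_R7) as the error flag and v0 negated to a positive errno. Sketch of the user-visible convention:

    /* How a libc-style wrapper interprets the (v0, a3) pair set above. */
    static long mips_syscall_result(long v0, long a3)
    {
        return a3 ? -v0 : v0;   /* real wrappers set errno and return -1 */
    }
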
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 73913f072e39..f158c5894a9a 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -34,7 +34,7 @@ NESTED(handle_sys, PT_SIZE, sp)
ld t1, PT_EPC(sp) # skip syscall on return
dsubu t0, v0, __NR_O32_Linux # check syscall number
- sltiu t0, t0, __NR_O32_Linux_syscalls + 1
+ sltiu t0, t0, __NR_O32_Linux_syscalls
daddiu t1, 4 # skip to next instruction
sd t1, PT_EPC(sp)
beqz t0, not_o32_scall
@@ -144,7 +144,7 @@ trace_a_syscall:
ld a7, PT_R11(sp) # For indirect syscalls
dsubu t0, v0, __NR_O32_Linux # check (new) syscall number
- sltiu t0, t0, __NR_O32_Linux_syscalls + 1
+ sltiu t0, t0, __NR_O32_Linux_syscalls
beqz t0, not_o32_scall
j syscall_common
@@ -193,7 +193,7 @@ not_o32_scall:
LEAF(sys32_syscall)
subu t0, a0, __NR_O32_Linux # check syscall number
- sltiu v0, t0, __NR_O32_Linux_syscalls + 1
+ sltiu v0, t0, __NR_O32_Linux_syscalls
beqz t0, einval # do not recurse
dsll t1, t0, 3
beqz v0, einval
@@ -213,376 +213,9 @@ einval: li v0, -ENOSYS
jr ra
END(sys32_syscall)
+#define __SYSCALL(nr, entry, nargs) PTR entry
.align 3
.type sys32_call_table,@object
EXPORT(sys32_call_table)
- PTR sys32_syscall /* 4000 */
- PTR sys_exit
- PTR __sys_fork
- PTR sys_read
- PTR sys_write
- PTR compat_sys_open /* 4005 */
- PTR sys_close
- PTR sys_waitpid
- PTR sys_creat
- PTR sys_link
- PTR sys_unlink /* 4010 */
- PTR compat_sys_execve
- PTR sys_chdir
- PTR compat_sys_time
- PTR sys_mknod
- PTR sys_chmod /* 4015 */
- PTR sys_lchown
- PTR sys_ni_syscall
- PTR sys_ni_syscall /* was sys_stat */
- PTR sys_lseek
- PTR sys_getpid /* 4020 */
- PTR compat_sys_mount
- PTR sys_oldumount
- PTR sys_setuid
- PTR sys_getuid
- PTR compat_sys_stime /* 4025 */
- PTR compat_sys_ptrace
- PTR sys_alarm
- PTR sys_ni_syscall /* was sys_fstat */
- PTR sys_pause
- PTR compat_sys_utime /* 4030 */
- PTR sys_ni_syscall
- PTR sys_ni_syscall
- PTR sys_access
- PTR sys_nice
- PTR sys_ni_syscall /* 4035 */
- PTR sys_sync
- PTR sys_kill
- PTR sys_rename
- PTR sys_mkdir
- PTR sys_rmdir /* 4040 */
- PTR sys_dup
- PTR sysm_pipe
- PTR compat_sys_times
- PTR sys_ni_syscall
- PTR sys_brk /* 4045 */
- PTR sys_setgid
- PTR sys_getgid
- PTR sys_ni_syscall /* was signal 2 */
- PTR sys_geteuid
- PTR sys_getegid /* 4050 */
- PTR sys_acct
- PTR sys_umount
- PTR sys_ni_syscall
- PTR compat_sys_ioctl
- PTR compat_sys_fcntl /* 4055 */
- PTR sys_ni_syscall
- PTR sys_setpgid
- PTR sys_ni_syscall
- PTR sys_olduname
- PTR sys_umask /* 4060 */
- PTR sys_chroot
- PTR compat_sys_ustat
- PTR sys_dup2
- PTR sys_getppid
- PTR sys_getpgrp /* 4065 */
- PTR sys_setsid
- PTR sys_32_sigaction
- PTR sys_sgetmask
- PTR sys_ssetmask
- PTR sys_setreuid /* 4070 */
- PTR sys_setregid
- PTR sys32_sigsuspend
- PTR compat_sys_sigpending
- PTR sys_sethostname
- PTR compat_sys_setrlimit /* 4075 */
- PTR compat_sys_getrlimit
- PTR compat_sys_getrusage
- PTR compat_sys_gettimeofday
- PTR compat_sys_settimeofday
- PTR sys_getgroups /* 4080 */
- PTR sys_setgroups
- PTR sys_ni_syscall /* old_select */
- PTR sys_symlink
- PTR sys_ni_syscall /* was sys_lstat */
- PTR sys_readlink /* 4085 */
- PTR sys_uselib
- PTR sys_swapon
- PTR sys_reboot
- PTR compat_sys_old_readdir
- PTR sys_mips_mmap /* 4090 */
- PTR sys_munmap
- PTR compat_sys_truncate
- PTR compat_sys_ftruncate
- PTR sys_fchmod
- PTR sys_fchown /* 4095 */
- PTR sys_getpriority
- PTR sys_setpriority
- PTR sys_ni_syscall
- PTR compat_sys_statfs
- PTR compat_sys_fstatfs /* 4100 */
- PTR sys_ni_syscall /* sys_ioperm */
- PTR compat_sys_socketcall
- PTR sys_syslog
- PTR compat_sys_setitimer
- PTR compat_sys_getitimer /* 4105 */
- PTR compat_sys_newstat
- PTR compat_sys_newlstat
- PTR compat_sys_newfstat
- PTR sys_uname
- PTR sys_ni_syscall /* sys_ioperm *//* 4110 */
- PTR sys_vhangup
- PTR sys_ni_syscall /* was sys_idle */
- PTR sys_ni_syscall /* sys_vm86 */
- PTR compat_sys_wait4
- PTR sys_swapoff /* 4115 */
- PTR compat_sys_sysinfo
- PTR compat_sys_ipc
- PTR sys_fsync
- PTR sys32_sigreturn
- PTR __sys_clone /* 4120 */
- PTR sys_setdomainname
- PTR sys_newuname
- PTR sys_ni_syscall /* sys_modify_ldt */
- PTR compat_sys_adjtimex
- PTR sys_mprotect /* 4125 */
- PTR compat_sys_sigprocmask
- PTR sys_ni_syscall /* was creat_module */
- PTR sys_init_module
- PTR sys_delete_module
- PTR sys_ni_syscall /* 4130, get_kernel_syms */
- PTR sys_quotactl
- PTR sys_getpgid
- PTR sys_fchdir
- PTR sys_bdflush
- PTR sys_sysfs /* 4135 */
- PTR sys_32_personality
- PTR sys_ni_syscall /* for afs_syscall */
- PTR sys_setfsuid
- PTR sys_setfsgid
- PTR sys_32_llseek /* 4140 */
- PTR compat_sys_getdents
- PTR compat_sys_select
- PTR sys_flock
- PTR sys_msync
- PTR compat_sys_readv /* 4145 */
- PTR compat_sys_writev
- PTR sys_cacheflush
- PTR sys_cachectl
- PTR __sys_sysmips
- PTR sys_ni_syscall /* 4150 */
- PTR sys_getsid
- PTR sys_fdatasync
- PTR compat_sys_sysctl
- PTR sys_mlock
- PTR sys_munlock /* 4155 */
- PTR sys_mlockall
- PTR sys_munlockall
- PTR sys_sched_setparam
- PTR sys_sched_getparam
- PTR sys_sched_setscheduler /* 4160 */
- PTR sys_sched_getscheduler
- PTR sys_sched_yield
- PTR sys_sched_get_priority_max
- PTR sys_sched_get_priority_min
- PTR compat_sys_sched_rr_get_interval /* 4165 */
- PTR compat_sys_nanosleep
- PTR sys_mremap
- PTR sys_accept
- PTR sys_bind
- PTR sys_connect /* 4170 */
- PTR sys_getpeername
- PTR sys_getsockname
- PTR compat_sys_getsockopt
- PTR sys_listen
- PTR compat_sys_recv /* 4175 */
- PTR compat_sys_recvfrom
- PTR compat_sys_recvmsg
- PTR sys_send
- PTR compat_sys_sendmsg
- PTR sys_sendto /* 4180 */
- PTR compat_sys_setsockopt
- PTR sys_shutdown
- PTR sys_socket
- PTR sys_socketpair
- PTR sys_setresuid /* 4185 */
- PTR sys_getresuid
- PTR sys_ni_syscall /* was query_module */
- PTR sys_poll
- PTR sys_ni_syscall /* was nfsservctl */
- PTR sys_setresgid /* 4190 */
- PTR sys_getresgid
- PTR sys_prctl
- PTR sys32_rt_sigreturn
- PTR compat_sys_rt_sigaction
- PTR compat_sys_rt_sigprocmask /* 4195 */
- PTR compat_sys_rt_sigpending
- PTR compat_sys_rt_sigtimedwait
- PTR compat_sys_rt_sigqueueinfo
- PTR compat_sys_rt_sigsuspend
- PTR sys_32_pread /* 4200 */
- PTR sys_32_pwrite
- PTR sys_chown
- PTR sys_getcwd
- PTR sys_capget
- PTR sys_capset /* 4205 */
- PTR compat_sys_sigaltstack
- PTR compat_sys_sendfile
- PTR sys_ni_syscall
- PTR sys_ni_syscall
- PTR sys_mips_mmap2 /* 4210 */
- PTR sys_32_truncate64
- PTR sys_32_ftruncate64
- PTR sys_newstat
- PTR sys_newlstat
- PTR sys_newfstat /* 4215 */
- PTR sys_pivot_root
- PTR sys_mincore
- PTR sys_madvise
- PTR sys_getdents64
- PTR compat_sys_fcntl64 /* 4220 */
- PTR sys_ni_syscall
- PTR sys_gettid
- PTR sys32_readahead
- PTR sys_setxattr
- PTR sys_lsetxattr /* 4225 */
- PTR sys_fsetxattr
- PTR sys_getxattr
- PTR sys_lgetxattr
- PTR sys_fgetxattr
- PTR sys_listxattr /* 4230 */
- PTR sys_llistxattr
- PTR sys_flistxattr
- PTR sys_removexattr
- PTR sys_lremovexattr
- PTR sys_fremovexattr /* 4235 */
- PTR sys_tkill
- PTR sys_sendfile64
- PTR compat_sys_futex
- PTR compat_sys_sched_setaffinity
- PTR compat_sys_sched_getaffinity /* 4240 */
- PTR compat_sys_io_setup
- PTR sys_io_destroy
- PTR compat_sys_io_getevents
- PTR compat_sys_io_submit
- PTR sys_io_cancel /* 4245 */
- PTR sys_exit_group
- PTR compat_sys_lookup_dcookie
- PTR sys_epoll_create
- PTR sys_epoll_ctl
- PTR sys_epoll_wait /* 4250 */
- PTR sys_remap_file_pages
- PTR sys_set_tid_address
- PTR sys_restart_syscall
- PTR sys32_fadvise64_64
- PTR compat_sys_statfs64 /* 4255 */
- PTR compat_sys_fstatfs64
- PTR compat_sys_timer_create
- PTR compat_sys_timer_settime
- PTR compat_sys_timer_gettime
- PTR sys_timer_getoverrun /* 4260 */
- PTR sys_timer_delete
- PTR compat_sys_clock_settime
- PTR compat_sys_clock_gettime
- PTR compat_sys_clock_getres
- PTR compat_sys_clock_nanosleep /* 4265 */
- PTR sys_tgkill
- PTR compat_sys_utimes
- PTR compat_sys_mbind
- PTR compat_sys_get_mempolicy
- PTR compat_sys_set_mempolicy /* 4270 */
- PTR compat_sys_mq_open
- PTR sys_mq_unlink
- PTR compat_sys_mq_timedsend
- PTR compat_sys_mq_timedreceive
- PTR compat_sys_mq_notify /* 4275 */
- PTR compat_sys_mq_getsetattr
- PTR sys_ni_syscall /* sys_vserver */
- PTR compat_sys_waitid
- PTR sys_ni_syscall /* available, was setaltroot */
- PTR sys_add_key /* 4280 */
- PTR sys_request_key
- PTR compat_sys_keyctl
- PTR sys_set_thread_area
- PTR sys_inotify_init
- PTR sys_inotify_add_watch /* 4285 */
- PTR sys_inotify_rm_watch
- PTR compat_sys_migrate_pages
- PTR compat_sys_openat
- PTR sys_mkdirat
- PTR sys_mknodat /* 4290 */
- PTR sys_fchownat
- PTR compat_sys_futimesat
- PTR sys_newfstatat
- PTR sys_unlinkat
- PTR sys_renameat /* 4295 */
- PTR sys_linkat
- PTR sys_symlinkat
- PTR sys_readlinkat
- PTR sys_fchmodat
- PTR sys_faccessat /* 4300 */
- PTR compat_sys_pselect6
- PTR compat_sys_ppoll
- PTR sys_unshare
- PTR sys_splice
- PTR sys32_sync_file_range /* 4305 */
- PTR sys_tee
- PTR compat_sys_vmsplice
- PTR compat_sys_move_pages
- PTR compat_sys_set_robust_list
- PTR compat_sys_get_robust_list /* 4310 */
- PTR compat_sys_kexec_load
- PTR sys_getcpu
- PTR compat_sys_epoll_pwait
- PTR sys_ioprio_set
- PTR sys_ioprio_get /* 4315 */
- PTR compat_sys_utimensat
- PTR compat_sys_signalfd
- PTR sys_ni_syscall /* was timerfd */
- PTR sys_eventfd
- PTR sys32_fallocate /* 4320 */
- PTR sys_timerfd_create
- PTR compat_sys_timerfd_gettime
- PTR compat_sys_timerfd_settime
- PTR compat_sys_signalfd4
- PTR sys_eventfd2 /* 4325 */
- PTR sys_epoll_create1
- PTR sys_dup3
- PTR sys_pipe2
- PTR sys_inotify_init1
- PTR compat_sys_preadv /* 4330 */
- PTR compat_sys_pwritev
- PTR compat_sys_rt_tgsigqueueinfo
- PTR sys_perf_event_open
- PTR sys_accept4
- PTR compat_sys_recvmmsg /* 4335 */
- PTR sys_fanotify_init
- PTR compat_sys_fanotify_mark
- PTR sys_prlimit64
- PTR sys_name_to_handle_at
- PTR compat_sys_open_by_handle_at /* 4340 */
- PTR compat_sys_clock_adjtime
- PTR sys_syncfs
- PTR compat_sys_sendmmsg
- PTR sys_setns
- PTR compat_sys_process_vm_readv /* 4345 */
- PTR compat_sys_process_vm_writev
- PTR sys_kcmp
- PTR sys_finit_module
- PTR sys_sched_setattr
- PTR sys_sched_getattr /* 4350 */
- PTR sys_renameat2
- PTR sys_seccomp
- PTR sys_getrandom
- PTR sys_memfd_create
- PTR sys_bpf /* 4355 */
- PTR compat_sys_execveat
- PTR sys_userfaultfd
- PTR sys_membarrier
- PTR sys_mlock2
- PTR sys_copy_file_range /* 4360 */
- PTR compat_sys_preadv2
- PTR compat_sys_pwritev2
- PTR sys_pkey_mprotect
- PTR sys_pkey_alloc
- PTR sys_pkey_free /* 4365 */
- PTR sys_statx
- PTR sys_rseq
- PTR compat_sys_io_pgetevents
- .size sys32_call_table,.-sys32_call_table
+#include <asm/syscall_table_64_o32.h>
+#undef __SYSCALL
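+/*
+ * A sketch of how the include above is consumed (the __SYSCALL definition
+ * itself lives in an earlier part of this file, outside this hunk): the
+ * generated header carries one "__SYSCALL(<nr>, <entry>, )" line per slot,
+ * so with something like
+ *
+ *	#define __SYSCALL(nr, entry, nargs)	PTR	entry
+ *
+ * in effect, the include expands to the same "PTR sys_*" list that the
+ * hand-written table above spelled out.
+ */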
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 109ed163a6a6..d3a23758592c 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -62,6 +62,8 @@ struct rt_sigframe {
struct ucontext rs_uc;
};
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
/*
* Thread saved context copy to/from a signal context presumed to be on the
* user stack, and therefore accessed with appropriate macros from uaccess.h.
@@ -104,6 +106,20 @@ static int copy_fp_from_sigcontext(void __user *sc)
return err;
}
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
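+/*
+ * Without FP support there is no FP context to copy in either direction,
+ * so both helpers are successful no-ops.
+ */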
+static int copy_fp_to_sigcontext(void __user *sc)
+{
+ return 0;
+}
+
+static int copy_fp_from_sigcontext(void __user *sc)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
+
/*
* Wrappers for the assembly _{save,restore}_fp_context functions.
*/
@@ -142,6 +158,8 @@ static inline void __user *sc_to_extcontext(void __user *sc)
return &uc->uc_extcontext;
}
+#ifdef CONFIG_CPU_HAS_MSA
+
static int save_msa_extcontext(void __user *buf)
{
struct msa_extcontext __user *msa = buf;
@@ -195,9 +213,6 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size)
unsigned int csr;
int i, err;
- if (!IS_ENABLED(CONFIG_CPU_HAS_MSA))
- return SIGSYS;
-
if (size != sizeof(*msa))
return -EINVAL;
@@ -234,6 +249,20 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size)
return err;
}
+#else /* !CONFIG_CPU_HAS_MSA */
+
+static int save_msa_extcontext(void __user *buf)
+{
+ return 0;
+}
+
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
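+	/*
+	 * An MSA context can never be valid without MSA support; preserve
+	 * the SIGSYS behaviour of the IS_ENABLED() check removed above.
+	 */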
+ return SIGSYS;
+}
+
+#endif /* !CONFIG_CPU_HAS_MSA */
+
static int save_extcontext(void __user *buf)
{
int sz;
@@ -880,7 +909,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
user_enter();
}
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
static int smp_save_fp_context(void __user *sc)
{
return raw_cpu_has_fpu
@@ -908,7 +937,7 @@ static int signal_setup(void)
(offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
/* For now just do the cpu_has_fpu check when the functions are invoked */
save_fp_context = smp_save_fp_context;
restore_fp_context = smp_restore_fp_context;
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 69c17b549fd3..41a0db08cd37 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -106,6 +106,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
if (cpu_has_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__ (
+ " .set push \n"
" .set arch=r4000 \n"
" li %[err], 0 \n"
"1: ll %[old], (%[addr]) \n"
@@ -122,7 +123,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
" "STR(PTR)" 1b, 4b \n"
" "STR(PTR)" 2b, 4b \n"
" .previous \n"
- " .set mips0 \n"
+ " .set pop \n"
: [old] "=&r" (old),
[err] "=&r" (err),
[tmp] "=&r" (tmp)
@@ -132,6 +133,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
: "memory");
} else if (cpu_has_llsc) {
__asm__ __volatile__ (
+ " .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" li %[err], 0 \n"
"1: \n"
@@ -150,7 +152,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
" "STR(PTR)" 1b, 5b \n"
" "STR(PTR)" 2b, 5b \n"
" .previous \n"
- " .set mips0 \n"
+ " .set pop \n"
: [old] "=&r" (old),
[err] "=&r" (err),
[tmp] "=&r" (tmp)
diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile
new file mode 100644
index 000000000000..a3d4bec695c6
--- /dev/null
+++ b/arch/mips/kernel/syscalls/Makefile
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
+ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+syscalln32 := $(srctree)/$(src)/syscall_n32.tbl
+syscalln64 := $(srctree)/$(src)/syscall_n64.tbl
+syscallo32 := $(srctree)/$(src)/syscall_o32.tbl
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+sysnr := $(srctree)/$(src)/syscallnr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+ '$(syshdr_abis_$(basetarget))' \
+ '$(syshdr_pfx_$(basetarget))' \
+ '$(syshdr_offset_$(basetarget))'
+
+quiet_cmd_sysnr = SYSNR $@
+ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
+ '$(sysnr_abis_$(basetarget))' \
+ '$(sysnr_pfx_$(basetarget))' \
+ '$(sysnr_offset_$(basetarget))'
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
+ '$(systbl_abis_$(basetarget))' \
+ '$(systbl_abi_$(basetarget))' \
+ '$(systbl_offset_$(basetarget))'
+
+syshdr_offset_unistd_n32 := __NR_Linux
+$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr)
+ $(call if_changed,syshdr)
+
+syshdr_offset_unistd_n64 := __NR_Linux
+$(uapi)/unistd_n64.h: $(syscalln64) $(syshdr)
+ $(call if_changed,syshdr)
+
+syshdr_offset_unistd_o32 := __NR_Linux
+$(uapi)/unistd_o32.h: $(syscallo32) $(syshdr)
+ $(call if_changed,syshdr)
+
+sysnr_pfx_unistd_nr_n32 := N32
+sysnr_offset_unistd_nr_n32 := 6000
+$(uapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr)
+ $(call if_changed,sysnr)
+
+sysnr_pfx_unistd_nr_n64 := 64
+sysnr_offset_unistd_nr_n64 := 5000
+$(uapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr)
+ $(call if_changed,sysnr)
+
+sysnr_pfx_unistd_nr_o32 := O32
+sysnr_offset_unistd_nr_o32 := 4000
+$(uapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr)
+ $(call if_changed,sysnr)
+
+systbl_abi_syscall_table_32_o32 := 32_o32
+systbl_offset_syscall_table_32_o32 := 4000
+$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abi_syscall_table_64_n32 := 64_n32
+systbl_offset_syscall_table_64_n32 := 6000
+$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abi_syscall_table_64_n64 := 64_n64
+systbl_offset_syscall_table_64_n64 := 5000
+$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abi_syscall_table_64_o32 := 64_o32
+systbl_offset_syscall_table_64_o32 := 4000
+$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl)
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_n32.h \
+ unistd_n64.h \
+ unistd_o32.h \
+ unistd_nr_n32.h \
+ unistd_nr_n64.h \
+ unistd_nr_o32.h
+kapisyshdr-y += syscall_table_32_o32.h \
+ syscall_table_64_n32.h \
+ syscall_table_64_n64.h \
+ syscall_table_64_o32.h
+
+targets += $(uapisyshdr-y) $(kapisyshdr-y)
+
+PHONY += all
+all: $(addprefix $(uapi)/,$(uapisyshdr-y))
+all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+ @:
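+
+# Worked example (a sketch, with paths abbreviated): for the 64-bit kernel's
+# o32 table, the rule above expands cmd_systbl to roughly
+#
+#	sh syscalltbl.sh syscall_o32.tbl syscall_table_64_o32.h '' 64_o32 4000
+#
+# i.e. syscalltbl.sh reads the o32 table, applies the 4000 offset, and
+# prefers the <compat entry point> column because the ABI is "64_o32".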
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
new file mode 100644
index 000000000000..53d5862649ae
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -0,0 +1,343 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for mips
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "n32" for this file.
+#
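+# As a sketch of the generated output: with the 6000 offset set in the
+# syscalls Makefile, syscalltbl.sh turns the first row below into roughly
+# "__SYSCALL(6000, sys_read, )" in syscall_table_64_n32.h, while
+# syscallnr.sh derives __NR_N32_Linux_syscalls from the highest number
+# plus one.
+#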
+0 n32 read sys_read
+1 n32 write sys_write
+2 n32 open sys_open
+3 n32 close sys_close
+4 n32 stat sys_newstat
+5 n32 fstat sys_newfstat
+6 n32 lstat sys_newlstat
+7 n32 poll sys_poll
+8 n32 lseek sys_lseek
+9 n32 mmap sys_mips_mmap
+10 n32 mprotect sys_mprotect
+11 n32 munmap sys_munmap
+12 n32 brk sys_brk
+13 n32 rt_sigaction compat_sys_rt_sigaction
+14 n32 rt_sigprocmask compat_sys_rt_sigprocmask
+15 n32 ioctl compat_sys_ioctl
+16 n32 pread64 sys_pread64
+17 n32 pwrite64 sys_pwrite64
+18 n32 readv compat_sys_readv
+19 n32 writev compat_sys_writev
+20 n32 access sys_access
+21 n32 pipe sysm_pipe
+22 n32 _newselect compat_sys_select
+23 n32 sched_yield sys_sched_yield
+24 n32 mremap sys_mremap
+25 n32 msync sys_msync
+26 n32 mincore sys_mincore
+27 n32 madvise sys_madvise
+28 n32 shmget sys_shmget
+29 n32 shmat sys_shmat
+30 n32 shmctl compat_sys_shmctl
+31 n32 dup sys_dup
+32 n32 dup2 sys_dup2
+33 n32 pause sys_pause
+34 n32 nanosleep compat_sys_nanosleep
+35 n32 getitimer compat_sys_getitimer
+36 n32 setitimer compat_sys_setitimer
+37 n32 alarm sys_alarm
+38 n32 getpid sys_getpid
+39 n32 sendfile compat_sys_sendfile
+40 n32 socket sys_socket
+41 n32 connect sys_connect
+42 n32 accept sys_accept
+43 n32 sendto sys_sendto
+44 n32 recvfrom compat_sys_recvfrom
+45 n32 sendmsg compat_sys_sendmsg
+46 n32 recvmsg compat_sys_recvmsg
+47 n32 shutdown sys_shutdown
+48 n32 bind sys_bind
+49 n32 listen sys_listen
+50 n32 getsockname sys_getsockname
+51 n32 getpeername sys_getpeername
+52 n32 socketpair sys_socketpair
+53 n32 setsockopt compat_sys_setsockopt
+54 n32 getsockopt compat_sys_getsockopt
+55 n32 clone __sys_clone
+56 n32 fork __sys_fork
+57 n32 execve compat_sys_execve
+58 n32 exit sys_exit
+59 n32 wait4 compat_sys_wait4
+60 n32 kill sys_kill
+61 n32 uname sys_newuname
+62 n32 semget sys_semget
+63 n32 semop sys_semop
+64 n32 semctl compat_sys_semctl
+65 n32 shmdt sys_shmdt
+66 n32 msgget sys_msgget
+67 n32 msgsnd compat_sys_msgsnd
+68 n32 msgrcv compat_sys_msgrcv
+69 n32 msgctl compat_sys_msgctl
+70 n32 fcntl compat_sys_fcntl
+71 n32 flock sys_flock
+72 n32 fsync sys_fsync
+73 n32 fdatasync sys_fdatasync
+74 n32 truncate sys_truncate
+75 n32 ftruncate sys_ftruncate
+76 n32 getdents compat_sys_getdents
+77 n32 getcwd sys_getcwd
+78 n32 chdir sys_chdir
+79 n32 fchdir sys_fchdir
+80 n32 rename sys_rename
+81 n32 mkdir sys_mkdir
+82 n32 rmdir sys_rmdir
+83 n32 creat sys_creat
+84 n32 link sys_link
+85 n32 unlink sys_unlink
+86 n32 symlink sys_symlink
+87 n32 readlink sys_readlink
+88 n32 chmod sys_chmod
+89 n32 fchmod sys_fchmod
+90 n32 chown sys_chown
+91 n32 fchown sys_fchown
+92 n32 lchown sys_lchown
+93 n32 umask sys_umask
+94 n32 gettimeofday compat_sys_gettimeofday
+95 n32 getrlimit compat_sys_getrlimit
+96 n32 getrusage compat_sys_getrusage
+97 n32 sysinfo compat_sys_sysinfo
+98 n32 times compat_sys_times
+99 n32 ptrace compat_sys_ptrace
+100 n32 getuid sys_getuid
+101 n32 syslog sys_syslog
+102 n32 getgid sys_getgid
+103 n32 setuid sys_setuid
+104 n32 setgid sys_setgid
+105 n32 geteuid sys_geteuid
+106 n32 getegid sys_getegid
+107 n32 setpgid sys_setpgid
+108 n32 getppid sys_getppid
+109 n32 getpgrp sys_getpgrp
+110 n32 setsid sys_setsid
+111 n32 setreuid sys_setreuid
+112 n32 setregid sys_setregid
+113 n32 getgroups sys_getgroups
+114 n32 setgroups sys_setgroups
+115 n32 setresuid sys_setresuid
+116 n32 getresuid sys_getresuid
+117 n32 setresgid sys_setresgid
+118 n32 getresgid sys_getresgid
+119 n32 getpgid sys_getpgid
+120 n32 setfsuid sys_setfsuid
+121 n32 setfsgid sys_setfsgid
+122 n32 getsid sys_getsid
+123 n32 capget sys_capget
+124 n32 capset sys_capset
+125 n32 rt_sigpending compat_sys_rt_sigpending
+126 n32 rt_sigtimedwait compat_sys_rt_sigtimedwait
+127 n32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+128 n32 rt_sigsuspend compat_sys_rt_sigsuspend
+129 n32 sigaltstack compat_sys_sigaltstack
+130 n32 utime compat_sys_utime
+131 n32 mknod sys_mknod
+132 n32 personality sys_32_personality
+133 n32 ustat compat_sys_ustat
+134 n32 statfs compat_sys_statfs
+135 n32 fstatfs compat_sys_fstatfs
+136 n32 sysfs sys_sysfs
+137 n32 getpriority sys_getpriority
+138 n32 setpriority sys_setpriority
+139 n32 sched_setparam sys_sched_setparam
+140 n32 sched_getparam sys_sched_getparam
+141 n32 sched_setscheduler sys_sched_setscheduler
+142 n32 sched_getscheduler sys_sched_getscheduler
+143 n32 sched_get_priority_max sys_sched_get_priority_max
+144 n32 sched_get_priority_min sys_sched_get_priority_min
+145 n32 sched_rr_get_interval compat_sys_sched_rr_get_interval
+146 n32 mlock sys_mlock
+147 n32 munlock sys_munlock
+148 n32 mlockall sys_mlockall
+149 n32 munlockall sys_munlockall
+150 n32 vhangup sys_vhangup
+151 n32 pivot_root sys_pivot_root
+152 n32 _sysctl compat_sys_sysctl
+153 n32 prctl sys_prctl
+154 n32 adjtimex compat_sys_adjtimex
+155 n32 setrlimit compat_sys_setrlimit
+156 n32 chroot sys_chroot
+157 n32 sync sys_sync
+158 n32 acct sys_acct
+159 n32 settimeofday compat_sys_settimeofday
+160 n32 mount compat_sys_mount
+161 n32 umount2 sys_umount
+162 n32 swapon sys_swapon
+163 n32 swapoff sys_swapoff
+164 n32 reboot sys_reboot
+165 n32 sethostname sys_sethostname
+166 n32 setdomainname sys_setdomainname
+167 n32 create_module sys_ni_syscall
+168 n32 init_module sys_init_module
+169 n32 delete_module sys_delete_module
+170 n32 get_kernel_syms sys_ni_syscall
+171 n32 query_module sys_ni_syscall
+172 n32 quotactl sys_quotactl
+173 n32 nfsservctl sys_ni_syscall
+174 n32 getpmsg sys_ni_syscall
+175 n32 putpmsg sys_ni_syscall
+176 n32 afs_syscall sys_ni_syscall
+# 177 reserved for security
+177 n32 reserved177 sys_ni_syscall
+178 n32 gettid sys_gettid
+179 n32 readahead sys_readahead
+180 n32 setxattr sys_setxattr
+181 n32 lsetxattr sys_lsetxattr
+182 n32 fsetxattr sys_fsetxattr
+183 n32 getxattr sys_getxattr
+184 n32 lgetxattr sys_lgetxattr
+185 n32 fgetxattr sys_fgetxattr
+186 n32 listxattr sys_listxattr
+187 n32 llistxattr sys_llistxattr
+188 n32 flistxattr sys_flistxattr
+189 n32 removexattr sys_removexattr
+190 n32 lremovexattr sys_lremovexattr
+191 n32 fremovexattr sys_fremovexattr
+192 n32 tkill sys_tkill
+193 n32 reserved193 sys_ni_syscall
+194 n32 futex compat_sys_futex
+195 n32 sched_setaffinity compat_sys_sched_setaffinity
+196 n32 sched_getaffinity compat_sys_sched_getaffinity
+197 n32 cacheflush sys_cacheflush
+198 n32 cachectl sys_cachectl
+199 n32 sysmips __sys_sysmips
+200 n32 io_setup compat_sys_io_setup
+201 n32 io_destroy sys_io_destroy
+202 n32 io_getevents compat_sys_io_getevents
+203 n32 io_submit compat_sys_io_submit
+204 n32 io_cancel sys_io_cancel
+205 n32 exit_group sys_exit_group
+206 n32 lookup_dcookie sys_lookup_dcookie
+207 n32 epoll_create sys_epoll_create
+208 n32 epoll_ctl sys_epoll_ctl
+209 n32 epoll_wait sys_epoll_wait
+210 n32 remap_file_pages sys_remap_file_pages
+211 n32 rt_sigreturn sysn32_rt_sigreturn
+212 n32 fcntl64 compat_sys_fcntl64
+213 n32 set_tid_address sys_set_tid_address
+214 n32 restart_syscall sys_restart_syscall
+215 n32 semtimedop compat_sys_semtimedop
+216 n32 fadvise64 sys_fadvise64_64
+217 n32 statfs64 compat_sys_statfs64
+218 n32 fstatfs64 compat_sys_fstatfs64
+219 n32 sendfile64 sys_sendfile64
+220 n32 timer_create compat_sys_timer_create
+221 n32 timer_settime compat_sys_timer_settime
+222 n32 timer_gettime compat_sys_timer_gettime
+223 n32 timer_getoverrun sys_timer_getoverrun
+224 n32 timer_delete sys_timer_delete
+225 n32 clock_settime compat_sys_clock_settime
+226 n32 clock_gettime compat_sys_clock_gettime
+227 n32 clock_getres compat_sys_clock_getres
+228 n32 clock_nanosleep compat_sys_clock_nanosleep
+229 n32 tgkill sys_tgkill
+230 n32 utimes compat_sys_utimes
+231 n32 mbind compat_sys_mbind
+232 n32 get_mempolicy compat_sys_get_mempolicy
+233 n32 set_mempolicy compat_sys_set_mempolicy
+234 n32 mq_open compat_sys_mq_open
+235 n32 mq_unlink sys_mq_unlink
+236 n32 mq_timedsend compat_sys_mq_timedsend
+237 n32 mq_timedreceive compat_sys_mq_timedreceive
+238 n32 mq_notify compat_sys_mq_notify
+239 n32 mq_getsetattr compat_sys_mq_getsetattr
+240 n32 vserver sys_ni_syscall
+241 n32 waitid compat_sys_waitid
+# 242 was sys_setaltroot
+243 n32 add_key sys_add_key
+244 n32 request_key sys_request_key
+245 n32 keyctl compat_sys_keyctl
+246 n32 set_thread_area sys_set_thread_area
+247 n32 inotify_init sys_inotify_init
+248 n32 inotify_add_watch sys_inotify_add_watch
+249 n32 inotify_rm_watch sys_inotify_rm_watch
+250 n32 migrate_pages compat_sys_migrate_pages
+251 n32 openat sys_openat
+252 n32 mkdirat sys_mkdirat
+253 n32 mknodat sys_mknodat
+254 n32 fchownat sys_fchownat
+255 n32 futimesat compat_sys_futimesat
+256 n32 newfstatat sys_newfstatat
+257 n32 unlinkat sys_unlinkat
+258 n32 renameat sys_renameat
+259 n32 linkat sys_linkat
+260 n32 symlinkat sys_symlinkat
+261 n32 readlinkat sys_readlinkat
+262 n32 fchmodat sys_fchmodat
+263 n32 faccessat sys_faccessat
+264 n32 pselect6 compat_sys_pselect6
+265 n32 ppoll compat_sys_ppoll
+266 n32 unshare sys_unshare
+267 n32 splice sys_splice
+268 n32 sync_file_range sys_sync_file_range
+269 n32 tee sys_tee
+270 n32 vmsplice compat_sys_vmsplice
+271 n32 move_pages compat_sys_move_pages
+272 n32 set_robust_list compat_sys_set_robust_list
+273 n32 get_robust_list compat_sys_get_robust_list
+274 n32 kexec_load compat_sys_kexec_load
+275 n32 getcpu sys_getcpu
+276 n32 epoll_pwait compat_sys_epoll_pwait
+277 n32 ioprio_set sys_ioprio_set
+278 n32 ioprio_get sys_ioprio_get
+279 n32 utimensat compat_sys_utimensat
+280 n32 signalfd compat_sys_signalfd
+281 n32 timerfd sys_ni_syscall
+282 n32 eventfd sys_eventfd
+283 n32 fallocate sys_fallocate
+284 n32 timerfd_create sys_timerfd_create
+285 n32 timerfd_gettime compat_sys_timerfd_gettime
+286 n32 timerfd_settime compat_sys_timerfd_settime
+287 n32 signalfd4 compat_sys_signalfd4
+288 n32 eventfd2 sys_eventfd2
+289 n32 epoll_create1 sys_epoll_create1
+290 n32 dup3 sys_dup3
+291 n32 pipe2 sys_pipe2
+292 n32 inotify_init1 sys_inotify_init1
+293 n32 preadv compat_sys_preadv
+294 n32 pwritev compat_sys_pwritev
+295 n32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+296 n32 perf_event_open sys_perf_event_open
+297 n32 accept4 sys_accept4
+298 n32 recvmmsg compat_sys_recvmmsg
+299 n32 getdents64 sys_getdents64
+300 n32 fanotify_init sys_fanotify_init
+301 n32 fanotify_mark sys_fanotify_mark
+302 n32 prlimit64 sys_prlimit64
+303 n32 name_to_handle_at sys_name_to_handle_at
+304 n32 open_by_handle_at sys_open_by_handle_at
+305 n32 clock_adjtime compat_sys_clock_adjtime
+306 n32 syncfs sys_syncfs
+307 n32 sendmmsg compat_sys_sendmmsg
+308 n32 setns sys_setns
+309 n32 process_vm_readv compat_sys_process_vm_readv
+310 n32 process_vm_writev compat_sys_process_vm_writev
+311 n32 kcmp sys_kcmp
+312 n32 finit_module sys_finit_module
+313 n32 sched_setattr sys_sched_setattr
+314 n32 sched_getattr sys_sched_getattr
+315 n32 renameat2 sys_renameat2
+316 n32 seccomp sys_seccomp
+317 n32 getrandom sys_getrandom
+318 n32 memfd_create sys_memfd_create
+319 n32 bpf sys_bpf
+320 n32 execveat compat_sys_execveat
+321 n32 userfaultfd sys_userfaultfd
+322 n32 membarrier sys_membarrier
+323 n32 mlock2 sys_mlock2
+324 n32 copy_file_range sys_copy_file_range
+325 n32 preadv2 compat_sys_preadv2
+326 n32 pwritev2 compat_sys_pwritev2
+327 n32 pkey_mprotect sys_pkey_mprotect
+328 n32 pkey_alloc sys_pkey_alloc
+329 n32 pkey_free sys_pkey_free
+330 n32 statx sys_statx
+331 n32 rseq sys_rseq
+332 n32 io_pgetevents compat_sys_io_pgetevents
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
new file mode 100644
index 000000000000..a8286ccbb66c
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -0,0 +1,339 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for mips
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "n64" for this file.
+#
+0 n64 read sys_read
+1 n64 write sys_write
+2 n64 open sys_open
+3 n64 close sys_close
+4 n64 stat sys_newstat
+5 n64 fstat sys_newfstat
+6 n64 lstat sys_newlstat
+7 n64 poll sys_poll
+8 n64 lseek sys_lseek
+9 n64 mmap sys_mips_mmap
+10 n64 mprotect sys_mprotect
+11 n64 munmap sys_munmap
+12 n64 brk sys_brk
+13 n64 rt_sigaction sys_rt_sigaction
+14 n64 rt_sigprocmask sys_rt_sigprocmask
+15 n64 ioctl sys_ioctl
+16 n64 pread64 sys_pread64
+17 n64 pwrite64 sys_pwrite64
+18 n64 readv sys_readv
+19 n64 writev sys_writev
+20 n64 access sys_access
+21 n64 pipe sysm_pipe
+22 n64 _newselect sys_select
+23 n64 sched_yield sys_sched_yield
+24 n64 mremap sys_mremap
+25 n64 msync sys_msync
+26 n64 mincore sys_mincore
+27 n64 madvise sys_madvise
+28 n64 shmget sys_shmget
+29 n64 shmat sys_shmat
+30 n64 shmctl sys_shmctl
+31 n64 dup sys_dup
+32 n64 dup2 sys_dup2
+33 n64 pause sys_pause
+34 n64 nanosleep sys_nanosleep
+35 n64 getitimer sys_getitimer
+36 n64 setitimer sys_setitimer
+37 n64 alarm sys_alarm
+38 n64 getpid sys_getpid
+39 n64 sendfile sys_sendfile64
+40 n64 socket sys_socket
+41 n64 connect sys_connect
+42 n64 accept sys_accept
+43 n64 sendto sys_sendto
+44 n64 recvfrom sys_recvfrom
+45 n64 sendmsg sys_sendmsg
+46 n64 recvmsg sys_recvmsg
+47 n64 shutdown sys_shutdown
+48 n64 bind sys_bind
+49 n64 listen sys_listen
+50 n64 getsockname sys_getsockname
+51 n64 getpeername sys_getpeername
+52 n64 socketpair sys_socketpair
+53 n64 setsockopt sys_setsockopt
+54 n64 getsockopt sys_getsockopt
+55 n64 clone __sys_clone
+56 n64 fork __sys_fork
+57 n64 execve sys_execve
+58 n64 exit sys_exit
+59 n64 wait4 sys_wait4
+60 n64 kill sys_kill
+61 n64 uname sys_newuname
+62 n64 semget sys_semget
+63 n64 semop sys_semop
+64 n64 semctl sys_semctl
+65 n64 shmdt sys_shmdt
+66 n64 msgget sys_msgget
+67 n64 msgsnd sys_msgsnd
+68 n64 msgrcv sys_msgrcv
+69 n64 msgctl sys_msgctl
+70 n64 fcntl sys_fcntl
+71 n64 flock sys_flock
+72 n64 fsync sys_fsync
+73 n64 fdatasync sys_fdatasync
+74 n64 truncate sys_truncate
+75 n64 ftruncate sys_ftruncate
+76 n64 getdents sys_getdents
+77 n64 getcwd sys_getcwd
+78 n64 chdir sys_chdir
+79 n64 fchdir sys_fchdir
+80 n64 rename sys_rename
+81 n64 mkdir sys_mkdir
+82 n64 rmdir sys_rmdir
+83 n64 creat sys_creat
+84 n64 link sys_link
+85 n64 unlink sys_unlink
+86 n64 symlink sys_symlink
+87 n64 readlink sys_readlink
+88 n64 chmod sys_chmod
+89 n64 fchmod sys_fchmod
+90 n64 chown sys_chown
+91 n64 fchown sys_fchown
+92 n64 lchown sys_lchown
+93 n64 umask sys_umask
+94 n64 gettimeofday sys_gettimeofday
+95 n64 getrlimit sys_getrlimit
+96 n64 getrusage sys_getrusage
+97 n64 sysinfo sys_sysinfo
+98 n64 times sys_times
+99 n64 ptrace sys_ptrace
+100 n64 getuid sys_getuid
+101 n64 syslog sys_syslog
+102 n64 getgid sys_getgid
+103 n64 setuid sys_setuid
+104 n64 setgid sys_setgid
+105 n64 geteuid sys_geteuid
+106 n64 getegid sys_getegid
+107 n64 setpgid sys_setpgid
+108 n64 getppid sys_getppid
+109 n64 getpgrp sys_getpgrp
+110 n64 setsid sys_setsid
+111 n64 setreuid sys_setreuid
+112 n64 setregid sys_setregid
+113 n64 getgroups sys_getgroups
+114 n64 setgroups sys_setgroups
+115 n64 setresuid sys_setresuid
+116 n64 getresuid sys_getresuid
+117 n64 setresgid sys_setresgid
+118 n64 getresgid sys_getresgid
+119 n64 getpgid sys_getpgid
+120 n64 setfsuid sys_setfsuid
+121 n64 setfsgid sys_setfsgid
+122 n64 getsid sys_getsid
+123 n64 capget sys_capget
+124 n64 capset sys_capset
+125 n64 rt_sigpending sys_rt_sigpending
+126 n64 rt_sigtimedwait sys_rt_sigtimedwait
+127 n64 rt_sigqueueinfo sys_rt_sigqueueinfo
+128 n64 rt_sigsuspend sys_rt_sigsuspend
+129 n64 sigaltstack sys_sigaltstack
+130 n64 utime sys_utime
+131 n64 mknod sys_mknod
+132 n64 personality sys_personality
+133 n64 ustat sys_ustat
+134 n64 statfs sys_statfs
+135 n64 fstatfs sys_fstatfs
+136 n64 sysfs sys_sysfs
+137 n64 getpriority sys_getpriority
+138 n64 setpriority sys_setpriority
+139 n64 sched_setparam sys_sched_setparam
+140 n64 sched_getparam sys_sched_getparam
+141 n64 sched_setscheduler sys_sched_setscheduler
+142 n64 sched_getscheduler sys_sched_getscheduler
+143 n64 sched_get_priority_max sys_sched_get_priority_max
+144 n64 sched_get_priority_min sys_sched_get_priority_min
+145 n64 sched_rr_get_interval sys_sched_rr_get_interval
+146 n64 mlock sys_mlock
+147 n64 munlock sys_munlock
+148 n64 mlockall sys_mlockall
+149 n64 munlockall sys_munlockall
+150 n64 vhangup sys_vhangup
+151 n64 pivot_root sys_pivot_root
+152 n64 _sysctl sys_sysctl
+153 n64 prctl sys_prctl
+154 n64 adjtimex sys_adjtimex
+155 n64 setrlimit sys_setrlimit
+156 n64 chroot sys_chroot
+157 n64 sync sys_sync
+158 n64 acct sys_acct
+159 n64 settimeofday sys_settimeofday
+160 n64 mount sys_mount
+161 n64 umount2 sys_umount
+162 n64 swapon sys_swapon
+163 n64 swapoff sys_swapoff
+164 n64 reboot sys_reboot
+165 n64 sethostname sys_sethostname
+166 n64 setdomainname sys_setdomainname
+167 n64 create_module sys_ni_syscall
+168 n64 init_module sys_init_module
+169 n64 delete_module sys_delete_module
+170 n64 get_kernel_syms sys_ni_syscall
+171 n64 query_module sys_ni_syscall
+172 n64 quotactl sys_quotactl
+173 n64 nfsservctl sys_ni_syscall
+174 n64 getpmsg sys_ni_syscall
+175 n64 putpmsg sys_ni_syscall
+176 n64 afs_syscall sys_ni_syscall
+# 177 reserved for security
+177 n64 reserved177 sys_ni_syscall
+178 n64 gettid sys_gettid
+179 n64 readahead sys_readahead
+180 n64 setxattr sys_setxattr
+181 n64 lsetxattr sys_lsetxattr
+182 n64 fsetxattr sys_fsetxattr
+183 n64 getxattr sys_getxattr
+184 n64 lgetxattr sys_lgetxattr
+185 n64 fgetxattr sys_fgetxattr
+186 n64 listxattr sys_listxattr
+187 n64 llistxattr sys_llistxattr
+188 n64 flistxattr sys_flistxattr
+189 n64 removexattr sys_removexattr
+190 n64 lremovexattr sys_lremovexattr
+191 n64 fremovexattr sys_fremovexattr
+192 n64 tkill sys_tkill
+193 n64 reserved193 sys_ni_syscall
+194 n64 futex sys_futex
+195 n64 sched_setaffinity sys_sched_setaffinity
+196 n64 sched_getaffinity sys_sched_getaffinity
+197 n64 cacheflush sys_cacheflush
+198 n64 cachectl sys_cachectl
+199 n64 sysmips __sys_sysmips
+200 n64 io_setup sys_io_setup
+201 n64 io_destroy sys_io_destroy
+202 n64 io_getevents sys_io_getevents
+203 n64 io_submit sys_io_submit
+204 n64 io_cancel sys_io_cancel
+205 n64 exit_group sys_exit_group
+206 n64 lookup_dcookie sys_lookup_dcookie
+207 n64 epoll_create sys_epoll_create
+208 n64 epoll_ctl sys_epoll_ctl
+209 n64 epoll_wait sys_epoll_wait
+210 n64 remap_file_pages sys_remap_file_pages
+211 n64 rt_sigreturn sys_rt_sigreturn
+212 n64 set_tid_address sys_set_tid_address
+213 n64 restart_syscall sys_restart_syscall
+214 n64 semtimedop sys_semtimedop
+215 n64 fadvise64 sys_fadvise64_64
+216 n64 timer_create sys_timer_create
+217 n64 timer_settime sys_timer_settime
+218 n64 timer_gettime sys_timer_gettime
+219 n64 timer_getoverrun sys_timer_getoverrun
+220 n64 timer_delete sys_timer_delete
+221 n64 clock_settime sys_clock_settime
+222 n64 clock_gettime sys_clock_gettime
+223 n64 clock_getres sys_clock_getres
+224 n64 clock_nanosleep sys_clock_nanosleep
+225 n64 tgkill sys_tgkill
+226 n64 utimes sys_utimes
+227 n64 mbind sys_mbind
+228 n64 get_mempolicy sys_get_mempolicy
+229 n64 set_mempolicy sys_set_mempolicy
+230 n64 mq_open sys_mq_open
+231 n64 mq_unlink sys_mq_unlink
+232 n64 mq_timedsend sys_mq_timedsend
+233 n64 mq_timedreceive sys_mq_timedreceive
+234 n64 mq_notify sys_mq_notify
+235 n64 mq_getsetattr sys_mq_getsetattr
+236 n64 vserver sys_ni_syscall
+237 n64 waitid sys_waitid
+# 238 was sys_setaltroot
+239 n64 add_key sys_add_key
+240 n64 request_key sys_request_key
+241 n64 keyctl sys_keyctl
+242 n64 set_thread_area sys_set_thread_area
+243 n64 inotify_init sys_inotify_init
+244 n64 inotify_add_watch sys_inotify_add_watch
+245 n64 inotify_rm_watch sys_inotify_rm_watch
+246 n64 migrate_pages sys_migrate_pages
+247 n64 openat sys_openat
+248 n64 mkdirat sys_mkdirat
+249 n64 mknodat sys_mknodat
+250 n64 fchownat sys_fchownat
+251 n64 futimesat sys_futimesat
+252 n64 newfstatat sys_newfstatat
+253 n64 unlinkat sys_unlinkat
+254 n64 renameat sys_renameat
+255 n64 linkat sys_linkat
+256 n64 symlinkat sys_symlinkat
+257 n64 readlinkat sys_readlinkat
+258 n64 fchmodat sys_fchmodat
+259 n64 faccessat sys_faccessat
+260 n64 pselect6 sys_pselect6
+261 n64 ppoll sys_ppoll
+262 n64 unshare sys_unshare
+263 n64 splice sys_splice
+264 n64 sync_file_range sys_sync_file_range
+265 n64 tee sys_tee
+266 n64 vmsplice sys_vmsplice
+267 n64 move_pages sys_move_pages
+268 n64 set_robust_list sys_set_robust_list
+269 n64 get_robust_list sys_get_robust_list
+270 n64 kexec_load sys_kexec_load
+271 n64 getcpu sys_getcpu
+272 n64 epoll_pwait sys_epoll_pwait
+273 n64 ioprio_set sys_ioprio_set
+274 n64 ioprio_get sys_ioprio_get
+275 n64 utimensat sys_utimensat
+276 n64 signalfd sys_signalfd
+277 n64 timerfd sys_ni_syscall
+278 n64 eventfd sys_eventfd
+279 n64 fallocate sys_fallocate
+280 n64 timerfd_create sys_timerfd_create
+281 n64 timerfd_gettime sys_timerfd_gettime
+282 n64 timerfd_settime sys_timerfd_settime
+283 n64 signalfd4 sys_signalfd4
+284 n64 eventfd2 sys_eventfd2
+285 n64 epoll_create1 sys_epoll_create1
+286 n64 dup3 sys_dup3
+287 n64 pipe2 sys_pipe2
+288 n64 inotify_init1 sys_inotify_init1
+289 n64 preadv sys_preadv
+290 n64 pwritev sys_pwritev
+291 n64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+292 n64 perf_event_open sys_perf_event_open
+293 n64 accept4 sys_accept4
+294 n64 recvmmsg sys_recvmmsg
+295 n64 fanotify_init sys_fanotify_init
+296 n64 fanotify_mark sys_fanotify_mark
+297 n64 prlimit64 sys_prlimit64
+298 n64 name_to_handle_at sys_name_to_handle_at
+299 n64 open_by_handle_at sys_open_by_handle_at
+300 n64 clock_adjtime sys_clock_adjtime
+301 n64 syncfs sys_syncfs
+302 n64 sendmmsg sys_sendmmsg
+303 n64 setns sys_setns
+304 n64 process_vm_readv sys_process_vm_readv
+305 n64 process_vm_writev sys_process_vm_writev
+306 n64 kcmp sys_kcmp
+307 n64 finit_module sys_finit_module
+308 n64 getdents64 sys_getdents64
+309 n64 sched_setattr sys_sched_setattr
+310 n64 sched_getattr sys_sched_getattr
+311 n64 renameat2 sys_renameat2
+312 n64 seccomp sys_seccomp
+313 n64 getrandom sys_getrandom
+314 n64 memfd_create sys_memfd_create
+315 n64 bpf sys_bpf
+316 n64 execveat sys_execveat
+317 n64 userfaultfd sys_userfaultfd
+318 n64 membarrier sys_membarrier
+319 n64 mlock2 sys_mlock2
+320 n64 copy_file_range sys_copy_file_range
+321 n64 preadv2 sys_preadv2
+322 n64 pwritev2 sys_pwritev2
+323 n64 pkey_mprotect sys_pkey_mprotect
+324 n64 pkey_alloc sys_pkey_alloc
+325 n64 pkey_free sys_pkey_free
+326 n64 statx sys_statx
+327 n64 rseq sys_rseq
+328 n64 io_pgetevents sys_io_pgetevents
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
new file mode 100644
index 000000000000..3d5a47b80d2b
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -0,0 +1,382 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for mips
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> is always "o32" for this file.
+#
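+# The <compat entry point> column only takes effect when this table is used
+# to generate the 64-bit kernel's o32 table (ABI "64_o32" in syscalltbl.sh,
+# added below); rows without one fall back to the native <entry point>.
+#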
+0 o32 syscall sys_syscall sys32_syscall
+1 o32 exit sys_exit
+2 o32 fork __sys_fork
+3 o32 read sys_read
+4 o32 write sys_write
+5 o32 open sys_open compat_sys_open
+6 o32 close sys_close
+7 o32 waitpid sys_waitpid
+8 o32 creat sys_creat
+9 o32 link sys_link
+10 o32 unlink sys_unlink
+11 o32 execve sys_execve compat_sys_execve
+12 o32 chdir sys_chdir
+13 o32 time sys_time compat_sys_time
+14 o32 mknod sys_mknod
+15 o32 chmod sys_chmod
+16 o32 lchown sys_lchown
+17 o32 break sys_ni_syscall
+# 18 was sys_stat
+18 o32 unused18 sys_ni_syscall
+19 o32 lseek sys_lseek
+20 o32 getpid sys_getpid
+21 o32 mount sys_mount compat_sys_mount
+22 o32 umount sys_oldumount
+23 o32 setuid sys_setuid
+24 o32 getuid sys_getuid
+25 o32 stime sys_stime compat_sys_stime
+26 o32 ptrace sys_ptrace compat_sys_ptrace
+27 o32 alarm sys_alarm
+# 28 was sys_fstat
+28 o32 unused28 sys_ni_syscall
+29 o32 pause sys_pause
+30 o32 utime sys_utime compat_sys_utime
+31 o32 stty sys_ni_syscall
+32 o32 gtty sys_ni_syscall
+33 o32 access sys_access
+34 o32 nice sys_nice
+35 o32 ftime sys_ni_syscall
+36 o32 sync sys_sync
+37 o32 kill sys_kill
+38 o32 rename sys_rename
+39 o32 mkdir sys_mkdir
+40 o32 rmdir sys_rmdir
+41 o32 dup sys_dup
+42 o32 pipe sysm_pipe
+43 o32 times sys_times compat_sys_times
+44 o32 prof sys_ni_syscall
+45 o32 brk sys_brk
+46 o32 setgid sys_setgid
+47 o32 getgid sys_getgid
+48 o32 signal sys_ni_syscall
+49 o32 geteuid sys_geteuid
+50 o32 getegid sys_getegid
+51 o32 acct sys_acct
+52 o32 umount2 sys_umount
+53 o32 lock sys_ni_syscall
+54 o32 ioctl sys_ioctl compat_sys_ioctl
+55 o32 fcntl sys_fcntl compat_sys_fcntl
+56 o32 mpx sys_ni_syscall
+57 o32 setpgid sys_setpgid
+58 o32 ulimit sys_ni_syscall
+59 o32 unused59 sys_olduname
+60 o32 umask sys_umask
+61 o32 chroot sys_chroot
+62 o32 ustat sys_ustat compat_sys_ustat
+63 o32 dup2 sys_dup2
+64 o32 getppid sys_getppid
+65 o32 getpgrp sys_getpgrp
+66 o32 setsid sys_setsid
+67 o32 sigaction sys_sigaction sys_32_sigaction
+68 o32 sgetmask sys_sgetmask
+69 o32 ssetmask sys_ssetmask
+70 o32 setreuid sys_setreuid
+71 o32 setregid sys_setregid
+72 o32 sigsuspend sys_sigsuspend sys32_sigsuspend
+73 o32 sigpending sys_sigpending compat_sys_sigpending
+74 o32 sethostname sys_sethostname
+75 o32 setrlimit sys_setrlimit compat_sys_setrlimit
+76 o32 getrlimit sys_getrlimit compat_sys_getrlimit
+77 o32 getrusage sys_getrusage compat_sys_getrusage
+78 o32 gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 o32 settimeofday sys_settimeofday compat_sys_settimeofday
+80 o32 getgroups sys_getgroups
+81 o32 setgroups sys_setgroups
+# 82 was old_select
+82 o32 reserved82 sys_ni_syscall
+83 o32 symlink sys_symlink
+# 84 was sys_lstat
+84 o32 unused84 sys_ni_syscall
+85 o32 readlink sys_readlink
+86 o32 uselib sys_uselib
+87 o32 swapon sys_swapon
+88 o32 reboot sys_reboot
+89 o32 readdir sys_old_readdir compat_sys_old_readdir
+90 o32 mmap sys_mips_mmap
+91 o32 munmap sys_munmap
+92 o32 truncate sys_truncate compat_sys_truncate
+93 o32 ftruncate sys_ftruncate compat_sys_ftruncate
+94 o32 fchmod sys_fchmod
+95 o32 fchown sys_fchown
+96 o32 getpriority sys_getpriority
+97 o32 setpriority sys_setpriority
+98 o32 profil sys_ni_syscall
+99 o32 statfs sys_statfs compat_sys_statfs
+100 o32 fstatfs sys_fstatfs compat_sys_fstatfs
+101 o32 ioperm sys_ni_syscall
+102 o32 socketcall sys_socketcall compat_sys_socketcall
+103 o32 syslog sys_syslog
+104 o32 setitimer sys_setitimer compat_sys_setitimer
+105 o32 getitimer sys_getitimer compat_sys_getitimer
+106 o32 stat sys_newstat compat_sys_newstat
+107 o32 lstat sys_newlstat compat_sys_newlstat
+108 o32 fstat sys_newfstat compat_sys_newfstat
+109 o32 unused109 sys_uname
+110 o32 iopl sys_ni_syscall
+111 o32 vhangup sys_vhangup
+112 o32 idle sys_ni_syscall
+113 o32 vm86 sys_ni_syscall
+114 o32 wait4 sys_wait4 compat_sys_wait4
+115 o32 swapoff sys_swapoff
+116 o32 sysinfo sys_sysinfo compat_sys_sysinfo
+117 o32 ipc sys_ipc compat_sys_ipc
+118 o32 fsync sys_fsync
+119 o32 sigreturn sys_sigreturn sys32_sigreturn
+120 o32 clone __sys_clone
+121 o32 setdomainname sys_setdomainname
+122 o32 uname sys_newuname
+123 o32 modify_ldt sys_ni_syscall
+124 o32 adjtimex sys_adjtimex compat_sys_adjtimex
+125 o32 mprotect sys_mprotect
+126 o32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
+127 o32 create_module sys_ni_syscall
+128 o32 init_module sys_init_module
+129 o32 delete_module sys_delete_module
+130 o32 get_kernel_syms sys_ni_syscall
+131 o32 quotactl sys_quotactl
+132 o32 getpgid sys_getpgid
+133 o32 fchdir sys_fchdir
+134 o32 bdflush sys_bdflush
+135 o32 sysfs sys_sysfs
+136 o32 personality sys_personality sys_32_personality
+137 o32 afs_syscall sys_ni_syscall
+138 o32 setfsuid sys_setfsuid
+139 o32 setfsgid sys_setfsgid
+140 o32 _llseek sys_llseek sys_32_llseek
+141 o32 getdents sys_getdents compat_sys_getdents
+142 o32 _newselect sys_select compat_sys_select
+143 o32 flock sys_flock
+144 o32 msync sys_msync
+145 o32 readv sys_readv compat_sys_readv
+146 o32 writev sys_writev compat_sys_writev
+147 o32 cacheflush sys_cacheflush
+148 o32 cachectl sys_cachectl
+149 o32 sysmips __sys_sysmips
+150 o32 unused150 sys_ni_syscall
+151 o32 getsid sys_getsid
+152 o32 fdatasync sys_fdatasync
+153 o32 _sysctl sys_sysctl compat_sys_sysctl
+154 o32 mlock sys_mlock
+155 o32 munlock sys_munlock
+156 o32 mlockall sys_mlockall
+157 o32 munlockall sys_munlockall
+158 o32 sched_setparam sys_sched_setparam
+159 o32 sched_getparam sys_sched_getparam
+160 o32 sched_setscheduler sys_sched_setscheduler
+161 o32 sched_getscheduler sys_sched_getscheduler
+162 o32 sched_yield sys_sched_yield
+163 o32 sched_get_priority_max sys_sched_get_priority_max
+164 o32 sched_get_priority_min sys_sched_get_priority_min
+165 o32 sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
+166 o32 nanosleep sys_nanosleep compat_sys_nanosleep
+167 o32 mremap sys_mremap
+168 o32 accept sys_accept
+169 o32 bind sys_bind
+170 o32 connect sys_connect
+171 o32 getpeername sys_getpeername
+172 o32 getsockname sys_getsockname
+173 o32 getsockopt sys_getsockopt compat_sys_getsockopt
+174 o32 listen sys_listen
+175 o32 recv sys_recv compat_sys_recv
+176 o32 recvfrom sys_recvfrom compat_sys_recvfrom
+177 o32 recvmsg sys_recvmsg compat_sys_recvmsg
+178 o32 send sys_send
+179 o32 sendmsg sys_sendmsg compat_sys_sendmsg
+180 o32 sendto sys_sendto
+181 o32 setsockopt sys_setsockopt compat_sys_setsockopt
+182 o32 shutdown sys_shutdown
+183 o32 socket sys_socket
+184 o32 socketpair sys_socketpair
+185 o32 setresuid sys_setresuid
+186 o32 getresuid sys_getresuid
+187 o32 query_module sys_ni_syscall
+188 o32 poll sys_poll
+189 o32 nfsservctl sys_ni_syscall
+190 o32 setresgid sys_setresgid
+191 o32 getresgid sys_getresgid
+192 o32 prctl sys_prctl
+193 o32 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn
+194 o32 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+195 o32 rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+196 o32 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+197 o32 rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+198 o32 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+199 o32 rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+200 o32 pread64 sys_pread64 sys_32_pread
+201 o32 pwrite64 sys_pwrite64 sys_32_pwrite
+202 o32 chown sys_chown
+203 o32 getcwd sys_getcwd
+204 o32 capget sys_capget
+205 o32 capset sys_capset
+206 o32 sigaltstack sys_sigaltstack compat_sys_sigaltstack
+207 o32 sendfile sys_sendfile compat_sys_sendfile
+208 o32 getpmsg sys_ni_syscall
+209 o32 putpmsg sys_ni_syscall
+210 o32 mmap2 sys_mips_mmap2
+211 o32 truncate64 sys_truncate64 sys_32_truncate64
+212 o32 ftruncate64 sys_ftruncate64 sys_32_ftruncate64
+213 o32 stat64 sys_stat64 sys_newstat
+214 o32 lstat64 sys_lstat64 sys_newlstat
+215 o32 fstat64 sys_fstat64 sys_newfstat
+216 o32 pivot_root sys_pivot_root
+217 o32 mincore sys_mincore
+218 o32 madvise sys_madvise
+219 o32 getdents64 sys_getdents64
+220 o32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+221 o32 reserved221 sys_ni_syscall
+222 o32 gettid sys_gettid
+223 o32 readahead sys_readahead sys32_readahead
+224 o32 setxattr sys_setxattr
+225 o32 lsetxattr sys_lsetxattr
+226 o32 fsetxattr sys_fsetxattr
+227 o32 getxattr sys_getxattr
+228 o32 lgetxattr sys_lgetxattr
+229 o32 fgetxattr sys_fgetxattr
+230 o32 listxattr sys_listxattr
+231 o32 llistxattr sys_llistxattr
+232 o32 flistxattr sys_flistxattr
+233 o32 removexattr sys_removexattr
+234 o32 lremovexattr sys_lremovexattr
+235 o32 fremovexattr sys_fremovexattr
+236 o32 tkill sys_tkill
+237 o32 sendfile64 sys_sendfile64
+238 o32 futex sys_futex compat_sys_futex
+239 o32 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+240 o32 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+241 o32 io_setup sys_io_setup compat_sys_io_setup
+242 o32 io_destroy sys_io_destroy
+243 o32 io_getevents sys_io_getevents compat_sys_io_getevents
+244 o32 io_submit sys_io_submit compat_sys_io_submit
+245 o32 io_cancel sys_io_cancel
+246 o32 exit_group sys_exit_group
+247 o32 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+248 o32 epoll_create sys_epoll_create
+249 o32 epoll_ctl sys_epoll_ctl
+250 o32 epoll_wait sys_epoll_wait
+251 o32 remap_file_pages sys_remap_file_pages
+252 o32 set_tid_address sys_set_tid_address
+253 o32 restart_syscall sys_restart_syscall
+254 o32 fadvise64 sys_fadvise64_64 sys32_fadvise64_64
+255 o32 statfs64 sys_statfs64 compat_sys_statfs64
+256 o32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+257 o32 timer_create sys_timer_create compat_sys_timer_create
+258 o32 timer_settime sys_timer_settime compat_sys_timer_settime
+259 o32 timer_gettime sys_timer_gettime compat_sys_timer_gettime
+260 o32 timer_getoverrun sys_timer_getoverrun
+261 o32 timer_delete sys_timer_delete
+262 o32 clock_settime sys_clock_settime compat_sys_clock_settime
+263 o32 clock_gettime sys_clock_gettime compat_sys_clock_gettime
+264 o32 clock_getres sys_clock_getres compat_sys_clock_getres
+265 o32 clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+266 o32 tgkill sys_tgkill
+267 o32 utimes sys_utimes compat_sys_utimes
+268 o32 mbind sys_mbind compat_sys_mbind
+269 o32 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+270 o32 set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+271 o32 mq_open sys_mq_open compat_sys_mq_open
+272 o32 mq_unlink sys_mq_unlink
+273 o32 mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
+274 o32 mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+275 o32 mq_notify sys_mq_notify compat_sys_mq_notify
+276 o32 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+277 o32 vserver sys_ni_syscall
+278 o32 waitid sys_waitid compat_sys_waitid
+# 279 was sys_setaltroot
+280 o32 add_key sys_add_key
+281 o32 request_key sys_request_key
+282 o32 keyctl sys_keyctl compat_sys_keyctl
+283 o32 set_thread_area sys_set_thread_area
+284 o32 inotify_init sys_inotify_init
+285 o32 inotify_add_watch sys_inotify_add_watch
+286 o32 inotify_rm_watch sys_inotify_rm_watch
+287 o32 migrate_pages sys_migrate_pages compat_sys_migrate_pages
+288 o32 openat sys_openat compat_sys_openat
+289 o32 mkdirat sys_mkdirat
+290 o32 mknodat sys_mknodat
+291 o32 fchownat sys_fchownat
+292 o32 futimesat sys_futimesat compat_sys_futimesat
+293 o32 fstatat64 sys_fstatat64 sys_newfstatat
+294 o32 unlinkat sys_unlinkat
+295 o32 renameat sys_renameat
+296 o32 linkat sys_linkat
+297 o32 symlinkat sys_symlinkat
+298 o32 readlinkat sys_readlinkat
+299 o32 fchmodat sys_fchmodat
+300 o32 faccessat sys_faccessat
+301 o32 pselect6 sys_pselect6 compat_sys_pselect6
+302 o32 ppoll sys_ppoll compat_sys_ppoll
+303 o32 unshare sys_unshare
+304 o32 splice sys_splice
+305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range
+306 o32 tee sys_tee
+307 o32 vmsplice sys_vmsplice compat_sys_vmsplice
+308 o32 move_pages sys_move_pages compat_sys_move_pages
+309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list
+310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list
+311 o32 kexec_load sys_kexec_load compat_sys_kexec_load
+312 o32 getcpu sys_getcpu
+313 o32 epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+314 o32 ioprio_set sys_ioprio_set
+315 o32 ioprio_get sys_ioprio_get
+316 o32 utimensat sys_utimensat compat_sys_utimensat
+317 o32 signalfd sys_signalfd compat_sys_signalfd
+318 o32 timerfd sys_ni_syscall
+319 o32 eventfd sys_eventfd
+320 o32 fallocate sys_fallocate sys32_fallocate
+321 o32 timerfd_create sys_timerfd_create
+322 o32 timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+323 o32 timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
+324 o32 signalfd4 sys_signalfd4 compat_sys_signalfd4
+325 o32 eventfd2 sys_eventfd2
+326 o32 epoll_create1 sys_epoll_create1
+327 o32 dup3 sys_dup3
+328 o32 pipe2 sys_pipe2
+329 o32 inotify_init1 sys_inotify_init1
+330 o32 preadv sys_preadv compat_sys_preadv
+331 o32 pwritev sys_pwritev compat_sys_pwritev
+332 o32 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+333 o32 perf_event_open sys_perf_event_open
+334 o32 accept4 sys_accept4
+335 o32 recvmmsg sys_recvmmsg compat_sys_recvmmsg
+336 o32 fanotify_init sys_fanotify_init
+337 o32 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+338 o32 prlimit64 sys_prlimit64
+339 o32 name_to_handle_at sys_name_to_handle_at
+340 o32 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+341 o32 clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+342 o32 syncfs sys_syncfs
+343 o32 sendmmsg sys_sendmmsg compat_sys_sendmmsg
+344 o32 setns sys_setns
+345 o32 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
+346 o32 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+347 o32 kcmp sys_kcmp
+348 o32 finit_module sys_finit_module
+349 o32 sched_setattr sys_sched_setattr
+350 o32 sched_getattr sys_sched_getattr
+351 o32 renameat2 sys_renameat2
+352 o32 seccomp sys_seccomp
+353 o32 getrandom sys_getrandom
+354 o32 memfd_create sys_memfd_create
+355 o32 bpf sys_bpf
+356 o32 execveat sys_execveat compat_sys_execveat
+357 o32 userfaultfd sys_userfaultfd
+358 o32 membarrier sys_membarrier
+359 o32 mlock2 sys_mlock2
+360 o32 copy_file_range sys_copy_file_range
+361 o32 preadv2 sys_preadv2 compat_sys_preadv2
+362 o32 pwritev2 sys_pwritev2 compat_sys_pwritev2
+363 o32 pkey_mprotect sys_pkey_mprotect
+364 o32 pkey_alloc sys_pkey_alloc
+365 o32 pkey_free sys_pkey_free
+366 o32 statx sys_statx
+367 o32 rseq sys_rseq
+368 o32 io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
diff --git a/arch/mips/kernel/syscalls/syscallhdr.sh b/arch/mips/kernel/syscalls/syscallhdr.sh
new file mode 100644
index 000000000000..d2bcfa8f4d1a
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscallhdr.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry compat ; do
+ if [ -z "$offset" ]; then
+ printf "#define __NR_%s%s\t%s\n" \
+ "${prefix}" "${name}" "${nr}"
+ else
+ printf "#define __NR_%s%s\t(%s + %s)\n" \
+ "${prefix}" "${name}" "${offset}" "${nr}"
+ fi
+ nxt=$((nr+1))
+ done
+
+ printf "\n"
+ printf "#ifdef __KERNEL__\n"
+ printf "#define __NR_syscalls\t%s\n" "${nxt}"
+ printf "#endif\n"
+ printf "\n"
+ printf "#endif /* %s */" "${fileguard}"
+ printf "\n"
+) > "$out"
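+
+# Example (a sketch, with paths abbreviated): invoked from the syscalls
+# Makefile as
+#
+#	sh syscallhdr.sh syscall_n32.tbl unistd_n32.h '' '' __NR_Linux
+#
+# this emits lines such as "#define __NR_read	(__NR_Linux + 0)" under a
+# _UAPI_ASM_MIPS_UNISTD_N32_H guard, plus __NR_syscalls for kernel use.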
diff --git a/arch/mips/kernel/syscalls/syscallnr.sh b/arch/mips/kernel/syscalls/syscallnr.sh
new file mode 100644
index 000000000000..60bbdb3fe03a
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscallnr.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry compat ; do
+ nxt=$((nr+1))
+ done
+
+ printf "#define __NR_%s_Linux\t%s\n" "${prefix}" "${offset}"
+ printf "#define __NR_%s_Linux_syscalls\t%s\n" "${prefix}" "${nxt}"
+ printf "\n"
+ printf "#endif /* %s */" "${fileguard}"
+ printf "\n"
+) > "$out"
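+
+# Example (a sketch): for the n32 table this runs with prefix "N32" and
+# offset 6000, producing
+#
+#	#define __NR_N32_Linux	6000
+#	#define __NR_N32_Linux_syscalls	333
+#
+# where 333 is one past the highest assigned number (332, io_pgetevents).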
diff --git a/arch/mips/kernel/syscalls/syscalltbl.sh b/arch/mips/kernel/syscalls/syscalltbl.sh
new file mode 100644
index 000000000000..acd338d33bbe
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscalltbl.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+my_abi="$4"
+offset="$5"
+
+emit() {
+ t_nxt="$1"
+ t_nr="$2"
+ t_entry="$3"
+
+ while [ $t_nxt -lt $t_nr ]; do
+ printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+ t_nxt=$((t_nxt+1))
+ done
+ printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+}
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ nxt=0
+ if [ -z "$offset" ]; then
+ offset=0
+ fi
+
+ while read nr abi name entry compat ; do
+ if [ "$my_abi" = "64_o32" ] && [ ! -z "$compat" ]; then
+ emit $((nxt+offset)) $((nr+offset)) $compat
+ else
+ emit $((nxt+offset)) $((nr+offset)) $entry
+ fi
+ nxt=$((nr+1))
+ done
+) > "$out"
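+
+# Example (a sketch): holes in a table are padded, so the gap at o32 number
+# 279 (the retired sys_setaltroot slot) comes out as
+#
+#	__SYSCALL(4279, sys_ni_syscall, )
+#
+# and for the "64_o32" ABI a row such as "5 o32 open sys_open compat_sys_open"
+# emits __SYSCALL(4005, compat_sys_open, ) rather than the native entry.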
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 15e103c6d799..c91097f7b32f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -50,6 +50,7 @@
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
+#include <asm/isa-rev.h>
#include <asm/mips-cps.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
@@ -277,8 +278,10 @@ static void __show_regs(const struct pt_regs *regs)
#ifdef CONFIG_CPU_HAS_SMARTMIPS
printk("Acx : %0*lx\n", field, regs->acx);
#endif
- printk("Hi : %0*lx\n", field, regs->hi);
- printk("Lo : %0*lx\n", field, regs->lo);
+ if (MIPS_ISA_REV < 6) {
+ printk("Hi : %0*lx\n", field, regs->hi);
+ printk("Lo : %0*lx\n", field, regs->lo);
+ }
/*
* Saved cp0 registers
@@ -706,6 +709,8 @@ asmlinkage void do_ov(struct pt_regs *regs)
exception_exit(prev_state);
}
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
/*
* Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits. This is important as Inexact can
@@ -794,9 +799,6 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
regs->cp0_epc = old_epc;
regs->regs[31] = old_ra;
- /* Save the FP context to struct thread_struct */
- lose_fpu(1);
-
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
@@ -848,8 +850,6 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
* register operands before invoking the emulator, which seems
* a bit extreme for what should be an infrequent event.
*/
- /* Ensure 'resume' not overwrite saved fp context again. */
- lose_fpu(1);
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
@@ -876,6 +876,45 @@ out:
exception_exit(prev_state);
}
+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+ if (mt_fpemul_threshold > 0 &&
+ ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+ /*
+ * If there's no FPU present, or if the application has already
+ * restricted the allowed set to exclude any CPUs with FPUs,
+ * we'll skip the procedure.
+ */
+ if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
+ cpumask_t tmask;
+
+ current->thread.user_cpus_allowed
+ = current->cpus_allowed;
+ cpumask_and(&tmask, &current->cpus_allowed,
+ &mt_fpu_cpumask);
+ set_cpus_allowed_ptr(current, &tmask);
+ set_thread_flag(TIF_FPUBOUND);
+ }
+ }
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
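+/*
+ * With FP support compiled out there is nothing to emulate; returning -1
+ * tells the caller the instruction was not handled here.
+ */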
+static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
+ unsigned long old_epc, unsigned long old_ra)
+{
+ return -1;
+}
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
+
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
const char *str)
{
@@ -1160,35 +1199,6 @@ out:
}
/*
- * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
- * emulated more than some threshold number of instructions, force migration to
- * a "CPU" that has FP support.
- */
-static void mt_ase_fp_affinity(void)
-{
-#ifdef CONFIG_MIPS_MT_FPAFF
- if (mt_fpemul_threshold > 0 &&
- ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
- /*
- * If there's no FPU present, or if the application has already
- * restricted the allowed set to exclude any CPUs with FPUs,
- * we'll skip the procedure.
- */
- if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
- cpumask_t tmask;
-
- current->thread.user_cpus_allowed
- = current->cpus_allowed;
- cpumask_and(&tmask, &current->cpus_allowed,
- &mt_fpu_cpumask);
- set_cpus_allowed_ptr(current, &tmask);
- set_thread_flag(TIF_FPUBOUND);
- }
- }
-#endif /* CONFIG_MIPS_MT_FPAFF */
-}
-
-/*
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(cu2_chain);
@@ -1215,23 +1225,25 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
return NOTIFY_OK;
}
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
static int enable_restore_fp_context(int msa)
{
int err, was_fpu_owner, prior_msa;
+ bool first_fp;
+
+ /* Initialize context if it hasn't been used already */
+ first_fp = init_fp_ctx(current);
- if (!used_math()) {
- /* First time FP context user. */
+ if (first_fp) {
preempt_disable();
- err = init_fpu();
+ err = own_fpu_inatomic(1);
if (msa && !err) {
enable_msa();
- init_msa_upper();
set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE);
}
preempt_enable();
- if (!err)
- set_used_math();
return err;
}
@@ -1322,17 +1334,23 @@ out:
return 0;
}
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+static int enable_restore_fp_context(int msa)
+{
+ return SIGILL;
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
asmlinkage void do_cpu(struct pt_regs *regs)
{
enum ctx_state prev_state;
unsigned int __user *epc;
unsigned long old_epc, old31;
- void __user *fault_addr;
unsigned int opcode;
- unsigned long fcr31;
unsigned int cpid;
- int status, err;
- int sig;
+ int status;
prev_state = exception_enter();
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
@@ -1370,6 +1388,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
case 3:
/*
* The COP3 opcode space and consequently the CP0.Status.CU3
@@ -1389,7 +1408,11 @@ asmlinkage void do_cpu(struct pt_regs *regs)
}
/* Fall through. */
- case 1:
+ case 1: {
+ void __user *fault_addr;
+ unsigned long fcr31;
+ int err, sig;
+
err = enable_restore_fp_context(0);
if (raw_cpu_has_fpu && !err)
@@ -1410,6 +1433,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
mt_ase_fp_affinity();
break;
+ }
+#else /* CONFIG_MIPS_FP_SUPPORT */
+ case 1:
+ case 3:
+ force_sig(SIGILL, current);
+ break;
+#endif /* CONFIG_MIPS_FP_SUPPORT */
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index ce446eed62d2..c60e7719ef77 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -882,18 +882,12 @@ do { \
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int __user *pc)
{
+ unsigned long origpc, orig31, value;
union mips_instruction insn;
- unsigned long value;
- unsigned int res, preempted;
- unsigned long origpc;
- unsigned long orig31;
- void __user *fault_addr = NULL;
+ unsigned int res;
#ifdef CONFIG_EVA
mm_segment_t seg;
#endif
- union fpureg *fpr;
- enum msa_2b_fmt df;
- unsigned int wd;
origpc = (unsigned long)pc;
orig31 = regs->regs[31];
@@ -1212,15 +1206,18 @@ static void emulate_load_store_insn(struct pt_regs *regs,
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
case lwc1_op:
case ldc1_op:
case swc1_op:
case sdc1_op:
- case cop1x_op:
+ case cop1x_op: {
+ void __user *fault_addr = NULL;
+
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
- lose_fpu(1); /* Save FPU state for the emulator. */
res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
own_fpu(1); /* Restore FPU state. */
@@ -1231,8 +1228,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (res == 0)
break;
return;
+ }
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+ case msa_op: {
+ unsigned int wd, preempted;
+ enum msa_2b_fmt df;
+ union fpureg *fpr;
- case msa_op:
if (!cpu_has_msa)
goto sigill;
@@ -1309,6 +1314,8 @@ static void emulate_load_store_insn(struct pt_regs *regs,
compute_return_epc(regs);
break;
+ }
+#endif /* CONFIG_CPU_HAS_MSA */
#ifndef CONFIG_CPU_MIPSR6
/*
@@ -1393,7 +1400,6 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
unsigned long origpc, contpc;
union mips_instruction insn;
struct mm_decoded_insn mminsn;
- void __user *fault_addr = NULL;
origpc = regs->cp0_epc;
orig31 = regs->regs[31];
@@ -1709,6 +1715,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
/* LL,SC,LLD,SCD are not serviced */
goto sigbus;
+#ifdef CONFIG_MIPS_FP_SUPPORT
case mm_pool32f_op:
switch (insn.mm_x_format.func) {
case mm_lwxc1_func:
@@ -1723,7 +1730,9 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
case mm_ldc132_op:
case mm_sdc132_op:
case mm_lwc132_op:
- case mm_swc132_op:
+ case mm_swc132_op: {
+ void __user *fault_addr = NULL;
+
fpu_emul:
/* roll back jump/branch */
regs->cp0_epc = origpc;
@@ -1733,7 +1742,6 @@ fpu_emul:
BUG_ON(!used_math());
BUG_ON(!is_fpu_owner());
- lose_fpu(1); /* save the FPU state for the emulator */
res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
own_fpu(1); /* restore FPU state */
@@ -1744,6 +1752,8 @@ fpu_emul:
if (res == 0)
goto success;
return;
+ }
+#endif /* CONFIG_MIPS_FP_SUPPORT */
case mm_lh32_op:
reg = insn.mm_i_format.rt;
@@ -2338,7 +2348,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
set_fs(seg);
return;
- }
+ }
goto sigbus;
}
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 48a9c6b90e07..9df3ebdc7b0f 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -126,8 +126,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
/* Map delay slot emulation page */
base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
- VM_READ|VM_WRITE|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
0, NULL);
if (IS_ERR_VALUE(base)) {
ret = base;
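
For reference: the delay-slot emulation page drops VM_WRITE here but keeps VM_MAYWRITE, and that distinction carries the dsemul rework later in this diff, which writes emuframes through access_process_vm() with FOLL_FORCE. Forced get_user_pages() writes are only permitted on VMAs that could have been mapped writable; a minimal sketch of that condition (simplified, not the literal GUP check):

    /* Sketch: a FOLL_FORCE write may break COW on a read-only private
     * mapping only when the VMA is may-writable. */
    static bool forced_write_allowed(struct vm_area_struct *vma)
    {
            return !!(vma->vm_flags & VM_MAYWRITE);
    }
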
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 971a504001c2..cb7e9ed7a453 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -72,7 +72,7 @@ SECTIONS
/* Exception table for data bus errors */
__dbe_table : {
__start___dbe_table = .;
- *(__dbe_table)
+ KEEP(*(__dbe_table))
__stop___dbe_table = .;
}
@@ -123,7 +123,7 @@ SECTIONS
. = ALIGN(4);
.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
__mips_machines_start = .;
- *(.mips.machines.init)
+ KEEP(*(.mips.machines.init))
__mips_machines_end = .;
}
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
index 0e61a5b7647f..ba73b4077668 100644
--- a/arch/mips/kernel/watch.c
+++ b/arch/mips/kernel/watch.c
@@ -27,12 +27,15 @@ void mips_install_watch_registers(struct task_struct *t)
case 4:
write_c0_watchlo3(watches->watchlo[3]);
write_c0_watchhi3(watchhi | watches->watchhi[3]);
+ /* fall through */
case 3:
write_c0_watchlo2(watches->watchlo[2]);
write_c0_watchhi2(watchhi | watches->watchhi[2]);
+ /* fall through */
case 2:
write_c0_watchlo1(watches->watchlo[1]);
write_c0_watchhi1(watchhi | watches->watchhi[1]);
+ /* fall through */
case 1:
write_c0_watchlo0(watches->watchlo[0]);
write_c0_watchhi0(watchhi | watches->watchhi[0]);
@@ -55,10 +58,13 @@ void mips_read_watch_registers(void)
BUG();
case 4:
watches->watchhi[3] = (read_c0_watchhi3() & watchhi_mask);
+ /* fall through */
case 3:
watches->watchhi[2] = (read_c0_watchhi2() & watchhi_mask);
+ /* fall through */
case 2:
watches->watchhi[1] = (read_c0_watchhi1() & watchhi_mask);
+ /* fall through */
case 1:
watches->watchhi[0] = (read_c0_watchhi0() & watchhi_mask);
}
@@ -85,18 +91,25 @@ void mips_clear_watch_registers(void)
BUG();
case 8:
write_c0_watchlo7(0);
+ /* fall through */
case 7:
write_c0_watchlo6(0);
+ /* fall through */
case 6:
write_c0_watchlo5(0);
+ /* fall through */
case 5:
write_c0_watchlo4(0);
+ /* fall through */
case 4:
write_c0_watchlo3(0);
+ /* fall through */
case 3:
write_c0_watchlo2(0);
+ /* fall through */
case 2:
write_c0_watchlo1(0);
+ /* fall through */
case 1:
write_c0_watchlo0(0);
}
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 76b93a9c8c9b..760aec70dce5 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -18,6 +18,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
+ depends on MIPS_FP_SUPPORT
select EXPORT_UASM
select PREEMPT_NOTIFIERS
select ANON_INODES
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 1fcc4d149054..3734cd58895e 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1004,14 +1004,37 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- bool is_dirty = false;
+ bool flush = false;
int r;
mutex_lock(&kvm->slots_lock);
- r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+ r = kvm_get_dirty_log_protect(kvm, log, &flush);
- if (is_dirty) {
+ if (flush) {
+ slots = kvm_memslots(kvm);
+ memslot = id_to_memslot(slots, log->slot);
+
+ /* Let implementation handle TLB/GVA invalidation */
+ kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
+ }
+
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+}
+
+int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ bool flush = false;
+ int r;
+
+ mutex_lock(&kvm->slots_lock);
+
+ r = kvm_clear_dirty_log_protect(kvm, log, &flush);
+
+ if (flush) {
slots = kvm_memslots(kvm);
memslot = id_to_memslot(slots, log->slot);
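
For reference, the new ioctl lets userspace harvest dirty bits with KVM_GET_DIRTY_LOG and then clear (re-protect) them in finer-grained chunks. A hedged userspace sketch, assuming a valid vm_fd, a bitmap already fetched from the get side, and the manual-protect capability enabled:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Re-protect the first 64 pages of memslot 0; 'bitmap' holds the
     * dirty bits userspace has finished consuming (assumed to be set
     * up elsewhere). */
    struct kvm_clear_dirty_log clear = {
            .slot         = 0,
            .first_page   = 0,
            .num_pages    = 64,
            .dirty_bitmap = bitmap,
    };
    ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
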
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index d8dcdb350405..97e538a8c1be 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -551,7 +551,7 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
(pte_dirty(old_pte) && !pte_dirty(hva_pte));
}
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
unsigned long end = hva + PAGE_SIZE;
int ret;
@@ -559,6 +559,7 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
if (ret)
kvm_mips_callbacks->flush_shadow_all(kvm);
+ return 0;
}
static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index 8f68ee02a8c2..72e5f8fb2b35 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -197,7 +197,8 @@ void __init prom_init_env(void)
cpu_clock_freq = 797000000;
break;
case PRID_REV_LOONGSON3A_R1:
- case PRID_REV_LOONGSON3A_R2:
+ case PRID_REV_LOONGSON3A_R2_0:
+ case PRID_REV_LOONGSON3A_R2_1:
case PRID_REV_LOONGSON3A_R3_0:
case PRID_REV_LOONGSON3A_R3_1:
cpu_clock_freq = 900000000;
diff --git a/arch/mips/loongson64/loongson-3/cop2-ex.c b/arch/mips/loongson64/loongson-3/cop2-ex.c
index 621d6af5f6eb..9efdfe430ff0 100644
--- a/arch/mips/loongson64/loongson-3/cop2-ex.c
+++ b/arch/mips/loongson64/loongson-3/cop2-ex.c
@@ -43,11 +43,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action,
/* If FPU is owned, we needn't init or restore fp */
if (!fpu_owned) {
set_thread_flag(TIF_USEDFPU);
- if (!used_math()) {
- _init_fpu(current->thread.fpu.fcr31);
- set_used_math();
- } else
- _restore_fp(current);
+ init_fp_ctx(current);
+ _restore_fp(current);
}
preempt_enable();
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index b5c1e0aa955e..8fba0aa48bf4 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -682,7 +682,8 @@ void play_dead(void)
play_dead_at_ckseg1 =
(void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead);
break;
- case PRID_REV_LOONGSON3A_R2:
+ case PRID_REV_LOONGSON3A_R2_0:
+ case PRID_REV_LOONGSON3A_R2_1:
case PRID_REV_LOONGSON3A_R3_0:
case PRID_REV_LOONGSON3A_R3_1:
play_dead_at_ckseg1 =
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 62deb025970b..82e2993c1a2c 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2831,6 +2831,13 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
u16 *instr_ptr;
int sig = 0;
+ /*
+ * Initialize context if it hasn't been used already, otherwise ensure
+ * it has been saved to struct thread_struct.
+ */
+ if (!init_fp_ctx(current))
+ lose_fpu(1);
+
oldepc = xcp->cp0_epc;
do {
prevepc = xcp->cp0_epc;
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 5450f4d1c920..e2d46cb93ca9 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -214,8 +214,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
{
int isa16 = get_isa16_mode(regs->cp0_epc);
mips_instruction break_math;
- struct emuframe __user *fr;
- int err, fr_idx;
+ unsigned long fr_uaddr;
+ struct emuframe fr;
+ int fr_idx, ret;
/* NOP is easy */
if (ir == 0)
@@ -250,27 +251,31 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
fr_idx = alloc_emuframe();
if (fr_idx == BD_EMUFRAME_NONE)
return SIGBUS;
- fr = &dsemul_page()[fr_idx];
/* Retrieve the appropriately encoded break instruction */
break_math = BREAK_MATH(isa16);
/* Write the instructions to the frame */
if (isa16) {
- err = __put_user(ir >> 16,
- (u16 __user *)(&fr->emul));
- err |= __put_user(ir & 0xffff,
- (u16 __user *)((long)(&fr->emul) + 2));
- err |= __put_user(break_math >> 16,
- (u16 __user *)(&fr->badinst));
- err |= __put_user(break_math & 0xffff,
- (u16 __user *)((long)(&fr->badinst) + 2));
+ union mips_instruction _emul = {
+ .halfword = { ir >> 16, ir }
+ };
+ union mips_instruction _badinst = {
+ .halfword = { break_math >> 16, break_math }
+ };
+
+ fr.emul = _emul.word;
+ fr.badinst = _badinst.word;
} else {
- err = __put_user(ir, &fr->emul);
- err |= __put_user(break_math, &fr->badinst);
+ fr.emul = ir;
+ fr.badinst = break_math;
}
- if (unlikely(err)) {
+ /* Write the frame to user memory */
+ fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
+ ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
+ FOLL_FORCE | FOLL_WRITE);
+ if (unlikely(ret != sizeof(fr))) {
MIPS_FPU_EMU_INC_STATS(errors);
free_emuframe(fr_idx, current->mm);
return SIGBUS;
@@ -282,10 +287,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
atomic_set(&current->thread.bd_emu_frame, fr_idx);
/* Change user register context to execute the frame */
- regs->cp0_epc = (unsigned long)&fr->emul | isa16;
-
- /* Ensure the icache observes our newly written frame */
- flush_cache_sigtramp((unsigned long)&fr->emul);
+ regs->cp0_epc = fr_uaddr | isa16;
return 0;
}
diff --git a/arch/mips/math-emu/me-debugfs.c b/arch/mips/math-emu/me-debugfs.c
index 62566385ce0e..58798f527356 100644
--- a/arch/mips/math-emu/me-debugfs.c
+++ b/arch/mips/math-emu/me-debugfs.c
@@ -183,17 +183,7 @@ static int fpuemustats_clear_show(struct seq_file *s, void *unused)
return 0;
}
-static int fpuemustats_clear_open(struct inode *inode, struct file *file)
-{
- return single_open(file, fpuemustats_clear_show, inode->i_private);
-}
-
-static const struct file_operations fpuemustats_clear_fops = {
- .open = fpuemustats_clear_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(fpuemustats_clear);
static int __init debugfs_fpuemu(void)
{
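
For reference, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> expands to essentially the boilerplate deleted above, keyed off the fpuemustats_clear_show() naming convention; roughly:

    #define DEFINE_SHOW_ATTRIBUTE(__name)                                \
    static int __name ## _open(struct inode *inode, struct file *file)  \
    {                                                                    \
            return single_open(file, __name ## _show, inode->i_private);\
    }                                                                    \
                                                                         \
    static const struct file_operations __name ## _fops = {             \
            .owner   = THIS_MODULE,                                      \
            .open    = __name ## _open,                                  \
            .read    = seq_read,                                         \
            .llseek  = seq_lseek,                                        \
            .release = single_release,                                   \
    }
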
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 3466fcdae0ca..01848cdf2074 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
pmd_t *pmdp;
pte_t *ptep;
- pr_debug("cpage[%08lx,%08lx]\n",
+ pr_debug("cpage[%08llx,%08lx]\n",
cpu_context(smp_processor_id(), mm), addr);
/* No ASID => no such page in the cache. */
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 05bd77727fb9..d0b64df51eb2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void)
r4k_blast_scache = blast_scache128;
}
+static void (*r4k_blast_scache_node)(long node);
+
+static void r4k_blast_scache_node_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (current_cpu_type() != CPU_LOONGSON3)
+ r4k_blast_scache_node = (void *)cache_noop;
+ else if (sc_lsize == 16)
+ r4k_blast_scache_node = blast_scache16_node;
+ else if (sc_lsize == 32)
+ r4k_blast_scache_node = blast_scache32_node;
+ else if (sc_lsize == 64)
+ r4k_blast_scache_node = blast_scache64_node;
+ else if (sc_lsize == 128)
+ r4k_blast_scache_node = blast_scache128_node;
+}
+
static inline void local_r4k___flush_cache_all(void * args)
{
switch (current_cpu_type()) {
case CPU_LOONGSON2:
- case CPU_LOONGSON3:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args)
r4k_blast_scache();
break;
+ case CPU_LOONGSON3:
+ /* Use get_ebase_cpunum() for both NUMA=y/n */
+ r4k_blast_scache_node(get_ebase_cpunum() >> 2);
+ break;
+
case CPU_BMIPS5000:
r4k_blast_scache();
__sync();
@@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
preempt_disable();
if (cpu_has_inclusive_pcaches) {
- if (size >= scache_size)
- r4k_blast_scache();
- else
+ if (size >= scache_size) {
+ if (current_cpu_type() != CPU_LOONGSON3)
+ r4k_blast_scache();
+ else
+ r4k_blast_scache_node(pa_to_nid(addr));
+ } else {
blast_scache_range(addr, addr + size);
+ }
preempt_enable();
__sync();
return;
@@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
preempt_disable();
if (cpu_has_inclusive_pcaches) {
- if (size >= scache_size)
- r4k_blast_scache();
- else {
+ if (size >= scache_size) {
+ if (current_cpu_type() != CPU_LOONGSON3)
+ r4k_blast_scache();
+ else
+ r4k_blast_scache_node(pa_to_nid(addr));
+ } else {
/*
* There is no clearly documented alignment requirement
* for the cache instruction on MIPS processors and
@@ -1251,6 +1280,7 @@ static void probe_pcache(void)
case CPU_VR4133:
write_c0_config(config & ~VR41_CONF_P4K);
+ /* fall through */
case CPU_VR4131:
/* Workaround for cache instruction bug of VR4131 */
if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
@@ -1352,7 +1382,7 @@ static void probe_pcache(void)
c->dcache.ways *
c->dcache.linesz;
c->dcache.waybit = 0;
- if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
+ if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2_0)
c->options |= MIPS_CPU_PREFETCH;
break;
@@ -1498,6 +1528,7 @@ static void probe_pcache(void)
c->dcache.flags |= MIPS_CACHE_PINDEX;
break;
}
+ /* fall through */
default:
if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE)
c->dcache.flags |= MIPS_CACHE_ALIASES;
@@ -1918,6 +1949,7 @@ void r4k_cache_init(void)
r4k_blast_scache_page_setup();
r4k_blast_scache_page_indexed_setup();
r4k_blast_scache_setup();
+ r4k_blast_scache_node_setup();
#ifdef CONFIG_EVA
r4k_blast_dcache_user_page_setup();
r4k_blast_icache_user_page_setup();
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 067714291643..37b1cb246332 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -576,6 +576,7 @@ void build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_R5500:
if (m4kc_tlbp_war())
uasm_i_nop(p);
+ /* fall through */
case CPU_ALCHEMY:
tlbw(p);
break;
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 24e5b0d06899..75ef90486fe6 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -104,6 +104,7 @@ static const struct insn insn_table_MM[insn_invalid] = {
[insn_sltiu] = {M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
[insn_sltu] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD},
[insn_sra] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD},
+ [insn_srav] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srav_op), RT | RS | RD},
[insn_srl] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD},
[insn_srlv] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD},
[insn_rotr] = {M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD},
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 60ceb93c71a0..6abe40fc413d 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -171,6 +171,7 @@ static const struct insn insn_table[insn_invalid] = {
[insn_sltiu] = {M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM},
[insn_sltu] = {M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD},
[insn_sra] = {M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE},
+ [insn_srav] = {M(spec_op, 0, 0, 0, 0, srav_op), RS | RT | RD},
[insn_srl] = {M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE},
[insn_srlv] = {M(spec_op, 0, 0, 0, 0, srlv_op), RS | RT | RD},
[insn_subu] = {M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD},
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 57570c0649b4..45b6264ff308 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -61,10 +61,10 @@ enum opcode {
insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_multu, insn_nor,
insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sb,
insn_sc, insn_scd, insn_sd, insn_sh, insn_sll, insn_sllv,
- insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srl,
- insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp,
- insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor,
- insn_xori, insn_yield,
+ insn_slt, insn_slti, insn_sltiu, insn_sltu, insn_sra, insn_srav,
+ insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
+ insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
+ insn_xor, insn_xori, insn_yield,
insn_invalid /* insn_invalid must be last */
};
@@ -353,6 +353,7 @@ I_u2u1s3(_slti)
I_u2u1s3(_sltiu)
I_u3u1u2(_sltu)
I_u2u1u3(_sra)
+I_u3u2u1(_srav)
I_u2u1u3(_srl)
I_u3u2u1(_srlv)
I_u2u1u3(_rotr)
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 17c7fd471a27..94c11f5eac74 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -6,7 +6,6 @@
# Copyright (C) 2008 Wind River Systems, Inc.
# written by Ralf Baechle <ralf@linux-mips.org>
#
-obj-y += malta-display.o
obj-y += malta-dt.o
obj-y += malta-dtshim.o
obj-y += malta-init.o
diff --git a/arch/mips/mti-malta/malta-display.c b/arch/mips/mti-malta/malta-display.c
deleted file mode 100644
index ee0bd50f754b..000000000000
--- a/arch/mips/mti-malta/malta-display.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Display routines for display messages in MIPS boards ascii display.
- *
- * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc.
- * All rights reserved.
- * Authors: Carsten Langgaard <carstenl@mips.com>
- * Steven J. Hill <sjhill@mips.com>
- */
-#include <linux/compiler.h>
-#include <linux/timer.h>
-#include <linux/io.h>
-
-#include <asm/mips-boards/generic.h>
-
-extern const char display_string[];
-static unsigned int display_count;
-static unsigned int max_display_count;
-
-void mips_display_message(const char *str)
-{
- static unsigned int __iomem *display = NULL;
- int i;
-
- if (unlikely(display == NULL))
- display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
-
- for (i = 0; i <= 14; i += 2) {
- if (*str)
- __raw_writel(*str++, display + i);
- else
- __raw_writel(' ', display + i);
- }
-}
-
-static void scroll_display_message(struct timer_list *unused);
-static DEFINE_TIMER(mips_scroll_timer, scroll_display_message);
-
-static void scroll_display_message(struct timer_list *unused)
-{
- mips_display_message(&display_string[display_count++]);
- if (display_count == max_display_count)
- display_count = 0;
-
- mod_timer(&mips_scroll_timer, jiffies + HZ);
-}
-
-void mips_scroll_message(void)
-{
- del_timer_sync(&mips_scroll_timer);
- max_display_count = strlen(display_string) + 1 - 8;
- mod_timer(&mips_scroll_timer, jiffies + 1);
-}
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 009f2918b320..ff2c1d809538 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -118,8 +118,6 @@ phys_addr_t mips_cpc_default_phys_base(void)
void __init prom_init(void)
{
- mips_display_message("LINUX");
-
/*
* early setup of _pcictrl_bonito so that we can determine
* the system controller on a CORE_EMUL board
@@ -277,7 +275,6 @@ mips_pci_controller:
default:
/* Unknown system controller */
- mips_display_message("SC Error");
while (1); /* We die here... */
}
board_nmi_handler_setup = mips_nmi_setup;
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 5d4c5e5fbd69..85c6c11ebcea 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -81,8 +81,6 @@ const char *get_system_type(void)
return "MIPS Malta";
}
-const char display_string[] = " LINUX ON MALTA ";
-
#ifdef CONFIG_BLK_DEV_FD
static void __init fd_activate(void)
{
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index d22b7edc3886..f403574a1e6f 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -251,8 +251,6 @@ void __init plat_time_init(void)
printk("CPU frequency %d.%02d MHz\n", freq/1000000,
(freq%1000000)*100/1000000);
- mips_scroll_message();
-
#ifdef CONFIG_I8253
/* Only Malta has a PIT. */
setup_pit_timer();
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 4d8cb9bb8365..3a0e34f4e615 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1159,19 +1159,19 @@ jmp_cmp:
emit_load(r_A, r_skb, off, ctx);
break;
case BPF_ANC | SKF_AD_VLAN_TAG:
- case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
ctx->flags |= SEEN_SKB | SEEN_A;
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
vlan_tci) != 2);
off = offsetof(struct sk_buff, vlan_tci);
- emit_half_load_unsigned(r_s0, r_skb, off, ctx);
- if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
- emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
- } else {
- emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
- /* return 1 if present */
- emit_sltu(r_A, r_zero, r_A, ctx);
- }
+ emit_half_load_unsigned(r_A, r_skb, off, ctx);
+ break;
+ case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx);
+ if (PKT_VLAN_PRESENT_BIT)
+ emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx);
+ if (PKT_VLAN_PRESENT_BIT < 7)
+ emit_andi(r_A, r_A, 1, ctx);
break;
case BPF_ANC | SKF_AD_PKTTYPE:
ctx->flags |= SEEN_SKB;
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index aeb7b1b0f202..b16710a8a9e7 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -854,6 +854,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_ARSH | BPF_X: /* ALU_REG */
src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
if (src < 0 || dst < 0)
@@ -913,6 +914,9 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_RSH:
emit_instr(ctx, srlv, dst, dst, src);
break;
+ case BPF_ARSH:
+ emit_instr(ctx, srav, dst, dst, src);
+ break;
default:
pr_err("ALU_REG NOT HANDLED\n");
return -EINVAL;
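
For reference, BPF_ARSH is the arithmetic (sign-preserving) right shift, hence the mapping to MIPS srav rather than the logical srlv. A minimal userspace illustration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int32_t v = -8;

            /* logical shift (srlv): zero-fills from the left */
            printf("%u\n", (uint32_t)v >> 1);       /* 2147483644 */
            /* arithmetic shift (srav): replicates the sign bit */
            printf("%d\n", v >> 1);                 /* -4 */
            return 0;
    }
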
diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c
index 8feae9154baf..45266406b585 100644
--- a/arch/mips/pci/fixup-sb1250.c
+++ b/arch/mips/pci/fixup-sb1250.c
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2004, 2006 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <macro@mips.com>
+ * Copyright (C) 2018 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -8,6 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/dma-mapping.h>
#include <linux/pci.h>
/*
@@ -22,6 +24,57 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
quirk_sb1250_pci);
/*
+ * The BCM1250, etc. PCI host bridge does not support DAC on its 32-bit
+ * bus, so we set the bus's DMA mask accordingly. However, the HT link
+ * down the artificial PCI-HT bridge supports 40-bit addressing and the
+ * SP1011 HT-PCI bridge downstream supports both DAC and a 64-bit bus
+ * width, so we record the PCI-HT bridge's secondary and subordinate bus
+ * numbers and do not set the mask for devices present in the inclusive
+ * range of those.
+ */
+struct sb1250_bus_dma_mask_exclude {
+ bool set;
+ unsigned char start;
+ unsigned char end;
+};
+
+static int sb1250_bus_dma_mask(struct pci_dev *dev, void *data)
+{
+ struct sb1250_bus_dma_mask_exclude *exclude = data;
+ bool exclude_this;
+ bool ht_bridge;
+
+ exclude_this = exclude->set && (dev->bus->number >= exclude->start &&
+ dev->bus->number <= exclude->end);
+ ht_bridge = !exclude->set && (dev->vendor == PCI_VENDOR_ID_SIBYTE &&
+ dev->device == PCI_DEVICE_ID_BCM1250_HT);
+
+ if (exclude_this) {
+ dev_dbg(&dev->dev, "not disabling DAC for device");
+ } else if (ht_bridge) {
+ exclude->start = dev->subordinate->number;
+ exclude->end = pci_bus_max_busnr(dev->subordinate);
+ exclude->set = true;
+ dev_dbg(&dev->dev, "not disabling DAC for [bus %02x-%02x]",
+ exclude->start, exclude->end);
+ } else {
+ dev_dbg(&dev->dev, "disabling DAC for device");
+ dev->dev.bus_dma_mask = DMA_BIT_MASK(32);
+ }
+
+ return 0;
+}
+
+static void quirk_sb1250_pci_dac(struct pci_dev *dev)
+{
+ struct sb1250_bus_dma_mask_exclude exclude = { .set = false };
+
+ pci_walk_bus(dev->bus, sb1250_bus_dma_mask, &exclude);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
+ quirk_sb1250_pci_dac);
+
+/*
* The BCM1250, etc. PCI/HT bridge reports as a host bridge.
*/
static void quirk_sb1250_ht(struct pci_dev *dev)
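
For reference, dev->dev.bus_dma_mask caps whatever mask a driver later requests with dma_set_mask(), so devices behind the 32-bit host bridge never see DAC cycles. A hedged sketch of the effect (the actual clamping happens in the DMA mapping core):

    /* Sketch: a device requesting a 64-bit mask behind the 32-bit host
     * bridge is still limited to 32-bit addressing. */
    static u64 effective_dma_mask(struct device *dev, u64 requested)
    {
            if (dev->bus_dma_mask && dev->bus_dma_mask < requested)
                    return dev->bus_dma_mask;
            return requested;
    }
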
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 958899ffe99c..bafbf69e7dc4 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -445,8 +445,7 @@ static int rt3883_pci_probe(struct platform_device *pdev)
/* find the PCI host bridge child node */
for_each_child_of_node(np, child) {
- if (child->type &&
- of_node_cmp(child->type, "pci") == 0) {
+ if (of_node_is_type(child, "pci")) {
rpc->pci_controller.of_node = child;
break;
}
@@ -464,8 +463,7 @@ static int rt3883_pci_probe(struct platform_device *pdev)
for_each_available_child_of_node(rpc->pci_controller.of_node, child) {
int devfn;
- if (!child->type ||
- of_node_cmp(child->type, "pci") != 0)
+ if (!of_node_is_type(child, "pci"))
continue;
devfn = of_pci_get_devfn(child);
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 41b71c4352c2..c1ce6f43642b 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -84,7 +84,7 @@ static struct rt2880_pmx_func pcie_rst_grp[] = {
};
static struct rt2880_pmx_func nd_sd_grp[] = {
FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
- FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15)
+ FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
};
static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 2b23ad640f39..828d8cc3a5df 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -23,6 +23,7 @@
#include <linux/mtd/platnand.h>
#include <linux/mtd/mtd.h>
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/serial_8250.h>
@@ -127,14 +128,18 @@ static struct resource cf_slot0_res[] = {
}
};
-static struct cf_device cf_slot0_data = {
- .gpio_pin = CF_GPIO_NUM
+static struct gpiod_lookup_table cf_slot0_gpio_table = {
+ .dev_id = "pata-rb532-cf",
+ .table = {
+ GPIO_LOOKUP("gpio0", CF_GPIO_NUM,
+ NULL, GPIO_ACTIVE_HIGH),
+ { },
+ },
};
static struct platform_device cf_slot0 = {
.id = -1,
.name = "pata-rb532-cf",
- .dev.platform_data = &cf_slot0_data,
.resource = cf_slot0_res,
.num_resources = ARRAY_SIZE(cf_slot0_res),
};
@@ -305,6 +310,7 @@ static int __init plat_setup_devices(void)
dev_set_drvdata(&korina_dev0.dev, &korina_dev0_data);
+ gpiod_add_lookup_table(&cf_slot0_gpio_table);
return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
}
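
For reference, with the lookup table registered against "pata-rb532-cf" the driver can resolve the line by device instead of carrying a GPIO number in platform data; a hedged sketch of the consumer side (assumes the driver's matching conversion to the gpiod API):

    /* In the pata-rb532-cf probe path: a NULL con_id matches the
     * GPIO_LOOKUP() entry above; the descriptor is device-managed. */
    gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
    if (IS_ERR(gpiod))
            return PTR_ERR(gpiod);
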
diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
index b3d6bf23a662..3ef3fb658136 100644
--- a/arch/mips/sibyte/common/Makefile
+++ b/arch/mips/sibyte/common/Makefile
@@ -1,4 +1,5 @@
obj-y := cfe.o
+obj-$(CONFIG_SWIOTLB) += dma.o
obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
new file mode 100644
index 000000000000..eb47a94f3583
--- /dev/null
+++ b/arch/mips/sibyte/common/dma.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DMA support for Broadcom SiByte platforms.
+ *
+ * Copyright (c) 2018 Maciej W. Rozycki
+ */
+
+#include <linux/swiotlb.h>
+#include <asm/bootinfo.h>
+
+void __init plat_swiotlb_setup(void)
+{
+ swiotlb_init(1);
+}
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 58a0315ad743..f6fd340e39c2 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -50,6 +50,7 @@ VDSO_LDFLAGS := \
$(call cc-ldoption, -Wl$(comma)--build-id)
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
#
# Shared build commands.
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
index a0a9679ad5de..8a41372551ff 100644
--- a/arch/nds32/kernel/ftrace.c
+++ b/arch/nds32/kernel/ftrace.c
@@ -211,29 +211,15 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
{
unsigned long return_hooker = (unsigned long)&return_to_handler;
- struct ftrace_graph_ent trace;
unsigned long old;
- int err;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
old = *parent;
- trace.func = self_addr;
- trace.depth = current->curr_ret_stack + 1;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace))
- return;
-
- err = ftrace_push_return_trace(old, self_addr, &trace.depth,
- frame_pointer, NULL);
-
- if (err == -EBUSY)
- return;
-
- *parent = return_hooker;
+ if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+ *parent = return_hooker;
}
noinline void ftrace_graph_caller(void)
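
For reference, function_graph_enter() bundles exactly the sequence deleted above. Reconstructed from those open-coded lines (the real helper lives in kernel/trace/fgraph.c and may differ in bookkeeping details), it behaves roughly like:

    /* Rough shape: returns 0 when the return hook may be installed. */
    static int sketch_function_graph_enter(unsigned long ret, unsigned long func,
                                           unsigned long fp, unsigned long *retp)
    {
            struct ftrace_graph_ent trace;

            trace.func = func;
            trace.depth = current->curr_ret_stack + 1;

            /* Only trace if the calling function expects to */
            if (!ftrace_graph_entry(&trace))
                    return -EBUSY;

            return ftrace_push_return_trace(ret, func, &trace.depth, fp, retp);
    }
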
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index d047a09d660f..c19af26febe6 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -71,6 +71,13 @@ ifdef CONFIG_MLONGCALLS
KBUILD_CFLAGS_KERNEL += -mlong-calls
endif
+# Without this, "ld -r" results in .text sections that are too big (> 0x40000)
+# for branches to reach stubs. And multiple .text sections trigger a warning
+# when creating the sysfs module information section.
+ifndef CONFIG_64BIT
+KBUILD_CFLAGS_MODULE += -ffunction-sections
+endif
+
# select which processor to optimise for
cflags-$(CONFIG_PA7000) += -march=1.1 -mschedule=7100
cflags-$(CONFIG_PA7200) += -march=1.1 -mschedule=7200
@@ -156,3 +163,6 @@ define archhelp
@echo ' copy to $$(INSTALL_PATH)'
@echo ' zinstall - Install compressed vmlinuz kernel'
endef
+
+archheaders:
+ $(Q)$(MAKE) $(build)=arch/parisc/kernel/syscalls all
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 2013d639e735..0b1e354c8c24 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,3 +1,6 @@
+generated-y += syscall_table_32.h
+generated-y += syscall_table_64.h
+generated-y += syscall_table_c32.h
generic-y += barrier.h
generic-y += current.h
generic-y += device.h
diff --git a/arch/parisc/include/asm/alternative.h b/arch/parisc/include/asm/alternative.h
index bf485a94d0b4..793d8baa3a10 100644
--- a/arch/parisc/include/asm/alternative.h
+++ b/arch/parisc/include/asm/alternative.h
@@ -2,6 +2,7 @@
#ifndef __ASM_PARISC_ALTERNATIVE_H
#define __ASM_PARISC_ALTERNATIVE_H
+#define ALT_COND_ALWAYS 0x80 /* always replace instruction */
#define ALT_COND_NO_SMP 0x01 /* when running UP instead of SMP */
#define ALT_COND_NO_DCACHE 0x02 /* if system has no d-cache */
#define ALT_COND_NO_ICACHE 0x04 /* if system has no i-cache */
@@ -26,6 +27,9 @@ struct alt_instr {
};
void set_kernel_text_rw(int enable_read_write);
+void apply_alternatives_all(void);
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end,
+ const char *module_name);
/* Alternative SMP implementation. */
#define ALTERNATIVE(cond, replacement) "!0:" \
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index bc37a4953eaa..c2c2afb28941 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -4,10 +4,18 @@
#include <uapi/asm/unistd.h>
+#define __NR_Linux_syscalls __NR_syscalls
+
#ifndef __ASSEMBLY__
#define SYS_ify(syscall_name) __NR_##syscall_name
+#define __IGNORE_select /* newselect */
+#define __IGNORE_fadvise64 /* fadvise64_64 */
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
+
#ifndef ASM_LINE_SEP
# define ASM_LINE_SEP ;
#endif
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index adb5c64831c7..d31b4261cafc 100644
--- a/arch/parisc/include/uapi/asm/Kbuild
+++ b/arch/parisc/include/uapi/asm/Kbuild
@@ -1,6 +1,8 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generated-y += unistd_32.h
+generated-y += unistd_64.h
generic-y += auxvec.h
generic-y += bpf_perf_event.h
generic-y += kvm_para.h
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index dc77c5a51db7..98dc953656af 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -2,379 +2,12 @@
#ifndef _UAPI_ASM_PARISC_UNISTD_H_
#define _UAPI_ASM_PARISC_UNISTD_H_
-/*
- * Linux system call numbers.
- *
- * Cary Coutant says that we should just use another syscall gateway
- * page to avoid clashing with the HPUX space, and I think he's right:
- * it will would keep a branch out of our syscall entry path, at the
- * very least. If we decide to change it later, we can ``just'' tweak
- * the LINUX_GATEWAY_ADDR define at the bottom and make __NR_Linux be
- * 1024 or something. Oh, and recompile libc. =)
- */
+#ifdef __LP64__
+#include <asm/unistd_64.h>
+#else
+#include <asm/unistd_32.h>
+#endif
-#define __NR_Linux 0
-#define __NR_restart_syscall (__NR_Linux + 0)
-#define __NR_exit (__NR_Linux + 1)
-#define __NR_fork (__NR_Linux + 2)
-#define __NR_read (__NR_Linux + 3)
-#define __NR_write (__NR_Linux + 4)
-#define __NR_open (__NR_Linux + 5)
-#define __NR_close (__NR_Linux + 6)
-#define __NR_waitpid (__NR_Linux + 7)
-#define __NR_creat (__NR_Linux + 8)
-#define __NR_link (__NR_Linux + 9)
-#define __NR_unlink (__NR_Linux + 10)
-#define __NR_execve (__NR_Linux + 11)
-#define __NR_chdir (__NR_Linux + 12)
-#define __NR_time (__NR_Linux + 13)
-#define __NR_mknod (__NR_Linux + 14)
-#define __NR_chmod (__NR_Linux + 15)
-#define __NR_lchown (__NR_Linux + 16)
-#define __NR_socket (__NR_Linux + 17)
-#define __NR_stat (__NR_Linux + 18)
-#define __NR_lseek (__NR_Linux + 19)
-#define __NR_getpid (__NR_Linux + 20)
-#define __NR_mount (__NR_Linux + 21)
-#define __NR_bind (__NR_Linux + 22)
-#define __NR_setuid (__NR_Linux + 23)
-#define __NR_getuid (__NR_Linux + 24)
-#define __NR_stime (__NR_Linux + 25)
-#define __NR_ptrace (__NR_Linux + 26)
-#define __NR_alarm (__NR_Linux + 27)
-#define __NR_fstat (__NR_Linux + 28)
-#define __NR_pause (__NR_Linux + 29)
-#define __NR_utime (__NR_Linux + 30)
-#define __NR_connect (__NR_Linux + 31)
-#define __NR_listen (__NR_Linux + 32)
-#define __NR_access (__NR_Linux + 33)
-#define __NR_nice (__NR_Linux + 34)
-#define __NR_accept (__NR_Linux + 35)
-#define __NR_sync (__NR_Linux + 36)
-#define __NR_kill (__NR_Linux + 37)
-#define __NR_rename (__NR_Linux + 38)
-#define __NR_mkdir (__NR_Linux + 39)
-#define __NR_rmdir (__NR_Linux + 40)
-#define __NR_dup (__NR_Linux + 41)
-#define __NR_pipe (__NR_Linux + 42)
-#define __NR_times (__NR_Linux + 43)
-#define __NR_getsockname (__NR_Linux + 44)
-#define __NR_brk (__NR_Linux + 45)
-#define __NR_setgid (__NR_Linux + 46)
-#define __NR_getgid (__NR_Linux + 47)
-#define __NR_signal (__NR_Linux + 48)
-#define __NR_geteuid (__NR_Linux + 49)
-#define __NR_getegid (__NR_Linux + 50)
-#define __NR_acct (__NR_Linux + 51)
-#define __NR_umount2 (__NR_Linux + 52)
-#define __NR_getpeername (__NR_Linux + 53)
-#define __NR_ioctl (__NR_Linux + 54)
-#define __NR_fcntl (__NR_Linux + 55)
-#define __NR_socketpair (__NR_Linux + 56)
-#define __NR_setpgid (__NR_Linux + 57)
-#define __NR_send (__NR_Linux + 58)
-#define __NR_uname (__NR_Linux + 59)
-#define __NR_umask (__NR_Linux + 60)
-#define __NR_chroot (__NR_Linux + 61)
-#define __NR_ustat (__NR_Linux + 62)
-#define __NR_dup2 (__NR_Linux + 63)
-#define __NR_getppid (__NR_Linux + 64)
-#define __NR_getpgrp (__NR_Linux + 65)
-#define __NR_setsid (__NR_Linux + 66)
-#define __NR_pivot_root (__NR_Linux + 67)
-#define __NR_sgetmask (__NR_Linux + 68)
-#define __NR_ssetmask (__NR_Linux + 69)
-#define __NR_setreuid (__NR_Linux + 70)
-#define __NR_setregid (__NR_Linux + 71)
-#define __NR_mincore (__NR_Linux + 72)
-#define __NR_sigpending (__NR_Linux + 73)
-#define __NR_sethostname (__NR_Linux + 74)
-#define __NR_setrlimit (__NR_Linux + 75)
-#define __NR_getrlimit (__NR_Linux + 76)
-#define __NR_getrusage (__NR_Linux + 77)
-#define __NR_gettimeofday (__NR_Linux + 78)
-#define __NR_settimeofday (__NR_Linux + 79)
-#define __NR_getgroups (__NR_Linux + 80)
-#define __NR_setgroups (__NR_Linux + 81)
-#define __NR_sendto (__NR_Linux + 82)
-#define __NR_symlink (__NR_Linux + 83)
-#define __NR_lstat (__NR_Linux + 84)
-#define __NR_readlink (__NR_Linux + 85)
-#define __NR_uselib (__NR_Linux + 86)
-#define __NR_swapon (__NR_Linux + 87)
-#define __NR_reboot (__NR_Linux + 88)
-#define __NR_mmap2 (__NR_Linux + 89)
-#define __NR_mmap (__NR_Linux + 90)
-#define __NR_munmap (__NR_Linux + 91)
-#define __NR_truncate (__NR_Linux + 92)
-#define __NR_ftruncate (__NR_Linux + 93)
-#define __NR_fchmod (__NR_Linux + 94)
-#define __NR_fchown (__NR_Linux + 95)
-#define __NR_getpriority (__NR_Linux + 96)
-#define __NR_setpriority (__NR_Linux + 97)
-#define __NR_recv (__NR_Linux + 98)
-#define __NR_statfs (__NR_Linux + 99)
-#define __NR_fstatfs (__NR_Linux + 100)
-#define __NR_stat64 (__NR_Linux + 101)
-/* #define __NR_socketcall (__NR_Linux + 102) */
-#define __NR_syslog (__NR_Linux + 103)
-#define __NR_setitimer (__NR_Linux + 104)
-#define __NR_getitimer (__NR_Linux + 105)
-#define __NR_capget (__NR_Linux + 106)
-#define __NR_capset (__NR_Linux + 107)
-#define __NR_pread64 (__NR_Linux + 108)
-#define __NR_pwrite64 (__NR_Linux + 109)
-#define __NR_getcwd (__NR_Linux + 110)
-#define __NR_vhangup (__NR_Linux + 111)
-#define __NR_fstat64 (__NR_Linux + 112)
-#define __NR_vfork (__NR_Linux + 113)
-#define __NR_wait4 (__NR_Linux + 114)
-#define __NR_swapoff (__NR_Linux + 115)
-#define __NR_sysinfo (__NR_Linux + 116)
-#define __NR_shutdown (__NR_Linux + 117)
-#define __NR_fsync (__NR_Linux + 118)
-#define __NR_madvise (__NR_Linux + 119)
-#define __NR_clone (__NR_Linux + 120)
-#define __NR_setdomainname (__NR_Linux + 121)
-#define __NR_sendfile (__NR_Linux + 122)
-#define __NR_recvfrom (__NR_Linux + 123)
-#define __NR_adjtimex (__NR_Linux + 124)
-#define __NR_mprotect (__NR_Linux + 125)
-#define __NR_sigprocmask (__NR_Linux + 126)
-#define __NR_create_module (__NR_Linux + 127) /* not used */
-#define __NR_init_module (__NR_Linux + 128)
-#define __NR_delete_module (__NR_Linux + 129)
-#define __NR_get_kernel_syms (__NR_Linux + 130) /* not used */
-#define __NR_quotactl (__NR_Linux + 131)
-#define __NR_getpgid (__NR_Linux + 132)
-#define __NR_fchdir (__NR_Linux + 133)
-#define __NR_bdflush (__NR_Linux + 134)
-#define __NR_sysfs (__NR_Linux + 135)
-#define __NR_personality (__NR_Linux + 136)
-#define __NR_afs_syscall (__NR_Linux + 137) /* not used */
-#define __NR_setfsuid (__NR_Linux + 138)
-#define __NR_setfsgid (__NR_Linux + 139)
-#define __NR__llseek (__NR_Linux + 140)
-#define __NR_getdents (__NR_Linux + 141)
-#define __NR__newselect (__NR_Linux + 142)
-#define __NR_flock (__NR_Linux + 143)
-#define __NR_msync (__NR_Linux + 144)
-#define __NR_readv (__NR_Linux + 145)
-#define __NR_writev (__NR_Linux + 146)
-#define __NR_getsid (__NR_Linux + 147)
-#define __NR_fdatasync (__NR_Linux + 148)
-#define __NR__sysctl (__NR_Linux + 149)
-#define __NR_mlock (__NR_Linux + 150)
-#define __NR_munlock (__NR_Linux + 151)
-#define __NR_mlockall (__NR_Linux + 152)
-#define __NR_munlockall (__NR_Linux + 153)
-#define __NR_sched_setparam (__NR_Linux + 154)
-#define __NR_sched_getparam (__NR_Linux + 155)
-#define __NR_sched_setscheduler (__NR_Linux + 156)
-#define __NR_sched_getscheduler (__NR_Linux + 157)
-#define __NR_sched_yield (__NR_Linux + 158)
-#define __NR_sched_get_priority_max (__NR_Linux + 159)
-#define __NR_sched_get_priority_min (__NR_Linux + 160)
-#define __NR_sched_rr_get_interval (__NR_Linux + 161)
-#define __NR_nanosleep (__NR_Linux + 162)
-#define __NR_mremap (__NR_Linux + 163)
-#define __NR_setresuid (__NR_Linux + 164)
-#define __NR_getresuid (__NR_Linux + 165)
-#define __NR_sigaltstack (__NR_Linux + 166)
-#define __NR_query_module (__NR_Linux + 167) /* not used */
-#define __NR_poll (__NR_Linux + 168)
-#define __NR_nfsservctl (__NR_Linux + 169) /* not used */
-#define __NR_setresgid (__NR_Linux + 170)
-#define __NR_getresgid (__NR_Linux + 171)
-#define __NR_prctl (__NR_Linux + 172)
-#define __NR_rt_sigreturn (__NR_Linux + 173)
-#define __NR_rt_sigaction (__NR_Linux + 174)
-#define __NR_rt_sigprocmask (__NR_Linux + 175)
-#define __NR_rt_sigpending (__NR_Linux + 176)
-#define __NR_rt_sigtimedwait (__NR_Linux + 177)
-#define __NR_rt_sigqueueinfo (__NR_Linux + 178)
-#define __NR_rt_sigsuspend (__NR_Linux + 179)
-#define __NR_chown (__NR_Linux + 180)
-#define __NR_setsockopt (__NR_Linux + 181)
-#define __NR_getsockopt (__NR_Linux + 182)
-#define __NR_sendmsg (__NR_Linux + 183)
-#define __NR_recvmsg (__NR_Linux + 184)
-#define __NR_semop (__NR_Linux + 185)
-#define __NR_semget (__NR_Linux + 186)
-#define __NR_semctl (__NR_Linux + 187)
-#define __NR_msgsnd (__NR_Linux + 188)
-#define __NR_msgrcv (__NR_Linux + 189)
-#define __NR_msgget (__NR_Linux + 190)
-#define __NR_msgctl (__NR_Linux + 191)
-#define __NR_shmat (__NR_Linux + 192)
-#define __NR_shmdt (__NR_Linux + 193)
-#define __NR_shmget (__NR_Linux + 194)
-#define __NR_shmctl (__NR_Linux + 195)
-#define __NR_getpmsg (__NR_Linux + 196) /* not used */
-#define __NR_putpmsg (__NR_Linux + 197) /* not used */
-#define __NR_lstat64 (__NR_Linux + 198)
-#define __NR_truncate64 (__NR_Linux + 199)
-#define __NR_ftruncate64 (__NR_Linux + 200)
-#define __NR_getdents64 (__NR_Linux + 201)
-#define __NR_fcntl64 (__NR_Linux + 202)
-#define __NR_attrctl (__NR_Linux + 203) /* not used */
-#define __NR_acl_get (__NR_Linux + 204) /* not used */
-#define __NR_acl_set (__NR_Linux + 205) /* not used */
-#define __NR_gettid (__NR_Linux + 206)
-#define __NR_readahead (__NR_Linux + 207)
-#define __NR_tkill (__NR_Linux + 208)
-#define __NR_sendfile64 (__NR_Linux + 209)
-#define __NR_futex (__NR_Linux + 210)
-#define __NR_sched_setaffinity (__NR_Linux + 211)
-#define __NR_sched_getaffinity (__NR_Linux + 212)
-#define __NR_set_thread_area (__NR_Linux + 213) /* not used */
-#define __NR_get_thread_area (__NR_Linux + 214) /* not used */
-#define __NR_io_setup (__NR_Linux + 215)
-#define __NR_io_destroy (__NR_Linux + 216)
-#define __NR_io_getevents (__NR_Linux + 217)
-#define __NR_io_submit (__NR_Linux + 218)
-#define __NR_io_cancel (__NR_Linux + 219)
-#define __NR_alloc_hugepages (__NR_Linux + 220) /* not used */
-#define __NR_free_hugepages (__NR_Linux + 221) /* not used */
-#define __NR_exit_group (__NR_Linux + 222)
-#define __NR_lookup_dcookie (__NR_Linux + 223)
-#define __NR_epoll_create (__NR_Linux + 224)
-#define __NR_epoll_ctl (__NR_Linux + 225)
-#define __NR_epoll_wait (__NR_Linux + 226)
-#define __NR_remap_file_pages (__NR_Linux + 227)
-#define __NR_semtimedop (__NR_Linux + 228)
-#define __NR_mq_open (__NR_Linux + 229)
-#define __NR_mq_unlink (__NR_Linux + 230)
-#define __NR_mq_timedsend (__NR_Linux + 231)
-#define __NR_mq_timedreceive (__NR_Linux + 232)
-#define __NR_mq_notify (__NR_Linux + 233)
-#define __NR_mq_getsetattr (__NR_Linux + 234)
-#define __NR_waitid (__NR_Linux + 235)
-#define __NR_fadvise64_64 (__NR_Linux + 236)
-#define __NR_set_tid_address (__NR_Linux + 237)
-#define __NR_setxattr (__NR_Linux + 238)
-#define __NR_lsetxattr (__NR_Linux + 239)
-#define __NR_fsetxattr (__NR_Linux + 240)
-#define __NR_getxattr (__NR_Linux + 241)
-#define __NR_lgetxattr (__NR_Linux + 242)
-#define __NR_fgetxattr (__NR_Linux + 243)
-#define __NR_listxattr (__NR_Linux + 244)
-#define __NR_llistxattr (__NR_Linux + 245)
-#define __NR_flistxattr (__NR_Linux + 246)
-#define __NR_removexattr (__NR_Linux + 247)
-#define __NR_lremovexattr (__NR_Linux + 248)
-#define __NR_fremovexattr (__NR_Linux + 249)
-#define __NR_timer_create (__NR_Linux + 250)
-#define __NR_timer_settime (__NR_Linux + 251)
-#define __NR_timer_gettime (__NR_Linux + 252)
-#define __NR_timer_getoverrun (__NR_Linux + 253)
-#define __NR_timer_delete (__NR_Linux + 254)
-#define __NR_clock_settime (__NR_Linux + 255)
-#define __NR_clock_gettime (__NR_Linux + 256)
-#define __NR_clock_getres (__NR_Linux + 257)
-#define __NR_clock_nanosleep (__NR_Linux + 258)
-#define __NR_tgkill (__NR_Linux + 259)
-#define __NR_mbind (__NR_Linux + 260)
-#define __NR_get_mempolicy (__NR_Linux + 261)
-#define __NR_set_mempolicy (__NR_Linux + 262)
-#define __NR_vserver (__NR_Linux + 263) /* not used */
-#define __NR_add_key (__NR_Linux + 264)
-#define __NR_request_key (__NR_Linux + 265)
-#define __NR_keyctl (__NR_Linux + 266)
-#define __NR_ioprio_set (__NR_Linux + 267)
-#define __NR_ioprio_get (__NR_Linux + 268)
-#define __NR_inotify_init (__NR_Linux + 269)
-#define __NR_inotify_add_watch (__NR_Linux + 270)
-#define __NR_inotify_rm_watch (__NR_Linux + 271)
-#define __NR_migrate_pages (__NR_Linux + 272)
-#define __NR_pselect6 (__NR_Linux + 273)
-#define __NR_ppoll (__NR_Linux + 274)
-#define __NR_openat (__NR_Linux + 275)
-#define __NR_mkdirat (__NR_Linux + 276)
-#define __NR_mknodat (__NR_Linux + 277)
-#define __NR_fchownat (__NR_Linux + 278)
-#define __NR_futimesat (__NR_Linux + 279)
-#define __NR_fstatat64 (__NR_Linux + 280)
-#define __NR_unlinkat (__NR_Linux + 281)
-#define __NR_renameat (__NR_Linux + 282)
-#define __NR_linkat (__NR_Linux + 283)
-#define __NR_symlinkat (__NR_Linux + 284)
-#define __NR_readlinkat (__NR_Linux + 285)
-#define __NR_fchmodat (__NR_Linux + 286)
-#define __NR_faccessat (__NR_Linux + 287)
-#define __NR_unshare (__NR_Linux + 288)
-#define __NR_set_robust_list (__NR_Linux + 289)
-#define __NR_get_robust_list (__NR_Linux + 290)
-#define __NR_splice (__NR_Linux + 291)
-#define __NR_sync_file_range (__NR_Linux + 292)
-#define __NR_tee (__NR_Linux + 293)
-#define __NR_vmsplice (__NR_Linux + 294)
-#define __NR_move_pages (__NR_Linux + 295)
-#define __NR_getcpu (__NR_Linux + 296)
-#define __NR_epoll_pwait (__NR_Linux + 297)
-#define __NR_statfs64 (__NR_Linux + 298)
-#define __NR_fstatfs64 (__NR_Linux + 299)
-#define __NR_kexec_load (__NR_Linux + 300)
-#define __NR_utimensat (__NR_Linux + 301)
-#define __NR_signalfd (__NR_Linux + 302)
-#define __NR_timerfd (__NR_Linux + 303) /* not used */
-#define __NR_eventfd (__NR_Linux + 304)
-#define __NR_fallocate (__NR_Linux + 305)
-#define __NR_timerfd_create (__NR_Linux + 306)
-#define __NR_timerfd_settime (__NR_Linux + 307)
-#define __NR_timerfd_gettime (__NR_Linux + 308)
-#define __NR_signalfd4 (__NR_Linux + 309)
-#define __NR_eventfd2 (__NR_Linux + 310)
-#define __NR_epoll_create1 (__NR_Linux + 311)
-#define __NR_dup3 (__NR_Linux + 312)
-#define __NR_pipe2 (__NR_Linux + 313)
-#define __NR_inotify_init1 (__NR_Linux + 314)
-#define __NR_preadv (__NR_Linux + 315)
-#define __NR_pwritev (__NR_Linux + 316)
-#define __NR_rt_tgsigqueueinfo (__NR_Linux + 317)
-#define __NR_perf_event_open (__NR_Linux + 318)
-#define __NR_recvmmsg (__NR_Linux + 319)
-#define __NR_accept4 (__NR_Linux + 320)
-#define __NR_prlimit64 (__NR_Linux + 321)
-#define __NR_fanotify_init (__NR_Linux + 322)
-#define __NR_fanotify_mark (__NR_Linux + 323)
-#define __NR_clock_adjtime (__NR_Linux + 324)
-#define __NR_name_to_handle_at (__NR_Linux + 325)
-#define __NR_open_by_handle_at (__NR_Linux + 326)
-#define __NR_syncfs (__NR_Linux + 327)
-#define __NR_setns (__NR_Linux + 328)
-#define __NR_sendmmsg (__NR_Linux + 329)
-#define __NR_process_vm_readv (__NR_Linux + 330)
-#define __NR_process_vm_writev (__NR_Linux + 331)
-#define __NR_kcmp (__NR_Linux + 332)
-#define __NR_finit_module (__NR_Linux + 333)
-#define __NR_sched_setattr (__NR_Linux + 334)
-#define __NR_sched_getattr (__NR_Linux + 335)
-#define __NR_utimes (__NR_Linux + 336)
-#define __NR_renameat2 (__NR_Linux + 337)
-#define __NR_seccomp (__NR_Linux + 338)
-#define __NR_getrandom (__NR_Linux + 339)
-#define __NR_memfd_create (__NR_Linux + 340)
-#define __NR_bpf (__NR_Linux + 341)
-#define __NR_execveat (__NR_Linux + 342)
-#define __NR_membarrier (__NR_Linux + 343)
-#define __NR_userfaultfd (__NR_Linux + 344)
-#define __NR_mlock2 (__NR_Linux + 345)
-#define __NR_copy_file_range (__NR_Linux + 346)
-#define __NR_preadv2 (__NR_Linux + 347)
-#define __NR_pwritev2 (__NR_Linux + 348)
-#define __NR_statx (__NR_Linux + 349)
-#define __NR_io_pgetevents (__NR_Linux + 350)
-
-#define __NR_Linux_syscalls (__NR_io_pgetevents + 1)
-
-
-#define __IGNORE_select /* newselect */
-#define __IGNORE_fadvise64 /* fadvise64_64 */
-#define __IGNORE_pkey_mprotect
-#define __IGNORE_pkey_alloc
-#define __IGNORE_pkey_free
-
-#define LINUX_GATEWAY_ADDR 0x100
+#define LINUX_GATEWAY_ADDR 0x100
#endif /* _UAPI_ASM_PARISC_UNISTD_H_ */
diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile
index e5de34d00b1a..8e5f1ab65c68 100644
--- a/arch/parisc/kernel/Makefile
+++ b/arch/parisc/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := head.o vmlinux.lds
obj-y := cache.o pacache.o setup.o pdt.o traps.o time.o irq.o \
pa7300lc.o syscall.o entry.o sys_parisc.o firmware.o \
- ptrace.o hardware.o inventory.o drivers.o \
+ ptrace.o hardware.o inventory.o drivers.o alternative.o \
signal.o hpmc.o real2.o parisc_ksyms.o unaligned.o \
process.o processor.o pdc_cons.o pdc_chassis.o unwind.o
diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c
new file mode 100644
index 000000000000..bf2274e01a96
--- /dev/null
+++ b/arch/parisc/kernel/alternative.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Alternative live-patching for parisc.
+ * Copyright (C) 2018 Helge Deller <deller@gmx.de>
+ *
+ */
+
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/alternative.h>
+
+#include <linux/module.h>
+
+static int no_alternatives;
+static int __init setup_no_alternatives(char *str)
+{
+ no_alternatives = 1;
+ return 1;
+}
+__setup("no-alternatives", setup_no_alternatives);
+
+void __init_or_module apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end, const char *module_name)
+{
+ struct alt_instr *entry;
+ int index = 0, applied = 0;
+ int num_cpus = num_online_cpus();
+
+ for (entry = start; entry < end; entry++, index++) {
+
+ u32 *from, len, cond, replacement;
+
+ from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset);
+ len = entry->len;
+ cond = entry->cond;
+ replacement = entry->replacement;
+
+ WARN_ON(!cond);
+
+ if (cond != ALT_COND_ALWAYS && no_alternatives)
+ continue;
+
+ pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
+ index, cond, len, from, replacement);
+
+ if ((cond & ALT_COND_NO_SMP) && (num_cpus != 1))
+ continue;
+ if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0))
+ continue;
+ if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0))
+ continue;
+
+ /*
+	 * If the PDC_MODEL capabilities word has the Non-coherent IO-PDIR
+	 * bit set (bit #61, big endian), we have to flush and sync every
+ * time IO-PDIR is changed in Ike/Astro.
+ */
+ if ((cond & ALT_COND_NO_IOC_FDC) &&
+ (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC))
+ continue;
+
+ /* Want to replace pdtlb by a pdtlb,l instruction? */
+ if (replacement == INSN_PxTLB) {
+ replacement = *from;
+ if (boot_cpu_data.cpu_type >= pcxu) /* >= pa2.0 ? */
+ replacement |= (1 << 10); /* set el bit */
+ }
+
+ /*
+ * Replace instruction with NOPs?
+	 * For longer distances, insert a branch instruction instead.
+ */
+ if (replacement == INSN_NOP && len > 1)
+ replacement = 0xe8000002 + (len-2)*8; /* "b,n .+8" */
+
+ pr_debug("Do %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
+ index, cond, len, from, replacement);
+
+ /* Replace instruction */
+ *from = replacement;
+ applied++;
+ }
+
+ pr_info("%s%salternatives: applied %d out of %d patches\n",
+ module_name ? : "", module_name ? " " : "",
+ applied, index);
+}
+
+
+void __init apply_alternatives_all(void)
+{
+ set_kernel_text_rw(1);
+
+ apply_alternatives((struct alt_instr *) &__alt_instructions,
+ (struct alt_instr *) &__alt_instructions_end, NULL);
+
+ set_kernel_text_rw(0);
+}
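
For reference, the entry layout apply_alternatives() walks can be inferred from its field accesses; a hedged sketch follows (exact widths live in asm/alternative.h). Note that orig_offset is self-relative, which is why the patch site is recovered as (u32 *)((ulong)&entry->orig_offset + entry->orig_offset):

    /* Hedged sketch of one .altinstructions entry; the field names
     * match the code above, the types are assumptions. */
    struct alt_instr {
            s32 orig_offset;        /* self-relative offset to patch site */
            u32 len;                /* number of original instructions */
            u32 cond;               /* ALT_COND_* mask gating the patch */
            u32 replacement;        /* replacement insn or INSN_* marker */
    };
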
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 6fa8535d3cce..e46a4157a894 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -30,7 +30,6 @@ static void __hot prepare_ftrace_return(unsigned long *parent,
unsigned long self_addr)
{
unsigned long old;
- struct ftrace_graph_ent trace;
extern int parisc_return_to_handler;
if (unlikely(ftrace_graph_is_dead()))
@@ -41,19 +40,9 @@ static void __hot prepare_ftrace_return(unsigned long *parent,
old = *parent;
- trace.func = self_addr;
- trace.depth = current->curr_ret_stack + 1;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace))
- return;
-
- if (ftrace_push_return_trace(old, self_addr, &trace.depth,
- 0, NULL) == -EBUSY)
- return;
-
- /* activate parisc_return_to_handler() as return point */
- *parent = (unsigned long) &parisc_return_to_handler;
+ if (!function_graph_enter(old, self_addr, 0, NULL))
+ /* activate parisc_return_to_handler() as return point */
+ *parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index b5b3cb00f1fb..43778420614b 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -877,6 +877,8 @@ int module_finalize(const Elf_Ehdr *hdr,
int i;
unsigned long nsyms;
const char *strtab = NULL;
+ const Elf_Shdr *s;
+ char *secstrings;
Elf_Sym *newptr, *oldptr;
Elf_Shdr *symhdr = NULL;
#ifdef DEBUG
@@ -948,6 +950,18 @@ int module_finalize(const Elf_Ehdr *hdr,
nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
DEBUGP("NEW num_symtab %lu\n", nsyms);
symhdr->sh_size = nsyms * sizeof(Elf_Sym);
+
+ /* find .altinstructions section */
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
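+	/* e_shstrndx indexes the section-header string table; each
+	 * sh_name below is an offset into that table */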
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ void *aseg = (void *) s->sh_addr;
+ char *secname = secstrings + s->sh_name;
+
+ if (!strcmp(".altinstructions", secname))
+ /* patch .altinstructions */
+ apply_alternatives(aseg, aseg + s->sh_size, me->name);
+ }
+
return 0;
}
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 54818cd78bd0..f2cf86ac279b 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -301,86 +301,6 @@ static int __init parisc_init_resources(void)
return 0;
}
-static int no_alternatives __initdata;
-static int __init setup_no_alternatives(char *str)
-{
- no_alternatives = 1;
- return 1;
-}
-__setup("no-alternatives", setup_no_alternatives);
-
-static void __init apply_alternatives_all(void)
-{
- struct alt_instr *entry;
- int index = 0, applied = 0;
-
-
- pr_info("alternatives: %spatching kernel code\n",
- no_alternatives ? "NOT " : "");
- if (no_alternatives)
- return;
-
- set_kernel_text_rw(1);
-
- for (entry = (struct alt_instr *) &__alt_instructions;
- entry < (struct alt_instr *) &__alt_instructions_end;
- entry++, index++) {
-
- u32 *from, len, cond, replacement;
-
- from = (u32 *)((ulong)&entry->orig_offset + entry->orig_offset);
- len = entry->len;
- cond = entry->cond;
- replacement = entry->replacement;
-
- WARN_ON(!cond);
- pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
- index, cond, len, from, replacement);
-
- if ((cond & ALT_COND_NO_SMP) && (num_online_cpus() != 1))
- continue;
- if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0))
- continue;
- if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0))
- continue;
-
- /*
- * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit
- * set (bit #61, big endian), we have to flush and sync every
- * time IO-PDIR is changed in Ike/Astro.
- */
- if ((cond & ALT_COND_NO_IOC_FDC) &&
- (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC))
- continue;
-
- /* Want to replace pdtlb by a pdtlb,l instruction? */
- if (replacement == INSN_PxTLB) {
- replacement = *from;
- if (boot_cpu_data.cpu_type >= pcxu) /* >= pa2.0 ? */
- replacement |= (1 << 10); /* set el bit */
- }
-
- /*
- * Replace instruction with NOPs?
- * For long distance insert a branch instruction instead.
- */
- if (replacement == INSN_NOP && len > 1)
- replacement = 0xe8000002 + (len-2)*8; /* "b,n .+8" */
-
- pr_debug("Do %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
- index, cond, len, from, replacement);
-
- /* Replace instruction */
- *from = replacement;
- applied++;
- }
-
- pr_info("alternatives: applied %d out of %d patches\n", applied, index);
-
- set_kernel_text_rw(0);
-}
-
-
extern void gsc_init(void);
extern void processor_init(void);
extern void ccio_init(void);
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index a9bc90dc4ae7..4f77bd9be66b 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -923,19 +923,24 @@ ENTRY(lws_table)
END(lws_table)
/* End of lws table */
+#define __SYSCALL(nr, entry, nargs) ASM_ULONG_INSN entry
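+/* Each __SYSCALL(nr, entry, nargs) line in the generated headers thus
+ * expands to a single table slot holding the entry point's address. */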
.align 8
ENTRY(sys_call_table)
.export sys_call_table,data
-#include "syscall_table.S"
+#ifdef CONFIG_64BIT
+#include <asm/syscall_table_c32.h> /* Compat syscalls */
+#else
+#include <asm/syscall_table_32.h> /* 32-bit native syscalls */
+#endif
END(sys_call_table)
#ifdef CONFIG_64BIT
.align 8
ENTRY(sys_call_table64)
-#define SYSCALL_TABLE_64BIT
-#include "syscall_table.S"
+#include <asm/syscall_table_64.h> /* 64-bit native syscalls */
END(sys_call_table64)
#endif
+#undef __SYSCALL
/*
All light-weight-syscall atomic operations
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
deleted file mode 100644
index fe3f2a49d2b1..000000000000
--- a/arch/parisc/kernel/syscall_table.S
+++ /dev/null
@@ -1,459 +0,0 @@
-/* System Call Table
- *
- * Copyright (C) 1999-2004 Matthew Wilcox <willy at parisc-linux.org>
- * Copyright (C) 2000-2001 John Marvin <jsm at parisc-linux.org>
- * Copyright (C) 2000 Alan Modra <amodra at parisc-linux.org>
- * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
- * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
- * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
- * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
- * Copyright (C) 2000 Grant Grundler <grundler at parisc-linux.org>
- * Copyright (C) 2001 Richard Hirst <rhirst with parisc-linux.org>
- * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
- * Copyright (C) 2001-2007 Helge Deller <deller at parisc-linux.org>
- * Copyright (C) 2000-2001 Thomas Bogendoerfer <tsbogend at parisc-linux.org>
- * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
- * Copyright (C) 2005-2006 Kyle McMartin <kyle at parisc-linux.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#if defined(CONFIG_64BIT) && !defined(SYSCALL_TABLE_64BIT)
-/* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
- * narrow palinux. Use ENTRY_DIFF for those where a 32-bit specific
- * implementation is required on wide palinux. Use ENTRY_COMP where
- * the compatibility layer has a useful 32-bit implementation.
- */
-#define ENTRY_SAME(_name_) .dword sys_##_name_
-#define ENTRY_DIFF(_name_) .dword sys32_##_name_
-#define ENTRY_UHOH(_name_) .dword sys32_##unimplemented
-#define ENTRY_OURS(_name_) .dword parisc_##_name_
-#define ENTRY_COMP(_name_) .dword compat_sys_##_name_
-#elif defined(CONFIG_64BIT) && defined(SYSCALL_TABLE_64BIT)
-#define ENTRY_SAME(_name_) .dword sys_##_name_
-#define ENTRY_DIFF(_name_) .dword sys_##_name_
-#define ENTRY_UHOH(_name_) .dword sys_##_name_
-#define ENTRY_OURS(_name_) .dword sys_##_name_
-#define ENTRY_COMP(_name_) .dword sys_##_name_
-#else
-#define ENTRY_SAME(_name_) .word sys_##_name_
-#define ENTRY_DIFF(_name_) .word sys_##_name_
-#define ENTRY_UHOH(_name_) .word sys_##_name_
-#define ENTRY_OURS(_name_) .word parisc_##_name_
-#define ENTRY_COMP(_name_) .word sys_##_name_
-#endif
-
-90: ENTRY_SAME(restart_syscall) /* 0 */
-91: ENTRY_SAME(exit)
- ENTRY_SAME(fork_wrapper)
- ENTRY_SAME(read)
- ENTRY_SAME(write)
- ENTRY_COMP(open) /* 5 */
- ENTRY_SAME(close)
- ENTRY_SAME(waitpid)
- ENTRY_SAME(creat)
- ENTRY_SAME(link)
- ENTRY_SAME(unlink) /* 10 */
- ENTRY_COMP(execve)
- ENTRY_SAME(chdir)
- /* See comments in kernel/time.c!!! Maybe we don't need this? */
- ENTRY_COMP(time)
- ENTRY_SAME(mknod)
- ENTRY_SAME(chmod) /* 15 */
- ENTRY_SAME(lchown)
- ENTRY_SAME(socket)
- /* struct stat is MAYBE identical wide and narrow ?? */
- ENTRY_COMP(newstat)
- ENTRY_COMP(lseek)
- ENTRY_SAME(getpid) /* 20 */
- /* the 'void * data' parameter may need re-packing in wide */
- ENTRY_COMP(mount)
- /* concerned about struct sockaddr in wide/narrow */
- /* ---> I think sockaddr is OK unless the compiler packs the struct */
- /* differently to align the char array */
- ENTRY_SAME(bind)
- ENTRY_SAME(setuid)
- ENTRY_SAME(getuid)
- ENTRY_COMP(stime) /* 25 */
- ENTRY_COMP(ptrace)
- ENTRY_SAME(alarm)
- /* see stat comment */
- ENTRY_COMP(newfstat)
- ENTRY_SAME(pause)
- /* struct utimbuf uses time_t which might vary */
- ENTRY_COMP(utime) /* 30 */
- /* struct sockaddr... */
- ENTRY_SAME(connect)
- ENTRY_SAME(listen)
- ENTRY_SAME(access)
- ENTRY_SAME(nice)
- /* struct sockaddr... */
- ENTRY_SAME(accept) /* 35 */
- ENTRY_SAME(sync)
- ENTRY_SAME(kill)
- ENTRY_SAME(rename)
- ENTRY_SAME(mkdir)
- ENTRY_SAME(rmdir) /* 40 */
- ENTRY_SAME(dup)
- ENTRY_SAME(pipe)
- ENTRY_COMP(times)
- /* struct sockaddr... */
- ENTRY_SAME(getsockname)
- /* it seems possible brk() could return a >4G pointer... */
- ENTRY_SAME(brk) /* 45 */
- ENTRY_SAME(setgid)
- ENTRY_SAME(getgid)
- ENTRY_SAME(signal)
- ENTRY_SAME(geteuid)
- ENTRY_SAME(getegid) /* 50 */
- ENTRY_SAME(acct)
- ENTRY_SAME(umount)
- /* struct sockaddr... */
- ENTRY_SAME(getpeername)
- ENTRY_COMP(ioctl)
- ENTRY_COMP(fcntl) /* 55 */
- ENTRY_SAME(socketpair)
- ENTRY_SAME(setpgid)
- ENTRY_SAME(send)
- ENTRY_SAME(newuname)
- ENTRY_SAME(umask) /* 60 */
- ENTRY_SAME(chroot)
- ENTRY_COMP(ustat)
- ENTRY_SAME(dup2)
- ENTRY_SAME(getppid)
- ENTRY_SAME(getpgrp) /* 65 */
- ENTRY_SAME(setsid)
- ENTRY_SAME(pivot_root)
- /* I don't like this */
- ENTRY_UHOH(sgetmask)
- ENTRY_UHOH(ssetmask)
- ENTRY_SAME(setreuid) /* 70 */
- ENTRY_SAME(setregid)
- ENTRY_SAME(mincore)
- ENTRY_COMP(sigpending)
- ENTRY_SAME(sethostname)
- /* Following 3 have linux-common-code structs containing longs -( */
- ENTRY_COMP(setrlimit) /* 75 */
- ENTRY_COMP(getrlimit)
- ENTRY_COMP(getrusage)
- /* struct timeval and timezone are maybe?? consistent wide and narrow */
- ENTRY_COMP(gettimeofday)
- ENTRY_COMP(settimeofday)
- ENTRY_SAME(getgroups) /* 80 */
- ENTRY_SAME(setgroups)
- /* struct socketaddr... */
- ENTRY_SAME(sendto)
- ENTRY_SAME(symlink)
- /* see stat comment */
- ENTRY_COMP(newlstat)
- ENTRY_SAME(readlink) /* 85 */
- ENTRY_SAME(ni_syscall) /* was uselib */
- ENTRY_SAME(swapon)
- ENTRY_SAME(reboot)
- ENTRY_SAME(mmap2)
- ENTRY_SAME(mmap) /* 90 */
- ENTRY_SAME(munmap)
- ENTRY_COMP(truncate)
- ENTRY_COMP(ftruncate)
- ENTRY_SAME(fchmod)
- ENTRY_SAME(fchown) /* 95 */
- ENTRY_SAME(getpriority)
- ENTRY_SAME(setpriority)
- ENTRY_SAME(recv)
- ENTRY_COMP(statfs)
- ENTRY_COMP(fstatfs) /* 100 */
- ENTRY_SAME(stat64)
- ENTRY_SAME(ni_syscall) /* was socketcall */
- ENTRY_SAME(syslog)
- /* even though manpage says struct timeval contains longs, ours has
- * time_t and suseconds_t -- both of which are safe wide/narrow */
- ENTRY_COMP(setitimer)
- ENTRY_COMP(getitimer) /* 105 */
- ENTRY_SAME(capget)
- ENTRY_SAME(capset)
- ENTRY_OURS(pread64)
- ENTRY_OURS(pwrite64)
- ENTRY_SAME(getcwd) /* 110 */
- ENTRY_SAME(vhangup)
- ENTRY_SAME(fstat64)
- ENTRY_SAME(vfork_wrapper)
- /* struct rusage contains longs... */
- ENTRY_COMP(wait4)
- ENTRY_SAME(swapoff) /* 115 */
- ENTRY_COMP(sysinfo)
- ENTRY_SAME(shutdown)
- ENTRY_SAME(fsync)
- ENTRY_SAME(madvise)
- ENTRY_SAME(clone_wrapper) /* 120 */
- ENTRY_SAME(setdomainname)
- ENTRY_COMP(sendfile)
- /* struct sockaddr... */
- ENTRY_SAME(recvfrom)
- /* struct timex contains longs */
- ENTRY_COMP(adjtimex)
- ENTRY_SAME(mprotect) /* 125 */
- /* old_sigset_t forced to 32 bits. Beware glibc sigset_t */
- ENTRY_COMP(sigprocmask)
- ENTRY_SAME(ni_syscall) /* create_module */
- ENTRY_SAME(init_module)
- ENTRY_SAME(delete_module)
- ENTRY_SAME(ni_syscall) /* 130: get_kernel_syms */
- /* time_t inside struct dqblk */
- ENTRY_SAME(quotactl)
- ENTRY_SAME(getpgid)
- ENTRY_SAME(fchdir)
- ENTRY_SAME(bdflush)
- ENTRY_SAME(sysfs) /* 135 */
- ENTRY_OURS(personality)
- ENTRY_SAME(ni_syscall) /* for afs_syscall */
- ENTRY_SAME(setfsuid)
- ENTRY_SAME(setfsgid)
- /* I think this might work */
- ENTRY_SAME(llseek) /* 140 */
- ENTRY_COMP(getdents)
- /* it is POSSIBLE that select will be OK because even though fd_set
- * contains longs, the macros and sizes are clever. */
- ENTRY_COMP(select)
- ENTRY_SAME(flock)
- ENTRY_SAME(msync)
- /* struct iovec contains pointers */
- ENTRY_COMP(readv) /* 145 */
- ENTRY_COMP(writev)
- ENTRY_SAME(getsid)
- ENTRY_SAME(fdatasync)
- /* struct __sysctl_args is a mess */
- ENTRY_COMP(sysctl)
- ENTRY_SAME(mlock) /* 150 */
- ENTRY_SAME(munlock)
- ENTRY_SAME(mlockall)
- ENTRY_SAME(munlockall)
- /* struct sched_param is ok for now */
- ENTRY_SAME(sched_setparam)
- ENTRY_SAME(sched_getparam) /* 155 */
- ENTRY_SAME(sched_setscheduler)
- ENTRY_SAME(sched_getscheduler)
- ENTRY_SAME(sched_yield)
- ENTRY_SAME(sched_get_priority_max)
- ENTRY_SAME(sched_get_priority_min) /* 160 */
- ENTRY_COMP(sched_rr_get_interval)
- ENTRY_COMP(nanosleep)
- ENTRY_SAME(mremap)
- ENTRY_SAME(setresuid)
- ENTRY_SAME(getresuid) /* 165 */
- ENTRY_COMP(sigaltstack)
- ENTRY_SAME(ni_syscall) /* query_module */
- ENTRY_SAME(poll)
- /* structs contain pointers and an in_addr... */
- ENTRY_SAME(ni_syscall) /* was nfsservctl */
- ENTRY_SAME(setresgid) /* 170 */
- ENTRY_SAME(getresgid)
- ENTRY_SAME(prctl)
- /* signals need a careful review */
- ENTRY_SAME(rt_sigreturn_wrapper)
- ENTRY_COMP(rt_sigaction)
- ENTRY_COMP(rt_sigprocmask) /* 175 */
- ENTRY_COMP(rt_sigpending)
- ENTRY_COMP(rt_sigtimedwait)
- /* even though the struct siginfo_t is different, it appears like
- * all the paths use values which should be same wide and narrow.
- * Also the struct is padded to 128 bytes which means we don't have
- * to worry about faulting trying to copy in a larger 64-bit
- * struct from a 32-bit user-space app.
- */
- ENTRY_COMP(rt_sigqueueinfo)
- ENTRY_COMP(rt_sigsuspend)
- ENTRY_SAME(chown) /* 180 */
- /* setsockopt() used by iptables: SO_SET_REPLACE/SO_SET_ADD_COUNTERS */
- ENTRY_COMP(setsockopt)
- ENTRY_COMP(getsockopt)
- ENTRY_COMP(sendmsg)
- ENTRY_COMP(recvmsg)
- ENTRY_SAME(semop) /* 185 */
- ENTRY_SAME(semget)
- ENTRY_COMP(semctl)
- ENTRY_COMP(msgsnd)
- ENTRY_COMP(msgrcv)
- ENTRY_SAME(msgget) /* 190 */
- ENTRY_COMP(msgctl)
- ENTRY_COMP(shmat)
- ENTRY_SAME(shmdt)
- ENTRY_SAME(shmget)
- ENTRY_COMP(shmctl) /* 195 */
- ENTRY_SAME(ni_syscall) /* streams1 */
- ENTRY_SAME(ni_syscall) /* streams2 */
- ENTRY_SAME(lstat64)
- ENTRY_OURS(truncate64)
- ENTRY_OURS(ftruncate64) /* 200 */
- ENTRY_SAME(getdents64)
- ENTRY_COMP(fcntl64)
- ENTRY_SAME(ni_syscall) /* attrctl -- dead */
- ENTRY_SAME(ni_syscall) /* acl_get -- dead */
- ENTRY_SAME(ni_syscall) /* 205 (acl_set -- dead) */
- ENTRY_SAME(gettid)
- ENTRY_OURS(readahead)
- ENTRY_SAME(tkill)
- ENTRY_COMP(sendfile64)
- ENTRY_COMP(futex) /* 210 */
- ENTRY_COMP(sched_setaffinity)
- ENTRY_COMP(sched_getaffinity)
- ENTRY_SAME(ni_syscall) /* set_thread_area */
- ENTRY_SAME(ni_syscall) /* get_thread_area */
- ENTRY_COMP(io_setup) /* 215 */
- ENTRY_SAME(io_destroy)
- ENTRY_COMP(io_getevents)
- ENTRY_COMP(io_submit)
- ENTRY_SAME(io_cancel)
- ENTRY_SAME(ni_syscall) /* 220: was alloc_hugepages */
- ENTRY_SAME(ni_syscall) /* was free_hugepages */
- ENTRY_SAME(exit_group)
- ENTRY_COMP(lookup_dcookie)
- ENTRY_SAME(epoll_create)
- ENTRY_SAME(epoll_ctl) /* 225 */
- ENTRY_SAME(epoll_wait)
- ENTRY_SAME(remap_file_pages)
- ENTRY_COMP(semtimedop)
- ENTRY_COMP(mq_open)
- ENTRY_SAME(mq_unlink) /* 230 */
- ENTRY_COMP(mq_timedsend)
- ENTRY_COMP(mq_timedreceive)
- ENTRY_COMP(mq_notify)
- ENTRY_COMP(mq_getsetattr)
- ENTRY_COMP(waitid) /* 235 */
- ENTRY_OURS(fadvise64_64)
- ENTRY_SAME(set_tid_address)
- ENTRY_SAME(setxattr)
- ENTRY_SAME(lsetxattr)
- ENTRY_SAME(fsetxattr) /* 240 */
- ENTRY_SAME(getxattr)
- ENTRY_SAME(lgetxattr)
- ENTRY_SAME(fgetxattr)
- ENTRY_SAME(listxattr)
- ENTRY_SAME(llistxattr) /* 245 */
- ENTRY_SAME(flistxattr)
- ENTRY_SAME(removexattr)
- ENTRY_SAME(lremovexattr)
- ENTRY_SAME(fremovexattr)
- ENTRY_COMP(timer_create) /* 250 */
- ENTRY_COMP(timer_settime)
- ENTRY_COMP(timer_gettime)
- ENTRY_SAME(timer_getoverrun)
- ENTRY_SAME(timer_delete)
- ENTRY_COMP(clock_settime) /* 255 */
- ENTRY_COMP(clock_gettime)
- ENTRY_COMP(clock_getres)
- ENTRY_COMP(clock_nanosleep)
- ENTRY_SAME(tgkill)
- ENTRY_COMP(mbind) /* 260 */
- ENTRY_COMP(get_mempolicy)
- ENTRY_COMP(set_mempolicy)
- ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
- ENTRY_SAME(add_key)
- ENTRY_SAME(request_key) /* 265 */
- ENTRY_COMP(keyctl)
- ENTRY_SAME(ioprio_set)
- ENTRY_SAME(ioprio_get)
- ENTRY_SAME(inotify_init)
- ENTRY_SAME(inotify_add_watch) /* 270 */
- ENTRY_SAME(inotify_rm_watch)
- ENTRY_SAME(migrate_pages)
- ENTRY_COMP(pselect6)
- ENTRY_COMP(ppoll)
- ENTRY_COMP(openat) /* 275 */
- ENTRY_SAME(mkdirat)
- ENTRY_SAME(mknodat)
- ENTRY_SAME(fchownat)
- ENTRY_COMP(futimesat)
- ENTRY_SAME(fstatat64) /* 280 */
- ENTRY_SAME(unlinkat)
- ENTRY_SAME(renameat)
- ENTRY_SAME(linkat)
- ENTRY_SAME(symlinkat)
- ENTRY_SAME(readlinkat) /* 285 */
- ENTRY_SAME(fchmodat)
- ENTRY_SAME(faccessat)
- ENTRY_SAME(unshare)
- ENTRY_COMP(set_robust_list)
- ENTRY_COMP(get_robust_list) /* 290 */
- ENTRY_SAME(splice)
- ENTRY_OURS(sync_file_range)
- ENTRY_SAME(tee)
- ENTRY_COMP(vmsplice)
- ENTRY_COMP(move_pages) /* 295 */
- ENTRY_SAME(getcpu)
- ENTRY_COMP(epoll_pwait)
- ENTRY_COMP(statfs64)
- ENTRY_COMP(fstatfs64)
- ENTRY_COMP(kexec_load) /* 300 */
- ENTRY_COMP(utimensat)
- ENTRY_COMP(signalfd)
- ENTRY_SAME(ni_syscall) /* was timerfd */
- ENTRY_SAME(eventfd)
- ENTRY_OURS(fallocate) /* 305 */
- ENTRY_SAME(timerfd_create)
- ENTRY_COMP(timerfd_settime)
- ENTRY_COMP(timerfd_gettime)
- ENTRY_COMP(signalfd4)
- ENTRY_SAME(eventfd2) /* 310 */
- ENTRY_SAME(epoll_create1)
- ENTRY_SAME(dup3)
- ENTRY_SAME(pipe2)
- ENTRY_SAME(inotify_init1)
- ENTRY_COMP(preadv) /* 315 */
- ENTRY_COMP(pwritev)
- ENTRY_COMP(rt_tgsigqueueinfo)
- ENTRY_SAME(perf_event_open)
- ENTRY_COMP(recvmmsg)
- ENTRY_SAME(accept4) /* 320 */
- ENTRY_SAME(prlimit64)
- ENTRY_SAME(fanotify_init)
- ENTRY_DIFF(fanotify_mark)
- ENTRY_COMP(clock_adjtime)
- ENTRY_SAME(name_to_handle_at) /* 325 */
- ENTRY_COMP(open_by_handle_at)
- ENTRY_SAME(syncfs)
- ENTRY_SAME(setns)
- ENTRY_COMP(sendmmsg)
- ENTRY_COMP(process_vm_readv) /* 330 */
- ENTRY_COMP(process_vm_writev)
- ENTRY_SAME(kcmp)
- ENTRY_SAME(finit_module)
- ENTRY_SAME(sched_setattr)
- ENTRY_SAME(sched_getattr) /* 335 */
- ENTRY_COMP(utimes)
- ENTRY_SAME(renameat2)
- ENTRY_SAME(seccomp)
- ENTRY_SAME(getrandom)
- ENTRY_SAME(memfd_create) /* 340 */
- ENTRY_SAME(bpf)
- ENTRY_COMP(execveat)
- ENTRY_SAME(membarrier)
- ENTRY_SAME(userfaultfd)
- ENTRY_SAME(mlock2) /* 345 */
- ENTRY_SAME(copy_file_range)
- ENTRY_COMP(preadv2)
- ENTRY_COMP(pwritev2)
- ENTRY_SAME(statx)
- ENTRY_COMP(io_pgetevents) /* 350 */
-
-
-.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
-.error "size of syscall table does not fit value of __NR_Linux_syscalls"
-.endif
-
-#undef ENTRY_SAME
-#undef ENTRY_DIFF
-#undef ENTRY_UHOH
-#undef ENTRY_COMP
-#undef ENTRY_OURS
diff --git a/arch/parisc/kernel/syscalls/Makefile b/arch/parisc/kernel/syscalls/Makefile
new file mode 100644
index 000000000000..c22a21c39f30
--- /dev/null
+++ b/arch/parisc/kernel/syscalls/Makefile
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
+ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+syscall := $(srctree)/$(src)/syscall.tbl
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+ '$(syshdr_abis_$(basetarget))' \
+ '$(syshdr_pfx_$(basetarget))' \
+ '$(syshdr_offset_$(basetarget))'
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
+ '$(systbl_abis_$(basetarget))' \
+ '$(systbl_abi_$(basetarget))' \
+ '$(systbl_offset_$(basetarget))'
+
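+# The per-target variables below select what goes into each generated
+# header: *_abis_* picks which ABI rows of syscall.tbl are included, and
+# systbl_abi_* selects the compat entry points for the c32 table.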
+syshdr_abis_unistd_32 := common,32
+$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+ $(call if_changed,syshdr)
+
+syshdr_abis_unistd_64 := common,64
+$(uapi)/unistd_64.h: $(syscall) $(syshdr)
+ $(call if_changed,syshdr)
+
+systbl_abis_syscall_table_32 := common,32
+$(kapi)/syscall_table_32.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abis_syscall_table_64 := common,64
+$(kapi)/syscall_table_64.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abis_syscall_table_c32 := common,32
+systbl_abi_syscall_table_c32 := c32
+$(kapi)/syscall_table_c32.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_32.h unistd_64.h
+kapisyshdr-y += syscall_table_32.h \
+ syscall_table_64.h \
+ syscall_table_c32.h
+
+targets += $(uapisyshdr-y) $(kapisyshdr-y)
+
+PHONY += all
+all: $(addprefix $(uapi)/,$(uapisyshdr-y))
+all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+ @:
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
new file mode 100644
index 000000000000..9bbd2f9f56c8
--- /dev/null
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -0,0 +1,369 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for parisc
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> can be common, 64, or 32 for this file.
+#
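+# For example, "5 common open sys_open compat_sys_open" routes the
+# native tables to sys_open and the compat (c32) table to
+# compat_sys_open.
+#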
+0 common restart_syscall sys_restart_syscall
+1 common exit sys_exit
+2 common fork sys_fork_wrapper
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open compat_sys_open
+6 common close sys_close
+7 common waitpid sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve compat_sys_execve
+12 common chdir sys_chdir
+13 common time sys_time compat_sys_time
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown
+17 common socket sys_socket
+18 common stat sys_newstat compat_sys_newstat
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount compat_sys_mount
+22 common bind sys_bind
+23 common setuid sys_setuid
+24 common getuid sys_getuid
+25 common stime sys_stime compat_sys_stime
+26 common ptrace sys_ptrace compat_sys_ptrace
+27 common alarm sys_alarm
+28 common fstat sys_newfstat compat_sys_newfstat
+29 common pause sys_pause
+30 common utime sys_utime compat_sys_utime
+31 common connect sys_connect
+32 common listen sys_listen
+33 common access sys_access
+34 common nice sys_nice
+35 common accept sys_accept
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times compat_sys_times
+44 common getsockname sys_getsockname
+45 common brk sys_brk
+46 common setgid sys_setgid
+47 common getgid sys_getgid
+48 common signal sys_signal
+49 common geteuid sys_geteuid
+50 common getegid sys_getegid
+51 common acct sys_acct
+52 common umount2 sys_umount
+53 common getpeername sys_getpeername
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common fcntl sys_fcntl compat_sys_fcntl
+56 common socketpair sys_socketpair
+57 common setpgid sys_setpgid
+58 common send sys_send
+59 common uname sys_newuname
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat compat_sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common pivot_root sys_pivot_root
+68 common sgetmask sys_sgetmask sys32_unimplemented
+69 common ssetmask sys_ssetmask sys32_unimplemented
+70 common setreuid sys_setreuid
+71 common setregid sys_setregid
+72 common mincore sys_mincore
+73 common sigpending sys_sigpending compat_sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit compat_sys_setrlimit
+76 common getrlimit sys_getrlimit compat_sys_getrlimit
+77 common getrusage sys_getrusage compat_sys_getrusage
+78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 common settimeofday sys_settimeofday compat_sys_settimeofday
+80 common getgroups sys_getgroups
+81 common setgroups sys_setgroups
+82 common sendto sys_sendto
+83 common symlink sys_symlink
+84 common lstat sys_newlstat compat_sys_newlstat
+85 common readlink sys_readlink
+86 common uselib sys_ni_syscall
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+89 common mmap2 sys_mmap2
+90 common mmap sys_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate compat_sys_truncate
+93 common ftruncate sys_ftruncate compat_sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+98 common recv sys_recv
+99 common statfs sys_statfs compat_sys_statfs
+100 common fstatfs sys_fstatfs compat_sys_fstatfs
+101 common stat64 sys_stat64
+# 102 was socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer compat_sys_setitimer
+105 common getitimer sys_getitimer compat_sys_getitimer
+106 common capget sys_capget
+107 common capset sys_capset
+108 32 pread64 parisc_pread64
+108 64 pread64 sys_pread64
+109 32 pwrite64 parisc_pwrite64
+109 64 pwrite64 sys_pwrite64
+110 common getcwd sys_getcwd
+111 common vhangup sys_vhangup
+112 common fstat64 sys_fstat64
+113 common vfork sys_vfork_wrapper
+114 common wait4 sys_wait4 compat_sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo compat_sys_sysinfo
+117 common shutdown sys_shutdown
+118 common fsync sys_fsync
+119 common madvise sys_madvise
+120 common clone sys_clone_wrapper
+121 common setdomainname sys_setdomainname
+122 common sendfile sys_sendfile compat_sys_sendfile
+123 common recvfrom sys_recvfrom
+124 common adjtimex sys_adjtimex compat_sys_adjtimex
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
+# 127 was create_module
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+# 130 was get_kernel_syms
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_bdflush
+135 common sysfs sys_sysfs
+136 32 personality parisc_personality
+136 64 personality sys_personality
+# 137 was afs_syscall
+138 common setfsuid sys_setfsuid
+139 common setfsgid sys_setfsgid
+140 common _llseek sys_llseek
+141 common getdents sys_getdents compat_sys_getdents
+142 common _newselect sys_select compat_sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv compat_sys_readv
+146 common writev sys_writev compat_sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_sysctl compat_sys_sysctl
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
+162 common nanosleep sys_nanosleep compat_sys_nanosleep
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid
+165 common getresuid sys_getresuid
+166 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+# 167 was query_module
+168 common poll sys_poll
+# 169 was nfsservctl
+170 common setresgid sys_setresgid
+171 common getresgid sys_getresgid
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn_wrapper
+174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+180 common chown sys_chown
+181 common setsockopt sys_setsockopt compat_sys_setsockopt
+182 common getsockopt sys_getsockopt compat_sys_getsockopt
+183 common sendmsg sys_sendmsg compat_sys_sendmsg
+184 common recvmsg sys_recvmsg compat_sys_recvmsg
+185 common semop sys_semop
+186 common semget sys_semget
+187 common semctl sys_semctl compat_sys_semctl
+188 common msgsnd sys_msgsnd compat_sys_msgsnd
+189 common msgrcv sys_msgrcv compat_sys_msgrcv
+190 common msgget sys_msgget
+191 common msgctl sys_msgctl compat_sys_msgctl
+192 common shmat sys_shmat compat_sys_shmat
+193 common shmdt sys_shmdt
+194 common shmget sys_shmget
+195 common shmctl sys_shmctl compat_sys_shmctl
+# 196 was getpmsg
+# 197 was putpmsg
+198 common lstat64 sys_lstat64
+199 32 truncate64 parisc_truncate64
+199 64 truncate64 sys_truncate64
+200 32 ftruncate64 parisc_ftruncate64
+200 64 ftruncate64 sys_ftruncate64
+201 common getdents64 sys_getdents64
+202 common fcntl64 sys_fcntl64 compat_sys_fcntl64
+# 203 was attrctl
+# 204 was acl_get
+# 205 was acl_set
+206 common gettid sys_gettid
+207 32 readahead parisc_readahead
+207 64 readahead sys_readahead
+208 common tkill sys_tkill
+209 common sendfile64 sys_sendfile64 compat_sys_sendfile64
+210 common futex sys_futex compat_sys_futex
+211 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+212 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+# 213 was set_thread_area
+# 214 was get_thread_area
+215 common io_setup sys_io_setup compat_sys_io_setup
+216 common io_destroy sys_io_destroy
+217 common io_getevents sys_io_getevents compat_sys_io_getevents
+218 common io_submit sys_io_submit compat_sys_io_submit
+219 common io_cancel sys_io_cancel
+# 220 was alloc_hugepages
+# 221 was free_hugepages
+222 common exit_group sys_exit_group
+223 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+224 common epoll_create sys_epoll_create
+225 common epoll_ctl sys_epoll_ctl
+226 common epoll_wait sys_epoll_wait
+227 common remap_file_pages sys_remap_file_pages
+228 common semtimedop sys_semtimedop compat_sys_semtimedop
+229 common mq_open sys_mq_open compat_sys_mq_open
+230 common mq_unlink sys_mq_unlink
+231 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
+232 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+233 common mq_notify sys_mq_notify compat_sys_mq_notify
+234 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+235 common waitid sys_waitid compat_sys_waitid
+236 32 fadvise64_64 parisc_fadvise64_64
+236 64 fadvise64_64 sys_fadvise64_64
+237 common set_tid_address sys_set_tid_address
+238 common setxattr sys_setxattr
+239 common lsetxattr sys_lsetxattr
+240 common fsetxattr sys_fsetxattr
+241 common getxattr sys_getxattr
+242 common lgetxattr sys_lgetxattr
+243 common fgetxattr sys_fgetxattr
+244 common listxattr sys_listxattr
+245 common llistxattr sys_llistxattr
+246 common flistxattr sys_flistxattr
+247 common removexattr sys_removexattr
+248 common lremovexattr sys_lremovexattr
+249 common fremovexattr sys_fremovexattr
+250 common timer_create sys_timer_create compat_sys_timer_create
+251 common timer_settime sys_timer_settime compat_sys_timer_settime
+252 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+253 common timer_getoverrun sys_timer_getoverrun
+254 common timer_delete sys_timer_delete
+255 common clock_settime sys_clock_settime compat_sys_clock_settime
+256 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
+257 common clock_getres sys_clock_getres compat_sys_clock_getres
+258 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+259 common tgkill sys_tgkill
+260 common mbind sys_mbind compat_sys_mbind
+261 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+262 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+# 263 was vserver
+264 common add_key sys_add_key
+265 common request_key sys_request_key
+266 common keyctl sys_keyctl compat_sys_keyctl
+267 common ioprio_set sys_ioprio_set
+268 common ioprio_get sys_ioprio_get
+269 common inotify_init sys_inotify_init
+270 common inotify_add_watch sys_inotify_add_watch
+271 common inotify_rm_watch sys_inotify_rm_watch
+272 common migrate_pages sys_migrate_pages
+273 common pselect6 sys_pselect6 compat_sys_pselect6
+274 common ppoll sys_ppoll compat_sys_ppoll
+275 common openat sys_openat compat_sys_openat
+276 common mkdirat sys_mkdirat
+277 common mknodat sys_mknodat
+278 common fchownat sys_fchownat
+279 common futimesat sys_futimesat compat_sys_futimesat
+280 common fstatat64 sys_fstatat64
+281 common unlinkat sys_unlinkat
+282 common renameat sys_renameat
+283 common linkat sys_linkat
+284 common symlinkat sys_symlinkat
+285 common readlinkat sys_readlinkat
+286 common fchmodat sys_fchmodat
+287 common faccessat sys_faccessat
+288 common unshare sys_unshare
+289 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+290 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+291 common splice sys_splice
+292 32 sync_file_range parisc_sync_file_range
+292 64 sync_file_range sys_sync_file_range
+293 common tee sys_tee
+294 common vmsplice sys_vmsplice compat_sys_vmsplice
+295 common move_pages sys_move_pages compat_sys_move_pages
+296 common getcpu sys_getcpu
+297 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+298 common statfs64 sys_statfs64 compat_sys_statfs64
+299 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+300 common kexec_load sys_kexec_load compat_sys_kexec_load
+301 common utimensat sys_utimensat compat_sys_utimensat
+302 common signalfd sys_signalfd compat_sys_signalfd
+# 303 was timerfd
+304 common eventfd sys_eventfd
+305 32 fallocate parisc_fallocate
+305 64 fallocate sys_fallocate
+306 common timerfd_create sys_timerfd_create
+307 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
+308 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+309 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+310 common eventfd2 sys_eventfd2
+311 common epoll_create1 sys_epoll_create1
+312 common dup3 sys_dup3
+313 common pipe2 sys_pipe2
+314 common inotify_init1 sys_inotify_init1
+315 common preadv sys_preadv compat_sys_preadv
+316 common pwritev sys_pwritev compat_sys_pwritev
+317 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+318 common perf_event_open sys_perf_event_open
+319 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+320 common accept4 sys_accept4
+321 common prlimit64 sys_prlimit64
+322 common fanotify_init sys_fanotify_init
+323 common fanotify_mark sys_fanotify_mark sys32_fanotify_mark
+324 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+325 common name_to_handle_at sys_name_to_handle_at
+326 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+327 common syncfs sys_syncfs
+328 common setns sys_setns
+329 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+330 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
+331 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+332 common kcmp sys_kcmp
+333 common finit_module sys_finit_module
+334 common sched_setattr sys_sched_setattr
+335 common sched_getattr sys_sched_getattr
+336 common utimes sys_utimes compat_sys_utimes
+337 common renameat2 sys_renameat2
+338 common seccomp sys_seccomp
+339 common getrandom sys_getrandom
+340 common memfd_create sys_memfd_create
+341 common bpf sys_bpf
+342 common execveat sys_execveat compat_sys_execveat
+343 common membarrier sys_membarrier
+344 common userfaultfd sys_userfaultfd
+345 common mlock2 sys_mlock2
+346 common copy_file_range sys_copy_file_range
+347 common preadv2 sys_preadv2 compat_sys_preadv2
+348 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+349 common statx sys_statx
+350 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
diff --git a/arch/parisc/kernel/syscalls/syscallhdr.sh b/arch/parisc/kernel/syscalls/syscallhdr.sh
new file mode 100644
index 000000000000..50242b747d7c
--- /dev/null
+++ b/arch/parisc/kernel/syscalls/syscallhdr.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_PARISC_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
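+# For unistd_32.h this yields the guard _UAPI_ASM_PARISC_UNISTD_32_H, and
+# (with no prefix or offset) a body along the lines of:
+#   #define __NR_restart_syscall 0
+#   #define __NR_exit 1
+#   ...
+#   #define __NR_syscalls 351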
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry compat ; do
+ if [ -z "$offset" ]; then
+ printf "#define __NR_%s%s\t%s\n" \
+ "${prefix}" "${name}" "${nr}"
+ else
+ printf "#define __NR_%s%s\t(%s + %s)\n" \
+ "${prefix}" "${name}" "${offset}" "${nr}"
+ fi
+ nxt=$((nr+1))
+ done
+
+ printf "\n"
+ printf "#ifdef __KERNEL__\n"
+ printf "#define __NR_syscalls\t%s\n" "${nxt}"
+ printf "#endif\n"
+ printf "\n"
+ printf "#endif /* %s */" "${fileguard}"
+) > "$out"
diff --git a/arch/parisc/kernel/syscalls/syscalltbl.sh b/arch/parisc/kernel/syscalls/syscalltbl.sh
new file mode 100644
index 000000000000..45b5bae26240
--- /dev/null
+++ b/arch/parisc/kernel/syscalls/syscalltbl.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+my_abi="$4"
+offset="$5"
+
+emit() {
+ t_nxt="$1"
+ t_nr="$2"
+ t_entry="$3"
+
+ while [ $t_nxt -lt $t_nr ]; do
+ printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+ t_nxt=$((t_nxt+1))
+ done
+ printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+}
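+# emit() pads any hole in the numbering with sys_ni_syscall so that a
+# slot's index in the generated table always equals its syscall number.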
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ nxt=0
+ if [ -z "$offset" ]; then
+ offset=0
+ fi
+
+ while read nr abi name entry compat ; do
+ if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then
+ emit $((nxt+offset)) $((nr+offset)) $compat
+ else
+ emit $((nxt+offset)) $((nr+offset)) $entry
+ fi
+ nxt=$((nr+1))
+ done
+) > "$out"
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 4bc8edd83cee..50f27a656051 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -128,6 +128,7 @@ config PPC
#
# Please keep this list sorted alphabetically.
#
+ select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_DMA_SET_COHERENT_MASK
select ARCH_HAS_ELF_RANDOMIZE
@@ -373,9 +374,9 @@ config PPC_ADV_DEBUG_DAC_RANGE
depends on PPC_ADV_DEBUG_REGS && 44x
default y
-config ZONE_DMA32
+config ZONE_DMA
bool
- default y if PPC64
+ default y if PPC_BOOK3E_64
config PGTABLE_LEVELS
int
@@ -555,7 +556,7 @@ config RELOCATABLE_TEST
config CRASH_DUMP
bool "Build a dump capture kernel"
- depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
+ depends on PPC64 || PPC_BOOK3S_32 || FSL_BOOKE || (44x && !SMP)
select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
help
Build a kernel suitable for use as a dump capture kernel.
@@ -868,10 +869,6 @@ config ISA
have an IBM RS/6000 or pSeries machine, say Y. If you have an
embedded board, consult your board documentation.
-config ZONE_DMA
- bool
- default y
-
config GENERIC_ISA_DMA
bool
depends on ISA_DMA_API
@@ -1095,7 +1092,7 @@ config PHYSICAL_START_BOOL
config PHYSICAL_START
hex "Physical address where the kernel is loaded" if PHYSICAL_START_BOOL
- default "0x02000000" if PPC_STD_MMU && CRASH_DUMP && !NONSTATIC_KERNEL
+ default "0x02000000" if PPC_BOOK3S && CRASH_DUMP && !NONSTATIC_KERNEL
default "0x00000000"
config PHYSICAL_ALIGN
@@ -1145,7 +1142,7 @@ config PIN_TLB_DATA
config PIN_TLB_IMMR
bool "Pinned TLB for IMMR"
- depends on PIN_TLB
+ depends on PIN_TLB || PPC_EARLY_DEBUG_CPM
default y
config PIN_TLB_TEXT
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 8a2ce14d68d0..488c9edffa58 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -30,6 +30,10 @@ endif
endif
endif
+ifdef CONFIG_PPC_BOOK3S_32
+KBUILD_CFLAGS += -mcpu=powerpc
+endif
+
ifeq ($(CROSS_COMPILE),)
KBUILD_DEFCONFIG := $(shell uname -m)_defconfig
else
@@ -152,7 +156,14 @@ endif
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
-CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 $(MULTIPLEWORD)
+# Clang unconditionally reserves r2 on ppc32 and does not support the flag
+# https://bugs.llvm.org/show_bug.cgi?id=39555
+CFLAGS-$(CONFIG_PPC32) := $(call cc-option, -ffixed-r2)
+
+# Clang doesn't support -mmultiple / -mno-multiple
+# https://bugs.llvm.org/show_bug.cgi?id=39556
+CFLAGS-$(CONFIG_PPC32) += $(call cc-option, $(MULTIPLEWORD))
+
CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata)
ifdef CONFIG_PPC_BOOK3S_64
@@ -228,10 +239,6 @@ KBUILD_CFLAGS += $(call cc-option,-mno-vsx)
KBUILD_CFLAGS += $(call cc-option,-mno-spe)
KBUILD_CFLAGS += $(call cc-option,-mspe=no)
-# Enable unit-at-a-time mode when possible. It shrinks the
-# kernel considerably.
-KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
-
# FIXME: the module load should be taught about the additional relocs
# generated by this.
# revert to pre-gcc-4.4 behaviour of .eh_frame
@@ -241,10 +248,6 @@ KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
# often slow when they are implemented at all
KBUILD_CFLAGS += $(call cc-option,-mno-string)
-ifdef CONFIG_6xx
-KBUILD_CFLAGS += -mcpu=powerpc
-endif
-
cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
cpu-as-$(CONFIG_E200) += -Wa,-me200
@@ -317,6 +320,14 @@ PHONY += ppc64le_defconfig
ppc64le_defconfig:
$(call merge_into_defconfig,ppc64_defconfig,le)
+PHONY += ppc64le_guest_defconfig
+ppc64le_guest_defconfig:
+ $(call merge_into_defconfig,ppc64_defconfig,le guest)
+
+PHONY += ppc64_guest_defconfig
+ppc64_guest_defconfig:
+ $(call merge_into_defconfig,ppc64_defconfig,be guest)
+
PHONY += powernv_be_defconfig
powernv_be_defconfig:
$(call merge_into_defconfig,powernv_defconfig,be)
@@ -402,6 +413,9 @@ archclean:
archprepare: checkbin
+archheaders:
+ $(Q)$(MAKE) $(build)=arch/powerpc/kernel/syscalls all
+
ifdef CONFIG_STACKPROTECTOR
prepare: stack_protector_prepare
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 39354365f54a..ed9883169190 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -197,7 +197,7 @@ $(obj)/empty.c:
$(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S
$(Q)cp $< $@
-$(obj)/serial.c: $(obj)/autoconf.h
+$(srctree)/$(src)/serial.c: $(obj)/autoconf.h
$(obj)/autoconf.h: $(obj)/%: $(objtree)/include/generated/%
$(Q)cp $< $@
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 32dfe6d083f3..9b9d17437373 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -15,7 +15,7 @@
RELA = 7
RELACOUNT = 0x6ffffff9
- .text
+ .data
/* A procedure descriptor used when booting this as a COFF file.
* When making COFF, this comes first in the link and we're
* linked at 0x500000.
@@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9
.globl _zimage_start_opd
_zimage_start_opd:
.long 0x500000, 0, 0, 0
+ .text
+ b _zimage_start
#ifdef __powerpc64__
.balign 8
diff --git a/arch/powerpc/boot/dts/bamboo.dts b/arch/powerpc/boot/dts/bamboo.dts
index 538e42b1120d..b5861fa3836c 100644
--- a/arch/powerpc/boot/dts/bamboo.dts
+++ b/arch/powerpc/boot/dts/bamboo.dts
@@ -268,8 +268,10 @@
/* Outbound ranges, one memory and one IO,
* later cannot be changed. Chip supports a second
* IO range but we don't use it for now
+ * The chip also supports a larger memory range but
+ * it's not naturally aligned, so our code will break
*/
- ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x40000000
+ ranges = <0x02000000 0x00000000 0xa0000000 0x00000000 0xa0000000 0x00000000 0x20000000
0x02000000 0x00000000 0x00000000 0x00000000 0xe0000000 0x00000000 0x00100000
0x01000000 0x00000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>;
diff --git a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
index 88d8423f8ac5..bb7b9b9f3f5f 100644
--- a/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4420si-pre.dtsi
@@ -70,14 +70,14 @@
cpu0: PowerPC,e6500@0 {
device_type = "cpu";
reg = <0 1>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
cpu1: PowerPC,e6500@2 {
device_type = "cpu";
reg = <2 3>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
index f3f968c51f4b..388ba1b15f8c 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-pre.dtsi
@@ -75,28 +75,28 @@
cpu0: PowerPC,e6500@0 {
device_type = "cpu";
reg = <0 1>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
cpu1: PowerPC,e6500@2 {
device_type = "cpu";
reg = <2 3>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
cpu2: PowerPC,e6500@4 {
device_type = "cpu";
reg = <4 5>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
cpu3: PowerPC,e6500@6 {
device_type = "cpu";
reg = <6 7>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 1b33f5157c8a..4f044b41a776 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -398,21 +398,6 @@
};
/include/ "qoriq-clockgen2.dtsi"
- clockgen: global-utilities@e1000 {
- compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0";
- reg = <0xe1000 0x1000>;
-
- mux0: mux0@0 {
- #clock-cells = <0>;
- reg = <0x0 0x4>;
- compatible = "fsl,qoriq-core-mux-2.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>,
- <&pll1 0>, <&pll1 1>, <&pll1 2>;
- clock-names = "pll0", "pll0-div2", "pll0-div4",
- "pll1", "pll1-div2", "pll1-div4";
- clock-output-names = "cmux0";
- };
- };
rcpm: global-utilities@e2000 {
compatible = "fsl,b4-rcpm", "fsl,qoriq-rcpm-2.0";
diff --git a/arch/powerpc/boot/dts/fsl/mpc8641_hpcn.dts b/arch/powerpc/boot/dts/fsl/mpc8641_hpcn.dts
index 11bea3e6a43f..58ac17496c89 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8641_hpcn.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8641_hpcn.dts
@@ -169,100 +169,100 @@
interrupt-map-mask = <0xff00 0 0 7>;
interrupt-map = <
/* IDSEL 0x11 func 0 - PCI slot 1 */
- 0x8800 0 0 1 &mpic 2 1
- 0x8800 0 0 2 &mpic 3 1
- 0x8800 0 0 3 &mpic 4 1
- 0x8800 0 0 4 &mpic 1 1
+ 0x8800 0 0 1 &mpic 2 1 0 0
+ 0x8800 0 0 2 &mpic 3 1 0 0
+ 0x8800 0 0 3 &mpic 4 1 0 0
+ 0x8800 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 1 - PCI slot 1 */
- 0x8900 0 0 1 &mpic 2 1
- 0x8900 0 0 2 &mpic 3 1
- 0x8900 0 0 3 &mpic 4 1
- 0x8900 0 0 4 &mpic 1 1
+ 0x8900 0 0 1 &mpic 2 1 0 0
+ 0x8900 0 0 2 &mpic 3 1 0 0
+ 0x8900 0 0 3 &mpic 4 1 0 0
+ 0x8900 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 2 - PCI slot 1 */
- 0x8a00 0 0 1 &mpic 2 1
- 0x8a00 0 0 2 &mpic 3 1
- 0x8a00 0 0 3 &mpic 4 1
- 0x8a00 0 0 4 &mpic 1 1
+ 0x8a00 0 0 1 &mpic 2 1 0 0
+ 0x8a00 0 0 2 &mpic 3 1 0 0
+ 0x8a00 0 0 3 &mpic 4 1 0 0
+ 0x8a00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 3 - PCI slot 1 */
- 0x8b00 0 0 1 &mpic 2 1
- 0x8b00 0 0 2 &mpic 3 1
- 0x8b00 0 0 3 &mpic 4 1
- 0x8b00 0 0 4 &mpic 1 1
+ 0x8b00 0 0 1 &mpic 2 1 0 0
+ 0x8b00 0 0 2 &mpic 3 1 0 0
+ 0x8b00 0 0 3 &mpic 4 1 0 0
+ 0x8b00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 4 - PCI slot 1 */
- 0x8c00 0 0 1 &mpic 2 1
- 0x8c00 0 0 2 &mpic 3 1
- 0x8c00 0 0 3 &mpic 4 1
- 0x8c00 0 0 4 &mpic 1 1
+ 0x8c00 0 0 1 &mpic 2 1 0 0
+ 0x8c00 0 0 2 &mpic 3 1 0 0
+ 0x8c00 0 0 3 &mpic 4 1 0 0
+ 0x8c00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 5 - PCI slot 1 */
- 0x8d00 0 0 1 &mpic 2 1
- 0x8d00 0 0 2 &mpic 3 1
- 0x8d00 0 0 3 &mpic 4 1
- 0x8d00 0 0 4 &mpic 1 1
+ 0x8d00 0 0 1 &mpic 2 1 0 0
+ 0x8d00 0 0 2 &mpic 3 1 0 0
+ 0x8d00 0 0 3 &mpic 4 1 0 0
+ 0x8d00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 6 - PCI slot 1 */
- 0x8e00 0 0 1 &mpic 2 1
- 0x8e00 0 0 2 &mpic 3 1
- 0x8e00 0 0 3 &mpic 4 1
- 0x8e00 0 0 4 &mpic 1 1
+ 0x8e00 0 0 1 &mpic 2 1 0 0
+ 0x8e00 0 0 2 &mpic 3 1 0 0
+ 0x8e00 0 0 3 &mpic 4 1 0 0
+ 0x8e00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 7 - PCI slot 1 */
- 0x8f00 0 0 1 &mpic 2 1
- 0x8f00 0 0 2 &mpic 3 1
- 0x8f00 0 0 3 &mpic 4 1
- 0x8f00 0 0 4 &mpic 1 1
+ 0x8f00 0 0 1 &mpic 2 1 0 0
+ 0x8f00 0 0 2 &mpic 3 1 0 0
+ 0x8f00 0 0 3 &mpic 4 1 0 0
+ 0x8f00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x12 func 0 - PCI slot 2 */
- 0x9000 0 0 1 &mpic 3 1
- 0x9000 0 0 2 &mpic 4 1
- 0x9000 0 0 3 &mpic 1 1
- 0x9000 0 0 4 &mpic 2 1
+ 0x9000 0 0 1 &mpic 3 1 0 0
+ 0x9000 0 0 2 &mpic 4 1 0 0
+ 0x9000 0 0 3 &mpic 1 1 0 0
+ 0x9000 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 1 - PCI slot 2 */
- 0x9100 0 0 1 &mpic 3 1
- 0x9100 0 0 2 &mpic 4 1
- 0x9100 0 0 3 &mpic 1 1
- 0x9100 0 0 4 &mpic 2 1
+ 0x9100 0 0 1 &mpic 3 1 0 0
+ 0x9100 0 0 2 &mpic 4 1 0 0
+ 0x9100 0 0 3 &mpic 1 1 0 0
+ 0x9100 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 2 - PCI slot 2 */
- 0x9200 0 0 1 &mpic 3 1
- 0x9200 0 0 2 &mpic 4 1
- 0x9200 0 0 3 &mpic 1 1
- 0x9200 0 0 4 &mpic 2 1
+ 0x9200 0 0 1 &mpic 3 1 0 0
+ 0x9200 0 0 2 &mpic 4 1 0 0
+ 0x9200 0 0 3 &mpic 1 1 0 0
+ 0x9200 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 3 - PCI slot 2 */
- 0x9300 0 0 1 &mpic 3 1
- 0x9300 0 0 2 &mpic 4 1
- 0x9300 0 0 3 &mpic 1 1
- 0x9300 0 0 4 &mpic 2 1
+ 0x9300 0 0 1 &mpic 3 1 0 0
+ 0x9300 0 0 2 &mpic 4 1 0 0
+ 0x9300 0 0 3 &mpic 1 1 0 0
+ 0x9300 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 4 - PCI slot 2 */
- 0x9400 0 0 1 &mpic 3 1
- 0x9400 0 0 2 &mpic 4 1
- 0x9400 0 0 3 &mpic 1 1
- 0x9400 0 0 4 &mpic 2 1
+ 0x9400 0 0 1 &mpic 3 1 0 0
+ 0x9400 0 0 2 &mpic 4 1 0 0
+ 0x9400 0 0 3 &mpic 1 1 0 0
+ 0x9400 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 5 - PCI slot 2 */
- 0x9500 0 0 1 &mpic 3 1
- 0x9500 0 0 2 &mpic 4 1
- 0x9500 0 0 3 &mpic 1 1
- 0x9500 0 0 4 &mpic 2 1
+ 0x9500 0 0 1 &mpic 3 1 0 0
+ 0x9500 0 0 2 &mpic 4 1 0 0
+ 0x9500 0 0 3 &mpic 1 1 0 0
+ 0x9500 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 6 - PCI slot 2 */
- 0x9600 0 0 1 &mpic 3 1
- 0x9600 0 0 2 &mpic 4 1
- 0x9600 0 0 3 &mpic 1 1
- 0x9600 0 0 4 &mpic 2 1
+ 0x9600 0 0 1 &mpic 3 1 0 0
+ 0x9600 0 0 2 &mpic 4 1 0 0
+ 0x9600 0 0 3 &mpic 1 1 0 0
+ 0x9600 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 7 - PCI slot 2 */
- 0x9700 0 0 1 &mpic 3 1
- 0x9700 0 0 2 &mpic 4 1
- 0x9700 0 0 3 &mpic 1 1
- 0x9700 0 0 4 &mpic 2 1
+ 0x9700 0 0 1 &mpic 3 1 0 0
+ 0x9700 0 0 2 &mpic 4 1 0 0
+ 0x9700 0 0 3 &mpic 1 1 0 0
+ 0x9700 0 0 4 &mpic 2 1 0 0
// IDSEL 0x1c USB
0xe000 0 0 1 &i8259 12 2
diff --git a/arch/powerpc/boot/dts/fsl/mpc8641_hpcn_36b.dts b/arch/powerpc/boot/dts/fsl/mpc8641_hpcn_36b.dts
index 7ff62046a9ea..e64b91e321f6 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8641_hpcn_36b.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8641_hpcn_36b.dts
@@ -136,100 +136,100 @@
interrupt-map-mask = <0xff00 0 0 7>;
interrupt-map = <
/* IDSEL 0x11 func 0 - PCI slot 1 */
- 0x8800 0 0 1 &mpic 2 1
- 0x8800 0 0 2 &mpic 3 1
- 0x8800 0 0 3 &mpic 4 1
- 0x8800 0 0 4 &mpic 1 1
+ 0x8800 0 0 1 &mpic 2 1 0 0
+ 0x8800 0 0 2 &mpic 3 1 0 0
+ 0x8800 0 0 3 &mpic 4 1 0 0
+ 0x8800 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 1 - PCI slot 1 */
- 0x8900 0 0 1 &mpic 2 1
- 0x8900 0 0 2 &mpic 3 1
- 0x8900 0 0 3 &mpic 4 1
- 0x8900 0 0 4 &mpic 1 1
+ 0x8900 0 0 1 &mpic 2 1 0 0
+ 0x8900 0 0 2 &mpic 3 1 0 0
+ 0x8900 0 0 3 &mpic 4 1 0 0
+ 0x8900 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 2 - PCI slot 1 */
- 0x8a00 0 0 1 &mpic 2 1
- 0x8a00 0 0 2 &mpic 3 1
- 0x8a00 0 0 3 &mpic 4 1
- 0x8a00 0 0 4 &mpic 1 1
+ 0x8a00 0 0 1 &mpic 2 1 0 0
+ 0x8a00 0 0 2 &mpic 3 1 0 0
+ 0x8a00 0 0 3 &mpic 4 1 0 0
+ 0x8a00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 3 - PCI slot 1 */
- 0x8b00 0 0 1 &mpic 2 1
- 0x8b00 0 0 2 &mpic 3 1
- 0x8b00 0 0 3 &mpic 4 1
- 0x8b00 0 0 4 &mpic 1 1
+ 0x8b00 0 0 1 &mpic 2 1 0 0
+ 0x8b00 0 0 2 &mpic 3 1 0 0
+ 0x8b00 0 0 3 &mpic 4 1 0 0
+ 0x8b00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 4 - PCI slot 1 */
- 0x8c00 0 0 1 &mpic 2 1
- 0x8c00 0 0 2 &mpic 3 1
- 0x8c00 0 0 3 &mpic 4 1
- 0x8c00 0 0 4 &mpic 1 1
+ 0x8c00 0 0 1 &mpic 2 1 0 0
+ 0x8c00 0 0 2 &mpic 3 1 0 0
+ 0x8c00 0 0 3 &mpic 4 1 0 0
+ 0x8c00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 5 - PCI slot 1 */
- 0x8d00 0 0 1 &mpic 2 1
- 0x8d00 0 0 2 &mpic 3 1
- 0x8d00 0 0 3 &mpic 4 1
- 0x8d00 0 0 4 &mpic 1 1
+ 0x8d00 0 0 1 &mpic 2 1 0 0
+ 0x8d00 0 0 2 &mpic 3 1 0 0
+ 0x8d00 0 0 3 &mpic 4 1 0 0
+ 0x8d00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 6 - PCI slot 1 */
- 0x8e00 0 0 1 &mpic 2 1
- 0x8e00 0 0 2 &mpic 3 1
- 0x8e00 0 0 3 &mpic 4 1
- 0x8e00 0 0 4 &mpic 1 1
+ 0x8e00 0 0 1 &mpic 2 1 0 0
+ 0x8e00 0 0 2 &mpic 3 1 0 0
+ 0x8e00 0 0 3 &mpic 4 1 0 0
+ 0x8e00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x11 func 7 - PCI slot 1 */
- 0x8f00 0 0 1 &mpic 2 1
- 0x8f00 0 0 2 &mpic 3 1
- 0x8f00 0 0 3 &mpic 4 1
- 0x8f00 0 0 4 &mpic 1 1
+ 0x8f00 0 0 1 &mpic 2 1 0 0
+ 0x8f00 0 0 2 &mpic 3 1 0 0
+ 0x8f00 0 0 3 &mpic 4 1 0 0
+ 0x8f00 0 0 4 &mpic 1 1 0 0
/* IDSEL 0x12 func 0 - PCI slot 2 */
- 0x9000 0 0 1 &mpic 3 1
- 0x9000 0 0 2 &mpic 4 1
- 0x9000 0 0 3 &mpic 1 1
- 0x9000 0 0 4 &mpic 2 1
+ 0x9000 0 0 1 &mpic 3 1 0 0
+ 0x9000 0 0 2 &mpic 4 1 0 0
+ 0x9000 0 0 3 &mpic 1 1 0 0
+ 0x9000 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 1 - PCI slot 2 */
- 0x9100 0 0 1 &mpic 3 1
- 0x9100 0 0 2 &mpic 4 1
- 0x9100 0 0 3 &mpic 1 1
- 0x9100 0 0 4 &mpic 2 1
+ 0x9100 0 0 1 &mpic 3 1 0 0
+ 0x9100 0 0 2 &mpic 4 1 0 0
+ 0x9100 0 0 3 &mpic 1 1 0 0
+ 0x9100 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 2 - PCI slot 2 */
- 0x9200 0 0 1 &mpic 3 1
- 0x9200 0 0 2 &mpic 4 1
- 0x9200 0 0 3 &mpic 1 1
- 0x9200 0 0 4 &mpic 2 1
+ 0x9200 0 0 1 &mpic 3 1 0 0
+ 0x9200 0 0 2 &mpic 4 1 0 0
+ 0x9200 0 0 3 &mpic 1 1 0 0
+ 0x9200 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 3 - PCI slot 2 */
- 0x9300 0 0 1 &mpic 3 1
- 0x9300 0 0 2 &mpic 4 1
- 0x9300 0 0 3 &mpic 1 1
- 0x9300 0 0 4 &mpic 2 1
+ 0x9300 0 0 1 &mpic 3 1 0 0
+ 0x9300 0 0 2 &mpic 4 1 0 0
+ 0x9300 0 0 3 &mpic 1 1 0 0
+ 0x9300 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 4 - PCI slot 2 */
- 0x9400 0 0 1 &mpic 3 1
- 0x9400 0 0 2 &mpic 4 1
- 0x9400 0 0 3 &mpic 1 1
- 0x9400 0 0 4 &mpic 2 1
+ 0x9400 0 0 1 &mpic 3 1 0 0
+ 0x9400 0 0 2 &mpic 4 1 0 0
+ 0x9400 0 0 3 &mpic 1 1 0 0
+ 0x9400 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 5 - PCI slot 2 */
- 0x9500 0 0 1 &mpic 3 1
- 0x9500 0 0 2 &mpic 4 1
- 0x9500 0 0 3 &mpic 1 1
- 0x9500 0 0 4 &mpic 2 1
+ 0x9500 0 0 1 &mpic 3 1 0 0
+ 0x9500 0 0 2 &mpic 4 1 0 0
+ 0x9500 0 0 3 &mpic 1 1 0 0
+ 0x9500 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 6 - PCI slot 2 */
- 0x9600 0 0 1 &mpic 3 1
- 0x9600 0 0 2 &mpic 4 1
- 0x9600 0 0 3 &mpic 1 1
- 0x9600 0 0 4 &mpic 2 1
+ 0x9600 0 0 1 &mpic 3 1 0 0
+ 0x9600 0 0 2 &mpic 4 1 0 0
+ 0x9600 0 0 3 &mpic 1 1 0 0
+ 0x9600 0 0 4 &mpic 2 1 0 0
/* IDSEL 0x12 func 7 - PCI slot 2 */
- 0x9700 0 0 1 &mpic 3 1
- 0x9700 0 0 2 &mpic 4 1
- 0x9700 0 0 3 &mpic 1 1
- 0x9700 0 0 4 &mpic 2 1
+ 0x9700 0 0 1 &mpic 3 1 0 0
+ 0x9700 0 0 2 &mpic 4 1 0 0
+ 0x9700 0 0 3 &mpic 1 1 0 0
+ 0x9700 0 0 4 &mpic 2 1 0 0
// IDSEL 0x1c USB
0xe000 0 0 1 &i8259 12 2
diff --git a/arch/powerpc/boot/dts/fsl/mpc8641si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8641si-post.dtsi
index eeb7c65d5f22..50039d4fa278 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8641si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8641si-post.dtsi
@@ -97,6 +97,7 @@
&pci0 {
compatible = "fsl,mpc8641-pcie";
device_type = "pci";
+ #interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
bus-range = <0x0 0xff>;
@@ -123,6 +124,7 @@
&pci1 {
compatible = "fsl,mpc8641-pcie";
device_type = "pci";
+ #interrupt-cells = <1>;
#size-cells = <2>;
#address-cells = <3>;
bus-range = <0x0 0xff>;
diff --git a/arch/powerpc/boot/dts/fsl/p1020rdb-pc.dtsi b/arch/powerpc/boot/dts/fsl/p1020rdb-pc.dtsi
index 25f81eea60e0..a13876c05c1e 100644
--- a/arch/powerpc/boot/dts/fsl/p1020rdb-pc.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1020rdb-pc.dtsi
@@ -205,13 +205,13 @@
mdio@24000 {
phy0: ethernet-phy@0 {
interrupt-parent = <&mpic>;
- interrupts = <3 1>;
+ interrupts = <3 1 0 0>;
reg = <0x0>;
};
phy1: ethernet-phy@1 {
interrupt-parent = <&mpic>;
- interrupts = <2 1>;
+ interrupts = <2 1 0 0>;
reg = <0x1>;
};
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 51e975d7631a..872e4485dc3f 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -327,24 +327,6 @@
/include/ "qoriq-clockgen1.dtsi"
global-utilities@e1000 {
compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0";
-
- mux2: mux2@40 {
- #clock-cells = <0>;
- reg = <0x40 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux2";
- };
-
- mux3: mux3@60 {
- #clock-cells = <0>;
- reg = <0x60 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux3";
- };
};
rcpm: global-utilities@e2000 {
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi
index 941274c41f21..6318962e8d14 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-pre.dtsi
@@ -89,7 +89,7 @@
cpu0: PowerPC,e500mc@0 {
device_type = "cpu";
reg = <0>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_0>;
fsl,portid-mapping = <0x80000000>;
L2_0: l2-cache {
@@ -99,7 +99,7 @@
cpu1: PowerPC,e500mc@1 {
device_type = "cpu";
reg = <1>;
- clocks = <&mux1>;
+ clocks = <&clockgen 1 1>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x40000000>;
L2_1: l2-cache {
@@ -109,7 +109,7 @@
cpu2: PowerPC,e500mc@2 {
device_type = "cpu";
reg = <2>;
- clocks = <&mux2>;
+ clocks = <&clockgen 1 2>;
next-level-cache = <&L2_2>;
fsl,portid-mapping = <0x20000000>;
L2_2: l2-cache {
@@ -119,7 +119,7 @@
cpu3: PowerPC,e500mc@3 {
device_type = "cpu";
reg = <3>;
- clocks = <&mux3>;
+ clocks = <&clockgen 1 3>;
next-level-cache = <&L2_3>;
fsl,portid-mapping = <0x10000000>;
L2_3: l2-cache {
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
index 187676fa8d83..81bc75aca2e0 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -354,24 +354,6 @@
/include/ "qoriq-clockgen1.dtsi"
global-utilities@e1000 {
compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0";
-
- mux2: mux2@40 {
- #clock-cells = <0>;
- reg = <0x40 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux2";
- };
-
- mux3: mux3@60 {
- #clock-cells = <0>;
- reg = <0x60 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux3";
- };
};
rcpm: global-utilities@e2000 {
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi
index 50b73e8e638f..db92f1151a48 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-pre.dtsi
@@ -90,7 +90,7 @@
cpu0: PowerPC,e500mc@0 {
device_type = "cpu";
reg = <0>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_0>;
fsl,portid-mapping = <0x80000000>;
L2_0: l2-cache {
@@ -100,7 +100,7 @@
cpu1: PowerPC,e500mc@1 {
device_type = "cpu";
reg = <1>;
- clocks = <&mux1>;
+ clocks = <&clockgen 1 1>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x40000000>;
L2_1: l2-cache {
@@ -110,7 +110,7 @@
cpu2: PowerPC,e500mc@2 {
device_type = "cpu";
reg = <2>;
- clocks = <&mux2>;
+ clocks = <&clockgen 1 2>;
next-level-cache = <&L2_2>;
fsl,portid-mapping = <0x20000000>;
L2_2: l2-cache {
@@ -120,7 +120,7 @@
cpu3: PowerPC,e500mc@3 {
device_type = "cpu";
reg = <3>;
- clocks = <&mux3>;
+ clocks = <&clockgen 1 3>;
next-level-cache = <&L2_3>;
fsl,portid-mapping = <0x10000000>;
L2_3: l2-cache {
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index a0252085f858..4da49b6dd3f5 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -374,76 +374,6 @@
/include/ "qoriq-clockgen1.dtsi"
global-utilities@e1000 {
compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0";
-
- pll2: pll2@840 {
- #clock-cells = <1>;
- reg = <0x840 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll2", "pll2-div2";
- };
-
- pll3: pll3@860 {
- #clock-cells = <1>;
- reg = <0x860 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll3", "pll3-div2";
- };
-
- mux2: mux2@40 {
- #clock-cells = <0>;
- reg = <0x40 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux2";
- };
-
- mux3: mux3@60 {
- #clock-cells = <0>;
- reg = <0x60 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux3";
- };
-
- mux4: mux4@80 {
- #clock-cells = <0>;
- reg = <0x80 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll2 0>, <&pll2 1>, <&pll3 0>, <&pll3 1>;
- clock-names = "pll2", "pll2-div2", "pll3", "pll3-div2";
- clock-output-names = "cmux4";
- };
-
- mux5: mux5@a0 {
- #clock-cells = <0>;
- reg = <0xa0 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll2 0>, <&pll2 1>, <&pll3 0>, <&pll3 1>;
- clock-names = "pll2", "pll2-div2", "pll3", "pll3-div2";
- clock-output-names = "cmux5";
- };
-
- mux6: mux6@c0 {
- #clock-cells = <0>;
- reg = <0xc0 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll2 0>, <&pll2 1>, <&pll3 0>, <&pll3 1>;
- clock-names = "pll2", "pll2-div2", "pll3", "pll3-div2";
- clock-output-names = "cmux6";
- };
-
- mux7: mux7@e0 {
- #clock-cells = <0>;
- reg = <0xe0 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll2 0>, <&pll2 1>, <&pll3 0>, <&pll3 1>;
- clock-names = "pll2", "pll2-div2", "pll3", "pll3-div2";
- clock-output-names = "cmux7";
- };
};
rcpm: global-utilities@e2000 {
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi
index d56a546b73e6..0a7c65a00e5e 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-pre.dtsi
@@ -94,7 +94,7 @@
cpu0: PowerPC,e500mc@0 {
device_type = "cpu";
reg = <0>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_0>;
fsl,portid-mapping = <0x80000000>;
L2_0: l2-cache {
@@ -104,7 +104,7 @@
cpu1: PowerPC,e500mc@1 {
device_type = "cpu";
reg = <1>;
- clocks = <&mux1>;
+ clocks = <&clockgen 1 1>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x40000000>;
L2_1: l2-cache {
@@ -114,7 +114,7 @@
cpu2: PowerPC,e500mc@2 {
device_type = "cpu";
reg = <2>;
- clocks = <&mux2>;
+ clocks = <&clockgen 1 2>;
next-level-cache = <&L2_2>;
fsl,portid-mapping = <0x20000000>;
L2_2: l2-cache {
@@ -124,7 +124,7 @@
cpu3: PowerPC,e500mc@3 {
device_type = "cpu";
reg = <3>;
- clocks = <&mux3>;
+ clocks = <&clockgen 1 3>;
next-level-cache = <&L2_3>;
fsl,portid-mapping = <0x10000000>;
L2_3: l2-cache {
@@ -134,7 +134,7 @@
cpu4: PowerPC,e500mc@4 {
device_type = "cpu";
reg = <4>;
- clocks = <&mux4>;
+ clocks = <&clockgen 1 4>;
next-level-cache = <&L2_4>;
fsl,portid-mapping = <0x08000000>;
L2_4: l2-cache {
@@ -144,7 +144,7 @@
cpu5: PowerPC,e500mc@5 {
device_type = "cpu";
reg = <5>;
- clocks = <&mux5>;
+ clocks = <&clockgen 1 5>;
next-level-cache = <&L2_5>;
fsl,portid-mapping = <0x04000000>;
L2_5: l2-cache {
@@ -154,7 +154,7 @@
cpu6: PowerPC,e500mc@6 {
device_type = "cpu";
reg = <6>;
- clocks = <&mux6>;
+ clocks = <&clockgen 1 6>;
next-level-cache = <&L2_6>;
fsl,portid-mapping = <0x02000000>;
L2_6: l2-cache {
@@ -164,7 +164,7 @@
cpu7: PowerPC,e500mc@7 {
device_type = "cpu";
reg = <7>;
- clocks = <&mux7>;
+ clocks = <&clockgen 1 7>;
next-level-cache = <&L2_7>;
fsl,portid-mapping = <0x01000000>;
L2_7: l2-cache {
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
index bfba0b4f1cbb..2d74ea85e5df 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-pre.dtsi
@@ -96,7 +96,7 @@
cpu0: PowerPC,e5500@0 {
device_type = "cpu";
reg = <0>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_0>;
fsl,portid-mapping = <0x80000000>;
L2_0: l2-cache {
@@ -106,7 +106,7 @@
cpu1: PowerPC,e5500@1 {
device_type = "cpu";
reg = <1>;
- clocks = <&mux1>;
+ clocks = <&clockgen 1 1>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x40000000>;
L2_1: l2-cache {
diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
index e2bd9313e632..16b454b504e2 100644
--- a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
@@ -319,24 +319,6 @@
/include/ "qoriq-clockgen1.dtsi"
global-utilities@e1000 {
compatible = "fsl,p5040-clockgen", "fsl,qoriq-clockgen-1.0";
-
- mux2: mux2@40 {
- #clock-cells = <0>;
- reg = <0x40 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux2";
- };
-
- mux3: mux3@60 {
- #clock-cells = <0>;
- reg = <0x60 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux3";
- };
};
rcpm: global-utilities@e2000 {
diff --git a/arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi
index dbd57750fc02..ed89dbbdacf0 100644
--- a/arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5040si-pre.dtsi
@@ -102,7 +102,7 @@
cpu0: PowerPC,e5500@0 {
device_type = "cpu";
reg = <0>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_0>;
fsl,portid-mapping = <0x80000000>;
L2_0: l2-cache {
@@ -112,7 +112,7 @@
cpu1: PowerPC,e5500@1 {
device_type = "cpu";
reg = <1>;
- clocks = <&mux1>;
+ clocks = <&clockgen 1 1>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x40000000>;
L2_1: l2-cache {
@@ -122,7 +122,7 @@
cpu2: PowerPC,e5500@2 {
device_type = "cpu";
reg = <2>;
- clocks = <&mux2>;
+ clocks = <&clockgen 1 2>;
next-level-cache = <&L2_2>;
fsl,portid-mapping = <0x20000000>;
L2_2: l2-cache {
@@ -132,7 +132,7 @@
cpu3: PowerPC,e5500@3 {
device_type = "cpu";
reg = <3>;
- clocks = <&mux3>;
+ clocks = <&clockgen 1 3>;
next-level-cache = <&L2_3>;
fsl,portid-mapping = <0x10000000>;
L2_3: l2-cache {
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi
index 88cd70de4f86..463c1ed9ffdd 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi
@@ -34,53 +34,6 @@
clockgen: global-utilities@e1000 {
compatible = "fsl,qoriq-clockgen-1.0";
- ranges = <0x0 0xe1000 0x1000>;
reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
#clock-cells = <2>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-1.0", "fixed-clock";
- clock-output-names = "sysclk";
- };
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2";
- };
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 0x4>;
- compatible = "fsl,qoriq-core-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2";
- };
- mux0: mux0@0 {
- #clock-cells = <0>;
- reg = <0x0 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux0";
- };
- mux1: mux1@20 {
- #clock-cells = <0>;
- reg = <0x20 0x4>;
- compatible = "fsl,qoriq-core-mux-1.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
- clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
- clock-output-names = "cmux1";
- };
- platform_pll: platform-pll@c00 {
- #clock-cells = <1>;
- reg = <0xc00 0x4>;
- compatible = "fsl,qoriq-platform-pll-1.0";
- clocks = <&sysclk>;
- clock-output-names = "platform-pll", "platform-pll-div2";
- };
};
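The clockgen hunks above and below collapse the hand-written sysclk/pll/mux node
hierarchy into a single provider with #clock-cells = <2>: the clockgen driver now
derives the clock topology from the chip-specific compatible string, and consumers
select a clock with a <type index> pair (type 1 being a core mux, which is what the
cpu nodes' <&clockgen 1 N> references use). A minimal consumer sketch using only
the standard Linux clk/OF APIs (the function name is illustrative, not part of the
patch):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/of.h>

    /* Resolve "clocks = <&clockgen 1 N>" on a cpu node and read its rate. */
    static unsigned long cpu_core_clk_rate(struct device_node *cpu_np)
    {
            struct clk *clk = of_clk_get(cpu_np, 0);        /* first specifier */
            unsigned long rate;

            if (IS_ERR(clk))
                    return 0;
            rate = clk_get_rate(clk);
            clk_put(clk);
            return rate;
    }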
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi
index 6dfd7c5357ab..0361050bb56a 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi
@@ -34,36 +34,6 @@
clockgen: global-utilities@e1000 {
compatible = "fsl,qoriq-clockgen-2.0";
- ranges = <0x0 0xe1000 0x1000>;
reg = <0xe1000 0x1000>;
- #address-cells = <1>;
- #size-cells = <1>;
#clock-cells = <2>;
-
- sysclk: sysclk {
- #clock-cells = <0>;
- compatible = "fsl,qoriq-sysclk-2.0", "fixed-clock";
- clock-output-names = "sysclk";
- };
- pll0: pll0@800 {
- #clock-cells = <1>;
- reg = <0x800 0x4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll0", "pll0-div2", "pll0-div4";
- };
- pll1: pll1@820 {
- #clock-cells = <1>;
- reg = <0x820 0x4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll1", "pll1-div2", "pll1-div4";
- };
- platform_pll: platform-pll@c00 {
- #clock-cells = <1>;
- reg = <0xc00 0x4>;
- compatible = "fsl,qoriq-platform-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "platform-pll", "platform-pll-div2";
- };
};
diff --git a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
index 4908af501098..d552044c5afc 100644
--- a/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
@@ -345,22 +345,6 @@
/include/ "qoriq-clockgen2.dtsi"
global-utilities@e1000 {
compatible = "fsl,t1023-clockgen", "fsl,qoriq-clockgen-2.0";
- mux0: mux0@0 {
- #clock-cells = <0>;
- reg = <0x0 4>;
- compatible = "fsl,core-mux-clock";
- clocks = <&pll0 0>, <&pll0 1>;
- clock-names = "pll0_0", "pll0_1";
- clock-output-names = "cmux0";
- };
- mux1: mux1@20 {
- #clock-cells = <0>;
- reg = <0x20 4>;
- compatible = "fsl,core-mux-clock";
- clocks = <&pll0 0>, <&pll0 1>;
- clock-names = "pll0_0", "pll0_1";
- clock-output-names = "cmux1";
- };
};
rcpm: global-utilities@e2000 {
diff --git a/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi b/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi
index 9d08a363bab3..d87ea13164f2 100644
--- a/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi
@@ -74,7 +74,7 @@
cpu0: PowerPC,e5500@0 {
device_type = "cpu";
reg = <0>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
#cooling-cells = <2>;
L2_1: l2-cache {
@@ -84,7 +84,7 @@
cpu1: PowerPC,e5500@1 {
device_type = "cpu";
reg = <1>;
- clocks = <&mux1>;
+ clocks = <&clockgen 1 1>;
next-level-cache = <&L2_2>;
#cooling-cells = <2>;
L2_2: l2-cache {
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 145c7f43b5b6..315d0557eefc 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -425,50 +425,6 @@
/include/ "qoriq-clockgen2.dtsi"
global-utilities@e1000 {
compatible = "fsl,t1040-clockgen", "fsl,qoriq-clockgen-2.0";
-
- mux0: mux0@0 {
- #clock-cells = <0>;
- reg = <0x0 4>;
- compatible = "fsl,qoriq-core-mux-2.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>,
- <&pll1 0>, <&pll1 1>, <&pll1 2>;
- clock-names = "pll0", "pll0-div2", "pll1-div4",
- "pll1", "pll1-div2", "pll1-div4";
- clock-output-names = "cmux0";
- };
-
- mux1: mux1@20 {
- #clock-cells = <0>;
- reg = <0x20 4>;
- compatible = "fsl,qoriq-core-mux-2.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>,
- <&pll1 0>, <&pll1 1>, <&pll1 2>;
- clock-names = "pll0", "pll0-div2", "pll1-div4",
- "pll1", "pll1-div2", "pll1-div4";
- clock-output-names = "cmux1";
- };
-
- mux2: mux2@40 {
- #clock-cells = <0>;
- reg = <0x40 4>;
- compatible = "fsl,qoriq-core-mux-2.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>,
- <&pll1 0>, <&pll1 1>, <&pll1 2>;
- clock-names = "pll0", "pll0-div2", "pll1-div4",
- "pll1", "pll1-div2", "pll1-div4";
- clock-output-names = "cmux2";
- };
-
- mux3: mux3@60 {
- #clock-cells = <0>;
- reg = <0x60 4>;
- compatible = "fsl,qoriq-core-mux-2.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>,
- <&pll1 0>, <&pll1 1>, <&pll1 2>;
- clock-names = "pll0_0", "pll0_1", "pll0_2",
- "pll1_0", "pll1_1", "pll1_2";
- clock-output-names = "cmux3";
- };
};
rcpm: global-utilities@e2000 {
diff --git a/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi b/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi
index 6db0ee8b1384..dd59e4b69480 100644
--- a/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi
@@ -74,7 +74,7 @@
cpu0: PowerPC,e5500@0 {
device_type = "cpu";
reg = <0>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
#cooling-cells = <2>;
L2_1: l2-cache {
@@ -84,7 +84,7 @@
cpu1: PowerPC,e5500@1 {
device_type = "cpu";
reg = <1>;
- clocks = <&mux1>;
+ clocks = <&clockgen 1 1>;
next-level-cache = <&L2_2>;
#cooling-cells = <2>;
L2_2: l2-cache {
@@ -94,7 +94,7 @@
cpu2: PowerPC,e5500@2 {
device_type = "cpu";
reg = <2>;
- clocks = <&mux2>;
+ clocks = <&clockgen 1 2>;
next-level-cache = <&L2_3>;
#cooling-cells = <2>;
L2_3: l2-cache {
@@ -104,7 +104,7 @@
cpu3: PowerPC,e5500@3 {
device_type = "cpu";
reg = <3>;
- clocks = <&mux3>;
+ clocks = <&clockgen 1 3>;
next-level-cache = <&L2_4>;
#cooling-cells = <2>;
L2_4: l2-cache {
diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
index a97296c64eb2..ecbb447920bc 100644
--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
@@ -535,28 +535,6 @@
/include/ "qoriq-clockgen2.dtsi"
global-utilities@e1000 {
compatible = "fsl,t2080-clockgen", "fsl,qoriq-clockgen-2.0";
-
- mux0: mux0@0 {
- #clock-cells = <0>;
- reg = <0x0 4>;
- compatible = "fsl,qoriq-core-mux-2.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>,
- <&pll1 0>, <&pll1 1>, <&pll1 2>;
- clock-names = "pll0", "pll0-div2", "pll0-div4",
- "pll1", "pll1-div2", "pll1-div4";
- clock-output-names = "cmux0";
- };
-
- mux1: mux1@20 {
- #clock-cells = <0>;
- reg = <0x20 4>;
- compatible = "fsl,qoriq-core-mux-2.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>,
- <&pll1 0>, <&pll1 1>, <&pll1 2>;
- clock-names = "pll0", "pll0-div2", "pll0-div4",
- "pll1", "pll1-div2", "pll1-div4";
- clock-output-names = "cmux1";
- };
};
rcpm: global-utilities@e2000 {
diff --git a/arch/powerpc/boot/dts/fsl/t208xsi-pre.dtsi b/arch/powerpc/boot/dts/fsl/t208xsi-pre.dtsi
index c2e57203910d..3f745de44284 100644
--- a/arch/powerpc/boot/dts/fsl/t208xsi-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t208xsi-pre.dtsi
@@ -81,28 +81,28 @@
cpu0: PowerPC,e6500@0 {
device_type = "cpu";
reg = <0 1>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
cpu1: PowerPC,e6500@2 {
device_type = "cpu";
reg = <2 3>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
cpu2: PowerPC,e6500@4 {
device_type = "cpu";
reg = <4 5>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
cpu3: PowerPC,e6500@6 {
device_type = "cpu";
reg = <6 7>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index 68c4eadc19e3..fcac73486d48 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -950,67 +950,6 @@
/include/ "qoriq-clockgen2.dtsi"
global-utilities@e1000 {
compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0";
-
- pll2: pll2@840 {
- #clock-cells = <1>;
- reg = <0x840 0x4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll2", "pll2-div2", "pll2-div4";
- };
-
- pll3: pll3@860 {
- #clock-cells = <1>;
- reg = <0x860 0x4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll3", "pll3-div2", "pll3-div4";
- };
-
- pll4: pll4@880 {
- #clock-cells = <1>;
- reg = <0x880 0x4>;
- compatible = "fsl,qoriq-core-pll-2.0";
- clocks = <&sysclk>;
- clock-output-names = "pll4", "pll4-div2", "pll4-div4";
- };
-
- mux0: mux0@0 {
- #clock-cells = <0>;
- reg = <0x0 0x4>;
- compatible = "fsl,qoriq-core-mux-2.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>,
- <&pll1 0>, <&pll1 1>, <&pll1 2>,
- <&pll2 0>, <&pll2 1>, <&pll2 2>;
- clock-names = "pll0", "pll0-div2", "pll0-div4",
- "pll1", "pll1-div2", "pll1-div4",
- "pll2", "pll2-div2", "pll2-div4";
- clock-output-names = "cmux0";
- };
-
- mux1: mux1@20 {
- #clock-cells = <0>;
- reg = <0x20 0x4>;
- compatible = "fsl,qoriq-core-mux-2.0";
- clocks = <&pll0 0>, <&pll0 1>, <&pll0 2>,
- <&pll1 0>, <&pll1 1>, <&pll1 2>,
- <&pll2 0>, <&pll2 1>, <&pll2 2>;
- clock-names = "pll0", "pll0-div2", "pll0-div4",
- "pll1", "pll1-div2", "pll1-div4",
- "pll2", "pll2-div2", "pll2-div4";
- clock-output-names = "cmux1";
- };
-
- mux2: mux2@40 {
- #clock-cells = <0>;
- reg = <0x40 0x4>;
- compatible = "fsl,qoriq-core-mux-2.0";
- clocks = <&pll3 0>, <&pll3 1>, <&pll3 2>,
- <&pll4 0>, <&pll4 1>, <&pll4 2>;
- clock-names = "pll3", "pll3-div2", "pll3-div4",
- "pll4", "pll4-div2", "pll4-div4";
- clock-output-names = "cmux2";
- };
};
rcpm: global-utilities@e2000 {
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
index 038cf8fadee4..632314c6faa9 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-pre.dtsi
@@ -90,84 +90,84 @@
cpu0: PowerPC,e6500@0 {
device_type = "cpu";
reg = <0 1>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
cpu1: PowerPC,e6500@2 {
device_type = "cpu";
reg = <2 3>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
cpu2: PowerPC,e6500@4 {
device_type = "cpu";
reg = <4 5>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
cpu3: PowerPC,e6500@6 {
device_type = "cpu";
reg = <6 7>;
- clocks = <&mux0>;
+ clocks = <&clockgen 1 0>;
next-level-cache = <&L2_1>;
fsl,portid-mapping = <0x80000000>;
};
cpu4: PowerPC,e6500@8 {
device_type = "cpu";
reg = <8 9>;
- clocks = <&mux1>;
+ clocks = <&clockgen 1 1>;
next-level-cache = <&L2_2>;
fsl,portid-mapping = <0x40000000>;
};
cpu5: PowerPC,e6500@10 {
device_type = "cpu";
reg = <10 11>;
- clocks = <&mux1>;
+ clocks = <&clockgen 1 1>;
next-level-cache = <&L2_2>;
fsl,portid-mapping = <0x40000000>;
};
cpu6: PowerPC,e6500@12 {
device_type = "cpu";
reg = <12 13>;
- clocks = <&mux1>;
+ clocks = <&clockgen 1 1>;
next-level-cache = <&L2_2>;
fsl,portid-mapping = <0x40000000>;
};
cpu7: PowerPC,e6500@14 {
device_type = "cpu";
reg = <14 15>;
- clocks = <&mux1>;
+ clocks = <&clockgen 1 1>;
next-level-cache = <&L2_2>;
fsl,portid-mapping = <0x40000000>;
};
cpu8: PowerPC,e6500@16 {
device_type = "cpu";
reg = <16 17>;
- clocks = <&mux2>;
+ clocks = <&clockgen 1 2>;
next-level-cache = <&L2_3>;
fsl,portid-mapping = <0x20000000>;
};
cpu9: PowerPC,e6500@18 {
device_type = "cpu";
reg = <18 19>;
- clocks = <&mux2>;
+ clocks = <&clockgen 1 2>;
next-level-cache = <&L2_3>;
fsl,portid-mapping = <0x20000000>;
};
cpu10: PowerPC,e6500@20 {
device_type = "cpu";
reg = <20 21>;
- clocks = <&mux2>;
+ clocks = <&clockgen 1 2>;
next-level-cache = <&L2_3>;
fsl,portid-mapping = <0x20000000>;
};
cpu11: PowerPC,e6500@22 {
device_type = "cpu";
reg = <22 23>;
- clocks = <&mux2>;
+ clocks = <&clockgen 1 2>;
next-level-cache = <&L2_3>;
fsl,portid-mapping = <0x20000000>;
};
diff --git a/arch/powerpc/boot/dts/mpc832x_rdb.dts b/arch/powerpc/boot/dts/mpc832x_rdb.dts
index 647cae14c16d..be6ef3531b28 100644
--- a/arch/powerpc/boot/dts/mpc832x_rdb.dts
+++ b/arch/powerpc/boot/dts/mpc832x_rdb.dts
@@ -311,13 +311,9 @@
compatible = "fsl,ucc-mdio";
phy00:ethernet-phy@0 {
- interrupt-parent = <&ipic>;
- interrupts = <0>;
reg = <0x0>;
};
phy04:ethernet-phy@4 {
- interrupt-parent = <&ipic>;
- interrupts = <0>;
reg = <0x4>;
};
};
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index f045f8494bf9..b0491b8c0199 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -93,7 +93,8 @@ static void *serial_get_stdout_devp(void)
if (devp == NULL)
goto err_out;
- if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) {
+ if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0 ||
+ getprop(devp, "stdout-path", path, MAX_PATH_LEN) > 0) {
devp = finddevice(path);
if (devp == NULL)
goto err_out;
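The bootwrapper previously honoured only the legacy "linux,stdout-path" property;
this hunk adds a fallback to the standard "stdout-path" name, so either spelling
under /chosen now selects the console device.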
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index e0567dc41968..d592ba27b122 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -25,6 +25,7 @@ CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEVTMPFS_MOUNT=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index f686cc1eac0b..ceb3c770786f 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -246,7 +246,6 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_LATENCYTOP=y
CONFIG_BOOTX_TEXT=y
-CONFIG_PPC_EARLY_DEBUG=y
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
diff --git a/arch/powerpc/configs/guest.config b/arch/powerpc/configs/guest.config
new file mode 100644
index 000000000000..8b8cd18ecd7c
--- /dev/null
+++ b/arch/powerpc/configs/guest.config
@@ -0,0 +1,13 @@
+CONFIG_VIRTIO_BLK=y
+CONFIG_VIRTIO_BLK_SCSI=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_VIRTIO_NET=y
+CONFIG_NET_FAILOVER=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_KVM_GUEST=y
+CONFIG_EPAPR_PARAVIRT=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VHOST_NET=y
+CONFIG_VHOST=y
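guest.config is a kconfig fragment rather than a full defconfig: kbuild merges it
on top of an existing configuration, e.g. "make ppc64_defconfig guest.config",
which enables the virtio and KVM-guest options without duplicating an entire
defconfig per use case.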
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index f71eddafb02f..c5f2005005d3 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -108,7 +108,6 @@ CONFIG_LATENCYTOP=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_BOOTX_TEXT=y
-CONFIG_PPC_EARLY_DEBUG=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index 62948d198d7f..50b610b48914 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -297,7 +297,6 @@ CONFIG_LATENCYTOP=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_BOOTX_TEXT=y
-CONFIG_PPC_EARLY_DEBUG=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_SHA512=m
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index f2515674a1e2..91fdb619b484 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -1,4 +1,3 @@
-CONFIG_PPC64=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ=y
@@ -9,21 +8,22 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
+CONFIG_NUMA_BALANCING=y
CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
CONFIG_CGROUP_BPF=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_KPROBES=y
-CONFIG_JUMP_LABEL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_PARTITION_ADVANCED=y
+CONFIG_PPC64=y
+CONFIG_NR_CPUS=2048
CONFIG_PPC_SPLPAR=y
CONFIG_DTL=y
CONFIG_SCANLOG=m
@@ -45,14 +45,11 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_FREQ_PMAC64=y
CONFIG_HZ_100=y
-CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_CRASH_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
-CONFIG_KSM=y
-CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_HOTPLUG_PCI=y
@@ -60,6 +57,23 @@ CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
CONFIG_PCCARD=y
CONFIG_ELECTRA_CF=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM_BOOK3S_64=m
+CONFIG_KVM_BOOK3S_64_HV=m
+CONFIG_VHOST_NET=m
+CONFIG_OPROFILE=m
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_BINFMT_MISC=m
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_KSM=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -163,7 +177,6 @@ CONFIG_TIGON3=y
CONFIG_BNX2X=m
CONFIG_CHELSIO_T1=m
CONFIG_BE2NET=m
-CONFIG_S2IO=m
CONFIG_IBMVETH=m
CONFIG_EHEA=m
CONFIG_E100=y
@@ -174,6 +187,7 @@ CONFIG_IXGBE=m
CONFIG_I40E=m
CONFIG_MLX4_EN=m
CONFIG_MYRI10GE=m
+CONFIG_S2IO=m
CONFIG_PASEMI_MAC=y
CONFIG_QLGE=m
CONFIG_NETXEN_NIC=m
@@ -284,7 +298,7 @@ CONFIG_REISERFS_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
-CONFIG_XFS_FS=m
+CONFIG_XFS_FS=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_BTRFS_FS=m
CONFIG_BTRFS_FS_POSIX_ACL=y
@@ -323,25 +337,6 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_STACK_USAGE=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-CONFIG_SOFTLOCKUP_DETECTOR=y
-CONFIG_HARDLOCKUP_DETECTOR=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_LATENCYTOP=y
-CONFIG_FTRACE=y
-CONFIG_FUNCTION_TRACER=y
-CONFIG_FUNCTION_GRAPH_TRACER=y
-CONFIG_SCHED_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_CODE_PATCHING_SELFTEST=y
-CONFIG_FTR_FIXUP_SELFTEST=y
-CONFIG_MSI_BITMAP_SELFTEST=y
-CONFIG_XMON=y
-CONFIG_BOOTX_TEXT=y
-CONFIG_PPC_EARLY_DEBUG=y
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
@@ -364,8 +359,20 @@ CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_CRYPTO_DEV_VMX=y
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM_BOOK3S_64=m
-CONFIG_KVM_BOOK3S_64_HV=m
-CONFIG_VHOST_NET=m
CONFIG_PRINTK_TIME=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_HARDLOCKUP_DETECTOR=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_LATENCYTOP=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_CODE_PATCHING_SELFTEST=y
+CONFIG_FTR_FIXUP_SELFTEST=y
+CONFIG_MSI_BITMAP_SELFTEST=y
+CONFIG_XMON=y
+CONFIG_BOOTX_TEXT=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 7ee736f20774..53687c3a70c4 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -1155,7 +1155,6 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_XMON=y
CONFIG_BOOTX_TEXT=y
-CONFIG_PPC_EARLY_DEBUG=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_NETWORK_XFRM=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 5e09a40cbcbf..ea79c519863d 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -290,9 +290,7 @@ CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_LATENCYTOP=y
-CONFIG_FTRACE=y
CONFIG_FUNCTION_TRACER=y
-CONFIG_FUNCTION_GRAPH_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CODE_PATCHING_SELFTEST=y
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 3196d227e351..77ff7fb24823 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,3 +1,7 @@
+generated-y += syscall_table_32.h
+generated-y += syscall_table_64.h
+generated-y += syscall_table_c32.h
+generated-y += syscall_table_spu.h
generic-y += div64.h
generic-y += export.h
generic-y += irq_regs.h
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index ec691d489656..6f201b199c02 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -61,7 +61,6 @@ void RunModeException(struct pt_regs *regs);
void single_step_exception(struct pt_regs *regs);
void program_check_exception(struct pt_regs *regs);
void alignment_exception(struct pt_regs *regs);
-void slb_miss_bad_addr(struct pt_regs *regs);
void StackOverflow(struct pt_regs *regs);
void kernel_fp_unavailable_exception(struct pt_regs *regs);
void altivec_unavailable_exception(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
index f2892c7ab73e..2a0a467d2985 100644
--- a/arch/powerpc/include/asm/book3s/32/hash.h
+++ b/arch/powerpc/include/asm/book3s/32/hash.h
@@ -26,6 +26,7 @@
#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
#define _PAGE_DIRTY 0x080 /* C: page changed */
#define _PAGE_ACCESSED 0x100 /* R: page referenced */
+#define _PAGE_EXEC 0x200 /* software: exec allowed */
#define _PAGE_RW 0x400 /* software: user write access allowed */
#define _PAGE_SPECIAL 0x800 /* software: Special page */
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index e38c91388c40..0c261ba2c826 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
+
/*
* 32-bit hash table MMU support
*/
@@ -9,6 +10,8 @@
* BATs
*/
+#include <asm/page.h>
+
/* Block size masks */
#define BL_128K 0x000
#define BL_256K 0x001
@@ -34,14 +37,20 @@
#define BAT_PHYS_ADDR(x) ((u32)((x & 0x00000000fffe0000ULL) | \
((x & 0x0000000e00000000ULL) >> 24) | \
((x & 0x0000000100000000ULL) >> 30)))
+#define PHYS_BAT_ADDR(x) (((u64)(x) & 0x00000000fffe0000ULL) | \
+ (((u64)(x) << 24) & 0x0000000e00000000ULL) | \
+ (((u64)(x) << 30) & 0x0000000100000000ULL))
#else
#define BAT_PHYS_ADDR(x) (x)
+#define PHYS_BAT_ADDR(x) ((x) & 0xfffe0000)
#endif
struct ppc_bat {
u32 batu;
u32 batl;
};
+
+typedef pte_t *pgtable_t;
#endif /* !__ASSEMBLY__ */
/*
@@ -83,6 +92,12 @@ typedef struct {
unsigned long vdso_base;
} mm_context_t;
+/* patch sites */
+extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;
+extern s32 patch__hash_page_B, patch__hash_page_C;
+extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
+extern s32 patch__flush_hash_B;
+
#endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601, we don't actually use
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 82e44b1a00ae..b5b955eb2fb7 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -25,10 +25,7 @@
extern void __bad_pte(pmd_t *pmd);
extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) ({ \
- BUG_ON(!(shift)); \
- pgtable_cache[(shift) - 1]; \
- })
+#define PGT_CACHE(shift) pgtable_cache[shift]
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
@@ -50,8 +47,6 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
#define __pmd_free_tlb(tlb,x,a) do { } while (0)
/* #define pgd_populate(mm, pmd, pte) BUG() */
-#ifndef CONFIG_BOOKE
-
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
pte_t *pte)
{
@@ -61,46 +56,31 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pte_page)
{
- *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
-}
-
-#define pmd_pgtable(pmd) pmd_page(pmd)
-#else
-
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
- pte_t *pte)
-{
- *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
-}
-
-static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
- pgtable_t pte_page)
-{
- *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+ *pmdp = __pmd(__pa(pte_page) | _PMD_PRESENT);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
-#endif
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+void pte_frag_destroy(void *pte_frag);
+pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
+void pte_fragment_free(unsigned long *table, int kernel);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- free_page((unsigned long)pte);
+ pte_fragment_free((unsigned long *)pte, 1);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
+ pte_fragment_free((unsigned long *)ptepage, 0);
}
static inline void pgtable_free(void *table, unsigned index_size)
{
if (!index_size) {
- pgtable_page_dtor(virt_to_page(table));
- free_page((unsigned long)table);
+ pte_fragment_free((unsigned long *)table, 0);
} else {
BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
kmem_cache_free(PGT_CACHE(index_size), table);
@@ -138,6 +118,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- pgtable_free_tlb(tlb, page_address(table), 0);
+ pgtable_free_tlb(tlb, table, 0);
}
#endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index c21d33704633..49d76adb9bc5 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -10,9 +10,9 @@
/* And here we include common definitions */
#define _PAGE_KERNEL_RO 0
-#define _PAGE_KERNEL_ROX 0
+#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
@@ -66,11 +66,11 @@ static inline bool pte_user(pte_t pte)
*/
#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
@@ -318,7 +318,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
int psize)
{
unsigned long set = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
pte_update(ptep, 0, set);
@@ -328,24 +328,10 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
-/*
- * Note that on Book E processors, the pmd contains the kernel virtual
- * (lowmem) address of the pte page. The physical address is less useful
- * because everything runs with translation enabled (even the TLB miss
- * handler). On everything else the pmd contains the physical address
- * of the pte page. -- paulus
- */
-#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+ ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
-#else
-#define pmd_page_vaddr(pmd) \
- ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
-#define pmd_page(pmd) \
- pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
-#endif
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
@@ -360,7 +346,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define pte_offset_kernel(dir, addr) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+ ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
+ (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte)
/*
@@ -384,7 +371,7 @@ static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY);
static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline bool pte_exec(pte_t pte) { return true; }
+static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_present(pte_t pte)
{
@@ -451,7 +438,7 @@ static inline pte_t pte_wrprotect(pte_t pte)
static inline pte_t pte_exprotect(pte_t pte)
{
- return pte;
+ return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
static inline pte_t pte_mkclean(pte_t pte)
@@ -466,7 +453,7 @@ static inline pte_t pte_mkold(pte_t pte)
static inline pte_t pte_mkexec(pte_t pte)
{
- return pte;
+ return __pte(pte_val(pte) | _PAGE_EXEC);
}
static inline pte_t pte_mkpte(pte_t pte)
@@ -524,7 +511,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, int percpu)
{
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
* helper pte_update() which does an atomic update. We need to do that
* because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
@@ -537,7 +524,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
else
pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#elif defined(CONFIG_PTE_64BIT)
/* Second case is 32-bit with 64-bit PTE. In this case, we
* can just store as long as we do the two halves in the right order
* with a barrier in between. This is possible because we take care,
@@ -560,7 +547,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
: "r" (pte) : "memory");
-#elif defined(CONFIG_PPC_STD_MMU_32)
+#else
/* Third case is 32-bit hash table in UP mode, we need to preserve
* the _PAGE_HASHPTE bit since we may not have invalidated the previous
* translation in the hash yet (done in a subsequent flush_tlb_xxx())
@@ -568,9 +555,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
*/
*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
| (pte_val(pte) & ~_PAGE_HASHPTE));
-
-#else
-#error "Not supported "
#endif
}
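With this pgtable.h hunk, execute permission on book3s/32 is tracked through the
new _PAGE_EXEC bit instead of being implicitly always-on. A minimal sketch of the
round-trip the updated accessors now provide (demo() is illustrative, not part of
the patch):

    static void demo(void)
    {
            pte_t pte = __pte(_PAGE_BASE | _PAGE_USER);

            pte = pte_mkexec(pte);          /* sets _PAGE_EXEC */
            WARN_ON(!pte_exec(pte));        /* now true */
            pte = pte_exprotect(pte);       /* clears _PAGE_EXEC */
            WARN_ON(pte_exec(pte));         /* false again */
    }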
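The surrounding __set_pte_at() cleanup in the same file is possible because this
header is now only built for the 32-bit hash MMU, so the CONFIG_PPC_STD_MMU_32
and Book E guards had become dead code.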
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 15bc16b1dc9c..cf5ba5254299 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -1,11 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_4K_H
-/*
- * Entries per page directory level. The PTE level must use a 64b record
- * for each page table entry. The PMD and PGD level use a 32b record for
- * each entry by assuming that each entry is page aligned.
- */
+
#define H_PTE_INDEX_SIZE 9
#define H_PMD_INDEX_SIZE 7
#define H_PUD_INDEX_SIZE 9
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 6328857f259f..1ceee000c18d 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_H_
+#include <asm/page.h>
+
#ifndef __ASSEMBLY__
/*
* Page size definition
@@ -24,6 +26,13 @@ struct mmu_psize_def {
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+/*
+ * For Book3s 64 with 4K and 64K Linux page sizes
+ * we want to use pointers, because the page table
+ * actually stores a pfn.
+ */
+typedef pte_t *pgtable_t;
+
#endif /* __ASSEMBLY__ */
/* 64-bit classic hash table MMU */
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 391ed2c3b697..4aba625389c4 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -37,10 +37,7 @@ extern struct vmemmap_backing *vmemmap_list;
#define MAX_PGTABLE_INDEX_SIZE 0xf
extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) ({ \
- BUG_ON(!(shift)); \
- pgtable_cache[(shift) - 1]; \
- })
+#define PGT_CACHE(shift) pgtable_cache[shift]
extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
@@ -50,6 +47,7 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
#ifdef CONFIG_SMP
extern void __tlb_remove_table(void *_table);
#endif
+void pte_frag_destroy(void *pte_frag);
static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 6c99e846a8c9..2e6ada28da64 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1304,7 +1304,7 @@ static inline int pgd_devmap(pgd_t pgd)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline const int pud_pfn(pud_t pud)
+static inline int pud_pfn(pud_t pud)
{
/*
* Currently all calls to pud_pfn() are gated around a pud_devmap()
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 66298461b640..40ea5b3781c6 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -71,7 +71,7 @@ extern struct ppc64_caches ppc64_caches;
#else
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 3d5acd2b113a..2074b40f3fb5 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -33,14 +33,33 @@ unsigned int create_cond_branch(const unsigned int *addr,
int patch_branch(unsigned int *addr, unsigned long target, int flags);
int patch_instruction(unsigned int *addr, unsigned int instr);
int raw_patch_instruction(unsigned int *addr, unsigned int instr);
-int patch_instruction_site(s32 *addr, unsigned int instr);
-int patch_branch_site(s32 *site, unsigned long target, int flags);
static inline unsigned long patch_site_addr(s32 *site)
{
return (unsigned long)site + *site;
}
+static inline int patch_instruction_site(s32 *site, unsigned int instr)
+{
+ return patch_instruction((unsigned int *)patch_site_addr(site), instr);
+}
+
+static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
+{
+ return patch_branch((unsigned int *)patch_site_addr(site), target, flags);
+}
+
+static inline int modify_instruction(unsigned int *addr, unsigned int clr,
+ unsigned int set)
+{
+ return patch_instruction(addr, (*addr & ~clr) | set);
+}
+
+static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
+{
+ return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
+}
+
int instr_is_relative_branch(unsigned int instr);
int instr_is_relative_link_branch(unsigned int instr);
int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
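A patch site stores a self-relative s32 offset, so patch_site_addr() recovers the
instruction address by adding the stored displacement, and the new inline wrappers
patch through that address. A minimal usage sketch, assuming only the helpers in
this hunk (patch__example is a hypothetical site; real ones such as
patch__hash_page_A0 are declared in mmu-hash.h above):

    extern s32 patch__example;      /* hypothetical patch site */

    static void nop_out_site(void)
    {
            /* 0x60000000 is "ori r0,r0,0", the canonical powerpc nop. */
            patch_instruction_site(&patch__example, 0x60000000);
    }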
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 29f49a35d6ee..d05f0c28e515 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -44,6 +44,7 @@ extern int machine_check_e500(struct pt_regs *regs);
extern int machine_check_e200(struct pt_regs *regs);
extern int machine_check_47x(struct pt_regs *regs);
int machine_check_8xx(struct pt_regs *regs);
+int machine_check_83xx(struct pt_regs *regs);
extern void cpu_down_flush_e500v2(void);
extern void cpu_down_flush_e500mc(void);
@@ -296,7 +297,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | CPU_FTR_601 | \
CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_USE_RTC)
#define CPU_FTRS_603 (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
- CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_604 (CPU_FTR_COMMON | CPU_FTR_PPC_LE)
#define CPU_FTRS_740_NOTAU (CPU_FTR_COMMON | \
CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_L2CR | \
@@ -367,15 +368,15 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
-#define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE)
+#define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
CPU_FTR_MAYBE_CAN_NAP)
#define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \
CPU_FTR_MAYBE_CAN_NAP | \
- CPU_FTR_COMMON)
+ CPU_FTR_COMMON | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E300C2 (CPU_FTR_MAYBE_CAN_DOZE | \
CPU_FTR_MAYBE_CAN_NAP | \
- CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
+ CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON)
#define CPU_FTRS_8XX (CPU_FTR_NOEXECUTE)
#define CPU_FTRS_40X (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 5201f2b7838c..ebf66809f2d3 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -39,9 +39,6 @@ extern int dma_nommu_mmap_coherent(struct device *dev,
* to ensure it is consistent.
*/
struct device;
-extern void *__dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp);
-extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
size_t size, int direction);
@@ -52,8 +49,6 @@ extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
* Cache coherent cores.
*/
-#define __dma_alloc_coherent(dev, gfp, size, handle) NULL
-#define __dma_free_coherent(size, addr) ((void)0)
#define __dma_sync(addr, size, rw) ((void)0)
#define __dma_sync_page(pg, off, sz, rw) ((void)0)
@@ -111,7 +106,5 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
extern u64 __dma_get_required_mask(struct device *dev);
-#define ARCH_HAS_DMA_MMAP_COHERENT
-
#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 1e7a33592e29..188776befaf9 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -48,6 +48,10 @@
#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt)
+/* Alignment per CMA requirement. */
+#define FADUMP_CMA_ALIGNMENT (PAGE_SIZE << \
+ max_t(unsigned long, MAX_ORDER - 1, pageblock_order))
+
/* Firmware provided dump sections */
#define FADUMP_CPU_STATE_DATA 0x0001
#define FADUMP_HPTE_REGION 0x0002
@@ -141,6 +145,7 @@ struct fw_dump {
unsigned long fadump_supported:1;
unsigned long dump_active:1;
unsigned long dump_registered:1;
+ unsigned long nocma:1;
};
/*
@@ -200,7 +205,7 @@ struct fad_crash_memory_ranges {
unsigned long long size;
};
-extern int is_fadump_boot_memory_area(u64 addr, ulong size);
+extern int is_fadump_memory_area(u64 addr, ulong size);
extern int early_init_dt_scan_fw_dump(unsigned long node,
const char *uname, int depth, void *data);
extern int fadump_reserve_mem(void);
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index 33b6f9c892c8..40a6c9261a6b 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -221,6 +221,17 @@ label##3: \
FTR_ENTRY_OFFSET 953b-954b; \
.popsection;
+#define START_BTB_FLUSH_SECTION \
+955: \
+
+#define END_BTB_FLUSH_SECTION \
+956: \
+ .pushsection __btb_flush_fixup,"a"; \
+ .align 2; \
+957: \
+ FTR_ENTRY_OFFSET 955b-957b; \
+ FTR_ENTRY_OFFSET 956b-957b; \
+ .popsection;
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -230,6 +241,7 @@ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
void apply_feature_fixups(void);
void setup_feature_keys(void);
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 383da1ab9e23..8d40565ad0c3 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -5,8 +5,6 @@
#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
-extern struct kmem_cache *hugepte_cache;
-
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/hugetlb.h>
@@ -76,7 +74,9 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
unsigned long idx = 0;
pte_t *dir = hugepd_page(hpd);
-#ifndef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_8xx
+ idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
+#elif !defined(CONFIG_PPC_FSL_BOOK3E)
idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif
@@ -129,15 +129,14 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- pte_t pte;
- pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
flush_hugetlb_page(vma, addr);
}
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
-extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty);
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty);
static inline void arch_clear_hugepage_flags(struct page *page)
{
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 33a4fc891947..463c63a9fcf1 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -335,6 +335,7 @@
#define H_SET_PARTITION_TABLE 0xF800
#define H_ENTER_NESTED 0xF804
#define H_TLB_INVALIDATE 0xF808
+#define H_COPY_TOFROM_GUEST 0xF80C
/* Values for 2nd argument to H_SET_MODE */
#define H_SET_MODE_RESOURCE_SET_CIABR 1
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index e746becd9d6f..7f19fbd3ba55 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -29,12 +29,14 @@ extern struct pci_dev *isa_bridge_pcidev;
#include <linux/device.h>
#include <linux/compiler.h>
+#include <linux/mm.h>
#include <asm/page.h>
#include <asm/byteorder.h>
#include <asm/synch.h>
#include <asm/delay.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
+#include <asm/pgtable.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -804,6 +806,8 @@ extern void __iounmap_at(void *ea, unsigned long size);
*/
static inline unsigned long virt_to_phys(volatile void * address)
{
+ WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && !virt_addr_valid(address));
+
return __pa((unsigned long)address);
}
@@ -827,7 +831,14 @@ static inline void * phys_to_virt(unsigned long address)
/*
* Change "struct page" to physical address.
*/
-#define page_to_phys(page) ((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+static inline phys_addr_t page_to_phys(struct page *page)
+{
+ unsigned long pfn = page_to_pfn(page);
+
+ WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && !pfn_valid(pfn));
+
+ return PFN_PHYS(pfn);
+}
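Both converted helpers follow the same pattern: validate the input only when CONFIG_DEBUG_VIRTUAL is enabled, then do the arithmetic unconditionally. A userspace sketch of that pattern, with assert() standing in for WARN_ON() and made-up address constants:

#include <assert.h>
#include <stdio.h>

#define KERNEL_START 0xc0000000UL       /* made-up linear mapping bounds */
#define KERNEL_END   0xd0000000UL

static int virt_addr_valid(unsigned long va)
{
        return va >= KERNEL_START && va < KERNEL_END;
}

static unsigned long virt_to_phys(unsigned long va)
{
        assert(virt_addr_valid(va));    /* stand-in for WARN_ON() */
        return va - KERNEL_START;       /* stand-in for __pa() */
}

int main(void)
{
        printf("pa = %#lx\n", virt_to_phys(KERNEL_START + 0x1000));
        return 0;
}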
/*
 * 32 bits still uses virt_to_bus() for its implementation of DMA
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 55312990d1d2..17524d222a7b 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -213,11 +213,12 @@ struct iommu_table_group {
extern void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number, unsigned long pe_num);
-extern int iommu_add_device(struct device *dev);
+extern int iommu_add_device(struct iommu_table_group *table_group,
+ struct device *dev);
extern void iommu_del_device(struct device *dev);
-extern int __init tce_iommu_bus_notifier_init(void);
-extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
- unsigned long *hpa, enum dma_data_direction *direction);
+extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
+ unsigned long entry, unsigned long *hpa,
+ enum dma_data_direction *direction);
#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number,
@@ -225,7 +226,8 @@ static inline void iommu_register_group(struct iommu_table_group *table_group,
{
}
-static inline int iommu_add_device(struct device *dev)
+static inline int iommu_add_device(struct iommu_table_group *table_group,
+ struct device *dev)
{
return 0;
}
@@ -233,11 +235,6 @@ static inline int iommu_add_device(struct device *dev)
static inline void iommu_del_device(struct device *dev)
{
}
-
-static inline int __init tce_iommu_bus_notifier_init(void)
-{
- return 0;
-}
#endif /* !CONFIG_IOMMU_API */
#else
diff --git a/arch/powerpc/include/asm/ipic.h b/arch/powerpc/include/asm/ipic.h
index fb59829983b8..3dbd47f2bffe 100644
--- a/arch/powerpc/include/asm/ipic.h
+++ b/arch/powerpc/include/asm/ipic.h
@@ -69,7 +69,6 @@ enum ipic_mcp_irq {
IPIC_MCP_MU = 7,
};
-extern int ipic_set_priority(unsigned int irq, unsigned int priority);
extern void ipic_set_highest_priority(unsigned int irq);
extern void ipic_set_default_priority(void);
extern void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq);
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 09f8e9ba69bc..38f1b879f569 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -188,6 +188,13 @@ extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr);
+extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
+ gva_t eaddr, void *to, void *from,
+ unsigned long n);
+extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
+ void *to, unsigned long n);
+extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
+ void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, u64 root,
u64 *pte_ret_p);
@@ -196,8 +203,11 @@ extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, bool data, bool iswrite);
+extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
+ unsigned int pshift, unsigned int lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
- unsigned int shift, struct kvm_memory_slot *memslot,
+ unsigned int shift,
+ const struct kvm_memory_slot *memslot,
unsigned int lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
bool writing, unsigned long gpa,
@@ -215,16 +225,14 @@ extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn);
-extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
- unsigned long gpa, unsigned int shift,
- struct kvm_memory_slot *memslot,
- unsigned int lpid);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
/* XXX remove this export when load_last_inst() is generic */
@@ -242,7 +250,7 @@ extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
unsigned long *rmap, long pte_index, int realmode);
-extern void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
+extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index);
@@ -298,6 +306,7 @@ long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
+long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
@@ -307,7 +316,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
struct hv_guest_state *hr);
-long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
+long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 6d298145d564..21b1ed5df888 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -55,6 +55,7 @@ struct kvm_nested_guest {
cpumask_t need_tlb_flush;
cpumask_t cpu_in_guest;
short prev_cpu[NR_CPUS];
+ u8 radix; /* is this nested guest radix? */
};
/*
@@ -150,6 +151,18 @@ static inline bool kvm_is_radix(struct kvm *kvm)
return kvm->arch.radix;
}
+static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
+{
+ bool radix;
+
+ if (vcpu->arch.nested)
+ radix = vcpu->arch.nested->radix;
+ else
+ radix = kvm_is_radix(vcpu->kvm);
+
+ return radix;
+}
+
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
#endif
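kvmhv_vcpu_is_radix() above prefers the nested guest's MMU mode and falls back to the VM-wide setting only when no nested guest is active. A reduced sketch of that lookup using stub structs (hypothetical, not the kernel's types):

#include <stdbool.h>
#include <stdio.h>

struct kvm          { bool radix; };
struct nested_guest { bool radix; };
struct vcpu {
        struct kvm *kvm;
        struct nested_guest *nested;    /* NULL unless running a nested guest */
};

static bool vcpu_is_radix(const struct vcpu *v)
{
        return v->nested ? v->nested->radix : v->kvm->radix;
}

int main(void)
{
        struct kvm k = { .radix = true };
        struct nested_guest n = { .radix = false };
        struct vcpu v = { .kvm = &k, .nested = &n };

        printf("%d\n", vcpu_is_radix(&v));      /* 0: the nested guest wins */
        v.nested = NULL;
        printf("%d\n", vcpu_is_radix(&v));      /* 1: falls back to the VM */
        return 0;
}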
@@ -624,8 +637,11 @@ extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
struct rmap_nested **n_rmap);
+extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
+ unsigned long clr, unsigned long set,
+ unsigned long hpa, unsigned long nbytes);
extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
+ const struct kvm_memory_slot *memslot,
unsigned long gpa, unsigned long hpa,
unsigned long nbytes);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index fac6f631ed29..0f98f00da2ea 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -72,7 +72,7 @@ extern int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
@@ -793,6 +793,7 @@ struct kvm_vcpu_arch {
/* For support of nested guests */
struct kvm_nested_guest *nested;
u32 nested_vcpu_id;
+ gpa_t nested_io_gpr;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
@@ -827,6 +828,8 @@ struct kvm_vcpu_arch {
#define KVM_MMIO_REG_FQPR 0x00c0
#define KVM_MMIO_REG_VSX 0x0100
#define KVM_MMIO_REG_VMX 0x0180
+#define KVM_MMIO_REG_NESTED_GPR 0xffc0
+
#define __KVM_HAVE_ARCH_WQP
#define __KVM_HAVE_CREATE_DEVICE
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9b89b1918dfc..eb0d79f0ca45 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -224,7 +224,8 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new);
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
@@ -294,7 +295,8 @@ struct kvmppc_ops {
void (*commit_memory_region)(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new);
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
unsigned long end);
int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
@@ -326,6 +328,10 @@ struct kvmppc_ops {
unsigned long flags);
void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
int (*enable_nested)(struct kvm *kvm);
+ int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
+ int size);
+ int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
+ int size);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index eb20eb3b8fb0..25607604a7a5 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -48,7 +48,7 @@
#define MMU_FTR_USE_HIGH_BATS ASM_CONST(0x00010000)
/* Enable >32-bit physical addresses on 32-bit processor, only used
- * by CONFIG_6xx currently as BookE supports that from day 1
+ * by CONFIG_PPC_BOOK3S_32 currently as BookE supports that from day 1
*/
#define MMU_FTR_BIG_PHYS ASM_CONST(0x00020000)
@@ -131,16 +131,37 @@ DECLARE_PER_CPU(int, next_tlbcam_idx);
#endif
enum {
- MMU_FTRS_POSSIBLE = MMU_FTR_HPTE_TABLE | MMU_FTR_TYPE_8xx |
- MMU_FTR_TYPE_40x | MMU_FTR_TYPE_44x | MMU_FTR_TYPE_FSL_E |
- MMU_FTR_TYPE_47x | MMU_FTR_USE_HIGH_BATS | MMU_FTR_BIG_PHYS |
- MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_USE_TLBILX |
- MMU_FTR_LOCK_BCAST_INVAL | MMU_FTR_NEED_DTLB_SW_LRU |
+ MMU_FTRS_POSSIBLE =
+#ifdef CONFIG_PPC_BOOK3S
+ MMU_FTR_HPTE_TABLE |
+#endif
+#ifdef CONFIG_PPC_8xx
+ MMU_FTR_TYPE_8xx |
+#endif
+#ifdef CONFIG_40x
+ MMU_FTR_TYPE_40x |
+#endif
+#ifdef CONFIG_44x
+ MMU_FTR_TYPE_44x |
+#endif
+#if defined(CONFIG_E200) || defined(CONFIG_E500)
+ MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX |
+#endif
+#ifdef CONFIG_PPC_47x
+ MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL |
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+ MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU |
+#endif
+#ifdef CONFIG_PPC_BOOK3E_64
MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
+#endif
#ifdef CONFIG_PPC_RADIX_MMU
MMU_FTR_TYPE_RADIX |
#endif
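Narrowing MMU_FTRS_POSSIBLE per configuration pays off because feature tests are masked against this compile-time set: a test for a feature that cannot exist in the current build constant-folds to zero and the guarded code is dropped entirely. A minimal illustration with invented flag values:

#include <stdio.h>

#define FTR_A 0x1u
#define FTR_B 0x2u

/* pretend this build can only ever select FTR_A */
#define FTRS_POSSIBLE FTR_A

static unsigned int cur_features = FTR_A;

static int has_feature(unsigned int f)
{
        if (!(FTRS_POSSIBLE & f))       /* folds to 0 for impossible features */
                return 0;
        return !!(cur_features & f);
}

int main(void)
{
        printf("A=%d B=%d\n", has_feature(FTR_A), has_feature(FTR_B));
        return 0;
}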
@@ -338,21 +359,11 @@ static inline void mmu_early_init_devtree(void) { }
#endif /* __ASSEMBLY__ */
#endif
-#if defined(CONFIG_PPC_STD_MMU_32)
+#if defined(CONFIG_PPC_BOOK3S_32)
/* 32-bit classic hash table MMU */
#include <asm/book3s/32/mmu-hash.h>
-#elif defined(CONFIG_40x)
-/* 40x-style software loaded TLB */
-# include <asm/mmu-40x.h>
-#elif defined(CONFIG_44x)
-/* 44x-style software loaded TLB */
-# include <asm/mmu-44x.h>
-#elif defined(CONFIG_PPC_BOOK3E_MMU)
-/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
-# include <asm/mmu-book3e.h>
-#elif defined (CONFIG_PPC_8xx)
-/* Motorola/Freescale 8xx software loaded TLB */
-# include <asm/mmu-8xx.h>
+#elif defined(CONFIG_PPC_MMU_NOHASH)
+#include <asm/nohash/mmu.h>
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 0381394a425b..6ee8195a2ffb 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -21,9 +21,12 @@ struct mm_iommu_table_group_mem_t;
extern int isolate_lru_page(struct page *page); /* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
-extern long mm_iommu_get(struct mm_struct *mm,
+extern long mm_iommu_new(struct mm_struct *mm,
unsigned long ua, unsigned long entries,
struct mm_iommu_table_group_mem_t **pmem);
+extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
+ unsigned long entries, unsigned long dev_hpa,
+ struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
@@ -32,15 +35,23 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
struct mm_struct *mm, unsigned long ua, unsigned long size);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
+extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
+ unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
+#else
+static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
+ unsigned int pageshift, unsigned long *size)
+{
+ return false;
+}
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);
@@ -217,13 +228,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
#endif
}
-static inline int arch_dup_mmap(struct mm_struct *oldmm,
- struct mm_struct *mm)
-{
- return 0;
-}
-
-#ifndef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_BOOK3E_64
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
@@ -247,6 +252,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
bool execute, bool foreign);
+void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
bool write, bool execute, bool foreign)
@@ -259,6 +265,7 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)
+#define arch_dup_pkeys(oldmm, mm)
static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
@@ -267,5 +274,12 @@ static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
#endif /* CONFIG_PPC_MEM_KEYS */
+static inline int arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+ arch_dup_pkeys(oldmm, mm);
+ return 0;
+}
+
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
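Moving arch_dup_mmap() below the pkey block lets it call arch_dup_pkeys() unconditionally, since the CONFIG_PPC_MEM_KEYS=n side defines that hook away to nothing. A sketch of the compile-out hook pattern (config symbol and names invented):

#include <stdio.h>

/* #define CONFIG_MEM_KEYS 1 -- flip to enable the real hook */

#ifdef CONFIG_MEM_KEYS
static void dup_pkeys(int *oldmm, int *mm) { *mm = *oldmm; }
#else
#define dup_pkeys(oldmm, mm) do { } while (0)
#endif

static int dup_mmap(int *oldmm, int *mm)
{
        dup_pkeys(oldmm, mm);   /* compiles away when keys are disabled */
        return 0;
}

int main(void)
{
        int parent = 42, child = 0;

        printf("ret=%d child=%d\n", dup_mmap(&parent, &child), child);
        return 0;
}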
diff --git a/arch/powerpc/include/asm/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
index 74f4edb5916e..74f4edb5916e 100644
--- a/arch/powerpc/include/asm/mmu-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
index 295b3dbb2698..28aa3b339c5e 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -111,6 +111,9 @@ typedef struct {
unsigned long vdso_base;
} mm_context_t;
+/* patch sites */
+extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I;
+
#endif /* !__ASSEMBLY__ */
#ifndef CONFIG_PPC_EARLY_DEBUG_44x
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index fa05aa566ece..b0f764c827c0 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -190,6 +190,7 @@ typedef struct {
struct slice_mask mask_8m;
# endif
#endif
+ void *pte_frag;
} mm_context_t;
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
@@ -244,6 +245,9 @@ extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
#define mmu_virtual_psize MMU_PAGE_4K
#elif defined(CONFIG_PPC_16K_PAGES)
#define mmu_virtual_psize MMU_PAGE_16K
+#define PTE_FRAG_NR 4
+#define PTE_FRAG_SIZE_SHIFT 12
+#define PTE_FRAG_SIZE (1UL << 12)
#else
#error "Unsupported PAGE_SIZE"
#endif
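The three constants added above encode a single relationship: with 16k linux pages and 4k page-table fragments, one page yields exactly four fragments. A quick check of the arithmetic:

#include <stdio.h>

#define PAGE_SHIFT          14          /* CONFIG_PPC_16K_PAGES */
#define PTE_FRAG_SIZE_SHIFT 12
#define PTE_FRAG_SIZE       (1UL << PTE_FRAG_SIZE_SHIFT)
#define PTE_FRAG_NR         (1UL << (PAGE_SHIFT - PTE_FRAG_SIZE_SHIFT))

int main(void)
{
        printf("frag size %lu, frags per page %lu\n",
               PTE_FRAG_SIZE, PTE_FRAG_NR);     /* 4096, 4 */
        return 0;
}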
diff --git a/arch/powerpc/include/asm/nohash/32/mmu.h b/arch/powerpc/include/asm/nohash/32/mmu.h
new file mode 100644
index 000000000000..7d94a36d57d2
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/32/mmu.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_32_MMU_H_
+#define _ASM_POWERPC_NOHASH_32_MMU_H_
+
+#include <asm/page.h>
+
+#if defined(CONFIG_40x)
+/* 40x-style software loaded TLB */
+#include <asm/nohash/32/mmu-40x.h>
+#elif defined(CONFIG_44x)
+/* 44x-style software loaded TLB */
+#include <asm/nohash/32/mmu-44x.h>
+#elif defined(CONFIG_PPC_BOOK3E_MMU)
+/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
+#include <asm/nohash/mmu-book3e.h>
+#elif defined (CONFIG_PPC_8xx)
+/* Motorola/Freescale 8xx software loaded TLB */
+#include <asm/nohash/32/mmu-8xx.h>
+#endif
+
+#ifndef __ASSEMBLY__
+typedef pte_t *pgtable_t;
+#endif
+
+#endif /* _ASM_POWERPC_NOHASH_32_MMU_H_ */
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 8825953c225b..17963951bdb0 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -25,10 +25,7 @@
extern void __bad_pte(pmd_t *pmd);
extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) ({ \
- BUG_ON(!(shift)); \
- pgtable_cache[(shift) - 1]; \
- })
+#define PGT_CACHE(shift) pgtable_cache[shift]
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
@@ -61,11 +58,10 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pte_page)
{
- *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_USER |
- _PMD_PRESENT);
+ *pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
#else
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
@@ -77,31 +73,32 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
pgtable_t pte_page)
{
- *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+ *pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
#endif
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+void pte_frag_destroy(void *pte_frag);
+pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel);
+void pte_fragment_free(unsigned long *table, int kernel);
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- free_page((unsigned long)pte);
+ pte_fragment_free((unsigned long *)pte, 1);
}
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
- pgtable_page_dtor(ptepage);
- __free_page(ptepage);
+ pte_fragment_free((unsigned long *)ptepage, 0);
}
static inline void pgtable_free(void *table, unsigned index_size)
{
if (!index_size) {
- pgtable_page_dtor(virt_to_page(table));
- free_page((unsigned long)table);
+ pte_fragment_free((unsigned long *)table, 0);
} else {
BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
kmem_cache_free(PGT_CACHE(index_size), table);
@@ -140,6 +137,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
tlb_flush_pgtable(tlb, address);
- pgtable_free_tlb(tlb, page_address(table), 0);
+ pgtable_free_tlb(tlb, table, 0);
}
#endif /* _ASM_POWERPC_PGALLOC_32_H */
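pte_free_kernel() and pte_free() above now hand sub-page fragments back to pte_fragment_free() instead of freeing whole pages, mirroring pte_fragment_alloc() on the allocation side. A toy model of the carving scheme, without the refcounting and locking the real code has:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE     16384
#define PTE_FRAG_NR   4
#define PTE_FRAG_SIZE (PAGE_SIZE / PTE_FRAG_NR)

static char *cur_page;
static int used = PTE_FRAG_NR;  /* forces an allocation on the first call */

static void *pte_frag_alloc(void)
{
        if (used == PTE_FRAG_NR) {      /* current page fully carved up */
                cur_page = malloc(PAGE_SIZE);
                used = 0;
        }
        return cur_page + used++ * PTE_FRAG_SIZE;
}

int main(void)
{
        for (int i = 0; i < 5; i++)     /* the 5th call starts a new page */
                printf("frag %d at %p\n", i, pte_frag_alloc());
        return 0;
}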
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 3ffb0ff5a038..bed433358260 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -232,7 +232,13 @@ static inline unsigned long pte_update(pte_t *p,
: "cc" );
#else /* PTE_ATOMIC_UPDATES */
unsigned long old = pte_val(*p);
- *p = __pte((old & ~clr) | set);
+ unsigned long new = (old & ~clr) | set;
+
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
+ p->pte = p->pte1 = p->pte2 = p->pte3 = new;
+#else
+ *p = __pte(new);
+#endif
#endif /* !PTE_ATOMIC_UPDATES */
#ifdef CONFIG_44x
@@ -333,12 +339,12 @@ static inline int pte_young(pte_t pte)
*/
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd) \
- ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+ ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd) \
- ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+ ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd) \
pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif
@@ -357,7 +363,8 @@ static inline int pte_young(pte_t pte)
(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
pte_index(addr))
#define pte_offset_map(dir, addr) \
- ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+ ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
+ (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte) kunmap_atomic(pte)
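The pte_offset_map() change above copes with pte tables that no longer start on a page boundary: kmap_atomic() maps the backing page, and the fragment's offset within that page must be added back before indexing. The offset arithmetic in isolation:

#include <stdio.h>

#define PAGE_MASK (~(unsigned long)0xfff)       /* 4k pages for the demo */

int main(void)
{
        unsigned long table = 0xc0012800UL;     /* pte fragment, mid-page */
        unsigned long kmapped = 0xfe000000UL;   /* where kmap_atomic put the page */

        /* the fragment's in-page offset survives the remapping: */
        unsigned long pte_base = kmapped + (table & ~PAGE_MASK);

        printf("%#lx\n", pte_base);             /* 0xfe000800 */
        return 0;
}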
/*
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
index 661f4599f2fc..12c6811e344b 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -33,7 +33,7 @@
* is cleared in the TLB miss handler before the TLB entry is loaded.
* - All other bits of the PTE are loaded into TLBLO without
* modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
- * software PTE bits. We actually use use bits 21, 24, 25, and
+ * software PTE bits. We actually use bits 21, 24, 25, and
* 30 respectively for the software bits: ACCESSED, DIRTY, RW, and
* PRESENT.
*/
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index 6bfe041ef59d..c9e4b2d90f65 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -65,9 +65,6 @@
#define _PTE_NONE_MASK 0
-/* Until my rework is finished, 8xx still needs atomic PTE updates */
-#define PTE_ATOMIC_UPDATES 1
-
#ifdef CONFIG_PPC_16K_PAGES
#define _PAGE_PSIZE _PAGE_SPS
#else
diff --git a/arch/powerpc/include/asm/nohash/64/mmu.h b/arch/powerpc/include/asm/nohash/64/mmu.h
new file mode 100644
index 000000000000..e6585480dfc4
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/64/mmu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_64_MMU_H_
+#define _ASM_POWERPC_NOHASH_64_MMU_H_
+
+/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
+#include <asm/nohash/mmu-book3e.h>
+
+#ifndef __ASSEMBLY__
+typedef struct page *pgtable_t;
+#endif
+
+#endif /* _ASM_POWERPC_NOHASH_64_MMU_H_ */
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index e2d62d033708..e95eb499a174 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -36,10 +36,7 @@ extern struct vmemmap_backing *vmemmap_list;
#define MAX_PGTABLE_INDEX_SIZE 0xf
extern struct kmem_cache *pgtable_cache[];
-#define PGT_CACHE(shift) ({ \
- BUG_ON(!(shift)); \
- pgtable_cache[(shift) - 1]; \
- })
+#define PGT_CACHE(shift) pgtable_cache[shift]
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-book3e.h
index e20072972e35..e20072972e35 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/nohash/mmu-book3e.h
diff --git a/arch/powerpc/include/asm/nohash/mmu.h b/arch/powerpc/include/asm/nohash/mmu.h
new file mode 100644
index 000000000000..a037cb1efb57
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/mmu.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_NOHASH_MMU_H_
+#define _ASM_POWERPC_NOHASH_MMU_H_
+
+#ifdef CONFIG_PPC64
+#include <asm/nohash/64/mmu.h>
+#else
+#include <asm/nohash/32/mmu.h>
+#endif
+
+#endif /* _ASM_POWERPC_NOHASH_MMU_H_ */
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 70ff23974b59..1ca1c1864b32 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -209,7 +209,11 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
/* Anything else just stores the PTE normally. That covers all 64-bit
* cases, and 32-bit non-hash with 32-bit PTEs.
*/
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
+ ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte);
+#else
*ptep = pte;
+#endif
/*
* With hardware tablewalk, a sync is needed to ensure that
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index ff3866473afe..a55b01c90bb1 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -347,6 +347,7 @@ extern int opal_async_comp_init(void);
extern int opal_sensor_init(void);
extern int opal_hmi_handler_init(void);
extern int opal_event_init(void);
+int opal_power_control_init(void);
extern int opal_machine_check(struct pt_regs *regs);
extern bool opal_mce_check_early_recovery(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index f6a1265face2..5c5ea2413413 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -289,7 +289,7 @@ static inline bool pfn_valid(unsigned long pfn)
* page tables at arbitrary addresses, this breaks and will have to change.
*/
#ifdef CONFIG_PPC64
-#define PD_HUGE 0x8000000000000000
+#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif
@@ -335,23 +335,11 @@ void arch_free_page(struct page *page, int order);
#endif
struct vm_area_struct;
-#ifdef CONFIG_PPC_BOOK3S_64
-/*
- * For BOOK3s 64 with 4k and 64K linux page size
- * we want to use pointers, because the page table
- * actually store pfn
- */
-typedef pte_t *pgtable_t;
-#else
-#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
-typedef pte_t *pgtable_t;
-#else
-typedef struct page *pgtable_t;
-#endif
-#endif
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
#include <asm/slice.h>
+#define ARCH_ZONE_DMA_BITS 31
+
#endif /* _ASM_POWERPC_PAGE_H */
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index 5c378e9b78c8..683dfbc67ca8 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -22,7 +22,8 @@
#define PTE_FLAGS_OFFSET 0
#endif
-#ifdef CONFIG_PPC_256K_PAGES
+#if defined(CONFIG_PPC_256K_PAGES) || \
+ (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */
#else
#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2) /* full page */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 94d449031b18..aee4fcc24990 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -129,6 +129,7 @@ struct pci_controller {
#endif /* CONFIG_PPC64 */
void *private_data;
+ struct npu *npu;
};
/* These are used for config access before all the PCI probing
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 2af9ded80540..0c72f1897063 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -129,5 +129,9 @@ extern void pcibios_scan_phb(struct pci_controller *hose);
extern struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev);
extern struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index);
+extern int pnv_npu2_init(struct pci_controller *hose);
+extern int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
+ unsigned long msr);
+extern int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev);
#endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 8bf1b6351716..35926cd6cd0b 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -26,6 +26,8 @@
#include <asm/ptrace.h>
#include <asm/reg.h>
+#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs
+
/*
* Overload regs->result to specify whether we should use the MSR (result
* is zero) or the SIAR (result is non zero).
@@ -37,4 +39,7 @@
(regs)->gpr[1] = current_stack_pointer(); \
asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \
} while (0)
+
+/* To support perf_regs sier update */
+extern bool is_sier_available(void);
#endif
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 67a8a9585d50..e60aeb46d6a0 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -41,6 +41,8 @@ struct power_pmu {
void (*get_mem_data_src)(union perf_mem_data_src *dsrc,
u32 flags, struct pt_regs *regs);
void (*get_mem_weight)(u64 *weight);
+ unsigned long group_constraint_mask;
+ unsigned long group_constraint_val;
u64 (*bhrb_filter_map)(u64 branch_sample_type);
void (*config_bhrb)(u64 pmu_bhrb_filter);
void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index eccb30b38b47..3b0edf041b2e 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -3,7 +3,11 @@
#define _ASM_POWERPC_PGTABLE_TYPES_H
/* PTE level */
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
+typedef struct { pte_basic_t pte, pte1, pte2, pte3; } pte_t;
+#else
typedef struct { pte_basic_t pte; } pte_t;
+#endif
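The four-word pte_t above pairs with the replicated stores in pte_update() and __set_pte_at() earlier in the series: 8xx hardware still walks 4k entries, so with 16k linux pages one logical PTE must be mirrored into four consecutive slots. A reduced sketch:

#include <stdio.h>

typedef unsigned long pte_basic_t;
typedef struct { pte_basic_t pte, pte1, pte2, pte3; } pte_t;

static void set_pte(pte_t *p, pte_basic_t val)
{
        p->pte = p->pte1 = p->pte2 = p->pte3 = val;     /* as in __set_pte_at() */
}

int main(void)
{
        pte_t p;

        set_pte(&p, 0x1234);
        printf("%lx %lx %lx %lx\n", p.pte, p.pte1, p.pte2, p.pte3);
        return 0;
}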
#define __pte(x) ((pte_t) { (x) })
static inline pte_basic_t pte_val(pte_t x)
{
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 9679b7519a35..dad1d27e196d 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -66,7 +66,6 @@ extern unsigned long empty_zero_page[];
extern pgd_t swapper_pg_dir[];
-void limit_zone_pfn(enum zone_type zone, unsigned long max_pfn);
int dma_pfn_limit_to_zone(u64 pfn_limit);
extern void paging_init(void);
@@ -101,7 +100,7 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
/* Can we use this in kvm? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+void pgtable_cache_add(unsigned int shift);
void pgtable_cache_init(void);
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
@@ -110,6 +109,35 @@ void mark_initmem_nx(void);
static inline void mark_initmem_nx(void) { }
#endif
+/*
+ * When used, PTE_FRAG_NR is defined in the subarch pgtable.h,
+ * so it is guaranteed to be defined by the time we get here.
+ */
+#ifdef PTE_FRAG_NR
+static inline void *pte_frag_get(mm_context_t *ctx)
+{
+ return ctx->pte_frag;
+}
+
+static inline void pte_frag_set(mm_context_t *ctx, void *p)
+{
+ ctx->pte_frag = p;
+}
+#else
+#define PTE_FRAG_NR 1
+#define PTE_FRAG_SIZE_SHIFT PAGE_SHIFT
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
+
+static inline void *pte_frag_get(mm_context_t *ctx)
+{
+ return NULL;
+}
+
+static inline void pte_frag_set(mm_context_t *ctx, void *p)
+{
+}
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
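pte_frag_get()/pte_frag_set() above implement a one-slot cache: the allocator stashes the unused remainder of the last page in the mm context and consumes it first on the next allocation. A sketch of that contract, with mm_context_t cut down to the single field:

#include <stdio.h>

typedef struct { void *pte_frag; } mm_context_t;

static void *pte_frag_get(mm_context_t *ctx) { return ctx->pte_frag; }
static void pte_frag_set(mm_context_t *ctx, void *p) { ctx->pte_frag = p; }

int main(void)
{
        mm_context_t ctx = { 0 };
        static char page[4096];

        if (!pte_frag_get(&ctx))                /* nothing cached yet */
                pte_frag_set(&ctx, page + 1024);        /* stash the remainder */
        printf("cached frag at %p\n", pte_frag_get(&ctx));
        return 0;
}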
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index a6e9e314c707..19a8834e0398 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -257,6 +257,7 @@
#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MFVSRD 0x7c000066
#define PPC_INST_MTVSRD 0x7c000166
+#define PPC_INST_SC 0x44000002
#define PPC_INST_SLBFEE 0x7c0007a7
#define PPC_INST_SLBIA 0x7c0003e4
@@ -342,6 +343,8 @@
#define PPC_INST_SLW 0x7c000030
#define PPC_INST_SLD 0x7c000036
#define PPC_INST_SRW 0x7c000430
+#define PPC_INST_SRAW 0x7c000630
+#define PPC_INST_SRAWI 0x7c000670
#define PPC_INST_SRD 0x7c000436
#define PPC_INST_SRAD 0x7c000634
#define PPC_INST_SRADI 0x7c000674
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index b5d023680801..e0637730a8e7 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -480,26 +480,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
ori rd,rd,((KERNELBASE>>48)&0xFFFF);\
rotldi rd,rd,48
#else
-/*
- * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
- * physical base address of RAM at compile time.
- */
#define toreal(rd) tophys(rd,rd)
#define fromreal(rd) tovirt(rd,rd)
-#define tophys(rd,rs) \
-0: addis rd,rs,-PAGE_OFFSET@h; \
- .section ".vtop_fixup","aw"; \
- .align 1; \
- .long 0b; \
- .previous
-
-#define tovirt(rd,rs) \
-0: addis rd,rs,PAGE_OFFSET@h; \
- .section ".ptov_fixup","aw"; \
- .align 1; \
- .long 0b; \
- .previous
+#define tophys(rd, rs) addis rd, rs, -PAGE_OFFSET@h
+#define tovirt(rd, rs) addis rd, rs, PAGE_OFFSET@h
#endif
#ifdef CONFIG_PPC_BOOK3S_64
@@ -821,4 +806,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
stringify_in_c(.long (_target) - . ;) \
stringify_in_c(.previous)
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define BTB_FLUSH(reg) \
+ lis reg,BUCSR_INIT@h; \
+ ori reg,reg,BUCSR_INIT@l; \
+ mtspr SPRN_BUCSR,reg; \
+ isync;
+#else
+#define BTB_FLUSH(reg)
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
#endif /* _ASM_POWERPC_PPC_ASM_H */
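BTB_FLUSH() above is the mitigation primitive that the __btb_flush_fixup section guards: rewrite BUCSR with its init value, which includes the flush bit, then isync. A C-level paraphrase with the SPR access stubbed out (the BUCSR_INIT value below is illustrative, not the real bit layout):

#include <stdio.h>

#define BUCSR_INIT 0x00000201u  /* illustrative: enable + flush-initiate bits */

static unsigned int spr_bucsr;  /* stand-in for the real SPR */

static void btb_flush(void)
{
        spr_bucsr = BUCSR_INIT; /* mtspr SPRN_BUCSR,reg */
        /* an isync here would discard already-fetched instructions */
}

int main(void)
{
        btb_flush();
        printf("BUCSR=%#x\n", spr_bucsr);
        return 0;
}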
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index de52c3166ba4..1c98ef1f2d5b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -582,7 +582,7 @@
#define HID0_POWER9_RADIX __MASK(63 - 8)
#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
#define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */
#define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */
#define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */
@@ -769,6 +769,8 @@
#define SRR1_PROGTRAP 0x00020000 /* Trap */
#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
+#define SRR1_MCE_MCP 0x00080000 /* Machine check signal caused interrupt */
+
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
#define HSRR1_DENORM 0x00100000 /* Denorm exception */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 1fffbba8d6a5..65676e2325b8 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -67,6 +67,13 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
#endif
+#ifdef CONFIG_PPC_FSL_BOOK3E
+void setup_spectre_v2(void);
+#else
+static inline void setup_spectre_v2(void) {};
+#endif
+void do_btb_flush_fixups(void);
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h
index d89beaba26ff..8b957aabb826 100644
--- a/arch/powerpc/include/asm/sfp-machine.h
+++ b/arch/powerpc/include/asm/sfp-machine.h
@@ -213,30 +213,18 @@
* respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
* (i.e. carry out) is not stored anywhere, and is lost.
*/
-#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
do { \
if (__builtin_constant_p (bh) && (bh) == 0) \
- __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "%r" ((USItype)(ah)), \
- "%r" ((USItype)(al)), \
- "rI" ((USItype)(bl))); \
- else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
- __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "%r" ((USItype)(ah)), \
- "%r" ((USItype)(al)), \
- "rI" ((USItype)(bl))); \
+ __asm__ ("add%I4c %1,%3,%4\n\taddze %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("add%I4c %1,%3,%4\n\taddme %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\
else \
- __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "%r" ((USItype)(ah)), \
- "r" ((USItype)(bh)), \
- "%r" ((USItype)(al)), \
- "rI" ((USItype)(bl))); \
+ __asm__ ("add%I5c %1,%4,%5\n\tadde %0,%2,%3" \
+ : "=r" (sh), "=&r" (sl) \
+ : "%r" (ah), "r" (bh), "%r" (al), "rI" (bl)); \
} while (0)
/* sub_ddmmss is used in op-2.h and udivmodti4.c and should be equivalent to
@@ -248,44 +236,24 @@
* and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
* and is lost.
*/
-#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
do { \
if (__builtin_constant_p (ah) && (ah) == 0) \
- __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "r" ((USItype)(bh)), \
- "rI" ((USItype)(al)), \
- "r" ((USItype)(bl))); \
- else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \
- __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "r" ((USItype)(bh)), \
- "rI" ((USItype)(al)), \
- "r" ((USItype)(bl))); \
+ __asm__ ("subf%I3c %1,%4,%3\n\tsubfze %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (ah) && (ah) == ~(USItype) 0) \
+ __asm__ ("subf%I3c %1,%4,%3\n\tsubfme %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\
else if (__builtin_constant_p (bh) && (bh) == 0) \
- __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "r" ((USItype)(ah)), \
- "rI" ((USItype)(al)), \
- "r" ((USItype)(bl))); \
- else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
- __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "r" ((USItype)(ah)), \
- "rI" ((USItype)(al)), \
- "r" ((USItype)(bl))); \
+ __asm__ ("subf%I3c %1,%4,%3\n\taddme %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
+ else if (__builtin_constant_p (bh) && (bh) == ~(USItype) 0) \
+ __asm__ ("subf%I3c %1,%4,%3\n\taddze %0,%2" \
+ : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\
else \
- __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
- : "=r" ((USItype)(sh)), \
- "=&r" ((USItype)(sl)) \
- : "r" ((USItype)(ah)), \
- "r" ((USItype)(bh)), \
- "rI" ((USItype)(al)), \
- "r" ((USItype)(bl))); \
+ __asm__ ("subf%I4c %1,%5,%4\n\tsubfe %0,%3,%2" \
+ : "=r" (sh), "=&r" (sl) \
+ : "r" (ah), "r" (bh), "rI" (al), "r" (bl)); \
} while (0)
/* asm fragments for mul and div */
@@ -294,13 +262,10 @@
* UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype
* word product in HIGH_PROD and LOW_PROD.
*/
-#define umul_ppmm(ph, pl, m0, m1) \
+#define umul_ppmm(ph, pl, m0, m1) \
do { \
USItype __m0 = (m0), __m1 = (m1); \
- __asm__ ("mulhwu %0,%1,%2" \
- : "=r" ((USItype)(ph)) \
- : "%r" (__m0), \
- "r" (__m1)); \
+ __asm__ ("mulhwu %0,%1,%2" : "=r" (ph) : "%r" (m0), "r" (m1)); \
(pl) = __m0 * __m1; \
} while (0)
@@ -312,9 +277,10 @@
* significant bit of DENOMINATOR must be 1, then the pre-processor symbol
* UDIV_NEEDS_NORMALIZATION is defined to 1.
*/
-#define udiv_qrnnd(q, r, n1, n0, d) \
+#define udiv_qrnnd(q, r, n1, n0, d) \
do { \
- UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \
+ UWtype __d1, __d0, __q1, __q0; \
+ UWtype __r1, __r0, __m; \
__d1 = __ll_highpart (d); \
__d0 = __ll_lowpart (d); \
\
@@ -325,7 +291,7 @@
if (__r1 < __m) \
{ \
__q1--, __r1 += (d); \
- if (__r1 >= (d)) /* we didn't get carry when adding to __r1 */ \
+ if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
if (__r1 < __m) \
__q1--, __r1 += (d); \
} \
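The de-bracketed umul_ppmm() above keeps the same contract: a 32x32 multiply whose high word is what mulhwu returns and whose low word is the truncated product. A userspace check of that contract using 64-bit arithmetic in place of inline asm:

#include <stdint.h>
#include <stdio.h>

static void umul_ppmm(uint32_t *ph, uint32_t *pl, uint32_t m0, uint32_t m1)
{
        uint64_t prod = (uint64_t)m0 * m1;

        *ph = (uint32_t)(prod >> 32);   /* what "mulhwu" returns */
        *pl = (uint32_t)prod;           /* what "(pl) = __m0 * __m1" keeps */
}

int main(void)
{
        uint32_t hi, lo;

        umul_ppmm(&hi, &lo, 0xdeadbeefu, 0x12345678u);
        printf("hi=%#x lo=%#x\n", hi, lo);
        return 0;
}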
diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
index a595461c9cb0..44816cbc4198 100644
--- a/arch/powerpc/include/asm/slice.h
+++ b/arch/powerpc/include/asm/slice.h
@@ -10,6 +10,10 @@
#include <asm/nohash/32/slice.h>
#endif
+#ifndef __ASSEMBLY__
+
+struct mm_struct;
+
#ifdef CONFIG_PPC_MM_SLICES
#ifdef CONFIG_HUGETLB_PAGE
@@ -18,10 +22,6 @@
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#ifndef __ASSEMBLY__
-
-struct mm_struct;
-
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
unsigned long flags, unsigned int psize,
int topdown);
@@ -34,8 +34,12 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
void slice_init_new_context_exec(struct mm_struct *mm);
void slice_setup_new_exec(void);
-#endif /* __ASSEMBLY__ */
+#else /* CONFIG_PPC_MM_SLICES */
+
+static inline void slice_init_new_context_exec(struct mm_struct *mm) {}
#endif /* CONFIG_PPC_MM_SLICES */
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_POWERPC_SLICE_H */
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index ab9f3f0a8637..1a0e7a8b1c81 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -18,9 +18,8 @@
#include <linux/thread_info.h>
/* ftrace syscalls requires exporting the sys_call_table */
-#ifdef CONFIG_FTRACE_SYSCALLS
extern const unsigned long sys_call_table[];
-#endif /* CONFIG_FTRACE_SYSCALLS */
+extern const unsigned long compat_sys_call_table[];
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
deleted file mode 100644
index 01b5171ea189..000000000000
--- a/arch/powerpc/include/asm/systbl.h
+++ /dev/null
@@ -1,396 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * List of powerpc syscalls. For the meaning of the _SPU suffix see
- * arch/powerpc/platforms/cell/spu_callbacks.c
- */
-
-SYSCALL(restart_syscall)
-SYSCALL(exit)
-PPC_SYS(fork)
-SYSCALL_SPU(read)
-SYSCALL_SPU(write)
-COMPAT_SYS_SPU(open)
-SYSCALL_SPU(close)
-SYSCALL_SPU(waitpid)
-SYSCALL_SPU(creat)
-SYSCALL_SPU(link)
-SYSCALL_SPU(unlink)
-COMPAT_SYS(execve)
-SYSCALL_SPU(chdir)
-COMPAT_SYS_SPU(time)
-SYSCALL_SPU(mknod)
-SYSCALL_SPU(chmod)
-SYSCALL_SPU(lchown)
-SYSCALL(ni_syscall)
-OLDSYS(stat)
-COMPAT_SYS_SPU(lseek)
-SYSCALL_SPU(getpid)
-COMPAT_SYS(mount)
-SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
-SYSCALL_SPU(setuid)
-SYSCALL_SPU(getuid)
-COMPAT_SYS_SPU(stime)
-COMPAT_SYS(ptrace)
-SYSCALL_SPU(alarm)
-OLDSYS(fstat)
-SYSCALL(pause)
-COMPAT_SYS(utime)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL_SPU(access)
-SYSCALL_SPU(nice)
-SYSCALL(ni_syscall)
-SYSCALL_SPU(sync)
-SYSCALL_SPU(kill)
-SYSCALL_SPU(rename)
-SYSCALL_SPU(mkdir)
-SYSCALL_SPU(rmdir)
-SYSCALL_SPU(dup)
-SYSCALL_SPU(pipe)
-COMPAT_SYS_SPU(times)
-SYSCALL(ni_syscall)
-SYSCALL_SPU(brk)
-SYSCALL_SPU(setgid)
-SYSCALL_SPU(getgid)
-SYSCALL(signal)
-SYSCALL_SPU(geteuid)
-SYSCALL_SPU(getegid)
-SYSCALL(acct)
-SYSCALL(umount)
-SYSCALL(ni_syscall)
-COMPAT_SYS_SPU(ioctl)
-COMPAT_SYS_SPU(fcntl)
-SYSCALL(ni_syscall)
-SYSCALL_SPU(setpgid)
-SYSCALL(ni_syscall)
-SYSX(sys_ni_syscall,sys_olduname,sys_olduname)
-SYSCALL_SPU(umask)
-SYSCALL_SPU(chroot)
-COMPAT_SYS(ustat)
-SYSCALL_SPU(dup2)
-SYSCALL_SPU(getppid)
-SYSCALL_SPU(getpgrp)
-SYSCALL_SPU(setsid)
-SYS32ONLY(sigaction)
-SYSCALL_SPU(sgetmask)
-SYSCALL_SPU(ssetmask)
-SYSCALL_SPU(setreuid)
-SYSCALL_SPU(setregid)
-#define compat_sys_sigsuspend sys_sigsuspend
-SYS32ONLY(sigsuspend)
-SYSX(sys_ni_syscall,compat_sys_sigpending,sys_sigpending)
-SYSCALL_SPU(sethostname)
-COMPAT_SYS_SPU(setrlimit)
-SYSX(sys_ni_syscall,compat_sys_old_getrlimit,sys_old_getrlimit)
-COMPAT_SYS_SPU(getrusage)
-COMPAT_SYS_SPU(gettimeofday)
-COMPAT_SYS_SPU(settimeofday)
-SYSCALL_SPU(getgroups)
-SYSCALL_SPU(setgroups)
-SYSX(sys_ni_syscall,sys_ni_syscall,ppc_select)
-SYSCALL_SPU(symlink)
-OLDSYS(lstat)
-SYSCALL_SPU(readlink)
-SYSCALL(uselib)
-SYSCALL(swapon)
-SYSCALL(reboot)
-SYSX(sys_ni_syscall,compat_sys_old_readdir,sys_old_readdir)
-SYSCALL_SPU(mmap)
-SYSCALL_SPU(munmap)
-COMPAT_SYS_SPU(truncate)
-COMPAT_SYS_SPU(ftruncate)
-SYSCALL_SPU(fchmod)
-SYSCALL_SPU(fchown)
-SYSCALL_SPU(getpriority)
-SYSCALL_SPU(setpriority)
-SYSCALL(ni_syscall)
-COMPAT_SYS(statfs)
-COMPAT_SYS(fstatfs)
-SYSCALL(ni_syscall)
-COMPAT_SYS_SPU(socketcall)
-SYSCALL_SPU(syslog)
-COMPAT_SYS_SPU(setitimer)
-COMPAT_SYS_SPU(getitimer)
-COMPAT_SYS_SPU(newstat)
-COMPAT_SYS_SPU(newlstat)
-COMPAT_SYS_SPU(newfstat)
-SYSX(sys_ni_syscall,sys_uname,sys_uname)
-SYSCALL(ni_syscall)
-SYSCALL_SPU(vhangup)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-COMPAT_SYS_SPU(wait4)
-SYSCALL(swapoff)
-COMPAT_SYS_SPU(sysinfo)
-COMPAT_SYS(ipc)
-SYSCALL_SPU(fsync)
-SYS32ONLY(sigreturn)
-PPC_SYS(clone)
-SYSCALL_SPU(setdomainname)
-SYSCALL_SPU(newuname)
-SYSCALL(ni_syscall)
-COMPAT_SYS_SPU(adjtimex)
-SYSCALL_SPU(mprotect)
-SYSX(sys_ni_syscall,compat_sys_sigprocmask,sys_sigprocmask)
-SYSCALL(ni_syscall)
-SYSCALL(init_module)
-SYSCALL(delete_module)
-SYSCALL(ni_syscall)
-SYSCALL(quotactl)
-SYSCALL_SPU(getpgid)
-SYSCALL_SPU(fchdir)
-SYSCALL_SPU(bdflush)
-SYSCALL_SPU(sysfs)
-SYSX_SPU(ppc64_personality,ppc64_personality,sys_personality)
-SYSCALL(ni_syscall)
-SYSCALL_SPU(setfsuid)
-SYSCALL_SPU(setfsgid)
-SYSCALL_SPU(llseek)
-COMPAT_SYS_SPU(getdents)
-COMPAT_SPU_NEW(select)
-SYSCALL_SPU(flock)
-SYSCALL_SPU(msync)
-COMPAT_SYS_SPU(readv)
-COMPAT_SYS_SPU(writev)
-SYSCALL_SPU(getsid)
-SYSCALL_SPU(fdatasync)
-COMPAT_SYS(sysctl)
-SYSCALL_SPU(mlock)
-SYSCALL_SPU(munlock)
-SYSCALL_SPU(mlockall)
-SYSCALL_SPU(munlockall)
-SYSCALL_SPU(sched_setparam)
-SYSCALL_SPU(sched_getparam)
-SYSCALL_SPU(sched_setscheduler)
-SYSCALL_SPU(sched_getscheduler)
-SYSCALL_SPU(sched_yield)
-SYSCALL_SPU(sched_get_priority_max)
-SYSCALL_SPU(sched_get_priority_min)
-COMPAT_SYS_SPU(sched_rr_get_interval)
-COMPAT_SYS_SPU(nanosleep)
-SYSCALL_SPU(mremap)
-SYSCALL_SPU(setresuid)
-SYSCALL_SPU(getresuid)
-SYSCALL(ni_syscall)
-SYSCALL_SPU(poll)
-SYSCALL(ni_syscall)
-SYSCALL_SPU(setresgid)
-SYSCALL_SPU(getresgid)
-SYSCALL_SPU(prctl)
-COMPAT_SYS(rt_sigreturn)
-COMPAT_SYS(rt_sigaction)
-COMPAT_SYS(rt_sigprocmask)
-COMPAT_SYS(rt_sigpending)
-COMPAT_SYS(rt_sigtimedwait)
-COMPAT_SYS(rt_sigqueueinfo)
-COMPAT_SYS(rt_sigsuspend)
-COMPAT_SYS_SPU(pread64)
-COMPAT_SYS_SPU(pwrite64)
-SYSCALL_SPU(chown)
-SYSCALL_SPU(getcwd)
-SYSCALL_SPU(capget)
-SYSCALL_SPU(capset)
-COMPAT_SYS(sigaltstack)
-SYSX_SPU(sys_sendfile64,compat_sys_sendfile,sys_sendfile)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-PPC_SYS(vfork)
-COMPAT_SYS_SPU(getrlimit)
-COMPAT_SYS_SPU(readahead)
-SYS32ONLY(mmap2)
-SYS32ONLY(truncate64)
-SYS32ONLY(ftruncate64)
-SYSX(sys_ni_syscall,sys_stat64,sys_stat64)
-SYSX(sys_ni_syscall,sys_lstat64,sys_lstat64)
-SYSX(sys_ni_syscall,sys_fstat64,sys_fstat64)
-SYSCALL(pciconfig_read)
-SYSCALL(pciconfig_write)
-SYSCALL(pciconfig_iobase)
-SYSCALL(ni_syscall)
-SYSCALL_SPU(getdents64)
-SYSCALL_SPU(pivot_root)
-SYSX(sys_ni_syscall,compat_sys_fcntl64,sys_fcntl64)
-SYSCALL_SPU(madvise)
-SYSCALL_SPU(mincore)
-SYSCALL_SPU(gettid)
-SYSCALL_SPU(tkill)
-SYSCALL_SPU(setxattr)
-SYSCALL_SPU(lsetxattr)
-SYSCALL_SPU(fsetxattr)
-SYSCALL_SPU(getxattr)
-SYSCALL_SPU(lgetxattr)
-SYSCALL_SPU(fgetxattr)
-SYSCALL_SPU(listxattr)
-SYSCALL_SPU(llistxattr)
-SYSCALL_SPU(flistxattr)
-SYSCALL_SPU(removexattr)
-SYSCALL_SPU(lremovexattr)
-SYSCALL_SPU(fremovexattr)
-COMPAT_SYS_SPU(futex)
-COMPAT_SYS_SPU(sched_setaffinity)
-COMPAT_SYS_SPU(sched_getaffinity)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYS32ONLY(sendfile64)
-COMPAT_SYS_SPU(io_setup)
-SYSCALL_SPU(io_destroy)
-COMPAT_SYS_SPU(io_getevents)
-COMPAT_SYS_SPU(io_submit)
-SYSCALL_SPU(io_cancel)
-SYSCALL(set_tid_address)
-SYSX_SPU(sys_fadvise64,ppc32_fadvise64,sys_fadvise64)
-SYSCALL(exit_group)
-COMPAT_SYS(lookup_dcookie)
-SYSCALL_SPU(epoll_create)
-SYSCALL_SPU(epoll_ctl)
-SYSCALL_SPU(epoll_wait)
-SYSCALL_SPU(remap_file_pages)
-COMPAT_SYS_SPU(timer_create)
-COMPAT_SYS_SPU(timer_settime)
-COMPAT_SYS_SPU(timer_gettime)
-SYSCALL_SPU(timer_getoverrun)
-SYSCALL_SPU(timer_delete)
-COMPAT_SYS_SPU(clock_settime)
-COMPAT_SYS_SPU(clock_gettime)
-COMPAT_SYS_SPU(clock_getres)
-COMPAT_SYS_SPU(clock_nanosleep)
-SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext)
-SYSCALL_SPU(tgkill)
-COMPAT_SYS_SPU(utimes)
-COMPAT_SYS_SPU(statfs64)
-COMPAT_SYS_SPU(fstatfs64)
-SYSX(sys_ni_syscall,ppc_fadvise64_64,ppc_fadvise64_64)
-SYSCALL_SPU(rtas)
-OLDSYS(debug_setcontext)
-SYSCALL(ni_syscall)
-COMPAT_SYS(migrate_pages)
-COMPAT_SYS(mbind)
-COMPAT_SYS(get_mempolicy)
-COMPAT_SYS(set_mempolicy)
-COMPAT_SYS(mq_open)
-SYSCALL(mq_unlink)
-COMPAT_SYS(mq_timedsend)
-COMPAT_SYS(mq_timedreceive)
-COMPAT_SYS(mq_notify)
-COMPAT_SYS(mq_getsetattr)
-COMPAT_SYS(kexec_load)
-SYSCALL(add_key)
-SYSCALL(request_key)
-COMPAT_SYS(keyctl)
-COMPAT_SYS(waitid)
-SYSCALL(ioprio_set)
-SYSCALL(ioprio_get)
-SYSCALL(inotify_init)
-SYSCALL(inotify_add_watch)
-SYSCALL(inotify_rm_watch)
-SYSCALL(spu_run)
-SYSCALL(spu_create)
-COMPAT_SYS(pselect6)
-COMPAT_SYS(ppoll)
-SYSCALL_SPU(unshare)
-SYSCALL_SPU(splice)
-SYSCALL_SPU(tee)
-COMPAT_SYS_SPU(vmsplice)
-COMPAT_SYS_SPU(openat)
-SYSCALL_SPU(mkdirat)
-SYSCALL_SPU(mknodat)
-SYSCALL_SPU(fchownat)
-COMPAT_SYS_SPU(futimesat)
-SYSX_SPU(sys_newfstatat,sys_fstatat64,sys_fstatat64)
-SYSCALL_SPU(unlinkat)
-SYSCALL_SPU(renameat)
-SYSCALL_SPU(linkat)
-SYSCALL_SPU(symlinkat)
-SYSCALL_SPU(readlinkat)
-SYSCALL_SPU(fchmodat)
-SYSCALL_SPU(faccessat)
-COMPAT_SYS_SPU(get_robust_list)
-COMPAT_SYS_SPU(set_robust_list)
-COMPAT_SYS_SPU(move_pages)
-SYSCALL_SPU(getcpu)
-COMPAT_SYS(epoll_pwait)
-COMPAT_SYS_SPU(utimensat)
-COMPAT_SYS_SPU(signalfd)
-SYSCALL_SPU(timerfd_create)
-SYSCALL_SPU(eventfd)
-COMPAT_SYS_SPU(sync_file_range2)
-COMPAT_SYS(fallocate)
-SYSCALL(subpage_prot)
-COMPAT_SYS_SPU(timerfd_settime)
-COMPAT_SYS_SPU(timerfd_gettime)
-COMPAT_SYS_SPU(signalfd4)
-SYSCALL_SPU(eventfd2)
-SYSCALL_SPU(epoll_create1)
-SYSCALL_SPU(dup3)
-SYSCALL_SPU(pipe2)
-SYSCALL(inotify_init1)
-SYSCALL_SPU(perf_event_open)
-COMPAT_SYS_SPU(preadv)
-COMPAT_SYS_SPU(pwritev)
-COMPAT_SYS(rt_tgsigqueueinfo)
-SYSCALL(fanotify_init)
-COMPAT_SYS(fanotify_mark)
-SYSCALL_SPU(prlimit64)
-SYSCALL_SPU(socket)
-SYSCALL_SPU(bind)
-SYSCALL_SPU(connect)
-SYSCALL_SPU(listen)
-SYSCALL_SPU(accept)
-SYSCALL_SPU(getsockname)
-SYSCALL_SPU(getpeername)
-SYSCALL_SPU(socketpair)
-SYSCALL_SPU(send)
-SYSCALL_SPU(sendto)
-COMPAT_SYS_SPU(recv)
-COMPAT_SYS_SPU(recvfrom)
-SYSCALL_SPU(shutdown)
-COMPAT_SYS_SPU(setsockopt)
-COMPAT_SYS_SPU(getsockopt)
-COMPAT_SYS_SPU(sendmsg)
-COMPAT_SYS_SPU(recvmsg)
-COMPAT_SYS_SPU(recvmmsg)
-SYSCALL_SPU(accept4)
-SYSCALL_SPU(name_to_handle_at)
-COMPAT_SYS_SPU(open_by_handle_at)
-COMPAT_SYS_SPU(clock_adjtime)
-SYSCALL_SPU(syncfs)
-COMPAT_SYS_SPU(sendmmsg)
-SYSCALL_SPU(setns)
-COMPAT_SYS(process_vm_readv)
-COMPAT_SYS(process_vm_writev)
-SYSCALL(finit_module)
-SYSCALL(kcmp) /* sys_kcmp */
-SYSCALL_SPU(sched_setattr)
-SYSCALL_SPU(sched_getattr)
-SYSCALL_SPU(renameat2)
-SYSCALL_SPU(seccomp)
-SYSCALL_SPU(getrandom)
-SYSCALL_SPU(memfd_create)
-SYSCALL_SPU(bpf)
-COMPAT_SYS(execveat)
-PPC64ONLY(switch_endian)
-SYSCALL_SPU(userfaultfd)
-SYSCALL_SPU(membarrier)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL(ni_syscall)
-SYSCALL(mlock2)
-SYSCALL(copy_file_range)
-COMPAT_SYS_SPU(preadv2)
-COMPAT_SYS_SPU(pwritev2)
-SYSCALL(kexec_file_load)
-SYSCALL(statx)
-SYSCALL(pkey_alloc)
-SYSCALL(pkey_free)
-SYSCALL(pkey_mprotect)
-SYSCALL(rseq)
-COMPAT_SYS(io_pgetevents)
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index b80d492ceb29..54bf7e68a7e1 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -43,7 +43,7 @@ struct div_result {
/* Accessor functions for the timebase (RTC on 601) registers. */
/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
#define __USE_RTC() (cpu_has_feature(CPU_FTR_USE_RTC))
#else
#define __USE_RTC() 0
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index f0e571b2dc7c..e24c67d5ba75 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -40,7 +40,7 @@ extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
unsigned long address)
{
-#ifdef CONFIG_PPC_STD_MMU_32
+#ifdef CONFIG_PPC_BOOK3S_32
if (pte_val(*ptep) & _PAGE_HASHPTE)
flush_hash_entry(tlb->mm, ptep, address);
#endif
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 15bea9a0f260..ebc0b916dcf9 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -63,7 +63,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
#endif
#define access_ok(type, addr, size) \
- (__chk_user_ptr(addr), \
+ (__chk_user_ptr(addr), (void)(type), \
__access_ok((__force unsigned long)(addr), (size), get_fs()))
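The added (void)(type) keeps the first argument evaluated, so existing callers compile unchanged and any side effects survive, while its value no longer participates in the check. A toy demonstration of the idiom (the check itself is invented):

#include <stdio.h>

/* toy check; only the (void)(type) idiom matters here */
#define access_ok(type, addr, size) \
        ((void)(type), (addr) != 0 && (size) > 0)

int main(void)
{
        int evaluated = 0;

        /* the first argument is evaluated (side effect happens)... */
        int ok = access_ok(evaluated++, 0x1000, 16);

        /* ...but its value does not affect the result */
        printf("ok=%d evaluated=%d\n", ok, evaluated);
        return 0;
}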
/*
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index b0de85b477e1..a3c35e6d6ffb 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -11,8 +11,7 @@
#include <uapi/asm/unistd.h>
-
-#define NR_syscalls 389
+#define NR_syscalls __NR_syscalls
#define __NR__exit __NR_exit
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index a658091a19f9..8ab8ba1b71bc 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -1,7 +1,8 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-generic-y += bpf_perf_event.h
+generated-y += unistd_32.h
+generated-y += unistd_64.h
generic-y += param.h
generic-y += poll.h
generic-y += resource.h
diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..b551b741653d
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index 9e52c86ccbd3..ff91192407d1 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -46,6 +46,7 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_TRAP,
PERF_REG_POWERPC_DAR,
PERF_REG_POWERPC_DSISR,
+ PERF_REG_POWERPC_SIER,
PERF_REG_POWERPC_MAX,
};
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 985534d0b448..5f84e3dc98d0 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -10,395 +10,10 @@
#ifndef _UAPI_ASM_POWERPC_UNISTD_H_
#define _UAPI_ASM_POWERPC_UNISTD_H_
-
-#define __NR_restart_syscall 0
-#define __NR_exit 1
-#define __NR_fork 2
-#define __NR_read 3
-#define __NR_write 4
-#define __NR_open 5
-#define __NR_close 6
-#define __NR_waitpid 7
-#define __NR_creat 8
-#define __NR_link 9
-#define __NR_unlink 10
-#define __NR_execve 11
-#define __NR_chdir 12
-#define __NR_time 13
-#define __NR_mknod 14
-#define __NR_chmod 15
-#define __NR_lchown 16
-#define __NR_break 17
-#define __NR_oldstat 18
-#define __NR_lseek 19
-#define __NR_getpid 20
-#define __NR_mount 21
-#define __NR_umount 22
-#define __NR_setuid 23
-#define __NR_getuid 24
-#define __NR_stime 25
-#define __NR_ptrace 26
-#define __NR_alarm 27
-#define __NR_oldfstat 28
-#define __NR_pause 29
-#define __NR_utime 30
-#define __NR_stty 31
-#define __NR_gtty 32
-#define __NR_access 33
-#define __NR_nice 34
-#define __NR_ftime 35
-#define __NR_sync 36
-#define __NR_kill 37
-#define __NR_rename 38
-#define __NR_mkdir 39
-#define __NR_rmdir 40
-#define __NR_dup 41
-#define __NR_pipe 42
-#define __NR_times 43
-#define __NR_prof 44
-#define __NR_brk 45
-#define __NR_setgid 46
-#define __NR_getgid 47
-#define __NR_signal 48
-#define __NR_geteuid 49
-#define __NR_getegid 50
-#define __NR_acct 51
-#define __NR_umount2 52
-#define __NR_lock 53
-#define __NR_ioctl 54
-#define __NR_fcntl 55
-#define __NR_mpx 56
-#define __NR_setpgid 57
-#define __NR_ulimit 58
-#define __NR_oldolduname 59
-#define __NR_umask 60
-#define __NR_chroot 61
-#define __NR_ustat 62
-#define __NR_dup2 63
-#define __NR_getppid 64
-#define __NR_getpgrp 65
-#define __NR_setsid 66
-#define __NR_sigaction 67
-#define __NR_sgetmask 68
-#define __NR_ssetmask 69
-#define __NR_setreuid 70
-#define __NR_setregid 71
-#define __NR_sigsuspend 72
-#define __NR_sigpending 73
-#define __NR_sethostname 74
-#define __NR_setrlimit 75
-#define __NR_getrlimit 76
-#define __NR_getrusage 77
-#define __NR_gettimeofday 78
-#define __NR_settimeofday 79
-#define __NR_getgroups 80
-#define __NR_setgroups 81
-#define __NR_select 82
-#define __NR_symlink 83
-#define __NR_oldlstat 84
-#define __NR_readlink 85
-#define __NR_uselib 86
-#define __NR_swapon 87
-#define __NR_reboot 88
-#define __NR_readdir 89
-#define __NR_mmap 90
-#define __NR_munmap 91
-#define __NR_truncate 92
-#define __NR_ftruncate 93
-#define __NR_fchmod 94
-#define __NR_fchown 95
-#define __NR_getpriority 96
-#define __NR_setpriority 97
-#define __NR_profil 98
-#define __NR_statfs 99
-#define __NR_fstatfs 100
-#define __NR_ioperm 101
-#define __NR_socketcall 102
-#define __NR_syslog 103
-#define __NR_setitimer 104
-#define __NR_getitimer 105
-#define __NR_stat 106
-#define __NR_lstat 107
-#define __NR_fstat 108
-#define __NR_olduname 109
-#define __NR_iopl 110
-#define __NR_vhangup 111
-#define __NR_idle 112
-#define __NR_vm86 113
-#define __NR_wait4 114
-#define __NR_swapoff 115
-#define __NR_sysinfo 116
-#define __NR_ipc 117
-#define __NR_fsync 118
-#define __NR_sigreturn 119
-#define __NR_clone 120
-#define __NR_setdomainname 121
-#define __NR_uname 122
-#define __NR_modify_ldt 123
-#define __NR_adjtimex 124
-#define __NR_mprotect 125
-#define __NR_sigprocmask 126
-#define __NR_create_module 127
-#define __NR_init_module 128
-#define __NR_delete_module 129
-#define __NR_get_kernel_syms 130
-#define __NR_quotactl 131
-#define __NR_getpgid 132
-#define __NR_fchdir 133
-#define __NR_bdflush 134
-#define __NR_sysfs 135
-#define __NR_personality 136
-#define __NR_afs_syscall 137 /* Syscall for Andrew File System */
-#define __NR_setfsuid 138
-#define __NR_setfsgid 139
-#define __NR__llseek 140
-#define __NR_getdents 141
-#define __NR__newselect 142
-#define __NR_flock 143
-#define __NR_msync 144
-#define __NR_readv 145
-#define __NR_writev 146
-#define __NR_getsid 147
-#define __NR_fdatasync 148
-#define __NR__sysctl 149
-#define __NR_mlock 150
-#define __NR_munlock 151
-#define __NR_mlockall 152
-#define __NR_munlockall 153
-#define __NR_sched_setparam 154
-#define __NR_sched_getparam 155
-#define __NR_sched_setscheduler 156
-#define __NR_sched_getscheduler 157
-#define __NR_sched_yield 158
-#define __NR_sched_get_priority_max 159
-#define __NR_sched_get_priority_min 160
-#define __NR_sched_rr_get_interval 161
-#define __NR_nanosleep 162
-#define __NR_mremap 163
-#define __NR_setresuid 164
-#define __NR_getresuid 165
-#define __NR_query_module 166
-#define __NR_poll 167
-#define __NR_nfsservctl 168
-#define __NR_setresgid 169
-#define __NR_getresgid 170
-#define __NR_prctl 171
-#define __NR_rt_sigreturn 172
-#define __NR_rt_sigaction 173
-#define __NR_rt_sigprocmask 174
-#define __NR_rt_sigpending 175
-#define __NR_rt_sigtimedwait 176
-#define __NR_rt_sigqueueinfo 177
-#define __NR_rt_sigsuspend 178
-#define __NR_pread64 179
-#define __NR_pwrite64 180
-#define __NR_chown 181
-#define __NR_getcwd 182
-#define __NR_capget 183
-#define __NR_capset 184
-#define __NR_sigaltstack 185
-#define __NR_sendfile 186
-#define __NR_getpmsg 187 /* some people actually want streams */
-#define __NR_putpmsg 188 /* some people actually want streams */
-#define __NR_vfork 189
-#define __NR_ugetrlimit 190 /* SuS compliant getrlimit */
-#define __NR_readahead 191
-#ifndef __powerpc64__ /* these are 32-bit only */
-#define __NR_mmap2 192
-#define __NR_truncate64 193
-#define __NR_ftruncate64 194
-#define __NR_stat64 195
-#define __NR_lstat64 196
-#define __NR_fstat64 197
-#endif
-#define __NR_pciconfig_read 198
-#define __NR_pciconfig_write 199
-#define __NR_pciconfig_iobase 200
-#define __NR_multiplexer 201
-#define __NR_getdents64 202
-#define __NR_pivot_root 203
-#ifndef __powerpc64__
-#define __NR_fcntl64 204
-#endif
-#define __NR_madvise 205
-#define __NR_mincore 206
-#define __NR_gettid 207
-#define __NR_tkill 208
-#define __NR_setxattr 209
-#define __NR_lsetxattr 210
-#define __NR_fsetxattr 211
-#define __NR_getxattr 212
-#define __NR_lgetxattr 213
-#define __NR_fgetxattr 214
-#define __NR_listxattr 215
-#define __NR_llistxattr 216
-#define __NR_flistxattr 217
-#define __NR_removexattr 218
-#define __NR_lremovexattr 219
-#define __NR_fremovexattr 220
-#define __NR_futex 221
-#define __NR_sched_setaffinity 222
-#define __NR_sched_getaffinity 223
-/* 224 currently unused */
-#define __NR_tuxcall 225
#ifndef __powerpc64__
-#define __NR_sendfile64 226
-#endif
-#define __NR_io_setup 227
-#define __NR_io_destroy 228
-#define __NR_io_getevents 229
-#define __NR_io_submit 230
-#define __NR_io_cancel 231
-#define __NR_set_tid_address 232
-#define __NR_fadvise64 233
-#define __NR_exit_group 234
-#define __NR_lookup_dcookie 235
-#define __NR_epoll_create 236
-#define __NR_epoll_ctl 237
-#define __NR_epoll_wait 238
-#define __NR_remap_file_pages 239
-#define __NR_timer_create 240
-#define __NR_timer_settime 241
-#define __NR_timer_gettime 242
-#define __NR_timer_getoverrun 243
-#define __NR_timer_delete 244
-#define __NR_clock_settime 245
-#define __NR_clock_gettime 246
-#define __NR_clock_getres 247
-#define __NR_clock_nanosleep 248
-#define __NR_swapcontext 249
-#define __NR_tgkill 250
-#define __NR_utimes 251
-#define __NR_statfs64 252
-#define __NR_fstatfs64 253
-#ifndef __powerpc64__
-#define __NR_fadvise64_64 254
-#endif
-#define __NR_rtas 255
-#define __NR_sys_debug_setcontext 256
-/* Number 257 is reserved for vserver */
-#define __NR_migrate_pages 258
-#define __NR_mbind 259
-#define __NR_get_mempolicy 260
-#define __NR_set_mempolicy 261
-#define __NR_mq_open 262
-#define __NR_mq_unlink 263
-#define __NR_mq_timedsend 264
-#define __NR_mq_timedreceive 265
-#define __NR_mq_notify 266
-#define __NR_mq_getsetattr 267
-#define __NR_kexec_load 268
-#define __NR_add_key 269
-#define __NR_request_key 270
-#define __NR_keyctl 271
-#define __NR_waitid 272
-#define __NR_ioprio_set 273
-#define __NR_ioprio_get 274
-#define __NR_inotify_init 275
-#define __NR_inotify_add_watch 276
-#define __NR_inotify_rm_watch 277
-#define __NR_spu_run 278
-#define __NR_spu_create 279
-#define __NR_pselect6 280
-#define __NR_ppoll 281
-#define __NR_unshare 282
-#define __NR_splice 283
-#define __NR_tee 284
-#define __NR_vmsplice 285
-#define __NR_openat 286
-#define __NR_mkdirat 287
-#define __NR_mknodat 288
-#define __NR_fchownat 289
-#define __NR_futimesat 290
-#ifdef __powerpc64__
-#define __NR_newfstatat 291
+#include <asm/unistd_32.h>
#else
-#define __NR_fstatat64 291
+#include <asm/unistd_64.h>
#endif
-#define __NR_unlinkat 292
-#define __NR_renameat 293
-#define __NR_linkat 294
-#define __NR_symlinkat 295
-#define __NR_readlinkat 296
-#define __NR_fchmodat 297
-#define __NR_faccessat 298
-#define __NR_get_robust_list 299
-#define __NR_set_robust_list 300
-#define __NR_move_pages 301
-#define __NR_getcpu 302
-#define __NR_epoll_pwait 303
-#define __NR_utimensat 304
-#define __NR_signalfd 305
-#define __NR_timerfd_create 306
-#define __NR_eventfd 307
-#define __NR_sync_file_range2 308
-#define __NR_fallocate 309
-#define __NR_subpage_prot 310
-#define __NR_timerfd_settime 311
-#define __NR_timerfd_gettime 312
-#define __NR_signalfd4 313
-#define __NR_eventfd2 314
-#define __NR_epoll_create1 315
-#define __NR_dup3 316
-#define __NR_pipe2 317
-#define __NR_inotify_init1 318
-#define __NR_perf_event_open 319
-#define __NR_preadv 320
-#define __NR_pwritev 321
-#define __NR_rt_tgsigqueueinfo 322
-#define __NR_fanotify_init 323
-#define __NR_fanotify_mark 324
-#define __NR_prlimit64 325
-#define __NR_socket 326
-#define __NR_bind 327
-#define __NR_connect 328
-#define __NR_listen 329
-#define __NR_accept 330
-#define __NR_getsockname 331
-#define __NR_getpeername 332
-#define __NR_socketpair 333
-#define __NR_send 334
-#define __NR_sendto 335
-#define __NR_recv 336
-#define __NR_recvfrom 337
-#define __NR_shutdown 338
-#define __NR_setsockopt 339
-#define __NR_getsockopt 340
-#define __NR_sendmsg 341
-#define __NR_recvmsg 342
-#define __NR_recvmmsg 343
-#define __NR_accept4 344
-#define __NR_name_to_handle_at 345
-#define __NR_open_by_handle_at 346
-#define __NR_clock_adjtime 347
-#define __NR_syncfs 348
-#define __NR_sendmmsg 349
-#define __NR_setns 350
-#define __NR_process_vm_readv 351
-#define __NR_process_vm_writev 352
-#define __NR_finit_module 353
-#define __NR_kcmp 354
-#define __NR_sched_setattr 355
-#define __NR_sched_getattr 356
-#define __NR_renameat2 357
-#define __NR_seccomp 358
-#define __NR_getrandom 359
-#define __NR_memfd_create 360
-#define __NR_bpf 361
-#define __NR_execveat 362
-#define __NR_switch_endian 363
-#define __NR_userfaultfd 364
-#define __NR_membarrier 365
-#define __NR_mlock2 378
-#define __NR_copy_file_range 379
-#define __NR_preadv2 380
-#define __NR_pwritev2 381
-#define __NR_kexec_file_load 382
-#define __NR_statx 383
-#define __NR_pkey_alloc 384
-#define __NR_pkey_free 385
-#define __NR_pkey_mprotect 386
-#define __NR_rseq 387
-#define __NR_io_pgetevents 388
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
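Taken together with the NR_syscalls change earlier, the hand-maintained defines removed above are now emitted by the syscall-table machinery. Based on the numbers deleted here, the generated asm/unistd_64.h should come out roughly like this (a sketch, not the actual generated file; the __KERNEL__ guard is an assumption based on other architectures):

    #define __NR_restart_syscall      0
    #define __NR_exit                 1
    /* ... one define per syscall table entry ... */
    #define __NR_io_pgetevents      388

    #ifdef __KERNEL__
    #define __NR_syscalls           389
    #endif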
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 53d4b8d5b54d..cb7f0bb9ee71 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -69,7 +69,7 @@ obj-$(CONFIG_FA_DUMP) += fadump.o
ifdef CONFIG_PPC32
obj-$(CONFIG_E500) += idle_e500.o
endif
-obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
+obj-$(CONFIG_PPC_BOOK3S_32) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
ifdef CONFIG_FSL_BOOKE
@@ -160,16 +160,6 @@ extra-$(CONFIG_ALTIVEC) += vector.o
extra-$(CONFIG_PPC64) += entry_64.o
extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o
-extra-y += systbl_chk.i
-$(obj)/systbl.o: systbl_chk
-
-quiet_cmd_systbl_chk = CALL $<
- cmd_systbl_chk = $(CONFIG_SHELL) $< $(obj)/systbl_chk.i
-
-PHONY += systbl_chk
-systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
- $(call cmd,systbl_chk)
-
ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
$(obj)/built-in.a: prom_init_check
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index b4241ed1456e..6dfceaa820e4 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -232,20 +232,12 @@ static int btext_initialize(struct device_node *np)
int __init btext_find_display(int allow_nonstdout)
{
- const char *name;
- struct device_node *np = NULL;
+ struct device_node *np = of_stdout;
int rc = -ENODEV;
- name = of_get_property(of_chosen, "linux,stdout-path", NULL);
- if (name != NULL) {
- np = of_find_node_by_path(name);
- if (np != NULL) {
- if (strcmp(np->type, "display") != 0) {
- printk("boot stdout isn't a display !\n");
- of_node_put(np);
- np = NULL;
- }
- }
+ if (!of_node_is_type(np, "display")) {
+ printk("boot stdout isn't a display !\n");
+ np = NULL;
}
if (np)
rc = btext_initialize(np);
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index be57bd07596d..53102764fd2f 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -428,7 +428,7 @@ static void link_cache_lists(struct cache *smaller, struct cache *bigger)
static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
WARN_ON_ONCE(cache->level != 1);
- WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
+ WARN_ON_ONCE(!of_node_is_type(cache->ofnode, "cpu"));
}
static void do_subsidiary_caches(struct cache *cache)
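This conversion, like the btext.c one above, relies on of_node_is_type() being NULL-safe; its semantics are roughly the following (paraphrased from include/linux/of.h, so treat the exact body as approximate):

    /* True only if np is non-NULL, has a "device_type" property, and that
     * property matches type - which is why callers can drop their own checks. */
    static inline bool of_node_is_type(const struct device_node *np,
                                       const char *type)
    {
            const char *t = of_get_property(np, "device_type", NULL);

            return np && t && type && !strcmp(t, type);
    }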
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index fa3c2c91290c..8c069e96c478 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -326,7 +326,7 @@ _GLOBAL(__save_cpu_setup)
lis r5,cpu_state_storage@h
ori r5,r5,cpu_state_storage@l
- /* Save HID0 (common to all CONFIG_6xx cpus) */
+ /* Save HID0 (common to all CONFIG_PPC_BOOK3S_32 cpus) */
mfspr r3,SPRN_HID0
stw r3,CS_HID0(r5)
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 8d142e5d84cd..5fbc890d1094 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -17,7 +17,7 @@
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
-#include <asm/mmu-book3e.h>
+#include <asm/nohash/mmu-book3e.h>
#include <asm/asm-offsets.h>
#include <asm/mpc85xx.h>
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 2da01340c84c..1eab54bc6ee9 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1141,6 +1141,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check = machine_check_generic,
.platform = "ppc603",
},
+#ifdef CONFIG_PPC_83xx
{ /* e300c1 (a 603e core, plus some) on 83xx */
.pvr_mask = 0x7fff0000,
.pvr_value = 0x00830000,
@@ -1151,7 +1152,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.platform = "ppc603",
},
{ /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */
@@ -1165,7 +1166,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.platform = "ppc603",
},
{ /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */
@@ -1179,7 +1180,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e300",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
@@ -1196,12 +1197,13 @@ static struct cpu_spec __initdata cpu_specs[] = {
.icache_bsize = 32,
.dcache_bsize = 32,
.cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
+ .machine_check = machine_check_83xx,
.num_pmcs = 4,
.oprofile_cpu_type = "ppc/e300",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
.platform = "ppc603",
},
+#endif
{ /* default match, we assume split I/D cache & TB (non-601)... */
.pvr_mask = 0x00000000,
.pvr_value = 0x00000000,
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 5ebacf0fe41a..9c9bcaae2f75 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -6,7 +6,6 @@
* busses using the iommu infrastructure
*/
-#include <linux/export.h>
#include <asm/iommu.h>
/*
@@ -117,4 +116,3 @@ struct dma_map_ops dma_iommu_ops = {
.unmap_page = dma_iommu_unmap_page,
.get_required_mask = dma_iommu_get_required_mask,
};
-EXPORT_SYMBOL(dma_iommu_ops);
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 430a7d0aa2cb..7d5fc9751622 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -107,12 +107,8 @@ int __init swiotlb_setup_bus_notifier(void)
void __init swiotlb_detect_4g(void)
{
- if ((memblock_end_of_DRAM() - 1) > 0xffffffff) {
+ if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
ppc_swiotlb_enable = 1;
-#ifdef CONFIG_ZONE_DMA32
- limit_zone_pfn(ZONE_DMA32, (1ULL << 32) >> PAGE_SHIFT);
-#endif
- }
}
static int __init check_swiotlb_enabled(void)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index dbfc7056d7df..b1903ebb2e9c 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -50,7 +50,8 @@ static int dma_nommu_dma_supported(struct device *dev, u64 mask)
return 1;
#ifdef CONFIG_FSL_SOC
- /* Freescale gets another chance via ZONE_DMA/ZONE_DMA32, however
+ /*
+ * Freescale gets another chance via ZONE_DMA, however
* that will have to be refined if/when they support iommus
*/
return 1;
@@ -62,18 +63,12 @@ static int dma_nommu_dma_supported(struct device *dev, u64 mask)
#endif
}
+#ifndef CONFIG_NOT_COHERENT_CACHE
void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
void *ret;
-#ifdef CONFIG_NOT_COHERENT_CACHE
- ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
- if (ret == NULL)
- return NULL;
- *dma_handle += get_dma_offset(dev);
- return ret;
-#else
struct page *page;
int node = dev_to_node(dev);
#ifdef CONFIG_FSL_SOC
@@ -94,13 +89,10 @@ void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
}
switch (zone) {
+#ifdef CONFIG_ZONE_DMA
case ZONE_DMA:
flag |= GFP_DMA;
break;
-#ifdef CONFIG_ZONE_DMA32
- case ZONE_DMA32:
- flag |= GFP_DMA32;
- break;
#endif
};
#endif /* CONFIG_FSL_SOC */
@@ -113,19 +105,15 @@ void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
*dma_handle = __pa(ret) + get_dma_offset(dev);
return ret;
-#endif
}
void __dma_nommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
-#ifdef CONFIG_NOT_COHERENT_CACHE
- __dma_free_coherent(size, vaddr);
-#else
free_pages((unsigned long)vaddr, get_order(size));
-#endif
}
+#endif /* !CONFIG_NOT_COHERENT_CACHE */
static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
@@ -210,10 +198,15 @@ static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
return nents;
}
-static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
unsigned long attrs)
{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}
static u64 dma_nommu_get_required_mask(struct device *dev)
@@ -247,6 +240,8 @@ static inline void dma_nommu_unmap_page(struct device *dev,
enum dma_data_direction direction,
unsigned long attrs)
{
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ __dma_sync(bus_to_virt(dma_address), size, direction);
}
#ifdef CONFIG_NOT_COHERENT_CACHE
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 6cae6b56ffd6..3230137469ab 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1808,10 +1808,10 @@ static int eeh_freeze_dbgfs_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
- eeh_enable_dbgfs_set, "0x%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(eeh_freeze_dbgfs_ops, eeh_freeze_dbgfs_get,
- eeh_freeze_dbgfs_set, "0x%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(eeh_enable_dbgfs_ops, eeh_enable_dbgfs_get,
+ eeh_enable_dbgfs_set, "0x%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(eeh_freeze_dbgfs_ops, eeh_freeze_dbgfs_get,
+ eeh_freeze_dbgfs_set, "0x%llx\n");
#endif
static int __init eeh_init_proc(void)
@@ -1819,12 +1819,12 @@ static int __init eeh_init_proc(void)
if (machine_is(pseries) || machine_is(powernv)) {
proc_create_single("powerpc/eeh", 0, NULL, proc_eeh_show);
#ifdef CONFIG_DEBUG_FS
- debugfs_create_file("eeh_enable", 0600,
- powerpc_debugfs_root, NULL,
- &eeh_enable_dbgfs_ops);
- debugfs_create_file("eeh_max_freezes", 0600,
- powerpc_debugfs_root, NULL,
- &eeh_freeze_dbgfs_ops);
+ debugfs_create_file_unsafe("eeh_enable", 0600,
+ powerpc_debugfs_root, NULL,
+ &eeh_enable_dbgfs_ops);
+ debugfs_create_file_unsafe("eeh_max_freezes", 0600,
+ powerpc_debugfs_root, NULL,
+ &eeh_freeze_dbgfs_ops);
#endif
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 9446248eb6b8..99eab7bc7edc 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -60,7 +60,7 @@ static int eeh_result_priority(enum pci_ers_result result)
}
};
-const char *pci_ers_result_name(enum pci_ers_result result)
+static const char *pci_ers_result_name(enum pci_ers_result result)
{
switch (result) {
case PCI_ERS_RESULT_NONE:
diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
index 61c9356bf9c9..227e57f980df 100644
--- a/arch/powerpc/kernel/eeh_event.c
+++ b/arch/powerpc/kernel/eeh_event.c
@@ -35,7 +35,7 @@
*/
static DEFINE_SPINLOCK(eeh_eventlist_lock);
-static struct semaphore eeh_eventlist_sem;
+static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);
/**
@@ -55,7 +55,7 @@ static int eeh_event_handler(void * dummy)
struct eeh_pe *pe;
while (!kthread_should_stop()) {
- if (down_interruptible(&eeh_eventlist_sem))
+ if (wait_for_completion_interruptible(&eeh_eventlist_event))
break;
/* Fetch EEH event from the queue */
@@ -102,9 +102,6 @@ int eeh_event_init(void)
struct task_struct *t;
int ret = 0;
- /* Initialize semaphore */
- sema_init(&eeh_eventlist_sem, 0);
-
t = kthread_run(eeh_event_handler, NULL, "eehd");
if (IS_ERR(t)) {
ret = PTR_ERR(t);
@@ -142,7 +139,7 @@ int eeh_send_failure_event(struct eeh_pe *pe)
spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
/* For the EEH daemon to kick in */
- up(&eeh_eventlist_sem);
+ complete(&eeh_eventlist_event);
return 0;
}
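The semaphore-to-completion conversion above preserves the one-wake-up-per-event counting behaviour; a condensed sketch of the resulting pattern (real kernel APIs, simplified out of context):

    static DECLARE_COMPLETION(event_ready);

    static int consumer_thread(void *unused)
    {
            while (!kthread_should_stop()) {
                    /* Sleeps until complete() is called; returns nonzero
                     * if interrupted by a signal. */
                    if (wait_for_completion_interruptible(&event_ready))
                            break;
                    /* ... dequeue and handle one event ... */
            }
            return 0;
    }

    static void producer(void)
    {
            /* ... add the event to the list under the lock ... */
            complete(&event_ready);         /* one wake-up per queued event */
    }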
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 77decded1175..0768dfd8a64e 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -200,14 +200,14 @@ transfer_to_handler:
cmplw r1,r9 /* if r1 <= ksp_limit */
ble- stack_ovf /* then the kernel stack overflowed */
5:
-#if defined(CONFIG_6xx) || defined(CONFIG_E500)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
CURRENT_THREAD_INFO(r9, r1)
tophys(r9,r9) /* check local flags */
lwz r12,TI_LOCAL_FLAGS(r9)
mtcrf 0x01,r12
bt- 31-TLF_NAPPING,4f
bt- 31-TLF_SLEEPING,7f
-#endif /* CONFIG_6xx || CONFIG_E500 */
+#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
@@ -273,7 +273,7 @@ reenable_mmu: /* re-enable mmu so we can */
RFI /* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */
-#if defined (CONFIG_6xx) || defined(CONFIG_E500)
+#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
4: rlwinm r12,r12,0,~_TLF_NAPPING
stw r12,TI_LOCAL_FLAGS(r9)
b power_save_ppc32_restore
@@ -612,7 +612,7 @@ ppc_swapcontext:
handle_page_fault:
stw r4,_DAR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
andis. r0,r5,DSISR_DABRMATCH@h
bne- handle_dabr_fault
#endif
@@ -629,7 +629,7 @@ handle_page_fault:
bl bad_page_fault
b ret_from_except_full
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
SAVE_NVGPRS(r1)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 7b1693adff2a..435927f549c4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -54,6 +54,9 @@
SYS_CALL_TABLE:
.tc sys_call_table[TC],sys_call_table
+COMPAT_SYS_CALL_TABLE:
+ .tc compat_sys_call_table[TC],compat_sys_call_table
+
/* This value is used to mark exception frames on the stack. */
exception_marker:
.tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
@@ -80,6 +83,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
std r0,GPR0(r1)
std r10,GPR1(r1)
beq 2f /* if from kernel mode */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+ BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+#endif
ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2: std r2,GPR2(r1)
std r3,GPR3(r1)
@@ -173,7 +181,7 @@ system_call: /* label this so stack traces look sane */
ld r11,SYS_CALL_TABLE@toc(2)
andis. r10,r10,_TIF_32BIT@h
beq 15f
- addi r11,r11,8 /* use 32-bit syscall entries */
+ ld r11,COMPAT_SYS_CALL_TABLE@toc(2)
clrldi r3,r3,32
clrldi r4,r4,32
clrldi r5,r5,32
@@ -181,7 +189,7 @@ system_call: /* label this so stack traces look sane */
clrldi r7,r7,32
clrldi r8,r8,32
15:
- slwi r0,r0,4
+ slwi r0,r0,3
barrier_nospec_asm
/*
@@ -286,6 +294,10 @@ BEGIN_FTR_SECTION
HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ std r8, PACATMSCRATCH(r13)
+#endif
+
ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
ld r2,GPR2(r1)
ld r1,GPR1(r1)
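The slwi change from 4 to 3 falls out of the table split: the old layout interleaved a native and a compat pointer per syscall (16 bytes per entry, so index << 4, with +8 selecting the compat slot), while each new table holds a single 8-byte pointer per entry (index << 3). The same arithmetic in C, purely for illustration:

    typedef long (*syscall_fn)(long, long, long, long, long, long);
    extern syscall_fn sys_call_table[];
    extern syscall_fn compat_sys_call_table[];

    static syscall_fn lookup(unsigned long nr, int is_32bit_task)
    {
            /* Before: addr = base + (nr << 4) + (is_32bit_task ? 8 : 0) */
            syscall_fn *base = is_32bit_task ? compat_sys_call_table
                                             : sys_call_table;
            return base[nr];        /* offset = nr << 3 with 8-byte pointers */
    }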
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 6d6e144a28ce..afb638778f44 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -296,7 +296,8 @@ ret_from_mc_except:
andi. r10,r11,MSR_PR; /* save stack pointer */ \
beq 1f; /* branch around if supervisor */ \
ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\
-1: cmpdi cr1,r1,0; /* check if SP makes sense */ \
+1: type##_BTB_FLUSH \
+ cmpdi cr1,r1,0; /* check if SP makes sense */ \
bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */
@@ -328,6 +329,29 @@ ret_from_mc_except:
#define SPRN_MC_SRR0 SPRN_MCSRR0
#define SPRN_MC_SRR1 SPRN_MCSRR1
+#ifdef CONFIG_PPC_FSL_BOOK3E
+#define GEN_BTB_FLUSH \
+ START_BTB_FLUSH_SECTION \
+ beq 1f; \
+ BTB_FLUSH(r10) \
+ 1: \
+ END_BTB_FLUSH_SECTION
+
+#define CRIT_BTB_FLUSH \
+ START_BTB_FLUSH_SECTION \
+ BTB_FLUSH(r10) \
+ END_BTB_FLUSH_SECTION
+
+#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
+#define MC_BTB_FLUSH CRIT_BTB_FLUSH
+#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
+#else
+#define GEN_BTB_FLUSH
+#define CRIT_BTB_FLUSH
+#define DBG_BTB_FLUSH
+#define GDBELL_BTB_FLUSH
+#endif
+
#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 89d32bb79d5e..9e253ce27e08 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -995,7 +995,16 @@ EXC_COMMON_BEGIN(h_data_storage_common)
bl save_nvgprs
RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_MMU_FTR_SECTION
+ ld r4,PACA_EXGEN+EX_DAR(r13)
+ lwz r5,PACA_EXGEN+EX_DSISR(r13)
+ std r4,_DAR(r1)
+ std r5,_DSISR(r1)
+ li r5,SIGSEGV
+ bl bad_page_fault
+MMU_FTR_SECTION_ELSE
bl unknown_exception
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
b ret_from_except
@@ -1031,7 +1040,7 @@ TRAMP_REAL_BEGIN(hmi_exception_early)
EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
EXCEPTION_PROLOG_COMMON_3(0xe60)
addi r3,r1,STACK_FRAME_OVERHEAD
- BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */
+ BRANCH_LINK_TO_FAR(DOTSYM(hmi_exception_realmode)) /* Function call ABI */
cmpdi cr0,r3,0
/* Windup the stack. */
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 761b28b1427d..45a8d0be1c96 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -35,6 +35,7 @@
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
+#include <linux/cma.h>
#include <asm/debugfs.h>
#include <asm/page.h>
@@ -46,6 +47,9 @@
static struct fw_dump fw_dump;
static struct fadump_mem_struct fdm;
static const struct fadump_mem_struct *fdm_active;
+#ifdef CONFIG_CMA
+static struct cma *fadump_cma;
+#endif
static DEFINE_MUTEX(fadump_mutex);
struct fad_crash_memory_ranges *crash_memory_ranges;
@@ -53,6 +57,67 @@ int crash_memory_ranges_size;
int crash_mem_ranges;
int max_crash_mem_ranges;
+#ifdef CONFIG_CMA
+/*
+ * fadump_cma_init() - Initialize CMA area from fadump reserved memory
+ *
+ * This function initializes a CMA area from fadump reserved memory.
+ * The total size of fadump reserved memory covers boot memory size
+ * + cpu data size + hpte size and metadata.
+ * Initialize only the area equivalent to boot memory size for CMA use.
+ * The remaining portion of fadump reserved memory will not be given to
+ * CMA and pages for those will stay reserved. Boot memory size is
+ * aligned per CMA requirement to satisfy the cma_init_reserved_mem() call.
+ * Even if that call fails, we still have the memory reservation with us
+ * and we can still continue doing fadump.
+ */
+int __init fadump_cma_init(void)
+{
+ unsigned long long base, size;
+ int rc;
+
+ if (!fw_dump.fadump_enabled)
+ return 0;
+
+ /*
+ * Do not use CMA if the user has provided the fadump=nocma kernel parameter.
+ * Return 1 to continue with the old fadump behaviour.
+ */
+ if (fw_dump.nocma)
+ return 1;
+
+ base = fw_dump.reserve_dump_area_start;
+ size = fw_dump.boot_memory_size;
+
+ if (!size)
+ return 0;
+
+ rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma);
+ if (rc) {
+ pr_err("Failed to init cma area for firmware-assisted dump,%d\n", rc);
+ /*
+ * Though the CMA init has failed, we still have the memory
+ * reservation with us. The reserved memory will be
+ * blocked from production system usage. Hence return 1,
+ * so that we can continue with fadump.
+ */
+ return 1;
+ }
+
+ /*
+ * So we have now successfully initialized the CMA area for fadump.
+ */
+ pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx "
+ "bytes of memory reserved for firmware-assisted dump\n",
+ cma_get_size(fadump_cma),
+ (unsigned long)cma_get_base(fadump_cma) >> 20,
+ fw_dump.reserve_dump_area_size);
+ return 1;
+}
+#else
+static int __init fadump_cma_init(void) { return 1; }
+#endif /* CONFIG_CMA */
+
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node,
const char *uname, int depth, void *data)
@@ -118,13 +183,19 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
/*
* If fadump is registered, check if the memory provided
- * falls within boot memory area.
+ * falls within boot memory area and reserved memory area.
*/
-int is_fadump_boot_memory_area(u64 addr, ulong size)
+int is_fadump_memory_area(u64 addr, ulong size)
{
+ u64 d_start = fw_dump.reserve_dump_area_start;
+ u64 d_end = d_start + fw_dump.reserve_dump_area_size;
+
if (!fw_dump.dump_registered)
return 0;
+ if (((addr + size) > d_start) && (addr <= d_end))
+ return 1;
+
return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size;
}
@@ -172,6 +243,35 @@ static int is_boot_memory_area_contiguous(void)
return ret;
}
+/*
+ * Returns true if there are no holes in the reserved memory area,
+ * false otherwise.
+ */
+static bool is_reserved_memory_area_contiguous(void)
+{
+ struct memblock_region *reg;
+ unsigned long start, end;
+ unsigned long d_start = fw_dump.reserve_dump_area_start;
+ unsigned long d_end = d_start + fw_dump.reserve_dump_area_size;
+
+ for_each_memblock(memory, reg) {
+ start = max(d_start, (unsigned long)reg->base);
+ end = min(d_end, (unsigned long)(reg->base + reg->size));
+ if (d_start < end) {
+ /* Memory hole from d_start to start */
+ if (start > d_start)
+ break;
+
+ if (end == d_end)
+ return true;
+
+ d_start = end + 1;
+ }
+ }
+
+ return false;
+}
+
/* Print firmware assisted dump configurations for debugging purpose. */
static void fadump_show_config(void)
{
@@ -378,8 +478,15 @@ int __init fadump_reserve_mem(void)
*/
if (fdm_active)
fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len);
- else
+ else {
fw_dump.boot_memory_size = fadump_calculate_reserve_size();
+#ifdef CONFIG_CMA
+ if (!fw_dump.nocma)
+ fw_dump.boot_memory_size =
+ ALIGN(fw_dump.boot_memory_size,
+ FADUMP_CMA_ALIGNMENT);
+#endif
+ }
/*
* Calculate the memory boundary.
@@ -426,8 +533,9 @@ int __init fadump_reserve_mem(void)
fw_dump.fadumphdr_addr =
be64_to_cpu(fdm_active->rmr_region.destination_address) +
be64_to_cpu(fdm_active->rmr_region.source_len);
- pr_debug("fadumphdr_addr = %p\n",
- (void *) fw_dump.fadumphdr_addr);
+ pr_debug("fadumphdr_addr = %pa\n", &fw_dump.fadumphdr_addr);
+ fw_dump.reserve_dump_area_start = base;
+ fw_dump.reserve_dump_area_size = size;
} else {
size = get_fadump_area_size();
@@ -455,10 +563,11 @@ int __init fadump_reserve_mem(void)
(unsigned long)(size >> 20),
(unsigned long)(base >> 20),
(unsigned long)(memblock_phys_mem_size() >> 20));
- }
- fw_dump.reserve_dump_area_start = base;
- fw_dump.reserve_dump_area_size = size;
+ fw_dump.reserve_dump_area_start = base;
+ fw_dump.reserve_dump_area_size = size;
+ return fadump_cma_init();
+ }
return 1;
}
@@ -477,6 +586,10 @@ static int __init early_fadump_param(char *p)
fw_dump.fadump_enabled = 1;
else if (strncmp(p, "off", 3) == 0)
fw_dump.fadump_enabled = 0;
+ else if (strncmp(p, "nocma", 5) == 0) {
+ fw_dump.fadump_enabled = 1;
+ fw_dump.nocma = 1;
+ }
return 0;
}
@@ -525,8 +638,10 @@ static int register_fw_dump(struct fadump_mem_struct *fdm)
break;
case -3:
if (!is_boot_memory_area_contiguous())
- pr_err("Can't have holes in boot memory area while "
- "registering fadump\n");
+ pr_err("Can't have holes in boot memory area while registering fadump\n");
+ else if (!is_reserved_memory_area_contiguous())
+ pr_err("Can't have holes in reserved memory area while"
+ " registering fadump\n");
printk(KERN_ERR "Failed to register firmware-assisted kernel"
" dump. Parameter Error(%d).\n", rc);
@@ -1229,7 +1344,7 @@ static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
return 0;
}
-static int fadump_invalidate_dump(struct fadump_mem_struct *fdm)
+static int fadump_invalidate_dump(const struct fadump_mem_struct *fdm)
{
int rc = 0;
unsigned int wait_time;
@@ -1260,9 +1375,8 @@ void fadump_cleanup(void)
{
/* Invalidate the registration only if dump is active. */
if (fw_dump.dump_active) {
- init_fadump_mem_struct(&fdm,
- be64_to_cpu(fdm_active->cpu_state_data.destination_address));
- fadump_invalidate_dump(&fdm);
+ /* pass the same memory dump structure provided by platform */
+ fadump_invalidate_dump(fdm_active);
} else if (fw_dump.dump_registered) {
/* Un-register Firmware-assisted dump if it was registered. */
fadump_unregister_dump(&fdm);
@@ -1531,17 +1645,7 @@ static struct kobj_attribute fadump_register_attr = __ATTR(fadump_registered,
0644, fadump_register_show,
fadump_register_store);
-static int fadump_region_open(struct inode *inode, struct file *file)
-{
- return single_open(file, fadump_region_show, inode->i_private);
-}
-
-static const struct file_operations fadump_region_fops = {
- .open = fadump_region_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(fadump_region);
static void fadump_init_files(void)
{
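The DEFINE_SHOW_ATTRIBUTE(fadump_region) line generates essentially the open handler and file_operations boilerplate deleted above from fadump_region_show; the macro expands roughly as follows (per include/linux/seq_file.h, quoted from memory, so treat as approximate):

    #define DEFINE_SHOW_ATTRIBUTE(__name)                                   \
    static int __name ## _open(struct inode *inode, struct file *file)     \
    {                                                                       \
            return single_open(file, __name ## _show, inode->i_private);   \
    }                                                                       \
                                                                            \
    static const struct file_operations __name ## _fops = {                \
            .owner          = THIS_MODULE,                                  \
            .open           = __name ## _open,                              \
            .read           = seq_read,                                    \
            .llseek         = seq_lseek,                                   \
            .release        = single_release,                              \
    }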
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 61ca27929355..05b08db3901d 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -176,10 +176,10 @@ __after_mmu_off:
bl reloc_offset
li r24,0 /* cpu# */
bl call_setup_cpu /* Call setup_cpu for this CPU */
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
bl reloc_offset
bl init_idle_6xx
-#endif /* CONFIG_6xx */
+#endif /* CONFIG_PPC_BOOK3S_32 */
/*
@@ -393,7 +393,9 @@ DataAccess:
bne 1f /* if not, try to put a PTE */
mfspr r4,SPRN_DAR /* into the hash table */
rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
+BEGIN_MMU_FTR_SECTION
bl hash_page
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
1: lwz r5,_DSISR(r11) /* get DSISR value */
mfspr r4,SPRN_DAR
EXC_XFER_LITE(0x300, handle_page_fault)
@@ -408,7 +410,9 @@ InstructionAccess:
beq 1f /* if so, try to put a PTE */
li r3,0 /* into the hash table */
mr r4,r12 /* SRR0 is fault address */
+BEGIN_MMU_FTR_SECTION
bl hash_page
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
1: mr r4,r12
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
EXC_XFER_LITE(0x400, handle_page_fault)
@@ -499,7 +503,7 @@ InstructionTLBMiss:
lis r1,PAGE_OFFSET@h /* check if kernel address */
cmplw 0,r1,r3
mfspr r2,SPRN_SPRG_THREAD
- li r1,_PAGE_USER|_PAGE_PRESENT /* low addresses tested as user */
+ li r1,_PAGE_USER|_PAGE_PRESENT|_PAGE_EXEC /* low addresses tested as user */
lwz r2,PGDIR(r2)
bge- 112f
mfspr r2,SPRN_SRR1 /* and MSR_PR bit from SRR1 */
@@ -836,10 +840,10 @@ __secondary_start:
lis r3,-KERNELBASE@h
mr r4,r24
bl call_setup_cpu /* Call setup_cpu for this CPU */
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
lis r3,-KERNELBASE@h
bl init_idle_6xx
-#endif /* CONFIG_6xx */
+#endif /* CONFIG_PPC_BOOK3S_32 */
/* get current_thread_info and current */
lis r1,secondary_ti@ha
@@ -880,14 +884,14 @@ __secondary_start:
/*
* Those generic dummy functions are kept for CPUs not
- * included in CONFIG_6xx
+ * included in CONFIG_PPC_BOOK3S_32
*/
-#if !defined(CONFIG_6xx)
+#if !defined(CONFIG_PPC_BOOK3S_32)
_ENTRY(__save_cpu_setup)
blr
_ENTRY(__restore_cpu_setup)
blr
-#endif /* !defined(CONFIG_6xx) */
+#endif /* !defined(CONFIG_PPC_BOOK3S_32) */
/*
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 37e4a7cf0065..bf23c19c92d6 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -40,6 +40,7 @@
#include <asm/ptrace.h>
#include <asm/synch.h>
#include <asm/export.h>
+#include <asm/code-patching-asm.h>
#include "head_booke.h"
@@ -382,10 +383,9 @@ interrupt_base:
/* Increment, rollover, and store TLB index */
addi r13,r13,1
+ patch_site 0f, patch__tlb_44x_hwater_D
/* Compare with watermark (instruction gets patched) */
- .globl tlb_44x_patch_hwater_D
-tlb_44x_patch_hwater_D:
- cmpwi 0,r13,1 /* reserve entries */
+0: cmpwi 0,r13,1 /* reserve entries */
ble 5f
li r13,0
5:
@@ -478,10 +478,9 @@ tlb_44x_patch_hwater_D:
/* Increment, rollover, and store TLB index */
addi r13,r13,1
+ patch_site 0f, patch__tlb_44x_hwater_I
/* Compare with watermark (instruction gets patched) */
- .globl tlb_44x_patch_hwater_I
-tlb_44x_patch_hwater_I:
- cmpwi 0,r13,1 /* reserve entries */
+0: cmpwi 0,r13,1 /* reserve entries */
ble 5f
li r13,0
5:
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 3b67b9533c82..57deb1e9ffea 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -106,6 +106,23 @@ turn_on_mmu:
mtspr SPRN_SRR0,r0
rfi /* enables MMU */
+
+#ifdef CONFIG_PERF_EVENTS
+ .align 4
+
+ .globl itlb_miss_counter
+itlb_miss_counter:
+ .space 4
+
+ .globl dtlb_miss_counter
+dtlb_miss_counter:
+ .space 4
+
+ .globl instruction_counter
+instruction_counter:
+ .space 4
+#endif
+
/*
* Exception entry code. This code runs with address translation
* turned off, i.e. using physical addresses.
@@ -149,6 +166,9 @@ turn_on_mmu:
li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR); /* can take exceptions */ \
mtmsr r10; \
stw r0,GPR0(r11); \
+ lis r10, STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */ \
+ addi r10, r10, STACK_FRAME_REGS_MARKER@l; \
+ stw r10, 8(r11); \
SAVE_4GPRS(3, r11); \
SAVE_2GPRS(7, r11)
@@ -275,7 +295,7 @@ SystemCall:
. = 0x1100
/*
* For the MPC8xx, this is a software tablewalk to load the instruction
- * TLB. The task switch loads the M_TW register with the pointer to the first
+ * TLB. The task switch loads the M_TWB register with the pointer to the first
* level table.
* If we discover there is no second level table (value is zero) or if there
* is an invalid pte, we load that into the TLB, which causes another fault
@@ -285,186 +305,154 @@ SystemCall:
*/
#ifdef CONFIG_8xx_CPU15
-#define INVALIDATE_ADJACENT_PAGES_CPU15(tmp, addr) \
- addi tmp, addr, PAGE_SIZE; \
- tlbie tmp; \
- addi tmp, addr, -PAGE_SIZE; \
- tlbie tmp
+#define INVALIDATE_ADJACENT_PAGES_CPU15(addr) \
+ addi addr, addr, PAGE_SIZE; \
+ tlbie addr; \
+ addi addr, addr, -(PAGE_SIZE << 1); \
+ tlbie addr; \
+ addi addr, addr, PAGE_SIZE
#else
-#define INVALIDATE_ADJACENT_PAGES_CPU15(tmp, addr)
+#define INVALIDATE_ADJACENT_PAGES_CPU15(addr)
#endif
InstructionTLBMiss:
mtspr SPRN_SPRG_SCRATCH0, r10
+#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
mtspr SPRN_SPRG_SCRATCH1, r11
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
- mtspr SPRN_SPRG_SCRATCH2, r12
#endif
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
- INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
+ INVALIDATE_ADJACENT_PAGES_CPU15(r10)
+ mtspr SPRN_MD_EPN, r10
/* Only modules will cause ITLB Misses as we always
* pin the first 8MB of kernel memory */
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
- mfcr r12
-#endif
#ifdef ITLB_MISS_KERNEL
+ mfcr r11
#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
- andis. r11, r10, 0x8000 /* Address >= 0x80000000 */
+ cmpi cr0, r10, 0 /* Address >= 0x80000000 */
#else
- rlwinm r11, r10, 16, 0xfff8
- cmpli cr0, r11, PAGE_OFFSET@h
+ rlwinm r10, r10, 16, 0xfff8
+ cmpli cr0, r10, PAGE_OFFSET@h
#ifndef CONFIG_PIN_TLB_TEXT
/* It is assumed that kernel code fits into the first 8M page */
-0: cmpli cr7, r11, (PAGE_OFFSET + 0x0800000)@h
+0: cmpli cr7, r10, (PAGE_OFFSET + 0x0800000)@h
patch_site 0b, patch__itlbmiss_linmem_top
#endif
#endif
#endif
- mfspr r11, SPRN_M_TW /* Get level 1 table */
+ mfspr r10, SPRN_M_TWB /* Get level 1 table */
#ifdef ITLB_MISS_KERNEL
#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
- beq+ 3f
+ bge+ 3f
#else
blt+ 3f
#endif
#ifndef CONFIG_PIN_TLB_TEXT
blt cr7, ITLBMissLinear
#endif
- lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
+ rlwinm r10, r10, 0, 20, 31
+ oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
#endif
- /* Insert level 1 index */
- rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
- lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
+ lwz r10, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
+ mtspr SPRN_MI_TWC, r10 /* Set segment attributes */
- /* Extract level 2 index */
- rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
-#ifdef CONFIG_HUGETLB_PAGE
- mtcr r11
- bt- 28, 10f /* bit 28 = Large page (8M) */
- bt- 29, 20f /* bit 29 = Large page (8M or 512k) */
-#endif
- rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */
+ mtspr SPRN_MD_TWC, r10
+ mfspr r10, SPRN_MD_TWC
lwz r10, 0(r10) /* Get the pte */
-4:
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
- mtcr r12
+#ifdef ITLB_MISS_KERNEL
+ mtcr r11
#endif
- /* Load the MI_TWC with the attributes for this "segment." */
- mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
-
#ifdef CONFIG_SWAP
rlwinm r11, r10, 32-5, _PAGE_PRESENT
and r11, r11, r10
rlwimi r10, r11, 0, _PAGE_PRESENT
#endif
- li r11, RPN_PATTERN | 0x200
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 20 and 23 must be clear.
* Software indicator bits 22, 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
- rlwimi r11, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */
- rlwimi r10, r11, 0, 0x0ff0 /* Set 22, 24-27, clear 20,23 */
+ rlwimi r10, r10, 0, 0x0f00 /* Clear bits 20-23 */
+ rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */
+ ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
/* Restore registers */
0: mfspr r10, SPRN_SPRG_SCRATCH0
+#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
mfspr r11, SPRN_SPRG_SCRATCH1
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
- mfspr r12, SPRN_SPRG_SCRATCH2
#endif
rfi
patch_site 0b, patch__itlbmiss_exit_1
#ifdef CONFIG_PERF_EVENTS
patch_site 0f, patch__itlbmiss_perf
-0: lis r10, (itlb_miss_counter - PAGE_OFFSET)@ha
- lwz r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
- addi r11, r11, 1
- stw r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
-#endif
+0: lwz r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
+ addi r10, r10, 1
+ stw r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
mfspr r10, SPRN_SPRG_SCRATCH0
+#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
mfspr r11, SPRN_SPRG_SCRATCH1
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
- mfspr r12, SPRN_SPRG_SCRATCH2
#endif
rfi
-
-#ifdef CONFIG_HUGETLB_PAGE
-10: /* 8M pages */
-#ifdef CONFIG_PPC_16K_PAGES
- /* Extract level 2 index */
- rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
- /* Add level 2 base */
- rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
-#else
- /* Level 2 base */
- rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK
#endif
- lwz r10, 0(r10) /* Get the pte */
- b 4b
-20: /* 512k pages */
- /* Extract level 2 index */
- rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
- /* Add level 2 base */
- rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
- lwz r10, 0(r10) /* Get the pte */
- b 4b
+#ifndef CONFIG_PIN_TLB_TEXT
+ITLBMissLinear:
+ mtcr r11
+ /* Set 8M byte page and mark it valid */
+ li r11, MI_PS8MEG | MI_SVALID
+ mtspr SPRN_MI_TWC, r11
+ rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */
+ ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
+ _PAGE_PRESENT
+ mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
+
+0: mfspr r10, SPRN_SPRG_SCRATCH0
+ mfspr r11, SPRN_SPRG_SCRATCH1
+ rfi
+ patch_site 0b, patch__itlbmiss_exit_2
#endif
. = 0x1200
DataStoreTLBMiss:
mtspr SPRN_SPRG_SCRATCH0, r10
mtspr SPRN_SPRG_SCRATCH1, r11
- mtspr SPRN_SPRG_SCRATCH2, r12
- mfcr r12
+ mfcr r11
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
mfspr r10, SPRN_MD_EPN
- rlwinm r11, r10, 16, 0xfff8
- cmpli cr0, r11, PAGE_OFFSET@h
- mfspr r11, SPRN_M_TW /* Get level 1 table */
- blt+ 3f
- rlwinm r11, r10, 16, 0xfff8
+ rlwinm r10, r10, 16, 0xfff8
+ cmpli cr0, r10, PAGE_OFFSET@h
#ifndef CONFIG_PIN_TLB_IMMR
- cmpli cr0, r11, VIRT_IMMR_BASE@h
+ cmpli cr6, r10, VIRT_IMMR_BASE@h
#endif
-0: cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+0: cmpli cr7, r10, (PAGE_OFFSET + 0x1800000)@h
patch_site 0b, patch__dtlbmiss_linmem_top
+
+ mfspr r10, SPRN_M_TWB /* Get level 1 table */
+ blt+ 3f
#ifndef CONFIG_PIN_TLB_IMMR
-0: beq- DTLBMissIMMR
+0: beq- cr6, DTLBMissIMMR
patch_site 0b, patch__dtlbmiss_immr_jmp
#endif
blt cr7, DTLBMissLinear
- lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
+ rlwinm r10, r10, 0, 20, 31
+ oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
-
- /* Insert level 1 index */
- rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
- lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
-
- /* We have a pte table, so load fetch the pte from the table.
- */
- /* Extract level 2 index */
- rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
-#ifdef CONFIG_HUGETLB_PAGE
mtcr r11
- bt- 28, 10f /* bit 28 = Large page (8M) */
- bt- 29, 20f /* bit 29 = Large page (8M or 512k) */
-#endif
- rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */
+ lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
+
+ mtspr SPRN_MD_TWC, r11
+ mfspr r10, SPRN_MD_TWC
lwz r10, 0(r10) /* Get the pte */
-4:
- mtcr r12
/* Insert the Guarded flag into the TWC from the Linux PTE.
* It is bit 27 of both the Linux PTE and the TWC (at least
@@ -503,44 +491,55 @@ DataStoreTLBMiss:
0: mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r12, SPRN_SPRG_SCRATCH2
rfi
patch_site 0b, patch__dtlbmiss_exit_1
#ifdef CONFIG_PERF_EVENTS
patch_site 0f, patch__dtlbmiss_perf
-0: lis r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
- lwz r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
- addi r11, r11, 1
- stw r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
-#endif
+0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
+ addi r10, r10, 1
+ stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r12, SPRN_SPRG_SCRATCH2
rfi
-
-#ifdef CONFIG_HUGETLB_PAGE
-10: /* 8M pages */
- /* Extract level 2 index */
-#ifdef CONFIG_PPC_16K_PAGES
- rlwinm r10, r10, 32 - (PAGE_SHIFT_8M - PAGE_SHIFT), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
- /* Add level 2 base */
- rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
-#else
- /* Level 2 base */
- rlwinm r10, r11, 0, ~HUGEPD_SHIFT_MASK
#endif
- lwz r10, 0(r10) /* Get the pte */
- b 4b
-20: /* 512k pages */
- /* Extract level 2 index */
- rlwinm r10, r10, 32 - (PAGE_SHIFT_512K - PAGE_SHIFT), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
- /* Add level 2 base */
- rlwimi r10, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
- lwz r10, 0(r10) /* Get the pte */
- b 4b
-#endif
+DTLBMissIMMR:
+ mtcr r11
+ /* Set 512k byte guarded page and mark it valid */
+ li r10, MD_PS512K | MD_GUARDED | MD_SVALID
+ mtspr SPRN_MD_TWC, r10
+ mfspr r10, SPRN_IMMR /* Get current IMMR */
+ rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
+ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
+ _PAGE_PRESENT | _PAGE_NO_CACHE
+ mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
+
+ li r11, RPN_PATTERN
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+
+0: mfspr r10, SPRN_SPRG_SCRATCH0
+ mfspr r11, SPRN_SPRG_SCRATCH1
+ rfi
+ patch_site 0b, patch__dtlbmiss_exit_2
+
+DTLBMissLinear:
+ mtcr r11
+ /* Set 8M byte page and mark it valid */
+ li r11, MD_PS8MEG | MD_SVALID
+ mtspr SPRN_MD_TWC, r11
+ rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */
+ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
+ _PAGE_PRESENT
+ mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
+
+ li r11, RPN_PATTERN
+ mtspr SPRN_DAR, r11 /* Tag DAR */
+
+0: mfspr r10, SPRN_SPRG_SCRATCH0
+ mfspr r11, SPRN_SPRG_SCRATCH1
+ rfi
+ patch_site 0b, patch__dtlbmiss_exit_3
/* This is an instruction TLB error on the MPC8xx. This could be due
* to many reasons, such as executing guarded memory or illegal instruction
@@ -625,16 +624,13 @@ DataBreakpoint:
. = 0x1d00
InstructionBreakpoint:
mtspr SPRN_SPRG_SCRATCH0, r10
- mtspr SPRN_SPRG_SCRATCH1, r11
- lis r10, (instruction_counter - PAGE_OFFSET)@ha
- lwz r11, (instruction_counter - PAGE_OFFSET)@l(r10)
- addi r11, r11, -1
- stw r11, (instruction_counter - PAGE_OFFSET)@l(r10)
+ lwz r10, (instruction_counter - PAGE_OFFSET)@l(0)
+ addi r10, r10, -1
+ stw r10, (instruction_counter - PAGE_OFFSET)@l(0)
lis r10, 0xffff
ori r10, r10, 0x01
mtspr SPRN_COUNTA, r10
mfspr r10, SPRN_SPRG_SCRATCH0
- mfspr r11, SPRN_SPRG_SCRATCH1
rfi
#else
EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_EE)
@@ -644,67 +640,6 @@ InstructionBreakpoint:
. = 0x2000
-/*
- * Bottom part of DataStoreTLBMiss handlers for IMMR area and linear RAM.
- * not enough space in the DataStoreTLBMiss area.
- */
-DTLBMissIMMR:
- mtcr r12
- /* Set 512k byte guarded page and mark it valid */
- li r10, MD_PS512K | MD_GUARDED | MD_SVALID
- mtspr SPRN_MD_TWC, r10
- mfspr r10, SPRN_IMMR /* Get current IMMR */
- rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
- ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
- _PAGE_PRESENT | _PAGE_NO_CACHE
- mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
-
- li r11, RPN_PATTERN
- mtspr SPRN_DAR, r11 /* Tag DAR */
-
-0: mfspr r10, SPRN_SPRG_SCRATCH0
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r12, SPRN_SPRG_SCRATCH2
- rfi
- patch_site 0b, patch__dtlbmiss_exit_2
-
-DTLBMissLinear:
- mtcr r12
- /* Set 8M byte page and mark it valid */
- li r11, MD_PS8MEG | MD_SVALID
- mtspr SPRN_MD_TWC, r11
- rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
- ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
- _PAGE_PRESENT
- mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
-
- li r11, RPN_PATTERN
- mtspr SPRN_DAR, r11 /* Tag DAR */
-
-0: mfspr r10, SPRN_SPRG_SCRATCH0
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r12, SPRN_SPRG_SCRATCH2
- rfi
- patch_site 0b, patch__dtlbmiss_exit_3
-
-#ifndef CONFIG_PIN_TLB_TEXT
-ITLBMissLinear:
- mtcr r12
- /* Set 8M byte page and mark it valid */
- li r11, MI_PS8MEG | MI_SVALID
- mtspr SPRN_MI_TWC, r11
- rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
- ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
- _PAGE_PRESENT
- mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
-
-0: mfspr r10, SPRN_SPRG_SCRATCH0
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r12, SPRN_SPRG_SCRATCH2
- rfi
- patch_site 0b, patch__itlbmiss_exit_2
-#endif
-
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
* by decoding the registers used by the dcbx instruction and adding them.
* DAR is set to the calculated address.
@@ -712,12 +647,13 @@ ITLBMissLinear:
/* define if you don't want to use self modifying code */
#define NO_SELF_MODIFYING_CODE
FixupDAR:/* Entry point for dcbx workaround. */
- mtspr SPRN_SPRG_SCRATCH2, r10
+ mtspr SPRN_M_TW, r10
/* fetch instruction from memory. */
mfspr r10, SPRN_SRR0
+ mtspr SPRN_MD_EPN, r10
rlwinm r11, r10, 16, 0xfff8
cmpli cr0, r11, PAGE_OFFSET@h
- mfspr r11, SPRN_M_TW /* Get level 1 table */
+ mfspr r11, SPRN_M_TWB /* Get level 1 table */
blt+ 3f
rlwinm r11, r10, 16, 0xfff8
@@ -727,17 +663,17 @@ FixupDAR:/* Entry point for dcbx workaround. */
/* create physical page address from effective address */
tophys(r11, r10)
blt- cr7, 201f
- lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
- /* Insert level 1 index */
-3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
+ mfspr r11, SPRN_M_TWB /* Get level 1 table */
+ rlwinm r11, r11, 0, 20, 31
+ oris r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha
+3:
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
+ mtspr SPRN_MD_TWC, r11
mtcr r11
+ mfspr r11, SPRN_MD_TWC
+ lwz r11, 0(r11) /* Get the pte */
bt 28,200f /* bit 28 = Large page (8M) */
bt 29,202f /* bit 29 = Large page (8M or 512K) */
- rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */
- /* Insert level 2 index */
- rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
- lwz r11, 0(r11) /* Get the pte */
/* concat physical page address(r11) and page offset(r10) */
rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31
201: lwz r11,0(r11)
@@ -756,26 +692,15 @@ FixupDAR:/* Entry point for dcbx workaround. */
beq+ 142f
cmpwi cr0, r10, 1964 /* Is icbi? */
beq+ 142f
-141: mfspr r10,SPRN_SPRG_SCRATCH2
+141: mfspr r10,SPRN_M_TW
b DARFixed /* Nope, go back to normal TLB processing */
- /* concat physical page address(r11) and page offset(r10) */
200:
-#ifdef CONFIG_PPC_16K_PAGES
- rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1) - 1
- rlwimi r11, r10, 32 - (PAGE_SHIFT_8M - 2), 32 + PAGE_SHIFT_8M - (PAGE_SHIFT << 1), 29
-#else
- rlwinm r11, r10, 0, ~HUGEPD_SHIFT_MASK
-#endif
- lwz r11, 0(r11) /* Get the pte */
/* concat physical page address(r11) and page offset(r10) */
rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31
b 201b
202:
- rlwinm r11, r11, 0, 0, 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1) - 1
- rlwimi r11, r10, 32 - (PAGE_SHIFT_512K - 2), 32 + PAGE_SHIFT_512K - (PAGE_SHIFT << 1), 29
- lwz r11, 0(r11) /* Get the pte */
/* concat physical page address(r11) and page offset(r10) */
rlwimi r11, r10, 0, 32 - PAGE_SHIFT_512K, 31
b 201b
@@ -802,7 +727,7 @@ modified_instr:
bne+ 143f
subf r10,r0,r10 /* r10=r10-r0, only if reg RA is r0 */
143: mtdar r10 /* store faulting EA in DAR */
- mfspr r10,SPRN_SPRG_SCRATCH2
+ mfspr r10,SPRN_M_TW
b DARFixed /* Go back to normal TLB handling */
#else
mfctr r10
@@ -856,7 +781,7 @@ modified_instr:
mfdar r11
mtctr r11 /* restore ctr reg from DAR */
mtdar r10 /* save fault EA to DAR */
- mfspr r10,SPRN_SPRG_SCRATCH2
+ mfspr r10,SPRN_M_TW
b DARFixed /* Go back to normal TLB handling */
/* special handling for r10,r11 since these are modified already */
@@ -891,7 +816,7 @@ start_here:
lis r6, swapper_pg_dir@ha
tophys(r6,r6)
- mtspr SPRN_M_TW, r6
+ mtspr SPRN_M_TWB, r6
bl early_init /* We have to do this with MMU on */
@@ -1065,17 +990,3 @@ swapper_pg_dir:
*/
abatron_pteptrs:
.space 8
-
-#ifdef CONFIG_PERF_EVENTS
- .globl itlb_miss_counter
-itlb_miss_counter:
- .space 4
-
- .globl dtlb_miss_counter
-dtlb_miss_counter:
- .space 4
-
- .globl instruction_counter
-instruction_counter:
- .space 4
-#endif
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index d0862a100d29..15ac51072eb3 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -43,6 +43,9 @@
andi. r11, r11, MSR_PR; /* check whether user or kernel */\
mr r11, r1; \
beq 1f; \
+START_BTB_FLUSH_SECTION \
+ BTB_FLUSH(r11) \
+END_BTB_FLUSH_SECTION \
/* if from user, start at top of this thread's kernel stack */ \
lwz r11, THREAD_INFO-THREAD(r10); \
ALLOC_STACK_FRAME(r11, THREAD_SIZE); \
@@ -128,6 +131,9 @@
stw r9,_CCR(r8); /* save CR on stack */\
mfspr r11,exc_level_srr1; /* check whether user or kernel */\
DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
+START_BTB_FLUSH_SECTION \
+ BTB_FLUSH(r10) \
+END_BTB_FLUSH_SECTION \
andi. r11,r11,MSR_PR; \
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index e2750b856c8f..2386ce2a9c6e 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -453,6 +453,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
mfcr r13
stw r13, THREAD_NORMSAVE(3)(r10)
DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
+START_BTB_FLUSH_SECTION
+ mfspr r11, SPRN_SRR1
+ andi. r10,r11,MSR_PR
+ beq 1f
+ BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -547,6 +554,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
mfcr r13
stw r13, THREAD_NORMSAVE(3)(r10)
DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
+START_BTB_FLUSH_SECTION
+ mfspr r11, SPRN_SRR1
+ andi. r10,r11,MSR_PR
+ beq 1f
+ BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
+
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
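Both TLB-miss paths (and the head_booke.h entry macros above) gain the same guarded flush: SRR1 still holds the interrupted context's MSR at this point, so the added instructions test MSR_PR and only run BTB_FLUSH when the miss came from user mode, which is where branch-target injection matters. The test, restated as a C sketch (MSR_PR value as defined for 32-bit PowerPC; illustration only):

    #define MSR_PR 0x4000UL   /* problem-state (user mode) bit */

    static int should_flush_btb(unsigned long srr1)
    {
            return (srr1 & MSR_PR) != 0;   /* flush only on entry from user */
    }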
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ca7f73488c62..d0625480b59e 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -47,6 +47,7 @@
#include <asm/fadump.h>
#include <asm/vio.h>
#include <asm/tce.h>
+#include <asm/mmu_context.h>
#define DBG(...)
@@ -993,15 +994,19 @@ int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
}
EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
-long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
- unsigned long *hpa, enum dma_data_direction *direction)
+long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
+ unsigned long entry, unsigned long *hpa,
+ enum dma_data_direction *direction)
{
long ret;
+ unsigned long size = 0;
ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);
if (!ret && ((*direction == DMA_FROM_DEVICE) ||
- (*direction == DMA_BIDIRECTIONAL)))
+ (*direction == DMA_BIDIRECTIONAL)) &&
+ !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
+ &size))
SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
/* if (unlikely(ret))
@@ -1073,11 +1078,8 @@ void iommu_release_ownership(struct iommu_table *tbl)
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
-int iommu_add_device(struct device *dev)
+int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
{
- struct iommu_table *tbl;
- struct iommu_table_group_link *tgl;
-
/*
* The sysfs entries should be populated before
* binding the IOMMU group. If the sysfs entries aren't
@@ -1093,32 +1095,10 @@ int iommu_add_device(struct device *dev)
return -EBUSY;
}
- tbl = get_iommu_table_base(dev);
- if (!tbl) {
- pr_debug("%s: Skipping device %s with no tbl\n",
- __func__, dev_name(dev));
- return 0;
- }
-
- tgl = list_first_entry_or_null(&tbl->it_group_list,
- struct iommu_table_group_link, next);
- if (!tgl) {
- pr_debug("%s: Skipping device %s with no group\n",
- __func__, dev_name(dev));
- return 0;
- }
pr_debug("%s: Adding %s to iommu group %d\n",
- __func__, dev_name(dev),
- iommu_group_id(tgl->table_group->group));
-
- if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
- pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
- __func__, IOMMU_PAGE_SIZE(tbl),
- PAGE_SIZE, dev_name(dev));
- return -EINVAL;
- }
+ __func__, dev_name(dev), iommu_group_id(table_group->group));
- return iommu_group_add_device(tgl->table_group->group, dev);
+ return iommu_group_add_device(table_group->group, dev);
}
EXPORT_SYMBOL_GPL(iommu_add_device);
@@ -1138,31 +1118,4 @@ void iommu_del_device(struct device *dev)
iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_del_device);
-
-static int tce_iommu_bus_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
-
- switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- return iommu_add_device(dev);
- case BUS_NOTIFY_DEL_DEVICE:
- if (dev->iommu_group)
- iommu_del_device(dev);
- return 0;
- default:
- return 0;
- }
-}
-
-static struct notifier_block tce_iommu_bus_nb = {
- .notifier_call = tce_iommu_bus_notifier,
-};
-
-int __init tce_iommu_bus_notifier_init(void)
-{
- bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
- return 0;
-}
#endif /* CONFIG_IOMMU_API */
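With this change iommu_add_device() no longer digs the group out of the device's iommu_table via it_group_list; the caller hands in the iommu_table_group directly, and the bus notifier that used to drive it is deleted, presumably in favour of call sites that already know the group (those call sites are not shown in this hunk). A stand-alone sketch of the new calling contract, with stand-in types (hypothetical, for illustration):

    struct device;
    struct iommu_group;
    struct iommu_table_group { struct iommu_group *group; };

    /* The caller resolves the group; the helper only registers the device. */
    static int add_device_sketch(struct iommu_table_group *table_group,
                                 struct device *dev,
                                 int (*group_add)(struct iommu_group *, struct device *))
    {
            if (!table_group)
                    return 0;   /* mirrors the old "skip device with no group" path */
            return group_add(table_group->group, dev);
    }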
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index fda3ae48480c..0e7099da4f25 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -327,8 +327,7 @@ static int isa_bridge_notify(struct notifier_block *nb, unsigned long action,
/* Check if we have no ISA device, and this happens to be one,
* register it as such if it has an OF device
*/
- if (!isa_bridge_devnode && devnode && devnode->type &&
- !strcmp(devnode->type, "isa"))
+ if (!isa_bridge_devnode && of_node_is_type(devnode, "isa"))
isa_bridge_find_late(pdev, devnode);
return 0;
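of_node_is_type() folds the three checks of the old condition (non-NULL node, non-NULL type string, strcmp) into one NULL-safe helper. A minimal stand-in, assuming the type is stored as a plain string (the in-kernel helper actually reads the node's "device_type" property; sketch only):

    #include <string.h>

    struct device_node { const char *type; };   /* stand-in type */

    static int of_node_is_type_sketch(const struct device_node *np, const char *type)
    {
            return np && np->type && type && strcmp(np->type, type) == 0;
    }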
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 33b34a58fc62..7cea5978f21f 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -192,7 +192,7 @@ static int __init add_legacy_soc_port(struct device_node *np,
/* Add port, irq will be dealt with later. We passed a translated
* IO port value. It will be fixed up later along with the irq
*/
- if (tsi && !strcmp(tsi->type, "tsi-bridge"))
+ if (of_node_is_type(tsi, "tsi-bridge"))
return add_legacy_port(np, -1, UPIO_TSI, addr, addr,
0, legacy_port_flags, 0);
else
@@ -372,6 +372,8 @@ void __init find_legacy_serial_ports(void)
/* Now find out if one of these is our firmware console */
path = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ if (path == NULL)
+ path = of_get_property(of_chosen, "stdout-path", NULL);
if (path != NULL) {
stdout = of_find_node_by_path(path);
if (stdout)
@@ -398,8 +400,7 @@ void __init find_legacy_serial_ports(void)
/* Next, fill our array with ISA ports */
for_each_node_by_type(np, "serial") {
struct device_node *isa = of_get_parent(np);
- if (isa && (!strcmp(isa->name, "isa") ||
- !strcmp(isa->name, "lpc"))) {
+ if (of_node_name_eq(isa, "isa") || of_node_name_eq(isa, "lpc")) {
if (of_device_is_available(np)) {
index = add_legacy_isa_port(np, isa);
if (index >= 0 && np == stdout)
@@ -413,11 +414,12 @@ void __init find_legacy_serial_ports(void)
/* Next, try to locate PCI ports */
for (np = NULL; (np = of_find_all_nodes(np));) {
struct device_node *pci, *parent = of_get_parent(np);
- if (parent && !strcmp(parent->name, "isa")) {
+ if (of_node_name_eq(parent, "isa")) {
of_node_put(parent);
continue;
}
- if (strcmp(np->name, "serial") && strcmp(np->type, "serial")) {
+ if (!of_node_name_eq(np, "serial") &&
+ !of_node_is_type(np, "serial")) {
of_node_put(parent);
continue;
}
@@ -595,8 +597,10 @@ static int __init check_legacy_serial_console(void)
/* We are getting a weird phandle from OF ... */
/* ... So use the full path instead */
name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ if (name == NULL)
+ name = of_get_property(of_chosen, "stdout-path", NULL);
if (name == NULL) {
- DBG(" no linux,stdout-path !\n");
+ DBG(" no stdout-path !\n");
return -ENODEV;
}
prom_stdout = of_find_node_by_path(name);
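Both hunks add the same fallback: "linux,stdout-path" is the legacy property name, while plain "stdout-path" is the name current device trees use, so the code tries the old name first and falls back to the new one. The pattern, with a hypothetical lookup callback standing in for of_get_property():

    static const char *find_stdout_path(const char *(*get_prop)(const char *))
    {
            const char *path = get_prop("linux,stdout-path");  /* legacy name */
            if (path == NULL)
                    path = get_prop("stdout-path");            /* current DT name */
            return path;
    }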
diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
index c77e95e9b384..0d20c7ad40fa 100644
--- a/arch/powerpc/kernel/machine_kexec_file_64.c
+++ b/arch/powerpc/kernel/machine_kexec_file_64.c
@@ -24,7 +24,6 @@
#include <linux/slab.h>
#include <linux/kexec.h>
-#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <asm/ima.h>
@@ -47,59 +46,6 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
}
/**
- * arch_kexec_walk_mem - call func(data) for each unreserved memory block
- * @kbuf: Context info for the search. Also passed to @func.
- * @func: Function to call for each memory block.
- *
- * This function is used by kexec_add_buffer and kexec_locate_mem_hole
- * to find unreserved memory to load kexec segments into.
- *
- * Return: The memory walk will stop when func returns a non-zero value
- * and that value will be returned. If all free regions are visited without
- * func returning non-zero, then zero will be returned.
- */
-int arch_kexec_walk_mem(struct kexec_buf *kbuf,
- int (*func)(struct resource *, void *))
-{
- int ret = 0;
- u64 i;
- phys_addr_t mstart, mend;
- struct resource res = { };
-
- if (kbuf->top_down) {
- for_each_free_mem_range_reverse(i, NUMA_NO_NODE, 0,
- &mstart, &mend, NULL) {
- /*
- * In memblock, end points to the first byte after the
- * range while in kexec, end points to the last byte
- * in the range.
- */
- res.start = mstart;
- res.end = mend - 1;
- ret = func(&res, kbuf);
- if (ret)
- break;
- }
- } else {
- for_each_free_mem_range(i, NUMA_NO_NODE, 0, &mstart, &mend,
- NULL) {
- /*
- * In memblock, end points to the first byte after the
- * range while in kexec, end points to the last byte
- * in the range.
- */
- res.start = mstart;
- res.end = mend - 1;
- ret = func(&res, kbuf);
- if (ret)
- break;
- }
- }
-
- return ret;
-}
-
-/**
* setup_purgatory - initialize the purgatory's global variables
* @image: kexec image.
* @slave_code: Slave code for the purgatory.
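The override can go because the generic kexec_file code gained a memblock-based walker (outside this hunk), making the arch copy redundant. Its one subtlety survives in the generic version too: memblock ranges are half-open [start, end) while struct resource is inclusive [start, end]. The conversion the removed loops performed, as a sketch:

    /* Half-open memblock range -> inclusive resource range. */
    static void memblock_to_resource(unsigned long mstart, unsigned long mend,
                                     unsigned long *res_start, unsigned long *res_end)
    {
            *res_start = mstart;
            *res_end = mend - 1;   /* memblock end is the first byte past the range */
    }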
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 695b24a2d954..57d2ffb2d45c 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -153,7 +153,7 @@ _GLOBAL(call_setup_cpu)
mtctr r5
bctr
-#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
+#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_PPC_BOOK3S_32)
/* This gets called by via-pmu.c to switch the PLL selection
* on 750fx CPU. This function should really be moved to some
@@ -223,7 +223,7 @@ _GLOBAL(low_choose_7447a_dfs)
mtmsr r7
blr
-#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
+#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */
/*
* complement mask on the msr then "or" some values on.
diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
index dab616a33b8d..f2197654be07 100644
--- a/arch/powerpc/kernel/msi.c
+++ b/arch/powerpc/kernel/msi.c
@@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
- phb->controller_ops.teardown_msi_irqs(dev);
+ /*
+ * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
+ * so check the pointer again.
+ */
+ if (phb->controller_ops.teardown_msi_irqs)
+ phb->controller_ops.teardown_msi_irqs(dev);
}
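The fix is the standard guard for optional method pointers: arch_setup_msi_irqs() can bail out with -ENOSYS before any teardown hook is installed, yet the core may still run the teardown path, so the pointer must be re-checked. The shape of it, with stand-in types (sketch only):

    struct device;
    struct controller_ops {
            void (*teardown_msi_irqs)(struct device *dev);   /* optional hook */
    };

    static void teardown_sketch(struct controller_ops *ops, struct device *dev)
    {
            if (ops->teardown_msi_irqs)
                    ops->teardown_msi_irqs(dev);
    }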
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 22e9d281324d..38b03a330cd2 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -563,8 +563,6 @@ static int nvram_pstore_init(void)
nvram_pstore_info.buf = oops_data;
nvram_pstore_info.bufsize = oops_data_sz;
- spin_lock_init(&nvram_pstore_info.buf_lock);
-
rc = pstore_register(&nvram_pstore_info);
if (rc && (rc != -EPERM))
/* Print error only when pstore.backend == nvram */
@@ -809,6 +807,7 @@ static long dev_nvram_ioctl(struct file *file, unsigned int cmd,
#ifdef CONFIG_PPC_PMAC
case OBSOLETE_PMAC_NVRAM_GET_OFFSET:
printk(KERN_WARNING "nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n");
+ /* fall through */
case IOC_NVRAM_GET_OFFSET: {
int part, offset;
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 98f04725def7..24191ea2d9a7 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -125,16 +125,13 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_bus *bus, int devfn)
{
struct pci_dev *dev;
- const char *type;
dev = pci_alloc_dev(bus);
if (!dev)
return NULL;
- type = of_get_property(node, "device_type", NULL);
- if (type == NULL)
- type = "";
- pr_debug(" create device, devfn: %x, type: %s\n", devfn, type);
+ pr_debug(" create device, devfn: %x, type: %s\n", devfn,
+ of_node_get_device_type(node));
dev->dev.of_node = of_node_get(node);
dev->dev.parent = bus->bridge;
@@ -167,12 +164,12 @@ struct pci_dev *of_create_pci_dev(struct device_node *node,
/* Early fixups, before probing the BARs */
pci_fixup_device(pci_fixup_early, dev);
- if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
+ if (of_node_is_type(node, "pci") || of_node_is_type(node, "pciex")) {
/* a PCI-PCI bridge */
dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
dev->rom_base_reg = PCI_ROM_ADDRESS1;
set_pcie_hotplug_bridge(dev);
- } else if (!strcmp(type, "cardbus")) {
+ } else if (of_node_is_type(node, "cardbus")) {
dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
} else {
dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index 58eaa3ddf7b9..2de71faca911 100644
--- a/arch/powerpc/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -29,7 +29,7 @@ static void dummy_perf(struct pt_regs *regs)
{
#if defined(CONFIG_FSL_EMB_PERFMON)
mtpmr(PMRN_PMGC0, mfpmr(PMRN_PMGC0) & ~PMGC0_PMIE);
-#elif defined(CONFIG_PPC64) || defined(CONFIG_6xx)
+#elif defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
if (cur_cpu_spec->pmc_type == PPC_PMC_IBM)
mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~(MMCR0_PMXE|MMCR0_PMAO));
#else
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index fe758cedb93f..4181ec715f88 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -124,12 +124,12 @@ static void __init move_device_tree(void)
size = fdt_totalsize(initial_boot_params);
if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
- overlaps_crashkernel(start, size) ||
- overlaps_initrd(start, size)) {
+ !memblock_is_memory(start + size - 1) ||
+ overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
p = __va(memblock_phys_alloc(size, PAGE_SIZE));
memcpy(p, initial_boot_params, size);
initial_boot_params = p;
- DBG("Moved device tree to 0x%p\n", p);
+ DBG("Moved device tree to 0x%px\n", p);
}
DBG("<- move_device_tree\n");
@@ -689,7 +689,7 @@ void __init early_init_devtree(void *params)
{
phys_addr_t limit;
- DBG(" -> early_init_devtree(%p)\n", params);
+ DBG(" -> early_init_devtree(%px)\n", params);
/* Too early to BUG_ON(), do it by hand */
if (!early_init_dt_verify(params))
@@ -749,7 +749,7 @@ void __init early_init_devtree(void *params)
memblock_allow_resize();
memblock_dump_all();
- DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
+ DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size());
/* We may need to relocate the flat tree, do it now.
* FIXME .. and the initrd too? */
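The %p to %px switch is deliberate: since the kernel started hashing %p output, plain %p prints an obfuscated per-boot token, and %px is the opt-in for the raw address, which is what these early-boot DBG() lines need. The added (unsigned long long) cast keeps the %llx format correct whether phys_addr_t is 32 or 64 bits wide; a stand-alone sketch of that second point:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t phys_addr_t;   /* stand-in: may be 32-bit on some configs */

    static void print_size(phys_addr_t size)
    {
            /* the cast keeps %llx correct regardless of phys_addr_t's width */
            printf("Phys. mem: %llx\n", (unsigned long long)size);
    }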
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index afb819f4ca68..cdd5d1d3ae41 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -3263,27 +3263,40 @@ static inline int do_seccomp(struct pt_regs *regs) { return 0; }
*/
long do_syscall_trace_enter(struct pt_regs *regs)
{
+ u32 flags;
+
user_exit();
- if (test_thread_flag(TIF_SYSCALL_EMU)) {
- ptrace_report_syscall(regs);
- /*
- * Returning -1 will skip the syscall execution. We want to
- * avoid clobbering any register also, thus, not 'gotoing'
- * skip label.
- */
- return -1;
- }
+ flags = READ_ONCE(current_thread_info()->flags) &
+ (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
- /*
- * The tracer may decide to abort the syscall, if so tracehook
- * will return !0. Note that the tracer may also just change
- * regs->gpr[0] to an invalid syscall number, that is handled
- * below on the exit path.
- */
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs))
- goto skip;
+ if (flags) {
+ int rc = tracehook_report_syscall_entry(regs);
+
+ if (unlikely(flags & _TIF_SYSCALL_EMU)) {
+ /*
+ * A nonzero return code from
+ * tracehook_report_syscall_entry() tells us to prevent
+ * the syscall execution, but we are not going to
+ * execute it anyway.
+ *
+ * Returning -1 will skip the syscall execution. We want
+ * to avoid clobbering any registers, so we don't goto
+ * the skip label below.
+ */
+ return -1;
+ }
+
+ if (rc) {
+ /*
+ * The tracer decided to abort the syscall. Note that
+ * the tracer may also just change regs->gpr[0] to an
+ * invalid syscall number; that is handled below on the
+ * exit path.
+ */
+ goto skip;
+ }
+ }
/* Run seccomp after ptrace; allow it to set gpr[3]. */
if (do_seccomp(regs))
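Structurally, the rework reads the thread flags once up front instead of calling test_thread_flag() twice, so the EMU and TRACE decisions are made against a single consistent snapshot even if the flags change concurrently, and tracehook_report_syscall_entry() is invoked exactly once for either flag. The snapshot idiom, modeled with a volatile read standing in for READ_ONCE() (sketch only):

    static unsigned int flags_snapshot(const unsigned int *flags, unsigned int mask)
    {
            return *(const volatile unsigned int *)flags & mask;
    }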
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index f6f469fc4073..9b8631533e02 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -4,6 +4,7 @@
//
// Copyright 2018, Michael Ellerman, IBM Corporation.
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/seq_buf.h>
@@ -22,10 +23,14 @@ enum count_cache_flush_type {
COUNT_CACHE_FLUSH_SW = 0x2,
COUNT_CACHE_FLUSH_HW = 0x4,
};
-static enum count_cache_flush_type count_cache_flush_type;
+static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
bool barrier_nospec_enabled;
static bool no_nospec;
+static bool btb_flush_enabled;
+#ifdef CONFIG_PPC_FSL_BOOK3E
+static bool no_spectrev2;
+#endif
static void enable_barrier_nospec(bool enable)
{
@@ -101,6 +106,23 @@ static __init int barrier_nospec_debugfs_init(void)
device_initcall(barrier_nospec_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+static int __init handle_nospectre_v2(char *p)
+{
+ no_spectrev2 = true;
+
+ return 0;
+}
+early_param("nospectre_v2", handle_nospectre_v2);
+void setup_spectre_v2(void)
+{
+ if (no_spectrev2)
+ do_btb_flush_fixups();
+ else
+ btb_flush_enabled = true;
+}
+#endif /* CONFIG_PPC_FSL_BOOK3E */
+
#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -191,8 +213,11 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
seq_buf_printf(&s, "(hardware accelerated)");
- } else
+ } else if (btb_flush_enabled) {
+ seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
+ } else {
seq_buf_printf(&s, "Vulnerable");
+ }
seq_buf_printf(&s, "\n");
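setup_spectre_v2() thus implements a simple either/or: with "nospectre_v2" on the command line the BTB-flush sites are patched out via do_btb_flush_fixups(), otherwise the flag that the sysfs report above keys off is set. As a one-line decision sketch:

    /* 1 = mitigation active; "nospectre_v2" on the kernel command line disables it. */
    static int btb_flush_enabled_sketch(int no_spectrev2)
    {
            return !no_spectrev2;
    }

When active, /sys/devices/system/cpu/vulnerabilities/spectre_v2 reports "Mitigation: Branch predictor state flush", per the hunk above.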
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 93ee3703b42f..ca00fbb97cf8 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -687,7 +687,7 @@ int check_legacy_ioport(unsigned long base_port)
return ret;
parent = of_get_parent(np);
if (parent) {
- if (strcmp(parent->type, "isa") == 0)
+ if (of_node_is_type(parent, "isa"))
ret = 0;
of_node_put(parent);
}
@@ -800,7 +800,7 @@ static __init void print_system_info(void)
#ifdef CONFIG_PPC_BOOK3S_64
pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
#endif
-#ifdef CONFIG_PPC_STD_MMU_32
+#ifdef CONFIG_PPC_BOOK3S_32
pr_info("Hash_size = 0x%lx\n", Hash_size);
#endif
pr_info("phys_mem_size = 0x%llx\n",
@@ -830,7 +830,7 @@ static __init void print_system_info(void)
if (htab_hash_mask)
pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
#endif
-#ifdef CONFIG_PPC_STD_MMU_32
+#ifdef CONFIG_PPC_BOOK3S_32
if (Hash)
pr_info("Hash = 0x%p\n", Hash);
if (Hash_mask)
@@ -974,6 +974,7 @@ void __init setup_arch(char **cmdline_p)
ppc_md.setup_arch();
setup_barrier_nospec();
+ setup_spectre_v2();
paging_init();
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 81909600013a..947f904688b0 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -59,7 +59,6 @@ unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
-EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);
@@ -101,8 +100,7 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
*/
notrace void __init machine_init(u64 dt_ptr)
{
- unsigned int *addr = (unsigned int *)((unsigned long)&patch__memset_nocache +
- patch__memset_nocache);
+ unsigned int *addr = (unsigned int *)patch_site_addr(&patch__memset_nocache);
unsigned long insn;
/* Configure static keys first, now that we're relocated. */
@@ -240,7 +238,7 @@ void __init exc_lvl_early_init(void)
void __init setup_power_save(void)
{
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
cpu_has_feature(CPU_FTR_CAN_NAP))
ppc_md.power_save = ppc6xx_idle;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index e6474a45cef5..2d47cc79e5b3 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -470,9 +470,9 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
return 1;
if (sigret) {
- /* Set up the sigreturn trampoline: li r0,sigret; sc */
- if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
- || __put_user(0x44000002UL, &frame->tramp[1]))
+ /* Set up the sigreturn trampoline: li 0,sigret; sc */
+ if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
+ || __put_user(PPC_INST_SC, &frame->tramp[1]))
return 1;
flush_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[2]);
@@ -619,9 +619,9 @@ static int save_tm_user_regs(struct pt_regs *regs,
if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
return 1;
if (sigret) {
- /* Set up the sigreturn trampoline: li r0,sigret; sc */
- if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
- || __put_user(0x44000002UL, &frame->tramp[1]))
+ /* Set up the sigreturn trampoline: li 0,sigret; sc */
+ if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
+ || __put_user(PPC_INST_SC, &frame->tramp[1]))
return 1;
flush_icache_range((unsigned long) &frame->tramp[0],
(unsigned long) &frame->tramp[2]);
@@ -848,7 +848,23 @@ static long restore_tm_user_regs(struct pt_regs *regs,
/* If TM bits are set to the reserved value, it's an invalid context */
if (MSR_TM_RESV(msr_hi))
return 1;
- /* Pull in the MSR TM bits from the user context */
+
+ /*
+ * Disabling preemption, since it is unsafe to be preempted
+ * with MSR[TS] set without recheckpointing.
+ */
+ preempt_disable();
+
+ /*
+ * CAUTION:
+ * After regs->MSR[TS] being updated, make sure that get_user(),
+ * put_user() or similar functions are *not* called. These
+ * functions can generate page faults which will cause the process
+ * to be de-scheduled with MSR[TS] set but without calling
+ * tm_recheckpoint(). This can cause a bug.
+ *
+ * Pull in the MSR TM bits from the user context
+ */
regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
/* Now, recheckpoint. This loads up all of the checkpointed (older)
* registers, including FP and V[S]Rs. After recheckpointing, the
@@ -873,6 +889,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
}
#endif
+ preempt_enable();
+
return 0;
}
#endif
@@ -1140,11 +1158,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
{
struct rt_sigframe __user *rt_sf;
struct pt_regs *regs = current_pt_regs();
+ int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
struct ucontext __user *uc_transact;
unsigned long msr_hi;
unsigned long tmp;
- int tm_restore = 0;
#endif
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
@@ -1192,11 +1210,19 @@ SYSCALL_DEFINE0(rt_sigreturn)
goto bad;
}
}
- if (!tm_restore)
- /* Fall through, for non-TM restore */
+ if (!tm_restore) {
+ /*
+ * Unset regs->msr because the ucontext MSR TS is not
+ * set and recheckpoint was not called. This avoids
+ * hitting a TM Bad Thing at RFID.
+ */
+ regs->msr &= ~MSR_TS_MASK;
+ }
+ /* Fall through, for non-TM restore */
#endif
- if (do_setcontext(&rt_sf->uc, regs, 1))
- goto bad;
+ if (!tm_restore)
+ if (do_setcontext(&rt_sf->uc, regs, 1))
+ goto bad;
/*
* It's not clear whether or why it is desirable to save the
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 83d51bf586c7..0935fe6c282a 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -467,20 +467,6 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
if (MSR_TM_RESV(msr))
return -EINVAL;
- /* pull in MSR TS bits from user context */
- regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
-
- /*
- * Ensure that TM is enabled in regs->msr before we leave the signal
- * handler. It could be the case that (a) user disabled the TM bit
- * through the manipulation of the MSR bits in uc_mcontext or (b) the
- * TM bit was disabled because a sufficient number of context switches
- * happened whilst in the signal handler and load_tm overflowed,
- * disabling the TM bit. In either case we can end up with an illegal
- * TM state leading to a TM Bad Thing when we return to userspace.
- */
- regs->msr |= MSR_TM;
-
/* pull in MSR LE from user context */
regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
@@ -572,6 +558,34 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
tm_enable();
/* Make sure the transaction is marked as failed */
tsk->thread.tm_texasr |= TEXASR_FS;
+
+ /*
+ * Disabling preemption, since it is unsafe to be preempted
+ * with MSR[TS] set without recheckpointing.
+ */
+ preempt_disable();
+
+ /* pull in MSR TS bits from user context */
+ regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+
+ /*
+ * Ensure that TM is enabled in regs->msr before we leave the signal
+ * handler. It could be the case that (a) user disabled the TM bit
+ * through the manipulation of the MSR bits in uc_mcontext or (b) the
+ * TM bit was disabled because a sufficient number of context switches
+ * happened whilst in the signal handler and load_tm overflowed,
+ * disabling the TM bit. In either case we can end up with an illegal
+ * TM state leading to a TM Bad Thing when we return to userspace.
+ *
+ * CAUTION:
+ * After regs->MSR[TS] being updated, make sure that get_user(),
+ * put_user() or similar functions are *not* called. These
+ * functions can generate page faults which will cause the process
+ * to be de-scheduled with MSR[TS] set but without calling
+ * tm_recheckpoint(). This can cause a bug.
+ */
+ regs->msr |= MSR_TM;
+
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&tsk->thread);
@@ -585,6 +599,8 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
regs->msr |= MSR_VEC;
}
+ preempt_enable();
+
return err;
}
#endif
@@ -598,11 +614,12 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
long err = 0;
/* addi r1, r1, __SIGNAL_FRAMESIZE # Pop the dummy stackframe */
- err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]);
+ err |= __put_user(PPC_INST_ADDI | __PPC_RT(R1) | __PPC_RA(R1) |
+ (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]);
/* li r0, __NR_[rt_]sigreturn */
- err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]);
+ err |= __put_user(PPC_INST_ADDI | (syscall & 0xffff), &tramp[1]);
/* sc */
- err |= __put_user(0x44000002UL, &tramp[2]);
+ err |= __put_user(PPC_INST_SC, &tramp[2]);
/* Minimal traceback info */
for (i=TRAMP_TRACEBACK; i < TRAMP_SIZE ;i++)
@@ -740,11 +757,23 @@ SYSCALL_DEFINE0(rt_sigreturn)
&uc_transact->uc_mcontext))
goto badframe;
}
- else
- /* Fall through, for non-TM restore */
#endif
- if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
- goto badframe;
+ /* Fall through, for non-TM restore */
+ if (!MSR_TM_ACTIVE(msr)) {
+ /*
+ * Unset MSR[TS] on the thread regs since the MSR from the user
+ * context does not have TS active, and recheckpoint was
+ * not called because restore_tm_sigcontexts() was not
+ * called either.
+ *
+ * If not unsetting it, the code can RFID to userspace with
+ * MSR[TS] set, but without CPU in the proper state,
+ * causing a TM Bad Thing.
+ */
+ current->thread.regs->msr &= ~MSR_TS_MASK;
+ if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+ goto badframe;
+ }
if (restore_altstack(&uc->uc_stack))
goto badframe;
diff --git a/arch/powerpc/kernel/syscalls/Makefile b/arch/powerpc/kernel/syscalls/Makefile
new file mode 100644
index 000000000000..27b48954808d
--- /dev/null
+++ b/arch/powerpc/kernel/syscalls/Makefile
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
+ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+syscall := $(srctree)/$(src)/syscall.tbl
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+ '$(syshdr_abis_$(basetarget))' \
+ '$(syshdr_pfx_$(basetarget))' \
+ '$(syshdr_offset_$(basetarget))'
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
+ '$(systbl_abis_$(basetarget))' \
+ '$(systbl_abi_$(basetarget))' \
+ '$(systbl_offset_$(basetarget))'
+
+syshdr_abis_unistd_32 := common,nospu,32
+$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+ $(call if_changed,syshdr)
+
+syshdr_abis_unistd_64 := common,nospu,64
+$(uapi)/unistd_64.h: $(syscall) $(syshdr)
+ $(call if_changed,syshdr)
+
+systbl_abis_syscall_table_32 := common,nospu,32
+systbl_abi_syscall_table_32 := 32
+$(kapi)/syscall_table_32.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abis_syscall_table_64 := common,nospu,64
+systbl_abi_syscall_table_64 := 64
+$(kapi)/syscall_table_64.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abis_syscall_table_c32 := common,nospu,32
+systbl_abi_syscall_table_c32 := c32
+$(kapi)/syscall_table_c32.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abis_syscall_table_spu := common,spu
+systbl_abi_syscall_table_spu := spu
+$(kapi)/syscall_table_spu.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_32.h unistd_64.h
+kapisyshdr-y += syscall_table_32.h \
+ syscall_table_64.h \
+ syscall_table_c32.h \
+ syscall_table_spu.h
+
+targets += $(uapisyshdr-y) $(kapisyshdr-y)
+
+PHONY += all
+all: $(addprefix $(uapi)/,$(uapisyshdr-y))
+all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+ @:
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
new file mode 100644
index 000000000000..db3bbb8744af
--- /dev/null
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -0,0 +1,427 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for powerpc
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> can be common, spu, nospu, 64, or 32 for this file.
+#
+0 nospu restart_syscall sys_restart_syscall
+1 nospu exit sys_exit
+2 nospu fork ppc_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open compat_sys_open
+6 common close sys_close
+7 common waitpid sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 nospu execve sys_execve compat_sys_execve
+12 common chdir sys_chdir
+13 common time sys_time compat_sys_time
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown
+17 common break sys_ni_syscall
+18 32 oldstat sys_stat sys_ni_syscall
+18 64 oldstat sys_ni_syscall
+18 spu oldstat sys_ni_syscall
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid
+21 nospu mount sys_mount compat_sys_mount
+22 32 umount sys_oldumount
+22 64 umount sys_ni_syscall
+22 spu umount sys_ni_syscall
+23 common setuid sys_setuid
+24 common getuid sys_getuid
+25 common stime sys_stime compat_sys_stime
+26 nospu ptrace sys_ptrace compat_sys_ptrace
+27 common alarm sys_alarm
+28 32 oldfstat sys_fstat sys_ni_syscall
+28 64 oldfstat sys_ni_syscall
+28 spu oldfstat sys_ni_syscall
+29 nospu pause sys_pause
+30 nospu utime sys_utime compat_sys_utime
+31 common stty sys_ni_syscall
+32 common gtty sys_ni_syscall
+33 common access sys_access
+34 common nice sys_nice
+35 common ftime sys_ni_syscall
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times compat_sys_times
+44 common prof sys_ni_syscall
+45 common brk sys_brk
+46 common setgid sys_setgid
+47 common getgid sys_getgid
+48 nospu signal sys_signal
+49 common geteuid sys_geteuid
+50 common getegid sys_getegid
+51 nospu acct sys_acct
+52 nospu umount2 sys_umount
+53 common lock sys_ni_syscall
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common fcntl sys_fcntl compat_sys_fcntl
+56 common mpx sys_ni_syscall
+57 common setpgid sys_setpgid
+58 common ulimit sys_ni_syscall
+59 32 oldolduname sys_olduname
+59 64 oldolduname sys_ni_syscall
+59 spu oldolduname sys_ni_syscall
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 nospu ustat sys_ustat compat_sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 32 sigaction sys_sigaction compat_sys_sigaction
+67 64 sigaction sys_ni_syscall
+67 spu sigaction sys_ni_syscall
+68 common sgetmask sys_sgetmask
+69 common ssetmask sys_ssetmask
+70 common setreuid sys_setreuid
+71 common setregid sys_setregid
+72 32 sigsuspend sys_sigsuspend
+72 64 sigsuspend sys_ni_syscall
+72 spu sigsuspend sys_ni_syscall
+73 32 sigpending sys_sigpending compat_sys_sigpending
+73 64 sigpending sys_ni_syscall
+73 spu sigpending sys_ni_syscall
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit compat_sys_setrlimit
+76 32 getrlimit sys_old_getrlimit compat_sys_old_getrlimit
+76 64 getrlimit sys_ni_syscall
+76 spu getrlimit sys_ni_syscall
+77 common getrusage sys_getrusage compat_sys_getrusage
+78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 common settimeofday sys_settimeofday compat_sys_settimeofday
+80 common getgroups sys_getgroups
+81 common setgroups sys_setgroups
+82 32 select ppc_select sys_ni_syscall
+82 64 select sys_ni_syscall
+82 spu select sys_ni_syscall
+83 common symlink sys_symlink
+84 32 oldlstat sys_lstat sys_ni_syscall
+84 64 oldlstat sys_ni_syscall
+84 spu oldlstat sys_ni_syscall
+85 common readlink sys_readlink
+86 nospu uselib sys_uselib
+87 nospu swapon sys_swapon
+88 nospu reboot sys_reboot
+89 32 readdir sys_old_readdir compat_sys_old_readdir
+89 64 readdir sys_ni_syscall
+89 spu readdir sys_ni_syscall
+90 common mmap sys_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate compat_sys_truncate
+93 common ftruncate sys_ftruncate compat_sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+98 common profil sys_ni_syscall
+99 nospu statfs sys_statfs compat_sys_statfs
+100 nospu fstatfs sys_fstatfs compat_sys_fstatfs
+101 common ioperm sys_ni_syscall
+102 common socketcall sys_socketcall compat_sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer compat_sys_setitimer
+105 common getitimer sys_getitimer compat_sys_getitimer
+106 common stat sys_newstat compat_sys_newstat
+107 common lstat sys_newlstat compat_sys_newlstat
+108 common fstat sys_newfstat compat_sys_newfstat
+109 32 olduname sys_uname
+109 64 olduname sys_ni_syscall
+109 spu olduname sys_ni_syscall
+110 common iopl sys_ni_syscall
+111 common vhangup sys_vhangup
+112 common idle sys_ni_syscall
+113 common vm86 sys_ni_syscall
+114 common wait4 sys_wait4 compat_sys_wait4
+115 nospu swapoff sys_swapoff
+116 common sysinfo sys_sysinfo compat_sys_sysinfo
+117 nospu ipc sys_ipc compat_sys_ipc
+118 common fsync sys_fsync
+119 32 sigreturn sys_sigreturn compat_sys_sigreturn
+119 64 sigreturn sys_ni_syscall
+119 spu sigreturn sys_ni_syscall
+120 nospu clone ppc_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+123 common modify_ldt sys_ni_syscall
+124 common adjtimex sys_adjtimex compat_sys_adjtimex
+125 common mprotect sys_mprotect
+126 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
+126 64 sigprocmask sys_ni_syscall
+126 spu sigprocmask sys_ni_syscall
+127 common create_module sys_ni_syscall
+128 nospu init_module sys_init_module
+129 nospu delete_module sys_delete_module
+130 common get_kernel_syms sys_ni_syscall
+131 nospu quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_bdflush
+135 common sysfs sys_sysfs
+136 32 personality sys_personality ppc64_personality
+136 64 personality ppc64_personality
+136 spu personality ppc64_personality
+137 common afs_syscall sys_ni_syscall
+138 common setfsuid sys_setfsuid
+139 common setfsgid sys_setfsgid
+140 common _llseek sys_llseek
+141 common getdents sys_getdents compat_sys_getdents
+142 common _newselect sys_select compat_sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv compat_sys_readv
+146 common writev sys_writev compat_sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 nospu _sysctl sys_sysctl compat_sys_sysctl
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
+162 common nanosleep sys_nanosleep compat_sys_nanosleep
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid
+165 common getresuid sys_getresuid
+166 common query_module sys_ni_syscall
+167 common poll sys_poll
+168 common nfsservctl sys_ni_syscall
+169 common setresgid sys_setresgid
+170 common getresgid sys_getresgid
+171 common prctl sys_prctl
+172 nospu rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
+173 nospu rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+174 nospu rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+175 nospu rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+176 nospu rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+179 common pread64 sys_pread64 compat_sys_pread64
+180 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+181 common chown sys_chown
+182 common getcwd sys_getcwd
+183 common capget sys_capget
+184 common capset sys_capset
+185 nospu sigaltstack sys_sigaltstack compat_sys_sigaltstack
+186 32 sendfile sys_sendfile compat_sys_sendfile
+186 64 sendfile sys_sendfile64
+186 spu sendfile sys_sendfile64
+187 common getpmsg sys_ni_syscall
+188 common putpmsg sys_ni_syscall
+189 nospu vfork ppc_vfork
+190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
+191 common readahead sys_readahead compat_sys_readahead
+192 32 mmap2 sys_mmap2 compat_sys_mmap2
+193 32 truncate64 sys_truncate64 compat_sys_truncate64
+194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+195 32 stat64 sys_stat64
+196 32 lstat64 sys_lstat64
+197 32 fstat64 sys_fstat64
+198 nospu pciconfig_read sys_pciconfig_read
+199 nospu pciconfig_write sys_pciconfig_write
+200 nospu pciconfig_iobase sys_pciconfig_iobase
+201 common multiplexer sys_ni_syscall
+202 common getdents64 sys_getdents64
+203 common pivot_root sys_pivot_root
+204 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+205 common madvise sys_madvise
+206 common mincore sys_mincore
+207 common gettid sys_gettid
+208 common tkill sys_tkill
+209 common setxattr sys_setxattr
+210 common lsetxattr sys_lsetxattr
+211 common fsetxattr sys_fsetxattr
+212 common getxattr sys_getxattr
+213 common lgetxattr sys_lgetxattr
+214 common fgetxattr sys_fgetxattr
+215 common listxattr sys_listxattr
+216 common llistxattr sys_llistxattr
+217 common flistxattr sys_flistxattr
+218 common removexattr sys_removexattr
+219 common lremovexattr sys_lremovexattr
+220 common fremovexattr sys_fremovexattr
+221 common futex sys_futex compat_sys_futex
+222 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+223 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+# 224 unused
+225 common tuxcall sys_ni_syscall
+226 32 sendfile64 sys_sendfile64 compat_sys_sendfile64
+227 common io_setup sys_io_setup compat_sys_io_setup
+228 common io_destroy sys_io_destroy
+229 common io_getevents sys_io_getevents compat_sys_io_getevents
+230 common io_submit sys_io_submit compat_sys_io_submit
+231 common io_cancel sys_io_cancel
+232 nospu set_tid_address sys_set_tid_address
+233 common fadvise64 sys_fadvise64 ppc32_fadvise64
+234 nospu exit_group sys_exit_group
+235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+236 common epoll_create sys_epoll_create
+237 common epoll_ctl sys_epoll_ctl
+238 common epoll_wait sys_epoll_wait
+239 common remap_file_pages sys_remap_file_pages
+240 common timer_create sys_timer_create compat_sys_timer_create
+241 common timer_settime sys_timer_settime compat_sys_timer_settime
+242 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+243 common timer_getoverrun sys_timer_getoverrun
+244 common timer_delete sys_timer_delete
+245 common clock_settime sys_clock_settime compat_sys_clock_settime
+246 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
+247 common clock_getres sys_clock_getres compat_sys_clock_getres
+248 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+249 32 swapcontext ppc_swapcontext ppc32_swapcontext
+249 64 swapcontext ppc64_swapcontext
+249 spu swapcontext sys_ni_syscall
+250 common tgkill sys_tgkill
+251 common utimes sys_utimes compat_sys_utimes
+252 common statfs64 sys_statfs64 compat_sys_statfs64
+253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+254 32 fadvise64_64 ppc_fadvise64_64
+254 spu fadvise64_64 sys_ni_syscall
+255 common rtas sys_rtas
+256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall
+256 64 sys_debug_setcontext sys_ni_syscall
+256 spu sys_debug_setcontext sys_ni_syscall
+# 257 reserved for vserver
+258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages
+259 nospu mbind sys_mbind compat_sys_mbind
+260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+262 nospu mq_open sys_mq_open compat_sys_mq_open
+263 nospu mq_unlink sys_mq_unlink
+264 nospu mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
+265 nospu mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+266 nospu mq_notify sys_mq_notify compat_sys_mq_notify
+267 nospu mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+268 nospu kexec_load sys_kexec_load compat_sys_kexec_load
+269 nospu add_key sys_add_key
+270 nospu request_key sys_request_key
+271 nospu keyctl sys_keyctl compat_sys_keyctl
+272 nospu waitid sys_waitid compat_sys_waitid
+273 nospu ioprio_set sys_ioprio_set
+274 nospu ioprio_get sys_ioprio_get
+275 nospu inotify_init sys_inotify_init
+276 nospu inotify_add_watch sys_inotify_add_watch
+277 nospu inotify_rm_watch sys_inotify_rm_watch
+278 nospu spu_run sys_spu_run
+279 nospu spu_create sys_spu_create
+280 nospu pselect6 sys_pselect6 compat_sys_pselect6
+281 nospu ppoll sys_ppoll compat_sys_ppoll
+282 common unshare sys_unshare
+283 common splice sys_splice
+284 common tee sys_tee
+285 common vmsplice sys_vmsplice compat_sys_vmsplice
+286 common openat sys_openat compat_sys_openat
+287 common mkdirat sys_mkdirat
+288 common mknodat sys_mknodat
+289 common fchownat sys_fchownat
+290 common futimesat sys_futimesat compat_sys_futimesat
+291 32 fstatat64 sys_fstatat64
+291 64 newfstatat sys_newfstatat
+291 spu newfstatat sys_newfstatat
+292 common unlinkat sys_unlinkat
+293 common renameat sys_renameat
+294 common linkat sys_linkat
+295 common symlinkat sys_symlinkat
+296 common readlinkat sys_readlinkat
+297 common fchmodat sys_fchmodat
+298 common faccessat sys_faccessat
+299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+301 common move_pages sys_move_pages compat_sys_move_pages
+302 common getcpu sys_getcpu
+303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+304 common utimensat sys_utimensat compat_sys_utimensat
+305 common signalfd sys_signalfd compat_sys_signalfd
+306 common timerfd_create sys_timerfd_create
+307 common eventfd sys_eventfd
+308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2
+309 nospu fallocate sys_fallocate compat_sys_fallocate
+310 nospu subpage_prot sys_subpage_prot
+311 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
+312 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+313 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+314 common eventfd2 sys_eventfd2
+315 common epoll_create1 sys_epoll_create1
+316 common dup3 sys_dup3
+317 common pipe2 sys_pipe2
+318 nospu inotify_init1 sys_inotify_init1
+319 common perf_event_open sys_perf_event_open
+320 common preadv sys_preadv compat_sys_preadv
+321 common pwritev sys_pwritev compat_sys_pwritev
+322 nospu rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+323 nospu fanotify_init sys_fanotify_init
+324 nospu fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+325 common prlimit64 sys_prlimit64
+326 common socket sys_socket
+327 common bind sys_bind
+328 common connect sys_connect
+329 common listen sys_listen
+330 common accept sys_accept
+331 common getsockname sys_getsockname
+332 common getpeername sys_getpeername
+333 common socketpair sys_socketpair
+334 common send sys_send
+335 common sendto sys_sendto
+336 common recv sys_recv compat_sys_recv
+337 common recvfrom sys_recvfrom compat_sys_recvfrom
+338 common shutdown sys_shutdown
+339 common setsockopt sys_setsockopt compat_sys_setsockopt
+340 common getsockopt sys_getsockopt compat_sys_getsockopt
+341 common sendmsg sys_sendmsg compat_sys_sendmsg
+342 common recvmsg sys_recvmsg compat_sys_recvmsg
+343 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+344 common accept4 sys_accept4
+345 common name_to_handle_at sys_name_to_handle_at
+346 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+347 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+348 common syncfs sys_syncfs
+349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+350 common setns sys_setns
+351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
+352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+353 nospu finit_module sys_finit_module
+354 nospu kcmp sys_kcmp
+355 common sched_setattr sys_sched_setattr
+356 common sched_getattr sys_sched_getattr
+357 common renameat2 sys_renameat2
+358 common seccomp sys_seccomp
+359 common getrandom sys_getrandom
+360 common memfd_create sys_memfd_create
+361 common bpf sys_bpf
+362 nospu execveat sys_execveat compat_sys_execveat
+363 32 switch_endian sys_ni_syscall
+363 64 switch_endian ppc_switch_endian
+363 spu switch_endian sys_ni_syscall
+364 common userfaultfd sys_userfaultfd
+365 common membarrier sys_membarrier
+378 nospu mlock2 sys_mlock2
+379 nospu copy_file_range sys_copy_file_range
+380 common preadv2 sys_preadv2 compat_sys_preadv2
+381 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+382 nospu kexec_file_load sys_kexec_file_load
+383 nospu statx sys_statx
+384 nospu pkey_alloc sys_pkey_alloc
+385 nospu pkey_free sys_pkey_free
+386 nospu pkey_mprotect sys_pkey_mprotect
+387 nospu rseq sys_rseq
+388 nospu io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
diff --git a/arch/powerpc/kernel/syscalls/syscallhdr.sh b/arch/powerpc/kernel/syscalls/syscallhdr.sh
new file mode 100644
index 000000000000..c0a9a32937f1
--- /dev/null
+++ b/arch/powerpc/kernel/syscalls/syscallhdr.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_POWERPC_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry compat ; do
+ if [ -z "$offset" ]; then
+ printf "#define __NR_%s%s\t%s\n" \
+ "${prefix}" "${name}" "${nr}"
+ else
+ printf "#define __NR_%s%s\t(%s + %s)\n" \
+ "${prefix}" "${name}" "${offset}" "${nr}"
+ fi
+ nxt=$((nr+1))
+ done
+
+ printf "\n"
+ printf "#ifdef __KERNEL__\n"
+ printf "#define __NR_syscalls\t%s\n" "${nxt}"
+ printf "#endif\n"
+ printf "\n"
+ printf "#endif /* %s */" "${fileguard}"
+ printf "\n"
+) > "$out"
diff --git a/arch/powerpc/kernel/syscalls/syscalltbl.sh b/arch/powerpc/kernel/syscalls/syscalltbl.sh
new file mode 100644
index 000000000000..fd620490a542
--- /dev/null
+++ b/arch/powerpc/kernel/syscalls/syscalltbl.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+my_abi="$4"
+offset="$5"
+
+emit() {
+ t_nxt="$1"
+ t_nr="$2"
+ t_entry="$3"
+
+ while [ $t_nxt -lt $t_nr ]; do
+ printf "__SYSCALL(%s,sys_ni_syscall, )\n" "${t_nxt}"
+ t_nxt=$((t_nxt+1))
+ done
+ printf "__SYSCALL(%s,%s, )\n" "${t_nxt}" "${t_entry}"
+}
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ nxt=0
+ if [ -z "$offset" ]; then
+ offset=0
+ fi
+
+ while read nr abi name entry compat ; do
+ if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then
+ emit $((nxt+offset)) $((nr+offset)) $compat
+ else
+ emit $((nxt+offset)) $((nr+offset)) $entry
+ fi
+ nxt=$((nr+1))
+ done
+) > "$out"
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 755dc98a57ae..e8e93c2c7d03 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -457,7 +457,7 @@ static ssize_t __used \
#define HAS_PPC_PMC_CLASSIC 1
#define HAS_PPC_PMC_IBM 1
#define HAS_PPC_PMC_PA6T 1
-#elif defined(CONFIG_6xx)
+#elif defined(CONFIG_PPC_BOOK3S_32)
#define HAS_PPC_PMC_CLASSIC 1
#define HAS_PPC_PMC_IBM 1
#define HAS_PPC_PMC_G4 1
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 919a32746ede..23265a28740b 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -16,28 +16,6 @@
#include <asm/ppc_asm.h>
-#ifdef CONFIG_PPC64
-#define SYSCALL(func) .8byte DOTSYM(sys_##func),DOTSYM(sys_##func)
-#define COMPAT_SYS(func) .8byte DOTSYM(sys_##func),DOTSYM(compat_sys_##func)
-#define PPC_SYS(func) .8byte DOTSYM(ppc_##func),DOTSYM(ppc_##func)
-#define OLDSYS(func) .8byte DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
-#define SYS32ONLY(func) .8byte DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
-#define PPC64ONLY(func) .8byte DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall)
-#define SYSX(f, f3264, f32) .8byte DOTSYM(f),DOTSYM(f3264)
-#else
-#define SYSCALL(func) .long sys_##func
-#define COMPAT_SYS(func) .long sys_##func
-#define PPC_SYS(func) .long ppc_##func
-#define OLDSYS(func) .long sys_##func
-#define SYS32ONLY(func) .long sys_##func
-#define PPC64ONLY(func) .long sys_ni_syscall
-#define SYSX(f, f3264, f32) .long f32
-#endif
-#define SYSCALL_SPU(func) SYSCALL(func)
-#define COMPAT_SYS_SPU(func) COMPAT_SYS(func)
-#define COMPAT_SPU_NEW(func) COMPAT_SYS(func)
-#define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32)
-
.section .rodata,"a"
#ifdef CONFIG_PPC64
@@ -46,5 +24,21 @@
.globl sys_call_table
sys_call_table:
+#ifdef CONFIG_PPC64
+#define __SYSCALL(nr, entry, nargs) .8byte DOTSYM(entry)
+#include <asm/syscall_table_64.h>
+#undef __SYSCALL
+#else
+#define __SYSCALL(nr, entry, nargs) .long entry
+#include <asm/syscall_table_32.h>
+#undef __SYSCALL
+#endif
-#include <asm/systbl.h>
+#ifdef CONFIG_COMPAT
+.globl compat_sys_call_table
+compat_sys_call_table:
+#define compat_sys_sigsuspend sys_sigsuspend
+#define __SYSCALL(nr, entry, nargs) .8byte DOTSYM(entry)
+#include <asm/syscall_table_c32.h>
+#undef __SYSCALL
+#endif
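Each generated line is expanded by whichever local __SYSCALL definition is in scope when the header is included; __SYSCALL(3,sys_read, ), for instance, becomes ".8byte DOTSYM(sys_read)" in the 64-bit and compat tables and ".long sys_read" in the 32-bit table. The compat table re-includes syscall_table_c32.h under the 64-bit expansion, so its entries point at the compat_sys_* handlers wherever the table supplies one.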
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
deleted file mode 100644
index 4653258722ac..000000000000
--- a/arch/powerpc/kernel/systbl_chk.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * This file, when run through CPP produces a list of syscall numbers
- * in the order of systbl.h. That way we can check for gaps and syscalls
- * that are out of order.
- *
- * Unfortunately, we cannot check for the correct ordering of entries
- * using SYSX().
- *
- * Copyright © IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <asm/unistd.h>
-
-#define SYSCALL(func) __NR_##func
-#define COMPAT_SYS(func) __NR_##func
-#define PPC_SYS(func) __NR_##func
-#ifdef CONFIG_PPC64
-#define OLDSYS(func) -1
-#define SYS32ONLY(func) -1
-#define PPC64ONLY(func) __NR_##func
-#else
-#define OLDSYS(func) __NR_old##func
-#define SYS32ONLY(func) __NR_##func
-#define PPC64ONLY(func) -1
-#endif
-#define SYSX(f, f3264, f32) -1
-
-#define SYSCALL_SPU(func) SYSCALL(func)
-#define COMPAT_SYS_SPU(func) COMPAT_SYS(func)
-#define COMPAT_SPU_NEW(func) COMPAT_SYS(_new##func)
-#define SYSX_SPU(f, f3264, f32) SYSX(f, f3264, f32)
-
-/* Just insert a marker for ni_syscalls */
-#define __NR_ni_syscall -1
-
-/*
- * These are the known exceptions.
- * Hopefully, there will be no more.
- */
-#define __NR_llseek __NR__llseek
-#undef __NR_umount
-#define __NR_umount __NR_umount2
-#define __NR_old_getrlimit __NR_getrlimit
-#define __NR_newstat __NR_stat
-#define __NR_newlstat __NR_lstat
-#define __NR_newfstat __NR_fstat
-#define __NR_newuname __NR_uname
-#define __NR_sysctl __NR__sysctl
-#define __NR_olddebug_setcontext __NR_sys_debug_setcontext
-
-/* We call sys_ugetrlimit for syscall number __NR_getrlimit */
-#define getrlimit ugetrlimit
-
-START_TABLE
-#include <asm/systbl.h>
-END_TABLE NR_syscalls
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 4bf051d3e21e..29746dc28df5 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -107,7 +107,7 @@ static int is_b_op(unsigned int op)
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
- static int offset;
+ int offset;
offset = (op & 0x03fffffc);
/* make it signed */
@@ -950,7 +950,6 @@ int ftrace_disable_ftrace_graph_caller(void)
*/
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
- struct ftrace_graph_ent trace;
unsigned long return_hooker;
if (unlikely(ftrace_graph_is_dead()))
@@ -961,18 +960,8 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
return_hooker = ppc_function_entry(return_to_handler);
- trace.func = ip;
- trace.depth = current->curr_ret_stack + 1;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace))
- goto out;
-
- if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
- NULL) == -EBUSY)
- goto out;
-
- parent = return_hooker;
+ if (!function_graph_enter(parent, ip, 0, NULL))
+ parent = return_hooker;
out:
return parent;
}
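Two separate cleanups land here: find_bl_target() loses an accidental static on its scratch variable (the offset had static lifetime for no reason, which is also unsafe if the code runs concurrently), and the open-coded graph-entry sequence is replaced by the consolidated function_graph_enter() helper, which performs the same depth bookkeeping and -EBUSY handling internally. For reference, the sign extension find_bl_target() applies to the branch displacement, as a sketch:

    #include <stdint.h>

    /* Decode a PowerPC "bl" target: signed word displacement in bits 2..25. */
    static unsigned long bl_target_sketch(unsigned long ip, uint32_t op)
    {
            int32_t offset = op & 0x03fffffc;
            if (offset & 0x02000000)        /* sign bit of the LI field */
                    offset -= 0x04000000;   /* make it signed */
            return ip + offset;
    }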
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 9a86572db1ef..00af2c4febf4 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -1434,7 +1434,8 @@ void program_check_exception(struct pt_regs *regs)
goto bail;
} else {
printk(KERN_EMERG "Unexpected TM Bad Thing exception "
- "at %lx (msr 0x%lx)\n", regs->nip, regs->msr);
+ "at %lx (msr 0x%lx) tm_scratch=%llx\n",
+ regs->nip, regs->msr, get_paca()->tm_scratch);
die("Unrecoverable exception", regs, SIGABRT);
}
}
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 65b3bdb99f0b..7725a9714736 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -671,15 +671,18 @@ static void __init vdso_setup_syscall_map(void)
{
unsigned int i;
extern unsigned long *sys_call_table;
+#ifdef CONFIG_PPC64
+ extern unsigned long *compat_sys_call_table;
+#endif
extern unsigned long sys_ni_syscall;
for (i = 0; i < NR_syscalls; i++) {
#ifdef CONFIG_PPC64
- if (sys_call_table[i*2] != sys_ni_syscall)
+ if (sys_call_table[i] != sys_ni_syscall)
vdso_data->syscall_map_64[i >> 5] |=
0x80000000UL >> (i & 0x1f);
- if (sys_call_table[i*2+1] != sys_ni_syscall)
+ if (compat_sys_call_table[i] != sys_ni_syscall)
vdso_data->syscall_map_32[i >> 5] |=
0x80000000UL >> (i & 0x1f);
#else /* CONFIG_PPC64 */
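
The vdso map updates above set one bit per syscall in an array of 32-bit
words, counted from the most significant bit. The arithmetic in isolation
(helper name hypothetical):

/* Set bit i: word i/32, bit (i mod 32) from the MSB down. For i = 35,
 * map[1] |= 0x80000000UL >> 3, i.e. the fourth bit from the top of word 1. */
static void map_set(u32 *map, unsigned int i)
{
	map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
}
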
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 434581bcd5b4..ad1c77f71f54 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -170,6 +170,14 @@ SECTIONS
}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+#ifdef CONFIG_PPC_FSL_BOOK3E
+ . = ALIGN(8);
+ __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
+ __start__btb_flush_fixup = .;
+ *(__btb_flush_fixup)
+ __stop__btb_flush_fixup = .;
+ }
+#endif
EXCEPTION_TABLE(0)
NOTES :kernel :notes
@@ -206,12 +214,6 @@ SECTIONS
.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
INIT_DATA
- __vtop_table_begin = .;
- KEEP(*(.vtop_fixup));
- __vtop_table_end = .;
- __ptov_table_begin = .;
- KEEP(*(.ptov_fixup));
- __ptov_table_end = .;
}
.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
@@ -308,6 +310,10 @@ SECTIONS
#ifdef CONFIG_PPC32
.data : AT(ADDR(.data) - LOAD_OFFSET) {
DATA_DATA
+#ifdef CONFIG_UBSAN
+ *(.data..Lubsan_data*)
+ *(.data..Lubsan_type*)
+#endif
*(.data.rel*)
*(SDATA_MAIN)
*(.sdata2)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index fd9893bc7aa1..bd1a677dd9e4 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -830,9 +830,10 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
void kvmppc_core_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new)
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
- kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
+ kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
@@ -850,9 +851,10 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
+ return 0;
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c615617e78ac..6f2d2fb4e098 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -743,12 +743,15 @@ void kvmppc_rmap_reset(struct kvm *kvm)
srcu_idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots) {
+ /* Mutual exclusion with kvm_unmap_hva_range etc. */
+ spin_lock(&kvm->mmu_lock);
/*
* This assumes it is acceptable to lose reference and
* change bits across a reset.
*/
memset(memslot->arch.rmap, 0,
memslot->npages * sizeof(*memslot->arch.rmap));
+ spin_unlock(&kvm->mmu_lock);
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
}
@@ -896,11 +899,12 @@ void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
gfn = memslot->base_gfn;
rmapp = memslot->arch.rmap;
+ if (kvm_is_radix(kvm)) {
+ kvmppc_radix_flush_memslot(kvm, memslot);
+ return;
+ }
+
for (n = memslot->npages; n; --n, ++gfn) {
- if (kvm_is_radix(kvm)) {
- kvm_unmap_radix(kvm, memslot, gfn);
- continue;
- }
/*
* Testing the present bit without locking is OK because
* the memslot has been marked invalid already, and hence
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index d68162ee159b..fb88167a402a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -29,6 +29,103 @@
*/
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
+unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
+ gva_t eaddr, void *to, void *from,
+ unsigned long n)
+{
+ unsigned long quadrant, ret = n;
+ int old_pid, old_lpid;
+ bool is_load = !!to;
+
+ /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
+ if (kvmhv_on_pseries())
+ return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
+ __pa(to), __pa(from), n);
+
+ quadrant = 1;
+ if (!pid)
+ quadrant = 2;
+ if (is_load)
+ from = (void *) (eaddr | (quadrant << 62));
+ else
+ to = (void *) (eaddr | (quadrant << 62));
+
+ preempt_disable();
+
+ /* switch the lpid first to avoid running host with unallocated pid */
+ old_lpid = mfspr(SPRN_LPID);
+ if (old_lpid != lpid)
+ mtspr(SPRN_LPID, lpid);
+ if (quadrant == 1) {
+ old_pid = mfspr(SPRN_PID);
+ if (old_pid != pid)
+ mtspr(SPRN_PID, pid);
+ }
+ isync();
+
+ pagefault_disable();
+ if (is_load)
+ ret = raw_copy_from_user(to, from, n);
+ else
+ ret = raw_copy_to_user(to, from, n);
+ pagefault_enable();
+
+ /* switch the pid first to avoid running host with unallocated pid */
+ if (quadrant == 1 && pid != old_pid)
+ mtspr(SPRN_PID, old_pid);
+ if (lpid != old_lpid)
+ mtspr(SPRN_LPID, old_lpid);
+ isync();
+
+ preempt_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);
+
+static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
+ void *to, void *from, unsigned long n)
+{
+ int lpid = vcpu->kvm->arch.lpid;
+ int pid = vcpu->arch.pid;
+
+ /* This would cause a data segment interrupt so don't allow the access */
+ if (eaddr & (0x3FFUL << 52))
+ return -EINVAL;
+
+ /* Should we be using the nested lpid? */
+ if (vcpu->arch.nested)
+ lpid = vcpu->arch.nested->shadow_lpid;
+
+ /* If accessing quadrant 3 then pid is expected to be 0 */
+ if (((eaddr >> 62) & 0x3) == 0x3)
+ pid = 0;
+
+ eaddr &= ~(0xFFFUL << 52);
+
+ return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
+}
+
+long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
+ unsigned long n)
+{
+ long ret;
+
+ ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
+ if (ret > 0)
+ memset(to + (n - ret), 0, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);
+
+long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
+ unsigned long n)
+{
+ return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
+}
+EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);
+
int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, u64 root,
u64 *pte_ret_p)
@@ -197,8 +294,8 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return 0;
}
-static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
- unsigned int pshift, unsigned int lpid)
+void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
+ unsigned int pshift, unsigned int lpid)
{
unsigned long psize = PAGE_SIZE;
int psi;
@@ -284,7 +381,8 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
/* Called with kvm->mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
- unsigned int shift, struct kvm_memory_slot *memslot,
+ unsigned int shift,
+ const struct kvm_memory_slot *memslot,
unsigned int lpid)
{
@@ -683,6 +781,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
pte_t pte, *ptep;
unsigned int shift, level;
int ret;
+ bool large_enable;
/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
@@ -732,12 +831,15 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
pte = *ptep;
local_irq_enable();
+ /* If we're logging dirty pages, always map single pages */
+ large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);
+
/* Get pte level from shift/size */
- if (shift == PUD_SHIFT &&
+ if (large_enable && shift == PUD_SHIFT &&
(gpa & (PUD_SIZE - PAGE_SIZE)) ==
(hva & (PUD_SIZE - PAGE_SIZE))) {
level = 2;
- } else if (shift == PMD_SHIFT &&
+ } else if (large_enable && shift == PMD_SHIFT &&
(gpa & (PMD_SIZE - PAGE_SIZE)) ==
(hva & (PMD_SIZE - PAGE_SIZE))) {
level = 1;
@@ -857,7 +959,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
return ret;
}
-/* Called with kvm->lock held */
+/* Called with kvm->mmu_lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
@@ -872,7 +974,7 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
return 0;
}
-/* Called with kvm->lock held */
+/* Called with kvm->mmu_lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
@@ -880,18 +982,24 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;
int ref = 0;
+ unsigned long old, *rmapp;
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
- kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
- gpa, shift);
+ old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
+ gpa, shift);
/* XXX need to flush tlb here? */
+ /* Also clear bit in ptes in shadow pgtable for nested guests */
+ rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
+ old & PTE_RPN_MASK,
+ 1UL << shift);
ref = 1;
}
return ref;
}
-/* Called with kvm->lock held */
+/* Called with kvm->mmu_lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn)
{
@@ -915,15 +1023,23 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
pte_t *ptep;
unsigned int shift;
int ret = 0;
+ unsigned long old, *rmapp;
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
ret = 1;
if (shift)
ret = 1 << (shift - PAGE_SHIFT);
- kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
- gpa, shift);
+ spin_lock(&kvm->mmu_lock);
+ old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
+ gpa, shift);
kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
+ /* Also clear bit in ptes in shadow pgtable for nested guests */
+ rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
+ old & PTE_RPN_MASK,
+ 1UL << shift);
+ spin_unlock(&kvm->mmu_lock);
}
return ret;
}
@@ -953,6 +1069,26 @@ long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
return 0;
}
+void kvmppc_radix_flush_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot)
+{
+ unsigned long n;
+ pte_t *ptep;
+ unsigned long gpa;
+ unsigned int shift;
+
+ gpa = memslot->base_gfn << PAGE_SHIFT;
+ spin_lock(&kvm->mmu_lock);
+ for (n = memslot->npages; n; --n) {
+ ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ if (ptep && pte_present(*ptep))
+ kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
+ kvm->arch.lpid);
+ gpa += PAGE_SIZE;
+ }
+ spin_unlock(&kvm->mmu_lock);
+}
+
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
int psize, int *indexp)
{
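
For reference, the quadrant trick used by __kvmhv_copy_tofrom_guest_radix()
above: with the guest's LPID (and, for quadrant 1, PID) temporarily loaded,
the top two bits of the effective address pick which translation regime the
access goes through. The address construction in isolation (helper name
hypothetical):

/* quadrant 1 (EA top bits 0b01): guest user translation (pid != 0)
 * quadrant 2 (EA top bits 0b10): guest kernel translation (pid == 0) */
static inline void *quadrant_ea(unsigned long eaddr, int pid)
{
	unsigned long quadrant = pid ? 1 : 2;

	return (void *)(eaddr | (quadrant << 62));
}
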
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 62a8d03ba7e9..532ab79734c7 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -397,12 +397,13 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
return H_SUCCESS;
}
-static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
+static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
+ unsigned long entry)
{
unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
- iommu_tce_xchg(tbl, entry, &hpa, &dir);
+ iommu_tce_xchg(mm, tbl, entry, &hpa, &dir);
}
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -433,7 +434,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
unsigned long hpa = 0;
long ret;
- if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
+ if (WARN_ON_ONCE(iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir)))
return H_TOO_HARD;
if (dir == DMA_NONE)
@@ -441,7 +442,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
if (ret != H_SUCCESS)
- iommu_tce_xchg(tbl, entry, &hpa, &dir);
+ iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir);
return ret;
}
@@ -487,7 +488,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (mm_iommu_mapped_inc(mem))
return H_TOO_HARD;
- ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
+ ret = iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir);
if (WARN_ON_ONCE(ret)) {
mm_iommu_mapped_dec(mem);
return H_TOO_HARD;
@@ -566,7 +567,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
entry, ua, dir);
if (ret != H_SUCCESS) {
- kvmppc_clear_tce(stit->tbl, entry);
+ kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
goto unlock_exit;
}
}
@@ -655,7 +656,8 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
iommu_tce_direction(tce));
if (ret != H_SUCCESS) {
- kvmppc_clear_tce(stit->tbl, entry);
+ kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
+ entry);
goto unlock_exit;
}
}
@@ -704,7 +706,7 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
return ret;
WARN_ON_ONCE(1);
- kvmppc_clear_tce(stit->tbl, entry);
+ kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
}
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index d65b961661fb..5a066fc299e1 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -983,7 +983,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
ret = kvmhv_enter_nested_guest(vcpu);
if (ret == H_INTERRUPT) {
kvmppc_set_gpr(vcpu, 3, 0);
+ vcpu->arch.hcall_needed = 0;
return -EINTR;
+ } else if (ret == H_TOO_HARD) {
+ kvmppc_set_gpr(vcpu, 3, 0);
+ vcpu->arch.hcall_needed = 0;
+ return RESUME_HOST;
}
break;
case H_TLB_INVALIDATE:
@@ -991,7 +996,11 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (nesting_enabled(vcpu->kvm))
ret = kvmhv_do_nested_tlbie(vcpu);
break;
-
+ case H_COPY_TOFROM_GUEST:
+ ret = H_FUNCTION;
+ if (nesting_enabled(vcpu->kvm))
+ ret = kvmhv_copy_tofrom_guest_nested(vcpu);
+ break;
default:
return RESUME_HOST;
}
@@ -1335,7 +1344,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
return r;
}
-static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
+static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
@@ -1393,7 +1402,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
*/
case BOOK3S_INTERRUPT_H_DATA_STORAGE:
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmhv_nested_page_fault(vcpu);
+ r = kvmhv_nested_page_fault(run, vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
break;
case BOOK3S_INTERRUPT_H_INST_STORAGE:
@@ -1403,7 +1412,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmhv_nested_page_fault(vcpu);
+ r = kvmhv_nested_page_fault(run, vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
break;
@@ -4058,7 +4067,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
if (!nested)
r = kvmppc_handle_exit_hv(kvm_run, vcpu, current);
else
- r = kvmppc_handle_nested_exit(vcpu);
+ r = kvmppc_handle_nested_exit(kvm_run, vcpu);
}
vcpu->arch.ret = r;
@@ -4370,7 +4379,8 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new)
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
unsigned long npages = mem->memory_size >> PAGE_SHIFT;
@@ -4382,6 +4392,23 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
*/
if (npages)
atomic64_inc(&kvm->arch.mmio_update);
+
+ /*
+ * For change == KVM_MR_MOVE or KVM_MR_DELETE, higher levels
+ * have already called kvm_arch_flush_shadow_memslot() to
+ * flush shadow mappings. For KVM_MR_CREATE we have no
+ * previous mappings. So the only case to handle is
+ * KVM_MR_FLAGS_ONLY when the KVM_MEM_LOG_DIRTY_PAGES bit
+ * has been changed.
+ * For radix guests, we flush on setting KVM_MEM_LOG_DIRTY_PAGES
+ * to get rid of any THP PTEs in the partition-scoped page tables
+ * so we can track dirtiness at the page level; we flush when
+ * clearing KVM_MEM_LOG_DIRTY_PAGES so that we can go back to
+ * using THP PTEs.
+ */
+ if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) &&
+ ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES))
+ kvmppc_radix_flush_memslot(kvm, old);
}
/*
@@ -4531,12 +4558,15 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
{
if (nesting_enabled(kvm))
kvmhv_release_all_nested(kvm);
+ kvmppc_rmap_reset(kvm);
+ kvm->arch.process_table = 0;
+ /* Mutual exclusion with kvm_unmap_hva_range etc. */
+ spin_lock(&kvm->mmu_lock);
+ kvm->arch.radix = 0;
+ spin_unlock(&kvm->mmu_lock);
kvmppc_free_radix(kvm);
kvmppc_update_lpcr(kvm, LPCR_VPM1,
LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
- kvmppc_rmap_reset(kvm);
- kvm->arch.radix = 0;
- kvm->arch.process_table = 0;
return 0;
}
@@ -4548,12 +4578,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
err = kvmppc_init_vm_radix(kvm);
if (err)
return err;
-
+ kvmppc_rmap_reset(kvm);
+ /* Mutual exclusion with kvm_unmap_hva_range etc. */
+ spin_lock(&kvm->mmu_lock);
+ kvm->arch.radix = 1;
+ spin_unlock(&kvm->mmu_lock);
kvmppc_free_hpt(&kvm->arch.hpt);
kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
- kvmppc_rmap_reset(kvm);
- kvm->arch.radix = 1;
return 0;
}
@@ -5213,6 +5245,44 @@ static int kvmhv_enable_nested(struct kvm *kvm)
return 0;
}
+static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
+ int size)
+{
+ int rc = -EINVAL;
+
+ if (kvmhv_vcpu_is_radix(vcpu)) {
+ rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size);
+
+ if (rc > 0)
+ rc = -EINVAL;
+ }
+
+ /* For now quadrants are the only way to access nested guest memory */
+ if (rc && vcpu->arch.nested)
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
+ int size)
+{
+ int rc = -EINVAL;
+
+ if (kvmhv_vcpu_is_radix(vcpu)) {
+ rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size);
+
+ if (rc > 0)
+ rc = -EINVAL;
+ }
+
+ /* For now quadrants are the only way to access nested guest memory */
+ if (rc && vcpu->arch.nested)
+ rc = -EAGAIN;
+
+ return rc;
+}
+
static struct kvmppc_ops kvm_ops_hv = {
.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
@@ -5253,6 +5323,8 @@ static struct kvmppc_ops kvm_ops_hv = {
.get_rmmu_info = kvmhv_get_rmmu_info,
.set_smt_mode = kvmhv_set_smt_mode,
.enable_nested = kvmhv_enable_nested,
+ .load_from_eaddr = kvmhv_load_from_eaddr,
+ .store_to_eaddr = kvmhv_store_to_eaddr,
};
static int kvm_init_subcore_bitmap(void)
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 401d2ecbebc5..735e0ac6f5b2 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -195,6 +195,26 @@ void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
vcpu->arch.ppr = hr->ppr;
}
+static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
+{
+ /* No need to reflect the page fault to L1, we've handled it */
+ vcpu->arch.trap = 0;
+
+ /*
+ * Since the L2 gprs have already been written back into L1 memory when
+ * we complete the mmio, store the L1 memory location of the L2 gpr
+ * that the mmio loads into, so that the loaded value can be
+ * written there in kvmppc_complete_mmio_load().
+ */
+ if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
+ && (vcpu->mmio_is_write == 0)) {
+ vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
+ offsetof(struct pt_regs,
+ gpr[vcpu->arch.io_gpr]);
+ vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
+ }
+}
+
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
long int err, r;
@@ -316,6 +336,11 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (r == -EINTR)
return H_INTERRUPT;
+ if (vcpu->mmio_needed) {
+ kvmhv_nested_mmio_needed(vcpu, regs_ptr);
+ return H_TOO_HARD;
+ }
+
return vcpu->arch.trap;
}
@@ -437,6 +462,81 @@ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
}
/*
+ * Handle the H_COPY_TOFROM_GUEST hcall.
+ * r4 = L1 lpid of nested guest
+ * r5 = pid
+ * r6 = eaddr to access
+ * r7 = to buffer (L1 gpa)
+ * r8 = from buffer (L1 gpa)
+ * r9 = n bytes to copy
+ */
+long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
+{
+ struct kvm_nested_guest *gp;
+ int l1_lpid = kvmppc_get_gpr(vcpu, 4);
+ int pid = kvmppc_get_gpr(vcpu, 5);
+ gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
+ gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
+ gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
+ void *buf;
+ unsigned long n = kvmppc_get_gpr(vcpu, 9);
+ bool is_load = !!gp_to;
+ long rc;
+
+ if (gp_to && gp_from) /* One must be NULL to determine the direction */
+ return H_PARAMETER;
+
+ if (eaddr & (0xFFFUL << 52))
+ return H_PARAMETER;
+
+ buf = kzalloc(n, GFP_KERNEL);
+ if (!buf)
+ return H_NO_MEM;
+
+ gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
+ if (!gp) {
+ rc = H_PARAMETER;
+ goto out_free;
+ }
+
+ mutex_lock(&gp->tlb_lock);
+
+ if (is_load) {
+ /* Load from the nested guest into our buffer */
+ rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
+ eaddr, buf, NULL, n);
+ if (rc)
+ goto not_found;
+
+ /* Write what was loaded into our buffer back to the L1 guest */
+ rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
+ if (rc)
+ goto not_found;
+ } else {
+ /* Load the data to be stored from the L1 guest into our buf */
+ rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
+ if (rc)
+ goto not_found;
+
+ /* Store from our buffer into the nested guest */
+ rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
+ eaddr, NULL, buf, n);
+ if (rc)
+ goto not_found;
+ }
+
+out_unlock:
+ mutex_unlock(&gp->tlb_lock);
+ kvmhv_put_nested(gp);
+out_free:
+ kfree(buf);
+ return rc;
+not_found:
+ rc = H_NOT_FOUND;
+ goto out_unlock;
+}
+
+/*
* Reload the partition table entry for a guest.
* Caller must hold gp->tlb_lock.
*/
@@ -480,6 +580,7 @@ struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
if (shadow_lpid < 0)
goto out_free2;
gp->shadow_lpid = shadow_lpid;
+ gp->radix = 1;
memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
@@ -687,6 +788,57 @@ void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
*n_rmap = NULL;
}
+static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
+ unsigned long clr, unsigned long set,
+ unsigned long hpa, unsigned long mask)
+{
+ struct kvm_nested_guest *gp;
+ unsigned long gpa;
+ unsigned int shift, lpid;
+ pte_t *ptep;
+
+ gpa = n_rmap & RMAP_NESTED_GPA_MASK;
+ lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
+ gp = kvmhv_find_nested(kvm, lpid);
+ if (!gp)
+ return;
+
+ /* Find the pte */
+ ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ /*
+ * If the pte is present and the pfn is still the same, update the pte.
+ * If the pfn has changed then this is a stale rmap entry, the nested
+ * gpa actually points somewhere else now, and there is nothing to do.
+ * XXX A future optimisation would be to remove the rmap entry here.
+ */
+ if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
+ __radix_pte_update(ptep, clr, set);
+ kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
+ }
+}
+
+/*
+ * For a given list of rmap entries, update the rc bits in all ptes in shadow
+ * page tables for nested guests which are referenced by the rmap list.
+ */
+void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
+ unsigned long clr, unsigned long set,
+ unsigned long hpa, unsigned long nbytes)
+{
+ struct llist_node *entry = ((struct llist_head *) rmapp)->first;
+ struct rmap_nested *cursor;
+ unsigned long rmap, mask;
+
+ if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
+ return;
+
+ mask = PTE_RPN_MASK & ~(nbytes - 1);
+ hpa &= mask;
+
+ for_each_nest_rmap_safe(cursor, entry, &rmap)
+ kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
+}
+
static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
unsigned long hpa, unsigned long mask)
{
@@ -723,7 +875,7 @@ static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
/* called with kvm->mmu_lock held */
void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
+ const struct kvm_memory_slot *memslot,
unsigned long gpa, unsigned long hpa,
unsigned long nbytes)
{
@@ -1049,7 +1201,7 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
struct kvm *kvm = vcpu->kvm;
bool writing = !!(dsisr & DSISR_ISSTORE);
u64 pgflags;
- bool ret;
+ long ret;
/* Are the rc bits set in the L1 partition scoped pte? */
pgflags = _PAGE_ACCESSED;
@@ -1062,16 +1214,22 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing,
gpte.raddr, kvm->arch.lpid);
- spin_unlock(&kvm->mmu_lock);
- if (!ret)
- return -EINVAL;
+ if (!ret) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
/* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa,
gp->shadow_lpid);
if (!ret)
- return -EINVAL;
- return 0;
+ ret = -EINVAL;
+ else
+ ret = 0;
+
+out_unlock:
+ spin_unlock(&kvm->mmu_lock);
+ return ret;
}
static inline int kvmppc_radix_level_to_shift(int level)
@@ -1099,7 +1257,8 @@ static inline int kvmppc_radix_shift_to_level(int shift)
}
/* called with gp->tlb_lock held */
-static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
+static long int __kvmhv_nested_page_fault(struct kvm_run *run,
+ struct kvm_vcpu *vcpu,
struct kvm_nested_guest *gp)
{
struct kvm *kvm = vcpu->kvm;
@@ -1180,9 +1339,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
return RESUME_GUEST;
}
- /* passthrough of emulated MMIO case... */
- pr_err("emulated MMIO passthrough?\n");
- return -EINVAL;
+
+ /* passthrough of emulated MMIO case */
+ return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
}
if (memslot->flags & KVM_MEM_READONLY) {
if (writing) {
@@ -1220,6 +1379,8 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
return ret;
shift = kvmppc_radix_level_to_shift(level);
}
+ /* Align gfn to the start of the page */
+ gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;
/* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */
@@ -1227,6 +1388,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
perm |= gpte.may_read ? 0UL : _PAGE_READ;
perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
+ /* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
+ perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
+ perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
pte = __pte(pte_val(pte) & ~perm);
/* What size pte can we insert? */
@@ -1264,13 +1428,13 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
return RESUME_GUEST;
}
-long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
+long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
struct kvm_nested_guest *gp = vcpu->arch.nested;
long int ret;
mutex_lock(&gp->tlb_lock);
- ret = __kvmhv_nested_page_fault(vcpu, gp);
+ ret = __kvmhv_nested_page_fault(run, vcpu, gp);
mutex_unlock(&gp->tlb_lock);
return ret;
}
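
A worked note on the mask computed in kvmhv_update_nest_rmap_rc_list() above:
for an update covering one 64K page, nbytes = 1UL << 16, so
mask = PTE_RPN_MASK & ~0xffffUL, and the (pte_val(*ptep) & mask) == hpa test
in kvmhv_update_nest_rmap_rc() then matches any shadow pte whose real page
number falls inside that page, regardless of the sub-page offset bits. In
isolation (helper name hypothetical; nbytes must be a power of two):

static unsigned long rmap_match_mask(unsigned long nbytes)
{
	return PTE_RPN_MASK & ~(nbytes - 1);
}
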
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index a67cf1cdeda4..3b3791ed74a6 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -107,7 +107,7 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
/* Update the dirty bitmap of a memslot */
-void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
+void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
unsigned long gfn, unsigned long psize)
{
unsigned long npages;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 4efd65d9e828..811a3c2fb0e9 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -587,6 +587,7 @@ void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
case PVR_POWER8:
case PVR_POWER8E:
case PVR_POWER8NVL:
+ case PVR_POWER9:
vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
BOOK3S_HFLAG_NEW_TLBIE;
break;
@@ -1913,7 +1914,8 @@ static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new)
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
return;
}
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index b0b2bfc2ff51..f27ee57ab46e 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -1015,17 +1015,7 @@ static int xics_debug_show(struct seq_file *m, void *private)
return 0;
}
-static int xics_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, xics_debug_show, inode->i_private);
-}
-
-static const struct file_operations xics_debug_fops = {
- .open = xics_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(xics_debug);
static void xics_debugfs_init(struct kvmppc_xics *xics)
{
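
DEFINE_SHOW_ATTRIBUTE(name), used by this conversion and the xive one below,
generates the boilerplate the deleted lines spelled out by hand. Its expansion
is roughly the following sketch (per the seq_file helper of this era; note it
also sets .owner, which the hand-rolled versions omitted):

static int xics_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xics_debug_show, inode->i_private);
}

static const struct file_operations xics_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = xics_debug_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
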
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index ad4a370703d3..f78d002f0fe0 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1968,17 +1968,7 @@ static int xive_debug_show(struct seq_file *m, void *private)
return 0;
}
-static int xive_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, xive_debug_show, inode->i_private);
-}
-
-static const struct file_operations xive_debug_fops = {
- .open = xive_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(xive_debug);
static void xive_debugfs_init(struct kvmppc_xive *xive)
{
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index a9ca016da670..dbec4128bb51 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -1833,7 +1833,8 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
void kvmppc_core_commit_memory_region(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
- const struct kvm_memory_slot *new)
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
}
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 051af7d97327..4e5081e58409 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -75,6 +75,10 @@
PPC_LL r1, VCPU_HOST_STACK(r4)
PPC_LL r2, HOST_R2(r1)
+START_BTB_FLUSH_SECTION
+ BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+
mfspr r10, SPRN_PID
lwz r8, VCPU_HOST_PID(r4)
PPC_LL r11, VCPU_SHARED(r4)
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index 94f04fcb373e..962ee90a0dfe 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -20,7 +20,7 @@
#define KVM_E500_H
#include <linux/kvm_host.h>
-#include <asm/mmu-book3e.h>
+#include <asm/nohash/mmu-book3e.h>
#include <asm/tlb.h>
#include <asm/cputhreads.h>
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 3f8189eb56ed..fde1de08b4d7 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
vcpu->arch.pwrmgtcr0 = spr_val;
break;
+ case SPRN_BUCSR:
+ /*
+ * If we are here, it means that we have already flushed the
+ * branch predictor, so just return to the guest.
+ */
+ break;
+
/* extra exceptions */
#ifdef CONFIG_SPE_POSSIBLE
case SPRN_IVOR32:
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 8f2985e46f6f..c3f312b2bcb3 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -757,10 +757,11 @@ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
return 0;
}
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
/* The page will get remapped properly on its next fault */
kvm_unmap_hva(kvm, hva);
+ return 0;
}
/*****************************************/
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2869a299c4ed..b90a7d154180 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -331,10 +331,17 @@ int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
{
ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
struct kvmppc_pte pte;
- int r;
+ int r = -EINVAL;
vcpu->stat.st++;
+ if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
+ r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
+ size);
+
+ if ((!r) || (r == -EAGAIN))
+ return r;
+
r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
XLATE_WRITE, &pte);
if (r < 0)
@@ -367,10 +374,17 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
{
ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
struct kvmppc_pte pte;
- int rc;
+ int rc = -EINVAL;
vcpu->stat.ld++;
+ if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
+ rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
+ size);
+
+ if ((!rc) || (rc == -EAGAIN))
+ return rc;
+
rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
XLATE_READ, &pte);
if (rc)
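
Both kvmppc_st() and kvmppc_ld() above now share one dispatch shape: try the
per-VM op first, return immediately on success (0) or on -EAGAIN (the op
exists but defers to the caller), and only then fall back to the classic
translate-and-copy path. The shape in isolation (all names hypothetical):

static int access_eaddr(void *buf, int size,
			int (*fast)(void *, int), int (*slow)(void *, int))
{
	int r = -EINVAL;

	if (fast)
		r = fast(buf, size);
	if (r == 0 || r == -EAGAIN)
		return r;	/* handled, or explicitly punted */

	return slow(buf, size);
}
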
@@ -518,7 +532,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_PPC_UNSET_IRQ:
case KVM_CAP_PPC_IRQ_LEVEL:
case KVM_CAP_ENABLE_CAP:
- case KVM_CAP_ENABLE_CAP_VM:
case KVM_CAP_ONE_REG:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
@@ -543,8 +556,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_SPAPR_TCE_64:
- /* fallthrough */
+ r = 1;
+ break;
case KVM_CAP_SPAPR_TCE_VFIO:
+ r = !!cpu_has_feature(CPU_FTR_HVMODE);
+ break;
case KVM_CAP_PPC_RTAS:
case KVM_CAP_PPC_FIXUP_HCALL:
case KVM_CAP_PPC_ENABLE_HCALL:
@@ -696,7 +712,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- kvmppc_core_commit_memory_region(kvm, mem, old, new);
+ kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
@@ -1192,6 +1208,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
kvmppc_set_vmx_byte(vcpu, gpr);
break;
#endif
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ case KVM_MMIO_REG_NESTED_GPR:
+ if (kvmppc_need_byteswap(vcpu))
+ gpr = swab64(gpr);
+ kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
+ sizeof(gpr));
+ break;
+#endif
default:
BUG();
}
@@ -2084,8 +2108,8 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
}
-static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
- struct kvm_enable_cap *cap)
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ struct kvm_enable_cap *cap)
{
int r;
@@ -2273,15 +2297,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
break;
}
- case KVM_ENABLE_CAP:
- {
- struct kvm_enable_cap cap;
- r = -EFAULT;
- if (copy_from_user(&cap, argp, sizeof(cap)))
- goto out;
- r = kvm_vm_ioctl_enable_cap(kvm, &cap);
- break;
- }
#ifdef CONFIG_SPAPR_TCE_IOMMU
case KVM_CREATE_SPAPR_TCE_64: {
struct kvm_create_spapr_tce_64 create_tce_64;
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 89502cbccb1b..506413a2c25e 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -204,22 +204,6 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags)
return patch_instruction(addr, create_branch(addr, target, flags));
}
-int patch_branch_site(s32 *site, unsigned long target, int flags)
-{
- unsigned int *addr;
-
- addr = (unsigned int *)((unsigned long)site + *site);
- return patch_instruction(addr, create_branch(addr, target, flags));
-}
-
-int patch_instruction_site(s32 *site, unsigned int instr)
-{
- unsigned int *addr;
-
- addr = (unsigned int *)((unsigned long)site + *site);
- return patch_instruction(addr, instr);
-}
-
bool is_offset_in_branch_range(long offset)
{
/*
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index e613b02bb2f0..5169cc805464 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -118,7 +118,7 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}
#ifdef CONFIG_PPC_BOOK3S_64
-void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
+static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
{
unsigned int instrs[3], *dest;
long *start, *end;
@@ -168,7 +168,7 @@ void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
: "unknown");
}
-void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
+static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
{
unsigned int instrs[6], *dest;
long *start, *end;
@@ -347,6 +347,29 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
+
+static void patch_btb_flush_section(long *curr)
+{
+ unsigned int *start, *end;
+
+ start = (void *)curr + *curr;
+ end = (void *)curr + *(curr + 1);
+ for (; start < end; start++) {
+ pr_devel("patching dest %lx\n", (unsigned long)start);
+ patch_instruction(start, PPC_INST_NOP);
+ }
+}
+
+void do_btb_flush_fixups(void)
+{
+ long *start, *end;
+
+ start = PTRRELOC(&__start__btb_flush_fixup);
+ end = PTRRELOC(&__stop__btb_flush_fixup);
+
+ for (; start < end; start += 2)
+ patch_btb_flush_section(start);
+}
#endif /* CONFIG_PPC_FSL_BOOK3E */
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
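
The __btb_flush_fixup entries consumed by do_btb_flush_fixups() above are
pairs of self-relative offsets, bracketed by the __start/__stop symbols added
to vmlinux.lds.S earlier in this diff: each pair records the start and end of
an instruction range to patch, relative to the entry's own address. Resolving
one entry (a sketch of what patch_btb_flush_section() computes; helper name
hypothetical):

static void entry_to_range(long *entry, unsigned int **start, unsigned int **end)
{
	/* both offsets are relative to 'entry' itself */
	*start = (void *)entry + entry[0];
	*end = (void *)entry + entry[1];
}
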
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 12d92518e898..ea2b9af08a48 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -29,6 +29,7 @@
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
+#include <asm/code-patching.h>
#include "mmu_decl.h"
@@ -43,22 +44,13 @@ unsigned long tlb_47x_boltmap[1024/8];
static void ppc44x_update_tlb_hwater(void)
{
- extern unsigned int tlb_44x_patch_hwater_D[];
- extern unsigned int tlb_44x_patch_hwater_I[];
-
/* The TLB miss handlers hard-code the watermark in a cmpli
* instruction to improve performance rather than loading it
* from the global variable. Thus, we patch the instructions
* in the 2 TLB miss handlers when updating the value.
*/
- tlb_44x_patch_hwater_D[0] = (tlb_44x_patch_hwater_D[0] & 0xffff0000) |
- tlb_44x_hwater;
- flush_icache_range((unsigned long)&tlb_44x_patch_hwater_D[0],
- (unsigned long)&tlb_44x_patch_hwater_D[1]);
- tlb_44x_patch_hwater_I[0] = (tlb_44x_patch_hwater_I[0] & 0xffff0000) |
- tlb_44x_hwater;
- flush_icache_range((unsigned long)&tlb_44x_patch_hwater_I[0],
- (unsigned long)&tlb_44x_patch_hwater_I[1]);
+ modify_instruction_site(&patch__tlb_44x_hwater_D, 0xffff, tlb_44x_hwater);
+ modify_instruction_site(&patch__tlb_44x_hwater_I, 0xffff, tlb_44x_hwater);
}
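
modify_instruction_site(site, mask, value), the helper the 44x and 8xx hunks
switch to, rewrites only the masked field of the instruction at a patch_site
label, here the 16-bit immediate holding the TLB watermark. Its effect is
roughly this sketch (not the exact powerpc implementation):

static int modify_site(unsigned int *addr, unsigned int mask, unsigned int val)
{
	/* keep the untouched opcode bits, splice in the new field */
	return patch_instruction(addr, (*addr & ~mask) | (val & mask));
}
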
/*
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 01b7f5107c3a..bfa503cff351 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -100,11 +100,7 @@ static void __init mmu_mapin_immr(void)
static void __init mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
{
- unsigned int instr = *(unsigned int *)patch_site_addr(site);
-
- instr &= 0xffff0000;
- instr |= (unsigned long)__va(mapped) >> 16;
- patch_instruction_site(site, instr);
+ modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}
unsigned long __init mmu_mapin_ram(unsigned long top)
@@ -175,12 +171,12 @@ void set_context(unsigned long id, pgd_t *pgd)
*(ptr + 1) = pgd;
#endif
- /* Register M_TW will contain base address of level 1 table minus the
+ /* Register M_TWB will contain base address of level 1 table minus the
* lower part of the kernel PGDIR base address, so that all accesses to
* level 1 table are done relative to lower part of kernel PGDIR base
* address.
*/
- mtspr(SPRN_M_TW, __pa(pgd) - offset);
+ mtspr(SPRN_M_TWB, __pa(pgd) - offset);
/* Update context */
mtspr(SPRN_M_CASID, id - 1);
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index ca96e7be4d0e..f965fc33a8b7 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -15,10 +15,13 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o
hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += pgtable-book3e.o
-obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o $(hash64-y) mmu_context_book3s64.o pgtable-book3s64.o
+obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o \
+ $(hash64-y) mmu_context_book3s64.o \
+ pgtable-book3s64.o pgtable-frag.o
+obj-$(CONFIG_PPC32) += pgtable-frag.o
obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o
-obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
-obj-$(CONFIG_PPC_STD_MMU) += tlb_hash$(BITS).o
+obj-$(CONFIG_PPC_BOOK3S_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
+obj-$(CONFIG_PPC_BOOK3S) += tlb_hash$(BITS).o
ifdef CONFIG_PPC_BOOK3S_64
obj-$(CONFIG_PPC_4K_PAGES) += hash64_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash64_64k.o
@@ -47,7 +50,7 @@ ifdef CONFIG_PPC_PTDUMP
obj-$(CONFIG_4xx) += dump_linuxpagetables-generic.o
obj-$(CONFIG_PPC_8xx) += dump_linuxpagetables-8xx.o
obj-$(CONFIG_PPC_BOOK3E_MMU) += dump_linuxpagetables-generic.o
-obj-$(CONFIG_PPC_BOOK3S_32) += dump_linuxpagetables-generic.o
+obj-$(CONFIG_PPC_BOOK3S_32) += dump_linuxpagetables-generic.o dump_bats.o dump_sr.o
obj-$(CONFIG_PPC_BOOK3S_64) += dump_linuxpagetables-book3s64.o
endif
obj-$(CONFIG_PPC_HTDUMP) += dump_hashpagetable.o
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index b6e7b5952ab5..e955539686a4 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -29,7 +29,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/highmem.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
@@ -151,8 +151,8 @@ static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsi
* Allocate DMA-coherent memory space and return both the kernel remapped
* virtual and bus address for that space.
*/
-void *
-__dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
struct page *page;
struct ppc_vm_region *c;
@@ -223,7 +223,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
/*
* Set the "dma handle"
*/
- *handle = page_to_phys(page);
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
do {
SetPageReserved(page);
@@ -249,12 +249,12 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
no_page:
return NULL;
}
-EXPORT_SYMBOL(__dma_alloc_coherent);
/*
* free a page as defined by the above mapping.
*/
-void __dma_free_coherent(size_t size, void *vaddr)
+void __dma_nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct ppc_vm_region *c;
unsigned long flags, addr;
@@ -309,7 +309,6 @@ void __dma_free_coherent(size_t size, void *vaddr)
__func__, vaddr);
dump_stack();
}
-EXPORT_SYMBOL(__dma_free_coherent);
/*
* make an area consistent.
@@ -401,7 +400,7 @@ EXPORT_SYMBOL(__dma_sync_page);
/*
* Return the PFN for a given cpu virtual address returned by
- * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ * __dma_nommu_alloc_coherent. This is used by dma_mmap_coherent()
*/
unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
{
diff --git a/arch/powerpc/mm/dump_bats.c b/arch/powerpc/mm/dump_bats.c
new file mode 100644
index 000000000000..a0d23e96e841
--- /dev/null
+++ b/arch/powerpc/mm/dump_bats.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018, Christophe Leroy CS S.I.
+ * <christophe.leroy@c-s.fr>
+ *
+ * This dumps the content of BATS
+ */
+
+#include <asm/debugfs.h>
+#include <asm/pgtable.h>
+#include <asm/cpu_has_feature.h>
+
+static char *pp_601(int k, int pp)
+{
+ if (pp == 0)
+ return k ? "NA" : "RWX";
+ if (pp == 1)
+ return k ? "ROX" : "RWX";
+ if (pp == 2)
+ return k ? "RWX" : "RWX";
+ return k ? "ROX" : "ROX";
+}
+
+static void bat_show_601(struct seq_file *m, int idx, u32 lower, u32 upper)
+{
+ u32 blpi = upper & 0xfffe0000;
+ u32 k = (upper >> 2) & 3;
+ u32 pp = upper & 3;
+ phys_addr_t pbn = PHYS_BAT_ADDR(lower);
+ u32 bsm = lower & 0x3ff;
+ u32 size = (bsm + 1) << 17;
+
+ seq_printf(m, "%d: ", idx);
+ if (!(lower & 0x40)) {
+ seq_puts(m, " -\n");
+ return;
+ }
+
+ seq_printf(m, "0x%08x-0x%08x ", blpi, blpi + size - 1);
+#ifdef CONFIG_PHYS_64BIT
+ seq_printf(m, "0x%016llx ", pbn);
+#else
+ seq_printf(m, "0x%08x ", pbn);
+#endif
+
+ seq_printf(m, "Kernel %s User %s", pp_601(k & 2, pp), pp_601(k & 1, pp));
+
+ if (lower & _PAGE_WRITETHRU)
+ seq_puts(m, "write through ");
+ if (lower & _PAGE_NO_CACHE)
+ seq_puts(m, "no cache ");
+ if (lower & _PAGE_COHERENT)
+ seq_puts(m, "coherent ");
+ seq_puts(m, "\n");
+}
+
+#define BAT_SHOW_601(_m, _n, _l, _u) bat_show_601(_m, _n, mfspr(_l), mfspr(_u))
+
+static int bats_show_601(struct seq_file *m, void *v)
+{
+ seq_puts(m, "---[ Block Address Translation ]---\n");
+
+ BAT_SHOW_601(m, 0, SPRN_IBAT0L, SPRN_IBAT0U);
+ BAT_SHOW_601(m, 1, SPRN_IBAT1L, SPRN_IBAT1U);
+ BAT_SHOW_601(m, 2, SPRN_IBAT2L, SPRN_IBAT2U);
+ BAT_SHOW_601(m, 3, SPRN_IBAT3L, SPRN_IBAT3U);
+
+ return 0;
+}
+
+static void bat_show_603(struct seq_file *m, int idx, u32 lower, u32 upper, bool is_d)
+{
+ u32 bepi = upper & 0xfffe0000;
+ u32 bl = (upper >> 2) & 0x7ff;
+ u32 k = upper & 3;
+ phys_addr_t brpn = PHYS_BAT_ADDR(lower);
+ u32 size = (bl + 1) << 17;
+
+ seq_printf(m, "%d: ", idx);
+ if (k == 0) {
+ seq_puts(m, " -\n");
+ return;
+ }
+
+ seq_printf(m, "0x%08x-0x%08x ", bepi, bepi + size - 1);
+#ifdef CONFIG_PHYS_64BIT
+ seq_printf(m, "0x%016llx ", brpn);
+#else
+ seq_printf(m, "0x%08x ", brpn);
+#endif
+
+ if (k == 1)
+ seq_puts(m, "User ");
+ else if (k == 2)
+ seq_puts(m, "Kernel ");
+ else
+ seq_puts(m, "Kernel/User ");
+
+ if (lower & BPP_RX)
+ seq_puts(m, is_d ? "RO " : "EXEC ");
+ else if (lower & BPP_RW)
+ seq_puts(m, is_d ? "RW " : "EXEC ");
+ else
+ seq_puts(m, is_d ? "NA " : "NX ");
+
+ if (lower & _PAGE_WRITETHRU)
+ seq_puts(m, "write through ");
+ if (lower & _PAGE_NO_CACHE)
+ seq_puts(m, "no cache ");
+ if (lower & _PAGE_COHERENT)
+ seq_puts(m, "coherent ");
+ if (lower & _PAGE_GUARDED)
+ seq_puts(m, "guarded ");
+ seq_puts(m, "\n");
+}
+
+#define BAT_SHOW_603(_m, _n, _l, _u, _d) bat_show_603(_m, _n, mfspr(_l), mfspr(_u), _d)
+
+static int bats_show_603(struct seq_file *m, void *v)
+{
+ seq_puts(m, "---[ Instruction Block Address Translation ]---\n");
+
+ BAT_SHOW_603(m, 0, SPRN_IBAT0L, SPRN_IBAT0U, false);
+ BAT_SHOW_603(m, 1, SPRN_IBAT1L, SPRN_IBAT1U, false);
+ BAT_SHOW_603(m, 2, SPRN_IBAT2L, SPRN_IBAT2U, false);
+ BAT_SHOW_603(m, 3, SPRN_IBAT3L, SPRN_IBAT3U, false);
+ if (mmu_has_feature(MMU_FTR_USE_HIGH_BATS)) {
+ BAT_SHOW_603(m, 4, SPRN_IBAT4L, SPRN_IBAT4U, false);
+ BAT_SHOW_603(m, 5, SPRN_IBAT5L, SPRN_IBAT5U, false);
+ BAT_SHOW_603(m, 6, SPRN_IBAT6L, SPRN_IBAT6U, false);
+ BAT_SHOW_603(m, 7, SPRN_IBAT7L, SPRN_IBAT7U, false);
+ }
+
+ seq_puts(m, "\n---[ Data Block Address Translation ]---\n");
+
+ BAT_SHOW_603(m, 0, SPRN_DBAT0L, SPRN_DBAT0U, true);
+ BAT_SHOW_603(m, 1, SPRN_DBAT1L, SPRN_DBAT1U, true);
+ BAT_SHOW_603(m, 2, SPRN_DBAT2L, SPRN_DBAT2U, true);
+ BAT_SHOW_603(m, 3, SPRN_DBAT3L, SPRN_DBAT3U, true);
+ if (mmu_has_feature(MMU_FTR_USE_HIGH_BATS)) {
+ BAT_SHOW_603(m, 4, SPRN_DBAT4L, SPRN_DBAT4U, true);
+ BAT_SHOW_603(m, 5, SPRN_DBAT5L, SPRN_DBAT5U, true);
+ BAT_SHOW_603(m, 6, SPRN_DBAT6L, SPRN_DBAT6U, true);
+ BAT_SHOW_603(m, 7, SPRN_DBAT7L, SPRN_DBAT7U, true);
+ }
+
+ return 0;
+}
+
+static int bats_open(struct inode *inode, struct file *file)
+{
+ if (cpu_has_feature(CPU_FTR_601))
+ return single_open(file, bats_show_601, NULL);
+
+ return single_open(file, bats_show_603, NULL);
+}
+
+static const struct file_operations bats_fops = {
+ .open = bats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init bats_init(void)
+{
+ struct dentry *debugfs_file;
+
+ debugfs_file = debugfs_create_file("block_address_translation", 0400,
+ powerpc_debugfs_root, NULL, &bats_fops);
+ return debugfs_file ? 0 : -ENOMEM;
+}
+device_initcall(bats_init);
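
Once bats_init() has run, the table is readable from
<debugfs>/powerpc/block_address_translation (powerpc_debugfs_root is the
arch's "powerpc" debugfs directory). A worked note on the size decode in
bat_show_603() above: size = (bl + 1) << 17, so bl = 0 gives the 128 KB
minimum and the maximum bl of 0x7ff gives 0x800 << 17 = 256 MB.
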
diff --git a/arch/powerpc/mm/dump_linuxpagetables-generic.c b/arch/powerpc/mm/dump_linuxpagetables-generic.c
index 1e3829ec1348..3fe98a0974c6 100644
--- a/arch/powerpc/mm/dump_linuxpagetables-generic.c
+++ b/arch/powerpc/mm/dump_linuxpagetables-generic.c
@@ -21,13 +21,11 @@ static const struct flag_info flag_array[] = {
.set = "rw",
.clear = "r ",
}, {
-#ifndef CONFIG_PPC_BOOK3S_32
.mask = _PAGE_EXEC,
.val = _PAGE_EXEC,
.set = " X ",
.clear = " ",
}, {
-#endif
.mask = _PAGE_PRESENT,
.val = _PAGE_PRESENT,
.set = "present",
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index 2b74f8adf4d0..6aa41669ac1a 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -19,6 +19,7 @@
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/mm.h>
+#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
diff --git a/arch/powerpc/mm/dump_sr.c b/arch/powerpc/mm/dump_sr.c
new file mode 100644
index 000000000000..501843664bb9
--- /dev/null
+++ b/arch/powerpc/mm/dump_sr.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018, Christophe Leroy CS S.I.
+ * <christophe.leroy@c-s.fr>
+ *
+ * This dumps the content of Segment Registers
+ */
+
+#include <asm/debugfs.h>
+
+static void seg_show(struct seq_file *m, int i)
+{
+ u32 val = mfsrin(i << 28);
+
+ seq_printf(m, "0x%01x0000000-0x%01xfffffff ", i, i);
+ seq_printf(m, "Kern key %d ", (val >> 30) & 1);
+ seq_printf(m, "User key %d ", (val >> 29) & 1);
+ if (val & 0x80000000) {
+ seq_printf(m, "Device 0x%03x", (val >> 20) & 0x1ff);
+ seq_printf(m, "-0x%05x", val & 0xfffff);
+ } else {
+ if (val & 0x10000000)
+ seq_puts(m, "No Exec ");
+ seq_printf(m, "VSID 0x%06x", val & 0xffffff);
+ }
+ seq_puts(m, "\n");
+}
+
+static int sr_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ seq_puts(m, "---[ User Segments ]---\n");
+ for (i = 0; i < TASK_SIZE >> 28; i++)
+ seg_show(m, i);
+
+ seq_puts(m, "\n---[ Kernel Segments ]---\n");
+ for (; i < 16; i++)
+ seg_show(m, i);
+
+ return 0;
+}
+
+static int sr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, sr_show, NULL);
+}
+
+static const struct file_operations sr_fops = {
+ .open = sr_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init sr_init(void)
+{
+ struct dentry *debugfs_file;
+
+ debugfs_file = debugfs_create_file("segment_registers", 0400,
+ powerpc_debugfs_root, NULL, &sr_fops);
+ return debugfs_file ? 0 : -ENOMEM;
+}
+device_initcall(sr_init);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 1697e903bbf2..a6dcfda3e11e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -226,7 +226,9 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
unsigned long address)
{
- if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) {
+ /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
+ if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
+ DSISR_PROTFAULT))) {
printk_ratelimited(KERN_CRIT "kernel tried to execute"
" exec-protected page (%lx) -"
"exploit attempt? (uid: %d)\n",
@@ -341,10 +343,21 @@ static inline void cmo_account_page_fault(void)
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */
-#ifdef CONFIG_PPC_STD_MMU
-static void sanity_check_fault(bool is_write, unsigned long error_code)
+#ifdef CONFIG_PPC_BOOK3S
+static void sanity_check_fault(bool is_write, bool is_user,
+ unsigned long error_code, unsigned long address)
{
/*
+ * Userspace trying to access kernel address, we get PROTFAULT for that.
+ */
+ if (is_user && address >= TASK_SIZE) {
+ pr_crit_ratelimited("%s[%d]: User access of kernel address (%lx) - exploit attempt? (uid: %d)\n",
+ current->comm, current->pid, address,
+ from_kuid(&init_user_ns, current_uid()));
+ return;
+ }
+
+ /*
* For hash translation mode, we should never get a
* PROTFAULT. Any update to pte to reduce access will result in us
* removing the hash page table entry, thus resulting in a DSISR_NOHPTE
@@ -373,12 +386,15 @@ static void sanity_check_fault(bool is_write, unsigned long error_code)
* For radix, we can get prot fault for autonuma case, because radix
* page table will have them marked noaccess for user.
*/
- if (!radix_enabled() && !is_write)
- WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
+ if (radix_enabled() || is_write)
+ return;
+
+ WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}
#else
-static void sanity_check_fault(bool is_write, unsigned long error_code) { }
-#endif /* CONFIG_PPC_STD_MMU */
+static void sanity_check_fault(bool is_write, bool is_user,
+ unsigned long error_code, unsigned long address) { }
+#endif /* CONFIG_PPC_BOOK3S */
/*
* Define the correct "is_write" bit in error_code based
@@ -435,7 +451,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
}
/* Additional sanity check(s) */
- sanity_check_fault(is_write, error_code);
+ sanity_check_fault(is_write, is_user, error_code, address);
/*
* The kernel should never take an execute fault nor should it
@@ -636,21 +652,23 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
switch (TRAP(regs)) {
case 0x300:
case 0x380:
- printk(KERN_ALERT "Unable to handle kernel paging request for "
- "data at address 0x%08lx\n", regs->dar);
+ case 0xe00:
+ pr_alert("BUG: %s at 0x%08lx\n",
+ regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" :
+ "Unable to handle kernel data access", regs->dar);
break;
case 0x400:
case 0x480:
- printk(KERN_ALERT "Unable to handle kernel paging request for "
- "instruction fetch\n");
+ pr_alert("BUG: Unable to handle kernel instruction fetch%s",
+ regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
break;
case 0x600:
- printk(KERN_ALERT "Unable to handle kernel paging request for "
- "unaligned access at address 0x%08lx\n", regs->dar);
+ pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
+ regs->dar);
break;
default:
- printk(KERN_ALERT "Unable to handle kernel paging request for "
- "unknown fault\n");
+ pr_alert("BUG: Unable to handle unknown paging fault at 0x%08lx\n",
+ regs->dar);
break;
}
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 26acf6c8c20c..1e2df3e9f9ea 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -28,6 +28,7 @@
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
+#include <asm/code-patching-asm.h>
#ifdef CONFIG_SMP
.section .bss
@@ -337,11 +338,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
rlwimi r5,r4,10,26,31 /* put in API (abbrev page index) */
SET_V(r5) /* set V (valid) bit */
+ patch_site 0f, patch__hash_page_A0
+ patch_site 1f, patch__hash_page_A1
+ patch_site 2f, patch__hash_page_A2
/* Get the address of the primary PTE group in the hash table (r3) */
-_GLOBAL(hash_page_patch_A)
- addis r0,r7,Hash_base@h /* base address of hash table */
- rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
- rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
+0: addis r0,r7,Hash_base@h /* base address of hash table */
+1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
+2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
xor r3,r3,r0 /* make primary hash */
li r0,8 /* PTEs/group */
@@ -366,10 +369,10 @@ _GLOBAL(hash_page_patch_A)
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
beq+ found_slot
+ patch_site 0f, patch__hash_page_B
/* Search the secondary PTEG for a matching PTE */
ori r5,r5,PTE_H /* set H (secondary hash) bit */
-_GLOBAL(hash_page_patch_B)
- xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
+0: xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
xori r4,r4,(-PTEG_SIZE & 0xffff)
addi r4,r4,-HPTE_SIZE
mtctr r0
@@ -393,10 +396,10 @@ _GLOBAL(hash_page_patch_B)
addi r6,r6,1
stw r6,primary_pteg_full@l(r4)
+ patch_site 0f, patch__hash_page_C
/* Search the secondary PTEG for an empty slot */
ori r5,r5,PTE_H /* set H (secondary hash) bit */
-_GLOBAL(hash_page_patch_C)
- xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
+0: xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
xori r4,r4,(-PTEG_SIZE & 0xffff)
addi r4,r4,-HPTE_SIZE
mtctr r0
@@ -577,11 +580,13 @@ _GLOBAL(flush_hash_pages)
stwcx. r8,0,r5 /* update the pte */
bne- 33b
+ patch_site 0f, patch__flush_hash_A0
+ patch_site 1f, patch__flush_hash_A1
+ patch_site 2f, patch__flush_hash_A2
/* Get the address of the primary PTE group in the hash table (r3) */
-_GLOBAL(flush_hash_patch_A)
- addis r8,r7,Hash_base@h /* base address of hash table */
- rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
- rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
+0: addis r8,r7,Hash_base@h /* base address of hash table */
+1: rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
+2: rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
xor r8,r0,r8 /* make primary hash */
/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
@@ -593,11 +598,11 @@ _GLOBAL(flush_hash_patch_A)
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
beq+ 3f
+ patch_site 0f, patch__flush_hash_B
/* Search the secondary PTEG for a matching PTE */
ori r11,r11,PTE_H /* set H (secondary hash) bit */
li r0,8 /* PTEs/group */
-_GLOBAL(flush_hash_patch_B)
- xoris r12,r8,Hash_msk>>16 /* compute secondary hash */
+0: xoris r12,r8,Hash_msk>>16 /* compute secondary hash */
xori r12,r12,(-PTEG_SIZE & 0xffff)
addi r12,r12,-HPTE_SIZE
mtctr r0
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8cf035e68378..9e732bb2c84a 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -42,6 +42,8 @@ EXPORT_SYMBOL(HPAGE_SHIFT);
#define hugepd_none(hpd) (hpd_val(hpd) == 0)
+#define PTE_T_ORDER (__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *)))
+
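
A note on PTE_T_ORDER: __builtin_ffs() returns the 1-based index of the lowest set bit, so for the power-of-two sizes involved the subtraction yields log2(sizeof(pte_t) / sizeof(void *)), exactly the PGT_CACHE() shift whose objects are one pte_t each. A minimal userspace sketch with assumed sizes (8-byte pte_t, 4-byte pointers, as on a 32-bit kernel with 64-bit PTEs):

#include <assert.h>

int main(void)
{
	int order = __builtin_ffs(8) - __builtin_ffs(4);	/* log2(8/4) */

	assert(order == 1);
	/* pgtable_cache_add() sizes its objects as sizeof(void *) << shift,
	 * so PGT_CACHE(1) hands out 8-byte objects, one pte_t each */
	return 0;
}
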
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
/*
@@ -61,14 +63,17 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
int num_hugepd;
if (pshift >= pdshift) {
- cachep = hugepte_cache;
+ cachep = PGT_CACHE(PTE_T_ORDER);
num_hugepd = 1 << (pshift - pdshift);
+ } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
+ cachep = PGT_CACHE(PTE_INDEX_SIZE);
+ num_hugepd = 1;
} else {
cachep = PGT_CACHE(pdshift - pshift);
num_hugepd = 1;
}
- new = kmem_cache_zalloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
+ new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -264,7 +269,7 @@ static void hugepd_free_rcu_callback(struct rcu_head *head)
unsigned int i;
for (i = 0; i < batch->index; i++)
- kmem_cache_free(hugepte_cache, batch->ptes[i]);
+ kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);
free_page((unsigned long)batch);
}
@@ -277,7 +282,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
if (atomic_read(&tlb->mm->mm_users) < 2 ||
mm_is_thread_local(tlb->mm)) {
- kmem_cache_free(hugepte_cache, hugepte);
+ kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
put_cpu_var(hugepd_freelist_cur);
return;
}
@@ -289,7 +294,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
(*batchp)->ptes[(*batchp)->index++] = hugepte;
if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
- call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
+ call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
*batchp = NULL;
}
put_cpu_var(hugepd_freelist_cur);
@@ -329,6 +334,9 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
if (shift >= pdshift)
hugepd_free(tlb, hugepte);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ pgtable_free_tlb(tlb, hugepte,
+ get_hugepd_cache_index(PTE_INDEX_SIZE));
else
pgtable_free_tlb(tlb, hugepte,
get_hugepd_cache_index(pdshift - shift));
@@ -652,7 +660,6 @@ static int __init hugepage_setup_sz(char *str)
}
__setup("hugepagesz=", hugepage_setup_sz);
-struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
int psize;
@@ -699,24 +706,13 @@ static int __init hugetlbpage_init(void)
* if we have pdshift and shift value same, we don't
* use pgt cache for hugepd.
*/
- if (pdshift > shift)
- pgtable_cache_add(pdshift - shift, NULL);
+ if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
+ pgtable_cache_add(PTE_INDEX_SIZE);
+ else if (pdshift > shift)
+ pgtable_cache_add(pdshift - shift);
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
- else if (!hugepte_cache) {
- /*
- * Create a kmem cache for hugeptes. The bottom bits in
- * the pte have size information encoded in them, so
- * align them to allow this
- */
- hugepte_cache = kmem_cache_create("hugepte-cache",
- sizeof(pte_t),
- HUGEPD_SHIFT_MASK + 1,
- 0, NULL);
- if (hugepte_cache == NULL)
- panic("%s: Unable to create kmem cache "
- "for hugeptes\n", __func__);
-
- }
+ else
+ pgtable_cache_add(PTE_T_ORDER);
#endif
}
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 2b656e67f2ea..1e6910eb70ed 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -25,22 +25,40 @@
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
-static void pgd_ctor(void *addr)
-{
- memset(addr, 0, PGD_TABLE_SIZE);
+#define CTOR(shift) static void ctor_##shift(void *addr) \
+{ \
+ memset(addr, 0, sizeof(void *) << (shift)); \
}
-static void pud_ctor(void *addr)
-{
- memset(addr, 0, PUD_TABLE_SIZE);
-}
+CTOR(0); CTOR(1); CTOR(2); CTOR(3); CTOR(4); CTOR(5); CTOR(6); CTOR(7);
+CTOR(8); CTOR(9); CTOR(10); CTOR(11); CTOR(12); CTOR(13); CTOR(14); CTOR(15);
-static void pmd_ctor(void *addr)
+static inline void (*ctor(int shift))(void *)
{
- memset(addr, 0, PMD_TABLE_SIZE);
+ BUILD_BUG_ON(MAX_PGTABLE_INDEX_SIZE != 15);
+
+ switch (shift) {
+ case 0: return ctor_0;
+ case 1: return ctor_1;
+ case 2: return ctor_2;
+ case 3: return ctor_3;
+ case 4: return ctor_4;
+ case 5: return ctor_5;
+ case 6: return ctor_6;
+ case 7: return ctor_7;
+ case 8: return ctor_8;
+ case 9: return ctor_9;
+ case 10: return ctor_10;
+ case 11: return ctor_11;
+ case 12: return ctor_12;
+ case 13: return ctor_13;
+ case 14: return ctor_14;
+ case 15: return ctor_15;
+ }
+ return NULL;
}
-struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
+struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE + 1];
EXPORT_SYMBOL_GPL(pgtable_cache); /* used by kvm_hv module */
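
The declarator of ctor() above is the classic function-returning-a-function-pointer syntax: ctor takes an int and returns a pointer to a function taking void * and returning void. A typedef makes the same signature easier to read; this variant (with the hypothetical name pick_ctor) is purely illustrative and reuses the CTOR-generated functions above:

typedef void (*pgtable_ctor_fn)(void *);

static pgtable_ctor_fn pick_ctor(int shift)
{
	switch (shift) {
	case 0: return ctor_0;	/* generated by CTOR(0) */
	case 1: return ctor_1;	/* generated by CTOR(1) */
	/* ... through ctor_15, exactly as in the switch above */
	}
	return NULL;
}
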
/*
@@ -50,7 +68,7 @@ EXPORT_SYMBOL_GPL(pgtable_cache); /* used by kvm_hv module */
* everything else. Caches created by this function are used for all
* the higher level pagetables, and for hugepage pagetables.
*/
-void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
+void pgtable_cache_add(unsigned int shift)
{
char *name;
unsigned long table_size = sizeof(void *) << shift;
@@ -71,19 +89,19 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
* moment, gcc doesn't seem to recognize is_power_of_2 as a
* constant expression, so so much for that. */
BUG_ON(!is_power_of_2(minalign));
- BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));
+ BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
if (PGT_CACHE(shift))
return; /* Already have a cache of this size */
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
- new = kmem_cache_create(name, table_size, align, 0, ctor);
+ new = kmem_cache_create(name, table_size, align, 0, ctor(shift));
if (!new)
panic("Could not allocate pgtable cache for order %d", shift);
kfree(name);
- pgtable_cache[shift - 1] = new;
+ pgtable_cache[shift] = new;
pr_debug("Allocated pgtable cache for order %d\n", shift);
}
@@ -91,15 +109,15 @@ EXPORT_SYMBOL_GPL(pgtable_cache_add); /* used by kvm_hv module */
void pgtable_cache_init(void)
{
- pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
+ pgtable_cache_add(PGD_INDEX_SIZE);
- if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
- pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
+ if (PMD_CACHE_INDEX)
+ pgtable_cache_add(PMD_CACHE_INDEX);
/*
* In all current configs, when the PUD index exists it's the
* same size as either the pgd or pmd index except with THP enabled
* on book3s 64
*/
- if (PUD_CACHE_INDEX && !PGT_CACHE(PUD_CACHE_INDEX))
- pgtable_cache_add(PUD_CACHE_INDEX, pud_ctor);
+ if (PUD_CACHE_INDEX)
+ pgtable_cache_add(PUD_CACHE_INDEX);
}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7a9886f98b0c..a5091c034747 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -188,15 +188,20 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
for (; start < end; start += page_size) {
- void *p;
+ void *p = NULL;
int rc;
if (vmemmap_populated(start, page_size))
continue;
+ /*
+ * Allocate from the altmap first if we have one. This may
+ * fail due to alignment issues when using 16MB hugepages, so
+	 * fall back to system memory if the altmap allocation fails.
+ */
if (altmap)
p = altmap_alloc_block_buf(page_size, altmap);
- else
+ if (!p)
p = vmemmap_alloc_block_buf(page_size, node);
if (!p)
return -ENOMEM;
@@ -255,8 +260,15 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
{
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
unsigned long page_order = get_order(page_size);
+ unsigned long alt_start = ~0, alt_end = ~0;
+ unsigned long base_pfn;
start = _ALIGN_DOWN(start, page_size);
+ if (altmap) {
+ alt_start = altmap->base_pfn;
+ alt_end = altmap->base_pfn + altmap->reserve +
+ altmap->free + altmap->alloc + altmap->align;
+ }
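
alt_start/alt_end bound the device-backed struct page area: per the struct vmem_altmap layout, the map spans reserve + free + alloc + align pages after base_pfn. A minimal sketch of the containment test with assumed numbers:

static int pfn_from_altmap(unsigned long pfn)
{
	/* assumed: device region at 8 GiB, 64K pages (PAGE_SHIFT == 16) */
	unsigned long alt_start = 0x200000000UL >> 16;	/* base_pfn */
	unsigned long alt_end = alt_start + 0 + 4096 + 1024 + 0;
			/* + reserve + free + alloc + align, in pages */

	/* a vmemmap page in [alt_start, alt_end) was carved out of the
	 * device, so it must go back via vmem_altmap_free() */
	return pfn >= alt_start && pfn < alt_end;
}
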
pr_debug("vmemmap_free %lx...%lx\n", start, end);
@@ -280,8 +292,9 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
page = pfn_to_page(addr >> PAGE_SHIFT);
section_base = pfn_to_page(vmemmap_section_start(start));
nr_pages = 1 << page_order;
+ base_pfn = PHYS_PFN(addr);
- if (altmap) {
+ if (base_pfn >= alt_start && base_pfn < alt_end) {
vmem_altmap_free(altmap, nr_pages);
} else if (PageReserved(page)) {
/* allocated from bootmem */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0a64fffabee1..20394e52fe27 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -246,35 +246,19 @@ static int __init mark_nonram_nosave(void)
}
#endif
-static bool zone_limits_final;
-
/*
- * The memory zones past TOP_ZONE are managed by generic mm code.
- * These should be set to zero since that's what every other
- * architecture does.
+ * Zones usage:
+ *
+ * We set up ZONE_DMA to be 31-bit on all platforms and ZONE_NORMAL to be
+ * everything else. GFP_DMA32 page allocations automatically fall back to
+ * ZONE_DMA.
+ *
+ * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
+ * inform the generic DMA mapping code. 32-bit only devices (if not handled
+ * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
+ * otherwise served by ZONE_DMA.
*/
-static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
- [0 ... TOP_ZONE ] = ~0UL,
- [TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
-};
-
-/*
- * Restrict the specified zone and all more restrictive zones
- * to be below the specified pfn. May not be called after
- * paging_init().
- */
-void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
-{
- int i;
-
- if (WARN_ON(zone_limits_final))
- return;
-
- for (i = zone; i >= 0; i--) {
- if (max_zone_pfns[i] > pfn_limit)
- max_zone_pfns[i] = pfn_limit;
- }
-}
+static unsigned long max_zone_pfns[MAX_NR_ZONES];
/*
* Find the least restrictive zone that is entirely below the
@@ -324,11 +308,14 @@ void __init paging_init(void)
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(long int)((top_of_ram - total_ram) >> 20));
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
+#endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
- limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
+ max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif
- limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
- zone_limits_final = true;
+
free_area_init_nodes(max_zone_pfns);
mark_nonram_nosave();
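
Worked through with the common 4K page size (values assumed for illustration), the 31-bit ZONE_DMA bound set above comes out as:

static unsigned long dma_pfn_limit(unsigned long max_low_pfn)
{
	/* 0x7fffffffUL >> 12 == 0x7ffff: PFNs below the 2 GiB line */
	unsigned long zone_dma_max = 0x7fffffffUL >> 12;

	/* the min() means a machine with less lowmem shrinks ZONE_DMA
	 * rather than overrunning it */
	return max_low_pfn < zone_dma_max ? max_low_pfn : zone_dma_max;
}
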
@@ -503,7 +490,7 @@ EXPORT_SYMBOL(flush_icache_user_range);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep)
{
-#ifdef CONFIG_PPC_STD_MMU
+#ifdef CONFIG_PPC_BOOK3S
/*
* We don't need to worry about _PAGE_PRESENT here because we are
* called with either mm->page_table_lock held or ptl lock held
@@ -541,7 +528,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
}
hash_preload(vma->vm_mm, address, is_exec, trap);
-#endif /* CONFIG_PPC_STD_MMU */
+#endif /* CONFIG_PPC_BOOK3S */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
&& defined(CONFIG_HUGETLB_PAGE)
if (is_vm_hugetlb_page(vma))
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
index f84e14f23e50..bb52320b7369 100644
--- a/arch/powerpc/mm/mmu_context.c
+++ b/arch/powerpc/mm/mmu_context.c
@@ -15,6 +15,7 @@
#include <linux/sched/mm.h>
#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
#if defined(CONFIG_PPC32)
static inline void switch_mm_pgdir(struct task_struct *tsk,
@@ -97,3 +98,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
switch_mmu_context(prev, next, tsk);
}
+#ifdef CONFIG_PPC32
+void arch_exit_mmap(struct mm_struct *mm)
+{
+ void *frag = pte_frag_get(&mm->context);
+
+ if (frag)
+ pte_frag_destroy(frag);
+}
+#endif
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index 510f103d7813..f720c5cc0b5e 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -164,21 +164,6 @@ static void destroy_contexts(mm_context_t *ctx)
}
}
-static void pte_frag_destroy(void *pte_frag)
-{
- int count;
- struct page *page;
-
- page = virt_to_page(pte_frag);
- /* drop all the pending references */
- count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
- /* We allow PTE_FRAG_NR fragments from a PTE page */
- if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
- pgtable_page_dtor(page);
- __free_page(page);
- }
-}
-
static void pmd_frag_destroy(void *pmd_frag)
{
int count;
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index 56c2234cc6ae..a712a650a8b6 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -36,6 +36,8 @@ struct mm_iommu_table_group_mem_t {
u64 ua; /* userspace address */
u64 entries; /* number of entries in hpas[] */
u64 *hpas; /* vmalloc'ed */
+#define MM_IOMMU_TABLE_INVALID_HPA ((uint64_t)-1)
+ u64 dev_hpa; /* Device memory base address */
};
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
@@ -126,7 +128,8 @@ static int mm_iommu_move_page_from_cma(struct page *page)
return 0;
}
-long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
+ unsigned long entries, unsigned long dev_hpa,
struct mm_iommu_table_group_mem_t **pmem)
{
struct mm_iommu_table_group_mem_t *mem;
@@ -140,12 +143,6 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
next) {
- if ((mem->ua == ua) && (mem->entries == entries)) {
- ++mem->used;
- *pmem = mem;
- goto unlock_exit;
- }
-
/* Overlap? */
if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
(ua < (mem->ua +
@@ -156,11 +153,13 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
}
- ret = mm_iommu_adjust_locked_vm(mm, entries, true);
- if (ret)
- goto unlock_exit;
+ if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
+ ret = mm_iommu_adjust_locked_vm(mm, entries, true);
+ if (ret)
+ goto unlock_exit;
- locked_entries = entries;
+ locked_entries = entries;
+ }
mem = kzalloc(sizeof(*mem), GFP_KERNEL);
if (!mem) {
@@ -168,6 +167,13 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
goto unlock_exit;
}
+ if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) {
+ mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
+ mem->dev_hpa = dev_hpa;
+ goto good_exit;
+ }
+ mem->dev_hpa = MM_IOMMU_TABLE_INVALID_HPA;
+
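
For the device-memory case, the largest usable IOMMU page size falls out of the lowest set bit of (base | size): a misaligned base and a short region both cap it. A sketch with assumed values, using __builtin_ctzl() as a userspace stand-in for the kernel's __ffs():

static unsigned int devmem_pageshift(void)
{
	unsigned long dev_hpa = 0x200000000UL;	/* 8 GiB: bit 33 */
	unsigned long size = 0x10000000UL;	/* 256 MiB: bit 28 */

	/* lowest set bit of the OR is 28, so mappings of this region
	 * may use IOMMU pages of up to 256 MiB */
	return __builtin_ctzl(dev_hpa | size);
}
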
/*
* For a starting point for a maximum page size calculation
* we use @ua and @entries natural alignment to allow IOMMU pages
@@ -236,6 +242,7 @@ populate:
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
}
+good_exit:
atomic64_set(&mem->mapped, 1);
mem->used = 1;
mem->ua = ua;
@@ -252,13 +259,31 @@ unlock_exit:
return ret;
}
-EXPORT_SYMBOL_GPL(mm_iommu_get);
+
+long mm_iommu_new(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ struct mm_iommu_table_group_mem_t **pmem)
+{
+ return mm_iommu_do_alloc(mm, ua, entries, MM_IOMMU_TABLE_INVALID_HPA,
+ pmem);
+}
+EXPORT_SYMBOL_GPL(mm_iommu_new);
+
+long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
+ unsigned long entries, unsigned long dev_hpa,
+ struct mm_iommu_table_group_mem_t **pmem)
+{
+ return mm_iommu_do_alloc(mm, ua, entries, dev_hpa, pmem);
+}
+EXPORT_SYMBOL_GPL(mm_iommu_newdev);
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
long i;
struct page *page = NULL;
+ if (!mem->hpas)
+ return;
+
for (i = 0; i < mem->entries; ++i) {
if (!mem->hpas[i])
continue;
@@ -300,6 +325,7 @@ static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
long ret = 0;
+ unsigned long entries, dev_hpa;
mutex_lock(&mem_list_mutex);
@@ -321,9 +347,12 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
}
/* @mapped became 0 so now mappings are disabled, release the region */
+ entries = mem->entries;
+ dev_hpa = mem->dev_hpa;
mm_iommu_release(mem);
- mm_iommu_adjust_locked_vm(mm, mem->entries, false);
+ if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+ mm_iommu_adjust_locked_vm(mm, entries, false);
unlock_exit:
mutex_unlock(&mem_list_mutex);
@@ -368,27 +397,32 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
return ret;
}
-struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
+struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
unsigned long ua, unsigned long entries)
{
struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
+ mutex_lock(&mem_list_mutex);
+
list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
if ((mem->ua == ua) && (mem->entries == entries)) {
ret = mem;
+ ++mem->used;
break;
}
}
+ mutex_unlock(&mem_list_mutex);
+
return ret;
}
-EXPORT_SYMBOL_GPL(mm_iommu_find);
+EXPORT_SYMBOL_GPL(mm_iommu_get);
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
const long entry = (ua - mem->ua) >> PAGE_SHIFT;
- u64 *va = &mem->hpas[entry];
+ u64 *va;
if (entry >= mem->entries)
return -EFAULT;
@@ -396,6 +430,12 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
if (pageshift > mem->pageshift)
return -EFAULT;
+ if (!mem->hpas) {
+ *hpa = mem->dev_hpa + (ua - mem->ua);
+ return 0;
+ }
+
+ va = &mem->hpas[entry];
*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
return 0;
@@ -406,7 +446,6 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
const long entry = (ua - mem->ua) >> PAGE_SHIFT;
- void *va = &mem->hpas[entry];
unsigned long *pa;
if (entry >= mem->entries)
@@ -415,7 +454,12 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
if (pageshift > mem->pageshift)
return -EFAULT;
- pa = (void *) vmalloc_to_phys(va);
+ if (!mem->hpas) {
+ *hpa = mem->dev_hpa + (ua - mem->ua);
+ return 0;
+ }
+
+ pa = (void *) vmalloc_to_phys(&mem->hpas[entry]);
if (!pa)
return -EFAULT;
@@ -435,6 +479,9 @@ extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
if (!mem)
return;
+ if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
+ return;
+
entry = (ua - mem->ua) >> PAGE_SHIFT;
va = &mem->hpas[entry];
@@ -445,6 +492,33 @@ extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
}
+bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
+ unsigned int pageshift, unsigned long *size)
+{
+ struct mm_iommu_table_group_mem_t *mem;
+ unsigned long end;
+
+ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
+ if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
+ continue;
+
+ end = mem->dev_hpa + (mem->entries << PAGE_SHIFT);
+ if ((mem->dev_hpa <= hpa) && (hpa < end)) {
+ /*
+ * Since the IOMMU page size might be bigger than
+ * PAGE_SIZE, the amount of preregistered memory
+ * starting from @hpa might be smaller than 1<<pageshift
+ * and the caller needs to distinguish this situation.
+ */
+ *size = min(1UL << pageshift, end - hpa);
+ return true;
+ }
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(mm_iommu_is_devmem);
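
The min() clamp on *size matters when an IOMMU page is only partially backed by the preregistered region. A sketch with assumed numbers:

static unsigned long devmem_size_clamp(void)
{
	unsigned long dev_hpa = 0x200000000UL;
	unsigned long end = dev_hpa + 0x10000000UL;	/* entries << PAGE_SHIFT */
	unsigned long hpa = end - 0x10000UL;	/* 64 KiB before the end */
	unsigned long page = 1UL << 24;		/* 16 MiB IOMMU page */

	/* returns 0x10000: only the last 64 KiB of that IOMMU page is
	 * preregistered device memory, and the caller must know that */
	return page < end - hpa ? page : end - hpa;
}
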
+
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
if (atomic64_inc_not_zero(&mem->mapped))
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 2faca46ad720..22d71a58167f 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -372,7 +372,6 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
pr_hard("initing context for mm @%p\n", mm);
-#ifdef CONFIG_PPC_MM_SLICES
/*
* We have MMU_NO_CONTEXT set to be ~0. Hence check
* explicitly against context.id == 0. This ensures that we properly
@@ -382,9 +381,9 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
*/
if (mm->context.id == 0)
slice_init_new_context_exec(mm);
-#endif
mm->context.id = MMU_NO_CONTEXT;
mm->context.active = 0;
+ pte_frag_set(&mm->context, NULL);
return 0;
}
@@ -487,4 +486,3 @@ void __init mmu_context_init(void)
next_context = FIRST_CONTEXT;
nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
}
-
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 8574fbbc45e0..c4a717da65eb 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -155,7 +155,7 @@ struct tlbcam {
};
#endif
-#if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx)
/* 6xx have BATS */
/* FSL_BOOKE have TLBCAM */
/* 8xx have LTLB */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index ce28ae5ca080..87f0dd004295 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1475,7 +1475,7 @@ static int dt_update_callback(struct notifier_block *nb,
switch (action) {
case OF_RECONFIG_UPDATE_PROPERTY:
- if (!of_prop_cmp(update->dn->type, "cpu") &&
+ if (of_node_is_type(update->dn, "cpu") &&
!of_prop_cmp(update->prop->name, "ibm,associativity")) {
u32 core_id;
of_property_read_u32(update->dn, "reg", &core_id);
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 9f93c9f985c5..f3c31f5e1026 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -244,6 +244,9 @@ static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
{
void *pmd_frag, *ret;
+ if (PMD_FRAG_NR == 1)
+ return NULL;
+
spin_lock(&mm->page_table_lock);
ret = mm->context.pmd_frag;
if (ret) {
@@ -322,91 +325,6 @@ void pmd_fragment_free(unsigned long *pmd)
}
}
-static pte_t *get_pte_from_cache(struct mm_struct *mm)
-{
- void *pte_frag, *ret;
-
- spin_lock(&mm->page_table_lock);
- ret = mm->context.pte_frag;
- if (ret) {
- pte_frag = ret + PTE_FRAG_SIZE;
- /*
- * If we have taken up all the fragments mark PTE page NULL
- */
- if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
- pte_frag = NULL;
- mm->context.pte_frag = pte_frag;
- }
- spin_unlock(&mm->page_table_lock);
- return (pte_t *)ret;
-}
-
-static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
-{
- void *ret = NULL;
- struct page *page;
-
- if (!kernel) {
- page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
- if (!page)
- return NULL;
- if (!pgtable_page_ctor(page)) {
- __free_page(page);
- return NULL;
- }
- } else {
- page = alloc_page(PGALLOC_GFP);
- if (!page)
- return NULL;
- }
-
- atomic_set(&page->pt_frag_refcount, 1);
-
- ret = page_address(page);
- /*
- * if we support only one fragment just return the
- * allocated page.
- */
- if (PTE_FRAG_NR == 1)
- return ret;
- spin_lock(&mm->page_table_lock);
- /*
- * If we find pgtable_page set, we return
- * the allocated page with single fragement
- * count.
- */
- if (likely(!mm->context.pte_frag)) {
- atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
- mm->context.pte_frag = ret + PTE_FRAG_SIZE;
- }
- spin_unlock(&mm->page_table_lock);
-
- return (pte_t *)ret;
-}
-
-pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
-{
- pte_t *pte;
-
- pte = get_pte_from_cache(mm);
- if (pte)
- return pte;
-
- return __alloc_for_ptecache(mm, kernel);
-}
-
-void pte_fragment_free(unsigned long *table, int kernel)
-{
- struct page *page = virt_to_page(table);
-
- BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
- if (atomic_dec_and_test(&page->pt_frag_refcount)) {
- if (!kernel)
- pgtable_page_dtor(page);
- __free_page(page);
- }
-}
-
static inline void pgtable_free(void *table, int index)
{
switch (index) {
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
new file mode 100644
index 000000000000..af23a587f019
--- /dev/null
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Handling Page Tables through page fragments
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <linux/hugetlb.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+void pte_frag_destroy(void *pte_frag)
+{
+ int count;
+ struct page *page;
+
+ page = virt_to_page(pte_frag);
+ /* drop all the pending references */
+ count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
+ /* We allow PTE_FRAG_NR fragments from a PTE page */
+ if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
+ pgtable_page_dtor(page);
+ __free_page(page);
+ }
+}
+
+static pte_t *get_pte_from_cache(struct mm_struct *mm)
+{
+ void *pte_frag, *ret;
+
+ if (PTE_FRAG_NR == 1)
+ return NULL;
+
+ spin_lock(&mm->page_table_lock);
+ ret = pte_frag_get(&mm->context);
+ if (ret) {
+ pte_frag = ret + PTE_FRAG_SIZE;
+ /*
+	 * If we have taken up all the fragments, mark the PTE page NULL
+ */
+ if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
+ pte_frag = NULL;
+ pte_frag_set(&mm->context, pte_frag);
+ }
+ spin_unlock(&mm->page_table_lock);
+ return (pte_t *)ret;
+}
+
+static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
+{
+ void *ret = NULL;
+ struct page *page;
+
+ if (!kernel) {
+ page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
+ if (!page)
+ return NULL;
+ if (!pgtable_page_ctor(page)) {
+ __free_page(page);
+ return NULL;
+ }
+ } else {
+ page = alloc_page(PGALLOC_GFP);
+ if (!page)
+ return NULL;
+ }
+
+ atomic_set(&page->pt_frag_refcount, 1);
+
+ ret = page_address(page);
+ /*
+	 * If we support only one fragment, just return the
+ * allocated page.
+ */
+ if (PTE_FRAG_NR == 1)
+ return ret;
+ spin_lock(&mm->page_table_lock);
+ /*
+ * If we find pgtable_page set, we return
+	 * the allocated page with a single fragment
+ * count.
+ */
+ if (likely(!pte_frag_get(&mm->context))) {
+ atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
+ pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
+ }
+ spin_unlock(&mm->page_table_lock);
+
+ return (pte_t *)ret;
+}
+
+pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
+{
+ pte_t *pte;
+
+ pte = get_pte_from_cache(mm);
+ if (pte)
+ return pte;
+
+ return __alloc_for_ptecache(mm, kernel);
+}
+
+void pte_fragment_free(unsigned long *table, int kernel)
+{
+ struct page *page = virt_to_page(table);
+
+ BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
+ if (atomic_dec_and_test(&page->pt_frag_refcount)) {
+ if (!kernel)
+ pgtable_page_dtor(page);
+ __free_page(page);
+ }
+}
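
The fragment accounting in this file works as follows: a freshly allocated page starts with pt_frag_refcount == 1, is bumped to PTE_FRAG_NR once published as the mm's fragment pool, one reference per fragment, and each pte_fragment_free() drops one; pte_frag_destroy() then drops the references of the fragments that were never handed out, recovered from the cursor's offset within the page. A toy model with assumed geometry (64K pages, 4K fragments, hence PTE_FRAG_NR == 16):

#include <assert.h>

#define FRAG_SHIFT	12	/* PTE_FRAG_SIZE_SHIFT, assumed */
#define FRAG_NR		16	/* PTE_FRAG_NR, assumed */

int main(void)
{
	unsigned long cursor_offset = 0x5000;	/* next unused fragment */
	int handed_out = cursor_offset >> FRAG_SHIFT;	/* 5 in use */
	int refcount = FRAG_NR;			/* set when published */

	refcount -= FRAG_NR - handed_out;	/* drop the 11 unused refs */
	assert(refcount == handed_out);
	/* the page is finally freed when the 5 live users free theirs */
	return 0;
}
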
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 010e1c616cb2..d3d61d29b4f1 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -74,7 +74,7 @@ static struct page *maybe_pte_to_page(pte_t pte)
* support falls into the same category.
*/
-static pte_t set_pte_filter(pte_t pte)
+static pte_t set_pte_filter_hash(pte_t pte)
{
if (radix_enabled())
return pte;
@@ -93,14 +93,12 @@ static pte_t set_pte_filter(pte_t pte)
return pte;
}
-static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
- int dirty)
-{
- return pte;
-}
-
#else /* CONFIG_PPC_BOOK3S */
+static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
+
+#endif /* CONFIG_PPC_BOOK3S */
+
/* Embedded type MMU with HW exec support. This is a bit more complicated
* as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
* instead we "filter out" the exec permission for non clean pages.
@@ -109,6 +107,9 @@ static pte_t set_pte_filter(pte_t pte)
{
struct page *pg;
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ return set_pte_filter_hash(pte);
+
/* No exec permission in the first place, move on */
if (!pte_exec(pte) || !pte_looks_normal(pte))
return pte;
@@ -138,6 +139,9 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
{
struct page *pg;
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ return pte;
+
/* So here, we only care about exec faults, as we use them
* to recover lost _PAGE_EXEC and perform I$/D$ coherency
* if necessary. Also if _PAGE_EXEC is already set, same deal,
@@ -172,8 +176,6 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
return pte_mkexec(pte);
}
-#endif /* CONFIG_PPC_BOOK3S */
-
/*
* set_pte stores a linux PTE into the linux page table.
*/
@@ -221,9 +223,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
}
#ifdef CONFIG_HUGETLB_PAGE
-extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep,
- pte_t pte, int dirty)
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
/*
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index bda3c6f1bd32..d67215248d82 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -45,32 +45,15 @@ extern char etext[], _stext[], _sinittext[], _einittext[];
__ref pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
- pte_t *pte;
+ if (!slab_is_available())
+ return memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
- if (slab_is_available()) {
- pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
- } else {
- pte = __va(memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE));
- if (pte)
- clear_page(pte);
- }
- return pte;
+ return (pte_t *)pte_fragment_alloc(mm, address, 1);
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
- struct page *ptepage;
-
- gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT;
-
- ptepage = alloc_pages(flags, 0);
- if (!ptepage)
- return NULL;
- if (!pgtable_page_ctor(ptepage)) {
- __free_page(ptepage);
- return NULL;
- }
- return ptepage;
+ return (pgtable_t)pte_fragment_alloc(mm, address, 0);
}
void __iomem *
@@ -160,7 +143,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
*/
- if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
+ if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
page_is_ram(__phys_to_pfn(p))) {
printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
(unsigned long long)p, __builtin_return_address(0));
@@ -260,7 +243,7 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
ktext = ((char *)v >= _stext && (char *)v < etext) ||
((char *)v >= _sinittext && (char *)v < _einittext);
map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
-#ifdef CONFIG_PPC_STD_MMU_32
+#ifdef CONFIG_PPC_BOOK3S_32
if (ktext)
hash_preload(&init_mm, v, false, 0x300);
#endif
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index b271b283c785..587807763737 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/pkeys.c
@@ -6,20 +6,21 @@
*/
#include <asm/mman.h>
+#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <linux/pkeys.h>
#include <linux/of_device.h>
DEFINE_STATIC_KEY_TRUE(pkey_disabled);
-bool pkey_execute_disable_supported;
int pkeys_total; /* Total pkeys as per device tree */
-bool pkeys_devtree_defined; /* pkey property exported by device tree */
u32 initial_allocation_mask; /* Bits set for the initially allocated keys */
u32 reserved_allocation_mask; /* Bits set for reserved keys */
-u64 pkey_amr_mask; /* Bits in AMR not to be touched */
-u64 pkey_iamr_mask; /* Bits in AMR not to be touched */
-u64 pkey_uamor_mask; /* Bits in UMOR not to be touched */
-int execute_only_key = 2;
+static bool pkey_execute_disable_supported;
+static bool pkeys_devtree_defined; /* property exported by device tree */
+static u64 pkey_amr_mask; /* Bits in AMR not to be touched */
+static u64 pkey_iamr_mask; /* Bits in AMR not to be touched */
+static u64 pkey_uamor_mask; /* Bits in UMOR not to be touched */
+static int execute_only_key = 2;
#define AMR_BITS_PER_PKEY 2
#define AMR_RD_BIT 0x1UL
@@ -57,7 +58,7 @@ static inline bool pkey_mmu_enabled(void)
return cpu_has_feature(CPU_FTR_PKEY);
}
-int pkey_initialize(void)
+static int pkey_initialize(void)
{
int os_reserved, i;
@@ -414,3 +415,13 @@ bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
return pkey_access_permitted(vma_pkey(vma), write, execute);
}
+
+void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
+{
+ if (static_branch_likely(&pkey_disabled))
+ return;
+
+ /* Duplicate the oldmm pkey state in mm: */
+ mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
+ mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
+}
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index f6f575bae3bc..3f4193201ee7 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -31,6 +31,7 @@
#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
+#include <asm/code-patching.h>
#include "mmu_decl.h"
@@ -52,7 +53,7 @@ struct batrange { /* stores address ranges mapped by BATs */
phys_addr_t v_block_mapped(unsigned long va)
{
int b;
- for (b = 0; b < 4; ++b)
+ for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
return bat_addrs[b].phys + (va - bat_addrs[b].start);
return 0;
@@ -64,7 +65,7 @@ phys_addr_t v_block_mapped(unsigned long va)
unsigned long p_block_mapped(phys_addr_t pa)
{
int b;
- for (b = 0; b < 4; ++b)
+ for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
if (pa >= bat_addrs[b].phys
&& pa < (bat_addrs[b].limit-bat_addrs[b].start)
+bat_addrs[b].phys)
@@ -182,22 +183,8 @@ void __init MMU_init_hw(void)
unsigned int hmask, mb, mb2;
unsigned int n_hpteg, lg_n_hpteg;
- extern unsigned int hash_page_patch_A[];
- extern unsigned int hash_page_patch_B[], hash_page_patch_C[];
- extern unsigned int hash_page[];
- extern unsigned int flush_hash_patch_A[], flush_hash_patch_B[];
-
- if (!mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
- /*
- * Put a blr (procedure return) instruction at the
- * start of hash_page, since we can still get DSI
- * exceptions on a 603.
- */
- hash_page[0] = 0x4e800020;
- flush_icache_range((unsigned long) &hash_page[0],
- (unsigned long) &hash_page[1]);
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
return;
- }
if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
@@ -244,31 +231,19 @@ void __init MMU_init_hw(void)
if (lg_n_hpteg > 16)
mb2 = 16 - LG_HPTEG_SIZE;
- hash_page_patch_A[0] = (hash_page_patch_A[0] & ~0xffff)
- | ((unsigned int)(Hash) >> 16);
- hash_page_patch_A[1] = (hash_page_patch_A[1] & ~0x7c0) | (mb << 6);
- hash_page_patch_A[2] = (hash_page_patch_A[2] & ~0x7c0) | (mb2 << 6);
- hash_page_patch_B[0] = (hash_page_patch_B[0] & ~0xffff) | hmask;
- hash_page_patch_C[0] = (hash_page_patch_C[0] & ~0xffff) | hmask;
-
- /*
- * Ensure that the locations we've patched have been written
- * out from the data cache and invalidated in the instruction
- * cache, on those machines with split caches.
- */
- flush_icache_range((unsigned long) &hash_page_patch_A[0],
- (unsigned long) &hash_page_patch_C[1]);
+ modify_instruction_site(&patch__hash_page_A0, 0xffff, (unsigned int)Hash >> 16);
+ modify_instruction_site(&patch__hash_page_A1, 0x7c0, mb << 6);
+ modify_instruction_site(&patch__hash_page_A2, 0x7c0, mb2 << 6);
+ modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
+ modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);
/*
* Patch up the instructions in hashtable.S:flush_hash_page
*/
- flush_hash_patch_A[0] = (flush_hash_patch_A[0] & ~0xffff)
- | ((unsigned int)(Hash) >> 16);
- flush_hash_patch_A[1] = (flush_hash_patch_A[1] & ~0x7c0) | (mb << 6);
- flush_hash_patch_A[2] = (flush_hash_patch_A[2] & ~0x7c0) | (mb2 << 6);
- flush_hash_patch_B[0] = (flush_hash_patch_B[0] & ~0xffff) | hmask;
- flush_icache_range((unsigned long) &flush_hash_patch_A[0],
- (unsigned long) &flush_hash_patch_B[1]);
+ modify_instruction_site(&patch__flush_hash_A0, 0xffff, (unsigned int)Hash >> 16);
+ modify_instruction_site(&patch__flush_hash_A1, 0x7c0, mb << 6);
+ modify_instruction_site(&patch__flush_hash_A2, 0x7c0, mb2 << 6);
+ modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
}
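
Each modify_instruction_site() call performs the same read-modify-write the removed open-coded lines did; a minimal sketch of the per-site operation, assuming the patched word is then flushed from the icache as before:

/* 'insn' is the instruction word the patch_site label recorded */
static void patch_insn_imm(unsigned int *insn, unsigned int mask,
			   unsigned int val)
{
	*insn = (*insn & ~mask) | (val & mask);
}
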
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 7fd20c52a8ec..9ed90064f542 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -70,6 +70,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
std r15,EX_TLB_R15(r12)
std r10,EX_TLB_CR(r12)
#ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+ mfspr r11, SPRN_SRR1
+ andi. r10,r11,MSR_PR
+ beq 1f
+ BTB_FLUSH(r10)
+1:
+END_BTB_FLUSH_SECTION
std r7,EX_TLB_R7(r12)
#endif
TLB_MISS_PROLOG_STATS
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 47fc6660845d..c2d5192ed64f 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -152,6 +152,10 @@
___PPC_RS(a) | ___PPC_RB(s))
#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_SRAW(d, a, s) EMIT(PPC_INST_SRAW | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_SRAWI(d, a, i) EMIT(PPC_INST_SRAWI | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i))
#define PPC_SRD(d, a, s) EMIT(PPC_INST_SRD | ___PPC_RA(d) | \
___PPC_RS(a) | ___PPC_RB(s))
#define PPC_SRAD(d, a, s) EMIT(PPC_INST_SRAD | ___PPC_RA(d) | \
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index d5bfe24bb3b5..91d223cf512b 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -379,18 +379,17 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
hash));
break;
case BPF_ANC | SKF_AD_VLAN_TAG:
- case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
vlan_tci));
- if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
- PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
- } else {
- PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
- PPC_SRWI(r_A, r_A, 12);
- }
+ break;
+ case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
+ PPC_LBZ_OFFS(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET());
+ if (PKT_VLAN_PRESENT_BIT)
+ PPC_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT);
+ if (PKT_VLAN_PRESENT_BIT < 7)
+ PPC_ANDI(r_A, r_A, 1);
break;
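
In C terms the emitted sequence computes the following; since lbz zero-extends the loaded byte, a flag already sitting in bit 7 is isolated by the shift alone and the andi can be skipped:

static unsigned int vlan_present(unsigned char flags, unsigned int bit)
{
	unsigned int a = flags >> bit;	/* PPC_SRWI */

	if (bit < 7)			/* higher flag bits may remain */
		a &= 1;			/* PPC_ANDI */
	return a;
}
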
case BPF_ANC | SKF_AD_QUEUE:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 50b129785aee..7ce57657d3b8 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -166,7 +166,33 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
PPC_BLR();
}
-static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
+static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
+ u64 func)
+{
+#ifdef PPC64_ELF_ABI_v1
+ /* func points to the function descriptor */
+ PPC_LI64(b2p[TMP_REG_2], func);
+ /* Load actual entry point from function descriptor */
+ PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
+ /* ... and move it to LR */
+ PPC_MTLR(b2p[TMP_REG_1]);
+ /*
+ * Load TOC from function descriptor at offset 8.
+ * We can clobber r2 since we get called through a
+ * function pointer (so caller will save/restore r2)
+ * and since we don't use a TOC ourself.
+ */
+ PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
+#else
+ /* We can clobber r12 */
+ PPC_FUNC_ADDR(12, func);
+ PPC_MTLR(12);
+#endif
+ PPC_BLRL();
+}
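
Under the ELFv1 ABI a function pointer addresses a function descriptor (OPD entry) rather than code; the two loads above pick out its first two fields. The layout, with illustrative field names:

struct func_desc {
	unsigned long entry;	/* offset 0: moved to LR above */
	unsigned long toc;	/* offset 8: loaded into r2 */
	unsigned long env;	/* unused here */
};
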
+
+static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
+ u64 func)
{
unsigned int i, ctx_idx = ctx->idx;
@@ -273,7 +299,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
{
const struct bpf_insn *insn = fp->insnsi;
int flen = fp->len;
- int i;
+ int i, ret;
/* Start of epilogue code - will only be valid 2nd pass onwards */
u32 exit_addr = addrs[flen];
@@ -284,8 +310,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
u32 src_reg = b2p[insn[i].src_reg];
s16 off = insn[i].off;
s32 imm = insn[i].imm;
+ bool func_addr_fixed;
+ u64 func_addr;
u64 imm64;
- u8 *func;
u32 true_cond;
u32 tmp_idx;
@@ -502,9 +529,15 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
if (imm != 0)
PPC_SRDI(dst_reg, dst_reg, imm);
break;
+ case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
+ PPC_SRAW(dst_reg, dst_reg, src_reg);
+ goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
PPC_SRAD(dst_reg, dst_reg, src_reg);
break;
+ case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
+ PPC_SRAWI(dst_reg, dst_reg, imm);
+ goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
if (imm != 0)
PPC_SRADI(dst_reg, dst_reg, imm);
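
PPC_SRAW/PPC_SRAWI are needed above because BPF_ARSH is a signed shift; the logical srw would zero-fill the sign bits. In C terms (assuming the usual arithmetic behaviour of >> on signed values):

static int arsh_demo(void)
{
	int s = -8;
	unsigned int u = 0xfffffff8u;	/* same bit pattern as s */

	int arith = s >> 1;		/* -4: what sraw produces */
	unsigned int logical = u >> 1;	/* 0x7ffffffc: what srw produces */

	return arith + (int)logical;
}
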
@@ -711,23 +744,15 @@ emit_clear:
case BPF_JMP | BPF_CALL:
ctx->seen |= SEEN_FUNC;
- /* bpf function call */
- if (insn[i].src_reg == BPF_PSEUDO_CALL)
- if (!extra_pass)
- func = NULL;
- else if (fp->aux->func && off < fp->aux->func_cnt)
- /* use the subprog id from the off
- * field to lookup the callee address
- */
- func = (u8 *) fp->aux->func[off]->bpf_func;
- else
- return -EINVAL;
- /* kernel helper call */
- else
- func = (u8 *) __bpf_call_base + imm;
-
- bpf_jit_emit_func_call(image, ctx, (u64)func);
+ ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
+ &func_addr, &func_addr_fixed);
+ if (ret < 0)
+ return ret;
+ if (func_addr_fixed)
+ bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+ else
+ bpf_jit_emit_func_call_rel(image, ctx, func_addr);
/* move return value from r3 to BPF_REG_0 */
PPC_MR(b2p[BPF_REG_0], 3);
break;
@@ -872,6 +897,55 @@ cond_branch:
return 0;
}
+/* Fix the branch target addresses for subprog calls */
+static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
+ struct codegen_context *ctx, u32 *addrs)
+{
+ const struct bpf_insn *insn = fp->insnsi;
+ bool func_addr_fixed;
+ u64 func_addr;
+ u32 tmp_idx;
+ int i, ret;
+
+ for (i = 0; i < fp->len; i++) {
+ /*
+ * During the extra pass, only the branch target addresses for
+ * the subprog calls need to be fixed. All other instructions
+	 * can be left untouched.
+ *
+ * The JITed image length does not change because we already
+	 * ensure that the JITed instruction sequences for these calls
+ * are of fixed length by padding them with NOPs.
+ */
+ if (insn[i].code == (BPF_JMP | BPF_CALL) &&
+ insn[i].src_reg == BPF_PSEUDO_CALL) {
+ ret = bpf_jit_get_func_addr(fp, &insn[i], true,
+ &func_addr,
+ &func_addr_fixed);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Save ctx->idx as this would currently point to the
+ * end of the JITed image and set it to the offset of
+ * the instruction sequence corresponding to the
+ * subprog call temporarily.
+ */
+ tmp_idx = ctx->idx;
+ ctx->idx = addrs[i] / 4;
+ bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+
+ /*
+ * Restore ctx->idx here. This is safe as the length
+ * of the JITed sequence remains unchanged.
+ */
+ ctx->idx = tmp_idx;
+ }
+ }
+
+ return 0;
+}
+
struct powerpc64_jit_data {
struct bpf_binary_header *header;
u32 *addrs;
@@ -970,6 +1044,22 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
skip_init_ctx:
code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
+ if (extra_pass) {
+ /*
+ * Do not touch the prologue and epilogue as they will remain
+ * unchanged. Only fix the branch target address for subprog
+ * calls in the body.
+ *
+ * This does not change the offsets and lengths of the subprog
+ * call instruction sequences and hence, the size of the JITed
+ * image as well.
+ */
+ bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
+
+ /* There is no need to perform the usual passes. */
+ goto skip_codegen_passes;
+ }
+
/* Code generation passes 1-2 */
for (pass = 1; pass < 3; pass++) {
/* Now build the prologue, body code & epilogue for real. */
@@ -983,6 +1073,7 @@ skip_init_ctx:
proglen - (cgctx.idx * 4), cgctx.seen);
}
+skip_codegen_passes:
if (bpf_jit_enable > 1)
/*
* Note that we output the base address of the code_base
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index 8d26d7416481..bb2d94c8cbe6 100644
--- a/arch/powerpc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -16,4 +16,4 @@ oprofile-$(CONFIG_OPROFILE_CELL) += op_model_cell.o \
cell/spu_task_sync.o
oprofile-$(CONFIG_PPC_BOOK3S_64) += op_model_power4.o op_model_pa6t.o
oprofile-$(CONFIG_FSL_EMB_PERFMON) += op_model_fsl_emb.o
-oprofile-$(CONFIG_6xx) += op_model_7450.o
+oprofile-$(CONFIG_PPC_BOOK3S_32) += op_model_7450.o
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index bf094c5a4bd9..a11132865504 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -212,7 +212,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
model = &op_model_pa6t;
break;
#endif
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
case PPC_OPROFILE_G4:
model = &op_model_7450;
break;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 81f8a0c838ae..b0723002a396 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -10,6 +10,7 @@
*/
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/sched/clock.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
@@ -130,6 +131,14 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
#endif /* CONFIG_PPC32 */
+bool is_sier_available(void)
+{
+ if (ppmu->flags & PPMU_HAS_SIER)
+ return true;
+
+ return false;
+}
+
static bool regs_use_siar(struct pt_regs *regs)
{
/*
@@ -864,6 +873,8 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw,
int i, j;
unsigned long addf = ppmu->add_fields;
unsigned long tadd = ppmu->test_adder;
+ unsigned long grp_mask = ppmu->group_constraint_mask;
+ unsigned long grp_val = ppmu->group_constraint_val;
if (n_ev > ppmu->n_counter)
return -1;
@@ -884,15 +895,23 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw,
for (i = 0; i < n_ev; ++i) {
nv = (value | cpuhw->avalues[i][0]) +
(value & cpuhw->avalues[i][0] & addf);
- if ((((nv + tadd) ^ value) & mask) != 0 ||
- (((nv + tadd) ^ cpuhw->avalues[i][0]) &
- cpuhw->amasks[i][0]) != 0)
+
+ if (((((nv + tadd) ^ value) & mask) & (~grp_mask)) != 0)
+ break;
+
+ if (((((nv + tadd) ^ cpuhw->avalues[i][0]) & cpuhw->amasks[i][0])
+ & (~grp_mask)) != 0)
break;
+
value = nv;
mask |= cpuhw->amasks[i][0];
}
- if (i == n_ev)
- return 0; /* all OK */
+ if (i == n_ev) {
+ if ((value & mask & grp_mask) != (mask & grp_val))
+ return -1;
+ else
+ return 0; /* all OK */
+ }
/* doesn't work, gather alternatives... */
if (!ppmu->get_alternatives)
@@ -2148,7 +2167,7 @@ static bool pmc_overflow(unsigned long val)
/*
* Performance monitor interrupt stuff
*/
-static void perf_event_interrupt(struct pt_regs *regs)
+static void __perf_event_interrupt(struct pt_regs *regs)
{
int i, j;
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
@@ -2232,6 +2251,14 @@ static void perf_event_interrupt(struct pt_regs *regs)
irq_exit();
}
+static void perf_event_interrupt(struct pt_regs *regs)
+{
+ u64 start_clock = sched_clock();
+
+ __perf_event_interrupt(regs);
+ perf_sample_event_took(sched_clock() - start_clock);
+}
+
static int power_pmu_prepare_cpu(unsigned int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 6954636b16d1..f292a3f284f1 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -28,13 +28,13 @@ static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
-struct imc_pmu_ref *nest_imc_refc;
+static struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;
/* Core IMC data structures and variables */
static cpumask_t core_imc_cpumask;
-struct imc_pmu_ref *core_imc_refc;
+static struct imc_pmu_ref *core_imc_refc;
static struct imc_pmu *core_imc_pmu;
/* Thread IMC data structures and variables */
@@ -43,7 +43,7 @@ static DEFINE_PER_CPU(u64 *, thread_imc_mem);
static struct imc_pmu *thread_imc_pmu;
static int thread_imc_mem_size;
-struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
+static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
{
return container_of(event->pmu, struct imc_pmu, pmu);
}
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 177de814286f..a6c24d866b2f 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -148,6 +148,14 @@ static bool is_thresh_cmp_valid(u64 event)
return true;
}
+static unsigned int dc_ic_rld_quad_l1_sel(u64 event)
+{
+ unsigned int cache;
+
+ cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK;
+ return cache;
+}
+
static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
{
u64 ret = PERF_MEM_NA;
@@ -226,8 +234,13 @@ void isa207_get_mem_weight(u64 *weight)
u64 mmcra = mfspr(SPRN_MMCRA);
u64 exp = MMCRA_THR_CTR_EXP(mmcra);
u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
+ u64 sier = mfspr(SPRN_SIER);
+ u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
- *weight = mantissa << (2 * exp);
+ if (val == 0 || val == 7)
+ *weight = 0;
+ else
+ *weight = mantissa << (2 * exp);
}
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
@@ -274,19 +287,27 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
}
if (unit >= 6 && unit <= 9) {
- /*
- * L2/L3 events contain a cache selector field, which is
- * supposed to be programmed into MMCRC. However MMCRC is only
- * HV writable, and there is no API for guest kernels to modify
- * it. The solution is for the hypervisor to initialise the
- * field to zeroes, and for us to only ever allow events that
- * have a cache selector of zero. The bank selector (bit 3) is
- * irrelevant, as long as the rest of the value is 0.
- */
- if (cache & 0x7)
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ mask |= CNST_CACHE_GROUP_MASK;
+ value |= CNST_CACHE_GROUP_VAL(event & 0xff);
+
+ mask |= CNST_CACHE_PMC4_MASK;
+ if (pmc == 4)
+ value |= CNST_CACHE_PMC4_VAL;
+ } else if (cache & 0x7) {
+ /*
+ * L2/L3 events contain a cache selector field, which is
+ * supposed to be programmed into MMCRC. However MMCRC is only
+ * HV writable, and there is no API for guest kernels to modify
+ * it. The solution is for the hypervisor to initialise the
+ * field to zeroes, and for us to only ever allow events that
+ * have a cache selector of zero. The bank selector (bit 3) is
+ * irrelevant, as long as the rest of the value is 0.
+ */
return -1;
+ }
- } else if (event & EVENT_IS_L1) {
+ } else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) {
mask |= CNST_L1_QUAL_MASK;
value |= CNST_L1_QUAL_VAL(cache);
}
@@ -389,11 +410,14 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
/* In continuous sampling mode, update SDAR on TLB miss */
mmcra_sdar_mode(event[i], &mmcra);
- if (event[i] & EVENT_IS_L1) {
- cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
- mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
- cache >>= 1;
- mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ cache = dc_ic_rld_quad_l1_sel(event[i]);
+ mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
+ } else {
+ if (event[i] & EVENT_IS_L1) {
+ cache = dc_ic_rld_quad_l1_sel(event[i]);
+ mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
+ }
}
if (is_event_marked(event[i])) {
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 0028f4b9490d..91350f42a662 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -134,6 +134,11 @@
#define CNST_SAMPLE_VAL(v) (((v) & EVENT_SAMPLE_MASK) << 16)
#define CNST_SAMPLE_MASK CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)
+#define CNST_CACHE_GROUP_VAL(v) (((v) & 0xffull) << 55)
+#define CNST_CACHE_GROUP_MASK CNST_CACHE_GROUP_VAL(0xff)
+#define CNST_CACHE_PMC4_VAL (1ull << 54)
+#define CNST_CACHE_PMC4_MASK CNST_CACHE_PMC4_VAL
+
/*
* For NC we are counting up to 4 events. This requires three bits, and we need
* the fifth event to overflow and set the 4th bit. To achieve that we bias the
@@ -163,8 +168,8 @@
#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
#define MMCR1_FAB_SHIFT 36
-#define MMCR1_DC_QUAL_SHIFT 47
-#define MMCR1_IC_QUAL_SHIFT 46
+#define MMCR1_DC_IC_QUAL_MASK 0x3
+#define MMCR1_DC_IC_QUAL_SHIFT 46
/* MMCR1 Combine bits macro for power9 */
#define p9_MMCR1_COMBINE_SHIFT(pmc) (38 - ((pmc - 1) * 2))
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index 09ceea6175ba..5c36b3a8d47a 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -69,6 +69,7 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_TRAP, trap),
PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
+ PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
};
u64 perf_reg_value(struct pt_regs *regs, int idx)
@@ -76,6 +77,12 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
if (WARN_ON_ONCE(idx >= PERF_REG_POWERPC_MAX))
return 0;
+ if (idx == PERF_REG_POWERPC_SIER &&
+ (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
+ IS_ENABLED(CONFIG_PPC32) ||
+ !is_sier_available()))
+ return 0;
+
return regs_get_register(regs, pt_regs_offset[idx]);
}
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index e012b1030a5b..0ff9c43733e9 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -63,16 +63,8 @@
* MMCRA[9:11] = thresh_cmp[0:2]
* MMCRA[12:18] = thresh_cmp[3:9]
*
- * if unit == 6 or unit == 7
- * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
- * else if unit == 8 or unit == 9:
- * if cache_sel[0] == 0: # L3 bank
- * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
- * else if cache_sel[0] == 1:
- * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
- * else if cache_sel[1]: # L1 event
- * MMCR1[16] = cache_sel[2]
- * MMCR1[17] = cache_sel[3]
+ * MMCR1[16] = cache_sel[2]
+ * MMCR1[17] = cache_sel[3]
*
* if mark:
* MMCRA[63] = 1 (SAMPLE_ENABLE)
@@ -179,8 +171,6 @@ CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
-CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
-CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
@@ -205,8 +195,6 @@ static struct attribute *power9_events_attr[] = {
CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
CACHE_EVENT_PTR(PM_DATA_FROM_L3),
CACHE_EVENT_PTR(PM_L3_PREF_ALL),
- CACHE_EVENT_PTR(PM_L2_ST_MISS),
- CACHE_EVENT_PTR(PM_L2_ST),
CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
CACHE_EVENT_PTR(PM_BR_CMPL),
CACHE_EVENT_PTR(PM_DTLB_MISS),
@@ -354,8 +342,8 @@ static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
},
[ C(OP_WRITE) ] = {
- [ C(RESULT_ACCESS) ] = PM_L2_ST,
- [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
@@ -427,6 +415,8 @@ static struct power_pmu power9_pmu = {
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
.test_adder = ISA207_TEST_ADDER,
+ .group_constraint_mask = CNST_CACHE_PMC4_MASK,
+ .group_constraint_val = CNST_CACHE_PMC4_VAL,
.compute_mmcr = isa207_compute_mmcr,
.config_bhrb = power9_config_bhrb,
.bhrb_filter_map = power9_bhrb_filter_map,
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index a886c2c22097..f467247fd1c4 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -47,7 +47,7 @@ static int __init warp_probe(void)
if (!of_machine_is_compatible("pika,warp"))
return 0;
- /* For __dma_alloc_coherent */
+ /* For __dma_nommu_alloc_coherent */
ISA_DMA_THRESHOLD = ~0L;
return 1;
@@ -179,9 +179,9 @@ static int pika_setup_leds(void)
}
for_each_child_of_node(np, child)
- if (strcmp(child->name, "green") == 0)
+ if (of_node_name_eq(child, "green"))
green_led = of_get_gpio(child, 0);
- else if (strcmp(child->name, "red") == 0)
+ else if (of_node_name_eq(child, "red"))
red_led = of_get_gpio(child, 0);
of_node_put(np);
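These conversions replace direct strcmp() on np->name with the of_node_name_eq() accessor, which also tolerates a NULL node. A minimal kernel-context sketch of the pattern, modelled on the LED child nodes above:

#include <linux/of.h>
#include <linux/of_gpio.h>

/* Look up a GPIO by child-node name without touching np->name directly. */
static int gpio_for_named_child(struct device_node *np, const char *name)
{
	struct device_node *child;
	int gpio = -ENODEV;

	for_each_child_of_node(np, child)
		if (of_node_name_eq(child, name))
			gpio = of_get_gpio(child, 0);

	return gpio;
}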
diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c
index f5bbd4563342..f2610a02844a 100644
--- a/arch/powerpc/platforms/4xx/ocm.c
+++ b/arch/powerpc/platforms/4xx/ocm.c
@@ -223,8 +223,6 @@ static void __init ocm_init_node(int count, struct device_node *node)
INIT_LIST_HEAD(&ocm->c.list);
ocm->ready = 1;
-
- return;
}
static int ocm_debugfs_show(struct seq_file *m, void *v)
@@ -242,9 +240,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
seq_printf(m, "PhysAddr : 0x%llx\n", ocm->phys);
seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal);
seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal);
- seq_printf(m, "MemTotal(C) : %d Bytes\n", ocm->c.memtotal);
-
- seq_printf(m, "\n");
+ seq_printf(m, "MemTotal(C) : %d Bytes\n\n", ocm->c.memtotal);
seq_printf(m, "NC.PhysAddr : 0x%llx\n", ocm->nc.phys);
seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt);
@@ -256,9 +252,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
blk->size, blk->owner);
}
- seq_printf(m, "\n");
-
- seq_printf(m, "C.PhysAddr : 0x%llx\n", ocm->c.phys);
+ seq_printf(m, "\nC.PhysAddr : 0x%llx\n", ocm->c.phys);
seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt);
seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal);
seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree);
@@ -268,7 +262,7 @@ static int ocm_debugfs_show(struct seq_file *m, void *v)
blk->size, blk->owner);
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
return 0;
@@ -338,7 +332,6 @@ void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
ocm_blk = kzalloc(sizeof(*ocm_blk), GFP_KERNEL);
if (!ocm_blk) {
- printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block");
rh_free(ocm_reg->rh, offset);
break;
}
@@ -392,10 +385,8 @@ static int __init ppc4xx_ocm_init(void)
return 0;
ocm_nodes = kzalloc((count * sizeof(struct ocm_info)), GFP_KERNEL);
- if (!ocm_nodes) {
- printk(KERN_ERR "PPC4XX OCM: failed to allocate OCM nodes!\n");
+ if (!ocm_nodes)
return -ENOMEM;
- }
ocm_count = count;
count = 0;
diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c
index 5aca523551ae..e6e2adcc7b64 100644
--- a/arch/powerpc/platforms/4xx/pci.c
+++ b/arch/powerpc/platforms/4xx/pci.c
@@ -1399,7 +1399,6 @@ static void __init ppc_476fpe_pciex_check_link(struct ppc4xx_pciex_port *port)
printk(KERN_WARNING "PCIE%d: Link up failed\n", port->index);
iounmap(mbase);
- return;
}
static struct ppc4xx_pciex_hwops ppc_476fpe_pcie_hwops __initdata =
@@ -2081,7 +2080,6 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
const u32 *pval;
int portno;
unsigned int dcrs;
- const char *val;
/* First, proceed to core initialization as we assume there's
* only one PCIe core in the system
@@ -2127,10 +2125,9 @@ static void __init ppc4xx_probe_pciex_bridge(struct device_node *np)
* Resulting from this setup this PCIe port will be configured
* as root-complex or as endpoint.
*/
- val = of_get_property(port->node, "device_type", NULL);
- if (!strcmp(val, "pci-endpoint")) {
+ if (of_node_is_type(port->node, "pci-endpoint")) {
port->endpoint = 1;
- } else if (!strcmp(val, "pci")) {
+ } else if (of_node_is_type(port->node, "pci")) {
port->endpoint = 0;
} else {
printk(KERN_ERR "PCIE: missing or incorrect device_type for %pOF\n",
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index b59eab6cbb1b..0c495823152c 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config PPC_MPC512x
bool "512x-based boards"
- depends on 6xx
+ depends on PPC_BOOK3S_32
select COMMON_CLK
select FSL_SOC
select IPIC
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 55a587070342..67f8c2d8fc0e 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config PPC_MPC52xx
bool "52xx-based boards"
- depends on 6xx
+ depends on PPC_BOOK3S_32
select COMMON_CLK
select PPC_PCI_CHOICE
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 1ecbf176d35a..61538869e88a 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -82,11 +82,9 @@ static void __init efika_pcisetup(void)
return;
}
- for (pcictrl = NULL;;) {
- pcictrl = of_get_next_child(root, pcictrl);
- if ((pcictrl == NULL) || (strcmp(pcictrl->name, "pci") == 0))
+ for_each_child_of_node(root, pcictrl)
+ if (of_node_name_eq(pcictrl, "pci"))
break;
- }
of_node_put(root);
diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig
index 1947a88bc69f..1af81de1c4e6 100644
--- a/arch/powerpc/platforms/82xx/Kconfig
+++ b/arch/powerpc/platforms/82xx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig PPC_82xx
bool "82xx-based boards (PQ II)"
- depends on 6xx
+ depends on PPC_BOOK3S_32
if PPC_82xx
@@ -54,7 +54,7 @@ config PQ2ADS
config 8260
bool
- depends on 6xx
+ depends on PPC_BOOK3S_32
select CPM2
help
The MPC8260 is a typical embedded CPU made by Freescale. Selecting
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 071f53b0c0a0..ff0c69dfdf1a 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig PPC_83xx
bool "83xx-based boards"
- depends on 6xx
+ depends on PPC_BOOK3S_32
select PPC_UDBG_16550
select PPC_PCI_CHOICE
select FSL_PCI if PCI
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index d75c9816a5c9..2b6589fe812d 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -14,6 +14,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
+#include <asm/debug.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/ipic.h>
@@ -150,3 +151,19 @@ void __init mpc83xx_setup_arch(void)
mpc83xx_setup_pci();
}
+
+int machine_check_83xx(struct pt_regs *regs)
+{
+ u32 mask = 1 << (31 - IPIC_MCP_WDT);
+
+ if (!(regs->msr & SRR1_MCE_MCP) || !(ipic_get_mcp_status() & mask))
+ return machine_check_generic(regs);
+ ipic_clear_mcp_status(mask);
+
+ if (debugger_fault_handler(regs))
+ return 1;
+
+ die("Watchdog NMI Reset", regs, 0);
+
+ return 1;
+}
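machine_check_83xx() keys off IPIC's machine-check port status. Note the PowerPC bit-numbering convention in the mask: bit n counts from the most significant bit, so it is 1 << (31 - n) in C. A stand-alone illustration (the status value and bit position below are made up):

#include <stdint.h>
#include <stdio.h>

/* PowerPC documentation numbers bits from the MSB, so "bit n" of a
 * 32-bit register is (1 << (31 - n)) in conventional C terms. */
static uint32_t be_bit(unsigned int n)
{
	return 1u << (31 - n);
}

int main(void)
{
	uint32_t mcp_status = 0x00020000;	/* hypothetical IPIC status */
	unsigned int wdt_bit = 14;		/* assumed bit position */

	if (mcp_status & be_bit(wdt_bit))
		puts("watchdog NMI pending");
	return 0;
}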
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index ac191a7a1337..b0dac307bebf 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -68,16 +68,6 @@ void __init corenet_gen_setup_arch(void)
swiotlb_detect_4g();
-#if defined(CONFIG_FSL_PCI) && defined(CONFIG_ZONE_DMA32)
- /*
- * Inbound windows don't cover the full lower 4 GiB
- * due to conflicts with PCICSRBAR and outbound windows,
- * so limit the DMA32 zone to 2 GiB, to allow consistent
- * allocations to succeed.
- */
- limit_zone_pfn(ZONE_DMA32, 1UL << (31 - PAGE_SHIFT));
-#endif
-
pr_info("%s board\n", ppc_md.name);
mpc85xx_qe_init();
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index b63a8548366f..27631c607f3d 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -45,15 +45,6 @@ static void __init qemu_e500_setup_arch(void)
fsl_pci_assign_primary();
swiotlb_detect_4g();
-#if defined(CONFIG_FSL_PCI) && defined(CONFIG_ZONE_DMA32)
- /*
- * Inbound windows don't cover the full lower 4 GiB
- * due to conflicts with PCICSRBAR and outbound windows,
- * so limit the DMA32 zone to 2 GiB, to allow consistent
- * allocations to succeed.
- */
- limit_zone_pfn(ZONE_DMA32, 1UL << (31 - PAGE_SHIFT));
-#endif
mpc85xx_smp_init();
}
diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
index dac36ba82fea..2d1652108ba1 100644
--- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c
+++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
@@ -39,7 +39,7 @@ struct device_node *cpld_node;
*/
static void t1042rdb_set_monitor_port(enum fsl_diu_monitor_port port)
{
- static void __iomem *cpld_base;
+ void __iomem *cpld_base;
cpld_base = of_iomap(cpld_node, 0);
if (!cpld_base) {
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index bcd179d3ed92..df692aa6b578 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -2,7 +2,7 @@
config PPC_86xx
menuconfig PPC_86xx
bool "86xx-based boards"
- depends on 6xx
+ depends on PPC_BOOK3S_32
select FSL_SOC
select ALTIVEC
help
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 020e84a47a32..9f2c1ecc85c3 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -86,8 +86,7 @@ smp_86xx_kick_cpu(int nr)
mdelay(1);
/* Restore the exception vector */
- *vector = save_vector;
- flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+ patch_instruction(vector, save_vector);
local_irq_restore(flags);
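patch_instruction() subsumes the removed store-plus-flush_icache_range() sequence and additionally copes with kernel text mapped read-only. As a sketch, this is what the open-coded version was doing (kernel context assumed):

#include <asm/cacheflush.h>

/* What the removed lines did by hand: store the instruction, then flush
 * the range so the CPU refetches it. patch_instruction() performs the
 * same cache maintenance and also handles write-protected text. */
static void poke_vector_open_coded(unsigned int *vector, unsigned int insn)
{
	*vector = insn;
	flush_icache_range((unsigned long)vector,
			   (unsigned long)vector + sizeof(insn));
}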
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 260a56b7602d..5c48dd823e15 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -40,7 +40,7 @@ config EPAPR_PARAVIRT
config PPC_NATIVE
bool
- depends on 6xx || PPC64
+ depends on PPC_BOOK3S_32 || PPC64
help
Support for running natively on the hardware, i.e. without
a hypervisor. This option is not user-selectable but should
@@ -48,7 +48,7 @@ config PPC_NATIVE
config PPC_OF_BOOT_TRAMPOLINE
bool "Support booting from Open Firmware or yaboot"
- depends on 6xx || PPC64
+ depends on PPC_BOOK3S_32 || PPC64
default y
help
Support from booting from Open Firmware or yaboot using an
@@ -197,7 +197,7 @@ endmenu
config PPC601_SYNC_FIX
bool "Workarounds for PPC601 bugs"
- depends on 6xx && PPC_PMAC
+ depends on PPC_BOOK3S_32 && PPC_PMAC
help
Some versions of the PPC601 (the first PowerPC chip) have bugs which
mean that extra synchronization instructions are required near
@@ -211,7 +211,7 @@ config PPC601_SYNC_FIX
config TAU
bool "On-chip CPU temperature sensor support"
- depends on 6xx
+ depends on PPC_BOOK3S_32
help
G3 and G4 processors have an on-chip temperature sensor called the
'Thermal Assist Unit (TAU)', which, in theory, can measure the on-die
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index f4e2c5729374..ab176fd3dfb5 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -24,6 +24,7 @@ choice
config PPC_BOOK3S_32
bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx"
select PPC_FPU
+ select PPC_HAVE_PMU_SUPPORT
config PPC_85xx
bool "Freescale 85xx"
@@ -179,11 +180,6 @@ config PPC_BOOK3E
def_bool y
depends on PPC_BOOK3E_64
-config 6xx
- def_bool y
- depends on PPC32 && PPC_BOOK3S
- select PPC_HAVE_PMU_SUPPORT
-
config E500
select FSL_EMB_PERFMON
select PPC_FSL_BOOK3E
@@ -266,7 +262,7 @@ config PHYS_64BIT
config ALTIVEC
bool "AltiVec Support"
- depends on 6xx || PPC_BOOK3S_64 || (PPC_E500MC && PPC64)
+ depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 || (PPC_E500MC && PPC64)
---help---
This option enables kernel support for the Altivec extensions to the
PowerPC processor. The kernel currently supports saving and restoring
@@ -316,14 +312,6 @@ config SPE
If in doubt, say Y here.
-config PPC_STD_MMU
- def_bool y
- depends on PPC_BOOK3S
-
-config PPC_STD_MMU_32
- def_bool y
- depends on PPC_STD_MMU && PPC32
-
config ARCH_ENABLE_SPLIT_PMD_PTLOCK
def_bool y
depends on PPC_BOOK3S_64
@@ -358,7 +346,7 @@ config ARCH_ENABLE_HUGEPAGE_MIGRATION
config PPC_MMU_NOHASH
def_bool y
- depends on !PPC_STD_MMU
+ depends on !PPC_BOOK3S
config PPC_BOOK3E_MMU
def_bool y
@@ -412,7 +400,8 @@ config NR_CPUS
config NOT_COHERENT_CACHE
bool
- depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
+ depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || \
+ GAMECUBE_COMMON || AMIGAONE
default n if PPC_47x
default y
diff --git a/arch/powerpc/platforms/amigaone/Kconfig b/arch/powerpc/platforms/amigaone/Kconfig
index 03dc1e37c25b..e03d26d41957 100644
--- a/arch/powerpc/platforms/amigaone/Kconfig
+++ b/arch/powerpc/platforms/amigaone/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config AMIGAONE
bool "Eyetech AmigaOne/MAI Teron"
- depends on 6xx && BROKEN_ON_SMP
+ depends on PPC_BOOK3S_32 && BROKEN_ON_SMP
select PPC_I8259
select PPC_INDIRECT_PCI
select PPC_UDBG_16550
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index b926438d73af..27ee65b89099 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -53,7 +53,7 @@ static struct cbe_regs_map *cbe_find_map(struct device_node *np)
int i;
struct device_node *tmp_np;
- if (strcasecmp(np->type, "spe")) {
+ if (!of_node_is_type(np, "spe")) {
for (i = 0; i < cbe_regs_map_count; i++)
if (cbe_regs_maps[i].cpu_node == np ||
cbe_regs_maps[i].be_node == np)
@@ -70,8 +70,8 @@ static struct cbe_regs_map *cbe_find_map(struct device_node *np)
tmp_np = tmp_np->parent;
 /* on a correct devicetree we won't get up to root */
BUG_ON(!tmp_np);
- } while (strcasecmp(tmp_np->type, "cpu") &&
- strcasecmp(tmp_np->type, "be"));
+ } while (!of_node_is_type(tmp_np, "cpu") &&
+ !of_node_is_type(tmp_np, "be"));
np->data = cbe_find_map(tmp_np);
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 7d31b8d14661..e2e1371a71e2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -131,7 +131,7 @@ static int cell_setup_phb(struct pci_controller *phb)
np = phb->dn;
model = of_get_property(np, "model", NULL);
- if (model == NULL || strcmp(np->name, "pci"))
+ if (model == NULL || !of_node_name_eq(np, "pci"))
return 0;
/* Setup workarounds for spider */
@@ -168,8 +168,7 @@ static int __init cell_publish_devices(void)
* platform devices for the PCI host bridges
*/
for_each_child_of_node(root, np) {
- if (np->type == NULL || (strcmp(np->type, "pci") != 0 &&
- strcmp(np->type, "pciex") != 0))
+ if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "pciex"))
continue;
of_platform_device_create(np, NULL, NULL);
}
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index 8ae86200ef6c..125f2a5f02de 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -34,20 +34,9 @@
*/
static void *spu_syscall_table[] = {
-#define SYSCALL(func) sys_ni_syscall,
-#define COMPAT_SYS(func) sys_ni_syscall,
-#define PPC_SYS(func) sys_ni_syscall,
-#define OLDSYS(func) sys_ni_syscall,
-#define SYS32ONLY(func) sys_ni_syscall,
-#define PPC64ONLY(func) sys_ni_syscall,
-#define SYSX(f, f3264, f32) sys_ni_syscall,
-
-#define SYSCALL_SPU(func) sys_##func,
-#define COMPAT_SYS_SPU(func) sys_##func,
-#define COMPAT_SPU_NEW(func) sys_##func,
-#define SYSX_SPU(f, f3264, f32) f,
-
-#include <asm/systbl.h>
+#define __SYSCALL(nr, entry, nargs) entry,
+#include <asm/syscall_table_spu.h>
+#undef __SYSCALL
};
long spu_sys_callback(struct spu_syscall_block *s)
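The pile of per-flavour SYSCALL macros is replaced by a single X-macro pass over the generated syscall table header. A stand-alone illustration of the technique with an invented two-entry list:

#include <stdio.h>

/* The generated header is simulated by SYSCALL_LIST here. */
#define SYSCALL_LIST \
	__SYSCALL(0, sys_restart_syscall, 0) \
	__SYSCALL(3, sys_read, 3)

static void sys_restart_syscall(void) { puts("restart"); }
static void sys_read(void) { puts("read"); }

/* Each pass defines __SYSCALL to emit one table entry per line. */
#define __SYSCALL(nr, entry, nargs) entry,
static void (*table[])(void) = { SYSCALL_LIST };
#undef __SYSCALL

int main(void)
{
	table[1]();	/* calls sys_read */
	return 0;
}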
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index f7e36373f6e0..bed935c51ec2 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -458,7 +458,6 @@ static void init_affinity_node(int cbe)
struct device_node *vic_dn, *last_spu_dn;
phandle avoid_ph;
const phandle *vic_handles;
- const char *name;
int lenp, i, added;
last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu,
@@ -480,12 +479,7 @@ static void init_affinity_node(int cbe)
if (!vic_dn)
continue;
- /* a neighbour might be spe, mic-tm, or bif0 */
- name = of_get_property(vic_dn, "name", NULL);
- if (!name)
- continue;
-
- if (strcmp(name, "spe") == 0) {
+ if (of_node_name_eq(vic_dn, "spe")) {
spu = devnode_spu(cbe, vic_dn);
avoid_ph = last_spu_dn->phandle;
} else {
@@ -498,7 +492,7 @@ static void init_affinity_node(int cbe)
spu = neighbour_spu(cbe, vic_dn, last_spu_dn);
if (!spu)
continue;
- if (!strcmp(name, "mic-tm")) {
+ if (of_node_name_eq(vic_dn, "mic-tm")) {
last_spu->has_mem_affinity = 1;
spu->has_mem_affinity = 1;
}
diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig
index ead99eff875a..43a2484aad49 100644
--- a/arch/powerpc/platforms/chrp/Kconfig
+++ b/arch/powerpc/platforms/chrp/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config PPC_CHRP
bool "Common Hardware Reference Platform (CHRP) based machines"
- depends on 6xx
+ depends on PPC_BOOK3S_32
select HAVE_PCSPKR_PLATFORM
select MPIC
select PPC_I8259
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 5ddb57b82921..b020c757d2bf 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -230,8 +230,8 @@ chrp_find_bridges(void)
else if (strncmp(machine, "Pegasos", 7) == 0)
is_pegasos = 1;
}
- for (dev = root->child; dev != NULL; dev = dev->sibling) {
- if (dev->type == NULL || strcmp(dev->type, "pci") != 0)
+ for_each_child_of_node(root, dev) {
+ if (!of_node_is_type(dev, "pci"))
continue;
++index;
/* The GG2 bridge on the LongTrail doesn't have an address */
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index d6d8ffc0271e..e66644e0fb40 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -280,20 +280,14 @@ static __init void chrp_init(void)
node = of_find_node_by_path(property);
if (!node)
return;
- property = of_get_property(node, "device_type", NULL);
- if (!property)
- goto out_put;
- if (strcmp(property, "serial"))
+ if (!of_node_is_type(node, "serial"))
goto out_put;
/*
* The 9pin connector is either /failsafe
* or /pci@80000000/isa@C/serial@i2F8
* The optional graphics card has also type 'serial' in VGA mode.
*/
- property = of_get_property(node, "name", NULL);
- if (!property)
- goto out_put;
- if (!strcmp(property, "failsafe") || !strcmp(property, "serial"))
+ if (of_node_name_eq(node, "failsafe") || of_node_name_eq(node, "serial"))
add_preferred_console("ttyS", 0, NULL);
out_put:
of_node_put(node);
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 8ea16db5ff48..527d4aa46537 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config EMBEDDED6xx
bool "Embedded 6xx/7xx/7xxx-based boards"
- depends on 6xx && BROKEN_ON_SMP
+ depends on PPC_BOOK3S_32 && BROKEN_ON_SMP
config LINKSTATION
bool "Linkstation / Kurobox(HG) from Buffalo"
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index e3821379e86f..13fba004b7e7 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -604,10 +604,8 @@ void __init maple_pci_init(void)
printk(KERN_CRIT "maple_find_bridges: can't find root of device tree\n");
return;
}
- for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
- if (!np->type)
- continue;
- if (strcmp(np->type, "pci") && strcmp(np->type, "ht"))
+ for_each_child_of_node(root, np) {
+ if (!of_node_is_type(np, "pci") && !of_node_is_type(np, "ht"))
continue;
if ((of_device_is_compatible(np, "u4-pcie") ||
of_device_is_compatible(np, "u3-agp")) &&
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 53384eb42a76..d18d16489a15 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -255,15 +255,13 @@ int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
chan->ring_size = ring_size;
- chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
+ chan->ring_virt = dma_zalloc_coherent(&dma_pdev->dev,
ring_size * sizeof(u64),
&chan->ring_dma, GFP_KERNEL);
if (!chan->ring_virt)
return -ENOMEM;
- memset(chan->ring_virt, 0, ring_size * sizeof(u64));
-
return 0;
}
EXPORT_SYMBOL(pasemi_dma_alloc_ring);
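dma_zalloc_coherent() is the alloc-plus-memset fusion, which is why the explicit memset() disappears. A minimal sketch of the equivalence (device and size supplied by the caller):

#include <linux/dma-mapping.h>

/* The two call sequences produce identical, zeroed coherent buffers. */
static void *ring_alloc(struct device *dev, size_t bytes, dma_addr_t *dma)
{
	return dma_zalloc_coherent(dev, bytes, dma, GFP_KERNEL);
	/* ... equivalent to:
	 *	p = dma_alloc_coherent(dev, bytes, dma, GFP_KERNEL);
	 *	if (p)
	 *		memset(p, 0, bytes);
	 */
}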
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index c3c64172482d..fdc839d93837 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <asm/pci-bridge.h>
+#include <asm/isa-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
@@ -108,6 +109,61 @@ static int workaround_5945(struct pci_bus *bus, unsigned int devfn,
return 1;
}
+#ifdef CONFIG_PPC_PASEMI_NEMO
+#define PXP_ERR_CFG_REG 0x4
+#define PXP_IGNORE_PCIE_ERRORS 0x800
+#define SB600_BUS 5
+
+static void sb600_set_flag(int bus)
+{
+ static void __iomem *iob_mapbase = NULL;
+ struct resource res;
+ struct device_node *dn;
+ int err;
+
+ if (iob_mapbase == NULL) {
+ dn = of_find_compatible_node(NULL, "isa", "pasemi,1682m-iob");
+ if (!dn) {
+ pr_crit("NEMO SB600 missing iob node\n");
+ return;
+ }
+
+ err = of_address_to_resource(dn, 0, &res);
+ of_node_put(dn);
+
+ if (err) {
+ pr_crit("NEMO SB600 missing resource\n");
+ return;
+ }
+
+ pr_info("NEMO SB600 IOB base %08llx\n",res.start);
+
+ iob_mapbase = ioremap(res.start + 0x100, 0x94);
+ }
+
+ if (iob_mapbase != NULL) {
+ if (bus == SB600_BUS) {
+ /*
+ * This is the SB600's bus, tell the PCI-e root port
+ * to allow non-zero devices to enumerate.
+ */
+ out_le32(iob_mapbase + PXP_ERR_CFG_REG, in_le32(iob_mapbase + PXP_ERR_CFG_REG) | PXP_IGNORE_PCIE_ERRORS);
+ } else {
+ /*
+ * Only scan device 0 on other busses
+ */
+ out_le32(iob_mapbase + PXP_ERR_CFG_REG, in_le32(iob_mapbase + PXP_ERR_CFG_REG) & ~PXP_IGNORE_PCIE_ERRORS);
+ }
+ }
+}
+
+#else
+
+static void sb600_set_flag(int bus)
+{
+}
+#endif
+
static int pa_pxp_read_config(struct pci_bus *bus, unsigned int devfn,
int offset, int len, u32 *val)
{
@@ -126,6 +182,8 @@ static int pa_pxp_read_config(struct pci_bus *bus, unsigned int devfn,
addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset);
+ sb600_set_flag(bus->number);
+
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
@@ -160,6 +218,8 @@ static int pa_pxp_write_config(struct pci_bus *bus, unsigned int devfn,
addr = pa_pxp_cfg_addr(hose, bus->number, devfn, offset);
+ sb600_set_flag(bus->number);
+
/*
* Note: the caller has already checked that offset is
* suitably aligned and that len is 1, 2 or 4.
@@ -210,6 +270,12 @@ static int __init pas_add_bridge(struct device_node *dev)
/* Interpret the "ranges" property */
pci_process_bridge_OF_ranges(hose, dev, 1);
+ /*
+ * Scan for an isa bridge. This is needed to find the SB600 on the nemo
+ * and does nothing on machines without one.
+ */
+ isa_bridge_find_early(hose);
+
return 0;
}
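Both config-space accessors funnel through sb600_set_flag(), so the root port's enumeration behaviour tracks whichever bus is currently being scanned. The register update itself is a plain read-modify-write; a sketch reusing the register and bit names defined in the hunk above:

#include <linux/io.h>

/* Set or clear PXP_IGNORE_PCIE_ERRORS depending on whether the bus
 * being scanned is the SB600's (assumes the macros from the hunk). */
static void set_ignore_errors(void __iomem *iob, bool enable)
{
	u32 v = in_le32(iob + PXP_ERR_CFG_REG);

	if (enable)
		v |= PXP_IGNORE_PCIE_ERRORS;
	else
		v &= ~PXP_IGNORE_PCIE_ERRORS;
	out_le32(iob + PXP_ERR_CFG_REG, v);
}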
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 9a6eb04cca83..c0532999f854 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -34,6 +34,7 @@
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
+#include <asm/i8259.h>
#include <asm/mpic.h>
#include <asm/smp.h>
#include <asm/time.h>
@@ -72,6 +73,40 @@ static void __noreturn pas_restart(char *cmd)
out_le32(reset_reg, 0x6000000);
}
+#ifdef CONFIG_PPC_PASEMI_NEMO
+void pas_shutdown(void)
+{
+ /* Set the PLD bit that makes the SB600 think the power button is being pressed */
+ void __iomem *pld_map = ioremap(0xf5000000, 4096);
+ while (1)
+ out_8(pld_map + 7, 0x01);
+}
+
+/* RTC platform device structure, as it is not in the device tree */
+static struct resource rtc_resource[] = {{
+ .name = "rtc",
+ .start = 0x70,
+ .end = 0x71,
+ .flags = IORESOURCE_IO,
+}, {
+ .name = "rtc",
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_IRQ,
+}};
+
+static inline void nemo_init_rtc(void)
+{
+ platform_device_register_simple("rtc_cmos", -1, rtc_resource, 2);
+}
+
+#else
+
+static inline void nemo_init_rtc(void)
+{
+}
+#endif
+
#ifdef CONFIG_SMP
static arch_spinlock_t timebase_lock;
static unsigned long timebase;
@@ -183,6 +218,42 @@ static int __init pas_setup_mce_regs(void)
}
machine_device_initcall(pasemi, pas_setup_mce_regs);
+#ifdef CONFIG_PPC_PASEMI_NEMO
+static void sb600_8259_cascade(struct irq_desc *desc)
+{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int cascade_irq = i8259_irq();
+
+ if (cascade_irq)
+ generic_handle_irq(cascade_irq);
+
+ chip->irq_eoi(&desc->irq_data);
+}
+
+static void nemo_init_IRQ(struct mpic *mpic)
+{
+ struct device_node *np;
+ int gpio_virq;
+ /* Connect the SB600's legacy i8259 controller */
+ np = of_find_node_by_path("/pxp@0,e0000000");
+ i8259_init(np, 0);
+ of_node_put(np);
+
+ gpio_virq = irq_create_mapping(NULL, 3);
+ irq_set_irq_type(gpio_virq, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_chained_handler(gpio_virq, sb600_8259_cascade);
+ mpic_unmask_irq(irq_get_irq_data(gpio_virq));
+
+ irq_set_default_host(mpic->irqhost);
+}
+
+#else
+
+static inline void nemo_init_IRQ(struct mpic *mpic)
+{
+}
+#endif
+
static __init void pas_init_IRQ(void)
{
struct device_node *np;
@@ -243,6 +314,8 @@ static __init void pas_init_IRQ(void)
mpic_unmask_irq(irq_get_irq_data(nmi_virq));
}
+ nemo_init_IRQ(mpic);
+
of_node_put(mpic_node);
of_node_put(root);
}
@@ -404,6 +477,8 @@ static int __init pasemi_publish_devices(void)
/* Publish OF platform devices for SDC and other non-PCI devices */
of_platform_bus_probe(NULL, pasemi_bus_ids, NULL);
+ nemo_init_rtc();
+
return 0;
}
machine_device_initcall(pasemi, pasemi_publish_devices);
@@ -418,6 +493,17 @@ static int __init pas_probe(void)
!of_machine_is_compatible("pasemi,pwrficient"))
return 0;
+#ifdef CONFIG_PPC_PASEMI_NEMO
+ /*
+ * Check for the Nemo motherboard here, if we are running on one
+ * change the machine definition to fit
+ */
+ if (of_machine_is_compatible("pasemi,nemo")) {
+ pm_power_off = pas_shutdown;
+ ppc_md.name = "A-EON Amigaone X1000";
+ }
+#endif
+
iommu_init_early_pasemi();
return 1;
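pas_probe() now specialises the machine description at runtime when the more specific Nemo compatible string matches. The same pattern in isolation (pas_shutdown() and the strings come from the hunks above; kernel context assumed):

#include <linux/of.h>
#include <linux/pm.h>
#include <asm/machdep.h>

/* Rebadge the machine and install a board-specific power-off hook when
 * a more specific compatible string is found at probe time. */
static void maybe_fixup_nemo(void)
{
	if (of_machine_is_compatible("pasemi,nemo")) {
		pm_power_off = pas_shutdown;
		ppc_md.name = "A-EON Amigaone X1000";
	}
}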
diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S
index 27862feee4a5..f0641b6e6075 100644
--- a/arch/powerpc/platforms/powermac/cache.S
+++ b/arch/powerpc/platforms/powermac/cache.S
@@ -28,7 +28,7 @@
*/
_GLOBAL(flush_disable_caches)
-#ifndef CONFIG_6xx
+#ifndef CONFIG_PPC_BOOK3S_32
blr
#else
BEGIN_FTR_SECTION
@@ -356,4 +356,4 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
mtmsr r11 /* restore DR and EE */
isync
blr
-#endif /* CONFIG_6xx */
+#endif /* CONFIG_PPC_BOOK3S_32 */
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index ed2f54b3f173..c3e5ee8b5175 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -51,7 +51,7 @@
#define DBG(fmt...)
#endif
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
extern int powersave_lowspeed;
#endif
@@ -173,9 +173,9 @@ static long ohare_htw_scc_enable(struct device_node *node, long param,
macio = macio_find(node, 0);
if (!macio)
return -ENODEV;
- if (!strcmp(node->name, "ch-a"))
+ if (of_node_name_eq(node, "ch-a"))
chan_mask = MACIO_FLAG_SCCA_ON;
- else if (!strcmp(node->name, "ch-b"))
+ else if (of_node_name_eq(node, "ch-b"))
chan_mask = MACIO_FLAG_SCCB_ON;
else
return -ENODEV;
@@ -610,9 +610,9 @@ static long core99_scc_enable(struct device_node *node, long param, long value)
macio = macio_find(node, 0);
if (!macio)
return -ENODEV;
- if (!strcmp(node->name, "ch-a"))
+ if (of_node_name_eq(node, "ch-a"))
chan_mask = MACIO_FLAG_SCCA_ON;
- else if (!strcmp(node->name, "ch-b"))
+ else if (of_node_name_eq(node, "ch-b"))
chan_mask = MACIO_FLAG_SCCB_ON;
else
return -ENODEV;
@@ -1392,8 +1392,7 @@ static long g5_mpic_enable(struct device_node *node, long param, long value)
if (parent == NULL)
return 0;
- is_u3 = strcmp(parent->name, "u3") == 0 ||
- strcmp(parent->name, "u4") == 0;
+ is_u3 = of_node_name_eq(parent, "u3") || of_node_name_eq(parent, "u4");
of_node_put(parent);
if (!is_u3)
return 0;
@@ -1471,6 +1470,7 @@ static long g5_i2s_enable(struct device_node *node, long param, long value)
case 2:
if (macio->type == macio_shasta)
break;
+ /* fall through */
default:
return -ENODEV;
}
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index d4d411820597..4de058a20d2b 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -617,7 +617,7 @@ static void __init kw_i2c_probe(void)
* but not for now
*/
child = of_get_next_child(np, NULL);
- multibus = !child || strcmp(child->name, "i2c-bus");
+ multibus = !of_node_name_eq(child, "i2c-bus");
of_node_put(child);
/* For a multibus setup, we get the bus count based on the
@@ -917,10 +917,9 @@ static void __init smu_i2c_probe(void)
* type as older device trees mix i2c busses and other things
* at the same level
*/
- for (busnode = NULL;
- (busnode = of_get_next_child(controller, busnode)) != NULL;) {
- if (strcmp(busnode->type, "i2c") &&
- strcmp(busnode->type, "i2c-bus"))
+ for_each_child_of_node(controller, busnode) {
+ if (!of_node_is_type(busnode, "i2c") &&
+ !of_node_is_type(busnode, "i2c-bus"))
continue;
reg = of_get_property(busnode, "reg", NULL);
if (reg == NULL)
@@ -1206,7 +1205,7 @@ static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
if (bus != pmac_i2c_find_bus(np))
continue;
for (p = whitelist; p->name != NULL; p++) {
- if (strcmp(np->name, p->name))
+ if (!of_node_name_eq(np, p->name))
continue;
if (p->compatible &&
!of_device_is_compatible(np, p->compatible))
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 04527d13d5a4..3d7420503c37 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -501,9 +501,7 @@ static void __init init_p2pbridge(void)
/* XXX it would be better here to identify the specific
PCI-PCI bridge chip we have. */
p2pbridge = of_find_node_by_name(NULL, "pci-bridge");
- if (p2pbridge == NULL
- || p2pbridge->parent == NULL
- || strcmp(p2pbridge->parent->name, "pci") != 0)
+ if (p2pbridge == NULL || !of_node_name_eq(p2pbridge->parent, "pci"))
goto done;
if (pci_device_from_OF_node(p2pbridge, &bus, &devfn) < 0) {
DBG("Can't find PCI infos for PCI<->PCI bridge\n");
@@ -828,14 +826,14 @@ static int __init pmac_add_bridge(struct device_node *dev)
if (of_device_is_compatible(dev, "uni-north")) {
primary = setup_uninorth(hose, &rsrc);
disp_name = "UniNorth";
- } else if (strcmp(dev->name, "pci") == 0) {
+ } else if (of_node_name_eq(dev, "pci")) {
/* XXX assume this is a mpc106 (grackle) */
setup_grackle(hose);
disp_name = "Grackle (MPC106)";
- } else if (strcmp(dev->name, "bandit") == 0) {
+ } else if (of_node_name_eq(dev, "bandit")) {
setup_bandit(hose, &rsrc);
disp_name = "Bandit";
- } else if (strcmp(dev->name, "chaos") == 0) {
+ } else if (of_node_name_eq(dev, "chaos")) {
setup_chaos(hose, &rsrc);
disp_name = "Chaos";
primary = 0;
@@ -914,16 +912,14 @@ void __init pmac_pci_init(void)
"of device tree\n");
return;
}
- for (np = NULL; (np = of_get_next_child(root, np)) != NULL;) {
- if (np->name == NULL)
- continue;
- if (strcmp(np->name, "bandit") == 0
- || strcmp(np->name, "chaos") == 0
- || strcmp(np->name, "pci") == 0) {
+ for_each_child_of_node(root, np) {
+ if (of_node_name_eq(np, "bandit")
+ || of_node_name_eq(np, "chaos")
+ || of_node_name_eq(np, "pci")) {
if (pmac_add_bridge(np) == 0)
of_node_get(np);
}
- if (strcmp(np->name, "ht") == 0) {
+ if (of_node_name_eq(np, "ht")) {
of_node_get(np);
ht = np;
}
@@ -983,7 +979,7 @@ static bool pmac_pci_enable_device_hook(struct pci_dev *dev)
/* Firewire & GMAC were disabled after PCI probe, the driver is
* claiming them, we must re-enable them now.
*/
- if (uninorth_child && !strcmp(node->name, "firewire") &&
+ if (uninorth_child && of_node_name_eq(node, "firewire") &&
(of_device_is_compatible(node, "pci106b,18") ||
of_device_is_compatible(node, "pci106b,30") ||
of_device_is_compatible(node, "pci11c1,5811"))) {
@@ -991,7 +987,7 @@ static bool pmac_pci_enable_device_hook(struct pci_dev *dev)
pmac_call_feature(PMAC_FTR_1394_ENABLE, node, 0, 1);
updatecfg = 1;
}
- if (uninorth_child && !strcmp(node->name, "ethernet") &&
+ if (uninorth_child && of_node_name_eq(node, "ethernet") &&
of_device_is_compatible(node, "gmac")) {
pmac_call_feature(PMAC_FTR_GMAC_ENABLE, node, 0, 1);
updatecfg = 1;
@@ -1262,4 +1258,3 @@ struct pci_controller_ops pmac_pci_controller_ops = {
.enable_device_hook = pmac_pci_enable_device_hook,
#endif
};
-
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index fd2e210559c8..62311e84a423 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -101,9 +101,8 @@ static void macio_gpio_init_one(struct macio_chip *macio)
* Find the "gpio" parent node
*/
- for (gparent = NULL;
- (gparent = of_get_next_child(macio->of_node, gparent)) != NULL;)
- if (strcmp(gparent->name, "gpio") == 0)
+ for_each_child_of_node(macio->of_node, gparent)
+ if (of_node_name_eq(gparent, "gpio"))
break;
if (gparent == NULL)
return;
@@ -313,7 +312,7 @@ static void uninorth_install_pfunc(void)
* Install handlers for the hwclock child if any
*/
for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;)
- if (strcmp(np->name, "hw-clock") == 0) {
+ if (of_node_name_eq(np, "hw-clock")) {
unin_hwclock = np;
break;
}
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 57bbff465964..c292ffac2ed4 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -417,7 +417,7 @@ int of_irq_parse_oldworld(struct device_node *device, int index,
if (ints != NULL)
break;
device = device->parent;
- if (device && strcmp(device->type, "pci") != 0)
+ if (!of_node_is_type(device, "pci"))
break;
}
if (ints == NULL)
@@ -553,13 +553,13 @@ void __init pmac_pic_init(void)
for_each_node_with_property(np, "interrupt-controller") {
/* Skip /chosen/interrupt-controller */
- if (strcmp(np->name, "chosen") == 0)
+ if (of_node_name_eq(np, "chosen"))
continue;
/* It seems like at least one person wants
* to use BootX on a machine with an AppleKiwi
* controller which happens to pretend to be an
* interrupt controller too. */
- if (strcmp(np->name, "AppleKiwi") == 0)
+ if (of_node_name_eq(np, "AppleKiwi"))
continue;
/* I think we found one ! */
of_irq_dflt_pic = np;
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 2f00e3daafb0..2e8221e20ee8 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -560,15 +560,9 @@ static int __init check_pmac_serial_console(void)
}
pr_debug("stdout is %pOF\n", prom_stdout);
- name = of_get_property(prom_stdout, "name", NULL);
- if (!name) {
- pr_debug(" stdout package has no name !\n");
- goto not_found;
- }
-
- if (strcmp(name, "ch-a") == 0)
+ if (of_node_name_eq(prom_stdout, "ch-a"))
offset = 0;
- else if (strcmp(name, "ch-b") == 0)
+ else if (of_node_name_eq(prom_stdout, "ch-b"))
offset = 1;
else
goto not_found;
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index f89808b9713d..fb64b09cad9d 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -56,7 +56,7 @@
* vector that will be called by the ROM on wakeup
*/
_GLOBAL(low_sleep_handler)
-#ifndef CONFIG_6xx
+#ifndef CONFIG_PPC_BOOK3S_32
blr
#else
mflr r0
@@ -394,5 +394,5 @@ sleep_storage:
.long 0
.balign L1_CACHE_BYTES, 0
-#endif /* CONFIG_6xx */
+#endif /* CONFIG_PPC_BOOK3S_32 */
.section .text
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 447da6db450a..35be6e0b886d 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -832,8 +832,7 @@ static int smp_core99_kick_cpu(int nr)
mdelay(1);
/* Restore our exception vector */
- *vector = save_vector;
- flush_icache_range((unsigned long) vector, (unsigned long) vector + 4);
+ patch_instruction(vector, save_vector);
local_irq_restore(flags);
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
diff --git a/arch/powerpc/platforms/powermac/udbg_adb.c b/arch/powerpc/platforms/powermac/udbg_adb.c
index 64f38f0d15ed..12158bb4fed7 100644
--- a/arch/powerpc/platforms/powermac/udbg_adb.c
+++ b/arch/powerpc/platforms/powermac/udbg_adb.c
@@ -194,7 +194,7 @@ int __init udbg_adb_init(int force_btext)
*/
for_each_node_by_name(np, "keyboard") {
struct device_node *parent = of_get_parent(np);
- int found = (parent && strcmp(parent->type, "adb") == 0);
+ int found = of_node_is_type(parent, "adb");
of_node_put(parent);
if (found)
break;
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index 8901973ed683..415b74d7c253 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -87,7 +87,7 @@ void udbg_scc_init(int force_scc)
for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) {
if (ch == stdout)
ch_def = of_node_get(ch);
- if (strcmp(ch->name, "ch-a") == 0)
+ if (of_node_name_eq(ch, "ch-a"))
ch_a = of_node_get(ch);
}
if (ch_def == NULL && !force_scc)
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index abc0be7507c8..f38078976c5d 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -564,8 +564,8 @@ static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
{
struct pnv_phb *phb = pe->phb->private_data;
- u8 fstate;
- __be16 pcierr;
+ u8 fstate = 0;
+ __be16 pcierr = 0;
s64 rc;
int result = 0;
@@ -603,8 +603,8 @@ static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
{
struct pnv_phb *phb = pe->phb->private_data;
- u8 fstate;
- __be16 pcierr;
+ u8 fstate = 0;
+ __be16 pcierr = 0;
s64 rc;
int result;
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 75b935252981..d7f742ed48ba 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -9,32 +9,19 @@
* License as published by the Free Software Foundation.
*/
-#include <linux/slab.h>
#include <linux/mmu_notifier.h>
#include <linux/mmu_context.h>
#include <linux/of.h>
-#include <linux/export.h>
#include <linux/pci.h>
#include <linux/memblock.h>
-#include <linux/iommu.h>
#include <linux/sizes.h>
#include <asm/debugfs.h>
-#include <asm/tlb.h>
#include <asm/powernv.h>
-#include <asm/reg.h>
-#include <asm/opal.h>
-#include <asm/io.h>
-#include <asm/iommu.h>
-#include <asm/pnv-pci.h>
-#include <asm/msi_bitmap.h>
#include <asm/opal.h>
-#include "powernv.h"
#include "pci.h"
-#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
-
/*
* spinlock to protect initialisation of an npu_context for a particular
* mm_struct.
@@ -133,15 +120,25 @@ static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
return pe;
}
-long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
+static long pnv_npu_unset_window(struct iommu_table_group *table_group,
+ int num);
+
+static long pnv_npu_set_window(struct iommu_table_group *table_group, int num,
struct iommu_table *tbl)
{
+ struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
+ table_group);
struct pnv_phb *phb = npe->phb;
int64_t rc;
const unsigned long size = tbl->it_indirect_levels ?
tbl->it_level_size : tbl->it_size;
const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
const __u64 win_size = tbl->it_size << tbl->it_page_shift;
+ int num2 = (num == 0) ? 1 : 0;
+
+ /* NPU has just one TVE so if there is another table, remove it first */
+ if (npe->table_group.tables[num2])
+ pnv_npu_unset_window(&npe->table_group, num2);
pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
start_addr, start_addr + win_size - 1,
@@ -167,11 +164,16 @@ long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
return 0;
}
-long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num)
+static long pnv_npu_unset_window(struct iommu_table_group *table_group, int num)
{
+ struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
+ table_group);
struct pnv_phb *phb = npe->phb;
int64_t rc;
+ if (!npe->table_group.tables[num])
+ return 0;
+
pe_info(npe, "Removing DMA window\n");
rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
@@ -210,7 +212,8 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
if (!gpe)
return;
- rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);
+ rc = pnv_npu_set_window(&npe->table_group, 0,
+ gpe->table_group.tables[0]);
/*
* NVLink devices use the same TCE table configuration as
@@ -235,7 +238,7 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe)
if (phb->type != PNV_PHB_NPU_NVLINK || !npe->pdev)
return -EINVAL;
- rc = pnv_npu_unset_window(npe, 0);
+ rc = pnv_npu_unset_window(&npe->table_group, 0);
if (rc != OPAL_SUCCESS)
return rc;
@@ -288,11 +291,15 @@ void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass)
}
}
+#ifdef CONFIG_IOMMU_API
/* Switch ownership from platform code to external user (e.g. VFIO) */
-void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
+static void pnv_npu_take_ownership(struct iommu_table_group *table_group)
{
+ struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
+ table_group);
struct pnv_phb *phb = npe->phb;
int64_t rc;
+ struct pci_dev *gpdev = NULL;
/*
* Note: NPU has just a single TVE in the hardware which means that
@@ -301,7 +308,7 @@ void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
* if it was enabled at the moment of ownership change.
*/
if (npe->table_group.tables[0]) {
- pnv_npu_unset_window(npe, 0);
+ pnv_npu_unset_window(&npe->table_group, 0);
return;
}
@@ -314,30 +321,315 @@ void pnv_npu_take_ownership(struct pnv_ioda_pe *npe)
return;
}
pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
+
+ get_gpu_pci_dev_and_pe(npe, &gpdev);
+ if (gpdev)
+ pnv_npu2_unmap_lpar_dev(gpdev);
}
-struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe)
+static void pnv_npu_release_ownership(struct iommu_table_group *table_group)
{
- struct pnv_phb *phb = npe->phb;
- struct pci_bus *pbus = phb->hose->bus;
- struct pci_dev *npdev, *gpdev = NULL, *gptmp;
- struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
+ struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
+ table_group);
+ struct pci_dev *gpdev = NULL;
+
+ get_gpu_pci_dev_and_pe(npe, &gpdev);
+ if (gpdev)
+ pnv_npu2_map_lpar_dev(gpdev, 0, MSR_DR | MSR_PR | MSR_HV);
+}
+
+static struct iommu_table_group_ops pnv_pci_npu_ops = {
+ .set_window = pnv_npu_set_window,
+ .unset_window = pnv_npu_unset_window,
+ .take_ownership = pnv_npu_take_ownership,
+ .release_ownership = pnv_npu_release_ownership,
+};
+#endif /* CONFIG_IOMMU_API */
+
+/*
+ * NPU2 ATS
+ */
+/* Maximum possible number of ATSD MMIO registers per NPU */
+#define NV_NMMU_ATSD_REGS 8
+#define NV_NPU_MAX_PE_NUM 16
+
+/*
+ * A compound NPU IOMMU group which might consist of 1 GPU + 2xNPUs (POWER8) or
+ * up to 3 x (GPU + 2xNPUs) (POWER9).
+ */
+struct npu_comp {
+ struct iommu_table_group table_group;
+ int pe_num;
+ struct pnv_ioda_pe *pe[NV_NPU_MAX_PE_NUM];
+};
+
+/* An NPU descriptor, valid for POWER9 only */
+struct npu {
+ int index;
+ __be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
+ unsigned int mmio_atsd_count;
+
+ /* Bitmask for MMIO register usage */
+ unsigned long mmio_atsd_usage;
+
+ /* Do we need to explicitly flush the nest mmu? */
+ bool nmmu_flush;
+
+ struct npu_comp npucomp;
+};
+
+#ifdef CONFIG_IOMMU_API
+static long pnv_npu_peers_create_table_userspace(
+ struct iommu_table_group *table_group,
+ int num, __u32 page_shift, __u64 window_size, __u32 levels,
+ struct iommu_table **ptbl)
+{
+ struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
+ table_group);
+
+ if (!npucomp->pe_num || !npucomp->pe[0] ||
+ !npucomp->pe[0]->table_group.ops ||
+ !npucomp->pe[0]->table_group.ops->create_table)
+ return -EFAULT;
+
+ return npucomp->pe[0]->table_group.ops->create_table(
+ &npucomp->pe[0]->table_group, num, page_shift,
+ window_size, levels, ptbl);
+}
+
+static long pnv_npu_peers_set_window(struct iommu_table_group *table_group,
+ int num, struct iommu_table *tbl)
+{
+ int i, j;
+ long ret = 0;
+ struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
+ table_group);
+
+ for (i = 0; i < npucomp->pe_num; ++i) {
+ struct pnv_ioda_pe *pe = npucomp->pe[i];
+
+ if (!pe->table_group.ops->set_window)
+ continue;
+
+ ret = pe->table_group.ops->set_window(&pe->table_group,
+ num, tbl);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ for (j = 0; j < i; ++j) {
+ struct pnv_ioda_pe *pe = npucomp->pe[j];
+
+ if (!pe->table_group.ops->unset_window)
+ continue;
+
+ ret = pe->table_group.ops->unset_window(
+ &pe->table_group, num);
+ if (ret)
+ break;
+ }
+ } else {
+ table_group->tables[num] = iommu_tce_table_get(tbl);
+ }
+
+ return ret;
+}
- if (!gpe || !gpdev)
+static long pnv_npu_peers_unset_window(struct iommu_table_group *table_group,
+ int num)
+{
+ int i, j;
+ long ret = 0;
+ struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
+ table_group);
+
+ for (i = 0; i < npucomp->pe_num; ++i) {
+ struct pnv_ioda_pe *pe = npucomp->pe[i];
+
+ WARN_ON(npucomp->table_group.tables[num] !=
+ table_group->tables[num]);
+ if (!npucomp->table_group.tables[num])
+ continue;
+
+ if (!pe->table_group.ops->unset_window)
+ continue;
+
+ ret = pe->table_group.ops->unset_window(&pe->table_group, num);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ for (j = 0; j < i; ++j) {
+ struct pnv_ioda_pe *pe = npucomp->pe[j];
+
+ if (!npucomp->table_group.tables[num])
+ continue;
+
+ if (!pe->table_group.ops->set_window)
+ continue;
+
+ ret = pe->table_group.ops->set_window(&pe->table_group,
+ num, table_group->tables[num]);
+ if (ret)
+ break;
+ }
+ } else if (table_group->tables[num]) {
+ iommu_tce_table_put(table_group->tables[num]);
+ table_group->tables[num] = NULL;
+ }
+
+ return ret;
+}
+
+static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group)
+{
+ int i;
+ struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
+ table_group);
+
+ for (i = 0; i < npucomp->pe_num; ++i) {
+ struct pnv_ioda_pe *pe = npucomp->pe[i];
+
+ if (!pe->table_group.ops->take_ownership)
+ continue;
+ pe->table_group.ops->take_ownership(&pe->table_group);
+ }
+}
+
+static void pnv_npu_peers_release_ownership(
+ struct iommu_table_group *table_group)
+{
+ int i;
+ struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
+ table_group);
+
+ for (i = 0; i < npucomp->pe_num; ++i) {
+ struct pnv_ioda_pe *pe = npucomp->pe[i];
+
+ if (!pe->table_group.ops->release_ownership)
+ continue;
+ pe->table_group.ops->release_ownership(&pe->table_group);
+ }
+}
+
+static struct iommu_table_group_ops pnv_npu_peers_ops = {
+ .get_table_size = pnv_pci_ioda2_get_table_size,
+ .create_table = pnv_npu_peers_create_table_userspace,
+ .set_window = pnv_npu_peers_set_window,
+ .unset_window = pnv_npu_peers_unset_window,
+ .take_ownership = pnv_npu_peers_take_ownership,
+ .release_ownership = pnv_npu_peers_release_ownership,
+};
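The set_window/unset_window callbacks above fan an operation out to every PE in the compound group and roll back the entries already processed if any step fails. The skeleton of that pattern, detached from the IOMMU types (all names here are illustrative):

#include <stddef.h>

/* Apply op() to each element; on failure, undo() the ones already done,
 * mirroring the i/j loop pair in the callbacks above. */
static int apply_all_or_undo(void **items, size_t n,
			     int (*op)(void *), int (*undo)(void *))
{
	size_t i, j;
	int ret = 0;

	for (i = 0; i < n; i++) {
		ret = op(items[i]);
		if (ret)
			break;
	}
	if (ret)
		for (j = 0; j < i; j++)
			undo(items[j]);
	return ret;
}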
+
+static void pnv_comp_attach_table_group(struct npu_comp *npucomp,
+ struct pnv_ioda_pe *pe)
+{
+ if (WARN_ON(npucomp->pe_num == NV_NPU_MAX_PE_NUM))
+ return;
+
+ npucomp->pe[npucomp->pe_num] = pe;
+ ++npucomp->pe_num;
+}
+
+struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
+{
+ struct iommu_table_group *table_group;
+ struct npu_comp *npucomp;
+ struct pci_dev *gpdev = NULL;
+ struct pci_controller *hose;
+ struct pci_dev *npdev = NULL;
+
+ list_for_each_entry(gpdev, &pe->pbus->devices, bus_list) {
+ npdev = pnv_pci_get_npu_dev(gpdev, 0);
+ if (npdev)
+ break;
+ }
+
+ if (!npdev)
+ /* Not an NPU-attached device, skip */
+ return NULL;
+
+ hose = pci_bus_to_host(npdev->bus);
+
+ if (hose->npu) {
+ table_group = &hose->npu->npucomp.table_group;
+
+ if (!table_group->group) {
+ table_group->ops = &pnv_npu_peers_ops;
+ iommu_register_group(table_group,
+ hose->global_number,
+ pe->pe_number);
+ }
+ } else {
+ /* Create a group for 1 GPU and attached NPUs for POWER8 */
+ pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
+ table_group = &pe->npucomp->table_group;
+ table_group->ops = &pnv_npu_peers_ops;
+ iommu_register_group(table_group, hose->global_number,
+ pe->pe_number);
+ }
+
+ /* Steal capabilities from a GPU PE */
+ table_group->max_dynamic_windows_supported =
+ pe->table_group.max_dynamic_windows_supported;
+ table_group->tce32_start = pe->table_group.tce32_start;
+ table_group->tce32_size = pe->table_group.tce32_size;
+ table_group->max_levels = pe->table_group.max_levels;
+ if (!table_group->pgsizes)
+ table_group->pgsizes = pe->table_group.pgsizes;
+
+ npucomp = container_of(table_group, struct npu_comp, table_group);
+ pnv_comp_attach_table_group(npucomp, pe);
+
+ return table_group;
+}
+
+struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
+{
+ struct iommu_table_group *table_group;
+ struct npu_comp *npucomp;
+ struct pci_dev *gpdev = NULL;
+ struct pci_dev *npdev;
+ struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(pe, &gpdev);
+
+ WARN_ON(!(pe->flags & PNV_IODA_PE_DEV));
+ if (!gpe)
return NULL;
- list_for_each_entry(npdev, &pbus->devices, bus_list) {
- gptmp = pnv_pci_get_gpu_dev(npdev);
+ /*
+ * IODA2 bridges get this set up from pci_controller_ops::setup_bridge
+ * but NPU bridges do not have this hook defined so we do it here.
+ * We do not set up other table group parameters as they won't be used
+ * anyway - NVLink bridges are subordinate PEs.
+ */
+ pe->table_group.ops = &pnv_pci_npu_ops;
+
+ table_group = iommu_group_get_iommudata(
+ iommu_group_get(&gpdev->dev));
+
+ /*
+ * On P9 NPU PHB and PCI PHB support different page sizes,
+ * keep only matching. We expect here that NVLink bridge PE pgsizes is
+ * initialized by the caller.
+ */
+ table_group->pgsizes &= pe->table_group.pgsizes;
+ npucomp = container_of(table_group, struct npu_comp, table_group);
+ pnv_comp_attach_table_group(npucomp, pe);
+
+ list_for_each_entry(npdev, &pe->phb->hose->bus->devices, bus_list) {
+ struct pci_dev *gpdevtmp = pnv_pci_get_gpu_dev(npdev);
- if (gptmp != gpdev)
+ if (gpdevtmp != gpdev)
continue;
- pe_info(gpe, "Attached NPU %s\n", dev_name(&npdev->dev));
- iommu_group_add_device(gpe->table_group.group, &npdev->dev);
+ iommu_add_device(table_group, &npdev->dev);
}
- return gpe;
+ return table_group;
}
+#endif /* CONFIG_IOMMU_API */
/* Maximum number of nvlinks per npu */
#define NV_MAX_LINKS 6
@@ -490,7 +782,6 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
int i, j;
struct npu *npu;
struct pci_dev *npdev;
- struct pnv_phb *nphb;
for (i = 0; i <= max_npu2_index; i++) {
mmio_atsd_reg[i].reg = -1;
@@ -505,8 +796,10 @@ static void acquire_atsd_reg(struct npu_context *npu_context,
if (!npdev)
continue;
- nphb = pci_bus_to_host(npdev->bus)->private_data;
- npu = &nphb->npu;
+ npu = pci_bus_to_host(npdev->bus)->npu;
+ if (!npu)
+ continue;
+
mmio_atsd_reg[i].npu = npu;
mmio_atsd_reg[i].reg = get_mmio_atsd_reg(npu);
while (mmio_atsd_reg[i].reg < 0) {
@@ -671,9 +964,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
u32 nvlink_index;
struct device_node *nvlink_dn;
struct mm_struct *mm = current->mm;
- struct pnv_phb *nphb;
struct npu *npu;
struct npu_context *npu_context;
+ struct pci_controller *hose;
/*
* At present we don't support GPUs connected to multiple NPUs and I'm
@@ -681,13 +974,14 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
*/
struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
- if (!firmware_has_feature(FW_FEATURE_OPAL))
- return ERR_PTR(-ENODEV);
-
if (!npdev)
/* No nvlink associated with this GPU device */
return ERR_PTR(-ENODEV);
+ /* We only support DR/PR/HV in pnv_npu2_map_lpar_dev() */
+ if (flags & ~(MSR_DR | MSR_PR | MSR_HV))
+ return ERR_PTR(-EINVAL);
+
nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
&nvlink_index)))
@@ -701,20 +995,10 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
return ERR_PTR(-EINVAL);
}
- nphb = pci_bus_to_host(npdev->bus)->private_data;
- npu = &nphb->npu;
-
- /*
- * Setup the NPU context table for a particular GPU. These need to be
- * per-GPU as we need the tables to filter ATSDs when there are no
- * active contexts on a particular GPU. It is safe for these to be
- * called concurrently with destroy as the OPAL call takes appropriate
- * locks and refcounts on init/destroy.
- */
- rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
- PCI_DEVID(gpdev->bus->number, gpdev->devfn));
- if (rc < 0)
- return ERR_PTR(-ENOSPC);
+ hose = pci_bus_to_host(npdev->bus);
+ npu = hose->npu;
+ if (!npu)
+ return ERR_PTR(-ENODEV);
/*
* We store the npu pci device so we can more easily get at the
@@ -726,9 +1010,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
if (npu_context->release_cb != cb ||
npu_context->priv != priv) {
spin_unlock(&npu_context_lock);
- opal_npu_destroy_context(nphb->opal_id, mm->context.id,
- PCI_DEVID(gpdev->bus->number,
- gpdev->devfn));
return ERR_PTR(-EINVAL);
}
@@ -754,9 +1035,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
if (rc) {
kfree(npu_context);
- opal_npu_destroy_context(nphb->opal_id, mm->context.id,
- PCI_DEVID(gpdev->bus->number,
- gpdev->devfn));
return ERR_PTR(rc);
}
@@ -776,7 +1054,7 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
*/
WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], npdev);
- if (!nphb->npu.nmmu_flush) {
+ if (!npu->nmmu_flush) {
/*
* If we're not explicitly flushing ourselves we need to mark
* the thread for global flushes
@@ -809,27 +1087,24 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
struct pci_dev *gpdev)
{
int removed;
- struct pnv_phb *nphb;
struct npu *npu;
struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
struct device_node *nvlink_dn;
u32 nvlink_index;
+ struct pci_controller *hose;
if (WARN_ON(!npdev))
return;
- if (!firmware_has_feature(FW_FEATURE_OPAL))
+ hose = pci_bus_to_host(npdev->bus);
+ npu = hose->npu;
+ if (!npu)
return;
-
- nphb = pci_bus_to_host(npdev->bus)->private_data;
- npu = &nphb->npu;
nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
&nvlink_index)))
return;
WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
- opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
- PCI_DEVID(gpdev->bus->number, gpdev->devfn));
spin_lock(&npu_context_lock);
removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
spin_unlock(&npu_context_lock);
@@ -857,13 +1132,12 @@ int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
u64 rc = 0, result = 0;
int i, is_write;
struct page *page[1];
+ const char __user *u;
+ char c;
/* mmap_sem should be held so the struct_mm must be present */
struct mm_struct *mm = context->mm;
- if (!firmware_has_feature(FW_FEATURE_OPAL))
- return -ENODEV;
-
WARN_ON(!rwsem_is_locked(&mm->mmap_sem));
for (i = 0; i < count; i++) {
@@ -872,18 +1146,17 @@ int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
is_write ? FOLL_WRITE : 0,
page, NULL, NULL);
- /*
- * To support virtualised environments we will have to do an
- * access to the page to ensure it gets faulted into the
- * hypervisor. For the moment virtualisation is not supported in
- * other areas so leave the access out.
- */
if (rc != 1) {
status[i] = rc;
result = -EFAULT;
continue;
}
+ /* Make sure partition scoped tree gets a pte */
+ u = page_address(page[0]);
+ if (__get_user(c, u))
+ result = -EFAULT;
+
status[i] = 0;
put_page(page[0]);
}
@@ -892,42 +1165,127 @@ int pnv_npu2_handle_fault(struct npu_context *context, uintptr_t *ea,
}
EXPORT_SYMBOL(pnv_npu2_handle_fault);
-int pnv_npu2_init(struct pnv_phb *phb)
+int pnv_npu2_init(struct pci_controller *hose)
{
unsigned int i;
u64 mmio_atsd;
- struct device_node *dn;
- struct pci_dev *gpdev;
static int npu_index;
- uint64_t rc = 0;
-
- phb->npu.nmmu_flush =
- of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
- for_each_child_of_node(phb->hose->dn, dn) {
- gpdev = pnv_pci_get_gpu_dev(get_pci_dev(dn));
- if (gpdev) {
- rc = opal_npu_map_lpar(phb->opal_id,
- PCI_DEVID(gpdev->bus->number, gpdev->devfn),
- 0, 0);
- if (rc)
- dev_err(&gpdev->dev,
- "Error %lld mapping device to LPAR\n",
- rc);
- }
- }
+ struct npu *npu;
+ int ret;
+
+ npu = kzalloc(sizeof(*npu), GFP_KERNEL);
+ if (!npu)
+ return -ENOMEM;
- for (i = 0; !of_property_read_u64_index(phb->hose->dn, "ibm,mmio-atsd",
- i, &mmio_atsd); i++)
- phb->npu.mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
+ npu->nmmu_flush = of_property_read_bool(hose->dn, "ibm,nmmu-flush");
- pr_info("NPU%lld: Found %d MMIO ATSD registers", phb->opal_id, i);
- phb->npu.mmio_atsd_count = i;
- phb->npu.mmio_atsd_usage = 0;
+ for (i = 0; i < ARRAY_SIZE(npu->mmio_atsd_regs) &&
+ !of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
+ i, &mmio_atsd); i++)
+ npu->mmio_atsd_regs[i] = ioremap(mmio_atsd, 32);
+
+ pr_info("NPU%d: Found %d MMIO ATSD registers", hose->global_number, i);
+ npu->mmio_atsd_count = i;
+ npu->mmio_atsd_usage = 0;
npu_index++;
- if (WARN_ON(npu_index >= NV_MAX_NPUS))
- return -ENOSPC;
+ if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
+ ret = -ENOSPC;
+ goto fail_exit;
+ }
max_npu2_index = npu_index;
- phb->npu.index = npu_index;
+ npu->index = npu_index;
+ hose->npu = npu;
+
+ return 0;
+
+fail_exit:
+ for (i = 0; i < npu->mmio_atsd_count; ++i)
+ iounmap(npu->mmio_atsd_regs[i]);
+
+ kfree(npu);
+
+ return ret;
+}
+
+int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
+ unsigned long msr)
+{
+ int ret;
+ struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
+ struct pci_controller *hose;
+ struct pnv_phb *nphb;
+
+ if (!npdev)
+ return -ENODEV;
+
+ hose = pci_bus_to_host(npdev->bus);
+ nphb = hose->private_data;
+
+ dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n",
+ nphb->opal_id, lparid);
+ /*
+ * Currently we only support radix and non-zero LPCR only makes sense
+ * for hash tables so skiboot expects the LPCR parameter to be a zero.
+ */
+ ret = opal_npu_map_lpar(nphb->opal_id,
+ PCI_DEVID(gpdev->bus->number, gpdev->devfn), lparid,
+ 0 /* LPCR bits */);
+ if (ret) {
+ dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
+ return ret;
+ }
+
+ dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n",
+ nphb->opal_id, msr);
+ ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr,
+ PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+ if (ret < 0)
+ dev_err(&gpdev->dev, "Failed to init context: %d\n", ret);
+ else
+ ret = 0;
return 0;
}
+EXPORT_SYMBOL_GPL(pnv_npu2_map_lpar_dev);
+
+void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr)
+{
+ struct pci_dev *gpdev;
+
+ list_for_each_entry(gpdev, &gpe->pbus->devices, bus_list)
+ pnv_npu2_map_lpar_dev(gpdev, 0, msr);
+}
+
+int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
+{
+ int ret;
+ struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
+ struct pci_controller *hose;
+ struct pnv_phb *nphb;
+
+ if (!npdev)
+ return -ENODEV;
+
+ hose = pci_bus_to_host(npdev->bus);
+ nphb = hose->private_data;
+
+ dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
+ nphb->opal_id);
+ ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/,
+ PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+ if (ret < 0) {
+ dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret);
+ return ret;
+ }
+
+ /* Set LPID to 0 anyway, just to be safe */
+ dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id);
+ ret = opal_npu_map_lpar(nphb->opal_id,
+ PCI_DEVID(gpdev->bus->number, gpdev->devfn), 0 /*LPID*/,
+ 0 /* LPCR bits */);
+ if (ret)
+ dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pnv_npu2_unmap_lpar_dev);
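Note: pnv_npu2_map_lpar_dev() and pnv_npu2_unmap_lpar_dev() are intended to be called in pairs around a GPU's LPAR assignment. A minimal sketch of that pairing, assuming a caller that already holds the GPU's pci_dev (example_attach() is an illustrative name, not part of this patch):

	static int example_attach(struct pci_dev *gpdev)
	{
		int ret;

		/* lparid 0 keeps the device with the host partition,
		 * matching what pnv_npu2_map_lpar() does above */
		ret = pnv_npu2_map_lpar_dev(gpdev, 0, MSR_DR | MSR_PR | MSR_HV);
		if (ret)
			return ret;

		/* ... device is usable here ... */

		return pnv_npu2_unmap_lpar_dev(gpdev);
	}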
diff --git a/arch/powerpc/platforms/powernv/opal-power.c b/arch/powerpc/platforms/powernv/opal-power.c
index 58dc3308237f..89ab1da57657 100644
--- a/arch/powerpc/platforms/powernv/opal-power.c
+++ b/arch/powerpc/platforms/powernv/opal-power.c
@@ -138,7 +138,7 @@ static struct notifier_block opal_power_control_nb = {
.priority = 0,
};
-static int __init opal_power_control_init(void)
+int __init opal_power_control_init(void)
{
int ret, supported = 0;
struct device_node *np;
@@ -176,4 +176,3 @@ static int __init opal_power_control_init(void)
return 0;
}
-machine_subsys_initcall(powernv, opal_power_control_init);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index beed86f4224b..79586f127521 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -877,7 +877,7 @@ static int __init opal_init(void)
consoles = of_find_node_by_path("/ibm,opal/consoles");
if (consoles) {
for_each_child_of_node(consoles, np) {
- if (strcmp(np->name, "serial"))
+ if (!of_node_name_eq(np, "serial"))
continue;
of_platform_device_create(np, NULL, NULL);
}
@@ -960,6 +960,9 @@ static int __init opal_init(void)
/* Initialise OPAL sensor groups */
opal_sensor_groups_init();
+ /* Initialise OPAL Power control interface */
+ opal_power_control_init();
+
return 0;
}
machine_subsys_initcall(powernv, opal_init);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index fe9691040f54..697449afb3f7 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -299,7 +299,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
if (alloc_userspace_copy) {
offset = 0;
uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
- levels, tce_table_size, &offset,
+ tmplevels, tce_table_size, &offset,
&total_allocated_uas);
if (!uas)
goto free_tces_exit;
@@ -368,6 +368,7 @@ void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
found = false;
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
if (table_group->tables[i] == tbl) {
+ iommu_tce_table_put(tbl);
table_group->tables[i] = NULL;
found = true;
break;
@@ -393,7 +394,7 @@ long pnv_pci_link_table_and_group(int node, int num,
tgl->table_group = table_group;
list_add_rcu(&tgl->next, &tbl->it_group_list);
- table_group->tables[num] = tbl;
+ table_group->tables[num] = iommu_tce_table_get(tbl);
return 0;
}
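Note: the two hunks above turn the group's table pointer into a counted reference: pnv_pci_link_table_and_group() now takes a reference with iommu_tce_table_get() and pnv_pci_unlink_table_and_group() drops it before clearing the slot. Roughly, the kref-style pairing is (a sketch, not the full functions):

	/* link: the slot owns a reference from now on */
	table_group->tables[num] = iommu_tce_table_get(tbl);

	/* unlink: release that reference, then clear the slot */
	iommu_tce_table_put(tbl);
	table_group->tables[i] = NULL;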
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index dd807446801e..1d6406a051f1 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -190,7 +190,8 @@ static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
unsigned int pe_num = pe->pe_number;
WARN_ON(pe->pdev);
-
+ WARN_ON(pe->npucomp); /* NPUs are not supposed to be freed */
+ kfree(pe->npucomp);
memset(pe, 0, sizeof(struct pnv_ioda_pe));
clear_bit(pe_num, phb->ioda.pe_alloc);
}
@@ -517,8 +518,6 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
phb->init_m64 = pnv_ioda1_init_m64;
else
phb->init_m64 = pnv_ioda2_init_m64;
- phb->reserve_m64_pe = pnv_ioda_reserve_m64_pe;
- phb->pick_m64_pe = pnv_ioda_pick_m64_pe;
}
static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
@@ -604,8 +603,8 @@ static int pnv_ioda_unfreeze_pe(struct pnv_phb *phb, int pe_no, int opt)
static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
{
struct pnv_ioda_pe *slave, *pe;
- u8 fstate, state;
- __be16 pcierr;
+ u8 fstate = 0, state;
+ __be16 pcierr = 0;
s64 rc;
/* Sanity check on PE number */
@@ -663,10 +662,6 @@ static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
return state;
}
-/* Currently those 2 are only used when MSIs are enabled, this will change
- * but in the meantime, we need to protect them to avoid warnings
- */
-#ifdef CONFIG_PCI_MSI
struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -679,7 +674,6 @@ struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
return NULL;
return &phb->ioda.pe_array[pdn->pe_number];
}
-#endif /* CONFIG_PCI_MSI */
static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
struct pnv_ioda_pe *parent,
@@ -1160,8 +1154,8 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];
/* Check if PE is determined by M64 */
- if (!pe && phb->pick_m64_pe)
- pe = phb->pick_m64_pe(bus, all);
+ if (!pe)
+ pe = pnv_ioda_pick_m64_pe(bus, all);
/* The PE number isn't pinned by M64 */
if (!pe)
@@ -1273,19 +1267,20 @@ static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
static void pnv_pci_ioda_setup_PEs(void)
{
- struct pci_controller *hose, *tmp;
+ struct pci_controller *hose;
struct pnv_phb *phb;
struct pci_bus *bus;
struct pci_dev *pdev;
+ struct pnv_ioda_pe *pe;
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ list_for_each_entry(hose, &hose_list, list_node) {
phb = hose->private_data;
if (phb->type == PNV_PHB_NPU_NVLINK) {
/* PE#0 is needed for error reporting */
pnv_ioda_reserve_pe(phb, 0);
pnv_ioda_setup_npu_PEs(hose->bus);
if (phb->model == PNV_PHB_MODEL_NPU2)
- pnv_npu2_init(phb);
+ WARN_ON_ONCE(pnv_npu2_init(hose));
}
if (phb->type == PNV_PHB_NPU_OCAPI) {
bus = hose->bus;
@@ -1293,6 +1288,14 @@ static void pnv_pci_ioda_setup_PEs(void)
pnv_ioda_setup_dev_PE(pdev);
}
}
+ list_for_each_entry(hose, &hose_list, list_node) {
+ phb = hose->private_data;
+ if (phb->type != PNV_PHB_IODA2)
+ continue;
+
+ list_for_each_entry(pe, &phb->ioda.pe_list, list)
+ pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV);
+ }
}
#ifdef CONFIG_PCI_IOV
@@ -1531,6 +1534,11 @@ void pnv_pci_sriov_disable(struct pci_dev *pdev)
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe);
+#ifdef CONFIG_IOMMU_API
+static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe,
+ struct iommu_table_group *table_group, struct pci_bus *bus);
+
+#endif
static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
{
struct pci_bus *bus;
@@ -1584,6 +1592,9 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
mutex_unlock(&phb->ioda.pe_list_mutex);
pnv_pci_ioda2_setup_dma_pe(phb, pe);
+#ifdef CONFIG_IOMMU_API
+ pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL);
+#endif
}
}
@@ -1923,21 +1934,16 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
return mask;
}
-static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
- struct pci_bus *bus,
- bool add_to_group)
+static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
struct pci_dev *dev;
list_for_each_entry(dev, &bus->devices, bus_list) {
set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
set_dma_offset(&dev->dev, pe->tce_bypass_base);
- if (add_to_group)
- iommu_add_device(&dev->dev);
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
- pnv_ioda_setup_bus_dma(pe, dev->subordinate,
- add_to_group);
+ pnv_ioda_setup_bus_dma(pe, dev->subordinate);
}
}
@@ -2366,16 +2372,8 @@ found:
pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
iommu_init_table(tbl, phb->hose->node);
- if (pe->flags & PNV_IODA_PE_DEV) {
- /*
- * Setting table base here only for carrying iommu_group
- * further down to let iommu_add_device() do the job.
- * pnv_pci_ioda_dma_dev_setup will override it later anyway.
- */
- set_iommu_table_base(&pe->pdev->dev, tbl);
- iommu_add_device(&pe->pdev->dev);
- } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
+ if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
return;
fail:
@@ -2527,14 +2525,6 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
if (!pnv_iommu_bypass_disabled)
pnv_pci_ioda2_set_bypass(pe, true);
- /*
- * Setting table base here only for carrying iommu_group
- * further down to let iommu_add_device() do the job.
- * pnv_pci_ioda_dma_dev_setup will override it later anyway.
- */
- if (pe->flags & PNV_IODA_PE_DEV)
- set_iommu_table_base(&pe->pdev->dev, tbl);
-
return 0;
}
@@ -2565,7 +2555,7 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
#endif
#ifdef CONFIG_IOMMU_API
-static unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
+unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
__u64 window_size, __u32 levels)
{
unsigned long bytes = 0;
@@ -2616,7 +2606,7 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
pnv_pci_ioda2_set_bypass(pe, false);
pnv_pci_ioda2_unset_window(&pe->table_group, 0);
if (pe->pbus)
- pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
iommu_tce_table_put(tbl);
}
@@ -2627,7 +2617,7 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
pnv_pci_ioda2_setup_default_config(pe);
if (pe->pbus)
- pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
}
static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
@@ -2639,131 +2629,100 @@ static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
.release_ownership = pnv_ioda2_release_ownership,
};
-static int gpe_table_group_to_npe_cb(struct device *dev, void *opaque)
+static void pnv_ioda_setup_bus_iommu_group_add_devices(struct pnv_ioda_pe *pe,
+ struct iommu_table_group *table_group,
+ struct pci_bus *bus)
{
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe **ptmppe = opaque;
- struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
- struct pci_dn *pdn = pci_get_pdn(pdev);
-
- if (!pdn || pdn->pe_number == IODA_INVALID_PE)
- return 0;
-
- hose = pci_bus_to_host(pdev->bus);
- phb = hose->private_data;
- if (phb->type != PNV_PHB_NPU_NVLINK)
- return 0;
+ struct pci_dev *dev;
- *ptmppe = &phb->ioda.pe_array[pdn->pe_number];
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ iommu_add_device(table_group, &dev->dev);
- return 1;
+ if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
+ pnv_ioda_setup_bus_iommu_group_add_devices(pe,
+ table_group, dev->subordinate);
+ }
}
-/*
- * This returns PE of associated NPU.
- * This assumes that NPU is in the same IOMMU group with GPU and there is
- * no other PEs.
- */
-static struct pnv_ioda_pe *gpe_table_group_to_npe(
- struct iommu_table_group *table_group)
+static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe,
+ struct iommu_table_group *table_group, struct pci_bus *bus)
{
- struct pnv_ioda_pe *npe = NULL;
- int ret = iommu_group_for_each_dev(table_group->group, &npe,
- gpe_table_group_to_npe_cb);
- BUG_ON(!ret || !npe);
+ if (pe->flags & PNV_IODA_PE_DEV)
+ iommu_add_device(table_group, &pe->pdev->dev);
- return npe;
+ if ((pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) || bus)
+ pnv_ioda_setup_bus_iommu_group_add_devices(pe, table_group,
+ bus);
}
-static long pnv_pci_ioda2_npu_set_window(struct iommu_table_group *table_group,
- int num, struct iommu_table *tbl)
-{
- struct pnv_ioda_pe *npe = gpe_table_group_to_npe(table_group);
- int num2 = (num == 0) ? 1 : 0;
- long ret = pnv_pci_ioda2_set_window(table_group, num, tbl);
-
- if (ret)
- return ret;
-
- if (table_group->tables[num2])
- pnv_npu_unset_window(npe, num2);
-
- ret = pnv_npu_set_window(npe, num, tbl);
- if (ret) {
- pnv_pci_ioda2_unset_window(table_group, num);
- if (table_group->tables[num2])
- pnv_npu_set_window(npe, num2,
- table_group->tables[num2]);
- }
-
- return ret;
-}
+static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb);
-static long pnv_pci_ioda2_npu_unset_window(
- struct iommu_table_group *table_group,
- int num)
+static void pnv_pci_ioda_setup_iommu_api(void)
{
- struct pnv_ioda_pe *npe = gpe_table_group_to_npe(table_group);
- int num2 = (num == 0) ? 1 : 0;
- long ret = pnv_pci_ioda2_unset_window(table_group, num);
-
- if (ret)
- return ret;
-
- if (!npe->table_group.tables[num])
- return 0;
-
- ret = pnv_npu_unset_window(npe, num);
- if (ret)
- return ret;
-
- if (table_group->tables[num2])
- ret = pnv_npu_set_window(npe, num2, table_group->tables[num2]);
-
- return ret;
-}
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pnv_ioda_pe *pe;
-static void pnv_ioda2_npu_take_ownership(struct iommu_table_group *table_group)
-{
/*
- * Detach NPU first as pnv_ioda2_take_ownership() will destroy
- * the iommu_table if 32bit DMA is enabled.
+ * There are 4 types of PEs:
+ * - PNV_IODA_PE_BUS: a downstream port with an adapter,
+ * created from pnv_pci_setup_bridge();
+ * - PNV_IODA_PE_BUS_ALL: a PCI-PCIX bridge with devices behind it,
+ * created from pnv_pci_setup_bridge();
+ * - PNV_IODA_PE_VF: a SRIOV virtual function,
+ * created from pnv_pcibios_sriov_enable();
+ * - PNV_IODA_PE_DEV: an NPU or OCAPI device,
+ * created from pnv_pci_ioda_fixup().
+ *
+ * Normally a PE is represented by an IOMMU group, however for
+ * devices with side channels the groups need to be more strict.
*/
- pnv_npu_take_ownership(gpe_table_group_to_npe(table_group));
- pnv_ioda2_take_ownership(table_group);
-}
+ list_for_each_entry(hose, &hose_list, list_node) {
+ phb = hose->private_data;
-static struct iommu_table_group_ops pnv_pci_ioda2_npu_ops = {
- .get_table_size = pnv_pci_ioda2_get_table_size,
- .create_table = pnv_pci_ioda2_create_table_userspace,
- .set_window = pnv_pci_ioda2_npu_set_window,
- .unset_window = pnv_pci_ioda2_npu_unset_window,
- .take_ownership = pnv_ioda2_npu_take_ownership,
- .release_ownership = pnv_ioda2_release_ownership,
-};
+ if (phb->type == PNV_PHB_NPU_NVLINK)
+ continue;
-static void pnv_pci_ioda_setup_iommu_api(void)
-{
- struct pci_controller *hose, *tmp;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe, *gpe;
+ list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+ struct iommu_table_group *table_group;
+
+ table_group = pnv_try_setup_npu_table_group(pe);
+ if (!table_group) {
+ if (!pnv_pci_ioda_pe_dma_weight(pe))
+ continue;
+
+ table_group = &pe->table_group;
+ iommu_register_group(&pe->table_group,
+ pe->phb->hose->global_number,
+ pe->pe_number);
+ }
+ pnv_ioda_setup_bus_iommu_group(pe, table_group,
+ pe->pbus);
+ }
+ }
/*
* Now we have all PHBs discovered, time to add NPU devices to
* the corresponding IOMMU groups.
*/
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ list_for_each_entry(hose, &hose_list, list_node) {
+ unsigned long pgsizes;
+
phb = hose->private_data;
if (phb->type != PNV_PHB_NPU_NVLINK)
continue;
+ pgsizes = pnv_ioda_parse_tce_sizes(phb);
list_for_each_entry(pe, &phb->ioda.pe_list, list) {
- gpe = pnv_pci_npu_setup_iommu(pe);
- if (gpe)
- gpe->table_group.ops = &pnv_pci_ioda2_npu_ops;
+ /*
+ * IODA2 bridges get this set up from
+ * pci_controller_ops::setup_bridge but NPU bridges
+ * do not have this hook defined so we do it here.
+ */
+ pe->table_group.pgsizes = pgsizes;
+ pnv_npu_compound_attach(pe);
}
}
}
@@ -2810,9 +2769,6 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
/* TVE #1 is selected by PCI address bit 59 */
pe->tce_bypass_base = 1ull << 59;
- iommu_register_group(&pe->table_group, phb->hose->global_number,
- pe->pe_number);
-
/* The PE will reserve all possible 32-bits space */
pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
phb->ioda.m32_pci_base);
@@ -2833,10 +2789,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
return;
if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
}
-#ifdef CONFIG_PCI_MSI
int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq)
{
struct pnv_phb *phb = container_of(chip, struct pnv_phb,
@@ -2982,9 +2937,6 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
count, phb->msi_base);
}
-#else
-static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
-#endif /* CONFIG_PCI_MSI */
#ifdef CONFIG_PCI_IOV
static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
@@ -3402,8 +3354,7 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
return;
/* Reserve PEs according to used M64 resources */
- if (phb->reserve_m64_pe)
- phb->reserve_m64_pe(bus, NULL, all);
+ pnv_ioda_reserve_m64_pe(bus, NULL, all);
/*
* Assign PE. We might run here because of partial hotplug.
@@ -3687,6 +3638,15 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
pnv_ioda_release_pe(pe);
}
+static void pnv_npu_disable_device(struct pci_dev *pdev)
+{
+ struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
+ struct eeh_pe *eehpe = edev ? edev->pe : NULL;
+
+ if (eehpe && eeh_ops && eeh_ops->reset)
+ eeh_ops->reset(eehpe, EEH_RESET_HOT);
+}
+
static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
@@ -3698,10 +3658,8 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
.dma_dev_setup = pnv_pci_dma_dev_setup,
.dma_bus_setup = pnv_pci_dma_bus_setup,
-#ifdef CONFIG_PCI_MSI
.setup_msi_irqs = pnv_setup_msi_irqs,
.teardown_msi_irqs = pnv_teardown_msi_irqs,
-#endif
.enable_device_hook = pnv_pci_enable_device_hook,
.release_device = pnv_pci_release_device,
.window_alignment = pnv_pci_window_alignment,
@@ -3722,15 +3680,14 @@ static int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask)
static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
.dma_dev_setup = pnv_pci_dma_dev_setup,
-#ifdef CONFIG_PCI_MSI
.setup_msi_irqs = pnv_setup_msi_irqs,
.teardown_msi_irqs = pnv_teardown_msi_irqs,
-#endif
.enable_device_hook = pnv_pci_enable_device_hook,
.window_alignment = pnv_pci_window_alignment,
.reset_secondary_bus = pnv_pci_reset_secondary_bus,
.dma_set_mask = pnv_npu_dma_set_mask,
.shutdown = pnv_pci_ioda_shutdown,
+ .disable_device = pnv_npu_disable_device,
};
static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 13aef2323bbc..45fb70b4bfa7 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -160,7 +160,6 @@ exit:
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
-#ifdef CONFIG_PCI_MSI
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
@@ -229,7 +228,6 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
}
}
-#endif /* CONFIG_PCI_MSI */
/* Nicely print the contents of the PE State Tables (PEST). */
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
@@ -602,8 +600,8 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
struct pnv_phb *phb = pdn->phb->private_data;
- u8 fstate;
- __be16 pcierr;
+ u8 fstate = 0;
+ __be16 pcierr = 0;
unsigned int pe_no;
s64 rc;
@@ -1127,4 +1125,45 @@ void __init pnv_pci_init(void)
set_pci_dma_ops(&dma_iommu_ops);
}
-machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
+static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+ struct pci_dev *pdev;
+ struct pci_dn *pdn;
+ struct pnv_ioda_pe *pe;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ pdev = to_pci_dev(dev);
+ pdn = pci_get_pdn(pdev);
+ hose = pci_bus_to_host(pdev->bus);
+ phb = hose->private_data;
+
+ WARN_ON_ONCE(!phb);
+ if (!pdn || pdn->pe_number == IODA_INVALID_PE || !phb)
+ return 0;
+
+ pe = &phb->ioda.pe_array[pdn->pe_number];
+ iommu_add_device(&pe->table_group, dev);
+ return 0;
+ case BUS_NOTIFY_DEL_DEVICE:
+ iommu_del_device(dev);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static struct notifier_block pnv_tce_iommu_bus_nb = {
+ .notifier_call = pnv_tce_iommu_bus_notifier,
+};
+
+static int __init pnv_tce_iommu_bus_notifier_init(void)
+{
+ bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
+ return 0;
+}
+machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 8b37b28e3831..8e36da379252 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -8,9 +8,6 @@
struct pci_dn;
-/* Maximum possible number of ATSD MMIO registers per NPU */
-#define NV_NMMU_ATSD_REGS 8
-
enum pnv_phb_type {
PNV_PHB_IODA1 = 0,
PNV_PHB_IODA2 = 1,
@@ -65,6 +62,7 @@ struct pnv_ioda_pe {
/* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
struct iommu_table_group table_group;
+ struct npu_comp *npucomp;
/* 64-bit TCE bypass region */
bool tce_bypass_enabled;
@@ -106,20 +104,14 @@ struct pnv_phb {
struct dentry *dbgfs;
#endif
-#ifdef CONFIG_PCI_MSI
unsigned int msi_base;
unsigned int msi32_support;
struct msi_bitmap msi_bmp;
-#endif
int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
unsigned int hwirq, unsigned int virq,
unsigned int is_64, struct msi_msg *msg);
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
- void (*fixup_phb)(struct pci_controller *hose);
int (*init_m64)(struct pnv_phb *phb);
- void (*reserve_m64_pe)(struct pci_bus *bus,
- unsigned long *pe_bitmap, bool all);
- struct pnv_ioda_pe *(*pick_m64_pe)(struct pci_bus *bus, bool all);
int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
int (*unfreeze_pe)(struct pnv_phb *phb, int pe_no, int opt);
@@ -180,19 +172,6 @@ struct pnv_phb {
unsigned int diag_data_size;
u8 *diag_data;
- /* Nvlink2 data */
- struct npu {
- int index;
- __be64 *mmio_atsd_regs[NV_NMMU_ATSD_REGS];
- unsigned int mmio_atsd_count;
-
- /* Bitmask for MMIO register usage */
- unsigned long mmio_atsd_usage;
-
- /* Do we need to explicitly flush the nest mmu? */
- bool nmmu_flush;
- } npu;
-
int p2p_target_count;
};
@@ -210,6 +189,7 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_init_npu_phb(struct device_node *np);
extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np);
+extern void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
@@ -220,6 +200,8 @@ extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
extern void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
+extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
+ __u64 window_size, __u32 levels);
extern int pnv_eeh_post_init(void);
extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
@@ -235,12 +217,10 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
extern struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe);
-extern long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num,
- struct iommu_table *tbl);
-extern long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num);
-extern void pnv_npu_take_ownership(struct pnv_ioda_pe *npe);
-extern void pnv_npu_release_ownership(struct pnv_ioda_pe *npe);
-extern int pnv_npu2_init(struct pnv_phb *phb);
+extern struct iommu_table_group *pnv_try_setup_npu_table_group(
+ struct pnv_ioda_pe *pe);
+extern struct iommu_table_group *pnv_npu_compound_attach(
+ struct pnv_ioda_pe *pe);
/* pci-ioda-tce.c */
#define POWERNV_IOMMU_DEFAULT_LEVELS 1
diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c
index 4f7276ebdf9c..4d3929fbc08f 100644
--- a/arch/powerpc/platforms/powernv/vas-debug.c
+++ b/arch/powerpc/platforms/powernv/vas-debug.c
@@ -30,7 +30,7 @@ static char *cop_to_str(int cop)
}
}
-static int info_dbg_show(struct seq_file *s, void *private)
+static int info_show(struct seq_file *s, void *private)
{
struct vas_window *window = s->private;
@@ -49,17 +49,7 @@ unlock:
return 0;
}
-static int info_dbg_open(struct inode *inode, struct file *file)
-{
- return single_open(file, info_dbg_show, inode->i_private);
-}
-
-static const struct file_operations info_fops = {
- .open = info_dbg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(info);
static inline void print_reg(struct seq_file *s, struct vas_window *win,
char *name, u32 reg)
@@ -67,7 +57,7 @@ static inline void print_reg(struct seq_file *s, struct vas_window *win,
seq_printf(s, "0x%016llx %s\n", read_hvwc_reg(win, name, reg), name);
}
-static int hvwc_dbg_show(struct seq_file *s, void *private)
+static int hvwc_show(struct seq_file *s, void *private)
{
struct vas_window *window = s->private;
@@ -115,17 +105,7 @@ unlock:
return 0;
}
-static int hvwc_dbg_open(struct inode *inode, struct file *file)
-{
- return single_open(file, hvwc_dbg_show, inode->i_private);
-}
-
-static const struct file_operations hvwc_fops = {
- .open = hvwc_dbg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(hvwc);
void vas_window_free_dbgdir(struct vas_window *window)
{
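Note: DEFINE_SHOW_ATTRIBUTE(name) generates the same boilerplate being deleted here, which is why the show callbacks were renamed to info_show() and hvwc_show(): the macro pastes _show and _fops onto its argument. From include/linux/seq_file.h, DEFINE_SHOW_ATTRIBUTE(info) expands to roughly:

	static int info_open(struct inode *inode, struct file *file)
	{
		return single_open(file, info_show, inode->i_private);
	}

	static const struct file_operations info_fops = {
		.owner		= THIS_MODULE,
		.open		= info_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

The only behavioural delta is the .owner = THIS_MODULE initialisation, which the open-coded versions lacked.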
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 2e4bd32154b5..472b784f01eb 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -140,8 +140,7 @@ config IBMEBUS
Bus device driver for GX bus based adapters.
config PAPR_SCM
- depends on PPC_PSERIES && MEMORY_HOTPLUG
- select LIBNVDIMM
+ depends on PPC_PSERIES && MEMORY_HOTPLUG && LIBNVDIMM
tristate "Support for the PAPR Storage Class Memory interface"
help
Enable access to hypervisor provided storage class memory.
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 2a983b5a52e1..d291b618a559 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -197,6 +197,7 @@ static int update_lmb_associativity_index(struct drmem_lmb *lmb)
found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);
+ of_node_put(dr_node);
dlpar_free_cc_nodes(lmb_node);
if (!found) {
@@ -313,7 +314,6 @@ out:
static int pseries_remove_mem_node(struct device_node *np)
{
- const char *type;
const __be32 *regs;
unsigned long base;
unsigned int lmb_size;
@@ -322,8 +322,7 @@ static int pseries_remove_mem_node(struct device_node *np)
/*
* Check to see if we are actually removing memory
*/
- type = of_get_property(np, "device_type", NULL);
- if (type == NULL || strcmp(type, "memory") != 0)
+ if (!of_node_is_type(np, "memory"))
return 0;
/*
@@ -355,8 +354,11 @@ static bool lmb_is_removable(struct drmem_lmb *lmb)
phys_addr = lmb->base_addr;
#ifdef CONFIG_FA_DUMP
- /* Don't hot-remove memory that falls in fadump boot memory area */
- if (is_fadump_boot_memory_area(phys_addr, block_sz))
+ /*
+ * Don't hot-remove memory that falls in fadump boot memory area
+ * and memory that is reserved for capturing old kernel memory.
+ */
+ if (is_fadump_memory_area(phys_addr, block_sz))
return false;
#endif
@@ -936,7 +938,6 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
static int pseries_add_mem_node(struct device_node *np)
{
- const char *type;
const __be32 *regs;
unsigned long base;
unsigned int lmb_size;
@@ -945,8 +946,7 @@ static int pseries_add_mem_node(struct device_node *np)
/*
* Check to see if we are actually adding memory
*/
- type = of_get_property(np, "device_type", NULL);
- if (type == NULL || strcmp(type, "memory") != 0)
+ if (!of_node_is_type(np, "memory"))
return 0;
/*
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 06f02960b439..8fc8fe0b9848 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -57,7 +57,6 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node)
{
struct iommu_table_group *table_group;
struct iommu_table *tbl;
- struct iommu_table_group_link *tgl;
table_group = kzalloc_node(sizeof(struct iommu_table_group), GFP_KERNEL,
node);
@@ -68,22 +67,13 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node)
if (!tbl)
goto free_group;
- tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
- node);
- if (!tgl)
- goto free_table;
-
INIT_LIST_HEAD_RCU(&tbl->it_group_list);
kref_init(&tbl->it_kref);
- tgl->table_group = table_group;
- list_add_rcu(&tgl->next, &tbl->it_group_list);
table_group->tables[0] = tbl;
return table_group;
-free_table:
- kfree(tbl);
free_group:
kfree(table_group);
return NULL;
@@ -93,23 +83,12 @@ static void iommu_pseries_free_group(struct iommu_table_group *table_group,
const char *node_name)
{
struct iommu_table *tbl;
-#ifdef CONFIG_IOMMU_API
- struct iommu_table_group_link *tgl;
-#endif
if (!table_group)
return;
tbl = table_group->tables[0];
#ifdef CONFIG_IOMMU_API
- tgl = list_first_entry_or_null(&tbl->it_group_list,
- struct iommu_table_group_link, next);
-
- WARN_ON_ONCE(!tgl);
- if (tgl) {
- list_del_rcu(&tgl->next);
- kfree(tgl);
- }
if (table_group->group) {
iommu_group_put(table_group->group);
BUG_ON(table_group->group);
@@ -645,7 +624,6 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
iommu_table_setparms(pci->phb, dn, tbl);
tbl->it_ops = &iommu_table_pseries_ops;
iommu_init_table(tbl, pci->phb->node);
- iommu_register_group(pci->table_group, pci_domain_nr(bus), 0);
/* Divide the rest (1.75GB) among the children */
pci->phb->dma_window_size = 0x80000000ul;
@@ -756,10 +734,7 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
iommu_table_setparms(phb, dn, tbl);
tbl->it_ops = &iommu_table_pseries_ops;
iommu_init_table(tbl, phb->node);
- iommu_register_group(PCI_DN(dn)->table_group,
- pci_domain_nr(phb->bus), 0);
set_iommu_table_base(&dev->dev, tbl);
- iommu_add_device(&dev->dev);
return;
}
@@ -770,11 +745,10 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
while (dn && PCI_DN(dn) && PCI_DN(dn)->table_group == NULL)
dn = dn->parent;
- if (dn && PCI_DN(dn)) {
+ if (dn && PCI_DN(dn))
set_iommu_table_base(&dev->dev,
PCI_DN(dn)->table_group->tables[0]);
- iommu_add_device(&dev->dev);
- } else
+ else
printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
pci_name(dev));
}
@@ -964,6 +938,37 @@ struct failed_ddw_pdn {
static LIST_HEAD(failed_ddw_pdn_list);
+static phys_addr_t ddw_memory_hotplug_max(void)
+{
+ phys_addr_t max_addr = memory_hotplug_max();
+ struct device_node *memory;
+
+ for_each_node_by_type(memory, "memory") {
+ unsigned long start, size;
+ int ranges, n_mem_addr_cells, n_mem_size_cells, len;
+ const __be32 *memcell_buf;
+
+ memcell_buf = of_get_property(memory, "reg", &len);
+ if (!memcell_buf || len <= 0)
+ continue;
+
+ n_mem_addr_cells = of_n_addr_cells(memory);
+ n_mem_size_cells = of_n_size_cells(memory);
+
+ /* ranges in cell */
+ ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
+
+ start = of_read_number(memcell_buf, n_mem_addr_cells);
+ memcell_buf += n_mem_addr_cells;
+ size = of_read_number(memcell_buf, n_mem_size_cells);
+ memcell_buf += n_mem_size_cells;
+
+ max_addr = max_t(phys_addr_t, max_addr, start + size);
+ }
+
+ return max_addr;
+}
+
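Note: the cell arithmetic above is compact: len is in bytes, so len >> 2 converts it to 32-bit cells, and each (address, size) range occupies n_mem_addr_cells + n_mem_size_cells cells. For example, a "reg" property of 32 bytes with 2 address cells and 2 size cells yields (32 >> 2) / (2 + 2) = 2 ranges.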
/*
* If the PE supports dynamic dma windows, and there is space for a table
* that can map all pages in a linear offset, then setup such a table,
@@ -1053,7 +1058,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
}
/* verify the window * number of ptes will map the partition */
/* check largest block * page size > max memory hotplug addr */
- max_addr = memory_hotplug_max();
+ max_addr = ddw_memory_hotplug_max();
if (query.largest_available_block < (max_addr >> page_shift)) {
dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
"%llu-sized pages\n", max_addr, query.largest_available_block,
@@ -1190,7 +1195,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
}
set_iommu_table_base(&dev->dev, pci->table_group->tables[0]);
- iommu_add_device(&dev->dev);
+ iommu_add_device(pci->table_group, &dev->dev);
}
static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
@@ -1395,4 +1400,27 @@ static int __init disable_multitce(char *str)
__setup("multitce=", disable_multitce);
+static int tce_iommu_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ case BUS_NOTIFY_DEL_DEVICE:
+ iommu_del_device(dev);
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+static struct notifier_block tce_iommu_bus_nb = {
+ .notifier_call = tce_iommu_bus_notifier,
+};
+
+static int __init tce_iommu_bus_notifier_init(void)
+{
+ bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
+ return 0;
+}
machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index ee9372b65ca5..7d6457ab5d34 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -55,7 +55,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
do {
rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
p->blocks, BIND_ANY_ADDR, token);
- token = be64_to_cpu(ret[0]);
+ token = ret[0];
cond_resched();
} while (rc == H_BUSY);
@@ -64,7 +64,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
return -ENXIO;
}
- p->bound_addr = be64_to_cpu(ret[1]);
+ p->bound_addr = ret[1];
dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
@@ -82,7 +82,7 @@ static int drc_pmem_unbind(struct papr_scm_priv *p)
do {
rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
p->bound_addr, p->blocks, token);
- token = be64_to_cpu(ret);
+ token = ret[0];
cond_resched();
} while (rc == H_BUSY);
@@ -223,6 +223,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
goto err;
}
+ if (nvdimm_bus_check_dimm_count(p->bus, 1))
+ goto err;
+
/* now add the region */
memset(&mapping, 0, sizeof(mapping));
@@ -257,9 +260,12 @@ err: nvdimm_bus_unregister(p->bus);
static int papr_scm_probe(struct platform_device *pdev)
{
- uint32_t drc_index, metadata_size, unit_cap[2];
struct device_node *dn = pdev->dev.of_node;
+ u32 drc_index, metadata_size;
+ u64 blocks, block_size;
struct papr_scm_priv *p;
+ const char *uuid_str;
+ u64 uuid[2];
int rc;
/* check we have all the required DT properties */
@@ -268,8 +274,18 @@ static int papr_scm_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (of_property_read_u32_array(dn, "ibm,unit-capacity", unit_cap, 2)) {
- dev_err(&pdev->dev, "%pOF: missing unit-capacity!\n", dn);
+ if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
+ dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
+ return -ENODEV;
+ }
+
+ if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
+ dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
+ return -ENODEV;
+ }
+
+ if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
+ dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
return -ENODEV;
}
@@ -282,8 +298,13 @@ static int papr_scm_probe(struct platform_device *pdev)
p->dn = dn;
p->drc_index = drc_index;
- p->block_size = unit_cap[0];
- p->blocks = unit_cap[1];
+ p->block_size = block_size;
+ p->blocks = blocks;
+
+ /* We just need to ensure that set cookies are unique across */
+ uuid_parse(uuid_str, (uuid_t *) uuid);
+ p->nd_set.cookie1 = uuid[0];
+ p->nd_set.cookie2 = uuid[1];
/* might be zero */
p->metadata_size = metadata_size;
@@ -296,7 +317,7 @@ static int papr_scm_probe(struct platform_device *pdev)
/* setup the resource for the newly bound range */
p->res.start = p->bound_addr;
- p->res.end = p->bound_addr + p->blocks * p->block_size;
+ p->res.end = p->bound_addr + p->blocks * p->block_size - 1;
p->res.name = pdev->name;
p->res.flags = IORESOURCE_MEM;
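Note: the res.end change fixes an off-by-one: struct resource end addresses are inclusive, so a region of blocks * block_size bytes starting at bound_addr must end one byte earlier. With illustrative numbers, 4 blocks of 16 MiB bound at 0x100000000 would give:

	p->res.start = 0x100000000ull;
	p->res.end   = 0x100000000ull + 4 * SZ_16M - 1;	/* 0x103ffffff */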
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 41d8a4d1d02e..7725825d887d 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -29,6 +29,7 @@
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/ppc-pci.h>
+#include <asm/pci.h>
#include "pseries.h"
#if 0
@@ -237,6 +238,8 @@ static void __init pSeries_request_regions(void)
void __init pSeries_final_fixup(void)
{
+ struct pci_controller *hose;
+
pSeries_request_regions();
eeh_probe_devices();
@@ -246,6 +249,25 @@ void __init pSeries_final_fixup(void)
ppc_md.pcibios_sriov_enable = pseries_pcibios_sriov_enable;
ppc_md.pcibios_sriov_disable = pseries_pcibios_sriov_disable;
#endif
+ list_for_each_entry(hose, &hose_list, list_node) {
+ struct device_node *dn = hose->dn, *nvdn;
+
+ while (1) {
+ dn = of_find_all_nodes(dn);
+ if (!dn)
+ break;
+ nvdn = of_parse_phandle(dn, "ibm,nvlink", 0);
+ if (!nvdn)
+ continue;
+ if (!of_device_is_compatible(nvdn, "ibm,npu-link"))
+ continue;
+ if (!of_device_is_compatible(nvdn->parent,
+ "ibm,power9-npu"))
+ continue;
+ WARN_ON_ONCE(pnv_npu2_init(hose));
+ break;
+ }
+ }
}
/*
diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c
index a27f40eb57b1..27f0a915c8a9 100644
--- a/arch/powerpc/platforms/pseries/pmem.c
+++ b/arch/powerpc/platforms/pseries/pmem.c
@@ -52,8 +52,8 @@ static ssize_t pmem_drc_add_node(u32 drc_index)
/* NB: The of reconfig notifier creates platform device from the node */
rc = dlpar_attach_node(dn, pmem_node);
if (rc) {
- pr_err("Failed to attach node %s, rc: %d, drc index: %x\n",
- dn->name, rc, drc_index);
+ pr_err("Failed to attach node %pOF, rc: %d, drc index: %x\n",
+ dn, rc, drc_index);
if (dlpar_release_drc(drc_index))
dlpar_free_cc_nodes(dn);
@@ -93,8 +93,8 @@ static ssize_t pmem_drc_remove_node(u32 drc_index)
rc = dlpar_release_drc(drc_index);
if (rc) {
- pr_err("Failed to release drc (%x) for CPU %s, rc: %d\n",
- drc_index, dn->name, rc);
+ pr_err("Failed to release drc (%x) for CPU %pOFn, rc: %d\n",
+ drc_index, dn, rc);
dlpar_attach_node(dn, pmem_node);
return rc;
}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 0f553dcfa548..41f62ca27c63 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -190,7 +190,7 @@ static void __init pseries_setup_i8259_cascade(void)
of_node_put(old);
if (np == NULL)
break;
- if (strcmp(np->name, "pci") != 0)
+ if (!of_node_name_eq(np, "pci"))
continue;
addrp = of_get_property(np, "8259-interrupt-acknowledge", NULL);
if (addrp == NULL)
@@ -469,8 +469,8 @@ static void __init find_and_init_phbs(void)
struct device_node *root = of_find_node_by_path("/");
for_each_child_of_node(root, node) {
- if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
- strcmp(node->type, "pciex") != 0))
+ if (!of_node_is_type(node, "pci") &&
+ !of_node_is_type(node, "pciex"))
continue;
phb = pcibios_alloc_controller(node);
@@ -978,11 +978,7 @@ static void pseries_power_off(void)
static int __init pSeries_probe(void)
{
- const char *dtype = of_get_property(of_root, "device_type", NULL);
-
- if (dtype == NULL)
- return 0;
- if (strcmp(dtype, "chrp"))
+ if (!of_node_is_type(of_root, "chrp"))
return 0;
/* Cell blades firmware claims to be chrp while it's not. Until this
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index a29ad7db918a..1fad4649735b 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -1355,9 +1355,9 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
*/
parent_node = of_get_parent(of_node);
if (parent_node) {
- if (!strcmp(parent_node->type, "ibm,platform-facilities"))
+ if (of_node_is_type(parent_node, "ibm,platform-facilities"))
family = PFO;
- else if (!strcmp(parent_node->type, "vdevice"))
+ else if (of_node_is_type(parent_node, "vdevice"))
family = VDEVICE;
else {
pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
@@ -1394,9 +1394,8 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
if (viodev->family == VDEVICE) {
unsigned int unit_address;
- if (of_node->type != NULL)
- viodev->type = of_node->type;
- else {
+ viodev->type = of_node_get_device_type(of_node);
+ if (!viodev->type) {
pr_warn("%s: node %pOFn is missing the 'device_type' "
"property.\n", __func__, of_node);
goto out;
@@ -1671,32 +1670,30 @@ struct vio_dev *vio_find_node(struct device_node *vnode)
{
char kobj_name[20];
struct device_node *vnode_parent;
- const char *dev_type;
vnode_parent = of_get_parent(vnode);
if (!vnode_parent)
return NULL;
- dev_type = of_get_property(vnode_parent, "device_type", NULL);
- of_node_put(vnode_parent);
- if (!dev_type)
- return NULL;
-
/* construct the kobject name from the device node */
- if (!strcmp(dev_type, "vdevice")) {
+ if (of_node_is_type(vnode_parent, "vdevice")) {
const __be32 *prop;
prop = of_get_property(vnode, "reg", NULL);
if (!prop)
- return NULL;
+ goto out;
snprintf(kobj_name, sizeof(kobj_name), "%x",
(uint32_t)of_read_number(prop, 1));
- } else if (!strcmp(dev_type, "ibm,platform-facilities"))
+ } else if (of_node_is_type(vnode_parent, "ibm,platform-facilities"))
snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
else
- return NULL;
+ goto out;
+ of_node_put(vnode_parent);
return vio_find_name(kobj_name);
+out:
+ of_node_put(vnode_parent);
+ return NULL;
}
EXPORT_SYMBOL(vio_find_node);
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 2caa4defdfb6..aaf23283ba0c 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -48,7 +48,7 @@ obj-$(CONFIG_PPC_MPC512x) += mpc5xxx_clocks.o
obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o
ifdef CONFIG_SUSPEND
-obj-$(CONFIG_6xx) += 6xx-suspend.o
+obj-$(CONFIG_PPC_BOOK3S_32) += 6xx-suspend.o
endif
obj-$(CONFIG_PPC_SCOM) += scom.o
diff --git a/arch/powerpc/sysdev/fsl_rio.h b/arch/powerpc/sysdev/fsl_rio.h
index 12dd18fd4795..6c13d9a7b7b2 100644
--- a/arch/powerpc/sysdev/fsl_rio.h
+++ b/arch/powerpc/sysdev/fsl_rio.h
@@ -41,7 +41,7 @@
#define DOORBELL_ROWAR_PCI 0x02000000 /* PCI window */
#define DOORBELL_ROWAR_NREAD 0x00040000 /* NREAD */
#define DOORBELL_ROWAR_MAINTRD 0x00070000 /* maintenance read */
-#define DOORBELL_ROWAR_RES 0x00002000 /* wrtpy: reserverd */
+#define DOORBELL_ROWAR_RES 0x00002000 /* wrtpy: reserved */
#define DOORBELL_ROWAR_MAINTWD 0x00007000
#define DOORBELL_ROWAR_SIZE 0x0000000b /* window size is 4k */
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index 88b35a3dcdc5..8b0ebf3940d2 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -756,15 +756,13 @@ fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
}
/* Initialize outbound message descriptor ring */
- rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
+ rmu->msg_tx_ring.virt = dma_zalloc_coherent(priv->dev,
rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
&rmu->msg_tx_ring.phys, GFP_KERNEL);
if (!rmu->msg_tx_ring.virt) {
rc = -ENOMEM;
goto out_dma;
}
- memset(rmu->msg_tx_ring.virt, 0,
- rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
rmu->msg_tx_ring.tx_slot = 0;
/* Point dequeue/enqueue pointers at first entry in ring */
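Note: dma_zalloc_coherent() is the zeroing variant of dma_alloc_coherent(), so the conversion above is behaviour-preserving while dropping the explicit memset(). The roughly equivalent open-coded form is:

	virt = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
	if (virt)
		memset(virt, 0, size);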
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 6300123ce965..8030a0f55e96 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -771,34 +771,6 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
return ipic;
}
-int ipic_set_priority(unsigned int virq, unsigned int priority)
-{
- struct ipic *ipic = ipic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
- u32 temp;
-
- if (priority > 7)
- return -EINVAL;
- if (src > 127)
- return -EINVAL;
- if (ipic_info[src].prio == 0)
- return -EINVAL;
-
- temp = ipic_read(ipic->regs, ipic_info[src].prio);
-
- if (priority < 4) {
- temp &= ~(0x7 << (20 + (3 - priority) * 3));
- temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3);
- } else {
- temp &= ~(0x7 << (4 + (7 - priority) * 3));
- temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3);
- }
-
- ipic_write(ipic->regs, ipic_info[src].prio, temp);
-
- return 0;
-}
-
void ipic_set_highest_priority(unsigned int virq)
{
struct ipic *ipic = ipic_from_irq(virq);
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
index 0f6fd5d04d33..a707b24a7ddb 100644
--- a/arch/powerpc/sysdev/scom.c
+++ b/arch/powerpc/sysdev/scom.c
@@ -60,7 +60,7 @@ scom_map_t scom_map_device(struct device_node *dev, int index)
parent = scom_find_parent(dev);
if (parent == NULL)
- return 0;
+ return NULL;
/*
* We support "scom-reg" properties for adding scom registers
@@ -83,7 +83,7 @@ scom_map_t scom_map_device(struct device_node *dev, int index)
size >>= 2;
if (index >= (size / (2*cells)))
- return 0;
+ return NULL;
reg = of_read_number(&prop[index * cells * 2], cells);
cnt = of_read_number(&prop[index * cells * 2 + cells], cells);
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 9824074ec1b5..94a69a62f5db 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -309,7 +309,7 @@ static void xive_do_queue_eoi(struct xive_cpu *xc)
* EOI an interrupt at the source. There are several methods
* to do this depending on the HW version and source type
*/
-void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
+static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
/* If the XIVE supports the new "store EOI" facility, use it */
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
diff --git a/arch/powerpc/tools/checkpatch.sh b/arch/powerpc/tools/checkpatch.sh
index 1fad3fb90e7c..3ce5c093b19d 100755
--- a/arch/powerpc/tools/checkpatch.sh
+++ b/arch/powerpc/tools/checkpatch.sh
@@ -19,4 +19,5 @@ exec $script_base/../../../scripts/checkpatch.pl \
--ignore GLOBAL_INITIALISERS \
--ignore LINE_SPACING \
--ignore MULTIPLE_ASSIGNMENTS \
+ --ignore DT_SPLIT_BINDING_PATCH \
$@
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 36b8dc47a3c3..757b8499aba2 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -75,6 +75,9 @@ static int xmon_gate;
#define xmon_owner 0
#endif /* CONFIG_SMP */
+#ifdef CONFIG_PPC_PSERIES
+static int set_indicator_token = RTAS_UNKNOWN_SERVICE;
+#endif
static unsigned long in_xmon __read_mostly = 0;
static int xmon_on = IS_ENABLED(CONFIG_XMON_DEFAULT);
@@ -273,7 +276,7 @@ Commands:\n\
X exit monitor and don't recover\n"
#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E)
" u dump segment table or SLB\n"
-#elif defined(CONFIG_PPC_STD_MMU_32)
+#elif defined(CONFIG_PPC_BOOK3S_32)
" u dump segment registers\n"
#elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E)
" u dump TLB\n"
@@ -358,7 +361,6 @@ static inline void disable_surveillance(void)
#ifdef CONFIG_PPC_PSERIES
/* Since this can't be a module, args should end up below 4GB. */
static struct rtas_args args;
- int token;
/*
* At this point we have got all the cpus we can into
@@ -367,11 +369,11 @@ static inline void disable_surveillance(void)
* If we did try to take rtas.lock there would be a
* real possibility of deadlock.
*/
- token = rtas_token("set-indicator");
- if (token == RTAS_UNKNOWN_SERVICE)
+ if (set_indicator_token == RTAS_UNKNOWN_SERVICE)
return;
- rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0);
+ rtas_call_unlocked(&args, set_indicator_token, 3, 1, NULL,
+ SURVEILLANCE_TOKEN, 0, 0);
#endif /* CONFIG_PPC_PSERIES */
}
@@ -1058,7 +1060,7 @@ cmds(struct pt_regs *excp)
case 'P':
show_tasks();
break;
-#ifdef CONFIG_PPC_STD_MMU
+#ifdef CONFIG_PPC_BOOK3S
case 'u':
dump_segments();
break;
@@ -2793,7 +2795,7 @@ print_address(unsigned long addr)
xmon_print_symbol(addr, "\t# ", "");
}
-void
+static void
dump_log_buf(void)
{
struct kmsg_dumper dumper = { .active = 1 };
@@ -2994,13 +2996,13 @@ static void show_task(struct task_struct *tsk)
printf("%px %016lx %6d %6d %c %2d %s\n", tsk,
tsk->thread.ksp,
- tsk->pid, tsk->parent->pid,
+ tsk->pid, rcu_dereference(tsk->parent)->pid,
state, task_thread_info(tsk)->cpu,
tsk->comm);
}
#ifdef CONFIG_PPC_BOOK3S_64
-void format_pte(void *ptep, unsigned long pte)
+static void format_pte(void *ptep, unsigned long pte)
{
pte_t entry = __pte(pte);
@@ -3495,14 +3497,14 @@ void dump_segments(void)
}
#endif
-#ifdef CONFIG_PPC_STD_MMU_32
+#ifdef CONFIG_PPC_BOOK3S_32
void dump_segments(void)
{
int i;
printf("sr0-15 =");
for (i = 0; i < 16; ++i)
- printf(" %x", mfsrin(i));
+ printf(" %x", mfsrin(i << 28));
printf("\n");
}
#endif
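Note: the mfsrin change fixes the operand: mfsrin selects the segment register from the top four bits of the supplied effective address (EA bits 0:3), not from a plain index, so reading SR i requires passing i << 28. For example:

	sr = mfsrin(0);		/* segment register 0 */
	sr = mfsrin(3 << 28);	/* segment register 3 */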
@@ -3688,6 +3690,14 @@ static void xmon_init(int enable)
__debugger_iabr_match = xmon_iabr_match;
__debugger_break_match = xmon_break_match;
__debugger_fault_handler = xmon_fault_handler;
+
+#ifdef CONFIG_PPC_PSERIES
+ /*
+ * Get the token here to avoid trying to get a lock
+ * during the crash, causing a deadlock.
+ */
+ set_indicator_token = rtas_token("set-indicator");
+#endif
} else {
__debugger = NULL;
__debugger_ipi = NULL;
@@ -4033,6 +4043,7 @@ static int do_spu_cmd(void)
subcmd = inchar();
if (isxdigit(subcmd) || subcmd == '\n')
termch = subcmd;
+ /* fall through */
case 'f':
scanhex(&num);
if (num >= XMON_NUM_SPUS || !spu_info[num].spu) {
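Note: the /* fall through */ comment above is the annotation GCC's -Wimplicit-fallthrough recognises; it records that case 's' is meant to continue into case 'f'. The general shape (function names are illustrative):

	switch (cmd) {
	case 's':
		parse_subcommand();
		/* fall through */
	case 'f':
		scan_and_dump();
		break;
	}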
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 51d89c4b1dca..106539bb914e 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -23,6 +23,7 @@ config RISCV
select GENERIC_CPU_DEVICES
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
+ select GENERIC_SCHED_CLOCK
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_SMP_IDLE_THREAD
@@ -226,39 +227,48 @@ endmenu
menu "Boot options"
-config CMDLINE_BOOL
- bool "Built-in kernel command line"
+config CMDLINE
+ string "Built-in kernel command line"
help
- For most platforms, it is firmware or second stage bootloader
- that by default specifies the kernel command line options.
- However, it might be necessary or advantageous to either override
- the default kernel command line or add a few extra options to it.
- For such cases, this option allows hardcoding command line options
- directly into the kernel.
+ For most platforms, the arguments for the kernel's command line
+ are provided at run-time, during boot. However, there are cases
+ where either no arguments are being provided or the provided
+ arguments are insufficient or even invalid.
- For that, choose 'Y' here and fill in the extra boot parameters
- in CONFIG_CMDLINE.
+ When that occurs, it is possible to define a built-in command
+ line here and choose how the kernel should use it later on.
- The built-in options will be concatenated to the default command
- line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
- command line will be ignored and replaced by the built-in string.
+choice
+ prompt "Built-in command line usage" if CMDLINE != ""
+ default CMDLINE_FALLBACK
+ help
+ Choose how the kernel will handle the provided built-in command
+ line.
-config CMDLINE
- string "Built-in kernel command string"
- depends on CMDLINE_BOOL
- default ""
+config CMDLINE_FALLBACK
+ bool "Use bootloader kernel arguments if available"
help
- Supply command-line options at build time by entering them here.
+ Use the built-in command line as fallback in case we get nothing
+ during boot. This is the default behaviour.
+
+config CMDLINE_EXTEND
+ bool "Extend bootloader kernel arguments"
+ help
+ The command-line arguments provided during boot will be
+ appended to the built-in command line. This is useful in
+ cases where the provided arguments are insufficient and
+ you don't want to or cannot modify them.
+
config CMDLINE_FORCE
- bool "Built-in command line overrides bootloader arguments"
- depends on CMDLINE_BOOL
+ bool "Always use the default kernel command string"
help
- Set this option to 'Y' to have the kernel ignore the bootloader
- or firmware command line. Instead, the built-in command line
- will be used exclusively.
+ Always use the built-in command line, even if we get one during
+ boot. This is useful in case you need to override the provided
+ command line on systems where you don't have or want control
+ over it.
- If you don't know what to do here, say N.
+endchoice
endmenu
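Taken together, the three choice entries describe a simple policy applied when the kernel assembles its command line. A hedged C sketch of that policy (buffer and helper names are illustrative, not the actual RISC-V setup code):

#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 1024

enum cmdline_policy { FALLBACK, EXTEND, FORCE };	/* the Kconfig choice */

static void build_cmdline(char *dst, const char *builtin,
			  const char *bootloader, enum cmdline_policy p)
{
	switch (p) {
	case FORCE:	/* CMDLINE_FORCE: ignore whatever boot provided */
		strncpy(dst, builtin, COMMAND_LINE_SIZE - 1);
		break;
	case EXTEND:	/* CMDLINE_EXTEND: boot args appended to built-in */
		snprintf(dst, COMMAND_LINE_SIZE, "%s %s", builtin, bootloader);
		break;
	case FALLBACK:	/* CMDLINE_FALLBACK: built-in only if boot gave nothing */
		strncpy(dst, *bootloader ? bootloader : builtin,
			COMMAND_LINE_SIZE - 1);
		break;
	}
	dst[COMMAND_LINE_SIZE - 1] = '\0';
}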
diff --git a/arch/riscv/Kconfig.debug b/arch/riscv/Kconfig.debug
index c5a72f17c469..e69de29bb2d1 100644
--- a/arch/riscv/Kconfig.debug
+++ b/arch/riscv/Kconfig.debug
@@ -1,2 +0,0 @@
-config EARLY_PRINTK
- def_bool y
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index ef4f15df9adf..f399659d3b8d 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -46,6 +46,7 @@ CONFIG_INPUT_MOUSEDEV=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_HVC_RISCV_SBI=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_DRM=y
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index c452359c9cb8..93826771b616 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -303,6 +303,15 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
ATOMIC_OPS()
+#define atomic_xchg_relaxed atomic_xchg_relaxed
+#define atomic_xchg_acquire atomic_xchg_acquire
+#define atomic_xchg_release atomic_xchg_release
+#define atomic_xchg atomic_xchg
+#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#define atomic_cmpxchg_release atomic_cmpxchg_release
+#define atomic_cmpxchg atomic_cmpxchg
+
#undef ATOMIC_OPS
#undef ATOMIC_OP
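The added defines advertise to the generic atomic layer that RISC-V supplies the relaxed/acquire/release variants of xchg and cmpxchg natively, instead of having them synthesized from the fully ordered operations. Typical use of the acquire/release pairing, for illustration (a sketch, not kernel locking code):

/* Illustrative lock flag built on the ordered cmpxchg variants. */
static inline int try_take(atomic_t *flag)
{
	/* acquire: accesses after a successful CAS cannot move before it */
	return atomic_cmpxchg_acquire(flag, 0, 1) == 0;
}

static inline void release(atomic_t *flag)
{
	/* release: pairs with the acquire on the taking side */
	atomic_set_release(flag, 0);
}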
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index cb35ffd8ec6b..638dee3f7e88 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -28,6 +28,7 @@ static int __init_cache_level(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct device_node *np = of_cpu_device_node_get(cpu);
+ struct device_node *prev = NULL;
int levels = 0, leaves = 0, level;
if (of_property_read_bool(np, "cache-size"))
@@ -39,7 +40,10 @@ static int __init_cache_level(unsigned int cpu)
if (leaves > 0)
levels = 1;
+ prev = np;
while ((np = of_find_next_cache_node(np))) {
+ of_node_put(prev);
+ prev = np;
if (!of_device_is_compatible(np, "cache"))
break;
if (of_property_read_u32(np, "cache-level", &level))
@@ -55,8 +59,10 @@ static int __init_cache_level(unsigned int cpu)
levels = level;
}
+ of_node_put(np);
this_cpu_ci->num_levels = levels;
this_cpu_ci->num_leaves = leaves;
+
return 0;
}
@@ -65,6 +71,7 @@ static int __populate_cache_leaves(unsigned int cpu)
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
struct device_node *np = of_cpu_device_node_get(cpu);
+ struct device_node *prev = NULL;
int levels = 1, level = 1;
if (of_property_read_bool(np, "cache-size"))
@@ -74,7 +81,10 @@ static int __populate_cache_leaves(unsigned int cpu)
if (of_property_read_bool(np, "d-cache-size"))
ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
+ prev = np;
while ((np = of_find_next_cache_node(np))) {
+ of_node_put(prev);
+ prev = np;
if (!of_device_is_compatible(np, "cache"))
break;
if (of_property_read_u32(np, "cache-level", &level))
@@ -89,6 +99,7 @@ static int __populate_cache_leaves(unsigned int cpu)
ci_leaf_init(this_leaf++, np, CACHE_TYPE_DATA, level);
levels = level;
}
+ of_node_put(np);
return 0;
}
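The prev/np bookkeeping added in both functions is the standard leak-free way to walk an OF cache chain: of_find_next_cache_node() returns each node with its reference count raised, so the node just walked past must be dropped on every iteration and the last one dropped after the loop. The pattern in isolation (sketch):

static void walk_cache_chain(unsigned int cpu)
{
	struct device_node *np = of_cpu_device_node_get(cpu);
	struct device_node *prev = np;

	while ((np = of_find_next_cache_node(np))) {
		of_node_put(prev);	/* drop the node we just stepped off */
		prev = np;
		/* ... inspect np's cache properties ... */
	}
	of_node_put(prev);		/* drop the final node still held */
}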
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index b4a7d4427fbb..f8fa2c63aa89 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -158,6 +158,7 @@ static int c_show(struct seq_file *m, void *v)
&& strcmp(compat, "riscv"))
seq_printf(m, "uarch\t\t: %s\n", compat);
seq_puts(m, "\n");
+ of_node_put(node);
return 0;
}
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 0339087aa652..a6e369edbbd7 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -56,8 +56,10 @@ void riscv_fill_hwcap(void)
if (of_property_read_string(node, "riscv,isa", &isa)) {
pr_warning("Unable to find \"riscv,isa\" devicetree entry");
+ of_node_put(node);
return;
}
+ of_node_put(node);
for (i = 0; i < strlen(isa); ++i)
elf_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
index 1157b6b52d25..a840b7d074f7 100644
--- a/arch/riscv/kernel/ftrace.c
+++ b/arch/riscv/kernel/ftrace.c
@@ -132,8 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
{
unsigned long return_hooker = (unsigned long)&return_to_handler;
unsigned long old;
- struct ftrace_graph_ent trace;
- int err;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
@@ -144,17 +142,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
*/
old = *parent;
- trace.func = self_addr;
- trace.depth = current->curr_ret_stack + 1;
-
- if (!ftrace_graph_entry(&trace))
- return;
-
- err = ftrace_push_return_trace(old, self_addr, &trace.depth,
- frame_pointer, parent);
- if (err == -EBUSY)
- return;
- *parent = return_hooker;
+ if (!function_graph_enter(old, self_addr, frame_pointer, parent))
+ *parent = return_hooker;
}
#ifdef CONFIG_DYNAMIC_FTRACE
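This hunk, like the s390 and sh conversions later in the section, replaces the open-coded sequence (fill a ftrace_graph_ent, call ftrace_graph_entry(), push the return trace, hook *parent) with the consolidated function_graph_enter() helper, which returns 0 once the return trace has been queued. The resulting per-arch shape is roughly:

/* Sketch of the hook after the conversion. sh keeps the inverse test
 * because it pre-writes the hooker and restores the old value on failure. */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long old = *parent;

	if (!function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = (unsigned long)&return_to_handler; /* divert return */
}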
diff --git a/arch/riscv/kernel/perf_event.c b/arch/riscv/kernel/perf_event.c
index a243fae1c1db..667ee70defea 100644
--- a/arch/riscv/kernel/perf_event.c
+++ b/arch/riscv/kernel/perf_event.c
@@ -476,6 +476,7 @@ int __init init_hw_perf_events(void)
if (of_id)
riscv_pmu = of_id->data;
+ of_node_put(node);
}
perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 2c290e6aaa6e..fc8006a042eb 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -35,31 +35,9 @@
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
-#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/thread_info.h>
-#ifdef CONFIG_EARLY_PRINTK
-static void sbi_console_write(struct console *co, const char *buf,
- unsigned int n)
-{
- int i;
-
- for (i = 0; i < n; ++i) {
- if (buf[i] == '\n')
- sbi_console_putchar('\r');
- sbi_console_putchar(buf[i]);
- }
-}
-
-struct console riscv_sbi_early_console_dev __initdata = {
- .name = "early",
- .write = sbi_console_write,
- .flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
- .index = -1
-};
-#endif
-
#ifdef CONFIG_DUMMY_CONSOLE
struct screen_info screen_info = {
.orig_video_lines = 30,
@@ -219,12 +197,6 @@ static void __init setup_bootmem(void)
void __init setup_arch(char **cmdline_p)
{
-#if defined(CONFIG_EARLY_PRINTK)
- if (likely(early_console == NULL)) {
- early_console = &riscv_sbi_early_console_dev;
- register_console(early_console);
- }
-#endif
*cmdline_p = boot_command_line;
parse_early_param();
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 18cda0e8cf94..fc185ecabb0a 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -57,12 +57,15 @@ void __init setup_smp(void)
while ((dn = of_find_node_by_type(dn, "cpu"))) {
hart = riscv_of_processor_hartid(dn);
- if (hart < 0)
+ if (hart < 0) {
+ of_node_put(dn);
continue;
+ }
if (hart == cpuid_to_hartid_map(0)) {
BUG_ON(found_boot_cpu);
found_boot_cpu = 1;
+ of_node_put(dn);
continue;
}
@@ -70,6 +73,7 @@ void __init setup_smp(void)
set_cpu_possible(cpuid, true);
set_cpu_present(cpuid, true);
cpuid++;
+ of_node_put(dn);
}
BUG_ON(!found_boot_cpu);
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
index 1911c8f6b8a6..40470e669a35 100644
--- a/arch/riscv/kernel/time.c
+++ b/arch/riscv/kernel/time.c
@@ -26,6 +26,7 @@ void __init time_init(void)
cpu = of_find_node_by_path("/cpus");
if (!cpu || of_property_read_u32(cpu, "timebase-frequency", &prop))
panic(KERN_WARNING "RISC-V system with no 'timebase-frequency' in DTS\n");
+ of_node_put(cpu);
riscv_timebase = prop;
lpj_fine = riscv_timebase / HZ;
diff --git a/arch/riscv/lib/tishift.S b/arch/riscv/lib/tishift.S
index 69abb1277234..237bc9fd0763 100644
--- a/arch/riscv/lib/tishift.S
+++ b/arch/riscv/lib/tishift.S
@@ -10,33 +10,36 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
- .globl __lshrti3
-__lshrti3:
- beqz a2, .L1
- li a5,64
- sub a5,a5,a2
- addi sp,sp,-16
- sext.w a4,a5
- blez a5, .L2
- sext.w a2,a2
- sll a4,a1,a4
- srl a0,a0,a2
- srl a1,a1,a2
- or a0,a0,a4
- sd a1,8(sp)
- sd a0,0(sp)
- ld a0,0(sp)
- ld a1,8(sp)
- addi sp,sp,16
- ret
+
+#include <linux/linkage.h>
+
+ENTRY(__lshrti3)
+ beqz a2, .L1
+ li a5,64
+ sub a5,a5,a2
+ addi sp,sp,-16
+ sext.w a4,a5
+ blez a5, .L2
+ sext.w a2,a2
+ sll a4,a1,a4
+ srl a0,a0,a2
+ srl a1,a1,a2
+ or a0,a0,a4
+ sd a1,8(sp)
+ sd a0,0(sp)
+ ld a0,0(sp)
+ ld a1,8(sp)
+ addi sp,sp,16
+ ret
.L1:
- ret
+ ret
.L2:
- negw a4,a4
- srl a1,a1,a4
- sd a1,0(sp)
- sd zero,8(sp)
- ld a0,0(sp)
- ld a1,8(sp)
- addi sp,sp,16
- ret
+ negw a4,a4
+ srl a1,a1,a4
+ sd a1,0(sp)
+ sd zero,8(sp)
+ ld a0,0(sp)
+ ld a1,8(sp)
+ addi sp,sp,16
+ ret
+ENDPROC(__lshrti3)
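Functionally, __lshrti3 is the compiler-runtime 128-bit logical right shift. The register juggling above is easier to follow against its C equivalent (a reference sketch, not the kernel implementation):

#include <stdint.h>

typedef struct { uint64_t lo, hi; } u128;	/* (a0, a1) in the asm */

static u128 lshrti3(u128 v, unsigned int n)
{
	u128 r;

	if (n == 0)
		return v;			/* .L1 */
	if (n < 64) {				/* main path: bits cross halves */
		r.lo = (v.lo >> n) | (v.hi << (64 - n));
		r.hi = v.hi >> n;
	} else {				/* .L2: shift by 64 or more */
		r.lo = v.hi >> (n - 64);
		r.hi = 0;
	}
	return r;
}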
diff --git a/arch/riscv/lib/udivdi3.S b/arch/riscv/lib/udivdi3.S
index cb01ae5b181a..7f1c0af182a3 100644
--- a/arch/riscv/lib/udivdi3.S
+++ b/arch/riscv/lib/udivdi3.S
@@ -11,28 +11,30 @@
* GNU General Public License for more details.
*/
- .globl __udivdi3
-__udivdi3:
- mv a2, a1
- mv a1, a0
- li a0, -1
- beqz a2, .L5
- li a3, 1
- bgeu a2, a1, .L2
+#include <linux/linkage.h>
+
+ENTRY(__udivdi3)
+ mv a2, a1
+ mv a1, a0
+ li a0, -1
+ beqz a2, .L5
+ li a3, 1
+ bgeu a2, a1, .L2
.L1:
- blez a2, .L2
- slli a2, a2, 1
- slli a3, a3, 1
- bgtu a1, a2, .L1
+ blez a2, .L2
+ slli a2, a2, 1
+ slli a3, a3, 1
+ bgtu a1, a2, .L1
.L2:
- li a0, 0
+ li a0, 0
.L3:
- bltu a1, a2, .L4
- sub a1, a1, a2
- or a0, a0, a3
+ bltu a1, a2, .L4
+ sub a1, a1, a2
+ or a0, a0, a3
.L4:
- srli a3, a3, 1
- srli a2, a2, 1
- bnez a3, .L3
+ srli a3, a3, 1
+ srli a2, a2, 1
+ bnez a3, .L3
.L5:
- ret
+ ret
+ENDPROC(__udivdi3)
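__udivdi3 is classic shift-and-subtract unsigned division: scale the divisor (and a quotient bit) up until doubling would pass the dividend, then walk both back down, subtracting and setting quotient bits. In C terms (sketch):

#include <stdint.h>

static uint64_t udivdi3(uint64_t num, uint64_t den)
{
	uint64_t quot = 0, bit = 1;

	if (den == 0)
		return (uint64_t)-1;		/* .L5: mirrors the asm's li a0,-1 */

	while (den < num && !(den >> 63)) {	/* .L1: scale up */
		den <<= 1;
		bit <<= 1;
	}
	while (bit) {				/* .L3/.L4: scale back down */
		if (num >= den) {
			num -= den;
			quot |= bit;
		}
		den >>= 1;
		bit >>= 1;
	}
	return quot;
}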
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 812d9498d97b..dd456725189f 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -137,7 +137,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
sctx->fallback.cip = crypto_alloc_cipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(sctx->fallback.cip)) {
pr_err("Allocating AES fallback algorithm %s failed\n",
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index 23a14d187fb1..b5ea9e14c017 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -8,6 +8,8 @@
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+/* We use the MSB mostly because it's available */
+#define PREEMPT_NEED_RESCHED 0x80000000
#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
static inline int preempt_count(void)
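With z196 facilities, s390 keeps the need-resched flag inside the preempt count the same way x86 does: the flag occupies the MSB and is stored inverted, so the single value zero means both "preemption enabled" and "reschedule needed" and can be tested in one instruction. An illustration of the encoding (hedged, after the generic scheme):

/* Bit stored inverted: it is CLEARED when a reschedule is pending. */
#define MY_NEED_RESCHED	0x80000000u	/* mirrors PREEMPT_NEED_RESCHED */
#define MY_ENABLED	(0 + MY_NEED_RESCHED)

static inline int preemptible_and_resched(unsigned int raw_count)
{
	return raw_count == 0;	/* one comparison covers both conditions */
}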
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 84be7f02d0c2..39b13d71a8fe 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -203,22 +203,13 @@ device_initcall(ftrace_plt_init);
*/
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
- struct ftrace_graph_ent trace;
-
if (unlikely(ftrace_graph_is_dead()))
goto out;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
ip -= MCOUNT_INSN_SIZE;
- trace.func = ip;
- trace.depth = current->curr_ret_stack + 1;
- /* Only trace if the calling function expects to. */
- if (!ftrace_graph_entry(&trace))
- goto out;
- if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
- NULL) == -EBUSY)
- goto out;
- parent = (unsigned long) return_to_handler;
+ if (!function_graph_enter(parent, ip, 0, NULL))
+ parent = (unsigned long) return_to_handler;
out:
return parent;
}
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
index f413f57f8d20..32023b4f9dc0 100644
--- a/arch/s390/kernel/machine_kexec_file.c
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -134,16 +134,6 @@ int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data,
return ret;
}
-/*
- * The kernel is loaded to a fixed location. Turn off kexec_locate_mem_hole
- * and provide kbuf->mem by hand.
- */
-int arch_kexec_walk_mem(struct kexec_buf *kbuf,
- int (*func)(struct resource *, void *))
-{
- return 1;
-}
-
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
const Elf_Shdr *relsec,
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 74091fd3101e..d5523adeddbf 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -346,6 +346,8 @@ static int __hw_perf_event_init(struct perf_event *event)
break;
case PERF_TYPE_HARDWARE:
+ if (is_sampling_event(event)) /* No sampling support */
+ return -ENOENT;
ev = attr->config;
/* Count user space (problem-state) only */
if (!attr->exclude_user && attr->exclude_kernel) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index fe24150ff666..7f4bc58a53b9 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -11,6 +11,9 @@
* Jason J. Herne <jjherne@us.ibm.com>
*/
+#define KMSG_COMPONENT "kvm-s390"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
@@ -44,10 +47,6 @@
#include "kvm-s390.h"
#include "gaccess.h"
-#define KMSG_COMPONENT "kvm-s390"
-#undef pr_fmt
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
@@ -417,19 +416,30 @@ static void kvm_s390_cpu_feat_init(void)
int kvm_arch_init(void *opaque)
{
+ int rc;
+
kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
if (!kvm_s390_dbf)
return -ENOMEM;
if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
- debug_unregister(kvm_s390_dbf);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_debug_unreg;
}
kvm_s390_cpu_feat_init();
/* Register floating interrupt controller interface. */
- return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
+ rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
+ if (rc) {
+ pr_err("Failed to register FLIC rc=%d\n", rc);
+ goto out_debug_unreg;
+ }
+ return 0;
+
+out_debug_unreg:
+ debug_unregister(kvm_s390_dbf);
+ return rc;
}
void kvm_arch_exit(void)
@@ -464,7 +474,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_CSS_SUPPORT:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
- case KVM_CAP_ENABLE_CAP_VM:
case KVM_CAP_S390_IRQCHIP:
case KVM_CAP_VM_ATTRIBUTES:
case KVM_CAP_MP_STATE:
@@ -607,7 +616,7 @@ static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
}
}
-static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
int r;
@@ -1933,14 +1942,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = kvm_s390_inject_vm(kvm, &s390int);
break;
}
- case KVM_ENABLE_CAP: {
- struct kvm_enable_cap cap;
- r = -EFAULT;
- if (copy_from_user(&cap, argp, sizeof(cap)))
- break;
- r = kvm_vm_ioctl_enable_cap(kvm, &cap);
- break;
- }
case KVM_CREATE_IRQCHIP: {
struct kvm_irq_routing_entry routing;
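The kvm_arch_init() rework above is the usual goto-unwind idiom: once a second step (FLIC registration) can also fail, a single exit label guarantees the debug facility is unregistered on every error path. Reduced to its shape (sketch; the resource functions are placeholders):

#include <errno.h>
#include <stdbool.h>

static bool alloc_a(void) { return true; }	/* placeholder setup/teardown */
static void free_a(void) { }
static int setup_b(void) { return 0; }
static int setup_c(void) { return 0; }

static int example_init(void)
{
	int rc;

	if (!alloc_a())
		return -ENOMEM;
	rc = setup_b();
	if (rc)
		goto out_free_a;	/* later failures unwind everything */
	rc = setup_c();
	if (rc)
		goto out_free_a;
	return 0;

out_free_a:
	free_a();
	return rc;
}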
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 814f26520aa2..db6bb2f97a2c 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -131,6 +131,7 @@ void crst_table_downgrade(struct mm_struct *mm)
}
pgd = mm->pgd;
+ mm_dec_nr_pmds(mm);
mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
mm->context.asce_limit = _REGION3_SIZE;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
@@ -351,7 +352,7 @@ void tlb_table_flush(struct mmu_gather *tlb)
struct mmu_table_batch **batch = &tlb->batch;
if (*batch) {
- call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+ call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
}
}
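The call_rcu_sched() to call_rcu() switch is fallout from this cycle's RCU flavor consolidation: sched, bh, and classic RCU now share a single grace period, leaving call_rcu() as the one API. Its usual shape, for reference (sketch):

struct batch {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void batch_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct batch, rcu));
}

static void retire_batch(struct batch *b)
{
	/* freed only after every pre-existing reader has finished */
	call_rcu(&b->rcu, batch_free_rcu);
}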
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index d7052cbe984f..3ff758eeb71d 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -821,10 +821,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
/*
* BPF_ARSH
*/
+ case BPF_ALU | BPF_ARSH | BPF_X: /* ((s32) dst) >>= src */
+ /* sra %dst,%dst,0(%src) */
+ EMIT4_DISP(0x8a000000, dst_reg, src_reg, 0);
+ EMIT_ZERO(dst_reg);
+ break;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* ((s64) dst) >>= src */
/* srag %dst,%dst,0(%src) */
EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
break;
+ case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst) >>= imm */
+ if (imm == 0)
+ break;
+ /* sra %dst,imm(%r0) */
+ EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+ EMIT_ZERO(dst_reg);
+ break;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
if (imm == 0)
break;
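The new 32-bit ARSH cases emit 'sra' followed by EMIT_ZERO because the BPF ISA defines 32-bit ALU results as zero-extended into the full 64-bit register. What the JIT is implementing, semantically (sketch):

#include <stdint.h>

/* BPF_ALU | BPF_ARSH: arithmetic shift of the low word, upper word zeroed. */
static uint64_t bpf_alu32_arsh(uint64_t dst, uint32_t shift)
{
	int32_t lo = (int32_t)dst;	/* sign-carrying low 32 bits */

	lo >>= (shift & 31);		/* the 'sra' */
	return (uint32_t)lo;		/* the EMIT_ZERO */
}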
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index c521ade2557c..4009bef62fe9 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -228,6 +228,9 @@ archclean:
$(Q)$(MAKE) $(clean)=$(boot)
$(Q)$(MAKE) $(clean)=arch/sh/kernel/vsyscall
+archheaders:
+ $(Q)$(MAKE) $(build)=arch/sh/kernel/syscalls all
+
define archhelp
@echo ' zImage - Compressed kernel image'
@echo ' romImage - Compressed ROM image, if supported'
diff --git a/arch/sh/boards/mach-dreamcast/Makefile b/arch/sh/boards/mach-dreamcast/Makefile
index 7b97546c7e5f..62b024bc2a3e 100644
--- a/arch/sh/boards/mach-dreamcast/Makefile
+++ b/arch/sh/boards/mach-dreamcast/Makefile
@@ -2,5 +2,5 @@
# Makefile for the Sega Dreamcast specific parts of the kernel
#
-obj-y := setup.o irq.o rtc.o
-
+obj-y := setup.o irq.o
+obj-$(CONFIG_RTC_DRV_GENERIC) += rtc.o
diff --git a/arch/sh/boards/mach-dreamcast/rtc.c b/arch/sh/boards/mach-dreamcast/rtc.c
index 061d65714fcc..0eb12c45fa59 100644
--- a/arch/sh/boards/mach-dreamcast/rtc.c
+++ b/arch/sh/boards/mach-dreamcast/rtc.c
@@ -11,8 +11,9 @@
*/
#include <linux/time.h>
-#include <asm/rtc.h>
-#include <asm/io.h>
+#include <linux/rtc.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
/* The AICA RTC has an Epoch of 1/1/1950, so we must subtract 20 years (in
seconds) to get the standard Unix Epoch when getting the time, and add
@@ -26,13 +27,15 @@
/**
* aica_rtc_gettimeofday - Get the time from the AICA RTC
- * @ts: pointer to resulting timespec
+ * @dev: the RTC device (ignored)
+ * @tm: pointer to resulting RTC time structure
*
* Grabs the current RTC seconds counter and adjusts it to the Unix Epoch.
*/
-static void aica_rtc_gettimeofday(struct timespec *ts)
+static int aica_rtc_gettimeofday(struct device *dev, struct rtc_time *tm)
{
unsigned long val1, val2;
+ time64_t t;
do {
val1 = ((__raw_readl(AICA_RTC_SECS_H) & 0xffff) << 16) |
@@ -42,22 +45,26 @@ static void aica_rtc_gettimeofday(struct timespec *ts)
(__raw_readl(AICA_RTC_SECS_L) & 0xffff);
} while (val1 != val2);
- ts->tv_sec = val1 - TWENTY_YEARS;
+ /* normalize to 1970..2106 time range */
+ t = (u32)(val1 - TWENTY_YEARS);
- /* Can't get nanoseconds with just a seconds counter. */
- ts->tv_nsec = 0;
+ rtc_time64_to_tm(t, tm);
+
+ return 0;
}
/**
* aica_rtc_settimeofday - Set the AICA RTC to the current time
- * @secs: contains the time_t to set
+ * @dev: the RTC device (ignored)
+ * @tm: pointer to new RTC time structure
*
 * Adjusts the given @tm to the AICA Epoch and sets the RTC seconds counter.
*/
-static int aica_rtc_settimeofday(const time_t secs)
+static int aica_rtc_settimeofday(struct device *dev, struct rtc_time *tm)
{
unsigned long val1, val2;
- unsigned long adj = secs + TWENTY_YEARS;
+ time64_t secs = rtc_tm_to_time64(tm);
+ u32 adj = secs + TWENTY_YEARS;
do {
__raw_writel((adj & 0xffff0000) >> 16, AICA_RTC_SECS_H);
@@ -73,9 +80,19 @@ static int aica_rtc_settimeofday(const time_t secs)
return 0;
}
-void aica_time_init(void)
+static const struct rtc_class_ops rtc_generic_ops = {
+ .read_time = aica_rtc_gettimeofday,
+ .set_time = aica_rtc_settimeofday,
+};
+
+static int __init aica_time_init(void)
{
- rtc_sh_get_time = aica_rtc_gettimeofday;
- rtc_sh_set_time = aica_rtc_settimeofday;
-}
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(NULL, "rtc-generic", -1,
+ &rtc_generic_ops,
+ sizeof(rtc_generic_ops));
+ return PTR_ERR_OR_ZERO(pdev);
+}
+arch_initcall(aica_time_init);
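Worth spelling out the rtc_class_ops contract the board code now implements: read_time fills a struct rtc_time (tm_year counts from 1900, tm_mon from 0) and set_time receives one, so a raw seconds counter is bridged with rtc_time64_to_tm()/rtc_tm_to_time64() exactly as above. A minimal sketch, with hypothetical read_counter()/write_counter() hardware accessors:

static int counter_rtc_read(struct device *dev, struct rtc_time *tm)
{
	time64_t t = (u32)read_counter();	/* clamp into 1970..2106 */

	rtc_time64_to_tm(t, tm);
	return 0;
}

static int counter_rtc_set(struct device *dev, struct rtc_time *tm)
{
	write_counter((u32)rtc_tm_to_time64(tm));
	return 0;
}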
diff --git a/arch/sh/boards/mach-dreamcast/setup.c b/arch/sh/boards/mach-dreamcast/setup.c
index ad1a4db72e04..672c2ad8f8d5 100644
--- a/arch/sh/boards/mach-dreamcast/setup.c
+++ b/arch/sh/boards/mach-dreamcast/setup.c
@@ -30,7 +30,6 @@
static void __init dreamcast_setup(char **cmdline_p)
{
- board_time_init = aica_time_init;
}
static struct sh_machine_vector mv_dreamcast __initmv = {
diff --git a/arch/sh/boards/mach-sh03/Makefile b/arch/sh/boards/mach-sh03/Makefile
index 400306a796ec..47007a3a2fc8 100644
--- a/arch/sh/boards/mach-sh03/Makefile
+++ b/arch/sh/boards/mach-sh03/Makefile
@@ -2,4 +2,5 @@
# Makefile for the Interface (CTP/PCI-SH03) specific parts of the kernel
#
-obj-y := setup.o rtc.o
+obj-y := setup.o
+obj-$(CONFIG_RTC_DRV_GENERIC) += rtc.o
diff --git a/arch/sh/boards/mach-sh03/rtc.c b/arch/sh/boards/mach-sh03/rtc.c
index dc3d50e3b7a2..8b23ed7c201c 100644
--- a/arch/sh/boards/mach-sh03/rtc.c
+++ b/arch/sh/boards/mach-sh03/rtc.c
@@ -13,8 +13,9 @@
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/spinlock.h>
-#include <asm/io.h>
-#include <asm/rtc.h>
+#include <linux/io.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
#define RTC_BASE 0xb0000000
#define RTC_SEC1 (RTC_BASE + 0)
@@ -38,7 +39,7 @@
static DEFINE_SPINLOCK(sh03_rtc_lock);
-unsigned long get_cmos_time(void)
+static int sh03_rtc_gettimeofday(struct device *dev, struct rtc_time *tm)
{
unsigned int year, mon, day, hour, min, sec;
@@ -75,17 +76,18 @@ unsigned long get_cmos_time(void)
}
spin_unlock(&sh03_rtc_lock);
- return mktime(year, mon, day, hour, min, sec);
-}
-void sh03_rtc_gettimeofday(struct timespec *tv)
-{
+ tm->tm_sec = sec;
+ tm->tm_min = min;
+ tm->tm_hour = hour;
+ tm->tm_mday = day;
+ tm->tm_mon = mon;
+ tm->tm_year = year - 1900;
- tv->tv_sec = get_cmos_time();
- tv->tv_nsec = 0;
+ return 0;
}
-static int set_rtc_mmss(unsigned long nowtime)
+static int set_rtc_mmss(struct rtc_time *tm)
{
int retval = 0;
int real_seconds, real_minutes, cmos_minutes;
@@ -97,8 +99,8 @@ static int set_rtc_mmss(unsigned long nowtime)
if (!(__raw_readb(RTC_CTL) & RTC_BUSY))
break;
cmos_minutes = (__raw_readb(RTC_MIN1) & 0xf) + (__raw_readb(RTC_MIN10) & 0xf) * 10;
- real_seconds = nowtime % 60;
- real_minutes = nowtime / 60;
+ real_seconds = tm->tm_sec;
+ real_minutes = tm->tm_min;
if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
real_minutes += 30; /* correct for half hour time zone */
real_minutes %= 60;
@@ -112,22 +114,31 @@ static int set_rtc_mmss(unsigned long nowtime)
printk_once(KERN_NOTICE
"set_rtc_mmss: can't update from %d to %d\n",
cmos_minutes, real_minutes);
- retval = -1;
+ retval = -EINVAL;
}
spin_unlock(&sh03_rtc_lock);
return retval;
}
-int sh03_rtc_settimeofday(const time_t secs)
+int sh03_rtc_settimeofday(struct device *dev, struct rtc_time *tm)
{
- unsigned long nowtime = secs;
-
- return set_rtc_mmss(nowtime);
+ return set_rtc_mmss(tm);
}
-void sh03_time_init(void)
+static const struct rtc_class_ops rtc_generic_ops = {
+ .read_time = sh03_rtc_gettimeofday,
+ .set_time = sh03_rtc_settimeofday,
+};
+
+static int __init sh03_time_init(void)
{
- rtc_sh_get_time = sh03_rtc_gettimeofday;
- rtc_sh_set_time = sh03_rtc_settimeofday;
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(NULL, "rtc-generic", -1,
+ &rtc_generic_ops,
+ sizeof(rtc_generic_ops));
+
+ return PTR_ERR_OR_ZERO(pdev);
}
+arch_initcall(sh03_time_init);
diff --git a/arch/sh/boards/mach-sh03/setup.c b/arch/sh/boards/mach-sh03/setup.c
index 85e7059a77e9..3901b6031ad5 100644
--- a/arch/sh/boards/mach-sh03/setup.c
+++ b/arch/sh/boards/mach-sh03/setup.c
@@ -22,14 +22,6 @@ static void __init init_sh03_IRQ(void)
plat_irq_setup_pins(IRQ_MODE_IRQ);
}
-/* arch/sh/boards/sh03/rtc.c */
-void sh03_time_init(void);
-
-static void __init sh03_setup(char **cmdline_p)
-{
- board_time_init = sh03_time_init;
-}
-
static struct resource cf_ide_resources[] = {
[0] = {
.start = 0x1f0,
@@ -101,6 +93,5 @@ device_initcall(sh03_devices_setup);
static struct sh_machine_vector mv_sh03 __initmv = {
.mv_name = "Interface (CTP/PCI-SH03)",
- .mv_setup = sh03_setup,
.mv_init_irq = init_sh03_IRQ,
};
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index cde370cad4ae..6e9786548ac6 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -117,18 +117,10 @@ static void __init sh_of_mem_reserve(void)
early_init_fdt_scan_reserved_mem();
}
-static void __init sh_of_time_init(void)
-{
- pr_info("SH generic board support: scanning for clocksource devices\n");
- timer_probe();
-}
-
static void __init sh_of_setup(char **cmdline_p)
{
struct device_node *root;
- board_time_init = sh_of_time_init;
-
sh_mv.mv_name = "Unknown SH model";
root = of_find_node_by_path("/");
if (root) {
diff --git a/arch/sh/configs/dreamcast_defconfig b/arch/sh/configs/dreamcast_defconfig
index 3f08dc54480b..1d27666c029f 100644
--- a/arch/sh/configs/dreamcast_defconfig
+++ b/arch/sh/configs/dreamcast_defconfig
@@ -70,3 +70,5 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=y
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 2156223405a1..489ffdfb1517 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -130,3 +130,5 @@ CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_DEFLATE=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
CONFIG_CRC_CCITT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GENERIC=y
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 6a5609a55965..b15caf34813a 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,3 +1,4 @@
+generated-y += syscall_table.h
generic-y += compat.h
generic-y += current.h
generic-y += delay.h
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 98cb8c802b1a..4f7f235f15f8 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -24,6 +24,7 @@
#define __IO_PREFIX generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
+#include <asm-generic/pci_iomap.h>
#include <mach/mangle-port.h>
#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile u8 __force *)(a) = (v))
diff --git a/arch/sh/include/asm/rtc.h b/arch/sh/include/asm/rtc.h
index c63555ee1255..69dbae2949b0 100644
--- a/arch/sh/include/asm/rtc.h
+++ b/arch/sh/include/asm/rtc.h
@@ -3,9 +3,6 @@
#define _ASM_RTC_H
void time_init(void);
-extern void (*board_time_init)(void);
-extern void (*rtc_sh_get_time)(struct timespec *);
-extern int (*rtc_sh_set_time)(const time_t);
#define RTC_CAP_4_DIGIT_YEAR (1 << 0)
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index a99234b61051..a97f93ca3bd7 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -5,6 +5,8 @@
# include <asm/unistd_64.h>
# endif
+#define NR_syscalls __NR_syscalls
+
# define __ARCH_WANT_NEW_STAT
# define __ARCH_WANT_OLD_READDIR
# define __ARCH_WANT_OLD_STAT
diff --git a/arch/sh/include/mach-dreamcast/mach/sysasic.h b/arch/sh/include/mach-dreamcast/mach/sysasic.h
index 58f710e1ebc2..59effd1ed3e1 100644
--- a/arch/sh/include/mach-dreamcast/mach/sysasic.h
+++ b/arch/sh/include/mach-dreamcast/mach/sysasic.h
@@ -42,7 +42,6 @@
/* arch/sh/boards/mach-dreamcast/irq.c */
extern int systemasic_irq_demux(int);
extern void systemasic_irq_init(void);
-extern void aica_time_init(void);
#endif /* __ASM_SH_DREAMCAST_SYSASIC_H */
diff --git a/arch/sh/include/uapi/asm/Kbuild b/arch/sh/include/uapi/asm/Kbuild
index ba4d39cb321d..a55e317c1ef2 100644
--- a/arch/sh/include/uapi/asm/Kbuild
+++ b/arch/sh/include/uapi/asm/Kbuild
@@ -1,6 +1,7 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generated-y += unistd_32.h
generic-y += bitsperlong.h
generic-y += bpf_perf_event.h
generic-y += errno.h
diff --git a/arch/sh/include/uapi/asm/unistd_32.h b/arch/sh/include/uapi/asm/unistd_32.h
index 58f04cf3d1d9..31c85aa251ab 100644
--- a/arch/sh/include/uapi/asm/unistd_32.h
+++ b/arch/sh/include/uapi/asm/unistd_32.h
@@ -396,6 +396,8 @@
#define __NR_preadv2 381
#define __NR_pwritev2 382
-#define NR_syscalls 383
+#ifdef __KERNEL__
+#define __NR_syscalls 383
+#endif
#endif /* __ASM_SH_UNISTD_32_H */
diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
index 6f809a53aa24..75da54851f02 100644
--- a/arch/sh/include/uapi/asm/unistd_64.h
+++ b/arch/sh/include/uapi/asm/unistd_64.h
@@ -416,6 +416,8 @@
#define __NR_preadv2 392
#define __NR_pwritev2 393
-#define NR_syscalls 394
+#ifdef __KERNEL__
+#define __NR_syscalls 394
+#endif
#endif /* __ASM_SH_UNISTD_64_H */
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 96dd9f7da250..1b04270e5460 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -321,8 +321,7 @@ int ftrace_disable_ftrace_graph_caller(void)
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
unsigned long old;
- int faulted, err;
- struct ftrace_graph_ent trace;
+ int faulted;
unsigned long return_hooker = (unsigned long)&return_to_handler;
if (unlikely(ftrace_graph_is_dead()))
@@ -365,18 +364,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return;
}
- err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL);
- if (err == -EBUSY) {
+ if (function_graph_enter(old, self_addr, 0, NULL))
__raw_writel(old, parent);
- return;
- }
-
- trace.func = self_addr;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
- current->curr_ret_stack--;
- __raw_writel(old, parent);
- }
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/sh/kernel/syscalls/Makefile b/arch/sh/kernel/syscalls/Makefile
new file mode 100644
index 000000000000..659faefdcb1d
--- /dev/null
+++ b/arch/sh/kernel/syscalls/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
+ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+syscall := $(srctree)/$(src)/syscall.tbl
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+ '$(syshdr_abis_$(basetarget))' \
+ '$(syshdr_pfx_$(basetarget))' \
+ '$(syshdr_offset_$(basetarget))'
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
+ '$(systbl_abis_$(basetarget))' \
+ '$(systbl_abi_$(basetarget))' \
+ '$(systbl_offset_$(basetarget))'
+
+$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+ $(call if_changed,syshdr)
+
+$(kapi)/syscall_table.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_32.h
+kapisyshdr-y += syscall_table.h
+
+targets += $(uapisyshdr-y) $(kapisyshdr-y)
+
+PHONY += all
+all: $(addprefix $(uapi)/,$(uapisyshdr-y))
+all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+ @:
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
new file mode 100644
index 000000000000..21ec75288562
--- /dev/null
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -0,0 +1,392 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for sh
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "common" for this file
+#
+0 common restart_syscall sys_restart_syscall
+1 common exit sys_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open
+6 common close sys_close
+7 common waitpid sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve
+12 common chdir sys_chdir
+13 common time sys_time
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown16
+# 17 was break
+18 common oldstat sys_stat
+19 common lseek sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount
+22 common umount sys_oldumount
+23 common setuid sys_setuid16
+24 common getuid sys_getuid16
+25 common stime sys_stime
+26 common ptrace sys_ptrace
+27 common alarm sys_alarm
+28 common oldfstat sys_fstat
+29 common pause sys_pause
+30 common utime sys_utime
+# 31 was stty
+# 32 was gtty
+33 common access sys_access
+34 common nice sys_nice
+# 35 was ftime
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_sh_pipe
+43 common times sys_times
+# 44 was prof
+45 common brk sys_brk
+46 common setgid sys_setgid16
+47 common getgid sys_getgid16
+48 common signal sys_signal
+49 common geteuid sys_geteuid16
+50 common getegid sys_getegid16
+51 common acct sys_acct
+52 common umount2 sys_umount
+# 53 was lock
+54 common ioctl sys_ioctl
+55 common fcntl sys_fcntl
+# 56 was mpx
+57 common setpgid sys_setpgid
+# 58 was ulimit
+# 59 was olduname
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common sigaction sys_sigaction
+68 common sgetmask sys_sgetmask
+69 common ssetmask sys_ssetmask
+70 common setreuid sys_setreuid16
+71 common setregid sys_setregid16
+72 common sigsuspend sys_sigsuspend
+73 common sigpending sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit
+76 common getrlimit sys_old_getrlimit
+77 common getrusage sys_getrusage
+78 common gettimeofday sys_gettimeofday
+79 common settimeofday sys_settimeofday
+80 common getgroups sys_getgroups16
+81 common setgroups sys_setgroups16
+# 82 was select
+83 common symlink sys_symlink
+84 common oldlstat sys_lstat
+85 common readlink sys_readlink
+86 common uselib sys_uselib
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+89 common readdir sys_old_readdir
+90 common mmap old_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate
+93 common ftruncate sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown16
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+# 98 was profil
+99 common statfs sys_statfs
+100 common fstatfs sys_fstatfs
+# 101 was ioperm
+102 common socketcall sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer
+105 common getitimer sys_getitimer
+106 common stat sys_newstat
+107 common lstat sys_newlstat
+108 common fstat sys_newfstat
+109 common olduname sys_uname
+# 110 was iopl
+111 common vhangup sys_vhangup
+# 112 was idle
+# 113 was vm86old
+114 common wait4 sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo
+117 common ipc sys_ipc
+118 common fsync sys_fsync
+119 common sigreturn sys_sigreturn
+120 common clone sys_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+123 common cacheflush sys_cacheflush
+124 common adjtimex sys_adjtimex
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask
+# 127 was create_module
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+# 130 was get_kernel_syms
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_bdflush
+135 common sysfs sys_sysfs
+136 common personality sys_personality
+# 137 was afs_syscall
+138 common setfsuid sys_setfsuid16
+139 common setfsgid sys_setfsgid16
+140 common _llseek sys_llseek
+141 common getdents sys_getdents
+142 common _newselect sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv
+146 common writev sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_sysctl
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval
+162 common nanosleep sys_nanosleep
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid16
+165 common getresuid sys_getresuid16
+# 166 was vm86
+# 167 was query_module
+168 common poll sys_poll
+169 common nfsservctl sys_ni_syscall
+170 common setresgid sys_setresgid16
+171 common getresgid sys_getresgid16
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn
+174 common rt_sigaction sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend
+180 common pread64 sys_pread_wrapper
+181 common pwrite64 sys_pwrite_wrapper
+182 common chown sys_chown16
+183 common getcwd sys_getcwd
+184 common capget sys_capget
+185 common capset sys_capset
+186 common sigaltstack sys_sigaltstack
+187 common sendfile sys_sendfile
+# 188 is reserved for getpmsg
+# 189 is reserved for putpmsg
+190 common vfork sys_vfork
+191 common ugetrlimit sys_getrlimit
+192 common mmap2 sys_mmap2
+193 common truncate64 sys_truncate64
+194 common ftruncate64 sys_ftruncate64
+195 common stat64 sys_stat64
+196 common lstat64 sys_lstat64
+197 common fstat64 sys_fstat64
+198 common lchown32 sys_lchown
+199 common getuid32 sys_getuid
+200 common getgid32 sys_getgid
+201 common geteuid32 sys_geteuid
+202 common getegid32 sys_getegid
+203 common setreuid32 sys_setreuid
+204 common setregid32 sys_setregid
+205 common getgroups32 sys_getgroups
+206 common setgroups32 sys_setgroups
+207 common fchown32 sys_fchown
+208 common setresuid32 sys_setresuid
+209 common getresuid32 sys_getresuid
+210 common setresgid32 sys_setresgid
+211 common getresgid32 sys_getresgid
+212 common chown32 sys_chown
+213 common setuid32 sys_setuid
+214 common setgid32 sys_setgid
+215 common setfsuid32 sys_setfsuid
+216 common setfsgid32 sys_setfsgid
+217 common pivot_root sys_pivot_root
+218 common mincore sys_mincore
+219 common madvise sys_madvise
+220 common getdents64 sys_getdents64
+221 common fcntl64 sys_fcntl64
+# 222 is reserved for tux
+# 223 is unused
+224 common gettid sys_gettid
+225 common readahead sys_readahead
+226 common setxattr sys_setxattr
+227 common lsetxattr sys_lsetxattr
+228 common fsetxattr sys_fsetxattr
+229 common getxattr sys_getxattr
+230 common lgetxattr sys_lgetxattr
+231 common fgetxattr sys_fgetxattr
+232 common listxattr sys_listxattr
+233 common llistxattr sys_llistxattr
+234 common flistxattr sys_flistxattr
+235 common removexattr sys_removexattr
+236 common lremovexattr sys_lremovexattr
+237 common fremovexattr sys_fremovexattr
+238 common tkill sys_tkill
+239 common sendfile64 sys_sendfile64
+240 common futex sys_futex
+241 common sched_setaffinity sys_sched_setaffinity
+242 common sched_getaffinity sys_sched_getaffinity
+# 243 is reserved for set_thread_area
+# 244 is reserved for get_thread_area
+245 common io_setup sys_io_setup
+246 common io_destroy sys_io_destroy
+247 common io_getevents sys_io_getevents
+248 common io_submit sys_io_submit
+249 common io_cancel sys_io_cancel
+250 common fadvise64 sys_fadvise64
+# 251 is unused
+252 common exit_group sys_exit_group
+253 common lookup_dcookie sys_lookup_dcookie
+254 common epoll_create sys_epoll_create
+255 common epoll_ctl sys_epoll_ctl
+256 common epoll_wait sys_epoll_wait
+257 common remap_file_pages sys_remap_file_pages
+258 common set_tid_address sys_set_tid_address
+259 common timer_create sys_timer_create
+260 common timer_settime sys_timer_settime
+261 common timer_gettime sys_timer_gettime
+262 common timer_getoverrun sys_timer_getoverrun
+263 common timer_delete sys_timer_delete
+264 common clock_settime sys_clock_settime
+265 common clock_gettime sys_clock_gettime
+266 common clock_getres sys_clock_getres
+267 common clock_nanosleep sys_clock_nanosleep
+268 common statfs64 sys_statfs64
+269 common fstatfs64 sys_fstatfs64
+270 common tgkill sys_tgkill
+271 common utimes sys_utimes
+272 common fadvise64_64 sys_fadvise64_64_wrapper
+# 273 is reserved for vserver
+274 common mbind sys_mbind
+275 common get_mempolicy sys_get_mempolicy
+276 common set_mempolicy sys_set_mempolicy
+277 common mq_open sys_mq_open
+278 common mq_unlink sys_mq_unlink
+279 common mq_timedsend sys_mq_timedsend
+280 common mq_timedreceive sys_mq_timedreceive
+281 common mq_notify sys_mq_notify
+282 common mq_getsetattr sys_mq_getsetattr
+283 common kexec_load sys_kexec_load
+284 common waitid sys_waitid
+285 common add_key sys_add_key
+286 common request_key sys_request_key
+287 common keyctl sys_keyctl
+288 common ioprio_set sys_ioprio_set
+289 common ioprio_get sys_ioprio_get
+290 common inotify_init sys_inotify_init
+291 common inotify_add_watch sys_inotify_add_watch
+292 common inotify_rm_watch sys_inotify_rm_watch
+# 293 is unused
+294 common migrate_pages sys_migrate_pages
+295 common openat sys_openat
+296 common mkdirat sys_mkdirat
+297 common mknodat sys_mknodat
+298 common fchownat sys_fchownat
+299 common futimesat sys_futimesat
+300 common fstatat64 sys_fstatat64
+301 common unlinkat sys_unlinkat
+302 common renameat sys_renameat
+303 common linkat sys_linkat
+304 common symlinkat sys_symlinkat
+305 common readlinkat sys_readlinkat
+306 common fchmodat sys_fchmodat
+307 common faccessat sys_faccessat
+308 common pselect6 sys_pselect6
+309 common ppoll sys_ppoll
+310 common unshare sys_unshare
+311 common set_robust_list sys_set_robust_list
+312 common get_robust_list sys_get_robust_list
+313 common splice sys_splice
+314 common sync_file_range sys_sync_file_range
+315 common tee sys_tee
+316 common vmsplice sys_vmsplice
+317 common move_pages sys_move_pages
+318 common getcpu sys_getcpu
+319 common epoll_pwait sys_epoll_pwait
+320 common utimensat sys_utimensat
+321 common signalfd sys_signalfd
+322 common timerfd_create sys_timerfd_create
+323 common eventfd sys_eventfd
+324 common fallocate sys_fallocate
+325 common timerfd_settime sys_timerfd_settime
+326 common timerfd_gettime sys_timerfd_gettime
+327 common signalfd4 sys_signalfd4
+328 common eventfd2 sys_eventfd2
+329 common epoll_create1 sys_epoll_create1
+330 common dup3 sys_dup3
+331 common pipe2 sys_pipe2
+332 common inotify_init1 sys_inotify_init1
+333 common preadv sys_preadv
+334 common pwritev sys_pwritev
+335 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+336 common perf_event_open sys_perf_event_open
+337 common fanotify_init sys_fanotify_init
+338 common fanotify_mark sys_fanotify_mark
+339 common prlimit64 sys_prlimit64
+340 common socket sys_socket
+341 common bind sys_bind
+342 common connect sys_connect
+343 common listen sys_listen
+344 common accept sys_accept
+345 common getsockname sys_getsockname
+346 common getpeername sys_getpeername
+347 common socketpair sys_socketpair
+348 common send sys_send
+349 common sendto sys_sendto
+350 common recv sys_recv
+351 common recvfrom sys_recvfrom
+352 common shutdown sys_shutdown
+353 common setsockopt sys_setsockopt
+354 common getsockopt sys_getsockopt
+355 common sendmsg sys_sendmsg
+356 common recvmsg sys_recvmsg
+357 common recvmmsg sys_recvmmsg
+358 common accept4 sys_accept4
+359 common name_to_handle_at sys_name_to_handle_at
+360 common open_by_handle_at sys_open_by_handle_at
+361 common clock_adjtime sys_clock_adjtime
+362 common syncfs sys_syncfs
+363 common sendmmsg sys_sendmmsg
+364 common setns sys_setns
+365 common process_vm_readv sys_process_vm_readv
+366 common process_vm_writev sys_process_vm_writev
+367 common kcmp sys_kcmp
+368 common finit_module sys_finit_module
+369 common sched_getattr sys_sched_getattr
+370 common sched_setattr sys_sched_setattr
+371 common renameat2 sys_renameat2
+372 common seccomp sys_seccomp
+373 common getrandom sys_getrandom
+374 common memfd_create sys_memfd_create
+375 common bpf sys_bpf
+376 common execveat sys_execveat
+377 common userfaultfd sys_userfaultfd
+378 common membarrier sys_membarrier
+379 common mlock2 sys_mlock2
+380 common copy_file_range sys_copy_file_range
+381 common preadv2 sys_preadv2
+382 common pwritev2 sys_pwritev2
diff --git a/arch/sh/kernel/syscalls/syscallhdr.sh b/arch/sh/kernel/syscalls/syscallhdr.sh
new file mode 100644
index 000000000000..1de0334e577f
--- /dev/null
+++ b/arch/sh/kernel/syscalls/syscallhdr.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_SH_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry ; do
+ if [ -z "$offset" ]; then
+ printf "#define __NR_%s%s\t%s\n" \
+ "${prefix}" "${name}" "${nr}"
+ else
+ printf "#define __NR_%s%s\t(%s + %s)\n" \
+ "${prefix}" "${name}" "${offset}" "${nr}"
+ fi
+ nxt=$((nr+1))
+ done
+
+ printf "\n"
+ printf "#ifdef __KERNEL__\n"
+ printf "#define __NR_syscalls\t%s\n" "${nxt}"
+ printf "#endif\n"
+ printf "\n"
+ printf "#endif /* %s */" "${fileguard}"
+) > "$out"
diff --git a/arch/sh/kernel/syscalls/syscalltbl.sh b/arch/sh/kernel/syscalls/syscalltbl.sh
new file mode 100644
index 000000000000..85d78d9309ad
--- /dev/null
+++ b/arch/sh/kernel/syscalls/syscalltbl.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+my_abi="$4"
+offset="$5"
+
+emit() {
+ t_nxt="$1"
+ t_nr="$2"
+ t_entry="$3"
+
+ while [ $t_nxt -lt $t_nr ]; do
+ printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+ t_nxt=$((t_nxt+1))
+ done
+ printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+}
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ nxt=0
+ if [ -z "$offset" ]; then
+ offset=0
+ fi
+
+ while read nr abi name entry ; do
+ emit $((nxt+offset)) $((nr+offset)) $entry
+ nxt=$((nr+1))
+ done
+) > "$out"
diff --git a/arch/sh/kernel/syscalls_32.S b/arch/sh/kernel/syscalls_32.S
index 254bc22ee57d..54978e01bf94 100644
--- a/arch/sh/kernel/syscalls_32.S
+++ b/arch/sh/kernel/syscalls_32.S
@@ -14,389 +14,8 @@
#include <linux/sys.h>
#include <linux/linkage.h>
+#define __SYSCALL(nr, entry, nargs) .long entry
.data
ENTRY(sys_call_table)
- .long sys_restart_syscall /* 0 - old "setup()" system call*/
- .long sys_exit
- .long sys_fork
- .long sys_read
- .long sys_write
- .long sys_open /* 5 */
- .long sys_close
- .long sys_waitpid
- .long sys_creat
- .long sys_link
- .long sys_unlink /* 10 */
- .long sys_execve
- .long sys_chdir
- .long sys_time
- .long sys_mknod
- .long sys_chmod /* 15 */
- .long sys_lchown16
- .long sys_ni_syscall /* old break syscall holder */
- .long sys_stat
- .long sys_lseek
- .long sys_getpid /* 20 */
- .long sys_mount
- .long sys_oldumount
- .long sys_setuid16
- .long sys_getuid16
- .long sys_stime /* 25 */
- .long sys_ptrace
- .long sys_alarm
- .long sys_fstat
- .long sys_pause
- .long sys_utime /* 30 */
- .long sys_ni_syscall /* old stty syscall holder */
- .long sys_ni_syscall /* old gtty syscall holder */
- .long sys_access
- .long sys_nice
- .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
- .long sys_sync
- .long sys_kill
- .long sys_rename
- .long sys_mkdir
- .long sys_rmdir /* 40 */
- .long sys_dup
- .long sys_sh_pipe
- .long sys_times
- .long sys_ni_syscall /* old prof syscall holder */
- .long sys_brk /* 45 */
- .long sys_setgid16
- .long sys_getgid16
- .long sys_signal
- .long sys_geteuid16
- .long sys_getegid16 /* 50 */
- .long sys_acct
- .long sys_umount /* recycled never used phys() */
- .long sys_ni_syscall /* old lock syscall holder */
- .long sys_ioctl
- .long sys_fcntl /* 55 */
- .long sys_ni_syscall /* old mpx syscall holder */
- .long sys_setpgid
- .long sys_ni_syscall /* old ulimit syscall holder */
- .long sys_ni_syscall /* sys_olduname */
- .long sys_umask /* 60 */
- .long sys_chroot
- .long sys_ustat
- .long sys_dup2
- .long sys_getppid
- .long sys_getpgrp /* 65 */
- .long sys_setsid
- .long sys_sigaction
- .long sys_sgetmask
- .long sys_ssetmask
- .long sys_setreuid16 /* 70 */
- .long sys_setregid16
- .long sys_sigsuspend
- .long sys_sigpending
- .long sys_sethostname
- .long sys_setrlimit /* 75 */
- .long sys_old_getrlimit
- .long sys_getrusage
- .long sys_gettimeofday
- .long sys_settimeofday
- .long sys_getgroups16 /* 80 */
- .long sys_setgroups16
- .long sys_ni_syscall /* sys_oldselect */
- .long sys_symlink
- .long sys_lstat
- .long sys_readlink /* 85 */
- .long sys_uselib
- .long sys_swapon
- .long sys_reboot
- .long sys_old_readdir
- .long old_mmap /* 90 */
- .long sys_munmap
- .long sys_truncate
- .long sys_ftruncate
- .long sys_fchmod
- .long sys_fchown16 /* 95 */
- .long sys_getpriority
- .long sys_setpriority
- .long sys_ni_syscall /* old profil syscall holder */
- .long sys_statfs
- .long sys_fstatfs /* 100 */
- .long sys_ni_syscall /* ioperm */
- .long sys_socketcall
- .long sys_syslog
- .long sys_setitimer
- .long sys_getitimer /* 105 */
- .long sys_newstat
- .long sys_newlstat
- .long sys_newfstat
- .long sys_uname
- .long sys_ni_syscall /* 110 */ /* iopl */
- .long sys_vhangup
- .long sys_ni_syscall /* idle */
- .long sys_ni_syscall /* vm86old */
- .long sys_wait4
- .long sys_swapoff /* 115 */
- .long sys_sysinfo
- .long sys_ipc
- .long sys_fsync
- .long sys_sigreturn
- .long sys_clone /* 120 */
- .long sys_setdomainname
- .long sys_newuname
- .long sys_cacheflush /* x86: sys_modify_ldt */
- .long sys_adjtimex
- .long sys_mprotect /* 125 */
- .long sys_sigprocmask
- .long sys_ni_syscall /* old "create_module" */
- .long sys_init_module
- .long sys_delete_module
- .long sys_ni_syscall /* 130: old "get_kernel_syms" */
- .long sys_quotactl
- .long sys_getpgid
- .long sys_fchdir
- .long sys_bdflush
- .long sys_sysfs /* 135 */
- .long sys_personality
- .long sys_ni_syscall /* for afs_syscall */
- .long sys_setfsuid16
- .long sys_setfsgid16
- .long sys_llseek /* 140 */
- .long sys_getdents
- .long sys_select
- .long sys_flock
- .long sys_msync
- .long sys_readv /* 145 */
- .long sys_writev
- .long sys_getsid
- .long sys_fdatasync
- .long sys_sysctl
- .long sys_mlock /* 150 */
- .long sys_munlock
- .long sys_mlockall
- .long sys_munlockall
- .long sys_sched_setparam
- .long sys_sched_getparam /* 155 */
- .long sys_sched_setscheduler
- .long sys_sched_getscheduler
- .long sys_sched_yield
- .long sys_sched_get_priority_max
- .long sys_sched_get_priority_min /* 160 */
- .long sys_sched_rr_get_interval
- .long sys_nanosleep
- .long sys_mremap
- .long sys_setresuid16
- .long sys_getresuid16 /* 165 */
- .long sys_ni_syscall /* vm86 */
- .long sys_ni_syscall /* old "query_module" */
- .long sys_poll
- .long sys_ni_syscall /* was nfsservctl */
- .long sys_setresgid16 /* 170 */
- .long sys_getresgid16
- .long sys_prctl
- .long sys_rt_sigreturn
- .long sys_rt_sigaction
- .long sys_rt_sigprocmask /* 175 */
- .long sys_rt_sigpending
- .long sys_rt_sigtimedwait
- .long sys_rt_sigqueueinfo
- .long sys_rt_sigsuspend
- .long sys_pread_wrapper /* 180 */
- .long sys_pwrite_wrapper
- .long sys_chown16
- .long sys_getcwd
- .long sys_capget
- .long sys_capset /* 185 */
- .long sys_sigaltstack
- .long sys_sendfile
- .long sys_ni_syscall /* getpmsg */
- .long sys_ni_syscall /* putpmsg */
- .long sys_vfork /* 190 */
- .long sys_getrlimit
- .long sys_mmap2
- .long sys_truncate64
- .long sys_ftruncate64
- .long sys_stat64 /* 195 */
- .long sys_lstat64
- .long sys_fstat64
- .long sys_lchown
- .long sys_getuid
- .long sys_getgid /* 200 */
- .long sys_geteuid
- .long sys_getegid
- .long sys_setreuid
- .long sys_setregid
- .long sys_getgroups /* 205 */
- .long sys_setgroups
- .long sys_fchown
- .long sys_setresuid
- .long sys_getresuid
- .long sys_setresgid /* 210 */
- .long sys_getresgid
- .long sys_chown
- .long sys_setuid
- .long sys_setgid
- .long sys_setfsuid /* 215 */
- .long sys_setfsgid
- .long sys_pivot_root
- .long sys_mincore
- .long sys_madvise
- .long sys_getdents64 /* 220 */
- .long sys_fcntl64
- .long sys_ni_syscall /* reserved for TUX */
- .long sys_ni_syscall /* Reserved for Security */
- .long sys_gettid
- .long sys_readahead /* 225 */
- .long sys_setxattr
- .long sys_lsetxattr
- .long sys_fsetxattr
- .long sys_getxattr
- .long sys_lgetxattr /* 230 */
- .long sys_fgetxattr
- .long sys_listxattr
- .long sys_llistxattr
- .long sys_flistxattr
- .long sys_removexattr /* 235 */
- .long sys_lremovexattr
- .long sys_fremovexattr
- .long sys_tkill
- .long sys_sendfile64
- .long sys_futex /* 240 */
- .long sys_sched_setaffinity
- .long sys_sched_getaffinity
- .long sys_ni_syscall /* reserved for set_thread_area */
- .long sys_ni_syscall /* reserved for get_thread_area */
- .long sys_io_setup /* 245 */
- .long sys_io_destroy
- .long sys_io_getevents
- .long sys_io_submit
- .long sys_io_cancel
- .long sys_fadvise64 /* 250 */
- .long sys_ni_syscall
- .long sys_exit_group
- .long sys_lookup_dcookie
- .long sys_epoll_create
- .long sys_epoll_ctl /* 255 */
- .long sys_epoll_wait
- .long sys_remap_file_pages
- .long sys_set_tid_address
- .long sys_timer_create
- .long sys_timer_settime /* 260 */
- .long sys_timer_gettime
- .long sys_timer_getoverrun
- .long sys_timer_delete
- .long sys_clock_settime
- .long sys_clock_gettime /* 265 */
- .long sys_clock_getres
- .long sys_clock_nanosleep
- .long sys_statfs64
- .long sys_fstatfs64
- .long sys_tgkill /* 270 */
- .long sys_utimes
- .long sys_fadvise64_64_wrapper
- .long sys_ni_syscall /* Reserved for vserver */
- .long sys_mbind
- .long sys_get_mempolicy /* 275 */
- .long sys_set_mempolicy
- .long sys_mq_open
- .long sys_mq_unlink
- .long sys_mq_timedsend
- .long sys_mq_timedreceive /* 280 */
- .long sys_mq_notify
- .long sys_mq_getsetattr
- .long sys_kexec_load
- .long sys_waitid
- .long sys_add_key /* 285 */
- .long sys_request_key
- .long sys_keyctl
- .long sys_ioprio_set
- .long sys_ioprio_get
- .long sys_inotify_init /* 290 */
- .long sys_inotify_add_watch
- .long sys_inotify_rm_watch
- .long sys_ni_syscall
- .long sys_migrate_pages
- .long sys_openat /* 295 */
- .long sys_mkdirat
- .long sys_mknodat
- .long sys_fchownat
- .long sys_futimesat
- .long sys_fstatat64 /* 300 */
- .long sys_unlinkat
- .long sys_renameat
- .long sys_linkat
- .long sys_symlinkat
- .long sys_readlinkat /* 305 */
- .long sys_fchmodat
- .long sys_faccessat
- .long sys_pselect6
- .long sys_ppoll
- .long sys_unshare /* 310 */
- .long sys_set_robust_list
- .long sys_get_robust_list
- .long sys_splice
- .long sys_sync_file_range
- .long sys_tee /* 315 */
- .long sys_vmsplice
- .long sys_move_pages
- .long sys_getcpu
- .long sys_epoll_pwait
- .long sys_utimensat /* 320 */
- .long sys_signalfd
- .long sys_timerfd_create
- .long sys_eventfd
- .long sys_fallocate
- .long sys_timerfd_settime /* 325 */
- .long sys_timerfd_gettime
- .long sys_signalfd4
- .long sys_eventfd2
- .long sys_epoll_create1
- .long sys_dup3 /* 330 */
- .long sys_pipe2
- .long sys_inotify_init1
- .long sys_preadv
- .long sys_pwritev
- .long sys_rt_tgsigqueueinfo /* 335 */
- .long sys_perf_event_open
- .long sys_fanotify_init
- .long sys_fanotify_mark
- .long sys_prlimit64
- /* Broken-out socket family */
- .long sys_socket /* 340 */
- .long sys_bind
- .long sys_connect
- .long sys_listen
- .long sys_accept
- .long sys_getsockname /* 345 */
- .long sys_getpeername
- .long sys_socketpair
- .long sys_send
- .long sys_sendto
- .long sys_recv /* 350 */
- .long sys_recvfrom
- .long sys_shutdown
- .long sys_setsockopt
- .long sys_getsockopt
- .long sys_sendmsg /* 355 */
- .long sys_recvmsg
- .long sys_recvmmsg
- .long sys_accept4
- .long sys_name_to_handle_at
- .long sys_open_by_handle_at /* 360 */
- .long sys_clock_adjtime
- .long sys_syncfs
- .long sys_sendmmsg
- .long sys_setns
- .long sys_process_vm_readv /* 365 */
- .long sys_process_vm_writev
- .long sys_kcmp
- .long sys_finit_module
- .long sys_sched_getattr
- .long sys_sched_setattr /* 370 */
- .long sys_renameat2
- .long sys_seccomp
- .long sys_getrandom
- .long sys_memfd_create
- .long sys_bpf /* 375 */
- .long sys_execveat
- .long sys_userfaultfd
- .long sys_membarrier
- .long sys_mlock2
- .long sys_copy_file_range /* 380 */
- .long sys_preadv2
- .long sys_pwritev2
+#include <asm/syscall_table.h>
+#undef __SYSCALL
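
The hand-maintained sys_call_table entries above are replaced by a generated header. A minimal sketch of the pattern, assuming the usual syscalltbl plumbing (the exact generated file comes from the arch/sh/kernel/syscalls/ scripts and may differ in detail):

/* In syscalls_32.S, __SYSCALL is defined just before the include, roughly: */
#define __SYSCALL(nr, entry)	.long entry
/* asm/syscall_table.h, generated from syscall.tbl, then expands to: */
	.long sys_restart_syscall	/* 0 */
	.long sys_exit			/* 1 */
	/* ... one slot per syscall.tbl row, in syscall-number order ... */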
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index fcd5e41977d1..8a1c6c8ab4ec 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -22,77 +22,6 @@
#include <asm/clock.h>
#include <asm/rtc.h>
-/* Dummy RTC ops */
-static void null_rtc_get_time(struct timespec *tv)
-{
- tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
- tv->tv_nsec = 0;
-}
-
-static int null_rtc_set_time(const time_t secs)
-{
- return 0;
-}
-
-void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
-int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
-
-void read_persistent_clock(struct timespec *ts)
-{
- rtc_sh_get_time(ts);
-}
-
-#ifdef CONFIG_GENERIC_CMOS_UPDATE
-int update_persistent_clock(struct timespec now)
-{
- return rtc_sh_set_time(now.tv_sec);
-}
-#endif
-
-static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
-{
- struct timespec tv;
-
- rtc_sh_get_time(&tv);
- rtc_time_to_tm(tv.tv_sec, tm);
- return 0;
-}
-
-static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
-{
- unsigned long secs;
-
- rtc_tm_to_time(tm, &secs);
- if ((rtc_sh_set_time == null_rtc_set_time) ||
- (rtc_sh_set_time(secs) < 0))
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-static const struct rtc_class_ops rtc_generic_ops = {
- .read_time = rtc_generic_get_time,
- .set_time = rtc_generic_set_time,
-};
-
-static int __init rtc_generic_init(void)
-{
- struct platform_device *pdev;
-
- if (rtc_sh_get_time == null_rtc_get_time)
- return -ENODEV;
-
- pdev = platform_device_register_data(NULL, "rtc-generic", -1,
- &rtc_generic_ops,
- sizeof(rtc_generic_ops));
-
-
- return PTR_ERR_OR_ZERO(pdev);
-}
-device_initcall(rtc_generic_init);
-
-void (*board_time_init)(void);
-
static void __init sh_late_time_init(void)
{
/*
@@ -110,8 +39,7 @@ static void __init sh_late_time_init(void)
void __init time_init(void)
{
- if (board_time_init)
- board_time_init();
+ timer_probe();
clk_init();
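
With the board_time_init() hook gone, timer registration goes through the generic timer_probe() path. A hedged sketch of what that relies on; the driver name, compatible string, and init body here are illustrative, not taken from this patch:

/* Drivers register a DT match plus init function at build time: */
static int __init my_timer_of_init(struct device_node *np)
{
	/* ioremap registers, request the irq, register the clockevent */
	return 0;
}
TIMER_OF_DECLARE(my_timer, "vendor,my-timer", my_timer_of_init);
/* timer_probe() walks the device tree and calls the init function of
 * every node whose "compatible" matches an entry in this table. */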
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index 048a033d6102..4a0919581697 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -81,6 +81,9 @@ install:
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
+archheaders:
+ $(Q)$(MAKE) $(build)=arch/sparc/kernel/syscalls all
+
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/sparc/vdso $@
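
The new archheaders target is what the top-level Makefile invokes before compiling anything, so the generated syscall headers exist by the time they are included (see the generated-y entries in the Kbuild hunks below). Illustrative shape of the output, not the verbatim generated files:

/* arch/sparc/include/generated/uapi/asm/unistd_64.h (assumed excerpt) */
#define __NR_restart_syscall	0
#define __NR_exit		1
/* ... */
#define __NR_syscalls		362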
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index 3cd4f6b198b6..a9b8b0b94a8d 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -476,11 +476,6 @@ static bool __init sparc64_has_aes_opcode(void)
static int __init aes_sparc64_mod_init(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(algs); i++)
- INIT_LIST_HEAD(&algs[i].cra_list);
-
if (sparc64_has_aes_opcode()) {
pr_info("Using sparc64 aes opcodes optimized AES implementation\n");
return crypto_register_algs(algs, ARRAY_SIZE(algs));
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 561a84d93cf6..900d5c617e83 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -299,11 +299,6 @@ static bool __init sparc64_has_camellia_opcode(void)
static int __init camellia_sparc64_mod_init(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(algs); i++)
- INIT_LIST_HEAD(&algs[i].cra_list);
-
if (sparc64_has_camellia_opcode()) {
pr_info("Using sparc64 camellia opcodes optimized CAMELLIA implementation\n");
return crypto_register_algs(algs, ARRAY_SIZE(algs));
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 61af794aa2d3..56499ea39fd3 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -510,11 +510,6 @@ static bool __init sparc64_has_des_opcode(void)
static int __init des_sparc64_mod_init(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(algs); i++)
- INIT_LIST_HEAD(&algs[i].cra_list);
-
if (sparc64_has_des_opcode()) {
pr_info("Using sparc64 des opcodes optimized DES implementation\n");
return crypto_register_algs(algs, ARRAY_SIZE(algs));
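
The same INIT_LIST_HEAD() loop is dropped from all three sparc64 crypto glue files (aes, camellia, des) because registration initializes cra_list itself. A simplified sketch of the relevant step in crypto/algapi.c, paraphrased rather than verbatim:

static int __crypto_register_alg(struct crypto_alg *alg)	/* simplified */
{
	/* ... validation ... */
	list_add(&alg->cra_list, &crypto_alg_list);	/* rewrites prev/next */
	/* ... */
	return 0;
}
/* list_add() unconditionally overwrites the entry's link pointers, so
 * pre-initializing cra_list in each driver was dead code. */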
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 410b263ef5c8..b82f64e28f55 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -1,6 +1,8 @@
# User exported sparc header files
-
+generated-y += syscall_table_32.h
+generated-y += syscall_table_64.h
+generated-y += syscall_table_c32.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += exec.h
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index 2a050eab69a0..3729fc35ba83 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -528,9 +528,9 @@ static int sun_pci_fd_test_drive(unsigned long port, int drive)
static int __init ebus_fdthree_p(struct device_node *dp)
{
- if (!strcmp(dp->name, "fdthree"))
+ if (of_node_name_eq(dp, "fdthree"))
return 1;
- if (!strcmp(dp->name, "floppy")) {
+ if (of_node_name_eq(dp, "floppy")) {
const char *compat;
compat = of_get_property(dp, "compatible", NULL);
@@ -555,7 +555,7 @@ static unsigned long __init sun_floppy_init(void)
op = NULL;
for_each_node_by_name(dp, "SUNW,fdtwo") {
- if (strcmp(dp->parent->name, "sbus"))
+ if (!of_node_name_eq(dp->parent, "sbus"))
continue;
op = of_find_device_by_node(dp);
if (op)
@@ -656,7 +656,7 @@ static unsigned long __init sun_floppy_init(void)
*/
config = 0;
for (dp = ebus_dp->child; dp; dp = dp->sibling) {
- if (!strcmp(dp->name, "ecpp")) {
+ if (of_node_name_eq(dp, "ecpp")) {
struct platform_device *ecpp_op;
ecpp_op = of_find_device_by_node(dp);
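
Note the polarity flip in these conversions: of_node_name_eq() returns true on a match, so !strcmp(dp->name, "x") becomes of_node_name_eq(dp, "x"), while the bare strcmp() in the sbus check above becomes !of_node_name_eq(). A condensed sketch of the helper, close to the drivers/of/base.c implementation but trimmed:

bool of_node_name_eq(const struct device_node *np, const char *name)
{
	const char *node_name;
	size_t len;

	if (!np)
		return false;
	node_name = kbasename(np->full_name);
	len = strchrnul(node_name, '@') - node_name;
	/* compare only the node name, ignoring any @unit-address suffix */
	return strlen(name) == len && !strncmp(node_name, name, len);
}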
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 77ea406ff9df..c1e05e4ab9e3 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -225,7 +225,6 @@ void leon_update_virq_handling(unsigned int virq,
irq_flow_handler_t flow_handler,
const char *name, int do_ack);
void leon_init_timers(void);
-void leon_trans_init(struct device_node *dp);
void leon_node_init(struct device_node *dp, struct device_node ***nextp);
void init_leon(void);
void poke_leonsparc(void);
diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h
index 3c5a1c620f0f..03b27090c0c8 100644
--- a/arch/sparc/include/asm/parport.h
+++ b/arch/sparc/include/asm/parport.h
@@ -117,7 +117,7 @@ static int ecpp_probe(struct platform_device *op)
int slot, err;
parent = op->dev.of_node->parent;
- if (!strcmp(parent->name, "dma")) {
+ if (of_node_name_eq(parent, "dma")) {
p = parport_pc_probe_port(base, base + 0x400,
op->archdata.irqs[0], PARPORT_DMA_NOFIFO,
op->dev.parent->parent, 0);
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index 00f87dbd0b17..5194d86ef72d 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -17,6 +17,8 @@
#include <uapi/asm/unistd.h>
+#define NR_syscalls __NR_syscalls
+
#ifdef __32bit_syscall_numbers__
#else
#define __NR_time 231 /* Linux sparc32 */
@@ -46,4 +48,20 @@
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
+#ifdef __32bit_syscall_numbers__
+/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
+ * it never had the plain ones and there is no value to adding those
+ * old versions into the syscall table.
+ */
+#define __IGNORE_setresuid
+#define __IGNORE_getresuid
+#define __IGNORE_setresgid
+#define __IGNORE_getresgid
+#endif
+
+/* Sparc doesn't have protection keys. */
+#define __IGNORE_pkey_mprotect
+#define __IGNORE_pkey_alloc
+#define __IGNORE_pkey_free
+
#endif /* _SPARC_UNISTD_H */
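
The __IGNORE_* defines silence the missing-syscall check rather than wire anything up: scripts/checksyscalls.sh effectively compiles a probe like the following for every syscall in the generic list (shape approximate):

#ifndef __IGNORE_pkey_mprotect
#ifndef __NR_pkey_mprotect
#warning syscall pkey_mprotect not implemented
#endif
#endif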
diff --git a/arch/sparc/include/uapi/asm/Kbuild b/arch/sparc/include/uapi/asm/Kbuild
index 4680ba246b55..ae72977287e3 100644
--- a/arch/sparc/include/uapi/asm/Kbuild
+++ b/arch/sparc/include/uapi/asm/Kbuild
@@ -1,5 +1,7 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generated-y += unistd_32.h
+generated-y += unistd_64.h
generic-y += bpf_perf_event.h
generic-y += types.h
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index 45b4bf1875e6..7f5d773b8cfc 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -21,433 +21,13 @@
#endif
#endif
-#define __NR_restart_syscall 0 /* Linux Specific */
-#define __NR_exit 1 /* Common */
-#define __NR_fork 2 /* Common */
-#define __NR_read 3 /* Common */
-#define __NR_write 4 /* Common */
-#define __NR_open 5 /* Common */
-#define __NR_close 6 /* Common */
-#define __NR_wait4 7 /* Common */
-#define __NR_creat 8 /* Common */
-#define __NR_link 9 /* Common */
-#define __NR_unlink 10 /* Common */
-#define __NR_execv 11 /* SunOS Specific */
-#define __NR_chdir 12 /* Common */
-#define __NR_chown 13 /* Common */
-#define __NR_mknod 14 /* Common */
-#define __NR_chmod 15 /* Common */
-#define __NR_lchown 16 /* Common */
-#define __NR_brk 17 /* Common */
-#define __NR_perfctr 18 /* Performance counter operations */
-#define __NR_lseek 19 /* Common */
-#define __NR_getpid 20 /* Common */
-#define __NR_capget 21 /* Linux Specific */
-#define __NR_capset 22 /* Linux Specific */
-#define __NR_setuid 23 /* Implemented via setreuid in SunOS */
-#define __NR_getuid 24 /* Common */
-#define __NR_vmsplice 25 /* ENOSYS under SunOS */
-#define __NR_ptrace 26 /* Common */
-#define __NR_alarm 27 /* Implemented via setitimer in SunOS */
-#define __NR_sigaltstack 28 /* Common */
-#define __NR_pause 29 /* Is sigblock(0)->sigpause() in SunOS */
-#define __NR_utime 30 /* Implemented via utimes() under SunOS */
-#ifdef __32bit_syscall_numbers__
-#define __NR_lchown32 31 /* Linux sparc32 specific */
-#define __NR_fchown32 32 /* Linux sparc32 specific */
-#endif
-#define __NR_access 33 /* Common */
-#define __NR_nice 34 /* Implemented via get/setpriority() in SunOS */
-#ifdef __32bit_syscall_numbers__
-#define __NR_chown32 35 /* Linux sparc32 specific */
-#endif
-#define __NR_sync 36 /* Common */
-#define __NR_kill 37 /* Common */
-#define __NR_stat 38 /* Common */
-#define __NR_sendfile 39 /* Linux Specific */
-#define __NR_lstat 40 /* Common */
-#define __NR_dup 41 /* Common */
-#define __NR_pipe 42 /* Common */
-#define __NR_times 43 /* Implemented via getrusage() in SunOS */
-#ifdef __32bit_syscall_numbers__
-#define __NR_getuid32 44 /* Linux sparc32 specific */
-#endif
-#define __NR_umount2 45 /* Linux Specific */
-#define __NR_setgid 46 /* Implemented via setregid() in SunOS */
-#define __NR_getgid 47 /* Common */
-#define __NR_signal 48 /* Implemented via sigvec() in SunOS */
-#define __NR_geteuid 49 /* SunOS calls getuid() */
-#define __NR_getegid 50 /* SunOS calls getgid() */
-#define __NR_acct 51 /* Common */
-#ifdef __32bit_syscall_numbers__
-#define __NR_getgid32 53 /* Linux sparc32 specific */
-#else
-#define __NR_memory_ordering 52 /* Linux Specific */
-#endif
-#define __NR_ioctl 54 /* Common */
-#define __NR_reboot 55 /* Common */
-#ifdef __32bit_syscall_numbers__
-#define __NR_mmap2 56 /* Linux sparc32 Specific */
-#endif
-#define __NR_symlink 57 /* Common */
-#define __NR_readlink 58 /* Common */
-#define __NR_execve 59 /* Common */
-#define __NR_umask 60 /* Common */
-#define __NR_chroot 61 /* Common */
-#define __NR_fstat 62 /* Common */
-#define __NR_fstat64 63 /* Linux Specific */
-#define __NR_getpagesize 64 /* Common */
-#define __NR_msync 65 /* Common in newer 1.3.x revs... */
-#define __NR_vfork 66 /* Common */
-#define __NR_pread64 67 /* Linux Specific */
-#define __NR_pwrite64 68 /* Linux Specific */
-#ifdef __32bit_syscall_numbers__
-#define __NR_geteuid32 69 /* Linux sparc32, sbrk under SunOS */
-#define __NR_getegid32 70 /* Linux sparc32, sstk under SunOS */
-#endif
-#define __NR_mmap 71 /* Common */
-#ifdef __32bit_syscall_numbers__
-#define __NR_setreuid32 72 /* Linux sparc32, vadvise under SunOS */
-#endif
-#define __NR_munmap 73 /* Common */
-#define __NR_mprotect 74 /* Common */
-#define __NR_madvise 75 /* Common */
-#define __NR_vhangup 76 /* Common */
-#ifdef __32bit_syscall_numbers__
-#define __NR_truncate64 77 /* Linux sparc32 Specific */
-#endif
-#define __NR_mincore 78 /* Common */
-#define __NR_getgroups 79 /* Common */
-#define __NR_setgroups 80 /* Common */
-#define __NR_getpgrp 81 /* Common */
-#ifdef __32bit_syscall_numbers__
-#define __NR_setgroups32 82 /* Linux sparc32, setpgrp under SunOS */
-#endif
-#define __NR_setitimer 83 /* Common */
-#ifdef __32bit_syscall_numbers__
-#define __NR_ftruncate64 84 /* Linux sparc32 Specific */
-#endif
-#define __NR_swapon 85 /* Common */
-#define __NR_getitimer 86 /* Common */
-#ifdef __32bit_syscall_numbers__
-#define __NR_setuid32 87 /* Linux sparc32, gethostname under SunOS */
-#endif
-#define __NR_sethostname 88 /* Common */
-#ifdef __32bit_syscall_numbers__
-#define __NR_setgid32 89 /* Linux sparc32, getdtablesize under SunOS */
-#endif
-#define __NR_dup2 90 /* Common */
-#ifdef __32bit_syscall_numbers__
-#define __NR_setfsuid32 91 /* Linux sparc32, getdopt under SunOS */
-#endif
-#define __NR_fcntl 92 /* Common */
-#define __NR_select 93 /* Common */
-#ifdef __32bit_syscall_numbers__
-#define __NR_setfsgid32 94 /* Linux sparc32, setdopt under SunOS */
-#endif
-#define __NR_fsync 95 /* Common */
-#define __NR_setpriority 96 /* Common */
-#define __NR_socket 97 /* Common */
-#define __NR_connect 98 /* Common */
-#define __NR_accept 99 /* Common */
-#define __NR_getpriority 100 /* Common */
-#define __NR_rt_sigreturn 101 /* Linux Specific */
-#define __NR_rt_sigaction 102 /* Linux Specific */
-#define __NR_rt_sigprocmask 103 /* Linux Specific */
-#define __NR_rt_sigpending 104 /* Linux Specific */
-#define __NR_rt_sigtimedwait 105 /* Linux Specific */
-#define __NR_rt_sigqueueinfo 106 /* Linux Specific */
-#define __NR_rt_sigsuspend 107 /* Linux Specific */
-#ifdef __32bit_syscall_numbers__
-#define __NR_setresuid32 108 /* Linux Specific, sigvec under SunOS */
-#define __NR_getresuid32 109 /* Linux Specific, sigblock under SunOS */
-#define __NR_setresgid32 110 /* Linux Specific, sigsetmask under SunOS */
-#define __NR_getresgid32 111 /* Linux Specific, sigpause under SunOS */
-#define __NR_setregid32 112 /* Linux sparc32, sigstack under SunOS */
+#ifdef __arch64__
+#include <asm/unistd_64.h>
#else
-#define __NR_setresuid 108 /* Linux Specific, sigvec under SunOS */
-#define __NR_getresuid 109 /* Linux Specific, sigblock under SunOS */
-#define __NR_setresgid 110 /* Linux Specific, sigsetmask under SunOS */
-#define __NR_getresgid 111 /* Linux Specific, sigpause under SunOS */
-#endif
-#define __NR_recvmsg 113 /* Common */
-#define __NR_sendmsg 114 /* Common */
-#ifdef __32bit_syscall_numbers__
-#define __NR_getgroups32 115 /* Linux sparc32, vtrace under SunOS */
-#endif
-#define __NR_gettimeofday 116 /* Common */
-#define __NR_getrusage 117 /* Common */
-#define __NR_getsockopt 118 /* Common */
-#define __NR_getcwd 119 /* Linux Specific */
-#define __NR_readv 120 /* Common */
-#define __NR_writev 121 /* Common */
-#define __NR_settimeofday 122 /* Common */
-#define __NR_fchown 123 /* Common */
-#define __NR_fchmod 124 /* Common */
-#define __NR_recvfrom 125 /* Common */
-#define __NR_setreuid 126 /* Common */
-#define __NR_setregid 127 /* Common */
-#define __NR_rename 128 /* Common */
-#define __NR_truncate 129 /* Common */
-#define __NR_ftruncate 130 /* Common */
-#define __NR_flock 131 /* Common */
-#define __NR_lstat64 132 /* Linux Specific */
-#define __NR_sendto 133 /* Common */
-#define __NR_shutdown 134 /* Common */
-#define __NR_socketpair 135 /* Common */
-#define __NR_mkdir 136 /* Common */
-#define __NR_rmdir 137 /* Common */
-#define __NR_utimes 138 /* SunOS Specific */
-#define __NR_stat64 139 /* Linux Specific */
-#define __NR_sendfile64 140 /* adjtime under SunOS */
-#define __NR_getpeername 141 /* Common */
-#define __NR_futex 142 /* gethostid under SunOS */
-#define __NR_gettid 143 /* ENOSYS under SunOS */
-#define __NR_getrlimit 144 /* Common */
-#define __NR_setrlimit 145 /* Common */
-#define __NR_pivot_root 146 /* Linux Specific, killpg under SunOS */
-#define __NR_prctl 147 /* ENOSYS under SunOS */
-#define __NR_pciconfig_read 148 /* ENOSYS under SunOS */
-#define __NR_pciconfig_write 149 /* ENOSYS under SunOS */
-#define __NR_getsockname 150 /* Common */
-#define __NR_inotify_init 151 /* Linux specific */
-#define __NR_inotify_add_watch 152 /* Linux specific */
-#define __NR_poll 153 /* Common */
-#define __NR_getdents64 154 /* Linux specific */
-#ifdef __32bit_syscall_numbers__
-#define __NR_fcntl64 155 /* Linux sparc32 Specific */
+#include <asm/unistd_32.h>
#endif
-#define __NR_inotify_rm_watch 156 /* Linux specific */
-#define __NR_statfs 157 /* Common */
-#define __NR_fstatfs 158 /* Common */
-#define __NR_umount 159 /* Common */
-#define __NR_sched_set_affinity 160 /* Linux specific, async_daemon under SunOS */
-#define __NR_sched_get_affinity 161 /* Linux specific, getfh under SunOS */
-#define __NR_getdomainname 162 /* SunOS Specific */
-#define __NR_setdomainname 163 /* Common */
-#ifndef __32bit_syscall_numbers__
-#define __NR_utrap_install 164 /* SYSV ABI/v9 required */
-#endif
-#define __NR_quotactl 165 /* Common */
-#define __NR_set_tid_address 166 /* Linux specific, exportfs under SunOS */
-#define __NR_mount 167 /* Common */
-#define __NR_ustat 168 /* Common */
-#define __NR_setxattr 169 /* SunOS: semsys */
-#define __NR_lsetxattr 170 /* SunOS: msgsys */
-#define __NR_fsetxattr 171 /* SunOS: shmsys */
-#define __NR_getxattr 172 /* SunOS: auditsys */
-#define __NR_lgetxattr 173 /* SunOS: rfssys */
-#define __NR_getdents 174 /* Common */
-#define __NR_setsid 175 /* Common */
-#define __NR_fchdir 176 /* Common */
-#define __NR_fgetxattr 177 /* SunOS: fchroot */
-#define __NR_listxattr 178 /* SunOS: vpixsys */
-#define __NR_llistxattr 179 /* SunOS: aioread */
-#define __NR_flistxattr 180 /* SunOS: aiowrite */
-#define __NR_removexattr 181 /* SunOS: aiowait */
-#define __NR_lremovexattr 182 /* SunOS: aiocancel */
-#define __NR_sigpending 183 /* Common */
-#define __NR_query_module 184 /* Linux Specific */
-#define __NR_setpgid 185 /* Common */
-#define __NR_fremovexattr 186 /* SunOS: pathconf */
-#define __NR_tkill 187 /* SunOS: fpathconf */
-#define __NR_exit_group 188 /* Linux specific, sysconf undef SunOS */
-#define __NR_uname 189 /* Linux Specific */
-#define __NR_init_module 190 /* Linux Specific */
-#define __NR_personality 191 /* Linux Specific */
-#define __NR_remap_file_pages 192 /* Linux Specific */
-#define __NR_epoll_create 193 /* Linux Specific */
-#define __NR_epoll_ctl 194 /* Linux Specific */
-#define __NR_epoll_wait 195 /* Linux Specific */
-#define __NR_ioprio_set 196 /* Linux Specific */
-#define __NR_getppid 197 /* Linux Specific */
-#define __NR_sigaction 198 /* Linux Specific */
-#define __NR_sgetmask 199 /* Linux Specific */
-#define __NR_ssetmask 200 /* Linux Specific */
-#define __NR_sigsuspend 201 /* Linux Specific */
-#define __NR_oldlstat 202 /* Linux Specific */
-#define __NR_uselib 203 /* Linux Specific */
-#define __NR_readdir 204 /* Linux Specific */
-#define __NR_readahead 205 /* Linux Specific */
-#define __NR_socketcall 206 /* Linux Specific */
-#define __NR_syslog 207 /* Linux Specific */
-#define __NR_lookup_dcookie 208 /* Linux Specific */
-#define __NR_fadvise64 209 /* Linux Specific */
-#define __NR_fadvise64_64 210 /* Linux Specific */
-#define __NR_tgkill 211 /* Linux Specific */
-#define __NR_waitpid 212 /* Linux Specific */
-#define __NR_swapoff 213 /* Linux Specific */
-#define __NR_sysinfo 214 /* Linux Specific */
-#define __NR_ipc 215 /* Linux Specific */
-#define __NR_sigreturn 216 /* Linux Specific */
-#define __NR_clone 217 /* Linux Specific */
-#define __NR_ioprio_get 218 /* Linux Specific */
-#define __NR_adjtimex 219 /* Linux Specific */
-#define __NR_sigprocmask 220 /* Linux Specific */
-#define __NR_create_module 221 /* Linux Specific */
-#define __NR_delete_module 222 /* Linux Specific */
-#define __NR_get_kernel_syms 223 /* Linux Specific */
-#define __NR_getpgid 224 /* Linux Specific */
-#define __NR_bdflush 225 /* Linux Specific */
-#define __NR_sysfs 226 /* Linux Specific */
-#define __NR_afs_syscall 227 /* Linux Specific */
-#define __NR_setfsuid 228 /* Linux Specific */
-#define __NR_setfsgid 229 /* Linux Specific */
-#define __NR__newselect 230 /* Linux Specific */
-#ifdef __32bit_syscall_numbers__
-#define __NR_time 231 /* Linux Specific */
-#else
-#endif
-#define __NR_splice 232 /* Linux Specific */
-#define __NR_stime 233 /* Linux Specific */
-#define __NR_statfs64 234 /* Linux Specific */
-#define __NR_fstatfs64 235 /* Linux Specific */
-#define __NR__llseek 236 /* Linux Specific */
-#define __NR_mlock 237
-#define __NR_munlock 238
-#define __NR_mlockall 239
-#define __NR_munlockall 240
-#define __NR_sched_setparam 241
-#define __NR_sched_getparam 242
-#define __NR_sched_setscheduler 243
-#define __NR_sched_getscheduler 244
-#define __NR_sched_yield 245
-#define __NR_sched_get_priority_max 246
-#define __NR_sched_get_priority_min 247
-#define __NR_sched_rr_get_interval 248
-#define __NR_nanosleep 249
-#define __NR_mremap 250
-#define __NR__sysctl 251
-#define __NR_getsid 252
-#define __NR_fdatasync 253
-#define __NR_nfsservctl 254
-#define __NR_sync_file_range 255
-#define __NR_clock_settime 256
-#define __NR_clock_gettime 257
-#define __NR_clock_getres 258
-#define __NR_clock_nanosleep 259
-#define __NR_sched_getaffinity 260
-#define __NR_sched_setaffinity 261
-#define __NR_timer_settime 262
-#define __NR_timer_gettime 263
-#define __NR_timer_getoverrun 264
-#define __NR_timer_delete 265
-#define __NR_timer_create 266
-/* #define __NR_vserver 267 Reserved for VSERVER */
-#define __NR_io_setup 268
-#define __NR_io_destroy 269
-#define __NR_io_submit 270
-#define __NR_io_cancel 271
-#define __NR_io_getevents 272
-#define __NR_mq_open 273
-#define __NR_mq_unlink 274
-#define __NR_mq_timedsend 275
-#define __NR_mq_timedreceive 276
-#define __NR_mq_notify 277
-#define __NR_mq_getsetattr 278
-#define __NR_waitid 279
-#define __NR_tee 280
-#define __NR_add_key 281
-#define __NR_request_key 282
-#define __NR_keyctl 283
-#define __NR_openat 284
-#define __NR_mkdirat 285
-#define __NR_mknodat 286
-#define __NR_fchownat 287
-#define __NR_futimesat 288
-#define __NR_fstatat64 289
-#define __NR_unlinkat 290
-#define __NR_renameat 291
-#define __NR_linkat 292
-#define __NR_symlinkat 293
-#define __NR_readlinkat 294
-#define __NR_fchmodat 295
-#define __NR_faccessat 296
-#define __NR_pselect6 297
-#define __NR_ppoll 298
-#define __NR_unshare 299
-#define __NR_set_robust_list 300
-#define __NR_get_robust_list 301
-#define __NR_migrate_pages 302
-#define __NR_mbind 303
-#define __NR_get_mempolicy 304
-#define __NR_set_mempolicy 305
-#define __NR_kexec_load 306
-#define __NR_move_pages 307
-#define __NR_getcpu 308
-#define __NR_epoll_pwait 309
-#define __NR_utimensat 310
-#define __NR_signalfd 311
-#define __NR_timerfd_create 312
-#define __NR_eventfd 313
-#define __NR_fallocate 314
-#define __NR_timerfd_settime 315
-#define __NR_timerfd_gettime 316
-#define __NR_signalfd4 317
-#define __NR_eventfd2 318
-#define __NR_epoll_create1 319
-#define __NR_dup3 320
-#define __NR_pipe2 321
-#define __NR_inotify_init1 322
-#define __NR_accept4 323
-#define __NR_preadv 324
-#define __NR_pwritev 325
-#define __NR_rt_tgsigqueueinfo 326
-#define __NR_perf_event_open 327
-#define __NR_recvmmsg 328
-#define __NR_fanotify_init 329
-#define __NR_fanotify_mark 330
-#define __NR_prlimit64 331
-#define __NR_name_to_handle_at 332
-#define __NR_open_by_handle_at 333
-#define __NR_clock_adjtime 334
-#define __NR_syncfs 335
-#define __NR_sendmmsg 336
-#define __NR_setns 337
-#define __NR_process_vm_readv 338
-#define __NR_process_vm_writev 339
-#define __NR_kern_features 340
-#define __NR_kcmp 341
-#define __NR_finit_module 342
-#define __NR_sched_setattr 343
-#define __NR_sched_getattr 344
-#define __NR_renameat2 345
-#define __NR_seccomp 346
-#define __NR_getrandom 347
-#define __NR_memfd_create 348
-#define __NR_bpf 349
-#define __NR_execveat 350
-#define __NR_membarrier 351
-#define __NR_userfaultfd 352
-#define __NR_bind 353
-#define __NR_listen 354
-#define __NR_setsockopt 355
-#define __NR_mlock2 356
-#define __NR_copy_file_range 357
-#define __NR_preadv2 358
-#define __NR_pwritev2 359
-#define __NR_statx 360
-#define __NR_io_pgetevents 361
-
-#define NR_syscalls 362
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
-#ifdef __32bit_syscall_numbers__
-/* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
- * it never had the plain ones and there is no value to adding those
- * old versions into the syscall table.
- */
-#define __IGNORE_setresuid
-#define __IGNORE_getresuid
-#define __IGNORE_setresgid
-#define __IGNORE_getresgid
-#endif
-
-/* Sparc doesn't have protection keys. */
-#define __IGNORE_pkey_mprotect
-#define __IGNORE_pkey_alloc
-#define __IGNORE_pkey_free
-
#endif /* _UAPI_SPARC_UNISTD_H */
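
All of the deleted __NR_* constants now come from headers generated out of a syscall table file. Assumed flow, with illustrative rows (the real table lives under arch/sparc/kernel/syscalls/):

/*
 * syscall.tbl rows of the form
 *     <number> <abi> <name> <entry point> [<compat entry point>]
 * e.g.
 *     3   common  read   sys_read
 *     4   common  write  sys_write
 * are expanded by the archheaders scripts into
 *     #define __NR_read   3
 *     #define __NR_write  4
 * in the generated unistd_32.h / unistd_64.h included above.
 */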
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index 4e8f56c3793c..4843f48bfe85 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -108,23 +108,22 @@ static int auxio_probe(struct platform_device *dev)
struct device_node *dp = dev->dev.of_node;
unsigned long size;
- if (!strcmp(dp->parent->name, "ebus")) {
+ if (of_node_name_eq(dp->parent, "ebus")) {
auxio_devtype = AUXIO_TYPE_EBUS;
size = sizeof(u32);
- } else if (!strcmp(dp->parent->name, "sbus")) {
+ } else if (of_node_name_eq(dp->parent, "sbus")) {
auxio_devtype = AUXIO_TYPE_SBUS;
size = 1;
} else {
- printk("auxio: Unknown parent bus type [%s]\n",
- dp->parent->name);
+ printk("auxio: Unknown parent bus type [%pOFn]\n",
+ dp->parent);
return -ENODEV;
}
auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
if (!auxio_register)
return -ENODEV;
- printk(KERN_INFO "AUXIO: Found device at %s\n",
- dp->full_name);
+ printk(KERN_INFO "AUXIO: Found device at %pOF\n", dp);
if (auxio_devtype == AUXIO_TYPE_EBUS)
auxio_set_led(AUXIO_LED_ON);
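
The %pOF extensions used throughout this series are handled by the core vsprintf code, so they work in printk(), sprintf(), and friends alike: %pOF prints the full device-tree path of a node and %pOFn just its name, without dereferencing the full_name/name pointers directly. Illustrative use, with an assumed path in the comments:

printk(KERN_INFO "AUXIO: Found device at %pOF\n", dp);		/* "/sbus@.../auxio@..." */
printk("auxio: Unknown parent bus type [%pOFn]\n", dp->parent);	/* "sbus" */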
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c
index 38ae4fdc9eb4..bfae98ab8638 100644
--- a/arch/sparc/kernel/central.c
+++ b/arch/sparc/kernel/central.c
@@ -168,7 +168,7 @@ static int fhc_probe(struct platform_device *op)
goto out;
}
- if (!strcmp(op->dev.of_node->parent->name, "central"))
+ if (of_node_name_eq(op->dev.of_node->parent, "central"))
p->central = true;
p->pregs = of_ioremap(&op->resource[0], 0,
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 0de4bcb8261f..61fe1b951ba3 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -464,8 +464,8 @@ static int jbusmc_probe(struct platform_device *op)
mc_list_add(&p->list);
- printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n",
- op->dev.of_node->full_name);
+ printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %pOF\n",
+ op->dev.of_node);
dev_set_drvdata(&op->dev, p);
@@ -747,8 +747,8 @@ static int chmc_probe(struct platform_device *op)
mc_list_add(&p->list);
- printk(KERN_INFO PFX "UltraSPARC-III memory controller at %s [%s]\n",
- dp->full_name,
+ printk(KERN_INFO PFX "UltraSPARC-III memory controller at %pOF [%s]\n",
+ dp,
(p->layout_size ? "ACTIVE" : "INACTIVE"));
dev_set_drvdata(&op->dev, p);
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c
index 915dda4ae412..684b84ce397f 100644
--- a/arch/sparc/kernel/ftrace.c
+++ b/arch/sparc/kernel/ftrace.c
@@ -126,20 +126,11 @@ unsigned long prepare_ftrace_return(unsigned long parent,
unsigned long frame_pointer)
{
unsigned long return_hooker = (unsigned long) &return_to_handler;
- struct ftrace_graph_ent trace;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return parent + 8UL;
- trace.func = self_addr;
- trace.depth = current->curr_ret_stack + 1;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace))
- return parent + 8UL;
-
- if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
- frame_pointer, NULL) == -EBUSY)
+ if (function_graph_enter(parent, self_addr, frame_pointer, NULL))
return parent + 8UL;
return return_hooker;
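
function_graph_enter() folds the ftrace_graph_entry() filter test and ftrace_push_return_trace() into one core helper, so each architecture stops open-coding the sequence. A simplified sketch of its shape; the real helper also unwinds the pushed frame when the filter rejects the entry:

int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long fp, unsigned long *retp)
{
	struct ftrace_graph_ent trace = { .func = func };

	/* push the return address, then honor the entry filter */
	if (ftrace_push_return_trace(ret, func, fp, retp) ||
	    !ftrace_graph_entry(&trace))
		return -EBUSY;	/* caller falls back to the real parent */
	return 0;
}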
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 0626bae5e3da..b1a09080e8da 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -108,10 +108,9 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
/* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
- iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
+ iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
if (!iommu->tbl.map)
return -ENOMEM;
- memset(iommu->tbl.map, 0, sz);
iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
(tlb_type != hypervisor ? iommu_flushall : NULL),
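
kzalloc_node() is exactly the kmalloc_node()+memset() pair it replaces; the allocator zeroes the buffer via __GFP_ZERO. Equivalent spelling:

iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL | __GFP_ZERO, numa_node);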
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 713670e6d13d..3ec9f1402aad 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -915,7 +915,7 @@ static void map_prom_timers(void)
dp = of_find_node_by_path("/");
dp = dp->child;
while (dp) {
- if (!strcmp(dp->name, "counter-timer"))
+ if (of_node_name_eq(dp, "counter-timer"))
break;
dp = dp->sibling;
}
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 84b233752f28..39229940d725 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -484,20 +484,6 @@ static void leon_load_profile_irq(int cpu, unsigned int limit)
{
}
-void __init leon_trans_init(struct device_node *dp)
-{
- if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
- struct property *p;
- p = of_find_property(dp, "mid", (void *)0);
- if (p) {
- int mid;
- dp->name = prom_early_alloc(5 + 1);
- memcpy(&mid, p->value, p->length);
- sprintf((char *)dp->name, "cpu%.2d", mid);
- }
- }
-}
-
#ifdef CONFIG_SMP
void leon_clear_profile_irq(int cpu)
{
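
leon_trans_init() existed only to rewrite dp->name for LEON cpu nodes that PROM reported as "<NULL>", synthesizing "cpuNN" from the "mid" property. With node names becoming read-only derived data (see the of_node_name_eq()/%pOFn conversions elsewhere in this series), that hack goes away; code that needs the id reads the property directly. A hedged sketch, with an illustrative consumer:

u32 mid;

if (!of_property_read_u32(dp, "mid", &mid))
	pr_info("LEON cpu%02u\n", mid);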
diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c
index e4abe9b8f97a..4ebf51e6e78e 100644
--- a/arch/sparc/kernel/of_device_32.c
+++ b/arch/sparc/kernel/of_device_32.c
@@ -22,7 +22,7 @@
static int of_bus_pci_match(struct device_node *np)
{
- if (!strcmp(np->type, "pci") || !strcmp(np->type, "pciex")) {
+ if (of_node_is_type(np, "pci") || of_node_is_type(np, "pciex")) {
/* Do not do PCI specific frobbing if the
* PCI bridge lacks a ranges property. We
* want to pass it through up to the next
@@ -107,7 +107,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags)
static int of_bus_ambapp_match(struct device_node *np)
{
- return !strcmp(np->type, "ambapp");
+ return of_node_is_type(np, "ambapp");
}
static void of_bus_ambapp_count_cells(struct device_node *child,
@@ -232,10 +232,10 @@ static int __init use_1to1_mapping(struct device_node *pp)
* But, we should still pass the translation work up
* to the SBUS itself.
*/
- if (!strcmp(pp->name, "dma") ||
- !strcmp(pp->name, "espdma") ||
- !strcmp(pp->name, "ledma") ||
- !strcmp(pp->name, "lebuffer"))
+ if (of_node_name_eq(pp, "dma") ||
+ of_node_name_eq(pp, "espdma") ||
+ of_node_name_eq(pp, "ledma") ||
+ of_node_name_eq(pp, "lebuffer"))
return 0;
return 1;
@@ -324,8 +324,8 @@ static void __init build_device_resources(struct platform_device *op,
memset(r, 0, sizeof(*r));
if (of_resource_verbose)
- printk("%s reg[%d] -> %llx\n",
- op->dev.of_node->full_name, index,
+ printk("%pOF reg[%d] -> %llx\n",
+ op->dev.of_node, index,
result);
if (result != OF_BAD_ADDR) {
@@ -333,7 +333,7 @@ static void __init build_device_resources(struct platform_device *op,
r->end = result + size - 1;
r->flags = flags | ((result >> 32ULL) & 0xffUL);
}
- r->name = op->dev.of_node->name;
+ r->name = op->dev.of_node->full_name;
}
}
@@ -386,8 +386,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
op->dev.dma_mask = &op->dev.coherent_dma_mask;
if (of_device_register(op)) {
- printk("%s: Could not register of device.\n",
- dp->full_name);
+ printk("%pOF: Could not register of device.\n", dp);
kfree(op);
op = NULL;
}
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 6df6086968c6..5a9f86b1d4e7 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(of_iounmap);
static int of_bus_pci_match(struct device_node *np)
{
- if (!strcmp(np->name, "pci")) {
+ if (of_node_name_eq(np, "pci")) {
const char *model = of_get_property(np, "model", NULL);
if (model && !strcmp(model, "SUNW,simba"))
@@ -77,7 +77,7 @@ static int of_bus_simba_match(struct device_node *np)
/* Treat PCI busses lacking ranges property just like
* simba.
*/
- if (!strcmp(np->name, "pci")) {
+ if (of_node_name_eq(np, "pci")) {
if (!of_find_property(np, "ranges", NULL))
return 1;
}
@@ -170,8 +170,8 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
*/
static int of_bus_fhc_match(struct device_node *np)
{
- return !strcmp(np->name, "fhc") ||
- !strcmp(np->name, "central");
+ return of_node_name_eq(np, "fhc") ||
+ of_node_name_eq(np, "central");
}
#define of_bus_fhc_count_cells of_bus_sbus_count_cells
@@ -295,17 +295,17 @@ static int __init use_1to1_mapping(struct device_node *pp)
* But, we should still pass the translation work up
* to the SBUS itself.
*/
- if (!strcmp(pp->name, "dma") ||
- !strcmp(pp->name, "espdma") ||
- !strcmp(pp->name, "ledma") ||
- !strcmp(pp->name, "lebuffer"))
+ if (of_node_name_eq(pp, "dma") ||
+ of_node_name_eq(pp, "espdma") ||
+ of_node_name_eq(pp, "ledma") ||
+ of_node_name_eq(pp, "lebuffer"))
return 0;
/* Similarly for all PCI bridges, if we get this far
* it lacks a ranges property, and this will include
* cases like Simba.
*/
- if (!strcmp(pp->name, "pci"))
+ if (of_node_name_eq(pp, "pci"))
return 0;
return 1;
@@ -341,9 +341,9 @@ static void __init build_device_resources(struct platform_device *op,
/* Prevent overrunning the op->resources[] array. */
if (num_reg > PROMREG_MAX) {
- printk(KERN_WARNING "%s: Too many regs (%d), "
+ printk(KERN_WARNING "%pOF: Too many regs (%d), "
"limiting to %d.\n",
- op->dev.of_node->full_name, num_reg, PROMREG_MAX);
+ op->dev.of_node, num_reg, PROMREG_MAX);
num_reg = PROMREG_MAX;
}
@@ -401,8 +401,8 @@ static void __init build_device_resources(struct platform_device *op,
memset(r, 0, sizeof(*r));
if (of_resource_verbose)
- printk("%s reg[%d] -> %llx\n",
- op->dev.of_node->full_name, index,
+ printk("%pOF reg[%d] -> %llx\n",
+ op->dev.of_node, index,
result);
if (result != OF_BAD_ADDR) {
@@ -413,7 +413,7 @@ static void __init build_device_resources(struct platform_device *op,
r->end = result + size - 1;
r->flags = flags;
}
- r->name = op->dev.of_node->name;
+ r->name = op->dev.of_node->full_name;
}
}
@@ -548,8 +548,8 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
dp->irq_trans->data);
if (of_irq_verbose)
- printk("%s: direct translate %x --> %x\n",
- dp->full_name, orig_irq, irq);
+ printk("%pOF: direct translate %x --> %x\n",
+ dp, orig_irq, irq);
goto out;
}
@@ -579,10 +579,9 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
&irq);
if (of_irq_verbose)
- printk("%s: Apply [%s:%x] imap --> [%s:%x]\n",
- op->dev.of_node->full_name,
- pp->full_name, this_orig_irq,
- of_node_full_name(iret), irq);
+ printk("%pOF: Apply [%pOF:%x] imap --> [%pOF:%x]\n",
+ op->dev.of_node,
+ pp, this_orig_irq, iret, irq);
if (!iret)
break;
@@ -592,15 +591,15 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
break;
}
} else {
- if (!strcmp(pp->name, "pci")) {
+ if (of_node_name_eq(pp, "pci")) {
unsigned int this_orig_irq = irq;
irq = pci_irq_swizzle(dp, pp, irq);
if (of_irq_verbose)
- printk("%s: PCI swizzle [%s] "
+ printk("%pOF: PCI swizzle [%pOF] "
"%x --> %x\n",
- op->dev.of_node->full_name,
- pp->full_name, this_orig_irq,
+ op->dev.of_node,
+ pp, this_orig_irq,
irq);
}
@@ -619,8 +618,8 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
irq = ip->irq_trans->irq_build(op->dev.of_node, irq,
ip->irq_trans->data);
if (of_irq_verbose)
- printk("%s: Apply IRQ trans [%s] %x --> %x\n",
- op->dev.of_node->full_name, ip->full_name, orig_irq, irq);
+ printk("%pOF: Apply IRQ trans [%pOF] %x --> %x\n",
+ op->dev.of_node, ip, orig_irq, irq);
out:
nid = of_node_to_nid(dp);
@@ -656,9 +655,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
/* Prevent overrunning the op->irqs[] array. */
if (op->archdata.num_irqs > PROMINTR_MAX) {
- printk(KERN_WARNING "%s: Too many irqs (%d), "
+ printk(KERN_WARNING "%pOF: Too many irqs (%d), "
"limiting to %d.\n",
- dp->full_name, op->archdata.num_irqs, PROMINTR_MAX);
+ dp, op->archdata.num_irqs, PROMINTR_MAX);
op->archdata.num_irqs = PROMINTR_MAX;
}
memcpy(op->archdata.irqs, irq, op->archdata.num_irqs * 4);
@@ -680,8 +679,7 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
op->dev.dma_mask = &op->dev.coherent_dma_mask;
if (of_device_register(op)) {
- printk("%s: Could not register of device.\n",
- dp->full_name);
+ printk("%pOF: Could not register of device.\n", dp);
kfree(op);
op = NULL;
}
diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c
index de0ee3971f00..b186b7f0f6c4 100644
--- a/arch/sparc/kernel/of_device_common.c
+++ b/arch/sparc/kernel/of_device_common.c
@@ -151,8 +151,8 @@ int of_bus_sbus_match(struct device_node *np)
struct device_node *dp = np;
while (dp) {
- if (!strcmp(dp->name, "sbus") ||
- !strcmp(dp->name, "sbi"))
+ if (of_node_name_eq(dp, "sbus") ||
+ of_node_name_eq(dp, "sbi"))
return 1;
/* Have a look at use_1to1_mapping(). We're trying
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 17ea16a1337c..bcfec6a85d23 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -267,7 +267,6 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
struct dev_archdata *sd;
struct platform_device *op;
struct pci_dev *dev;
- const char *type;
u32 class;
dev = pci_alloc_dev(bus);
@@ -283,16 +282,12 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
sd->stc = &pbm->stc;
sd->numa_node = pbm->numa_node;
- if (!strcmp(node->name, "ebus"))
+ if (of_node_name_eq(node, "ebus"))
of_propagate_archdata(op);
- type = of_get_property(node, "device_type", NULL);
- if (type == NULL)
- type = "";
-
if (ofpci_verbose)
pci_info(bus," create device, devfn: %x, type: %s\n",
- devfn, type);
+ devfn, of_node_get_device_type(node));
dev->sysdata = node;
dev->dev.parent = bus->bridge;
@@ -336,11 +331,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->error_state = pci_channel_io_normal;
dev->dma_mask = 0xffffffff;
- if (!strcmp(node->name, "pci")) {
+ if (of_node_name_eq(node, "pci")) {
/* a PCI-PCI bridge */
dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
dev->rom_base_reg = PCI_ROM_ADDRESS1;
- } else if (!strcmp(type, "cardbus")) {
+ } else if (of_node_is_type(node, "cardbus")) {
dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
} else {
dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
@@ -431,13 +426,13 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
u64 size;
if (ofpci_verbose)
- pci_info(dev, "of_scan_pci_bridge(%s)\n", node->full_name);
+ pci_info(dev, "of_scan_pci_bridge(%pOF)\n", node);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
if (busrange == NULL || len != 8) {
- pci_info(dev, "Can't get bus-range for PCI-PCI bridge %s\n",
- node->full_name);
+ pci_info(dev, "Can't get bus-range for PCI-PCI bridge %pOF\n",
+ node);
return;
}
@@ -455,8 +450,8 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
if (!bus) {
- pci_err(dev, "Failed to create pci bus for %s\n",
- node->full_name);
+ pci_err(dev, "Failed to create pci bus for %pOF\n",
+ node);
return;
}
@@ -512,13 +507,13 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
res = bus->resource[0];
if (res->flags) {
pci_err(dev, "ignoring extra I/O range"
- " for bridge %s\n", node->full_name);
+ " for bridge %pOF\n", node);
continue;
}
} else {
if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
pci_err(dev, "too many memory ranges"
- " for bridge %s\n", node->full_name);
+ " for bridge %pOF\n", node);
continue;
}
res = bus->resource[i];
@@ -554,14 +549,14 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm,
struct pci_dev *dev;
if (ofpci_verbose)
- pci_info(bus, "scan_bus[%s] bus no %d\n",
- node->full_name, bus->number);
+ pci_info(bus, "scan_bus[%pOF] bus no %d\n",
+ node, bus->number);
child = NULL;
prev_devfn = -1;
while ((child = of_get_next_child(node, child)) != NULL) {
if (ofpci_verbose)
- pci_info(bus, " * %s\n", child->full_name);
+ pci_info(bus, " * %pOF\n", child);
reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20)
continue;
@@ -598,7 +593,7 @@ show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char *
pdev = to_pci_dev(dev);
dp = pdev->dev.of_node;
- return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
+ return snprintf (buf, PAGE_SIZE, "%pOF\n", dp);
}
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
@@ -698,7 +693,7 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
struct device_node *node = pbm->op->dev.of_node;
struct pci_bus *bus;
- printk("PCI: Scanning PBM %s\n", node->full_name);
+ printk("PCI: Scanning PBM %pOF\n", node);
pci_add_resource_offset(&resources, &pbm->io_space,
pbm->io_offset);
@@ -714,8 +709,7 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops,
pbm, &resources);
if (!bus) {
- printk(KERN_ERR "Failed to create bus for %s\n",
- node->full_name);
+ printk(KERN_ERR "Failed to create bus for %pOF\n", node);
pci_free_resource_list(&resources);
return NULL;
}
@@ -1111,8 +1105,8 @@ static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
sp = prop->names;
if (ofpci_verbose)
- pci_info(bus, "Making slots for [%s] mask[0x%02x]\n",
- node->full_name, mask);
+ pci_info(bus, "Making slots for [%pOF] mask[0x%02x]\n",
+ node, mask);
i = 0;
while (mask) {
diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c
index 8107286be9ab..3c38ca40a22b 100644
--- a/arch/sparc/kernel/pci_sabre.c
+++ b/arch/sparc/kernel/pci_sabre.c
@@ -475,7 +475,7 @@ static int sabre_probe(struct platform_device *op)
* different ways, inconsistently.
*/
for_each_node_by_type(cpu_dp, "cpu") {
- if (!strcmp(cpu_dp->name, "SUNW,UltraSPARC-IIe"))
+ if (of_node_name_eq(cpu_dp, "SUNW,UltraSPARC-IIe"))
hummingbird_p = 1;
}
}
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index 92627abce311..d941875dd718 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -41,8 +41,8 @@ static int power_probe(struct platform_device *op)
power_reg = of_ioremap(res, 0, 0x4, "power");
- printk(KERN_INFO "%s: Control reg at %llx\n",
- op->dev.of_node->name, res->start);
+ printk(KERN_INFO "%pOFn: Control reg at %llx\n",
+ op->dev.of_node, res->start);
if (has_button_interrupt(irq, op->dev.of_node)) {
if (request_irq(irq,
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index d9662cf7e648..26cca65e9246 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -110,7 +110,7 @@ void machine_restart(char * cmd)
void machine_power_off(void)
{
if (auxio_power_register &&
- (strcmp(of_console_device->type, "serial") || scons_pwroff)) {
+ (!of_node_is_type(of_console_device, "serial") || scons_pwroff)) {
u8 power_register = sbus_readb(auxio_power_register);
power_register |= AUXIO_POWER_OFF;
sbus_writeb(power_register, auxio_power_register);
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index d41e2a749c5d..42d7f2a7da6d 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -60,6 +60,7 @@ void * __init prom_early_alloc(unsigned long size)
*/
static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *rprop;
@@ -69,13 +70,14 @@ static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf)
regs = rprop->value;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
regs->which_io, regs->phys_addr);
}
/* "name@slot,offset" */
static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *prop;
@@ -85,7 +87,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
regs->which_io,
regs->phys_addr);
}
@@ -93,6 +95,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
/* "name@devnum[,func]" */
static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_pci_registers *regs;
struct property *prop;
unsigned int devfn;
@@ -105,12 +108,12 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) {
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
devfn >> 3,
devfn & 0x07);
} else {
sprintf(tmp_buf, "%s@%x",
- dp->name,
+ name,
devfn >> 3);
}
}
@@ -118,6 +121,7 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
/* "name@addrhi,addrlo" */
static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *prop;
@@ -128,13 +132,14 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
regs->which_io, regs->phys_addr);
}
/* "name:vendor:device@irq,addrlo" */
static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct amba_prom_registers *regs;
unsigned int *intr, *device, *vendor, reg0;
struct property *prop;
@@ -168,7 +173,7 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
device = prop->value;
sprintf(tmp_buf, "%s:%d:%d@%x,%x",
- dp->name, *vendor, *device,
+ name, *vendor, *device,
*intr, reg0);
}
@@ -177,14 +182,14 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
struct device_node *parent = dp->parent;
if (parent != NULL) {
- if (!strcmp(parent->type, "pci") ||
- !strcmp(parent->type, "pciex"))
+ if (of_node_is_type(parent, "pci") ||
+ of_node_is_type(parent, "pciex"))
return pci_path_component(dp, tmp_buf);
- if (!strcmp(parent->type, "sbus"))
+ if (of_node_is_type(parent, "sbus"))
return sbus_path_component(dp, tmp_buf);
- if (!strcmp(parent->type, "ebus"))
+ if (of_node_is_type(parent, "ebus"))
return ebus_path_component(dp, tmp_buf);
- if (!strcmp(parent->type, "ambapp"))
+ if (of_node_is_type(parent, "ambapp"))
return ambapp_path_component(dp, tmp_buf);
/* "isa" is handled with platform naming */
@@ -196,12 +201,13 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
char * __init build_path_component(struct device_node *dp)
{
+ const char *name = of_get_property(dp, "name", NULL);
char tmp_buf[64], *n;
tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0')
- strcpy(tmp_buf, dp->name);
+ strcpy(tmp_buf, name);
n = prom_early_alloc(strlen(tmp_buf) + 1);
strcpy(n, tmp_buf);
@@ -255,7 +261,7 @@ void __init of_console_init(void)
}
of_console_device = dp;
- strcpy(of_console_path, dp->full_name);
+ sprintf(of_console_path, "%pOF", dp);
if (!strcmp(type, "serial")) {
strcat(of_console_path,
(skip ? ":b" : ":a"));
@@ -278,15 +284,9 @@ void __init of_console_init(void)
prom_halt();
}
dp = of_find_node_by_phandle(node);
- type = of_get_property(dp, "device_type", NULL);
- if (!type) {
- prom_printf("Console stdout lacks "
- "device_type property.\n");
- prom_halt();
- }
-
- if (strcmp(type, "display") && strcmp(type, "serial")) {
+ if (!of_node_is_type(dp, "display") &&
+ !of_node_is_type(dp, "serial")) {
prom_printf("Console device_type is neither display "
"nor serial.\n");
prom_halt();
@@ -295,7 +295,7 @@ void __init of_console_init(void)
of_console_device = dp;
if (prom_vers == PROM_V2) {
- strcpy(of_console_path, dp->full_name);
+ sprintf(of_console_path, "%pOF", dp);
switch (*romvec->pv_stdout) {
case PROMDEV_TTYA:
strcat(of_console_path, ":a");
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index c37955d127fe..e897a4ded3a1 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -72,6 +72,7 @@ void * __init prom_early_alloc(unsigned long size)
*/
static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *rprop;
u32 high_bits, low_bits, type;
@@ -83,7 +84,7 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
regs = rprop->value;
if (!of_node_is_root(dp->parent)) {
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
return;
@@ -98,21 +99,22 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
if (low_bits)
sprintf(tmp_buf, "%s@%s%x,%x",
- dp->name, prefix,
+ name, prefix,
high_bits, low_bits);
else
sprintf(tmp_buf, "%s@%s%x",
- dp->name,
+ name,
prefix,
high_bits);
} else if (type == 12) {
sprintf(tmp_buf, "%s@%x",
- dp->name, high_bits);
+ name, high_bits);
}
}
static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *prop;
@@ -123,7 +125,7 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
if (!of_node_is_root(dp->parent)) {
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
return;
@@ -139,7 +141,7 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
mask = 0x7fffff;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
*(u32 *)prop->value,
(unsigned int) (regs->phys_addr & mask));
}
@@ -148,6 +150,7 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
/* "name@slot,offset" */
static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_registers *regs;
struct property *prop;
@@ -157,7 +160,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
regs->which_io,
regs->phys_addr);
}
@@ -165,6 +168,7 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
/* "name@devnum[,func]" */
static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom_pci_registers *regs;
struct property *prop;
unsigned int devfn;
@@ -177,12 +181,12 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) {
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
devfn >> 3,
devfn & 0x07);
} else {
sprintf(tmp_buf, "%s@%x",
- dp->name,
+ name,
devfn >> 3);
}
}
@@ -190,6 +194,7 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
/* "name@UPA_PORTID,offset" */
static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *prop;
@@ -204,7 +209,7 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
return;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
*(u32 *) prop->value,
(unsigned int) (regs->phys_addr & 0xffffffffUL));
}
@@ -212,6 +217,7 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
/* "name@reg" */
static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
@@ -221,12 +227,13 @@ static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
- sprintf(tmp_buf, "%s@%x", dp->name, *regs);
+ sprintf(tmp_buf, "%s@%x", name, *regs);
}
/* "name@addrhi,addrlo" */
static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct linux_prom64_registers *regs;
struct property *prop;
@@ -237,7 +244,7 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
- dp->name,
+ name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
}
@@ -245,6 +252,7 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
/* "name@bus,addr" */
static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
@@ -258,12 +266,13 @@ static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
* property of the i2c bus node etc. etc.
*/
sprintf(tmp_buf, "%s@%x,%x",
- dp->name, regs[0], regs[1]);
+ name, regs[0], regs[1]);
}
/* "name@reg0[,reg1]" */
static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
@@ -275,16 +284,17 @@ static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
if (prop->length == sizeof(u32) || regs[1] == 1) {
sprintf(tmp_buf, "%s@%x",
- dp->name, regs[0]);
+ name, regs[0]);
} else {
sprintf(tmp_buf, "%s@%x,%x",
- dp->name, regs[0], regs[1]);
+ name, regs[0], regs[1]);
}
}
/* "name@reg0reg1[,reg2reg3]" */
static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf)
{
+ const char *name = of_get_property(dp, "name", NULL);
struct property *prop;
u32 *regs;
@@ -296,10 +306,10 @@ static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf)
if (regs[2] || regs[3]) {
sprintf(tmp_buf, "%s@%08x%08x,%04x%08x",
- dp->name, regs[0], regs[1], regs[2], regs[3]);
+ name, regs[0], regs[1], regs[2], regs[3]);
} else {
sprintf(tmp_buf, "%s@%08x%08x",
- dp->name, regs[0], regs[1]);
+ name, regs[0], regs[1]);
}
}
@@ -308,37 +318,37 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
struct device_node *parent = dp->parent;
if (parent != NULL) {
- if (!strcmp(parent->type, "pci") ||
- !strcmp(parent->type, "pciex")) {
+ if (of_node_is_type(parent, "pci") ||
+ of_node_is_type(parent, "pciex")) {
pci_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "sbus")) {
+ if (of_node_is_type(parent, "sbus")) {
sbus_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "upa")) {
+ if (of_node_is_type(parent, "upa")) {
upa_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "ebus")) {
+ if (of_node_is_type(parent, "ebus")) {
ebus_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->name, "usb") ||
- !strcmp(parent->name, "hub")) {
+ if (of_node_name_eq(parent, "usb") ||
+ of_node_name_eq(parent, "hub")) {
usb_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "i2c")) {
+ if (of_node_is_type(parent, "i2c")) {
i2c_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "firewire")) {
+ if (of_node_is_type(parent, "firewire")) {
ieee1394_path_component(dp, tmp_buf);
return;
}
- if (!strcmp(parent->type, "virtual-devices")) {
+ if (of_node_is_type(parent, "virtual-devices")) {
vdev_path_component(dp, tmp_buf);
return;
}
@@ -356,12 +366,13 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
char * __init build_path_component(struct device_node *dp)
{
+ const char *name = of_get_property(dp, "name", NULL);
char tmp_buf[64], *n;
tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0')
- strcpy(tmp_buf, dp->name);
+ strcpy(tmp_buf, name);
n = prom_early_alloc(strlen(tmp_buf) + 1);
strcpy(n, tmp_buf);
@@ -594,7 +605,6 @@ void __init of_console_init(void)
{
char *msg = "OF stdout device is: %s\n";
struct device_node *dp;
- const char *type;
phandle node;
of_console_path = prom_early_alloc(256);
@@ -617,13 +627,8 @@ void __init of_console_init(void)
}
dp = of_find_node_by_phandle(node);
- type = of_get_property(dp, "device_type", NULL);
- if (!type) {
- prom_printf("Console stdout lacks device_type property.\n");
- prom_halt();
- }
- if (strcmp(type, "display") && strcmp(type, "serial")) {
+ if (!of_node_is_type(dp, "display") && !of_node_is_type(dp, "serial")) {
prom_printf("Console device_type is neither display "
"nor serial.\n");
prom_halt();
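
These conversions swap direct dp->name / dp->type dereferences for NULL-safe accessor helpers, so OF code stops relying on the cached name/type pointers in struct device_node. As a rough sketch of the semantics (not the exact upstream definition), of_node_is_type() behaves roughly like:

static inline bool node_is_type_sketch(const struct device_node *np,
				       const char *type)
{
	/* NULL-safe: a missing node or an absent property never matches */
	const char *t = np ? of_get_property(np, "device_type", NULL) : NULL;

	return t && type && !strcmp(t, type);
}

That NULL-safety is also why the of_console_init() hunk can drop the explicit "lacks device_type property" check: a node without the property now simply fails both of_node_is_type() tests and falls into the existing error path.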
diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
index f3fecac7facb..28aff1c524b5 100644
--- a/arch/sparc/kernel/prom_irqtrans.c
+++ b/arch/sparc/kernel/prom_irqtrans.c
@@ -193,7 +193,7 @@ static int sabre_device_needs_wsync(struct device_node *dp)
* the DMA synchronization handling
*/
while (parent) {
- if (!strcmp(parent->type, "pci"))
+ if (of_node_is_type(parent, "pci"))
break;
parent = parent->parent;
}
@@ -725,11 +725,11 @@ static unsigned int central_build_irq(struct device_node *dp,
unsigned long imap, iclr;
u32 tmp;
- if (!strcmp(dp->name, "eeprom")) {
+ if (of_node_name_eq(dp, "eeprom")) {
res = &central_op->resource[5];
- } else if (!strcmp(dp->name, "zs")) {
+ } else if (of_node_name_eq(dp, "zs")) {
res = &central_op->resource[4];
- } else if (!strcmp(dp->name, "clock-board")) {
+ } else if (of_node_name_eq(dp, "clock-board")) {
res = &central_op->resource[3];
} else {
return ino;
@@ -824,19 +824,19 @@ void __init irq_trans_init(struct device_node *dp)
}
#endif
#ifdef CONFIG_SBUS
- if (!strcmp(dp->name, "sbus") ||
- !strcmp(dp->name, "sbi")) {
+ if (of_node_name_eq(dp, "sbus") ||
+ of_node_name_eq(dp, "sbi")) {
sbus_irq_trans_init(dp);
return;
}
#endif
- if (!strcmp(dp->name, "fhc") &&
- !strcmp(dp->parent->name, "central")) {
+ if (of_node_name_eq(dp, "fhc") &&
+ of_node_name_eq(dp->parent, "central")) {
central_irq_trans_init(dp);
return;
}
- if (!strcmp(dp->name, "virtual-devices") ||
- !strcmp(dp->name, "niu")) {
+ if (of_node_name_eq(dp, "virtual-devices") ||
+ of_node_name_eq(dp, "niu")) {
sun4v_vdev_irq_trans_init(dp);
return;
}
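
of_node_name_eq() likewise takes over the strcmp() on dp->name, comparing only the node-name portion of the path, i.e. everything before any "@unit-address". A sketch of the comparison, assuming the usual kernel string helpers:

static bool node_name_eq_sketch(const struct device_node *np, const char *name)
{
	const char *node_name;
	size_t len;

	if (!np)
		return false;

	node_name = kbasename(np->full_name);		/* e.g. "eeprom@1,0" */
	len = strchrnul(node_name, '@') - node_name;	/* length of "eeprom" */

	return strlen(name) == len && !strncmp(node_name, name, len);
}

Because a NULL node compares unequal instead of crashing, call sites such as the sun4d_build_device_irq() hunk further down can fold an explicit !bus->parent test into the helper call.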
diff --git a/arch/sparc/kernel/reboot.c b/arch/sparc/kernel/reboot.c
index 7933ee365207..69c1b6c047d5 100644
--- a/arch/sparc/kernel/reboot.c
+++ b/arch/sparc/kernel/reboot.c
@@ -7,6 +7,7 @@
#include <linux/reboot.h>
#include <linux/export.h>
#include <linux/pm.h>
+#include <linux/of.h>
#include <asm/oplib.h>
#include <asm/prom.h>
@@ -25,7 +26,7 @@ EXPORT_SYMBOL(pm_power_off);
void machine_power_off(void)
{
- if (strcmp(of_console_device->type, "serial") || scons_pwroff)
+ if (!of_node_is_type(of_console_device, "serial") || scons_pwroff)
prom_halt_power_off();
prom_halt();
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index c133dfc37c5c..41c5deb581b8 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -67,8 +67,8 @@ void sbus_set_sbus64(struct device *dev, int bursts)
regs = of_get_property(op->dev.of_node, "reg", NULL);
if (!regs) {
- printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %s\n",
- op->dev.of_node->full_name);
+ printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %pOF\n",
+ op->dev.of_node);
return;
}
slot = regs->which_io;
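
The %pOF conversions here and in the following files replace handwritten full_name strings with the printk extension that prints a device_node's full path. For a node living at, say, /sbus@2000/SUNW,fdtwo (a hypothetical path), the new call would emit:

printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %pOF\n",
       op->dev.of_node);
/* -> "sbus_set_sbus64: Cannot find regs for /sbus@2000/SUNW,fdtwo" */

which is part of removing the dependency on a preformatted full_name string.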
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 13664c377196..3fd238e54af9 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -310,25 +310,24 @@ void __init setup_arch(char **cmdline_p)
register_console(&prom_early_console);
- printk("ARCH: ");
switch(sparc_cpu_model) {
case sun4m:
- printk("SUN4M\n");
+ pr_info("ARCH: SUN4M\n");
break;
case sun4d:
- printk("SUN4D\n");
+ pr_info("ARCH: SUN4D\n");
break;
case sun4e:
- printk("SUN4E\n");
+ pr_info("ARCH: SUN4E\n");
break;
case sun4u:
- printk("SUN4U\n");
+ pr_info("ARCH: SUN4U\n");
break;
case sparc_leon:
- printk("LEON\n");
+ pr_info("ARCH: LEON\n");
break;
default:
- printk("UNKNOWN!\n");
+ pr_info("ARCH: UNKNOWN!\n");
break;
}
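
Folding the "ARCH: " prefix into each branch turns two printk() calls into one complete, self-contained line per case (the same change is made for sparc64 below). Split messages are fragile because output from another context can interleave between the two calls; pr_info() also supplies the KERN_INFO level explicitly:

/* before: "ARCH: " and "SUN4M\n" could be separated in the log */
printk("ARCH: ");
printk("SUN4M\n");

/* after: one complete, levelled line */
pr_info("ARCH: SUN4M\n");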
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index cd2825cb8420..ecc788aa07bd 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -642,9 +642,9 @@ void __init setup_arch(char **cmdline_p)
register_console(&prom_early_console);
if (tlb_type == hypervisor)
- printk("ARCH: SUN4V\n");
+ pr_info("ARCH: SUN4V\n");
else
- printk("ARCH: SUN4U\n");
+ pr_info("ARCH: SUN4U\n");
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 4c5b3fcbed94..e800ce13cc6e 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -683,6 +683,7 @@ void do_signal32(struct pt_regs * regs)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
+ /* fall through */
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 5665261cee37..83953780ca01 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -508,6 +508,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->pc -= 4;
regs->npc -= 4;
pt_regs_clear_syscall(regs);
+ /* fall through */
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->pc -= 4;
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index e9de1803a22e..ca70787efd8e 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -533,6 +533,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
regs->tpc -= 4;
regs->tnpc -= 4;
pt_regs_clear_syscall(regs);
+ /* fall through */
case ERESTART_RESTARTBLOCK:
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
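
The three /* fall through */ comments added above annotate deliberately missing break statements, so compilers building with -Wimplicit-fallthrough treat the fall-through as intentional rather than warning about it. A minimal illustration of the pattern (the helper name is hypothetical):

switch (restart_code) {
case ERESTARTNOHAND:
	rewind_syscall(regs);		/* hypothetical helper */
	/* fall through */		/* silences -Wimplicit-fallthrough */
case ERESTART_RESTARTBLOCK:
	regs->u_regs[UREG_G1] = __NR_restart_syscall;
	break;
}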
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index d869d409fce6..9a137c70e8d1 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -335,12 +335,12 @@ static unsigned int sun4d_build_device_irq(struct platform_device *op,
irq = real_irq;
while (bus) {
- if (!strcmp(bus->name, "sbi")) {
+ if (of_node_name_eq(bus, "sbi")) {
bus_connection = "io-unit";
break;
}
- if (!strcmp(bus->name, "bootbus")) {
+ if (of_node_name_eq(bus, "bootbus")) {
bus_connection = "cpu-unit";
break;
}
@@ -360,16 +360,16 @@ static unsigned int sun4d_build_device_irq(struct platform_device *op,
* If Bus nodes parent is not io-unit/cpu-unit or the io-unit/cpu-unit
* lacks a "board#" property, something is very wrong.
*/
- if (!bus->parent || strcmp(bus->parent->name, bus_connection)) {
- printk(KERN_ERR "%s: Error, parent is not %s.\n",
- bus->full_name, bus_connection);
+ if (!of_node_name_eq(bus->parent, bus_connection)) {
+ printk(KERN_ERR "%pOF: Error, parent is not %s.\n",
+ bus, bus_connection);
goto err_out;
}
board_parent = bus->parent;
board = of_getintprop_default(board_parent, "board#", -1);
if (board == -1) {
- printk(KERN_ERR "%s: Error, lacks board# property.\n",
- board_parent->full_name);
+ printk(KERN_ERR "%pOF: Error, lacks board# property.\n",
+ board_parent);
goto err_out;
}
diff --git a/arch/sparc/kernel/syscalls/Makefile b/arch/sparc/kernel/syscalls/Makefile
new file mode 100644
index 000000000000..c22a21c39f30
--- /dev/null
+++ b/arch/sparc/kernel/syscalls/Makefile
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
+ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+syscall := $(srctree)/$(src)/syscall.tbl
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+ '$(syshdr_abis_$(basetarget))' \
+ '$(syshdr_pfx_$(basetarget))' \
+ '$(syshdr_offset_$(basetarget))'
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
+ '$(systbl_abis_$(basetarget))' \
+ '$(systbl_abi_$(basetarget))' \
+ '$(systbl_offset_$(basetarget))'
+
+syshdr_abis_unistd_32 := common,32
+$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+ $(call if_changed,syshdr)
+
+syshdr_abis_unistd_64 := common,64
+$(uapi)/unistd_64.h: $(syscall) $(syshdr)
+ $(call if_changed,syshdr)
+
+systbl_abis_syscall_table_32 := common,32
+$(kapi)/syscall_table_32.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abis_syscall_table_64 := common,64
+$(kapi)/syscall_table_64.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+systbl_abis_syscall_table_c32 := common,32
+systbl_abi_syscall_table_c32 := c32
+$(kapi)/syscall_table_c32.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_32.h unistd_64.h
+kapisyshdr-y += syscall_table_32.h \
+ syscall_table_64.h \
+ syscall_table_c32.h
+
+targets += $(uapisyshdr-y) $(kapisyshdr-y)
+
+PHONY += all
+all: $(addprefix $(uapi)/,$(uapisyshdr-y))
+all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+ @:
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
new file mode 100644
index 000000000000..c8c77c05ea97
--- /dev/null
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -0,0 +1,409 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for sparc
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> can be common, 64, or 32 for this file.
+#
+0 common restart_syscall sys_restart_syscall
+1 32 exit sys_exit sparc_exit
+1 64 exit sparc_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open compat_sys_open
+6 common close sys_close
+7 common wait4 sys_wait4 compat_sys_wait4
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 32 execv sunos_execv
+11 64 execv sys_nis_syscall
+12 common chdir sys_chdir
+13 32 chown sys_chown16
+13 64 chown sys_chown
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 32 lchown sys_lchown16
+16 64 lchown sys_lchown
+17 common brk sys_brk
+18 common perfctr sys_nis_syscall
+19 common lseek sys_lseek compat_sys_lseek
+20 common getpid sys_getpid
+21 common capget sys_capget
+22 common capset sys_capset
+23 32 setuid sys_setuid16
+23 64 setuid sys_setuid
+24 32 getuid sys_getuid16
+24 64 getuid sys_getuid
+25 common vmsplice sys_vmsplice compat_sys_vmsplice
+26 common ptrace sys_ptrace compat_sys_ptrace
+27 common alarm sys_alarm
+28 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
+29 32 pause sys_pause
+29 64 pause sys_nis_syscall
+30 common utime sys_utime compat_sys_utime
+31 32 lchown32 sys_lchown
+32 32 fchown32 sys_fchown
+33 common access sys_access
+34 common nice sys_nice
+35 32 chown32 sys_chown
+36 common sync sys_sync
+37 common kill sys_kill
+38 common stat sys_newstat compat_sys_newstat
+39 32 sendfile sys_sendfile compat_sys_sendfile
+39 64 sendfile sys_sendfile64
+40 common lstat sys_newlstat compat_sys_newlstat
+41 common dup sys_dup
+42 common pipe sys_sparc_pipe
+43 common times sys_times compat_sys_times
+44 32 getuid32 sys_getuid
+45 common umount2 sys_umount
+46 32 setgid sys_setgid16
+46 64 setgid sys_setgid
+47 32 getgid sys_getgid16
+47 64 getgid sys_getgid
+48 common signal sys_signal
+49 32 geteuid sys_geteuid16
+49 64 geteuid sys_geteuid
+50 32 getegid sys_getegid16
+50 64 getegid sys_getegid
+51 common acct sys_acct
+52 64 memory_ordering sys_memory_ordering
+53 32 getgid32 sys_getgid
+54 common ioctl sys_ioctl compat_sys_ioctl
+55 common reboot sys_reboot
+56 32 mmap2 sys_mmap2 sys32_mmap2
+57 common symlink sys_symlink
+58 common readlink sys_readlink
+59 32 execve sys_execve sys32_execve
+59 64 execve sys64_execve
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common fstat sys_newfstat compat_sys_newfstat
+63 common fstat64 sys_fstat64 compat_sys_fstat64
+64 common getpagesize sys_getpagesize
+65 common msync sys_msync
+66 common vfork sys_vfork
+67 common pread64 sys_pread64 compat_sys_pread64
+68 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+69 32 geteuid32 sys_geteuid
+70 32 getegid32 sys_getegid
+71 common mmap sys_mmap
+72 32 setreuid32 sys_setreuid
+73 32 munmap sys_munmap
+73 64 munmap sys_64_munmap
+74 common mprotect sys_mprotect
+75 common madvise sys_madvise
+76 common vhangup sys_vhangup
+77 32 truncate64 sys_truncate64 compat_sys_truncate64
+78 common mincore sys_mincore
+79 32 getgroups sys_getgroups16
+79 64 getgroups sys_getgroups
+80 32 setgroups sys_setgroups16
+80 64 setgroups sys_setgroups
+81 common getpgrp sys_getpgrp
+82 32 setgroups32 sys_setgroups
+83 common setitimer sys_setitimer compat_sys_setitimer
+84 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+85 common swapon sys_swapon
+86 common getitimer sys_getitimer compat_sys_getitimer
+87 32 setuid32 sys_setuid
+88 common sethostname sys_sethostname
+89 32 setgid32 sys_setgid
+90 common dup2 sys_dup2
+91 32 setfsuid32 sys_setfsuid
+92 common fcntl sys_fcntl compat_sys_fcntl
+93 common select sys_select
+94 32 setfsgid32 sys_setfsgid
+95 common fsync sys_fsync
+96 common setpriority sys_setpriority
+97 common socket sys_socket
+98 common connect sys_connect
+99 common accept sys_accept
+100 common getpriority sys_getpriority
+101 common rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn
+102 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+103 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+104 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+105 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+106 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+107 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+108 32 setresuid32 sys_setresuid
+108 64 setresuid sys_setresuid
+109 32 getresuid32 sys_getresuid
+109 64 getresuid sys_getresuid
+110 32 setresgid32 sys_setresgid
+110 64 setresgid sys_setresgid
+111 32 getresgid32 sys_getresgid
+111 64 getresgid sys_getresgid
+112 32 setregid32 sys_setregid
+113 common recvmsg sys_recvmsg compat_sys_recvmsg
+114 common sendmsg sys_sendmsg compat_sys_sendmsg
+115 32 getgroups32 sys_getgroups
+116 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
+117 common getrusage sys_getrusage compat_sys_getrusage
+118 common getsockopt sys_getsockopt compat_sys_getsockopt
+119 common getcwd sys_getcwd
+120 common readv sys_readv compat_sys_readv
+121 common writev sys_writev compat_sys_writev
+122 common settimeofday sys_settimeofday compat_sys_settimeofday
+123 32 fchown sys_fchown16
+123 64 fchown sys_fchown
+124 common fchmod sys_fchmod
+125 common recvfrom sys_recvfrom
+126 32 setreuid sys_setreuid16
+126 64 setreuid sys_setreuid
+127 32 setregid sys_setregid16
+127 64 setregid sys_setregid
+128 common rename sys_rename
+129 common truncate sys_truncate compat_sys_truncate
+130 common ftruncate sys_ftruncate compat_sys_ftruncate
+131 common flock sys_flock
+132 common lstat64 sys_lstat64 compat_sys_lstat64
+133 common sendto sys_sendto
+134 common shutdown sys_shutdown
+135 common socketpair sys_socketpair
+136 common mkdir sys_mkdir
+137 common rmdir sys_rmdir
+138 common utimes sys_utimes compat_sys_utimes
+139 common stat64 sys_stat64 compat_sys_stat64
+140 common sendfile64 sys_sendfile64
+141 common getpeername sys_getpeername
+142 common futex sys_futex compat_sys_futex
+143 common gettid sys_gettid
+144 common getrlimit sys_getrlimit compat_sys_getrlimit
+145 common setrlimit sys_setrlimit compat_sys_setrlimit
+146 common pivot_root sys_pivot_root
+147 common prctl sys_prctl
+148 common pciconfig_read sys_pciconfig_read
+149 common pciconfig_write sys_pciconfig_write
+150 common getsockname sys_getsockname
+151 common inotify_init sys_inotify_init
+152 common inotify_add_watch sys_inotify_add_watch
+153 common poll sys_poll
+154 common getdents64 sys_getdents64
+155 32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+156 common inotify_rm_watch sys_inotify_rm_watch
+157 common statfs sys_statfs compat_sys_statfs
+158 common fstatfs sys_fstatfs compat_sys_fstatfs
+159 common umount sys_oldumount
+160 common sched_set_affinity sys_sched_setaffinity compat_sys_sched_setaffinity
+161 common sched_get_affinity sys_sched_getaffinity compat_sys_sched_getaffinity
+162 common getdomainname sys_getdomainname
+163 common setdomainname sys_setdomainname
+164 64 utrap_install sys_utrap_install
+165 common quotactl sys_quotactl
+166 common set_tid_address sys_set_tid_address
+167 common mount sys_mount compat_sys_mount
+168 common ustat sys_ustat compat_sys_ustat
+169 common setxattr sys_setxattr
+170 common lsetxattr sys_lsetxattr
+171 common fsetxattr sys_fsetxattr
+172 common getxattr sys_getxattr
+173 common lgetxattr sys_lgetxattr
+174 common getdents sys_getdents compat_sys_getdents
+175 common setsid sys_setsid
+176 common fchdir sys_fchdir
+177 common fgetxattr sys_fgetxattr
+178 common listxattr sys_listxattr
+179 common llistxattr sys_llistxattr
+180 common flistxattr sys_flistxattr
+181 common removexattr sys_removexattr
+182 common lremovexattr sys_lremovexattr
+183 32 sigpending sys_sigpending compat_sys_sigpending
+183 64 sigpending sys_nis_syscall
+184 common query_module sys_ni_syscall
+185 common setpgid sys_setpgid
+186 common fremovexattr sys_fremovexattr
+187 common tkill sys_tkill
+188 32 exit_group sys_exit_group sparc_exit_group
+188 64 exit_group sparc_exit_group
+189 common uname sys_newuname
+190 common init_module sys_init_module
+191 32 personality sys_personality sys_sparc64_personality
+191 64 personality sys_sparc64_personality
+192 32 remap_file_pages sys_sparc_remap_file_pages sys_remap_file_pages
+192 64 remap_file_pages sys_remap_file_pages
+193 common epoll_create sys_epoll_create
+194 common epoll_ctl sys_epoll_ctl
+195 common epoll_wait sys_epoll_wait
+196 common ioprio_set sys_ioprio_set
+197 common getppid sys_getppid
+198 32 sigaction sys_sparc_sigaction compat_sys_sparc_sigaction
+198 64 sigaction sys_nis_syscall
+199 common sgetmask sys_sgetmask
+200 common ssetmask sys_ssetmask
+201 32 sigsuspend sys_sigsuspend
+201 64 sigsuspend sys_nis_syscall
+202 common oldlstat sys_newlstat compat_sys_newlstat
+203 common uselib sys_uselib
+204 32 readdir sys_old_readdir compat_sys_old_readdir
+204 64 readdir sys_nis_syscall
+205 common readahead sys_readahead compat_sys_readahead
+206 common socketcall sys_socketcall sys32_socketcall
+207 common syslog sys_syslog
+208 common lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+209 common fadvise64 sys_fadvise64 compat_sys_fadvise64
+210 common fadvise64_64 sys_fadvise64_64 compat_sys_fadvise64_64
+211 common tgkill sys_tgkill
+212 common waitpid sys_waitpid
+213 common swapoff sys_swapoff
+214 common sysinfo sys_sysinfo compat_sys_sysinfo
+215 32 ipc sys_ipc compat_sys_ipc
+215 64 ipc sys_sparc_ipc
+216 32 sigreturn sys_sigreturn sys32_sigreturn
+216 64 sigreturn sys_nis_syscall
+217 common clone sys_clone
+218 common ioprio_get sys_ioprio_get
+219 common adjtimex sys_adjtimex compat_sys_adjtimex
+220 32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
+220 64 sigprocmask sys_nis_syscall
+221 common create_module sys_ni_syscall
+222 common delete_module sys_delete_module
+223 common get_kernel_syms sys_ni_syscall
+224 common getpgid sys_getpgid
+225 common bdflush sys_bdflush
+226 common sysfs sys_sysfs
+227 common afs_syscall sys_nis_syscall
+228 common setfsuid sys_setfsuid16
+229 common setfsgid sys_setfsgid16
+230 common _newselect sys_select compat_sys_select
+231 32 time sys_time compat_sys_time
+232 common splice sys_splice
+233 common stime sys_stime compat_sys_stime
+234 common statfs64 sys_statfs64 compat_sys_statfs64
+235 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+236 common _llseek sys_llseek
+237 common mlock sys_mlock
+238 common munlock sys_munlock
+239 common mlockall sys_mlockall
+240 common munlockall sys_munlockall
+241 common sched_setparam sys_sched_setparam
+242 common sched_getparam sys_sched_getparam
+243 common sched_setscheduler sys_sched_setscheduler
+244 common sched_getscheduler sys_sched_getscheduler
+245 common sched_yield sys_sched_yield
+246 common sched_get_priority_max sys_sched_get_priority_max
+247 common sched_get_priority_min sys_sched_get_priority_min
+248 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
+249 common nanosleep sys_nanosleep compat_sys_nanosleep
+250 32 mremap sys_mremap
+250 64 mremap sys_64_mremap
+251 common _sysctl sys_sysctl compat_sys_sysctl
+252 common getsid sys_getsid
+253 common fdatasync sys_fdatasync
+254 32 nfsservctl sys_ni_syscall sys_nis_syscall
+254 64 nfsservctl sys_nis_syscall
+255 common sync_file_range sys_sync_file_range compat_sys_sync_file_range
+256 common clock_settime sys_clock_settime compat_sys_clock_settime
+257 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
+258 common clock_getres sys_clock_getres compat_sys_clock_getres
+259 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+260 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+261 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+262 common timer_settime sys_timer_settime compat_sys_timer_settime
+263 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+264 common timer_getoverrun sys_timer_getoverrun
+265 common timer_delete sys_timer_delete
+266 common timer_create sys_timer_create compat_sys_timer_create
+# 267 was vserver
+267 common vserver sys_nis_syscall
+268 common io_setup sys_io_setup compat_sys_io_setup
+269 common io_destroy sys_io_destroy
+270 common io_submit sys_io_submit compat_sys_io_submit
+271 common io_cancel sys_io_cancel
+272 common io_getevents sys_io_getevents compat_sys_io_getevents
+273 common mq_open sys_mq_open compat_sys_mq_open
+274 common mq_unlink sys_mq_unlink
+275 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
+276 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+277 common mq_notify sys_mq_notify compat_sys_mq_notify
+278 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+279 common waitid sys_waitid compat_sys_waitid
+280 common tee sys_tee
+281 common add_key sys_add_key
+282 common request_key sys_request_key
+283 common keyctl sys_keyctl compat_sys_keyctl
+284 common openat sys_openat compat_sys_openat
+285 common mkdirat sys_mkdirat
+286 common mknodat sys_mknodat
+287 common fchownat sys_fchownat
+288 common futimesat sys_futimesat compat_sys_futimesat
+289 common fstatat64 sys_fstatat64 compat_sys_fstatat64
+290 common unlinkat sys_unlinkat
+291 common renameat sys_renameat
+292 common linkat sys_linkat
+293 common symlinkat sys_symlinkat
+294 common readlinkat sys_readlinkat
+295 common fchmodat sys_fchmodat
+296 common faccessat sys_faccessat
+297 common pselect6 sys_pselect6 compat_sys_pselect6
+298 common ppoll sys_ppoll compat_sys_ppoll
+299 common unshare sys_unshare
+300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
+301 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
+302 common migrate_pages sys_migrate_pages compat_sys_migrate_pages
+303 common mbind sys_mbind compat_sys_mbind
+304 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
+305 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+306 common kexec_load sys_kexec_load compat_sys_kexec_load
+307 common move_pages sys_move_pages compat_sys_move_pages
+308 common getcpu sys_getcpu
+309 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+310 common utimensat sys_utimensat compat_sys_utimensat
+311 common signalfd sys_signalfd compat_sys_signalfd
+312 common timerfd_create sys_timerfd_create
+313 common eventfd sys_eventfd
+314 common fallocate sys_fallocate compat_sys_fallocate
+315 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
+316 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+317 common signalfd4 sys_signalfd4 compat_sys_signalfd4
+318 common eventfd2 sys_eventfd2
+319 common epoll_create1 sys_epoll_create1
+320 common dup3 sys_dup3
+321 common pipe2 sys_pipe2
+322 common inotify_init1 sys_inotify_init1
+323 common accept4 sys_accept4
+324 common preadv sys_preadv compat_sys_preadv
+325 common pwritev sys_pwritev compat_sys_pwritev
+326 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+327 common perf_event_open sys_perf_event_open
+328 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+329 common fanotify_init sys_fanotify_init
+330 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+331 common prlimit64 sys_prlimit64
+332 common name_to_handle_at sys_name_to_handle_at
+333 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+334 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+335 common syncfs sys_syncfs
+336 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
+337 common setns sys_setns
+338 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
+339 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+340 32 kern_features sys_ni_syscall sys_kern_features
+340 64 kern_features sys_kern_features
+341 common kcmp sys_kcmp
+342 common finit_module sys_finit_module
+343 common sched_setattr sys_sched_setattr
+344 common sched_getattr sys_sched_getattr
+345 common renameat2 sys_renameat2
+346 common seccomp sys_seccomp
+347 common getrandom sys_getrandom
+348 common memfd_create sys_memfd_create
+349 common bpf sys_bpf
+350 32 execveat sys_execveat sys32_execveat
+350 64 execveat sys64_execveat
+351 common membarrier sys_membarrier
+352 common userfaultfd sys_userfaultfd
+353 common bind sys_bind
+354 common listen sys_listen
+355 common setsockopt sys_setsockopt compat_sys_setsockopt
+356 common mlock2 sys_mlock2
+357 common copy_file_range sys_copy_file_range
+358 common preadv2 sys_preadv2 compat_sys_preadv2
+359 common pwritev2 sys_pwritev2 compat_sys_pwritev2
+360 common statx sys_statx
+361 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
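
Where a syscall number appears twice with abi 32 and abi 64 (exit, chown, mmap2/munmap, and so on), each generated table picks only the rows matching its own ABI list, and the compat table prefers the last column when one is present. Taking nr 1 ("exit") from the rows above:

/* 32-bit native table (abis common,32): __SYSCALL(1, sys_exit, )   */
/* compat table (c32, same rows):        __SYSCALL(1, sparc_exit, ) */
/* 64-bit native table (abis common,64): __SYSCALL(1, sparc_exit, ) */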
diff --git a/arch/sparc/kernel/syscalls/syscallhdr.sh b/arch/sparc/kernel/syscalls/syscallhdr.sh
new file mode 100644
index 000000000000..626b5740a9f1
--- /dev/null
+++ b/arch/sparc/kernel/syscalls/syscallhdr.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_SPARC_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry compat ; do
+ if [ -z "$offset" ]; then
+ printf "#define __NR_%s%s\t%s\n" \
+ "${prefix}" "${name}" "${nr}"
+ else
+ printf "#define __NR_%s%s\t(%s + %s)\n" \
+ "${prefix}" "${name}" "${offset}" "${nr}"
+ fi
+ nxt=$((nr+1))
+ done
+
+ printf "\n"
+ printf "#ifdef __KERNEL__\n"
+ printf "#define __NR_syscalls\t%s\n" "${nxt}"
+ printf "#endif\n"
+ printf "\n"
+ printf "#endif /* %s */" "${fileguard}"
+) > "$out"
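
A plausible excerpt of the unistd_64.h this script generates from the table above (ABI list "common,64", empty prefix and offset):

#ifndef _UAPI_ASM_SPARC_UNISTD_64_H
#define _UAPI_ASM_SPARC_UNISTD_64_H

#define __NR_restart_syscall	0
#define __NR_exit	1
#define __NR_fork	2
/* ... */
#define __NR_io_pgetevents	361

#ifdef __KERNEL__
#define __NR_syscalls	362
#endif

#endif /* _UAPI_ASM_SPARC_UNISTD_64_H */

__NR_syscalls comes out as 362 because nxt tracks one past the last syscall number read (361).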
diff --git a/arch/sparc/kernel/syscalls/syscalltbl.sh b/arch/sparc/kernel/syscalls/syscalltbl.sh
new file mode 100644
index 000000000000..77cf0143ba19
--- /dev/null
+++ b/arch/sparc/kernel/syscalls/syscalltbl.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+my_abi="$4"
+offset="$5"
+
+emit() {
+ t_nxt="$1"
+ t_nr="$2"
+ t_entry="$3"
+
+ while [ $t_nxt -lt $t_nr ]; do
+ printf "__SYSCALL(%s, sys_nis_syscall, )\n" "${t_nxt}"
+ t_nxt=$((t_nxt+1))
+ done
+ printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+}
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ nxt=0
+ if [ -z "$offset" ]; then
+ offset=0
+ fi
+
+ while read nr abi name entry compat ; do
+ if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then
+ emit $((nxt+offset)) $((nr+offset)) $compat
+ else
+ emit $((nxt+offset)) $((nr+offset)) $entry
+ fi
+ nxt=$((nr+1))
+ done
+) > "$out"
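
The emit() helper pads any numbering gap with sys_nis_syscall before printing the real entry. For the 64-bit table, numbers 31 and 32 exist only in the 32-bit ABI (lchown32/fchown32 in the table above), so after nr 30 the script jumps straight to nr 33 and emits:

__SYSCALL(31, sys_nis_syscall, )
__SYSCALL(32, sys_nis_syscall, )
__SYSCALL(33, sys_access, )

reproducing the hole-filling that the hand-written tables below did with explicit sys_nis_syscall slots.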
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 621a363098ec..ab9e4d57685a 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -9,85 +9,10 @@
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
*/
-
+#define __SYSCALL(nr, entry, nargs) .long entry
.data
.align 4
-
- /* First, the Linux native syscall table. */
-
.globl sys_call_table
sys_call_table:
-/*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write
-/*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link
-/*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
-/*15*/ .long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek
-/*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
-/*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
-/*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
-/*35*/ .long sys_chown, sys_sync, sys_kill, sys_newstat, sys_sendfile
-/*40*/ .long sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_getuid
-/*45*/ .long sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
-/*50*/ .long sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, sys_ioctl
-/*55*/ .long sys_reboot, sys_mmap2, sys_symlink, sys_readlink, sys_execve
-/*60*/ .long sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize
-/*65*/ .long sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_geteuid
-/*70*/ .long sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
-/*75*/ .long sys_madvise, sys_vhangup, sys_truncate64, sys_mincore, sys_getgroups16
-/*80*/ .long sys_setgroups16, sys_getpgrp, sys_setgroups, sys_setitimer, sys_ftruncate64
-/*85*/ .long sys_swapon, sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
-/*90*/ .long sys_dup2, sys_setfsuid, sys_fcntl, sys_select, sys_setfsgid
-/*95*/ .long sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
-/*100*/ .long sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
-/*105*/ .long sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
-/*110*/ .long sys_setresgid, sys_getresgid, sys_setregid, sys_recvmsg, sys_sendmsg
-/*115*/ .long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd
-/*120*/ .long sys_readv, sys_writev, sys_settimeofday, sys_fchown16, sys_fchmod
-/*125*/ .long sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
-/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_sendto, sys_shutdown
-/*135*/ .long sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
-/*140*/ .long sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
-/*145*/ .long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
-/*150*/ .long sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
-/*155*/ .long sys_fcntl64, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount
-/*160*/ .long sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
-/*165*/ .long sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
-/*170*/ .long sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
-/*175*/ .long sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
-/*180*/ .long sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_sigpending, sys_ni_syscall
-/*185*/ .long sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname
-/*190*/ .long sys_init_module, sys_personality, sys_sparc_remap_file_pages, sys_epoll_create, sys_epoll_ctl
-/*195*/ .long sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_sparc_sigaction, sys_sgetmask
-/*200*/ .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_old_readdir
-/*205*/ .long sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
-/*210*/ .long sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
-/*215*/ .long sys_ipc, sys_sigreturn, sys_clone, sys_ioprio_get, sys_adjtimex
-/*220*/ .long sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
-/*225*/ .long sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16
-/*230*/ .long sys_select, sys_time, sys_splice, sys_stime, sys_statfs64
- /* "We are the Knights of the Forest of Ni!!" */
-/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
-/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
-/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
-/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_ni_syscall
-/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
-/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
-/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
-/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
-/*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
-/*280*/ .long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
-/*285*/ .long sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64
-/*290*/ .long sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
-/*295*/ .long sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
-/*300*/ .long sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
-/*305*/ .long sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
-/*310*/ .long sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
-/*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
-/*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
-/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
-/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
-/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
-/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
-/*360*/ .long sys_statx, sys_io_pgetevents
+#include <asm/syscall_table_32.h> /* 32-bit native syscalls */
+#undef __SYSCALL
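
With the per-table __SYSCALL definition in place, each line of the generated header expands straight into a table slot; the 64-bit file below does the same with .word instead of .long. For instance:

#define __SYSCALL(nr, entry, nargs) .long entry
__SYSCALL(0, sys_restart_syscall, )	/* expands to: .long sys_restart_syscall */
__SYSCALL(1, sys_exit, )		/* expands to: .long sys_exit */

so sys_call_table assembles to the same array of entry-point addresses that the deleted hand-maintained listing produced.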
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index ff9389a1c9f3..a27394bf7d7f 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -10,167 +10,18 @@
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
*/
-
+#define __SYSCALL(nr, entry, nargs) .word entry
.text
.align 4
-
#ifdef CONFIG_COMPAT
- /* First, the 32-bit Linux native syscall table. */
-
.globl sys_call_table32
sys_call_table32:
-/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
-/*5*/ .word compat_sys_open, sys_close, compat_sys_wait4, sys_creat, sys_link
-/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
-/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, compat_sys_lseek
-/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
-/*25*/ .word compat_sys_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause
-/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
- .word sys_chown, sys_sync, sys_kill, compat_sys_newstat, compat_sys_sendfile
-/*40*/ .word compat_sys_newlstat, sys_dup, sys_sparc_pipe, compat_sys_times, sys_getuid
- .word sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16
-/*50*/ .word sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl
- .word sys_reboot, sys32_mmap2, sys_symlink, sys_readlink, sys32_execve
-/*60*/ .word sys_umask, sys_chroot, compat_sys_newfstat, compat_sys_fstat64, sys_getpagesize
- .word sys_msync, sys_vfork, compat_sys_pread64, compat_sys_pwrite64, sys_geteuid
-/*70*/ .word sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect
- .word sys_madvise, sys_vhangup, compat_sys_truncate64, sys_mincore, sys_getgroups16
-/*80*/ .word sys_setgroups16, sys_getpgrp, sys_setgroups, compat_sys_setitimer, compat_sys_ftruncate64
- .word sys_swapon, compat_sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
-/*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, compat_sys_select, sys_setfsgid
- .word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
-/*100*/ .word sys_getpriority, sys32_rt_sigreturn, compat_sys_rt_sigaction, compat_sys_rt_sigprocmask, compat_sys_rt_sigpending
- .word compat_sys_rt_sigtimedwait, compat_sys_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid
-/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, compat_sys_recvmsg, compat_sys_sendmsg
- .word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, compat_sys_getsockopt, sys_getcwd
-/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
- .word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
-/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown
- .word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
-/*140*/ .word sys_sendfile64, sys_getpeername, compat_sys_futex, sys_gettid, compat_sys_getrlimit
- .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
-/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
- .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
-/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
- .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr
-/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
- .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
-/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall
- .word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname
-/*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
- .word sys_epoll_wait, sys_ioprio_set, sys_getppid, compat_sys_sparc_sigaction, sys_sgetmask
-/*200*/ .word sys_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir
- .word compat_sys_readahead, sys32_socketcall, sys_syslog, compat_sys_lookup_dcookie, compat_sys_fadvise64
-/*210*/ .word compat_sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, compat_sys_sysinfo
- .word compat_sys_ipc, sys32_sigreturn, sys_clone, sys_ioprio_get, compat_sys_adjtimex
-/*220*/ .word compat_sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
- .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16
-/*230*/ .word compat_sys_select, compat_sys_time, sys_splice, compat_sys_stime, compat_sys_statfs64
- .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
-/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
- .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, compat_sys_sched_rr_get_interval, compat_sys_nanosleep
-/*250*/ .word sys_mremap, compat_sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall
- .word compat_sys_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, compat_sys_clock_nanosleep
-/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, compat_sys_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
- .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
-/*270*/ .word compat_sys_io_submit, sys_io_cancel, compat_sys_io_getevents, compat_sys_mq_open, sys_mq_unlink
- .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
-/*280*/ .word sys_tee, sys_add_key, sys_request_key, compat_sys_keyctl, compat_sys_openat
- .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64
-/*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
- .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare
-/*300*/ .word compat_sys_set_robust_list, compat_sys_get_robust_list, compat_sys_migrate_pages, compat_sys_mbind, compat_sys_get_mempolicy
- .word compat_sys_set_mempolicy, compat_sys_kexec_load, compat_sys_move_pages, sys_getcpu, compat_sys_epoll_pwait
-/*310*/ .word compat_sys_utimensat, compat_sys_signalfd, sys_timerfd_create, sys_eventfd, compat_sys_fallocate
- .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv
- .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init
-/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
- .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
-/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
- .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
-/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
- .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
-/*360*/ .word sys_statx, compat_sys_io_pgetevents
-
+#include <asm/syscall_table_c32.h> /* Compat syscalls */
#endif /* CONFIG_COMPAT */
- /* Now the 64-bit native Linux syscall table. */
-
.align 4
.globl sys_call_table64, sys_call_table
sys_call_table64:
sys_call_table:
-/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
-/*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link
-/*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
-/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_nis_syscall, sys_lseek
-/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
-/*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
-/*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
- .word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64
-/*40*/ .word sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_nis_syscall
- .word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid
-/*50*/ .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl
- .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys64_execve
-/*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize
- .word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall
-/*70*/ .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys_64_munmap, sys_mprotect
- .word sys_madvise, sys_vhangup, sys_nis_syscall, sys_mincore, sys_getgroups
-/*80*/ .word sys_setgroups, sys_getpgrp, sys_nis_syscall, sys_setitimer, sys_nis_syscall
- .word sys_swapon, sys_getitimer, sys_nis_syscall, sys_sethostname, sys_nis_syscall
-/*90*/ .word sys_dup2, sys_nis_syscall, sys_fcntl, sys_select, sys_nis_syscall
- .word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
-/*100*/ .word sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
- .word sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
-/*110*/ .word sys_setresgid, sys_getresgid, sys_nis_syscall, sys_recvmsg, sys_sendmsg
- .word sys_nis_syscall, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd
-/*120*/ .word sys_readv, sys_writev, sys_settimeofday, sys_fchown, sys_fchmod
- .word sys_recvfrom, sys_setreuid, sys_setregid, sys_rename, sys_truncate
-/*130*/ .word sys_ftruncate, sys_flock, sys_lstat64, sys_sendto, sys_shutdown
- .word sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
-/*140*/ .word sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
- .word sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
-/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
- .word sys_nis_syscall, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount
-/*160*/ .word sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_utrap_install
- .word sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
-/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents
- .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
-/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall
- .word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname
-/*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl
- .word sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_nis_syscall, sys_sgetmask
-/*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall
- .word sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64
-/*210*/ .word sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo
- .word sys_sparc_ipc, sys_nis_syscall, sys_clone, sys_ioprio_get, sys_adjtimex
-/*220*/ .word sys_nis_syscall, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid
- .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid, sys_setfsgid
-/*230*/ .word sys_select, sys_nis_syscall, sys_splice, sys_stime, sys_statfs64
- .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
-/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
- .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
-/*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall
- .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
-/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
- .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
-/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
- .word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
-/*280*/ .word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
- .word sys_mkdirat, sys_mknodat, sys_fchownat, sys_futimesat, sys_fstatat64
-/*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat
- .word sys_fchmodat, sys_faccessat, sys_pselect6, sys_ppoll, sys_unshare
-/*300*/ .word sys_set_robust_list, sys_get_robust_list, sys_migrate_pages, sys_mbind, sys_get_mempolicy
- .word sys_set_mempolicy, sys_kexec_load, sys_move_pages, sys_getcpu, sys_epoll_pwait
-/*310*/ .word sys_utimensat, sys_signalfd, sys_timerfd_create, sys_eventfd, sys_fallocate
- .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1
-/*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv
- .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init
-/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
- .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
-/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
- .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
-/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
- .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
-/*360*/ .word sys_statx, sys_io_pgetevents
+#include <asm/syscall_table_64.h> /* 64-bit native syscalls */
+#undef __SYSCALL
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 5f356dc8e178..3eb77943ce12 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -445,8 +445,8 @@ static int rtc_probe(struct platform_device *op)
{
struct resource *r;
- printk(KERN_INFO "%s: RTC regs at 0x%llx\n",
- op->dev.of_node->full_name, op->resource[0].start);
+ printk(KERN_INFO "%pOF: RTC regs at 0x%llx\n",
+ op->dev.of_node, op->resource[0].start);
/* The CMOS RTC driver only accepts IORESOURCE_IO, so cons
* up a fake resource so that the probe works for all cases.
@@ -501,8 +501,8 @@ static struct platform_device rtc_bq4802_device = {
static int bq4802_probe(struct platform_device *op)
{
- printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n",
- op->dev.of_node->full_name, op->resource[0].start);
+ printk(KERN_INFO "%pOF: BQ4802 regs at 0x%llx\n",
+ op->dev.of_node, op->resource[0].start);
rtc_bq4802_device.resource = &op->resource[0];
return platform_device_register(&rtc_bq4802_device);
@@ -561,12 +561,12 @@ static int mostek_probe(struct platform_device *op)
/* On an Enterprise system there can be multiple mostek clocks.
* We should only match the one that is on the central FHC bus.
*/
- if (!strcmp(dp->parent->name, "fhc") &&
- strcmp(dp->parent->parent->name, "central") != 0)
+ if (of_node_name_eq(dp->parent, "fhc") &&
+ !of_node_name_eq(dp->parent->parent, "central"))
return -ENODEV;
- printk(KERN_INFO "%s: Mostek regs at 0x%llx\n",
- dp->full_name, op->resource[0].start);
+ printk(KERN_INFO "%pOF: Mostek regs at 0x%llx\n",
+ dp, op->resource[0].start);
m48t59_rtc.resource = &op->resource[0];
return platform_device_register(&m48t59_rtc);
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 32bae68e34c1..c7cad9b7bba7 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -193,7 +193,7 @@ show_pciobppath_attr(struct device *dev, struct device_attribute *attr,
vdev = to_vio_dev(dev);
dp = vdev->dp;
- return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
+ return snprintf (buf, PAGE_SIZE, "%pOF\n", dp);
}
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH,
@@ -366,12 +366,9 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
if (parent == NULL) {
dp = cdev_node;
} else if (to_vio_dev(parent) == root_vdev) {
- dp = of_get_next_child(cdev_node, NULL);
- while (dp) {
- if (!strcmp(dp->type, type))
+ for_each_child_of_node(cdev_node, dp) {
+ if (of_node_is_type(dp, type))
break;
-
- dp = of_get_next_child(cdev_node, dp);
}
} else {
dp = to_vio_dev(parent)->dp;
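
for_each_child_of_node() takes over the of_get_next_child() bookkeeping the removed loop did by hand, dropping the reference on each non-matching child as it advances. A minimal usage sketch (the "vdc-port" type is hypothetical):

struct device_node *child;

for_each_child_of_node(cdev_node, child) {
	if (of_node_is_type(child, "vdc-port"))
		break;	/* exiting early keeps the reference on 'child' */
}
/* on a match the caller still owns a reference and must of_node_put() it */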
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index a5ff88643d5c..84cc8f7f83e9 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -552,15 +552,14 @@ void bpf_jit_compile(struct bpf_prog *fp)
emit_skb_load32(hash, r_A);
break;
case BPF_ANC | SKF_AD_VLAN_TAG:
- case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
emit_skb_load16(vlan_tci, r_A);
- if (code != (BPF_ANC | SKF_AD_VLAN_TAG)) {
- emit_alu_K(SRL, 12);
+ break;
+ case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
+ __emit_skb_load8(__pkt_vlan_present_offset, r_A);
+ if (PKT_VLAN_PRESENT_BIT)
+ emit_alu_K(SRL, PKT_VLAN_PRESENT_BIT);
+ if (PKT_VLAN_PRESENT_BIT < 7)
emit_andi(r_A, 1, r_A);
- } else {
- emit_loadimm(~VLAN_TAG_PRESENT, r_TMP);
- emit_and(r_A, r_TMP, r_A);
- }
break;
case BPF_LD | BPF_W | BPF_LEN:
emit_skb_load32(len, r_A);
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 222785af550b..65428e79b2f3 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -791,7 +791,7 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
}
/* Just skip the save instruction and the ctx register move. */
-#define BPF_TAILCALL_PROLOGUE_SKIP 16
+#define BPF_TAILCALL_PROLOGUE_SKIP 32
#define BPF_TAILCALL_CNT_SP_OFF (STACK_BIAS + 128)
static void build_prologue(struct jit_ctx *ctx)
@@ -824,9 +824,15 @@ static void build_prologue(struct jit_ctx *ctx)
const u8 vfp = bpf2sparc[BPF_REG_FP];
emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx);
+ } else {
+ emit_nop(ctx);
}
emit_reg_move(I0, O0, ctx);
+ emit_reg_move(I1, O1, ctx);
+ emit_reg_move(I2, O2, ctx);
+ emit_reg_move(I3, O3, ctx);
+ emit_reg_move(I4, O4, ctx);
/* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */
}
@@ -1270,6 +1276,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
const u8 tmp2 = bpf2sparc[TMP_REG_2];
u32 opcode = 0, rs2;
+ if (insn->dst_reg == BPF_REG_FP)
+ ctx->saw_frame_pointer = true;
+
ctx->tmp_2_used = true;
emit_loadimm(imm, tmp2, ctx);
@@ -1308,6 +1317,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
const u8 tmp = bpf2sparc[TMP_REG_1];
u32 opcode = 0, rs2;
+ if (insn->dst_reg == BPF_REG_FP)
+ ctx->saw_frame_pointer = true;
+
switch (BPF_SIZE(code)) {
case BPF_W:
opcode = ST32;
@@ -1340,6 +1352,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
const u8 tmp2 = bpf2sparc[TMP_REG_2];
const u8 tmp3 = bpf2sparc[TMP_REG_3];
+ if (insn->dst_reg == BPF_REG_FP)
+ ctx->saw_frame_pointer = true;
+
ctx->tmp_1_used = true;
ctx->tmp_2_used = true;
ctx->tmp_3_used = true;
@@ -1360,6 +1375,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
const u8 tmp2 = bpf2sparc[TMP_REG_2];
const u8 tmp3 = bpf2sparc[TMP_REG_3];
+ if (insn->dst_reg == BPF_REG_FP)
+ ctx->saw_frame_pointer = true;
+
ctx->tmp_1_used = true;
ctx->tmp_2_used = true;
ctx->tmp_3_used = true;
@@ -1425,12 +1443,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
struct bpf_prog *tmp, *orig_prog = prog;
struct sparc64_jit_data *jit_data;
struct bpf_binary_header *header;
+ u32 prev_image_size, image_size;
bool tmp_blinded = false;
bool extra_pass = false;
struct jit_ctx ctx;
- u32 image_size;
u8 *image_ptr;
- int pass;
+ int pass, i;
if (!prog->jit_requested)
return orig_prog;
@@ -1461,61 +1479,82 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
header = jit_data->header;
extra_pass = true;
image_size = sizeof(u32) * ctx.idx;
+ prev_image_size = image_size;
+ pass = 1;
goto skip_init_ctx;
}
memset(&ctx, 0, sizeof(ctx));
ctx.prog = prog;
- ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
+ ctx.offset = kmalloc_array(prog->len, sizeof(unsigned int), GFP_KERNEL);
if (ctx.offset == NULL) {
prog = orig_prog;
goto out_off;
}
- /* Fake pass to detect features used, and get an accurate assessment
- * of what the final image size will be.
+ /* Longest sequence emitted is for bswap32, 12 instructions. Pre-cook
+ * the offset array so that we converge faster.
*/
- if (build_body(&ctx)) {
- prog = orig_prog;
- goto out_off;
- }
- build_prologue(&ctx);
- build_epilogue(&ctx);
-
- /* Now we know the actual image size. */
- image_size = sizeof(u32) * ctx.idx;
- header = bpf_jit_binary_alloc(image_size, &image_ptr,
- sizeof(u32), jit_fill_hole);
- if (header == NULL) {
- prog = orig_prog;
- goto out_off;
- }
+ for (i = 0; i < prog->len; i++)
+ ctx.offset[i] = i * (12 * 4);
- ctx.image = (u32 *)image_ptr;
-skip_init_ctx:
- for (pass = 1; pass < 3; pass++) {
+ prev_image_size = ~0U;
+ for (pass = 1; pass < 40; pass++) {
ctx.idx = 0;
build_prologue(&ctx);
-
if (build_body(&ctx)) {
- bpf_jit_binary_free(header);
prog = orig_prog;
goto out_off;
}
-
build_epilogue(&ctx);
if (bpf_jit_enable > 1)
- pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c]\n", pass,
- image_size - (ctx.idx * 4),
+ pr_info("Pass %d: size = %u, seen = [%c%c%c%c%c%c]\n", pass,
+ ctx.idx * 4,
ctx.tmp_1_used ? '1' : ' ',
ctx.tmp_2_used ? '2' : ' ',
ctx.tmp_3_used ? '3' : ' ',
ctx.saw_frame_pointer ? 'F' : ' ',
ctx.saw_call ? 'C' : ' ',
ctx.saw_tail_call ? 'T' : ' ');
+
+ if (ctx.idx * 4 == prev_image_size)
+ break;
+ prev_image_size = ctx.idx * 4;
+ cond_resched();
+ }
+
+ /* Now we know the actual image size. */
+ image_size = sizeof(u32) * ctx.idx;
+ header = bpf_jit_binary_alloc(image_size, &image_ptr,
+ sizeof(u32), jit_fill_hole);
+ if (header == NULL) {
+ prog = orig_prog;
+ goto out_off;
+ }
+
+ ctx.image = (u32 *)image_ptr;
+skip_init_ctx:
+ ctx.idx = 0;
+
+ build_prologue(&ctx);
+
+ if (build_body(&ctx)) {
+ bpf_jit_binary_free(header);
+ prog = orig_prog;
+ goto out_off;
+ }
+
+ build_epilogue(&ctx);
+
+ if (ctx.idx * 4 != prev_image_size) {
+ pr_err("bpf_jit: Failed to converge, prev_size=%u size=%d\n",
+ prev_image_size, ctx.idx * 4);
+ bpf_jit_binary_free(header);
+ prog = orig_prog;
+ goto out_off;
}
if (bpf_jit_enable > 1)
@@ -1536,6 +1575,7 @@ skip_init_ctx:
prog->jited_len = image_size;
if (!prog->is_func || extra_pass) {
+ bpf_prog_fill_jited_linfo(prog, ctx.offset);
out_off:
kfree(ctx.offset);
kfree(jit_data);
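
The hunks above replace the single "fake" sizing pass with a fixed-point iteration: code generation is repeated until two consecutive passes emit the same image size, starting from a pessimistic 12-instructions-per-insn offset estimate (the bswap32 worst case) and capped at 40 passes. A minimal C sketch of that loop, with generate() as a hypothetical stand-in for build_prologue/build_body/build_epilogue:

	/* Minimal sketch of the fixed-point sizing loop; generate() is a
	 * hypothetical stand-in for build_prologue/build_body/build_epilogue. */
	#include <stdbool.h>

	#define MAX_PASSES  40
	#define WORST_INSNS 12	/* longest emitted sequence (bswap32) */

	struct jit_ctx { unsigned int idx; unsigned int *offset; unsigned int ninsns; };

	bool generate(struct jit_ctx *ctx);

	static bool jit_converge(struct jit_ctx *ctx)
	{
		unsigned int prev_size = ~0U;
		unsigned int i;
		int pass;

		/* Pessimistic starting offsets so branch targets only shrink. */
		for (i = 0; i < ctx->ninsns; i++)
			ctx->offset[i] = i * (WORST_INSNS * 4);

		for (pass = 1; pass < MAX_PASSES; pass++) {
			ctx->idx = 0;
			if (!generate(ctx))
				return false;
			if (ctx->idx * 4 == prev_size)
				return true;		/* fixed point reached */
			prev_size = ctx->idx * 4;
		}
		return false;				/* failed to converge */
	}
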
diff --git a/arch/sparc/oprofile/init.c b/arch/sparc/oprofile/init.c
index f9024bccff16..43730c9b1c86 100644
--- a/arch/sparc/oprofile/init.c
+++ b/arch/sparc/oprofile/init.c
@@ -53,7 +53,7 @@ static void timer_stop(void)
{
nmi_adjust_hz(1);
unregister_die_notifier(&profile_timer_exceptions_nb);
- synchronize_sched(); /* Allow already-started NMIs to complete. */
+ synchronize_rcu(); /* Allow already-started NMIs to complete. */
}
static int op_nmi_timer_init(struct oprofile_operations *ops)
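
The synchronize_sched() call is converted as part of the RCU flavor consolidation: synchronize_rcu() now also waits for preempt-disabled regions, so the classic unregister-then-wait teardown keeps its guarantee. A sketch of that pattern, assuming kernel context:

	/* Sketch (kernel context assumed): stop new NMI handlers from seeing
	 * the notifier, then wait out any that have already started. */
	static void detach_profile_notifier(struct notifier_block *nb)
	{
		unregister_die_notifier(nb);
		synchronize_rcu();	/* after the flavor consolidation this
					 * also waits for preempt-disabled
					 * (including NMI-started) readers */
		/* resources used by the handler may now be freed */
	}
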
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index a6e18ca4cc18..74e97f77e23b 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -34,7 +34,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
CPPFLAGS_vdso.lds += -P -C
VDSO_LDFLAGS_vdso.lds = -m elf64_sparc -soname linux-vdso.so.1 --no-undefined \
- -z max-page-size=8192 -z common-page-size=8192
+ -z max-page-size=8192
$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 0038a2d10a7a..c625f57472f7 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -7,6 +7,8 @@ obj-$(CONFIG_KVM) += kvm/
# Xen paravirtualization support
obj-$(CONFIG_XEN) += xen/
+obj-$(CONFIG_PVH) += platform/pvh/
+
# Hyper-V paravirtualization support
obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index c14d4a35be13..57552f2b37eb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -442,19 +442,23 @@ config RETPOLINE
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
- Without compiler support, at least indirect branches in assembler
- code are eliminated. Since this includes the syscall entry path,
- it is not entirely pointless.
-
-config INTEL_RDT
- bool "Intel Resource Director Technology support"
- depends on X86 && CPU_SUP_INTEL
+config RESCTRL
+ bool "Resource Control support"
+ depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
select KERNFS
help
- Select to enable resource allocation and monitoring which are
- sub-features of Intel Resource Director Technology(RDT). More
- information about RDT can be found in the Intel x86
- Architecture Software Developer Manual.
+ Enable Resource Control support.
+
+ Provide support for the allocation and monitoring of system
+ resource usage by the CPU.
+
+ Intel calls this Intel Resource Director Technology
+ (Intel(R) RDT). More information about RDT can be found in the
+ Intel x86 Architecture Software Developer Manual.
+
+ AMD calls this AMD Platform Quality of Service (AMD QoS).
+ More information about AMD QoS can be found in the AMD64 Technology
+ Platform Quality of Service Extensions manual.
Say N if unsure.
@@ -798,6 +802,12 @@ config KVM_GUEST
underlying device model, the host provides the guest with
timing infrastructure such as time of day, and system time
+config PVH
+ bool "Support for running PVH guests"
+ ---help---
+ This option enables the PVH entry point for guest virtual machines
+ as specified in the x86/HVM direct boot ABI.
+
config KVM_DEBUG_FS
bool "Enable debug information for KVM Guests in debugfs"
depends on KVM_GUEST && DEBUG_FS
@@ -1002,13 +1012,7 @@ config NR_CPUS
to the kernel image.
config SCHED_SMT
- bool "SMT (Hyperthreading) scheduler support"
- depends on SMP
- ---help---
- SMT scheduler support improves the CPU scheduler's decision making
- when dealing with Intel Pentium 4 chips with HyperThreading at a
- cost of slightly increased overhead in some places. If unsure say
- N here.
+ def_bool y if SMP
config SCHED_MC
def_bool y
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 88398fdf8129..16c3145c0a5f 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -130,10 +130,6 @@ else
KBUILD_CFLAGS += -mno-red-zone
KBUILD_CFLAGS += -mcmodel=kernel
-
- # -funit-at-a-time shrinks the kernel .text considerably
- # unfortunately it makes reading oopses harder.
- KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
endif
ifdef CONFIG_X86_X32
@@ -220,9 +216,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Avoid indirect branches in kernel to deal with Spectre
ifdef CONFIG_RETPOLINE
-ifneq ($(RETPOLINE_CFLAGS),)
- KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
-endif
+ KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
endif
archscripts: scripts_basic
@@ -234,13 +228,6 @@ archscripts: scripts_basic
archheaders:
$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
-archmacros:
- $(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
-
-ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
-export ASM_MACRO_FLAGS
-KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
-
###
# Kernel objects
@@ -306,6 +293,13 @@ ifndef CC_HAVE_ASM_GOTO
@echo Compiler lacks asm-goto support.
@exit 1
endif
+ifdef CONFIG_RETPOLINE
+ifeq ($(RETPOLINE_CFLAGS),)
+ @echo "You are building kernel with non-retpoline compiler." >&2
+ @echo "Please update your compiler." >&2
+ @false
+endif
+endif
archclean:
$(Q)rm -rf $(objtree)/arch/i386
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 91085a08de6c..1db7913795f5 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -26,13 +26,6 @@ cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
# an unresolved reference.
cflags-y += -ffreestanding
-# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
-# a lot more stack due to the lack of sharing of stacklots. Also, gcc
-# 4.3.0 needs -funit-at-a-time for extern inline functions.
-KBUILD_CFLAGS += $(shell if [ $(cc-version) -lt 0400 ] ; then \
- echo $(call cc-option,-fno-unit-at-a-time); \
- else echo $(call cc-option,-funit-at-a-time); fi ;)
-
KBUILD_CFLAGS += $(cflags-y)
else
@@ -54,6 +47,4 @@ ELF_FORMAT := elf64-x86-64
LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib64
LINK-y += -m64
-# Do unit-at-a-time unconditionally on x86_64, following the host
-KBUILD_CFLAGS += $(call cc-option,-funit-at-a-time)
endif
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index ef5a9cc66fb8..32a09eb5c101 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -309,7 +309,7 @@ void query_edd(void);
void __attribute__((noreturn)) die(void);
/* memory.c */
-int detect_memory(void);
+void detect_memory(void);
/* pm.c */
void __attribute__((noreturn)) go_to_protected_mode(void);
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 8b4c5e001157..544ac4fafd11 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1,3 +1,4 @@
+
/* -----------------------------------------------------------------------
*
* Copyright 2011 Intel Corporation; author Matt Fleming
@@ -634,37 +635,54 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
return status;
}
+static efi_status_t allocate_e820(struct boot_params *params,
+ struct setup_data **e820ext,
+ u32 *e820ext_size)
+{
+ unsigned long map_size, desc_size, buff_size;
+ struct efi_boot_memmap boot_map;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ __u32 nr_desc;
+
+ boot_map.map = &map;
+ boot_map.map_size = &map_size;
+ boot_map.desc_size = &desc_size;
+ boot_map.desc_ver = NULL;
+ boot_map.key_ptr = NULL;
+ boot_map.buff_size = &buff_size;
+
+ status = efi_get_memory_map(sys_table, &boot_map);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ nr_desc = buff_size / desc_size;
+
+ if (nr_desc > ARRAY_SIZE(params->e820_table)) {
+ u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table);
+
+ status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
+ }
+
+ return EFI_SUCCESS;
+}
+
struct exit_boot_struct {
struct boot_params *boot_params;
struct efi_info *efi;
- struct setup_data *e820ext;
- __u32 e820ext_size;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
struct efi_boot_memmap *map,
void *priv)
{
- static bool first = true;
const char *signature;
__u32 nr_desc;
efi_status_t status;
struct exit_boot_struct *p = priv;
- if (first) {
- nr_desc = *map->buff_size / *map->desc_size;
- if (nr_desc > ARRAY_SIZE(p->boot_params->e820_table)) {
- u32 nr_e820ext = nr_desc -
- ARRAY_SIZE(p->boot_params->e820_table);
-
- status = alloc_e820ext(nr_e820ext, &p->e820ext,
- &p->e820ext_size);
- if (status != EFI_SUCCESS)
- return status;
- }
- first = false;
- }
-
signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
: EFI32_LOADER_SIGNATURE;
memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
@@ -687,8 +705,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
{
unsigned long map_sz, key, desc_size, buff_size;
efi_memory_desc_t *mem_map;
- struct setup_data *e820ext;
- __u32 e820ext_size;
+ struct setup_data *e820ext = NULL;
+ __u32 e820ext_size = 0;
efi_status_t status;
__u32 desc_version;
struct efi_boot_memmap map;
@@ -702,8 +720,10 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
map.buff_size = &buff_size;
priv.boot_params = boot_params;
priv.efi = &boot_params->efi_info;
- priv.e820ext = NULL;
- priv.e820ext_size = 0;
+
+ status = allocate_e820(boot_params, &e820ext, &e820ext_size);
+ if (status != EFI_SUCCESS)
+ return status;
/* Might as well exit boot services now */
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -711,9 +731,6 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
if (status != EFI_SUCCESS)
return status;
- e820ext = priv.e820ext;
- e820ext_size = priv.e820ext_size;
-
/* Historic? */
boot_params->alt_mem_k = 32 * 1024;
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 4c881c850125..850b8762e889 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -300,7 +300,7 @@ _start:
# Part 2 of the header, from the old setup.S
.ascii "HdrS" # header signature
- .word 0x020e # header version number (>= 0x0105)
+ .word 0x020d # header version number (>= 0x0105)
# or else old loadlin-1.5 will fail)
.globl realmode_swtch
realmode_swtch: .word 0, 0 # default_switch, SETUPSEG
@@ -558,10 +558,6 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
init_size: .long INIT_SIZE # kernel initialization size
handover_offset: .long 0 # Filled in by build.c
-acpi_rsdp_addr: .quad 0 # 64-bit physical pointer to the
- # ACPI RSDP table, added with
- # version 2.14
-
# End of setup header #####################################################
.section ".entrytext", "ax"
diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
index 7df2b28207be..f06c147b5140 100644
--- a/arch/x86/boot/memory.c
+++ b/arch/x86/boot/memory.c
@@ -17,7 +17,7 @@
#define SMAP 0x534d4150 /* ASCII "SMAP" */
-static int detect_memory_e820(void)
+static void detect_memory_e820(void)
{
int count = 0;
struct biosregs ireg, oreg;
@@ -68,10 +68,10 @@ static int detect_memory_e820(void)
count++;
} while (ireg.ebx && count < ARRAY_SIZE(boot_params.e820_table));
- return boot_params.e820_entries = count;
+ boot_params.e820_entries = count;
}
-static int detect_memory_e801(void)
+static void detect_memory_e801(void)
{
struct biosregs ireg, oreg;
@@ -80,7 +80,7 @@ static int detect_memory_e801(void)
intcall(0x15, &ireg, &oreg);
if (oreg.eflags & X86_EFLAGS_CF)
- return -1;
+ return;
/* Do we really need to do this? */
if (oreg.cx || oreg.dx) {
@@ -89,7 +89,7 @@ static int detect_memory_e801(void)
}
if (oreg.ax > 15*1024) {
- return -1; /* Bogus! */
+ return; /* Bogus! */
} else if (oreg.ax == 15*1024) {
boot_params.alt_mem_k = (oreg.bx << 6) + oreg.ax;
} else {
@@ -102,11 +102,9 @@ static int detect_memory_e801(void)
*/
boot_params.alt_mem_k = oreg.ax;
}
-
- return 0;
}
-static int detect_memory_88(void)
+static void detect_memory_88(void)
{
struct biosregs ireg, oreg;
@@ -115,22 +113,13 @@ static int detect_memory_88(void)
intcall(0x15, &ireg, &oreg);
boot_params.screen_info.ext_mem_k = oreg.ax;
-
- return -(oreg.eflags & X86_EFLAGS_CF); /* 0 or -1 */
}
-int detect_memory(void)
+void detect_memory(void)
{
- int err = -1;
-
- if (detect_memory_e820() > 0)
- err = 0;
-
- if (!detect_memory_e801())
- err = 0;
+ detect_memory_e820();
- if (!detect_memory_88())
- err = 0;
+ detect_memory_e801();
- return err;
+ detect_memory_88();
}
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index bf0e82400358..a93d44e58f9c 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -132,6 +132,7 @@ static void die(const char * str, ...)
va_list args;
va_start(args, str);
vfprintf(stderr, str, args);
+ va_end(args);
fputc('\n', stderr);
exit(1);
}
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index a4b0007a54e1..45734e1cf967 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -8,6 +8,7 @@ OBJECT_FILES_NON_STANDARD := y
avx_supported := $(call as-instr,vpxor %xmm0$(comma)%xmm0$(comma)%xmm0,yes,no)
avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
$(comma)4)$(comma)%ymm2,yes,no)
+avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no)
sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no)
sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no)
@@ -23,7 +24,7 @@ obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
-obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o
+obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha-x86_64.o
obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
@@ -46,6 +47,9 @@ obj-$(CONFIG_CRYPTO_MORUS1280_GLUE) += morus1280_glue.o
obj-$(CONFIG_CRYPTO_MORUS640_SSE2) += morus640-sse2.o
obj-$(CONFIG_CRYPTO_MORUS1280_SSE2) += morus1280-sse2.o
+obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
+obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
+
# These modules require assembler to support AVX.
ifeq ($(avx_supported),yes)
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64) += \
@@ -74,7 +78,7 @@ camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
-chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o
+chacha-x86_64-y := chacha-ssse3-x86_64.o chacha_glue.o
serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o
@@ -84,6 +88,8 @@ aegis256-aesni-y := aegis256-aesni-asm.o aegis256-aesni-glue.o
morus640-sse2-y := morus640-sse2-asm.o morus640-sse2-glue.o
morus1280-sse2-y := morus1280-sse2-asm.o morus1280-sse2-glue.o
+nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o
+
ifeq ($(avx_supported),yes)
camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \
camellia_aesni_avx_glue.o
@@ -97,10 +103,16 @@ endif
ifeq ($(avx2_supported),yes)
camellia-aesni-avx2-y := camellia-aesni-avx2-asm_64.o camellia_aesni_avx2_glue.o
- chacha20-x86_64-y += chacha20-avx2-x86_64.o
+ chacha-x86_64-y += chacha-avx2-x86_64.o
serpent-avx2-y := serpent-avx2-asm_64.o serpent_avx2_glue.o
morus1280-avx2-y := morus1280-avx2-asm.o morus1280-avx2-glue.o
+
+ nhpoly1305-avx2-y := nh-avx2-x86_64.o nhpoly1305-avx2-glue.o
+endif
+
+ifeq ($(avx512_supported),yes)
+ chacha-x86_64-y += chacha-avx512vl-x86_64.o
endif
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 1985ea0b551b..91c039ab5699 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -182,43 +182,30 @@ aad_shift_arr:
.text
-##define the fields of the gcm aes context
-#{
-# u8 expanded_keys[16*11] store expanded keys
-# u8 shifted_hkey_1[16] store HashKey <<1 mod poly here
-# u8 shifted_hkey_2[16] store HashKey^2 <<1 mod poly here
-# u8 shifted_hkey_3[16] store HashKey^3 <<1 mod poly here
-# u8 shifted_hkey_4[16] store HashKey^4 <<1 mod poly here
-# u8 shifted_hkey_5[16] store HashKey^5 <<1 mod poly here
-# u8 shifted_hkey_6[16] store HashKey^6 <<1 mod poly here
-# u8 shifted_hkey_7[16] store HashKey^7 <<1 mod poly here
-# u8 shifted_hkey_8[16] store HashKey^8 <<1 mod poly here
-# u8 shifted_hkey_1_k[16] store XOR HashKey <<1 mod poly here (for Karatsuba purposes)
-# u8 shifted_hkey_2_k[16] store XOR HashKey^2 <<1 mod poly here (for Karatsuba purposes)
-# u8 shifted_hkey_3_k[16] store XOR HashKey^3 <<1 mod poly here (for Karatsuba purposes)
-# u8 shifted_hkey_4_k[16] store XOR HashKey^4 <<1 mod poly here (for Karatsuba purposes)
-# u8 shifted_hkey_5_k[16] store XOR HashKey^5 <<1 mod poly here (for Karatsuba purposes)
-# u8 shifted_hkey_6_k[16] store XOR HashKey^6 <<1 mod poly here (for Karatsuba purposes)
-# u8 shifted_hkey_7_k[16] store XOR HashKey^7 <<1 mod poly here (for Karatsuba purposes)
-# u8 shifted_hkey_8_k[16] store XOR HashKey^8 <<1 mod poly here (for Karatsuba purposes)
-#} gcm_ctx#
-
-HashKey = 16*11 # store HashKey <<1 mod poly here
-HashKey_2 = 16*12 # store HashKey^2 <<1 mod poly here
-HashKey_3 = 16*13 # store HashKey^3 <<1 mod poly here
-HashKey_4 = 16*14 # store HashKey^4 <<1 mod poly here
-HashKey_5 = 16*15 # store HashKey^5 <<1 mod poly here
-HashKey_6 = 16*16 # store HashKey^6 <<1 mod poly here
-HashKey_7 = 16*17 # store HashKey^7 <<1 mod poly here
-HashKey_8 = 16*18 # store HashKey^8 <<1 mod poly here
-HashKey_k = 16*19 # store XOR of HashKey <<1 mod poly here (for Karatsuba purposes)
-HashKey_2_k = 16*20 # store XOR of HashKey^2 <<1 mod poly here (for Karatsuba purposes)
-HashKey_3_k = 16*21 # store XOR of HashKey^3 <<1 mod poly here (for Karatsuba purposes)
-HashKey_4_k = 16*22 # store XOR of HashKey^4 <<1 mod poly here (for Karatsuba purposes)
-HashKey_5_k = 16*23 # store XOR of HashKey^5 <<1 mod poly here (for Karatsuba purposes)
-HashKey_6_k = 16*24 # store XOR of HashKey^6 <<1 mod poly here (for Karatsuba purposes)
-HashKey_7_k = 16*25 # store XOR of HashKey^7 <<1 mod poly here (for Karatsuba purposes)
-HashKey_8_k = 16*26 # store XOR of HashKey^8 <<1 mod poly here (for Karatsuba purposes)
+#define AadHash 16*0
+#define AadLen 16*1
+#define InLen (16*1)+8
+#define PBlockEncKey 16*2
+#define OrigIV 16*3
+#define CurCount 16*4
+#define PBlockLen 16*5
+
+HashKey = 16*6 # store HashKey <<1 mod poly here
+HashKey_2 = 16*7 # store HashKey^2 <<1 mod poly here
+HashKey_3 = 16*8 # store HashKey^3 <<1 mod poly here
+HashKey_4 = 16*9 # store HashKey^4 <<1 mod poly here
+HashKey_5 = 16*10 # store HashKey^5 <<1 mod poly here
+HashKey_6 = 16*11 # store HashKey^6 <<1 mod poly here
+HashKey_7 = 16*12 # store HashKey^7 <<1 mod poly here
+HashKey_8 = 16*13 # store HashKey^8 <<1 mod poly here
+HashKey_k = 16*14 # store XOR of HashKey <<1 mod poly here (for Karatsuba purposes)
+HashKey_2_k = 16*15 # store XOR of HashKey^2 <<1 mod poly here (for Karatsuba purposes)
+HashKey_3_k = 16*16 # store XOR of HashKey^3 <<1 mod poly here (for Karatsuba purposes)
+HashKey_4_k = 16*17 # store XOR of HashKey^4 <<1 mod poly here (for Karatsuba purposes)
+HashKey_5_k = 16*18 # store XOR of HashKey^5 <<1 mod poly here (for Karatsuba purposes)
+HashKey_6_k = 16*19 # store XOR of HashKey^6 <<1 mod poly here (for Karatsuba purposes)
+HashKey_7_k = 16*20 # store XOR of HashKey^7 <<1 mod poly here (for Karatsuba purposes)
+HashKey_8_k = 16*21 # store XOR of HashKey^8 <<1 mod poly here (for Karatsuba purposes)
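
The offsets above move the precomputed hash keys out of the AES key structure and into a per-request state block, one 16-byte slot per #define. A hypothetical C view of that layout (field names illustrative; the kernel's struct gcm_context_data is the real definition):

	/* Hypothetical C mirror of the offsets above; hash keys run from
	 * HashKey (16*6) through HashKey_8_k (16*21). */
	struct gcm_context_data_sketch {
		unsigned char      aad_hash[16];              /* AadHash       16*0   */
		unsigned long long aad_length;                /* AadLen        16*1   */
		unsigned long long in_length;                 /* InLen         16*1+8 */
		unsigned char      partial_block_enc_key[16]; /* PBlockEncKey  16*2   */
		unsigned char      orig_iv[16];               /* OrigIV        16*3   */
		unsigned char      current_counter[16];       /* CurCount      16*4   */
		unsigned long long partial_block_len;         /* PBlockLen     16*5   */
		unsigned char      pad[8];
		unsigned char      hash_keys[16][16];         /* HashKey..HashKey_8_k */
	};
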
#define arg1 %rdi
#define arg2 %rsi
@@ -229,6 +216,8 @@ HashKey_8_k = 16*26 # store XOR of HashKey^8 <<1 mod poly here (for Karatsu
#define arg7 STACK_OFFSET+8*1(%r14)
#define arg8 STACK_OFFSET+8*2(%r14)
#define arg9 STACK_OFFSET+8*3(%r14)
+#define arg10 STACK_OFFSET+8*4(%r14)
+#define keysize 2*15*16(arg1)
i = 0
j = 0
@@ -267,19 +256,636 @@ VARIABLE_OFFSET = 16*8
# Utility Macros
################################
+.macro FUNC_SAVE
+ #the number of pushes must equal STACK_OFFSET
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov %rsp, %r14
+
+
+
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp # align rsp to 64 bytes
+.endm
+
+.macro FUNC_RESTORE
+ mov %r14, %rsp
+
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+.endm
+
# Encryption of a single block
-.macro ENCRYPT_SINGLE_BLOCK XMM0
+.macro ENCRYPT_SINGLE_BLOCK REP XMM0
vpxor (arg1), \XMM0, \XMM0
- i = 1
- setreg
-.rep 9
+ i = 1
+ setreg
+.rep \REP
vaesenc 16*i(arg1), \XMM0, \XMM0
- i = (i+1)
- setreg
+ i = (i+1)
+ setreg
.endr
- vaesenclast 16*10(arg1), \XMM0, \XMM0
+ vaesenclast 16*i(arg1), \XMM0, \XMM0
.endm
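
Parameterizing the round count lets one macro serve AES-128/192/256: REP is the number of middle vaesenc rounds, i.e. the total round count minus one, and the new keysize define appears to address the key-length word stored after the expanded encryption and decryption keys (2*15*16 = 480 bytes into the context). A sketch of the mapping, with a hypothetical helper name:

	/* Hypothetical helper: middle-round count (the REP argument above)
	 * derived from the AES key length; the final round is vaesenclast. */
	static int aes_middle_rounds(int key_len_bytes)
	{
		switch (key_len_bytes) {
		case 16: return 9;	/* AES-128: 10 rounds total */
		case 24: return 11;	/* AES-192: 12 rounds total */
		case 32: return 13;	/* AES-256: 14 rounds total */
		default: return -1;
		}
	}
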
+# combined for GCM encrypt and decrypt functions
+# clobbering all xmm registers
+# clobbering r10, r11, r12, r13, r14, r15
+.macro GCM_ENC_DEC INITIAL_BLOCKS GHASH_8_ENCRYPT_8_PARALLEL GHASH_LAST_8 GHASH_MUL ENC_DEC REP
+ vmovdqu AadHash(arg2), %xmm8
+ vmovdqu HashKey(arg2), %xmm13 # xmm13 = HashKey
+ add arg5, InLen(arg2)
+
+ # initialize the data pointer offset as zero
+ xor %r11d, %r11d
+
+ PARTIAL_BLOCK \GHASH_MUL, arg3, arg4, arg5, %r11, %xmm8, \ENC_DEC
+ sub %r11, arg5
+
+ mov arg5, %r13 # save the number of bytes of plaintext/ciphertext
+ and $-16, %r13 # r13 = r13 - (r13 mod 16)
+
+ mov %r13, %r12
+ shr $4, %r12
+ and $7, %r12
+ jz _initial_num_blocks_is_0\@
+
+ cmp $7, %r12
+ je _initial_num_blocks_is_7\@
+ cmp $6, %r12
+ je _initial_num_blocks_is_6\@
+ cmp $5, %r12
+ je _initial_num_blocks_is_5\@
+ cmp $4, %r12
+ je _initial_num_blocks_is_4\@
+ cmp $3, %r12
+ je _initial_num_blocks_is_3\@
+ cmp $2, %r12
+ je _initial_num_blocks_is_2\@
+
+ jmp _initial_num_blocks_is_1\@
+
+_initial_num_blocks_is_7\@:
+ \INITIAL_BLOCKS \REP, 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*7, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_6\@:
+ \INITIAL_BLOCKS \REP, 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*6, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_5\@:
+ \INITIAL_BLOCKS \REP, 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*5, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_4\@:
+ \INITIAL_BLOCKS \REP, 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*4, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_3\@:
+ \INITIAL_BLOCKS \REP, 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*3, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_2\@:
+ \INITIAL_BLOCKS \REP, 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*2, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_1\@:
+ \INITIAL_BLOCKS \REP, 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*1, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_0\@:
+ \INITIAL_BLOCKS \REP, 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+
+
+_initial_blocks_encrypted\@:
+ cmp $0, %r13
+ je _zero_cipher_left\@
+
+ sub $128, %r13
+ je _eight_cipher_left\@
+
+
+
+
+ vmovd %xmm9, %r15d
+ and $255, %r15d
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+
+
+_encrypt_by_8_new\@:
+ cmp $(255-8), %r15d
+ jg _encrypt_by_8\@
+
+
+
+ add $8, %r15b
+ \GHASH_8_ENCRYPT_8_PARALLEL \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC
+ add $128, %r11
+ sub $128, %r13
+ jne _encrypt_by_8_new\@
+
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ jmp _eight_cipher_left\@
+
+_encrypt_by_8\@:
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ add $8, %r15b
+ \GHASH_8_ENCRYPT_8_PARALLEL \REP, %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ add $128, %r11
+ sub $128, %r13
+ jne _encrypt_by_8_new\@
+
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+
+
+
+
+_eight_cipher_left\@:
+ \GHASH_LAST_8 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8
+
+
+_zero_cipher_left\@:
+ vmovdqu %xmm14, AadHash(arg2)
+ vmovdqu %xmm9, CurCount(arg2)
+
+ # check for 0 length
+ mov arg5, %r13
+ and $15, %r13 # r13 = (arg5 mod 16)
+
+ je _multiple_of_16_bytes\@
+
+ # handle the last <16 Byte block separately
+
+ mov %r13, PBlockLen(arg2)
+
+ vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn
+ vmovdqu %xmm9, CurCount(arg2)
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+
+ ENCRYPT_SINGLE_BLOCK \REP, %xmm9 # E(K, Yn)
+ vmovdqu %xmm9, PBlockEncKey(arg2)
+
+ cmp $16, arg5
+ jge _large_enough_update\@
+
+ lea (arg4,%r11,1), %r10
+ mov %r13, %r12
+
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm1
+
+ lea SHIFT_MASK+16(%rip), %r12
+ sub %r13, %r12 # adjust the shuffle mask pointer to be
+ # able to shift 16-r13 bytes (r13 is the
+ # number of bytes in plaintext mod 16)
+
+ jmp _final_ghash_mul\@
+
+_large_enough_update\@:
+ sub $16, %r11
+ add %r13, %r11
+
+ # receive the last <16 Byte block
+ vmovdqu (arg4, %r11, 1), %xmm1
+
+ sub %r13, %r11
+ add $16, %r11
+
+ lea SHIFT_MASK+16(%rip), %r12
+ # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+ # (r13 is the number of bytes in plaintext mod 16)
+ sub %r13, %r12
+ # get the appropriate shuffle mask
+ vmovdqu (%r12), %xmm2
+ # shift right 16-r13 bytes
+ vpshufb %xmm2, %xmm1, %xmm1
+
+_final_ghash_mul\@:
+ .if \ENC_DEC == DEC
+ vmovdqa %xmm1, %xmm2
+ vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
+ vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to
+ # mask out top 16-r13 bytes of xmm9
+ vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
+ vpand %xmm1, %xmm2, %xmm2
+ vpshufb SHUF_MASK(%rip), %xmm2, %xmm2
+ vpxor %xmm2, %xmm14, %xmm14
+
+ vmovdqu %xmm14, AadHash(arg2)
+ .else
+ vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
+ vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to
+ # mask out top 16-r13 bytes of xmm9
+ vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ vpxor %xmm9, %xmm14, %xmm14
+
+ vmovdqu %xmm14, AadHash(arg2)
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext
+ .endif
+
+
+ #############################
+ # output r13 Bytes
+ vmovq %xmm9, %rax
+ cmp $8, %r13
+ jle _less_than_8_bytes_left\@
+
+ mov %rax, (arg3 , %r11)
+ add $8, %r11
+ vpsrldq $8, %xmm9, %xmm9
+ vmovq %xmm9, %rax
+ sub $8, %r13
+
+_less_than_8_bytes_left\@:
+ movb %al, (arg3 , %r11)
+ add $1, %r11
+ shr $8, %rax
+ sub $1, %r13
+ jne _less_than_8_bytes_left\@
+ #############################
+
+_multiple_of_16_bytes\@:
+.endm
+
+
+# GCM_COMPLETE Finishes the tag update with the last partial block
+# Output: Authentication Tag (AUTH_TAG)
+# Clobbers rax, r10-r12, and xmm0, xmm1, xmm5-xmm15
+.macro GCM_COMPLETE GHASH_MUL REP AUTH_TAG AUTH_TAG_LEN
+ vmovdqu AadHash(arg2), %xmm14
+ vmovdqu HashKey(arg2), %xmm13
+
+ mov PBlockLen(arg2), %r12
+ cmp $0, %r12
+ je _partial_done\@
+
+ #GHASH computation for the last <16 Byte block
+ \GHASH_MUL %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+
+_partial_done\@:
+ mov AadLen(arg2), %r12 # r12 = aadLen (number of bytes)
+ shl $3, %r12 # convert into number of bits
+ vmovd %r12d, %xmm15 # len(A) in xmm15
+
+ mov InLen(arg2), %r12
+ shl $3, %r12 # len(C) in bits (*128)
+ vmovq %r12, %xmm1
+ vpslldq $8, %xmm15, %xmm15 # xmm15 = len(A)|| 0x0000000000000000
+ vpxor %xmm1, %xmm15, %xmm15 # xmm15 = len(A)||len(C)
+
+ vpxor %xmm15, %xmm14, %xmm14
+ \GHASH_MUL %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation
+ vpshufb SHUF_MASK(%rip), %xmm14, %xmm14 # perform a 16Byte swap
+
+ vmovdqu OrigIV(arg2), %xmm9
+
+ ENCRYPT_SINGLE_BLOCK \REP, %xmm9 # E(K, Y0)
+
+ vpxor %xmm14, %xmm9, %xmm9
+
+
+
+_return_T\@:
+ mov \AUTH_TAG, %r10 # r10 = authTag
+ mov \AUTH_TAG_LEN, %r11 # r11 = auth_tag_len
+
+ cmp $16, %r11
+ je _T_16\@
+
+ cmp $8, %r11
+ jl _T_4\@
+
+_T_8\@:
+ vmovq %xmm9, %rax
+ mov %rax, (%r10)
+ add $8, %r10
+ sub $8, %r11
+ vpsrldq $8, %xmm9, %xmm9
+ cmp $0, %r11
+ je _return_T_done\@
+_T_4\@:
+ vmovd %xmm9, %eax
+ mov %eax, (%r10)
+ add $4, %r10
+ sub $4, %r11
+ vpsrldq $4, %xmm9, %xmm9
+ cmp $0, %r11
+ je _return_T_done\@
+_T_123\@:
+ vmovd %xmm9, %eax
+ cmp $2, %r11
+ jl _T_1\@
+ mov %ax, (%r10)
+ cmp $2, %r11
+ je _return_T_done\@
+ add $2, %r10
+ sar $16, %eax
+_T_1\@:
+ mov %al, (%r10)
+ jmp _return_T_done\@
+
+_T_16\@:
+ vmovdqu %xmm9, (%r10)
+
+_return_T_done\@:
+.endm
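
The _T_16/_T_8/_T_4/_T_123 labels above stream the tag out of %xmm9 in 8-, 4-, 2- and 1-byte chunks because the value lives in a register; semantically it is just a bounded copy of the requested tag length. A plain-C equivalent:

	#include <string.h>

	/* Semantic equivalent of the chunked tag write-out above: copy the
	 * first auth_tag_len bytes of the 16-byte computed tag. */
	static void write_auth_tag(unsigned char *out,
				   const unsigned char tag[16],
				   unsigned int auth_tag_len)	/* 1..16 */
	{
		memcpy(out, tag, auth_tag_len > 16 ? 16 : auth_tag_len);
	}
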
+
+.macro CALC_AAD_HASH GHASH_MUL AAD AADLEN T1 T2 T3 T4 T5 T6 T7 T8
+
+ mov \AAD, %r10 # r10 = AAD
+ mov \AADLEN, %r12 # r12 = aadLen
+
+
+ mov %r12, %r11
+
+ vpxor \T8, \T8, \T8
+ vpxor \T7, \T7, \T7
+ cmp $16, %r11
+ jl _get_AAD_rest8\@
+_get_AAD_blocks\@:
+ vmovdqu (%r10), \T7
+ vpshufb SHUF_MASK(%rip), \T7, \T7
+ vpxor \T7, \T8, \T8
+ \GHASH_MUL \T8, \T2, \T1, \T3, \T4, \T5, \T6
+ add $16, %r10
+ sub $16, %r12
+ sub $16, %r11
+ cmp $16, %r11
+ jge _get_AAD_blocks\@
+ vmovdqu \T8, \T7
+ cmp $0, %r11
+ je _get_AAD_done\@
+
+ vpxor \T7, \T7, \T7
+
+ /* read the last <16B of AAD. since we have at least 4B of
+ data right after the AAD (the ICV, and maybe some CT), we can
+ read 4B/8B blocks safely, and then get rid of the extra stuff */
+_get_AAD_rest8\@:
+ cmp $4, %r11
+ jle _get_AAD_rest4\@
+ movq (%r10), \T1
+ add $8, %r10
+ sub $8, %r11
+ vpslldq $8, \T1, \T1
+ vpsrldq $8, \T7, \T7
+ vpxor \T1, \T7, \T7
+ jmp _get_AAD_rest8\@
+_get_AAD_rest4\@:
+ cmp $0, %r11
+ jle _get_AAD_rest0\@
+ mov (%r10), %eax
+ movq %rax, \T1
+ add $4, %r10
+ sub $4, %r11
+ vpslldq $12, \T1, \T1
+ vpsrldq $4, \T7, \T7
+ vpxor \T1, \T7, \T7
+_get_AAD_rest0\@:
+ /* finalize: shift out the extra bytes we read, and align
+ left. since pslldq can only shift by an immediate, we use
+ vpshufb and an array of shuffle masks */
+ movq %r12, %r11
+ salq $4, %r11
+ vmovdqu aad_shift_arr(%r11), \T1
+ vpshufb \T1, \T7, \T7
+_get_AAD_rest_final\@:
+ vpshufb SHUF_MASK(%rip), \T7, \T7
+ vpxor \T8, \T7, \T7
+ \GHASH_MUL \T7, \T2, \T1, \T3, \T4, \T5, \T6
+
+_get_AAD_done\@:
+ vmovdqu \T7, AadHash(arg2)
+.endm
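
CALC_AAD_HASH folds the additional authenticated data into the GHASH state: full 16-byte blocks first, then a zero-padded tail (the assembly reads the tail in 4/8-byte chunks because at least 4 bytes of ICV/ciphertext follow the AAD). A sketch with ghash_mul() as an assumed GF(2^128) multiply-by-HashKey primitive:

	#include <string.h>

	/* Assumed primitive: state = state * HashKey in GF(2^128). */
	void ghash_mul(unsigned char state[16]);

	static void calc_aad_hash(unsigned char hash[16],
				  const unsigned char *aad, unsigned int len)
	{
		unsigned char block[16];
		unsigned int i;

		while (len >= 16) {
			for (i = 0; i < 16; i++)
				hash[i] ^= aad[i];	/* fold in one block */
			ghash_mul(hash);
			aad += 16;
			len -= 16;
		}
		if (len) {
			memset(block, 0, sizeof(block));
			memcpy(block, aad, len);	/* zero-padded tail */
			for (i = 0; i < 16; i++)
				hash[i] ^= block[i];
			ghash_mul(hash);
		}
	}
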
+
+.macro INIT GHASH_MUL PRECOMPUTE
+ mov arg6, %r11
+ mov %r11, AadLen(arg2) # ctx_data.aad_length = aad_length
+ xor %r11d, %r11d
+ mov %r11, InLen(arg2) # ctx_data.in_length = 0
+
+ mov %r11, PBlockLen(arg2) # ctx_data.partial_block_length = 0
+ mov %r11, PBlockEncKey(arg2) # ctx_data.partial_block_enc_key = 0
+ mov arg3, %rax
+ movdqu (%rax), %xmm0
+ movdqu %xmm0, OrigIV(arg2) # ctx_data.orig_IV = iv
+
+ vpshufb SHUF_MASK(%rip), %xmm0, %xmm0
+ movdqu %xmm0, CurCount(arg2) # ctx_data.current_counter = iv
+
+ vmovdqu (arg4), %xmm6 # xmm6 = HashKey
+
+ vpshufb SHUF_MASK(%rip), %xmm6, %xmm6
+ ############### PRECOMPUTATION of HashKey<<1 mod poly from the HashKey
+ vmovdqa %xmm6, %xmm2
+ vpsllq $1, %xmm6, %xmm6
+ vpsrlq $63, %xmm2, %xmm2
+ vmovdqa %xmm2, %xmm1
+ vpslldq $8, %xmm2, %xmm2
+ vpsrldq $8, %xmm1, %xmm1
+ vpor %xmm2, %xmm6, %xmm6
+ #reduction
+ vpshufd $0b00100100, %xmm1, %xmm2
+ vpcmpeqd TWOONE(%rip), %xmm2, %xmm2
+ vpand POLY(%rip), %xmm2, %xmm2
+ vpxor %xmm2, %xmm6, %xmm6 # xmm6 holds the HashKey<<1 mod poly
+ #######################################################################
+ vmovdqu %xmm6, HashKey(arg2) # store HashKey<<1 mod poly
+
+ CALC_AAD_HASH \GHASH_MUL, arg5, arg6, %xmm2, %xmm6, %xmm3, %xmm4, %xmm5, %xmm7, %xmm1, %xmm0
+
+ \PRECOMPUTE %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5
+.endm
+
+
+# Reads DLEN bytes starting at DPTR and stores in XMMDst
+# where 0 < DLEN < 16
+# Clobbers %rax, DLEN
+.macro READ_PARTIAL_BLOCK DPTR DLEN XMMDst
+ vpxor \XMMDst, \XMMDst, \XMMDst
+
+ cmp $8, \DLEN
+ jl _read_lt8_\@
+ mov (\DPTR), %rax
+ vpinsrq $0, %rax, \XMMDst, \XMMDst
+ sub $8, \DLEN
+ jz _done_read_partial_block_\@
+ xor %eax, %eax
+_read_next_byte_\@:
+ shl $8, %rax
+ mov 7(\DPTR, \DLEN, 1), %al
+ dec \DLEN
+ jnz _read_next_byte_\@
+ vpinsrq $1, %rax, \XMMDst, \XMMDst
+ jmp _done_read_partial_block_\@
+_read_lt8_\@:
+ xor %eax, %eax
+_read_next_byte_lt8_\@:
+ shl $8, %rax
+ mov -1(\DPTR, \DLEN, 1), %al
+ dec \DLEN
+ jnz _read_next_byte_lt8_\@
+ vpinsrq $0, %rax, \XMMDst, \XMMDst
+_done_read_partial_block_\@:
+.endm
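
READ_PARTIAL_BLOCK assembles a 1..15-byte block without reading past the end of the source: a full 8-byte load when at least 8 bytes are available, then single bytes from the end of the buffer downward. A C rendering of the same strategy:

	#include <stdint.h>
	#include <string.h>

	/* C rendering of READ_PARTIAL_BLOCK: fill dst[16] from src without
	 * touching memory beyond src[len-1] (1 <= len <= 15). */
	static void read_partial_block(uint8_t dst[16], const uint8_t *src, int len)
	{
		uint64_t lo = 0, hi = 0;
		int j;

		if (len >= 8) {
			memcpy(&lo, src, 8);		/* 8 bytes are available */
			for (j = len - 1; j >= 8; j--)
				hi = (hi << 8) | src[j];	/* tail, high to low */
		} else {
			for (j = len - 1; j >= 0; j--)
				lo = (lo << 8) | src[j];
		}
		memcpy(dst, &lo, 8);
		memcpy(dst + 8, &hi, 8);
	}
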
+
+# PARTIAL_BLOCK: Handles encryption/decryption and tag computation for
+# partial blocks between update calls.
+# Requires the input data to be at least 1 byte long due to READ_PARTIAL_BLOCK
+# Outputs encrypted bytes, and updates the hash and partial info in gcm_context_data
+# Clobbers rax, r10, r12, r13, xmm0-6, xmm9-13
+.macro PARTIAL_BLOCK GHASH_MUL CYPH_PLAIN_OUT PLAIN_CYPH_IN PLAIN_CYPH_LEN DATA_OFFSET \
+ AAD_HASH ENC_DEC
+ mov PBlockLen(arg2), %r13
+ cmp $0, %r13
+ je _partial_block_done_\@ # Leave Macro if no partial blocks
	# Read in input data without over-reading
+ cmp $16, \PLAIN_CYPH_LEN
+ jl _fewer_than_16_bytes_\@
+ vmovdqu (\PLAIN_CYPH_IN), %xmm1 # If more than 16 bytes, just fill xmm
+ jmp _data_read_\@
+
+_fewer_than_16_bytes_\@:
+ lea (\PLAIN_CYPH_IN, \DATA_OFFSET, 1), %r10
+ mov \PLAIN_CYPH_LEN, %r12
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm1
+
+ mov PBlockLen(arg2), %r13
+
+_data_read_\@: # Finished reading in data
+
+ vmovdqu PBlockEncKey(arg2), %xmm9
+ vmovdqu HashKey(arg2), %xmm13
+
+ lea SHIFT_MASK(%rip), %r12
+
+ # adjust the shuffle mask pointer to be able to shift r13 bytes
	# (r13 is the number of bytes already in the partial block)
+ add %r13, %r12
+ vmovdqu (%r12), %xmm2 # get the appropriate shuffle mask
+ vpshufb %xmm2, %xmm9, %xmm9 # shift right r13 bytes
+
+.if \ENC_DEC == DEC
+ vmovdqa %xmm1, %xmm3
+ pxor %xmm1, %xmm9 # Cyphertext XOR E(K, Yn)
+
+ mov \PLAIN_CYPH_LEN, %r10
+ add %r13, %r10
+ # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
+ sub $16, %r10
	# Determine if the partial block is not being filled and
+ # shift mask accordingly
+ jge _no_extra_mask_1_\@
+ sub %r10, %r12
+_no_extra_mask_1_\@:
+
+ vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ # get the appropriate mask to mask out bottom r13 bytes of xmm9
+ vpand %xmm1, %xmm9, %xmm9 # mask out bottom r13 bytes of xmm9
+
+ vpand %xmm1, %xmm3, %xmm3
+ vmovdqa SHUF_MASK(%rip), %xmm10
+ vpshufb %xmm10, %xmm3, %xmm3
+ vpshufb %xmm2, %xmm3, %xmm3
+ vpxor %xmm3, \AAD_HASH, \AAD_HASH
+
+ cmp $0, %r10
+ jl _partial_incomplete_1_\@
+
+ # GHASH computation for the last <16 Byte block
+ \GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+ xor %eax,%eax
+
+ mov %rax, PBlockLen(arg2)
+ jmp _dec_done_\@
+_partial_incomplete_1_\@:
+ add \PLAIN_CYPH_LEN, PBlockLen(arg2)
+_dec_done_\@:
+ vmovdqu \AAD_HASH, AadHash(arg2)
+.else
+ vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
+
+ mov \PLAIN_CYPH_LEN, %r10
+ add %r13, %r10
+ # Set r10 to be the amount of data left in CYPH_PLAIN_IN after filling
+ sub $16, %r10
	# Determine if the partial block is not being filled and
+ # shift mask accordingly
+ jge _no_extra_mask_2_\@
+ sub %r10, %r12
+_no_extra_mask_2_\@:
+
+ vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ # get the appropriate mask to mask out bottom r13 bytes of xmm9
+ vpand %xmm1, %xmm9, %xmm9
+
+ vmovdqa SHUF_MASK(%rip), %xmm1
+ vpshufb %xmm1, %xmm9, %xmm9
+ vpshufb %xmm2, %xmm9, %xmm9
+ vpxor %xmm9, \AAD_HASH, \AAD_HASH
+
+ cmp $0, %r10
+ jl _partial_incomplete_2_\@
+
+ # GHASH computation for the last <16 Byte block
+ \GHASH_MUL \AAD_HASH, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+ xor %eax,%eax
+
+ mov %rax, PBlockLen(arg2)
+ jmp _encode_done_\@
+_partial_incomplete_2_\@:
+ add \PLAIN_CYPH_LEN, PBlockLen(arg2)
+_encode_done_\@:
+ vmovdqu \AAD_HASH, AadHash(arg2)
+
+ vmovdqa SHUF_MASK(%rip), %xmm10
+ # shuffle xmm9 back to output as ciphertext
+ vpshufb %xmm10, %xmm9, %xmm9
+ vpshufb %xmm2, %xmm9, %xmm9
+.endif
+ # output encrypted Bytes
+ cmp $0, %r10
+ jl _partial_fill_\@
+ mov %r13, %r12
+ mov $16, %r13
+ # Set r13 to be the number of bytes to write out
+ sub %r12, %r13
+ jmp _count_set_\@
+_partial_fill_\@:
+ mov \PLAIN_CYPH_LEN, %r13
+_count_set_\@:
+ vmovdqa %xmm9, %xmm0
+ vmovq %xmm0, %rax
+ cmp $8, %r13
+ jle _less_than_8_bytes_left_\@
+
+ mov %rax, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
+ add $8, \DATA_OFFSET
+ psrldq $8, %xmm0
+ vmovq %xmm0, %rax
+ sub $8, %r13
+_less_than_8_bytes_left_\@:
+ movb %al, (\CYPH_PLAIN_OUT, \DATA_OFFSET, 1)
+ add $1, \DATA_OFFSET
+ shr $8, %rax
+ sub $1, %r13
+ jne _less_than_8_bytes_left_\@
+_partial_block_done_\@:
+.endm # PARTIAL_BLOCK
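
PARTIAL_BLOCK is what makes scatter-gather (multi-update) operation work: keystream bytes left over from the previous update are XORed against the start of the new input, and a completed block is folded into the hash. A simplified C sketch of the leftover-keystream half (the GHASH fold of a completed block is omitted):

	/* Simplified sketch: consume the saved E(K, Yn) keystream block across
	 * update() calls; GHASH of a completed block is omitted for brevity. */
	struct partial_state {
		unsigned char enc_key[16];	/* saved E(K, Yn)               */
		unsigned int  pblock_len;	/* keystream bytes already used */
	};

	static unsigned int partial_block(struct partial_state *st,
					  unsigned char *out,
					  const unsigned char *in,
					  unsigned int len)
	{
		unsigned int used = 0;

		while (st->pblock_len && used < len) {
			out[used] = in[used] ^ st->enc_key[st->pblock_len];
			st->pblock_len = (st->pblock_len + 1) & 15;
			used++;
		}
		return used;	/* caller continues with full blocks at in+used */
	}
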
+
#ifdef CONFIG_AS_AVX
###############################################################################
# GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
@@ -341,49 +947,49 @@ VARIABLE_OFFSET = 16*8
vpshufd $0b01001110, \T5, \T1
vpxor \T5, \T1, \T1
- vmovdqa \T1, HashKey_k(arg1)
+ vmovdqu \T1, HashKey_k(arg2)
GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
- vmovdqa \T5, HashKey_2(arg1) # [HashKey_2] = HashKey^2<<1 mod poly
+ vmovdqu \T5, HashKey_2(arg2) # [HashKey_2] = HashKey^2<<1 mod poly
vpshufd $0b01001110, \T5, \T1
vpxor \T5, \T1, \T1
- vmovdqa \T1, HashKey_2_k(arg1)
+ vmovdqu \T1, HashKey_2_k(arg2)
GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly
- vmovdqa \T5, HashKey_3(arg1)
+ vmovdqu \T5, HashKey_3(arg2)
vpshufd $0b01001110, \T5, \T1
vpxor \T5, \T1, \T1
- vmovdqa \T1, HashKey_3_k(arg1)
+ vmovdqu \T1, HashKey_3_k(arg2)
GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly
- vmovdqa \T5, HashKey_4(arg1)
+ vmovdqu \T5, HashKey_4(arg2)
vpshufd $0b01001110, \T5, \T1
vpxor \T5, \T1, \T1
- vmovdqa \T1, HashKey_4_k(arg1)
+ vmovdqu \T1, HashKey_4_k(arg2)
GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly
- vmovdqa \T5, HashKey_5(arg1)
+ vmovdqu \T5, HashKey_5(arg2)
vpshufd $0b01001110, \T5, \T1
vpxor \T5, \T1, \T1
- vmovdqa \T1, HashKey_5_k(arg1)
+ vmovdqu \T1, HashKey_5_k(arg2)
GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly
- vmovdqa \T5, HashKey_6(arg1)
+ vmovdqu \T5, HashKey_6(arg2)
vpshufd $0b01001110, \T5, \T1
vpxor \T5, \T1, \T1
- vmovdqa \T1, HashKey_6_k(arg1)
+ vmovdqu \T1, HashKey_6_k(arg2)
GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly
- vmovdqa \T5, HashKey_7(arg1)
+ vmovdqu \T5, HashKey_7(arg2)
vpshufd $0b01001110, \T5, \T1
vpxor \T5, \T1, \T1
- vmovdqa \T1, HashKey_7_k(arg1)
+ vmovdqu \T1, HashKey_7_k(arg2)
GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly
- vmovdqa \T5, HashKey_8(arg1)
+ vmovdqu \T5, HashKey_8(arg2)
vpshufd $0b01001110, \T5, \T1
vpxor \T5, \T1, \T1
- vmovdqa \T1, HashKey_8_k(arg1)
+ vmovdqu \T1, HashKey_8_k(arg2)
.endm
@@ -392,84 +998,15 @@ VARIABLE_OFFSET = 16*8
## num_initial_blocks = b mod 4#
## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext
## r10, r11, r12, rax are clobbered
-## arg1, arg2, arg3, r14 are used as a pointer only, not modified
+## arg1, arg3, arg4, r14 are used as a pointer only, not modified
-.macro INITIAL_BLOCKS_AVX num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC
+.macro INITIAL_BLOCKS_AVX REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC
i = (8-\num_initial_blocks)
- j = 0
setreg
-
- mov arg6, %r10 # r10 = AAD
- mov arg7, %r12 # r12 = aadLen
-
-
- mov %r12, %r11
-
- vpxor reg_j, reg_j, reg_j
- vpxor reg_i, reg_i, reg_i
- cmp $16, %r11
- jl _get_AAD_rest8\@
-_get_AAD_blocks\@:
- vmovdqu (%r10), reg_i
- vpshufb SHUF_MASK(%rip), reg_i, reg_i
- vpxor reg_i, reg_j, reg_j
- GHASH_MUL_AVX reg_j, \T2, \T1, \T3, \T4, \T5, \T6
- add $16, %r10
- sub $16, %r12
- sub $16, %r11
- cmp $16, %r11
- jge _get_AAD_blocks\@
- vmovdqu reg_j, reg_i
- cmp $0, %r11
- je _get_AAD_done\@
-
- vpxor reg_i, reg_i, reg_i
-
- /* read the last <16B of AAD. since we have at least 4B of
- data right after the AAD (the ICV, and maybe some CT), we can
- read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\@:
- cmp $4, %r11
- jle _get_AAD_rest4\@
- movq (%r10), \T1
- add $8, %r10
- sub $8, %r11
- vpslldq $8, \T1, \T1
- vpsrldq $8, reg_i, reg_i
- vpxor \T1, reg_i, reg_i
- jmp _get_AAD_rest8\@
-_get_AAD_rest4\@:
- cmp $0, %r11
- jle _get_AAD_rest0\@
- mov (%r10), %eax
- movq %rax, \T1
- add $4, %r10
- sub $4, %r11
- vpslldq $12, \T1, \T1
- vpsrldq $4, reg_i, reg_i
- vpxor \T1, reg_i, reg_i
-_get_AAD_rest0\@:
- /* finalize: shift out the extra bytes we read, and align
- left. since pslldq can only shift by an immediate, we use
- vpshufb and an array of shuffle masks */
- movq %r12, %r11
- salq $4, %r11
- movdqu aad_shift_arr(%r11), \T1
- vpshufb \T1, reg_i, reg_i
-_get_AAD_rest_final\@:
- vpshufb SHUF_MASK(%rip), reg_i, reg_i
- vpxor reg_j, reg_i, reg_i
- GHASH_MUL_AVX reg_i, \T2, \T1, \T3, \T4, \T5, \T6
-
-_get_AAD_done\@:
- # initialize the data pointer offset as zero
- xor %r11d, %r11d
+ vmovdqu AadHash(arg2), reg_i
# start AES for num_initial_blocks blocks
- mov arg5, %rax # rax = *Y0
- vmovdqu (%rax), \CTR # CTR = Y0
- vpshufb SHUF_MASK(%rip), \CTR, \CTR
-
+ vmovdqu CurCount(arg2), \CTR
i = (9-\num_initial_blocks)
setreg
@@ -490,10 +1027,10 @@ _get_AAD_done\@:
setreg
.endr
- j = 1
- setreg
-.rep 9
- vmovdqa 16*j(arg1), \T_key
+ j = 1
+ setreg
+.rep \REP
+ vmovdqa 16*j(arg1), \T_key
i = (9-\num_initial_blocks)
setreg
.rep \num_initial_blocks
@@ -502,12 +1039,11 @@ _get_AAD_done\@:
setreg
.endr
- j = (j+1)
- setreg
+ j = (j+1)
+ setreg
.endr
-
- vmovdqa 16*10(arg1), \T_key
+ vmovdqa 16*j(arg1), \T_key
i = (9-\num_initial_blocks)
setreg
.rep \num_initial_blocks
@@ -519,9 +1055,9 @@ _get_AAD_done\@:
i = (9-\num_initial_blocks)
setreg
.rep \num_initial_blocks
- vmovdqu (arg3, %r11), \T1
+ vmovdqu (arg4, %r11), \T1
vpxor \T1, reg_i, reg_i
- vmovdqu reg_i, (arg2 , %r11) # write back ciphertext for num_initial_blocks blocks
+ vmovdqu reg_i, (arg3 , %r11) # write back ciphertext for num_initial_blocks blocks
add $16, %r11
.if \ENC_DEC == DEC
vmovdqa \T1, reg_i
@@ -595,9 +1131,9 @@ _get_AAD_done\@:
vpxor \T_key, \XMM7, \XMM7
vpxor \T_key, \XMM8, \XMM8
- i = 1
- setreg
-.rep 9 # do 9 rounds
+ i = 1
+ setreg
+.rep \REP # do REP rounds
vmovdqa 16*i(arg1), \T_key
vaesenc \T_key, \XMM1, \XMM1
vaesenc \T_key, \XMM2, \XMM2
@@ -607,11 +1143,10 @@ _get_AAD_done\@:
vaesenc \T_key, \XMM6, \XMM6
vaesenc \T_key, \XMM7, \XMM7
vaesenc \T_key, \XMM8, \XMM8
- i = (i+1)
- setreg
+ i = (i+1)
+ setreg
.endr
-
vmovdqa 16*i(arg1), \T_key
vaesenclast \T_key, \XMM1, \XMM1
vaesenclast \T_key, \XMM2, \XMM2
@@ -622,58 +1157,58 @@ _get_AAD_done\@:
vaesenclast \T_key, \XMM7, \XMM7
vaesenclast \T_key, \XMM8, \XMM8
- vmovdqu (arg3, %r11), \T1
+ vmovdqu (arg4, %r11), \T1
vpxor \T1, \XMM1, \XMM1
- vmovdqu \XMM1, (arg2 , %r11)
+ vmovdqu \XMM1, (arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM1
.endif
- vmovdqu 16*1(arg3, %r11), \T1
+ vmovdqu 16*1(arg4, %r11), \T1
vpxor \T1, \XMM2, \XMM2
- vmovdqu \XMM2, 16*1(arg2 , %r11)
+ vmovdqu \XMM2, 16*1(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM2
.endif
- vmovdqu 16*2(arg3, %r11), \T1
+ vmovdqu 16*2(arg4, %r11), \T1
vpxor \T1, \XMM3, \XMM3
- vmovdqu \XMM3, 16*2(arg2 , %r11)
+ vmovdqu \XMM3, 16*2(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM3
.endif
- vmovdqu 16*3(arg3, %r11), \T1
+ vmovdqu 16*3(arg4, %r11), \T1
vpxor \T1, \XMM4, \XMM4
- vmovdqu \XMM4, 16*3(arg2 , %r11)
+ vmovdqu \XMM4, 16*3(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM4
.endif
- vmovdqu 16*4(arg3, %r11), \T1
+ vmovdqu 16*4(arg4, %r11), \T1
vpxor \T1, \XMM5, \XMM5
- vmovdqu \XMM5, 16*4(arg2 , %r11)
+ vmovdqu \XMM5, 16*4(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM5
.endif
- vmovdqu 16*5(arg3, %r11), \T1
+ vmovdqu 16*5(arg4, %r11), \T1
vpxor \T1, \XMM6, \XMM6
- vmovdqu \XMM6, 16*5(arg2 , %r11)
+ vmovdqu \XMM6, 16*5(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM6
.endif
- vmovdqu 16*6(arg3, %r11), \T1
+ vmovdqu 16*6(arg4, %r11), \T1
vpxor \T1, \XMM7, \XMM7
- vmovdqu \XMM7, 16*6(arg2 , %r11)
+ vmovdqu \XMM7, 16*6(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM7
.endif
- vmovdqu 16*7(arg3, %r11), \T1
+ vmovdqu 16*7(arg4, %r11), \T1
vpxor \T1, \XMM8, \XMM8
- vmovdqu \XMM8, 16*7(arg2 , %r11)
+ vmovdqu \XMM8, 16*7(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM8
.endif
@@ -698,9 +1233,9 @@ _initial_blocks_done\@:
# encrypt 8 blocks at a time
# ghash the 8 previously encrypted ciphertext blocks
-# arg1, arg2, arg3 are used as pointers only, not modified
+# arg1, arg3, arg4 are used as pointers only, not modified
# r11 is the data offset value
-.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
+.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
vmovdqa \XMM1, \T2
vmovdqa \XMM2, TMP2(%rsp)
@@ -784,14 +1319,14 @@ _initial_blocks_done\@:
#######################################################################
- vmovdqa HashKey_8(arg1), \T5
+ vmovdqu HashKey_8(arg2), \T5
vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1
vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0
vpshufd $0b01001110, \T2, \T6
vpxor \T2, \T6, \T6
- vmovdqa HashKey_8_k(arg1), \T5
+ vmovdqu HashKey_8_k(arg2), \T5
vpclmulqdq $0x00, \T5, \T6, \T6
vmovdqu 16*3(arg1), \T1
@@ -805,7 +1340,7 @@ _initial_blocks_done\@:
vaesenc \T1, \XMM8, \XMM8
vmovdqa TMP2(%rsp), \T1
- vmovdqa HashKey_7(arg1), \T5
+ vmovdqu HashKey_7(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
vpclmulqdq $0x00, \T5, \T1, \T3
@@ -813,7 +1348,7 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \T1, \T3
vpxor \T1, \T3, \T3
- vmovdqa HashKey_7_k(arg1), \T5
+ vmovdqu HashKey_7_k(arg2), \T5
vpclmulqdq $0x10, \T5, \T3, \T3
vpxor \T3, \T6, \T6
@@ -830,7 +1365,7 @@ _initial_blocks_done\@:
#######################################################################
vmovdqa TMP3(%rsp), \T1
- vmovdqa HashKey_6(arg1), \T5
+ vmovdqu HashKey_6(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
vpclmulqdq $0x00, \T5, \T1, \T3
@@ -838,7 +1373,7 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \T1, \T3
vpxor \T1, \T3, \T3
- vmovdqa HashKey_6_k(arg1), \T5
+ vmovdqu HashKey_6_k(arg2), \T5
vpclmulqdq $0x10, \T5, \T3, \T3
vpxor \T3, \T6, \T6
@@ -853,7 +1388,7 @@ _initial_blocks_done\@:
vaesenc \T1, \XMM8, \XMM8
vmovdqa TMP4(%rsp), \T1
- vmovdqa HashKey_5(arg1), \T5
+ vmovdqu HashKey_5(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
vpclmulqdq $0x00, \T5, \T1, \T3
@@ -861,7 +1396,7 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \T1, \T3
vpxor \T1, \T3, \T3
- vmovdqa HashKey_5_k(arg1), \T5
+ vmovdqu HashKey_5_k(arg2), \T5
vpclmulqdq $0x10, \T5, \T3, \T3
vpxor \T3, \T6, \T6
@@ -877,7 +1412,7 @@ _initial_blocks_done\@:
vmovdqa TMP5(%rsp), \T1
- vmovdqa HashKey_4(arg1), \T5
+ vmovdqu HashKey_4(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
vpclmulqdq $0x00, \T5, \T1, \T3
@@ -885,7 +1420,7 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \T1, \T3
vpxor \T1, \T3, \T3
- vmovdqa HashKey_4_k(arg1), \T5
+ vmovdqu HashKey_4_k(arg2), \T5
vpclmulqdq $0x10, \T5, \T3, \T3
vpxor \T3, \T6, \T6
@@ -900,7 +1435,7 @@ _initial_blocks_done\@:
vaesenc \T1, \XMM8, \XMM8
vmovdqa TMP6(%rsp), \T1
- vmovdqa HashKey_3(arg1), \T5
+ vmovdqu HashKey_3(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
vpclmulqdq $0x00, \T5, \T1, \T3
@@ -908,7 +1443,7 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \T1, \T3
vpxor \T1, \T3, \T3
- vmovdqa HashKey_3_k(arg1), \T5
+ vmovdqu HashKey_3_k(arg2), \T5
vpclmulqdq $0x10, \T5, \T3, \T3
vpxor \T3, \T6, \T6
@@ -924,7 +1459,7 @@ _initial_blocks_done\@:
vaesenc \T1, \XMM8, \XMM8
vmovdqa TMP7(%rsp), \T1
- vmovdqa HashKey_2(arg1), \T5
+ vmovdqu HashKey_2(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
vpclmulqdq $0x00, \T5, \T1, \T3
@@ -932,7 +1467,7 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \T1, \T3
vpxor \T1, \T3, \T3
- vmovdqa HashKey_2_k(arg1), \T5
+ vmovdqu HashKey_2_k(arg2), \T5
vpclmulqdq $0x10, \T5, \T3, \T3
vpxor \T3, \T6, \T6
@@ -949,7 +1484,7 @@ _initial_blocks_done\@:
vaesenc \T5, \XMM8, \XMM8
vmovdqa TMP8(%rsp), \T1
- vmovdqa HashKey(arg1), \T5
+ vmovdqu HashKey(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
vpclmulqdq $0x00, \T5, \T1, \T3
@@ -957,7 +1492,7 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \T1, \T3
vpxor \T1, \T3, \T3
- vmovdqa HashKey_k(arg1), \T5
+ vmovdqu HashKey_k(arg2), \T5
vpclmulqdq $0x10, \T5, \T3, \T3
vpxor \T3, \T6, \T6
@@ -966,17 +1501,35 @@ _initial_blocks_done\@:
vmovdqu 16*10(arg1), \T5
+ i = 11
+ setreg
+.rep (\REP-9)
+
+ vaesenc \T5, \XMM1, \XMM1
+ vaesenc \T5, \XMM2, \XMM2
+ vaesenc \T5, \XMM3, \XMM3
+ vaesenc \T5, \XMM4, \XMM4
+ vaesenc \T5, \XMM5, \XMM5
+ vaesenc \T5, \XMM6, \XMM6
+ vaesenc \T5, \XMM7, \XMM7
+ vaesenc \T5, \XMM8, \XMM8
+
+ vmovdqu 16*i(arg1), \T5
+ i = i + 1
+ setreg
+.endr
+
i = 0
j = 1
setreg
.rep 8
- vpxor 16*i(arg3, %r11), \T5, \T2
+ vpxor 16*i(arg4, %r11), \T5, \T2
.if \ENC_DEC == ENC
vaesenclast \T2, reg_j, reg_j
.else
vaesenclast \T2, reg_j, \T3
- vmovdqu 16*i(arg3, %r11), reg_j
- vmovdqu \T3, 16*i(arg2, %r11)
+ vmovdqu 16*i(arg4, %r11), reg_j
+ vmovdqu \T3, 16*i(arg3, %r11)
.endif
i = (i+1)
j = (j+1)
@@ -1008,14 +1561,14 @@ _initial_blocks_done\@:
vpxor \T2, \T7, \T7 # first phase of the reduction complete
#######################################################################
.if \ENC_DEC == ENC
- vmovdqu \XMM1, 16*0(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM2, 16*1(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM3, 16*2(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM4, 16*3(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM5, 16*4(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM6, 16*5(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM7, 16*6(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM8, 16*7(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM1, 16*0(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM2, 16*1(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM3, 16*2(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM4, 16*3(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM5, 16*4(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM6, 16*5(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM7, 16*6(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM8, 16*7(arg3,%r11) # Write to the Ciphertext buffer
.endif
#######################################################################
@@ -1056,25 +1609,25 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \XMM1, \T2
vpxor \XMM1, \T2, \T2
- vmovdqa HashKey_8(arg1), \T5
+ vmovdqu HashKey_8(arg2), \T5
vpclmulqdq $0x11, \T5, \XMM1, \T6
vpclmulqdq $0x00, \T5, \XMM1, \T7
- vmovdqa HashKey_8_k(arg1), \T3
+ vmovdqu HashKey_8_k(arg2), \T3
vpclmulqdq $0x00, \T3, \T2, \XMM1
######################
vpshufd $0b01001110, \XMM2, \T2
vpxor \XMM2, \T2, \T2
- vmovdqa HashKey_7(arg1), \T5
+ vmovdqu HashKey_7(arg2), \T5
vpclmulqdq $0x11, \T5, \XMM2, \T4
vpxor \T4, \T6, \T6
vpclmulqdq $0x00, \T5, \XMM2, \T4
vpxor \T4, \T7, \T7
- vmovdqa HashKey_7_k(arg1), \T3
+ vmovdqu HashKey_7_k(arg2), \T3
vpclmulqdq $0x00, \T3, \T2, \T2
vpxor \T2, \XMM1, \XMM1
@@ -1082,14 +1635,14 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \XMM3, \T2
vpxor \XMM3, \T2, \T2
- vmovdqa HashKey_6(arg1), \T5
+ vmovdqu HashKey_6(arg2), \T5
vpclmulqdq $0x11, \T5, \XMM3, \T4
vpxor \T4, \T6, \T6
vpclmulqdq $0x00, \T5, \XMM3, \T4
vpxor \T4, \T7, \T7
- vmovdqa HashKey_6_k(arg1), \T3
+ vmovdqu HashKey_6_k(arg2), \T3
vpclmulqdq $0x00, \T3, \T2, \T2
vpxor \T2, \XMM1, \XMM1
@@ -1097,14 +1650,14 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \XMM4, \T2
vpxor \XMM4, \T2, \T2
- vmovdqa HashKey_5(arg1), \T5
+ vmovdqu HashKey_5(arg2), \T5
vpclmulqdq $0x11, \T5, \XMM4, \T4
vpxor \T4, \T6, \T6
vpclmulqdq $0x00, \T5, \XMM4, \T4
vpxor \T4, \T7, \T7
- vmovdqa HashKey_5_k(arg1), \T3
+ vmovdqu HashKey_5_k(arg2), \T3
vpclmulqdq $0x00, \T3, \T2, \T2
vpxor \T2, \XMM1, \XMM1
@@ -1112,14 +1665,14 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \XMM5, \T2
vpxor \XMM5, \T2, \T2
- vmovdqa HashKey_4(arg1), \T5
+ vmovdqu HashKey_4(arg2), \T5
vpclmulqdq $0x11, \T5, \XMM5, \T4
vpxor \T4, \T6, \T6
vpclmulqdq $0x00, \T5, \XMM5, \T4
vpxor \T4, \T7, \T7
- vmovdqa HashKey_4_k(arg1), \T3
+ vmovdqu HashKey_4_k(arg2), \T3
vpclmulqdq $0x00, \T3, \T2, \T2
vpxor \T2, \XMM1, \XMM1
@@ -1127,14 +1680,14 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \XMM6, \T2
vpxor \XMM6, \T2, \T2
- vmovdqa HashKey_3(arg1), \T5
+ vmovdqu HashKey_3(arg2), \T5
vpclmulqdq $0x11, \T5, \XMM6, \T4
vpxor \T4, \T6, \T6
vpclmulqdq $0x00, \T5, \XMM6, \T4
vpxor \T4, \T7, \T7
- vmovdqa HashKey_3_k(arg1), \T3
+ vmovdqu HashKey_3_k(arg2), \T3
vpclmulqdq $0x00, \T3, \T2, \T2
vpxor \T2, \XMM1, \XMM1
@@ -1142,14 +1695,14 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \XMM7, \T2
vpxor \XMM7, \T2, \T2
- vmovdqa HashKey_2(arg1), \T5
+ vmovdqu HashKey_2(arg2), \T5
vpclmulqdq $0x11, \T5, \XMM7, \T4
vpxor \T4, \T6, \T6
vpclmulqdq $0x00, \T5, \XMM7, \T4
vpxor \T4, \T7, \T7
- vmovdqa HashKey_2_k(arg1), \T3
+ vmovdqu HashKey_2_k(arg2), \T3
vpclmulqdq $0x00, \T3, \T2, \T2
vpxor \T2, \XMM1, \XMM1
@@ -1157,14 +1710,14 @@ _initial_blocks_done\@:
vpshufd $0b01001110, \XMM8, \T2
vpxor \XMM8, \T2, \T2
- vmovdqa HashKey(arg1), \T5
+ vmovdqu HashKey(arg2), \T5
vpclmulqdq $0x11, \T5, \XMM8, \T4
vpxor \T4, \T6, \T6
vpclmulqdq $0x00, \T5, \XMM8, \T4
vpxor \T4, \T7, \T7
- vmovdqa HashKey_k(arg1), \T3
+ vmovdqu HashKey_k(arg2), \T3
vpclmulqdq $0x00, \T3, \T2, \T2
vpxor \T2, \XMM1, \XMM1
@@ -1210,413 +1763,112 @@ _initial_blocks_done\@:
.endm
-
-# combined for GCM encrypt and decrypt functions
-# clobbering all xmm registers
-# clobbering r10, r11, r12, r13, r14, r15
-.macro GCM_ENC_DEC_AVX ENC_DEC
-
- #the number of pushes must equal STACK_OFFSET
- push %r12
- push %r13
- push %r14
- push %r15
-
- mov %rsp, %r14
-
-
-
-
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp # align rsp to 64 bytes
-
-
- vmovdqu HashKey(arg1), %xmm13 # xmm13 = HashKey
-
- mov arg4, %r13 # save the number of bytes of plaintext/ciphertext
- and $-16, %r13 # r13 = r13 - (r13 mod 16)
-
- mov %r13, %r12
- shr $4, %r12
- and $7, %r12
- jz _initial_num_blocks_is_0\@
-
- cmp $7, %r12
- je _initial_num_blocks_is_7\@
- cmp $6, %r12
- je _initial_num_blocks_is_6\@
- cmp $5, %r12
- je _initial_num_blocks_is_5\@
- cmp $4, %r12
- je _initial_num_blocks_is_4\@
- cmp $3, %r12
- je _initial_num_blocks_is_3\@
- cmp $2, %r12
- je _initial_num_blocks_is_2\@
-
- jmp _initial_num_blocks_is_1\@
-
-_initial_num_blocks_is_7\@:
- INITIAL_BLOCKS_AVX 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*7, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_6\@:
- INITIAL_BLOCKS_AVX 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*6, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_5\@:
- INITIAL_BLOCKS_AVX 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*5, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_4\@:
- INITIAL_BLOCKS_AVX 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*4, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_3\@:
- INITIAL_BLOCKS_AVX 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*3, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_2\@:
- INITIAL_BLOCKS_AVX 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*2, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_1\@:
- INITIAL_BLOCKS_AVX 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*1, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_0\@:
- INITIAL_BLOCKS_AVX 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
-
-
-_initial_blocks_encrypted\@:
- cmp $0, %r13
- je _zero_cipher_left\@
-
- sub $128, %r13
- je _eight_cipher_left\@
-
-
-
-
- vmovd %xmm9, %r15d
- and $255, %r15d
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
-
-
-_encrypt_by_8_new\@:
- cmp $(255-8), %r15d
- jg _encrypt_by_8\@
-
-
-
- add $8, %r15b
- GHASH_8_ENCRYPT_8_PARALLEL_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC
- add $128, %r11
- sub $128, %r13
- jne _encrypt_by_8_new\@
-
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- jmp _eight_cipher_left\@
-
-_encrypt_by_8\@:
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- add $8, %r15b
- GHASH_8_ENCRYPT_8_PARALLEL_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- add $128, %r11
- sub $128, %r13
- jne _encrypt_by_8_new\@
-
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
-
-
-
-
-_eight_cipher_left\@:
- GHASH_LAST_8_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8
-
-
-_zero_cipher_left\@:
- cmp $16, arg4
- jl _only_less_than_16\@
-
- mov arg4, %r13
- and $15, %r13 # r13 = (arg4 mod 16)
-
- je _multiple_of_16_bytes\@
-
- # handle the last <16 Byte block separately
-
-
- vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn)
-
- sub $16, %r11
- add %r13, %r11
- vmovdqu (arg3, %r11), %xmm1 # receive the last <16 Byte block
-
- lea SHIFT_MASK+16(%rip), %r12
- sub %r13, %r12 # adjust the shuffle mask pointer to be
- # able to shift 16-r13 bytes (r13 is the
- # number of bytes in plaintext mod 16)
- vmovdqu (%r12), %xmm2 # get the appropriate shuffle mask
- vpshufb %xmm2, %xmm1, %xmm1 # shift right 16-r13 bytes
- jmp _final_ghash_mul\@
-
-_only_less_than_16\@:
- # check for 0 length
- mov arg4, %r13
- and $15, %r13 # r13 = (arg4 mod 16)
-
- je _multiple_of_16_bytes\@
-
- # handle the last <16 Byte block separately
-
-
- vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn)
-
-
- lea SHIFT_MASK+16(%rip), %r12
- sub %r13, %r12 # adjust the shuffle mask pointer to be
- # able to shift 16-r13 bytes (r13 is the
- # number of bytes in plaintext mod 16)
-
-_get_last_16_byte_loop\@:
- movb (arg3, %r11), %al
- movb %al, TMP1 (%rsp , %r11)
- add $1, %r11
- cmp %r13, %r11
- jne _get_last_16_byte_loop\@
-
- vmovdqu TMP1(%rsp), %xmm1
-
- sub $16, %r11
-
-_final_ghash_mul\@:
- .if \ENC_DEC == DEC
- vmovdqa %xmm1, %xmm2
- vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
- vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to
- # mask out top 16-r13 bytes of xmm9
- vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
- vpand %xmm1, %xmm2, %xmm2
- vpshufb SHUF_MASK(%rip), %xmm2, %xmm2
- vpxor %xmm2, %xmm14, %xmm14
- #GHASH computation for the last <16 Byte block
- GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
- sub %r13, %r11
- add $16, %r11
- .else
- vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
- vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to
- # mask out top 16-r13 bytes of xmm9
- vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- vpxor %xmm9, %xmm14, %xmm14
- #GHASH computation for the last <16 Byte block
- GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
- sub %r13, %r11
- add $16, %r11
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext
- .endif
-
-
- #############################
- # output r13 Bytes
- vmovq %xmm9, %rax
- cmp $8, %r13
- jle _less_than_8_bytes_left\@
-
- mov %rax, (arg2 , %r11)
- add $8, %r11
- vpsrldq $8, %xmm9, %xmm9
- vmovq %xmm9, %rax
- sub $8, %r13
-
-_less_than_8_bytes_left\@:
- movb %al, (arg2 , %r11)
- add $1, %r11
- shr $8, %rax
- sub $1, %r13
- jne _less_than_8_bytes_left\@
- #############################
-
-_multiple_of_16_bytes\@:
- mov arg7, %r12 # r12 = aadLen (number of bytes)
- shl $3, %r12 # convert into number of bits
- vmovd %r12d, %xmm15 # len(A) in xmm15
-
- shl $3, arg4 # len(C) in bits (*8)
- vmovq arg4, %xmm1
- vpslldq $8, %xmm15, %xmm15 # xmm15 = len(A)|| 0x0000000000000000
- vpxor %xmm1, %xmm15, %xmm15 # xmm15 = len(A)||len(C)
-
- vpxor %xmm15, %xmm14, %xmm14
- GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation
- vpshufb SHUF_MASK(%rip), %xmm14, %xmm14 # perform a 16Byte swap
-
- mov arg5, %rax # rax = *Y0
- vmovdqu (%rax), %xmm9 # xmm9 = Y0
-
- ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Y0)
-
- vpxor %xmm14, %xmm9, %xmm9
-
-
-
-_return_T\@:
- mov arg8, %r10 # r10 = authTag
- mov arg9, %r11 # r11 = auth_tag_len
-
- cmp $16, %r11
- je _T_16\@
-
- cmp $8, %r11
- jl _T_4\@
-
-_T_8\@:
- vmovq %xmm9, %rax
- mov %rax, (%r10)
- add $8, %r10
- sub $8, %r11
- vpsrldq $8, %xmm9, %xmm9
- cmp $0, %r11
- je _return_T_done\@
-_T_4\@:
- vmovd %xmm9, %eax
- mov %eax, (%r10)
- add $4, %r10
- sub $4, %r11
- vpsrldq $4, %xmm9, %xmm9
- cmp $0, %r11
- je _return_T_done\@
-_T_123\@:
- vmovd %xmm9, %eax
- cmp $2, %r11
- jl _T_1\@
- mov %ax, (%r10)
- cmp $2, %r11
- je _return_T_done\@
- add $2, %r10
- sar $16, %eax
-_T_1\@:
- mov %al, (%r10)
- jmp _return_T_done\@
-
-_T_16\@:
- vmovdqu %xmm9, (%r10)
-
-_return_T_done\@:
- mov %r14, %rsp
-
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-.endm
-
-
#############################################################
-#void aesni_gcm_precomp_avx_gen2
+#void aesni_gcm_init_avx_gen2
# (gcm_data *my_ctx_data,
-# u8 *hash_subkey)# /* H, the Hash sub key input. Data starts on a 16-byte boundary. */
+# gcm_context_data *data,
+# u8 *hash_subkey# /* H, the Hash sub key input. Data starts on a 16-byte boundary. */
+# u8 *iv, /* Pre-counter block j0: 4 byte salt
+# (from Security Association) concatenated with 8 byte
+# Initialisation Vector (from IPSec ESP Payload)
+# concatenated with 0x00000001. 16-byte aligned pointer. */
+# const u8 *aad, /* Additional Authentication Data (AAD)*/
+# u64 aad_len) /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
#############################################################
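
The iv layout spelled out above can be illustrated with a small helper; this is a hypothetical sketch of how a caller might assemble the pre-counter block j0 (build_j0 is not a kernel function):

	#include <linux/string.h>
	#include <linux/types.h>

	/* j0 = 4-byte salt || 8-byte IV || 0x00000001 (counter starts at 1) */
	static void build_j0(u8 j0[16], const u8 salt[4], const u8 iv[8])
	{
		memcpy(j0, salt, 4);
		memcpy(j0 + 4, iv, 8);
		j0[12] = 0x00;
		j0[13] = 0x00;
		j0[14] = 0x00;
		j0[15] = 0x01;
	}
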
-ENTRY(aesni_gcm_precomp_avx_gen2)
- #the number of pushes must equal STACK_OFFSET
- push %r12
- push %r13
- push %r14
- push %r15
-
- mov %rsp, %r14
-
-
-
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp # align rsp to 64 bytes
-
- vmovdqu (arg2), %xmm6 # xmm6 = HashKey
-
- vpshufb SHUF_MASK(%rip), %xmm6, %xmm6
- ############### PRECOMPUTATION of HashKey<<1 mod poly from the HashKey
- vmovdqa %xmm6, %xmm2
- vpsllq $1, %xmm6, %xmm6
- vpsrlq $63, %xmm2, %xmm2
- vmovdqa %xmm2, %xmm1
- vpslldq $8, %xmm2, %xmm2
- vpsrldq $8, %xmm1, %xmm1
- vpor %xmm2, %xmm6, %xmm6
- #reduction
- vpshufd $0b00100100, %xmm1, %xmm2
- vpcmpeqd TWOONE(%rip), %xmm2, %xmm2
- vpand POLY(%rip), %xmm2, %xmm2
- vpxor %xmm2, %xmm6, %xmm6 # xmm6 holds the HashKey<<1 mod poly
- #######################################################################
- vmovdqa %xmm6, HashKey(arg1) # store HashKey<<1 mod poly
-
-
- PRECOMPUTE_AVX %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5
-
- mov %r14, %rsp
-
- pop %r15
- pop %r14
- pop %r13
- pop %r12
+ENTRY(aesni_gcm_init_avx_gen2)
+ FUNC_SAVE
+ INIT GHASH_MUL_AVX, PRECOMPUTE_AVX
+ FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_precomp_avx_gen2)
+ENDPROC(aesni_gcm_init_avx_gen2)
###############################################################################
-#void aesni_gcm_enc_avx_gen2(
+#void aesni_gcm_enc_update_avx_gen2(
# gcm_data *my_ctx_data, /* aligned to 16 Bytes */
+# gcm_context_data *data,
# u8 *out, /* Ciphertext output. Encrypt in-place is allowed. */
# const u8 *in, /* Plaintext input */
-# u64 plaintext_len, /* Length of data in Bytes for encryption. */
-# u8 *iv, /* Pre-counter block j0: 4 byte salt
-# (from Security Association) concatenated with 8 byte
-# Initialisation Vector (from IPSec ESP Payload)
-# concatenated with 0x00000001. 16-byte aligned pointer. */
-# const u8 *aad, /* Additional Authentication Data (AAD)*/
-# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
-# u8 *auth_tag, /* Authenticated Tag output. */
-# u64 auth_tag_len)# /* Authenticated Tag Length in bytes.
-# Valid values are 16 (most likely), 12 or 8. */
+# u64 plaintext_len) /* Length of data in Bytes for encryption. */
###############################################################################
-ENTRY(aesni_gcm_enc_avx_gen2)
- GCM_ENC_DEC_AVX ENC
- ret
-ENDPROC(aesni_gcm_enc_avx_gen2)
+ENTRY(aesni_gcm_enc_update_avx_gen2)
+ FUNC_SAVE
+ mov keysize, %eax
+ cmp $32, %eax
+ je key_256_enc_update
+ cmp $16, %eax
+ je key_128_enc_update
+ # must be 192
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 11
+ FUNC_RESTORE
+ ret
+key_128_enc_update:
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 9
+ FUNC_RESTORE
+ ret
+key_256_enc_update:
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_enc_update_avx_gen2)
###############################################################################
-#void aesni_gcm_dec_avx_gen2(
+#void aesni_gcm_dec_update_avx_gen2(
# gcm_data *my_ctx_data, /* aligned to 16 Bytes */
+# gcm_context_data *data,
# u8 *out, /* Plaintext output. Decrypt in-place is allowed. */
# const u8 *in, /* Ciphertext input */
-# u64 plaintext_len, /* Length of data in Bytes for encryption. */
-# u8 *iv, /* Pre-counter block j0: 4 byte salt
-# (from Security Association) concatenated with 8 byte
-# Initialisation Vector (from IPSec ESP Payload)
-# concatenated with 0x00000001. 16-byte aligned pointer. */
-# const u8 *aad, /* Additional Authentication Data (AAD)*/
-# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
+# u64 plaintext_len) /* Length of data in Bytes for decryption. */
+###############################################################################
+ENTRY(aesni_gcm_dec_update_avx_gen2)
+ FUNC_SAVE
+ mov keysize,%eax
+ cmp $32, %eax
+ je key_256_dec_update
+ cmp $16, %eax
+ je key_128_dec_update
+ # must be 192
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 11
+ FUNC_RESTORE
+ ret
+key_128_dec_update:
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 9
+ FUNC_RESTORE
+ ret
+key_256_dec_update:
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_dec_update_avx_gen2)
+
+###############################################################################
+#void aesni_gcm_finalize_avx_gen2(
+# gcm_data *my_ctx_data, /* aligned to 16 Bytes */
+# gcm_context_data *data,
# u8 *auth_tag, /* Authenticated Tag output. */
# u64 auth_tag_len)# /* Authenticated Tag Length in bytes.
# Valid values are 16 (most likely), 12 or 8. */
###############################################################################
-ENTRY(aesni_gcm_dec_avx_gen2)
- GCM_ENC_DEC_AVX DEC
- ret
-ENDPROC(aesni_gcm_dec_avx_gen2)
+ENTRY(aesni_gcm_finalize_avx_gen2)
+ FUNC_SAVE
+ mov keysize,%eax
+ cmp $32, %eax
+ je key_256_finalize
+ cmp $16, %eax
+ je key_128_finalize
+ # must be 192
+ GCM_COMPLETE GHASH_MUL_AVX, 11, arg3, arg4
+ FUNC_RESTORE
+ ret
+key_128_finalize:
+ GCM_COMPLETE GHASH_MUL_AVX, 9, arg3, arg4
+ FUNC_RESTORE
+ ret
+key_256_finalize:
+ GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_finalize_avx_gen2)
+
#endif /* CONFIG_AS_AVX */
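
The trailing macro argument (9, 11 or 13) is the AES round count threaded through GCM_ENC_DEC and GCM_COMPLETE as REP, chosen from the key size exactly as the keysize branches above do. A C sketch of the mapping, for clarity only (not kernel code):

	/* keysize in bytes; the count excludes the final aesenclast round */
	static int gcm_avx_nrounds(unsigned int keysize)
	{
		switch (keysize) {
		case 16: return 9;	/* AES-128 */
		case 24: return 11;	/* AES-192 - the "must be 192" fallthrough */
		case 32: return 13;	/* AES-256 */
		}
		return -1;		/* unreachable for valid AES keys */
	}
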
#ifdef CONFIG_AS_AVX2
@@ -1670,113 +1922,42 @@ ENDPROC(aesni_gcm_dec_avx_gen2)
# HashKey_i_k holds XORed values of the low and high parts of HashKey_i
vmovdqa \HK, \T5
GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
- vmovdqa \T5, HashKey_2(arg1) # [HashKey_2] = HashKey^2<<1 mod poly
+ vmovdqu \T5, HashKey_2(arg2) # [HashKey_2] = HashKey^2<<1 mod poly
GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly
- vmovdqa \T5, HashKey_3(arg1)
+ vmovdqu \T5, HashKey_3(arg2)
GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly
- vmovdqa \T5, HashKey_4(arg1)
+ vmovdqu \T5, HashKey_4(arg2)
GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly
- vmovdqa \T5, HashKey_5(arg1)
+ vmovdqu \T5, HashKey_5(arg2)
GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly
- vmovdqa \T5, HashKey_6(arg1)
+ vmovdqu \T5, HashKey_6(arg2)
GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly
- vmovdqa \T5, HashKey_7(arg1)
+ vmovdqu \T5, HashKey_7(arg2)
GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly
- vmovdqa \T5, HashKey_8(arg1)
+ vmovdqu \T5, HashKey_8(arg2)
.endm
-
## if a = number of total plaintext bytes
## b = floor(a/16)
## num_initial_blocks = b mod 8
## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext
## r10, r11, r12, rax are clobbered
-## arg1, arg2, arg3, r14 are used as a pointer only, not modified
+## arg1, arg3, arg4, r14 are used as pointers only, not modified
-.macro INITIAL_BLOCKS_AVX2 num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER
+.macro INITIAL_BLOCKS_AVX2 REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER
i = (8-\num_initial_blocks)
- j = 0
setreg
-
- mov arg6, %r10 # r10 = AAD
- mov arg7, %r12 # r12 = aadLen
-
-
- mov %r12, %r11
-
- vpxor reg_j, reg_j, reg_j
- vpxor reg_i, reg_i, reg_i
-
- cmp $16, %r11
- jl _get_AAD_rest8\@
-_get_AAD_blocks\@:
- vmovdqu (%r10), reg_i
- vpshufb SHUF_MASK(%rip), reg_i, reg_i
- vpxor reg_i, reg_j, reg_j
- GHASH_MUL_AVX2 reg_j, \T2, \T1, \T3, \T4, \T5, \T6
- add $16, %r10
- sub $16, %r12
- sub $16, %r11
- cmp $16, %r11
- jge _get_AAD_blocks\@
- vmovdqu reg_j, reg_i
- cmp $0, %r11
- je _get_AAD_done\@
-
- vpxor reg_i, reg_i, reg_i
-
- /* read the last <16B of AAD. since we have at least 4B of
- data right after the AAD (the ICV, and maybe some CT), we can
- read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\@:
- cmp $4, %r11
- jle _get_AAD_rest4\@
- movq (%r10), \T1
- add $8, %r10
- sub $8, %r11
- vpslldq $8, \T1, \T1
- vpsrldq $8, reg_i, reg_i
- vpxor \T1, reg_i, reg_i
- jmp _get_AAD_rest8\@
-_get_AAD_rest4\@:
- cmp $0, %r11
- jle _get_AAD_rest0\@
- mov (%r10), %eax
- movq %rax, \T1
- add $4, %r10
- sub $4, %r11
- vpslldq $12, \T1, \T1
- vpsrldq $4, reg_i, reg_i
- vpxor \T1, reg_i, reg_i
-_get_AAD_rest0\@:
- /* finalize: shift out the extra bytes we read, and align
- left. since pslldq can only shift by an immediate, we use
- vpshufb and an array of shuffle masks */
- movq %r12, %r11
- salq $4, %r11
- movdqu aad_shift_arr(%r11), \T1
- vpshufb \T1, reg_i, reg_i
-_get_AAD_rest_final\@:
- vpshufb SHUF_MASK(%rip), reg_i, reg_i
- vpxor reg_j, reg_i, reg_i
- GHASH_MUL_AVX2 reg_i, \T2, \T1, \T3, \T4, \T5, \T6
-
-_get_AAD_done\@:
- # initialize the data pointer offset as zero
- xor %r11d, %r11d
+ vmovdqu AadHash(arg2), reg_i
# start AES for num_initial_blocks blocks
- mov arg5, %rax # rax = *Y0
- vmovdqu (%rax), \CTR # CTR = Y0
- vpshufb SHUF_MASK(%rip), \CTR, \CTR
-
+ vmovdqu CurCount(arg2), \CTR
i = (9-\num_initial_blocks)
setreg
@@ -1799,7 +1980,7 @@ _get_AAD_done\@:
j = 1
setreg
-.rep 9
+.rep \REP
vmovdqa 16*j(arg1), \T_key
i = (9-\num_initial_blocks)
setreg
@@ -1814,7 +1995,7 @@ _get_AAD_done\@:
.endr
- vmovdqa 16*10(arg1), \T_key
+ vmovdqa 16*j(arg1), \T_key
i = (9-\num_initial_blocks)
setreg
.rep \num_initial_blocks
@@ -1826,9 +2007,9 @@ _get_AAD_done\@:
i = (9-\num_initial_blocks)
setreg
.rep \num_initial_blocks
- vmovdqu (arg3, %r11), \T1
+ vmovdqu (arg4, %r11), \T1
vpxor \T1, reg_i, reg_i
- vmovdqu reg_i, (arg2 , %r11) # write back ciphertext for
+ vmovdqu reg_i, (arg3 , %r11) # write back ciphertext for
# num_initial_blocks blocks
add $16, %r11
.if \ENC_DEC == DEC
@@ -1905,7 +2086,7 @@ _get_AAD_done\@:
i = 1
setreg
-.rep 9 # do 9 rounds
+.rep \REP # do REP rounds
vmovdqa 16*i(arg1), \T_key
vaesenc \T_key, \XMM1, \XMM1
vaesenc \T_key, \XMM2, \XMM2
@@ -1930,58 +2111,58 @@ _get_AAD_done\@:
vaesenclast \T_key, \XMM7, \XMM7
vaesenclast \T_key, \XMM8, \XMM8
- vmovdqu (arg3, %r11), \T1
+ vmovdqu (arg4, %r11), \T1
vpxor \T1, \XMM1, \XMM1
- vmovdqu \XMM1, (arg2 , %r11)
+ vmovdqu \XMM1, (arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM1
.endif
- vmovdqu 16*1(arg3, %r11), \T1
+ vmovdqu 16*1(arg4, %r11), \T1
vpxor \T1, \XMM2, \XMM2
- vmovdqu \XMM2, 16*1(arg2 , %r11)
+ vmovdqu \XMM2, 16*1(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM2
.endif
- vmovdqu 16*2(arg3, %r11), \T1
+ vmovdqu 16*2(arg4, %r11), \T1
vpxor \T1, \XMM3, \XMM3
- vmovdqu \XMM3, 16*2(arg2 , %r11)
+ vmovdqu \XMM3, 16*2(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM3
.endif
- vmovdqu 16*3(arg3, %r11), \T1
+ vmovdqu 16*3(arg4, %r11), \T1
vpxor \T1, \XMM4, \XMM4
- vmovdqu \XMM4, 16*3(arg2 , %r11)
+ vmovdqu \XMM4, 16*3(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM4
.endif
- vmovdqu 16*4(arg3, %r11), \T1
+ vmovdqu 16*4(arg4, %r11), \T1
vpxor \T1, \XMM5, \XMM5
- vmovdqu \XMM5, 16*4(arg2 , %r11)
+ vmovdqu \XMM5, 16*4(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM5
.endif
- vmovdqu 16*5(arg3, %r11), \T1
+ vmovdqu 16*5(arg4, %r11), \T1
vpxor \T1, \XMM6, \XMM6
- vmovdqu \XMM6, 16*5(arg2 , %r11)
+ vmovdqu \XMM6, 16*5(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM6
.endif
- vmovdqu 16*6(arg3, %r11), \T1
+ vmovdqu 16*6(arg4, %r11), \T1
vpxor \T1, \XMM7, \XMM7
- vmovdqu \XMM7, 16*6(arg2 , %r11)
+ vmovdqu \XMM7, 16*6(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM7
.endif
- vmovdqu 16*7(arg3, %r11), \T1
+ vmovdqu 16*7(arg4, %r11), \T1
vpxor \T1, \XMM8, \XMM8
- vmovdqu \XMM8, 16*7(arg2 , %r11)
+ vmovdqu \XMM8, 16*7(arg3 , %r11)
.if \ENC_DEC == DEC
vmovdqa \T1, \XMM8
.endif
@@ -2010,9 +2191,9 @@ _initial_blocks_done\@:
# encrypt 8 blocks at a time
# ghash the 8 previously encrypted ciphertext blocks
-# arg1, arg2, arg3 are used as pointers only, not modified
+# arg1, arg3, arg4 are used as pointers only, not modified
# r11 is the data offset value
-.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX2 T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
+.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX2 REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
vmovdqa \XMM1, \T2
vmovdqa \XMM2, TMP2(%rsp)
@@ -2096,7 +2277,7 @@ _initial_blocks_done\@:
#######################################################################
- vmovdqa HashKey_8(arg1), \T5
+ vmovdqu HashKey_8(arg2), \T5
vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1
vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0
vpclmulqdq $0x01, \T5, \T2, \T6 # T6 = a1*b0
@@ -2114,7 +2295,7 @@ _initial_blocks_done\@:
vaesenc \T1, \XMM8, \XMM8
vmovdqa TMP2(%rsp), \T1
- vmovdqa HashKey_7(arg1), \T5
+ vmovdqu HashKey_7(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
@@ -2140,7 +2321,7 @@ _initial_blocks_done\@:
#######################################################################
vmovdqa TMP3(%rsp), \T1
- vmovdqa HashKey_6(arg1), \T5
+ vmovdqu HashKey_6(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
@@ -2164,7 +2345,7 @@ _initial_blocks_done\@:
vaesenc \T1, \XMM8, \XMM8
vmovdqa TMP4(%rsp), \T1
- vmovdqa HashKey_5(arg1), \T5
+ vmovdqu HashKey_5(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
@@ -2189,7 +2370,7 @@ _initial_blocks_done\@:
vmovdqa TMP5(%rsp), \T1
- vmovdqa HashKey_4(arg1), \T5
+ vmovdqu HashKey_4(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
@@ -2213,7 +2394,7 @@ _initial_blocks_done\@:
vaesenc \T1, \XMM8, \XMM8
vmovdqa TMP6(%rsp), \T1
- vmovdqa HashKey_3(arg1), \T5
+ vmovdqu HashKey_3(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
@@ -2237,7 +2418,7 @@ _initial_blocks_done\@:
vaesenc \T1, \XMM8, \XMM8
vmovdqa TMP7(%rsp), \T1
- vmovdqa HashKey_2(arg1), \T5
+ vmovdqu HashKey_2(arg2), \T5
vpclmulqdq $0x11, \T5, \T1, \T3
vpxor \T3, \T4, \T4
@@ -2264,7 +2445,7 @@ _initial_blocks_done\@:
vaesenc \T5, \XMM8, \XMM8
vmovdqa TMP8(%rsp), \T1
- vmovdqa HashKey(arg1), \T5
+ vmovdqu HashKey(arg2), \T5
vpclmulqdq $0x00, \T5, \T1, \T3
vpxor \T3, \T7, \T7
@@ -2281,17 +2462,34 @@ _initial_blocks_done\@:
vmovdqu 16*10(arg1), \T5
+ i = 11
+ setreg
+.rep (\REP-9)
+ vaesenc \T5, \XMM1, \XMM1
+ vaesenc \T5, \XMM2, \XMM2
+ vaesenc \T5, \XMM3, \XMM3
+ vaesenc \T5, \XMM4, \XMM4
+ vaesenc \T5, \XMM5, \XMM5
+ vaesenc \T5, \XMM6, \XMM6
+ vaesenc \T5, \XMM7, \XMM7
+ vaesenc \T5, \XMM8, \XMM8
+
+ vmovdqu 16*i(arg1), \T5
+ i = i + 1
+ setreg
+.endr
+
i = 0
j = 1
setreg
.rep 8
- vpxor 16*i(arg3, %r11), \T5, \T2
+ vpxor 16*i(arg4, %r11), \T5, \T2
.if \ENC_DEC == ENC
vaesenclast \T2, reg_j, reg_j
.else
vaesenclast \T2, reg_j, \T3
- vmovdqu 16*i(arg3, %r11), reg_j
- vmovdqu \T3, 16*i(arg2, %r11)
+ vmovdqu 16*i(arg4, %r11), reg_j
+ vmovdqu \T3, 16*i(arg3, %r11)
.endif
i = (i+1)
j = (j+1)
@@ -2317,14 +2515,14 @@ _initial_blocks_done\@:
vpxor \T2, \T7, \T7 # first phase of the reduction complete
#######################################################################
.if \ENC_DEC == ENC
- vmovdqu \XMM1, 16*0(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM2, 16*1(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM3, 16*2(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM4, 16*3(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM5, 16*4(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM6, 16*5(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM7, 16*6(arg2,%r11) # Write to the Ciphertext buffer
- vmovdqu \XMM8, 16*7(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM1, 16*0(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM2, 16*1(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM3, 16*2(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM4, 16*3(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM5, 16*4(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM6, 16*5(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM7, 16*6(arg3,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM8, 16*7(arg3,%r11) # Write to the Ciphertext buffer
.endif
#######################################################################
@@ -2361,7 +2559,7 @@ _initial_blocks_done\@:
## Karatsuba Method
- vmovdqa HashKey_8(arg1), \T5
+ vmovdqu HashKey_8(arg2), \T5
vpshufd $0b01001110, \XMM1, \T2
vpshufd $0b01001110, \T5, \T3
@@ -2375,7 +2573,7 @@ _initial_blocks_done\@:
######################
- vmovdqa HashKey_7(arg1), \T5
+ vmovdqu HashKey_7(arg2), \T5
vpshufd $0b01001110, \XMM2, \T2
vpshufd $0b01001110, \T5, \T3
vpxor \XMM2, \T2, \T2
@@ -2393,7 +2591,7 @@ _initial_blocks_done\@:
######################
- vmovdqa HashKey_6(arg1), \T5
+ vmovdqu HashKey_6(arg2), \T5
vpshufd $0b01001110, \XMM3, \T2
vpshufd $0b01001110, \T5, \T3
vpxor \XMM3, \T2, \T2
@@ -2411,7 +2609,7 @@ _initial_blocks_done\@:
######################
- vmovdqa HashKey_5(arg1), \T5
+ vmovdqu HashKey_5(arg2), \T5
vpshufd $0b01001110, \XMM4, \T2
vpshufd $0b01001110, \T5, \T3
vpxor \XMM4, \T2, \T2
@@ -2429,7 +2627,7 @@ _initial_blocks_done\@:
######################
- vmovdqa HashKey_4(arg1), \T5
+ vmovdqu HashKey_4(arg2), \T5
vpshufd $0b01001110, \XMM5, \T2
vpshufd $0b01001110, \T5, \T3
vpxor \XMM5, \T2, \T2
@@ -2447,7 +2645,7 @@ _initial_blocks_done\@:
######################
- vmovdqa HashKey_3(arg1), \T5
+ vmovdqu HashKey_3(arg2), \T5
vpshufd $0b01001110, \XMM6, \T2
vpshufd $0b01001110, \T5, \T3
vpxor \XMM6, \T2, \T2
@@ -2465,7 +2663,7 @@ _initial_blocks_done\@:
######################
- vmovdqa HashKey_2(arg1), \T5
+ vmovdqu HashKey_2(arg2), \T5
vpshufd $0b01001110, \XMM7, \T2
vpshufd $0b01001110, \T5, \T3
vpxor \XMM7, \T2, \T2
@@ -2483,7 +2681,7 @@ _initial_blocks_done\@:
######################
- vmovdqa HashKey(arg1), \T5
+ vmovdqu HashKey(arg2), \T5
vpshufd $0b01001110, \XMM8, \T2
vpshufd $0b01001110, \T5, \T3
vpxor \XMM8, \T2, \T2
@@ -2536,411 +2734,110 @@ _initial_blocks_done\@:
-# combined for GCM encrypt and decrypt functions
-# clobbering all xmm registers
-# clobbering r10, r11, r12, r13, r14, r15
-.macro GCM_ENC_DEC_AVX2 ENC_DEC
-
- #the number of pushes must equal STACK_OFFSET
- push %r12
- push %r13
- push %r14
- push %r15
-
- mov %rsp, %r14
-
-
-
-
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp # align rsp to 64 bytes
-
-
- vmovdqu HashKey(arg1), %xmm13 # xmm13 = HashKey
-
- mov arg4, %r13 # save the number of bytes of plaintext/ciphertext
- and $-16, %r13 # r13 = r13 - (r13 mod 16)
-
- mov %r13, %r12
- shr $4, %r12
- and $7, %r12
- jz _initial_num_blocks_is_0\@
-
- cmp $7, %r12
- je _initial_num_blocks_is_7\@
- cmp $6, %r12
- je _initial_num_blocks_is_6\@
- cmp $5, %r12
- je _initial_num_blocks_is_5\@
- cmp $4, %r12
- je _initial_num_blocks_is_4\@
- cmp $3, %r12
- je _initial_num_blocks_is_3\@
- cmp $2, %r12
- je _initial_num_blocks_is_2\@
-
- jmp _initial_num_blocks_is_1\@
-
-_initial_num_blocks_is_7\@:
- INITIAL_BLOCKS_AVX2 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*7, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_6\@:
- INITIAL_BLOCKS_AVX2 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*6, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_5\@:
- INITIAL_BLOCKS_AVX2 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*5, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_4\@:
- INITIAL_BLOCKS_AVX2 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*4, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_3\@:
- INITIAL_BLOCKS_AVX2 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*3, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_2\@:
- INITIAL_BLOCKS_AVX2 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*2, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_1\@:
- INITIAL_BLOCKS_AVX2 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
- sub $16*1, %r13
- jmp _initial_blocks_encrypted\@
-
-_initial_num_blocks_is_0\@:
- INITIAL_BLOCKS_AVX2 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
-
-
-_initial_blocks_encrypted\@:
- cmp $0, %r13
- je _zero_cipher_left\@
-
- sub $128, %r13
- je _eight_cipher_left\@
-
-
-
-
- vmovd %xmm9, %r15d
- and $255, %r15d
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
-
-
-_encrypt_by_8_new\@:
- cmp $(255-8), %r15d
- jg _encrypt_by_8\@
-
-
-
- add $8, %r15b
- GHASH_8_ENCRYPT_8_PARALLEL_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC
- add $128, %r11
- sub $128, %r13
- jne _encrypt_by_8_new\@
-
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- jmp _eight_cipher_left\@
-
-_encrypt_by_8\@:
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- add $8, %r15b
- GHASH_8_ENCRYPT_8_PARALLEL_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- add $128, %r11
- sub $128, %r13
- jne _encrypt_by_8_new\@
-
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
-
-
-
-
-_eight_cipher_left\@:
- GHASH_LAST_8_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8
-
-
-_zero_cipher_left\@:
- cmp $16, arg4
- jl _only_less_than_16\@
-
- mov arg4, %r13
- and $15, %r13 # r13 = (arg4 mod 16)
-
- je _multiple_of_16_bytes\@
-
- # handle the last <16 Byte block separately
-
-
- vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn)
-
- sub $16, %r11
- add %r13, %r11
- vmovdqu (arg3, %r11), %xmm1 # receive the last <16 Byte block
-
- lea SHIFT_MASK+16(%rip), %r12
- sub %r13, %r12 # adjust the shuffle mask pointer
- # to be able to shift 16-r13 bytes
- # (r13 is the number of bytes in plaintext mod 16)
- vmovdqu (%r12), %xmm2 # get the appropriate shuffle mask
- vpshufb %xmm2, %xmm1, %xmm1 # shift right 16-r13 bytes
- jmp _final_ghash_mul\@
-
-_only_less_than_16\@:
- # check for 0 length
- mov arg4, %r13
- and $15, %r13 # r13 = (arg4 mod 16)
-
- je _multiple_of_16_bytes\@
-
- # handle the last <16 Byte block separately
-
-
- vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn)
-
-
- lea SHIFT_MASK+16(%rip), %r12
- sub %r13, %r12 # adjust the shuffle mask pointer to be
- # able to shift 16-r13 bytes (r13 is the
- # number of bytes in plaintext mod 16)
-
-_get_last_16_byte_loop\@:
- movb (arg3, %r11), %al
- movb %al, TMP1 (%rsp , %r11)
- add $1, %r11
- cmp %r13, %r11
- jne _get_last_16_byte_loop\@
-
- vmovdqu TMP1(%rsp), %xmm1
-
- sub $16, %r11
-
-_final_ghash_mul\@:
- .if \ENC_DEC == DEC
- vmovdqa %xmm1, %xmm2
- vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
- vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm9
- vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
- vpand %xmm1, %xmm2, %xmm2
- vpshufb SHUF_MASK(%rip), %xmm2, %xmm2
- vpxor %xmm2, %xmm14, %xmm14
- #GHASH computation for the last <16 Byte block
- GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
- sub %r13, %r11
- add $16, %r11
- .else
- vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
- vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm9
- vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
- vpxor %xmm9, %xmm14, %xmm14
- #GHASH computation for the last <16 Byte block
- GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
- sub %r13, %r11
- add $16, %r11
- vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext
- .endif
-
-
- #############################
- # output r13 Bytes
- vmovq %xmm9, %rax
- cmp $8, %r13
- jle _less_than_8_bytes_left\@
-
- mov %rax, (arg2 , %r11)
- add $8, %r11
- vpsrldq $8, %xmm9, %xmm9
- vmovq %xmm9, %rax
- sub $8, %r13
-
-_less_than_8_bytes_left\@:
- movb %al, (arg2 , %r11)
- add $1, %r11
- shr $8, %rax
- sub $1, %r13
- jne _less_than_8_bytes_left\@
- #############################
-
-_multiple_of_16_bytes\@:
- mov arg7, %r12 # r12 = aadLen (number of bytes)
- shl $3, %r12 # convert into number of bits
- vmovd %r12d, %xmm15 # len(A) in xmm15
-
- shl $3, arg4 # len(C) in bits (*8)
- vmovq arg4, %xmm1
- vpslldq $8, %xmm15, %xmm15 # xmm15 = len(A)|| 0x0000000000000000
- vpxor %xmm1, %xmm15, %xmm15 # xmm15 = len(A)||len(C)
-
- vpxor %xmm15, %xmm14, %xmm14
- GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation
- vpshufb SHUF_MASK(%rip), %xmm14, %xmm14 # perform a 16Byte swap
-
- mov arg5, %rax # rax = *Y0
- vmovdqu (%rax), %xmm9 # xmm9 = Y0
-
- ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Y0)
-
- vpxor %xmm14, %xmm9, %xmm9
-
-
-
-_return_T\@:
- mov arg8, %r10 # r10 = authTag
- mov arg9, %r11 # r11 = auth_tag_len
-
- cmp $16, %r11
- je _T_16\@
-
- cmp $8, %r11
- jl _T_4\@
-
-_T_8\@:
- vmovq %xmm9, %rax
- mov %rax, (%r10)
- add $8, %r10
- sub $8, %r11
- vpsrldq $8, %xmm9, %xmm9
- cmp $0, %r11
- je _return_T_done\@
-_T_4\@:
- vmovd %xmm9, %eax
- mov %eax, (%r10)
- add $4, %r10
- sub $4, %r11
- vpsrldq $4, %xmm9, %xmm9
- cmp $0, %r11
- je _return_T_done\@
-_T_123\@:
- vmovd %xmm9, %eax
- cmp $2, %r11
- jl _T_1\@
- mov %ax, (%r10)
- cmp $2, %r11
- je _return_T_done\@
- add $2, %r10
- sar $16, %eax
-_T_1\@:
- mov %al, (%r10)
- jmp _return_T_done\@
-
-_T_16\@:
- vmovdqu %xmm9, (%r10)
-
-_return_T_done\@:
- mov %r14, %rsp
-
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-.endm
-
-
#############################################################
-#void aesni_gcm_precomp_avx_gen4
+#void aesni_gcm_init_avx_gen4
# (gcm_data *my_ctx_data,
-# u8 *hash_subkey)# /* H, the Hash sub key input.
-# Data starts on a 16-byte boundary. */
+# gcm_context_data *data,
+# u8 *iv, /* Pre-counter block j0: 4 byte salt
+# (from Security Association) concatenated with 8 byte
+# Initialisation Vector (from IPSec ESP Payload)
+# concatenated with 0x00000001. 16-byte aligned pointer. */
+# u8 *hash_subkey# /* H, the Hash sub key input. Data starts on a 16-byte boundary. */
+# const u8 *aad, /* Additional Authentication Data (AAD)*/
+# u64 aad_len) /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
#############################################################
-ENTRY(aesni_gcm_precomp_avx_gen4)
- #the number of pushes must equal STACK_OFFSET
- push %r12
- push %r13
- push %r14
- push %r15
-
- mov %rsp, %r14
-
-
-
- sub $VARIABLE_OFFSET, %rsp
- and $~63, %rsp # align rsp to 64 bytes
-
- vmovdqu (arg2), %xmm6 # xmm6 = HashKey
-
- vpshufb SHUF_MASK(%rip), %xmm6, %xmm6
- ############### PRECOMPUTATION of HashKey<<1 mod poly from the HashKey
- vmovdqa %xmm6, %xmm2
- vpsllq $1, %xmm6, %xmm6
- vpsrlq $63, %xmm2, %xmm2
- vmovdqa %xmm2, %xmm1
- vpslldq $8, %xmm2, %xmm2
- vpsrldq $8, %xmm1, %xmm1
- vpor %xmm2, %xmm6, %xmm6
- #reduction
- vpshufd $0b00100100, %xmm1, %xmm2
- vpcmpeqd TWOONE(%rip), %xmm2, %xmm2
- vpand POLY(%rip), %xmm2, %xmm2
- vpxor %xmm2, %xmm6, %xmm6 # xmm6 holds the HashKey<<1 mod poly
- #######################################################################
- vmovdqa %xmm6, HashKey(arg1) # store HashKey<<1 mod poly
-
-
- PRECOMPUTE_AVX2 %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5
-
- mov %r14, %rsp
-
- pop %r15
- pop %r14
- pop %r13
- pop %r12
+ENTRY(aesni_gcm_init_avx_gen4)
+ FUNC_SAVE
+ INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2
+ FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_precomp_avx_gen4)
-
+ENDPROC(aesni_gcm_init_avx_gen4)
###############################################################################
-#void aesni_gcm_enc_avx_gen4(
+#void aesni_gcm_enc_update_avx_gen4(
# gcm_data *my_ctx_data, /* aligned to 16 Bytes */
+# gcm_context_data *data,
# u8 *out, /* Ciphertext output. Encrypt in-place is allowed. */
# const u8 *in, /* Plaintext input */
-# u64 plaintext_len, /* Length of data in Bytes for encryption. */
-# u8 *iv, /* Pre-counter block j0: 4 byte salt
-# (from Security Association) concatenated with 8 byte
-# Initialisation Vector (from IPSec ESP Payload)
-# concatenated with 0x00000001. 16-byte aligned pointer. */
-# const u8 *aad, /* Additional Authentication Data (AAD)*/
-# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
-# u8 *auth_tag, /* Authenticated Tag output. */
-# u64 auth_tag_len)# /* Authenticated Tag Length in bytes.
-# Valid values are 16 (most likely), 12 or 8. */
+# u64 plaintext_len) /* Length of data in Bytes for encryption. */
###############################################################################
-ENTRY(aesni_gcm_enc_avx_gen4)
- GCM_ENC_DEC_AVX2 ENC
+ENTRY(aesni_gcm_enc_update_avx_gen4)
+ FUNC_SAVE
+ mov keysize,%eax
+ cmp $32, %eax
+ je key_256_enc_update4
+ cmp $16, %eax
+ je key_128_enc_update4
+ # must be 192
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 11
+ FUNC_RESTORE
+ ret
+key_128_enc_update4:
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 9
+ FUNC_RESTORE
+ ret
+key_256_enc_update4:
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13
+ FUNC_RESTORE
ret
-ENDPROC(aesni_gcm_enc_avx_gen4)
+ENDPROC(aesni_gcm_enc_update_avx_gen4)
###############################################################################
-#void aesni_gcm_dec_avx_gen4(
+#void aesni_gcm_dec_update_avx_gen4(
# gcm_data *my_ctx_data, /* aligned to 16 Bytes */
+# gcm_context_data *data,
# u8 *out, /* Plaintext output. Decrypt in-place is allowed. */
# const u8 *in, /* Ciphertext input */
-# u64 plaintext_len, /* Length of data in Bytes for encryption. */
-# u8 *iv, /* Pre-counter block j0: 4 byte salt
-# (from Security Association) concatenated with 8 byte
-# Initialisation Vector (from IPSec ESP Payload)
-# concatenated with 0x00000001. 16-byte aligned pointer. */
-# const u8 *aad, /* Additional Authentication Data (AAD)*/
-# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
+# u64 plaintext_len) /* Length of data in Bytes for decryption. */
+###############################################################################
+ENTRY(aesni_gcm_dec_update_avx_gen4)
+ FUNC_SAVE
+ mov keysize,%eax
+ cmp $32, %eax
+ je key_256_dec_update4
+ cmp $16, %eax
+ je key_128_dec_update4
+ # must be 192
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 11
+ FUNC_RESTORE
+ ret
+key_128_dec_update4:
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 9
+ FUNC_RESTORE
+ ret
+key_256_dec_update4:
+ GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_dec_update_avx_gen4)
+
+###############################################################################
+#void aesni_gcm_finalize_avx_gen4(
+# gcm_data *my_ctx_data, /* aligned to 16 Bytes */
+# gcm_context_data *data,
# u8 *auth_tag, /* Authenticated Tag output. */
# u64 auth_tag_len)# /* Authenticated Tag Length in bytes.
-# Valid values are 16 (most likely), 12 or 8. */
+# Valid values are 16 (most likely), 12 or 8. */
###############################################################################
-ENTRY(aesni_gcm_dec_avx_gen4)
- GCM_ENC_DEC_AVX2 DEC
- ret
-ENDPROC(aesni_gcm_dec_avx_gen4)
+ENTRY(aesni_gcm_finalize_avx_gen4)
+ FUNC_SAVE
+ mov keysize,%eax
+ cmp $32, %eax
+ je key_256_finalize4
+ cmp $16, %eax
+ je key_128_finalize4
+ # must be 192
+ GCM_COMPLETE GHASH_MUL_AVX2, 11, arg3, arg4
+ FUNC_RESTORE
+ ret
+key_128_finalize4:
+ GCM_COMPLETE GHASH_MUL_AVX2, 9, arg3, arg4
+ FUNC_RESTORE
+ ret
+key_256_finalize4:
+ GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4
+ FUNC_RESTORE
+ ret
+ENDPROC(aesni_gcm_finalize_avx_gen4)
#endif /* CONFIG_AS_AVX2 */
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 661f7daf43da..1321700d6647 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -84,7 +84,7 @@ struct gcm_context_data {
u8 current_counter[GCM_BLOCK_LEN];
u64 partial_block_len;
u64 unused;
- u8 hash_keys[GCM_BLOCK_LEN * 8];
+ u8 hash_keys[GCM_BLOCK_LEN * 16];
};
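
Doubling hash_keys from 8 to 16 blocks makes room for the Karatsuba terms next to the hash-key powers. The layout below is inferred from the HashKey_i/HashKey_i_k offsets used by the AVX assembly and is a sketch rather than an authoritative map:

	/*
	 * hash_keys[  0..127]: HashKey^1 .. HashKey^8      (8 x 16 bytes)
	 * hash_keys[128..255]: HashKey^1_k .. HashKey^8_k  (8 x 16 bytes),
	 *                      each the XOR of the high and low 64-bit
	 *                      halves of the corresponding HashKey^i
	 */
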
asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
@@ -175,6 +175,32 @@ asmlinkage void aesni_gcm_finalize(void *ctx,
struct gcm_context_data *gdata,
u8 *auth_tag, unsigned long auth_tag_len);
+static struct aesni_gcm_tfm_s {
+	void (*init)(void *ctx,
+		     struct gcm_context_data *gdata,
+		     u8 *iv,
+		     u8 *hash_subkey, const u8 *aad,
+		     unsigned long aad_len);
+	void (*enc_update)(void *ctx,
+			   struct gcm_context_data *gdata, u8 *out,
+			   const u8 *in,
+			   unsigned long plaintext_len);
+	void (*dec_update)(void *ctx,
+			   struct gcm_context_data *gdata, u8 *out,
+			   const u8 *in,
+			   unsigned long ciphertext_len);
+	void (*finalize)(void *ctx,
+			 struct gcm_context_data *gdata,
+			 u8 *auth_tag, unsigned long auth_tag_len);
+} *aesni_gcm_tfm;
+
+struct aesni_gcm_tfm_s aesni_gcm_tfm_sse = {
+ .init = &aesni_gcm_init,
+ .enc_update = &aesni_gcm_enc_update,
+ .dec_update = &aesni_gcm_dec_update,
+ .finalize = &aesni_gcm_finalize,
+};
+
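
With the function-pointer table in place, a GCM operation becomes an init/update/finalize sequence inside one FPU section, as gcmaes_crypt_by_sg does below. A minimal one-shot sketch of the intended usage (gcm_one_shot and its parameter names are illustrative, not kernel code):

	static void gcm_one_shot(void *aes_ctx, struct gcm_context_data *data,
				 u8 *iv, u8 *hash_subkey,
				 const u8 *aad, unsigned long aad_len,
				 u8 *dst, const u8 *src, unsigned long len,
				 u8 *tag, unsigned long tag_len)
	{
		struct aesni_gcm_tfm_s *gcm = aesni_gcm_tfm;

		kernel_fpu_begin();
		gcm->init(aes_ctx, data, iv, hash_subkey, aad, aad_len);
		/* enc_update (or dec_update) may be called repeatedly,
		 * once per scatterlist segment */
		gcm->enc_update(aes_ctx, data, dst, src, len);
		gcm->finalize(aes_ctx, data, tag, tag_len);
		kernel_fpu_end();
	}
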
#ifdef CONFIG_AS_AVX
asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
void *keys, u8 *out, unsigned int num_bytes);
@@ -183,136 +209,94 @@ asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
void *keys, u8 *out, unsigned int num_bytes);
/*
- * asmlinkage void aesni_gcm_precomp_avx_gen2()
+ * asmlinkage void aesni_gcm_init_avx_gen2()
* gcm_data *my_ctx_data, context data
* u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
*/
-asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
+asmlinkage void aesni_gcm_init_avx_gen2(void *my_ctx_data,
+ struct gcm_context_data *gdata,
+ u8 *iv,
+ u8 *hash_subkey,
+ const u8 *aad,
+ unsigned long aad_len);
+
+asmlinkage void aesni_gcm_enc_update_avx_gen2(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in, unsigned long plaintext_len);
+asmlinkage void aesni_gcm_dec_update_avx_gen2(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in,
+ unsigned long ciphertext_len);
+asmlinkage void aesni_gcm_finalize_avx_gen2(void *ctx,
+ struct gcm_context_data *gdata,
+ u8 *auth_tag, unsigned long auth_tag_len);
-asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
-asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
-static void aesni_gcm_enc_avx(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long plaintext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len)
-{
- struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
- if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)){
- aesni_gcm_enc(ctx, data, out, in,
- plaintext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
- } else {
- aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
- aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- }
-}
+struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen2 = {
+ .init = &aesni_gcm_init_avx_gen2,
+ .enc_update = &aesni_gcm_enc_update_avx_gen2,
+ .dec_update = &aesni_gcm_dec_update_avx_gen2,
+ .finalize = &aesni_gcm_finalize_avx_gen2,
+};
-static void aesni_gcm_dec_avx(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long ciphertext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len)
-{
- struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
- if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_dec(ctx, data, out, in,
- ciphertext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
- } else {
- aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
- aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- }
-}
#endif
#ifdef CONFIG_AS_AVX2
/*
- * asmlinkage void aesni_gcm_precomp_avx_gen4()
+ * asmlinkage void aesni_gcm_init_avx_gen4()
* gcm_data *my_ctx_data, context data
* u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
*/
-asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
+asmlinkage void aesni_gcm_init_avx_gen4(void *my_ctx_data,
+ struct gcm_context_data *gdata,
+ u8 *iv,
+ u8 *hash_subkey,
+ const u8 *aad,
+ unsigned long aad_len);
+
+asmlinkage void aesni_gcm_enc_update_avx_gen4(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in, unsigned long plaintext_len);
+asmlinkage void aesni_gcm_dec_update_avx_gen4(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
+ const u8 *in,
+ unsigned long ciphertext_len);
+asmlinkage void aesni_gcm_finalize_avx_gen4(void *ctx,
+ struct gcm_context_data *gdata,
+ u8 *auth_tag, unsigned long auth_tag_len);
-asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
const u8 *in, unsigned long plaintext_len, u8 *iv,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
-asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
+asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx,
+ struct gcm_context_data *gdata, u8 *out,
const u8 *in, unsigned long ciphertext_len, u8 *iv,
const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
-static void aesni_gcm_enc_avx2(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long plaintext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len)
-{
- struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
- if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_enc(ctx, data, out, in,
- plaintext_len, iv, hash_subkey, aad,
- aad_len, auth_tag, auth_tag_len);
- } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
- aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
- aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- } else {
- aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
- aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- }
-}
+struct aesni_gcm_tfm_s aesni_gcm_tfm_avx_gen4 = {
+ .init = &aesni_gcm_init_avx_gen4,
+ .enc_update = &aesni_gcm_enc_update_avx_gen4,
+ .dec_update = &aesni_gcm_dec_update_avx_gen4,
+ .finalize = &aesni_gcm_finalize_avx_gen4,
+};
-static void aesni_gcm_dec_avx2(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long ciphertext_len, u8 *iv,
- u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
- u8 *auth_tag, unsigned long auth_tag_len)
-{
- struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx;
- if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx-> key_length != AES_KEYSIZE_128)) {
- aesni_gcm_dec(ctx, data, out, in,
- ciphertext_len, iv, hash_subkey,
- aad, aad_len, auth_tag, auth_tag_len);
- } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
- aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
- aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- } else {
- aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
- aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
- aad_len, auth_tag, auth_tag_len);
- }
-}
#endif
-static void (*aesni_gcm_enc_tfm)(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long plaintext_len,
- u8 *iv, u8 *hash_subkey, const u8 *aad,
- unsigned long aad_len, u8 *auth_tag,
- unsigned long auth_tag_len);
-
-static void (*aesni_gcm_dec_tfm)(void *ctx,
- struct gcm_context_data *data, u8 *out,
- const u8 *in, unsigned long ciphertext_len,
- u8 *iv, u8 *hash_subkey, const u8 *aad,
- unsigned long aad_len, u8 *auth_tag,
- unsigned long auth_tag_len);
-
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
@@ -794,6 +778,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+ struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
struct gcm_context_data data AESNI_ALIGN_ATTR;
struct scatter_walk dst_sg_walk = {};
unsigned long left = req->cryptlen;
@@ -811,6 +796,15 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
if (!enc)
left -= auth_tag_len;
+#ifdef CONFIG_AS_AVX2
+ if (left < AVX_GEN4_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen4)
+ gcm_tfm = &aesni_gcm_tfm_avx_gen2;
+#endif
+#ifdef CONFIG_AS_AVX
+ if (left < AVX_GEN2_OPTSIZE && gcm_tfm == &aesni_gcm_tfm_avx_gen2)
+ gcm_tfm = &aesni_gcm_tfm_sse;
+#endif
+
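+	/*
+	 * The two downgrades above rely on the size thresholds defined near
+	 * the top of this file, believed to be
+	 *
+	 *	#define AVX_GEN2_OPTSIZE 640
+	 *	#define AVX_GEN4_OPTSIZE 4096
+	 *
+	 * so short requests skip the wider code paths, whose per-call setup
+	 * cost would outweigh their throughput gain.
+	 */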
/* Linearize assoc, if not already linear */
if (req->src->length >= assoclen && req->src->length &&
(!PageHighMem(sg_page(req->src)) ||
@@ -835,7 +829,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
}
kernel_fpu_begin();
- aesni_gcm_init(aes_ctx, &data, iv,
+ gcm_tfm->init(aes_ctx, &data, iv,
hash_subkey, assoc, assoclen);
if (req->src != req->dst) {
while (left) {
@@ -846,10 +840,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
len = min(srclen, dstlen);
if (len) {
if (enc)
- aesni_gcm_enc_update(aes_ctx, &data,
+ gcm_tfm->enc_update(aes_ctx, &data,
dst, src, len);
else
- aesni_gcm_dec_update(aes_ctx, &data,
+ gcm_tfm->dec_update(aes_ctx, &data,
dst, src, len);
}
left -= len;
@@ -867,10 +861,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
len = scatterwalk_clamp(&src_sg_walk, left);
if (len) {
if (enc)
- aesni_gcm_enc_update(aes_ctx, &data,
+ gcm_tfm->enc_update(aes_ctx, &data,
src, src, len);
else
- aesni_gcm_dec_update(aes_ctx, &data,
+ gcm_tfm->dec_update(aes_ctx, &data,
src, src, len);
}
left -= len;
@@ -879,7 +873,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
scatterwalk_done(&src_sg_walk, 1, left);
}
}
- aesni_gcm_finalize(aes_ctx, &data, authTag, auth_tag_len);
+ gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
kernel_fpu_end();
if (!assocmem)
@@ -912,147 +906,15 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
- u8 one_entry_in_sg = 0;
- u8 *src, *dst, *assoc;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk = {};
- struct gcm_context_data data AESNI_ALIGN_ATTR;
-
- if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
- aesni_gcm_enc_tfm == aesni_gcm_enc ||
- req->cryptlen < AVX_GEN2_OPTSIZE) {
- return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
- aes_ctx);
- }
- if (sg_is_last(req->src) &&
- (!PageHighMem(sg_page(req->src)) ||
- req->src->offset + req->src->length <= PAGE_SIZE) &&
- sg_is_last(req->dst) &&
- (!PageHighMem(sg_page(req->dst)) ||
- req->dst->offset + req->dst->length <= PAGE_SIZE)) {
- one_entry_in_sg = 1;
- scatterwalk_start(&src_sg_walk, req->src);
- assoc = scatterwalk_map(&src_sg_walk);
- src = assoc + req->assoclen;
- dst = src;
- if (unlikely(req->src != req->dst)) {
- scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
- }
- } else {
- /* Allocate memory for src, dst, assoc */
- assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
- GFP_ATOMIC);
- if (unlikely(!assoc))
- return -ENOMEM;
- scatterwalk_map_and_copy(assoc, req->src, 0,
- req->assoclen + req->cryptlen, 0);
- src = assoc + req->assoclen;
- dst = src;
- }
-
- kernel_fpu_begin();
- aesni_gcm_enc_tfm(aes_ctx, &data, dst, src, req->cryptlen, iv,
- hash_subkey, assoc, assoclen,
- dst + req->cryptlen, auth_tag_len);
- kernel_fpu_end();
-
- /* The authTag (aka the Integrity Check Value) needs to be written
- * back to the packet. */
- if (one_entry_in_sg) {
- if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst - req->assoclen);
- scatterwalk_advance(&dst_sg_walk, req->dst->length);
- scatterwalk_done(&dst_sg_walk, 1, 0);
- }
- scatterwalk_unmap(assoc);
- scatterwalk_advance(&src_sg_walk, req->src->length);
- scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
- } else {
- scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
- req->cryptlen + auth_tag_len, 1);
- kfree(assoc);
- }
- return 0;
+ return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
+ aes_ctx);
}
static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
u8 *hash_subkey, u8 *iv, void *aes_ctx)
{
- u8 one_entry_in_sg = 0;
- u8 *src, *dst, *assoc;
- unsigned long tempCipherLen = 0;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- unsigned long auth_tag_len = crypto_aead_authsize(tfm);
- u8 authTag[16];
- struct scatter_walk src_sg_walk;
- struct scatter_walk dst_sg_walk = {};
- struct gcm_context_data data AESNI_ALIGN_ATTR;
- int retval = 0;
-
- if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
- aesni_gcm_enc_tfm == aesni_gcm_enc ||
- req->cryptlen < AVX_GEN2_OPTSIZE) {
- return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
- aes_ctx);
- }
- tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
-
- if (sg_is_last(req->src) &&
- (!PageHighMem(sg_page(req->src)) ||
- req->src->offset + req->src->length <= PAGE_SIZE) &&
- sg_is_last(req->dst) && req->dst->length &&
- (!PageHighMem(sg_page(req->dst)) ||
- req->dst->offset + req->dst->length <= PAGE_SIZE)) {
- one_entry_in_sg = 1;
- scatterwalk_start(&src_sg_walk, req->src);
- assoc = scatterwalk_map(&src_sg_walk);
- src = assoc + req->assoclen;
- dst = src;
- if (unlikely(req->src != req->dst)) {
- scatterwalk_start(&dst_sg_walk, req->dst);
- dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
- }
- } else {
- /* Allocate memory for src, dst, assoc */
- assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
- if (!assoc)
- return -ENOMEM;
- scatterwalk_map_and_copy(assoc, req->src, 0,
- req->assoclen + req->cryptlen, 0);
- src = assoc + req->assoclen;
- dst = src;
- }
-
-
- kernel_fpu_begin();
- aesni_gcm_dec_tfm(aes_ctx, &data, dst, src, tempCipherLen, iv,
- hash_subkey, assoc, assoclen,
- authTag, auth_tag_len);
- kernel_fpu_end();
-
- /* Compare generated tag with passed in tag. */
- retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
- -EBADMSG : 0;
-
- if (one_entry_in_sg) {
- if (unlikely(req->src != req->dst)) {
- scatterwalk_unmap(dst - req->assoclen);
- scatterwalk_advance(&dst_sg_walk, req->dst->length);
- scatterwalk_done(&dst_sg_walk, 1, 0);
- }
- scatterwalk_unmap(assoc);
- scatterwalk_advance(&src_sg_walk, req->src->length);
- scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
- } else {
- scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
- tempCipherLen, 1);
- kfree(assoc);
- }
- return retval;
-
+ return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
+ aes_ctx);
}
static int helper_rfc4106_encrypt(struct aead_request *req)
@@ -1420,21 +1282,18 @@ static int __init aesni_init(void)
#ifdef CONFIG_AS_AVX2
if (boot_cpu_has(X86_FEATURE_AVX2)) {
pr_info("AVX2 version of gcm_enc/dec engaged.\n");
- aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
- aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
+ aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen4;
} else
#endif
#ifdef CONFIG_AS_AVX
if (boot_cpu_has(X86_FEATURE_AVX)) {
pr_info("AVX version of gcm_enc/dec engaged.\n");
- aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
- aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
+ aesni_gcm_tfm = &aesni_gcm_tfm_avx_gen2;
} else
#endif
{
pr_info("SSE version of gcm_enc/dec engaged.\n");
- aesni_gcm_enc_tfm = aesni_gcm_enc;
- aesni_gcm_dec_tfm = aesni_gcm_dec;
+ aesni_gcm_tfm = &aesni_gcm_tfm_sse;
}
aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 41034745d6a2..d1ce49119da8 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -1,5 +1,5 @@
/*
- * Glue Code for the AVX assembler implemention of the Cast5 Cipher
+ * Glue Code for the AVX assembler implementation of the Cast5 Cipher
*
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 9fb66b5e94b2..18965c39305e 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -1,5 +1,5 @@
/*
- * Glue Code for the AVX assembler implemention of the Cast6 Cipher
+ * Glue Code for the AVX assembler implementation of the Cast6 Cipher
*
* Copyright (C) 2012 Johannes Goetzfried
* <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
diff --git a/arch/x86/crypto/chacha-avx2-x86_64.S b/arch/x86/crypto/chacha-avx2-x86_64.S
new file mode 100644
index 000000000000..32903fd450af
--- /dev/null
+++ b/arch/x86/crypto/chacha-avx2-x86_64.S
@@ -0,0 +1,1025 @@
+/*
+ * ChaCha 256-bit cipher algorithm, x64 AVX2 functions
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/linkage.h>
+
+.section .rodata.cst32.ROT8, "aM", @progbits, 32
+.align 32
+ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003
+ .octa 0x0e0d0c0f0a09080b0605040702010003
+
+.section .rodata.cst32.ROT16, "aM", @progbits, 32
+.align 32
+ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302
+ .octa 0x0d0c0f0e09080b0a0504070601000302
+
+.section .rodata.cst32.CTRINC, "aM", @progbits, 32
+.align 32
+CTRINC: .octa 0x00000003000000020000000100000000
+ .octa 0x00000007000000060000000500000004
+
+.section .rodata.cst32.CTR2BL, "aM", @progbits, 32
+.align 32
+CTR2BL: .octa 0x00000000000000000000000000000000
+ .octa 0x00000000000000000000000000000001
+
+.section .rodata.cst32.CTR4BL, "aM", @progbits, 32
+.align 32
+CTR4BL: .octa 0x00000000000000000000000000000002
+ .octa 0x00000000000000000000000000000003
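+
+# CTRINC supplies per-lane block-counter offsets 0..7 for the eight-block
+# version; CTR2BL and CTR4BL add the counter offsets (state word 12) for
+# blocks 0/1 and 2/3 in the two- and four-block versions.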
+
+.text
+
+ENTRY(chacha_2block_xor_avx2)
+ # %rdi: Input state matrix, s
+ # %rsi: up to 2 data blocks output, o
+ # %rdx: up to 2 data blocks input, i
+ # %rcx: input/output length in bytes
+ # %r8d: nrounds
+
+ # This function encrypts two ChaCha blocks by loading the state
+ # matrix twice across four AVX registers. It performs matrix operations
+ # on four words in each matrix in parallel, but requires shuffling to
+ # rearrange the words after each round.
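+ # 8/16-bit rotations use vpshufb with the ROT8/ROT16 masks above;
+ # 7/12-bit rotations use the traditional shift+OR, as AVX2 has no
+ # native 32-bit rotate instruction.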
+
+ vzeroupper
+
+ # x0..3[0-1] = s0..3
+ vbroadcasti128 0x00(%rdi),%ymm0
+ vbroadcasti128 0x10(%rdi),%ymm1
+ vbroadcasti128 0x20(%rdi),%ymm2
+ vbroadcasti128 0x30(%rdi),%ymm3
+
+ vpaddd CTR2BL(%rip),%ymm3,%ymm3
+
+ vmovdqa %ymm0,%ymm8
+ vmovdqa %ymm1,%ymm9
+ vmovdqa %ymm2,%ymm10
+ vmovdqa %ymm3,%ymm11
+
+ vmovdqa ROT8(%rip),%ymm4
+ vmovdqa ROT16(%rip),%ymm5
+
+ mov %rcx,%rax
+
+.Ldoubleround:
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpshufb %ymm5,%ymm3,%ymm3
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vmovdqa %ymm1,%ymm6
+ vpslld $12,%ymm6,%ymm6
+ vpsrld $20,%ymm1,%ymm1
+ vpor %ymm6,%ymm1,%ymm1
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpshufb %ymm4,%ymm3,%ymm3
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vmovdqa %ymm1,%ymm7
+ vpslld $7,%ymm7,%ymm7
+ vpsrld $25,%ymm1,%ymm1
+ vpor %ymm7,%ymm1,%ymm1
+
+ # x1 = shuffle32(x1, MASK(0, 3, 2, 1))
+ vpshufd $0x39,%ymm1,%ymm1
+ # x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+ vpshufd $0x4e,%ymm2,%ymm2
+ # x3 = shuffle32(x3, MASK(2, 1, 0, 3))
+ vpshufd $0x93,%ymm3,%ymm3
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpshufb %ymm5,%ymm3,%ymm3
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vmovdqa %ymm1,%ymm6
+ vpslld $12,%ymm6,%ymm6
+ vpsrld $20,%ymm1,%ymm1
+ vpor %ymm6,%ymm1,%ymm1
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpshufb %ymm4,%ymm3,%ymm3
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vmovdqa %ymm1,%ymm7
+ vpslld $7,%ymm7,%ymm7
+ vpsrld $25,%ymm1,%ymm1
+ vpor %ymm7,%ymm1,%ymm1
+
+ # x1 = shuffle32(x1, MASK(2, 1, 0, 3))
+ vpshufd $0x93,%ymm1,%ymm1
+ # x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+ vpshufd $0x4e,%ymm2,%ymm2
+ # x3 = shuffle32(x3, MASK(0, 3, 2, 1))
+ vpshufd $0x39,%ymm3,%ymm3
+
+ sub $2,%r8d
+ jnz .Ldoubleround
+
+ # o0 = i0 ^ (x0 + s0)
+ vpaddd %ymm8,%ymm0,%ymm7
+ cmp $0x10,%rax
+ jl .Lxorpart2
+ vpxor 0x00(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x00(%rsi)
+ vextracti128 $1,%ymm7,%xmm0
+ # o1 = i1 ^ (x1 + s1)
+ vpaddd %ymm9,%ymm1,%ymm7
+ cmp $0x20,%rax
+ jl .Lxorpart2
+ vpxor 0x10(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x10(%rsi)
+ vextracti128 $1,%ymm7,%xmm1
+ # o2 = i2 ^ (x2 + s2)
+ vpaddd %ymm10,%ymm2,%ymm7
+ cmp $0x30,%rax
+ jl .Lxorpart2
+ vpxor 0x20(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x20(%rsi)
+ vextracti128 $1,%ymm7,%xmm2
+ # o3 = i3 ^ (x3 + s3)
+ vpaddd %ymm11,%ymm3,%ymm7
+ cmp $0x40,%rax
+ jl .Lxorpart2
+ vpxor 0x30(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x30(%rsi)
+ vextracti128 $1,%ymm7,%xmm3
+
+ # xor and write second block
+ vmovdqa %xmm0,%xmm7
+ cmp $0x50,%rax
+ jl .Lxorpart2
+ vpxor 0x40(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x40(%rsi)
+
+ vmovdqa %xmm1,%xmm7
+ cmp $0x60,%rax
+ jl .Lxorpart2
+ vpxor 0x50(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x50(%rsi)
+
+ vmovdqa %xmm2,%xmm7
+ cmp $0x70,%rax
+ jl .Lxorpart2
+ vpxor 0x60(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x60(%rsi)
+
+ vmovdqa %xmm3,%xmm7
+ cmp $0x80,%rax
+ jl .Lxorpart2
+ vpxor 0x70(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x70(%rsi)
+
+.Ldone2:
+ vzeroupper
+ ret
+
+.Lxorpart2:
+ # xor remaining bytes from partial register into output
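+ # %rax holds the total length; round it down to the last full 16-byte
+ # block and bounce the %r9 remainder bytes through a 32-byte-aligned
+ # stack buffer with rep movsb, so the final vpxor still operates on a
+ # whole register holding the keystream in %xmm7.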
+ mov %rax,%r9
+ and $0x0f,%r9
+ jz .Ldone2
+ and $~0x0f,%rax
+
+ mov %rsi,%r11
+
+ lea 8(%rsp),%r10
+ sub $0x10,%rsp
+ and $~31,%rsp
+
+ lea (%rdx,%rax),%rsi
+ mov %rsp,%rdi
+ mov %r9,%rcx
+ rep movsb
+
+ vpxor 0x00(%rsp),%xmm7,%xmm7
+ vmovdqa %xmm7,0x00(%rsp)
+
+ mov %rsp,%rsi
+ lea (%r11,%rax),%rdi
+ mov %r9,%rcx
+ rep movsb
+
+ lea -8(%r10),%rsp
+ jmp .Ldone2
+
+ENDPROC(chacha_2block_xor_avx2)
+
+ENTRY(chacha_4block_xor_avx2)
+ # %rdi: Input state matrix, s
+ # %rsi: up to 4 data blocks output, o
+ # %rdx: up to 4 data blocks input, i
+ # %rcx: input/output length in bytes
+ # %r8d: nrounds
+
+ # This function encrypts four ChaCha blocks by loading the state
+ # matrix four times across eight AVX registers. It performs matrix
+ # operations on four words in two matrices in parallel, followed by
+ # the same operations on the other two matrices. Since the required
+ # word shuffling has a rather high latency, we can do the
+ # arithmetic on two matrix-pairs without much slowdown.
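+ # %ymm0..3 and %ymm4..7 hold the two matrix pairs; %ymm11..15 keep
+ # copies of the initial state (including the per-block counters) for
+ # the final feed-forward additions.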
+
+ vzeroupper
+
+ # x0..3[0-3] = s0..3
+ vbroadcasti128 0x00(%rdi),%ymm0
+ vbroadcasti128 0x10(%rdi),%ymm1
+ vbroadcasti128 0x20(%rdi),%ymm2
+ vbroadcasti128 0x30(%rdi),%ymm3
+
+ vmovdqa %ymm0,%ymm4
+ vmovdqa %ymm1,%ymm5
+ vmovdqa %ymm2,%ymm6
+ vmovdqa %ymm3,%ymm7
+
+ vpaddd CTR2BL(%rip),%ymm3,%ymm3
+ vpaddd CTR4BL(%rip),%ymm7,%ymm7
+
+ vmovdqa %ymm0,%ymm11
+ vmovdqa %ymm1,%ymm12
+ vmovdqa %ymm2,%ymm13
+ vmovdqa %ymm3,%ymm14
+ vmovdqa %ymm7,%ymm15
+
+ vmovdqa ROT8(%rip),%ymm8
+ vmovdqa ROT16(%rip),%ymm9
+
+ mov %rcx,%rax
+
+.Ldoubleround4:
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpshufb %ymm9,%ymm3,%ymm3
+
+ vpaddd %ymm5,%ymm4,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+ vpshufb %ymm9,%ymm7,%ymm7
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vmovdqa %ymm1,%ymm10
+ vpslld $12,%ymm10,%ymm10
+ vpsrld $20,%ymm1,%ymm1
+ vpor %ymm10,%ymm1,%ymm1
+
+ vpaddd %ymm7,%ymm6,%ymm6
+ vpxor %ymm6,%ymm5,%ymm5
+ vmovdqa %ymm5,%ymm10
+ vpslld $12,%ymm10,%ymm10
+ vpsrld $20,%ymm5,%ymm5
+ vpor %ymm10,%ymm5,%ymm5
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpshufb %ymm8,%ymm3,%ymm3
+
+ vpaddd %ymm5,%ymm4,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+ vpshufb %ymm8,%ymm7,%ymm7
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vmovdqa %ymm1,%ymm10
+ vpslld $7,%ymm10,%ymm10
+ vpsrld $25,%ymm1,%ymm1
+ vpor %ymm10,%ymm1,%ymm1
+
+ vpaddd %ymm7,%ymm6,%ymm6
+ vpxor %ymm6,%ymm5,%ymm5
+ vmovdqa %ymm5,%ymm10
+ vpslld $7,%ymm10,%ymm10
+ vpsrld $25,%ymm5,%ymm5
+ vpor %ymm10,%ymm5,%ymm5
+
+ # x1 = shuffle32(x1, MASK(0, 3, 2, 1))
+ vpshufd $0x39,%ymm1,%ymm1
+ vpshufd $0x39,%ymm5,%ymm5
+ # x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+ vpshufd $0x4e,%ymm2,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ # x3 = shuffle32(x3, MASK(2, 1, 0, 3))
+ vpshufd $0x93,%ymm3,%ymm3
+ vpshufd $0x93,%ymm7,%ymm7
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpshufb %ymm9,%ymm3,%ymm3
+
+ vpaddd %ymm5,%ymm4,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+ vpshufb %ymm9,%ymm7,%ymm7
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vmovdqa %ymm1,%ymm10
+ vpslld $12,%ymm10,%ymm10
+ vpsrld $20,%ymm1,%ymm1
+ vpor %ymm10,%ymm1,%ymm1
+
+ vpaddd %ymm7,%ymm6,%ymm6
+ vpxor %ymm6,%ymm5,%ymm5
+ vmovdqa %ymm5,%ymm10
+ vpslld $12,%ymm10,%ymm10
+ vpsrld $20,%ymm5,%ymm5
+ vpor %ymm10,%ymm5,%ymm5
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpshufb %ymm8,%ymm3,%ymm3
+
+ vpaddd %ymm5,%ymm4,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+ vpshufb %ymm8,%ymm7,%ymm7
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vmovdqa %ymm1,%ymm10
+ vpslld $7,%ymm10,%ymm10
+ vpsrld $25,%ymm1,%ymm1
+ vpor %ymm10,%ymm1,%ymm1
+
+ vpaddd %ymm7,%ymm6,%ymm6
+ vpxor %ymm6,%ymm5,%ymm5
+ vmovdqa %ymm5,%ymm10
+ vpslld $7,%ymm10,%ymm10
+ vpsrld $25,%ymm5,%ymm5
+ vpor %ymm10,%ymm5,%ymm5
+
+ # x1 = shuffle32(x1, MASK(2, 1, 0, 3))
+ vpshufd $0x93,%ymm1,%ymm1
+ vpshufd $0x93,%ymm5,%ymm5
+ # x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+ vpshufd $0x4e,%ymm2,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ # x3 = shuffle32(x3, MASK(0, 3, 2, 1))
+ vpshufd $0x39,%ymm3,%ymm3
+ vpshufd $0x39,%ymm7,%ymm7
+
+ sub $2,%r8d
+ jnz .Ldoubleround4
+
+ # o0 = i0 ^ (x0 + s0), first block
+ vpaddd %ymm11,%ymm0,%ymm10
+ cmp $0x10,%rax
+ jl .Lxorpart4
+ vpxor 0x00(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x00(%rsi)
+ vextracti128 $1,%ymm10,%xmm0
+ # o1 = i1 ^ (x1 + s1), first block
+ vpaddd %ymm12,%ymm1,%ymm10
+ cmp $0x20,%rax
+ jl .Lxorpart4
+ vpxor 0x10(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x10(%rsi)
+ vextracti128 $1,%ymm10,%xmm1
+ # o2 = i2 ^ (x2 + s2), first block
+ vpaddd %ymm13,%ymm2,%ymm10
+ cmp $0x30,%rax
+ jl .Lxorpart4
+ vpxor 0x20(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x20(%rsi)
+ vextracti128 $1,%ymm10,%xmm2
+ # o3 = i3 ^ (x3 + s3), first block
+ vpaddd %ymm14,%ymm3,%ymm10
+ cmp $0x40,%rax
+ jl .Lxorpart4
+ vpxor 0x30(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x30(%rsi)
+ vextracti128 $1,%ymm10,%xmm3
+
+ # xor and write second block
+ vmovdqa %xmm0,%xmm10
+ cmp $0x50,%rax
+ jl .Lxorpart4
+ vpxor 0x40(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x40(%rsi)
+
+ vmovdqa %xmm1,%xmm10
+ cmp $0x60,%rax
+ jl .Lxorpart4
+ vpxor 0x50(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x50(%rsi)
+
+ vmovdqa %xmm2,%xmm10
+ cmp $0x70,%rax
+ jl .Lxorpart4
+ vpxor 0x60(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x60(%rsi)
+
+ vmovdqa %xmm3,%xmm10
+ cmp $0x80,%rax
+ jl .Lxorpart4
+ vpxor 0x70(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x70(%rsi)
+
+ # o0 = i0 ^ (x0 + s0), third block
+ vpaddd %ymm11,%ymm4,%ymm10
+ cmp $0x90,%rax
+ jl .Lxorpart4
+ vpxor 0x80(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x80(%rsi)
+ vextracti128 $1,%ymm10,%xmm4
+ # o1 = i1 ^ (x1 + s1), third block
+ vpaddd %ymm12,%ymm5,%ymm10
+ cmp $0xa0,%rax
+ jl .Lxorpart4
+ vpxor 0x90(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x90(%rsi)
+ vextracti128 $1,%ymm10,%xmm5
+ # o2 = i2 ^ (x2 + s2), third block
+ vpaddd %ymm13,%ymm6,%ymm10
+ cmp $0xb0,%rax
+ jl .Lxorpart4
+ vpxor 0xa0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xa0(%rsi)
+ vextracti128 $1,%ymm10,%xmm6
+ # o3 = i3 ^ (x3 + s3), third block
+ vpaddd %ymm15,%ymm7,%ymm10
+ cmp $0xc0,%rax
+ jl .Lxorpart4
+ vpxor 0xb0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xb0(%rsi)
+ vextracti128 $1,%ymm10,%xmm7
+
+ # xor and write fourth block
+ vmovdqa %xmm4,%xmm10
+ cmp $0xd0,%rax
+ jl .Lxorpart4
+ vpxor 0xc0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xc0(%rsi)
+
+ vmovdqa %xmm5,%xmm10
+ cmp $0xe0,%rax
+ jl .Lxorpart4
+ vpxor 0xd0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xd0(%rsi)
+
+ vmovdqa %xmm6,%xmm10
+ cmp $0xf0,%rax
+ jl .Lxorpart4
+ vpxor 0xe0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xe0(%rsi)
+
+ vmovdqa %xmm7,%xmm10
+ cmp $0x100,%rax
+ jl .Lxorpart4
+ vpxor 0xf0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xf0(%rsi)
+
+.Ldone4:
+ vzeroupper
+ ret
+
+.Lxorpart4:
+ # xor remaining bytes from partial register into output
+ mov %rax,%r9
+ and $0x0f,%r9
+ jz .Ldone4
+ and $~0x0f,%rax
+
+ mov %rsi,%r11
+
+ lea 8(%rsp),%r10
+ sub $0x10,%rsp
+ and $~31,%rsp
+
+ lea (%rdx,%rax),%rsi
+ mov %rsp,%rdi
+ mov %r9,%rcx
+ rep movsb
+
+ vpxor 0x00(%rsp),%xmm10,%xmm10
+ vmovdqa %xmm10,0x00(%rsp)
+
+ mov %rsp,%rsi
+ lea (%r11,%rax),%rdi
+ mov %r9,%rcx
+ rep movsb
+
+ lea -8(%r10),%rsp
+ jmp .Ldone4
+
+ENDPROC(chacha_4block_xor_avx2)
+
+ENTRY(chacha_8block_xor_avx2)
+ # %rdi: Input state matrix, s
+ # %rsi: up to 8 data blocks output, o
+ # %rdx: up to 8 data blocks input, i
+ # %rcx: input/output length in bytes
+ # %r8d: nrounds
+
+ # This function encrypts eight consecutive ChaCha blocks by loading
+ # the state matrix in AVX registers eight times. As we need some
+ # scratch registers, we save the first four registers on the stack. The
+ # algorithm performs each operation on the corresponding word of each
+ # state matrix, hence requires no word shuffling. For the final XOR
+ # step we transpose the matrix by interleaving 32-, 64- and then 128-bit
+ # words, which allows us to do XOR in AVX registers. 8/16-bit word
+ # rotation is done with the slightly better performing byte shuffling,
+ # 7/12-bit word rotation uses traditional shift+OR.
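+ # After the rounds, lane n of each of %ymm0..15 (and of the four rows
+ # spilled to the stack) holds one state word of block n; the interleave
+ # steps below regroup the sixteen words of each block into contiguous
+ # 32-byte chunks before XORing with the input.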
+
+ vzeroupper
+ # 4 * 32 byte stack, 32-byte aligned
+ lea 8(%rsp),%r10
+ and $~31, %rsp
+ sub $0x80, %rsp
+ mov %rcx,%rax
+
+ # x0..15[0-7] = s[0..15]
+ vpbroadcastd 0x00(%rdi),%ymm0
+ vpbroadcastd 0x04(%rdi),%ymm1
+ vpbroadcastd 0x08(%rdi),%ymm2
+ vpbroadcastd 0x0c(%rdi),%ymm3
+ vpbroadcastd 0x10(%rdi),%ymm4
+ vpbroadcastd 0x14(%rdi),%ymm5
+ vpbroadcastd 0x18(%rdi),%ymm6
+ vpbroadcastd 0x1c(%rdi),%ymm7
+ vpbroadcastd 0x20(%rdi),%ymm8
+ vpbroadcastd 0x24(%rdi),%ymm9
+ vpbroadcastd 0x28(%rdi),%ymm10
+ vpbroadcastd 0x2c(%rdi),%ymm11
+ vpbroadcastd 0x30(%rdi),%ymm12
+ vpbroadcastd 0x34(%rdi),%ymm13
+ vpbroadcastd 0x38(%rdi),%ymm14
+ vpbroadcastd 0x3c(%rdi),%ymm15
+ # x0..3 on stack
+ vmovdqa %ymm0,0x00(%rsp)
+ vmovdqa %ymm1,0x20(%rsp)
+ vmovdqa %ymm2,0x40(%rsp)
+ vmovdqa %ymm3,0x60(%rsp)
+
+ vmovdqa CTRINC(%rip),%ymm1
+ vmovdqa ROT8(%rip),%ymm2
+ vmovdqa ROT16(%rip),%ymm3
+
+ # x12 += counter values 0-7
+ vpaddd %ymm1,%ymm12,%ymm12
+
+.Ldoubleround8:
+ # x0 += x4, x12 = rotl32(x12 ^ x0, 16)
+ vpaddd 0x00(%rsp),%ymm4,%ymm0
+ vmovdqa %ymm0,0x00(%rsp)
+ vpxor %ymm0,%ymm12,%ymm12
+ vpshufb %ymm3,%ymm12,%ymm12
+ # x1 += x5, x13 = rotl32(x13 ^ x1, 16)
+ vpaddd 0x20(%rsp),%ymm5,%ymm0
+ vmovdqa %ymm0,0x20(%rsp)
+ vpxor %ymm0,%ymm13,%ymm13
+ vpshufb %ymm3,%ymm13,%ymm13
+ # x2 += x6, x14 = rotl32(x14 ^ x2, 16)
+ vpaddd 0x40(%rsp),%ymm6,%ymm0
+ vmovdqa %ymm0,0x40(%rsp)
+ vpxor %ymm0,%ymm14,%ymm14
+ vpshufb %ymm3,%ymm14,%ymm14
+ # x3 += x7, x15 = rotl32(x15 ^ x3, 16)
+ vpaddd 0x60(%rsp),%ymm7,%ymm0
+ vmovdqa %ymm0,0x60(%rsp)
+ vpxor %ymm0,%ymm15,%ymm15
+ vpshufb %ymm3,%ymm15,%ymm15
+
+ # x8 += x12, x4 = rotl32(x4 ^ x8, 12)
+ vpaddd %ymm12,%ymm8,%ymm8
+ vpxor %ymm8,%ymm4,%ymm4
+ vpslld $12,%ymm4,%ymm0
+ vpsrld $20,%ymm4,%ymm4
+ vpor %ymm0,%ymm4,%ymm4
+ # x9 += x13, x5 = rotl32(x5 ^ x9, 12)
+ vpaddd %ymm13,%ymm9,%ymm9
+ vpxor %ymm9,%ymm5,%ymm5
+ vpslld $12,%ymm5,%ymm0
+ vpsrld $20,%ymm5,%ymm5
+ vpor %ymm0,%ymm5,%ymm5
+ # x10 += x14, x6 = rotl32(x6 ^ x10, 12)
+ vpaddd %ymm14,%ymm10,%ymm10
+ vpxor %ymm10,%ymm6,%ymm6
+ vpslld $12,%ymm6,%ymm0
+ vpsrld $20,%ymm6,%ymm6
+ vpor %ymm0,%ymm6,%ymm6
+ # x11 += x15, x7 = rotl32(x7 ^ x11, 12)
+ vpaddd %ymm15,%ymm11,%ymm11
+ vpxor %ymm11,%ymm7,%ymm7
+ vpslld $12,%ymm7,%ymm0
+ vpsrld $20,%ymm7,%ymm7
+ vpor %ymm0,%ymm7,%ymm7
+
+ # x0 += x4, x12 = rotl32(x12 ^ x0, 8)
+ vpaddd 0x00(%rsp),%ymm4,%ymm0
+ vmovdqa %ymm0,0x00(%rsp)
+ vpxor %ymm0,%ymm12,%ymm12
+ vpshufb %ymm2,%ymm12,%ymm12
+ # x1 += x5, x13 = rotl32(x13 ^ x1, 8)
+ vpaddd 0x20(%rsp),%ymm5,%ymm0
+ vmovdqa %ymm0,0x20(%rsp)
+ vpxor %ymm0,%ymm13,%ymm13
+ vpshufb %ymm2,%ymm13,%ymm13
+ # x2 += x6, x14 = rotl32(x14 ^ x2, 8)
+ vpaddd 0x40(%rsp),%ymm6,%ymm0
+ vmovdqa %ymm0,0x40(%rsp)
+ vpxor %ymm0,%ymm14,%ymm14
+ vpshufb %ymm2,%ymm14,%ymm14
+ # x3 += x7, x15 = rotl32(x15 ^ x3, 8)
+ vpaddd 0x60(%rsp),%ymm7,%ymm0
+ vmovdqa %ymm0,0x60(%rsp)
+ vpxor %ymm0,%ymm15,%ymm15
+ vpshufb %ymm2,%ymm15,%ymm15
+
+ # x8 += x12, x4 = rotl32(x4 ^ x8, 7)
+ vpaddd %ymm12,%ymm8,%ymm8
+ vpxor %ymm8,%ymm4,%ymm4
+ vpslld $7,%ymm4,%ymm0
+ vpsrld $25,%ymm4,%ymm4
+ vpor %ymm0,%ymm4,%ymm4
+ # x9 += x13, x5 = rotl32(x5 ^ x9, 7)
+ vpaddd %ymm13,%ymm9,%ymm9
+ vpxor %ymm9,%ymm5,%ymm5
+ vpslld $7,%ymm5,%ymm0
+ vpsrld $25,%ymm5,%ymm5
+ vpor %ymm0,%ymm5,%ymm5
+ # x10 += x14, x6 = rotl32(x6 ^ x10, 7)
+ vpaddd %ymm14,%ymm10,%ymm10
+ vpxor %ymm10,%ymm6,%ymm6
+ vpslld $7,%ymm6,%ymm0
+ vpsrld $25,%ymm6,%ymm6
+ vpor %ymm0,%ymm6,%ymm6
+ # x11 += x15, x7 = rotl32(x7 ^ x11, 7)
+ vpaddd %ymm15,%ymm11,%ymm11
+ vpxor %ymm11,%ymm7,%ymm7
+ vpslld $7,%ymm7,%ymm0
+ vpsrld $25,%ymm7,%ymm7
+ vpor %ymm0,%ymm7,%ymm7
+
+ # x0 += x5, x15 = rotl32(x15 ^ x0, 16)
+ vpaddd 0x00(%rsp),%ymm5,%ymm0
+ vmovdqa %ymm0,0x00(%rsp)
+ vpxor %ymm0,%ymm15,%ymm15
+ vpshufb %ymm3,%ymm15,%ymm15
+ # x1 += x6, x12 = rotl32(x12 ^ x1, 16)
+ vpaddd 0x20(%rsp),%ymm6,%ymm0
+ vmovdqa %ymm0,0x20(%rsp)
+ vpxor %ymm0,%ymm12,%ymm12
+ vpshufb %ymm3,%ymm12,%ymm12
+ # x2 += x7, x13 = rotl32(x13 ^ x2, 16)
+ vpaddd 0x40(%rsp),%ymm7,%ymm0
+ vmovdqa %ymm0,0x40(%rsp)
+ vpxor %ymm0,%ymm13,%ymm13
+ vpshufb %ymm3,%ymm13,%ymm13
+ # x3 += x4, x14 = rotl32(x14 ^ x3, 16)
+ vpaddd 0x60(%rsp),%ymm4,%ymm0
+ vmovdqa %ymm0,0x60(%rsp)
+ vpxor %ymm0,%ymm14,%ymm14
+ vpshufb %ymm3,%ymm14,%ymm14
+
+ # x10 += x15, x5 = rotl32(x5 ^ x10, 12)
+ vpaddd %ymm15,%ymm10,%ymm10
+ vpxor %ymm10,%ymm5,%ymm5
+ vpslld $12,%ymm5,%ymm0
+ vpsrld $20,%ymm5,%ymm5
+ vpor %ymm0,%ymm5,%ymm5
+ # x11 += x12, x6 = rotl32(x6 ^ x11, 12)
+ vpaddd %ymm12,%ymm11,%ymm11
+ vpxor %ymm11,%ymm6,%ymm6
+ vpslld $12,%ymm6,%ymm0
+ vpsrld $20,%ymm6,%ymm6
+ vpor %ymm0,%ymm6,%ymm6
+ # x8 += x13, x7 = rotl32(x7 ^ x8, 12)
+ vpaddd %ymm13,%ymm8,%ymm8
+ vpxor %ymm8,%ymm7,%ymm7
+ vpslld $12,%ymm7,%ymm0
+ vpsrld $20,%ymm7,%ymm7
+ vpor %ymm0,%ymm7,%ymm7
+ # x9 += x14, x4 = rotl32(x4 ^ x9, 12)
+ vpaddd %ymm14,%ymm9,%ymm9
+ vpxor %ymm9,%ymm4,%ymm4
+ vpslld $12,%ymm4,%ymm0
+ vpsrld $20,%ymm4,%ymm4
+ vpor %ymm0,%ymm4,%ymm4
+
+ # x0 += x5, x15 = rotl32(x15 ^ x0, 8)
+ vpaddd 0x00(%rsp),%ymm5,%ymm0
+ vmovdqa %ymm0,0x00(%rsp)
+ vpxor %ymm0,%ymm15,%ymm15
+ vpshufb %ymm2,%ymm15,%ymm15
+ # x1 += x6, x12 = rotl32(x12 ^ x1, 8)
+ vpaddd 0x20(%rsp),%ymm6,%ymm0
+ vmovdqa %ymm0,0x20(%rsp)
+ vpxor %ymm0,%ymm12,%ymm12
+ vpshufb %ymm2,%ymm12,%ymm12
+ # x2 += x7, x13 = rotl32(x13 ^ x2, 8)
+ vpaddd 0x40(%rsp),%ymm7,%ymm0
+ vmovdqa %ymm0,0x40(%rsp)
+ vpxor %ymm0,%ymm13,%ymm13
+ vpshufb %ymm2,%ymm13,%ymm13
+ # x3 += x4, x14 = rotl32(x14 ^ x3, 8)
+ vpaddd 0x60(%rsp),%ymm4,%ymm0
+ vmovdqa %ymm0,0x60(%rsp)
+ vpxor %ymm0,%ymm14,%ymm14
+ vpshufb %ymm2,%ymm14,%ymm14
+
+ # x10 += x15, x5 = rotl32(x5 ^ x10, 7)
+ vpaddd %ymm15,%ymm10,%ymm10
+ vpxor %ymm10,%ymm5,%ymm5
+ vpslld $7,%ymm5,%ymm0
+ vpsrld $25,%ymm5,%ymm5
+ vpor %ymm0,%ymm5,%ymm5
+ # x11 += x12, x6 = rotl32(x6 ^ x11, 7)
+ vpaddd %ymm12,%ymm11,%ymm11
+ vpxor %ymm11,%ymm6,%ymm6
+ vpslld $7,%ymm6,%ymm0
+ vpsrld $25,%ymm6,%ymm6
+ vpor %ymm0,%ymm6,%ymm6
+ # x8 += x13, x7 = rotl32(x7 ^ x8, 7)
+ vpaddd %ymm13,%ymm8,%ymm8
+ vpxor %ymm8,%ymm7,%ymm7
+ vpslld $7,%ymm7,%ymm0
+ vpsrld $25,%ymm7,%ymm7
+ vpor %ymm0,%ymm7,%ymm7
+ # x9 += x14, x4 = rotl32(x4 ^ x9, 7)
+ vpaddd %ymm14,%ymm9,%ymm9
+ vpxor %ymm9,%ymm4,%ymm4
+ vpslld $7,%ymm4,%ymm0
+ vpsrld $25,%ymm4,%ymm4
+ vpor %ymm0,%ymm4,%ymm4
+
+ sub $2,%r8d
+ jnz .Ldoubleround8
+
+ # x0..15[0-7] += s[0..15]
+ vpbroadcastd 0x00(%rdi),%ymm0
+ vpaddd 0x00(%rsp),%ymm0,%ymm0
+ vmovdqa %ymm0,0x00(%rsp)
+ vpbroadcastd 0x04(%rdi),%ymm0
+ vpaddd 0x20(%rsp),%ymm0,%ymm0
+ vmovdqa %ymm0,0x20(%rsp)
+ vpbroadcastd 0x08(%rdi),%ymm0
+ vpaddd 0x40(%rsp),%ymm0,%ymm0
+ vmovdqa %ymm0,0x40(%rsp)
+ vpbroadcastd 0x0c(%rdi),%ymm0
+ vpaddd 0x60(%rsp),%ymm0,%ymm0
+ vmovdqa %ymm0,0x60(%rsp)
+ vpbroadcastd 0x10(%rdi),%ymm0
+ vpaddd %ymm0,%ymm4,%ymm4
+ vpbroadcastd 0x14(%rdi),%ymm0
+ vpaddd %ymm0,%ymm5,%ymm5
+ vpbroadcastd 0x18(%rdi),%ymm0
+ vpaddd %ymm0,%ymm6,%ymm6
+ vpbroadcastd 0x1c(%rdi),%ymm0
+ vpaddd %ymm0,%ymm7,%ymm7
+ vpbroadcastd 0x20(%rdi),%ymm0
+ vpaddd %ymm0,%ymm8,%ymm8
+ vpbroadcastd 0x24(%rdi),%ymm0
+ vpaddd %ymm0,%ymm9,%ymm9
+ vpbroadcastd 0x28(%rdi),%ymm0
+ vpaddd %ymm0,%ymm10,%ymm10
+ vpbroadcastd 0x2c(%rdi),%ymm0
+ vpaddd %ymm0,%ymm11,%ymm11
+ vpbroadcastd 0x30(%rdi),%ymm0
+ vpaddd %ymm0,%ymm12,%ymm12
+ vpbroadcastd 0x34(%rdi),%ymm0
+ vpaddd %ymm0,%ymm13,%ymm13
+ vpbroadcastd 0x38(%rdi),%ymm0
+ vpaddd %ymm0,%ymm14,%ymm14
+ vpbroadcastd 0x3c(%rdi),%ymm0
+ vpaddd %ymm0,%ymm15,%ymm15
+
+ # x12 += counter values 0-7
+ vpaddd %ymm1,%ymm12,%ymm12
+
+ # interleave 32-bit words in state n, n+1
+ vmovdqa 0x00(%rsp),%ymm0
+ vmovdqa 0x20(%rsp),%ymm1
+ vpunpckldq %ymm1,%ymm0,%ymm2
+ vpunpckhdq %ymm1,%ymm0,%ymm1
+ vmovdqa %ymm2,0x00(%rsp)
+ vmovdqa %ymm1,0x20(%rsp)
+ vmovdqa 0x40(%rsp),%ymm0
+ vmovdqa 0x60(%rsp),%ymm1
+ vpunpckldq %ymm1,%ymm0,%ymm2
+ vpunpckhdq %ymm1,%ymm0,%ymm1
+ vmovdqa %ymm2,0x40(%rsp)
+ vmovdqa %ymm1,0x60(%rsp)
+ vmovdqa %ymm4,%ymm0
+ vpunpckldq %ymm5,%ymm0,%ymm4
+ vpunpckhdq %ymm5,%ymm0,%ymm5
+ vmovdqa %ymm6,%ymm0
+ vpunpckldq %ymm7,%ymm0,%ymm6
+ vpunpckhdq %ymm7,%ymm0,%ymm7
+ vmovdqa %ymm8,%ymm0
+ vpunpckldq %ymm9,%ymm0,%ymm8
+ vpunpckhdq %ymm9,%ymm0,%ymm9
+ vmovdqa %ymm10,%ymm0
+ vpunpckldq %ymm11,%ymm0,%ymm10
+ vpunpckhdq %ymm11,%ymm0,%ymm11
+ vmovdqa %ymm12,%ymm0
+ vpunpckldq %ymm13,%ymm0,%ymm12
+ vpunpckhdq %ymm13,%ymm0,%ymm13
+ vmovdqa %ymm14,%ymm0
+ vpunpckldq %ymm15,%ymm0,%ymm14
+ vpunpckhdq %ymm15,%ymm0,%ymm15
+
+ # interleave 64-bit words in state n, n+2
+ vmovdqa 0x00(%rsp),%ymm0
+ vmovdqa 0x40(%rsp),%ymm2
+ vpunpcklqdq %ymm2,%ymm0,%ymm1
+ vpunpckhqdq %ymm2,%ymm0,%ymm2
+ vmovdqa %ymm1,0x00(%rsp)
+ vmovdqa %ymm2,0x40(%rsp)
+ vmovdqa 0x20(%rsp),%ymm0
+ vmovdqa 0x60(%rsp),%ymm2
+ vpunpcklqdq %ymm2,%ymm0,%ymm1
+ vpunpckhqdq %ymm2,%ymm0,%ymm2
+ vmovdqa %ymm1,0x20(%rsp)
+ vmovdqa %ymm2,0x60(%rsp)
+ vmovdqa %ymm4,%ymm0
+ vpunpcklqdq %ymm6,%ymm0,%ymm4
+ vpunpckhqdq %ymm6,%ymm0,%ymm6
+ vmovdqa %ymm5,%ymm0
+ vpunpcklqdq %ymm7,%ymm0,%ymm5
+ vpunpckhqdq %ymm7,%ymm0,%ymm7
+ vmovdqa %ymm8,%ymm0
+ vpunpcklqdq %ymm10,%ymm0,%ymm8
+ vpunpckhqdq %ymm10,%ymm0,%ymm10
+ vmovdqa %ymm9,%ymm0
+ vpunpcklqdq %ymm11,%ymm0,%ymm9
+ vpunpckhqdq %ymm11,%ymm0,%ymm11
+ vmovdqa %ymm12,%ymm0
+ vpunpcklqdq %ymm14,%ymm0,%ymm12
+ vpunpckhqdq %ymm14,%ymm0,%ymm14
+ vmovdqa %ymm13,%ymm0
+ vpunpcklqdq %ymm15,%ymm0,%ymm13
+ vpunpckhqdq %ymm15,%ymm0,%ymm15
+
+ # interleave 128-bit words in state n, n+4
+ # xor/write first four blocks
+ vmovdqa 0x00(%rsp),%ymm1
+ vperm2i128 $0x20,%ymm4,%ymm1,%ymm0
+ cmp $0x0020,%rax
+ jl .Lxorpart8
+ vpxor 0x0000(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x0000(%rsi)
+ vperm2i128 $0x31,%ymm4,%ymm1,%ymm4
+
+ vperm2i128 $0x20,%ymm12,%ymm8,%ymm0
+ cmp $0x0040,%rax
+ jl .Lxorpart8
+ vpxor 0x0020(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x0020(%rsi)
+ vperm2i128 $0x31,%ymm12,%ymm8,%ymm12
+
+ vmovdqa 0x40(%rsp),%ymm1
+ vperm2i128 $0x20,%ymm6,%ymm1,%ymm0
+ cmp $0x0060,%rax
+ jl .Lxorpart8
+ vpxor 0x0040(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x0040(%rsi)
+ vperm2i128 $0x31,%ymm6,%ymm1,%ymm6
+
+ vperm2i128 $0x20,%ymm14,%ymm10,%ymm0
+ cmp $0x0080,%rax
+ jl .Lxorpart8
+ vpxor 0x0060(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x0060(%rsi)
+ vperm2i128 $0x31,%ymm14,%ymm10,%ymm14
+
+ vmovdqa 0x20(%rsp),%ymm1
+ vperm2i128 $0x20,%ymm5,%ymm1,%ymm0
+ cmp $0x00a0,%rax
+ jl .Lxorpart8
+ vpxor 0x0080(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x0080(%rsi)
+ vperm2i128 $0x31,%ymm5,%ymm1,%ymm5
+
+ vperm2i128 $0x20,%ymm13,%ymm9,%ymm0
+ cmp $0x00c0,%rax
+ jl .Lxorpart8
+ vpxor 0x00a0(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x00a0(%rsi)
+ vperm2i128 $0x31,%ymm13,%ymm9,%ymm13
+
+ vmovdqa 0x60(%rsp),%ymm1
+ vperm2i128 $0x20,%ymm7,%ymm1,%ymm0
+ cmp $0x00e0,%rax
+ jl .Lxorpart8
+ vpxor 0x00c0(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x00c0(%rsi)
+ vperm2i128 $0x31,%ymm7,%ymm1,%ymm7
+
+ vperm2i128 $0x20,%ymm15,%ymm11,%ymm0
+ cmp $0x0100,%rax
+ jl .Lxorpart8
+ vpxor 0x00e0(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x00e0(%rsi)
+ vperm2i128 $0x31,%ymm15,%ymm11,%ymm15
+
+ # xor remaining blocks, write to output
+ vmovdqa %ymm4,%ymm0
+ cmp $0x0120,%rax
+ jl .Lxorpart8
+ vpxor 0x0100(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x0100(%rsi)
+
+ vmovdqa %ymm12,%ymm0
+ cmp $0x0140,%rax
+ jl .Lxorpart8
+ vpxor 0x0120(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x0120(%rsi)
+
+ vmovdqa %ymm6,%ymm0
+ cmp $0x0160,%rax
+ jl .Lxorpart8
+ vpxor 0x0140(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x0140(%rsi)
+
+ vmovdqa %ymm14,%ymm0
+ cmp $0x0180,%rax
+ jl .Lxorpart8
+ vpxor 0x0160(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x0160(%rsi)
+
+ vmovdqa %ymm5,%ymm0
+ cmp $0x01a0,%rax
+ jl .Lxorpart8
+ vpxor 0x0180(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x0180(%rsi)
+
+ vmovdqa %ymm13,%ymm0
+ cmp $0x01c0,%rax
+ jl .Lxorpart8
+ vpxor 0x01a0(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x01a0(%rsi)
+
+ vmovdqa %ymm7,%ymm0
+ cmp $0x01e0,%rax
+ jl .Lxorpart8
+ vpxor 0x01c0(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x01c0(%rsi)
+
+ vmovdqa %ymm15,%ymm0
+ cmp $0x0200,%rax
+ jl .Lxorpart8
+ vpxor 0x01e0(%rdx),%ymm0,%ymm0
+ vmovdqu %ymm0,0x01e0(%rsi)
+
+.Ldone8:
+ vzeroupper
+ lea -8(%r10),%rsp
+ ret
+
+.Lxorpart8:
+ # xor remaining bytes from partial register into output
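+ # Same bounce-buffer trick as in the 2/4-block versions, but reusing
+ # the 32-byte-aligned scratch area already set up at %rsp; %ymm0 holds
+ # the keystream for the partial block.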
+ mov %rax,%r9
+ and $0x1f,%r9
+ jz .Ldone8
+ and $~0x1f,%rax
+
+ mov %rsi,%r11
+
+ lea (%rdx,%rax),%rsi
+ mov %rsp,%rdi
+ mov %r9,%rcx
+ rep movsb
+
+ vpxor 0x00(%rsp),%ymm0,%ymm0
+ vmovdqa %ymm0,0x00(%rsp)
+
+ mov %rsp,%rsi
+ lea (%r11,%rax),%rdi
+ mov %r9,%rcx
+ rep movsb
+
+ jmp .Ldone8
+
+ENDPROC(chacha_8block_xor_avx2)
diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S
new file mode 100644
index 000000000000..848f9c75fd4f
--- /dev/null
+++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S
@@ -0,0 +1,836 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * ChaCha 256-bit cipher algorithm, x64 AVX-512VL functions
+ *
+ * Copyright (C) 2018 Martin Willi
+ */
+
+#include <linux/linkage.h>
+
+.section .rodata.cst32.CTR2BL, "aM", @progbits, 32
+.align 32
+CTR2BL: .octa 0x00000000000000000000000000000000
+ .octa 0x00000000000000000000000000000001
+
+.section .rodata.cst32.CTR4BL, "aM", @progbits, 32
+.align 32
+CTR4BL: .octa 0x00000000000000000000000000000002
+ .octa 0x00000000000000000000000000000003
+
+.section .rodata.cst32.CTR8BL, "aM", @progbits, 32
+.align 32
+CTR8BL: .octa 0x00000003000000020000000100000000
+ .octa 0x00000007000000060000000500000004
+
+.text
+
+ENTRY(chacha_2block_xor_avx512vl)
+ # %rdi: Input state matrix, s
+ # %rsi: up to 2 data blocks output, o
+ # %rdx: up to 2 data blocks input, i
+ # %rcx: input/output length in bytes
+ # %r8d: nrounds
+
+ # This function encrypts two ChaCha blocks by loading the state
+ # matrix twice across four AVX registers. It performs matrix operations
+ # on four words in each matrix in parallel, but requires shuffling to
+ # rearrange the words after each round.
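+ # All rotates use the AVX-512VL vprold instruction, so no shuffle
+ # masks or shift+OR sequences are needed.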
+
+ vzeroupper
+
+ # x0..3[0-1] = s0..3
+ vbroadcasti128 0x00(%rdi),%ymm0
+ vbroadcasti128 0x10(%rdi),%ymm1
+ vbroadcasti128 0x20(%rdi),%ymm2
+ vbroadcasti128 0x30(%rdi),%ymm3
+
+ vpaddd CTR2BL(%rip),%ymm3,%ymm3
+
+ vmovdqa %ymm0,%ymm8
+ vmovdqa %ymm1,%ymm9
+ vmovdqa %ymm2,%ymm10
+ vmovdqa %ymm3,%ymm11
+
+.Ldoubleround:
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxord %ymm0,%ymm3,%ymm3
+ vprold $16,%ymm3,%ymm3
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxord %ymm2,%ymm1,%ymm1
+ vprold $12,%ymm1,%ymm1
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxord %ymm0,%ymm3,%ymm3
+ vprold $8,%ymm3,%ymm3
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxord %ymm2,%ymm1,%ymm1
+ vprold $7,%ymm1,%ymm1
+
+ # x1 = shuffle32(x1, MASK(0, 3, 2, 1))
+ vpshufd $0x39,%ymm1,%ymm1
+ # x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+ vpshufd $0x4e,%ymm2,%ymm2
+ # x3 = shuffle32(x3, MASK(2, 1, 0, 3))
+ vpshufd $0x93,%ymm3,%ymm3
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxord %ymm0,%ymm3,%ymm3
+ vprold $16,%ymm3,%ymm3
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxord %ymm2,%ymm1,%ymm1
+ vprold $12,%ymm1,%ymm1
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxord %ymm0,%ymm3,%ymm3
+ vprold $8,%ymm3,%ymm3
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxord %ymm2,%ymm1,%ymm1
+ vprold $7,%ymm1,%ymm1
+
+ # x1 = shuffle32(x1, MASK(2, 1, 0, 3))
+ vpshufd $0x93,%ymm1,%ymm1
+ # x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+ vpshufd $0x4e,%ymm2,%ymm2
+ # x3 = shuffle32(x3, MASK(0, 3, 2, 1))
+ vpshufd $0x39,%ymm3,%ymm3
+
+ sub $2,%r8d
+ jnz .Ldoubleround
+
+ # o0 = i0 ^ (x0 + s0)
+ vpaddd %ymm8,%ymm0,%ymm7
+ cmp $0x10,%rcx
+ jl .Lxorpart2
+ vpxord 0x00(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x00(%rsi)
+ vextracti128 $1,%ymm7,%xmm0
+ # o1 = i1 ^ (x1 + s1)
+ vpaddd %ymm9,%ymm1,%ymm7
+ cmp $0x20,%rcx
+ jl .Lxorpart2
+ vpxord 0x10(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x10(%rsi)
+ vextracti128 $1,%ymm7,%xmm1
+ # o2 = i2 ^ (x2 + s2)
+ vpaddd %ymm10,%ymm2,%ymm7
+ cmp $0x30,%rcx
+ jl .Lxorpart2
+ vpxord 0x20(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x20(%rsi)
+ vextracti128 $1,%ymm7,%xmm2
+ # o3 = i3 ^ (x3 + s3)
+ vpaddd %ymm11,%ymm3,%ymm7
+ cmp $0x40,%rcx
+ jl .Lxorpart2
+ vpxord 0x30(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x30(%rsi)
+ vextracti128 $1,%ymm7,%xmm3
+
+ # xor and write second block
+ vmovdqa %xmm0,%xmm7
+ cmp $0x50,%rcx
+ jl .Lxorpart2
+ vpxord 0x40(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x40(%rsi)
+
+ vmovdqa %xmm1,%xmm7
+ cmp $0x60,%rcx
+ jl .Lxorpart2
+ vpxord 0x50(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x50(%rsi)
+
+ vmovdqa %xmm2,%xmm7
+ cmp $0x70,%rcx
+ jl .Lxorpart2
+ vpxord 0x60(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x60(%rsi)
+
+ vmovdqa %xmm3,%xmm7
+ cmp $0x80,%rcx
+ jl .Lxorpart2
+ vpxord 0x70(%rdx),%xmm7,%xmm6
+ vmovdqu %xmm6,0x70(%rsi)
+
+.Ldone2:
+ vzeroupper
+ ret
+
+.Lxorpart2:
+ # xor remaining bytes from partial register into output
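+ # Build a byte mask of (length % 16) ones in %k1 via shld, then use a
+ # zero-masked vmovdqu8 load and a masked store, avoiding the stack
+ # bounce buffer used by the SSSE3/AVX2 versions.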
+ mov %rcx,%rax
+ and $0xf,%rcx
+ jz .Ldone2
+ mov %rax,%r9
+ and $~0xf,%r9
+
+ mov $1,%rax
+ shld %cl,%rax,%rax
+ sub $1,%rax
+ kmovq %rax,%k1
+
+ vmovdqu8 (%rdx,%r9),%xmm1{%k1}{z}
+ vpxord %xmm7,%xmm1,%xmm1
+ vmovdqu8 %xmm1,(%rsi,%r9){%k1}
+
+ jmp .Ldone2
+
+ENDPROC(chacha_2block_xor_avx512vl)
+
+ENTRY(chacha_4block_xor_avx512vl)
+ # %rdi: Input state matrix, s
+ # %rsi: up to 4 data blocks output, o
+ # %rdx: up to 4 data blocks input, i
+ # %rcx: input/output length in bytes
+ # %r8d: nrounds
+
+ # This function encrypts four ChaCha blocks by loading the state
+ # matrix four times across eight AVX registers. It performs matrix
+ # operations on four words in two matrices in parallel, followed by
+ # the same operations on the other two matrices. Since the required
+ # word shuffling has a rather high latency, we can do the
+ # arithmetic on two matrix-pairs without much slowdown.
+
+ vzeroupper
+
+ # x0..3[0-3] = s0..3
+ vbroadcasti128 0x00(%rdi),%ymm0
+ vbroadcasti128 0x10(%rdi),%ymm1
+ vbroadcasti128 0x20(%rdi),%ymm2
+ vbroadcasti128 0x30(%rdi),%ymm3
+
+ vmovdqa %ymm0,%ymm4
+ vmovdqa %ymm1,%ymm5
+ vmovdqa %ymm2,%ymm6
+ vmovdqa %ymm3,%ymm7
+
+ vpaddd CTR2BL(%rip),%ymm3,%ymm3
+ vpaddd CTR4BL(%rip),%ymm7,%ymm7
+
+ vmovdqa %ymm0,%ymm11
+ vmovdqa %ymm1,%ymm12
+ vmovdqa %ymm2,%ymm13
+ vmovdqa %ymm3,%ymm14
+ vmovdqa %ymm7,%ymm15
+
+.Ldoubleround4:
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxord %ymm0,%ymm3,%ymm3
+ vprold $16,%ymm3,%ymm3
+
+ vpaddd %ymm5,%ymm4,%ymm4
+ vpxord %ymm4,%ymm7,%ymm7
+ vprold $16,%ymm7,%ymm7
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxord %ymm2,%ymm1,%ymm1
+ vprold $12,%ymm1,%ymm1
+
+ vpaddd %ymm7,%ymm6,%ymm6
+ vpxord %ymm6,%ymm5,%ymm5
+ vprold $12,%ymm5,%ymm5
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxord %ymm0,%ymm3,%ymm3
+ vprold $8,%ymm3,%ymm3
+
+ vpaddd %ymm5,%ymm4,%ymm4
+ vpxord %ymm4,%ymm7,%ymm7
+ vprold $8,%ymm7,%ymm7
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxord %ymm2,%ymm1,%ymm1
+ vprold $7,%ymm1,%ymm1
+
+ vpaddd %ymm7,%ymm6,%ymm6
+ vpxord %ymm6,%ymm5,%ymm5
+ vprold $7,%ymm5,%ymm5
+
+ # x1 = shuffle32(x1, MASK(0, 3, 2, 1))
+ vpshufd $0x39,%ymm1,%ymm1
+ vpshufd $0x39,%ymm5,%ymm5
+ # x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+ vpshufd $0x4e,%ymm2,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ # x3 = shuffle32(x3, MASK(2, 1, 0, 3))
+ vpshufd $0x93,%ymm3,%ymm3
+ vpshufd $0x93,%ymm7,%ymm7
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 16)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxord %ymm0,%ymm3,%ymm3
+ vprold $16,%ymm3,%ymm3
+
+ vpaddd %ymm5,%ymm4,%ymm4
+ vpxord %ymm4,%ymm7,%ymm7
+ vprold $16,%ymm7,%ymm7
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 12)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxord %ymm2,%ymm1,%ymm1
+ vprold $12,%ymm1,%ymm1
+
+ vpaddd %ymm7,%ymm6,%ymm6
+ vpxord %ymm6,%ymm5,%ymm5
+ vprold $12,%ymm5,%ymm5
+
+ # x0 += x1, x3 = rotl32(x3 ^ x0, 8)
+ vpaddd %ymm1,%ymm0,%ymm0
+ vpxord %ymm0,%ymm3,%ymm3
+ vprold $8,%ymm3,%ymm3
+
+ vpaddd %ymm5,%ymm4,%ymm4
+ vpxord %ymm4,%ymm7,%ymm7
+ vprold $8,%ymm7,%ymm7
+
+ # x2 += x3, x1 = rotl32(x1 ^ x2, 7)
+ vpaddd %ymm3,%ymm2,%ymm2
+ vpxord %ymm2,%ymm1,%ymm1
+ vprold $7,%ymm1,%ymm1
+
+ vpaddd %ymm7,%ymm6,%ymm6
+ vpxord %ymm6,%ymm5,%ymm5
+ vprold $7,%ymm5,%ymm5
+
+ # x1 = shuffle32(x1, MASK(2, 1, 0, 3))
+ vpshufd $0x93,%ymm1,%ymm1
+ vpshufd $0x93,%ymm5,%ymm5
+ # x2 = shuffle32(x2, MASK(1, 0, 3, 2))
+ vpshufd $0x4e,%ymm2,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ # x3 = shuffle32(x3, MASK(0, 3, 2, 1))
+ vpshufd $0x39,%ymm3,%ymm3
+ vpshufd $0x39,%ymm7,%ymm7
+
+ sub $2,%r8d
+ jnz .Ldoubleround4
+
+ # o0 = i0 ^ (x0 + s0), first block
+ vpaddd %ymm11,%ymm0,%ymm10
+ cmp $0x10,%rcx
+ jl .Lxorpart4
+ vpxord 0x00(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x00(%rsi)
+ vextracti128 $1,%ymm10,%xmm0
+ # o1 = i1 ^ (x1 + s1), first block
+ vpaddd %ymm12,%ymm1,%ymm10
+ cmp $0x20,%rcx
+ jl .Lxorpart4
+ vpxord 0x10(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x10(%rsi)
+ vextracti128 $1,%ymm10,%xmm1
+ # o2 = i2 ^ (x2 + s2), first block
+ vpaddd %ymm13,%ymm2,%ymm10
+ cmp $0x30,%rcx
+ jl .Lxorpart4
+ vpxord 0x20(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x20(%rsi)
+ vextracti128 $1,%ymm10,%xmm2
+ # o3 = i3 ^ (x3 + s3), first block
+ vpaddd %ymm14,%ymm3,%ymm10
+ cmp $0x40,%rcx
+ jl .Lxorpart4
+ vpxord 0x30(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x30(%rsi)
+ vextracti128 $1,%ymm10,%xmm3
+
+ # xor and write second block
+ vmovdqa %xmm0,%xmm10
+ cmp $0x50,%rcx
+ jl .Lxorpart4
+ vpxord 0x40(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x40(%rsi)
+
+ vmovdqa %xmm1,%xmm10
+ cmp $0x60,%rcx
+ jl .Lxorpart4
+ vpxord 0x50(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x50(%rsi)
+
+ vmovdqa %xmm2,%xmm10
+ cmp $0x70,%rcx
+ jl .Lxorpart4
+ vpxord 0x60(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x60(%rsi)
+
+ vmovdqa %xmm3,%xmm10
+ cmp $0x80,%rcx
+ jl .Lxorpart4
+ vpxord 0x70(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x70(%rsi)
+
+ # o0 = i0 ^ (x0 + s0), third block
+ vpaddd %ymm11,%ymm4,%ymm10
+ cmp $0x90,%rcx
+ jl .Lxorpart4
+ vpxord 0x80(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x80(%rsi)
+ vextracti128 $1,%ymm10,%xmm4
+ # o1 = i1 ^ (x1 + s1), third block
+ vpaddd %ymm12,%ymm5,%ymm10
+ cmp $0xa0,%rcx
+ jl .Lxorpart4
+ vpxord 0x90(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0x90(%rsi)
+ vextracti128 $1,%ymm10,%xmm5
+ # o2 = i2 ^ (x2 + s2), third block
+ vpaddd %ymm13,%ymm6,%ymm10
+ cmp $0xb0,%rcx
+ jl .Lxorpart4
+ vpxord 0xa0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xa0(%rsi)
+ vextracti128 $1,%ymm10,%xmm6
+ # o3 = i3 ^ (x3 + s3), third block
+ vpaddd %ymm15,%ymm7,%ymm10
+ cmp $0xc0,%rcx
+ jl .Lxorpart4
+ vpxord 0xb0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xb0(%rsi)
+ vextracti128 $1,%ymm10,%xmm7
+
+ # xor and write fourth block
+ vmovdqa %xmm4,%xmm10
+ cmp $0xd0,%rcx
+ jl .Lxorpart4
+ vpxord 0xc0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xc0(%rsi)
+
+ vmovdqa %xmm5,%xmm10
+ cmp $0xe0,%rcx
+ jl .Lxorpart4
+ vpxord 0xd0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xd0(%rsi)
+
+ vmovdqa %xmm6,%xmm10
+ cmp $0xf0,%rcx
+ jl .Lxorpart4
+ vpxord 0xe0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xe0(%rsi)
+
+ vmovdqa %xmm7,%xmm10
+ cmp $0x100,%rcx
+ jl .Lxorpart4
+ vpxord 0xf0(%rdx),%xmm10,%xmm9
+ vmovdqu %xmm9,0xf0(%rsi)
+
+.Ldone4:
+ vzeroupper
+ ret
+
+.Lxorpart4:
+ # xor remaining bytes from partial register into output
+ mov %rcx,%rax
+ and $0xf,%rcx
+ jz .Ldone4
+ mov %rax,%r9
+ and $~0xf,%r9
+
+ mov $1,%rax
+ shld %cl,%rax,%rax
+ sub $1,%rax
+ kmovq %rax,%k1
+
+ vmovdqu8 (%rdx,%r9),%xmm1{%k1}{z}
+ vpxord %xmm10,%xmm1,%xmm1
+ vmovdqu8 %xmm1,(%rsi,%r9){%k1}
+
+ jmp .Ldone4
+
+ENDPROC(chacha_4block_xor_avx512vl)
+
+ENTRY(chacha_8block_xor_avx512vl)
+ # %rdi: Input state matrix, s
+ # %rsi: up to 8 data blocks output, o
+ # %rdx: up to 8 data blocks input, i
+ # %rcx: input/output length in bytes
+ # %r8d: nrounds
+
+ # This function encrypts eight consecutive ChaCha blocks by loading
+ # the state matrix in AVX registers eight times. Compared to AVX2, this
+ # mostly benefits from the new rotate instructions in VL and the
+ # additional registers.
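+ # The initial state is kept in %ymm16..31, which AVX-512VL makes
+ # available, so nothing needs to be spilled to the stack.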
+
+ vzeroupper
+
+ # x0..15[0-7] = s[0..15]
+ vpbroadcastd 0x00(%rdi),%ymm0
+ vpbroadcastd 0x04(%rdi),%ymm1
+ vpbroadcastd 0x08(%rdi),%ymm2
+ vpbroadcastd 0x0c(%rdi),%ymm3
+ vpbroadcastd 0x10(%rdi),%ymm4
+ vpbroadcastd 0x14(%rdi),%ymm5
+ vpbroadcastd 0x18(%rdi),%ymm6
+ vpbroadcastd 0x1c(%rdi),%ymm7
+ vpbroadcastd 0x20(%rdi),%ymm8
+ vpbroadcastd 0x24(%rdi),%ymm9
+ vpbroadcastd 0x28(%rdi),%ymm10
+ vpbroadcastd 0x2c(%rdi),%ymm11
+ vpbroadcastd 0x30(%rdi),%ymm12
+ vpbroadcastd 0x34(%rdi),%ymm13
+ vpbroadcastd 0x38(%rdi),%ymm14
+ vpbroadcastd 0x3c(%rdi),%ymm15
+
+ # x12 += counter values 0-7
+ vpaddd CTR8BL(%rip),%ymm12,%ymm12
+
+ vmovdqa64 %ymm0,%ymm16
+ vmovdqa64 %ymm1,%ymm17
+ vmovdqa64 %ymm2,%ymm18
+ vmovdqa64 %ymm3,%ymm19
+ vmovdqa64 %ymm4,%ymm20
+ vmovdqa64 %ymm5,%ymm21
+ vmovdqa64 %ymm6,%ymm22
+ vmovdqa64 %ymm7,%ymm23
+ vmovdqa64 %ymm8,%ymm24
+ vmovdqa64 %ymm9,%ymm25
+ vmovdqa64 %ymm10,%ymm26
+ vmovdqa64 %ymm11,%ymm27
+ vmovdqa64 %ymm12,%ymm28
+ vmovdqa64 %ymm13,%ymm29
+ vmovdqa64 %ymm14,%ymm30
+ vmovdqa64 %ymm15,%ymm31
+
+.Ldoubleround8:
+ # x0 += x4, x12 = rotl32(x12 ^ x0, 16)
+ vpaddd %ymm0,%ymm4,%ymm0
+ vpxord %ymm0,%ymm12,%ymm12
+ vprold $16,%ymm12,%ymm12
+ # x1 += x5, x13 = rotl32(x13 ^ x1, 16)
+ vpaddd %ymm1,%ymm5,%ymm1
+ vpxord %ymm1,%ymm13,%ymm13
+ vprold $16,%ymm13,%ymm13
+ # x2 += x6, x14 = rotl32(x14 ^ x2, 16)
+ vpaddd %ymm2,%ymm6,%ymm2
+ vpxord %ymm2,%ymm14,%ymm14
+ vprold $16,%ymm14,%ymm14
+ # x3 += x7, x15 = rotl32(x15 ^ x3, 16)
+ vpaddd %ymm3,%ymm7,%ymm3
+ vpxord %ymm3,%ymm15,%ymm15
+ vprold $16,%ymm15,%ymm15
+
+ # x8 += x12, x4 = rotl32(x4 ^ x8, 12)
+ vpaddd %ymm12,%ymm8,%ymm8
+ vpxord %ymm8,%ymm4,%ymm4
+ vprold $12,%ymm4,%ymm4
+ # x9 += x13, x5 = rotl32(x5 ^ x9, 12)
+ vpaddd %ymm13,%ymm9,%ymm9
+ vpxord %ymm9,%ymm5,%ymm5
+ vprold $12,%ymm5,%ymm5
+ # x10 += x14, x6 = rotl32(x6 ^ x10, 12)
+ vpaddd %ymm14,%ymm10,%ymm10
+ vpxord %ymm10,%ymm6,%ymm6
+ vprold $12,%ymm6,%ymm6
+ # x11 += x15, x7 = rotl32(x7 ^ x11, 12)
+ vpaddd %ymm15,%ymm11,%ymm11
+ vpxord %ymm11,%ymm7,%ymm7
+ vprold $12,%ymm7,%ymm7
+
+ # x0 += x4, x12 = rotl32(x12 ^ x0, 8)
+ vpaddd %ymm0,%ymm4,%ymm0
+ vpxord %ymm0,%ymm12,%ymm12
+ vprold $8,%ymm12,%ymm12
+ # x1 += x5, x13 = rotl32(x13 ^ x1, 8)
+ vpaddd %ymm1,%ymm5,%ymm1
+ vpxord %ymm1,%ymm13,%ymm13
+ vprold $8,%ymm13,%ymm13
+ # x2 += x6, x14 = rotl32(x14 ^ x2, 8)
+ vpaddd %ymm2,%ymm6,%ymm2
+ vpxord %ymm2,%ymm14,%ymm14
+ vprold $8,%ymm14,%ymm14
+ # x3 += x7, x15 = rotl32(x15 ^ x3, 8)
+ vpaddd %ymm3,%ymm7,%ymm3
+ vpxord %ymm3,%ymm15,%ymm15
+ vprold $8,%ymm15,%ymm15
+
+ # x8 += x12, x4 = rotl32(x4 ^ x8, 7)
+ vpaddd %ymm12,%ymm8,%ymm8
+ vpxord %ymm8,%ymm4,%ymm4
+ vprold $7,%ymm4,%ymm4
+ # x9 += x13, x5 = rotl32(x5 ^ x9, 7)
+ vpaddd %ymm13,%ymm9,%ymm9
+ vpxord %ymm9,%ymm5,%ymm5
+ vprold $7,%ymm5,%ymm5
+ # x10 += x14, x6 = rotl32(x6 ^ x10, 7)
+ vpaddd %ymm14,%ymm10,%ymm10
+ vpxord %ymm10,%ymm6,%ymm6
+ vprold $7,%ymm6,%ymm6
+ # x11 += x15, x7 = rotl32(x7 ^ x11, 7)
+ vpaddd %ymm15,%ymm11,%ymm11
+ vpxord %ymm11,%ymm7,%ymm7
+ vprold $7,%ymm7,%ymm7
+
+ # x0 += x5, x15 = rotl32(x15 ^ x0, 16)
+ vpaddd %ymm0,%ymm5,%ymm0
+ vpxord %ymm0,%ymm15,%ymm15
+ vprold $16,%ymm15,%ymm15
+ # x1 += x6, x12 = rotl32(x12 ^ x1, 16)
+ vpaddd %ymm1,%ymm6,%ymm1
+ vpxord %ymm1,%ymm12,%ymm12
+ vprold $16,%ymm12,%ymm12
+ # x2 += x7, x13 = rotl32(x13 ^ x2, 16)
+ vpaddd %ymm2,%ymm7,%ymm2
+ vpxord %ymm2,%ymm13,%ymm13
+ vprold $16,%ymm13,%ymm13
+ # x3 += x4, x14 = rotl32(x14 ^ x3, 16)
+ vpaddd %ymm3,%ymm4,%ymm3
+ vpxord %ymm3,%ymm14,%ymm14
+ vprold $16,%ymm14,%ymm14
+
+ # x10 += x15, x5 = rotl32(x5 ^ x10, 12)
+ vpaddd %ymm15,%ymm10,%ymm10
+ vpxord %ymm10,%ymm5,%ymm5
+ vprold $12,%ymm5,%ymm5
+ # x11 += x12, x6 = rotl32(x6 ^ x11, 12)
+ vpaddd %ymm12,%ymm11,%ymm11
+ vpxord %ymm11,%ymm6,%ymm6
+ vprold $12,%ymm6,%ymm6
+ # x8 += x13, x7 = rotl32(x7 ^ x8, 12)
+ vpaddd %ymm13,%ymm8,%ymm8
+ vpxord %ymm8,%ymm7,%ymm7
+ vprold $12,%ymm7,%ymm7
+ # x9 += x14, x4 = rotl32(x4 ^ x9, 12)
+ vpaddd %ymm14,%ymm9,%ymm9
+ vpxord %ymm9,%ymm4,%ymm4
+ vprold $12,%ymm4,%ymm4
+
+ # x0 += x5, x15 = rotl32(x15 ^ x0, 8)
+ vpaddd %ymm0,%ymm5,%ymm0
+ vpxord %ymm0,%ymm15,%ymm15
+ vprold $8,%ymm15,%ymm15
+ # x1 += x6, x12 = rotl32(x12 ^ x1, 8)
+ vpaddd %ymm1,%ymm6,%ymm1
+ vpxord %ymm1,%ymm12,%ymm12
+ vprold $8,%ymm12,%ymm12
+ # x2 += x7, x13 = rotl32(x13 ^ x2, 8)
+ vpaddd %ymm2,%ymm7,%ymm2
+ vpxord %ymm2,%ymm13,%ymm13
+ vprold $8,%ymm13,%ymm13
+ # x3 += x4, x14 = rotl32(x14 ^ x3, 8)
+ vpaddd %ymm3,%ymm4,%ymm3
+ vpxord %ymm3,%ymm14,%ymm14
+ vprold $8,%ymm14,%ymm14
+
+ # x10 += x15, x5 = rotl32(x5 ^ x10, 7)
+ vpaddd %ymm15,%ymm10,%ymm10
+ vpxord %ymm10,%ymm5,%ymm5
+ vprold $7,%ymm5,%ymm5
+ # x11 += x12, x6 = rotl32(x6 ^ x11, 7)
+ vpaddd %ymm12,%ymm11,%ymm11
+ vpxord %ymm11,%ymm6,%ymm6
+ vprold $7,%ymm6,%ymm6
+ # x8 += x13, x7 = rotl32(x7 ^ x8, 7)
+ vpaddd %ymm13,%ymm8,%ymm8
+ vpxord %ymm8,%ymm7,%ymm7
+ vprold $7,%ymm7,%ymm7
+ # x9 += x14, x4 = rotl32(x4 ^ x9, 7)
+ vpaddd %ymm14,%ymm9,%ymm9
+ vpxord %ymm9,%ymm4,%ymm4
+ vprold $7,%ymm4,%ymm4
+
+ sub $2,%r8d
+ jnz .Ldoubleround8
+
+ # x0..15[0-7] += s[0..15]
+ vpaddd %ymm16,%ymm0,%ymm0
+ vpaddd %ymm17,%ymm1,%ymm1
+ vpaddd %ymm18,%ymm2,%ymm2
+ vpaddd %ymm19,%ymm3,%ymm3
+ vpaddd %ymm20,%ymm4,%ymm4
+ vpaddd %ymm21,%ymm5,%ymm5
+ vpaddd %ymm22,%ymm6,%ymm6
+ vpaddd %ymm23,%ymm7,%ymm7
+ vpaddd %ymm24,%ymm8,%ymm8
+ vpaddd %ymm25,%ymm9,%ymm9
+ vpaddd %ymm26,%ymm10,%ymm10
+ vpaddd %ymm27,%ymm11,%ymm11
+ vpaddd %ymm28,%ymm12,%ymm12
+ vpaddd %ymm29,%ymm13,%ymm13
+ vpaddd %ymm30,%ymm14,%ymm14
+ vpaddd %ymm31,%ymm15,%ymm15
+
+ # interleave 32-bit words in state n, n+1
+ vpunpckldq %ymm1,%ymm0,%ymm16
+ vpunpckhdq %ymm1,%ymm0,%ymm17
+ vpunpckldq %ymm3,%ymm2,%ymm18
+ vpunpckhdq %ymm3,%ymm2,%ymm19
+ vpunpckldq %ymm5,%ymm4,%ymm20
+ vpunpckhdq %ymm5,%ymm4,%ymm21
+ vpunpckldq %ymm7,%ymm6,%ymm22
+ vpunpckhdq %ymm7,%ymm6,%ymm23
+ vpunpckldq %ymm9,%ymm8,%ymm24
+ vpunpckhdq %ymm9,%ymm8,%ymm25
+ vpunpckldq %ymm11,%ymm10,%ymm26
+ vpunpckhdq %ymm11,%ymm10,%ymm27
+ vpunpckldq %ymm13,%ymm12,%ymm28
+ vpunpckhdq %ymm13,%ymm12,%ymm29
+ vpunpckldq %ymm15,%ymm14,%ymm30
+ vpunpckhdq %ymm15,%ymm14,%ymm31
+
+ # interleave 64-bit words in state n, n+2
+ vpunpcklqdq %ymm18,%ymm16,%ymm0
+ vpunpcklqdq %ymm19,%ymm17,%ymm1
+ vpunpckhqdq %ymm18,%ymm16,%ymm2
+ vpunpckhqdq %ymm19,%ymm17,%ymm3
+ vpunpcklqdq %ymm22,%ymm20,%ymm4
+ vpunpcklqdq %ymm23,%ymm21,%ymm5
+ vpunpckhqdq %ymm22,%ymm20,%ymm6
+ vpunpckhqdq %ymm23,%ymm21,%ymm7
+ vpunpcklqdq %ymm26,%ymm24,%ymm8
+ vpunpcklqdq %ymm27,%ymm25,%ymm9
+ vpunpckhqdq %ymm26,%ymm24,%ymm10
+ vpunpckhqdq %ymm27,%ymm25,%ymm11
+ vpunpcklqdq %ymm30,%ymm28,%ymm12
+ vpunpcklqdq %ymm31,%ymm29,%ymm13
+ vpunpckhqdq %ymm30,%ymm28,%ymm14
+ vpunpckhqdq %ymm31,%ymm29,%ymm15
+
+ # interleave 128-bit words in state n, n+4
+ # xor/write first four blocks
+ vmovdqa64 %ymm0,%ymm16
+ vperm2i128 $0x20,%ymm4,%ymm0,%ymm0
+ cmp $0x0020,%rcx
+ jl .Lxorpart8
+ vpxord 0x0000(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x0000(%rsi)
+ vmovdqa64 %ymm16,%ymm0
+ vperm2i128 $0x31,%ymm4,%ymm0,%ymm4
+
+ vperm2i128 $0x20,%ymm12,%ymm8,%ymm0
+ cmp $0x0040,%rcx
+ jl .Lxorpart8
+ vpxord 0x0020(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x0020(%rsi)
+ vperm2i128 $0x31,%ymm12,%ymm8,%ymm12
+
+ vperm2i128 $0x20,%ymm6,%ymm2,%ymm0
+ cmp $0x0060,%rcx
+ jl .Lxorpart8
+ vpxord 0x0040(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x0040(%rsi)
+ vperm2i128 $0x31,%ymm6,%ymm2,%ymm6
+
+ vperm2i128 $0x20,%ymm14,%ymm10,%ymm0
+ cmp $0x0080,%rcx
+ jl .Lxorpart8
+ vpxord 0x0060(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x0060(%rsi)
+ vperm2i128 $0x31,%ymm14,%ymm10,%ymm14
+
+ vperm2i128 $0x20,%ymm5,%ymm1,%ymm0
+ cmp $0x00a0,%rcx
+ jl .Lxorpart8
+ vpxord 0x0080(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x0080(%rsi)
+ vperm2i128 $0x31,%ymm5,%ymm1,%ymm5
+
+ vperm2i128 $0x20,%ymm13,%ymm9,%ymm0
+ cmp $0x00c0,%rcx
+ jl .Lxorpart8
+ vpxord 0x00a0(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x00a0(%rsi)
+ vperm2i128 $0x31,%ymm13,%ymm9,%ymm13
+
+ vperm2i128 $0x20,%ymm7,%ymm3,%ymm0
+ cmp $0x00e0,%rcx
+ jl .Lxorpart8
+ vpxord 0x00c0(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x00c0(%rsi)
+ vperm2i128 $0x31,%ymm7,%ymm3,%ymm7
+
+ vperm2i128 $0x20,%ymm15,%ymm11,%ymm0
+ cmp $0x0100,%rcx
+ jl .Lxorpart8
+ vpxord 0x00e0(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x00e0(%rsi)
+ vperm2i128 $0x31,%ymm15,%ymm11,%ymm15
+
+ # xor remaining blocks, write to output
+ vmovdqa64 %ymm4,%ymm0
+ cmp $0x0120,%rcx
+ jl .Lxorpart8
+ vpxord 0x0100(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x0100(%rsi)
+
+ vmovdqa64 %ymm12,%ymm0
+ cmp $0x0140,%rcx
+ jl .Lxorpart8
+ vpxord 0x0120(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x0120(%rsi)
+
+ vmovdqa64 %ymm6,%ymm0
+ cmp $0x0160,%rcx
+ jl .Lxorpart8
+ vpxord 0x0140(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x0140(%rsi)
+
+ vmovdqa64 %ymm14,%ymm0
+ cmp $0x0180,%rcx
+ jl .Lxorpart8
+ vpxord 0x0160(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x0160(%rsi)
+
+ vmovdqa64 %ymm5,%ymm0
+ cmp $0x01a0,%rcx
+ jl .Lxorpart8
+ vpxord 0x0180(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x0180(%rsi)
+
+ vmovdqa64 %ymm13,%ymm0
+ cmp $0x01c0,%rcx
+ jl .Lxorpart8
+ vpxord 0x01a0(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x01a0(%rsi)
+
+ vmovdqa64 %ymm7,%ymm0
+ cmp $0x01e0,%rcx
+ jl .Lxorpart8
+ vpxord 0x01c0(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x01c0(%rsi)
+
+ vmovdqa64 %ymm15,%ymm0
+ cmp $0x0200,%rcx
+ jl .Lxorpart8
+ vpxord 0x01e0(%rdx),%ymm0,%ymm0
+ vmovdqu64 %ymm0,0x01e0(%rsi)
+
+.Ldone8:
+ vzeroupper
+ ret
+
+.Lxorpart8:
+ # xor remaining bytes from partial register into output
+ mov %rcx,%rax
+ and $0x1f,%rcx
+ jz .Ldone8
+ mov %rax,%r9
+ and $~0x1f,%r9
+
+ mov $1,%rax
+ shld %cl,%rax,%rax
+ sub $1,%rax
+ kmovq %rax,%k1
+
+ vmovdqu8 (%rdx,%r9),%ymm1{%k1}{z}
+ vpxord %ymm0,%ymm1,%ymm1
+ vmovdqu8 %ymm1,(%rsi,%r9){%k1}
+
+ jmp .Ldone8
+
+ENDPROC(chacha_8block_xor_avx512vl)
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S
index 512a2b500fd1..c05a7a963dc3 100644
--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha-ssse3-x86_64.S
@@ -1,5 +1,5 @@
/*
- * ChaCha20 256-bit cipher algorithm, RFC7539, x64 SSSE3 functions
+ * ChaCha 256-bit cipher algorithm, x64 SSSE3 functions
*
* Copyright (C) 2015 Martin Willi
*
@@ -10,6 +10,7 @@
*/
#include <linux/linkage.h>
+#include <asm/frame.h>
.section .rodata.cst16.ROT8, "aM", @progbits, 16
.align 16
@@ -23,35 +24,25 @@ CTRINC: .octa 0x00000003000000020000000100000000
.text
-ENTRY(chacha20_block_xor_ssse3)
- # %rdi: Input state matrix, s
- # %rsi: 1 data block output, o
- # %rdx: 1 data block input, i
-
- # This function encrypts one ChaCha20 block by loading the state matrix
- # in four SSE registers. It performs matrix operation on four words in
- # parallel, but requireds shuffling to rearrange the words after each
- # round. 8/16-bit word rotation is done with the slightly better
- # performing SSSE3 byte shuffling, 7/12-bit word rotation uses
- # traditional shift+OR.
-
- # x0..3 = s0..3
- movdqa 0x00(%rdi),%xmm0
- movdqa 0x10(%rdi),%xmm1
- movdqa 0x20(%rdi),%xmm2
- movdqa 0x30(%rdi),%xmm3
- movdqa %xmm0,%xmm8
- movdqa %xmm1,%xmm9
- movdqa %xmm2,%xmm10
- movdqa %xmm3,%xmm11
+/*
+ * chacha_permute - permute one block
+ *
+ * Permute one 64-byte block where the state matrix is in %xmm0-%xmm3. This
+ * function performs matrix operations on four words in parallel, but requires
+ * shuffling to rearrange the words after each round. 8/16-bit word rotation is
+ * done with the slightly better performing SSSE3 byte shuffling, 7/12-bit word
+ * rotation uses traditional shift+OR.
+ *
+ * The round count is given in %r8d.
+ *
+ * Clobbers: %r8d, %xmm4-%xmm7
+ */
+chacha_permute:
movdqa ROT8(%rip),%xmm4
movdqa ROT16(%rip),%xmm5
- mov $10,%ecx
-
.Ldoubleround:
-
# x0 += x1, x3 = rotl32(x3 ^ x0, 16)
paddd %xmm1,%xmm0
pxor %xmm0,%xmm3
@@ -118,39 +109,129 @@ ENTRY(chacha20_block_xor_ssse3)
# x3 = shuffle32(x3, MASK(0, 3, 2, 1))
pshufd $0x39,%xmm3,%xmm3
- dec %ecx
+ sub $2,%r8d
jnz .Ldoubleround
+ ret
+ENDPROC(chacha_permute)
+
+ENTRY(chacha_block_xor_ssse3)
+ # %rdi: Input state matrix, s
+ # %rsi: up to 1 data block output, o
+ # %rdx: up to 1 data block input, i
+ # %rcx: input/output length in bytes
+ # %r8d: nrounds
+ FRAME_BEGIN
+
+ # x0..3 = s0..3
+ movdqa 0x00(%rdi),%xmm0
+ movdqa 0x10(%rdi),%xmm1
+ movdqa 0x20(%rdi),%xmm2
+ movdqa 0x30(%rdi),%xmm3
+ movdqa %xmm0,%xmm8
+ movdqa %xmm1,%xmm9
+ movdqa %xmm2,%xmm10
+ movdqa %xmm3,%xmm11
+
+ mov %rcx,%rax
+ call chacha_permute
+
# o0 = i0 ^ (x0 + s0)
- movdqu 0x00(%rdx),%xmm4
paddd %xmm8,%xmm0
+ cmp $0x10,%rax
+ jl .Lxorpart
+ movdqu 0x00(%rdx),%xmm4
pxor %xmm4,%xmm0
movdqu %xmm0,0x00(%rsi)
# o1 = i1 ^ (x1 + s1)
- movdqu 0x10(%rdx),%xmm5
paddd %xmm9,%xmm1
- pxor %xmm5,%xmm1
- movdqu %xmm1,0x10(%rsi)
+ movdqa %xmm1,%xmm0
+ cmp $0x20,%rax
+ jl .Lxorpart
+ movdqu 0x10(%rdx),%xmm0
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0x10(%rsi)
# o2 = i2 ^ (x2 + s2)
- movdqu 0x20(%rdx),%xmm6
paddd %xmm10,%xmm2
- pxor %xmm6,%xmm2
- movdqu %xmm2,0x20(%rsi)
+ movdqa %xmm2,%xmm0
+ cmp $0x30,%rax
+ jl .Lxorpart
+ movdqu 0x20(%rdx),%xmm0
+ pxor %xmm2,%xmm0
+ movdqu %xmm0,0x20(%rsi)
# o3 = i3 ^ (x3 + s3)
- movdqu 0x30(%rdx),%xmm7
paddd %xmm11,%xmm3
- pxor %xmm7,%xmm3
- movdqu %xmm3,0x30(%rsi)
+ movdqa %xmm3,%xmm0
+ cmp $0x40,%rax
+ jl .Lxorpart
+ movdqu 0x30(%rdx),%xmm0
+ pxor %xmm3,%xmm0
+ movdqu %xmm0,0x30(%rsi)
+
+.Ldone:
+ FRAME_END
+ ret
+
+.Lxorpart:
+ # xor remaining bytes from partial register into output
+ mov %rax,%r9
+ and $0x0f,%r9
+ jz .Ldone
+ and $~0x0f,%rax
+
+ mov %rsi,%r11
+
+ lea 8(%rsp),%r10
+ sub $0x10,%rsp
+ and $~31,%rsp
+
+ lea (%rdx,%rax),%rsi
+ mov %rsp,%rdi
+ mov %r9,%rcx
+ rep movsb
+
+ pxor 0x00(%rsp),%xmm0
+ movdqa %xmm0,0x00(%rsp)
+ mov %rsp,%rsi
+ lea (%r11,%rax),%rdi
+ mov %r9,%rcx
+ rep movsb
+
+ lea -8(%r10),%rsp
+ jmp .Ldone
+
+ENDPROC(chacha_block_xor_ssse3)
+
+ENTRY(hchacha_block_ssse3)
+ # %rdi: Input state matrix, s
+ # %rsi: output (8 32-bit words)
+ # %edx: nrounds
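+
+	# HChaCha runs the ChaCha permutation over the state but skips the
+	# final feed-forward addition, returning state words 0..3 and 12..15
+	# as the derived subkey (as used by XChaCha nonce extension).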
+ FRAME_BEGIN
+
+ movdqa 0x00(%rdi),%xmm0
+ movdqa 0x10(%rdi),%xmm1
+ movdqa 0x20(%rdi),%xmm2
+ movdqa 0x30(%rdi),%xmm3
+
+ mov %edx,%r8d
+ call chacha_permute
+
+ movdqu %xmm0,0x00(%rsi)
+ movdqu %xmm3,0x10(%rsi)
+
+ FRAME_END
ret
-ENDPROC(chacha20_block_xor_ssse3)
+ENDPROC(hchacha_block_ssse3)
-ENTRY(chacha20_4block_xor_ssse3)
+ENTRY(chacha_4block_xor_ssse3)
# %rdi: Input state matrix, s
- # %rsi: 4 data blocks output, o
- # %rdx: 4 data blocks input, i
+ # %rsi: up to 4 data blocks output, o
+ # %rdx: up to 4 data blocks input, i
+ # %rcx: input/output length in bytes
+ # %r8d: nrounds
- # This function encrypts four consecutive ChaCha20 blocks by loading the
+ # This function encrypts four consecutive ChaCha blocks by loading
# the state matrix in SSE registers four times. As we need some scratch
# registers, we save the first four registers on the stack. The
# algorithm performs each operation on the corresponding word of each
@@ -163,6 +244,7 @@ ENTRY(chacha20_4block_xor_ssse3)
lea 8(%rsp),%r10
sub $0x80,%rsp
and $~63,%rsp
+ mov %rcx,%rax
# x0..15[0-3] = s0..3[0..3]
movq 0x00(%rdi),%xmm1
@@ -202,8 +284,6 @@ ENTRY(chacha20_4block_xor_ssse3)
# x12 += counter values 0-3
paddd %xmm1,%xmm12
- mov $10,%ecx
-
.Ldoubleround4:
# x0 += x4, x12 = rotl32(x12 ^ x0, 16)
movdqa 0x00(%rsp),%xmm0
@@ -421,7 +501,7 @@ ENTRY(chacha20_4block_xor_ssse3)
psrld $25,%xmm4
por %xmm0,%xmm4
- dec %ecx
+ sub $2,%r8d
jnz .Ldoubleround4
# x0[0-3] += s0[0]
@@ -573,58 +653,143 @@ ENTRY(chacha20_4block_xor_ssse3)
# xor with corresponding input, write to output
movdqa 0x00(%rsp),%xmm0
+ cmp $0x10,%rax
+ jl .Lxorpart4
movdqu 0x00(%rdx),%xmm1
pxor %xmm1,%xmm0
movdqu %xmm0,0x00(%rsi)
- movdqa 0x10(%rsp),%xmm0
- movdqu 0x80(%rdx),%xmm1
+
+ movdqu %xmm4,%xmm0
+ cmp $0x20,%rax
+ jl .Lxorpart4
+ movdqu 0x10(%rdx),%xmm1
pxor %xmm1,%xmm0
- movdqu %xmm0,0x80(%rsi)
+ movdqu %xmm0,0x10(%rsi)
+
+ movdqu %xmm8,%xmm0
+ cmp $0x30,%rax
+ jl .Lxorpart4
+ movdqu 0x20(%rdx),%xmm1
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0x20(%rsi)
+
+ movdqu %xmm12,%xmm0
+ cmp $0x40,%rax
+ jl .Lxorpart4
+ movdqu 0x30(%rdx),%xmm1
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0x30(%rsi)
+
movdqa 0x20(%rsp),%xmm0
+ cmp $0x50,%rax
+ jl .Lxorpart4
movdqu 0x40(%rdx),%xmm1
pxor %xmm1,%xmm0
movdqu %xmm0,0x40(%rsi)
+
+ movdqu %xmm6,%xmm0
+ cmp $0x60,%rax
+ jl .Lxorpart4
+ movdqu 0x50(%rdx),%xmm1
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0x50(%rsi)
+
+ movdqu %xmm10,%xmm0
+ cmp $0x70,%rax
+ jl .Lxorpart4
+ movdqu 0x60(%rdx),%xmm1
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0x60(%rsi)
+
+ movdqu %xmm14,%xmm0
+ cmp $0x80,%rax
+ jl .Lxorpart4
+ movdqu 0x70(%rdx),%xmm1
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0x70(%rsi)
+
+ movdqa 0x10(%rsp),%xmm0
+ cmp $0x90,%rax
+ jl .Lxorpart4
+ movdqu 0x80(%rdx),%xmm1
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0x80(%rsi)
+
+ movdqu %xmm5,%xmm0
+ cmp $0xa0,%rax
+ jl .Lxorpart4
+ movdqu 0x90(%rdx),%xmm1
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0x90(%rsi)
+
+ movdqu %xmm9,%xmm0
+ cmp $0xb0,%rax
+ jl .Lxorpart4
+ movdqu 0xa0(%rdx),%xmm1
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0xa0(%rsi)
+
+ movdqu %xmm13,%xmm0
+ cmp $0xc0,%rax
+ jl .Lxorpart4
+ movdqu 0xb0(%rdx),%xmm1
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0xb0(%rsi)
+
movdqa 0x30(%rsp),%xmm0
+ cmp $0xd0,%rax
+ jl .Lxorpart4
movdqu 0xc0(%rdx),%xmm1
pxor %xmm1,%xmm0
movdqu %xmm0,0xc0(%rsi)
- movdqu 0x10(%rdx),%xmm1
- pxor %xmm1,%xmm4
- movdqu %xmm4,0x10(%rsi)
- movdqu 0x90(%rdx),%xmm1
- pxor %xmm1,%xmm5
- movdqu %xmm5,0x90(%rsi)
- movdqu 0x50(%rdx),%xmm1
- pxor %xmm1,%xmm6
- movdqu %xmm6,0x50(%rsi)
+
+ movdqu %xmm7,%xmm0
+ cmp $0xe0,%rax
+ jl .Lxorpart4
movdqu 0xd0(%rdx),%xmm1
- pxor %xmm1,%xmm7
- movdqu %xmm7,0xd0(%rsi)
- movdqu 0x20(%rdx),%xmm1
- pxor %xmm1,%xmm8
- movdqu %xmm8,0x20(%rsi)
- movdqu 0xa0(%rdx),%xmm1
- pxor %xmm1,%xmm9
- movdqu %xmm9,0xa0(%rsi)
- movdqu 0x60(%rdx),%xmm1
- pxor %xmm1,%xmm10
- movdqu %xmm10,0x60(%rsi)
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0xd0(%rsi)
+
+ movdqu %xmm11,%xmm0
+ cmp $0xf0,%rax
+ jl .Lxorpart4
movdqu 0xe0(%rdx),%xmm1
- pxor %xmm1,%xmm11
- movdqu %xmm11,0xe0(%rsi)
- movdqu 0x30(%rdx),%xmm1
- pxor %xmm1,%xmm12
- movdqu %xmm12,0x30(%rsi)
- movdqu 0xb0(%rdx),%xmm1
- pxor %xmm1,%xmm13
- movdqu %xmm13,0xb0(%rsi)
- movdqu 0x70(%rdx),%xmm1
- pxor %xmm1,%xmm14
- movdqu %xmm14,0x70(%rsi)
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0xe0(%rsi)
+
+ movdqu %xmm15,%xmm0
+ cmp $0x100,%rax
+ jl .Lxorpart4
movdqu 0xf0(%rdx),%xmm1
- pxor %xmm1,%xmm15
- movdqu %xmm15,0xf0(%rsi)
+ pxor %xmm1,%xmm0
+ movdqu %xmm0,0xf0(%rsi)
+.Ldone4:
lea -8(%r10),%rsp
ret
-ENDPROC(chacha20_4block_xor_ssse3)
+
+.Lxorpart4:
+ # xor remaining bytes from partial register into output
+ mov %rax,%r9
+ and $0x0f,%r9
+ jz .Ldone4
+ and $~0x0f,%rax
+
+ mov %rsi,%r11
+
+ lea (%rdx,%rax),%rsi
+ mov %rsp,%rdi
+ mov %r9,%rcx
+ rep movsb
+
+ pxor 0x00(%rsp),%xmm0
+ movdqa %xmm0,0x00(%rsp)
+
+ mov %rsp,%rsi
+ lea (%r11,%rax),%rdi
+ mov %r9,%rcx
+ rep movsb
+
+ jmp .Ldone4
+
+ENDPROC(chacha_4block_xor_ssse3)
diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S
deleted file mode 100644
index f3cd26f48332..000000000000
--- a/arch/x86/crypto/chacha20-avx2-x86_64.S
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * ChaCha20 256-bit cipher algorithm, RFC7539, x64 AVX2 functions
- *
- * Copyright (C) 2015 Martin Willi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/linkage.h>
-
-.section .rodata.cst32.ROT8, "aM", @progbits, 32
-.align 32
-ROT8: .octa 0x0e0d0c0f0a09080b0605040702010003
- .octa 0x0e0d0c0f0a09080b0605040702010003
-
-.section .rodata.cst32.ROT16, "aM", @progbits, 32
-.align 32
-ROT16: .octa 0x0d0c0f0e09080b0a0504070601000302
- .octa 0x0d0c0f0e09080b0a0504070601000302
-
-.section .rodata.cst32.CTRINC, "aM", @progbits, 32
-.align 32
-CTRINC: .octa 0x00000003000000020000000100000000
- .octa 0x00000007000000060000000500000004
-
-.text
-
-ENTRY(chacha20_8block_xor_avx2)
- # %rdi: Input state matrix, s
- # %rsi: 8 data blocks output, o
- # %rdx: 8 data blocks input, i
-
- # This function encrypts eight consecutive ChaCha20 blocks by loading
- # the state matrix in AVX registers eight times. As we need some
- # scratch registers, we save the first four registers on the stack. The
- # algorithm performs each operation on the corresponding word of each
- # state matrix, hence requires no word shuffling. For final XORing step
- # we transpose the matrix by interleaving 32-, 64- and then 128-bit
- # words, which allows us to do XOR in AVX registers. 8/16-bit word
- # rotation is done with the slightly better performing byte shuffling,
- # 7/12-bit word rotation uses traditional shift+OR.
-
- vzeroupper
- # 4 * 32 byte stack, 32-byte aligned
- lea 8(%rsp),%r10
- and $~31, %rsp
- sub $0x80, %rsp
-
- # x0..15[0-7] = s[0..15]
- vpbroadcastd 0x00(%rdi),%ymm0
- vpbroadcastd 0x04(%rdi),%ymm1
- vpbroadcastd 0x08(%rdi),%ymm2
- vpbroadcastd 0x0c(%rdi),%ymm3
- vpbroadcastd 0x10(%rdi),%ymm4
- vpbroadcastd 0x14(%rdi),%ymm5
- vpbroadcastd 0x18(%rdi),%ymm6
- vpbroadcastd 0x1c(%rdi),%ymm7
- vpbroadcastd 0x20(%rdi),%ymm8
- vpbroadcastd 0x24(%rdi),%ymm9
- vpbroadcastd 0x28(%rdi),%ymm10
- vpbroadcastd 0x2c(%rdi),%ymm11
- vpbroadcastd 0x30(%rdi),%ymm12
- vpbroadcastd 0x34(%rdi),%ymm13
- vpbroadcastd 0x38(%rdi),%ymm14
- vpbroadcastd 0x3c(%rdi),%ymm15
- # x0..3 on stack
- vmovdqa %ymm0,0x00(%rsp)
- vmovdqa %ymm1,0x20(%rsp)
- vmovdqa %ymm2,0x40(%rsp)
- vmovdqa %ymm3,0x60(%rsp)
-
- vmovdqa CTRINC(%rip),%ymm1
- vmovdqa ROT8(%rip),%ymm2
- vmovdqa ROT16(%rip),%ymm3
-
- # x12 += counter values 0-3
- vpaddd %ymm1,%ymm12,%ymm12
-
- mov $10,%ecx
-
-.Ldoubleround8:
- # x0 += x4, x12 = rotl32(x12 ^ x0, 16)
- vpaddd 0x00(%rsp),%ymm4,%ymm0
- vmovdqa %ymm0,0x00(%rsp)
- vpxor %ymm0,%ymm12,%ymm12
- vpshufb %ymm3,%ymm12,%ymm12
- # x1 += x5, x13 = rotl32(x13 ^ x1, 16)
- vpaddd 0x20(%rsp),%ymm5,%ymm0
- vmovdqa %ymm0,0x20(%rsp)
- vpxor %ymm0,%ymm13,%ymm13
- vpshufb %ymm3,%ymm13,%ymm13
- # x2 += x6, x14 = rotl32(x14 ^ x2, 16)
- vpaddd 0x40(%rsp),%ymm6,%ymm0
- vmovdqa %ymm0,0x40(%rsp)
- vpxor %ymm0,%ymm14,%ymm14
- vpshufb %ymm3,%ymm14,%ymm14
- # x3 += x7, x15 = rotl32(x15 ^ x3, 16)
- vpaddd 0x60(%rsp),%ymm7,%ymm0
- vmovdqa %ymm0,0x60(%rsp)
- vpxor %ymm0,%ymm15,%ymm15
- vpshufb %ymm3,%ymm15,%ymm15
-
- # x8 += x12, x4 = rotl32(x4 ^ x8, 12)
- vpaddd %ymm12,%ymm8,%ymm8
- vpxor %ymm8,%ymm4,%ymm4
- vpslld $12,%ymm4,%ymm0
- vpsrld $20,%ymm4,%ymm4
- vpor %ymm0,%ymm4,%ymm4
- # x9 += x13, x5 = rotl32(x5 ^ x9, 12)
- vpaddd %ymm13,%ymm9,%ymm9
- vpxor %ymm9,%ymm5,%ymm5
- vpslld $12,%ymm5,%ymm0
- vpsrld $20,%ymm5,%ymm5
- vpor %ymm0,%ymm5,%ymm5
- # x10 += x14, x6 = rotl32(x6 ^ x10, 12)
- vpaddd %ymm14,%ymm10,%ymm10
- vpxor %ymm10,%ymm6,%ymm6
- vpslld $12,%ymm6,%ymm0
- vpsrld $20,%ymm6,%ymm6
- vpor %ymm0,%ymm6,%ymm6
- # x11 += x15, x7 = rotl32(x7 ^ x11, 12)
- vpaddd %ymm15,%ymm11,%ymm11
- vpxor %ymm11,%ymm7,%ymm7
- vpslld $12,%ymm7,%ymm0
- vpsrld $20,%ymm7,%ymm7
- vpor %ymm0,%ymm7,%ymm7
-
- # x0 += x4, x12 = rotl32(x12 ^ x0, 8)
- vpaddd 0x00(%rsp),%ymm4,%ymm0
- vmovdqa %ymm0,0x00(%rsp)
- vpxor %ymm0,%ymm12,%ymm12
- vpshufb %ymm2,%ymm12,%ymm12
- # x1 += x5, x13 = rotl32(x13 ^ x1, 8)
- vpaddd 0x20(%rsp),%ymm5,%ymm0
- vmovdqa %ymm0,0x20(%rsp)
- vpxor %ymm0,%ymm13,%ymm13
- vpshufb %ymm2,%ymm13,%ymm13
- # x2 += x6, x14 = rotl32(x14 ^ x2, 8)
- vpaddd 0x40(%rsp),%ymm6,%ymm0
- vmovdqa %ymm0,0x40(%rsp)
- vpxor %ymm0,%ymm14,%ymm14
- vpshufb %ymm2,%ymm14,%ymm14
- # x3 += x7, x15 = rotl32(x15 ^ x3, 8)
- vpaddd 0x60(%rsp),%ymm7,%ymm0
- vmovdqa %ymm0,0x60(%rsp)
- vpxor %ymm0,%ymm15,%ymm15
- vpshufb %ymm2,%ymm15,%ymm15
-
- # x8 += x12, x4 = rotl32(x4 ^ x8, 7)
- vpaddd %ymm12,%ymm8,%ymm8
- vpxor %ymm8,%ymm4,%ymm4
- vpslld $7,%ymm4,%ymm0
- vpsrld $25,%ymm4,%ymm4
- vpor %ymm0,%ymm4,%ymm4
- # x9 += x13, x5 = rotl32(x5 ^ x9, 7)
- vpaddd %ymm13,%ymm9,%ymm9
- vpxor %ymm9,%ymm5,%ymm5
- vpslld $7,%ymm5,%ymm0
- vpsrld $25,%ymm5,%ymm5
- vpor %ymm0,%ymm5,%ymm5
- # x10 += x14, x6 = rotl32(x6 ^ x10, 7)
- vpaddd %ymm14,%ymm10,%ymm10
- vpxor %ymm10,%ymm6,%ymm6
- vpslld $7,%ymm6,%ymm0
- vpsrld $25,%ymm6,%ymm6
- vpor %ymm0,%ymm6,%ymm6
- # x11 += x15, x7 = rotl32(x7 ^ x11, 7)
- vpaddd %ymm15,%ymm11,%ymm11
- vpxor %ymm11,%ymm7,%ymm7
- vpslld $7,%ymm7,%ymm0
- vpsrld $25,%ymm7,%ymm7
- vpor %ymm0,%ymm7,%ymm7
-
- # x0 += x5, x15 = rotl32(x15 ^ x0, 16)
- vpaddd 0x00(%rsp),%ymm5,%ymm0
- vmovdqa %ymm0,0x00(%rsp)
- vpxor %ymm0,%ymm15,%ymm15
- vpshufb %ymm3,%ymm15,%ymm15
- # x1 += x6, x12 = rotl32(x12 ^ x1, 16)
- vpaddd 0x20(%rsp),%ymm6,%ymm0
- vmovdqa %ymm0,0x20(%rsp)
- vpxor %ymm0,%ymm12,%ymm12
- vpshufb %ymm3,%ymm12,%ymm12
- # x2 += x7, x13 = rotl32(x13 ^ x2, 16)
- vpaddd 0x40(%rsp),%ymm7,%ymm0
- vmovdqa %ymm0,0x40(%rsp)
- vpxor %ymm0,%ymm13,%ymm13
- vpshufb %ymm3,%ymm13,%ymm13
- # x3 += x4, x14 = rotl32(x14 ^ x3, 16)
- vpaddd 0x60(%rsp),%ymm4,%ymm0
- vmovdqa %ymm0,0x60(%rsp)
- vpxor %ymm0,%ymm14,%ymm14
- vpshufb %ymm3,%ymm14,%ymm14
-
- # x10 += x15, x5 = rotl32(x5 ^ x10, 12)
- vpaddd %ymm15,%ymm10,%ymm10
- vpxor %ymm10,%ymm5,%ymm5
- vpslld $12,%ymm5,%ymm0
- vpsrld $20,%ymm5,%ymm5
- vpor %ymm0,%ymm5,%ymm5
- # x11 += x12, x6 = rotl32(x6 ^ x11, 12)
- vpaddd %ymm12,%ymm11,%ymm11
- vpxor %ymm11,%ymm6,%ymm6
- vpslld $12,%ymm6,%ymm0
- vpsrld $20,%ymm6,%ymm6
- vpor %ymm0,%ymm6,%ymm6
- # x8 += x13, x7 = rotl32(x7 ^ x8, 12)
- vpaddd %ymm13,%ymm8,%ymm8
- vpxor %ymm8,%ymm7,%ymm7
- vpslld $12,%ymm7,%ymm0
- vpsrld $20,%ymm7,%ymm7
- vpor %ymm0,%ymm7,%ymm7
- # x9 += x14, x4 = rotl32(x4 ^ x9, 12)
- vpaddd %ymm14,%ymm9,%ymm9
- vpxor %ymm9,%ymm4,%ymm4
- vpslld $12,%ymm4,%ymm0
- vpsrld $20,%ymm4,%ymm4
- vpor %ymm0,%ymm4,%ymm4
-
- # x0 += x5, x15 = rotl32(x15 ^ x0, 8)
- vpaddd 0x00(%rsp),%ymm5,%ymm0
- vmovdqa %ymm0,0x00(%rsp)
- vpxor %ymm0,%ymm15,%ymm15
- vpshufb %ymm2,%ymm15,%ymm15
- # x1 += x6, x12 = rotl32(x12 ^ x1, 8)
- vpaddd 0x20(%rsp),%ymm6,%ymm0
- vmovdqa %ymm0,0x20(%rsp)
- vpxor %ymm0,%ymm12,%ymm12
- vpshufb %ymm2,%ymm12,%ymm12
- # x2 += x7, x13 = rotl32(x13 ^ x2, 8)
- vpaddd 0x40(%rsp),%ymm7,%ymm0
- vmovdqa %ymm0,0x40(%rsp)
- vpxor %ymm0,%ymm13,%ymm13
- vpshufb %ymm2,%ymm13,%ymm13
- # x3 += x4, x14 = rotl32(x14 ^ x3, 8)
- vpaddd 0x60(%rsp),%ymm4,%ymm0
- vmovdqa %ymm0,0x60(%rsp)
- vpxor %ymm0,%ymm14,%ymm14
- vpshufb %ymm2,%ymm14,%ymm14
-
- # x10 += x15, x5 = rotl32(x5 ^ x10, 7)
- vpaddd %ymm15,%ymm10,%ymm10
- vpxor %ymm10,%ymm5,%ymm5
- vpslld $7,%ymm5,%ymm0
- vpsrld $25,%ymm5,%ymm5
- vpor %ymm0,%ymm5,%ymm5
- # x11 += x12, x6 = rotl32(x6 ^ x11, 7)
- vpaddd %ymm12,%ymm11,%ymm11
- vpxor %ymm11,%ymm6,%ymm6
- vpslld $7,%ymm6,%ymm0
- vpsrld $25,%ymm6,%ymm6
- vpor %ymm0,%ymm6,%ymm6
- # x8 += x13, x7 = rotl32(x7 ^ x8, 7)
- vpaddd %ymm13,%ymm8,%ymm8
- vpxor %ymm8,%ymm7,%ymm7
- vpslld $7,%ymm7,%ymm0
- vpsrld $25,%ymm7,%ymm7
- vpor %ymm0,%ymm7,%ymm7
- # x9 += x14, x4 = rotl32(x4 ^ x9, 7)
- vpaddd %ymm14,%ymm9,%ymm9
- vpxor %ymm9,%ymm4,%ymm4
- vpslld $7,%ymm4,%ymm0
- vpsrld $25,%ymm4,%ymm4
- vpor %ymm0,%ymm4,%ymm4
-
- dec %ecx
- jnz .Ldoubleround8
-
- # x0..15[0-3] += s[0..15]
- vpbroadcastd 0x00(%rdi),%ymm0
- vpaddd 0x00(%rsp),%ymm0,%ymm0
- vmovdqa %ymm0,0x00(%rsp)
- vpbroadcastd 0x04(%rdi),%ymm0
- vpaddd 0x20(%rsp),%ymm0,%ymm0
- vmovdqa %ymm0,0x20(%rsp)
- vpbroadcastd 0x08(%rdi),%ymm0
- vpaddd 0x40(%rsp),%ymm0,%ymm0
- vmovdqa %ymm0,0x40(%rsp)
- vpbroadcastd 0x0c(%rdi),%ymm0
- vpaddd 0x60(%rsp),%ymm0,%ymm0
- vmovdqa %ymm0,0x60(%rsp)
- vpbroadcastd 0x10(%rdi),%ymm0
- vpaddd %ymm0,%ymm4,%ymm4
- vpbroadcastd 0x14(%rdi),%ymm0
- vpaddd %ymm0,%ymm5,%ymm5
- vpbroadcastd 0x18(%rdi),%ymm0
- vpaddd %ymm0,%ymm6,%ymm6
- vpbroadcastd 0x1c(%rdi),%ymm0
- vpaddd %ymm0,%ymm7,%ymm7
- vpbroadcastd 0x20(%rdi),%ymm0
- vpaddd %ymm0,%ymm8,%ymm8
- vpbroadcastd 0x24(%rdi),%ymm0
- vpaddd %ymm0,%ymm9,%ymm9
- vpbroadcastd 0x28(%rdi),%ymm0
- vpaddd %ymm0,%ymm10,%ymm10
- vpbroadcastd 0x2c(%rdi),%ymm0
- vpaddd %ymm0,%ymm11,%ymm11
- vpbroadcastd 0x30(%rdi),%ymm0
- vpaddd %ymm0,%ymm12,%ymm12
- vpbroadcastd 0x34(%rdi),%ymm0
- vpaddd %ymm0,%ymm13,%ymm13
- vpbroadcastd 0x38(%rdi),%ymm0
- vpaddd %ymm0,%ymm14,%ymm14
- vpbroadcastd 0x3c(%rdi),%ymm0
- vpaddd %ymm0,%ymm15,%ymm15
-
- # x12 += counter values 0-3
- vpaddd %ymm1,%ymm12,%ymm12
-
- # interleave 32-bit words in state n, n+1
- vmovdqa 0x00(%rsp),%ymm0
- vmovdqa 0x20(%rsp),%ymm1
- vpunpckldq %ymm1,%ymm0,%ymm2
- vpunpckhdq %ymm1,%ymm0,%ymm1
- vmovdqa %ymm2,0x00(%rsp)
- vmovdqa %ymm1,0x20(%rsp)
- vmovdqa 0x40(%rsp),%ymm0
- vmovdqa 0x60(%rsp),%ymm1
- vpunpckldq %ymm1,%ymm0,%ymm2
- vpunpckhdq %ymm1,%ymm0,%ymm1
- vmovdqa %ymm2,0x40(%rsp)
- vmovdqa %ymm1,0x60(%rsp)
- vmovdqa %ymm4,%ymm0
- vpunpckldq %ymm5,%ymm0,%ymm4
- vpunpckhdq %ymm5,%ymm0,%ymm5
- vmovdqa %ymm6,%ymm0
- vpunpckldq %ymm7,%ymm0,%ymm6
- vpunpckhdq %ymm7,%ymm0,%ymm7
- vmovdqa %ymm8,%ymm0
- vpunpckldq %ymm9,%ymm0,%ymm8
- vpunpckhdq %ymm9,%ymm0,%ymm9
- vmovdqa %ymm10,%ymm0
- vpunpckldq %ymm11,%ymm0,%ymm10
- vpunpckhdq %ymm11,%ymm0,%ymm11
- vmovdqa %ymm12,%ymm0
- vpunpckldq %ymm13,%ymm0,%ymm12
- vpunpckhdq %ymm13,%ymm0,%ymm13
- vmovdqa %ymm14,%ymm0
- vpunpckldq %ymm15,%ymm0,%ymm14
- vpunpckhdq %ymm15,%ymm0,%ymm15
-
- # interleave 64-bit words in state n, n+2
- vmovdqa 0x00(%rsp),%ymm0
- vmovdqa 0x40(%rsp),%ymm2
- vpunpcklqdq %ymm2,%ymm0,%ymm1
- vpunpckhqdq %ymm2,%ymm0,%ymm2
- vmovdqa %ymm1,0x00(%rsp)
- vmovdqa %ymm2,0x40(%rsp)
- vmovdqa 0x20(%rsp),%ymm0
- vmovdqa 0x60(%rsp),%ymm2
- vpunpcklqdq %ymm2,%ymm0,%ymm1
- vpunpckhqdq %ymm2,%ymm0,%ymm2
- vmovdqa %ymm1,0x20(%rsp)
- vmovdqa %ymm2,0x60(%rsp)
- vmovdqa %ymm4,%ymm0
- vpunpcklqdq %ymm6,%ymm0,%ymm4
- vpunpckhqdq %ymm6,%ymm0,%ymm6
- vmovdqa %ymm5,%ymm0
- vpunpcklqdq %ymm7,%ymm0,%ymm5
- vpunpckhqdq %ymm7,%ymm0,%ymm7
- vmovdqa %ymm8,%ymm0
- vpunpcklqdq %ymm10,%ymm0,%ymm8
- vpunpckhqdq %ymm10,%ymm0,%ymm10
- vmovdqa %ymm9,%ymm0
- vpunpcklqdq %ymm11,%ymm0,%ymm9
- vpunpckhqdq %ymm11,%ymm0,%ymm11
- vmovdqa %ymm12,%ymm0
- vpunpcklqdq %ymm14,%ymm0,%ymm12
- vpunpckhqdq %ymm14,%ymm0,%ymm14
- vmovdqa %ymm13,%ymm0
- vpunpcklqdq %ymm15,%ymm0,%ymm13
- vpunpckhqdq %ymm15,%ymm0,%ymm15
-
- # interleave 128-bit words in state n, n+4
- vmovdqa 0x00(%rsp),%ymm0
- vperm2i128 $0x20,%ymm4,%ymm0,%ymm1
- vperm2i128 $0x31,%ymm4,%ymm0,%ymm4
- vmovdqa %ymm1,0x00(%rsp)
- vmovdqa 0x20(%rsp),%ymm0
- vperm2i128 $0x20,%ymm5,%ymm0,%ymm1
- vperm2i128 $0x31,%ymm5,%ymm0,%ymm5
- vmovdqa %ymm1,0x20(%rsp)
- vmovdqa 0x40(%rsp),%ymm0
- vperm2i128 $0x20,%ymm6,%ymm0,%ymm1
- vperm2i128 $0x31,%ymm6,%ymm0,%ymm6
- vmovdqa %ymm1,0x40(%rsp)
- vmovdqa 0x60(%rsp),%ymm0
- vperm2i128 $0x20,%ymm7,%ymm0,%ymm1
- vperm2i128 $0x31,%ymm7,%ymm0,%ymm7
- vmovdqa %ymm1,0x60(%rsp)
- vperm2i128 $0x20,%ymm12,%ymm8,%ymm0
- vperm2i128 $0x31,%ymm12,%ymm8,%ymm12
- vmovdqa %ymm0,%ymm8
- vperm2i128 $0x20,%ymm13,%ymm9,%ymm0
- vperm2i128 $0x31,%ymm13,%ymm9,%ymm13
- vmovdqa %ymm0,%ymm9
- vperm2i128 $0x20,%ymm14,%ymm10,%ymm0
- vperm2i128 $0x31,%ymm14,%ymm10,%ymm14
- vmovdqa %ymm0,%ymm10
- vperm2i128 $0x20,%ymm15,%ymm11,%ymm0
- vperm2i128 $0x31,%ymm15,%ymm11,%ymm15
- vmovdqa %ymm0,%ymm11
-
- # xor with corresponding input, write to output
- vmovdqa 0x00(%rsp),%ymm0
- vpxor 0x0000(%rdx),%ymm0,%ymm0
- vmovdqu %ymm0,0x0000(%rsi)
- vmovdqa 0x20(%rsp),%ymm0
- vpxor 0x0080(%rdx),%ymm0,%ymm0
- vmovdqu %ymm0,0x0080(%rsi)
- vmovdqa 0x40(%rsp),%ymm0
- vpxor 0x0040(%rdx),%ymm0,%ymm0
- vmovdqu %ymm0,0x0040(%rsi)
- vmovdqa 0x60(%rsp),%ymm0
- vpxor 0x00c0(%rdx),%ymm0,%ymm0
- vmovdqu %ymm0,0x00c0(%rsi)
- vpxor 0x0100(%rdx),%ymm4,%ymm4
- vmovdqu %ymm4,0x0100(%rsi)
- vpxor 0x0180(%rdx),%ymm5,%ymm5
- vmovdqu %ymm5,0x0180(%rsi)
- vpxor 0x0140(%rdx),%ymm6,%ymm6
- vmovdqu %ymm6,0x0140(%rsi)
- vpxor 0x01c0(%rdx),%ymm7,%ymm7
- vmovdqu %ymm7,0x01c0(%rsi)
- vpxor 0x0020(%rdx),%ymm8,%ymm8
- vmovdqu %ymm8,0x0020(%rsi)
- vpxor 0x00a0(%rdx),%ymm9,%ymm9
- vmovdqu %ymm9,0x00a0(%rsi)
- vpxor 0x0060(%rdx),%ymm10,%ymm10
- vmovdqu %ymm10,0x0060(%rsi)
- vpxor 0x00e0(%rdx),%ymm11,%ymm11
- vmovdqu %ymm11,0x00e0(%rsi)
- vpxor 0x0120(%rdx),%ymm12,%ymm12
- vmovdqu %ymm12,0x0120(%rsi)
- vpxor 0x01a0(%rdx),%ymm13,%ymm13
- vmovdqu %ymm13,0x01a0(%rsi)
- vpxor 0x0160(%rdx),%ymm14,%ymm14
- vmovdqu %ymm14,0x0160(%rsi)
- vpxor 0x01e0(%rdx),%ymm15,%ymm15
- vmovdqu %ymm15,0x01e0(%rsi)
-
- vzeroupper
- lea -8(%r10),%rsp
- ret
-ENDPROC(chacha20_8block_xor_avx2)
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
deleted file mode 100644
index dce7c5d39c2f..000000000000
--- a/arch/x86/crypto/chacha20_glue.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code
- *
- * Copyright (C) 2015 Martin Willi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <crypto/algapi.h>
-#include <crypto/chacha20.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <asm/fpu/api.h>
-#include <asm/simd.h>
-
-#define CHACHA20_STATE_ALIGN 16
-
-asmlinkage void chacha20_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
-asmlinkage void chacha20_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src);
-#ifdef CONFIG_AS_AVX2
-asmlinkage void chacha20_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src);
-static bool chacha20_use_avx2;
-#endif
-
-static void chacha20_dosimd(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes)
-{
- u8 buf[CHACHA20_BLOCK_SIZE];
-
-#ifdef CONFIG_AS_AVX2
- if (chacha20_use_avx2) {
- while (bytes >= CHACHA20_BLOCK_SIZE * 8) {
- chacha20_8block_xor_avx2(state, dst, src);
- bytes -= CHACHA20_BLOCK_SIZE * 8;
- src += CHACHA20_BLOCK_SIZE * 8;
- dst += CHACHA20_BLOCK_SIZE * 8;
- state[12] += 8;
- }
- }
-#endif
- while (bytes >= CHACHA20_BLOCK_SIZE * 4) {
- chacha20_4block_xor_ssse3(state, dst, src);
- bytes -= CHACHA20_BLOCK_SIZE * 4;
- src += CHACHA20_BLOCK_SIZE * 4;
- dst += CHACHA20_BLOCK_SIZE * 4;
- state[12] += 4;
- }
- while (bytes >= CHACHA20_BLOCK_SIZE) {
- chacha20_block_xor_ssse3(state, dst, src);
- bytes -= CHACHA20_BLOCK_SIZE;
- src += CHACHA20_BLOCK_SIZE;
- dst += CHACHA20_BLOCK_SIZE;
- state[12]++;
- }
- if (bytes) {
- memcpy(buf, src, bytes);
- chacha20_block_xor_ssse3(state, buf, buf);
- memcpy(dst, buf, bytes);
- }
-}
-
-static int chacha20_simd(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
- u32 *state, state_buf[16 + 2] __aligned(8);
- struct skcipher_walk walk;
- int err;
-
- BUILD_BUG_ON(CHACHA20_STATE_ALIGN != 16);
- state = PTR_ALIGN(state_buf + 0, CHACHA20_STATE_ALIGN);
-
- if (req->cryptlen <= CHACHA20_BLOCK_SIZE || !may_use_simd())
- return crypto_chacha20_crypt(req);
-
- err = skcipher_walk_virt(&walk, req, true);
-
- crypto_chacha20_init(state, ctx, walk.iv);
-
- kernel_fpu_begin();
-
- while (walk.nbytes >= CHACHA20_BLOCK_SIZE) {
- chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
- rounddown(walk.nbytes, CHACHA20_BLOCK_SIZE));
- err = skcipher_walk_done(&walk,
- walk.nbytes % CHACHA20_BLOCK_SIZE);
- }
-
- if (walk.nbytes) {
- chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
- walk.nbytes);
- err = skcipher_walk_done(&walk, 0);
- }
-
- kernel_fpu_end();
-
- return err;
-}
-
-static struct skcipher_alg alg = {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-simd",
- .base.cra_priority = 300,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha20_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA20_KEY_SIZE,
- .max_keysize = CHACHA20_KEY_SIZE,
- .ivsize = CHACHA20_IV_SIZE,
- .chunksize = CHACHA20_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
- .encrypt = chacha20_simd,
- .decrypt = chacha20_simd,
-};
-
-static int __init chacha20_simd_mod_init(void)
-{
- if (!boot_cpu_has(X86_FEATURE_SSSE3))
- return -ENODEV;
-
-#ifdef CONFIG_AS_AVX2
- chacha20_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
- boot_cpu_has(X86_FEATURE_AVX2) &&
- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
-#endif
- return crypto_register_skcipher(&alg);
-}
-
-static void __exit chacha20_simd_mod_fini(void)
-{
- crypto_unregister_skcipher(&alg);
-}
-
-module_init(chacha20_simd_mod_init);
-module_exit(chacha20_simd_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("chacha20 cipher algorithm, SIMD accelerated");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-simd");
diff --git a/arch/x86/crypto/chacha_glue.c b/arch/x86/crypto/chacha_glue.c
new file mode 100644
index 000000000000..45c1c4143176
--- /dev/null
+++ b/arch/x86/crypto/chacha_glue.c
@@ -0,0 +1,304 @@
+/*
+ * x64 SIMD accelerated ChaCha and XChaCha stream ciphers,
+ * including ChaCha20 (RFC7539)
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/fpu/api.h>
+#include <asm/simd.h>
+
+#define CHACHA_STATE_ALIGN 16
+
+asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds);
+#ifdef CONFIG_AS_AVX2
+asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+static bool chacha_use_avx2;
+#ifdef CONFIG_AS_AVX512
+asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src,
+ unsigned int len, int nrounds);
+static bool chacha_use_avx512vl;
+#endif
+#endif
+
+static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks)
+{
+ len = min(len, maxblocks * CHACHA_BLOCK_SIZE);
+ return round_up(len, CHACHA_BLOCK_SIZE) / CHACHA_BLOCK_SIZE;
+}
+
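chacha_advance() converts a trailing byte count into the number of whole 64-byte blocks the counter must move, since the assembly always generates keystream in block units even when it xors fewer bytes. A worked example (illustrative, not from the patch):

	/* 100 trailing bytes handed to the 4-block SSSE3 kernel:
	 *   chacha_advance(100, 4) = round_up(min(100, 256), 64) / 64
	 *                          = round_up(100, 64) / 64
	 *                          = 128 / 64 = 2 blocks
	 */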
+static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+#ifdef CONFIG_AS_AVX2
+#ifdef CONFIG_AS_AVX512
+ if (chacha_use_avx512vl) {
+ while (bytes >= CHACHA_BLOCK_SIZE * 8) {
+ chacha_8block_xor_avx512vl(state, dst, src, bytes,
+ nrounds);
+ bytes -= CHACHA_BLOCK_SIZE * 8;
+ src += CHACHA_BLOCK_SIZE * 8;
+ dst += CHACHA_BLOCK_SIZE * 8;
+ state[12] += 8;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE * 4) {
+ chacha_8block_xor_avx512vl(state, dst, src, bytes,
+ nrounds);
+ state[12] += chacha_advance(bytes, 8);
+ return;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE * 2) {
+ chacha_4block_xor_avx512vl(state, dst, src, bytes,
+ nrounds);
+ state[12] += chacha_advance(bytes, 4);
+ return;
+ }
+ if (bytes) {
+ chacha_2block_xor_avx512vl(state, dst, src, bytes,
+ nrounds);
+ state[12] += chacha_advance(bytes, 2);
+ return;
+ }
+ }
+#endif
+ if (chacha_use_avx2) {
+ while (bytes >= CHACHA_BLOCK_SIZE * 8) {
+ chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
+ bytes -= CHACHA_BLOCK_SIZE * 8;
+ src += CHACHA_BLOCK_SIZE * 8;
+ dst += CHACHA_BLOCK_SIZE * 8;
+ state[12] += 8;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE * 4) {
+ chacha_8block_xor_avx2(state, dst, src, bytes, nrounds);
+ state[12] += chacha_advance(bytes, 8);
+ return;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE * 2) {
+ chacha_4block_xor_avx2(state, dst, src, bytes, nrounds);
+ state[12] += chacha_advance(bytes, 4);
+ return;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE) {
+ chacha_2block_xor_avx2(state, dst, src, bytes, nrounds);
+ state[12] += chacha_advance(bytes, 2);
+ return;
+ }
+ }
+#endif
+ while (bytes >= CHACHA_BLOCK_SIZE * 4) {
+ chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
+ bytes -= CHACHA_BLOCK_SIZE * 4;
+ src += CHACHA_BLOCK_SIZE * 4;
+ dst += CHACHA_BLOCK_SIZE * 4;
+ state[12] += 4;
+ }
+ if (bytes > CHACHA_BLOCK_SIZE) {
+ chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds);
+ state[12] += chacha_advance(bytes, 4);
+ return;
+ }
+ if (bytes) {
+ chacha_block_xor_ssse3(state, dst, src, bytes, nrounds);
+ state[12]++;
+ }
+}
+
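The dispatch above drains exact multiples in the widest loop first, then makes at most one oversized tail call and lets the assembly clip to the byte count, fixing the counter up with chacha_advance(). Sketched for an AVX2 machine (numbers invented):

	/* 300 leftover bytes:
	 *   300 > CHACHA_BLOCK_SIZE * 4 (256)  -> chacha_8block_xor_avx2()
	 *   state[12] += chacha_advance(300, 8) = round_up(300, 64) / 64 = 5
	 */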
+static int chacha_simd_stream_xor(struct skcipher_walk *walk,
+ struct chacha_ctx *ctx, u8 *iv)
+{
+ u32 *state, state_buf[16 + 2] __aligned(8);
+ int next_yield = 4096; /* bytes until next FPU yield */
+ int err = 0;
+
+ BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
+ state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
+
+ crypto_chacha_init(state, ctx, iv);
+
+ while (walk->nbytes > 0) {
+ unsigned int nbytes = walk->nbytes;
+
+ if (nbytes < walk->total) {
+ nbytes = round_down(nbytes, walk->stride);
+ next_yield -= nbytes;
+ }
+
+ chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr,
+ nbytes, ctx->nrounds);
+
+ if (next_yield <= 0) {
+ /* temporarily allow preemption */
+ kernel_fpu_end();
+ kernel_fpu_begin();
+ next_yield = 4096;
+ }
+
+ err = skcipher_walk_done(walk, walk->nbytes - nbytes);
+ }
+
+ return err;
+}
+
+static int chacha_simd(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ int err;
+
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
+ return crypto_chacha_crypt(req);
+
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
+
+ kernel_fpu_begin();
+ err = chacha_simd_stream_xor(&walk, ctx, req->iv);
+ kernel_fpu_end();
+ return err;
+}
+
+static int xchacha_simd(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct skcipher_walk walk;
+ struct chacha_ctx subctx;
+ u32 *state, state_buf[16 + 2] __aligned(8);
+ u8 real_iv[16];
+ int err;
+
+ if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
+ return crypto_xchacha_crypt(req);
+
+ err = skcipher_walk_virt(&walk, req, true);
+ if (err)
+ return err;
+
+ BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16);
+ state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN);
+ crypto_chacha_init(state, ctx, req->iv);
+
+ kernel_fpu_begin();
+
+ hchacha_block_ssse3(state, subctx.key, ctx->nrounds);
+ subctx.nrounds = ctx->nrounds;
+
+ memcpy(&real_iv[0], req->iv + 24, 8);
+ memcpy(&real_iv[8], req->iv + 16, 8);
+ err = chacha_simd_stream_xor(&walk, &subctx, real_iv);
+
+ kernel_fpu_end();
+
+ return err;
+}
+
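For reference, xchacha_simd() consumes the 32-byte XChaCha IV in three pieces; a layout sketch (labels are descriptive, not taken from the headers):

	/*
	 *   req->iv[ 0..15]  nonce head -> hchacha_block_ssse3() input,
	 *                                  derives the per-request subkey
	 *   req->iv[16..23]  nonce tail -> real_iv[8..15]
	 *   req->iv[24..31]  block counter -> real_iv[0..7]
	 *
	 * after which the stream runs as plain ChaCha under (subctx, real_iv).
	 */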
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-simd",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha20_setkey,
+ .encrypt = chacha_simd,
+ .decrypt = chacha_simd,
+ }, {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-simd",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha20_setkey,
+ .encrypt = xchacha_simd,
+ .decrypt = xchacha_simd,
+ }, {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-simd",
+ .base.cra_priority = 300,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha12_setkey,
+ .encrypt = xchacha_simd,
+ .decrypt = xchacha_simd,
+ },
+};
+
+static int __init chacha_simd_mod_init(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_SSSE3))
+ return -ENODEV;
+
+#ifdef CONFIG_AS_AVX2
+ chacha_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) &&
+ boot_cpu_has(X86_FEATURE_AVX2) &&
+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
+#ifdef CONFIG_AS_AVX512
+ chacha_use_avx512vl = chacha_use_avx2 &&
+ boot_cpu_has(X86_FEATURE_AVX512VL) &&
+ boot_cpu_has(X86_FEATURE_AVX512BW); /* kmovq */
+#endif
+#endif
+ return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit chacha_simd_mod_fini(void)
+{
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+module_init(chacha_simd_mod_init);
+module_exit(chacha_simd_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (x64 SIMD accelerated)");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-simd");
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-simd");
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-simd");
diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S
new file mode 100644
index 000000000000..f7946ea1b704
--- /dev/null
+++ b/arch/x86/crypto/nh-avx2-x86_64.S
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NH - ε-almost-universal hash function, x86_64 AVX2 accelerated
+ *
+ * Copyright 2018 Google LLC
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+
+#include <linux/linkage.h>
+
+#define PASS0_SUMS %ymm0
+#define PASS1_SUMS %ymm1
+#define PASS2_SUMS %ymm2
+#define PASS3_SUMS %ymm3
+#define K0 %ymm4
+#define K0_XMM %xmm4
+#define K1 %ymm5
+#define K1_XMM %xmm5
+#define K2 %ymm6
+#define K2_XMM %xmm6
+#define K3 %ymm7
+#define K3_XMM %xmm7
+#define T0 %ymm8
+#define T1 %ymm9
+#define T2 %ymm10
+#define T2_XMM %xmm10
+#define T3 %ymm11
+#define T3_XMM %xmm11
+#define T4 %ymm12
+#define T5 %ymm13
+#define T6 %ymm14
+#define T7 %ymm15
+#define KEY %rdi
+#define MESSAGE %rsi
+#define MESSAGE_LEN %rdx
+#define HASH %rcx
+
+.macro _nh_2xstride k0, k1, k2, k3
+
+ // Add message words to key words
+ vpaddd \k0, T3, T0
+ vpaddd \k1, T3, T1
+ vpaddd \k2, T3, T2
+ vpaddd \k3, T3, T3
+
+ // Multiply 32x32 => 64 and accumulate
+ vpshufd $0x10, T0, T4
+ vpshufd $0x32, T0, T0
+ vpshufd $0x10, T1, T5
+ vpshufd $0x32, T1, T1
+ vpshufd $0x10, T2, T6
+ vpshufd $0x32, T2, T2
+ vpshufd $0x10, T3, T7
+ vpshufd $0x32, T3, T3
+ vpmuludq T4, T0, T0
+ vpmuludq T5, T1, T1
+ vpmuludq T6, T2, T2
+ vpmuludq T7, T3, T3
+ vpaddq T0, PASS0_SUMS, PASS0_SUMS
+ vpaddq T1, PASS1_SUMS, PASS1_SUMS
+ vpaddq T2, PASS2_SUMS, PASS2_SUMS
+ vpaddq T3, PASS3_SUMS, PASS3_SUMS
+.endm
+
+/*
+ * void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
+ * u8 hash[NH_HASH_BYTES])
+ *
+ * It's guaranteed that message_len % 16 == 0.
+ */
+ENTRY(nh_avx2)
+
+ vmovdqu 0x00(KEY), K0
+ vmovdqu 0x10(KEY), K1
+ add $0x20, KEY
+ vpxor PASS0_SUMS, PASS0_SUMS, PASS0_SUMS
+ vpxor PASS1_SUMS, PASS1_SUMS, PASS1_SUMS
+ vpxor PASS2_SUMS, PASS2_SUMS, PASS2_SUMS
+ vpxor PASS3_SUMS, PASS3_SUMS, PASS3_SUMS
+
+ sub $0x40, MESSAGE_LEN
+ jl .Lloop4_done
+.Lloop4:
+ vmovdqu (MESSAGE), T3
+ vmovdqu 0x00(KEY), K2
+ vmovdqu 0x10(KEY), K3
+ _nh_2xstride K0, K1, K2, K3
+
+ vmovdqu 0x20(MESSAGE), T3
+ vmovdqu 0x20(KEY), K0
+ vmovdqu 0x30(KEY), K1
+ _nh_2xstride K2, K3, K0, K1
+
+ add $0x40, MESSAGE
+ add $0x40, KEY
+ sub $0x40, MESSAGE_LEN
+ jge .Lloop4
+
+.Lloop4_done:
+ and $0x3f, MESSAGE_LEN
+ jz .Ldone
+
+ cmp $0x20, MESSAGE_LEN
+ jl .Llast
+
+ // 2 or 3 strides remain; do 2 more.
+ vmovdqu (MESSAGE), T3
+ vmovdqu 0x00(KEY), K2
+ vmovdqu 0x10(KEY), K3
+ _nh_2xstride K0, K1, K2, K3
+ add $0x20, MESSAGE
+ add $0x20, KEY
+ sub $0x20, MESSAGE_LEN
+ jz .Ldone
+ vmovdqa K2, K0
+ vmovdqa K3, K1
+.Llast:
+ // Last stride. Zero the high 128 bits of the message and keys so they
+ // don't affect the result when processing them like 2 strides.
+ vmovdqu (MESSAGE), T3_XMM
+ vmovdqa K0_XMM, K0_XMM
+ vmovdqa K1_XMM, K1_XMM
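+	// vmovdqa with an XMM destination zeroes bits 255:128 of the
+	// containing YMM register, so these self-moves clear the upper
+	// lanes of K0 and K1 at no extra cost.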
+ vmovdqu 0x00(KEY), K2_XMM
+ vmovdqu 0x10(KEY), K3_XMM
+ _nh_2xstride K0, K1, K2, K3
+
+.Ldone:
+ // Sum the accumulators for each pass, then store the sums to 'hash'
+
+ // PASS0_SUMS is (0A 0B 0C 0D)
+ // PASS1_SUMS is (1A 1B 1C 1D)
+ // PASS2_SUMS is (2A 2B 2C 2D)
+ // PASS3_SUMS is (3A 3B 3C 3D)
+ // We need the horizontal sums:
+ // (0A + 0B + 0C + 0D,
+ // 1A + 1B + 1C + 1D,
+ // 2A + 2B + 2C + 2D,
+ // 3A + 3B + 3C + 3D)
+ //
+
+ vpunpcklqdq PASS1_SUMS, PASS0_SUMS, T0 // T0 = (0A 1A 0C 1C)
+ vpunpckhqdq PASS1_SUMS, PASS0_SUMS, T1 // T1 = (0B 1B 0D 1D)
+ vpunpcklqdq PASS3_SUMS, PASS2_SUMS, T2 // T2 = (2A 3A 2C 3C)
+ vpunpckhqdq PASS3_SUMS, PASS2_SUMS, T3 // T3 = (2B 3B 2D 3D)
+
+ vinserti128 $0x1, T2_XMM, T0, T4 // T4 = (0A 1A 2A 3A)
+ vinserti128 $0x1, T3_XMM, T1, T5 // T5 = (0B 1B 2B 3B)
+ vperm2i128 $0x31, T2, T0, T0 // T0 = (0C 1C 2C 3C)
+ vperm2i128 $0x31, T3, T1, T1 // T1 = (0D 1D 2D 3D)
+
+ vpaddq T5, T4, T4
+ vpaddq T1, T0, T0
+ vpaddq T4, T0, T0
+ vmovdqu T0, (HASH)
+ ret
+ENDPROC(nh_avx2)
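Both this file's stride macro and the SSE2 one in the next file compute the same four-pass NH sums. A simplified C model in the spirit of the generic fallback (a hedged sketch, not the kernel's code; kernel-style u32/u64 types and the get_unaligned_le32() helper are assumed, and len must be a multiple of 16):

	static void nh_model(const u32 *key, const u8 *msg, size_t len,
			     u64 sums[4])
	{
		int p;

		sums[0] = sums[1] = sums[2] = sums[3] = 0;
		for (; len >= 16; key += 4, msg += 16, len -= 16) {
			u32 m0 = get_unaligned_le32(msg + 0);
			u32 m1 = get_unaligned_le32(msg + 4);
			u32 m2 = get_unaligned_le32(msg + 8);
			u32 m3 = get_unaligned_le32(msg + 12);

			/* pass p uses key words offset by 4 * p */
			for (p = 0; p < 4; p++) {
				sums[p] += (u64)(u32)(m0 + key[4 * p + 0]) *
					   (u32)(m2 + key[4 * p + 2]);
				sums[p] += (u64)(u32)(m1 + key[4 * p + 1]) *
					   (u32)(m3 + key[4 * p + 3]);
			}
		}
	}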
diff --git a/arch/x86/crypto/nh-sse2-x86_64.S b/arch/x86/crypto/nh-sse2-x86_64.S
new file mode 100644
index 000000000000..51f52d4ab4bb
--- /dev/null
+++ b/arch/x86/crypto/nh-sse2-x86_64.S
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NH - ε-almost-universal hash function, x86_64 SSE2 accelerated
+ *
+ * Copyright 2018 Google LLC
+ *
+ * Author: Eric Biggers <ebiggers@google.com>
+ */
+
+#include <linux/linkage.h>
+
+#define PASS0_SUMS %xmm0
+#define PASS1_SUMS %xmm1
+#define PASS2_SUMS %xmm2
+#define PASS3_SUMS %xmm3
+#define K0 %xmm4
+#define K1 %xmm5
+#define K2 %xmm6
+#define K3 %xmm7
+#define T0 %xmm8
+#define T1 %xmm9
+#define T2 %xmm10
+#define T3 %xmm11
+#define T4 %xmm12
+#define T5 %xmm13
+#define T6 %xmm14
+#define T7 %xmm15
+#define KEY %rdi
+#define MESSAGE %rsi
+#define MESSAGE_LEN %rdx
+#define HASH %rcx
+
+.macro _nh_stride k0, k1, k2, k3, offset
+
+ // Load next message stride
+ movdqu \offset(MESSAGE), T1
+
+ // Load next key stride
+ movdqu \offset(KEY), \k3
+
+ // Add message words to key words
+ movdqa T1, T2
+ movdqa T1, T3
+ paddd T1, \k0 // reuse k0 to avoid a move
+ paddd \k1, T1
+ paddd \k2, T2
+ paddd \k3, T3
+
+ // Multiply 32x32 => 64 and accumulate
+ pshufd $0x10, \k0, T4
+ pshufd $0x32, \k0, \k0
+ pshufd $0x10, T1, T5
+ pshufd $0x32, T1, T1
+ pshufd $0x10, T2, T6
+ pshufd $0x32, T2, T2
+ pshufd $0x10, T3, T7
+ pshufd $0x32, T3, T3
+ pmuludq T4, \k0
+ pmuludq T5, T1
+ pmuludq T6, T2
+ pmuludq T7, T3
+ paddq \k0, PASS0_SUMS
+ paddq T1, PASS1_SUMS
+ paddq T2, PASS2_SUMS
+ paddq T3, PASS3_SUMS
+.endm
+
+/*
+ * void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
+ * u8 hash[NH_HASH_BYTES])
+ *
+ * It's guaranteed that message_len % 16 == 0.
+ */
+ENTRY(nh_sse2)
+
+ movdqu 0x00(KEY), K0
+ movdqu 0x10(KEY), K1
+ movdqu 0x20(KEY), K2
+ add $0x30, KEY
+ pxor PASS0_SUMS, PASS0_SUMS
+ pxor PASS1_SUMS, PASS1_SUMS
+ pxor PASS2_SUMS, PASS2_SUMS
+ pxor PASS3_SUMS, PASS3_SUMS
+
+ sub $0x40, MESSAGE_LEN
+ jl .Lloop4_done
+.Lloop4:
+ _nh_stride K0, K1, K2, K3, 0x00
+ _nh_stride K1, K2, K3, K0, 0x10
+ _nh_stride K2, K3, K0, K1, 0x20
+ _nh_stride K3, K0, K1, K2, 0x30
+ add $0x40, KEY
+ add $0x40, MESSAGE
+ sub $0x40, MESSAGE_LEN
+ jge .Lloop4
+
+.Lloop4_done:
+ and $0x3f, MESSAGE_LEN
+ jz .Ldone
+ _nh_stride K0, K1, K2, K3, 0x00
+
+ sub $0x10, MESSAGE_LEN
+ jz .Ldone
+ _nh_stride K1, K2, K3, K0, 0x10
+
+ sub $0x10, MESSAGE_LEN
+ jz .Ldone
+ _nh_stride K2, K3, K0, K1, 0x20
+
+.Ldone:
+ // Sum the accumulators for each pass, then store the sums to 'hash'
+ movdqa PASS0_SUMS, T0
+ movdqa PASS2_SUMS, T1
+ punpcklqdq PASS1_SUMS, T0 // => (PASS0_SUM_A PASS1_SUM_A)
+ punpcklqdq PASS3_SUMS, T1 // => (PASS2_SUM_A PASS3_SUM_A)
+ punpckhqdq PASS1_SUMS, PASS0_SUMS // => (PASS0_SUM_B PASS1_SUM_B)
+ punpckhqdq PASS3_SUMS, PASS2_SUMS // => (PASS2_SUM_B PASS3_SUM_B)
+ paddq PASS0_SUMS, T0
+ paddq PASS2_SUMS, T1
+ movdqu T0, 0x00(HASH)
+ movdqu T1, 0x10(HASH)
+ ret
+ENDPROC(nh_sse2)
diff --git a/arch/x86/crypto/nhpoly1305-avx2-glue.c b/arch/x86/crypto/nhpoly1305-avx2-glue.c
new file mode 100644
index 000000000000..20d815ea4b6a
--- /dev/null
+++ b/arch/x86/crypto/nhpoly1305-avx2-glue.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
+ * (AVX2 accelerated version)
+ *
+ * Copyright 2018 Google LLC
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/nhpoly1305.h>
+#include <linux/module.h>
+#include <asm/fpu/api.h>
+
+asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
+ u8 hash[NH_HASH_BYTES]);
+
+/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */
+static void _nh_avx2(const u32 *key, const u8 *message, size_t message_len,
+ __le64 hash[NH_NUM_PASSES])
+{
+ nh_avx2(key, message, message_len, (u8 *)hash);
+}
+
+static int nhpoly1305_avx2_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ if (srclen < 64 || !irq_fpu_usable())
+ return crypto_nhpoly1305_update(desc, src, srclen);
+
+ do {
+ unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+
+ kernel_fpu_begin();
+ crypto_nhpoly1305_update_helper(desc, src, n, _nh_avx2);
+ kernel_fpu_end();
+ src += n;
+ srclen -= n;
+ } while (srclen);
+ return 0;
+}
+
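The update hook caps each FPU section at a page of input because kernel_fpu_begin() disables preemption; bounding the chunk bounds the preempt-off window. Illustrative arithmetic, not a measured figure:

	/* with PAGE_SIZE = 4096, at most 4096 bytes are hashed between
	 * kernel_fpu_begin() and kernel_fpu_end(), after which a
	 * preemption point reopens before the next chunk.
	 */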
+static struct shash_alg nhpoly1305_alg = {
+ .base.cra_name = "nhpoly1305",
+ .base.cra_driver_name = "nhpoly1305-avx2",
+ .base.cra_priority = 300,
+ .base.cra_ctxsize = sizeof(struct nhpoly1305_key),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .init = crypto_nhpoly1305_init,
+ .update = nhpoly1305_avx2_update,
+ .final = crypto_nhpoly1305_final,
+ .setkey = crypto_nhpoly1305_setkey,
+ .descsize = sizeof(struct nhpoly1305_state),
+};
+
+static int __init nhpoly1305_mod_init(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_AVX2) ||
+ !boot_cpu_has(X86_FEATURE_OSXSAVE))
+ return -ENODEV;
+
+ return crypto_register_shash(&nhpoly1305_alg);
+}
+
+static void __exit nhpoly1305_mod_exit(void)
+{
+ crypto_unregister_shash(&nhpoly1305_alg);
+}
+
+module_init(nhpoly1305_mod_init);
+module_exit(nhpoly1305_mod_exit);
+
+MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (AVX2-accelerated)");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("nhpoly1305");
+MODULE_ALIAS_CRYPTO("nhpoly1305-avx2");
diff --git a/arch/x86/crypto/nhpoly1305-sse2-glue.c b/arch/x86/crypto/nhpoly1305-sse2-glue.c
new file mode 100644
index 000000000000..ed68d164ce14
--- /dev/null
+++ b/arch/x86/crypto/nhpoly1305-sse2-glue.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
+ * (SSE2 accelerated version)
+ *
+ * Copyright 2018 Google LLC
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/nhpoly1305.h>
+#include <linux/module.h>
+#include <asm/fpu/api.h>
+
+asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
+ u8 hash[NH_HASH_BYTES]);
+
+/* wrapper to avoid indirect call to assembly, which doesn't work with CFI */
+static void _nh_sse2(const u32 *key, const u8 *message, size_t message_len,
+ __le64 hash[NH_NUM_PASSES])
+{
+ nh_sse2(key, message, message_len, (u8 *)hash);
+}
+
+static int nhpoly1305_sse2_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ if (srclen < 64 || !irq_fpu_usable())
+ return crypto_nhpoly1305_update(desc, src, srclen);
+
+ do {
+ unsigned int n = min_t(unsigned int, srclen, PAGE_SIZE);
+
+ kernel_fpu_begin();
+ crypto_nhpoly1305_update_helper(desc, src, n, _nh_sse2);
+ kernel_fpu_end();
+ src += n;
+ srclen -= n;
+ } while (srclen);
+ return 0;
+}
+
+static struct shash_alg nhpoly1305_alg = {
+ .base.cra_name = "nhpoly1305",
+ .base.cra_driver_name = "nhpoly1305-sse2",
+ .base.cra_priority = 200,
+ .base.cra_ctxsize = sizeof(struct nhpoly1305_key),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .init = crypto_nhpoly1305_init,
+ .update = nhpoly1305_sse2_update,
+ .final = crypto_nhpoly1305_final,
+ .setkey = crypto_nhpoly1305_setkey,
+ .descsize = sizeof(struct nhpoly1305_state),
+};
+
+static int __init nhpoly1305_mod_init(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_XMM2))
+ return -ENODEV;
+
+ return crypto_register_shash(&nhpoly1305_alg);
+}
+
+static void __exit nhpoly1305_mod_exit(void)
+{
+ crypto_unregister_shash(&nhpoly1305_alg);
+}
+
+module_init(nhpoly1305_mod_init);
+module_exit(nhpoly1305_mod_exit);
+
+MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function (SSE2-accelerated)");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("nhpoly1305");
+MODULE_ALIAS_CRYPTO("nhpoly1305-sse2");
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index f012b7e28ad1..88cc01506c84 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -83,35 +83,37 @@ static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx,
if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) {
if (unlikely(!sctx->wset)) {
if (!sctx->uset) {
- memcpy(sctx->u, dctx->r, sizeof(sctx->u));
- poly1305_simd_mult(sctx->u, dctx->r);
+ memcpy(sctx->u, dctx->r.r, sizeof(sctx->u));
+ poly1305_simd_mult(sctx->u, dctx->r.r);
sctx->uset = true;
}
memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u));
- poly1305_simd_mult(sctx->u + 5, dctx->r);
+ poly1305_simd_mult(sctx->u + 5, dctx->r.r);
memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u));
- poly1305_simd_mult(sctx->u + 10, dctx->r);
+ poly1305_simd_mult(sctx->u + 10, dctx->r.r);
sctx->wset = true;
}
blocks = srclen / (POLY1305_BLOCK_SIZE * 4);
- poly1305_4block_avx2(dctx->h, src, dctx->r, blocks, sctx->u);
+ poly1305_4block_avx2(dctx->h.h, src, dctx->r.r, blocks,
+ sctx->u);
src += POLY1305_BLOCK_SIZE * 4 * blocks;
srclen -= POLY1305_BLOCK_SIZE * 4 * blocks;
}
#endif
if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) {
if (unlikely(!sctx->uset)) {
- memcpy(sctx->u, dctx->r, sizeof(sctx->u));
- poly1305_simd_mult(sctx->u, dctx->r);
+ memcpy(sctx->u, dctx->r.r, sizeof(sctx->u));
+ poly1305_simd_mult(sctx->u, dctx->r.r);
sctx->uset = true;
}
blocks = srclen / (POLY1305_BLOCK_SIZE * 2);
- poly1305_2block_sse2(dctx->h, src, dctx->r, blocks, sctx->u);
+ poly1305_2block_sse2(dctx->h.h, src, dctx->r.r, blocks,
+ sctx->u);
src += POLY1305_BLOCK_SIZE * 2 * blocks;
srclen -= POLY1305_BLOCK_SIZE * 2 * blocks;
}
if (srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_block_sse2(dctx->h, src, dctx->r, 1);
+ poly1305_block_sse2(dctx->h.h, src, dctx->r.r, 1);
srclen -= POLY1305_BLOCK_SIZE;
}
return srclen;
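The sctx->u array this hunk re-plumbs caches powers of the key r, which is what allows several blocks per call; the regrouping for the 2-block SSE2 path, written out:

	/* serial:   h = ((h + m1) * r + m2) * r      (mod 2^130 - 5)
	 * 2-block:  h = (h + m1) * r^2 + m2 * r      (mod 2^130 - 5)
	 *
	 * so uset caches u = r^2, and the 4-block AVX2 path extends the
	 * cache with r^3 at u + 5 and r^4 at u + 10 (wset).
	 */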
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 25e5a6bda8c3..20d0885b00fb 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -352,7 +352,7 @@ For 32-bit we have the following conventions - kernel is built with
.macro CALL_enter_from_user_mode
#ifdef CONFIG_CONTEXT_TRACKING
#ifdef HAVE_JUMP_LABEL
- STATIC_BRANCH_JMP l_yes=.Lafter_call_\@, key=context_tracking_enabled, branch=1
+ STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
#endif
call enter_from_user_mode
.Lafter_call_\@:
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 3b2490b81918..7bc105f47d21 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -140,7 +140,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
/*
* In order to return to user mode, we need to have IRQs off with
* none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
- * can be set at any time on preemptable kernels if we have IRQs on,
+ * can be set at any time on preemptible kernels if we have IRQs on,
* so we need to loop. Disabling preemption wouldn't help: doing the
* work to clear some of the flags can sleep.
*/
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index ce25d84023c0..1f0efdb7b629 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -566,6 +566,7 @@ ENTRY(interrupt_entry)
ret
END(interrupt_entry)
+_ASM_NOKPROBE(interrupt_entry)
/* Interrupt entry/exit. */
@@ -766,6 +767,7 @@ native_irq_return_ldt:
jmp native_irq_return_iret
#endif
END(common_interrupt)
+_ASM_NOKPROBE(common_interrupt)
/*
* APIC interrupts.
@@ -780,6 +782,7 @@ ENTRY(\sym)
call \do_sym /* rdi points to pt_regs */
jmp ret_from_intr
END(\sym)
+_ASM_NOKPROBE(\sym)
.endm
/* Make sure APIC interrupt handlers end up in the irqentry section: */
@@ -960,6 +963,7 @@ ENTRY(\sym)
jmp error_exit
.endif
+_ASM_NOKPROBE(\sym)
END(\sym)
.endm
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 141d415a8c80..5bfe2243a08f 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -47,7 +47,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
CPPFLAGS_vdso.lds += -P -C
VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
- -z max-page-size=4096 -z common-page-size=4096
+ -z max-page-size=4096
$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
@@ -98,7 +98,7 @@ CFLAGS_REMOVE_vvar.o = -pg
CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
- -z max-page-size=4096 -z common-page-size=4096
+ -z max-page-size=4096
# x32-rebranded versions
vobjx32s-y := $(vobjs-y:.o=-x32.o)
@@ -171,7 +171,8 @@ quiet_cmd_vdso = VDSO $@
sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
- $(call ld-option, --build-id) -Bsymbolic
+ $(call ld-option, --build-id) $(call ld-option, --eh-frame-hdr) \
+ -Bsymbolic
GCOV_PROFILE := n
#
diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S
index acfd5ba7d943..93c6dc7812d0 100644
--- a/arch/x86/entry/vdso/vdso-layout.lds.S
+++ b/arch/x86/entry/vdso/vdso-layout.lds.S
@@ -7,16 +7,6 @@
* This script controls its layout.
*/
-#if defined(BUILD_VDSO64)
-# define SHDR_SIZE 64
-#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32)
-# define SHDR_SIZE 40
-#else
-# error unknown VDSO target
-#endif
-
-#define NUM_FAKE_SHDRS 13
-
SECTIONS
{
/*
@@ -60,20 +50,8 @@ SECTIONS
*(.bss*)
*(.dynbss*)
*(.gnu.linkonce.b.*)
-
- /*
- * Ideally this would live in a C file, but that won't
- * work cleanly for x32 until we start building the x32
- * C code using an x32 toolchain.
- */
- VDSO_FAKE_SECTION_TABLE_START = .;
- . = . + NUM_FAKE_SHDRS * SHDR_SIZE;
- VDSO_FAKE_SECTION_TABLE_END = .;
} :text
- .fake_shstrtab : { *(.fake_shstrtab) } :text
-
-
.note : { *(.note.*) } :text :note
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
@@ -87,11 +65,6 @@ SECTIONS
.text : { *(.text*) } :text =0x90909090,
- /*
- * At the end so that eu-elflint stays happy when vdso2c strips
- * these. A better implementation would avoid allocating space
- * for these.
- */
.altinstructions : { *(.altinstructions) } :text
.altinstr_replacement : { *(.altinstr_replacement) } :text
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
index 4674f58581a1..8e470b018512 100644
--- a/arch/x86/entry/vdso/vdso2c.c
+++ b/arch/x86/entry/vdso/vdso2c.c
@@ -76,8 +76,6 @@ enum {
sym_hpet_page,
sym_pvclock_page,
sym_hvclock_page,
- sym_VDSO_FAKE_SECTION_TABLE_START,
- sym_VDSO_FAKE_SECTION_TABLE_END,
};
const int special_pages[] = {
@@ -98,12 +96,6 @@ struct vdso_sym required_syms[] = {
[sym_hpet_page] = {"hpet_page", true},
[sym_pvclock_page] = {"pvclock_page", true},
[sym_hvclock_page] = {"hvclock_page", true},
- [sym_VDSO_FAKE_SECTION_TABLE_START] = {
- "VDSO_FAKE_SECTION_TABLE_START", false
- },
- [sym_VDSO_FAKE_SECTION_TABLE_END] = {
- "VDSO_FAKE_SECTION_TABLE_END", false
- },
{"VDSO32_NOTE_MASK", true},
{"__kernel_vsyscall", true},
{"__kernel_sigreturn", true},
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 7eb878561910..babc4e7a519c 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -261,7 +261,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
* abusing from userspace install_special_mapping, which may
* not do accounting and rlimit right.
* We could search vma near context.vdso, but it's a slowpath,
- * so let's explicitely check all VMAs to be completely sure.
+ * so let's explicitly check all VMAs to be completely sure.
*/
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (vma_is_special_mapping(vma, &vdso_mapping) ||
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 85fd85d52ffd..d78bcc03e60e 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -102,7 +102,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
struct thread_struct *thread = &current->thread;
- thread->error_code = 6; /* user fault, no page, write */
+ thread->error_code = X86_PF_USER | X86_PF_WRITE;
thread->cr2 = ptr;
thread->trap_nr = X86_TRAP_PF;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 106911b603bd..374a19712e20 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -438,26 +438,6 @@ int x86_setup_perfctr(struct perf_event *event)
if (config == -1LL)
return -EINVAL;
- /*
- * Branch tracing:
- */
- if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
- !attr->freq && hwc->sample_period == 1) {
- /* BTS is not supported by this architecture. */
- if (!x86_pmu.bts_active)
- return -EOPNOTSUPP;
-
- /* BTS is currently only allowed for user-mode. */
- if (!attr->exclude_kernel)
- return -EOPNOTSUPP;
-
- /* disallow bts if conflicting events are present */
- if (x86_add_exclusive(x86_lbr_exclusive_lbr))
- return -EBUSY;
-
- event->destroy = hw_perf_lbr_event_destroy;
- }
-
hwc->config |= config;
return 0;
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 24ffa1e88cf9..a01ef1b0f883 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -589,7 +589,7 @@ static __init int bts_init(void)
* the AUX buffer.
*
* However, since this driver supports per-CPU and per-task inherit
- * we cannot use the user mapping since it will not be availble
+ * we cannot use the user mapping since it will not be available
* if we're not running the owning process.
*
* With PTI we can't use the kernel map either, because it's not
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 273c62e81546..40e12cfc87f6 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1930,7 +1930,7 @@ static void intel_pmu_enable_all(int added)
* in sequence on the same PMC or on different PMCs.
*
* In practice it appears some of these events do in fact count, and
- * we need to programm all 4 events.
+ * we need to program all 4 events.
*/
static void intel_pmu_nhm_workaround(void)
{
@@ -2306,14 +2306,18 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
return handled;
}
-static bool disable_counter_freezing;
+static bool disable_counter_freezing = true;
static int __init intel_perf_counter_freezing_setup(char *s)
{
- disable_counter_freezing = true;
- pr_info("Intel PMU Counter freezing feature disabled\n");
+ bool res;
+
+ if (kstrtobool(s, &res))
+ return -EINVAL;
+
+ disable_counter_freezing = !res;
return 1;
}
-__setup("disable_counter_freezing", intel_perf_counter_freezing_setup);
+__setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
/*
* Simplified handler for Arch Perfmon v4:
@@ -2470,16 +2474,7 @@ done:
static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
- struct hw_perf_event *hwc = &event->hw;
- unsigned int hw_event, bts_event;
-
- if (event->attr.freq)
- return NULL;
-
- hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
- bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
-
- if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
+ if (unlikely(intel_pmu_has_bts(event)))
return &bts_constraint;
return NULL;
@@ -3098,6 +3093,43 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
return flags;
}
+static int intel_pmu_bts_config(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+
+ if (unlikely(intel_pmu_has_bts(event))) {
+ /* BTS is not supported by this architecture. */
+ if (!x86_pmu.bts_active)
+ return -EOPNOTSUPP;
+
+ /* BTS is currently only allowed for user-mode. */
+ if (!attr->exclude_kernel)
+ return -EOPNOTSUPP;
+
+ /* BTS is not allowed for precise events. */
+ if (attr->precise_ip)
+ return -EOPNOTSUPP;
+
+ /* disallow bts if conflicting events are present */
+ if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+ return -EBUSY;
+
+ event->destroy = hw_perf_lbr_event_destroy;
+ }
+
+ return 0;
+}
+
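intel_pmu_has_bts() keys on the same pattern the removed open-coded check used: a branch-instructions event with sample_period == 1 and !attr->freq. Illustratively, a command along the lines of

	perf record -e branch-instructions:u -c 1

is steered onto BTS and must pass the checks above (user mode only, no precise_ip, no conflicting LBR user).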
+static int core_pmu_hw_config(struct perf_event *event)
+{
+ int ret = x86_pmu_hw_config(event);
+
+ if (ret)
+ return ret;
+
+ return intel_pmu_bts_config(event);
+}
+
static int intel_pmu_hw_config(struct perf_event *event)
{
int ret = x86_pmu_hw_config(event);
@@ -3105,6 +3137,10 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (ret)
return ret;
+ ret = intel_pmu_bts_config(event);
+ if (ret)
+ return ret;
+
if (event->attr.precise_ip) {
if (!event->attr.freq) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
@@ -3127,7 +3163,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
/*
* BTS is set up earlier in this path, so don't account twice
*/
- if (!intel_pmu_has_bts(event)) {
+ if (!unlikely(intel_pmu_has_bts(event))) {
/* disallow lbr if conflicting events are present */
if (x86_add_exclusive(x86_lbr_exclusive_lbr))
return -EBUSY;
@@ -3596,7 +3632,7 @@ static __initconst const struct x86_pmu core_pmu = {
.enable_all = core_pmu_enable_all,
.enable = core_pmu_enable_event,
.disable = x86_pmu_disable_event,
- .hw_config = x86_pmu_hw_config,
+ .hw_config = core_pmu_hw_config,
.schedule_events = x86_schedule_events,
.eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
.perfctr = MSR_ARCH_PERFMON_PERFCTR0,
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index b7b01d762d32..e9acf1d2e7b2 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1199,7 +1199,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
/*
* We must however always use iregs for the unwinder to stay sane; the
* record BP,SP,IP can point into thin air when the record is from a
- * previous PMI context or an (I)RET happend between the record and
+ * previous PMI context or an (I)RET happened between the record and
* PMI.
*/
if (sample_type & PERF_SAMPLE_CALLCHAIN)
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index d32c0eed38ca..dee579efb2b2 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -1259,7 +1259,7 @@ again:
}
/*
* Perf does test runs to see if a whole group can be assigned
- * together succesfully. There can be multiple rounds of this.
+ * together successfully. There can be multiple rounds of this.
* Unfortunately, p4_pmu_swap_config_ts touches the hwc->config
* bits, such that the next round of group assignments will
* cause the above p4_should_swap_ts to pass instead of fail.
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 3a0aa83cbd07..9494ca68fd9d 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -68,6 +68,7 @@ static struct pt_cap_desc {
PT_CAP(topa_output, 0, CPUID_ECX, BIT(0)),
PT_CAP(topa_multiple_entries, 0, CPUID_ECX, BIT(1)),
PT_CAP(single_range_output, 0, CPUID_ECX, BIT(2)),
+ PT_CAP(output_subsys, 0, CPUID_ECX, BIT(3)),
PT_CAP(payloads_lip, 0, CPUID_ECX, BIT(31)),
PT_CAP(num_address_ranges, 1, CPUID_EAX, 0x3),
PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000),
@@ -75,14 +76,21 @@ static struct pt_cap_desc {
PT_CAP(psb_periods, 1, CPUID_EBX, 0xffff0000),
};
-static u32 pt_cap_get(enum pt_capabilities cap)
+u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability)
{
- struct pt_cap_desc *cd = &pt_caps[cap];
- u32 c = pt_pmu.caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
+ struct pt_cap_desc *cd = &pt_caps[capability];
+ u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
unsigned int shift = __ffs(cd->mask);
return (c & cd->mask) >> shift;
}
+EXPORT_SYMBOL_GPL(intel_pt_validate_cap);
+
+u32 intel_pt_validate_hw_cap(enum pt_capabilities cap)
+{
+ return intel_pt_validate_cap(pt_pmu.caps, cap);
+}
+EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap);
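intel_pt_validate_cap() is a plain mask-and-shift over the saved CPUID leaves. A worked example using the PT_CAP_mtc_periods descriptor above, with an invented register value:

	/* PT_CAP(mtc_periods, 1, CPUID_EAX, 0xffff0000), caps[] holding
	 * EAX = 0x02490009:
	 *   (0x02490009 & 0xffff0000) >> __ffs(0xffff0000)
	 *   = 0x02490000 >> 16 = 0x0249
	 */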
static ssize_t pt_cap_show(struct device *cdev,
struct device_attribute *attr,
@@ -92,7 +100,7 @@ static ssize_t pt_cap_show(struct device *cdev,
container_of(attr, struct dev_ext_attribute, attr);
enum pt_capabilities cap = (long)ea->var;
- return snprintf(buf, PAGE_SIZE, "%x\n", pt_cap_get(cap));
+ return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
}
static struct attribute_group pt_cap_group __ro_after_init = {
@@ -310,16 +318,16 @@ static bool pt_event_valid(struct perf_event *event)
return false;
if (config & RTIT_CTL_CYC_PSB) {
- if (!pt_cap_get(PT_CAP_psb_cyc))
+ if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc))
return false;
- allowed = pt_cap_get(PT_CAP_psb_periods);
+ allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
requested = (config & RTIT_CTL_PSB_FREQ) >>
RTIT_CTL_PSB_FREQ_OFFSET;
if (requested && (!(allowed & BIT(requested))))
return false;
- allowed = pt_cap_get(PT_CAP_cycle_thresholds);
+ allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds);
requested = (config & RTIT_CTL_CYC_THRESH) >>
RTIT_CTL_CYC_THRESH_OFFSET;
if (requested && (!(allowed & BIT(requested))))
@@ -334,10 +342,10 @@ static bool pt_event_valid(struct perf_event *event)
* Spec says that setting mtc period bits while mtc bit in
* CPUID is 0 will #GP, so better safe than sorry.
*/
- if (!pt_cap_get(PT_CAP_mtc))
+ if (!intel_pt_validate_hw_cap(PT_CAP_mtc))
return false;
- allowed = pt_cap_get(PT_CAP_mtc_periods);
+ allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods);
if (!allowed)
return false;
@@ -349,11 +357,11 @@ static bool pt_event_valid(struct perf_event *event)
}
if (config & RTIT_CTL_PWR_EVT_EN &&
- !pt_cap_get(PT_CAP_power_event_trace))
+ !intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
return false;
if (config & RTIT_CTL_PTW) {
- if (!pt_cap_get(PT_CAP_ptwrite))
+ if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
return false;
/* FUPonPTW without PTW doesn't make sense */
@@ -598,7 +606,7 @@ static struct topa *topa_alloc(int cpu, gfp_t gfp)
* In case of singe-entry ToPA, always put the self-referencing END
* link as the 2nd entry in the table
*/
- if (!pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(topa, 1)->base = topa->phys >> TOPA_SHIFT;
TOPA_ENTRY(topa, 1)->end = 1;
}
@@ -638,7 +646,7 @@ static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
topa->offset = last->offset + last->size;
buf->last = topa;
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return;
BUG_ON(last->last != TENTS_PER_PAGE - 1);
@@ -654,7 +662,7 @@ static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
static bool topa_table_full(struct topa *topa)
{
/* single-entry ToPA is a special case */
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return !!topa->last;
return topa->last == TENTS_PER_PAGE - 1;
@@ -690,7 +698,8 @@ static int topa_insert_pages(struct pt_buffer *buf, gfp_t gfp)
TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
TOPA_ENTRY(topa, -1)->size = order;
- if (!buf->snapshot && !pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ if (!buf->snapshot &&
+ !intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(topa, -1)->intr = 1;
TOPA_ENTRY(topa, -1)->stop = 1;
}
@@ -725,7 +734,7 @@ static void pt_topa_dump(struct pt_buffer *buf)
topa->table[i].intr ? 'I' : ' ',
topa->table[i].stop ? 'S' : ' ',
*(u64 *)&topa->table[i]);
- if ((pt_cap_get(PT_CAP_topa_multiple_entries) &&
+ if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
topa->table[i].stop) ||
topa->table[i].end)
break;
@@ -828,7 +837,7 @@ static void pt_handle_status(struct pt *pt)
* means we are already losing data; need to let the decoder
* know.
*/
- if (!pt_cap_get(PT_CAP_topa_multiple_entries) ||
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
buf->output_off == sizes(TOPA_ENTRY(buf->cur, buf->cur_idx)->size)) {
perf_aux_output_flag(&pt->handle,
PERF_AUX_FLAG_TRUNCATED);
@@ -840,7 +849,8 @@ static void pt_handle_status(struct pt *pt)
 * Also on single-entry ToPA implementations, the interrupt will come
* before the output reaches its output region's boundary.
*/
- if (!pt_cap_get(PT_CAP_topa_multiple_entries) && !buf->snapshot &&
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
+ !buf->snapshot &&
pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
void *head = pt_buffer_region(buf);
@@ -931,7 +941,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
return 0;
/* clear STOP and INT from current entry */
@@ -1082,7 +1092,7 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
pt_buffer_setup_topa_index(buf);
/* link last table to the first one, unless we're double buffering */
- if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
+ if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
TOPA_ENTRY(buf->last, -1)->base = buf->first->phys >> TOPA_SHIFT;
TOPA_ENTRY(buf->last, -1)->end = 1;
}
@@ -1153,7 +1163,7 @@ static int pt_addr_filters_init(struct perf_event *event)
struct pt_filters *filters;
int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);
- if (!pt_cap_get(PT_CAP_num_address_ranges))
+ if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
return 0;
filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
@@ -1202,7 +1212,7 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
return -EINVAL;
}
- if (++range > pt_cap_get(PT_CAP_num_address_ranges))
+ if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
return -EOPNOTSUPP;
}
@@ -1507,12 +1517,12 @@ static __init int pt_init(void)
if (ret)
return ret;
- if (!pt_cap_get(PT_CAP_topa_output)) {
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) {
pr_warn("ToPA output is not supported on this CPU\n");
return -ENODEV;
}
- if (!pt_cap_get(PT_CAP_topa_multiple_entries))
+ if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
pt_pmu.pmu.capabilities =
PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_SW_DOUBLEBUF;
@@ -1530,7 +1540,7 @@ static __init int pt_init(void)
pt_pmu.pmu.addr_filters_sync = pt_event_addr_filters_sync;
pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
pt_pmu.pmu.nr_addr_filters =
- pt_cap_get(PT_CAP_num_address_ranges);
+ intel_pt_validate_hw_cap(PT_CAP_num_address_ranges);
ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);
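For reference, a minimal user-space sketch of the mask-and-shift extraction that the new intel_pt_validate_cap() performs; the descriptor below is an illustrative placeholder, not the kernel's pt_caps[] table, and __builtin_ctz() stands in for __ffs().

#include <stdint.h>
#include <stdio.h>

#define PT_CPUID_REGS_NUM 4	/* eax, ebx, ecx, edx */

struct pt_cap_desc {
	unsigned int leaf;	/* CPUID 0x14 sub-leaf */
	unsigned int reg;	/* 0=eax 1=ebx 2=ecx 3=edx */
	uint32_t mask;		/* contiguous bit-field mask */
};

static uint32_t pt_validate_cap(const uint32_t *caps,
				const struct pt_cap_desc *cd)
{
	uint32_t c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
	unsigned int shift = __builtin_ctz(cd->mask);

	return (c & cd->mask) >> shift;
}

int main(void)
{
	/* pretend CPUID 0x14 sub-leaf 0 returned 0x7 in ebx */
	uint32_t caps[2 * PT_CPUID_REGS_NUM] = { [1] = 0x7 };
	struct pt_cap_desc nr_addr_ranges = { .leaf = 0, .reg = 1, .mask = 0x7 };

	printf("%u address ranges\n",
	       (unsigned int)pt_validate_cap(caps, &nr_addr_ranges));
	return 0;
}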
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index 0eb41d07b79a..269e15a9086c 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -20,43 +20,6 @@
#define __INTEL_PT_H__
/*
- * PT MSR bit definitions
- */
-#define RTIT_CTL_TRACEEN BIT(0)
-#define RTIT_CTL_CYCLEACC BIT(1)
-#define RTIT_CTL_OS BIT(2)
-#define RTIT_CTL_USR BIT(3)
-#define RTIT_CTL_PWR_EVT_EN BIT(4)
-#define RTIT_CTL_FUP_ON_PTW BIT(5)
-#define RTIT_CTL_CR3EN BIT(7)
-#define RTIT_CTL_TOPA BIT(8)
-#define RTIT_CTL_MTC_EN BIT(9)
-#define RTIT_CTL_TSC_EN BIT(10)
-#define RTIT_CTL_DISRETC BIT(11)
-#define RTIT_CTL_PTW_EN BIT(12)
-#define RTIT_CTL_BRANCH_EN BIT(13)
-#define RTIT_CTL_MTC_RANGE_OFFSET 14
-#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
-#define RTIT_CTL_CYC_THRESH_OFFSET 19
-#define RTIT_CTL_CYC_THRESH (0x0full << RTIT_CTL_CYC_THRESH_OFFSET)
-#define RTIT_CTL_PSB_FREQ_OFFSET 24
-#define RTIT_CTL_PSB_FREQ (0x0full << RTIT_CTL_PSB_FREQ_OFFSET)
-#define RTIT_CTL_ADDR0_OFFSET 32
-#define RTIT_CTL_ADDR0 (0x0full << RTIT_CTL_ADDR0_OFFSET)
-#define RTIT_CTL_ADDR1_OFFSET 36
-#define RTIT_CTL_ADDR1 (0x0full << RTIT_CTL_ADDR1_OFFSET)
-#define RTIT_CTL_ADDR2_OFFSET 40
-#define RTIT_CTL_ADDR2 (0x0full << RTIT_CTL_ADDR2_OFFSET)
-#define RTIT_CTL_ADDR3_OFFSET 44
-#define RTIT_CTL_ADDR3 (0x0full << RTIT_CTL_ADDR3_OFFSET)
-#define RTIT_STATUS_FILTEREN BIT(0)
-#define RTIT_STATUS_CONTEXTEN BIT(1)
-#define RTIT_STATUS_TRIGGEREN BIT(2)
-#define RTIT_STATUS_BUFFOVF BIT(3)
-#define RTIT_STATUS_ERROR BIT(4)
-#define RTIT_STATUS_STOPPED BIT(5)
-
-/*
 * Single-entry ToPA: when this close to the region boundary, switch
* buffers to avoid losing data.
*/
@@ -82,30 +45,9 @@ struct topa_entry {
u64 rsvd4 : 16;
};
-#define PT_CPUID_LEAVES 2
-#define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */
-
/* TSC to Core Crystal Clock Ratio */
#define CPUID_TSC_LEAF 0x15
-enum pt_capabilities {
- PT_CAP_max_subleaf = 0,
- PT_CAP_cr3_filtering,
- PT_CAP_psb_cyc,
- PT_CAP_ip_filtering,
- PT_CAP_mtc,
- PT_CAP_ptwrite,
- PT_CAP_power_event_trace,
- PT_CAP_topa_output,
- PT_CAP_topa_multiple_entries,
- PT_CAP_single_range_output,
- PT_CAP_payloads_lip,
- PT_CAP_num_address_ranges,
- PT_CAP_mtc_periods,
- PT_CAP_cycle_thresholds,
- PT_CAP_psb_periods,
-};
-
struct pt_pmu {
struct pmu pmu;
u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index adae087cecdd..78d7b7031bfc 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -859,11 +859,16 @@ static inline int amd_pmu_init(void)
static inline bool intel_pmu_has_bts(struct perf_event *event)
{
- if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
- !event->attr.freq && event->hw.sample_period == 1)
- return true;
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned int hw_event, bts_event;
+
+ if (event->attr.freq)
+ return false;
+
+ hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+ bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
- return false;
+ return hw_event == bts_event && hwc->sample_period == 1;
}
int intel_pmu_save_and_restart(struct perf_event *event);
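The reworked intel_pmu_has_bts() decodes the hardware event out of hwc->config instead of matching attr.config literally, so raw events encoding branch-instructions with a sample period of 1 also qualify for BTS. A standalone sketch of the check, with placeholder constants standing in for INTEL_ARCH_EVENT_MASK and the event_map() result:

#include <stdbool.h>
#include <stdint.h>

#define ARCH_EVENT_MASK		0xffffU	/* placeholder: event select + umask */
#define BRANCH_INSN_EVENT	0x00c4U	/* placeholder: mapped branch event */

static inline bool pmu_has_bts(uint64_t hwc_config, uint64_t sample_period,
			       bool freq_mode)
{
	if (freq_mode)
		return false;

	/* compare the decoded hardware event, not the raw attr.config */
	return (hwc_config & ARCH_EVENT_MASK) == BRANCH_INSN_EVENT &&
	       sample_period == 1;
}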
diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c
index b8e60cc50461..dd0a843f766d 100644
--- a/arch/x86/hyperv/nested.c
+++ b/arch/x86/hyperv/nested.c
@@ -7,6 +7,7 @@
*
* Author : Lan Tianyu <Tianyu.Lan@microsoft.com>
*/
+#define pr_fmt(fmt) "Hyper-V: " fmt
#include <linux/types.h>
@@ -54,3 +55,82 @@ fault:
return ret;
}
EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping);
+
+int hyperv_fill_flush_guest_mapping_list(
+ struct hv_guest_mapping_flush_list *flush,
+ u64 start_gfn, u64 pages)
+{
+ u64 cur = start_gfn;
+ u64 additional_pages;
+ int gpa_n = 0;
+
+ do {
+ /*
+		 * If the request exceeds the max flush count, fall back to
+		 * flushing the TLB without a range.
+ */
+ if (gpa_n >= HV_MAX_FLUSH_REP_COUNT)
+ return -ENOSPC;
+
+ additional_pages = min_t(u64, pages, HV_MAX_FLUSH_PAGES) - 1;
+
+ flush->gpa_list[gpa_n].page.additional_pages = additional_pages;
+ flush->gpa_list[gpa_n].page.largepage = false;
+ flush->gpa_list[gpa_n].page.basepfn = cur;
+
+ pages -= additional_pages + 1;
+ cur += additional_pages + 1;
+ gpa_n++;
+ } while (pages > 0);
+
+ return gpa_n;
+}
+EXPORT_SYMBOL_GPL(hyperv_fill_flush_guest_mapping_list);
+
+int hyperv_flush_guest_mapping_range(u64 as,
+ hyperv_fill_flush_list_func fill_flush_list_func, void *data)
+{
+ struct hv_guest_mapping_flush_list **flush_pcpu;
+ struct hv_guest_mapping_flush_list *flush;
+ u64 status = 0;
+ unsigned long flags;
+ int ret = -ENOTSUPP;
+ int gpa_n = 0;
+
+ if (!hv_hypercall_pg || !fill_flush_list_func)
+ goto fault;
+
+ local_irq_save(flags);
+
+ flush_pcpu = (struct hv_guest_mapping_flush_list **)
+ this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ flush = *flush_pcpu;
+ if (unlikely(!flush)) {
+ local_irq_restore(flags);
+ goto fault;
+ }
+
+ flush->address_space = as;
+ flush->flags = 0;
+
+ gpa_n = fill_flush_list_func(flush, data);
+ if (gpa_n < 0) {
+ local_irq_restore(flags);
+ goto fault;
+ }
+
+ status = hv_do_rep_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST,
+ gpa_n, 0, flush, NULL);
+
+ local_irq_restore(flags);
+
+ if (!(status & HV_HYPERCALL_RESULT_MASK))
+ ret = 0;
+ else
+ ret = status;
+fault:
+ trace_hyperv_nested_flush_guest_mapping_range(as, ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hyperv_flush_guest_mapping_range);
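hyperv_fill_flush_guest_mapping_list() splits a [start_gfn, start_gfn + pages) range into hv_gpa_page_range entries of at most HV_MAX_FLUSH_PAGES pages each, with additional_pages holding the per-entry page count minus one. A standalone sketch of that chunking, printing entries instead of filling the hypercall input page:

#include <stdint.h>
#include <stdio.h>

#define MAX_FLUSH_PAGES 2048	/* 11-bit additional_pages field, plus one */

static void fill_flush_list(uint64_t start_gfn, uint64_t pages)
{
	uint64_t cur = start_gfn;
	int gpa_n = 0;

	while (pages > 0) {
		uint64_t chunk = pages < MAX_FLUSH_PAGES ? pages : MAX_FLUSH_PAGES;

		printf("entry %d: basepfn=%llu additional_pages=%llu\n",
		       gpa_n, (unsigned long long)cur,
		       (unsigned long long)(chunk - 1));
		pages -= chunk;
		cur += chunk;
		gpa_n++;
	}
}

int main(void)
{
	fill_flush_list(0x1000, 5000);	/* 2048 + 2048 + 904 pages */
	return 0;
}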
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
index 8e4ea39e55d0..31b627b43a8e 100644
--- a/arch/x86/include/asm/alternative-asm.h
+++ b/arch/x86/include/asm/alternative-asm.h
@@ -7,24 +7,16 @@
#include <asm/asm.h>
#ifdef CONFIG_SMP
-.macro LOCK_PREFIX_HERE
+ .macro LOCK_PREFIX
+672: lock
.pushsection .smp_locks,"a"
.balign 4
- .long 671f - . # offset
+ .long 672b - .
.popsection
-671:
-.endm
-
-.macro LOCK_PREFIX insn:vararg
- LOCK_PREFIX_HERE
- lock \insn
-.endm
+ .endm
#else
-.macro LOCK_PREFIX_HERE
-.endm
-
-.macro LOCK_PREFIX insn:vararg
-.endm
+ .macro LOCK_PREFIX
+ .endm
#endif
/*
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index d7faa16622d8..0660e14690c8 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -31,8 +31,15 @@
*/
#ifdef CONFIG_SMP
-#define LOCK_PREFIX_HERE "LOCK_PREFIX_HERE\n\t"
-#define LOCK_PREFIX "LOCK_PREFIX "
+#define LOCK_PREFIX_HERE \
+ ".pushsection .smp_locks,\"a\"\n" \
+ ".balign 4\n" \
+ ".long 671f - .\n" /* offset */ \
+ ".popsection\n" \
+ "671:"
+
+#define LOCK_PREFIX LOCK_PREFIX_HERE "\n\tlock; "
+
#else /* ! CONFIG_SMP */
#define LOCK_PREFIX_HERE ""
#define LOCK_PREFIX ""
@@ -167,7 +174,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
/*
* Alternative inline assembly with input.
*
- * Pecularities:
+ * Peculiarities:
* No memory clobber here.
* Argument numbers start with 1.
* Best is to use constraints that are fixed size (like (%1) ... "r")
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index 34a10b2d5b73..fc0693569f7a 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -5,15 +5,9 @@
#include <asm/cpufeatures.h>
#ifdef CONFIG_64BIT
-/* popcnt %edi, %eax */
-#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7"
-/* popcnt %rdi, %rax */
-#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
#define REG_IN "D"
#define REG_OUT "a"
#else
-/* popcnt %eax, %eax */
-#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0"
#define REG_IN "a"
#define REG_OUT "a"
#endif
@@ -24,7 +18,7 @@ static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
unsigned int res;
- asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
+ asm (ALTERNATIVE("call __sw_hweight32", "popcntl %1, %0", X86_FEATURE_POPCNT)
: "="REG_OUT (res)
: REG_IN (w));
@@ -52,7 +46,7 @@ static __always_inline unsigned long __arch_hweight64(__u64 w)
{
unsigned long res;
- asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
+ asm (ALTERNATIVE("call __sw_hweight64", "popcntq %1, %0", X86_FEATURE_POPCNT)
: "="REG_OUT (res)
: REG_IN (w));
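With the hand-assembled opcode bytes gone, the ALTERNATIVE() simply patches between a call to the software fallback and a popcnt mnemonic. A user-space sketch of the two paths (the fallback mirrors the classic parallel bit count, not __sw_hweight32 verbatim, and the compiler builtin stands in for the patched instruction):

#include <stdint.h>
#include <stdio.h>

static unsigned int sw_hweight32(uint32_t w)
{
	/* classic parallel bit count */
	w -= (w >> 1) & 0x55555555;
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
	w = (w + (w >> 4)) & 0x0f0f0f0f;
	return (w * 0x01010101) >> 24;
}

static unsigned int hw_hweight32(uint32_t w)
{
	return __builtin_popcount(w);	/* compiles to popcntl with -mpopcnt */
}

int main(void)
{
	printf("%u %u\n", sw_hweight32(0xdeadbeef), hw_hweight32(0xdeadbeef));
	return 0;
}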
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 21b086786404..6467757bb39f 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -120,25 +120,12 @@
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
- ASM_EXTABLE_HANDLE from to handler
-
-.macro ASM_EXTABLE_HANDLE from:req to:req handler:req
- .pushsection "__ex_table","a"
- .balign 4
- .long (\from) - .
- .long (\to) - .
- .long (\handler) - .
+ .pushsection "__ex_table","a" ; \
+ .balign 4 ; \
+ .long (from) - . ; \
+ .long (to) - . ; \
+ .long (handler) - . ; \
.popsection
-.endm
-#else /* __ASSEMBLY__ */
-
-# define _ASM_EXTABLE_HANDLE(from, to, handler) \
- "ASM_EXTABLE_HANDLE from=" #from " to=" #to \
- " handler=\"" #handler "\"\n\t"
-
-/* For C file, we already have NOKPROBE_SYMBOL macro */
-
-#endif /* __ASSEMBLY__ */
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
@@ -161,7 +148,6 @@
_ASM_PTR (entry); \
.popsection
-#ifdef __ASSEMBLY__
.macro ALIGN_DESTINATION
/* check for bad alignment of destination */
movl %edi,%ecx
@@ -185,7 +171,34 @@
_ASM_EXTABLE_UA(100b, 103b)
_ASM_EXTABLE_UA(101b, 103b)
.endm
-#endif /* __ASSEMBLY__ */
+
+#else
+# define _EXPAND_EXTABLE_HANDLE(x) #x
+# define _ASM_EXTABLE_HANDLE(from, to, handler) \
+ " .pushsection \"__ex_table\",\"a\"\n" \
+ " .balign 4\n" \
+ " .long (" #from ") - .\n" \
+ " .long (" #to ") - .\n" \
+ " .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \
+ " .popsection\n"
+
+# define _ASM_EXTABLE(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
+
+# define _ASM_EXTABLE_UA(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+
+# define _ASM_EXTABLE_FAULT(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
+
+# define _ASM_EXTABLE_EX(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
+
+# define _ASM_EXTABLE_REFCOUNT(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_refcount)
+
+/* For C file, we already have NOKPROBE_SYMBOL macro */
+#endif
#ifndef __ASSEMBLY__
/*
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index a07ffd23e4dd..f6f6ef436599 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -36,6 +36,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
*/
if (boot_params->sentinel) {
/* fields in boot_params are left uninitialized, clear them */
+ boot_params->acpi_rsdp_addr = 0;
memset(&boot_params->ext_ramdisk_image, 0,
(char *)&boot_params->efi_info -
(char *)&boot_params->ext_ramdisk_image);
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 5090035e6d16..6804d6642767 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -4,8 +4,6 @@
#include <linux/stringify.h>
-#ifndef __ASSEMBLY__
-
/*
* Despite that some emulators terminate on UD2, we use it for WARN().
*
@@ -22,15 +20,53 @@
#define LEN_UD2 2
+#ifdef CONFIG_GENERIC_BUG
+
+#ifdef CONFIG_X86_32
+# define __BUG_REL(val) ".long " __stringify(val)
+#else
+# define __BUG_REL(val) ".long " __stringify(val) " - 2b"
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define _BUG_FLAGS(ins, flags) \
+do { \
+ asm volatile("1:\t" ins "\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
+ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
+ "\t" __BUG_REL(%c0) "\t# bug_entry::file\n" \
+ "\t.word %c1" "\t# bug_entry::line\n" \
+ "\t.word %c2" "\t# bug_entry::flags\n" \
+ "\t.org 2b+%c3\n" \
+ ".popsection" \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry))); \
+} while (0)
+
+#else /* !CONFIG_DEBUG_BUGVERBOSE */
+
#define _BUG_FLAGS(ins, flags) \
do { \
- asm volatile("ASM_BUG ins=\"" ins "\" file=%c0 line=%c1 " \
- "flags=%c2 size=%c3" \
- : : "i" (__FILE__), "i" (__LINE__), \
- "i" (flags), \
+ asm volatile("1:\t" ins "\n" \
+ ".pushsection __bug_table,\"aw\"\n" \
+ "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
+ "\t.word %c0" "\t# bug_entry::flags\n" \
+ "\t.org 2b+%c1\n" \
+ ".popsection" \
+ : : "i" (flags), \
"i" (sizeof(struct bug_entry))); \
} while (0)
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
+#else
+
+#define _BUG_FLAGS(ins, flags) asm volatile(ins)
+
+#endif /* CONFIG_GENERIC_BUG */
+
#define HAVE_ARCH_BUG
#define BUG() \
do { \
@@ -46,54 +82,4 @@ do { \
#include <asm-generic/bug.h>
-#else /* __ASSEMBLY__ */
-
-#ifdef CONFIG_GENERIC_BUG
-
-#ifdef CONFIG_X86_32
-.macro __BUG_REL val:req
- .long \val
-.endm
-#else
-.macro __BUG_REL val:req
- .long \val - 2b
-.endm
-#endif
-
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
-1: \ins
- .pushsection __bug_table,"aw"
-2: __BUG_REL val=1b # bug_entry::bug_addr
- __BUG_REL val=\file # bug_entry::file
- .word \line # bug_entry::line
- .word \flags # bug_entry::flags
- .org 2b+\size
- .popsection
-.endm
-
-#else /* !CONFIG_DEBUG_BUGVERBOSE */
-
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
-1: \ins
- .pushsection __bug_table,"aw"
-2: __BUG_REL val=1b # bug_entry::bug_addr
- .word \flags # bug_entry::flags
- .org 2b+\size
- .popsection
-.endm
-
-#endif /* CONFIG_DEBUG_BUGVERBOSE */
-
-#else /* CONFIG_GENERIC_BUG */
-
-.macro ASM_BUG ins:req file:req line:req flags:req size:req
- \ins
-.endm
-
-#endif /* CONFIG_GENERIC_BUG */
-
-#endif /* __ASSEMBLY__ */
-
#endif /* _ASM_X86_BUG_H */
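On x86-64, __BUG_REL() records 32-bit self-relative offsets (".long target - .") in the bug table rather than 64-bit absolute addresses, shrinking each entry. A sketch of how such a field is decoded, illustrative rather than the kernel's bug_entry handling:

#include <stdint.h>

struct rel_entry {
	int32_t bug_addr_disp;	/* stores: target_address - &bug_addr_disp */
};

static inline void *rel_to_ptr(const int32_t *field)
{
	/* reconstruct the absolute address from the self-relative offset */
	return (char *)field + *field;
}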
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index bfb85e5844ab..a8bfac131256 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -7,7 +7,7 @@
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
/*
- * Non-existant functions to indicate usage errors at link time
+ * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
*/
extern void __xchg_wrong_size(void)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 7d442722ef24..aced6c9290d6 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -2,10 +2,10 @@
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
#include <asm/processor.h>
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+
#include <asm/asm.h>
#include <linux/bitops.h>
@@ -161,10 +161,37 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
*/
static __always_inline __pure bool _static_cpu_has(u16 bit)
{
- asm_volatile_goto("STATIC_CPU_HAS bitnum=%[bitnum] "
- "cap_byte=\"%[cap_byte]\" "
- "feature=%P[feature] t_yes=%l[t_yes] "
- "t_no=%l[t_no] always=%P[always]"
+ asm_volatile_goto("1: jmp 6f\n"
+ "2:\n"
+ ".skip -(((5f-4f) - (2b-1b)) > 0) * "
+ "((5f-4f) - (2b-1b)),0x90\n"
+ "3:\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 4f - .\n" /* repl offset */
+ " .word %P[always]\n" /* always replace */
+ " .byte 3b - 1b\n" /* src len */
+ " .byte 5f - 4f\n" /* repl len */
+ " .byte 3b - 2b\n" /* pad len */
+ ".previous\n"
+ ".section .altinstr_replacement,\"ax\"\n"
+ "4: jmp %l[t_no]\n"
+ "5:\n"
+ ".previous\n"
+ ".section .altinstructions,\"a\"\n"
+ " .long 1b - .\n" /* src offset */
+ " .long 0\n" /* no replacement */
+ " .word %P[feature]\n" /* feature bit */
+ " .byte 3b - 1b\n" /* src len */
+ " .byte 0\n" /* repl len */
+ " .byte 0\n" /* pad len */
+ ".previous\n"
+ ".section .altinstr_aux,\"ax\"\n"
+ "6:\n"
+ " testb %[bitnum],%[cap_byte]\n"
+ " jnz %l[t_yes]\n"
+ " jmp %l[t_no]\n"
+ ".previous\n"
: : [feature] "i" (bit),
[always] "i" (X86_FEATURE_ALWAYS),
[bitnum] "i" (1 << (bit & 7)),
@@ -199,44 +226,5 @@ t_no:
#define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \
boot_cpu_data.x86_model
-#else /* __ASSEMBLY__ */
-
-.macro STATIC_CPU_HAS bitnum:req cap_byte:req feature:req t_yes:req t_no:req always:req
-1:
- jmp 6f
-2:
- .skip -(((5f-4f) - (2b-1b)) > 0) * ((5f-4f) - (2b-1b)),0x90
-3:
- .section .altinstructions,"a"
- .long 1b - . /* src offset */
- .long 4f - . /* repl offset */
- .word \always /* always replace */
- .byte 3b - 1b /* src len */
- .byte 5f - 4f /* repl len */
- .byte 3b - 2b /* pad len */
- .previous
- .section .altinstr_replacement,"ax"
-4:
- jmp \t_no
-5:
- .previous
- .section .altinstructions,"a"
- .long 1b - . /* src offset */
- .long 0 /* no replacement */
- .word \feature /* feature bit */
- .byte 3b - 1b /* src len */
- .byte 0 /* repl len */
- .byte 0 /* pad len */
- .previous
- .section .altinstr_aux,"ax"
-6:
- testb \bitnum,\cap_byte
- jnz \t_yes
- jmp \t_no
- .previous
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */
#endif /* _ASM_X86_CPUFEATURE_H */
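Until alternatives patching replaces the jump, the .altinstr_aux trampoline decides at runtime with a testb of the feature bit in the per-CPU capability byte array. A C-level sketch of that slow-path test (not the kernel's implementation):

#include <stdbool.h>
#include <stdint.h>

static inline bool cpu_has(const uint8_t *cap_bytes, unsigned int bit)
{
	/* byte index = bit / 8, bit mask = 1 << (bit % 8), as testb does */
	return cap_bytes[bit >> 3] & (1u << (bit & 7));
}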
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 28c4a502b419..6d6122524711 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -281,9 +281,11 @@
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */
#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
index a7adb2bfbf0b..0acf5ee45a21 100644
--- a/arch/x86/include/asm/crash.h
+++ b/arch/x86/include/asm/crash.h
@@ -6,5 +6,6 @@ int crash_load_segments(struct kimage *image);
int crash_copy_backup_region(struct kimage *image);
int crash_setup_memmap_entries(struct kimage *image,
struct boot_params *params);
+void crash_smp_send_stop(void);
#endif /* _ASM_X86_CRASH_H */
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 33833d1909af..a5ea841cc6d2 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -16,6 +16,12 @@
# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
#endif
+#ifdef CONFIG_X86_SMAP
+# define DISABLE_SMAP 0
+#else
+# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31))
+#endif
+
#ifdef CONFIG_X86_INTEL_UMIP
# define DISABLE_UMIP 0
#else
@@ -68,7 +74,7 @@
#define DISABLED_MASK6 0
#define DISABLED_MASK7 (DISABLE_PTI)
#define DISABLED_MASK8 0
-#define DISABLED_MASK9 (DISABLE_MPX)
+#define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 0
#define DISABLED_MASK12 0
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index eea40d52ca78..107283b1eb1e 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -19,7 +19,7 @@
* This is the main reason why we're doing stable VA mappings for RT
* services.
*
- * This flag is used in conjuction with a chicken bit called
+ * This flag is used in conjunction with a chicken bit called
* "efi=old_map" which can be used as a fallback to the old runtime
* services mapping method in case there's some b0rkage with a
* particular EFI implementation (haha, it is hard to hold up the
@@ -82,8 +82,7 @@ struct efi_scratch {
#define arch_efi_call_virt_setup() \
({ \
efi_sync_low_kernel_mappings(); \
- preempt_disable(); \
- __kernel_fpu_begin(); \
+ kernel_fpu_begin(); \
firmware_restrict_branch_speculation_start(); \
\
if (!efi_enabled(EFI_OLD_MEMMAP)) \
@@ -99,8 +98,7 @@ struct efi_scratch {
efi_switch_mm(efi_scratch.prev_mm); \
\
firmware_restrict_branch_speculation_end(); \
- __kernel_fpu_end(); \
- preempt_enable(); \
+ kernel_fpu_end(); \
})
extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
@@ -141,6 +139,8 @@ extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);
extern void efi_switch_mm(struct mm_struct *mm);
extern void efi_recover_from_page_fault(unsigned long phys_addr);
+extern void efi_free_boot_services(void);
+extern void efi_reserve_boot_services(void);
struct efi_setup_data {
u64 fw_vendor;
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index a9caac9d4a72..b56d504af654 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -12,17 +12,12 @@
#define _ASM_X86_FPU_API_H
/*
- * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
- * and they don't touch the preempt state on their own.
- * If you enable preemption after __kernel_fpu_begin(), preempt notifier
- * should call the __kernel_fpu_end() to prevent the kernel/user FPU
- * state from getting corrupted. KVM for example uses this model.
- *
- * All other cases use kernel_fpu_begin/end() which disable preemption
- * during kernel FPU usage.
+ * Use kernel_fpu_begin/end() if you intend to use the FPU in kernel context.
+ * It disables preemption, so be careful if you intend to use it for long
+ * periods of time.
+ * If you intend to use the FPU in a softirq, first check with
+ * irq_fpu_usable() whether that is possible.
*/
-extern void __kernel_fpu_begin(void);
-extern void __kernel_fpu_end(void);
extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);
extern bool irq_fpu_usable(void);
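A hypothetical caller sketch matching the simplified contract above; kernel_fpu_begin()/end() now manage preemption themselves:

#include <asm/fpu/api.h>

static void do_simd_work(void)
{
	if (!irq_fpu_usable())
		return;		/* e.g. when called from interrupt context */

	kernel_fpu_begin();	/* disables preemption internally */
	/* ... FPU/SIMD-touching code ... */
	kernel_fpu_end();
}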
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 5f7290e6e954..fa2c93cb42a2 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -106,6 +106,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
#define user_insn(insn, output, input...) \
({ \
int err; \
+ \
+ might_fault(); \
+ \
asm volatile(ASM_STAC "\n" \
"1:" #insn "\n\t" \
"2: " ASM_CLAC "\n" \
@@ -226,7 +229,7 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
"3: movl $-2,%[err]\n\t" \
"jmp 2b\n\t" \
".popsection\n\t" \
- _ASM_EXTABLE_UA(1b, 3b) \
+ _ASM_EXTABLE(1b, 3b) \
: [err] "=r" (err) \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
diff --git a/arch/x86/include/asm/fsgsbase.h b/arch/x86/include/asm/fsgsbase.h
index eb377b6e9eed..bca4c743de77 100644
--- a/arch/x86/include/asm/fsgsbase.h
+++ b/arch/x86/include/asm/fsgsbase.h
@@ -16,8 +16,8 @@
*/
extern unsigned long x86_fsbase_read_task(struct task_struct *task);
extern unsigned long x86_gsbase_read_task(struct task_struct *task);
-extern int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
-extern int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
+extern void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase);
+extern void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase);
/* Helper functions for reading/writing FS/GS base */
@@ -39,8 +39,15 @@ static inline unsigned long x86_gsbase_read_cpu_inactive(void)
return gsbase;
}
-extern void x86_fsbase_write_cpu(unsigned long fsbase);
-extern void x86_gsbase_write_cpu_inactive(unsigned long gsbase);
+static inline void x86_fsbase_write_cpu(unsigned long fsbase)
+{
+ wrmsrl(MSR_FS_BASE, fsbase);
+}
+
+static inline void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
+{
+ wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
+}
#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 4139f7650fe5..705dafc2d11a 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -10,6 +10,7 @@
#define _ASM_X86_HYPERV_TLFS_H
#include <linux/types.h>
+#include <asm/page.h>
/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
@@ -30,158 +31,150 @@
/*
* Feature identification. EAX indicates which features are available
* to the partition based upon the current partition privileges.
+ * These are HYPERV_CPUID_FEATURES.EAX bits.
*/
/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */
-#define HV_X64_MSR_VP_RUNTIME_AVAILABLE (1 << 0)
+#define HV_X64_MSR_VP_RUNTIME_AVAILABLE BIT(0)
/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
-#define HV_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
-/* Partition reference TSC MSR is available */
-#define HV_MSR_REFERENCE_TSC_AVAILABLE (1 << 9)
-/* Partition Guest IDLE MSR is available */
-#define HV_X64_MSR_GUEST_IDLE_AVAILABLE (1 << 10)
-
-/* A partition's reference time stamp counter (TSC) page */
-#define HV_X64_MSR_REFERENCE_TSC 0x40000021
-
-/*
- * There is a single feature flag that signifies if the partition has access
- * to MSRs with local APIC and TSC frequencies.
- */
-#define HV_X64_ACCESS_FREQUENCY_MSRS (1 << 11)
-
-/* AccessReenlightenmentControls privilege */
-#define HV_X64_ACCESS_REENLIGHTENMENT BIT(13)
-
+#define HV_MSR_TIME_REF_COUNT_AVAILABLE BIT(1)
/*
* Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
* and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available
*/
-#define HV_X64_MSR_SYNIC_AVAILABLE (1 << 2)
+#define HV_X64_MSR_SYNIC_AVAILABLE BIT(2)
/*
* Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through
* HV_X64_MSR_STIMER3_COUNT) available
*/
-#define HV_MSR_SYNTIMER_AVAILABLE (1 << 3)
+#define HV_MSR_SYNTIMER_AVAILABLE BIT(3)
/*
* APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
* are available
*/
-#define HV_X64_MSR_APIC_ACCESS_AVAILABLE (1 << 4)
+#define HV_X64_MSR_APIC_ACCESS_AVAILABLE BIT(4)
/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/
-#define HV_X64_MSR_HYPERCALL_AVAILABLE (1 << 5)
+#define HV_X64_MSR_HYPERCALL_AVAILABLE BIT(5)
/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/
-#define HV_X64_MSR_VP_INDEX_AVAILABLE (1 << 6)
+#define HV_X64_MSR_VP_INDEX_AVAILABLE BIT(6)
/* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/
-#define HV_X64_MSR_RESET_AVAILABLE (1 << 7)
- /*
- * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE,
- * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE,
- * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available
- */
-#define HV_X64_MSR_STAT_PAGES_AVAILABLE (1 << 8)
-
-/* Frequency MSRs available */
-#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE (1 << 8)
-
-/* Crash MSR available */
-#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
-
-/* stimer Direct Mode is available */
-#define HV_STIMER_DIRECT_MODE_AVAILABLE (1 << 19)
+#define HV_X64_MSR_RESET_AVAILABLE BIT(7)
+/*
+ * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE,
+ * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE,
+ * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available
+ */
+#define HV_X64_MSR_STAT_PAGES_AVAILABLE BIT(8)
+/* Partition reference TSC MSR is available */
+#define HV_MSR_REFERENCE_TSC_AVAILABLE BIT(9)
+/* Partition Guest IDLE MSR is available */
+#define HV_X64_MSR_GUEST_IDLE_AVAILABLE BIT(10)
+/*
+ * There is a single feature flag that signifies if the partition has access
+ * to MSRs with local APIC and TSC frequencies.
+ */
+#define HV_X64_ACCESS_FREQUENCY_MSRS BIT(11)
+/* AccessReenlightenmentControls privilege */
+#define HV_X64_ACCESS_REENLIGHTENMENT BIT(13)
/*
- * Feature identification: EBX indicates which flags were specified at
- * partition creation. The format is the same as the partition creation
- * flag structure defined in section Partition Creation Flags.
+ * Feature identification: indicates which flags were specified at partition
+ * creation. The format is the same as the partition creation flag structure
+ * defined in section Partition Creation Flags.
+ * These are HYPERV_CPUID_FEATURES.EBX bits.
*/
-#define HV_X64_CREATE_PARTITIONS (1 << 0)
-#define HV_X64_ACCESS_PARTITION_ID (1 << 1)
-#define HV_X64_ACCESS_MEMORY_POOL (1 << 2)
-#define HV_X64_ADJUST_MESSAGE_BUFFERS (1 << 3)
-#define HV_X64_POST_MESSAGES (1 << 4)
-#define HV_X64_SIGNAL_EVENTS (1 << 5)
-#define HV_X64_CREATE_PORT (1 << 6)
-#define HV_X64_CONNECT_PORT (1 << 7)
-#define HV_X64_ACCESS_STATS (1 << 8)
-#define HV_X64_DEBUGGING (1 << 11)
-#define HV_X64_CPU_POWER_MANAGEMENT (1 << 12)
-#define HV_X64_CONFIGURE_PROFILER (1 << 13)
+#define HV_X64_CREATE_PARTITIONS BIT(0)
+#define HV_X64_ACCESS_PARTITION_ID BIT(1)
+#define HV_X64_ACCESS_MEMORY_POOL BIT(2)
+#define HV_X64_ADJUST_MESSAGE_BUFFERS BIT(3)
+#define HV_X64_POST_MESSAGES BIT(4)
+#define HV_X64_SIGNAL_EVENTS BIT(5)
+#define HV_X64_CREATE_PORT BIT(6)
+#define HV_X64_CONNECT_PORT BIT(7)
+#define HV_X64_ACCESS_STATS BIT(8)
+#define HV_X64_DEBUGGING BIT(11)
+#define HV_X64_CPU_POWER_MANAGEMENT BIT(12)
/*
* Feature identification. EDX indicates which miscellaneous features
* are available to the partition.
+ * These are HYPERV_CPUID_FEATURES.EDX bits.
*/
/* The MWAIT instruction is available (per section MONITOR / MWAIT) */
-#define HV_X64_MWAIT_AVAILABLE (1 << 0)
+#define HV_X64_MWAIT_AVAILABLE BIT(0)
/* Guest debugging support is available */
-#define HV_X64_GUEST_DEBUGGING_AVAILABLE (1 << 1)
+#define HV_X64_GUEST_DEBUGGING_AVAILABLE BIT(1)
/* Performance Monitor support is available*/
-#define HV_X64_PERF_MONITOR_AVAILABLE (1 << 2)
+#define HV_X64_PERF_MONITOR_AVAILABLE BIT(2)
/* Support for physical CPU dynamic partitioning events is available*/
-#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1 << 3)
+#define HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE BIT(3)
/*
* Support for passing hypercall input parameter block via XMM
* registers is available
*/
-#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE (1 << 4)
+#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4)
/* Support for a virtual guest idle state is available */
-#define HV_X64_GUEST_IDLE_STATE_AVAILABLE (1 << 5)
-/* Guest crash data handler available */
-#define HV_X64_GUEST_CRASH_MSR_AVAILABLE (1 << 10)
+#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5)
+/* Frequency MSRs available */
+#define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE BIT(8)
+/* Crash MSR available */
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10)
+/* stimer Direct Mode is available */
+#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19)
/*
* Implementation recommendations. Indicates which behaviors the hypervisor
* recommends the OS implement for optimal performance.
+ * These are HYPERV_CPUID_ENLIGHTMENT_INFO.EAX bits.
+ */
+/*
+ * Recommend using hypercall for address space switches rather
+ * than MOV to CR3 instruction
*/
- /*
- * Recommend using hypercall for address space switches rather
- * than MOV to CR3 instruction
- */
-#define HV_X64_AS_SWITCH_RECOMMENDED (1 << 0)
+#define HV_X64_AS_SWITCH_RECOMMENDED BIT(0)
/* Recommend using hypercall for local TLB flushes rather
* than INVLPG or MOV to CR3 instructions */
-#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED (1 << 1)
+#define HV_X64_LOCAL_TLB_FLUSH_RECOMMENDED BIT(1)
/*
* Recommend using hypercall for remote TLB flushes rather
* than inter-processor interrupts
*/
-#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED (1 << 2)
+#define HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED BIT(2)
/*
* Recommend using MSRs for accessing APIC registers
* EOI, ICR and TPR rather than their memory-mapped counterparts
*/
-#define HV_X64_APIC_ACCESS_RECOMMENDED (1 << 3)
+#define HV_X64_APIC_ACCESS_RECOMMENDED BIT(3)
/* Recommend using the hypervisor-provided MSR to initiate a system RESET */
-#define HV_X64_SYSTEM_RESET_RECOMMENDED (1 << 4)
+#define HV_X64_SYSTEM_RESET_RECOMMENDED BIT(4)
/*
* Recommend using relaxed timing for this partition. If used,
* the VM should disable any watchdog timeouts that rely on the
* timely delivery of external interrupts
*/
-#define HV_X64_RELAXED_TIMING_RECOMMENDED (1 << 5)
+#define HV_X64_RELAXED_TIMING_RECOMMENDED BIT(5)
/*
* Recommend not using Auto End-Of-Interrupt feature
*/
-#define HV_DEPRECATING_AEOI_RECOMMENDED (1 << 9)
+#define HV_DEPRECATING_AEOI_RECOMMENDED BIT(9)
/*
* Recommend using cluster IPI hypercalls.
*/
-#define HV_X64_CLUSTER_IPI_RECOMMENDED (1 << 10)
+#define HV_X64_CLUSTER_IPI_RECOMMENDED BIT(10)
/* Recommend using the newer ExProcessorMasks interface */
-#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11)
+#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11)
/* Recommend using enlightened VMCS */
-#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED (1 << 14)
+#define HV_X64_ENLIGHTENED_VMCS_RECOMMENDED BIT(14)
-/*
- * Crash notification flags.
- */
-#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
-#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
+/* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */
+#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
+#define HV_X64_NESTED_MSR_BITMAP BIT(19)
+
+/* Hyper-V specific model specific registers (MSRs) */
/* MSR used to identify the guest OS. */
#define HV_X64_MSR_GUEST_OS_ID 0x40000000
@@ -201,6 +194,9 @@
/* MSR used to read the per-partition time reference counter */
#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+/* A partition's reference time stamp counter (TSC) page */
+#define HV_X64_MSR_REFERENCE_TSC 0x40000021
+
/* MSR used to retrieve the TSC frequency */
#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
@@ -258,9 +254,11 @@
#define HV_X64_MSR_CRASH_P3 0x40000103
#define HV_X64_MSR_CRASH_P4 0x40000104
#define HV_X64_MSR_CRASH_CTL 0x40000105
-#define HV_X64_MSR_CRASH_CTL_NOTIFY (1ULL << 63)
-#define HV_X64_MSR_CRASH_PARAMS \
- (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
+
+/* TSC emulation after migration */
+#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
+#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
+#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
/*
* Declare the MSR used to setup pages used to communicate with the hypervisor.
@@ -271,7 +269,7 @@ union hv_x64_msr_hypercall_contents {
u64 enable:1;
u64 reserved:11;
u64 guest_physical_address:52;
- };
+ } __packed;
};
/*
@@ -283,7 +281,7 @@ struct ms_hyperv_tsc_page {
volatile u64 tsc_scale;
volatile s64 tsc_offset;
u64 reserved2[509];
-};
+} __packed;
/*
* The guest OS needs to register the guest ID with the hypervisor.
@@ -311,39 +309,37 @@ struct ms_hyperv_tsc_page {
#define HV_LINUX_VENDOR_ID 0x8100
-/* TSC emulation after migration */
-#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
-
-/* Nested features (CPUID 0x4000000A) EAX */
-#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
-#define HV_X64_NESTED_MSR_BITMAP BIT(19)
-
struct hv_reenlightenment_control {
__u64 vector:8;
__u64 reserved1:8;
__u64 enabled:1;
__u64 reserved2:15;
__u64 target_vp:32;
-};
-
-#define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107
-#define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108
+} __packed;
struct hv_tsc_emulation_control {
__u64 enabled:1;
__u64 reserved:63;
-};
+} __packed;
struct hv_tsc_emulation_status {
__u64 inprogress:1;
__u64 reserved:63;
-};
+} __packed;
#define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001
#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT 12
#define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK \
(~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
+/*
+ * Crash notification (HV_X64_MSR_CRASH_CTL) flags.
+ */
+#define HV_CRASH_CTL_CRASH_NOTIFY_MSG BIT_ULL(62)
+#define HV_CRASH_CTL_CRASH_NOTIFY BIT_ULL(63)
+#define HV_X64_MSR_CRASH_PARAMS \
+ (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
+
#define HV_IPI_LOW_VECTOR 0x10
#define HV_IPI_HIGH_VECTOR 0xff
@@ -358,6 +354,7 @@ struct hv_tsc_emulation_status {
#define HVCALL_POST_MESSAGE 0x005c
#define HVCALL_SIGNAL_EVENT 0x005d
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
+#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001
#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12
@@ -409,7 +406,7 @@ typedef struct _HV_REFERENCE_TSC_PAGE {
__u32 res1;
__u64 tsc_scale;
__s64 tsc_offset;
-} HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE;
+} __packed HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE;
/* Define the number of synthetic interrupt sources. */
#define HV_SYNIC_SINT_COUNT (16)
@@ -466,7 +463,7 @@ union hv_message_flags {
struct {
__u8 msg_pending:1;
__u8 reserved:7;
- };
+ } __packed;
};
/* Define port identifier type. */
@@ -475,7 +472,7 @@ union hv_port_id {
struct {
__u32 id:24;
__u32 reserved:8;
- } u;
+ } __packed u;
};
/* Define synthetic interrupt controller message header. */
@@ -488,7 +485,7 @@ struct hv_message_header {
__u64 sender;
union hv_port_id port;
};
-};
+} __packed;
/* Define synthetic interrupt controller message format. */
struct hv_message {
@@ -496,12 +493,12 @@ struct hv_message {
union {
__u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
} u;
-};
+} __packed;
/* Define the synthetic interrupt message page layout. */
struct hv_message_page {
struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
-};
+} __packed;
/* Define timer message payload structure. */
struct hv_timer_message_payload {
@@ -509,7 +506,7 @@ struct hv_timer_message_payload {
__u32 reserved;
__u64 expiration_time; /* When the timer expired */
__u64 delivery_time; /* When the message was delivered */
-};
+} __packed;
/* Define virtual processor assist page structure. */
struct hv_vp_assist_page {
@@ -518,8 +515,9 @@ struct hv_vp_assist_page {
__u64 vtl_control[2];
__u64 nested_enlightenments_control[2];
__u32 enlighten_vmentry;
+ __u32 padding;
__u64 current_nested_vmcs;
-};
+} __packed;
struct hv_enlightened_vmcs {
u32 revision_id;
@@ -533,6 +531,8 @@ struct hv_enlightened_vmcs {
u16 host_gs_selector;
u16 host_tr_selector;
+ u16 padding16_1;
+
u64 host_ia32_pat;
u64 host_ia32_efer;
@@ -651,7 +651,7 @@ struct hv_enlightened_vmcs {
u64 ept_pointer;
u16 virtual_processor_id;
- u16 padding16[3];
+ u16 padding16_2[3];
u64 padding64_2[5];
u64 guest_physical_address;
@@ -693,7 +693,7 @@ struct hv_enlightened_vmcs {
u32 nested_flush_hypercall:1;
u32 msr_bitmap:1;
u32 reserved:30;
- } hv_enlightenments_control;
+ } __packed hv_enlightenments_control;
u32 hv_vp_id;
u64 hv_vm_id;
@@ -703,7 +703,7 @@ struct hv_enlightened_vmcs {
u64 padding64_5[7];
u64 xss_exit_bitmap;
u64 padding64_6[7];
-};
+} __packed;
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP BIT(0)
@@ -725,36 +725,129 @@ struct hv_enlightened_vmcs {
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL 0xFFFF
-#define HV_STIMER_ENABLE (1ULL << 0)
-#define HV_STIMER_PERIODIC (1ULL << 1)
-#define HV_STIMER_LAZY (1ULL << 2)
-#define HV_STIMER_AUTOENABLE (1ULL << 3)
-#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F)
+/* Define synthetic interrupt controller flag constants. */
+#define HV_EVENT_FLAGS_COUNT (256 * 8)
+#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))
+
+/*
+ * Synthetic timer configuration.
+ */
+union hv_stimer_config {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 periodic:1;
+ u64 lazy:1;
+ u64 auto_enable:1;
+ u64 apic_vector:8;
+ u64 direct_mode:1;
+ u64 reserved_z0:3;
+ u64 sintx:4;
+ u64 reserved_z1:44;
+ } __packed;
+};
+
+
+/* Define the synthetic interrupt controller event flags format. */
+union hv_synic_event_flags {
+ unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
+};
+
+/* Define SynIC control register. */
+union hv_synic_scontrol {
+ u64 as_uint64;
+ struct {
+ u64 enable:1;
+ u64 reserved:63;
+ } __packed;
+};
+
+/* Define synthetic interrupt source. */
+union hv_synic_sint {
+ u64 as_uint64;
+ struct {
+ u64 vector:8;
+ u64 reserved1:8;
+ u64 masked:1;
+ u64 auto_eoi:1;
+ u64 reserved2:46;
+ } __packed;
+};
+
+/* Define the format of the SIMP register */
+union hv_synic_simp {
+ u64 as_uint64;
+ struct {
+ u64 simp_enabled:1;
+ u64 preserved:11;
+ u64 base_simp_gpa:52;
+ } __packed;
+};
+
+/* Define the format of the SIEFP register */
+union hv_synic_siefp {
+ u64 as_uint64;
+ struct {
+ u64 siefp_enabled:1;
+ u64 preserved:11;
+ u64 base_siefp_gpa:52;
+ } __packed;
+};
struct hv_vpset {
u64 format;
u64 valid_bank_mask;
u64 bank_contents[];
-};
+} __packed;
/* HvCallSendSyntheticClusterIpi hypercall */
struct hv_send_ipi {
u32 vector;
u32 reserved;
u64 cpu_mask;
-};
+} __packed;
/* HvCallSendSyntheticClusterIpiEx hypercall */
struct hv_send_ipi_ex {
u32 vector;
u32 reserved;
struct hv_vpset vp_set;
-};
+} __packed;
/* HvFlushGuestPhysicalAddressSpace hypercalls */
struct hv_guest_mapping_flush {
u64 address_space;
u64 flags;
+} __packed;
+
+/*
+ * HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
+ * by the bitwidth of "additional_pages" in union hv_gpa_page_range.
+ */
+#define HV_MAX_FLUSH_PAGES (2048)
+
+/* HvFlushGuestPhysicalAddressList hypercall */
+union hv_gpa_page_range {
+ u64 address_space;
+ struct {
+ u64 additional_pages:11;
+ u64 largepage:1;
+ u64 basepfn:52;
+ } page;
+};
+
+/*
+ * All input flush parameters must fit in a single page. The max flush
+ * count equals the number of entries of union hv_gpa_page_range that
+ * can be populated into the input parameter page.
+ */
+#define HV_MAX_FLUSH_REP_COUNT ((PAGE_SIZE - 2 * sizeof(u64)) /	\
+		sizeof(union hv_gpa_page_range))
+
+struct hv_guest_mapping_flush_list {
+ u64 address_space;
+ u64 flags;
+ union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
};
/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
@@ -763,7 +856,7 @@ struct hv_tlb_flush {
u64 flags;
u64 processor_mask;
u64 gva_list[];
-};
+} __packed;
/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
struct hv_tlb_flush_ex {
@@ -771,6 +864,6 @@ struct hv_tlb_flush_ex {
u64 flags;
struct hv_vpset hv_vp_set;
u64 gva_list[];
-};
+} __packed;
#endif
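With 4 KiB pages and 8-byte entries, the two u64 header fields leave (4096 - 16) / 8 = 510 gpa_list entries per input page, given the parenthesized form of the macro above. A quick standalone check with stand-in names:

#include <assert.h>
#include <stdint.h>

#define MY_PAGE_SIZE	4096
#define MY_ENTRY_SIZE	sizeof(uint64_t)	/* hv_gpa_page_range is 8 bytes */
#define MY_REP_COUNT	((MY_PAGE_SIZE - 2 * sizeof(uint64_t)) / MY_ENTRY_SIZE)

static_assert(MY_REP_COUNT == 510, "510 flush entries per input page");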
diff --git a/arch/x86/include/asm/intel_pt.h b/arch/x86/include/asm/intel_pt.h
index b523f51c5400..634f99b1dc22 100644
--- a/arch/x86/include/asm/intel_pt.h
+++ b/arch/x86/include/asm/intel_pt.h
@@ -2,10 +2,36 @@
#ifndef _ASM_X86_INTEL_PT_H
#define _ASM_X86_INTEL_PT_H
+#define PT_CPUID_LEAVES 2
+#define PT_CPUID_REGS_NUM	4 /* number of registers (eax, ebx, ecx, edx) */
+
+enum pt_capabilities {
+ PT_CAP_max_subleaf = 0,
+ PT_CAP_cr3_filtering,
+ PT_CAP_psb_cyc,
+ PT_CAP_ip_filtering,
+ PT_CAP_mtc,
+ PT_CAP_ptwrite,
+ PT_CAP_power_event_trace,
+ PT_CAP_topa_output,
+ PT_CAP_topa_multiple_entries,
+ PT_CAP_single_range_output,
+ PT_CAP_output_subsys,
+ PT_CAP_payloads_lip,
+ PT_CAP_num_address_ranges,
+ PT_CAP_mtc_periods,
+ PT_CAP_cycle_thresholds,
+ PT_CAP_psb_periods,
+};
+
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
void cpu_emergency_stop_pt(void);
+extern u32 intel_pt_validate_hw_cap(enum pt_capabilities cap);
+extern u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities cap);
#else
static inline void cpu_emergency_stop_pt(void) {}
+static inline u32 intel_pt_validate_hw_cap(enum pt_capabilities cap) { return 0; }
+static inline u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability) { return 0; }
#endif
#endif /* _ASM_X86_INTEL_PT_H */
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 2395bb794c7b..fbb16e6b6c18 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -30,6 +30,9 @@ extern void fixup_irqs(void);
#ifdef CONFIG_HAVE_KVM
extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
+extern __visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs);
+extern __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs);
+extern __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs);
#endif
extern void (*x86_platform_ipi_callback)(void);
@@ -41,9 +44,13 @@ extern __visible unsigned int do_IRQ(struct pt_regs *regs);
extern void init_ISA_irqs(void);
+extern void __init init_IRQ(void);
+
#ifdef CONFIG_X86_LOCAL_APIC
void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
bool exclude_self);
+
+extern __visible void smp_x86_platform_ipi(struct pt_regs *regs);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
#endif
diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
index 800ffce0db29..80b35e3adf03 100644
--- a/arch/x86/include/asm/irq_work.h
+++ b/arch/x86/include/asm/irq_work.h
@@ -10,6 +10,7 @@ static inline bool arch_irq_work_has_interrupt(void)
return boot_cpu_has(X86_FEATURE_APIC);
}
extern void arch_irq_work_raise(void);
+extern __visible void smp_irq_work_interrupt(struct pt_regs *regs);
#else
static inline bool arch_irq_work_has_interrupt(void)
{
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index a5fb34fe56a4..21efc9d07ed9 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -2,6 +2,19 @@
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H
+#ifndef HAVE_JUMP_LABEL
+/*
+ * For better or for worse, if jump labels (the gcc extension) are missing,
+ * then the entire static branch patching infrastructure is compiled out.
+ * If that happens, the code in here will malfunction. Raise a compiler
+ * error instead.
+ *
+ * In theory, jump labels and the static branch patching infrastructure
+ * could be decoupled to fix this.
+ */
+#error asm/jump_label.h included on a non-jump-label kernel
+#endif
+
#define JUMP_LABEL_NOP_SIZE 5
#ifdef CONFIG_X86_64
@@ -20,9 +33,15 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" "
- "branch=\"%c1\""
- : : "i" (key), "i" (branch) : : l_yes);
+ asm_volatile_goto("1:"
+ ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+ ".pushsection __jump_table, \"aw\" \n\t"
+ _ASM_ALIGN "\n\t"
+ ".long 1b - ., %l[l_yes] - . \n\t"
+ _ASM_PTR "%c0 + %c1 - .\n\t"
+ ".popsection \n\t"
+ : : "i" (key), "i" (branch) : : l_yes);
+
return false;
l_yes:
return true;
@@ -30,8 +49,14 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" "
- "branch=\"%c1\""
+ asm_volatile_goto("1:"
+ ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
+ "2:\n\t"
+ ".pushsection __jump_table, \"aw\" \n\t"
+ _ASM_ALIGN "\n\t"
+ ".long 1b - ., %l[l_yes] - . \n\t"
+ _ASM_PTR "%c0 + %c1 - .\n\t"
+ ".popsection \n\t"
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -41,26 +66,37 @@ l_yes:
#else /* __ASSEMBLY__ */
-.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req
-.Lstatic_branch_nop_\@:
- .byte STATIC_KEY_INIT_NOP
-.Lstatic_branch_no_after_\@:
+.macro STATIC_JUMP_IF_TRUE target, key, def
+.Lstatic_jump_\@:
+ .if \def
+ /* Equivalent to "jmp.d32 \target" */
+ .byte 0xe9
+ .long \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+ .else
+ .byte STATIC_KEY_INIT_NOP
+ .endif
.pushsection __jump_table, "aw"
_ASM_ALIGN
- .long .Lstatic_branch_nop_\@ - ., \l_yes - .
- _ASM_PTR \key + \branch - .
+ .long .Lstatic_jump_\@ - ., \target - .
+ _ASM_PTR \key - .
.popsection
.endm
-.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
-.Lstatic_branch_jmp_\@:
- .byte 0xe9
- .long \l_yes - .Lstatic_branch_jmp_after_\@
-.Lstatic_branch_jmp_after_\@:
+.macro STATIC_JUMP_IF_FALSE target, key, def
+.Lstatic_jump_\@:
+ .if \def
+ .byte STATIC_KEY_INIT_NOP
+ .else
+ /* Equivalent to "jmp.d32 \target" */
+ .byte 0xe9
+ .long \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+ .endif
.pushsection __jump_table, "aw"
_ASM_ALIGN
- .long .Lstatic_branch_jmp_\@ - ., \l_yes - .
- _ASM_PTR \key + \branch - .
+ .long .Lstatic_jump_\@ - ., \target - .
+ _ASM_PTR \key + 1 - .
.popsection
.endm
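Both the C and assembly variants now emit relative __jump_table entries: two 32-bit self-relative offsets plus a pointer-sized self-relative key, with the branch polarity folded into the key's low bit. A sketch of the resulting layout, illustrative rather than the generic struct jump_entry:

#include <stdint.h>

struct jump_entry_rel {
	int32_t  code;		/* instruction address - &code */
	int32_t  target;	/* jump target - &target */
	intptr_t key;		/* (&static_key | branch) - &key */
};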
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 55e51ff7e421..4660ce90de7f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -439,6 +439,11 @@ struct kvm_mmu {
u64 pdptrs[4]; /* pae */
};
+struct kvm_tlb_range {
+ u64 start_gfn;
+ u64 pages;
+};
+
enum pmc_type {
KVM_PMC_GP = 0,
KVM_PMC_FIXED,
@@ -497,7 +502,7 @@ struct kvm_mtrr {
struct kvm_vcpu_hv_stimer {
struct hrtimer timer;
int index;
- u64 config;
+ union hv_stimer_config config;
u64 count;
u64 exp_time;
struct hv_message msg;
@@ -601,17 +606,16 @@ struct kvm_vcpu_arch {
/*
* QEMU userspace and the guest each have their own FPU state.
- * In vcpu_run, we switch between the user and guest FPU contexts.
- * While running a VCPU, the VCPU thread will have the guest FPU
- * context.
+	 * In vcpu_run, we switch between the user FPU context, maintained
+	 * in task_struct, and the guest FPU context. While running a VCPU,
+ * the VCPU thread will have the guest FPU context.
*
* Note that while the PKRU state lives inside the fpu registers,
* it is switched out separately at VMENTER and VMEXIT time. The
* "guest_fpu" state here contains the guest FPU context, with the
	 * host PKRU bits.
*/
- struct fpu user_fpu;
- struct fpu guest_fpu;
+ struct fpu *guest_fpu;
u64 xcr0;
u64 guest_supported_xcr0;
@@ -1042,6 +1046,8 @@ struct kvm_x86_ops {
void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
int (*tlb_remote_flush)(struct kvm *kvm);
+ int (*tlb_remote_flush_with_range)(struct kvm *kvm,
+ struct kvm_tlb_range *range);
/*
* Flush any TLB entries associated with the given GVA.
@@ -1094,7 +1100,8 @@ struct kvm_x86_ops {
bool (*has_wbinvd_exit)(void);
u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
- void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+ /* Returns actual tsc_offset set in active VMCS */
+ u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
@@ -1105,6 +1112,7 @@ struct kvm_x86_ops {
bool (*mpx_supported)(void);
bool (*xsaves_supported)(void);
bool (*umip_emulated)(void);
+ bool (*pt_supported)(void);
int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
@@ -1185,6 +1193,7 @@ struct kvm_x86_ops {
int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version);
+ uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
};
struct kvm_arch_async_pf {
@@ -1195,6 +1204,7 @@ struct kvm_arch_async_pf {
};
extern struct kvm_x86_ops *kvm_x86_ops;
+extern struct kmem_cache *x86_fpu_cache;
#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
@@ -1491,7 +1501,7 @@ asmlinkage void kvm_spurious_fault(void);
"cmpb $0, kvm_rebooting \n\t" \
"jne 668b \n\t" \
__ASM_SIZE(push) " $666b \n\t" \
- "call kvm_spurious_fault \n\t" \
+ "jmp kvm_spurious_fault \n\t" \
".popsection \n\t" \
_ASM_EXTABLE(666b, 667b)
@@ -1502,7 +1512,7 @@ asmlinkage void kvm_spurious_fault(void);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 4c723632c036..5ed3cf1c3934 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -92,6 +92,7 @@ void kvm_async_pf_task_wait(u32 token, int interrupt_kernel);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);
+void do_async_page_fault(struct pt_regs *regs, unsigned long error_code);
#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 1d0a7778e163..cc60e617931c 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -22,6 +22,11 @@ struct ms_hyperv_info {
extern struct ms_hyperv_info ms_hyperv;
+
+typedef int (*hyperv_fill_flush_list_func)(
+ struct hv_guest_mapping_flush_list *flush,
+ void *data);
+
/*
* Generate the guest ID.
*/
@@ -348,6 +353,11 @@ void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);
+int hyperv_flush_guest_mapping_range(u64 as,
+ hyperv_fill_flush_list_func fill_func, void *data);
+int hyperv_fill_flush_guest_mapping_list(
+ struct hv_guest_mapping_flush_list *flush,
+		u64 start_gfn, u64 pages);
#ifdef CONFIG_X86_64
void hv_apic_init(void);
@@ -370,6 +380,11 @@ static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
+static inline int hyperv_flush_guest_mapping_range(u64 as,
+ hyperv_fill_flush_list_func fill_func, void *data)
+{
+ return -1;
+}
#endif /* CONFIG_HYPERV */
#ifdef CONFIG_HYPERV_TSCPAGE
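A hypothetical caller sketch of the range-flush API: the fill callback populates the per-cpu input page, typically by delegating to hyperv_fill_flush_guest_mapping_list(). The kvm_tlb_range pairing mirrors the KVM hookup elsewhere in this series but is illustrative only:

static int my_fill_func(struct hv_guest_mapping_flush_list *flush, void *data)
{
	struct kvm_tlb_range *range = data;	/* caller-supplied range */

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
						    range->pages);
}

/* then: ret = hyperv_flush_guest_mapping_range(root, my_fill_func, &range); */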
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 80f4a4f38c79..8e40c2446fd1 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -41,9 +41,10 @@
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
-#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
+#define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
-#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
@@ -120,7 +121,43 @@
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
#define MSR_IA32_RTIT_CTL 0x00000570
+#define RTIT_CTL_TRACEEN BIT(0)
+#define RTIT_CTL_CYCLEACC BIT(1)
+#define RTIT_CTL_OS BIT(2)
+#define RTIT_CTL_USR BIT(3)
+#define RTIT_CTL_PWR_EVT_EN BIT(4)
+#define RTIT_CTL_FUP_ON_PTW BIT(5)
+#define RTIT_CTL_FABRIC_EN BIT(6)
+#define RTIT_CTL_CR3EN BIT(7)
+#define RTIT_CTL_TOPA BIT(8)
+#define RTIT_CTL_MTC_EN BIT(9)
+#define RTIT_CTL_TSC_EN BIT(10)
+#define RTIT_CTL_DISRETC BIT(11)
+#define RTIT_CTL_PTW_EN BIT(12)
+#define RTIT_CTL_BRANCH_EN BIT(13)
+#define RTIT_CTL_MTC_RANGE_OFFSET 14
+#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
+#define RTIT_CTL_CYC_THRESH_OFFSET 19
+#define RTIT_CTL_CYC_THRESH (0x0full << RTIT_CTL_CYC_THRESH_OFFSET)
+#define RTIT_CTL_PSB_FREQ_OFFSET 24
+#define RTIT_CTL_PSB_FREQ (0x0full << RTIT_CTL_PSB_FREQ_OFFSET)
+#define RTIT_CTL_ADDR0_OFFSET 32
+#define RTIT_CTL_ADDR0 (0x0full << RTIT_CTL_ADDR0_OFFSET)
+#define RTIT_CTL_ADDR1_OFFSET 36
+#define RTIT_CTL_ADDR1 (0x0full << RTIT_CTL_ADDR1_OFFSET)
+#define RTIT_CTL_ADDR2_OFFSET 40
+#define RTIT_CTL_ADDR2 (0x0full << RTIT_CTL_ADDR2_OFFSET)
+#define RTIT_CTL_ADDR3_OFFSET 44
+#define RTIT_CTL_ADDR3 (0x0full << RTIT_CTL_ADDR3_OFFSET)
#define MSR_IA32_RTIT_STATUS 0x00000571
+#define RTIT_STATUS_FILTEREN BIT(0)
+#define RTIT_STATUS_CONTEXTEN BIT(1)
+#define RTIT_STATUS_TRIGGEREN BIT(2)
+#define RTIT_STATUS_BUFFOVF BIT(3)
+#define RTIT_STATUS_ERROR BIT(4)
+#define RTIT_STATUS_STOPPED BIT(5)
+#define RTIT_STATUS_BYTECNT_OFFSET 32
+#define RTIT_STATUS_BYTECNT (0x1ffffull << RTIT_STATUS_BYTECNT_OFFSET)
#define MSR_IA32_RTIT_ADDR0_A 0x00000580
#define MSR_IA32_RTIT_ADDR0_B 0x00000581
#define MSR_IA32_RTIT_ADDR1_A 0x00000582
@@ -389,6 +426,7 @@
#define MSR_F15H_NB_PERF_CTR 0xc0010241
#define MSR_F15H_PTSC 0xc0010280
#define MSR_F15H_IC_CFG 0xc0011021
+#define MSR_F15H_EX_CFG 0xc001102c
/* Fam 10h MSRs */
#define MSR_FAM10H_MMIO_CONF_BASE 0xc0010058
@@ -770,6 +808,7 @@
#define VMX_BASIC_INOUT 0x0040000000000000LLU
/* MSR_IA32_VMX_MISC bits */
+#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14)
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
/* AMD-V MSRs */
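The new RTIT_CTL defines follow the usual mask-plus-offset convention for multi-bit fields: a shifted mask (e.g. RTIT_CTL_MTC_RANGE) paired with its offset. A small user-space sketch of composing and decoding a register value under those conventions (only a few of the bits are reproduced; the actual wrmsr is omitted):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1ULL << (n))
#define RTIT_CTL_TRACEEN	BIT(0)
#define RTIT_CTL_TSC_EN		BIT(10)
#define RTIT_CTL_BRANCH_EN	BIT(13)
#define RTIT_CTL_MTC_RANGE_OFFSET 14
#define RTIT_CTL_MTC_RANGE	(0x0fULL << RTIT_CTL_MTC_RANGE_OFFSET)

int main(void)
{
	/* Compose: trace with TSC packets, placing 3 in the MTC range field. */
	uint64_t ctl = RTIT_CTL_TRACEEN | RTIT_CTL_TSC_EN | RTIT_CTL_BRANCH_EN;

	ctl |= (3ULL << RTIT_CTL_MTC_RANGE_OFFSET) & RTIT_CTL_MTC_RANGE;

	/* Decode: mask first, then shift the field back down. */
	unsigned int mtc = (ctl & RTIT_CTL_MTC_RANGE) >> RTIT_CTL_MTC_RANGE_OFFSET;

	printf("ctl=%#llx mtc_range=%u\n", (unsigned long long)ctl, mtc);
	return 0;
}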
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 80dc14422495..dad12b767ba0 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -3,6 +3,8 @@
#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_
+#include <linux/static_key.h>
+
#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
@@ -162,11 +164,12 @@
_ASM_PTR " 999b\n\t" \
".popsection\n\t"
-#if defined(CONFIG_X86_64) && defined(RETPOLINE)
+#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_X86_64
/*
- * Since the inline asm uses the %V modifier which is only in newer GCC,
- * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
+ * Inline asm uses the %V modifier, which is only available in newer
+ * GCC; that GCC version is ensured when CONFIG_RETPOLINE is defined.
*/
# define CALL_NOSPEC \
ANNOTATE_NOSPEC_ALTERNATIVE \
@@ -181,7 +184,7 @@
X86_FEATURE_RETPOLINE_AMD)
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
-#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
+#else /* CONFIG_X86_32 */
/*
* For i386 we use the original ret-equivalent retpoline, because
* otherwise we'll run out of registers. We don't care about CET
@@ -211,6 +214,7 @@
X86_FEATURE_RETPOLINE_AMD)
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
@@ -219,13 +223,20 @@
/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
SPECTRE_V2_NONE,
- SPECTRE_V2_RETPOLINE_MINIMAL,
- SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
SPECTRE_V2_RETPOLINE_GENERIC,
SPECTRE_V2_RETPOLINE_AMD,
SPECTRE_V2_IBRS_ENHANCED,
};
+/* The indirect branch speculation control variants */
+enum spectre_v2_user_mitigation {
+ SPECTRE_V2_USER_NONE,
+ SPECTRE_V2_USER_STRICT,
+ SPECTRE_V2_USER_STRICT_PREFERRED,
+ SPECTRE_V2_USER_PRCTL,
+ SPECTRE_V2_USER_SECCOMP,
+};
+
/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
SPEC_STORE_BYPASS_NONE,
@@ -303,6 +314,10 @@ do { \
preempt_enable(); \
} while (0)
+DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
#endif /* __ASSEMBLY__ */
/*
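The retpoline construction this header selects can be demonstrated outside the kernel. Below is a minimal user-space sketch (x86-64 with GCC or Clang assumed; retpoline_call and hello are illustrative names, not kernel symbols): the indirect target is reached through a matched call/ret pair so the RSB, not the indirect branch predictor, supplies the destination, while the speculative path is parked in a pause/lfence loop.

#include <stdio.h>

typedef void (*fn_t)(void);

/* Retpoline: jump to the address in %rdi via a call/ret pair, steering
 * speculation through the RSB; the speculative path spins harmlessly. */
void retpoline_call(fn_t target);
asm(".globl retpoline_call\n"
    "retpoline_call:\n"
    "	call 1f\n"		/* pushes the address of the trap below */
    "2:	pause\n"
    "	lfence\n"
    "	jmp 2b\n"		/* capture speculation until it retires */
    "1:	mov %rdi, (%rsp)\n"	/* replace return address with the target */
    "	ret\n");		/* 'return' == jump to the real target */

static void hello(void)
{
	puts("reached via retpoline");
}

int main(void)
{
	retpoline_call(hello);
	return 0;
}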
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4bf42f9e4eea..a97f28d914d5 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -26,6 +26,11 @@ struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
+__visible void __native_queued_spin_unlock(struct qspinlock *lock);
+bool pv_is_native_spin_unlock(void);
+__visible bool __native_vcpu_is_preempted(long cpu);
+bool pv_is_native_vcpu_is_preempted(void);
+
static inline u64 paravirt_steal_clock(int cpu)
{
return PVOP_CALL1(u64, time.steal_clock, cpu);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 26942ad63830..488c59686a73 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -348,11 +348,23 @@ extern struct paravirt_patch_template pv_ops;
#define paravirt_clobber(clobber) \
[paravirt_clobber] "i" (clobber)
+/*
+ * Generate some code, and mark it as patchable by the
+ * apply_paravirt() alternate instruction patcher.
+ */
+#define _paravirt_alt(insn_string, type, clobber) \
+ "771:\n\t" insn_string "\n" "772:\n" \
+ ".pushsection .parainstructions,\"a\"\n" \
+ _ASM_ALIGN "\n" \
+ _ASM_PTR " 771b\n" \
+ " .byte " type "\n" \
+ " .byte 772b-771b\n" \
+ " .short " clobber "\n" \
+ ".popsection\n"
+
/* Generate patchable code, with the default asm parameters. */
-#define paravirt_call \
- "PARAVIRT_CALL type=\"%c[paravirt_typenum]\"" \
- " clobber=\"%c[paravirt_clobber]\"" \
- " pv_opptr=\"%c[paravirt_opptr]\";"
+#define paravirt_alt(insn_string) \
+ _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
@@ -373,6 +385,16 @@ unsigned native_patch(u8 type, void *ibuf, unsigned long addr, unsigned len);
int paravirt_disable_iospace(void);
/*
+ * This generates an indirect call based on the operation type number.
+ * The type number, computed in PARAVIRT_PATCH, is derived from the
+ * offset into the paravirt_patch_template structure, and can therefore be
+ * freely converted back into a structure offset.
+ */
+#define PARAVIRT_CALL \
+ ANNOTATE_RETPOLINE_SAFE \
+ "call *%c[paravirt_opptr];"
+
+/*
* These macros are intended to wrap calls through one of the paravirt
* ops structs, so that they can be later identified and patched at
* runtime.
@@ -509,7 +531,7 @@ int paravirt_disable_iospace(void);
/* since this condition will never hold */ \
if (sizeof(rettype) > sizeof(unsigned long)) { \
asm volatile(pre \
- paravirt_call \
+ paravirt_alt(PARAVIRT_CALL) \
post \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
@@ -519,7 +541,7 @@ int paravirt_disable_iospace(void);
__ret = (rettype)((((u64)__edx) << 32) | __eax); \
} else { \
asm volatile(pre \
- paravirt_call \
+ paravirt_alt(PARAVIRT_CALL) \
post \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
@@ -546,7 +568,7 @@ int paravirt_disable_iospace(void);
PVOP_VCALL_ARGS; \
PVOP_TEST_NULL(op); \
asm volatile(pre \
- paravirt_call \
+ paravirt_alt(PARAVIRT_CALL) \
post \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
@@ -664,26 +686,6 @@ struct paravirt_patch_site {
extern struct paravirt_patch_site __parainstructions[],
__parainstructions_end[];
-#else /* __ASSEMBLY__ */
-
-/*
- * This generates an indirect call based on the operation type number.
- * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_patch_template structure, and can therefore be
- * freely converted back into a structure offset.
- */
-.macro PARAVIRT_CALL type:req clobber:req pv_opptr:req
-771: ANNOTATE_RETPOLINE_SAFE
- call *\pv_opptr
-772: .pushsection .parainstructions,"a"
- _ASM_ALIGN
- _ASM_PTR 771b
- .byte \type
- .byte 772b-771b
- .short \clobber
- .popsection
-.endm
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_TYPES_H */
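The _paravirt_alt() wrapper above is an instance of a general pattern: emit an instruction, then record its address, length, and metadata in a dedicated ELF section so a patcher can find and rewrite it later. A self-contained user-space sketch of that pattern (the section name mypatch and the record layout are illustrative, not the kernel's .parainstructions format, which also stores the type and clobber fields):

#include <stdio.h>

/* Record each wrapped instruction's address and length in an allocatable
 * section, the way _paravirt_alt() fills .parainstructions. */
#define PATCHABLE(insn)				\
	"771:\n\t" insn "\n772:\n"		\
	".pushsection mypatch,\"a\"\n"		\
	".quad 771b\n"				\
	".byte 772b-771b\n"			\
	".popsection\n"

/* The linker synthesizes these bounds for any section whose name is a
 * valid C identifier. */
extern char __start_mypatch[], __stop_mypatch[];

int main(void)
{
	asm volatile(PATCHABLE("nop"));

	printf("%zu bytes of patch metadata recorded\n",
	       (size_t)(__stop_mypatch - __start_mypatch));
	return 0;
}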
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 959d618dbb17..73bb404f4d2a 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -121,7 +121,14 @@ extern void __init dmi_check_pciprobe(void);
extern void __init dmi_check_skip_isa_align(void);
/* some common used subsys_initcalls */
+#ifdef CONFIG_PCI
extern int __init pci_acpi_init(void);
+#else
+static inline int __init pci_acpi_init(void)
+{
+ return -EINVAL;
+}
+#endif
extern void __init pcibios_irq_init(void);
extern int __init pcibios_init(void);
extern int pci_legacy_init(void);
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index ec7f43327033..1ea41aaef68b 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -80,6 +80,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}
+static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+{
+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+ set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+}
+
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
struct page *pte)
{
@@ -132,6 +139,12 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
+
+static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+ set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+}
#endif /* CONFIG_X86_PAE */
#if CONFIG_PGTABLE_LEVELS > 3
@@ -141,6 +154,12 @@ static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}
+static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
+ set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
+}
+
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
gfp_t gfp = GFP_KERNEL_ACCOUNT;
@@ -173,6 +192,14 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}
+static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
+{
+ if (!pgtable_l5_enabled())
+ return;
+ paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
+ set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
+}
+
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
gfp_t gfp = GFP_KERNEL_ACCOUNT;
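The *_safe populate variants added above differ from the plain ones only in the setter they call: set_pmd_safe() and friends refuse to overwrite an already-present entry that points elsewhere. A toy model of that contract, with assert() standing in for the kernel's BUG_ON() (the real check compares present bits and uses pmd_same(), not a raw zero test):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t val; } pmd_t;

static void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdp->val = pmd.val;
}

/* The _safe variant refuses to clobber a live entry pointing elsewhere. */
static void set_pmd_safe(pmd_t *pmdp, pmd_t pmd)
{
	assert(!pmdp->val || pmdp->val == pmd.val);
	set_pmd(pmdp, pmd);
}

int main(void)
{
	pmd_t entry = { 0 };

	set_pmd_safe(&entry, (pmd_t){ 0x1000 | 0x63 });	/* ok: was empty */
	set_pmd_safe(&entry, (pmd_t){ 0x1000 | 0x63 });	/* ok: same value */
	printf("%#llx\n", (unsigned long long)entry.val);
	return 0;
}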
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 84bd9bdc1987..88bca456da99 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -111,6 +111,11 @@ extern unsigned int ptrs_per_p4d;
*/
#define MAXMEM (1UL << MAX_PHYSMEM_BITS)
+#define GUARD_HOLE_PGD_ENTRY -256UL
+#define GUARD_HOLE_SIZE (16UL << PGDIR_SHIFT)
+#define GUARD_HOLE_BASE_ADDR (GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT)
+#define GUARD_HOLE_END_ADDR (GUARD_HOLE_BASE_ADDR + GUARD_HOLE_SIZE)
+
#define LDT_PGD_ENTRY -240UL
#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)
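The GUARD_HOLE arithmetic is easy to verify by hand. With 4-level paging, where PGDIR_SHIFT is 39 and each PGD entry maps 512 GiB, the sketch below reproduces the header's computation and prints the resulting 8 TiB hole at the bottom of the kernel half of the address space (the constants are copied from the hunk; the program is only a worked example and assumes the 4-level value of PGDIR_SHIFT):

#include <stdio.h>

int main(void)
{
	const unsigned long long PGDIR_SHIFT = 39;
	const unsigned long long GUARD_HOLE_PGD_ENTRY = -256LL;
	const unsigned long long GUARD_HOLE_SIZE = 16ULL << PGDIR_SHIFT;
	unsigned long long base = GUARD_HOLE_PGD_ENTRY << PGDIR_SHIFT;

	/* -256 << 39 wraps to 0xffff800000000000: the very start of the
	 * kernel half of the canonical address space. */
	printf("guard hole: %#llx - %#llx (%llu GiB)\n",
	       base, base + GUARD_HOLE_SIZE, GUARD_HOLE_SIZE >> 30);
	return 0;
}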
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 106b7d0e2dae..d6ff0bbdb394 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -564,8 +564,12 @@ extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
-extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
- unsigned numpages, unsigned long page_flags);
+extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
+ unsigned long address,
+ unsigned numpages,
+ unsigned long page_flags);
+extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned long numpages);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_DEFS_H */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 90cb2f36c042..99a7fa9ab0a3 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -8,6 +8,9 @@
DECLARE_PER_CPU(int, __preempt_count);
+/* We use the MSB mostly because it's available */
+#define PREEMPT_NEED_RESCHED 0x80000000
+
/*
* We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
* that a decrement hitting 0 means we can and should reschedule.
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index a671a1145906..04c17be9b5fd 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -26,6 +26,7 @@ void __noreturn machine_real_restart(unsigned int type);
#define MRR_APM 1
typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
+void nmi_panic_self_stop(struct pt_regs *regs);
void nmi_shootdown_cpus(nmi_shootdown_cb callback);
void run_crash_ipi_callback(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h
index a8b5e1e13319..dbaed55c1c24 100644
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -4,41 +4,6 @@
* x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
* PaX/grsecurity.
*/
-
-#ifdef __ASSEMBLY__
-
-#include <asm/asm.h>
-#include <asm/bug.h>
-
-.macro REFCOUNT_EXCEPTION counter:req
- .pushsection .text..refcount
-111: lea \counter, %_ASM_CX
-112: ud2
- ASM_UNREACHABLE
- .popsection
-113: _ASM_EXTABLE_REFCOUNT(112b, 113b)
-.endm
-
-/* Trigger refcount exception if refcount result is negative. */
-.macro REFCOUNT_CHECK_LT_ZERO counter:req
- js 111f
- REFCOUNT_EXCEPTION counter="\counter"
-.endm
-
-/* Trigger refcount exception if refcount result is zero or negative. */
-.macro REFCOUNT_CHECK_LE_ZERO counter:req
- jz 111f
- REFCOUNT_CHECK_LT_ZERO counter="\counter"
-.endm
-
-/* Trigger refcount exception unconditionally. */
-.macro REFCOUNT_ERROR counter:req
- jmp 111f
- REFCOUNT_EXCEPTION counter="\counter"
-.endm
-
-#else /* __ASSEMBLY__ */
-
#include <linux/refcount.h>
#include <asm/bug.h>
@@ -50,12 +15,35 @@
* central refcount exception. The fixup address for the exception points
* back to the regular execution flow in .text.
*/
+#define _REFCOUNT_EXCEPTION \
+ ".pushsection .text..refcount\n" \
+ "111:\tlea %[var], %%" _ASM_CX "\n" \
+ "112:\t" ASM_UD2 "\n" \
+ ASM_UNREACHABLE \
+ ".popsection\n" \
+ "113:\n" \
+ _ASM_EXTABLE_REFCOUNT(112b, 113b)
+
+/* Trigger refcount exception if refcount result is negative. */
+#define REFCOUNT_CHECK_LT_ZERO \
+ "js 111f\n\t" \
+ _REFCOUNT_EXCEPTION
+
+/* Trigger refcount exception if refcount result is zero or negative. */
+#define REFCOUNT_CHECK_LE_ZERO \
+ "jz 111f\n\t" \
+ REFCOUNT_CHECK_LT_ZERO
+
+/* Trigger refcount exception unconditionally. */
+#define REFCOUNT_ERROR \
+ "jmp 111f\n\t" \
+ _REFCOUNT_EXCEPTION
static __always_inline void refcount_add(unsigned int i, refcount_t *r)
{
asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
- "REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
- : [counter] "+m" (r->refs.counter)
+ REFCOUNT_CHECK_LT_ZERO
+ : [var] "+m" (r->refs.counter)
: "ir" (i)
: "cc", "cx");
}
@@ -63,32 +51,31 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r)
static __always_inline void refcount_inc(refcount_t *r)
{
asm volatile(LOCK_PREFIX "incl %0\n\t"
- "REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
- : [counter] "+m" (r->refs.counter)
+ REFCOUNT_CHECK_LT_ZERO
+ : [var] "+m" (r->refs.counter)
: : "cc", "cx");
}
static __always_inline void refcount_dec(refcount_t *r)
{
asm volatile(LOCK_PREFIX "decl %0\n\t"
- "REFCOUNT_CHECK_LE_ZERO counter=\"%[counter]\""
- : [counter] "+m" (r->refs.counter)
+ REFCOUNT_CHECK_LE_ZERO
+ : [var] "+m" (r->refs.counter)
: : "cc", "cx");
}
static __always_inline __must_check
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
-
return GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
- "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
+ REFCOUNT_CHECK_LT_ZERO,
r->refs.counter, e, "er", i, "cx");
}
static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
return GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
- "REFCOUNT_CHECK_LT_ZERO counter=\"%[var]\"",
+ REFCOUNT_CHECK_LT_ZERO,
r->refs.counter, e, "cx");
}
@@ -106,8 +93,8 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
/* Did we try to increment from/to an undesirable state? */
if (unlikely(c < 0 || c == INT_MAX || result < c)) {
- asm volatile("REFCOUNT_ERROR counter=\"%[counter]\""
- : : [counter] "m" (r->refs.counter)
+ asm volatile(REFCOUNT_ERROR
+ : : [var] "m" (r->refs.counter)
: "cc", "cx");
break;
}
@@ -122,6 +109,4 @@ static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
return refcount_add_not_zero(1, r);
}
-#endif /* __ASSEMBLY__ */
-
#endif
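The refcount_add_not_zero() loop above is easier to read in plain C. The sketch below mirrors its control flow, with C11 atomics standing in for the kernel's cmpxchg and the REFCOUNT_ERROR trap replaced by an error message (so the kernel's saturate-and-continue behavior is simplified to an early return):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool refcount_add_not_zero(unsigned int i, atomic_int *refs)
{
	int c = atomic_load(refs);
	int result;

	do {
		if (c == 0)
			return false;	/* never resurrect a dead object */

		result = c + (int)i;

		/* Did we try to increment from/to an undesirable state? */
		if (c < 0 || c == INT_MAX || result < c) {
			fprintf(stderr, "refcount saturation\n");
			return false;	/* kernel: REFCOUNT_ERROR trap */
		}
	} while (!atomic_compare_exchange_weak(refs, &c, result));

	return true;
}

int main(void)
{
	atomic_int r = 1;

	printf("%d\n", refcount_add_not_zero(1, &r));	/* 1: went 1 -> 2 */
	atomic_store(&r, 0);
	printf("%d\n", refcount_add_not_zero(1, &r));	/* 0: stays dead */
	return 0;
}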
diff --git a/arch/x86/include/asm/intel_rdt_sched.h b/arch/x86/include/asm/resctrl_sched.h
index 9acb06b6f81e..54990fe2a3ae 100644
--- a/arch/x86/include/asm/intel_rdt_sched.h
+++ b/arch/x86/include/asm/resctrl_sched.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_SCHED_H
-#define _ASM_X86_INTEL_RDT_SCHED_H
+#ifndef _ASM_X86_RESCTRL_SCHED_H
+#define _ASM_X86_RESCTRL_SCHED_H
-#ifdef CONFIG_INTEL_RDT
+#ifdef CONFIG_RESCTRL
#include <linux/sched.h>
#include <linux/jump_label.h>
@@ -10,7 +10,7 @@
#define IA32_PQR_ASSOC 0x0c8f
/**
- * struct intel_pqr_state - State cache for the PQR MSR
+ * struct resctrl_pqr_state - State cache for the PQR MSR
* @cur_rmid: The cached Resource Monitoring ID
* @cur_closid: The cached Class Of Service ID
* @default_rmid: The user assigned Resource Monitoring ID
@@ -24,21 +24,21 @@
* The cache also helps to avoid pointless updates if the value does
* not change.
*/
-struct intel_pqr_state {
+struct resctrl_pqr_state {
u32 cur_rmid;
u32 cur_closid;
u32 default_rmid;
u32 default_closid;
};
-DECLARE_PER_CPU(struct intel_pqr_state, pqr_state);
+DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state);
DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
/*
- * __intel_rdt_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
+ * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR
*
 * The following considerations are made so that this has minimal impact
 * on the scheduler hot path:
@@ -51,9 +51,9 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
* simple as possible.
* Must be called with preemption disabled.
*/
-static void __intel_rdt_sched_in(void)
+static void __resctrl_sched_in(void)
{
- struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+ struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
u32 closid = state->default_closid;
u32 rmid = state->default_rmid;
@@ -78,16 +78,16 @@ static void __intel_rdt_sched_in(void)
}
}
-static inline void intel_rdt_sched_in(void)
+static inline void resctrl_sched_in(void)
{
if (static_branch_likely(&rdt_enable_key))
- __intel_rdt_sched_in();
+ __resctrl_sched_in();
}
#else
-static inline void intel_rdt_sched_in(void) {}
+static inline void resctrl_sched_in(void) {}
-#endif /* CONFIG_INTEL_RDT */
+#endif /* CONFIG_RESCTRL */
-#endif /* _ASM_X86_INTEL_RDT_SCHED_H */
+#endif /* _ASM_X86_RESCTRL_SCHED_H */
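The per-CPU PQR state cache exists purely to skip redundant MSR writes on the scheduler hot path. A user-space sketch of that caching pattern follows; wrmsr_pqr() is a stand-in printf, and the task-state plumbing is simplified to two arguments rather than the closid/rmid fields read off the task in the real __resctrl_sched_in():

#include <stdint.h>
#include <stdio.h>

struct resctrl_pqr_state {
	uint32_t cur_rmid, cur_closid;
	uint32_t default_rmid, default_closid;
};

static struct resctrl_pqr_state pqr_state;

/* Stand-in for wrmsr(IA32_PQR_ASSOC, ...). */
static void wrmsr_pqr(uint32_t rmid, uint32_t closid)
{
	printf("IA32_PQR_ASSOC <- rmid=%u closid=%u\n", rmid, closid);
}

static void resctrl_sched_in(uint32_t task_rmid, uint32_t task_closid)
{
	struct resctrl_pqr_state *state = &pqr_state;
	uint32_t closid = task_closid ? task_closid : state->default_closid;
	uint32_t rmid = task_rmid ? task_rmid : state->default_rmid;

	/* The cache avoids pointless updates if nothing changed. */
	if (closid != state->cur_closid || rmid != state->cur_rmid) {
		state->cur_closid = closid;
		state->cur_rmid = rmid;
		wrmsr_pqr(rmid, closid);
	}
}

int main(void)
{
	resctrl_sched_in(1, 2);	/* first switch: MSR written */
	resctrl_sched_in(1, 2);	/* same IDs: write skipped */
	return 0;
}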
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index ae13bc974416..ed8ec011a9fd 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -46,6 +46,9 @@ extern unsigned long saved_video_mode;
extern void reserve_standard_io_resources(void);
extern void i386_reserve_resources(void);
+extern unsigned long __startup_64(unsigned long physaddr, struct boot_params *bp);
+extern unsigned long __startup_secondary_64(void);
+extern int early_make_pgtable(unsigned long address);
#ifdef CONFIG_X86_INTEL_MID
extern void x86_intel_mid_early_setup(void);
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
index bd26834724e5..2fcbd6f33ef7 100644
--- a/arch/x86/include/asm/sighandling.h
+++ b/arch/x86/include/asm/sighandling.h
@@ -17,4 +17,9 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
struct pt_regs *regs, unsigned long mask);
+
+#ifdef CONFIG_X86_X32_ABI
+asmlinkage long sys32_x32_rt_sigreturn(void);
+#endif
+
#endif /* _ASM_X86_SIGHANDLING_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 547c4fe50711..2e95b6c1bca3 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -148,6 +148,12 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
void smp_store_boot_cpu_info(void);
void smp_store_cpu_info(int id);
+
+asmlinkage __visible void smp_reboot_interrupt(void);
+__visible void smp_reschedule_interrupt(struct pt_regs *regs);
+__visible void smp_call_function_interrupt(struct pt_regs *regs);
+__visible void smp_call_function_single_interrupt(struct pt_regs *r);
+
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
#define cpu_acpi_id(cpu) per_cpu(x86_cpu_to_acpiid, cpu)
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
index ae7c2c5cd7f0..5393babc0598 100644
--- a/arch/x86/include/asm/spec-ctrl.h
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}
+static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
+{
+ BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+ return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
{
BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}
+static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+ BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+ return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
@@ -70,11 +82,7 @@ extern void speculative_store_bypass_ht_init(void);
static inline void speculative_store_bypass_ht_init(void) { }
#endif
-extern void speculative_store_bypass_update(unsigned long tif);
-
-static inline void speculative_store_bypass_update_current(void)
-{
- speculative_store_bypass_update(current_thread_info()->flags);
-}
+extern void speculation_ctrl_update(unsigned long tif);
+extern void speculation_ctrl_update_current(void);
#endif
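stibp_tif_to_spec_ctrl() relocates a flag from its TIF bit position (TIF_SPEC_IB is 9, per the thread_info.h hunk below) down to the STIBP bit of MSR_IA32_SPEC_CTRL with a single shift; the BUILD_BUG_ON guards the assumption that the TIF bit sits above the MSR bit. A worked user-space example of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_STIBP_SHIFT	1
#define SPEC_CTRL_STIBP		(1ULL << SPEC_CTRL_STIBP_SHIFT)
#define TIF_SPEC_IB		9
#define _TIF_SPEC_IB		(1ULL << TIF_SPEC_IB)

static uint64_t stibp_tif_to_spec_ctrl(uint64_t tifn)
{
	/* Isolate bit 9, then slide it down to bit 1 in one shift. */
	return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
}

int main(void)
{
	uint64_t msrbits = stibp_tif_to_spec_ctrl(_TIF_SPEC_IB);

	printf("%#llx (SPEC_CTRL_STIBP is %#llx)\n",
	       (unsigned long long)msrbits,
	       (unsigned long long)SPEC_CTRL_STIBP);
	return 0;
}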
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 93b462e48067..dec9c1e84c78 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -290,11 +290,4 @@ struct __attribute__ ((__packed__)) vmcb {
#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
-#define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda"
-#define SVM_VMRUN ".byte 0x0f, 0x01, 0xd8"
-#define SVM_VMSAVE ".byte 0x0f, 0x01, 0xdb"
-#define SVM_CLGI ".byte 0x0f, 0x01, 0xdd"
-#define SVM_STGI ".byte 0x0f, 0x01, 0xdc"
-#define SVM_INVLPGA ".byte 0x0f, 0x01, 0xdf"
-
#endif
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 36bd243843d6..7cf1a270d891 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -11,9 +11,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
__visible struct task_struct *__switch_to(struct task_struct *prev,
struct task_struct *next);
-struct tss_struct;
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
- struct tss_struct *tss);
/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *next)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 2ff2a30a264f..e0eccbcb8447 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -79,10 +79,12 @@ struct thread_info {
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
-#define TIF_SSBD 5 /* Reduced data speculation */
+#define TIF_SSBD 5 /* Speculative store bypass disable */
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
+#define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */
+#define TIF_SPEC_FORCE_UPDATE 10 /* Force speculation MSR update in context switch */
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_PATCH_PENDING 13 /* pending live patching update */
@@ -110,6 +112,8 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_SPEC_IB (1 << TIF_SPEC_IB)
+#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
@@ -136,17 +140,19 @@ struct thread_info {
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
_TIF_NOHZ)
-/* work to do on any return to user space */
-#define _TIF_ALLWORK_MASK \
- (_TIF_SYSCALL_TRACE | _TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
- _TIF_NEED_RESCHED | _TIF_SINGLESTEP | _TIF_SYSCALL_EMU | \
- _TIF_SYSCALL_AUDIT | _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE | \
- _TIF_PATCH_PENDING | _TIF_NOHZ | _TIF_SYSCALL_TRACEPOINT | \
- _TIF_FSCHECK)
-
/* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+#define _TIF_WORK_CTXSW_BASE \
+ (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \
+ _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
+
+/*
+ * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
+ */
+#ifdef CONFIG_SMP
+# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
+#else
+# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE)
+#endif
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
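A simplified sketch of how this mask gates the context-switch slow path: __switch_to_xtra() is only entered when a flagged bit is set in either task. (The kernel actually tests prev and next against the separate _TIF_WORK_CTXSW_PREV/_TIF_WORK_CTXSW_NEXT masks; the single-mask test below is a simplification, and only two of the bits are reproduced.)

#include <stdio.h>

#define TIF_SSBD	5
#define TIF_SPEC_IB	9
#define _TIF_SSBD	(1UL << TIF_SSBD)
#define _TIF_SPEC_IB	(1UL << TIF_SPEC_IB)

/* SMP build: STIBP is evaluated, so TIF_SPEC_IB joins the mask. */
#define _TIF_WORK_CTXSW	(_TIF_SSBD | _TIF_SPEC_IB)

int main(void)
{
	unsigned long prev_flags = _TIF_SPEC_IB;
	unsigned long next_flags = 0;

	if ((prev_flags | next_flags) & _TIF_WORK_CTXSW)
		puts("slow path: __switch_to_xtra()");
	else
		puts("fast path: skip __switch_to_xtra()");
	return 0;
}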
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d760611cfc35..f4204bf377fc 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -169,10 +169,14 @@ struct tlb_state {
#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+ /* Last user mm for optimizing IBPB */
+ union {
+ struct mm_struct *last_user_mm;
+ unsigned long last_user_mm_ibpb;
+ };
+
u16 loaded_mm_asid;
u16 next_asid;
- /* last user mm's ctx id */
- u64 last_ctx_id;
/*
* We can be in one of several states:
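The union above enables a pointer-tagging trick: spare bits of the stored mm pointer carry the "IBPB wanted" state, so one comparison answers both "same mm?" and "does the incoming task want protection?". A sketch of that flow follows; the helper names echo the switch_mm() changes elsewhere in this series, but the bit value and logic here are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Pointers are at least word aligned, so bit 0 is free to carry state. */
#define LAST_USER_MM_IBPB	0x1UL

static uintptr_t last_user_mm_ibpb;

static uintptr_t mm_mangle_spec_ib(uintptr_t mm, int wants_ibpb)
{
	return mm | (wants_ibpb ? LAST_USER_MM_IBPB : 0);
}

static void cond_ibpb(uintptr_t next_mm, int wants_ibpb)
{
	uintptr_t next = mm_mangle_spec_ib(next_mm, wants_ibpb);

	/* One compare: the mangled value changed AND the incoming task
	 * asked for the barrier. */
	if (next != last_user_mm_ibpb && (next & LAST_USER_MM_IBPB))
		puts("IBPB issued");

	last_user_mm_ibpb = next;
}

int main(void)
{
	cond_ibpb(0x1000, 1);	/* new protected mm -> barrier */
	cond_ibpb(0x1000, 1);	/* same mm -> skipped */
	cond_ibpb(0x2000, 0);	/* unprotected mm -> skipped */
	return 0;
}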
diff --git a/arch/x86/include/asm/trace/exceptions.h b/arch/x86/include/asm/trace/exceptions.h
index 69615e387973..e0e6d7f21399 100644
--- a/arch/x86/include/asm/trace/exceptions.h
+++ b/arch/x86/include/asm/trace/exceptions.h
@@ -45,6 +45,7 @@ DEFINE_PAGE_FAULT_EVENT(page_fault_user);
DEFINE_PAGE_FAULT_EVENT(page_fault_kernel);
#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE exceptions
#endif /* _TRACE_PAGE_FAULT_H */
diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h
index 2e6245a023ef..ace464f09681 100644
--- a/arch/x86/include/asm/trace/hyperv.h
+++ b/arch/x86/include/asm/trace/hyperv.h
@@ -42,6 +42,20 @@ TRACE_EVENT(hyperv_nested_flush_guest_mapping,
TP_printk("address space %llx ret %d", __entry->as, __entry->ret)
);
+TRACE_EVENT(hyperv_nested_flush_guest_mapping_range,
+ TP_PROTO(u64 as, int ret),
+ TP_ARGS(as, ret),
+
+ TP_STRUCT__entry(
+ __field(u64, as)
+ __field(int, ret)
+ ),
+ TP_fast_assign(__entry->as = as;
+ __entry->ret = ret;
+ ),
+ TP_printk("address space %llx ret %d", __entry->as, __entry->ret)
+ );
+
TRACE_EVENT(hyperv_send_ipi_mask,
TP_PROTO(const struct cpumask *cpus,
int vector),
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 0af81b590a0c..33b9d0f0aafe 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -389,6 +389,7 @@ TRACE_EVENT(vector_free_moved,
#endif /* CONFIG_X86_LOCAL_APIC */
#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE irq_vectors
#endif /* _TRACE_IRQ_VECTORS_H */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 3de69330e6c5..7d6f3f3fad78 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -61,34 +61,38 @@ asmlinkage void xen_machine_check(void);
asmlinkage void xen_simd_coprocessor_error(void);
#endif
-dotraplinkage void do_divide_error(struct pt_regs *, long);
-dotraplinkage void do_debug(struct pt_regs *, long);
-dotraplinkage void do_nmi(struct pt_regs *, long);
-dotraplinkage void do_int3(struct pt_regs *, long);
-dotraplinkage void do_overflow(struct pt_regs *, long);
-dotraplinkage void do_bounds(struct pt_regs *, long);
-dotraplinkage void do_invalid_op(struct pt_regs *, long);
-dotraplinkage void do_device_not_available(struct pt_regs *, long);
-dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
-dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
-dotraplinkage void do_segment_not_present(struct pt_regs *, long);
-dotraplinkage void do_stack_segment(struct pt_regs *, long);
+dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code);
+dotraplinkage void do_debug(struct pt_regs *regs, long error_code);
+dotraplinkage void do_nmi(struct pt_regs *regs, long error_code);
+dotraplinkage void do_int3(struct pt_regs *regs, long error_code);
+dotraplinkage void do_overflow(struct pt_regs *regs, long error_code);
+dotraplinkage void do_bounds(struct pt_regs *regs, long error_code);
+dotraplinkage void do_invalid_op(struct pt_regs *regs, long error_code);
+dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code);
+dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code);
+dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
+dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code);
+dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code);
#ifdef CONFIG_X86_64
-dotraplinkage void do_double_fault(struct pt_regs *, long);
+dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code);
+asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs);
+asmlinkage __visible notrace
+struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s);
+void __init trap_init(void);
#endif
-dotraplinkage void do_general_protection(struct pt_regs *, long);
-dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
-dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long);
-dotraplinkage void do_coprocessor_error(struct pt_regs *, long);
-dotraplinkage void do_alignment_check(struct pt_regs *, long);
+dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code);
+dotraplinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code);
+dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *regs, long error_code);
+dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code);
+dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code);
#ifdef CONFIG_X86_MCE
-dotraplinkage void do_machine_check(struct pt_regs *, long);
+dotraplinkage void do_machine_check(struct pt_regs *regs, long error_code);
#endif
-dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
+dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code);
#ifdef CONFIG_X86_32
-dotraplinkage void do_iret_error(struct pt_regs *, long);
+dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code);
#endif
-dotraplinkage void do_mce(struct pt_regs *, long);
+dotraplinkage void do_mce(struct pt_regs *regs, long error_code);
static inline int get_si_code(unsigned long condition)
{
@@ -104,11 +108,16 @@ extern int panic_on_unrecovered_nmi;
void math_emulate(struct math_emu_info *);
#ifndef CONFIG_X86_32
-asmlinkage void smp_thermal_interrupt(void);
-asmlinkage void smp_threshold_interrupt(void);
-asmlinkage void smp_deferred_error_interrupt(void);
+asmlinkage void smp_thermal_interrupt(struct pt_regs *regs);
+asmlinkage void smp_threshold_interrupt(struct pt_regs *regs);
+asmlinkage void smp_deferred_error_interrupt(struct pt_regs *regs);
#endif
+void smp_apic_timer_interrupt(struct pt_regs *regs);
+void smp_spurious_interrupt(struct pt_regs *regs);
+void smp_error_interrupt(struct pt_regs *regs);
+asmlinkage void smp_irq_move_cleanup_interrupt(void);
+
extern void ist_enter(struct pt_regs *regs);
extern void ist_exit(struct pt_regs *regs);
extern void ist_begin_non_atomic(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index eb5bbfeccb66..8a0c25c6bf09 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -35,6 +35,7 @@ extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
extern void tsc_early_init(void);
extern void tsc_init(void);
+extern unsigned long calibrate_delay_is_known(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index ade0f153947d..4e4133e86484 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -77,7 +77,10 @@
#define SECONDARY_EXEC_ENCLS_EXITING 0x00008000
#define SECONDARY_EXEC_RDSEED_EXITING 0x00010000
#define SECONDARY_EXEC_ENABLE_PML 0x00020000
+#define SECONDARY_EXEC_PT_CONCEAL_VMX 0x00080000
#define SECONDARY_EXEC_XSAVES 0x00100000
+#define SECONDARY_EXEC_PT_USE_GPA 0x01000000
+#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC 0x00400000
#define SECONDARY_EXEC_TSC_SCALING 0x02000000
#define PIN_BASED_EXT_INTR_MASK 0x00000001
@@ -98,6 +101,8 @@
#define VM_EXIT_LOAD_IA32_EFER 0x00200000
#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
#define VM_EXIT_CLEAR_BNDCFGS 0x00800000
+#define VM_EXIT_PT_CONCEAL_PIP 0x01000000
+#define VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff
@@ -109,6 +114,8 @@
#define VM_ENTRY_LOAD_IA32_PAT 0x00004000
#define VM_ENTRY_LOAD_IA32_EFER 0x00008000
#define VM_ENTRY_LOAD_BNDCFGS 0x00010000
+#define VM_ENTRY_PT_CONCEAL_PIP 0x00020000
+#define VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000
#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff
@@ -240,6 +247,8 @@ enum vmcs_field {
GUEST_PDPTR3_HIGH = 0x00002811,
GUEST_BNDCFGS = 0x00002812,
GUEST_BNDCFGS_HIGH = 0x00002813,
+ GUEST_IA32_RTIT_CTL = 0x00002814,
+ GUEST_IA32_RTIT_CTL_HIGH = 0x00002815,
HOST_IA32_PAT = 0x00002c00,
HOST_IA32_PAT_HIGH = 0x00002c01,
HOST_IA32_EFER = 0x00002c02,
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 0f842104862c..b85a7c54c6a1 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -303,6 +303,4 @@ extern void x86_init_noop(void);
extern void x86_init_uint_noop(unsigned int unused);
extern bool x86_pnpbios_disabled(void);
-void x86_verify_bootdata_version(void);
-
#endif
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 22f89d040ddd..60733f137e9a 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -16,9 +16,6 @@
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
-/* version flags */
-#define VERSION_WRITTEN 0x8000
-
/* loadflags */
#define LOADED_HIGH (1<<0)
#define KASLR_FLAG (1<<1)
@@ -89,7 +86,6 @@ struct setup_header {
__u64 pref_address;
__u32 init_size;
__u32 handover_offset;
- __u64 acpi_rsdp_addr;
} __attribute__((packed));
struct sys_desc_table {
@@ -159,7 +155,8 @@ struct boot_params {
__u8 _pad2[4]; /* 0x054 */
__u64 tboot_addr; /* 0x058 */
struct ist_info ist_info; /* 0x060 */
- __u8 _pad3[16]; /* 0x070 */
+ __u64 acpi_rsdp_addr; /* 0x070 */
+ __u8 _pad3[8]; /* 0x078 */
__u8 hd0_info[16]; /* obsolete! */ /* 0x080 */
__u8 hd1_info[16]; /* obsolete! */ /* 0x090 */
struct sys_desc_table sys_desc_table; /* obsolete! */ /* 0x0a0 */
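Because struct boot_params is a fixed-layout ABI shared with boot loaders, moving acpi_rsdp_addr into the former _pad3 area only works if the annotated offsets hold exactly. A compile-time check of the fragment around the new field (the struct below reproduces just enough of the layout to assert the offsets; it is a verification sketch, not the real structure):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct boot_params_fragment {
	uint8_t  _before[0x070];	/* everything up to 0x070, collapsed */
	uint64_t acpi_rsdp_addr;	/* 0x070 */
	uint8_t  _pad3[8];		/* 0x078 */
	uint8_t  hd0_info[16];		/* 0x080, unchanged */
} __attribute__((packed));

int main(void)
{
	static_assert(offsetof(struct boot_params_fragment, acpi_rsdp_addr)
		      == 0x070, "acpi_rsdp_addr moved");
	static_assert(offsetof(struct boot_params_fragment, hd0_info)
		      == 0x080, "fields after the pad shifted");
	return 0;
}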
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 92c76bf97ad8..2624de16cd7a 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -848,7 +848,7 @@ EXPORT_SYMBOL(acpi_unregister_ioapic);
/**
* acpi_ioapic_registered - Check whether IOAPIC associated with @gsi_base
* has been registered
- * @handle: ACPI handle of the IOAPIC deivce
+ * @handle: ACPI handle of the IOAPIC device
* @gsi_base: GSI base associated with the IOAPIC
*
* Assume caller holds some type of lock to serialize acpi_ioapic_registered()
@@ -1776,5 +1776,5 @@ void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
u64 x86_default_get_root_pointer(void)
{
- return boot_params.hdr.acpi_rsdp_addr;
+ return boot_params.acpi_rsdp_addr;
}
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index a6eca647bc76..cc51275c8759 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -11,14 +11,15 @@
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
+#include <linux/pci_ids.h>
#include <asm/amd_nb.h>
#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
-#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
-#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
@@ -28,9 +29,11 @@ static u32 *flush_words;
static const struct pci_device_id amd_root_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
{}
};
+
#define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704
const struct pci_device_id amd_nb_misc_ids[] = {
@@ -44,6 +47,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{}
};
@@ -57,6 +61,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
@@ -214,7 +219,10 @@ int amd_cache_northbridges(void)
const struct pci_device_id *root_ids = amd_root_ids;
struct pci_dev *root, *misc, *link;
struct amd_northbridge *nb;
- u16 i = 0;
+ u16 roots_per_misc = 0;
+ u16 misc_count = 0;
+ u16 root_count = 0;
+ u16 i, j;
if (amd_northbridges.num)
return 0;
@@ -227,26 +235,55 @@ int amd_cache_northbridges(void)
misc = NULL;
while ((misc = next_northbridge(misc, misc_ids)) != NULL)
- i++;
+ misc_count++;
- if (!i)
+ if (!misc_count)
return -ENODEV;
- nb = kcalloc(i, sizeof(struct amd_northbridge), GFP_KERNEL);
+ root = NULL;
+ while ((root = next_northbridge(root, root_ids)) != NULL)
+ root_count++;
+
+ if (root_count) {
+ roots_per_misc = root_count / misc_count;
+
+ /*
+ * There should be _exactly_ N roots for each DF/SMN
+ * interface.
+ */
+ if (!roots_per_misc || (root_count % roots_per_misc)) {
+ pr_info("Unsupported AMD DF/PCI configuration found\n");
+ return -ENODEV;
+ }
+ }
+
+ nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
if (!nb)
return -ENOMEM;
amd_northbridges.nb = nb;
- amd_northbridges.num = i;
+ amd_northbridges.num = misc_count;
link = misc = root = NULL;
- for (i = 0; i != amd_northbridges.num; i++) {
+ for (i = 0; i < amd_northbridges.num; i++) {
node_to_amd_nb(i)->root = root =
next_northbridge(root, root_ids);
node_to_amd_nb(i)->misc = misc =
next_northbridge(misc, misc_ids);
node_to_amd_nb(i)->link = link =
next_northbridge(link, link_ids);
+
+ /*
+ * If there are more PCI root devices than data fabric/
+ * system management network interfaces, then the (N)
+ * PCI roots per DF/SMN interface are functionally the
+ * same (for DF/SMN access) and N-1 are redundant. N-1
+ * PCI roots should be skipped per DF/SMN interface so
+ * the following DF/SMN interfaces get mapped to
+ * correct PCI roots.
+ */
+ for (j = 1; j < roots_per_misc; j++)
+ root = next_northbridge(root, root_ids);
}
if (amd_gart_present())
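A worked example of the root/misc pairing logic above: with, say, 8 PCI roots and 4 DF/SMN interfaces, roots_per_misc is 2 and the enumeration skips one redundant root per interface. The counts below are hypothetical; the divisibility check matches the hunk.

#include <stdio.h>

int main(void)
{
	unsigned int root_count = 8, misc_count = 4;
	unsigned int roots_per_misc = root_count / misc_count;

	/* Same sanity check as the hunk: exactly N roots per interface. */
	if (!roots_per_misc || (root_count % roots_per_misc)) {
		puts("Unsupported AMD DF/PCI configuration");
		return 1;
	}

	for (unsigned int i = 0; i < misc_count; i++)
		printf("misc %u -> root %u (skipping %u redundant)\n",
		       i, i * roots_per_misc, roots_per_misc - 1);
	return 0;
}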
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 2c4d5ece7456..58176b56354e 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -264,18 +264,23 @@ static int __init parse_gart_mem(char *p)
}
early_param("gart_fix_e820", parse_gart_mem);
+/*
+ * With kexec/kdump, if the first kernel doesn't shut down the GART and the
+ * second kernel allocates a different GART region, there might be two
+ * overlapping GART regions present:
+ *
+ * - the first still used by the GART initialized in the first kernel.
+ * - (sub-)set of it used as normal RAM by the second kernel.
+ *
+ * which leads to memory corruption and, eventually, a kernel panic.
+ *
+ * This can also happen if the BIOS has forgotten to mark the GART region
+ * as reserved.
+ *
+ * Try to update the e820 map to mark that new region as reserved.
+ */
void __init early_gart_iommu_check(void)
{
- /*
- * in case it is enabled before, esp for kexec/kdump,
- * previous kernel already enable that. memset called
- * by allocate_aperture/__alloc_bootmem_nopanic cause restart.
- * or second kernel have different position for GART hole. and new
- * kernel could use hole as RAM that is still used by GART set by
- * first kernel
- * or BIOS forget to put that in reserved.
- * try to update e820 to make that region as reserved.
- */
u32 agp_aper_order = 0;
int i, fix, slot, valid_agp = 0;
u32 ctl;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 32b2b7a41ef5..b7bcdd781651 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -44,6 +44,7 @@
#include <asm/mpspec.h>
#include <asm/i8259.h>
#include <asm/proto.h>
+#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/desc.h>
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index e84c9eb4e5b4..0005c284a5c5 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -8,6 +8,7 @@
* Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
* James Cleverdon.
*/
+#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
@@ -16,13 +17,13 @@
#include <linux/ctype.h>
#include <linux/hardirq.h>
#include <linux/export.h>
+
#include <asm/smp.h>
-#include <asm/apic.h>
#include <asm/ipi.h>
+#include <asm/apic.h>
+#include <asm/apic_flat_64.h>
#include <asm/jailhouse_para.h>
-#include <linux/acpi.h>
-
static struct apic apic_physflat;
static struct apic apic_flat;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 652e7ffa9b9d..3173e07d3791 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
+#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 391f358ebb4c..a555da094157 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -1079,7 +1079,7 @@ late_initcall(uv_init_heartbeat);
#endif /* !CONFIG_HOTPLUG_CPU */
/* Direct Legacy VGA I/O traffic to designated IOH */
-int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags)
+static int uv_set_vga_state(struct pci_dev *pdev, bool decode, unsigned int command_bits, u32 flags)
{
int domain, bus, rc;
@@ -1148,7 +1148,7 @@ static void get_mn(struct mn *mnp)
mnp->m_shift = mnp->m_val ? 64 - mnp->m_val : 0;
}
-void __init uv_init_hub_info(struct uv_hub_info_s *hi)
+static void __init uv_init_hub_info(struct uv_hub_info_s *hi)
{
union uvh_node_id_u node_id;
struct mn mn;
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 72adf6c335dc..168543d077d7 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -29,7 +29,8 @@
# include "asm-offsets_64.c"
#endif
-void common(void) {
+static void __used common(void)
+{
BLANK();
OFFSET(TASK_threadsp, task_struct, thread.sp);
#ifdef CONFIG_STACKPROTECTOR
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 1979a76bfadd..5136e6818da8 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -9,6 +9,7 @@
#include <linux/memblock.h>
#include <asm/proto.h>
+#include <asm/setup.h>
/*
* Some BIOSes seem to corrupt the low 64k of memory during events
@@ -136,7 +137,7 @@ void __init setup_bios_corruption_check(void)
}
-void check_for_bios_corruption(void)
+static void check_for_bios_corruption(void)
{
int i;
int corruption = 0;
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 1f5d2291c31e..ac78f90aea56 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -36,13 +36,10 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o
obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o
-obj-$(CONFIG_INTEL_RDT) += intel_rdt.o intel_rdt_rdtgroup.o intel_rdt_monitor.o
-obj-$(CONFIG_INTEL_RDT) += intel_rdt_ctrlmondata.o intel_rdt_pseudo_lock.o
-CFLAGS_intel_rdt_pseudo_lock.o = -I$(src)
-
-obj-$(CONFIG_X86_MCE) += mcheck/
+obj-$(CONFIG_X86_MCE) += mce/
obj-$(CONFIG_MTRR) += mtrr/
obj-$(CONFIG_MICROCODE) += microcode/
+obj-$(CONFIG_RESCTRL) += resctrl/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index eeea634bee0a..69f6bbb41be0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -15,6 +15,7 @@
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
+#include <asm/debugreg.h>
#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 7eba34df54c3..804c49493938 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -12,6 +12,7 @@
#include <linux/ktime.h>
#include <linux/math64.h>
#include <linux/percpu.h>
+#include <linux/cpufreq.h>
#include <linux/smp.h>
#include "cpu.h"
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index c37e66e493bf..8654b8b0c848 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
+#include <linux/sched/smt.h>
#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
@@ -31,6 +32,8 @@
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
+#include "cpu.h"
+
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
@@ -53,6 +56,13 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
+/* Control conditional STIBP in switch_to() */
+DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+/* Control conditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+/* Control unconditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
void __init check_bugs(void)
{
identify_boot_cpu();
@@ -123,31 +133,6 @@ void __init check_bugs(void)
#endif
}
-/* The kernel command line selection */
-enum spectre_v2_mitigation_cmd {
- SPECTRE_V2_CMD_NONE,
- SPECTRE_V2_CMD_AUTO,
- SPECTRE_V2_CMD_FORCE,
- SPECTRE_V2_CMD_RETPOLINE,
- SPECTRE_V2_CMD_RETPOLINE_GENERIC,
- SPECTRE_V2_CMD_RETPOLINE_AMD,
-};
-
-static const char *spectre_v2_strings[] = {
- [SPECTRE_V2_NONE] = "Vulnerable",
- [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline",
- [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline",
- [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
- [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
- [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
-};
-
-#undef pr_fmt
-#define pr_fmt(fmt) "Spectre V2 : " fmt
-
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
- SPECTRE_V2_NONE;
-
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
@@ -169,6 +154,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
static_cpu_has(X86_FEATURE_AMD_SSBD))
hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+ /* Conditional STIBP enabled? */
+ if (static_branch_unlikely(&switch_to_cond_stibp))
+ hostval |= stibp_tif_to_spec_ctrl(ti->flags);
+
if (hostval != guestval) {
msrval = setguest ? guestval : hostval;
wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -202,7 +191,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
ssbd_spec_ctrl_to_tif(hostval);
- speculative_store_bypass_update(tif);
+ speculation_ctrl_update(tif);
}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
@@ -217,6 +206,15 @@ static void x86_amd_ssb_disable(void)
wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
+#undef pr_fmt
+#define pr_fmt(fmt) "Spectre V2 : " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+ SPECTRE_V2_NONE;
+
+static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+ SPECTRE_V2_USER_NONE;
+
#ifdef RETPOLINE
static bool spectre_v2_bad_module;
@@ -238,67 +236,227 @@ static inline const char *spectre_v2_module_string(void)
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif
-static void __init spec2_print_if_insecure(const char *reason)
+static inline bool match_option(const char *arg, int arglen, const char *opt)
{
- if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
- pr_info("%s selected on command line.\n", reason);
+ int len = strlen(opt);
+
+ return len == arglen && !strncmp(arg, opt, len);
}
-static void __init spec2_print_if_secure(const char *reason)
+/* The kernel command line selection for spectre v2 */
+enum spectre_v2_mitigation_cmd {
+ SPECTRE_V2_CMD_NONE,
+ SPECTRE_V2_CMD_AUTO,
+ SPECTRE_V2_CMD_FORCE,
+ SPECTRE_V2_CMD_RETPOLINE,
+ SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+ SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+enum spectre_v2_user_cmd {
+ SPECTRE_V2_USER_CMD_NONE,
+ SPECTRE_V2_USER_CMD_AUTO,
+ SPECTRE_V2_USER_CMD_FORCE,
+ SPECTRE_V2_USER_CMD_PRCTL,
+ SPECTRE_V2_USER_CMD_PRCTL_IBPB,
+ SPECTRE_V2_USER_CMD_SECCOMP,
+ SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
+};
+
+static const char * const spectre_v2_user_strings[] = {
+ [SPECTRE_V2_USER_NONE] = "User space: Vulnerable",
+ [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection",
+ [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection",
+ [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl",
+ [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl",
+};
+
+static const struct {
+ const char *option;
+ enum spectre_v2_user_cmd cmd;
+ bool secure;
+} v2_user_options[] __initdata = {
+ { "auto", SPECTRE_V2_USER_CMD_AUTO, false },
+ { "off", SPECTRE_V2_USER_CMD_NONE, false },
+ { "on", SPECTRE_V2_USER_CMD_FORCE, true },
+ { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false },
+ { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false },
+ { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false },
+ { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false },
+};
+
+static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
- pr_info("%s selected on command line.\n", reason);
+ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+ pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}
-static inline bool retp_compiler(void)
+static enum spectre_v2_user_cmd __init
+spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
- return __is_defined(RETPOLINE);
+ char arg[20];
+ int ret, i;
+
+ switch (v2_cmd) {
+ case SPECTRE_V2_CMD_NONE:
+ return SPECTRE_V2_USER_CMD_NONE;
+ case SPECTRE_V2_CMD_FORCE:
+ return SPECTRE_V2_USER_CMD_FORCE;
+ default:
+ break;
+ }
+
+ ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
+ arg, sizeof(arg));
+ if (ret < 0)
+ return SPECTRE_V2_USER_CMD_AUTO;
+
+ for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
+ if (match_option(arg, ret, v2_user_options[i].option)) {
+ spec_v2_user_print_cond(v2_user_options[i].option,
+ v2_user_options[i].secure);
+ return v2_user_options[i].cmd;
+ }
+ }
+
+ pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
+ return SPECTRE_V2_USER_CMD_AUTO;
}
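match_option() and the v2_user_options[] table implement a small, reusable parsing pattern: cmdline_find_option() returns the argument's length, and each table entry is compared length-exactly so that, e.g., "on" never matches "one". A self-contained sketch of the same pattern (the enum values and table contents here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum user_cmd { CMD_AUTO, CMD_NONE, CMD_FORCE };

static const struct {
	const char *option;
	enum user_cmd cmd;
} options[] = {
	{ "auto",  CMD_AUTO  },
	{ "off",   CMD_NONE  },
	{ "on",    CMD_FORCE },
};

static bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

static enum user_cmd parse(const char *arg, int arglen)
{
	for (size_t i = 0; i < sizeof(options) / sizeof(options[0]); i++)
		if (match_option(arg, arglen, options[i].option))
			return options[i].cmd;

	fprintf(stderr, "unknown option, falling back to auto\n");
	return CMD_AUTO;
}

int main(void)
{
	/* cmdline_find_option() would hand back "off" with length 3. */
	printf("%d\n", parse("off", 3));	/* prints 1 (CMD_NONE) */
	return 0;
}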
-static inline bool match_option(const char *arg, int arglen, const char *opt)
+static void __init
+spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
- int len = strlen(opt);
+ enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
+ bool smt_possible = IS_ENABLED(CONFIG_SMP);
+ enum spectre_v2_user_cmd cmd;
- return len == arglen && !strncmp(arg, opt, len);
+ if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
+ return;
+
+ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
+ cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+ smt_possible = false;
+
+ cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+ switch (cmd) {
+ case SPECTRE_V2_USER_CMD_NONE:
+ goto set_mode;
+ case SPECTRE_V2_USER_CMD_FORCE:
+ mode = SPECTRE_V2_USER_STRICT;
+ break;
+ case SPECTRE_V2_USER_CMD_PRCTL:
+ case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+ mode = SPECTRE_V2_USER_PRCTL;
+ break;
+ case SPECTRE_V2_USER_CMD_AUTO:
+ case SPECTRE_V2_USER_CMD_SECCOMP:
+ case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+ if (IS_ENABLED(CONFIG_SECCOMP))
+ mode = SPECTRE_V2_USER_SECCOMP;
+ else
+ mode = SPECTRE_V2_USER_PRCTL;
+ break;
+ }
+
+ /*
+ * At this point, an STIBP mode other than "off" has been set.
+ * If STIBP support is not being forced, check if STIBP always-on
+ * is preferred.
+ */
+ if (mode != SPECTRE_V2_USER_STRICT &&
+ boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
+ mode = SPECTRE_V2_USER_STRICT_PREFERRED;
+
+ /* Initialize Indirect Branch Prediction Barrier */
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+
+ switch (cmd) {
+ case SPECTRE_V2_USER_CMD_FORCE:
+ case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+ case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+ static_branch_enable(&switch_mm_always_ibpb);
+ break;
+ case SPECTRE_V2_USER_CMD_PRCTL:
+ case SPECTRE_V2_USER_CMD_AUTO:
+ case SPECTRE_V2_USER_CMD_SECCOMP:
+ static_branch_enable(&switch_mm_cond_ibpb);
+ break;
+ default:
+ break;
+ }
+
+ pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+ static_key_enabled(&switch_mm_always_ibpb) ?
+ "always-on" : "conditional");
+ }
+
+ /* If enhanced IBRS is enabled, no STIBP is required. */
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ return;
+
+ /*
+ * If SMT is not possible or STIBP is not available, clear the STIBP
+ * mode.
+ */
+ if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+ mode = SPECTRE_V2_USER_NONE;
+set_mode:
+ spectre_v2_user = mode;
+ /* Only print the STIBP mode when SMT is possible */
+ if (smt_possible)
+ pr_info("%s\n", spectre_v2_user_strings[mode]);
}
+static const char * const spectre_v2_strings[] = {
+ [SPECTRE_V2_NONE] = "Vulnerable",
+ [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline",
+ [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline",
+ [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS",
+};
+
static const struct {
const char *option;
enum spectre_v2_mitigation_cmd cmd;
bool secure;
-} mitigation_options[] = {
- { "off", SPECTRE_V2_CMD_NONE, false },
- { "on", SPECTRE_V2_CMD_FORCE, true },
- { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
- { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
- { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
- { "auto", SPECTRE_V2_CMD_AUTO, false },
+} mitigation_options[] __initdata = {
+ { "off", SPECTRE_V2_CMD_NONE, false },
+ { "on", SPECTRE_V2_CMD_FORCE, true },
+ { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false },
+ { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false },
+ { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+ { "auto", SPECTRE_V2_CMD_AUTO, false },
};
+static void __init spec_v2_print_cond(const char *reason, bool secure)
+{
+ if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+ pr_info("%s selected on command line.\n", reason);
+}
+
static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
+ enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
char arg[20];
int ret, i;
- enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
return SPECTRE_V2_CMD_NONE;
- else {
- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
- if (ret < 0)
- return SPECTRE_V2_CMD_AUTO;
- for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
- if (!match_option(arg, ret, mitigation_options[i].option))
- continue;
- cmd = mitigation_options[i].cmd;
- break;
- }
+ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
+ if (ret < 0)
+ return SPECTRE_V2_CMD_AUTO;
- if (i >= ARRAY_SIZE(mitigation_options)) {
- pr_err("unknown option (%s). Switching to AUTO select\n", arg);
- return SPECTRE_V2_CMD_AUTO;
- }
+ for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
+ if (!match_option(arg, ret, mitigation_options[i].option))
+ continue;
+ cmd = mitigation_options[i].cmd;
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(mitigation_options)) {
+ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+ return SPECTRE_V2_CMD_AUTO;
}
if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
@@ -316,54 +474,11 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return SPECTRE_V2_CMD_AUTO;
}
- if (mitigation_options[i].secure)
- spec2_print_if_secure(mitigation_options[i].option);
- else
- spec2_print_if_insecure(mitigation_options[i].option);
-
+ spec_v2_print_cond(mitigation_options[i].option,
+ mitigation_options[i].secure);
return cmd;
}
-static bool stibp_needed(void)
-{
- if (spectre_v2_enabled == SPECTRE_V2_NONE)
- return false;
-
- if (!boot_cpu_has(X86_FEATURE_STIBP))
- return false;
-
- return true;
-}
-
-static void update_stibp_msr(void *info)
-{
- wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-}
-
-void arch_smt_update(void)
-{
- u64 mask;
-
- if (!stibp_needed())
- return;
-
- mutex_lock(&spec_ctrl_mutex);
- mask = x86_spec_ctrl_base;
- if (cpu_smt_control == CPU_SMT_ENABLED)
- mask |= SPEC_CTRL_STIBP;
- else
- mask &= ~SPEC_CTRL_STIBP;
-
- if (mask != x86_spec_ctrl_base) {
- pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
- cpu_smt_control == CPU_SMT_ENABLED ?
- "Enabling" : "Disabling");
- x86_spec_ctrl_base = mask;
- on_each_cpu(update_stibp_msr, NULL, 1);
- }
- mutex_unlock(&spec_ctrl_mutex);
-}
-
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -417,14 +532,12 @@ retpoline_auto:
pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
goto retpoline_generic;
}
- mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
- SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+ mode = SPECTRE_V2_RETPOLINE_AMD;
setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
} else {
retpoline_generic:
- mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
- SPECTRE_V2_RETPOLINE_MINIMAL;
+ mode = SPECTRE_V2_RETPOLINE_GENERIC;
setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
}
@@ -443,12 +556,6 @@ specv2_set_mode:
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
- /* Initialize Indirect Branch Prediction Barrier if supported */
- if (boot_cpu_has(X86_FEATURE_IBPB)) {
- setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
- pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
- }
-
/*
* Retpoline means the kernel is safe because it has no indirect
* branches. Enhanced IBRS protects firmware too, so, enable restricted
@@ -465,10 +572,68 @@ specv2_set_mode:
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
+ /* Set up IBPB and STIBP depending on the general Spectre v2 command */
+ spectre_v2_user_select_mitigation(cmd);
+
/* Enable STIBP if appropriate */
arch_smt_update();
}
+static void update_stibp_msr(void * __unused)
+{
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+/* Update x86_spec_ctrl_base in case SMT state changed. */
+static void update_stibp_strict(void)
+{
+ u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+
+ if (sched_smt_active())
+ mask |= SPEC_CTRL_STIBP;
+
+ if (mask == x86_spec_ctrl_base)
+ return;
+
+ pr_info("Update user space SMT mitigation: STIBP %s\n",
+ mask & SPEC_CTRL_STIBP ? "always-on" : "off");
+ x86_spec_ctrl_base = mask;
+ on_each_cpu(update_stibp_msr, NULL, 1);
+}
+
+/* Update the static key controlling the evaluation of TIF_SPEC_IB */
+static void update_indir_branch_cond(void)
+{
+ if (sched_smt_active())
+ static_branch_enable(&switch_to_cond_stibp);
+ else
+ static_branch_disable(&switch_to_cond_stibp);
+}
+
+void arch_smt_update(void)
+{
+ /* Enhanced IBRS implies STIBP. No update required. */
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ return;
+
+ mutex_lock(&spec_ctrl_mutex);
+
+ switch (spectre_v2_user) {
+ case SPECTRE_V2_USER_NONE:
+ break;
+ case SPECTRE_V2_USER_STRICT:
+ case SPECTRE_V2_USER_STRICT_PREFERRED:
+ update_stibp_strict();
+ break;
+ case SPECTRE_V2_USER_PRCTL:
+ case SPECTRE_V2_USER_SECCOMP:
+ update_indir_branch_cond();
+ break;
+ }
+
+ mutex_unlock(&spec_ctrl_mutex);
+}
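
update_stibp_strict() above is plain bit manipulation on the cached MSR image. A standalone sketch of the same computation, assuming STIBP is bit 1 of IA32_SPEC_CTRL (i.e. SPEC_CTRL_STIBP == 1ULL << 1, as in current headers):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SPEC_CTRL_STIBP (1ULL << 1) /* assumed bit position, for illustration */

    /* Clear STIBP in the cached value, then set it only while SMT is active. */
    static uint64_t stibp_mask(uint64_t spec_ctrl_base, bool smt_active)
    {
        uint64_t mask = spec_ctrl_base & ~SPEC_CTRL_STIBP;

        if (smt_active)
            mask |= SPEC_CTRL_STIBP;
        return mask;
    }

    int main(void)
    {
        printf("0x%llx\n", (unsigned long long)stibp_mask(0x0, true));  /* 0x2 */
        printf("0x%llx\n", (unsigned long long)stibp_mask(0x2, false)); /* 0x0 */
        return 0;
    }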
+
#undef pr_fmt
#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
@@ -483,7 +648,7 @@ enum ssb_mitigation_cmd {
SPEC_STORE_BYPASS_CMD_SECCOMP,
};
-static const char *ssb_strings[] = {
+static const char * const ssb_strings[] = {
[SPEC_STORE_BYPASS_NONE] = "Vulnerable",
[SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
[SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
@@ -493,7 +658,7 @@ static const char *ssb_strings[] = {
static const struct {
const char *option;
enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[] = {
+} ssb_mitigation_options[] __initdata = {
{ "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
{ "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
{ "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
@@ -604,10 +769,25 @@ static void ssb_select_mitigation(void)
#undef pr_fmt
#define pr_fmt(fmt) "Speculation prctl: " fmt
-static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+static void task_update_spec_tif(struct task_struct *tsk)
{
- bool update;
+ /* Force the update of the real TIF bits */
+ set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
+ /*
+ * Immediately update the speculation control MSRs for the current
+ * task, but for a non-current task delay setting the CPU
+ * mitigation until it is scheduled next.
+ *
+ * This can only happen for SECCOMP mitigation. For PRCTL it's
+ * always the current task.
+ */
+ if (tsk == current)
+ speculation_ctrl_update_current();
+}
+
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
return -ENXIO;
@@ -618,28 +798,58 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
if (task_spec_ssb_force_disable(task))
return -EPERM;
task_clear_spec_ssb_disable(task);
- update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+ task_update_spec_tif(task);
break;
case PR_SPEC_DISABLE:
task_set_spec_ssb_disable(task);
- update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ task_update_spec_tif(task);
break;
case PR_SPEC_FORCE_DISABLE:
task_set_spec_ssb_disable(task);
task_set_spec_ssb_force_disable(task);
- update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ task_update_spec_tif(task);
break;
default:
return -ERANGE;
}
+ return 0;
+}
- /*
- * If being set on non-current task, delay setting the CPU
- * mitigation until it is next scheduled.
- */
- if (task == current && update)
- speculative_store_bypass_update_current();
-
+static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+ switch (ctrl) {
+ case PR_SPEC_ENABLE:
+ if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ return 0;
+ /*
+ * Indirect branch speculation is always disabled in strict
+ * mode.
+ */
+ if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+ return -EPERM;
+ task_clear_spec_ib_disable(task);
+ task_update_spec_tif(task);
+ break;
+ case PR_SPEC_DISABLE:
+ case PR_SPEC_FORCE_DISABLE:
+ /*
+ * Indirect branch speculation is always allowed when
+ * mitigation is force disabled.
+ */
+ if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+ return -EPERM;
+ if (spectre_v2_user == SPECTRE_V2_USER_STRICT ||
+ spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED)
+ return 0;
+ task_set_spec_ib_disable(task);
+ if (ctrl == PR_SPEC_FORCE_DISABLE)
+ task_set_spec_ib_force_disable(task);
+ task_update_spec_tif(task);
+ break;
+ default:
+ return -ERANGE;
+ }
return 0;
}
@@ -649,6 +859,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
switch (which) {
case PR_SPEC_STORE_BYPASS:
return ssb_prctl_set(task, ctrl);
+ case PR_SPEC_INDIRECT_BRANCH:
+ return ib_prctl_set(task, ctrl);
default:
return -ENODEV;
}
@@ -659,6 +871,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
{
if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+ if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+ ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif
@@ -681,11 +895,36 @@ static int ssb_prctl_get(struct task_struct *task)
}
}
+static int ib_prctl_get(struct task_struct *task)
+{
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ return PR_SPEC_NOT_AFFECTED;
+
+ switch (spectre_v2_user) {
+ case SPECTRE_V2_USER_NONE:
+ return PR_SPEC_ENABLE;
+ case SPECTRE_V2_USER_PRCTL:
+ case SPECTRE_V2_USER_SECCOMP:
+ if (task_spec_ib_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+ if (task_spec_ib_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+ case SPECTRE_V2_USER_STRICT:
+ case SPECTRE_V2_USER_STRICT_PREFERRED:
+ return PR_SPEC_DISABLE;
+ default:
+ return PR_SPEC_NOT_AFFECTED;
+ }
+}
+
int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
switch (which) {
case PR_SPEC_STORE_BYPASS:
return ssb_prctl_get(task);
+ case PR_SPEC_INDIRECT_BRANCH:
+ return ib_prctl_get(task);
default:
return -ENODEV;
}
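
The PR_SPEC_INDIRECT_BRANCH plumbing added above is driven from user space through the existing speculation-control prctls. A minimal sketch, assuming kernel headers that already carry this series (the constants live in linux/prctl.h):

    #include <linux/prctl.h>
    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
        int ret;

        /* Opt this task out of indirect branch speculation. Succeeds in the
         * prctl/seccomp modes; strict mode does not take per-task requests. */
        ret = prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                    PR_SPEC_DISABLE, 0, 0);
        if (ret)
            perror("PR_SET_SPECULATION_CTRL");

        /* Read the state back; this ends up in ib_prctl_get() above. */
        ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
        printf("state: 0x%x\n", ret); /* e.g. PR_SPEC_PRCTL | PR_SPEC_DISABLE */
        return 0;
    }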
@@ -779,7 +1018,8 @@ static void __init l1tf_select_mitigation(void)
#endif
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
- if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+ if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
+ e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
half_pa);
@@ -823,7 +1063,7 @@ early_param("l1tf", l1tf_cmdline);
#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
#if IS_ENABLED(CONFIG_KVM_INTEL)
-static const char *l1tf_vmx_states[] = {
+static const char * const l1tf_vmx_states[] = {
[VMENTER_L1D_FLUSH_AUTO] = "auto",
[VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
[VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
@@ -839,13 +1079,14 @@ static ssize_t l1tf_show_state(char *buf)
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
(l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
- cpu_smt_control == CPU_SMT_ENABLED))
+ sched_smt_active())) {
return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
l1tf_vmx_states[l1tf_vmx_mitigation]);
+ }
return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
l1tf_vmx_states[l1tf_vmx_mitigation],
- cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+ sched_smt_active() ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
@@ -854,11 +1095,41 @@ static ssize_t l1tf_show_state(char *buf)
}
#endif
+static char *stibp_state(void)
+{
+ if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+ return "";
+
+ switch (spectre_v2_user) {
+ case SPECTRE_V2_USER_NONE:
+ return ", STIBP: disabled";
+ case SPECTRE_V2_USER_STRICT:
+ return ", STIBP: forced";
+ case SPECTRE_V2_USER_STRICT_PREFERRED:
+ return ", STIBP: always-on";
+ case SPECTRE_V2_USER_PRCTL:
+ case SPECTRE_V2_USER_SECCOMP:
+ if (static_key_enabled(&switch_to_cond_stibp))
+ return ", STIBP: conditional";
+ }
+ return "";
+}
+
+static char *ibpb_state(void)
+{
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ if (static_key_enabled(&switch_mm_always_ibpb))
+ return ", IBPB: always-on";
+ if (static_key_enabled(&switch_mm_cond_ibpb))
+ return ", IBPB: conditional";
+ return ", IBPB: disabled";
+ }
+ return "";
+}
+
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
- int ret;
-
if (!boot_cpu_has_bug(bug))
return sprintf(buf, "Not affected\n");
@@ -876,13 +1147,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
case X86_BUG_SPECTRE_V2:
- ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
- boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ ibpb_state(),
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
- (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
+ stibp_state(),
boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
spectre_v2_module_string());
- return ret;
case X86_BUG_SPEC_STORE_BYPASS:
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
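
With ibpb_state() and stibp_state() wired into cpu_show_common(), the sysfs vulnerability file now reports the user-space pieces as well. A small reader; the sample output is purely illustrative, since the exact string depends on CPU and configuration:

    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* e.g. "Mitigation: Full generic retpoline, IBPB: conditional,
         *       IBRS_FW, STIBP: conditional, RSB filling" */
        if (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }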
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index dc1b9342e9c4..c4d1023fb0ab 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -17,6 +17,7 @@
#include <linux/pci.h>
#include <asm/cpufeature.h>
+#include <asm/cacheinfo.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index ffb181f959d2..cb28e98a0659 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -353,7 +353,7 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c)
cr4_set_bits(X86_CR4_UMIP);
- pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n");
+ pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");
return;
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index da5446acc241..5eb946b9a9f3 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -49,9 +49,6 @@ extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void get_cpu_address_sizes(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
-extern u32 get_scattered_cpuid_leaf(unsigned int level,
- unsigned int sub_leaf,
- enum cpuid_regs_idx reg);
extern void init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
extern void init_hygon_cacheinfo(struct cpuinfo_x86 *c);
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mce/Makefile
index bcc7c54c7041..9f020c994154 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mce/Makefile
@@ -1,14 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y = mce.o mce-severity.o mce-genpool.o
+obj-y = core.o severity.o genpool.o
obj-$(CONFIG_X86_ANCIENT_MCE) += winchip.o p5.o
-obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
-obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o
+obj-$(CONFIG_X86_MCE_INTEL) += intel.o
+obj-$(CONFIG_X86_MCE_AMD) += amd.o
obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
+
+mce-inject-y := inject.o
obj-$(CONFIG_X86_MCE_INJECT) += mce-inject.o
obj-$(CONFIG_X86_THERMAL_VECTOR) += therm_throt.o
-obj-$(CONFIG_ACPI_APEI) += mce-apei.o
+obj-$(CONFIG_ACPI_APEI) += apei.o
obj-$(CONFIG_X86_MCELOG_LEGACY) += dev-mcelog.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mce/amd.c
index dd33c357548f..89298c83de53 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -23,12 +23,13 @@
#include <linux/string.h>
#include <asm/amd_nb.h>
+#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
-#include "mce-internal.h"
+#include "internal.h"
#define NR_BLOCKS 5
#define THRESHOLD_MAX 0xFFF
@@ -56,7 +57,7 @@
/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF 0xF000
-static bool thresholding_en;
+static bool thresholding_irq_en;
static const char * const th_names[] = {
"load_store",
@@ -99,7 +100,7 @@ static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
};
-const char *smca_get_name(enum smca_bank_types t)
+static const char *smca_get_name(enum smca_bank_types t)
{
if (t >= N_SMCA_BANK_TYPES)
return NULL;
@@ -534,9 +535,8 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
set_offset:
offset = setup_APIC_mce_threshold(offset, new);
-
- if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
- mce_threshold_vector = amd_threshold_interrupt;
+ if (offset == new)
+ thresholding_irq_en = true;
done:
mce_threshold_block_init(&b, offset);
@@ -825,7 +825,7 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
mce_log(&m);
}
-asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
+asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
@@ -1357,9 +1357,6 @@ int mce_threshold_remove_device(unsigned int cpu)
{
unsigned int bank;
- if (!thresholding_en)
- return 0;
-
for (bank = 0; bank < mca_cfg.banks; ++bank) {
if (!(per_cpu(bank_map, cpu) & (1 << bank)))
continue;
@@ -1377,9 +1374,6 @@ int mce_threshold_create_device(unsigned int cpu)
struct threshold_bank **bp;
int err = 0;
- if (!thresholding_en)
- return 0;
-
bp = per_cpu(threshold_banks, cpu);
if (bp)
return 0;
@@ -1408,9 +1402,6 @@ static __init int threshold_init_device(void)
{
unsigned lcpu = 0;
- if (mce_threshold_vector == amd_threshold_interrupt)
- thresholding_en = true;
-
/* to hit CPUs online before the notifier is up */
for_each_online_cpu(lcpu) {
int err = mce_threshold_create_device(lcpu);
@@ -1419,6 +1410,9 @@ static __init int threshold_init_device(void)
return err;
}
+ if (thresholding_irq_en)
+ mce_threshold_vector = amd_threshold_interrupt;
+
return 0;
}
/*
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mce/apei.c
index 2eee85379689..1d9b3ce662a0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mce/apei.c
@@ -36,7 +36,7 @@
#include <acpi/ghes.h>
#include <asm/mce.h>
-#include "mce-internal.h"
+#include "internal.h"
void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
{
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mce/core.c
index 36d2696c9563..672c7225cb1b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -8,8 +8,6 @@
* Author: Andi Kleen
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
@@ -52,7 +50,7 @@
#include <asm/msr.h>
#include <asm/reboot.h>
-#include "mce-internal.h"
+#include "internal.h"
static DEFINE_MUTEX(mce_log_mutex);
@@ -686,7 +684,7 @@ DEFINE_PER_CPU(unsigned, mce_poll_count);
* errors here. However this would be quite problematic --
* we would need to reimplement the Monarch handling and
* it would mess up the exclusion between exception handler
- * and poll hander -- * so we skip this for now.
+ * and poll handler -- so we skip this for now.
* These cases should not happen anyways, or only when the CPU
 * is already totally confused. In this case it's likely it will
* not fully execute the machine check handler either.
diff --git a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c b/arch/x86/kernel/cpu/mce/dev-mcelog.c
index 27f394ac983f..9690ec5c8051 100644
--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+++ b/arch/x86/kernel/cpu/mce/dev-mcelog.c
@@ -8,14 +8,12 @@
* Author: Andi Kleen
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/poll.h>
-#include "mce-internal.h"
+#include "internal.h"
static BLOCKING_NOTIFIER_HEAD(mce_injector_chain);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mce/genpool.c
index 217cd4449bc9..3395549c51d3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mce/genpool.c
@@ -10,7 +10,7 @@
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/llist.h>
-#include "mce-internal.h"
+#include "internal.h"
/*
* printk() is not safe in MCE context. This is a lock-less memory allocator
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 1fc424c40a31..8492ef7d9015 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -38,7 +38,7 @@
#include <asm/nmi.h>
#include <asm/smp.h>
-#include "mce-internal.h"
+#include "internal.h"
/*
* Collect all the MCi_XXX settings
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mce/intel.c
index d05be307d081..e43eb6732630 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -18,7 +18,7 @@
#include <asm/msr.h>
#include <asm/mce.h>
-#include "mce-internal.h"
+#include "internal.h"
/*
* Support for Intel Correct Machine Check Interrupts. This allows
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mce/internal.h
index ceb67cd5918f..af5eab1e65e2 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mce/internal.h
@@ -2,6 +2,9 @@
#ifndef __X86_MCE_INTERNAL_H__
#define __X86_MCE_INTERNAL_H__
+#undef pr_fmt
+#define pr_fmt(fmt) "mce: " fmt
+
#include <linux/device.h>
#include <asm/mce.h>
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mce/p5.c
index 5cddf831720f..4ae6df556526 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mce/p5.c
@@ -14,6 +14,8 @@
#include <asm/mce.h>
#include <asm/msr.h>
+#include "internal.h"
+
/* By default disabled */
int mce_p5_enabled __read_mostly;
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mce/severity.c
index 44396d521987..dc3e26e905a3 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -16,7 +16,7 @@
#include <asm/mce.h>
#include <linux/uaccess.h>
-#include "mce-internal.h"
+#include "internal.h"
/*
* Grade an mce by severity. In general the most severe ones are processed
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mce/therm_throt.c
index 2da67b70ba98..10a3b0599300 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mce/therm_throt.c
@@ -25,11 +25,14 @@
#include <linux/cpu.h>
#include <asm/processor.h>
+#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
+#include "internal.h"
+
/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL (300 * HZ)
@@ -390,7 +393,7 @@ static void unexpected_thermal_interrupt(void)
static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
-asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r)
+asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mce/threshold.c
index 2b584b319eff..28812cc15300 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mce/threshold.c
@@ -6,10 +6,13 @@
#include <linux/kernel.h>
#include <asm/irq_vectors.h>
+#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
+#include "internal.h"
+
static void default_threshold_interrupt(void)
{
pr_err("Unexpected threshold interrupt at vector %x\n",
@@ -18,7 +21,7 @@ static void default_threshold_interrupt(void)
void (*mce_threshold_vector)(void) = default_threshold_interrupt;
-asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
+asmlinkage __visible void __irq_entry smp_threshold_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mce/winchip.c
index 3b45b270a865..a30ea13cccc2 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mce/winchip.c
@@ -13,6 +13,8 @@
#include <asm/mce.h>
#include <asm/msr.h>
+#include "internal.h"
+
/* Machine check handler for WinChip C6: */
static void winchip_machine_check(struct pt_regs *regs, long error_code)
{
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 07b5fc00b188..51adde0a0f1a 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -5,7 +5,7 @@
* CPUs and later.
*
* Copyright (C) 2008-2011 Advanced Micro Devices Inc.
- * 2013-2016 Borislav Petkov <bp@alien8.de>
+ * 2013-2018 Borislav Petkov <bp@alien8.de>
*
* Author: Peter Oruba <peter.oruba@amd.com>
*
@@ -38,7 +38,10 @@
#include <asm/cpu.h>
#include <asm/msr.h>
-static struct equiv_cpu_entry *equiv_cpu_table;
+static struct equiv_cpu_table {
+ unsigned int num_entries;
+ struct equiv_cpu_entry *entry;
+} equiv_table;
/*
* This points to the current valid container of microcode patches which we will
@@ -63,13 +66,225 @@ static u8 amd_ucode_patch[PATCH_MAX_SIZE];
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
-static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
+static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
{
- for (; equiv_table && equiv_table->installed_cpu; equiv_table++) {
- if (sig == equiv_table->installed_cpu)
- return equiv_table->equiv_cpu;
+ unsigned int i;
+
+ if (!et || !et->num_entries)
+ return 0;
+
+ for (i = 0; i < et->num_entries; i++) {
+ struct equiv_cpu_entry *e = &et->entry[i];
+
+ if (sig == e->installed_cpu)
+ return e->equiv_cpu;
+ }
+ return 0;
+}
+
+/*
+ * Check whether there is a valid microcode container file at the beginning
+ * of @buf of size @buf_size. Set @early to suppress debug output when
+ * calling from the early loading path.
+ */
+static bool verify_container(const u8 *buf, size_t buf_size, bool early)
+{
+ u32 cont_magic;
+
+ if (buf_size <= CONTAINER_HDR_SZ) {
+ if (!early)
+ pr_debug("Truncated microcode container header.\n");
+
+ return false;
+ }
+
+ cont_magic = *(const u32 *)buf;
+ if (cont_magic != UCODE_MAGIC) {
+ if (!early)
+ pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
+
+ return false;
+ }
+
+ return true;
+}
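
For orientation, the header that verify_container() inspects is three little-endian u32s; CONTAINER_HDR_SZ covers exactly these fields. A sketch of the layout as a struct (field names are illustrative; UCODE_MAGIC is 0x00414d44 per asm/microcode_amd.h):

    #include <stdint.h>

    /* Illustrative view of the 12-byte AMD microcode container header. */
    struct amd_ucode_container_hdr {
        uint32_t magic; /* hdr[0]: UCODE_MAGIC */
        uint32_t type;  /* hdr[1]: UCODE_EQUIV_CPU_TABLE_TYPE for the first section */
        uint32_t size;  /* hdr[2]: equivalence table length in bytes */
    };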
+
+/*
+ * Check whether there is a valid, non-truncated CPU equivalence table at the
+ * beginning of @buf of size @buf_size. Set @early to suppress debug output
+ * when calling from the early loading path.
+ */
+static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
+{
+ const u32 *hdr = (const u32 *)buf;
+ u32 cont_type, equiv_tbl_len;
+
+ if (!verify_container(buf, buf_size, early))
+ return false;
+
+ cont_type = hdr[1];
+ if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
+ if (!early)
+ pr_debug("Wrong microcode container equivalence table type: %u.\n",
+ cont_type);
+
+ return false;
+ }
+
+ buf_size -= CONTAINER_HDR_SZ;
+
+ equiv_tbl_len = hdr[2];
+ if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
+ buf_size < equiv_tbl_len) {
+ if (!early)
+ pr_debug("Truncated equivalence table.\n");
+
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Check whether there is a valid, non-truncated microcode patch section at the
+ * beginning of @buf of size @buf_size. Set @early to use this function in the
+ * early path.
+ *
+ * On success, @sh_psize returns the patch size according to the section header,
+ * to the caller.
+ */
+static bool
+__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early)
+{
+ u32 p_type, p_size;
+ const u32 *hdr;
+
+ if (buf_size < SECTION_HDR_SIZE) {
+ if (!early)
+ pr_debug("Truncated patch section.\n");
+
+ return false;
+ }
+
+ hdr = (const u32 *)buf;
+ p_type = hdr[0];
+ p_size = hdr[1];
+
+ if (p_type != UCODE_UCODE_TYPE) {
+ if (!early)
+ pr_debug("Invalid type field (0x%x) in container file section header.\n",
+ p_type);
+
+ return false;
+ }
+
+ if (p_size < sizeof(struct microcode_header_amd)) {
+ if (!early)
+ pr_debug("Patch of size %u too short.\n", p_size);
+
+ return false;
+ }
+
+ *sh_psize = p_size;
+
+ return true;
+}
+
+/*
+ * Check whether the passed remaining file @buf_size is large enough to contain
+ * a patch of the indicated @sh_psize (and also whether this size does not
+ * exceed the per-family maximum). @sh_psize is the size read from the section
+ * header.
+ */
+static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size)
+{
+ u32 max_size;
+
+ if (family >= 0x15)
+ return min_t(u32, sh_psize, buf_size);
+
+#define F1XH_MPB_MAX_SIZE 2048
+#define F14H_MPB_MAX_SIZE 1824
+
+ switch (family) {
+ case 0x10 ... 0x12:
+ max_size = F1XH_MPB_MAX_SIZE;
+ break;
+ case 0x14:
+ max_size = F14H_MPB_MAX_SIZE;
+ break;
+ default:
+ WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
+ return 0;
+ }
+
+ if (sh_psize > min_t(u32, buf_size, max_size))
+ return 0;
+
+ return sh_psize;
+}
+
+/*
+ * Verify the patch in @buf.
+ *
+ * Returns:
+ * negative: on error
+ * positive: patch is not for this family, skip it
+ * 0: success
+ */
+static int
+verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early)
+{
+ struct microcode_header_amd *mc_hdr;
+ unsigned int ret;
+ u32 sh_psize;
+ u16 proc_id;
+ u8 patch_fam;
+
+ if (!__verify_patch_section(buf, buf_size, &sh_psize, early))
+ return -1;
+
+ /*
+ * The size in the section header does not include the section header
+ * itself, but the leftover file length does, so subtract the header
+ * length before checking the remaining buffer against sh_psize below.
+ */
+ buf_size -= SECTION_HDR_SIZE;
+
+ /*
+ * Check if the remaining buffer is big enough to contain a patch of
+ * size sh_psize, as the section claims.
+ */
+ if (buf_size < sh_psize) {
+ if (!early)
+ pr_debug("Patch of size %u truncated.\n", sh_psize);
+
+ return -1;
+ }
+
+ ret = __verify_patch_size(family, sh_psize, buf_size);
+ if (!ret) {
+ if (!early)
+ pr_debug("Per-family patch size mismatch.\n");
+ return -1;
+ }
+
+ *patch_size = sh_psize;
+
+ mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
+ if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
+ if (!early)
+ pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
+ return -1;
}
+ proc_id = mc_hdr->processor_rev_id;
+ patch_fam = 0xf + (proc_id >> 12);
+ if (patch_fam != family)
+ return 1;
+
return 0;
}
@@ -80,26 +295,28 @@ static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
* Returns the amount of bytes consumed while scanning. @desc contains all the
* data we're going to use in later stages of the application.
*/
-static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
+static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
{
- struct equiv_cpu_entry *eq;
- ssize_t orig_size = size;
+ struct equiv_cpu_table table;
+ size_t orig_size = size;
u32 *hdr = (u32 *)ucode;
u16 eq_id;
u8 *buf;
- /* Am I looking at an equivalence table header? */
- if (hdr[0] != UCODE_MAGIC ||
- hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE ||
- hdr[2] == 0)
- return CONTAINER_HDR_SZ;
+ if (!verify_equivalence_table(ucode, size, true))
+ return 0;
buf = ucode;
- eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
+ table.entry = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
+ table.num_entries = hdr[2] / sizeof(struct equiv_cpu_entry);
- /* Find the equivalence ID of our CPU in this table: */
- eq_id = find_equiv_id(eq, desc->cpuid_1_eax);
+ /*
+ * Find the equivalence ID of our CPU in this table. Even if this table
+ * doesn't contain a patch for the CPU, scan through the whole container
+ * so that it can be skipped in case there are other containers appended.
+ */
+ eq_id = find_equiv_id(&table, desc->cpuid_1_eax);
buf += hdr[2] + CONTAINER_HDR_SZ;
size -= hdr[2] + CONTAINER_HDR_SZ;
@@ -111,29 +328,29 @@ static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
while (size > 0) {
struct microcode_amd *mc;
u32 patch_size;
+ int ret;
+
+ ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true);
+ if (ret < 0) {
+ /*
+ * Patch verification failed, skip to the next
+ * container, if there's one:
+ */
+ goto out;
+ } else if (ret > 0) {
+ goto skip;
+ }
- hdr = (u32 *)buf;
-
- if (hdr[0] != UCODE_UCODE_TYPE)
- break;
-
- /* Sanity-check patch size. */
- patch_size = hdr[1];
- if (patch_size > PATCH_MAX_SIZE)
- break;
-
- /* Skip patch section header: */
- buf += SECTION_HDR_SIZE;
- size -= SECTION_HDR_SIZE;
-
- mc = (struct microcode_amd *)buf;
+ mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
if (eq_id == mc->hdr.processor_rev_id) {
desc->psize = patch_size;
desc->mc = mc;
}
- buf += patch_size;
- size -= patch_size;
+skip:
+ /* Skip patch section header too: */
+ buf += patch_size + SECTION_HDR_SIZE;
+ size -= patch_size + SECTION_HDR_SIZE;
}
/*
@@ -150,6 +367,7 @@ static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
return 0;
}
+out:
return orig_size - size;
}
@@ -159,15 +377,18 @@ static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
*/
static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
{
- ssize_t rem = size;
-
- while (rem >= 0) {
- ssize_t s = parse_container(ucode, rem, desc);
+ while (size) {
+ size_t s = parse_container(ucode, size, desc);
if (!s)
return;
- ucode += s;
- rem -= s;
+ /* catch wraparound */
+ if (size >= s) {
+ ucode += s;
+ size -= s;
+ } else {
+ return;
+ }
}
}
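
The "/* catch wraparound */" guard matters because both operands are unsigned: if parse_container() ever reported more bytes consumed than remain, size -= s would wrap to a huge value and the loop would walk off the end of the buffer. A standalone demonstration of the hazard:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t size = 16, s = 24;

        /* Unsigned subtraction never goes negative; it wraps around. */
        size -= s;
        printf("%zu\n", size); /* 18446744073709551608 on LP64 */
        return 0;
    }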
@@ -364,21 +585,7 @@ void reload_ucode_amd(void)
static u16 __find_equiv_id(unsigned int cpu)
{
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
- return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
-}
-
-static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
-{
- int i = 0;
-
- BUG_ON(!equiv_cpu_table);
-
- while (equiv_cpu_table[i].equiv_cpu != 0) {
- if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
- return equiv_cpu_table[i].installed_cpu;
- i++;
- }
- return 0;
+ return find_equiv_id(&equiv_table, uci->cpu_sig.sig);
}
/*
@@ -461,43 +668,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
return 0;
}
-static unsigned int verify_patch_size(u8 family, u32 patch_size,
- unsigned int size)
-{
- u32 max_size;
-
-#define F1XH_MPB_MAX_SIZE 2048
-#define F14H_MPB_MAX_SIZE 1824
-#define F15H_MPB_MAX_SIZE 4096
-#define F16H_MPB_MAX_SIZE 3458
-#define F17H_MPB_MAX_SIZE 3200
-
- switch (family) {
- case 0x14:
- max_size = F14H_MPB_MAX_SIZE;
- break;
- case 0x15:
- max_size = F15H_MPB_MAX_SIZE;
- break;
- case 0x16:
- max_size = F16H_MPB_MAX_SIZE;
- break;
- case 0x17:
- max_size = F17H_MPB_MAX_SIZE;
- break;
- default:
- max_size = F1XH_MPB_MAX_SIZE;
- break;
- }
-
- if (patch_size > min_t(u32, size, max_size)) {
- pr_err("patch size mismatch\n");
- return 0;
- }
-
- return patch_size;
-}
-
static enum ucode_state apply_microcode_amd(int cpu)
{
struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -548,34 +718,34 @@ out:
return ret;
}
-static int install_equiv_cpu_table(const u8 *buf)
+static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
{
- unsigned int *ibuf = (unsigned int *)buf;
- unsigned int type = ibuf[1];
- unsigned int size = ibuf[2];
+ u32 equiv_tbl_len;
+ const u32 *hdr;
- if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
- pr_err("empty section/"
- "invalid type field in container file section header\n");
- return -EINVAL;
- }
+ if (!verify_equivalence_table(buf, buf_size, false))
+ return 0;
+
+ hdr = (const u32 *)buf;
+ equiv_tbl_len = hdr[2];
- equiv_cpu_table = vmalloc(size);
- if (!equiv_cpu_table) {
+ equiv_table.entry = vmalloc(equiv_tbl_len);
+ if (!equiv_table.entry) {
pr_err("failed to allocate equivalent CPU table\n");
- return -ENOMEM;
+ return 0;
}
- memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);
+ memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
+ equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);
/* add header length */
- return size + CONTAINER_HDR_SZ;
+ return equiv_tbl_len + CONTAINER_HDR_SZ;
}
static void free_equiv_cpu_table(void)
{
- vfree(equiv_cpu_table);
- equiv_cpu_table = NULL;
+ vfree(equiv_table.entry);
+ memset(&equiv_table, 0, sizeof(equiv_table));
}
static void cleanup(void)
@@ -585,47 +755,23 @@ static void cleanup(void)
}
/*
- * We return the current size even if some of the checks failed so that
+ * Return a non-negative value even if some of the checks failed so that
* we can skip over the next patch. If we return a negative value, we
 * signal a grave error, such as a failed memory allocation, and the
* driver cannot continue functioning normally. In such cases, we tear
* down everything we've used up so far and exit.
*/
-static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
+static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
+ unsigned int *patch_size)
{
struct microcode_header_amd *mc_hdr;
struct ucode_patch *patch;
- unsigned int patch_size, crnt_size, ret;
- u32 proc_fam;
u16 proc_id;
+ int ret;
- patch_size = *(u32 *)(fw + 4);
- crnt_size = patch_size + SECTION_HDR_SIZE;
- mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
- proc_id = mc_hdr->processor_rev_id;
-
- proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
- if (!proc_fam) {
- pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
- return crnt_size;
- }
-
- /* check if patch is for the current family */
- proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
- if (proc_fam != family)
- return crnt_size;
-
- if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
- pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
- mc_hdr->patch_id);
- return crnt_size;
- }
-
- ret = verify_patch_size(family, patch_size, leftover);
- if (!ret) {
- pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
- return crnt_size;
- }
+ ret = verify_patch(family, fw, leftover, patch_size, false);
+ if (ret)
+ return ret;
patch = kzalloc(sizeof(*patch), GFP_KERNEL);
if (!patch) {
@@ -633,13 +779,16 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
return -EINVAL;
}
- patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
+ patch->data = kmemdup(fw + SECTION_HDR_SIZE, *patch_size, GFP_KERNEL);
if (!patch->data) {
pr_err("Patch data allocation failure.\n");
kfree(patch);
return -EINVAL;
}
+ mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
+ proc_id = mc_hdr->processor_rev_id;
+
INIT_LIST_HEAD(&patch->plist);
patch->patch_id = mc_hdr->patch_id;
patch->equiv_cpu = proc_id;
@@ -650,39 +799,38 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
/* ... and add to cache. */
update_cache(patch);
- return crnt_size;
+ return 0;
}
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
size_t size)
{
- enum ucode_state ret = UCODE_ERROR;
- unsigned int leftover;
u8 *fw = (u8 *)data;
- int crnt_size = 0;
- int offset;
+ size_t offset;
- offset = install_equiv_cpu_table(data);
- if (offset < 0) {
- pr_err("failed to create equivalent cpu table\n");
- return ret;
- }
- fw += offset;
- leftover = size - offset;
+ offset = install_equiv_cpu_table(data, size);
+ if (!offset)
+ return UCODE_ERROR;
+
+ fw += offset;
+ size -= offset;
if (*(u32 *)fw != UCODE_UCODE_TYPE) {
pr_err("invalid type field in container file section header\n");
free_equiv_cpu_table();
- return ret;
+ return UCODE_ERROR;
}
- while (leftover) {
- crnt_size = verify_and_add_patch(family, fw, leftover);
- if (crnt_size < 0)
- return ret;
+ while (size > 0) {
+ unsigned int crnt_size = 0;
+ int ret;
- fw += crnt_size;
- leftover -= crnt_size;
+ ret = verify_and_add_patch(family, fw, size, &crnt_size);
+ if (ret < 0)
+ return UCODE_ERROR;
+
+ fw += crnt_size + SECTION_HDR_SIZE;
+ size -= (crnt_size + SECTION_HDR_SIZE);
}
return UCODE_OK;
@@ -761,10 +909,8 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
}
ret = UCODE_ERROR;
- if (*(u32 *)fw->data != UCODE_MAGIC) {
- pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
+ if (!verify_container(fw->data, fw->size, false))
goto fw_release;
- }
ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 2e173d47b450..4d36dcc1cf87 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -165,6 +165,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
struct mtrr_gentry gentry;
void __user *arg = (void __user *) __arg;
+ memset(&gentry, 0, sizeof(gentry));
+
switch (cmd) {
case MTRRIOC_ADD_ENTRY:
case MTRRIOC_SET_ENTRY:
diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile
new file mode 100644
index 000000000000..6895049ceef7
--- /dev/null
+++ b/arch/x86/kernel/cpu/resctrl/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_RESCTRL) += core.o rdtgroup.o monitor.o
+obj-$(CONFIG_RESCTRL) += ctrlmondata.o pseudo_lock.o
+CFLAGS_pseudo_lock.o = -I$(src)
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/resctrl/core.c
index 44272b7107ad..c3a9dc63edf2 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -22,7 +22,7 @@
* Software Developer Manual June 2016, volume 3, section 17.17.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt) "resctrl: " fmt
#include <linux/slab.h>
#include <linux/err.h>
@@ -30,22 +30,19 @@
#include <linux/cpuhotplug.h>
#include <asm/intel-family.h>
-#include <asm/intel_rdt_sched.h>
-#include "intel_rdt.h"
-
-#define MBA_IS_LINEAR 0x4
-#define MBA_MAX_MBPS U32_MAX
+#include <asm/resctrl_sched.h>
+#include "internal.h"
/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);
/*
- * The cached intel_pqr_state is strictly per CPU and can never be
+ * The cached resctrl_pqr_state is strictly per CPU and can never be
* updated from a remote CPU. Functions which modify the state
* are called with interrupts disabled and no preemption, which
* is sufficient for the protection.
*/
-DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
+DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
/*
* Used to store the max resource name width and max resource data width
@@ -60,9 +57,13 @@ int max_name_width, max_data_width;
bool rdt_alloc_capable;
static void
-mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
+ struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
+ struct rdt_resource *r);
#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
@@ -72,7 +73,7 @@ struct rdt_resource rdt_resources_all[] = {
.rid = RDT_RESOURCE_L3,
.name = "L3",
.domains = domain_init(RDT_RESOURCE_L3),
- .msr_base = IA32_L3_CBM_BASE,
+ .msr_base = MSR_IA32_L3_CBM_BASE,
.msr_update = cat_wrmsr,
.cache_level = 3,
.cache = {
@@ -89,7 +90,7 @@ struct rdt_resource rdt_resources_all[] = {
.rid = RDT_RESOURCE_L3DATA,
.name = "L3DATA",
.domains = domain_init(RDT_RESOURCE_L3DATA),
- .msr_base = IA32_L3_CBM_BASE,
+ .msr_base = MSR_IA32_L3_CBM_BASE,
.msr_update = cat_wrmsr,
.cache_level = 3,
.cache = {
@@ -106,7 +107,7 @@ struct rdt_resource rdt_resources_all[] = {
.rid = RDT_RESOURCE_L3CODE,
.name = "L3CODE",
.domains = domain_init(RDT_RESOURCE_L3CODE),
- .msr_base = IA32_L3_CBM_BASE,
+ .msr_base = MSR_IA32_L3_CBM_BASE,
.msr_update = cat_wrmsr,
.cache_level = 3,
.cache = {
@@ -123,7 +124,7 @@ struct rdt_resource rdt_resources_all[] = {
.rid = RDT_RESOURCE_L2,
.name = "L2",
.domains = domain_init(RDT_RESOURCE_L2),
- .msr_base = IA32_L2_CBM_BASE,
+ .msr_base = MSR_IA32_L2_CBM_BASE,
.msr_update = cat_wrmsr,
.cache_level = 2,
.cache = {
@@ -140,7 +141,7 @@ struct rdt_resource rdt_resources_all[] = {
.rid = RDT_RESOURCE_L2DATA,
.name = "L2DATA",
.domains = domain_init(RDT_RESOURCE_L2DATA),
- .msr_base = IA32_L2_CBM_BASE,
+ .msr_base = MSR_IA32_L2_CBM_BASE,
.msr_update = cat_wrmsr,
.cache_level = 2,
.cache = {
@@ -157,7 +158,7 @@ struct rdt_resource rdt_resources_all[] = {
.rid = RDT_RESOURCE_L2CODE,
.name = "L2CODE",
.domains = domain_init(RDT_RESOURCE_L2CODE),
- .msr_base = IA32_L2_CBM_BASE,
+ .msr_base = MSR_IA32_L2_CBM_BASE,
.msr_update = cat_wrmsr,
.cache_level = 2,
.cache = {
@@ -174,10 +175,7 @@ struct rdt_resource rdt_resources_all[] = {
.rid = RDT_RESOURCE_MBA,
.name = "MB",
.domains = domain_init(RDT_RESOURCE_MBA),
- .msr_base = IA32_MBA_THRTL_BASE,
- .msr_update = mba_wrmsr,
.cache_level = 3,
- .parse_ctrlval = parse_bw,
.format_str = "%d=%*u",
.fflags = RFTYPE_RES_MB,
},
@@ -211,9 +209,10 @@ static inline void cache_alloc_hsw_probe(void)
struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
u32 l, h, max_cbm = BIT_MASK(20) - 1;
- if (wrmsr_safe(IA32_L3_CBM_BASE, max_cbm, 0))
+ if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
return;
- rdmsr(IA32_L3_CBM_BASE, l, h);
+
+ rdmsr(MSR_IA32_L3_CBM_BASE, l, h);
/* If all the bits were set in MSR, return success */
if (l != max_cbm)
@@ -259,7 +258,7 @@ static inline bool rdt_get_mb_table(struct rdt_resource *r)
return false;
}
-static bool rdt_get_mem_config(struct rdt_resource *r)
+static bool __get_mem_config_intel(struct rdt_resource *r)
{
union cpuid_0x10_3_eax eax;
union cpuid_0x10_x_edx edx;
@@ -285,6 +284,30 @@ static bool rdt_get_mem_config(struct rdt_resource *r)
return true;
}
+static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
+{
+ union cpuid_0x10_3_eax eax;
+ union cpuid_0x10_x_edx edx;
+ u32 ebx, ecx;
+
+ cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
+ r->num_closid = edx.split.cos_max + 1;
+ r->default_ctrl = MAX_MBA_BW_AMD;
+
+ /* AMD does not use delay */
+ r->membw.delay_linear = false;
+
+ r->membw.min_bw = 0;
+ r->membw.bw_gran = 1;
+ /* Max value is 2048, so the data width is 4 decimal digits. */
+ r->data_width = 4;
+
+ r->alloc_capable = true;
+ r->alloc_enabled = true;
+
+ return true;
+}
+
static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
union cpuid_0x10_1_eax eax;
@@ -344,6 +367,15 @@ static int get_cache_id(int cpu, int level)
return -1;
}
+static void
+mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+{
+ unsigned int i;
+
+ for (i = m->low; i < m->high; i++)
+ wrmsrl(r->msr_base + i, d->ctrl_val[i]);
+}
+
/*
* Map the memory b/w percentage value to delay values
* that can be written to QOS_MSRs.
@@ -359,7 +391,8 @@ u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
}
static void
-mba_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
+mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
+ struct rdt_resource *r)
{
unsigned int i;
@@ -421,7 +454,7 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
struct list_head *l;
if (id < 0)
- return ERR_PTR(id);
+ return ERR_PTR(-ENODEV);
list_for_each(l, &r->domains) {
d = list_entry(l, struct rdt_domain, list);
@@ -639,7 +672,7 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r)
static void clear_closid_rmid(int cpu)
{
- struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+ struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
state->default_closid = 0;
state->default_rmid = 0;
@@ -648,7 +681,7 @@ static void clear_closid_rmid(int cpu)
wrmsr(IA32_PQR_ASSOC, 0, 0);
}
-static int intel_rdt_online_cpu(unsigned int cpu)
+static int resctrl_online_cpu(unsigned int cpu)
{
struct rdt_resource *r;
@@ -674,7 +707,7 @@ static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
}
}
-static int intel_rdt_offline_cpu(unsigned int cpu)
+static int resctrl_offline_cpu(unsigned int cpu)
{
struct rdtgroup *rdtgrp;
struct rdt_resource *r;
@@ -794,6 +827,19 @@ static bool __init rdt_cpu_has(int flag)
return ret;
}
+static __init bool get_mem_config(void)
+{
+ if (!rdt_cpu_has(X86_FEATURE_MBA))
+ return false;
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);
+
+ return false;
+}
+
static __init bool get_rdt_alloc_resources(void)
{
bool ret = false;
@@ -818,10 +864,9 @@ static __init bool get_rdt_alloc_resources(void)
ret = true;
}
- if (rdt_cpu_has(X86_FEATURE_MBA)) {
- if (rdt_get_mem_config(&rdt_resources_all[RDT_RESOURCE_MBA]))
- ret = true;
- }
+ if (get_mem_config())
+ ret = true;
+
return ret;
}
@@ -840,7 +885,7 @@ static __init bool get_rdt_mon_resources(void)
return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
}
-static __init void rdt_quirks(void)
+static __init void __check_quirks_intel(void)
{
switch (boot_cpu_data.x86_model) {
case INTEL_FAM6_HASWELL_X:
@@ -855,30 +900,91 @@ static __init void rdt_quirks(void)
}
}
+static __init void check_quirks(void)
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ __check_quirks_intel();
+}
+
static __init bool get_rdt_resources(void)
{
- rdt_quirks();
rdt_alloc_capable = get_rdt_alloc_resources();
rdt_mon_capable = get_rdt_mon_resources();
return (rdt_mon_capable || rdt_alloc_capable);
}
+static __init void rdt_init_res_defs_intel(void)
+{
+ struct rdt_resource *r;
+
+ for_each_rdt_resource(r) {
+ if (r->rid == RDT_RESOURCE_L3 ||
+ r->rid == RDT_RESOURCE_L3DATA ||
+ r->rid == RDT_RESOURCE_L3CODE ||
+ r->rid == RDT_RESOURCE_L2 ||
+ r->rid == RDT_RESOURCE_L2DATA ||
+ r->rid == RDT_RESOURCE_L2CODE)
+ r->cbm_validate = cbm_validate_intel;
+ else if (r->rid == RDT_RESOURCE_MBA) {
+ r->msr_base = MSR_IA32_MBA_THRTL_BASE;
+ r->msr_update = mba_wrmsr_intel;
+ r->parse_ctrlval = parse_bw_intel;
+ }
+ }
+}
+
+static __init void rdt_init_res_defs_amd(void)
+{
+ struct rdt_resource *r;
+
+ for_each_rdt_resource(r) {
+ if (r->rid == RDT_RESOURCE_L3 ||
+ r->rid == RDT_RESOURCE_L3DATA ||
+ r->rid == RDT_RESOURCE_L3CODE ||
+ r->rid == RDT_RESOURCE_L2 ||
+ r->rid == RDT_RESOURCE_L2DATA ||
+ r->rid == RDT_RESOURCE_L2CODE)
+ r->cbm_validate = cbm_validate_amd;
+ else if (r->rid == RDT_RESOURCE_MBA) {
+ r->msr_base = MSR_IA32_MBA_BW_BASE;
+ r->msr_update = mba_wrmsr_amd;
+ r->parse_ctrlval = parse_bw_amd;
+ }
+ }
+}
+
+static __init void rdt_init_res_defs(void)
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ rdt_init_res_defs_intel();
+ else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ rdt_init_res_defs_amd();
+}
+
static enum cpuhp_state rdt_online;
-static int __init intel_rdt_late_init(void)
+static int __init resctrl_late_init(void)
{
struct rdt_resource *r;
int state, ret;
+ /*
+ * Initialize the functions (or definitions) that differ
+ * between vendors here.
+ */
+ rdt_init_res_defs();
+
+ check_quirks();
+
if (!get_rdt_resources())
return -ENODEV;
rdt_init_padding();
state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
- "x86/rdt/cat:online:",
- intel_rdt_online_cpu, intel_rdt_offline_cpu);
+ "x86/resctrl/cat:online:",
+ resctrl_online_cpu, resctrl_offline_cpu);
if (state < 0)
return state;
@@ -890,20 +996,20 @@ static int __init intel_rdt_late_init(void)
rdt_online = state;
for_each_alloc_capable_rdt_resource(r)
- pr_info("Intel RDT %s allocation detected\n", r->name);
+ pr_info("%s allocation detected\n", r->name);
for_each_mon_capable_rdt_resource(r)
- pr_info("Intel RDT %s monitoring detected\n", r->name);
+ pr_info("%s monitoring detected\n", r->name);
return 0;
}
-late_initcall(intel_rdt_late_init);
+late_initcall(resctrl_late_init);
-static void __exit intel_rdt_exit(void)
+static void __exit resctrl_exit(void)
{
cpuhp_remove_state(rdt_online);
rdtgroup_exit();
}
-__exitcall(intel_rdt_exit);
+__exitcall(resctrl_exit);
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 27937458c231..2dbd990a2eb7 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -23,10 +23,58 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include "intel_rdt.h"
+#include "internal.h"
+
+/*
+ * Check whether the MBA bandwidth value is correct. The value is checked
+ * against the minimum and maximum bandwidth values specified by the
+ * hardware. The allocated bandwidth is rounded up to the next control step
+ * available on the hardware.
+ */
+static bool bw_validate_amd(char *buf, unsigned long *data,
+ struct rdt_resource *r)
+{
+ unsigned long bw;
+ int ret;
+
+ ret = kstrtoul(buf, 10, &bw);
+ if (ret) {
+ rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
+ return false;
+ }
+
+ if (bw < r->membw.min_bw || bw > r->default_ctrl) {
+ rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+ r->membw.min_bw, r->default_ctrl);
+ return false;
+ }
+
+ *data = roundup(bw, (unsigned long)r->membw.bw_gran);
+ return true;
+}
+
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d)
+{
+ unsigned long bw_val;
+
+ if (d->have_new_ctrl) {
+ rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
+ return -EINVAL;
+ }
+
+ if (!bw_validate_amd(data->buf, &bw_val, r))
+ return -EINVAL;
+
+ d->new_ctrl = bw_val;
+ d->have_new_ctrl = true;
+
+ return 0;
+}
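
On AMD the MB resource thus takes an absolute bandwidth value (up to the 2048 default_ctrl, rounded up to bw_gran) rather than a percentage. A hedged sketch of feeding one through the resctrl filesystem, assuming resctrl is mounted and a group named grp0 exists:

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical group path; adjust to a group you created. */
        FILE *f = fopen("/sys/fs/resctrl/grp0/schemata", "w");

        if (!f) {
            perror("fopen");
            return 1;
        }
        /* Domain 0 gets bandwidth value 1024; parse_bw_amd() above
         * validates the range and rounds to the control step. */
        fprintf(f, "MB:0=1024\n");
        return fclose(f) ? 1 : 0;
    }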
/*
* Check whether MBA bandwidth percentage value is correct. The value is
@@ -64,13 +112,13 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
- struct rdt_domain *d)
+int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d)
{
unsigned long bw_val;
if (d->have_new_ctrl) {
- rdt_last_cmd_printf("duplicate domain %d\n", d->id);
+ rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
return -EINVAL;
}
@@ -88,7 +136,7 @@ int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
* are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
* Additionally Haswell requires at least two bits set.
*/
-static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
+bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r)
{
unsigned long first_bit, zero_bit, val;
unsigned int cbm_len = r->cache.cbm_len;
@@ -96,12 +144,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
ret = kstrtoul(buf, 16, &val);
if (ret) {
- rdt_last_cmd_printf("non-hex character in mask %s\n", buf);
+ rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
return false;
}
if (val == 0 || val > r->default_ctrl) {
- rdt_last_cmd_puts("mask out of range\n");
+ rdt_last_cmd_puts("Mask out of range\n");
return false;
}
@@ -109,12 +157,12 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
if (find_next_bit(&val, cbm_len, zero_bit) < cbm_len) {
- rdt_last_cmd_printf("mask %lx has non-consecutive 1-bits\n", val);
+ rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
return false;
}
if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
- rdt_last_cmd_printf("Need at least %d bits in mask\n",
+ rdt_last_cmd_printf("Need at least %d bits in the mask\n",
r->cache.min_cbm_bits);
return false;
}
@@ -124,6 +172,30 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
}
/*
+ * Check whether a cache bit mask is valid. AMD allows non-contiguous
+ * bitmasks.
+ */
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r)
+{
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul(buf, 16, &val);
+ if (ret) {
+ rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
+ return false;
+ }
+
+ if (val > r->default_ctrl) {
+ rdt_last_cmd_puts("Mask out of range\n");
+ return false;
+ }
+
+ *data = val;
+ return true;
+}
+
+/*
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
*/
@@ -134,7 +206,7 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
u32 cbm_val;
if (d->have_new_ctrl) {
- rdt_last_cmd_printf("duplicate domain %d\n", d->id);
+ rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
return -EINVAL;
}
@@ -144,17 +216,17 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
*/
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
rdtgroup_pseudo_locked_in_hierarchy(d)) {
- rdt_last_cmd_printf("pseudo-locked region in hierarchy\n");
+ rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
return -EINVAL;
}
- if (!cbm_validate(data->buf, &cbm_val, r))
+ if (!r->cbm_validate(data->buf, &cbm_val, r))
return -EINVAL;
if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
rdtgrp->mode == RDT_MODE_SHAREABLE) &&
rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
- rdt_last_cmd_printf("CBM overlaps with pseudo-locked region\n");
+ rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
return -EINVAL;
}
@@ -163,14 +235,14 @@ int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
* either is exclusive.
*/
if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, true)) {
- rdt_last_cmd_printf("overlaps with exclusive group\n");
+ rdt_last_cmd_puts("Overlaps with exclusive group\n");
return -EINVAL;
}
if (rdtgroup_cbm_overlaps(r, d, cbm_val, rdtgrp->closid, false)) {
if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
- rdt_last_cmd_printf("overlaps with other group\n");
+ rdt_last_cmd_puts("Overlaps with other group\n");
return -EINVAL;
}
}
@@ -292,7 +364,7 @@ static int rdtgroup_parse_resource(char *resname, char *tok,
if (!strcmp(resname, r->name) && rdtgrp->closid < r->num_closid)
return parse_line(tok, r, rdtgrp);
}
- rdt_last_cmd_printf("unknown/unsupported resource name '%s'\n", resname);
+ rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
return -EINVAL;
}
@@ -310,9 +382,11 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
return -EINVAL;
buf[nbytes - 1] = '\0';
+ cpus_read_lock();
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (!rdtgrp) {
rdtgroup_kn_unlock(of->kn);
+ cpus_read_unlock();
return -ENOENT;
}
rdt_last_cmd_clear();
@@ -323,7 +397,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
*/
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
ret = -EINVAL;
- rdt_last_cmd_puts("resource group is pseudo-locked\n");
+ rdt_last_cmd_puts("Resource group is pseudo-locked\n");
goto out;
}
@@ -367,6 +441,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
out:
rdtgroup_kn_unlock(of->kn);
+ cpus_read_unlock();
return ret ?: nbytes;
}
@@ -463,7 +538,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
r = &rdt_resources_all[resid];
d = rdt_find_domain(r, domid, NULL);
- if (!d) {
+ if (IS_ERR_OR_NULL(d)) {
ret = -ENOENT;
goto out;
}
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 3736f6dc9545..822b7db634ee 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -1,20 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_INTEL_RDT_H
-#define _ASM_X86_INTEL_RDT_H
+#ifndef _ASM_X86_RESCTRL_INTERNAL_H
+#define _ASM_X86_RESCTRL_INTERNAL_H
#include <linux/sched.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
-#define IA32_L3_QOS_CFG 0xc81
-#define IA32_L2_QOS_CFG 0xc82
-#define IA32_L3_CBM_BASE 0xc90
-#define IA32_L2_CBM_BASE 0xd10
-#define IA32_MBA_THRTL_BASE 0xd50
+#define MSR_IA32_L3_QOS_CFG 0xc81
+#define MSR_IA32_L2_QOS_CFG 0xc82
+#define MSR_IA32_L3_CBM_BASE 0xc90
+#define MSR_IA32_L2_CBM_BASE 0xd10
+#define MSR_IA32_MBA_THRTL_BASE 0xd50
+#define MSR_IA32_MBA_BW_BASE 0xc0000200
-#define L3_QOS_CDP_ENABLE 0x01ULL
+#define MSR_IA32_QM_CTR 0x0c8e
+#define MSR_IA32_QM_EVTSEL 0x0c8d
-#define L2_QOS_CDP_ENABLE 0x01ULL
+#define L3_QOS_CDP_ENABLE 0x01ULL
+
+#define L2_QOS_CDP_ENABLE 0x01ULL
/*
* Event IDs are used to program IA32_QM_EVTSEL before reading event
@@ -29,6 +33,9 @@
#define MBM_CNTR_WIDTH 24
#define MBM_OVERFLOW_INTERVAL 1000
#define MAX_MBA_BW 100u
+#define MBA_IS_LINEAR 0x4
+#define MBA_MAX_MBPS U32_MAX
+#define MAX_MBA_BW_AMD 0x800
#define RMID_VAL_ERROR BIT_ULL(63)
#define RMID_VAL_UNAVAIL BIT_ULL(62)
@@ -69,7 +76,7 @@ struct rmid_read {
u64 val;
};
-extern unsigned int intel_cqm_threshold;
+extern unsigned int resctrl_cqm_threshold;
extern bool rdt_alloc_capable;
extern bool rdt_mon_capable;
extern unsigned int rdt_mon_features;
@@ -391,9 +398,9 @@ struct rdt_parse_data {
* struct rdt_resource - attributes of an RDT resource
* @rid: The index of the resource
* @alloc_enabled: Is allocation enabled on this machine
- * @mon_enabled: Is monitoring enabled for this feature
+ * @mon_enabled: Is monitoring enabled for this feature
* @alloc_capable: Is allocation available on this machine
- * @mon_capable: Is monitor feature available on this machine
+ * @mon_capable: Is monitor feature available on this machine
* @name: Name to use in "schemata" file
* @num_closid: Number of CLOSIDs available
* @cache_level: Which cache level defines scope of this resource
@@ -405,10 +412,11 @@ struct rdt_parse_data {
* @cache: Cache allocation related data
* @format_str: Per resource format string to show domain value
* @parse_ctrlval: Per resource function pointer to parse control values
- * @evt_list: List of monitoring events
- * @num_rmid: Number of RMIDs available
- * @mon_scale: cqm counter * mon_scale = occupancy in bytes
- * @fflags: flags to choose base and info files
+ * @cbm_validate:	Cache bitmask validate function
+ * @evt_list: List of monitoring events
+ * @num_rmid: Number of RMIDs available
+ * @mon_scale: cqm counter * mon_scale = occupancy in bytes
+ * @fflags: flags to choose base and info files
*/
struct rdt_resource {
int rid;
@@ -431,6 +439,7 @@ struct rdt_resource {
int (*parse_ctrlval)(struct rdt_parse_data *data,
struct rdt_resource *r,
struct rdt_domain *d);
+ bool (*cbm_validate)(char *buf, u32 *data, struct rdt_resource *r);
struct list_head evt_list;
int num_rmid;
unsigned int mon_scale;
@@ -439,8 +448,10 @@ struct rdt_resource {
int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
struct rdt_domain *d);
-int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
- struct rdt_domain *d);
+int parse_bw_intel(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d);
+int parse_bw_amd(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d);
extern struct mutex rdtgroup_mutex;
@@ -463,6 +474,10 @@ enum {
RDT_NUM_RESOURCES,
};
+#define for_each_rdt_resource(r) \
+ for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
+ r++)
+
#define for_each_capable_rdt_resource(r) \
for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
r++) \
@@ -567,5 +582,7 @@ void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms);
void cqm_handle_limbo(struct work_struct *work);
bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d);
void __check_limbo(struct rdt_domain *d, bool force_free);
+bool cbm_validate_intel(char *buf, u32 *data, struct rdt_resource *r);
+bool cbm_validate_amd(char *buf, u32 *data, struct rdt_resource *r);
-#endif /* _ASM_X86_INTEL_RDT_H */
+#endif /* _ASM_X86_RESCTRL_INTERNAL_H */
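The new ->cbm_validate() member makes mask validation a per-vendor hook alongside ->parse_ctrlval(). A hypothetical initializer showing how a cache resource would be wired — field values are illustrative, not the real rdt_resources_all[] table:

    struct rdt_resource l3_example = {
            .name           = "L3",
            .parse_ctrlval  = parse_cbm,
            .cbm_validate   = cbm_validate_intel, /* cbm_validate_amd on AMD */
    };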
diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index b0f3aed76b75..f33f11f69078 100644
--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -26,10 +26,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
-#include "intel_rdt.h"
-
-#define MSR_IA32_QM_CTR 0x0c8e
-#define MSR_IA32_QM_EVTSEL 0x0c8d
+#include "internal.h"
struct rmid_entry {
u32 rmid;
@@ -73,7 +70,7 @@ unsigned int rdt_mon_features;
* This is the threshold cache occupancy at which we will consider an
* RMID available for re-allocation.
*/
-unsigned int intel_cqm_threshold;
+unsigned int resctrl_cqm_threshold;
static inline struct rmid_entry *__rmid_entry(u32 rmid)
{
@@ -107,7 +104,7 @@ static bool rmid_dirty(struct rmid_entry *entry)
{
u64 val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
- return val >= intel_cqm_threshold;
+ return val >= resctrl_cqm_threshold;
}
/*
@@ -187,7 +184,7 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
list_for_each_entry(d, &r->domains, list) {
if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
val = __rmid_read(entry->rmid, QOS_L3_OCCUP_EVENT_ID);
- if (val <= intel_cqm_threshold)
+ if (val <= resctrl_cqm_threshold)
continue;
}
@@ -625,6 +622,7 @@ static void l3_mon_evt_init(struct rdt_resource *r)
int rdt_get_mon_l3_config(struct rdt_resource *r)
{
+ unsigned int cl_size = boot_cpu_data.x86_cache_size;
int ret;
r->mon_scale = boot_cpu_data.x86_cache_occ_scale;
@@ -637,10 +635,10 @@ int rdt_get_mon_l3_config(struct rdt_resource *r)
*
* For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
*/
- intel_cqm_threshold = boot_cpu_data.x86_cache_size * 1024 / r->num_rmid;
+ resctrl_cqm_threshold = cl_size * 1024 / r->num_rmid;
/* h/w works in units of "boot_cpu_data.x86_cache_occ_scale" */
- intel_cqm_threshold /= r->mon_scale;
+ resctrl_cqm_threshold /= r->mon_scale;
ret = dom_data_init(r);
if (ret)
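The threshold arithmetic is easiest to check with the numbers from the comment above (35MB LLC, 56 RMIDs); the 64KB occupancy scale is an assumed, typical value:

    unsigned int cl_size = 35 * 1024;      /* x86_cache_size, in KB */
    unsigned int num_rmid = 56;
    unsigned int mon_scale = 64 * 1024;    /* assumed x86_cache_occ_scale */
    unsigned int thr;

    thr = cl_size * 1024 / num_rmid;       /* 655360 bytes, ~1.8% of the LLC */
    thr /= mon_scale;                      /* 10 hardware counter units */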
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 815b4e92522c..14bed6af8377 100644
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -24,14 +24,14 @@
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
-#include <asm/intel_rdt_sched.h>
+#include <asm/resctrl_sched.h>
#include <asm/perf_event.h>
#include "../../events/perf_event.h" /* For X86_CONFIG() */
-#include "intel_rdt.h"
+#include "internal.h"
#define CREATE_TRACE_POINTS
-#include "intel_rdt_pseudo_lock_event.h"
+#include "pseudo_lock_event.h"
/*
* MSR_MISC_FEATURE_CONTROL register enables the modification of hardware
@@ -213,7 +213,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
for_each_cpu(cpu, &plr->d->cpu_mask) {
pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
if (!pm_req) {
- rdt_last_cmd_puts("fail allocating mem for PM QoS\n");
+ rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n");
ret = -ENOMEM;
goto out_err;
}
@@ -222,7 +222,7 @@ static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
DEV_PM_QOS_RESUME_LATENCY,
30);
if (ret < 0) {
- rdt_last_cmd_printf("fail to add latency req cpu%d\n",
+ rdt_last_cmd_printf("Failed to add latency req CPU%d\n",
cpu);
kfree(pm_req);
ret = -1;
@@ -289,7 +289,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
plr->cpu = cpumask_first(&plr->d->cpu_mask);
if (!cpu_online(plr->cpu)) {
- rdt_last_cmd_printf("cpu %u associated with cache not online\n",
+ rdt_last_cmd_printf("CPU %u associated with cache not online\n",
plr->cpu);
ret = -ENODEV;
goto out_region;
@@ -307,7 +307,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
}
ret = -1;
- rdt_last_cmd_puts("unable to determine cache line size\n");
+ rdt_last_cmd_puts("Unable to determine cache line size\n");
out_region:
pseudo_lock_region_clear(plr);
return ret;
@@ -361,14 +361,14 @@ static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
* KMALLOC_MAX_SIZE.
*/
if (plr->size > KMALLOC_MAX_SIZE) {
- rdt_last_cmd_puts("requested region exceeds maximum size\n");
+ rdt_last_cmd_puts("Requested region exceeds maximum size\n");
ret = -E2BIG;
goto out_region;
}
plr->kmem = kzalloc(plr->size, GFP_KERNEL);
if (!plr->kmem) {
- rdt_last_cmd_puts("unable to allocate memory\n");
+ rdt_last_cmd_puts("Unable to allocate memory\n");
ret = -ENOMEM;
goto out_region;
}
@@ -665,7 +665,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
* default closid associated with it.
*/
if (rdtgrp == &rdtgroup_default) {
- rdt_last_cmd_puts("cannot pseudo-lock default group\n");
+ rdt_last_cmd_puts("Cannot pseudo-lock default group\n");
return -EINVAL;
}
@@ -707,17 +707,17 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
*/
prefetch_disable_bits = get_prefetch_disable_bits();
if (prefetch_disable_bits == 0) {
- rdt_last_cmd_puts("pseudo-locking not supported\n");
+ rdt_last_cmd_puts("Pseudo-locking not supported\n");
return -EINVAL;
}
if (rdtgroup_monitor_in_progress(rdtgrp)) {
- rdt_last_cmd_puts("monitoring in progress\n");
+ rdt_last_cmd_puts("Monitoring in progress\n");
return -EINVAL;
}
if (rdtgroup_tasks_assigned(rdtgrp)) {
- rdt_last_cmd_puts("tasks assigned to resource group\n");
+ rdt_last_cmd_puts("Tasks assigned to resource group\n");
return -EINVAL;
}
@@ -727,13 +727,13 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
}
if (rdtgroup_locksetup_user_restrict(rdtgrp)) {
- rdt_last_cmd_puts("unable to modify resctrl permissions\n");
+ rdt_last_cmd_puts("Unable to modify resctrl permissions\n");
return -EIO;
}
ret = pseudo_lock_init(rdtgrp);
if (ret) {
- rdt_last_cmd_puts("unable to init pseudo-lock region\n");
+ rdt_last_cmd_puts("Unable to init pseudo-lock region\n");
goto out_release;
}
@@ -770,7 +770,7 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
if (rdt_mon_capable) {
ret = alloc_rmid();
if (ret < 0) {
- rdt_last_cmd_puts("out of RMIDs\n");
+ rdt_last_cmd_puts("Out of RMIDs\n");
return ret;
}
rdtgrp->mon.rmid = ret;
@@ -1304,7 +1304,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
"pseudo_lock/%u", plr->cpu);
if (IS_ERR(thread)) {
ret = PTR_ERR(thread);
- rdt_last_cmd_printf("locking thread returned error %d\n", ret);
+ rdt_last_cmd_printf("Locking thread returned error %d\n", ret);
goto out_cstates;
}
@@ -1322,13 +1322,13 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
* the cleared, but not freed, plr struct resulting in an
* empty pseudo-locking loop.
*/
- rdt_last_cmd_puts("locking thread interrupted\n");
+ rdt_last_cmd_puts("Locking thread interrupted\n");
goto out_cstates;
}
ret = pseudo_lock_minor_get(&new_minor);
if (ret < 0) {
- rdt_last_cmd_puts("unable to obtain a new minor number\n");
+ rdt_last_cmd_puts("Unable to obtain a new minor number\n");
goto out_cstates;
}
@@ -1360,7 +1360,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
if (IS_ERR(dev)) {
ret = PTR_ERR(dev);
- rdt_last_cmd_printf("failed to create character device: %d\n",
+ rdt_last_cmd_printf("Failed to create character device: %d\n",
ret);
goto out_debugfs;
}
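For context on the PM QoS messages above: pseudo_lock_cstates_constrain() keeps every CPU that shares the locked cache out of deep C-states by filing one resume-latency request per CPU. A minimal sketch of that pattern for a single CPU (error handling elided; the 30 is microseconds):

    struct dev_pm_qos_request *pm_req;

    pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL);
    if (pm_req)
            dev_pm_qos_add_request(get_cpu_device(cpu), pm_req,
                                   DEV_PM_QOS_RESUME_LATENCY, 30);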
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h b/arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h
index 2c041e6d9f05..428ebbd4270b 100644
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock_event.h
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock_event.h
@@ -39,5 +39,5 @@ TRACE_EVENT(pseudo_lock_l3,
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE intel_rdt_pseudo_lock_event
+#define TRACE_INCLUDE_FILE pseudo_lock_event
#include <trace/define_trace.h>
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index f27b8115ffa2..8388adf241b2 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -35,8 +35,8 @@
#include <uapi/linux/magic.h>
-#include <asm/intel_rdt_sched.h>
-#include "intel_rdt.h"
+#include <asm/resctrl_sched.h>
+#include "internal.h"
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
@@ -298,7 +298,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
}
/*
- * This is safe against intel_rdt_sched_in() called from __switch_to()
+ * This is safe against resctrl_sched_in() called from __switch_to()
* because __switch_to() is executed with interrupts disabled. A local call
* from update_closid_rmid() is protected against __switch_to() because
* preemption is disabled.
@@ -317,7 +317,7 @@ static void update_cpu_closid_rmid(void *info)
* executing task might have its own closid selected. Just reuse
* the context switch code.
*/
- intel_rdt_sched_in();
+ resctrl_sched_in();
}
/*
@@ -345,7 +345,7 @@ static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
/* Check whether cpus belong to parent ctrl group */
cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
if (cpumask_weight(tmpmask)) {
- rdt_last_cmd_puts("can only add CPUs to mongroup that belong to parent\n");
+ rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
return -EINVAL;
}
@@ -470,14 +470,14 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
rdt_last_cmd_clear();
if (!rdtgrp) {
ret = -ENOENT;
- rdt_last_cmd_puts("directory was removed\n");
+ rdt_last_cmd_puts("Directory was removed\n");
goto unlock;
}
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
ret = -EINVAL;
- rdt_last_cmd_puts("pseudo-locking in progress\n");
+ rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto unlock;
}
@@ -487,7 +487,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
ret = cpumask_parse(buf, newmask);
if (ret) {
- rdt_last_cmd_puts("bad cpu list/mask\n");
+ rdt_last_cmd_puts("Bad CPU list/mask\n");
goto unlock;
}
@@ -495,7 +495,7 @@ static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
cpumask_andnot(tmpmask, newmask, cpu_online_mask);
if (cpumask_weight(tmpmask)) {
ret = -EINVAL;
- rdt_last_cmd_puts("can only assign online cpus\n");
+ rdt_last_cmd_puts("Can only assign online CPUs\n");
goto unlock;
}
@@ -542,7 +542,7 @@ static void move_myself(struct callback_head *head)
preempt_disable();
/* update PQR_ASSOC MSR to make resource group go into effect */
- intel_rdt_sched_in();
+ resctrl_sched_in();
preempt_enable();
kfree(callback);
@@ -574,7 +574,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
*/
atomic_dec(&rdtgrp->waitcount);
kfree(callback);
- rdt_last_cmd_puts("task exited\n");
+ rdt_last_cmd_puts("Task exited\n");
} else {
/*
* For ctrl_mon groups move both closid and rmid.
@@ -692,7 +692,7 @@ static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
ret = -EINVAL;
- rdt_last_cmd_puts("pseudo-locking in progress\n");
+ rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto unlock;
}
@@ -926,7 +926,7 @@ static int max_threshold_occ_show(struct kernfs_open_file *of,
{
struct rdt_resource *r = of->kn->parent->priv;
- seq_printf(seq, "%u\n", intel_cqm_threshold * r->mon_scale);
+ seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);
return 0;
}
@@ -945,7 +945,7 @@ static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
if (bytes > (boot_cpu_data.x86_cache_size * 1024))
return -EINVAL;
- intel_cqm_threshold = bytes / r->mon_scale;
+ resctrl_cqm_threshold = bytes / r->mon_scale;
return nbytes;
}
@@ -1029,7 +1029,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
* peer RDT CDP resource. Hence the WARN.
*/
_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
- if (WARN_ON(!_d_cdp)) {
+ if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
_r_cdp = NULL;
ret = -EINVAL;
}
@@ -1158,14 +1158,14 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
list_for_each_entry(d, &r->domains, list) {
if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
rdtgrp->closid, false)) {
- rdt_last_cmd_puts("schemata overlaps\n");
+ rdt_last_cmd_puts("Schemata overlaps\n");
return false;
}
}
}
if (!has_cache) {
- rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
+ rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
return false;
}
@@ -1206,7 +1206,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
goto out;
if (mode == RDT_MODE_PSEUDO_LOCKED) {
- rdt_last_cmd_printf("cannot change pseudo-locked group\n");
+ rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
ret = -EINVAL;
goto out;
}
@@ -1235,7 +1235,7 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
goto out;
rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
} else {
- rdt_last_cmd_printf("unknown/unsupported mode\n");
+ rdt_last_cmd_puts("Unknown or unsupported mode\n");
ret = -EINVAL;
}
@@ -1722,14 +1722,14 @@ static void l3_qos_cfg_update(void *arg)
{
bool *enable = arg;
- wrmsrl(IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
+ wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}
static void l2_qos_cfg_update(void *arg)
{
bool *enable = arg;
- wrmsrl(IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
+ wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}
static inline bool is_mba_linear(void)
@@ -1878,7 +1878,10 @@ static int parse_rdtgroupfs_options(char *data)
if (ret)
goto out;
} else if (!strcmp(token, "mba_MBps")) {
- ret = set_mba_sc(true);
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ ret = set_mba_sc(true);
+ else
+ ret = -EINVAL;
if (ret)
goto out;
} else {
@@ -2540,7 +2543,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
tmp_cbm = d->new_ctrl;
if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
r->cache.min_cbm_bits) {
- rdt_last_cmd_printf("no space on %s:%d\n",
+ rdt_last_cmd_printf("No space on %s:%d\n",
r->name, d->id);
return -ENOSPC;
}
@@ -2557,7 +2560,7 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
continue;
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
- rdt_last_cmd_puts("failed to initialize allocations\n");
+ rdt_last_cmd_puts("Failed to initialize allocations\n");
return ret;
}
rdtgrp->mode = RDT_MODE_SHAREABLE;
@@ -2580,7 +2583,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
rdt_last_cmd_clear();
if (!prdtgrp) {
ret = -ENODEV;
- rdt_last_cmd_puts("directory was removed\n");
+ rdt_last_cmd_puts("Directory was removed\n");
goto out_unlock;
}
@@ -2588,7 +2591,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
(prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
ret = -EINVAL;
- rdt_last_cmd_puts("pseudo-locking in progress\n");
+ rdt_last_cmd_puts("Pseudo-locking in progress\n");
goto out_unlock;
}
@@ -2596,7 +2599,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
if (!rdtgrp) {
ret = -ENOSPC;
- rdt_last_cmd_puts("kernel out of memory\n");
+ rdt_last_cmd_puts("Kernel out of memory\n");
goto out_unlock;
}
*r = rdtgrp;
@@ -2637,7 +2640,7 @@ static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
if (rdt_mon_capable) {
ret = alloc_rmid();
if (ret < 0) {
- rdt_last_cmd_puts("out of RMIDs\n");
+ rdt_last_cmd_puts("Out of RMIDs\n");
goto out_destroy;
}
rdtgrp->mon.rmid = ret;
@@ -2725,7 +2728,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
kn = rdtgrp->kn;
ret = closid_alloc();
if (ret < 0) {
- rdt_last_cmd_puts("out of CLOSIDs\n");
+ rdt_last_cmd_puts("Out of CLOSIDs\n");
goto out_common_fail;
}
closid = ret;
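update_cpu_closid_rmid() above leans on the context-switch path: each affected CPU is sent an IPI whose handler simply re-runs resctrl_sched_in(), so the PQR_ASSOC MSR picks up the new CLOSID/RMID immediately. The caller side is roughly this one-liner (a sketch of what update_closid_rmid() does):

    /* Run update_cpu_closid_rmid() on every CPU in cpu_mask, and wait. */
    on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1);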
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 772c219b6889..94aa1c72ca98 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -5,9 +5,10 @@
#include <linux/cpu.h>
#include <asm/pat.h>
+#include <asm/apic.h>
#include <asm/processor.h>
-#include <asm/apic.h>
+#include "cpu.h"
struct cpuid_bit {
u16 feature;
@@ -17,7 +18,11 @@ struct cpuid_bit {
u32 sub_leaf;
};
-/* Please keep the leaf sorted by cpuid_bit.level for faster search. */
+/*
+ * Please keep the leaf sorted by cpuid_bit.level for faster search.
+ * X86_FEATURE_MBA is supported by both Intel and AMD. But the CPUID
+ * levels are different and there is a separate entry for each.
+ */
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
@@ -29,6 +34,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+ { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
{ X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
{ 0, 0, 0, 0, 0 }
@@ -56,27 +62,3 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
set_cpu_cap(c, cb->feature);
}
}
-
-u32 get_scattered_cpuid_leaf(unsigned int level, unsigned int sub_leaf,
- enum cpuid_regs_idx reg)
-{
- const struct cpuid_bit *cb;
- u32 cpuid_val = 0;
-
- for (cb = cpuid_bits; cb->feature; cb++) {
-
- if (level > cb->level)
- continue;
-
- if (level < cb->level)
- break;
-
- if (reg == cb->reg && sub_leaf == cb->sub_leaf) {
- if (cpu_has(&boot_cpu_data, cb->feature))
- cpuid_val |= BIT(cb->bit);
- }
- }
-
- return cpuid_val;
-}
-EXPORT_SYMBOL_GPL(get_scattered_cpuid_leaf);
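The new scattered entry encodes "leaf 0x80000008, sub-leaf 0, EBX bit 6" for AMD MBA. An equivalent open-coded probe, for illustration only:

    unsigned int eax, ebx, ecx, edx;

    cpuid_count(0x80000008, 0, &eax, &ebx, &ecx, &edx);
    if (ebx & BIT(6))
            pr_info("AMD Memory Bandwidth Allocation supported\n");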
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 71ca064e3794..8f6c784141d1 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -10,6 +10,8 @@
#include <asm/pat.h>
#include <asm/processor.h>
+#include "cpu.h"
+
/* leaf 0xb SMT level */
#define SMT_LEVEL 0
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index f631a3f15587..c8b07d8ea5a2 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -37,6 +37,7 @@
#include <asm/reboot.h>
#include <asm/virtext.h>
#include <asm/intel_pt.h>
+#include <asm/crash.h>
/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index eb8ab3915268..22369dd5de3b 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -62,7 +62,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
/**
* copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
- * memory with the encryption mask set to accomodate kdump on SME-enabled
+ * memory with the encryption mask set to accommodate kdump on SME-enabled
* machines.
*/
ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 7299dcbf8e85..8d85e00bb40a 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -23,6 +23,7 @@
#include <asm/pci_x86.h>
#include <asm/setup.h>
#include <asm/i8259.h>
+#include <asm/prom.h>
__initdata u64 initial_dtb;
char __initdata cmd_line[COMMAND_LINE_SIZE];
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 2ea85b32421a..2e5003fef51a 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -93,7 +93,7 @@ bool irq_fpu_usable(void)
}
EXPORT_SYMBOL(irq_fpu_usable);
-void __kernel_fpu_begin(void)
+static void __kernel_fpu_begin(void)
{
struct fpu *fpu = &current->thread.fpu;
@@ -111,9 +111,8 @@ void __kernel_fpu_begin(void)
__cpu_invalidate_fpregs_state();
}
}
-EXPORT_SYMBOL(__kernel_fpu_begin);
-void __kernel_fpu_end(void)
+static void __kernel_fpu_end(void)
{
struct fpu *fpu = &current->thread.fpu;
@@ -122,7 +121,6 @@ void __kernel_fpu_end(void)
kernel_fpu_enable();
}
-EXPORT_SYMBOL(__kernel_fpu_end);
void kernel_fpu_begin(void)
{
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 61a949d84dfa..d99a8ee9e185 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -344,10 +344,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
}
+ local_bh_disable();
fpu->initialized = 1;
- preempt_disable();
fpu__restore(fpu);
- preempt_enable();
+ local_bh_enable();
return err;
} else {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 87a57b7642d3..9cc108456d0b 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -444,7 +444,7 @@ static int xfeature_uncompacted_offset(int xfeature_nr)
* format. Checking a supervisor state's uncompacted offset is
* an error.
*/
- if (XFEATURE_MASK_SUPERVISOR & (1 << xfeature_nr)) {
+ if (XFEATURE_MASK_SUPERVISOR & BIT_ULL(xfeature_nr)) {
WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
return -1;
}
@@ -808,10 +808,8 @@ void fpu__resume_cpu(void)
* Given an xstate feature mask, calculate where in the xsave
* buffer the state is. Callers should ensure that the buffer
* is valid.
- *
- * Note: does not work for compacted buffers.
*/
-void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
+static void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
{
int feature_nr = fls64(xstate_feature_mask) - 1;
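The switch to BIT_ULL() in xfeature_uncompacted_offset() is more than style: a plain 1 << xfeature_nr is an int shift, which already misbehaves at bit 31 and is undefined beyond it, while xfeature numbers can reach 63. Compare:

    u64 wrong = 1 << 31;     /* int overflow: sign-extends to 0xffffffff80000000 */
    u64 right = BIT_ULL(31); /* 0x0000000080000000, as intended */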
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 01ebcb6f263e..7ee8067cbf45 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -994,7 +994,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
{
unsigned long old;
int faulted;
- struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)
&return_to_handler;
@@ -1046,19 +1045,7 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
return;
}
- trace.func = self_addr;
- trace.depth = current->curr_ret_stack + 1;
-
- /* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace)) {
+ if (function_graph_enter(old, self_addr, frame_pointer, parent))
*parent = old;
- return;
- }
-
- if (ftrace_push_return_trace(old, self_addr, &trace.depth,
- frame_pointer, parent) == -EBUSY) {
- *parent = old;
- return;
- }
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 76fa3b836598..ec6fefbfd3c0 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -37,7 +37,6 @@ asmlinkage __visible void __init i386_start_kernel(void)
cr4_init_shadow();
sanitize_boot_params(&boot_params);
- x86_verify_bootdata_version();
x86_early_init_platform_quirks();
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 7663a8eb602b..16b1cbd3a61e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -457,8 +457,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
if (!boot_params.hdr.version)
copy_bootdata(__va(real_mode_data));
- x86_verify_bootdata_version();
-
x86_early_init_platform_quirks();
switch (boot_params.hdr.hardware_subarch) {
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 747c758f67b7..d1dbe8e4eb82 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -386,7 +386,7 @@ NEXT_PAGE(early_dynamic_pgts)
.data
-#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH)
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
NEXT_PGD_PAGE(init_top_pgt)
.quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
.org init_top_pgt + L4_PAGE_OFFSET*8, 0
diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c
index 108c48d0d40e..1b2ee55a2dfb 100644
--- a/arch/x86/kernel/jailhouse.c
+++ b/arch/x86/kernel/jailhouse.c
@@ -19,6 +19,7 @@
#include <asm/pci_x86.h>
#include <asm/reboot.h>
#include <asm/setup.h>
+#include <asm/jailhouse_para.h>
static __initdata struct jailhouse_setup_data setup_data;
static unsigned int precalibrated_tsc_khz;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index c33b06f5faa4..4ba75afba527 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -751,7 +751,7 @@ STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
/*
* Called from kretprobe_trampoline
*/
-__visible __used void *trampoline_handler(struct pt_regs *regs)
+static __used void *trampoline_handler(struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
@@ -1026,12 +1026,10 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
-bool arch_within_kprobe_blacklist(unsigned long addr)
+int __init arch_populate_kprobe_blacklist(void)
{
- return (addr >= (unsigned long)__kprobes_text_start &&
- addr < (unsigned long)__kprobes_text_end) ||
- (addr >= (unsigned long)__entry_text_start &&
- addr < (unsigned long)__entry_text_end);
+ return kprobe_add_area_blacklist((unsigned long)__entry_text_start,
+ (unsigned long)__entry_text_end);
}
int __init arch_init_kprobes(void)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 40b16b270656..6adf6e6c2933 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -189,7 +189,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
int len = 0, ret;
while (len < RELATIVEJUMP_SIZE) {
- ret = __copy_instruction(dest + len, src + len, real, &insn);
+ ret = __copy_instruction(dest + len, src + len, real + len, &insn);
if (!ret || !can_boost(&insn, src + len))
return -EINVAL;
len += ret;
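The "real + len" fix matters because __copy_instruction() rewrites RIP-relative displacements against the address at which the copy will eventually execute, so each instruction in the buffer needs its own runtime address. Conceptually (names here are illustrative, not the real variables):

    /* Displacement is relative to the end of the relocated instruction. */
    long new_disp = (long)orig_target - (long)(real + len + insn_len);

With the old code, every instruction after the first was fixed up against the start of the buffer, producing wrong displacements.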
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 30084ecaa20f..e811d4d1c824 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/* KVM paravirtual clock driver. A clocksource implementation
Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/clocksource.h>
diff --git a/arch/x86/kernel/macros.S b/arch/x86/kernel/macros.S
deleted file mode 100644
index 161c95059044..000000000000
--- a/arch/x86/kernel/macros.S
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * This file includes headers whose assembly part includes macros which are
- * commonly used. The macros are precompiled into assmebly file which is later
- * assembled together with each compiled file.
- */
-
-#include <linux/compiler.h>
-#include <asm/refcount.h>
-#include <asm/alternative-asm.h>
-#include <asm/bug.h>
-#include <asm/paravirt.h>
-#include <asm/asm.h>
-#include <asm/cpufeature.h>
-#include <asm/jump_label.h>
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c93fcfdf1673..90ae0ca51083 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -22,6 +22,8 @@
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
+#include <linux/acpi.h>
+#include <linux/elf-randomize.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
@@ -39,6 +41,9 @@
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
+#include <asm/proto.h>
+
+#include "process.h"
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -252,11 +257,12 @@ void arch_setup_new_exec(void)
enable_cpuid();
}
-static inline void switch_to_bitmap(struct tss_struct *tss,
- struct thread_struct *prev,
+static inline void switch_to_bitmap(struct thread_struct *prev,
struct thread_struct *next,
unsigned long tifp, unsigned long tifn)
{
+ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+
if (tifn & _TIF_IO_BITMAP) {
/*
* Copy the relevant range of the IO bitmap.
@@ -395,32 +401,85 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}
-static __always_inline void intel_set_ssb_state(unsigned long tifn)
+/*
+ * Update the MSRs managing speculation control, during context switch.
+ *
+ * tifp: Previous task's thread flags
+ * tifn: Next task's thread flags
+ */
+static __always_inline void __speculation_ctrl_update(unsigned long tifp,
+ unsigned long tifn)
{
- u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+ unsigned long tif_diff = tifp ^ tifn;
+ u64 msr = x86_spec_ctrl_base;
+ bool updmsr = false;
+
+ /*
+ * If TIF_SSBD is different, select the proper mitigation
+ * method. Note that if SSBD mitigation is disabled or permanently
+ * enabled this branch can't be taken because nothing can set
+ * TIF_SSBD.
+ */
+ if (tif_diff & _TIF_SSBD) {
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+ amd_set_ssb_virt_state(tifn);
+ } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+ amd_set_core_ssb_state(tifn);
+ } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+ msr |= ssbd_tif_to_spec_ctrl(tifn);
+ updmsr = true;
+ }
+ }
+
+ /*
+ * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
+ * otherwise avoid the MSR write.
+ */
+ if (IS_ENABLED(CONFIG_SMP) &&
+ static_branch_unlikely(&switch_to_cond_stibp)) {
+ updmsr |= !!(tif_diff & _TIF_SPEC_IB);
+ msr |= stibp_tif_to_spec_ctrl(tifn);
+ }
- wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+ if (updmsr)
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
- if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
- amd_set_ssb_virt_state(tifn);
- else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
- amd_set_core_ssb_state(tifn);
- else
- intel_set_ssb_state(tifn);
+ if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
+ if (task_spec_ssb_disable(tsk))
+ set_tsk_thread_flag(tsk, TIF_SSBD);
+ else
+ clear_tsk_thread_flag(tsk, TIF_SSBD);
+
+ if (task_spec_ib_disable(tsk))
+ set_tsk_thread_flag(tsk, TIF_SPEC_IB);
+ else
+ clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
+ }
+ /* Return the updated threadinfo flags */
+ return task_thread_info(tsk)->flags;
}
-void speculative_store_bypass_update(unsigned long tif)
+void speculation_ctrl_update(unsigned long tif)
{
+ /* Forced update. Make sure all relevant TIF flags are different */
preempt_disable();
- __speculative_store_bypass_update(tif);
+ __speculation_ctrl_update(~tif, tif);
preempt_enable();
}
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
- struct tss_struct *tss)
+/* Called from seccomp/prctl update */
+void speculation_ctrl_update_current(void)
+{
+ preempt_disable();
+ speculation_ctrl_update(speculation_ctrl_update_tif(current));
+ preempt_enable();
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev, *next;
unsigned long tifp, tifn;
@@ -430,7 +489,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
tifn = READ_ONCE(task_thread_info(next_p)->flags);
tifp = READ_ONCE(task_thread_info(prev_p)->flags);
- switch_to_bitmap(tss, prev, next, tifp, tifn);
+ switch_to_bitmap(prev, next, tifp, tifn);
propagate_user_return_notify(prev_p, next_p);
@@ -451,8 +510,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
if ((tifp ^ tifn) & _TIF_NOCPUID)
set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
- if ((tifp ^ tifn) & _TIF_SSBD)
- __speculative_store_bypass_update(tifn);
+ if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
+ __speculation_ctrl_update(tifp, tifn);
+ } else {
+ speculation_ctrl_update_tif(prev_p);
+ tifn = speculation_ctrl_update_tif(next_p);
+
+ /* Enforce MSR update to ensure consistent state */
+ __speculation_ctrl_update(~tifn, tifn);
+ }
}
/*
@@ -730,7 +796,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long start, bottom, top, sp, fp, ip, ret = 0;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (p == current || p->state == TASK_RUNNING)
return 0;
if (!try_get_task_stack(p))
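speculation_ctrl_update() passes ~tif as the "previous" flags so that every speculation-relevant TIF bit appears to have changed, forcing __speculation_ctrl_update() to re-evaluate all mitigations. A two-line illustration:

    unsigned long tif = _TIF_SSBD;       /* whatever the task's flags are */
    unsigned long tif_diff = ~tif ^ tif; /* == ~0UL: every bit differs */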
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
new file mode 100644
index 000000000000..320ab978fb1f
--- /dev/null
+++ b/arch/x86/kernel/process.h
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Code shared between 32 and 64 bit
+
+#include <asm/spec-ctrl.h>
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
+
+/*
+ * This needs to be inline to optimize for the common case where no extra
+ * work needs to be done.
+ */
+static inline void switch_to_extra(struct task_struct *prev,
+ struct task_struct *next)
+{
+ unsigned long next_tif = task_thread_info(next)->flags;
+ unsigned long prev_tif = task_thread_info(prev)->flags;
+
+ if (IS_ENABLED(CONFIG_SMP)) {
+ /*
+ * Avoid __switch_to_xtra() invocation when conditional
+ * STIBP is disabled and the only different bit is
+ * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
+ * in the TIF_WORK_CTXSW masks.
+ */
+ if (!static_branch_likely(&switch_to_cond_stibp)) {
+ prev_tif &= ~_TIF_SPEC_IB;
+ next_tif &= ~_TIF_SPEC_IB;
+ }
+ }
+
+ /*
+ * __switch_to_xtra() handles debug registers, i/o bitmaps,
+ * speculation mitigations etc.
+ */
+ if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
+ prev_tif & _TIF_WORK_CTXSW_PREV))
+ __switch_to_xtra(prev, next);
+}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 5046a3c9dec2..e471d8e6f0b2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -44,9 +44,6 @@
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/desc.h>
-#ifdef CONFIG_MATH_EMULATION
-#include <asm/math_emu.h>
-#endif
#include <linux/err.h>
@@ -56,9 +53,11 @@
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
-#include <asm/intel_rdt_sched.h>
+#include <asm/resctrl_sched.h>
#include <asm/proto.h>
+#include "process.h"
+
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
@@ -232,7 +231,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *prev_fpu = &prev->fpu;
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
- struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
@@ -264,12 +262,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
set_iopl_mask(next->iopl);
- /*
- * Now maybe handle debug registers and/or IO bitmaps
- */
- if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
- task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
- __switch_to_xtra(prev_p, next_p, tss);
+ switch_to_extra(prev_p, next_p);
/*
* Leave lazy mode, flushing any hypercalls made here.
@@ -302,7 +295,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
this_cpu_write(current_task, next_p);
/* Load the Intel cache allocation PQR MSR. */
- intel_rdt_sched_in();
+ resctrl_sched_in();
return prev_p;
}
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 0e0b4288a4b2..6a62f4af9fcf 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
-#include <asm/intel_rdt_sched.h>
+#include <asm/resctrl_sched.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
@@ -60,13 +60,15 @@
#include <asm/unistd_32_ia32.h>
#endif
+#include "process.h"
+
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
unsigned long d0, d1, d2, d3, d6, d7;
unsigned int fsindex, gsindex;
- unsigned int ds, cs, es;
+ unsigned int ds, es;
show_iret_regs(regs);
@@ -98,7 +100,6 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
}
asm("movl %%ds,%0" : "=r" (ds));
- asm("movl %%cs,%0" : "=r" (cs));
asm("movl %%es,%0" : "=r" (es));
asm("movl %%fs,%0" : "=r" (fsindex));
asm("movl %%gs,%0" : "=r" (gsindex));
@@ -114,7 +115,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
fs, fsindex, gs, gsindex, shadowgs);
- printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
+ printk(KERN_DEFAULT "CS: %04lx DS: %04x ES: %04x CR0: %016lx\n", regs->cs, ds,
es, cr0);
printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
cr4);
@@ -337,24 +338,6 @@ static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
return base;
}
-void x86_fsbase_write_cpu(unsigned long fsbase)
-{
- /*
- * Set the selector to 0 as a notion, that the segment base is
- * overwritten, which will be checked for skipping the segment load
- * during context switch.
- */
- loadseg(FS, 0);
- wrmsrl(MSR_FS_BASE, fsbase);
-}
-
-void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
-{
- /* Set the selector to 0 for the same reason as %fs above. */
- loadseg(GS, 0);
- wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
-}
-
unsigned long x86_fsbase_read_task(struct task_struct *task)
{
unsigned long fsbase;
@@ -383,38 +366,18 @@ unsigned long x86_gsbase_read_task(struct task_struct *task)
return gsbase;
}
-int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
+void x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
- /*
- * Not strictly needed for %fs, but do it for symmetry
- * with %gs
- */
- if (unlikely(fsbase >= TASK_SIZE_MAX))
- return -EPERM;
+ WARN_ON_ONCE(task == current);
- preempt_disable();
task->thread.fsbase = fsbase;
- if (task == current)
- x86_fsbase_write_cpu(fsbase);
- task->thread.fsindex = 0;
- preempt_enable();
-
- return 0;
}
-int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
+void x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
- if (unlikely(gsbase >= TASK_SIZE_MAX))
- return -EPERM;
+ WARN_ON_ONCE(task == current);
- preempt_disable();
task->thread.gsbase = gsbase;
- if (task == current)
- x86_gsbase_write_cpu_inactive(gsbase);
- task->thread.gsindex = 0;
- preempt_enable();
-
- return 0;
}
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
@@ -553,7 +516,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *prev_fpu = &prev->fpu;
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
- struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
this_cpu_read(irq_count) != -1);
@@ -617,12 +579,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/* Reload sp0. */
update_task_stack(next_p);
- /*
- * Now maybe reload the debug registers and handle I/O bitmaps
- */
- if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
- task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
- __switch_to_xtra(prev_p, next_p, tss);
+ switch_to_extra(prev_p, next_p);
#ifdef CONFIG_XEN_PV
/*
@@ -664,7 +621,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
}
/* Load the Intel cache allocation PQR MSR. */
- intel_rdt_sched_in();
+ resctrl_sched_in();
return prev_p;
}
@@ -688,7 +645,7 @@ void set_personality_64bit(void)
/* TBD: overwrites user setup. Should have two bits.
But 64bit processes have always behaved this way,
so it's not too bad. The main problem is just that
- 32bit childs are affected again. */
+ 32bit children are affected again. */
current->personality &= ~READ_IMPLIES_EXEC;
}
@@ -758,11 +715,60 @@ long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
switch (option) {
case ARCH_SET_GS: {
- ret = x86_gsbase_write_task(task, arg2);
+ if (unlikely(arg2 >= TASK_SIZE_MAX))
+ return -EPERM;
+
+ preempt_disable();
+ /*
+ * ARCH_SET_GS has always overwritten the index
+ * and the base. Zero is the most sensible value
+ * to put in the index, and is the only value that
+ * makes any sense if FSGSBASE is unavailable.
+ */
+ if (task == current) {
+ loadseg(GS, 0);
+ x86_gsbase_write_cpu_inactive(arg2);
+
+ /*
+ * On non-FSGSBASE systems, save_base_legacy() expects
+ * that we also fill in thread.gsbase.
+ */
+ task->thread.gsbase = arg2;
+
+ } else {
+ task->thread.gsindex = 0;
+ x86_gsbase_write_task(task, arg2);
+ }
+ preempt_enable();
break;
}
case ARCH_SET_FS: {
- ret = x86_fsbase_write_task(task, arg2);
+ /*
+ * Not strictly needed for %fs, but do it for symmetry
+ * with %gs
+ */
+ if (unlikely(arg2 >= TASK_SIZE_MAX))
+ return -EPERM;
+
+ preempt_disable();
+ /*
+ * Set the selector to 0 for the same reason
+ * as %gs above.
+ */
+ if (task == current) {
+ loadseg(FS, 0);
+ x86_fsbase_write_cpu(arg2);
+
+ /*
+ * On non-FSGSBASE systems, save_base_legacy() expects
+ * that we also fill in thread.fsbase.
+ */
+ task->thread.fsbase = arg2;
+ } else {
+ task->thread.fsindex = 0;
+ x86_fsbase_write_task(task, arg2);
+ }
+ preempt_enable();
break;
}
case ARCH_GET_FS: {
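From userspace, the two branches above are reached through the arch_prctl() syscall; per the comments, setting a base also zeroes the selector. A minimal, illustrative caller setting its own GS base:

    #include <asm/prctl.h>      /* ARCH_SET_GS */
    #include <sys/syscall.h>
    #include <unistd.h>

    static unsigned long gs_area[64];

    int main(void)
    {
            /* Returns 0 on success; base must be below TASK_SIZE_MAX. */
            return syscall(SYS_arch_prctl, ARCH_SET_GS,
                           (unsigned long)gs_area) ? 1 : 0;
    }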
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index ffae9b9740fd..4b8ee05dd6ad 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -397,11 +397,12 @@ static int putreg(struct task_struct *child,
if (value >= TASK_SIZE_MAX)
return -EIO;
/*
- * When changing the FS base, use the same
- * mechanism as for do_arch_prctl_64().
+ * When changing the FS base, use do_arch_prctl_64()
+ * to set the index to zero and to set the base
+ * as requested.
*/
if (child->thread.fsbase != value)
- return x86_fsbase_write_task(child, value);
+ return do_arch_prctl_64(child, ARCH_SET_FS, value);
return 0;
case offsetof(struct user_regs_struct,gs_base):
/*
@@ -410,7 +411,7 @@ static int putreg(struct task_struct *child,
if (value >= TASK_SIZE_MAX)
return -EIO;
if (child->thread.gsbase != value)
- return x86_gsbase_write_task(child, value);
+ return do_arch_prctl_64(child, ARCH_SET_GS, value);
return 0;
#endif
}
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 736348ead421..8451f38ad399 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -7,6 +7,7 @@
#include <linux/irq.h>
#include <asm/hpet.h>
+#include <asm/setup.h>
#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b74e7bfed6ab..d494b9bfe618 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1280,23 +1280,6 @@ void __init setup_arch(char **cmdline_p)
unwind_init();
}
-/*
- * From boot protocol 2.14 onwards we expect the bootloader to set the
- * version to "0x8000 | <used version>". In case we find a version >= 2.14
- * without the 0x8000 we assume the boot loader supports 2.13 only and
- * reset the version accordingly. The 0x8000 flag is removed in any case.
- */
-void __init x86_verify_bootdata_version(void)
-{
- if (boot_params.hdr.version & VERSION_WRITTEN)
- boot_params.hdr.version &= ~VERSION_WRITTEN;
- else if (boot_params.hdr.version >= 0x020e)
- boot_params.hdr.version = 0x020d;
-
- if (boot_params.hdr.version < 0x020e)
- boot_params.hdr.acpi_rsdp_addr = 0;
-}
-
#ifdef CONFIG_X86_32
static struct resource video_ram_resource = {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a9134d1910b9..ccd1f2a8e557 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1347,7 +1347,7 @@ void __init calculate_max_logical_packages(void)
* extrapolate the boot cpu's data to all packages.
*/
ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
- __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
+ __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
pr_info("Max logical packages: %u\n", __max_logical_packages);
}
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index 623965e86b65..fa51723571c8 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -19,12 +19,15 @@
#include <linux/dmi.h>
#include <linux/err.h>
+#include <linux/efi.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/screen_info.h>
#include <video/vga.h>
+
+#include <asm/efi.h>
#include <asm/sysfb.h>
enum {
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index 5bd30c442794..496748ed266a 100644
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -10,6 +10,8 @@
#include <asm/hw_irq.h>
#include <asm/desc.h>
+#include <asm/trace/exceptions.h>
+#include <asm/trace/irq_vectors.h>
DEFINE_STATIC_KEY_FALSE(trace_pagefault_key);
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index dc4f2fdf5e57..69b3a7c30013 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -16,7 +16,7 @@ kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
hyperv.o page_track.o debugfs.o
-kvm-intel-y += vmx.o pmu_intel.o
+kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o vmx/evmcs.o vmx/nested.o
kvm-amd-y += svm.o pmu_amd.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7bcfa61375c0..bbffa6c54697 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -67,9 +67,6 @@ u64 kvm_supported_xcr0(void)
#define F(x) bit(X86_FEATURE_##x)
-/* For scattered features from cpufeatures.h; we currently expose none */
-#define KF(x) bit(KVM_CPUID_BIT_##x)
-
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -337,6 +334,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
+ unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
/* cpuid 1.edx */
const u32 kvm_cpuid_1_edx_x86_features =
@@ -380,8 +378,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 0x80000008.ebx */
const u32 kvm_cpuid_8000_0008_ebx_x86_features =
- F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
- F(AMD_SSB_NO);
+ F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
+ F(AMD_SSB_NO) | F(AMD_STIBP);
/* cpuid 0xC0000001.edx */
const u32 kvm_cpuid_C000_0001_edx_x86_features =
@@ -395,7 +393,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
- F(SHA_NI) | F(AVX512BW) | F(AVX512VL);
+ F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt;
/* cpuid 0xD.1.eax */
const u32 kvm_cpuid_D_1_eax_x86_features =
@@ -411,7 +409,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 7.0.edx*/
const u32 kvm_cpuid_7_0_edx_x86_features =
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
- F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
+ F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -426,7 +424,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
switch (function) {
case 0:
- entry->eax = min(entry->eax, (u32)0xd);
+ entry->eax = min(entry->eax, (u32)(f_intel_pt ? 0x14 : 0xd));
break;
case 1:
entry->edx &= kvm_cpuid_1_edx_x86_features;
@@ -603,6 +601,23 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
}
break;
}
+ /* Intel PT */
+ case 0x14: {
+ int t, times = entry->eax;
+
+ if (!f_intel_pt)
+ break;
+
+ entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ for (t = 1; t <= times; ++t) {
+ if (*nent >= maxnent)
+ goto out;
+ do_cpuid_1_ent(&entry[t], function, t);
+ entry[t].flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+ ++*nent;
+ }
+ break;
+ }
case KVM_CPUID_SIGNATURE: {
static const char signature[12] = "KVMKVMKVM\0\0";
const u32 *sigptr = (const u32 *)signature;
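The new 0x14 case mirrors how hardware enumerates Intel PT: EAX of sub-leaf 0 reports the highest valid sub-leaf index, and every sub-leaf must be flagged index-significant so lookups match on ECX. A minimal userspace sketch of the same walk, assuming a GCC/clang toolchain providing <cpuid.h> (illustrative only, not patch code):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx, sub, max_sub;

		/* returns 0 if leaf 0x14 is beyond the CPU's maximum leaf */
		if (!__get_cpuid_count(0x14, 0, &eax, &ebx, &ecx, &edx))
			return 1;

		max_sub = eax;	/* sub-leaf 0: EAX = number of extra sub-leaves */
		printf("0x14.0: ebx=%#x ecx=%#x\n", ebx, ecx);

		for (sub = 1; sub <= max_sub; sub++) {
			__get_cpuid_count(0x14, sub, &eax, &ebx, &ecx, &edx);
			printf("0x14.%u: eax=%#x ebx=%#x\n", sub, eax, ebx);
		}
		return 0;
	}

This is also why case 0 above raises the maximum basic leaf from 0xd to 0x14 only when PT is exposed; otherwise guests would probe an empty leaf.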
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 4e80080f277a..c90a5352d158 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -38,6 +38,9 @@
#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
+static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
+ bool vcpu_kick);
+
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
return atomic64_read(&synic->sint[sint]);
@@ -158,59 +161,24 @@ static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
return (synic->active) ? synic : NULL;
}
-static void synic_clear_sint_msg_pending(struct kvm_vcpu_hv_synic *synic,
- u32 sint)
-{
- struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
- struct page *page;
- gpa_t gpa;
- struct hv_message *msg;
- struct hv_message_page *msg_page;
-
- gpa = synic->msg_page & PAGE_MASK;
- page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
- if (is_error_page(page)) {
- vcpu_err(vcpu, "Hyper-V SynIC can't get msg page, gpa 0x%llx\n",
- gpa);
- return;
- }
- msg_page = kmap_atomic(page);
-
- msg = &msg_page->sint_message[sint];
- msg->header.message_flags.msg_pending = 0;
-
- kunmap_atomic(msg_page);
- kvm_release_page_dirty(page);
- kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
-}
-
static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
struct kvm_vcpu_hv_stimer *stimer;
- int gsi, idx, stimers_pending;
+ int gsi, idx;
trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
- if (synic->msg_page & HV_SYNIC_SIMP_ENABLE)
- synic_clear_sint_msg_pending(synic, sint);
-
/* Try to deliver pending Hyper-V SynIC timers messages */
- stimers_pending = 0;
for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
stimer = &hv_vcpu->stimer[idx];
- if (stimer->msg_pending &&
- (stimer->config & HV_STIMER_ENABLE) &&
- HV_STIMER_SINT(stimer->config) == sint) {
- set_bit(stimer->index,
- hv_vcpu->stimer_pending_bitmap);
- stimers_pending++;
- }
+ if (stimer->msg_pending && stimer->config.enable &&
+ !stimer->config.direct_mode &&
+ stimer->config.sintx == sint)
+ stimer_mark_pending(stimer, false);
}
- if (stimers_pending)
- kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
idx = srcu_read_lock(&kvm->irq_srcu);
gsi = atomic_read(&synic->sint_to_gsi[sint]);
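The .enable/.direct_mode/.sintx accesses above go through a new union hv_stimer_config that replaces the old HV_STIMER_* mask macros. Its layout follows the Hyper-V TLFS definition added alongside this series in hyperv-tlfs.h; the sketch below reproduces it for reference, with the exact reserved-field split to be taken as an assumption:

	union hv_stimer_config {
		u64 as_uint64;
		struct {
			u64 enable:1;
			u64 periodic:1;
			u64 lazy:1;
			u64 auto_enable:1;
			u64 apic_vector:8;
			u64 direct_mode:1;
			u64 reserved_z0:3;
			u64 sintx:4;
			u64 reserved_z1:44;
		};
	};

A direct-mode timer bypasses the SynIC message path and injects apic_vector on expiry, which is why the sint comparison above explicitly excludes direct-mode timers.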
@@ -497,7 +465,7 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm);
ktime_now = ktime_get();
- if (stimer->config & HV_STIMER_PERIODIC) {
+ if (stimer->config.periodic) {
if (stimer->exp_time) {
if (time_now >= stimer->exp_time) {
u64 remainder;
@@ -546,13 +514,18 @@ static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
bool host)
{
+ union hv_stimer_config new_config = {.as_uint64 = config},
+ old_config = {.as_uint64 = stimer->config.as_uint64};
+
trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id,
stimer->index, config, host);
stimer_cleanup(stimer);
- if ((stimer->config & HV_STIMER_ENABLE) && HV_STIMER_SINT(config) == 0)
- config &= ~HV_STIMER_ENABLE;
- stimer->config = config;
+ if (old_config.enable &&
+ !new_config.direct_mode && new_config.sintx == 0)
+ new_config.enable = 0;
+ stimer->config.as_uint64 = new_config.as_uint64;
+
stimer_mark_pending(stimer, false);
return 0;
}
@@ -566,16 +539,16 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
stimer_cleanup(stimer);
stimer->count = count;
if (stimer->count == 0)
- stimer->config &= ~HV_STIMER_ENABLE;
- else if (stimer->config & HV_STIMER_AUTOENABLE)
- stimer->config |= HV_STIMER_ENABLE;
+ stimer->config.enable = 0;
+ else if (stimer->config.auto_enable)
+ stimer->config.enable = 1;
stimer_mark_pending(stimer, false);
return 0;
}
static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
- *pconfig = stimer->config;
+ *pconfig = stimer->config.as_uint64;
return 0;
}
@@ -586,44 +559,60 @@ static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
}
static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
- struct hv_message *src_msg)
+ struct hv_message *src_msg, bool no_retry)
{
struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
- struct page *page;
- gpa_t gpa;
- struct hv_message *dst_msg;
+ int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
+ gfn_t msg_page_gfn;
+ struct hv_message_header hv_hdr;
int r;
- struct hv_message_page *msg_page;
if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
return -ENOENT;
- gpa = synic->msg_page & PAGE_MASK;
- page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT);
- if (is_error_page(page))
- return -EFAULT;
+ msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
- msg_page = kmap_atomic(page);
- dst_msg = &msg_page->sint_message[sint];
- if (sync_cmpxchg(&dst_msg->header.message_type, HVMSG_NONE,
- src_msg->header.message_type) != HVMSG_NONE) {
- dst_msg->header.message_flags.msg_pending = 1;
- r = -EAGAIN;
- } else {
- memcpy(&dst_msg->u.payload, &src_msg->u.payload,
- src_msg->header.payload_size);
- dst_msg->header.message_type = src_msg->header.message_type;
- dst_msg->header.payload_size = src_msg->header.payload_size;
- r = synic_set_irq(synic, sint);
- if (r >= 1)
- r = 0;
- else if (r == 0)
- r = -EFAULT;
+ /*
+ * Strictly following the spec-mandated ordering would assume setting
+ * .msg_pending before checking .message_type. However, this function
+ * is only called in vcpu context so the entire update is atomic from
+ * guest POV and thus the exact order here doesn't matter.
+ */
+ r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
+ msg_off + offsetof(struct hv_message,
+ header.message_type),
+ sizeof(hv_hdr.message_type));
+ if (r < 0)
+ return r;
+
+ if (hv_hdr.message_type != HVMSG_NONE) {
+ if (no_retry)
+ return 0;
+
+ hv_hdr.message_flags.msg_pending = 1;
+ r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
+ &hv_hdr.message_flags,
+ msg_off +
+ offsetof(struct hv_message,
+ header.message_flags),
+ sizeof(hv_hdr.message_flags));
+ if (r < 0)
+ return r;
+ return -EAGAIN;
}
- kunmap_atomic(msg_page);
- kvm_release_page_dirty(page);
- kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
- return r;
+
+ r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
+ sizeof(src_msg->header) +
+ src_msg->header.payload_size);
+ if (r < 0)
+ return r;
+
+ r = synic_set_irq(synic, sint);
+ if (r < 0)
+ return r;
+ if (r == 0)
+ return -EFAULT;
+ return 0;
}
static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
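Two properties let the rewritten delivery path drop the kmap_atomic() dance: kvm_vcpu_read_guest_page()/kvm_vcpu_write_guest_page() accept a sub-page offset and length, so individual header fields can be updated with offsetof() arithmetic, and the write helper marks the page dirty itself, which is why the explicit kvm_release_page_dirty()/kvm_vcpu_mark_page_dirty() calls disappear. The msg_pending update in miniature (the wrapper itself is a sketch, not patch code):

	static int set_msg_pending(struct kvm_vcpu *vcpu, gfn_t msg_page_gfn,
				   int msg_off, struct hv_message_header *hdr)
	{
		hdr->message_flags.msg_pending = 1;
		return kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
						 &hdr->message_flags,
						 msg_off +
						 offsetof(struct hv_message,
							  header.message_flags),
						 sizeof(hdr->message_flags));
	}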
@@ -633,24 +622,45 @@ static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
struct hv_timer_message_payload *payload =
(struct hv_timer_message_payload *)&msg->u.payload;
+ /*
+ * To avoid piling up periodic ticks, don't retry message
+ * delivery for them (within "lazy" lost ticks policy).
+ */
+ bool no_retry = stimer->config.periodic;
+
payload->expiration_time = stimer->exp_time;
payload->delivery_time = get_time_ref_counter(vcpu->kvm);
return synic_deliver_msg(vcpu_to_synic(vcpu),
- HV_STIMER_SINT(stimer->config), msg);
+ stimer->config.sintx, msg,
+ no_retry);
+}
+
+static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
+{
+ struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
+ struct kvm_lapic_irq irq = {
+ .delivery_mode = APIC_DM_FIXED,
+ .vector = stimer->config.apic_vector
+ };
+
+ return !kvm_apic_set_irq(vcpu, &irq, NULL);
}
static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
- int r;
+ int r, direct = stimer->config.direct_mode;
stimer->msg_pending = true;
- r = stimer_send_msg(stimer);
+ if (!direct)
+ r = stimer_send_msg(stimer);
+ else
+ r = stimer_notify_direct(stimer);
trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id,
- stimer->index, r);
+ stimer->index, direct, r);
if (!r) {
stimer->msg_pending = false;
- if (!(stimer->config & HV_STIMER_PERIODIC))
- stimer->config &= ~HV_STIMER_ENABLE;
+ if (!(stimer->config.periodic))
+ stimer->config.enable = 0;
}
}
@@ -664,7 +674,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
stimer = &hv_vcpu->stimer[i];
- if (stimer->config & HV_STIMER_ENABLE) {
+ if (stimer->config.enable) {
exp_time = stimer->exp_time;
if (exp_time) {
@@ -674,7 +684,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
stimer_expiration(stimer);
}
- if ((stimer->config & HV_STIMER_ENABLE) &&
+ if ((stimer->config.enable) &&
stimer->count) {
if (!stimer->msg_pending)
stimer_start(stimer);
@@ -815,9 +825,9 @@ static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
if (host)
- hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;
+ hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
- if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
+ if (!host && (data & HV_CRASH_CTL_CRASH_NOTIFY)) {
vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
hv->hv_crash_param[0],
@@ -1758,3 +1768,124 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
return kvm_hv_eventfd_deassign(kvm, args->conn_id);
return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}
+
+int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries)
+{
+ uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+ struct kvm_cpuid_entry2 cpuid_entries[] = {
+ { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
+ { .function = HYPERV_CPUID_INTERFACE },
+ { .function = HYPERV_CPUID_VERSION },
+ { .function = HYPERV_CPUID_FEATURES },
+ { .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
+ { .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
+ { .function = HYPERV_CPUID_NESTED_FEATURES },
+ };
+ int i, nent = ARRAY_SIZE(cpuid_entries);
+
+ /* Skip NESTED_FEATURES if eVMCS is not supported */
+ if (!evmcs_ver)
+ --nent;
+
+ if (cpuid->nent < nent)
+ return -E2BIG;
+
+ if (cpuid->nent > nent)
+ cpuid->nent = nent;
+
+ for (i = 0; i < nent; i++) {
+ struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
+ u32 signature[3];
+
+ switch (ent->function) {
+ case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
+ memcpy(signature, "Linux KVM Hv", 12);
+
+ ent->eax = HYPERV_CPUID_NESTED_FEATURES;
+ ent->ebx = signature[0];
+ ent->ecx = signature[1];
+ ent->edx = signature[2];
+ break;
+
+ case HYPERV_CPUID_INTERFACE:
+ memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
+ ent->eax = signature[0];
+ break;
+
+ case HYPERV_CPUID_VERSION:
+ /*
+ * We implement some Hyper-V 2016 functions so let's use
+ * this version.
+ */
+ ent->eax = 0x00003839;
+ ent->ebx = 0x000A0000;
+ break;
+
+ case HYPERV_CPUID_FEATURES:
+ ent->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
+ ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
+ ent->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
+ ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
+ ent->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
+ ent->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+ ent->eax |= HV_X64_MSR_VP_INDEX_AVAILABLE;
+ ent->eax |= HV_X64_MSR_RESET_AVAILABLE;
+ ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
+ ent->eax |= HV_X64_MSR_GUEST_IDLE_AVAILABLE;
+ ent->eax |= HV_X64_ACCESS_FREQUENCY_MSRS;
+ ent->eax |= HV_X64_ACCESS_REENLIGHTENMENT;
+
+ ent->ebx |= HV_X64_POST_MESSAGES;
+ ent->ebx |= HV_X64_SIGNAL_EVENTS;
+
+ ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
+ ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+ ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
+
+ break;
+
+ case HYPERV_CPUID_ENLIGHTMENT_INFO:
+ ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+ ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
+ ent->eax |= HV_X64_SYSTEM_RESET_RECOMMENDED;
+ ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
+ ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
+ ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
+ ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
+
+ /*
+ * Default number of spinlock retry attempts, matches
+ * HyperV 2016.
+ */
+ ent->ebx = 0x00000FFF;
+
+ break;
+
+ case HYPERV_CPUID_IMPLEMENT_LIMITS:
+ /* Maximum number of virtual processors */
+ ent->eax = KVM_MAX_VCPUS;
+ /*
+ * Maximum number of logical processors, matches
+ * HyperV 2016.
+ */
+ ent->ebx = 64;
+
+ break;
+
+ case HYPERV_CPUID_NESTED_FEATURES:
+ ent->eax = evmcs_ver;
+
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (copy_to_user(entries, cpuid_entries,
+ nent * sizeof(struct kvm_cpuid_entry2)))
+ return -EFAULT;
+
+ return 0;
+}
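A hedged sketch of consuming this handler from userspace, assuming it is exposed as the vcpu ioctl KVM_GET_SUPPORTED_HV_CPUID elsewhere in the series and that the dispatcher writes the clamped nent back, as KVM_GET_SUPPORTED_CPUID does (error handling trimmed):

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);
		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
		int i, nent = 16;	/* comfortably above the 7 leaves */
		struct kvm_cpuid2 *cpuid;

		cpuid = calloc(1, sizeof(*cpuid) +
				  nent * sizeof(struct kvm_cpuid_entry2));
		cpuid->nent = nent;

		if (ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid) < 0) {
			perror("KVM_GET_SUPPORTED_HV_CPUID");
			return 1;
		}
		for (i = 0; i < cpuid->nent; i++)
			printf("%#10x: eax=%#x ebx=%#x ecx=%#x edx=%#x\n",
			       cpuid->entries[i].function,
			       cpuid->entries[i].eax, cpuid->entries[i].ebx,
			       cpuid->entries[i].ecx, cpuid->entries[i].edx);
		return 0;
	}

Passing a buffer with fewer entries than the handler wants to fill makes the ioctl fail, matching the -E2BIG check above.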
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 0e66c12ed2c3..fd7cf13a2144 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -24,6 +24,8 @@
#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__
+#include <linux/kvm_host.h>
+
static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu)
{
return &vcpu->arch.hyperv;
@@ -95,5 +97,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
+int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries);
#endif
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 9619dcc2b325..f8f56a93358b 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -2,6 +2,8 @@
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H
+#include <linux/kvm_host.h>
+
#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS \
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 89db20f8cb70..9f089e2e09d0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -55,7 +55,7 @@
#define PRIo64 "o"
/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
-#define apic_debug(fmt, arg...)
+#define apic_debug(fmt, arg...) do {} while (0)
/* 14 is the version for Xeon and Pentium 8.4.8*/
#define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
@@ -251,10 +251,9 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
if (enabled != apic->sw_enabled) {
apic->sw_enabled = enabled;
- if (enabled) {
+ if (enabled)
static_key_slow_dec_deferred(&apic_sw_disabled);
- recalculate_apic_map(apic->vcpu->kvm);
- } else
+ else
static_key_slow_inc(&apic_sw_disabled.key);
}
}
@@ -576,6 +575,11 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
+ if (unlikely(!map)) {
+ count = -EOPNOTSUPP;
+ goto out;
+ }
+
if (min > map->max_apic_id)
goto out;
/* Bits above cluster_size are masked in the caller. */
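Replacing the empty expansion with do {} while (0) keeps a disabled apic_debug() call a syntactically real statement: the old definition turned "if (cond) apic_debug(...);" into an if with a bare null body, which compilers flag under -Wempty-body and which reads like a bug. A minimal illustration, not kernel code:

	#define dbg_empty(fmt, ...)
	#define dbg_noop(fmt, ...)	do { } while (0)

	void demo(int cond)
	{
		if (cond)
			dbg_empty("hi\n");	/* expands to `if (cond) ;` */
		if (cond)
			dbg_noop("hi\n");	/* still one no-op statement */
	}

The kvm_pv_send_ipi() hunk above is independent of that cleanup: kvm->arch.apic_map can legitimately be NULL (its allocation is deferred and can fail), so the fast-path IPI now bails out with -EOPNOTSUPP instead of dereferencing a NULL map.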
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cf5f572f2305..ce770b446238 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -264,6 +264,35 @@ static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
+
+static inline bool kvm_available_flush_tlb_with_range(void)
+{
+ return kvm_x86_ops->tlb_remote_flush_with_range;
+}
+
+static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range)
+{
+ int ret = -ENOTSUPP;
+
+ if (range && kvm_x86_ops->tlb_remote_flush_with_range)
+ ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);
+
+ if (ret)
+ kvm_flush_remote_tlbs(kvm);
+}
+
+static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
+ u64 start_gfn, u64 pages)
+{
+ struct kvm_tlb_range range;
+
+ range.start_gfn = start_gfn;
+ range.pages = pages;
+
+ kvm_flush_remote_tlbs_with_range(kvm, &range);
+}
+
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
{
BUG_ON((mmio_mask & mmio_value) != mmio_value);
@@ -1456,8 +1485,12 @@ static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
- if (__drop_large_spte(vcpu->kvm, sptep))
- kvm_flush_remote_tlbs(vcpu->kvm);
+ if (__drop_large_spte(vcpu->kvm, sptep)) {
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
+ KVM_PAGES_PER_HPAGE(sp->role.level));
+ }
}
/*
@@ -1743,10 +1776,12 @@ restart:
}
}
- if (need_flush)
- kvm_flush_remote_tlbs(kvm);
+ if (need_flush && kvm_available_flush_tlb_with_range()) {
+ kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
+ return 0;
+ }
- return 0;
+ return need_flush;
}
struct slot_rmap_walk_iterator {
@@ -1880,9 +1915,9 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
- kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
+ return kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -1925,7 +1960,8 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
- kvm_flush_remote_tlbs(vcpu->kvm);
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
+ KVM_PAGES_PER_HPAGE(sp->role.level));
}
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
@@ -2441,7 +2477,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
account_shadowed(vcpu->kvm, sp);
if (level == PT_PAGE_TABLE_LEVEL &&
rmap_write_protect(vcpu, gfn))
- kvm_flush_remote_tlbs(vcpu->kvm);
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
if (level > PT_PAGE_TABLE_LEVEL && need_sync)
flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
@@ -2561,7 +2597,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
return;
drop_parent_pte(child, sptep);
- kvm_flush_remote_tlbs(vcpu->kvm);
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
}
}
@@ -2985,8 +3021,10 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
ret = RET_PF_EMULATE;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
+
if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
- kvm_flush_remote_tlbs(vcpu->kvm);
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
+ KVM_PAGES_PER_HPAGE(level));
if (unlikely(is_mmio_spte(*sptep)))
ret = RET_PF_EMULATE;
@@ -5074,9 +5112,9 @@ static bool need_remote_flush(u64 old, u64 new)
}
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
- const u8 *new, int *bytes)
+ int *bytes)
{
- u64 gentry;
+ u64 gentry = 0;
int r;
/*
@@ -5088,22 +5126,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
*gpa &= ~(gpa_t)7;
*bytes = 8;
- r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
- if (r)
- gentry = 0;
- new = (const u8 *)&gentry;
}
- switch (*bytes) {
- case 4:
- gentry = *(const u32 *)new;
- break;
- case 8:
- gentry = *(const u64 *)new;
- break;
- default:
- gentry = 0;
- break;
+ if (*bytes == 4 || *bytes == 8) {
+ r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
+ if (r)
+ gentry = 0;
}
return gentry;
@@ -5207,8 +5235,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
- gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
-
/*
* No need to care whether the memory allocation is successful
* or not, since pte prefetch is skipped if it does not have
@@ -5217,6 +5243,9 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
mmu_topup_memory_caches(vcpu);
spin_lock(&vcpu->kvm->mmu_lock);
+
+ gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
+
++vcpu->kvm->stat.mmu_pte_write;
kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
@@ -5595,8 +5624,13 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
+ bool flush_tlb = true;
+ bool flush = false;
int i;
+ if (kvm_available_flush_tlb_with_range())
+ flush_tlb = false;
+
spin_lock(&kvm->mmu_lock);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
@@ -5608,12 +5642,17 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
if (start >= end)
continue;
- slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
- PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
- start, end - 1, true);
+ flush |= slot_handle_level_range(kvm, memslot,
+ kvm_zap_rmapp, PT_PAGE_TABLE_LEVEL,
+ PT_MAX_HUGEPAGE_LEVEL, start,
+ end - 1, flush_tlb);
}
}
+ if (flush)
+ kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+ gfn_end - gfn_start + 1);
+
spin_unlock(&kvm->mmu_lock);
}
@@ -5647,12 +5686,13 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
* spte from present to present (changing the spte from present
* to nonpresent will flush all the TLBs immediately), in other
* words, the only case we care is mmu_spte_update() where we
- * haved checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
+ * have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
* instead of PT_WRITABLE_MASK, that means it does not depend
* on PT_WRITABLE_MASK anymore.
*/
if (flush)
- kvm_flush_remote_tlbs(kvm);
+ kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+ memslot->npages);
}
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
@@ -5680,7 +5720,13 @@ restart:
!kvm_is_reserved_pfn(pfn) &&
PageTransCompoundMap(pfn_to_page(pfn))) {
pte_list_remove(rmap_head, sptep);
- need_tlb_flush = 1;
+
+ if (kvm_available_flush_tlb_with_range())
+ kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
+ KVM_PAGES_PER_HPAGE(sp->role.level));
+ else
+ need_tlb_flush = 1;
+
goto restart;
}
}
@@ -5716,7 +5762,8 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
* dirty_bitmap.
*/
if (flush)
- kvm_flush_remote_tlbs(kvm);
+ kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+ memslot->npages);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
@@ -5734,7 +5781,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
lockdep_assert_held(&kvm->slots_lock);
if (flush)
- kvm_flush_remote_tlbs(kvm);
+ kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+ memslot->npages);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
@@ -5751,7 +5799,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
/* see kvm_mmu_slot_leaf_clear_dirty */
if (flush)
- kvm_flush_remote_tlbs(kvm);
+ kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+ memslot->npages);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
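Every conversion in this file follows one contract: attempt a ranged flush, and fall back to the full kvm_flush_remote_tlbs() when the backend cannot honor it (nonzero return). The hook's shape with a hypothetical backend, to make the return convention explicit:

	/* Hypothetical backend hook; NULL when the platform lacks ranged
	 * invalidation. Mirrors kvm_x86_ops->tlb_remote_flush_with_range. */
	static int (*hw_flush_gfn_range)(struct kvm *kvm, u64 start_gfn,
					 u64 pages);

	static int demo_flush_with_range(struct kvm *kvm,
					 struct kvm_tlb_range *range)
	{
		if (!hw_flush_gfn_range)
			return -ENOTSUPP;	/* caller does the full flush */
		return hw_flush_gfn_range(kvm, range->start_gfn, range->pages);
	}

The span is sized to the mapping being touched: KVM_PAGES_PER_HPAGE(level) is 512 for a 2MiB mapping, so dropping a large SPTE invalidates exactly that 2MiB window rather than every vcpu's entire TLB.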
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 7cf2185b7eb5..6bdca39829bc 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -894,7 +894,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
- kvm_flush_remote_tlbs(vcpu->kvm);
+ kvm_flush_remote_tlbs_with_address(vcpu->kvm,
+ sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
if (!rmap_can_add(vcpu))
break;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0e21ccc46792..307e5bddb6d9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -675,11 +675,6 @@ struct svm_cpu_data {
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
-struct svm_init_data {
- int cpu;
- int r;
-};
-
static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
@@ -711,17 +706,17 @@ static u32 svm_msrpm_offset(u32 msr)
static inline void clgi(void)
{
- asm volatile (__ex(SVM_CLGI));
+ asm volatile (__ex("clgi"));
}
static inline void stgi(void)
{
- asm volatile (__ex(SVM_STGI));
+ asm volatile (__ex("stgi"));
}
static inline void invlpga(unsigned long addr, u32 asid)
{
- asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
+ asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
}
static int get_npt_level(struct kvm_vcpu *vcpu)
@@ -1446,7 +1441,7 @@ static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
return vcpu->arch.tsc_offset;
}
-static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 g_tsc_offset = 0;
@@ -1456,14 +1451,16 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
g_tsc_offset = svm->vmcb->control.tsc_offset -
svm->nested.hsave->control.tsc_offset;
svm->nested.hsave->control.tsc_offset = offset;
- } else
- trace_kvm_write_tsc_offset(vcpu->vcpu_id,
- svm->vmcb->control.tsc_offset,
- offset);
+ }
+
+ trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+ svm->vmcb->control.tsc_offset - g_tsc_offset,
+ offset);
svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+ return svm->vmcb->control.tsc_offset;
}
static void avic_init_vmcb(struct vcpu_svm *svm)
@@ -1664,20 +1661,23 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
static int avic_init_access_page(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- int ret;
+ int ret = 0;
+ mutex_lock(&kvm->slots_lock);
if (kvm->arch.apic_access_page_done)
- return 0;
+ goto out;
- ret = x86_set_memory_region(kvm,
- APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
- APIC_DEFAULT_PHYS_BASE,
- PAGE_SIZE);
+ ret = __x86_set_memory_region(kvm,
+ APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+ APIC_DEFAULT_PHYS_BASE,
+ PAGE_SIZE);
if (ret)
- return ret;
+ goto out;
kvm->arch.apic_access_page_done = true;
- return 0;
+out:
+ mutex_unlock(&kvm->slots_lock);
+ return ret;
}
static int avic_init_backing_page(struct kvm_vcpu *vcpu)
@@ -2125,6 +2125,13 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
goto out;
}
+ svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
+ if (!svm->vcpu.arch.guest_fpu) {
+ printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
+ err = -ENOMEM;
+ goto free_partial_svm;
+ }
+
err = kvm_vcpu_init(&svm->vcpu, kvm, id);
if (err)
goto free_svm;
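The guest_fpu allocation slots into the standard kernel unwind idiom: each fallible step gets a label placed so that exactly the allocations made before it are released, hence the new free_partial_svm label between the existing free_svm target and the final kmem_cache_free(). The pattern condensed to its skeleton (all names below are generic stand-ins, not patch code):

	struct demo { void *fpu; };
	static struct kmem_cache *demo_cache, *fpu_cache;	/* assumed set up */
	static int demo_init(struct demo *d) { return 0; }	/* stand-in */

	static struct demo *demo_create(void)
	{
		struct demo *d = kmem_cache_zalloc(demo_cache, GFP_KERNEL);

		if (!d)
			goto out;
		d->fpu = kmem_cache_zalloc(fpu_cache, GFP_KERNEL);
		if (!d->fpu)
			goto free_demo;
		if (demo_init(d))
			goto free_fpu;
		return d;

	free_fpu:
		kmem_cache_free(fpu_cache, d->fpu);
	free_demo:
		kmem_cache_free(demo_cache, d);
	out:
		return NULL;
	}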
@@ -2184,26 +2191,39 @@ free_page1:
uninit:
kvm_vcpu_uninit(&svm->vcpu);
free_svm:
+ kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
+free_partial_svm:
kmem_cache_free(kvm_vcpu_cache, svm);
out:
return ERR_PTR(err);
}
+static void svm_clear_current_vmcb(struct vmcb *vmcb)
+{
+ int i;
+
+ for_each_online_cpu(i)
+ cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
+}
+
static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ /*
+ * The vmcb page can be recycled, causing a false negative in
+ * svm_vcpu_load(). So, ensure that no logical CPU has this
+ * vmcb page recorded as its current vmcb.
+ */
+ svm_clear_current_vmcb(svm->vmcb);
+
__free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
__free_page(virt_to_page(svm->nested.hsave));
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
kmem_cache_free(kvm_vcpu_cache, svm);
- /*
- * The vmcb page can be recycled, causing a false negative in
- * svm_vcpu_load(). So do a full IBPB now.
- */
- indirect_branch_prediction_barrier();
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
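svm_clear_current_vmcb() is the general pattern for invalidating a per-CPU cached pointer before its object is freed: cmpxchg() clears only the slots that still reference the dying vmcb, so each CPU later observes either NULL (and reloads state) or a different, still-live vmcb. This replaces the previous approach of issuing a full indirect branch prediction barrier on every vcpu free. The idiom in isolation:

	static DEFINE_PER_CPU(void *, cached_ptr);

	/* sketch: scrub a per-CPU pointer cache before freeing obj */
	static void clear_cached_ptr(void *obj)
	{
		int cpu;

		for_each_online_cpu(cpu)
			cmpxchg(&per_cpu(cached_ptr, cpu), obj, NULL);
	}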
@@ -2923,6 +2943,8 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
WARN_ON(mmu_is_nested(vcpu));
+
+ vcpu->arch.mmu = &vcpu->arch.guest_mmu;
kvm_init_shadow_mmu(vcpu);
vcpu->arch.mmu->set_cr3 = nested_svm_set_tdp_cr3;
vcpu->arch.mmu->get_cr3 = nested_svm_get_tdp_cr3;
@@ -2935,6 +2957,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.mmu = &vcpu->arch.root_mmu;
vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
@@ -3261,6 +3284,8 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
dst->event_inj_err = from->event_inj_err;
dst->nested_cr3 = from->nested_cr3;
dst->virt_ext = from->virt_ext;
+ dst->pause_filter_count = from->pause_filter_count;
+ dst->pause_filter_thresh = from->pause_filter_thresh;
}
static int nested_svm_vmexit(struct vcpu_svm *svm)
@@ -3339,6 +3364,11 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
nested_vmcb->control.event_inj = 0;
nested_vmcb->control.event_inj_err = 0;
+ nested_vmcb->control.pause_filter_count =
+ svm->vmcb->control.pause_filter_count;
+ nested_vmcb->control.pause_filter_thresh =
+ svm->vmcb->control.pause_filter_thresh;
+
/* We always set V_INTR_MASKING and remember the old value in hflags */
if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
@@ -3444,7 +3474,6 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
- kvm_mmu_unload(&svm->vcpu);
svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
nested_svm_init_mmu_context(&svm->vcpu);
}
@@ -3516,6 +3545,11 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
+ svm->vmcb->control.pause_filter_count =
+ nested_vmcb->control.pause_filter_count;
+ svm->vmcb->control.pause_filter_thresh =
+ nested_vmcb->control.pause_filter_thresh;
+
nested_svm_unmap(page);
/* Enter Guest-Mode */
@@ -5620,9 +5654,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
/* Enter guest mode */
"push %%" _ASM_AX " \n\t"
"mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
- __ex(SVM_VMLOAD) "\n\t"
- __ex(SVM_VMRUN) "\n\t"
- __ex(SVM_VMSAVE) "\n\t"
+ __ex("vmload %%" _ASM_AX) "\n\t"
+ __ex("vmrun %%" _ASM_AX) "\n\t"
+ __ex("vmsave %%" _ASM_AX) "\n\t"
"pop %%" _ASM_AX " \n\t"
/* Save guest registers, load host registers */
@@ -5820,6 +5854,13 @@ static bool svm_cpu_has_accelerated_tpr(void)
static bool svm_has_emulated_msr(int index)
{
+ switch (index) {
+ case MSR_IA32_MCG_EXT_CTL:
+ return false;
+ default:
+ break;
+ }
+
return true;
}
@@ -5908,6 +5949,11 @@ static bool svm_umip_emulated(void)
return false;
}
+static bool svm_pt_supported(void)
+{
+ return false;
+}
+
static bool svm_has_wbinvd_exit(void)
{
return true;
@@ -7037,6 +7083,12 @@ failed:
return ret;
}
+static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
+{
+ /* Not supported */
+ return 0;
+}
+
static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version)
{
@@ -7143,13 +7195,14 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.mpx_supported = svm_mpx_supported,
.xsaves_supported = svm_xsaves_supported,
.umip_emulated = svm_umip_emulated,
+ .pt_supported = svm_pt_supported,
.set_supported_cpuid = svm_set_supported_cpuid,
.has_wbinvd_exit = svm_has_wbinvd_exit,
.read_l1_tsc_offset = svm_read_l1_tsc_offset,
- .write_tsc_offset = svm_write_tsc_offset,
+ .write_l1_tsc_offset = svm_write_l1_tsc_offset,
.set_tdp_cr3 = set_tdp_cr3,
@@ -7175,6 +7228,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.mem_enc_unreg_region = svm_unregister_enc_region,
.nested_enable_evmcs = nested_enable_evmcs,
+ .nested_get_evmcs_version = nested_get_evmcs_version,
};
static int __init svm_init(void)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 0659465a745c..705f40ae2532 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1254,24 +1254,26 @@ TRACE_EVENT(kvm_hv_stimer_callback,
* Tracepoint for stimer_expiration.
*/
TRACE_EVENT(kvm_hv_stimer_expiration,
- TP_PROTO(int vcpu_id, int timer_index, int msg_send_result),
- TP_ARGS(vcpu_id, timer_index, msg_send_result),
+ TP_PROTO(int vcpu_id, int timer_index, int direct, int msg_send_result),
+ TP_ARGS(vcpu_id, timer_index, direct, msg_send_result),
TP_STRUCT__entry(
__field(int, vcpu_id)
__field(int, timer_index)
+ __field(int, direct)
__field(int, msg_send_result)
),
TP_fast_assign(
__entry->vcpu_id = vcpu_id;
__entry->timer_index = timer_index;
+ __entry->direct = direct;
__entry->msg_send_result = msg_send_result;
),
- TP_printk("vcpu_id %d timer %d msg send result %d",
+ TP_printk("vcpu_id %d timer %d direct %d send result %d",
__entry->vcpu_id, __entry->timer_index,
- __entry->msg_send_result)
+ __entry->direct, __entry->msg_send_result)
);
/*
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
deleted file mode 100644
index 4555077d69ce..000000000000
--- a/arch/x86/kvm/vmx.c
+++ /dev/null
@@ -1,15218 +0,0 @@
-/*
- * Kernel-based Virtual Machine driver for Linux
- *
- * This module enables machines with Intel VT-x extensions to run virtual
- * machines without emulation or binary translation.
- *
- * Copyright (C) 2006 Qumranet, Inc.
- * Copyright 2010 Red Hat, Inc. and/or its affiliates.
- *
- * Authors:
- * Avi Kivity <avi@qumranet.com>
- * Yaniv Kamay <yaniv@qumranet.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- */
-
-#include "irq.h"
-#include "mmu.h"
-#include "cpuid.h"
-#include "lapic.h"
-#include "hyperv.h"
-
-#include <linux/kvm_host.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/sched.h>
-#include <linux/moduleparam.h>
-#include <linux/mod_devicetable.h>
-#include <linux/trace_events.h>
-#include <linux/slab.h>
-#include <linux/tboot.h>
-#include <linux/hrtimer.h>
-#include <linux/frame.h>
-#include <linux/nospec.h>
-#include "kvm_cache_regs.h"
-#include "x86.h"
-
-#include <asm/asm.h>
-#include <asm/cpu.h>
-#include <asm/io.h>
-#include <asm/desc.h>
-#include <asm/vmx.h>
-#include <asm/virtext.h>
-#include <asm/mce.h>
-#include <asm/fpu/internal.h>
-#include <asm/perf_event.h>
-#include <asm/debugreg.h>
-#include <asm/kexec.h>
-#include <asm/apic.h>
-#include <asm/irq_remapping.h>
-#include <asm/mmu_context.h>
-#include <asm/spec-ctrl.h>
-#include <asm/mshyperv.h>
-
-#include "trace.h"
-#include "pmu.h"
-#include "vmx_evmcs.h"
-
-#define __ex(x) __kvm_handle_fault_on_reboot(x)
-#define __ex_clear(x, reg) \
- ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg)
-
-MODULE_AUTHOR("Qumranet");
-MODULE_LICENSE("GPL");
-
-static const struct x86_cpu_id vmx_cpu_id[] = {
- X86_FEATURE_MATCH(X86_FEATURE_VMX),
- {}
-};
-MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
-
-static bool __read_mostly enable_vpid = 1;
-module_param_named(vpid, enable_vpid, bool, 0444);
-
-static bool __read_mostly enable_vnmi = 1;
-module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
-
-static bool __read_mostly flexpriority_enabled = 1;
-module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
-
-static bool __read_mostly enable_ept = 1;
-module_param_named(ept, enable_ept, bool, S_IRUGO);
-
-static bool __read_mostly enable_unrestricted_guest = 1;
-module_param_named(unrestricted_guest,
- enable_unrestricted_guest, bool, S_IRUGO);
-
-static bool __read_mostly enable_ept_ad_bits = 1;
-module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
-
-static bool __read_mostly emulate_invalid_guest_state = true;
-module_param(emulate_invalid_guest_state, bool, S_IRUGO);
-
-static bool __read_mostly fasteoi = 1;
-module_param(fasteoi, bool, S_IRUGO);
-
-static bool __read_mostly enable_apicv = 1;
-module_param(enable_apicv, bool, S_IRUGO);
-
-static bool __read_mostly enable_shadow_vmcs = 1;
-module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
-/*
- * If nested=1, nested virtualization is supported, i.e., guests may use
- * VMX and be a hypervisor for its own guests. If nested=0, guests may not
- * use VMX instructions.
- */
-static bool __read_mostly nested = 1;
-module_param(nested, bool, S_IRUGO);
-
-static bool __read_mostly nested_early_check = 0;
-module_param(nested_early_check, bool, S_IRUGO);
-
-static u64 __read_mostly host_xss;
-
-static bool __read_mostly enable_pml = 1;
-module_param_named(pml, enable_pml, bool, S_IRUGO);
-
-#define MSR_TYPE_R 1
-#define MSR_TYPE_W 2
-#define MSR_TYPE_RW 3
-
-#define MSR_BITMAP_MODE_X2APIC 1
-#define MSR_BITMAP_MODE_X2APIC_APICV 2
-
-#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
-
-/* Guest_tsc -> host_tsc conversion requires 64-bit division. */
-static int __read_mostly cpu_preemption_timer_multi;
-static bool __read_mostly enable_preemption_timer = 1;
-#ifdef CONFIG_X86_64
-module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
-#endif
-
-#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
-#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
-#define KVM_VM_CR0_ALWAYS_ON \
- (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \
- X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
-#define KVM_CR4_GUEST_OWNED_BITS \
- (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
-
-#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
-#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
-#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
-
-#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
-
-#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
-
-/*
- * Hyper-V requires all of these, so mark them as supported even though
- * they are just treated the same as all-context.
- */
-#define VMX_VPID_EXTENT_SUPPORTED_MASK \
- (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
- VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
- VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
- VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
-
-/*
- * These 2 parameters are used to config the controls for Pause-Loop Exiting:
- * ple_gap: upper bound on the amount of time between two successive
- * executions of PAUSE in a loop. Also indicate if ple enabled.
- * According to test, this time is usually smaller than 128 cycles.
- * ple_window: upper bound on the amount of time a guest is allowed to execute
- * in a PAUSE loop. Tests indicate that most spinlocks are held for
- * less than 2^12 cycles
- * Time is measured based on a counter that runs at the same rate as the TSC,
- * refer SDM volume 3b section 21.6.13 & 22.1.3.
- */
-static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
-
-static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
-module_param(ple_window, uint, 0444);
-
-/* Default doubles per-vcpu window every exit. */
-static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
-module_param(ple_window_grow, uint, 0444);
-
-/* Default resets per-vcpu window every exit to ple_window. */
-static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
-module_param(ple_window_shrink, uint, 0444);
-
-/* Default is to compute the maximum so we can never overflow. */
-static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
-module_param(ple_window_max, uint, 0444);
-
-extern const ulong vmx_return;
-extern const ulong vmx_early_consistency_check_return;
-
-static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
-static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
-static DEFINE_MUTEX(vmx_l1d_flush_mutex);
-
-/* Storage for pre module init parameter parsing */
-static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
-
-static const struct {
- const char *option;
- bool for_parse;
-} vmentry_l1d_param[] = {
- [VMENTER_L1D_FLUSH_AUTO] = {"auto", true},
- [VMENTER_L1D_FLUSH_NEVER] = {"never", true},
- [VMENTER_L1D_FLUSH_COND] = {"cond", true},
- [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true},
- [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
- [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
-};
-
-#define L1D_CACHE_ORDER 4
-static void *vmx_l1d_flush_pages;
-
-static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
-{
- struct page *page;
- unsigned int i;
-
- if (!enable_ept) {
- l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
- return 0;
- }
-
- if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
- u64 msr;
-
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
- if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
- l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
- return 0;
- }
- }
-
- /* If set to auto use the default l1tf mitigation method */
- if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
- switch (l1tf_mitigation) {
- case L1TF_MITIGATION_OFF:
- l1tf = VMENTER_L1D_FLUSH_NEVER;
- break;
- case L1TF_MITIGATION_FLUSH_NOWARN:
- case L1TF_MITIGATION_FLUSH:
- case L1TF_MITIGATION_FLUSH_NOSMT:
- l1tf = VMENTER_L1D_FLUSH_COND;
- break;
- case L1TF_MITIGATION_FULL:
- case L1TF_MITIGATION_FULL_FORCE:
- l1tf = VMENTER_L1D_FLUSH_ALWAYS;
- break;
- }
- } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
- l1tf = VMENTER_L1D_FLUSH_ALWAYS;
- }
-
- if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
- !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
- page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
- if (!page)
- return -ENOMEM;
- vmx_l1d_flush_pages = page_address(page);
-
- /*
- * Initialize each page with a different pattern in
- * order to protect against KSM in the nested
- * virtualization case.
- */
- for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
- memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
- PAGE_SIZE);
- }
- }
-
- l1tf_vmx_mitigation = l1tf;
-
- if (l1tf != VMENTER_L1D_FLUSH_NEVER)
- static_branch_enable(&vmx_l1d_should_flush);
- else
- static_branch_disable(&vmx_l1d_should_flush);
-
- if (l1tf == VMENTER_L1D_FLUSH_COND)
- static_branch_enable(&vmx_l1d_flush_cond);
- else
- static_branch_disable(&vmx_l1d_flush_cond);
- return 0;
-}
-
-static int vmentry_l1d_flush_parse(const char *s)
-{
- unsigned int i;
-
- if (s) {
- for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
- if (vmentry_l1d_param[i].for_parse &&
- sysfs_streq(s, vmentry_l1d_param[i].option))
- return i;
- }
- }
- return -EINVAL;
-}
-
-static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
-{
- int l1tf, ret;
-
- l1tf = vmentry_l1d_flush_parse(s);
- if (l1tf < 0)
- return l1tf;
-
- if (!boot_cpu_has(X86_BUG_L1TF))
- return 0;
-
- /*
- * Has vmx_init() run already? If not then this is the pre init
- * parameter parsing. In that case just store the value and let
- * vmx_init() do the proper setup after enable_ept has been
- * established.
- */
- if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
- vmentry_l1d_flush_param = l1tf;
- return 0;
- }
-
- mutex_lock(&vmx_l1d_flush_mutex);
- ret = vmx_setup_l1d_flush(l1tf);
- mutex_unlock(&vmx_l1d_flush_mutex);
- return ret;
-}
-
-static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
-{
- if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
- return sprintf(s, "???\n");
-
- return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
-}
-
-static const struct kernel_param_ops vmentry_l1d_flush_ops = {
- .set = vmentry_l1d_flush_set,
- .get = vmentry_l1d_flush_get,
-};
-module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
-
-enum ept_pointers_status {
- EPT_POINTERS_CHECK = 0,
- EPT_POINTERS_MATCH = 1,
- EPT_POINTERS_MISMATCH = 2
-};
-
-struct kvm_vmx {
- struct kvm kvm;
-
- unsigned int tss_addr;
- bool ept_identity_pagetable_done;
- gpa_t ept_identity_map_addr;
-
- enum ept_pointers_status ept_pointers_match;
- spinlock_t ept_pointer_lock;
-};
-
-#define NR_AUTOLOAD_MSRS 8
-
-struct vmcs_hdr {
- u32 revision_id:31;
- u32 shadow_vmcs:1;
-};
-
-struct vmcs {
- struct vmcs_hdr hdr;
- u32 abort;
- char data[0];
-};
-
-/*
- * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
- * and whose values change infrequently, but are not constant. I.e. this is
- * used as a write-through cache of the corresponding VMCS fields.
- */
-struct vmcs_host_state {
- unsigned long cr3; /* May not match real cr3 */
- unsigned long cr4; /* May not match real cr4 */
- unsigned long gs_base;
- unsigned long fs_base;
-
- u16 fs_sel, gs_sel, ldt_sel;
-#ifdef CONFIG_X86_64
- u16 ds_sel, es_sel;
-#endif
-};
-
-/*
- * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
- * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
- * loaded on this CPU (so we can clear them if the CPU goes down).
- */
-struct loaded_vmcs {
- struct vmcs *vmcs;
- struct vmcs *shadow_vmcs;
- int cpu;
- bool launched;
- bool nmi_known_unmasked;
- bool hv_timer_armed;
- /* Support for vnmi-less CPUs */
- int soft_vnmi_blocked;
- ktime_t entry_time;
- s64 vnmi_blocked_time;
- unsigned long *msr_bitmap;
- struct list_head loaded_vmcss_on_cpu_link;
- struct vmcs_host_state host_state;
-};
-
-struct shared_msr_entry {
- unsigned index;
- u64 data;
- u64 mask;
-};
-
-/*
- * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
- * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
- * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
- * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
- * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
- * More than one of these structures may exist, if L1 runs multiple L2 guests.
- * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
- * underlying hardware which will be used to run L2.
- * This structure is packed to ensure that its layout is identical across
- * machines (necessary for live migration).
- *
- * IMPORTANT: Changing the layout of existing fields in this structure
- * will break save/restore compatibility with older kvm releases. When
- * adding new fields, either use space in the reserved padding* arrays
- * or add the new fields to the end of the structure.
- */
-typedef u64 natural_width;
-struct __packed vmcs12 {
- /* According to the Intel spec, a VMCS region must start with the
- * following two fields. Then follow implementation-specific data.
- */
- struct vmcs_hdr hdr;
- u32 abort;
-
- u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
- u32 padding[7]; /* room for future expansion */
-
- u64 io_bitmap_a;
- u64 io_bitmap_b;
- u64 msr_bitmap;
- u64 vm_exit_msr_store_addr;
- u64 vm_exit_msr_load_addr;
- u64 vm_entry_msr_load_addr;
- u64 tsc_offset;
- u64 virtual_apic_page_addr;
- u64 apic_access_addr;
- u64 posted_intr_desc_addr;
- u64 ept_pointer;
- u64 eoi_exit_bitmap0;
- u64 eoi_exit_bitmap1;
- u64 eoi_exit_bitmap2;
- u64 eoi_exit_bitmap3;
- u64 xss_exit_bitmap;
- u64 guest_physical_address;
- u64 vmcs_link_pointer;
- u64 guest_ia32_debugctl;
- u64 guest_ia32_pat;
- u64 guest_ia32_efer;
- u64 guest_ia32_perf_global_ctrl;
- u64 guest_pdptr0;
- u64 guest_pdptr1;
- u64 guest_pdptr2;
- u64 guest_pdptr3;
- u64 guest_bndcfgs;
- u64 host_ia32_pat;
- u64 host_ia32_efer;
- u64 host_ia32_perf_global_ctrl;
- u64 vmread_bitmap;
- u64 vmwrite_bitmap;
- u64 vm_function_control;
- u64 eptp_list_address;
- u64 pml_address;
- u64 padding64[3]; /* room for future expansion */
- /*
- * To allow migration of L1 (complete with its L2 guests) between
- * machines of different natural widths (32 or 64 bit), we cannot have
- * unsigned long fields with no explict size. We use u64 (aliased
- * natural_width) instead. Luckily, x86 is little-endian.
- */
- natural_width cr0_guest_host_mask;
- natural_width cr4_guest_host_mask;
- natural_width cr0_read_shadow;
- natural_width cr4_read_shadow;
- natural_width cr3_target_value0;
- natural_width cr3_target_value1;
- natural_width cr3_target_value2;
- natural_width cr3_target_value3;
- natural_width exit_qualification;
- natural_width guest_linear_address;
- natural_width guest_cr0;
- natural_width guest_cr3;
- natural_width guest_cr4;
- natural_width guest_es_base;
- natural_width guest_cs_base;
- natural_width guest_ss_base;
- natural_width guest_ds_base;
- natural_width guest_fs_base;
- natural_width guest_gs_base;
- natural_width guest_ldtr_base;
- natural_width guest_tr_base;
- natural_width guest_gdtr_base;
- natural_width guest_idtr_base;
- natural_width guest_dr7;
- natural_width guest_rsp;
- natural_width guest_rip;
- natural_width guest_rflags;
- natural_width guest_pending_dbg_exceptions;
- natural_width guest_sysenter_esp;
- natural_width guest_sysenter_eip;
- natural_width host_cr0;
- natural_width host_cr3;
- natural_width host_cr4;
- natural_width host_fs_base;
- natural_width host_gs_base;
- natural_width host_tr_base;
- natural_width host_gdtr_base;
- natural_width host_idtr_base;
- natural_width host_ia32_sysenter_esp;
- natural_width host_ia32_sysenter_eip;
- natural_width host_rsp;
- natural_width host_rip;
- natural_width paddingl[8]; /* room for future expansion */
- u32 pin_based_vm_exec_control;
- u32 cpu_based_vm_exec_control;
- u32 exception_bitmap;
- u32 page_fault_error_code_mask;
- u32 page_fault_error_code_match;
- u32 cr3_target_count;
- u32 vm_exit_controls;
- u32 vm_exit_msr_store_count;
- u32 vm_exit_msr_load_count;
- u32 vm_entry_controls;
- u32 vm_entry_msr_load_count;
- u32 vm_entry_intr_info_field;
- u32 vm_entry_exception_error_code;
- u32 vm_entry_instruction_len;
- u32 tpr_threshold;
- u32 secondary_vm_exec_control;
- u32 vm_instruction_error;
- u32 vm_exit_reason;
- u32 vm_exit_intr_info;
- u32 vm_exit_intr_error_code;
- u32 idt_vectoring_info_field;
- u32 idt_vectoring_error_code;
- u32 vm_exit_instruction_len;
- u32 vmx_instruction_info;
- u32 guest_es_limit;
- u32 guest_cs_limit;
- u32 guest_ss_limit;
- u32 guest_ds_limit;
- u32 guest_fs_limit;
- u32 guest_gs_limit;
- u32 guest_ldtr_limit;
- u32 guest_tr_limit;
- u32 guest_gdtr_limit;
- u32 guest_idtr_limit;
- u32 guest_es_ar_bytes;
- u32 guest_cs_ar_bytes;
- u32 guest_ss_ar_bytes;
- u32 guest_ds_ar_bytes;
- u32 guest_fs_ar_bytes;
- u32 guest_gs_ar_bytes;
- u32 guest_ldtr_ar_bytes;
- u32 guest_tr_ar_bytes;
- u32 guest_interruptibility_info;
- u32 guest_activity_state;
- u32 guest_sysenter_cs;
- u32 host_ia32_sysenter_cs;
- u32 vmx_preemption_timer_value;
- u32 padding32[7]; /* room for future expansion */
- u16 virtual_processor_id;
- u16 posted_intr_nv;
- u16 guest_es_selector;
- u16 guest_cs_selector;
- u16 guest_ss_selector;
- u16 guest_ds_selector;
- u16 guest_fs_selector;
- u16 guest_gs_selector;
- u16 guest_ldtr_selector;
- u16 guest_tr_selector;
- u16 guest_intr_status;
- u16 host_es_selector;
- u16 host_cs_selector;
- u16 host_ss_selector;
- u16 host_ds_selector;
- u16 host_fs_selector;
- u16 host_gs_selector;
- u16 host_tr_selector;
- u16 guest_pml_index;
-};
-
-/*
- * For save/restore compatibility, the vmcs12 field offsets must not change.
- */
-#define CHECK_OFFSET(field, loc) \
- BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc), \
- "Offset of " #field " in struct vmcs12 has changed.")
-
-static inline void vmx_check_vmcs12_offsets(void) {
- CHECK_OFFSET(hdr, 0);
- CHECK_OFFSET(abort, 4);
- CHECK_OFFSET(launch_state, 8);
- CHECK_OFFSET(io_bitmap_a, 40);
- CHECK_OFFSET(io_bitmap_b, 48);
- CHECK_OFFSET(msr_bitmap, 56);
- CHECK_OFFSET(vm_exit_msr_store_addr, 64);
- CHECK_OFFSET(vm_exit_msr_load_addr, 72);
- CHECK_OFFSET(vm_entry_msr_load_addr, 80);
- CHECK_OFFSET(tsc_offset, 88);
- CHECK_OFFSET(virtual_apic_page_addr, 96);
- CHECK_OFFSET(apic_access_addr, 104);
- CHECK_OFFSET(posted_intr_desc_addr, 112);
- CHECK_OFFSET(ept_pointer, 120);
- CHECK_OFFSET(eoi_exit_bitmap0, 128);
- CHECK_OFFSET(eoi_exit_bitmap1, 136);
- CHECK_OFFSET(eoi_exit_bitmap2, 144);
- CHECK_OFFSET(eoi_exit_bitmap3, 152);
- CHECK_OFFSET(xss_exit_bitmap, 160);
- CHECK_OFFSET(guest_physical_address, 168);
- CHECK_OFFSET(vmcs_link_pointer, 176);
- CHECK_OFFSET(guest_ia32_debugctl, 184);
- CHECK_OFFSET(guest_ia32_pat, 192);
- CHECK_OFFSET(guest_ia32_efer, 200);
- CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208);
- CHECK_OFFSET(guest_pdptr0, 216);
- CHECK_OFFSET(guest_pdptr1, 224);
- CHECK_OFFSET(guest_pdptr2, 232);
- CHECK_OFFSET(guest_pdptr3, 240);
- CHECK_OFFSET(guest_bndcfgs, 248);
- CHECK_OFFSET(host_ia32_pat, 256);
- CHECK_OFFSET(host_ia32_efer, 264);
- CHECK_OFFSET(host_ia32_perf_global_ctrl, 272);
- CHECK_OFFSET(vmread_bitmap, 280);
- CHECK_OFFSET(vmwrite_bitmap, 288);
- CHECK_OFFSET(vm_function_control, 296);
- CHECK_OFFSET(eptp_list_address, 304);
- CHECK_OFFSET(pml_address, 312);
- CHECK_OFFSET(cr0_guest_host_mask, 344);
- CHECK_OFFSET(cr4_guest_host_mask, 352);
- CHECK_OFFSET(cr0_read_shadow, 360);
- CHECK_OFFSET(cr4_read_shadow, 368);
- CHECK_OFFSET(cr3_target_value0, 376);
- CHECK_OFFSET(cr3_target_value1, 384);
- CHECK_OFFSET(cr3_target_value2, 392);
- CHECK_OFFSET(cr3_target_value3, 400);
- CHECK_OFFSET(exit_qualification, 408);
- CHECK_OFFSET(guest_linear_address, 416);
- CHECK_OFFSET(guest_cr0, 424);
- CHECK_OFFSET(guest_cr3, 432);
- CHECK_OFFSET(guest_cr4, 440);
- CHECK_OFFSET(guest_es_base, 448);
- CHECK_OFFSET(guest_cs_base, 456);
- CHECK_OFFSET(guest_ss_base, 464);
- CHECK_OFFSET(guest_ds_base, 472);
- CHECK_OFFSET(guest_fs_base, 480);
- CHECK_OFFSET(guest_gs_base, 488);
- CHECK_OFFSET(guest_ldtr_base, 496);
- CHECK_OFFSET(guest_tr_base, 504);
- CHECK_OFFSET(guest_gdtr_base, 512);
- CHECK_OFFSET(guest_idtr_base, 520);
- CHECK_OFFSET(guest_dr7, 528);
- CHECK_OFFSET(guest_rsp, 536);
- CHECK_OFFSET(guest_rip, 544);
- CHECK_OFFSET(guest_rflags, 552);
- CHECK_OFFSET(guest_pending_dbg_exceptions, 560);
- CHECK_OFFSET(guest_sysenter_esp, 568);
- CHECK_OFFSET(guest_sysenter_eip, 576);
- CHECK_OFFSET(host_cr0, 584);
- CHECK_OFFSET(host_cr3, 592);
- CHECK_OFFSET(host_cr4, 600);
- CHECK_OFFSET(host_fs_base, 608);
- CHECK_OFFSET(host_gs_base, 616);
- CHECK_OFFSET(host_tr_base, 624);
- CHECK_OFFSET(host_gdtr_base, 632);
- CHECK_OFFSET(host_idtr_base, 640);
- CHECK_OFFSET(host_ia32_sysenter_esp, 648);
- CHECK_OFFSET(host_ia32_sysenter_eip, 656);
- CHECK_OFFSET(host_rsp, 664);
- CHECK_OFFSET(host_rip, 672);
- CHECK_OFFSET(pin_based_vm_exec_control, 744);
- CHECK_OFFSET(cpu_based_vm_exec_control, 748);
- CHECK_OFFSET(exception_bitmap, 752);
- CHECK_OFFSET(page_fault_error_code_mask, 756);
- CHECK_OFFSET(page_fault_error_code_match, 760);
- CHECK_OFFSET(cr3_target_count, 764);
- CHECK_OFFSET(vm_exit_controls, 768);
- CHECK_OFFSET(vm_exit_msr_store_count, 772);
- CHECK_OFFSET(vm_exit_msr_load_count, 776);
- CHECK_OFFSET(vm_entry_controls, 780);
- CHECK_OFFSET(vm_entry_msr_load_count, 784);
- CHECK_OFFSET(vm_entry_intr_info_field, 788);
- CHECK_OFFSET(vm_entry_exception_error_code, 792);
- CHECK_OFFSET(vm_entry_instruction_len, 796);
- CHECK_OFFSET(tpr_threshold, 800);
- CHECK_OFFSET(secondary_vm_exec_control, 804);
- CHECK_OFFSET(vm_instruction_error, 808);
- CHECK_OFFSET(vm_exit_reason, 812);
- CHECK_OFFSET(vm_exit_intr_info, 816);
- CHECK_OFFSET(vm_exit_intr_error_code, 820);
- CHECK_OFFSET(idt_vectoring_info_field, 824);
- CHECK_OFFSET(idt_vectoring_error_code, 828);
- CHECK_OFFSET(vm_exit_instruction_len, 832);
- CHECK_OFFSET(vmx_instruction_info, 836);
- CHECK_OFFSET(guest_es_limit, 840);
- CHECK_OFFSET(guest_cs_limit, 844);
- CHECK_OFFSET(guest_ss_limit, 848);
- CHECK_OFFSET(guest_ds_limit, 852);
- CHECK_OFFSET(guest_fs_limit, 856);
- CHECK_OFFSET(guest_gs_limit, 860);
- CHECK_OFFSET(guest_ldtr_limit, 864);
- CHECK_OFFSET(guest_tr_limit, 868);
- CHECK_OFFSET(guest_gdtr_limit, 872);
- CHECK_OFFSET(guest_idtr_limit, 876);
- CHECK_OFFSET(guest_es_ar_bytes, 880);
- CHECK_OFFSET(guest_cs_ar_bytes, 884);
- CHECK_OFFSET(guest_ss_ar_bytes, 888);
- CHECK_OFFSET(guest_ds_ar_bytes, 892);
- CHECK_OFFSET(guest_fs_ar_bytes, 896);
- CHECK_OFFSET(guest_gs_ar_bytes, 900);
- CHECK_OFFSET(guest_ldtr_ar_bytes, 904);
- CHECK_OFFSET(guest_tr_ar_bytes, 908);
- CHECK_OFFSET(guest_interruptibility_info, 912);
- CHECK_OFFSET(guest_activity_state, 916);
- CHECK_OFFSET(guest_sysenter_cs, 920);
- CHECK_OFFSET(host_ia32_sysenter_cs, 924);
- CHECK_OFFSET(vmx_preemption_timer_value, 928);
- CHECK_OFFSET(virtual_processor_id, 960);
- CHECK_OFFSET(posted_intr_nv, 962);
- CHECK_OFFSET(guest_es_selector, 964);
- CHECK_OFFSET(guest_cs_selector, 966);
- CHECK_OFFSET(guest_ss_selector, 968);
- CHECK_OFFSET(guest_ds_selector, 970);
- CHECK_OFFSET(guest_fs_selector, 972);
- CHECK_OFFSET(guest_gs_selector, 974);
- CHECK_OFFSET(guest_ldtr_selector, 976);
- CHECK_OFFSET(guest_tr_selector, 978);
- CHECK_OFFSET(guest_intr_status, 980);
- CHECK_OFFSET(host_es_selector, 982);
- CHECK_OFFSET(host_cs_selector, 984);
- CHECK_OFFSET(host_ss_selector, 986);
- CHECK_OFFSET(host_ds_selector, 988);
- CHECK_OFFSET(host_fs_selector, 990);
- CHECK_OFFSET(host_gs_selector, 992);
- CHECK_OFFSET(host_tr_selector, 994);
- CHECK_OFFSET(guest_pml_index, 996);
-}
-
-/*
- * VMCS12_REVISION is an arbitrary id that should be changed if the content or
- * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
- * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
- *
- * IMPORTANT: Changing this value will break save/restore compatibility with
- * older kvm releases.
- */
-#define VMCS12_REVISION 0x11e57ed0
-
-/*
- * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
- * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used
- * by the current implementation, 4K is reserved to avoid future
- * complications.
- */
-#define VMCS12_SIZE 0x1000
-
-/*
- * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
- * supported VMCS12 field encoding.
- */
-#define VMCS12_MAX_FIELD_INDEX 0x17
-
-struct nested_vmx_msrs {
- /*
- * We only store the "true" versions of the VMX capability MSRs. We
- * generate the "non-true" versions by setting the must-be-1 bits
- * according to the SDM.
- */
- u32 procbased_ctls_low;
- u32 procbased_ctls_high;
- u32 secondary_ctls_low;
- u32 secondary_ctls_high;
- u32 pinbased_ctls_low;
- u32 pinbased_ctls_high;
- u32 exit_ctls_low;
- u32 exit_ctls_high;
- u32 entry_ctls_low;
- u32 entry_ctls_high;
- u32 misc_low;
- u32 misc_high;
- u32 ept_caps;
- u32 vpid_caps;
- u64 basic;
- u64 cr0_fixed0;
- u64 cr0_fixed1;
- u64 cr4_fixed0;
- u64 cr4_fixed1;
- u64 vmcs_enum;
- u64 vmfunc_controls;
-};
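-
-/*
- * A minimal sketch (the demo_ helper is ours, not part of this file) of the
- * derivation described above: a "non-true" control MSR is the stored "true"
- * value with the always-on-without-TRUE-MSR bits from asm/vmx.h OR-ed back in.
- */
-static inline u32 demo_nontrue_exit_ctls_low(const struct nested_vmx_msrs *msrs)
-{
- /* Restore the must-be-1 bits that the "true" MSR may report as clearable. */
- return msrs->exit_ctls_low | VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
-}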
-
-/*
- * The nested_vmx structure is part of vcpu_vmx, and holds information we need
- * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
- */
-struct nested_vmx {
- /* Has the level-1 guest done vmxon? */
- bool vmxon;
- gpa_t vmxon_ptr;
- bool pml_full;
-
- /* The guest-physical address of the current VMCS L1 keeps for L2 */
- gpa_t current_vmptr;
- /*
- * Cache of the guest's VMCS, existing outside of guest memory.
- * Loaded from guest memory during VMPTRLD. Flushed to guest
- * memory during VMCLEAR and VMPTRLD.
- */
- struct vmcs12 *cached_vmcs12;
- /*
- * Cache of the guest's shadow VMCS, existing outside of guest
- * memory. Loaded from guest memory during VM entry. Flushed
- * to guest memory during VM exit.
- */
- struct vmcs12 *cached_shadow_vmcs12;
- /*
- * Indicates if the shadow vmcs or enlightened vmcs must be updated
- * with the data held by struct vmcs12.
- */
- bool need_vmcs12_sync;
- bool dirty_vmcs12;
-
- /*
- * vmcs02 has been initialized, i.e. state that is constant for
- * vmcs02 has been written to the backing VMCS. Initialization
- * is delayed until L1 actually attempts to run a nested VM.
- */
- bool vmcs02_initialized;
-
- bool change_vmcs01_virtual_apic_mode;
-
- /*
- * Enlightened VMCS has been enabled. This does not mean that L1 has to
- * use it. However, VMX features available to L1 will be limited based
- * on what the enlightened VMCS supports.
- */
- bool enlightened_vmcs_enabled;
-
- /* L2 must run next, and mustn't decide to exit to L1. */
- bool nested_run_pending;
-
- struct loaded_vmcs vmcs02;
-
- /*
- * Guest pages referred to in the vmcs02 with host-physical
- * pointers, so we must keep them pinned while L2 runs.
- */
- struct page *apic_access_page;
- struct page *virtual_apic_page;
- struct page *pi_desc_page;
- struct pi_desc *pi_desc;
- bool pi_pending;
- u16 posted_intr_nv;
-
- struct hrtimer preemption_timer;
- bool preemption_timer_expired;
-
- /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
- u64 vmcs01_debugctl;
- u64 vmcs01_guest_bndcfgs;
-
- u16 vpid02;
- u16 last_vpid;
-
- struct nested_vmx_msrs msrs;
-
- /* SMM related state */
- struct {
- /* in VMX operation on SMM entry? */
- bool vmxon;
- /* in guest mode on SMM entry? */
- bool guest_mode;
- } smm;
-
- gpa_t hv_evmcs_vmptr;
- struct page *hv_evmcs_page;
- struct hv_enlightened_vmcs *hv_evmcs;
-};
-
-#define POSTED_INTR_ON 0
-#define POSTED_INTR_SN 1
-
-/* Posted-Interrupt Descriptor */
-struct pi_desc {
- u32 pir[8]; /* Posted interrupt requested */
- union {
- struct {
- /* bit 256 - Outstanding Notification */
- u16 on : 1,
- /* bit 257 - Suppress Notification */
- sn : 1,
- /* bit 271:258 - Reserved */
- rsvd_1 : 14;
- /* bit 279:272 - Notification Vector */
- u8 nv;
- /* bit 287:280 - Reserved */
- u8 rsvd_2;
- /* bit 319:288 - Notification Destination */
- u32 ndst;
- };
- u64 control;
- };
- u32 rsvd[6];
-} __aligned(64);
-
-static bool pi_test_and_set_on(struct pi_desc *pi_desc)
-{
- return test_and_set_bit(POSTED_INTR_ON,
- (unsigned long *)&pi_desc->control);
-}
-
-static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
-{
- return test_and_clear_bit(POSTED_INTR_ON,
- (unsigned long *)&pi_desc->control);
-}
-
-static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
-{
- return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
-}
-
-static inline void pi_clear_sn(struct pi_desc *pi_desc)
-{
- clear_bit(POSTED_INTR_SN,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline void pi_set_sn(struct pi_desc *pi_desc)
-{
- set_bit(POSTED_INTR_SN,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline void pi_clear_on(struct pi_desc *pi_desc)
-{
- clear_bit(POSTED_INTR_ON,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline int pi_test_on(struct pi_desc *pi_desc)
-{
- return test_bit(POSTED_INTR_ON,
- (unsigned long *)&pi_desc->control);
-}
-
-static inline int pi_test_sn(struct pi_desc *pi_desc)
-{
- return test_bit(POSTED_INTR_SN,
- (unsigned long *)&pi_desc->control);
-}
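-
-/*
- * Usage sketch of the delivery sequence these helpers support (hedged; it
- * mirrors the pattern of the posted-interrupt delivery path later in this
- * file, and the demo_ name is ours): record the vector, raise ON, and only
- * send the notification IPI when ON was previously clear.
- */
-static inline bool demo_pi_post(struct pi_desc *pi_desc, int vector)
-{
- if (pi_test_and_set_pir(vector, pi_desc))
- return false; /* vector was already pending */
- if (pi_test_and_set_on(pi_desc))
- return false; /* a notification is already outstanding */
- return true; /* caller should send the notification-vector IPI */
-}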
-
-struct vmx_msrs {
- unsigned int nr;
- struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
-};
-
-struct vcpu_vmx {
- struct kvm_vcpu vcpu;
- unsigned long host_rsp;
- u8 fail;
- u8 msr_bitmap_mode;
- u32 exit_intr_info;
- u32 idt_vectoring_info;
- ulong rflags;
- struct shared_msr_entry *guest_msrs;
- int nmsrs;
- int save_nmsrs;
- unsigned long host_idt_base;
-#ifdef CONFIG_X86_64
- u64 msr_host_kernel_gs_base;
- u64 msr_guest_kernel_gs_base;
-#endif
-
- u64 arch_capabilities;
- u64 spec_ctrl;
-
- u32 vm_entry_controls_shadow;
- u32 vm_exit_controls_shadow;
- u32 secondary_exec_control;
-
- /*
- * loaded_vmcs points to the VMCS currently used in this vcpu. For a
- * non-nested (L1) guest, it always points to vmcs01. For a nested
- * guest (L2), it points to a different VMCS. loaded_cpu_state points
- * to the VMCS whose state is loaded into the CPU registers that only
- * need to be switched when transitioning to/from the kernel; a NULL
- * value indicates that host state is loaded.
- */
- struct loaded_vmcs vmcs01;
- struct loaded_vmcs *loaded_vmcs;
- struct loaded_vmcs *loaded_cpu_state;
- bool __launched; /* temporary, used in vmx_vcpu_run */
- struct msr_autoload {
- struct vmx_msrs guest;
- struct vmx_msrs host;
- } msr_autoload;
-
- struct {
- int vm86_active;
- ulong save_rflags;
- struct kvm_segment segs[8];
- } rmode;
- struct {
- u32 bitmask; /* 4 bits per segment (1 bit per field) */
- struct kvm_save_segment {
- u16 selector;
- unsigned long base;
- u32 limit;
- u32 ar;
- } seg[8];
- } segment_cache;
- int vpid;
- bool emulation_required;
-
- u32 exit_reason;
-
- /* Posted interrupt descriptor */
- struct pi_desc pi_desc;
-
- /* Support for a guest hypervisor (nested VMX) */
- struct nested_vmx nested;
-
- /* Dynamic PLE window. */
- int ple_window;
- bool ple_window_dirty;
-
- bool req_immediate_exit;
-
- /* Support for PML */
-#define PML_ENTITY_NUM 512
- struct page *pml_pg;
-
- /* apic deadline value in host tsc */
- u64 hv_deadline_tsc;
-
- u64 current_tsc_ratio;
-
- u32 host_pkru;
-
- unsigned long host_debugctlmsr;
-
- /*
- * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
- * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
- * in msr_ia32_feature_control_valid_bits.
- */
- u64 msr_ia32_feature_control;
- u64 msr_ia32_feature_control_valid_bits;
- u64 ept_pointer;
-};
-
-enum segment_cache_field {
- SEG_FIELD_SEL = 0,
- SEG_FIELD_BASE = 1,
- SEG_FIELD_LIMIT = 2,
- SEG_FIELD_AR = 3,
-
- SEG_FIELD_NR = 4
-};
-
-static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
-{
- return container_of(kvm, struct kvm_vmx, kvm);
-}
-
-static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
-{
- return container_of(vcpu, struct vcpu_vmx, vcpu);
-}
-
-static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
-{
- return &(to_vmx(vcpu)->pi_desc);
-}
-
-#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
-#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
-#define FIELD(number, name) [ROL16(number, 6)] = VMCS12_OFFSET(name)
-#define FIELD64(number, name) \
- FIELD(number, name), \
- [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
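-
-/*
- * Why ROL16: rotating the sparse VMCS field encoding left by 6 packs the
- * index/access-type bits into a small, dense array index. For example,
- * GUEST_ES_SELECTOR (0x0800) gives ((0x0800 << 6) | (0x0800 >> 10)) & 0xffff
- * == 0x0002, so slot 2 of the table below holds the guest_es_selector
- * offset. (Illustrative arithmetic only.)
- */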
-
-static u16 shadow_read_only_fields[] = {
-#define SHADOW_FIELD_RO(x) x,
-#include "vmx_shadow_fields.h"
-};
-static int max_shadow_read_only_fields =
- ARRAY_SIZE(shadow_read_only_fields);
-
-static u16 shadow_read_write_fields[] = {
-#define SHADOW_FIELD_RW(x) x,
-#include "vmx_shadow_fields.h"
-};
-static int max_shadow_read_write_fields =
- ARRAY_SIZE(shadow_read_write_fields);
-
-static const unsigned short vmcs_field_to_offset_table[] = {
- FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
- FIELD(POSTED_INTR_NV, posted_intr_nv),
- FIELD(GUEST_ES_SELECTOR, guest_es_selector),
- FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
- FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
- FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
- FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
- FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
- FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
- FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
- FIELD(GUEST_INTR_STATUS, guest_intr_status),
- FIELD(GUEST_PML_INDEX, guest_pml_index),
- FIELD(HOST_ES_SELECTOR, host_es_selector),
- FIELD(HOST_CS_SELECTOR, host_cs_selector),
- FIELD(HOST_SS_SELECTOR, host_ss_selector),
- FIELD(HOST_DS_SELECTOR, host_ds_selector),
- FIELD(HOST_FS_SELECTOR, host_fs_selector),
- FIELD(HOST_GS_SELECTOR, host_gs_selector),
- FIELD(HOST_TR_SELECTOR, host_tr_selector),
- FIELD64(IO_BITMAP_A, io_bitmap_a),
- FIELD64(IO_BITMAP_B, io_bitmap_b),
- FIELD64(MSR_BITMAP, msr_bitmap),
- FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
- FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
- FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
- FIELD64(PML_ADDRESS, pml_address),
- FIELD64(TSC_OFFSET, tsc_offset),
- FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
- FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
- FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
- FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
- FIELD64(EPT_POINTER, ept_pointer),
- FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
- FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
- FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
- FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
- FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
- FIELD64(VMREAD_BITMAP, vmread_bitmap),
- FIELD64(VMWRITE_BITMAP, vmwrite_bitmap),
- FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
- FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
- FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
- FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
- FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
- FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
- FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
- FIELD64(GUEST_PDPTR0, guest_pdptr0),
- FIELD64(GUEST_PDPTR1, guest_pdptr1),
- FIELD64(GUEST_PDPTR2, guest_pdptr2),
- FIELD64(GUEST_PDPTR3, guest_pdptr3),
- FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
- FIELD64(HOST_IA32_PAT, host_ia32_pat),
- FIELD64(HOST_IA32_EFER, host_ia32_efer),
- FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
- FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
- FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
- FIELD(EXCEPTION_BITMAP, exception_bitmap),
- FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
- FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
- FIELD(CR3_TARGET_COUNT, cr3_target_count),
- FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
- FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
- FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
- FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
- FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
- FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
- FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
- FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
- FIELD(TPR_THRESHOLD, tpr_threshold),
- FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
- FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
- FIELD(VM_EXIT_REASON, vm_exit_reason),
- FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
- FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
- FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
- FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
- FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
- FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
- FIELD(GUEST_ES_LIMIT, guest_es_limit),
- FIELD(GUEST_CS_LIMIT, guest_cs_limit),
- FIELD(GUEST_SS_LIMIT, guest_ss_limit),
- FIELD(GUEST_DS_LIMIT, guest_ds_limit),
- FIELD(GUEST_FS_LIMIT, guest_fs_limit),
- FIELD(GUEST_GS_LIMIT, guest_gs_limit),
- FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
- FIELD(GUEST_TR_LIMIT, guest_tr_limit),
- FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
- FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
- FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
- FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
- FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
- FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
- FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
- FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
- FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
- FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
- FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
- FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
- FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
- FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
- FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
- FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
- FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
- FIELD(CR0_READ_SHADOW, cr0_read_shadow),
- FIELD(CR4_READ_SHADOW, cr4_read_shadow),
- FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
- FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
- FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
- FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
- FIELD(EXIT_QUALIFICATION, exit_qualification),
- FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
- FIELD(GUEST_CR0, guest_cr0),
- FIELD(GUEST_CR3, guest_cr3),
- FIELD(GUEST_CR4, guest_cr4),
- FIELD(GUEST_ES_BASE, guest_es_base),
- FIELD(GUEST_CS_BASE, guest_cs_base),
- FIELD(GUEST_SS_BASE, guest_ss_base),
- FIELD(GUEST_DS_BASE, guest_ds_base),
- FIELD(GUEST_FS_BASE, guest_fs_base),
- FIELD(GUEST_GS_BASE, guest_gs_base),
- FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
- FIELD(GUEST_TR_BASE, guest_tr_base),
- FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
- FIELD(GUEST_IDTR_BASE, guest_idtr_base),
- FIELD(GUEST_DR7, guest_dr7),
- FIELD(GUEST_RSP, guest_rsp),
- FIELD(GUEST_RIP, guest_rip),
- FIELD(GUEST_RFLAGS, guest_rflags),
- FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
- FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
- FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
- FIELD(HOST_CR0, host_cr0),
- FIELD(HOST_CR3, host_cr3),
- FIELD(HOST_CR4, host_cr4),
- FIELD(HOST_FS_BASE, host_fs_base),
- FIELD(HOST_GS_BASE, host_gs_base),
- FIELD(HOST_TR_BASE, host_tr_base),
- FIELD(HOST_GDTR_BASE, host_gdtr_base),
- FIELD(HOST_IDTR_BASE, host_idtr_base),
- FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
- FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
- FIELD(HOST_RSP, host_rsp),
- FIELD(HOST_RIP, host_rip),
-};
-
-static inline short vmcs_field_to_offset(unsigned long field)
-{
- const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
- unsigned short offset;
- unsigned index;
-
- if (field >> 15)
- return -ENOENT;
-
- index = ROL16(field, 6);
- if (index >= size)
- return -ENOENT;
-
- index = array_index_nospec(index, size);
- offset = vmcs_field_to_offset_table[index];
- if (offset == 0)
- return -ENOENT;
- return offset;
-}
-
-static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
-{
- return to_vmx(vcpu)->nested.cached_vmcs12;
-}
-
-static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
-{
- return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
-}
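-
-/*
- * Usage sketch for vmcs_field_to_offset() (illustrative; 'sel' is a local
- * we made up), mirroring how a 16-bit field is read out of a cached vmcs12:
- *
- *   short off = vmcs_field_to_offset(GUEST_ES_SELECTOR);
- *   if (off >= 0)
- *       sel = *(u16 *)((char *)get_vmcs12(vcpu) + off);
- */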
-
-static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
-static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
-static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
-static bool vmx_xsaves_supported(void);
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg);
-static void vmx_get_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg);
-static bool guest_state_valid(struct kvm_vcpu *vcpu);
-static u32 vmx_segment_access_rights(struct kvm_segment *var);
-static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
-static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
-static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
-static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
- u16 error_code);
-static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
-static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type);
-
-static DEFINE_PER_CPU(struct vmcs *, vmxarea);
-static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
-/*
- * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is
- * needed when a CPU is brought down and we need to VMCLEAR all VMCSs
- * loaded on it.
- */
-static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
-
-/*
- * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
- * can find which vCPU should be woken up.
- */
-static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
-static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
-
-enum {
- VMX_VMREAD_BITMAP,
- VMX_VMWRITE_BITMAP,
- VMX_BITMAP_NR
-};
-
-static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
-
-#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
-#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
-
-static bool cpu_has_load_ia32_efer;
-static bool cpu_has_load_perf_global_ctrl;
-
-static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
-static DEFINE_SPINLOCK(vmx_vpid_lock);
-
-static struct vmcs_config {
- int size;
- int order;
- u32 basic_cap;
- u32 revision_id;
- u32 pin_based_exec_ctrl;
- u32 cpu_based_exec_ctrl;
- u32 cpu_based_2nd_exec_ctrl;
- u32 vmexit_ctrl;
- u32 vmentry_ctrl;
- struct nested_vmx_msrs nested;
-} vmcs_config;
-
-static struct vmx_capability {
- u32 ept;
- u32 vpid;
-} vmx_capability;
-
-#define VMX_SEGMENT_FIELD(seg) \
- [VCPU_SREG_##seg] = { \
- .selector = GUEST_##seg##_SELECTOR, \
- .base = GUEST_##seg##_BASE, \
- .limit = GUEST_##seg##_LIMIT, \
- .ar_bytes = GUEST_##seg##_AR_BYTES, \
- }
-
-static const struct kvm_vmx_segment_field {
- unsigned selector;
- unsigned base;
- unsigned limit;
- unsigned ar_bytes;
-} kvm_vmx_segment_fields[] = {
- VMX_SEGMENT_FIELD(CS),
- VMX_SEGMENT_FIELD(DS),
- VMX_SEGMENT_FIELD(ES),
- VMX_SEGMENT_FIELD(FS),
- VMX_SEGMENT_FIELD(GS),
- VMX_SEGMENT_FIELD(SS),
- VMX_SEGMENT_FIELD(TR),
- VMX_SEGMENT_FIELD(LDTR),
-};
-
-static u64 host_efer;
-
-static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
-
-/*
- * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
- * away by decrementing the array size.
- */
-static const u32 vmx_msr_index[] = {
-#ifdef CONFIG_X86_64
- MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
-#endif
- MSR_EFER, MSR_TSC_AUX, MSR_STAR,
-};
-
-DEFINE_STATIC_KEY_FALSE(enable_evmcs);
-
-#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
-
-#define KVM_EVMCS_VERSION 1
-
-/*
- * Enlightened VMCSv1 doesn't support these:
- *
- * POSTED_INTR_NV = 0x00000002,
- * GUEST_INTR_STATUS = 0x00000810,
- * APIC_ACCESS_ADDR = 0x00002014,
- * POSTED_INTR_DESC_ADDR = 0x00002016,
- * EOI_EXIT_BITMAP0 = 0x0000201c,
- * EOI_EXIT_BITMAP1 = 0x0000201e,
- * EOI_EXIT_BITMAP2 = 0x00002020,
- * EOI_EXIT_BITMAP3 = 0x00002022,
- * GUEST_PML_INDEX = 0x00000812,
- * PML_ADDRESS = 0x0000200e,
- * VM_FUNCTION_CONTROL = 0x00002018,
- * EPTP_LIST_ADDRESS = 0x00002024,
- * VMREAD_BITMAP = 0x00002026,
- * VMWRITE_BITMAP = 0x00002028,
- *
- * TSC_MULTIPLIER = 0x00002032,
- * PLE_GAP = 0x00004020,
- * PLE_WINDOW = 0x00004022,
- * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
- * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
- * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
- *
- * Currently unsupported in KVM:
- * GUEST_IA32_RTIT_CTL = 0x00002814,
- */
-#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
- PIN_BASED_VMX_PREEMPTION_TIMER)
-#define EVMCS1_UNSUPPORTED_2NDEXEC \
- (SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \
- SECONDARY_EXEC_APIC_REGISTER_VIRT | \
- SECONDARY_EXEC_ENABLE_PML | \
- SECONDARY_EXEC_ENABLE_VMFUNC | \
- SECONDARY_EXEC_SHADOW_VMCS | \
- SECONDARY_EXEC_TSC_SCALING | \
- SECONDARY_EXEC_PAUSE_LOOP_EXITING)
-#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
-#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
-#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
-
-#if IS_ENABLED(CONFIG_HYPERV)
-static bool __read_mostly enlightened_vmcs = true;
-module_param(enlightened_vmcs, bool, 0444);
-
-static inline void evmcs_write64(unsigned long field, u64 value)
-{
- u16 clean_field;
- int offset = get_evmcs_offset(field, &clean_field);
-
- if (offset < 0)
- return;
-
- *(u64 *)((char *)current_evmcs + offset) = value;
-
- current_evmcs->hv_clean_fields &= ~clean_field;
-}
-
-static inline void evmcs_write32(unsigned long field, u32 value)
-{
- u16 clean_field;
- int offset = get_evmcs_offset(field, &clean_field);
-
- if (offset < 0)
- return;
-
- *(u32 *)((char *)current_evmcs + offset) = value;
- current_evmcs->hv_clean_fields &= ~clean_field;
-}
-
-static inline void evmcs_write16(unsigned long field, u16 value)
-{
- u16 clean_field;
- int offset = get_evmcs_offset(field, &clean_field);
-
- if (offset < 0)
- return;
-
- *(u16 *)((char *)current_evmcs + offset) = value;
- current_evmcs->hv_clean_fields &= ~clean_field;
-}
-
-static inline u64 evmcs_read64(unsigned long field)
-{
- int offset = get_evmcs_offset(field, NULL);
-
- if (offset < 0)
- return 0;
-
- return *(u64 *)((char *)current_evmcs + offset);
-}
-
-static inline u32 evmcs_read32(unsigned long field)
-{
- int offset = get_evmcs_offset(field, NULL);
-
- if (offset < 0)
- return 0;
-
- return *(u32 *)((char *)current_evmcs + offset);
-}
-
-static inline u16 evmcs_read16(unsigned long field)
-{
- int offset = get_evmcs_offset(field, NULL);
-
- if (offset < 0)
- return 0;
-
- return *(u16 *)((char *)current_evmcs + offset);
-}
-
-static inline void evmcs_touch_msr_bitmap(void)
-{
- if (unlikely(!current_evmcs))
- return;
-
- if (current_evmcs->hv_enlightenments_control.msr_bitmap)
- current_evmcs->hv_clean_fields &=
- ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
-}
-
-static void evmcs_load(u64 phys_addr)
-{
- struct hv_vp_assist_page *vp_ap =
- hv_get_vp_assist_page(smp_processor_id());
-
- vp_ap->current_nested_vmcs = phys_addr;
- vp_ap->enlighten_vmentry = 1;
-}
-
-static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
-{
- vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL;
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
-
- vmcs_conf->vmexit_ctrl &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
- vmcs_conf->vmentry_ctrl &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
-}
-
-/* check_ept_pointer_match() should be called under ept_pointer_lock. */
-static void check_ept_pointer_match(struct kvm *kvm)
-{
- struct kvm_vcpu *vcpu;
- u64 tmp_eptp = INVALID_PAGE;
- int i;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!VALID_PAGE(tmp_eptp)) {
- tmp_eptp = to_vmx(vcpu)->ept_pointer;
- } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
- to_kvm_vmx(kvm)->ept_pointers_match
- = EPT_POINTERS_MISMATCH;
- return;
- }
- }
-
- to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
-}
-
-static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
-{
- struct kvm_vcpu *vcpu;
- int ret = -ENOTSUPP, i;
-
- spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
-
- if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
- check_ept_pointer_match(kvm);
-
- /*
- * The FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of
- * the base of the EPT PML4 table, so strip off the EPT configuration
- * information.
- */
- if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
- kvm_for_each_vcpu(i, vcpu, kvm)
- ret |= hyperv_flush_guest_mapping(
- to_vmx(kvm_get_vcpu(kvm, i))->ept_pointer & PAGE_MASK);
- } else {
- ret = hyperv_flush_guest_mapping(
- to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK);
- }
-
- spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
- return ret;
-}
-#else /* !IS_ENABLED(CONFIG_HYPERV) */
-static inline void evmcs_write64(unsigned long field, u64 value) {}
-static inline void evmcs_write32(unsigned long field, u32 value) {}
-static inline void evmcs_write16(unsigned long field, u16 value) {}
-static inline u64 evmcs_read64(unsigned long field) { return 0; }
-static inline u32 evmcs_read32(unsigned long field) { return 0; }
-static inline u16 evmcs_read16(unsigned long field) { return 0; }
-static inline void evmcs_load(u64 phys_addr) {}
-static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
-static inline void evmcs_touch_msr_bitmap(void) {}
-#endif /* IS_ENABLED(CONFIG_HYPERV) */
-
-static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
- uint16_t *vmcs_version)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- /* For simplicity, we don't support disabling the feature. */
- if (vmx->nested.enlightened_vmcs_enabled)
- return 0;
-
- vmx->nested.enlightened_vmcs_enabled = true;
-
- /*
- * vmcs_version represents the range of supported Enlightened VMCS
- * versions: the low 8 bits hold the minimal version and the high 8
- * bits the maximum supported version. KVM supports versions from 1 to
- * KVM_EVMCS_VERSION.
- */
- if (vmcs_version)
- *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1;
-
- vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
- vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
- vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
- vmx->nested.msrs.secondary_ctls_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
- vmx->nested.msrs.vmfunc_controls &= ~EVMCS1_UNSUPPORTED_VMFUNC;
-
- return 0;
-}
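-
-/*
- * Decoding sketch for the value reported above (illustrative only): the
- * low byte is the minimal version and the high byte the maximum, so with
- * KVM_EVMCS_VERSION == 1 the advertised range is [1, 1]:
- *
- *   min = vmcs_version & 0xff;
- *   max = vmcs_version >> 8;
- */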
-
-static inline bool is_exception_n(u32 intr_info, u8 vector)
-{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
- INTR_INFO_VALID_MASK)) ==
- (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
-}
-
-static inline bool is_debug(u32 intr_info)
-{
- return is_exception_n(intr_info, DB_VECTOR);
-}
-
-static inline bool is_breakpoint(u32 intr_info)
-{
- return is_exception_n(intr_info, BP_VECTOR);
-}
-
-static inline bool is_page_fault(u32 intr_info)
-{
- return is_exception_n(intr_info, PF_VECTOR);
-}
-
-static inline bool is_invalid_opcode(u32 intr_info)
-{
- return is_exception_n(intr_info, UD_VECTOR);
-}
-
-static inline bool is_gp_fault(u32 intr_info)
-{
- return is_exception_n(intr_info, GP_VECTOR);
-}
-
-static inline bool is_machine_check(u32 intr_info)
-{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
- INTR_INFO_VALID_MASK)) ==
- (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
-}
-
-/* Undocumented: icebp/int1 */
-static inline bool is_icebp(u32 intr_info)
-{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
- == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
-}
-
-static inline bool cpu_has_vmx_msr_bitmap(void)
-{
- return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
-}
-
-static inline bool cpu_has_vmx_tpr_shadow(void)
-{
- return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
-}
-
-static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
-{
- return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
-}
-
-static inline bool cpu_has_secondary_exec_ctrls(void)
-{
- return vmcs_config.cpu_based_exec_ctrl &
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
-}
-
-static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-}
-
-static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
-}
-
-static inline bool cpu_has_vmx_apic_register_virt(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_APIC_REGISTER_VIRT;
-}
-
-static inline bool cpu_has_vmx_virtual_intr_delivery(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
-}
-
-static inline bool cpu_has_vmx_encls_vmexit(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_ENCLS_EXITING;
-}
-
-/*
- * Comment format: document - errata name - stepping - processor name.
- * Taken from
- * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
- */
-static u32 vmx_preemption_cpu_tfms[] = {
-/* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
-0x000206E6,
-/* 323056.pdf - AAX65 - C2 - Xeon L3406 */
-/* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
-/* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
-0x00020652,
-/* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
-0x00020655,
-/* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
-/* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
-/*
- * 320767.pdf - AAP86 - B1 -
- * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
- */
-0x000106E5,
-/* 321333.pdf - AAM126 - C0 - Xeon 3500 */
-0x000106A0,
-/* 321333.pdf - AAM126 - C1 - Xeon 3500 */
-0x000106A1,
-/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
-0x000106A4,
-/* 321333.pdf - AAM126 - D0 - Xeon 3500 */
-/* 321324.pdf - AAK139 - D0 - Xeon 5500 */
-/* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
-0x000106A5,
-};
-
-static inline bool cpu_has_broken_vmx_preemption_timer(void)
-{
- u32 eax = cpuid_eax(0x00000001), i;
-
- /* Clear the reserved bits */
- eax &= ~(0x3U << 14 | 0xfU << 28);
- for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
- if (eax == vmx_preemption_cpu_tfms[i])
- return true;
-
- return false;
-}
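-
-/*
- * Decoding sketch (the demo_ helper is ours) for the family/model/stepping
- * words matched above; this simple form is valid for family != 0xf, which
- * covers the whole table, e.g. 0x000106E5 -> family 0x6, model 0x1e,
- * stepping 5.
- */
-static inline void demo_decode_fms(u32 eax, u32 *fam, u32 *model, u32 *step)
-{
- *step = eax & 0xf;
- *fam = (eax >> 8) & 0xf;
- *model = ((eax >> 12) & 0xf0) | ((eax >> 4) & 0xf);
-}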
-
-static inline bool cpu_has_vmx_preemption_timer(void)
-{
- return vmcs_config.pin_based_exec_ctrl &
- PIN_BASED_VMX_PREEMPTION_TIMER;
-}
-
-static inline bool cpu_has_vmx_posted_intr(void)
-{
- return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
- vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
-}
-
-static inline bool cpu_has_vmx_apicv(void)
-{
- return cpu_has_vmx_apic_register_virt() &&
- cpu_has_vmx_virtual_intr_delivery() &&
- cpu_has_vmx_posted_intr();
-}
-
-static inline bool cpu_has_vmx_flexpriority(void)
-{
- return cpu_has_vmx_tpr_shadow() &&
- cpu_has_vmx_virtualize_apic_accesses();
-}
-
-static inline bool cpu_has_vmx_ept_execute_only(void)
-{
- return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
-}
-
-static inline bool cpu_has_vmx_ept_2m_page(void)
-{
- return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
-}
-
-static inline bool cpu_has_vmx_ept_1g_page(void)
-{
- return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
-}
-
-static inline bool cpu_has_vmx_ept_4levels(void)
-{
- return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
-}
-
-static inline bool cpu_has_vmx_ept_mt_wb(void)
-{
- return vmx_capability.ept & VMX_EPTP_WB_BIT;
-}
-
-static inline bool cpu_has_vmx_ept_5levels(void)
-{
- return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
-}
-
-static inline bool cpu_has_vmx_ept_ad_bits(void)
-{
- return vmx_capability.ept & VMX_EPT_AD_BIT;
-}
-
-static inline bool cpu_has_vmx_invept_context(void)
-{
- return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
-}
-
-static inline bool cpu_has_vmx_invept_global(void)
-{
- return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
-}
-
-static inline bool cpu_has_vmx_invvpid_individual_addr(void)
-{
- return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
-}
-
-static inline bool cpu_has_vmx_invvpid_single(void)
-{
- return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
-}
-
-static inline bool cpu_has_vmx_invvpid_global(void)
-{
- return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
-}
-
-static inline bool cpu_has_vmx_invvpid(void)
-{
- return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
-}
-
-static inline bool cpu_has_vmx_ept(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_ENABLE_EPT;
-}
-
-static inline bool cpu_has_vmx_unrestricted_guest(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_UNRESTRICTED_GUEST;
-}
-
-static inline bool cpu_has_vmx_ple(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_PAUSE_LOOP_EXITING;
-}
-
-static inline bool cpu_has_vmx_basic_inout(void)
-{
- return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
-}
-
-static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
-{
- return flexpriority_enabled && lapic_in_kernel(vcpu);
-}
-
-static inline bool cpu_has_vmx_vpid(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_ENABLE_VPID;
-}
-
-static inline bool cpu_has_vmx_rdtscp(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_RDTSCP;
-}
-
-static inline bool cpu_has_vmx_invpcid(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_ENABLE_INVPCID;
-}
-
-static inline bool cpu_has_virtual_nmis(void)
-{
- return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
-}
-
-static inline bool cpu_has_vmx_wbinvd_exit(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_WBINVD_EXITING;
-}
-
-static inline bool cpu_has_vmx_shadow_vmcs(void)
-{
- u64 vmx_msr;
- rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
- /* Check if the CPU supports writing r/o exit information fields. */
- if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
- return false;
-
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_SHADOW_VMCS;
-}
-
-static inline bool cpu_has_vmx_pml(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
-}
-
-static inline bool cpu_has_vmx_tsc_scaling(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_TSC_SCALING;
-}
-
-static inline bool cpu_has_vmx_vmfunc(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_ENABLE_VMFUNC;
-}
-
-static bool vmx_umip_emulated(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_DESC;
-}
-
-static inline bool report_flexpriority(void)
-{
- return flexpriority_enabled;
-}
-
-static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
-{
- return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
-}
-
-/*
- * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
- * to modify any valid field of the VMCS, or are the VM-exit
- * information fields read-only?
- */
-static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
-{
- return to_vmx(vcpu)->nested.msrs.misc_low &
- MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
-}
-
-static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
-{
- return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
-}
-
-static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
-{
- return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
- CPU_BASED_MONITOR_TRAP_FLAG;
-}
-
-static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
-{
- return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
- SECONDARY_EXEC_SHADOW_VMCS;
-}
-
-static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
-{
- return vmcs12->cpu_based_vm_exec_control & bit;
-}
-
-static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
-{
- return (vmcs12->cpu_based_vm_exec_control &
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
- (vmcs12->secondary_vm_exec_control & bit);
-}
-
-static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
-{
- return vmcs12->pin_based_vm_exec_control &
- PIN_BASED_VMX_PREEMPTION_TIMER;
-}
-
-static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
-{
- return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
-}
-
-static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
-{
- return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
-}
-
-static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
-{
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
-}
-
-static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
-{
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
-}
-
-static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
-{
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
-}
-
-static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
-{
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
-}
-
-static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
-{
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
-}
-
-static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
-{
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
-}
-
-static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
-{
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
-}
-
-static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
-{
- return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
-}
-
-static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
-{
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
-}
-
-static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
-{
- return nested_cpu_has_vmfunc(vmcs12) &&
- (vmcs12->vm_function_control &
- VMX_VMFUNC_EPTP_SWITCHING);
-}
-
-static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
-{
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
-}
-
-static inline bool is_nmi(u32 intr_info)
-{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
- == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
-}
-
-static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
- u32 exit_intr_info,
- unsigned long exit_qualification);
-
-static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
-{
- int i;
-
- for (i = 0; i < vmx->nmsrs; ++i)
- if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
- return i;
- return -1;
-}
-
-static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
-{
- struct {
- u64 vpid : 16;
- u64 rsvd : 48;
- u64 gva;
- } operand = { vpid, 0, gva };
- bool error;
-
- asm volatile (__ex("invvpid %2, %1") CC_SET(na)
- : CC_OUT(na) (error) : "r"(ext), "m"(operand));
- BUG_ON(error);
-}
-
-static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
-{
- struct {
- u64 eptp, gpa;
- } operand = {eptp, gpa};
- bool error;
-
- asm volatile (__ex("invept %2, %1") CC_SET(na)
- : CC_OUT(na) (error) : "r"(ext), "m"(operand));
- BUG_ON(error);
-}
-
-static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
-{
- int i;
-
- i = __find_msr_index(vmx, msr);
- if (i >= 0)
- return &vmx->guest_msrs[i];
- return NULL;
-}
-
-static void vmcs_clear(struct vmcs *vmcs)
-{
- u64 phys_addr = __pa(vmcs);
- bool error;
-
- asm volatile (__ex("vmclear %1") CC_SET(na)
- : CC_OUT(na) (error) : "m"(phys_addr));
- if (unlikely(error))
- printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
- vmcs, phys_addr);
-}
-
-static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
-{
- vmcs_clear(loaded_vmcs->vmcs);
- if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
- vmcs_clear(loaded_vmcs->shadow_vmcs);
- loaded_vmcs->cpu = -1;
- loaded_vmcs->launched = 0;
-}
-
-static void vmcs_load(struct vmcs *vmcs)
-{
- u64 phys_addr = __pa(vmcs);
- bool error;
-
- if (static_branch_unlikely(&enable_evmcs))
- return evmcs_load(phys_addr);
-
- asm volatile (__ex("vmptrld %1") CC_SET(na)
- : CC_OUT(na) (error) : "m"(phys_addr));
- if (unlikely(error))
- printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
- vmcs, phys_addr);
-}
-
-#ifdef CONFIG_KEXEC_CORE
-/*
- * This bitmap indicates, per CPU, whether the VMCLEAR operation is
- * enabled. All CPUs are disabled by default.
- */
-static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
-
-static inline void crash_enable_local_vmclear(int cpu)
-{
- cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
-static inline void crash_disable_local_vmclear(int cpu)
-{
- cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
-static inline int crash_local_vmclear_enabled(int cpu)
-{
- return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
-}
-
-static void crash_vmclear_local_loaded_vmcss(void)
-{
- int cpu = raw_smp_processor_id();
- struct loaded_vmcs *v;
-
- if (!crash_local_vmclear_enabled(cpu))
- return;
-
- list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
- loaded_vmcss_on_cpu_link)
- vmcs_clear(v->vmcs);
-}
-#else
-static inline void crash_enable_local_vmclear(int cpu) { }
-static inline void crash_disable_local_vmclear(int cpu) { }
-#endif /* CONFIG_KEXEC_CORE */
-
-static void __loaded_vmcs_clear(void *arg)
-{
- struct loaded_vmcs *loaded_vmcs = arg;
- int cpu = raw_smp_processor_id();
-
- if (loaded_vmcs->cpu != cpu)
- return; /* vcpu migration can race with cpu offline */
- if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
- per_cpu(current_vmcs, cpu) = NULL;
- crash_disable_local_vmclear(cpu);
- list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
-
- /*
- * Ensure that loaded_vmcs->loaded_vmcss_on_cpu_link is updated
- * before loaded_vmcs->cpu is set to -1, which is done in
- * loaded_vmcs_init(). Otherwise, another CPU could see cpu == -1
- * first and then add the VMCS to its per-CPU list before it is
- * deleted.
- */
- smp_wmb();
-
- loaded_vmcs_init(loaded_vmcs);
- crash_enable_local_vmclear(cpu);
-}
-
-static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
-{
- int cpu = loaded_vmcs->cpu;
-
- if (cpu != -1)
- smp_call_function_single(cpu,
- __loaded_vmcs_clear, loaded_vmcs, 1);
-}
-
-static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
-{
- if (vpid == 0)
- return true;
-
- if (cpu_has_vmx_invvpid_individual_addr()) {
- __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
- return true;
- }
-
- return false;
-}
-
-static inline void vpid_sync_vcpu_single(int vpid)
-{
- if (vpid == 0)
- return;
-
- if (cpu_has_vmx_invvpid_single())
- __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
-}
-
-static inline void vpid_sync_vcpu_global(void)
-{
- if (cpu_has_vmx_invvpid_global())
- __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
-}
-
-static inline void vpid_sync_context(int vpid)
-{
- if (cpu_has_vmx_invvpid_single())
- vpid_sync_vcpu_single(vpid);
- else
- vpid_sync_vcpu_global();
-}
-
-static inline void ept_sync_global(void)
-{
- __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
-}
-
-static inline void ept_sync_context(u64 eptp)
-{
- if (cpu_has_vmx_invept_context())
- __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
- else
- ept_sync_global();
-}
-
-static __always_inline void vmcs_check16(unsigned long field)
-{
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
- "16-bit accessor invalid for 64-bit field");
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
- "16-bit accessor invalid for 64-bit high field");
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
- "16-bit accessor invalid for 32-bit high field");
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
- "16-bit accessor invalid for natural width field");
-}
-
-static __always_inline void vmcs_check32(unsigned long field)
-{
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
- "32-bit accessor invalid for 16-bit field");
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
- "32-bit accessor invalid for natural width field");
-}
-
-static __always_inline void vmcs_check64(unsigned long field)
-{
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
- "64-bit accessor invalid for 16-bit field");
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
- "64-bit accessor invalid for 64-bit high field");
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
- "64-bit accessor invalid for 32-bit field");
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
- "64-bit accessor invalid for natural width field");
-}
-
-static __always_inline void vmcs_checkl(unsigned long field)
-{
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
- "Natural width accessor invalid for 16-bit field");
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
- "Natural width accessor invalid for 64-bit field");
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
- "Natural width accessor invalid for 64-bit high field");
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
- "Natural width accessor invalid for 32-bit field");
-}
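-
-/*
- * Encoding bits the checks above rely on (per the SDM): bits 14:13 of a
- * VMCS field encoding select the width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit,
- * 3 = natural width) and bit 0 selects the high half of a 64-bit field,
- * hence the 0x6000 and 0x6001 masks.
- */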
-
-static __always_inline unsigned long __vmcs_readl(unsigned long field)
-{
- unsigned long value;
-
- asm volatile (__ex_clear("vmread %1, %0", "%k0")
- : "=r"(value) : "r"(field));
- return value;
-}
-
-static __always_inline u16 vmcs_read16(unsigned long field)
-{
- vmcs_check16(field);
- if (static_branch_unlikely(&enable_evmcs))
- return evmcs_read16(field);
- return __vmcs_readl(field);
-}
-
-static __always_inline u32 vmcs_read32(unsigned long field)
-{
- vmcs_check32(field);
- if (static_branch_unlikely(&enable_evmcs))
- return evmcs_read32(field);
- return __vmcs_readl(field);
-}
-
-static __always_inline u64 vmcs_read64(unsigned long field)
-{
- vmcs_check64(field);
- if (static_branch_unlikely(&enable_evmcs))
- return evmcs_read64(field);
-#ifdef CONFIG_X86_64
- return __vmcs_readl(field);
-#else
- return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
-#endif
-}
-
-static __always_inline unsigned long vmcs_readl(unsigned long field)
-{
- vmcs_checkl(field);
- if (static_branch_unlikely(&enable_evmcs))
- return evmcs_read64(field);
- return __vmcs_readl(field);
-}
-
-static noinline void vmwrite_error(unsigned long field, unsigned long value)
-{
- printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
- field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
- dump_stack();
-}
-
-static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
-{
- bool error;
-
- asm volatile (__ex("vmwrite %2, %1") CC_SET(na)
- : CC_OUT(na) (error) : "r"(field), "rm"(value));
- if (unlikely(error))
- vmwrite_error(field, value);
-}
-
-static __always_inline void vmcs_write16(unsigned long field, u16 value)
-{
- vmcs_check16(field);
- if (static_branch_unlikely(&enable_evmcs))
- return evmcs_write16(field, value);
-
- __vmcs_writel(field, value);
-}
-
-static __always_inline void vmcs_write32(unsigned long field, u32 value)
-{
- vmcs_check32(field);
- if (static_branch_unlikely(&enable_evmcs))
- return evmcs_write32(field, value);
-
- __vmcs_writel(field, value);
-}
-
-static __always_inline void vmcs_write64(unsigned long field, u64 value)
-{
- vmcs_check64(field);
- if (static_branch_unlikely(&enable_evmcs))
- return evmcs_write64(field, value);
-
- __vmcs_writel(field, value);
-#ifndef CONFIG_X86_64
- asm volatile ("");	/* compiler barrier between the two writes */
- __vmcs_writel(field+1, value >> 32);
-#endif
-}
-
-static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
-{
- vmcs_checkl(field);
- if (static_branch_unlikely(&enable_evmcs))
- return evmcs_write64(field, value);
-
- __vmcs_writel(field, value);
-}
-
-static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
-{
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
- "vmcs_clear_bits does not support 64-bit fields");
- if (static_branch_unlikely(&enable_evmcs))
- return evmcs_write32(field, evmcs_read32(field) & ~mask);
-
- __vmcs_writel(field, __vmcs_readl(field) & ~mask);
-}
-
-static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
-{
- BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
- "vmcs_set_bits does not support 64-bit fields");
- if (static_branch_unlikely(&enable_evmcs))
- return evmcs_write32(field, evmcs_read32(field) | mask);
-
- __vmcs_writel(field, __vmcs_readl(field) | mask);
-}
-
-static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
-{
- vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
-}
-
-static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
-{
- vmcs_write32(VM_ENTRY_CONTROLS, val);
- vmx->vm_entry_controls_shadow = val;
-}
-
-static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
-{
- if (vmx->vm_entry_controls_shadow != val)
- vm_entry_controls_init(vmx, val);
-}
-
-static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
-{
- return vmx->vm_entry_controls_shadow;
-}
-
-static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
-{
- vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
-}
-
-static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
-{
- vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
-}
-
-static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
-{
- vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
-}
-
-static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
-{
- vmcs_write32(VM_EXIT_CONTROLS, val);
- vmx->vm_exit_controls_shadow = val;
-}
-
-static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
-{
- if (vmx->vm_exit_controls_shadow != val)
- vm_exit_controls_init(vmx, val);
-}
-
-static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
-{
- return vmx->vm_exit_controls_shadow;
-}
-
-static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
-{
- vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
-}
-
-static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
-{
- vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
-}
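-
-/*
- * Usage sketch: thanks to the shadow, toggling a control only touches the
- * VMCS when the cached value actually changes (VM_ENTRY_IA32E_MODE is used
- * purely as an example bit):
- *
- *   vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE); performs the vmwrite
- *   vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE); shadow hit, no vmwrite
- */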
-
-static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
-{
- vmx->segment_cache.bitmask = 0;
-}
-
-static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
- unsigned field)
-{
- bool ret;
- u32 mask = 1 << (seg * SEG_FIELD_NR + field);
-
- if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
- vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
- vmx->segment_cache.bitmask = 0;
- }
- ret = vmx->segment_cache.bitmask & mask;
- vmx->segment_cache.bitmask |= mask;
- return ret;
-}
-
-static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
-{
- u16 *p = &vmx->segment_cache.seg[seg].selector;
-
- if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
- *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
- return *p;
-}
-
-static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
-{
- ulong *p = &vmx->segment_cache.seg[seg].base;
-
- if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
- *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
- return *p;
-}
-
-static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
-{
- u32 *p = &vmx->segment_cache.seg[seg].limit;
-
- if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
- *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
- return *p;
-}
-
-static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
-{
- u32 *p = &vmx->segment_cache.seg[seg].ar;
-
- if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
- *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
- return *p;
-}
-
-static void update_exception_bitmap(struct kvm_vcpu *vcpu)
-{
- u32 eb;
-
- eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
- (1u << DB_VECTOR) | (1u << AC_VECTOR);
- /*
- * Guest access to VMware backdoor ports could legitimately
- * trigger #GP because of the TSS I/O permission bitmap.
- * We intercept those #GPs and allow the access anyway,
- * as VMware does.
- */
- if (enable_vmware_backdoor)
- eb |= (1u << GP_VECTOR);
- if ((vcpu->guest_debug &
- (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
- (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
- eb |= 1u << BP_VECTOR;
- if (to_vmx(vcpu)->rmode.vm86_active)
- eb = ~0;
- if (enable_ept)
- eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
-
- /*
- * When running a nested L2 guest for which L1 specified an exception
- * bitmap, we must trap the same exceptions and pass them to L1. When
- * running L2, we only handle the exceptions specified above if L1 did
- * not want them.
- */
- if (is_guest_mode(vcpu))
- eb |= get_vmcs12(vcpu)->exception_bitmap;
-
- vmcs_write32(EXCEPTION_BITMAP, eb);
-}
-
-/*
- * Check if writes to the MSR are intercepted in the currently loaded
- * MSR bitmap.
- */
-static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
-{
- unsigned long *msr_bitmap;
- int f = sizeof(unsigned long);
-
- if (!cpu_has_vmx_msr_bitmap())
- return true;
-
- msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
-
- if (msr <= 0x1fff) {
- return !!test_bit(msr, msr_bitmap + 0x800 / f);
- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
- msr &= 0x1fff;
- return !!test_bit(msr, msr_bitmap + 0xc00 / f);
- }
-
- return true;
-}
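-
-/*
- * Bitmap layout assumed by the lookup above (per the SDM): the 4K MSR
- * bitmap holds read bitmaps at offset 0x000 (MSRs 0x00000000-0x00001fff)
- * and 0x400 (MSRs 0xc0000000-0xc0001fff), and the corresponding write
- * bitmaps at 0x800 and 0xc00, one bit per MSR.
- */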
-
-/*
- * Check if writes to the MSR are intercepted in the L01 MSR bitmap.
- */
-static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
-{
- unsigned long *msr_bitmap;
- int f = sizeof(unsigned long);
-
- if (!cpu_has_vmx_msr_bitmap())
- return true;
-
- msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
-
- if (msr <= 0x1fff) {
- return !!test_bit(msr, msr_bitmap + 0x800 / f);
- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
- msr &= 0x1fff;
- return !!test_bit(msr, msr_bitmap + 0xc00 / f);
- }
-
- return true;
-}
-
-static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
- unsigned long entry, unsigned long exit)
-{
- vm_entry_controls_clearbit(vmx, entry);
- vm_exit_controls_clearbit(vmx, exit);
-}
-
-static int find_msr(struct vmx_msrs *m, unsigned int msr)
-{
- unsigned int i;
-
- for (i = 0; i < m->nr; ++i) {
- if (m->val[i].index == msr)
- return i;
- }
- return -ENOENT;
-}
-
-static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
-{
- int i;
- struct msr_autoload *m = &vmx->msr_autoload;
-
- switch (msr) {
- case MSR_EFER:
- if (cpu_has_load_ia32_efer) {
- clear_atomic_switch_msr_special(vmx,
- VM_ENTRY_LOAD_IA32_EFER,
- VM_EXIT_LOAD_IA32_EFER);
- return;
- }
- break;
- case MSR_CORE_PERF_GLOBAL_CTRL:
- if (cpu_has_load_perf_global_ctrl) {
- clear_atomic_switch_msr_special(vmx,
- VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
- VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
- return;
- }
- break;
- }
- i = find_msr(&m->guest, msr);
- if (i < 0)
- goto skip_guest;
- --m->guest.nr;
- m->guest.val[i] = m->guest.val[m->guest.nr];
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
-
-skip_guest:
- i = find_msr(&m->host, msr);
- if (i < 0)
- return;
-
- --m->host.nr;
- m->host.val[i] = m->host.val[m->host.nr];
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
-}
-
-static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
- unsigned long entry, unsigned long exit,
- unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
- u64 guest_val, u64 host_val)
-{
- vmcs_write64(guest_val_vmcs, guest_val);
- if (host_val_vmcs != HOST_IA32_EFER)
- vmcs_write64(host_val_vmcs, host_val);
- vm_entry_controls_setbit(vmx, entry);
- vm_exit_controls_setbit(vmx, exit);
-}
-
-static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
- u64 guest_val, u64 host_val, bool entry_only)
-{
- int i, j = 0;
- struct msr_autoload *m = &vmx->msr_autoload;
-
- switch (msr) {
- case MSR_EFER:
- if (cpu_has_load_ia32_efer) {
- add_atomic_switch_msr_special(vmx,
- VM_ENTRY_LOAD_IA32_EFER,
- VM_EXIT_LOAD_IA32_EFER,
- GUEST_IA32_EFER,
- HOST_IA32_EFER,
- guest_val, host_val);
- return;
- }
- break;
- case MSR_CORE_PERF_GLOBAL_CTRL:
- if (cpu_has_load_perf_global_ctrl) {
- add_atomic_switch_msr_special(vmx,
- VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
- VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
- GUEST_IA32_PERF_GLOBAL_CTRL,
- HOST_IA32_PERF_GLOBAL_CTRL,
- guest_val, host_val);
- return;
- }
- break;
- case MSR_IA32_PEBS_ENABLE:
- /* PEBS needs a quiescent period after being disabled (to write
- * a record). Disabling PEBS through VMX MSR swapping doesn't
- * provide that period, so a CPU could write host's record into
- * guest's memory.
- */
- wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
- }
-
- i = find_msr(&m->guest, msr);
- if (!entry_only)
- j = find_msr(&m->host, msr);
-
- /*
- * find_msr() returns -ENOENT when the MSR is absent, so check
- * whether the relevant list is already full before appending.
- */
- if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
-     (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
- printk_once(KERN_WARNING "Not enough msr switch entries. "
- "Can't add msr %x\n", msr);
- return;
- }
- if (i < 0) {
- i = m->guest.nr++;
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
- }
- m->guest.val[i].index = msr;
- m->guest.val[i].value = guest_val;
-
- if (entry_only)
- return;
-
- if (j < 0) {
- j = m->host.nr++;
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
- }
- m->host.val[j].index = msr;
- m->host.val[j].value = host_val;
-}
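-
-/*
- * Usage sketch: the MSR_IA32_XSS handling in vmx_set_msr() below pairs
- * these helpers so the CPU swaps the value on VM entry/exit only while
- * guest and host actually differ:
- *
- *   if (vcpu->arch.ia32_xss != host_xss)
- *           add_atomic_switch_msr(vmx, MSR_IA32_XSS,
- *                                 vcpu->arch.ia32_xss, host_xss, false);
- *   else
- *           clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
- */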
-
-static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
-{
- u64 guest_efer = vmx->vcpu.arch.efer;
- u64 ignore_bits = 0;
-
- if (!enable_ept) {
- /*
- * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
- * host CPUID is more efficient than testing guest CPUID
- * or CR4. Host SMEP is anyway a requirement for guest SMEP.
- */
- if (boot_cpu_has(X86_FEATURE_SMEP))
- guest_efer |= EFER_NX;
- else if (!(guest_efer & EFER_NX))
- ignore_bits |= EFER_NX;
- }
-
- /*
- * LMA and LME handled by hardware; SCE meaningless outside long mode.
- */
- ignore_bits |= EFER_SCE;
-#ifdef CONFIG_X86_64
- ignore_bits |= EFER_LMA | EFER_LME;
- /* SCE is meaningful only in long mode on Intel */
- if (guest_efer & EFER_LMA)
- ignore_bits &= ~(u64)EFER_SCE;
-#endif
-
- /*
- * On EPT, we can't emulate NX, so we must switch EFER atomically.
- * On CPUs that support "load IA32_EFER", always switch EFER
- * atomically, since it's faster than switching it manually.
- */
- if (cpu_has_load_ia32_efer ||
- (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
- if (!(guest_efer & EFER_LMA))
- guest_efer &= ~EFER_LME;
- if (guest_efer != host_efer)
- add_atomic_switch_msr(vmx, MSR_EFER,
- guest_efer, host_efer, false);
- else
- clear_atomic_switch_msr(vmx, MSR_EFER);
- return false;
- } else {
- clear_atomic_switch_msr(vmx, MSR_EFER);
-
- guest_efer &= ~ignore_bits;
- guest_efer |= host_efer & ignore_bits;
-
- vmx->guest_msrs[efer_offset].data = guest_efer;
- vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
-
- return true;
- }
-}
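-
-/*
- * In short, update_transition_efer() picks one of two strategies: a
- * "false" return means EFER is swapped by the atomic autoload machinery
- * above on every entry/exit, while "true" defers to the cheaper
- * shared-MSR path (see setup_msrs() and kvm_set_shared_msr()), which is
- * safe because the bits in ignore_bits make the guest and host values
- * effectively interchangeable while the host kernel runs.
- */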
-
-#ifdef CONFIG_X86_32
-/*
- * On 32-bit kernels, VM exits still load the FS and GS bases from the
- * VMCS rather than the segment table. KVM uses this helper to figure
- * out the current bases to poke them into the VMCS before entry.
- */
-static unsigned long segment_base(u16 selector)
-{
- struct desc_struct *table;
- unsigned long v;
-
- if (!(selector & ~SEGMENT_RPL_MASK))
- return 0;
-
- table = get_current_gdt_ro();
-
- if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
- u16 ldt_selector = kvm_read_ldt();
-
- if (!(ldt_selector & ~SEGMENT_RPL_MASK))
- return 0;
-
- table = (struct desc_struct *)segment_base(ldt_selector);
- }
- v = get_desc_base(&table[selector >> 3]);
- return v;
-}
-#endif
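-
-/*
- * Selector layout assumed by segment_base(): bits 1:0 hold the RPL,
- * bit 2 the table indicator (0 = GDT, 1 = LDT) and bits 15:3 the
- * descriptor index, hence the "selector >> 3" above; e.g. selector
- * 0x0018 names GDT entry 3 with RPL 0.
- */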
-
-static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct vmcs_host_state *host_state;
-#ifdef CONFIG_X86_64
- int cpu = raw_smp_processor_id();
-#endif
- unsigned long fs_base, gs_base;
- u16 fs_sel, gs_sel;
- int i;
-
- vmx->req_immediate_exit = false;
-
- if (vmx->loaded_cpu_state)
- return;
-
- vmx->loaded_cpu_state = vmx->loaded_vmcs;
- host_state = &vmx->loaded_cpu_state->host_state;
-
- /*
- * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
- * allow segment selectors with cpl > 0 or ti == 1.
- */
- host_state->ldt_sel = kvm_read_ldt();
-
-#ifdef CONFIG_X86_64
- savesegment(ds, host_state->ds_sel);
- savesegment(es, host_state->es_sel);
-
- gs_base = cpu_kernelmode_gs_base(cpu);
- if (likely(is_64bit_mm(current->mm))) {
- save_fsgs_for_kvm();
- fs_sel = current->thread.fsindex;
- gs_sel = current->thread.gsindex;
- fs_base = current->thread.fsbase;
- vmx->msr_host_kernel_gs_base = current->thread.gsbase;
- } else {
- savesegment(fs, fs_sel);
- savesegment(gs, gs_sel);
- fs_base = read_msr(MSR_FS_BASE);
- vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
- }
-
- wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-#else
- savesegment(fs, fs_sel);
- savesegment(gs, gs_sel);
- fs_base = segment_base(fs_sel);
- gs_base = segment_base(gs_sel);
-#endif
-
- if (unlikely(fs_sel != host_state->fs_sel)) {
- if (!(fs_sel & 7))
- vmcs_write16(HOST_FS_SELECTOR, fs_sel);
- else
- vmcs_write16(HOST_FS_SELECTOR, 0);
- host_state->fs_sel = fs_sel;
- }
- if (unlikely(gs_sel != host_state->gs_sel)) {
- if (!(gs_sel & 7))
- vmcs_write16(HOST_GS_SELECTOR, gs_sel);
- else
- vmcs_write16(HOST_GS_SELECTOR, 0);
- host_state->gs_sel = gs_sel;
- }
- if (unlikely(fs_base != host_state->fs_base)) {
- vmcs_writel(HOST_FS_BASE, fs_base);
- host_state->fs_base = fs_base;
- }
- if (unlikely(gs_base != host_state->gs_base)) {
- vmcs_writel(HOST_GS_BASE, gs_base);
- host_state->gs_base = gs_base;
- }
-
- for (i = 0; i < vmx->save_nmsrs; ++i)
- kvm_set_shared_msr(vmx->guest_msrs[i].index,
- vmx->guest_msrs[i].data,
- vmx->guest_msrs[i].mask);
-}
-
-static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
-{
- struct vmcs_host_state *host_state;
-
- if (!vmx->loaded_cpu_state)
- return;
-
- WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
- host_state = &vmx->loaded_cpu_state->host_state;
-
- ++vmx->vcpu.stat.host_state_reload;
- vmx->loaded_cpu_state = NULL;
-
-#ifdef CONFIG_X86_64
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-#endif
- if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
- kvm_load_ldt(host_state->ldt_sel);
-#ifdef CONFIG_X86_64
- load_gs_index(host_state->gs_sel);
-#else
- loadsegment(gs, host_state->gs_sel);
-#endif
- }
- if (host_state->fs_sel & 7)
- loadsegment(fs, host_state->fs_sel);
-#ifdef CONFIG_X86_64
- if (unlikely(host_state->ds_sel | host_state->es_sel)) {
- loadsegment(ds, host_state->ds_sel);
- loadsegment(es, host_state->es_sel);
- }
-#endif
- invalidate_tss_limit();
-#ifdef CONFIG_X86_64
- wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
-#endif
- load_fixmap_gdt(raw_smp_processor_id());
-}
-
-#ifdef CONFIG_X86_64
-static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
-{
- preempt_disable();
- if (vmx->loaded_cpu_state)
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
- preempt_enable();
- return vmx->msr_guest_kernel_gs_base;
-}
-
-static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
-{
- preempt_disable();
- if (vmx->loaded_cpu_state)
- wrmsrl(MSR_KERNEL_GS_BASE, data);
- preempt_enable();
- vmx->msr_guest_kernel_gs_base = data;
-}
-#endif
-
-static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
-{
- struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- struct pi_desc old, new;
- unsigned int dest;
-
- /*
- * In case of hot-plug or hot-unplug, we may have to undo
- * vmx_vcpu_pi_put even if there is no assigned device. And we
- * always keep PI.NDST up to date for simplicity: it makes the
- * code easier, and CPU migration is not a fast path.
- */
- if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
- return;
-
- /*
- * First handle the simple case where no cmpxchg is necessary; just
- * allow posting non-urgent interrupts.
- *
- * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
- * PI.NDST: pi_post_block will do it for us and the wakeup_handler
- * expects the VCPU to be on the blocked_vcpu_list that matches
- * PI.NDST.
- */
- if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
- vcpu->cpu == cpu) {
- pi_clear_sn(pi_desc);
- return;
- }
-
- /* The full case. */
- do {
- old.control = new.control = pi_desc->control;
-
- dest = cpu_physical_id(cpu);
-
- if (x2apic_enabled())
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
-
- new.sn = 0;
- } while (cmpxchg64(&pi_desc->control, old.control,
- new.control) != old.control);
-}
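-
-/*
- * NDST encoding used in the cmpxchg loop above: in x2APIC mode the
- * field holds the full 32-bit APIC ID, while in xAPIC mode the 8-bit
- * ID sits in bits 15:8, hence the "(dest << 8) & 0xFF00".
- */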
-
-static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
-{
- vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
- vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
-}
-
-/*
- * Switches to specified vcpu, until a matching vcpu_put(), but assumes
- * vcpu mutex is already taken.
- */
-static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
-
- if (!already_loaded) {
- loaded_vmcs_clear(vmx->loaded_vmcs);
- local_irq_disable();
- crash_disable_local_vmclear(cpu);
-
- /*
- * Read loaded_vmcs->cpu should be before fetching
- * loaded_vmcs->loaded_vmcss_on_cpu_link.
- * See the comments in __loaded_vmcs_clear().
- */
- smp_rmb();
-
- list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
- &per_cpu(loaded_vmcss_on_cpu, cpu));
- crash_enable_local_vmclear(cpu);
- local_irq_enable();
- }
-
- if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
- per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
- vmcs_load(vmx->loaded_vmcs->vmcs);
- indirect_branch_prediction_barrier();
- }
-
- if (!already_loaded) {
- void *gdt = get_current_gdt_ro();
- unsigned long sysenter_esp;
-
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-
- /*
- * Linux uses per-cpu TSS and GDT, so set these when switching
- * processors. See 22.2.4.
- */
- vmcs_writel(HOST_TR_BASE,
- (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
- vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */
-
- /*
- * VM exits change the host TR limit to 0x67 after a VM
- * exit. This is okay, since 0x67 covers everything except
- * the IO bitmap, and we have code to handle the IO bitmap
- * being lost after a VM exit.
- */
- BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
-
- rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
- vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
-
- vmx->loaded_vmcs->cpu = cpu;
- }
-
- /* Setup TSC multiplier */
- if (kvm_has_tsc_control &&
- vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
- decache_tsc_multiplier(vmx);
-
- vmx_vcpu_pi_load(vcpu, cpu);
- vmx->host_pkru = read_pkru();
- vmx->host_debugctlmsr = get_debugctlmsr();
-}
-
-static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
-{
- struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
-
- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
- return;
-
- /* Set SN when the vCPU is preempted */
- if (vcpu->preempted)
- pi_set_sn(pi_desc);
-}
-
-static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
-{
- vmx_vcpu_pi_put(vcpu);
-
- vmx_prepare_switch_to_host(to_vmx(vcpu));
-}
-
-static bool emulation_required(struct kvm_vcpu *vcpu)
-{
- return emulate_invalid_guest_state && !guest_state_valid(vcpu);
-}
-
-static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
-
-/*
- * Return the cr0 value that a nested guest would read. This is a combination
- * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
- * its hypervisor (cr0_read_shadow).
- */
-static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
-{
- return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
- (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
-}
-static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
-{
- return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
- (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
-}
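-
-/*
- * Worked example for the two helpers above: if L1 sets
- * cr0_guest_host_mask = X86_CR0_TS, then CR0.TS is owned by L1 and L2
- * reads it from cr0_read_shadow, while every other CR0 bit comes
- * straight from the live guest_cr0.
- */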
-
-static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
-{
- unsigned long rflags, save_rflags;
-
- if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
- __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
- rflags = vmcs_readl(GUEST_RFLAGS);
- if (to_vmx(vcpu)->rmode.vm86_active) {
- rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
- save_rflags = to_vmx(vcpu)->rmode.save_rflags;
- rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
- }
- to_vmx(vcpu)->rflags = rflags;
- }
- return to_vmx(vcpu)->rflags;
-}
-
-static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
-{
- unsigned long old_rflags = vmx_get_rflags(vcpu);
-
- __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
- to_vmx(vcpu)->rflags = rflags;
- if (to_vmx(vcpu)->rmode.vm86_active) {
- to_vmx(vcpu)->rmode.save_rflags = rflags;
- rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
- }
- vmcs_writel(GUEST_RFLAGS, rflags);
-
- if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
- to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
-}
-
-static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
-{
- u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
- int ret = 0;
-
- if (interruptibility & GUEST_INTR_STATE_STI)
- ret |= KVM_X86_SHADOW_INT_STI;
- if (interruptibility & GUEST_INTR_STATE_MOV_SS)
- ret |= KVM_X86_SHADOW_INT_MOV_SS;
-
- return ret;
-}
-
-static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
-{
- u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
- u32 interruptibility = interruptibility_old;
-
- interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
-
- if (mask & KVM_X86_SHADOW_INT_MOV_SS)
- interruptibility |= GUEST_INTR_STATE_MOV_SS;
- else if (mask & KVM_X86_SHADOW_INT_STI)
- interruptibility |= GUEST_INTR_STATE_STI;
-
- if (interruptibility != interruptibility_old)
- vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
-}
-
-static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
-{
- unsigned long rip;
-
- rip = kvm_rip_read(vcpu);
- rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
- kvm_rip_write(vcpu, rip);
-
- /* skipping an emulated instruction also counts */
- vmx_set_interrupt_shadow(vcpu, 0);
-}
-
-static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
- unsigned long exit_qual)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- unsigned int nr = vcpu->arch.exception.nr;
- u32 intr_info = nr | INTR_INFO_VALID_MASK;
-
- if (vcpu->arch.exception.has_error_code) {
- vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
- intr_info |= INTR_INFO_DELIVER_CODE_MASK;
- }
-
- if (kvm_exception_is_soft(nr))
- intr_info |= INTR_TYPE_SOFT_EXCEPTION;
- else
- intr_info |= INTR_TYPE_HARD_EXCEPTION;
-
- if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
- vmx_get_nmi_mask(vcpu))
- intr_info |= INTR_INFO_UNBLOCK_NMI;
-
- nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
-}
-
-/*
- * KVM wants to re-inject page faults it received into the guest. This function
- * checks whether, in a nested guest, they need to be injected to L1 or L2.
- */
-static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- unsigned int nr = vcpu->arch.exception.nr;
- bool has_payload = vcpu->arch.exception.has_payload;
- unsigned long payload = vcpu->arch.exception.payload;
-
- if (nr == PF_VECTOR) {
- if (vcpu->arch.exception.nested_apf) {
- *exit_qual = vcpu->arch.apf.nested_apf_token;
- return 1;
- }
- if (nested_vmx_is_page_fault_vmexit(vmcs12,
- vcpu->arch.exception.error_code)) {
- *exit_qual = has_payload ? payload : vcpu->arch.cr2;
- return 1;
- }
- } else if (vmcs12->exception_bitmap & (1u << nr)) {
- if (nr == DB_VECTOR) {
- if (!has_payload) {
- payload = vcpu->arch.dr6;
- payload &= ~(DR6_FIXED_1 | DR6_BT);
- payload ^= DR6_RTM;
- }
- *exit_qual = payload;
- } else
- *exit_qual = 0;
- return 1;
- }
-
- return 0;
-}
-
-static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
-{
- /*
- * Ensure that we clear the HLT state in the VMCS. We don't need to
- * explicitly skip the instruction because if the HLT state is set,
- * then the instruction is already executing and RIP has already been
- * advanced.
- */
- if (kvm_hlt_in_guest(vcpu->kvm) &&
- vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
- vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
-}
-
-static void vmx_queue_exception(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned nr = vcpu->arch.exception.nr;
- bool has_error_code = vcpu->arch.exception.has_error_code;
- u32 error_code = vcpu->arch.exception.error_code;
- u32 intr_info = nr | INTR_INFO_VALID_MASK;
-
- kvm_deliver_exception_payload(vcpu);
-
- if (has_error_code) {
- vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
- intr_info |= INTR_INFO_DELIVER_CODE_MASK;
- }
-
- if (vmx->rmode.vm86_active) {
- int inc_eip = 0;
- if (kvm_exception_is_soft(nr))
- inc_eip = vcpu->arch.event_exit_inst_len;
- if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
- kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
- return;
- }
-
- WARN_ON_ONCE(vmx->emulation_required);
-
- if (kvm_exception_is_soft(nr)) {
- vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
- vmx->vcpu.arch.event_exit_inst_len);
- intr_info |= INTR_TYPE_SOFT_EXCEPTION;
- } else
- intr_info |= INTR_TYPE_HARD_EXCEPTION;
-
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
-
- vmx_clear_hlt(vcpu);
-}
-
-static bool vmx_rdtscp_supported(void)
-{
- return cpu_has_vmx_rdtscp();
-}
-
-static bool vmx_invpcid_supported(void)
-{
- return cpu_has_vmx_invpcid();
-}
-
-/*
- * Swap MSR entry in host/guest MSR entry array.
- */
-static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
-{
- struct shared_msr_entry tmp;
-
- tmp = vmx->guest_msrs[to];
- vmx->guest_msrs[to] = vmx->guest_msrs[from];
- vmx->guest_msrs[from] = tmp;
-}
-
-/*
- * Set up the vmcs to automatically save and restore system
- * msrs. Don't touch the 64-bit msrs if the guest is in legacy
- * mode, as fiddling with msrs is very expensive.
- */
-static void setup_msrs(struct vcpu_vmx *vmx)
-{
- int save_nmsrs, index;
-
- save_nmsrs = 0;
-#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu)) {
- index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
- if (index >= 0)
- move_msr_up(vmx, index, save_nmsrs++);
- index = __find_msr_index(vmx, MSR_LSTAR);
- if (index >= 0)
- move_msr_up(vmx, index, save_nmsrs++);
- index = __find_msr_index(vmx, MSR_CSTAR);
- if (index >= 0)
- move_msr_up(vmx, index, save_nmsrs++);
- index = __find_msr_index(vmx, MSR_TSC_AUX);
- if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
- move_msr_up(vmx, index, save_nmsrs++);
- /*
- * MSR_STAR is only needed on long mode guests, and only
- * if efer.sce is enabled.
- */
- index = __find_msr_index(vmx, MSR_STAR);
- if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
- move_msr_up(vmx, index, save_nmsrs++);
- }
-#endif
- index = __find_msr_index(vmx, MSR_EFER);
- if (index >= 0 && update_transition_efer(vmx, index))
- move_msr_up(vmx, index, save_nmsrs++);
-
- vmx->save_nmsrs = save_nmsrs;
-
- if (cpu_has_vmx_msr_bitmap())
- vmx_update_msr_bitmap(&vmx->vcpu);
-}
-
-static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
- if (is_guest_mode(vcpu) &&
- (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
- return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
-
- return vcpu->arch.tsc_offset;
-}
-
-/*
- * Writes 'offset' into the guest's TSC-offset register.
- */
-static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
-{
- if (is_guest_mode(vcpu)) {
- /*
- * We're here if L1 chose not to trap WRMSR to TSC. According
- * to the spec, this should set L1's TSC; the offset that L1
- * set for L2 remains unchanged, and still needs to be added
- * to the newly set TSC to get L2's TSC.
- */
- struct vmcs12 *vmcs12;
- /* recalculate vmcs02.TSC_OFFSET: */
- vmcs12 = get_vmcs12(vcpu);
- vmcs_write64(TSC_OFFSET, offset +
- (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
- vmcs12->tsc_offset : 0));
- } else {
- trace_kvm_write_tsc_offset(vcpu->vcpu_id,
- vmcs_read64(TSC_OFFSET), offset);
- vmcs_write64(TSC_OFFSET, offset);
- }
-}
-
-/*
- * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
- * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
- * all guests if the "nested" module option is off, and can also be disabled
- * for a single guest by disabling its VMX cpuid bit.
- */
-static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
-{
- return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
-}
-
-/*
- * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
- * returned for the various VMX controls MSRs when nested VMX is enabled.
- * The same values should also be used to verify that vmcs12 control fields are
- * valid during nested entry from L1 to L2.
- * Each of these control msrs has a low and high 32-bit half: A low bit is on
- * if the corresponding bit in the (32-bit) control field *must* be on, and a
- * bit in the high half is on if the corresponding bit in the control field
- * may be on. See also vmx_control_verify().
- */
-static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
-{
- if (!nested) {
- memset(msrs, 0, sizeof(*msrs));
- return;
- }
-
- /*
- * Note that as a general rule, the high half of the MSRs (bits in
- * the control fields which may be 1) should be initialized by the
- * intersection of the underlying hardware's MSR (i.e., features which
- * can be supported) and the list of features we want to expose -
- * because they are known to be properly supported in our code.
- * Also, usually, the low half of the MSRs (bits which must be 1) can
- * be set to 0, meaning that L1 may turn off any of these bits. The
- * reason is that if one of these bits is necessary, it will appear
- * in vmcs01, and prepare_vmcs02, which bitwise-ORs the control
- * fields of vmcs01 and vmcs12, will keep these bits set even if L1
- * cleared them - and nested_vmx_exit_reflected() will not pass the
- * resulting exits to L1.
- * These rules have exceptions below.
- */
-
- /* pin-based controls */
- rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
- msrs->pinbased_ctls_low,
- msrs->pinbased_ctls_high);
- msrs->pinbased_ctls_low |=
- PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
- msrs->pinbased_ctls_high &=
- PIN_BASED_EXT_INTR_MASK |
- PIN_BASED_NMI_EXITING |
- PIN_BASED_VIRTUAL_NMIS |
- (apicv ? PIN_BASED_POSTED_INTR : 0);
- msrs->pinbased_ctls_high |=
- PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
- PIN_BASED_VMX_PREEMPTION_TIMER;
-
- /* exit controls */
- rdmsr(MSR_IA32_VMX_EXIT_CTLS,
- msrs->exit_ctls_low,
- msrs->exit_ctls_high);
- msrs->exit_ctls_low =
- VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
-
- msrs->exit_ctls_high &=
-#ifdef CONFIG_X86_64
- VM_EXIT_HOST_ADDR_SPACE_SIZE |
-#endif
- VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
- msrs->exit_ctls_high |=
- VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
- VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
- VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
-
- /* We support free control of debug control saving. */
- msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
-
- /* entry controls */
- rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
- msrs->entry_ctls_low,
- msrs->entry_ctls_high);
- msrs->entry_ctls_low =
- VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
- msrs->entry_ctls_high &=
-#ifdef CONFIG_X86_64
- VM_ENTRY_IA32E_MODE |
-#endif
- VM_ENTRY_LOAD_IA32_PAT;
- msrs->entry_ctls_high |=
- (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
-
- /* We support free control of debug control loading. */
- msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
-
- /* cpu-based controls */
- rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
- msrs->procbased_ctls_low,
- msrs->procbased_ctls_high);
- msrs->procbased_ctls_low =
- CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
- msrs->procbased_ctls_high &=
- CPU_BASED_VIRTUAL_INTR_PENDING |
- CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
- CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
- CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_CR3_STORE_EXITING |
-#ifdef CONFIG_X86_64
- CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
-#endif
- CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
- CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
- CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
- CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
- CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
- /*
- * We can allow some features even when not supported by the
- * hardware. For example, L1 can specify an MSR bitmap - and we
- * can use it to avoid exits to L1 - even when L0 runs L2
- * without MSR bitmaps.
- */
- msrs->procbased_ctls_high |=
- CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
- CPU_BASED_USE_MSR_BITMAPS;
-
- /* We support free control of CR3 access interception. */
- msrs->procbased_ctls_low &=
- ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
-
- /*
- * secondary cpu-based controls. Do not include those that
- * depend on CPUID bits, they are added later by vmx_cpuid_update.
- */
- rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
- msrs->secondary_ctls_low,
- msrs->secondary_ctls_high);
- msrs->secondary_ctls_low = 0;
- msrs->secondary_ctls_high &=
- SECONDARY_EXEC_DESC |
- SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
- SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
- SECONDARY_EXEC_WBINVD_EXITING;
-
- /*
- * We can emulate "VMCS shadowing," even if the hardware
- * doesn't support it.
- */
- msrs->secondary_ctls_high |=
- SECONDARY_EXEC_SHADOW_VMCS;
-
- if (enable_ept) {
- /* nested EPT: emulate EPT also to L1 */
- msrs->secondary_ctls_high |=
- SECONDARY_EXEC_ENABLE_EPT;
- msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
- VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
- if (cpu_has_vmx_ept_execute_only())
- msrs->ept_caps |=
- VMX_EPT_EXECUTE_ONLY_BIT;
- msrs->ept_caps &= vmx_capability.ept;
- msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
- VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
- VMX_EPT_1GB_PAGE_BIT;
- if (enable_ept_ad_bits) {
- msrs->secondary_ctls_high |=
- SECONDARY_EXEC_ENABLE_PML;
- msrs->ept_caps |= VMX_EPT_AD_BIT;
- }
- }
-
- if (cpu_has_vmx_vmfunc()) {
- msrs->secondary_ctls_high |=
- SECONDARY_EXEC_ENABLE_VMFUNC;
- /*
- * Advertise EPTP switching unconditionally
- * since we emulate it
- */
- if (enable_ept)
- msrs->vmfunc_controls =
- VMX_VMFUNC_EPTP_SWITCHING;
- }
-
- /*
- * Old versions of KVM use the single-context version without
- * checking for support, so declare that it is supported even
- * though it is treated as global context. The alternative would be
- * failing the single-context invvpid, which is worse.
- */
- if (enable_vpid) {
- msrs->secondary_ctls_high |=
- SECONDARY_EXEC_ENABLE_VPID;
- msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
- VMX_VPID_EXTENT_SUPPORTED_MASK;
- }
-
- if (enable_unrestricted_guest)
- msrs->secondary_ctls_high |=
- SECONDARY_EXEC_UNRESTRICTED_GUEST;
-
- if (flexpriority_enabled)
- msrs->secondary_ctls_high |=
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-
- /* miscellaneous data */
- rdmsr(MSR_IA32_VMX_MISC,
- msrs->misc_low,
- msrs->misc_high);
- msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
- msrs->misc_low |=
- MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
- VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
- VMX_MISC_ACTIVITY_HLT;
- msrs->misc_high = 0;
-
- /*
- * This MSR reports some information about VMX support. We
- * should return information about the VMX we emulate for the
- * guest, and the VMCS structure we give it - not about the
- * VMX support of the underlying hardware.
- */
- msrs->basic =
- VMCS12_REVISION |
- VMX_BASIC_TRUE_CTLS |
- ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
- (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
-
- if (cpu_has_vmx_basic_inout())
- msrs->basic |= VMX_BASIC_INOUT;
-
- /*
- * These MSRs specify bits which the guest must keep fixed on
- * while L1 is in VMXON mode (in L1's root mode, or running an L2).
- * We picked the standard core2 setting.
- */
-#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
-#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
- msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
- msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
-
- /* These MSRs specify bits which the guest must keep fixed off. */
- rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
- rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
-
- /* highest index: VMX_PREEMPTION_TIMER_VALUE */
- msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
-}
-
-/*
- * if fixed0[i] == 1: val[i] must be 1
- * if fixed1[i] == 0: val[i] must be 0
- */
-static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
-{
- return ((val & fixed1) | fixed0) == val;
-}
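-
-/*
- * Example: with fixed0 = 0x1 and fixed1 = 0x5, val = 0x5 is valid
- * (bit 0 is 1 as required, bit 2 is permitted), while val = 0x2 fails
- * on both counts: bit 0 violates fixed0 and bit 1 violates fixed1.
- */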
-
-static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
-{
- return fixed_bits_valid(control, low, high);
-}
-
-static inline u64 vmx_control_msr(u32 low, u32 high)
-{
- return low | ((u64)high << 32);
-}
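-
-/*
- * E.g. vmx_control_msr(0x16, 0xfff9fffe) builds the 64-bit MSR image
- * 0xfff9fffe00000016, matching the layout of the VMX control MSRs:
- * low half = must-be-1 bits, high half = may-be-1 bits.
- */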
-
-static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
-{
- superset &= mask;
- subset &= mask;
-
- return (superset | subset) == superset;
-}
-
-static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
-{
- const u64 feature_and_reserved =
- /* feature (except bit 48; see below) */
- BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
- /* reserved */
- BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
- u64 vmx_basic = vmx->nested.msrs.basic;
-
- if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
- return -EINVAL;
-
- /*
- * KVM does not emulate a version of VMX that constrains physical
- * addresses of VMX structures (e.g. VMCS) to 32-bits.
- */
- if (data & BIT_ULL(48))
- return -EINVAL;
-
- if (vmx_basic_vmcs_revision_id(vmx_basic) !=
- vmx_basic_vmcs_revision_id(data))
- return -EINVAL;
-
- if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
- return -EINVAL;
-
- vmx->nested.msrs.basic = data;
- return 0;
-}
-
-static int
-vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
-{
- u64 supported;
- u32 *lowp, *highp;
-
- switch (msr_index) {
- case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
- lowp = &vmx->nested.msrs.pinbased_ctls_low;
- highp = &vmx->nested.msrs.pinbased_ctls_high;
- break;
- case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
- lowp = &vmx->nested.msrs.procbased_ctls_low;
- highp = &vmx->nested.msrs.procbased_ctls_high;
- break;
- case MSR_IA32_VMX_TRUE_EXIT_CTLS:
- lowp = &vmx->nested.msrs.exit_ctls_low;
- highp = &vmx->nested.msrs.exit_ctls_high;
- break;
- case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
- lowp = &vmx->nested.msrs.entry_ctls_low;
- highp = &vmx->nested.msrs.entry_ctls_high;
- break;
- case MSR_IA32_VMX_PROCBASED_CTLS2:
- lowp = &vmx->nested.msrs.secondary_ctls_low;
- highp = &vmx->nested.msrs.secondary_ctls_high;
- break;
- default:
- BUG();
- }
-
- supported = vmx_control_msr(*lowp, *highp);
-
- /* Check must-be-1 bits are still 1. */
- if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
- return -EINVAL;
-
- /* Check must-be-0 bits are still 0. */
- if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
- return -EINVAL;
-
- *lowp = data;
- *highp = data >> 32;
- return 0;
-}
-
-static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
-{
- const u64 feature_and_reserved_bits =
- /* feature */
- BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
- BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
- /* reserved */
- GENMASK_ULL(13, 9) | BIT_ULL(31);
- u64 vmx_misc;
-
- vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
- vmx->nested.msrs.misc_high);
-
- if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
- return -EINVAL;
-
- if ((vmx->nested.msrs.pinbased_ctls_high &
- PIN_BASED_VMX_PREEMPTION_TIMER) &&
- vmx_misc_preemption_timer_rate(data) !=
- vmx_misc_preemption_timer_rate(vmx_misc))
- return -EINVAL;
-
- if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
- return -EINVAL;
-
- if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
- return -EINVAL;
-
- if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
- return -EINVAL;
-
- vmx->nested.msrs.misc_low = data;
- vmx->nested.msrs.misc_high = data >> 32;
-
- /*
- * If L1 has read-only VM-exit information fields, use the
- * less permissive vmx_vmwrite_bitmap to specify write
- * permissions for the shadow VMCS.
- */
- if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
- vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
-
- return 0;
-}
-
-static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
-{
- u64 vmx_ept_vpid_cap;
-
- vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
- vmx->nested.msrs.vpid_caps);
-
- /* Every bit is either reserved or a feature bit. */
- if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
- return -EINVAL;
-
- vmx->nested.msrs.ept_caps = data;
- vmx->nested.msrs.vpid_caps = data >> 32;
- return 0;
-}
-
-static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
-{
- u64 *msr;
-
- switch (msr_index) {
- case MSR_IA32_VMX_CR0_FIXED0:
- msr = &vmx->nested.msrs.cr0_fixed0;
- break;
- case MSR_IA32_VMX_CR4_FIXED0:
- msr = &vmx->nested.msrs.cr4_fixed0;
- break;
- default:
- BUG();
- }
-
- /*
- * Bits that are 1 (i.e. "must-be-1" during VMX operation) must also
- * be 1 in the restored value.
- */
- if (!is_bitwise_subset(data, *msr, -1ULL))
- return -EINVAL;
-
- *msr = data;
- return 0;
-}
-
-/*
- * Called when userspace is restoring VMX MSRs.
- *
- * Returns 0 on success, non-0 otherwise.
- */
-static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- /*
- * Don't allow changes to the VMX capability MSRs while the vCPU
- * is in VMX operation.
- */
- if (vmx->nested.vmxon)
- return -EBUSY;
-
- switch (msr_index) {
- case MSR_IA32_VMX_BASIC:
- return vmx_restore_vmx_basic(vmx, data);
- case MSR_IA32_VMX_PINBASED_CTLS:
- case MSR_IA32_VMX_PROCBASED_CTLS:
- case MSR_IA32_VMX_EXIT_CTLS:
- case MSR_IA32_VMX_ENTRY_CTLS:
- /*
- * The "non-true" VMX capability MSRs are generated from the
- * "true" MSRs, so we do not support restoring them directly.
- *
- * If userspace wants to emulate VMX_BASIC[55]=0, userspace
- * should restore the "true" MSRs with the must-be-1 bits
- * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
- * DEFAULT SETTINGS".
- */
- return -EINVAL;
- case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
- case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
- case MSR_IA32_VMX_TRUE_EXIT_CTLS:
- case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
- case MSR_IA32_VMX_PROCBASED_CTLS2:
- return vmx_restore_control_msr(vmx, msr_index, data);
- case MSR_IA32_VMX_MISC:
- return vmx_restore_vmx_misc(vmx, data);
- case MSR_IA32_VMX_CR0_FIXED0:
- case MSR_IA32_VMX_CR4_FIXED0:
- return vmx_restore_fixed0_msr(vmx, msr_index, data);
- case MSR_IA32_VMX_CR0_FIXED1:
- case MSR_IA32_VMX_CR4_FIXED1:
- /*
- * These MSRs are generated based on the vCPU's CPUID, so we
- * do not support restoring them directly.
- */
- return -EINVAL;
- case MSR_IA32_VMX_EPT_VPID_CAP:
- return vmx_restore_vmx_ept_vpid_cap(vmx, data);
- case MSR_IA32_VMX_VMCS_ENUM:
- vmx->nested.msrs.vmcs_enum = data;
- return 0;
- default:
- /*
- * The rest of the VMX capability MSRs do not support restore.
- */
- return -EINVAL;
- }
-}
-
-/* Returns 0 on success, non-0 otherwise. */
-static int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
-{
- switch (msr_index) {
- case MSR_IA32_VMX_BASIC:
- *pdata = msrs->basic;
- break;
- case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
- case MSR_IA32_VMX_PINBASED_CTLS:
- *pdata = vmx_control_msr(
- msrs->pinbased_ctls_low,
- msrs->pinbased_ctls_high);
- if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
- *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
- break;
- case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
- case MSR_IA32_VMX_PROCBASED_CTLS:
- *pdata = vmx_control_msr(
- msrs->procbased_ctls_low,
- msrs->procbased_ctls_high);
- if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
- *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
- break;
- case MSR_IA32_VMX_TRUE_EXIT_CTLS:
- case MSR_IA32_VMX_EXIT_CTLS:
- *pdata = vmx_control_msr(
- msrs->exit_ctls_low,
- msrs->exit_ctls_high);
- if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
- *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
- break;
- case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
- case MSR_IA32_VMX_ENTRY_CTLS:
- *pdata = vmx_control_msr(
- msrs->entry_ctls_low,
- msrs->entry_ctls_high);
- if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
- *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
- break;
- case MSR_IA32_VMX_MISC:
- *pdata = vmx_control_msr(
- msrs->misc_low,
- msrs->misc_high);
- break;
- case MSR_IA32_VMX_CR0_FIXED0:
- *pdata = msrs->cr0_fixed0;
- break;
- case MSR_IA32_VMX_CR0_FIXED1:
- *pdata = msrs->cr0_fixed1;
- break;
- case MSR_IA32_VMX_CR4_FIXED0:
- *pdata = msrs->cr4_fixed0;
- break;
- case MSR_IA32_VMX_CR4_FIXED1:
- *pdata = msrs->cr4_fixed1;
- break;
- case MSR_IA32_VMX_VMCS_ENUM:
- *pdata = msrs->vmcs_enum;
- break;
- case MSR_IA32_VMX_PROCBASED_CTLS2:
- *pdata = vmx_control_msr(
- msrs->secondary_ctls_low,
- msrs->secondary_ctls_high);
- break;
- case MSR_IA32_VMX_EPT_VPID_CAP:
- *pdata = msrs->ept_caps |
- ((u64)msrs->vpid_caps << 32);
- break;
- case MSR_IA32_VMX_VMFUNC:
- *pdata = msrs->vmfunc_controls;
- break;
- default:
- return 1;
- }
-
- return 0;
-}
-
-static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
- uint64_t val)
-{
- uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
-
- return !(val & ~valid_bits);
-}
-
-static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
-{
- switch (msr->index) {
- case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
- if (!nested)
- return 1;
- return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
- default:
- return 1;
- }
-
- return 0;
-}
-
-/*
- * Reads an msr value (of 'msr_index') into 'pdata'.
- * Returns 0 on success, non-0 otherwise.
- * Assumes vcpu_load() was already called.
- */
-static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct shared_msr_entry *msr;
-
- switch (msr_info->index) {
-#ifdef CONFIG_X86_64
- case MSR_FS_BASE:
- msr_info->data = vmcs_readl(GUEST_FS_BASE);
- break;
- case MSR_GS_BASE:
- msr_info->data = vmcs_readl(GUEST_GS_BASE);
- break;
- case MSR_KERNEL_GS_BASE:
- msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
- break;
-#endif
- case MSR_EFER:
- return kvm_get_msr_common(vcpu, msr_info);
- case MSR_IA32_SPEC_CTRL:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
- return 1;
-
- msr_info->data = to_vmx(vcpu)->spec_ctrl;
- break;
- case MSR_IA32_ARCH_CAPABILITIES:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
- return 1;
- msr_info->data = to_vmx(vcpu)->arch_capabilities;
- break;
- case MSR_IA32_SYSENTER_CS:
- msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
- break;
- case MSR_IA32_SYSENTER_EIP:
- msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
- break;
- case MSR_IA32_SYSENTER_ESP:
- msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
- break;
- case MSR_IA32_BNDCFGS:
- if (!kvm_mpx_supported() ||
- (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
- return 1;
- msr_info->data = vmcs_read64(GUEST_BNDCFGS);
- break;
- case MSR_IA32_MCG_EXT_CTL:
- if (!msr_info->host_initiated &&
- !(vmx->msr_ia32_feature_control &
- FEATURE_CONTROL_LMCE))
- return 1;
- msr_info->data = vcpu->arch.mcg_ext_ctl;
- break;
- case MSR_IA32_FEATURE_CONTROL:
- msr_info->data = vmx->msr_ia32_feature_control;
- break;
- case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
- if (!nested_vmx_allowed(vcpu))
- return 1;
- return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
- &msr_info->data);
- case MSR_IA32_XSS:
- if (!vmx_xsaves_supported())
- return 1;
- msr_info->data = vcpu->arch.ia32_xss;
- break;
- case MSR_TSC_AUX:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
- return 1;
- /* Otherwise falls through */
- default:
- msr = find_msr_entry(vmx, msr_info->index);
- if (msr) {
- msr_info->data = msr->data;
- break;
- }
- return kvm_get_msr_common(vcpu, msr_info);
- }
-
- return 0;
-}
-
-static void vmx_leave_nested(struct kvm_vcpu *vcpu);
-
-/*
- * Writes msr value into the appropriate "register".
- * Returns 0 on success, non-0 otherwise.
- * Assumes vcpu_load() was already called.
- */
-static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct shared_msr_entry *msr;
- int ret = 0;
- u32 msr_index = msr_info->index;
- u64 data = msr_info->data;
-
- switch (msr_index) {
- case MSR_EFER:
- ret = kvm_set_msr_common(vcpu, msr_info);
- break;
-#ifdef CONFIG_X86_64
- case MSR_FS_BASE:
- vmx_segment_cache_clear(vmx);
- vmcs_writel(GUEST_FS_BASE, data);
- break;
- case MSR_GS_BASE:
- vmx_segment_cache_clear(vmx);
- vmcs_writel(GUEST_GS_BASE, data);
- break;
- case MSR_KERNEL_GS_BASE:
- vmx_write_guest_kernel_gs_base(vmx, data);
- break;
-#endif
- case MSR_IA32_SYSENTER_CS:
- vmcs_write32(GUEST_SYSENTER_CS, data);
- break;
- case MSR_IA32_SYSENTER_EIP:
- vmcs_writel(GUEST_SYSENTER_EIP, data);
- break;
- case MSR_IA32_SYSENTER_ESP:
- vmcs_writel(GUEST_SYSENTER_ESP, data);
- break;
- case MSR_IA32_BNDCFGS:
- if (!kvm_mpx_supported() ||
- (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
- return 1;
- if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
- (data & MSR_IA32_BNDCFGS_RSVD))
- return 1;
- vmcs_write64(GUEST_BNDCFGS, data);
- break;
- case MSR_IA32_SPEC_CTRL:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
- return 1;
-
- /* The STIBP bit doesn't fault even if it's not advertised */
- if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
- return 1;
-
- vmx->spec_ctrl = data;
-
- if (!data)
- break;
-
- /*
- * For non-nested:
- * When it's written (to non-zero) for the first time, pass
- * it through.
- *
- * For nested:
- * The handling of the MSR bitmap for L2 guests is done in
- * nested_vmx_merge_msr_bitmap. We should not touch the
- * vmcs02.msr_bitmap here since it gets completely overwritten
- * in the merging. We update the vmcs01 here for L1 as well
- * since it will end up touching the MSR anyway now.
- */
- vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
- MSR_IA32_SPEC_CTRL,
- MSR_TYPE_RW);
- break;
- case MSR_IA32_PRED_CMD:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
- return 1;
-
- if (data & ~PRED_CMD_IBPB)
- return 1;
-
- if (!data)
- break;
-
- wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
-
- /*
- * For non-nested:
- * When it's written (to non-zero) for the first time, pass
- * it through.
- *
- * For nested:
- * The handling of the MSR bitmap for L2 guests is done in
- * nested_vmx_merge_msr_bitmap. We should not touch the
- * vmcs02.msr_bitmap here since it gets completely overwritten
- * in the merging.
- */
- vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
- MSR_TYPE_W);
- break;
- case MSR_IA32_ARCH_CAPABILITIES:
- if (!msr_info->host_initiated)
- return 1;
- vmx->arch_capabilities = data;
- break;
- case MSR_IA32_CR_PAT:
- if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
- if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
- return 1;
- vmcs_write64(GUEST_IA32_PAT, data);
- vcpu->arch.pat = data;
- break;
- }
- ret = kvm_set_msr_common(vcpu, msr_info);
- break;
- case MSR_IA32_TSC_ADJUST:
- ret = kvm_set_msr_common(vcpu, msr_info);
- break;
- case MSR_IA32_MCG_EXT_CTL:
- if ((!msr_info->host_initiated &&
- !(to_vmx(vcpu)->msr_ia32_feature_control &
- FEATURE_CONTROL_LMCE)) ||
- (data & ~MCG_EXT_CTL_LMCE_EN))
- return 1;
- vcpu->arch.mcg_ext_ctl = data;
- break;
- case MSR_IA32_FEATURE_CONTROL:
- if (!vmx_feature_control_msr_valid(vcpu, data) ||
- (to_vmx(vcpu)->msr_ia32_feature_control &
- FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
- return 1;
- vmx->msr_ia32_feature_control = data;
- if (msr_info->host_initiated && data == 0)
- vmx_leave_nested(vcpu);
- break;
- case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
- if (!msr_info->host_initiated)
- return 1; /* they are read-only */
- if (!nested_vmx_allowed(vcpu))
- return 1;
- return vmx_set_vmx_msr(vcpu, msr_index, data);
- case MSR_IA32_XSS:
- if (!vmx_xsaves_supported())
- return 1;
- /*
- * The only supported bit as of Skylake is bit 8, but
- * it is not supported on KVM.
- */
- if (data != 0)
- return 1;
- vcpu->arch.ia32_xss = data;
- if (vcpu->arch.ia32_xss != host_xss)
- add_atomic_switch_msr(vmx, MSR_IA32_XSS,
- vcpu->arch.ia32_xss, host_xss, false);
- else
- clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
- break;
- case MSR_TSC_AUX:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
- return 1;
- /* Check reserved bit, higher 32 bits should be zero */
- if ((data >> 32) != 0)
- return 1;
- /* Otherwise falls through */
- default:
- msr = find_msr_entry(vmx, msr_index);
- if (msr) {
- u64 old_msr_data = msr->data;
- msr->data = data;
- if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
- preempt_disable();
- ret = kvm_set_shared_msr(msr->index, msr->data,
- msr->mask);
- preempt_enable();
- if (ret)
- msr->data = old_msr_data;
- }
- break;
- }
- ret = kvm_set_msr_common(vcpu, msr_info);
- }
-
- return ret;
-}
-
-static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
-{
- __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
- switch (reg) {
- case VCPU_REGS_RSP:
- vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
- break;
- case VCPU_REGS_RIP:
- vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
- break;
- case VCPU_EXREG_PDPTR:
- if (enable_ept)
- ept_save_pdptrs(vcpu);
- break;
- default:
- break;
- }
-}
-
-static __init int cpu_has_kvm_support(void)
-{
- return cpu_has_vmx();
-}
-
-static __init int vmx_disabled_by_bios(void)
-{
- u64 msr;
-
- rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
- if (msr & FEATURE_CONTROL_LOCKED) {
- /* launched w/ TXT and VMX disabled */
- if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
- && tboot_enabled())
- return 1;
- /* launched w/o TXT and VMX only enabled w/ TXT */
- if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
- && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
- && !tboot_enabled()) {
- printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
- "activate TXT before enabling KVM\n");
- return 1;
- }
- /* launched w/o TXT and VMX disabled */
- if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
- && !tboot_enabled())
- return 1;
- }
-
- return 0;
-}
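-
-/*
- * The locked-MSR checks above boil down to: VMX is usable only when the
- * enable bit matching the launch environment is set, i.e.
- * FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX when booted through TXT and
- * FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX otherwise.
- */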
-
-static void kvm_cpu_vmxon(u64 addr)
-{
- cr4_set_bits(X86_CR4_VMXE);
- intel_pt_handle_vmx(1);
-
- asm volatile ("vmxon %0" : : "m"(addr));
-}
-
-static int hardware_enable(void)
-{
- int cpu = raw_smp_processor_id();
- u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
- u64 old, test_bits;
-
- if (cr4_read_shadow() & X86_CR4_VMXE)
- return -EBUSY;
-
- /*
- * This can happen if we hot-added a CPU but failed to allocate
- * VP assist page for it.
- */
- if (static_branch_unlikely(&enable_evmcs) &&
- !hv_get_vp_assist_page(cpu))
- return -EFAULT;
-
- INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
- INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
- spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
-
- /*
- * Now we can enable the vmclear operation in kdump
- * since the loaded_vmcss_on_cpu list on this cpu
- * has been initialized.
- *
- * Though the cpu is not in VMX operation now, there is
- * no problem in enabling the vmclear operation, since the
- * loaded_vmcss_on_cpu list is still empty.
- */
- crash_enable_local_vmclear(cpu);
-
- rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
-
- test_bits = FEATURE_CONTROL_LOCKED;
- test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
- if (tboot_enabled())
- test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
-
- if ((old & test_bits) != test_bits) {
- /* enable and lock */
- wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
- }
- kvm_cpu_vmxon(phys_addr);
- if (enable_ept)
- ept_sync_global();
-
- return 0;
-}
-
-static void vmclear_local_loaded_vmcss(void)
-{
- int cpu = raw_smp_processor_id();
- struct loaded_vmcs *v, *n;
-
- list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
- loaded_vmcss_on_cpu_link)
- __loaded_vmcs_clear(v);
-}
-
-/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
- * tricks.
- */
-static void kvm_cpu_vmxoff(void)
-{
- asm volatile (__ex("vmxoff"));
-
- intel_pt_handle_vmx(0);
- cr4_clear_bits(X86_CR4_VMXE);
-}
-
-static void hardware_disable(void)
-{
- vmclear_local_loaded_vmcss();
- kvm_cpu_vmxoff();
-}
-
-static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
- u32 msr, u32 *result)
-{
- u32 vmx_msr_low, vmx_msr_high;
- u32 ctl = ctl_min | ctl_opt;
-
- rdmsr(msr, vmx_msr_low, vmx_msr_high);
-
- ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
- ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
-
- /* Ensure minimum (required) set of control bits are supported. */
- if (ctl_min & ~ctl)
- return -EIO;
-
- *result = ctl;
- return 0;
-}
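-
-/*
- * Worked example: if the capability MSR reads back, say, low = 0x16
- * (must-be-1) and high = 0x401e172 (may-be-1), the requested controls
- * are first masked to the may-be-1 set and then ORed with the
- * must-be-1 set; -EIO results only when a bit from ctl_min was masked
- * away because the CPU cannot set it.
- */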
-
-static __init bool allow_1_setting(u32 msr, u32 ctl)
-{
- u32 vmx_msr_low, vmx_msr_high;
-
- rdmsr(msr, vmx_msr_low, vmx_msr_high);
- return vmx_msr_high & ctl;
-}
-
-static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
-{
- u32 vmx_msr_low, vmx_msr_high;
- u32 min, opt, min2, opt2;
- u32 _pin_based_exec_control = 0;
- u32 _cpu_based_exec_control = 0;
- u32 _cpu_based_2nd_exec_control = 0;
- u32 _vmexit_control = 0;
- u32 _vmentry_control = 0;
-
- memset(vmcs_conf, 0, sizeof(*vmcs_conf));
- min = CPU_BASED_HLT_EXITING |
-#ifdef CONFIG_X86_64
- CPU_BASED_CR8_LOAD_EXITING |
- CPU_BASED_CR8_STORE_EXITING |
-#endif
- CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_UNCOND_IO_EXITING |
- CPU_BASED_MOV_DR_EXITING |
- CPU_BASED_USE_TSC_OFFSETING |
- CPU_BASED_MWAIT_EXITING |
- CPU_BASED_MONITOR_EXITING |
- CPU_BASED_INVLPG_EXITING |
- CPU_BASED_RDPMC_EXITING;
-
- opt = CPU_BASED_TPR_SHADOW |
- CPU_BASED_USE_MSR_BITMAPS |
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
- if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
- &_cpu_based_exec_control) < 0)
- return -EIO;
-#ifdef CONFIG_X86_64
- if (_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
- _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
- ~CPU_BASED_CR8_STORE_EXITING;
-#endif
- if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
- min2 = 0;
- opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
- SECONDARY_EXEC_WBINVD_EXITING |
- SECONDARY_EXEC_ENABLE_VPID |
- SECONDARY_EXEC_ENABLE_EPT |
- SECONDARY_EXEC_UNRESTRICTED_GUEST |
- SECONDARY_EXEC_PAUSE_LOOP_EXITING |
- SECONDARY_EXEC_DESC |
- SECONDARY_EXEC_RDTSCP |
- SECONDARY_EXEC_ENABLE_INVPCID |
- SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
- SECONDARY_EXEC_SHADOW_VMCS |
- SECONDARY_EXEC_XSAVES |
- SECONDARY_EXEC_RDSEED_EXITING |
- SECONDARY_EXEC_RDRAND_EXITING |
- SECONDARY_EXEC_ENABLE_PML |
- SECONDARY_EXEC_TSC_SCALING |
- SECONDARY_EXEC_ENABLE_VMFUNC |
- SECONDARY_EXEC_ENCLS_EXITING;
- if (adjust_vmx_controls(min2, opt2,
- MSR_IA32_VMX_PROCBASED_CTLS2,
- &_cpu_based_2nd_exec_control) < 0)
- return -EIO;
- }
-#ifndef CONFIG_X86_64
- if (!(_cpu_based_2nd_exec_control &
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
- _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
-#endif
-
- if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
- _cpu_based_2nd_exec_control &= ~(
- SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
-
- rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
- &vmx_capability.ept, &vmx_capability.vpid);
-
- if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
- /*
- * CR3 accesses and invlpg don't need to cause VM exits when
- * EPT is enabled.
- */
- _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_INVLPG_EXITING);
- } else if (vmx_capability.ept) {
- vmx_capability.ept = 0;
- pr_warn_once("EPT CAP should not exist if not support "
- "1-setting enable EPT VM-execution control\n");
- }
- if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
- vmx_capability.vpid) {
- vmx_capability.vpid = 0;
- pr_warn_once("VPID CAP should not exist if not support "
- "1-setting enable VPID VM-execution control\n");
- }
-
- min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
-#ifdef CONFIG_X86_64
- min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
-#endif
- opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
- VM_EXIT_CLEAR_BNDCFGS;
- if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
- &_vmexit_control) < 0)
- return -EIO;
-
- min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
- opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
- PIN_BASED_VMX_PREEMPTION_TIMER;
- if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
- &_pin_based_exec_control) < 0)
- return -EIO;
-
- if (cpu_has_broken_vmx_preemption_timer())
- _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
- if (!(_cpu_based_2nd_exec_control &
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
- _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
-
- min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
- opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
- if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
- &_vmentry_control) < 0)
- return -EIO;
-
- rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
-
- /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
- if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
- return -EIO;
-
-#ifdef CONFIG_X86_64
- /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
- if (vmx_msr_high & (1u<<16))
- return -EIO;
-#endif
-
- /* Require Write-Back (WB) memory type for VMCS accesses. */
- if (((vmx_msr_high >> 18) & 15) != 6)
- return -EIO;
-
- vmcs_conf->size = vmx_msr_high & 0x1fff;
- vmcs_conf->order = get_order(vmcs_conf->size);
- vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
-
- vmcs_conf->revision_id = vmx_msr_low;
-
- vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
- vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
- vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
- vmcs_conf->vmexit_ctrl = _vmexit_control;
- vmcs_conf->vmentry_ctrl = _vmentry_control;
-
- if (static_branch_unlikely(&enable_evmcs))
- evmcs_sanitize_exec_ctrls(vmcs_conf);
-
- cpu_has_load_ia32_efer =
- allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
- VM_ENTRY_LOAD_IA32_EFER)
- && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
- VM_EXIT_LOAD_IA32_EFER);
-
- cpu_has_load_perf_global_ctrl =
- allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
- VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
- && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
- VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
-
- /*
- * Some CPUs support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
- * but, due to the errata below, it can't be used. The workaround
- * is to use the MSR-load mechanism to switch IA32_PERF_GLOBAL_CTRL.
- *
- * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
- *
- * AAK155 (model 26)
- * AAP115 (model 30)
- * AAT100 (model 37)
- * BC86,AAY89,BD102 (model 44)
- * BA97 (model 46)
- *
- */
- if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
- switch (boot_cpu_data.x86_model) {
- case 26:
- case 30:
- case 37:
- case 44:
- case 46:
- cpu_has_load_perf_global_ctrl = false;
- printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
- "does not work properly. Using workaround\n");
- break;
- default:
- break;
- }
- }
-
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- rdmsrl(MSR_IA32_XSS, host_xss);
-
- return 0;
-}
-
-static struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu)
-{
- int node = cpu_to_node(cpu);
- struct page *pages;
- struct vmcs *vmcs;
-
- pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
- if (!pages)
- return NULL;
- vmcs = page_address(pages);
- memset(vmcs, 0, vmcs_config.size);
-
- /* KVM supports Enlightened VMCS v1 only */
- if (static_branch_unlikely(&enable_evmcs))
- vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
- else
- vmcs->hdr.revision_id = vmcs_config.revision_id;
-
- if (shadow)
- vmcs->hdr.shadow_vmcs = 1;
- return vmcs;
-}
-
-static void free_vmcs(struct vmcs *vmcs)
-{
- free_pages((unsigned long)vmcs, vmcs_config.order);
-}
-
-/*
- * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
- */
-static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
-{
- if (!loaded_vmcs->vmcs)
- return;
- loaded_vmcs_clear(loaded_vmcs);
- free_vmcs(loaded_vmcs->vmcs);
- loaded_vmcs->vmcs = NULL;
- if (loaded_vmcs->msr_bitmap)
- free_page((unsigned long)loaded_vmcs->msr_bitmap);
- WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
-}
-
-static struct vmcs *alloc_vmcs(bool shadow)
-{
- return alloc_vmcs_cpu(shadow, raw_smp_processor_id());
-}
-
-static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
-{
- loaded_vmcs->vmcs = alloc_vmcs(false);
- if (!loaded_vmcs->vmcs)
- return -ENOMEM;
-
- loaded_vmcs->shadow_vmcs = NULL;
- loaded_vmcs_init(loaded_vmcs);
-
- if (cpu_has_vmx_msr_bitmap()) {
- loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!loaded_vmcs->msr_bitmap)
- goto out_vmcs;
- memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
-
- if (IS_ENABLED(CONFIG_HYPERV) &&
- static_branch_unlikely(&enable_evmcs) &&
- (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
- struct hv_enlightened_vmcs *evmcs =
- (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
-
- evmcs->hv_enlightenments_control.msr_bitmap = 1;
- }
- }
-
- memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
-
- return 0;
-
-out_vmcs:
- free_loaded_vmcs(loaded_vmcs);
- return -ENOMEM;
-}
-
-static void free_kvm_area(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- free_vmcs(per_cpu(vmxarea, cpu));
- per_cpu(vmxarea, cpu) = NULL;
- }
-}
-
-enum vmcs_field_width {
- VMCS_FIELD_WIDTH_U16 = 0,
- VMCS_FIELD_WIDTH_U64 = 1,
- VMCS_FIELD_WIDTH_U32 = 2,
- VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
-};
-
-static inline int vmcs_field_width(unsigned long field)
-{
- if (0x1 & field) /* the *_HIGH fields are all 32 bit */
- return VMCS_FIELD_WIDTH_U32;
-	return (field >> 13) & 0x3;
-}
-
-static inline int vmcs_field_readonly(unsigned long field)
-{
- return (((field >> 10) & 0x3) == 1);
-}
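-
-/*
- * Worked example (illustrative; encodings per the SDM field-encoding
- * layout the two helpers above decode, where bits 14:13 hold the width,
- * bits 11:10 the type, and bit 0 selects the *_HIGH alias of a 64-bit
- * field):
- *
- *	vmcs_field_width(GUEST_ES_BASE);     // 0x6806 >> 13 == 3, natural
- *	vmcs_field_width(VM_EXIT_REASON);    // 0x4402 >> 13 == 2, u32
- *	vmcs_field_readonly(VM_EXIT_REASON); // (0x4402 >> 10) & 3 == 1
- */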
-
-static void init_vmcs_shadow_fields(void)
-{
- int i, j;
-
- for (i = j = 0; i < max_shadow_read_only_fields; i++) {
- u16 field = shadow_read_only_fields[i];
- if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
- (i + 1 == max_shadow_read_only_fields ||
- shadow_read_only_fields[i + 1] != field + 1))
- pr_err("Missing field from shadow_read_only_field %x\n",
- field + 1);
-
- clear_bit(field, vmx_vmread_bitmap);
-#ifdef CONFIG_X86_64
- if (field & 1)
- continue;
-#endif
- if (j < i)
- shadow_read_only_fields[j] = field;
- j++;
- }
- max_shadow_read_only_fields = j;
-
- for (i = j = 0; i < max_shadow_read_write_fields; i++) {
- u16 field = shadow_read_write_fields[i];
- if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
- (i + 1 == max_shadow_read_write_fields ||
- shadow_read_write_fields[i + 1] != field + 1))
- pr_err("Missing field from shadow_read_write_field %x\n",
- field + 1);
-
- /*
- * PML and the preemption timer can be emulated, but the
- * processor cannot vmwrite to fields that don't exist
- * on bare metal.
- */
- switch (field) {
- case GUEST_PML_INDEX:
- if (!cpu_has_vmx_pml())
- continue;
- break;
- case VMX_PREEMPTION_TIMER_VALUE:
- if (!cpu_has_vmx_preemption_timer())
- continue;
- break;
- case GUEST_INTR_STATUS:
- if (!cpu_has_vmx_apicv())
- continue;
- break;
- default:
- break;
- }
-
- clear_bit(field, vmx_vmwrite_bitmap);
- clear_bit(field, vmx_vmread_bitmap);
-#ifdef CONFIG_X86_64
- if (field & 1)
- continue;
-#endif
- if (j < i)
- shadow_read_write_fields[j] = field;
- j++;
- }
- max_shadow_read_write_fields = j;
-}
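-
-/*
- * Illustrative example of the pairing checked above: on 32-bit hosts a
- * 64-bit field is accessed as two halves, so a u64 entry such as
- * EPT_POINTER (0x201a) must be immediately followed in the table by its
- * *_HIGH alias (encoding + 1, here 0x201b). On 64-bit hosts the odd
- * (HIGH) entries are redundant and are dropped by the loops above.
- */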
-
-static __init int alloc_kvm_area(void)
-{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct vmcs *vmcs;
-
- vmcs = alloc_vmcs_cpu(false, cpu);
- if (!vmcs) {
- free_kvm_area();
- return -ENOMEM;
- }
-
- /*
- * When eVMCS is enabled, alloc_vmcs_cpu() sets
- * vmcs->revision_id to KVM_EVMCS_VERSION instead of
- * revision_id reported by MSR_IA32_VMX_BASIC.
- *
-		 * However, even though not explicitly documented by
-		 * TLFS, the VMXArea passed as the VMXON argument should
-		 * still be marked with the revision_id reported by the
-		 * physical CPU.
- */
- if (static_branch_unlikely(&enable_evmcs))
- vmcs->hdr.revision_id = vmcs_config.revision_id;
-
- per_cpu(vmxarea, cpu) = vmcs;
- }
- return 0;
-}
-
-static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
- struct kvm_segment *save)
-{
- if (!emulate_invalid_guest_state) {
- /*
- * CS and SS RPL should be equal during guest entry according
- * to VMX spec, but in reality it is not always so. Since vcpu
- * is in the middle of the transition from real mode to
- * protected mode it is safe to assume that RPL 0 is a good
- * default value.
- */
- if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
- save->selector &= ~SEGMENT_RPL_MASK;
- save->dpl = save->selector & SEGMENT_RPL_MASK;
- save->s = 1;
- }
- vmx_set_segment(vcpu, save, seg);
-}
-
-static void enter_pmode(struct kvm_vcpu *vcpu)
-{
- unsigned long flags;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- /*
-	 * Update the real mode segment cache. It may not be up to date if the
-	 * segment register was written while the vcpu was in guest mode.
- */
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
-
- vmx->rmode.vm86_active = 0;
-
- vmx_segment_cache_clear(vmx);
-
- vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
-
- flags = vmcs_readl(GUEST_RFLAGS);
- flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
- flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
- vmcs_writel(GUEST_RFLAGS, flags);
-
- vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
- (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
-
- update_exception_bitmap(vcpu);
-
- fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
- fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
- fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
- fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
- fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
- fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
-}
-
-static void fix_rmode_seg(int seg, struct kvm_segment *save)
-{
- const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
- struct kvm_segment var = *save;
-
- var.dpl = 0x3;
- if (seg == VCPU_SREG_CS)
- var.type = 0x3;
-
- if (!emulate_invalid_guest_state) {
- var.selector = var.base >> 4;
- var.base = var.base & 0xffff0;
- var.limit = 0xffff;
- var.g = 0;
- var.db = 0;
- var.present = 1;
- var.s = 1;
- var.l = 0;
- var.unusable = 0;
- var.type = 0x3;
- var.avl = 0;
- if (save->base & 0xf)
- printk_once(KERN_WARNING "kvm: segment base is not "
- "paragraph aligned when entering "
- "protected mode (seg=%d)", seg);
- }
-
- vmcs_write16(sf->selector, var.selector);
- vmcs_writel(sf->base, var.base);
- vmcs_write32(sf->limit, var.limit);
- vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
-}
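-
-/*
- * Worked example (illustrative): real-mode addressing computes
- * linear = (selector << 4) + offset, which is what the synthesized
- * segment above must reproduce. For a protected-mode base of 0x12340:
- *
- *	var.selector = 0x12340 >> 4;      // 0x1234
- *	var.base     = 0x12340 & 0xffff0; // 0x12340, already aligned
- *
- * A base such as 0x12345 is not paragraph aligned, cannot be expressed
- * as selector << 4, and triggers the printk_once() warning above.
- */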
-
-static void enter_rmode(struct kvm_vcpu *vcpu)
-{
- unsigned long flags;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
-
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
- vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
-
- vmx->rmode.vm86_active = 1;
-
- /*
- * Very old userspace does not call KVM_SET_TSS_ADDR before entering
- * vcpu. Warn the user that an update is overdue.
- */
- if (!kvm_vmx->tss_addr)
-		printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
- "called before entering vcpu\n");
-
- vmx_segment_cache_clear(vmx);
-
- vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
- vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
- vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
-
- flags = vmcs_readl(GUEST_RFLAGS);
- vmx->rmode.save_rflags = flags;
-
- flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
-
- vmcs_writel(GUEST_RFLAGS, flags);
- vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
- update_exception_bitmap(vcpu);
-
- fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
- fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
- fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
- fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
- fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
- fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
-
- kvm_mmu_reset_context(vcpu);
-}
-
-static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
-
- if (!msr)
- return;
-
- vcpu->arch.efer = efer;
- if (efer & EFER_LMA) {
- vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
- msr->data = efer;
- } else {
- vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
-
- msr->data = efer & ~EFER_LME;
- }
- setup_msrs(vmx);
-}
-
-#ifdef CONFIG_X86_64
-
-static void enter_lmode(struct kvm_vcpu *vcpu)
-{
- u32 guest_tr_ar;
-
- vmx_segment_cache_clear(to_vmx(vcpu));
-
- guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
- if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
-		pr_debug_ratelimited("%s: tss fixup for long mode.\n",
- __func__);
- vmcs_write32(GUEST_TR_AR_BYTES,
- (guest_tr_ar & ~VMX_AR_TYPE_MASK)
- | VMX_AR_TYPE_BUSY_64_TSS);
- }
- vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
-}
-
-static void exit_lmode(struct kvm_vcpu *vcpu)
-{
- vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
- vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
-}
-
-#endif
-
-static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
- bool invalidate_gpa)
-{
- if (enable_ept && (invalidate_gpa || !enable_vpid)) {
- if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
- return;
- ept_sync_context(construct_eptp(vcpu,
- vcpu->arch.mmu->root_hpa));
- } else {
- vpid_sync_context(vpid);
- }
-}
-
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
-{
- __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
-}
-
-static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
-{
- int vpid = to_vmx(vcpu)->vpid;
-
- if (!vpid_sync_vcpu_addr(vpid, addr))
- vpid_sync_context(vpid);
-
- /*
- * If VPIDs are not supported or enabled, then the above is a no-op.
- * But we don't really need a TLB flush in that case anyway, because
- * each VM entry/exit includes an implicit flush when VPID is 0.
- */
-}
-
-static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
-{
- ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
-
- vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
- vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
-}
-
-static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
-{
- if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
- vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
-}
-
-static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
-{
- ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
-
- vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
- vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
-}
-
-static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
-{
- struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-
- if (!test_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_dirty))
- return;
-
- if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
- vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
- vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
- vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
- vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
- }
-}
-
-static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
-{
- struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-
- if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
- mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
- mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
- mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
- mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
- }
-
- __set_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_avail);
- __set_bit(VCPU_EXREG_PDPTR,
- (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
-static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
- u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
- u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
- if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
- SECONDARY_EXEC_UNRESTRICTED_GUEST &&
- nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
- fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
-
- return fixed_bits_valid(val, fixed0, fixed1);
-}
-
-static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
- u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
- u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
-
- return fixed_bits_valid(val, fixed0, fixed1);
-}
-
-static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
- u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
- u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;
-
- return fixed_bits_valid(val, fixed0, fixed1);
-}
-
-/* No difference in the restrictions on guest and host CR4 in VMX operation. */
-#define nested_guest_cr4_valid nested_cr4_valid
-#define nested_host_cr4_valid nested_cr4_valid
-
-static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-
-static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
- unsigned long cr0,
- struct kvm_vcpu *vcpu)
-{
- if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
- vmx_decache_cr3(vcpu);
- if (!(cr0 & X86_CR0_PG)) {
- /* From paging/starting to nonpaging */
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
- vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
- (CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_CR3_STORE_EXITING));
- vcpu->arch.cr0 = cr0;
- vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
- } else if (!is_paging(vcpu)) {
- /* From nonpaging to paging */
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
- vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
- ~(CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_CR3_STORE_EXITING));
- vcpu->arch.cr0 = cr0;
- vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
- }
-
- if (!(cr0 & X86_CR0_WP))
- *hw_cr0 &= ~X86_CR0_WP;
-}
-
-static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long hw_cr0;
-
- hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
- if (enable_unrestricted_guest)
- hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
- else {
- hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
-
- if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
- enter_pmode(vcpu);
-
- if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
- enter_rmode(vcpu);
- }
-
-#ifdef CONFIG_X86_64
- if (vcpu->arch.efer & EFER_LME) {
- if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
- enter_lmode(vcpu);
- if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
- exit_lmode(vcpu);
- }
-#endif
-
- if (enable_ept && !enable_unrestricted_guest)
- ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
-
- vmcs_writel(CR0_READ_SHADOW, cr0);
- vmcs_writel(GUEST_CR0, hw_cr0);
- vcpu->arch.cr0 = cr0;
-
- /* depends on vcpu->arch.cr0 to be set to a new value */
- vmx->emulation_required = emulation_required(vcpu);
-}
-
-static int get_ept_level(struct kvm_vcpu *vcpu)
-{
- if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
- return 5;
- return 4;
-}
-
-static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
-{
- u64 eptp = VMX_EPTP_MT_WB;
-
- eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
-
- if (enable_ept_ad_bits &&
- (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
- eptp |= VMX_EPTP_AD_ENABLE_BIT;
- eptp |= (root_hpa & PAGE_MASK);
-
- return eptp;
-}
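-
-/*
- * Worked example (illustrative): for a 4-level walk with A/D bits
- * enabled and root_hpa == 0x1234000, the helper above builds
- *
- *	eptp = 0x1234000               // bits 51:12, PML4 physical address
- *	     | VMX_EPTP_MT_WB          // bits 2:0 == 6, write-back
- *	     | VMX_EPTP_PWL_4          // bits 5:3 == 3, 4-level page walk
- *	     | VMX_EPTP_AD_ENABLE_BIT; // bit 6, accessed/dirty flags
- *	                               // == 0x123405e
- */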
-
-static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
-{
- struct kvm *kvm = vcpu->kvm;
- unsigned long guest_cr3;
- u64 eptp;
-
- guest_cr3 = cr3;
- if (enable_ept) {
- eptp = construct_eptp(vcpu, cr3);
- vmcs_write64(EPT_POINTER, eptp);
-
- if (kvm_x86_ops->tlb_remote_flush) {
- spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
- to_vmx(vcpu)->ept_pointer = eptp;
- to_kvm_vmx(kvm)->ept_pointers_match
- = EPT_POINTERS_CHECK;
- spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
- }
-
- if (enable_unrestricted_guest || is_paging(vcpu) ||
- is_guest_mode(vcpu))
- guest_cr3 = kvm_read_cr3(vcpu);
- else
- guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
- ept_load_pdptrs(vcpu);
- }
-
- vmcs_writel(GUEST_CR3, guest_cr3);
-}
-
-static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
-{
- /*
- * Pass through host's Machine Check Enable value to hw_cr4, which
- * is in force while we are in guest mode. Do not let guests control
- * this bit, even if host CR4.MCE == 0.
- */
- unsigned long hw_cr4;
-
- hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
- if (enable_unrestricted_guest)
- hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
- else if (to_vmx(vcpu)->rmode.vm86_active)
- hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
- else
- hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
-
- if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
- if (cr4 & X86_CR4_UMIP) {
- vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
- SECONDARY_EXEC_DESC);
- hw_cr4 &= ~X86_CR4_UMIP;
- } else if (!is_guest_mode(vcpu) ||
- !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
- vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
- SECONDARY_EXEC_DESC);
- }
-
- if (cr4 & X86_CR4_VMXE) {
- /*
- * To use VMXON (and later other VMX instructions), a guest
- * must first be able to turn on cr4.VMXE (see handle_vmon()).
- * So basically the check on whether to allow nested VMX
- * is here. We operate under the default treatment of SMM,
- * so VMX cannot be enabled under SMM.
- */
- if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
- return 1;
- }
-
- if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
- return 1;
-
- vcpu->arch.cr4 = cr4;
-
- if (!enable_unrestricted_guest) {
- if (enable_ept) {
- if (!is_paging(vcpu)) {
- hw_cr4 &= ~X86_CR4_PAE;
- hw_cr4 |= X86_CR4_PSE;
- } else if (!(cr4 & X86_CR4_PAE)) {
- hw_cr4 &= ~X86_CR4_PAE;
- }
- }
-
- /*
-		 * SMEP/SMAP/PKU are disabled if the CPU is in non-paging mode
-		 * in hardware. To emulate this behavior, SMEP/SMAP/PKU need
-		 * to be manually disabled when the guest switches to
-		 * non-paging mode.
- *
- * If !enable_unrestricted_guest, the CPU is always running
- * with CR0.PG=1 and CR4 needs to be modified.
- * If enable_unrestricted_guest, the CPU automatically
- * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
- */
- if (!is_paging(vcpu))
- hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
- }
-
- vmcs_writel(CR4_READ_SHADOW, cr4);
- vmcs_writel(GUEST_CR4, hw_cr4);
- return 0;
-}
-
-static void vmx_get_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 ar;
-
- if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
- *var = vmx->rmode.segs[seg];
- if (seg == VCPU_SREG_TR
- || var->selector == vmx_read_guest_seg_selector(vmx, seg))
- return;
- var->base = vmx_read_guest_seg_base(vmx, seg);
- var->selector = vmx_read_guest_seg_selector(vmx, seg);
- return;
- }
- var->base = vmx_read_guest_seg_base(vmx, seg);
- var->limit = vmx_read_guest_seg_limit(vmx, seg);
- var->selector = vmx_read_guest_seg_selector(vmx, seg);
- ar = vmx_read_guest_seg_ar(vmx, seg);
- var->unusable = (ar >> 16) & 1;
- var->type = ar & 15;
- var->s = (ar >> 4) & 1;
- var->dpl = (ar >> 5) & 3;
- /*
-	 * Some userspaces do not preserve the unusable property. Since a
-	 * usable segment has to be present according to the VMX spec, we can
-	 * use the present property to work around the userspace bug by making
-	 * an unusable segment always nonpresent. vmx_segment_access_rights()
-	 * already marks a nonpresent segment as unusable.
- */
- var->present = !var->unusable;
- var->avl = (ar >> 12) & 1;
- var->l = (ar >> 13) & 1;
- var->db = (ar >> 14) & 1;
- var->g = (ar >> 15) & 1;
-}
-
-static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
-{
- struct kvm_segment s;
-
- if (to_vmx(vcpu)->rmode.vm86_active) {
- vmx_get_segment(vcpu, &s, seg);
- return s.base;
- }
- return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
-}
-
-static int vmx_get_cpl(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (unlikely(vmx->rmode.vm86_active))
- return 0;
- else {
- int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
- return VMX_AR_DPL(ar);
- }
-}
-
-static u32 vmx_segment_access_rights(struct kvm_segment *var)
-{
- u32 ar;
-
- if (var->unusable || !var->present)
- ar = 1 << 16;
- else {
- ar = var->type & 15;
- ar |= (var->s & 1) << 4;
- ar |= (var->dpl & 3) << 5;
- ar |= (var->present & 1) << 7;
- ar |= (var->avl & 1) << 12;
- ar |= (var->l & 1) << 13;
- ar |= (var->db & 1) << 14;
- ar |= (var->g & 1) << 15;
- }
-
- return ar;
-}
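-
-/*
- * Worked example (illustrative): a flat 32-bit code segment (type 0xb,
- * S=1, DPL=0, P=1, D/B=1, G=1) packs to
- *
- *	ar = 0xb | (1 << 4) | (1 << 7) | (1 << 14) | (1 << 15);
- *	// == 0xc09b, the familiar flat code-segment AR value
- */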
-
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
-
- vmx_segment_cache_clear(vmx);
-
- if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
- vmx->rmode.segs[seg] = *var;
- if (seg == VCPU_SREG_TR)
- vmcs_write16(sf->selector, var->selector);
- else if (var->s)
- fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
- goto out;
- }
-
- vmcs_writel(sf->base, var->base);
- vmcs_write32(sf->limit, var->limit);
- vmcs_write16(sf->selector, var->selector);
-
- /*
-	 * Fix the "Accessed" bit in the AR field of segment registers for
-	 * older qemu binaries.
-	 * The IA-32 architecture specifies that, at processor reset, the
-	 * "Accessed" bit in the AR field of segment registers is 1, but qemu
-	 * sets it to 0 in its userland code. This causes an invalid-guest-
-	 * state vmexit when "unrestricted guest" mode is turned on.
-	 * A fix for this setup issue in cpu_reset has been pushed to the
-	 * qemu tree; newer qemu binaries with that fix do not need this
-	 * kvm hack.
- */
- if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
- var->type |= 0x1; /* Accessed */
-
- vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
-
-out:
- vmx->emulation_required = emulation_required(vcpu);
-}
-
-static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
-{
- u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
-
- *db = (ar >> 14) & 1;
- *l = (ar >> 13) & 1;
-}
-
-static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
-{
- dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
- dt->address = vmcs_readl(GUEST_IDTR_BASE);
-}
-
-static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
-{
- vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
- vmcs_writel(GUEST_IDTR_BASE, dt->address);
-}
-
-static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
-{
- dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
- dt->address = vmcs_readl(GUEST_GDTR_BASE);
-}
-
-static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
-{
- vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
- vmcs_writel(GUEST_GDTR_BASE, dt->address);
-}
-
-static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
-{
- struct kvm_segment var;
- u32 ar;
-
- vmx_get_segment(vcpu, &var, seg);
- var.dpl = 0x3;
- if (seg == VCPU_SREG_CS)
- var.type = 0x3;
- ar = vmx_segment_access_rights(&var);
-
- if (var.base != (var.selector << 4))
- return false;
- if (var.limit != 0xffff)
- return false;
- if (ar != 0xf3)
- return false;
-
- return true;
-}
-
-static bool code_segment_valid(struct kvm_vcpu *vcpu)
-{
- struct kvm_segment cs;
- unsigned int cs_rpl;
-
- vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
- cs_rpl = cs.selector & SEGMENT_RPL_MASK;
-
- if (cs.unusable)
- return false;
- if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
- return false;
- if (!cs.s)
- return false;
- if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
- if (cs.dpl > cs_rpl)
- return false;
- } else {
- if (cs.dpl != cs_rpl)
- return false;
- }
- if (!cs.present)
- return false;
-
- /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
- return true;
-}
-
-static bool stack_segment_valid(struct kvm_vcpu *vcpu)
-{
- struct kvm_segment ss;
- unsigned int ss_rpl;
-
- vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
- ss_rpl = ss.selector & SEGMENT_RPL_MASK;
-
- if (ss.unusable)
- return true;
- if (ss.type != 3 && ss.type != 7)
- return false;
- if (!ss.s)
- return false;
- if (ss.dpl != ss_rpl) /* DPL != RPL */
- return false;
- if (!ss.present)
- return false;
-
- return true;
-}
-
-static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
-{
- struct kvm_segment var;
- unsigned int rpl;
-
- vmx_get_segment(vcpu, &var, seg);
- rpl = var.selector & SEGMENT_RPL_MASK;
-
- if (var.unusable)
- return true;
- if (!var.s)
- return false;
- if (!var.present)
- return false;
- if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
- if (var.dpl < rpl) /* DPL < RPL */
- return false;
- }
-
-	/*
-	 * TODO: Add other members to kvm_segment_field to allow checking
-	 * for other access rights flags.
-	 */
- return true;
-}
-
-static bool tr_valid(struct kvm_vcpu *vcpu)
-{
- struct kvm_segment tr;
-
- vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
-
- if (tr.unusable)
- return false;
- if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */
- return false;
- if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
- return false;
- if (!tr.present)
- return false;
-
- return true;
-}
-
-static bool ldtr_valid(struct kvm_vcpu *vcpu)
-{
- struct kvm_segment ldtr;
-
- vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
-
- if (ldtr.unusable)
- return true;
- if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */
- return false;
- if (ldtr.type != 2)
- return false;
- if (!ldtr.present)
- return false;
-
- return true;
-}
-
-static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
-{
- struct kvm_segment cs, ss;
-
- vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
- vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
-
- return ((cs.selector & SEGMENT_RPL_MASK) ==
- (ss.selector & SEGMENT_RPL_MASK));
-}
-
-/*
- * Check if the guest state is valid. Returns true if valid, false if not.
- * We assume that registers are always usable.
- */
-static bool guest_state_valid(struct kvm_vcpu *vcpu)
-{
- if (enable_unrestricted_guest)
- return true;
-
- /* real mode guest state checks */
- if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
- if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
- return false;
- if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
- return false;
- if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
- return false;
- if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
- return false;
- if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
- return false;
- if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
- return false;
- } else {
- /* protected mode guest state checks */
- if (!cs_ss_rpl_check(vcpu))
- return false;
- if (!code_segment_valid(vcpu))
- return false;
- if (!stack_segment_valid(vcpu))
- return false;
- if (!data_segment_valid(vcpu, VCPU_SREG_DS))
- return false;
- if (!data_segment_valid(vcpu, VCPU_SREG_ES))
- return false;
- if (!data_segment_valid(vcpu, VCPU_SREG_FS))
- return false;
- if (!data_segment_valid(vcpu, VCPU_SREG_GS))
- return false;
- if (!tr_valid(vcpu))
- return false;
- if (!ldtr_valid(vcpu))
- return false;
- }
- /* TODO:
- * - Add checks on RIP
- * - Add checks on RFLAGS
- */
-
- return true;
-}
-
-static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
-{
- return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
-}
-
-static int init_rmode_tss(struct kvm *kvm)
-{
- gfn_t fn;
- u16 data = 0;
- int idx, r;
-
- idx = srcu_read_lock(&kvm->srcu);
- fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT;
- r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
- if (r < 0)
- goto out;
- data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
- r = kvm_write_guest_page(kvm, fn++, &data,
- TSS_IOPB_BASE_OFFSET, sizeof(u16));
- if (r < 0)
- goto out;
- r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
- if (r < 0)
- goto out;
- r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
- if (r < 0)
- goto out;
- data = ~0;
- r = kvm_write_guest_page(kvm, fn, &data,
- RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
- sizeof(u8));
-out:
- srcu_read_unlock(&kvm->srcu, idx);
- return r;
-}
-
-static int init_rmode_identity_map(struct kvm *kvm)
-{
- struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
- int i, idx, r = 0;
- kvm_pfn_t identity_map_pfn;
- u32 tmp;
-
- /* Protect kvm_vmx->ept_identity_pagetable_done. */
- mutex_lock(&kvm->slots_lock);
-
- if (likely(kvm_vmx->ept_identity_pagetable_done))
- goto out2;
-
- if (!kvm_vmx->ept_identity_map_addr)
- kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
- identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT;
-
- r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
- kvm_vmx->ept_identity_map_addr, PAGE_SIZE);
- if (r < 0)
- goto out2;
-
- idx = srcu_read_lock(&kvm->srcu);
- r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
- if (r < 0)
- goto out;
- /* Set up identity-mapping pagetable for EPT in real mode */
- for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
- tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
- _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
- r = kvm_write_guest_page(kvm, identity_map_pfn,
- &tmp, i * sizeof(tmp), sizeof(tmp));
- if (r < 0)
- goto out;
- }
- kvm_vmx->ept_identity_pagetable_done = true;
-
-out:
- srcu_read_unlock(&kvm->srcu, idx);
-
-out2:
- mutex_unlock(&kvm->slots_lock);
- return r;
-}
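-
-/*
- * Illustrative view of the table built above: entry i is a 4 MiB PSE
- * page mapping guest-physical address i << 22 onto itself, so the 1024
- * entries identity-map the low 4 GiB. For example:
- *
- *	entry[1] == (1 << 22) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
- *		    _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE;
- *	// maps GPA 0x400000..0x7fffff to PA 0x400000..0x7fffff
- */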
-
-static void seg_setup(int seg)
-{
- const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
- unsigned int ar;
-
- vmcs_write16(sf->selector, 0);
- vmcs_writel(sf->base, 0);
- vmcs_write32(sf->limit, 0xffff);
- ar = 0x93;
- if (seg == VCPU_SREG_CS)
- ar |= 0x08; /* code segment */
-
- vmcs_write32(sf->ar_bytes, ar);
-}
-
-static int alloc_apic_access_page(struct kvm *kvm)
-{
- struct page *page;
- int r = 0;
-
- mutex_lock(&kvm->slots_lock);
- if (kvm->arch.apic_access_page_done)
- goto out;
- r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
- APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
- if (r)
- goto out;
-
- page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
- if (is_error_page(page)) {
- r = -EFAULT;
- goto out;
- }
-
- /*
- * Do not pin the page in memory, so that memory hot-unplug
- * is able to migrate it.
- */
- put_page(page);
- kvm->arch.apic_access_page_done = true;
-out:
- mutex_unlock(&kvm->slots_lock);
- return r;
-}
-
-static int allocate_vpid(void)
-{
- int vpid;
-
- if (!enable_vpid)
- return 0;
- spin_lock(&vmx_vpid_lock);
- vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
- if (vpid < VMX_NR_VPIDS)
- __set_bit(vpid, vmx_vpid_bitmap);
- else
- vpid = 0;
- spin_unlock(&vmx_vpid_lock);
- return vpid;
-}
-
-static void free_vpid(int vpid)
-{
- if (!enable_vpid || vpid == 0)
- return;
- spin_lock(&vmx_vpid_lock);
- __clear_bit(vpid, vmx_vpid_bitmap);
- spin_unlock(&vmx_vpid_lock);
-}
-
-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type)
-{
- int f = sizeof(unsigned long);
-
- if (!cpu_has_vmx_msr_bitmap())
- return;
-
- if (static_branch_unlikely(&enable_evmcs))
- evmcs_touch_msr_bitmap();
-
- /*
- * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
- * have the write-low and read-high bitmap offsets the wrong way round.
- * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
- */
- if (msr <= 0x1fff) {
- if (type & MSR_TYPE_R)
- /* read-low */
- __clear_bit(msr, msr_bitmap + 0x000 / f);
-
- if (type & MSR_TYPE_W)
- /* write-low */
- __clear_bit(msr, msr_bitmap + 0x800 / f);
-
- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
- msr &= 0x1fff;
- if (type & MSR_TYPE_R)
- /* read-high */
- __clear_bit(msr, msr_bitmap + 0x400 / f);
-
- if (type & MSR_TYPE_W)
- /* write-high */
- __clear_bit(msr, msr_bitmap + 0xc00 / f);
-
- }
-}
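-
-/*
- * Worked example (illustrative): clearing the write intercept for
- * MSR_STAR (0xc0000081) lands in the write-high quarter of the page:
- *
- *	vmx_disable_intercept_for_msr(msr_bitmap, 0xc0000081, MSR_TYPE_W);
- *	// msr &= 0x1fff -> 0x81; __clear_bit(0x81, msr_bitmap + 0xc00 / f)
- */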
-
-static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type)
-{
- int f = sizeof(unsigned long);
-
- if (!cpu_has_vmx_msr_bitmap())
- return;
-
- if (static_branch_unlikely(&enable_evmcs))
- evmcs_touch_msr_bitmap();
-
- /*
- * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
- * have the write-low and read-high bitmap offsets the wrong way round.
- * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
- */
- if (msr <= 0x1fff) {
- if (type & MSR_TYPE_R)
- /* read-low */
- __set_bit(msr, msr_bitmap + 0x000 / f);
-
- if (type & MSR_TYPE_W)
- /* write-low */
- __set_bit(msr, msr_bitmap + 0x800 / f);
-
- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
- msr &= 0x1fff;
- if (type & MSR_TYPE_R)
- /* read-high */
- __set_bit(msr, msr_bitmap + 0x400 / f);
-
- if (type & MSR_TYPE_W)
- /* write-high */
- __set_bit(msr, msr_bitmap + 0xc00 / f);
-
- }
-}
-
-static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type, bool value)
-{
- if (value)
- vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
- else
- vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
-}
-
-/*
- * If an MSR is allowed by L0, we should check whether it is allowed by L1.
- * The corresponding bit will be cleared unless both L0 and L1 allow it.
- */
-static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
- unsigned long *msr_bitmap_nested,
- u32 msr, int type)
-{
- int f = sizeof(unsigned long);
-
- /*
- * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
- * have the write-low and read-high bitmap offsets the wrong way round.
- * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
- */
- if (msr <= 0x1fff) {
- if (type & MSR_TYPE_R &&
- !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
- /* read-low */
- __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
-
- if (type & MSR_TYPE_W &&
- !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
- /* write-low */
- __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
-
- } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
- msr &= 0x1fff;
- if (type & MSR_TYPE_R &&
- !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
- /* read-high */
- __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
-
- if (type & MSR_TYPE_W &&
- !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
- /* write-high */
- __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
-
- }
-}
-
-static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
-{
- u8 mode = 0;
-
- if (cpu_has_secondary_exec_ctrls() &&
- (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
- SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
- mode |= MSR_BITMAP_MODE_X2APIC;
- if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
- mode |= MSR_BITMAP_MODE_X2APIC_APICV;
- }
-
- return mode;
-}
-
-#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
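-
-/*
- * Worked example (illustrative): the x2APIC MSR for an APIC register is
- * APIC_BASE_MSR (0x800) plus the MMIO register offset divided by 16:
- *
- *	X2APIC_MSR(APIC_TASKPRI); // 0x800 + (0x80 >> 4) == 0x808 (TPR)
- *	X2APIC_MSR(APIC_EOI);     // 0x800 + (0xb0 >> 4) == 0x80b (EOI)
- */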
-
-static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
- u8 mode)
-{
- int msr;
-
- for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
- unsigned word = msr / BITS_PER_LONG;
- msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0;
- msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
- }
-
- if (mode & MSR_BITMAP_MODE_X2APIC) {
- /*
- * TPR reads and writes can be virtualized even if virtual interrupt
- * delivery is not in use.
- */
- vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
- if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
- vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
- vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
- vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
- }
- }
-}
-
-static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
- u8 mode = vmx_msr_bitmap_mode(vcpu);
- u8 changed = mode ^ vmx->msr_bitmap_mode;
-
- if (!changed)
- return;
-
- if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
- vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
-
- vmx->msr_bitmap_mode = mode;
-}
-
-static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu)
-{
- return enable_apicv;
-}
-
-static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- gfn_t gfn;
-
- /*
- * Don't need to mark the APIC access page dirty; it is never
- * written to by the CPU during APIC virtualization.
- */
-
- if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
- gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
- kvm_vcpu_mark_page_dirty(vcpu, gfn);
- }
-
- if (nested_cpu_has_posted_intr(vmcs12)) {
- gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
- kvm_vcpu_mark_page_dirty(vcpu, gfn);
- }
-}
-
-
-static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- int max_irr;
- void *vapic_page;
- u16 status;
-
- if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
- return;
-
- vmx->nested.pi_pending = false;
- if (!pi_test_and_clear_on(vmx->nested.pi_desc))
- return;
-
- max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
- if (max_irr != 256) {
- vapic_page = kmap(vmx->nested.virtual_apic_page);
- __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
- vapic_page, &max_irr);
- kunmap(vmx->nested.virtual_apic_page);
-
- status = vmcs_read16(GUEST_INTR_STATUS);
- if ((u8)max_irr > ((u8)status & 0xff)) {
- status &= ~0xff;
- status |= (u8)max_irr;
- vmcs_write16(GUEST_INTR_STATUS, status);
- }
- }
-
- nested_mark_vmcs12_pages_dirty(vcpu);
-}
-
-static u8 vmx_get_rvi(void)
-{
- return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
-}
-
-static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- void *vapic_page;
- u32 vppr;
- int rvi;
-
- if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
- !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
- WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
- return false;
-
- rvi = vmx_get_rvi();
-
- vapic_page = kmap(vmx->nested.virtual_apic_page);
- vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
- kunmap(vmx->nested.virtual_apic_page);
-
- return ((rvi & 0xf0) > (vppr & 0xf0));
-}
-
-static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
- bool nested)
-{
-#ifdef CONFIG_SMP
- int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
-
- if (vcpu->mode == IN_GUEST_MODE) {
- /*
-		 * The vector of the interrupt to be delivered to the vcpu
-		 * has already been set in the PIR before this function.
-		 *
-		 * The following cases can be reached in this block, and
-		 * we always send a notification event in all of them, as
-		 * explained below.
-		 *
-		 * Case 1: the vcpu stays in non-root mode. Sending a
-		 * notification event posts the interrupt to the vcpu.
-		 *
-		 * Case 2: the vcpu exits to root mode and is still
-		 * runnable. The PIR will be synced to the vIRR before
-		 * the next vcpu entry. Sending a notification event in
-		 * this case has no effect, as the vcpu is no longer in
-		 * non-root mode.
-		 *
-		 * Case 3: the vcpu exits to root mode and is blocked.
-		 * vcpu_block() has already synced the PIR to the vIRR and
-		 * never blocks the vcpu if the vIRR is not clear.
-		 * Therefore, a blocked vcpu here is not waiting for any
-		 * requested interrupt in the PIR, and sending a
-		 * notification event, which has no effect, is safe.
- */
-
- apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
- return true;
- }
-#endif
- return false;
-}
-
-static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
- int vector)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (is_guest_mode(vcpu) &&
- vector == vmx->nested.posted_intr_nv) {
- /*
-		 * If a posted interrupt is not recognized by hardware,
-		 * we will deliver it at the next vmentry.
- */
- vmx->nested.pi_pending = true;
- kvm_make_request(KVM_REQ_EVENT, vcpu);
- /* the PIR and ON have been set by L1. */
- if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
- kvm_vcpu_kick(vcpu);
- return 0;
- }
- return -1;
-}
-
-/*
- * Send an interrupt to a vcpu via the posted-interrupt mechanism.
- * 1. If the target vcpu is running (non-root mode), send the posted
- * interrupt notification to the vcpu and hardware will sync the PIR to
- * the vIRR atomically.
- * 2. If the target vcpu isn't running (root mode), kick it to pick up
- * the interrupt from the PIR at the next vmentry.
- */
-static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- int r;
-
- r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
- if (!r)
- return;
-
- if (pi_test_and_set_pir(vector, &vmx->pi_desc))
- return;
-
- /* If a previous notification has sent the IPI, nothing to do. */
- if (pi_test_and_set_on(&vmx->pi_desc))
- return;
-
- if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
- kvm_vcpu_kick(vcpu);
-}
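-
-/*
- * Illustrative summary of the handshake above (no new behavior): the
- * posted-interrupt descriptor is the contract with hardware,
- *
- *	pi_test_and_set_pir(vector, &vmx->pi_desc); // 1. publish vector
- *	pi_test_and_set_on(&vmx->pi_desc);          // 2. set outstanding-
- *	                                            //    notification flag
- *	kvm_vcpu_trigger_posted_interrupt(...);     // 3. at most one IPI
- *
- * and hardware (or the next vmentry) atomically merges the PIR into the
- * guest's vIRR.
- */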
-
-/*
- * Set up the vmcs's constant host-state fields, i.e., host-state fields that
- * will not change in the lifetime of the guest.
- * Note that host-state that does change is set elsewhere. E.g., host-state
- * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
- */
-static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
-{
- u32 low32, high32;
- unsigned long tmpl;
- struct desc_ptr dt;
- unsigned long cr0, cr3, cr4;
-
- cr0 = read_cr0();
- WARN_ON(cr0 & X86_CR0_TS);
- vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */
-
- /*
- * Save the most likely value for this task's CR3 in the VMCS.
- * We can't use __get_current_cr3_fast() because we're not atomic.
- */
- cr3 = __read_cr3();
- vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
- vmx->loaded_vmcs->host_state.cr3 = cr3;
-
- /* Save the most likely value for this task's CR4 in the VMCS. */
- cr4 = cr4_read_shadow();
- vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
- vmx->loaded_vmcs->host_state.cr4 = cr4;
-
- vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
-#ifdef CONFIG_X86_64
- /*
- * Load null selectors, so we can avoid reloading them in
- * vmx_prepare_switch_to_host(), in case userspace uses
- * the null selectors too (the expected case).
- */
- vmcs_write16(HOST_DS_SELECTOR, 0);
- vmcs_write16(HOST_ES_SELECTOR, 0);
-#else
- vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
- vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
-#endif
- vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
- vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
-
- store_idt(&dt);
- vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
- vmx->host_idt_base = dt.address;
-
- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
-
- rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
- vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
- rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
- vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
-
- if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
- rdmsr(MSR_IA32_CR_PAT, low32, high32);
- vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
- }
-
- if (cpu_has_load_ia32_efer)
- vmcs_write64(HOST_IA32_EFER, host_efer);
-}
-
-static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
-{
- vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
- if (enable_ept)
- vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
- if (is_guest_mode(&vmx->vcpu))
- vmx->vcpu.arch.cr4_guest_owned_bits &=
- ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
- vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
-}
-
-static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
-{
- u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
-
- if (!kvm_vcpu_apicv_active(&vmx->vcpu))
- pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
-
- if (!enable_vnmi)
- pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
-
- /* Enable the preemption timer dynamically */
- pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
- return pin_based_exec_ctrl;
-}
-
-static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
- if (cpu_has_secondary_exec_ctrls()) {
- if (kvm_vcpu_apicv_active(vcpu))
- vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
- SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
- else
- vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
- SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
- }
-
- if (cpu_has_vmx_msr_bitmap())
- vmx_update_msr_bitmap(vcpu);
-}
-
-static u32 vmx_exec_control(struct vcpu_vmx *vmx)
-{
- u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
-
- if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
- exec_control &= ~CPU_BASED_MOV_DR_EXITING;
-
- if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
- exec_control &= ~CPU_BASED_TPR_SHADOW;
-#ifdef CONFIG_X86_64
- exec_control |= CPU_BASED_CR8_STORE_EXITING |
- CPU_BASED_CR8_LOAD_EXITING;
-#endif
- }
- if (!enable_ept)
- exec_control |= CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_INVLPG_EXITING;
- if (kvm_mwait_in_guest(vmx->vcpu.kvm))
- exec_control &= ~(CPU_BASED_MWAIT_EXITING |
- CPU_BASED_MONITOR_EXITING);
- if (kvm_hlt_in_guest(vmx->vcpu.kvm))
- exec_control &= ~CPU_BASED_HLT_EXITING;
- return exec_control;
-}
-
-static bool vmx_rdrand_supported(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_RDRAND_EXITING;
-}
-
-static bool vmx_rdseed_supported(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_RDSEED_EXITING;
-}
-
-static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
-{
- struct kvm_vcpu *vcpu = &vmx->vcpu;
-
- u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
-
- if (!cpu_need_virtualize_apic_accesses(vcpu))
- exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
- if (vmx->vpid == 0)
- exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
- if (!enable_ept) {
- exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
- enable_unrestricted_guest = 0;
- }
- if (!enable_unrestricted_guest)
- exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
- if (kvm_pause_in_guest(vmx->vcpu.kvm))
- exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
- if (!kvm_vcpu_apicv_active(vcpu))
- exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
- exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
-
-	/*
-	 * SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
-	 * in vmx_set_cr4.
-	 */
- exec_control &= ~SECONDARY_EXEC_DESC;
-
-	/*
-	 * SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
-	 * (handle_vmptrld). We can NOT enable shadow_vmcs here because
-	 * we don't yet have a current VMCS12.
-	 */
- exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
-
- if (!enable_pml)
- exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
-
- if (vmx_xsaves_supported()) {
- /* Exposing XSAVES only when XSAVE is exposed */
- bool xsaves_enabled =
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
-
- if (!xsaves_enabled)
- exec_control &= ~SECONDARY_EXEC_XSAVES;
-
- if (nested) {
- if (xsaves_enabled)
- vmx->nested.msrs.secondary_ctls_high |=
- SECONDARY_EXEC_XSAVES;
- else
- vmx->nested.msrs.secondary_ctls_high &=
- ~SECONDARY_EXEC_XSAVES;
- }
- }
-
- if (vmx_rdtscp_supported()) {
- bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP);
- if (!rdtscp_enabled)
- exec_control &= ~SECONDARY_EXEC_RDTSCP;
-
- if (nested) {
- if (rdtscp_enabled)
- vmx->nested.msrs.secondary_ctls_high |=
- SECONDARY_EXEC_RDTSCP;
- else
- vmx->nested.msrs.secondary_ctls_high &=
- ~SECONDARY_EXEC_RDTSCP;
- }
- }
-
- if (vmx_invpcid_supported()) {
- /* Exposing INVPCID only when PCID is exposed */
- bool invpcid_enabled =
- guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) &&
- guest_cpuid_has(vcpu, X86_FEATURE_PCID);
-
- if (!invpcid_enabled) {
- exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
- guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID);
- }
-
- if (nested) {
- if (invpcid_enabled)
- vmx->nested.msrs.secondary_ctls_high |=
- SECONDARY_EXEC_ENABLE_INVPCID;
- else
- vmx->nested.msrs.secondary_ctls_high &=
- ~SECONDARY_EXEC_ENABLE_INVPCID;
- }
- }
-
- if (vmx_rdrand_supported()) {
- bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND);
- if (rdrand_enabled)
- exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING;
-
- if (nested) {
- if (rdrand_enabled)
- vmx->nested.msrs.secondary_ctls_high |=
- SECONDARY_EXEC_RDRAND_EXITING;
- else
- vmx->nested.msrs.secondary_ctls_high &=
- ~SECONDARY_EXEC_RDRAND_EXITING;
- }
- }
-
- if (vmx_rdseed_supported()) {
- bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED);
- if (rdseed_enabled)
- exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING;
-
- if (nested) {
- if (rdseed_enabled)
- vmx->nested.msrs.secondary_ctls_high |=
- SECONDARY_EXEC_RDSEED_EXITING;
- else
- vmx->nested.msrs.secondary_ctls_high &=
- ~SECONDARY_EXEC_RDSEED_EXITING;
- }
- }
-
- vmx->secondary_exec_control = exec_control;
-}
-
-static void ept_set_mmio_spte_mask(void)
-{
- /*
- * EPT Misconfigurations can be generated if the value of bits 2:0
- * of an EPT paging-structure entry is 110b (write/execute).
- */
- kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
- VMX_EPT_MISCONFIG_WX_VALUE);
-}
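-
-/*
- * Worked example (illustrative): an MMIO SPTE built with this mask
- * satisfies
- *
- *	(spte & VMX_EPT_RWX_MASK) == VMX_EPT_MISCONFIG_WX_VALUE; // 110b
- *
- * i.e. W=1, X=1 with R=0, an architecturally illegal combination, so a
- * guest access takes an EPT-misconfiguration exit that KVM recognizes
- * as MMIO.
- */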
-
-#define VMX_XSS_EXIT_BITMAP 0
-/*
- * Sets up the vmcs for emulated real mode.
- */
-static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
-{
- int i;
-
- if (enable_shadow_vmcs) {
- /*
- * At vCPU creation, "VMWRITE to any supported field
- * in the VMCS" is supported, so use the more
- * permissive vmx_vmread_bitmap to specify both read
- * and write permissions for the shadow VMCS.
- */
- vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
- vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap));
- }
- if (cpu_has_vmx_msr_bitmap())
- vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
-
- vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
-
- /* Control */
- vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
- vmx->hv_deadline_tsc = -1;
-
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
-
- if (cpu_has_secondary_exec_ctrls()) {
- vmx_compute_secondary_exec_control(vmx);
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
- vmx->secondary_exec_control);
- }
-
- if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
- vmcs_write64(EOI_EXIT_BITMAP0, 0);
- vmcs_write64(EOI_EXIT_BITMAP1, 0);
- vmcs_write64(EOI_EXIT_BITMAP2, 0);
- vmcs_write64(EOI_EXIT_BITMAP3, 0);
-
- vmcs_write16(GUEST_INTR_STATUS, 0);
-
- vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
- vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
- }
-
- if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
- vmcs_write32(PLE_GAP, ple_gap);
- vmx->ple_window = ple_window;
- vmx->ple_window_dirty = true;
- }
-
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
- vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
-
- vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
- vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
- vmx_set_constant_host_state(vmx);
- vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
- vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
-
- if (cpu_has_vmx_vmfunc())
- vmcs_write64(VM_FUNCTION_CONTROL, 0);
-
- vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
-
- if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
- vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
-
- for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
- u32 index = vmx_msr_index[i];
- u32 data_low, data_high;
- int j = vmx->nmsrs;
-
- if (rdmsr_safe(index, &data_low, &data_high) < 0)
- continue;
- if (wrmsr_safe(index, data_low, data_high) < 0)
- continue;
- vmx->guest_msrs[j].index = i;
- vmx->guest_msrs[j].data = 0;
- vmx->guest_msrs[j].mask = -1ull;
- ++vmx->nmsrs;
- }
-
- vmx->arch_capabilities = kvm_get_arch_capabilities();
-
- vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
-
- /* 22.2.1, 20.8.1 */
- vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
-
- vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
- vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
-
- set_cr4_guest_host_mask(vmx);
-
- if (vmx_xsaves_supported())
- vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
-
- if (enable_pml) {
- vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
- vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
- }
-
- if (cpu_has_vmx_encls_vmexit())
- vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
-}
-
-static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct msr_data apic_base_msr;
- u64 cr0;
-
- vmx->rmode.vm86_active = 0;
- vmx->spec_ctrl = 0;
-
- vcpu->arch.microcode_version = 0x100000000ULL;
- vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
- kvm_set_cr8(vcpu, 0);
-
- if (!init_event) {
- apic_base_msr.data = APIC_DEFAULT_PHYS_BASE |
- MSR_IA32_APICBASE_ENABLE;
- if (kvm_vcpu_is_reset_bsp(vcpu))
- apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
- apic_base_msr.host_initiated = true;
- kvm_set_apic_base(vcpu, &apic_base_msr);
- }
-
- vmx_segment_cache_clear(vmx);
-
- seg_setup(VCPU_SREG_CS);
- vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
- vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);
-
- seg_setup(VCPU_SREG_DS);
- seg_setup(VCPU_SREG_ES);
- seg_setup(VCPU_SREG_FS);
- seg_setup(VCPU_SREG_GS);
- seg_setup(VCPU_SREG_SS);
-
- vmcs_write16(GUEST_TR_SELECTOR, 0);
- vmcs_writel(GUEST_TR_BASE, 0);
- vmcs_write32(GUEST_TR_LIMIT, 0xffff);
- vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
-
- vmcs_write16(GUEST_LDTR_SELECTOR, 0);
- vmcs_writel(GUEST_LDTR_BASE, 0);
- vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
- vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
-
- if (!init_event) {
- vmcs_write32(GUEST_SYSENTER_CS, 0);
- vmcs_writel(GUEST_SYSENTER_ESP, 0);
- vmcs_writel(GUEST_SYSENTER_EIP, 0);
- vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
- }
-
- kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
- kvm_rip_write(vcpu, 0xfff0);
-
- vmcs_writel(GUEST_GDTR_BASE, 0);
- vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
-
- vmcs_writel(GUEST_IDTR_BASE, 0);
- vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
-
- vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
- vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
- vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
- if (kvm_mpx_supported())
- vmcs_write64(GUEST_BNDCFGS, 0);
-
- setup_msrs(vmx);
-
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
-
- if (cpu_has_vmx_tpr_shadow() && !init_event) {
- vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
- if (cpu_need_tpr_shadow(vcpu))
- vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
- __pa(vcpu->arch.apic->regs));
- vmcs_write32(TPR_THRESHOLD, 0);
- }
-
- kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
-
- if (vmx->vpid != 0)
- vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
-
- cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
- vmx->vcpu.arch.cr0 = cr0;
- vmx_set_cr0(vcpu, cr0); /* enter rmode */
- vmx_set_cr4(vcpu, 0);
- vmx_set_efer(vcpu, 0);
-
- update_exception_bitmap(vcpu);
-
- vpid_sync_context(vmx->vpid);
- if (init_event)
- vmx_clear_hlt(vcpu);
-}
-
-/*
- * In nested virtualization, check if L1 asked to exit on external interrupts.
- * For most existing hypervisors, this will always return true.
- */
-static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
-{
- return get_vmcs12(vcpu)->pin_based_vm_exec_control &
- PIN_BASED_EXT_INTR_MASK;
-}
-
-/*
- * In nested virtualization, check if L1 has set VM_EXIT_ACK_INTR_ON_EXIT.
- */
-static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
-{
- return get_vmcs12(vcpu)->vm_exit_controls &
- VM_EXIT_ACK_INTR_ON_EXIT;
-}
-
-static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
-{
- return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
-}
-
-static void enable_irq_window(struct kvm_vcpu *vcpu)
-{
- vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
- CPU_BASED_VIRTUAL_INTR_PENDING);
-}
-
-static void enable_nmi_window(struct kvm_vcpu *vcpu)
-{
- if (!enable_vnmi ||
- vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
- enable_irq_window(vcpu);
- return;
- }
-
- vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
- CPU_BASED_VIRTUAL_NMI_PENDING);
-}
-
-static void vmx_inject_irq(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- uint32_t intr;
- int irq = vcpu->arch.interrupt.nr;
-
- trace_kvm_inj_virq(irq);
-
- ++vcpu->stat.irq_injections;
- if (vmx->rmode.vm86_active) {
- int inc_eip = 0;
- if (vcpu->arch.interrupt.soft)
- inc_eip = vcpu->arch.event_exit_inst_len;
- if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
- kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
- return;
- }
- intr = irq | INTR_INFO_VALID_MASK;
- if (vcpu->arch.interrupt.soft) {
- intr |= INTR_TYPE_SOFT_INTR;
- vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
- vmx->vcpu.arch.event_exit_inst_len);
- } else
- intr |= INTR_TYPE_EXT_INTR;
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
-
- vmx_clear_hlt(vcpu);
-}
-
-static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (!enable_vnmi) {
- /*
- * Tracking the NMI-blocked state in software is built upon
- * finding the next open IRQ window. This, in turn, depends on
- * well-behaving guests: They have to keep IRQs disabled at
- * least as long as the NMI handler runs. Otherwise we may
- * cause NMI nesting, maybe breaking the guest. But as this is
- * highly unlikely, we can live with the residual risk.
- */
- vmx->loaded_vmcs->soft_vnmi_blocked = 1;
- vmx->loaded_vmcs->vnmi_blocked_time = 0;
- }
-
- ++vcpu->stat.nmi_injections;
- vmx->loaded_vmcs->nmi_known_unmasked = false;
-
- if (vmx->rmode.vm86_active) {
- if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
- kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
- return;
- }
-
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
- INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
-
- vmx_clear_hlt(vcpu);
-}
-
-static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- bool masked;
-
- if (!enable_vnmi)
- return vmx->loaded_vmcs->soft_vnmi_blocked;
- if (vmx->loaded_vmcs->nmi_known_unmasked)
- return false;
- masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
- vmx->loaded_vmcs->nmi_known_unmasked = !masked;
- return masked;
-}
-
-static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (!enable_vnmi) {
- if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
- vmx->loaded_vmcs->soft_vnmi_blocked = masked;
- vmx->loaded_vmcs->vnmi_blocked_time = 0;
- }
- } else {
- vmx->loaded_vmcs->nmi_known_unmasked = !masked;
- if (masked)
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
- else
- vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
- }
-}
-
-static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
-{
- if (to_vmx(vcpu)->nested.nested_run_pending)
- return 0;
-
- if (!enable_vnmi &&
- to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
- return 0;
-
- return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
- (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
- | GUEST_INTR_STATE_NMI));
-}
-
-static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
-{
- return (!to_vmx(vcpu)->nested.nested_run_pending &&
- vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
- !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
- (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
-}
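
vmx_interrupt_allowed() condenses the architectural injectability test: RFLAGS.IF must be 1 and no STI or MOV-SS shadow may be pending. A stand-alone sketch of the same predicate, using the architectural bit positions (IF is RFLAGS bit 9; STI and MOV-SS blocking are bits 0 and 1 of the interruptibility state):

#include <stdbool.h>
#include <stdint.h>

#define RFLAGS_IF		(1ull << 9)	/* interrupts enabled */
#define INTR_STATE_STI		(1u << 0)	/* blocked by STI shadow */
#define INTR_STATE_MOV_SS	(1u << 1)	/* blocked by MOV SS shadow */

/* An external interrupt is injectable only if IF=1 and no
 * one-instruction shadow (STI or MOV SS) is in effect. */
static bool interrupt_allowed(uint64_t rflags, uint32_t intr_state)
{
	return (rflags & RFLAGS_IF) &&
	       !(intr_state & (INTR_STATE_STI | INTR_STATE_MOV_SS));
}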
-
-static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
-{
- int ret;
-
- if (enable_unrestricted_guest)
- return 0;
-
- ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
- PAGE_SIZE * 3);
- if (ret)
- return ret;
- to_kvm_vmx(kvm)->tss_addr = addr;
- return init_rmode_tss(kvm);
-}
-
-static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
-{
- to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
- return 0;
-}
-
-static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
-{
- switch (vec) {
- case BP_VECTOR:
- /*
- * Update instruction length as we may reinject the exception
- * from user space while in guest debugging mode.
- */
- to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
- vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
- return false;
- /* fall through */
- case DB_VECTOR:
- if (vcpu->guest_debug &
- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
- return false;
- /* fall through */
- case DE_VECTOR:
- case OF_VECTOR:
- case BR_VECTOR:
- case UD_VECTOR:
- case DF_VECTOR:
- case SS_VECTOR:
- case GP_VECTOR:
- case MF_VECTOR:
-		return true;
- }
- return false;
-}
-
-static int handle_rmode_exception(struct kvm_vcpu *vcpu,
- int vec, u32 err_code)
-{
- /*
-	 * An instruction with the address-size override prefix (opcode 0x67)
-	 * causes an #SS fault with error code 0 in VM86 mode.
- */
- if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
- if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
- if (vcpu->arch.halt_request) {
- vcpu->arch.halt_request = 0;
- return kvm_vcpu_halt(vcpu);
- }
- return 1;
- }
- return 0;
- }
-
- /*
- * Forward all other exceptions that are valid in real mode.
- * FIXME: Breaks guest debugging in real mode, needs to be fixed with
- * the required debugging infrastructure rework.
- */
- kvm_queue_exception(vcpu, vec);
- return 1;
-}
-
-/*
- * Trigger machine check on the host. We assume all the MSRs are already set up
- * by the CPU and that we still run on the same CPU as the MCE occurred on.
- * We pass a fake environment to the machine check handler because we want
- * the guest always to be treated like user space, no matter what context
- * it used internally.
- */
-static void kvm_machine_check(void)
-{
-#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
- struct pt_regs regs = {
- .cs = 3, /* Fake ring 3 no matter what the guest ran on */
- .flags = X86_EFLAGS_IF,
- };
-
- do_machine_check(&regs, 0);
-#endif
-}
-
-static int handle_machine_check(struct kvm_vcpu *vcpu)
-{
- /* already handled by vcpu_run */
- return 1;
-}
-
-static int handle_exception(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct kvm_run *kvm_run = vcpu->run;
- u32 intr_info, ex_no, error_code;
- unsigned long cr2, rip, dr6;
- u32 vect_info;
- enum emulation_result er;
-
- vect_info = vmx->idt_vectoring_info;
- intr_info = vmx->exit_intr_info;
-
- if (is_machine_check(intr_info))
- return handle_machine_check(vcpu);
-
- if (is_nmi(intr_info))
- return 1; /* already handled by vmx_vcpu_run() */
-
- if (is_invalid_opcode(intr_info))
- return handle_ud(vcpu);
-
- error_code = 0;
- if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
- error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
-
- if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
- WARN_ON_ONCE(!enable_vmware_backdoor);
- er = kvm_emulate_instruction(vcpu,
- EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
- if (er == EMULATE_USER_EXIT)
- return 0;
- else if (er != EMULATE_DONE)
- kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
- return 1;
- }
-
- /*
-	 * A #PF with PFEC.RSVD = 1 indicates the guest is accessing
-	 * MMIO; it is better to report an internal error.
- * See the comments in vmx_handle_exit.
- */
- if ((vect_info & VECTORING_INFO_VALID_MASK) &&
- !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
- vcpu->run->internal.ndata = 3;
- vcpu->run->internal.data[0] = vect_info;
- vcpu->run->internal.data[1] = intr_info;
- vcpu->run->internal.data[2] = error_code;
- return 0;
- }
-
- if (is_page_fault(intr_info)) {
- cr2 = vmcs_readl(EXIT_QUALIFICATION);
- /* EPT won't cause page fault directly */
- WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
- return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
- }
-
- ex_no = intr_info & INTR_INFO_VECTOR_MASK;
-
- if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
- return handle_rmode_exception(vcpu, ex_no, error_code);
-
- switch (ex_no) {
- case AC_VECTOR:
- kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
- return 1;
- case DB_VECTOR:
- dr6 = vmcs_readl(EXIT_QUALIFICATION);
- if (!(vcpu->guest_debug &
- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
- vcpu->arch.dr6 &= ~15;
- vcpu->arch.dr6 |= dr6 | DR6_RTM;
- if (is_icebp(intr_info))
- skip_emulated_instruction(vcpu);
-
- kvm_queue_exception(vcpu, DB_VECTOR);
- return 1;
- }
- kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
- kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
- /* fall through */
- case BP_VECTOR:
- /*
- * Update instruction length as we may reinject #BP from
- * user space while in guest debugging mode. Reading it for
-		 * #DB as well causes no harm; it is not used in that case.
- */
- vmx->vcpu.arch.event_exit_inst_len =
- vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
- kvm_run->exit_reason = KVM_EXIT_DEBUG;
- rip = kvm_rip_read(vcpu);
- kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
- kvm_run->debug.arch.exception = ex_no;
- break;
- default:
- kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
- kvm_run->ex.exception = ex_no;
- kvm_run->ex.error_code = error_code;
- break;
- }
- return 0;
-}
-
-static int handle_external_interrupt(struct kvm_vcpu *vcpu)
-{
- ++vcpu->stat.irq_exits;
- return 1;
-}
-
-static int handle_triple_fault(struct kvm_vcpu *vcpu)
-{
- vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
- vcpu->mmio_needed = 0;
- return 0;
-}
-
-static int handle_io(struct kvm_vcpu *vcpu)
-{
- unsigned long exit_qualification;
- int size, in, string;
- unsigned port;
-
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- string = (exit_qualification & 16) != 0;
-
- ++vcpu->stat.io_exits;
-
- if (string)
- return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
-
- port = exit_qualification >> 16;
- size = (exit_qualification & 7) + 1;
- in = (exit_qualification & 8) != 0;
-
- return kvm_fast_pio(vcpu, size, port, in);
-}
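
The masks in handle_io() decode the I/O-instruction exit qualification. A stand-alone sketch of the same layout, mirroring the shifts used above (size-1 in bits 2:0, direction in bit 3, string flag in bit 4, port in bits 31:16):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct io_exit {
	unsigned port;	/* bits 31:16 */
	int size;	/* bits 2:0 hold size-1: 1, 2 or 4 bytes */
	bool in;	/* bit 3: 1 = IN, 0 = OUT */
	bool string;	/* bit 4: INS/OUTS */
};

static struct io_exit decode_io_exit(uint64_t q)
{
	struct io_exit e = {
		.port	= (unsigned)(q >> 16) & 0xffff,
		.size	= (int)(q & 7) + 1,
		.in	= (q & 8) != 0,
		.string	= (q & 16) != 0,
	};
	return e;
}

int main(void)
{
	/* 2-byte OUT to port 0x371. */
	struct io_exit e = decode_io_exit(0x03710001);

	printf("port=0x%x size=%d in=%d string=%d\n",
	       e.port, e.size, e.in, e.string);
	return 0;
}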
-
-static void
-vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
-{
- /*
- * Patch in the VMCALL instruction:
- */
- hypercall[0] = 0x0f;
- hypercall[1] = 0x01;
- hypercall[2] = 0xc1;
-}
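
The three bytes written above are the VMCALL encoding (0f 01 c1). As an aside: AMD's SVM equivalent, VMMCALL, encodes as 0f 01 d9, and patching the guest's hypercall site to the host-native encoding is what lets one guest image hypercall on either vendor's hardware.

/* Opcode encodings, for reference: */
static const unsigned char vmcall_insn[]  = { 0x0f, 0x01, 0xc1 };	/* Intel VMCALL */
static const unsigned char vmmcall_insn[] = { 0x0f, 0x01, 0xd9 };	/* AMD VMMCALL */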
-
-/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
-static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
-{
- if (is_guest_mode(vcpu)) {
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- unsigned long orig_val = val;
-
- /*
- * We get here when L2 changed cr0 in a way that did not change
- * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
- * but did change L0 shadowed bits. So we first calculate the
- * effective cr0 value that L1 would like to write into the
- * hardware. It consists of the L2-owned bits from the new
- * value combined with the L1-owned bits from L1's guest_cr0.
- */
- val = (val & ~vmcs12->cr0_guest_host_mask) |
- (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
-
- if (!nested_guest_cr0_valid(vcpu, val))
- return 1;
-
- if (kvm_set_cr0(vcpu, val))
- return 1;
- vmcs_writel(CR0_READ_SHADOW, orig_val);
- return 0;
- } else {
- if (to_vmx(vcpu)->nested.vmxon &&
- !nested_host_cr0_valid(vcpu, val))
- return 1;
-
- return kvm_set_cr0(vcpu, val);
- }
-}
-
-static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
-{
- if (is_guest_mode(vcpu)) {
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- unsigned long orig_val = val;
-
- /* analogously to handle_set_cr0 */
- val = (val & ~vmcs12->cr4_guest_host_mask) |
- (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
- if (kvm_set_cr4(vcpu, val))
- return 1;
- vmcs_writel(CR4_READ_SHADOW, orig_val);
- return 0;
- } else
- return kvm_set_cr4(vcpu, val);
-}
-
-static int handle_desc(struct kvm_vcpu *vcpu)
-{
- WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
- return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
-}
-
-static int handle_cr(struct kvm_vcpu *vcpu)
-{
- unsigned long exit_qualification, val;
- int cr;
- int reg;
- int err;
- int ret;
-
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- cr = exit_qualification & 15;
- reg = (exit_qualification >> 8) & 15;
- switch ((exit_qualification >> 4) & 3) {
- case 0: /* mov to cr */
- val = kvm_register_readl(vcpu, reg);
- trace_kvm_cr_write(cr, val);
- switch (cr) {
- case 0:
- err = handle_set_cr0(vcpu, val);
- return kvm_complete_insn_gp(vcpu, err);
- case 3:
- WARN_ON_ONCE(enable_unrestricted_guest);
- err = kvm_set_cr3(vcpu, val);
- return kvm_complete_insn_gp(vcpu, err);
- case 4:
- err = handle_set_cr4(vcpu, val);
- return kvm_complete_insn_gp(vcpu, err);
- case 8: {
- u8 cr8_prev = kvm_get_cr8(vcpu);
- u8 cr8 = (u8)val;
- err = kvm_set_cr8(vcpu, cr8);
- ret = kvm_complete_insn_gp(vcpu, err);
- if (lapic_in_kernel(vcpu))
- return ret;
- if (cr8_prev <= cr8)
- return ret;
- /*
- * TODO: we might be squashing a
- * KVM_GUESTDBG_SINGLESTEP-triggered
- * KVM_EXIT_DEBUG here.
- */
- vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
- return 0;
- }
- }
- break;
- case 2: /* clts */
- WARN_ONCE(1, "Guest should always own CR0.TS");
- vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
- trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
- return kvm_skip_emulated_instruction(vcpu);
- case 1: /*mov from cr*/
- switch (cr) {
- case 3:
- WARN_ON_ONCE(enable_unrestricted_guest);
- val = kvm_read_cr3(vcpu);
- kvm_register_write(vcpu, reg, val);
- trace_kvm_cr_read(cr, val);
- return kvm_skip_emulated_instruction(vcpu);
- case 8:
- val = kvm_get_cr8(vcpu);
- kvm_register_write(vcpu, reg, val);
- trace_kvm_cr_read(cr, val);
- return kvm_skip_emulated_instruction(vcpu);
- }
- break;
- case 3: /* lmsw */
- val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
- trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
- kvm_lmsw(vcpu, val);
-
- return kvm_skip_emulated_instruction(vcpu);
- default:
- break;
- }
- vcpu->run->exit_reason = 0;
- vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
- (int)(exit_qualification >> 4) & 3, cr);
- return 0;
-}
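
handle_cr() slices the same exit qualification several times; here is a stand-alone decoder showing the field layout it assumes (CR number in bits 3:0, access type in bits 5:4, GPR in bits 11:8, LMSW source data from bit 16 up, of which only the low 4 bits are used):

#include <stdint.h>
#include <stdio.h>

static void decode_cr_exit(uint64_t q)
{
	int cr	 = q & 15;		/* bits 3:0: control register */
	int type = (q >> 4) & 3;	/* 0=mov-to, 1=mov-from, 2=clts, 3=lmsw */
	int reg	 = (q >> 8) & 15;	/* bits 11:8: general-purpose register */
	int lmsw = (q >> 16) & 0x0f;	/* LMSW writes only CR0's low 4 bits */

	printf("cr%d type=%d gpr=%d lmsw=0x%x\n", cr, type, reg, lmsw);
}

int main(void)
{
	decode_cr_exit(0x0304);		/* MOV to CR4 from register 3 */
	return 0;
}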
-
-static int handle_dr(struct kvm_vcpu *vcpu)
-{
- unsigned long exit_qualification;
- int dr, dr7, reg;
-
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
-
- /* First, if DR does not exist, trigger UD */
- if (!kvm_require_dr(vcpu, dr))
- return 1;
-
- /* Do not handle if the CPL > 0, will trigger GP on re-entry */
- if (!kvm_require_cpl(vcpu, 0))
- return 1;
- dr7 = vmcs_readl(GUEST_DR7);
- if (dr7 & DR7_GD) {
- /*
- * As the vm-exit takes precedence over the debug trap, we
- * need to emulate the latter, either for the host or the
- * guest debugging itself.
- */
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
- vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
- vcpu->run->debug.arch.dr7 = dr7;
- vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
- vcpu->run->debug.arch.exception = DB_VECTOR;
- vcpu->run->exit_reason = KVM_EXIT_DEBUG;
- return 0;
- } else {
- vcpu->arch.dr6 &= ~15;
- vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
- kvm_queue_exception(vcpu, DB_VECTOR);
- return 1;
- }
- }
-
- if (vcpu->guest_debug == 0) {
- vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
- CPU_BASED_MOV_DR_EXITING);
-
- /*
- * No more DR vmexits; force a reload of the debug registers
- * and reenter on this instruction. The next vmexit will
- * retrieve the full state of the debug registers.
- */
- vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
- return 1;
- }
-
- reg = DEBUG_REG_ACCESS_REG(exit_qualification);
- if (exit_qualification & TYPE_MOV_FROM_DR) {
- unsigned long val;
-
- if (kvm_get_dr(vcpu, dr, &val))
- return 1;
- kvm_register_write(vcpu, reg, val);
- } else
- if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
- return 1;
-
- return kvm_skip_emulated_instruction(vcpu);
-}
-
-static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.dr6;
-}
-
-static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
-{
-}
-
-static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
-{
- get_debugreg(vcpu->arch.db[0], 0);
- get_debugreg(vcpu->arch.db[1], 1);
- get_debugreg(vcpu->arch.db[2], 2);
- get_debugreg(vcpu->arch.db[3], 3);
- get_debugreg(vcpu->arch.dr6, 6);
- vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
-
- vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
- vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
-}
-
-static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
-{
- vmcs_writel(GUEST_DR7, val);
-}
-
-static int handle_cpuid(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_cpuid(vcpu);
-}
-
-static int handle_rdmsr(struct kvm_vcpu *vcpu)
-{
- u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
- struct msr_data msr_info;
-
- msr_info.index = ecx;
- msr_info.host_initiated = false;
- if (vmx_get_msr(vcpu, &msr_info)) {
- trace_kvm_msr_read_ex(ecx);
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
-
- trace_kvm_msr_read(ecx, msr_info.data);
-
- /* FIXME: handling of bits 32:63 of rax, rdx */
- vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
- vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
- return kvm_skip_emulated_instruction(vcpu);
-}
-
-static int handle_wrmsr(struct kvm_vcpu *vcpu)
-{
- struct msr_data msr;
- u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
- u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
- | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
-
- msr.data = data;
- msr.index = ecx;
- msr.host_initiated = false;
- if (kvm_set_msr(vcpu, &msr) != 0) {
- trace_kvm_msr_write_ex(ecx, data);
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
-
- trace_kvm_msr_write(ecx, data);
- return kvm_skip_emulated_instruction(vcpu);
-}
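
Both handlers above split or join the 64-bit MSR value across the EDX:EAX register pair, exactly as the RDMSR and WRMSR instructions define it. A minimal sketch of that (re)assembly:

#include <stdint.h>

static void msr_split(uint64_t data, uint32_t *eax, uint32_t *edx)
{
	*eax = (uint32_t)data;		/* low 32 bits */
	*edx = (uint32_t)(data >> 32);	/* high 32 bits */
}

static uint64_t msr_join(uint32_t eax, uint32_t edx)
{
	return (uint64_t)eax | ((uint64_t)edx << 32);
}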
-
-static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
-{
- kvm_apic_update_ppr(vcpu);
- return 1;
-}
-
-static int handle_interrupt_window(struct kvm_vcpu *vcpu)
-{
- vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
- CPU_BASED_VIRTUAL_INTR_PENDING);
-
- kvm_make_request(KVM_REQ_EVENT, vcpu);
-
- ++vcpu->stat.irq_window_exits;
- return 1;
-}
-
-static int handle_halt(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_halt(vcpu);
-}
-
-static int handle_vmcall(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_hypercall(vcpu);
-}
-
-static int handle_invd(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
-}
-
-static int handle_invlpg(struct kvm_vcpu *vcpu)
-{
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-
- kvm_mmu_invlpg(vcpu, exit_qualification);
- return kvm_skip_emulated_instruction(vcpu);
-}
-
-static int handle_rdpmc(struct kvm_vcpu *vcpu)
-{
- int err;
-
- err = kvm_rdpmc(vcpu);
- return kvm_complete_insn_gp(vcpu, err);
-}
-
-static int handle_wbinvd(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_wbinvd(vcpu);
-}
-
-static int handle_xsetbv(struct kvm_vcpu *vcpu)
-{
- u64 new_bv = kvm_read_edx_eax(vcpu);
- u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
-
- if (kvm_set_xcr(vcpu, index, new_bv) == 0)
- return kvm_skip_emulated_instruction(vcpu);
- return 1;
-}
-
-static int handle_xsaves(struct kvm_vcpu *vcpu)
-{
- kvm_skip_emulated_instruction(vcpu);
- WARN(1, "this should never happen\n");
- return 1;
-}
-
-static int handle_xrstors(struct kvm_vcpu *vcpu)
-{
- kvm_skip_emulated_instruction(vcpu);
- WARN(1, "this should never happen\n");
- return 1;
-}
-
-static int handle_apic_access(struct kvm_vcpu *vcpu)
-{
- if (likely(fasteoi)) {
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- int access_type, offset;
-
- access_type = exit_qualification & APIC_ACCESS_TYPE;
- offset = exit_qualification & APIC_ACCESS_OFFSET;
- /*
-		 * A sane guest uses MOV to write the EOI, and the written
-		 * value is ignored. Short-circuit here to avoid heavy
-		 * instruction emulation.
- */
- if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
- (offset == APIC_EOI)) {
- kvm_lapic_set_eoi(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
- }
- return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
-}
-
-static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
-{
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- int vector = exit_qualification & 0xff;
-
- /* EOI-induced VM exit is trap-like and thus no need to adjust IP */
- kvm_apic_set_eoi_accelerated(vcpu, vector);
- return 1;
-}
-
-static int handle_apic_write(struct kvm_vcpu *vcpu)
-{
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- u32 offset = exit_qualification & 0xfff;
-
- /* APIC-write VM exit is trap-like and thus no need to adjust IP */
- kvm_apic_write_nodecode(vcpu, offset);
- return 1;
-}
-
-static int handle_task_switch(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long exit_qualification;
- bool has_error_code = false;
- u32 error_code = 0;
- u16 tss_selector;
- int reason, type, idt_v, idt_index;
-
- idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
- idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
- type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
-
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-
- reason = (u32)exit_qualification >> 30;
- if (reason == TASK_SWITCH_GATE && idt_v) {
- switch (type) {
- case INTR_TYPE_NMI_INTR:
- vcpu->arch.nmi_injected = false;
- vmx_set_nmi_mask(vcpu, true);
- break;
- case INTR_TYPE_EXT_INTR:
- case INTR_TYPE_SOFT_INTR:
- kvm_clear_interrupt_queue(vcpu);
- break;
- case INTR_TYPE_HARD_EXCEPTION:
- if (vmx->idt_vectoring_info &
- VECTORING_INFO_DELIVER_CODE_MASK) {
- has_error_code = true;
- error_code =
- vmcs_read32(IDT_VECTORING_ERROR_CODE);
- }
- /* fall through */
- case INTR_TYPE_SOFT_EXCEPTION:
- kvm_clear_exception_queue(vcpu);
- break;
- default:
- break;
- }
- }
- tss_selector = exit_qualification;
-
- if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
- type != INTR_TYPE_EXT_INTR &&
- type != INTR_TYPE_NMI_INTR))
- skip_emulated_instruction(vcpu);
-
- if (kvm_task_switch(vcpu, tss_selector,
- type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
- has_error_code, error_code) == EMULATE_FAIL) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
- vcpu->run->internal.ndata = 0;
- return 0;
- }
-
- /*
- * TODO: What about debug traps on tss switch?
- * Are we supposed to inject them and update dr6?
- */
-
- return 1;
-}
-
-static int handle_ept_violation(struct kvm_vcpu *vcpu)
-{
- unsigned long exit_qualification;
- gpa_t gpa;
- u64 error_code;
-
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-
- /*
-	 * If the EPT violation happened while executing IRET from an NMI,
-	 * the "blocked by NMI" bit has to be set before the next VM entry.
-	 * There are errata that may cause this bit not to be set:
- * AAK134, BY25.
- */
- if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
- enable_vnmi &&
- (exit_qualification & INTR_INFO_UNBLOCK_NMI))
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
-
- gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
- trace_kvm_page_fault(gpa, exit_qualification);
-
- /* Is it a read fault? */
- error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
- ? PFERR_USER_MASK : 0;
- /* Is it a write fault? */
- error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
- ? PFERR_WRITE_MASK : 0;
- /* Is it a fetch fault? */
- error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
- ? PFERR_FETCH_MASK : 0;
-	/* Is the EPT page-table entry present? */
- error_code |= (exit_qualification &
- (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE |
- EPT_VIOLATION_EXECUTABLE))
- ? PFERR_PRESENT_MASK : 0;
-
- error_code |= (exit_qualification & 0x100) != 0 ?
- PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
-
- vcpu->arch.exit_qualification = exit_qualification;
- return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
-}
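
The error-code construction above translates EPT-violation exit-qualification bits into x86 page-fault error-code (PFERR) bits. A condensed stand-alone version of the same mapping; the bit positions mirror the masks used above and the architectural PFERR layout:

#include <stdint.h>

/* EPT-violation exit-qualification bits. */
#define EPT_ACC_READ	(1u << 0)
#define EPT_ACC_WRITE	(1u << 1)
#define EPT_ACC_INSTR	(1u << 2)
#define EPT_READABLE	(1u << 3)
#define EPT_WRITABLE	(1u << 4)
#define EPT_EXECUTABLE	(1u << 5)

/* Page-fault error-code bits. */
#define PF_PRESENT	(1u << 0)
#define PF_WRITE	(1u << 1)
#define PF_USER		(1u << 2)
#define PF_FETCH	(1u << 4)

static uint64_t ept_violation_to_pferr(uint64_t q)
{
	uint64_t err = 0;

	err |= (q & EPT_ACC_READ)  ? PF_USER  : 0;	/* read access */
	err |= (q & EPT_ACC_WRITE) ? PF_WRITE : 0;	/* write access */
	err |= (q & EPT_ACC_INSTR) ? PF_FETCH : 0;	/* instruction fetch */
	/* "Present" if the EPT entry allowed any access at all. */
	err |= (q & (EPT_READABLE | EPT_WRITABLE | EPT_EXECUTABLE))
		? PF_PRESENT : 0;
	return err;
}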
-
-static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
-{
- gpa_t gpa;
-
- /*
- * A nested guest cannot optimize MMIO vmexits, because we have an
- * nGPA here instead of the required GPA.
- */
- gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
- if (!is_guest_mode(vcpu) &&
- !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
- trace_kvm_fast_mmio(gpa);
- /*
-		 * Doing kvm_skip_emulated_instruction() depends on undefined
-		 * behavior: Intel's manual doesn't mandate
-		 * VM_EXIT_INSTRUCTION_LEN to be set in the VMCS when an EPT
-		 * MISCONFIG occurs. While on real hardware it was observed to
-		 * be set, other hypervisors (namely Hyper-V) don't set it, so
-		 * we would end up advancing the IP by some random value.
-		 * Disable fast mmio when running nested and keep it for real
-		 * hardware, in the hope that VM_EXIT_INSTRUCTION_LEN will
-		 * always be set correctly.
- */
- if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
- return kvm_skip_emulated_instruction(vcpu);
- else
- return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
- EMULATE_DONE;
- }
-
- return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
-}
-
-static int handle_nmi_window(struct kvm_vcpu *vcpu)
-{
- WARN_ON_ONCE(!enable_vnmi);
- vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
- CPU_BASED_VIRTUAL_NMI_PENDING);
- ++vcpu->stat.nmi_window_exits;
- kvm_make_request(KVM_REQ_EVENT, vcpu);
-
- return 1;
-}
-
-static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- enum emulation_result err = EMULATE_DONE;
- int ret = 1;
- u32 cpu_exec_ctrl;
- bool intr_window_requested;
- unsigned count = 130;
-
- /*
- * We should never reach the point where we are emulating L2
- * due to invalid guest state as that means we incorrectly
- * allowed a nested VMEntry with an invalid vmcs12.
- */
- WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);
-
- cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
- intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
-
- while (vmx->emulation_required && count-- != 0) {
- if (intr_window_requested && vmx_interrupt_allowed(vcpu))
- return handle_interrupt_window(&vmx->vcpu);
-
- if (kvm_test_request(KVM_REQ_EVENT, vcpu))
- return 1;
-
- err = kvm_emulate_instruction(vcpu, 0);
-
- if (err == EMULATE_USER_EXIT) {
- ++vcpu->stat.mmio_exits;
- ret = 0;
- goto out;
- }
-
- if (err != EMULATE_DONE)
- goto emulation_error;
-
- if (vmx->emulation_required && !vmx->rmode.vm86_active &&
- vcpu->arch.exception.pending)
- goto emulation_error;
-
- if (vcpu->arch.halt_request) {
- vcpu->arch.halt_request = 0;
- ret = kvm_vcpu_halt(vcpu);
- goto out;
- }
-
- if (signal_pending(current))
- goto out;
- if (need_resched())
- schedule();
- }
-
-out:
- return ret;
-
-emulation_error:
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
- vcpu->run->internal.ndata = 0;
- return 0;
-}
-
-static void grow_ple_window(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- int old = vmx->ple_window;
-
- vmx->ple_window = __grow_ple_window(old, ple_window,
- ple_window_grow,
- ple_window_max);
-
- if (vmx->ple_window != old)
- vmx->ple_window_dirty = true;
-
- trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old);
-}
-
-static void shrink_ple_window(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- int old = vmx->ple_window;
-
- vmx->ple_window = __shrink_ple_window(old, ple_window,
- ple_window_shrink,
- ple_window);
-
- if (vmx->ple_window != old)
- vmx->ple_window_dirty = true;
-
- trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old);
-}
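
The PLE (PAUSE-loop exiting) window is adapted with a backoff-style policy: grow it while the guest keeps PAUSE-looping, to reduce exit storms, and shrink it back otherwise. The real __grow_ple_window()/__shrink_ple_window() helpers are defined elsewhere in KVM; the sketch below shows one plausible shape of the grow side, and the multiply-versus-add rule in it is an assumption, not a quote of the actual helper:

static unsigned int grow_window(unsigned int val, unsigned int base,
				unsigned int modifier, unsigned int max)
{
	unsigned long long ret = val;

	if (modifier == 0)
		return base;		/* growing disabled: stay at base */
	if (modifier < base)
		ret *= modifier;	/* small modifier: multiplicative */
	else
		ret += modifier;	/* large modifier: additive */
	return ret > max ? max : (unsigned int)ret;
}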
-
-/*
- * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
- */
-static void wakeup_handler(void)
-{
- struct kvm_vcpu *vcpu;
- int cpu = smp_processor_id();
-
- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
- list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
- blocked_vcpu_list) {
- struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
-
- if (pi_test_on(pi_desc) == 1)
- kvm_vcpu_kick(vcpu);
- }
- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
-}
-
-static void vmx_enable_tdp(void)
-{
- kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
- enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
- enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
- 0ull, VMX_EPT_EXECUTABLE_MASK,
- cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
- VMX_EPT_RWX_MASK, 0ull);
-
- ept_set_mmio_spte_mask();
- kvm_enable_tdp();
-}
-
-static __init int hardware_setup(void)
-{
- unsigned long host_bndcfgs;
- int r = -ENOMEM, i;
-
- rdmsrl_safe(MSR_EFER, &host_efer);
-
- for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
- kvm_define_shared_msr(i, vmx_msr_index[i]);
-
- for (i = 0; i < VMX_BITMAP_NR; i++) {
- vmx_bitmap[i] = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_bitmap[i])
- goto out;
- }
-
- memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
- memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
-
- if (setup_vmcs_config(&vmcs_config) < 0) {
- r = -EIO;
- goto out;
- }
-
- if (boot_cpu_has(X86_FEATURE_NX))
- kvm_enable_efer_bits(EFER_NX);
-
- if (boot_cpu_has(X86_FEATURE_MPX)) {
- rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
- WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
- }
-
- if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
- !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
- enable_vpid = 0;
-
- if (!cpu_has_vmx_ept() ||
- !cpu_has_vmx_ept_4levels() ||
- !cpu_has_vmx_ept_mt_wb() ||
- !cpu_has_vmx_invept_global())
- enable_ept = 0;
-
- if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
- enable_ept_ad_bits = 0;
-
- if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
- enable_unrestricted_guest = 0;
-
- if (!cpu_has_vmx_flexpriority())
- flexpriority_enabled = 0;
-
- if (!cpu_has_virtual_nmis())
- enable_vnmi = 0;
-
- /*
- * set_apic_access_page_addr() is used to reload apic access
- * page upon invalidation. No need to do anything if not
- * using the APIC_ACCESS_ADDR VMCS field.
- */
- if (!flexpriority_enabled)
- kvm_x86_ops->set_apic_access_page_addr = NULL;
-
- if (!cpu_has_vmx_tpr_shadow())
- kvm_x86_ops->update_cr8_intercept = NULL;
-
- if (enable_ept && !cpu_has_vmx_ept_2m_page())
- kvm_disable_largepages();
-
-#if IS_ENABLED(CONFIG_HYPERV)
- if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
- && enable_ept)
- kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
-#endif
-
- if (!cpu_has_vmx_ple()) {
- ple_gap = 0;
- ple_window = 0;
- ple_window_grow = 0;
- ple_window_max = 0;
- ple_window_shrink = 0;
- }
-
- if (!cpu_has_vmx_apicv()) {
- enable_apicv = 0;
- kvm_x86_ops->sync_pir_to_irr = NULL;
- }
-
- if (cpu_has_vmx_tsc_scaling()) {
- kvm_has_tsc_control = true;
- kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
- kvm_tsc_scaling_ratio_frac_bits = 48;
- }
-
- set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
-
- if (enable_ept)
- vmx_enable_tdp();
- else
- kvm_disable_tdp();
-
- if (!nested) {
- kvm_x86_ops->get_nested_state = NULL;
- kvm_x86_ops->set_nested_state = NULL;
- }
-
- /*
- * Only enable PML when hardware supports PML feature, and both EPT
- * and EPT A/D bit features are enabled -- PML depends on them to work.
- */
- if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
- enable_pml = 0;
-
- if (!enable_pml) {
- kvm_x86_ops->slot_enable_log_dirty = NULL;
- kvm_x86_ops->slot_disable_log_dirty = NULL;
- kvm_x86_ops->flush_log_dirty = NULL;
- kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
- }
-
- if (!cpu_has_vmx_preemption_timer())
- kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
-
- if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
- u64 vmx_msr;
-
- rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
- cpu_preemption_timer_multi =
- vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
- } else {
- kvm_x86_ops->set_hv_timer = NULL;
- kvm_x86_ops->cancel_hv_timer = NULL;
- }
-
- if (!cpu_has_vmx_shadow_vmcs())
- enable_shadow_vmcs = 0;
- if (enable_shadow_vmcs)
- init_vmcs_shadow_fields();
-
- kvm_set_posted_intr_wakeup_handler(wakeup_handler);
- nested_vmx_setup_ctls_msrs(&vmcs_config.nested, enable_apicv);
-
- kvm_mce_cap_supported |= MCG_LMCE_P;
-
- return alloc_kvm_area();
-
-out:
- for (i = 0; i < VMX_BITMAP_NR; i++)
- free_page((unsigned long)vmx_bitmap[i]);
-
- return r;
-}
-
-static __exit void hardware_unsetup(void)
-{
- int i;
-
- for (i = 0; i < VMX_BITMAP_NR; i++)
- free_page((unsigned long)vmx_bitmap[i]);
-
- free_kvm_area();
-}
-
-/*
- * Indicate a busy-waiting vcpu spinning on a spinlock. We do not enable
- * PAUSE exiting, so we only get here on a CPU with PAUSE-loop exiting.
- */
-static int handle_pause(struct kvm_vcpu *vcpu)
-{
- if (!kvm_pause_in_guest(vcpu->kvm))
- grow_ple_window(vcpu);
-
- /*
-	 * The Intel SDM vol. 3, ch. 25.1.3 says: the "PAUSE-loop exiting"
-	 * VM-execution control is ignored if CPL > 0. OTOH, KVM never
-	 * sets PAUSE_EXITING and only sets PLE if supported, so the vcpu
-	 * must be at CPL=0 if it gets a PAUSE exit.
- */
- kvm_vcpu_on_spin(vcpu, true);
- return kvm_skip_emulated_instruction(vcpu);
-}
-
-static int handle_nop(struct kvm_vcpu *vcpu)
-{
- return kvm_skip_emulated_instruction(vcpu);
-}
-
-static int handle_mwait(struct kvm_vcpu *vcpu)
-{
- printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
- return handle_nop(vcpu);
-}
-
-static int handle_invalid_op(struct kvm_vcpu *vcpu)
-{
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
-}
-
-static int handle_monitor_trap(struct kvm_vcpu *vcpu)
-{
- return 1;
-}
-
-static int handle_monitor(struct kvm_vcpu *vcpu)
-{
- printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
- return handle_nop(vcpu);
-}
-
-/*
- * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
- * set the success or error code of an emulated VMX instruction (as specified
- * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
- * instruction.
- */
-static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
-{
- vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
- & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
- X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
- return kvm_skip_emulated_instruction(vcpu);
-}
-
-static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
-{
- vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
- & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
- X86_EFLAGS_SF | X86_EFLAGS_OF))
- | X86_EFLAGS_CF);
- return kvm_skip_emulated_instruction(vcpu);
-}
-
-static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
- u32 vm_instruction_error)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- /*
- * failValid writes the error number to the current VMCS, which
- * can't be done if there isn't a current VMCS.
- */
- if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
- return nested_vmx_failInvalid(vcpu);
-
- vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
- & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
- X86_EFLAGS_SF | X86_EFLAGS_OF))
- | X86_EFLAGS_ZF);
- get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
- /*
- * We don't need to force a shadow sync because
- * VM_INSTRUCTION_ERROR is not shadowed
- */
- return kvm_skip_emulated_instruction(vcpu);
-}
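
These three helpers implement the SDM's result conventions for VMX instructions purely through the arithmetic flags in RFLAGS. A stand-alone restatement, using the architectural flag positions (CF=0, PF=2, AF=4, ZF=6, SF=7, OF=11):

#include <stdint.h>

#define FL_CF (1u << 0)
#define FL_PF (1u << 2)
#define FL_AF (1u << 4)
#define FL_ZF (1u << 6)
#define FL_SF (1u << 7)
#define FL_OF (1u << 11)
#define FL_ARITH (FL_CF | FL_PF | FL_AF | FL_ZF | FL_SF | FL_OF)

/* VMsucceed: all six arithmetic flags clear. */
static uint64_t vmx_succeed(uint64_t rflags)
{
	return rflags & ~(uint64_t)FL_ARITH;
}

/* VMfailInvalid: CF=1, the rest clear (no current VMCS for an error). */
static uint64_t vmx_fail_invalid(uint64_t rflags)
{
	return (rflags & ~(uint64_t)FL_ARITH) | FL_CF;
}

/* VMfailValid: ZF=1, the rest clear; the error number goes into the
 * current VMCS's VM_INSTRUCTION_ERROR field. */
static uint64_t vmx_fail_valid(uint64_t rflags)
{
	return (rflags & ~(uint64_t)FL_ARITH) | FL_ZF;
}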
-
-static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
-{
-	/* TODO: we should not simply reset the guest here. */
- kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
- pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
-}
-
-static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
-{
- struct vcpu_vmx *vmx =
- container_of(timer, struct vcpu_vmx, nested.preemption_timer);
-
- vmx->nested.preemption_timer_expired = true;
- kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
- kvm_vcpu_kick(&vmx->vcpu);
-
- return HRTIMER_NORESTART;
-}
-
-/*
- * Decode the memory-address operand of a vmx instruction, as recorded on an
- * exit caused by such an instruction (run by a guest hypervisor).
- * On success, returns 0. When the operand is invalid, returns 1 and injects
- * a #UD or #GP exception.
- */
-static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
- unsigned long exit_qualification,
- u32 vmx_instruction_info, bool wr, gva_t *ret)
-{
- gva_t off;
- bool exn;
- struct kvm_segment s;
-
- /*
- * According to Vol. 3B, "Information for VM Exits Due to Instruction
- * Execution", on an exit, vmx_instruction_info holds most of the
- * addressing components of the operand. Only the displacement part
- * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
- * For how an actual address is calculated from all these components,
- * refer to Vol. 1, "Operand Addressing".
- */
- int scaling = vmx_instruction_info & 3;
- int addr_size = (vmx_instruction_info >> 7) & 7;
- bool is_reg = vmx_instruction_info & (1u << 10);
- int seg_reg = (vmx_instruction_info >> 15) & 7;
- int index_reg = (vmx_instruction_info >> 18) & 0xf;
- bool index_is_valid = !(vmx_instruction_info & (1u << 22));
- int base_reg = (vmx_instruction_info >> 23) & 0xf;
- bool base_is_valid = !(vmx_instruction_info & (1u << 27));
-
- if (is_reg) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
-
- /* Addr = segment_base + offset */
- /* offset = base + [index * scale] + displacement */
- off = exit_qualification; /* holds the displacement */
- if (base_is_valid)
- off += kvm_register_read(vcpu, base_reg);
- if (index_is_valid)
-		off += kvm_register_read(vcpu, index_reg) << scaling;
- vmx_get_segment(vcpu, &s, seg_reg);
- *ret = s.base + off;
-
- if (addr_size == 1) /* 32 bit */
- *ret &= 0xffffffff;
-
- /* Checks for #GP/#SS exceptions. */
- exn = false;
- if (is_long_mode(vcpu)) {
- /* Long mode: #GP(0)/#SS(0) if the memory address is in a
- * non-canonical form. This is the only check on the memory
- * destination for long mode!
- */
- exn = is_noncanonical_address(*ret, vcpu);
- } else if (is_protmode(vcpu)) {
- /* Protected mode: apply checks for segment validity in the
- * following order:
- * - segment type check (#GP(0) may be thrown)
- * - usability check (#GP(0)/#SS(0))
- * - limit check (#GP(0)/#SS(0))
- */
- if (wr)
- /* #GP(0) if the destination operand is located in a
- * read-only data segment or any code segment.
- */
- exn = ((s.type & 0xa) == 0 || (s.type & 8));
- else
- /* #GP(0) if the source operand is located in an
- * execute-only code segment
- */
- exn = ((s.type & 0xa) == 8);
- if (exn) {
- kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
- return 1;
- }
- /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
- */
- exn = (s.unusable != 0);
- /* Protected mode: #GP(0)/#SS(0) if the memory
- * operand is outside the segment limit.
- */
- exn = exn || (off + sizeof(u64) > s.limit);
- }
- if (exn) {
- kvm_queue_exception_e(vcpu,
- seg_reg == VCPU_SREG_SS ?
- SS_VECTOR : GP_VECTOR,
- 0);
- return 1;
- }
-
- return 0;
-}
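
The bit-slicing at the top of get_vmx_mem_address() follows the VMX_INSTRUCTION_INFO field layout. A stand-alone decoder mirroring the same fields; note that bits 22 and 27 are *invalid* flags, hence the negations:

#include <stdbool.h>
#include <stdint.h>

struct vmx_insn_info {
	int scaling;		/* bits 1:0: index scale = 1 << scaling */
	int addr_size;		/* bits 9:7: 0=16-bit, 1=32-bit, 2=64-bit */
	bool is_reg;		/* bit 10: register operand, not memory */
	int seg_reg;		/* bits 17:15: segment register */
	int index_reg;		/* bits 21:18 */
	bool index_valid;	/* bit 22 is the "invalid" flag */
	int base_reg;		/* bits 26:23 */
	bool base_valid;	/* bit 27 is the "invalid" flag */
};

static struct vmx_insn_info decode_insn_info(uint32_t info)
{
	struct vmx_insn_info d = {
		.scaling     = info & 3,
		.addr_size   = (info >> 7) & 7,
		.is_reg	     = (info & (1u << 10)) != 0,
		.seg_reg     = (info >> 15) & 7,
		.index_reg   = (info >> 18) & 0xf,
		.index_valid = !(info & (1u << 22)),
		.base_reg    = (info >> 23) & 0xf,
		.base_valid  = !(info & (1u << 27)),
	};
	return d;
}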
-
-static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
-{
- gva_t gva;
- struct x86_exception e;
-
- if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
- return 1;
-
- if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
- kvm_inject_page_fault(vcpu, &e);
- return 1;
- }
-
- return 0;
-}
-
-/*
- * Allocate a shadow VMCS and associate it with the currently loaded
- * VMCS, unless such a shadow VMCS already exists. The newly allocated
- * VMCS is also VMCLEARed, so that it is ready for use.
- */
-static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
-
- /*
- * We should allocate a shadow vmcs for vmcs01 only when L1
- * executes VMXON and free it when L1 executes VMXOFF.
- * As it is invalid to execute VMXON twice, we shouldn't reach
-	 * here when vmcs01 already has an allocated shadow vmcs.
- */
- WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
-
- if (!loaded_vmcs->shadow_vmcs) {
- loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
- if (loaded_vmcs->shadow_vmcs)
- vmcs_clear(loaded_vmcs->shadow_vmcs);
- }
- return loaded_vmcs->shadow_vmcs;
-}
-
-static int enter_vmx_operation(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- int r;
-
- r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
- if (r < 0)
- goto out_vmcs02;
-
- vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
- if (!vmx->nested.cached_vmcs12)
- goto out_cached_vmcs12;
-
- vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
- if (!vmx->nested.cached_shadow_vmcs12)
- goto out_cached_shadow_vmcs12;
-
- if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
- goto out_shadow_vmcs;
-
- hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
-
- vmx->nested.vpid02 = allocate_vpid();
-
- vmx->nested.vmcs02_initialized = false;
- vmx->nested.vmxon = true;
- return 0;
-
-out_shadow_vmcs:
- kfree(vmx->nested.cached_shadow_vmcs12);
-
-out_cached_shadow_vmcs12:
- kfree(vmx->nested.cached_vmcs12);
-
-out_cached_vmcs12:
- free_loaded_vmcs(&vmx->nested.vmcs02);
-
-out_vmcs02:
- return -ENOMEM;
-}
-
-/*
- * Emulate the VMXON instruction.
- * Currently, we just remember that VMX is active, and do not save or even
- * inspect the argument to VMXON (the so-called "VMXON pointer") because we
- * do not need to store anything in that guest-allocated memory
- * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
- * argument is different from the VMXON pointer (which the spec says they do).
- */
-static int handle_vmon(struct kvm_vcpu *vcpu)
-{
- int ret;
- gpa_t vmptr;
- struct page *page;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
- | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
-
- /*
- * The Intel VMX Instruction Reference lists a bunch of bits that are
- * prerequisite to running VMXON, most notably cr4.VMXE must be set to
- * 1 (see vmx_set_cr4() for when we allow the guest to set this).
- * Otherwise, we should fail with #UD. But most faulting conditions
- * have already been checked by hardware, prior to the VM-exit for
- * VMXON. We do test guest cr4.VMXE because processor CR4 always has
- * that bit set to 1 in non-root mode.
- */
- if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
-
- /* CPL=0 must be checked manually. */
- if (vmx_get_cpl(vcpu)) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
-
- if (vmx->nested.vmxon)
- return nested_vmx_failValid(vcpu,
- VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
-
- if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
- != VMXON_NEEDED_FEATURES) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
-
- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
-
- /*
-	 * SDM 3: 24.11.5
-	 * The first 4 bytes of the VMXON region contain the supported
-	 * VMCS revision identifier.
-	 *
-	 * Note: IA32_VMX_BASIC[48] will never be 1 for the nested case,
-	 * which would replace the physical address width with 32.
- */
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
- return nested_vmx_failInvalid(vcpu);
-
- page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
- if (is_error_page(page))
- return nested_vmx_failInvalid(vcpu);
-
- if (*(u32 *)kmap(page) != VMCS12_REVISION) {
- kunmap(page);
- kvm_release_page_clean(page);
- return nested_vmx_failInvalid(vcpu);
- }
- kunmap(page);
- kvm_release_page_clean(page);
-
- vmx->nested.vmxon_ptr = vmptr;
- ret = enter_vmx_operation(vcpu);
- if (ret)
- return ret;
-
- return nested_vmx_succeed(vcpu);
-}
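
Before accepting the VMXON pointer, handle_vmon() applies three checks: page alignment, fitting within the guest's physical-address width, and a matching VMCS revision identifier in the region's first four bytes. The same tests as a compact stand-alone predicate:

#include <stdbool.h>
#include <stdint.h>

static bool vmxon_ptr_ok(uint64_t vmptr, int maxphyaddr,
			 uint32_t first_dword, uint32_t vmcs12_revision)
{
	if (vmptr & 0xfffull)		/* must be 4 KiB aligned */
		return false;
	if (vmptr >> maxphyaddr)	/* must fit in guest physical space */
		return false;
	return first_dword == vmcs12_revision;	/* revision id must match */
}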
-
-/*
- * Intel's VMX Instruction Reference specifies a common set of prerequisites
- * for running VMX instructions (except VMXON, whose prerequisites are
- * slightly different). It also specifies what exception to inject otherwise.
- * Note that many of these exceptions have priority over VM exits, so they
- * don't have to be checked again here.
- */
-static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
-{
- if (!to_vmx(vcpu)->nested.vmxon) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 0;
- }
-
- if (vmx_get_cpl(vcpu)) {
- kvm_inject_gp(vcpu, 0);
- return 0;
- }
-
- return 1;
-}
-
-static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
-{
- vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
- vmcs_write64(VMCS_LINK_POINTER, -1ull);
-}
-
-static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (!vmx->nested.hv_evmcs)
- return;
-
- kunmap(vmx->nested.hv_evmcs_page);
- kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
- vmx->nested.hv_evmcs_vmptr = -1ull;
- vmx->nested.hv_evmcs_page = NULL;
- vmx->nested.hv_evmcs = NULL;
-}
-
-static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (vmx->nested.current_vmptr == -1ull)
- return;
-
- if (enable_shadow_vmcs) {
-		/*
-		 * Copy to memory all shadowed fields in case
-		 * they were modified.
-		 */
- copy_shadow_to_vmcs12(vmx);
- vmx->nested.need_vmcs12_sync = false;
- vmx_disable_shadow_vmcs(vmx);
- }
- vmx->nested.posted_intr_nv = -1;
-
- /* Flush VMCS12 to guest memory */
- kvm_vcpu_write_guest_page(vcpu,
- vmx->nested.current_vmptr >> PAGE_SHIFT,
- vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
-
- kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
-
- vmx->nested.current_vmptr = -1ull;
-}
-
-/*
- * Free whatever needs to be freed from vmx->nested when L1 goes down, or
- * just stops using VMX.
- */
-static void free_nested(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
- return;
-
- vmx->nested.vmxon = false;
- vmx->nested.smm.vmxon = false;
- free_vpid(vmx->nested.vpid02);
- vmx->nested.posted_intr_nv = -1;
- vmx->nested.current_vmptr = -1ull;
- if (enable_shadow_vmcs) {
- vmx_disable_shadow_vmcs(vmx);
- vmcs_clear(vmx->vmcs01.shadow_vmcs);
- free_vmcs(vmx->vmcs01.shadow_vmcs);
- vmx->vmcs01.shadow_vmcs = NULL;
- }
- kfree(vmx->nested.cached_vmcs12);
- kfree(vmx->nested.cached_shadow_vmcs12);
- /* Unpin physical memory we referred to in the vmcs02 */
- if (vmx->nested.apic_access_page) {
- kvm_release_page_dirty(vmx->nested.apic_access_page);
- vmx->nested.apic_access_page = NULL;
- }
- if (vmx->nested.virtual_apic_page) {
- kvm_release_page_dirty(vmx->nested.virtual_apic_page);
- vmx->nested.virtual_apic_page = NULL;
- }
- if (vmx->nested.pi_desc_page) {
- kunmap(vmx->nested.pi_desc_page);
- kvm_release_page_dirty(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc_page = NULL;
- vmx->nested.pi_desc = NULL;
- }
-
- kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
-
- nested_release_evmcs(vcpu);
-
- free_loaded_vmcs(&vmx->nested.vmcs02);
-}
-
-/* Emulate the VMXOFF instruction */
-static int handle_vmoff(struct kvm_vcpu *vcpu)
-{
- if (!nested_vmx_check_permission(vcpu))
- return 1;
- free_nested(vcpu);
- return nested_vmx_succeed(vcpu);
-}
-
-/* Emulate the VMCLEAR instruction */
-static int handle_vmclear(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 zero = 0;
- gpa_t vmptr;
-
- if (!nested_vmx_check_permission(vcpu))
- return 1;
-
- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
-
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
- return nested_vmx_failValid(vcpu,
- VMXERR_VMCLEAR_INVALID_ADDRESS);
-
- if (vmptr == vmx->nested.vmxon_ptr)
- return nested_vmx_failValid(vcpu,
- VMXERR_VMCLEAR_VMXON_POINTER);
-
- if (vmx->nested.hv_evmcs_page) {
- if (vmptr == vmx->nested.hv_evmcs_vmptr)
- nested_release_evmcs(vcpu);
- } else {
- if (vmptr == vmx->nested.current_vmptr)
- nested_release_vmcs12(vcpu);
-
- kvm_vcpu_write_guest(vcpu,
- vmptr + offsetof(struct vmcs12,
- launch_state),
- &zero, sizeof(zero));
- }
-
- return nested_vmx_succeed(vcpu);
-}
-
-static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
-
-/* Emulate the VMLAUNCH instruction */
-static int handle_vmlaunch(struct kvm_vcpu *vcpu)
-{
- return nested_vmx_run(vcpu, true);
-}
-
-/* Emulate the VMRESUME instruction */
-static int handle_vmresume(struct kvm_vcpu *vcpu)
-{
- return nested_vmx_run(vcpu, false);
-}
-
-/*
- * Read a vmcs12 field. Since these can have varying lengths and we return
- * one type, we chose the biggest type (u64) and zero-extend the return value
- * to that size. Note that the caller, handle_vmread, might need to use only
- * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
- * 64-bit fields are to be returned).
- */
-static inline int vmcs12_read_any(struct vmcs12 *vmcs12,
- unsigned long field, u64 *ret)
-{
- short offset = vmcs_field_to_offset(field);
- char *p;
-
- if (offset < 0)
- return offset;
-
- p = (char *)vmcs12 + offset;
-
- switch (vmcs_field_width(field)) {
- case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
- *ret = *((natural_width *)p);
- return 0;
- case VMCS_FIELD_WIDTH_U16:
- *ret = *((u16 *)p);
- return 0;
- case VMCS_FIELD_WIDTH_U32:
- *ret = *((u32 *)p);
- return 0;
- case VMCS_FIELD_WIDTH_U64:
- *ret = *((u64 *)p);
- return 0;
- default:
- WARN_ON(1);
- return -ENOENT;
- }
-}
-
-static inline int vmcs12_write_any(struct vmcs12 *vmcs12,
-				   unsigned long field, u64 field_value)
-{
-	short offset = vmcs_field_to_offset(field);
-	char *p;
-
-	if (offset < 0)
-		return offset;
-
-	p = (char *)vmcs12 + offset;
-
- switch (vmcs_field_width(field)) {
- case VMCS_FIELD_WIDTH_U16:
- *(u16 *)p = field_value;
- return 0;
- case VMCS_FIELD_WIDTH_U32:
- *(u32 *)p = field_value;
- return 0;
- case VMCS_FIELD_WIDTH_U64:
- *(u64 *)p = field_value;
- return 0;
- case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
- *(natural_width *)p = field_value;
- return 0;
- default:
- WARN_ON(1);
- return -ENOENT;
- }
-}
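
Both accessors dispatch on vmcs_field_width(). Per the SDM's VMCS field encoding, a field's width lives in bits 14:13 of its encoding (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width), which is presumably what that helper extracts; as a one-liner:

#include <stdint.h>

enum field_width { W_U16 = 0, W_U64 = 1, W_U32 = 2, W_NATURAL = 3 };

/* A VMCS field encoding carries its width in bits 14:13. */
static enum field_width field_width(uint32_t encoding)
{
	return (enum field_width)((encoding >> 13) & 3);
}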
-
-static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
-{
- struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
- struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
-
- vmcs12->hdr.revision_id = evmcs->revision_id;
-
- /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
- vmcs12->tpr_threshold = evmcs->tpr_threshold;
- vmcs12->guest_rip = evmcs->guest_rip;
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
- vmcs12->guest_rsp = evmcs->guest_rsp;
- vmcs12->guest_rflags = evmcs->guest_rflags;
- vmcs12->guest_interruptibility_info =
- evmcs->guest_interruptibility_info;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
- vmcs12->cpu_based_vm_exec_control =
- evmcs->cpu_based_vm_exec_control;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
-		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
- vmcs12->exception_bitmap = evmcs->exception_bitmap;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
- vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
- vmcs12->vm_entry_intr_info_field =
- evmcs->vm_entry_intr_info_field;
- vmcs12->vm_entry_exception_error_code =
- evmcs->vm_entry_exception_error_code;
- vmcs12->vm_entry_instruction_len =
- evmcs->vm_entry_instruction_len;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
- vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
- vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
- vmcs12->host_cr0 = evmcs->host_cr0;
- vmcs12->host_cr3 = evmcs->host_cr3;
- vmcs12->host_cr4 = evmcs->host_cr4;
- vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
- vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
- vmcs12->host_rip = evmcs->host_rip;
- vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
- vmcs12->host_es_selector = evmcs->host_es_selector;
- vmcs12->host_cs_selector = evmcs->host_cs_selector;
- vmcs12->host_ss_selector = evmcs->host_ss_selector;
- vmcs12->host_ds_selector = evmcs->host_ds_selector;
- vmcs12->host_fs_selector = evmcs->host_fs_selector;
- vmcs12->host_gs_selector = evmcs->host_gs_selector;
- vmcs12->host_tr_selector = evmcs->host_tr_selector;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
-		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
- vmcs12->pin_based_vm_exec_control =
- evmcs->pin_based_vm_exec_control;
- vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
- vmcs12->secondary_vm_exec_control =
- evmcs->secondary_vm_exec_control;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
- vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
- vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
- vmcs12->msr_bitmap = evmcs->msr_bitmap;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
- vmcs12->guest_es_base = evmcs->guest_es_base;
- vmcs12->guest_cs_base = evmcs->guest_cs_base;
- vmcs12->guest_ss_base = evmcs->guest_ss_base;
- vmcs12->guest_ds_base = evmcs->guest_ds_base;
- vmcs12->guest_fs_base = evmcs->guest_fs_base;
- vmcs12->guest_gs_base = evmcs->guest_gs_base;
- vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
- vmcs12->guest_tr_base = evmcs->guest_tr_base;
- vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
- vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
- vmcs12->guest_es_limit = evmcs->guest_es_limit;
- vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
- vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
- vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
- vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
- vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
- vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
- vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
- vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
- vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
- vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
- vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
- vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
- vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
- vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
- vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
- vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
- vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
- vmcs12->guest_es_selector = evmcs->guest_es_selector;
- vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
- vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
- vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
- vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
- vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
- vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
- vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
- vmcs12->tsc_offset = evmcs->tsc_offset;
- vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
- vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
- vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
- vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
- vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
- vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
- vmcs12->guest_cr0 = evmcs->guest_cr0;
- vmcs12->guest_cr3 = evmcs->guest_cr3;
- vmcs12->guest_cr4 = evmcs->guest_cr4;
- vmcs12->guest_dr7 = evmcs->guest_dr7;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
- vmcs12->host_fs_base = evmcs->host_fs_base;
- vmcs12->host_gs_base = evmcs->host_gs_base;
- vmcs12->host_tr_base = evmcs->host_tr_base;
- vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
- vmcs12->host_idtr_base = evmcs->host_idtr_base;
- vmcs12->host_rsp = evmcs->host_rsp;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
- vmcs12->ept_pointer = evmcs->ept_pointer;
- vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
- }
-
- if (unlikely(!(evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
- vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
- vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
- vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
- vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
- vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
- vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
- vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
- vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
- vmcs12->guest_pending_dbg_exceptions =
- evmcs->guest_pending_dbg_exceptions;
- vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
- vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
- vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
- vmcs12->guest_activity_state = evmcs->guest_activity_state;
- vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
- }
-
- /*
- * Not used?
- * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
- * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
- * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
- * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
- * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
- * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
- * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
- * vmcs12->page_fault_error_code_mask =
- * evmcs->page_fault_error_code_mask;
- * vmcs12->page_fault_error_code_match =
- * evmcs->page_fault_error_code_match;
- * vmcs12->cr3_target_count = evmcs->cr3_target_count;
- * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
- * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
- * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
- */
-
- /*
- * Read only fields:
- * vmcs12->guest_physical_address = evmcs->guest_physical_address;
- * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
- * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
- * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
- * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
- * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
- * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
- * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
- * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
- * vmcs12->exit_qualification = evmcs->exit_qualification;
- * vmcs12->guest_linear_address = evmcs->guest_linear_address;
- *
- * Not present in struct vmcs12:
- * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
- * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
- * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
- * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
- */
-
- return 0;
-}
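-
-/*
- * A rough sketch of the clean-fields protocol used above: each
- * HV_VMX_ENLIGHTENED_CLEAN_FIELD_* bit in evmcs->hv_clean_fields covers
- * one group of fields, and a group is copied only when its bit is
- * clear, i.e. when the nested hypervisor marked the group dirty:
- *
- *	if (!(evmcs->hv_clean_fields &
- *	      HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))
- *		copy the CR/DR group from evmcs to vmcs12;
- *
- * An eVMCS with all clean bits set therefore leaves the cached vmcs12
- * untouched.
- */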
-
-static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
-{
- struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
- struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
-
- /*
- * Should not be changed by KVM:
- *
- * evmcs->host_es_selector = vmcs12->host_es_selector;
- * evmcs->host_cs_selector = vmcs12->host_cs_selector;
- * evmcs->host_ss_selector = vmcs12->host_ss_selector;
- * evmcs->host_ds_selector = vmcs12->host_ds_selector;
- * evmcs->host_fs_selector = vmcs12->host_fs_selector;
- * evmcs->host_gs_selector = vmcs12->host_gs_selector;
- * evmcs->host_tr_selector = vmcs12->host_tr_selector;
- * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
- * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
- * evmcs->host_cr0 = vmcs12->host_cr0;
- * evmcs->host_cr3 = vmcs12->host_cr3;
- * evmcs->host_cr4 = vmcs12->host_cr4;
- * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
- * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
- * evmcs->host_rip = vmcs12->host_rip;
- * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
- * evmcs->host_fs_base = vmcs12->host_fs_base;
- * evmcs->host_gs_base = vmcs12->host_gs_base;
- * evmcs->host_tr_base = vmcs12->host_tr_base;
- * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
- * evmcs->host_idtr_base = vmcs12->host_idtr_base;
- * evmcs->host_rsp = vmcs12->host_rsp;
- * sync_vmcs12() doesn't read these:
- * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
- * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
- * evmcs->msr_bitmap = vmcs12->msr_bitmap;
- * evmcs->ept_pointer = vmcs12->ept_pointer;
- * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
- * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
- * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
- * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
- * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
- * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
- * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
- * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
- * evmcs->tpr_threshold = vmcs12->tpr_threshold;
- * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
- * evmcs->exception_bitmap = vmcs12->exception_bitmap;
- * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
- * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
- * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
- * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
- * evmcs->page_fault_error_code_mask =
- * vmcs12->page_fault_error_code_mask;
- * evmcs->page_fault_error_code_match =
- * vmcs12->page_fault_error_code_match;
- * evmcs->cr3_target_count = vmcs12->cr3_target_count;
- * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
- * evmcs->tsc_offset = vmcs12->tsc_offset;
- * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
- * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
- * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
- * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
- * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
- * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
- * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
- * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
- *
- * Not present in struct vmcs12:
- * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
- * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
- * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
- * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
- */
-
- evmcs->guest_es_selector = vmcs12->guest_es_selector;
- evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
- evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
- evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
- evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
- evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
- evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
- evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
-
- evmcs->guest_es_limit = vmcs12->guest_es_limit;
- evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
- evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
- evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
- evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
- evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
- evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
- evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
- evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
- evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
-
- evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
- evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
- evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
- evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
- evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
- evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
- evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
- evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
-
- evmcs->guest_es_base = vmcs12->guest_es_base;
- evmcs->guest_cs_base = vmcs12->guest_cs_base;
- evmcs->guest_ss_base = vmcs12->guest_ss_base;
- evmcs->guest_ds_base = vmcs12->guest_ds_base;
- evmcs->guest_fs_base = vmcs12->guest_fs_base;
- evmcs->guest_gs_base = vmcs12->guest_gs_base;
- evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
- evmcs->guest_tr_base = vmcs12->guest_tr_base;
- evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
- evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
-
- evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
- evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
-
- evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
- evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
- evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
- evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
-
- evmcs->guest_pending_dbg_exceptions =
- vmcs12->guest_pending_dbg_exceptions;
- evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
- evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
-
- evmcs->guest_activity_state = vmcs12->guest_activity_state;
- evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
-
- evmcs->guest_cr0 = vmcs12->guest_cr0;
- evmcs->guest_cr3 = vmcs12->guest_cr3;
- evmcs->guest_cr4 = vmcs12->guest_cr4;
- evmcs->guest_dr7 = vmcs12->guest_dr7;
-
- evmcs->guest_physical_address = vmcs12->guest_physical_address;
-
- evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
- evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
- evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
- evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
- evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
- evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
- evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
- evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
-
- evmcs->exit_qualification = vmcs12->exit_qualification;
-
- evmcs->guest_linear_address = vmcs12->guest_linear_address;
- evmcs->guest_rsp = vmcs12->guest_rsp;
- evmcs->guest_rflags = vmcs12->guest_rflags;
-
- evmcs->guest_interruptibility_info =
- vmcs12->guest_interruptibility_info;
- evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
- evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
- evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
- evmcs->vm_entry_exception_error_code =
- vmcs12->vm_entry_exception_error_code;
- evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
-
- evmcs->guest_rip = vmcs12->guest_rip;
-
- evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
-
- return 0;
-}
-
-/*
- * Copy the writable VMCS shadow fields back to the VMCS12, in case
- * they have been modified by the L1 guest. Note that the "read-only"
- * VM-exit information fields are actually writable if the vCPU is
- * configured to support "VMWRITE to any supported field in the VMCS."
- */
-static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
-{
- const u16 *fields[] = {
- shadow_read_write_fields,
- shadow_read_only_fields
- };
- const int max_fields[] = {
- max_shadow_read_write_fields,
- max_shadow_read_only_fields
- };
- int i, q;
- unsigned long field;
- u64 field_value;
- struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
-
- preempt_disable();
-
- vmcs_load(shadow_vmcs);
-
- for (q = 0; q < ARRAY_SIZE(fields); q++) {
- for (i = 0; i < max_fields[q]; i++) {
- field = fields[q][i];
- field_value = __vmcs_readl(field);
- vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
- }
- /*
- * Skip the VM-exit information fields if they are read-only.
- */
- if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
- break;
- }
-
- vmcs_clear(shadow_vmcs);
- vmcs_load(vmx->loaded_vmcs->vmcs);
-
- preempt_enable();
-}
-
-static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
-{
- const u16 *fields[] = {
- shadow_read_write_fields,
- shadow_read_only_fields
- };
- const int max_fields[] = {
- max_shadow_read_write_fields,
- max_shadow_read_only_fields
- };
- int i, q;
- unsigned long field;
- u64 field_value = 0;
- struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
-
- vmcs_load(shadow_vmcs);
-
- for (q = 0; q < ARRAY_SIZE(fields); q++) {
- for (i = 0; i < max_fields[q]; i++) {
- field = fields[q][i];
- vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
- __vmcs_writel(field, field_value);
- }
- }
-
- vmcs_clear(shadow_vmcs);
- vmcs_load(vmx->loaded_vmcs->vmcs);
-}
-
-static int handle_vmread(struct kvm_vcpu *vcpu)
-{
- unsigned long field;
- u64 field_value;
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- gva_t gva = 0;
- struct vmcs12 *vmcs12;
-
- if (!nested_vmx_check_permission(vcpu))
- return 1;
-
- if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
- return nested_vmx_failInvalid(vcpu);
-
- if (!is_guest_mode(vcpu))
- vmcs12 = get_vmcs12(vcpu);
- else {
- /*
- * When vmcs->vmcs_link_pointer is -1ull, any VMREAD to a
- * shadowed field sets the ALU flags for VMfailInvalid.
- */
- if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
- return nested_vmx_failInvalid(vcpu);
- vmcs12 = get_shadow_vmcs12(vcpu);
- }
-
- /* Decode instruction info and find the field to read */
- field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
- /* Read the field, zero-extended to a u64 field_value */
- if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
- return nested_vmx_failValid(vcpu,
- VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-
- /*
- * Now copy part of this value to register or memory, as requested.
- * Note that the number of bits actually copied is 32 or 64 depending
- * on the guest's mode (32 or 64 bit), not on the given field's length.
- */
- if (vmx_instruction_info & (1u << 10)) {
- kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
- field_value);
- } else {
- if (get_vmx_mem_address(vcpu, exit_qualification,
- vmx_instruction_info, true, &gva))
- return 1;
- /* _system ok, nested_vmx_check_permission has verified cpl=0 */
- kvm_write_guest_virt_system(vcpu, gva, &field_value,
- (is_long_mode(vcpu) ? 8 : 4), NULL);
- }
-
- return nested_vmx_succeed(vcpu);
-}
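-
-/*
- * Worked example of the instruction-info decoding used above (bit
- * layout per the SDM's VM-exit instruction-information table): bits
- * 31:28 name the register holding the VMCS field encoding, bit 10 set
- * means a register (rather than memory) destination, and bits 6:3 name
- * that register. For "vmread %rax, %rbx" (field encoding in %rax,
- * result to %rbx):
- *
- *	((vmx_instruction_info >> 28) & 0xf) == 0	register %rax
- *	(vmx_instruction_info & (1u << 10)) != 0	register destination
- *	((vmx_instruction_info >> 3) & 0xf) == 3	register %rbx
- */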
-
-
-static int handle_vmwrite(struct kvm_vcpu *vcpu)
-{
- unsigned long field;
- gva_t gva;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-
- /* The value to write might be 32 or 64 bits, depending on L1's long
- * mode, and eventually we need to write that into a field of several
- * possible lengths. The code below first zero-extends the value to 64
- * bit (field_value), and then copies only the appropriate number of
- * bits into the vmcs12 field.
- */
- u64 field_value = 0;
- struct x86_exception e;
- struct vmcs12 *vmcs12;
-
- if (!nested_vmx_check_permission(vcpu))
- return 1;
-
- if (vmx->nested.current_vmptr == -1ull)
- return nested_vmx_failInvalid(vcpu);
-
- if (vmx_instruction_info & (1u << 10))
- field_value = kvm_register_readl(vcpu,
- (((vmx_instruction_info) >> 3) & 0xf));
- else {
- if (get_vmx_mem_address(vcpu, exit_qualification,
- vmx_instruction_info, false, &gva))
- return 1;
- if (kvm_read_guest_virt(vcpu, gva, &field_value,
- (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
- kvm_inject_page_fault(vcpu, &e);
- return 1;
- }
- }
-
-
- field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
- /*
- * If the vCPU supports "VMWRITE to any supported field in the
- * VMCS," then the "read-only" fields are actually read/write.
- */
- if (vmcs_field_readonly(field) &&
- !nested_cpu_has_vmwrite_any_field(vcpu))
- return nested_vmx_failValid(vcpu,
- VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
-
- if (!is_guest_mode(vcpu))
- vmcs12 = get_vmcs12(vcpu);
- else {
- /*
- * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE to a
- * shadowed field sets the ALU flags for VMfailInvalid.
- */
- if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
- return nested_vmx_failInvalid(vcpu);
- vmcs12 = get_shadow_vmcs12(vcpu);
- }
-
- if (vmcs12_write_any(vmcs12, field, field_value) < 0)
- return nested_vmx_failValid(vcpu,
- VMXERR_UNSUPPORTED_VMCS_COMPONENT);
-
- /*
- * Do not track vmcs12 dirty-state when in guest mode, as we
- * actually dirty the shadow vmcs12 rather than vmcs12 itself.
- */
- if (!is_guest_mode(vcpu)) {
- switch (field) {
-#define SHADOW_FIELD_RW(x) case x:
-#include "vmx_shadow_fields.h"
- /*
- * The fields that can be updated by L1 without a vmexit are
- * always updated in the vmcs02; the others go down the slow
- * path of prepare_vmcs02.
- */
- break;
- default:
- vmx->nested.dirty_vmcs12 = true;
- break;
- }
- }
-
- return nested_vmx_succeed(vcpu);
-}
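-
-/*
- * The switch at the end of handle_vmwrite() is built with an x-macro:
- * vmx_shadow_fields.h is a list of SHADOW_FIELD_RW()/SHADOW_FIELD_RO()
- * invocations, so
- *
- *	#define SHADOW_FIELD_RW(x) case x:
- *	#include "vmx_shadow_fields.h"
- *
- * expands every read/write shadow field into a case label (e.g.
- * "case GUEST_RIP:"). Only writes to fields outside that list mark
- * vmcs12 dirty; the shadowed fields are kept in sync via the
- * copy_{shadow_to_vmcs12,vmcs12_to_shadow}() helpers above.
- */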
-
-static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
-{
- vmx->nested.current_vmptr = vmptr;
- if (enable_shadow_vmcs) {
- vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
- SECONDARY_EXEC_SHADOW_VMCS);
- vmcs_write64(VMCS_LINK_POINTER,
- __pa(vmx->vmcs01.shadow_vmcs));
- vmx->nested.need_vmcs12_sync = true;
- }
- vmx->nested.dirty_vmcs12 = true;
-}
-
-/* Emulate the VMPTRLD instruction */
-static int handle_vmptrld(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- gpa_t vmptr;
-
- if (!nested_vmx_check_permission(vcpu))
- return 1;
-
- if (nested_vmx_get_vmptr(vcpu, &vmptr))
- return 1;
-
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
- return nested_vmx_failValid(vcpu,
- VMXERR_VMPTRLD_INVALID_ADDRESS);
-
- if (vmptr == vmx->nested.vmxon_ptr)
- return nested_vmx_failValid(vcpu,
- VMXERR_VMPTRLD_VMXON_POINTER);
-
- /* Forbid normal VMPTRLD if Enlightened version was used */
- if (vmx->nested.hv_evmcs)
- return 1;
-
- if (vmx->nested.current_vmptr != vmptr) {
- struct vmcs12 *new_vmcs12;
- struct page *page;
- page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
- if (is_error_page(page))
- return nested_vmx_failInvalid(vcpu);
-
- new_vmcs12 = kmap(page);
- if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
- (new_vmcs12->hdr.shadow_vmcs &&
- !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
- kunmap(page);
- kvm_release_page_clean(page);
- return nested_vmx_failValid(vcpu,
- VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
- }
-
- nested_release_vmcs12(vcpu);
-
- /*
- * Load VMCS12 from guest memory since it is not already
- * cached.
- */
- memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
- kunmap(page);
- kvm_release_page_clean(page);
-
- set_current_vmptr(vmx, vmptr);
- }
-
- return nested_vmx_succeed(vcpu);
-}
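-
-/*
- * Example of the VMPTRLD address check above: with
- * cpuid_maxphyaddr(vcpu) == 36, vmptr == 0x1000001000 (bit 36 set)
- * fails the "vmptr >> cpuid_maxphyaddr(vcpu)" test, and
- * vmptr == 0x1080 fails PAGE_ALIGNED(); both fail with
- * VMXERR_VMPTRLD_INVALID_ADDRESS instead of loading a VMCS.
- */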
-
-/*
- * This is the equivalent of the nested hypervisor executing the VMPTRLD
- * instruction.
- */
-static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
- bool from_launch)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct hv_vp_assist_page assist_page;
-
- if (likely(!vmx->nested.enlightened_vmcs_enabled))
- return 1;
-
- if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
- return 1;
-
- if (unlikely(!assist_page.enlighten_vmentry))
- return 1;
-
- if (unlikely(assist_page.current_nested_vmcs !=
- vmx->nested.hv_evmcs_vmptr)) {
-
- if (!vmx->nested.hv_evmcs)
- vmx->nested.current_vmptr = -1ull;
-
- nested_release_evmcs(vcpu);
-
- vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
- vcpu, assist_page.current_nested_vmcs);
-
- if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
- return 0;
-
- vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
-
- if (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION) {
- nested_release_evmcs(vcpu);
- return 0;
- }
-
- vmx->nested.dirty_vmcs12 = true;
- /*
- * As we keep L2 state for one guest only, the 'hv_clean_fields'
- * mask can't be trusted when L1 switches to a different eVMCS.
- * Reset it here for simplicity.
- */
- vmx->nested.hv_evmcs->hv_clean_fields &=
- ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
- vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
-
- /*
- * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
- * reloaded from the guest's memory (read-only fields, fields not
- * present in struct hv_enlightened_vmcs, ...). Make sure there
- * are no leftovers.
- */
- if (from_launch)
- memset(vmx->nested.cached_vmcs12, 0,
- sizeof(*vmx->nested.cached_vmcs12));
-
- }
- return 1;
-}
-
-/* Emulate the VMPTRST instruction */
-static int handle_vmptrst(struct kvm_vcpu *vcpu)
-{
- unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
- u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
- struct x86_exception e;
- gva_t gva;
-
- if (!nested_vmx_check_permission(vcpu))
- return 1;
-
- if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
- return 1;
-
- if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
- return 1;
- /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
- if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
- sizeof(gpa_t), &e)) {
- kvm_inject_page_fault(vcpu, &e);
- return 1;
- }
- return nested_vmx_succeed(vcpu);
-}
-
-/* Emulate the INVEPT instruction */
-static int handle_invept(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 vmx_instruction_info, types;
- unsigned long type;
- gva_t gva;
- struct x86_exception e;
- struct {
- u64 eptp, gpa;
- } operand;
-
- if (!(vmx->nested.msrs.secondary_ctls_high &
- SECONDARY_EXEC_ENABLE_EPT) ||
- !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
-
- if (!nested_vmx_check_permission(vcpu))
- return 1;
-
- vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
-
- types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
-
- if (type >= 32 || !(types & (1 << type)))
- return nested_vmx_failValid(vcpu,
- VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-
- /* According to the Intel VMX instruction reference, the memory
- * operand is read even if it isn't needed (e.g., for type==global)
- */
- if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmx_instruction_info, false, &gva))
- return 1;
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_page_fault(vcpu, &e);
- return 1;
- }
-
- switch (type) {
- case VMX_EPT_EXTENT_GLOBAL:
- /*
- * TODO: track mappings and invalidate
- * single context requests appropriately
- */
- case VMX_EPT_EXTENT_CONTEXT:
- kvm_mmu_sync_roots(vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- break;
- default:
- BUG_ON(1);
- break;
- }
-
- return nested_vmx_succeed(vcpu);
-}
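-
-/*
- * Worked example for the "types" computation above: with
- * VMX_EPT_EXTENT_SHIFT == 24, the supported-extent capability bits of
- * ept_caps land on bit 1 (single-context, type 1) and bit 2 (global,
- * type 2) after the shift, and "& 6" keeps exactly those two. A CPU
- * advertising only global INVEPT yields types == 4, so a
- * single-context request fails "types & (1 << type)" and returns
- * VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID.
- */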
-
-static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
-}
-
-static int handle_invvpid(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 vmx_instruction_info;
- unsigned long type, types;
- gva_t gva;
- struct x86_exception e;
- struct {
- u64 vpid;
- u64 gla;
- } operand;
- u16 vpid02;
-
- if (!(vmx->nested.msrs.secondary_ctls_high &
- SECONDARY_EXEC_ENABLE_VPID) ||
- !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
-
- if (!nested_vmx_check_permission(vcpu))
- return 1;
-
- vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
-
- types = (vmx->nested.msrs.vpid_caps &
- VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
-
- if (type >= 32 || !(types & (1 << type)))
- return nested_vmx_failValid(vcpu,
- VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-
- /* According to the Intel VMX instruction reference, the memory
- * operand is read even if it isn't needed (e.g., for type==global)
- */
- if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmx_instruction_info, false, &gva))
- return 1;
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_page_fault(vcpu, &e);
- return 1;
- }
- if (operand.vpid >> 16)
- return nested_vmx_failValid(vcpu,
- VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
-
- vpid02 = nested_get_vpid02(vcpu);
- switch (type) {
- case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
- if (!operand.vpid ||
- is_noncanonical_address(operand.gla, vcpu))
- return nested_vmx_failValid(vcpu,
- VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- if (cpu_has_vmx_invvpid_individual_addr()) {
- __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
- vpid02, operand.gla);
- } else
- __vmx_flush_tlb(vcpu, vpid02, false);
- break;
- case VMX_VPID_EXTENT_SINGLE_CONTEXT:
- case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
- if (!operand.vpid)
- return nested_vmx_failValid(vcpu,
- VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- __vmx_flush_tlb(vcpu, vpid02, false);
- break;
- case VMX_VPID_EXTENT_ALL_CONTEXT:
- __vmx_flush_tlb(vcpu, vpid02, false);
- break;
- default:
- WARN_ON_ONCE(1);
- return kvm_skip_emulated_instruction(vcpu);
- }
-
- return nested_vmx_succeed(vcpu);
-}
-
-static int handle_invpcid(struct kvm_vcpu *vcpu)
-{
- u32 vmx_instruction_info;
- unsigned long type;
- bool pcid_enabled;
- gva_t gva;
- struct x86_exception e;
- unsigned i;
- unsigned long roots_to_free = 0;
- struct {
- u64 pcid;
- u64 gla;
- } operand;
-
- if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
-
- vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
-
- if (type > 3) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
-
- /* According to the Intel instruction reference, the memory operand
- * is read even if it isn't needed (e.g., for type==all)
- */
- if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
- vmx_instruction_info, false, &gva))
- return 1;
-
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_page_fault(vcpu, &e);
- return 1;
- }
-
- if (operand.pcid >> 12 != 0) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
-
- pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
-
- switch (type) {
- case INVPCID_TYPE_INDIV_ADDR:
- if ((!pcid_enabled && (operand.pcid != 0)) ||
- is_noncanonical_address(operand.gla, vcpu)) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
- kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
- return kvm_skip_emulated_instruction(vcpu);
-
- case INVPCID_TYPE_SINGLE_CTXT:
- if (!pcid_enabled && (operand.pcid != 0)) {
- kvm_inject_gp(vcpu, 0);
- return 1;
- }
-
- if (kvm_get_active_pcid(vcpu) == operand.pcid) {
- kvm_mmu_sync_roots(vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- }
-
- for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
- == operand.pcid)
- roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
-
- kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
- /*
- * If neither the current cr3 nor any of the prev_roots use the
- * given PCID, then nothing needs to be done here because a
- * resync will happen anyway before switching to any other CR3.
- */
-
- return kvm_skip_emulated_instruction(vcpu);
-
- case INVPCID_TYPE_ALL_NON_GLOBAL:
- /*
- * Currently, KVM doesn't mark global entries in the shadow
- * page tables, so a non-global flush just degenerates to a
- * global flush. If needed, we could optimize this later by
- * keeping track of global entries in shadow page tables.
- */
-
- /* fall-through */
- case INVPCID_TYPE_ALL_INCL_GLOBAL:
- kvm_mmu_unload(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
-
- default:
- BUG(); /* We have already checked above that type <= 3 */
- }
-}
-
-static int handle_pml_full(struct kvm_vcpu *vcpu)
-{
- unsigned long exit_qualification;
-
- trace_kvm_pml_full(vcpu->vcpu_id);
-
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-
- /*
- * If the PML buffer was filled while executing IRET from an NMI,
- * the "blocked by NMI" bit has to be set before the next VM entry.
- */
- if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
- enable_vnmi &&
- (exit_qualification & INTR_INFO_UNBLOCK_NMI))
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
-
- /*
- * The PML buffer was already flushed at the beginning of the VMEXIT;
- * nothing to do here, and no userspace involvement is needed for PML.
- */
- return 1;
-}
-
-static int handle_preemption_timer(struct kvm_vcpu *vcpu)
-{
- if (!to_vmx(vcpu)->req_immediate_exit)
- kvm_lapic_expired_hv_timer(vcpu);
- return 1;
-}
-
-static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- int maxphyaddr = cpuid_maxphyaddr(vcpu);
-
- /* Check for memory type validity */
- switch (address & VMX_EPTP_MT_MASK) {
- case VMX_EPTP_MT_UC:
- if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
- return false;
- break;
- case VMX_EPTP_MT_WB:
- if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
- return false;
- break;
- default:
- return false;
- }
-
- /* Only a 4-level page-walk length is valid */
- if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
- return false;
-
- /* Reserved bits should not be set */
- if (address >> maxphyaddr || ((address >> 7) & 0x1f))
- return false;
-
- /* AD, if set, should be supported */
- if (address & VMX_EPTP_AD_ENABLE_BIT) {
- if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
- return false;
- }
-
- return true;
-}
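-
-/*
- * Illustrative EPTP layout behind the checks above: bits 2:0 hold the
- * memory type (6 == write-back), bits 5:3 hold "page-walk length - 1"
- * (3 == 4-level walk) and bit 6 enables accessed/dirty flags, so a
- * typical valid EPTP looks like
- *
- *	eptp = root_hpa | VMX_EPTP_MT_WB | VMX_EPTP_PWL_4
- *	       | VMX_EPTP_AD_ENABLE_BIT;	low bits == 0x5e
- *
- * and is rejected if, e.g., the UC memory type is used without
- * VMX_EPTP_UC_BIT being advertised in ept_caps.
- */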
-
-static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
- u64 address;
- bool accessed_dirty;
- struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
-
- if (!nested_cpu_has_eptp_switching(vmcs12) ||
- !nested_cpu_has_ept(vmcs12))
- return 1;
-
- if (index >= VMFUNC_EPTP_ENTRIES)
- return 1;
-
-
- if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
- &address, index * 8, 8))
- return 1;
-
- accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
-
- /*
- * If the (L2) guest does a vmfunc to the currently active EPT
- * pointer, there is nothing else to do.
- */
- if (vmcs12->ept_pointer != address) {
- if (!valid_ept_address(vcpu, address))
- return 1;
-
- kvm_mmu_unload(vcpu);
- mmu->ept_ad = accessed_dirty;
- mmu->mmu_role.base.ad_disabled = !accessed_dirty;
- vmcs12->ept_pointer = address;
- /*
- * TODO: Check what the correct approach is in case the MMU
- * reload fails. Currently, we just let the next reload
- * potentially fail.
- */
- kvm_mmu_reload(vcpu);
- }
-
- return 0;
-}
-
-static int handle_vmfunc(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct vmcs12 *vmcs12;
- u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
-
- /*
- * VMFUNC is only supported for nested guests, but we always enable the
- * secondary control for simplicity; for non-nested mode, fake that we
- * didn't by injecting #UD.
- */
- if (!is_guest_mode(vcpu)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
-
- vmcs12 = get_vmcs12(vcpu);
- if ((vmcs12->vm_function_control & (1 << function)) == 0)
- goto fail;
-
- switch (function) {
- case 0:
- if (nested_vmx_eptp_switching(vcpu, vmcs12))
- goto fail;
- break;
- default:
- goto fail;
- }
- return kvm_skip_emulated_instruction(vcpu);
-
-fail:
- nested_vmx_vmexit(vcpu, vmx->exit_reason,
- vmcs_read32(VM_EXIT_INTR_INFO),
- vmcs_readl(EXIT_QUALIFICATION));
- return 1;
-}
-
-static int handle_encls(struct kvm_vcpu *vcpu)
-{
- /*
- * SGX virtualization is not yet supported. There is no software
- * enable bit for SGX, so we have to trap ENCLS and inject a #UD
- * to prevent the guest from executing ENCLS.
- */
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
-}
-
-/*
- * The exit handlers return 1 if the exit was handled fully and guest execution
- * may resume. Otherwise they set the kvm_run parameter to indicate what needs
- * to be done to userspace and return 0.
- */
-static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
- [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
- [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
- [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
- [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
- [EXIT_REASON_IO_INSTRUCTION] = handle_io,
- [EXIT_REASON_CR_ACCESS] = handle_cr,
- [EXIT_REASON_DR_ACCESS] = handle_dr,
- [EXIT_REASON_CPUID] = handle_cpuid,
- [EXIT_REASON_MSR_READ] = handle_rdmsr,
- [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
- [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
- [EXIT_REASON_HLT] = handle_halt,
- [EXIT_REASON_INVD] = handle_invd,
- [EXIT_REASON_INVLPG] = handle_invlpg,
- [EXIT_REASON_RDPMC] = handle_rdpmc,
- [EXIT_REASON_VMCALL] = handle_vmcall,
- [EXIT_REASON_VMCLEAR] = handle_vmclear,
- [EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
- [EXIT_REASON_VMPTRLD] = handle_vmptrld,
- [EXIT_REASON_VMPTRST] = handle_vmptrst,
- [EXIT_REASON_VMREAD] = handle_vmread,
- [EXIT_REASON_VMRESUME] = handle_vmresume,
- [EXIT_REASON_VMWRITE] = handle_vmwrite,
- [EXIT_REASON_VMOFF] = handle_vmoff,
- [EXIT_REASON_VMON] = handle_vmon,
- [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
- [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
- [EXIT_REASON_APIC_WRITE] = handle_apic_write,
- [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
- [EXIT_REASON_WBINVD] = handle_wbinvd,
- [EXIT_REASON_XSETBV] = handle_xsetbv,
- [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
- [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
- [EXIT_REASON_GDTR_IDTR] = handle_desc,
- [EXIT_REASON_LDTR_TR] = handle_desc,
- [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
- [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
- [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
- [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
- [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap,
- [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
- [EXIT_REASON_INVEPT] = handle_invept,
- [EXIT_REASON_INVVPID] = handle_invvpid,
- [EXIT_REASON_RDRAND] = handle_invalid_op,
- [EXIT_REASON_RDSEED] = handle_invalid_op,
- [EXIT_REASON_XSAVES] = handle_xsaves,
- [EXIT_REASON_XRSTORS] = handle_xrstors,
- [EXIT_REASON_PML_FULL] = handle_pml_full,
- [EXIT_REASON_INVPCID] = handle_invpcid,
- [EXIT_REASON_VMFUNC] = handle_vmfunc,
- [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer,
- [EXIT_REASON_ENCLS] = handle_encls,
-};
-
-static const int kvm_vmx_max_exit_handlers =
- ARRAY_SIZE(kvm_vmx_exit_handlers);
-
-static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- unsigned long exit_qualification;
- gpa_t bitmap, last_bitmap;
- unsigned int port;
- int size;
- u8 b;
-
- if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
- return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
-
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-
- port = exit_qualification >> 16;
- size = (exit_qualification & 7) + 1;
-
- last_bitmap = (gpa_t)-1;
- b = -1;
-
- while (size > 0) {
- if (port < 0x8000)
- bitmap = vmcs12->io_bitmap_a;
- else if (port < 0x10000)
- bitmap = vmcs12->io_bitmap_b;
- else
- return true;
- bitmap += (port & 0x7fff) / 8;
-
- if (last_bitmap != bitmap)
- if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
- return true;
- if (b & (1 << (port & 7)))
- return true;
-
- port++;
- size--;
- last_bitmap = bitmap;
- }
-
- return false;
-}
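-
-/*
- * Worked example for the I/O-bitmap walk above: a one-byte access to
- * port 0x3f8 has port < 0x8000, so io_bitmap_a is consulted; the byte
- * at offset 0x3f8 / 8 == 127 is read from guest memory and bit
- * 0x3f8 % 8 == 0 decides whether L1 wants the exit. A two-byte access
- * also tests port 0x3f9, i.e. bit 1 of the same byte.
- */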
-
-/*
- * Return 1 if we should exit from L2 to L1 to handle an MSR access,
- * rather than handle it ourselves in L0. I.e., check whether L1 expressed
- * disinterest in the current event (read or write a specific MSR) by using an
- * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
- */
-static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12, u32 exit_reason)
-{
- u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
- gpa_t bitmap;
-
- if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
- return true;
-
- /*
- * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
- * for the four combinations of read/write and low/high MSR numbers.
- * First we need to figure out which of the four to use:
- */
- bitmap = vmcs12->msr_bitmap;
- if (exit_reason == EXIT_REASON_MSR_WRITE)
- bitmap += 2048;
- if (msr_index >= 0xc0000000) {
- msr_index -= 0xc0000000;
- bitmap += 1024;
- }
-
- /* Then read the msr_index'th bit from this bitmap: */
- if (msr_index < 1024*8) {
- unsigned char b;
- if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
- return true;
- return 1 & (b >> (msr_index & 7));
- } else
- return true; /* let L1 handle the wrong parameter */
-}
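-
-/*
- * Worked example for the MSR-bitmap lookup above: a WRMSR to MSR_EFER
- * (0xc0000080) moves the base to bitmap + 2048 (write half), then adds
- * 1024 for the high MSR range while rebasing msr_index to 0x80, so the
- * byte at bitmap + 3072 + 0x80 / 8 is read and bit 0x80 % 8 == 0 says
- * whether L1 intercepts the write.
- */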
-
-/*
- * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
- * rather than handle it ourselves in L0. I.e., check if L1 wanted to
- * intercept (via guest_host_mask etc.) the current event.
- */
-static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
- int cr = exit_qualification & 15;
- int reg;
- unsigned long val;
-
- switch ((exit_qualification >> 4) & 3) {
- case 0: /* mov to cr */
- reg = (exit_qualification >> 8) & 15;
- val = kvm_register_readl(vcpu, reg);
- switch (cr) {
- case 0:
- if (vmcs12->cr0_guest_host_mask &
- (val ^ vmcs12->cr0_read_shadow))
- return true;
- break;
- case 3:
- if ((vmcs12->cr3_target_count >= 1 &&
- vmcs12->cr3_target_value0 == val) ||
- (vmcs12->cr3_target_count >= 2 &&
- vmcs12->cr3_target_value1 == val) ||
- (vmcs12->cr3_target_count >= 3 &&
- vmcs12->cr3_target_value2 == val) ||
- (vmcs12->cr3_target_count >= 4 &&
- vmcs12->cr3_target_value3 == val))
- return false;
- if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
- return true;
- break;
- case 4:
- if (vmcs12->cr4_guest_host_mask &
- (vmcs12->cr4_read_shadow ^ val))
- return true;
- break;
- case 8:
- if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
- return true;
- break;
- }
- break;
- case 2: /* clts */
- if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
- (vmcs12->cr0_read_shadow & X86_CR0_TS))
- return true;
- break;
- case 1: /* mov from cr */
- switch (cr) {
- case 3:
- if (vmcs12->cpu_based_vm_exec_control &
- CPU_BASED_CR3_STORE_EXITING)
- return true;
- break;
- case 8:
- if (vmcs12->cpu_based_vm_exec_control &
- CPU_BASED_CR8_STORE_EXITING)
- return true;
- break;
- }
- break;
- case 3: /* lmsw */
- /*
- * lmsw can change bits 1..3 of cr0, and only set bit 0 of
- * cr0. Other attempted changes are ignored, with no exit.
- */
- val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
- if (vmcs12->cr0_guest_host_mask & 0xe &
- (val ^ vmcs12->cr0_read_shadow))
- return true;
- if ((vmcs12->cr0_guest_host_mask & 0x1) &&
- !(vmcs12->cr0_read_shadow & 0x1) &&
- (val & 0x1))
- return true;
- break;
- }
- return false;
-}
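-
-/*
- * Worked example for the "mov to cr" case above: if L1 set
- * cr0_guest_host_mask == X86_CR0_TS (0x8) with TS clear in
- * cr0_read_shadow, a "mov to cr0" that sets TS gives
- *
- *	cr0_guest_host_mask & (val ^ cr0_read_shadow)
- *		== 0x8 & (0x8 ^ 0x0) == 0x8 (non-zero)
- *
- * so the exit is reflected to L1, while writes to bits outside the
- * mask stay in L0 without a nested exit.
- */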
-
-static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12, gpa_t bitmap)
-{
- u32 vmx_instruction_info;
- unsigned long field;
- u8 b;
-
- if (!nested_cpu_has_shadow_vmcs(vmcs12))
- return true;
-
- /* Decode instruction info and find the field to access */
- vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
-
- /* Out-of-range fields always cause a VM exit from L2 to L1 */
- if (field >> 15)
- return true;
-
- if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
- return true;
-
- return 1 & (b >> (field & 7));
-}
-
-/*
- * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
- * should handle it ourselves in L0 (and then continue L2). Only call this
- * when in is_guest_mode (L2).
- */
-static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
-{
- u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
- if (vmx->nested.nested_run_pending)
- return false;
-
- if (unlikely(vmx->fail)) {
- pr_info_ratelimited("%s failed vm entry %x\n", __func__,
- vmcs_read32(VM_INSTRUCTION_ERROR));
- return true;
- }
-
- /*
- * The host physical addresses of some pages of guest memory
- * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
- * Page). The CPU may write to these pages via their host
- * physical address while L2 is running, bypassing any
- * address-translation-based dirty tracking (e.g. EPT write
- * protection).
- *
- * Mark them dirty on every exit from L2 to prevent them from
- * getting out of sync with dirty tracking.
- */
- nested_mark_vmcs12_pages_dirty(vcpu);
-
- trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
- vmcs_readl(EXIT_QUALIFICATION),
- vmx->idt_vectoring_info,
- intr_info,
- vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
- KVM_ISA_VMX);
-
- switch (exit_reason) {
- case EXIT_REASON_EXCEPTION_NMI:
- if (is_nmi(intr_info))
- return false;
- else if (is_page_fault(intr_info))
- return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
- else if (is_debug(intr_info) &&
- vcpu->guest_debug &
- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
- return false;
- else if (is_breakpoint(intr_info) &&
- vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
- return false;
- return vmcs12->exception_bitmap &
- (1u << (intr_info & INTR_INFO_VECTOR_MASK));
- case EXIT_REASON_EXTERNAL_INTERRUPT:
- return false;
- case EXIT_REASON_TRIPLE_FAULT:
- return true;
- case EXIT_REASON_PENDING_INTERRUPT:
- return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
- case EXIT_REASON_NMI_WINDOW:
- return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
- case EXIT_REASON_TASK_SWITCH:
- return true;
- case EXIT_REASON_CPUID:
- return true;
- case EXIT_REASON_HLT:
- return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
- case EXIT_REASON_INVD:
- return true;
- case EXIT_REASON_INVLPG:
- return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
- case EXIT_REASON_RDPMC:
- return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
- case EXIT_REASON_RDRAND:
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
- case EXIT_REASON_RDSEED:
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
- case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
- return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
- case EXIT_REASON_VMREAD:
- return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
- vmcs12->vmread_bitmap);
- case EXIT_REASON_VMWRITE:
- return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
- vmcs12->vmwrite_bitmap);
- case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
- case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
- case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
- case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
- case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
- /*
- * VMX instructions trap unconditionally. This allows L1 to
- * emulate them for its L2 guest, i.e., allows 3-level nesting!
- */
- return true;
- case EXIT_REASON_CR_ACCESS:
- return nested_vmx_exit_handled_cr(vcpu, vmcs12);
- case EXIT_REASON_DR_ACCESS:
- return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
- case EXIT_REASON_IO_INSTRUCTION:
- return nested_vmx_exit_handled_io(vcpu, vmcs12);
- case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
- case EXIT_REASON_MSR_READ:
- case EXIT_REASON_MSR_WRITE:
- return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
- case EXIT_REASON_INVALID_STATE:
- return true;
- case EXIT_REASON_MWAIT_INSTRUCTION:
- return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
- case EXIT_REASON_MONITOR_TRAP_FLAG:
- return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
- case EXIT_REASON_MONITOR_INSTRUCTION:
- return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
- case EXIT_REASON_PAUSE_INSTRUCTION:
- return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
- nested_cpu_has2(vmcs12,
- SECONDARY_EXEC_PAUSE_LOOP_EXITING);
- case EXIT_REASON_MCE_DURING_VMENTRY:
- return false;
- case EXIT_REASON_TPR_BELOW_THRESHOLD:
- return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
- case EXIT_REASON_APIC_ACCESS:
- case EXIT_REASON_APIC_WRITE:
- case EXIT_REASON_EOI_INDUCED:
- /*
- * The controls for "virtualize APIC accesses," "APIC-
- * register virtualization," and "virtual-interrupt
- * delivery" only come from vmcs12.
- */
- return true;
- case EXIT_REASON_EPT_VIOLATION:
- /*
- * L0 always deals with the EPT violation. If nested EPT is
- * used, and the nested mmu code discovers that the address is
- * missing in the guest EPT table (EPT12), the EPT violation
- * will be injected with nested_ept_inject_page_fault()
- */
- return false;
- case EXIT_REASON_EPT_MISCONFIG:
- /*
- * L2 never directly uses L1's EPT, but rather L0's own EPT
- * table (shadow on EPT) or a merged EPT table that L0 built
- * (EPT on EPT). So any problems with the structure of the
- * table are L0's fault.
- */
- return false;
- case EXIT_REASON_INVPCID:
- return
- nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
- nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
- case EXIT_REASON_WBINVD:
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
- case EXIT_REASON_XSETBV:
- return true;
- case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
- /*
- * This should never happen, since it is not possible to
- * set XSS to a non-zero value---neither in L1 nor in L2.
- * If it were, XSS would have to be checked against
- * the XSS exit bitmap in vmcs12.
- */
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
- case EXIT_REASON_PREEMPTION_TIMER:
- return false;
- case EXIT_REASON_PML_FULL:
- /* We emulate PML support to L1. */
- return false;
- case EXIT_REASON_VMFUNC:
- /* VM functions are emulated through L2->L0 vmexits. */
- return false;
- case EXIT_REASON_ENCLS:
- /* SGX is never exposed to L1 */
- return false;
- default:
- return true;
- }
-}
-
-static int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason)
-{
- u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-
- /*
- * At this point, the exit interruption info in exit_intr_info
- * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT
- * we need to query the in-kernel LAPIC.
- */
- WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
- if ((exit_intr_info &
- (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
- (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- vmcs12->vm_exit_intr_error_code =
- vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
- }
-
- nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
- vmcs_readl(EXIT_QUALIFICATION));
- return 1;
-}
-
-static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
-{
- *info1 = vmcs_readl(EXIT_QUALIFICATION);
- *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
-}
-
-static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
-{
- if (vmx->pml_pg) {
- __free_page(vmx->pml_pg);
- vmx->pml_pg = NULL;
- }
-}
-
-static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u64 *pml_buf;
- u16 pml_idx;
-
- pml_idx = vmcs_read16(GUEST_PML_INDEX);
-
- /* Do nothing if PML buffer is empty */
- if (pml_idx == (PML_ENTITY_NUM - 1))
- return;
-
- /* PML index always points to the next available PML buffer entity */
- if (pml_idx >= PML_ENTITY_NUM)
- pml_idx = 0;
- else
- pml_idx++;
-
- pml_buf = page_address(vmx->pml_pg);
- for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
- u64 gpa;
-
- gpa = pml_buf[pml_idx];
- WARN_ON(gpa & (PAGE_SIZE - 1));
- kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
- }
-
- /* reset PML index */
- vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
-}
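-
-/*
- * A note on the PML index arithmetic above (PML_ENTITY_NUM == 512):
- * hardware fills the buffer top-down, decrementing the index, so
- * index == 511 means "empty"; after logging e.g. three GPAs the index
- * is 508 and the valid entries sit in slots 509..511. A completely
- * full buffer leaves the index underflowed (>= PML_ENTITY_NUM when
- * read back), which is why the walk restarts at slot 0 in that case.
- */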
-
-/*
- * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap.
- * Called before reporting dirty_bitmap to userspace.
- */
-static void kvm_flush_pml_buffers(struct kvm *kvm)
-{
- int i;
- struct kvm_vcpu *vcpu;
- /*
- * We only need to kick the vcpus out of guest mode here, as the PML
- * buffer is flushed at the beginning of every VMEXIT; only vcpus
- * running in guest mode can have unflushed GPAs in their PML
- * buffers.
- */
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vcpu_kick(vcpu);
-}
-
-static void vmx_dump_sel(char *name, uint32_t sel)
-{
- pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
- name, vmcs_read16(sel),
- vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
- vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
- vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
-}
-
-static void vmx_dump_dtsel(char *name, uint32_t limit)
-{
- pr_err("%s limit=0x%08x, base=0x%016lx\n",
- name, vmcs_read32(limit),
- vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
-}
-
-static void dump_vmcs(void)
-{
- u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
- u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
- u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
- u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
- u32 secondary_exec_control = 0;
- unsigned long cr4 = vmcs_readl(GUEST_CR4);
- u64 efer = vmcs_read64(GUEST_IA32_EFER);
- int i, n;
-
- if (cpu_has_secondary_exec_ctrls())
- secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
-
- pr_err("*** Guest State ***\n");
- pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
- vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
- vmcs_readl(CR0_GUEST_HOST_MASK));
- pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
- cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
- pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
- if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
- (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
- {
- pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
- vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
- pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
- vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
- }
- pr_err("RSP = 0x%016lx RIP = 0x%016lx\n",
- vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
- pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n",
- vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
- pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
- vmcs_readl(GUEST_SYSENTER_ESP),
- vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
- vmx_dump_sel("CS: ", GUEST_CS_SELECTOR);
- vmx_dump_sel("DS: ", GUEST_DS_SELECTOR);
- vmx_dump_sel("SS: ", GUEST_SS_SELECTOR);
- vmx_dump_sel("ES: ", GUEST_ES_SELECTOR);
- vmx_dump_sel("FS: ", GUEST_FS_SELECTOR);
- vmx_dump_sel("GS: ", GUEST_GS_SELECTOR);
- vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
- vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
- vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
- vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
- if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
- (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
- pr_err("EFER = 0x%016llx PAT = 0x%016llx\n",
- efer, vmcs_read64(GUEST_IA32_PAT));
- pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n",
- vmcs_read64(GUEST_IA32_DEBUGCTL),
- vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
- if (cpu_has_load_perf_global_ctrl &&
- vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
- pr_err("PerfGlobCtl = 0x%016llx\n",
- vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
- if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
- pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
- pr_err("Interruptibility = %08x ActivityState = %08x\n",
- vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
- vmcs_read32(GUEST_ACTIVITY_STATE));
- if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
- pr_err("InterruptStatus = %04x\n",
- vmcs_read16(GUEST_INTR_STATUS));
-
- pr_err("*** Host State ***\n");
- pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
- vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
- pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
- vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
- vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
- vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
- vmcs_read16(HOST_TR_SELECTOR));
- pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
- vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
- vmcs_readl(HOST_TR_BASE));
- pr_err("GDTBase=%016lx IDTBase=%016lx\n",
- vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
- pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
- vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
- vmcs_readl(HOST_CR4));
- pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
- vmcs_readl(HOST_IA32_SYSENTER_ESP),
- vmcs_read32(HOST_IA32_SYSENTER_CS),
- vmcs_readl(HOST_IA32_SYSENTER_EIP));
- if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
- pr_err("EFER = 0x%016llx PAT = 0x%016llx\n",
- vmcs_read64(HOST_IA32_EFER),
- vmcs_read64(HOST_IA32_PAT));
- if (cpu_has_load_perf_global_ctrl &&
- vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
- pr_err("PerfGlobCtl = 0x%016llx\n",
- vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
-
- pr_err("*** Control State ***\n");
- pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
- pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
- pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
- pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
- vmcs_read32(EXCEPTION_BITMAP),
- vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
- vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
- pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
- vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
- vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
- vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
- pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
- vmcs_read32(VM_EXIT_INTR_INFO),
- vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
- vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
- pr_err(" reason=%08x qualification=%016lx\n",
- vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
- pr_err("IDTVectoring: info=%08x errcode=%08x\n",
- vmcs_read32(IDT_VECTORING_INFO_FIELD),
- vmcs_read32(IDT_VECTORING_ERROR_CODE));
- pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
- if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
- pr_err("TSC Multiplier = 0x%016llx\n",
- vmcs_read64(TSC_MULTIPLIER));
- if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
- pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
- if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
- pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
- if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
- pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
- n = vmcs_read32(CR3_TARGET_COUNT);
- for (i = 0; i + 1 < n; i += 4)
- pr_err("CR3 target%u=%016lx target%u=%016lx\n",
- i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
- i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
- if (i < n)
- pr_err("CR3 target%u=%016lx\n",
- i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
- if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
- pr_err("PLE Gap=%08x Window=%08x\n",
- vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
- if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
- pr_err("Virtual processor ID = 0x%04x\n",
- vmcs_read16(VIRTUAL_PROCESSOR_ID));
-}
-
-/*
- * The guest has exited. See if we can fix it or if we need userspace
- * assistance.
- */
-static int vmx_handle_exit(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 exit_reason = vmx->exit_reason;
- u32 vectoring_info = vmx->idt_vectoring_info;
-
- trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
-
- /*
- * Flush the PML buffer of logged GPAs; this keeps dirty_bitmap up to
- * date. As a bonus, before querying dirty_bitmap,
- * kvm_vm_ioctl_get_dirty_log only needs to kick all vcpus out of
- * guest mode: if a vcpu is in root mode, its PML buffer must already
- * have been flushed.
- */
- if (enable_pml)
- vmx_flush_pml_buffer(vcpu);
-
- /* If guest state is invalid, start emulating */
- if (vmx->emulation_required)
- return handle_invalid_guest_state(vcpu);
-
- if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
- return nested_vmx_reflect_vmexit(vcpu, exit_reason);
-
- if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
- dump_vmcs();
- vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- vcpu->run->fail_entry.hardware_entry_failure_reason
- = exit_reason;
- return 0;
- }
-
- if (unlikely(vmx->fail)) {
- vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- vcpu->run->fail_entry.hardware_entry_failure_reason
- = vmcs_read32(VM_INSTRUCTION_ERROR);
- return 0;
- }
-
- /*
- * Note:
- * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by
- * event delivery, since that indicates the guest is accessing MMIO.
- * The VM-exit would be triggered again after returning to the
- * guest, causing an infinite loop.
- */
- if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
- (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
- exit_reason != EXIT_REASON_EPT_VIOLATION &&
- exit_reason != EXIT_REASON_PML_FULL &&
- exit_reason != EXIT_REASON_TASK_SWITCH)) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
- vcpu->run->internal.ndata = 3;
- vcpu->run->internal.data[0] = vectoring_info;
- vcpu->run->internal.data[1] = exit_reason;
- vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
- if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
- vcpu->run->internal.ndata++;
- vcpu->run->internal.data[3] =
- vmcs_read64(GUEST_PHYSICAL_ADDRESS);
- }
- return 0;
- }
-
- if (unlikely(!enable_vnmi &&
- vmx->loaded_vmcs->soft_vnmi_blocked)) {
- if (vmx_interrupt_allowed(vcpu)) {
- vmx->loaded_vmcs->soft_vnmi_blocked = 0;
- } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
- vcpu->arch.nmi_pending) {
- /*
- * This CPU doesn't help us find the end of an
- * NMI-blocked window if the guest runs with IRQs
- * disabled. So we pull the trigger after 1 s of
- * futile waiting, but inform the user about this.
- */
- printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
- "state on VCPU %d after 1 s timeout\n",
- __func__, vcpu->vcpu_id);
- vmx->loaded_vmcs->soft_vnmi_blocked = 0;
- }
- }
-
- if (exit_reason < kvm_vmx_max_exit_handlers
- && kvm_vmx_exit_handlers[exit_reason])
- return kvm_vmx_exit_handlers[exit_reason](vcpu);
- else {
- vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
- exit_reason);
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
-}
-
-/*
- * Software based L1D cache flush which is used when microcode providing
- * the cache control MSR is not loaded.
- *
- * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
- * flushing it requires reading in 64 KiB because the replacement
- * algorithm is not exactly LRU. This could be sized at runtime via
- * topology information, but as all relevant affected CPUs have a
- * 32 KiB L1D cache there is no point in doing so.
- */
-static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
-{
- int size = PAGE_SIZE << L1D_CACHE_ORDER;
-
- /*
- * This code is only executed when the flush mode is 'cond' or
- * 'always'
- */
- if (static_branch_likely(&vmx_l1d_flush_cond)) {
- bool flush_l1d;
-
- /*
- * Clear the per-vcpu flush bit; it gets set again
- * either from vcpu_run() or from one of the unsafe
- * VMEXIT handlers.
- */
- flush_l1d = vcpu->arch.l1tf_flush_l1d;
- vcpu->arch.l1tf_flush_l1d = false;
-
- /*
- * Clear the per-cpu flush bit; it gets set again from
- * the interrupt handlers.
- */
- flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
- kvm_clear_cpu_l1tf_flush_l1d();
-
- if (!flush_l1d)
- return;
- }
-
- vcpu->stat.l1d_flush++;
-
- if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
- wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
- return;
- }
-
- asm volatile(
- /* First ensure the pages are in the TLB */
- "xorl %%eax, %%eax\n"
- ".Lpopulate_tlb:\n\t"
- "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
- "addl $4096, %%eax\n\t"
- "cmpl %%eax, %[size]\n\t"
- "jne .Lpopulate_tlb\n\t"
- "xorl %%eax, %%eax\n\t"
- "cpuid\n\t"
- /* Now fill the cache */
- "xorl %%eax, %%eax\n"
- ".Lfill_cache:\n"
- "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
- "addl $64, %%eax\n\t"
- "cmpl %%eax, %[size]\n\t"
- "jne .Lfill_cache\n\t"
- "lfence\n"
- :: [flush_pages] "r" (vmx_l1d_flush_pages),
- [size] "r" (size)
- : "eax", "ebx", "ecx", "edx");
-}
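-
-/*
- * Illustrative sketch, not part of the original file: the asm sequence in
- * vmx_l1d_flush() is roughly equivalent to the plain C below, which cannot
- * be used directly because the compiler may reorder or elide the loads.
- * All names here are hypothetical.
- */
-#if 0
-static void vmx_l1d_flush_sketch(const char *flush_pages, int size)
-{
- int i;
-
- /* First touch one byte per 4 KiB page so the pages are in the TLB */
- for (i = 0; i < size; i += 4096)
-  (void)READ_ONCE(flush_pages[i]);
- /* Then read one byte per 64-byte line to displace the entire L1D */
- for (i = 0; i < size; i += 64)
-  (void)READ_ONCE(flush_pages[i]);
-}
-#endif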
-
-static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
- if (is_guest_mode(vcpu) &&
- nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
- return;
-
- if (irr == -1 || tpr < irr) {
- vmcs_write32(TPR_THRESHOLD, 0);
- return;
- }
-
- vmcs_write32(TPR_THRESHOLD, irr);
-}
-
-static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
-{
- u32 sec_exec_control;
-
- if (!lapic_in_kernel(vcpu))
- return;
-
- if (!flexpriority_enabled &&
- !cpu_has_vmx_virtualize_x2apic_mode())
- return;
-
- /* Postpone execution until vmcs01 is the current VMCS. */
- if (is_guest_mode(vcpu)) {
- to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
- return;
- }
-
- sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
- sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
-
- switch (kvm_get_apic_mode(vcpu)) {
- case LAPIC_MODE_INVALID:
- WARN_ONCE(true, "Invalid local APIC state");
- case LAPIC_MODE_DISABLED:
- break;
- case LAPIC_MODE_XAPIC:
- if (flexpriority_enabled) {
- sec_exec_control |=
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
- vmx_flush_tlb(vcpu, true);
- }
- break;
- case LAPIC_MODE_X2APIC:
- if (cpu_has_vmx_virtualize_x2apic_mode())
- sec_exec_control |=
- SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
- break;
- }
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
-
- vmx_update_msr_bitmap(vcpu);
-}
-
-static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
-{
- if (!is_guest_mode(vcpu)) {
- vmcs_write64(APIC_ACCESS_ADDR, hpa);
- vmx_flush_tlb(vcpu, true);
- }
-}
-
-static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
-{
- u16 status;
- u8 old;
-
- if (max_isr == -1)
- max_isr = 0;
-
- status = vmcs_read16(GUEST_INTR_STATUS);
- old = status >> 8;
- if (max_isr != old) {
- status &= 0xff;
- status |= max_isr << 8;
- vmcs_write16(GUEST_INTR_STATUS, status);
- }
-}
-
-static void vmx_set_rvi(int vector)
-{
- u16 status;
- u8 old;
-
- if (vector == -1)
- vector = 0;
-
- status = vmcs_read16(GUEST_INTR_STATUS);
- old = (u8)status & 0xff;
- if ((u8)vector != old) {
- status &= ~0xff;
- status |= (u8)vector;
- vmcs_write16(GUEST_INTR_STATUS, status);
- }
-}
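-
-/*
- * Layout note with a small sketch, not part of the original file:
- * GUEST_INTR_STATUS is a 16-bit field whose low byte is RVI and whose high
- * byte is SVI, which is why the two helpers above mask opposite halves.
- * Hypothetical accessors:
- */
-#if 0
-static inline u8 guest_intr_status_rvi(u16 status) { return status & 0xff; }
-static inline u8 guest_intr_status_svi(u16 status) { return status >> 8; }
-#endif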
-
-static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
-{
- /*
- * When running L2, updating RVI is only relevant when vmcs12
- * virtual-interrupt-delivery is enabled. However, that can only be
- * enabled when L1 also intercepts external interrupts, in which case
- * we should not update vmcs02's RVI but instead intercept the
- * interrupt. Therefore, do nothing when running L2.
- */
- if (!is_guest_mode(vcpu))
- vmx_set_rvi(max_irr);
-}
-
-static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- int max_irr;
- bool max_irr_updated;
-
- WARN_ON(!vcpu->arch.apicv_active);
- if (pi_test_on(&vmx->pi_desc)) {
- pi_clear_on(&vmx->pi_desc);
- /*
- * IOMMU can write to PIR.ON, so the barrier matters even on UP.
- * But on x86 this is just a compiler barrier anyway.
- */
- smp_mb__after_atomic();
- max_irr_updated =
- kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
-
- /*
- * If we are running L2 and L1 has a new pending interrupt
- * which can be injected, we should re-evaluate what should
- * be done with this new L1 interrupt. If L1 intercepts
- * external interrupts, we should exit from L2 to L1.
- * Otherwise, the interrupt should be delivered directly to L2.
- */
- if (is_guest_mode(vcpu) && max_irr_updated) {
- if (nested_exit_on_intr(vcpu))
- kvm_vcpu_exiting_guest_mode(vcpu);
- else
- kvm_make_request(KVM_REQ_EVENT, vcpu);
- }
- } else {
- max_irr = kvm_lapic_find_highest_irr(vcpu);
- }
- vmx_hwapic_irr_update(vcpu, max_irr);
- return max_irr;
-}
-
-static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
-{
- u8 rvi = vmx_get_rvi();
- u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
-
- return ((rvi & 0xf0) > (vppr & 0xf0));
-}
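-
-/*
- * Worked example, not part of the original file: APIC priority compares
- * only the upper nibble (the priority class) of the 8-bit vector/PPR
- * values, hence the "& 0xf0" above. E.g. RVI = 0x61 (class 6) beats
- * VPPR = 0x5f (class 5) because (0x61 & 0xf0) > (0x5f & 0xf0), so a
- * pending APICv interrupt would be reported.
- */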
-
-static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
-{
- if (!kvm_vcpu_apicv_active(vcpu))
- return;
-
- vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
- vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
- vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
- vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
-}
-
-static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- pi_clear_on(&vmx->pi_desc);
- memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
-}
-
-static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
-{
- u32 exit_intr_info = 0;
- u16 basic_exit_reason = (u16)vmx->exit_reason;
-
- if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
- || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
- return;
-
- if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
- vmx->exit_intr_info = exit_intr_info;
-
- /* If the exit was due to a page fault, check for an async PF */
- if (is_page_fault(exit_intr_info))
- vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
-
- /* Handle machine checks before interrupts are enabled */
- if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
- is_machine_check(exit_intr_info))
- kvm_machine_check();
-
- /* We need to handle NMIs before interrupts are enabled */
- if (is_nmi(exit_intr_info)) {
- kvm_before_interrupt(&vmx->vcpu);
- asm("int $2");
- kvm_after_interrupt(&vmx->vcpu);
- }
-}
-
-static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
-{
- u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-
- if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
- == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
- unsigned int vector;
- unsigned long entry;
- gate_desc *desc;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-#ifdef CONFIG_X86_64
- unsigned long tmp;
-#endif
-
- vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
- desc = (gate_desc *)vmx->host_idt_base + vector;
- entry = gate_offset(desc);
- asm volatile(
-#ifdef CONFIG_X86_64
- "mov %%" _ASM_SP ", %[sp]\n\t"
- "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
- "push $%c[ss]\n\t"
- "push %[sp]\n\t"
-#endif
- "pushf\n\t"
- __ASM_SIZE(push) " $%c[cs]\n\t"
- CALL_NOSPEC
- :
-#ifdef CONFIG_X86_64
- [sp]"=&r"(tmp),
-#endif
- ASM_CALL_CONSTRAINT
- :
- THUNK_TARGET(entry),
- [ss]"i"(__KERNEL_DS),
- [cs]"i"(__KERNEL_CS)
- );
- }
-}
-STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
-
-static bool vmx_has_emulated_msr(int index)
-{
- switch (index) {
- case MSR_IA32_SMBASE:
- /*
- * We cannot do SMM unless we can run the guest in big
- * real mode.
- */
- return enable_unrestricted_guest || emulate_invalid_guest_state;
- case MSR_AMD64_VIRT_SPEC_CTRL:
- /* This is AMD only. */
- return false;
- default:
- return true;
- }
-}
-
-static bool vmx_mpx_supported(void)
-{
- return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
- (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
-}
-
-static bool vmx_xsaves_supported(void)
-{
- return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_XSAVES;
-}
-
-static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
-{
- u32 exit_intr_info;
- bool unblock_nmi;
- u8 vector;
- bool idtv_info_valid;
-
- idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
-
- if (enable_vnmi) {
- if (vmx->loaded_vmcs->nmi_known_unmasked)
- return;
- /*
- * Can't use vmx->exit_intr_info since we're not sure what
- * the exit reason is.
- */
- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
- unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
- vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
- /*
- * SDM 3: 27.7.1.2 (September 2008)
- * Re-set bit "block by NMI" before VM entry if vmexit caused by
- * a guest IRET fault.
- * SDM 3: 23.2.2 (September 2008)
- * Bit 12 is undefined in any of the following cases:
- * If the VM exit sets the valid bit in the IDT-vectoring
- * information field.
- * If the VM exit is due to a double fault.
- */
- if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
- vector != DF_VECTOR && !idtv_info_valid)
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
- else
- vmx->loaded_vmcs->nmi_known_unmasked =
- !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
- & GUEST_INTR_STATE_NMI);
- } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
- vmx->loaded_vmcs->vnmi_blocked_time +=
- ktime_to_ns(ktime_sub(ktime_get(),
- vmx->loaded_vmcs->entry_time));
-}
-
-static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
- u32 idt_vectoring_info,
- int instr_len_field,
- int error_code_field)
-{
- u8 vector;
- int type;
- bool idtv_info_valid;
-
- idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
-
- vcpu->arch.nmi_injected = false;
- kvm_clear_exception_queue(vcpu);
- kvm_clear_interrupt_queue(vcpu);
-
- if (!idtv_info_valid)
- return;
-
- kvm_make_request(KVM_REQ_EVENT, vcpu);
-
- vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
- type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
-
- switch (type) {
- case INTR_TYPE_NMI_INTR:
- vcpu->arch.nmi_injected = true;
- /*
- * SDM 3: 27.7.1.2 (September 2008)
- * Clear bit "block by NMI" before VM entry if a NMI
- * delivery faulted.
- */
- vmx_set_nmi_mask(vcpu, false);
- break;
- case INTR_TYPE_SOFT_EXCEPTION:
- vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
- /* fall through */
- case INTR_TYPE_HARD_EXCEPTION:
- if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
- u32 err = vmcs_read32(error_code_field);
- kvm_requeue_exception_e(vcpu, vector, err);
- } else
- kvm_requeue_exception(vcpu, vector);
- break;
- case INTR_TYPE_SOFT_INTR:
- vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
- /* fall through */
- case INTR_TYPE_EXT_INTR:
- kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
- break;
- default:
- break;
- }
-}
-
-static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
-{
- __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
- VM_EXIT_INSTRUCTION_LEN,
- IDT_VECTORING_ERROR_CODE);
-}
-
-static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
-{
- __vmx_complete_interrupts(vcpu,
- vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
- VM_ENTRY_INSTRUCTION_LEN,
- VM_ENTRY_EXCEPTION_ERROR_CODE);
-
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
-}
-
-static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
-{
- int i, nr_msrs;
- struct perf_guest_switch_msr *msrs;
-
- msrs = perf_guest_get_msrs(&nr_msrs);
-
- if (!msrs)
- return;
-
- for (i = 0; i < nr_msrs; i++)
- if (msrs[i].host == msrs[i].guest)
- clear_atomic_switch_msr(vmx, msrs[i].msr);
- else
- add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
- msrs[i].host, false);
-}
-
-static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
-{
- vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
- if (!vmx->loaded_vmcs->hv_timer_armed)
- vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
- vmx->loaded_vmcs->hv_timer_armed = true;
-}
-
-static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u64 tscl;
- u32 delta_tsc;
-
- if (vmx->req_immediate_exit) {
- vmx_arm_hv_timer(vmx, 0);
- return;
- }
-
- if (vmx->hv_deadline_tsc != -1) {
- tscl = rdtsc();
- if (vmx->hv_deadline_tsc > tscl)
- /* set_hv_timer ensures the delta fits in 32-bits */
- delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
- cpu_preemption_timer_multi);
- else
- delta_tsc = 0;
-
- vmx_arm_hv_timer(vmx, delta_tsc);
- return;
- }
-
- if (vmx->loaded_vmcs->hv_timer_armed)
- vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
- vmx->loaded_vmcs->hv_timer_armed = false;
-}
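-
-/*
- * Illustrative sketch, not part of the original file: the VMX preemption
- * timer ticks at the TSC rate divided by 2^cpu_preemption_timer_multi, so
- * the remaining TSC delta is scaled down by that shift before being armed.
- * Hypothetical helper, assuming the delta is known to fit in 32 bits:
- */
-#if 0
-static inline u32 tsc_delta_to_timer_ticks(u64 deadline, u64 now, int shift)
-{
- return deadline > now ? (u32)((deadline - now) >> shift) : 0;
-}
-#endif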
-
-static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long cr3, cr4, evmcs_rsp;
-
- /* Record the guest's net vcpu time for enforced NMI injections. */
- if (unlikely(!enable_vnmi &&
- vmx->loaded_vmcs->soft_vnmi_blocked))
- vmx->loaded_vmcs->entry_time = ktime_get();
-
- /* Don't enter VMX if guest state is invalid; let the exit handler
-    start emulation until we arrive back at a valid state. */
- if (vmx->emulation_required)
- return;
-
- if (vmx->ple_window_dirty) {
- vmx->ple_window_dirty = false;
- vmcs_write32(PLE_WINDOW, vmx->ple_window);
- }
-
- if (vmx->nested.need_vmcs12_sync) {
- /*
- * hv_evmcs may end up not being mapped after migration (when
- * L2 was running); map it here to make sure vmcs12 changes
- * are properly reflected.
- */
- if (vmx->nested.enlightened_vmcs_enabled &&
- !vmx->nested.hv_evmcs)
- nested_vmx_handle_enlightened_vmptrld(vcpu, false);
-
- if (vmx->nested.hv_evmcs) {
- copy_vmcs12_to_enlightened(vmx);
- /* All fields are clean */
- vmx->nested.hv_evmcs->hv_clean_fields |=
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
- } else {
- copy_vmcs12_to_shadow(vmx);
- }
- vmx->nested.need_vmcs12_sync = false;
- }
-
- if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
- vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
- if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
- vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
-
- cr3 = __get_current_cr3_fast();
- if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
- vmcs_writel(HOST_CR3, cr3);
- vmx->loaded_vmcs->host_state.cr3 = cr3;
- }
-
- cr4 = cr4_read_shadow();
- if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
- vmcs_writel(HOST_CR4, cr4);
- vmx->loaded_vmcs->host_state.cr4 = cr4;
- }
-
- /* When single-stepping over STI and MOV SS, we must clear the
-  * corresponding interruptibility bits in the guest state. Otherwise
-  * vmentry fails as it then expects bit 14 (BS) to be set in the
-  * pending debug exceptions, but that's not correct for the guest
-  * debugging case. */
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- vmx_set_interrupt_shadow(vcpu, 0);
-
- if (static_cpu_has(X86_FEATURE_PKU) &&
- kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
- vcpu->arch.pkru != vmx->host_pkru)
- __write_pkru(vcpu->arch.pkru);
-
- atomic_switch_perf_msrs(vmx);
-
- vmx_update_hv_timer(vcpu);
-
- /*
- * If this vCPU has touched SPEC_CTRL, restore the guest's value if
- * it's non-zero. Since vmentry is serialising on affected CPUs, there
- * is no need to worry about the conditional branch over the wrmsr
- * being speculatively taken.
- */
- x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
-
- vmx->__launched = vmx->loaded_vmcs->launched;
-
- evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
- (unsigned long)&current_evmcs->host_rsp : 0;
-
- if (static_branch_unlikely(&vmx_l1d_should_flush))
- vmx_l1d_flush(vcpu);
-
- asm(
- /* Store host registers */
- "push %%" _ASM_DX "; push %%" _ASM_BP ";"
- "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
- "push %%" _ASM_CX " \n\t"
- "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
- "je 1f \n\t"
- "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
- /* Avoid VMWRITE when Enlightened VMCS is in use */
- "test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
- "jz 2f \n\t"
- "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
- "jmp 1f \n\t"
- "2: \n\t"
- __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
- "1: \n\t"
- /* Reload cr2 if changed */
- "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
- "mov %%cr2, %%" _ASM_DX " \n\t"
- "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
- "je 3f \n\t"
- "mov %%" _ASM_AX", %%cr2 \n\t"
- "3: \n\t"
- /* Check if vmlaunch or vmresume is needed */
- "cmpl $0, %c[launched](%0) \n\t"
- /* Load guest registers. Don't clobber flags. */
- "mov %c[rax](%0), %%" _ASM_AX " \n\t"
- "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
- "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
- "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
- "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
- "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
-#ifdef CONFIG_X86_64
- "mov %c[r8](%0), %%r8 \n\t"
- "mov %c[r9](%0), %%r9 \n\t"
- "mov %c[r10](%0), %%r10 \n\t"
- "mov %c[r11](%0), %%r11 \n\t"
- "mov %c[r12](%0), %%r12 \n\t"
- "mov %c[r13](%0), %%r13 \n\t"
- "mov %c[r14](%0), %%r14 \n\t"
- "mov %c[r15](%0), %%r15 \n\t"
-#endif
- "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
-
- /* Enter guest mode */
- "jne 1f \n\t"
- __ex("vmlaunch") "\n\t"
- "jmp 2f \n\t"
- "1: " __ex("vmresume") "\n\t"
- "2: "
- /* Save guest registers, load host registers, keep flags */
- "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
- "pop %0 \n\t"
- "setbe %c[fail](%0)\n\t"
- "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
- "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
- __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
- "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
- "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
- "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
- "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
-#ifdef CONFIG_X86_64
- "mov %%r8, %c[r8](%0) \n\t"
- "mov %%r9, %c[r9](%0) \n\t"
- "mov %%r10, %c[r10](%0) \n\t"
- "mov %%r11, %c[r11](%0) \n\t"
- "mov %%r12, %c[r12](%0) \n\t"
- "mov %%r13, %c[r13](%0) \n\t"
- "mov %%r14, %c[r14](%0) \n\t"
- "mov %%r15, %c[r15](%0) \n\t"
- /*
- * Clear host registers marked as clobbered to prevent
- * speculative use.
- */
- "xor %%r8d, %%r8d \n\t"
- "xor %%r9d, %%r9d \n\t"
- "xor %%r10d, %%r10d \n\t"
- "xor %%r11d, %%r11d \n\t"
- "xor %%r12d, %%r12d \n\t"
- "xor %%r13d, %%r13d \n\t"
- "xor %%r14d, %%r14d \n\t"
- "xor %%r15d, %%r15d \n\t"
-#endif
- "mov %%cr2, %%" _ASM_AX " \n\t"
- "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
-
- "xor %%eax, %%eax \n\t"
- "xor %%ebx, %%ebx \n\t"
- "xor %%esi, %%esi \n\t"
- "xor %%edi, %%edi \n\t"
- "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
- ".pushsection .rodata \n\t"
- ".global vmx_return \n\t"
- "vmx_return: " _ASM_PTR " 2b \n\t"
- ".popsection"
- : : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
- [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
- [fail]"i"(offsetof(struct vcpu_vmx, fail)),
- [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
- [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
- [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
- [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
- [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
- [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
- [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
- [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
-#ifdef CONFIG_X86_64
- [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
- [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
- [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
- [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
- [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
- [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
- [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
- [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
-#endif
- [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
- [wordsize]"i"(sizeof(ulong))
- : "cc", "memory"
-#ifdef CONFIG_X86_64
- , "rax", "rbx", "rdi"
- , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-#else
- , "eax", "ebx", "edi"
-#endif
- );
-
- /*
- * We do not use IBRS in the kernel. If this vCPU has used the
- * SPEC_CTRL MSR it may have left it on; save the value and
- * turn it off. This is much more efficient than blindly adding
- * it to the atomic save/restore list. Especially as the former
- * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
- *
- * For non-nested case:
- * If the L01 MSR bitmap does not intercept the MSR, then we need to
- * save it.
- *
- * For nested case:
- * If the L02 MSR bitmap does not intercept the MSR, then we need to
- * save it.
- */
- if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
- vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
-
- x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
-
- /* Eliminate branch target predictions from guest mode */
- vmexit_fill_RSB();
-
- /* All fields are clean at this point */
- if (static_branch_unlikely(&enable_evmcs))
- current_evmcs->hv_clean_fields |=
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
-
- /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
- if (vmx->host_debugctlmsr)
- update_debugctlmsr(vmx->host_debugctlmsr);
-
-#ifndef CONFIG_X86_64
- /*
- * The sysexit path does not restore ds/es, so we must set them to
- * a reasonable value ourselves.
- *
- * We can't defer this to vmx_prepare_switch_to_host() since that
- * function may be executed in interrupt context, which saves and
- * restores segments around it, nullifying its effect.
- */
- loadsegment(ds, __USER_DS);
- loadsegment(es, __USER_DS);
-#endif
-
- vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
- | (1 << VCPU_EXREG_RFLAGS)
- | (1 << VCPU_EXREG_PDPTR)
- | (1 << VCPU_EXREG_SEGMENTS)
- | (1 << VCPU_EXREG_CR3));
- vcpu->arch.regs_dirty = 0;
-
- /*
- * Eager FPU is enabled if PKEYs are supported, and the host CR4
- * has been restored at this point, so it is safe to read the
- * guest PKRU from the current XSAVE state.
- */
- if (static_cpu_has(X86_FEATURE_PKU) &&
- kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
- vcpu->arch.pkru = __read_pkru();
- if (vcpu->arch.pkru != vmx->host_pkru)
- __write_pkru(vmx->host_pkru);
- }
-
- vmx->nested.nested_run_pending = 0;
- vmx->idt_vectoring_info = 0;
-
- vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
- if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
- return;
-
- vmx->loaded_vmcs->launched = 1;
- vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
-
- vmx_complete_atomic_exit(vmx);
- vmx_recover_nmi_blocking(vmx);
- vmx_complete_interrupts(vmx);
-}
-STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
-
-static struct kvm *vmx_vm_alloc(void)
-{
- struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
- return &kvm_vmx->kvm;
-}
-
-static void vmx_vm_free(struct kvm *kvm)
-{
- vfree(to_kvm_vmx(kvm));
-}
-
-static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- int cpu;
-
- if (vmx->loaded_vmcs == vmcs)
- return;
-
- cpu = get_cpu();
- vmx_vcpu_put(vcpu);
- vmx->loaded_vmcs = vmcs;
- vmx_vcpu_load(vcpu, cpu);
- put_cpu();
-
- vm_entry_controls_reset_shadow(vmx);
- vm_exit_controls_reset_shadow(vmx);
- vmx_segment_cache_clear(vmx);
-}
-
-/*
- * Ensure that the current vmcs of the logical processor is the
- * vmcs01 of the vcpu before calling free_nested().
- */
-static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
-{
- vcpu_load(vcpu);
- vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
- free_nested(vcpu);
- vcpu_put(vcpu);
-}
-
-static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (enable_pml)
- vmx_destroy_pml_buffer(vmx);
- free_vpid(vmx->vpid);
- leave_guest_mode(vcpu);
- vmx_free_vcpu_nested(vcpu);
- free_loaded_vmcs(vmx->loaded_vmcs);
- kfree(vmx->guest_msrs);
- kvm_vcpu_uninit(vcpu);
- kmem_cache_free(kvm_vcpu_cache, vmx);
-}
-
-static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
-{
- int err;
- struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
- unsigned long *msr_bitmap;
- int cpu;
-
- if (!vmx)
- return ERR_PTR(-ENOMEM);
-
- vmx->vpid = allocate_vpid();
-
- err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
- if (err)
- goto free_vcpu;
-
- err = -ENOMEM;
-
- /*
- * If PML is turned on, failure to enable PML just results in failure
- * to create the vcpu, so we can simplify the PML logic (by avoiding
- * having to deal with cases such as PML being enabled on only some
- * of the guest's vcpus).
- */
- if (enable_pml) {
- vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!vmx->pml_pg)
- goto uninit_vcpu;
- }
-
- vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
- BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
- > PAGE_SIZE);
-
- if (!vmx->guest_msrs)
- goto free_pml;
-
- err = alloc_loaded_vmcs(&vmx->vmcs01);
- if (err < 0)
- goto free_msrs;
-
- msr_bitmap = vmx->vmcs01.msr_bitmap;
- vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
- vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
- vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
- vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
- vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
- vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
- vmx->msr_bitmap_mode = 0;
-
- vmx->loaded_vmcs = &vmx->vmcs01;
- cpu = get_cpu();
- vmx_vcpu_load(&vmx->vcpu, cpu);
- vmx->vcpu.cpu = cpu;
- vmx_vcpu_setup(vmx);
- vmx_vcpu_put(&vmx->vcpu);
- put_cpu();
- if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
- err = alloc_apic_access_page(kvm);
- if (err)
- goto free_vmcs;
- }
-
- if (enable_ept && !enable_unrestricted_guest) {
- err = init_rmode_identity_map(kvm);
- if (err)
- goto free_vmcs;
- }
-
- if (nested)
- nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
- kvm_vcpu_apicv_active(&vmx->vcpu));
-
- vmx->nested.posted_intr_nv = -1;
- vmx->nested.current_vmptr = -1ull;
-
- vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
-
- /*
- * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
- * or POSTED_INTR_WAKEUP_VECTOR.
- */
- vmx->pi_desc.nv = POSTED_INTR_VECTOR;
- vmx->pi_desc.sn = 1;
-
- return &vmx->vcpu;
-
-free_vmcs:
- free_loaded_vmcs(vmx->loaded_vmcs);
-free_msrs:
- kfree(vmx->guest_msrs);
-free_pml:
- vmx_destroy_pml_buffer(vmx);
-uninit_vcpu:
- kvm_vcpu_uninit(&vmx->vcpu);
-free_vcpu:
- free_vpid(vmx->vpid);
- kmem_cache_free(kvm_vcpu_cache, vmx);
- return ERR_PTR(err);
-}
-
-#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
-#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
-
-static int vmx_vm_init(struct kvm *kvm)
-{
- spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock);
-
- if (!ple_gap)
- kvm->arch.pause_in_guest = true;
-
- if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
- switch (l1tf_mitigation) {
- case L1TF_MITIGATION_OFF:
- case L1TF_MITIGATION_FLUSH_NOWARN:
- /* 'I explicitly don't care' is set */
- break;
- case L1TF_MITIGATION_FLUSH:
- case L1TF_MITIGATION_FLUSH_NOSMT:
- case L1TF_MITIGATION_FULL:
- /*
- * Warn upon starting the first VM in a potentially
- * insecure environment.
- */
- if (cpu_smt_control == CPU_SMT_ENABLED)
- pr_warn_once(L1TF_MSG_SMT);
- if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
- pr_warn_once(L1TF_MSG_L1D);
- break;
- case L1TF_MITIGATION_FULL_FORCE:
- /* Flush is enforced */
- break;
- }
- }
- return 0;
-}
-
-static void __init vmx_check_processor_compat(void *rtn)
-{
- struct vmcs_config vmcs_conf;
-
- *(int *)rtn = 0;
- if (setup_vmcs_config(&vmcs_conf) < 0)
- *(int *)rtn = -EIO;
- nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, enable_apicv);
- if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
- printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
- smp_processor_id());
- *(int *)rtn = -EIO;
- }
-}
-
-static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
-{
- u8 cache;
- u64 ipat = 0;
-
- /* For the VT-d and EPT combination:
-  * 1. MMIO: always map as UC.
-  * 2. EPT with VT-d:
-  *    a. VT-d without the snooping control feature: can't guarantee
-  *       the result, try to trust the guest.
-  *    b. VT-d with the snooping control feature: the VT-d engine's
-  *       snooping control guarantees cache correctness. Just set it
-  *       to WB to stay consistent with the host. Same as item 3.
-  * 3. EPT without VT-d: always map as WB and set IPAT=1 to stay
-  *    consistent with the host MTRRs.
-  */
- if (is_mmio) {
- cache = MTRR_TYPE_UNCACHABLE;
- goto exit;
- }
-
- if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
- ipat = VMX_EPT_IPAT_BIT;
- cache = MTRR_TYPE_WRBACK;
- goto exit;
- }
-
- if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
- ipat = VMX_EPT_IPAT_BIT;
- if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
- cache = MTRR_TYPE_WRBACK;
- else
- cache = MTRR_TYPE_UNCACHABLE;
- goto exit;
- }
-
- cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
-
-exit:
- return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
-}
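-
-/*
- * Encoding note, not part of the original file: in an EPT entry the memory
- * type occupies bits 5:3 (VMX_EPT_MT_EPTE_SHIFT == 3) and IPAT is bit 6,
- * so e.g. WB (type 6) with IPAT set encodes as (6 << 3) | (1 << 6) = 0x70.
- */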
-
-static int vmx_get_lpage_level(void)
-{
- if (enable_ept && !cpu_has_vmx_ept_1g_page())
- return PT_DIRECTORY_LEVEL;
- else
- /* For shadow and EPT supported 1GB page */
- return PT_PDPE_LEVEL;
-}
-
-static void vmcs_set_secondary_exec_control(u32 new_ctl)
-{
- /*
- * These bits in the secondary execution controls field
- * are dynamic; the others are mostly based on the hypervisor
- * architecture and the guest's CPUID. Do not touch the
- * dynamic bits.
- */
- u32 mask =
- SECONDARY_EXEC_SHADOW_VMCS |
- SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- SECONDARY_EXEC_DESC;
-
- u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
-
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
- (new_ctl & ~mask) | (cur_ctl & mask));
-}
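-
-/*
- * Worked example, not part of the original file: with mask = 0x0f,
- * cur_ctl = 0x35 and new_ctl = 0x5a, the write above keeps the dynamic
- * low nibble from cur_ctl and takes everything else from new_ctl:
- * (0x5a & ~0x0f) | (0x35 & 0x0f) == 0x50 | 0x05 == 0x55.
- */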
-
-/*
- * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
- * (indicating "allowed-1") if they are supported in the guest's CPUID.
- */
-static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct kvm_cpuid_entry2 *entry;
-
- vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
- vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
-
-#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \
- if (entry && (entry->_reg & (_cpuid_mask))) \
- vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \
-} while (0)
-
- entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
- cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME));
- cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME));
- cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC));
- cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE));
- cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE));
- cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE));
- cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE));
- cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE));
- cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR));
- cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM));
- cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX));
- cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX));
- cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID));
- cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE));
-
- entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
- cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE));
- cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP));
- cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP));
- cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU));
- cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP));
-
-#undef cr4_fixed1_update
-}
-
-static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (kvm_mpx_supported()) {
- bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
-
- if (mpx_enabled) {
- vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
- vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
- } else {
- vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
- vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
- }
- }
-}
-
-static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (cpu_has_secondary_exec_ctrls()) {
- vmx_compute_secondary_exec_control(vmx);
- vmcs_set_secondary_exec_control(vmx->secondary_exec_control);
- }
-
- if (nested_vmx_allowed(vcpu))
- to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
- FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
- else
- to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
- ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
-
- if (nested_vmx_allowed(vcpu)) {
- nested_vmx_cr_fixed1_bits_update(vcpu);
- nested_vmx_entry_exit_ctls_update(vcpu);
- }
-}
-
-static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
-{
- if (func == 1 && nested)
- entry->ecx |= bit(X86_FEATURE_VMX);
-}
-
-static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
- struct x86_exception *fault)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 exit_reason;
- unsigned long exit_qualification = vcpu->arch.exit_qualification;
-
- if (vmx->nested.pml_full) {
- exit_reason = EXIT_REASON_PML_FULL;
- vmx->nested.pml_full = false;
- exit_qualification &= INTR_INFO_UNBLOCK_NMI;
- } else if (fault->error_code & PFERR_RSVD_MASK)
- exit_reason = EXIT_REASON_EPT_MISCONFIG;
- else
- exit_reason = EXIT_REASON_EPT_VIOLATION;
-
- nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
- vmcs12->guest_physical_address = fault->address;
-}
-
-static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
-{
- return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
-}
-
-/* Callbacks for nested_ept_init_mmu_context: */
-
-static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
-{
- /* return the page table to be shadowed - in our case, EPT12 */
- return get_vmcs12(vcpu)->ept_pointer;
-}
-
-static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
-{
- WARN_ON(mmu_is_nested(vcpu));
-
- vcpu->arch.mmu = &vcpu->arch.guest_mmu;
- kvm_init_shadow_ept_mmu(vcpu,
- to_vmx(vcpu)->nested.msrs.ept_caps &
- VMX_EPT_EXECUTE_ONLY_BIT,
- nested_ept_ad_enabled(vcpu),
- nested_ept_get_cr3(vcpu));
- vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
- vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
- vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
- vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
-
- vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
-}
-
-static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.mmu = &vcpu->arch.root_mmu;
- vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
-}
-
-static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
- u16 error_code)
-{
- bool inequality, bit;
-
- bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
- inequality =
- (error_code & vmcs12->page_fault_error_code_mask) !=
- vmcs12->page_fault_error_code_match;
- return inequality ^ bit;
-}
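-
-/*
- * Truth table for the XOR above, not part of the original file: "bit" is
- * the #PF intercept in the exception bitmap and "inequality" is a
- * PFEC_MASK/PFEC_MATCH mismatch; per the SDM the two invert each other:
- *
- * bit  inequality  L1 vmexit?
- *  0       0          no
- *  0       1          yes
- *  1       0          yes
- *  1       1          no
- */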
-
-static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
- struct x86_exception *fault)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
- WARN_ON(!is_guest_mode(vcpu));
-
- if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
- !to_vmx(vcpu)->nested.nested_run_pending) {
- vmcs12->vm_exit_intr_error_code = fault->error_code;
- nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
- PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
- INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
- fault->address);
- } else {
- kvm_inject_page_fault(vcpu, fault);
- }
-}
-
-static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12);
-
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct page *page;
- u64 hpa;
-
- if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
- /*
- * Translate L1 physical address to host physical
- * address for vmcs02. Keep the page pinned, so this
- * physical address remains valid. We keep a reference
- * to it so we can release it later.
- */
- if (vmx->nested.apic_access_page) { /* shouldn't happen */
- kvm_release_page_dirty(vmx->nested.apic_access_page);
- vmx->nested.apic_access_page = NULL;
- }
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
- /*
- * If translation failed, no matter: This feature asks
- * to exit when accessing the given address, and if it
- * can never be accessed, this feature won't do
- * anything anyway.
- */
- if (!is_error_page(page)) {
- vmx->nested.apic_access_page = page;
- hpa = page_to_phys(vmx->nested.apic_access_page);
- vmcs_write64(APIC_ACCESS_ADDR, hpa);
- } else {
- vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
- }
- }
-
- if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
- if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
- kvm_release_page_dirty(vmx->nested.virtual_apic_page);
- vmx->nested.virtual_apic_page = NULL;
- }
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
-
- /*
- * If translation failed, VM entry will fail because
- * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
- * Failing the vm entry is _not_ what the processor
- * does but it's basically the only possibility we
- * have. We could still enter the guest if CR8 load
- * exits are enabled, CR8 store exits are enabled, and
- * virtualize APIC access is disabled; in this case
- * the processor would never use the TPR shadow and we
- * could simply clear the bit from the execution
- * control. But such a configuration is useless, so
- * let's keep the code simple.
- */
- if (!is_error_page(page)) {
- vmx->nested.virtual_apic_page = page;
- hpa = page_to_phys(vmx->nested.virtual_apic_page);
- vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
- }
- }
-
- if (nested_cpu_has_posted_intr(vmcs12)) {
- if (vmx->nested.pi_desc_page) { /* shouldn't happen */
- kunmap(vmx->nested.pi_desc_page);
- kvm_release_page_dirty(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc_page = NULL;
- }
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
- if (is_error_page(page))
- return;
- vmx->nested.pi_desc_page = page;
- vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc =
- (struct pi_desc *)((void *)vmx->nested.pi_desc +
- (unsigned long)(vmcs12->posted_intr_desc_addr &
- (PAGE_SIZE - 1)));
- vmcs_write64(POSTED_INTR_DESC_ADDR,
- page_to_phys(vmx->nested.pi_desc_page) +
- (unsigned long)(vmcs12->posted_intr_desc_addr &
- (PAGE_SIZE - 1)));
- }
- if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
- vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
- CPU_BASED_USE_MSR_BITMAPS);
- else
- vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
- CPU_BASED_USE_MSR_BITMAPS);
-}
-
-static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
-{
- u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- /*
- * A timer value of zero is architecturally guaranteed to cause
- * a VMExit prior to executing any instructions in the guest.
- */
- if (preemption_timeout == 0) {
- vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
- return;
- }
-
- if (vcpu->arch.virtual_tsc_khz == 0)
- return;
-
- preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
- preemption_timeout *= 1000000;
- do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
- hrtimer_start(&vmx->nested.preemption_timer,
- ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
-}
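-
-/*
- * Worked example, not part of the original file: the vmcs12 timer value is
- * first scaled to TSC ticks by the emulated rate shift, then converted to
- * nanoseconds as ns = (value << rate) * 1000000 / virtual_tsc_khz. E.g.
- * value = 1000, rate = 5 and virtual_tsc_khz = 2000000 (a 2 GHz TSC)
- * gives 32000 * 1000000 / 2000000 = 16000 ns.
- */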
-
-static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
- return 0;
-
- if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
- !page_address_valid(vcpu, vmcs12->io_bitmap_b))
- return -EINVAL;
-
- return 0;
-}
-
-static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
- return 0;
-
- if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
- return -EINVAL;
-
- return 0;
-}
-
-static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
- return 0;
-
- if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
- return -EINVAL;
-
- return 0;
-}
-
-/*
- * Merge L0's and L1's MSR bitmaps; return false to indicate that
- * we are not using the hardware MSR bitmap.
- */
-static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- int msr;
- struct page *page;
- unsigned long *msr_bitmap_l1;
- unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
- /*
- * pred_cmd & spec_ctrl are trying to verify two things:
- *
- * 1. L0 gave L1 permission to actually pass through the MSR. This
- *    ensures that we do not accidentally generate an L02 MSR bitmap
- *    from the L12 MSR bitmap that is too permissive.
- * 2. That L1 or its L2s have actually used the MSR. This avoids
- *    unnecessary merging of the bitmap if the MSR is unused. This
- * works properly because we only update the L01 MSR bitmap lazily.
- * So even if L0 should pass L1 these MSRs, the L01 bitmap is only
- * updated to reflect this when L1 (or its L2s) actually write to
- * the MSR.
- */
- bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
- bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
-
- /* Nothing to do if the MSR bitmap is not in use. */
- if (!cpu_has_vmx_msr_bitmap() ||
- !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
- return false;
-
- if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
- !pred_cmd && !spec_ctrl)
- return false;
-
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
- if (is_error_page(page))
- return false;
-
- msr_bitmap_l1 = (unsigned long *)kmap(page);
- if (nested_cpu_has_apic_reg_virt(vmcs12)) {
- /*
- * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
- * just lets the processor take the value from the virtual-APIC page;
- * take those 256 bits directly from the L1 bitmap.
- */
- for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
- unsigned word = msr / BITS_PER_LONG;
- msr_bitmap_l0[word] = msr_bitmap_l1[word];
- msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
- }
- } else {
- for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
- unsigned word = msr / BITS_PER_LONG;
- msr_bitmap_l0[word] = ~0;
- msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
- }
- }
-
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- X2APIC_MSR(APIC_TASKPRI),
- MSR_TYPE_W);
-
- if (nested_cpu_has_vid(vmcs12)) {
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- X2APIC_MSR(APIC_EOI),
- MSR_TYPE_W);
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- X2APIC_MSR(APIC_SELF_IPI),
- MSR_TYPE_W);
- }
-
- if (spec_ctrl)
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- MSR_IA32_SPEC_CTRL,
- MSR_TYPE_R | MSR_TYPE_W);
-
- if (pred_cmd)
- nested_vmx_disable_intercept_for_msr(
- msr_bitmap_l1, msr_bitmap_l0,
- MSR_IA32_PRED_CMD,
- MSR_TYPE_W);
-
- kunmap(page);
- kvm_release_page_clean(page);
-
- return true;
-}
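-
-/*
- * Layout note, not part of the original file: in the 4 KiB MSR bitmap the
- * write-intercept bit for a low MSR lives 0x800 bytes after its
- * read-intercept bit, so "word + (0x800 / sizeof(long))" above steps from
- * an MSR's read bit to its write bit. The x2APIC range 0x800-0x8ff is 256
- * contiguous bits within each half.
- */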
-
-static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- struct vmcs12 *shadow;
- struct page *page;
-
- if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
- vmcs12->vmcs_link_pointer == -1ull)
- return;
-
- shadow = get_shadow_vmcs12(vcpu);
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
-
- memcpy(shadow, kmap(page), VMCS12_SIZE);
-
- kunmap(page);
- kvm_release_page_clean(page);
-}
-
-static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
- vmcs12->vmcs_link_pointer == -1ull)
- return;
-
- kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
- get_shadow_vmcs12(vcpu), VMCS12_SIZE);
-}
-
-static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
- !page_address_valid(vcpu, vmcs12->apic_access_addr))
- return -EINVAL;
- else
- return 0;
-}
-
-static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
- !nested_cpu_has_apic_reg_virt(vmcs12) &&
- !nested_cpu_has_vid(vmcs12) &&
- !nested_cpu_has_posted_intr(vmcs12))
- return 0;
-
- /*
- * If virtualize x2apic mode is enabled,
- * virtualize apic access must be disabled.
- */
- if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
- nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
- return -EINVAL;
-
- /*
- * If virtual interrupt delivery is enabled,
- * we must exit on external interrupts.
- */
- if (nested_cpu_has_vid(vmcs12) &&
- !nested_exit_on_intr(vcpu))
- return -EINVAL;
-
- /*
- * Bits 15:8 should be zero in posted_intr_nv; the descriptor
- * address has already been checked in nested_get_vmcs12_pages.
- *
- * Bits 5:0 of posted_intr_desc_addr should be zero.
- */
- if (nested_cpu_has_posted_intr(vmcs12) &&
- (!nested_cpu_has_vid(vmcs12) ||
- !nested_exit_intr_ack_set(vcpu) ||
- (vmcs12->posted_intr_nv & 0xff00) ||
- (vmcs12->posted_intr_desc_addr & 0x3f) ||
- (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
- return -EINVAL;
-
- /* TPR shadow is required by all APICv features. */
- if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
- return -EINVAL;
-
- return 0;
-}
-
-static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
- unsigned long count_field,
- unsigned long addr_field)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- int maxphyaddr;
- u64 count, addr;
-
- if (vmcs12_read_any(vmcs12, count_field, &count) ||
- vmcs12_read_any(vmcs12, addr_field, &addr)) {
- WARN_ON(1);
- return -EINVAL;
- }
- if (count == 0)
- return 0;
- maxphyaddr = cpuid_maxphyaddr(vcpu);
- if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
- (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
- pr_debug_ratelimited(
- "nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
- addr_field, maxphyaddr, count, addr);
- return -EINVAL;
- }
- return 0;
-}
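-
-/*
- * Worked example, not part of the original file: each struct vmx_msr_entry
- * is 16 bytes, so for count = 256 the list must be 16-byte aligned and its
- * last byte, addr + 256 * 16 - 1, must still be representable in
- * MAXPHYADDR bits for the check above to pass.
- */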
-
-static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- if (vmcs12->vm_exit_msr_load_count == 0 &&
- vmcs12->vm_exit_msr_store_count == 0 &&
- vmcs12->vm_entry_msr_load_count == 0)
- return 0; /* Fast path */
- if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
- VM_EXIT_MSR_LOAD_ADDR) ||
- nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
- VM_EXIT_MSR_STORE_ADDR) ||
- nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
- VM_ENTRY_MSR_LOAD_ADDR))
- return -EINVAL;
- return 0;
-}
-
-static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- if (!nested_cpu_has_pml(vmcs12))
- return 0;
-
- if (!nested_cpu_has_ept(vmcs12) ||
- !page_address_valid(vcpu, vmcs12->pml_address))
- return -EINVAL;
-
- return 0;
-}
-
-static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- if (!nested_cpu_has_shadow_vmcs(vmcs12))
- return 0;
-
- if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
- !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
- return -EINVAL;
-
- return 0;
-}
-
-static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
- struct vmx_msr_entry *e)
-{
- /* x2APIC MSR accesses are not allowed */
- if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
- return -EINVAL;
- if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
- e->index == MSR_IA32_UCODE_REV)
- return -EINVAL;
- if (e->reserved != 0)
- return -EINVAL;
- return 0;
-}
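-
-/*
- * Worked example, not part of the original file: the x2APIC MSR range is
- * 0x800-0x8ff, so "index >> 8 == 0x8" above matches exactly those 256
- * indices (0x80b >> 8 == 0x8 is rejected, 0x900 >> 8 == 0x9 is not).
- */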
-
-static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
- struct vmx_msr_entry *e)
-{
- if (e->index == MSR_FS_BASE ||
- e->index == MSR_GS_BASE ||
- e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
- nested_vmx_msr_check_common(vcpu, e))
- return -EINVAL;
- return 0;
-}
-
-static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
- struct vmx_msr_entry *e)
-{
- if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
- nested_vmx_msr_check_common(vcpu, e))
- return -EINVAL;
- return 0;
-}
-
-/*
- * Load the guest's/host's MSRs at nested entry/exit.
- * Return 0 on success, or the (1-based) index of the failing entry.
- */
-static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
-{
- u32 i;
- struct vmx_msr_entry e;
- struct msr_data msr;
-
- msr.host_initiated = false;
- for (i = 0; i < count; i++) {
- if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
- &e, sizeof(e))) {
- pr_debug_ratelimited(
- "%s cannot read MSR entry (%u, 0x%08llx)\n",
- __func__, i, gpa + i * sizeof(e));
- goto fail;
- }
- if (nested_vmx_load_msr_check(vcpu, &e)) {
- pr_debug_ratelimited(
- "%s check failed (%u, 0x%x, 0x%x)\n",
- __func__, i, e.index, e.reserved);
- goto fail;
- }
- msr.index = e.index;
- msr.data = e.value;
- if (kvm_set_msr(vcpu, &msr)) {
- pr_debug_ratelimited(
- "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
- __func__, i, e.index, e.value);
- goto fail;
- }
- }
- return 0;
-fail:
- return i + 1;
-}
-
-static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
-{
- u32 i;
- struct vmx_msr_entry e;
-
- for (i = 0; i < count; i++) {
- struct msr_data msr_info;
- if (kvm_vcpu_read_guest(vcpu,
- gpa + i * sizeof(e),
- &e, 2 * sizeof(u32))) {
- pr_debug_ratelimited(
- "%s cannot read MSR entry (%u, 0x%08llx)\n",
- __func__, i, gpa + i * sizeof(e));
- return -EINVAL;
- }
- if (nested_vmx_store_msr_check(vcpu, &e)) {
- pr_debug_ratelimited(
- "%s check failed (%u, 0x%x, 0x%x)\n",
- __func__, i, e.index, e.reserved);
- return -EINVAL;
- }
- msr_info.host_initiated = false;
- msr_info.index = e.index;
- if (kvm_get_msr(vcpu, &msr_info)) {
- pr_debug_ratelimited(
- "%s cannot read MSR (%u, 0x%x)\n",
- __func__, i, e.index);
- return -EINVAL;
- }
- if (kvm_vcpu_write_guest(vcpu,
- gpa + i * sizeof(e) +
- offsetof(struct vmx_msr_entry, value),
- &msr_info.data, sizeof(msr_info.data))) {
- pr_debug_ratelimited(
- "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
- __func__, i, e.index, msr_info.data);
- return -EINVAL;
- }
- }
- return 0;
-}
-
-static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
- unsigned long invalid_mask;
-
- invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
- return (val & invalid_mask) == 0;
-}
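-
-/*
- * Worked example, not part of the original file: with a 36-bit MAXPHYADDR,
- * invalid_mask = ~0ULL << 36 = 0xfffffff000000000, so any CR3 value with a
- * bit set in positions 63:36 is rejected.
- */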
-
-/*
- * Load the guest's/host's cr3 at nested entry/exit. nested_ept is true if we
- * are emulating VM entry into a guest with EPT enabled. Returns 0 on success,
- * 1 on failure; on failure, the invalid-state exit qualification code is
- * assigned to entry_failure_code.
- */
-static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
- u32 *entry_failure_code)
-{
- if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
- if (!nested_cr3_valid(vcpu, cr3)) {
- *entry_failure_code = ENTRY_FAIL_DEFAULT;
- return 1;
- }
-
- /*
- * If PAE paging and EPT are both on, CR3 is not used by the CPU and
- * must not be dereferenced.
- */
- if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
- !nested_ept) {
- if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
- *entry_failure_code = ENTRY_FAIL_PDPTE;
- return 1;
- }
- }
- }
-
- if (!nested_ept)
- kvm_mmu_new_cr3(vcpu, cr3, false);
-
- vcpu->arch.cr3 = cr3;
- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
-
- kvm_init_mmu(vcpu, false);
-
- return 0;
-}
-
-/*
- * Returns true if KVM is able to configure the CPU to tag TLB entries
- * populated by L2 differently from TLB entries populated by L1.
- *
- * If L1 uses EPT, then TLB entries are tagged with a different EPTP.
- *
- * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
- * with a different VPID (L1 entries are tagged with vmx->vpid
- * while L2 entries are tagged with vmx->nested.vpid02).
- */
-static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
- return nested_cpu_has_ept(vmcs12) ||
- (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
-}
-
-static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
-{
- if (vmx->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
- return vmcs12->guest_ia32_efer;
- else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
- return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
- else
- return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
-}
-
-static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
-{
- /*
- * If vmcs02 hasn't been initialized, set the constant vmcs02 state
- * according to L0's settings (vmcs12 is irrelevant here). Host
- * fields that come from L0 and are not constant, e.g. HOST_CR3,
- * will be set as needed prior to VMLAUNCH/VMRESUME.
- */
- if (vmx->nested.vmcs02_initialized)
- return;
- vmx->nested.vmcs02_initialized = true;
-
- /*
- * We don't care what the EPTP value is; we just need to guarantee
- * it's valid so we don't get a false positive when doing early
- * consistency checks.
- */
- if (enable_ept && nested_early_check)
- vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
-
- /* All VMFUNCs are currently emulated through L0 vmexits. */
- if (cpu_has_vmx_vmfunc())
- vmcs_write64(VM_FUNCTION_CONTROL, 0);
-
- if (cpu_has_vmx_posted_intr())
- vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
-
- if (cpu_has_vmx_msr_bitmap())
- vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
-
- if (enable_pml)
- vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
-
- /*
- * Set the MSR load/store lists to match L0's settings. Only the
- * addresses are constant (for vmcs02), the counts can change based
- * on L2's behavior, e.g. switching to/from long mode.
- */
- vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
-
- vmx_set_constant_host_state(vmx);
-}
-
-static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx,
- struct vmcs12 *vmcs12)
-{
- prepare_vmcs02_constant_state(vmx);
-
- vmcs_write64(VMCS_LINK_POINTER, -1ull);
-
- if (enable_vpid) {
- if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
- vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
- else
- vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
- }
-}
-
-static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
-{
- u32 exec_control, vmcs12_exec_ctrl;
- u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
-
- if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
- prepare_vmcs02_early_full(vmx, vmcs12);
-
- /*
- * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
- * entry, but only if the current (host) sp changed from the value
- * we wrote last (vmx->host_rsp). This cache is no longer relevant
- * if we switch vmcs, and rather than hold a separate cache per vmcs,
- * here we just force the write to happen on entry. host_rsp will
- * also be written unconditionally by nested_vmx_check_vmentry_hw()
- * if we are doing early consistency checks via hardware.
- */
- vmx->host_rsp = 0;
-
- /*
- * PIN CONTROLS
- */
- exec_control = vmcs12->pin_based_vm_exec_control;
-
- /* Preemption timer setting is computed directly in vmx_vcpu_run. */
- exec_control |= vmcs_config.pin_based_exec_ctrl;
- exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
- vmx->loaded_vmcs->hv_timer_armed = false;
-
- /* Posted interrupts setting is only taken from vmcs12. */
- if (nested_cpu_has_posted_intr(vmcs12)) {
- vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
- vmx->nested.pi_pending = false;
- } else {
- exec_control &= ~PIN_BASED_POSTED_INTR;
- }
- vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
-
- /*
- * EXEC CONTROLS
- */
- exec_control = vmx_exec_control(vmx); /* L0's desires */
- exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
- exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
- exec_control &= ~CPU_BASED_TPR_SHADOW;
- exec_control |= vmcs12->cpu_based_vm_exec_control;
-
- /*
- * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
- * nested_get_vmcs12_pages can't fix it up, the illegal value
- * will result in a VM entry failure.
- */
- if (exec_control & CPU_BASED_TPR_SHADOW) {
- vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
- vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
- } else {
-#ifdef CONFIG_X86_64
- exec_control |= CPU_BASED_CR8_LOAD_EXITING |
- CPU_BASED_CR8_STORE_EXITING;
-#endif
- }
-
- /*
- * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
- * for I/O port accesses.
- */
- exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
- exec_control |= CPU_BASED_UNCOND_IO_EXITING;
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
-
- /*
- * SECONDARY EXEC CONTROLS
- */
- if (cpu_has_secondary_exec_ctrls()) {
- exec_control = vmx->secondary_exec_control;
-
- /* Take the following fields only from vmcs12 */
- exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- SECONDARY_EXEC_ENABLE_INVPCID |
- SECONDARY_EXEC_RDTSCP |
- SECONDARY_EXEC_XSAVES |
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
- SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_ENABLE_VMFUNC);
- if (nested_cpu_has(vmcs12,
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
- vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
- ~SECONDARY_EXEC_ENABLE_PML;
- exec_control |= vmcs12_exec_ctrl;
- }
-
- /* VMCS shadowing for L2 is emulated for now */
- exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
-
- if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
- vmcs_write16(GUEST_INTR_STATUS,
- vmcs12->guest_intr_status);
-
- /*
- * Write an illegal value to APIC_ACCESS_ADDR. Later,
- * nested_get_vmcs12_pages will either fix it up or
- * remove the VM execution control.
- */
- if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
- vmcs_write64(APIC_ACCESS_ADDR, -1ull);
-
- if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
- vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
-
- vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
- }
-
- /*
- * ENTRY CONTROLS
- *
- * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
- * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
- * on the related bits (if supported by the CPU) in the hope that
- * we can avoid VMWrites during vmx_set_efer().
- */
- exec_control = (vmcs12->vm_entry_controls | vmcs_config.vmentry_ctrl) &
- ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
- if (cpu_has_load_ia32_efer) {
- if (guest_efer & EFER_LMA)
- exec_control |= VM_ENTRY_IA32E_MODE;
- if (guest_efer != host_efer)
- exec_control |= VM_ENTRY_LOAD_IA32_EFER;
- }
- vm_entry_controls_init(vmx, exec_control);
-
- /*
- * EXIT CONTROLS
- *
- * L2->L1 exit controls are emulated - the hardware exit is to L0 so
- * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
- * bits may be modified by vmx_set_efer() in prepare_vmcs02().
- */
- exec_control = vmcs_config.vmexit_ctrl;
- if (cpu_has_load_ia32_efer && guest_efer != host_efer)
- exec_control |= VM_EXIT_LOAD_IA32_EFER;
- vm_exit_controls_init(vmx, exec_control);
-
- /*
- * Conceptually we want to copy the PML address and index from
- * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
- * since we always flush the log on each vmexit and never change
- * the PML address (once set), this happens to be equivalent to
- * simply resetting the index in vmcs02.
- */
- if (enable_pml)
- vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
-
- /*
- * Interrupt/Exception Fields
- */
- if (vmx->nested.nested_run_pending) {
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
- vmcs12->vm_entry_intr_info_field);
- vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
- vmcs12->vm_entry_exception_error_code);
- vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
- vmcs12->vm_entry_instruction_len);
- vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
- vmcs12->guest_interruptibility_info);
- vmx->loaded_vmcs->nmi_known_unmasked =
- !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
- } else {
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
- }
-}
-
-static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
-{
- struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
-
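- /*
- * With an enlightened VMCS, the VMWRITEs below can be skipped for
- * field groups the guest hypervisor marked clean since the last run.
- */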
- if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
- vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
- vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
- vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
- vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
- vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
- vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
- vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
- vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
- vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
- vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
- vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
- vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
- vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
- vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
- vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
- vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
- vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
- vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
- vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
- vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
- vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
- vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
- vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
- vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
- vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
- vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
- vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
- vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
- vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
- vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
- vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
- vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
- vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
- vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
- }
-
- if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
- vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
- vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
- vmcs12->guest_pending_dbg_exceptions);
- vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
- vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
-
- /*
- * L1 may access L2's PDPTRs, so save them to construct
- * vmcs12.
- */
- if (enable_ept) {
- vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
- vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
- vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
- vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
- }
- }
-
- if (nested_cpu_has_xsaves(vmcs12))
- vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
-
- /*
- * Whether page-faults are trapped is determined by a combination of
- * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
- * If enable_ept, L0 doesn't care about page faults and we should
- * set all of these to L1's desires. However, if !enable_ept, L0 does
- * care about (at least some) page faults, and because it is not easy
- * (if at all possible?) to merge L0 and L1's desires, we simply ask
- * to exit on each and every L2 page fault. This is done by setting
- * MASK=MATCH=0 and (see below) EB.PF=1.
- * Note that below we don't need special code to set EB.PF beyond the
- * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
- * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
- * !enable_ept, EB.PF is 1, so the "or" will always be 1.
- */
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
- enable_ept ? vmcs12->page_fault_error_code_mask : 0);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
- enable_ept ? vmcs12->page_fault_error_code_match : 0);
-
- if (cpu_has_vmx_apicv()) {
- vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
- vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
- vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
- vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
- }
-
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
-
- set_cr4_guest_host_mask(vmx);
-
- if (kvm_mpx_supported()) {
- if (vmx->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
- vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
- else
- vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
- }
-}
-
-/*
- * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
- * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
- * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
- * guest in a way that will both be appropriate to L1's requests, and our
- * needs. In addition to modifying the active vmcs (which is vmcs02), this
- * function also has additional necessary side-effects, like setting various
- * vcpu->arch fields.
- * Returns 0 on success, 1 on failure. On failure, the invalid-state exit
- * qualification code is assigned to entry_failure_code.
- */
-static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
- u32 *entry_failure_code)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
-
- if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) {
- prepare_vmcs02_full(vmx, vmcs12);
- vmx->nested.dirty_vmcs12 = false;
- }
-
- /*
- * First, the fields that are shadowed. This must be kept in sync
- * with vmx_shadow_fields.h.
- */
- if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
- vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
- vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
- }
-
- if (vmx->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
- kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
- vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
- } else {
- kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
- vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
- }
- vmx_set_rflags(vcpu, vmcs12->guest_rflags);
-
- vmx->nested.preemption_timer_expired = false;
- if (nested_cpu_has_preemption_timer(vmcs12))
- vmx_start_preemption_timer(vcpu);
-
- /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
- * bitwise-or of what L1 wants to trap for L2, and what we want to
- * trap. Note that CR0.TS also needs updating - we do this later.
- */
- update_exception_bitmap(vcpu);
- vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
- vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
-
- if (vmx->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
- vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
- vcpu->arch.pat = vmcs12->guest_ia32_pat;
- } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
- vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
- }
-
- vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
-
- if (kvm_has_tsc_control)
- decache_tsc_multiplier(vmx);
-
- if (enable_vpid) {
- /*
- * There is no direct mapping between vpid02 and vpid12; vpid02
- * is per-vCPU for L0 and is reused, with one invvpid issued
- * during nested vmentry whenever the value of vpid12 changes.
- * vpid12 is allocated by L1 for L2, so it does not influence
- * the global bitmap (for vpid01 and vpid02 allocation) even if
- * L1 spawns a lot of nested vCPUs.
- */
- if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
- if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
- vmx->nested.last_vpid = vmcs12->virtual_processor_id;
- __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
- }
- } else {
- /*
- * If L1 uses EPT, then L0 needs to execute INVEPT on
- * EPTP02 instead of EPTP01. Therefore, delay TLB
- * flush until vmcs02->eptp is fully updated by
- * KVM_REQ_LOAD_CR3. Note that this assumes
- * KVM_REQ_TLB_FLUSH is evaluated after
- * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
- */
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- }
- }
-
- if (nested_cpu_has_ept(vmcs12))
- nested_ept_init_mmu_context(vcpu);
- else if (nested_cpu_has2(vmcs12,
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
- vmx_flush_tlb(vcpu, true);
-
- /*
- * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
- * bits which we consider mandatory enabled.
- * The CR0_READ_SHADOW is what L2 should have expected to read given
- * the specifications by L1; it's not enough to take
- * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
- * have more bits than L1 expected.
- */
- vmx_set_cr0(vcpu, vmcs12->guest_cr0);
- vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
-
- vmx_set_cr4(vcpu, vmcs12->guest_cr4);
- vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
-
- vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
- /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
- vmx_set_efer(vcpu, vcpu->arch.efer);
-
- /*
- * Guest state is invalid and unrestricted guest is disabled,
- * which means L1 attempted VMEntry to L2 with invalid state.
- * Fail the VMEntry.
- */
- if (vmx->emulation_required) {
- *entry_failure_code = ENTRY_FAIL_DEFAULT;
- return 1;
- }
-
- /* Load the guest's CR3, shadowed by either EPT or shadow page tables. */
- if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
- entry_failure_code))
- return 1;
-
- if (!enable_ept)
- vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
-
- kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
- kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
- return 0;
-}
-
-static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
-{
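- /*
- * Per the SDM, "virtual NMIs" requires "NMI exiting", and
- * "NMI-window exiting" requires "virtual NMIs".
- */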
- if (!nested_cpu_has_nmi_exiting(vmcs12) &&
- nested_cpu_has_virtual_nmis(vmcs12))
- return -EINVAL;
-
- if (!nested_cpu_has_virtual_nmis(vmcs12) &&
- nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
- return -EINVAL;
-
- return 0;
-}
-
-static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- bool ia32e;
-
- if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
- vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_apicv_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_pml_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
- vmx->nested.msrs.procbased_ctls_low,
- vmx->nested.msrs.procbased_ctls_high) ||
- (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
- !vmx_control_verify(vmcs12->secondary_vm_exec_control,
- vmx->nested.msrs.secondary_ctls_low,
- vmx->nested.msrs.secondary_ctls_high)) ||
- !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
- vmx->nested.msrs.pinbased_ctls_low,
- vmx->nested.msrs.pinbased_ctls_high) ||
- !vmx_control_verify(vmcs12->vm_exit_controls,
- vmx->nested.msrs.exit_ctls_low,
- vmx->nested.msrs.exit_ctls_high) ||
- !vmx_control_verify(vmcs12->vm_entry_controls,
- vmx->nested.msrs.entry_ctls_low,
- vmx->nested.msrs.entry_ctls_high))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_vmx_check_nmi_controls(vmcs12))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_cpu_has_vmfunc(vmcs12)) {
- if (vmcs12->vm_function_control &
- ~vmx->nested.msrs.vmfunc_controls)
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (nested_cpu_has_eptp_switching(vmcs12)) {
- if (!nested_cpu_has_ept(vmcs12) ||
- !page_address_valid(vcpu, vmcs12->eptp_list_address))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
- }
- }
-
- if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
- !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
- !nested_cr3_valid(vcpu, vmcs12->host_cr3))
- return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
-
- /*
- * If the load IA32_EFER VM-exit control is 1, bits reserved in the
- * IA32_EFER MSR must be 0 in the field for that register. In addition,
- * the values of the LMA and LME bits in the field must each be that of
- * the host address-space size VM-exit control.
- */
- if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
- ia32e = (vmcs12->vm_exit_controls &
- VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
- if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
- ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
- ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
- return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
- }
-
- /*
- * From the Intel SDM, volume 3:
- * Fields relevant to VM-entry event injection must be set properly.
- * These fields are the VM-entry interruption-information field, the
- * VM-entry exception error code, and the VM-entry instruction length.
- */
- if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
- u32 intr_info = vmcs12->vm_entry_intr_info_field;
- u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
- u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
- bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
- bool should_have_error_code;
- bool urg = nested_cpu_has2(vmcs12,
- SECONDARY_EXEC_UNRESTRICTED_GUEST);
- bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
-
- /* VM-entry interruption-info field: interruption type */
- if (intr_type == INTR_TYPE_RESERVED ||
- (intr_type == INTR_TYPE_OTHER_EVENT &&
- !nested_cpu_supports_monitor_trap_flag(vcpu)))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- /* VM-entry interruption-info field: vector */
- if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
- (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
- (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- /* VM-entry interruption-info field: deliver error code */
- should_have_error_code =
- intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
- x86_exception_has_error_code(vector);
- if (has_error_code != should_have_error_code)
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- /* VM-entry exception error code */
- if (has_error_code &&
- vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- /* VM-entry interruption-info field: reserved bits */
- if (intr_info & INTR_INFO_RESVD_BITS_MASK)
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- /* VM-entry instruction length */
- switch (intr_type) {
- case INTR_TYPE_SOFT_EXCEPTION:
- case INTR_TYPE_SOFT_INTR:
- case INTR_TYPE_PRIV_SW_EXCEPTION:
- if ((vmcs12->vm_entry_instruction_len > 15) ||
- (vmcs12->vm_entry_instruction_len == 0 &&
- !nested_cpu_has_zero_length_injection(vcpu)))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
- }
- }
-
- if (nested_cpu_has_ept(vmcs12) &&
- !valid_ept_address(vcpu, vmcs12->ept_pointer))
- return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
-
- return 0;
-}
-
-static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- int r;
- struct page *page;
- struct vmcs12 *shadow;
-
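- /* A link pointer of -1 means "no shadow VMCS"; nothing to check. */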
- if (vmcs12->vmcs_link_pointer == -1ull)
- return 0;
-
- if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
- return -EINVAL;
-
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
- if (is_error_page(page))
- return -EINVAL;
-
- r = 0;
- shadow = kmap(page);
- if (shadow->hdr.revision_id != VMCS12_REVISION ||
- shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
- r = -EINVAL;
- kunmap(page);
- kvm_release_page_clean(page);
- return r;
-}
-
-static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
- u32 *exit_qual)
-{
- bool ia32e;
-
- *exit_qual = ENTRY_FAIL_DEFAULT;
-
- if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
- !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
- return 1;
-
- if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
- *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
- return 1;
- }
-
- /*
- * If the load IA32_EFER VM-entry control is 1, the following checks
- * are performed on the field for the IA32_EFER MSR:
- * - Bits reserved in the IA32_EFER MSR must be 0.
- * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
- * the IA-32e mode guest VM-exit control. It must also be identical
- * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
- * CR0.PG) is 1.
- */
- if (to_vmx(vcpu)->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
- ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
- if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
- ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
- ((vmcs12->guest_cr0 & X86_CR0_PG) &&
- ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
- return 1;
- }
-
- if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
- (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
- (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
- return 1;
-
- return 0;
-}
-
-static int __noclone nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long cr3, cr4;
-
- if (!nested_early_check)
- return 0;
-
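- /*
- * Temporarily zero the MSR autoload counts so the throwaway VMEnter
- * below doesn't load or restore any MSRs; the real counts are
- * restored after the check.
- */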
- if (vmx->msr_autoload.host.nr)
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
- if (vmx->msr_autoload.guest.nr)
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
-
- preempt_disable();
-
- vmx_prepare_switch_to_guest(vcpu);
-
- /*
- * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
- * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
- * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
- * there is no need to preserve other bits or save/restore the field.
- */
- vmcs_writel(GUEST_RFLAGS, 0);
-
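- /*
- * Point HOST_RIP at the return label in the asm blob below so that
- * a consistency check VMExit resumes execution immediately after
- * the VMLAUNCH/VMRESUME.
- */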
- vmcs_writel(HOST_RIP, vmx_early_consistency_check_return);
-
- cr3 = __get_current_cr3_fast();
- if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
- vmcs_writel(HOST_CR3, cr3);
- vmx->loaded_vmcs->host_state.cr3 = cr3;
- }
-
- cr4 = cr4_read_shadow();
- if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
- vmcs_writel(HOST_CR4, cr4);
- vmx->loaded_vmcs->host_state.cr4 = cr4;
- }
-
- vmx->__launched = vmx->loaded_vmcs->launched;
-
- asm(
- /* Set HOST_RSP */
- __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
- "mov %%" _ASM_SP ", %c[host_rsp](%0)\n\t"
-
- /* Check if vmlaunch or vmresume is needed */
- "cmpl $0, %c[launched](%0)\n\t"
- "je 1f\n\t"
- __ex("vmresume") "\n\t"
- "jmp 2f\n\t"
- "1: " __ex("vmlaunch") "\n\t"
- "jmp 2f\n\t"
- "2: "
-
- /* Set vmx->fail accordingly */
- "setbe %c[fail](%0)\n\t"
-
- ".pushsection .rodata\n\t"
- ".global vmx_early_consistency_check_return\n\t"
- "vmx_early_consistency_check_return: " _ASM_PTR " 2b\n\t"
- ".popsection"
- :
- : "c"(vmx), "d"((unsigned long)HOST_RSP),
- [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
- [fail]"i"(offsetof(struct vcpu_vmx, fail)),
- [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp))
- : "rax", "cc", "memory"
- );
-
- vmcs_writel(HOST_RIP, vmx_return);
-
- preempt_enable();
-
- if (vmx->msr_autoload.host.nr)
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
- if (vmx->msr_autoload.guest.nr)
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
-
- if (vmx->fail) {
- WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
- VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- vmx->fail = 0;
- return 1;
- }
-
- /*
- * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
- */
- local_irq_enable();
- if (hw_breakpoint_active())
- set_debugreg(__this_cpu_read(cpu_dr7), 7);
-
- /*
- * A non-failing VMEntry means we somehow entered guest mode with
- * an illegal RIP, and that's just the tip of the iceberg. There
- * is no telling what memory has been modified or what state has
- * been exposed to unknown code. Hitting this all but guarantees
- * a (very critical) hardware issue.
- */
- WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
- VMX_EXIT_REASONS_FAILED_VMENTRY));
-
- return 0;
-}
-STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw);
-
-static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12);
-
-/*
- * If from_vmentry is false, this is being called from state restore (either RSM
- * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
- *
- * Returns:
- *   0 - success, i.e. proceed with actual VMEnter
- *   1 - consistency check VMExit
- *  -1 - consistency check VMFail
- */
-static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
- bool from_vmentry)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- bool evaluate_pending_interrupts;
- u32 exit_reason = EXIT_REASON_INVALID_STATE;
- u32 exit_qual;
-
- evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
- (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
- if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
- evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
-
- if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
- vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
- if (kvm_mpx_supported() &&
- !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
- vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
-
- vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
-
- prepare_vmcs02_early(vmx, vmcs12);
-
- if (from_vmentry) {
- nested_get_vmcs12_pages(vcpu);
-
- if (nested_vmx_check_vmentry_hw(vcpu)) {
- vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- return -1;
- }
-
- if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
- goto vmentry_fail_vmexit;
- }
-
- enter_guest_mode(vcpu);
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
- vcpu->arch.tsc_offset += vmcs12->tsc_offset;
-
- if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
- goto vmentry_fail_vmexit_guest_mode;
-
- if (from_vmentry) {
- exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
- exit_qual = nested_vmx_load_msr(vcpu,
- vmcs12->vm_entry_msr_load_addr,
- vmcs12->vm_entry_msr_load_count);
- if (exit_qual)
- goto vmentry_fail_vmexit_guest_mode;
- } else {
- /*
- * The MMU is not initialized to point at the right entities yet and
- * "get pages" would need to read data from the guest (i.e. we will
- * need to perform gpa to hpa translation). Request a call
- * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
- * have already been set at vmentry time and should not be reset.
- */
- kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
- }
-
- /*
- * If L1 had a pending IRQ/NMI until it executed
- * VMLAUNCH/VMRESUME which wasn't delivered because it was
- * disallowed (e.g. interrupts disabled), L0 needs to
- * evaluate if this pending event should cause an exit from L2
- * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
- * intercept EXTERNAL_INTERRUPT).
- *
- * Usually this would be handled by the processor noticing an
- * IRQ/NMI window request, or checking RVI during evaluation of
- * pending virtual interrupts. However, this setting was done
- * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
- * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
- */
- if (unlikely(evaluate_pending_interrupts))
- kvm_make_request(KVM_REQ_EVENT, vcpu);
-
- /*
- * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
- * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
- * returned as far as L1 is concerned. It will only return (and set
- * the success flag) when L2 exits (see nested_vmx_vmexit()).
- */
- return 0;
-
- /*
- * A failed consistency check that leads to a VMExit during L1's
- * VMEnter to L2 is a variation of a normal VMexit, as explained in
- * 26.7 "VM-entry failures during or after loading guest state".
- */
-vmentry_fail_vmexit_guest_mode:
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
- vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
- leave_guest_mode(vcpu);
-
-vmentry_fail_vmexit:
- vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-
- if (!from_vmentry)
- return 1;
-
- load_vmcs12_host_state(vcpu, vmcs12);
- vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
- vmcs12->exit_qualification = exit_qual;
- if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
- vmx->nested.need_vmcs12_sync = true;
- return 1;
-}
-
-/*
- * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
- * for running an L2 nested guest.
- */
-static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
-{
- struct vmcs12 *vmcs12;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
- int ret;
-
- if (!nested_vmx_check_permission(vcpu))
- return 1;
-
- if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
- return 1;
-
- if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
- return nested_vmx_failInvalid(vcpu);
-
- vmcs12 = get_vmcs12(vcpu);
-
- /*
- * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
- * that there *is* a valid VMCS pointer, RFLAGS.CF is set
- * rather than RFLAGS.ZF, and no error number is stored to the
- * VM-instruction error field.
- */
- if (vmcs12->hdr.shadow_vmcs)
- return nested_vmx_failInvalid(vcpu);
-
- if (vmx->nested.hv_evmcs) {
- copy_enlightened_to_vmcs12(vmx);
- /* Enlightened VMCS doesn't have launch state */
- vmcs12->launch_state = !launch;
- } else if (enable_shadow_vmcs) {
- copy_shadow_to_vmcs12(vmx);
- }
-
- /*
- * The nested entry process starts with enforcing various prerequisites
- * on vmcs12 as required by the Intel SDM, and acting appropriately when
- * they fail: as the SDM explains, some conditions should cause the
- * instruction to fail, while others will cause the instruction to seem
- * to succeed, but return an EXIT_REASON_INVALID_STATE.
- * To speed up the normal (success) code path, we should avoid checking
- * for misconfigurations which will anyway be caught by the processor
- * when using the merged vmcs02.
- */
- if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
- return nested_vmx_failValid(vcpu,
- VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
-
- if (vmcs12->launch_state == launch)
- return nested_vmx_failValid(vcpu,
- launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
- : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
-
- ret = check_vmentry_prereqs(vcpu, vmcs12);
- if (ret)
- return nested_vmx_failValid(vcpu, ret);
-
- /*
- * We're finally done with prerequisite checking, and can start with
- * the nested entry.
- */
- vmx->nested.nested_run_pending = 1;
- ret = nested_vmx_enter_non_root_mode(vcpu, true);
- vmx->nested.nested_run_pending = !ret;
- if (ret > 0)
- return 1;
- else if (ret)
- return nested_vmx_failValid(vcpu,
- VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-
- /* Hide L1D cache contents from the nested guest. */
- vmx->vcpu.arch.l1tf_flush_l1d = true;
-
- /*
- * Must happen outside of nested_vmx_enter_non_root_mode() as it will
- * also be used as part of restoring nVMX state for
- * snapshot restore (migration).
- *
- * In this flow, it is assumed that vmcs12 cache was
- * transferred as part of captured nVMX state and should
- * therefore not be read from guest memory (which may not
- * exist on destination host yet).
- */
- nested_cache_shadow_vmcs12(vcpu, vmcs12);
-
- /*
- * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken
- * by event injection, halt vcpu.
- */
- if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
- !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) {
- vmx->nested.nested_run_pending = 0;
- return kvm_vcpu_halt(vcpu);
- }
- return 1;
-}
-
-/*
- * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
- * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
- * This function returns the new value we should put in vmcs12.guest_cr0.
- * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
- * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
- * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
- * didn't trap the bit, because if L1 did, so would L0).
- * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
- * been modified by L2, and L1 knows it. So just leave the old value of
- * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
- * isn't relevant, because if L0 traps this bit it can set it to anything.
- * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
- * changed these bits, and therefore they need to be updated, but L0
- * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
- * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
- */
-static inline unsigned long
-vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
-{
- return
- /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
- /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
- /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
- vcpu->arch.cr0_guest_owned_bits));
-}
-
-static inline unsigned long
-vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
-{
- return
- /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
- /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
- /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
- vcpu->arch.cr4_guest_owned_bits));
-}
-
-static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- u32 idt_vectoring;
- unsigned int nr;
-
- if (vcpu->arch.exception.injected) {
- nr = vcpu->arch.exception.nr;
- idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
-
- if (kvm_exception_is_soft(nr)) {
- vmcs12->vm_exit_instruction_len =
- vcpu->arch.event_exit_inst_len;
- idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
- } else
- idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
-
- if (vcpu->arch.exception.has_error_code) {
- idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
- vmcs12->idt_vectoring_error_code =
- vcpu->arch.exception.error_code;
- }
-
- vmcs12->idt_vectoring_info_field = idt_vectoring;
- } else if (vcpu->arch.nmi_injected) {
- vmcs12->idt_vectoring_info_field =
- INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
- } else if (vcpu->arch.interrupt.injected) {
- nr = vcpu->arch.interrupt.nr;
- idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
-
- if (vcpu->arch.interrupt.soft) {
- idt_vectoring |= INTR_TYPE_SOFT_INTR;
- vmcs12->vm_entry_instruction_len =
- vcpu->arch.event_exit_inst_len;
- } else
- idt_vectoring |= INTR_TYPE_EXT_INTR;
-
- vmcs12->idt_vectoring_info_field = idt_vectoring;
- }
-}
-
-static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long exit_qual;
- bool block_nested_events =
- vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
-
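- /*
- * Events are processed in priority order: pending exception,
- * expired preemption timer, NMI, then external interrupt.
- */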
- if (vcpu->arch.exception.pending &&
- nested_vmx_check_exception(vcpu, &exit_qual)) {
- if (block_nested_events)
- return -EBUSY;
- nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
- return 0;
- }
-
- if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
- vmx->nested.preemption_timer_expired) {
- if (block_nested_events)
- return -EBUSY;
- nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
- return 0;
- }
-
- if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
- if (block_nested_events)
- return -EBUSY;
- nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
- NMI_VECTOR | INTR_TYPE_NMI_INTR |
- INTR_INFO_VALID_MASK, 0);
- /*
- * The NMI-triggered VM exit counts as injection:
- * clear this one and block further NMIs.
- */
- vcpu->arch.nmi_pending = 0;
- vmx_set_nmi_mask(vcpu, true);
- return 0;
- }
-
- if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
- nested_exit_on_intr(vcpu)) {
- if (block_nested_events)
- return -EBUSY;
- nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
- return 0;
- }
-
- vmx_complete_nested_posted_interrupt(vcpu);
- return 0;
-}
-
-static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
-{
- to_vmx(vcpu)->req_immediate_exit = true;
-}
-
-static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
-{
- ktime_t remaining =
- hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
- u64 value;
-
- if (ktime_to_ns(remaining) <= 0)
- return 0;
-
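- /*
- * Convert the remaining time to guest TSC ticks (ns * khz / 10^6),
- * then scale down by the emulated preemption timer rate.
- */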
- value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
- do_div(value, 1000000);
- return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
-}
-
-/*
- * Update the guest state fields of vmcs12 to reflect changes that
- * occurred while L2 was running. (The "IA-32e mode guest" bit of the
- * VM-entry controls is also updated, since this is really a guest
- * state bit.)
- */
-static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
-{
- vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
- vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
-
- vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
- vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
- vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
-
- vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
- vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
- vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
- vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
- vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
- vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
- vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
- vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
- vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
- vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
- vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
- vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
- vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
- vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
- vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
- vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
- vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
- vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
- vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
- vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
- vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
- vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
- vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
- vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
- vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
- vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
- vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
- vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
- vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
- vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
- vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
- vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
- vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
- vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
- vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
- vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
-
- vmcs12->guest_interruptibility_info =
- vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
- vmcs12->guest_pending_dbg_exceptions =
- vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
- if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
- vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
- else
- vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
-
- if (nested_cpu_has_preemption_timer(vmcs12)) {
- if (vmcs12->vm_exit_controls &
- VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
- vmcs12->vmx_preemption_timer_value =
- vmx_get_preemption_timer_value(vcpu);
- hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
- }
-
- /*
- * In some cases (usually, nested EPT), L2 is allowed to change its
- * own CR3 without exiting. If it has changed it, we must keep it.
- * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
- * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
- *
- * Additionally, restore L2's PDPTR to vmcs12.
- */
- if (enable_ept) {
- vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
- vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
- vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
- vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
- vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
- }
-
- vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
-
- if (nested_cpu_has_vid(vmcs12))
- vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
-
- vmcs12->vm_entry_controls =
- (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
- (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
-
- if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
- kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
- vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
- }
-
- /* TODO: These cannot have changed unless we have MSR bitmaps and
- * the relevant bit asks not to trap the change */
- if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
- vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
- if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
- vmcs12->guest_ia32_efer = vcpu->arch.efer;
- vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
- vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
- vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
- if (kvm_mpx_supported())
- vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
-}
-
-/*
- * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
- * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
- * and this function updates it to reflect the changes to the guest state while
- * L2 was running (and perhaps made some exits which were handled directly by L0
- * without going back to L1), and to reflect the exit reason.
- * Note that we do not have to copy all VMCS fields here, just those that
- * could have been changed by the L2 guest or the exit - i.e., the guest-state and
- * exit-information fields only. Other fields are modified by L1 with VMWRITE,
- * which already writes to vmcs12 directly.
- */
-static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
- u32 exit_reason, u32 exit_intr_info,
- unsigned long exit_qualification)
-{
- /* update guest state fields: */
- sync_vmcs12(vcpu, vmcs12);
-
- /* update exit information fields: */
-
- vmcs12->vm_exit_reason = exit_reason;
- vmcs12->exit_qualification = exit_qualification;
- vmcs12->vm_exit_intr_info = exit_intr_info;
-
- vmcs12->idt_vectoring_info_field = 0;
- vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
- vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-
- if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
- vmcs12->launch_state = 1;
-
- /* vm_entry_intr_info_field is cleared on exit. Emulate this
- * instead of reading the real value. */
- vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
-
- /*
- * Transfer the event that L0 or L1 may have wanted to inject into
- * L2 to IDT_VECTORING_INFO_FIELD.
- */
- vmcs12_save_pending_event(vcpu, vmcs12);
- }
-
- /*
- * Drop what we picked up for L2 via vmx_complete_interrupts. It is
- * preserved above and would only end up incorrectly in L1.
- */
- vcpu->arch.nmi_injected = false;
- kvm_clear_exception_queue(vcpu);
- kvm_clear_interrupt_queue(vcpu);
-}
-
-/*
- * Part of what we need to do when the nested L2 guest exits and we want to
- * run its L1 parent, is to reset L1's guest state to the host state specified
- * in vmcs12.
- * This function is to be called not only on normal nested exit, but also on
- * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
- * Failures During or After Loading Guest State").
- * This function should be called when the active VMCS is L1's (vmcs01).
- */
-static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- struct kvm_segment seg;
- u32 entry_failure_code;
-
- if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
- vcpu->arch.efer = vmcs12->host_ia32_efer;
- else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
- vcpu->arch.efer |= (EFER_LMA | EFER_LME);
- else
- vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
- vmx_set_efer(vcpu, vcpu->arch.efer);
-
- kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
- kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
- vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
- vmx_set_interrupt_shadow(vcpu, 0);
-
- /*
- * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
- * actually changed, because vmx_set_cr0 refers to efer set above.
- *
- * CR0_GUEST_HOST_MASK is already set in the original vmcs01
- * (KVM doesn't change it).
- */
- vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
- vmx_set_cr0(vcpu, vmcs12->host_cr0);
-
- /* Same as above - no reason to call set_cr4_guest_host_mask(). */
- vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
- vmx_set_cr4(vcpu, vmcs12->host_cr4);
-
- nested_ept_uninit_mmu_context(vcpu);
-
- /*
- * Only PDPTE load can fail as the value of cr3 was checked on entry and
- * couldn't have changed.
- */
- if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
- nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
-
- if (!enable_ept)
- vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
-
- /*
- * If vmcs01 doesn't use VPID, CPU flushes TLB on every
- * VMEntry/VMExit. Thus, no need to flush TLB.
- *
- * If vmcs12 doesn't use VPID, L1 expects TLB to be
- * flushed on every VMEntry/VMExit.
- *
- * Otherwise, we can preserve TLB entries as long as we are
- * able to tag L1 TLB entries differently than L2 TLB entries.
- *
- * If vmcs12 uses EPT, we need to execute this flush on EPTP01
- * and therefore we request the TLB flush to happen only after VMCS EPTP
- * has been set by KVM_REQ_LOAD_CR3.
- */
- if (enable_vpid &&
- (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- }
-
- vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
- vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
- vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
- vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
- vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
- vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
- vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
-
- /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
- if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
- vmcs_write64(GUEST_BNDCFGS, 0);
-
- if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
- vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
- vcpu->arch.pat = vmcs12->host_ia32_pat;
- }
- if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
- vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
- vmcs12->host_ia32_perf_global_ctrl);
-
- /* Set L1 segment info according to Intel SDM
- 27.5.2 Loading Host Segment and Descriptor-Table Registers */
- seg = (struct kvm_segment) {
- .base = 0,
- .limit = 0xFFFFFFFF,
- .selector = vmcs12->host_cs_selector,
- .type = 11,
- .present = 1,
- .s = 1,
- .g = 1
- };
- if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
- seg.l = 1;
- else
- seg.db = 1;
- vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
- seg = (struct kvm_segment) {
- .base = 0,
- .limit = 0xFFFFFFFF,
- .type = 3,
- .present = 1,
- .s = 1,
- .db = 1,
- .g = 1
- };
- seg.selector = vmcs12->host_ds_selector;
- vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
- seg.selector = vmcs12->host_es_selector;
- vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
- seg.selector = vmcs12->host_ss_selector;
- vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
- seg.selector = vmcs12->host_fs_selector;
- seg.base = vmcs12->host_fs_base;
- vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
- seg.selector = vmcs12->host_gs_selector;
- seg.base = vmcs12->host_gs_base;
- vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
- seg = (struct kvm_segment) {
- .base = vmcs12->host_tr_base,
- .limit = 0x67,
- .selector = vmcs12->host_tr_selector,
- .type = 11,
- .present = 1
- };
- vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
-
- kvm_set_dr(vcpu, 7, 0x400);
- vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
-
- if (cpu_has_vmx_msr_bitmap())
- vmx_update_msr_bitmap(vcpu);
-
- if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
- vmcs12->vm_exit_msr_load_count))
- nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
-}
-
-static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
-{
- struct shared_msr_entry *efer_msr;
- unsigned int i;
-
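- /*
- * Reconstruct vmcs01's guest EFER from the possible sources, in
- * order: the dedicated VMCS field; host_efer when the CPU's EFER
- * load control exists but vmcs01 isn't using it (KVM only skips
- * the control when guest and host EFER match); the MSR autoload
- * list; the shared MSR cache; and host_efer as the final default.
- */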
- if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
- return vmcs_read64(GUEST_IA32_EFER);
-
- if (cpu_has_load_ia32_efer)
- return host_efer;
-
- for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
- if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
- return vmx->msr_autoload.guest.val[i].value;
- }
-
- efer_msr = find_msr_entry(vmx, MSR_EFER);
- if (efer_msr)
- return efer_msr->data;
-
- return host_efer;
-}
-
-static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct vmx_msr_entry g, h;
- struct msr_data msr;
- gpa_t gpa;
- u32 i, j;
-
- vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
-
- if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
- /*
- * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
- * as vmcs01.GUEST_DR7 contains a userspace defined value
- * and vcpu->arch.dr7 is not squirreled away before the
- * nested VMENTER (not worth adding a variable in nested_vmx).
- */
- if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
- kvm_set_dr(vcpu, 7, DR7_FIXED_1);
- else
- WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
- }
-
- /*
- * Note that calling vmx_set_{efer,cr0,cr4} is important as they
- * handle a variety of side effects to KVM's software model.
- */
- vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
-
- vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
- vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
-
- vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
- vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
-
- nested_ept_uninit_mmu_context(vcpu);
- vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
- __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
-
- /*
- * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
- * from vmcs01 (if necessary). The PDPTRs are not loaded on
- * VMFail; like everything else, we just need to ensure our
- * software model is up-to-date.
- */
- ept_save_pdptrs(vcpu);
-
- kvm_mmu_reset_context(vcpu);
-
- if (cpu_has_vmx_msr_bitmap())
- vmx_update_msr_bitmap(vcpu);
-
- /*
- * This nasty bit of open coding is a compromise between blindly
- * loading L1's MSRs using the exit load lists (incorrect emulation
- * of VMFail), leaving the nested VM's MSRs in the software model
- * (incorrect behavior) and snapshotting the modified MSRs (too
- * expensive since the lists are not bounded by hardware). For each
- * MSR that was (prematurely) loaded from the nested VMEntry load
- * list, reload it from the exit load list if it exists and differs
- * from the guest value. The intent is to stuff host state as
- * silently as possible, not to fully process the exit load list.
- */
- msr.host_initiated = false;
- for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
- gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
- if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
- pr_debug_ratelimited(
- "%s read MSR index failed (%u, 0x%08llx)\n",
- __func__, i, gpa);
- goto vmabort;
- }
-
- for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
- gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
- if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
- pr_debug_ratelimited(
- "%s read MSR failed (%u, 0x%08llx)\n",
- __func__, j, gpa);
- goto vmabort;
- }
- if (h.index != g.index)
- continue;
- if (h.value == g.value)
- break;
-
- if (nested_vmx_load_msr_check(vcpu, &h)) {
- pr_debug_ratelimited(
- "%s check failed (%u, 0x%x, 0x%x)\n",
- __func__, j, h.index, h.reserved);
- goto vmabort;
- }
-
- msr.index = h.index;
- msr.data = h.value;
- if (kvm_set_msr(vcpu, &msr)) {
- pr_debug_ratelimited(
- "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
- __func__, j, h.index, h.value);
- goto vmabort;
- }
- }
- }
-
- return;
-
-vmabort:
- nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
-}
-
-/*
- * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
- * and modify vmcs12 to make it see what it would expect to see there if
- * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
- */
-static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
- u32 exit_intr_info,
- unsigned long exit_qualification)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
- /* trying to cancel vmlaunch/vmresume is a bug */
- WARN_ON_ONCE(vmx->nested.nested_run_pending);
-
- leave_guest_mode(vcpu);
-
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
- vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
-
- if (likely(!vmx->fail)) {
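- /*
- * An exit_reason of -1 (from vmx_leave_nested()) means "sync
- * vmcs12 but don't emulate an architectural VM-exit".
- */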
- if (exit_reason == -1)
- sync_vmcs12(vcpu, vmcs12);
- else
- prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
- exit_qualification);
-
- /*
- * Must happen outside of sync_vmcs12() as it will
- * also be used to capture vmcs12 cache as part of
- * capturing nVMX state for snapshot (migration).
- *
- * Otherwise, this flush will dirty guest memory at a
- * point it is already assumed by user-space to be
- * immutable.
- */
- nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
-
- if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
- vmcs12->vm_exit_msr_store_count))
- nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
- } else {
- /*
- * The only expected VM-instruction error is "VM entry with
- * invalid control field(s)." Anything else indicates a
- * problem with L0. And we should never get here with a
- * VMFail of any type if early consistency checks are enabled.
- */
- WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
- VMXERR_ENTRY_INVALID_CONTROL_FIELD);
- WARN_ON_ONCE(nested_early_check);
- }
-
- vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-
- /* Update any VMCS fields that might have changed while L2 ran */
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
- vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
-
- if (kvm_has_tsc_control)
- decache_tsc_multiplier(vmx);
-
- if (vmx->nested.change_vmcs01_virtual_apic_mode) {
- vmx->nested.change_vmcs01_virtual_apic_mode = false;
- vmx_set_virtual_apic_mode(vcpu);
- } else if (!nested_cpu_has_ept(vmcs12) &&
- nested_cpu_has2(vmcs12,
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
- vmx_flush_tlb(vcpu, true);
- }
-
- /* This is needed for same reason as it was needed in prepare_vmcs02 */
- vmx->host_rsp = 0;
-
- /* Unpin physical memory we referred to in vmcs02 */
- if (vmx->nested.apic_access_page) {
- kvm_release_page_dirty(vmx->nested.apic_access_page);
- vmx->nested.apic_access_page = NULL;
- }
- if (vmx->nested.virtual_apic_page) {
- kvm_release_page_dirty(vmx->nested.virtual_apic_page);
- vmx->nested.virtual_apic_page = NULL;
- }
- if (vmx->nested.pi_desc_page) {
- kunmap(vmx->nested.pi_desc_page);
- kvm_release_page_dirty(vmx->nested.pi_desc_page);
- vmx->nested.pi_desc_page = NULL;
- vmx->nested.pi_desc = NULL;
- }
-
- /*
- * We were running in L2, so the mmu_notifier may have forced a reload
- * of the page's hpa for the L2 vmcs. Reload it for L1 before entering L1.
- */
- kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
-
- if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
- vmx->nested.need_vmcs12_sync = true;
-
- /* in case we halted in L2 */
- vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-
- if (likely(!vmx->fail)) {
- /*
- * TODO: SDM says that with acknowledge interrupt on
- * exit, bit 31 of the VM-exit interrupt information
- * (valid interrupt) is always set to 1 on
- * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
- * need kvm_cpu_has_interrupt(). See the commit
- * message for details.
- */
- if (nested_exit_intr_ack_set(vcpu) &&
- exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
- kvm_cpu_has_interrupt(vcpu)) {
- int irq = kvm_cpu_get_interrupt(vcpu);
- WARN_ON(irq < 0);
- vmcs12->vm_exit_intr_info = irq |
- INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
- }
-
- if (exit_reason != -1)
- trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
- vmcs12->exit_qualification,
- vmcs12->idt_vectoring_info_field,
- vmcs12->vm_exit_intr_info,
- vmcs12->vm_exit_intr_error_code,
- KVM_ISA_VMX);
-
- load_vmcs12_host_state(vcpu, vmcs12);
-
- return;
- }
-
- /*
- * After an early L2 VM-entry failure, we're now back
- * in L1 which thinks it just finished a VMLAUNCH or
- * VMRESUME instruction, so we need to set the failure
- * flag and the VM-instruction error field of the VMCS
- * accordingly, and skip the emulated instruction.
- */
- (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-
- /*
- * Restore L1's host state to KVM's software model. We're here
- * because a consistency check was caught by hardware, which
- * means some amount of guest state has been propagated to KVM's
- * model and needs to be unwound to the host's state.
- */
- nested_vmx_restore_host_state(vcpu);
-
- vmx->fail = 0;
-}
-
-/*
- * Forcibly leave nested mode in order to be able to reset the VCPU later on.
- */
-static void vmx_leave_nested(struct kvm_vcpu *vcpu)
-{
- if (is_guest_mode(vcpu)) {
- to_vmx(vcpu)->nested.nested_run_pending = 0;
- nested_vmx_vmexit(vcpu, -1, 0, 0);
- }
- free_nested(vcpu);
-}
-
-static int vmx_check_intercept(struct kvm_vcpu *vcpu,
- struct x86_instruction_info *info,
- enum x86_intercept_stage stage)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
-
- /*
- * RDPID causes #UD if disabled through secondary execution controls.
-	 * Because it is marked as EmulateOnUD, it reaches the emulator (which
-	 * reports it via the RDTSCP intercept), so it must be intercepted here.
- */
- if (info->intercept == x86_intercept_rdtscp &&
- !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
- ctxt->exception.vector = UD_VECTOR;
- ctxt->exception.error_code_valid = false;
- return X86EMUL_PROPAGATE_FAULT;
- }
-
- /* TODO: check more intercepts... */
- return X86EMUL_CONTINUE;
-}
-
-#ifdef CONFIG_X86_64
-/* (a << shift) / divisor; returns 1 on overflow, otherwise 0 */
-static inline int u64_shl_div_u64(u64 a, unsigned int shift,
- u64 divisor, u64 *result)
-{
- u64 low = a << shift, high = a >> (64 - shift);
-
-	/* Avoid overflow on divq: if high >= divisor, the quotient won't fit in 64 bits */
- if (high >= divisor)
- return 1;
-
-	/* low holds the quotient, high holds the remainder (discarded) */
- asm("divq %2\n\t" : "=a" (low), "=d" (high) :
- "rm" (divisor), "0" (low), "1" (high));
- *result = low;
-
- return 0;
-}
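-
-/*
- * Worked example for u64_shl_div_u64() (hypothetical values): a = 3,
- * shift = 4, divisor = 6 gives low = 48, high = 0; since high < divisor,
- * the 128-bit quotient 48 / 6 = 8 fits in 64 bits and divq succeeds.
- * With high >= divisor the quotient would overflow, so 1 is returned
- * before divq can fault.
- */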
-
-static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
-{
- struct vcpu_vmx *vmx;
- u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
-
- if (kvm_mwait_in_guest(vcpu->kvm))
- return -EOPNOTSUPP;
-
- vmx = to_vmx(vcpu);
- tscl = rdtsc();
- guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
- delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
- lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns);
-
- if (delta_tsc > lapic_timer_advance_cycles)
- delta_tsc -= lapic_timer_advance_cycles;
- else
- delta_tsc = 0;
-
- /* Convert to host delta tsc if tsc scaling is enabled */
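-	/*
-	 * KVM's scaling maps host to guest as guest_tsc = (host_tsc *
-	 * ratio) >> frac_bits, so the inverse computed here is
-	 * host_delta = (guest_delta << frac_bits) / ratio, with
-	 * u64_shl_div_u64() failing if the quotient overflows 64 bits.
-	 */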
- if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
- u64_shl_div_u64(delta_tsc,
- kvm_tsc_scaling_ratio_frac_bits,
- vcpu->arch.tsc_scaling_ratio,
- &delta_tsc))
- return -ERANGE;
-
- /*
-	 * If delta_tsc doesn't fit in 32 bits after the preemption-timer
-	 * multiplier shift, we can't use the preemption timer. It might
-	 * fit on later VM-entries, but checking on every VM-entry is
-	 * costly, so just fall back to an hrtimer.
- */
- if (delta_tsc >> (cpu_preemption_timer_multi + 32))
- return -ERANGE;
-
- vmx->hv_deadline_tsc = tscl + delta_tsc;
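-	/*
-	 * A return value of 1 (delta_tsc == 0) tells the caller the
-	 * deadline has already passed and the timer should fire at once.
-	 */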
- return delta_tsc == 0;
-}
-
-static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
-{
- to_vmx(vcpu)->hv_deadline_tsc = -1;
-}
-#endif
-
-static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
-{
- if (!kvm_pause_in_guest(vcpu->kvm))
- shrink_ple_window(vcpu);
-}
-
-static void vmx_slot_enable_log_dirty(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
- kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
- kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
-}
-
-static void vmx_slot_disable_log_dirty(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
- kvm_mmu_slot_set_dirty(kvm, slot);
-}
-
-static void vmx_flush_log_dirty(struct kvm *kvm)
-{
- kvm_flush_pml_buffers(kvm);
-}
-
-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
-{
- struct vmcs12 *vmcs12;
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- gpa_t gpa;
- struct page *page = NULL;
- u64 *pml_address;
-
- if (is_guest_mode(vcpu)) {
- WARN_ON_ONCE(vmx->nested.pml_full);
-
- /*
- * Check if PML is enabled for the nested guest.
-		 * Whether EPTP bit 6 (enable accessed/dirty flags) is set
-		 * was already checked as part of A/D emulation.
- */
- vmcs12 = get_vmcs12(vcpu);
- if (!nested_cpu_has_pml(vmcs12))
- return 0;
-
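-		/*
-		 * The PML log is filled from the top down: guest_pml_index
-		 * starts at PML_ENTITY_NUM - 1 and is decremented per entry,
-		 * so an index of PML_ENTITY_NUM or above means the ring has
-		 * wrapped and the buffer is full; emulate a PML-full exit.
-		 */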
- if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
- vmx->nested.pml_full = true;
- return 1;
- }
-
- gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
-
- page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
- if (is_error_page(page))
- return 0;
-
- pml_address = kmap(page);
- pml_address[vmcs12->guest_pml_index--] = gpa;
- kunmap(page);
- kvm_release_page_clean(page);
- }
-
- return 0;
-}
-
-static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- gfn_t offset, unsigned long mask)
-{
- kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
-}
-
-static void __pi_post_block(struct kvm_vcpu *vcpu)
-{
- struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- struct pi_desc old, new;
- unsigned int dest;
-
- do {
- old.control = new.control = pi_desc->control;
- WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
- "Wakeup handler not enabled while the VCPU is blocked\n");
-
- dest = cpu_physical_id(vcpu->cpu);
-
- if (x2apic_enabled())
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
-
- /* set 'NV' to 'notification vector' */
- new.nv = POSTED_INTR_VECTOR;
- } while (cmpxchg64(&pi_desc->control, old.control,
- new.control) != old.control);
-
- if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
- list_del(&vcpu->blocked_vcpu_list);
- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
- vcpu->pre_pcpu = -1;
- }
-}
-
-/*
- * This routine prepares a vCPU that is about to block while VT-d PI is
- * enabled:
- * - Store the vCPU on the wakeup list so that, when an interrupt
- *   arrives, we can find the right vCPU to wake up.
- * - Update the posted-interrupt descriptor:
- *      'NDST' <-- physical APIC ID of vcpu->pre_pcpu
- *      'NV'   <-- POSTED_INTR_WAKEUP_VECTOR
- * - If 'ON' becomes set during this process (at least one interrupt was
- *   posted for this vCPU), the vCPU cannot block; return 1 in that
- *   case, otherwise return 0.
- */
-static int pi_pre_block(struct kvm_vcpu *vcpu)
-{
- unsigned int dest;
- struct pi_desc old, new;
- struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
-
- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
- return 0;
-
- WARN_ON(irqs_disabled());
- local_irq_disable();
- if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
- vcpu->pre_pcpu = vcpu->cpu;
- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
- list_add_tail(&vcpu->blocked_vcpu_list,
- &per_cpu(blocked_vcpu_on_cpu,
- vcpu->pre_pcpu));
- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
- }
-
- do {
- old.control = new.control = pi_desc->control;
-
-		WARN((pi_desc->sn == 1),
-		     "Warning: SN field of posted-interrupts is set before blocking\n");
-
- /*
-		 * Since the vCPU can be preempted during this process,
-		 * vcpu->cpu may differ from pre_pcpu, so set pre_pcpu as the
-		 * destination of the wakeup notification event; the wakeup
-		 * handler can then find the right vCPU to wake if an
-		 * interrupt arrives while the vCPU is blocked.
- */
- dest = cpu_physical_id(vcpu->pre_pcpu);
-
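-		/*
-		 * e.g. a physical APIC ID of 5 in xAPIC mode is encoded as
-		 * NDST = 0x0500 (destination in bits 15:8), while x2APIC
-		 * mode uses the full 32-bit ID directly.
-		 */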
- if (x2apic_enabled())
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
-
- /* set 'NV' to 'wakeup vector' */
- new.nv = POSTED_INTR_WAKEUP_VECTOR;
- } while (cmpxchg64(&pi_desc->control, old.control,
- new.control) != old.control);
-
- /* We should not block the vCPU if an interrupt is posted for it. */
- if (pi_test_on(pi_desc) == 1)
- __pi_post_block(vcpu);
-
- local_irq_enable();
- return (vcpu->pre_pcpu == -1);
-}
-
-static int vmx_pre_block(struct kvm_vcpu *vcpu)
-{
- if (pi_pre_block(vcpu))
- return 1;
-
- if (kvm_lapic_hv_timer_in_use(vcpu))
- kvm_lapic_switch_to_sw_timer(vcpu);
-
- return 0;
-}
-
-static void pi_post_block(struct kvm_vcpu *vcpu)
-{
- if (vcpu->pre_pcpu == -1)
- return;
-
- WARN_ON(irqs_disabled());
- local_irq_disable();
- __pi_post_block(vcpu);
- local_irq_enable();
-}
-
-static void vmx_post_block(struct kvm_vcpu *vcpu)
-{
- if (kvm_x86_ops->set_hv_timer)
- kvm_lapic_switch_to_hv_timer(vcpu);
-
- pi_post_block(vcpu);
-}
-
-/*
- * vmx_update_pi_irte - set IRTE for Posted-Interrupts
- *
- * @kvm: the VM in question
- * @host_irq: host irq of the interrupt
- * @guest_irq: gsi of the interrupt
- * @set: true to set up posted-interrupt delivery, false to tear it down
- * returns 0 on success, < 0 on failure
- */
-static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set)
-{
- struct kvm_kernel_irq_routing_entry *e;
- struct kvm_irq_routing_table *irq_rt;
- struct kvm_lapic_irq irq;
- struct kvm_vcpu *vcpu;
- struct vcpu_data vcpu_info;
- int idx, ret = 0;
-
- if (!kvm_arch_has_assigned_device(kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(kvm->vcpus[0]))
- return 0;
-
- idx = srcu_read_lock(&kvm->irq_srcu);
- irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
- if (guest_irq >= irq_rt->nr_rt_entries ||
- hlist_empty(&irq_rt->map[guest_irq])) {
- pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
- guest_irq, irq_rt->nr_rt_entries);
- goto out;
- }
-
- hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
- if (e->type != KVM_IRQ_ROUTING_MSI)
- continue;
- /*
-		 * VT-d PI cannot post multicast/broadcast interrupts to a
-		 * vCPU, so interrupt remapping is still used for those kinds
-		 * of interrupts.
-		 *
-		 * For lowest-priority interrupts, only those with a single
-		 * CPU as the destination are supported, e.g. the user
-		 * configures the interrupts via /proc/irq or uses irqbalance
-		 * to make them single-CPU.
-		 *
-		 * Full lowest-priority support may be added later.
- */
-
- kvm_set_msi_irq(kvm, e, &irq);
- if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
- /*
- * Make sure the IRTE is in remapped mode if
- * we don't handle it in posted mode.
- */
- ret = irq_set_vcpu_affinity(host_irq, NULL);
- if (ret < 0) {
- printk(KERN_INFO
-					"failed to fall back to remapped mode, irq: %u\n",
- host_irq);
- goto out;
- }
-
- continue;
- }
-
- vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
- vcpu_info.vector = irq.vector;
-
- trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
- vcpu_info.vector, vcpu_info.pi_desc_addr, set);
-
- if (set)
- ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
- else
- ret = irq_set_vcpu_affinity(host_irq, NULL);
-
- if (ret < 0) {
- printk(KERN_INFO "%s: failed to update PI IRTE\n",
- __func__);
- goto out;
- }
- }
-
- ret = 0;
-out:
- srcu_read_unlock(&kvm->irq_srcu, idx);
- return ret;
-}
-
-static void vmx_setup_mce(struct kvm_vcpu *vcpu)
-{
- if (vcpu->arch.mcg_cap & MCG_LMCE_P)
- to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
- FEATURE_CONTROL_LMCE;
- else
- to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
- ~FEATURE_CONTROL_LMCE;
-}
-
-static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
-{
- /* we need a nested vmexit to enter SMM, postpone if run is pending */
- if (to_vmx(vcpu)->nested.nested_run_pending)
- return 0;
- return 1;
-}
-
-static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
- if (vmx->nested.smm.guest_mode)
- nested_vmx_vmexit(vcpu, -1, 0, 0);
-
- vmx->nested.smm.vmxon = vmx->nested.vmxon;
- vmx->nested.vmxon = false;
- vmx_clear_hlt(vcpu);
- return 0;
-}
-
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- int ret;
-
- if (vmx->nested.smm.vmxon) {
- vmx->nested.vmxon = true;
- vmx->nested.smm.vmxon = false;
- }
-
- if (vmx->nested.smm.guest_mode) {
- vcpu->arch.hflags &= ~HF_SMM_MASK;
- ret = nested_vmx_enter_non_root_mode(vcpu, false);
- vcpu->arch.hflags |= HF_SMM_MASK;
- if (ret)
- return ret;
-
- vmx->nested.smm.guest_mode = false;
- }
- return 0;
-}
-
-static int enable_smi_window(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- /*
-	 * If we do two consecutive get/set_nested_state()s while L2 is
-	 * running, hv_evmcs may end up not being mapped (it is mapped from
-	 * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode(), as a
-	 * vmcs12 always exists while it is true.
- */
- return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
- vmx->nested.hv_evmcs;
-}
-
-static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
- struct kvm_nested_state __user *user_kvm_nested_state,
- u32 user_data_size)
-{
- struct vcpu_vmx *vmx;
- struct vmcs12 *vmcs12;
- struct kvm_nested_state kvm_state = {
- .flags = 0,
- .format = 0,
- .size = sizeof(kvm_state),
- .vmx.vmxon_pa = -1ull,
- .vmx.vmcs_pa = -1ull,
- };
-
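-	/*
-	 * A NULL vcpu is used to probe the required buffer size: the fixed
-	 * header plus room for a vmcs12 and, when L1 uses shadow VMCS, a
-	 * shadow vmcs12.
-	 */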
- if (!vcpu)
- return kvm_state.size + 2 * VMCS12_SIZE;
-
- vmx = to_vmx(vcpu);
- vmcs12 = get_vmcs12(vcpu);
-
- if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
- kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
-
- if (nested_vmx_allowed(vcpu) &&
- (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
- kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
- kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
-
- if (vmx_has_valid_vmcs12(vcpu)) {
- kvm_state.size += VMCS12_SIZE;
-
- if (is_guest_mode(vcpu) &&
- nested_cpu_has_shadow_vmcs(vmcs12) &&
- vmcs12->vmcs_link_pointer != -1ull)
- kvm_state.size += VMCS12_SIZE;
- }
-
- if (vmx->nested.smm.vmxon)
- kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
-
- if (vmx->nested.smm.guest_mode)
- kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
-
- if (is_guest_mode(vcpu)) {
- kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
-
- if (vmx->nested.nested_run_pending)
- kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
- }
- }
-
- if (user_data_size < kvm_state.size)
- goto out;
-
- if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
- return -EFAULT;
-
- if (!vmx_has_valid_vmcs12(vcpu))
- goto out;
-
- /*
- * When running L2, the authoritative vmcs12 state is in the
- * vmcs02. When running L1, the authoritative vmcs12 state is
- * in the shadow or enlightened vmcs linked to vmcs01, unless
- * need_vmcs12_sync is set, in which case, the authoritative
- * vmcs12 state is in the vmcs12 already.
- */
- if (is_guest_mode(vcpu)) {
- sync_vmcs12(vcpu, vmcs12);
- } else if (!vmx->nested.need_vmcs12_sync) {
- if (vmx->nested.hv_evmcs)
- copy_enlightened_to_vmcs12(vmx);
- else if (enable_shadow_vmcs)
- copy_shadow_to_vmcs12(vmx);
- }
-
- if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
- return -EFAULT;
-
- if (nested_cpu_has_shadow_vmcs(vmcs12) &&
- vmcs12->vmcs_link_pointer != -1ull) {
- if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
- get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
- return -EFAULT;
- }
-
-out:
- return kvm_state.size;
-}
-
-static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
- struct kvm_nested_state __user *user_kvm_nested_state,
- struct kvm_nested_state *kvm_state)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct vmcs12 *vmcs12;
- u32 exit_qual;
- int ret;
-
- if (kvm_state->format != 0)
- return -EINVAL;
-
- if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
- nested_enable_evmcs(vcpu, NULL);
-
- if (!nested_vmx_allowed(vcpu))
- return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
-
- if (kvm_state->vmx.vmxon_pa == -1ull) {
- if (kvm_state->vmx.smm.flags)
- return -EINVAL;
-
- if (kvm_state->vmx.vmcs_pa != -1ull)
- return -EINVAL;
-
- vmx_leave_nested(vcpu);
- return 0;
- }
-
- if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
- return -EINVAL;
-
- if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
- (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
- return -EINVAL;
-
- if (kvm_state->vmx.smm.flags &
- ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
- return -EINVAL;
-
- /*
- * SMM temporarily disables VMX, so we cannot be in guest mode,
- * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
- * must be zero.
- */
- if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
- return -EINVAL;
-
- if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
- !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
- return -EINVAL;
-
- vmx_leave_nested(vcpu);
- if (kvm_state->vmx.vmxon_pa == -1ull)
- return 0;
-
- vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
- ret = enter_vmx_operation(vcpu);
- if (ret)
- return ret;
-
- /* Empty 'VMXON' state is permitted */
- if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
- return 0;
-
- if (kvm_state->vmx.vmcs_pa != -1ull) {
- if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
- !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
- return -EINVAL;
-
- set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
- } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
- /*
- * Sync eVMCS upon entry as we may not have
- * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
- */
- vmx->nested.need_vmcs12_sync = true;
- } else {
- return -EINVAL;
- }
-
- if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
- vmx->nested.smm.vmxon = true;
- vmx->nested.vmxon = false;
-
- if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
- vmx->nested.smm.guest_mode = true;
- }
-
- vmcs12 = get_vmcs12(vcpu);
- if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
- return -EFAULT;
-
- if (vmcs12->hdr.revision_id != VMCS12_REVISION)
- return -EINVAL;
-
- if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
- return 0;
-
- vmx->nested.nested_run_pending =
- !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
-
- if (nested_cpu_has_shadow_vmcs(vmcs12) &&
- vmcs12->vmcs_link_pointer != -1ull) {
- struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
- if (kvm_state->size < sizeof(kvm_state) + 2 * sizeof(*vmcs12))
- return -EINVAL;
-
- if (copy_from_user(shadow_vmcs12,
- user_kvm_nested_state->data + VMCS12_SIZE,
- sizeof(*vmcs12)))
- return -EFAULT;
-
- if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
- !shadow_vmcs12->hdr.shadow_vmcs)
- return -EINVAL;
- }
-
- if (check_vmentry_prereqs(vcpu, vmcs12) ||
- check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
- return -EINVAL;
-
- vmx->nested.dirty_vmcs12 = true;
- ret = nested_vmx_enter_non_root_mode(vcpu, false);
- if (ret)
- return -EINVAL;
-
- return 0;
-}
-
-static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
- .cpu_has_kvm_support = cpu_has_kvm_support,
- .disabled_by_bios = vmx_disabled_by_bios,
- .hardware_setup = hardware_setup,
- .hardware_unsetup = hardware_unsetup,
- .check_processor_compatibility = vmx_check_processor_compat,
- .hardware_enable = hardware_enable,
- .hardware_disable = hardware_disable,
- .cpu_has_accelerated_tpr = report_flexpriority,
- .has_emulated_msr = vmx_has_emulated_msr,
-
- .vm_init = vmx_vm_init,
- .vm_alloc = vmx_vm_alloc,
- .vm_free = vmx_vm_free,
-
- .vcpu_create = vmx_create_vcpu,
- .vcpu_free = vmx_free_vcpu,
- .vcpu_reset = vmx_vcpu_reset,
-
- .prepare_guest_switch = vmx_prepare_switch_to_guest,
- .vcpu_load = vmx_vcpu_load,
- .vcpu_put = vmx_vcpu_put,
-
- .update_bp_intercept = update_exception_bitmap,
- .get_msr_feature = vmx_get_msr_feature,
- .get_msr = vmx_get_msr,
- .set_msr = vmx_set_msr,
- .get_segment_base = vmx_get_segment_base,
- .get_segment = vmx_get_segment,
- .set_segment = vmx_set_segment,
- .get_cpl = vmx_get_cpl,
- .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
- .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
- .decache_cr3 = vmx_decache_cr3,
- .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
- .set_cr0 = vmx_set_cr0,
- .set_cr3 = vmx_set_cr3,
- .set_cr4 = vmx_set_cr4,
- .set_efer = vmx_set_efer,
- .get_idt = vmx_get_idt,
- .set_idt = vmx_set_idt,
- .get_gdt = vmx_get_gdt,
- .set_gdt = vmx_set_gdt,
- .get_dr6 = vmx_get_dr6,
- .set_dr6 = vmx_set_dr6,
- .set_dr7 = vmx_set_dr7,
- .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
- .cache_reg = vmx_cache_reg,
- .get_rflags = vmx_get_rflags,
- .set_rflags = vmx_set_rflags,
-
- .tlb_flush = vmx_flush_tlb,
- .tlb_flush_gva = vmx_flush_tlb_gva,
-
- .run = vmx_vcpu_run,
- .handle_exit = vmx_handle_exit,
- .skip_emulated_instruction = skip_emulated_instruction,
- .set_interrupt_shadow = vmx_set_interrupt_shadow,
- .get_interrupt_shadow = vmx_get_interrupt_shadow,
- .patch_hypercall = vmx_patch_hypercall,
- .set_irq = vmx_inject_irq,
- .set_nmi = vmx_inject_nmi,
- .queue_exception = vmx_queue_exception,
- .cancel_injection = vmx_cancel_injection,
- .interrupt_allowed = vmx_interrupt_allowed,
- .nmi_allowed = vmx_nmi_allowed,
- .get_nmi_mask = vmx_get_nmi_mask,
- .set_nmi_mask = vmx_set_nmi_mask,
- .enable_nmi_window = enable_nmi_window,
- .enable_irq_window = enable_irq_window,
- .update_cr8_intercept = update_cr8_intercept,
- .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
- .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
- .get_enable_apicv = vmx_get_enable_apicv,
- .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
- .load_eoi_exitmap = vmx_load_eoi_exitmap,
- .apicv_post_state_restore = vmx_apicv_post_state_restore,
- .hwapic_irr_update = vmx_hwapic_irr_update,
- .hwapic_isr_update = vmx_hwapic_isr_update,
- .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
- .sync_pir_to_irr = vmx_sync_pir_to_irr,
- .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
-
- .set_tss_addr = vmx_set_tss_addr,
- .set_identity_map_addr = vmx_set_identity_map_addr,
- .get_tdp_level = get_ept_level,
- .get_mt_mask = vmx_get_mt_mask,
-
- .get_exit_info = vmx_get_exit_info,
-
- .get_lpage_level = vmx_get_lpage_level,
-
- .cpuid_update = vmx_cpuid_update,
-
- .rdtscp_supported = vmx_rdtscp_supported,
- .invpcid_supported = vmx_invpcid_supported,
-
- .set_supported_cpuid = vmx_set_supported_cpuid,
-
- .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
-
- .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
- .write_tsc_offset = vmx_write_tsc_offset,
-
- .set_tdp_cr3 = vmx_set_cr3,
-
- .check_intercept = vmx_check_intercept,
- .handle_external_intr = vmx_handle_external_intr,
- .mpx_supported = vmx_mpx_supported,
- .xsaves_supported = vmx_xsaves_supported,
- .umip_emulated = vmx_umip_emulated,
-
- .check_nested_events = vmx_check_nested_events,
- .request_immediate_exit = vmx_request_immediate_exit,
-
- .sched_in = vmx_sched_in,
-
- .slot_enable_log_dirty = vmx_slot_enable_log_dirty,
- .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
- .flush_log_dirty = vmx_flush_log_dirty,
- .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
- .write_log_dirty = vmx_write_pml_buffer,
-
- .pre_block = vmx_pre_block,
- .post_block = vmx_post_block,
-
- .pmu_ops = &intel_pmu_ops,
-
- .update_pi_irte = vmx_update_pi_irte,
-
-#ifdef CONFIG_X86_64
- .set_hv_timer = vmx_set_hv_timer,
- .cancel_hv_timer = vmx_cancel_hv_timer,
-#endif
-
- .setup_mce = vmx_setup_mce,
-
- .get_nested_state = vmx_get_nested_state,
- .set_nested_state = vmx_set_nested_state,
- .get_vmcs12_pages = nested_get_vmcs12_pages,
-
- .smi_allowed = vmx_smi_allowed,
- .pre_enter_smm = vmx_pre_enter_smm,
- .pre_leave_smm = vmx_pre_leave_smm,
- .enable_smi_window = enable_smi_window,
-
- .nested_enable_evmcs = nested_enable_evmcs,
-};
-
-static void vmx_cleanup_l1d_flush(void)
-{
- if (vmx_l1d_flush_pages) {
- free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
- vmx_l1d_flush_pages = NULL;
- }
- /* Restore state so sysfs ignores VMX */
- l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
-}
-
-static void vmx_exit(void)
-{
-#ifdef CONFIG_KEXEC_CORE
- RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
- synchronize_rcu();
-#endif
-
- kvm_exit();
-
-#if IS_ENABLED(CONFIG_HYPERV)
- if (static_branch_unlikely(&enable_evmcs)) {
- int cpu;
- struct hv_vp_assist_page *vp_ap;
- /*
- * Reset everything to support using non-enlightened VMCS
- * access later (e.g. when we reload the module with
- * enlightened_vmcs=0)
- */
- for_each_online_cpu(cpu) {
- vp_ap = hv_get_vp_assist_page(cpu);
-
- if (!vp_ap)
- continue;
-
- vp_ap->current_nested_vmcs = 0;
- vp_ap->enlighten_vmentry = 0;
- }
-
- static_branch_disable(&enable_evmcs);
- }
-#endif
- vmx_cleanup_l1d_flush();
-}
-module_exit(vmx_exit);
-
-static int __init vmx_init(void)
-{
- int r;
-
-#if IS_ENABLED(CONFIG_HYPERV)
- /*
-	 * Use the Enlightened VMCS only when Hyper-V recommends it and the
-	 * host supports eVMCS v1 or above. Support can also be disabled
-	 * with the module parameter.
- */
- if (enlightened_vmcs &&
- ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
- (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
- KVM_EVMCS_VERSION) {
- int cpu;
-
- /* Check that we have assist pages on all online CPUs */
- for_each_online_cpu(cpu) {
- if (!hv_get_vp_assist_page(cpu)) {
- enlightened_vmcs = false;
- break;
- }
- }
-
- if (enlightened_vmcs) {
- pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n");
- static_branch_enable(&enable_evmcs);
- }
- } else {
- enlightened_vmcs = false;
- }
-#endif
-
- r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
- __alignof__(struct vcpu_vmx), THIS_MODULE);
- if (r)
- return r;
-
- /*
- * Must be called after kvm_init() so enable_ept is properly set
-	 * up. Hand in the mitigation parameter value that was stored by
-	 * the pre-module-init parser. If no parameter was given, it will
-	 * contain 'auto', which is turned into the default 'cond'
- * mitigation mode.
- */
- if (boot_cpu_has(X86_BUG_L1TF)) {
- r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
- if (r) {
- vmx_exit();
- return r;
- }
- }
-
-#ifdef CONFIG_KEXEC_CORE
- rcu_assign_pointer(crash_vmclear_loaded_vmcss,
- crash_vmclear_local_loaded_vmcss);
-#endif
- vmx_check_vmcs12_offsets();
-
- return 0;
-}
-module_init(vmx_init);
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
new file mode 100644
index 000000000000..854e144131c6
--- /dev/null
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_CAPS_H
+#define __KVM_X86_VMX_CAPS_H
+
+#include "lapic.h"
+
+extern bool __read_mostly enable_vpid;
+extern bool __read_mostly flexpriority_enabled;
+extern bool __read_mostly enable_ept;
+extern bool __read_mostly enable_unrestricted_guest;
+extern bool __read_mostly enable_ept_ad_bits;
+extern bool __read_mostly enable_pml;
+extern int __read_mostly pt_mode;
+
+#define PT_MODE_SYSTEM 0
+#define PT_MODE_HOST_GUEST 1
+
+struct nested_vmx_msrs {
+ /*
+ * We only store the "true" versions of the VMX capability MSRs. We
+ * generate the "non-true" versions by setting the must-be-1 bits
+ * according to the SDM.
+ */
+ u32 procbased_ctls_low;
+ u32 procbased_ctls_high;
+ u32 secondary_ctls_low;
+ u32 secondary_ctls_high;
+ u32 pinbased_ctls_low;
+ u32 pinbased_ctls_high;
+ u32 exit_ctls_low;
+ u32 exit_ctls_high;
+ u32 entry_ctls_low;
+ u32 entry_ctls_high;
+ u32 misc_low;
+ u32 misc_high;
+ u32 ept_caps;
+ u32 vpid_caps;
+ u64 basic;
+ u64 cr0_fixed0;
+ u64 cr0_fixed1;
+ u64 cr4_fixed0;
+ u64 cr4_fixed1;
+ u64 vmcs_enum;
+ u64 vmfunc_controls;
+};
+
+struct vmcs_config {
+ int size;
+ int order;
+ u32 basic_cap;
+ u32 revision_id;
+ u32 pin_based_exec_ctrl;
+ u32 cpu_based_exec_ctrl;
+ u32 cpu_based_2nd_exec_ctrl;
+ u32 vmexit_ctrl;
+ u32 vmentry_ctrl;
+ struct nested_vmx_msrs nested;
+};
+extern struct vmcs_config vmcs_config;
+
+struct vmx_capability {
+ u32 ept;
+ u32 vpid;
+};
+extern struct vmx_capability vmx_capability;
+
+static inline bool cpu_has_vmx_basic_inout(void)
+{
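+	/*
+	 * basic_cap caches the high 32 bits of MSR_IA32_VMX_BASIC; shifting
+	 * it back up restores VMX_BASIC_INOUT (bit 54) to its position.
+	 */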
+ return (((u64)vmcs_config.basic_cap << 32) & VMX_BASIC_INOUT);
+}
+
+static inline bool cpu_has_virtual_nmis(void)
+{
+ return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
+}
+
+static inline bool cpu_has_vmx_preemption_timer(void)
+{
+ return vmcs_config.pin_based_exec_ctrl &
+ PIN_BASED_VMX_PREEMPTION_TIMER;
+}
+
+static inline bool cpu_has_vmx_posted_intr(void)
+{
+ return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
+ vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
+}
+
+static inline bool cpu_has_load_ia32_efer(void)
+{
+ return (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_EFER) &&
+ (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_EFER);
+}
+
+static inline bool cpu_has_load_perf_global_ctrl(void)
+{
+ return (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
+ (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+}
+
+static inline bool vmx_mpx_supported(void)
+{
+ return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
+ (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
+}
+
+static inline bool cpu_has_vmx_tpr_shadow(void)
+{
+ return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
+}
+
+static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
+{
+ return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
+}
+
+static inline bool cpu_has_vmx_msr_bitmap(void)
+{
+ return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
+}
+
+static inline bool cpu_has_secondary_exec_ctrls(void)
+{
+ return vmcs_config.cpu_based_exec_ctrl &
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+}
+
+static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+}
+
+static inline bool cpu_has_vmx_ept(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_ENABLE_EPT;
+}
+
+static inline bool vmx_umip_emulated(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_DESC;
+}
+
+static inline bool cpu_has_vmx_rdtscp(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_RDTSCP;
+}
+
+static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+}
+
+static inline bool cpu_has_vmx_vpid(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_ENABLE_VPID;
+}
+
+static inline bool cpu_has_vmx_wbinvd_exit(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_WBINVD_EXITING;
+}
+
+static inline bool cpu_has_vmx_unrestricted_guest(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_UNRESTRICTED_GUEST;
+}
+
+static inline bool cpu_has_vmx_apic_register_virt(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_APIC_REGISTER_VIRT;
+}
+
+static inline bool cpu_has_vmx_virtual_intr_delivery(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
+}
+
+static inline bool cpu_has_vmx_ple(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+}
+
+static inline bool vmx_rdrand_supported(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_RDRAND_EXITING;
+}
+
+static inline bool cpu_has_vmx_invpcid(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_ENABLE_INVPCID;
+}
+
+static inline bool cpu_has_vmx_vmfunc(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_ENABLE_VMFUNC;
+}
+
+static inline bool cpu_has_vmx_shadow_vmcs(void)
+{
+ u64 vmx_msr;
+
+ /* check if the cpu supports writing r/o exit information fields */
+ rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
+ if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
+ return false;
+
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_SHADOW_VMCS;
+}
+
+static inline bool cpu_has_vmx_encls_vmexit(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_ENCLS_EXITING;
+}
+
+static inline bool vmx_rdseed_supported(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_RDSEED_EXITING;
+}
+
+static inline bool cpu_has_vmx_pml(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
+}
+
+static inline bool vmx_xsaves_supported(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_XSAVES;
+}
+
+static inline bool cpu_has_vmx_tsc_scaling(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_TSC_SCALING;
+}
+
+static inline bool cpu_has_vmx_apicv(void)
+{
+ return cpu_has_vmx_apic_register_virt() &&
+ cpu_has_vmx_virtual_intr_delivery() &&
+ cpu_has_vmx_posted_intr();
+}
+
+static inline bool cpu_has_vmx_flexpriority(void)
+{
+ return cpu_has_vmx_tpr_shadow() &&
+ cpu_has_vmx_virtualize_apic_accesses();
+}
+
+static inline bool cpu_has_vmx_ept_execute_only(void)
+{
+ return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
+}
+
+static inline bool cpu_has_vmx_ept_4levels(void)
+{
+ return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
+}
+
+static inline bool cpu_has_vmx_ept_5levels(void)
+{
+ return vmx_capability.ept & VMX_EPT_PAGE_WALK_5_BIT;
+}
+
+static inline bool cpu_has_vmx_ept_mt_wb(void)
+{
+ return vmx_capability.ept & VMX_EPTP_WB_BIT;
+}
+
+static inline bool cpu_has_vmx_ept_2m_page(void)
+{
+ return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
+}
+
+static inline bool cpu_has_vmx_ept_1g_page(void)
+{
+ return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
+}
+
+static inline bool cpu_has_vmx_ept_ad_bits(void)
+{
+ return vmx_capability.ept & VMX_EPT_AD_BIT;
+}
+
+static inline bool cpu_has_vmx_invept_context(void)
+{
+ return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
+}
+
+static inline bool cpu_has_vmx_invept_global(void)
+{
+ return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
+}
+
+static inline bool cpu_has_vmx_invvpid(void)
+{
+ return vmx_capability.vpid & VMX_VPID_INVVPID_BIT;
+}
+
+static inline bool cpu_has_vmx_invvpid_individual_addr(void)
+{
+ return vmx_capability.vpid & VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT;
+}
+
+static inline bool cpu_has_vmx_invvpid_single(void)
+{
+ return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
+}
+
+static inline bool cpu_has_vmx_invvpid_global(void)
+{
+ return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
+}
+
+static inline bool cpu_has_vmx_intel_pt(void)
+{
+ u64 vmx_msr;
+
+ rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
+ return (vmx_msr & MSR_IA32_VMX_MISC_INTEL_PT) &&
+ (vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_PT_USE_GPA) &&
+ (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_IA32_RTIT_CTL) &&
+ (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_RTIT_CTL);
+}
+
+#endif /* __KVM_X86_VMX_CAPS_H */
diff --git a/arch/x86/kvm/vmx_evmcs.h b/arch/x86/kvm/vmx/evmcs.c
index 210a884090ad..95bc2247478d 100644
--- a/arch/x86/kvm/vmx_evmcs.h
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -1,20 +1,22 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __KVM_X86_VMX_EVMCS_H
-#define __KVM_X86_VMX_EVMCS_H
+// SPDX-License-Identifier: GPL-2.0
-#include <asm/hyperv-tlfs.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+
+#include "evmcs.h"
+#include "vmcs.h"
+#include "vmx.h"
+
+DEFINE_STATIC_KEY_FALSE(enable_evmcs);
+
+#if IS_ENABLED(CONFIG_HYPERV)
#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
#define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
#define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
{EVMCS1_OFFSET(name), clean_field}
-struct evmcs_field {
- u16 offset;
- u16 clean_field;
-};
-
-static const struct evmcs_field vmcs_field_to_evmcs_1[] = {
+const struct evmcs_field vmcs_field_to_evmcs_1[] = {
/* 64 bit rw */
EVMCS1_FIELD(GUEST_RIP, guest_rip,
HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE),
@@ -298,27 +300,53 @@ static const struct evmcs_field vmcs_field_to_evmcs_1[] = {
EVMCS1_FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id,
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT),
};
+const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1);
-static __always_inline int get_evmcs_offset(unsigned long field,
- u16 *clean_field)
+void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
{
- unsigned int index = ROL16(field, 6);
- const struct evmcs_field *evmcs_field;
+ vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+ vmcs_conf->cpu_based_2nd_exec_ctrl &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
- if (unlikely(index >= ARRAY_SIZE(vmcs_field_to_evmcs_1))) {
- WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n",
- field);
- return -ENOENT;
- }
+ vmcs_conf->vmexit_ctrl &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
+ vmcs_conf->vmentry_ctrl &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
- evmcs_field = &vmcs_field_to_evmcs_1[index];
+}
+#endif
- if (clean_field)
- *clean_field = evmcs_field->clean_field;
+uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ /*
+ * vmcs_version represents the range of supported Enlightened VMCS
+	 * versions: the low 8 bits hold the minimum supported version, the
+	 * high 8 bits the maximum. KVM supports versions from 1 to
+ * KVM_EVMCS_VERSION.
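+	 *
+	 * e.g. with KVM_EVMCS_VERSION == 1 this returns 0x0101: both the
+	 * minimum and the maximum supported version are 1.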
+ */
+ if (vmx->nested.enlightened_vmcs_enabled)
+ return (KVM_EVMCS_VERSION << 8) | 1;
- return evmcs_field->offset;
+ return 0;
}
-#undef ROL16
+int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (vmcs_version)
+ *vmcs_version = nested_get_evmcs_version(vcpu);
+
+ /* We don't support disabling the feature for simplicity. */
+ if (vmx->nested.enlightened_vmcs_enabled)
+ return 0;
-#endif /* __KVM_X86_VMX_EVMCS_H */
+ vmx->nested.enlightened_vmcs_enabled = true;
+
+ vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+ vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
+ vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
+ vmx->nested.msrs.secondary_ctls_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
+ vmx->nested.msrs.vmfunc_controls &= ~EVMCS1_UNSUPPORTED_VMFUNC;
+
+ return 0;
+}
diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
new file mode 100644
index 000000000000..e0fcef85b332
--- /dev/null
+++ b/arch/x86/kvm/vmx/evmcs.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_EVMCS_H
+#define __KVM_X86_VMX_EVMCS_H
+
+#include <linux/jump_label.h>
+
+#include <asm/hyperv-tlfs.h>
+#include <asm/mshyperv.h>
+#include <asm/vmx.h>
+
+#include "capabilities.h"
+#include "vmcs.h"
+
+struct vmcs_config;
+
+DECLARE_STATIC_KEY_FALSE(enable_evmcs);
+
+#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
+
+#define KVM_EVMCS_VERSION 1
+
+/*
+ * Enlightened VMCSv1 doesn't support these:
+ *
+ * POSTED_INTR_NV = 0x00000002,
+ * GUEST_INTR_STATUS = 0x00000810,
+ * APIC_ACCESS_ADDR = 0x00002014,
+ * POSTED_INTR_DESC_ADDR = 0x00002016,
+ * EOI_EXIT_BITMAP0 = 0x0000201c,
+ * EOI_EXIT_BITMAP1 = 0x0000201e,
+ * EOI_EXIT_BITMAP2 = 0x00002020,
+ * EOI_EXIT_BITMAP3 = 0x00002022,
+ * GUEST_PML_INDEX = 0x00000812,
+ * PML_ADDRESS = 0x0000200e,
+ * VM_FUNCTION_CONTROL = 0x00002018,
+ * EPTP_LIST_ADDRESS = 0x00002024,
+ * VMREAD_BITMAP = 0x00002026,
+ * VMWRITE_BITMAP = 0x00002028,
+ *
+ * TSC_MULTIPLIER = 0x00002032,
+ * PLE_GAP = 0x00004020,
+ * PLE_WINDOW = 0x00004022,
+ * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
+ * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
+ * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
+ *
+ * Currently unsupported in KVM:
+ * GUEST_IA32_RTIT_CTL = 0x00002814,
+ */
+#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
+ PIN_BASED_VMX_PREEMPTION_TIMER)
+#define EVMCS1_UNSUPPORTED_2NDEXEC \
+ (SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \
+ SECONDARY_EXEC_APIC_REGISTER_VIRT | \
+ SECONDARY_EXEC_ENABLE_PML | \
+ SECONDARY_EXEC_ENABLE_VMFUNC | \
+ SECONDARY_EXEC_SHADOW_VMCS | \
+ SECONDARY_EXEC_TSC_SCALING | \
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING)
+#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
+#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
+
+#if IS_ENABLED(CONFIG_HYPERV)
+
+struct evmcs_field {
+ u16 offset;
+ u16 clean_field;
+};
+
+extern const struct evmcs_field vmcs_field_to_evmcs_1[];
+extern const unsigned int nr_evmcs_1_fields;
+
+#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
+
+static __always_inline int get_evmcs_offset(unsigned long field,
+ u16 *clean_field)
+{
+ unsigned int index = ROL16(field, 6);
+ const struct evmcs_field *evmcs_field;
+
+ if (unlikely(index >= nr_evmcs_1_fields)) {
+ WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n",
+ field);
+ return -ENOENT;
+ }
+
+ evmcs_field = &vmcs_field_to_evmcs_1[index];
+
+ if (clean_field)
+ *clean_field = evmcs_field->clean_field;
+
+ return evmcs_field->offset;
+}
+
+#undef ROL16
+
+static inline void evmcs_write64(unsigned long field, u64 value)
+{
+ u16 clean_field;
+ int offset = get_evmcs_offset(field, &clean_field);
+
+ if (offset < 0)
+ return;
+
+ *(u64 *)((char *)current_evmcs + offset) = value;
+
+ current_evmcs->hv_clean_fields &= ~clean_field;
+}
+
+static inline void evmcs_write32(unsigned long field, u32 value)
+{
+ u16 clean_field;
+ int offset = get_evmcs_offset(field, &clean_field);
+
+ if (offset < 0)
+ return;
+
+ *(u32 *)((char *)current_evmcs + offset) = value;
+ current_evmcs->hv_clean_fields &= ~clean_field;
+}
+
+static inline void evmcs_write16(unsigned long field, u16 value)
+{
+ u16 clean_field;
+ int offset = get_evmcs_offset(field, &clean_field);
+
+ if (offset < 0)
+ return;
+
+ *(u16 *)((char *)current_evmcs + offset) = value;
+ current_evmcs->hv_clean_fields &= ~clean_field;
+}
+
+static inline u64 evmcs_read64(unsigned long field)
+{
+ int offset = get_evmcs_offset(field, NULL);
+
+ if (offset < 0)
+ return 0;
+
+ return *(u64 *)((char *)current_evmcs + offset);
+}
+
+static inline u32 evmcs_read32(unsigned long field)
+{
+ int offset = get_evmcs_offset(field, NULL);
+
+ if (offset < 0)
+ return 0;
+
+ return *(u32 *)((char *)current_evmcs + offset);
+}
+
+static inline u16 evmcs_read16(unsigned long field)
+{
+ int offset = get_evmcs_offset(field, NULL);
+
+ if (offset < 0)
+ return 0;
+
+ return *(u16 *)((char *)current_evmcs + offset);
+}
+
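+/*
+ * Hyper-V skips reloading eVMCS fields whose clean bit is set; clearing
+ * the MSR-bitmap clean field forces the hypervisor to re-read the bitmap
+ * on the next enlightened VM-entry.
+ */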
+static inline void evmcs_touch_msr_bitmap(void)
+{
+ if (unlikely(!current_evmcs))
+ return;
+
+ if (current_evmcs->hv_enlightenments_control.msr_bitmap)
+ current_evmcs->hv_clean_fields &=
+ ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP;
+}
+
+static inline void evmcs_load(u64 phys_addr)
+{
+ struct hv_vp_assist_page *vp_ap =
+ hv_get_vp_assist_page(smp_processor_id());
+
+ vp_ap->current_nested_vmcs = phys_addr;
+ vp_ap->enlighten_vmentry = 1;
+}
+
+void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
+#else /* !IS_ENABLED(CONFIG_HYPERV) */
+static inline void evmcs_write64(unsigned long field, u64 value) {}
+static inline void evmcs_write32(unsigned long field, u32 value) {}
+static inline void evmcs_write16(unsigned long field, u16 value) {}
+static inline u64 evmcs_read64(unsigned long field) { return 0; }
+static inline u32 evmcs_read32(unsigned long field) { return 0; }
+static inline u16 evmcs_read16(unsigned long field) { return 0; }
+static inline void evmcs_load(u64 phys_addr) {}
+static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
+static inline void evmcs_touch_msr_bitmap(void) {}
+#endif /* IS_ENABLED(CONFIG_HYPERV) */
+
+uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
+int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version);
+
+#endif /* __KVM_X86_VMX_EVMCS_H */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
new file mode 100644
index 000000000000..3170e291215d
--- /dev/null
+++ b/arch/x86/kvm/vmx/nested.c
@@ -0,0 +1,5721 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/frame.h>
+#include <linux/percpu.h>
+
+#include <asm/debugreg.h>
+#include <asm/mmu_context.h>
+
+#include "cpuid.h"
+#include "hyperv.h"
+#include "mmu.h"
+#include "nested.h"
+#include "trace.h"
+#include "x86.h"
+
+static bool __read_mostly enable_shadow_vmcs = true;
+module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
+
+static bool __read_mostly nested_early_check = false;
+module_param(nested_early_check, bool, S_IRUGO);
+
+/*
+ * Hyper-V requires all of these, so mark them as supported even though
+ * they are just treated the same as all-context.
+ */
+#define VMX_VPID_EXTENT_SUPPORTED_MASK \
+ (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
+ VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
+ VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
+ VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
+
+#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
+
+enum {
+ VMX_VMREAD_BITMAP,
+ VMX_VMWRITE_BITMAP,
+ VMX_BITMAP_NR
+};
+static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
+
+#define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
+#define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
+
+static u16 shadow_read_only_fields[] = {
+#define SHADOW_FIELD_RO(x) x,
+#include "vmcs_shadow_fields.h"
+};
+static int max_shadow_read_only_fields =
+ ARRAY_SIZE(shadow_read_only_fields);
+
+static u16 shadow_read_write_fields[] = {
+#define SHADOW_FIELD_RW(x) x,
+#include "vmcs_shadow_fields.h"
+};
+static int max_shadow_read_write_fields =
+ ARRAY_SIZE(shadow_read_write_fields);
+
+void init_vmcs_shadow_fields(void)
+{
+ int i, j;
+
+ memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
+ memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
+
+ for (i = j = 0; i < max_shadow_read_only_fields; i++) {
+ u16 field = shadow_read_only_fields[i];
+
+ if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
+ (i + 1 == max_shadow_read_only_fields ||
+ shadow_read_only_fields[i + 1] != field + 1))
+ pr_err("Missing field from shadow_read_only_field %x\n",
+ field + 1);
+
+ clear_bit(field, vmx_vmread_bitmap);
+#ifdef CONFIG_X86_64
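+		/*
+		 * On 64-bit hosts a single vmread/vmwrite covers the whole
+		 * 64-bit field, so the odd-numbered high-half aliases are
+		 * dropped from the compacted table.
+		 */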
+ if (field & 1)
+ continue;
+#endif
+ if (j < i)
+ shadow_read_only_fields[j] = field;
+ j++;
+ }
+ max_shadow_read_only_fields = j;
+
+ for (i = j = 0; i < max_shadow_read_write_fields; i++) {
+ u16 field = shadow_read_write_fields[i];
+
+ if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
+ (i + 1 == max_shadow_read_write_fields ||
+ shadow_read_write_fields[i + 1] != field + 1))
+ pr_err("Missing field from shadow_read_write_field %x\n",
+ field + 1);
+
+ /*
+ * PML and the preemption timer can be emulated, but the
+ * processor cannot vmwrite to fields that don't exist
+ * on bare metal.
+ */
+ switch (field) {
+ case GUEST_PML_INDEX:
+ if (!cpu_has_vmx_pml())
+ continue;
+ break;
+ case VMX_PREEMPTION_TIMER_VALUE:
+ if (!cpu_has_vmx_preemption_timer())
+ continue;
+ break;
+ case GUEST_INTR_STATUS:
+ if (!cpu_has_vmx_apicv())
+ continue;
+ break;
+ default:
+ break;
+ }
+
+ clear_bit(field, vmx_vmwrite_bitmap);
+ clear_bit(field, vmx_vmread_bitmap);
+#ifdef CONFIG_X86_64
+ if (field & 1)
+ continue;
+#endif
+ if (j < i)
+ shadow_read_write_fields[j] = field;
+ j++;
+ }
+ max_shadow_read_write_fields = j;
+}
+
+/*
+ * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
+ * set the success or error code of an emulated VMX instruction (as specified
+ * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
+ * instruction.
+ */
+static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
+{
+ vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
+{
+ vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
+ X86_EFLAGS_SF | X86_EFLAGS_OF))
+ | X86_EFLAGS_CF);
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
+ u32 vm_instruction_error)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * failValid writes the error number to the current VMCS, which
+ * can't be done if there isn't a current VMCS.
+ */
+ if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
+ return nested_vmx_failInvalid(vcpu);
+
+ vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_SF | X86_EFLAGS_OF))
+ | X86_EFLAGS_ZF);
+ get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
+ /*
+ * We don't need to force a shadow sync because
+	 * VM_INSTRUCTION_ERROR is not shadowed.
+ */
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
+{
+	/* TODO: should not simply reset the guest here. */
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+ pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
+}
+
+static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
+{
+ vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
+ vmcs_write64(VMCS_LINK_POINTER, -1ull);
+}
+
+static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx->nested.hv_evmcs)
+ return;
+
+ kunmap(vmx->nested.hv_evmcs_page);
+ kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
+ vmx->nested.hv_evmcs_vmptr = -1ull;
+ vmx->nested.hv_evmcs_page = NULL;
+ vmx->nested.hv_evmcs = NULL;
+}
+
+/*
+ * Free whatever needs to be freed from vmx->nested when L1 goes down, or
+ * just stops using VMX.
+ */
+static void free_nested(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
+ return;
+
+ vmx->nested.vmxon = false;
+ vmx->nested.smm.vmxon = false;
+ free_vpid(vmx->nested.vpid02);
+ vmx->nested.posted_intr_nv = -1;
+ vmx->nested.current_vmptr = -1ull;
+ if (enable_shadow_vmcs) {
+ vmx_disable_shadow_vmcs(vmx);
+ vmcs_clear(vmx->vmcs01.shadow_vmcs);
+ free_vmcs(vmx->vmcs01.shadow_vmcs);
+ vmx->vmcs01.shadow_vmcs = NULL;
+ }
+ kfree(vmx->nested.cached_vmcs12);
+ kfree(vmx->nested.cached_shadow_vmcs12);
+ /* Unpin physical memory we referred to in the vmcs02 */
+ if (vmx->nested.apic_access_page) {
+ kvm_release_page_dirty(vmx->nested.apic_access_page);
+ vmx->nested.apic_access_page = NULL;
+ }
+ if (vmx->nested.virtual_apic_page) {
+ kvm_release_page_dirty(vmx->nested.virtual_apic_page);
+ vmx->nested.virtual_apic_page = NULL;
+ }
+ if (vmx->nested.pi_desc_page) {
+ kunmap(vmx->nested.pi_desc_page);
+ kvm_release_page_dirty(vmx->nested.pi_desc_page);
+ vmx->nested.pi_desc_page = NULL;
+ vmx->nested.pi_desc = NULL;
+ }
+
+ kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
+
+ nested_release_evmcs(vcpu);
+
+ free_loaded_vmcs(&vmx->nested.vmcs02);
+}
+
+static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int cpu;
+
+ if (vmx->loaded_vmcs == vmcs)
+ return;
+
+ cpu = get_cpu();
+ vmx_vcpu_put(vcpu);
+ vmx->loaded_vmcs = vmcs;
+ vmx_vcpu_load(vcpu, cpu);
+ put_cpu();
+
+ vm_entry_controls_reset_shadow(vmx);
+ vm_exit_controls_reset_shadow(vmx);
+ vmx_segment_cache_clear(vmx);
+}
+
+/*
+ * Ensure that the current vmcs of the logical processor is the
+ * vmcs01 of the vcpu before calling free_nested().
+ */
+void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
+{
+ vcpu_load(vcpu);
+ vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
+ free_nested(vcpu);
+ vcpu_put(vcpu);
+}
+
+static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
+ struct x86_exception *fault)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 exit_reason;
+ unsigned long exit_qualification = vcpu->arch.exit_qualification;
+
+ if (vmx->nested.pml_full) {
+ exit_reason = EXIT_REASON_PML_FULL;
+ vmx->nested.pml_full = false;
+ exit_qualification &= INTR_INFO_UNBLOCK_NMI;
+ } else if (fault->error_code & PFERR_RSVD_MASK)
+ exit_reason = EXIT_REASON_EPT_MISCONFIG;
+ else
+ exit_reason = EXIT_REASON_EPT_VIOLATION;
+
+ nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
+ vmcs12->guest_physical_address = fault->address;
+}
+
+static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(mmu_is_nested(vcpu));
+
+ vcpu->arch.mmu = &vcpu->arch.guest_mmu;
+ kvm_init_shadow_ept_mmu(vcpu,
+ to_vmx(vcpu)->nested.msrs.ept_caps &
+ VMX_EPT_EXECUTE_ONLY_BIT,
+ nested_ept_ad_enabled(vcpu),
+ nested_ept_get_cr3(vcpu));
+ vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
+ vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
+ vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
+ vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
+
+ vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
+}
+
+static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.mmu = &vcpu->arch.root_mmu;
+ vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+}
+
+static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
+ u16 error_code)
+{
+ bool inequality, bit;
+
+ bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
+ inequality =
+ (error_code & vmcs12->page_fault_error_code_mask) !=
+ vmcs12->page_fault_error_code_match;
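+	/*
+	 * XOR truth table: with PF_VECTOR set in the exception bitmap
+	 * (bit == 1), a matching error code (inequality == 0) yields a
+	 * VM-exit; with the bit clear, only a mismatch does. This lets L1
+	 * invert the PFEC_MASK/PFEC_MATCH filter.
+	 */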
+ return inequality ^ bit;
+}
+
+
+/*
+ * KVM wants to reflect into the guest any page faults it receives. In a
+ * nested guest, this function checks whether they should be injected into
+ * L1 or L2.
+ */
+static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ unsigned int nr = vcpu->arch.exception.nr;
+ bool has_payload = vcpu->arch.exception.has_payload;
+ unsigned long payload = vcpu->arch.exception.payload;
+
+ if (nr == PF_VECTOR) {
+ if (vcpu->arch.exception.nested_apf) {
+ *exit_qual = vcpu->arch.apf.nested_apf_token;
+ return 1;
+ }
+ if (nested_vmx_is_page_fault_vmexit(vmcs12,
+ vcpu->arch.exception.error_code)) {
+ *exit_qual = has_payload ? payload : vcpu->arch.cr2;
+ return 1;
+ }
+ } else if (vmcs12->exception_bitmap & (1u << nr)) {
+ if (nr == DB_VECTOR) {
+ if (!has_payload) {
+ payload = vcpu->arch.dr6;
+ payload &= ~(DR6_FIXED_1 | DR6_BT);
+ payload ^= DR6_RTM;
+ }
+ *exit_qual = payload;
+ } else
+ *exit_qual = 0;
+ return 1;
+ }
+
+ return 0;
+}
+
+
+static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
+ struct x86_exception *fault)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ WARN_ON(!is_guest_mode(vcpu));
+
+ if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
+ !to_vmx(vcpu)->nested.nested_run_pending) {
+ vmcs12->vm_exit_intr_error_code = fault->error_code;
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+ PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
+ INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
+ fault->address);
+ } else {
+ kvm_inject_page_fault(vcpu, fault);
+ }
+}
+
+static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
+{
+ return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
+}
+
+static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+ return 0;
+
+ if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
+ !page_address_valid(vcpu, vmcs12->io_bitmap_b))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
+ return 0;
+
+ if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
+ return 0;
+
+ if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Check whether writes to the MSR are intercepted in the L01 MSR bitmap.
+ */
+static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
+{
+ unsigned long *msr_bitmap;
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return true;
+
+ msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
+
+ if (msr <= 0x1fff) {
+ return !!test_bit(msr, msr_bitmap + 0x800 / f);
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ return !!test_bit(msr, msr_bitmap + 0xc00 / f);
+ }
+
+ return true;
+}
+
+/*
+ * If an MSR is allowed by L0, we should check whether it is also allowed
+ * by L1. The corresponding bit is cleared unless both L0 and L1 allow it.
+ */
+static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
+ unsigned long *msr_bitmap_nested,
+ u32 msr, int type)
+{
+ int f = sizeof(unsigned long);
+
+ /*
+ * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+ * have the write-low and read-high bitmap offsets the wrong way round.
+ * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+ */
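+	/*
+	 * Resulting layout within the 4K bitmap page:
+	 *   0x000: read-low   (MSRs 0x00000000 - 0x00001fff)
+	 *   0x400: read-high  (MSRs 0xc0000000 - 0xc0001fff)
+	 *   0x800: write-low
+	 *   0xc00: write-high
+	 */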
+ if (msr <= 0x1fff) {
+ if (type & MSR_TYPE_R &&
+ !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
+ /* read-low */
+ __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
+
+ if (type & MSR_TYPE_W &&
+ !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
+ /* write-low */
+ __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
+
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ if (type & MSR_TYPE_R &&
+ !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
+ /* read-high */
+ __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
+
+ if (type & MSR_TYPE_W &&
+ !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
+ /* write-high */
+ __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
+
+ }
+}
+
+/*
+ * Merge L0's and L1's MSR bitmaps; return false to indicate that
+ * we do not use the hardware MSR bitmap.
+ */
+static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ int msr;
+ struct page *page;
+ unsigned long *msr_bitmap_l1;
+ unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
+	/*
+	 * pred_cmd & spec_ctrl are trying to verify two things:
+	 *
+	 * 1. L0 gave L1 permission to actually pass through the MSR. This
+	 *    ensures that we do not accidentally generate an L02 MSR bitmap
+	 *    from the L12 MSR bitmap that is too permissive.
+	 * 2. L1 (or one of its L2s) has actually used the MSR. This avoids
+	 *    unnecessary merging of the bitmap if the MSR is unused. This
+	 *    works properly because we only update the L01 MSR bitmap lazily.
+	 *    So even if L0 should pass these MSRs through to L1, the L01
+	 *    bitmap is only updated to reflect this when L1 (or its L2s)
+	 *    actually writes to the MSR.
+	 */
+ bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
+ bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
+
+ /* Nothing to do if the MSR bitmap is not in use. */
+ if (!cpu_has_vmx_msr_bitmap() ||
+ !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
+ return false;
+
+ if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+ !pred_cmd && !spec_ctrl)
+ return false;
+
+ page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
+ if (is_error_page(page))
+ return false;
+
+ msr_bitmap_l1 = (unsigned long *)kmap(page);
+ if (nested_cpu_has_apic_reg_virt(vmcs12)) {
+		/*
+		 * L0 need not intercept reads for MSRs between 0x800 and
+		 * 0x8ff; it just lets the processor take the value from the
+		 * virtual-APIC page, so take those 256 bits directly from
+		 * the L1 bitmap.
+		 */
+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+ unsigned word = msr / BITS_PER_LONG;
+ msr_bitmap_l0[word] = msr_bitmap_l1[word];
+ msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
+ }
+ } else {
+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+ unsigned word = msr / BITS_PER_LONG;
+ msr_bitmap_l0[word] = ~0;
+ msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
+ }
+ }
+
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ X2APIC_MSR(APIC_TASKPRI),
+ MSR_TYPE_W);
+
+ if (nested_cpu_has_vid(vmcs12)) {
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ X2APIC_MSR(APIC_EOI),
+ MSR_TYPE_W);
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ X2APIC_MSR(APIC_SELF_IPI),
+ MSR_TYPE_W);
+ }
+
+ if (spec_ctrl)
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_SPEC_CTRL,
+ MSR_TYPE_R | MSR_TYPE_W);
+
+ if (pred_cmd)
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_PRED_CMD,
+ MSR_TYPE_W);
+
+ kunmap(page);
+ kvm_release_page_clean(page);
+
+ return true;
+}
+
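+/*
+ * On nested VM-entry, cache the shadow vmcs12 referenced by vmcs12's
+ * VMCS link pointer so that L2's shadow VMCS accesses can be emulated
+ * against an in-kernel copy.
+ */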
+static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ struct vmcs12 *shadow;
+ struct page *page;
+
+ if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
+ vmcs12->vmcs_link_pointer == -1ull)
+ return;
+
+ shadow = get_shadow_vmcs12(vcpu);
+ page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
+
+ memcpy(shadow, kmap(page), VMCS12_SIZE);
+
+ kunmap(page);
+ kvm_release_page_clean(page);
+}
+
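+/*
+ * On nested VM-exit, write the cached shadow vmcs12 back to guest
+ * memory at vmcs12's VMCS link pointer.
+ */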
+static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
+ vmcs12->vmcs_link_pointer == -1ull)
+ return;
+
+ kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
+ get_shadow_vmcs12(vcpu), VMCS12_SIZE);
+}
+
+/*
+ * In nested virtualization, check if L1 has set
+ * VM_EXIT_ACK_INTR_ON_EXIT.
+ */
+static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
+{
+ return get_vmcs12(vcpu)->vm_exit_controls &
+ VM_EXIT_ACK_INTR_ON_EXIT;
+}
+
+static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
+{
+ return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
+}
+
+static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
+ !page_address_valid(vcpu, vmcs12->apic_access_addr))
+ return -EINVAL;
+ else
+ return 0;
+}
+
+static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+ !nested_cpu_has_apic_reg_virt(vmcs12) &&
+ !nested_cpu_has_vid(vmcs12) &&
+ !nested_cpu_has_posted_intr(vmcs12))
+ return 0;
+
+ /*
+ * If virtualize x2apic mode is enabled,
+ * virtualize apic access must be disabled.
+ */
+ if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+ nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+ return -EINVAL;
+
+ /*
+ * If virtual interrupt delivery is enabled,
+ * we must exit on external interrupts.
+ */
+ if (nested_cpu_has_vid(vmcs12) &&
+ !nested_exit_on_intr(vcpu))
+ return -EINVAL;
+
+	/*
+	 * Bits 15:8 should be zero in posted_intr_nv; the descriptor
+	 * address has already been checked in nested_get_vmcs12_pages.
+	 *
+	 * Bits 5:0 of posted_intr_desc_addr should be zero.
+	 */
+ if (nested_cpu_has_posted_intr(vmcs12) &&
+ (!nested_cpu_has_vid(vmcs12) ||
+ !nested_exit_intr_ack_set(vcpu) ||
+ (vmcs12->posted_intr_nv & 0xff00) ||
+ (vmcs12->posted_intr_desc_addr & 0x3f) ||
+ (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
+ return -EINVAL;
+
+	/* TPR shadow is required by all APICv features. */
+ if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
+ return -EINVAL;
+
+ return 0;
+}
+
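+/*
+ * Validate a VM-entry/VM-exit MSR switch area: the address must be
+ * 16-byte aligned, and the whole array of vmx_msr_entry structures must
+ * fit within the guest's physical-address width.
+ */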
+static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
+ u32 count, u64 addr)
+{
+ int maxphyaddr;
+
+ if (count == 0)
+ return 0;
+ maxphyaddr = cpuid_maxphyaddr(vcpu);
+ if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
+ (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
+ vmcs12->vm_exit_msr_load_addr) ||
+ nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
+ vmcs12->vm_exit_msr_store_addr))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
+ vmcs12->vm_entry_msr_load_addr))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (!nested_cpu_has_pml(vmcs12))
+ return 0;
+
+ if (!nested_cpu_has_ept(vmcs12) ||
+ !page_address_valid(vcpu, vmcs12->pml_address))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
+ !nested_cpu_has_ept(vmcs12))
+ return -EINVAL;
+ return 0;
+}
+
+static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
+ !nested_cpu_has_ept(vmcs12))
+ return -EINVAL;
+ return 0;
+}
+
+static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (!nested_cpu_has_shadow_vmcs(vmcs12))
+ return 0;
+
+ if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
+ !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
+ return -EINVAL;
+
+ return 0;
+}
+
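+/*
+ * Checks common to both the VM-entry/VM-exit MSR load and store lists:
+ * x2APIC MSRs (when x2APIC is enabled) and microcode MSRs are rejected,
+ * and the reserved field of the entry must be zero.
+ */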
+static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
+ struct vmx_msr_entry *e)
+{
+ /* x2APIC MSR accesses are not allowed */
+ if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
+ return -EINVAL;
+ if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
+ e->index == MSR_IA32_UCODE_REV)
+ return -EINVAL;
+ if (e->reserved != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
+ struct vmx_msr_entry *e)
+{
+ if (e->index == MSR_FS_BASE ||
+ e->index == MSR_GS_BASE ||
+ e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
+ nested_vmx_msr_check_common(vcpu, e))
+ return -EINVAL;
+ return 0;
+}
+
+static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
+ struct vmx_msr_entry *e)
+{
+ if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
+ nested_vmx_msr_check_common(vcpu, e))
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Load the guest's/host's MSRs at nested entry/exit.
+ * Returns 0 on success; on failure, returns the 1-based index of the
+ * entry that failed.
+ */
+static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+ u32 i;
+ struct vmx_msr_entry e;
+ struct msr_data msr;
+
+ msr.host_initiated = false;
+ for (i = 0; i < count; i++) {
+ if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
+ &e, sizeof(e))) {
+ pr_debug_ratelimited(
+ "%s cannot read MSR entry (%u, 0x%08llx)\n",
+ __func__, i, gpa + i * sizeof(e));
+ goto fail;
+ }
+ if (nested_vmx_load_msr_check(vcpu, &e)) {
+ pr_debug_ratelimited(
+ "%s check failed (%u, 0x%x, 0x%x)\n",
+ __func__, i, e.index, e.reserved);
+ goto fail;
+ }
+ msr.index = e.index;
+ msr.data = e.value;
+ if (kvm_set_msr(vcpu, &msr)) {
+ pr_debug_ratelimited(
+ "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
+ __func__, i, e.index, e.value);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ return i + 1;
+}
+
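+/*
+ * Emulate the VM-exit MSR-store area: read each listed MSR and write
+ * its current value back into the guest's vmx_msr_entry array.
+ * Returns 0 on success, -EINVAL on the first failure.
+ */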
+static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+ u32 i;
+ struct vmx_msr_entry e;
+
+ for (i = 0; i < count; i++) {
+ struct msr_data msr_info;
+ if (kvm_vcpu_read_guest(vcpu,
+ gpa + i * sizeof(e),
+ &e, 2 * sizeof(u32))) {
+ pr_debug_ratelimited(
+ "%s cannot read MSR entry (%u, 0x%08llx)\n",
+ __func__, i, gpa + i * sizeof(e));
+ return -EINVAL;
+ }
+ if (nested_vmx_store_msr_check(vcpu, &e)) {
+ pr_debug_ratelimited(
+ "%s check failed (%u, 0x%x, 0x%x)\n",
+ __func__, i, e.index, e.reserved);
+ return -EINVAL;
+ }
+ msr_info.host_initiated = false;
+ msr_info.index = e.index;
+ if (kvm_get_msr(vcpu, &msr_info)) {
+ pr_debug_ratelimited(
+ "%s cannot read MSR (%u, 0x%x)\n",
+ __func__, i, e.index);
+ return -EINVAL;
+ }
+ if (kvm_vcpu_write_guest(vcpu,
+ gpa + i * sizeof(e) +
+ offsetof(struct vmx_msr_entry, value),
+ &msr_info.data, sizeof(msr_info.data))) {
+ pr_debug_ratelimited(
+ "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
+ __func__, i, e.index, msr_info.data);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ unsigned long invalid_mask;
+
+ invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
+ return (val & invalid_mask) == 0;
+}
+
+/*
+ * Load the guest's/host's CR3 at nested entry/exit. nested_ept is true if
+ * we are emulating VM entry into a guest with EPT enabled. Returns 0 on
+ * success, 1 on failure. On failure, the invalid-state exit qualification
+ * code is assigned to entry_failure_code.
+ */
+static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
+ u32 *entry_failure_code)
+{
+ if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
+ if (!nested_cr3_valid(vcpu, cr3)) {
+ *entry_failure_code = ENTRY_FAIL_DEFAULT;
+ return 1;
+ }
+
+ /*
+ * If PAE paging and EPT are both on, CR3 is not used by the CPU and
+ * must not be dereferenced.
+ */
+ if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
+ !nested_ept) {
+ if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
+ *entry_failure_code = ENTRY_FAIL_PDPTE;
+ return 1;
+ }
+ }
+ }
+
+ if (!nested_ept)
+ kvm_mmu_new_cr3(vcpu, cr3, false);
+
+ vcpu->arch.cr3 = cr3;
+ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+ kvm_init_mmu(vcpu, false);
+
+ return 0;
+}
+
+/*
+ * Returns true if KVM is able to configure the CPU to tag TLB entries
+ * populated by L2 differently from TLB entries populated by L1.
+ *
+ * If L1 uses EPT, then TLB entries are tagged with different EPTPs.
+ *
+ * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
+ * with different VPIDs (L1 entries are tagged with vmx->vpid
+ * while L2 entries are tagged with vmx->nested.vpid02).
+ */
+static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ return nested_cpu_has_ept(vmcs12) ||
+ (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
+}
+
+static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
+}
+
+static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
+{
+ return fixed_bits_valid(control, low, high);
+}
+
+static inline u64 vmx_control_msr(u32 low, u32 high)
+{
+ return low | ((u64)high << 32);
+}
+
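+/*
+ * Returns true iff every bit set in @subset (after applying @mask) is
+ * also set in @superset.
+ */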
+static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
+{
+ superset &= mask;
+ subset &= mask;
+
+ return (superset | subset) == superset;
+}
+
+static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
+{
+ const u64 feature_and_reserved =
+ /* feature (except bit 48; see below) */
+ BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
+ /* reserved */
+ BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
+ u64 vmx_basic = vmx->nested.msrs.basic;
+
+ if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
+ return -EINVAL;
+
+ /*
+ * KVM does not emulate a version of VMX that constrains physical
+ * addresses of VMX structures (e.g. VMCS) to 32-bits.
+ */
+ if (data & BIT_ULL(48))
+ return -EINVAL;
+
+ if (vmx_basic_vmcs_revision_id(vmx_basic) !=
+ vmx_basic_vmcs_revision_id(data))
+ return -EINVAL;
+
+ if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
+ return -EINVAL;
+
+ vmx->nested.msrs.basic = data;
+ return 0;
+}
+
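+/*
+ * Restore a "true" VMX control MSR. The low 32 bits report the allowed
+ * 0-settings (bits that must be 1) and the high 32 bits the allowed
+ * 1-settings (bits that may be 1); userspace may only make the controls
+ * more restrictive than what KVM reported.
+ */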
+static int
+vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+ u64 supported;
+ u32 *lowp, *highp;
+
+ switch (msr_index) {
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+ lowp = &vmx->nested.msrs.pinbased_ctls_low;
+ highp = &vmx->nested.msrs.pinbased_ctls_high;
+ break;
+ case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+ lowp = &vmx->nested.msrs.procbased_ctls_low;
+ highp = &vmx->nested.msrs.procbased_ctls_high;
+ break;
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+ lowp = &vmx->nested.msrs.exit_ctls_low;
+ highp = &vmx->nested.msrs.exit_ctls_high;
+ break;
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+ lowp = &vmx->nested.msrs.entry_ctls_low;
+ highp = &vmx->nested.msrs.entry_ctls_high;
+ break;
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ lowp = &vmx->nested.msrs.secondary_ctls_low;
+ highp = &vmx->nested.msrs.secondary_ctls_high;
+ break;
+ default:
+ BUG();
+ }
+
+ supported = vmx_control_msr(*lowp, *highp);
+
+ /* Check must-be-1 bits are still 1. */
+ if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
+ return -EINVAL;
+
+ /* Check must-be-0 bits are still 0. */
+ if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
+ return -EINVAL;
+
+ *lowp = data;
+ *highp = data >> 32;
+ return 0;
+}
+
+static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
+{
+ const u64 feature_and_reserved_bits =
+ /* feature */
+ BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
+ BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
+ /* reserved */
+ GENMASK_ULL(13, 9) | BIT_ULL(31);
+ u64 vmx_misc;
+
+ vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
+ vmx->nested.msrs.misc_high);
+
+ if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
+ return -EINVAL;
+
+ if ((vmx->nested.msrs.pinbased_ctls_high &
+ PIN_BASED_VMX_PREEMPTION_TIMER) &&
+ vmx_misc_preemption_timer_rate(data) !=
+ vmx_misc_preemption_timer_rate(vmx_misc))
+ return -EINVAL;
+
+ if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
+ return -EINVAL;
+
+ if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
+ return -EINVAL;
+
+ if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
+ return -EINVAL;
+
+ vmx->nested.msrs.misc_low = data;
+ vmx->nested.msrs.misc_high = data >> 32;
+
+ /*
+ * If L1 has read-only VM-exit information fields, use the
+ * less permissive vmx_vmwrite_bitmap to specify write
+ * permissions for the shadow VMCS.
+ */
+ if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
+ vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
+
+ return 0;
+}
+
+static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
+{
+ u64 vmx_ept_vpid_cap;
+
+ vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
+ vmx->nested.msrs.vpid_caps);
+
+ /* Every bit is either reserved or a feature bit. */
+ if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
+ return -EINVAL;
+
+ vmx->nested.msrs.ept_caps = data;
+ vmx->nested.msrs.vpid_caps = data >> 32;
+ return 0;
+}
+
+static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
+{
+ u64 *msr;
+
+ switch (msr_index) {
+ case MSR_IA32_VMX_CR0_FIXED0:
+ msr = &vmx->nested.msrs.cr0_fixed0;
+ break;
+ case MSR_IA32_VMX_CR4_FIXED0:
+ msr = &vmx->nested.msrs.cr4_fixed0;
+ break;
+ default:
+ BUG();
+ }
+
+	/*
+	 * Bits that are set (i.e. bits that "must be 1" during VMX
+	 * operation) must also be 1 in the restored value.
+	 */
+ if (!is_bitwise_subset(data, *msr, -1ULL))
+ return -EINVAL;
+
+ *msr = data;
+ return 0;
+}
+
+/*
+ * Called when userspace is restoring VMX MSRs.
+ *
+ * Returns 0 on success, non-0 otherwise.
+ */
+int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * Don't allow changes to the VMX capability MSRs while the vCPU
+ * is in VMX operation.
+ */
+ if (vmx->nested.vmxon)
+ return -EBUSY;
+
+ switch (msr_index) {
+ case MSR_IA32_VMX_BASIC:
+ return vmx_restore_vmx_basic(vmx, data);
+ case MSR_IA32_VMX_PINBASED_CTLS:
+ case MSR_IA32_VMX_PROCBASED_CTLS:
+ case MSR_IA32_VMX_EXIT_CTLS:
+ case MSR_IA32_VMX_ENTRY_CTLS:
+		/*
+		 * The "non-true" VMX capability MSRs are generated from the
+		 * "true" MSRs, so we do not support restoring them directly.
+		 *
+		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
+		 * should restore the "true" MSRs with the must-be-1 bits
+		 * set according to SDM Vol. 3, Appendix A.2, "RESERVED
+		 * CONTROLS AND DEFAULT SETTINGS".
+		 */
+ return -EINVAL;
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+ case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ return vmx_restore_control_msr(vmx, msr_index, data);
+ case MSR_IA32_VMX_MISC:
+ return vmx_restore_vmx_misc(vmx, data);
+ case MSR_IA32_VMX_CR0_FIXED0:
+ case MSR_IA32_VMX_CR4_FIXED0:
+ return vmx_restore_fixed0_msr(vmx, msr_index, data);
+ case MSR_IA32_VMX_CR0_FIXED1:
+ case MSR_IA32_VMX_CR4_FIXED1:
+ /*
+ * These MSRs are generated based on the vCPU's CPUID, so we
+ * do not support restoring them directly.
+ */
+ return -EINVAL;
+ case MSR_IA32_VMX_EPT_VPID_CAP:
+ return vmx_restore_vmx_ept_vpid_cap(vmx, data);
+ case MSR_IA32_VMX_VMCS_ENUM:
+ vmx->nested.msrs.vmcs_enum = data;
+ return 0;
+ default:
+ /*
+ * The rest of the VMX capability MSRs do not support restore.
+ */
+ return -EINVAL;
+ }
+}
+
+/* Returns 0 on success, non-0 otherwise. */
+int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
+{
+ switch (msr_index) {
+ case MSR_IA32_VMX_BASIC:
+ *pdata = msrs->basic;
+ break;
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
+ case MSR_IA32_VMX_PINBASED_CTLS:
+ *pdata = vmx_control_msr(
+ msrs->pinbased_ctls_low,
+ msrs->pinbased_ctls_high);
+ if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
+ *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
+ break;
+ case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+ case MSR_IA32_VMX_PROCBASED_CTLS:
+ *pdata = vmx_control_msr(
+ msrs->procbased_ctls_low,
+ msrs->procbased_ctls_high);
+ if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
+ *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
+ break;
+ case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+ case MSR_IA32_VMX_EXIT_CTLS:
+ *pdata = vmx_control_msr(
+ msrs->exit_ctls_low,
+ msrs->exit_ctls_high);
+ if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
+ *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
+ break;
+ case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+ case MSR_IA32_VMX_ENTRY_CTLS:
+ *pdata = vmx_control_msr(
+ msrs->entry_ctls_low,
+ msrs->entry_ctls_high);
+ if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
+ *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
+ break;
+ case MSR_IA32_VMX_MISC:
+ *pdata = vmx_control_msr(
+ msrs->misc_low,
+ msrs->misc_high);
+ break;
+ case MSR_IA32_VMX_CR0_FIXED0:
+ *pdata = msrs->cr0_fixed0;
+ break;
+ case MSR_IA32_VMX_CR0_FIXED1:
+ *pdata = msrs->cr0_fixed1;
+ break;
+ case MSR_IA32_VMX_CR4_FIXED0:
+ *pdata = msrs->cr4_fixed0;
+ break;
+ case MSR_IA32_VMX_CR4_FIXED1:
+ *pdata = msrs->cr4_fixed1;
+ break;
+ case MSR_IA32_VMX_VMCS_ENUM:
+ *pdata = msrs->vmcs_enum;
+ break;
+ case MSR_IA32_VMX_PROCBASED_CTLS2:
+ *pdata = vmx_control_msr(
+ msrs->secondary_ctls_low,
+ msrs->secondary_ctls_high);
+ break;
+ case MSR_IA32_VMX_EPT_VPID_CAP:
+ *pdata = msrs->ept_caps |
+ ((u64)msrs->vpid_caps << 32);
+ break;
+ case MSR_IA32_VMX_VMFUNC:
+ *pdata = msrs->vmfunc_controls;
+ break;
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Copy the writable VMCS shadow fields back to the VMCS12, in case
+ * they have been modified by the L1 guest. Note that the "read-only"
+ * VM-exit information fields are actually writable if the vCPU is
+ * configured to support "VMWRITE to any supported field in the VMCS."
+ */
+static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
+{
+ const u16 *fields[] = {
+ shadow_read_write_fields,
+ shadow_read_only_fields
+ };
+ const int max_fields[] = {
+ max_shadow_read_write_fields,
+ max_shadow_read_only_fields
+ };
+ int i, q;
+ unsigned long field;
+ u64 field_value;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
+
+ preempt_disable();
+
+ vmcs_load(shadow_vmcs);
+
+ for (q = 0; q < ARRAY_SIZE(fields); q++) {
+ for (i = 0; i < max_fields[q]; i++) {
+ field = fields[q][i];
+ field_value = __vmcs_readl(field);
+ vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
+ }
+ /*
+ * Skip the VM-exit information fields if they are read-only.
+ */
+ if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
+ break;
+ }
+
+ vmcs_clear(shadow_vmcs);
+ vmcs_load(vmx->loaded_vmcs->vmcs);
+
+ preempt_enable();
+}
+
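+/*
+ * Propagate the cached vmcs12 values into the shadow VMCS so that the
+ * CPU can satisfy L1's VMREADs (and, where permitted, VMWRITEs) of
+ * shadowed fields without a VM-exit.
+ */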
+static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
+{
+ const u16 *fields[] = {
+ shadow_read_write_fields,
+ shadow_read_only_fields
+ };
+ const int max_fields[] = {
+ max_shadow_read_write_fields,
+ max_shadow_read_only_fields
+ };
+ int i, q;
+ unsigned long field;
+ u64 field_value = 0;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
+
+ vmcs_load(shadow_vmcs);
+
+ for (q = 0; q < ARRAY_SIZE(fields); q++) {
+ for (i = 0; i < max_fields[q]; i++) {
+ field = fields[q][i];
+ vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
+ __vmcs_writel(field, field_value);
+ }
+ }
+
+ vmcs_clear(shadow_vmcs);
+ vmcs_load(vmx->loaded_vmcs->vmcs);
+}
+
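+/*
+ * Copy the fields the guest may have modified from the enlightened VMCS
+ * into the cached vmcs12, consulting hv_clean_fields to skip groups the
+ * guest has marked clean.
+ */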
+static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
+{
+ struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
+ struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+
+ /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
+ vmcs12->tpr_threshold = evmcs->tpr_threshold;
+ vmcs12->guest_rip = evmcs->guest_rip;
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
+ vmcs12->guest_rsp = evmcs->guest_rsp;
+ vmcs12->guest_rflags = evmcs->guest_rflags;
+ vmcs12->guest_interruptibility_info =
+ evmcs->guest_interruptibility_info;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
+ vmcs12->cpu_based_vm_exec_control =
+ evmcs->cpu_based_vm_exec_control;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
+ vmcs12->exception_bitmap = evmcs->exception_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
+ vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
+ vmcs12->vm_entry_intr_info_field =
+ evmcs->vm_entry_intr_info_field;
+ vmcs12->vm_entry_exception_error_code =
+ evmcs->vm_entry_exception_error_code;
+ vmcs12->vm_entry_instruction_len =
+ evmcs->vm_entry_instruction_len;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
+ vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
+ vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
+ vmcs12->host_cr0 = evmcs->host_cr0;
+ vmcs12->host_cr3 = evmcs->host_cr3;
+ vmcs12->host_cr4 = evmcs->host_cr4;
+ vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
+ vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
+ vmcs12->host_rip = evmcs->host_rip;
+ vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
+ vmcs12->host_es_selector = evmcs->host_es_selector;
+ vmcs12->host_cs_selector = evmcs->host_cs_selector;
+ vmcs12->host_ss_selector = evmcs->host_ss_selector;
+ vmcs12->host_ds_selector = evmcs->host_ds_selector;
+ vmcs12->host_fs_selector = evmcs->host_fs_selector;
+ vmcs12->host_gs_selector = evmcs->host_gs_selector;
+ vmcs12->host_tr_selector = evmcs->host_tr_selector;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
+ vmcs12->pin_based_vm_exec_control =
+ evmcs->pin_based_vm_exec_control;
+ vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
+ vmcs12->secondary_vm_exec_control =
+ evmcs->secondary_vm_exec_control;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
+ vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
+ vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
+ vmcs12->msr_bitmap = evmcs->msr_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
+ vmcs12->guest_es_base = evmcs->guest_es_base;
+ vmcs12->guest_cs_base = evmcs->guest_cs_base;
+ vmcs12->guest_ss_base = evmcs->guest_ss_base;
+ vmcs12->guest_ds_base = evmcs->guest_ds_base;
+ vmcs12->guest_fs_base = evmcs->guest_fs_base;
+ vmcs12->guest_gs_base = evmcs->guest_gs_base;
+ vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
+ vmcs12->guest_tr_base = evmcs->guest_tr_base;
+ vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
+ vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
+ vmcs12->guest_es_limit = evmcs->guest_es_limit;
+ vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
+ vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
+ vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
+ vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
+ vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
+ vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
+ vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
+ vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
+ vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
+ vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
+ vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
+ vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
+ vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
+ vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
+ vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
+ vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
+ vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
+ vmcs12->guest_es_selector = evmcs->guest_es_selector;
+ vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
+ vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
+ vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
+ vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
+ vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
+ vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
+ vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
+ vmcs12->tsc_offset = evmcs->tsc_offset;
+ vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
+ vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
+ vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
+ vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
+ vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
+ vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
+ vmcs12->guest_cr0 = evmcs->guest_cr0;
+ vmcs12->guest_cr3 = evmcs->guest_cr3;
+ vmcs12->guest_cr4 = evmcs->guest_cr4;
+ vmcs12->guest_dr7 = evmcs->guest_dr7;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
+ vmcs12->host_fs_base = evmcs->host_fs_base;
+ vmcs12->host_gs_base = evmcs->host_gs_base;
+ vmcs12->host_tr_base = evmcs->host_tr_base;
+ vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
+ vmcs12->host_idtr_base = evmcs->host_idtr_base;
+ vmcs12->host_rsp = evmcs->host_rsp;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
+ vmcs12->ept_pointer = evmcs->ept_pointer;
+ vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
+ vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
+ vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
+ vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
+ vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
+ vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
+ vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
+ vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
+ vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
+ vmcs12->guest_pending_dbg_exceptions =
+ evmcs->guest_pending_dbg_exceptions;
+ vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
+ vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
+ vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
+ vmcs12->guest_activity_state = evmcs->guest_activity_state;
+ vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
+ }
+
+ /*
+ * Not used?
+ * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
+ * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
+ * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
+ * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
+ * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
+ * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
+ * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
+ * vmcs12->page_fault_error_code_mask =
+ * evmcs->page_fault_error_code_mask;
+ * vmcs12->page_fault_error_code_match =
+ * evmcs->page_fault_error_code_match;
+ * vmcs12->cr3_target_count = evmcs->cr3_target_count;
+ * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
+ * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
+ * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
+ */
+
+ /*
+ * Read only fields:
+ * vmcs12->guest_physical_address = evmcs->guest_physical_address;
+ * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
+ * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
+ * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
+ * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
+ * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
+ * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
+ * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
+ * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
+ * vmcs12->exit_qualification = evmcs->exit_qualification;
+ * vmcs12->guest_linear_address = evmcs->guest_linear_address;
+ *
+ * Not present in struct vmcs12:
+ * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
+ * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
+ * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
+ * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
+ */
+
+ return 0;
+}
+
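+/*
+ * Reflect the cached vmcs12 back into the guest's enlightened VMCS;
+ * the fields listed in the comment below are intentionally not
+ * written back.
+ */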
+static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
+{
+ struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
+ struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+
+ /*
+ * Should not be changed by KVM:
+ *
+ * evmcs->host_es_selector = vmcs12->host_es_selector;
+ * evmcs->host_cs_selector = vmcs12->host_cs_selector;
+ * evmcs->host_ss_selector = vmcs12->host_ss_selector;
+ * evmcs->host_ds_selector = vmcs12->host_ds_selector;
+ * evmcs->host_fs_selector = vmcs12->host_fs_selector;
+ * evmcs->host_gs_selector = vmcs12->host_gs_selector;
+ * evmcs->host_tr_selector = vmcs12->host_tr_selector;
+ * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
+ * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
+ * evmcs->host_cr0 = vmcs12->host_cr0;
+ * evmcs->host_cr3 = vmcs12->host_cr3;
+ * evmcs->host_cr4 = vmcs12->host_cr4;
+ * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
+ * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
+ * evmcs->host_rip = vmcs12->host_rip;
+ * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
+ * evmcs->host_fs_base = vmcs12->host_fs_base;
+ * evmcs->host_gs_base = vmcs12->host_gs_base;
+ * evmcs->host_tr_base = vmcs12->host_tr_base;
+ * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
+ * evmcs->host_idtr_base = vmcs12->host_idtr_base;
+ * evmcs->host_rsp = vmcs12->host_rsp;
+ * sync_vmcs12() doesn't read these:
+ * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
+ * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
+ * evmcs->msr_bitmap = vmcs12->msr_bitmap;
+ * evmcs->ept_pointer = vmcs12->ept_pointer;
+ * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
+ * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
+ * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
+ * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
+ * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
+ * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
+ * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
+ * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
+ * evmcs->tpr_threshold = vmcs12->tpr_threshold;
+ * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
+ * evmcs->exception_bitmap = vmcs12->exception_bitmap;
+ * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
+ * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
+ * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
+ * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
+ * evmcs->page_fault_error_code_mask =
+ * vmcs12->page_fault_error_code_mask;
+ * evmcs->page_fault_error_code_match =
+ * vmcs12->page_fault_error_code_match;
+ * evmcs->cr3_target_count = vmcs12->cr3_target_count;
+ * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
+ * evmcs->tsc_offset = vmcs12->tsc_offset;
+ * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
+ * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
+ * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
+ * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
+ * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
+ * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
+ * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
+ * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
+ *
+ * Not present in struct vmcs12:
+ * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
+ * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
+ * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
+ * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
+ */
+
+ evmcs->guest_es_selector = vmcs12->guest_es_selector;
+ evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
+ evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
+ evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
+ evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
+ evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
+ evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
+ evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
+
+ evmcs->guest_es_limit = vmcs12->guest_es_limit;
+ evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
+ evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
+ evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
+ evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
+ evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
+ evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
+ evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
+ evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
+ evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
+
+ evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
+ evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
+ evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
+ evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
+ evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
+ evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
+ evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
+ evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
+
+ evmcs->guest_es_base = vmcs12->guest_es_base;
+ evmcs->guest_cs_base = vmcs12->guest_cs_base;
+ evmcs->guest_ss_base = vmcs12->guest_ss_base;
+ evmcs->guest_ds_base = vmcs12->guest_ds_base;
+ evmcs->guest_fs_base = vmcs12->guest_fs_base;
+ evmcs->guest_gs_base = vmcs12->guest_gs_base;
+ evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
+ evmcs->guest_tr_base = vmcs12->guest_tr_base;
+ evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
+ evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
+
+ evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
+ evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
+
+ evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
+ evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
+ evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
+ evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
+
+ evmcs->guest_pending_dbg_exceptions =
+ vmcs12->guest_pending_dbg_exceptions;
+ evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
+ evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
+
+ evmcs->guest_activity_state = vmcs12->guest_activity_state;
+ evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
+
+ evmcs->guest_cr0 = vmcs12->guest_cr0;
+ evmcs->guest_cr3 = vmcs12->guest_cr3;
+ evmcs->guest_cr4 = vmcs12->guest_cr4;
+ evmcs->guest_dr7 = vmcs12->guest_dr7;
+
+ evmcs->guest_physical_address = vmcs12->guest_physical_address;
+
+ evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
+ evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
+ evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
+ evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
+ evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
+ evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
+ evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
+ evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
+
+ evmcs->exit_qualification = vmcs12->exit_qualification;
+
+ evmcs->guest_linear_address = vmcs12->guest_linear_address;
+ evmcs->guest_rsp = vmcs12->guest_rsp;
+ evmcs->guest_rflags = vmcs12->guest_rflags;
+
+ evmcs->guest_interruptibility_info =
+ vmcs12->guest_interruptibility_info;
+ evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
+ evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
+ evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
+ evmcs->vm_entry_exception_error_code =
+ vmcs12->vm_entry_exception_error_code;
+ evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
+
+ evmcs->guest_rip = vmcs12->guest_rip;
+
+ evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
+
+ return 0;
+}
+
+/*
+ * This is an equivalent of the nested hypervisor executing the vmptrld
+ * instruction.
+ */
+static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
+ bool from_launch)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct hv_vp_assist_page assist_page;
+
+ if (likely(!vmx->nested.enlightened_vmcs_enabled))
+ return 1;
+
+ if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
+ return 1;
+
+ if (unlikely(!assist_page.enlighten_vmentry))
+ return 1;
+
+ if (unlikely(assist_page.current_nested_vmcs !=
+ vmx->nested.hv_evmcs_vmptr)) {
+
+ if (!vmx->nested.hv_evmcs)
+ vmx->nested.current_vmptr = -1ull;
+
+ nested_release_evmcs(vcpu);
+
+ vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
+ vcpu, assist_page.current_nested_vmcs);
+
+ if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
+ return 0;
+
+ vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
+
+		/*
+		 * Currently, KVM only supports eVMCS version 1
+		 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set
+		 * this value in the first u32 field of the eVMCS, which
+		 * should specify the eVMCS VersionNumber.
+		 *
+		 * The guest should discover the eVMCS versions supported by
+		 * the host by examining CPUID.0x4000000A.EAX[0:15]. The host
+		 * userspace VMM is expected to set this CPUID leaf according
+		 * to the value returned in vmcs_version from
+		 * nested_enable_evmcs().
+		 *
+		 * However, it turns out that Microsoft Hyper-V fails to
+		 * comply with its own invented interface: when Hyper-V uses
+		 * eVMCS, it just sets the first u32 field of the eVMCS to the
+		 * revision_id specified in MSR_IA32_VMX_BASIC instead of the
+		 * eVMCS version number, which should be one of the supported
+		 * versions specified in CPUID.0x4000000A.EAX[0:15].
+		 *
+		 * To work around this Hyper-V bug, we accept here either a
+		 * supported eVMCS version or the VMCS12 revision_id as valid
+		 * values for the first u32 field of the eVMCS.
+		 */
+ if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
+ (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
+ nested_release_evmcs(vcpu);
+ return 0;
+ }
+
+ vmx->nested.dirty_vmcs12 = true;
+		/*
+		 * As we keep L2 state for only one guest, the
+		 * 'hv_clean_fields' mask can't be used when we switch between
+		 * guests. Reset it here for simplicity.
+		 */
+ vmx->nested.hv_evmcs->hv_clean_fields &=
+ ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+ vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
+
+		/*
+		 * Unlike a normal vmcs12, an enlightened vmcs12 is not fully
+		 * reloaded from the guest's memory (read-only fields, fields
+		 * not present in struct hv_enlightened_vmcs, ...). Make sure
+		 * there are no leftovers.
+		 */
+ if (from_launch) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ memset(vmcs12, 0, sizeof(*vmcs12));
+ vmcs12->hdr.revision_id = VMCS12_REVISION;
+ }
+
+ }
+ return 1;
+}
+
+void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * hv_evmcs may end up not being mapped after migration (when
+	 * L2 was running); map it here to make sure vmcs12 changes are
+	 * properly reflected.
+	 */
+ if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
+ nested_vmx_handle_enlightened_vmptrld(vcpu, false);
+
+ if (vmx->nested.hv_evmcs) {
+ copy_vmcs12_to_enlightened(vmx);
+ /* All fields are clean */
+ vmx->nested.hv_evmcs->hv_clean_fields |=
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+ } else {
+ copy_vmcs12_to_shadow(vmx);
+ }
+
+ vmx->nested.need_vmcs12_sync = false;
+}
+
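+/*
+ * hrtimer callback for the emulated VMX-preemption timer: mark the
+ * timer as expired and kick the vCPU so the pending exit is processed.
+ */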
+static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
+{
+ struct vcpu_vmx *vmx =
+ container_of(timer, struct vcpu_vmx, nested.preemption_timer);
+
+ vmx->nested.preemption_timer_expired = true;
+ kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
+ kvm_vcpu_kick(&vmx->vcpu);
+
+ return HRTIMER_NORESTART;
+}
+
+static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
+{
+ u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * A timer value of zero is architecturally guaranteed to cause
+ * a VMExit prior to executing any instructions in the guest.
+ */
+ if (preemption_timeout == 0) {
+ vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
+ return;
+ }
+
+ if (vcpu->arch.virtual_tsc_khz == 0)
+ return;
+
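+	/*
+	 * The preemption timer counts down in units of TSC cycles
+	 * right-shifted by the emulated rate; convert ticks to
+	 * nanoseconds: ns = (ticks << rate) * 1000000 / tsc_khz.
+	 */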
+ preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
+ preemption_timeout *= 1000000;
+ do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
+ hrtimer_start(&vmx->nested.preemption_timer,
+ ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
+}
+
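+/*
+ * Compute the EFER value L2 will run with: taken from vmcs12 when a
+ * pending VM-entry loads IA32_EFER, otherwise derived from L1's EFER
+ * with LMA/LME forced to match the "IA-32e mode guest" entry control.
+ */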
+static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+{
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
+ return vmcs12->guest_ia32_efer;
+ else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
+ return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
+ else
+ return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
+}
+
+static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
+{
+ /*
+ * If vmcs02 hasn't been initialized, set the constant vmcs02 state
+ * according to L0's settings (vmcs12 is irrelevant here). Host
+ * fields that come from L0 and are not constant, e.g. HOST_CR3,
+ * will be set as needed prior to VMLAUNCH/VMRESUME.
+ */
+ if (vmx->nested.vmcs02_initialized)
+ return;
+ vmx->nested.vmcs02_initialized = true;
+
+	/*
+	 * We don't care what the EPTP value is; we just need to guarantee
+	 * it's valid so that we don't get a false positive when doing early
+	 * consistency checks.
+	 */
+ if (enable_ept && nested_early_check)
+ vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
+
+ /* All VMFUNCs are currently emulated through L0 vmexits. */
+ if (cpu_has_vmx_vmfunc())
+ vmcs_write64(VM_FUNCTION_CONTROL, 0);
+
+ if (cpu_has_vmx_posted_intr())
+ vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
+
+ if (enable_pml)
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+
+	/*
+	 * Set the MSR load/store lists to match L0's settings. Only the
+	 * addresses are constant (for vmcs02); the counts can change based
+	 * on L2's behavior, e.g. switching to/from long mode.
+	 */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
+
+ vmx_set_constant_host_state(vmx);
+}
+
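+/*
+ * vmcs02 fields evaluated before VM-entry that only need to be
+ * (re)written when vmcs12 is dirty or an enlightened VMCS is in use.
+ */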
+static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx,
+ struct vmcs12 *vmcs12)
+{
+ prepare_vmcs02_constant_state(vmx);
+
+ vmcs_write64(VMCS_LINK_POINTER, -1ull);
+
+ if (enable_vpid) {
+ if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
+ else
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+ }
+}
+
+static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+{
+ u32 exec_control, vmcs12_exec_ctrl;
+ u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
+
+ if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
+ prepare_vmcs02_early_full(vmx, vmcs12);
+
+ /*
+ * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
+ * entry, but only if the current (host) sp changed from the value
+ * we wrote last (vmx->host_rsp). This cache is no longer relevant
+ * if we switch vmcs, and rather than hold a separate cache per vmcs,
+ * here we just force the write to happen on entry. host_rsp will
+ * also be written unconditionally by nested_vmx_check_vmentry_hw()
+ * if we are doing early consistency checks via hardware.
+ */
+ vmx->host_rsp = 0;
+
+ /*
+ * PIN CONTROLS
+ */
+ exec_control = vmcs12->pin_based_vm_exec_control;
+
+ /* Preemption timer setting is computed directly in vmx_vcpu_run. */
+ exec_control |= vmcs_config.pin_based_exec_ctrl;
+ exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ vmx->loaded_vmcs->hv_timer_armed = false;
+
+ /* Posted interrupts setting is only taken from vmcs12. */
+ if (nested_cpu_has_posted_intr(vmcs12)) {
+ vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
+ vmx->nested.pi_pending = false;
+ } else {
+ exec_control &= ~PIN_BASED_POSTED_INTR;
+ }
+ vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
+
+ /*
+ * EXEC CONTROLS
+ */
+ exec_control = vmx_exec_control(vmx); /* L0's desires */
+ exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+ exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+ exec_control |= vmcs12->cpu_based_vm_exec_control;
+
+ /*
+ * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
+ * nested_get_vmcs12_pages can't fix it up, the illegal value
+ * will result in a VM entry failure.
+ */
+ if (exec_control & CPU_BASED_TPR_SHADOW) {
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
+ vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
+ } else {
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_LOAD_EXITING |
+ CPU_BASED_CR8_STORE_EXITING;
+#endif
+ }
+
+	/*
+	 * A vmexit (to either the L1 hypervisor or L0 userspace) is always
+	 * needed for I/O port accesses.
+	 */
+ exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
+ exec_control |= CPU_BASED_UNCOND_IO_EXITING;
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
+
+ /*
+ * SECONDARY EXEC CONTROLS
+ */
+ if (cpu_has_secondary_exec_ctrls()) {
+ exec_control = vmx->secondary_exec_control;
+
+ /* Take the following fields only from vmcs12 */
+ exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_ENABLE_INVPCID |
+ SECONDARY_EXEC_RDTSCP |
+ SECONDARY_EXEC_XSAVES |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_ENABLE_VMFUNC);
+ if (nested_cpu_has(vmcs12,
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
+ vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
+ ~SECONDARY_EXEC_ENABLE_PML;
+ exec_control |= vmcs12_exec_ctrl;
+ }
+
+ /* VMCS shadowing for L2 is emulated for now */
+ exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
+
+ if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
+ vmcs_write16(GUEST_INTR_STATUS,
+ vmcs12->guest_intr_status);
+
+ /*
+ * Write an illegal value to APIC_ACCESS_ADDR. Later,
+ * nested_get_vmcs12_pages will either fix it up or
+ * remove the VM execution control.
+ */
+ if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
+ vmcs_write64(APIC_ACCESS_ADDR, -1ull);
+
+ if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
+ vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
+
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+ }
+
+ /*
+ * ENTRY CONTROLS
+ *
+ * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
+ * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
+ * on the related bits (if supported by the CPU) in the hope that
+ * we can avoid VMWrites during vmx_set_efer().
+ */
+ exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
+ ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
+ if (cpu_has_load_ia32_efer()) {
+ if (guest_efer & EFER_LMA)
+ exec_control |= VM_ENTRY_IA32E_MODE;
+ if (guest_efer != host_efer)
+ exec_control |= VM_ENTRY_LOAD_IA32_EFER;
+ }
+ vm_entry_controls_init(vmx, exec_control);
+
+ /*
+ * EXIT CONTROLS
+ *
+ * L2->L1 exit controls are emulated - the hardware exit is to L0 so
+ * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
+ * bits may be modified by vmx_set_efer() in prepare_vmcs02().
+ */
+ exec_control = vmx_vmexit_ctrl();
+ if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
+ exec_control |= VM_EXIT_LOAD_IA32_EFER;
+ vm_exit_controls_init(vmx, exec_control);
+
+ /*
+ * Conceptually we want to copy the PML address and index from
+ * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+ * since we always flush the log on each vmexit and never change
+ * the PML address (once set), this happens to be equivalent to
+ * simply resetting the index in vmcs02.
+ */
+ if (enable_pml)
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+
+ /*
+ * Interrupt/Exception Fields
+ */
+ if (vmx->nested.nested_run_pending) {
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+ vmcs12->vm_entry_intr_info_field);
+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
+ vmcs12->vm_entry_exception_error_code);
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ vmcs12->vm_entry_instruction_len);
+ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
+ vmcs12->guest_interruptibility_info);
+ vmx->loaded_vmcs->nmi_known_unmasked =
+ !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
+ } else {
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
+ }
+}
+
+static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+{
+ struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
+
+ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
+ vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
+ vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
+ vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
+ vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
+ vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
+ vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
+ vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
+ vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
+ vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
+ vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
+ vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
+ vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
+ vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
+ vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
+ vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
+ vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
+ vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
+ vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
+ vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
+ vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
+ vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
+ vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
+ vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
+ vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
+ vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
+ vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
+ vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
+ vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
+ vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
+ vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
+ vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
+ vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
+ vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
+ vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
+ }
+
+ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
+ vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
+ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
+ vmcs12->guest_pending_dbg_exceptions);
+ vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
+ vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
+
+ /*
+ * L1 may access the L2's PDPTR, so save them to construct
+ * vmcs12
+ */
+ if (enable_ept) {
+ vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
+ vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
+ vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
+ vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
+ }
+ }
+
+ if (nested_cpu_has_xsaves(vmcs12))
+ vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
+
+ /*
+ * Whether page-faults are trapped is determined by a combination of
+ * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
+ * If enable_ept, L0 doesn't care about page faults and we should
+ * set all of these to L1's desires. However, if !enable_ept, L0 does
+ * care about (at least some) page faults, and because it is not easy
+ * (if at all possible?) to merge L0 and L1's desires, we simply ask
+ * to exit on each and every L2 page fault. This is done by setting
+ * MASK=MATCH=0 and (see below) EB.PF=1.
+ * Note that below we don't need special code to set EB.PF beyond the
+ * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
+ * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
+ * !enable_ept, EB.PF is 1, so the "or" will always be 1.
+ */
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
+ enable_ept ? vmcs12->page_fault_error_code_mask : 0);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
+ enable_ept ? vmcs12->page_fault_error_code_match : 0);
+
+ if (cpu_has_vmx_apicv()) {
+ vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
+ vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
+ vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
+ vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
+ }
+
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+
+ set_cr4_guest_host_mask(vmx);
+
+ if (kvm_mpx_supported()) {
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+ else
+ vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+ }
+}
+
+/*
+ * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
+ * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
+ * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
+ * guest in a way that is appropriate both to L1's requests and to our
+ * needs. In addition to modifying the active vmcs (which is vmcs02), this
+ * function also has additional necessary side-effects, like setting various
+ * vcpu->arch fields.
+ * Returns 0 on success, 1 on failure. On failure, the invalid-state
+ * exit qualification code is stored in *entry_failure_code.
+ */
+static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ u32 *entry_failure_code)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
+
+ if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) {
+ prepare_vmcs02_full(vmx, vmcs12);
+ vmx->nested.dirty_vmcs12 = false;
+ }
+
+ /*
+ * First, the fields that are shadowed. This must be kept in sync
+ * with vmcs_shadow_fields.h.
+ */
+ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
+ vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
+ vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
+ }
+
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
+ kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+ } else {
+ kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
+ }
+ vmx_set_rflags(vcpu, vmcs12->guest_rflags);
+
+ vmx->nested.preemption_timer_expired = false;
+ if (nested_cpu_has_preemption_timer(vmcs12))
+ vmx_start_preemption_timer(vcpu);
+
+ /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
+ * bitwise-or of what L1 wants to trap for L2, and what we want to
+ * trap. Note that CR0.TS also needs updating - we do this later.
+ */
+ update_exception_bitmap(vcpu);
+ vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
+ vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
+ vcpu->arch.pat = vmcs12->guest_ia32_pat;
+ } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
+ vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
+ }
+
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+
+ if (kvm_has_tsc_control)
+ decache_tsc_multiplier(vmx);
+
+ if (enable_vpid) {
+ /*
+ * There is no direct mapping between vpid02 and vpid12: vpid02
+ * is per-vCPU for L0 and reused across nested vmentries, while
+ * a change of vpid12 is handled with a single INVVPID during
+ * nested vmentry. vpid12 is allocated by L1 for L2, so it does
+ * not influence the global bitmap (for vpid01 and vpid02
+ * allocation) even if L1 spawns a lot of nested vCPUs.
+ */
+ if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
+ if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+ vmx->nested.last_vpid = vmcs12->virtual_processor_id;
+ __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
+ }
+ } else {
+ /*
+ * If L1 uses EPT, then L0 needs to execute INVEPT on
+ * EPTP02 instead of EPTP01. Therefore, delay TLB
+ * flush until vmcs02->eptp is fully updated by
+ * KVM_REQ_LOAD_CR3. Note that this assumes
+ * KVM_REQ_TLB_FLUSH is evaluated after
+ * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
+ */
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ }
+ }
+
+ if (nested_cpu_has_ept(vmcs12))
+ nested_ept_init_mmu_context(vcpu);
+ else if (nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+ vmx_flush_tlb(vcpu, true);
+
+ /*
+ * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying the
+ * bits we consider mandatorily enabled.
+ * The CR0_READ_SHADOW is what L2 should have expected to read given
+ * the specifications by L1; it's not enough to take
+ * vmcs12->cr0_read_shadow, because our cr0_guest_host_mask may have
+ * more bits set than L1 expected.
+ */
+ vmx_set_cr0(vcpu, vmcs12->guest_cr0);
+ vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
+
+ vmx_set_cr4(vcpu, vmcs12->guest_cr4);
+ vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
+
+ vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
+ /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
+ vmx_set_efer(vcpu, vcpu->arch.efer);
+
+ /*
+ * If guest state is invalid and unrestricted guest is disabled,
+ * L1 attempted VMEntry to L2 with invalid state: fail the VMEntry.
+ */
+ if (vmx->emulation_required) {
+ *entry_failure_code = ENTRY_FAIL_DEFAULT;
+ return 1;
+ }
+
+ /* Load guest CR3, backed by either EPT or shadow page tables. */
+ if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
+ entry_failure_code))
+ return 1;
+
+ if (!enable_ept)
+ vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
+
+ kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
+ kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
+ return 0;
+}
+
+static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
+{
+ if (!nested_cpu_has_nmi_exiting(vmcs12) &&
+ nested_cpu_has_virtual_nmis(vmcs12))
+ return -EINVAL;
+
+ if (!nested_cpu_has_virtual_nmis(vmcs12) &&
+ nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
+ return -EINVAL;
+
+ return 0;
+}
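+
+/*
+ * These mirror the SDM's control dependencies: "virtual NMIs" may only
+ * be set if "NMI exiting" is, and "NMI-window exiting" may only be set
+ * if "virtual NMIs" is.
+ */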
+
+static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int maxphyaddr = cpuid_maxphyaddr(vcpu);
+
+ /* Check for memory type validity */
+ switch (address & VMX_EPTP_MT_MASK) {
+ case VMX_EPTP_MT_UC:
+ if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
+ return false;
+ break;
+ case VMX_EPTP_MT_WB:
+ if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ /* Only a 4-level page-walk length is valid. */
+ if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
+ return false;
+
+ /* Reserved bits should not be set */
+ if (address >> maxphyaddr || ((address >> 7) & 0x1f))
+ return false;
+
+ /* AD, if set, should be supported */
+ if (address & VMX_EPTP_AD_ENABLE_BIT) {
+ if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
+ return false;
+ }
+
+ return true;
+}
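+
+/*
+ * Worked example of the EPTP layout checked above (per the SDM
+ * encoding: bits 2:0 memory type, bits 5:3 page-walk length - 1,
+ * bit 6 A/D enable, bits 11:7 reserved): a write-back, 4-level,
+ * A/D-enabled EPTP is (pml4_pa & PAGE_MASK) | 6 | (3 << 3) | (1 << 6),
+ * i.e. low byte 0x5e.
+ */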
+
+/*
+ * Checks related to VM-Execution Control Fields
+ */
+static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
+ vmx->nested.msrs.pinbased_ctls_low,
+ vmx->nested.msrs.pinbased_ctls_high) ||
+ !vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
+ vmx->nested.msrs.procbased_ctls_low,
+ vmx->nested.msrs.procbased_ctls_high))
+ return -EINVAL;
+
+ if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
+ !vmx_control_verify(vmcs12->secondary_vm_exec_control,
+ vmx->nested.msrs.secondary_ctls_low,
+ vmx->nested.msrs.secondary_ctls_high))
+ return -EINVAL;
+
+ if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) ||
+ nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
+ nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
+ nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
+ nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
+ nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
+ nested_vmx_check_nmi_controls(vmcs12) ||
+ nested_vmx_check_pml_controls(vcpu, vmcs12) ||
+ nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
+ nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
+ nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
+ (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
+ return -EINVAL;
+
+ if (nested_cpu_has_ept(vmcs12) &&
+ !valid_ept_address(vcpu, vmcs12->ept_pointer))
+ return -EINVAL;
+
+ if (nested_cpu_has_vmfunc(vmcs12)) {
+ if (vmcs12->vm_function_control &
+ ~vmx->nested.msrs.vmfunc_controls)
+ return -EINVAL;
+
+ if (nested_cpu_has_eptp_switching(vmcs12)) {
+ if (!nested_cpu_has_ept(vmcs12) ||
+ !page_address_valid(vcpu, vmcs12->eptp_list_address))
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Checks related to VM-Exit Control Fields
+ */
+static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx_control_verify(vmcs12->vm_exit_controls,
+ vmx->nested.msrs.exit_ctls_low,
+ vmx->nested.msrs.exit_ctls_high) ||
+ nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Checks related to VM-Entry Control Fields
+ */
+static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx_control_verify(vmcs12->vm_entry_controls,
+ vmx->nested.msrs.entry_ctls_low,
+ vmx->nested.msrs.entry_ctls_high))
+ return -EINVAL;
+
+ /*
+ * From the Intel SDM, volume 3:
+ * Fields relevant to VM-entry event injection must be set properly.
+ * These fields are the VM-entry interruption-information field, the
+ * VM-entry exception error code, and the VM-entry instruction length.
+ */
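+ /*
+ * Field layout assumed by the checks below (per the SDM): bits 7:0
+ * vector, bits 10:8 interruption type, bit 11 deliver error code,
+ * bits 30:12 reserved, bit 31 valid.
+ */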
+ if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
+ u32 intr_info = vmcs12->vm_entry_intr_info_field;
+ u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
+ u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
+ bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
+ bool should_have_error_code;
+ bool urg = nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_UNRESTRICTED_GUEST);
+ bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
+
+ /* VM-entry interruption-info field: interruption type */
+ if (intr_type == INTR_TYPE_RESERVED ||
+ (intr_type == INTR_TYPE_OTHER_EVENT &&
+ !nested_cpu_supports_monitor_trap_flag(vcpu)))
+ return -EINVAL;
+
+ /* VM-entry interruption-info field: vector */
+ if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
+ (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
+ (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
+ return -EINVAL;
+
+ /* VM-entry interruption-info field: deliver error code */
+ should_have_error_code =
+ intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
+ x86_exception_has_error_code(vector);
+ if (has_error_code != should_have_error_code)
+ return -EINVAL;
+
+ /* VM-entry exception error code */
+ if (has_error_code &&
+ vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+ return -EINVAL;
+
+ /* VM-entry interruption-info field: reserved bits */
+ if (intr_info & INTR_INFO_RESVD_BITS_MASK)
+ return -EINVAL;
+
+ /* VM-entry instruction length */
+ switch (intr_type) {
+ case INTR_TYPE_SOFT_EXCEPTION:
+ case INTR_TYPE_SOFT_INTR:
+ case INTR_TYPE_PRIV_SW_EXCEPTION:
+ if ((vmcs12->vm_entry_instruction_len > 15) ||
+ (vmcs12->vm_entry_instruction_len == 0 &&
+ !nested_cpu_has_zero_length_injection(vcpu)))
+ return -EINVAL;
+ }
+ }
+
+ if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Checks related to Host Control Registers and MSRs
+ */
+static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ bool ia32e;
+
+ if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
+ !nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
+ !nested_cr3_valid(vcpu, vmcs12->host_cr3))
+ return -EINVAL;
+ /*
+ * If the load IA32_EFER VM-exit control is 1, bits reserved in the
+ * IA32_EFER MSR must be 0 in the field for that register. In addition,
+ * the values of the LMA and LME bits in the field must each be that of
+ * the host address-space size VM-exit control.
+ */
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
+ ia32e = (vmcs12->vm_exit_controls &
+ VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
+ if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
+ ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
+ ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Checks related to Guest Non-register State
+ */
+static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
+{
+ if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
+ vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
+ nested_check_vm_exit_controls(vcpu, vmcs12) ||
+ nested_check_vm_entry_controls(vcpu, vmcs12))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ if (nested_check_host_control_regs(vcpu, vmcs12))
+ return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
+
+ if (nested_check_guest_non_reg_state(vmcs12))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ return 0;
+}
+
+static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ int r;
+ struct page *page;
+ struct vmcs12 *shadow;
+
+ if (vmcs12->vmcs_link_pointer == -1ull)
+ return 0;
+
+ if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
+ return -EINVAL;
+
+ page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
+ if (is_error_page(page))
+ return -EINVAL;
+
+ r = 0;
+ shadow = kmap(page);
+ if (shadow->hdr.revision_id != VMCS12_REVISION ||
+ shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
+ r = -EINVAL;
+ kunmap(page);
+ kvm_release_page_clean(page);
+ return r;
+}
+
+static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12,
+ u32 *exit_qual)
+{
+ bool ia32e;
+
+ *exit_qual = ENTRY_FAIL_DEFAULT;
+
+ if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
+ !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
+ return 1;
+
+ if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
+ *exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
+ return 1;
+ }
+
+ /*
+ * If the load IA32_EFER VM-entry control is 1, the following checks
+ * are performed on the field for the IA32_EFER MSR:
+ * - Bits reserved in the IA32_EFER MSR must be 0.
+ * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
+ * the IA-32e mode guest VM-exit control. It must also be identical
+ * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
+ * CR0.PG) is 1.
+ */
+ if (to_vmx(vcpu)->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
+ ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
+ if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
+ ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
+ ((vmcs12->guest_cr0 & X86_CR0_PG) &&
+ ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
+ return 1;
+ }
+
+ if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
+ (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
+ (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
+ return 1;
+
+ return 0;
+}
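+
+/*
+ * The BNDCFGS check above relies on the MSR's layout: the base address
+ * lives in bits 63:12 (isolated by PAGE_MASK for the canonical check),
+ * bits 11:2 are reserved (MSR_IA32_BNDCFGS_RSVD), and bits 1:0 are
+ * BNDPRESERVE and EN.
+ */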
+
+static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long cr3, cr4;
+
+ if (!nested_early_check)
+ return 0;
+
+ if (vmx->msr_autoload.host.nr)
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ if (vmx->msr_autoload.guest.nr)
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+
+ preempt_disable();
+
+ vmx_prepare_switch_to_guest(vcpu);
+
+ /*
+ * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
+ * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
+ * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
+ * there is no need to preserve other bits or save/restore the field.
+ */
+ vmcs_writel(GUEST_RFLAGS, 0);
+
+ cr3 = __get_current_cr3_fast();
+ if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+ vmcs_writel(HOST_CR3, cr3);
+ vmx->loaded_vmcs->host_state.cr3 = cr3;
+ }
+
+ cr4 = cr4_read_shadow();
+ if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
+ vmcs_writel(HOST_CR4, cr4);
+ vmx->loaded_vmcs->host_state.cr4 = cr4;
+ }
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
+
+ asm(
+ /* Set HOST_RSP */
+ "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
+ __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
+ "mov %%" _ASM_SP ", %c[host_rsp](%1)\n\t"
+ "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
+
+ /* Check if vmlaunch or vmresume is needed */
+ "cmpl $0, %c[launched](%% " _ASM_CX")\n\t"
+
+ "call vmx_vmenter\n\t"
+
+ /* Set vmx->fail accordingly */
+ "setbe %c[fail](%% " _ASM_CX")\n\t"
+ : ASM_CALL_CONSTRAINT
+ : "c"(vmx), "d"((unsigned long)HOST_RSP),
+ [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
+ [fail]"i"(offsetof(struct vcpu_vmx, fail)),
+ [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
+ [wordsize]"i"(sizeof(ulong))
+ : "rax", "cc", "memory"
+ );
+
+ preempt_enable();
+
+ if (vmx->msr_autoload.host.nr)
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ if (vmx->msr_autoload.guest.nr)
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+
+ if (vmx->fail) {
+ WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ vmx->fail = 0;
+ return 1;
+ }
+
+ /*
+ * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
+ */
+ local_irq_enable();
+ if (hw_breakpoint_active())
+ set_debugreg(__this_cpu_read(cpu_dr7), 7);
+
+ /*
+ * A non-failing VMEntry means we somehow entered guest mode with
+ * an illegal RIP, and that's just the tip of the iceberg. There
+ * is no telling what memory has been modified or what state has
+ * been exposed to unknown code. Hitting this all but guarantees
+ * a (very critical) hardware issue.
+ */
+ WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
+ VMX_EXIT_REASONS_FAILED_VMENTRY));
+
+ return 0;
+}
+STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw);
+
+
+static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12);
+
+static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct page *page;
+ u64 hpa;
+
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ /*
+ * Translate L1 physical address to host physical
+ * address for vmcs02. Keep the page pinned, so this
+ * physical address remains valid. We keep a reference
+ * to it so we can release it later.
+ */
+ if (vmx->nested.apic_access_page) { /* shouldn't happen */
+ kvm_release_page_dirty(vmx->nested.apic_access_page);
+ vmx->nested.apic_access_page = NULL;
+ }
+ page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
+ /*
+ * If translation failed, no matter: This feature asks
+ * to exit when accessing the given address, and if it
+ * can never be accessed, this feature won't do
+ * anything anyway.
+ */
+ if (!is_error_page(page)) {
+ vmx->nested.apic_access_page = page;
+ hpa = page_to_phys(vmx->nested.apic_access_page);
+ vmcs_write64(APIC_ACCESS_ADDR, hpa);
+ } else {
+ vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+ }
+ }
+
+ if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
+ if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
+ kvm_release_page_dirty(vmx->nested.virtual_apic_page);
+ vmx->nested.virtual_apic_page = NULL;
+ }
+ page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
+
+ /*
+ * If translation failed, VM entry will fail because
+ * prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
+ * Failing the vm entry is _not_ what the processor
+ * does but it's basically the only possibility we
+ * have. We could still enter the guest if CR8 load
+ * exits are enabled, CR8 store exits are enabled, and
+ * virtualize APIC access is disabled; in this case
+ * the processor would never use the TPR shadow and we
+ * could simply clear the bit from the execution
+ * control. But such a configuration is useless, so
+ * let's keep the code simple.
+ */
+ if (!is_error_page(page)) {
+ vmx->nested.virtual_apic_page = page;
+ hpa = page_to_phys(vmx->nested.virtual_apic_page);
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
+ }
+ }
+
+ if (nested_cpu_has_posted_intr(vmcs12)) {
+ if (vmx->nested.pi_desc_page) { /* shouldn't happen */
+ kunmap(vmx->nested.pi_desc_page);
+ kvm_release_page_dirty(vmx->nested.pi_desc_page);
+ vmx->nested.pi_desc_page = NULL;
+ vmx->nested.pi_desc = NULL;
+ vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
+ }
+ page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
+ if (is_error_page(page))
+ return;
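+ /*
+ * The 64-byte posted-interrupt descriptor need not be page
+ * aligned, so map its page and add the offset of the
+ * descriptor within that page.
+ */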
+ vmx->nested.pi_desc_page = page;
+ vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
+ vmx->nested.pi_desc =
+ (struct pi_desc *)((void *)vmx->nested.pi_desc +
+ (unsigned long)(vmcs12->posted_intr_desc_addr &
+ (PAGE_SIZE - 1)));
+ vmcs_write64(POSTED_INTR_DESC_ADDR,
+ page_to_phys(vmx->nested.pi_desc_page) +
+ (unsigned long)(vmcs12->posted_intr_desc_addr &
+ (PAGE_SIZE - 1)));
+ }
+ if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
+ vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_USE_MSR_BITMAPS);
+ else
+ vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_USE_MSR_BITMAPS);
+}
+
+/*
+ * Intel's VMX Instruction Reference specifies a common set of prerequisites
+ * for running VMX instructions (except VMXON, whose prerequisites are
+ * slightly different). It also specifies what exception to inject otherwise.
+ * Note that many of these exceptions have priority over VM exits, so they
+ * don't have to be checked again here.
+ */
+static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
+{
+ if (!to_vmx(vcpu)->nested.vmxon) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 0;
+ }
+
+ if (vmx_get_cpl(vcpu)) {
+ kvm_inject_gp(vcpu, 0);
+ return 0;
+ }
+
+ return 1;
+}
+
+static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
+{
+ u8 rvi = vmx_get_rvi();
+ u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
+
+ return ((rvi & 0xf0) > (vppr & 0xf0));
+}
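+
+/*
+ * Example of the priority-class comparison above: with RVI = 0x31
+ * (class 3) and VPPR = 0x2f (class 2), 0x30 > 0x20, so a pending
+ * virtual interrupt is deliverable; equal classes are not.
+ */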
+
+static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12);
+
+/*
+ * If from_vmentry is false, this is being called from state restore (either RSM
+ * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
+ *
+ * Returns:
+ *  0 - success, i.e. proceed with actual VMEnter
+ *  1 - consistency check VMExit
+ * -1 - consistency check VMFail
+ */
+int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ bool evaluate_pending_interrupts;
+ u32 exit_reason = EXIT_REASON_INVALID_STATE;
+ u32 exit_qual;
+
+ evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
+ (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
+ if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
+ evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
+
+ if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
+ vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+ if (kvm_mpx_supported() &&
+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
+
+ vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
+
+ prepare_vmcs02_early(vmx, vmcs12);
+
+ if (from_vmentry) {
+ nested_get_vmcs12_pages(vcpu);
+
+ if (nested_vmx_check_vmentry_hw(vcpu)) {
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+ return -1;
+ }
+
+ if (nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+ goto vmentry_fail_vmexit;
+ }
+
+ enter_guest_mode(vcpu);
+ if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+ vcpu->arch.tsc_offset += vmcs12->tsc_offset;
+
+ if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+ goto vmentry_fail_vmexit_guest_mode;
+
+ if (from_vmentry) {
+ exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
+ exit_qual = nested_vmx_load_msr(vcpu,
+ vmcs12->vm_entry_msr_load_addr,
+ vmcs12->vm_entry_msr_load_count);
+ if (exit_qual)
+ goto vmentry_fail_vmexit_guest_mode;
+ } else {
+ /*
+ * The MMU is not initialized to point at the right entities yet and
+ * "get pages" would need to read data from the guest (i.e. we will
+ * need to perform gpa to hpa translation). Request a call
+ * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
+ * have already been set at vmentry time and should not be reset.
+ */
+ kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+ }
+
+ /*
+ * If L1 had a pending IRQ/NMI that was not delivered before it
+ * executed VMLAUNCH/VMRESUME because delivery was disallowed
+ * (e.g. interrupts disabled), L0 needs to evaluate whether this
+ * pending event should cause an exit from L2 to L1 or be
+ * delivered directly to L2 (e.g. in case L1 doesn't intercept
+ * EXTERNAL_INTERRUPT).
+ *
+ * Usually this would be handled by the processor noticing an
+ * IRQ/NMI window request, or checking RVI during evaluation of
+ * pending virtual interrupts. However, this setting was done
+ * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
+ * to perform pending event evaluation by requesting a
+ * KVM_REQ_EVENT.
+ */
+ if (unlikely(evaluate_pending_interrupts))
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+ /*
+ * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
+ * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
+ * returned as far as L1 is concerned. It will only return (and set
+ * the success flag) when L2 exits (see nested_vmx_vmexit()).
+ */
+ return 0;
+
+ /*
+ * A failed consistency check that leads to a VMExit during L1's
+ * VMEnter to L2 is a variation of a normal VMexit, as explained in
+ * 26.7 "VM-entry failures during or after loading guest state".
+ */
+vmentry_fail_vmexit_guest_mode:
+ if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+ vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+ leave_guest_mode(vcpu);
+
+vmentry_fail_vmexit:
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+
+ if (!from_vmentry)
+ return 1;
+
+ load_vmcs12_host_state(vcpu, vmcs12);
+ vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
+ vmcs12->exit_qualification = exit_qual;
+ if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
+ vmx->nested.need_vmcs12_sync = true;
+ return 1;
+}
+
+/*
+ * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
+ * for running an L2 nested guest.
+ */
+static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
+{
+ struct vmcs12 *vmcs12;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
+ int ret;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
+ return 1;
+
+ if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
+ return nested_vmx_failInvalid(vcpu);
+
+ vmcs12 = get_vmcs12(vcpu);
+
+ /*
+ * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
+ * that there *is* a valid VMCS pointer, RFLAGS.CF is set
+ * rather than RFLAGS.ZF, and no error number is stored to the
+ * VM-instruction error field.
+ */
+ if (vmcs12->hdr.shadow_vmcs)
+ return nested_vmx_failInvalid(vcpu);
+
+ if (vmx->nested.hv_evmcs) {
+ copy_enlightened_to_vmcs12(vmx);
+ /* Enlightened VMCS doesn't have launch state */
+ vmcs12->launch_state = !launch;
+ } else if (enable_shadow_vmcs) {
+ copy_shadow_to_vmcs12(vmx);
+ }
+
+ /*
+ * The nested entry process starts with enforcing various prerequisites
+ * on vmcs12 as required by the Intel SDM, acting appropriately when
+ * they fail: As the SDM explains, some conditions should cause the
+ * instruction to fail, while others will cause the instruction to seem
+ * to succeed, but return an EXIT_REASON_INVALID_STATE.
+ * To speed up the normal (success) code path, we should avoid checking
+ * for misconfigurations which will anyway be caught by the processor
+ * when using the merged vmcs02.
+ */
+ if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
+
+ if (vmcs12->launch_state == launch)
+ return nested_vmx_failValid(vcpu,
+ launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
+ : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
+
+ ret = nested_vmx_check_vmentry_prereqs(vcpu, vmcs12);
+ if (ret)
+ return nested_vmx_failValid(vcpu, ret);
+
+ /*
+ * We're finally done with prerequisite checking, and can start with
+ * the nested entry.
+ */
+ vmx->nested.nested_run_pending = 1;
+ ret = nested_vmx_enter_non_root_mode(vcpu, true);
+ vmx->nested.nested_run_pending = !ret;
+ if (ret > 0)
+ return 1;
+ else if (ret)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
+ /* Hide L1D cache contents from the nested guest. */
+ vmx->vcpu.arch.l1tf_flush_l1d = true;
+
+ /*
+ * Must happen outside of nested_vmx_enter_non_root_mode() as it will
+ * also be used as part of restoring nVMX state for
+ * snapshot restore (migration).
+ *
+ * In this flow, it is assumed that the vmcs12 cache was
+ * transferred as part of the captured nVMX state and should
+ * therefore not be read from guest memory (which may not
+ * exist on the destination host yet).
+ */
+ nested_cache_shadow_vmcs12(vcpu, vmcs12);
+
+ /*
+ * If we're entering a halted L2 vcpu and the L2 vcpu won't be
+ * awakened by event injection or by an NMI-window VM-exit or
+ * by an interrupt-window VM-exit, halt the vcpu.
+ */
+ if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
+ !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
+ !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
+ !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
+ (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
+ vmx->nested.nested_run_pending = 0;
+ return kvm_vcpu_halt(vcpu);
+ }
+ return 1;
+}
+
+/*
+ * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
+ * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
+ * This function returns the new value we should put in vmcs12.guest_cr0.
+ * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
+ * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
+ * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
+ * didn't trap the bit, because if L1 did, so would L0).
+ * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
+ * been modified by L2, and L1 knows it. So just leave the old value of
+ * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
+ * isn't relevant, because if L0 traps this bit it can set it to anything.
+ * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
+ * changed these bits, and therefore they need to be updated, but L0
+ * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
+ * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
+ */
+static inline unsigned long
+vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ return
+ /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
+ /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
+ /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
+ vcpu->arch.cr0_guest_owned_bits));
+}
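+
+/*
+ * The three masks above partition CR0's bits: prepare_vmcs02() cleared
+ * vmcs12->cr0_guest_host_mask out of cr0_guest_owned_bits, so cases 1,
+ * 2 and 3 are disjoint and together cover every bit exactly once.
+ */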
+
+static inline unsigned long
+vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ return
+ /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
+ /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
+ /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
+ vcpu->arch.cr4_guest_owned_bits));
+}
+
+static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ u32 idt_vectoring;
+ unsigned int nr;
+
+ if (vcpu->arch.exception.injected) {
+ nr = vcpu->arch.exception.nr;
+ idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
+
+ if (kvm_exception_is_soft(nr)) {
+ vmcs12->vm_exit_instruction_len =
+ vcpu->arch.event_exit_inst_len;
+ idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
+ } else
+ idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
+
+ if (vcpu->arch.exception.has_error_code) {
+ idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
+ vmcs12->idt_vectoring_error_code =
+ vcpu->arch.exception.error_code;
+ }
+
+ vmcs12->idt_vectoring_info_field = idt_vectoring;
+ } else if (vcpu->arch.nmi_injected) {
+ vmcs12->idt_vectoring_info_field =
+ INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
+ } else if (vcpu->arch.interrupt.injected) {
+ nr = vcpu->arch.interrupt.nr;
+ idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
+
+ if (vcpu->arch.interrupt.soft) {
+ idt_vectoring |= INTR_TYPE_SOFT_INTR;
+ vmcs12->vm_entry_instruction_len =
+ vcpu->arch.event_exit_inst_len;
+ } else
+ idt_vectoring |= INTR_TYPE_EXT_INTR;
+
+ vmcs12->idt_vectoring_info_field = idt_vectoring;
+ }
+}
+
+
+static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ gfn_t gfn;
+
+ /*
+ * Don't need to mark the APIC access page dirty; it is never
+ * written to by the CPU during APIC virtualization.
+ */
+
+ if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
+ gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
+ kvm_vcpu_mark_page_dirty(vcpu, gfn);
+ }
+
+ if (nested_cpu_has_posted_intr(vmcs12)) {
+ gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
+ kvm_vcpu_mark_page_dirty(vcpu, gfn);
+ }
+}
+
+static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int max_irr;
+ void *vapic_page;
+ u16 status;
+
+ if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
+ return;
+
+ vmx->nested.pi_pending = false;
+ if (!pi_test_and_clear_on(vmx->nested.pi_desc))
+ return;
+
+ max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
+ if (max_irr != 256) {
+ vapic_page = kmap(vmx->nested.virtual_apic_page);
+ __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
+ vapic_page, &max_irr);
+ kunmap(vmx->nested.virtual_apic_page);
+
+ status = vmcs_read16(GUEST_INTR_STATUS);
+ if ((u8)max_irr > ((u8)status & 0xff)) {
+ status &= ~0xff;
+ status |= (u8)max_irr;
+ vmcs_write16(GUEST_INTR_STATUS, status);
+ }
+ }
+
+ nested_mark_vmcs12_pages_dirty(vcpu);
+}
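+
+/*
+ * GUEST_INTR_STATUS as updated above is 16 bits: RVI in bits 7:0 and
+ * SVI in bits 15:8, which is why only the low byte is replaced when a
+ * higher-priority vector is found in the PIR.
+ */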
+
+static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
+ unsigned long exit_qual)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ unsigned int nr = vcpu->arch.exception.nr;
+ u32 intr_info = nr | INTR_INFO_VALID_MASK;
+
+ if (vcpu->arch.exception.has_error_code) {
+ vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
+ intr_info |= INTR_INFO_DELIVER_CODE_MASK;
+ }
+
+ if (kvm_exception_is_soft(nr))
+ intr_info |= INTR_TYPE_SOFT_EXCEPTION;
+ else
+ intr_info |= INTR_TYPE_HARD_EXCEPTION;
+
+ if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
+ vmx_get_nmi_mask(vcpu))
+ intr_info |= INTR_INFO_UNBLOCK_NMI;
+
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
+}
+
+static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long exit_qual;
+ bool block_nested_events =
+ vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
+
+ if (vcpu->arch.exception.pending &&
+ nested_vmx_check_exception(vcpu, &exit_qual)) {
+ if (block_nested_events)
+ return -EBUSY;
+ nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
+ return 0;
+ }
+
+ if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
+ vmx->nested.preemption_timer_expired) {
+ if (block_nested_events)
+ return -EBUSY;
+ nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
+ return 0;
+ }
+
+ if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
+ if (block_nested_events)
+ return -EBUSY;
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+ NMI_VECTOR | INTR_TYPE_NMI_INTR |
+ INTR_INFO_VALID_MASK, 0);
+ /*
+ * The NMI-triggered VM exit counts as injection:
+ * clear this one and block further NMIs.
+ */
+ vcpu->arch.nmi_pending = 0;
+ vmx_set_nmi_mask(vcpu, true);
+ return 0;
+ }
+
+ if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
+ nested_exit_on_intr(vcpu)) {
+ if (block_nested_events)
+ return -EBUSY;
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
+ return 0;
+ }
+
+ vmx_complete_nested_posted_interrupt(vcpu);
+ return 0;
+}
+
+static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
+{
+ ktime_t remaining =
+ hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
+ u64 value;
+
+ if (ktime_to_ns(remaining) <= 0)
+ return 0;
+
+ value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
+ do_div(value, 1000000);
+ return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
+}
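+
+/*
+ * Worked example for the arithmetic above (assumed numbers): with
+ * virtual_tsc_khz = 2000000 (2 GHz) and 1 ms remaining, value =
+ * 1000000 ns * 2000000 / 1000000 = 2000000 TSC cycles, and the
+ * rate shift (one timer tick per 2^5 TSC cycles) yields 62500.
+ */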
+
+/*
+ * Update the guest state fields of vmcs12 to reflect changes that
+ * occurred while L2 was running. (The "IA-32e mode guest" bit of the
+ * VM-entry controls is also updated, since this is really a guest
+ * state bit.)
+ */
+static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
+ vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
+
+ vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+ vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
+ vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
+
+ vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
+ vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
+ vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
+ vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
+ vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
+ vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
+ vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
+ vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
+ vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
+ vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
+ vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
+ vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
+ vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
+ vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
+ vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
+ vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
+ vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
+ vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
+ vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
+ vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
+ vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
+ vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
+ vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
+ vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
+ vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
+ vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
+ vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
+ vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
+ vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
+ vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
+ vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
+ vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
+ vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
+ vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
+ vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
+ vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
+
+ vmcs12->guest_interruptibility_info =
+ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+ vmcs12->guest_pending_dbg_exceptions =
+ vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
+ if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+ vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
+ else
+ vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
+
+ if (nested_cpu_has_preemption_timer(vmcs12)) {
+ if (vmcs12->vm_exit_controls &
+ VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
+ vmcs12->vmx_preemption_timer_value =
+ vmx_get_preemption_timer_value(vcpu);
+ hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
+ }
+
+ /*
+ * In some cases (usually, nested EPT), L2 is allowed to change its
+ * own CR3 without exiting. If it has changed it, we must keep it.
+ * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
+ * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
+ *
+ * Additionally, restore L2's PDPTR to vmcs12.
+ */
+ if (enable_ept) {
+ vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
+ vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
+ vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
+ vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
+ vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
+ }
+
+ vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
+
+ if (nested_cpu_has_vid(vmcs12))
+ vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
+
+ vmcs12->vm_entry_controls =
+ (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
+ (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
+
+ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
+ kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
+ vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+ }
+
+ /* TODO: These cannot have changed unless we have MSR bitmaps and
+ * the relevant bit asks not to trap the change */
+ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
+ vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
+ if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
+ vmcs12->guest_ia32_efer = vcpu->arch.efer;
+ vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
+ vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
+ vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
+ if (kvm_mpx_supported())
+ vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
+}
+
+/*
+ * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
+ * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
+ * and this function updates it to reflect the changes to the guest state while
+ * L2 was running (and perhaps made some exits which were handled directly by L0
+ * without going back to L1), and to reflect the exit reason.
+ * Note that we do not have to copy here all VMCS fields, just those that
+ * could have been changed by the L2 guest or the exit - i.e., the guest-state and
+ * exit-information fields only. Other fields are modified by L1 with VMWRITE,
+ * which already writes to vmcs12 directly.
+ */
+static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ u32 exit_reason, u32 exit_intr_info,
+ unsigned long exit_qualification)
+{
+ /* update guest state fields: */
+ sync_vmcs12(vcpu, vmcs12);
+
+ /* update exit information fields: */
+
+ vmcs12->vm_exit_reason = exit_reason;
+ vmcs12->exit_qualification = exit_qualification;
+ vmcs12->vm_exit_intr_info = exit_intr_info;
+
+ vmcs12->idt_vectoring_info_field = 0;
+ vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+
+ if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
+ vmcs12->launch_state = 1;
+
+ /* vm_entry_intr_info_field is cleared on exit. Emulate this
+ * instead of reading the real value. */
+ vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
+
+ /*
+ * Transfer the event that L0 or L1 may have wanted to inject into
+ * L2 to IDT_VECTORING_INFO_FIELD.
+ */
+ vmcs12_save_pending_event(vcpu, vmcs12);
+
+ /*
+ * According to spec, there's no need to store the guest's
+ * MSRs if the exit is due to a VM-entry failure that occurs
+ * during or after loading the guest state. Since this exit
+ * does not fall in that category, we need to save the MSRs.
+ */
+ if (nested_vmx_store_msr(vcpu,
+ vmcs12->vm_exit_msr_store_addr,
+ vmcs12->vm_exit_msr_store_count))
+ nested_vmx_abort(vcpu,
+ VMX_ABORT_SAVE_GUEST_MSR_FAIL);
+ }
+
+ /*
+ * Drop what we picked up for L2 via vmx_complete_interrupts. It is
+ * preserved above and would only end up incorrectly in L1.
+ */
+ vcpu->arch.nmi_injected = false;
+ kvm_clear_exception_queue(vcpu);
+ kvm_clear_interrupt_queue(vcpu);
+}
+
+/*
+ * A part of what we need to do when the nested L2 guest exits and we want to
+ * run its L1 parent is to reset L1's guest state to the host state specified
+ * in vmcs12.
+ * This function is to be called not only on normal nested exit, but also on
+ * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
+ * Failures During or After Loading Guest State").
+ * This function should be called when the active VMCS is L1's (vmcs01).
+ */
+static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ struct kvm_segment seg;
+ u32 entry_failure_code;
+
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
+ vcpu->arch.efer = vmcs12->host_ia32_efer;
+ else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
+ vcpu->arch.efer |= (EFER_LMA | EFER_LME);
+ else
+ vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
+ vmx_set_efer(vcpu, vcpu->arch.efer);
+
+ kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
+ kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
+ vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
+ vmx_set_interrupt_shadow(vcpu, 0);
+
+ /*
+ * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
+ * actually changed, because vmx_set_cr0 refers to the EFER set above.
+ *
+ * CR0_GUEST_HOST_MASK is already set in the original vmcs01
+ * (KVM doesn't change it).
+ */
+ vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ vmx_set_cr0(vcpu, vmcs12->host_cr0);
+
+ /* Same as above - no reason to call set_cr4_guest_host_mask(). */
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+ vmx_set_cr4(vcpu, vmcs12->host_cr4);
+
+ nested_ept_uninit_mmu_context(vcpu);
+
+ /*
+ * Only PDPTE load can fail as the value of cr3 was checked on entry and
+ * couldn't have changed.
+ */
+ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
+
+ if (!enable_ept)
+ vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+
+ /*
+ * If vmcs01 doesn't use VPID, CPU flushes TLB on every
+ * VMEntry/VMExit. Thus, no need to flush TLB.
+ *
+ * If vmcs12 doesn't use VPID, L1 expects TLB to be
+ * flushed on every VMEntry/VMExit.
+ *
+ * Otherwise, we can preserve TLB entries as long as we are
+ * able to tag L1 TLB entries differently than L2 TLB entries.
+ *
+ * If vmcs12 uses EPT, we need to execute this flush on EPTP01
+ * and therefore we request the TLB flush to happen only after VMCS EPTP
+ * has been set by KVM_REQ_LOAD_CR3.
+ */
+ if (enable_vpid &&
+ (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ }
+
+ vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
+ vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
+ vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
+ vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
+ vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
+ vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
+ vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
+
+ /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
+ if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
+ vmcs_write64(GUEST_BNDCFGS, 0);
+
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
+ vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
+ vcpu->arch.pat = vmcs12->host_ia32_pat;
+ }
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
+ vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
+ vmcs12->host_ia32_perf_global_ctrl);
+
+ /*
+ * Set L1 segment info according to Intel SDM 27.5.2 "Loading Host
+ * Segment and Descriptor-Table Registers".
+ */
+ seg = (struct kvm_segment) {
+ .base = 0,
+ .limit = 0xFFFFFFFF,
+ .selector = vmcs12->host_cs_selector,
+ .type = 11,
+ .present = 1,
+ .s = 1,
+ .g = 1
+ };
+ if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
+ seg.l = 1;
+ else
+ seg.db = 1;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
+ seg = (struct kvm_segment) {
+ .base = 0,
+ .limit = 0xFFFFFFFF,
+ .type = 3,
+ .present = 1,
+ .s = 1,
+ .db = 1,
+ .g = 1
+ };
+ seg.selector = vmcs12->host_ds_selector;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
+ seg.selector = vmcs12->host_es_selector;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
+ seg.selector = vmcs12->host_ss_selector;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
+ seg.selector = vmcs12->host_fs_selector;
+ seg.base = vmcs12->host_fs_base;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
+ seg.selector = vmcs12->host_gs_selector;
+ seg.base = vmcs12->host_gs_base;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
+ seg = (struct kvm_segment) {
+ .base = vmcs12->host_tr_base,
+ .limit = 0x67,
+ .selector = vmcs12->host_tr_selector,
+ .type = 11,
+ .present = 1
+ };
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
+
+ kvm_set_dr(vcpu, 7, 0x400);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmx_update_msr_bitmap(vcpu);
+
+ if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
+ vmcs12->vm_exit_msr_load_count))
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
+}
+
+static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
+{
+ struct shared_msr_entry *efer_msr;
+ unsigned int i;
+
+ if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
+ return vmcs_read64(GUEST_IA32_EFER);
+
+ if (cpu_has_load_ia32_efer())
+ return host_efer;
+
+ for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
+ if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
+ return vmx->msr_autoload.guest.val[i].value;
+ }
+
+ efer_msr = find_msr_entry(vmx, MSR_EFER);
+ if (efer_msr)
+ return efer_msr->data;
+
+ return host_efer;
+}
+
+static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmx_msr_entry g, h;
+ struct msr_data msr;
+ gpa_t gpa;
+ u32 i, j;
+
+ vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
+
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
+ /*
+ * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
+ * as vmcs01.GUEST_DR7 contains a userspace defined value
+ * and vcpu->arch.dr7 is not squirreled away before the
+ * nested VMENTER (not worth adding a variable in nested_vmx).
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+ kvm_set_dr(vcpu, 7, DR7_FIXED_1);
+ else
+ WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
+ }
+
+ /*
+ * Note that calling vmx_set_{efer,cr0,cr4} is important as they
+ * handle a variety of side effects to KVM's software model.
+ */
+ vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
+
+ vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
+
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+ vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
+
+ nested_ept_uninit_mmu_context(vcpu);
+ vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+ /*
+ * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
+ * from vmcs01 (if necessary). The PDPTRs are not loaded on
+ * VMFail; like everything else, we just need to ensure our
+ * software model is up-to-date.
+ */
+ ept_save_pdptrs(vcpu);
+
+ kvm_mmu_reset_context(vcpu);
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmx_update_msr_bitmap(vcpu);
+
+ /*
+ * This nasty bit of open coding is a compromise between blindly
+ * loading L1's MSRs using the exit load lists (incorrect emulation
+ * of VMFail), leaving the nested VM's MSRs in the software model
+ * (incorrect behavior) and snapshotting the modified MSRs (too
+ * expensive since the lists are not bounded by hardware). For each
+ * MSR that was (prematurely) loaded from the nested VMEntry load
+ * list, reload it from the exit load list if it exists and differs
+ * from the guest value. The intent is to stuff host state as
+ * silently as possible, not to fully process the exit load list.
+ */
+ msr.host_initiated = false;
+ for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
+ gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
+ if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
+ pr_debug_ratelimited(
+ "%s read MSR index failed (%u, 0x%08llx)\n",
+ __func__, i, gpa);
+ goto vmabort;
+ }
+
+ for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
+ gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
+ if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
+ pr_debug_ratelimited(
+ "%s read MSR failed (%u, 0x%08llx)\n",
+ __func__, j, gpa);
+ goto vmabort;
+ }
+ if (h.index != g.index)
+ continue;
+ if (h.value == g.value)
+ break;
+
+ if (nested_vmx_load_msr_check(vcpu, &h)) {
+ pr_debug_ratelimited(
+ "%s check failed (%u, 0x%x, 0x%x)\n",
+ __func__, j, h.index, h.reserved);
+ goto vmabort;
+ }
+
+ msr.index = h.index;
+ msr.data = h.value;
+ if (kvm_set_msr(vcpu, &msr)) {
+ pr_debug_ratelimited(
+ "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
+ __func__, j, h.index, h.value);
+ goto vmabort;
+ }
+ }
+ }
+
+ return;
+
+vmabort:
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
+}
+
+/*
+ * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
+ * and modify vmcs12 to make it see what it would expect to see there if
+ * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
+ */
+void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+ u32 exit_intr_info, unsigned long exit_qualification)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ /* trying to cancel vmlaunch/vmresume is a bug */
+ WARN_ON_ONCE(vmx->nested.nested_run_pending);
+
+ leave_guest_mode(vcpu);
+
+ if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+ vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+
+ if (likely(!vmx->fail)) {
+ if (exit_reason == -1)
+ sync_vmcs12(vcpu, vmcs12);
+ else
+ prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
+ exit_qualification);
+
+ /*
+ * Must happen outside of sync_vmcs12() as it will
+ * also be used to capture vmcs12 cache as part of
+ * capturing nVMX state for snapshot (migration).
+ *
+ * Otherwise, this flush will dirty guest memory at a
+ * point where it is already assumed by user-space to be
+ * immutable.
+ */
+ nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
+ } else {
+ /*
+ * The only expected VM-instruction error is "VM entry with
+ * invalid control field(s)." Anything else indicates a
+ * problem with L0. And we should never get here with a
+ * VMFail of any type if early consistency checks are enabled.
+ */
+ WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ WARN_ON_ONCE(nested_early_check);
+ }
+
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+
+ /* Update any VMCS fields that might have changed while L2 ran */
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+
+ if (kvm_has_tsc_control)
+ decache_tsc_multiplier(vmx);
+
+ if (vmx->nested.change_vmcs01_virtual_apic_mode) {
+ vmx->nested.change_vmcs01_virtual_apic_mode = false;
+ vmx_set_virtual_apic_mode(vcpu);
+ } else if (!nested_cpu_has_ept(vmcs12) &&
+ nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ vmx_flush_tlb(vcpu, true);
+ }
+
+ /* This is needed for same reason as it was needed in prepare_vmcs02 */
+ vmx->host_rsp = 0;
+
+ /* Unpin physical memory we referred to in vmcs02 */
+ if (vmx->nested.apic_access_page) {
+ kvm_release_page_dirty(vmx->nested.apic_access_page);
+ vmx->nested.apic_access_page = NULL;
+ }
+ if (vmx->nested.virtual_apic_page) {
+ kvm_release_page_dirty(vmx->nested.virtual_apic_page);
+ vmx->nested.virtual_apic_page = NULL;
+ }
+ if (vmx->nested.pi_desc_page) {
+ kunmap(vmx->nested.pi_desc_page);
+ kvm_release_page_dirty(vmx->nested.pi_desc_page);
+ vmx->nested.pi_desc_page = NULL;
+ vmx->nested.pi_desc = NULL;
+ }
+
+ /*
+ * While L2 was running, the mmu_notifier may have forced a reload
+ * of the page's hpa for the L2 vmcs; reload it for L1 before
+ * entering L1.
+ */
+ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
+
+ if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
+ vmx->nested.need_vmcs12_sync = true;
+
+ /* in case we halted in L2 */
+ vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
+ if (likely(!vmx->fail)) {
+ /*
+ * TODO: SDM says that with acknowledge interrupt on
+ * exit, bit 31 of the VM-exit interrupt information
+ * (valid interrupt) is always set to 1 on
+ * EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
+ * need kvm_cpu_has_interrupt(). See the commit
+ * message for details.
+ */
+ if (nested_exit_intr_ack_set(vcpu) &&
+ exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
+ kvm_cpu_has_interrupt(vcpu)) {
+ int irq = kvm_cpu_get_interrupt(vcpu);
+ WARN_ON(irq < 0);
+ vmcs12->vm_exit_intr_info = irq |
+ INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
+ }
+
+ if (exit_reason != -1)
+ trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
+ vmcs12->exit_qualification,
+ vmcs12->idt_vectoring_info_field,
+ vmcs12->vm_exit_intr_info,
+ vmcs12->vm_exit_intr_error_code,
+ KVM_ISA_VMX);
+
+ load_vmcs12_host_state(vcpu, vmcs12);
+
+ return;
+ }
+
+ /*
+ * After an early L2 VM-entry failure, we're now back
+ * in L1 which thinks it just finished a VMLAUNCH or
+ * VMRESUME instruction, so we need to set the failure
+ * flag and the VM-instruction error field of the VMCS
+ * accordingly, and skip the emulated instruction.
+ */
+ (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
+ /*
+ * Restore L1's host state to KVM's software model. We're here
+ * because a consistency check failure was caught by hardware, which
+ * means some amount of guest state has been propagated to KVM's
+ * model and needs to be unwound to the host's state.
+ */
+ nested_vmx_restore_host_state(vcpu);
+
+ vmx->fail = 0;
+}
+
+/*
+ * Decode the memory-address operand of a vmx instruction, as recorded on an
+ * exit caused by such an instruction (run by a guest hypervisor).
+ * On success, returns 0. When the operand is invalid, returns 1 and
+ * injects #UD, #GP(0) or #SS(0) into the guest.
+ */
+int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+ u32 vmx_instruction_info, bool wr, gva_t *ret)
+{
+ gva_t off;
+ bool exn;
+ struct kvm_segment s;
+
+ /*
+ * According to Vol. 3B, "Information for VM Exits Due to Instruction
+ * Execution", on an exit, vmx_instruction_info holds most of the
+ * addressing components of the operand. Only the displacement part
+ * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
+ * For how an actual address is calculated from all these components,
+ * refer to Vol. 1, "Operand Addressing".
+ */
+ int scaling = vmx_instruction_info & 3;
+ int addr_size = (vmx_instruction_info >> 7) & 7;
+ bool is_reg = vmx_instruction_info & (1u << 10);
+ int seg_reg = (vmx_instruction_info >> 15) & 7;
+ int index_reg = (vmx_instruction_info >> 18) & 0xf;
+ bool index_is_valid = !(vmx_instruction_info & (1u << 22));
+ int base_reg = (vmx_instruction_info >> 23) & 0xf;
+ bool base_is_valid = !(vmx_instruction_info & (1u << 27));
+
+ if (is_reg) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ /* Addr = segment_base + offset */
+ /* offset = base + [index * scale] + displacement */
+ off = exit_qualification; /* holds the displacement */
+ if (base_is_valid)
+ off += kvm_register_read(vcpu, base_reg);
+ if (index_is_valid)
+ off += kvm_register_read(vcpu, index_reg) << scaling;
+ vmx_get_segment(vcpu, &s, seg_reg);
+ *ret = s.base + off;
+
+ if (addr_size == 1) /* 32 bit */
+ *ret &= 0xffffffff;
+
+ /* Checks for #GP/#SS exceptions. */
+ exn = false;
+ if (is_long_mode(vcpu)) {
+ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+ * non-canonical form. This is the only check on the memory
+ * destination for long mode!
+ */
+ exn = is_noncanonical_address(*ret, vcpu);
+ } else if (is_protmode(vcpu)) {
+ /* Protected mode: apply checks for segment validity in the
+ * following order:
+ * - segment type check (#GP(0) may be thrown)
+ * - usability check (#GP(0)/#SS(0))
+ * - limit check (#GP(0)/#SS(0))
+ */
+ if (wr)
+ /* #GP(0) if the destination operand is located in a
+ * read-only data segment or any code segment.
+ */
+ exn = ((s.type & 0xa) == 0 || (s.type & 8));
+ else
+ /* #GP(0) if the source operand is located in an
+ * execute-only code segment
+ */
+ exn = ((s.type & 0xa) == 8);
+ if (exn) {
+ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+ return 1;
+ }
+ /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
+ */
+ exn = (s.unusable != 0);
+ /* Protected mode: #GP(0)/#SS(0) if the memory
+ * operand is outside the segment limit.
+ */
+ exn = exn || (off + sizeof(u64) > s.limit);
+ }
+ if (exn) {
+ kvm_queue_exception_e(vcpu,
+ seg_reg == VCPU_SREG_SS ?
+ SS_VECTOR : GP_VECTOR,
+ 0);
+ return 1;
+ }
+
+ return 0;
+}
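+
+/*
+ * Worked example (operands illustrative): for a VMREAD with memory
+ * destination [rbx + rsi*4 + 0x10] in 64-bit mode, hardware would report
+ * roughly scaling == 2 (index shifted left by 2), addr_size == 2 (64-bit),
+ * is_reg == false, index_reg == RSI and base_reg == RBX both valid, and a
+ * displacement of 0x10 in exit_qualification, so the code above computes
+ * *ret = s.base + rbx + (rsi << 2) + 0x10.
+ */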
+
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
+{
+ gva_t gva;
+ struct x86_exception e;
+
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
+ return 1;
+
+ if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Allocate a shadow VMCS and associate it with the currently loaded
+ * VMCS, unless such a shadow VMCS already exists. The newly allocated
+ * VMCS is also VMCLEARed, so that it is ready for use.
+ */
+static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
+
+ /*
+ * We should allocate a shadow vmcs for vmcs01 only when L1
+ * executes VMXON and free it when L1 executes VMXOFF.
+ * As it is invalid to execute VMXON twice, we shouldn't reach
+ * here when vmcs01 already have an allocated shadow vmcs.
+ */
+ WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
+
+ if (!loaded_vmcs->shadow_vmcs) {
+ loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
+ if (loaded_vmcs->shadow_vmcs)
+ vmcs_clear(loaded_vmcs->shadow_vmcs);
+ }
+ return loaded_vmcs->shadow_vmcs;
+}
+
+static int enter_vmx_operation(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int r;
+
+ r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
+ if (r < 0)
+ goto out_vmcs02;
+
+ vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+ if (!vmx->nested.cached_vmcs12)
+ goto out_cached_vmcs12;
+
+ vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
+ if (!vmx->nested.cached_shadow_vmcs12)
+ goto out_cached_shadow_vmcs12;
+
+ if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
+ goto out_shadow_vmcs;
+
+ hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
+
+ vmx->nested.vpid02 = allocate_vpid();
+
+ vmx->nested.vmcs02_initialized = false;
+ vmx->nested.vmxon = true;
+
+ if (pt_mode == PT_MODE_HOST_GUEST) {
+ vmx->pt_desc.guest.ctl = 0;
+ pt_update_intercept_for_msr(vmx);
+ }
+
+ return 0;
+
+out_shadow_vmcs:
+ kfree(vmx->nested.cached_shadow_vmcs12);
+
+out_cached_shadow_vmcs12:
+ kfree(vmx->nested.cached_vmcs12);
+
+out_cached_vmcs12:
+ free_loaded_vmcs(&vmx->nested.vmcs02);
+
+out_vmcs02:
+ return -ENOMEM;
+}
+
+/*
+ * Emulate the VMXON instruction.
+ * Currently, we just remember that VMX is active, and do not need to store
+ * anything in the guest-allocated memory region the VMXON pointer refers to.
+ * We do validate that pointer and record it, so that VMCLEAR and VMPTRLD can
+ * verify that their argument is different from the VMXON pointer (as the
+ * spec requires).
+ */
+static int handle_vmon(struct kvm_vcpu *vcpu)
+{
+ int ret;
+ gpa_t vmptr;
+ struct page *page;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
+ | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+
+ /*
+ * The Intel VMX Instruction Reference lists a bunch of bits that are
+ * prerequisite to running VMXON, most notably cr4.VMXE must be set to
+ * 1 (see vmx_set_cr4() for when we allow the guest to set this).
+ * Otherwise, we should fail with #UD. But most faulting conditions
+ * have already been checked by hardware, prior to the VM-exit for
+ * VMXON. We do test guest cr4.VMXE because processor CR4 always has
+ * that bit set to 1 in non-root mode.
+ */
+ if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ /* CPL=0 must be checked manually. */
+ if (vmx_get_cpl(vcpu)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ if (vmx->nested.vmxon)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
+
+ if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
+ != VMXON_NEEDED_FEATURES) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ if (nested_vmx_get_vmptr(vcpu, &vmptr))
+ return 1;
+
+ /*
+ * SDM 3: 24.11.5
+ * The first 4 bytes of the VMXON region contain the supported
+ * VMCS revision identifier.
+ *
+ * Note: IA32_VMX_BASIC[48], which would restrict the VMXON pointer
+ * to 32 bits of physical address, is never 1 for the nested case.
+ */
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ return nested_vmx_failInvalid(vcpu);
+
+ page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
+ if (is_error_page(page))
+ return nested_vmx_failInvalid(vcpu);
+
+ if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+ kunmap(page);
+ kvm_release_page_clean(page);
+ return nested_vmx_failInvalid(vcpu);
+ }
+ kunmap(page);
+ kvm_release_page_clean(page);
+
+ vmx->nested.vmxon_ptr = vmptr;
+ ret = enter_vmx_operation(vcpu);
+ if (ret)
+ return ret;
+
+ return nested_vmx_succeed(vcpu);
+}
+
+static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (vmx->nested.current_vmptr == -1ull)
+ return;
+
+ if (enable_shadow_vmcs) {
+ /*
+ * Copy to memory all shadowed fields in case
+ * they were modified.
+ */
+ copy_shadow_to_vmcs12(vmx);
+ vmx->nested.need_vmcs12_sync = false;
+ vmx_disable_shadow_vmcs(vmx);
+ }
+ vmx->nested.posted_intr_nv = -1;
+
+ /* Flush VMCS12 to guest memory */
+ kvm_vcpu_write_guest_page(vcpu,
+ vmx->nested.current_vmptr >> PAGE_SHIFT,
+ vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
+
+ kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
+
+ vmx->nested.current_vmptr = -1ull;
+}
+
+/* Emulate the VMXOFF instruction */
+static int handle_vmoff(struct kvm_vcpu *vcpu)
+{
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+ free_nested(vcpu);
+ return nested_vmx_succeed(vcpu);
+}
+
+/* Emulate the VMCLEAR instruction */
+static int handle_vmclear(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 zero = 0;
+ gpa_t vmptr;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (nested_vmx_get_vmptr(vcpu, &vmptr))
+ return 1;
+
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMCLEAR_INVALID_ADDRESS);
+
+ if (vmptr == vmx->nested.vmxon_ptr)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMCLEAR_VMXON_POINTER);
+
+ if (vmx->nested.hv_evmcs_page) {
+ if (vmptr == vmx->nested.hv_evmcs_vmptr)
+ nested_release_evmcs(vcpu);
+ } else {
+ if (vmptr == vmx->nested.current_vmptr)
+ nested_release_vmcs12(vcpu);
+
+ kvm_vcpu_write_guest(vcpu,
+ vmptr + offsetof(struct vmcs12,
+ launch_state),
+ &zero, sizeof(zero));
+ }
+
+ return nested_vmx_succeed(vcpu);
+}
+
+static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
+
+/* Emulate the VMLAUNCH instruction */
+static int handle_vmlaunch(struct kvm_vcpu *vcpu)
+{
+ return nested_vmx_run(vcpu, true);
+}
+
+/* Emulate the VMRESUME instruction */
+static int handle_vmresume(struct kvm_vcpu *vcpu)
+{
+ return nested_vmx_run(vcpu, false);
+}
+
+static int handle_vmread(struct kvm_vcpu *vcpu)
+{
+ unsigned long field;
+ u64 field_value;
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ gva_t gva = 0;
+ struct vmcs12 *vmcs12;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
+ return nested_vmx_failInvalid(vcpu);
+
+ if (!is_guest_mode(vcpu))
+ vmcs12 = get_vmcs12(vcpu);
+ else {
+ /*
+ * When vmcs12->vmcs_link_pointer is -1ull, any VMREAD
+ * to a shadowed field sets the ALU flags for VMfailInvalid.
+ */
+ if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
+ return nested_vmx_failInvalid(vcpu);
+ vmcs12 = get_shadow_vmcs12(vcpu);
+ }
+
+ /* Decode instruction info and find the field to read */
+ field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ /* Read the field, zero-extended to a u64 field_value */
+ if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
+ /*
+ * Now copy part of this value to register or memory, as requested.
+ * Note that the number of bits actually copied is 32 or 64 depending
+ * on the guest's mode (32 or 64 bit), not on the given field's length.
+ */
+ if (vmx_instruction_info & (1u << 10)) {
+ kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
+ field_value);
+ } else {
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, true, &gva))
+ return 1;
+ /* _system ok, nested_vmx_check_permission has verified cpl=0 */
+ kvm_write_guest_virt_system(vcpu, gva, &field_value,
+ (is_long_mode(vcpu) ? 8 : 4), NULL);
+ }
+
+ return nested_vmx_succeed(vcpu);
+}
+
+static int handle_vmwrite(struct kvm_vcpu *vcpu)
+{
+ unsigned long field;
+ gva_t gva;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+
+ /* The value to write might be 32 or 64 bits, depending on L1's long
+ * mode, and eventually we need to write that into a field of several
+ * possible lengths. The code below first zero-extends the value to 64
+ * bits (field_value), and then copies only the appropriate number of
+ * bits into the vmcs12 field.
+ */
+ u64 field_value = 0;
+ struct x86_exception e;
+ struct vmcs12 *vmcs12;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (vmx->nested.current_vmptr == -1ull)
+ return nested_vmx_failInvalid(vcpu);
+
+ if (vmx_instruction_info & (1u << 10))
+ field_value = kvm_register_readl(vcpu,
+ (((vmx_instruction_info) >> 3) & 0xf));
+ else {
+ if (get_vmx_mem_address(vcpu, exit_qualification,
+ vmx_instruction_info, false, &gva))
+ return 1;
+ if (kvm_read_guest_virt(vcpu, gva, &field_value,
+ (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+ }
+
+ field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+ /*
+ * If the vCPU supports "VMWRITE to any supported field in the
+ * VMCS," then the "read-only" fields are actually read/write.
+ */
+ if (vmcs_field_readonly(field) &&
+ !nested_cpu_has_vmwrite_any_field(vcpu))
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
+
+ if (!is_guest_mode(vcpu))
+ vmcs12 = get_vmcs12(vcpu);
+ else {
+ /*
+ * When vmcs12->vmcs_link_pointer is -1ull, any VMWRITE
+ * to a shadowed field sets the ALU flags for VMfailInvalid.
+ */
+ if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
+ return nested_vmx_failInvalid(vcpu);
+ vmcs12 = get_shadow_vmcs12(vcpu);
+ }
+
+ if (vmcs12_write_any(vmcs12, field, field_value) < 0)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
+ /*
+ * Do not track vmcs12 dirty-state if in guest-mode
+ * as we actually dirty shadow vmcs12 instead of vmcs12.
+ */
+ if (!is_guest_mode(vcpu)) {
+ switch (field) {
+#define SHADOW_FIELD_RW(x) case x:
+#include "vmcs_shadow_fields.h"
+ /*
+ * The fields that can be updated by L1 without a vmexit are
+ * always updated in the vmcs02, the others go down the slow
+ * path of prepare_vmcs02.
+ */
+ break;
+ default:
+ vmx->nested.dirty_vmcs12 = true;
+ break;
+ }
+ }
+
+ return nested_vmx_succeed(vcpu);
+}
+
+static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
+{
+ vmx->nested.current_vmptr = vmptr;
+ if (enable_shadow_vmcs) {
+ vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_SHADOW_VMCS);
+ vmcs_write64(VMCS_LINK_POINTER,
+ __pa(vmx->vmcs01.shadow_vmcs));
+ vmx->nested.need_vmcs12_sync = true;
+ }
+ vmx->nested.dirty_vmcs12 = true;
+}
+
+/* Emulate the VMPTRLD instruction */
+static int handle_vmptrld(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ gpa_t vmptr;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (nested_vmx_get_vmptr(vcpu, &vmptr))
+ return 1;
+
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMPTRLD_INVALID_ADDRESS);
+
+ if (vmptr == vmx->nested.vmxon_ptr)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMPTRLD_VMXON_POINTER);
+
+ /* Forbid normal VMPTRLD if Enlightened version was used */
+ if (vmx->nested.hv_evmcs)
+ return 1;
+
+ if (vmx->nested.current_vmptr != vmptr) {
+ struct vmcs12 *new_vmcs12;
+ struct page *page;
+
+ page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
+ if (is_error_page(page)) {
+ /*
+ * Reads from an unbacked page return all 1s,
+ * which means that the 32 bits located at the
+ * given physical address won't match the required
+ * VMCS12_REVISION identifier.
+ */
+ nested_vmx_failValid(vcpu,
+ VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ new_vmcs12 = kmap(page);
+ if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
+ (new_vmcs12->hdr.shadow_vmcs &&
+ !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
+ kunmap(page);
+ kvm_release_page_clean(page);
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
+ }
+
+ nested_release_vmcs12(vcpu);
+
+ /*
+ * Load VMCS12 from guest memory since it is not already
+ * cached.
+ */
+ memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
+ kunmap(page);
+ kvm_release_page_clean(page);
+
+ set_current_vmptr(vmx, vmptr);
+ }
+
+ return nested_vmx_succeed(vcpu);
+}
+
+/* Emulate the VMPTRST instruction */
+static int handle_vmptrst(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
+ u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
+ struct x86_exception e;
+ gva_t gva;
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
+ return 1;
+
+ if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
+ return 1;
+ /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
+ if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
+ sizeof(gpa_t), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+ return nested_vmx_succeed(vcpu);
+}
+
+/* Emulate the INVEPT instruction */
+static int handle_invept(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 vmx_instruction_info, types;
+ unsigned long type;
+ gva_t gva;
+ struct x86_exception e;
+ struct {
+ u64 eptp, gpa;
+ } operand;
+
+ if (!(vmx->nested.msrs.secondary_ctls_high &
+ SECONDARY_EXEC_ENABLE_EPT) ||
+ !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+
+ types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
+
+ if (type >= 32 || !(types & (1 << type)))
+ return nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+
+ /* According to the Intel VMX instruction reference, the memory
+ * operand is read even if it isn't needed (e.g., for type==global)
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmx_instruction_info, false, &gva))
+ return 1;
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+
+ switch (type) {
+ case VMX_EPT_EXTENT_GLOBAL:
+ /*
+ * TODO: track mappings and invalidate
+ * single context requests appropriately
+ */
+ case VMX_EPT_EXTENT_CONTEXT:
+ kvm_mmu_sync_roots(vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ return nested_vmx_succeed(vcpu);
+}
+
+static int handle_invvpid(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 vmx_instruction_info;
+ unsigned long type, types;
+ gva_t gva;
+ struct x86_exception e;
+ struct {
+ u64 vpid;
+ u64 gla;
+ } operand;
+ u16 vpid02;
+
+ if (!(vmx->nested.msrs.secondary_ctls_high &
+ SECONDARY_EXEC_ENABLE_VPID) ||
+ !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+
+ types = (vmx->nested.msrs.vpid_caps &
+ VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
+
+ if (type >= 32 || !(types & (1 << type)))
+ return nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+
+ /* According to the Intel VMX instruction reference, the memory
+ * operand is read even if it isn't needed (e.g., for type==global)
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmx_instruction_info, false, &gva))
+ return 1;
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+ if (operand.vpid >> 16)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+
+ vpid02 = nested_get_vpid02(vcpu);
+ switch (type) {
+ case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
+ if (!operand.vpid ||
+ is_noncanonical_address(operand.gla, vcpu))
+ return nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ if (cpu_has_vmx_invvpid_individual_addr()) {
+ __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
+ vpid02, operand.gla);
+ } else
+ __vmx_flush_tlb(vcpu, vpid02, false);
+ break;
+ case VMX_VPID_EXTENT_SINGLE_CONTEXT:
+ case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
+ if (!operand.vpid)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ __vmx_flush_tlb(vcpu, vpid02, false);
+ break;
+ case VMX_VPID_EXTENT_ALL_CONTEXT:
+ __vmx_flush_tlb(vcpu, vpid02, false);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+
+ return nested_vmx_succeed(vcpu);
+}
+
+static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
+ u64 address;
+ bool accessed_dirty;
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
+ if (!nested_cpu_has_eptp_switching(vmcs12) ||
+ !nested_cpu_has_ept(vmcs12))
+ return 1;
+
+ if (index >= VMFUNC_EPTP_ENTRIES)
+ return 1;
+
+ if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
+ &address, index * 8, 8))
+ return 1;
+
+ accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
+
+ /*
+ * If the (L2) guest does a vmfunc to the currently
+ * active ept pointer, we don't have to do anything else
+ */
+ if (vmcs12->ept_pointer != address) {
+ if (!valid_ept_address(vcpu, address))
+ return 1;
+
+ kvm_mmu_unload(vcpu);
+ mmu->ept_ad = accessed_dirty;
+ mmu->mmu_role.base.ad_disabled = !accessed_dirty;
+ vmcs12->ept_pointer = address;
+ /*
+ * TODO: Check what's the correct approach in case
+ * mmu reload fails. Currently, we just let the next
+ * reload potentially fail
+ */
+ kvm_mmu_reload(vcpu);
+ }
+
+ return 0;
+}
+
+static int handle_vmfunc(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs12 *vmcs12;
+ u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
+
+ /*
+ * VMFUNC is only supported for nested guests, but we always enable the
+ * secondary control for simplicity; for non-nested mode, fake that we
+ * didn't by injecting #UD.
+ */
+ if (!is_guest_mode(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ vmcs12 = get_vmcs12(vcpu);
+ if ((vmcs12->vm_function_control & (1 << function)) == 0)
+ goto fail;
+
+ switch (function) {
+ case 0:
+ if (nested_vmx_eptp_switching(vcpu, vmcs12))
+ goto fail;
+ break;
+ default:
+ goto fail;
+ }
+ return kvm_skip_emulated_instruction(vcpu);
+
+fail:
+ nested_vmx_vmexit(vcpu, vmx->exit_reason,
+ vmcs_read32(VM_EXIT_INTR_INFO),
+ vmcs_readl(EXIT_QUALIFICATION));
+ return 1;
+}
+
+static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ unsigned long exit_qualification;
+ gpa_t bitmap, last_bitmap;
+ unsigned int port;
+ int size;
+ u8 b;
+
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+ return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ port = exit_qualification >> 16;
+ size = (exit_qualification & 7) + 1;
+
+ last_bitmap = (gpa_t)-1;
+ b = -1;
+
+ while (size > 0) {
+ if (port < 0x8000)
+ bitmap = vmcs12->io_bitmap_a;
+ else if (port < 0x10000)
+ bitmap = vmcs12->io_bitmap_b;
+ else
+ return true;
+ bitmap += (port & 0x7fff) / 8;
+
+ if (last_bitmap != bitmap)
+ if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
+ return true;
+ if (b & (1 << (port & 7)))
+ return true;
+
+ port++;
+ size--;
+ last_bitmap = bitmap;
+ }
+
+ return false;
+}
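+
+/*
+ * Worked example (port number illustrative): an OUT to port 0x3f8 with
+ * size 1 falls in io_bitmap_a (port < 0x8000); the code above tests byte
+ * 0x3f8 / 8 == 127 of that bitmap, bit 0x3f8 & 7 == 0. A multi-byte
+ * access that straddles the 0x7fff/0x8000 boundary is walked one port at
+ * a time, which is why the loop reloads the bitmap byte when it changes.
+ */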
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle an MSR access,
+ * rather than handle it ourselves in L0. I.e., check whether L1 expressed
+ * interest in the current event (a read or write of a specific MSR) via an
+ * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
+ */
+static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12, u32 exit_reason)
+{
+ u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
+ gpa_t bitmap;
+
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
+ return true;
+
+ /*
+ * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
+ * for the four combinations of read/write and low/high MSR numbers.
+ * First we need to figure out which of the four to use:
+ */
+ bitmap = vmcs12->msr_bitmap;
+ if (exit_reason == EXIT_REASON_MSR_WRITE)
+ bitmap += 2048;
+ if (msr_index >= 0xc0000000) {
+ msr_index -= 0xc0000000;
+ bitmap += 1024;
+ }
+
+ /* Then read the msr_index'th bit from this bitmap: */
+ if (msr_index < 1024*8) {
+ unsigned char b;
+ if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
+ return true;
+ return 1 & (b >> (msr_index & 7));
+ } else
+ return true; /* let L1 handle the wrong parameter */
+}
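+
+/*
+ * Worked example (MSR chosen for illustration): a WRMSR to MSR_LSTAR
+ * (0xc0000082) takes the write half (bitmap += 2048) and the high-MSR
+ * quarter (bitmap += 1024), leaving msr_index == 0x82; the code above
+ * then tests bit 2 of byte 0x10 in that 1024-byte bitmap.
+ */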
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
+ * rather than handle it ourselves in L0. I.e., check if L1 wanted to
+ * intercept (via guest_host_mask etc.) the current event.
+ */
+static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ int cr = exit_qualification & 15;
+ int reg;
+ unsigned long val;
+
+ switch ((exit_qualification >> 4) & 3) {
+ case 0: /* mov to cr */
+ reg = (exit_qualification >> 8) & 15;
+ val = kvm_register_readl(vcpu, reg);
+ switch (cr) {
+ case 0:
+ if (vmcs12->cr0_guest_host_mask &
+ (val ^ vmcs12->cr0_read_shadow))
+ return true;
+ break;
+ case 3:
+ if ((vmcs12->cr3_target_count >= 1 &&
+ vmcs12->cr3_target_value0 == val) ||
+ (vmcs12->cr3_target_count >= 2 &&
+ vmcs12->cr3_target_value1 == val) ||
+ (vmcs12->cr3_target_count >= 3 &&
+ vmcs12->cr3_target_value2 == val) ||
+ (vmcs12->cr3_target_count >= 4 &&
+ vmcs12->cr3_target_value3 == val))
+ return false;
+ if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
+ return true;
+ break;
+ case 4:
+ if (vmcs12->cr4_guest_host_mask &
+ (vmcs12->cr4_read_shadow ^ val))
+ return true;
+ break;
+ case 8:
+ if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
+ return true;
+ break;
+ }
+ break;
+ case 2: /* clts */
+ if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
+ (vmcs12->cr0_read_shadow & X86_CR0_TS))
+ return true;
+ break;
+ case 1: /* mov from cr */
+ switch (cr) {
+ case 3:
+ if (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_CR3_STORE_EXITING)
+ return true;
+ break;
+ case 8:
+ if (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_CR8_STORE_EXITING)
+ return true;
+ break;
+ }
+ break;
+ case 3: /* lmsw */
+ /*
+ * lmsw can change bits 1..3 of cr0, and only set bit 0 of
+ * cr0. Other attempted changes are ignored, with no exit.
+ */
+ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
+ if (vmcs12->cr0_guest_host_mask & 0xe &
+ (val ^ vmcs12->cr0_read_shadow))
+ return true;
+ if ((vmcs12->cr0_guest_host_mask & 0x1) &&
+ !(vmcs12->cr0_read_shadow & 0x1) &&
+ (val & 0x1))
+ return true;
+ break;
+ }
+ return false;
+}
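+
+/*
+ * lmsw example (mask value illustrative): with cr0_guest_host_mask ==
+ * X86_CR0_TS (0x8) and TS clear in cr0_read_shadow, "lmsw $0x8" changes
+ * an L1-owned bit relative to the shadow (0x8 & 0xe & (0x8 ^ 0x0) != 0),
+ * so the function above returns true and the exit is reflected to L1;
+ * "lmsw $0x0" changes nothing L1 owns and is handled in L0.
+ */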
+
+static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12, gpa_t bitmap)
+{
+ u32 vmx_instruction_info;
+ unsigned long field;
+ u8 b;
+
+ if (!nested_cpu_has_shadow_vmcs(vmcs12))
+ return true;
+
+ /* Decode instruction info and find the field to access */
+ vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+
+ /* Out-of-range fields always cause a VM exit from L2 to L1 */
+ if (field >> 15)
+ return true;
+
+ if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
+ return true;
+
+ return 1 & (b >> (field & 7));
+}
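+
+/*
+ * Worked example (field chosen for illustration): a VMREAD of GUEST_RIP
+ * (encoding 0x681e) passes the range check (0x681e >> 15 == 0), and the
+ * code above tests bit 6 of byte 0x681e / 8 == 0xd03 in L1's vmread
+ * bitmap.
+ */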
+
+/*
+ * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
+ * should handle it ourselves in L0 (and then continue L2). Only call this
+ * when in guest mode (is_guest_mode()), i.e. while running L2.
+ */
+bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
+{
+ u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (vmx->nested.nested_run_pending)
+ return false;
+
+ if (unlikely(vmx->fail)) {
+ pr_info_ratelimited("%s failed vm entry %x\n", __func__,
+ vmcs_read32(VM_INSTRUCTION_ERROR));
+ return true;
+ }
+
+ /*
+ * The host physical addresses of some pages of guest memory
+ * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
+ * Page). The CPU may write to these pages via their host
+ * physical address while L2 is running, bypassing any
+ * address-translation-based dirty tracking (e.g. EPT write
+ * protection).
+ *
+ * Mark them dirty on every exit from L2 to prevent them from
+ * getting out of sync with dirty tracking.
+ */
+ nested_mark_vmcs12_pages_dirty(vcpu);
+
+ trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
+ vmcs_readl(EXIT_QUALIFICATION),
+ vmx->idt_vectoring_info,
+ intr_info,
+ vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
+ KVM_ISA_VMX);
+
+ switch (exit_reason) {
+ case EXIT_REASON_EXCEPTION_NMI:
+ if (is_nmi(intr_info))
+ return false;
+ else if (is_page_fault(intr_info))
+ return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
+ else if (is_debug(intr_info) &&
+ vcpu->guest_debug &
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+ return false;
+ else if (is_breakpoint(intr_info) &&
+ vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+ return false;
+ return vmcs12->exception_bitmap &
+ (1u << (intr_info & INTR_INFO_VECTOR_MASK));
+ case EXIT_REASON_EXTERNAL_INTERRUPT:
+ return false;
+ case EXIT_REASON_TRIPLE_FAULT:
+ return true;
+ case EXIT_REASON_PENDING_INTERRUPT:
+ return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
+ case EXIT_REASON_NMI_WINDOW:
+ return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
+ case EXIT_REASON_TASK_SWITCH:
+ return true;
+ case EXIT_REASON_CPUID:
+ return true;
+ case EXIT_REASON_HLT:
+ return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
+ case EXIT_REASON_INVD:
+ return true;
+ case EXIT_REASON_INVLPG:
+ return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
+ case EXIT_REASON_RDPMC:
+ return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
+ case EXIT_REASON_RDRAND:
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
+ case EXIT_REASON_RDSEED:
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
+ case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
+ return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
+ case EXIT_REASON_VMREAD:
+ return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
+ vmcs12->vmread_bitmap);
+ case EXIT_REASON_VMWRITE:
+ return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
+ vmcs12->vmwrite_bitmap);
+ case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
+ case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
+ case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
+ case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+ case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
+ /*
+ * VMX instructions trap unconditionally. This allows L1 to
+ * emulate them for its L2 guest, i.e., allows 3-level nesting!
+ */
+ return true;
+ case EXIT_REASON_CR_ACCESS:
+ return nested_vmx_exit_handled_cr(vcpu, vmcs12);
+ case EXIT_REASON_DR_ACCESS:
+ return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
+ case EXIT_REASON_IO_INSTRUCTION:
+ return nested_vmx_exit_handled_io(vcpu, vmcs12);
+ case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
+ case EXIT_REASON_MSR_READ:
+ case EXIT_REASON_MSR_WRITE:
+ return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
+ case EXIT_REASON_INVALID_STATE:
+ return true;
+ case EXIT_REASON_MWAIT_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
+ case EXIT_REASON_MONITOR_TRAP_FLAG:
+ return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
+ case EXIT_REASON_MONITOR_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
+ case EXIT_REASON_PAUSE_INSTRUCTION:
+ return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
+ nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING);
+ case EXIT_REASON_MCE_DURING_VMENTRY:
+ return false;
+ case EXIT_REASON_TPR_BELOW_THRESHOLD:
+ return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
+ case EXIT_REASON_APIC_ACCESS:
+ case EXIT_REASON_APIC_WRITE:
+ case EXIT_REASON_EOI_INDUCED:
+ /*
+ * The controls for "virtualize APIC accesses," "APIC-
+ * register virtualization," and "virtual-interrupt
+ * delivery" only come from vmcs12.
+ */
+ return true;
+ case EXIT_REASON_EPT_VIOLATION:
+ /*
+ * L0 always deals with the EPT violation. If nested EPT is
+ * used, and the nested mmu code discovers that the address is
+ * missing in the guest EPT table (EPT12), the EPT violation
+ * will be injected with nested_ept_inject_page_fault()
+ */
+ return false;
+ case EXIT_REASON_EPT_MISCONFIG:
+ /*
+ * L2 never uses L1's EPT directly, but rather L0's own EPT
+ * table (shadow on EPT) or a merged EPT table that L0 built
+ * (EPT on EPT). So any problem with the structure of the
+ * table is L0's fault.
+ */
+ return false;
+ case EXIT_REASON_INVPCID:
+ return
+ nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
+ nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
+ case EXIT_REASON_WBINVD:
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
+ case EXIT_REASON_XSETBV:
+ return true;
+ case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
+ /*
+ * This should never happen, since it is not possible to
+ * set XSS to a non-zero value---neither in L1 nor in L2.
+ * If it were, XSS would have to be checked against
+ * the XSS exit bitmap in vmcs12.
+ */
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
+ case EXIT_REASON_PREEMPTION_TIMER:
+ return false;
+ case EXIT_REASON_PML_FULL:
+ /* We emulate PML support to L1. */
+ return false;
+ case EXIT_REASON_VMFUNC:
+ /* VM functions are emulated through L2->L0 vmexits. */
+ return false;
+ case EXIT_REASON_ENCLS:
+ /* SGX is never exposed to L1 */
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state __user *user_kvm_nested_state,
+ u32 user_data_size)
+{
+ struct vcpu_vmx *vmx;
+ struct vmcs12 *vmcs12;
+ struct kvm_nested_state kvm_state = {
+ .flags = 0,
+ .format = 0,
+ .size = sizeof(kvm_state),
+ .vmx.vmxon_pa = -1ull,
+ .vmx.vmcs_pa = -1ull,
+ };
+
+ if (!vcpu)
+ return kvm_state.size + 2 * VMCS12_SIZE;
+
+ vmx = to_vmx(vcpu);
+ vmcs12 = get_vmcs12(vcpu);
+
+ if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
+ kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
+
+ if (nested_vmx_allowed(vcpu) &&
+ (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
+ kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
+ kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
+
+ if (vmx_has_valid_vmcs12(vcpu)) {
+ kvm_state.size += VMCS12_SIZE;
+
+ if (is_guest_mode(vcpu) &&
+ nested_cpu_has_shadow_vmcs(vmcs12) &&
+ vmcs12->vmcs_link_pointer != -1ull)
+ kvm_state.size += VMCS12_SIZE;
+ }
+
+ if (vmx->nested.smm.vmxon)
+ kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
+
+ if (vmx->nested.smm.guest_mode)
+ kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
+
+ if (is_guest_mode(vcpu)) {
+ kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
+
+ if (vmx->nested.nested_run_pending)
+ kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
+ }
+ }
+
+ if (user_data_size < kvm_state.size)
+ goto out;
+
+ if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
+ return -EFAULT;
+
+ if (!vmx_has_valid_vmcs12(vcpu))
+ goto out;
+
+ /*
+ * When running L2, the authoritative vmcs12 state is in the
+ * vmcs02. When running L1, the authoritative vmcs12 state is
+ * in the shadow or enlightened vmcs linked to vmcs01, unless
+ * need_vmcs12_sync is set, in which case, the authoritative
+ * vmcs12 state is in the vmcs12 already.
+ */
+ if (is_guest_mode(vcpu)) {
+ sync_vmcs12(vcpu, vmcs12);
+ } else if (!vmx->nested.need_vmcs12_sync) {
+ if (vmx->nested.hv_evmcs)
+ copy_enlightened_to_vmcs12(vmx);
+ else if (enable_shadow_vmcs)
+ copy_shadow_to_vmcs12(vmx);
+ }
+
+ if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
+ return -EFAULT;
+
+ if (nested_cpu_has_shadow_vmcs(vmcs12) &&
+ vmcs12->vmcs_link_pointer != -1ull) {
+ if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
+ get_shadow_vmcs12(vcpu), sizeof(*vmcs12)))
+ return -EFAULT;
+ }
+
+out:
+ return kvm_state.size;
+}
+
+/*
+ * Forcibly leave nested mode in order to be able to reset the VCPU later on.
+ */
+void vmx_leave_nested(struct kvm_vcpu *vcpu)
+{
+ if (is_guest_mode(vcpu)) {
+ to_vmx(vcpu)->nested.nested_run_pending = 0;
+ nested_vmx_vmexit(vcpu, -1, 0, 0);
+ }
+ free_nested(vcpu);
+}
+
+static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
+ struct kvm_nested_state __user *user_kvm_nested_state,
+ struct kvm_nested_state *kvm_state)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs12 *vmcs12;
+ u32 exit_qual;
+ int ret;
+
+ if (kvm_state->format != 0)
+ return -EINVAL;
+
+ if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
+ nested_enable_evmcs(vcpu, NULL);
+
+ if (!nested_vmx_allowed(vcpu))
+ return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
+
+ if (kvm_state->vmx.vmxon_pa == -1ull) {
+ if (kvm_state->vmx.smm.flags)
+ return -EINVAL;
+
+ if (kvm_state->vmx.vmcs_pa != -1ull)
+ return -EINVAL;
+
+ vmx_leave_nested(vcpu);
+ return 0;
+ }
+
+ if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
+ return -EINVAL;
+
+ if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+ (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
+ return -EINVAL;
+
+ if (kvm_state->vmx.smm.flags &
+ ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
+ return -EINVAL;
+
+ /*
+ * SMM temporarily disables VMX, so we cannot be in guest mode,
+ * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
+ * must be zero.
+ */
+ if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
+ return -EINVAL;
+
+ if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
+ !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
+ return -EINVAL;
+
+ vmx_leave_nested(vcpu);
+ if (kvm_state->vmx.vmxon_pa == -1ull)
+ return 0;
+
+ vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
+ ret = enter_vmx_operation(vcpu);
+ if (ret)
+ return ret;
+
+ /* Empty 'VMXON' state is permitted */
+ if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
+ return 0;
+
+ if (kvm_state->vmx.vmcs_pa != -1ull) {
+ if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
+ !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+ return -EINVAL;
+
+ set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+ } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
+ /*
+ * Sync eVMCS upon entry as we may not have
+ * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
+ */
+ vmx->nested.need_vmcs12_sync = true;
+ } else {
+ return -EINVAL;
+ }
+
+ if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
+ vmx->nested.smm.vmxon = true;
+ vmx->nested.vmxon = false;
+
+ if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
+ vmx->nested.smm.guest_mode = true;
+ }
+
+ vmcs12 = get_vmcs12(vcpu);
+ if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
+ return -EFAULT;
+
+ if (vmcs12->hdr.revision_id != VMCS12_REVISION)
+ return -EINVAL;
+
+ if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
+ return 0;
+
+ vmx->nested.nested_run_pending =
+ !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+
+ if (nested_cpu_has_shadow_vmcs(vmcs12) &&
+ vmcs12->vmcs_link_pointer != -1ull) {
+ struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
+
+ if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
+ return -EINVAL;
+
+ if (copy_from_user(shadow_vmcs12,
+ user_kvm_nested_state->data + VMCS12_SIZE,
+ sizeof(*vmcs12)))
+ return -EFAULT;
+
+ if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
+ !shadow_vmcs12->hdr.shadow_vmcs)
+ return -EINVAL;
+ }
+
+ if (nested_vmx_check_vmentry_prereqs(vcpu, vmcs12) ||
+ nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+ return -EINVAL;
+
+ vmx->nested.dirty_vmcs12 = true;
+ ret = nested_vmx_enter_non_root_mode(vcpu, false);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+void nested_vmx_vcpu_setup(void)
+{
+ if (enable_shadow_vmcs) {
+ /*
+ * At vCPU creation, "VMWRITE to any supported field
+ * in the VMCS" is supported, so use the more
+ * permissive vmx_vmread_bitmap to specify both read
+ * and write permissions for the shadow VMCS.
+ */
+ vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
+ vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap));
+ }
+}
+
+/*
+ * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
+ * returned for the various VMX controls MSRs when nested VMX is enabled.
+ * The same values should also be used to verify that vmcs12 control fields are
+ * valid during nested entry from L1 to L2.
+ * Each of these control msrs has a low and high 32-bit half: A low bit is on
+ * if the corresponding bit in the (32-bit) control field *must* be on, and a
+ * bit in the high half is on if the corresponding bit in the control field
+ * may be on. See also vmx_control_verify().
+ */
+void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
+ bool apicv)
+{
+ /*
+ * Note that as a general rule, the high half of the MSRs (bits in
+ * the control fields which may be 1) should be initialized by the
+ * intersection of the underlying hardware's MSR (i.e., features which
+ * can be supported) and the list of features we want to expose -
+ * because they are known to be properly supported in our code.
+ * Also, usually, the low half of the MSRs (bits which must be 1) can
+ * be set to 0, meaning that L1 may turn off any of these bits. The
+ * reason is that if one of these bits is necessary, it will appear
+ * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
+ * fields of vmcs01 and vmcs02, will turn these bits off - and
+ * nested_vmx_exit_reflected() will not pass related exits to L1.
+ * These rules have exceptions below.
+ * These rules have exceptions below.
+ */
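+
+ /*
+ * For example (bit chosen for illustration): if bit 3 (NMI
+ * exiting) were 1 in pinbased_ctls_low, every vmcs12 would have
+ * to enable NMI exiting, and if it were 0 in pinbased_ctls_high,
+ * every vmcs12 would have to keep NMI exiting clear; the check
+ * boils down to the fixed-bits test ((val & high) | low) == val.
+ */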
+
+ /* pin-based controls */
+ rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
+ msrs->pinbased_ctls_low,
+ msrs->pinbased_ctls_high);
+ msrs->pinbased_ctls_low |=
+ PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
+ msrs->pinbased_ctls_high &=
+ PIN_BASED_EXT_INTR_MASK |
+ PIN_BASED_NMI_EXITING |
+ PIN_BASED_VIRTUAL_NMIS |
+ (apicv ? PIN_BASED_POSTED_INTR : 0);
+ msrs->pinbased_ctls_high |=
+ PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
+ PIN_BASED_VMX_PREEMPTION_TIMER;
+
+ /* exit controls */
+ rdmsr(MSR_IA32_VMX_EXIT_CTLS,
+ msrs->exit_ctls_low,
+ msrs->exit_ctls_high);
+ msrs->exit_ctls_low =
+ VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
+
+ msrs->exit_ctls_high &=
+#ifdef CONFIG_X86_64
+ VM_EXIT_HOST_ADDR_SPACE_SIZE |
+#endif
+ VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
+ msrs->exit_ctls_high |=
+ VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
+ VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
+ VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
+
+ /* We support free control of debug control saving. */
+ msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
+
+ /* entry controls */
+ rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
+ msrs->entry_ctls_low,
+ msrs->entry_ctls_high);
+ msrs->entry_ctls_low =
+ VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
+ msrs->entry_ctls_high &=
+#ifdef CONFIG_X86_64
+ VM_ENTRY_IA32E_MODE |
+#endif
+ VM_ENTRY_LOAD_IA32_PAT;
+ msrs->entry_ctls_high |=
+ (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
+
+ /* We support free control of debug control loading. */
+ msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
+
+ /* cpu-based controls */
+ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
+ msrs->procbased_ctls_low,
+ msrs->procbased_ctls_high);
+ msrs->procbased_ctls_low =
+ CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
+ msrs->procbased_ctls_high &=
+ CPU_BASED_VIRTUAL_INTR_PENDING |
+ CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
+ CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
+ CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_CR3_STORE_EXITING |
+#ifdef CONFIG_X86_64
+ CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
+#endif
+ CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
+ CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
+ CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
+ CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
+ CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+ /*
+ * We can allow some features even when not supported by the
+ * hardware. For example, L1 can specify an MSR bitmap - and we
+ * can use it to avoid exits to L1 - even when L0 runs L2
+ * without MSR bitmaps.
+ */
+ msrs->procbased_ctls_high |=
+ CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
+ CPU_BASED_USE_MSR_BITMAPS;
+
+ /* We support free control of CR3 access interception. */
+ msrs->procbased_ctls_low &=
+ ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
+
+ /*
+ * secondary cpu-based controls. Do not include those that
+ * depend on CPUID bits, they are added later by vmx_cpuid_update.
+ */
+ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+ msrs->secondary_ctls_low,
+ msrs->secondary_ctls_high);
+ msrs->secondary_ctls_low = 0;
+ msrs->secondary_ctls_high &=
+ SECONDARY_EXEC_DESC |
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ SECONDARY_EXEC_WBINVD_EXITING;
+
+ /*
+ * We can emulate "VMCS shadowing," even if the hardware
+ * doesn't support it.
+ */
+ msrs->secondary_ctls_high |=
+ SECONDARY_EXEC_SHADOW_VMCS;
+
+ if (enable_ept) {
+ /* nested EPT: emulate EPT also to L1 */
+ msrs->secondary_ctls_high |=
+ SECONDARY_EXEC_ENABLE_EPT;
+ msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
+ VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
+ if (cpu_has_vmx_ept_execute_only())
+ msrs->ept_caps |=
+ VMX_EPT_EXECUTE_ONLY_BIT;
+ msrs->ept_caps &= ept_caps;
+ msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
+ VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
+ VMX_EPT_1GB_PAGE_BIT;
+ if (enable_ept_ad_bits) {
+ msrs->secondary_ctls_high |=
+ SECONDARY_EXEC_ENABLE_PML;
+ msrs->ept_caps |= VMX_EPT_AD_BIT;
+ }
+ }
+
+ if (cpu_has_vmx_vmfunc()) {
+ msrs->secondary_ctls_high |=
+ SECONDARY_EXEC_ENABLE_VMFUNC;
+ /*
+ * Advertise EPTP switching unconditionally
+ * since we emulate it
+ */
+ if (enable_ept)
+ msrs->vmfunc_controls =
+ VMX_VMFUNC_EPTP_SWITCHING;
+ }
+
+ /*
+ * Old versions of KVM use the single-context version without
+ * checking for support, so declare that it is supported even
+ * though it is treated as global context. The alternative is
+ * not failing the single-context invvpid, and it is worse.
+ */
+ if (enable_vpid) {
+ msrs->secondary_ctls_high |=
+ SECONDARY_EXEC_ENABLE_VPID;
+ msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
+ VMX_VPID_EXTENT_SUPPORTED_MASK;
+ }
+
+ if (enable_unrestricted_guest)
+ msrs->secondary_ctls_high |=
+ SECONDARY_EXEC_UNRESTRICTED_GUEST;
+
+ if (flexpriority_enabled)
+ msrs->secondary_ctls_high |=
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+
+ /* miscellaneous data */
+ rdmsr(MSR_IA32_VMX_MISC,
+ msrs->misc_low,
+ msrs->misc_high);
+ msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
+ msrs->misc_low |=
+ MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
+ VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
+ VMX_MISC_ACTIVITY_HLT;
+ msrs->misc_high = 0;
+
+ /*
+ * This MSR reports some information about VMX support. We
+ * should return information about the VMX we emulate for the
+ * guest, and the VMCS structure we give it - not about the
+ * VMX support of the underlying hardware.
+ */
+ msrs->basic =
+ VMCS12_REVISION |
+ VMX_BASIC_TRUE_CTLS |
+ ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
+ (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
+
+ if (cpu_has_vmx_basic_inout())
+ msrs->basic |= VMX_BASIC_INOUT;
+
+ /*
+ * These MSRs specify bits which the guest must keep fixed on
+ * while L1 is in VMXON mode (in L1's root mode, or running an L2).
+ * We picked the standard core2 setting.
+ */
+#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
+#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
+ msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
+ msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
+
+ /* These MSRs specify bits which the guest must keep fixed off. */
+ rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
+ rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
+
+ /* highest index: VMX_PREEMPTION_TIMER_VALUE */
+ msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
+}
+
+void nested_vmx_hardware_unsetup(void)
+{
+ int i;
+
+ if (enable_shadow_vmcs) {
+ for (i = 0; i < VMX_BITMAP_NR; i++)
+ free_page((unsigned long)vmx_bitmap[i]);
+ }
+}
+
+__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
+{
+ int i;
+
+ if (!cpu_has_vmx_shadow_vmcs())
+ enable_shadow_vmcs = 0;
+ if (enable_shadow_vmcs) {
+ for (i = 0; i < VMX_BITMAP_NR; i++) {
+ vmx_bitmap[i] = (unsigned long *)
+ __get_free_page(GFP_KERNEL);
+ if (!vmx_bitmap[i]) {
+ nested_vmx_hardware_unsetup();
+ return -ENOMEM;
+ }
+ }
+
+ init_vmcs_shadow_fields();
+ }
+
+ exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
+ exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
+ exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
+ exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
+ exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
+ exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
+ exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
+ exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
+ exit_handlers[EXIT_REASON_VMON] = handle_vmon;
+ exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
+ exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
+ exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
+
+ kvm_x86_ops->check_nested_events = vmx_check_nested_events;
+ kvm_x86_ops->get_nested_state = vmx_get_nested_state;
+ kvm_x86_ops->set_nested_state = vmx_set_nested_state;
+ kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
+ kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
+ kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
+
+ return 0;
+}
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
new file mode 100644
index 000000000000..e847ff1019a2
--- /dev/null
+++ b/arch/x86/kvm/vmx/nested.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_NESTED_H
+#define __KVM_X86_VMX_NESTED_H
+
+#include "kvm_cache_regs.h"
+#include "vmcs12.h"
+#include "vmx.h"
+
+void vmx_leave_nested(struct kvm_vcpu *vcpu);
+void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
+ bool apicv);
+void nested_vmx_hardware_unsetup(void);
+__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
+void nested_vmx_vcpu_setup(void);
+void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
+int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry);
+bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
+void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+ u32 exit_intr_info, unsigned long exit_qualification);
+void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu);
+int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
+int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
+int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
+ u32 vmx_instruction_info, bool wr, gva_t *ret);
+
+static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.cached_vmcs12;
+}
+
+static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
+}
+
+static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * In case we do two consecutive get/set_nested_state()s while L2 was
+ * running, hv_evmcs may end up not being mapped (we map it from
+ * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always
+ * have vmcs12 if it is true.
+ */
+ return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
+ vmx->nested.hv_evmcs;
+}
+
+static inline unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
+{
+ /* return the page table to be shadowed - in our case, EPT12 */
+ return get_vmcs12(vcpu)->ept_pointer;
+}
+
+static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
+{
+ return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
+}
+
+/*
+ * Reflect a VM Exit into L1.
+ */
+static inline int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu,
+ u32 exit_reason)
+{
+ u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+ /*
+ * At this point, the exit interruption info in exit_intr_info
+ * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT
+ * we need to query the in-kernel LAPIC.
+ */
+ WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
+ if ((exit_intr_info &
+ (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
+ (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ vmcs12->vm_exit_intr_error_code =
+ vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
+ }
+
+ nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
+ vmcs_readl(EXIT_QUALIFICATION));
+ return 1;
+}
+
+/*
+ * Return the cr0 value that a nested guest would read. This is a combination
+ * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
+ * its hypervisor (cr0_read_shadow).
+ */
+static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
+{
+ return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
+ (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
+}
+static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
+{
+ return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
+ (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
+}
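+
+/*
+ * For example, with cr0_guest_host_mask == X86_CR0_TS an L2 read of CR0
+ * returns the TS bit from cr0_read_shadow and every other bit from
+ * guest_cr0, mirroring what the hardware read shadow would return.
+ */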
+
+static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
+{
+ return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
+}
+
+/*
+ * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
+ * to modify any valid field of the VMCS, or are the VM-exit
+ * information fields read-only?
+ */
+static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.msrs.misc_low &
+ MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
+}
+
+static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
+}
+
+static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
+ CPU_BASED_MONITOR_TRAP_FLAG;
+}
+
+static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
+ SECONDARY_EXEC_SHADOW_VMCS;
+}
+
+static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
+{
+ return vmcs12->cpu_based_vm_exec_control & bit;
+}
+
+static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
+{
+ return (vmcs12->cpu_based_vm_exec_control &
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
+ (vmcs12->secondary_vm_exec_control & bit);
+}
+
+static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
+{
+ return vmcs12->pin_based_vm_exec_control &
+ PIN_BASED_VMX_PREEMPTION_TIMER;
+}
+
+static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
+{
+ return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
+}
+
+static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
+{
+ return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
+}
+
+static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
+}
+
+static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
+}
+
+static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
+}
+
+static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
+}
+
+static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
+}
+
+static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
+}
+
+static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+}
+
+static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
+{
+ return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
+}
+
+static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
+}
+
+static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has_vmfunc(vmcs12) &&
+ (vmcs12->vm_function_control &
+ VMX_VMFUNC_EPTP_SWITCHING);
+}
+
+static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
+}
+
+static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
+{
+ return vmcs12->vm_exit_controls &
+ VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
+}
+
+/*
+ * In nested virtualization, check if L1 asked to exit on external interrupts.
+ * For most existing hypervisors, this will always return true.
+ */
+static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
+{
+ return get_vmcs12(vcpu)->pin_based_vm_exec_control &
+ PIN_BASED_EXT_INTR_MASK;
+}
+
+/*
+ * if fixed0[i] == 1: val[i] must be 1
+ * if fixed1[i] == 0: val[i] must be 0
+ */
+static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
+{
+ return ((val & fixed1) | fixed0) == val;
+}
+
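+/*
+ * Worked example for fixed_bits_valid() (illustrative values): with
+ * fixed0 = 0x1 and fixed1 = ~0x4, bit 0 of val must be 1 and bit 2 must
+ * be 0. val = 0x1 passes; val = 0x5 fails (bit 2 set); val = 0x0 fails
+ * (bit 0 clear).
+ */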
+static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
+ u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
+ SECONDARY_EXEC_UNRESTRICTED_GUEST &&
+ nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
+ fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
+
+ return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
+ u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
+
+ return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
+ u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;
+
+ return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+/* No difference in the restrictions on guest and host CR4 in VMX operation. */
+#define nested_guest_cr4_valid nested_cr4_valid
+#define nested_host_cr4_valid nested_cr4_valid
+
+#endif /* __KVM_X86_VMX_NESTED_H */
diff --git a/arch/x86/kvm/vmx/ops.h b/arch/x86/kvm/vmx/ops.h
new file mode 100644
index 000000000000..b8e50f76fefc
--- /dev/null
+++ b/arch/x86/kvm/vmx/ops.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_INSN_H
+#define __KVM_X86_VMX_INSN_H
+
+#include <linux/nospec.h>
+
+#include <asm/kvm_host.h>
+#include <asm/vmx.h>
+
+#include "evmcs.h"
+#include "vmcs.h"
+
+#define __ex(x) __kvm_handle_fault_on_reboot(x)
+#define __ex_clear(x, reg) \
+ ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg)
+
+static __always_inline void vmcs_check16(unsigned long field)
+{
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
+ "16-bit accessor invalid for 64-bit field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
+ "16-bit accessor invalid for 64-bit high field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
+ "16-bit accessor invalid for 32-bit high field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
+ "16-bit accessor invalid for natural width field");
+}
+
+static __always_inline void vmcs_check32(unsigned long field)
+{
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
+ "32-bit accessor invalid for 16-bit field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
+ "32-bit accessor invalid for natural width field");
+}
+
+static __always_inline void vmcs_check64(unsigned long field)
+{
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
+ "64-bit accessor invalid for 16-bit field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
+ "64-bit accessor invalid for 64-bit high field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
+ "64-bit accessor invalid for 32-bit field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
+ "64-bit accessor invalid for natural width field");
+}
+
+static __always_inline void vmcs_checkl(unsigned long field)
+{
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
+ "Natural width accessor invalid for 16-bit field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
+ "Natural width accessor invalid for 64-bit field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
+ "Natural width accessor invalid for 64-bit high field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
+ "Natural width accessor invalid for 32-bit field");
+}
+
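+/*
+ * The checks above rely on the architectural VMCS field encoding: bits
+ * 14:13 give the field width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit,
+ * 3 = natural width) and bit 0 selects the high half of a 64-bit field.
+ * E.g. (field & 0x6001) == 0x2001 matches exactly the *_HIGH halves of
+ * 64-bit fields.
+ */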
+static __always_inline unsigned long __vmcs_readl(unsigned long field)
+{
+ unsigned long value;
+
+ asm volatile (__ex_clear("vmread %1, %0", "%k0")
+ : "=r"(value) : "r"(field));
+ return value;
+}
+
+static __always_inline u16 vmcs_read16(unsigned long field)
+{
+ vmcs_check16(field);
+ if (static_branch_unlikely(&enable_evmcs))
+ return evmcs_read16(field);
+ return __vmcs_readl(field);
+}
+
+static __always_inline u32 vmcs_read32(unsigned long field)
+{
+ vmcs_check32(field);
+ if (static_branch_unlikely(&enable_evmcs))
+ return evmcs_read32(field);
+ return __vmcs_readl(field);
+}
+
+static __always_inline u64 vmcs_read64(unsigned long field)
+{
+ vmcs_check64(field);
+ if (static_branch_unlikely(&enable_evmcs))
+ return evmcs_read64(field);
+#ifdef CONFIG_X86_64
+ return __vmcs_readl(field);
+#else
+ return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
+#endif
+}
+
+static __always_inline unsigned long vmcs_readl(unsigned long field)
+{
+ vmcs_checkl(field);
+ if (static_branch_unlikely(&enable_evmcs))
+ return evmcs_read64(field);
+ return __vmcs_readl(field);
+}
+
+static noinline void vmwrite_error(unsigned long field, unsigned long value)
+{
+ printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
+ field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
+ dump_stack();
+}
+
+static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
+{
+ bool error;
+
+ asm volatile (__ex("vmwrite %2, %1") CC_SET(na)
+ : CC_OUT(na) (error) : "r"(field), "rm"(value));
+ if (unlikely(error))
+ vmwrite_error(field, value);
+}
+
+static __always_inline void vmcs_write16(unsigned long field, u16 value)
+{
+ vmcs_check16(field);
+ if (static_branch_unlikely(&enable_evmcs))
+ return evmcs_write16(field, value);
+
+ __vmcs_writel(field, value);
+}
+
+static __always_inline void vmcs_write32(unsigned long field, u32 value)
+{
+ vmcs_check32(field);
+ if (static_branch_unlikely(&enable_evmcs))
+ return evmcs_write32(field, value);
+
+ __vmcs_writel(field, value);
+}
+
+static __always_inline void vmcs_write64(unsigned long field, u64 value)
+{
+ vmcs_check64(field);
+ if (static_branch_unlikely(&enable_evmcs))
+ return evmcs_write64(field, value);
+
+ __vmcs_writel(field, value);
+#ifndef CONFIG_X86_64
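+ /* Compiler barrier between the two 32-bit halves of the write. */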
+ asm volatile ("");
+ __vmcs_writel(field+1, value >> 32);
+#endif
+}
+
+static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
+{
+ vmcs_checkl(field);
+ if (static_branch_unlikely(&enable_evmcs))
+ return evmcs_write64(field, value);
+
+ __vmcs_writel(field, value);
+}
+
+static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
+{
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
+ "vmcs_clear_bits does not support 64-bit fields");
+ if (static_branch_unlikely(&enable_evmcs))
+ return evmcs_write32(field, evmcs_read32(field) & ~mask);
+
+ __vmcs_writel(field, __vmcs_readl(field) & ~mask);
+}
+
+static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
+{
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
+ "vmcs_set_bits does not support 64-bit fields");
+ if (static_branch_unlikely(&enable_evmcs))
+ return evmcs_write32(field, evmcs_read32(field) | mask);
+
+ __vmcs_writel(field, __vmcs_readl(field) | mask);
+}
+
+static inline void vmcs_clear(struct vmcs *vmcs)
+{
+ u64 phys_addr = __pa(vmcs);
+ bool error;
+
+ asm volatile (__ex("vmclear %1") CC_SET(na)
+ : CC_OUT(na) (error) : "m"(phys_addr));
+ if (unlikely(error))
+ printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
+ vmcs, phys_addr);
+}
+
+static inline void vmcs_load(struct vmcs *vmcs)
+{
+ u64 phys_addr = __pa(vmcs);
+ bool error;
+
+ if (static_branch_unlikely(&enable_evmcs))
+ return evmcs_load(phys_addr);
+
+ asm volatile (__ex("vmptrld %1") CC_SET(na)
+ : CC_OUT(na) (error) : "m"(phys_addr));
+ if (unlikely(error))
+ printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
+ vmcs, phys_addr);
+}
+
+static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
+{
+ struct {
+ u64 vpid : 16;
+ u64 rsvd : 48;
+ u64 gva;
+ } operand = { vpid, 0, gva };
+ bool error;
+
+ asm volatile (__ex("invvpid %2, %1") CC_SET(na)
+ : CC_OUT(na) (error) : "r"(ext), "m"(operand));
+ BUG_ON(error);
+}
+
+static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
+{
+ struct {
+ u64 eptp, gpa;
+ } operand = {eptp, gpa};
+ bool error;
+
+ asm volatile (__ex("invept %2, %1") CC_SET(na)
+ : CC_OUT(na) (error) : "r"(ext), "m"(operand));
+ BUG_ON(error);
+}
+
+static inline bool vpid_sync_vcpu_addr(int vpid, gva_t addr)
+{
+ if (vpid == 0)
+ return true;
+
+ if (cpu_has_vmx_invvpid_individual_addr()) {
+ __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
+ return true;
+ }
+
+ return false;
+}
+
+static inline void vpid_sync_vcpu_single(int vpid)
+{
+ if (vpid == 0)
+ return;
+
+ if (cpu_has_vmx_invvpid_single())
+ __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
+}
+
+static inline void vpid_sync_vcpu_global(void)
+{
+ if (cpu_has_vmx_invvpid_global())
+ __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
+}
+
+static inline void vpid_sync_context(int vpid)
+{
+ if (cpu_has_vmx_invvpid_single())
+ vpid_sync_vcpu_single(vpid);
+ else
+ vpid_sync_vcpu_global();
+}
+
+static inline void ept_sync_global(void)
+{
+ __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
+}
+
+static inline void ept_sync_context(u64 eptp)
+{
+ if (cpu_has_vmx_invept_context())
+ __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
+ else
+ ept_sync_global();
+}
+
+#endif /* __KVM_X86_VMX_INSN_H */
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 5ab4a364348e..5ab4a364348e 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
new file mode 100644
index 000000000000..6def3ba88e3b
--- /dev/null
+++ b/arch/x86/kvm/vmx/vmcs.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_VMCS_H
+#define __KVM_X86_VMX_VMCS_H
+
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/nospec.h>
+
+#include <asm/kvm.h>
+#include <asm/vmx.h>
+
+#include "capabilities.h"
+
+struct vmcs_hdr {
+ u32 revision_id:31;
+ u32 shadow_vmcs:1;
+};
+
+struct vmcs {
+ struct vmcs_hdr hdr;
+ u32 abort;
+ char data[0];
+};
+
+DECLARE_PER_CPU(struct vmcs *, current_vmcs);
+
+/*
+ * vmcs_host_state tracks registers that are loaded from the VMCS on VMEXIT
+ * and whose values change infrequently, but are not constant. I.e. this is
+ * used as a write-through cache of the corresponding VMCS fields.
+ */
+struct vmcs_host_state {
+ unsigned long cr3; /* May not match real cr3 */
+ unsigned long cr4; /* May not match real cr4 */
+ unsigned long gs_base;
+ unsigned long fs_base;
+
+ u16 fs_sel, gs_sel, ldt_sel;
+#ifdef CONFIG_X86_64
+ u16 ds_sel, es_sel;
+#endif
+};
+
+/*
+ * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
+ * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
+ * loaded on this CPU (so we can clear them if the CPU goes down).
+ */
+struct loaded_vmcs {
+ struct vmcs *vmcs;
+ struct vmcs *shadow_vmcs;
+ int cpu;
+ bool launched;
+ bool nmi_known_unmasked;
+ bool hv_timer_armed;
+ /* Support for vnmi-less CPUs */
+ int soft_vnmi_blocked;
+ ktime_t entry_time;
+ s64 vnmi_blocked_time;
+ unsigned long *msr_bitmap;
+ struct list_head loaded_vmcss_on_cpu_link;
+ struct vmcs_host_state host_state;
+};
+
+static inline bool is_exception_n(u32 intr_info, u8 vector)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+ INTR_INFO_VALID_MASK)) ==
+ (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
+}
+
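+/*
+ * Encoding example (illustrative): a #PF that delivers an error code has
+ * intr_info = 0x80000b0e: vector 14 (bits 7:0), type 3 = hardware
+ * exception (bits 10:8), error-code-valid (bit 11) and valid (bit 31).
+ * The mask in is_exception_n() drops the error-code bit, so the compare
+ * reduces to INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK.
+ */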
+static inline bool is_debug(u32 intr_info)
+{
+ return is_exception_n(intr_info, DB_VECTOR);
+}
+
+static inline bool is_breakpoint(u32 intr_info)
+{
+ return is_exception_n(intr_info, BP_VECTOR);
+}
+
+static inline bool is_page_fault(u32 intr_info)
+{
+ return is_exception_n(intr_info, PF_VECTOR);
+}
+
+static inline bool is_invalid_opcode(u32 intr_info)
+{
+ return is_exception_n(intr_info, UD_VECTOR);
+}
+
+static inline bool is_gp_fault(u32 intr_info)
+{
+ return is_exception_n(intr_info, GP_VECTOR);
+}
+
+static inline bool is_machine_check(u32 intr_info)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
+ INTR_INFO_VALID_MASK)) ==
+ (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
+}
+
+/* Undocumented: icebp/int1 */
+static inline bool is_icebp(u32 intr_info)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+ == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
+}
+
+static inline bool is_nmi(u32 intr_info)
+{
+ return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+ == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
+}
+
+enum vmcs_field_width {
+ VMCS_FIELD_WIDTH_U16 = 0,
+ VMCS_FIELD_WIDTH_U64 = 1,
+ VMCS_FIELD_WIDTH_U32 = 2,
+ VMCS_FIELD_WIDTH_NATURAL_WIDTH = 3
+};
+
+static inline int vmcs_field_width(unsigned long field)
+{
+ if (0x1 & field) /* the *_HIGH fields are all 32 bit */
+ return VMCS_FIELD_WIDTH_U32;
+ return (field >> 13) & 0x3;
+}
+
+static inline int vmcs_field_readonly(unsigned long field)
+{
+ return (((field >> 10) & 0x3) == 1);
+}
+
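+/*
+ * Decode examples (illustrative): EXIT_QUALIFICATION (0x6400) has width
+ * (0x6400 >> 13) & 0x3 = 3, i.e. natural width, and type
+ * (0x6400 >> 10) & 0x3 = 1, i.e. read-only; GUEST_CR0 (0x6800) is
+ * natural width, type 2 (guest state), and therefore writable.
+ */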
+#endif /* __KVM_X86_VMX_VMCS_H */
diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c
new file mode 100644
index 000000000000..53dfb401316d
--- /dev/null
+++ b/arch/x86/kvm/vmx/vmcs12.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmcs12.h"
+
+#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
+#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
+#define FIELD(number, name) [ROL16(number, 6)] = VMCS12_OFFSET(name)
+#define FIELD64(number, name) \
+ FIELD(number, name), \
+ [ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
+
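+/*
+ * The table below is indexed by ROL16(encoding, 6): rotating the 16-bit
+ * field encoding left by 6 moves the width/type bits into the low bits
+ * and the 9-bit field index into the high bits, yielding a compact table
+ * index. E.g. GUEST_ES_SELECTOR (0x0800) maps to ROL16(0x0800, 6) = 2.
+ */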
+const unsigned short vmcs_field_to_offset_table[] = {
+ FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
+ FIELD(POSTED_INTR_NV, posted_intr_nv),
+ FIELD(GUEST_ES_SELECTOR, guest_es_selector),
+ FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
+ FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
+ FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
+ FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
+ FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
+ FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
+ FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
+ FIELD(GUEST_INTR_STATUS, guest_intr_status),
+ FIELD(GUEST_PML_INDEX, guest_pml_index),
+ FIELD(HOST_ES_SELECTOR, host_es_selector),
+ FIELD(HOST_CS_SELECTOR, host_cs_selector),
+ FIELD(HOST_SS_SELECTOR, host_ss_selector),
+ FIELD(HOST_DS_SELECTOR, host_ds_selector),
+ FIELD(HOST_FS_SELECTOR, host_fs_selector),
+ FIELD(HOST_GS_SELECTOR, host_gs_selector),
+ FIELD(HOST_TR_SELECTOR, host_tr_selector),
+ FIELD64(IO_BITMAP_A, io_bitmap_a),
+ FIELD64(IO_BITMAP_B, io_bitmap_b),
+ FIELD64(MSR_BITMAP, msr_bitmap),
+ FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
+ FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
+ FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
+ FIELD64(PML_ADDRESS, pml_address),
+ FIELD64(TSC_OFFSET, tsc_offset),
+ FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
+ FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
+ FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
+ FIELD64(VM_FUNCTION_CONTROL, vm_function_control),
+ FIELD64(EPT_POINTER, ept_pointer),
+ FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
+ FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
+ FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
+ FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
+ FIELD64(EPTP_LIST_ADDRESS, eptp_list_address),
+ FIELD64(VMREAD_BITMAP, vmread_bitmap),
+ FIELD64(VMWRITE_BITMAP, vmwrite_bitmap),
+ FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
+ FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
+ FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
+ FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
+ FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
+ FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
+ FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
+ FIELD64(GUEST_PDPTR0, guest_pdptr0),
+ FIELD64(GUEST_PDPTR1, guest_pdptr1),
+ FIELD64(GUEST_PDPTR2, guest_pdptr2),
+ FIELD64(GUEST_PDPTR3, guest_pdptr3),
+ FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
+ FIELD64(HOST_IA32_PAT, host_ia32_pat),
+ FIELD64(HOST_IA32_EFER, host_ia32_efer),
+ FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
+ FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
+ FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
+ FIELD(EXCEPTION_BITMAP, exception_bitmap),
+ FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
+ FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
+ FIELD(CR3_TARGET_COUNT, cr3_target_count),
+ FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
+ FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
+ FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
+ FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
+ FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
+ FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
+ FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
+ FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
+ FIELD(TPR_THRESHOLD, tpr_threshold),
+ FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
+ FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
+ FIELD(VM_EXIT_REASON, vm_exit_reason),
+ FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
+ FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
+ FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
+ FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
+ FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
+ FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
+ FIELD(GUEST_ES_LIMIT, guest_es_limit),
+ FIELD(GUEST_CS_LIMIT, guest_cs_limit),
+ FIELD(GUEST_SS_LIMIT, guest_ss_limit),
+ FIELD(GUEST_DS_LIMIT, guest_ds_limit),
+ FIELD(GUEST_FS_LIMIT, guest_fs_limit),
+ FIELD(GUEST_GS_LIMIT, guest_gs_limit),
+ FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
+ FIELD(GUEST_TR_LIMIT, guest_tr_limit),
+ FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
+ FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
+ FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
+ FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
+ FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
+ FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
+ FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
+ FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
+ FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
+ FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
+ FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
+ FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
+ FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
+ FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
+ FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
+ FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
+ FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
+ FIELD(CR0_READ_SHADOW, cr0_read_shadow),
+ FIELD(CR4_READ_SHADOW, cr4_read_shadow),
+ FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
+ FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
+ FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
+ FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
+ FIELD(EXIT_QUALIFICATION, exit_qualification),
+ FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
+ FIELD(GUEST_CR0, guest_cr0),
+ FIELD(GUEST_CR3, guest_cr3),
+ FIELD(GUEST_CR4, guest_cr4),
+ FIELD(GUEST_ES_BASE, guest_es_base),
+ FIELD(GUEST_CS_BASE, guest_cs_base),
+ FIELD(GUEST_SS_BASE, guest_ss_base),
+ FIELD(GUEST_DS_BASE, guest_ds_base),
+ FIELD(GUEST_FS_BASE, guest_fs_base),
+ FIELD(GUEST_GS_BASE, guest_gs_base),
+ FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
+ FIELD(GUEST_TR_BASE, guest_tr_base),
+ FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
+ FIELD(GUEST_IDTR_BASE, guest_idtr_base),
+ FIELD(GUEST_DR7, guest_dr7),
+ FIELD(GUEST_RSP, guest_rsp),
+ FIELD(GUEST_RIP, guest_rip),
+ FIELD(GUEST_RFLAGS, guest_rflags),
+ FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
+ FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
+ FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
+ FIELD(HOST_CR0, host_cr0),
+ FIELD(HOST_CR3, host_cr3),
+ FIELD(HOST_CR4, host_cr4),
+ FIELD(HOST_FS_BASE, host_fs_base),
+ FIELD(HOST_GS_BASE, host_gs_base),
+ FIELD(HOST_TR_BASE, host_tr_base),
+ FIELD(HOST_GDTR_BASE, host_gdtr_base),
+ FIELD(HOST_IDTR_BASE, host_idtr_base),
+ FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
+ FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
+ FIELD(HOST_RSP, host_rsp),
+ FIELD(HOST_RIP, host_rip),
+};
+const unsigned int nr_vmcs12_fields = ARRAY_SIZE(vmcs_field_to_offset_table);
diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h
new file mode 100644
index 000000000000..3a742428ad17
--- /dev/null
+++ b/arch/x86/kvm/vmx/vmcs12.h
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_VMCS12_H
+#define __KVM_X86_VMX_VMCS12_H
+
+#include <linux/build_bug.h>
+
+#include "vmcs.h"
+
+/*
+ * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
+ * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
+ * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
+ * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
+ * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
+ * More than one of these structures may exist, if L1 runs multiple L2 guests.
+ * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
+ * underlying hardware which will be used to run L2.
+ * This structure is packed to ensure that its layout is identical across
+ * machines (necessary for live migration).
+ *
+ * IMPORTANT: Changing the layout of existing fields in this structure
+ * will break save/restore compatibility with older kvm releases. When
+ * adding new fields, either use space in the reserved padding* arrays
+ * or add the new fields to the end of the structure.
+ */
+typedef u64 natural_width;
+struct __packed vmcs12 {
+ /* According to the Intel spec, a VMCS region must start with the
+ * following two fields. Then follow implementation-specific data.
+ */
+ struct vmcs_hdr hdr;
+ u32 abort;
+
+ u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
+ u32 padding[7]; /* room for future expansion */
+
+ u64 io_bitmap_a;
+ u64 io_bitmap_b;
+ u64 msr_bitmap;
+ u64 vm_exit_msr_store_addr;
+ u64 vm_exit_msr_load_addr;
+ u64 vm_entry_msr_load_addr;
+ u64 tsc_offset;
+ u64 virtual_apic_page_addr;
+ u64 apic_access_addr;
+ u64 posted_intr_desc_addr;
+ u64 ept_pointer;
+ u64 eoi_exit_bitmap0;
+ u64 eoi_exit_bitmap1;
+ u64 eoi_exit_bitmap2;
+ u64 eoi_exit_bitmap3;
+ u64 xss_exit_bitmap;
+ u64 guest_physical_address;
+ u64 vmcs_link_pointer;
+ u64 guest_ia32_debugctl;
+ u64 guest_ia32_pat;
+ u64 guest_ia32_efer;
+ u64 guest_ia32_perf_global_ctrl;
+ u64 guest_pdptr0;
+ u64 guest_pdptr1;
+ u64 guest_pdptr2;
+ u64 guest_pdptr3;
+ u64 guest_bndcfgs;
+ u64 host_ia32_pat;
+ u64 host_ia32_efer;
+ u64 host_ia32_perf_global_ctrl;
+ u64 vmread_bitmap;
+ u64 vmwrite_bitmap;
+ u64 vm_function_control;
+ u64 eptp_list_address;
+ u64 pml_address;
+ u64 padding64[3]; /* room for future expansion */
+ /*
+ * To allow migration of L1 (complete with its L2 guests) between
+ * machines of different natural widths (32 or 64 bit), we cannot have
+ * unsigned long fields with no explicit size. We use u64 (aliased
+ * natural_width) instead. Luckily, x86 is little-endian.
+ */
+ natural_width cr0_guest_host_mask;
+ natural_width cr4_guest_host_mask;
+ natural_width cr0_read_shadow;
+ natural_width cr4_read_shadow;
+ natural_width cr3_target_value0;
+ natural_width cr3_target_value1;
+ natural_width cr3_target_value2;
+ natural_width cr3_target_value3;
+ natural_width exit_qualification;
+ natural_width guest_linear_address;
+ natural_width guest_cr0;
+ natural_width guest_cr3;
+ natural_width guest_cr4;
+ natural_width guest_es_base;
+ natural_width guest_cs_base;
+ natural_width guest_ss_base;
+ natural_width guest_ds_base;
+ natural_width guest_fs_base;
+ natural_width guest_gs_base;
+ natural_width guest_ldtr_base;
+ natural_width guest_tr_base;
+ natural_width guest_gdtr_base;
+ natural_width guest_idtr_base;
+ natural_width guest_dr7;
+ natural_width guest_rsp;
+ natural_width guest_rip;
+ natural_width guest_rflags;
+ natural_width guest_pending_dbg_exceptions;
+ natural_width guest_sysenter_esp;
+ natural_width guest_sysenter_eip;
+ natural_width host_cr0;
+ natural_width host_cr3;
+ natural_width host_cr4;
+ natural_width host_fs_base;
+ natural_width host_gs_base;
+ natural_width host_tr_base;
+ natural_width host_gdtr_base;
+ natural_width host_idtr_base;
+ natural_width host_ia32_sysenter_esp;
+ natural_width host_ia32_sysenter_eip;
+ natural_width host_rsp;
+ natural_width host_rip;
+ natural_width paddingl[8]; /* room for future expansion */
+ u32 pin_based_vm_exec_control;
+ u32 cpu_based_vm_exec_control;
+ u32 exception_bitmap;
+ u32 page_fault_error_code_mask;
+ u32 page_fault_error_code_match;
+ u32 cr3_target_count;
+ u32 vm_exit_controls;
+ u32 vm_exit_msr_store_count;
+ u32 vm_exit_msr_load_count;
+ u32 vm_entry_controls;
+ u32 vm_entry_msr_load_count;
+ u32 vm_entry_intr_info_field;
+ u32 vm_entry_exception_error_code;
+ u32 vm_entry_instruction_len;
+ u32 tpr_threshold;
+ u32 secondary_vm_exec_control;
+ u32 vm_instruction_error;
+ u32 vm_exit_reason;
+ u32 vm_exit_intr_info;
+ u32 vm_exit_intr_error_code;
+ u32 idt_vectoring_info_field;
+ u32 idt_vectoring_error_code;
+ u32 vm_exit_instruction_len;
+ u32 vmx_instruction_info;
+ u32 guest_es_limit;
+ u32 guest_cs_limit;
+ u32 guest_ss_limit;
+ u32 guest_ds_limit;
+ u32 guest_fs_limit;
+ u32 guest_gs_limit;
+ u32 guest_ldtr_limit;
+ u32 guest_tr_limit;
+ u32 guest_gdtr_limit;
+ u32 guest_idtr_limit;
+ u32 guest_es_ar_bytes;
+ u32 guest_cs_ar_bytes;
+ u32 guest_ss_ar_bytes;
+ u32 guest_ds_ar_bytes;
+ u32 guest_fs_ar_bytes;
+ u32 guest_gs_ar_bytes;
+ u32 guest_ldtr_ar_bytes;
+ u32 guest_tr_ar_bytes;
+ u32 guest_interruptibility_info;
+ u32 guest_activity_state;
+ u32 guest_sysenter_cs;
+ u32 host_ia32_sysenter_cs;
+ u32 vmx_preemption_timer_value;
+ u32 padding32[7]; /* room for future expansion */
+ u16 virtual_processor_id;
+ u16 posted_intr_nv;
+ u16 guest_es_selector;
+ u16 guest_cs_selector;
+ u16 guest_ss_selector;
+ u16 guest_ds_selector;
+ u16 guest_fs_selector;
+ u16 guest_gs_selector;
+ u16 guest_ldtr_selector;
+ u16 guest_tr_selector;
+ u16 guest_intr_status;
+ u16 host_es_selector;
+ u16 host_cs_selector;
+ u16 host_ss_selector;
+ u16 host_ds_selector;
+ u16 host_fs_selector;
+ u16 host_gs_selector;
+ u16 host_tr_selector;
+ u16 guest_pml_index;
+};
+
+/*
+ * VMCS12_REVISION is an arbitrary id that should be changed if the content or
+ * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
+ * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
+ *
+ * IMPORTANT: Changing this value will break save/restore compatibility with
+ * older kvm releases.
+ */
+#define VMCS12_REVISION 0x11e57ed0
+
+/*
+ * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
+ * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used
+ * by the current implementation, 4K is reserved to avoid future
+ * complications.
+ */
+#define VMCS12_SIZE 0x1000
+
+/*
+ * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
+ * supported VMCS12 field encoding.
+ */
+#define VMCS12_MAX_FIELD_INDEX 0x17
+
+/*
+ * For save/restore compatibility, the vmcs12 field offsets must not change.
+ */
+#define CHECK_OFFSET(field, loc) \
+ BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc), \
+ "Offset of " #field " in struct vmcs12 has changed.")
+
+static inline void vmx_check_vmcs12_offsets(void)
+{
+ CHECK_OFFSET(hdr, 0);
+ CHECK_OFFSET(abort, 4);
+ CHECK_OFFSET(launch_state, 8);
+ CHECK_OFFSET(io_bitmap_a, 40);
+ CHECK_OFFSET(io_bitmap_b, 48);
+ CHECK_OFFSET(msr_bitmap, 56);
+ CHECK_OFFSET(vm_exit_msr_store_addr, 64);
+ CHECK_OFFSET(vm_exit_msr_load_addr, 72);
+ CHECK_OFFSET(vm_entry_msr_load_addr, 80);
+ CHECK_OFFSET(tsc_offset, 88);
+ CHECK_OFFSET(virtual_apic_page_addr, 96);
+ CHECK_OFFSET(apic_access_addr, 104);
+ CHECK_OFFSET(posted_intr_desc_addr, 112);
+ CHECK_OFFSET(ept_pointer, 120);
+ CHECK_OFFSET(eoi_exit_bitmap0, 128);
+ CHECK_OFFSET(eoi_exit_bitmap1, 136);
+ CHECK_OFFSET(eoi_exit_bitmap2, 144);
+ CHECK_OFFSET(eoi_exit_bitmap3, 152);
+ CHECK_OFFSET(xss_exit_bitmap, 160);
+ CHECK_OFFSET(guest_physical_address, 168);
+ CHECK_OFFSET(vmcs_link_pointer, 176);
+ CHECK_OFFSET(guest_ia32_debugctl, 184);
+ CHECK_OFFSET(guest_ia32_pat, 192);
+ CHECK_OFFSET(guest_ia32_efer, 200);
+ CHECK_OFFSET(guest_ia32_perf_global_ctrl, 208);
+ CHECK_OFFSET(guest_pdptr0, 216);
+ CHECK_OFFSET(guest_pdptr1, 224);
+ CHECK_OFFSET(guest_pdptr2, 232);
+ CHECK_OFFSET(guest_pdptr3, 240);
+ CHECK_OFFSET(guest_bndcfgs, 248);
+ CHECK_OFFSET(host_ia32_pat, 256);
+ CHECK_OFFSET(host_ia32_efer, 264);
+ CHECK_OFFSET(host_ia32_perf_global_ctrl, 272);
+ CHECK_OFFSET(vmread_bitmap, 280);
+ CHECK_OFFSET(vmwrite_bitmap, 288);
+ CHECK_OFFSET(vm_function_control, 296);
+ CHECK_OFFSET(eptp_list_address, 304);
+ CHECK_OFFSET(pml_address, 312);
+ CHECK_OFFSET(cr0_guest_host_mask, 344);
+ CHECK_OFFSET(cr4_guest_host_mask, 352);
+ CHECK_OFFSET(cr0_read_shadow, 360);
+ CHECK_OFFSET(cr4_read_shadow, 368);
+ CHECK_OFFSET(cr3_target_value0, 376);
+ CHECK_OFFSET(cr3_target_value1, 384);
+ CHECK_OFFSET(cr3_target_value2, 392);
+ CHECK_OFFSET(cr3_target_value3, 400);
+ CHECK_OFFSET(exit_qualification, 408);
+ CHECK_OFFSET(guest_linear_address, 416);
+ CHECK_OFFSET(guest_cr0, 424);
+ CHECK_OFFSET(guest_cr3, 432);
+ CHECK_OFFSET(guest_cr4, 440);
+ CHECK_OFFSET(guest_es_base, 448);
+ CHECK_OFFSET(guest_cs_base, 456);
+ CHECK_OFFSET(guest_ss_base, 464);
+ CHECK_OFFSET(guest_ds_base, 472);
+ CHECK_OFFSET(guest_fs_base, 480);
+ CHECK_OFFSET(guest_gs_base, 488);
+ CHECK_OFFSET(guest_ldtr_base, 496);
+ CHECK_OFFSET(guest_tr_base, 504);
+ CHECK_OFFSET(guest_gdtr_base, 512);
+ CHECK_OFFSET(guest_idtr_base, 520);
+ CHECK_OFFSET(guest_dr7, 528);
+ CHECK_OFFSET(guest_rsp, 536);
+ CHECK_OFFSET(guest_rip, 544);
+ CHECK_OFFSET(guest_rflags, 552);
+ CHECK_OFFSET(guest_pending_dbg_exceptions, 560);
+ CHECK_OFFSET(guest_sysenter_esp, 568);
+ CHECK_OFFSET(guest_sysenter_eip, 576);
+ CHECK_OFFSET(host_cr0, 584);
+ CHECK_OFFSET(host_cr3, 592);
+ CHECK_OFFSET(host_cr4, 600);
+ CHECK_OFFSET(host_fs_base, 608);
+ CHECK_OFFSET(host_gs_base, 616);
+ CHECK_OFFSET(host_tr_base, 624);
+ CHECK_OFFSET(host_gdtr_base, 632);
+ CHECK_OFFSET(host_idtr_base, 640);
+ CHECK_OFFSET(host_ia32_sysenter_esp, 648);
+ CHECK_OFFSET(host_ia32_sysenter_eip, 656);
+ CHECK_OFFSET(host_rsp, 664);
+ CHECK_OFFSET(host_rip, 672);
+ CHECK_OFFSET(pin_based_vm_exec_control, 744);
+ CHECK_OFFSET(cpu_based_vm_exec_control, 748);
+ CHECK_OFFSET(exception_bitmap, 752);
+ CHECK_OFFSET(page_fault_error_code_mask, 756);
+ CHECK_OFFSET(page_fault_error_code_match, 760);
+ CHECK_OFFSET(cr3_target_count, 764);
+ CHECK_OFFSET(vm_exit_controls, 768);
+ CHECK_OFFSET(vm_exit_msr_store_count, 772);
+ CHECK_OFFSET(vm_exit_msr_load_count, 776);
+ CHECK_OFFSET(vm_entry_controls, 780);
+ CHECK_OFFSET(vm_entry_msr_load_count, 784);
+ CHECK_OFFSET(vm_entry_intr_info_field, 788);
+ CHECK_OFFSET(vm_entry_exception_error_code, 792);
+ CHECK_OFFSET(vm_entry_instruction_len, 796);
+ CHECK_OFFSET(tpr_threshold, 800);
+ CHECK_OFFSET(secondary_vm_exec_control, 804);
+ CHECK_OFFSET(vm_instruction_error, 808);
+ CHECK_OFFSET(vm_exit_reason, 812);
+ CHECK_OFFSET(vm_exit_intr_info, 816);
+ CHECK_OFFSET(vm_exit_intr_error_code, 820);
+ CHECK_OFFSET(idt_vectoring_info_field, 824);
+ CHECK_OFFSET(idt_vectoring_error_code, 828);
+ CHECK_OFFSET(vm_exit_instruction_len, 832);
+ CHECK_OFFSET(vmx_instruction_info, 836);
+ CHECK_OFFSET(guest_es_limit, 840);
+ CHECK_OFFSET(guest_cs_limit, 844);
+ CHECK_OFFSET(guest_ss_limit, 848);
+ CHECK_OFFSET(guest_ds_limit, 852);
+ CHECK_OFFSET(guest_fs_limit, 856);
+ CHECK_OFFSET(guest_gs_limit, 860);
+ CHECK_OFFSET(guest_ldtr_limit, 864);
+ CHECK_OFFSET(guest_tr_limit, 868);
+ CHECK_OFFSET(guest_gdtr_limit, 872);
+ CHECK_OFFSET(guest_idtr_limit, 876);
+ CHECK_OFFSET(guest_es_ar_bytes, 880);
+ CHECK_OFFSET(guest_cs_ar_bytes, 884);
+ CHECK_OFFSET(guest_ss_ar_bytes, 888);
+ CHECK_OFFSET(guest_ds_ar_bytes, 892);
+ CHECK_OFFSET(guest_fs_ar_bytes, 896);
+ CHECK_OFFSET(guest_gs_ar_bytes, 900);
+ CHECK_OFFSET(guest_ldtr_ar_bytes, 904);
+ CHECK_OFFSET(guest_tr_ar_bytes, 908);
+ CHECK_OFFSET(guest_interruptibility_info, 912);
+ CHECK_OFFSET(guest_activity_state, 916);
+ CHECK_OFFSET(guest_sysenter_cs, 920);
+ CHECK_OFFSET(host_ia32_sysenter_cs, 924);
+ CHECK_OFFSET(vmx_preemption_timer_value, 928);
+ CHECK_OFFSET(virtual_processor_id, 960);
+ CHECK_OFFSET(posted_intr_nv, 962);
+ CHECK_OFFSET(guest_es_selector, 964);
+ CHECK_OFFSET(guest_cs_selector, 966);
+ CHECK_OFFSET(guest_ss_selector, 968);
+ CHECK_OFFSET(guest_ds_selector, 970);
+ CHECK_OFFSET(guest_fs_selector, 972);
+ CHECK_OFFSET(guest_gs_selector, 974);
+ CHECK_OFFSET(guest_ldtr_selector, 976);
+ CHECK_OFFSET(guest_tr_selector, 978);
+ CHECK_OFFSET(guest_intr_status, 980);
+ CHECK_OFFSET(host_es_selector, 982);
+ CHECK_OFFSET(host_cs_selector, 984);
+ CHECK_OFFSET(host_ss_selector, 986);
+ CHECK_OFFSET(host_ds_selector, 988);
+ CHECK_OFFSET(host_fs_selector, 990);
+ CHECK_OFFSET(host_gs_selector, 992);
+ CHECK_OFFSET(host_tr_selector, 994);
+ CHECK_OFFSET(guest_pml_index, 996);
+}
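+
+/*
+ * Note: vmx_check_vmcs12_offsets() is compile-time only; every
+ * CHECK_OFFSET() expands to BUILD_BUG_ON_MSG(), so the function generates
+ * no code and merely fails the build if the vmcs12 layout changes.
+ */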
+
+extern const unsigned short vmcs_field_to_offset_table[];
+extern const unsigned int nr_vmcs12_fields;
+
+#define ROL16(val, n) ((u16)(((u16)(val) << (n)) | ((u16)(val) >> (16 - (n)))))
+
+static inline short vmcs_field_to_offset(unsigned long field)
+{
+ unsigned short offset;
+ unsigned int index;
+
+ if (field >> 15)
+ return -ENOENT;
+
+ index = ROL16(field, 6);
+ if (index >= nr_vmcs12_fields)
+ return -ENOENT;
+
+ index = array_index_nospec(index, nr_vmcs12_fields);
+ offset = vmcs_field_to_offset_table[index];
+ if (offset == 0)
+ return -ENOENT;
+ return offset;
+}
+
+#undef ROL16
+
+/*
+ * Read a vmcs12 field. Since these can have varying lengths and we return
+ * one type, we chose the biggest type (u64) and zero-extend the return value
+ * to that size. Note that the caller, handle_vmread, might need to use only
+ * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
+ * 64-bit fields are to be returned).
+ */
+static inline int vmcs12_read_any(struct vmcs12 *vmcs12,
+ unsigned long field, u64 *ret)
+{
+ short offset = vmcs_field_to_offset(field);
+ char *p;
+
+ if (offset < 0)
+ return offset;
+
+ p = (char *)vmcs12 + offset;
+
+ switch (vmcs_field_width(field)) {
+ case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
+ *ret = *((natural_width *)p);
+ return 0;
+ case VMCS_FIELD_WIDTH_U16:
+ *ret = *((u16 *)p);
+ return 0;
+ case VMCS_FIELD_WIDTH_U32:
+ *ret = *((u32 *)p);
+ return 0;
+ case VMCS_FIELD_WIDTH_U64:
+ *ret = *((u64 *)p);
+ return 0;
+ default:
+ WARN_ON(1);
+ return -ENOENT;
+ }
+}
+
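+/*
+ * Usage sketch (illustrative, not the authoritative caller): a VMREAD
+ * handler might do
+ *
+ *     u64 value;
+ *
+ *     if (vmcs12_read_any(get_vmcs12(vcpu), field, &value) < 0)
+ *             ... signal VM-fail to L1 ...
+ *
+ * and then truncate value to the guest's operand size.
+ */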
+static inline int vmcs12_write_any(struct vmcs12 *vmcs12,
+ unsigned long field, u64 field_value)
+{
+ short offset = vmcs_field_to_offset(field);
+ char *p;
+
+ if (offset < 0)
+ return offset;
+
+ p = (char *)vmcs12 + offset;
+
+ switch (vmcs_field_width(field)) {
+ case VMCS_FIELD_WIDTH_U16:
+ *(u16 *)p = field_value;
+ return 0;
+ case VMCS_FIELD_WIDTH_U32:
+ *(u32 *)p = field_value;
+ return 0;
+ case VMCS_FIELD_WIDTH_U64:
+ *(u64 *)p = field_value;
+ return 0;
+ case VMCS_FIELD_WIDTH_NATURAL_WIDTH:
+ *(natural_width *)p = field_value;
+ return 0;
+ default:
+ WARN_ON(1);
+ return -ENOENT;
+ }
+}
+
+#endif /* __KVM_X86_VMX_VMCS12_H */
diff --git a/arch/x86/kvm/vmx_shadow_fields.h b/arch/x86/kvm/vmx/vmcs_shadow_fields.h
index 132432f375c2..132432f375c2 100644
--- a/arch/x86/kvm/vmx_shadow_fields.h
+++ b/arch/x86/kvm/vmx/vmcs_shadow_fields.h
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
new file mode 100644
index 000000000000..bcef2c7e9bc4
--- /dev/null
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+ .text
+
+/**
+ * vmx_vmenter - VM-Enter the current loaded VMCS
+ *
+ * %RFLAGS.ZF: !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
+ *
+ * Returns:
+ * %RFLAGS.CF is set on VM-Fail Invalid
+ * %RFLAGS.ZF is set on VM-Fail Valid
+ * %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
+ *
+ * Note that VMRESUME/VMLAUNCH fall-through and return directly if
+ * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
+ * to vmx_vmexit.
+ */
+ENTRY(vmx_vmenter)
+ /* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
+ je 2f
+
+1: vmresume
+ ret
+
+2: vmlaunch
+ ret
+
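+ /*
+ * VMLAUNCH/VMRESUME can fault (e.g. #UD after a forced VMXOFF on
+ * reboot); the exception table entries below redirect such faults
+ * to label 5, which jumps here. Tolerate the fault if KVM is
+ * rebooting, otherwise report a spurious fault.
+ */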
+3: cmpb $0, kvm_rebooting
+ jne 4f
+ call kvm_spurious_fault
+4: ret
+
+ .pushsection .fixup, "ax"
+5: jmp 3b
+ .popsection
+
+ _ASM_EXTABLE(1b, 5b)
+ _ASM_EXTABLE(2b, 5b)
+
+ENDPROC(vmx_vmenter)
+
+/**
+ * vmx_vmexit - Handle a VMX VM-Exit
+ *
+ * Returns:
+ * %RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
+ *
+ * This is vmx_vmenter's partner in crime. On a VM-Exit, control will jump
+ * here after hardware loads the host's state, i.e. this is the destination
+ * referred to by VMCS.HOST_RIP.
+ */
+ENTRY(vmx_vmexit)
+ ret
+ENDPROC(vmx_vmexit)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
new file mode 100644
index 000000000000..4d39f731bc33
--- /dev/null
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -0,0 +1,7935 @@
+/*
+ * Kernel-based Virtual Machine driver for Linux
+ *
+ * This module enables machines with Intel VT-x extensions to run virtual
+ * machines without emulation or binary translation.
+ *
+ * Copyright (C) 2006 Qumranet, Inc.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ * Avi Kivity <avi@qumranet.com>
+ * Yaniv Kamay <yaniv@qumranet.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/frame.h>
+#include <linux/highmem.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/tboot.h>
+#include <linux/trace_events.h>
+
+#include <asm/apic.h>
+#include <asm/asm.h>
+#include <asm/cpu.h>
+#include <asm/debugreg.h>
+#include <asm/desc.h>
+#include <asm/fpu/internal.h>
+#include <asm/io.h>
+#include <asm/irq_remapping.h>
+#include <asm/kexec.h>
+#include <asm/perf_event.h>
+#include <asm/mce.h>
+#include <asm/mmu_context.h>
+#include <asm/mshyperv.h>
+#include <asm/spec-ctrl.h>
+#include <asm/virtext.h>
+#include <asm/vmx.h>
+
+#include "capabilities.h"
+#include "cpuid.h"
+#include "evmcs.h"
+#include "irq.h"
+#include "kvm_cache_regs.h"
+#include "lapic.h"
+#include "mmu.h"
+#include "nested.h"
+#include "ops.h"
+#include "pmu.h"
+#include "trace.h"
+#include "vmcs.h"
+#include "vmcs12.h"
+#include "vmx.h"
+#include "x86.h"
+
+MODULE_AUTHOR("Qumranet");
+MODULE_LICENSE("GPL");
+
+static const struct x86_cpu_id vmx_cpu_id[] = {
+ X86_FEATURE_MATCH(X86_FEATURE_VMX),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+
+bool __read_mostly enable_vpid = 1;
+module_param_named(vpid, enable_vpid, bool, 0444);
+
+static bool __read_mostly enable_vnmi = 1;
+module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
+
+bool __read_mostly flexpriority_enabled = 1;
+module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
+
+bool __read_mostly enable_ept = 1;
+module_param_named(ept, enable_ept, bool, S_IRUGO);
+
+bool __read_mostly enable_unrestricted_guest = 1;
+module_param_named(unrestricted_guest,
+ enable_unrestricted_guest, bool, S_IRUGO);
+
+bool __read_mostly enable_ept_ad_bits = 1;
+module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
+
+static bool __read_mostly emulate_invalid_guest_state = true;
+module_param(emulate_invalid_guest_state, bool, S_IRUGO);
+
+static bool __read_mostly fasteoi = 1;
+module_param(fasteoi, bool, S_IRUGO);
+
+static bool __read_mostly enable_apicv = 1;
+module_param(enable_apicv, bool, S_IRUGO);
+
+/*
+ * If nested=1, nested virtualization is supported, i.e., a guest may use
+ * VMX and act as a hypervisor for its own guests. If nested=0, guests may
+ * not use VMX instructions.
+ */
+static bool __read_mostly nested = 1;
+module_param(nested, bool, S_IRUGO);
+
+static u64 __read_mostly host_xss;
+
+bool __read_mostly enable_pml = 1;
+module_param_named(pml, enable_pml, bool, S_IRUGO);
+
+#define MSR_BITMAP_MODE_X2APIC 1
+#define MSR_BITMAP_MODE_X2APIC_APICV 2
+
+#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
+
+/* Guest_tsc -> host_tsc conversion requires 64-bit division. */
+static int __read_mostly cpu_preemption_timer_multi;
+static bool __read_mostly enable_preemption_timer = 1;
+#ifdef CONFIG_X86_64
+module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
+#endif
+
+#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
+#define KVM_VM_CR0_ALWAYS_ON \
+ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \
+ X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
+#define KVM_CR4_GUEST_OWNED_BITS \
+ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
+ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
+
+#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
+#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
+#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
+
+#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
+
+#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
+ RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
+ RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
+ RTIT_STATUS_BYTECNT))
+
+#define MSR_IA32_RTIT_OUTPUT_BASE_MASK \
+ (~((1UL << cpuid_query_maxphyaddr(vcpu)) - 1) | 0x7f)
+
+/*
+ * These two parameters are used to configure the controls for Pause-Loop
+ * Exiting:
+ * ple_gap: upper bound on the amount of time between two successive
+ * executions of PAUSE in a loop. It also indicates whether PLE is
+ * enabled. According to testing, this time is usually smaller than
+ * 128 cycles.
+ * ple_window: upper bound on the amount of time a guest is allowed to
+ * execute in a PAUSE loop. Tests indicate that most spinlocks are held
+ * for less than 2^12 cycles.
+ * Time is measured based on a counter that runs at the same rate as the TSC;
+ * refer to SDM volume 3b, sections 21.6.13 & 22.1.3.
+ */
+static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
+module_param(ple_gap, uint, 0444);
+
+static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
+module_param(ple_window, uint, 0444);
+
+/* Default doubles per-vcpu window every exit. */
+static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
+module_param(ple_window_grow, uint, 0444);
+
+/* Default resets per-vcpu window every exit to ple_window. */
+static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
+module_param(ple_window_shrink, uint, 0444);
+
+/* Default is to compute the maximum so we can never overflow. */
+static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
+module_param(ple_window_max, uint, 0444);
+
+/* Default is SYSTEM mode, 1 for host-guest mode */
+int __read_mostly pt_mode = PT_MODE_SYSTEM;
+module_param(pt_mode, int, S_IRUGO);
+
+static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
+static DEFINE_MUTEX(vmx_l1d_flush_mutex);
+
+/* Storage for pre-module-init parameter parsing */
+static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+
+static const struct {
+ const char *option;
+ bool for_parse;
+} vmentry_l1d_param[] = {
+ [VMENTER_L1D_FLUSH_AUTO] = {"auto", true},
+ [VMENTER_L1D_FLUSH_NEVER] = {"never", true},
+ [VMENTER_L1D_FLUSH_COND] = {"cond", true},
+ [VMENTER_L1D_FLUSH_ALWAYS] = {"always", true},
+ [VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
+ [VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
+};
+
+#define L1D_CACHE_ORDER 4
+static void *vmx_l1d_flush_pages;
+
+static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
+{
+ struct page *page;
+ unsigned int i;
+
+ if (!enable_ept) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+ return 0;
+ }
+
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
+ u64 msr;
+
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
+ if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+ return 0;
+ }
+ }
+
+ /* If set to auto, use the default L1TF mitigation method */
+ if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ l1tf = VMENTER_L1D_FLUSH_NEVER;
+ break;
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+ case L1TF_MITIGATION_FLUSH:
+ case L1TF_MITIGATION_FLUSH_NOSMT:
+ l1tf = VMENTER_L1D_FLUSH_COND;
+ break;
+ case L1TF_MITIGATION_FULL:
+ case L1TF_MITIGATION_FULL_FORCE:
+ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
+ break;
+ }
+ } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
+ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
+ }
+
+ if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
+ !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+ if (!page)
+ return -ENOMEM;
+ vmx_l1d_flush_pages = page_address(page);
+
+ /*
+ * Initialize each page with a different pattern in
+ * order to protect against KSM in the nested
+ * virtualization case.
+ */
+ for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+ memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
+ PAGE_SIZE);
+ }
+ }
+
+ l1tf_vmx_mitigation = l1tf;
+
+ if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+ static_branch_enable(&vmx_l1d_should_flush);
+ else
+ static_branch_disable(&vmx_l1d_should_flush);
+
+ if (l1tf == VMENTER_L1D_FLUSH_COND)
+ static_branch_enable(&vmx_l1d_flush_cond);
+ else
+ static_branch_disable(&vmx_l1d_flush_cond);
+ return 0;
+}
+
+static int vmentry_l1d_flush_parse(const char *s)
+{
+ unsigned int i;
+
+ if (s) {
+ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+ if (vmentry_l1d_param[i].for_parse &&
+ sysfs_streq(s, vmentry_l1d_param[i].option))
+ return i;
+ }
+ }
+ return -EINVAL;
+}
+
+static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+{
+ int l1tf, ret;
+
+ l1tf = vmentry_l1d_flush_parse(s);
+ if (l1tf < 0)
+ return l1tf;
+
+ if (!boot_cpu_has(X86_BUG_L1TF))
+ return 0;
+
+ /*
+ * Has vmx_init() run already? If not, then this is the pre-init
+ * parameter parsing. In that case, just store the value and let
+ * vmx_init() do the proper setup after enable_ept has been
+ * established.
+ */
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
+ vmentry_l1d_flush_param = l1tf;
+ return 0;
+ }
+
+ mutex_lock(&vmx_l1d_flush_mutex);
+ ret = vmx_setup_l1d_flush(l1tf);
+ mutex_unlock(&vmx_l1d_flush_mutex);
+ return ret;
+}
+
+static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
+{
+ if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
+ return sprintf(s, "???\n");
+
+ return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
+}
+
+static const struct kernel_param_ops vmentry_l1d_flush_ops = {
+ .set = vmentry_l1d_flush_set,
+ .get = vmentry_l1d_flush_get,
+};
+module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
+
+static bool guest_state_valid(struct kvm_vcpu *vcpu);
+static u32 vmx_segment_access_rights(struct kvm_segment *var);
+static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type);
+
+void vmx_vmexit(void);
+
+static DEFINE_PER_CPU(struct vmcs *, vmxarea);
+DEFINE_PER_CPU(struct vmcs *, current_vmcs);
+/*
+ * We maintain a per-CPU linked list of VMCSs loaded on that CPU. This is
+ * needed when a CPU is brought down, and we need to VMCLEAR all VMCSs
+ * loaded on it.
+ */
+static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
+
+/*
+ * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
+ * can find which vCPU should be woken up.
+ */
+static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
+static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
+
+static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
+static DEFINE_SPINLOCK(vmx_vpid_lock);
+
+struct vmcs_config vmcs_config;
+struct vmx_capability vmx_capability;
+
+#define VMX_SEGMENT_FIELD(seg) \
+ [VCPU_SREG_##seg] = { \
+ .selector = GUEST_##seg##_SELECTOR, \
+ .base = GUEST_##seg##_BASE, \
+ .limit = GUEST_##seg##_LIMIT, \
+ .ar_bytes = GUEST_##seg##_AR_BYTES, \
+ }
+
+static const struct kvm_vmx_segment_field {
+ unsigned selector;
+ unsigned base;
+ unsigned limit;
+ unsigned ar_bytes;
+} kvm_vmx_segment_fields[] = {
+ VMX_SEGMENT_FIELD(CS),
+ VMX_SEGMENT_FIELD(DS),
+ VMX_SEGMENT_FIELD(ES),
+ VMX_SEGMENT_FIELD(FS),
+ VMX_SEGMENT_FIELD(GS),
+ VMX_SEGMENT_FIELD(SS),
+ VMX_SEGMENT_FIELD(TR),
+ VMX_SEGMENT_FIELD(LDTR),
+};
+
+u64 host_efer;
+
+/*
+ * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
+ * will emulate SYSCALL in legacy mode if the vendor string in guest
+ * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!". To
+ * support this emulation, IA32_STAR must always be included in
+ * vmx_msr_index[], even in i386 builds.
+ */
+const u32 vmx_msr_index[] = {
+#ifdef CONFIG_X86_64
+ MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
+#endif
+ MSR_EFER, MSR_TSC_AUX, MSR_STAR,
+};
+
+#if IS_ENABLED(CONFIG_HYPERV)
+static bool __read_mostly enlightened_vmcs = true;
+module_param(enlightened_vmcs, bool, 0444);
+
+/* check_ept_pointer_match() should be called under protection of ept_pointer_lock. */
+static void check_ept_pointer_match(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ u64 tmp_eptp = INVALID_PAGE;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!VALID_PAGE(tmp_eptp)) {
+ tmp_eptp = to_vmx(vcpu)->ept_pointer;
+ } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
+ to_kvm_vmx(kvm)->ept_pointers_match
+ = EPT_POINTERS_MISMATCH;
+ return;
+ }
+ }
+
+ to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
+}
+
+int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+ void *data)
+{
+ struct kvm_tlb_range *range = data;
+
+ return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
+ range->pages);
+}
+
+static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
+{
+ u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
+
+ /*
+ * The FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the
+ * address of the base of the EPT PML4 table, so strip off the
+ * EPT configuration information.
+ */
+ if (range)
+ return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK,
+ kvm_fill_hv_flush_list_func, (void *)range);
+ else
+ return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK);
+}
+
+static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range)
+{
+ struct kvm_vcpu *vcpu;
+ int ret = -ENOTSUPP, i;
+
+ spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+
+ if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
+ check_ept_pointer_match(kvm);
+
+ if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ /* If ept_pointer is an invalid pointer, bypass the flush request. */
+ if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
+ ret |= __hv_remote_flush_tlb_with_range(
+ kvm, vcpu, range);
+ }
+ } else {
+ ret = __hv_remote_flush_tlb_with_range(kvm,
+ kvm_get_vcpu(kvm, 0), range);
+ }
+
+ spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+ return ret;
+}
+static int hv_remote_flush_tlb(struct kvm *kvm)
+{
+ return hv_remote_flush_tlb_with_range(kvm, NULL);
+}
+
+#endif /* IS_ENABLED(CONFIG_HYPERV) */
+
+/*
+ * Comment format: document - errata name - stepping - processor name.
+ * Taken from
+ * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
+ */
+static u32 vmx_preemption_cpu_tfms[] = {
+/* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
+0x000206E6,
+/* 323056.pdf - AAX65 - C2 - Xeon L3406 */
+/* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
+/* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
+0x00020652,
+/* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
+0x00020655,
+/* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
+/* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
+/*
+ * 320767.pdf - AAP86 - B1 -
+ * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
+ */
+0x000106E5,
+/* 321333.pdf - AAM126 - C0 - Xeon 3500 */
+0x000106A0,
+/* 321333.pdf - AAM126 - C1 - Xeon 3500 */
+0x000106A1,
+/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
+0x000106A4,
+ /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
+ /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
+ /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
+0x000106A5,
+ /* Xeon E3-1220 V2 */
+0x000306A8,
+};
+
+static inline bool cpu_has_broken_vmx_preemption_timer(void)
+{
+ u32 eax = cpuid_eax(0x00000001), i;
+
+ /* Clear the reserved bits */
+ eax &= ~(0x3U << 14 | 0xfU << 28);
+ for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
+ if (eax == vmx_preemption_cpu_tfms[i])
+ return true;
+
+ return false;
+}
+
+static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
+{
+ return flexpriority_enabled && lapic_in_kernel(vcpu);
+}
+
+static inline bool report_flexpriority(void)
+{
+ return flexpriority_enabled;
+}
+
+static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
+{
+ int i;
+
+ for (i = 0; i < vmx->nmsrs; ++i)
+ if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
+ return i;
+ return -1;
+}
+
+struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+{
+ int i;
+
+ i = __find_msr_index(vmx, msr);
+ if (i >= 0)
+ return &vmx->guest_msrs[i];
+ return NULL;
+}
+
+void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
+{
+ vmcs_clear(loaded_vmcs->vmcs);
+ if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+ vmcs_clear(loaded_vmcs->shadow_vmcs);
+ loaded_vmcs->cpu = -1;
+ loaded_vmcs->launched = 0;
+}
+
+#ifdef CONFIG_KEXEC_CORE
+/*
+ * This bitmap indicates whether the VMCLEAR operation is enabled
+ * on each CPU. All CPUs are disabled by default.
+ */
+static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
+
+static inline void crash_enable_local_vmclear(int cpu)
+{
+ cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
+}
+
+static inline void crash_disable_local_vmclear(int cpu)
+{
+ cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
+}
+
+static inline int crash_local_vmclear_enabled(int cpu)
+{
+ return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
+}
+
+static void crash_vmclear_local_loaded_vmcss(void)
+{
+ int cpu = raw_smp_processor_id();
+ struct loaded_vmcs *v;
+
+ if (!crash_local_vmclear_enabled(cpu))
+ return;
+
+ list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
+ loaded_vmcss_on_cpu_link)
+ vmcs_clear(v->vmcs);
+}
+#else
+static inline void crash_enable_local_vmclear(int cpu) { }
+static inline void crash_disable_local_vmclear(int cpu) { }
+#endif /* CONFIG_KEXEC_CORE */
+
+static void __loaded_vmcs_clear(void *arg)
+{
+ struct loaded_vmcs *loaded_vmcs = arg;
+ int cpu = raw_smp_processor_id();
+
+ if (loaded_vmcs->cpu != cpu)
+ return; /* vcpu migration can race with cpu offline */
+ if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
+ per_cpu(current_vmcs, cpu) = NULL;
+ crash_disable_local_vmclear(cpu);
+ list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
+
+	/*
+	 * Ensure the removal from loaded_vmcss_on_cpu_link is visible before
+	 * loaded_vmcs->cpu is set to -1 in loaded_vmcs_init.  Otherwise
+	 * another cpu could see cpu == -1 first and add the vmcs to the
+	 * percpu list before it is deleted here.
+	 */
+ smp_wmb();
+
+ loaded_vmcs_init(loaded_vmcs);
+ crash_enable_local_vmclear(cpu);
+}
+
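+/*
+ * VMCLEAR the VMCS on the CPU where it was last loaded, via a synchronous
+ * cross-call.
+ */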
+void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
+{
+ int cpu = loaded_vmcs->cpu;
+
+ if (cpu != -1)
+ smp_call_function_single(cpu,
+ __loaded_vmcs_clear, loaded_vmcs, 1);
+}
+
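+/*
+ * Returns true if (seg, field) was already cached since the last cache
+ * invalidation, and marks it as cached either way so the caller's VMREAD
+ * result is reused on the next access.
+ */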
+static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
+ unsigned field)
+{
+ bool ret;
+ u32 mask = 1 << (seg * SEG_FIELD_NR + field);
+
+ if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
+ vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
+ vmx->segment_cache.bitmask = 0;
+ }
+ ret = vmx->segment_cache.bitmask & mask;
+ vmx->segment_cache.bitmask |= mask;
+ return ret;
+}
+
+static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
+{
+ u16 *p = &vmx->segment_cache.seg[seg].selector;
+
+ if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
+ *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
+ return *p;
+}
+
+static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
+{
+ ulong *p = &vmx->segment_cache.seg[seg].base;
+
+ if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
+ *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
+ return *p;
+}
+
+static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
+{
+ u32 *p = &vmx->segment_cache.seg[seg].limit;
+
+ if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
+ *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
+ return *p;
+}
+
+static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
+{
+ u32 *p = &vmx->segment_cache.seg[seg].ar;
+
+ if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
+ *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
+ return *p;
+}
+
+void update_exception_bitmap(struct kvm_vcpu *vcpu)
+{
+ u32 eb;
+
+ eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
+ (1u << DB_VECTOR) | (1u << AC_VECTOR);
+ /*
+ * Guest access to VMware backdoor ports could legitimately
+ * trigger #GP because of TSS I/O permission bitmap.
+ * We intercept those #GP and allow access to them anyway
+ * as VMware does.
+ */
+ if (enable_vmware_backdoor)
+ eb |= (1u << GP_VECTOR);
+ if ((vcpu->guest_debug &
+ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
+ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
+ eb |= 1u << BP_VECTOR;
+ if (to_vmx(vcpu)->rmode.vm86_active)
+ eb = ~0;
+ if (enable_ept)
+ eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
+
+ /* When we are running a nested L2 guest and L1 specified for it a
+ * certain exception bitmap, we must trap the same exceptions and pass
+ * them to L1. When running L2, we will only handle the exceptions
+ * specified above if L1 did not want them.
+ */
+ if (is_guest_mode(vcpu))
+ eb |= get_vmcs12(vcpu)->exception_bitmap;
+
+ vmcs_write32(EXCEPTION_BITMAP, eb);
+}
+
+/*
+ * Check if MSR is intercepted for currently loaded MSR bitmap.
+ */
+static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
+{
+ unsigned long *msr_bitmap;
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return true;
+
+ msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
+
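+	/*
+	 * Write bitmap layout within the 4K page: offsets 0x800-0xbff cover
+	 * MSRs 0x00000000-0x00001fff and 0xc00-0xfff cover MSRs
+	 * 0xc0000000-0xc0001fff, one bit per MSR.
+	 */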
+ if (msr <= 0x1fff) {
+ return !!test_bit(msr, msr_bitmap + 0x800 / f);
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ return !!test_bit(msr, msr_bitmap + 0xc00 / f);
+ }
+
+ return true;
+}
+
+static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+ unsigned long entry, unsigned long exit)
+{
+ vm_entry_controls_clearbit(vmx, entry);
+ vm_exit_controls_clearbit(vmx, exit);
+}
+
+static int find_msr(struct vmx_msrs *m, unsigned int msr)
+{
+ unsigned int i;
+
+ for (i = 0; i < m->nr; ++i) {
+ if (m->val[i].index == msr)
+ return i;
+ }
+ return -ENOENT;
+}
+
+static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
+{
+ int i;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+ case MSR_EFER:
+ if (cpu_has_load_ia32_efer()) {
+ clear_atomic_switch_msr_special(vmx,
+ VM_ENTRY_LOAD_IA32_EFER,
+ VM_EXIT_LOAD_IA32_EFER);
+ return;
+ }
+ break;
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ if (cpu_has_load_perf_global_ctrl()) {
+ clear_atomic_switch_msr_special(vmx,
+ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+ VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+ return;
+ }
+ break;
+ }
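+	/*
+	 * No dedicated VMCS control for this MSR: swap-remove it from the
+	 * guest and host autoload arrays, overwriting the entry with the
+	 * last one.
+	 */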
+ i = find_msr(&m->guest, msr);
+ if (i < 0)
+ goto skip_guest;
+ --m->guest.nr;
+ m->guest.val[i] = m->guest.val[m->guest.nr];
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+
+skip_guest:
+ i = find_msr(&m->host, msr);
+ if (i < 0)
+ return;
+
+ --m->host.nr;
+ m->host.val[i] = m->host.val[m->host.nr];
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+}
+
+static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+ unsigned long entry, unsigned long exit,
+ unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
+ u64 guest_val, u64 host_val)
+{
+ vmcs_write64(guest_val_vmcs, guest_val);
+ if (host_val_vmcs != HOST_IA32_EFER)
+ vmcs_write64(host_val_vmcs, host_val);
+ vm_entry_controls_setbit(vmx, entry);
+ vm_exit_controls_setbit(vmx, exit);
+}
+
+static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ u64 guest_val, u64 host_val, bool entry_only)
+{
+ int i, j = 0;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+ case MSR_EFER:
+ if (cpu_has_load_ia32_efer()) {
+ add_atomic_switch_msr_special(vmx,
+ VM_ENTRY_LOAD_IA32_EFER,
+ VM_EXIT_LOAD_IA32_EFER,
+ GUEST_IA32_EFER,
+ HOST_IA32_EFER,
+ guest_val, host_val);
+ return;
+ }
+ break;
+ case MSR_CORE_PERF_GLOBAL_CTRL:
+ if (cpu_has_load_perf_global_ctrl()) {
+ add_atomic_switch_msr_special(vmx,
+ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+ VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
+ GUEST_IA32_PERF_GLOBAL_CTRL,
+ HOST_IA32_PERF_GLOBAL_CTRL,
+ guest_val, host_val);
+ return;
+ }
+ break;
+ case MSR_IA32_PEBS_ENABLE:
+ /* PEBS needs a quiescent period after being disabled (to write
+ * a record). Disabling PEBS through VMX MSR swapping doesn't
+ * provide that period, so a CPU could write host's record into
+ * guest's memory.
+ */
+ wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ }
+
+ i = find_msr(&m->guest, msr);
+ if (!entry_only)
+ j = find_msr(&m->host, msr);
+
+	/* find_msr() returns an index or -ENOENT; check for full arrays. */
+	if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
+	    (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+ }
+ if (i < 0) {
+ i = m->guest.nr++;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+ }
+ m->guest.val[i].index = msr;
+ m->guest.val[i].value = guest_val;
+
+ if (entry_only)
+ return;
+
+ if (j < 0) {
+ j = m->host.nr++;
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+ m->host.val[j].index = msr;
+ m->host.val[j].value = host_val;
+}
+
+static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
+{
+ u64 guest_efer = vmx->vcpu.arch.efer;
+ u64 ignore_bits = 0;
+
+ if (!enable_ept) {
+ /*
+ * NX is needed to handle CR0.WP=1, CR4.SMEP=1. Testing
+ * host CPUID is more efficient than testing guest CPUID
+ * or CR4. Host SMEP is anyway a requirement for guest SMEP.
+ */
+ if (boot_cpu_has(X86_FEATURE_SMEP))
+ guest_efer |= EFER_NX;
+ else if (!(guest_efer & EFER_NX))
+ ignore_bits |= EFER_NX;
+ }
+
+ /*
+ * LMA and LME handled by hardware; SCE meaningless outside long mode.
+ */
+ ignore_bits |= EFER_SCE;
+#ifdef CONFIG_X86_64
+ ignore_bits |= EFER_LMA | EFER_LME;
+ /* SCE is meaningful only in long mode on Intel */
+ if (guest_efer & EFER_LMA)
+ ignore_bits &= ~(u64)EFER_SCE;
+#endif
+
+ /*
+ * On EPT, we can't emulate NX, so we must switch EFER atomically.
+ * On CPUs that support "load IA32_EFER", always switch EFER
+ * atomically, since it's faster than switching it manually.
+ */
+ if (cpu_has_load_ia32_efer() ||
+ (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
+ if (!(guest_efer & EFER_LMA))
+ guest_efer &= ~EFER_LME;
+ if (guest_efer != host_efer)
+ add_atomic_switch_msr(vmx, MSR_EFER,
+ guest_efer, host_efer, false);
+ else
+ clear_atomic_switch_msr(vmx, MSR_EFER);
+ return false;
+ } else {
+ clear_atomic_switch_msr(vmx, MSR_EFER);
+
+ guest_efer &= ~ignore_bits;
+ guest_efer |= host_efer & ignore_bits;
+
+ vmx->guest_msrs[efer_offset].data = guest_efer;
+ vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+ return true;
+ }
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * On 32-bit kernels, VM exits still load the FS and GS bases from the
+ * VMCS rather than the segment table. KVM uses this helper to figure
+ * out the current bases to poke them into the VMCS before entry.
+ */
+static unsigned long segment_base(u16 selector)
+{
+ struct desc_struct *table;
+ unsigned long v;
+
+ if (!(selector & ~SEGMENT_RPL_MASK))
+ return 0;
+
+ table = get_current_gdt_ro();
+
+ if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
+ u16 ldt_selector = kvm_read_ldt();
+
+ if (!(ldt_selector & ~SEGMENT_RPL_MASK))
+ return 0;
+
+ table = (struct desc_struct *)segment_base(ldt_selector);
+ }
+ v = get_desc_base(&table[selector >> 3]);
+ return v;
+}
+#endif
+
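+/* Restore a saved Processor Trace context into the RTIT MSRs. */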
+static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
+{
+ u32 i;
+
+ wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+ wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
+ wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+ for (i = 0; i < addr_range; i++) {
+ wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
+ wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
+ }
+}
+
+static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
+{
+ u32 i;
+
+ rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
+ rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
+ rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
+ for (i = 0; i < addr_range; i++) {
+ rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
+ rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
+ }
+}
+
+static void pt_guest_enter(struct vcpu_vmx *vmx)
+{
+ if (pt_mode == PT_MODE_SYSTEM)
+ return;
+
+ /*
+ * GUEST_IA32_RTIT_CTL is already set in the VMCS.
+ * Save host state before VM entry.
+ */
+ rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
+ if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
+ wrmsrl(MSR_IA32_RTIT_CTL, 0);
+ pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
+ pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
+ }
+}
+
+static void pt_guest_exit(struct vcpu_vmx *vmx)
+{
+ if (pt_mode == PT_MODE_SYSTEM)
+ return;
+
+ if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
+ pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
+ pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
+ }
+
+ /* Reload host state (IA32_RTIT_CTL will be cleared on VM exit). */
+ wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
+}
+
+void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs_host_state *host_state;
+#ifdef CONFIG_X86_64
+ int cpu = raw_smp_processor_id();
+#endif
+ unsigned long fs_base, gs_base;
+ u16 fs_sel, gs_sel;
+ int i;
+
+ vmx->req_immediate_exit = false;
+
+ /*
+ * Note that guest MSRs to be saved/restored can also be changed
+ * when guest state is loaded. This happens when guest transitions
+ * to/from long-mode by setting MSR_EFER.LMA.
+ */
+ if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) {
+ vmx->guest_msrs_dirty = false;
+ for (i = 0; i < vmx->save_nmsrs; ++i)
+ kvm_set_shared_msr(vmx->guest_msrs[i].index,
+ vmx->guest_msrs[i].data,
+ vmx->guest_msrs[i].mask);
+
+ }
+
+ if (vmx->loaded_cpu_state)
+ return;
+
+ vmx->loaded_cpu_state = vmx->loaded_vmcs;
+ host_state = &vmx->loaded_cpu_state->host_state;
+
+ /*
+ * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
+ * allow segment selectors with cpl > 0 or ti == 1.
+ */
+ host_state->ldt_sel = kvm_read_ldt();
+
+#ifdef CONFIG_X86_64
+ savesegment(ds, host_state->ds_sel);
+ savesegment(es, host_state->es_sel);
+
+ gs_base = cpu_kernelmode_gs_base(cpu);
+ if (likely(is_64bit_mm(current->mm))) {
+ save_fsgs_for_kvm();
+ fs_sel = current->thread.fsindex;
+ gs_sel = current->thread.gsindex;
+ fs_base = current->thread.fsbase;
+ vmx->msr_host_kernel_gs_base = current->thread.gsbase;
+ } else {
+ savesegment(fs, fs_sel);
+ savesegment(gs, gs_sel);
+ fs_base = read_msr(MSR_FS_BASE);
+ vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+ }
+
+ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+#else
+ savesegment(fs, fs_sel);
+ savesegment(gs, gs_sel);
+ fs_base = segment_base(fs_sel);
+ gs_base = segment_base(gs_sel);
+#endif
+
+ if (unlikely(fs_sel != host_state->fs_sel)) {
+ if (!(fs_sel & 7))
+ vmcs_write16(HOST_FS_SELECTOR, fs_sel);
+ else
+ vmcs_write16(HOST_FS_SELECTOR, 0);
+ host_state->fs_sel = fs_sel;
+ }
+ if (unlikely(gs_sel != host_state->gs_sel)) {
+ if (!(gs_sel & 7))
+ vmcs_write16(HOST_GS_SELECTOR, gs_sel);
+ else
+ vmcs_write16(HOST_GS_SELECTOR, 0);
+ host_state->gs_sel = gs_sel;
+ }
+ if (unlikely(fs_base != host_state->fs_base)) {
+ vmcs_writel(HOST_FS_BASE, fs_base);
+ host_state->fs_base = fs_base;
+ }
+ if (unlikely(gs_base != host_state->gs_base)) {
+ vmcs_writel(HOST_GS_BASE, gs_base);
+ host_state->gs_base = gs_base;
+ }
+}
+
+static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
+{
+ struct vmcs_host_state *host_state;
+
+ if (!vmx->loaded_cpu_state)
+ return;
+
+ WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
+ host_state = &vmx->loaded_cpu_state->host_state;
+
+ ++vmx->vcpu.stat.host_state_reload;
+ vmx->loaded_cpu_state = NULL;
+
+#ifdef CONFIG_X86_64
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+#endif
+ if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
+ kvm_load_ldt(host_state->ldt_sel);
+#ifdef CONFIG_X86_64
+ load_gs_index(host_state->gs_sel);
+#else
+ loadsegment(gs, host_state->gs_sel);
+#endif
+ }
+ if (host_state->fs_sel & 7)
+ loadsegment(fs, host_state->fs_sel);
+#ifdef CONFIG_X86_64
+ if (unlikely(host_state->ds_sel | host_state->es_sel)) {
+ loadsegment(ds, host_state->ds_sel);
+ loadsegment(es, host_state->es_sel);
+ }
+#endif
+ invalidate_tss_limit();
+#ifdef CONFIG_X86_64
+ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+#endif
+ load_fixmap_gdt(raw_smp_processor_id());
+}
+
+#ifdef CONFIG_X86_64
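+/*
+ * While guest state is loaded, the guest's value lives in the hardware
+ * MSR, so the cached copy must be refreshed (or the MSR written) under
+ * preemption protection.
+ */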
+static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
+{
+ preempt_disable();
+ if (vmx->loaded_cpu_state)
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ preempt_enable();
+ return vmx->msr_guest_kernel_gs_base;
+}
+
+static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
+{
+ preempt_disable();
+ if (vmx->loaded_cpu_state)
+ wrmsrl(MSR_KERNEL_GS_BASE, data);
+ preempt_enable();
+ vmx->msr_guest_kernel_gs_base = data;
+}
+#endif
+
+static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ struct pi_desc old, new;
+ unsigned int dest;
+
+ /*
+ * In case of hot-plug or hot-unplug, we may have to undo
+ * vmx_vcpu_pi_put even if there is no assigned device. And we
+ * always keep PI.NDST up to date for simplicity: it makes the
+ * code easier, and CPU migration is not a fast path.
+ */
+ if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
+ return;
+
+ /*
+ * First handle the simple case where no cmpxchg is necessary; just
+ * allow posting non-urgent interrupts.
+ *
+ * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
+ * PI.NDST: pi_post_block will do it for us and the wakeup_handler
+ * expects the VCPU to be on the blocked_vcpu_list that matches
+ * PI.NDST.
+ */
+ if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR ||
+ vcpu->cpu == cpu) {
+ pi_clear_sn(pi_desc);
+ return;
+ }
+
+ /* The full case. */
+ do {
+ old.control = new.control = pi_desc->control;
+
+ dest = cpu_physical_id(cpu);
+
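+		/*
+		 * In xAPIC mode PI.NDST holds the destination APIC ID in
+		 * bits 15:8; in x2APIC mode it holds the full 32-bit ID.
+		 */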
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;
+
+ new.sn = 0;
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
+}
+
+/*
+ * Switches to specified vcpu, until a matching vcpu_put(), but assumes
+ * vcpu mutex is already taken.
+ */
+void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
+
+ if (!already_loaded) {
+ loaded_vmcs_clear(vmx->loaded_vmcs);
+ local_irq_disable();
+ crash_disable_local_vmclear(cpu);
+
+ /*
+ * Read loaded_vmcs->cpu should be before fetching
+ * loaded_vmcs->loaded_vmcss_on_cpu_link.
+ * See the comments in __loaded_vmcs_clear().
+ */
+ smp_rmb();
+
+ list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
+ &per_cpu(loaded_vmcss_on_cpu, cpu));
+ crash_enable_local_vmclear(cpu);
+ local_irq_enable();
+ }
+
+ if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
+ per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
+ vmcs_load(vmx->loaded_vmcs->vmcs);
+ indirect_branch_prediction_barrier();
+ }
+
+ if (!already_loaded) {
+ void *gdt = get_current_gdt_ro();
+ unsigned long sysenter_esp;
+
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+
+ /*
+ * Linux uses per-cpu TSS and GDT, so set these when switching
+ * processors. See 22.2.4.
+ */
+ vmcs_writel(HOST_TR_BASE,
+ (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
+ vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */
+
+		/*
+		 * VM exits change the host TR limit to 0x67.  This is okay,
+		 * since 0x67 covers everything except the IO bitmap and we
+		 * have code to handle the IO bitmap being lost after a VM
+		 * exit.
+		 */
+ BUILD_BUG_ON(IO_BITMAP_OFFSET - 1 != 0x67);
+
+ rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
+ vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+
+ vmx->loaded_vmcs->cpu = cpu;
+ }
+
+ /* Setup TSC multiplier */
+ if (kvm_has_tsc_control &&
+ vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
+ decache_tsc_multiplier(vmx);
+
+ vmx_vcpu_pi_load(vcpu, cpu);
+ vmx->host_pkru = read_pkru();
+ vmx->host_debugctlmsr = get_debugctlmsr();
+}
+
+static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
+{
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+ if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
+ return;
+
+ /* Set SN when the vCPU is preempted */
+ if (vcpu->preempted)
+ pi_set_sn(pi_desc);
+}
+
+void vmx_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ vmx_vcpu_pi_put(vcpu);
+
+ vmx_prepare_switch_to_host(to_vmx(vcpu));
+}
+
+static bool emulation_required(struct kvm_vcpu *vcpu)
+{
+ return emulate_invalid_guest_state && !guest_state_valid(vcpu);
+}
+
+static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
+
+unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
+{
+ unsigned long rflags, save_rflags;
+
+ if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
+ __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
+ rflags = vmcs_readl(GUEST_RFLAGS);
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+ save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+ rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+ }
+ to_vmx(vcpu)->rflags = rflags;
+ }
+ return to_vmx(vcpu)->rflags;
+}
+
+void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+ unsigned long old_rflags = vmx_get_rflags(vcpu);
+
+ __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
+ to_vmx(vcpu)->rflags = rflags;
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ to_vmx(vcpu)->rmode.save_rflags = rflags;
+ rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+ }
+ vmcs_writel(GUEST_RFLAGS, rflags);
+
+ if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM)
+ to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
+}
+
+u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
+{
+ u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+ int ret = 0;
+
+ if (interruptibility & GUEST_INTR_STATE_STI)
+ ret |= KVM_X86_SHADOW_INT_STI;
+ if (interruptibility & GUEST_INTR_STATE_MOV_SS)
+ ret |= KVM_X86_SHADOW_INT_MOV_SS;
+
+ return ret;
+}
+
+void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+{
+ u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
+ u32 interruptibility = interruptibility_old;
+
+ interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
+
+ if (mask & KVM_X86_SHADOW_INT_MOV_SS)
+ interruptibility |= GUEST_INTR_STATE_MOV_SS;
+ else if (mask & KVM_X86_SHADOW_INT_STI)
+ interruptibility |= GUEST_INTR_STATE_STI;
+
+ if ((interruptibility != interruptibility_old))
+ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
+}
+
+static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long value;
+
+	/*
+	 * Any MSR write that attempts to change bits marked reserved will
+	 * cause a #GP fault.
+	 */
+ if (data & vmx->pt_desc.ctl_bitmask)
+ return 1;
+
+ /*
+ * Any attempt to modify IA32_RTIT_CTL while TraceEn is set will
+ * result in a #GP unless the same write also clears TraceEn.
+ */
+ if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
+ ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
+ return 1;
+
+	/*
+	 * A WRMSR to IA32_RTIT_CTL that sets TraceEn but clears ToPA and
+	 * FabricEn would cause a #GP, if
+	 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0.
+	 */
+ if ((data & RTIT_CTL_TRACEEN) && !(data & RTIT_CTL_TOPA) &&
+ !(data & RTIT_CTL_FABRIC_EN) &&
+ !intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_single_range_output))
+ return 1;
+
+	/*
+	 * MTCFreq, CycThresh and PSBFreq encoding checks: any MSR write
+	 * that uses an encoding marked reserved will cause a #GP fault.
+	 */
+ value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
+ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
+ !test_bit((data & RTIT_CTL_MTC_RANGE) >>
+ RTIT_CTL_MTC_RANGE_OFFSET, &value))
+ return 1;
+ value = intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_cycle_thresholds);
+ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
+ !test_bit((data & RTIT_CTL_CYC_THRESH) >>
+ RTIT_CTL_CYC_THRESH_OFFSET, &value))
+ return 1;
+ value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
+ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
+ !test_bit((data & RTIT_CTL_PSB_FREQ) >>
+ RTIT_CTL_PSB_FREQ_OFFSET, &value))
+ return 1;
+
+	/*
+	 * An ADDRx_CFG value that selects a reserved range or uses an
+	 * encoding greater than 2 will cause a #GP fault.
+	 */
+ value = (data & RTIT_CTL_ADDR0) >> RTIT_CTL_ADDR0_OFFSET;
+ if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2))
+ return 1;
+ value = (data & RTIT_CTL_ADDR1) >> RTIT_CTL_ADDR1_OFFSET;
+ if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2))
+ return 1;
+ value = (data & RTIT_CTL_ADDR2) >> RTIT_CTL_ADDR2_OFFSET;
+ if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2))
+ return 1;
+ value = (data & RTIT_CTL_ADDR3) >> RTIT_CTL_ADDR3_OFFSET;
+ if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2))
+ return 1;
+
+ return 0;
+}
+
+static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+ unsigned long rip;
+
+ rip = kvm_rip_read(vcpu);
+ rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ kvm_rip_write(vcpu, rip);
+
+ /* skipping an emulated instruction also counts */
+ vmx_set_interrupt_shadow(vcpu, 0);
+}
+
+static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Ensure that we clear the HLT state in the VMCS. We don't need to
+ * explicitly skip the instruction because if the HLT state is set,
+ * then the instruction is already executing and RIP has already been
+ * advanced.
+ */
+ if (kvm_hlt_in_guest(vcpu->kvm) &&
+ vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
+ vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
+}
+
+static void vmx_queue_exception(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned nr = vcpu->arch.exception.nr;
+ bool has_error_code = vcpu->arch.exception.has_error_code;
+ u32 error_code = vcpu->arch.exception.error_code;
+ u32 intr_info = nr | INTR_INFO_VALID_MASK;
+
+ kvm_deliver_exception_payload(vcpu);
+
+ if (has_error_code) {
+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+ intr_info |= INTR_INFO_DELIVER_CODE_MASK;
+ }
+
+ if (vmx->rmode.vm86_active) {
+ int inc_eip = 0;
+ if (kvm_exception_is_soft(nr))
+ inc_eip = vcpu->arch.event_exit_inst_len;
+ if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+ return;
+ }
+
+ WARN_ON_ONCE(vmx->emulation_required);
+
+ if (kvm_exception_is_soft(nr)) {
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ vmx->vcpu.arch.event_exit_inst_len);
+ intr_info |= INTR_TYPE_SOFT_EXCEPTION;
+ } else
+ intr_info |= INTR_TYPE_HARD_EXCEPTION;
+
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
+
+ vmx_clear_hlt(vcpu);
+}
+
+static bool vmx_rdtscp_supported(void)
+{
+ return cpu_has_vmx_rdtscp();
+}
+
+static bool vmx_invpcid_supported(void)
+{
+ return cpu_has_vmx_invpcid();
+}
+
+/*
+ * Swap MSR entry in host/guest MSR entry array.
+ */
+static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
+{
+ struct shared_msr_entry tmp;
+
+ tmp = vmx->guest_msrs[to];
+ vmx->guest_msrs[to] = vmx->guest_msrs[from];
+ vmx->guest_msrs[from] = tmp;
+}
+
+/*
+ * Set up the vmcs to automatically save and restore system
+ * msrs. Don't touch the 64-bit msrs if the guest is in legacy
+ * mode, as fiddling with msrs is very expensive.
+ */
+static void setup_msrs(struct vcpu_vmx *vmx)
+{
+ int save_nmsrs, index;
+
+ save_nmsrs = 0;
+#ifdef CONFIG_X86_64
+ /*
+ * The SYSCALL MSRs are only needed on long mode guests, and only
+ * when EFER.SCE is set.
+ */
+ if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
+ index = __find_msr_index(vmx, MSR_STAR);
+ if (index >= 0)
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_LSTAR);
+ if (index >= 0)
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
+ if (index >= 0)
+ move_msr_up(vmx, index, save_nmsrs++);
+ }
+#endif
+ index = __find_msr_index(vmx, MSR_EFER);
+ if (index >= 0 && update_transition_efer(vmx, index))
+ move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_TSC_AUX);
+ if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
+ move_msr_up(vmx, index, save_nmsrs++);
+
+ vmx->save_nmsrs = save_nmsrs;
+ vmx->guest_msrs_dirty = true;
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmx_update_msr_bitmap(&vmx->vcpu);
+}
+
+static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (is_guest_mode(vcpu) &&
+ (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+ return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
+
+ return vcpu->arch.tsc_offset;
+}
+
+static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ u64 g_tsc_offset = 0;
+
+ /*
+	 * We're here if L1 chose not to trap WRMSR to TSC.  According
+	 * to the spec, this should set L1's TSC; the offset that L1
+ * set for L2 remains unchanged, and still needs to be added
+ * to the newly set TSC to get L2's TSC.
+ */
+ if (is_guest_mode(vcpu) &&
+ (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+ g_tsc_offset = vmcs12->tsc_offset;
+
+ trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+ vcpu->arch.tsc_offset - g_tsc_offset,
+ offset);
+ vmcs_write64(TSC_OFFSET, offset + g_tsc_offset);
+ return offset + g_tsc_offset;
+}
+
+/*
+ * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
+ * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
+ * all guests if the "nested" module option is off, and can also be disabled
+ * for a single guest by disabling its VMX cpuid bit.
+ */
+bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
+{
+ return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
+}
+
+static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
+ uint64_t val)
+{
+ uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits;
+
+ return !(val & ~valid_bits);
+}
+
+static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
+{
+ switch (msr->index) {
+ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+ if (!nested)
+ return 1;
+ return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Reads an msr value (of msr_info->index) into msr_info->data.
+ * Returns 0 on success, non-0 otherwise.
+ * Assumes vcpu_load() was already called.
+ */
+static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct shared_msr_entry *msr;
+ u32 index;
+
+ switch (msr_info->index) {
+#ifdef CONFIG_X86_64
+ case MSR_FS_BASE:
+ msr_info->data = vmcs_readl(GUEST_FS_BASE);
+ break;
+ case MSR_GS_BASE:
+ msr_info->data = vmcs_readl(GUEST_GS_BASE);
+ break;
+ case MSR_KERNEL_GS_BASE:
+ msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
+ break;
+#endif
+ case MSR_EFER:
+ return kvm_get_msr_common(vcpu, msr_info);
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ return 1;
+
+ msr_info->data = to_vmx(vcpu)->spec_ctrl;
+ break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
+ return 1;
+ msr_info->data = to_vmx(vcpu)->arch_capabilities;
+ break;
+ case MSR_IA32_SYSENTER_CS:
+ msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
+ break;
+ case MSR_IA32_BNDCFGS:
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
+ return 1;
+ msr_info->data = vmcs_read64(GUEST_BNDCFGS);
+ break;
+ case MSR_IA32_MCG_EXT_CTL:
+ if (!msr_info->host_initiated &&
+ !(vmx->msr_ia32_feature_control &
+ FEATURE_CONTROL_LMCE))
+ return 1;
+ msr_info->data = vcpu->arch.mcg_ext_ctl;
+ break;
+ case MSR_IA32_FEATURE_CONTROL:
+ msr_info->data = vmx->msr_ia32_feature_control;
+ break;
+ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+ if (!nested_vmx_allowed(vcpu))
+ return 1;
+ return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
+ &msr_info->data);
+ case MSR_IA32_XSS:
+ if (!vmx_xsaves_supported())
+ return 1;
+ msr_info->data = vcpu->arch.ia32_xss;
+ break;
+ case MSR_IA32_RTIT_CTL:
+ if (pt_mode != PT_MODE_HOST_GUEST)
+ return 1;
+ msr_info->data = vmx->pt_desc.guest.ctl;
+ break;
+ case MSR_IA32_RTIT_STATUS:
+ if (pt_mode != PT_MODE_HOST_GUEST)
+ return 1;
+ msr_info->data = vmx->pt_desc.guest.status;
+ break;
+ case MSR_IA32_RTIT_CR3_MATCH:
+ if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ !intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_cr3_filtering))
+ return 1;
+ msr_info->data = vmx->pt_desc.guest.cr3_match;
+ break;
+ case MSR_IA32_RTIT_OUTPUT_BASE:
+ if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ (!intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_topa_output) &&
+ !intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_single_range_output)))
+ return 1;
+ msr_info->data = vmx->pt_desc.guest.output_base;
+ break;
+ case MSR_IA32_RTIT_OUTPUT_MASK:
+ if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ (!intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_topa_output) &&
+ !intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_single_range_output)))
+ return 1;
+ msr_info->data = vmx->pt_desc.guest.output_mask;
+ break;
+ case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
+ index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
+ if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_num_address_ranges)))
+ return 1;
+ if (index % 2)
+ msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
+ else
+ msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
+ break;
+ case MSR_TSC_AUX:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
+ return 1;
+ /* Otherwise falls through */
+ default:
+ msr = find_msr_entry(vmx, msr_info->index);
+ if (msr) {
+ msr_info->data = msr->data;
+ break;
+ }
+ return kvm_get_msr_common(vcpu, msr_info);
+ }
+
+ return 0;
+}
+
+/*
+ * Writes msr value into the appropriate "register".
+ * Returns 0 on success, non-0 otherwise.
+ * Assumes vcpu_load() was already called.
+ */
+static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct shared_msr_entry *msr;
+ int ret = 0;
+ u32 msr_index = msr_info->index;
+ u64 data = msr_info->data;
+ u32 index;
+
+ switch (msr_index) {
+ case MSR_EFER:
+ ret = kvm_set_msr_common(vcpu, msr_info);
+ break;
+#ifdef CONFIG_X86_64
+ case MSR_FS_BASE:
+ vmx_segment_cache_clear(vmx);
+ vmcs_writel(GUEST_FS_BASE, data);
+ break;
+ case MSR_GS_BASE:
+ vmx_segment_cache_clear(vmx);
+ vmcs_writel(GUEST_GS_BASE, data);
+ break;
+ case MSR_KERNEL_GS_BASE:
+ vmx_write_guest_kernel_gs_base(vmx, data);
+ break;
+#endif
+ case MSR_IA32_SYSENTER_CS:
+ vmcs_write32(GUEST_SYSENTER_CS, data);
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ vmcs_writel(GUEST_SYSENTER_EIP, data);
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ vmcs_writel(GUEST_SYSENTER_ESP, data);
+ break;
+ case MSR_IA32_BNDCFGS:
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_MPX)))
+ return 1;
+ if (is_noncanonical_address(data & PAGE_MASK, vcpu) ||
+ (data & MSR_IA32_BNDCFGS_RSVD))
+ return 1;
+ vmcs_write64(GUEST_BNDCFGS, data);
+ break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+ return 1;
+
+ vmx->spec_ctrl = data;
+
+ if (!data)
+ break;
+
+ /*
+ * For non-nested:
+ * When it's written (to non-zero) for the first time, pass
+ * it through.
+ *
+ * For nested:
+ * The handling of the MSR bitmap for L2 guests is done in
+ * nested_vmx_merge_msr_bitmap. We should not touch the
+ * vmcs02.msr_bitmap here since it gets completely overwritten
+ * in the merging. We update the vmcs01 here for L1 as well
+ * since it will end up touching the MSR anyway now.
+ */
+ vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
+ MSR_IA32_SPEC_CTRL,
+ MSR_TYPE_RW);
+ break;
+ case MSR_IA32_PRED_CMD:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
+ return 1;
+
+ if (data & ~PRED_CMD_IBPB)
+ return 1;
+
+ if (!data)
+ break;
+
+ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+
+ /*
+ * For non-nested:
+ * When it's written (to non-zero) for the first time, pass
+ * it through.
+ *
+ * For nested:
+ * The handling of the MSR bitmap for L2 guests is done in
+ * nested_vmx_merge_msr_bitmap. We should not touch the
+ * vmcs02.msr_bitmap here since it gets completely overwritten
+ * in the merging.
+ */
+ vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
+ MSR_TYPE_W);
+ break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated)
+ return 1;
+ vmx->arch_capabilities = data;
+ break;
+ case MSR_IA32_CR_PAT:
+ if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
+ if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+ return 1;
+ vmcs_write64(GUEST_IA32_PAT, data);
+ vcpu->arch.pat = data;
+ break;
+ }
+ ret = kvm_set_msr_common(vcpu, msr_info);
+ break;
+ case MSR_IA32_TSC_ADJUST:
+ ret = kvm_set_msr_common(vcpu, msr_info);
+ break;
+ case MSR_IA32_MCG_EXT_CTL:
+ if ((!msr_info->host_initiated &&
+ !(to_vmx(vcpu)->msr_ia32_feature_control &
+ FEATURE_CONTROL_LMCE)) ||
+ (data & ~MCG_EXT_CTL_LMCE_EN))
+ return 1;
+ vcpu->arch.mcg_ext_ctl = data;
+ break;
+ case MSR_IA32_FEATURE_CONTROL:
+ if (!vmx_feature_control_msr_valid(vcpu, data) ||
+ (to_vmx(vcpu)->msr_ia32_feature_control &
+ FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
+ return 1;
+ vmx->msr_ia32_feature_control = data;
+ if (msr_info->host_initiated && data == 0)
+ vmx_leave_nested(vcpu);
+ break;
+ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+ if (!msr_info->host_initiated)
+ return 1; /* they are read-only */
+ if (!nested_vmx_allowed(vcpu))
+ return 1;
+ return vmx_set_vmx_msr(vcpu, msr_index, data);
+ case MSR_IA32_XSS:
+ if (!vmx_xsaves_supported())
+ return 1;
+			/*
+			 * The only bit defined as of Skylake is bit 8
+			 * (PT state), which KVM does not support.
+			 */
+ if (data != 0)
+ return 1;
+ vcpu->arch.ia32_xss = data;
+ if (vcpu->arch.ia32_xss != host_xss)
+ add_atomic_switch_msr(vmx, MSR_IA32_XSS,
+ vcpu->arch.ia32_xss, host_xss, false);
+ else
+ clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
+ break;
+ case MSR_IA32_RTIT_CTL:
+ if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ vmx_rtit_ctl_check(vcpu, data) ||
+ vmx->nested.vmxon)
+ return 1;
+ vmcs_write64(GUEST_IA32_RTIT_CTL, data);
+ vmx->pt_desc.guest.ctl = data;
+ pt_update_intercept_for_msr(vmx);
+ break;
+ case MSR_IA32_RTIT_STATUS:
+ if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
+ (data & MSR_IA32_RTIT_STATUS_MASK))
+ return 1;
+ vmx->pt_desc.guest.status = data;
+ break;
+ case MSR_IA32_RTIT_CR3_MATCH:
+ if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
+ !intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_cr3_filtering))
+ return 1;
+ vmx->pt_desc.guest.cr3_match = data;
+ break;
+ case MSR_IA32_RTIT_OUTPUT_BASE:
+ if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
+ (!intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_topa_output) &&
+ !intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_single_range_output)) ||
+ (data & MSR_IA32_RTIT_OUTPUT_BASE_MASK))
+ return 1;
+ vmx->pt_desc.guest.output_base = data;
+ break;
+ case MSR_IA32_RTIT_OUTPUT_MASK:
+ if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
+ (!intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_topa_output) &&
+ !intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_single_range_output)))
+ return 1;
+ vmx->pt_desc.guest.output_mask = data;
+ break;
+ case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
+ index = msr_info->index - MSR_IA32_RTIT_ADDR0_A;
+ if ((pt_mode != PT_MODE_HOST_GUEST) ||
+ (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) ||
+ (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_num_address_ranges)))
+ return 1;
+ if (index % 2)
+ vmx->pt_desc.guest.addr_b[index / 2] = data;
+ else
+ vmx->pt_desc.guest.addr_a[index / 2] = data;
+ break;
+ case MSR_TSC_AUX:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
+ return 1;
+		/* Check reserved bits; the upper 32 bits must be zero. */
+ if ((data >> 32) != 0)
+ return 1;
+ /* Otherwise falls through */
+ default:
+ msr = find_msr_entry(vmx, msr_index);
+ if (msr) {
+ u64 old_msr_data = msr->data;
+ msr->data = data;
+ if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+ preempt_disable();
+ ret = kvm_set_shared_msr(msr->index, msr->data,
+ msr->mask);
+ preempt_enable();
+ if (ret)
+ msr->data = old_msr_data;
+ }
+ break;
+ }
+ ret = kvm_set_msr_common(vcpu, msr_info);
+ }
+
+ return ret;
+}
+
+static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
+{
+ __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ switch (reg) {
+ case VCPU_REGS_RSP:
+ vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
+ break;
+ case VCPU_REGS_RIP:
+ vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
+ break;
+ case VCPU_EXREG_PDPTR:
+ if (enable_ept)
+ ept_save_pdptrs(vcpu);
+ break;
+ default:
+ break;
+ }
+}
+
+static __init int cpu_has_kvm_support(void)
+{
+ return cpu_has_vmx();
+}
+
+static __init int vmx_disabled_by_bios(void)
+{
+ u64 msr;
+
+ rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
+ if (msr & FEATURE_CONTROL_LOCKED) {
+ /* launched w/ TXT and VMX disabled */
+ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
+ && tboot_enabled())
+ return 1;
+ /* launched w/o TXT and VMX only enabled w/ TXT */
+ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+ && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
+ && !tboot_enabled()) {
+ printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
+ "activate TXT before enabling KVM\n");
+ return 1;
+ }
+ /* launched w/o TXT and VMX disabled */
+ if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+ && !tboot_enabled())
+ return 1;
+ }
+
+ return 0;
+}
+
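+/*
+ * Enter VMX operation: CR4.VMXE must be set before VMXON, and the Intel
+ * PT driver is notified of the transition.
+ */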
+static void kvm_cpu_vmxon(u64 addr)
+{
+ cr4_set_bits(X86_CR4_VMXE);
+ intel_pt_handle_vmx(1);
+
+ asm volatile ("vmxon %0" : : "m"(addr));
+}
+
+static int hardware_enable(void)
+{
+ int cpu = raw_smp_processor_id();
+ u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
+ u64 old, test_bits;
+
+ if (cr4_read_shadow() & X86_CR4_VMXE)
+ return -EBUSY;
+
+ /*
+ * This can happen if we hot-added a CPU but failed to allocate
+ * VP assist page for it.
+ */
+ if (static_branch_unlikely(&enable_evmcs) &&
+ !hv_get_vp_assist_page(cpu))
+ return -EFAULT;
+
+ INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
+ INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
+ spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+
+ /*
+ * Now we can enable the vmclear operation in kdump
+ * since the loaded_vmcss_on_cpu list on this cpu
+ * has been initialized.
+ *
+	 * Though the cpu is not in VMX operation now, there
+	 * is no problem in enabling the vmclear operation,
+	 * since the loaded_vmcss_on_cpu list is empty!
+ */
+ crash_enable_local_vmclear(cpu);
+
+ rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
+
+ test_bits = FEATURE_CONTROL_LOCKED;
+ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+ if (tboot_enabled())
+ test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
+
+ if ((old & test_bits) != test_bits) {
+ /* enable and lock */
+ wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
+ }
+ kvm_cpu_vmxon(phys_addr);
+ if (enable_ept)
+ ept_sync_global();
+
+ return 0;
+}
+
+static void vmclear_local_loaded_vmcss(void)
+{
+ int cpu = raw_smp_processor_id();
+ struct loaded_vmcs *v, *n;
+
+ list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
+ loaded_vmcss_on_cpu_link)
+ __loaded_vmcs_clear(v);
+}
+
+/*
+ * Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
+ * tricks.
+ */
+static void kvm_cpu_vmxoff(void)
+{
+ asm volatile (__ex("vmxoff"));
+
+ intel_pt_handle_vmx(0);
+ cr4_clear_bits(X86_CR4_VMXE);
+}
+
+static void hardware_disable(void)
+{
+ vmclear_local_loaded_vmcss();
+ kvm_cpu_vmxoff();
+}
+
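+/*
+ * Compute a usable control value from the capability MSR: the low word
+ * gives bits that must be 1, the high word gives bits that may be 1.
+ * Fails if any bit in ctl_min is not allowed to be set.
+ */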
+static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
+ u32 msr, u32 *result)
+{
+ u32 vmx_msr_low, vmx_msr_high;
+ u32 ctl = ctl_min | ctl_opt;
+
+ rdmsr(msr, vmx_msr_low, vmx_msr_high);
+
+ ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
+ ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
+
+ /* Ensure minimum (required) set of control bits are supported. */
+ if (ctl_min & ~ctl)
+ return -EIO;
+
+ *result = ctl;
+ return 0;
+}
+
+static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
+ struct vmx_capability *vmx_cap)
+{
+ u32 vmx_msr_low, vmx_msr_high;
+ u32 min, opt, min2, opt2;
+ u32 _pin_based_exec_control = 0;
+ u32 _cpu_based_exec_control = 0;
+ u32 _cpu_based_2nd_exec_control = 0;
+ u32 _vmexit_control = 0;
+ u32 _vmentry_control = 0;
+
+ memset(vmcs_conf, 0, sizeof(*vmcs_conf));
+ min = CPU_BASED_HLT_EXITING |
+#ifdef CONFIG_X86_64
+ CPU_BASED_CR8_LOAD_EXITING |
+ CPU_BASED_CR8_STORE_EXITING |
+#endif
+ CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_CR3_STORE_EXITING |
+ CPU_BASED_UNCOND_IO_EXITING |
+ CPU_BASED_MOV_DR_EXITING |
+ CPU_BASED_USE_TSC_OFFSETING |
+ CPU_BASED_MWAIT_EXITING |
+ CPU_BASED_MONITOR_EXITING |
+ CPU_BASED_INVLPG_EXITING |
+ CPU_BASED_RDPMC_EXITING;
+
+ opt = CPU_BASED_TPR_SHADOW |
+ CPU_BASED_USE_MSR_BITMAPS |
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
+ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
+ &_cpu_based_exec_control) < 0)
+ return -EIO;
+#ifdef CONFIG_X86_64
+ if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
+ _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
+ ~CPU_BASED_CR8_STORE_EXITING;
+#endif
+ if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
+ min2 = 0;
+ opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ SECONDARY_EXEC_WBINVD_EXITING |
+ SECONDARY_EXEC_ENABLE_VPID |
+ SECONDARY_EXEC_ENABLE_EPT |
+ SECONDARY_EXEC_UNRESTRICTED_GUEST |
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING |
+ SECONDARY_EXEC_DESC |
+ SECONDARY_EXEC_RDTSCP |
+ SECONDARY_EXEC_ENABLE_INVPCID |
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
+ SECONDARY_EXEC_SHADOW_VMCS |
+ SECONDARY_EXEC_XSAVES |
+ SECONDARY_EXEC_RDSEED_EXITING |
+ SECONDARY_EXEC_RDRAND_EXITING |
+ SECONDARY_EXEC_ENABLE_PML |
+ SECONDARY_EXEC_TSC_SCALING |
+ SECONDARY_EXEC_PT_USE_GPA |
+ SECONDARY_EXEC_PT_CONCEAL_VMX |
+ SECONDARY_EXEC_ENABLE_VMFUNC |
+ SECONDARY_EXEC_ENCLS_EXITING;
+ if (adjust_vmx_controls(min2, opt2,
+ MSR_IA32_VMX_PROCBASED_CTLS2,
+ &_cpu_based_2nd_exec_control) < 0)
+ return -EIO;
+ }
+#ifndef CONFIG_X86_64
+ if (!(_cpu_based_2nd_exec_control &
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+ _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
+#endif
+
+ if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
+ _cpu_based_2nd_exec_control &= ~(
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+
+ rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
+ &vmx_cap->ept, &vmx_cap->vpid);
+
+ if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
+ /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
+ enabled */
+ _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_CR3_STORE_EXITING |
+ CPU_BASED_INVLPG_EXITING);
+ } else if (vmx_cap->ept) {
+ vmx_cap->ept = 0;
+ pr_warn_once("EPT CAP should not exist if not support "
+ "1-setting enable EPT VM-execution control\n");
+ }
+ if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
+ vmx_cap->vpid) {
+ vmx_cap->vpid = 0;
+ pr_warn_once("VPID CAP should not exist if not support "
+ "1-setting enable VPID VM-execution control\n");
+ }
+
+ min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
+#ifdef CONFIG_X86_64
+ min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
+#endif
+ opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VM_EXIT_SAVE_IA32_PAT |
+ VM_EXIT_LOAD_IA32_PAT |
+ VM_EXIT_LOAD_IA32_EFER |
+ VM_EXIT_CLEAR_BNDCFGS |
+ VM_EXIT_PT_CONCEAL_PIP |
+ VM_EXIT_CLEAR_IA32_RTIT_CTL;
+ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
+ &_vmexit_control) < 0)
+ return -EIO;
+
+ min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
+ opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
+ PIN_BASED_VMX_PREEMPTION_TIMER;
+ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
+ &_pin_based_exec_control) < 0)
+ return -EIO;
+
+ if (cpu_has_broken_vmx_preemption_timer())
+ _pin_based_exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ if (!(_cpu_based_2nd_exec_control &
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
+ _pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
+
+ min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
+ opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VM_ENTRY_LOAD_IA32_PAT |
+ VM_ENTRY_LOAD_IA32_EFER |
+ VM_ENTRY_LOAD_BNDCFGS |
+ VM_ENTRY_PT_CONCEAL_PIP |
+ VM_ENTRY_LOAD_IA32_RTIT_CTL;
+ if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
+ &_vmentry_control) < 0)
+ return -EIO;
+
+ /*
+ * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
+ * can't be used due to an errata where VM Exit may incorrectly clear
+ * IA32_PERF_GLOBAL_CTRL[34:32]. Workaround the errata by using the
+ * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
+ */
+ if (boot_cpu_data.x86 == 0x6) {
+ switch (boot_cpu_data.x86_model) {
+ case 26: /* AAK155 */
+ case 30: /* AAP115 */
+ case 37: /* AAT100 */
+ case 44: /* BC86,AAY89,BD102 */
+ case 46: /* BA97 */
+ _vmexit_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+ pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
+ "does not work properly. Using workaround\n");
+ break;
+ default:
+ break;
+ }
+ }
+
+ rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
+
+ /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
+ if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
+ return -EIO;
+
+#ifdef CONFIG_X86_64
+ /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
+ if (vmx_msr_high & (1u<<16))
+ return -EIO;
+#endif
+
+ /* Require Write-Back (WB) memory type for VMCS accesses. */
+ if (((vmx_msr_high >> 18) & 15) != 6)
+ return -EIO;
+
+ vmcs_conf->size = vmx_msr_high & 0x1fff;
+ vmcs_conf->order = get_order(vmcs_conf->size);
+ vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
+
+ vmcs_conf->revision_id = vmx_msr_low;
+
+ vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
+ vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
+ vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
+ vmcs_conf->vmexit_ctrl = _vmexit_control;
+ vmcs_conf->vmentry_ctrl = _vmentry_control;
+
+ if (static_branch_unlikely(&enable_evmcs))
+ evmcs_sanitize_exec_ctrls(vmcs_conf);
+
+ return 0;
+}
+
+struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu)
+{
+ int node = cpu_to_node(cpu);
+ struct page *pages;
+ struct vmcs *vmcs;
+
+ pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
+ if (!pages)
+ return NULL;
+ vmcs = page_address(pages);
+ memset(vmcs, 0, vmcs_config.size);
+
+ /* KVM supports Enlightened VMCS v1 only */
+ if (static_branch_unlikely(&enable_evmcs))
+ vmcs->hdr.revision_id = KVM_EVMCS_VERSION;
+ else
+ vmcs->hdr.revision_id = vmcs_config.revision_id;
+
+ if (shadow)
+ vmcs->hdr.shadow_vmcs = 1;
+ return vmcs;
+}
+
+void free_vmcs(struct vmcs *vmcs)
+{
+ free_pages((unsigned long)vmcs, vmcs_config.order);
+}
+
+/*
+ * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
+ */
+void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
+{
+ if (!loaded_vmcs->vmcs)
+ return;
+ loaded_vmcs_clear(loaded_vmcs);
+ free_vmcs(loaded_vmcs->vmcs);
+ loaded_vmcs->vmcs = NULL;
+ if (loaded_vmcs->msr_bitmap)
+ free_page((unsigned long)loaded_vmcs->msr_bitmap);
+ WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
+}
+
+int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
+{
+ loaded_vmcs->vmcs = alloc_vmcs(false);
+ if (!loaded_vmcs->vmcs)
+ return -ENOMEM;
+
+ loaded_vmcs->shadow_vmcs = NULL;
+ loaded_vmcs_init(loaded_vmcs);
+
+ if (cpu_has_vmx_msr_bitmap()) {
+ loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!loaded_vmcs->msr_bitmap)
+ goto out_vmcs;
+ memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
+
+ if (IS_ENABLED(CONFIG_HYPERV) &&
+ static_branch_unlikely(&enable_evmcs) &&
+ (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)) {
+ struct hv_enlightened_vmcs *evmcs =
+ (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs;
+
+ evmcs->hv_enlightenments_control.msr_bitmap = 1;
+ }
+ }
+
+ memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
+
+ return 0;
+
+out_vmcs:
+ free_loaded_vmcs(loaded_vmcs);
+ return -ENOMEM;
+}
+
+static void free_kvm_area(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ free_vmcs(per_cpu(vmxarea, cpu));
+ per_cpu(vmxarea, cpu) = NULL;
+ }
+}
+
+static __init int alloc_kvm_area(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct vmcs *vmcs;
+
+ vmcs = alloc_vmcs_cpu(false, cpu);
+ if (!vmcs) {
+ free_kvm_area();
+ return -ENOMEM;
+ }
+
+ /*
+ * When eVMCS is enabled, alloc_vmcs_cpu() sets
+ * vmcs->revision_id to KVM_EVMCS_VERSION instead of
+ * revision_id reported by MSR_IA32_VMX_BASIC.
+ *
+ * However, even though not explicitly documented by
+ * TLFS, VMXArea passed as VMXON argument should
+ * still be marked with revision_id reported by
+ * physical CPU.
+ */
+ if (static_branch_unlikely(&enable_evmcs))
+ vmcs->hdr.revision_id = vmcs_config.revision_id;
+
+ per_cpu(vmxarea, cpu) = vmcs;
+ }
+ return 0;
+}
+
+static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
+ struct kvm_segment *save)
+{
+ if (!emulate_invalid_guest_state) {
+ /*
+ * CS and SS RPL should be equal during guest entry according
+ * to VMX spec, but in reality it is not always so. Since vcpu
+ * is in the middle of the transition from real mode to
+ * protected mode it is safe to assume that RPL 0 is a good
+ * default value.
+ */
+ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
+ save->selector &= ~SEGMENT_RPL_MASK;
+ save->dpl = save->selector & SEGMENT_RPL_MASK;
+ save->s = 1;
+ }
+ vmx_set_segment(vcpu, save, seg);
+}
+
+static void enter_pmode(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * Update the real mode segment cache.  It may not be up-to-date if a
+	 * segment register was written while the vcpu was in guest mode.
+	 */
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
+
+ vmx->rmode.vm86_active = 0;
+
+ vmx_segment_cache_clear(vmx);
+
+ vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
+
+ flags = vmcs_readl(GUEST_RFLAGS);
+ flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+ flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+ vmcs_writel(GUEST_RFLAGS, flags);
+
+ vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
+ (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
+
+ update_exception_bitmap(vcpu);
+
+ fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
+ fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
+ fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
+ fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
+ fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
+ fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
+}
+
+static void fix_rmode_seg(int seg, struct kvm_segment *save)
+{
+ const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+ struct kvm_segment var = *save;
+
+ var.dpl = 0x3;
+ if (seg == VCPU_SREG_CS)
+ var.type = 0x3;
+
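+	/*
+	 * Make the segment look like a real-mode one: selector = base >> 4,
+	 * 64K limit, DPL 3, read/write type, present.
+	 */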
+ if (!emulate_invalid_guest_state) {
+ var.selector = var.base >> 4;
+ var.base = var.base & 0xffff0;
+ var.limit = 0xffff;
+ var.g = 0;
+ var.db = 0;
+ var.present = 1;
+ var.s = 1;
+ var.l = 0;
+ var.unusable = 0;
+ var.type = 0x3;
+ var.avl = 0;
+ if (save->base & 0xf)
+ printk_once(KERN_WARNING "kvm: segment base is not "
+ "paragraph aligned when entering "
+ "protected mode (seg=%d)", seg);
+ }
+
+ vmcs_write16(sf->selector, var.selector);
+ vmcs_writel(sf->base, var.base);
+ vmcs_write32(sf->limit, var.limit);
+ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
+}
+
+static void enter_rmode(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm);
+
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
+ vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
+
+ vmx->rmode.vm86_active = 1;
+
+ /*
+ * Very old userspace does not call KVM_SET_TSS_ADDR before entering
+ * vcpu. Warn the user that an update is overdue.
+ */
+ if (!kvm_vmx->tss_addr)
+ printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be "
+ "called before entering vcpu\n");
+
+ vmx_segment_cache_clear(vmx);
+
+ vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
+ vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
+ vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
+
+ flags = vmcs_readl(GUEST_RFLAGS);
+ vmx->rmode.save_rflags = flags;
+
+ flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+
+ vmcs_writel(GUEST_RFLAGS, flags);
+ vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
+ update_exception_bitmap(vcpu);
+
+ fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
+ fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
+ fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
+ fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
+ fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
+ fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
+
+ kvm_mmu_reset_context(vcpu);
+}
+
+void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+
+ if (!msr)
+ return;
+
+ vcpu->arch.efer = efer;
+ if (efer & EFER_LMA) {
+ vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
+ msr->data = efer;
+ } else {
+ vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
+
+ msr->data = efer & ~EFER_LME;
+ }
+ setup_msrs(vmx);
+}
+
+#ifdef CONFIG_X86_64
+
+static void enter_lmode(struct kvm_vcpu *vcpu)
+{
+ u32 guest_tr_ar;
+
+ vmx_segment_cache_clear(to_vmx(vcpu));
+
+ guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
+ if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
+ pr_debug_ratelimited("%s: tss fixup for long mode.\n",
+ __func__);
+ vmcs_write32(GUEST_TR_AR_BYTES,
+ (guest_tr_ar & ~VMX_AR_TYPE_MASK)
+ | VMX_AR_TYPE_BUSY_64_TSS);
+ }
+ vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
+}
+
+static void exit_lmode(struct kvm_vcpu *vcpu)
+{
+ vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
+ vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
+}
+
+#endif
+
+static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
+{
+ int vpid = to_vmx(vcpu)->vpid;
+
+ if (!vpid_sync_vcpu_addr(vpid, addr))
+ vpid_sync_context(vpid);
+
+ /*
+ * If VPIDs are not supported or enabled, then the above is a no-op.
+ * But we don't really need a TLB flush in that case anyway, because
+ * each VM entry/exit includes an implicit flush when VPID is 0.
+ */
+}
+
+static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+{
+ ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
+
+ vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
+ vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
+}
+
+static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
+{
+ if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
+ vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+}
+
+static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
+{
+ ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
+
+ vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
+ vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
+}
+
+static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
+ if (!test_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_dirty))
+ return;
+
+ if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
+ vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
+ vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
+ vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
+ vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
+ }
+}
+
+void ept_save_pdptrs(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
+ if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
+ mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+ mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+ mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+ mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
+ }
+
+ __set_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_avail);
+ __set_bit(VCPU_EXREG_PDPTR,
+ (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
+ unsigned long cr0,
+ struct kvm_vcpu *vcpu)
+{
+ if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+ vmx_decache_cr3(vcpu);
+ if (!(cr0 & X86_CR0_PG)) {
+ /* From paging/starting to nonpaging */
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
+ vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
+ (CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_CR3_STORE_EXITING));
+ vcpu->arch.cr0 = cr0;
+ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
+ } else if (!is_paging(vcpu)) {
+ /* From nonpaging to paging */
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
+ vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
+ ~(CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_CR3_STORE_EXITING));
+ vcpu->arch.cr0 = cr0;
+ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
+ }
+
+ if (!(cr0 & X86_CR0_WP))
+ *hw_cr0 &= ~X86_CR0_WP;
+}
+
+void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long hw_cr0;
+
+ hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
+ if (enable_unrestricted_guest)
+ hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
+ else {
+ hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
+
+ if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
+ enter_pmode(vcpu);
+
+ if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
+ enter_rmode(vcpu);
+ }
+
+#ifdef CONFIG_X86_64
+ if (vcpu->arch.efer & EFER_LME) {
+ if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
+ enter_lmode(vcpu);
+ if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
+ exit_lmode(vcpu);
+ }
+#endif
+
+ if (enable_ept && !enable_unrestricted_guest)
+ ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
+
+ vmcs_writel(CR0_READ_SHADOW, cr0);
+ vmcs_writel(GUEST_CR0, hw_cr0);
+ vcpu->arch.cr0 = cr0;
+
+ /* depends on vcpu->arch.cr0 to be set to a new value */
+ vmx->emulation_required = emulation_required(vcpu);
+}
+
+static int get_ept_level(struct kvm_vcpu *vcpu)
+{
+ if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
+ return 5;
+ return 4;
+}
+
+u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
+{
+ u64 eptp = VMX_EPTP_MT_WB;
+
+ eptp |= (get_ept_level(vcpu) == 5) ? VMX_EPTP_PWL_5 : VMX_EPTP_PWL_4;
+
+ if (enable_ept_ad_bits &&
+ (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
+ eptp |= VMX_EPTP_AD_ENABLE_BIT;
+ eptp |= (root_hpa & PAGE_MASK);
+
+ return eptp;
+}
+
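+/*
+ * Worked example for construct_eptp(), using the standard EPTP encoding
+ * (memory type in bits 2:0, page-walk length minus 1 in bits 5:3,
+ * A/D-enable in bit 6): for a 4-level walk with write-back memory type,
+ * A/D bits enabled and root_hpa = 0x12345000,
+ *
+ *   eptp = 0x12345000 | 0x06 | 0x18 | 0x40 = 0x1234505e.
+ */
+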
+void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long guest_cr3;
+ u64 eptp;
+
+ guest_cr3 = cr3;
+ if (enable_ept) {
+ eptp = construct_eptp(vcpu, cr3);
+ vmcs_write64(EPT_POINTER, eptp);
+
+ if (kvm_x86_ops->tlb_remote_flush) {
+ spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+ to_vmx(vcpu)->ept_pointer = eptp;
+ to_kvm_vmx(kvm)->ept_pointers_match
+ = EPT_POINTERS_CHECK;
+ spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+ }
+
+ if (enable_unrestricted_guest || is_paging(vcpu) ||
+ is_guest_mode(vcpu))
+ guest_cr3 = kvm_read_cr3(vcpu);
+ else
+ guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
+ ept_load_pdptrs(vcpu);
+ }
+
+ vmcs_writel(GUEST_CR3, guest_cr3);
+}
+
+int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+ /*
+ * Pass through host's Machine Check Enable value to hw_cr4, which
+ * is in force while we are in guest mode. Do not let guests control
+ * this bit, even if host CR4.MCE == 0.
+ */
+ unsigned long hw_cr4;
+
+ hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE);
+ if (enable_unrestricted_guest)
+ hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST;
+ else if (to_vmx(vcpu)->rmode.vm86_active)
+ hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON;
+ else
+ hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
+
+ if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
+ if (cr4 & X86_CR4_UMIP) {
+ vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_DESC);
+ hw_cr4 &= ~X86_CR4_UMIP;
+ } else if (!is_guest_mode(vcpu) ||
+ !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
+ vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_DESC);
+ }
+
+ if (cr4 & X86_CR4_VMXE) {
+ /*
+ * To use VMXON (and later other VMX instructions), a guest
+ * must first be able to turn on cr4.VMXE (see handle_vmon()),
+ * so the check on whether to allow nested VMX is made here.
+ * We operate under the default treatment of SMM, so VMX
+ * cannot be enabled under SMM.
+ */
+ if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
+ return 1;
+ }
+
+ if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
+ return 1;
+
+ vcpu->arch.cr4 = cr4;
+
+ if (!enable_unrestricted_guest) {
+ if (enable_ept) {
+ if (!is_paging(vcpu)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ hw_cr4 |= X86_CR4_PSE;
+ } else if (!(cr4 & X86_CR4_PAE)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ }
+ }
+
+ /*
+ * SMEP/SMAP/PKU are disabled if the CPU is in non-paging mode
+ * in hardware. To emulate this behavior, SMEP/SMAP/PKU need to
+ * be manually disabled when the guest switches to non-paging
+ * mode.
+ *
+ * If !enable_unrestricted_guest, the CPU is always running
+ * with CR0.PG=1 and CR4 needs to be modified.
+ * If enable_unrestricted_guest, the CPU automatically
+ * disables SMEP/SMAP/PKU when the guest sets CR0.PG=0.
+ */
+ if (!is_paging(vcpu))
+ hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
+ }
+
+ vmcs_writel(CR4_READ_SHADOW, cr4);
+ vmcs_writel(GUEST_CR4, hw_cr4);
+ return 0;
+}
+
+void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 ar;
+
+ if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
+ *var = vmx->rmode.segs[seg];
+ if (seg == VCPU_SREG_TR
+ || var->selector == vmx_read_guest_seg_selector(vmx, seg))
+ return;
+ var->base = vmx_read_guest_seg_base(vmx, seg);
+ var->selector = vmx_read_guest_seg_selector(vmx, seg);
+ return;
+ }
+ var->base = vmx_read_guest_seg_base(vmx, seg);
+ var->limit = vmx_read_guest_seg_limit(vmx, seg);
+ var->selector = vmx_read_guest_seg_selector(vmx, seg);
+ ar = vmx_read_guest_seg_ar(vmx, seg);
+ var->unusable = (ar >> 16) & 1;
+ var->type = ar & 15;
+ var->s = (ar >> 4) & 1;
+ var->dpl = (ar >> 5) & 3;
+ /*
+ * Some userspaces do not preserve the unusable property. Since a
+ * usable segment has to be present according to the VMX spec, we
+ * can use the present property to work around that userspace bug
+ * by making an unusable segment always non-present.
+ * vmx_segment_access_rights() already marks a non-present segment
+ * as unusable.
+ */
+ var->present = !var->unusable;
+ var->avl = (ar >> 12) & 1;
+ var->l = (ar >> 13) & 1;
+ var->db = (ar >> 14) & 1;
+ var->g = (ar >> 15) & 1;
+}
+
+static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+{
+ struct kvm_segment s;
+
+ if (to_vmx(vcpu)->rmode.vm86_active) {
+ vmx_get_segment(vcpu, &s, seg);
+ return s.base;
+ }
+ return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
+}
+
+int vmx_get_cpl(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (unlikely(vmx->rmode.vm86_active))
+ return 0;
+ else {
+ int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
+ return VMX_AR_DPL(ar);
+ }
+}
+
+static u32 vmx_segment_access_rights(struct kvm_segment *var)
+{
+ u32 ar;
+
+ if (var->unusable || !var->present)
+ ar = 1 << 16;
+ else {
+ ar = var->type & 15;
+ ar |= (var->s & 1) << 4;
+ ar |= (var->dpl & 3) << 5;
+ ar |= (var->present & 1) << 7;
+ ar |= (var->avl & 1) << 12;
+ ar |= (var->l & 1) << 13;
+ ar |= (var->db & 1) << 14;
+ ar |= (var->g & 1) << 15;
+ }
+
+ return ar;
+}
+
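+/*
+ * Worked example of the packing done by vmx_segment_access_rights()
+ * above: a flat 32-bit code segment (type 0xb = execute/read, accessed;
+ * s=1; dpl=0; present=1; avl=0; l=0; db=1; g=1) encodes as
+ *
+ *   ar = 0xb | (1 << 4) | (1 << 7) | (1 << 14) | (1 << 15) = 0xc09b,
+ *
+ * the canonical VMX access-rights value for such a segment.
+ */
+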
+void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+
+ vmx_segment_cache_clear(vmx);
+
+ if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
+ vmx->rmode.segs[seg] = *var;
+ if (seg == VCPU_SREG_TR)
+ vmcs_write16(sf->selector, var->selector);
+ else if (var->s)
+ fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
+ goto out;
+ }
+
+ vmcs_writel(sf->base, var->base);
+ vmcs_write32(sf->limit, var->limit);
+ vmcs_write16(sf->selector, var->selector);
+
+ /*
+ * Fix the "Accessed" bit in the AR field of segment registers for
+ * older qemu binaries.
+ * The IA-32 architecture specifies that, at processor reset, the
+ * "Accessed" bit in the AR field of segment registers is 1, but
+ * qemu sets it to 0 in its userland code. This causes an
+ * invalid-guest-state vmexit when "unrestricted guest" mode is
+ * turned on.
+ * A fix for this setup issue in cpu_reset is being pushed into the
+ * qemu tree. Newer qemu binaries with that fix will not need this
+ * kvm hack.
+ */
+ if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
+ var->type |= 0x1; /* Accessed */
+
+ vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
+
+out:
+ vmx->emulation_required = emulation_required(vcpu);
+}
+
+static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
+{
+ u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
+
+ *db = (ar >> 14) & 1;
+ *l = (ar >> 13) & 1;
+}
+
+static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+ dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
+ dt->address = vmcs_readl(GUEST_IDTR_BASE);
+}
+
+static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+ vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
+ vmcs_writel(GUEST_IDTR_BASE, dt->address);
+}
+
+static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+ dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
+ dt->address = vmcs_readl(GUEST_GDTR_BASE);
+}
+
+static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
+{
+ vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
+ vmcs_writel(GUEST_GDTR_BASE, dt->address);
+}
+
+static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
+{
+ struct kvm_segment var;
+ u32 ar;
+
+ vmx_get_segment(vcpu, &var, seg);
+ var.dpl = 0x3;
+ if (seg == VCPU_SREG_CS)
+ var.type = 0x3;
+ ar = vmx_segment_access_rights(&var);
+
+ if (var.base != (var.selector << 4))
+ return false;
+ if (var.limit != 0xffff)
+ return false;
+ if (ar != 0xf3)
+ return false;
+
+ return true;
+}
+
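+/*
+ * The ar == 0xf3 check above is the access-rights value produced for a
+ * canonical real-mode segment: type 3 (0x03) | s (0x10) |
+ * dpl 3 (3 << 5 = 0x60) | present (0x80) = 0xf3.
+ */
+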
+static bool code_segment_valid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment cs;
+ unsigned int cs_rpl;
+
+ vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+ cs_rpl = cs.selector & SEGMENT_RPL_MASK;
+
+ if (cs.unusable)
+ return false;
+ if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
+ return false;
+ if (!cs.s)
+ return false;
+ if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
+ if (cs.dpl > cs_rpl)
+ return false;
+ } else {
+ if (cs.dpl != cs_rpl)
+ return false;
+ }
+ if (!cs.present)
+ return false;
+
+ /*
+ * TODO: Add a reserved-field check; this will require a new member
+ * in the kvm_segment_field structure.
+ */
+ return true;
+}
+
+static bool stack_segment_valid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment ss;
+ unsigned int ss_rpl;
+
+ vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
+ ss_rpl = ss.selector & SEGMENT_RPL_MASK;
+
+ if (ss.unusable)
+ return true;
+ if (ss.type != 3 && ss.type != 7)
+ return false;
+ if (!ss.s)
+ return false;
+ if (ss.dpl != ss_rpl) /* DPL != RPL */
+ return false;
+ if (!ss.present)
+ return false;
+
+ return true;
+}
+
+static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
+{
+ struct kvm_segment var;
+ unsigned int rpl;
+
+ vmx_get_segment(vcpu, &var, seg);
+ rpl = var.selector & SEGMENT_RPL_MASK;
+
+ if (var.unusable)
+ return true;
+ if (!var.s)
+ return false;
+ if (!var.present)
+ return false;
+ if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
+ if (var.dpl < rpl) /* DPL < RPL */
+ return false;
+ }
+
+ /*
+ * TODO: Add other members to kvm_segment_field to allow checking
+ * for other access-rights flags.
+ */
+ return true;
+}
+
+static bool tr_valid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment tr;
+
+ vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
+
+ if (tr.unusable)
+ return false;
+ if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */
+ return false;
+ if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
+ return false;
+ if (!tr.present)
+ return false;
+
+ return true;
+}
+
+static bool ldtr_valid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment ldtr;
+
+ vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
+
+ if (ldtr.unusable)
+ return true;
+ if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */
+ return false;
+ if (ldtr.type != 2)
+ return false;
+ if (!ldtr.present)
+ return false;
+
+ return true;
+}
+
+static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
+{
+ struct kvm_segment cs, ss;
+
+ vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
+ vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
+
+ return ((cs.selector & SEGMENT_RPL_MASK) ==
+ (ss.selector & SEGMENT_RPL_MASK));
+}
+
+/*
+ * Check if the guest state is valid. Returns true if valid, false if not.
+ * We assume that registers are always usable.
+ */
+static bool guest_state_valid(struct kvm_vcpu *vcpu)
+{
+ if (enable_unrestricted_guest)
+ return true;
+
+ /* real mode guest state checks */
+ if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
+ if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
+ return false;
+ if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
+ return false;
+ if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
+ return false;
+ if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
+ return false;
+ if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
+ return false;
+ if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
+ return false;
+ } else {
+ /* protected mode guest state checks */
+ if (!cs_ss_rpl_check(vcpu))
+ return false;
+ if (!code_segment_valid(vcpu))
+ return false;
+ if (!stack_segment_valid(vcpu))
+ return false;
+ if (!data_segment_valid(vcpu, VCPU_SREG_DS))
+ return false;
+ if (!data_segment_valid(vcpu, VCPU_SREG_ES))
+ return false;
+ if (!data_segment_valid(vcpu, VCPU_SREG_FS))
+ return false;
+ if (!data_segment_valid(vcpu, VCPU_SREG_GS))
+ return false;
+ if (!tr_valid(vcpu))
+ return false;
+ if (!ldtr_valid(vcpu))
+ return false;
+ }
+ /* TODO:
+ * - Add checks on RIP
+ * - Add checks on RFLAGS
+ */
+
+ return true;
+}
+
+static int init_rmode_tss(struct kvm *kvm)
+{
+ gfn_t fn;
+ u16 data = 0;
+ int idx, r;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT;
+ r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
+ if (r < 0)
+ goto out;
+ data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
+ r = kvm_write_guest_page(kvm, fn++, &data,
+ TSS_IOPB_BASE_OFFSET, sizeof(u16));
+ if (r < 0)
+ goto out;
+ r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
+ if (r < 0)
+ goto out;
+ r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
+ if (r < 0)
+ goto out;
+ data = ~0;
+ r = kvm_write_guest_page(kvm, fn, &data,
+ RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
+ sizeof(u8));
+out:
+ srcu_read_unlock(&kvm->srcu, idx);
+ return r;
+}
+
+static int init_rmode_identity_map(struct kvm *kvm)
+{
+ struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
+ int i, idx, r = 0;
+ kvm_pfn_t identity_map_pfn;
+ u32 tmp;
+
+ /* Protect kvm_vmx->ept_identity_pagetable_done. */
+ mutex_lock(&kvm->slots_lock);
+
+ if (likely(kvm_vmx->ept_identity_pagetable_done))
+ goto out2;
+
+ if (!kvm_vmx->ept_identity_map_addr)
+ kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
+ identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT;
+
+ r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+ kvm_vmx->ept_identity_map_addr, PAGE_SIZE);
+ if (r < 0)
+ goto out2;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
+ if (r < 0)
+ goto out;
+ /* Set up identity-mapping pagetable for EPT in real mode */
+ for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
+ tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
+ _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
+ r = kvm_write_guest_page(kvm, identity_map_pfn,
+ &tmp, i * sizeof(tmp), sizeof(tmp));
+ if (r < 0)
+ goto out;
+ }
+ kvm_vmx->ept_identity_pagetable_done = true;
+
+out:
+ srcu_read_unlock(&kvm->srcu, idx);
+
+out2:
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+}
+
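+/*
+ * Worked example for the identity-map loop above: each 32-bit entry is a
+ * 4MiB PSE page-directory entry. The flag bits are _PAGE_PRESENT (0x001),
+ * _PAGE_RW (0x002), _PAGE_USER (0x004), _PAGE_ACCESSED (0x020),
+ * _PAGE_DIRTY (0x040) and _PAGE_PSE (0x080), i.e. 0x0e7, so entry i = 1
+ * maps guest physical 0x400000 with tmp = (1 << 22) | 0xe7 = 0x4000e7.
+ */
+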
+static void seg_setup(int seg)
+{
+ const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+ unsigned int ar;
+
+ vmcs_write16(sf->selector, 0);
+ vmcs_writel(sf->base, 0);
+ vmcs_write32(sf->limit, 0xffff);
+ ar = 0x93;
+ if (seg == VCPU_SREG_CS)
+ ar |= 0x08; /* code segment */
+
+ vmcs_write32(sf->ar_bytes, ar);
+}
+
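+/*
+ * The 0x93 written by seg_setup() above decodes as present (0x80) |
+ * s=1 (0x10) | type 3 (accessed read/write data). For CS the extra 0x08
+ * flips the type to 0xb (accessed execute/read code), giving the usual
+ * reset value 0x9b.
+ */
+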
+static int alloc_apic_access_page(struct kvm *kvm)
+{
+ struct page *page;
+ int r = 0;
+
+ mutex_lock(&kvm->slots_lock);
+ if (kvm->arch.apic_access_page_done)
+ goto out;
+ r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+ APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
+ if (r)
+ goto out;
+
+ page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+ if (is_error_page(page)) {
+ r = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * Do not pin the page in memory, so that memory hot-unplug
+ * is able to migrate it.
+ */
+ put_page(page);
+ kvm->arch.apic_access_page_done = true;
+out:
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+}
+
+int allocate_vpid(void)
+{
+ int vpid;
+
+ if (!enable_vpid)
+ return 0;
+ spin_lock(&vmx_vpid_lock);
+ vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
+ if (vpid < VMX_NR_VPIDS)
+ __set_bit(vpid, vmx_vpid_bitmap);
+ else
+ vpid = 0;
+ spin_unlock(&vmx_vpid_lock);
+ return vpid;
+}
+
+void free_vpid(int vpid)
+{
+ if (!enable_vpid || vpid == 0)
+ return;
+ spin_lock(&vmx_vpid_lock);
+ __clear_bit(vpid, vmx_vpid_bitmap);
+ spin_unlock(&vmx_vpid_lock);
+}
+
+static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type)
+{
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return;
+
+ if (static_branch_unlikely(&enable_evmcs))
+ evmcs_touch_msr_bitmap();
+
+ /*
+ * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+ * have the write-low and read-high bitmap offsets the wrong way round.
+ * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+ */
+ if (msr <= 0x1fff) {
+ if (type & MSR_TYPE_R)
+ /* read-low */
+ __clear_bit(msr, msr_bitmap + 0x000 / f);
+
+ if (type & MSR_TYPE_W)
+ /* write-low */
+ __clear_bit(msr, msr_bitmap + 0x800 / f);
+
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ if (type & MSR_TYPE_R)
+ /* read-high */
+ __clear_bit(msr, msr_bitmap + 0x400 / f);
+
+ if (type & MSR_TYPE_W)
+ /* write-high */
+ __clear_bit(msr, msr_bitmap + 0xc00 / f);
+
+ }
+}
+
+static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type)
+{
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return;
+
+ if (static_branch_unlikely(&enable_evmcs))
+ evmcs_touch_msr_bitmap();
+
+ /*
+ * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+ * have the write-low and read-high bitmap offsets the wrong way round.
+ * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+ */
+ if (msr <= 0x1fff) {
+ if (type & MSR_TYPE_R)
+ /* read-low */
+ __set_bit(msr, msr_bitmap + 0x000 / f);
+
+ if (type & MSR_TYPE_W)
+ /* write-low */
+ __set_bit(msr, msr_bitmap + 0x800 / f);
+
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ if (type & MSR_TYPE_R)
+ /* read-high */
+ __set_bit(msr, msr_bitmap + 0x400 / f);
+
+ if (type & MSR_TYPE_W)
+ /* write-high */
+ __set_bit(msr, msr_bitmap + 0xc00 / f);
+
+ }
+}
+
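+/*
+ * Worked example of the bitmap layout used by the two helpers above:
+ * intercepting writes to MSR_EFER (0xc0000080) masks the index down to
+ * 0x80 and sets bit 0x80 in the write-high bitmap at byte offset 0xc00,
+ * i.e. byte 0xc10, bit 0, of the 4KiB MSR bitmap page.
+ */
+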
+static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type, bool value)
+{
+ if (value)
+ vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
+ else
+ vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
+}
+
+static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
+{
+ u8 mode = 0;
+
+ if (cpu_has_secondary_exec_ctrls() &&
+ (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
+ mode |= MSR_BITMAP_MODE_X2APIC;
+ if (enable_apicv && kvm_vcpu_apicv_active(vcpu))
+ mode |= MSR_BITMAP_MODE_X2APIC_APICV;
+ }
+
+ return mode;
+}
+
+static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
+ u8 mode)
+{
+ int msr;
+
+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+ unsigned word = msr / BITS_PER_LONG;
+ msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0;
+ msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
+ }
+
+ if (mode & MSR_BITMAP_MODE_X2APIC) {
+ /*
+ * TPR reads and writes can be virtualized even if virtual interrupt
+ * delivery is not in use.
+ */
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
+ if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
+ vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
+ }
+ }
+}
+
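+/*
+ * The loop above covers the x2APIC MSR range 0x800-0x8ff; for example
+ * X2APIC_MSR(APIC_TASKPRI) is MSR 0x808 (the TPR). With APICV active,
+ * the read-low words for the whole range are cleared so reads pass
+ * through (TMCCT is then re-intercepted), while writes stay intercepted
+ * except for TPR, EOI and SELF_IPI.
+ */
+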
+void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+ u8 mode = vmx_msr_bitmap_mode(vcpu);
+ u8 changed = mode ^ vmx->msr_bitmap_mode;
+
+ if (!changed)
+ return;
+
+ if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
+ vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
+
+ vmx->msr_bitmap_mode = mode;
+}
+
+void pt_update_intercept_for_msr(struct vcpu_vmx *vmx)
+{
+ unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+ bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
+ u32 i;
+
+ vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_STATUS,
+ MSR_TYPE_RW, flag);
+ vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_BASE,
+ MSR_TYPE_RW, flag);
+ vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_OUTPUT_MASK,
+ MSR_TYPE_RW, flag);
+ vmx_set_intercept_for_msr(msr_bitmap, MSR_IA32_RTIT_CR3_MATCH,
+ MSR_TYPE_RW, flag);
+ for (i = 0; i < vmx->pt_desc.addr_range; i++) {
+ vmx_set_intercept_for_msr(msr_bitmap,
+ MSR_IA32_RTIT_ADDR0_A + i * 2, MSR_TYPE_RW, flag);
+ vmx_set_intercept_for_msr(msr_bitmap,
+ MSR_IA32_RTIT_ADDR0_B + i * 2, MSR_TYPE_RW, flag);
+ }
+}
+
+static bool vmx_get_enable_apicv(struct kvm_vcpu *vcpu)
+{
+ return enable_apicv;
+}
+
+static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ void *vapic_page;
+ u32 vppr;
+ int rvi;
+
+ if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
+ !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
+ WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
+ return false;
+
+ rvi = vmx_get_rvi();
+
+ vapic_page = kmap(vmx->nested.virtual_apic_page);
+ vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
+ kunmap(vmx->nested.virtual_apic_page);
+
+ return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
+static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
+ bool nested)
+{
+#ifdef CONFIG_SMP
+ int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
+
+ if (vcpu->mode == IN_GUEST_MODE) {
+ /*
+ * The vector of the interrupt to be delivered to the vcpu has
+ * already been set in the PIR before this function is called.
+ *
+ * The following cases can be reached in this block, and we
+ * always send a notification event, as explained below.
+ *
+ * Case 1: the vcpu stays in non-root mode. Sending a
+ * notification event posts the interrupt to the vcpu.
+ *
+ * Case 2: the vcpu exits to root mode and is still runnable.
+ * The PIR will be synced to the vIRR before the next vcpu
+ * entry. Sending a notification event in this case has no
+ * effect, as the vcpu is no longer in non-root mode.
+ *
+ * Case 3: the vcpu exits to root mode and is blocked.
+ * vcpu_block() has already synced the PIR to the vIRR and
+ * never blocks the vcpu if the vIRR is not cleared. Therefore,
+ * a blocked vcpu here is not waiting for any requested
+ * interrupt in the PIR, and sending a notification event,
+ * which has no effect, is safe here.
+ */
+
+ apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
+ return true;
+ }
+#endif
+ return false;
+}
+
+static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
+ int vector)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (is_guest_mode(vcpu) &&
+ vector == vmx->nested.posted_intr_nv) {
+ /*
+ * If the posted interrupt is not recognized by hardware,
+ * we will deliver it on the next vmentry.
+ */
+ vmx->nested.pi_pending = true;
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ /* the PIR and ON have been set by L1. */
+ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
+ kvm_vcpu_kick(vcpu);
+ return 0;
+ }
+ return -1;
+}
+
+/*
+ * Send an interrupt to the vcpu via posted interrupt:
+ * 1. If the target vcpu is running (non-root mode), send a posted
+ * interrupt notification and hardware will sync the PIR to the vIRR
+ * atomically.
+ * 2. If the target vcpu isn't running (root mode), kick it so it picks
+ * up the interrupt from the PIR on the next vmentry.
+ */
+static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int r;
+
+ r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
+ if (!r)
+ return;
+
+ if (pi_test_and_set_pir(vector, &vmx->pi_desc))
+ return;
+
+ /* If a previous notification has sent the IPI, nothing to do. */
+ if (pi_test_and_set_on(&vmx->pi_desc))
+ return;
+
+ if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
+ kvm_vcpu_kick(vcpu);
+}
+
+/*
+ * Set up the vmcs's constant host-state fields, i.e., host-state fields that
+ * will not change in the lifetime of the guest.
+ * Note that host-state that does change is set elsewhere. E.g., host-state
+ * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
+ */
+void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
+{
+ u32 low32, high32;
+ unsigned long tmpl;
+ struct desc_ptr dt;
+ unsigned long cr0, cr3, cr4;
+
+ cr0 = read_cr0();
+ WARN_ON(cr0 & X86_CR0_TS);
+ vmcs_writel(HOST_CR0, cr0); /* 22.2.3 */
+
+ /*
+ * Save the most likely value for this task's CR3 in the VMCS.
+ * We can't use __get_current_cr3_fast() because we're not atomic.
+ */
+ cr3 = __read_cr3();
+ vmcs_writel(HOST_CR3, cr3); /* 22.2.3 FIXME: shadow tables */
+ vmx->loaded_vmcs->host_state.cr3 = cr3;
+
+ /* Save the most likely value for this task's CR4 in the VMCS. */
+ cr4 = cr4_read_shadow();
+ vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
+ vmx->loaded_vmcs->host_state.cr4 = cr4;
+
+ vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+#ifdef CONFIG_X86_64
+ /*
+ * Load null selectors, so we can avoid reloading them in
+ * vmx_prepare_switch_to_host(), in case userspace uses
+ * the null selectors too (the expected case).
+ */
+ vmcs_write16(HOST_DS_SELECTOR, 0);
+ vmcs_write16(HOST_ES_SELECTOR, 0);
+#else
+ vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+#endif
+ vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+ vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
+
+ store_idt(&dt);
+ vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
+ vmx->host_idt_base = dt.address;
+
+ vmcs_writel(HOST_RIP, (unsigned long)vmx_vmexit); /* 22.2.5 */
+
+ rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
+ vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
+ rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
+ vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
+
+ if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
+ rdmsr(MSR_IA32_CR_PAT, low32, high32);
+ vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
+ }
+
+ if (cpu_has_load_ia32_efer())
+ vmcs_write64(HOST_IA32_EFER, host_efer);
+}
+
+void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
+{
+ vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
+ if (enable_ept)
+ vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+ if (is_guest_mode(&vmx->vcpu))
+ vmx->vcpu.arch.cr4_guest_owned_bits &=
+ ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
+ vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
+}
+
+static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
+{
+ u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
+
+ if (!kvm_vcpu_apicv_active(&vmx->vcpu))
+ pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
+
+ if (!enable_vnmi)
+ pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
+
+ /* Enable the preemption timer dynamically */
+ pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ return pin_based_exec_ctrl;
+}
+
+static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
+ if (cpu_has_secondary_exec_ctrls()) {
+ if (kvm_vcpu_apicv_active(vcpu))
+ vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+ else
+ vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+ }
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmx_update_msr_bitmap(vcpu);
+}
+
+u32 vmx_exec_control(struct vcpu_vmx *vmx)
+{
+ u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
+
+ if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
+ exec_control &= ~CPU_BASED_MOV_DR_EXITING;
+
+ if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_STORE_EXITING |
+ CPU_BASED_CR8_LOAD_EXITING;
+#endif
+ }
+ if (!enable_ept)
+ exec_control |= CPU_BASED_CR3_STORE_EXITING |
+ CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_INVLPG_EXITING;
+ if (kvm_mwait_in_guest(vmx->vcpu.kvm))
+ exec_control &= ~(CPU_BASED_MWAIT_EXITING |
+ CPU_BASED_MONITOR_EXITING);
+ if (kvm_hlt_in_guest(vmx->vcpu.kvm))
+ exec_control &= ~CPU_BASED_HLT_EXITING;
+ return exec_control;
+}
+
+static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
+{
+ struct kvm_vcpu *vcpu = &vmx->vcpu;
+
+ u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
+
+ if (pt_mode == PT_MODE_SYSTEM)
+ exec_control &= ~(SECONDARY_EXEC_PT_USE_GPA | SECONDARY_EXEC_PT_CONCEAL_VMX);
+ if (!cpu_need_virtualize_apic_accesses(vcpu))
+ exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ if (vmx->vpid == 0)
+ exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
+ if (!enable_ept) {
+ exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+ enable_unrestricted_guest = 0;
+ }
+ if (!enable_unrestricted_guest)
+ exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
+ if (kvm_pause_in_guest(vmx->vcpu.kvm))
+ exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+ if (!kvm_vcpu_apicv_active(vcpu))
+ exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+ exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+
+ /*
+ * SECONDARY_EXEC_DESC is enabled/disabled on writes to CR4.UMIP,
+ * in vmx_set_cr4().
+ */
+ exec_control &= ~SECONDARY_EXEC_DESC;
+
+ /*
+ * SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
+ * (handle_vmptrld). We cannot enable it here because we do not yet
+ * have a current VMCS12.
+ */
+ exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
+
+ if (!enable_pml)
+ exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
+
+ if (vmx_xsaves_supported()) {
+ /* Exposing XSAVES only when XSAVE is exposed */
+ bool xsaves_enabled =
+ guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
+
+ if (!xsaves_enabled)
+ exec_control &= ~SECONDARY_EXEC_XSAVES;
+
+ if (nested) {
+ if (xsaves_enabled)
+ vmx->nested.msrs.secondary_ctls_high |=
+ SECONDARY_EXEC_XSAVES;
+ else
+ vmx->nested.msrs.secondary_ctls_high &=
+ ~SECONDARY_EXEC_XSAVES;
+ }
+ }
+
+ if (vmx_rdtscp_supported()) {
+ bool rdtscp_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP);
+ if (!rdtscp_enabled)
+ exec_control &= ~SECONDARY_EXEC_RDTSCP;
+
+ if (nested) {
+ if (rdtscp_enabled)
+ vmx->nested.msrs.secondary_ctls_high |=
+ SECONDARY_EXEC_RDTSCP;
+ else
+ vmx->nested.msrs.secondary_ctls_high &=
+ ~SECONDARY_EXEC_RDTSCP;
+ }
+ }
+
+ if (vmx_invpcid_supported()) {
+ /* Exposing INVPCID only when PCID is exposed */
+ bool invpcid_enabled =
+ guest_cpuid_has(vcpu, X86_FEATURE_INVPCID) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_PCID);
+
+ if (!invpcid_enabled) {
+ exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
+ guest_cpuid_clear(vcpu, X86_FEATURE_INVPCID);
+ }
+
+ if (nested) {
+ if (invpcid_enabled)
+ vmx->nested.msrs.secondary_ctls_high |=
+ SECONDARY_EXEC_ENABLE_INVPCID;
+ else
+ vmx->nested.msrs.secondary_ctls_high &=
+ ~SECONDARY_EXEC_ENABLE_INVPCID;
+ }
+ }
+
+ if (vmx_rdrand_supported()) {
+ bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND);
+ if (rdrand_enabled)
+ exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING;
+
+ if (nested) {
+ if (rdrand_enabled)
+ vmx->nested.msrs.secondary_ctls_high |=
+ SECONDARY_EXEC_RDRAND_EXITING;
+ else
+ vmx->nested.msrs.secondary_ctls_high &=
+ ~SECONDARY_EXEC_RDRAND_EXITING;
+ }
+ }
+
+ if (vmx_rdseed_supported()) {
+ bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED);
+ if (rdseed_enabled)
+ exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING;
+
+ if (nested) {
+ if (rdseed_enabled)
+ vmx->nested.msrs.secondary_ctls_high |=
+ SECONDARY_EXEC_RDSEED_EXITING;
+ else
+ vmx->nested.msrs.secondary_ctls_high &=
+ ~SECONDARY_EXEC_RDSEED_EXITING;
+ }
+ }
+
+ vmx->secondary_exec_control = exec_control;
+}
+
+static void ept_set_mmio_spte_mask(void)
+{
+ /*
+ * EPT Misconfigurations can be generated if the value of bits 2:0
+ * of an EPT paging-structure entry is 110b (write/execute).
+ */
+ kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
+ VMX_EPT_MISCONFIG_WX_VALUE);
+}
+
+#define VMX_XSS_EXIT_BITMAP 0
+
+/*
+ * Sets up the vmcs for emulated real mode.
+ */
+static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
+{
+ int i;
+
+ if (nested)
+ nested_vmx_vcpu_setup();
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
+
+ vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
+
+ /* Control */
+ vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
+ vmx->hv_deadline_tsc = -1;
+
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
+
+ if (cpu_has_secondary_exec_ctrls()) {
+ vmx_compute_secondary_exec_control(vmx);
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ vmx->secondary_exec_control);
+ }
+
+ if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
+ vmcs_write64(EOI_EXIT_BITMAP0, 0);
+ vmcs_write64(EOI_EXIT_BITMAP1, 0);
+ vmcs_write64(EOI_EXIT_BITMAP2, 0);
+ vmcs_write64(EOI_EXIT_BITMAP3, 0);
+
+ vmcs_write16(GUEST_INTR_STATUS, 0);
+
+ vmcs_write16(POSTED_INTR_NV, POSTED_INTR_VECTOR);
+ vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
+ }
+
+ if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
+ vmcs_write32(PLE_GAP, ple_gap);
+ vmx->ple_window = ple_window;
+ vmx->ple_window_dirty = true;
+ }
+
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
+ vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
+
+ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
+ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
+ vmx_set_constant_host_state(vmx);
+ vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
+ vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
+
+ if (cpu_has_vmx_vmfunc())
+ vmcs_write64(VM_FUNCTION_CONTROL, 0);
+
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
+
+ if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
+ vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
+
+ for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
+ u32 index = vmx_msr_index[i];
+ u32 data_low, data_high;
+ int j = vmx->nmsrs;
+
+ if (rdmsr_safe(index, &data_low, &data_high) < 0)
+ continue;
+ if (wrmsr_safe(index, data_low, data_high) < 0)
+ continue;
+ vmx->guest_msrs[j].index = i;
+ vmx->guest_msrs[j].data = 0;
+ vmx->guest_msrs[j].mask = -1ull;
+ ++vmx->nmsrs;
+ }
+
+ vmx->arch_capabilities = kvm_get_arch_capabilities();
+
+ vm_exit_controls_init(vmx, vmx_vmexit_ctrl());
+
+ /* 22.2.1, 20.8.1 */
+ vm_entry_controls_init(vmx, vmx_vmentry_ctrl());
+
+ vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
+ vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
+
+ set_cr4_guest_host_mask(vmx);
+
+ if (vmx_xsaves_supported())
+ vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
+
+ if (enable_pml) {
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+ }
+
+ if (cpu_has_vmx_encls_vmexit())
+ vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
+
+ if (pt_mode == PT_MODE_HOST_GUEST) {
+ memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
+ /* Bits 6:0 are forced to 1; writes are ignored. */
+ vmx->pt_desc.guest.output_mask = 0x7F;
+ vmcs_write64(GUEST_IA32_RTIT_CTL, 0);
+ }
+}
+
+static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct msr_data apic_base_msr;
+ u64 cr0;
+
+ vmx->rmode.vm86_active = 0;
+ vmx->spec_ctrl = 0;
+
+ vcpu->arch.microcode_version = 0x100000000ULL;
+ vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
+ kvm_set_cr8(vcpu, 0);
+
+ if (!init_event) {
+ apic_base_msr.data = APIC_DEFAULT_PHYS_BASE |
+ MSR_IA32_APICBASE_ENABLE;
+ if (kvm_vcpu_is_reset_bsp(vcpu))
+ apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
+ apic_base_msr.host_initiated = true;
+ kvm_set_apic_base(vcpu, &apic_base_msr);
+ }
+
+ vmx_segment_cache_clear(vmx);
+
+ seg_setup(VCPU_SREG_CS);
+ vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
+ vmcs_writel(GUEST_CS_BASE, 0xffff0000ul);
+
+ seg_setup(VCPU_SREG_DS);
+ seg_setup(VCPU_SREG_ES);
+ seg_setup(VCPU_SREG_FS);
+ seg_setup(VCPU_SREG_GS);
+ seg_setup(VCPU_SREG_SS);
+
+ vmcs_write16(GUEST_TR_SELECTOR, 0);
+ vmcs_writel(GUEST_TR_BASE, 0);
+ vmcs_write32(GUEST_TR_LIMIT, 0xffff);
+ vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
+
+ vmcs_write16(GUEST_LDTR_SELECTOR, 0);
+ vmcs_writel(GUEST_LDTR_BASE, 0);
+ vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
+ vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
+
+ if (!init_event) {
+ vmcs_write32(GUEST_SYSENTER_CS, 0);
+ vmcs_writel(GUEST_SYSENTER_ESP, 0);
+ vmcs_writel(GUEST_SYSENTER_EIP, 0);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+ }
+
+ kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
+ kvm_rip_write(vcpu, 0xfff0);
+
+ vmcs_writel(GUEST_GDTR_BASE, 0);
+ vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
+
+ vmcs_writel(GUEST_IDTR_BASE, 0);
+ vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
+
+ vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
+ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
+ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
+ if (kvm_mpx_supported())
+ vmcs_write64(GUEST_BNDCFGS, 0);
+
+ setup_msrs(vmx);
+
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
+
+ if (cpu_has_vmx_tpr_shadow() && !init_event) {
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
+ if (cpu_need_tpr_shadow(vcpu))
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
+ __pa(vcpu->arch.apic->regs));
+ vmcs_write32(TPR_THRESHOLD, 0);
+ }
+
+ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
+
+ if (vmx->vpid != 0)
+ vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+
+ cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
+ vmx->vcpu.arch.cr0 = cr0;
+ vmx_set_cr0(vcpu, cr0); /* enter rmode */
+ vmx_set_cr4(vcpu, 0);
+ vmx_set_efer(vcpu, 0);
+
+ update_exception_bitmap(vcpu);
+
+ vpid_sync_context(vmx->vpid);
+ if (init_event)
+ vmx_clear_hlt(vcpu);
+}
+
+static void enable_irq_window(struct kvm_vcpu *vcpu)
+{
+ vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_VIRTUAL_INTR_PENDING);
+}
+
+static void enable_nmi_window(struct kvm_vcpu *vcpu)
+{
+ if (!enable_vnmi ||
+ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+ enable_irq_window(vcpu);
+ return;
+ }
+
+ vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_VIRTUAL_NMI_PENDING);
+}
+
+static void vmx_inject_irq(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ uint32_t intr;
+ int irq = vcpu->arch.interrupt.nr;
+
+ trace_kvm_inj_virq(irq);
+
+ ++vcpu->stat.irq_injections;
+ if (vmx->rmode.vm86_active) {
+ int inc_eip = 0;
+ if (vcpu->arch.interrupt.soft)
+ inc_eip = vcpu->arch.event_exit_inst_len;
+ if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+ return;
+ }
+ intr = irq | INTR_INFO_VALID_MASK;
+ if (vcpu->arch.interrupt.soft) {
+ intr |= INTR_TYPE_SOFT_INTR;
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ vmx->vcpu.arch.event_exit_inst_len);
+ } else
+ intr |= INTR_TYPE_EXT_INTR;
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
+
+ vmx_clear_hlt(vcpu);
+}
+
+static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!enable_vnmi) {
+ /*
+ * Tracking the NMI-blocked state in software is built upon
+ * finding the next open IRQ window. This, in turn, depends on
+ * well-behaved guests: they have to keep IRQs disabled at
+ * least as long as the NMI handler runs. Otherwise we may
+ * cause NMI nesting, maybe breaking the guest. But as this is
+ * highly unlikely, we can live with the residual risk.
+ */
+ vmx->loaded_vmcs->soft_vnmi_blocked = 1;
+ vmx->loaded_vmcs->vnmi_blocked_time = 0;
+ }
+
+ ++vcpu->stat.nmi_injections;
+ vmx->loaded_vmcs->nmi_known_unmasked = false;
+
+ if (vmx->rmode.vm86_active) {
+ if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+ return;
+ }
+
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+ INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
+
+ vmx_clear_hlt(vcpu);
+}
+
+bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool masked;
+
+ if (!enable_vnmi)
+ return vmx->loaded_vmcs->soft_vnmi_blocked;
+ if (vmx->loaded_vmcs->nmi_known_unmasked)
+ return false;
+ masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
+ vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+ return masked;
+}
+
+void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!enable_vnmi) {
+ if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
+ vmx->loaded_vmcs->soft_vnmi_blocked = masked;
+ vmx->loaded_vmcs->vnmi_blocked_time = 0;
+ }
+ } else {
+ vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+ if (masked)
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ else
+ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ }
+}
+
+static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+{
+ if (to_vmx(vcpu)->nested.nested_run_pending)
+ return 0;
+
+ if (!enable_vnmi &&
+ to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
+ return 0;
+
+ return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+ (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
+ | GUEST_INTR_STATE_NMI));
+}
+
+static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ return (!to_vmx(vcpu)->nested.nested_run_pending &&
+ vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
+ !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+ (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
+}
+
+static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
+{
+ int ret;
+
+ if (enable_unrestricted_guest)
+ return 0;
+
+ ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
+ PAGE_SIZE * 3);
+ if (ret)
+ return ret;
+ to_kvm_vmx(kvm)->tss_addr = addr;
+ return init_rmode_tss(kvm);
+}
+
+static int vmx_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
+{
+ to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr;
+ return 0;
+}
+
+static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
+{
+ switch (vec) {
+ case BP_VECTOR:
+ /*
+ * Update instruction length as we may reinject the exception
+ * from user space while in guest debugging mode.
+ */
+ to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+ return false;
+ /* fall through */
+ case DB_VECTOR:
+ if (vcpu->guest_debug &
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+ return false;
+ /* fall through */
+ case DE_VECTOR:
+ case OF_VECTOR:
+ case BR_VECTOR:
+ case UD_VECTOR:
+ case DF_VECTOR:
+ case SS_VECTOR:
+ case GP_VECTOR:
+ case MF_VECTOR:
+ return true;
+ break;
+ }
+ return false;
+}
+
+static int handle_rmode_exception(struct kvm_vcpu *vcpu,
+ int vec, u32 err_code)
+{
+ /*
+ * An instruction with the address-size override prefix (opcode
+ * 0x67) causes a #SS fault with error code 0 in VM86 mode.
+ */
+ if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
+ if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
+ if (vcpu->arch.halt_request) {
+ vcpu->arch.halt_request = 0;
+ return kvm_vcpu_halt(vcpu);
+ }
+ return 1;
+ }
+ return 0;
+ }
+
+ /*
+ * Forward all other exceptions that are valid in real mode.
+ * FIXME: Breaks guest debugging in real mode, needs to be fixed with
+ * the required debugging infrastructure rework.
+ */
+ kvm_queue_exception(vcpu, vec);
+ return 1;
+}
+
+/*
+ * Trigger a machine check on the host. We assume all the MSRs are already
+ * set up by the CPU and that we still run on the same CPU the MCE occurred
+ * on. We pass a fake environment to the machine check handler because we
+ * want the guest to always be treated like user space, no matter what
+ * context it used internally.
+ */
+static void kvm_machine_check(void)
+{
+#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
+ struct pt_regs regs = {
+ .cs = 3, /* Fake ring 3 no matter what the guest ran on */
+ .flags = X86_EFLAGS_IF,
+ };
+
+ do_machine_check(&regs, 0);
+#endif
+}
+
+static int handle_machine_check(struct kvm_vcpu *vcpu)
+{
+ /* already handled by vcpu_run */
+ return 1;
+}
+
+static int handle_exception(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_run *kvm_run = vcpu->run;
+ u32 intr_info, ex_no, error_code;
+ unsigned long cr2, rip, dr6;
+ u32 vect_info;
+ enum emulation_result er;
+
+ vect_info = vmx->idt_vectoring_info;
+ intr_info = vmx->exit_intr_info;
+
+ if (is_machine_check(intr_info))
+ return handle_machine_check(vcpu);
+
+ if (is_nmi(intr_info))
+ return 1; /* already handled by vmx_vcpu_run() */
+
+ if (is_invalid_opcode(intr_info))
+ return handle_ud(vcpu);
+
+ error_code = 0;
+ if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
+ error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
+
+ if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
+ WARN_ON_ONCE(!enable_vmware_backdoor);
+ er = kvm_emulate_instruction(vcpu,
+ EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
+ if (er == EMULATE_USER_EXIT)
+ return 0;
+ else if (er != EMULATE_DONE)
+ kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
+ return 1;
+ }
+
+ /*
+ * A #PF with PFEC.RSVD = 1 indicates that the guest is accessing
+ * MMIO; in that case it is better to report an internal error.
+ * See the comments in vmx_handle_exit.
+ */
+ if ((vect_info & VECTORING_INFO_VALID_MASK) &&
+ !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
+ vcpu->run->internal.ndata = 3;
+ vcpu->run->internal.data[0] = vect_info;
+ vcpu->run->internal.data[1] = intr_info;
+ vcpu->run->internal.data[2] = error_code;
+ return 0;
+ }
+
+ if (is_page_fault(intr_info)) {
+ cr2 = vmcs_readl(EXIT_QUALIFICATION);
+ /* EPT won't cause page fault directly */
+ WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
+ return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
+ }
+
+ ex_no = intr_info & INTR_INFO_VECTOR_MASK;
+
+ if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
+ return handle_rmode_exception(vcpu, ex_no, error_code);
+
+ switch (ex_no) {
+ case AC_VECTOR:
+ kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
+ return 1;
+ case DB_VECTOR:
+ dr6 = vmcs_readl(EXIT_QUALIFICATION);
+ if (!(vcpu->guest_debug &
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
+ vcpu->arch.dr6 &= ~15;
+ vcpu->arch.dr6 |= dr6 | DR6_RTM;
+ if (is_icebp(intr_info))
+ skip_emulated_instruction(vcpu);
+
+ kvm_queue_exception(vcpu, DB_VECTOR);
+ return 1;
+ }
+ kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
+ kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
+ /* fall through */
+ case BP_VECTOR:
+ /*
+ * Update the instruction length, as we may reinject #BP from
+ * user space while in guest debugging mode. Reading it for
+ * #DB as well causes no harm; it is not used in that case.
+ */
+ vmx->vcpu.arch.event_exit_inst_len =
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ kvm_run->exit_reason = KVM_EXIT_DEBUG;
+ rip = kvm_rip_read(vcpu);
+ kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
+ kvm_run->debug.arch.exception = ex_no;
+ break;
+ default:
+ kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
+ kvm_run->ex.exception = ex_no;
+ kvm_run->ex.error_code = error_code;
+ break;
+ }
+ return 0;
+}
+
+static int handle_external_interrupt(struct kvm_vcpu *vcpu)
+{
+ ++vcpu->stat.irq_exits;
+ return 1;
+}
+
+static int handle_triple_fault(struct kvm_vcpu *vcpu)
+{
+ vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+ vcpu->mmio_needed = 0;
+ return 0;
+}
+
+static int handle_io(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification;
+ int size, in, string;
+ unsigned port;
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ string = (exit_qualification & 16) != 0;
+
+ ++vcpu->stat.io_exits;
+
+ if (string)
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
+
+ port = exit_qualification >> 16;
+ size = (exit_qualification & 7) + 1;
+ in = (exit_qualification & 8) != 0;
+
+ return kvm_fast_pio(vcpu, size, port, in);
+}
+
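+/*
+ * Worked example of the exit-qualification decode in handle_io() below:
+ * for "outb %al, (%dx)" with DX = 0x80 the qualification is 0x00800000 --
+ * size field 0 (1 byte), bit 3 clear (OUT), bit 4 clear (not a string
+ * insn) and port 0x80 in bits 31:16.
+ */
+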
+static void
+vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
+{
+ /*
+ * Patch in the VMCALL instruction:
+ */
+ hypercall[0] = 0x0f;
+ hypercall[1] = 0x01;
+ hypercall[2] = 0xc1;
+}
+
+/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
+static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ if (is_guest_mode(vcpu)) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ unsigned long orig_val = val;
+
+ /*
+ * We get here when L2 changed cr0 in a way that did not change
+ * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
+ * but did change L0 shadowed bits. So we first calculate the
+ * effective cr0 value that L1 would like to write into the
+ * hardware. It consists of the L2-owned bits from the new
+ * value combined with the L1-owned bits from L1's guest_cr0.
+ */
+ val = (val & ~vmcs12->cr0_guest_host_mask) |
+ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
+
+ if (!nested_guest_cr0_valid(vcpu, val))
+ return 1;
+
+ if (kvm_set_cr0(vcpu, val))
+ return 1;
+ vmcs_writel(CR0_READ_SHADOW, orig_val);
+ return 0;
+ } else {
+ if (to_vmx(vcpu)->nested.vmxon &&
+ !nested_host_cr0_valid(vcpu, val))
+ return 1;
+
+ return kvm_set_cr0(vcpu, val);
+ }
+}
+
+static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ if (is_guest_mode(vcpu)) {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ unsigned long orig_val = val;
+
+ /* analogously to handle_set_cr0 */
+ val = (val & ~vmcs12->cr4_guest_host_mask) |
+ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
+ if (kvm_set_cr4(vcpu, val))
+ return 1;
+ vmcs_writel(CR4_READ_SHADOW, orig_val);
+ return 0;
+ } else
+ return kvm_set_cr4(vcpu, val);
+}
+
+static int handle_desc(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
+}
+
+static int handle_cr(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification, val;
+ int cr;
+ int reg;
+ int err;
+ int ret;
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ cr = exit_qualification & 15;
+ reg = (exit_qualification >> 8) & 15;
+ switch ((exit_qualification >> 4) & 3) {
+ case 0: /* mov to cr */
+ val = kvm_register_readl(vcpu, reg);
+ trace_kvm_cr_write(cr, val);
+ switch (cr) {
+ case 0:
+ err = handle_set_cr0(vcpu, val);
+ return kvm_complete_insn_gp(vcpu, err);
+ case 3:
+ WARN_ON_ONCE(enable_unrestricted_guest);
+ err = kvm_set_cr3(vcpu, val);
+ return kvm_complete_insn_gp(vcpu, err);
+ case 4:
+ err = handle_set_cr4(vcpu, val);
+ return kvm_complete_insn_gp(vcpu, err);
+ case 8: {
+ u8 cr8_prev = kvm_get_cr8(vcpu);
+ u8 cr8 = (u8)val;
+ err = kvm_set_cr8(vcpu, cr8);
+ ret = kvm_complete_insn_gp(vcpu, err);
+ if (lapic_in_kernel(vcpu))
+ return ret;
+ if (cr8_prev <= cr8)
+ return ret;
+ /*
+ * TODO: we might be squashing a
+ * KVM_GUESTDBG_SINGLESTEP-triggered
+ * KVM_EXIT_DEBUG here.
+ */
+ vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
+ return 0;
+ }
+ }
+ break;
+ case 2: /* clts */
+ WARN_ONCE(1, "Guest should always own CR0.TS");
+ vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+ trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
+ return kvm_skip_emulated_instruction(vcpu);
+ case 1: /* mov from cr */
+ switch (cr) {
+ case 3:
+ WARN_ON_ONCE(enable_unrestricted_guest);
+ val = kvm_read_cr3(vcpu);
+ kvm_register_write(vcpu, reg, val);
+ trace_kvm_cr_read(cr, val);
+ return kvm_skip_emulated_instruction(vcpu);
+ case 8:
+ val = kvm_get_cr8(vcpu);
+ kvm_register_write(vcpu, reg, val);
+ trace_kvm_cr_read(cr, val);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ break;
+ case 3: /* lmsw */
+ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
+ trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
+ kvm_lmsw(vcpu, val);
+
+ return kvm_skip_emulated_instruction(vcpu);
+ default:
+ break;
+ }
+ vcpu->run->exit_reason = 0;
+ vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
+ (int)(exit_qualification >> 4) & 3, cr);
+ return 0;
+}
+
+static int handle_dr(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification;
+ int dr, dr7, reg;
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
+
+ /* First, if DR does not exist, trigger UD */
+ if (!kvm_require_dr(vcpu, dr))
+ return 1;
+
+ /* Do not handle if CPL > 0; a #GP will be injected on re-entry */
+ if (!kvm_require_cpl(vcpu, 0))
+ return 1;
+ dr7 = vmcs_readl(GUEST_DR7);
+ if (dr7 & DR7_GD) {
+ /*
+ * As the vm-exit takes precedence over the debug trap, we
+ * need to emulate the latter, either for the host or the
+ * guest debugging itself.
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
+ vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
+ vcpu->run->debug.arch.dr7 = dr7;
+ vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
+ vcpu->run->debug.arch.exception = DB_VECTOR;
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ return 0;
+ } else {
+ vcpu->arch.dr6 &= ~15;
+ vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
+ kvm_queue_exception(vcpu, DB_VECTOR);
+ return 1;
+ }
+ }
+
+ if (vcpu->guest_debug == 0) {
+ vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_MOV_DR_EXITING);
+
+ /*
+ * No more DR vmexits; force a reload of the debug registers
+ * and reenter on this instruction. The next vmexit will
+ * retrieve the full state of the debug registers.
+ */
+ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
+ return 1;
+ }
+
+ reg = DEBUG_REG_ACCESS_REG(exit_qualification);
+ if (exit_qualification & TYPE_MOV_FROM_DR) {
+ unsigned long val;
+
+ if (kvm_get_dr(vcpu, dr, &val))
+ return 1;
+ kvm_register_write(vcpu, reg, val);
+ } else
+ if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
+ return 1;
+
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.dr6;
+}
+
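+/*
+ * DR6 has no VMCS field; the generic code loads vcpu->arch.dr6 into the
+ * hardware register before VM entry, so this hook has nothing to do.
+ */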
+static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
+{
+}
+
+static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
+{
+ get_debugreg(vcpu->arch.db[0], 0);
+ get_debugreg(vcpu->arch.db[1], 1);
+ get_debugreg(vcpu->arch.db[2], 2);
+ get_debugreg(vcpu->arch.db[3], 3);
+ get_debugreg(vcpu->arch.dr6, 6);
+ vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
+
+ vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
+ vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
+}
+
+static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+{
+ vmcs_writel(GUEST_DR7, val);
+}
+
+static int handle_cpuid(struct kvm_vcpu *vcpu)
+{
+ return kvm_emulate_cpuid(vcpu);
+}
+
+static int handle_rdmsr(struct kvm_vcpu *vcpu)
+{
+ u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
+ struct msr_data msr_info;
+
+ msr_info.index = ecx;
+ msr_info.host_initiated = false;
+ if (vmx_get_msr(vcpu, &msr_info)) {
+ trace_kvm_msr_read_ex(ecx);
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ trace_kvm_msr_read(ecx, msr_info.data);
+
+ /* FIXME: handling of bits 32:63 of rax, rdx */
+ vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
+ vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+static int handle_wrmsr(struct kvm_vcpu *vcpu)
+{
+ struct msr_data msr;
+ u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
+ u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
+ | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
+
+ msr.data = data;
+ msr.index = ecx;
+ msr.host_initiated = false;
+ if (kvm_set_msr(vcpu, &msr) != 0) {
+ trace_kvm_msr_write_ex(ecx, data);
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ trace_kvm_msr_write(ecx, data);
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
+{
+ kvm_apic_update_ppr(vcpu);
+ return 1;
+}
+
+static int handle_interrupt_window(struct kvm_vcpu *vcpu)
+{
+ vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_VIRTUAL_INTR_PENDING);
+
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+ ++vcpu->stat.irq_window_exits;
+ return 1;
+}
+
+static int handle_halt(struct kvm_vcpu *vcpu)
+{
+ return kvm_emulate_halt(vcpu);
+}
+
+static int handle_vmcall(struct kvm_vcpu *vcpu)
+{
+ return kvm_emulate_hypercall(vcpu);
+}
+
+static int handle_invd(struct kvm_vcpu *vcpu)
+{
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
+}
+
+static int handle_invlpg(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ kvm_mmu_invlpg(vcpu, exit_qualification);
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+static int handle_rdpmc(struct kvm_vcpu *vcpu)
+{
+ int err;
+
+ err = kvm_rdpmc(vcpu);
+ return kvm_complete_insn_gp(vcpu, err);
+}
+
+static int handle_wbinvd(struct kvm_vcpu *vcpu)
+{
+ return kvm_emulate_wbinvd(vcpu);
+}
+
+static int handle_xsetbv(struct kvm_vcpu *vcpu)
+{
+ u64 new_bv = kvm_read_edx_eax(vcpu);
+ u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
+
+ if (kvm_set_xcr(vcpu, index, new_bv) == 0)
+ return kvm_skip_emulated_instruction(vcpu);
+ return 1;
+}
+
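+/*
+ * KVM writes a zero XSS-exiting bitmap into the VMCS, so XSAVES/XRSTORS
+ * exits should never occur; warn and resume the guest if one does.
+ */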
+static int handle_xsaves(struct kvm_vcpu *vcpu)
+{
+ kvm_skip_emulated_instruction(vcpu);
+ WARN(1, "this should never happen\n");
+ return 1;
+}
+
+static int handle_xrstors(struct kvm_vcpu *vcpu)
+{
+ kvm_skip_emulated_instruction(vcpu);
+ WARN(1, "this should never happen\n");
+ return 1;
+}
+
+static int handle_apic_access(struct kvm_vcpu *vcpu)
+{
+ if (likely(fasteoi)) {
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ int access_type, offset;
+
+ access_type = exit_qualification & APIC_ACCESS_TYPE;
+ offset = exit_qualification & APIC_ACCESS_OFFSET;
+ /*
+ * A sane guest uses MOV to write EOI, and the written value is
+ * ignored. Short-circuit here to avoid heavy instruction
+ * emulation.
+ */
+ if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
+ (offset == APIC_EOI)) {
+ kvm_lapic_set_eoi(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ }
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
+}
+
+static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ int vector = exit_qualification & 0xff;
+
+ /* EOI-induced VM exit is trap-like, so there is no need to adjust the IP */
+ kvm_apic_set_eoi_accelerated(vcpu, vector);
+ return 1;
+}
+
+static int handle_apic_write(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ u32 offset = exit_qualification & 0xfff;
+
+ /* APIC-write VM exit is trap-like, so there is no need to adjust the IP */
+ kvm_apic_write_nodecode(vcpu, offset);
+ return 1;
+}
+
+static int handle_task_switch(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long exit_qualification;
+ bool has_error_code = false;
+ u32 error_code = 0;
+ u16 tss_selector;
+ int reason, type, idt_v, idt_index;
+
+ idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
+ idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
+ type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ reason = (u32)exit_qualification >> 30;
+ if (reason == TASK_SWITCH_GATE && idt_v) {
+ switch (type) {
+ case INTR_TYPE_NMI_INTR:
+ vcpu->arch.nmi_injected = false;
+ vmx_set_nmi_mask(vcpu, true);
+ break;
+ case INTR_TYPE_EXT_INTR:
+ case INTR_TYPE_SOFT_INTR:
+ kvm_clear_interrupt_queue(vcpu);
+ break;
+ case INTR_TYPE_HARD_EXCEPTION:
+ if (vmx->idt_vectoring_info &
+ VECTORING_INFO_DELIVER_CODE_MASK) {
+ has_error_code = true;
+ error_code =
+ vmcs_read32(IDT_VECTORING_ERROR_CODE);
+ }
+ /* fall through */
+ case INTR_TYPE_SOFT_EXCEPTION:
+ kvm_clear_exception_queue(vcpu);
+ break;
+ default:
+ break;
+ }
+ }
+ tss_selector = exit_qualification;
+
+ if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
+ type != INTR_TYPE_EXT_INTR &&
+ type != INTR_TYPE_NMI_INTR))
+ skip_emulated_instruction(vcpu);
+
+ if (kvm_task_switch(vcpu, tss_selector,
+ type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
+ has_error_code, error_code) == EMULATE_FAIL) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+ return 0;
+ }
+
+ /*
+ * TODO: What about debug traps on tss switch?
+ * Are we supposed to inject them and update dr6?
+ */
+
+ return 1;
+}
+
+static int handle_ept_violation(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification;
+ gpa_t gpa;
+ u64 error_code;
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ /*
+ * If the EPT violation happened while executing IRET from an NMI,
+ * the "blocked by NMI" bit has to be set before the next VM entry.
+ * There are errata (AAK134, BY25) that may cause this bit not to
+ * be set by hardware.
+ */
+ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ enable_vnmi &&
+ (exit_qualification & INTR_INFO_UNBLOCK_NMI))
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
+
+ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+ trace_kvm_page_fault(gpa, exit_qualification);
+
+ /* Is it a read fault? */
+ error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
+ ? PFERR_USER_MASK : 0;
+ /* Is it a write fault? */
+ error_code |= (exit_qualification & EPT_VIOLATION_ACC_WRITE)
+ ? PFERR_WRITE_MASK : 0;
+ /* Is it a fetch fault? */
+ error_code |= (exit_qualification & EPT_VIOLATION_ACC_INSTR)
+ ? PFERR_FETCH_MASK : 0;
+ /* Is the EPT page-table entry present? */
+ error_code |= (exit_qualification &
+ (EPT_VIOLATION_READABLE | EPT_VIOLATION_WRITABLE |
+ EPT_VIOLATION_EXECUTABLE))
+ ? PFERR_PRESENT_MASK : 0;
+
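+ /*
+ * Bit 8 of the qualification distinguishes an access to the final
+ * guest-physical address (set) from an access to a guest paging
+ * structure (clear).
+ */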
+ error_code |= (exit_qualification & 0x100) != 0 ?
+ PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
+
+ vcpu->arch.exit_qualification = exit_qualification;
+ return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
+}
+
+static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
+{
+ gpa_t gpa;
+
+ /*
+ * A nested guest cannot optimize MMIO vmexits, because we have an
+ * nGPA here instead of the required GPA.
+ */
+ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+ if (!is_guest_mode(vcpu) &&
+ !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
+ trace_kvm_fast_mmio(gpa);
+ /*
+ * Doing kvm_skip_emulated_instruction() depends on undefined
+ * behavior: Intel's manual doesn't mandate that
+ * VM_EXIT_INSTRUCTION_LEN be set in the VMCS when an EPT
+ * misconfig occurs. While real hardware has been observed to
+ * set it, other hypervisors (namely Hyper-V) don't, so we
+ * would end up advancing the IP by some random value. Disable
+ * fast mmio when running nested and keep it for real hardware,
+ * in the hope that VM_EXIT_INSTRUCTION_LEN will always be set
+ * correctly there.
+ */
+ if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+ return kvm_skip_emulated_instruction(vcpu);
+ else
+ return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+ EMULATE_DONE;
+ }
+
+ return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
+}
+
+static int handle_nmi_window(struct kvm_vcpu *vcpu)
+{
+ WARN_ON_ONCE(!enable_vnmi);
+ vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_VIRTUAL_NMI_PENDING);
+ ++vcpu->stat.nmi_window_exits;
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+ return 1;
+}
+
+static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ enum emulation_result err = EMULATE_DONE;
+ int ret = 1;
+ u32 cpu_exec_ctrl;
+ bool intr_window_requested;
+ unsigned count = 130;
+
+ /*
+ * We should never reach the point where we are emulating L2
+ * due to invalid guest state as that means we incorrectly
+ * allowed a nested VMEntry with an invalid vmcs12.
+ */
+ WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);
+
+ cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+ intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
+
+ while (vmx->emulation_required && count-- != 0) {
+ if (intr_window_requested && vmx_interrupt_allowed(vcpu))
+ return handle_interrupt_window(&vmx->vcpu);
+
+ if (kvm_test_request(KVM_REQ_EVENT, vcpu))
+ return 1;
+
+ err = kvm_emulate_instruction(vcpu, 0);
+
+ if (err == EMULATE_USER_EXIT) {
+ ++vcpu->stat.mmio_exits;
+ ret = 0;
+ goto out;
+ }
+
+ if (err != EMULATE_DONE)
+ goto emulation_error;
+
+ if (vmx->emulation_required && !vmx->rmode.vm86_active &&
+ vcpu->arch.exception.pending)
+ goto emulation_error;
+
+ if (vcpu->arch.halt_request) {
+ vcpu->arch.halt_request = 0;
+ ret = kvm_vcpu_halt(vcpu);
+ goto out;
+ }
+
+ if (signal_pending(current))
+ goto out;
+ if (need_resched())
+ schedule();
+ }
+
+out:
+ return ret;
+
+emulation_error:
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+ return 0;
+}
+
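+/*
+ * Dynamically size the PLE window: grow it on every PAUSE exit so that a
+ * vcpu legitimately spinning for long periods takes fewer exits, and
+ * shrink it again when the vcpu is scheduled back in.
+ */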
+static void grow_ple_window(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int old = vmx->ple_window;
+
+ vmx->ple_window = __grow_ple_window(old, ple_window,
+ ple_window_grow,
+ ple_window_max);
+
+ if (vmx->ple_window != old)
+ vmx->ple_window_dirty = true;
+
+ trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old);
+}
+
+static void shrink_ple_window(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int old = vmx->ple_window;
+
+ vmx->ple_window = __shrink_ple_window(old, ple_window,
+ ple_window_shrink,
+ ple_window);
+
+ if (vmx->ple_window != old)
+ vmx->ple_window_dirty = true;
+
+ trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old);
+}
+
+/*
+ * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
+ */
+static void wakeup_handler(void)
+{
+ struct kvm_vcpu *vcpu;
+ int cpu = smp_processor_id();
+
+ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+ list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
+ blocked_vcpu_list) {
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+ if (pi_test_on(pi_desc) == 1)
+ kvm_vcpu_kick(vcpu);
+ }
+ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+}
+
+static void vmx_enable_tdp(void)
+{
+ kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
+ enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
+ enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
+ 0ull, VMX_EPT_EXECUTABLE_MASK,
+ cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
+ VMX_EPT_RWX_MASK, 0ull);
+
+ ept_set_mmio_spte_mask();
+ kvm_enable_tdp();
+}
+
+/*
+ * Indicate a busy-waiting vcpu in a spinlock. We do not enable PAUSE
+ * exiting, so we only get here on a CPU with PAUSE-loop exiting.
+ */
+static int handle_pause(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_pause_in_guest(vcpu->kvm))
+ grow_ple_window(vcpu);
+
+ /*
+ * Intel SDM vol. 3, ch. 25.1.3 says the "PAUSE-loop exiting"
+ * VM-execution control is ignored if CPL > 0. OTOH, KVM
+ * never sets PAUSE_EXITING and only sets PLE when supported,
+ * so the vcpu must be at CPL 0 if it gets a PAUSE exit.
+ */
+ kvm_vcpu_on_spin(vcpu, true);
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+static int handle_nop(struct kvm_vcpu *vcpu)
+{
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+static int handle_mwait(struct kvm_vcpu *vcpu)
+{
+ printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
+ return handle_nop(vcpu);
+}
+
+static int handle_invalid_op(struct kvm_vcpu *vcpu)
+{
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+
+static int handle_monitor_trap(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
+static int handle_monitor(struct kvm_vcpu *vcpu)
+{
+ printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
+ return handle_nop(vcpu);
+}
+
+static int handle_invpcid(struct kvm_vcpu *vcpu)
+{
+ u32 vmx_instruction_info;
+ unsigned long type;
+ bool pcid_enabled;
+ gva_t gva;
+ struct x86_exception e;
+ unsigned i;
+ unsigned long roots_to_free = 0;
+ struct {
+ u64 pcid;
+ u64 gla;
+ } operand;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+
+ if (type > 3) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ /*
+ * According to the Intel instruction reference, the memory
+ * operand is read even if it isn't needed (e.g., for type==all).
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmx_instruction_info, false, &gva))
+ return 1;
+
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+
+ if (operand.pcid >> 12 != 0) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
+
+ switch (type) {
+ case INVPCID_TYPE_INDIV_ADDR:
+ if ((!pcid_enabled && (operand.pcid != 0)) ||
+ is_noncanonical_address(operand.gla, vcpu)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+ kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
+ return kvm_skip_emulated_instruction(vcpu);
+
+ case INVPCID_TYPE_SINGLE_CTXT:
+ if (!pcid_enabled && (operand.pcid != 0)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ if (kvm_get_active_pcid(vcpu) == operand.pcid) {
+ kvm_mmu_sync_roots(vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ }
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
+ == operand.pcid)
+ roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+
+ kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
+ /*
+ * If neither the current cr3 nor any of the prev_roots use the
+ * given PCID, then nothing needs to be done here because a
+ * resync will happen anyway before switching to any other CR3.
+ */
+
+ return kvm_skip_emulated_instruction(vcpu);
+
+ case INVPCID_TYPE_ALL_NON_GLOBAL:
+ /*
+ * Currently, KVM doesn't mark global entries in the shadow
+ * page tables, so a non-global flush just degenerates to a
+ * global flush. If needed, we could optimize this later by
+ * keeping track of global entries in shadow page tables.
+ */
+
+ /* fall-through */
+ case INVPCID_TYPE_ALL_INCL_GLOBAL:
+ kvm_mmu_unload(vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
+
+ default:
+ BUG(); /* We have already checked above that type <= 3 */
+ }
+}
+
+static int handle_pml_full(struct kvm_vcpu *vcpu)
+{
+ unsigned long exit_qualification;
+
+ trace_kvm_pml_full(vcpu->vcpu_id);
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ /*
+ * If the PML buffer filled up while executing IRET from an NMI,
+ * the "blocked by NMI" bit has to be set before the next VM entry.
+ */
+ if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ enable_vnmi &&
+ (exit_qualification & INTR_INFO_UNBLOCK_NMI))
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+
+ /*
+ * The PML buffer was already flushed at the beginning of the VMEXIT,
+ * so there is nothing to do here and no userspace involvement is
+ * needed for PML.
+ */
+ return 1;
+}
+
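+/*
+ * The preemption timer fires both when the hv_timer deadline expires and
+ * when it was armed with zero to force an immediate exit; only the former
+ * is forwarded to the lapic.
+ */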
+static int handle_preemption_timer(struct kvm_vcpu *vcpu)
+{
+ if (!to_vmx(vcpu)->req_immediate_exit)
+ kvm_lapic_expired_hv_timer(vcpu);
+ return 1;
+}
+
+/*
+ * When nested=0, all VMX-instruction VM exits land here. These handlers
+ * are overwritten by nested_vmx_setup() when nested=1.
+ */
+static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
+{
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+
+static int handle_encls(struct kvm_vcpu *vcpu)
+{
+ /*
+ * SGX virtualization is not yet supported. There is no software
+ * enable bit for SGX, so we have to trap ENCLS and inject a #UD
+ * to prevent the guest from executing ENCLS.
+ */
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+
+/*
+ * The exit handlers return 1 if the exit was handled fully and guest execution
+ * may resume. Otherwise they set the kvm_run parameter to indicate what needs
+ * to be done to userspace and return 0.
+ */
+static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
+ [EXIT_REASON_EXCEPTION_NMI] = handle_exception,
+ [EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
+ [EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
+ [EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
+ [EXIT_REASON_IO_INSTRUCTION] = handle_io,
+ [EXIT_REASON_CR_ACCESS] = handle_cr,
+ [EXIT_REASON_DR_ACCESS] = handle_dr,
+ [EXIT_REASON_CPUID] = handle_cpuid,
+ [EXIT_REASON_MSR_READ] = handle_rdmsr,
+ [EXIT_REASON_MSR_WRITE] = handle_wrmsr,
+ [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
+ [EXIT_REASON_HLT] = handle_halt,
+ [EXIT_REASON_INVD] = handle_invd,
+ [EXIT_REASON_INVLPG] = handle_invlpg,
+ [EXIT_REASON_RDPMC] = handle_rdpmc,
+ [EXIT_REASON_VMCALL] = handle_vmcall,
+ [EXIT_REASON_VMCLEAR] = handle_vmx_instruction,
+ [EXIT_REASON_VMLAUNCH] = handle_vmx_instruction,
+ [EXIT_REASON_VMPTRLD] = handle_vmx_instruction,
+ [EXIT_REASON_VMPTRST] = handle_vmx_instruction,
+ [EXIT_REASON_VMREAD] = handle_vmx_instruction,
+ [EXIT_REASON_VMRESUME] = handle_vmx_instruction,
+ [EXIT_REASON_VMWRITE] = handle_vmx_instruction,
+ [EXIT_REASON_VMOFF] = handle_vmx_instruction,
+ [EXIT_REASON_VMON] = handle_vmx_instruction,
+ [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
+ [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
+ [EXIT_REASON_APIC_WRITE] = handle_apic_write,
+ [EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
+ [EXIT_REASON_WBINVD] = handle_wbinvd,
+ [EXIT_REASON_XSETBV] = handle_xsetbv,
+ [EXIT_REASON_TASK_SWITCH] = handle_task_switch,
+ [EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
+ [EXIT_REASON_GDTR_IDTR] = handle_desc,
+ [EXIT_REASON_LDTR_TR] = handle_desc,
+ [EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
+ [EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
+ [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
+ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
+ [EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap,
+ [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
+ [EXIT_REASON_INVEPT] = handle_vmx_instruction,
+ [EXIT_REASON_INVVPID] = handle_vmx_instruction,
+ [EXIT_REASON_RDRAND] = handle_invalid_op,
+ [EXIT_REASON_RDSEED] = handle_invalid_op,
+ [EXIT_REASON_XSAVES] = handle_xsaves,
+ [EXIT_REASON_XRSTORS] = handle_xrstors,
+ [EXIT_REASON_PML_FULL] = handle_pml_full,
+ [EXIT_REASON_INVPCID] = handle_invpcid,
+ [EXIT_REASON_VMFUNC] = handle_vmx_instruction,
+ [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer,
+ [EXIT_REASON_ENCLS] = handle_encls,
+};
+
+static const int kvm_vmx_max_exit_handlers =
+ ARRAY_SIZE(kvm_vmx_exit_handlers);
+
+static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
+{
+ *info1 = vmcs_readl(EXIT_QUALIFICATION);
+ *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
+}
+
+static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
+{
+ if (vmx->pml_pg) {
+ __free_page(vmx->pml_pg);
+ vmx->pml_pg = NULL;
+ }
+}
+
+static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u64 *pml_buf;
+ u16 pml_idx;
+
+ pml_idx = vmcs_read16(GUEST_PML_INDEX);
+
+ /* Do nothing if PML buffer is empty */
+ if (pml_idx == (PML_ENTITY_NUM - 1))
+ return;
+
+ /* PML index always points to next available PML buffer entity */
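+ /* The index decrements from PML_ENTITY_NUM - 1 and underflows when full */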
+ if (pml_idx >= PML_ENTITY_NUM)
+ pml_idx = 0;
+ else
+ pml_idx++;
+
+ pml_buf = page_address(vmx->pml_pg);
+ for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
+ u64 gpa;
+
+ gpa = pml_buf[pml_idx];
+ WARN_ON(gpa & (PAGE_SIZE - 1));
+ kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
+ }
+
+ /* reset PML index */
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+}
+
+/*
+ * Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap.
+ * Called before reporting dirty_bitmap to userspace.
+ */
+static void kvm_flush_pml_buffers(struct kvm *kvm)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+ /*
+ * We only need to kick each vcpu out of guest mode here: the PML
+ * buffer is flushed at the beginning of every VMEXIT, so only vcpus
+ * currently running in guest mode can have unflushed GPAs in their
+ * PML buffer.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_vcpu_kick(vcpu);
+}
+
+static void vmx_dump_sel(char *name, uint32_t sel)
+{
+ pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
+ name, vmcs_read16(sel),
+ vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
+ vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
+ vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
+}
+
+static void vmx_dump_dtsel(char *name, uint32_t limit)
+{
+ pr_err("%s limit=0x%08x, base=0x%016lx\n",
+ name, vmcs_read32(limit),
+ vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
+}
+
+static void dump_vmcs(void)
+{
+ u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
+ u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
+ u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+ u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
+ u32 secondary_exec_control = 0;
+ unsigned long cr4 = vmcs_readl(GUEST_CR4);
+ u64 efer = vmcs_read64(GUEST_IA32_EFER);
+ int i, n;
+
+ if (cpu_has_secondary_exec_ctrls())
+ secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+
+ pr_err("*** Guest State ***\n");
+ pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
+ vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
+ vmcs_readl(CR0_GUEST_HOST_MASK));
+ pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
+ cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
+ pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
+ if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
+ (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
+ {
+ pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
+ vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
+ pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
+ vmcs_read64(GUEST_PDPTR2), vmcs_read64(GUEST_PDPTR3));
+ }
+ pr_err("RSP = 0x%016lx RIP = 0x%016lx\n",
+ vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
+ pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n",
+ vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
+ pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
+ vmcs_readl(GUEST_SYSENTER_ESP),
+ vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
+ vmx_dump_sel("CS: ", GUEST_CS_SELECTOR);
+ vmx_dump_sel("DS: ", GUEST_DS_SELECTOR);
+ vmx_dump_sel("SS: ", GUEST_SS_SELECTOR);
+ vmx_dump_sel("ES: ", GUEST_ES_SELECTOR);
+ vmx_dump_sel("FS: ", GUEST_FS_SELECTOR);
+ vmx_dump_sel("GS: ", GUEST_GS_SELECTOR);
+ vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
+ vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
+ vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
+ vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
+ if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
+ (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
+ pr_err("EFER = 0x%016llx PAT = 0x%016llx\n",
+ efer, vmcs_read64(GUEST_IA32_PAT));
+ pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n",
+ vmcs_read64(GUEST_IA32_DEBUGCTL),
+ vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
+ if (cpu_has_load_perf_global_ctrl() &&
+ vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+ pr_err("PerfGlobCtl = 0x%016llx\n",
+ vmcs_read64(GUEST_IA32_PERF_GLOBAL_CTRL));
+ if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
+ pr_err("BndCfgS = 0x%016llx\n", vmcs_read64(GUEST_BNDCFGS));
+ pr_err("Interruptibility = %08x ActivityState = %08x\n",
+ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
+ vmcs_read32(GUEST_ACTIVITY_STATE));
+ if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
+ pr_err("InterruptStatus = %04x\n",
+ vmcs_read16(GUEST_INTR_STATUS));
+
+ pr_err("*** Host State ***\n");
+ pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
+ vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
+ pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
+ vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
+ vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
+ vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
+ vmcs_read16(HOST_TR_SELECTOR));
+ pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
+ vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
+ vmcs_readl(HOST_TR_BASE));
+ pr_err("GDTBase=%016lx IDTBase=%016lx\n",
+ vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
+ pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
+ vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
+ vmcs_readl(HOST_CR4));
+ pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
+ vmcs_readl(HOST_IA32_SYSENTER_ESP),
+ vmcs_read32(HOST_IA32_SYSENTER_CS),
+ vmcs_readl(HOST_IA32_SYSENTER_EIP));
+ if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
+ pr_err("EFER = 0x%016llx PAT = 0x%016llx\n",
+ vmcs_read64(HOST_IA32_EFER),
+ vmcs_read64(HOST_IA32_PAT));
+ if (cpu_has_load_perf_global_ctrl() &&
+ vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
+ pr_err("PerfGlobCtl = 0x%016llx\n",
+ vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
+
+ pr_err("*** Control State ***\n");
+ pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
+ pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
+ pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
+ pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
+ vmcs_read32(EXCEPTION_BITMAP),
+ vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
+ vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
+ pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
+ vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
+ vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
+ vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
+ pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
+ vmcs_read32(VM_EXIT_INTR_INFO),
+ vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
+ vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
+ pr_err(" reason=%08x qualification=%016lx\n",
+ vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
+ pr_err("IDTVectoring: info=%08x errcode=%08x\n",
+ vmcs_read32(IDT_VECTORING_INFO_FIELD),
+ vmcs_read32(IDT_VECTORING_ERROR_CODE));
+ pr_err("TSC Offset = 0x%016llx\n", vmcs_read64(TSC_OFFSET));
+ if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
+ pr_err("TSC Multiplier = 0x%016llx\n",
+ vmcs_read64(TSC_MULTIPLIER));
+ if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
+ pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
+ if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
+ pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
+ if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
+ pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
+ n = vmcs_read32(CR3_TARGET_COUNT);
+ for (i = 0; i + 1 < n; i += 4)
+ pr_err("CR3 target%u=%016lx target%u=%016lx\n",
+ i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
+ i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
+ if (i < n)
+ pr_err("CR3 target%u=%016lx\n",
+ i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
+ if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
+ pr_err("PLE Gap=%08x Window=%08x\n",
+ vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
+ if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
+ pr_err("Virtual processor ID = 0x%04x\n",
+ vmcs_read16(VIRTUAL_PROCESSOR_ID));
+}
+
+/*
+ * The guest has exited. See if we can fix it or if we need userspace
+ * assistance.
+ */
+static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 exit_reason = vmx->exit_reason;
+ u32 vectoring_info = vmx->idt_vectoring_info;
+
+ trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
+
+ /*
+ * Flush the logged GPAs from the PML buffer; this keeps dirty_bitmap
+ * up to date. Another benefit: in kvm_vm_ioctl_get_dirty_log, before
+ * querying dirty_bitmap, we only need to kick all vcpus out of guest
+ * mode, since once a vcpu is in root mode its PML buffer must already
+ * have been flushed.
+ */
+ if (enable_pml)
+ vmx_flush_pml_buffer(vcpu);
+
+ /* If guest state is invalid, start emulating */
+ if (vmx->emulation_required)
+ return handle_invalid_guest_state(vcpu);
+
+ if (is_guest_mode(vcpu) && nested_vmx_exit_reflected(vcpu, exit_reason))
+ return nested_vmx_reflect_vmexit(vcpu, exit_reason);
+
+ if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
+ dump_vmcs();
+ vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ vcpu->run->fail_entry.hardware_entry_failure_reason
+ = exit_reason;
+ return 0;
+ }
+
+ if (unlikely(vmx->fail)) {
+ vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ vcpu->run->fail_entry.hardware_entry_failure_reason
+ = vmcs_read32(VM_INSTRUCTION_ERROR);
+ return 0;
+ }
+
+ /*
+ * Note:
+ * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by
+ * an event delivery, since that indicates the guest is accessing
+ * MMIO. The VM exit would be triggered again after returning to the
+ * guest, causing an infinite loop.
+ */
+ if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
+ exit_reason != EXIT_REASON_EPT_VIOLATION &&
+ exit_reason != EXIT_REASON_PML_FULL &&
+ exit_reason != EXIT_REASON_TASK_SWITCH)) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
+ vcpu->run->internal.ndata = 3;
+ vcpu->run->internal.data[0] = vectoring_info;
+ vcpu->run->internal.data[1] = exit_reason;
+ vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
+ if (exit_reason == EXIT_REASON_EPT_MISCONFIG) {
+ vcpu->run->internal.ndata++;
+ vcpu->run->internal.data[3] =
+ vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+ }
+ return 0;
+ }
+
+ if (unlikely(!enable_vnmi &&
+ vmx->loaded_vmcs->soft_vnmi_blocked)) {
+ if (vmx_interrupt_allowed(vcpu)) {
+ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+ } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
+ vcpu->arch.nmi_pending) {
+ /*
+ * This CPU doesn't give us a way to find the end of an
+ * NMI-blocked window if the guest runs with IRQs
+ * disabled. So we pull the trigger after 1 s of
+ * futile waiting, but inform the user about it.
+ */
+ printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
+ "state on VCPU %d after 1 s timeout\n",
+ __func__, vcpu->vcpu_id);
+ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+ }
+ }
+
+ if (exit_reason < kvm_vmx_max_exit_handlers
+ && kvm_vmx_exit_handlers[exit_reason])
+ return kvm_vmx_exit_handlers[exit_reason](vcpu);
+ else {
+ vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
+ exit_reason);
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+}
+
+/*
+ * Software based L1D cache flush which is used when microcode providing
+ * the cache control MSR is not loaded.
+ *
+ * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but
+ * flushing it requires reading in 64 KiB because the replacement
+ * algorithm is not exactly LRU. This could be sized at runtime via
+ * topology information, but as all relevant affected CPUs have a 32 KiB
+ * L1D cache there is no point in doing so.
+ */
+static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+{
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
+
+ /*
+ * This code is only executed when the flush mode is 'cond' or
+ * 'always'.
+ */
+ if (static_branch_likely(&vmx_l1d_flush_cond)) {
+ bool flush_l1d;
+
+ /*
+ * Clear the per-vcpu flush bit, it gets set again
+ * either from vcpu_run() or from one of the unsafe
+ * VMEXIT handlers.
+ */
+ flush_l1d = vcpu->arch.l1tf_flush_l1d;
+ vcpu->arch.l1tf_flush_l1d = false;
+
+ /*
+ * Clear the per-cpu flush bit, it gets set again from
+ * the interrupt handlers.
+ */
+ flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
+ kvm_clear_cpu_l1tf_flush_l1d();
+
+ if (!flush_l1d)
+ return;
+ }
+
+ vcpu->stat.l1d_flush++;
+
+ if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+ return;
+ }
+
+ asm volatile(
+ /* First ensure the pages are in the TLB */
+ "xorl %%eax, %%eax\n"
+ ".Lpopulate_tlb:\n\t"
+ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $4096, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lpopulate_tlb\n\t"
+ "xorl %%eax, %%eax\n\t"
+ "cpuid\n\t"
+ /* Now fill the cache */
+ "xorl %%eax, %%eax\n"
+ ".Lfill_cache:\n"
+ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $64, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lfill_cache\n\t"
+ "lfence\n"
+ :: [flush_pages] "r" (vmx_l1d_flush_pages),
+ [size] "r" (size)
+ : "eax", "ebx", "ecx", "edx");
+}
+
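+/*
+ * Program the TPR threshold so that a guest TPR write that drops below the
+ * priority of the highest pending interrupt (irr) causes a VM exit.
+ */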
+static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (is_guest_mode(vcpu) &&
+ nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
+ return;
+
+ if (irr == -1 || tpr < irr) {
+ vmcs_write32(TPR_THRESHOLD, 0);
+ return;
+ }
+
+ vmcs_write32(TPR_THRESHOLD, irr);
+}
+
+void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
+{
+ u32 sec_exec_control;
+
+ if (!lapic_in_kernel(vcpu))
+ return;
+
+ if (!flexpriority_enabled &&
+ !cpu_has_vmx_virtualize_x2apic_mode())
+ return;
+
+ /* Postpone execution until vmcs01 is the current VMCS. */
+ if (is_guest_mode(vcpu)) {
+ to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
+ return;
+ }
+
+ sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
+
+ switch (kvm_get_apic_mode(vcpu)) {
+ case LAPIC_MODE_INVALID:
+ WARN_ONCE(true, "Invalid local APIC state");
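+ /* fall through */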
+ case LAPIC_MODE_DISABLED:
+ break;
+ case LAPIC_MODE_XAPIC:
+ if (flexpriority_enabled) {
+ sec_exec_control |=
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ vmx_flush_tlb(vcpu, true);
+ }
+ break;
+ case LAPIC_MODE_X2APIC:
+ if (cpu_has_vmx_virtualize_x2apic_mode())
+ sec_exec_control |=
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+ break;
+ }
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
+
+ vmx_update_msr_bitmap(vcpu);
+}
+
+static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
+{
+ if (!is_guest_mode(vcpu)) {
+ vmcs_write64(APIC_ACCESS_ADDR, hpa);
+ vmx_flush_tlb(vcpu, true);
+ }
+}
+
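+/*
+ * GUEST_INTR_STATUS packs RVI in its low byte and SVI in its high byte;
+ * the two helpers below each update one half without touching the other.
+ */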
+static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
+{
+ u16 status;
+ u8 old;
+
+ if (max_isr == -1)
+ max_isr = 0;
+
+ status = vmcs_read16(GUEST_INTR_STATUS);
+ old = status >> 8;
+ if (max_isr != old) {
+ status &= 0xff;
+ status |= max_isr << 8;
+ vmcs_write16(GUEST_INTR_STATUS, status);
+ }
+}
+
+static void vmx_set_rvi(int vector)
+{
+ u16 status;
+ u8 old;
+
+ if (vector == -1)
+ vector = 0;
+
+ status = vmcs_read16(GUEST_INTR_STATUS);
+ old = (u8)status & 0xff;
+ if ((u8)vector != old) {
+ status &= ~0xff;
+ status |= (u8)vector;
+ vmcs_write16(GUEST_INTR_STATUS, status);
+ }
+}
+
+static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
+{
+ /*
+ * When running L2, updating RVI is only relevant if vmcs12
+ * virtual-interrupt-delivery is enabled. However, that can be
+ * enabled only when L1 also intercepts external interrupts, in
+ * which case we should not update vmcs02's RVI but instead
+ * intercept the interrupt. Therefore, do nothing when running L2.
+ */
+ if (!is_guest_mode(vcpu))
+ vmx_set_rvi(max_irr);
+}
+
+static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int max_irr;
+ bool max_irr_updated;
+
+ WARN_ON(!vcpu->arch.apicv_active);
+ if (pi_test_on(&vmx->pi_desc)) {
+ pi_clear_on(&vmx->pi_desc);
+ /*
+ * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+ * But on x86 this is just a compiler barrier anyway.
+ */
+ smp_mb__after_atomic();
+ max_irr_updated =
+ kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
+
+ /*
+ * If we are running L2 and L1 has a new pending interrupt
+ * which can be injected, we should re-evaluate
+ * what should be done with this new L1 interrupt.
+ * If L1 intercepts external-interrupts, we should
+ * exit from L2 to L1. Otherwise, interrupt should be
+ * delivered directly to L2.
+ */
+ if (is_guest_mode(vcpu) && max_irr_updated) {
+ if (nested_exit_on_intr(vcpu))
+ kvm_vcpu_exiting_guest_mode(vcpu);
+ else
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ }
+ } else {
+ max_irr = kvm_lapic_find_highest_irr(vcpu);
+ }
+ vmx_hwapic_irr_update(vcpu, max_irr);
+ return max_irr;
+}
+
+static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+ if (!kvm_vcpu_apicv_active(vcpu))
+ return;
+
+ vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
+ vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
+ vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
+ vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
+}
+
+static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ pi_clear_on(&vmx->pi_desc);
+ memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
+}
+
+static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
+{
+ u32 exit_intr_info = 0;
+ u16 basic_exit_reason = (u16)vmx->exit_reason;
+
+ if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
+ || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
+ return;
+
+ if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ vmx->exit_intr_info = exit_intr_info;
+
+ /* If the exit was due to a page fault, check for an async page fault */
+ if (is_page_fault(exit_intr_info))
+ vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
+
+ /* Handle machine checks before interrupts are enabled */
+ if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
+ is_machine_check(exit_intr_info))
+ kvm_machine_check();
+
+ /* We need to handle NMIs before interrupts are enabled */
+ if (is_nmi(exit_intr_info)) {
+ kvm_before_interrupt(&vmx->vcpu);
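+ /* A software int $2 dispatches through the host's NMI vector */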
+ asm("int $2");
+ kvm_after_interrupt(&vmx->vcpu);
+ }
+}
+
+static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
+{
+ u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+ if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
+ == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
+ unsigned int vector;
+ unsigned long entry;
+ gate_desc *desc;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+#ifdef CONFIG_X86_64
+ unsigned long tmp;
+#endif
+
+ vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+ desc = (gate_desc *)vmx->host_idt_base + vector;
+ entry = gate_offset(desc);
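+ /*
+ * Build an interrupt frame by hand (SS:RSP on 64-bit, then
+ * RFLAGS and CS; CALL pushes the return RIP) and jump through
+ * the IDT gate so the handler's IRET returns right here.
+ */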
+ asm volatile(
+#ifdef CONFIG_X86_64
+ "mov %%" _ASM_SP ", %[sp]\n\t"
+ "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
+ "push $%c[ss]\n\t"
+ "push %[sp]\n\t"
+#endif
+ "pushf\n\t"
+ __ASM_SIZE(push) " $%c[cs]\n\t"
+ CALL_NOSPEC
+ :
+#ifdef CONFIG_X86_64
+ [sp]"=&r"(tmp),
+#endif
+ ASM_CALL_CONSTRAINT
+ :
+ THUNK_TARGET(entry),
+ [ss]"i"(__KERNEL_DS),
+ [cs]"i"(__KERNEL_CS)
+ );
+ }
+}
+STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
+
+static bool vmx_has_emulated_msr(int index)
+{
+ switch (index) {
+ case MSR_IA32_SMBASE:
+ /*
+ * We cannot do SMM unless we can run the guest in big
+ * real mode.
+ */
+ return enable_unrestricted_guest || emulate_invalid_guest_state;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ /* This is AMD only. */
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool vmx_pt_supported(void)
+{
+ return pt_mode == PT_MODE_HOST_GUEST;
+}
+
+static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
+{
+ u32 exit_intr_info;
+ bool unblock_nmi;
+ u8 vector;
+ bool idtv_info_valid;
+
+ idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
+
+ if (enable_vnmi) {
+ if (vmx->loaded_vmcs->nmi_known_unmasked)
+ return;
+ /*
+ * Can't use vmx->exit_intr_info since we're not sure what
+ * the exit reason is.
+ */
+ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+ vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+ /*
+ * SDM 3: 27.7.1.2 (September 2008)
+ * Re-set bit "block by NMI" before VM entry if vmexit caused by
+ * a guest IRET fault.
+ * SDM 3: 23.2.2 (September 2008)
+ * Bit 12 is undefined in any of the following cases:
+ * If the VM exit sets the valid bit in the IDT-vectoring
+ * information field.
+ * If the VM exit is due to a double fault.
+ */
+ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
+ vector != DF_VECTOR && !idtv_info_valid)
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ else
+ vmx->loaded_vmcs->nmi_known_unmasked =
+ !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
+ & GUEST_INTR_STATE_NMI);
+ } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
+ vmx->loaded_vmcs->vnmi_blocked_time +=
+ ktime_to_ns(ktime_sub(ktime_get(),
+ vmx->loaded_vmcs->entry_time));
+}
+
+static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
+ u32 idt_vectoring_info,
+ int instr_len_field,
+ int error_code_field)
+{
+ u8 vector;
+ int type;
+ bool idtv_info_valid;
+
+ idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
+
+ vcpu->arch.nmi_injected = false;
+ kvm_clear_exception_queue(vcpu);
+ kvm_clear_interrupt_queue(vcpu);
+
+ if (!idtv_info_valid)
+ return;
+
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+ vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
+ type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
+
+ switch (type) {
+ case INTR_TYPE_NMI_INTR:
+ vcpu->arch.nmi_injected = true;
+ /*
+ * SDM 3: 27.7.1.2 (September 2008)
+ * Clear bit "block by NMI" before VM entry if an NMI
+ * delivery faulted.
+ */
+ vmx_set_nmi_mask(vcpu, false);
+ break;
+ case INTR_TYPE_SOFT_EXCEPTION:
+ vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
+ /* fall through */
+ case INTR_TYPE_HARD_EXCEPTION:
+ if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
+ u32 err = vmcs_read32(error_code_field);
+ kvm_requeue_exception_e(vcpu, vector, err);
+ } else
+ kvm_requeue_exception(vcpu, vector);
+ break;
+ case INTR_TYPE_SOFT_INTR:
+ vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
+ /* fall through */
+ case INTR_TYPE_EXT_INTR:
+ kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
+ break;
+ default:
+ break;
+ }
+}
+
+static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
+{
+ __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
+ VM_EXIT_INSTRUCTION_LEN,
+ IDT_VECTORING_ERROR_CODE);
+}
+
+static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
+{
+ __vmx_complete_interrupts(vcpu,
+ vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
+ VM_ENTRY_INSTRUCTION_LEN,
+ VM_ENTRY_EXCEPTION_ERROR_CODE);
+
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
+}
+
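+/*
+ * Use the VMCS MSR load/store lists only for perf MSRs whose guest and host
+ * values differ; identical values need no atomic switch.
+ */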
+static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+{
+ int i, nr_msrs;
+ struct perf_guest_switch_msr *msrs;
+
+ msrs = perf_guest_get_msrs(&nr_msrs);
+
+ if (!msrs)
+ return;
+
+ for (i = 0; i < nr_msrs; i++)
+ if (msrs[i].host == msrs[i].guest)
+ clear_atomic_switch_msr(vmx, msrs[i].msr);
+ else
+ add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
+ msrs[i].host, false);
+}
+
+static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
+ if (!vmx->loaded_vmcs->hv_timer_armed)
+ vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
+ vmx->loaded_vmcs->hv_timer_armed = true;
+}
+
+static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u64 tscl;
+ u32 delta_tsc;
+
+ if (vmx->req_immediate_exit) {
+ vmx_arm_hv_timer(vmx, 0);
+ return;
+ }
+
+ if (vmx->hv_deadline_tsc != -1) {
+ tscl = rdtsc();
+ if (vmx->hv_deadline_tsc > tscl)
+ /* set_hv_timer ensures the delta fits in 32 bits */
+ delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
+ cpu_preemption_timer_multi);
+ else
+ delta_tsc = 0;
+
+ vmx_arm_hv_timer(vmx, delta_tsc);
+ return;
+ }
+
+ if (vmx->loaded_vmcs->hv_timer_armed)
+ vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
+ vmx->loaded_vmcs->hv_timer_armed = false;
+}
+
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long cr3, cr4, evmcs_rsp;
+
+ /* Record the guest's net vcpu time for enforced NMI injections. */
+ if (unlikely(!enable_vnmi &&
+ vmx->loaded_vmcs->soft_vnmi_blocked))
+ vmx->loaded_vmcs->entry_time = ktime_get();
+
+ /*
+ * Don't enter VMX if guest state is invalid; let the exit handler
+ * start emulation until we arrive back at a valid state.
+ */
+ if (vmx->emulation_required)
+ return;
+
+ if (vmx->ple_window_dirty) {
+ vmx->ple_window_dirty = false;
+ vmcs_write32(PLE_WINDOW, vmx->ple_window);
+ }
+
+ if (vmx->nested.need_vmcs12_sync)
+ nested_sync_from_vmcs12(vcpu);
+
+ if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
+ vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
+ if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
+ vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
+
+ cr3 = __get_current_cr3_fast();
+ if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+ vmcs_writel(HOST_CR3, cr3);
+ vmx->loaded_vmcs->host_state.cr3 = cr3;
+ }
+
+ cr4 = cr4_read_shadow();
+ if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
+ vmcs_writel(HOST_CR4, cr4);
+ vmx->loaded_vmcs->host_state.cr4 = cr4;
+ }
+
+ /*
+ * When single-stepping over STI and MOV SS, we must clear the
+ * corresponding interruptibility bits in the guest state. Otherwise
+ * vmentry fails as it then expects bit 14 (BS) in pending debug
+ * exceptions to be set, but that's not correct for the
+ * guest-debugging case.
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+ vmx_set_interrupt_shadow(vcpu, 0);
+
+ if (static_cpu_has(X86_FEATURE_PKU) &&
+ kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
+ vcpu->arch.pkru != vmx->host_pkru)
+ __write_pkru(vcpu->arch.pkru);
+
+ pt_guest_enter(vmx);
+
+ atomic_switch_perf_msrs(vmx);
+
+ vmx_update_hv_timer(vcpu);
+
+ /*
+ * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+ * it's non-zero. Since vmentry is serialising on affected CPUs, there
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
+
+ evmcs_rsp = static_branch_unlikely(&enable_evmcs) ?
+ (unsigned long)&current_evmcs->host_rsp : 0;
+
+ if (static_branch_unlikely(&vmx_l1d_should_flush))
+ vmx_l1d_flush(vcpu);
+
+ asm(
+ /* Store host registers */
+ "push %%" _ASM_DX "; push %%" _ASM_BP ";"
+ "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
+ "push %%" _ASM_CX " \n\t"
+ "sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
+ "cmp %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
+ "je 1f \n\t"
+ "mov %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
+ /* Avoid VMWRITE when Enlightened VMCS is in use */
+ "test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
+ "jz 2f \n\t"
+ "mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
+ "jmp 1f \n\t"
+ "2: \n\t"
+ __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
+ "1: \n\t"
+ "add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
+
+ /* Reload cr2 if changed */
+ "mov %c[cr2](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
+ "mov %%cr2, %%" _ASM_DX " \n\t"
+ "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
+ "je 3f \n\t"
+ "mov %%" _ASM_AX", %%cr2 \n\t"
+ "3: \n\t"
+ /* Check if vmlaunch or vmresume is needed */
+ "cmpl $0, %c[launched](%%" _ASM_CX ") \n\t"
+ /* Load guest registers. Don't clobber flags. */
+ "mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
+ "mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t"
+ "mov %c[rdx](%%" _ASM_CX "), %%" _ASM_DX " \n\t"
+ "mov %c[rsi](%%" _ASM_CX "), %%" _ASM_SI " \n\t"
+ "mov %c[rdi](%%" _ASM_CX "), %%" _ASM_DI " \n\t"
+ "mov %c[rbp](%%" _ASM_CX "), %%" _ASM_BP " \n\t"
+#ifdef CONFIG_X86_64
+ "mov %c[r8](%%" _ASM_CX "), %%r8 \n\t"
+ "mov %c[r9](%%" _ASM_CX "), %%r9 \n\t"
+ "mov %c[r10](%%" _ASM_CX "), %%r10 \n\t"
+ "mov %c[r11](%%" _ASM_CX "), %%r11 \n\t"
+ "mov %c[r12](%%" _ASM_CX "), %%r12 \n\t"
+ "mov %c[r13](%%" _ASM_CX "), %%r13 \n\t"
+ "mov %c[r14](%%" _ASM_CX "), %%r14 \n\t"
+ "mov %c[r15](%%" _ASM_CX "), %%r15 \n\t"
+#endif
+ /* Load guest RCX. This kills the vmx_vcpu pointer! */
+ "mov %c[rcx](%%" _ASM_CX "), %%" _ASM_CX " \n\t"
+
+ /* Enter guest mode */
+ "call vmx_vmenter\n\t"
+
+ /* Save guest's RCX to the stack placeholder (see above) */
+ "mov %%" _ASM_CX ", %c[wordsize](%%" _ASM_SP ") \n\t"
+
+ /* Load host's RCX, i.e. the vmx_vcpu pointer */
+ "pop %%" _ASM_CX " \n\t"
+
+ /* Set vmx->fail based on EFLAGS.{CF,ZF} */
+ "setbe %c[fail](%%" _ASM_CX ")\n\t"
+
+ /* Save all guest registers, including RCX from the stack */
+ "mov %%" _ASM_AX ", %c[rax](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_BX ", %c[rbx](%%" _ASM_CX ") \n\t"
+ __ASM_SIZE(pop) " %c[rcx](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_DX ", %c[rdx](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_SI ", %c[rsi](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_DI ", %c[rdi](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_BP ", %c[rbp](%%" _ASM_CX ") \n\t"
+#ifdef CONFIG_X86_64
+ "mov %%r8, %c[r8](%%" _ASM_CX ") \n\t"
+ "mov %%r9, %c[r9](%%" _ASM_CX ") \n\t"
+ "mov %%r10, %c[r10](%%" _ASM_CX ") \n\t"
+ "mov %%r11, %c[r11](%%" _ASM_CX ") \n\t"
+ "mov %%r12, %c[r12](%%" _ASM_CX ") \n\t"
+ "mov %%r13, %c[r13](%%" _ASM_CX ") \n\t"
+ "mov %%r14, %c[r14](%%" _ASM_CX ") \n\t"
+ "mov %%r15, %c[r15](%%" _ASM_CX ") \n\t"
+ /*
+ * Clear host registers marked as clobbered to prevent
+ * speculative use.
+ */
+ "xor %%r8d, %%r8d \n\t"
+ "xor %%r9d, %%r9d \n\t"
+ "xor %%r10d, %%r10d \n\t"
+ "xor %%r11d, %%r11d \n\t"
+ "xor %%r12d, %%r12d \n\t"
+ "xor %%r13d, %%r13d \n\t"
+ "xor %%r14d, %%r14d \n\t"
+ "xor %%r15d, %%r15d \n\t"
+#endif
+ "mov %%cr2, %%" _ASM_AX " \n\t"
+ "mov %%" _ASM_AX ", %c[cr2](%%" _ASM_CX ") \n\t"
+
+ "xor %%eax, %%eax \n\t"
+ "xor %%ebx, %%ebx \n\t"
+ "xor %%esi, %%esi \n\t"
+ "xor %%edi, %%edi \n\t"
+ "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
+ : ASM_CALL_CONSTRAINT
+ : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp),
+ [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
+ [fail]"i"(offsetof(struct vcpu_vmx, fail)),
+ [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
+ [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
+ [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
+ [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
+ [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
+ [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
+ [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
+ [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
+#ifdef CONFIG_X86_64
+ [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
+ [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
+ [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
+ [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
+ [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
+ [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
+ [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
+ [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
+#endif
+ [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
+ [wordsize]"i"(sizeof(ulong))
+ : "cc", "memory"
+#ifdef CONFIG_X86_64
+ , "rax", "rbx", "rdi"
+ , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+#else
+ , "eax", "ebx", "edi"
+#endif
+ );
+
+ /*
+ * We do not use IBRS in the kernel. If this vCPU has used the
+ * SPEC_CTRL MSR it may have left it on; save the value and
+ * turn it off. This is much more efficient than blindly adding
+ * it to the atomic save/restore list, especially as the former
+ * (saving guest MSRs on vmexit) doesn't even exist in KVM.
+ *
+ * For non-nested case:
+ * If the L01 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ *
+ * For nested case:
+ * If the L02 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ */
+ if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+ x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
+
+ /* Eliminate branch target predictions from guest mode */
+ vmexit_fill_RSB();
+
+ /* All fields are clean at this point */
+ if (static_branch_unlikely(&enable_evmcs))
+ current_evmcs->hv_clean_fields |=
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+
+ /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
+ if (vmx->host_debugctlmsr)
+ update_debugctlmsr(vmx->host_debugctlmsr);
+
+#ifndef CONFIG_X86_64
+ /*
+ * The sysexit path does not restore ds/es, so we must set them to
+ * a reasonable value ourselves.
+ *
+ * We can't defer this to vmx_prepare_switch_to_host() since that
+ * function may be executed in interrupt context, which saves and
+ * restores segments around it, nullifying its effect.
+ */
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
+#endif
+
+ vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
+ | (1 << VCPU_EXREG_RFLAGS)
+ | (1 << VCPU_EXREG_PDPTR)
+ | (1 << VCPU_EXREG_SEGMENTS)
+ | (1 << VCPU_EXREG_CR3));
+ vcpu->arch.regs_dirty = 0;
+
+ pt_guest_exit(vmx);
+
+ /*
+ * Eager FPU is enabled if PKEY is supported and CR4 has been
+ * switched back to the host value, so it is safe to read the
+ * guest's PKRU from the current XSAVE area.
+ */
+ if (static_cpu_has(X86_FEATURE_PKU) &&
+ kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
+ vcpu->arch.pkru = __read_pkru();
+ if (vcpu->arch.pkru != vmx->host_pkru)
+ __write_pkru(vmx->host_pkru);
+ }
+
+ vmx->nested.nested_run_pending = 0;
+ vmx->idt_vectoring_info = 0;
+
+ vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
+ if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+ return;
+
+ vmx->loaded_vmcs->launched = 1;
+ vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
+
+ vmx_complete_atomic_exit(vmx);
+ vmx_recover_nmi_blocking(vmx);
+ vmx_complete_interrupts(vmx);
+}
+STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
+
+static struct kvm *vmx_vm_alloc(void)
+{
+ struct kvm_vmx *kvm_vmx = vzalloc(sizeof(struct kvm_vmx));
+ return &kvm_vmx->kvm;
+}
+
+static void vmx_vm_free(struct kvm *kvm)
+{
+ vfree(to_kvm_vmx(kvm));
+}
+
+static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (enable_pml)
+ vmx_destroy_pml_buffer(vmx);
+ free_vpid(vmx->vpid);
+ leave_guest_mode(vcpu);
+ nested_vmx_free_vcpu(vcpu);
+ free_loaded_vmcs(vmx->loaded_vmcs);
+ kfree(vmx->guest_msrs);
+ kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
+ kmem_cache_free(kvm_vcpu_cache, vmx);
+}
+
+static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
+{
+ int err;
+ struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ unsigned long *msr_bitmap;
+ int cpu;
+
+ if (!vmx)
+ return ERR_PTR(-ENOMEM);
+
+ vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
+ if (!vmx->vcpu.arch.guest_fpu) {
+ printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
+ err = -ENOMEM;
+ goto free_partial_vcpu;
+ }
+
+ vmx->vpid = allocate_vpid();
+
+ err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
+ if (err)
+ goto free_vcpu;
+
+ err = -ENOMEM;
+
+	/*
+	 * If PML is turned on, failure to enable PML just results in failure
+	 * of creating the vcpu, therefore we can simplify PML logic (by
+	 * avoiding dealing with cases such as enabling PML partially on vcpus
+	 * for the guest, etc.).
+	 */
+ if (enable_pml) {
+ vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!vmx->pml_pg)
+ goto uninit_vcpu;
+ }
+
+ vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
+ > PAGE_SIZE);
+
+ if (!vmx->guest_msrs)
+ goto free_pml;
+
+ err = alloc_loaded_vmcs(&vmx->vmcs01);
+ if (err < 0)
+ goto free_msrs;
+
+ msr_bitmap = vmx->vmcs01.msr_bitmap;
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_TSC, MSR_TYPE_R);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
+ vmx->msr_bitmap_mode = 0;
+
+ vmx->loaded_vmcs = &vmx->vmcs01;
+ cpu = get_cpu();
+ vmx_vcpu_load(&vmx->vcpu, cpu);
+ vmx->vcpu.cpu = cpu;
+ vmx_vcpu_setup(vmx);
+ vmx_vcpu_put(&vmx->vcpu);
+ put_cpu();
+ if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
+ err = alloc_apic_access_page(kvm);
+ if (err)
+ goto free_vmcs;
+ }
+
+ if (enable_ept && !enable_unrestricted_guest) {
+ err = init_rmode_identity_map(kvm);
+ if (err)
+ goto free_vmcs;
+ }
+
+ if (nested)
+ nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
+ vmx_capability.ept,
+ kvm_vcpu_apicv_active(&vmx->vcpu));
+ else
+ memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
+
+ vmx->nested.posted_intr_nv = -1;
+ vmx->nested.current_vmptr = -1ull;
+
+ vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED;
+
+ /*
+ * Enforce invariant: pi_desc.nv is always either POSTED_INTR_VECTOR
+ * or POSTED_INTR_WAKEUP_VECTOR.
+ */
+ vmx->pi_desc.nv = POSTED_INTR_VECTOR;
+ vmx->pi_desc.sn = 1;
+
+ vmx->ept_pointer = INVALID_PAGE;
+
+ return &vmx->vcpu;
+
+free_vmcs:
+ free_loaded_vmcs(vmx->loaded_vmcs);
+free_msrs:
+ kfree(vmx->guest_msrs);
+free_pml:
+ vmx_destroy_pml_buffer(vmx);
+uninit_vcpu:
+ kvm_vcpu_uninit(&vmx->vcpu);
+free_vcpu:
+ free_vpid(vmx->vpid);
+ kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
+free_partial_vcpu:
+ kmem_cache_free(kvm_vcpu_cache, vmx);
+ return ERR_PTR(err);
+}
+
+#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+
+static int vmx_vm_init(struct kvm *kvm)
+{
+ spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock);
+
+ if (!ple_gap)
+ kvm->arch.pause_in_guest = true;
+
+ if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
+ switch (l1tf_mitigation) {
+ case L1TF_MITIGATION_OFF:
+ case L1TF_MITIGATION_FLUSH_NOWARN:
+ /* 'I explicitly don't care' is set */
+ break;
+ case L1TF_MITIGATION_FLUSH:
+ case L1TF_MITIGATION_FLUSH_NOSMT:
+ case L1TF_MITIGATION_FULL:
+ /*
+ * Warn upon starting the first VM in a potentially
+ * insecure environment.
+ */
+ if (cpu_smt_control == CPU_SMT_ENABLED)
+ pr_warn_once(L1TF_MSG_SMT);
+ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
+ pr_warn_once(L1TF_MSG_L1D);
+ break;
+ case L1TF_MITIGATION_FULL_FORCE:
+ /* Flush is enforced */
+ break;
+ }
+ }
+ return 0;
+}
+
+static void __init vmx_check_processor_compat(void *rtn)
+{
+ struct vmcs_config vmcs_conf;
+ struct vmx_capability vmx_cap;
+
+ *(int *)rtn = 0;
+ if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
+ *(int *)rtn = -EIO;
+ if (nested)
+ nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept,
+ enable_apicv);
+ if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
+ printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
+ smp_processor_id());
+ *(int *)rtn = -EIO;
+ }
+}
+
+static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
+{
+ u8 cache;
+ u64 ipat = 0;
+
+	/*
+	 * For the combination of VT-d and EPT:
+	 * 1. MMIO: always map as UC.
+	 * 2. EPT with VT-d:
+	 *    a. VT-d without the snooping control feature: we can't guarantee
+	 *       the result, so try to trust the guest.
+	 *    b. VT-d with the snooping control feature: the snooping control
+	 *       of the VT-d engine guarantees cache correctness. Just set the
+	 *       type to WB to keep it consistent with the host, i.e. the same
+	 *       as item 3.
+	 * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep it
+	 *    consistent with the host MTRRs.
+	 */
+ if (is_mmio) {
+ cache = MTRR_TYPE_UNCACHABLE;
+ goto exit;
+ }
+
+ if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
+ ipat = VMX_EPT_IPAT_BIT;
+ cache = MTRR_TYPE_WRBACK;
+ goto exit;
+ }
+
+ if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
+ ipat = VMX_EPT_IPAT_BIT;
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+ cache = MTRR_TYPE_WRBACK;
+ else
+ cache = MTRR_TYPE_UNCACHABLE;
+ goto exit;
+ }
+
+ cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
+
+exit:
+ return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
+}
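+
+/*
+ * Worked example: for regular RAM with no noncoherent DMA and CR0.CD
+ * clear, the function above returns
+ * (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT,
+ * i.e. memory type WB in EPTE bits 5:3 with the ignore-PAT bit (bit 6)
+ * set, so the guest PAT is not consulted for that mapping.
+ */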
+
+static int vmx_get_lpage_level(void)
+{
+ if (enable_ept && !cpu_has_vmx_ept_1g_page())
+ return PT_DIRECTORY_LEVEL;
+ else
+		/* Shadow paging, or EPT with 1GB page support */
+ return PT_PDPE_LEVEL;
+}
+
+static void vmcs_set_secondary_exec_control(u32 new_ctl)
+{
+ /*
+ * These bits in the secondary execution controls field
+ * are dynamic, the others are mostly based on the hypervisor
+ * architecture and the guest's CPUID. Do not touch the
+ * dynamic bits.
+ */
+ u32 mask =
+ SECONDARY_EXEC_SHADOW_VMCS |
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+ SECONDARY_EXEC_DESC;
+
+ u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ (new_ctl & ~mask) | (cur_ctl & mask));
+}
+
+/*
+ * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
+ * (indicating "allowed-1") if they are supported in the guest's CPUID.
+ */
+static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_cpuid_entry2 *entry;
+
+ vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
+ vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
+
+#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do { \
+ if (entry && (entry->_reg & (_cpuid_mask))) \
+ vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \
+} while (0)
+
+ entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+ cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME));
+ cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME));
+ cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC));
+ cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE));
+ cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE));
+ cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE));
+ cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE));
+ cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE));
+ cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR));
+ cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM));
+ cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX));
+ cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX));
+ cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID));
+ cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE));
+
+ entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
+ cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE));
+ cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP));
+ cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP));
+ cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU));
+ cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP));
+
+#undef cr4_fixed1_update
+}
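+
+/*
+ * Example of the "allowed-1" semantics: if the guest's CPUID reports
+ * SMEP (CPUID.7.0:EBX[7]), the macro above sets X86_CR4_SMEP in
+ * cr4_fixed1, so L1 is permitted to set CR4.SMEP while in VMX
+ * operation; without the CPUID bit, attempts to set it fail the
+ * nested CR4 fixed-bit checks.
+ */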
+
+static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (kvm_mpx_supported()) {
+ bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
+
+ if (mpx_enabled) {
+ vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
+ vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
+ } else {
+ vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
+ vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
+ }
+ }
+}
+
+static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_cpuid_entry2 *best = NULL;
+ int i;
+
+ for (i = 0; i < PT_CPUID_LEAVES; i++) {
+ best = kvm_find_cpuid_entry(vcpu, 0x14, i);
+ if (!best)
+ return;
+ vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
+ vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
+ vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
+ vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
+ }
+
+ /* Get the number of configurable Address Ranges for filtering */
+ vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps,
+ PT_CAP_num_address_ranges);
+
+	/* Initialize the ctl bitmask, clearing the bits with no CPUID dependency */
+ vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
+ RTIT_CTL_USR | RTIT_CTL_TSC_EN | RTIT_CTL_DISRETC);
+
+	/*
+	 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1, CR3Filter can be set; otherwise
+	 * a #GP will be injected.
+	 */
+ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
+ vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;
+
+ /*
+ * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and
+ * PSBFreq can be set
+ */
+ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
+ vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
+ RTIT_CTL_CYC_THRESH | RTIT_CTL_PSB_FREQ);
+
+ /*
+	 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn, BranchEn and
+ * MTCFreq can be set
+ */
+ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
+ vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
+ RTIT_CTL_BRANCH_EN | RTIT_CTL_MTC_RANGE);
+
+ /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */
+ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
+ vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
+ RTIT_CTL_PTW_EN);
+
+ /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */
+ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
+ vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
+
+ /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */
+ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
+ vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
+
+	/* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
+ if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
+ vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
+
+	/* Unmask the address range configuration area */
+ for (i = 0; i < vmx->pt_desc.addr_range; i++)
+ vmx->pt_desc.ctl_bitmask &= ~(0xf << (32 + i * 4));
+}
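+
+/*
+ * To recap the bitmask convention used above: a bit that remains set in
+ * ctl_bitmask marks an RTIT_CTL bit the guest must not set; the MSR
+ * write path checks guest data against ctl_bitmask and injects a #GP
+ * when a disallowed bit is set.
+ */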
+
+static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (cpu_has_secondary_exec_ctrls()) {
+ vmx_compute_secondary_exec_control(vmx);
+ vmcs_set_secondary_exec_control(vmx->secondary_exec_control);
+ }
+
+ if (nested_vmx_allowed(vcpu))
+ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
+ FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+ else
+ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
+ ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+
+ if (nested_vmx_allowed(vcpu)) {
+ nested_vmx_cr_fixed1_bits_update(vcpu);
+ nested_vmx_entry_exit_ctls_update(vcpu);
+ }
+
+ if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_INTEL_PT))
+ update_intel_pt_cfg(vcpu);
+}
+
+static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+{
+ if (func == 1 && nested)
+ entry->ecx |= bit(X86_FEATURE_VMX);
+}
+
+static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+ to_vmx(vcpu)->req_immediate_exit = true;
+}
+
+static int vmx_check_intercept(struct kvm_vcpu *vcpu,
+ struct x86_instruction_info *info,
+ enum x86_intercept_stage stage)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+
+ /*
+ * RDPID causes #UD if disabled through secondary execution controls.
+ * Because it is marked as EmulateOnUD, we need to intercept it here.
+ */
+ if (info->intercept == x86_intercept_rdtscp &&
+ !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
+ ctxt->exception.vector = UD_VECTOR;
+ ctxt->exception.error_code_valid = false;
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
+ /* TODO: check more intercepts... */
+ return X86EMUL_CONTINUE;
+}
+
+#ifdef CONFIG_X86_64
+/* (a << shift) / divisor; returns 1 on overflow, otherwise 0 */
+static inline int u64_shl_div_u64(u64 a, unsigned int shift,
+ u64 divisor, u64 *result)
+{
+ u64 low = a << shift, high = a >> (64 - shift);
+
+	/* Avoid overflowing divq */
+ if (high >= divisor)
+ return 1;
+
+	/* low holds the result; high holds the remainder, which is discarded */
+ asm("divq %2\n\t" : "=a" (low), "=d" (high) :
+ "rm" (divisor), "0" (low), "1" (high));
+ *result = low;
+
+ return 0;
+}
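+
+/*
+ * Usage sketch: the TSC scaling below calls
+ * u64_shl_div_u64(delta_tsc, kvm_tsc_scaling_ratio_frac_bits,
+ *                 vcpu->arch.tsc_scaling_ratio, &delta_tsc)
+ * to compute (delta << frac_bits) / ratio as a 128-by-64 bit divide:
+ * divq takes the 128-bit dividend in rdx:rax, and the "high >= divisor"
+ * check rejects quotients that would not fit in 64 bits.
+ */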
+
+static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
+{
+ struct vcpu_vmx *vmx;
+ u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
+
+ if (kvm_mwait_in_guest(vcpu->kvm))
+ return -EOPNOTSUPP;
+
+ vmx = to_vmx(vcpu);
+ tscl = rdtsc();
+ guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
+ delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
+ lapic_timer_advance_cycles = nsec_to_cycles(vcpu, lapic_timer_advance_ns);
+
+ if (delta_tsc > lapic_timer_advance_cycles)
+ delta_tsc -= lapic_timer_advance_cycles;
+ else
+ delta_tsc = 0;
+
+ /* Convert to host delta tsc if tsc scaling is enabled */
+ if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
+ u64_shl_div_u64(delta_tsc,
+ kvm_tsc_scaling_ratio_frac_bits,
+ vcpu->arch.tsc_scaling_ratio,
+ &delta_tsc))
+ return -ERANGE;
+
+ /*
+	 * If the delta tsc can't fit in 32 bits after the multiplier shift,
+	 * we can't use the preemption timer.
+ * It's possible that it fits on later vmentries, but checking
+ * on every vmentry is costly so we just use an hrtimer.
+ */
+ if (delta_tsc >> (cpu_preemption_timer_multi + 32))
+ return -ERANGE;
+
+ vmx->hv_deadline_tsc = tscl + delta_tsc;
+ return delta_tsc == 0;
+}
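+
+/*
+ * Return convention: -EOPNOTSUPP/-ERANGE mean the preemption timer
+ * cannot be used for this deadline, 0 means it was armed, and 1
+ * (delta_tsc == 0) means the deadline has already passed, so the
+ * caller treats the timer as having expired immediately.
+ */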
+
+static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
+{
+ to_vmx(vcpu)->hv_deadline_tsc = -1;
+}
+#endif
+
+static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
+{
+ if (!kvm_pause_in_guest(vcpu->kvm))
+ shrink_ple_window(vcpu);
+}
+
+static void vmx_slot_enable_log_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+ kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
+ kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
+}
+
+static void vmx_slot_disable_log_dirty(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+ kvm_mmu_slot_set_dirty(kvm, slot);
+}
+
+static void vmx_flush_log_dirty(struct kvm *kvm)
+{
+ kvm_flush_pml_buffers(kvm);
+}
+
+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ gpa_t gpa;
+ struct page *page = NULL;
+ u64 *pml_address;
+
+ if (is_guest_mode(vcpu)) {
+ WARN_ON_ONCE(vmx->nested.pml_full);
+
+ /*
+ * Check if PML is enabled for the nested guest.
+ * Whether eptp bit 6 is set is already checked
+ * as part of A/D emulation.
+ */
+ vmcs12 = get_vmcs12(vcpu);
+ if (!nested_cpu_has_pml(vmcs12))
+ return 0;
+
+ if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
+ vmx->nested.pml_full = true;
+ return 1;
+ }
+
+ gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+
+ page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->pml_address);
+ if (is_error_page(page))
+ return 0;
+
+ pml_address = kmap(page);
+ pml_address[vmcs12->guest_pml_index--] = gpa;
+ kunmap(page);
+ kvm_release_page_clean(page);
+ }
+
+ return 0;
+}
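+
+/*
+ * This mirrors the hardware PML convention: guest_pml_index counts
+ * down from PML_ENTITY_NUM - 1, and an out-of-range index means the
+ * log is full, which is reported to L1 as a PML-full event (the
+ * "return 1" path above).
+ */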
+
+static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ gfn_t offset, unsigned long mask)
+{
+ kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
+}
+
+static void __pi_post_block(struct kvm_vcpu *vcpu)
+{
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ struct pi_desc old, new;
+ unsigned int dest;
+
+ do {
+ old.control = new.control = pi_desc->control;
+ WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
+ "Wakeup handler not enabled while the VCPU is blocked\n");
+
+ dest = cpu_physical_id(vcpu->cpu);
+
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;
+
+ /* set 'NV' to 'notification vector' */
+ new.nv = POSTED_INTR_VECTOR;
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
+
+ if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
+ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ list_del(&vcpu->blocked_vcpu_list);
+ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ vcpu->pre_pcpu = -1;
+ }
+}
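+
+/*
+ * NDST encoding, as used above: in x2APIC mode the notification
+ * destination is the full 32-bit APIC ID; in xAPIC mode the 8-bit
+ * APIC ID lives in bits 15:8 of NDST, hence (dest << 8) & 0xFF00.
+ */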
+
+/*
+ * This routine does the following things for a vCPU that is about to
+ * block, if VT-d PI is enabled:
+ * - Store the vCPU in the wakeup list, so that when an interrupt
+ *   arrives we can find the right vCPU to wake up.
+ * - Change the posted-interrupt descriptor as below:
+ *      'NDST' <-- vcpu->pre_pcpu
+ *      'NV' <-- POSTED_INTR_WAKEUP_VECTOR
+ * - If 'ON' is set during this process, meaning at least one interrupt
+ *   has been posted for this vCPU, we cannot block it; in that case
+ *   return 1, otherwise return 0.
+ */
+static int pi_pre_block(struct kvm_vcpu *vcpu)
+{
+ unsigned int dest;
+ struct pi_desc old, new;
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+ if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(vcpu))
+ return 0;
+
+ WARN_ON(irqs_disabled());
+ local_irq_disable();
+ if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
+ vcpu->pre_pcpu = vcpu->cpu;
+ spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ list_add_tail(&vcpu->blocked_vcpu_list,
+ &per_cpu(blocked_vcpu_on_cpu,
+ vcpu->pre_pcpu));
+ spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
+ }
+
+ do {
+ old.control = new.control = pi_desc->control;
+
+ WARN((pi_desc->sn == 1),
+ "Warning: SN field of posted-interrupts "
+ "is set before blocking\n");
+
+		/*
+		 * Since the vCPU can be preempted during this process,
+		 * vcpu->cpu could differ from pre_pcpu, so we need to
+		 * set pre_pcpu as the destination of the wakeup
+		 * notification event; then we can find the right vCPU
+		 * to wake up in the wakeup handler if an interrupt
+		 * arrives while the vCPU is blocked.
+		 */
+ dest = cpu_physical_id(vcpu->pre_pcpu);
+
+ if (x2apic_enabled())
+ new.ndst = dest;
+ else
+ new.ndst = (dest << 8) & 0xFF00;
+
+ /* set 'NV' to 'wakeup vector' */
+ new.nv = POSTED_INTR_WAKEUP_VECTOR;
+ } while (cmpxchg64(&pi_desc->control, old.control,
+ new.control) != old.control);
+
+ /* We should not block the vCPU if an interrupt is posted for it. */
+ if (pi_test_on(pi_desc) == 1)
+ __pi_post_block(vcpu);
+
+ local_irq_enable();
+ return (vcpu->pre_pcpu == -1);
+}
+
+static int vmx_pre_block(struct kvm_vcpu *vcpu)
+{
+ if (pi_pre_block(vcpu))
+ return 1;
+
+ if (kvm_lapic_hv_timer_in_use(vcpu))
+ kvm_lapic_switch_to_sw_timer(vcpu);
+
+ return 0;
+}
+
+static void pi_post_block(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->pre_pcpu == -1)
+ return;
+
+ WARN_ON(irqs_disabled());
+ local_irq_disable();
+ __pi_post_block(vcpu);
+ local_irq_enable();
+}
+
+static void vmx_post_block(struct kvm_vcpu *vcpu)
+{
+ if (kvm_x86_ops->set_hv_timer)
+ kvm_lapic_switch_to_hv_timer(vcpu);
+
+ pi_post_block(vcpu);
+}
+
+/*
+ * vmx_update_pi_irte - set IRTE for Posted-Interrupts
+ *
+ * @kvm: kvm
+ * @host_irq: host irq of the interrupt
+ * @guest_irq: gsi of the interrupt
+ * @set: set or unset PI
+ * returns 0 on success, < 0 on failure
+ */
+static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set)
+{
+ struct kvm_kernel_irq_routing_entry *e;
+ struct kvm_irq_routing_table *irq_rt;
+ struct kvm_lapic_irq irq;
+ struct kvm_vcpu *vcpu;
+ struct vcpu_data vcpu_info;
+ int idx, ret = 0;
+
+ if (!kvm_arch_has_assigned_device(kvm) ||
+ !irq_remapping_cap(IRQ_POSTING_CAP) ||
+ !kvm_vcpu_apicv_active(kvm->vcpus[0]))
+ return 0;
+
+ idx = srcu_read_lock(&kvm->irq_srcu);
+ irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
+ if (guest_irq >= irq_rt->nr_rt_entries ||
+ hlist_empty(&irq_rt->map[guest_irq])) {
+ pr_warn_once("no route for guest_irq %u/%u (broken user space?)\n",
+ guest_irq, irq_rt->nr_rt_entries);
+ goto out;
+ }
+
+ hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
+ if (e->type != KVM_IRQ_ROUTING_MSI)
+ continue;
+		/*
+		 * VT-d PI cannot support posting multicast/broadcast
+		 * interrupts to a vCPU, so we still use interrupt
+		 * remapping for those kinds of interrupts.
+		 *
+		 * For lowest-priority interrupts, we only support
+		 * those with a single CPU as the destination, e.g. the
+		 * user configures the interrupts via /proc/irq or uses
+		 * irqbalance to make the interrupts single-CPU.
+		 *
+		 * We will support full lowest-priority interrupts later.
+		 */
+
+ kvm_set_msi_irq(kvm, e, &irq);
+ if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
+ /*
+ * Make sure the IRTE is in remapped mode if
+ * we don't handle it in posted mode.
+ */
+ ret = irq_set_vcpu_affinity(host_irq, NULL);
+ if (ret < 0) {
+ printk(KERN_INFO
+ "failed to back to remapped mode, irq: %u\n",
+ host_irq);
+ goto out;
+ }
+
+ continue;
+ }
+
+ vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
+ vcpu_info.vector = irq.vector;
+
+ trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
+ vcpu_info.vector, vcpu_info.pi_desc_addr, set);
+
+ if (set)
+ ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
+ else
+ ret = irq_set_vcpu_affinity(host_irq, NULL);
+
+ if (ret < 0) {
+ printk(KERN_INFO "%s: failed to update PI IRTE\n",
+ __func__);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+ return ret;
+}
+
+static void vmx_setup_mce(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.mcg_cap & MCG_LMCE_P)
+ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |=
+ FEATURE_CONTROL_LMCE;
+ else
+ to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
+ ~FEATURE_CONTROL_LMCE;
+}
+
+static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
+{
+ /* we need a nested vmexit to enter SMM, postpone if run is pending */
+ if (to_vmx(vcpu)->nested.nested_run_pending)
+ return 0;
+ return 1;
+}
+
+static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
+ if (vmx->nested.smm.guest_mode)
+ nested_vmx_vmexit(vcpu, -1, 0, 0);
+
+ vmx->nested.smm.vmxon = vmx->nested.vmxon;
+ vmx->nested.vmxon = false;
+ vmx_clear_hlt(vcpu);
+ return 0;
+}
+
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ int ret;
+
+ if (vmx->nested.smm.vmxon) {
+ vmx->nested.vmxon = true;
+ vmx->nested.smm.vmxon = false;
+ }
+
+ if (vmx->nested.smm.guest_mode) {
+ vcpu->arch.hflags &= ~HF_SMM_MASK;
+ ret = nested_vmx_enter_non_root_mode(vcpu, false);
+ vcpu->arch.hflags |= HF_SMM_MASK;
+ if (ret)
+ return ret;
+
+ vmx->nested.smm.guest_mode = false;
+ }
+ return 0;
+}
+
+static int enable_smi_window(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static __init int hardware_setup(void)
+{
+ unsigned long host_bndcfgs;
+ int r, i;
+
+ rdmsrl_safe(MSR_EFER, &host_efer);
+
+ for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
+ kvm_define_shared_msr(i, vmx_msr_index[i]);
+
+ if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
+ return -EIO;
+
+ if (boot_cpu_has(X86_FEATURE_NX))
+ kvm_enable_efer_bits(EFER_NX);
+
+ if (boot_cpu_has(X86_FEATURE_MPX)) {
+ rdmsrl(MSR_IA32_BNDCFGS, host_bndcfgs);
+ WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
+ }
+
+ if (boot_cpu_has(X86_FEATURE_XSAVES))
+ rdmsrl(MSR_IA32_XSS, host_xss);
+
+ if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
+ !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
+ enable_vpid = 0;
+
+ if (!cpu_has_vmx_ept() ||
+ !cpu_has_vmx_ept_4levels() ||
+ !cpu_has_vmx_ept_mt_wb() ||
+ !cpu_has_vmx_invept_global())
+ enable_ept = 0;
+
+ if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
+ enable_ept_ad_bits = 0;
+
+ if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
+ enable_unrestricted_guest = 0;
+
+ if (!cpu_has_vmx_flexpriority())
+ flexpriority_enabled = 0;
+
+ if (!cpu_has_virtual_nmis())
+ enable_vnmi = 0;
+
+ /*
+ * set_apic_access_page_addr() is used to reload apic access
+ * page upon invalidation. No need to do anything if not
+ * using the APIC_ACCESS_ADDR VMCS field.
+ */
+ if (!flexpriority_enabled)
+ kvm_x86_ops->set_apic_access_page_addr = NULL;
+
+ if (!cpu_has_vmx_tpr_shadow())
+ kvm_x86_ops->update_cr8_intercept = NULL;
+
+ if (enable_ept && !cpu_has_vmx_ept_2m_page())
+ kvm_disable_largepages();
+
+#if IS_ENABLED(CONFIG_HYPERV)
+ if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
+ && enable_ept) {
+ kvm_x86_ops->tlb_remote_flush = hv_remote_flush_tlb;
+ kvm_x86_ops->tlb_remote_flush_with_range =
+ hv_remote_flush_tlb_with_range;
+ }
+#endif
+
+ if (!cpu_has_vmx_ple()) {
+ ple_gap = 0;
+ ple_window = 0;
+ ple_window_grow = 0;
+ ple_window_max = 0;
+ ple_window_shrink = 0;
+ }
+
+ if (!cpu_has_vmx_apicv()) {
+ enable_apicv = 0;
+ kvm_x86_ops->sync_pir_to_irr = NULL;
+ }
+
+ if (cpu_has_vmx_tsc_scaling()) {
+ kvm_has_tsc_control = true;
+ kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
+ kvm_tsc_scaling_ratio_frac_bits = 48;
+ }
+
+ set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
+ if (enable_ept)
+ vmx_enable_tdp();
+ else
+ kvm_disable_tdp();
+
+ /*
+ * Only enable PML when hardware supports PML feature, and both EPT
+ * and EPT A/D bit features are enabled -- PML depends on them to work.
+ */
+ if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
+ enable_pml = 0;
+
+ if (!enable_pml) {
+ kvm_x86_ops->slot_enable_log_dirty = NULL;
+ kvm_x86_ops->slot_disable_log_dirty = NULL;
+ kvm_x86_ops->flush_log_dirty = NULL;
+ kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
+ }
+
+ if (!cpu_has_vmx_preemption_timer())
+ kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
+
+ if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
+ u64 vmx_msr;
+
+ rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
+ cpu_preemption_timer_multi =
+ vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
+ } else {
+ kvm_x86_ops->set_hv_timer = NULL;
+ kvm_x86_ops->cancel_hv_timer = NULL;
+ }
+
+ kvm_set_posted_intr_wakeup_handler(wakeup_handler);
+
+ kvm_mce_cap_supported |= MCG_LMCE_P;
+
+ if (pt_mode != PT_MODE_SYSTEM && pt_mode != PT_MODE_HOST_GUEST)
+ return -EINVAL;
+ if (!enable_ept || !cpu_has_vmx_intel_pt())
+ pt_mode = PT_MODE_SYSTEM;
+
+ if (nested) {
+ nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
+ vmx_capability.ept, enable_apicv);
+
+ r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
+ if (r)
+ return r;
+ }
+
+ r = alloc_kvm_area();
+ if (r)
+ nested_vmx_hardware_unsetup();
+ return r;
+}
+
+static __exit void hardware_unsetup(void)
+{
+ if (nested)
+ nested_vmx_hardware_unsetup();
+
+ free_kvm_area();
+}
+
+static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .cpu_has_kvm_support = cpu_has_kvm_support,
+ .disabled_by_bios = vmx_disabled_by_bios,
+ .hardware_setup = hardware_setup,
+ .hardware_unsetup = hardware_unsetup,
+ .check_processor_compatibility = vmx_check_processor_compat,
+ .hardware_enable = hardware_enable,
+ .hardware_disable = hardware_disable,
+ .cpu_has_accelerated_tpr = report_flexpriority,
+ .has_emulated_msr = vmx_has_emulated_msr,
+
+ .vm_init = vmx_vm_init,
+ .vm_alloc = vmx_vm_alloc,
+ .vm_free = vmx_vm_free,
+
+ .vcpu_create = vmx_create_vcpu,
+ .vcpu_free = vmx_free_vcpu,
+ .vcpu_reset = vmx_vcpu_reset,
+
+ .prepare_guest_switch = vmx_prepare_switch_to_guest,
+ .vcpu_load = vmx_vcpu_load,
+ .vcpu_put = vmx_vcpu_put,
+
+ .update_bp_intercept = update_exception_bitmap,
+ .get_msr_feature = vmx_get_msr_feature,
+ .get_msr = vmx_get_msr,
+ .set_msr = vmx_set_msr,
+ .get_segment_base = vmx_get_segment_base,
+ .get_segment = vmx_get_segment,
+ .set_segment = vmx_set_segment,
+ .get_cpl = vmx_get_cpl,
+ .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
+ .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
+ .decache_cr3 = vmx_decache_cr3,
+ .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
+ .set_cr0 = vmx_set_cr0,
+ .set_cr3 = vmx_set_cr3,
+ .set_cr4 = vmx_set_cr4,
+ .set_efer = vmx_set_efer,
+ .get_idt = vmx_get_idt,
+ .set_idt = vmx_set_idt,
+ .get_gdt = vmx_get_gdt,
+ .set_gdt = vmx_set_gdt,
+ .get_dr6 = vmx_get_dr6,
+ .set_dr6 = vmx_set_dr6,
+ .set_dr7 = vmx_set_dr7,
+ .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
+ .cache_reg = vmx_cache_reg,
+ .get_rflags = vmx_get_rflags,
+ .set_rflags = vmx_set_rflags,
+
+ .tlb_flush = vmx_flush_tlb,
+ .tlb_flush_gva = vmx_flush_tlb_gva,
+
+ .run = vmx_vcpu_run,
+ .handle_exit = vmx_handle_exit,
+ .skip_emulated_instruction = skip_emulated_instruction,
+ .set_interrupt_shadow = vmx_set_interrupt_shadow,
+ .get_interrupt_shadow = vmx_get_interrupt_shadow,
+ .patch_hypercall = vmx_patch_hypercall,
+ .set_irq = vmx_inject_irq,
+ .set_nmi = vmx_inject_nmi,
+ .queue_exception = vmx_queue_exception,
+ .cancel_injection = vmx_cancel_injection,
+ .interrupt_allowed = vmx_interrupt_allowed,
+ .nmi_allowed = vmx_nmi_allowed,
+ .get_nmi_mask = vmx_get_nmi_mask,
+ .set_nmi_mask = vmx_set_nmi_mask,
+ .enable_nmi_window = enable_nmi_window,
+ .enable_irq_window = enable_irq_window,
+ .update_cr8_intercept = update_cr8_intercept,
+ .set_virtual_apic_mode = vmx_set_virtual_apic_mode,
+ .set_apic_access_page_addr = vmx_set_apic_access_page_addr,
+ .get_enable_apicv = vmx_get_enable_apicv,
+ .refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
+ .load_eoi_exitmap = vmx_load_eoi_exitmap,
+ .apicv_post_state_restore = vmx_apicv_post_state_restore,
+ .hwapic_irr_update = vmx_hwapic_irr_update,
+ .hwapic_isr_update = vmx_hwapic_isr_update,
+ .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
+ .sync_pir_to_irr = vmx_sync_pir_to_irr,
+ .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+
+ .set_tss_addr = vmx_set_tss_addr,
+ .set_identity_map_addr = vmx_set_identity_map_addr,
+ .get_tdp_level = get_ept_level,
+ .get_mt_mask = vmx_get_mt_mask,
+
+ .get_exit_info = vmx_get_exit_info,
+
+ .get_lpage_level = vmx_get_lpage_level,
+
+ .cpuid_update = vmx_cpuid_update,
+
+ .rdtscp_supported = vmx_rdtscp_supported,
+ .invpcid_supported = vmx_invpcid_supported,
+
+ .set_supported_cpuid = vmx_set_supported_cpuid,
+
+ .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
+
+ .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
+ .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
+
+ .set_tdp_cr3 = vmx_set_cr3,
+
+ .check_intercept = vmx_check_intercept,
+ .handle_external_intr = vmx_handle_external_intr,
+ .mpx_supported = vmx_mpx_supported,
+ .xsaves_supported = vmx_xsaves_supported,
+ .umip_emulated = vmx_umip_emulated,
+ .pt_supported = vmx_pt_supported,
+
+ .request_immediate_exit = vmx_request_immediate_exit,
+
+ .sched_in = vmx_sched_in,
+
+ .slot_enable_log_dirty = vmx_slot_enable_log_dirty,
+ .slot_disable_log_dirty = vmx_slot_disable_log_dirty,
+ .flush_log_dirty = vmx_flush_log_dirty,
+ .enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
+ .write_log_dirty = vmx_write_pml_buffer,
+
+ .pre_block = vmx_pre_block,
+ .post_block = vmx_post_block,
+
+ .pmu_ops = &intel_pmu_ops,
+
+ .update_pi_irte = vmx_update_pi_irte,
+
+#ifdef CONFIG_X86_64
+ .set_hv_timer = vmx_set_hv_timer,
+ .cancel_hv_timer = vmx_cancel_hv_timer,
+#endif
+
+ .setup_mce = vmx_setup_mce,
+
+ .smi_allowed = vmx_smi_allowed,
+ .pre_enter_smm = vmx_pre_enter_smm,
+ .pre_leave_smm = vmx_pre_leave_smm,
+ .enable_smi_window = enable_smi_window,
+
+ .check_nested_events = NULL,
+ .get_nested_state = NULL,
+ .set_nested_state = NULL,
+ .get_vmcs12_pages = NULL,
+ .nested_enable_evmcs = NULL,
+};
+
+static void vmx_cleanup_l1d_flush(void)
+{
+ if (vmx_l1d_flush_pages) {
+ free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+ vmx_l1d_flush_pages = NULL;
+ }
+ /* Restore state so sysfs ignores VMX */
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+}
+
+static void vmx_exit(void)
+{
+#ifdef CONFIG_KEXEC_CORE
+ RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+ synchronize_rcu();
+#endif
+
+ kvm_exit();
+
+#if IS_ENABLED(CONFIG_HYPERV)
+ if (static_branch_unlikely(&enable_evmcs)) {
+ int cpu;
+ struct hv_vp_assist_page *vp_ap;
+ /*
+ * Reset everything to support using non-enlightened VMCS
+ * access later (e.g. when we reload the module with
+ * enlightened_vmcs=0)
+ */
+ for_each_online_cpu(cpu) {
+ vp_ap = hv_get_vp_assist_page(cpu);
+
+ if (!vp_ap)
+ continue;
+
+ vp_ap->current_nested_vmcs = 0;
+ vp_ap->enlighten_vmentry = 0;
+ }
+
+ static_branch_disable(&enable_evmcs);
+ }
+#endif
+ vmx_cleanup_l1d_flush();
+}
+module_exit(vmx_exit);
+
+static int __init vmx_init(void)
+{
+ int r;
+
+#if IS_ENABLED(CONFIG_HYPERV)
+	/*
+	 * Enlightened VMCS usage needs to be recommended by the hypervisor
+	 * and the host needs to support eVMCS v1 or above. eVMCS support
+	 * can also be disabled with the module parameter.
+	 */
+ if (enlightened_vmcs &&
+ ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED &&
+ (ms_hyperv.nested_features & HV_X64_ENLIGHTENED_VMCS_VERSION) >=
+ KVM_EVMCS_VERSION) {
+ int cpu;
+
+ /* Check that we have assist pages on all online CPUs */
+ for_each_online_cpu(cpu) {
+ if (!hv_get_vp_assist_page(cpu)) {
+ enlightened_vmcs = false;
+ break;
+ }
+ }
+
+ if (enlightened_vmcs) {
+ pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n");
+ static_branch_enable(&enable_evmcs);
+ }
+ } else {
+ enlightened_vmcs = false;
+ }
+#endif
+
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
+ if (r)
+ return r;
+
+	/*
+	 * Must be called after kvm_init() so enable_ept is properly set
+	 * up. Hand in the mitigation parameter value, which was stored by
+	 * the pre-module-init parser. If no parameter was given, it will
+	 * contain 'auto', which will be turned into the default 'cond'
+	 * mitigation mode.
+	 */
+ if (boot_cpu_has(X86_BUG_L1TF)) {
+ r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+ if (r) {
+ vmx_exit();
+ return r;
+ }
+ }
+
+#ifdef CONFIG_KEXEC_CORE
+ rcu_assign_pointer(crash_vmclear_loaded_vmcss,
+ crash_vmclear_local_loaded_vmcss);
+#endif
+ vmx_check_vmcs12_offsets();
+
+ return 0;
+}
+module_init(vmx_init);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
new file mode 100644
index 000000000000..99328954c2fc
--- /dev/null
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -0,0 +1,519 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_VMX_H
+#define __KVM_X86_VMX_H
+
+#include <linux/kvm_host.h>
+
+#include <asm/kvm.h>
+#include <asm/intel_pt.h>
+
+#include "capabilities.h"
+#include "ops.h"
+#include "vmcs.h"
+
+extern const u32 vmx_msr_index[];
+extern u64 host_efer;
+
+#define MSR_TYPE_R 1
+#define MSR_TYPE_W 2
+#define MSR_TYPE_RW 3
+
+#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
+
+#define NR_AUTOLOAD_MSRS 8
+
+struct vmx_msrs {
+ unsigned int nr;
+ struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
+};
+
+struct shared_msr_entry {
+ unsigned index;
+ u64 data;
+ u64 mask;
+};
+
+enum segment_cache_field {
+ SEG_FIELD_SEL = 0,
+ SEG_FIELD_BASE = 1,
+ SEG_FIELD_LIMIT = 2,
+ SEG_FIELD_AR = 3,
+
+ SEG_FIELD_NR = 4
+};
+
+/* Posted-Interrupt Descriptor */
+struct pi_desc {
+ u32 pir[8]; /* Posted interrupt requested */
+ union {
+ struct {
+ /* bit 256 - Outstanding Notification */
+ u16 on : 1,
+ /* bit 257 - Suppress Notification */
+ sn : 1,
+ /* bit 271:258 - Reserved */
+ rsvd_1 : 14;
+ /* bit 279:272 - Notification Vector */
+ u8 nv;
+ /* bit 287:280 - Reserved */
+ u8 rsvd_2;
+ /* bit 319:288 - Notification Destination */
+ u32 ndst;
+ };
+ u64 control;
+ };
+ u32 rsvd[6];
+} __aligned(64);
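+
+/*
+ * The 64-bit 'control' member overlays ON, SN, NV and NDST, which is
+ * what lets pi_pre_block()/__pi_post_block() retarget the descriptor
+ * with a single cmpxchg64() on &pi_desc->control instead of updating
+ * the fields one by one.
+ */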
+
+#define RTIT_ADDR_RANGE 4
+
+struct pt_ctx {
+ u64 ctl;
+ u64 status;
+ u64 output_base;
+ u64 output_mask;
+ u64 cr3_match;
+ u64 addr_a[RTIT_ADDR_RANGE];
+ u64 addr_b[RTIT_ADDR_RANGE];
+};
+
+struct pt_desc {
+ u64 ctl_bitmask;
+ u32 addr_range;
+ u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
+ struct pt_ctx host;
+ struct pt_ctx guest;
+};
+
+/*
+ * The nested_vmx structure is part of vcpu_vmx, and holds information we need
+ * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
+ */
+struct nested_vmx {
+ /* Has the level1 guest done vmxon? */
+ bool vmxon;
+ gpa_t vmxon_ptr;
+ bool pml_full;
+
+ /* The guest-physical address of the current VMCS L1 keeps for L2 */
+ gpa_t current_vmptr;
+ /*
+ * Cache of the guest's VMCS, existing outside of guest memory.
+ * Loaded from guest memory during VMPTRLD. Flushed to guest
+ * memory during VMCLEAR and VMPTRLD.
+ */
+ struct vmcs12 *cached_vmcs12;
+ /*
+ * Cache of the guest's shadow VMCS, existing outside of guest
+ * memory. Loaded from guest memory during VM entry. Flushed
+ * to guest memory during VM exit.
+ */
+ struct vmcs12 *cached_shadow_vmcs12;
+ /*
+ * Indicates if the shadow vmcs or enlightened vmcs must be updated
+ * with the data held by struct vmcs12.
+ */
+ bool need_vmcs12_sync;
+ bool dirty_vmcs12;
+
+ /*
+ * vmcs02 has been initialized, i.e. state that is constant for
+ * vmcs02 has been written to the backing VMCS. Initialization
+ * is delayed until L1 actually attempts to run a nested VM.
+ */
+ bool vmcs02_initialized;
+
+ bool change_vmcs01_virtual_apic_mode;
+
+ /*
+ * Enlightened VMCS has been enabled. It does not mean that L1 has to
+ * use it. However, VMX features available to L1 will be limited based
+ * on what the enlightened VMCS supports.
+ */
+ bool enlightened_vmcs_enabled;
+
+ /* L2 must run next, and mustn't decide to exit to L1. */
+ bool nested_run_pending;
+
+ struct loaded_vmcs vmcs02;
+
+ /*
+ * Guest pages referred to in the vmcs02 with host-physical
+ * pointers, so we must keep them pinned while L2 runs.
+ */
+ struct page *apic_access_page;
+ struct page *virtual_apic_page;
+ struct page *pi_desc_page;
+ struct pi_desc *pi_desc;
+ bool pi_pending;
+ u16 posted_intr_nv;
+
+ struct hrtimer preemption_timer;
+ bool preemption_timer_expired;
+
+ /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
+ u64 vmcs01_debugctl;
+ u64 vmcs01_guest_bndcfgs;
+
+ u16 vpid02;
+ u16 last_vpid;
+
+ struct nested_vmx_msrs msrs;
+
+ /* SMM related state */
+ struct {
+ /* in VMX operation on SMM entry? */
+ bool vmxon;
+ /* in guest mode on SMM entry? */
+ bool guest_mode;
+ } smm;
+
+ gpa_t hv_evmcs_vmptr;
+ struct page *hv_evmcs_page;
+ struct hv_enlightened_vmcs *hv_evmcs;
+};
+
+struct vcpu_vmx {
+ struct kvm_vcpu vcpu;
+ unsigned long host_rsp;
+ u8 fail;
+ u8 msr_bitmap_mode;
+ u32 exit_intr_info;
+ u32 idt_vectoring_info;
+ ulong rflags;
+ struct shared_msr_entry *guest_msrs;
+ int nmsrs;
+ int save_nmsrs;
+ bool guest_msrs_dirty;
+ unsigned long host_idt_base;
+#ifdef CONFIG_X86_64
+ u64 msr_host_kernel_gs_base;
+ u64 msr_guest_kernel_gs_base;
+#endif
+
+ u64 arch_capabilities;
+ u64 spec_ctrl;
+
+ u32 vm_entry_controls_shadow;
+ u32 vm_exit_controls_shadow;
+ u32 secondary_exec_control;
+
+ /*
+ * loaded_vmcs points to the VMCS currently used in this vcpu. For a
+ * non-nested (L1) guest, it always points to vmcs01. For a nested
+ * guest (L2), it points to a different VMCS. loaded_cpu_state points
+ * to the VMCS whose state is loaded into the CPU registers that only
+ * need to be switched when transitioning to/from the kernel; a NULL
+ * value indicates that host state is loaded.
+ */
+ struct loaded_vmcs vmcs01;
+ struct loaded_vmcs *loaded_vmcs;
+ struct loaded_vmcs *loaded_cpu_state;
+ bool __launched; /* temporary, used in vmx_vcpu_run */
+ struct msr_autoload {
+ struct vmx_msrs guest;
+ struct vmx_msrs host;
+ } msr_autoload;
+
+ struct {
+ int vm86_active;
+ ulong save_rflags;
+ struct kvm_segment segs[8];
+ } rmode;
+ struct {
+ u32 bitmask; /* 4 bits per segment (1 bit per field) */
+ struct kvm_save_segment {
+ u16 selector;
+ unsigned long base;
+ u32 limit;
+ u32 ar;
+ } seg[8];
+ } segment_cache;
+ int vpid;
+ bool emulation_required;
+
+ u32 exit_reason;
+
+ /* Posted interrupt descriptor */
+ struct pi_desc pi_desc;
+
+ /* Support for a guest hypervisor (nested VMX) */
+ struct nested_vmx nested;
+
+ /* Dynamic PLE window. */
+ int ple_window;
+ bool ple_window_dirty;
+
+ bool req_immediate_exit;
+
+ /* Support for PML */
+#define PML_ENTITY_NUM 512
+ struct page *pml_pg;
+
+ /* apic deadline value in host tsc */
+ u64 hv_deadline_tsc;
+
+ u64 current_tsc_ratio;
+
+ u32 host_pkru;
+
+ unsigned long host_debugctlmsr;
+
+ /*
+ * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
+ * msr_ia32_feature_control. FEATURE_CONTROL_LOCKED is always included
+ * in msr_ia32_feature_control_valid_bits.
+ */
+ u64 msr_ia32_feature_control;
+ u64 msr_ia32_feature_control_valid_bits;
+ u64 ept_pointer;
+
+ struct pt_desc pt_desc;
+};
+
+enum ept_pointers_status {
+ EPT_POINTERS_CHECK = 0,
+ EPT_POINTERS_MATCH = 1,
+ EPT_POINTERS_MISMATCH = 2
+};
+
+struct kvm_vmx {
+ struct kvm kvm;
+
+ unsigned int tss_addr;
+ bool ept_identity_pagetable_done;
+ gpa_t ept_identity_map_addr;
+
+ enum ept_pointers_status ept_pointers_match;
+ spinlock_t ept_pointer_lock;
+};
+
+bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
+void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
+void vmx_vcpu_put(struct kvm_vcpu *vcpu);
+int allocate_vpid(void);
+void free_vpid(int vpid);
+void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
+void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
+int vmx_get_cpl(struct kvm_vcpu *vcpu);
+unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
+void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
+u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
+void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
+void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
+void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
+void ept_save_pdptrs(struct kvm_vcpu *vcpu);
+void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
+u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
+void update_exception_bitmap(struct kvm_vcpu *vcpu);
+void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
+bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
+void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
+void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
+struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
+void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
+
+#define POSTED_INTR_ON 0
+#define POSTED_INTR_SN 1
+
+static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
+{
+ return test_and_set_bit(POSTED_INTR_ON,
+ (unsigned long *)&pi_desc->control);
+}
+
+static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
+{
+ return test_and_clear_bit(POSTED_INTR_ON,
+ (unsigned long *)&pi_desc->control);
+}
+
+static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
+{
+ return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
+}
+
+static inline void pi_clear_sn(struct pi_desc *pi_desc)
+{
+ return clear_bit(POSTED_INTR_SN,
+ (unsigned long *)&pi_desc->control);
+}
+
+static inline void pi_set_sn(struct pi_desc *pi_desc)
+{
+ return set_bit(POSTED_INTR_SN,
+ (unsigned long *)&pi_desc->control);
+}
+
+static inline void pi_clear_on(struct pi_desc *pi_desc)
+{
+ clear_bit(POSTED_INTR_ON,
+ (unsigned long *)&pi_desc->control);
+}
+
+static inline int pi_test_on(struct pi_desc *pi_desc)
+{
+ return test_bit(POSTED_INTR_ON,
+ (unsigned long *)&pi_desc->control);
+}
+
+static inline int pi_test_sn(struct pi_desc *pi_desc)
+{
+ return test_bit(POSTED_INTR_SN,
+ (unsigned long *)&pi_desc->control);
+}
+
+static inline u8 vmx_get_rvi(void)
+{
+ return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+}
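+
+/*
+ * The 16-bit guest interrupt status field packs RVI (requesting
+ * virtual interrupt) in the low byte and SVI (servicing virtual
+ * interrupt) in the high byte, hence the 0xff mask above.
+ */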
+
+static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
+{
+ vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
+}
+
+static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VM_ENTRY_CONTROLS, val);
+ vmx->vm_entry_controls_shadow = val;
+}
+
+static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+ if (vmx->vm_entry_controls_shadow != val)
+ vm_entry_controls_init(vmx, val);
+}
+
+static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
+{
+ return vmx->vm_entry_controls_shadow;
+}
+
+static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
+}
+
+static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
+}
+
+static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx)
+{
+ vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS);
+}
+
+static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VM_EXIT_CONTROLS, val);
+ vmx->vm_exit_controls_shadow = val;
+}
+
+static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+ if (vmx->vm_exit_controls_shadow != val)
+ vm_exit_controls_init(vmx, val);
+}
+
+static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
+{
+ return vmx->vm_exit_controls_shadow;
+}
+
+static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
+}
+
+static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
+}
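+
+/*
+ * The vm_{entry,exit}_controls_*() helpers above form a write-through
+ * cache: the shadow copy lets vm_*_controls_set() skip the VMWRITE
+ * when nothing changed, so e.g.
+ *	vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
+ * only touches the VMCS if the bit was previously clear.
+ */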
+
+static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
+{
+ vmx->segment_cache.bitmask = 0;
+}
+
+static inline u32 vmx_vmentry_ctrl(void)
+{
+	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
+	if (pt_mode == PT_MODE_SYSTEM)
+		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP | VM_ENTRY_LOAD_IA32_RTIT_CTL);
+	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
+	return vmentry_ctrl &
+		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
+}
+
+static inline u32 vmx_vmexit_ctrl(void)
+{
+	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
+	if (pt_mode == PT_MODE_SYSTEM)
+		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP | VM_EXIT_CLEAR_IA32_RTIT_CTL);
+	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
+	return vmexit_ctrl &
+		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
+}
+
+u32 vmx_exec_control(struct vcpu_vmx *vmx);
+
+static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
+{
+ return container_of(kvm, struct kvm_vmx, kvm);
+}
+
+static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
+{
+ return container_of(vcpu, struct vcpu_vmx, vcpu);
+}
+
+static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
+{
+ return &(to_vmx(vcpu)->pi_desc);
+}
+
+struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu);
+void free_vmcs(struct vmcs *vmcs);
+int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
+void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
+void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs);
+void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
+
+static inline struct vmcs *alloc_vmcs(bool shadow)
+{
+ return alloc_vmcs_cpu(shadow, raw_smp_processor_id());
+}
+
+static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
+ bool invalidate_gpa)
+{
+ if (enable_ept && (invalidate_gpa || !enable_vpid)) {
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
+ return;
+ ept_sync_context(construct_eptp(vcpu,
+ vcpu->arch.mmu->root_hpa));
+ } else {
+ vpid_sync_context(vpid);
+ }
+}
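+
+/*
+ * In short: with EPT, invalidating guest-physical mappings (or running
+ * without VPID) requires a single-context INVEPT keyed on the EPT
+ * pointer; otherwise a linear-mapping flush via INVVPID on the vCPU's
+ * VPID is sufficient.
+ */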
+
+static inline void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+{
+ __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
+}
+
+static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
+{
+ vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
+ vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
+}
+
+#endif /* __KVM_X86_VMX_H */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5cd5647120f2..02c8e095a239 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -69,6 +69,7 @@
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
+#include <asm/intel_pt.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -213,6 +214,9 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
u64 __read_mostly host_xcr0;
+struct kmem_cache *x86_fpu_cache;
+EXPORT_SYMBOL_GPL(x86_fpu_cache);
+
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
@@ -1121,7 +1125,13 @@ static u32 msrs_to_save[] = {
#endif
MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
- MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES
+ MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES,
+ MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
+ MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
+ MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
+ MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
+ MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
+ MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
};
static unsigned num_msrs_to_save;
@@ -1665,8 +1675,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
- kvm_x86_ops->write_tsc_offset(vcpu, offset);
- vcpu->arch.tsc_offset = offset;
+ vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
}
static inline bool kvm_check_tsc_unstable(void)
@@ -1794,7 +1803,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
s64 adjustment)
{
- kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
+ u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
+ kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
}
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -2426,6 +2436,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
case MSR_AMD64_DC_CFG:
+ case MSR_F15H_EX_CFG:
break;
case MSR_IA32_UCODE_REV:
@@ -2721,6 +2732,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_AMD64_BU_CFG2:
case MSR_IA32_PERF_CTL:
case MSR_AMD64_DC_CFG:
+ case MSR_F15H_EX_CFG:
msr_info->data = 0;
break;
case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
@@ -2997,6 +3009,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_HYPERV_TLBFLUSH:
case KVM_CAP_HYPERV_SEND_IPI:
case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
+ case KVM_CAP_HYPERV_CPUID:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
@@ -3008,7 +3021,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_HYPERV_TIME:
case KVM_CAP_IOAPIC_POLARITY_IGNORED:
case KVM_CAP_TSC_DEADLINE_TIMER:
- case KVM_CAP_ENABLE_CAP_VM:
case KVM_CAP_DISABLE_QUIRKS:
case KVM_CAP_SET_BOOT_CPU_ID:
case KVM_CAP_SPLIT_IRQCHIP:
@@ -3630,7 +3642,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
{
- struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
+ struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
u64 xstate_bv = xsave->header.xfeatures;
u64 valid;
@@ -3672,7 +3684,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
{
- struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
+ struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave;
u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
u64 valid;
@@ -3720,7 +3732,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
fill_xsave((u8 *) guest_xsave->region, vcpu);
} else {
memcpy(guest_xsave->region,
- &vcpu->arch.guest_fpu.state.fxsave,
+ &vcpu->arch.guest_fpu->state.fxsave,
sizeof(struct fxregs_state));
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
XFEATURE_MASK_FPSSE;
@@ -3750,7 +3762,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
- memcpy(&vcpu->arch.guest_fpu.state.fxsave,
+ memcpy(&vcpu->arch.guest_fpu->state.fxsave,
guest_xsave->region, sizeof(struct fxregs_state));
}
return 0;
@@ -3828,6 +3840,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return kvm_hv_activate_synic(vcpu, cap->cap ==
KVM_CAP_HYPERV_SYNIC2);
case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
+ if (!kvm_x86_ops->nested_enable_evmcs)
+ return -ENOTTY;
r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version);
if (!r) {
user_ptr = (void __user *)(uintptr_t)cap->args[0];
@@ -4190,6 +4204,25 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
break;
}
+ case KVM_GET_SUPPORTED_HV_CPUID: {
+ struct kvm_cpuid2 __user *cpuid_arg = argp;
+ struct kvm_cpuid2 cpuid;
+
+ r = -EFAULT;
+ if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
+ goto out;
+
+ r = kvm_vcpu_ioctl_get_hv_cpuid(vcpu, &cpuid,
+ cpuid_arg->entries);
+ if (r)
+ goto out;
+
+ r = -EFAULT;
+ if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
+ goto out;
+ r = 0;
+ break;
+ }
default:
r = -EINVAL;
}
@@ -4394,7 +4427,7 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
*/
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
- bool is_dirty = false;
+ bool flush = false;
int r;
mutex_lock(&kvm->slots_lock);
@@ -4405,14 +4438,41 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
if (kvm_x86_ops->flush_log_dirty)
kvm_x86_ops->flush_log_dirty(kvm);
- r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+ r = kvm_get_dirty_log_protect(kvm, log, &flush);
/*
* All the TLBs can be flushed out of mmu lock, see the comments in
* kvm_mmu_slot_remove_write_access().
*/
lockdep_assert_held(&kvm->slots_lock);
- if (is_dirty)
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+}
+
+int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+{
+ bool flush = false;
+ int r;
+
+ mutex_lock(&kvm->slots_lock);
+
+ /*
+ * Flush potentially hardware-cached dirty pages to dirty_bitmap.
+ */
+ if (kvm_x86_ops->flush_log_dirty)
+ kvm_x86_ops->flush_log_dirty(kvm);
+
+ r = kvm_clear_dirty_log_protect(kvm, log, &flush);
+
+ /*
+ * All the TLBs can be flushed out of mmu lock, see the comments in
+ * kvm_mmu_slot_remove_write_access().
+ */
+ lockdep_assert_held(&kvm->slots_lock);
+ if (flush)
kvm_flush_remote_tlbs(kvm);
mutex_unlock(&kvm->slots_lock);
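
kvm_vm_ioctl_clear_dirty_log() backs the new KVM_CLEAR_DIRTY_LOG vm ioctl, the manual-clear counterpart of KVM_GET_DIRTY_LOG: userspace fetches the bitmap, processes it, then clears exactly the bits it handled. A hedged userspace sketch (vm_fd, slot_id, npages, and bitmap are assumed to exist):

    struct kvm_clear_dirty_log clear = {
        .slot         = slot_id,
        .first_page   = 0,
        .num_pages    = npages,     /* offset/count should be 64-aligned
                                       unless the range ends the slot */
        .dirty_bitmap = bitmap,     /* bits to clear, as previously
                                       returned by KVM_GET_DIRTY_LOG */
    };

    if (ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear) < 0)
        err(1, "KVM_CLEAR_DIRTY_LOG");
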
@@ -4431,8 +4491,8 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
return 0;
}
-static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
- struct kvm_enable_cap *cap)
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ struct kvm_enable_cap *cap)
{
int r;
@@ -4765,15 +4825,6 @@ set_identity_unlock:
r = 0;
break;
}
- case KVM_ENABLE_CAP: {
- struct kvm_enable_cap cap;
-
- r = -EFAULT;
- if (copy_from_user(&cap, argp, sizeof(cap)))
- goto out;
- r = kvm_vm_ioctl_enable_cap(kvm, &cap);
- break;
- }
case KVM_MEMORY_ENCRYPT_OP: {
r = -ENOTTY;
if (kvm_x86_ops->mem_enc_op)
@@ -4842,6 +4893,30 @@ static void kvm_init_msr_list(void)
if (!kvm_x86_ops->rdtscp_supported())
continue;
break;
+ case MSR_IA32_RTIT_CTL:
+ case MSR_IA32_RTIT_STATUS:
+ if (!kvm_x86_ops->pt_supported())
+ continue;
+ break;
+ case MSR_IA32_RTIT_CR3_MATCH:
+ if (!kvm_x86_ops->pt_supported() ||
+ !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
+ continue;
+ break;
+ case MSR_IA32_RTIT_OUTPUT_BASE:
+ case MSR_IA32_RTIT_OUTPUT_MASK:
+ if (!kvm_x86_ops->pt_supported() ||
+ (!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
+ !intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
+ continue;
+ break;
+ case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: {
+ if (!kvm_x86_ops->pt_supported() ||
+ msrs_to_save[i] - MSR_IA32_RTIT_ADDR0_A >=
+ intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
+ continue;
+ break;
+ }
default:
break;
}
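
The ADDR0_A ... ADDR3_B case relies on the RTIT address MSRs being architecturally consecutive, with each filter range contributing an A/B pair, so the offset from MSR_IA32_RTIT_ADDR0_A counts half-ranges. The same validity test restated in isolation (pt_addr_msr_valid() is a name invented here, not a kernel helper):

    /* off is 0,1,2,3,... for ADDR0_A, ADDR0_B, ADDR1_A, ...; range n
     * is usable only when n < num_ranges, i.e. off < 2 * num_ranges.
     */
    static bool pt_addr_msr_valid(u32 msr, u32 num_ranges)
    {
        u32 off = msr - MSR_IA32_RTIT_ADDR0_A;

        return off < num_ranges * 2;
    }
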
@@ -6813,11 +6888,30 @@ int kvm_arch_init(void *opaque)
goto out;
}
+ /*
+ * KVM explicitly assumes that the guest has an FPU and
+ * FXSAVE/FXRSTOR. For example, the KVM_GET_FPU ioctl explicitly casts
+ * the vCPU's FPU state to a struct fxregs_state.
+ */
+ if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) {
+ printk(KERN_ERR "kvm: inadequate fpu\n");
+ r = -EOPNOTSUPP;
+ goto out;
+ }
+
r = -ENOMEM;
+ x86_fpu_cache = kmem_cache_create("x86_fpu", sizeof(struct fpu),
+ __alignof__(struct fpu), SLAB_ACCOUNT,
+ NULL);
+ if (!x86_fpu_cache) {
+ printk(KERN_ERR "kvm: failed to allocate cache for x86 fpu\n");
+ goto out;
+ }
+
shared_msrs = alloc_percpu(struct kvm_shared_msrs);
if (!shared_msrs) {
printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
- goto out;
+ goto out_free_x86_fpu_cache;
}
r = kvm_mmu_module_init();
@@ -6850,6 +6944,8 @@ int kvm_arch_init(void *opaque)
out_free_percpu:
free_percpu(shared_msrs);
+out_free_x86_fpu_cache:
+ kmem_cache_destroy(x86_fpu_cache);
out:
return r;
}
@@ -6873,6 +6969,7 @@ void kvm_arch_exit(void)
kvm_x86_ops = NULL;
kvm_mmu_module_exit();
free_percpu(shared_msrs);
+ kmem_cache_destroy(x86_fpu_cache);
}
int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
@@ -6918,6 +7015,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
clock_pairing.nsec = ts.tv_nsec;
clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
clock_pairing.flags = 0;
+ memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
ret = 0;
if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
@@ -7445,7 +7543,7 @@ void kvm_make_scan_ioapic_request(struct kvm *kvm)
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
- if (!kvm_apic_hw_enabled(vcpu->arch.apic))
+ if (!kvm_apic_present(vcpu))
return;
bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
@@ -7455,7 +7553,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
else {
if (vcpu->arch.apicv_active)
kvm_x86_ops->sync_pir_to_irr(vcpu);
- kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+ if (ioapic_in_kernel(vcpu->kvm))
+ kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
}
if (is_guest_mode(vcpu))
@@ -7994,9 +8093,9 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
- copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
+ copy_fpregs_to_fpstate(&current->thread.fpu);
/* PKRU is separately restored in kvm_x86_ops->run. */
- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
~XFEATURE_MASK_PKRU);
preempt_enable();
trace_kvm_fpu(1);
@@ -8006,8 +8105,8 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
- copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
- copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+ copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
+ copy_kernel_to_fpregs(&current->thread.fpu.state);
preempt_enable();
++vcpu->stat.fpu_reload;
trace_kvm_fpu(0);
@@ -8501,7 +8600,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu_load(vcpu);
- fxsave = &vcpu->arch.guest_fpu.state.fxsave;
+ fxsave = &vcpu->arch.guest_fpu->state.fxsave;
memcpy(fpu->fpr, fxsave->st_space, 128);
fpu->fcw = fxsave->cwd;
fpu->fsw = fxsave->swd;
@@ -8521,7 +8620,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
vcpu_load(vcpu);
- fxsave = &vcpu->arch.guest_fpu.state.fxsave;
+ fxsave = &vcpu->arch.guest_fpu->state.fxsave;
memcpy(fxsave->st_space, fpu->fpr, 128);
fxsave->cwd = fpu->fcw;
@@ -8577,9 +8676,9 @@ static int sync_regs(struct kvm_vcpu *vcpu)
static void fx_init(struct kvm_vcpu *vcpu)
{
- fpstate_init(&vcpu->arch.guest_fpu.state);
+ fpstate_init(&vcpu->arch.guest_fpu->state);
if (boot_cpu_has(X86_FEATURE_XSAVES))
- vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
+ vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv =
host_xcr0 | XSTATE_COMPACTION_ENABLED;
/*
@@ -8617,6 +8716,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
kvm_vcpu_mtrr_init(vcpu);
vcpu_load(vcpu);
kvm_vcpu_reset(vcpu, false);
@@ -8703,11 +8803,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
*/
if (init_event)
kvm_put_guest_fpu(vcpu);
- mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
+ mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
XFEATURE_MASK_BNDREGS);
if (mpx_state_buffer)
memset(mpx_state_buffer, 0, sizeof(struct mpx_bndreg_state));
- mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
+ mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
XFEATURE_MASK_BNDCSR);
if (mpx_state_buffer)
memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
@@ -8719,7 +8819,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
kvm_pmu_reset(vcpu);
vcpu->arch.smbase = 0x30000;
- vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
vcpu->arch.msr_misc_features_enables = 0;
vcpu->arch.xcr0 = XFEATURE_MASK_FP;
@@ -9278,7 +9377,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
* with dirty logging disabled in order to eliminate unnecessary GPA
* logging in PML buffer (and potential PML buffer full VMEXT). This
* guarantees leaving PML enabled during guest's lifetime won't have
- * any additonal overhead from PML when guest is running with dirty
+ * any additional overhead from PML when guest is running with dirty
* logging disabled for memory slots.
*
* kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c
index 225fe2f0bfec..cd84f067e41d 100644
--- a/arch/x86/mm/debug_pagetables.c
+++ b/arch/x86/mm/debug_pagetables.c
@@ -10,20 +10,9 @@ static int ptdump_show(struct seq_file *m, void *v)
return 0;
}
-static int ptdump_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, ptdump_show, NULL);
-}
-
-static const struct file_operations ptdump_fops = {
- .owner = THIS_MODULE,
- .open = ptdump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ptdump);
-static int ptdump_show_curknl(struct seq_file *m, void *v)
+static int ptdump_curknl_show(struct seq_file *m, void *v)
{
if (current->mm->pgd) {
down_read(&current->mm->mmap_sem);
@@ -33,23 +22,12 @@ static int ptdump_show_curknl(struct seq_file *m, void *v)
return 0;
}
-static int ptdump_open_curknl(struct inode *inode, struct file *filp)
-{
- return single_open(filp, ptdump_show_curknl, NULL);
-}
-
-static const struct file_operations ptdump_curknl_fops = {
- .owner = THIS_MODULE,
- .open = ptdump_open_curknl,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ptdump_curknl);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
static struct dentry *pe_curusr;
-static int ptdump_show_curusr(struct seq_file *m, void *v)
+static int ptdump_curusr_show(struct seq_file *m, void *v)
{
if (current->mm->pgd) {
down_read(&current->mm->mmap_sem);
@@ -59,42 +37,20 @@ static int ptdump_show_curusr(struct seq_file *m, void *v)
return 0;
}
-static int ptdump_open_curusr(struct inode *inode, struct file *filp)
-{
- return single_open(filp, ptdump_show_curusr, NULL);
-}
-
-static const struct file_operations ptdump_curusr_fops = {
- .owner = THIS_MODULE,
- .open = ptdump_open_curusr,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ptdump_curusr);
#endif
#if defined(CONFIG_EFI) && defined(CONFIG_X86_64)
static struct dentry *pe_efi;
-static int ptdump_show_efi(struct seq_file *m, void *v)
+static int ptdump_efi_show(struct seq_file *m, void *v)
{
if (efi_mm.pgd)
ptdump_walk_pgd_level_debugfs(m, efi_mm.pgd, false);
return 0;
}
-static int ptdump_open_efi(struct inode *inode, struct file *filp)
-{
- return single_open(filp, ptdump_show_efi, NULL);
-}
-
-static const struct file_operations ptdump_efi_fops = {
- .owner = THIS_MODULE,
- .open = ptdump_open_efi,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ptdump_efi);
#endif
static struct dentry *dir, *pe_knl, *pe_curknl;
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index fc37bbd23eb8..abcb8d00b014 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -55,10 +55,10 @@ struct addr_marker {
enum address_markers_idx {
USER_SPACE_NR = 0,
KERNEL_SPACE_NR,
- LOW_KERNEL_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && defined(CONFIG_X86_5LEVEL)
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
LDT_NR,
#endif
+ LOW_KERNEL_NR,
VMALLOC_START_NR,
VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
@@ -66,9 +66,6 @@ enum address_markers_idx {
KASAN_SHADOW_END_NR,
#endif
CPU_ENTRY_AREA_NR,
-#if defined(CONFIG_MODIFY_LDT_SYSCALL) && !defined(CONFIG_X86_5LEVEL)
- LDT_NR,
-#endif
#ifdef CONFIG_X86_ESPFIX64
ESPFIX_START_NR,
#endif
@@ -512,11 +509,11 @@ static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
/*
- * ffff800000000000 - ffff87ffffffffff is reserved for
- * the hypervisor.
+ * A hole at the beginning of the kernel address space, reserved
+ * for a hypervisor.
*/
- return (idx >= pgd_index(__PAGE_OFFSET) - 16) &&
- (idx < pgd_index(__PAGE_OFFSET));
+ return (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
+ (idx < pgd_index(GUARD_HOLE_END_ADDR));
#else
return false;
#endif
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 71d4b9d4d43f..2ff25ad33233 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -27,6 +27,7 @@
#include <asm/vm86.h> /* struct vm86 */
#include <asm/mmu_context.h> /* vma_pkey() */
#include <asm/efi.h> /* efi_recover_from_page_fault()*/
+#include <asm/desc.h> /* store_idt(), ... */
#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>
@@ -571,10 +572,55 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
return 0;
}
+static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
+{
+ u32 offset = (index >> 3) * sizeof(struct desc_struct);
+ unsigned long addr;
+ struct ldttss_desc desc;
+
+ if (index == 0) {
+ pr_alert("%s: NULL\n", name);
+ return;
+ }
+
+ if (offset + sizeof(struct ldttss_desc) >= gdt->size) {
+ pr_alert("%s: 0x%hx -- out of bounds\n", name, index);
+ return;
+ }
+
+ if (probe_kernel_read(&desc, (void *)(gdt->address + offset),
+ sizeof(struct ldttss_desc))) {
+ pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
+ name, index);
+ return;
+ }
+
+ addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24);
+#ifdef CONFIG_X86_64
+ addr |= ((u64)desc.base3 << 32);
+#endif
+ pr_alert("%s: 0x%hx -- base=0x%lx limit=0x%x\n",
+ name, index, addr, (desc.limit0 | (desc.limit1 << 16)));
+}
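
show_ldttss() first turns a selector into a GDT byte offset (a selector's low three bits are the RPL and TI fields), then stitches the descriptor base back together from the split base0/base1/base2 (and, on 64-bit, base3) fields. A worked example of the offset math (the selector value is illustrative):

    u16 sel = 0x50;                                     /* index 10, RPL 0, GDT */
    u32 off = (sel >> 3) * sizeof(struct desc_struct);  /* = 80 bytes in */
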
+
+/*
+ * This helper function transforms the #PF error_code bits into
+ * "[PROT] [USER]" type of descriptive, almost human-readable error strings:
+ */
+static void err_str_append(unsigned long error_code, char *buf, unsigned long mask, const char *txt)
+{
+ if (error_code & mask) {
+ if (buf[0])
+ strcat(buf, " ");
+ strcat(buf, txt);
+ }
+}
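
For instance, a user-mode write to a present page yields error_code = X86_PF_PROT | X86_PF_WRITE | X86_PF_USER (0x7), and three of the calls below append their tags in order. A worked sketch of the helper's output:

    char buf[64] = "";

    err_str_append(0x7, buf, X86_PF_PROT,  "[PROT]");
    err_str_append(0x7, buf, X86_PF_WRITE, "[WRITE]");
    err_str_append(0x7, buf, X86_PF_USER,  "[USER]");
    /* buf now reads "[PROT] [WRITE] [USER]" */
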
+
static void
-show_fault_oops(struct pt_regs *regs, unsigned long error_code,
- unsigned long address)
+show_fault_oops(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
+ char err_txt[64];
+
if (!oops_may_print())
return;
@@ -602,6 +648,52 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
address < PAGE_SIZE ? "NULL pointer dereference" : "paging request",
(void *)address);
+ err_txt[0] = 0;
+
+ /*
+ * Note: the combined length of the appended strings, including the
+ * separating spaces and the terminating NUL, must fit into err_txt[].
+ */
+ err_str_append(error_code, err_txt, X86_PF_PROT, "[PROT]" );
+ err_str_append(error_code, err_txt, X86_PF_WRITE, "[WRITE]");
+ err_str_append(error_code, err_txt, X86_PF_USER, "[USER]" );
+ err_str_append(error_code, err_txt, X86_PF_RSVD, "[RSVD]" );
+ err_str_append(error_code, err_txt, X86_PF_INSTR, "[INSTR]");
+ err_str_append(error_code, err_txt, X86_PF_PK, "[PK]" );
+
+ pr_alert("#PF error: %s\n", error_code ? err_txt : "[normal kernel read fault]");
+
+ if (!(error_code & X86_PF_USER) && user_mode(regs)) {
+ struct desc_ptr idt, gdt;
+ u16 ldtr, tr;
+
+ pr_alert("This was a system access from user code\n");
+
+ /*
+ * This can happen for quite a few reasons. The more obvious
+ * ones are faults accessing the GDT or LDT. Perhaps
+ * surprisingly, if the CPU tries to deliver a benign or
+ * contributory exception from user code and gets a page fault
+ * during delivery, the page fault can be delivered as though
+ * it originated directly from user code. This could happen
+ * due to wrong permissions on the IDT, GDT, LDT, TSS, or
+ * kernel or IST stack.
+ */
+ store_idt(&idt);
+
+ /* Usable even on Xen PV -- it's just slow. */
+ native_store_gdt(&gdt);
+
+ pr_alert("IDT: 0x%lx (limit=0x%hx) GDT: 0x%lx (limit=0x%hx)\n",
+ idt.address, idt.size, gdt.address, gdt.size);
+
+ store_ldt(ldtr);
+ show_ldttss(&gdt, "LDTR", ldtr);
+
+ store_tr(tr);
+ show_ldttss(&gdt, "TR", tr);
+ }
+
dump_pagetable(address);
}
@@ -621,16 +713,30 @@ pgtable_bad(struct pt_regs *regs, unsigned long error_code,
tsk->comm, address);
dump_pagetable(address);
- tsk->thread.cr2 = address;
- tsk->thread.trap_nr = X86_TRAP_PF;
- tsk->thread.error_code = error_code;
-
if (__die("Bad pagetable", regs, error_code))
sig = 0;
oops_end(flags, regs, sig);
}
+static void set_signal_archinfo(unsigned long address,
+ unsigned long error_code)
+{
+ struct task_struct *tsk = current;
+
+ /*
+ * To avoid leaking information about the kernel page
+ * table layout, pretend that user-mode accesses to
+ * kernel addresses are always protection faults.
+ */
+ if (address >= TASK_SIZE_MAX)
+ error_code |= X86_PF_PROT;
+
+ tsk->thread.trap_nr = X86_TRAP_PF;
+ tsk->thread.error_code = error_code | X86_PF_USER;
+ tsk->thread.cr2 = address;
+}
+
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
unsigned long address, int signal, int si_code)
@@ -639,6 +745,15 @@ no_context(struct pt_regs *regs, unsigned long error_code,
unsigned long flags;
int sig;
+ if (user_mode(regs)) {
+ /*
+ * This is an implicit supervisor-mode access from user
+ * mode. Bypass all the kernel-mode recovery code and just
+ * OOPS.
+ */
+ goto oops;
+ }
+
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
/*
@@ -656,9 +771,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
* faulting through the emulate_vsyscall() logic.
*/
if (current->thread.sig_on_uaccess_err && signal) {
- tsk->thread.trap_nr = X86_TRAP_PF;
- tsk->thread.error_code = error_code | X86_PF_USER;
- tsk->thread.cr2 = address;
+ set_signal_archinfo(address, error_code);
/* XXX: hwpoison faults will set the wrong code. */
force_sig_fault(signal, si_code, (void __user *)address,
@@ -726,6 +839,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
if (IS_ENABLED(CONFIG_EFI))
efi_recover_from_page_fault(address);
+oops:
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice:
@@ -737,10 +851,6 @@ no_context(struct pt_regs *regs, unsigned long error_code,
if (task_stack_end_corrupted(tsk))
printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
- tsk->thread.cr2 = address;
- tsk->thread.trap_nr = X86_TRAP_PF;
- tsk->thread.error_code = error_code;
-
sig = SIGKILL;
if (__die("Oops", regs, error_code))
sig = 0;
@@ -794,7 +904,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
struct task_struct *tsk = current;
/* User mode accesses just cause a SIGSEGV */
- if (error_code & X86_PF_USER) {
+ if (user_mode(regs) && (error_code & X86_PF_USER)) {
/*
* It's possible to have interrupts off here:
*/
@@ -821,9 +931,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
if (likely(show_unhandled_signals))
show_signal_msg(regs, error_code, address, tsk);
- tsk->thread.cr2 = address;
- tsk->thread.error_code = error_code;
- tsk->thread.trap_nr = X86_TRAP_PF;
+ set_signal_archinfo(address, error_code);
if (si_code == SEGV_PKUERR)
force_sig_pkuerr((void __user *)address, pkey);
@@ -937,9 +1045,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
if (is_prefetch(regs, error_code, address))
return;
- tsk->thread.cr2 = address;
- tsk->thread.error_code = error_code;
- tsk->thread.trap_nr = X86_TRAP_PF;
+ set_signal_archinfo(address, error_code);
#ifdef CONFIG_MEMORY_FAILURE
if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
@@ -1148,23 +1254,6 @@ static int fault_in_kernel_space(unsigned long address)
return address >= TASK_SIZE_MAX;
}
-static inline bool smap_violation(int error_code, struct pt_regs *regs)
-{
- if (!IS_ENABLED(CONFIG_X86_SMAP))
- return false;
-
- if (!static_cpu_has(X86_FEATURE_SMAP))
- return false;
-
- if (error_code & X86_PF_USER)
- return false;
-
- if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
- return false;
-
- return true;
-}
-
/*
* Called for all faults where 'address' is part of the kernel address
* space. Might get called for faults that originate from *code* that
@@ -1230,7 +1319,6 @@ void do_user_addr_fault(struct pt_regs *regs,
unsigned long hw_error_code,
unsigned long address)
{
- unsigned long sw_error_code;
struct vm_area_struct *vma;
struct task_struct *tsk;
struct mm_struct *mm;
@@ -1252,10 +1340,16 @@ void do_user_addr_fault(struct pt_regs *regs,
pgtable_bad(regs, hw_error_code, address);
/*
- * Check for invalid kernel (supervisor) access to user
- * pages in the user address space.
+ * If SMAP is on, check for invalid kernel (supervisor) access to user
+ * pages in the user address space. The odd case here is WRUSS,
+ * which, according to the preliminary documentation, does not respect
+ * SMAP and will have the USER bit set, so in all cases SMAP
+ * enforcement appears to be consistent with the USER bit.
*/
- if (unlikely(smap_violation(hw_error_code, regs))) {
+ if (unlikely(cpu_feature_enabled(X86_FEATURE_SMAP) &&
+ !(hw_error_code & X86_PF_USER) &&
+ !(regs->flags & X86_EFLAGS_AC)))
+ {
bad_area_nosemaphore(regs, hw_error_code, address);
return;
}
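
The open-coded test is essentially the removed smap_violation() with the IS_ENABLED() and feature checks folded into cpu_feature_enabled(), and the user-mode leg now handled earlier by the implicit-supervisor-access oops path in no_context(). Restated as a standalone predicate (a sketch, not a kernel helper):

    static bool smap_fault(unsigned long hw_error_code, struct pt_regs *regs)
    {
        return cpu_feature_enabled(X86_FEATURE_SMAP) &&
               !(hw_error_code & X86_PF_USER) &&    /* supervisor access */
               !(regs->flags & X86_EFLAGS_AC);      /* no stac() window  */
    }
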
@@ -1270,13 +1364,6 @@ void do_user_addr_fault(struct pt_regs *regs,
}
/*
- * hw_error_code is literally the "page fault error code" passed to
- * the kernel directly from the hardware. But, we will shortly be
- * modifying it in software, so give it a new name.
- */
- sw_error_code = hw_error_code;
-
- /*
* It's safe to allow irq's after cr2 has been saved and the
* vmalloc fault has been handled.
*
@@ -1285,26 +1372,6 @@ void do_user_addr_fault(struct pt_regs *regs,
*/
if (user_mode(regs)) {
local_irq_enable();
- /*
- * Up to this point, X86_PF_USER set in hw_error_code
- * indicated a user-mode access. But, after this,
- * X86_PF_USER in sw_error_code will indicate either
- * that, *or* an implicit kernel(supervisor)-mode access
- * which originated from user mode.
- */
- if (!(hw_error_code & X86_PF_USER)) {
- /*
- * The CPU was in user mode, but the CPU says
- * the fault was not a user-mode access.
- * Must be an implicit kernel-mode access,
- * which we do not expect to happen in the
- * user address space.
- */
- pr_warn_once("kernel-mode error from user-mode: %lx\n",
- hw_error_code);
-
- sw_error_code |= X86_PF_USER;
- }
flags |= FAULT_FLAG_USER;
} else {
if (regs->flags & X86_EFLAGS_IF)
@@ -1313,9 +1380,9 @@ void do_user_addr_fault(struct pt_regs *regs,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (sw_error_code & X86_PF_WRITE)
+ if (hw_error_code & X86_PF_WRITE)
flags |= FAULT_FLAG_WRITE;
- if (sw_error_code & X86_PF_INSTR)
+ if (hw_error_code & X86_PF_INSTR)
flags |= FAULT_FLAG_INSTRUCTION;
#ifdef CONFIG_X86_64
@@ -1328,7 +1395,7 @@ void do_user_addr_fault(struct pt_regs *regs,
* The vsyscall page does not have a "real" VMA, so do this
* emulation before we go searching for VMAs.
*/
- if ((sw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) {
+ if ((hw_error_code & X86_PF_INSTR) && is_vsyscall_vaddr(address)) {
if (emulate_vsyscall(regs, address))
return;
}
@@ -1344,18 +1411,15 @@ void do_user_addr_fault(struct pt_regs *regs,
* Only do the expensive exception table search when we might be at
* risk of a deadlock. This happens if we
* 1. Failed to acquire mmap_sem, and
- * 2. The access did not originate in userspace. Note: either the
- * hardware or earlier page fault code may set X86_PF_USER
- * in sw_error_code.
+ * 2. The access did not originate in userspace.
*/
if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
- if (!(sw_error_code & X86_PF_USER) &&
- !search_exception_tables(regs->ip)) {
+ if (!user_mode(regs) && !search_exception_tables(regs->ip)) {
/*
* Fault from code in kernel from
* which we do not expect faults.
*/
- bad_area_nosemaphore(regs, sw_error_code, address);
+ bad_area_nosemaphore(regs, hw_error_code, address);
return;
}
retry:
@@ -1371,29 +1435,17 @@ retry:
vma = find_vma(mm, address);
if (unlikely(!vma)) {
- bad_area(regs, sw_error_code, address);
+ bad_area(regs, hw_error_code, address);
return;
}
if (likely(vma->vm_start <= address))
goto good_area;
if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
- bad_area(regs, sw_error_code, address);
+ bad_area(regs, hw_error_code, address);
return;
}
- if (sw_error_code & X86_PF_USER) {
- /*
- * Accessing the stack below %sp is always a bug.
- * The large cushion allows instructions like enter
- * and pusha to work. ("enter $65535, $31" pushes
- * 32 pointers and then decrements %sp by 65535.)
- */
- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
- bad_area(regs, sw_error_code, address);
- return;
- }
- }
if (unlikely(expand_stack(vma, address))) {
- bad_area(regs, sw_error_code, address);
+ bad_area(regs, hw_error_code, address);
return;
}
@@ -1402,8 +1454,8 @@ retry:
* we can handle it..
*/
good_area:
- if (unlikely(access_error(sw_error_code, vma))) {
- bad_area_access_error(regs, sw_error_code, address, vma);
+ if (unlikely(access_error(hw_error_code, vma))) {
+ bad_area_access_error(regs, hw_error_code, address, vma);
return;
}
@@ -1442,13 +1494,13 @@ good_area:
return;
/* Not returning to user mode? Handle exceptions or die: */
- no_context(regs, sw_error_code, address, SIGBUS, BUS_ADRERR);
+ no_context(regs, hw_error_code, address, SIGBUS, BUS_ADRERR);
return;
}
up_read(&mm->mmap_sem);
if (unlikely(fault & VM_FAULT_ERROR)) {
- mm_fault_error(regs, sw_error_code, address, fault);
+ mm_fault_error(regs, hw_error_code, address, fault);
return;
}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ef99f3892e1f..427a955a2cf2 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -931,7 +931,7 @@ unsigned long max_swapfile_size(void)
pages = generic_max_swapfile_size();
- if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+ if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
unsigned long long l1tf_limit = l1tf_pfn_limit();
/*
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5fab264948c2..484c1b92f078 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -432,7 +432,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PAGE_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_pte(pte, __pte(0));
+ set_pte_safe(pte, __pte(0));
continue;
}
@@ -452,7 +452,7 @@ phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
pages++;
- set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+ set_pte_safe(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
}
@@ -487,7 +487,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PMD_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_pmd(pmd, __pmd(0));
+ set_pmd_safe(pmd, __pmd(0));
continue;
}
@@ -524,7 +524,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
if (page_size_mask & (1<<PG_LEVEL_2M)) {
pages++;
spin_lock(&init_mm.page_table_lock);
- set_pte((pte_t *)pmd,
+ set_pte_safe((pte_t *)pmd,
pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
__pgprot(pgprot_val(prot) | _PAGE_PSE)));
spin_unlock(&init_mm.page_table_lock);
@@ -536,7 +536,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
spin_lock(&init_mm.page_table_lock);
- pmd_populate_kernel(&init_mm, pmd, pte);
+ pmd_populate_kernel_safe(&init_mm, pmd, pte);
spin_unlock(&init_mm.page_table_lock);
}
update_page_count(PG_LEVEL_2M, pages);
@@ -573,7 +573,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & PUD_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_pud(pud, __pud(0));
+ set_pud_safe(pud, __pud(0));
continue;
}
@@ -584,7 +584,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
paddr_end,
page_size_mask,
prot);
- __flush_tlb_all();
continue;
}
/*
@@ -611,7 +610,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
if (page_size_mask & (1<<PG_LEVEL_1G)) {
pages++;
spin_lock(&init_mm.page_table_lock);
- set_pte((pte_t *)pud,
+ set_pte_safe((pte_t *)pud,
pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
PAGE_KERNEL_LARGE));
spin_unlock(&init_mm.page_table_lock);
@@ -624,10 +623,9 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
page_size_mask, prot);
spin_lock(&init_mm.page_table_lock);
- pud_populate(&init_mm, pud, pmd);
+ pud_populate_safe(&init_mm, pud, pmd);
spin_unlock(&init_mm.page_table_lock);
}
- __flush_tlb_all();
update_page_count(PG_LEVEL_1G, pages);
@@ -659,7 +657,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
E820_TYPE_RAM) &&
!e820__mapped_any(paddr & P4D_MASK, paddr_next,
E820_TYPE_RESERVED_KERN))
- set_p4d(p4d, __p4d(0));
+ set_p4d_safe(p4d, __p4d(0));
continue;
}
@@ -668,7 +666,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
paddr_last = phys_pud_init(pud, paddr,
paddr_end,
page_size_mask);
- __flush_tlb_all();
continue;
}
@@ -677,10 +674,9 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
page_size_mask);
spin_lock(&init_mm.page_table_lock);
- p4d_populate(&init_mm, p4d, pud);
+ p4d_populate_safe(&init_mm, p4d, pud);
spin_unlock(&init_mm.page_table_lock);
}
- __flush_tlb_all();
return paddr_last;
}
@@ -723,9 +719,9 @@ kernel_physical_mapping_init(unsigned long paddr_start,
spin_lock(&init_mm.page_table_lock);
if (pgtable_l5_enabled())
- pgd_populate(&init_mm, pgd, p4d);
+ pgd_populate_safe(&init_mm, pgd, p4d);
else
- p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
+ p4d_populate_safe(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d);
spin_unlock(&init_mm.page_table_lock);
pgd_changed = true;
}
@@ -733,8 +729,6 @@ kernel_physical_mapping_init(unsigned long paddr_start,
if (pgd_changed)
sync_global_pgds(vaddr_start, vaddr_end - 1);
- __flush_tlb_all();
-
return paddr_last;
}
diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 4e1f6e1b8159..319bde386d5f 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -19,4 +19,6 @@ extern int after_bootmem;
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
+extern unsigned long tlb_single_page_flush_ceiling;
+
#endif /* __X86_MM_INTERNAL_H */
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 08f8f76a4852..facce271e8b9 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -23,7 +23,8 @@
static __read_mostly int print = 1;
enum {
- NTEST = 400,
+ NTEST = 3 * 100,
+ NPAGES = 100,
#ifdef CONFIG_X86_64
LPS = (1 << PMD_SHIFT),
#elif defined(CONFIG_X86_PAE)
@@ -110,6 +111,9 @@ static int print_split(struct split_state *s)
static unsigned long addr[NTEST];
static unsigned int len[NTEST];
+static struct page *pages[NPAGES];
+static unsigned long addrs[NPAGES];
+
/* Change the global bit on random pages in the direct mapping */
static int pageattr_test(void)
{
@@ -120,7 +124,6 @@ static int pageattr_test(void)
unsigned int level;
int i, k;
int err;
- unsigned long test_addr;
if (print)
printk(KERN_INFO "CPA self-test:\n");
@@ -137,7 +140,7 @@ static int pageattr_test(void)
unsigned long pfn = prandom_u32() % max_pfn_mapped;
addr[i] = (unsigned long)__va(pfn << PAGE_SHIFT);
- len[i] = prandom_u32() % 100;
+ len[i] = prandom_u32() % NPAGES;
len[i] = min_t(unsigned long, len[i], max_pfn_mapped - pfn - 1);
if (len[i] == 0)
@@ -167,14 +170,29 @@ static int pageattr_test(void)
break;
}
__set_bit(pfn + k, bm);
+ addrs[k] = addr[i] + k*PAGE_SIZE;
+ pages[k] = pfn_to_page(pfn + k);
}
if (!addr[i] || !pte || !k) {
addr[i] = 0;
continue;
}
- test_addr = addr[i];
- err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0);
+ switch (i % 3) {
+ case 0:
+ err = change_page_attr_set(&addr[i], len[i], PAGE_CPA_TEST, 0);
+ break;
+
+ case 1:
+ err = change_page_attr_set(addrs, len[i], PAGE_CPA_TEST, 1);
+ break;
+
+ case 2:
+ err = cpa_set_pages_array(pages, len[i], PAGE_CPA_TEST);
+ break;
+ }
+
if (err < 0) {
printk(KERN_ERR "CPA %d failed %d\n", i, err);
failed++;
@@ -206,8 +224,7 @@ static int pageattr_test(void)
failed++;
continue;
}
- test_addr = addr[i];
- err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0);
+ err = change_page_attr_clear(&addr[i], len[i], PAGE_CPA_TEST, 0);
if (err < 0) {
printk(KERN_ERR "CPA reverting failed: %d\n", err);
failed++;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index db7a10082238..4f8972311a77 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,6 +26,8 @@
#include <asm/pat.h>
#include <asm/set_memory.h>
+#include "mm_internal.h"
+
/*
* The current flushing context - we pass it instead of 5 arguments:
*/
@@ -35,11 +37,11 @@ struct cpa_data {
pgprot_t mask_set;
pgprot_t mask_clr;
unsigned long numpages;
- int flags;
+ unsigned long curpage;
unsigned long pfn;
- unsigned force_split : 1,
+ unsigned int flags;
+ unsigned int force_split : 1,
force_static_prot : 1;
- int curpage;
struct page **pages;
};
@@ -228,19 +230,28 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn)
#endif
+static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx)
+{
+ if (cpa->flags & CPA_PAGES_ARRAY) {
+ struct page *page = cpa->pages[idx];
+
+ if (unlikely(PageHighMem(page)))
+ return 0;
+
+ return (unsigned long)page_address(page);
+ }
+
+ if (cpa->flags & CPA_ARRAY)
+ return cpa->vaddr[idx];
+
+ return *cpa->vaddr + idx * PAGE_SIZE;
+}
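
__cpa_addr() folds the three addressing modes (pages array, vaddr array, linear range) into one lookup, so the flush and change paths below can iterate by index instead of mutating *cpa->vaddr in place. In the plain range mode the math is just base plus offset; a worked example (the base address is illustrative):

    unsigned long base = 0xffff888000000000UL;
    struct cpa_data cpa = { .vaddr = &base, .numpages = 4, .flags = 0 };

    /* __cpa_addr(&cpa, 2) == base + 2 * PAGE_SIZE; a highmem page in
     * CPA_PAGES_ARRAY mode would instead yield 0 ("no kernel mapping").
     */
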
+
/*
* Flushing functions
*/
-/**
- * clflush_cache_range - flush a cache range with clflush
- * @vaddr: virtual start address
- * @size: number of bytes to flush
- *
- * clflushopt is an unordered instruction which needs fencing with mfence or
- * sfence to avoid ordering issues.
- */
-void clflush_cache_range(void *vaddr, unsigned int size)
+static void clflush_cache_range_opt(void *vaddr, unsigned int size)
{
const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
@@ -249,11 +260,22 @@ void clflush_cache_range(void *vaddr, unsigned int size)
if (p >= vend)
return;
- mb();
-
for (; p < vend; p += clflush_size)
clflushopt(p);
+}
+/**
+ * clflush_cache_range - flush a cache range with clflush
+ * @vaddr: virtual start address
+ * @size: number of bytes to flush
+ *
+ * CLFLUSHOPT is an unordered instruction which needs fencing with MFENCE or
+ * SFENCE to avoid ordering issues.
+ */
+void clflush_cache_range(void *vaddr, unsigned int size)
+{
+ mb();
+ clflush_cache_range_opt(vaddr, size);
mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);
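
Splitting out clflush_cache_range_opt() lets cpa_flush() bracket many per-page flushes with a single pair of fences, while the exported clflush_cache_range() stays fully ordered for standalone callers. A sketch of the batched pattern (clflush_cache_range_opt() is static to this file, so this only works from within it):

    static void flush_batch(void **addrs, int n)
    {
        int i;

        mb();                            /* order against prior stores  */
        for (i = 0; i < n; i++)
            clflush_cache_range_opt(addrs[i], PAGE_SIZE);
        mb();                            /* one fence for the whole batch */
    }
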
@@ -285,79 +307,49 @@ static void cpa_flush_all(unsigned long cache)
on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}
-static bool __cpa_flush_range(unsigned long start, int numpages, int cache)
+void __cpa_flush_tlb(void *data)
{
- BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
-
- WARN_ON(PAGE_ALIGN(start) != start);
-
- if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
- cpa_flush_all(cache);
- return true;
- }
+ struct cpa_data *cpa = data;
+ unsigned int i;
- flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
-
- return !cache;
+ for (i = 0; i < cpa->numpages; i++)
+ __flush_tlb_one_kernel(__cpa_addr(cpa, i));
}
-static void cpa_flush_range(unsigned long start, int numpages, int cache)
+static void cpa_flush(struct cpa_data *data, int cache)
{
- unsigned int i, level;
- unsigned long addr;
+ struct cpa_data *cpa = data;
+ unsigned int i;
- if (__cpa_flush_range(start, numpages, cache))
- return;
-
- /*
- * We only need to flush on one CPU,
- * clflush is a MESI-coherent instruction that
- * will cause all other CPUs to flush the same
- * cachelines:
- */
- for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
- pte_t *pte = lookup_address(addr, &level);
+ BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
- /*
- * Only flush present addresses:
- */
- if (pte && (pte_val(*pte) & _PAGE_PRESENT))
- clflush_cache_range((void *) addr, PAGE_SIZE);
+ if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ cpa_flush_all(cache);
+ return;
}
-}
-static void cpa_flush_array(unsigned long baddr, unsigned long *start,
- int numpages, int cache,
- int in_flags, struct page **pages)
-{
- unsigned int i, level;
+ if (cpa->numpages <= tlb_single_page_flush_ceiling)
+ on_each_cpu(__cpa_flush_tlb, cpa, 1);
+ else
+ flush_tlb_all();
- if (__cpa_flush_range(baddr, numpages, cache))
+ if (!cache)
return;
- /*
- * We only need to flush on one CPU,
- * clflush is a MESI-coherent instruction that
- * will cause all other CPUs to flush the same
- * cachelines:
- */
- for (i = 0; i < numpages; i++) {
- unsigned long addr;
- pte_t *pte;
-
- if (in_flags & CPA_PAGES_ARRAY)
- addr = (unsigned long)page_address(pages[i]);
- else
- addr = start[i];
+ mb();
+ for (i = 0; i < cpa->numpages; i++) {
+ unsigned long addr = __cpa_addr(cpa, i);
+ unsigned int level;
- pte = lookup_address(addr, &level);
+ pte_t *pte = lookup_address(addr, &level);
/*
* Only flush present addresses:
*/
if (pte && (pte_val(*pte) & _PAGE_PRESENT))
- clflush_cache_range((void *)addr, PAGE_SIZE);
+ clflush_cache_range_opt((void *)addr, PAGE_SIZE);
}
+ mb();
}
static bool overlaps(unsigned long r1_start, unsigned long r1_end,
@@ -1468,15 +1460,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
unsigned int level;
pte_t *kpte, old_pte;
- if (cpa->flags & CPA_PAGES_ARRAY) {
- struct page *page = cpa->pages[cpa->curpage];
- if (unlikely(PageHighMem(page)))
- return 0;
- address = (unsigned long)page_address(page);
- } else if (cpa->flags & CPA_ARRAY)
- address = cpa->vaddr[cpa->curpage];
- else
- address = *cpa->vaddr;
+ address = __cpa_addr(cpa, cpa->curpage);
repeat:
kpte = _lookup_address_cpa(cpa, address, &level);
if (!kpte)
@@ -1557,22 +1541,14 @@ static int cpa_process_alias(struct cpa_data *cpa)
* No need to redo, when the primary call touched the direct
* mapping already:
*/
- if (cpa->flags & CPA_PAGES_ARRAY) {
- struct page *page = cpa->pages[cpa->curpage];
- if (unlikely(PageHighMem(page)))
- return 0;
- vaddr = (unsigned long)page_address(page);
- } else if (cpa->flags & CPA_ARRAY)
- vaddr = cpa->vaddr[cpa->curpage];
- else
- vaddr = *cpa->vaddr;
-
+ vaddr = __cpa_addr(cpa, cpa->curpage);
if (!(within(vaddr, PAGE_OFFSET,
PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
alias_cpa = *cpa;
alias_cpa.vaddr = &laddr;
alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+ alias_cpa.curpage = 0;
ret = __change_page_attr_set_clr(&alias_cpa, 0);
if (ret)
@@ -1592,6 +1568,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
alias_cpa = *cpa;
alias_cpa.vaddr = &temp_cpa_vaddr;
alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+ alias_cpa.curpage = 0;
/*
* The high mapping range is imprecise, so ignore the
@@ -1607,14 +1584,15 @@ static int cpa_process_alias(struct cpa_data *cpa)
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
unsigned long numpages = cpa->numpages;
- int ret;
+ unsigned long rempages = numpages;
+ int ret = 0;
- while (numpages) {
+ while (rempages) {
/*
* Store the remaining nr of pages for the large page
* preservation check.
*/
- cpa->numpages = numpages;
+ cpa->numpages = rempages;
/* for array changes, we can't use large page */
if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
cpa->numpages = 1;
@@ -1625,12 +1603,12 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
if (!debug_pagealloc_enabled())
spin_unlock(&cpa_lock);
if (ret)
- return ret;
+ goto out;
if (checkalias) {
ret = cpa_process_alias(cpa);
if (ret)
- return ret;
+ goto out;
}
/*
@@ -1638,15 +1616,15 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
* CPA operation. Either a large page has been
* preserved or a single page update happened.
*/
- BUG_ON(cpa->numpages > numpages || !cpa->numpages);
- numpages -= cpa->numpages;
- if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
- cpa->curpage++;
- else
- *cpa->vaddr += cpa->numpages * PAGE_SIZE;
-
+ BUG_ON(cpa->numpages > rempages || !cpa->numpages);
+ rempages -= cpa->numpages;
+ cpa->curpage += cpa->numpages;
}
- return 0;
+
+out:
+ /* Restore the original numpages */
+ cpa->numpages = numpages;
+ return ret;
}
/*
@@ -1679,7 +1657,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
{
struct cpa_data cpa;
int ret, cache, checkalias;
- unsigned long baddr = 0;
memset(&cpa, 0, sizeof(cpa));
@@ -1704,7 +1681,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
} else if (!(in_flag & CPA_PAGES_ARRAY)) {
/*
* in_flag of CPA_PAGES_ARRAY implies it is aligned.
- * No need to cehck in that case
+ * No need to check in that case
*/
if (*addr & ~PAGE_MASK) {
*addr &= PAGE_MASK;
@@ -1713,11 +1690,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
*/
WARN_ON_ONCE(1);
}
- /*
- * Save address for cache flush. *addr is modified in the call
- * to __change_page_attr_set_clr() below.
- */
- baddr = make_addr_canonical_again(*addr);
}
/* Must avoid aliasing mappings in the highmem code */
@@ -1765,13 +1737,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
goto out;
}
- if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
- cpa_flush_array(baddr, addr, numpages, cache,
- cpa.flags, pages);
- } else {
- cpa_flush_range(baddr, numpages, cache);
- }
-
+ cpa_flush(&cpa, cache);
out:
return ret;
}
@@ -1842,14 +1808,14 @@ out_err:
}
EXPORT_SYMBOL(set_memory_uc);
-static int _set_memory_array(unsigned long *addr, int addrinarray,
+static int _set_memory_array(unsigned long *addr, int numpages,
enum page_cache_mode new_type)
{
enum page_cache_mode set_type;
int i, j;
int ret;
- for (i = 0; i < addrinarray; i++) {
+ for (i = 0; i < numpages; i++) {
ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
new_type, NULL);
if (ret)
@@ -1860,11 +1826,11 @@ static int _set_memory_array(unsigned long *addr, int addrinarray,
set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
_PAGE_CACHE_MODE_UC_MINUS : new_type;
- ret = change_page_attr_set(addr, addrinarray,
+ ret = change_page_attr_set(addr, numpages,
cachemode2pgprot(set_type), 1);
if (!ret && new_type == _PAGE_CACHE_MODE_WC)
- ret = change_page_attr_set_clr(addr, addrinarray,
+ ret = change_page_attr_set_clr(addr, numpages,
cachemode2pgprot(
_PAGE_CACHE_MODE_WC),
__pgprot(_PAGE_CACHE_MASK),
@@ -1881,36 +1847,34 @@ out_free:
return ret;
}
-int set_memory_array_uc(unsigned long *addr, int addrinarray)
+int set_memory_array_uc(unsigned long *addr, int numpages)
{
- return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
+ return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_UC_MINUS);
}
EXPORT_SYMBOL(set_memory_array_uc);
-int set_memory_array_wc(unsigned long *addr, int addrinarray)
+int set_memory_array_wc(unsigned long *addr, int numpages)
{
- return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WC);
+ return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_WC);
}
EXPORT_SYMBOL(set_memory_array_wc);
-int set_memory_array_wt(unsigned long *addr, int addrinarray)
+int set_memory_array_wt(unsigned long *addr, int numpages)
{
- return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WT);
+ return _set_memory_array(addr, numpages, _PAGE_CACHE_MODE_WT);
}
EXPORT_SYMBOL_GPL(set_memory_array_wt);
int _set_memory_wc(unsigned long addr, int numpages)
{
int ret;
- unsigned long addr_copy = addr;
ret = change_page_attr_set(&addr, numpages,
cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
0);
if (!ret) {
- ret = change_page_attr_set_clr(&addr_copy, numpages,
- cachemode2pgprot(
- _PAGE_CACHE_MODE_WC),
+ ret = change_page_attr_set_clr(&addr, numpages,
+ cachemode2pgprot(_PAGE_CACHE_MODE_WC),
__pgprot(_PAGE_CACHE_MASK),
0, 0, NULL);
}
@@ -1977,18 +1941,18 @@ int set_memory_wb(unsigned long addr, int numpages)
}
EXPORT_SYMBOL(set_memory_wb);
-int set_memory_array_wb(unsigned long *addr, int addrinarray)
+int set_memory_array_wb(unsigned long *addr, int numpages)
{
int i;
int ret;
/* WB cache mode is hard wired to all cache attribute bits being 0 */
- ret = change_page_attr_clear(addr, addrinarray,
+ ret = change_page_attr_clear(addr, numpages,
__pgprot(_PAGE_CACHE_MASK), 1);
if (ret)
return ret;
- for (i = 0; i < addrinarray; i++)
+ for (i = 0; i < numpages; i++)
free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);
return 0;
@@ -2058,7 +2022,6 @@ int set_memory_global(unsigned long addr, int numpages)
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
{
struct cpa_data cpa;
- unsigned long start;
int ret;
/* Nothing to do if memory encryption is not active */
@@ -2069,8 +2032,6 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
addr &= PAGE_MASK;
- start = addr;
-
memset(&cpa, 0, sizeof(cpa));
cpa.vaddr = &addr;
cpa.numpages = numpages;
@@ -2085,18 +2046,18 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
/*
* Before changing the encryption attribute, we need to flush caches.
*/
- cpa_flush_range(start, numpages, 1);
+ cpa_flush(&cpa, 1);
ret = __change_page_attr_set_clr(&cpa, 1);
/*
- * After changing the encryption attribute, we need to flush TLBs
- * again in case any speculative TLB caching occurred (but no need
- * to flush caches again). We could just use cpa_flush_all(), but
- * in case TLB flushing gets optimized in the cpa_flush_range()
- * path use the same logic as above.
+ * After changing the encryption attribute, we need to flush TLBs again
+ * in case any speculative TLB caching occurred (but no need to flush
+ * caches again). We could just use cpa_flush_all(), but in case TLB
+ * flushing gets optimized in the cpa_flush() path use the same logic
+ * as above.
*/
- cpa_flush_range(start, numpages, 0);
+ cpa_flush(&cpa, 0);
return ret;
}
@@ -2121,7 +2082,7 @@ int set_pages_uc(struct page *page, int numpages)
}
EXPORT_SYMBOL(set_pages_uc);
-static int _set_pages_array(struct page **pages, int addrinarray,
+static int _set_pages_array(struct page **pages, int numpages,
enum page_cache_mode new_type)
{
unsigned long start;
@@ -2131,7 +2092,7 @@ static int _set_pages_array(struct page **pages, int addrinarray,
int free_idx;
int ret;
- for (i = 0; i < addrinarray; i++) {
+ for (i = 0; i < numpages; i++) {
if (PageHighMem(pages[i]))
continue;
start = page_to_pfn(pages[i]) << PAGE_SHIFT;
@@ -2144,10 +2105,10 @@ static int _set_pages_array(struct page **pages, int addrinarray,
set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
_PAGE_CACHE_MODE_UC_MINUS : new_type;
- ret = cpa_set_pages_array(pages, addrinarray,
+ ret = cpa_set_pages_array(pages, numpages,
cachemode2pgprot(set_type));
if (!ret && new_type == _PAGE_CACHE_MODE_WC)
- ret = change_page_attr_set_clr(NULL, addrinarray,
+ ret = change_page_attr_set_clr(NULL, numpages,
cachemode2pgprot(
_PAGE_CACHE_MODE_WC),
__pgprot(_PAGE_CACHE_MASK),
@@ -2167,21 +2128,21 @@ err_out:
return -EINVAL;
}
-int set_pages_array_uc(struct page **pages, int addrinarray)
+int set_pages_array_uc(struct page **pages, int numpages)
{
- return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_UC_MINUS);
+ return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_UC_MINUS);
}
EXPORT_SYMBOL(set_pages_array_uc);
-int set_pages_array_wc(struct page **pages, int addrinarray)
+int set_pages_array_wc(struct page **pages, int numpages)
{
- return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WC);
+ return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WC);
}
EXPORT_SYMBOL(set_pages_array_wc);
-int set_pages_array_wt(struct page **pages, int addrinarray)
+int set_pages_array_wt(struct page **pages, int numpages)
{
- return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WT);
+ return _set_pages_array(pages, numpages, _PAGE_CACHE_MODE_WT);
}
EXPORT_SYMBOL_GPL(set_pages_array_wt);
@@ -2193,7 +2154,7 @@ int set_pages_wb(struct page *page, int numpages)
}
EXPORT_SYMBOL(set_pages_wb);
-int set_pages_array_wb(struct page **pages, int addrinarray)
+int set_pages_array_wb(struct page **pages, int numpages)
{
int retval;
unsigned long start;
@@ -2201,12 +2162,12 @@ int set_pages_array_wb(struct page **pages, int addrinarray)
int i;
/* WB cache mode is hard wired to all cache attribute bits being 0 */
- retval = cpa_clear_pages_array(pages, addrinarray,
+ retval = cpa_clear_pages_array(pages, numpages,
__pgprot(_PAGE_CACHE_MASK));
if (retval)
return retval;
- for (i = 0; i < addrinarray; i++) {
+ for (i = 0; i < numpages; i++) {
if (PageHighMem(pages[i]))
continue;
start = page_to_pfn(pages[i]) << PAGE_SHIFT;
@@ -2338,8 +2299,8 @@ bool kernel_page_present(struct page *page)
#endif /* CONFIG_DEBUG_PAGEALLOC */
-int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
- unsigned numpages, unsigned long page_flags)
+int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+ unsigned numpages, unsigned long page_flags)
{
int retval = -EINVAL;
@@ -2353,6 +2314,8 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
.flags = 0,
};
+ WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
+
if (!(__supported_pte_mask & _PAGE_NX))
goto out;
@@ -2375,6 +2338,40 @@ out:
}
/*
+ * __flush_tlb_all() flushes mappings only on the current CPU, so this
+ * function shouldn't be used in an SMP environment. Presently it is used
+ * only during boot (well before smp_init()) by the EFI subsystem, which
+ * is safe.
+ */
+int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned long numpages)
+{
+ int retval;
+
+ /*
+ * The typical sequence for unmapping is to find a pte through
+ * lookup_address_in_pgd() (ideally, it should never return NULL because
+ * the address is already mapped) and change its protections. As pfn is
+ * the *target* of a mapping, it's not useful while unmapping.
+ */
+ struct cpa_data cpa = {
+ .vaddr = &address,
+ .pfn = 0,
+ .pgd = pgd,
+ .numpages = numpages,
+ .mask_set = __pgprot(0),
+ .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
+ .flags = 0,
+ };
+
+ WARN_ONCE(num_online_cpus() > 1, "Don't call after initializing SMP");
+
+ retval = __change_page_attr_set_clr(&cpa, 0);
+ __flush_tlb_all();
+
+ return retval;
+}
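
The EFI quirk code further below is the intended caller: after SetVirtualAddressMap(), boot-services regions can be torn down by handing the EFI page tables and the region's range to this helper. A hedged sketch of such a call ('md' is an assumed efi_memory_desc_t):

    /* Boot-time only: num_online_cpus() == 1, per the WARN_ONCE above. */
    kernel_unmap_pages_in_pgd(efi_mm.pgd, md->virt_addr, md->num_pages);
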
+
+/*
* The testcases use internal knowledge of the implementation that shouldn't
* be exposed to the rest of the kernel. Include these directly here.
*/
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 08013524fba1..4fe956a63b25 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -519,8 +519,13 @@ static u64 sanitize_phys(u64 address)
* for a "decoy" virtual address (bit 63 clear) passed to
* set_memory_X(). __pa() on a "decoy" address results in a
* physical address with bit 63 set.
+ *
+ * Decoy addresses are not present for 32-bit builds, see
+ * set_mce_nospec().
*/
- return address & __PHYSICAL_MASK;
+ if (IS_ENABLED(CONFIG_X86_64))
+ return address & __PHYSICAL_MASK;
+ return address;
}
/*
@@ -546,7 +551,11 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
start = sanitize_phys(start);
end = sanitize_phys(end);
- BUG_ON(start >= end); /* end is exclusive */
+ if (start >= end) {
+ WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
+ start, end - 1, cattr_name(req_type));
+ return -EINVAL;
+ }
if (!pat_enabled()) {
/* This is identical to page table setting without PAT */
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 6e98e0a7c923..047a77f6a10c 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -131,6 +131,7 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot, int pkey
* in the process's lifetime will not accidentally get access
* to data which is pkey-protected later on.
*/
+static
u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) |
PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index bddd6b3cee1d..999d6d8f0bef 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -7,7 +7,6 @@
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
-#include <linux/ptrace.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
@@ -16,6 +15,8 @@
#include <asm/apic.h>
#include <asm/uv/uv.h>
+#include "mm_internal.h"
+
/*
* TLB flushing, formerly SMP-only
* c/o Linus Torvalds.
@@ -31,6 +32,12 @@
*/
/*
+ * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
+ * stored in cpu_tlb_state.last_user_mm_ibpb.
+ */
+#define LAST_USER_MM_IBPB 0x1UL
+
+/*
* We get here when we do something requiring a TLB invalidation
* but could not go invalidate all of the contexts. We do the
* necessary invalidation by clearing out the 'ctx_id' which
@@ -181,17 +188,87 @@ static void sync_current_stack_to_mm(struct mm_struct *mm)
}
}
-static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id)
+static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+{
+ unsigned long next_tif = task_thread_info(next)->flags;
+ unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
+
+ return (unsigned long)next->mm | ibpb;
+}
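
mm pointers are at least word-aligned, so bit 0 is free to carry the incoming task's TIF_SPEC_IB state; comparing the mangled values catches a change of mm, a change of IB state, or both. A worked example (addresses are illustrative):

    unsigned long prev_mm = 0xffff888123450000UL;      /* IB bit clear  */
    unsigned long next_mm = 0xffff888123450000UL | 1;  /* same mm, IB set */

    /* next_mm != prev_mm and (next_mm | prev_mm) has LAST_USER_MM_IBPB
     * set, so cond_ibpb() issues a barrier even though the mm itself
     * did not change.
     */
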
+
+static void cond_ibpb(struct task_struct *next)
{
+ if (!next || !next->mm)
+ return;
+
/*
- * Check if the current (previous) task has access to the memory
- * of the @tsk (next) task. If access is denied, make sure to
- * issue a IBPB to stop user->user Spectre-v2 attacks.
- *
- * Note: __ptrace_may_access() returns 0 or -ERRNO.
+ * Both the conditional and the always-on IBPB mode use the mm
+ * pointer to avoid the IBPB when switching between tasks of the
+ * same process. Using the mm pointer instead of mm->context.ctx_id
+ * opens a hypothetical hole vs. mm_struct reuse, which is more or
+ * less impossible for an attacker to control. Aside from that, it
+ * would only affect the first schedule, so the theoretically
+ * exposed data is not really interesting.
*/
- return (tsk && tsk->mm && tsk->mm->context.ctx_id != last_ctx_id &&
- ptrace_may_access_sched(tsk, PTRACE_MODE_SPEC_IBPB));
+ if (static_branch_likely(&switch_mm_cond_ibpb)) {
+ unsigned long prev_mm, next_mm;
+
+ /*
+ * This is a bit more complex than the always mode because
+ * it has to handle two cases:
+ *
+ * 1) Switch from a user space task (potential attacker)
+ * which has TIF_SPEC_IB set to a user space task
+ * (potential victim) which has TIF_SPEC_IB not set.
+ *
+ * 2) Switch from a user space task (potential attacker)
+ * which has TIF_SPEC_IB not set to a user space task
+ * (potential victim) which has TIF_SPEC_IB set.
+ *
+ * This could be done by unconditionally issuing IBPB when
+ * a task which has TIF_SPEC_IB set is either scheduled in
+ * or out. Though that results in two flushes when:
+ *
+ * - the same user space task is scheduled out and later
+ * scheduled in again and only a kernel thread ran in
+ * between.
+ *
+ * - a user space task belonging to the same process is
+ * scheduled in after a kernel thread ran in between
+ *
+ * - a user space task belonging to the same process is
+ * scheduled in immediately.
+ *
+ * Optimize this with reasonably small overhead for the
+ * above cases. Mangle the TIF_SPEC_IB bit into the mm
+ * pointer of the incoming task which is stored in
+ * cpu_tlbstate.last_user_mm_ibpb for comparison.
+ */
+ next_mm = mm_mangle_tif_spec_ib(next);
+ prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
+
+ /*
+ * Issue IBPB only if the mm's are different and one or
+ * both have the IBPB bit set.
+ */
+ if (next_mm != prev_mm &&
+ (next_mm | prev_mm) & LAST_USER_MM_IBPB)
+ indirect_branch_prediction_barrier();
+
+ this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
+ }
+
+ if (static_branch_unlikely(&switch_mm_always_ibpb)) {
+ /*
+ * Only flush when switching to a user space task with a
+ * different context than the user space task which ran
+ * last on this CPU.
+ */
+ if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
+ indirect_branch_prediction_barrier();
+ this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
+ }
+ }
}
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
@@ -292,22 +369,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
new_asid = prev_asid;
need_flush = true;
} else {
- u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
-
/*
* Avoid user/user BTB poisoning by flushing the branch
* predictor when switching between processes. This stops
* one process from doing Spectre-v2 attacks on another.
- *
- * As an optimization, flush indirect branches only when
- * switching into a processes that can't be ptrace by the
- * current one (as in such case, attacker has much more
- * convenient way how to tamper with the next process than
- * branch buffer poisoning).
*/
- if (static_cpu_has(X86_FEATURE_USE_IBPB) &&
- ibpb_needed(tsk, last_ctx_id))
- indirect_branch_prediction_barrier();
+ cond_ibpb(tsk);
if (IS_ENABLED(CONFIG_VMAP_STACK)) {
/*
@@ -365,14 +432,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
}
- /*
- * Record last user mm's context id, so we can avoid
- * flushing branch buffer with IBPB if we switch back
- * to the same user.
- */
- if (next != &init_mm)
- this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
-
/* Make sure we write CR3 before loaded_mm. */
barrier();
@@ -441,7 +500,7 @@ void initialize_tlbstate_and_flush(void)
write_cr3(build_cr3(mm->pgd, 0));
/* Reinitialize tlbstate. */
- this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id);
+ this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
this_cpu_write(cpu_tlbstate.next_asid, 1);
this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
@@ -664,7 +723,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
*
* This is in units of pages.
*/
-static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned int stride_shift,
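
The cond_ibpb() change above folds the per-task TIF_SPEC_IB bit into bit 0 of the mm pointer, so a single comparison covers both "different mm" and "at least one side asked for IBPB". A minimal user-space sketch of that check; the tagging helper, the stand-in tasks and the LAST_USER_MM_IBPB value are illustrative, not the kernel definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define LAST_USER_MM_IBPB 0x1UL   /* bit 0: task ran with TIF_SPEC_IB set */

    /* Mangle the IBPB-request bit into the mm pointer (sketch). */
    static uintptr_t mangle_mm(void *mm, int tif_spec_ib)
    {
        return (uintptr_t)mm | (tif_spec_ib ? LAST_USER_MM_IBPB : 0);
    }

    static int need_ibpb(uintptr_t prev_mm, uintptr_t next_mm)
    {
        /* Flush only if the mms differ and at least one requested IBPB. */
        return next_mm != prev_mm && ((next_mm | prev_mm) & LAST_USER_MM_IBPB);
    }

    int main(void)
    {
        int a, b;   /* aligned objects standing in for two mm_structs */

        printf("%d\n", need_ibpb(mangle_mm(&a, 1), mangle_mm(&b, 0))); /* 1 */
        printf("%d\n", need_ibpb(mangle_mm(&a, 1), mangle_mm(&a, 1))); /* 0: same mm */
        printf("%d\n", need_ibpb(mangle_mm(&a, 0), mangle_mm(&b, 0))); /* 0: no IBPB bit */
        return 0;
    }

Tagging bit 0 is safe because mm_struct pointers are always at least word aligned, so the low bit is guaranteed to be free for the flag.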
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 2580cd2e98b1..5542303c43d9 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1181,6 +1181,8 @@ out_image:
}
if (!image || !prog->is_func || extra_pass) {
+ if (image)
+ bpf_prog_fill_jited_linfo(prog, addrs);
out_addrs:
kfree(addrs);
kfree(jit_data);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 8cd66152cdb0..9df652d3d927 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -59,7 +59,7 @@ static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
{
struct pcibios_fwaddrmap *map;
- WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock));
+ lockdep_assert_held(&pcibios_fwaddrmap_lock);
list_for_each_entry(map, &pcibios_fwaddrmappings, list)
if (map->dev == dev)
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index ce4b06733c09..b3233b1835ea 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -84,7 +84,7 @@ static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
}
static void ce4100_serial_fixup(int port, struct uart_port *up,
- u32 *capabilites)
+ u32 *capabilities)
{
#ifdef CONFIG_EARLY_PRINTK
/*
@@ -111,7 +111,7 @@ static void ce4100_serial_fixup(int port, struct uart_port *up,
up->serial_in = ce4100_mem_serial_in;
up->serial_out = ce4100_mem_serial_out;
- *capabilites |= (1 << 12);
+ *capabilities |= (1 << 12);
}
static __init void sdv_serial_fixup(void)
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
index 7476b3b097e1..7138bc7a265c 100644
--- a/arch/x86/platform/efi/early_printk.c
+++ b/arch/x86/platform/efi/early_printk.c
@@ -183,7 +183,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
num--;
}
- if (efi_x >= si->lfb_width) {
+ if (efi_x + font->width > si->lfb_width) {
efi_x = 0;
efi_y += font->height;
}
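
The old test wrapped only once the cursor had already crossed the right edge, so when the framebuffer width is not an exact multiple of the font width the last glyph on a row could be drawn partly off-screen. The new test wraps as soon as the *next* glyph would not fit. A sketch of the fixed cursor advance; the sizes are hypothetical, the real code reads them from screen_info and the loaded font:

    #include <stdio.h>

    #define LFB_WIDTH   800
    #define FONT_WIDTH  8
    #define FONT_HEIGHT 16

    static unsigned int efi_x, efi_y;

    static void advance(void)
    {
        efi_x += FONT_WIDTH;
        /* old check was "efi_x >= LFB_WIDTH": with a width that is not a
         * multiple of FONT_WIDTH, one more glyph could overflow the row */
        if (efi_x + FONT_WIDTH > LFB_WIDTH) {
            efi_x = 0;
            efi_y += FONT_HEIGHT;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 120; i++)
            advance();
        printf("cursor at (%u, %u)\n", efi_x, efi_y);
        return 0;
    }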
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 7ae939e353cd..e1cb01a22fa8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -993,6 +993,8 @@ static void __init __efi_enter_virtual_mode(void)
panic("EFI call to SetVirtualAddressMap() failed!");
}
+ efi_free_boot_services();
+
/*
* Now that EFI is in virtual mode, update the function
* pointers in the runtime service table to the new virtual addresses.
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 95e77a667ba5..17456a1d3f04 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -369,6 +369,40 @@ void __init efi_reserve_boot_services(void)
}
}
+/*
+ * Apart from having VA mappings for EFI boot services code/data regions,
+ * (duplicate) 1:1 mappings were also created as a quirk for buggy firmware. So,
+ * unmap both 1:1 and VA mappings.
+ */
+static void __init efi_unmap_pages(efi_memory_desc_t *md)
+{
+ pgd_t *pgd = efi_mm.pgd;
+ u64 pa = md->phys_addr;
+ u64 va = md->virt_addr;
+
+ /*
+ * To Do: Remove this check after adding functionality to unmap EFI boot
+ * services code/data regions from direct mapping area because
+ * "efi=old_map" maps EFI regions in swapper_pg_dir.
+ */
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
+ /*
+ * EFI mixed mode has all RAM mapped to access arguments while making
+ * EFI runtime calls, hence don't unmap EFI boot services code/data
+ * regions.
+ */
+ if (!efi_is_native())
+ return;
+
+ if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages))
+ pr_err("Failed to unmap 1:1 mapping for 0x%llx\n", pa);
+
+ if (kernel_unmap_pages_in_pgd(pgd, va, md->num_pages))
+ pr_err("Failed to unmap VA mapping for 0x%llx\n", va);
+}
+
void __init efi_free_boot_services(void)
{
phys_addr_t new_phys, new_size;
@@ -394,6 +428,13 @@ void __init efi_free_boot_services(void)
}
/*
+ * Before calling set_virtual_address_map(), EFI boot services
+ * code/data regions were mapped as a quirk for buggy firmware.
+ * Unmap them from efi_pgd before freeing them up.
+ */
+ efi_unmap_pages(md);
+
+ /*
* Nasty quirk: if all sub-1MB memory is used for boot
* services, we can get here without having allocated the
* real mode trampoline. It's too late to hand boot services
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
index dbfc5cf2aa93..96f438d4b026 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
@@ -1,5 +1,5 @@
/*
- * platform_bcm43xx.c: bcm43xx platform data initilization file
+ * platform_bcm43xx.c: bcm43xx platform data initialization file
*
* (C) Copyright 2016 Intel Corporation
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
index 27186ad654c9..7a7fc54c449b 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
@@ -1,5 +1,5 @@
/*
- * spidev platform data initilization file
+ * spidev platform data initialization file
*
* (C) Copyright 2014, 2016 Intel Corporation
* Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
index 429a94192671..8344d5a928c9 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
@@ -1,5 +1,5 @@
/*
- * PCAL9555a platform data initilization file
+ * PCAL9555a platform data initialization file
*
* Copyright (C) 2016, Intel Corporation
*
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
index 2e569d10f2d0..a9f2e888e135 100644
--- a/arch/x86/platform/intel/iosf_mbi.c
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -13,7 +13,7 @@
*
*
* The IOSF-SB is a fabric bus available on Atom based SOC's that uses a
- * mailbox interface (MBI) to communicate with mutiple devices. This
+ * mailbox interface (MBI) to communicate with multiple devices. This
* driver implements access to this interface for those platforms that can
* enumerate the device using PCI.
*/
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 7fa8b3b53bc0..d9b8a1c1ab0f 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -109,7 +109,7 @@ static void detect_lid_state(void)
* the edge detector hookup on the gpio inputs on the geode is
* odd, to say the least. See http://dev.laptop.org/ticket/5703
* for details, but in a nutshell: we don't use the edge
- * detectors. instead, we make use of an anomoly: with the both
+ * detectors. instead, we make use of an anomaly: with the both
* edge detectors turned off, we still get an edge event on a
* positive edge transition. to take advantage of this, we use the
* front-end inverter to ensure that that's the edge we're always
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index 24d2175a9480..b4ab779f1d47 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -19,7 +19,6 @@
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/of_pdt.h>
#include <asm/olpc.h>
#include <asm/olpc_ofw.h>
@@ -285,20 +284,3 @@ void __init olpc_dt_build_devicetree(void)
pr_info("PROM DT: Built device tree with %u bytes of memory.\n",
prom_early_allocated);
}
-
-/* A list of DT node/bus matches that we want to expose as platform devices */
-static struct of_device_id __initdata of_ids[] = {
- { .compatible = "olpc,xo1-battery" },
- { .compatible = "olpc,xo1-dcon" },
- { .compatible = "olpc,xo1-rtc" },
- {},
-};
-
-static int __init olpc_create_platform_devices(void)
-{
- if (machine_is_olpc())
- return of_platform_bus_probe(NULL, of_ids, NULL);
- else
- return 0;
-}
-device_initcall(olpc_create_platform_devices);
diff --git a/arch/x86/platform/pvh/Makefile b/arch/x86/platform/pvh/Makefile
new file mode 100644
index 000000000000..5dec5067c9fb
--- /dev/null
+++ b/arch/x86/platform/pvh/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+OBJECT_FILES_NON_STANDARD_head.o := y
+
+obj-$(CONFIG_PVH) += enlighten.o
+obj-$(CONFIG_PVH) += head.o
diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c
new file mode 100644
index 000000000000..62f5c7045944
--- /dev/null
+++ b/arch/x86/platform/pvh/enlighten.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/acpi.h>
+
+#include <xen/hvc-console.h>
+
+#include <asm/io_apic.h>
+#include <asm/hypervisor.h>
+#include <asm/e820/api.h>
+#include <asm/x86_init.h>
+
+#include <asm/xen/interface.h>
+
+#include <xen/xen.h>
+#include <xen/interface/hvm/start_info.h>
+
+/*
+ * PVH variables.
+ *
+ * pvh_bootparams and pvh_start_info need to live in the data segment since
+ * they are used after startup_{32|64}, which clear .bss, are invoked.
+ */
+struct boot_params pvh_bootparams __attribute__((section(".data")));
+struct hvm_start_info pvh_start_info __attribute__((section(".data")));
+
+unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
+
+static u64 pvh_get_root_pointer(void)
+{
+ return pvh_start_info.rsdp_paddr;
+}
+
+/*
+ * Xen guests are able to obtain the memory map from the hypervisor via the
+ * HYPERVISOR_memory_op hypercall.
+ * If we are trying to boot a Xen PVH guest, it is expected that the kernel
+ * will have been configured to provide an override for this routine to do
+ * just that.
+ */
+void __init __weak mem_map_via_hcall(struct boot_params *ptr __maybe_unused)
+{
+ xen_raw_printk("Error: Could not find memory map\n");
+ BUG();
+}
+
+static void __init init_pvh_bootparams(bool xen_guest)
+{
+ memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
+
+ if ((pvh_start_info.version > 0) && (pvh_start_info.memmap_entries)) {
+ struct hvm_memmap_table_entry *ep;
+ int i;
+
+ ep = __va(pvh_start_info.memmap_paddr);
+ pvh_bootparams.e820_entries = pvh_start_info.memmap_entries;
+
+ for (i = 0; i < pvh_bootparams.e820_entries ; i++, ep++) {
+ pvh_bootparams.e820_table[i].addr = ep->addr;
+ pvh_bootparams.e820_table[i].size = ep->size;
+ pvh_bootparams.e820_table[i].type = ep->type;
+ }
+ } else if (xen_guest) {
+ mem_map_via_hcall(&pvh_bootparams);
+ } else {
+ /* Non-xen guests are not supported by version 0 */
+ BUG();
+ }
+
+ if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
+ ISA_START_ADDRESS;
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
+ ISA_END_ADDRESS - ISA_START_ADDRESS;
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
+ E820_TYPE_RESERVED;
+ pvh_bootparams.e820_entries++;
+ } else
+ xen_raw_printk("Warning: Can fit ISA range into e820\n");
+
+ pvh_bootparams.hdr.cmd_line_ptr =
+ pvh_start_info.cmdline_paddr;
+
+ /* The first module is always ramdisk. */
+ if (pvh_start_info.nr_modules) {
+ struct hvm_modlist_entry *modaddr =
+ __va(pvh_start_info.modlist_paddr);
+ pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
+ pvh_bootparams.hdr.ramdisk_size = modaddr->size;
+ }
+
+ /*
+ * See Documentation/x86/boot.txt.
+ *
+ * Version 2.12 supports Xen entry point but we will use default x86/PC
+ * environment (i.e. hardware_subarch 0).
+ */
+ pvh_bootparams.hdr.version = (2 << 8) | 12;
+ pvh_bootparams.hdr.type_of_loader = ((xen_guest ? 0x9 : 0xb) << 4) | 0;
+
+ x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
+}
+
+/*
+ * If we are trying to boot a Xen PVH guest, it is expected that the kernel
+ * will have been configured to provide the required override for this routine.
+ */
+void __init __weak xen_pvh_init(void)
+{
+ xen_raw_printk("Error: Missing xen PVH initialization\n");
+ BUG();
+}
+
+static void hypervisor_specific_init(bool xen_guest)
+{
+ if (xen_guest)
+ xen_pvh_init();
+}
+
+/*
+ * This routine (and those that it might call) should not use
+ * anything that lives in .bss since that segment will be cleared later.
+ */
+void __init xen_prepare_pvh(void)
+{
+
+ u32 msr = xen_cpuid_base();
+ bool xen_guest = !!msr;
+
+ if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
+ xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
+ pvh_start_info.magic);
+ BUG();
+ }
+
+ hypervisor_specific_init(xen_guest);
+
+ init_pvh_bootparams(xen_guest);
+}
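
init_pvh_bootparams() above does two things worth unpacking: it copies the hypervisor-provided memory map into the zeropage's e820 table, and it always appends a reserved entry covering the ISA hole. A user-space sketch of that flow; the struct layout, constants and sample map are simplified stand-ins, not the kernel definitions:

    #include <stdio.h>
    #include <stdint.h>

    #define E820_MAX      8
    #define E820_RAM      1
    #define E820_RESERVED 2
    #define ISA_START     0xa0000
    #define ISA_END       0x100000

    struct e820_entry { uint64_t addr, size; uint32_t type; };

    int main(void)
    {
        struct e820_entry src[] = {                /* as if from hvm_start_info */
            { 0x0,      0xa0000,    E820_RAM },
            { 0x100000, 0x3ff00000, E820_RAM },
        };
        struct e820_entry table[E820_MAX];
        int n = 0;

        for (int i = 0; i < 2; i++)                /* copy the hypervisor map */
            table[n++] = src[i];

        if (n < E820_MAX) {                        /* always reserve the ISA hole */
            table[n++] = (struct e820_entry){
                ISA_START, ISA_END - ISA_START, E820_RESERVED };
        }

        for (int i = 0; i < n; i++)
            printf("[%#010llx-%#010llx] type=%u\n",
                   (unsigned long long)table[i].addr,
                   (unsigned long long)(table[i].addr + table[i].size - 1),
                   table[i].type);
        return 0;
    }

The boot header fields follow the same packed-constant idiom visible in the hunk: protocol version 2.12 is (2 << 8) | 12, and type_of_loader packs a loader ID into the high nibble, 0x9 for Xen and 0xb otherwise.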
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/platform/pvh/head.S
index 1f8825bbaffb..1f8825bbaffb 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/platform/pvh/head.S
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 5f64f30873e2..b21a932c220c 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu)
}
}
-/* Ping non-responding CPU's attemping to force them into the NMI handler */
+/* Ping non-responding CPU's attempting to force them into the NMI handler */
static void uv_nmi_nr_cpus_ping(void)
{
int cpu;
diff --git a/arch/x86/um/vdso/Makefile b/arch/x86/um/vdso/Makefile
index 822ccdba93ad..bf94060fc06f 100644
--- a/arch/x86/um/vdso/Makefile
+++ b/arch/x86/um/vdso/Makefile
@@ -26,7 +26,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
CPPFLAGS_vdso.lds += -P -C
VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
- -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
+ -Wl,-z,max-page-size=4096
$(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 1ef391aa184d..e07abefd3d26 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -74,6 +74,7 @@ config XEN_DEBUG_FS
Enabling this option may incur a significant performance overhead.
config XEN_PVH
- bool "Support for running as a PVH guest"
+ bool "Support for running as a Xen PVH guest"
depends on XEN && XEN_PVHVM && ACPI
+ select PVH
def_bool n
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index dd2550d33b38..084de77a109e 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y
-OBJECT_FILES_NON_STANDARD_xen-pvh.o := y
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
@@ -38,7 +37,6 @@ obj-$(CONFIG_XEN_PV) += xen-asm.o
obj-$(CONFIG_XEN_PV) += xen-asm_$(BITS).o
obj-$(CONFIG_XEN_PVH) += enlighten_pvh.o
-obj-$(CONFIG_XEN_PVH) += xen-pvh.o
obj-$(CONFIG_EVENT_TRACING) += trace.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index e996e8e744cb..750f46ad018a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -10,7 +10,6 @@
#include <xen/xen.h>
#include <xen/features.h>
#include <xen/page.h>
-#include <xen/interface/memory.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
@@ -346,80 +345,3 @@ void xen_arch_unregister_cpu(int num)
}
EXPORT_SYMBOL(xen_arch_unregister_cpu);
#endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-void __init arch_xen_balloon_init(struct resource *hostmem_resource)
-{
- struct xen_memory_map memmap;
- int rc;
- unsigned int i, last_guest_ram;
- phys_addr_t max_addr = PFN_PHYS(max_pfn);
- struct e820_table *xen_e820_table;
- const struct e820_entry *entry;
- struct resource *res;
-
- if (!xen_initial_domain())
- return;
-
- xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
- if (!xen_e820_table)
- return;
-
- memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
- set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
- rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
- if (rc) {
- pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
- goto out;
- }
-
- last_guest_ram = 0;
- for (i = 0; i < memmap.nr_entries; i++) {
- if (xen_e820_table->entries[i].addr >= max_addr)
- break;
- if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
- last_guest_ram = i;
- }
-
- entry = &xen_e820_table->entries[last_guest_ram];
- if (max_addr >= entry->addr + entry->size)
- goto out; /* No unallocated host RAM. */
-
- hostmem_resource->start = max_addr;
- hostmem_resource->end = entry->addr + entry->size;
-
- /*
- * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
- * as unavailable. The rest of that region can be used for hotplug-based
- * ballooning.
- */
- for (; i < memmap.nr_entries; i++) {
- entry = &xen_e820_table->entries[i];
-
- if (entry->type == E820_TYPE_RAM)
- continue;
-
- if (entry->addr >= hostmem_resource->end)
- break;
-
- res = kzalloc(sizeof(*res), GFP_KERNEL);
- if (!res)
- goto out;
-
- res->name = "Unavailable host RAM";
- res->start = entry->addr;
- res->end = (entry->addr + entry->size < hostmem_resource->end) ?
- entry->addr + entry->size : hostmem_resource->end;
- rc = insert_resource(hostmem_resource, res);
- if (rc) {
- pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
- __func__, res->start, res->end, rc);
- kfree(res);
- goto out;
- }
- }
-
- out:
- kfree(xen_e820_table);
-}
-#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index 02e3ab7ff242..35b7599d2d0b 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -6,103 +6,45 @@
#include <asm/io_apic.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
-#include <asm/x86_init.h>
+#include <xen/xen.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
-#include <xen/xen.h>
#include <xen/interface/memory.h>
-#include <xen/interface/hvm/start_info.h>
/*
* PVH variables.
*
- * xen_pvh pvh_bootparams and pvh_start_info need to live in data segment
- * since they are used after startup_{32|64}, which clear .bss, are invoked.
+ * The variable xen_pvh needs to live in the data segment since it is used
+ * after startup_{32|64} is invoked, which will clear the .bss segment.
*/
bool xen_pvh __attribute__((section(".data"))) = 0;
-struct boot_params pvh_bootparams __attribute__((section(".data")));
-struct hvm_start_info pvh_start_info __attribute__((section(".data")));
-
-unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
-static u64 pvh_get_root_pointer(void)
+void __init xen_pvh_init(void)
{
- return pvh_start_info.rsdp_paddr;
+ u32 msr;
+ u64 pfn;
+
+ xen_pvh = 1;
+ xen_start_flags = pvh_start_info.flags;
+
+ msr = cpuid_ebx(xen_cpuid_base() + 2);
+ pfn = __pa(hypercall_page);
+ wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
}
-static void __init init_pvh_bootparams(void)
+void __init mem_map_via_hcall(struct boot_params *boot_params_p)
{
struct xen_memory_map memmap;
int rc;
- memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
-
- memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table);
- set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table);
+ memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table);
+ set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table);
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
if (rc) {
xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
BUG();
}
- pvh_bootparams.e820_entries = memmap.nr_entries;
-
- if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
- pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
- ISA_START_ADDRESS;
- pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
- ISA_END_ADDRESS - ISA_START_ADDRESS;
- pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
- E820_TYPE_RESERVED;
- pvh_bootparams.e820_entries++;
- } else
- xen_raw_printk("Warning: Can fit ISA range into e820\n");
-
- pvh_bootparams.hdr.cmd_line_ptr =
- pvh_start_info.cmdline_paddr;
-
- /* The first module is always ramdisk. */
- if (pvh_start_info.nr_modules) {
- struct hvm_modlist_entry *modaddr =
- __va(pvh_start_info.modlist_paddr);
- pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
- pvh_bootparams.hdr.ramdisk_size = modaddr->size;
- }
-
- /*
- * See Documentation/x86/boot.txt.
- *
- * Version 2.12 supports Xen entry point but we will use default x86/PC
- * environment (i.e. hardware_subarch 0).
- */
- pvh_bootparams.hdr.version = (2 << 8) | 12;
- pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
-
- x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
-}
-
-/*
- * This routine (and those that it might call) should not use
- * anything that lives in .bss since that segment will be cleared later.
- */
-void __init xen_prepare_pvh(void)
-{
- u32 msr;
- u64 pfn;
-
- if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
- xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
- pvh_start_info.magic);
- BUG();
- }
-
- xen_pvh = 1;
- xen_start_flags = pvh_start_info.flags;
-
- msr = cpuid_ebx(xen_cpuid_base() + 2);
- pfn = __pa(hypercall_page);
- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
-
- init_pvh_bootparams();
+ boot_params_p->e820_entries = memmap.nr_entries;
}
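
The split between platform/pvh/enlighten.c and xen/enlighten_pvh.c leans on weak symbols: the generic PVH code ships __weak versions of xen_pvh_init() and mem_map_via_hcall() that BUG(), and a Xen-enabled build links strong definitions that silently replace them. A two-file sketch of the pattern (the strong side is shown as a comment because an override must live in a separate translation unit):

    /* default.c -- generic code ships a weak fallback */
    #include <stdio.h>
    #include <stdlib.h>

    void __attribute__((weak)) xen_pvh_init(void)
    {
        fprintf(stderr, "Error: Missing xen PVH initialization\n");
        abort();   /* mirrors the kernel's BUG() */
    }

    /* xen.c -- an optional object file provides the strong definition;
     * when it is linked in, the linker drops the weak one above:
     *
     * void xen_pvh_init(void)
     * {
     *         ... register the hypercall page, set start flags ...
     * }
     */

    int main(void)
    {
        xen_pvh_init();   /* aborts unless the strong version is linked */
        return 0;
    }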
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index a5d7ed125337..0f4fe206dcc2 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -648,19 +648,20 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
unsigned long limit)
{
int i, nr, flush = 0;
- unsigned hole_low, hole_high;
+ unsigned hole_low = 0, hole_high = 0;
/* The limit is the last byte to be touched */
limit--;
BUG_ON(limit >= FIXADDR_TOP);
+#ifdef CONFIG_X86_64
/*
* 64-bit has a great big hole in the middle of the address
- * space, which contains the Xen mappings. On 32-bit these
- * will end up making a zero-sized hole and so is a no-op.
+ * space, which contains the Xen mappings.
*/
- hole_low = pgd_index(USER_LIMIT);
- hole_high = pgd_index(PAGE_OFFSET);
+ hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
+ hole_high = pgd_index(GUARD_HOLE_END_ADDR);
+#endif
nr = pgd_index(limit) + 1;
for (i = 0; i < nr; i++) {
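
The mmu_pv.c hunk above narrows the skipped range from the whole user/kernel gap to just the guard hole holding the Xen mappings. The pgd index of an address is simply its top-level page-table slot. A sketch of that arithmetic; the PGDIR_SHIFT/PTRS_PER_PGD values are the standard x86-64 4-level constants, and the GUARD_HOLE_* addresses are assumptions matching a 16-entry hole at the start of kernel space:

    #include <stdio.h>
    #include <stdint.h>

    #define PGDIR_SHIFT  39
    #define PTRS_PER_PGD 512UL

    #define GUARD_HOLE_BASE_ADDR 0xffff800000000000ULL   /* assumption */
    #define GUARD_HOLE_END_ADDR  0xffff880000000000ULL   /* assumption */

    /* Top-level (PGD) slot an address falls into. */
    static unsigned long pgd_index(uint64_t addr)
    {
        return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
    }

    int main(void)
    {
        printf("hole_low=%lu hole_high=%lu\n",
               pgd_index(GUARD_HOLE_BASE_ADDR),
               pgd_index(GUARD_HOLE_END_ADDR));
        return 0;
    }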
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 2bce7958ce8b..0766a08bdf45 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -69,6 +69,11 @@ void xen_mc_flush(void)
trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
+#if MC_DEBUG
+ memcpy(b->debug, b->entries,
+ b->mcidx * sizeof(struct multicall_entry));
+#endif
+
switch (b->mcidx) {
case 0:
/* no-op */
@@ -87,32 +92,34 @@ void xen_mc_flush(void)
break;
default:
-#if MC_DEBUG
- memcpy(b->debug, b->entries,
- b->mcidx * sizeof(struct multicall_entry));
-#endif
-
if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
BUG();
for (i = 0; i < b->mcidx; i++)
if (b->entries[i].result < 0)
ret++;
+ }
+ if (WARN_ON(ret)) {
+ pr_err("%d of %d multicall(s) failed: cpu %d\n",
+ ret, b->mcidx, smp_processor_id());
+ for (i = 0; i < b->mcidx; i++) {
+ if (b->entries[i].result < 0) {
#if MC_DEBUG
- if (ret) {
- printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
- ret, smp_processor_id());
- dump_stack();
- for (i = 0; i < b->mcidx; i++) {
- printk(KERN_DEBUG " call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
- i+1, b->mcidx,
+ pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pF\n",
+ i + 1,
b->debug[i].op,
b->debug[i].args[0],
b->entries[i].result,
b->caller[i]);
+#else
+ pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\n",
+ i + 1,
+ b->entries[i].op,
+ b->entries[i].args[0],
+ b->entries[i].result);
+#endif
}
}
-#endif
}
b->mcidx = 0;
@@ -126,8 +133,6 @@ void xen_mc_flush(void)
b->cbidx = 0;
local_irq_restore(flags);
-
- WARN_ON(ret);
}
struct multicall_space __xen_mc_entry(size_t args)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1163e33121fb..d5f303c0e656 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -493,7 +493,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
* The remap information (which mfn remap to which pfn) is contained in the
* to be remapped memory itself in a linked list anchored at xen_remap_mfn.
* This scheme allows to remap the different chunks in arbitrary order while
- * the resulting mapping will be independant from the order.
+ * the resulting mapping will be independent of the order.
*/
void __init xen_remap_memory(void)
{
@@ -808,6 +808,7 @@ char * __init xen_memory_setup(void)
addr = xen_e820_table.entries[0].addr;
size = xen_e820_table.entries[0].size;
while (i < xen_e820_table.nr_entries) {
+ bool discard = false;
chunk_size = size;
type = xen_e820_table.entries[i].type;
@@ -823,10 +824,11 @@ char * __init xen_memory_setup(void)
xen_add_extra_mem(pfn_s, n_pfns);
xen_max_p2m_pfn = pfn_s + n_pfns;
} else
- type = E820_TYPE_UNUSABLE;
+ discard = true;
}
- xen_align_and_add_e820_region(addr, chunk_size, type);
+ if (!discard)
+ xen_align_and_add_e820_region(addr, chunk_size, type);
addr += chunk_size;
size -= chunk_size;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 1c8a8816a402..3776122c87cc 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -3,22 +3,17 @@
* Split spinlock implementation out into its own file, so it can be
* compiled in a FTRACE-compatible way.
*/
-#include <linux/kernel_stat.h>
+#include <linux/kernel.h>
#include <linux/spinlock.h>
-#include <linux/debugfs.h>
-#include <linux/log2.h>
-#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/paravirt.h>
#include <asm/qspinlock.h>
-#include <xen/interface/xen.h>
#include <xen/events.h>
#include "xen-ops.h"
-#include "debugfs.h"
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index bb1c2da0381d..1e9ef0ba30a5 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -12,6 +12,7 @@
#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
+#include <asm/asm.h>
#include <xen/interface/xen.h>
@@ -24,6 +25,7 @@ ENTRY(xen_\name)
pop %r11
jmp \name
END(xen_\name)
+_ASM_NOKPROBE(xen_\name)
.endm
xen_pv_trap divide_error
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 67904f55f188..120dd746a147 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -94,14 +94,14 @@ int main(void)
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
#if XTENSA_HAVE_COPROCESSORS
- DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
+ DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
+ DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
+ DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
+ DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
+ DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
+ DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
+ DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
#endif
DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 483dcfb6e681..4bb68133a72a 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -94,18 +94,21 @@ void coprocessor_release_all(struct thread_info *ti)
void coprocessor_flush_all(struct thread_info *ti)
{
- unsigned long cpenable;
+ unsigned long cpenable, old_cpenable;
int i;
preempt_disable();
+ RSR_CPENABLE(old_cpenable);
cpenable = ti->cpenable;
+ WSR_CPENABLE(cpenable);
for (i = 0; i < XCHAL_CP_MAX; i++) {
if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
coprocessor_flush(ti, i);
cpenable >>= 1;
}
+ WSR_CPENABLE(old_cpenable);
preempt_enable();
}
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index c0845cb1cbb9..d9541be0605a 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -127,12 +127,37 @@ static int ptrace_setregs(struct task_struct *child, void __user *uregs)
}
+#if XTENSA_HAVE_COPROCESSORS
+#define CP_OFFSETS(cp) \
+ { \
+ .elf_xtregs_offset = offsetof(elf_xtregs_t, cp), \
+ .ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \
+ .sz = sizeof(xtregs_ ## cp ## _t), \
+ }
+
+static const struct {
+ size_t elf_xtregs_offset;
+ size_t ti_offset;
+ size_t sz;
+} cp_offsets[] = {
+ CP_OFFSETS(cp0),
+ CP_OFFSETS(cp1),
+ CP_OFFSETS(cp2),
+ CP_OFFSETS(cp3),
+ CP_OFFSETS(cp4),
+ CP_OFFSETS(cp5),
+ CP_OFFSETS(cp6),
+ CP_OFFSETS(cp7),
+};
+#endif
+
static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
{
struct pt_regs *regs = task_pt_regs(child);
struct thread_info *ti = task_thread_info(child);
elf_xtregs_t __user *xtregs = uregs;
int ret = 0;
+ int i __maybe_unused;
if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
return -EIO;
@@ -140,8 +165,13 @@ static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
#if XTENSA_HAVE_COPROCESSORS
/* Flush all coprocessor registers to memory. */
coprocessor_flush_all(ti);
- ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp,
- sizeof(xtregs_coprocessor_t));
+
+ for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
+ ret |= __copy_to_user((char __user *)xtregs +
+ cp_offsets[i].elf_xtregs_offset,
+ (const char *)ti +
+ cp_offsets[i].ti_offset,
+ cp_offsets[i].sz);
#endif
ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
sizeof(xtregs->opt));
@@ -157,6 +187,7 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
struct pt_regs *regs = task_pt_regs(child);
elf_xtregs_t *xtregs = uregs;
int ret = 0;
+ int i __maybe_unused;
if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
return -EFAULT;
@@ -166,8 +197,11 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
coprocessor_flush_all(ti);
coprocessor_release_all(ti);
- ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
- sizeof(xtregs_coprocessor_t));
+ for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
+ ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset,
+ (const char __user *)xtregs +
+ cp_offsets[i].elf_xtregs_offset,
+ cp_offsets[i].sz);
#endif
ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
sizeof(xtregs->opt));
diff --git a/block/Kconfig b/block/Kconfig
index f7045aa47edb..8044452a4fd3 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -155,12 +155,6 @@ config BLK_CGROUP_IOLATENCY
Note, this is an experimental interface and could be changed someday.
-config BLK_WBT_SQ
- bool "Single queue writeback throttling"
- depends on BLK_WBT
- ---help---
- Enable writeback throttling by default on legacy single queue devices
-
config BLK_WBT_MQ
bool "Multiqueue writeback throttling"
default y
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index f95a48b0d7b2..4626b88b2d5a 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -3,67 +3,6 @@ if BLOCK
menu "IO Schedulers"
-config IOSCHED_NOOP
- bool
- default y
- ---help---
- The no-op I/O scheduler is a minimal scheduler that does basic merging
- and sorting. Its main uses include non-disk based block devices like
- memory devices, and specialised software or hardware environments
- that do their own scheduling and require only minimal assistance from
- the kernel.
-
-config IOSCHED_DEADLINE
- tristate "Deadline I/O scheduler"
- default y
- ---help---
- The deadline I/O scheduler is simple and compact. It will provide
- CSCAN service with FIFO expiration of requests, switching to
- a new point in the service tree and doing a batch of IO from there
- in case of expiry.
-
-config IOSCHED_CFQ
- tristate "CFQ I/O scheduler"
- default y
- ---help---
- The CFQ I/O scheduler tries to distribute bandwidth equally
- among all processes in the system. It should provide a fair
- and low latency working environment, suitable for both desktop
- and server systems.
-
- This is the default I/O scheduler.
-
-config CFQ_GROUP_IOSCHED
- bool "CFQ Group Scheduling support"
- depends on IOSCHED_CFQ && BLK_CGROUP
- ---help---
- Enable group IO scheduling in CFQ.
-
-choice
-
- prompt "Default I/O scheduler"
- default DEFAULT_CFQ
- help
- Select the I/O scheduler which will be used by default for all
- block devices.
-
- config DEFAULT_DEADLINE
- bool "Deadline" if IOSCHED_DEADLINE=y
-
- config DEFAULT_CFQ
- bool "CFQ" if IOSCHED_CFQ=y
-
- config DEFAULT_NOOP
- bool "No-op"
-
-endchoice
-
-config DEFAULT_IOSCHED
- string
- default "deadline" if DEFAULT_DEADLINE
- default "cfq" if DEFAULT_CFQ
- default "noop" if DEFAULT_NOOP
-
config MQ_IOSCHED_DEADLINE
tristate "MQ deadline I/O scheduler"
default y
diff --git a/block/Makefile b/block/Makefile
index 27eac600474f..eee1b4ceecf9 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -3,7 +3,7 @@
# Makefile for the kernel block layer
#
-obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
+obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
@@ -18,9 +18,6 @@ obj-$(CONFIG_BLK_DEV_BSGLIB) += bsg-lib.o
obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o
obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
obj-$(CONFIG_BLK_CGROUP_IOLATENCY) += blk-iolatency.o
-obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
-obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
-obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
obj-$(CONFIG_MQ_IOSCHED_DEADLINE) += mq-deadline.o
obj-$(CONFIG_MQ_IOSCHED_KYBER) += kyber-iosched.o
bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 9fe5952d117d..c6113af31960 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -334,7 +334,7 @@ static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
parent = bfqg_parent(bfqg);
- lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
+ lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
if (unlikely(!parent))
return;
@@ -642,7 +642,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
uint64_t serial_nr;
rcu_read_lock();
- serial_nr = bio_blkcg(bio)->css.serial_nr;
+ serial_nr = __bio_blkcg(bio)->css.serial_nr;
/*
* Check whether blkcg has changed. The condition may trigger
@@ -651,7 +651,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
goto out;
- bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+ bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
/*
* Update blkg_path for bfq_log_* functions. We cache this
* path, and update it here, for the following
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 3a27d31fcda6..cd307767a134 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -399,9 +399,9 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
unsigned long flags;
struct bfq_io_cq *icq;
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&q->queue_lock, flags);
icq = icq_to_bic(ioc_lookup_icq(ioc, q));
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&q->queue_lock, flags);
return icq;
}
@@ -638,7 +638,7 @@ static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
) ||
- (bfqd->num_active_groups > 0
+ (bfqd->num_groups_with_pending_reqs > 0
#endif
);
}
@@ -802,7 +802,21 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
*/
break;
}
- bfqd->num_active_groups--;
+
+ /*
+ * The decrement of num_groups_with_pending_reqs is
+ * not performed immediately upon the deactivation of
+ * entity, but it is delayed to when it also happens
+ * that the first leaf descendant bfqq of entity gets
+ * all its pending requests completed. The following
+ * instructions perform this delayed decrement, if
+ * needed. See the comments on
+ * num_groups_with_pending_reqs for details.
+ */
+ if (entity->in_groups_with_pending_reqs) {
+ entity->in_groups_with_pending_reqs = false;
+ bfqd->num_groups_with_pending_reqs--;
+ }
}
}
@@ -3529,27 +3543,44 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* fact, if there are active groups, then, for condition (i)
* to become false, it is enough that an active group contains
* more active processes or sub-groups than some other active
- * group. We address this issue with the following bi-modal
- * behavior, implemented in the function
+ * group. More precisely, for condition (i) to hold because of
+ * such a group, it is not even necessary that the group is
+ * (still) active: it is sufficient that, even if the group
+ * has become inactive, some of its descendant processes still
+ * have some request already dispatched but still waiting for
+ * completion. In fact, requests have still to be guaranteed
+ * their share of the throughput even after being
+ * dispatched. In this respect, it is easy to show that, if a
+ * group frequently becomes inactive while still having
+ * in-flight requests, and if, when this happens, the group is
+ * not considered in the calculation of whether the scenario
+ * is asymmetric, then the group may fail to be guaranteed its
+ * fair share of the throughput (basically because idling may
+ * not be performed for the descendant processes of the group,
+ * but it had to be). We address this issue with the
+ * following bi-modal behavior, implemented in the function
* bfq_symmetric_scenario().
*
- * If there are active groups, then the scenario is tagged as
+ * If there are groups with requests waiting for completion
+ * (as commented above, some of these groups may even be
+ * already inactive), then the scenario is tagged as
* asymmetric, conservatively, without checking any of the
* conditions (i) and (ii). So the device is idled for bfqq.
* This behavior matches also the fact that groups are created
- * exactly if controlling I/O (to preserve bandwidth and
- * latency guarantees) is a primary concern.
+ * exactly if controlling I/O is a primary concern (to
+ * preserve bandwidth and latency guarantees).
*
- * On the opposite end, if there are no active groups, then
- * only condition (i) is actually controlled, i.e., provided
- * that condition (i) holds, idling is not performed,
- * regardless of whether condition (ii) holds. In other words,
- * only if condition (i) does not hold, then idling is
- * allowed, and the device tends to be prevented from queueing
- * many requests, possibly of several processes. Since there
- * are no active groups, then, to control condition (i) it is
- * enough to check whether all active queues have the same
- * weight.
+ * On the opposite end, if there are no groups with requests
+ * waiting for completion, then only condition (i) is actually
+ * controlled, i.e., provided that condition (i) holds, idling
+ * is not performed, regardless of whether condition (ii)
+ * holds. In other words, only if condition (i) does not hold,
+ * then idling is allowed, and the device tends to be
+ * prevented from queueing many requests, possibly of several
+ * processes. Since there are no groups with requests waiting
+ * for completion, then, to control condition (i) it is enough
+ * to check just whether all the queues with requests waiting
+ * for completion also have the same weight.
*
* Not checking condition (ii) evidently exposes bfqq to the
* risk of getting less throughput than its fair share.
@@ -3607,10 +3638,11 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* bfqq is weight-raised is checked explicitly here. More
* precisely, the compound condition below takes into account
* also the fact that, even if bfqq is being weight-raised,
- * the scenario is still symmetric if all active queues happen
- * to be weight-raised. Actually, we should be even more
- * precise here, and differentiate between interactive weight
- * raising and soft real-time weight raising.
+ * the scenario is still symmetric if all queues with requests
+ * waiting for completion happen to be
+ * weight-raised. Actually, we should be even more precise
+ * here, and differentiate between interactive weight raising
+ * and soft real-time weight raising.
*
* As a side note, it is worth considering that the above
* device-idling countermeasures may however fail in the
@@ -4034,7 +4066,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
* In addition, the following queue lock guarantees that
* bfqq_group(bfqq) exists as well.
*/
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (idle_timer_disabled)
/*
* Since the idle timer has been disabled,
@@ -4053,7 +4085,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
bfqg_stats_set_start_empty_time(bfqg);
bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
}
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
#else
static inline void bfq_update_dispatch_stats(struct request_queue *q,
@@ -4384,7 +4416,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
rcu_read_lock();
- bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
+ bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
if (!bfqg) {
bfqq = &bfqd->oom_bfqq;
goto out;
@@ -4637,11 +4669,11 @@ static void bfq_update_insert_stats(struct request_queue *q,
* In addition, the following queue lock guarantees that
* bfqq_group(bfqq) exists as well.
*/
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
if (idle_timer_disabled)
bfqg_stats_update_idle_time(bfqq_group(bfqq));
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
#else
static inline void bfq_update_insert_stats(struct request_queue *q,
@@ -5382,9 +5414,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
}
eq->elevator_data = bfqd;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
q->elevator = eq;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
/*
* Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
@@ -5417,7 +5449,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
bfqd->queue_weights_tree = RB_ROOT;
- bfqd->num_active_groups = 0;
+ bfqd->num_groups_with_pending_reqs = 0;
INIT_LIST_HEAD(&bfqd->active_list);
INIT_LIST_HEAD(&bfqd->idle_list);
@@ -5724,7 +5756,7 @@ static struct elv_fs_entry bfq_attrs[] = {
};
static struct elevator_type iosched_bfq_mq = {
- .ops.mq = {
+ .ops = {
.limit_depth = bfq_limit_depth,
.prepare_request = bfq_prepare_request,
.requeue_request = bfq_finish_requeue_request,
@@ -5745,7 +5777,6 @@ static struct elevator_type iosched_bfq_mq = {
.exit_sched = bfq_exit_queue,
},
- .uses_mq = true,
.icq_size = sizeof(struct bfq_io_cq),
.icq_align = __alignof__(struct bfq_io_cq),
.elevator_attrs = bfq_attrs,
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 77651d817ecd..0b02bf302de0 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -196,6 +196,9 @@ struct bfq_entity {
/* flag, set to request a weight, ioprio or ioprio_class change */
int prio_changed;
+
+ /* flag, set if the entity is counted in groups_with_pending_reqs */
+ bool in_groups_with_pending_reqs;
};
struct bfq_group;
@@ -448,10 +451,54 @@ struct bfq_data {
* bfq_weights_tree_[add|remove] for further details).
*/
struct rb_root queue_weights_tree;
+
/*
- * number of groups with requests still waiting for completion
+ * Number of groups with at least one descendant process that
+ * has at least one request waiting for completion. Note that
+ * this also accounts for requests already dispatched, but not
+ * yet completed. Therefore this number of groups may differ
+ * (be larger) than the number of active groups, as a group is
+ * considered active only if its corresponding entity has
+ * descendant queues with at least one request queued. This
+ * number is used to decide whether a scenario is symmetric.
+ * For a detailed explanation see comments on the computation
+ * of the variable asymmetric_scenario in the function
+ * bfq_better_to_idle().
+ *
+ * However, it is hard to compute this number exactly, for
+ * groups with multiple descendant processes. Consider a group
+ * that is inactive, i.e., that has no descendant process with
+ * pending I/O inside BFQ queues. Then suppose that
+ * num_groups_with_pending_reqs is still accounting for this
+ * group, because the group has descendant processes with some
+ * I/O request still in flight. num_groups_with_pending_reqs
+ * should be decremented when the in-flight request of the
+ * last descendant process is finally completed (assuming that
+ * nothing else has changed for the group in the meantime, in
+ * terms of composition of the group and active/inactive state of child
+ * groups and processes). To accomplish this, an additional
+ * pending-request counter must be added to entities, and must
+ * be updated correctly. To avoid this additional field and operations,
+ * we resort to the following tradeoff between simplicity and
+ * accuracy: for an inactive group that is still counted in
+ * num_groups_with_pending_reqs, we decrement
+ * num_groups_with_pending_reqs when the first descendant
+ * process of the group remains with no request waiting for
+ * completion.
+ *
+ * Even this simpler decrement strategy requires a little
+ * carefulness: to avoid multiple decrements, we flag a group,
+ * more precisely an entity representing a group, as still
+ * counted in num_groups_with_pending_reqs when it becomes
+ * inactive. Then, when the first descendant queue of the
+ * entity remains with no request waiting for completion,
+ * num_groups_with_pending_reqs is decremented, and this flag
+ * is reset. After this flag is reset for the entity,
+ * num_groups_with_pending_reqs won't be decremented any
+ * longer in case a new descendant queue of the entity remains
+ * with no request waiting for completion.
*/
- unsigned int num_active_groups;
+ unsigned int num_groups_with_pending_reqs;
/*
* Number of bfq_queues containing requests (including the
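
The long comment above boils down to a flagged counter: a group is counted at most once, when it first acquires a pending request, and uncounted at most once, guarded by the per-entity in_groups_with_pending_reqs flag so repeated deactivations never double-decrement. A minimal sketch of that invariant; it simplifies the uncount trigger to "pending count reaches zero", whereas BFQ actually decrements when the first descendant queue drains:

    #include <stdio.h>
    #include <stdbool.h>

    struct group {
        bool in_groups_with_pending_reqs;   /* counted already? */
        int pending;                        /* dispatched, not yet completed */
    };

    static unsigned int num_groups_with_pending_reqs;

    static void group_add_request(struct group *g)
    {
        g->pending++;
        if (!g->in_groups_with_pending_reqs) {
            g->in_groups_with_pending_reqs = true;
            num_groups_with_pending_reqs++;     /* count once */
        }
    }

    static void group_complete_request(struct group *g)
    {
        if (--g->pending == 0 && g->in_groups_with_pending_reqs) {
            g->in_groups_with_pending_reqs = false;
            num_groups_with_pending_reqs--;     /* uncount once */
        }
    }

    int main(void)
    {
        struct group g = { 0 };

        group_add_request(&g);
        group_add_request(&g);          /* still counted only once */
        group_complete_request(&g);
        printf("groups=%u\n", num_groups_with_pending_reqs);   /* 1 */
        group_complete_request(&g);
        printf("groups=%u\n", num_groups_with_pending_reqs);   /* 0 */
        return 0;
    }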
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 4b0d5fb69160..63e0f12be7c9 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -1012,7 +1012,10 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
container_of(entity, struct bfq_group, entity);
struct bfq_data *bfqd = bfqg->bfqd;
- bfqd->num_active_groups++;
+ if (!entity->in_groups_with_pending_reqs) {
+ entity->in_groups_with_pending_reqs = true;
+ bfqd->num_groups_with_pending_reqs++;
+ }
}
#endif
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 290af497997b..1b633a3526d4 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -390,7 +390,6 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
bip->bip_iter.bi_sector += bytes_done >> 9;
bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}
-EXPORT_SYMBOL(bio_integrity_advance);
/**
* bio_integrity_trim - Trim integrity vector
@@ -460,7 +459,6 @@ void bioset_integrity_free(struct bio_set *bs)
mempool_exit(&bs->bio_integrity_pool);
mempool_exit(&bs->bvec_integrity_pool);
}
-EXPORT_SYMBOL(bioset_integrity_free);
void __init bio_integrity_init(void)
{
diff --git a/block/bio.c b/block/bio.c
index 4f4d9884443b..8281bfcbc265 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -244,7 +244,7 @@ fallback:
void bio_uninit(struct bio *bio)
{
- bio_disassociate_task(bio);
+ bio_disassociate_blkg(bio);
}
EXPORT_SYMBOL(bio_uninit);
@@ -571,14 +571,13 @@ void bio_put(struct bio *bio)
}
EXPORT_SYMBOL(bio_put);
-inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
+int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
blk_recount_segments(q, bio);
return bio->bi_phys_segments;
}
-EXPORT_SYMBOL(bio_phys_segments);
/**
* __bio_clone_fast - clone a bio that shares the original bio's biovec
@@ -610,7 +609,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
- bio_clone_blkcg_association(bio, bio_src);
+ bio_clone_blkg_association(bio, bio_src);
+ blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);
@@ -901,7 +901,6 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
return 0;
}
-EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
static void submit_bio_wait_endio(struct bio *bio)
{
@@ -1261,7 +1260,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
if (ret)
goto cleanup;
} else {
- zero_fill_bio(bio);
+ if (bmd->is_our_pages)
+ zero_fill_bio(bio);
iov_iter_advance(iter, bio->bi_iter.bi_size);
}
@@ -1591,7 +1591,6 @@ void bio_set_pages_dirty(struct bio *bio)
set_page_dirty_lock(bvec->bv_page);
}
}
-EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
static void bio_release_pages(struct bio *bio)
{
@@ -1661,17 +1660,33 @@ defer:
spin_unlock_irqrestore(&bio_dirty_lock, flags);
schedule_work(&bio_dirty_work);
}
-EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
+
+void update_io_ticks(struct hd_struct *part, unsigned long now)
+{
+ unsigned long stamp;
+again:
+ stamp = READ_ONCE(part->stamp);
+ if (unlikely(stamp != now)) {
+ if (likely(cmpxchg(&part->stamp, stamp, now) == stamp)) {
+ __part_stat_add(part, io_ticks, 1);
+ }
+ }
+ if (part->partno) {
+ part = &part_to_disk(part)->part0;
+ goto again;
+ }
+}
void generic_start_io_acct(struct request_queue *q, int op,
unsigned long sectors, struct hd_struct *part)
{
const int sgrp = op_stat_group(op);
- int cpu = part_stat_lock();
- part_round_stats(q, cpu, part);
- part_stat_inc(cpu, part, ios[sgrp]);
- part_stat_add(cpu, part, sectors[sgrp], sectors);
+ part_stat_lock();
+
+ update_io_ticks(part, jiffies);
+ part_stat_inc(part, ios[sgrp]);
+ part_stat_add(part, sectors[sgrp], sectors);
part_inc_in_flight(q, part, op_is_write(op));
part_stat_unlock();
@@ -1681,12 +1696,15 @@ EXPORT_SYMBOL(generic_start_io_acct);
void generic_end_io_acct(struct request_queue *q, int req_op,
struct hd_struct *part, unsigned long start_time)
{
- unsigned long duration = jiffies - start_time;
+ unsigned long now = jiffies;
+ unsigned long duration = now - start_time;
const int sgrp = op_stat_group(req_op);
- int cpu = part_stat_lock();
- part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
- part_round_stats(q, cpu, part);
+ part_stat_lock();
+
+ update_io_ticks(part, now);
+ part_stat_add(part, nsecs[sgrp], jiffies_to_nsecs(duration));
+ part_stat_add(part, time_in_queue, duration);
part_dec_in_flight(q, part, op_is_write(req_op));
part_stat_unlock();
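
update_io_ticks() above charges at most one io_ticks unit per jiffy, using cmpxchg so that when several submitters race within the same tick only the one that wins the stamp update adds to the counter. A user-space sketch with C11 atomics; "now" stands in for jiffies, and the kernel's extra walk up to the whole-disk part0 is omitted:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long stamp;
    static _Atomic unsigned long io_ticks;

    static void update_io_ticks(unsigned long now)
    {
        unsigned long old = atomic_load_explicit(&stamp, memory_order_relaxed);

        /* Only the CAS winner for a new tick bumps the counter. */
        if (old != now &&
            atomic_compare_exchange_strong(&stamp, &old, now))
            atomic_fetch_add(&io_ticks, 1);
    }

    int main(void)
    {
        for (unsigned long t = 1; t <= 5; t++) {
            update_io_ticks(t);
            update_io_ticks(t);         /* same tick: no extra count */
        }
        printf("io_ticks=%lu\n", atomic_load(&io_ticks));   /* 5 */
        return 0;
    }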
@@ -1956,102 +1974,133 @@ EXPORT_SYMBOL(bioset_init_from_src);
#ifdef CONFIG_BLK_CGROUP
-#ifdef CONFIG_MEMCG
/**
- * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
+ * bio_disassociate_blkg - puts back the blkg reference if associated
* @bio: target bio
- * @page: the page to lookup the blkcg from
*
- * Associate @bio with the blkcg from @page's owning memcg. This works like
- * every other associate function wrt references.
+ * Helper to disassociate the blkg from @bio if a blkg is associated.
*/
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
+void bio_disassociate_blkg(struct bio *bio)
{
- struct cgroup_subsys_state *blkcg_css;
-
- if (unlikely(bio->bi_css))
- return -EBUSY;
- if (!page->mem_cgroup)
- return 0;
- blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
- &io_cgrp_subsys);
- bio->bi_css = blkcg_css;
- return 0;
+ if (bio->bi_blkg) {
+ blkg_put(bio->bi_blkg);
+ bio->bi_blkg = NULL;
+ }
}
-#endif /* CONFIG_MEMCG */
+EXPORT_SYMBOL_GPL(bio_disassociate_blkg);
/**
- * bio_associate_blkcg - associate a bio with the specified blkcg
+ * __bio_associate_blkg - associate a bio with the a blkg
* @bio: target bio
- * @blkcg_css: css of the blkcg to associate
+ * @blkg: the blkg to associate
*
- * Associate @bio with the blkcg specified by @blkcg_css. Block layer will
- * treat @bio as if it were issued by a task which belongs to the blkcg.
+ * This tries to associate @bio with the specified @blkg. Association failure
+ * is handled by walking up the blkg tree. Therefore, the blkg associated can
+ * be anything between @blkg and the root_blkg. This situation only happens
+ * when a cgroup is dying and then the remaining bios will spill to the closest
+ * alive blkg.
*
- * This function takes an extra reference of @blkcg_css which will be put
- * when @bio is released. The caller must own @bio and is responsible for
- * synchronizing calls to this function.
+ * A reference will be taken on the @blkg and will be released when @bio is
+ * freed.
*/
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
+static void __bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
{
- if (unlikely(bio->bi_css))
- return -EBUSY;
- css_get(blkcg_css);
- bio->bi_css = blkcg_css;
- return 0;
+ bio_disassociate_blkg(bio);
+
+ bio->bi_blkg = blkg_tryget_closest(blkg);
}
-EXPORT_SYMBOL_GPL(bio_associate_blkcg);
/**
- * bio_associate_blkg - associate a bio with the specified blkg
+ * bio_associate_blkg_from_css - associate a bio with a specified css
* @bio: target bio
- * @blkg: the blkg to associate
+ * @css: target css
*
- * Associate @bio with the blkg specified by @blkg. This is the queue specific
- * blkcg information associated with the @bio, a reference will be taken on the
- * @blkg and will be freed when the bio is freed.
+ * Associate @bio with the blkg found by combining the css's blkg and the
+ * request_queue of the @bio. This falls back to the queue's root_blkg if
+ * the association fails with the css.
*/
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
+void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
{
- if (unlikely(bio->bi_blkg))
- return -EBUSY;
- if (!blkg_try_get(blkg))
- return -ENODEV;
- bio->bi_blkg = blkg;
- return 0;
+ struct request_queue *q = bio->bi_disk->queue;
+ struct blkcg_gq *blkg;
+
+ rcu_read_lock();
+
+ if (!css || !css->parent)
+ blkg = q->root_blkg;
+ else
+ blkg = blkg_lookup_create(css_to_blkcg(css), q);
+
+ __bio_associate_blkg(bio, blkg);
+
+ rcu_read_unlock();
}
+EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
+#ifdef CONFIG_MEMCG
/**
- * bio_disassociate_task - undo bio_associate_current()
+ * bio_associate_blkg_from_page - associate a bio with the page's blkg
* @bio: target bio
+ * @page: the page to lookup the blkcg from
+ *
+ * Associate @bio with the blkg from @page's owning memcg and the respective
+ * request_queue. If cgroup_e_css returns %NULL, fall back to the queue's
+ * root_blkg.
*/
-void bio_disassociate_task(struct bio *bio)
+void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
- if (bio->bi_ioc) {
- put_io_context(bio->bi_ioc);
- bio->bi_ioc = NULL;
- }
- if (bio->bi_css) {
- css_put(bio->bi_css);
- bio->bi_css = NULL;
- }
- if (bio->bi_blkg) {
- blkg_put(bio->bi_blkg);
- bio->bi_blkg = NULL;
- }
+ struct cgroup_subsys_state *css;
+
+ if (!page->mem_cgroup)
+ return;
+
+ rcu_read_lock();
+
+ css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
+ bio_associate_blkg_from_css(bio, css);
+
+ rcu_read_unlock();
+}
+#endif /* CONFIG_MEMCG */
+
+/**
+ * bio_associate_blkg - associate a bio with a blkg
+ * @bio: target bio
+ *
+ * Associate @bio with the blkg found from the bio's css and request_queue.
+ * If one is not found, bio_lookup_blkg() creates the blkg. If a blkg is
+ * already associated, the css is reused and association redone as the
+ * request_queue may have changed.
+ */
+void bio_associate_blkg(struct bio *bio)
+{
+ struct cgroup_subsys_state *css;
+
+ rcu_read_lock();
+
+ if (bio->bi_blkg)
+ css = &bio_blkcg(bio)->css;
+ else
+ css = blkcg_css();
+
+ bio_associate_blkg_from_css(bio, css);
+
+ rcu_read_unlock();
}
+EXPORT_SYMBOL_GPL(bio_associate_blkg);
/**
- * bio_clone_blkcg_association - clone blkcg association from src to dst bio
+ * bio_clone_blkg_association - clone blkg association from src to dst bio
* @dst: destination bio
* @src: source bio
*/
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
+void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
- if (src->bi_css)
- WARN_ON(bio_associate_blkcg(dst, src->bi_css));
+ if (src->bi_blkg)
+ __bio_associate_blkg(dst, src->bi_blkg);
}
-EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
+EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c630e02836a8..c8cc1cbb6370 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -76,14 +76,42 @@ static void blkg_free(struct blkcg_gq *blkg)
if (blkg->pd[i])
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
- if (blkg->blkcg != &blkcg_root)
- blk_exit_rl(blkg->q, &blkg->rl);
-
blkg_rwstat_exit(&blkg->stat_ios);
blkg_rwstat_exit(&blkg->stat_bytes);
kfree(blkg);
}
+static void __blkg_release(struct rcu_head *rcu)
+{
+ struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
+
+ percpu_ref_exit(&blkg->refcnt);
+
+ /* release the blkcg and parent blkg refs this blkg has been holding */
+ css_put(&blkg->blkcg->css);
+ if (blkg->parent)
+ blkg_put(blkg->parent);
+
+ wb_congested_put(blkg->wb_congested);
+
+ blkg_free(blkg);
+}
+
+/*
+ * A group is RCU protected, but holding the RCU read lock alone does not
+ * mean that one can access all the fields of blkg and assume they are
+ * valid. For example, don't try to follow throtl_data and request queue
+ * links.
+ *
+ * Holding a reference to a blkg under RCU only allows access to values
+ * local to the group, like group stats and group rate limits.
+ */
+static void blkg_release(struct percpu_ref *ref)
+{
+ struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
+
+ call_rcu(&blkg->rcu_head, __blkg_release);
+}
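
/*
 * Minimal illustrative sketch, not part of the patch, of the reference
 * pattern the blkg now follows: percpu_ref_kill() drops the initial
 * reference and switches the counter to atomic mode; once it reaches
 * zero the release callback runs, which defers the actual free by an
 * RCU grace period so lockless readers never touch freed memory. All
 * example_* names are hypothetical (assumes the usual percpu-refcount,
 * rcupdate and slab headers).
 */
struct example_obj {
	struct percpu_ref refcnt;
	struct rcu_head rcu_head;
};

static void example_free_rcu(struct rcu_head *rcu)
{
	struct example_obj *obj = container_of(rcu, struct example_obj, rcu_head);

	percpu_ref_exit(&obj->refcnt);
	kfree(obj);
}

static void example_release(struct percpu_ref *ref)
{
	struct example_obj *obj = container_of(ref, struct example_obj, refcnt);

	/* readers found us under rcu_read_lock(); wait them out */
	call_rcu(&obj->rcu_head, example_free_rcu);
}
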
+
/**
* blkg_alloc - allocate a blkg
* @blkcg: block cgroup the new blkg is associated with
@@ -110,14 +138,6 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
blkg->q = q;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
- atomic_set(&blkg->refcnt, 1);
-
- /* root blkg uses @q->root_rl, init rl only for !root blkgs */
- if (blkcg != &blkcg_root) {
- if (blk_init_rl(&blkg->rl, q, gfp_mask))
- goto err_free;
- blkg->rl.blkg = blkg;
- }
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -157,7 +177,7 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
if (blkg && blkg->q == q) {
if (update_hint) {
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
rcu_assign_pointer(blkcg->blkg_hint, blkg);
}
return blkg;
@@ -180,7 +200,13 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
int i, ret;
WARN_ON_ONCE(!rcu_read_lock_held());
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
+
+ /* request_queue is dying, do not create/recreate a blkg */
+ if (blk_queue_dying(q)) {
+ ret = -ENODEV;
+ goto err_free_blkg;
+ }
/* blkg holds a reference to blkcg */
if (!css_tryget_online(&blkcg->css)) {
@@ -217,6 +243,11 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
blkg_get(blkg->parent);
}
+ ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (ret)
+ goto err_cancel_ref;
+
/* invoke per-policy init */
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -249,6 +280,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
blkg_put(blkg);
return ERR_PTR(ret);
+err_cancel_ref:
+ percpu_ref_exit(&blkg->refcnt);
err_put_congested:
wb_congested_put(wb_congested);
err_put_css:
@@ -259,7 +292,7 @@ err_free_blkg:
}
/**
- * blkg_lookup_create - lookup blkg, try to create one if not there
+ * __blkg_lookup_create - lookup blkg, try to create one if not there
* @blkcg: blkcg of interest
* @q: request_queue of interest
*
@@ -268,24 +301,16 @@ err_free_blkg:
* that all non-root blkg's have access to the parent blkg. This function
* should be called under RCU read lock and @q->queue_lock.
*
- * Returns pointer to the looked up or created blkg on success, ERR_PTR()
- * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
- * dead and bypassing, returns ERR_PTR(-EBUSY).
+ * Returns the blkg on success. If blkg_create() fails while walking down
+ * from the root, the closest blkg already created is returned.
*/
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q)
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
{
struct blkcg_gq *blkg;
WARN_ON_ONCE(!rcu_read_lock_held());
- lockdep_assert_held(q->queue_lock);
-
- /*
- * This could be the first entry point of blkcg implementation and
- * we shouldn't allow anything to go through for a bypassing queue.
- */
- if (unlikely(blk_queue_bypass(q)))
- return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
+ lockdep_assert_held(&q->queue_lock);
blkg = __blkg_lookup(blkcg, q, true);
if (blkg)
@@ -293,30 +318,64 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
/*
* Create blkgs walking down from blkcg_root to @blkcg, so that all
- * non-root blkgs have access to their parents.
+ * non-root blkgs have access to their parents. Should blkg_create()
+ * fail, the closest existing ancestor blkg is returned.
*/
while (true) {
struct blkcg *pos = blkcg;
struct blkcg *parent = blkcg_parent(blkcg);
-
- while (parent && !__blkg_lookup(parent, q, false)) {
+ struct blkcg_gq *ret_blkg = q->root_blkg;
+
+ while (parent) {
+ blkg = __blkg_lookup(parent, q, false);
+ if (blkg) {
+ /* remember closest blkg */
+ ret_blkg = blkg;
+ break;
+ }
pos = parent;
parent = blkcg_parent(parent);
}
blkg = blkg_create(pos, q, NULL);
- if (pos == blkcg || IS_ERR(blkg))
+ if (IS_ERR(blkg))
+ return ret_blkg;
+ if (pos == blkcg)
return blkg;
}
}
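
/*
 * Worked example, not part of the patch: for a hierarchy root -> A -> B
 * where only the root blkg exists for @q, looking up B first creates A's
 * blkg, loops, then creates and returns B's. If creating A's blkg fails,
 * the root blkg is returned instead, i.e. the closest ancestor that does
 * exist.
 */
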
+/**
+ * blkg_lookup_create - find or create a blkg
+ * @blkcg: target block cgroup
+ * @q: target request_queue
+ *
+ * This looks up or creates the blkg representing the unique pair
+ * of the blkcg and the request_queue.
+ */
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
+{
+ struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
+
+ if (unlikely(!blkg)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->queue_lock, flags);
+ blkg = __blkg_lookup_create(blkcg, q);
+ spin_unlock_irqrestore(&q->queue_lock, flags);
+ }
+
+ return blkg;
+}
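
/*
 * Minimal illustrative sketch, not part of the patch: the fast path of
 * blkg_lookup_create() is a pure RCU lookup; the queue_lock is taken
 * only when the blkg does not exist yet. A caller pins the result with
 * blkg_tryget() before leaving the RCU section. The helper name is
 * hypothetical.
 */
static struct blkcg_gq *example_get_blkg(struct blkcg *blkcg,
					 struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	blkg = blkg_lookup_create(blkcg, q);
	if (blkg && !blkg_tryget(blkg))
		blkg = NULL;
	rcu_read_unlock();

	return blkg;	/* caller must blkg_put() when done */
}
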
+
static void blkg_destroy(struct blkcg_gq *blkg)
{
struct blkcg *blkcg = blkg->blkcg;
struct blkcg_gq *parent = blkg->parent;
int i;
- lockdep_assert_held(blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->q->queue_lock);
lockdep_assert_held(&blkcg->lock);
/* Something wrong if we are trying to remove same group twice */
@@ -353,7 +412,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
- blkg_put(blkg);
+ percpu_ref_kill(&blkg->refcnt);
}
/**
@@ -366,8 +425,7 @@ static void blkg_destroy_all(struct request_queue *q)
{
struct blkcg_gq *blkg, *n;
- lockdep_assert_held(q->queue_lock);
-
+ spin_lock_irq(&q->queue_lock);
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;
@@ -377,7 +435,7 @@ static void blkg_destroy_all(struct request_queue *q)
}
q->root_blkg = NULL;
- q->root_rl.blkg = NULL;
+ spin_unlock_irq(&q->queue_lock);
}
/*
@@ -403,41 +461,6 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);
-/*
- * The next function used by blk_queue_for_each_rl(). It's a bit tricky
- * because the root blkg uses @q->root_rl instead of its own rl.
- */
-struct request_list *__blk_queue_next_rl(struct request_list *rl,
- struct request_queue *q)
-{
- struct list_head *ent;
- struct blkcg_gq *blkg;
-
- /*
- * Determine the current blkg list_head. The first entry is
- * root_rl which is off @q->blkg_list and mapped to the head.
- */
- if (rl == &q->root_rl) {
- ent = &q->blkg_list;
- /* There are no more block groups, hence no request lists */
- if (list_empty(ent))
- return NULL;
- } else {
- blkg = container_of(rl, struct blkcg_gq, rl);
- ent = &blkg->q_node;
- }
-
- /* walk to the next list_head, skip root blkcg */
- ent = ent->next;
- if (ent == &q->root_blkg->q_node)
- ent = ent->next;
- if (ent == &q->blkg_list)
- return NULL;
-
- blkg = container_of(ent, struct blkcg_gq, q_node);
- return &blkg->rl;
-}
-
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 val)
{
@@ -477,7 +500,6 @@ const char *blkg_dev_name(struct blkcg_gq *blkg)
return dev_name(blkg->q->backing_dev_info->dev);
return NULL;
}
-EXPORT_SYMBOL_GPL(blkg_dev_name);
/**
* blkcg_print_blkgs - helper for printing per-blkg data
@@ -508,10 +530,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
- spin_lock_irq(blkg->q->queue_lock);
+ spin_lock_irq(&blkg->q->queue_lock);
if (blkcg_policy_enabled(blkg->q, pol))
total += prfill(sf, blkg->pd[pol->plid], data);
- spin_unlock_irq(blkg->q->queue_lock);
+ spin_unlock_irq(&blkg->q->queue_lock);
}
rcu_read_unlock();
@@ -709,7 +731,7 @@ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
struct cgroup_subsys_state *pos_css;
u64 sum = 0;
- lockdep_assert_held(blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->q->queue_lock);
rcu_read_lock();
blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -752,7 +774,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
struct blkg_rwstat sum = { };
int i;
- lockdep_assert_held(blkg->q->queue_lock);
+ lockdep_assert_held(&blkg->q->queue_lock);
rcu_read_lock();
blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -783,18 +805,10 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
struct request_queue *q)
{
WARN_ON_ONCE(!rcu_read_lock_held());
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
if (!blkcg_policy_enabled(q, pol))
return ERR_PTR(-EOPNOTSUPP);
-
- /*
- * This could be the first entry point of blkcg implementation and
- * we shouldn't allow anything to go through for a bypassing queue.
- */
- if (unlikely(blk_queue_bypass(q)))
- return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
-
return __blkg_lookup(blkcg, q, true /* update_hint */);
}
@@ -812,7 +826,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
*/
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
char *input, struct blkg_conf_ctx *ctx)
- __acquires(rcu) __acquires(disk->queue->queue_lock)
+ __acquires(rcu) __acquires(&disk->queue->queue_lock)
{
struct gendisk *disk;
struct request_queue *q;
@@ -840,7 +854,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
q = disk->queue;
rcu_read_lock();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blkg = blkg_lookup_check(blkcg, pol, q);
if (IS_ERR(blkg)) {
@@ -867,7 +881,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
}
/* Drop locks to do new blkg allocation with GFP_KERNEL. */
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
@@ -877,7 +891,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
}
rcu_read_lock();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blkg = blkg_lookup_check(pos, pol, q);
if (IS_ERR(blkg)) {
@@ -905,7 +919,7 @@ success:
return 0;
fail_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
fail:
put_disk_and_module(disk);
@@ -921,7 +935,6 @@ fail:
}
return ret;
}
-EXPORT_SYMBOL_GPL(blkg_conf_prep);
/**
* blkg_conf_finish - finish up per-blkg config update
@@ -931,13 +944,12 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
* with blkg_conf_prep().
*/
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
- __releases(ctx->disk->queue->queue_lock) __releases(rcu)
+ __releases(&ctx->disk->queue->queue_lock) __releases(rcu)
{
- spin_unlock_irq(ctx->disk->queue->queue_lock);
+ spin_unlock_irq(&ctx->disk->queue->queue_lock);
rcu_read_unlock();
put_disk_and_module(ctx->disk);
}
-EXPORT_SYMBOL_GPL(blkg_conf_finish);
static int blkcg_print_stat(struct seq_file *sf, void *v)
{
@@ -967,7 +979,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
*/
off += scnprintf(buf+off, size-off, "%s ", dname);
- spin_lock_irq(blkg->q->queue_lock);
+ spin_lock_irq(&blkg->q->queue_lock);
rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
offsetof(struct blkcg_gq, stat_bytes));
@@ -981,7 +993,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
- spin_unlock_irq(blkg->q->queue_lock);
+ spin_unlock_irq(&blkg->q->queue_lock);
if (rbytes || wbytes || rios || wios) {
has_stats = true;
@@ -1102,9 +1114,9 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
struct blkcg_gq, blkcg_node);
struct request_queue *q = blkg->q;
- if (spin_trylock(q->queue_lock)) {
+ if (spin_trylock(&q->queue_lock)) {
blkg_destroy(blkg);
- spin_unlock(q->queue_lock);
+ spin_unlock(&q->queue_lock);
} else {
spin_unlock_irq(&blkcg->lock);
cpu_relax();
@@ -1225,36 +1237,31 @@ int blkcg_init_queue(struct request_queue *q)
/* Make sure the root blkg exists. */
rcu_read_lock();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
blkg = blkg_create(&blkcg_root, q, new_blkg);
if (IS_ERR(blkg))
goto err_unlock;
q->root_blkg = blkg;
- q->root_rl.blkg = blkg;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
if (preloaded)
radix_tree_preload_end();
ret = blk_iolatency_init(q);
- if (ret) {
- spin_lock_irq(q->queue_lock);
- blkg_destroy_all(q);
- spin_unlock_irq(q->queue_lock);
- return ret;
- }
+ if (ret)
+ goto err_destroy_all;
ret = blk_throtl_init(q);
- if (ret) {
- spin_lock_irq(q->queue_lock);
- blkg_destroy_all(q);
- spin_unlock_irq(q->queue_lock);
- }
- return ret;
+ if (ret)
+ goto err_destroy_all;
+ return 0;
+err_destroy_all:
+ blkg_destroy_all(q);
+ return ret;
err_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
if (preloaded)
radix_tree_preload_end();
@@ -1269,7 +1276,7 @@ err_unlock:
*/
void blkcg_drain_queue(struct request_queue *q)
{
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
/*
* @q could be exiting and already have destroyed all blkgs as
@@ -1289,10 +1296,7 @@ void blkcg_drain_queue(struct request_queue *q)
*/
void blkcg_exit_queue(struct request_queue *q)
{
- spin_lock_irq(q->queue_lock);
blkg_destroy_all(q);
- spin_unlock_irq(q->queue_lock);
-
blk_throtl_exit(q);
}
@@ -1396,10 +1400,8 @@ int blkcg_activate_policy(struct request_queue *q,
if (blkcg_policy_enabled(q, pol))
return 0;
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_freeze_queue(q);
- else
- blk_queue_bypass_start(q);
pd_prealloc:
if (!pd_prealloc) {
pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
@@ -1409,7 +1411,7 @@ pd_prealloc:
}
}
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
struct blkg_policy_data *pd;
@@ -1421,7 +1423,7 @@ pd_prealloc:
if (!pd)
swap(pd, pd_prealloc);
if (!pd) {
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
goto pd_prealloc;
}
@@ -1435,12 +1437,10 @@ pd_prealloc:
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
out_bypass_end:
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_unfreeze_queue(q);
- else
- blk_queue_bypass_end(q);
if (pd_prealloc)
pol->pd_free_fn(pd_prealloc);
return ret;
@@ -1463,12 +1463,10 @@ void blkcg_deactivate_policy(struct request_queue *q,
if (!blkcg_policy_enabled(q, pol))
return;
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_freeze_queue(q);
- else
- blk_queue_bypass_start(q);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
__clear_bit(pol->plid, q->blkcg_pols);
@@ -1481,12 +1479,10 @@ void blkcg_deactivate_policy(struct request_queue *q,
}
}
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_unfreeze_queue(q);
- else
- blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
@@ -1748,8 +1744,7 @@ void blkcg_maybe_throttle_current(void)
blkg = blkg_lookup(blkcg, q);
if (!blkg)
goto out;
- blkg = blkg_try_get(blkg);
- if (!blkg)
+ if (!blkg_tryget(blkg))
goto out;
rcu_read_unlock();
@@ -1761,7 +1756,6 @@ out:
rcu_read_unlock();
blk_put_queue(q);
}
-EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);
/**
* blkcg_schedule_throttle - this task needs to check for throttling
@@ -1795,7 +1789,6 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
current->use_memdelay = use_memdelay;
set_notify_resume(current);
}
-EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);
/**
* blkcg_add_delay - add delay to this blkg
@@ -1810,7 +1803,6 @@ void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
blkcg_scale_delay(blkg, now);
atomic64_add(delta, &blkg->delay_nsec);
}
-EXPORT_SYMBOL_GPL(blkcg_add_delay);
module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
diff --git a/block/blk-core.c b/block/blk-core.c
index deb56932f8c4..c78042975737 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -58,11 +58,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
/*
- * For the allocated request tables
- */
-struct kmem_cache *request_cachep;
-
-/*
* For queue allocation
*/
struct kmem_cache *blk_requestq_cachep;
@@ -79,11 +74,7 @@ static struct workqueue_struct *kblockd_workqueue;
*/
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_set(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);
@@ -94,11 +85,7 @@ EXPORT_SYMBOL(blk_queue_flag_set);
*/
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_clear(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
@@ -112,85 +99,15 @@ EXPORT_SYMBOL(blk_queue_flag_clear);
*/
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
- unsigned long flags;
- bool res;
-
- spin_lock_irqsave(q->queue_lock, flags);
- res = queue_flag_test_and_set(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return res;
+ return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
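
/*
 * Minimal illustrative sketch, not part of the patch: with the flags kept
 * in a single word, the atomic bitops provide the serialization the
 * queue_lock used to, without disabling interrupts. Of several concurrent
 * callers of the test_and_set variant, exactly one observes the bit
 * clear. The helper name is hypothetical.
 */
static bool example_mark_registered(struct request_queue *q)
{
	/* true only for the caller that actually set the bit */
	return !blk_queue_flag_test_and_set(QUEUE_FLAG_REGISTERED, q);
}
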
-/**
- * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
- * @flag: flag to be cleared
- * @q: request queue
- *
- * Returns the previous value of @flag - 0 if the flag was not set and 1 if
- * the flag was set.
- */
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
-{
- unsigned long flags;
- bool res;
-
- spin_lock_irqsave(q->queue_lock, flags);
- res = queue_flag_test_and_clear(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return res;
-}
-EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
-
-static void blk_clear_congested(struct request_list *rl, int sync)
-{
-#ifdef CONFIG_CGROUP_WRITEBACK
- clear_wb_congested(rl->blkg->wb_congested, sync);
-#else
- /*
- * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
- * flip its congestion state for events on other blkcgs.
- */
- if (rl == &rl->q->root_rl)
- clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
-#endif
-}
-
-static void blk_set_congested(struct request_list *rl, int sync)
-{
-#ifdef CONFIG_CGROUP_WRITEBACK
- set_wb_congested(rl->blkg->wb_congested, sync);
-#else
- /* see blk_clear_congested() */
- if (rl == &rl->q->root_rl)
- set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
-#endif
-}
-
-void blk_queue_congestion_threshold(struct request_queue *q)
-{
- int nr;
-
- nr = q->nr_requests - (q->nr_requests / 8) + 1;
- if (nr > q->nr_requests)
- nr = q->nr_requests;
- q->nr_congestion_on = nr;
-
- nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
- if (nr < 1)
- nr = 1;
- q->nr_congestion_off = nr;
-}
-
void blk_rq_init(struct request_queue *q, struct request *rq)
{
memset(rq, 0, sizeof(*rq));
INIT_LIST_HEAD(&rq->queuelist);
- INIT_LIST_HEAD(&rq->timeout_list);
- rq->cpu = -1;
rq->q = q;
rq->__sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash);
@@ -256,10 +173,11 @@ static void print_req_error(struct request *req, blk_status_t status)
if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
return;
- printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
- __func__, blk_errors[idx].name, req->rq_disk ?
- req->rq_disk->disk_name : "?",
- (unsigned long long)blk_rq_pos(req));
+ printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu flags %x\n",
+ __func__, blk_errors[idx].name,
+ req->rq_disk ? req->rq_disk->disk_name : "?",
+ (unsigned long long)blk_rq_pos(req),
+ req->cmd_flags);
}
static void req_bio_endio(struct request *rq, struct bio *bio,
@@ -292,99 +210,6 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
}
EXPORT_SYMBOL(blk_dump_rq_flags);
-static void blk_delay_work(struct work_struct *work)
-{
- struct request_queue *q;
-
- q = container_of(work, struct request_queue, delay_work.work);
- spin_lock_irq(q->queue_lock);
- __blk_run_queue(q);
- spin_unlock_irq(q->queue_lock);
-}
-
-/**
- * blk_delay_queue - restart queueing after defined interval
- * @q: The &struct request_queue in question
- * @msecs: Delay in msecs
- *
- * Description:
- * Sometimes queueing needs to be postponed for a little while, to allow
- * resources to come back. This function will make sure that queueing is
- * restarted around the specified time.
- */
-void blk_delay_queue(struct request_queue *q, unsigned long msecs)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (likely(!blk_queue_dead(q)))
- queue_delayed_work(kblockd_workqueue, &q->delay_work,
- msecs_to_jiffies(msecs));
-}
-EXPORT_SYMBOL(blk_delay_queue);
-
-/**
- * blk_start_queue_async - asynchronously restart a previously stopped queue
- * @q: The &struct request_queue in question
- *
- * Description:
- * blk_start_queue_async() will clear the stop flag on the queue, and
- * ensure that the request_fn for the queue is run from an async
- * context.
- **/
-void blk_start_queue_async(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- blk_run_queue_async(q);
-}
-EXPORT_SYMBOL(blk_start_queue_async);
-
-/**
- * blk_start_queue - restart a previously stopped queue
- * @q: The &struct request_queue in question
- *
- * Description:
- * blk_start_queue() will clear the stop flag on the queue, and call
- * the request_fn for the queue if it was in a stopped state when
- * entered. Also see blk_stop_queue().
- **/
-void blk_start_queue(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, q);
- __blk_run_queue(q);
-}
-EXPORT_SYMBOL(blk_start_queue);
-
-/**
- * blk_stop_queue - stop a queue
- * @q: The &struct request_queue in question
- *
- * Description:
- * The Linux block layer assumes that a block driver will consume all
- * entries on the request queue when the request_fn strategy is called.
- * Often this will not happen, because of hardware limitations (queue
- * depth settings). If a device driver gets a 'queue full' response,
- * or if it simply chooses not to queue more I/O at one point, it can
- * call this function to prevent the request_fn from being called until
- * the driver has signalled it's ready to go again. This happens by calling
- * blk_start_queue() to restart queue operations.
- **/
-void blk_stop_queue(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- cancel_delayed_work(&q->delay_work);
- queue_flag_set(QUEUE_FLAG_STOPPED, q);
-}
-EXPORT_SYMBOL(blk_stop_queue);
-
/**
* blk_sync_queue - cancel any pending callbacks on a queue
* @q: the queue
@@ -408,15 +233,13 @@ void blk_sync_queue(struct request_queue *q)
del_timer_sync(&q->timeout);
cancel_work_sync(&q->timeout_work);
- if (q->mq_ops) {
+ if (queue_is_mq(q)) {
struct blk_mq_hw_ctx *hctx;
int i;
cancel_delayed_work_sync(&q->requeue_work);
queue_for_each_hw_ctx(q, hctx, i)
cancel_delayed_work_sync(&hctx->run_work);
- } else {
- cancel_delayed_work_sync(&q->delay_work);
}
}
EXPORT_SYMBOL(blk_sync_queue);
@@ -442,250 +265,12 @@ void blk_clear_pm_only(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_clear_pm_only);
-/**
- * __blk_run_queue_uncond - run a queue whether or not it has been stopped
- * @q: The queue to run
- *
- * Description:
- * Invoke request handling on a queue if there are any pending requests.
- * May be used to restart request handling after a request has completed.
- * This variant runs the queue whether or not the queue has been
- * stopped. Must be called with the queue lock held and interrupts
- * disabled. See also @blk_run_queue.
- */
-inline void __blk_run_queue_uncond(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (unlikely(blk_queue_dead(q)))
- return;
-
- /*
- * Some request_fn implementations, e.g. scsi_request_fn(), unlock
- * the queue lock internally. As a result multiple threads may be
- * running such a request function concurrently. Keep track of the
- * number of active request_fn invocations such that blk_drain_queue()
- * can wait until all these request_fn calls have finished.
- */
- q->request_fn_active++;
- q->request_fn(q);
- q->request_fn_active--;
-}
-EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
-
-/**
- * __blk_run_queue - run a single device queue
- * @q: The queue to run
- *
- * Description:
- * See @blk_run_queue.
- */
-void __blk_run_queue(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (unlikely(blk_queue_stopped(q)))
- return;
-
- __blk_run_queue_uncond(q);
-}
-EXPORT_SYMBOL(__blk_run_queue);
-
-/**
- * blk_run_queue_async - run a single device queue in workqueue context
- * @q: The queue to run
- *
- * Description:
- * Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- * of us.
- *
- * Note:
- * Since it is not allowed to run q->delay_work after blk_cleanup_queue()
- * has canceled q->delay_work, callers must hold the queue lock to avoid
- * race conditions between blk_cleanup_queue() and blk_run_queue_async().
- */
-void blk_run_queue_async(struct request_queue *q)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
- mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
-}
-EXPORT_SYMBOL(blk_run_queue_async);
-
-/**
- * blk_run_queue - run a single device queue
- * @q: The queue to run
- *
- * Description:
- * Invoke request handling on this queue, if it has pending work to do.
- * May be used to restart queueing when a request has completed.
- */
-void blk_run_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- WARN_ON_ONCE(q->mq_ops);
-
- spin_lock_irqsave(q->queue_lock, flags);
- __blk_run_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_run_queue);
-
void blk_put_queue(struct request_queue *q)
{
kobject_put(&q->kobj);
}
EXPORT_SYMBOL(blk_put_queue);
-/**
- * __blk_drain_queue - drain requests from request_queue
- * @q: queue to drain
- * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
- *
- * Drain requests from @q. If @drain_all is set, all requests are drained.
- * If not, only ELVPRIV requests are drained. The caller is responsible
- * for ensuring that no new requests which need to be drained are queued.
- */
-static void __blk_drain_queue(struct request_queue *q, bool drain_all)
- __releases(q->queue_lock)
- __acquires(q->queue_lock)
-{
- int i;
-
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- while (true) {
- bool drain = false;
-
- /*
- * The caller might be trying to drain @q before its
- * elevator is initialized.
- */
- if (q->elevator)
- elv_drain_elevator(q);
-
- blkcg_drain_queue(q);
-
- /*
- * This function might be called on a queue which failed
- * driver init after queue creation or is not yet fully
- * active yet. Some drivers (e.g. fd and loop) get unhappy
- * in such cases. Kick queue iff dispatch queue has
- * something on it and @q has request_fn set.
- */
- if (!list_empty(&q->queue_head) && q->request_fn)
- __blk_run_queue(q);
-
- drain |= q->nr_rqs_elvpriv;
- drain |= q->request_fn_active;
-
- /*
- * Unfortunately, requests are queued at and tracked from
- * multiple places and there's no single counter which can
- * be drained. Check all the queues and counters.
- */
- if (drain_all) {
- struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
- drain |= !list_empty(&q->queue_head);
- for (i = 0; i < 2; i++) {
- drain |= q->nr_rqs[i];
- drain |= q->in_flight[i];
- if (fq)
- drain |= !list_empty(&fq->flush_queue[i]);
- }
- }
-
- if (!drain)
- break;
-
- spin_unlock_irq(q->queue_lock);
-
- msleep(10);
-
- spin_lock_irq(q->queue_lock);
- }
-
- /*
- * With queue marked dead, any woken up waiter will fail the
- * allocation path, so the wakeup chaining is lost and we're
- * left with hung waiters. We need to wake up those waiters.
- */
- if (q->request_fn) {
- struct request_list *rl;
-
- blk_queue_for_each_rl(rl, q)
- for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
- wake_up_all(&rl->wait[i]);
- }
-}
-
-void blk_drain_queue(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- __blk_drain_queue(q, true);
- spin_unlock_irq(q->queue_lock);
-}
-
-/**
- * blk_queue_bypass_start - enter queue bypass mode
- * @q: queue of interest
- *
- * In bypass mode, only the dispatch FIFO queue of @q is used. This
- * function makes @q enter bypass mode and drains all requests which were
- * throttled or issued before. On return, it's guaranteed that no request
- * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
- * inside queue or RCU read lock.
- */
-void blk_queue_bypass_start(struct request_queue *q)
-{
- WARN_ON_ONCE(q->mq_ops);
-
- spin_lock_irq(q->queue_lock);
- q->bypass_depth++;
- queue_flag_set(QUEUE_FLAG_BYPASS, q);
- spin_unlock_irq(q->queue_lock);
-
- /*
- * Queues start drained. Skip actual draining till init is
- * complete. This avoids lenghty delays during queue init which
- * can happen many times during boot.
- */
- if (blk_queue_init_done(q)) {
- spin_lock_irq(q->queue_lock);
- __blk_drain_queue(q, false);
- spin_unlock_irq(q->queue_lock);
-
- /* ensure blk_queue_bypass() is %true inside RCU read lock */
- synchronize_rcu();
- }
-}
-EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
-
-/**
- * blk_queue_bypass_end - leave queue bypass mode
- * @q: queue of interest
- *
- * Leave bypass mode and restore the normal queueing behavior.
- *
- * Note: although blk_queue_bypass_start() is only called for blk-sq queues,
- * this function is called for both blk-sq and blk-mq queues.
- */
-void blk_queue_bypass_end(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- if (!--q->bypass_depth)
- queue_flag_clear(QUEUE_FLAG_BYPASS, q);
- WARN_ON_ONCE(q->bypass_depth < 0);
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
-
void blk_set_queue_dying(struct request_queue *q)
{
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
@@ -697,20 +282,8 @@ void blk_set_queue_dying(struct request_queue *q)
*/
blk_freeze_queue_start(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_wake_waiters(q);
- else {
- struct request_list *rl;
-
- spin_lock_irq(q->queue_lock);
- blk_queue_for_each_rl(rl, q) {
- if (rl->rq_pool) {
- wake_up_all(&rl->wait[BLK_RW_SYNC]);
- wake_up_all(&rl->wait[BLK_RW_ASYNC]);
- }
- }
- spin_unlock_irq(q->queue_lock);
- }
/* Make blk_queue_enter() reexamine the DYING flag. */
wake_up_all(&q->mq_freeze_wq);
@@ -755,29 +328,13 @@ void blk_exit_queue(struct request_queue *q)
*/
void blk_cleanup_queue(struct request_queue *q)
{
- spinlock_t *lock = q->queue_lock;
-
/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
blk_set_queue_dying(q);
- spin_lock_irq(lock);
-
- /*
- * A dying queue is permanently in bypass mode till released. Note
- * that, unlike blk_queue_bypass_start(), we aren't performing
- * synchronize_rcu() after entering bypass mode to avoid the delay
- * as some drivers create and destroy a lot of queues while
- * probing. This is still safe because blk_release_queue() will be
- * called only after the queue refcnt drops to zero and nothing,
- * RCU or not, would be traversing the queue by then.
- */
- q->bypass_depth++;
- queue_flag_set(QUEUE_FLAG_BYPASS, q);
- queue_flag_set(QUEUE_FLAG_NOMERGES, q);
- queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- queue_flag_set(QUEUE_FLAG_DYING, q);
- spin_unlock_irq(lock);
+ blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
mutex_unlock(&q->sysfs_lock);
/*
@@ -788,9 +345,7 @@ void blk_cleanup_queue(struct request_queue *q)
rq_qos_exit(q);
- spin_lock_irq(lock);
- queue_flag_set(QUEUE_FLAG_DEAD, q);
- spin_unlock_irq(lock);
+ blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
/*
* make sure all in-progress dispatch are completed because
@@ -801,7 +356,7 @@ void blk_cleanup_queue(struct request_queue *q)
* We rely on driver to deal with the race in case that queue
* initialization isn't done.
*/
- if (q->mq_ops && blk_queue_init_done(q))
+ if (queue_is_mq(q) && blk_queue_init_done(q))
blk_mq_quiesce_queue(q);
/* for synchronous bio-based driver finish in-flight integrity i/o */
@@ -819,98 +374,19 @@ void blk_cleanup_queue(struct request_queue *q)
blk_exit_queue(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_free_queue(q);
- percpu_ref_exit(&q->q_usage_counter);
- spin_lock_irq(lock);
- if (q->queue_lock != &q->__queue_lock)
- q->queue_lock = &q->__queue_lock;
- spin_unlock_irq(lock);
+ percpu_ref_exit(&q->q_usage_counter);
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
EXPORT_SYMBOL(blk_cleanup_queue);
-/* Allocate memory local to the request queue */
-static void *alloc_request_simple(gfp_t gfp_mask, void *data)
-{
- struct request_queue *q = data;
-
- return kmem_cache_alloc_node(request_cachep, gfp_mask, q->node);
-}
-
-static void free_request_simple(void *element, void *data)
-{
- kmem_cache_free(request_cachep, element);
-}
-
-static void *alloc_request_size(gfp_t gfp_mask, void *data)
-{
- struct request_queue *q = data;
- struct request *rq;
-
- rq = kmalloc_node(sizeof(struct request) + q->cmd_size, gfp_mask,
- q->node);
- if (rq && q->init_rq_fn && q->init_rq_fn(q, rq, gfp_mask) < 0) {
- kfree(rq);
- rq = NULL;
- }
- return rq;
-}
-
-static void free_request_size(void *element, void *data)
-{
- struct request_queue *q = data;
-
- if (q->exit_rq_fn)
- q->exit_rq_fn(q, element);
- kfree(element);
-}
-
-int blk_init_rl(struct request_list *rl, struct request_queue *q,
- gfp_t gfp_mask)
-{
- if (unlikely(rl->rq_pool) || q->mq_ops)
- return 0;
-
- rl->q = q;
- rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
- rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
- init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
- init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
-
- if (q->cmd_size) {
- rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
- alloc_request_size, free_request_size,
- q, gfp_mask, q->node);
- } else {
- rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ,
- alloc_request_simple, free_request_simple,
- q, gfp_mask, q->node);
- }
- if (!rl->rq_pool)
- return -ENOMEM;
-
- if (rl != &q->root_rl)
- WARN_ON_ONCE(!blk_get_queue(q));
-
- return 0;
-}
-
-void blk_exit_rl(struct request_queue *q, struct request_list *rl)
-{
- if (rl->rq_pool) {
- mempool_destroy(rl->rq_pool);
- if (rl != &q->root_rl)
- blk_put_queue(q);
- }
-}
-
struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
{
- return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
+ return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
}
EXPORT_SYMBOL(blk_alloc_queue);
@@ -990,17 +466,8 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
* blk_alloc_queue_node - allocate a request queue
* @gfp_mask: memory allocation flags
* @node_id: NUMA node to allocate memory from
- * @lock: For legacy queues, pointer to a spinlock that will be used to e.g.
- * serialize calls to the legacy .request_fn() callback. Ignored for
- * blk-mq request queues.
- *
- * Note: pass the queue lock as the third argument to this function instead of
- * setting the queue lock pointer explicitly to avoid triggering a sporadic
- * crash in the blkcg code. This function namely calls blkcg_init_queue() and
- * the queue lock pointer must be set before blkcg_init_queue() is called.
*/
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
- spinlock_t *lock)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
{
struct request_queue *q;
int ret;
@@ -1012,8 +479,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
INIT_LIST_HEAD(&q->queue_head);
q->last_merge = NULL;
- q->end_sector = 0;
- q->boundary_rq = NULL;
q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
@@ -1041,12 +506,10 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
INIT_WORK(&q->timeout_work, NULL);
- INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
INIT_LIST_HEAD(&q->blkg_list);
#endif
- INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
kobject_init(&q->kobj, &blk_queue_ktype);
@@ -1054,18 +517,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
mutex_init(&q->blk_trace_mutex);
#endif
mutex_init(&q->sysfs_lock);
- spin_lock_init(&q->__queue_lock);
-
- q->queue_lock = lock ? : &q->__queue_lock;
-
- /*
- * A queue starts its life with bypass turned on to avoid
- * unnecessary bypass on/off overhead and nasty surprises during
- * init. The initial bypass will be finished when the queue is
- * registered by blk_register_queue().
- */
- q->bypass_depth = 1;
- queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
+ spin_lock_init(&q->queue_lock);
init_waitqueue_head(&q->mq_freeze_wq);
@@ -1099,105 +551,6 @@ fail_q:
}
EXPORT_SYMBOL(blk_alloc_queue_node);
-/**
- * blk_init_queue - prepare a request queue for use with a block device
- * @rfn: The function to be called to process requests that have been
- * placed on the queue.
- * @lock: Request queue spin lock
- *
- * Description:
- * If a block device wishes to use the standard request handling procedures,
- * which sorts requests and coalesces adjacent requests, then it must
- * call blk_init_queue(). The function @rfn will be called when there
- * are requests on the queue that need to be processed. If the device
- * supports plugging, then @rfn may not be called immediately when requests
- * are available on the queue, but may be called at some time later instead.
- * Plugged queues are generally unplugged when a buffer belonging to one
- * of the requests on the queue is needed, or due to memory pressure.
- *
- * @rfn is not required, or even expected, to remove all requests off the
- * queue, but only as many as it can handle at a time. If it does leave
- * requests on the queue, it is responsible for arranging that the requests
- * get dealt with eventually.
- *
- * The queue spin lock must be held while manipulating the requests on the
- * request queue; this lock will be taken also from interrupt context, so irq
- * disabling is needed for it.
- *
- * Function returns a pointer to the initialized request queue, or %NULL if
- * it didn't succeed.
- *
- * Note:
- * blk_init_queue() must be paired with a blk_cleanup_queue() call
- * when the block device is deactivated (such as at module unload).
- **/
-
-struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
-{
- return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
-}
-EXPORT_SYMBOL(blk_init_queue);
-
-struct request_queue *
-blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
-{
- struct request_queue *q;
-
- q = blk_alloc_queue_node(GFP_KERNEL, node_id, lock);
- if (!q)
- return NULL;
-
- q->request_fn = rfn;
- if (blk_init_allocated_queue(q) < 0) {
- blk_cleanup_queue(q);
- return NULL;
- }
-
- return q;
-}
-EXPORT_SYMBOL(blk_init_queue_node);
-
-static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
-
-
-int blk_init_allocated_queue(struct request_queue *q)
-{
- WARN_ON_ONCE(q->mq_ops);
-
- q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
- if (!q->fq)
- return -ENOMEM;
-
- if (q->init_rq_fn && q->init_rq_fn(q, q->fq->flush_rq, GFP_KERNEL))
- goto out_free_flush_queue;
-
- if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
- goto out_exit_flush_rq;
-
- INIT_WORK(&q->timeout_work, blk_timeout_work);
- q->queue_flags |= QUEUE_FLAG_DEFAULT;
-
- /*
- * This also sets hw/phys segments, boundary and size
- */
- blk_queue_make_request(q, blk_queue_bio);
-
- q->sg_reserved_size = INT_MAX;
-
- if (elevator_init(q))
- goto out_exit_flush_rq;
- return 0;
-
-out_exit_flush_rq:
- if (q->exit_rq_fn)
- q->exit_rq_fn(q, q->fq->flush_rq);
-out_free_flush_queue:
- blk_free_flush_queue(q->fq);
- q->fq = NULL;
- return -ENOMEM;
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
bool blk_get_queue(struct request_queue *q)
{
if (likely(!blk_queue_dying(q))) {
@@ -1209,406 +562,6 @@ bool blk_get_queue(struct request_queue *q)
}
EXPORT_SYMBOL(blk_get_queue);
-static inline void blk_free_request(struct request_list *rl, struct request *rq)
-{
- if (rq->rq_flags & RQF_ELVPRIV) {
- elv_put_request(rl->q, rq);
- if (rq->elv.icq)
- put_io_context(rq->elv.icq->ioc);
- }
-
- mempool_free(rq, rl->rq_pool);
-}
-
-/*
- * ioc_batching returns true if the ioc is a valid batching request and
- * should be given priority access to a request.
- */
-static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
-{
- if (!ioc)
- return 0;
-
- /*
- * Make sure the process is able to allocate at least 1 request
- * even if the batch times out, otherwise we could theoretically
- * lose wakeups.
- */
- return ioc->nr_batch_requests == q->nr_batching ||
- (ioc->nr_batch_requests > 0
- && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
-}
-
-/*
- * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
- * will cause the process to be a "batcher" on all queues in the system. This
- * is the behaviour we want though - once it gets a wakeup it should be given
- * a nice run.
- */
-static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
-{
- if (!ioc || ioc_batching(q, ioc))
- return;
-
- ioc->nr_batch_requests = q->nr_batching;
- ioc->last_waited = jiffies;
-}
-
-static void __freed_request(struct request_list *rl, int sync)
-{
- struct request_queue *q = rl->q;
-
- if (rl->count[sync] < queue_congestion_off_threshold(q))
- blk_clear_congested(rl, sync);
-
- if (rl->count[sync] + 1 <= q->nr_requests) {
- if (waitqueue_active(&rl->wait[sync]))
- wake_up(&rl->wait[sync]);
-
- blk_clear_rl_full(rl, sync);
- }
-}
-
-/*
- * A request has just been released. Account for it, update the full and
- * congestion status, wake up any waiters. Called under q->queue_lock.
- */
-static void freed_request(struct request_list *rl, bool sync,
- req_flags_t rq_flags)
-{
- struct request_queue *q = rl->q;
-
- q->nr_rqs[sync]--;
- rl->count[sync]--;
- if (rq_flags & RQF_ELVPRIV)
- q->nr_rqs_elvpriv--;
-
- __freed_request(rl, sync);
-
- if (unlikely(rl->starved[sync ^ 1]))
- __freed_request(rl, sync ^ 1);
-}
-
-int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
-{
- struct request_list *rl;
- int on_thresh, off_thresh;
-
- WARN_ON_ONCE(q->mq_ops);
-
- spin_lock_irq(q->queue_lock);
- q->nr_requests = nr;
- blk_queue_congestion_threshold(q);
- on_thresh = queue_congestion_on_threshold(q);
- off_thresh = queue_congestion_off_threshold(q);
-
- blk_queue_for_each_rl(rl, q) {
- if (rl->count[BLK_RW_SYNC] >= on_thresh)
- blk_set_congested(rl, BLK_RW_SYNC);
- else if (rl->count[BLK_RW_SYNC] < off_thresh)
- blk_clear_congested(rl, BLK_RW_SYNC);
-
- if (rl->count[BLK_RW_ASYNC] >= on_thresh)
- blk_set_congested(rl, BLK_RW_ASYNC);
- else if (rl->count[BLK_RW_ASYNC] < off_thresh)
- blk_clear_congested(rl, BLK_RW_ASYNC);
-
- if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
- blk_set_rl_full(rl, BLK_RW_SYNC);
- } else {
- blk_clear_rl_full(rl, BLK_RW_SYNC);
- wake_up(&rl->wait[BLK_RW_SYNC]);
- }
-
- if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
- blk_set_rl_full(rl, BLK_RW_ASYNC);
- } else {
- blk_clear_rl_full(rl, BLK_RW_ASYNC);
- wake_up(&rl->wait[BLK_RW_ASYNC]);
- }
- }
-
- spin_unlock_irq(q->queue_lock);
- return 0;
-}
-
-/**
- * __get_request - get a free request
- * @rl: request list to allocate from
- * @op: operation and flags
- * @bio: bio to allocate request for (can be %NULL)
- * @flags: BLQ_MQ_REQ_* flags
- * @gfp_mask: allocator flags
- *
- * Get a free request from @q. This function may fail under memory
- * pressure or if @q is dead.
- *
- * Must be called with @q->queue_lock held and,
- * Returns ERR_PTR on failure, with @q->queue_lock held.
- * Returns request pointer on success, with @q->queue_lock *not held*.
- */
-static struct request *__get_request(struct request_list *rl, unsigned int op,
- struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp_mask)
-{
- struct request_queue *q = rl->q;
- struct request *rq;
- struct elevator_type *et = q->elevator->type;
- struct io_context *ioc = rq_ioc(bio);
- struct io_cq *icq = NULL;
- const bool is_sync = op_is_sync(op);
- int may_queue;
- req_flags_t rq_flags = RQF_ALLOCED;
-
- lockdep_assert_held(q->queue_lock);
-
- if (unlikely(blk_queue_dying(q)))
- return ERR_PTR(-ENODEV);
-
- may_queue = elv_may_queue(q, op);
- if (may_queue == ELV_MQUEUE_NO)
- goto rq_starved;
-
- if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
- if (rl->count[is_sync]+1 >= q->nr_requests) {
- /*
- * The queue will fill after this allocation, so set
- * it as full, and mark this process as "batching".
- * This process will be allowed to complete a batch of
- * requests, others will be blocked.
- */
- if (!blk_rl_full(rl, is_sync)) {
- ioc_set_batching(q, ioc);
- blk_set_rl_full(rl, is_sync);
- } else {
- if (may_queue != ELV_MQUEUE_MUST
- && !ioc_batching(q, ioc)) {
- /*
- * The queue is full and the allocating
- * process is not a "batcher", and not
- * exempted by the IO scheduler
- */
- return ERR_PTR(-ENOMEM);
- }
- }
- }
- blk_set_congested(rl, is_sync);
- }
-
- /*
- * Only allow batching queuers to allocate up to 50% over the defined
- * limit of requests, otherwise we could have thousands of requests
- * allocated with any setting of ->nr_requests
- */
- if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
- return ERR_PTR(-ENOMEM);
-
- q->nr_rqs[is_sync]++;
- rl->count[is_sync]++;
- rl->starved[is_sync] = 0;
-
- /*
- * Decide whether the new request will be managed by elevator. If
- * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
- * prevent the current elevator from being destroyed until the new
- * request is freed. This guarantees icq's won't be destroyed and
- * makes creating new ones safe.
- *
- * Flush requests do not use the elevator so skip initialization.
- * This allows a request to share the flush and elevator data.
- *
- * Also, lookup icq while holding queue_lock. If it doesn't exist,
- * it will be created after releasing queue_lock.
- */
- if (!op_is_flush(op) && !blk_queue_bypass(q)) {
- rq_flags |= RQF_ELVPRIV;
- q->nr_rqs_elvpriv++;
- if (et->icq_cache && ioc)
- icq = ioc_lookup_icq(ioc, q);
- }
-
- if (blk_queue_io_stat(q))
- rq_flags |= RQF_IO_STAT;
- spin_unlock_irq(q->queue_lock);
-
- /* allocate and init request */
- rq = mempool_alloc(rl->rq_pool, gfp_mask);
- if (!rq)
- goto fail_alloc;
-
- blk_rq_init(q, rq);
- blk_rq_set_rl(rq, rl);
- rq->cmd_flags = op;
- rq->rq_flags = rq_flags;
- if (flags & BLK_MQ_REQ_PREEMPT)
- rq->rq_flags |= RQF_PREEMPT;
-
- /* init elvpriv */
- if (rq_flags & RQF_ELVPRIV) {
- if (unlikely(et->icq_cache && !icq)) {
- if (ioc)
- icq = ioc_create_icq(ioc, q, gfp_mask);
- if (!icq)
- goto fail_elvpriv;
- }
-
- rq->elv.icq = icq;
- if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
- goto fail_elvpriv;
-
- /* @rq->elv.icq holds io_context until @rq is freed */
- if (icq)
- get_io_context(icq->ioc);
- }
-out:
- /*
- * ioc may be NULL here, and ioc_batching will be false. That's
- * OK, if the queue is under the request limit then requests need
- * not count toward the nr_batch_requests limit. There will always
- * be some limit enforced by BLK_BATCH_TIME.
- */
- if (ioc_batching(q, ioc))
- ioc->nr_batch_requests--;
-
- trace_block_getrq(q, bio, op);
- return rq;
-
-fail_elvpriv:
- /*
- * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
- * and may fail indefinitely under memory pressure and thus
- * shouldn't stall IO. Treat this request as !elvpriv. This will
- * disturb iosched and blkcg but weird is bettern than dead.
- */
- printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
- __func__, dev_name(q->backing_dev_info->dev));
-
- rq->rq_flags &= ~RQF_ELVPRIV;
- rq->elv.icq = NULL;
-
- spin_lock_irq(q->queue_lock);
- q->nr_rqs_elvpriv--;
- spin_unlock_irq(q->queue_lock);
- goto out;
-
-fail_alloc:
- /*
- * Allocation failed presumably due to memory. Undo anything we
- * might have messed up.
- *
- * Allocating task should really be put onto the front of the wait
- * queue, but this is pretty rare.
- */
- spin_lock_irq(q->queue_lock);
- freed_request(rl, is_sync, rq_flags);
-
- /*
- * in the very unlikely event that allocation failed and no
- * requests for this direction was pending, mark us starved so that
- * freeing of a request in the other direction will notice
- * us. another possible fix would be to split the rq mempool into
- * READ and WRITE
- */
-rq_starved:
- if (unlikely(rl->count[is_sync] == 0))
- rl->starved[is_sync] = 1;
- return ERR_PTR(-ENOMEM);
-}
-
-/**
- * get_request - get a free request
- * @q: request_queue to allocate request from
- * @op: operation and flags
- * @bio: bio to allocate request for (can be %NULL)
- * @flags: BLK_MQ_REQ_* flags.
- * @gfp: allocator flags
- *
- * Get a free request from @q. If %BLK_MQ_REQ_NOWAIT is set in @flags,
- * this function keeps retrying under memory pressure and fails iff @q is dead.
- *
- * Must be called with @q->queue_lock held and,
- * Returns ERR_PTR on failure, with @q->queue_lock held.
- * Returns request pointer on success, with @q->queue_lock *not held*.
- */
-static struct request *get_request(struct request_queue *q, unsigned int op,
- struct bio *bio, blk_mq_req_flags_t flags, gfp_t gfp)
-{
- const bool is_sync = op_is_sync(op);
- DEFINE_WAIT(wait);
- struct request_list *rl;
- struct request *rq;
-
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- rl = blk_get_rl(q, bio); /* transferred to @rq on success */
-retry:
- rq = __get_request(rl, op, bio, flags, gfp);
- if (!IS_ERR(rq))
- return rq;
-
- if (op & REQ_NOWAIT) {
- blk_put_rl(rl);
- return ERR_PTR(-EAGAIN);
- }
-
- if ((flags & BLK_MQ_REQ_NOWAIT) || unlikely(blk_queue_dying(q))) {
- blk_put_rl(rl);
- return rq;
- }
-
- /* wait on @rl and retry */
- prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
- TASK_UNINTERRUPTIBLE);
-
- trace_block_sleeprq(q, bio, op);
-
- spin_unlock_irq(q->queue_lock);
- io_schedule();
-
- /*
- * After sleeping, we become a "batching" process and will be able
- * to allocate at least one request, and up to a big batch of them
- * for a small period time. See ioc_batching, ioc_set_batching
- */
- ioc_set_batching(q, current->io_context);
-
- spin_lock_irq(q->queue_lock);
- finish_wait(&rl->wait[is_sync], &wait);
-
- goto retry;
-}
-
-/* flags: BLK_MQ_REQ_PREEMPT and/or BLK_MQ_REQ_NOWAIT. */
-static struct request *blk_old_get_request(struct request_queue *q,
- unsigned int op, blk_mq_req_flags_t flags)
-{
- struct request *rq;
- gfp_t gfp_mask = flags & BLK_MQ_REQ_NOWAIT ? GFP_ATOMIC : GFP_NOIO;
- int ret = 0;
-
- WARN_ON_ONCE(q->mq_ops);
-
- /* create ioc upfront */
- create_io_context(gfp_mask, q->node);
-
- ret = blk_queue_enter(q, flags);
- if (ret)
- return ERR_PTR(ret);
- spin_lock_irq(q->queue_lock);
- rq = get_request(q, op, NULL, flags, gfp_mask);
- if (IS_ERR(rq)) {
- spin_unlock_irq(q->queue_lock);
- blk_queue_exit(q);
- return rq;
- }
-
- /* q->queue_lock is unlocked at this point */
- rq->__data_len = 0;
- rq->__sector = (sector_t) -1;
- rq->bio = rq->biotail = NULL;
- return rq;
-}
-
/**
* blk_get_request - allocate a request
* @q: request queue to allocate a request for
@@ -1623,170 +576,17 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
WARN_ON_ONCE(op & REQ_NOWAIT);
WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
- if (q->mq_ops) {
- req = blk_mq_alloc_request(q, op, flags);
- if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
- q->mq_ops->initialize_rq_fn(req);
- } else {
- req = blk_old_get_request(q, op, flags);
- if (!IS_ERR(req) && q->initialize_rq_fn)
- q->initialize_rq_fn(req);
- }
+ req = blk_mq_alloc_request(q, op, flags);
+ if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
+ q->mq_ops->initialize_rq_fn(req);
return req;
}
EXPORT_SYMBOL(blk_get_request);
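
/*
 * Minimal illustrative sketch, not part of the patch: with the legacy
 * path gone, every blk_get_request() caller is served by blk-mq. A
 * passthrough user allocates, uses and releases a request as below;
 * error handling is trimmed and the helper name is hypothetical.
 */
static void example_passthrough(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return;

	/* ... fill in the passthrough payload here ... */

	blk_put_request(rq);
}
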
-/**
- * blk_requeue_request - put a request back on queue
- * @q: request queue where request should be inserted
- * @rq: request to be inserted
- *
- * Description:
- * Drivers often keep queueing requests until the hardware cannot accept
- * more, when that condition happens we need to put the request back
- * on the queue. Must be called with queue lock held.
- */
-void blk_requeue_request(struct request_queue *q, struct request *rq)
-{
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- blk_delete_timer(rq);
- blk_clear_rq_complete(rq);
- trace_block_rq_requeue(q, rq);
- rq_qos_requeue(q, rq);
-
- if (rq->rq_flags & RQF_QUEUED)
- blk_queue_end_tag(q, rq);
-
- BUG_ON(blk_queued_rq(rq));
-
- elv_requeue_request(q, rq);
-}
-EXPORT_SYMBOL(blk_requeue_request);
-
-static void add_acct_request(struct request_queue *q, struct request *rq,
- int where)
-{
- blk_account_io_start(rq, true);
- __elv_add_request(q, rq, where);
-}
-
-static void part_round_stats_single(struct request_queue *q, int cpu,
- struct hd_struct *part, unsigned long now,
- unsigned int inflight)
-{
- if (inflight) {
- __part_stat_add(cpu, part, time_in_queue,
- inflight * (now - part->stamp));
- __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
- }
- part->stamp = now;
-}
-
-/**
- * part_round_stats() - Round off the performance stats on a struct disk_stats.
- * @q: target block queue
- * @cpu: cpu number for stats access
- * @part: target partition
- *
- * The average IO queue length and utilisation statistics are maintained
- * by observing the current state of the queue length and the amount of
- * time it has been in this state for.
- *
- * Normally, that accounting is done on IO completion, but that can result
- * in more than a second's worth of IO being accounted for within any one
- * second, leading to >100% utilisation. To deal with that, we call this
- * function to do a round-off before returning the results when reading
- * /proc/diskstats. This accounts immediately for all queue usage up to
- * the current jiffies and restarts the counters again.
- */
-void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
-{
- struct hd_struct *part2 = NULL;
- unsigned long now = jiffies;
- unsigned int inflight[2];
- int stats = 0;
-
- if (part->stamp != now)
- stats |= 1;
-
- if (part->partno) {
- part2 = &part_to_disk(part)->part0;
- if (part2->stamp != now)
- stats |= 2;
- }
-
- if (!stats)
- return;
-
- part_in_flight(q, part, inflight);
-
- if (stats & 2)
- part_round_stats_single(q, cpu, part2, now, inflight[1]);
- if (stats & 1)
- part_round_stats_single(q, cpu, part, now, inflight[0]);
-}
-EXPORT_SYMBOL_GPL(part_round_stats);
-
-void __blk_put_request(struct request_queue *q, struct request *req)
-{
- req_flags_t rq_flags = req->rq_flags;
-
- if (unlikely(!q))
- return;
-
- if (q->mq_ops) {
- blk_mq_free_request(req);
- return;
- }
-
- lockdep_assert_held(q->queue_lock);
-
- blk_req_zone_write_unlock(req);
- blk_pm_put_request(req);
- blk_pm_mark_last_busy(req);
-
- elv_completed_request(q, req);
-
- /* this is a bio leak */
- WARN_ON(req->bio != NULL);
-
- rq_qos_done(q, req);
-
- /*
- * Request may not have originated from ll_rw_blk. If not,
- * it didn't come out of our reserved rq pools
- */
- if (rq_flags & RQF_ALLOCED) {
- struct request_list *rl = blk_rq_rl(req);
- bool sync = op_is_sync(req->cmd_flags);
-
- BUG_ON(!list_empty(&req->queuelist));
- BUG_ON(ELV_ON_HASH(req));
-
- blk_free_request(rl, req);
- freed_request(rl, sync, rq_flags);
- blk_put_rl(rl);
- blk_queue_exit(q);
- }
-}
-EXPORT_SYMBOL_GPL(__blk_put_request);
-
void blk_put_request(struct request *req)
{
- struct request_queue *q = req->q;
-
- if (q->mq_ops)
- blk_mq_free_request(req);
- else {
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- __blk_put_request(q, req);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ blk_mq_free_request(req);
}
EXPORT_SYMBOL(blk_put_request);
@@ -1806,7 +606,6 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
req->biotail->bi_next = bio;
req->biotail = bio;
req->__data_len += bio->bi_iter.bi_size;
- req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
return true;
@@ -1830,7 +629,6 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
req->__sector = bio->bi_iter.bi_sector;
req->__data_len += bio->bi_iter.bi_size;
- req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
blk_account_io_start(req, false);
return true;
@@ -1850,7 +648,6 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
req->biotail->bi_next = bio;
req->biotail = bio;
req->__data_len += bio->bi_iter.bi_size;
- req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
req->nr_phys_segments = segments + 1;
blk_account_io_start(req, false);
@@ -1883,7 +680,6 @@ no_merge:
* Caller must ensure !blk_queue_nomerges(q) beforehand.
*/
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
- unsigned int *request_count,
struct request **same_queue_rq)
{
struct blk_plug *plug;
@@ -1893,25 +689,19 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
plug = current->plug;
if (!plug)
return false;
- *request_count = 0;
- if (q->mq_ops)
- plug_list = &plug->mq_list;
- else
- plug_list = &plug->list;
+ plug_list = &plug->mq_list;
list_for_each_entry_reverse(rq, plug_list, queuelist) {
bool merged = false;
- if (rq->q == q) {
- (*request_count)++;
+ if (rq->q == q && same_queue_rq) {
/*
* Only blk-mq multiple hardware queues case checks the
* rq in the same queue, there should be only one such
* rq in a queue
**/
- if (same_queue_rq)
- *same_queue_rq = rq;
+ *same_queue_rq = rq;
}
if (rq->q != q || !blk_rq_merge_ok(rq, bio))
@@ -1938,176 +728,18 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
return false;
}
-unsigned int blk_plug_queued_count(struct request_queue *q)
-{
- struct blk_plug *plug;
- struct request *rq;
- struct list_head *plug_list;
- unsigned int ret = 0;
-
- plug = current->plug;
- if (!plug)
- goto out;
-
- if (q->mq_ops)
- plug_list = &plug->mq_list;
- else
- plug_list = &plug->list;
-
- list_for_each_entry(rq, plug_list, queuelist) {
- if (rq->q == q)
- ret++;
- }
-out:
- return ret;
-}
-
void blk_init_request_from_bio(struct request *req, struct bio *bio)
{
- struct io_context *ioc = rq_ioc(bio);
-
if (bio->bi_opf & REQ_RAHEAD)
req->cmd_flags |= REQ_FAILFAST_MASK;
req->__sector = bio->bi_iter.bi_sector;
- if (ioprio_valid(bio_prio(bio)))
- req->ioprio = bio_prio(bio);
- else if (ioc)
- req->ioprio = ioc->ioprio;
- else
- req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+ req->ioprio = bio_prio(bio);
req->write_hint = bio->bi_write_hint;
blk_rq_bio_prep(req->q, req, bio);
}
EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
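/*
 * Illustrative sketch: the request now inherits its priority straight
 * from the bio rather than falling back to the submitter's io_context,
 * so a caller that cares about priority sets it on the bio beforehand:
 */
bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4));
submit_bio(bio);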
-static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
-{
- struct blk_plug *plug;
- int where = ELEVATOR_INSERT_SORT;
- struct request *req, *free;
- unsigned int request_count = 0;
-
- /*
- * low level driver can indicate that it wants pages above a
- * certain limit bounced to low memory (ie for highmem, or even
- * ISA dma in theory)
- */
- blk_queue_bounce(q, &bio);
-
- blk_queue_split(q, &bio);
-
- if (!bio_integrity_prep(bio))
- return BLK_QC_T_NONE;
-
- if (op_is_flush(bio->bi_opf)) {
- spin_lock_irq(q->queue_lock);
- where = ELEVATOR_INSERT_FLUSH;
- goto get_rq;
- }
-
- /*
- * Check if we can merge with the plugged list before grabbing
- * any locks.
- */
- if (!blk_queue_nomerges(q)) {
- if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
- return BLK_QC_T_NONE;
- } else
- request_count = blk_plug_queued_count(q);
-
- spin_lock_irq(q->queue_lock);
-
- switch (elv_merge(q, &req, bio)) {
- case ELEVATOR_BACK_MERGE:
- if (!bio_attempt_back_merge(q, req, bio))
- break;
- elv_bio_merged(q, req, bio);
- free = attempt_back_merge(q, req);
- if (free)
- __blk_put_request(q, free);
- else
- elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
- goto out_unlock;
- case ELEVATOR_FRONT_MERGE:
- if (!bio_attempt_front_merge(q, req, bio))
- break;
- elv_bio_merged(q, req, bio);
- free = attempt_front_merge(q, req);
- if (free)
- __blk_put_request(q, free);
- else
- elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
- goto out_unlock;
- default:
- break;
- }
-
-get_rq:
- rq_qos_throttle(q, bio, q->queue_lock);
-
- /*
- * Grab a free request. This might sleep but cannot fail.
- * Returns with the queue unlocked.
- */
- blk_queue_enter_live(q);
- req = get_request(q, bio->bi_opf, bio, 0, GFP_NOIO);
- if (IS_ERR(req)) {
- blk_queue_exit(q);
- rq_qos_cleanup(q, bio);
- if (PTR_ERR(req) == -ENOMEM)
- bio->bi_status = BLK_STS_RESOURCE;
- else
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
- goto out_unlock;
- }
-
- rq_qos_track(q, req, bio);
-
- /*
- * After dropping the lock and possibly sleeping here, our request
- * may now be mergeable after it had proven unmergeable (above).
- * We don't worry about that case for efficiency. It won't happen
- * often, and the elevators are able to handle it.
- */
- blk_init_request_from_bio(req, bio);
-
- if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
- req->cpu = raw_smp_processor_id();
-
- plug = current->plug;
- if (plug) {
- /*
- * If this is the first request added after a plug, fire
- * of a plug trace.
- *
- * @request_count may become stale because of schedule
- * out, so check plug list again.
- */
- if (!request_count || list_empty(&plug->list))
- trace_block_plug(q);
- else {
- struct request *last = list_entry_rq(plug->list.prev);
- if (request_count >= BLK_MAX_REQUEST_COUNT ||
- blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
- blk_flush_plug_list(plug, false);
- trace_block_plug(q);
- }
- }
- list_add_tail(&req->queuelist, &plug->list);
- blk_account_io_start(req, true);
- } else {
- spin_lock_irq(q->queue_lock);
- add_acct_request(q, req, where);
- __blk_run_queue(q);
-out_unlock:
- spin_unlock_irq(q->queue_lock);
- }
-
- return BLK_QC_T_NONE;
-}
-
static void handle_bad_sector(struct bio *bio, sector_t maxsector)
{
char b[BDEVNAME_SIZE];
@@ -2259,7 +891,7 @@ generic_make_request_checks(struct bio *bio)
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue is not a request based queue.
*/
- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
+ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
goto not_supported;
if (should_fail_bio(bio))
@@ -2289,6 +921,9 @@ generic_make_request_checks(struct bio *bio)
}
}
+ if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ bio->bi_opf &= ~REQ_HIPRI;
+
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
if (!blk_queue_discard(q))
@@ -2561,17 +1196,6 @@ blk_qc_t submit_bio(struct bio *bio)
}
EXPORT_SYMBOL(submit_bio);
-bool blk_poll(struct request_queue *q, blk_qc_t cookie)
-{
- if (!q->poll_fn || !blk_qc_t_valid(cookie))
- return false;
-
- if (current->plug)
- blk_flush_plug_list(current->plug, false);
- return q->poll_fn(q, cookie);
-}
-EXPORT_SYMBOL_GPL(blk_poll);
-
/**
* blk_cloned_rq_check_limits - Helper function to check a cloned request
* for new the queue limits
@@ -2619,8 +1243,7 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
*/
blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
- unsigned long flags;
- int where = ELEVATOR_INSERT_BACK;
+ blk_qc_t unused;
if (blk_cloned_rq_check_limits(q, rq))
return BLK_STS_IOERR;
@@ -2629,38 +1252,15 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
return BLK_STS_IOERR;
- if (q->mq_ops) {
- if (blk_queue_io_stat(q))
- blk_account_io_start(rq, true);
- /*
- * Since we have a scheduler attached on the top device,
- * bypass a potential scheduler on the bottom device for
- * insert.
- */
- return blk_mq_request_issue_directly(rq);
- }
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (unlikely(blk_queue_dying(q))) {
- spin_unlock_irqrestore(q->queue_lock, flags);
- return BLK_STS_IOERR;
- }
+ if (blk_queue_io_stat(q))
+ blk_account_io_start(rq, true);
/*
- * Submitting request must be dequeued before calling this function
- * because it will be linked to another request_queue
+ * Since we have a scheduler attached on the top device,
+ * bypass a potential scheduler on the bottom device for
+ * insert.
*/
- BUG_ON(blk_queued_rq(rq));
-
- if (op_is_flush(rq->cmd_flags))
- where = ELEVATOR_INSERT_FLUSH;
-
- add_acct_request(q, rq, where);
- if (where == ELEVATOR_INSERT_FLUSH)
- __blk_run_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return BLK_STS_OK;
+ return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
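/*
 * Illustrative sketch: stacking drivers such as dm-rq are the callers of
 * this helper; the clone is now always issued directly to the bottom
 * blk-mq device, so they handle the returned status themselves
 * (requeue/fail helpers hypothetical):
 */
ret = blk_insert_cloned_request(clone->q, clone);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
	example_requeue_original(orig_rq);	/* retry later */
else if (ret != BLK_STS_OK)
	example_fail_original(orig_rq, ret);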
@@ -2710,11 +1310,10 @@ void blk_account_io_completion(struct request *req, unsigned int bytes)
if (blk_do_io_stat(req)) {
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
- int cpu;
- cpu = part_stat_lock();
+ part_stat_lock();
part = req->part;
- part_stat_add(cpu, part, sectors[sgrp], bytes >> 9);
+ part_stat_add(part, sectors[sgrp], bytes >> 9);
part_stat_unlock();
}
}
@@ -2729,14 +1328,14 @@ void blk_account_io_done(struct request *req, u64 now)
if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
- int cpu;
- cpu = part_stat_lock();
+ part_stat_lock();
part = req->part;
- part_stat_inc(cpu, part, ios[sgrp]);
- part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns);
- part_round_stats(req->q, cpu, part);
+ update_io_ticks(part, jiffies);
+ part_stat_inc(part, ios[sgrp]);
+ part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
+ part_stat_add(part, time_in_queue, nsecs_to_jiffies64(now - req->start_time_ns));
part_dec_in_flight(req->q, part, rq_data_dir(req));
hd_struct_put(part);
@@ -2748,16 +1347,15 @@ void blk_account_io_start(struct request *rq, bool new_io)
{
struct hd_struct *part;
int rw = rq_data_dir(rq);
- int cpu;
if (!blk_do_io_stat(rq))
return;
- cpu = part_stat_lock();
+ part_stat_lock();
if (!new_io) {
part = rq->part;
- part_stat_inc(cpu, part, merges[rw]);
+ part_stat_inc(part, merges[rw]);
} else {
part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
if (!hd_struct_try_get(part)) {
@@ -2772,232 +1370,14 @@ void blk_account_io_start(struct request *rq, bool new_io)
part = &rq->rq_disk->part0;
hd_struct_get(part);
}
- part_round_stats(rq->q, cpu, part);
part_inc_in_flight(rq->q, part, rw);
rq->part = part;
}
- part_stat_unlock();
-}
-
-static struct request *elv_next_request(struct request_queue *q)
-{
- struct request *rq;
- struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
-
- WARN_ON_ONCE(q->mq_ops);
-
- while (1) {
- list_for_each_entry(rq, &q->queue_head, queuelist) {
-#ifdef CONFIG_PM
- /*
- * If a request gets queued in state RPM_SUSPENDED
- * then that's a kernel bug.
- */
- WARN_ON_ONCE(q->rpm_status == RPM_SUSPENDED);
-#endif
- return rq;
- }
-
- /*
- * A flush request is running and flush requests aren't queueable
- * in the drive, so we can hold the queue until the flush request is
- * finished. Even if we don't do this, the driver can't dispatch next
- * requests and will requeue them, and this can improve
- * throughput too. For example, say we have requests flush1, write1,
- * flush2. flush1 is dispatched, then the queue is held and write1
- * isn't inserted into the queue. After flush1 finishes, flush2
- * will be dispatched. Since the disk cache is already clean,
- * flush2 will finish very soon, so it looks like flush2 was
- * folded into flush1.
- * Since the queue is held, a flag is set to indicate that the queue
- * should be restarted later. Please see flush_end_io() for
- * details.
- */
- if (fq->flush_pending_idx != fq->flush_running_idx &&
- !queue_flush_queueable(q)) {
- fq->flush_queue_delayed = 1;
- return NULL;
- }
- if (unlikely(blk_queue_bypass(q)) ||
- !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
- return NULL;
- }
-}
-
-/**
- * blk_peek_request - peek at the top of a request queue
- * @q: request queue to peek at
- *
- * Description:
- * Return the request at the top of @q. The returned request
- * should be started using blk_start_request() before LLD starts
- * processing it.
- *
- * Return:
- * Pointer to the request at the top of @q if available. Null
- * otherwise.
- */
-struct request *blk_peek_request(struct request_queue *q)
-{
- struct request *rq;
- int ret;
-
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- while ((rq = elv_next_request(q)) != NULL) {
- if (!(rq->rq_flags & RQF_STARTED)) {
- /*
- * This is the first time the device driver
- * sees this request (possibly after
- * requeueing). Notify IO scheduler.
- */
- if (rq->rq_flags & RQF_SORTED)
- elv_activate_rq(q, rq);
-
- /*
- * just mark as started even if we don't start
- * it, a request that has been delayed should
- * not be passed by new incoming requests
- */
- rq->rq_flags |= RQF_STARTED;
- trace_block_rq_issue(q, rq);
- }
-
- if (!q->boundary_rq || q->boundary_rq == rq) {
- q->end_sector = rq_end_sector(rq);
- q->boundary_rq = NULL;
- }
-
- if (rq->rq_flags & RQF_DONTPREP)
- break;
-
- if (q->dma_drain_size && blk_rq_bytes(rq)) {
- /*
- * make sure space for the drain appears we
- * know we can do this because max_hw_segments
- * has been adjusted to be one fewer than the
- * device can handle
- */
- rq->nr_phys_segments++;
- }
-
- if (!q->prep_rq_fn)
- break;
-
- ret = q->prep_rq_fn(q, rq);
- if (ret == BLKPREP_OK) {
- break;
- } else if (ret == BLKPREP_DEFER) {
- /*
- * the request may have been (partially) prepped.
- * we need to keep this request in the front to
- * avoid resource deadlock. RQF_STARTED will
- * prevent other fs requests from passing this one.
- */
- if (q->dma_drain_size && blk_rq_bytes(rq) &&
- !(rq->rq_flags & RQF_DONTPREP)) {
- /*
- * remove the space for the drain we added
- * so that we don't add it again
- */
- --rq->nr_phys_segments;
- }
-
- rq = NULL;
- break;
- } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
- rq->rq_flags |= RQF_QUIET;
- /*
- * Mark this request as started so we don't trigger
- * any debug logic in the end I/O path.
- */
- blk_start_request(rq);
- __blk_end_request_all(rq, ret == BLKPREP_INVALID ?
- BLK_STS_TARGET : BLK_STS_IOERR);
- } else {
- printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
- break;
- }
- }
-
- return rq;
-}
-EXPORT_SYMBOL(blk_peek_request);
-
-static void blk_dequeue_request(struct request *rq)
-{
- struct request_queue *q = rq->q;
+ update_io_ticks(part, jiffies);
- BUG_ON(list_empty(&rq->queuelist));
- BUG_ON(ELV_ON_HASH(rq));
-
- list_del_init(&rq->queuelist);
-
- /*
- * the time frame between a request being removed from the lists
- * and to it is freed is accounted as io that is in progress at
- * the driver side.
- */
- if (blk_account_rq(rq))
- q->in_flight[rq_is_sync(rq)]++;
-}
-
-/**
- * blk_start_request - start request processing on the driver
- * @req: request to dequeue
- *
- * Description:
- * Dequeue @req and start timeout timer on it. This hands off the
- * request to the driver.
- */
-void blk_start_request(struct request *req)
-{
- lockdep_assert_held(req->q->queue_lock);
- WARN_ON_ONCE(req->q->mq_ops);
-
- blk_dequeue_request(req);
-
- if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
- req->io_start_time_ns = ktime_get_ns();
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- req->throtl_size = blk_rq_sectors(req);
-#endif
- req->rq_flags |= RQF_STATS;
- rq_qos_issue(req->q, req);
- }
-
- BUG_ON(blk_rq_is_complete(req));
- blk_add_timer(req);
-}
-EXPORT_SYMBOL(blk_start_request);
-
-/**
- * blk_fetch_request - fetch a request from a request queue
- * @q: request queue to fetch a request from
- *
- * Description:
- * Return the request at the top of @q. The request is started on
- * return and LLD can start processing it immediately.
- *
- * Return:
- * Pointer to the request at the top of @q if available. Null
- * otherwise.
- */
-struct request *blk_fetch_request(struct request_queue *q)
-{
- struct request *rq;
-
- lockdep_assert_held(q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- rq = blk_peek_request(q);
- if (rq)
- blk_start_request(rq);
- return rq;
+ part_stat_unlock();
}
-EXPORT_SYMBOL(blk_fetch_request);
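/*
 * Illustrative sketch of the reworked per-partition statistics API used
 * in the hunks above: the explicit cpu argument is gone, the helpers
 * operate on the current cpu implicitly, and update_io_ticks() replaces
 * part_round_stats() (part/nr_sects are placeholders):
 */
part_stat_lock();
update_io_ticks(part, jiffies);
part_stat_inc(part, ios[STAT_READ]);
part_stat_add(part, sectors[STAT_READ], nr_sects);
part_stat_unlock();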
/*
* Steal bios from a request and add them to a bio list.
@@ -3124,255 +1504,6 @@ bool blk_update_request(struct request *req, blk_status_t error,
}
EXPORT_SYMBOL_GPL(blk_update_request);
-static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes,
- unsigned int bidi_bytes)
-{
- if (blk_update_request(rq, error, nr_bytes))
- return true;
-
- /* Bidi request must be completed as a whole */
- if (unlikely(blk_bidi_rq(rq)) &&
- blk_update_request(rq->next_rq, error, bidi_bytes))
- return true;
-
- if (blk_queue_add_random(rq->q))
- add_disk_randomness(rq->rq_disk);
-
- return false;
-}
-
-/**
- * blk_unprep_request - unprepare a request
- * @req: the request
- *
- * This function makes a request ready for complete resubmission (or
- * completion). It happens only after all error handling is complete,
- * so represents the appropriate moment to deallocate any resources
- * that were allocated to the request in the prep_rq_fn. The queue
- * lock is held when calling this.
- */
-void blk_unprep_request(struct request *req)
-{
- struct request_queue *q = req->q;
-
- req->rq_flags &= ~RQF_DONTPREP;
- if (q->unprep_rq_fn)
- q->unprep_rq_fn(q, req);
-}
-EXPORT_SYMBOL_GPL(blk_unprep_request);
-
-void blk_finish_request(struct request *req, blk_status_t error)
-{
- struct request_queue *q = req->q;
- u64 now = ktime_get_ns();
-
- lockdep_assert_held(req->q->queue_lock);
- WARN_ON_ONCE(q->mq_ops);
-
- if (req->rq_flags & RQF_STATS)
- blk_stat_add(req, now);
-
- if (req->rq_flags & RQF_QUEUED)
- blk_queue_end_tag(q, req);
-
- BUG_ON(blk_queued_rq(req));
-
- if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
- laptop_io_completion(req->q->backing_dev_info);
-
- blk_delete_timer(req);
-
- if (req->rq_flags & RQF_DONTPREP)
- blk_unprep_request(req);
-
- blk_account_io_done(req, now);
-
- if (req->end_io) {
- rq_qos_done(q, req);
- req->end_io(req, error);
- } else {
- if (blk_bidi_rq(req))
- __blk_put_request(req->next_rq->q, req->next_rq);
-
- __blk_put_request(q, req);
- }
-}
-EXPORT_SYMBOL(blk_finish_request);
-
-/**
- * blk_end_bidi_request - Complete a bidi request
- * @rq: the request to complete
- * @error: block status code
- * @nr_bytes: number of bytes to complete @rq
- * @bidi_bytes: number of bytes to complete @rq->next_rq
- *
- * Description:
- * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
- * Drivers that support bidi can safely call this member for any
- * type of request, bidi or uni. In the latter case @bidi_bytes is
- * just ignored.
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- **/
-static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes, unsigned int bidi_bytes)
-{
- struct request_queue *q = rq->q;
- unsigned long flags;
-
- WARN_ON_ONCE(q->mq_ops);
-
- if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
- return true;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_finish_request(rq, error);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return false;
-}
-
-/**
- * __blk_end_bidi_request - Complete a bidi request with queue lock held
- * @rq: the request to complete
- * @error: block status code
- * @nr_bytes: number of bytes to complete @rq
- * @bidi_bytes: number of bytes to complete @rq->next_rq
- *
- * Description:
- * Identical to blk_end_bidi_request() except that queue lock is
- * assumed to be locked on entry and remains so on return.
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- **/
-static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes, unsigned int bidi_bytes)
-{
- lockdep_assert_held(rq->q->queue_lock);
- WARN_ON_ONCE(rq->q->mq_ops);
-
- if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
- return true;
-
- blk_finish_request(rq, error);
-
- return false;
-}
-
-/**
- * blk_end_request - Helper function for drivers to complete the request.
- * @rq: the request being processed
- * @error: block status code
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- * Ends I/O on a number of bytes attached to @rq.
- * If @rq has leftover, sets it up for the next range of segments.
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- **/
-bool blk_end_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes)
-{
- WARN_ON_ONCE(rq->q->mq_ops);
- return blk_end_bidi_request(rq, error, nr_bytes, 0);
-}
-EXPORT_SYMBOL(blk_end_request);
-
-/**
- * blk_end_request_all - Helper function for drivers to finish the request.
- * @rq: the request to finish
- * @error: block status code
- *
- * Description:
- * Completely finish @rq.
- */
-void blk_end_request_all(struct request *rq, blk_status_t error)
-{
- bool pending;
- unsigned int bidi_bytes = 0;
-
- if (unlikely(blk_bidi_rq(rq)))
- bidi_bytes = blk_rq_bytes(rq->next_rq);
-
- pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
- BUG_ON(pending);
-}
-EXPORT_SYMBOL(blk_end_request_all);
-
-/**
- * __blk_end_request - Helper function for drivers to complete the request.
- * @rq: the request being processed
- * @error: block status code
- * @nr_bytes: number of bytes to complete
- *
- * Description:
- * Must be called with queue lock held unlike blk_end_request().
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- **/
-bool __blk_end_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes)
-{
- lockdep_assert_held(rq->q->queue_lock);
- WARN_ON_ONCE(rq->q->mq_ops);
-
- return __blk_end_bidi_request(rq, error, nr_bytes, 0);
-}
-EXPORT_SYMBOL(__blk_end_request);
-
-/**
- * __blk_end_request_all - Helper function for drivers to finish the request.
- * @rq: the request to finish
- * @error: block status code
- *
- * Description:
- * Completely finish @rq. Must be called with queue lock held.
- */
-void __blk_end_request_all(struct request *rq, blk_status_t error)
-{
- bool pending;
- unsigned int bidi_bytes = 0;
-
- lockdep_assert_held(rq->q->queue_lock);
- WARN_ON_ONCE(rq->q->mq_ops);
-
- if (unlikely(blk_bidi_rq(rq)))
- bidi_bytes = blk_rq_bytes(rq->next_rq);
-
- pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
- BUG_ON(pending);
-}
-EXPORT_SYMBOL(__blk_end_request_all);
-
-/**
- * __blk_end_request_cur - Helper function to finish the current request chunk.
- * @rq: the request to finish the current chunk for
- * @error: block status code
- *
- * Description:
- * Complete the current consecutively mapped chunk from @rq. Must
- * be called with queue lock held.
- *
- * Return:
- * %false - we are done with this request
- * %true - still buffers pending for this request
- */
-bool __blk_end_request_cur(struct request *rq, blk_status_t error)
-{
- return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
-}
-EXPORT_SYMBOL(__blk_end_request_cur);
-
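/*
 * Illustrative sketch: with the blk_end_request()/__blk_end_request*()
 * family removed, converted drivers complete requests through the blk-mq
 * helpers instead (rq/nr_bytes are placeholders). Full completion:
 */
blk_mq_end_request(rq, BLK_STS_OK);

/* Partial completion keeps the request alive while bytes remain: */
if (!blk_update_request(rq, BLK_STS_OK, nr_bytes))
	__blk_mq_end_request(rq, BLK_STS_OK);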
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio)
{
@@ -3428,8 +1559,8 @@ EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
*/
int blk_lld_busy(struct request_queue *q)
{
- if (q->lld_busy_fn)
- return q->lld_busy_fn(q);
+ if (queue_is_mq(q) && q->mq_ops->busy)
+ return q->mq_ops->busy(q);
return 0;
}
@@ -3460,7 +1591,6 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
*/
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
- dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
@@ -3572,9 +1702,11 @@ void blk_start_plug(struct blk_plug *plug)
if (tsk->plug)
return;
- INIT_LIST_HEAD(&plug->list);
INIT_LIST_HEAD(&plug->mq_list);
INIT_LIST_HEAD(&plug->cb_list);
+ plug->rq_count = 0;
+ plug->multiple_queues = false;
+
/*
* Store ordering should not be needed here, since a potential
* preempt will imply a full memory barrier
@@ -3583,36 +1715,6 @@ void blk_start_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_start_plug);
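/*
 * Illustrative sketch: the caller-visible plugging API is unchanged even
 * though the plug now carries only mq_list plus rq_count:
 */
struct blk_plug plug;

blk_start_plug(&plug);
submit_bio(bio1);	/* resulting requests gather on plug.mq_list ... */
submit_bio(bio2);
blk_finish_plug(&plug);	/* ... and are flushed via blk_mq_flush_plug_list() */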
-static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
-{
- struct request *rqa = container_of(a, struct request, queuelist);
- struct request *rqb = container_of(b, struct request, queuelist);
-
- return !(rqa->q < rqb->q ||
- (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
-}
-
-/*
- * If 'from_schedule' is true, then postpone the dispatch of requests
- * until a safe kblockd context. We do this to avoid accidentally large
- * additional stack usage in driver dispatch, in places where the original
- * plugger did not intend it.
- */
-static void queue_unplugged(struct request_queue *q, unsigned int depth,
- bool from_schedule)
- __releases(q->queue_lock)
-{
- lockdep_assert_held(q->queue_lock);
-
- trace_block_unplug(q, depth, !from_schedule);
-
- if (from_schedule)
- blk_run_queue_async(q);
- else
- __blk_run_queue(q);
- spin_unlock_irq(q->queue_lock);
-}
-
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
LIST_HEAD(callbacks);
@@ -3657,65 +1759,10 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
- struct request_queue *q;
- struct request *rq;
- LIST_HEAD(list);
- unsigned int depth;
-
flush_plug_callbacks(plug, from_schedule);
if (!list_empty(&plug->mq_list))
blk_mq_flush_plug_list(plug, from_schedule);
-
- if (list_empty(&plug->list))
- return;
-
- list_splice_init(&plug->list, &list);
-
- list_sort(NULL, &list, plug_rq_cmp);
-
- q = NULL;
- depth = 0;
-
- while (!list_empty(&list)) {
- rq = list_entry_rq(list.next);
- list_del_init(&rq->queuelist);
- BUG_ON(!rq->q);
- if (rq->q != q) {
- /*
- * This drops the queue lock
- */
- if (q)
- queue_unplugged(q, depth, from_schedule);
- q = rq->q;
- depth = 0;
- spin_lock_irq(q->queue_lock);
- }
-
- /*
- * Short-circuit if @q is dead
- */
- if (unlikely(blk_queue_dying(q))) {
- __blk_end_request_all(rq, BLK_STS_IOERR);
- continue;
- }
-
- /*
- * rq is already accounted, so use raw insert
- */
- if (op_is_flush(rq->cmd_flags))
- __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
- else
- __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
-
- depth++;
- }
-
- /*
- * This drops the queue lock
- */
- if (q)
- queue_unplugged(q, depth, from_schedule);
}
void blk_finish_plug(struct blk_plug *plug)
@@ -3742,9 +1789,6 @@ int __init blk_dev_init(void)
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");
- request_cachep = kmem_cache_create("blkdev_requests",
- sizeof(struct request), 0, SLAB_PANIC, NULL);
-
blk_requestq_cachep = kmem_cache_create("request_queue",
sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index f7b292f12449..a34b7d918742 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -48,8 +48,6 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head,
rq_end_io_fn *done)
{
- int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-
WARN_ON(irqs_disabled());
WARN_ON(!blk_rq_is_passthrough(rq));
@@ -60,23 +58,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
* don't check dying flag for MQ because the request won't
* be reused after dying flag is set
*/
- if (q->mq_ops) {
- blk_mq_sched_insert_request(rq, at_head, true, false);
- return;
- }
-
- spin_lock_irq(q->queue_lock);
-
- if (unlikely(blk_queue_dying(q))) {
- rq->rq_flags |= RQF_QUIET;
- __blk_end_request_all(rq, BLK_STS_IOERR);
- spin_unlock_irq(q->queue_lock);
- return;
- }
-
- __elv_add_request(q, rq, where);
- __blk_run_queue(q);
- spin_unlock_irq(q->queue_lock);
+ blk_mq_sched_insert_request(rq, at_head, true, false);
}
EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
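/*
 * Illustrative sketch of an asynchronous passthrough submission on the
 * now mq-only path (callback name hypothetical; assumes a struct
 * completion "done" stashed in end_io_data):
 */
static void example_end_io(struct request *rq, blk_status_t error)
{
	struct completion *waiting = rq->end_io_data;

	complete(waiting);
}

/* in the submitter: */
rq->end_io_data = &done;
blk_execute_rq_nowait(q, NULL, rq, 1, example_end_io);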
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 8b44b86779da..a3fc7191c694 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -93,7 +93,7 @@ enum {
FLUSH_PENDING_TIMEOUT = 5 * HZ,
};
-static bool blk_kick_flush(struct request_queue *q,
+static void blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq, unsigned int flags);
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
@@ -132,18 +132,9 @@ static void blk_flush_restore_request(struct request *rq)
rq->end_io = rq->flush.saved_end_io;
}
-static bool blk_flush_queue_rq(struct request *rq, bool add_front)
+static void blk_flush_queue_rq(struct request *rq, bool add_front)
{
- if (rq->q->mq_ops) {
- blk_mq_add_to_requeue_list(rq, add_front, true);
- return false;
- } else {
- if (add_front)
- list_add(&rq->queuelist, &rq->q->queue_head);
- else
- list_add_tail(&rq->queuelist, &rq->q->queue_head);
- return true;
- }
+ blk_mq_add_to_requeue_list(rq, add_front, true);
}
/**
@@ -157,18 +148,17 @@ static bool blk_flush_queue_rq(struct request *rq, bool add_front)
* completion and trigger the next step.
*
* CONTEXT:
- * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
+ * spin_lock_irq(fq->mq_flush_lock)
*
* RETURNS:
* %true if requests were added to the dispatch queue, %false otherwise.
*/
-static bool blk_flush_complete_seq(struct request *rq,
+static void blk_flush_complete_seq(struct request *rq,
struct blk_flush_queue *fq,
unsigned int seq, blk_status_t error)
{
struct request_queue *q = rq->q;
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
- bool queued = false, kicked;
unsigned int cmd_flags;
BUG_ON(rq->flush.seq & seq);
@@ -191,7 +181,7 @@ static bool blk_flush_complete_seq(struct request *rq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
- queued = blk_flush_queue_rq(rq, true);
+ blk_flush_queue_rq(rq, true);
break;
case REQ_FSEQ_DONE:
@@ -204,42 +194,34 @@ static bool blk_flush_complete_seq(struct request *rq,
BUG_ON(!list_empty(&rq->queuelist));
list_del_init(&rq->flush.list);
blk_flush_restore_request(rq);
- if (q->mq_ops)
- blk_mq_end_request(rq, error);
- else
- __blk_end_request_all(rq, error);
+ blk_mq_end_request(rq, error);
break;
default:
BUG();
}
- kicked = blk_kick_flush(q, fq, cmd_flags);
- return kicked | queued;
+ blk_kick_flush(q, fq, cmd_flags);
}
static void flush_end_io(struct request *flush_rq, blk_status_t error)
{
struct request_queue *q = flush_rq->q;
struct list_head *running;
- bool queued = false;
struct request *rq, *n;
unsigned long flags = 0;
struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
+ struct blk_mq_hw_ctx *hctx;
- if (q->mq_ops) {
- struct blk_mq_hw_ctx *hctx;
-
- /* release the tag's ownership to the req cloned from */
- spin_lock_irqsave(&fq->mq_flush_lock, flags);
- hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
- if (!q->elevator) {
- blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
- flush_rq->tag = -1;
- } else {
- blk_mq_put_driver_tag_hctx(hctx, flush_rq);
- flush_rq->internal_tag = -1;
- }
+ /* release the tag's ownership to the req cloned from */
+ spin_lock_irqsave(&fq->mq_flush_lock, flags);
+ hctx = flush_rq->mq_hctx;
+ if (!q->elevator) {
+ blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
+ flush_rq->tag = -1;
+ } else {
+ blk_mq_put_driver_tag_hctx(hctx, flush_rq);
+ flush_rq->internal_tag = -1;
}
running = &fq->flush_queue[fq->flush_running_idx];
@@ -248,35 +230,16 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
/* account completion of the flush request */
fq->flush_running_idx ^= 1;
- if (!q->mq_ops)
- elv_completed_request(q, flush_rq);
-
/* and push the waiting requests to the next stage */
list_for_each_entry_safe(rq, n, running, flush.list) {
unsigned int seq = blk_flush_cur_seq(rq);
BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
- queued |= blk_flush_complete_seq(rq, fq, seq, error);
+ blk_flush_complete_seq(rq, fq, seq, error);
}
- /*
- * Kick the queue to avoid stall for two cases:
- * 1. Moving a request silently to empty queue_head may stall the
- * queue.
- * 2. When flush request is running in non-queueable queue, the
- * queue is hold. Restart the queue after flush request is finished
- * to avoid stall.
- * This function is called from request completion path and calling
- * directly into request_fn may confuse the driver. Always use
- * kblockd.
- */
- if (queued || fq->flush_queue_delayed) {
- WARN_ON(q->mq_ops);
- blk_run_queue_async(q);
- }
fq->flush_queue_delayed = 0;
- if (q->mq_ops)
- spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+ spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
}
/**
@@ -289,12 +252,10 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
* Please read the comment at the top of this file for more info.
*
* CONTEXT:
- * spin_lock_irq(q->queue_lock or fq->mq_flush_lock)
+ * spin_lock_irq(fq->mq_flush_lock)
*
- * RETURNS:
- * %true if flush was issued, %false otherwise.
*/
-static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
+static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
unsigned int flags)
{
struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
@@ -304,7 +265,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
/* C1 described at the top of this file */
if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
- return false;
+ return;
/* C2 and C3
*
@@ -312,11 +273,10 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
* assigned to empty flushes, and we deadlock if we are expecting
* other requests to make progress. Don't defer for that case.
*/
- if (!list_empty(&fq->flush_data_in_flight) &&
- !(q->mq_ops && q->elevator) &&
+ if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
time_before(jiffies,
fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
- return false;
+ return;
/*
* Issue flush and toggle pending_idx. This makes pending_idx
@@ -334,19 +294,15 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
* In case of IO scheduler, flush rq need to borrow scheduler tag
* just for cheating put/get driver tag.
*/
- if (q->mq_ops) {
- struct blk_mq_hw_ctx *hctx;
-
- flush_rq->mq_ctx = first_rq->mq_ctx;
-
- if (!q->elevator) {
- fq->orig_rq = first_rq;
- flush_rq->tag = first_rq->tag;
- hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
- blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
- } else {
- flush_rq->internal_tag = first_rq->internal_tag;
- }
+ flush_rq->mq_ctx = first_rq->mq_ctx;
+ flush_rq->mq_hctx = first_rq->mq_hctx;
+
+ if (!q->elevator) {
+ fq->orig_rq = first_rq;
+ flush_rq->tag = first_rq->tag;
+ blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
+ } else {
+ flush_rq->internal_tag = first_rq->internal_tag;
}
flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
@@ -355,62 +311,17 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;
- return blk_flush_queue_rq(flush_rq, false);
-}
-
-static void flush_data_end_io(struct request *rq, blk_status_t error)
-{
- struct request_queue *q = rq->q;
- struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
-
- lockdep_assert_held(q->queue_lock);
-
- /*
- * Updating q->in_flight[] here for making this tag usable
- * early. Because in blk_queue_start_tag(),
- * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
- * reserve tags for sync I/O.
- *
- * More importantly this way can avoid the following I/O
- * deadlock:
- *
- * - suppose there are 40 fua requests coming to the flush queue
- * and queue depth is 31
- * - 30 rqs are scheduled then blk_queue_start_tag() can't alloc
- * tag for async I/O any more
- * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
- * and flush_data_end_io() is called
- * - the other rqs still can't go ahead if not updating
- * q->in_flight[BLK_RW_ASYNC] here, meantime these rqs
- * are held in flush data queue and make no progress of
- * handling post flush rq
- * - only after the post flush rq is handled, all these rqs
- * can be completed
- */
-
- elv_completed_request(q, rq);
-
- /* for avoiding double accounting */
- rq->rq_flags &= ~RQF_STARTED;
-
- /*
- * After populating an empty queue, kick it to avoid stall. Read
- * the comment in flush_end_io().
- */
- if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
- blk_run_queue_async(q);
+ blk_flush_queue_rq(flush_rq, false);
}
static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
{
struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct blk_mq_ctx *ctx = rq->mq_ctx;
unsigned long flags;
struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
- hctx = blk_mq_map_queue(q, ctx->cpu);
-
if (q->elevator) {
WARN_ON(rq->tag < 0);
blk_mq_put_driver_tag_hctx(hctx, rq);
@@ -443,9 +354,6 @@ void blk_insert_flush(struct request *rq)
unsigned int policy = blk_flush_policy(fflags, rq);
struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
- if (!q->mq_ops)
- lockdep_assert_held(q->queue_lock);
-
/*
* @policy now records what operations need to be done. Adjust
* REQ_PREFLUSH and FUA for the driver.
@@ -468,10 +376,7 @@ void blk_insert_flush(struct request *rq)
* complete the request.
*/
if (!policy) {
- if (q->mq_ops)
- blk_mq_end_request(rq, 0);
- else
- __blk_end_request(rq, 0, 0);
+ blk_mq_end_request(rq, 0);
return;
}
@@ -484,10 +389,7 @@ void blk_insert_flush(struct request *rq)
*/
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
- if (q->mq_ops)
- blk_mq_request_bypass_insert(rq, false);
- else
- list_add_tail(&rq->queuelist, &q->queue_head);
+ blk_mq_request_bypass_insert(rq, false);
return;
}
@@ -499,17 +401,12 @@ void blk_insert_flush(struct request *rq)
INIT_LIST_HEAD(&rq->flush.list);
rq->rq_flags |= RQF_FLUSH_SEQ;
rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
- if (q->mq_ops) {
- rq->end_io = mq_flush_data_end_io;
- spin_lock_irq(&fq->mq_flush_lock);
- blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
- spin_unlock_irq(&fq->mq_flush_lock);
- return;
- }
- rq->end_io = flush_data_end_io;
+ rq->end_io = mq_flush_data_end_io;
+ spin_lock_irq(&fq->mq_flush_lock);
blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
+ spin_unlock_irq(&fq->mq_flush_lock);
}
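/*
 * For reference, blk_flush_policy() itself is untouched: on a queue with
 * a volatile write cache but no native FUA, a REQ_PREFLUSH | REQ_FUA
 * write with data still decomposes into all three stages, which
 * blk_flush_complete_seq() now always walks under mq_flush_lock:
 */
policy = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA | REQ_FSEQ_POSTFLUSH;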
/**
@@ -575,8 +472,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
if (!fq)
goto fail;
- if (q->mq_ops)
- spin_lock_init(&fq->mq_flush_lock);
+ spin_lock_init(&fq->mq_flush_lock);
rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
fq->flush_rq = kzalloc_node(rq_sz, flags, node);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 01580f88fcb3..5ed59ac6ae58 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -28,7 +28,6 @@ void get_io_context(struct io_context *ioc)
BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
atomic_long_inc(&ioc->refcount);
}
-EXPORT_SYMBOL(get_io_context);
static void icq_free_icq_rcu(struct rcu_head *head)
{
@@ -48,10 +47,8 @@ static void ioc_exit_icq(struct io_cq *icq)
if (icq->flags & ICQ_EXITED)
return;
- if (et->uses_mq && et->ops.mq.exit_icq)
- et->ops.mq.exit_icq(icq);
- else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
- et->ops.sq.elevator_exit_icq_fn(icq);
+ if (et->ops.exit_icq)
+ et->ops.exit_icq(icq);
icq->flags |= ICQ_EXITED;
}
@@ -113,9 +110,9 @@ static void ioc_release_fn(struct work_struct *work)
struct io_cq, ioc_node);
struct request_queue *q = icq->q;
- if (spin_trylock(q->queue_lock)) {
+ if (spin_trylock(&q->queue_lock)) {
ioc_destroy_icq(icq);
- spin_unlock(q->queue_lock);
+ spin_unlock(&q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
cpu_relax();
@@ -162,7 +159,6 @@ void put_io_context(struct io_context *ioc)
if (free_ioc)
kmem_cache_free(iocontext_cachep, ioc);
}
-EXPORT_SYMBOL(put_io_context);
/**
* put_io_context_active - put active reference on ioc
@@ -173,7 +169,6 @@ EXPORT_SYMBOL(put_io_context);
*/
void put_io_context_active(struct io_context *ioc)
{
- struct elevator_type *et;
unsigned long flags;
struct io_cq *icq;
@@ -187,25 +182,12 @@ void put_io_context_active(struct io_context *ioc)
* reverse double locking. Read comment in ioc_release_fn() for
* explanation on the nested locking annotation.
*/
-retry:
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
if (icq->flags & ICQ_EXITED)
continue;
- et = icq->q->elevator->type;
- if (et->uses_mq) {
- ioc_exit_icq(icq);
- } else {
- if (spin_trylock(icq->q->queue_lock)) {
- ioc_exit_icq(icq);
- spin_unlock(icq->q->queue_lock);
- } else {
- spin_unlock_irqrestore(&ioc->lock, flags);
- cpu_relax();
- goto retry;
- }
- }
+ ioc_exit_icq(icq);
}
spin_unlock_irqrestore(&ioc->lock, flags);
@@ -232,7 +214,7 @@ static void __ioc_clear_queue(struct list_head *icq_list)
while (!list_empty(icq_list)) {
struct io_cq *icq = list_entry(icq_list->next,
- struct io_cq, q_node);
+ struct io_cq, q_node);
struct io_context *ioc = icq->ioc;
spin_lock_irqsave(&ioc->lock, flags);
@@ -251,16 +233,11 @@ void ioc_clear_queue(struct request_queue *q)
{
LIST_HEAD(icq_list);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
list_splice_init(&q->icq_list, &icq_list);
+ spin_unlock_irq(&q->queue_lock);
- if (q->mq_ops) {
- spin_unlock_irq(q->queue_lock);
- __ioc_clear_queue(&icq_list);
- } else {
- __ioc_clear_queue(&icq_list);
- spin_unlock_irq(q->queue_lock);
- }
+ __ioc_clear_queue(&icq_list);
}
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
@@ -336,7 +313,6 @@ struct io_context *get_task_io_context(struct task_struct *task,
return NULL;
}
-EXPORT_SYMBOL(get_task_io_context);
/**
* ioc_lookup_icq - lookup io_cq from ioc
@@ -350,7 +326,7 @@ struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
struct io_cq *icq;
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
/*
* icq's are indexed from @ioc using radix tree and hint pointer,
@@ -409,16 +385,14 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
INIT_HLIST_NODE(&icq->ioc_node);
/* lock both q and ioc and try to link @icq */
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
spin_lock(&ioc->lock);
if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
hlist_add_head(&icq->ioc_node, &ioc->icq_list);
list_add(&icq->q_node, &q->icq_list);
- if (et->uses_mq && et->ops.mq.init_icq)
- et->ops.mq.init_icq(icq);
- else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
- et->ops.sq.elevator_init_icq_fn(icq);
+ if (et->ops.init_icq)
+ et->ops.init_icq(icq);
} else {
kmem_cache_free(et->icq_cache, icq);
icq = ioc_lookup_icq(ioc, q);
@@ -427,7 +401,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
}
spin_unlock(&ioc->lock);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
radix_tree_preload_end();
return icq;
}
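/*
 * Illustrative sketch: with the uses_mq/ops.sq/ops.mq split gone, an
 * elevator registers a single ops table (scheduler and icq type names
 * hypothetical):
 */
static struct elevator_type example_sched = {
	.ops = {
		.init_icq	= example_init_icq,
		.exit_icq	= example_exit_icq,
		/* ... insert/dispatch hooks ... */
	},
	.icq_size	= sizeof(struct example_icq),
	.icq_align	= __alignof__(struct example_icq),
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};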
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 38c35c32aff2..fc714ef402a6 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -262,29 +262,25 @@ static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
stat->rqs.mean);
}
-static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
- wait_queue_entry_t *wait,
- bool first_block)
+static void iolat_cleanup_cb(struct rq_wait *rqw, void *private_data)
{
- struct rq_wait *rqw = &iolat->rq_wait;
+ atomic_dec(&rqw->inflight);
+ wake_up(&rqw->wait);
+}
- if (first_block && waitqueue_active(&rqw->wait) &&
- rqw->wait.head.next != &wait->entry)
- return false;
+static bool iolat_acquire_inflight(struct rq_wait *rqw, void *private_data)
+{
+ struct iolatency_grp *iolat = private_data;
return rq_wait_inc_below(rqw, iolat->rq_depth.max_depth);
}
static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
struct iolatency_grp *iolat,
- spinlock_t *lock, bool issue_as_root,
+ bool issue_as_root,
bool use_memdelay)
- __releases(lock)
- __acquires(lock)
{
struct rq_wait *rqw = &iolat->rq_wait;
unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
- DEFINE_WAIT(wait);
- bool first_block = true;
if (use_delay)
blkcg_schedule_throttle(rqos->q, use_memdelay);
@@ -301,27 +297,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
return;
}
- if (iolatency_may_queue(iolat, &wait, first_block))
- return;
-
- do {
- prepare_to_wait_exclusive(&rqw->wait, &wait,
- TASK_UNINTERRUPTIBLE);
-
- if (iolatency_may_queue(iolat, &wait, first_block))
- break;
- first_block = false;
-
- if (lock) {
- spin_unlock_irq(lock);
- io_schedule();
- spin_lock_irq(lock);
- } else {
- io_schedule();
- }
- } while (1);
-
- finish_wait(&rqw->wait, &wait);
+ rq_qos_wait(rqw, iolat, iolat_acquire_inflight, iolat_cleanup_cb);
}
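/*
 * The contract assumed here for the new shared rq_qos_wait() helper: the
 * acquire callback opportunistically grabs an inflight slot, and the
 * cleanup callback gives back a slot handed over by a racing waker
 * (signatures as introduced alongside this series):
 */
typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);

void rq_qos_wait(struct rq_wait *rqw, void *private_data,
		 acquire_inflight_cb_t *acquire_inflight_cb,
		 cleanup_cb_t *cleanup_cb);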
#define SCALE_DOWN_FACTOR 2
@@ -478,38 +454,15 @@ static void check_scale_change(struct iolatency_grp *iolat)
scale_change(iolat, direction > 0);
}
-static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
- spinlock_t *lock)
+static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
- struct request_queue *q = rqos->q;
+ struct blkcg_gq *blkg = bio->bi_blkg;
bool issue_as_root = bio_issue_as_root_blkg(bio);
if (!blk_iolatency_enabled(blkiolat))
return;
- rcu_read_lock();
- blkcg = bio_blkcg(bio);
- bio_associate_blkcg(bio, &blkcg->css);
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg)) {
- if (!lock)
- spin_lock_irq(q->queue_lock);
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
- blkg = NULL;
- if (!lock)
- spin_unlock_irq(q->queue_lock);
- }
- if (!blkg)
- goto out;
-
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
- bio_associate_blkg(bio, blkg);
-out:
- rcu_read_unlock();
while (blkg && blkg->parent) {
struct iolatency_grp *iolat = blkg_to_lat(blkg);
if (!iolat) {
@@ -518,7 +471,7 @@ out:
}
check_scale_change(iolat);
- __blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
+ __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
blkg = blkg->parent;
}
@@ -640,7 +593,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
bool enabled = false;
blkg = bio->bi_blkg;
- if (!blkg)
+ if (!blkg || !bio_flagged(bio, BIO_TRACKED))
return;
iolat = blkg_to_lat(bio->bi_blkg);
@@ -730,7 +683,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
* We could be exiting, don't access the pd unless we have a
* ref on the blkg.
*/
- if (!blkg_try_get(blkg))
+ if (!blkg_tryget(blkg))
continue;
iolat = blkg_to_lat(blkg);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e7696c47489a..e7f1c6cf0167 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -389,7 +389,6 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
bio_set_flag(bio, BIO_SEG_VALID);
}
-EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt)
@@ -596,17 +595,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
return ll_new_hw_segment(q, req, bio);
}
-/*
- * blk-mq uses req->special to carry normal driver per-request payload, it
- * does not indicate a prepared command that we cannot merge with.
- */
-static bool req_no_special_merge(struct request *req)
-{
- struct request_queue *q = req->q;
-
- return !q->mq_ops && req->special;
-}
-
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
struct request *next)
{
@@ -632,13 +620,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
unsigned int seg_size =
req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
- /*
- * First check if the either of the requests are re-queued
- * requests. Can't merge them if they are.
- */
- if (req_no_special_merge(req) || req_no_special_merge(next))
- return 0;
-
if (req_gap_back_merge(req, next->bio))
return 0;
@@ -703,12 +684,10 @@ static void blk_account_io_merge(struct request *req)
{
if (blk_do_io_stat(req)) {
struct hd_struct *part;
- int cpu;
- cpu = part_stat_lock();
+ part_stat_lock();
part = req->part;
- part_round_stats(req->q, cpu, part);
part_dec_in_flight(req->q, part, rq_data_dir(req));
hd_struct_put(part);
@@ -731,7 +710,8 @@ static inline bool blk_discard_mergable(struct request *req)
return false;
}
-enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
+static enum elv_merge blk_try_req_merge(struct request *req,
+ struct request *next)
{
if (blk_discard_mergable(req))
return ELEVATOR_DISCARD_MERGE;
@@ -748,9 +728,6 @@ enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
static struct request *attempt_merge(struct request_queue *q,
struct request *req, struct request *next)
{
- if (!q->mq_ops)
- lockdep_assert_held(q->queue_lock);
-
if (!rq_mergeable(req) || !rq_mergeable(next))
return NULL;
@@ -758,8 +735,7 @@ static struct request *attempt_merge(struct request_queue *q,
return NULL;
if (rq_data_dir(req) != rq_data_dir(next)
- || req->rq_disk != next->rq_disk
- || req_no_special_merge(next))
+ || req->rq_disk != next->rq_disk)
return NULL;
if (req_op(req) == REQ_OP_WRITE_SAME &&
@@ -773,6 +749,9 @@ static struct request *attempt_merge(struct request_queue *q,
if (req->write_hint != next->write_hint)
return NULL;
+ if (req->ioprio != next->ioprio)
+ return NULL;
+
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@@ -820,7 +799,7 @@ static struct request *attempt_merge(struct request_queue *q,
req->__data_len += blk_rq_bytes(next);
- if (req_op(req) != REQ_OP_DISCARD)
+ if (!blk_discard_mergable(req))
elv_merge_requests(q, req, next);
/*
@@ -828,10 +807,6 @@ static struct request *attempt_merge(struct request_queue *q,
*/
blk_account_io_merge(next);
- req->ioprio = ioprio_best(req->ioprio, next->ioprio);
- if (blk_rq_cpu_valid(next))
- req->cpu = next->cpu;
-
/*
* ownership of bio passed from next to req, return 'next' for
* the caller to free
@@ -863,16 +838,11 @@ struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
struct request *next)
{
- struct elevator_queue *e = q->elevator;
struct request *free;
- if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
- if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
- return 0;
-
free = attempt_merge(q, rq, next);
if (free) {
- __blk_put_request(q, free);
+ blk_put_request(free);
return 1;
}
@@ -891,8 +861,8 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (bio_data_dir(bio) != rq_data_dir(rq))
return false;
- /* must be same device and not a special request */
- if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
+ /* must be same device */
+ if (rq->rq_disk != bio->bi_disk)
return false;
/* only merge integrity protected bio into ditto rq */
@@ -911,6 +881,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (rq->write_hint != bio->bi_write_hint)
return false;
+ if (rq->ioprio != bio_prio(bio))
+ return false;
+
return true;
}
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 3eb169f15842..03a534820271 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -14,9 +14,10 @@
#include "blk.h"
#include "blk-mq.h"
-static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
+static int cpu_to_queue_index(struct blk_mq_queue_map *qmap,
+ unsigned int nr_queues, const int cpu)
{
- return cpu % nr_queues;
+ return qmap->queue_offset + (cpu % nr_queues);
}
static int get_first_sibling(unsigned int cpu)
@@ -30,10 +31,10 @@ static int get_first_sibling(unsigned int cpu)
return cpu;
}
-int blk_mq_map_queues(struct blk_mq_tag_set *set)
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
- unsigned int *map = set->mq_map;
- unsigned int nr_queues = set->nr_hw_queues;
+ unsigned int *map = qmap->mq_map;
+ unsigned int nr_queues = qmap->nr_queues;
unsigned int cpu, first_sibling;
for_each_possible_cpu(cpu) {
@@ -44,11 +45,11 @@ int blk_mq_map_queues(struct blk_mq_tag_set *set)
 * performance optimizations.
*/
if (cpu < nr_queues) {
- map[cpu] = cpu_to_queue_index(nr_queues, cpu);
+ map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu);
} else {
first_sibling = get_first_sibling(cpu);
if (first_sibling == cpu)
- map[cpu] = cpu_to_queue_index(nr_queues, cpu);
+ map[cpu] = cpu_to_queue_index(qmap, nr_queues, cpu);
else
map[cpu] = map[first_sibling];
}
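/*
 * Illustrative sketch of the queue_offset semantics: with two maps of
 * eight queues each, the second map shifts its results past the first
 * (storage arrays hypothetical):
 */
unsigned int read_mq_map[NR_CPUS], poll_mq_map[NR_CPUS];

struct blk_mq_queue_map read_map = {
	.mq_map		= read_mq_map,
	.nr_queues	= 8,
	.queue_offset	= 0,
};
struct blk_mq_queue_map poll_map = {
	.mq_map		= poll_mq_map,
	.nr_queues	= 8,
	.queue_offset	= 8,
};

blk_mq_map_queues(&read_map);	/* cpu 3 -> hwq 0 + (3 % 8) = 3  */
blk_mq_map_queues(&poll_map);	/* cpu 3 -> hwq 8 + (3 % 8) = 11 */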
@@ -62,12 +63,12 @@ EXPORT_SYMBOL_GPL(blk_mq_map_queues);
* We have no quick way of doing reverse lookups. This is only used at
* queue init time, so runtime isn't important.
*/
-int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
+int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
int i;
for_each_possible_cpu(i) {
- if (index == mq_map[i])
+ if (index == qmap->mq_map[i])
return local_memory_node(cpu_to_node(i));
}
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 10b284a1f18d..90d68760af08 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -23,6 +23,7 @@
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
+#include "blk-rq-qos.h"
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
@@ -112,10 +113,8 @@ static int queue_pm_only_show(void *data, struct seq_file *m)
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
- QUEUE_FLAG_NAME(QUEUED),
QUEUE_FLAG_NAME(STOPPED),
QUEUE_FLAG_NAME(DYING),
- QUEUE_FLAG_NAME(BYPASS),
QUEUE_FLAG_NAME(BIDI),
QUEUE_FLAG_NAME(NOMERGES),
QUEUE_FLAG_NAME(SAME_COMP),
@@ -318,7 +317,6 @@ static const char *const cmd_flag_name[] = {
static const char *const rqf_name[] = {
RQF_NAME(SORTED),
RQF_NAME(STARTED),
- RQF_NAME(QUEUED),
RQF_NAME(SOFTBARRIER),
RQF_NAME(FLUSH_SEQ),
RQF_NAME(MIXED_MERGE),
@@ -424,15 +422,18 @@ struct show_busy_params {
/*
* Note: the state of a request may change while this function is in progress,
- * e.g. due to a concurrent blk_mq_finish_request() call.
+ * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
+ * keep iterating requests.
*/
-static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
+static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
const struct show_busy_params *params = data;
- if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx)
+ if (rq->mq_hctx == params->hctx)
__blk_mq_debugfs_rq_show(params->m,
list_entry_rq(&rq->queuelist));
+
+ return true;
}
static int hctx_busy_show(void *data, struct seq_file *m)
@@ -446,6 +447,21 @@ static int hctx_busy_show(void *data, struct seq_file *m)
return 0;
}
+static const char *const hctx_types[] = {
+ [HCTX_TYPE_DEFAULT] = "default",
+ [HCTX_TYPE_READ] = "read",
+ [HCTX_TYPE_POLL] = "poll",
+};
+
+static int hctx_type_show(void *data, struct seq_file *m)
+{
+ struct blk_mq_hw_ctx *hctx = data;
+
+ BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
+ seq_printf(m, "%s\n", hctx_types[hctx->type]);
+ return 0;
+}
+
static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
struct blk_mq_hw_ctx *hctx = data;
@@ -636,36 +652,43 @@ static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
return 0;
}
-static void *ctx_rq_list_start(struct seq_file *m, loff_t *pos)
- __acquires(&ctx->lock)
-{
- struct blk_mq_ctx *ctx = m->private;
-
- spin_lock(&ctx->lock);
- return seq_list_start(&ctx->rq_list, *pos);
-}
-
-static void *ctx_rq_list_next(struct seq_file *m, void *v, loff_t *pos)
-{
- struct blk_mq_ctx *ctx = m->private;
-
- return seq_list_next(v, &ctx->rq_list, pos);
+#define CTX_RQ_SEQ_OPS(name, type) \
+static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
+ __acquires(&ctx->lock) \
+{ \
+ struct blk_mq_ctx *ctx = m->private; \
+ \
+ spin_lock(&ctx->lock); \
+ return seq_list_start(&ctx->rq_lists[type], *pos); \
+} \
+ \
+static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v, \
+ loff_t *pos) \
+{ \
+ struct blk_mq_ctx *ctx = m->private; \
+ \
+ return seq_list_next(v, &ctx->rq_lists[type], pos); \
+} \
+ \
+static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v) \
+ __releases(&ctx->lock) \
+{ \
+ struct blk_mq_ctx *ctx = m->private; \
+ \
+ spin_unlock(&ctx->lock); \
+} \
+ \
+static const struct seq_operations ctx_##name##_rq_list_seq_ops = { \
+ .start = ctx_##name##_rq_list_start, \
+ .next = ctx_##name##_rq_list_next, \
+ .stop = ctx_##name##_rq_list_stop, \
+ .show = blk_mq_debugfs_rq_show, \
}
-static void ctx_rq_list_stop(struct seq_file *m, void *v)
- __releases(&ctx->lock)
-{
- struct blk_mq_ctx *ctx = m->private;
-
- spin_unlock(&ctx->lock);
-}
+CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
+CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
+CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);
-static const struct seq_operations ctx_rq_list_seq_ops = {
- .start = ctx_rq_list_start,
- .next = ctx_rq_list_next,
- .stop = ctx_rq_list_stop,
- .show = blk_mq_debugfs_rq_show,
-};
static int ctx_dispatched_show(void *data, struct seq_file *m)
{
struct blk_mq_ctx *ctx = data;
@@ -798,11 +821,14 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
{"run", 0600, hctx_run_show, hctx_run_write},
{"active", 0400, hctx_active_show},
{"dispatch_busy", 0400, hctx_dispatch_busy_show},
+ {"type", 0400, hctx_type_show},
{},
};
static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
- {"rq_list", 0400, .seq_ops = &ctx_rq_list_seq_ops},
+ {"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
+ {"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
+ {"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
{"merged", 0600, ctx_merged_show, ctx_merged_write},
{"completed", 0600, ctx_completed_show, ctx_completed_write},
@@ -856,6 +882,15 @@ int blk_mq_debugfs_register(struct request_queue *q)
goto err;
}
+ if (q->rq_qos) {
+ struct rq_qos *rqos = q->rq_qos;
+
+ while (rqos) {
+ blk_mq_debugfs_register_rqos(rqos);
+ rqos = rqos->next;
+ }
+ }
+
return 0;
err:
@@ -978,6 +1013,50 @@ void blk_mq_debugfs_unregister_sched(struct request_queue *q)
q->sched_debugfs_dir = NULL;
}
+void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
+{
+ debugfs_remove_recursive(rqos->debugfs_dir);
+ rqos->debugfs_dir = NULL;
+}
+
+int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
+{
+ struct request_queue *q = rqos->q;
+ const char *dir_name = rq_qos_id_to_name(rqos->id);
+
+ if (!q->debugfs_dir)
+ return -ENOENT;
+
+ if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
+ return 0;
+
+ if (!q->rqos_debugfs_dir) {
+ q->rqos_debugfs_dir = debugfs_create_dir("rqos",
+ q->debugfs_dir);
+ if (!q->rqos_debugfs_dir)
+ return -ENOMEM;
+ }
+
+ rqos->debugfs_dir = debugfs_create_dir(dir_name,
+ rqos->q->rqos_debugfs_dir);
+ if (!rqos->debugfs_dir)
+ return -ENOMEM;
+
+ if (!debugfs_create_files(rqos->debugfs_dir, rqos,
+ rqos->ops->debugfs_attrs))
+ goto err;
+ return 0;
+ err:
+ blk_mq_debugfs_unregister_rqos(rqos);
+ return -ENOMEM;
+}
+
+void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
+{
+ debugfs_remove_recursive(q->rqos_debugfs_dir);
+ q->rqos_debugfs_dir = NULL;
+}
+
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
{
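
To make the CTX_RQ_SEQ_OPS changes above easier to follow: each invocation stamps out one per-type seq_file iterator. Expanding CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ) by hand (a mechanical expansion of the macro body, shown for illustration only) yields, for the start hook:

    static void *ctx_read_rq_list_start(struct seq_file *m, loff_t *pos)
    	__acquires(&ctx->lock)
    {
    	struct blk_mq_ctx *ctx = m->private;

    	spin_lock(&ctx->lock);
    	return seq_list_start(&ctx->rq_lists[HCTX_TYPE_READ], *pos);
    }

The matching next/stop hooks and the ctx_read_rq_list_seq_ops table follow the same substitution, and the three generated tables back the default_rq_list, read_rq_list and poll_rq_list debugfs entries registered further down.
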
diff --git a/block/blk-mq-debugfs.h b/block/blk-mq-debugfs.h
index a9160be12be0..8c9012a578c1 100644
--- a/block/blk-mq-debugfs.h
+++ b/block/blk-mq-debugfs.h
@@ -31,6 +31,10 @@ void blk_mq_debugfs_unregister_sched(struct request_queue *q);
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
+
+int blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
+void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
+void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
#else
static inline int blk_mq_debugfs_register(struct request_queue *q)
{
@@ -78,6 +82,19 @@ static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
}
+
+static inline int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
+{
+ return 0;
+}
+
+static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
+{
+}
+
+static inline void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
+{
+}
#endif
#ifdef CONFIG_BLK_DEBUG_FS_ZONED
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
index db644ec624f5..1dce18553984 100644
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -31,26 +31,26 @@
* that maps a queue to the CPUs that have irq affinity for the corresponding
* vector.
*/
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
int offset)
{
const struct cpumask *mask;
unsigned int queue, cpu;
- for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
mask = pci_irq_get_affinity(pdev, queue + offset);
if (!mask)
goto fallback;
for_each_cpu(cpu, mask)
- set->mq_map[cpu] = queue;
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
return 0;
fallback:
- WARN_ON_ONCE(set->nr_hw_queues > 1);
- blk_mq_clear_mq_map(set);
+ WARN_ON_ONCE(qmap->nr_queues > 1);
+ blk_mq_clear_mq_map(qmap);
return 0;
}
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
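
A PCI driver consumes this from its ->map_queues() callback, one queue map at a time. A hedged sketch (foo_map_queues(), struct foo_dev and its pdev member are hypothetical names, not part of this patch):

    struct foo_dev {
    	struct pci_dev *pdev;
    };

    static int foo_map_queues(struct blk_mq_tag_set *set)
    {
    	struct foo_dev *foo = set->driver_data;
    	struct blk_mq_queue_map *qmap = &set->map[0];

    	/*
    	 * The last argument is the first IRQ vector serving IO queues;
    	 * here none are skipped, but a driver with an admin/config
    	 * vector at index 0 would pass 1.
    	 */
    	return blk_mq_pci_map_queues(qmap, foo->pdev, 0);
    }
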
diff --git a/block/blk-mq-rdma.c b/block/blk-mq-rdma.c
index 996167f1de18..45030a81a1ed 100644
--- a/block/blk-mq-rdma.c
+++ b/block/blk-mq-rdma.c
@@ -29,24 +29,24 @@
* @set->nr_hw_queues, or @dev does not provide an affinity mask for a
* vector, we fall back to the naive mapping.
*/
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
struct ib_device *dev, int first_vec)
{
const struct cpumask *mask;
unsigned int queue, cpu;
- for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ for (queue = 0; queue < map->nr_queues; queue++) {
mask = ib_get_vector_affinity(dev, first_vec + queue);
if (!mask)
goto fallback;
for_each_cpu(cpu, mask)
- set->mq_map[cpu] = queue;
+ map->mq_map[cpu] = map->queue_offset + queue;
}
return 0;
fallback:
- return blk_mq_map_queues(set);
+ return blk_mq_map_queues(map);
}
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
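
nvme-rdma is the primary user; after this change its ->map_queues() passes the first queue map instead of the whole tag set. A sketch modeled on that driver (treat the details as illustrative, not the exact tree state):

    static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
    {
    	struct nvme_rdma_ctrl *ctrl = set->driver_data;

    	return blk_mq_rdma_map_queues(&set->map[0], ctrl->device->dev, 0);
    }
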
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 29bfe8017a2d..140933e4a7d1 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -31,15 +31,22 @@ void blk_mq_sched_free_hctx_data(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
-void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
+void blk_mq_sched_assign_ioc(struct request *rq)
{
struct request_queue *q = rq->q;
- struct io_context *ioc = rq_ioc(bio);
+ struct io_context *ioc;
struct io_cq *icq;
- spin_lock_irq(q->queue_lock);
+ /*
+ * May not have an IO context if it's a passthrough request
+ */
+ ioc = current->io_context;
+ if (!ioc)
+ return;
+
+ spin_lock_irq(&q->queue_lock);
icq = ioc_lookup_icq(ioc, q);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (!icq) {
icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
@@ -54,13 +61,14 @@ void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
* Mark a hardware queue as needing a restart. For shared queues, maintain
* a count of how many hardware queues are marked for restart.
*/
-static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
return;
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
+EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
@@ -85,14 +93,13 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
do {
struct request *rq;
- if (e->type->ops.mq.has_work &&
- !e->type->ops.mq.has_work(hctx))
+ if (e->type->ops.has_work && !e->type->ops.has_work(hctx))
break;
if (!blk_mq_get_dispatch_budget(hctx))
break;
- rq = e->type->ops.mq.dispatch_request(hctx);
+ rq = e->type->ops.dispatch_request(hctx);
if (!rq) {
blk_mq_put_dispatch_budget(hctx);
break;
@@ -110,7 +117,7 @@ static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{
- unsigned idx = ctx->index_hw;
+ unsigned short idx = ctx->index_hw[hctx->type];
if (++idx == hctx->nr_ctx)
idx = 0;
@@ -163,7 +170,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
struct request_queue *q = hctx->queue;
struct elevator_queue *e = q->elevator;
- const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
+ const bool has_sched_dispatch = e && e->type->ops.dispatch_request;
LIST_HEAD(rq_list);
/* RCU or SRCU read lock is needed before checking quiesced flag */
@@ -295,11 +302,14 @@ EXPORT_SYMBOL_GPL(blk_mq_bio_list_merge);
* too much time checking for merges.
*/
static bool blk_mq_attempt_merge(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx, struct bio *bio)
{
+ enum hctx_type type = hctx->type;
+
lockdep_assert_held(&ctx->lock);
- if (blk_mq_bio_list_merge(q, &ctx->rq_list, bio)) {
+ if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio)) {
ctx->rq_merged++;
return true;
}
@@ -311,19 +321,21 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx->cpu);
bool ret = false;
+ enum hctx_type type;
- if (e && e->type->ops.mq.bio_merge) {
+ if (e && e->type->ops.bio_merge) {
blk_mq_put_ctx(ctx);
- return e->type->ops.mq.bio_merge(hctx, bio);
+ return e->type->ops.bio_merge(hctx, bio);
}
+ type = hctx->type;
if ((hctx->flags & BLK_MQ_F_SHOULD_MERGE) &&
- !list_empty_careful(&ctx->rq_list)) {
+ !list_empty_careful(&ctx->rq_lists[type])) {
/* default per sw-queue merge */
spin_lock(&ctx->lock);
- ret = blk_mq_attempt_merge(q, ctx, bio);
+ ret = blk_mq_attempt_merge(q, hctx, ctx, bio);
spin_unlock(&ctx->lock);
}
@@ -367,7 +379,7 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
/* flush rq in flush machinery need to be dispatched directly */
if (!(rq->rq_flags & RQF_FLUSH_SEQ) && op_is_flush(rq->cmd_flags)) {
@@ -380,11 +392,11 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
goto run;
- if (e && e->type->ops.mq.insert_requests) {
+ if (e && e->type->ops.insert_requests) {
LIST_HEAD(list);
list_add(&rq->queuelist, &list);
- e->type->ops.mq.insert_requests(hctx, &list, at_head);
+ e->type->ops.insert_requests(hctx, &list, at_head);
} else {
spin_lock(&ctx->lock);
__blk_mq_insert_request(hctx, rq, at_head);
@@ -396,27 +408,25 @@ run:
blk_mq_run_hw_queue(hctx, async);
}
-void blk_mq_sched_insert_requests(struct request_queue *q,
+void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx,
struct list_head *list, bool run_queue_async)
{
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
- struct elevator_queue *e = hctx->queue->elevator;
+ struct elevator_queue *e;
- if (e && e->type->ops.mq.insert_requests)
- e->type->ops.mq.insert_requests(hctx, list, false);
+ e = hctx->queue->elevator;
+ if (e && e->type->ops.insert_requests)
+ e->type->ops.insert_requests(hctx, list, false);
else {
/*
* try to issue requests directly if the hw queue isn't
* busy in case of 'none' scheduler, and this way may save
* us one extra enqueue & dequeue to sw queue.
*/
- if (!hctx->dispatch_busy && !e && !run_queue_async) {
+ if (!hctx->dispatch_busy && !e && !run_queue_async)
blk_mq_try_issue_list_directly(hctx, list);
- if (list_empty(list))
- return;
- }
- blk_mq_insert_requests(hctx, ctx, list);
+ else
+ blk_mq_insert_requests(hctx, ctx, list);
}
blk_mq_run_hw_queue(hctx, run_queue_async);
@@ -489,15 +499,15 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
goto err;
}
- ret = e->ops.mq.init_sched(q, e);
+ ret = e->ops.init_sched(q, e);
if (ret)
goto err;
blk_mq_debugfs_register_sched(q);
queue_for_each_hw_ctx(q, hctx, i) {
- if (e->ops.mq.init_hctx) {
- ret = e->ops.mq.init_hctx(hctx, i);
+ if (e->ops.init_hctx) {
+ ret = e->ops.init_hctx(hctx, i);
if (ret) {
eq = q->elevator;
blk_mq_exit_sched(q, eq);
@@ -523,14 +533,14 @@ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
queue_for_each_hw_ctx(q, hctx, i) {
blk_mq_debugfs_unregister_sched_hctx(hctx);
- if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
- e->type->ops.mq.exit_hctx(hctx, i);
+ if (e->type->ops.exit_hctx && hctx->sched_data) {
+ e->type->ops.exit_hctx(hctx, i);
hctx->sched_data = NULL;
}
}
blk_mq_debugfs_unregister_sched(q);
- if (e->type->ops.mq.exit_sched)
- e->type->ops.mq.exit_sched(e);
+ if (e->type->ops.exit_sched)
+ e->type->ops.exit_sched(e);
blk_mq_sched_tags_teardown(q);
q->elevator = NULL;
}
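
Throughout this file, blk_mq_map_queue() now takes the command flags so the hctx lookup can pick a per-type hardware queue. The resolver ends up roughly like the sketch below (simplified: the in-tree version also verifies that the tag set actually provides a read or poll map before honoring the type):

    static inline struct blk_mq_hw_ctx *
    sketch_map_queue(struct request_queue *q, unsigned int flags, unsigned int cpu)
    {
    	enum hctx_type type = HCTX_TYPE_DEFAULT;

    	if (flags & REQ_HIPRI)				/* polled IO */
    		type = HCTX_TYPE_POLL;
    	else if ((flags & REQ_OP_MASK) == REQ_OP_READ)	/* separate read queues */
    		type = HCTX_TYPE_READ;

    	return blk_mq_map_queue_type(q, type, cpu);
    }
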
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 8a9544203173..c7bdb52367ac 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -8,18 +8,19 @@
void blk_mq_sched_free_hctx_data(struct request_queue *q,
void (*exit)(struct blk_mq_hw_ctx *));
-void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio);
+void blk_mq_sched_assign_ioc(struct request *rq);
void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
+void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async);
-void blk_mq_sched_insert_requests(struct request_queue *q,
+void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx,
struct list_head *list, bool run_queue_async);
@@ -43,8 +44,8 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
{
struct elevator_queue *e = q->elevator;
- if (e && e->type->ops.mq.allow_merge)
- return e->type->ops.mq.allow_merge(q, rq, bio);
+ if (e && e->type->ops.allow_merge)
+ return e->type->ops.allow_merge(q, rq, bio);
return true;
}
@@ -53,8 +54,8 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
struct elevator_queue *e = rq->q->elevator;
- if (e && e->type->ops.mq.completed_request)
- e->type->ops.mq.completed_request(rq, now);
+ if (e && e->type->ops.completed_request)
+ e->type->ops.completed_request(rq, now);
}
static inline void blk_mq_sched_started_request(struct request *rq)
@@ -62,8 +63,8 @@ static inline void blk_mq_sched_started_request(struct request *rq)
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
- if (e && e->type->ops.mq.started_request)
- e->type->ops.mq.started_request(rq);
+ if (e && e->type->ops.started_request)
+ e->type->ops.started_request(rq);
}
static inline void blk_mq_sched_requeue_request(struct request *rq)
@@ -71,16 +72,16 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
- if (e && e->type->ops.mq.requeue_request)
- e->type->ops.mq.requeue_request(rq);
+ if (e && e->type->ops.requeue_request)
+ e->type->ops.requeue_request(rq);
}
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
struct elevator_queue *e = hctx->queue->elevator;
- if (e && e->type->ops.mq.has_work)
- return e->type->ops.mq.has_work(hctx);
+ if (e && e->type->ops.has_work)
+ return e->type->ops.has_work(hctx);
return false;
}
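
The e->type->ops.mq.* to e->type->ops.* renames in these helpers reflect dropping the union that used to multiplex legacy and mq elevators. An abridged before/after sketch (the _before/_after struct names are editorial, and only the ops member is shown):

    /* before: one elevator_type served both IO paths */
    struct elevator_type_before {
    	union {
    		struct elevator_ops sq;		/* legacy request_fn path */
    		struct elevator_mq_ops mq;	/* blk-mq path */
    	} ops;
    };

    /* after: blk-mq is the only IO path, so the union is gone */
    struct elevator_type_after {
    	struct elevator_mq_ops ops;
    };
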
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index aafb44224c89..3f9c3f4ac44c 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -15,6 +15,18 @@
static void blk_mq_sysfs_release(struct kobject *kobj)
{
+ struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);
+
+ free_percpu(ctxs->queue_ctx);
+ kfree(ctxs);
+}
+
+static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
+{
+ struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+
+ /* ctx->ctxs won't be released until all ctx are freed */
+ kobject_put(&ctx->ctxs->kobj);
}
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
@@ -203,7 +215,7 @@ static struct kobj_type blk_mq_ktype = {
static struct kobj_type blk_mq_ctx_ktype = {
.sysfs_ops = &blk_mq_sysfs_ops,
.default_attrs = default_ctx_attrs,
- .release = blk_mq_sysfs_release,
+ .release = blk_mq_ctx_sysfs_release,
};
static struct kobj_type blk_mq_hw_ktype = {
@@ -235,7 +247,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
if (!hctx->nr_ctx)
return 0;
- ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
+ ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
if (ret)
return ret;
@@ -258,8 +270,8 @@ void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_unregister_hctx(hctx);
- kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
- kobject_del(&q->mq_kobj);
+ kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
+ kobject_del(q->mq_kobj);
kobject_put(&dev->kobj);
q->mq_sysfs_init_done = false;
@@ -279,7 +291,7 @@ void blk_mq_sysfs_deinit(struct request_queue *q)
ctx = per_cpu_ptr(q->queue_ctx, cpu);
kobject_put(&ctx->kobj);
}
- kobject_put(&q->mq_kobj);
+ kobject_put(q->mq_kobj);
}
void blk_mq_sysfs_init(struct request_queue *q)
@@ -287,10 +299,12 @@ void blk_mq_sysfs_init(struct request_queue *q)
struct blk_mq_ctx *ctx;
int cpu;
- kobject_init(&q->mq_kobj, &blk_mq_ktype);
+ kobject_init(q->mq_kobj, &blk_mq_ktype);
for_each_possible_cpu(cpu) {
ctx = per_cpu_ptr(q->queue_ctx, cpu);
+
+ kobject_get(q->mq_kobj);
kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}
}
@@ -303,11 +317,11 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
WARN_ON_ONCE(!q->kobj.parent);
lockdep_assert_held(&q->sysfs_lock);
- ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
+ ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
if (ret < 0)
goto out;
- kobject_uevent(&q->mq_kobj, KOBJ_ADD);
+ kobject_uevent(q->mq_kobj, KOBJ_ADD);
queue_for_each_hw_ctx(q, hctx, i) {
ret = blk_mq_register_hctx(hctx);
@@ -324,8 +338,8 @@ unreg:
while (--i >= 0)
blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
- kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
- kobject_del(&q->mq_kobj);
+ kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
+ kobject_del(q->mq_kobj);
kobject_put(&dev->kobj);
return ret;
}
@@ -340,7 +354,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q)
return ret;
}
-EXPORT_SYMBOL_GPL(blk_mq_register_dev);
void blk_mq_sysfs_unregister(struct request_queue *q)
{
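
The kobject churn in this file follows q->mq_kobj changing from an embedded kobject to a pointer into a dynamically allocated container (see blk_mq_alloc_ctxs() later in this diff). Each per-CPU ctx takes a reference on the container's kobject, so the percpu area is only freed once the last ctx kobject is released. The container, as used by the release handlers above, is essentially:

    struct blk_mq_ctxs {
    	struct kobject kobj;			/* exported as q->mq_kobj */
    	struct blk_mq_ctx __percpu *queue_ctx;	/* exported as q->queue_ctx */
    };

blk_mq_sysfs_release() (the ctxs release) is thus the one point where free_percpu() is safe, while blk_mq_ctx_sysfs_release() only drops the per-ctx reference.
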
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index cfda95b85d34..2089c6c62f44 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -110,7 +110,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
struct sbitmap_queue *bt;
struct sbq_wait_state *ws;
- DEFINE_WAIT(wait);
+ DEFINE_SBQ_WAIT(wait);
unsigned int tag_offset;
bool drop_ctx;
int tag;
@@ -154,8 +154,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
if (tag != -1)
break;
- prepare_to_wait_exclusive(&ws->wait, &wait,
- TASK_UNINTERRUPTIBLE);
+ sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);
tag = __blk_mq_get_tag(data, bt);
if (tag != -1)
@@ -167,16 +166,17 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
bt_prev = bt;
io_schedule();
+ sbitmap_finish_wait(bt, ws, &wait);
+
data->ctx = blk_mq_get_ctx(data->q);
- data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
+ data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
+ data->ctx->cpu);
tags = blk_mq_tags_from_data(data);
if (data->flags & BLK_MQ_REQ_RESERVED)
bt = &tags->breserved_tags;
else
bt = &tags->bitmap_tags;
- finish_wait(&ws->wait, &wait);
-
/*
* If destination hw queue is changed, fake wake up on
* previous queue for compensating the wake up miss, so
@@ -191,7 +191,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
if (drop_ctx && data->ctx)
blk_mq_put_ctx(data->ctx);
- finish_wait(&ws->wait, &wait);
+ sbitmap_finish_wait(bt, ws, &wait);
found_tag:
return tag + tag_offset;
@@ -235,7 +235,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
* test and set the bit before assigning ->rqs[].
*/
if (rq && rq->q == hctx->queue)
- iter_data->fn(hctx, rq, iter_data->data, reserved);
+ return iter_data->fn(hctx, rq, iter_data->data, reserved);
return true;
}
@@ -247,7 +247,8 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
* @fn: Pointer to the function that will be called for each request
* associated with @hctx that has been assigned a driver tag.
* @fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
- * where rq is a pointer to a request.
+ * where rq is a pointer to a request. Return true to continue
+ * iterating tags, false to stop.
* @data: Will be passed as third argument to @fn.
* @reserved: Indicates whether @bt is the breserved_tags member or the
* bitmap_tags member of struct blk_mq_tags.
@@ -288,7 +289,7 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
*/
rq = tags->rqs[bitnr];
if (rq && blk_mq_request_started(rq))
- iter_data->fn(rq, iter_data->data, reserved);
+ return iter_data->fn(rq, iter_data->data, reserved);
return true;
}
@@ -300,7 +301,8 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
* or the bitmap_tags member of struct blk_mq_tags.
* @fn: Pointer to the function that will be called for each started
* request. @fn will be called as follows: @fn(rq, @data,
- * @reserved) where rq is a pointer to a request.
+ * @reserved) where rq is a pointer to a request. Return true
+ * to continue iterating tags, false to stop.
* @data: Will be passed as second argument to @fn.
* @reserved: Indicates whether @bt is the breserved_tags member or the
* bitmap_tags member of struct blk_mq_tags.
@@ -325,7 +327,8 @@ static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
* @fn: Pointer to the function that will be called for each started
* request. @fn will be called as follows: @fn(rq, @priv,
* reserved) where rq is a pointer to a request. 'reserved'
- * indicates whether or not @rq is a reserved request.
+ * indicates whether or not @rq is a reserved request. Return
+ * true to continue iterating tags, false to stop.
* @priv: Will be passed as second argument to @fn.
*/
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
@@ -342,7 +345,8 @@ static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
* @fn: Pointer to the function that will be called for each started
* request. @fn will be called as follows: @fn(rq, @priv,
* reserved) where rq is a pointer to a request. 'reserved'
- * indicates whether or not @rq is a reserved request.
+ * indicates whether or not @rq is a reserved request. Return
+ * true to continue iterating tags, false to stop.
* @priv: Will be passed as second argument to @fn.
*/
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
@@ -526,16 +530,7 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
*/
u32 blk_mq_unique_tag(struct request *rq)
{
- struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx;
- int hwq = 0;
-
- if (q->mq_ops) {
- hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
- hwq = hctx->queue_num;
- }
-
- return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
+ return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
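
With rq->mq_hctx cached in the request, the unique tag is a straight bit-pack; the existing decode helpers recover both halves. For example (a sketch; show_unique_tag() is a hypothetical debugging helper):

    static void show_unique_tag(struct request *rq)
    {
    	u32 unique = blk_mq_unique_tag(rq);

    	pr_debug("hwq %u tag %u\n",
    		 blk_mq_unique_tag_to_hwq(unique),  /* unique >> BLK_MQ_UNIQUE_TAG_BITS */
    		 blk_mq_unique_tag_to_tag(unique)); /* unique & BLK_MQ_UNIQUE_TAG_MASK */
    }
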
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index c3afbca11299..370827163835 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -29,7 +29,7 @@
* that maps a queue to the CPUs that have irq affinity for the corresponding
* vector.
*/
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec)
{
const struct cpumask *mask;
@@ -38,17 +38,17 @@ int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
if (!vdev->config->get_vq_affinity)
goto fallback;
- for (queue = 0; queue < set->nr_hw_queues; queue++) {
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
if (!mask)
goto fallback;
for_each_cpu(cpu, mask)
- set->mq_map[cpu] = queue;
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
}
return 0;
fallback:
- return blk_mq_map_queues(set);
+ return blk_mq_map_queues(qmap);
}
EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
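
virtio-blk's ->map_queues() reduces to a one-liner on top of this helper; a sketch modeled on that driver (illustrative, not necessarily the exact tree state):

    static int virtblk_map_queues(struct blk_mq_tag_set *set)
    {
    	struct virtio_blk *vblk = set->driver_data;

    	return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
    }
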
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3f91c6e5b17a..3ba37b9e15e9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -38,7 +38,6 @@
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
-static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);
static void blk_mq_poll_stats_start(struct request_queue *q);
static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
@@ -75,14 +74,18 @@ static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{
- if (!sbitmap_test_bit(&hctx->ctx_map, ctx->index_hw))
- sbitmap_set_bit(&hctx->ctx_map, ctx->index_hw);
+ const int bit = ctx->index_hw[hctx->type];
+
+ if (!sbitmap_test_bit(&hctx->ctx_map, bit))
+ sbitmap_set_bit(&hctx->ctx_map, bit);
}
static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{
- sbitmap_clear_bit(&hctx->ctx_map, ctx->index_hw);
+ const int bit = ctx->index_hw[hctx->type];
+
+ sbitmap_clear_bit(&hctx->ctx_map, bit);
}
struct mq_inflight {
@@ -90,33 +93,33 @@ struct mq_inflight {
unsigned int *inflight;
};
-static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
+static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv,
bool reserved)
{
struct mq_inflight *mi = priv;
/*
- * index[0] counts the specific partition that was asked for. index[1]
- * counts the ones that are active on the whole device, so increment
- * that if mi->part is indeed a partition, and not a whole device.
+ * index[0] counts the specific partition that was asked for.
*/
if (rq->part == mi->part)
mi->inflight[0]++;
- if (mi->part->partno)
- mi->inflight[1]++;
+
+ return true;
}
-void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2])
+unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
{
+ unsigned inflight[2];
struct mq_inflight mi = { .part = part, .inflight = inflight, };
inflight[0] = inflight[1] = 0;
blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
+
+ return inflight[0];
}
-static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
+static bool blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv,
bool reserved)
{
@@ -124,6 +127,8 @@ static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
if (rq->part == mi->part)
mi->inflight[rq_data_dir(rq)]++;
+
+ return true;
}
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
@@ -142,7 +147,7 @@ void blk_freeze_queue_start(struct request_queue *q)
freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
if (freeze_depth == 1) {
percpu_ref_kill(&q->q_usage_counter);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_run_hw_queues(q, false);
}
}
@@ -177,8 +182,6 @@ void blk_freeze_queue(struct request_queue *q)
* exported to drivers as the only user for unfreeze is blk_mq.
*/
blk_freeze_queue_start(q);
- if (!q->mq_ops)
- blk_drain_queue(q);
blk_mq_freeze_queue_wait(q);
}
@@ -275,6 +278,15 @@ bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
}
EXPORT_SYMBOL(blk_mq_can_queue);
+/*
+ * Only need start/end time stamping if we have stats enabled, or are
+ * using an IO scheduler.
+ */
+static inline bool blk_mq_need_time_stamp(struct request *rq)
+{
+ return (rq->rq_flags & RQF_IO_STAT) || rq->q->elevator;
+}
+
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
unsigned int tag, unsigned int op)
{
@@ -298,8 +310,8 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
/* csd/requeue_work/fifo_time is initialized before use */
rq->q = data->q;
rq->mq_ctx = data->ctx;
+ rq->mq_hctx = data->hctx;
rq->rq_flags = rq_flags;
- rq->cpu = -1;
rq->cmd_flags = op;
if (data->flags & BLK_MQ_REQ_PREEMPT)
rq->rq_flags |= RQF_PREEMPT;
@@ -310,7 +322,10 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
RB_CLEAR_NODE(&rq->rb_node);
rq->rq_disk = NULL;
rq->part = NULL;
- rq->start_time_ns = ktime_get_ns();
+ if (blk_mq_need_time_stamp(rq))
+ rq->start_time_ns = ktime_get_ns();
+ else
+ rq->start_time_ns = 0;
rq->io_start_time_ns = 0;
rq->nr_phys_segments = 0;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -319,27 +334,22 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->special = NULL;
/* tag was already set */
rq->extra_len = 0;
- rq->__deadline = 0;
+ WRITE_ONCE(rq->deadline, 0);
- INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
rq->end_io = NULL;
rq->end_io_data = NULL;
rq->next_rq = NULL;
-#ifdef CONFIG_BLK_CGROUP
- rq->rl = NULL;
-#endif
-
data->ctx->rq_dispatched[op_is_sync(op)]++;
refcount_set(&rq->ref, 1);
return rq;
}
static struct request *blk_mq_get_request(struct request_queue *q,
- struct bio *bio, unsigned int op,
- struct blk_mq_alloc_data *data)
+ struct bio *bio,
+ struct blk_mq_alloc_data *data)
{
struct elevator_queue *e = q->elevator;
struct request *rq;
@@ -353,8 +363,9 @@ static struct request *blk_mq_get_request(struct request_queue *q,
put_ctx_on_error = true;
}
if (likely(!data->hctx))
- data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
- if (op & REQ_NOWAIT)
+ data->hctx = blk_mq_map_queue(q, data->cmd_flags,
+ data->ctx->cpu);
+ if (data->cmd_flags & REQ_NOWAIT)
data->flags |= BLK_MQ_REQ_NOWAIT;
if (e) {
@@ -365,9 +376,10 @@ static struct request *blk_mq_get_request(struct request_queue *q,
* dispatch list. Don't include reserved tags in the
* limiting, as it isn't useful.
*/
- if (!op_is_flush(op) && e->type->ops.mq.limit_depth &&
+ if (!op_is_flush(data->cmd_flags) &&
+ e->type->ops.limit_depth &&
!(data->flags & BLK_MQ_REQ_RESERVED))
- e->type->ops.mq.limit_depth(op, data);
+ e->type->ops.limit_depth(data->cmd_flags, data);
} else {
blk_mq_tag_busy(data->hctx);
}
@@ -382,14 +394,14 @@ static struct request *blk_mq_get_request(struct request_queue *q,
return NULL;
}
- rq = blk_mq_rq_ctx_init(data, tag, op);
- if (!op_is_flush(op)) {
+ rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags);
+ if (!op_is_flush(data->cmd_flags)) {
rq->elv.icq = NULL;
- if (e && e->type->ops.mq.prepare_request) {
- if (e->type->icq_cache && rq_ioc(bio))
- blk_mq_sched_assign_ioc(rq, bio);
+ if (e && e->type->ops.prepare_request) {
+ if (e->type->icq_cache)
+ blk_mq_sched_assign_ioc(rq);
- e->type->ops.mq.prepare_request(rq, bio);
+ e->type->ops.prepare_request(rq, bio);
rq->rq_flags |= RQF_ELVPRIV;
}
}
@@ -400,7 +412,7 @@ static struct request *blk_mq_get_request(struct request_queue *q,
struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
blk_mq_req_flags_t flags)
{
- struct blk_mq_alloc_data alloc_data = { .flags = flags };
+ struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
struct request *rq;
int ret;
@@ -408,7 +420,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
if (ret)
return ERR_PTR(ret);
- rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+ rq = blk_mq_get_request(q, NULL, &alloc_data);
blk_queue_exit(q);
if (!rq)
@@ -426,7 +438,7 @@ EXPORT_SYMBOL(blk_mq_alloc_request);
struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
unsigned int op, blk_mq_req_flags_t flags, unsigned int hctx_idx)
{
- struct blk_mq_alloc_data alloc_data = { .flags = flags };
+ struct blk_mq_alloc_data alloc_data = { .flags = flags, .cmd_flags = op };
struct request *rq;
unsigned int cpu;
int ret;
@@ -459,7 +471,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
cpu = cpumask_first_and(alloc_data.hctx->cpumask, cpu_online_mask);
alloc_data.ctx = __blk_mq_get_ctx(q, cpu);
- rq = blk_mq_get_request(q, NULL, op, &alloc_data);
+ rq = blk_mq_get_request(q, NULL, &alloc_data);
blk_queue_exit(q);
if (!rq)
@@ -473,10 +485,11 @@ static void __blk_mq_free_request(struct request *rq)
{
struct request_queue *q = rq->q;
struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
const int sched_tag = rq->internal_tag;
blk_pm_mark_last_busy(rq);
+ rq->mq_hctx = NULL;
if (rq->tag != -1)
blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
@@ -490,11 +503,11 @@ void blk_mq_free_request(struct request *rq)
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
if (rq->rq_flags & RQF_ELVPRIV) {
- if (e && e->type->ops.mq.finish_request)
- e->type->ops.mq.finish_request(rq);
+ if (e && e->type->ops.finish_request)
+ e->type->ops.finish_request(rq);
if (rq->elv.icq) {
put_io_context(rq->elv.icq->ioc);
rq->elv.icq = NULL;
@@ -510,9 +523,6 @@ void blk_mq_free_request(struct request *rq)
rq_qos_done(q, rq);
- if (blk_rq_rl(rq))
- blk_put_rl(blk_rq_rl(rq));
-
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
if (refcount_dec_and_test(&rq->ref))
__blk_mq_free_request(rq);
@@ -521,7 +531,10 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request);
inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
{
- u64 now = ktime_get_ns();
+ u64 now = 0;
+
+ if (blk_mq_need_time_stamp(rq))
+ now = ktime_get_ns();
if (rq->rq_flags & RQF_STATS) {
blk_mq_poll_stats_start(rq->q);
@@ -555,19 +568,19 @@ EXPORT_SYMBOL(blk_mq_end_request);
static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
+ struct request_queue *q = rq->q;
- rq->q->softirq_done_fn(rq);
+ q->mq_ops->complete(rq);
}
static void __blk_mq_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
+ struct request_queue *q = rq->q;
bool shared = false;
int cpu;
- if (!blk_mq_mark_complete(rq))
- return;
-
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
/*
* Most single queue controllers have only one irq vector
* for handling IO completion, and the only irq's affinity is set
* to all possible CPUs. So complete the IO request in softirq
* context in the single queue case, to avoid degrading IO
* performance through irqsoff latency.
* for not degrading IO performance by irqsoff latency.
*/
- if (rq->q->nr_hw_queues == 1) {
+ if (q->nr_hw_queues == 1) {
__blk_complete_request(rq);
return;
}
- if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
- rq->q->softirq_done_fn(rq);
+ /*
+ * For a polled request, always complete locally; it's pointless
+ * to redirect the completion.
+ */
+ if ((rq->cmd_flags & REQ_HIPRI) ||
+ !test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
+ q->mq_ops->complete(rq);
return;
}
cpu = get_cpu();
- if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
+ if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
@@ -597,7 +615,7 @@ static void __blk_mq_complete_request(struct request *rq)
rq->csd.flags = 0;
smp_call_function_single_async(ctx->cpu, &rq->csd);
} else {
- rq->q->softirq_done_fn(rq);
+ q->mq_ops->complete(rq);
}
put_cpu();
}
@@ -630,11 +648,12 @@ static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
* Ends all I/O on a request. It does not handle partial completions.
* The actual completion happens out-of-order, through a IPI handler.
**/
-void blk_mq_complete_request(struct request *rq)
+bool blk_mq_complete_request(struct request *rq)
{
if (unlikely(blk_should_fake_timeout(rq->q)))
- return;
+ return false;
__blk_mq_complete_request(rq);
+ return true;
}
EXPORT_SYMBOL(blk_mq_complete_request);
@@ -701,7 +720,7 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
/* this request will be re-inserted to io scheduler queue */
blk_mq_sched_requeue_request(rq);
- BUG_ON(blk_queued_rq(rq));
+ BUG_ON(!list_empty(&rq->queuelist));
blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
}
EXPORT_SYMBOL(blk_mq_requeue_request);
@@ -786,6 +805,32 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
}
EXPORT_SYMBOL(blk_mq_tag_to_rq);
+static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
+ void *priv, bool reserved)
+{
+ /*
+ * If we find a request that is inflight and the queue matches,
+ * we know the queue is busy. Return false to stop the iteration.
+ */
+ if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
+ bool *busy = priv;
+
+ *busy = true;
+ return false;
+ }
+
+ return true;
+}
+
+bool blk_mq_queue_inflight(struct request_queue *q)
+{
+ bool busy = false;
+
+ blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy);
+ return busy;
+}
+EXPORT_SYMBOL_GPL(blk_mq_queue_inflight);
+
static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
req->rq_flags |= RQF_TIMED_OUT;
@@ -810,7 +855,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
if (rq->rq_flags & RQF_TIMED_OUT)
return false;
- deadline = blk_rq_deadline(rq);
+ deadline = READ_ONCE(rq->deadline);
if (time_after_eq(jiffies, deadline))
return true;
@@ -821,7 +866,7 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
return false;
}
-static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
+static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv, bool reserved)
{
unsigned long *next = priv;
@@ -831,7 +876,7 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
* Just do a quick check if it is expired before locking the request,
* so we're not unnecessarily synchronizing across CPUs.
*/
if (!blk_mq_req_expired(rq, next))
- return;
+ return true;
/*
* We have reason to believe the request may be expired. Take a
@@ -843,7 +888,7 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
* timeout handler to posting a natural completion.
*/
if (!refcount_inc_not_zero(&rq->ref))
- return;
+ return true;
/*
* The request is now locked and cannot be reallocated underneath the
@@ -855,6 +900,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
blk_mq_rq_timed_out(rq, reserved);
if (refcount_dec_and_test(&rq->ref))
__blk_mq_free_request(rq);
+
+ return true;
}
static void blk_mq_timeout_work(struct work_struct *work)
@@ -911,9 +958,10 @@ static bool flush_busy_ctx(struct sbitmap *sb, unsigned int bitnr, void *data)
struct flush_busy_ctx_data *flush_data = data;
struct blk_mq_hw_ctx *hctx = flush_data->hctx;
struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
+ enum hctx_type type = hctx->type;
spin_lock(&ctx->lock);
- list_splice_tail_init(&ctx->rq_list, flush_data->list);
+ list_splice_tail_init(&ctx->rq_lists[type], flush_data->list);
sbitmap_clear_bit(sb, bitnr);
spin_unlock(&ctx->lock);
return true;
@@ -945,12 +993,13 @@ static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
struct dispatch_rq_data *dispatch_data = data;
struct blk_mq_hw_ctx *hctx = dispatch_data->hctx;
struct blk_mq_ctx *ctx = hctx->ctxs[bitnr];
+ enum hctx_type type = hctx->type;
spin_lock(&ctx->lock);
- if (!list_empty(&ctx->rq_list)) {
- dispatch_data->rq = list_entry_rq(ctx->rq_list.next);
+ if (!list_empty(&ctx->rq_lists[type])) {
+ dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next);
list_del_init(&dispatch_data->rq->queuelist);
- if (list_empty(&ctx->rq_list))
+ if (list_empty(&ctx->rq_lists[type]))
sbitmap_clear_bit(sb, bitnr);
}
spin_unlock(&ctx->lock);
@@ -961,7 +1010,7 @@ static bool dispatch_rq_from_ctx(struct sbitmap *sb, unsigned int bitnr,
struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *start)
{
- unsigned off = start ? start->index_hw : 0;
+ unsigned off = start ? start->index_hw[hctx->type] : 0;
struct dispatch_rq_data data = {
.hctx = hctx,
.rq = NULL,
@@ -985,8 +1034,9 @@ bool blk_mq_get_driver_tag(struct request *rq)
{
struct blk_mq_alloc_data data = {
.q = rq->q,
- .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
+ .hctx = rq->mq_hctx,
.flags = BLK_MQ_REQ_NOWAIT,
+ .cmd_flags = rq->cmd_flags,
};
bool shared;
@@ -1150,7 +1200,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
rq = list_first_entry(list, struct request, queuelist);
- hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+ hctx = rq->mq_hctx;
if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
break;
@@ -1223,6 +1273,14 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
if (!list_empty(list)) {
bool needs_restart;
+ /*
+ * If we didn't flush the entire list, we could have told
+ * the driver there was more coming, but that turned out to
+ * be a lie.
+ */
+ if (q->mq_ops->commit_rqs)
+ q->mq_ops->commit_rqs(hctx);
+
spin_lock(&hctx->lock);
list_splice_init(list, &hctx->dispatch);
spin_unlock(&hctx->lock);
@@ -1552,15 +1610,16 @@ static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
bool at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
+ enum hctx_type type = hctx->type;
lockdep_assert_held(&ctx->lock);
trace_block_rq_insert(hctx->queue, rq);
if (at_head)
- list_add(&rq->queuelist, &ctx->rq_list);
+ list_add(&rq->queuelist, &ctx->rq_lists[type]);
else
- list_add_tail(&rq->queuelist, &ctx->rq_list);
+ list_add_tail(&rq->queuelist, &ctx->rq_lists[type]);
}
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
@@ -1580,8 +1639,7 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
*/
void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
{
- struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
spin_lock(&hctx->lock);
list_add_tail(&rq->queuelist, &hctx->dispatch);
@@ -1596,6 +1654,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
{
struct request *rq;
+ enum hctx_type type = hctx->type;
/*
* preemption doesn't flush plug list, so it's possible ctx->cpu is
@@ -1607,35 +1666,46 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
}
spin_lock(&ctx->lock);
- list_splice_tail_init(list, &ctx->rq_list);
+ list_splice_tail_init(list, &ctx->rq_lists[type]);
blk_mq_hctx_mark_pending(hctx, ctx);
spin_unlock(&ctx->lock);
}
-static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
+static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
{
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);
- return !(rqa->mq_ctx < rqb->mq_ctx ||
- (rqa->mq_ctx == rqb->mq_ctx &&
- blk_rq_pos(rqa) < blk_rq_pos(rqb)));
+ if (rqa->mq_ctx < rqb->mq_ctx)
+ return -1;
+ else if (rqa->mq_ctx > rqb->mq_ctx)
+ return 1;
+ else if (rqa->mq_hctx < rqb->mq_hctx)
+ return -1;
+ else if (rqa->mq_hctx > rqb->mq_hctx)
+ return 1;
+
+ return blk_rq_pos(rqa) > blk_rq_pos(rqb);
}
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
+ struct blk_mq_hw_ctx *this_hctx;
struct blk_mq_ctx *this_ctx;
struct request_queue *this_q;
struct request *rq;
LIST_HEAD(list);
- LIST_HEAD(ctx_list);
+ LIST_HEAD(rq_list);
unsigned int depth;
list_splice_init(&plug->mq_list, &list);
+ plug->rq_count = 0;
- list_sort(NULL, &list, plug_ctx_cmp);
+ if (plug->rq_count > 2 && plug->multiple_queues)
+ list_sort(NULL, &list, plug_rq_cmp);
this_q = NULL;
+ this_hctx = NULL;
this_ctx = NULL;
depth = 0;
@@ -1643,30 +1713,31 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
BUG_ON(!rq->q);
- if (rq->mq_ctx != this_ctx) {
- if (this_ctx) {
+ if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
+ if (this_hctx) {
trace_block_unplug(this_q, depth, !from_schedule);
- blk_mq_sched_insert_requests(this_q, this_ctx,
- &ctx_list,
+ blk_mq_sched_insert_requests(this_hctx, this_ctx,
+ &rq_list,
from_schedule);
}
- this_ctx = rq->mq_ctx;
this_q = rq->q;
+ this_ctx = rq->mq_ctx;
+ this_hctx = rq->mq_hctx;
depth = 0;
}
depth++;
- list_add_tail(&rq->queuelist, &ctx_list);
+ list_add_tail(&rq->queuelist, &rq_list);
}
/*
- * If 'this_ctx' is set, we know we have entries to complete
- * on 'ctx_list'. Do those.
+ * If 'this_hctx' is set, we know we have entries to complete
+ * on 'rq_list'. Do those.
*/
- if (this_ctx) {
+ if (this_hctx) {
trace_block_unplug(this_q, depth, !from_schedule);
- blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
+ blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
from_schedule);
}
}
@@ -1675,27 +1746,17 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
{
blk_init_request_from_bio(rq, bio);
- blk_rq_set_rl(rq, blk_get_rl(rq->q, bio));
-
blk_account_io_start(rq, true);
}
-static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
-{
- if (rq->tag != -1)
- return blk_tag_to_qc_t(rq->tag, hctx->queue_num, false);
-
- return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
-}
-
static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
- blk_qc_t *cookie)
+ blk_qc_t *cookie, bool last)
{
struct request_queue *q = rq->q;
struct blk_mq_queue_data bd = {
.rq = rq,
- .last = true,
+ .last = last,
};
blk_qc_t new_cookie;
blk_status_t ret;
@@ -1727,77 +1788,74 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
return ret;
}
-static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq,
blk_qc_t *cookie,
- bool bypass_insert)
+ bool bypass, bool last)
{
struct request_queue *q = rq->q;
bool run_queue = true;
+ blk_status_t ret = BLK_STS_RESOURCE;
+ int srcu_idx;
+ bool force = false;
+ hctx_lock(hctx, &srcu_idx);
/*
- * RCU or SRCU read lock is needed before checking quiesced flag.
+ * hctx_lock is needed before checking quiesced flag.
*
- * When queue is stopped or quiesced, ignore 'bypass_insert' from
- * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
- * and avoid driver to try to dispatch again.
+ * When the queue is stopped or quiesced, ignore 'bypass': insert
+ * the request and return BLK_STS_OK to the caller, so the driver
+ * does not try to dispatch again.
*/
- if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
+ if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
run_queue = false;
- bypass_insert = false;
- goto insert;
+ bypass = false;
+ goto out_unlock;
}
- if (q->elevator && !bypass_insert)
- goto insert;
+ if (unlikely(q->elevator && !bypass))
+ goto out_unlock;
if (!blk_mq_get_dispatch_budget(hctx))
- goto insert;
+ goto out_unlock;
if (!blk_mq_get_driver_tag(rq)) {
blk_mq_put_dispatch_budget(hctx);
- goto insert;
+ goto out_unlock;
}
- return __blk_mq_issue_directly(hctx, rq, cookie);
-insert:
- if (bypass_insert)
- return BLK_STS_RESOURCE;
-
- blk_mq_sched_insert_request(rq, false, run_queue, false);
- return BLK_STS_OK;
-}
-
-static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
- struct request *rq, blk_qc_t *cookie)
-{
- blk_status_t ret;
- int srcu_idx;
-
- might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
-
- hctx_lock(hctx, &srcu_idx);
-
- ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
- if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
- blk_mq_sched_insert_request(rq, false, true, false);
- else if (ret != BLK_STS_OK)
- blk_mq_end_request(rq, ret);
-
- hctx_unlock(hctx, srcu_idx);
-}
-
-blk_status_t blk_mq_request_issue_directly(struct request *rq)
-{
- blk_status_t ret;
- int srcu_idx;
- blk_qc_t unused_cookie;
- struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(rq->q, ctx->cpu);
-
- hctx_lock(hctx, &srcu_idx);
- ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true);
+ /*
+ * Always add a request that has been through
+ * ->queue_rq() to the hardware dispatch list.
+ */
+ force = true;
+ ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
+out_unlock:
hctx_unlock(hctx, srcu_idx);
+ switch (ret) {
+ case BLK_STS_OK:
+ break;
+ case BLK_STS_DEV_RESOURCE:
+ case BLK_STS_RESOURCE:
+ if (force) {
+ blk_mq_request_bypass_insert(rq, run_queue);
+ /*
+ * We have to return BLK_STS_OK to DM to avoid
+ * livelock; otherwise we return the real result,
+ * which indicates whether the request was
+ * direct-issued successfully.
+ */
+ ret = bypass ? BLK_STS_OK : ret;
+ } else if (!bypass) {
+ blk_mq_sched_insert_request(rq, false,
+ run_queue, false);
+ }
+ break;
+ default:
+ if (!bypass)
+ blk_mq_end_request(rq, ret);
+ break;
+ }
return ret;
}
@@ -1805,21 +1863,42 @@ blk_status_t blk_mq_request_issue_directly(struct request *rq)
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list)
{
+ blk_qc_t unused;
+ blk_status_t ret = BLK_STS_OK;
+
while (!list_empty(list)) {
- blk_status_t ret;
struct request *rq = list_first_entry(list, struct request,
queuelist);
list_del_init(&rq->queuelist);
- ret = blk_mq_request_issue_directly(rq);
- if (ret != BLK_STS_OK) {
- if (ret == BLK_STS_RESOURCE ||
- ret == BLK_STS_DEV_RESOURCE) {
- list_add(&rq->queuelist, list);
- break;
- }
- blk_mq_end_request(rq, ret);
- }
+ if (ret == BLK_STS_OK)
+ ret = blk_mq_try_issue_directly(hctx, rq, &unused,
+ false,
+ list_empty(list));
+ else
+ blk_mq_sched_insert_request(rq, false, true, false);
+ }
+
+ /*
+ * If we didn't flush the entire list, we could have told
+ * the driver there was more coming, but that turned out to
+ * be a lie.
+ */
+ if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
+ hctx->queue->mq_ops->commit_rqs(hctx);
+}
+
+static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
+{
+ list_add_tail(&rq->queuelist, &plug->mq_list);
+ plug->rq_count++;
+ if (!plug->multiple_queues && !list_is_singular(&plug->mq_list)) {
+ struct request *tmp;
+
+ tmp = list_first_entry(&plug->mq_list, struct request,
+ queuelist);
+ if (tmp->q != rq->q)
+ plug->multiple_queues = true;
}
}
@@ -1827,9 +1906,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
{
const int is_sync = op_is_sync(bio->bi_opf);
const int is_flush_fua = op_is_flush(bio->bi_opf);
- struct blk_mq_alloc_data data = { .flags = 0 };
+ struct blk_mq_alloc_data data = { .flags = 0, .cmd_flags = bio->bi_opf };
struct request *rq;
- unsigned int request_count = 0;
struct blk_plug *plug;
struct request *same_queue_rq = NULL;
blk_qc_t cookie;
@@ -1842,15 +1920,15 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
if (!is_flush_fua && !blk_queue_nomerges(q) &&
- blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
+ blk_attempt_plug_merge(q, bio, &same_queue_rq))
return BLK_QC_T_NONE;
if (blk_mq_sched_bio_merge(q, bio))
return BLK_QC_T_NONE;
- rq_qos_throttle(q, bio, NULL);
+ rq_qos_throttle(q, bio);
- rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
+ rq = blk_mq_get_request(q, bio, &data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
@@ -1872,21 +1950,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
/* bypass scheduler for flush rq */
blk_insert_flush(rq);
blk_mq_run_hw_queue(data.hctx, true);
- } else if (plug && q->nr_hw_queues == 1) {
+ } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
+ /*
+ * Use plugging if we have a ->commit_rqs() hook as well, as
+ * we know the driver uses bd->last in a smart fashion.
+ */
+ unsigned int request_count = plug->rq_count;
struct request *last = NULL;
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
- /*
- * @request_count may become stale because of schedule
- * out, so check the list again.
- */
- if (list_empty(&plug->mq_list))
- request_count = 0;
- else if (blk_queue_nomerges(q))
- request_count = blk_plug_queued_count(q);
-
if (!request_count)
trace_block_plug(q);
else
@@ -1898,7 +1972,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
trace_block_plug(q);
}
- list_add_tail(&rq->queuelist, &plug->mq_list);
+ blk_add_rq_to_plug(plug, rq);
} else if (plug && !blk_queue_nomerges(q)) {
blk_mq_bio_to_request(rq, bio);
@@ -1911,23 +1985,24 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
*/
if (list_empty(&plug->mq_list))
same_queue_rq = NULL;
- if (same_queue_rq)
+ if (same_queue_rq) {
list_del_init(&same_queue_rq->queuelist);
- list_add_tail(&rq->queuelist, &plug->mq_list);
+ plug->rq_count--;
+ }
+ blk_add_rq_to_plug(plug, rq);
blk_mq_put_ctx(data.ctx);
if (same_queue_rq) {
- data.hctx = blk_mq_map_queue(q,
- same_queue_rq->mq_ctx->cpu);
+ data.hctx = same_queue_rq->mq_hctx;
blk_mq_try_issue_directly(data.hctx, same_queue_rq,
- &cookie);
+ &cookie, false, true);
}
} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
!data.hctx->dispatch_busy)) {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
- blk_mq_try_issue_directly(data.hctx, rq, &cookie);
+ blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
} else {
blk_mq_put_ctx(data.ctx);
blk_mq_bio_to_request(rq, bio);
@@ -1985,7 +2060,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags;
int node;
- node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+ node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
if (node == NUMA_NO_NODE)
node = set->numa_node;
@@ -2041,7 +2116,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
size_t rq_size, left;
int node;
- node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+ node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
if (node == NUMA_NO_NODE)
node = set->numa_node;
@@ -2121,13 +2196,15 @@ static int blk_mq_hctx_notify_dead(unsigned int cpu, struct hlist_node *node)
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
LIST_HEAD(tmp);
+ enum hctx_type type;
hctx = hlist_entry_safe(node, struct blk_mq_hw_ctx, cpuhp_dead);
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+ type = hctx->type;
spin_lock(&ctx->lock);
- if (!list_empty(&ctx->rq_list)) {
- list_splice_init(&ctx->rq_list, &tmp);
+ if (!list_empty(&ctx->rq_lists[type])) {
+ list_splice_init(&ctx->rq_lists[type], &tmp);
blk_mq_hctx_clear_pending(hctx, ctx);
}
spin_unlock(&ctx->lock);
@@ -2258,24 +2335,30 @@ static int blk_mq_init_hctx(struct request_queue *q,
static void blk_mq_init_cpu_queues(struct request_queue *q,
unsigned int nr_hw_queues)
{
- unsigned int i;
+ struct blk_mq_tag_set *set = q->tag_set;
+ unsigned int i, j;
for_each_possible_cpu(i) {
struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
struct blk_mq_hw_ctx *hctx;
+ int k;
__ctx->cpu = i;
spin_lock_init(&__ctx->lock);
- INIT_LIST_HEAD(&__ctx->rq_list);
+ for (k = HCTX_TYPE_DEFAULT; k < HCTX_MAX_TYPES; k++)
+ INIT_LIST_HEAD(&__ctx->rq_lists[k]);
+
__ctx->queue = q;
/*
* Set local node, IFF we have more than one hw queue. If
* not, we remain on the home node of the device
*/
- hctx = blk_mq_map_queue(q, i);
- if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
- hctx->numa_node = local_memory_node(cpu_to_node(i));
+ for (j = 0; j < set->nr_maps; j++) {
+ hctx = blk_mq_map_queue_type(q, j, i);
+ if (nr_hw_queues > 1 && hctx->numa_node == NUMA_NO_NODE)
+ hctx->numa_node = local_memory_node(cpu_to_node(i));
+ }
}
}
@@ -2301,7 +2384,7 @@ static bool __blk_mq_alloc_rq_map(struct blk_mq_tag_set *set, int hctx_idx)
static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
unsigned int hctx_idx)
{
- if (set->tags[hctx_idx]) {
+ if (set->tags && set->tags[hctx_idx]) {
blk_mq_free_rqs(set, set->tags[hctx_idx], hctx_idx);
blk_mq_free_rq_map(set->tags[hctx_idx]);
set->tags[hctx_idx] = NULL;
@@ -2310,7 +2393,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
static void blk_mq_map_swqueue(struct request_queue *q)
{
- unsigned int i, hctx_idx;
+ unsigned int i, j, hctx_idx;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
@@ -2332,7 +2415,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
* If the cpu isn't present, the cpu is mapped to first hctx.
*/
for_each_possible_cpu(i) {
- hctx_idx = q->mq_map[i];
+ hctx_idx = set->map[0].mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] &&
!__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2342,15 +2425,35 @@ static void blk_mq_map_swqueue(struct request_queue *q)
* case, remap the current ctx to hctx[0] which
* is guaranteed to always have tags allocated
*/
- q->mq_map[i] = 0;
+ set->map[0].mq_map[i] = 0;
}
ctx = per_cpu_ptr(q->queue_ctx, i);
- hctx = blk_mq_map_queue(q, i);
+ for (j = 0; j < set->nr_maps; j++) {
+ if (!set->map[j].nr_queues)
+ continue;
+
+ hctx = blk_mq_map_queue_type(q, j, i);
+
+ /*
+ * If the CPU is already set in the mask, then we've
+ * mapped this one already. This can happen if
+ * devices share queues across queue maps.
+ */
+ if (cpumask_test_cpu(i, hctx->cpumask))
+ continue;
+
+ cpumask_set_cpu(i, hctx->cpumask);
+ hctx->type = j;
+ ctx->index_hw[hctx->type] = hctx->nr_ctx;
+ hctx->ctxs[hctx->nr_ctx++] = ctx;
- cpumask_set_cpu(i, hctx->cpumask);
- ctx->index_hw = hctx->nr_ctx;
- hctx->ctxs[hctx->nr_ctx++] = ctx;
+ /*
+ * If the nr_ctx type overflows, we have exceeded the
+			 * number of sw queues we can support.
+ */
+ BUG_ON(!hctx->nr_ctx);
+ }
}
mutex_unlock(&q->sysfs_lock);
@@ -2440,8 +2543,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
struct request_queue *q)
{
- q->tag_set = set;
-
mutex_lock(&set->tag_list_lock);
/*
@@ -2460,6 +2561,34 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
mutex_unlock(&set->tag_list_lock);
}
+/* All allocations will be freed in release handler of q->mq_kobj */
+static int blk_mq_alloc_ctxs(struct request_queue *q)
+{
+ struct blk_mq_ctxs *ctxs;
+ int cpu;
+
+ ctxs = kzalloc(sizeof(*ctxs), GFP_KERNEL);
+ if (!ctxs)
+ return -ENOMEM;
+
+ ctxs->queue_ctx = alloc_percpu(struct blk_mq_ctx);
+ if (!ctxs->queue_ctx)
+ goto fail;
+
+ for_each_possible_cpu(cpu) {
+ struct blk_mq_ctx *ctx = per_cpu_ptr(ctxs->queue_ctx, cpu);
+ ctx->ctxs = ctxs;
+ }
+
+ q->mq_kobj = &ctxs->kobj;
+ q->queue_ctx = ctxs->queue_ctx;
+
+ return 0;
+ fail:
+ kfree(ctxs);
+ return -ENOMEM;
+}
+
/*
* It is the actual release handler for mq, but we do it from
* request queue's release handler for avoiding use-after-free
@@ -2478,8 +2607,6 @@ void blk_mq_release(struct request_queue *q)
kobject_put(&hctx->kobj);
}
- q->mq_map = NULL;
-
kfree(q->queue_hw_ctx);
/*
@@ -2487,15 +2614,13 @@ void blk_mq_release(struct request_queue *q)
* both share lifetime with request queue.
*/
blk_mq_sysfs_deinit(q);
-
- free_percpu(q->queue_ctx);
}
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
{
struct request_queue *uninit_q, *q;
- uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
+ uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
if (!uninit_q)
return ERR_PTR(-ENOMEM);
@@ -2522,6 +2647,7 @@ struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
memset(set, 0, sizeof(*set));
set->ops = ops;
set->nr_hw_queues = 1;
+ set->nr_maps = 1;
set->queue_depth = queue_depth;
set->numa_node = NUMA_NO_NODE;
set->flags = set_flags;
@@ -2599,7 +2725,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
int node;
struct blk_mq_hw_ctx *hctx;
- node = blk_mq_hw_queue_to_node(q->mq_map, i);
+ node = blk_mq_hw_queue_to_node(&set->map[0], i);
/*
* If the hw queue has been mapped to another numa node,
* we need to realloc the hctx. If allocation fails, fallback
@@ -2652,6 +2778,19 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
mutex_unlock(&q->sysfs_lock);
}
+/*
+ * Maximum number of hardware queues we support. With a single map, we'll
+ * never have more than the number of CPUs (software queues). With multiple
+ * maps, the tag_set user may have set ->nr_hw_queues larger.
+ */
+static unsigned int nr_hw_queues(struct blk_mq_tag_set *set)
+{
+ if (set->nr_maps == 1)
+ return nr_cpu_ids;
+
+ return max(set->nr_hw_queues, nr_cpu_ids);
+}
+
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q)
{
@@ -2664,19 +2803,17 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
if (!q->poll_cb)
goto err_exit;
- q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
- if (!q->queue_ctx)
+ if (blk_mq_alloc_ctxs(q))
goto err_exit;
/* init q->mq_kobj and sw queues' kobjects */
blk_mq_sysfs_init(q);
- q->queue_hw_ctx = kcalloc_node(nr_cpu_ids, sizeof(*(q->queue_hw_ctx)),
+ q->nr_queues = nr_hw_queues(set);
+ q->queue_hw_ctx = kcalloc_node(q->nr_queues, sizeof(*(q->queue_hw_ctx)),
GFP_KERNEL, set->numa_node);
if (!q->queue_hw_ctx)
- goto err_percpu;
-
- q->mq_map = set->mq_map;
+ goto err_sys_init;
blk_mq_realloc_hw_ctxs(set, q);
if (!q->nr_hw_queues)
@@ -2685,12 +2822,15 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
- q->nr_queues = nr_cpu_ids;
+ q->tag_set = set;
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
+ if (set->nr_maps > HCTX_TYPE_POLL &&
+ set->map[HCTX_TYPE_POLL].nr_queues)
+ blk_queue_flag_set(QUEUE_FLAG_POLL, q);
if (!(set->flags & BLK_MQ_F_SG_MERGE))
- queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+ blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
q->sg_reserved_size = INT_MAX;
@@ -2699,8 +2839,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
spin_lock_init(&q->requeue_lock);
blk_queue_make_request(q, blk_mq_make_request);
- if (q->mq_ops->poll)
- q->poll_fn = blk_mq_poll;
/*
* Do this after blk_queue_make_request() overrides it...
@@ -2712,9 +2850,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
*/
q->poll_nsec = -1;
- if (set->ops->complete)
- blk_queue_softirq_done(q, set->ops->complete);
-
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
blk_mq_add_queue_tag_set(set, q);
blk_mq_map_swqueue(q);
@@ -2731,8 +2866,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
err_hctxs:
kfree(q->queue_hw_ctx);
-err_percpu:
- free_percpu(q->queue_ctx);
+err_sys_init:
+ blk_mq_sysfs_deinit(q);
err_exit:
q->mq_ops = NULL;
return ERR_PTR(-ENOMEM);
@@ -2801,7 +2936,9 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
- if (set->ops->map_queues) {
+ if (set->ops->map_queues && !is_kdump_kernel()) {
+ int i;
+
/*
* transport .map_queues is usually done in the following
* way:
@@ -2809,18 +2946,21 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
* for (queue = 0; queue < set->nr_hw_queues; queue++) {
* mask = get_cpu_mask(queue)
* for_each_cpu(cpu, mask)
- * set->mq_map[cpu] = queue;
+ * set->map[x].mq_map[cpu] = queue;
* }
*
* When we need to remap, the table has to be cleared for
* killing stale mapping since one CPU may not be mapped
* to any hw queue.
*/
- blk_mq_clear_mq_map(set);
+ for (i = 0; i < set->nr_maps; i++)
+ blk_mq_clear_mq_map(&set->map[i]);
return set->ops->map_queues(set);
- } else
- return blk_mq_map_queues(set);
+ } else {
+ BUG_ON(set->nr_maps > 1);
+ return blk_mq_map_queues(&set->map[0]);
+ }
}
/*
@@ -2831,7 +2971,7 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
*/
int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
{
- int ret;
+ int i, ret;
BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
@@ -2854,6 +2994,11 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
set->queue_depth = BLK_MQ_MAX_DEPTH;
}
+ if (!set->nr_maps)
+ set->nr_maps = 1;
+ else if (set->nr_maps > HCTX_MAX_TYPES)
+ return -EINVAL;
+
/*
* If a crashdump is active, then we are potentially in a very
* memory constrained environment. Limit us to 1 queue and
* 64 tags to prevent using too much memory.
*/
*/
if (is_kdump_kernel()) {
set->nr_hw_queues = 1;
+ set->nr_maps = 1;
set->queue_depth = min(64U, set->queue_depth);
}
/*
- * There is no use for more h/w queues than cpus.
+ * There is no use for more h/w queues than cpus if we just have
+	 * a single map.
*/
- if (set->nr_hw_queues > nr_cpu_ids)
+ if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
set->nr_hw_queues = nr_cpu_ids;
- set->tags = kcalloc_node(nr_cpu_ids, sizeof(struct blk_mq_tags *),
+ set->tags = kcalloc_node(nr_hw_queues(set), sizeof(struct blk_mq_tags *),
GFP_KERNEL, set->numa_node);
if (!set->tags)
return -ENOMEM;
ret = -ENOMEM;
- set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
- GFP_KERNEL, set->numa_node);
- if (!set->mq_map)
- goto out_free_tags;
+ for (i = 0; i < set->nr_maps; i++) {
+ set->map[i].mq_map = kcalloc_node(nr_cpu_ids,
+ sizeof(set->map[i].mq_map[0]),
+ GFP_KERNEL, set->numa_node);
+ if (!set->map[i].mq_map)
+ goto out_free_mq_map;
+ set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
+ }
ret = blk_mq_update_queue_map(set);
if (ret)
@@ -2894,9 +3045,10 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
return 0;
out_free_mq_map:
- kfree(set->mq_map);
- set->mq_map = NULL;
-out_free_tags:
+ for (i = 0; i < set->nr_maps; i++) {
+ kfree(set->map[i].mq_map);
+ set->map[i].mq_map = NULL;
+ }
kfree(set->tags);
set->tags = NULL;
return ret;
@@ -2905,13 +3057,15 @@ EXPORT_SYMBOL(blk_mq_alloc_tag_set);
void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
{
- int i;
+ int i, j;
- for (i = 0; i < nr_cpu_ids; i++)
+ for (i = 0; i < nr_hw_queues(set); i++)
blk_mq_free_map_and_requests(set, i);
- kfree(set->mq_map);
- set->mq_map = NULL;
+ for (j = 0; j < set->nr_maps; j++) {
+ kfree(set->map[j].mq_map);
+ set->map[j].mq_map = NULL;
+ }
kfree(set->tags);
set->tags = NULL;
@@ -3037,7 +3191,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
lockdep_assert_held(&set->tag_list_lock);
- if (nr_hw_queues > nr_cpu_ids)
+ if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
nr_hw_queues = nr_cpu_ids;
if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
return;
@@ -3072,7 +3226,7 @@ fallback:
pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
nr_hw_queues, prev_nr_hw_queues);
set->nr_hw_queues = prev_nr_hw_queues;
- blk_mq_map_queues(set);
+ blk_mq_map_queues(&set->map[0]);
goto fallback;
}
blk_mq_map_swqueue(q);
@@ -3179,15 +3333,12 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
return false;
/*
- * poll_nsec can be:
+ * If we get here, hybrid polling is enabled. Hence poll_nsec can be:
*
- * -1: don't ever hybrid sleep
* 0: use half of prev avg
* >0: use this specific value
*/
- if (q->poll_nsec == -1)
- return false;
- else if (q->poll_nsec > 0)
+ if (q->poll_nsec > 0)
nsecs = q->poll_nsec;
else
nsecs = blk_mq_poll_nsecs(q, hctx, rq);
@@ -3224,11 +3375,57 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
return true;
}
-static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static bool blk_mq_poll_hybrid(struct request_queue *q,
+ struct blk_mq_hw_ctx *hctx, blk_qc_t cookie)
{
- struct request_queue *q = hctx->queue;
+ struct request *rq;
+
+ if (q->poll_nsec == -1)
+ return false;
+
+ if (!blk_qc_t_is_internal(cookie))
+ rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
+ else {
+ rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
+ /*
+ * With scheduling, if the request has completed, we'll
+ * get a NULL return here, as we clear the sched tag when
+ * that happens. The request still remains valid, like always,
+ * so we should be safe with just the NULL check.
+ */
+ if (!rq)
+ return false;
+ }
+
+ return blk_mq_poll_hybrid_sleep(q, hctx, rq);
+}
+
+/**
+ * blk_poll - poll for IO completions
+ * @q: the queue
+ * @cookie: cookie passed back at IO submission time
+ * @spin: whether to spin for completions
+ *
+ * Description:
+ * Poll for completions on the passed in queue. Returns number of
+ * completed entries found. If @spin is true, then blk_poll will continue
+ * looping until at least one completion is found, unless the task is
+ * otherwise marked running (or we need to reschedule).
+ */
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+{
+ struct blk_mq_hw_ctx *hctx;
long state;
+ if (!blk_qc_t_valid(cookie) ||
+ !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ return 0;
+
+ if (current->plug)
+ blk_flush_plug_list(current->plug, false);
+
+ hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
+
/*
* If we sleep, have the caller restart the poll loop to reset
* the state. Like for the other success return cases, the
@@ -3236,63 +3433,44 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
* the IO isn't complete, we'll get called again and will go
* straight to the busy poll loop.
*/
- if (blk_mq_poll_hybrid_sleep(q, hctx, rq))
- return true;
+ if (blk_mq_poll_hybrid(q, hctx, cookie))
+ return 1;
hctx->poll_considered++;
state = current->state;
- while (!need_resched()) {
+ do {
int ret;
hctx->poll_invoked++;
- ret = q->mq_ops->poll(hctx, rq->tag);
+ ret = q->mq_ops->poll(hctx);
if (ret > 0) {
hctx->poll_success++;
- set_current_state(TASK_RUNNING);
- return true;
+ __set_current_state(TASK_RUNNING);
+ return ret;
}
if (signal_pending_state(state, current))
- set_current_state(TASK_RUNNING);
+ __set_current_state(TASK_RUNNING);
if (current->state == TASK_RUNNING)
- return true;
- if (ret < 0)
+ return 1;
+ if (ret < 0 || !spin)
break;
cpu_relax();
- }
+ } while (!need_resched());
__set_current_state(TASK_RUNNING);
- return false;
+ return 0;
}
+EXPORT_SYMBOL_GPL(blk_poll);
-static bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie)
+unsigned int blk_mq_rq_cpu(struct request *rq)
{
- struct blk_mq_hw_ctx *hctx;
- struct request *rq;
-
- if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
- return false;
-
- hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
- if (!blk_qc_t_is_internal(cookie))
- rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie));
- else {
- rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie));
- /*
- * With scheduling, if the request has completed, we'll
- * get a NULL return here, as we clear the sched tag when
- * that happens. The request still remains valid, like always,
- * so we should be safe with just the NULL check.
- */
- if (!rq)
- return false;
- }
-
- return __blk_mq_poll(hctx, rq);
+ return rq->mq_ctx->cpu;
}
+EXPORT_SYMBOL(blk_mq_rq_cpu);
static int __init blk_mq_init(void)
{
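
A note on the reworked interface above: q->poll_fn is gone and blk_poll() is
now exported directly, taking a "spin" flag and returning the number of
completions found instead of a bool. A minimal sketch of a synchronous
waiter, mirroring the pattern the O_DIRECT code uses (the example_* name and
the "done" flag are assumptions, not part of this patch):

	static void example_wait_for_completion(struct request_queue *q,
						blk_qc_t cookie, bool *done)
	{
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (READ_ONCE(*done))
				break;
			/* spin=true: keep polling until >= 1 completion */
			if (!blk_poll(q, cookie, true))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}
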
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 9497b47e2526..d943d46b0785 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -7,17 +7,22 @@
struct blk_mq_tag_set;
+struct blk_mq_ctxs {
+ struct kobject kobj;
+ struct blk_mq_ctx __percpu *queue_ctx;
+};
+
/**
* struct blk_mq_ctx - State for a software queue facing the submitting CPUs
*/
struct blk_mq_ctx {
struct {
spinlock_t lock;
- struct list_head rq_list;
- } ____cacheline_aligned_in_smp;
+ struct list_head rq_lists[HCTX_MAX_TYPES];
+ } ____cacheline_aligned_in_smp;
unsigned int cpu;
- unsigned int index_hw;
+ unsigned short index_hw[HCTX_MAX_TYPES];
/* incremented at dispatch time */
unsigned long rq_dispatched[2];
@@ -27,6 +32,7 @@ struct blk_mq_ctx {
unsigned long ____cacheline_aligned_in_smp rq_completed[2];
struct request_queue *queue;
+ struct blk_mq_ctxs *ctxs;
struct kobject kobj;
} ____cacheline_aligned_in_smp;
@@ -62,20 +68,55 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
-/* Used by blk_insert_cloned_request() to issue request directly */
-blk_status_t blk_mq_request_issue_directly(struct request *rq);
+blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+ struct request *rq,
+ blk_qc_t *cookie,
+ bool bypass, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list);
/*
* CPU -> queue mappings
*/
-extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
+
+/*
+ * blk_mq_map_queue_type() - map (hctx_type,cpu) to hardware queue
+ * @q: request queue
+ * @type: the hctx type index
+ * @cpu: CPU
+ */
+static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q,
+ enum hctx_type type,
+ unsigned int cpu)
+{
+ return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]];
+}
+/*
+ * blk_mq_map_queue() - map (cmd_flags,cpu) to hardware queue
+ * @q: request queue
+ * @flags: request command flags
+ * @cpu: CPU
+ */
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
- int cpu)
+ unsigned int flags,
+ unsigned int cpu)
{
- return q->queue_hw_ctx[q->mq_map[cpu]];
+ enum hctx_type type = HCTX_TYPE_DEFAULT;
+
+ if ((flags & REQ_HIPRI) &&
+ q->tag_set->nr_maps > HCTX_TYPE_POLL &&
+ q->tag_set->map[HCTX_TYPE_POLL].nr_queues &&
+ test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
+ type = HCTX_TYPE_POLL;
+
+ else if (((flags & REQ_OP_MASK) == REQ_OP_READ) &&
+ q->tag_set->nr_maps > HCTX_TYPE_READ &&
+ q->tag_set->map[HCTX_TYPE_READ].nr_queues)
+ type = HCTX_TYPE_READ;
+
+ return blk_mq_map_queue_type(q, type, cpu);
}
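
To make the new selection logic concrete, a hypothetical call site on a
tag_set with dedicated read and poll maps would resolve as follows (sketch;
raw_smp_processor_id() stands in for whatever CPU the caller uses):

	/* HIPRI read, poll queues present and QUEUE_FLAG_POLL set */
	hctx = blk_mq_map_queue(q, REQ_OP_READ | REQ_HIPRI,
				raw_smp_processor_id()); /* HCTX_TYPE_POLL */

	/* plain read with a dedicated read map */
	hctx = blk_mq_map_queue(q, REQ_OP_READ,
				raw_smp_processor_id()); /* HCTX_TYPE_READ */

	/* writes and everything else */
	hctx = blk_mq_map_queue(q, REQ_OP_WRITE,
				raw_smp_processor_id()); /* HCTX_TYPE_DEFAULT */
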
/*
@@ -126,6 +167,7 @@ struct blk_mq_alloc_data {
struct request_queue *q;
blk_mq_req_flags_t flags;
unsigned int shallow_depth;
+ unsigned int cmd_flags;
/* input & output parameter */
struct blk_mq_ctx *ctx;
@@ -150,8 +192,7 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
return hctx->nr_ctx && hctx->tags;
}
-void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2]);
+unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2]);
@@ -195,21 +236,18 @@ static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
static inline void blk_mq_put_driver_tag(struct request *rq)
{
- struct blk_mq_hw_ctx *hctx;
-
if (rq->tag == -1 || rq->internal_tag == -1)
return;
- hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
- __blk_mq_put_driver_tag(hctx, rq);
+ __blk_mq_put_driver_tag(rq->mq_hctx, rq);
}
-static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
+static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
{
int cpu;
for_each_possible_cpu(cpu)
- set->mq_map[cpu] = 0;
+ qmap->mq_map[cpu] = 0;
}
#endif
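
Given the per-type struct blk_mq_queue_map above, a transport's
->map_queues() callback now fills each map separately. A rough sketch (the
round-robin CPU spreading is an assumed toy policy; real drivers would use
blk_mq_map_queues() or an IRQ-affinity-aware helper per map):

	static int example_map_queues(struct blk_mq_tag_set *set)
	{
		unsigned int i, cpu;

		for (i = 0; i < set->nr_maps; i++) {
			struct blk_mq_queue_map *qmap = &set->map[i];

			if (!qmap->nr_queues)
				continue;
			/* toy policy: spread CPUs round-robin over queues */
			for_each_possible_cpu(cpu)
				qmap->mq_map[cpu] = cpu % qmap->nr_queues;
		}
		return 0;
	}
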
diff --git a/block/blk-pm.c b/block/blk-pm.c
index f8fdae01bea2..0a028c189897 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -89,12 +89,12 @@ int blk_pre_runtime_suspend(struct request_queue *q)
/* Switch q_usage_counter back to per-cpu mode. */
blk_mq_unfreeze_queue(q);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (ret < 0)
pm_runtime_mark_last_busy(q->dev);
else
q->rpm_status = RPM_SUSPENDING;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (ret)
blk_clear_pm_only(q);
@@ -121,14 +121,14 @@ void blk_post_runtime_suspend(struct request_queue *q, int err)
if (!q->dev)
return;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (!err) {
q->rpm_status = RPM_SUSPENDED;
} else {
q->rpm_status = RPM_ACTIVE;
pm_runtime_mark_last_busy(q->dev);
}
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (err)
blk_clear_pm_only(q);
@@ -151,9 +151,9 @@ void blk_pre_runtime_resume(struct request_queue *q)
if (!q->dev)
return;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
q->rpm_status = RPM_RESUMING;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
EXPORT_SYMBOL(blk_pre_runtime_resume);
@@ -176,7 +176,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
if (!q->dev)
return;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (!err) {
q->rpm_status = RPM_ACTIVE;
pm_runtime_mark_last_busy(q->dev);
@@ -184,7 +184,7 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
} else {
q->rpm_status = RPM_SUSPENDED;
}
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (!err)
blk_clear_pm_only(q);
@@ -207,10 +207,10 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
*/
void blk_set_runtime_active(struct request_queue *q)
{
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
q->rpm_status = RPM_ACTIVE;
pm_runtime_mark_last_busy(q->dev);
pm_request_autosuspend(q->dev);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
EXPORT_SYMBOL(blk_set_runtime_active);
diff --git a/block/blk-pm.h b/block/blk-pm.h
index a8564ea72a41..ea5507d23e75 100644
--- a/block/blk-pm.h
+++ b/block/blk-pm.h
@@ -21,7 +21,7 @@ static inline void blk_pm_mark_last_busy(struct request *rq)
static inline void blk_pm_requeue_request(struct request *rq)
{
- lockdep_assert_held(rq->q->queue_lock);
+ lockdep_assert_held(&rq->q->queue_lock);
if (rq->q->dev && !(rq->rq_flags & RQF_PM))
rq->q->nr_pending--;
@@ -30,7 +30,7 @@ static inline void blk_pm_requeue_request(struct request *rq)
static inline void blk_pm_add_request(struct request_queue *q,
struct request *rq)
{
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(&q->queue_lock);
if (q->dev && !(rq->rq_flags & RQF_PM))
q->nr_pending++;
@@ -38,7 +38,7 @@ static inline void blk_pm_add_request(struct request_queue *q,
static inline void blk_pm_put_request(struct request *rq)
{
- lockdep_assert_held(rq->q->queue_lock);
+ lockdep_assert_held(&rq->q->queue_lock);
if (rq->q->dev && !(rq->rq_flags & RQF_PM))
--rq->q->nr_pending;
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 0005dfd568dd..d169d7188fa6 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -27,75 +27,67 @@ bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit)
return atomic_inc_below(&rq_wait->inflight, limit);
}
-void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
+void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)
{
- struct rq_qos *rqos;
-
- for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->cleanup)
rqos->ops->cleanup(rqos, bio);
- }
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_done(struct request_queue *q, struct request *rq)
+void __rq_qos_done(struct rq_qos *rqos, struct request *rq)
{
- struct rq_qos *rqos;
-
- for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->done)
rqos->ops->done(rqos, rq);
- }
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_issue(struct request_queue *q, struct request *rq)
+void __rq_qos_issue(struct rq_qos *rqos, struct request *rq)
{
- struct rq_qos *rqos;
-
- for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->issue)
rqos->ops->issue(rqos, rq);
- }
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_requeue(struct request_queue *q, struct request *rq)
+void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq)
{
- struct rq_qos *rqos;
-
- for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->requeue)
rqos->ops->requeue(rqos, rq);
- }
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_throttle(struct request_queue *q, struct bio *bio,
- spinlock_t *lock)
+void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)
{
- struct rq_qos *rqos;
-
- for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->throttle)
- rqos->ops->throttle(rqos, bio, lock);
- }
+ rqos->ops->throttle(rqos, bio);
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_track(struct request_queue *q, struct request *rq, struct bio *bio)
+void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)
{
- struct rq_qos *rqos;
-
- for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->track)
rqos->ops->track(rqos, rq, bio);
- }
+ rqos = rqos->next;
+ } while (rqos);
}
-void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
+void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)
{
- struct rq_qos *rqos;
-
- for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
+ do {
if (rqos->ops->done_bio)
rqos->ops->done_bio(rqos, bio);
- }
+ rqos = rqos->next;
+ } while (rqos);
}
/*
@@ -184,8 +176,96 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
rq_depth_calc_max_depth(rqd);
}
+struct rq_qos_wait_data {
+ struct wait_queue_entry wq;
+ struct task_struct *task;
+ struct rq_wait *rqw;
+ acquire_inflight_cb_t *cb;
+ void *private_data;
+ bool got_token;
+};
+
+static int rq_qos_wake_function(struct wait_queue_entry *curr,
+ unsigned int mode, int wake_flags, void *key)
+{
+ struct rq_qos_wait_data *data = container_of(curr,
+ struct rq_qos_wait_data,
+ wq);
+
+ /*
+ * If we fail to get a budget, return -1 to interrupt the wake up loop
+ * in __wake_up_common.
+ */
+ if (!data->cb(data->rqw, data->private_data))
+ return -1;
+
+ data->got_token = true;
+ list_del_init(&curr->entry);
+ wake_up_process(data->task);
+ return 1;
+}
+
+/**
+ * rq_qos_wait - throttle on a rqw if we need to
+ * @rqw: rqw to throttle on
+ * @private_data: caller provided specific data
+ * @acquire_inflight_cb: inc the rqw->inflight counter if we can
+ * @cleanup_cb: the callback to cleanup in case we race with a waker
+ *
+ * This provides a uniform place for the rq_qos users to do their throttling.
+ * Since you can end up with a lot of things sleeping at once, this manages the
+ * waking up based on the resources available. The acquire_inflight_cb should
+ * increment rqw->inflight if it is able to do so, or return false if not; in
+ * the latter case we sleep until room becomes available.
+ *
+ * cleanup_cb is called in case we race with a waker and need to clean up the
+ * inflight count accordingly.
+ */
+void rq_qos_wait(struct rq_wait *rqw, void *private_data,
+ acquire_inflight_cb_t *acquire_inflight_cb,
+ cleanup_cb_t *cleanup_cb)
+{
+ struct rq_qos_wait_data data = {
+ .wq = {
+ .func = rq_qos_wake_function,
+ .entry = LIST_HEAD_INIT(data.wq.entry),
+ },
+ .task = current,
+ .rqw = rqw,
+ .cb = acquire_inflight_cb,
+ .private_data = private_data,
+ };
+ bool has_sleeper;
+
+ has_sleeper = wq_has_sleeper(&rqw->wait);
+ if (!has_sleeper && acquire_inflight_cb(rqw, private_data))
+ return;
+
+ prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
+ do {
+ if (data.got_token)
+ break;
+ if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
+ finish_wait(&rqw->wait, &data.wq);
+
+ /*
+			 * We raced with rq_qos_wake_function() getting a token,
+			 * which means we now have two. Put our local token back
+			 * and wake anyone else potentially waiting for one.
+ */
+ if (data.got_token)
+ cleanup_cb(rqw, private_data);
+ break;
+ }
+ io_schedule();
+ has_sleeper = false;
+ } while (1);
+ finish_wait(&rqw->wait, &data.wq);
+}
+
void rq_qos_exit(struct request_queue *q)
{
+ blk_mq_debugfs_unregister_queue_rqos(q);
+
while (q->rq_qos) {
struct rq_qos *rqos = q->rq_qos;
q->rq_qos = rqos->next;
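
For illustration, a policy built on the new rq_qos_wait() might wire up its
callbacks like this (the example_* container and the depth limit of 32 are
assumptions; rq_wait_inc_below(), rqw->inflight and rqw->wait come from this
file):

	struct example_qos {
		struct rq_qos rqos;
		struct rq_wait rqw;
	};

	static bool example_inflight_cb(struct rq_wait *rqw, void *private_data)
	{
		/* take a slot only if we stay below our depth limit */
		return rq_wait_inc_below(rqw, 32);
	}

	static void example_cleanup_cb(struct rq_wait *rqw, void *private_data)
	{
		/* we raced with a waker and hold an extra token: put it back */
		atomic_dec(&rqw->inflight);
		wake_up(&rqw->wait);
	}

	static void example_throttle(struct rq_qos *rqos, struct bio *bio)
	{
		struct example_qos *eq = container_of(rqos, struct example_qos,
						      rqos);

		rq_qos_wait(&eq->rqw, NULL, example_inflight_cb,
			    example_cleanup_cb);
	}

Note that example_throttle() matches the new lock-less ->throttle signature
from the rq_qos_ops change below.
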
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 32b02efbfa66..564851889550 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -7,6 +7,10 @@
#include <linux/atomic.h>
#include <linux/wait.h>
+#include "blk-mq-debugfs.h"
+
+struct blk_mq_debugfs_attr;
+
enum rq_qos_id {
RQ_QOS_WBT,
RQ_QOS_CGROUP,
@@ -22,10 +26,13 @@ struct rq_qos {
struct request_queue *q;
enum rq_qos_id id;
struct rq_qos *next;
+#ifdef CONFIG_BLK_DEBUG_FS
+ struct dentry *debugfs_dir;
+#endif
};
struct rq_qos_ops {
- void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
+ void (*throttle)(struct rq_qos *, struct bio *);
void (*track)(struct rq_qos *, struct request *, struct bio *);
void (*issue)(struct rq_qos *, struct request *);
void (*requeue)(struct rq_qos *, struct request *);
@@ -33,6 +40,7 @@ struct rq_qos_ops {
void (*done_bio)(struct rq_qos *, struct bio *);
void (*cleanup)(struct rq_qos *, struct bio *);
void (*exit)(struct rq_qos *);
+ const struct blk_mq_debugfs_attr *debugfs_attrs;
};
struct rq_depth {
@@ -66,6 +74,17 @@ static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
return rq_qos_id(q, RQ_QOS_CGROUP);
}
+static inline const char *rq_qos_id_to_name(enum rq_qos_id id)
+{
+ switch (id) {
+ case RQ_QOS_WBT:
+ return "wbt";
+ case RQ_QOS_CGROUP:
+ return "cgroup";
+ }
+ return "unknown";
+}
+
static inline void rq_wait_init(struct rq_wait *rq_wait)
{
atomic_set(&rq_wait->inflight, 0);
@@ -76,6 +95,9 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
rqos->next = q->rq_qos;
q->rq_qos = rqos;
+
+ if (rqos->ops->debugfs_attrs)
+ blk_mq_debugfs_register_rqos(rqos);
}
static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
@@ -91,19 +113,77 @@ static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
}
prev = cur;
}
+
+ blk_mq_debugfs_unregister_rqos(rqos);
}
+typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
+typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);
+
+void rq_qos_wait(struct rq_wait *rqw, void *private_data,
+ acquire_inflight_cb_t *acquire_inflight_cb,
+ cleanup_cb_t *cleanup_cb);
bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
void rq_depth_scale_up(struct rq_depth *rqd);
void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
bool rq_depth_calc_max_depth(struct rq_depth *rqd);
-void rq_qos_cleanup(struct request_queue *, struct bio *);
-void rq_qos_done(struct request_queue *, struct request *);
-void rq_qos_issue(struct request_queue *, struct request *);
-void rq_qos_requeue(struct request_queue *, struct request *);
-void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
-void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
-void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
+void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
+void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
+void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
+void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
+void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
+void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
+void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
+
+static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
+{
+ if (q->rq_qos)
+ __rq_qos_cleanup(q->rq_qos, bio);
+}
+
+static inline void rq_qos_done(struct request_queue *q, struct request *rq)
+{
+ if (q->rq_qos)
+ __rq_qos_done(q->rq_qos, rq);
+}
+
+static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
+{
+ if (q->rq_qos)
+ __rq_qos_issue(q->rq_qos, rq);
+}
+
+static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
+{
+ if (q->rq_qos)
+ __rq_qos_requeue(q->rq_qos, rq);
+}
+
+static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
+{
+ if (q->rq_qos)
+ __rq_qos_done_bio(q->rq_qos, bio);
+}
+
+static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
+{
+ /*
+ * BIO_TRACKED lets controllers know that a bio went through the
+ * normal rq_qos path.
+ */
+ bio_set_flag(bio, BIO_TRACKED);
+ if (q->rq_qos)
+ __rq_qos_throttle(q->rq_qos, bio);
+}
+
+static inline void rq_qos_track(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+{
+ if (q->rq_qos)
+ __rq_qos_track(q->rq_qos, rq, bio);
+}
+
void rq_qos_exit(struct request_queue *);
+
#endif
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 696c04c1ab6c..3abe831e92c8 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -20,65 +20,12 @@ EXPORT_SYMBOL(blk_max_low_pfn);
unsigned long blk_max_pfn;
-/**
- * blk_queue_prep_rq - set a prepare_request function for queue
- * @q: queue
- * @pfn: prepare_request function
- *
- * It's possible for a queue to register a prepare_request callback which
- * is invoked before the request is handed to the request_fn. The goal of
- * the function is to prepare a request for I/O, it can be used to build a
- * cdb from the request data for instance.
- *
- */
-void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
-{
- q->prep_rq_fn = pfn;
-}
-EXPORT_SYMBOL(blk_queue_prep_rq);
-
-/**
- * blk_queue_unprep_rq - set an unprepare_request function for queue
- * @q: queue
- * @ufn: unprepare_request function
- *
- * It's possible for a queue to register an unprepare_request callback
- * which is invoked before the request is finally completed. The goal
- * of the function is to deallocate any data that was allocated in the
- * prepare_request callback.
- *
- */
-void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
-{
- q->unprep_rq_fn = ufn;
-}
-EXPORT_SYMBOL(blk_queue_unprep_rq);
-
-void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
-{
- q->softirq_done_fn = fn;
-}
-EXPORT_SYMBOL(blk_queue_softirq_done);
-
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
-void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
-{
- WARN_ON_ONCE(q->mq_ops);
- q->rq_timed_out_fn = fn;
-}
-EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
-
-void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
-{
- q->lld_busy_fn = fn;
-}
-EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
-
/**
* blk_set_default_limits - reset limits to default values
* @lim: the queue_limits structure to reset
@@ -169,8 +116,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
q->make_request_fn = mfn;
blk_queue_dma_alignment(q, 511);
- blk_queue_congestion_threshold(q);
- q->nr_batching = BLK_BATCH_REQ;
blk_set_default_limits(&q->limits);
}
@@ -889,16 +834,14 @@ EXPORT_SYMBOL(blk_set_queue_depth);
*/
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
- spin_lock_irq(q->queue_lock);
if (wc)
- queue_flag_set(QUEUE_FLAG_WC, q);
+ blk_queue_flag_set(QUEUE_FLAG_WC, q);
else
- queue_flag_clear(QUEUE_FLAG_WC, q);
+ blk_queue_flag_clear(QUEUE_FLAG_WC, q);
if (fua)
- queue_flag_set(QUEUE_FLAG_FUA, q);
+ blk_queue_flag_set(QUEUE_FLAG_FUA, q);
else
- queue_flag_clear(QUEUE_FLAG_FUA, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index e47a2f751884..457d9ba3eb20 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -34,7 +34,7 @@ static __latent_entropy void blk_done_softirq(struct softirq_action *h)
rq = list_entry(local_list.next, struct request, ipi_list);
list_del_init(&rq->ipi_list);
- rq->q->softirq_done_fn(rq);
+ rq->q->mq_ops->complete(rq);
}
}
@@ -98,11 +98,11 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
void __blk_complete_request(struct request *req)
{
struct request_queue *q = req->q;
- int cpu, ccpu = q->mq_ops ? req->mq_ctx->cpu : req->cpu;
+ int cpu, ccpu = req->mq_ctx->cpu;
unsigned long flags;
bool shared = false;
- BUG_ON(!q->softirq_done_fn);
+ BUG_ON(!q->mq_ops->complete);
local_irq_save(flags);
cpu = smp_processor_id();
@@ -143,27 +143,6 @@ do_local:
local_irq_restore(flags);
}
-EXPORT_SYMBOL(__blk_complete_request);
-
-/**
- * blk_complete_request - end I/O on a request
- * @req: the request being processed
- *
- * Description:
- * Ends all I/O on a request. It does not handle partial completions,
- * unless the driver actually implements this in its completion callback
- * through requeueing. The actual completion happens out-of-order,
- * through a softirq handler. The user must have registered a completion
- * callback through blk_queue_softirq_done().
- **/
-void blk_complete_request(struct request *req)
-{
- if (unlikely(blk_should_fake_timeout(req->q)))
- return;
- if (!blk_mark_rq_complete(req))
- __blk_complete_request(req);
-}
-EXPORT_SYMBOL(blk_complete_request);
static __init int blk_softirq_init(void)
{
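
With softirq_done_fn removed, the softirq completion path calls straight
into the driver's mq_ops->complete. A driver therefore hooks completion via
its ops table instead of blk_queue_softirq_done(); roughly (the example_*
names and the status lookup are assumptions):

	static void example_complete(struct request *rq)
	{
		/* runs from blk_done_softirq() on the chosen CPU */
		blk_mq_end_request(rq, example_rq_to_status(rq));
	}

	static const struct blk_mq_ops example_mq_ops = {
		.queue_rq	= example_queue_rq,
		.complete	= example_complete,
	};
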
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 90561af85a62..696a04176e4d 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -130,7 +130,6 @@ blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
return cb;
}
-EXPORT_SYMBOL_GPL(blk_stat_alloc_callback);
void blk_stat_add_callback(struct request_queue *q,
struct blk_stat_callback *cb)
@@ -151,7 +150,6 @@ void blk_stat_add_callback(struct request_queue *q,
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
spin_unlock(&q->stats->lock);
}
-EXPORT_SYMBOL_GPL(blk_stat_add_callback);
void blk_stat_remove_callback(struct request_queue *q,
struct blk_stat_callback *cb)
@@ -164,7 +162,6 @@ void blk_stat_remove_callback(struct request_queue *q,
del_timer_sync(&cb->timer);
}
-EXPORT_SYMBOL_GPL(blk_stat_remove_callback);
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
@@ -181,7 +178,6 @@ void blk_stat_free_callback(struct blk_stat_callback *cb)
if (cb)
call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
-EXPORT_SYMBOL_GPL(blk_stat_free_callback);
void blk_stat_enable_accounting(struct request_queue *q)
{
diff --git a/block/blk-stat.h b/block/blk-stat.h
index f4a1568e81a4..17b47a86eefb 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -145,6 +145,11 @@ static inline void blk_stat_activate_nsecs(struct blk_stat_callback *cb,
mod_timer(&cb->timer, jiffies + nsecs_to_jiffies(nsecs));
}
+static inline void blk_stat_deactivate(struct blk_stat_callback *cb)
+{
+ del_timer_sync(&cb->timer);
+}
+
/**
* blk_stat_activate_msecs() - Gather block statistics during a time window in
* milliseconds.
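
The new blk_stat_deactivate() pairs with the activation helpers above; a
sketch of the intended usage (the 100 ms window mirrors the mq poll-stats
code, the surrounding driver logic is assumed):

	if (!blk_stat_is_active(cb))
		blk_stat_activate_msecs(cb, 100);	/* sample for 100 ms */

	/* ... on teardown, or when sampling is no longer wanted ... */
	blk_stat_deactivate(cb);	/* del_timer_sync(): timer can't rearm */
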
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 844a454a7b3a..0619c8922893 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -68,7 +68,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
unsigned long nr;
int ret, err;
- if (!q->request_fn && !q->mq_ops)
+ if (!queue_is_mq(q))
return -EINVAL;
ret = queue_var_store(&nr, page, count);
@@ -78,11 +78,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
- if (q->request_fn)
- err = blk_update_nr_requests(q, nr);
- else
- err = blk_mq_update_nr_requests(q, nr);
-
+ err = blk_mq_update_nr_requests(q, nr);
if (err)
return err;
@@ -242,10 +238,10 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
q->limits.max_sectors = max_sectors_kb << 1;
q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
return ret;
}
@@ -320,14 +316,12 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
if (ret < 0)
return ret;
- spin_lock_irq(q->queue_lock);
- queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
- queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
+ blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
if (nm == 2)
- queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
else if (nm)
- queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
return ret;
}
@@ -351,18 +345,16 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
- spin_lock_irq(q->queue_lock);
if (val == 2) {
- queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
} else if (val == 1) {
- queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
} else if (val == 0) {
- queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
}
- spin_unlock_irq(q->queue_lock);
#endif
return ret;
}
@@ -410,7 +402,8 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
unsigned long poll_on;
ssize_t ret;
- if (!q->mq_ops || !q->mq_ops->poll)
+ if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
+ !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
return -EINVAL;
ret = queue_var_store(&poll_on, page, count);
@@ -425,6 +418,26 @@ static ssize_t queue_poll_store(struct request_queue *q, const char *page,
return ret;
}
+static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
+{
+ return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
+}
+
+static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
+ size_t count)
+{
+ unsigned int val;
+ int err;
+
+ err = kstrtou32(page, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+
+ blk_queue_rq_timeout(q, msecs_to_jiffies(val));
+
+ return count;
+}
+
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
if (!wbt_rq_qos(q))
@@ -463,20 +476,14 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
* ends up either enabling or disabling wbt completely. We can't
* have IO inflight if that happens.
*/
- if (q->mq_ops) {
- blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
- } else
- blk_queue_bypass_start(q);
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
wbt_set_min_lat(q, val);
wbt_update_limits(q);
- if (q->mq_ops) {
- blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
- } else
- blk_queue_bypass_end(q);
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
return count;
}
@@ -699,6 +706,12 @@ static struct queue_sysfs_entry queue_dax_entry = {
.show = queue_dax_show,
};
+static struct queue_sysfs_entry queue_io_timeout_entry = {
+ .attr = {.name = "io_timeout", .mode = 0644 },
+ .show = queue_io_timeout_show,
+ .store = queue_io_timeout_store,
+};
+
static struct queue_sysfs_entry queue_wb_lat_entry = {
.attr = {.name = "wbt_lat_usec", .mode = 0644 },
.show = queue_wb_lat_show,
@@ -748,6 +761,7 @@ static struct attribute *default_attrs[] = {
&queue_dax_entry.attr,
&queue_wb_lat_entry.attr,
&queue_poll_delay_entry.attr,
+ &queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
&throtl_sample_time_entry.attr,
#endif
@@ -847,24 +861,14 @@ static void __blk_release_queue(struct work_struct *work)
blk_free_queue_stats(q->stats);
- blk_exit_rl(q, &q->root_rl);
-
- if (q->queue_tags)
- __blk_queue_free_tags(q);
-
blk_queue_free_zone_bitmaps(q);
- if (!q->mq_ops) {
- if (q->exit_rq_fn)
- q->exit_rq_fn(q, q->fq->flush_rq);
- blk_free_flush_queue(q->fq);
- } else {
+ if (queue_is_mq(q))
blk_mq_release(q);
- }
blk_trace_shutdown(q);
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_debugfs_unregister(q);
bioset_exit(&q->bio_split);
@@ -909,7 +913,7 @@ int blk_register_queue(struct gendisk *disk)
WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
"%s is registering an already registered queue\n",
kobject_name(&dev->kobj));
- queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);
+ blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
/*
* SCSI probing may synchronously create and destroy a lot of
@@ -921,9 +925,8 @@ int blk_register_queue(struct gendisk *disk)
* request_queues for non-existent devices never get registered.
*/
if (!blk_queue_init_done(q)) {
- queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+ blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
percpu_ref_switch_to_percpu(&q->q_usage_counter);
- blk_queue_bypass_end(q);
}
ret = blk_trace_init_sysfs(dev);
@@ -939,7 +942,7 @@ int blk_register_queue(struct gendisk *disk)
goto unlock;
}
- if (q->mq_ops) {
+ if (queue_is_mq(q)) {
__blk_mq_register_dev(dev, q);
blk_mq_debugfs_register(q);
}
@@ -950,7 +953,7 @@ int blk_register_queue(struct gendisk *disk)
blk_throtl_register_queue(q);
- if (q->request_fn || (q->mq_ops && q->elevator)) {
+ if (q->elevator) {
ret = elv_register_queue(q);
if (ret) {
mutex_unlock(&q->sysfs_lock);
@@ -999,7 +1002,7 @@ void blk_unregister_queue(struct gendisk *disk)
* Remove the sysfs attributes before unregistering the queue data
* structures that can be modified through sysfs.
*/
- if (q->mq_ops)
+ if (queue_is_mq(q))
blk_mq_unregister_dev(disk_to_dev(disk), q);
mutex_unlock(&q->sysfs_lock);
@@ -1008,7 +1011,7 @@ void blk_unregister_queue(struct gendisk *disk)
blk_trace_remove_sysfs(disk_to_dev(disk));
mutex_lock(&q->sysfs_lock);
- if (q->request_fn || (q->mq_ops && q->elevator))
+ if (q->elevator)
elv_unregister_queue(q);
mutex_unlock(&q->sysfs_lock);
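
The new io_timeout attribute makes blk_queue_rq_timeout() tunable at
runtime; a driver still picks the initial default at init time, e.g.
(sketch):

	/*
	 * Default request timeout. Admins may override it later through
	 * /sys/block/<dev>/queue/io_timeout (milliseconds, must be > 0).
	 */
	blk_queue_rq_timeout(q, 30 * HZ);
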
diff --git a/block/blk-tag.c b/block/blk-tag.c
deleted file mode 100644
index fbc153aef166..000000000000
--- a/block/blk-tag.c
+++ /dev/null
@@ -1,378 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Functions related to tagged command queuing
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <linux/slab.h>
-
-#include "blk.h"
-
-/**
- * blk_queue_find_tag - find a request by its tag and queue
- * @q: The request queue for the device
- * @tag: The tag of the request
- *
- * Notes:
- * Should be used when a device returns a tag and you want to match
- * it with a request.
- *
- * no locks need be held.
- **/
-struct request *blk_queue_find_tag(struct request_queue *q, int tag)
-{
- return blk_map_queue_find_tag(q->queue_tags, tag);
-}
-EXPORT_SYMBOL(blk_queue_find_tag);
-
-/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt: the tag map to free
- *
- * Drop the reference count on @bqt and frees it when the last reference
- * is dropped.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
- if (atomic_dec_and_test(&bqt->refcnt)) {
- BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
- bqt->max_depth);
-
- kfree(bqt->tag_index);
- bqt->tag_index = NULL;
-
- kfree(bqt->tag_map);
- bqt->tag_map = NULL;
-
- kfree(bqt);
- }
-}
-EXPORT_SYMBOL(blk_free_tags);
-
-/**
- * __blk_queue_free_tags - release tag maintenance info
- * @q: the request queue for the device
- *
- * Notes:
- * blk_cleanup_queue() will take care of calling this function, if tagging
- * has been used. So there's no need to call this directly.
- **/
-void __blk_queue_free_tags(struct request_queue *q)
-{
- struct blk_queue_tag *bqt = q->queue_tags;
-
- if (!bqt)
- return;
-
- blk_free_tags(bqt);
-
- q->queue_tags = NULL;
- queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
-}
-
-/**
- * blk_queue_free_tags - release tag maintenance info
- * @q: the request queue for the device
- *
- * Notes:
- * This is used to disable tagged queuing to a device, yet leave
- * queue in function.
- **/
-void blk_queue_free_tags(struct request_queue *q)
-{
- queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
-}
-EXPORT_SYMBOL(blk_queue_free_tags);
-
-static int
-init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
-{
- struct request **tag_index;
- unsigned long *tag_map;
- int nr_ulongs;
-
- if (q && depth > q->nr_requests * 2) {
- depth = q->nr_requests * 2;
- printk(KERN_ERR "%s: adjusted depth to %d\n",
- __func__, depth);
- }
-
- tag_index = kcalloc(depth, sizeof(struct request *), GFP_ATOMIC);
- if (!tag_index)
- goto fail;
-
- nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
- tag_map = kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC);
- if (!tag_map)
- goto fail;
-
- tags->real_max_depth = depth;
- tags->max_depth = depth;
- tags->tag_index = tag_index;
- tags->tag_map = tag_map;
-
- return 0;
-fail:
- kfree(tag_index);
- return -ENOMEM;
-}
-
-static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
- int depth, int alloc_policy)
-{
- struct blk_queue_tag *tags;
-
- tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
- if (!tags)
- goto fail;
-
- if (init_tag_map(q, tags, depth))
- goto fail;
-
- atomic_set(&tags->refcnt, 1);
- tags->alloc_policy = alloc_policy;
- tags->next_tag = 0;
- return tags;
-fail:
- kfree(tags);
- return NULL;
-}
-
-/**
- * blk_init_tags - initialize the tag info for an external tag map
- * @depth: the maximum queue depth supported
- * @alloc_policy: tag allocation policy
- **/
-struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
-{
- return __blk_queue_init_tags(NULL, depth, alloc_policy);
-}
-EXPORT_SYMBOL(blk_init_tags);
-
-/**
- * blk_queue_init_tags - initialize the queue tag info
- * @q: the request queue for the device
- * @depth: the maximum queue depth supported
- * @tags: the tag to use
- * @alloc_policy: tag allocation policy
- *
- * Queue lock must be held here if the function is called to resize an
- * existing map.
- **/
-int blk_queue_init_tags(struct request_queue *q, int depth,
- struct blk_queue_tag *tags, int alloc_policy)
-{
- int rc;
-
- BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
-
- if (!tags && !q->queue_tags) {
- tags = __blk_queue_init_tags(q, depth, alloc_policy);
-
- if (!tags)
- return -ENOMEM;
-
- } else if (q->queue_tags) {
- rc = blk_queue_resize_tags(q, depth);
- if (rc)
- return rc;
- queue_flag_set(QUEUE_FLAG_QUEUED, q);
- return 0;
- } else
- atomic_inc(&tags->refcnt);
-
- /*
- * assign it, all done
- */
- q->queue_tags = tags;
- queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
- return 0;
-}
-EXPORT_SYMBOL(blk_queue_init_tags);
-
-/**
- * blk_queue_resize_tags - change the queueing depth
- * @q: the request queue for the device
- * @new_depth: the new max command queueing depth
- *
- * Notes:
- * Must be called with the queue lock held.
- **/
-int blk_queue_resize_tags(struct request_queue *q, int new_depth)
-{
- struct blk_queue_tag *bqt = q->queue_tags;
- struct request **tag_index;
- unsigned long *tag_map;
- int max_depth, nr_ulongs;
-
- if (!bqt)
- return -ENXIO;
-
- /*
- * if we already have large enough real_max_depth. just
- * adjust max_depth. *NOTE* as requests with tag value
- * between new_depth and real_max_depth can be in-flight, tag
- * map can not be shrunk blindly here.
- */
- if (new_depth <= bqt->real_max_depth) {
- bqt->max_depth = new_depth;
- return 0;
- }
-
- /*
- * Currently cannot replace a shared tag map with a new
- * one, so error out if this is the case
- */
- if (atomic_read(&bqt->refcnt) != 1)
- return -EBUSY;
-
- /*
- * save the old state info, so we can copy it back
- */
- tag_index = bqt->tag_index;
- tag_map = bqt->tag_map;
- max_depth = bqt->real_max_depth;
-
- if (init_tag_map(q, bqt, new_depth))
- return -ENOMEM;
-
- memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
- nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
- memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
-
- kfree(tag_index);
- kfree(tag_map);
- return 0;
-}
-EXPORT_SYMBOL(blk_queue_resize_tags);
-
-/**
- * blk_queue_end_tag - end tag operations for a request
- * @q: the request queue for the device
- * @rq: the request that has completed
- *
- * Description:
- * Typically called when end_that_request_first() returns %0, meaning
- * all transfers have been done for a request. It's important to call
- * this function before end_that_request_last(), as that will put the
- * request back on the free list thus corrupting the internal tag list.
- **/
-void blk_queue_end_tag(struct request_queue *q, struct request *rq)
-{
- struct blk_queue_tag *bqt = q->queue_tags;
- unsigned tag = rq->tag; /* negative tags invalid */
-
- lockdep_assert_held(q->queue_lock);
-
- BUG_ON(tag >= bqt->real_max_depth);
-
- list_del_init(&rq->queuelist);
- rq->rq_flags &= ~RQF_QUEUED;
- rq->tag = -1;
- rq->internal_tag = -1;
-
- if (unlikely(bqt->tag_index[tag] == NULL))
- printk(KERN_ERR "%s: tag %d is missing\n",
- __func__, tag);
-
- bqt->tag_index[tag] = NULL;
-
- if (unlikely(!test_bit(tag, bqt->tag_map))) {
- printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
- __func__, tag);
- return;
- }
- /*
- * The tag_map bit acts as a lock for tag_index[bit], so we need
- * unlock memory barrier semantics.
- */
- clear_bit_unlock(tag, bqt->tag_map);
-}
-
-/**
- * blk_queue_start_tag - find a free tag and assign it
- * @q: the request queue for the device
- * @rq: the block request that needs tagging
- *
- * Description:
- * This can either be used as a stand-alone helper, or possibly be
- * assigned as the queue &prep_rq_fn (in which case &struct request
- * automagically gets a tag assigned). Note that this function
- * assumes that any type of request can be queued! if this is not
- * true for your device, you must check the request type before
- * calling this function. The request will also be removed from
- * the request queue, so it's the drivers responsibility to readd
- * it if it should need to be restarted for some reason.
- **/
-int blk_queue_start_tag(struct request_queue *q, struct request *rq)
-{
- struct blk_queue_tag *bqt = q->queue_tags;
- unsigned max_depth;
- int tag;
-
- lockdep_assert_held(q->queue_lock);
-
- if (unlikely((rq->rq_flags & RQF_QUEUED))) {
- printk(KERN_ERR
- "%s: request %p for device [%s] already tagged %d",
- __func__, rq,
- rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
- BUG();
- }
-
- /*
- * Protect against shared tag maps, as we may not have exclusive
- * access to the tag map.
- *
- * We reserve a few tags just for sync IO, since we don't want
- * to starve sync IO on behalf of flooding async IO.
- */
- max_depth = bqt->max_depth;
- if (!rq_is_sync(rq) && max_depth > 1) {
- switch (max_depth) {
- case 2:
- max_depth = 1;
- break;
- case 3:
- max_depth = 2;
- break;
- default:
- max_depth -= 2;
- }
- if (q->in_flight[BLK_RW_ASYNC] > max_depth)
- return 1;
- }
-
- do {
- if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
- tag = find_first_zero_bit(bqt->tag_map, max_depth);
- if (tag >= max_depth)
- return 1;
- } else {
- int start = bqt->next_tag;
- int size = min_t(int, bqt->max_depth, max_depth + start);
- tag = find_next_zero_bit(bqt->tag_map, size, start);
- if (tag >= size && start + size > bqt->max_depth) {
- size = start + size - bqt->max_depth;
- tag = find_first_zero_bit(bqt->tag_map, size);
- }
- if (tag >= size)
- return 1;
- }
-
- } while (test_and_set_bit_lock(tag, bqt->tag_map));
- /*
- * We need lock ordering semantics given by test_and_set_bit_lock.
- * See blk_queue_end_tag for details.
- */
-
- bqt->next_tag = (tag + 1) % bqt->max_depth;
- rq->rq_flags |= RQF_QUEUED;
- rq->tag = tag;
- bqt->tag_index[tag] = rq;
- blk_start_request(rq);
- return 0;
-}
-EXPORT_SYMBOL(blk_queue_start_tag);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index db1a3a2ae006..1b97a73d2fb1 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1243,7 +1243,7 @@ static void throtl_pending_timer_fn(struct timer_list *t)
bool dispatched;
int ret;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
if (throtl_can_upgrade(td, NULL))
throtl_upgrade_state(td);
@@ -1266,9 +1266,9 @@ again:
break;
/* this dispatch windows is still open, relax and repeat */
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
cpu_relax();
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
}
if (!dispatched)
@@ -1290,7 +1290,7 @@ again:
queue_work(kthrotld_workqueue, &td->dispatch_work);
}
out_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
}
/**
@@ -1314,11 +1314,11 @@ static void blk_throtl_dispatch_work_fn(struct work_struct *work)
bio_list_init(&bio_list_on_stack);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
for (rw = READ; rw <= WRITE; rw++)
while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
bio_list_add(&bio_list_on_stack, bio);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
if (!bio_list_empty(&bio_list_on_stack)) {
blk_start_plug(&plug);
@@ -2115,16 +2115,6 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
}
#endif
-static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
-{
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- /* fallback to root_blkg if we fail to get a blkg ref */
- if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
- bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-#endif
-}
-
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
struct bio *bio)
{
@@ -2141,14 +2131,10 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
goto out;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
throtl_update_latency_buckets(td);
- if (unlikely(blk_queue_bypass(q)))
- goto out_unlock;
-
- blk_throtl_assoc_bio(tg, bio);
blk_throtl_update_idletime(tg);
sq = &tg->service_queue;
@@ -2227,7 +2213,7 @@ again:
}
out_unlock:
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
out:
bio_set_flag(bio, BIO_THROTTLED);
@@ -2348,7 +2334,7 @@ static void tg_drain_bios(struct throtl_service_queue *parent_sq)
* Dispatch all currently throttled bios on @q through ->make_request_fn().
*/
void blk_throtl_drain(struct request_queue *q)
- __releases(q->queue_lock) __acquires(q->queue_lock)
+ __releases(&q->queue_lock) __acquires(&q->queue_lock)
{
struct throtl_data *td = q->td;
struct blkcg_gq *blkg;
@@ -2356,7 +2342,6 @@ void blk_throtl_drain(struct request_queue *q)
struct bio *bio;
int rw;
- queue_lockdep_assert_held(q);
rcu_read_lock();
/*
@@ -2372,7 +2357,7 @@ void blk_throtl_drain(struct request_queue *q)
tg_drain_bios(&td->service_queue);
rcu_read_unlock();
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&q->queue_lock);
/* all bios now should be in td->service_queue, issue them */
for (rw = READ; rw <= WRITE; rw++)
@@ -2380,7 +2365,7 @@ void blk_throtl_drain(struct request_queue *q)
NULL)))
generic_make_request(bio);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
@@ -2460,7 +2445,7 @@ void blk_throtl_register_queue(struct request_queue *q)
td->throtl_slice = DFL_THROTL_SLICE_HD;
#endif
- td->track_bio_latency = !queue_is_rq_based(q);
+ td->track_bio_latency = !queue_is_mq(q);
if (!td->track_bio_latency)
blk_stat_enable_accounting(q);
}
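The mechanical q->queue_lock to &q->queue_lock edits throughout this file follow from a structural change made elsewhere in the series: request_queue used to carry a spinlock_t pointer, which legacy drivers could aim at their own lock, and now embeds the lock directly. A reduced before/after sketch, showing only the one field in isolation:

/* before: the queue borrows a lock via a pointer */
struct request_queue {
	spinlock_t *queue_lock;	/* callers: spin_lock_irq(q->queue_lock) */
};

/* after: the lock lives in the queue itself */
struct request_queue {
	spinlock_t queue_lock;	/* callers: spin_lock_irq(&q->queue_lock) */
};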
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index f2cfd56e1606..124c26128bf6 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -68,80 +68,6 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
#endif /* CONFIG_FAIL_IO_TIMEOUT */
-/*
- * blk_delete_timer - Delete/cancel timer for a given function.
- * @req: request that we are canceling timer for
- *
- */
-void blk_delete_timer(struct request *req)
-{
- list_del_init(&req->timeout_list);
-}
-
-static void blk_rq_timed_out(struct request *req)
-{
- struct request_queue *q = req->q;
- enum blk_eh_timer_return ret = BLK_EH_RESET_TIMER;
-
- if (q->rq_timed_out_fn)
- ret = q->rq_timed_out_fn(req);
- switch (ret) {
- case BLK_EH_RESET_TIMER:
- blk_add_timer(req);
- blk_clear_rq_complete(req);
- break;
- case BLK_EH_DONE:
- /*
- * LLD handles this for now but in the future
- * we can send a request msg to abort the command
- * and we can move more of the generic scsi eh code to
- * the blk layer.
- */
- break;
- default:
- printk(KERN_ERR "block: bad eh return: %d\n", ret);
- break;
- }
-}
-
-static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
- unsigned int *next_set)
-{
- const unsigned long deadline = blk_rq_deadline(rq);
-
- if (time_after_eq(jiffies, deadline)) {
- list_del_init(&rq->timeout_list);
-
- /*
- * Check if we raced with end io completion
- */
- if (!blk_mark_rq_complete(rq))
- blk_rq_timed_out(rq);
- } else if (!*next_set || time_after(*next_timeout, deadline)) {
- *next_timeout = deadline;
- *next_set = 1;
- }
-}
-
-void blk_timeout_work(struct work_struct *work)
-{
- struct request_queue *q =
- container_of(work, struct request_queue, timeout_work);
- unsigned long flags, next = 0;
- struct request *rq, *tmp;
- int next_set = 0;
-
- spin_lock_irqsave(q->queue_lock, flags);
-
- list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
- blk_rq_check_expired(rq, &next, &next_set);
-
- if (next_set)
- mod_timer(&q->timeout, round_jiffies_up(next));
-
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
/**
* blk_abort_request -- Request recovery for the specified command
* @req: pointer to the request of interest
@@ -149,24 +75,17 @@ void blk_timeout_work(struct work_struct *work)
* This function requests that the block layer start recovery for the
* request by deleting the timer and calling the q's timeout function.
* LLDDs who implement their own error recovery MAY ignore the timeout
- * event if they generated blk_abort_req. Must hold queue lock.
+ * event if they generated blk_abort_request.
*/
void blk_abort_request(struct request *req)
{
- if (req->q->mq_ops) {
- /*
- * All we need to ensure is that timeout scan takes place
- * immediately and that scan sees the new timeout value.
- * No need for fancy synchronizations.
- */
- blk_rq_set_deadline(req, jiffies);
- kblockd_schedule_work(&req->q->timeout_work);
- } else {
- if (blk_mark_rq_complete(req))
- return;
- blk_delete_timer(req);
- blk_rq_timed_out(req);
- }
+ /*
+ * All we need to ensure is that timeout scan takes place
+ * immediately and that scan sees the new timeout value.
+ * No need for fancy synchronizations.
+ */
+ WRITE_ONCE(req->deadline, jiffies);
+ kblockd_schedule_work(&req->q->timeout_work);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
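With the legacy path gone, aborting a request no longer needs the mark-complete handshake; a plain store suffices because the timeout scan only has to observe it. A reduced sketch of the reader side this pairs with, modeled on blk_mq_req_expired() in block/blk-mq.c (the reference counting that replaced the old complete bit is omitted):

static bool rq_expired_sketch(struct request *rq, unsigned long *next)
{
	unsigned long deadline = READ_ONCE(rq->deadline);

	if (time_after_eq(jiffies, deadline))
		return true;

	/* remember the earliest pending deadline for re-arming the timer */
	if (*next == 0 || time_after(*next, deadline))
		*next = deadline;
	return false;
}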
@@ -194,15 +113,6 @@ void blk_add_timer(struct request *req)
struct request_queue *q = req->q;
unsigned long expiry;
- if (!q->mq_ops)
- lockdep_assert_held(q->queue_lock);
-
- /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
- if (!q->mq_ops && !q->rq_timed_out_fn)
- return;
-
- BUG_ON(!list_empty(&req->timeout_list));
-
/*
* Some LLDs, like scsi, peek at the timeout to prevent a
* command from being retried forever.
@@ -211,21 +121,16 @@ void blk_add_timer(struct request *req)
req->timeout = q->rq_timeout;
req->rq_flags &= ~RQF_TIMED_OUT;
- blk_rq_set_deadline(req, jiffies + req->timeout);
- /*
- * Only the non-mq case needs to add the request to a protected list.
- * For the mq case we simply scan the tag map.
- */
- if (!q->mq_ops)
- list_add_tail(&req->timeout_list, &req->q->timeout_list);
+ expiry = jiffies + req->timeout;
+ WRITE_ONCE(req->deadline, expiry);
/*
* If the timer isn't already pending or this timeout is earlier
* than an existing one, modify the timer. Round up to next nearest
* second.
*/
- expiry = blk_rq_timeout(round_jiffies_up(blk_rq_deadline(req)));
+ expiry = blk_rq_timeout(round_jiffies_up(expiry));
if (!timer_pending(&q->timeout) ||
time_before(expiry, q->timeout.expires)) {
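blk_rq_timeout(), kept in block/blk.h further down, caps how far out the shared queue timer may be armed, so very long per-request timeouts are re-checked in bounded steps. A sketch consistent with the BLK_MAX_TIMEOUT definition retained later in this patch (illustrative name):

static unsigned long clamp_queue_timer_sketch(unsigned long expiry)
{
	unsigned long maxt = round_jiffies_up(jiffies + BLK_MAX_TIMEOUT);

	return time_after(expiry, maxt) ? maxt : expiry;	/* at most 5s out */
}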
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 8ac93fcbaa2e..f0c56649775f 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -489,31 +489,21 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
}
struct wbt_wait_data {
- struct wait_queue_entry wq;
- struct task_struct *task;
struct rq_wb *rwb;
- struct rq_wait *rqw;
+ enum wbt_flags wb_acct;
unsigned long rw;
- bool got_token;
};
-static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
- int wake_flags, void *key)
+static bool wbt_inflight_cb(struct rq_wait *rqw, void *private_data)
{
- struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
- wq);
-
- /*
- * If we fail to get a budget, return -1 to interrupt the wake up
- * loop in __wake_up_common.
- */
- if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
- return -1;
+ struct wbt_wait_data *data = private_data;
+ return rq_wait_inc_below(rqw, get_limit(data->rwb, data->rw));
+}
- data->got_token = true;
- list_del_init(&curr->entry);
- wake_up_process(data->task);
- return 1;
+static void wbt_cleanup_cb(struct rq_wait *rqw, void *private_data)
+{
+ struct wbt_wait_data *data = private_data;
+ wbt_rqw_done(data->rwb, rqw, data->wb_acct);
}
/*
@@ -521,57 +511,16 @@ static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
* the timer to kick off queuing again.
*/
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
- unsigned long rw, spinlock_t *lock)
- __releases(lock)
- __acquires(lock)
+ unsigned long rw)
{
struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
struct wbt_wait_data data = {
- .wq = {
- .func = wbt_wake_function,
- .entry = LIST_HEAD_INIT(data.wq.entry),
- },
- .task = current,
.rwb = rwb,
- .rqw = rqw,
+ .wb_acct = wb_acct,
.rw = rw,
};
- bool has_sleeper;
-
- has_sleeper = wq_has_sleeper(&rqw->wait);
- if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
- return;
- prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
- do {
- if (data.got_token)
- break;
-
- if (!has_sleeper &&
- rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
- finish_wait(&rqw->wait, &data.wq);
-
- /*
- * We raced with wbt_wake_function() getting a token,
- * which means we now have two. Put our local token
- * and wake anyone else potentially waiting for one.
- */
- if (data.got_token)
- wbt_rqw_done(rwb, rqw, wb_acct);
- break;
- }
-
- if (lock) {
- spin_unlock_irq(lock);
- io_schedule();
- spin_lock_irq(lock);
- } else
- io_schedule();
-
- has_sleeper = false;
- } while (1);
-
- finish_wait(&rqw->wait, &data.wq);
+ rq_qos_wait(rqw, &data, wbt_inflight_cb, wbt_cleanup_cb);
}
static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
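The open-coded wait loop is replaced by the shared rq_qos_wait() helper, with the policy-specific pieces supplied as callbacks. A simplified sketch of the helper's shape (the real version in block/blk-rq-qos.c adds a custom wake function and token-handoff race handling; only the straightforward path is shown):

static void rq_qos_wait_sketch(struct rq_wait *rqw, void *private_data,
		bool (*acquire_inflight_cb)(struct rq_wait *, void *),
		void (*cleanup_cb)(struct rq_wait *, void *))
{
	DEFINE_WAIT(wait);

	/* fast path: grab a budget without queueing if nobody is waiting */
	if (!wq_has_sleeper(&rqw->wait) &&
	    acquire_inflight_cb(rqw, private_data))
		return;

	do {
		prepare_to_wait_exclusive(&rqw->wait, &wait,
					  TASK_UNINTERRUPTIBLE);
		if (acquire_inflight_cb(rqw, private_data))
			break;
		io_schedule();
	} while (1);
	finish_wait(&rqw->wait, &wait);

	/*
	 * The omitted race handling is where cleanup_cb (wbt_cleanup_cb
	 * above) comes in: if a waker hands this task a budget while it
	 * also acquired one itself, the duplicate is returned there.
	 */
}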
@@ -624,7 +573,7 @@ static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
* in an irq held spinlock, if it holds one when calling this function.
* If we do sleep, we'll release and re-grab it.
*/
-static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
+static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
struct rq_wb *rwb = RQWB(rqos);
enum wbt_flags flags;
@@ -636,7 +585,7 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
return;
}
- __wbt_wait(rwb, flags, bio->bi_opf, lock);
+ __wbt_wait(rwb, flags, bio->bi_opf);
if (!blk_stat_is_active(rwb->cb))
rwb_arm_timer(rwb);
@@ -709,8 +658,7 @@ void wbt_enable_default(struct request_queue *q)
if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
return;
- if ((q->mq_ops && IS_ENABLED(CONFIG_BLK_WBT_MQ)) ||
- (q->request_fn && IS_ENABLED(CONFIG_BLK_WBT_SQ)))
+ if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
wbt_init(q);
}
EXPORT_SYMBOL_GPL(wbt_enable_default);
@@ -760,11 +708,100 @@ void wbt_disable_default(struct request_queue *q)
if (!rqos)
return;
rwb = RQWB(rqos);
- if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
+ if (rwb->enable_state == WBT_STATE_ON_DEFAULT) {
+ blk_stat_deactivate(rwb->cb);
rwb->wb_normal = 0;
+ }
}
EXPORT_SYMBOL_GPL(wbt_disable_default);
+#ifdef CONFIG_BLK_DEBUG_FS
+static int wbt_curr_win_nsec_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%llu\n", rwb->cur_win_nsec);
+ return 0;
+}
+
+static int wbt_enabled_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%d\n", rwb->enable_state);
+ return 0;
+}
+
+static int wbt_id_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+
+ seq_printf(m, "%u\n", rqos->id);
+ return 0;
+}
+
+static int wbt_inflight_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+ int i;
+
+ for (i = 0; i < WBT_NUM_RWQ; i++)
+ seq_printf(m, "%d: inflight %d\n", i,
+ atomic_read(&rwb->rq_wait[i].inflight));
+ return 0;
+}
+
+static int wbt_min_lat_nsec_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%lu\n", rwb->min_lat_nsec);
+ return 0;
+}
+
+static int wbt_unknown_cnt_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%u\n", rwb->unknown_cnt);
+ return 0;
+}
+
+static int wbt_normal_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%u\n", rwb->wb_normal);
+ return 0;
+}
+
+static int wbt_background_show(void *data, struct seq_file *m)
+{
+ struct rq_qos *rqos = data;
+ struct rq_wb *rwb = RQWB(rqos);
+
+ seq_printf(m, "%u\n", rwb->wb_background);
+ return 0;
+}
+
+static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
+ {"curr_win_nsec", 0400, wbt_curr_win_nsec_show},
+ {"enabled", 0400, wbt_enabled_show},
+ {"id", 0400, wbt_id_show},
+ {"inflight", 0400, wbt_inflight_show},
+ {"min_lat_nsec", 0400, wbt_min_lat_nsec_show},
+ {"unknown_cnt", 0400, wbt_unknown_cnt_show},
+ {"wb_normal", 0400, wbt_normal_show},
+ {"wb_background", 0400, wbt_background_show},
+ {},
+};
+#endif
static struct rq_qos_ops wbt_rqos_ops = {
.throttle = wbt_wait,
@@ -774,6 +811,9 @@ static struct rq_qos_ops wbt_rqos_ops = {
.done = wbt_done,
.cleanup = wbt_cleanup,
.exit = wbt_exit,
+#ifdef CONFIG_BLK_DEBUG_FS
+ .debugfs_attrs = wbt_debugfs_attrs,
+#endif
};
int wbt_init(struct request_queue *q)
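The sentinel-terminated table above is walked by the blk-mq debugfs core once the rq_qos is registered. A reduced sketch of the consumer, modeled on debugfs_create_files() in block/blk-mq-debugfs.c (the seq_file glue behind blk_mq_debugfs_fops is omitted):

static void create_files_sketch(struct dentry *parent, void *data,
				const struct blk_mq_debugfs_attr *attr)
{
	/* the empty {} entry terminates wbt_debugfs_attrs */
	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent, data,
				    &blk_mq_debugfs_fops);
}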
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 13ba2011a306..2d98803faec2 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -378,7 +378,7 @@ static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones)
struct page *page;
int order;
- for (order = get_order(size); order > 0; order--) {
+ for (order = get_order(size); order >= 0; order--) {
page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order);
if (page) {
*nr_zones = min_t(unsigned int, *nr_zones,
@@ -421,7 +421,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
* BIO based queues do not use a scheduler so only q->nr_zones
* needs to be updated so that the sysfs exposed value is correct.
*/
- if (!queue_is_rq_based(q)) {
+ if (!queue_is_mq(q)) {
q->nr_zones = nr_zones;
return 0;
}
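The first hunk above fixes an off-by-one in the allocation fallback: get_order() returns 0 whenever the zone array fits in a single page, so with "order > 0" the loop body never ran for small devices, and larger allocations had no order-0 last resort. Worked numbers, assuming the 64-byte uapi struct blk_zone:

/*
 * A disk with 24 zones needs 24 * 64 = 1536 bytes, so get_order() is 0:
 *   old: for (order = 0; order > 0; order--)   - body never executes,
 *        blk_alloc_zones() returns NULL with memory freely available;
 *   new: for (order = 0; order >= 0; order--)  - one page is tried.
 */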
diff --git a/block/blk.h b/block/blk.h
index 0089fefdf771..848278c52030 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -7,12 +7,6 @@
#include <xen/xen.h>
#include "blk-mq.h"
-/* Amount of time in which a process may batch requests */
-#define BLK_BATCH_TIME (HZ/50UL)
-
-/* Number of requests a "batching" process may submit */
-#define BLK_BATCH_REQ 32
-
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
@@ -38,85 +32,13 @@ struct blk_flush_queue {
};
extern struct kmem_cache *blk_requestq_cachep;
-extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;
-/*
- * @q->queue_lock is set while a queue is being initialized. Since we know
- * that no other threads access the queue object before @q->queue_lock has
- * been set, it is safe to manipulate queue flags without holding the
- * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
- * blk_init_allocated_queue().
- */
-static inline void queue_lockdep_assert_held(struct request_queue *q)
-{
- if (q->queue_lock)
- lockdep_assert_held(q->queue_lock);
-}
-
-static inline void queue_flag_set_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
- kref_read(&q->kobj.kref))
- lockdep_assert_held(q->queue_lock);
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
- kref_read(&q->kobj.kref))
- lockdep_assert_held(q->queue_lock);
- __clear_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_flag_test_and_clear(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (test_bit(flag, &q->queue_flags)) {
- __clear_bit(flag, &q->queue_flags);
- return 1;
- }
-
- return 0;
-}
-
-static inline int queue_flag_test_and_set(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (!test_bit(flag, &q->queue_flags)) {
- __set_bit(flag, &q->queue_flags);
- return 0;
- }
-
- return 1;
-}
-
-static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __clear_bit(flag, &q->queue_flags);
-}
-
-static inline struct blk_flush_queue *blk_get_flush_queue(
- struct request_queue *q, struct blk_mq_ctx *ctx)
+static inline struct blk_flush_queue *
+blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
- if (q->mq_ops)
- return blk_mq_map_queue(q, ctx->cpu)->fq;
- return q->fq;
+ return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx->cpu)->fq;
}
static inline void __blk_get_queue(struct request_queue *q)
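The extra REQ_OP_FLUSH argument reflects the new per-operation queue maps: blk_mq_map_queue() now derives a hardware-context type from the op before indexing the per-CPU map. A reduced sketch of the lookup under the assumption of a single default map (field names follow this series' blk_mq_tag_set):

static struct blk_mq_hw_ctx *
map_queue_sketch(struct request_queue *q, unsigned int cpu)
{
	struct blk_mq_queue_map *qmap = &q->tag_set->map[HCTX_TYPE_DEFAULT];

	return q->queue_hw_ctx[qmap->mq_map[cpu]];
}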
@@ -128,15 +50,9 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
-int blk_init_rl(struct request_list *rl, struct request_queue *q,
- gfp_t gfp_mask);
-void blk_exit_rl(struct request_queue *q, struct request_list *rl);
void blk_exit_queue(struct request_queue *q);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
-void blk_queue_bypass_start(struct request_queue *q);
-void blk_queue_bypass_end(struct request_queue *q);
-void __blk_queue_free_tags(struct request_queue *q);
void blk_freeze_queue(struct request_queue *q);
static inline void blk_queue_enter_live(struct request_queue *q)
@@ -235,11 +151,8 @@ static inline bool bio_integrity_endio(struct bio *bio)
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
-void blk_delete_timer(struct request *);
-
bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
struct bio *bio);
@@ -248,58 +161,19 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
- unsigned int *request_count,
struct request **same_queue_rq);
-unsigned int blk_plug_queued_count(struct request_queue *q);
void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req, u64 now);
/*
- * EH timer and IO completion will both attempt to 'grab' the request, make
- * sure that only one of them succeeds. Steal the bottom bit of the
- * __deadline field for this.
- */
-static inline int blk_mark_rq_complete(struct request *rq)
-{
- return test_and_set_bit(0, &rq->__deadline);
-}
-
-static inline void blk_clear_rq_complete(struct request *rq)
-{
- clear_bit(0, &rq->__deadline);
-}
-
-static inline bool blk_rq_is_complete(struct request *rq)
-{
- return test_bit(0, &rq->__deadline);
-}
-
-/*
* Internal elevator interface
*/
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
void blk_insert_flush(struct request *rq);
-static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (e->type->ops.sq.elevator_activate_req_fn)
- e->type->ops.sq.elevator_activate_req_fn(q, rq);
-}
-
-static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (e->type->ops.sq.elevator_deactivate_req_fn)
- e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
-}
-
-int elevator_init(struct request_queue *);
int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e);
@@ -334,31 +208,8 @@ void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
-void blk_queue_congestion_threshold(struct request_queue *q);
-
int blk_dev_init(void);
-
-/*
- * Return the threshold (number of used requests) at which the queue is
- * considered to be congested. It include a little hysteresis to keep the
- * context switch rate down.
- */
-static inline int queue_congestion_on_threshold(struct request_queue *q)
-{
- return q->nr_congestion_on;
-}
-
-/*
- * The threshold at which a queue is considered to be uncongested
- */
-static inline int queue_congestion_off_threshold(struct request_queue *q)
-{
- return q->nr_congestion_off;
-}
-
-extern int blk_update_nr_requests(struct request_queue *, unsigned int);
-
/*
* Contribute to IO statistics IFF:
*
@@ -381,21 +232,6 @@ static inline void req_set_nomerge(struct request_queue *q, struct request *req)
}
/*
- * Steal a bit from this field for legacy IO path atomic IO marking. Note that
- * setting the deadline clears the bottom bit, potentially clearing the
- * completed bit. The user has to be OK with this (current ones are fine).
- */
-static inline void blk_rq_set_deadline(struct request *rq, unsigned long time)
-{
- rq->__deadline = time & ~0x1UL;
-}
-
-static inline unsigned long blk_rq_deadline(struct request *rq)
-{
- return rq->__deadline & ~0x1UL;
-}
-
-/*
* The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
* is defined as 'unsigned int', and it has to be aligned to the logical
* block size, which is the minimum unit accepted by hardware.
@@ -417,22 +253,6 @@ void ioc_clear_queue(struct request_queue *q);
int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
/**
- * rq_ioc - determine io_context for request allocation
- * @bio: request being allocated is for this bio (can be %NULL)
- *
- * Determine io_context to use for request allocation for @bio. May return
- * %NULL if %current->io_context doesn't exist.
- */
-static inline struct io_context *rq_ioc(struct bio *bio)
-{
-#ifdef CONFIG_BLK_CGROUP
- if (bio && bio->bi_ioc)
- return bio->bi_ioc;
-#endif
- return current->io_context;
-}
-
-/**
* create_io_context - try to create task->io_context
* @gfp_mask: allocation mask
* @node: allocation node
@@ -490,8 +310,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
}
#endif /* CONFIG_BOUNCE */
-extern void blk_drain_queue(struct request_queue *q);
-
#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
diff --git a/block/bounce.c b/block/bounce.c
index 559c55bda040..ffb9e9ecfa7e 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -277,7 +277,8 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
}
}
- bio_clone_blkcg_association(bio, bio_src);
+ bio_clone_blkg_association(bio, bio_src);
+ blkcg_bio_issue_init(bio);
return bio;
}
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index f3501cdaf1a6..192129856342 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -21,7 +21,7 @@
*
*/
#include <linux/slab.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
@@ -31,6 +31,12 @@
#define uptr64(val) ((void __user *)(uintptr_t)(val))
+struct bsg_set {
+ struct blk_mq_tag_set tag_set;
+ bsg_job_fn *job_fn;
+ bsg_timeout_fn *timeout_fn;
+};
+
static int bsg_transport_check_proto(struct sg_io_v4 *hdr)
{
if (hdr->protocol != BSG_PROTOCOL_SCSI ||
@@ -129,7 +135,7 @@ static void bsg_teardown_job(struct kref *kref)
kfree(job->request_payload.sg_list);
kfree(job->reply_payload.sg_list);
- blk_end_request_all(rq, BLK_STS_OK);
+ blk_mq_end_request(rq, BLK_STS_OK);
}
void bsg_job_put(struct bsg_job *job)
@@ -157,15 +163,15 @@ void bsg_job_done(struct bsg_job *job, int result,
{
job->result = result;
job->reply_payload_rcv_len = reply_payload_rcv_len;
- blk_complete_request(blk_mq_rq_from_pdu(job));
+ blk_mq_complete_request(blk_mq_rq_from_pdu(job));
}
EXPORT_SYMBOL_GPL(bsg_job_done);
/**
- * bsg_softirq_done - softirq done routine for destroying the bsg requests
+ * bsg_complete - softirq done routine for destroying the bsg requests
* @rq: BSG request that holds the job to be destroyed
*/
-static void bsg_softirq_done(struct request *rq)
+static void bsg_complete(struct request *rq)
{
struct bsg_job *job = blk_mq_rq_to_pdu(rq);
@@ -224,54 +230,48 @@ failjob_rls_job:
}
/**
- * bsg_request_fn - generic handler for bsg requests
- * @q: request queue to manage
+ * bsg_queue_rq - generic handler for bsg requests
+ * @hctx: hardware queue
+ * @bd: queue data
*
* On error the create_bsg_job function should return a -Exyz error value
* that will be set to ->result.
*
* Drivers/subsys should pass this to the queue init function.
*/
-static void bsg_request_fn(struct request_queue *q)
- __releases(q->queue_lock)
- __acquires(q->queue_lock)
+static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct request_queue *q = hctx->queue;
struct device *dev = q->queuedata;
- struct request *req;
+ struct request *req = bd->rq;
+ struct bsg_set *bset =
+ container_of(q->tag_set, struct bsg_set, tag_set);
int ret;
+ blk_mq_start_request(req);
+
if (!get_device(dev))
- return;
-
- while (1) {
- req = blk_fetch_request(q);
- if (!req)
- break;
- spin_unlock_irq(q->queue_lock);
-
- if (!bsg_prepare_job(dev, req)) {
- blk_end_request_all(req, BLK_STS_OK);
- spin_lock_irq(q->queue_lock);
- continue;
- }
-
- ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
- spin_lock_irq(q->queue_lock);
- if (ret)
- break;
- }
+ return BLK_STS_IOERR;
+
+ if (!bsg_prepare_job(dev, req))
+ return BLK_STS_IOERR;
+
+ ret = bset->job_fn(blk_mq_rq_to_pdu(req));
+ if (ret)
+ return BLK_STS_IOERR;
- spin_unlock_irq(q->queue_lock);
put_device(dev);
- spin_lock_irq(q->queue_lock);
+ return BLK_STS_OK;
}
/* called right after the request is allocated for the request_queue */
-static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
+static int bsg_init_rq(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx, unsigned int numa_node)
{
struct bsg_job *job = blk_mq_rq_to_pdu(req);
- job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
+ job->reply = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
if (!job->reply)
return -ENOMEM;
return 0;
@@ -289,13 +289,47 @@ static void bsg_initialize_rq(struct request *req)
job->dd_data = job + 1;
}
-static void bsg_exit_rq(struct request_queue *q, struct request *req)
+static void bsg_exit_rq(struct blk_mq_tag_set *set, struct request *req,
+ unsigned int hctx_idx)
{
struct bsg_job *job = blk_mq_rq_to_pdu(req);
kfree(job->reply);
}
+void bsg_remove_queue(struct request_queue *q)
+{
+ if (q) {
+ struct bsg_set *bset =
+ container_of(q->tag_set, struct bsg_set, tag_set);
+
+ bsg_unregister_queue(q);
+ blk_cleanup_queue(q);
+ blk_mq_free_tag_set(&bset->tag_set);
+ kfree(bset);
+ }
+}
+EXPORT_SYMBOL_GPL(bsg_remove_queue);
+
+static enum blk_eh_timer_return bsg_timeout(struct request *rq, bool reserved)
+{
+ struct bsg_set *bset =
+ container_of(rq->q->tag_set, struct bsg_set, tag_set);
+
+ if (!bset->timeout_fn)
+ return BLK_EH_DONE;
+ return bset->timeout_fn(rq);
+}
+
+static const struct blk_mq_ops bsg_mq_ops = {
+ .queue_rq = bsg_queue_rq,
+ .init_request = bsg_init_rq,
+ .exit_request = bsg_exit_rq,
+ .initialize_rq_fn = bsg_initialize_rq,
+ .complete = bsg_complete,
+ .timeout = bsg_timeout,
+};
+
/**
* bsg_setup_queue - Create and add the bsg hooks so we can receive requests
* @dev: device to attach bsg device to
@@ -304,28 +338,38 @@ static void bsg_exit_rq(struct request_queue *q, struct request *req)
* @dd_job_size: size of LLD data needed for each job
*/
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, int dd_job_size)
+ bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size)
{
+ struct bsg_set *bset;
+ struct blk_mq_tag_set *set;
struct request_queue *q;
- int ret;
+ int ret = -ENOMEM;
- q = blk_alloc_queue(GFP_KERNEL);
- if (!q)
+ bset = kzalloc(sizeof(*bset), GFP_KERNEL);
+ if (!bset)
return ERR_PTR(-ENOMEM);
- q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
- q->init_rq_fn = bsg_init_rq;
- q->exit_rq_fn = bsg_exit_rq;
- q->initialize_rq_fn = bsg_initialize_rq;
- q->request_fn = bsg_request_fn;
- ret = blk_init_allocated_queue(q);
- if (ret)
- goto out_cleanup_queue;
+ bset->job_fn = job_fn;
+ bset->timeout_fn = timeout;
+
+ set = &bset->tag_set;
+ set->ops = &bsg_mq_ops;
+ set->nr_hw_queues = 1;
+ set->queue_depth = 128;
+ set->numa_node = NUMA_NO_NODE;
+ set->cmd_size = sizeof(struct bsg_job) + dd_job_size;
+ set->flags = BLK_MQ_F_NO_SCHED | BLK_MQ_F_BLOCKING;
+ if (blk_mq_alloc_tag_set(set))
+ goto out_tag_set;
+
+ q = blk_mq_init_queue(set);
+ if (IS_ERR(q)) {
+ ret = PTR_ERR(q);
+ goto out_queue;
+ }
q->queuedata = dev;
- q->bsg_job_fn = job_fn;
blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
- blk_queue_softirq_done(q, bsg_softirq_done);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
@@ -338,6 +382,10 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
return q;
out_cleanup_queue:
blk_cleanup_queue(q);
+out_queue:
+ blk_mq_free_tag_set(set);
+out_tag_set:
+ kfree(bset);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
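Caller-side shape after the signature change, for transports converting along with this patch; the my_* names are illustrative assumptions, not part of the series:

q = bsg_setup_queue(dev, "my_transport", my_job_fn, my_timeout_fn,
		    sizeof(struct my_job_data));
if (IS_ERR(q))
	return PTR_ERR(q);
/* ... use the queue ... */
/* teardown now frees queue, tag set and the internal bsg_set in one go */
bsg_remove_queue(q);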
diff --git a/block/bsg.c b/block/bsg.c
index 9a442c23a715..44f6028b9567 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -471,7 +471,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
/*
* we need a proper transport to send commands, not a stacked device
*/
- if (!queue_is_rq_based(q))
+ if (!queue_is_mq(q))
return 0;
bcd = &q->bsg_dev;
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
deleted file mode 100644
index ed41aa978c4a..000000000000
--- a/block/cfq-iosched.c
+++ /dev/null
@@ -1,4916 +0,0 @@
-/*
- * CFQ, or complete fairness queueing, disk scheduler.
- *
- * Based on ideas from a previously unfinished io
- * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
- *
- * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/sched/clock.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/ktime.h>
-#include <linux/rbtree.h>
-#include <linux/ioprio.h>
-#include <linux/blktrace_api.h>
-#include <linux/blk-cgroup.h>
-#include "blk.h"
-#include "blk-wbt.h"
-
-/*
- * tunables
- */
-/* max queue in one round of service */
-static const int cfq_quantum = 8;
-static const u64 cfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 };
-/* maximum backwards seek, in KiB */
-static const int cfq_back_max = 16 * 1024;
-/* penalty of a backwards seek */
-static const int cfq_back_penalty = 2;
-static const u64 cfq_slice_sync = NSEC_PER_SEC / 10;
-static u64 cfq_slice_async = NSEC_PER_SEC / 25;
-static const int cfq_slice_async_rq = 2;
-static u64 cfq_slice_idle = NSEC_PER_SEC / 125;
-static u64 cfq_group_idle = NSEC_PER_SEC / 125;
-static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
-static const int cfq_hist_divisor = 4;
-
-/*
- * offset from end of queue service tree for idle class
- */
-#define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5)
-/* offset from end of group service tree under time slice mode */
-#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
-/* offset from end of group service under IOPS mode */
-#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
-
-/*
- * below this threshold, we consider thinktime immediate
- */
-#define CFQ_MIN_TT (2 * NSEC_PER_SEC / HZ)
-
-#define CFQ_SLICE_SCALE (5)
-#define CFQ_HW_QUEUE_MIN (5)
-#define CFQ_SERVICE_SHIFT 12
-
-#define CFQQ_SEEK_THR (sector_t)(8 * 100)
-#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
-#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
-#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
-
-#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)
-#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0])
-#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1])
-
-static struct kmem_cache *cfq_pool;
-
-#define CFQ_PRIO_LISTS IOPRIO_BE_NR
-#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
-#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
-
-#define sample_valid(samples) ((samples) > 80)
-#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
-
-/* blkio-related constants */
-#define CFQ_WEIGHT_LEGACY_MIN 10
-#define CFQ_WEIGHT_LEGACY_DFL 500
-#define CFQ_WEIGHT_LEGACY_MAX 1000
-
-struct cfq_ttime {
- u64 last_end_request;
-
- u64 ttime_total;
- u64 ttime_mean;
- unsigned long ttime_samples;
-};
-
-/*
- * Most of our rbtree usage is for sorting with min extraction, so
- * if we cache the leftmost node we don't have to walk down the tree
- * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
- * move this into the elevator for the rq sorting as well.
- */
-struct cfq_rb_root {
- struct rb_root_cached rb;
- struct rb_node *rb_rightmost;
- unsigned count;
- u64 min_vdisktime;
- struct cfq_ttime ttime;
-};
-#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT_CACHED, \
- .rb_rightmost = NULL, \
- .ttime = {.last_end_request = ktime_get_ns(),},}
-
-/*
- * Per process-grouping structure
- */
-struct cfq_queue {
- /* reference count */
- int ref;
- /* various state flags, see below */
- unsigned int flags;
- /* parent cfq_data */
- struct cfq_data *cfqd;
- /* service_tree member */
- struct rb_node rb_node;
- /* service_tree key */
- u64 rb_key;
- /* prio tree member */
- struct rb_node p_node;
- /* prio tree root we belong to, if any */
- struct rb_root *p_root;
- /* sorted list of pending requests */
- struct rb_root sort_list;
- /* if fifo isn't expired, next request to serve */
- struct request *next_rq;
- /* requests queued in sort_list */
- int queued[2];
- /* currently allocated requests */
- int allocated[2];
- /* fifo list of requests in sort_list */
- struct list_head fifo;
-
- /* time when queue got scheduled in to dispatch first request. */
- u64 dispatch_start;
- u64 allocated_slice;
- u64 slice_dispatch;
- /* time when first request from queue completed and slice started. */
- u64 slice_start;
- u64 slice_end;
- s64 slice_resid;
-
- /* pending priority requests */
- int prio_pending;
- /* number of requests that are on the dispatch list or inside driver */
- int dispatched;
-
- /* io prio of this group */
- unsigned short ioprio, org_ioprio;
- unsigned short ioprio_class, org_ioprio_class;
-
- pid_t pid;
-
- u32 seek_history;
- sector_t last_request_pos;
-
- struct cfq_rb_root *service_tree;
- struct cfq_queue *new_cfqq;
- struct cfq_group *cfqg;
- /* Number of sectors dispatched from queue in single dispatch round */
- unsigned long nr_sectors;
-};
-
-/*
- * First index in the service_trees.
- * IDLE is handled separately, so it has negative index
- */
-enum wl_class_t {
- BE_WORKLOAD = 0,
- RT_WORKLOAD = 1,
- IDLE_WORKLOAD = 2,
- CFQ_PRIO_NR,
-};
-
-/*
- * Second index in the service_trees.
- */
-enum wl_type_t {
- ASYNC_WORKLOAD = 0,
- SYNC_NOIDLE_WORKLOAD = 1,
- SYNC_WORKLOAD = 2
-};
-
-struct cfqg_stats {
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- /* number of ios merged */
- struct blkg_rwstat merged;
- /* total time spent on device in ns, may not be accurate w/ queueing */
- struct blkg_rwstat service_time;
- /* total time spent waiting in scheduler queue in ns */
- struct blkg_rwstat wait_time;
- /* number of IOs queued up */
- struct blkg_rwstat queued;
- /* total disk time and nr sectors dispatched by this group */
- struct blkg_stat time;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- /* time not charged to this cgroup */
- struct blkg_stat unaccounted_time;
- /* sum of number of ios queued across all samples */
- struct blkg_stat avg_queue_size_sum;
- /* count of samples taken for average */
- struct blkg_stat avg_queue_size_samples;
- /* how many times this group has been removed from service tree */
- struct blkg_stat dequeue;
- /* total time spent waiting for it to be assigned a timeslice. */
- struct blkg_stat group_wait_time;
- /* time spent idling for this blkcg_gq */
- struct blkg_stat idle_time;
- /* total time with empty current active q with other requests queued */
- struct blkg_stat empty_time;
- /* fields after this shouldn't be cleared on stat reset */
- u64 start_group_wait_time;
- u64 start_idle_time;
- u64 start_empty_time;
- uint16_t flags;
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
-#endif /* CONFIG_CFQ_GROUP_IOSCHED */
-};
-
-/* Per-cgroup data */
-struct cfq_group_data {
- /* must be the first member */
- struct blkcg_policy_data cpd;
-
- unsigned int weight;
- unsigned int leaf_weight;
-};
-
-/* This is per cgroup per device grouping structure */
-struct cfq_group {
- /* must be the first member */
- struct blkg_policy_data pd;
-
- /* group service_tree member */
- struct rb_node rb_node;
-
- /* group service_tree key */
- u64 vdisktime;
-
- /*
- * The number of active cfqgs and sum of their weights under this
- * cfqg. This covers this cfqg's leaf_weight and all children's
- * weights, but does not cover weights of further descendants.
- *
- * If a cfqg is on the service tree, it's active. An active cfqg
- * also activates its parent and contributes to the children_weight
- * of the parent.
- */
- int nr_active;
- unsigned int children_weight;
-
- /*
- * vfraction is the fraction of vdisktime that the tasks in this
- * cfqg are entitled to. This is determined by compounding the
- * ratios walking up from this cfqg to the root.
- *
- * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
- * vfractions on a service tree is approximately 1. The sum may
- * deviate a bit due to rounding errors and fluctuations caused by
- * cfqgs entering and leaving the service tree.
- */
- unsigned int vfraction;
-
- /*
- * There are two weights - (internal) weight is the weight of this
- * cfqg against the sibling cfqgs. leaf_weight is the wight of
- * this cfqg against the child cfqgs. For the root cfqg, both
- * weights are kept in sync for backward compatibility.
- */
- unsigned int weight;
- unsigned int new_weight;
- unsigned int dev_weight;
-
- unsigned int leaf_weight;
- unsigned int new_leaf_weight;
- unsigned int dev_leaf_weight;
-
- /* number of cfqq currently on this group */
- int nr_cfqq;
-
- /*
- * Per group busy queues average. Useful for workload slice calc. We
- * create the array for each prio class but at run time it is used
- * only for RT and BE class and slot for IDLE class remains unused.
- * This is primarily done to avoid confusion and a gcc warning.
- */
- unsigned int busy_queues_avg[CFQ_PRIO_NR];
- /*
- * rr lists of queues with requests. We maintain service trees for
- * RT and BE classes. These trees are subdivided in subclasses
- * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
- * class there is no subclassification and all the cfq queues go on
- * a single tree service_tree_idle.
- * Counts are embedded in the cfq_rb_root
- */
- struct cfq_rb_root service_trees[2][3];
- struct cfq_rb_root service_tree_idle;
-
- u64 saved_wl_slice;
- enum wl_type_t saved_wl_type;
- enum wl_class_t saved_wl_class;
-
- /* number of requests that are on the dispatch list or inside driver */
- int dispatched;
- struct cfq_ttime ttime;
- struct cfqg_stats stats; /* stats for this cfqg */
-
- /* async queue for each priority case */
- struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
- struct cfq_queue *async_idle_cfqq;
-
-};
-
-struct cfq_io_cq {
- struct io_cq icq; /* must be the first member */
- struct cfq_queue *cfqq[2];
- struct cfq_ttime ttime;
- int ioprio; /* the current ioprio */
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- uint64_t blkcg_serial_nr; /* the current blkcg serial */
-#endif
-};
-
-/*
- * Per block device queue structure
- */
-struct cfq_data {
- struct request_queue *queue;
- /* Root service tree for cfq_groups */
- struct cfq_rb_root grp_service_tree;
- struct cfq_group *root_group;
-
- /*
- * The priority currently being served
- */
- enum wl_class_t serving_wl_class;
- enum wl_type_t serving_wl_type;
- u64 workload_expires;
- struct cfq_group *serving_group;
-
- /*
- * Each priority tree is sorted by next_request position. These
- * trees are used when determining if two or more queues are
- * interleaving requests (see cfq_close_cooperator).
- */
- struct rb_root prio_trees[CFQ_PRIO_LISTS];
-
- unsigned int busy_queues;
- unsigned int busy_sync_queues;
-
- int rq_in_driver;
- int rq_in_flight[2];
-
- /*
- * queue-depth detection
- */
- int rq_queued;
- int hw_tag;
- /*
- * hw_tag can be
- * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
- * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
- * 0 => no NCQ
- */
- int hw_tag_est_depth;
- unsigned int hw_tag_samples;
-
- /*
- * idle window management
- */
- struct hrtimer idle_slice_timer;
- struct work_struct unplug_work;
-
- struct cfq_queue *active_queue;
- struct cfq_io_cq *active_cic;
-
- sector_t last_position;
-
- /*
- * tunables, see top of file
- */
- unsigned int cfq_quantum;
- unsigned int cfq_back_penalty;
- unsigned int cfq_back_max;
- unsigned int cfq_slice_async_rq;
- unsigned int cfq_latency;
- u64 cfq_fifo_expire[2];
- u64 cfq_slice[2];
- u64 cfq_slice_idle;
- u64 cfq_group_idle;
- u64 cfq_target_latency;
-
- /*
- * Fallback dummy cfqq for extreme OOM conditions
- */
- struct cfq_queue oom_cfqq;
-
- u64 last_delayed_sync;
-};
-
-static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
-static void cfq_put_queue(struct cfq_queue *cfqq);
-
-static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
- enum wl_class_t class,
- enum wl_type_t type)
-{
- if (!cfqg)
- return NULL;
-
- if (class == IDLE_WORKLOAD)
- return &cfqg->service_tree_idle;
-
- return &cfqg->service_trees[class][type];
-}
-
-enum cfqq_state_flags {
- CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
- CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
- CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
- CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
- CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
- CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
- CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
- CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
- CFQ_CFQQ_FLAG_sync, /* synchronous queue */
- CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
- CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */
- CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
- CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
-};
-
-#define CFQ_CFQQ_FNS(name) \
-static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
-{ \
- (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
-} \
-static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
-{ \
- (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
-} \
-static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
-{ \
- return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
-}
-
-CFQ_CFQQ_FNS(on_rr);
-CFQ_CFQQ_FNS(wait_request);
-CFQ_CFQQ_FNS(must_dispatch);
-CFQ_CFQQ_FNS(must_alloc_slice);
-CFQ_CFQQ_FNS(fifo_expire);
-CFQ_CFQQ_FNS(idle_window);
-CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(slice_new);
-CFQ_CFQQ_FNS(sync);
-CFQ_CFQQ_FNS(coop);
-CFQ_CFQQ_FNS(split_coop);
-CFQ_CFQQ_FNS(deep);
-CFQ_CFQQ_FNS(wait_busy);
-#undef CFQ_CFQQ_FNS
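For readers outside the kernel, this is what one expansion of the CFQ_CFQQ_FNS() macro produced, written out for the "sync" flag (all of these accessors disappear with the file):

static inline void cfq_mark_cfqq_sync(struct cfq_queue *cfqq)
{
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_sync);
}
static inline void cfq_clear_cfqq_sync(struct cfq_queue *cfqq)
{
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_sync);
}
static inline int cfq_cfqq_sync(const struct cfq_queue *cfqq)
{
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_sync)) != 0;
}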
-
-#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
-
-/* cfqg stats flags */
-enum cfqg_stats_flags {
- CFQG_stats_waiting = 0,
- CFQG_stats_idling,
- CFQG_stats_empty,
-};
-
-#define CFQG_FLAG_FNS(name) \
-static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
-{ \
- stats->flags |= (1 << CFQG_stats_##name); \
-} \
-static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
-{ \
- stats->flags &= ~(1 << CFQG_stats_##name); \
-} \
-static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
-{ \
- return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
-} \
-
-CFQG_FLAG_FNS(waiting)
-CFQG_FLAG_FNS(idling)
-CFQG_FLAG_FNS(empty)
-#undef CFQG_FLAG_FNS
-
-/* This should be called with the queue_lock held. */
-static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
-{
- u64 now;
-
- if (!cfqg_stats_waiting(stats))
- return;
-
- now = ktime_get_ns();
- if (now > stats->start_group_wait_time)
- blkg_stat_add(&stats->group_wait_time,
- now - stats->start_group_wait_time);
- cfqg_stats_clear_waiting(stats);
-}
-
-/* This should be called with the queue_lock held. */
-static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg)
-{
- struct cfqg_stats *stats = &cfqg->stats;
-
- if (cfqg_stats_waiting(stats))
- return;
- if (cfqg == curr_cfqg)
- return;
- stats->start_group_wait_time = ktime_get_ns();
- cfqg_stats_mark_waiting(stats);
-}
-
-/* This should be called with the queue_lock held. */
-static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
-{
- u64 now;
-
- if (!cfqg_stats_empty(stats))
- return;
-
- now = ktime_get_ns();
- if (now > stats->start_empty_time)
- blkg_stat_add(&stats->empty_time,
- now - stats->start_empty_time);
- cfqg_stats_clear_empty(stats);
-}
-
-static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
-{
- blkg_stat_add(&cfqg->stats.dequeue, 1);
-}
-
-static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
-{
- struct cfqg_stats *stats = &cfqg->stats;
-
- if (blkg_rwstat_total(&stats->queued))
- return;
-
- /*
- * group is already marked empty. This can happen if cfqq got new
- * request in parent group and moved to this group while being added
- * to service tree. Just ignore the event and move on.
- */
- if (cfqg_stats_empty(stats))
- return;
-
- stats->start_empty_time = ktime_get_ns();
- cfqg_stats_mark_empty(stats);
-}
-
-static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
-{
- struct cfqg_stats *stats = &cfqg->stats;
-
- if (cfqg_stats_idling(stats)) {
- u64 now = ktime_get_ns();
-
- if (now > stats->start_idle_time)
- blkg_stat_add(&stats->idle_time,
- now - stats->start_idle_time);
- cfqg_stats_clear_idling(stats);
- }
-}
-
-static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
-{
- struct cfqg_stats *stats = &cfqg->stats;
-
- BUG_ON(cfqg_stats_idling(stats));
-
- stats->start_idle_time = ktime_get_ns();
- cfqg_stats_mark_idling(stats);
-}
-
-static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
-{
- struct cfqg_stats *stats = &cfqg->stats;
-
- blkg_stat_add(&stats->avg_queue_size_sum,
- blkg_rwstat_total(&stats->queued));
- blkg_stat_add(&stats->avg_queue_size_samples, 1);
- cfqg_stats_update_group_wait_time(stats);
-}
-
-#else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
-
-static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg, struct cfq_group *curr_cfqg) { }
-static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
-static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
-static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
-static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
-static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
-static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
-
-#endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-
-static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
-{
- return pd ? container_of(pd, struct cfq_group, pd) : NULL;
-}
-
-static struct cfq_group_data
-*cpd_to_cfqgd(struct blkcg_policy_data *cpd)
-{
- return cpd ? container_of(cpd, struct cfq_group_data, cpd) : NULL;
-}
-
-static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
-{
- return pd_to_blkg(&cfqg->pd);
-}
-
-static struct blkcg_policy blkcg_policy_cfq;
-
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
- return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
-}
-
-static struct cfq_group_data *blkcg_to_cfqgd(struct blkcg *blkcg)
-{
- return cpd_to_cfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_cfq));
-}
-
-static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
-{
- struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;
-
- return pblkg ? blkg_to_cfqg(pblkg) : NULL;
-}
-
-static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
- struct cfq_group *ancestor)
-{
- return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
- cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
-}
-
-static inline void cfqg_get(struct cfq_group *cfqg)
-{
- return blkg_get(cfqg_to_blkg(cfqg));
-}
-
-static inline void cfqg_put(struct cfq_group *cfqg)
-{
- return blkg_put(cfqg_to_blkg(cfqg));
-}
-
-#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
- blk_add_cgroup_trace_msg((cfqd)->queue, \
- cfqg_to_blkg((cfqq)->cfqg)->blkcg, \
- "cfq%d%c%c " fmt, (cfqq)->pid, \
- cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
- cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
- ##args); \
-} while (0)
-
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \
- blk_add_cgroup_trace_msg((cfqd)->queue, \
- cfqg_to_blkg(cfqg)->blkcg, fmt, ##args); \
-} while (0)
-
-static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg,
- unsigned int op)
-{
- blkg_rwstat_add(&cfqg->stats.queued, op, 1);
- cfqg_stats_end_empty_time(&cfqg->stats);
- cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
-}
-
-static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
- uint64_t time, unsigned long unaccounted_time)
-{
- blkg_stat_add(&cfqg->stats.time, time);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
-#endif
-}
-
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
- unsigned int op)
-{
- blkg_rwstat_add(&cfqg->stats.queued, op, -1);
-}
-
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
- unsigned int op)
-{
- blkg_rwstat_add(&cfqg->stats.merged, op, 1);
-}
-
-static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
- u64 start_time_ns,
- u64 io_start_time_ns,
- unsigned int op)
-{
- struct cfqg_stats *stats = &cfqg->stats;
- u64 now = ktime_get_ns();
-
- if (now > io_start_time_ns)
- blkg_rwstat_add(&stats->service_time, op,
- now - io_start_time_ns);
- if (io_start_time_ns > start_time_ns)
- blkg_rwstat_add(&stats->wait_time, op,
- io_start_time_ns - start_time_ns);
-}
-
-/* @stats = 0 */
-static void cfqg_stats_reset(struct cfqg_stats *stats)
-{
- /* queued stats shouldn't be cleared */
- blkg_rwstat_reset(&stats->merged);
- blkg_rwstat_reset(&stats->service_time);
- blkg_rwstat_reset(&stats->wait_time);
- blkg_stat_reset(&stats->time);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg_stat_reset(&stats->unaccounted_time);
- blkg_stat_reset(&stats->avg_queue_size_sum);
- blkg_stat_reset(&stats->avg_queue_size_samples);
- blkg_stat_reset(&stats->dequeue);
- blkg_stat_reset(&stats->group_wait_time);
- blkg_stat_reset(&stats->idle_time);
- blkg_stat_reset(&stats->empty_time);
-#endif
-}
-
-/* @to += @from */
-static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
-{
- /* queued stats shouldn't be cleared */
- blkg_rwstat_add_aux(&to->merged, &from->merged);
- blkg_rwstat_add_aux(&to->service_time, &from->service_time);
- blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
- blkg_stat_add_aux(&from->time, &from->time);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
- blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
- blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
- blkg_stat_add_aux(&to->dequeue, &from->dequeue);
- blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
- blkg_stat_add_aux(&to->idle_time, &from->idle_time);
- blkg_stat_add_aux(&to->empty_time, &from->empty_time);
-#endif
-}
-
-/*
- * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
- * recursive stats can still account for the amount used by this cfqg after
- * it's gone.
- */
-static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
-{
- struct cfq_group *parent = cfqg_parent(cfqg);
-
- lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);
-
- if (unlikely(!parent))
- return;
-
- cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
- cfqg_stats_reset(&cfqg->stats);
-}
-
-#else /* CONFIG_CFQ_GROUP_IOSCHED */
-
-static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
-static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
- struct cfq_group *ancestor)
-{
- return true;
-}
-static inline void cfqg_get(struct cfq_group *cfqg) { }
-static inline void cfqg_put(struct cfq_group *cfqg) { }
-
-#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
- cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
- cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
- ##args)
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
-
-static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
- struct cfq_group *curr_cfqg, unsigned int op) { }
-static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
- uint64_t time, unsigned long unaccounted_time) { }
-static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg,
- unsigned int op) { }
-static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg,
- unsigned int op) { }
-static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
- u64 start_time_ns,
- u64 io_start_time_ns,
- unsigned int op) { }
-
-#endif /* CONFIG_CFQ_GROUP_IOSCHED */
-
-#define cfq_log(cfqd, fmt, args...) \
- blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
-
-/* Traverses through cfq group service trees */
-#define for_each_cfqg_st(cfqg, i, j, st) \
- for (i = 0; i <= IDLE_WORKLOAD; i++) \
- for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
- : &cfqg->service_tree_idle; \
- (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
- (i == IDLE_WORKLOAD && j == 0); \
- j++, st = i < IDLE_WORKLOAD ? \
- &cfqg->service_trees[i][j]: NULL) \
-
-static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
- struct cfq_ttime *ttime, bool group_idle)
-{
- u64 slice;
- if (!sample_valid(ttime->ttime_samples))
- return false;
- if (group_idle)
- slice = cfqd->cfq_group_idle;
- else
- slice = cfqd->cfq_slice_idle;
- return ttime->ttime_mean > slice;
-}
-
-static inline bool iops_mode(struct cfq_data *cfqd)
-{
- /*
- * If we are not idling on queues and it is a NCQ drive, parallel
- * execution of requests is on and measuring time is not possible
- * in most of the cases until and unless we drive shallower queue
- * depths and that becomes a performance bottleneck. In such cases
- * switch to start providing fairness in terms of number of IOs.
- */
- if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
- return true;
- else
- return false;
-}
-
-static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
-{
- if (cfq_class_idle(cfqq))
- return IDLE_WORKLOAD;
- if (cfq_class_rt(cfqq))
- return RT_WORKLOAD;
- return BE_WORKLOAD;
-}
-
-
-static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
-{
- if (!cfq_cfqq_sync(cfqq))
- return ASYNC_WORKLOAD;
- if (!cfq_cfqq_idle_window(cfqq))
- return SYNC_NOIDLE_WORKLOAD;
- return SYNC_WORKLOAD;
-}
-
-static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
- struct cfq_data *cfqd,
- struct cfq_group *cfqg)
-{
- if (wl_class == IDLE_WORKLOAD)
- return cfqg->service_tree_idle.count;
-
- return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
- cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
- cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
-}
-
-static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
- struct cfq_group *cfqg)
-{
- return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
- cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
-}
-
-static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
- struct cfq_io_cq *cic, struct bio *bio);
-
-static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
-{
- /* cic->icq is the first member, %NULL will convert to %NULL */
- return container_of(icq, struct cfq_io_cq, icq);
-}
-
-static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
- struct io_context *ioc)
-{
- if (ioc)
- return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
- return NULL;
-}
-
-static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
-{
- return cic->cfqq[is_sync];
-}
-
-static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
- bool is_sync)
-{
- cic->cfqq[is_sync] = cfqq;
-}
-
-static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
-{
- return cic->icq.q->elevator->elevator_data;
-}
-
-/*
- * scheduler run of queue, if there are requests pending and no one in the
- * driver that will restart queueing
- */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
-{
- if (cfqd->busy_queues) {
- cfq_log(cfqd, "schedule dispatch");
- kblockd_schedule_work(&cfqd->unplug_work);
- }
-}
-
-/*
- * Scale schedule slice based on io priority. Use the sync time slice only
- * if a queue is marked sync and has sync io queued. A sync queue with async
- * io only, should not get full sync slice length.
- */
-static inline u64 cfq_prio_slice(struct cfq_data *cfqd, bool sync,
- unsigned short prio)
-{
- u64 base_slice = cfqd->cfq_slice[sync];
- u64 slice = div_u64(base_slice, CFQ_SLICE_SCALE);
-
- WARN_ON(prio >= IOPRIO_BE_NR);
-
- return base_slice + (slice * (4 - prio));
-}
-
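For a concrete feel of the formula above, here is a standalone sketch (not part of this diff), assuming CFQ_SLICE_SCALE is 5 as in the historical cfq defaults: each ioprio step away from the default of 4 moves the slice by one fifth of the base.

#include <stdio.h>

#define CFQ_SLICE_SCALE 5	/* assumed; the historical cfq constant */

/* same shape as cfq_prio_slice(): base + base/SCALE * (4 - prio) */
static long long prio_slice(long long base_slice, int prio)
{
	return base_slice + (base_slice / CFQ_SLICE_SCALE) * (4 - prio);
}

int main(void)
{
	/* hypothetical 100ms base slice, in ns */
	for (int prio = 0; prio < 8; prio++)
		printf("prio %d -> %lld ns\n", prio,
		       prio_slice(100000000LL, prio));
	return 0;
}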
-static inline u64
-cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
-}
-
-/**
- * cfqg_scale_charge - scale disk time charge according to cfqg weight
- * @charge: disk time being charged
- * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
- *
- * Scale @charge according to @vfraction, which is in range (0, 1]. The
- * scaling is inversely proportional.
- *
- * scaled = charge / vfraction
- *
- * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
- */
-static inline u64 cfqg_scale_charge(u64 charge,
- unsigned int vfraction)
-{
- u64 c = charge << CFQ_SERVICE_SHIFT; /* make it fixed point */
-
- /* charge / vfraction */
- c <<= CFQ_SERVICE_SHIFT;
- return div_u64(c, vfraction);
-}
-
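The double shift above is what keeps the result in fixed point: scaled = (charge << 2*SHIFT) / vfraction. A minimal standalone sketch of the same arithmetic, assuming CFQ_SERVICE_SHIFT is 12 as in the historical cfq code:

#include <stdio.h>
#include <stdint.h>

#define SERVICE_SHIFT 12	/* assumed value of CFQ_SERVICE_SHIFT */

/* same shape as cfqg_scale_charge(): scaled = charge / vfraction,
 * with both vfraction and the result in fixed point */
static uint64_t scale_charge(uint64_t charge, unsigned int vfraction)
{
	uint64_t c = charge << SERVICE_SHIFT;	/* make it fixed point */

	c <<= SERVICE_SHIFT;			/* pre-scale for the divide */
	return c / vfraction;
}

int main(void)
{
	unsigned int quarter = (1u << SERVICE_SHIFT) / 4; /* vfraction = 0.25 */
	uint64_t scaled = scale_charge(1000, quarter);

	/* 1000 / 0.25 = 4000, still in fixed point */
	printf("%llu\n", (unsigned long long)(scaled >> SERVICE_SHIFT));
	return 0;
}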
-static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
-{
- s64 delta = (s64)(vdisktime - min_vdisktime);
- if (delta > 0)
- min_vdisktime = vdisktime;
-
- return min_vdisktime;
-}
-
-static void update_min_vdisktime(struct cfq_rb_root *st)
-{
- if (!RB_EMPTY_ROOT(&st->rb.rb_root)) {
- struct cfq_group *cfqg = rb_entry_cfqg(st->rb.rb_leftmost);
-
- st->min_vdisktime = max_vdisktime(st->min_vdisktime,
- cfqg->vdisktime);
- }
-}
-
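The (s64)(vdisktime - min_vdisktime) cast above is the usual wrap-safe comparison for free-running u64 counters: subtract first, then read the difference as signed, so ordering stays correct across overflow. A standalone sketch of the idiom with hypothetical values:

#include <stdio.h>
#include <stdint.h>

/* "a is after b" even if the counters have wrapped around */
static int after(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

int main(void)
{
	uint64_t near_max = UINT64_MAX - 5;	/* counter just before wrapping */
	uint64_t wrapped = 10;			/* same counter, 16 ticks later */

	printf("%d\n", after(wrapped, near_max));	/* 1: wrapped is newer */
	printf("%d\n", wrapped > near_max);		/* 0: naive compare lies */
	return 0;
}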
-/*
- * Get the averaged number of queues of RT/BE priority. The average is
- * updated with a formula that gives more weight to higher numbers, so it
- * follows sudden increases quickly and decreases slowly.
- */
-
-static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
- struct cfq_group *cfqg, bool rt)
-{
- unsigned min_q, max_q;
- unsigned mult = cfq_hist_divisor - 1;
- unsigned round = cfq_hist_divisor / 2;
- unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
-
- min_q = min(cfqg->busy_queues_avg[rt], busy);
- max_q = max(cfqg->busy_queues_avg[rt], busy);
- cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
- cfq_hist_divisor;
- return cfqg->busy_queues_avg[rt];
-}
-
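Numerically, the update above adopts an increase almost at once and decays by roughly a quarter per step afterwards. A standalone sketch, assuming cfq_hist_divisor is 4 as in the historical cfq code:

#include <stdio.h>

#define HIST_DIVISOR 4	/* assumed value of cfq_hist_divisor */

/* same formula as cfq_group_get_avg_queues() */
static unsigned update_avg(unsigned avg, unsigned busy)
{
	unsigned mult = HIST_DIVISOR - 1;
	unsigned round = HIST_DIVISOR / 2;
	unsigned min_q = avg < busy ? avg : busy;
	unsigned max_q = avg > busy ? avg : busy;

	return (mult * max_q + min_q + round) / HIST_DIVISOR;
}

int main(void)
{
	unsigned avg = 0;

	avg = update_avg(avg, 8);	/* sudden burst: jumps to 6 */
	printf("after burst: %u\n", avg);
	for (int i = 0; i < 4; i++) {	/* burst over: 5, 4, 3, 2 */
		avg = update_avg(avg, 0);
		printf("decay: %u\n", avg);
	}
	return 0;
}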
-static inline u64
-cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
-{
- return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
-}
-
-static inline u64
-cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- u64 slice = cfq_prio_to_slice(cfqd, cfqq);
- if (cfqd->cfq_latency) {
- /*
- * interested queues (we consider only the ones with the same
- * priority class in the cfq group)
- */
- unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
- cfq_class_rt(cfqq));
- u64 sync_slice = cfqd->cfq_slice[1];
- u64 expect_latency = sync_slice * iq;
- u64 group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
-
- if (expect_latency > group_slice) {
- u64 base_low_slice = 2 * cfqd->cfq_slice_idle;
- u64 low_slice;
-
- /* scale low_slice according to IO priority
- * and sync vs async */
- low_slice = div64_u64(base_low_slice*slice, sync_slice);
- low_slice = min(slice, low_slice);
- /* the adapted slice value is scaled to fit all iqs
- * into the target latency */
- slice = div64_u64(slice*group_slice, expect_latency);
- slice = max(slice, low_slice);
- }
- }
- return slice;
-}
-
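To make the scaling above concrete: when sync_slice times the number of interested queues exceeds the group slice, each queue's slice shrinks proportionally, floored by a value derived from cfq_slice_idle. A standalone sketch with hypothetical numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical values, all in ms for readability */
	uint64_t slice = 100;		/* this queue's priority slice */
	uint64_t sync_slice = 100;	/* cfqd->cfq_slice[1] */
	uint64_t iq = 4;		/* interested queues in the class */
	uint64_t group_slice = 200;	/* group's share of target latency */
	uint64_t slice_idle = 8;	/* cfqd->cfq_slice_idle */

	uint64_t expect_latency = sync_slice * iq;	/* 400 > 200 */
	if (expect_latency > group_slice) {
		uint64_t base_low = 2 * slice_idle;
		uint64_t low = base_low * slice / sync_slice;	/* 16 */

		if (low > slice)
			low = slice;
		slice = slice * group_slice / expect_latency;	/* 50 */
		if (slice < low)
			slice = low;
	}
	printf("scaled slice: %llu ms\n", (unsigned long long)slice);
	return 0;
}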
-static inline void
-cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- u64 slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
- u64 now = ktime_get_ns();
-
- cfqq->slice_start = now;
- cfqq->slice_end = now + slice;
- cfqq->allocated_slice = slice;
- cfq_log_cfqq(cfqd, cfqq, "set_slice=%llu", cfqq->slice_end - now);
-}
-
-/*
- * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
- * isn't valid until the first request from the dispatch is activated
- * and the slice time set.
- */
-static inline bool cfq_slice_used(struct cfq_queue *cfqq)
-{
- if (cfq_cfqq_slice_new(cfqq))
- return false;
- if (ktime_get_ns() < cfqq->slice_end)
- return false;
-
- return true;
-}
-
-/*
- * Lifted from AS - choose which of rq1 and rq2 is best served now.
- * We choose the request that is closest to the head right now. Distance
- * behind the head is penalized and only allowed to a certain extent.
- */
-static struct request *
-cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
-{
- sector_t s1, s2, d1 = 0, d2 = 0;
- unsigned long back_max;
-#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
-#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
- unsigned wrap = 0; /* bit mask: requests behind the disk head? */
-
- if (rq1 == NULL || rq1 == rq2)
- return rq2;
- if (rq2 == NULL)
- return rq1;
-
- if (rq_is_sync(rq1) != rq_is_sync(rq2))
- return rq_is_sync(rq1) ? rq1 : rq2;
-
- if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
- return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;
-
- s1 = blk_rq_pos(rq1);
- s2 = blk_rq_pos(rq2);
-
- /*
- * by definition, 1KiB is 2 sectors
- */
- back_max = cfqd->cfq_back_max * 2;
-
- /*
- * Strict one way elevator _except_ in the case where we allow
- * short backward seeks which are biased as twice the cost of a
- * similar forward seek.
- */
- if (s1 >= last)
- d1 = s1 - last;
- else if (s1 + back_max >= last)
- d1 = (last - s1) * cfqd->cfq_back_penalty;
- else
- wrap |= CFQ_RQ1_WRAP;
-
- if (s2 >= last)
- d2 = s2 - last;
- else if (s2 + back_max >= last)
- d2 = (last - s2) * cfqd->cfq_back_penalty;
- else
- wrap |= CFQ_RQ2_WRAP;
-
- /* Found required data */
-
- /*
- * By doing switch() on the bit mask "wrap" we avoid having to
- * check two variables for all permutations: --> faster!
- */
- switch (wrap) {
- case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
- if (d1 < d2)
- return rq1;
- else if (d2 < d1)
- return rq2;
- else {
- if (s1 >= s2)
- return rq1;
- else
- return rq2;
- }
-
- case CFQ_RQ2_WRAP:
- return rq1;
- case CFQ_RQ1_WRAP:
- return rq2;
- case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
- default:
- /*
- * Since both rqs are wrapped,
- * start with the one that's further behind head
- * (--> only *one* back seek required),
- * since back seek takes more time than forward.
- */
- if (s1 <= s2)
- return rq1;
- else
- return rq2;
- }
-}
-
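The distance costs above can be exercised in isolation: forward seeks cost their raw distance, short backward seeks within back_max cost cfq_back_penalty times the distance, and anything further behind the head "wraps". A standalone sketch, assuming the historical defaults of back_max = 16384 sectors and a penalty of 2:

#include <stdio.h>

#define BACK_MAX 16384	/* assumed cfq_back_max default, in sectors */
#define BACK_PENALTY 2	/* assumed cfq_back_penalty default */

/* distance cost as in cfq_choose_req(); returns -1 for a "wrapped" request */
static long long cost(unsigned long long s, unsigned long long last)
{
	if (s >= last)
		return s - last;			/* forward seek */
	if (s + BACK_MAX >= last)
		return (last - s) * BACK_PENALTY;	/* short back seek, penalized */
	return -1;					/* too far behind: wraps */
}

int main(void)
{
	unsigned long long last = 100000;

	printf("%lld\n", cost(100100, last));	/* 100: forward */
	printf("%lld\n", cost(99900, last));	/* 200: backward, doubled */
	printf("%lld\n", cost(1000, last));	/* -1:  wrapped */
	return 0;
}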
-static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
-{
- /* Service tree is empty */
- if (!root->count)
- return NULL;
-
- return rb_entry(rb_first_cached(&root->rb), struct cfq_queue, rb_node);
-}
-
-static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
-{
- return rb_entry_cfqg(rb_first_cached(&root->rb));
-}
-
-static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
-{
- if (root->rb_rightmost == n)
- root->rb_rightmost = rb_prev(n);
-
- rb_erase_cached(n, &root->rb);
- RB_CLEAR_NODE(n);
-
- --root->count;
-}
-
-/*
- * would be nice to take fifo expire time into account as well
- */
-static struct request *
-cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct request *last)
-{
- struct rb_node *rbnext = rb_next(&last->rb_node);
- struct rb_node *rbprev = rb_prev(&last->rb_node);
- struct request *next = NULL, *prev = NULL;
-
- BUG_ON(RB_EMPTY_NODE(&last->rb_node));
-
- if (rbprev)
- prev = rb_entry_rq(rbprev);
-
- if (rbnext)
- next = rb_entry_rq(rbnext);
- else {
- rbnext = rb_first(&cfqq->sort_list);
- if (rbnext && rbnext != &last->rb_node)
- next = rb_entry_rq(rbnext);
- }
-
- return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
-}
-
-static u64 cfq_slice_offset(struct cfq_data *cfqd,
- struct cfq_queue *cfqq)
-{
- /*
- * just an approximation, should be ok.
- */
- return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
- cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
-}
-
-static inline s64
-cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
-{
- return cfqg->vdisktime - st->min_vdisktime;
-}
-
-static void
-__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
-{
- struct rb_node **node = &st->rb.rb_root.rb_node;
- struct rb_node *parent = NULL;
- struct cfq_group *__cfqg;
- s64 key = cfqg_key(st, cfqg);
- bool leftmost = true, rightmost = true;
-
- while (*node != NULL) {
- parent = *node;
- __cfqg = rb_entry_cfqg(parent);
-
- if (key < cfqg_key(st, __cfqg)) {
- node = &parent->rb_left;
- rightmost = false;
- } else {
- node = &parent->rb_right;
- leftmost = false;
- }
- }
-
- if (rightmost)
- st->rb_rightmost = &cfqg->rb_node;
-
- rb_link_node(&cfqg->rb_node, parent, node);
- rb_insert_color_cached(&cfqg->rb_node, &st->rb, leftmost);
-}
-
-/*
- * This has to be called only on activation of cfqg
- */
-static void
-cfq_update_group_weight(struct cfq_group *cfqg)
-{
- if (cfqg->new_weight) {
- cfqg->weight = cfqg->new_weight;
- cfqg->new_weight = 0;
- }
-}
-
-static void
-cfq_update_group_leaf_weight(struct cfq_group *cfqg)
-{
- BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-
- if (cfqg->new_leaf_weight) {
- cfqg->leaf_weight = cfqg->new_leaf_weight;
- cfqg->new_leaf_weight = 0;
- }
-}
-
-static void
-cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
-{
- unsigned int vfr = 1 << CFQ_SERVICE_SHIFT; /* start with 1 */
- struct cfq_group *pos = cfqg;
- struct cfq_group *parent;
- bool propagate;
-
- /* add to the service tree */
- BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
-
- /*
- * Update leaf_weight. We cannot update weight at this point
- * because cfqg might already have been activated and is
-	 * contributing its current weight to the parent's children_weight.
- */
- cfq_update_group_leaf_weight(cfqg);
- __cfq_group_service_tree_add(st, cfqg);
-
- /*
- * Activate @cfqg and calculate the portion of vfraction @cfqg is
- * entitled to. vfraction is calculated by walking the tree
- * towards the root calculating the fraction it has at each level.
- * The compounded ratio is how much vfraction @cfqg owns.
- *
-	 * Start with the proportion the tasks in this cfqg have against its
-	 * active children cfqgs - its leaf_weight against children_weight.
- */
- propagate = !pos->nr_active++;
- pos->children_weight += pos->leaf_weight;
- vfr = vfr * pos->leaf_weight / pos->children_weight;
-
- /*
- * Compound ->weight walking up the tree. Both activation and
- * vfraction calculation are done in the same loop. Propagation
- * stops once an already activated node is met. vfraction
- * calculation should always continue to the root.
- */
- while ((parent = cfqg_parent(pos))) {
- if (propagate) {
- cfq_update_group_weight(pos);
- propagate = !parent->nr_active++;
- parent->children_weight += pos->weight;
- }
- vfr = vfr * pos->weight / parent->children_weight;
- pos = parent;
- }
-
- cfqg->vfraction = max_t(unsigned, vfr, 1);
-}
-
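The vfraction loop above compounds one ratio per level. For a two-level hierarchy, a group's overall share is its leaf_weight over its own children_weight, times its weight over the parent's children_weight. A standalone fixed-point sketch (again assuming CFQ_SERVICE_SHIFT is 12, with hypothetical weights):

#include <stdio.h>

#define SERVICE_SHIFT 12	/* assumed value of CFQ_SERVICE_SHIFT */

int main(void)
{
	unsigned vfr = 1u << SERVICE_SHIFT;	/* start with 1.0 */

	/* level 0: tasks in the cfqg vs its active children,
	 * hypothetical leaf_weight 100 of children_weight 300 */
	vfr = vfr * 100 / 300;

	/* level 1: the cfqg vs its siblings under the parent,
	 * hypothetical weight 500 of children_weight 1000 */
	vfr = vfr * 500 / 1000;

	/* ~1/3 * 1/2 = ~1/6 of the device */
	printf("vfraction = %u/%u\n", vfr, 1u << SERVICE_SHIFT);
	return 0;
}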
-static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
-{
- if (!iops_mode(cfqd))
- return CFQ_SLICE_MODE_GROUP_DELAY;
- else
- return CFQ_IOPS_MODE_GROUP_DELAY;
-}
-
-static void
-cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
-{
- struct cfq_rb_root *st = &cfqd->grp_service_tree;
- struct cfq_group *__cfqg;
- struct rb_node *n;
-
- cfqg->nr_cfqq++;
- if (!RB_EMPTY_NODE(&cfqg->rb_node))
- return;
-
-	/*
-	 * Currently put the group at the end. Later, implement something
-	 * so that groups get a lower vtime based on their weights, so that
-	 * a group does not lose its entire share if it was not continuously
-	 * backlogged.
-	 */
- n = st->rb_rightmost;
- if (n) {
- __cfqg = rb_entry_cfqg(n);
- cfqg->vdisktime = __cfqg->vdisktime +
- cfq_get_cfqg_vdisktime_delay(cfqd);
- } else
- cfqg->vdisktime = st->min_vdisktime;
- cfq_group_service_tree_add(st, cfqg);
-}
-
-static void
-cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
-{
- struct cfq_group *pos = cfqg;
- bool propagate;
-
- /*
- * Undo activation from cfq_group_service_tree_add(). Deactivate
- * @cfqg and propagate deactivation upwards.
- */
- propagate = !--pos->nr_active;
- pos->children_weight -= pos->leaf_weight;
-
- while (propagate) {
- struct cfq_group *parent = cfqg_parent(pos);
-
- /* @pos has 0 nr_active at this point */
- WARN_ON_ONCE(pos->children_weight);
- pos->vfraction = 0;
-
- if (!parent)
- break;
-
- propagate = !--parent->nr_active;
- parent->children_weight -= pos->weight;
- pos = parent;
- }
-
- /* remove from the service tree */
- if (!RB_EMPTY_NODE(&cfqg->rb_node))
- cfq_rb_erase(&cfqg->rb_node, st);
-}
-
-static void
-cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
-{
- struct cfq_rb_root *st = &cfqd->grp_service_tree;
-
- BUG_ON(cfqg->nr_cfqq < 1);
- cfqg->nr_cfqq--;
-
- /* If there are other cfq queues under this group, don't delete it */
- if (cfqg->nr_cfqq)
- return;
-
- cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
- cfq_group_service_tree_del(st, cfqg);
- cfqg->saved_wl_slice = 0;
- cfqg_stats_update_dequeue(cfqg);
-}
-
-static inline u64 cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
- u64 *unaccounted_time)
-{
- u64 slice_used;
- u64 now = ktime_get_ns();
-
- /*
- * Queue got expired before even a single request completed or
- * got expired immediately after first request completion.
- */
- if (!cfqq->slice_start || cfqq->slice_start == now) {
-		/*
-		 * Also charge the seek time incurred to the group; otherwise,
-		 * if there are multiple queues in the group, each can dispatch
-		 * a single request on seeky media and cause lots of seek time
-		 * that the group will never account for.
-		 */
- slice_used = max_t(u64, (now - cfqq->dispatch_start),
- jiffies_to_nsecs(1));
- } else {
- slice_used = now - cfqq->slice_start;
- if (slice_used > cfqq->allocated_slice) {
- *unaccounted_time = slice_used - cfqq->allocated_slice;
- slice_used = cfqq->allocated_slice;
- }
- if (cfqq->slice_start > cfqq->dispatch_start)
- *unaccounted_time += cfqq->slice_start -
- cfqq->dispatch_start;
- }
-
- return slice_used;
-}
-
-static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
- struct cfq_queue *cfqq)
-{
- struct cfq_rb_root *st = &cfqd->grp_service_tree;
- u64 used_sl, charge, unaccounted_sl = 0;
- int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
- - cfqg->service_tree_idle.count;
- unsigned int vfr;
- u64 now = ktime_get_ns();
-
- BUG_ON(nr_sync < 0);
- used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);
-
- if (iops_mode(cfqd))
- charge = cfqq->slice_dispatch;
- else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
- charge = cfqq->allocated_slice;
-
- /*
- * Can't update vdisktime while on service tree and cfqg->vfraction
- * is valid only while on it. Cache vfr, leave the service tree,
- * update vdisktime and go back on. The re-addition to the tree
- * will also update the weights as necessary.
- */
- vfr = cfqg->vfraction;
- cfq_group_service_tree_del(st, cfqg);
- cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
- cfq_group_service_tree_add(st, cfqg);
-
- /* This group is being expired. Save the context */
- if (cfqd->workload_expires > now) {
- cfqg->saved_wl_slice = cfqd->workload_expires - now;
- cfqg->saved_wl_type = cfqd->serving_wl_type;
- cfqg->saved_wl_class = cfqd->serving_wl_class;
- } else
- cfqg->saved_wl_slice = 0;
-
- cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
- st->min_vdisktime);
- cfq_log_cfqq(cfqq->cfqd, cfqq,
- "sl_used=%llu disp=%llu charge=%llu iops=%u sect=%lu",
- used_sl, cfqq->slice_dispatch, charge,
- iops_mode(cfqd), cfqq->nr_sectors);
- cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
- cfqg_stats_set_start_empty_time(cfqg);
-}
-
-/**
- * cfq_init_cfqg_base - initialize base part of a cfq_group
- * @cfqg: cfq_group to initialize
- *
- * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
- * is enabled or not.
- */
-static void cfq_init_cfqg_base(struct cfq_group *cfqg)
-{
- struct cfq_rb_root *st;
- int i, j;
-
- for_each_cfqg_st(cfqg, i, j, st)
- *st = CFQ_RB_ROOT;
- RB_CLEAR_NODE(&cfqg->rb_node);
-
- cfqg->ttime.last_end_request = ktime_get_ns();
-}
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
- bool on_dfl, bool reset_dev, bool is_leaf_weight);
-
-static void cfqg_stats_exit(struct cfqg_stats *stats)
-{
- blkg_rwstat_exit(&stats->merged);
- blkg_rwstat_exit(&stats->service_time);
- blkg_rwstat_exit(&stats->wait_time);
- blkg_rwstat_exit(&stats->queued);
- blkg_stat_exit(&stats->time);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- blkg_stat_exit(&stats->unaccounted_time);
- blkg_stat_exit(&stats->avg_queue_size_sum);
- blkg_stat_exit(&stats->avg_queue_size_samples);
- blkg_stat_exit(&stats->dequeue);
- blkg_stat_exit(&stats->group_wait_time);
- blkg_stat_exit(&stats->idle_time);
- blkg_stat_exit(&stats->empty_time);
-#endif
-}
-
-static int cfqg_stats_init(struct cfqg_stats *stats, gfp_t gfp)
-{
- if (blkg_rwstat_init(&stats->merged, gfp) ||
- blkg_rwstat_init(&stats->service_time, gfp) ||
- blkg_rwstat_init(&stats->wait_time, gfp) ||
- blkg_rwstat_init(&stats->queued, gfp) ||
- blkg_stat_init(&stats->time, gfp))
- goto err;
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- if (blkg_stat_init(&stats->unaccounted_time, gfp) ||
- blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
- blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
- blkg_stat_init(&stats->dequeue, gfp) ||
- blkg_stat_init(&stats->group_wait_time, gfp) ||
- blkg_stat_init(&stats->idle_time, gfp) ||
- blkg_stat_init(&stats->empty_time, gfp))
- goto err;
-#endif
- return 0;
-err:
- cfqg_stats_exit(stats);
- return -ENOMEM;
-}
-
-static struct blkcg_policy_data *cfq_cpd_alloc(gfp_t gfp)
-{
- struct cfq_group_data *cgd;
-
- cgd = kzalloc(sizeof(*cgd), gfp);
- if (!cgd)
- return NULL;
- return &cgd->cpd;
-}
-
-static void cfq_cpd_init(struct blkcg_policy_data *cpd)
-{
- struct cfq_group_data *cgd = cpd_to_cfqgd(cpd);
- unsigned int weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
- CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
-
- if (cpd_to_blkcg(cpd) == &blkcg_root)
- weight *= 2;
-
- cgd->weight = weight;
- cgd->leaf_weight = weight;
-}
-
-static void cfq_cpd_free(struct blkcg_policy_data *cpd)
-{
- kfree(cpd_to_cfqgd(cpd));
-}
-
-static void cfq_cpd_bind(struct blkcg_policy_data *cpd)
-{
- struct blkcg *blkcg = cpd_to_blkcg(cpd);
- bool on_dfl = cgroup_subsys_on_dfl(io_cgrp_subsys);
- unsigned int weight = on_dfl ? CGROUP_WEIGHT_DFL : CFQ_WEIGHT_LEGACY_DFL;
-
- if (blkcg == &blkcg_root)
- weight *= 2;
-
- WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, false));
- WARN_ON_ONCE(__cfq_set_weight(&blkcg->css, weight, on_dfl, true, true));
-}
-
-static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
-{
- struct cfq_group *cfqg;
-
- cfqg = kzalloc_node(sizeof(*cfqg), gfp, node);
- if (!cfqg)
- return NULL;
-
- cfq_init_cfqg_base(cfqg);
- if (cfqg_stats_init(&cfqg->stats, gfp)) {
- kfree(cfqg);
- return NULL;
- }
-
- return &cfqg->pd;
-}
-
-static void cfq_pd_init(struct blkg_policy_data *pd)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
- struct cfq_group_data *cgd = blkcg_to_cfqgd(pd->blkg->blkcg);
-
- cfqg->weight = cgd->weight;
- cfqg->leaf_weight = cgd->leaf_weight;
-}
-
-static void cfq_pd_offline(struct blkg_policy_data *pd)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
- int i;
-
- for (i = 0; i < IOPRIO_BE_NR; i++) {
- if (cfqg->async_cfqq[0][i]) {
- cfq_put_queue(cfqg->async_cfqq[0][i]);
- cfqg->async_cfqq[0][i] = NULL;
- }
- if (cfqg->async_cfqq[1][i]) {
- cfq_put_queue(cfqg->async_cfqq[1][i]);
- cfqg->async_cfqq[1][i] = NULL;
- }
- }
-
- if (cfqg->async_idle_cfqq) {
- cfq_put_queue(cfqg->async_idle_cfqq);
- cfqg->async_idle_cfqq = NULL;
- }
-
- /*
- * @blkg is going offline and will be ignored by
- * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
- * that they don't get lost. If IOs complete after this point, the
- * stats for them will be lost. Oh well...
- */
- cfqg_stats_xfer_dead(cfqg);
-}
-
-static void cfq_pd_free(struct blkg_policy_data *pd)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
-
- cfqg_stats_exit(&cfqg->stats);
-	kfree(cfqg);
-}
-
-static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
-
- cfqg_stats_reset(&cfqg->stats);
-}
-
-static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
- struct blkcg *blkcg)
-{
- struct blkcg_gq *blkg;
-
- blkg = blkg_lookup(blkcg, cfqd->queue);
- if (likely(blkg))
- return blkg_to_cfqg(blkg);
- return NULL;
-}
-
-static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
-{
- cfqq->cfqg = cfqg;
- /* cfqq reference on cfqg */
- cfqg_get(cfqg);
-}
-
-static u64 cfqg_prfill_weight_device(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
-
- if (!cfqg->dev_weight)
- return 0;
- return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
-}
-
-static int cfqg_print_weight_device(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_weight_device, &blkcg_policy_cfq,
- 0, false);
- return 0;
-}
-
-static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
-
- if (!cfqg->dev_leaf_weight)
- return 0;
- return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
-}
-
-static int cfqg_print_leaf_weight_device(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq,
- 0, false);
- return 0;
-}
-
-static int cfq_print_weight(struct seq_file *sf, void *v)
-{
- struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
- struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
- unsigned int val = 0;
-
- if (cgd)
- val = cgd->weight;
-
- seq_printf(sf, "%u\n", val);
- return 0;
-}
-
-static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
-{
- struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
- struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
- unsigned int val = 0;
-
- if (cgd)
- val = cgd->leaf_weight;
-
- seq_printf(sf, "%u\n", val);
- return 0;
-}
-
-static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off,
- bool on_dfl, bool is_leaf_weight)
-{
- unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
- unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
- struct blkcg *blkcg = css_to_blkcg(of_css(of));
- struct blkg_conf_ctx ctx;
- struct cfq_group *cfqg;
- struct cfq_group_data *cfqgd;
- int ret;
- u64 v;
-
- ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
- if (ret)
- return ret;
-
- if (sscanf(ctx.body, "%llu", &v) == 1) {
- /* require "default" on dfl */
- ret = -ERANGE;
- if (!v && on_dfl)
- goto out_finish;
- } else if (!strcmp(strim(ctx.body), "default")) {
- v = 0;
- } else {
- ret = -EINVAL;
- goto out_finish;
- }
-
- cfqg = blkg_to_cfqg(ctx.blkg);
- cfqgd = blkcg_to_cfqgd(blkcg);
-
- ret = -ERANGE;
- if (!v || (v >= min && v <= max)) {
- if (!is_leaf_weight) {
- cfqg->dev_weight = v;
- cfqg->new_weight = v ?: cfqgd->weight;
- } else {
- cfqg->dev_leaf_weight = v;
- cfqg->new_leaf_weight = v ?: cfqgd->leaf_weight;
- }
- ret = 0;
- }
-out_finish:
- blkg_conf_finish(&ctx);
- return ret ?: nbytes;
-}
-
-static ssize_t cfqg_set_weight_device(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- return __cfqg_set_weight_device(of, buf, nbytes, off, false, false);
-}
-
-static ssize_t cfqg_set_leaf_weight_device(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- return __cfqg_set_weight_device(of, buf, nbytes, off, false, true);
-}
-
-static int __cfq_set_weight(struct cgroup_subsys_state *css, u64 val,
- bool on_dfl, bool reset_dev, bool is_leaf_weight)
-{
- unsigned int min = on_dfl ? CGROUP_WEIGHT_MIN : CFQ_WEIGHT_LEGACY_MIN;
- unsigned int max = on_dfl ? CGROUP_WEIGHT_MAX : CFQ_WEIGHT_LEGACY_MAX;
- struct blkcg *blkcg = css_to_blkcg(css);
- struct blkcg_gq *blkg;
- struct cfq_group_data *cfqgd;
- int ret = 0;
-
- if (val < min || val > max)
- return -ERANGE;
-
- spin_lock_irq(&blkcg->lock);
- cfqgd = blkcg_to_cfqgd(blkcg);
- if (!cfqgd) {
- ret = -EINVAL;
- goto out;
- }
-
- if (!is_leaf_weight)
- cfqgd->weight = val;
- else
- cfqgd->leaf_weight = val;
-
- hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
- struct cfq_group *cfqg = blkg_to_cfqg(blkg);
-
- if (!cfqg)
- continue;
-
- if (!is_leaf_weight) {
- if (reset_dev)
- cfqg->dev_weight = 0;
- if (!cfqg->dev_weight)
- cfqg->new_weight = cfqgd->weight;
- } else {
- if (reset_dev)
- cfqg->dev_leaf_weight = 0;
- if (!cfqg->dev_leaf_weight)
- cfqg->new_leaf_weight = cfqgd->leaf_weight;
- }
- }
-
-out:
- spin_unlock_irq(&blkcg->lock);
- return ret;
-}
-
-static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
- u64 val)
-{
- return __cfq_set_weight(css, val, false, false, false);
-}
-
-static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
- struct cftype *cft, u64 val)
-{
- return __cfq_set_weight(css, val, false, false, true);
-}
-
-static int cfqg_print_stat(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
- &blkcg_policy_cfq, seq_cft(sf)->private, false);
- return 0;
-}
-
-static int cfqg_print_rwstat(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
- &blkcg_policy_cfq, seq_cft(sf)->private, true);
- return 0;
-}
-
-static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
- &blkcg_policy_cfq, off);
- return __blkg_prfill_u64(sf, pd, sum);
-}
-
-static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
- &blkcg_policy_cfq, off);
- return __blkg_prfill_rwstat(sf, pd, &sum);
-}
-
-static int cfqg_print_stat_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_stat_recursive, &blkcg_policy_cfq,
- seq_cft(sf)->private, false);
- return 0;
-}
-
-static int cfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_rwstat_recursive, &blkcg_policy_cfq,
- seq_cft(sf)->private, true);
- return 0;
-}
-
-static u64 cfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
- int off)
-{
- u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);
-
- return __blkg_prfill_u64(sf, pd, sum >> 9);
-}
-
-static int cfqg_print_stat_sectors(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_sectors, &blkcg_policy_cfq, 0, false);
- return 0;
-}
-
-static u64 cfqg_prfill_sectors_recursive(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
- offsetof(struct blkcg_gq, stat_bytes));
- u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
- atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
-
- return __blkg_prfill_u64(sf, pd, sum >> 9);
-}
-
-static int cfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_sectors_recursive, &blkcg_policy_cfq, 0,
- false);
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
- struct blkg_policy_data *pd, int off)
-{
- struct cfq_group *cfqg = pd_to_cfqg(pd);
- u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
- u64 v = 0;
-
- if (samples) {
- v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
- v = div64_u64(v, samples);
- }
- __blkg_prfill_u64(sf, pd, v);
- return 0;
-}
-
-/* print avg_queue_size */
-static int cfqg_print_avg_queue_size(struct seq_file *sf, void *v)
-{
- blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
- cfqg_prfill_avg_queue_size, &blkcg_policy_cfq,
- 0, false);
- return 0;
-}
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
-
-static struct cftype cfq_blkcg_legacy_files[] = {
- /* on root, weight is mapped to leaf_weight */
- {
- .name = "weight_device",
- .flags = CFTYPE_ONLY_ON_ROOT,
- .seq_show = cfqg_print_leaf_weight_device,
- .write = cfqg_set_leaf_weight_device,
- },
- {
- .name = "weight",
- .flags = CFTYPE_ONLY_ON_ROOT,
- .seq_show = cfq_print_leaf_weight,
- .write_u64 = cfq_set_leaf_weight,
- },
-
- /* no such mapping necessary for !roots */
- {
- .name = "weight_device",
- .flags = CFTYPE_NOT_ON_ROOT,
- .seq_show = cfqg_print_weight_device,
- .write = cfqg_set_weight_device,
- },
- {
- .name = "weight",
- .flags = CFTYPE_NOT_ON_ROOT,
- .seq_show = cfq_print_weight,
- .write_u64 = cfq_set_weight,
- },
-
- {
- .name = "leaf_weight_device",
- .seq_show = cfqg_print_leaf_weight_device,
- .write = cfqg_set_leaf_weight_device,
- },
- {
- .name = "leaf_weight",
- .seq_show = cfq_print_leaf_weight,
- .write_u64 = cfq_set_leaf_weight,
- },
-
- /* statistics, covers only the tasks in the cfqg */
- {
- .name = "time",
- .private = offsetof(struct cfq_group, stats.time),
- .seq_show = cfqg_print_stat,
- },
- {
- .name = "sectors",
- .seq_show = cfqg_print_stat_sectors,
- },
- {
- .name = "io_service_bytes",
- .private = (unsigned long)&blkcg_policy_cfq,
- .seq_show = blkg_print_stat_bytes,
- },
- {
- .name = "io_serviced",
- .private = (unsigned long)&blkcg_policy_cfq,
- .seq_show = blkg_print_stat_ios,
- },
- {
- .name = "io_service_time",
- .private = offsetof(struct cfq_group, stats.service_time),
- .seq_show = cfqg_print_rwstat,
- },
- {
- .name = "io_wait_time",
- .private = offsetof(struct cfq_group, stats.wait_time),
- .seq_show = cfqg_print_rwstat,
- },
- {
- .name = "io_merged",
- .private = offsetof(struct cfq_group, stats.merged),
- .seq_show = cfqg_print_rwstat,
- },
- {
- .name = "io_queued",
- .private = offsetof(struct cfq_group, stats.queued),
- .seq_show = cfqg_print_rwstat,
- },
-
-	/* the same statistics which cover the cfqg and its descendants */
- {
- .name = "time_recursive",
- .private = offsetof(struct cfq_group, stats.time),
- .seq_show = cfqg_print_stat_recursive,
- },
- {
- .name = "sectors_recursive",
- .seq_show = cfqg_print_stat_sectors_recursive,
- },
- {
- .name = "io_service_bytes_recursive",
- .private = (unsigned long)&blkcg_policy_cfq,
- .seq_show = blkg_print_stat_bytes_recursive,
- },
- {
- .name = "io_serviced_recursive",
- .private = (unsigned long)&blkcg_policy_cfq,
- .seq_show = blkg_print_stat_ios_recursive,
- },
- {
- .name = "io_service_time_recursive",
- .private = offsetof(struct cfq_group, stats.service_time),
- .seq_show = cfqg_print_rwstat_recursive,
- },
- {
- .name = "io_wait_time_recursive",
- .private = offsetof(struct cfq_group, stats.wait_time),
- .seq_show = cfqg_print_rwstat_recursive,
- },
- {
- .name = "io_merged_recursive",
- .private = offsetof(struct cfq_group, stats.merged),
- .seq_show = cfqg_print_rwstat_recursive,
- },
- {
- .name = "io_queued_recursive",
- .private = offsetof(struct cfq_group, stats.queued),
- .seq_show = cfqg_print_rwstat_recursive,
- },
-#ifdef CONFIG_DEBUG_BLK_CGROUP
- {
- .name = "avg_queue_size",
- .seq_show = cfqg_print_avg_queue_size,
- },
- {
- .name = "group_wait_time",
- .private = offsetof(struct cfq_group, stats.group_wait_time),
- .seq_show = cfqg_print_stat,
- },
- {
- .name = "idle_time",
- .private = offsetof(struct cfq_group, stats.idle_time),
- .seq_show = cfqg_print_stat,
- },
- {
- .name = "empty_time",
- .private = offsetof(struct cfq_group, stats.empty_time),
- .seq_show = cfqg_print_stat,
- },
- {
- .name = "dequeue",
- .private = offsetof(struct cfq_group, stats.dequeue),
- .seq_show = cfqg_print_stat,
- },
- {
- .name = "unaccounted_time",
- .private = offsetof(struct cfq_group, stats.unaccounted_time),
- .seq_show = cfqg_print_stat,
- },
-#endif /* CONFIG_DEBUG_BLK_CGROUP */
- { } /* terminate */
-};
-
-static int cfq_print_weight_on_dfl(struct seq_file *sf, void *v)
-{
- struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
- struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
-
- seq_printf(sf, "default %u\n", cgd->weight);
- blkcg_print_blkgs(sf, blkcg, cfqg_prfill_weight_device,
- &blkcg_policy_cfq, 0, false);
- return 0;
-}
-
-static ssize_t cfq_set_weight_on_dfl(struct kernfs_open_file *of,
- char *buf, size_t nbytes, loff_t off)
-{
- char *endp;
- int ret;
- u64 v;
-
- buf = strim(buf);
-
- /* "WEIGHT" or "default WEIGHT" sets the default weight */
- v = simple_strtoull(buf, &endp, 0);
- if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
- ret = __cfq_set_weight(of_css(of), v, true, false, false);
- return ret ?: nbytes;
- }
-
- /* "MAJ:MIN WEIGHT" */
- return __cfqg_set_weight_device(of, buf, nbytes, off, true, false);
-}
-
-static struct cftype cfq_blkcg_files[] = {
- {
- .name = "weight",
- .flags = CFTYPE_NOT_ON_ROOT,
- .seq_show = cfq_print_weight_on_dfl,
- .write = cfq_set_weight_on_dfl,
- },
- { } /* terminate */
-};
-
-#else /* GROUP_IOSCHED */
-static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
- struct blkcg *blkcg)
-{
- return cfqd->root_group;
-}
-
-static inline void
-cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
- cfqq->cfqg = cfqg;
-}
-
-#endif /* GROUP_IOSCHED */
-
-/*
- * The cfqd->service_trees hold all pending cfq_queues that have
- * requests waiting to be processed. They are sorted in the order
- * in which we will service the queues.
- */
-static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- bool add_front)
-{
- struct rb_node **p, *parent;
- struct cfq_queue *__cfqq;
- u64 rb_key;
- struct cfq_rb_root *st;
- bool leftmost = true;
- int new_cfqq = 1;
- u64 now = ktime_get_ns();
-
- st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
- if (cfq_class_idle(cfqq)) {
- rb_key = CFQ_IDLE_DELAY;
- parent = st->rb_rightmost;
- if (parent && parent != &cfqq->rb_node) {
- __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
- rb_key += __cfqq->rb_key;
- } else
- rb_key += now;
- } else if (!add_front) {
- /*
- * Get our rb key offset. Subtract any residual slice
- * value carried from last service. A negative resid
- * count indicates slice overrun, and this should position
- * the next service time further away in the tree.
- */
- rb_key = cfq_slice_offset(cfqd, cfqq) + now;
- rb_key -= cfqq->slice_resid;
- cfqq->slice_resid = 0;
- } else {
- rb_key = -NSEC_PER_SEC;
- __cfqq = cfq_rb_first(st);
- rb_key += __cfqq ? __cfqq->rb_key : now;
- }
-
- if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
- new_cfqq = 0;
- /*
- * same position, nothing more to do
- */
- if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
- return;
-
- cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
- cfqq->service_tree = NULL;
- }
-
- parent = NULL;
- cfqq->service_tree = st;
- p = &st->rb.rb_root.rb_node;
- while (*p) {
- parent = *p;
- __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
-
- /*
-		 * sort by key, which represents service time.
- */
- if (rb_key < __cfqq->rb_key)
- p = &parent->rb_left;
- else {
- p = &parent->rb_right;
- leftmost = false;
- }
- }
-
- cfqq->rb_key = rb_key;
- rb_link_node(&cfqq->rb_node, parent, p);
- rb_insert_color_cached(&cfqq->rb_node, &st->rb, leftmost);
- st->count++;
- if (add_front || !new_cfqq)
- return;
- cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
-}
-
-static struct cfq_queue *
-cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
- sector_t sector, struct rb_node **ret_parent,
- struct rb_node ***rb_link)
-{
- struct rb_node **p, *parent;
- struct cfq_queue *cfqq = NULL;
-
- parent = NULL;
- p = &root->rb_node;
- while (*p) {
- struct rb_node **n;
-
- parent = *p;
- cfqq = rb_entry(parent, struct cfq_queue, p_node);
-
- /*
- * Sort strictly based on sector. Smallest to the left,
- * largest to the right.
- */
- if (sector > blk_rq_pos(cfqq->next_rq))
- n = &(*p)->rb_right;
- else if (sector < blk_rq_pos(cfqq->next_rq))
- n = &(*p)->rb_left;
- else
- break;
- p = n;
- cfqq = NULL;
- }
-
- *ret_parent = parent;
- if (rb_link)
- *rb_link = p;
- return cfqq;
-}
-
-static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- struct rb_node **p, *parent;
- struct cfq_queue *__cfqq;
-
- if (cfqq->p_root) {
- rb_erase(&cfqq->p_node, cfqq->p_root);
- cfqq->p_root = NULL;
- }
-
- if (cfq_class_idle(cfqq))
- return;
- if (!cfqq->next_rq)
- return;
-
- cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
- __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
- blk_rq_pos(cfqq->next_rq), &parent, &p);
- if (!__cfqq) {
- rb_link_node(&cfqq->p_node, parent, p);
- rb_insert_color(&cfqq->p_node, cfqq->p_root);
- } else
- cfqq->p_root = NULL;
-}
-
-/*
- * Update cfqq's position in the service tree.
- */
-static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- /*
- * Resorting requires the cfqq to be on the RR list already.
- */
- if (cfq_cfqq_on_rr(cfqq)) {
- cfq_service_tree_add(cfqd, cfqq, 0);
- cfq_prio_tree_add(cfqd, cfqq);
- }
-}
-
-/*
- * add to busy list of queues for service, trying to be fair in ordering
- * the pending list according to last request service
- */
-static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
- BUG_ON(cfq_cfqq_on_rr(cfqq));
- cfq_mark_cfqq_on_rr(cfqq);
- cfqd->busy_queues++;
- if (cfq_cfqq_sync(cfqq))
- cfqd->busy_sync_queues++;
-
- cfq_resort_rr_list(cfqd, cfqq);
-}
-
-/*
- * Called when the cfqq no longer has requests pending, remove it from
- * the service tree.
- */
-static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
- BUG_ON(!cfq_cfqq_on_rr(cfqq));
- cfq_clear_cfqq_on_rr(cfqq);
-
- if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
- cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
- cfqq->service_tree = NULL;
- }
- if (cfqq->p_root) {
- rb_erase(&cfqq->p_node, cfqq->p_root);
- cfqq->p_root = NULL;
- }
-
- cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
- BUG_ON(!cfqd->busy_queues);
- cfqd->busy_queues--;
- if (cfq_cfqq_sync(cfqq))
- cfqd->busy_sync_queues--;
-}
-
-/*
- * rb tree support functions
- */
-static void cfq_del_rq_rb(struct request *rq)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
- const int sync = rq_is_sync(rq);
-
- BUG_ON(!cfqq->queued[sync]);
- cfqq->queued[sync]--;
-
- elv_rb_del(&cfqq->sort_list, rq);
-
- if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
- /*
- * Queue will be deleted from service tree when we actually
- * expire it later. Right now just remove it from prio tree
- * as it is empty.
- */
- if (cfqq->p_root) {
- rb_erase(&cfqq->p_node, cfqq->p_root);
- cfqq->p_root = NULL;
- }
- }
-}
-
-static void cfq_add_rq_rb(struct request *rq)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
- struct cfq_data *cfqd = cfqq->cfqd;
- struct request *prev;
-
- cfqq->queued[rq_is_sync(rq)]++;
-
- elv_rb_add(&cfqq->sort_list, rq);
-
- if (!cfq_cfqq_on_rr(cfqq))
- cfq_add_cfqq_rr(cfqd, cfqq);
-
- /*
- * check if this request is a better next-serve candidate
- */
- prev = cfqq->next_rq;
- cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
-
- /*
- * adjust priority tree position, if ->next_rq changes
- */
- if (prev != cfqq->next_rq)
- cfq_prio_tree_add(cfqd, cfqq);
-
- BUG_ON(!cfqq->next_rq);
-}
-
-static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
-{
- elv_rb_del(&cfqq->sort_list, rq);
- cfqq->queued[rq_is_sync(rq)]--;
- cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
- cfq_add_rq_rb(rq);
- cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
- rq->cmd_flags);
-}
-
-static struct request *
-cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
-{
- struct task_struct *tsk = current;
- struct cfq_io_cq *cic;
- struct cfq_queue *cfqq;
-
- cic = cfq_cic_lookup(cfqd, tsk->io_context);
- if (!cic)
- return NULL;
-
- cfqq = cic_to_cfqq(cic, op_is_sync(bio->bi_opf));
- if (cfqq)
- return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));
-
- return NULL;
-}
-
-static void cfq_activate_request(struct request_queue *q, struct request *rq)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
-
- cfqd->rq_in_driver++;
- cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
- cfqd->rq_in_driver);
-
- cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
-}
-
-static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
-
- WARN_ON(!cfqd->rq_in_driver);
- cfqd->rq_in_driver--;
- cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
- cfqd->rq_in_driver);
-}
-
-static void cfq_remove_request(struct request *rq)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
-
- if (cfqq->next_rq == rq)
- cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
-
- list_del_init(&rq->queuelist);
- cfq_del_rq_rb(rq);
-
- cfqq->cfqd->rq_queued--;
- cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
- if (rq->cmd_flags & REQ_PRIO) {
- WARN_ON(!cfqq->prio_pending);
- cfqq->prio_pending--;
- }
-}
-
-static enum elv_merge cfq_merge(struct request_queue *q, struct request **req,
- struct bio *bio)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct request *__rq;
-
- __rq = cfq_find_rq_fmerge(cfqd, bio);
- if (__rq && elv_bio_merge_ok(__rq, bio)) {
- *req = __rq;
- return ELEVATOR_FRONT_MERGE;
- }
-
- return ELEVATOR_NO_MERGE;
-}
-
-static void cfq_merged_request(struct request_queue *q, struct request *req,
- enum elv_merge type)
-{
- if (type == ELEVATOR_FRONT_MERGE) {
- struct cfq_queue *cfqq = RQ_CFQQ(req);
-
- cfq_reposition_rq_rb(cfqq, req);
- }
-}
-
-static void cfq_bio_merged(struct request_queue *q, struct request *req,
- struct bio *bio)
-{
- cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_opf);
-}
-
-static void
-cfq_merged_requests(struct request_queue *q, struct request *rq,
- struct request *next)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
- struct cfq_data *cfqd = q->elevator->elevator_data;
-
- /*
- * reposition in fifo if next is older than rq
- */
- if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
- next->fifo_time < rq->fifo_time &&
- cfqq == RQ_CFQQ(next)) {
- list_move(&rq->queuelist, &next->queuelist);
- rq->fifo_time = next->fifo_time;
- }
-
- if (cfqq->next_rq == next)
- cfqq->next_rq = rq;
- cfq_remove_request(next);
- cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
-
- cfqq = RQ_CFQQ(next);
-	/*
-	 * All requests of this queue were merged into other queues; delete
-	 * it from the service tree. If it's the active_queue,
-	 * cfq_dispatch_requests() will choose to expire it or to idle.
-	 */
- if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
- cfqq != cfqd->active_queue)
- cfq_del_cfqq_rr(cfqd, cfqq);
-}
-
-static int cfq_allow_bio_merge(struct request_queue *q, struct request *rq,
- struct bio *bio)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- bool is_sync = op_is_sync(bio->bi_opf);
- struct cfq_io_cq *cic;
- struct cfq_queue *cfqq;
-
- /*
- * Disallow merge of a sync bio into an async request.
- */
- if (is_sync && !rq_is_sync(rq))
- return false;
-
- /*
-	 * Look up the cfqq that this bio will be queued with and allow
-	 * the merge only if rq is queued there.
- */
- cic = cfq_cic_lookup(cfqd, current->io_context);
- if (!cic)
- return false;
-
- cfqq = cic_to_cfqq(cic, is_sync);
- return cfqq == RQ_CFQQ(rq);
-}
-
-static int cfq_allow_rq_merge(struct request_queue *q, struct request *rq,
- struct request *next)
-{
- return RQ_CFQQ(rq) == RQ_CFQQ(next);
-}
-
-static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- hrtimer_try_to_cancel(&cfqd->idle_slice_timer);
- cfqg_stats_update_idle_time(cfqq->cfqg);
-}
-
-static void __cfq_set_active_queue(struct cfq_data *cfqd,
- struct cfq_queue *cfqq)
-{
- if (cfqq) {
- cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
- cfqd->serving_wl_class, cfqd->serving_wl_type);
- cfqg_stats_update_avg_queue_size(cfqq->cfqg);
- cfqq->slice_start = 0;
- cfqq->dispatch_start = ktime_get_ns();
- cfqq->allocated_slice = 0;
- cfqq->slice_end = 0;
- cfqq->slice_dispatch = 0;
- cfqq->nr_sectors = 0;
-
- cfq_clear_cfqq_wait_request(cfqq);
- cfq_clear_cfqq_must_dispatch(cfqq);
- cfq_clear_cfqq_must_alloc_slice(cfqq);
- cfq_clear_cfqq_fifo_expire(cfqq);
- cfq_mark_cfqq_slice_new(cfqq);
-
- cfq_del_timer(cfqd, cfqq);
- }
-
- cfqd->active_queue = cfqq;
-}
-
-/*
- * current cfqq expired its slice (or was too idle), select new one
- */
-static void
-__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- bool timed_out)
-{
- cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
-
- if (cfq_cfqq_wait_request(cfqq))
- cfq_del_timer(cfqd, cfqq);
-
- cfq_clear_cfqq_wait_request(cfqq);
- cfq_clear_cfqq_wait_busy(cfqq);
-
- /*
- * If this cfqq is shared between multiple processes, check to
- * make sure that those processes are still issuing I/Os within
- * the mean seek distance. If not, it may be time to break the
- * queues apart again.
- */
- if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
- cfq_mark_cfqq_split_coop(cfqq);
-
- /*
- * store what was left of this slice, if the queue idled/timed out
- */
- if (timed_out) {
- if (cfq_cfqq_slice_new(cfqq))
- cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
- else
- cfqq->slice_resid = cfqq->slice_end - ktime_get_ns();
- cfq_log_cfqq(cfqd, cfqq, "resid=%lld", cfqq->slice_resid);
- }
-
- cfq_group_served(cfqd, cfqq->cfqg, cfqq);
-
- if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
- cfq_del_cfqq_rr(cfqd, cfqq);
-
- cfq_resort_rr_list(cfqd, cfqq);
-
- if (cfqq == cfqd->active_queue)
- cfqd->active_queue = NULL;
-
- if (cfqd->active_cic) {
- put_io_context(cfqd->active_cic->icq.ioc);
- cfqd->active_cic = NULL;
- }
-}
-
-static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
-{
- struct cfq_queue *cfqq = cfqd->active_queue;
-
- if (cfqq)
- __cfq_slice_expired(cfqd, cfqq, timed_out);
-}
-
-/*
- * Get next queue for service. Unless we have a queue preemption,
- * we'll simply select the first cfqq in the service tree.
- */
-static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
-{
- struct cfq_rb_root *st = st_for(cfqd->serving_group,
- cfqd->serving_wl_class, cfqd->serving_wl_type);
-
- if (!cfqd->rq_queued)
- return NULL;
-
- /* There is nothing to dispatch */
- if (!st)
- return NULL;
- if (RB_EMPTY_ROOT(&st->rb.rb_root))
- return NULL;
- return cfq_rb_first(st);
-}
-
-static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
-{
- struct cfq_group *cfqg;
- struct cfq_queue *cfqq;
- int i, j;
- struct cfq_rb_root *st;
-
- if (!cfqd->rq_queued)
- return NULL;
-
- cfqg = cfq_get_next_cfqg(cfqd);
- if (!cfqg)
- return NULL;
-
- for_each_cfqg_st(cfqg, i, j, st) {
- cfqq = cfq_rb_first(st);
- if (cfqq)
- return cfqq;
- }
- return NULL;
-}
-
-/*
- * Get and set a new active queue for service.
- */
-static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
- struct cfq_queue *cfqq)
-{
- if (!cfqq)
- cfqq = cfq_get_next_queue(cfqd);
-
- __cfq_set_active_queue(cfqd, cfqq);
- return cfqq;
-}
-
-static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
- struct request *rq)
-{
- if (blk_rq_pos(rq) >= cfqd->last_position)
- return blk_rq_pos(rq) - cfqd->last_position;
- else
- return cfqd->last_position - blk_rq_pos(rq);
-}
-
-static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct request *rq)
-{
- return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
-}
-
-static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
- struct cfq_queue *cur_cfqq)
-{
- struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
- struct rb_node *parent, *node;
- struct cfq_queue *__cfqq;
- sector_t sector = cfqd->last_position;
-
- if (RB_EMPTY_ROOT(root))
- return NULL;
-
- /*
- * First, if we find a request starting at the end of the last
- * request, choose it.
- */
- __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
- if (__cfqq)
- return __cfqq;
-
- /*
- * If the exact sector wasn't found, the parent of the NULL leaf
- * will contain the closest sector.
- */
- __cfqq = rb_entry(parent, struct cfq_queue, p_node);
- if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
- return __cfqq;
-
- if (blk_rq_pos(__cfqq->next_rq) < sector)
- node = rb_next(&__cfqq->p_node);
- else
- node = rb_prev(&__cfqq->p_node);
- if (!node)
- return NULL;
-
- __cfqq = rb_entry(node, struct cfq_queue, p_node);
- if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
- return __cfqq;
-
- return NULL;
-}
-
-/*
- * cfqd - obvious
- * cur_cfqq - passed in so that we don't decide that the current queue is
- * closely cooperating with itself.
- *
- * So, basically we're assuming that cur_cfqq has dispatched at least
- * one request, and that cfqd->last_position reflects a position on the disk
- * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
- * assumption.
- */
-static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
- struct cfq_queue *cur_cfqq)
-{
- struct cfq_queue *cfqq;
-
- if (cfq_class_idle(cur_cfqq))
- return NULL;
- if (!cfq_cfqq_sync(cur_cfqq))
- return NULL;
- if (CFQQ_SEEKY(cur_cfqq))
- return NULL;
-
- /*
- * Don't search priority tree if it's the only queue in the group.
- */
- if (cur_cfqq->cfqg->nr_cfqq == 1)
- return NULL;
-
- /*
-	 * We should notice if some of the queues are cooperating, e.g.
-	 * working closely on the same area of the disk. In that case,
-	 * we can group them together and not waste time idling.
- */
- cfqq = cfqq_close(cfqd, cur_cfqq);
- if (!cfqq)
- return NULL;
-
- /* If new queue belongs to different cfq_group, don't choose it */
- if (cur_cfqq->cfqg != cfqq->cfqg)
- return NULL;
-
- /*
- * It only makes sense to merge sync queues.
- */
- if (!cfq_cfqq_sync(cfqq))
- return NULL;
- if (CFQQ_SEEKY(cfqq))
- return NULL;
-
- /*
- * Do not merge queues of different priority classes
- */
- if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
- return NULL;
-
- return cfqq;
-}
-
-/*
- * Determine whether we should enforce idle window for this queue.
- */
-
-static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- enum wl_class_t wl_class = cfqq_class(cfqq);
- struct cfq_rb_root *st = cfqq->service_tree;
-
- BUG_ON(!st);
- BUG_ON(!st->count);
-
- if (!cfqd->cfq_slice_idle)
- return false;
-
- /* We never do for idle class queues. */
- if (wl_class == IDLE_WORKLOAD)
- return false;
-
- /* We do for queues that were marked with idle window flag. */
- if (cfq_cfqq_idle_window(cfqq) &&
- !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
- return true;
-
- /*
-	 * Otherwise, we do so only if they are the last ones
- * in their service tree.
- */
- if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
- !cfq_io_thinktime_big(cfqd, &st->ttime, false))
- return true;
- cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
- return false;
-}
-
-static void cfq_arm_slice_timer(struct cfq_data *cfqd)
-{
- struct cfq_queue *cfqq = cfqd->active_queue;
- struct cfq_rb_root *st = cfqq->service_tree;
- struct cfq_io_cq *cic;
- u64 sl, group_idle = 0;
- u64 now = ktime_get_ns();
-
- /*
-	 * On an SSD without seek penalty, disable idling. But only do so
-	 * for devices that support queuing; otherwise we still have a problem
-	 * with sync vs async workloads.
- */
- if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
- !cfqd->cfq_group_idle)
- return;
-
- WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
- WARN_ON(cfq_cfqq_slice_new(cfqq));
-
- /*
- * idle is disabled, either manually or by past process history
- */
- if (!cfq_should_idle(cfqd, cfqq)) {
- /* no queue idling. Check for group idling */
- if (cfqd->cfq_group_idle)
- group_idle = cfqd->cfq_group_idle;
- else
- return;
- }
-
- /*
- * still active requests from this queue, don't idle
- */
- if (cfqq->dispatched)
- return;
-
- /*
- * task has exited, don't wait
- */
- cic = cfqd->active_cic;
- if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
- return;
-
- /*
- * If our average think time is larger than the remaining time
- * slice, then don't idle. This avoids overrunning the allotted
- * time slice.
- */
- if (sample_valid(cic->ttime.ttime_samples) &&
- (cfqq->slice_end - now < cic->ttime.ttime_mean)) {
- cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%llu",
- cic->ttime.ttime_mean);
- return;
- }
-
- /*
-	 * If there are other queues in the group, or this is the only group
-	 * and it has too big a thinktime, don't do group idle.
- */
- if (group_idle &&
- (cfqq->cfqg->nr_cfqq > 1 ||
- cfq_io_thinktime_big(cfqd, &st->ttime, true)))
- return;
-
- cfq_mark_cfqq_wait_request(cfqq);
-
- if (group_idle)
- sl = cfqd->cfq_group_idle;
- else
- sl = cfqd->cfq_slice_idle;
-
- hrtimer_start(&cfqd->idle_slice_timer, ns_to_ktime(sl),
- HRTIMER_MODE_REL);
- cfqg_stats_set_start_idle_time(cfqq->cfqg);
- cfq_log_cfqq(cfqd, cfqq, "arm_idle: %llu group_idle: %d", sl,
- group_idle ? 1 : 0);
-}
-
-/*
- * Move request from internal lists to the request queue dispatch list.
- */
-static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
-
- cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
-
- cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
- cfq_remove_request(rq);
- cfqq->dispatched++;
- (RQ_CFQG(rq))->dispatched++;
- elv_dispatch_sort(q, rq);
-
- cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
- cfqq->nr_sectors += blk_rq_sectors(rq);
-}
-
-/*
- * return expired entry, or NULL to just start from scratch in rbtree
- */
-static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
-{
- struct request *rq = NULL;
-
- if (cfq_cfqq_fifo_expire(cfqq))
- return NULL;
-
- cfq_mark_cfqq_fifo_expire(cfqq);
-
- if (list_empty(&cfqq->fifo))
- return NULL;
-
- rq = rq_entry_fifo(cfqq->fifo.next);
- if (ktime_get_ns() < rq->fifo_time)
- rq = NULL;
-
- return rq;
-}
-
-static inline int
-cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- const int base_rq = cfqd->cfq_slice_async_rq;
-
- WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
-
- return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
-}
-
-/*
- * Must be called with the queue_lock held.
- */
-static int cfqq_process_refs(struct cfq_queue *cfqq)
-{
- int process_refs, io_refs;
-
- io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
- process_refs = cfqq->ref - io_refs;
- BUG_ON(process_refs < 0);
- return process_refs;
-}
-
-static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
-{
- int process_refs, new_process_refs;
- struct cfq_queue *__cfqq;
-
- /*
- * If there are no process references on the new_cfqq, then it is
- * unsafe to follow the ->new_cfqq chain as other cfqq's in the
- * chain may have dropped their last reference (not just their
- * last process reference).
- */
- if (!cfqq_process_refs(new_cfqq))
- return;
-
- /* Avoid a circular list and skip interim queue merges */
- while ((__cfqq = new_cfqq->new_cfqq)) {
- if (__cfqq == cfqq)
- return;
- new_cfqq = __cfqq;
- }
-
- process_refs = cfqq_process_refs(cfqq);
- new_process_refs = cfqq_process_refs(new_cfqq);
- /*
- * If the process for the cfqq has gone away, there is no
- * sense in merging the queues.
- */
- if (process_refs == 0 || new_process_refs == 0)
- return;
-
- /*
- * Merge in the direction of the lesser amount of work.
- */
- if (new_process_refs >= process_refs) {
- cfqq->new_cfqq = new_cfqq;
- new_cfqq->ref += process_refs;
- } else {
- new_cfqq->new_cfqq = cfqq;
- cfqq->ref += new_process_refs;
- }
-}
-
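The merge direction rule above retargets the queue with fewer process references onto the one with more, so the cheaper side is redirected. A reduced standalone sketch of the same bookkeeping, with a toy queue type:

#include <stdio.h>

struct queue {
	const char *name;
	int refs;		/* process references */
	struct queue *new_q;	/* where future I/O should go */
};

/* merge in the direction of the lesser amount of work,
 * as in cfq_setup_merge() */
static void setup_merge(struct queue *q, struct queue *new_q)
{
	if (new_q->refs >= q->refs) {
		q->new_q = new_q;
		new_q->refs += q->refs;
	} else {
		new_q->new_q = q;
		q->refs += new_q->refs;
	}
}

int main(void)
{
	struct queue a = { "a", 1, NULL }, b = { "b", 3, NULL };

	setup_merge(&a, &b);	/* a has less work: a is redirected to b */
	printf("a -> %s, b refs = %d\n", a.new_q->name, b.refs); /* b, 4 */
	return 0;
}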
-static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd,
- struct cfq_group *cfqg, enum wl_class_t wl_class)
-{
- struct cfq_queue *queue;
- int i;
- bool key_valid = false;
- u64 lowest_key = 0;
- enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
-
- for (i = 0; i <= SYNC_WORKLOAD; ++i) {
- /* select the one with lowest rb_key */
- queue = cfq_rb_first(st_for(cfqg, wl_class, i));
- if (queue &&
- (!key_valid || queue->rb_key < lowest_key)) {
- lowest_key = queue->rb_key;
- cur_best = i;
- key_valid = true;
- }
- }
-
- return cur_best;
-}
-
-static void
-choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg)
-{
- u64 slice;
- unsigned count;
- struct cfq_rb_root *st;
- u64 group_slice;
- enum wl_class_t original_class = cfqd->serving_wl_class;
- u64 now = ktime_get_ns();
-
- /* Choose next priority. RT > BE > IDLE */
- if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
- cfqd->serving_wl_class = RT_WORKLOAD;
- else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
- cfqd->serving_wl_class = BE_WORKLOAD;
- else {
- cfqd->serving_wl_class = IDLE_WORKLOAD;
- cfqd->workload_expires = now + jiffies_to_nsecs(1);
- return;
- }
-
- if (original_class != cfqd->serving_wl_class)
- goto new_workload;
-
- /*
-	 * For RT and BE, we also have to choose the type
-	 * (SYNC, SYNC_NOIDLE, ASYNC) and compute a workload
-	 * expiration time.
- */
- st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
- count = st->count;
-
- /*
- * check workload expiration, and that we still have other queues ready
- */
- if (count && !(now > cfqd->workload_expires))
- return;
-
-new_workload:
- /* otherwise select new workload type */
- cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg,
- cfqd->serving_wl_class);
- st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
- count = st->count;
-
- /*
- * the workload slice is computed as a fraction of target latency
- * proportional to the number of queues in that workload, over
- * all the queues in the same priority class
- */
- group_slice = cfq_group_slice(cfqd, cfqg);
-
- slice = div_u64(group_slice * count,
- max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
- cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
- cfqg)));
-
- if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
- u64 tmp;
-
-		/*
-		 * Async queues are currently system wide. Just taking the
-		 * proportion of queues within the same group will lead to a
-		 * higher async ratio system wide, as the root group generally
-		 * has a higher weight. A more accurate approach would be to
-		 * calculate a system wide async/sync ratio.
-		 */
- tmp = cfqd->cfq_target_latency *
- cfqg_busy_async_queues(cfqd, cfqg);
- tmp = div_u64(tmp, cfqd->busy_queues);
- slice = min_t(u64, slice, tmp);
-
- /* async workload slice is scaled down according to
- * the sync/async slice ratio. */
- slice = div64_u64(slice*cfqd->cfq_slice[0], cfqd->cfq_slice[1]);
- } else
- /* sync workload slice is at least 2 * cfq_slice_idle */
- slice = max(slice, 2 * cfqd->cfq_slice_idle);
-
- slice = max_t(u64, slice, CFQ_MIN_TT);
- cfq_log(cfqd, "workload slice:%llu", slice);
- cfqd->workload_expires = now + slice;
-}
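-
-/*
- * Illustration of the slice arithmetic above: if group_slice is 100ms
- * and 2 of the roughly 4 busy queues in the serving class belong to
- * the chosen workload type, the workload gets about
- * 100ms * 2 / 4 = 50ms. Async slices are further scaled by the
- * cfq_slice[0] / cfq_slice[1] ratio, sync slices get at least
- * 2 * cfq_slice_idle, and everything is clamped to CFQ_MIN_TT.
- */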
-
-static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
-{
- struct cfq_rb_root *st = &cfqd->grp_service_tree;
- struct cfq_group *cfqg;
-
- if (RB_EMPTY_ROOT(&st->rb.rb_root))
- return NULL;
- cfqg = cfq_rb_first_group(st);
- update_min_vdisktime(st);
- return cfqg;
-}
-
-static void cfq_choose_cfqg(struct cfq_data *cfqd)
-{
- struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
- u64 now = ktime_get_ns();
-
- cfqd->serving_group = cfqg;
-
- /* Restore the workload type data */
- if (cfqg->saved_wl_slice) {
- cfqd->workload_expires = now + cfqg->saved_wl_slice;
- cfqd->serving_wl_type = cfqg->saved_wl_type;
- cfqd->serving_wl_class = cfqg->saved_wl_class;
- } else
- cfqd->workload_expires = now - 1;
-
- choose_wl_class_and_type(cfqd, cfqg);
-}
-
-/*
- * Select a queue for service. If we have a current active queue,
- * check whether to continue servicing it, or retrieve and set a new one.
- */
-static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
-{
- struct cfq_queue *cfqq, *new_cfqq = NULL;
- u64 now = ktime_get_ns();
-
- cfqq = cfqd->active_queue;
- if (!cfqq)
- goto new_queue;
-
- if (!cfqd->rq_queued)
- return NULL;
-
- /*
-	 * We were waiting for the group to get backlogged. Expire the queue
- */
- if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
- goto expire;
-
- /*
- * The active queue has run out of time, expire it and select new.
- */
- if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
-		/*
-		 * If the slice had not expired at the completion of the last
-		 * request we might not have turned on the wait_busy flag.
-		 * Don't expire the queue yet. Allow the group to get
-		 * backlogged.
-		 *
-		 * The very fact that we have used up the slice means we have
-		 * been idling all along on this queue, so it should be OK to
-		 * wait for this request to complete.
-		 */
- if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
- && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
- cfqq = NULL;
- goto keep_queue;
- } else
- goto check_group_idle;
- }
-
- /*
- * The active queue has requests and isn't expired, allow it to
- * dispatch.
- */
- if (!RB_EMPTY_ROOT(&cfqq->sort_list))
- goto keep_queue;
-
- /*
- * If another queue has a request waiting within our mean seek
- * distance, let it run. The expire code will check for close
- * cooperators and put the close queue at the front of the service
- * tree. If possible, merge the expiring queue with the new cfqq.
- */
- new_cfqq = cfq_close_cooperator(cfqd, cfqq);
- if (new_cfqq) {
- if (!cfqq->new_cfqq)
- cfq_setup_merge(cfqq, new_cfqq);
- goto expire;
- }
-
- /*
- * No requests pending. If the active queue still has requests in
- * flight or is idling for a new request, allow either of these
- * conditions to happen (or time out) before selecting a new queue.
- */
- if (hrtimer_active(&cfqd->idle_slice_timer)) {
- cfqq = NULL;
- goto keep_queue;
- }
-
-	/*
-	 * This is a deep seek queue, but the device is much faster than
-	 * this queue can deliver; don't idle.
-	 */
- if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
- (cfq_cfqq_slice_new(cfqq) ||
- (cfqq->slice_end - now > now - cfqq->slice_start))) {
- cfq_clear_cfqq_deep(cfqq);
- cfq_clear_cfqq_idle_window(cfqq);
- }
-
- if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
- cfqq = NULL;
- goto keep_queue;
- }
-
- /*
- * If group idle is enabled and there are requests dispatched from
- * this group, wait for requests to complete.
- */
-check_group_idle:
- if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
- cfqq->cfqg->dispatched &&
- !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
- cfqq = NULL;
- goto keep_queue;
- }
-
-expire:
- cfq_slice_expired(cfqd, 0);
-new_queue:
- /*
- * Current queue expired. Check if we have to switch to a new
- * service tree
- */
- if (!new_cfqq)
- cfq_choose_cfqg(cfqd);
-
- cfqq = cfq_set_active_queue(cfqd, new_cfqq);
-keep_queue:
- return cfqq;
-}
-
-static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
-{
- int dispatched = 0;
-
- while (cfqq->next_rq) {
- cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
- dispatched++;
- }
-
- BUG_ON(!list_empty(&cfqq->fifo));
-
- /* By default cfqq is not expired if it is empty. Do it explicitly */
- __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
- return dispatched;
-}
-
-/*
- * Drain our current requests. Used for barriers and when switching
- * io schedulers on-the-fly.
- */
-static int cfq_forced_dispatch(struct cfq_data *cfqd)
-{
- struct cfq_queue *cfqq;
- int dispatched = 0;
-
- /* Expire the timeslice of the current active queue first */
- cfq_slice_expired(cfqd, 0);
- while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
- __cfq_set_active_queue(cfqd, cfqq);
- dispatched += __cfq_forced_dispatch_cfqq(cfqq);
- }
-
- BUG_ON(cfqd->busy_queues);
-
- cfq_log(cfqd, "forced_dispatch=%d", dispatched);
- return dispatched;
-}
-
-static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
- struct cfq_queue *cfqq)
-{
- u64 now = ktime_get_ns();
-
- /* the queue hasn't finished any request, can't estimate */
- if (cfq_cfqq_slice_new(cfqq))
- return true;
- if (now + cfqd->cfq_slice_idle * cfqq->dispatched > cfqq->slice_end)
- return true;
-
- return false;
-}
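-
-/*
- * The estimate above assumes each in-flight request needs about one
- * cfq_slice_idle to complete. E.g. with a slice_idle of 8ms and 3
- * requests dispatched, a slice ending less than 24ms from now counts
- * as "used soon".
- */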
-
-static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- unsigned int max_dispatch;
-
- if (cfq_cfqq_must_dispatch(cfqq))
- return true;
-
- /*
- * Drain async requests before we start sync IO
- */
- if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
- return false;
-
- /*
- * If this is an async queue and we have sync IO in flight, let it wait
- */
- if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
- return false;
-
- max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
- if (cfq_class_idle(cfqq))
- max_dispatch = 1;
-
- /*
- * Does this cfqq already have too much IO in flight?
- */
- if (cfqq->dispatched >= max_dispatch) {
- bool promote_sync = false;
- /*
- * idle queue must always only have a single IO in flight
- */
- if (cfq_class_idle(cfqq))
- return false;
-
-		/*
-		 * If there is only one sync queue we can ignore the async
-		 * queues here and give the sync queue no dispatch limit,
-		 * because a sync queue can preempt an async queue anyway;
-		 * limiting the sync queue doesn't make sense. This is useful
-		 * for the aiostress test.
-		 */
- if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
- promote_sync = true;
-
- /*
- * We have other queues, don't allow more IO from this one
- */
- if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
- !promote_sync)
- return false;
-
- /*
- * Sole queue user, no limit
- */
- if (cfqd->busy_queues == 1 || promote_sync)
- max_dispatch = -1;
- else
-		/*
-		 * Normally we start throttling cfqq when cfq_quantum/2
-		 * requests have been dispatched. But we can drive
-		 * deeper queue depths at the beginning of the slice,
-		 * subject to the upper limit of cfq_quantum.
-		 */
- max_dispatch = cfqd->cfq_quantum;
- }
-
- /*
-	 * Async queues must wait a bit before being allowed to dispatch.
- * We also ramp up the dispatch depth gradually for async IO,
- * based on the last sync IO we serviced
- */
- if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
- u64 last_sync = ktime_get_ns() - cfqd->last_delayed_sync;
- unsigned int depth;
-
- depth = div64_u64(last_sync, cfqd->cfq_slice[1]);
- if (!depth && !cfqq->dispatched)
- depth = 1;
- if (depth < max_dispatch)
- max_dispatch = depth;
- }
-
- /*
- * If we're below the current max, allow a dispatch
- */
- return cfqq->dispatched < max_dispatch;
-}
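-
-/*
- * Async ramp-up, illustrated: if the last delayed sync completion was
- * e.g. 300ms ago and cfq_slice[1] (the sync slice) is 100ms, the
- * allowed async depth above is 300 / 100 = 3. Freshly-seen sync IO
- * thus throttles async depth, which then recovers gradually.
- */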
-
-/*
- * Dispatch a request from cfqq, moving it to the request queue
- * dispatch list.
- */
-static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- struct request *rq;
-
- BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
- rq = cfq_check_fifo(cfqq);
- if (rq)
- cfq_mark_cfqq_must_dispatch(cfqq);
-
- if (!cfq_may_dispatch(cfqd, cfqq))
- return false;
-
- /*
-	 * follow the expired path, else take the next available request
- */
- if (!rq)
- rq = cfqq->next_rq;
- else
- cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
-
- /*
- * insert request into driver dispatch list
- */
- cfq_dispatch_insert(cfqd->queue, rq);
-
- if (!cfqd->active_cic) {
- struct cfq_io_cq *cic = RQ_CIC(rq);
-
- atomic_long_inc(&cic->icq.ioc->refcount);
- cfqd->active_cic = cic;
- }
-
- return true;
-}
-
-/*
- * Find the cfqq that we need to service and move a request from that to the
- * dispatch list
- */
-static int cfq_dispatch_requests(struct request_queue *q, int force)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_queue *cfqq;
-
- if (!cfqd->busy_queues)
- return 0;
-
- if (unlikely(force))
- return cfq_forced_dispatch(cfqd);
-
- cfqq = cfq_select_queue(cfqd);
- if (!cfqq)
- return 0;
-
- /*
- * Dispatch a request from this cfqq, if it is allowed
- */
- if (!cfq_dispatch_request(cfqd, cfqq))
- return 0;
-
- cfqq->slice_dispatch++;
- cfq_clear_cfqq_must_dispatch(cfqq);
-
- /*
- * expire an async queue immediately if it has used up its slice. idle
-	 * queues always expire after 1 dispatch round.
- */
- if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
- cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
- cfq_class_idle(cfqq))) {
- cfqq->slice_end = ktime_get_ns() + 1;
- cfq_slice_expired(cfqd, 0);
- }
-
- cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
- return 1;
-}
-
-/*
- * task holds one reference to the queue, dropped when task exits. each rq
- * in-flight on this queue also holds a reference, dropped when rq is freed.
- *
- * Each cfq queue took a reference on the parent group. Drop it now.
- * queue lock must be held here.
- */
-static void cfq_put_queue(struct cfq_queue *cfqq)
-{
- struct cfq_data *cfqd = cfqq->cfqd;
- struct cfq_group *cfqg;
-
- BUG_ON(cfqq->ref <= 0);
-
- cfqq->ref--;
- if (cfqq->ref)
- return;
-
- cfq_log_cfqq(cfqd, cfqq, "put_queue");
- BUG_ON(rb_first(&cfqq->sort_list));
- BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
- cfqg = cfqq->cfqg;
-
- if (unlikely(cfqd->active_queue == cfqq)) {
- __cfq_slice_expired(cfqd, cfqq, 0);
- cfq_schedule_dispatch(cfqd);
- }
-
- BUG_ON(cfq_cfqq_on_rr(cfqq));
- kmem_cache_free(cfq_pool, cfqq);
- cfqg_put(cfqg);
-}
-
-static void cfq_put_cooperator(struct cfq_queue *cfqq)
-{
- struct cfq_queue *__cfqq, *next;
-
- /*
- * If this queue was scheduled to merge with another queue, be
- * sure to drop the reference taken on that queue (and others in
- * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
- */
- __cfqq = cfqq->new_cfqq;
- while (__cfqq) {
- if (__cfqq == cfqq) {
- WARN(1, "cfqq->new_cfqq loop detected\n");
- break;
- }
- next = __cfqq->new_cfqq;
- cfq_put_queue(__cfqq);
- __cfqq = next;
- }
-}
-
-static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- if (unlikely(cfqq == cfqd->active_queue)) {
- __cfq_slice_expired(cfqd, cfqq, 0);
- cfq_schedule_dispatch(cfqd);
- }
-
- cfq_put_cooperator(cfqq);
-
- cfq_put_queue(cfqq);
-}
-
-static void cfq_init_icq(struct io_cq *icq)
-{
- struct cfq_io_cq *cic = icq_to_cic(icq);
-
- cic->ttime.last_end_request = ktime_get_ns();
-}
-
-static void cfq_exit_icq(struct io_cq *icq)
-{
- struct cfq_io_cq *cic = icq_to_cic(icq);
- struct cfq_data *cfqd = cic_to_cfqd(cic);
-
- if (cic_to_cfqq(cic, false)) {
- cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, false));
- cic_set_cfqq(cic, NULL, false);
- }
-
- if (cic_to_cfqq(cic, true)) {
- cfq_exit_cfqq(cfqd, cic_to_cfqq(cic, true));
- cic_set_cfqq(cic, NULL, true);
- }
-}
-
-static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
-{
- struct task_struct *tsk = current;
- int ioprio_class;
-
- if (!cfq_cfqq_prio_changed(cfqq))
- return;
-
- ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
- switch (ioprio_class) {
- default:
- printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
- /* fall through */
- case IOPRIO_CLASS_NONE:
- /*
- * no prio set, inherit CPU scheduling settings
- */
- cfqq->ioprio = task_nice_ioprio(tsk);
- cfqq->ioprio_class = task_nice_ioclass(tsk);
- break;
- case IOPRIO_CLASS_RT:
- cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
- cfqq->ioprio_class = IOPRIO_CLASS_RT;
- break;
- case IOPRIO_CLASS_BE:
- cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
- cfqq->ioprio_class = IOPRIO_CLASS_BE;
- break;
- case IOPRIO_CLASS_IDLE:
- cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
- cfqq->ioprio = 7;
- cfq_clear_cfqq_idle_window(cfqq);
- break;
- }
-
- /*
- * keep track of original prio settings in case we have to temporarily
- * elevate the priority of this queue
- */
- cfqq->org_ioprio = cfqq->ioprio;
- cfqq->org_ioprio_class = cfqq->ioprio_class;
- cfq_clear_cfqq_prio_changed(cfqq);
-}
-
-static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
-{
- int ioprio = cic->icq.ioc->ioprio;
- struct cfq_data *cfqd = cic_to_cfqd(cic);
- struct cfq_queue *cfqq;
-
- /*
- * Check whether ioprio has changed. The condition may trigger
- * spuriously on a newly created cic but there's no harm.
- */
- if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
- return;
-
- cfqq = cic_to_cfqq(cic, false);
- if (cfqq) {
- cfq_put_queue(cfqq);
- cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio);
- cic_set_cfqq(cic, cfqq, false);
- }
-
- cfqq = cic_to_cfqq(cic, true);
- if (cfqq)
- cfq_mark_cfqq_prio_changed(cfqq);
-
- cic->ioprio = ioprio;
-}
-
-static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- pid_t pid, bool is_sync)
-{
- RB_CLEAR_NODE(&cfqq->rb_node);
- RB_CLEAR_NODE(&cfqq->p_node);
- INIT_LIST_HEAD(&cfqq->fifo);
-
- cfqq->ref = 0;
- cfqq->cfqd = cfqd;
-
- cfq_mark_cfqq_prio_changed(cfqq);
-
- if (is_sync) {
- if (!cfq_class_idle(cfqq))
- cfq_mark_cfqq_idle_window(cfqq);
- cfq_mark_cfqq_sync(cfqq);
- }
- cfqq->pid = pid;
-}
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
-{
- struct cfq_data *cfqd = cic_to_cfqd(cic);
- struct cfq_queue *cfqq;
- uint64_t serial_nr;
-
- rcu_read_lock();
- serial_nr = bio_blkcg(bio)->css.serial_nr;
- rcu_read_unlock();
-
- /*
- * Check whether blkcg has changed. The condition may trigger
- * spuriously on a newly created cic but there's no harm.
- */
- if (unlikely(!cfqd) || likely(cic->blkcg_serial_nr == serial_nr))
- return;
-
- /*
- * Drop reference to queues. New queues will be assigned in new
- * group upon arrival of fresh requests.
- */
- cfqq = cic_to_cfqq(cic, false);
- if (cfqq) {
- cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
- cic_set_cfqq(cic, NULL, false);
- cfq_put_queue(cfqq);
- }
-
- cfqq = cic_to_cfqq(cic, true);
- if (cfqq) {
- cfq_log_cfqq(cfqd, cfqq, "changed cgroup");
- cic_set_cfqq(cic, NULL, true);
- cfq_put_queue(cfqq);
- }
-
- cic->blkcg_serial_nr = serial_nr;
-}
-#else
-static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
-{
-}
-#endif /* CONFIG_CFQ_GROUP_IOSCHED */
-
-static struct cfq_queue **
-cfq_async_queue_prio(struct cfq_group *cfqg, int ioprio_class, int ioprio)
-{
- switch (ioprio_class) {
- case IOPRIO_CLASS_RT:
- return &cfqg->async_cfqq[0][ioprio];
- case IOPRIO_CLASS_NONE:
- ioprio = IOPRIO_NORM;
- /* fall through */
- case IOPRIO_CLASS_BE:
- return &cfqg->async_cfqq[1][ioprio];
- case IOPRIO_CLASS_IDLE:
- return &cfqg->async_idle_cfqq;
- default:
- BUG();
- }
-}
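-
-/*
- * Example mapping: IOPRIO_CLASS_RT at prio 3 selects
- * &cfqg->async_cfqq[0][3], IOPRIO_CLASS_NONE falls through to the
- * best-effort row at IOPRIO_NORM, and all idle-class IO shares the
- * single async_idle_cfqq slot.
- */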
-
-static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
- struct bio *bio)
-{
- int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
- int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
- struct cfq_queue **async_cfqq = NULL;
- struct cfq_queue *cfqq;
- struct cfq_group *cfqg;
-
- rcu_read_lock();
- cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
- if (!cfqg) {
- cfqq = &cfqd->oom_cfqq;
- goto out;
- }
-
- if (!is_sync) {
- if (!ioprio_valid(cic->ioprio)) {
- struct task_struct *tsk = current;
- ioprio = task_nice_ioprio(tsk);
- ioprio_class = task_nice_ioclass(tsk);
- }
- async_cfqq = cfq_async_queue_prio(cfqg, ioprio_class, ioprio);
- cfqq = *async_cfqq;
- if (cfqq)
- goto out;
- }
-
- cfqq = kmem_cache_alloc_node(cfq_pool,
- GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
- cfqd->queue->node);
- if (!cfqq) {
- cfqq = &cfqd->oom_cfqq;
- goto out;
- }
-
- /* cfq_init_cfqq() assumes cfqq->ioprio_class is initialized. */
- cfqq->ioprio_class = IOPRIO_CLASS_NONE;
- cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
- cfq_init_prio_data(cfqq, cic);
- cfq_link_cfqq_cfqg(cfqq, cfqg);
- cfq_log_cfqq(cfqd, cfqq, "alloced");
-
- if (async_cfqq) {
- /* a new async queue is created, pin and remember */
- cfqq->ref++;
- *async_cfqq = cfqq;
- }
-out:
- cfqq->ref++;
- rcu_read_unlock();
- return cfqq;
-}
-
-static void
-__cfq_update_io_thinktime(struct cfq_ttime *ttime, u64 slice_idle)
-{
- u64 elapsed = ktime_get_ns() - ttime->last_end_request;
- elapsed = min(elapsed, 2UL * slice_idle);
-
- ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
- ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
- ttime->ttime_mean = div64_ul(ttime->ttime_total + 128,
- ttime->ttime_samples);
-}
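-
-/*
- * These are fixed-point exponential moving averages: ttime_samples
- * converges toward 256 (7/8 old weight per update), ttime_total tracks
- * 7/8 of its old value plus the new elapsed time scaled by 256, and
- * ttime_mean = total / samples is therefore the EWMA of the per-request
- * think time, with +128 rounding to nearest.
- */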
-
-static void
-cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct cfq_io_cq *cic)
-{
- if (cfq_cfqq_sync(cfqq)) {
- __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
- __cfq_update_io_thinktime(&cfqq->service_tree->ttime,
- cfqd->cfq_slice_idle);
- }
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
-#endif
-}
-
-static void
-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct request *rq)
-{
- sector_t sdist = 0;
- sector_t n_sec = blk_rq_sectors(rq);
- if (cfqq->last_request_pos) {
- if (cfqq->last_request_pos < blk_rq_pos(rq))
- sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
- else
- sdist = cfqq->last_request_pos - blk_rq_pos(rq);
- }
-
- cfqq->seek_history <<= 1;
- if (blk_queue_nonrot(cfqd->queue))
- cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
- else
- cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
-}
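-
-/*
- * seek_history is a shift register of per-request "seeky" bits: on
- * rotational storage a bit is set when the seek distance exceeds
- * CFQQ_SEEK_THR, on non-rotational storage when the request is small
- * (below CFQQ_SECT_THR_NONROT). CFQQ_SEEKY() then classifies the queue
- * from the recent bit pattern.
- */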
-
-static inline bool req_noidle(struct request *req)
-{
- return req_op(req) == REQ_OP_WRITE &&
- (req->cmd_flags & (REQ_SYNC | REQ_IDLE)) == REQ_SYNC;
-}
-
-/*
- * Disable idle window if the process thinks too long or seeks so much that
- * it doesn't matter
- */
-static void
-cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct cfq_io_cq *cic)
-{
- int old_idle, enable_idle;
-
- /*
- * Don't idle for async or idle io prio class
- */
- if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
- return;
-
- enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
-
- if (cfqq->queued[0] + cfqq->queued[1] >= 4)
- cfq_mark_cfqq_deep(cfqq);
-
- if (cfqq->next_rq && req_noidle(cfqq->next_rq))
- enable_idle = 0;
- else if (!atomic_read(&cic->icq.ioc->active_ref) ||
- !cfqd->cfq_slice_idle ||
- (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
- enable_idle = 0;
- else if (sample_valid(cic->ttime.ttime_samples)) {
- if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
- enable_idle = 0;
- else
- enable_idle = 1;
- }
-
- if (old_idle != enable_idle) {
- cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
- if (enable_idle)
- cfq_mark_cfqq_idle_window(cfqq);
- else
- cfq_clear_cfqq_idle_window(cfqq);
- }
-}
-
-/*
- * Check if new_cfqq should preempt the currently active queue. Return
- * false for no (or if we aren't sure); true will cause a preempt.
- */
-static bool
-cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
- struct request *rq)
-{
- struct cfq_queue *cfqq;
-
- cfqq = cfqd->active_queue;
- if (!cfqq)
- return false;
-
- if (cfq_class_idle(new_cfqq))
- return false;
-
- if (cfq_class_idle(cfqq))
- return true;
-
- /*
- * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
- */
- if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
- return false;
-
- /*
- * if the new request is sync, but the currently running queue is
- * not, let the sync request have priority.
- */
- if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
- return true;
-
- /*
- * Treat ancestors of current cgroup the same way as current cgroup.
- * For anybody else we disallow preemption to guarantee service
- * fairness among cgroups.
- */
- if (!cfqg_is_descendant(cfqq->cfqg, new_cfqq->cfqg))
- return false;
-
- if (cfq_slice_used(cfqq))
- return true;
-
- /*
-	 * Allow an RT request to preempt an ongoing non-RT cfqq timeslice.
- */
- if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
- return true;
-
- WARN_ON_ONCE(cfqq->ioprio_class != new_cfqq->ioprio_class);
- /* Allow preemption only if we are idling on sync-noidle tree */
- if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
- cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
- RB_EMPTY_ROOT(&cfqq->sort_list))
- return true;
-
- /*
- * So both queues are sync. Let the new request get disk time if
- * it's a metadata request and the current queue is doing regular IO.
- */
- if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
- return true;
-
- /* An idle queue should not be idle now for some reason */
- if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
- return true;
-
- if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
- return false;
-
- /*
- * if this request is as-good as one we would expect from the
- * current cfqq, let it preempt
- */
- if (cfq_rq_close(cfqd, cfqq, rq))
- return true;
-
- return false;
-}
-
-/*
- * cfqq preempts the active queue. if we allowed preempt with no slice left,
- * let it have half of its nominal slice.
- */
-static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
-
- cfq_log_cfqq(cfqd, cfqq, "preempt");
- cfq_slice_expired(cfqd, 1);
-
- /*
-	 * the workload type has changed: don't save the slice, otherwise
-	 * the preempt doesn't happen
- */
- if (old_type != cfqq_type(cfqq))
- cfqq->cfqg->saved_wl_slice = 0;
-
- /*
-	 * Put the new queue at the front of the current list,
- * so we know that it will be selected next.
- */
- BUG_ON(!cfq_cfqq_on_rr(cfqq));
-
- cfq_service_tree_add(cfqd, cfqq, 1);
-
- cfqq->slice_end = 0;
- cfq_mark_cfqq_slice_new(cfqq);
-}
-
-/*
- * Called when a new fs request (rq) is added (to cfqq). Check if there's
- * something we should do about it
- */
-static void
-cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
- struct request *rq)
-{
- struct cfq_io_cq *cic = RQ_CIC(rq);
-
- cfqd->rq_queued++;
- if (rq->cmd_flags & REQ_PRIO)
- cfqq->prio_pending++;
-
- cfq_update_io_thinktime(cfqd, cfqq, cic);
- cfq_update_io_seektime(cfqd, cfqq, rq);
- cfq_update_idle_window(cfqd, cfqq, cic);
-
- cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
-
- if (cfqq == cfqd->active_queue) {
- /*
- * Remember that we saw a request from this process, but
- * don't start queuing just yet. Otherwise we risk seeing lots
- * of tiny requests, because we disrupt the normal plugging
- * and merging. If the request is already larger than a single
- * page, let it rip immediately. For that case we assume that
-		 * merging is already done. Ditto for a busy system that
-		 * has other work pending: don't risk waiting for the idle
-		 * timer unplug before continuing to work.
- */
- if (cfq_cfqq_wait_request(cfqq)) {
- if (blk_rq_bytes(rq) > PAGE_SIZE ||
- cfqd->busy_queues > 1) {
- cfq_del_timer(cfqd, cfqq);
- cfq_clear_cfqq_wait_request(cfqq);
- __blk_run_queue(cfqd->queue);
- } else {
- cfqg_stats_update_idle_time(cfqq->cfqg);
- cfq_mark_cfqq_must_dispatch(cfqq);
- }
- }
- } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
-		/*
-		 * Not the active queue - expire the current slice if it is
-		 * idle and has exceeded its mean thinktime, or if this new
-		 * queue has some old slice time left and is of higher
-		 * priority, or if this new queue is RT and the current one
-		 * is BE.
-		 */
- cfq_preempt_queue(cfqd, cfqq);
- __blk_run_queue(cfqd->queue);
- }
-}
-
-static void cfq_insert_request(struct request_queue *q, struct request *rq)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
-
- cfq_log_cfqq(cfqd, cfqq, "insert_request");
- cfq_init_prio_data(cfqq, RQ_CIC(rq));
-
- rq->fifo_time = ktime_get_ns() + cfqd->cfq_fifo_expire[rq_is_sync(rq)];
- list_add_tail(&rq->queuelist, &cfqq->fifo);
- cfq_add_rq_rb(rq);
- cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
- rq->cmd_flags);
- cfq_rq_enqueued(cfqd, cfqq, rq);
-}
-
-/*
- * Update hw_tag based on peak queue depth over 50 samples under
- * sufficient load.
- */
-static void cfq_update_hw_tag(struct cfq_data *cfqd)
-{
- struct cfq_queue *cfqq = cfqd->active_queue;
-
- if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
- cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
-
- if (cfqd->hw_tag == 1)
- return;
-
- if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
- cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
- return;
-
-	/*
-	 * If the active queue doesn't have enough requests and can idle,
-	 * cfq might not dispatch sufficient requests to the hardware. Don't
-	 * zero hw_tag in this case.
-	 */
- if (cfqq && cfq_cfqq_idle_window(cfqq) &&
- cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
- CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
- return;
-
- if (cfqd->hw_tag_samples++ < 50)
- return;
-
- if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
- cfqd->hw_tag = 1;
- else
- cfqd->hw_tag = 0;
-}
-
-static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
-{
- struct cfq_io_cq *cic = cfqd->active_cic;
- u64 now = ktime_get_ns();
-
- /* If the queue already has requests, don't wait */
- if (!RB_EMPTY_ROOT(&cfqq->sort_list))
- return false;
-
- /* If there are other queues in the group, don't wait */
- if (cfqq->cfqg->nr_cfqq > 1)
- return false;
-
- /* the only queue in the group, but think time is big */
- if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
- return false;
-
- if (cfq_slice_used(cfqq))
- return true;
-
- /* if slice left is less than think time, wait busy */
- if (cic && sample_valid(cic->ttime.ttime_samples)
- && (cfqq->slice_end - now < cic->ttime.ttime_mean))
- return true;
-
-	/*
-	 * If the think time is less than a jiffy then ttime_mean=0 and the
-	 * check above will not be true. It might happen that the slice has
-	 * not expired yet but will expire soon (4-5 ns) during
-	 * select_queue(). To cover the case where the think time is less
-	 * than a jiffy, mark the queue wait busy if only 1 jiffy is left
-	 * in the slice.
-	 */
- if (cfqq->slice_end - now <= jiffies_to_nsecs(1))
- return true;
-
- return false;
-}
-
-static void cfq_completed_request(struct request_queue *q, struct request *rq)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
- struct cfq_data *cfqd = cfqq->cfqd;
- const int sync = rq_is_sync(rq);
- u64 now = ktime_get_ns();
-
- cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", req_noidle(rq));
-
- cfq_update_hw_tag(cfqd);
-
- WARN_ON(!cfqd->rq_in_driver);
- WARN_ON(!cfqq->dispatched);
- cfqd->rq_in_driver--;
- cfqq->dispatched--;
- (RQ_CFQG(rq))->dispatched--;
- cfqg_stats_update_completion(cfqq->cfqg, rq->start_time_ns,
- rq->io_start_time_ns, rq->cmd_flags);
-
- cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
-
- if (sync) {
- struct cfq_rb_root *st;
-
- RQ_CIC(rq)->ttime.last_end_request = now;
-
- if (cfq_cfqq_on_rr(cfqq))
- st = cfqq->service_tree;
- else
- st = st_for(cfqq->cfqg, cfqq_class(cfqq),
- cfqq_type(cfqq));
-
- st->ttime.last_end_request = now;
- if (rq->start_time_ns + cfqd->cfq_fifo_expire[1] <= now)
- cfqd->last_delayed_sync = now;
- }
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- cfqq->cfqg->ttime.last_end_request = now;
-#endif
-
- /*
- * If this is the active queue, check if it needs to be expired,
- * or if we want to idle in case it has no pending requests.
- */
- if (cfqd->active_queue == cfqq) {
- const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
-
- if (cfq_cfqq_slice_new(cfqq)) {
- cfq_set_prio_slice(cfqd, cfqq);
- cfq_clear_cfqq_slice_new(cfqq);
- }
-
- /*
-		 * Should we wait for the next request to come in before we
-		 * expire the queue?
- */
- if (cfq_should_wait_busy(cfqd, cfqq)) {
- u64 extend_sl = cfqd->cfq_slice_idle;
- if (!cfqd->cfq_slice_idle)
- extend_sl = cfqd->cfq_group_idle;
- cfqq->slice_end = now + extend_sl;
- cfq_mark_cfqq_wait_busy(cfqq);
- cfq_log_cfqq(cfqd, cfqq, "will busy wait");
- }
-
- /*
- * Idling is not enabled on:
- * - expired queues
- * - idle-priority queues
- * - async queues
- * - queues with still some requests queued
- * - when there is a close cooperator
- */
- if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
- cfq_slice_expired(cfqd, 1);
- else if (sync && cfqq_empty &&
- !cfq_close_cooperator(cfqd, cfqq)) {
- cfq_arm_slice_timer(cfqd);
- }
- }
-
- if (!cfqd->rq_in_driver)
- cfq_schedule_dispatch(cfqd);
-}
-
-static void cfqq_boost_on_prio(struct cfq_queue *cfqq, unsigned int op)
-{
- /*
- * If REQ_PRIO is set, boost class and prio level, if it's below
- * BE/NORM. If prio is not set, restore the potentially boosted
- * class/prio level.
- */
- if (!(op & REQ_PRIO)) {
- cfqq->ioprio_class = cfqq->org_ioprio_class;
- cfqq->ioprio = cfqq->org_ioprio;
- } else {
- if (cfq_class_idle(cfqq))
- cfqq->ioprio_class = IOPRIO_CLASS_BE;
- if (cfqq->ioprio > IOPRIO_NORM)
- cfqq->ioprio = IOPRIO_NORM;
- }
-}
-
-static inline int __cfq_may_queue(struct cfq_queue *cfqq)
-{
- if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
- cfq_mark_cfqq_must_alloc_slice(cfqq);
- return ELV_MQUEUE_MUST;
- }
-
- return ELV_MQUEUE_MAY;
-}
-
-static int cfq_may_queue(struct request_queue *q, unsigned int op)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct task_struct *tsk = current;
- struct cfq_io_cq *cic;
- struct cfq_queue *cfqq;
-
- /*
- * don't force setup of a queue from here, as a call to may_queue
- * does not necessarily imply that a request actually will be queued.
-	 * so just look up a possibly existing queue, or return 'may queue'
- * if that fails
- */
- cic = cfq_cic_lookup(cfqd, tsk->io_context);
- if (!cic)
- return ELV_MQUEUE_MAY;
-
- cfqq = cic_to_cfqq(cic, op_is_sync(op));
- if (cfqq) {
- cfq_init_prio_data(cfqq, cic);
- cfqq_boost_on_prio(cfqq, op);
-
- return __cfq_may_queue(cfqq);
- }
-
- return ELV_MQUEUE_MAY;
-}
-
-/*
- * queue lock held here
- */
-static void cfq_put_request(struct request *rq)
-{
- struct cfq_queue *cfqq = RQ_CFQQ(rq);
-
- if (cfqq) {
- const int rw = rq_data_dir(rq);
-
- BUG_ON(!cfqq->allocated[rw]);
- cfqq->allocated[rw]--;
-
- /* Put down rq reference on cfqg */
- cfqg_put(RQ_CFQG(rq));
- rq->elv.priv[0] = NULL;
- rq->elv.priv[1] = NULL;
-
- cfq_put_queue(cfqq);
- }
-}
-
-static struct cfq_queue *
-cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
- struct cfq_queue *cfqq)
-{
- cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
- cic_set_cfqq(cic, cfqq->new_cfqq, 1);
- cfq_mark_cfqq_coop(cfqq->new_cfqq);
- cfq_put_queue(cfqq);
- return cic_to_cfqq(cic, 1);
-}
-
-/*
- * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
- * was the last process referring to said cfqq.
- */
-static struct cfq_queue *
-split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
-{
- if (cfqq_process_refs(cfqq) == 1) {
- cfqq->pid = current->pid;
- cfq_clear_cfqq_coop(cfqq);
- cfq_clear_cfqq_split_coop(cfqq);
- return cfqq;
- }
-
- cic_set_cfqq(cic, NULL, 1);
-
- cfq_put_cooperator(cfqq);
-
- cfq_put_queue(cfqq);
- return NULL;
-}
-
-/*
- * Allocate cfq data structures associated with this request.
- */
-static int
-cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
- gfp_t gfp_mask)
-{
- struct cfq_data *cfqd = q->elevator->elevator_data;
- struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
- const int rw = rq_data_dir(rq);
- const bool is_sync = rq_is_sync(rq);
- struct cfq_queue *cfqq;
-
- spin_lock_irq(q->queue_lock);
-
- check_ioprio_changed(cic, bio);
- check_blkcg_changed(cic, bio);
-new_queue:
- cfqq = cic_to_cfqq(cic, is_sync);
- if (!cfqq || cfqq == &cfqd->oom_cfqq) {
- if (cfqq)
- cfq_put_queue(cfqq);
- cfqq = cfq_get_queue(cfqd, is_sync, cic, bio);
- cic_set_cfqq(cic, cfqq, is_sync);
- } else {
- /*
- * If the queue was seeky for too long, break it apart.
- */
- if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
- cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
- cfqq = split_cfqq(cic, cfqq);
- if (!cfqq)
- goto new_queue;
- }
-
- /*
- * Check to see if this queue is scheduled to merge with
- * another, closely cooperating queue. The merging of
- * queues happens here as it must be done in process context.
- * The reference on new_cfqq was taken in merge_cfqqs.
- */
- if (cfqq->new_cfqq)
- cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
- }
-
- cfqq->allocated[rw]++;
-
- cfqq->ref++;
- cfqg_get(cfqq->cfqg);
- rq->elv.priv[0] = cfqq;
- rq->elv.priv[1] = cfqq->cfqg;
- spin_unlock_irq(q->queue_lock);
-
- return 0;
-}
-
-static void cfq_kick_queue(struct work_struct *work)
-{
- struct cfq_data *cfqd =
- container_of(work, struct cfq_data, unplug_work);
- struct request_queue *q = cfqd->queue;
-
- spin_lock_irq(q->queue_lock);
- __blk_run_queue(cfqd->queue);
- spin_unlock_irq(q->queue_lock);
-}
-
-/*
- * Timer running if the active_queue is currently idling inside its time slice
- */
-static enum hrtimer_restart cfq_idle_slice_timer(struct hrtimer *timer)
-{
- struct cfq_data *cfqd = container_of(timer, struct cfq_data,
- idle_slice_timer);
- struct cfq_queue *cfqq;
- unsigned long flags;
- int timed_out = 1;
-
- cfq_log(cfqd, "idle timer fired");
-
- spin_lock_irqsave(cfqd->queue->queue_lock, flags);
-
- cfqq = cfqd->active_queue;
- if (cfqq) {
- timed_out = 0;
-
- /*
- * We saw a request before the queue expired, let it through
- */
- if (cfq_cfqq_must_dispatch(cfqq))
- goto out_kick;
-
- /*
- * expired
- */
- if (cfq_slice_used(cfqq))
- goto expire;
-
- /*
- * only expire and reinvoke request handler, if there are
- * other queues with pending requests
- */
- if (!cfqd->busy_queues)
- goto out_cont;
-
- /*
- * not expired and it has a request pending, let it dispatch
- */
- if (!RB_EMPTY_ROOT(&cfqq->sort_list))
- goto out_kick;
-
- /*
- * Queue depth flag is reset only when the idle didn't succeed
- */
- cfq_clear_cfqq_deep(cfqq);
- }
-expire:
- cfq_slice_expired(cfqd, timed_out);
-out_kick:
- cfq_schedule_dispatch(cfqd);
-out_cont:
- spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
- return HRTIMER_NORESTART;
-}
-
-static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
-{
- hrtimer_cancel(&cfqd->idle_slice_timer);
- cancel_work_sync(&cfqd->unplug_work);
-}
-
-static void cfq_exit_queue(struct elevator_queue *e)
-{
- struct cfq_data *cfqd = e->elevator_data;
- struct request_queue *q = cfqd->queue;
-
- cfq_shutdown_timer_wq(cfqd);
-
- spin_lock_irq(q->queue_lock);
-
- if (cfqd->active_queue)
- __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
-
- spin_unlock_irq(q->queue_lock);
-
- cfq_shutdown_timer_wq(cfqd);
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- blkcg_deactivate_policy(q, &blkcg_policy_cfq);
-#else
- kfree(cfqd->root_group);
-#endif
- kfree(cfqd);
-}
-
-static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
-{
- struct cfq_data *cfqd;
- struct blkcg_gq *blkg __maybe_unused;
- int i, ret;
- struct elevator_queue *eq;
-
- eq = elevator_alloc(q, e);
- if (!eq)
- return -ENOMEM;
-
- cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
- if (!cfqd) {
- kobject_put(&eq->kobj);
- return -ENOMEM;
- }
- eq->elevator_data = cfqd;
-
- cfqd->queue = q;
- spin_lock_irq(q->queue_lock);
- q->elevator = eq;
- spin_unlock_irq(q->queue_lock);
-
- /* Init root service tree */
- cfqd->grp_service_tree = CFQ_RB_ROOT;
-
- /* Init root group and prefer root group over other groups by default */
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
- if (ret)
- goto out_free;
-
- cfqd->root_group = blkg_to_cfqg(q->root_blkg);
-#else
- ret = -ENOMEM;
- cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
- GFP_KERNEL, cfqd->queue->node);
- if (!cfqd->root_group)
- goto out_free;
-
- cfq_init_cfqg_base(cfqd->root_group);
- cfqd->root_group->weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
- cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_LEGACY_DFL;
-#endif
-
- /*
- * Not strictly needed (since RB_ROOT just clears the node and we
- * zeroed cfqd on alloc), but better be safe in case someone decides
- * to add magic to the rb code
- */
- for (i = 0; i < CFQ_PRIO_LISTS; i++)
- cfqd->prio_trees[i] = RB_ROOT;
-
- /*
- * Our fallback cfqq if cfq_get_queue() runs into OOM issues.
- * Grab a permanent reference to it, so that the normal code flow
- * will not attempt to free it. oom_cfqq is linked to root_group
- * but shouldn't hold a reference as it'll never be unlinked. Lose
- * the reference from linking right away.
- */
- cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
- cfqd->oom_cfqq.ref++;
-
- spin_lock_irq(q->queue_lock);
- cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
- cfqg_put(cfqd->root_group);
- spin_unlock_irq(q->queue_lock);
-
- hrtimer_init(&cfqd->idle_slice_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
-
- INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
-
- cfqd->cfq_quantum = cfq_quantum;
- cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
- cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
- cfqd->cfq_back_max = cfq_back_max;
- cfqd->cfq_back_penalty = cfq_back_penalty;
- cfqd->cfq_slice[0] = cfq_slice_async;
- cfqd->cfq_slice[1] = cfq_slice_sync;
- cfqd->cfq_target_latency = cfq_target_latency;
- cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
- cfqd->cfq_slice_idle = cfq_slice_idle;
- cfqd->cfq_group_idle = cfq_group_idle;
- cfqd->cfq_latency = 1;
- cfqd->hw_tag = -1;
- /*
-	 * we optimistically start by assuming sync ops weren't delayed in
-	 * the last second, in order to have a larger depth for async
-	 * operations.
- */
- cfqd->last_delayed_sync = ktime_get_ns() - NSEC_PER_SEC;
- return 0;
-
-out_free:
- kfree(cfqd);
- kobject_put(&eq->kobj);
- return ret;
-}
-
-static void cfq_registered_queue(struct request_queue *q)
-{
- struct elevator_queue *e = q->elevator;
- struct cfq_data *cfqd = e->elevator_data;
-
- /*
- * Default to IOPS mode with no idling for SSDs
- */
- if (blk_queue_nonrot(q))
- cfqd->cfq_slice_idle = 0;
- wbt_disable_default(q);
-}
-
-/*
- * sysfs parts below -->
- */
-static ssize_t
-cfq_var_show(unsigned int var, char *page)
-{
- return sprintf(page, "%u\n", var);
-}
-
-static void
-cfq_var_store(unsigned int *var, const char *page)
-{
- char *p = (char *) page;
-
- *var = simple_strtoul(p, &p, 10);
-}
-
-#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-static ssize_t __FUNC(struct elevator_queue *e, char *page) \
-{ \
- struct cfq_data *cfqd = e->elevator_data; \
- u64 __data = __VAR; \
- if (__CONV) \
- __data = div_u64(__data, NSEC_PER_MSEC); \
- return cfq_var_show(__data, (page)); \
-}
-SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
-SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
-SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
-SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
-SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
-SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
-SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
-SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
-SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
-SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
-SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
-SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
-#undef SHOW_FUNCTION
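-
-/*
- * E.g. SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1)
- * expands to a show routine that prints cfq_slice_idle converted from
- * nanoseconds to milliseconds (__CONV != 0); the _us variants below
- * divide by NSEC_PER_USEC instead.
- */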
-
-#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \
-static ssize_t __FUNC(struct elevator_queue *e, char *page) \
-{ \
- struct cfq_data *cfqd = e->elevator_data; \
- u64 __data = __VAR; \
- __data = div_u64(__data, NSEC_PER_USEC); \
- return cfq_var_show(__data, (page)); \
-}
-USEC_SHOW_FUNCTION(cfq_slice_idle_us_show, cfqd->cfq_slice_idle);
-USEC_SHOW_FUNCTION(cfq_group_idle_us_show, cfqd->cfq_group_idle);
-USEC_SHOW_FUNCTION(cfq_slice_sync_us_show, cfqd->cfq_slice[1]);
-USEC_SHOW_FUNCTION(cfq_slice_async_us_show, cfqd->cfq_slice[0]);
-USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
-#undef USEC_SHOW_FUNCTION
-
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
-{ \
- struct cfq_data *cfqd = e->elevator_data; \
- unsigned int __data, __min = (MIN), __max = (MAX); \
- \
- cfq_var_store(&__data, (page)); \
- if (__data < __min) \
- __data = __min; \
- else if (__data > __max) \
- __data = __max; \
- if (__CONV) \
- *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
- else \
- *(__PTR) = __data; \
- return count; \
-}
-STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
- UINT_MAX, 1);
-STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
- UINT_MAX, 1);
-STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
-STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
- UINT_MAX, 0);
-STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
-STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
- UINT_MAX, 0);
-STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
-STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
-#undef STORE_FUNCTION
-
-#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
-static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
-{ \
- struct cfq_data *cfqd = e->elevator_data; \
- unsigned int __data, __min = (MIN), __max = (MAX); \
- \
- cfq_var_store(&__data, (page)); \
- if (__data < __min) \
- __data = __min; \
- else if (__data > __max) \
- __data = __max; \
- *(__PTR) = (u64)__data * NSEC_PER_USEC; \
- return count; \
-}
-USEC_STORE_FUNCTION(cfq_slice_idle_us_store, &cfqd->cfq_slice_idle, 0, UINT_MAX);
-USEC_STORE_FUNCTION(cfq_group_idle_us_store, &cfqd->cfq_group_idle, 0, UINT_MAX);
-USEC_STORE_FUNCTION(cfq_slice_sync_us_store, &cfqd->cfq_slice[1], 1, UINT_MAX);
-USEC_STORE_FUNCTION(cfq_slice_async_us_store, &cfqd->cfq_slice[0], 1, UINT_MAX);
-USEC_STORE_FUNCTION(cfq_target_latency_us_store, &cfqd->cfq_target_latency, 1, UINT_MAX);
-#undef USEC_STORE_FUNCTION
-
-#define CFQ_ATTR(name) \
- __ATTR(name, 0644, cfq_##name##_show, cfq_##name##_store)
-
-static struct elv_fs_entry cfq_attrs[] = {
- CFQ_ATTR(quantum),
- CFQ_ATTR(fifo_expire_sync),
- CFQ_ATTR(fifo_expire_async),
- CFQ_ATTR(back_seek_max),
- CFQ_ATTR(back_seek_penalty),
- CFQ_ATTR(slice_sync),
- CFQ_ATTR(slice_sync_us),
- CFQ_ATTR(slice_async),
- CFQ_ATTR(slice_async_us),
- CFQ_ATTR(slice_async_rq),
- CFQ_ATTR(slice_idle),
- CFQ_ATTR(slice_idle_us),
- CFQ_ATTR(group_idle),
- CFQ_ATTR(group_idle_us),
- CFQ_ATTR(low_latency),
- CFQ_ATTR(target_latency),
- CFQ_ATTR(target_latency_us),
- __ATTR_NULL
-};
-
-static struct elevator_type iosched_cfq = {
- .ops.sq = {
- .elevator_merge_fn = cfq_merge,
- .elevator_merged_fn = cfq_merged_request,
- .elevator_merge_req_fn = cfq_merged_requests,
- .elevator_allow_bio_merge_fn = cfq_allow_bio_merge,
- .elevator_allow_rq_merge_fn = cfq_allow_rq_merge,
- .elevator_bio_merged_fn = cfq_bio_merged,
- .elevator_dispatch_fn = cfq_dispatch_requests,
- .elevator_add_req_fn = cfq_insert_request,
- .elevator_activate_req_fn = cfq_activate_request,
- .elevator_deactivate_req_fn = cfq_deactivate_request,
- .elevator_completed_req_fn = cfq_completed_request,
- .elevator_former_req_fn = elv_rb_former_request,
- .elevator_latter_req_fn = elv_rb_latter_request,
- .elevator_init_icq_fn = cfq_init_icq,
- .elevator_exit_icq_fn = cfq_exit_icq,
- .elevator_set_req_fn = cfq_set_request,
- .elevator_put_req_fn = cfq_put_request,
- .elevator_may_queue_fn = cfq_may_queue,
- .elevator_init_fn = cfq_init_queue,
- .elevator_exit_fn = cfq_exit_queue,
- .elevator_registered_fn = cfq_registered_queue,
- },
- .icq_size = sizeof(struct cfq_io_cq),
- .icq_align = __alignof__(struct cfq_io_cq),
- .elevator_attrs = cfq_attrs,
- .elevator_name = "cfq",
- .elevator_owner = THIS_MODULE,
-};
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-static struct blkcg_policy blkcg_policy_cfq = {
- .dfl_cftypes = cfq_blkcg_files,
- .legacy_cftypes = cfq_blkcg_legacy_files,
-
- .cpd_alloc_fn = cfq_cpd_alloc,
- .cpd_init_fn = cfq_cpd_init,
- .cpd_free_fn = cfq_cpd_free,
- .cpd_bind_fn = cfq_cpd_bind,
-
- .pd_alloc_fn = cfq_pd_alloc,
- .pd_init_fn = cfq_pd_init,
- .pd_offline_fn = cfq_pd_offline,
- .pd_free_fn = cfq_pd_free,
- .pd_reset_stats_fn = cfq_pd_reset_stats,
-};
-#endif
-
-static int __init cfq_init(void)
-{
- int ret;
-
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- ret = blkcg_policy_register(&blkcg_policy_cfq);
- if (ret)
- return ret;
-#else
- cfq_group_idle = 0;
-#endif
-
- ret = -ENOMEM;
- cfq_pool = KMEM_CACHE(cfq_queue, 0);
- if (!cfq_pool)
- goto err_pol_unreg;
-
- ret = elv_register(&iosched_cfq);
- if (ret)
- goto err_free_pool;
-
- return 0;
-
-err_free_pool:
- kmem_cache_destroy(cfq_pool);
-err_pol_unreg:
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- blkcg_policy_unregister(&blkcg_policy_cfq);
-#endif
- return ret;
-}
-
-static void __exit cfq_exit(void)
-{
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
- blkcg_policy_unregister(&blkcg_policy_cfq);
-#endif
- elv_unregister(&iosched_cfq);
- kmem_cache_destroy(cfq_pool);
-}
-
-module_init(cfq_init);
-module_exit(cfq_exit);
-
-MODULE_AUTHOR("Jens Axboe");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
deleted file mode 100644
index ef2f1f09e9b3..000000000000
--- a/block/deadline-iosched.c
+++ /dev/null
@@ -1,560 +0,0 @@
-/*
- * Deadline i/o scheduler.
- *
- * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
- */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
-#include <linux/rbtree.h>
-
-/*
- * See Documentation/block/deadline-iosched.txt
- */
-static const int read_expire = HZ / 2; /* max time before a read is submitted. */
-static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-static const int writes_starved = 2; /* max times reads can starve a write */
-static const int fifo_batch = 16; /* # of sequential requests treated as one
- by the above parameters. For throughput. */
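-
-/*
- * E.g. read_expire = HZ / 2 is 500ms at any HZ: a read that has sat in
- * the fifo that long forces dispatch to restart from the fifo head.
- * writes_starved = 2 means reads are preferred over eligible writes at
- * most twice in a row before a write batch is started.
- */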
-
-struct deadline_data {
- /*
- * run time data
- */
-
- /*
-	 * requests are present on both sort_list and fifo_list
- */
- struct rb_root sort_list[2];
- struct list_head fifo_list[2];
-
- /*
-	 * next in sort order; read, write, or both may be NULL
- */
- struct request *next_rq[2];
- unsigned int batching; /* number of sequential requests made */
- unsigned int starved; /* times reads have starved writes */
-
- /*
- * settings that change how the i/o scheduler behaves
- */
- int fifo_expire[2];
- int fifo_batch;
- int writes_starved;
- int front_merges;
-};
-
-static inline struct rb_root *
-deadline_rb_root(struct deadline_data *dd, struct request *rq)
-{
- return &dd->sort_list[rq_data_dir(rq)];
-}
-
-/*
- * get the request after `rq' in sector-sorted order
- */
-static inline struct request *
-deadline_latter_request(struct request *rq)
-{
- struct rb_node *node = rb_next(&rq->rb_node);
-
- if (node)
- return rb_entry_rq(node);
-
- return NULL;
-}
-
-static void
-deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
-{
- struct rb_root *root = deadline_rb_root(dd, rq);
-
- elv_rb_add(root, rq);
-}
-
-static inline void
-deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
-{
- const int data_dir = rq_data_dir(rq);
-
- if (dd->next_rq[data_dir] == rq)
- dd->next_rq[data_dir] = deadline_latter_request(rq);
-
- elv_rb_del(deadline_rb_root(dd, rq), rq);
-}
-
-/*
- * add rq to rbtree and fifo
- */
-static void
-deadline_add_request(struct request_queue *q, struct request *rq)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
- const int data_dir = rq_data_dir(rq);
-
- /*
- * This may be a requeue of a write request that has locked its
- * target zone. If it is the case, this releases the zone lock.
- */
- blk_req_zone_write_unlock(rq);
-
- deadline_add_rq_rb(dd, rq);
-
- /*
- * set expire time and add to fifo list
- */
- rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
- list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
-}
-
-/*
- * remove rq from rbtree and fifo.
- */
-static void deadline_remove_request(struct request_queue *q, struct request *rq)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
-
- rq_fifo_clear(rq);
- deadline_del_rq_rb(dd, rq);
-}
-
-static enum elv_merge
-deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
- struct request *__rq;
-
- /*
- * check for front merge
- */
- if (dd->front_merges) {
- sector_t sector = bio_end_sector(bio);
-
- __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
- if (__rq) {
- BUG_ON(sector != blk_rq_pos(__rq));
-
- if (elv_bio_merge_ok(__rq, bio)) {
- *req = __rq;
- return ELEVATOR_FRONT_MERGE;
- }
- }
- }
-
- return ELEVATOR_NO_MERGE;
-}
-
-static void deadline_merged_request(struct request_queue *q,
- struct request *req, enum elv_merge type)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
-
- /*
-	 * if the merge was a front merge, we need to reposition the request
- */
- if (type == ELEVATOR_FRONT_MERGE) {
- elv_rb_del(deadline_rb_root(dd, req), req);
- deadline_add_rq_rb(dd, req);
- }
-}
-
-static void
-deadline_merged_requests(struct request_queue *q, struct request *req,
- struct request *next)
-{
- /*
-	 * if next expires before req, assign its expire time to req and
-	 * move req into next's position in the fifo (next will be deleted)
- */
- if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
- if (time_before((unsigned long)next->fifo_time,
- (unsigned long)req->fifo_time)) {
- list_move(&req->queuelist, &next->queuelist);
- req->fifo_time = next->fifo_time;
- }
- }
-
- /*
- * kill knowledge of next, this one is a goner
- */
- deadline_remove_request(q, next);
-}
-
-/*
- * move request from sort list to dispatch queue.
- */
-static inline void
-deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
-{
- struct request_queue *q = rq->q;
-
- /*
- * For a zoned block device, write requests must write lock their
- * target zone.
- */
- blk_req_zone_write_lock(rq);
-
- deadline_remove_request(q, rq);
- elv_dispatch_add_tail(q, rq);
-}
-
-/*
- * move an entry to dispatch queue
- */
-static void
-deadline_move_request(struct deadline_data *dd, struct request *rq)
-{
- const int data_dir = rq_data_dir(rq);
-
- dd->next_rq[READ] = NULL;
- dd->next_rq[WRITE] = NULL;
- dd->next_rq[data_dir] = deadline_latter_request(rq);
-
- /*
- * take it off the sort and fifo list, move
- * to dispatch queue
- */
- deadline_move_to_dispatch(dd, rq);
-}
-
-/*
- * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
- * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
- */
-static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
-{
- struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);
-
- /*
- * rq is expired!
- */
- if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
- return 1;
-
- return 0;
-}
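-
-/*
- * time_after_eq() is jiffies-wraparound safe, and the fifo head is
- * always the oldest request because deadline_add_request() appends at
- * the tail with a monotonically later expiry.
- */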
-
-/*
- * For the specified data direction, return the next request to dispatch using
- * arrival ordered lists.
- */
-static struct request *
-deadline_fifo_request(struct deadline_data *dd, int data_dir)
-{
- struct request *rq;
-
- if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
- return NULL;
-
- if (list_empty(&dd->fifo_list[data_dir]))
- return NULL;
-
- rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
- if (data_dir == READ || !blk_queue_is_zoned(rq->q))
- return rq;
-
- /*
- * Look for a write request that can be dispatched, that is one with
- * an unlocked target zone.
- */
- list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
- if (blk_req_can_dispatch_to_zone(rq))
- return rq;
- }
-
- return NULL;
-}
-
-/*
- * For the specified data direction, return the next request to dispatch using
- * sector position sorted lists.
- */
-static struct request *
-deadline_next_request(struct deadline_data *dd, int data_dir)
-{
- struct request *rq;
-
- if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
- return NULL;
-
- rq = dd->next_rq[data_dir];
- if (!rq)
- return NULL;
-
- if (data_dir == READ || !blk_queue_is_zoned(rq->q))
- return rq;
-
- /*
- * Look for a write request that can be dispatched, that is one with
- * an unlocked target zone.
- */
- while (rq) {
- if (blk_req_can_dispatch_to_zone(rq))
- return rq;
- rq = deadline_latter_request(rq);
- }
-
- return NULL;
-}
-
-/*
- * deadline_dispatch_requests selects the best request according to
- * read/write expire, fifo_batch, etc
- */
-static int deadline_dispatch_requests(struct request_queue *q, int force)
-{
- struct deadline_data *dd = q->elevator->elevator_data;
- const int reads = !list_empty(&dd->fifo_list[READ]);
- const int writes = !list_empty(&dd->fifo_list[WRITE]);
- struct request *rq, *next_rq;
- int data_dir;
-
- /*
- * batches are currently reads XOR writes
- */
- rq = deadline_next_request(dd, WRITE);
- if (!rq)
- rq = deadline_next_request(dd, READ);
-
- if (rq && dd->batching < dd->fifo_batch)
-		/* we have a next request and are still entitled to batch */
- goto dispatch_request;
-
- /*
- * at this point we are not running a batch. select the appropriate
- * data direction (read / write)
- */
-
- if (reads) {
- BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));
-
- if (deadline_fifo_request(dd, WRITE) &&
- (dd->starved++ >= dd->writes_starved))
- goto dispatch_writes;
-
- data_dir = READ;
-
- goto dispatch_find_request;
- }
-
- /*
-	 * there are either no reads, or writes have been starved
- */
-
- if (writes) {
-dispatch_writes:
- BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));
-
- dd->starved = 0;
-
- data_dir = WRITE;
-
- goto dispatch_find_request;
- }
-
- return 0;
-
-dispatch_find_request:
- /*
- * we are not running a batch, find best request for selected data_dir
- */
- next_rq = deadline_next_request(dd, data_dir);
- if (deadline_check_fifo(dd, data_dir) || !next_rq) {
- /*
- * A deadline has expired, the last request was in the other
- * direction, or we have run out of higher-sectored requests.
- * Start again from the request with the earliest expiry time.
- */
- rq = deadline_fifo_request(dd, data_dir);
- } else {
- /*
- * The last req was the same dir and we have a next request in
- * sort order. No expired requests so continue on from here.
- */
- rq = next_rq;
- }
-
- /*
- * For a zoned block device, if we only have writes queued and none of
- * them can be dispatched, rq will be NULL.
- */
- if (!rq)
- return 0;
-
- dd->batching = 0;
-
-dispatch_request:
- /*
-	 * rq is the selected request.
- */
- dd->batching++;
- deadline_move_request(dd, rq);
-
- return 1;
-}
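The selection policy above is easy to see with a toy model: reads are preferred, but the starved counter lets at most writes_starved read batches go by while a write is waiting. A minimal userspace sketch of that policy (hypothetical constant and loop, not the kernel code):

#include <stdio.h>

#define WRITES_STARVED 2	/* toy stand-in for dd->writes_starved */

int main(void)
{
	int starved = 0;

	for (int batch = 0; batch < 5; batch++) {
		int writes_waiting = 1;	/* a write is always pending here */

		if (writes_waiting && starved++ >= WRITES_STARVED) {
			starved = 0;	/* writes get their turn */
			printf("batch %d: WRITE\n", batch);
		} else {
			printf("batch %d: READ\n", batch);
		}
	}
	return 0;	/* prints READ, READ, WRITE, READ, READ */
}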
-
-/*
- * For zoned block devices, write unlock the target zone of completed
- * write requests.
- */
-static void
-deadline_completed_request(struct request_queue *q, struct request *rq)
-{
- blk_req_zone_write_unlock(rq);
-}
-
-static void deadline_exit_queue(struct elevator_queue *e)
-{
- struct deadline_data *dd = e->elevator_data;
-
- BUG_ON(!list_empty(&dd->fifo_list[READ]));
- BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
-
- kfree(dd);
-}
-
-/*
- * initialize elevator private data (deadline_data).
- */
-static int deadline_init_queue(struct request_queue *q, struct elevator_type *e)
-{
- struct deadline_data *dd;
- struct elevator_queue *eq;
-
- eq = elevator_alloc(q, e);
- if (!eq)
- return -ENOMEM;
-
- dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
- if (!dd) {
- kobject_put(&eq->kobj);
- return -ENOMEM;
- }
- eq->elevator_data = dd;
-
- INIT_LIST_HEAD(&dd->fifo_list[READ]);
- INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
- dd->sort_list[READ] = RB_ROOT;
- dd->sort_list[WRITE] = RB_ROOT;
- dd->fifo_expire[READ] = read_expire;
- dd->fifo_expire[WRITE] = write_expire;
- dd->writes_starved = writes_starved;
- dd->front_merges = 1;
- dd->fifo_batch = fifo_batch;
-
- spin_lock_irq(q->queue_lock);
- q->elevator = eq;
- spin_unlock_irq(q->queue_lock);
- return 0;
-}
-
-/*
- * sysfs parts below
- */
-
-static ssize_t
-deadline_var_show(int var, char *page)
-{
- return sprintf(page, "%d\n", var);
-}
-
-static void
-deadline_var_store(int *var, const char *page)
-{
- char *p = (char *) page;
-
- *var = simple_strtol(p, &p, 10);
-}
-
-#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-static ssize_t __FUNC(struct elevator_queue *e, char *page) \
-{ \
- struct deadline_data *dd = e->elevator_data; \
- int __data = __VAR; \
- if (__CONV) \
- __data = jiffies_to_msecs(__data); \
- return deadline_var_show(__data, (page)); \
-}
-SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
-SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
-SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
-SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
-SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
-#undef SHOW_FUNCTION
-
-#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
-{ \
- struct deadline_data *dd = e->elevator_data; \
- int __data; \
- deadline_var_store(&__data, (page)); \
- if (__data < (MIN)) \
- __data = (MIN); \
- else if (__data > (MAX)) \
- __data = (MAX); \
- if (__CONV) \
- *(__PTR) = msecs_to_jiffies(__data); \
- else \
- *(__PTR) = __data; \
- return count; \
-}
-STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
-STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
-STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
-#undef STORE_FUNCTION
-
-#define DD_ATTR(name) \
- __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)
-
-static struct elv_fs_entry deadline_attrs[] = {
- DD_ATTR(read_expire),
- DD_ATTR(write_expire),
- DD_ATTR(writes_starved),
- DD_ATTR(front_merges),
- DD_ATTR(fifo_batch),
- __ATTR_NULL
-};
-
-static struct elevator_type iosched_deadline = {
- .ops.sq = {
- .elevator_merge_fn = deadline_merge,
- .elevator_merged_fn = deadline_merged_request,
- .elevator_merge_req_fn = deadline_merged_requests,
- .elevator_dispatch_fn = deadline_dispatch_requests,
- .elevator_completed_req_fn = deadline_completed_request,
- .elevator_add_req_fn = deadline_add_request,
- .elevator_former_req_fn = elv_rb_former_request,
- .elevator_latter_req_fn = elv_rb_latter_request,
- .elevator_init_fn = deadline_init_queue,
- .elevator_exit_fn = deadline_exit_queue,
- },
-
- .elevator_attrs = deadline_attrs,
- .elevator_name = "deadline",
- .elevator_owner = THIS_MODULE,
-};
-
-static int __init deadline_init(void)
-{
- return elv_register(&iosched_deadline);
-}
-
-static void __exit deadline_exit(void)
-{
- elv_unregister(&iosched_deadline);
-}
-
-module_init(deadline_init);
-module_exit(deadline_exit);
-
-MODULE_AUTHOR("Jens Axboe");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("deadline IO scheduler");
diff --git a/block/elevator.c b/block/elevator.c
index 8fdcd64ae12e..f05e90d4e695 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -61,10 +61,8 @@ static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
- if (e->uses_mq && e->type->ops.mq.allow_merge)
- return e->type->ops.mq.allow_merge(q, rq, bio);
- else if (!e->uses_mq && e->type->ops.sq.elevator_allow_bio_merge_fn)
- return e->type->ops.sq.elevator_allow_bio_merge_fn(q, rq, bio);
+ if (e->type->ops.allow_merge)
+ return e->type->ops.allow_merge(q, rq, bio);
return 1;
}
@@ -95,14 +93,14 @@ static bool elevator_match(const struct elevator_type *e, const char *name)
}
/*
- * Return scheduler with name 'name' and with matching 'mq capability
+ * Return scheduler with name 'name'
*/
-static struct elevator_type *elevator_find(const char *name, bool mq)
+static struct elevator_type *elevator_find(const char *name)
{
struct elevator_type *e;
list_for_each_entry(e, &elv_list, list) {
- if (elevator_match(e, name) && (mq == e->uses_mq))
+ if (elevator_match(e, name))
return e;
}
@@ -121,12 +119,12 @@ static struct elevator_type *elevator_get(struct request_queue *q,
spin_lock(&elv_list_lock);
- e = elevator_find(name, q->mq_ops != NULL);
+ e = elevator_find(name);
if (!e && try_loading) {
spin_unlock(&elv_list_lock);
request_module("%s-iosched", name);
spin_lock(&elv_list_lock);
- e = elevator_find(name, q->mq_ops != NULL);
+ e = elevator_find(name);
}
if (e && !try_module_get(e->elevator_owner))
@@ -150,26 +148,6 @@ static int __init elevator_setup(char *str)
__setup("elevator=", elevator_setup);
-/* called during boot to load the elevator chosen by the elevator param */
-void __init load_default_elevator_module(void)
-{
- struct elevator_type *e;
-
- if (!chosen_elevator[0])
- return;
-
- /*
- * Boot parameter is deprecated, we haven't supported that for MQ.
- * Only look for non-mq schedulers from here.
- */
- spin_lock(&elv_list_lock);
- e = elevator_find(chosen_elevator, false);
- spin_unlock(&elv_list_lock);
-
- if (!e)
- request_module("%s-iosched", chosen_elevator);
-}
-
static struct kobj_type elv_ktype;
struct elevator_queue *elevator_alloc(struct request_queue *q,
@@ -185,7 +163,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
kobject_init(&eq->kobj, &elv_ktype);
mutex_init(&eq->sysfs_lock);
hash_init(eq->hash);
- eq->uses_mq = e->uses_mq;
return eq;
}
@@ -200,54 +177,11 @@ static void elevator_release(struct kobject *kobj)
kfree(e);
}
-/*
- * Use the default elevator specified by config boot param for non-mq devices,
- * or by config option. Don't try to load modules as we could be running off
- * async and request_module() isn't allowed from async.
- */
-int elevator_init(struct request_queue *q)
-{
- struct elevator_type *e = NULL;
- int err = 0;
-
- /*
- * q->sysfs_lock must be held to provide mutual exclusion between
- * elevator_switch() and here.
- */
- mutex_lock(&q->sysfs_lock);
- if (unlikely(q->elevator))
- goto out_unlock;
-
- if (*chosen_elevator) {
- e = elevator_get(q, chosen_elevator, false);
- if (!e)
- printk(KERN_ERR "I/O scheduler %s not found\n",
- chosen_elevator);
- }
-
- if (!e)
- e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
- if (!e) {
- printk(KERN_ERR
- "Default I/O scheduler not found. Using noop.\n");
- e = elevator_get(q, "noop", false);
- }
-
- err = e->ops.sq.elevator_init_fn(q, e);
- if (err)
- elevator_put(e);
-out_unlock:
- mutex_unlock(&q->sysfs_lock);
- return err;
-}
-
void elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
mutex_lock(&e->sysfs_lock);
- if (e->uses_mq && e->type->ops.mq.exit_sched)
+ if (e->type->ops.exit_sched)
blk_mq_exit_sched(q, e);
- else if (!e->uses_mq && e->type->ops.sq.elevator_exit_fn)
- e->type->ops.sq.elevator_exit_fn(e);
mutex_unlock(&e->sysfs_lock);
kobject_put(&e->kobj);
@@ -356,68 +290,6 @@ struct request *elv_rb_find(struct rb_root *root, sector_t sector)
}
EXPORT_SYMBOL(elv_rb_find);
-/*
- * Insert rq into dispatch queue of q. Queue lock must be held on
- * entry. rq is sorted into the dispatch queue. To be used by
- * specific elevators.
- */
-void elv_dispatch_sort(struct request_queue *q, struct request *rq)
-{
- sector_t boundary;
- struct list_head *entry;
-
- if (q->last_merge == rq)
- q->last_merge = NULL;
-
- elv_rqhash_del(q, rq);
-
- q->nr_sorted--;
-
- boundary = q->end_sector;
- list_for_each_prev(entry, &q->queue_head) {
- struct request *pos = list_entry_rq(entry);
-
- if (req_op(rq) != req_op(pos))
- break;
- if (rq_data_dir(rq) != rq_data_dir(pos))
- break;
- if (pos->rq_flags & (RQF_STARTED | RQF_SOFTBARRIER))
- break;
- if (blk_rq_pos(rq) >= boundary) {
- if (blk_rq_pos(pos) < boundary)
- continue;
- } else {
- if (blk_rq_pos(pos) >= boundary)
- break;
- }
- if (blk_rq_pos(rq) >= blk_rq_pos(pos))
- break;
- }
-
- list_add(&rq->queuelist, entry);
-}
-EXPORT_SYMBOL(elv_dispatch_sort);
-
-/*
- * Insert rq into dispatch queue of q. Queue lock must be held on
- * entry. rq is added to the back of the dispatch queue. To be used by
- * specific elevators.
- */
-void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
-{
- if (q->last_merge == rq)
- q->last_merge = NULL;
-
- elv_rqhash_del(q, rq);
-
- q->nr_sorted--;
-
- q->end_sector = rq_end_sector(rq);
- q->boundary_rq = rq;
- list_add_tail(&rq->queuelist, &q->queue_head);
-}
-EXPORT_SYMBOL(elv_dispatch_add_tail);
-
enum elv_merge elv_merge(struct request_queue *q, struct request **req,
struct bio *bio)
{
@@ -457,10 +329,8 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
return ELEVATOR_BACK_MERGE;
}
- if (e->uses_mq && e->type->ops.mq.request_merge)
- return e->type->ops.mq.request_merge(q, req, bio);
- else if (!e->uses_mq && e->type->ops.sq.elevator_merge_fn)
- return e->type->ops.sq.elevator_merge_fn(q, req, bio);
+ if (e->type->ops.request_merge)
+ return e->type->ops.request_merge(q, req, bio);
return ELEVATOR_NO_MERGE;
}
@@ -511,10 +381,8 @@ void elv_merged_request(struct request_queue *q, struct request *rq,
{
struct elevator_queue *e = q->elevator;
- if (e->uses_mq && e->type->ops.mq.request_merged)
- e->type->ops.mq.request_merged(q, rq, type);
- else if (!e->uses_mq && e->type->ops.sq.elevator_merged_fn)
- e->type->ops.sq.elevator_merged_fn(q, rq, type);
+ if (e->type->ops.request_merged)
+ e->type->ops.request_merged(q, rq, type);
if (type == ELEVATOR_BACK_MERGE)
elv_rqhash_reposition(q, rq);
@@ -526,176 +394,20 @@ void elv_merge_requests(struct request_queue *q, struct request *rq,
struct request *next)
{
struct elevator_queue *e = q->elevator;
- bool next_sorted = false;
-
- if (e->uses_mq && e->type->ops.mq.requests_merged)
- e->type->ops.mq.requests_merged(q, rq, next);
- else if (e->type->ops.sq.elevator_merge_req_fn) {
- next_sorted = (__force bool)(next->rq_flags & RQF_SORTED);
- if (next_sorted)
- e->type->ops.sq.elevator_merge_req_fn(q, rq, next);
- }
- elv_rqhash_reposition(q, rq);
-
- if (next_sorted) {
- elv_rqhash_del(q, next);
- q->nr_sorted--;
- }
+ if (e->type->ops.requests_merged)
+ e->type->ops.requests_merged(q, rq, next);
+ elv_rqhash_reposition(q, rq);
q->last_merge = rq;
}
-void elv_bio_merged(struct request_queue *q, struct request *rq,
- struct bio *bio)
-{
- struct elevator_queue *e = q->elevator;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return;
-
- if (e->type->ops.sq.elevator_bio_merged_fn)
- e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
-}
-
-void elv_requeue_request(struct request_queue *q, struct request *rq)
-{
- /*
- * it already went through dequeue, we need to decrement the
- * in_flight count again
- */
- if (blk_account_rq(rq)) {
- q->in_flight[rq_is_sync(rq)]--;
- if (rq->rq_flags & RQF_SORTED)
- elv_deactivate_rq(q, rq);
- }
-
- rq->rq_flags &= ~RQF_STARTED;
-
- blk_pm_requeue_request(rq);
-
- __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
-}
-
-void elv_drain_elevator(struct request_queue *q)
-{
- struct elevator_queue *e = q->elevator;
- static int printed;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return;
-
- lockdep_assert_held(q->queue_lock);
-
- while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
- ;
- if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10) {
- printk(KERN_ERR "%s: forced dispatching is broken "
- "(nr_sorted=%u), please report this\n",
- q->elevator->type->elevator_name, q->nr_sorted);
- }
-}
-
-void __elv_add_request(struct request_queue *q, struct request *rq, int where)
-{
- trace_block_rq_insert(q, rq);
-
- blk_pm_add_request(q, rq);
-
- rq->q = q;
-
- if (rq->rq_flags & RQF_SOFTBARRIER) {
- /* barriers are scheduling boundary, update end_sector */
- if (!blk_rq_is_passthrough(rq)) {
- q->end_sector = rq_end_sector(rq);
- q->boundary_rq = rq;
- }
- } else if (!(rq->rq_flags & RQF_ELVPRIV) &&
- (where == ELEVATOR_INSERT_SORT ||
- where == ELEVATOR_INSERT_SORT_MERGE))
- where = ELEVATOR_INSERT_BACK;
-
- switch (where) {
- case ELEVATOR_INSERT_REQUEUE:
- case ELEVATOR_INSERT_FRONT:
- rq->rq_flags |= RQF_SOFTBARRIER;
- list_add(&rq->queuelist, &q->queue_head);
- break;
-
- case ELEVATOR_INSERT_BACK:
- rq->rq_flags |= RQF_SOFTBARRIER;
- elv_drain_elevator(q);
- list_add_tail(&rq->queuelist, &q->queue_head);
- /*
- * We kick the queue here for the following reasons.
- * - The elevator might have returned NULL previously
- * to delay requests and returned them now. As the
- * queue wasn't empty before this request, ll_rw_blk
- * won't run the queue on return, resulting in hang.
- * - Usually, back inserted requests won't be merged
- * with anything. There's no point in delaying queue
- * processing.
- */
- __blk_run_queue(q);
- break;
-
- case ELEVATOR_INSERT_SORT_MERGE:
- /*
- * If we succeed in merging this request with one in the
- * queue already, we are done - rq has now been freed,
- * so no need to do anything further.
- */
- if (elv_attempt_insert_merge(q, rq))
- break;
- /* fall through */
- case ELEVATOR_INSERT_SORT:
- BUG_ON(blk_rq_is_passthrough(rq));
- rq->rq_flags |= RQF_SORTED;
- q->nr_sorted++;
- if (rq_mergeable(rq)) {
- elv_rqhash_add(q, rq);
- if (!q->last_merge)
- q->last_merge = rq;
- }
-
- /*
- * Some ioscheds (cfq) run q->request_fn directly, so
- * rq cannot be accessed after calling
- * elevator_add_req_fn.
- */
- q->elevator->type->ops.sq.elevator_add_req_fn(q, rq);
- break;
-
- case ELEVATOR_INSERT_FLUSH:
- rq->rq_flags |= RQF_SOFTBARRIER;
- blk_insert_flush(rq);
- break;
- default:
- printk(KERN_ERR "%s: bad insertion point %d\n",
- __func__, where);
- BUG();
- }
-}
-EXPORT_SYMBOL(__elv_add_request);
-
-void elv_add_request(struct request_queue *q, struct request *rq, int where)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- __elv_add_request(q, rq, where);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(elv_add_request);
-
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
- if (e->uses_mq && e->type->ops.mq.next_request)
- return e->type->ops.mq.next_request(q, rq);
- else if (!e->uses_mq && e->type->ops.sq.elevator_latter_req_fn)
- return e->type->ops.sq.elevator_latter_req_fn(q, rq);
+ if (e->type->ops.next_request)
+ return e->type->ops.next_request(q, rq);
return NULL;
}
@@ -704,66 +416,10 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
struct elevator_queue *e = q->elevator;
- if (e->uses_mq && e->type->ops.mq.former_request)
- return e->type->ops.mq.former_request(q, rq);
- if (!e->uses_mq && e->type->ops.sq.elevator_former_req_fn)
- return e->type->ops.sq.elevator_former_req_fn(q, rq);
- return NULL;
-}
-
-int elv_set_request(struct request_queue *q, struct request *rq,
- struct bio *bio, gfp_t gfp_mask)
-{
- struct elevator_queue *e = q->elevator;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return 0;
-
- if (e->type->ops.sq.elevator_set_req_fn)
- return e->type->ops.sq.elevator_set_req_fn(q, rq, bio, gfp_mask);
- return 0;
-}
-
-void elv_put_request(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return;
-
- if (e->type->ops.sq.elevator_put_req_fn)
- e->type->ops.sq.elevator_put_req_fn(rq);
-}
-
-int elv_may_queue(struct request_queue *q, unsigned int op)
-{
- struct elevator_queue *e = q->elevator;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return 0;
-
- if (e->type->ops.sq.elevator_may_queue_fn)
- return e->type->ops.sq.elevator_may_queue_fn(q, op);
-
- return ELV_MQUEUE_MAY;
-}
-
-void elv_completed_request(struct request_queue *q, struct request *rq)
-{
- struct elevator_queue *e = q->elevator;
-
- if (WARN_ON_ONCE(e->uses_mq))
- return;
+ if (e->type->ops.former_request)
+ return e->type->ops.former_request(q, rq);
- /*
- * request is released from the driver, io must be done
- */
- if (blk_account_rq(rq)) {
- q->in_flight[rq_is_sync(rq)]--;
- if ((rq->rq_flags & RQF_SORTED) &&
- e->type->ops.sq.elevator_completed_req_fn)
- e->type->ops.sq.elevator_completed_req_fn(q, rq);
- }
+ return NULL;
}
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
@@ -832,8 +488,6 @@ int elv_register_queue(struct request_queue *q)
}
kobject_uevent(&e->kobj, KOBJ_ADD);
e->registered = 1;
- if (!e->uses_mq && e->type->ops.sq.elevator_registered_fn)
- e->type->ops.sq.elevator_registered_fn(q);
}
return error;
}
@@ -873,7 +527,7 @@ int elv_register(struct elevator_type *e)
/* register, don't allow duplicate names */
spin_lock(&elv_list_lock);
- if (elevator_find(e->elevator_name, e->uses_mq)) {
+ if (elevator_find(e->elevator_name)) {
spin_unlock(&elv_list_lock);
kmem_cache_destroy(e->icq_cache);
return -EBUSY;
@@ -881,12 +535,6 @@ int elv_register(struct elevator_type *e)
list_add_tail(&e->list, &elv_list);
spin_unlock(&elv_list_lock);
- /* print pretty message */
- if (elevator_match(e, chosen_elevator) ||
- (!*chosen_elevator &&
- elevator_match(e, CONFIG_DEFAULT_IOSCHED)))
- def = " (default)";
-
printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
def);
return 0;
@@ -989,71 +637,17 @@ out_unlock:
*/
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
- struct elevator_queue *old = q->elevator;
- bool old_registered = false;
int err;
lockdep_assert_held(&q->sysfs_lock);
- if (q->mq_ops) {
- blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
-
- err = elevator_switch_mq(q, new_e);
-
- blk_mq_unquiesce_queue(q);
- blk_mq_unfreeze_queue(q);
-
- return err;
- }
-
- /*
- * Turn on BYPASS and drain all requests w/ elevator private data.
- * Block layer doesn't call into a quiesced elevator - all requests
- * are directly put on the dispatch list without elevator data
- * using INSERT_BACK. All requests have SOFTBARRIER set and no
- * merge happens either.
- */
- if (old) {
- old_registered = old->registered;
-
- blk_queue_bypass_start(q);
-
- /* unregister and clear all auxiliary data of the old elevator */
- if (old_registered)
- elv_unregister_queue(q);
-
- ioc_clear_queue(q);
- }
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
- /* allocate, init and register new elevator */
- err = new_e->ops.sq.elevator_init_fn(q, new_e);
- if (err)
- goto fail_init;
-
- err = elv_register_queue(q);
- if (err)
- goto fail_register;
-
- /* done, kill the old one and finish */
- if (old) {
- elevator_exit(q, old);
- blk_queue_bypass_end(q);
- }
+ err = elevator_switch_mq(q, new_e);
- blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
-
- return 0;
-
-fail_register:
- elevator_exit(q, q->elevator);
-fail_init:
- /* switch failed, restore and re-register old elevator */
- if (old) {
- q->elevator = old;
- elv_register_queue(q);
- blk_queue_bypass_end(q);
- }
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
return err;
}
@@ -1073,7 +667,7 @@ static int __elevator_change(struct request_queue *q, const char *name)
/*
* Special case for mq, turn off scheduling
*/
- if (q->mq_ops && !strncmp(name, "none", 4))
+ if (!strncmp(name, "none", 4))
return elevator_switch(q, NULL);
strlcpy(elevator_name, name, sizeof(elevator_name));
@@ -1091,8 +685,7 @@ static int __elevator_change(struct request_queue *q, const char *name)
static inline bool elv_support_iosched(struct request_queue *q)
{
- if (q->mq_ops && q->tag_set && (q->tag_set->flags &
- BLK_MQ_F_NO_SCHED))
+ if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
return false;
return true;
}
@@ -1102,7 +695,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
{
int ret;
- if (!(q->mq_ops || q->request_fn) || !elv_support_iosched(q))
+ if (!queue_is_mq(q) || !elv_support_iosched(q))
return count;
ret = __elevator_change(q, name);
@@ -1117,10 +710,9 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
struct elevator_queue *e = q->elevator;
struct elevator_type *elv = NULL;
struct elevator_type *__e;
- bool uses_mq = q->mq_ops != NULL;
int len = 0;
- if (!queue_is_rq_based(q))
+ if (!queue_is_mq(q))
return sprintf(name, "none\n");
if (!q->elevator)
@@ -1130,19 +722,16 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
spin_lock(&elv_list_lock);
list_for_each_entry(__e, &elv_list, list) {
- if (elv && elevator_match(elv, __e->elevator_name) &&
- (__e->uses_mq == uses_mq)) {
+ if (elv && elevator_match(elv, __e->elevator_name)) {
len += sprintf(name+len, "[%s] ", elv->elevator_name);
continue;
}
- if (__e->uses_mq && q->mq_ops && elv_support_iosched(q))
- len += sprintf(name+len, "%s ", __e->elevator_name);
- else if (!__e->uses_mq && !q->mq_ops)
+ if (elv_support_iosched(q))
len += sprintf(name+len, "%s ", __e->elevator_name);
}
spin_unlock(&elv_list_lock);
- if (q->mq_ops && q->elevator)
+ if (q->elevator)
len += sprintf(name+len, "none");
len += sprintf(len+name, "\n");
diff --git a/block/genhd.c b/block/genhd.c
index cff6bdf27226..1dd8fd6613b8 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -47,51 +47,64 @@ static void disk_release_events(struct gendisk *disk);
void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
{
- if (q->mq_ops)
+ if (queue_is_mq(q))
return;
- atomic_inc(&part->in_flight[rw]);
+ part_stat_local_inc(part, in_flight[rw]);
if (part->partno)
- atomic_inc(&part_to_disk(part)->part0.in_flight[rw]);
+ part_stat_local_inc(&part_to_disk(part)->part0, in_flight[rw]);
}
void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
{
- if (q->mq_ops)
+ if (queue_is_mq(q))
return;
- atomic_dec(&part->in_flight[rw]);
+ part_stat_local_dec(part, in_flight[rw]);
if (part->partno)
- atomic_dec(&part_to_disk(part)->part0.in_flight[rw]);
+ part_stat_local_dec(&part_to_disk(part)->part0, in_flight[rw]);
}
-void part_in_flight(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2])
+unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part)
{
- if (q->mq_ops) {
- blk_mq_in_flight(q, part, inflight);
- return;
+ int cpu;
+ unsigned int inflight;
+
+ if (queue_is_mq(q)) {
+ return blk_mq_in_flight(q, part);
}
- inflight[0] = atomic_read(&part->in_flight[0]) +
- atomic_read(&part->in_flight[1]);
- if (part->partno) {
- part = &part_to_disk(part)->part0;
- inflight[1] = atomic_read(&part->in_flight[0]) +
- atomic_read(&part->in_flight[1]);
+ inflight = 0;
+ for_each_possible_cpu(cpu) {
+ inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
+ part_stat_local_read_cpu(part, in_flight[1], cpu);
}
+ if ((int)inflight < 0)
+ inflight = 0;
+
+ return inflight;
}
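An increment and its matching decrement can land in different CPUs' counters, so a summed snapshot may transiently come out negative; that is what the (int) cast and clamp above absorb. A minimal userspace sketch of the same pattern (toy array, not the part_stat API):

#include <stdio.h>

#define NR_CPUS 4

/* toy per-CPU in-flight counters; each CPU touches only its own slot */
static long in_flight[NR_CPUS];

static unsigned int sum_in_flight(void)
{
	long total = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		total += in_flight[cpu];

	/* a racy snapshot can dip below zero; clamp as part_in_flight() does */
	if (total < 0)
		total = 0;
	return (unsigned int)total;
}

int main(void)
{
	in_flight[0] = 2;	/* two requests issued on CPU 0 */
	in_flight[1] = -1;	/* one of them completed on CPU 1 */
	printf("%u\n", sum_in_flight());	/* prints 1 */
	return 0;
}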
void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2])
{
- if (q->mq_ops) {
+ int cpu;
+
+ if (queue_is_mq(q)) {
blk_mq_in_flight_rw(q, part, inflight);
return;
}
- inflight[0] = atomic_read(&part->in_flight[0]);
- inflight[1] = atomic_read(&part->in_flight[1]);
+ inflight[0] = 0;
+ inflight[1] = 0;
+ for_each_possible_cpu(cpu) {
+ inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu);
+ inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu);
+ }
+ if ((int)inflight[0] < 0)
+ inflight[0] = 0;
+ if ((int)inflight[1] < 0)
+ inflight[1] = 0;
}
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
@@ -1325,8 +1338,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
struct disk_part_iter piter;
struct hd_struct *hd;
char buf[BDEVNAME_SIZE];
- unsigned int inflight[2];
- int cpu;
+ unsigned int inflight;
/*
if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next)
@@ -1338,10 +1350,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0);
while ((hd = disk_part_iter_next(&piter))) {
- cpu = part_stat_lock();
- part_round_stats(gp->queue, cpu, hd);
- part_stat_unlock();
- part_in_flight(gp->queue, hd, inflight);
+ inflight = part_in_flight(gp->queue, hd);
seq_printf(seqf, "%4d %7d %s "
"%lu %lu %lu %u "
"%lu %lu %lu %u "
@@ -1357,7 +1366,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_stat_read(hd, merges[STAT_WRITE]),
part_stat_read(hd, sectors[STAT_WRITE]),
(unsigned int)part_stat_read_msecs(hd, STAT_WRITE),
- inflight[0],
+ inflight,
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
jiffies_to_msecs(part_stat_read(hd, time_in_queue)),
part_stat_read(hd, ios[STAT_DISCARD]),
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index eccac01a10b6..ec6a04e01bc1 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -195,7 +195,7 @@ struct kyber_hctx_data {
unsigned int batching;
struct kyber_ctx_queue *kcqs;
struct sbitmap kcq_map[KYBER_NUM_DOMAINS];
- wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
+ struct sbq_wait domain_wait[KYBER_NUM_DOMAINS];
struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
atomic_t wait_index[KYBER_NUM_DOMAINS];
};
@@ -501,10 +501,11 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
INIT_LIST_HEAD(&khd->rqs[i]);
- init_waitqueue_func_entry(&khd->domain_wait[i],
+ khd->domain_wait[i].sbq = NULL;
+ init_waitqueue_func_entry(&khd->domain_wait[i].wait,
kyber_domain_wake);
- khd->domain_wait[i].private = hctx;
- INIT_LIST_HEAD(&khd->domain_wait[i].entry);
+ khd->domain_wait[i].wait.private = hctx;
+ INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
atomic_set(&khd->wait_index[i], 0);
}
@@ -576,7 +577,7 @@ static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
struct kyber_hctx_data *khd = hctx->sched_data;
struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
- struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw];
+ struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
struct list_head *rq_list = &kcq->rq_list[sched_domain];
bool merged;
@@ -602,7 +603,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
list_for_each_entry_safe(rq, next, rq_list, queuelist) {
unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
- struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw];
+ struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
struct list_head *head = &kcq->rq_list[sched_domain];
spin_lock(&kcq->lock);
@@ -611,7 +612,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
else
list_move_tail(&rq->queuelist, head);
sbitmap_set_bit(&khd->kcq_map[sched_domain],
- rq->mq_ctx->index_hw);
+ rq->mq_ctx->index_hw[hctx->type]);
blk_mq_sched_request_inserted(rq);
spin_unlock(&kcq->lock);
}
@@ -698,12 +699,13 @@ static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
flush_busy_kcq, &data);
}
-static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
+static int kyber_domain_wake(wait_queue_entry_t *wqe, unsigned mode, int flags,
void *key)
{
- struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);
+ struct blk_mq_hw_ctx *hctx = READ_ONCE(wqe->private);
+ struct sbq_wait *wait = container_of(wqe, struct sbq_wait, wait);
- list_del_init(&wait->entry);
+ sbitmap_del_wait_queue(wait);
blk_mq_run_hw_queue(hctx, true);
return 1;
}
@@ -714,7 +716,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
{
unsigned int sched_domain = khd->cur_domain;
struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
- wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
+ struct sbq_wait *wait = &khd->domain_wait[sched_domain];
struct sbq_wait_state *ws;
int nr;
@@ -725,11 +727,11 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
* run when one becomes available. Note that this is serialized on
* khd->lock, but we still need to be careful about the waker.
*/
- if (nr < 0 && list_empty_careful(&wait->entry)) {
+ if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
ws = sbq_wait_ptr(domain_tokens,
&khd->wait_index[sched_domain]);
khd->domain_ws[sched_domain] = ws;
- add_wait_queue(&ws->wait, wait);
+ sbitmap_add_wait_queue(domain_tokens, ws, wait);
/*
* Try again in case a token was freed before we got on the wait
@@ -745,10 +747,10 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
* between the !list_empty_careful() check and us grabbing the lock, but
* list_del_init() is okay with that.
*/
- if (nr >= 0 && !list_empty_careful(&wait->entry)) {
+ if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
ws = khd->domain_ws[sched_domain];
spin_lock_irq(&ws->wait.lock);
- list_del_init(&wait->entry);
+ sbitmap_del_wait_queue(wait);
spin_unlock_irq(&ws->wait.lock);
}
@@ -951,7 +953,7 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
{ \
struct blk_mq_hw_ctx *hctx = data; \
struct kyber_hctx_data *khd = hctx->sched_data; \
- wait_queue_entry_t *wait = &khd->domain_wait[domain]; \
+ wait_queue_entry_t *wait = &khd->domain_wait[domain].wait; \
\
seq_printf(m, "%d\n", !list_empty_careful(&wait->entry)); \
return 0; \
@@ -1017,7 +1019,7 @@ static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
#endif
static struct elevator_type kyber_sched = {
- .ops.mq = {
+ .ops = {
.init_sched = kyber_init_sched,
.exit_sched = kyber_exit_sched,
.init_hctx = kyber_init_hctx,
@@ -1032,7 +1034,6 @@ static struct elevator_type kyber_sched = {
.dispatch_request = kyber_dispatch_request,
.has_work = kyber_has_work,
},
- .uses_mq = true,
#ifdef CONFIG_BLK_DEBUG_FS
.queue_debugfs_attrs = kyber_queue_debugfs_attrs,
.hctx_debugfs_attrs = kyber_hctx_debugfs_attrs,
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 099a9e05854c..14288f864e94 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -373,9 +373,16 @@ done:
/*
* One confusing aspect here is that we get called for a specific
- * hardware queue, but we return a request that may not be for a
+ * hardware queue, but we may return a request that is for a
* different hardware queue. This is because mq-deadline has shared
* state for all hardware queues, in terms of sorting, FIFOs, etc.
+ *
+ * For a zoned block device, __dd_dispatch_request() may return NULL
+ * if all the queued write requests are directed at zones that are already
+ * locked due to ongoing write requests. In this case, make sure to mark
+ * the queue as needing a restart to ensure that the queue is run again
+ * and the pending writes dispatched once the target zones for the ongoing
+ * write requests are unlocked in dd_finish_request().
*/
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
@@ -384,6 +391,9 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
spin_lock(&dd->lock);
rq = __dd_dispatch_request(dd);
+ if (!rq && blk_queue_is_zoned(hctx->queue) &&
+ !list_empty(&dd->fifo_list[WRITE]))
+ blk_mq_sched_mark_restart_hctx(hctx);
spin_unlock(&dd->lock);
return rq;
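The restart handshake described in the comment is easier to follow in a toy model: dispatch skips writes whose target zone is locked, and when nothing is dispatchable it sets a restart flag that the completion path consumes after unlocking a zone. A userspace sketch (hypothetical names, not the blk-mq API):

#include <stdbool.h>
#include <stdio.h>

#define NR_ZONES 2

static bool zone_locked[NR_ZONES];	/* write-locked by an in-flight write */
static bool pending_write[NR_ZONES];	/* one queued write per zone */
static bool need_restart;

/* return the zone whose write was dispatched, or -1 (and mark the
 * queue for a restart) if every target zone is currently locked */
static int dispatch_write(void)
{
	for (int z = 0; z < NR_ZONES; z++) {
		if (pending_write[z] && !zone_locked[z]) {
			pending_write[z] = false;
			zone_locked[z] = true;
			return z;
		}
	}
	need_restart = true;
	return -1;
}

/* completion path: unlock the zone, then re-run dispatch if marked */
static void complete_write(int z)
{
	zone_locked[z] = false;
	if (need_restart) {
		need_restart = false;
		dispatch_write();
	}
}

int main(void)
{
	pending_write[0] = pending_write[1] = true;
	zone_locked[0] = zone_locked[1] = true;	/* all zones busy */
	printf("dispatch -> %d\n", dispatch_write());	/* -1, restart marked */
	complete_write(0);	/* zone 0 unlocks: its pending write goes out */
	printf("zone 0 still pending: %d\n", pending_write[0]);	/* 0 */
	return 0;
}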
@@ -761,7 +771,7 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
#endif
static struct elevator_type mq_deadline = {
- .ops.mq = {
+ .ops = {
.insert_requests = dd_insert_requests,
.dispatch_request = dd_dispatch_request,
.prepare_request = dd_prepare_request,
@@ -777,7 +787,6 @@ static struct elevator_type mq_deadline = {
.exit_sched = dd_exit_queue,
},
- .uses_mq = true,
#ifdef CONFIG_BLK_DEBUG_FS
.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
deleted file mode 100644
index 2d1b15d89b45..000000000000
--- a/block/noop-iosched.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * elevator noop
- */
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-struct noop_data {
- struct list_head queue;
-};
-
-static void noop_merged_requests(struct request_queue *q, struct request *rq,
- struct request *next)
-{
- list_del_init(&next->queuelist);
-}
-
-static int noop_dispatch(struct request_queue *q, int force)
-{
- struct noop_data *nd = q->elevator->elevator_data;
- struct request *rq;
-
- rq = list_first_entry_or_null(&nd->queue, struct request, queuelist);
- if (rq) {
- list_del_init(&rq->queuelist);
- elv_dispatch_sort(q, rq);
- return 1;
- }
- return 0;
-}
-
-static void noop_add_request(struct request_queue *q, struct request *rq)
-{
- struct noop_data *nd = q->elevator->elevator_data;
-
- list_add_tail(&rq->queuelist, &nd->queue);
-}
-
-static struct request *
-noop_former_request(struct request_queue *q, struct request *rq)
-{
- struct noop_data *nd = q->elevator->elevator_data;
-
- if (rq->queuelist.prev == &nd->queue)
- return NULL;
- return list_prev_entry(rq, queuelist);
-}
-
-static struct request *
-noop_latter_request(struct request_queue *q, struct request *rq)
-{
- struct noop_data *nd = q->elevator->elevator_data;
-
- if (rq->queuelist.next == &nd->queue)
- return NULL;
- return list_next_entry(rq, queuelist);
-}
-
-static int noop_init_queue(struct request_queue *q, struct elevator_type *e)
-{
- struct noop_data *nd;
- struct elevator_queue *eq;
-
- eq = elevator_alloc(q, e);
- if (!eq)
- return -ENOMEM;
-
- nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node);
- if (!nd) {
- kobject_put(&eq->kobj);
- return -ENOMEM;
- }
- eq->elevator_data = nd;
-
- INIT_LIST_HEAD(&nd->queue);
-
- spin_lock_irq(q->queue_lock);
- q->elevator = eq;
- spin_unlock_irq(q->queue_lock);
- return 0;
-}
-
-static void noop_exit_queue(struct elevator_queue *e)
-{
- struct noop_data *nd = e->elevator_data;
-
- BUG_ON(!list_empty(&nd->queue));
- kfree(nd);
-}
-
-static struct elevator_type elevator_noop = {
- .ops.sq = {
- .elevator_merge_req_fn = noop_merged_requests,
- .elevator_dispatch_fn = noop_dispatch,
- .elevator_add_req_fn = noop_add_request,
- .elevator_former_req_fn = noop_former_request,
- .elevator_latter_req_fn = noop_latter_request,
- .elevator_init_fn = noop_init_queue,
- .elevator_exit_fn = noop_exit_queue,
- },
- .elevator_name = "noop",
- .elevator_owner = THIS_MODULE,
-};
-
-static int __init noop_init(void)
-{
- return elv_register(&elevator_noop);
-}
-
-static void __exit noop_exit(void)
-{
- elv_unregister(&elevator_noop);
-}
-
-module_init(noop_init);
-module_exit(noop_exit);
-
-
-MODULE_AUTHOR("Jens Axboe");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("No-op IO scheduler");
diff --git a/block/partition-generic.c b/block/partition-generic.c
index d3d14e81fb12..8e596a8dff32 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -120,13 +120,9 @@ ssize_t part_stat_show(struct device *dev,
{
struct hd_struct *p = dev_to_part(dev);
struct request_queue *q = part_to_disk(p)->queue;
- unsigned int inflight[2];
- int cpu;
+ unsigned int inflight;
- cpu = part_stat_lock();
- part_round_stats(q, cpu, p);
- part_stat_unlock();
- part_in_flight(q, p, inflight);
+ inflight = part_in_flight(q, p);
return sprintf(buf,
"%8lu %8lu %8llu %8u "
"%8lu %8lu %8llu %8u "
@@ -141,7 +137,7 @@ ssize_t part_stat_show(struct device *dev,
part_stat_read(p, merges[STAT_WRITE]),
(unsigned long long)part_stat_read(p, sectors[STAT_WRITE]),
(unsigned int)part_stat_read_msecs(p, STAT_WRITE),
- inflight[0],
+ inflight,
jiffies_to_msecs(part_stat_read(p, io_ticks)),
jiffies_to_msecs(part_stat_read(p, time_in_queue)),
part_stat_read(p, ios[STAT_DISCARD]),
@@ -249,9 +245,10 @@ struct device_type part_type = {
.uevent = part_uevent,
};
-static void delete_partition_rcu_cb(struct rcu_head *head)
+static void delete_partition_work_fn(struct work_struct *work)
{
- struct hd_struct *part = container_of(head, struct hd_struct, rcu_head);
+ struct hd_struct *part = container_of(to_rcu_work(work), struct hd_struct,
+ rcu_work);
part->start_sect = 0;
part->nr_sects = 0;
@@ -262,7 +259,8 @@ static void delete_partition_rcu_cb(struct rcu_head *head)
void __delete_partition(struct percpu_ref *ref)
{
struct hd_struct *part = container_of(ref, struct hd_struct, ref);
- call_rcu(&part->rcu_head, delete_partition_rcu_cb);
+ INIT_RCU_WORK(&part->rcu_work, delete_partition_work_fn);
+ queue_rcu_work(system_wq, &part->rcu_work);
}
/*
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f7a235db56aa..045af6eeb7e2 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -430,11 +430,14 @@ config CRYPTO_CTS
help
CTS: Cipher Text Stealing
This is the Cipher Text Stealing mode as described by
- Section 8 of rfc2040 and referenced by rfc3962.
- (rfc3962 includes errata information in its Appendix A)
+ Section 8 of rfc2040 and referenced by rfc3962
+ (rfc3962 includes errata information in its Appendix A) or
+ CBC-CS3 as defined by NIST in the SP800-38A addendum from Oct 2010.
This mode is required for Kerberos gss mechanism support
for AES encryption.
+ See: https://csrc.nist.gov/publications/detail/sp/800-38a/addendum/final
+
config CRYPTO_ECB
tristate "ECB support"
select CRYPTO_BLKCIPHER
@@ -493,6 +496,50 @@ config CRYPTO_KEYWRAP
Support for key wrapping (NIST SP800-38F / RFC3394) without
padding.
+config CRYPTO_NHPOLY1305
+ tristate
+ select CRYPTO_HASH
+ select CRYPTO_POLY1305
+
+config CRYPTO_NHPOLY1305_SSE2
+ tristate "NHPoly1305 hash function (x86_64 SSE2 implementation)"
+ depends on X86 && 64BIT
+ select CRYPTO_NHPOLY1305
+ help
+ SSE2 optimized implementation of the hash function used by the
+ Adiantum encryption mode.
+
+config CRYPTO_NHPOLY1305_AVX2
+ tristate "NHPoly1305 hash function (x86_64 AVX2 implementation)"
+ depends on X86 && 64BIT
+ select CRYPTO_NHPOLY1305
+ help
+ AVX2 optimized implementation of the hash function used by the
+ Adiantum encryption mode.
+
+config CRYPTO_ADIANTUM
+ tristate "Adiantum support"
+ select CRYPTO_CHACHA20
+ select CRYPTO_POLY1305
+ select CRYPTO_NHPOLY1305
+ help
+ Adiantum is a tweakable, length-preserving encryption mode
+ designed for fast and secure disk encryption, especially on
+ CPUs without dedicated crypto instructions. It encrypts
+ each sector using the XChaCha12 stream cipher, two passes of
+ an ε-almost-∆-universal hash function, and an invocation of
+ the AES-256 block cipher on a single 16-byte block. On CPUs
+ without AES instructions, Adiantum is much faster than
+ AES-XTS.
+
+ Adiantum's security is provably reducible to that of its
+ underlying stream and block ciphers, subject to a security
+ bound. Unlike XTS, Adiantum is a true wide-block encryption
+ mode, so it actually provides an even stronger notion of
+ security than XTS, subject to the security bound.
+
+ If unsure, say N.
+
comment "Hash modes"
config CRYPTO_CMAC
@@ -936,6 +983,18 @@ config CRYPTO_SM3
http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
https://datatracker.ietf.org/doc/html/draft-shen-sm3-hash
+config CRYPTO_STREEBOG
+ tristate "Streebog Hash Function"
+ select CRYPTO_HASH
+ help
+ Streebog Hash Function (GOST R 34.11-2012, RFC 6986) is one of the Russian
+ cryptographic standard algorithms (called GOST algorithms).
+ This setting enables two hash algorithms with 256- and 512-bit output.
+
+ References:
+ https://tc26.ru/upload/iblock/fed/feddbb4d26b685903faa2ba11aea43f6.pdf
+ https://tools.ietf.org/html/rfc6986
+
config CRYPTO_TGR192
tristate "Tiger digest algorithms"
select CRYPTO_HASH
@@ -1006,7 +1065,8 @@ config CRYPTO_AES_TI
8 for decryption), this implementation only uses just two S-boxes of
256 bytes each, and attempts to eliminate data dependent latencies by
prefetching the entire table into the cache at the start of each
- block.
+ block. Interrupts are also disabled to avoid races where cachelines
+ are evicted when the CPU is interrupted to do something else.
config CRYPTO_AES_586
tristate "AES cipher algorithms (i586)"
@@ -1387,32 +1447,34 @@ config CRYPTO_SALSA20
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>
config CRYPTO_CHACHA20
- tristate "ChaCha20 cipher algorithm"
+ tristate "ChaCha stream cipher algorithms"
select CRYPTO_BLKCIPHER
help
- ChaCha20 cipher algorithm, RFC7539.
+ The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms.
ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
Bernstein and further specified in RFC7539 for use in IETF protocols.
- This is the portable C implementation of ChaCha20.
-
- See also:
+ This is the portable C implementation of ChaCha20. See also:
<http://cr.yp.to/chacha/chacha-20080128.pdf>
+ XChaCha20 is the application of the XSalsa20 construction to ChaCha20
+ rather than to Salsa20. XChaCha20 extends ChaCha20's nonce length
+ from 64 bits (or 96 bits using the RFC7539 convention) to 192 bits,
+ while provably retaining ChaCha20's security. See also:
+ <https://cr.yp.to/snuffle/xsalsa-20081128.pdf>
+
+ XChaCha12 is XChaCha20 reduced to 12 rounds, with correspondingly
+ reduced security margin but increased performance. It can be useful
+ in some performance-sensitive scenarios.
+
config CRYPTO_CHACHA20_X86_64
- tristate "ChaCha20 cipher algorithm (x86_64/SSSE3/AVX2)"
+ tristate "ChaCha stream cipher algorithms (x86_64/SSSE3/AVX2/AVX-512VL)"
depends on X86 && 64BIT
select CRYPTO_BLKCIPHER
select CRYPTO_CHACHA20
help
- ChaCha20 cipher algorithm, RFC7539.
-
- ChaCha20 is a 256-bit high-speed stream cipher designed by Daniel J.
- Bernstein and further specified in RFC7539 for use in IETF protocols.
- This is the x86_64 assembler implementation using SIMD instructions.
-
- See also:
- <http://cr.yp.to/chacha/chacha-20080128.pdf>
+ SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20,
+ XChaCha20, and XChaCha12 stream ciphers.
config CRYPTO_SEED
tristate "SEED cipher algorithm"
@@ -1813,6 +1875,7 @@ config CRYPTO_USER_API_AEAD
config CRYPTO_STATS
bool "Crypto usage statistics for User-space"
+ depends on CRYPTO_USER
help
This option enables the gathering of crypto stats.
This will collect:
diff --git a/crypto/Makefile b/crypto/Makefile
index 5c207c76abf7..799ed5e94606 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -54,7 +54,8 @@ cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
-crypto_user-y := crypto_user_base.o crypto_user_stat.o
+crypto_user-y := crypto_user_base.o
+crypto_user-$(CONFIG_CRYPTO_STATS) += crypto_user_stat.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
@@ -71,6 +72,7 @@ obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
obj-$(CONFIG_CRYPTO_SHA3) += sha3_generic.o
obj-$(CONFIG_CRYPTO_SM3) += sm3_generic.o
+obj-$(CONFIG_CRYPTO_STREEBOG) += streebog_generic.o
obj-$(CONFIG_CRYPTO_WP512) += wp512.o
CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
@@ -84,6 +86,8 @@ obj-$(CONFIG_CRYPTO_LRW) += lrw.o
obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_KEYWRAP) += keywrap.o
+obj-$(CONFIG_CRYPTO_ADIANTUM) += adiantum.o
+obj-$(CONFIG_CRYPTO_NHPOLY1305) += nhpoly1305.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
obj-$(CONFIG_CRYPTO_CCM) += ccm.o
obj-$(CONFIG_CRYPTO_CHACHA20POLY1305) += chacha20poly1305.o
@@ -116,7 +120,7 @@ obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
-obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
+obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 8882e90e868e..b339587073c3 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -365,23 +365,18 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_blkcipher rblkcipher;
- strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
- strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
- sizeof(rblkcipher.geniv));
- rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
+ memset(&rblkcipher, 0, sizeof(rblkcipher));
+
+ strscpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
+ strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
- if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
- sizeof(struct crypto_report_blkcipher), &rblkcipher))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+ sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
@@ -403,7 +398,7 @@ static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize);
seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize);
seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize);
- seq_printf(m, "geniv : %s\n", ablkcipher->geniv ?: "<default>");
+ seq_printf(m, "geniv : <default>\n");
}
const struct crypto_type crypto_ablkcipher_type = {
@@ -415,78 +410,3 @@ const struct crypto_type crypto_ablkcipher_type = {
.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);
-
-static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
- u32 mask)
-{
- struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
- struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;
-
- if (alg->ivsize > PAGE_SIZE / 8)
- return -EINVAL;
-
- crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
- alg->setkey : setkey;
- crt->encrypt = alg->encrypt;
- crt->decrypt = alg->decrypt;
- crt->base = __crypto_ablkcipher_cast(tfm);
- crt->ivsize = alg->ivsize;
-
- return 0;
-}
-
-#ifdef CONFIG_NET
-static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_report_blkcipher rblkcipher;
-
- strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
- strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
- sizeof(rblkcipher.geniv));
- rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
-
- rblkcipher.blocksize = alg->cra_blocksize;
- rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
- rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
- rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
-
- if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
- sizeof(struct crypto_report_blkcipher), &rblkcipher))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-#else
-static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
- return -ENOSYS;
-}
-#endif
-
-static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
- __maybe_unused;
-static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
-{
- struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;
-
- seq_printf(m, "type : givcipher\n");
- seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
- "yes" : "no");
- seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
- seq_printf(m, "min keysize : %u\n", ablkcipher->min_keysize);
- seq_printf(m, "max keysize : %u\n", ablkcipher->max_keysize);
- seq_printf(m, "ivsize : %u\n", ablkcipher->ivsize);
- seq_printf(m, "geniv : %s\n", ablkcipher->geniv ?: "<built-in>");
-}
-
-const struct crypto_type crypto_givcipher_type = {
- .ctxsize = crypto_ablkcipher_ctxsize,
- .init = crypto_init_givcipher_ops,
-#ifdef CONFIG_PROC_FS
- .show = crypto_givcipher_show,
-#endif
- .report = crypto_givcipher_report,
-};
-EXPORT_SYMBOL_GPL(crypto_givcipher_type);
diff --git a/crypto/acompress.c b/crypto/acompress.c
index 1544b7c057fb..0c5bedd06e70 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -33,15 +33,11 @@ static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
- strncpy(racomp.type, "acomp", sizeof(racomp.type));
+ memset(&racomp, 0, sizeof(racomp));
- if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
- sizeof(struct crypto_report_acomp), &racomp))
- goto nla_put_failure;
- return 0;
+ strscpy(racomp.type, "acomp", sizeof(racomp.type));
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}
#else
static int crypto_acomp_report(struct sk_buff *skb, struct crypto_alg *alg)
diff --git a/crypto/adiantum.c b/crypto/adiantum.c
new file mode 100644
index 000000000000..6651e713c45d
--- /dev/null
+++ b/crypto/adiantum.c
@@ -0,0 +1,664 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Adiantum length-preserving encryption mode
+ *
+ * Copyright 2018 Google LLC
+ */
+
+/*
+ * Adiantum is a tweakable, length-preserving encryption mode designed for fast
+ * and secure disk encryption, especially on CPUs without dedicated crypto
+ * instructions. Adiantum encrypts each sector using the XChaCha12 stream
+ * cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
+ * NH and Poly1305, and an invocation of the AES-256 block cipher on a single
+ * 16-byte block. See the paper for details:
+ *
+ * Adiantum: length-preserving encryption for entry-level processors
+ * (https://eprint.iacr.org/2018/720.pdf)
+ *
+ * For flexibility, this implementation also allows other ciphers:
+ *
+ * - Stream cipher: XChaCha12 or XChaCha20
+ * - Block cipher: any with a 128-bit block size and 256-bit key
+ *
+ * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
+ * HPolyC is not supported. This is because Adiantum is ~20% faster than HPolyC
+ * but still provably as secure, and also the ε-∆U hash function of HBSH is
+ * formally defined to take two inputs (tweak, message) which makes it difficult
+ * to wrap with the crypto_shash API; instead, those details are handled in
+ * this file. Nevertheless, if needed in the future, support for other ε-∆U hash
+ * functions could be added here.
+ */
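For orientation, the per-message flow from the paper, in the notation used by the comments below (the final 16 bytes of the input form the right-hand part; \boxplus and \boxminus are addition and subtraction in Z/(2^{128}Z); H is the NH/Poly1305 hash keyed by K_H, E the block cipher keyed by K_E, and S the stream cipher keyed by K_S with a nonce derived from C_M):

\begin{aligned}
P   &= P_L \,\|\, P_R \\
P_M &= P_R \boxplus H_{K_H}(T, P_L) \\
C_M &= E_{K_E}(P_M) \\
C_L &= P_L \oplus S_{K_S}(C_M) \\
C_R &= C_M \boxminus H_{K_H}(T, C_L) \\
C   &= C_L \,\|\, C_R
\end{aligned}

Decryption is the same ladder run bottom to top.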
+
+#include <crypto/b128ops.h>
+#include <crypto/chacha.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/nhpoly1305.h>
+#include <crypto/scatterwalk.h>
+#include <linux/module.h>
+
+#include "internal.h"
+
+/*
+ * Size of right-hand part of input data, in bytes; this is also the block
+ * cipher's block size and the size of the hash function's output.
+ */
+#define BLOCKCIPHER_BLOCK_SIZE 16
+
+/* Size of the block cipher key (K_E) in bytes */
+#define BLOCKCIPHER_KEY_SIZE 32
+
+/* Size of the hash key (K_H) in bytes */
+#define HASH_KEY_SIZE (POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)
+
+/*
+ * The specification allows variable-length tweaks, but Linux's crypto API
+ * currently only allows algorithms to support a single length. The "natural"
+ * tweak length for Adiantum is 16, since that fits into one Poly1305 block for
+ * the best performance. But longer tweaks are useful for fscrypt, to avoid
+ * needing to derive per-file keys. So instead we use two blocks, or 32 bytes.
+ */
+#define TWEAK_SIZE 32
+
+struct adiantum_instance_ctx {
+ struct crypto_skcipher_spawn streamcipher_spawn;
+ struct crypto_spawn blockcipher_spawn;
+ struct crypto_shash_spawn hash_spawn;
+};
+
+struct adiantum_tfm_ctx {
+ struct crypto_skcipher *streamcipher;
+ struct crypto_cipher *blockcipher;
+ struct crypto_shash *hash;
+ struct poly1305_key header_hash_key;
+};
+
+struct adiantum_request_ctx {
+
+ /*
+ * Buffer for right-hand part of data, i.e.
+ *
+ * P_R => P_M => C_M => C_R when encrypting, or
+ * C_R => C_M => P_M => P_R when decrypting.
+ *
+ * Also used to build the IV for the stream cipher.
+ */
+ union {
+ u8 bytes[XCHACHA_IV_SIZE];
+ __le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
+ le128 bignum; /* interpret as element of Z/(2^{128}Z) */
+ } rbuf;
+
+ bool enc; /* true if encrypting, false if decrypting */
+
+ /*
+ * The result of the Poly1305 ε-∆U hash function applied to
+ * (bulk length, tweak)
+ */
+ le128 header_hash;
+
+ /* Sub-requests, must be last */
+ union {
+ struct shash_desc hash_desc;
+ struct skcipher_request streamcipher_req;
+ } u;
+};
+
+/*
+ * Given the XChaCha stream key K_S, derive the block cipher key K_E and the
+ * hash key K_H as follows:
+ *
+ * K_E || K_H || ... = XChaCha(key=K_S, nonce=1||0^191)
+ *
+ * Note that this denotes using bits from the XChaCha keystream, which here we
+ * get indirectly by encrypting a buffer containing all 0's.
+ */
+static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+ struct {
+ u8 iv[XCHACHA_IV_SIZE];
+ u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE];
+ struct scatterlist sg;
+ struct crypto_wait wait;
+ struct skcipher_request req; /* must be last */
+ } *data;
+ u8 *keyp;
+ int err;
+
+ /* Set the stream cipher key (K_S) */
+ crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
+ crypto_skcipher_set_flags(tctx->streamcipher,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
+ crypto_skcipher_set_flags(tfm,
+ crypto_skcipher_get_flags(tctx->streamcipher) &
+ CRYPTO_TFM_RES_MASK);
+ if (err)
+ return err;
+
+ /* Derive the subkeys */
+ data = kzalloc(sizeof(*data) +
+ crypto_skcipher_reqsize(tctx->streamcipher), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->iv[0] = 1;
+ sg_init_one(&data->sg, data->derived_keys, sizeof(data->derived_keys));
+ crypto_init_wait(&data->wait);
+ skcipher_request_set_tfm(&data->req, tctx->streamcipher);
+ skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &data->wait);
+ skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
+ sizeof(data->derived_keys), data->iv);
+ err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
+ if (err)
+ goto out;
+ keyp = data->derived_keys;
+
+ /* Set the block cipher key (K_E) */
+ crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(tctx->blockcipher,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_cipher_setkey(tctx->blockcipher, keyp,
+ BLOCKCIPHER_KEY_SIZE);
+ crypto_skcipher_set_flags(tfm,
+ crypto_cipher_get_flags(tctx->blockcipher) &
+ CRYPTO_TFM_RES_MASK);
+ if (err)
+ goto out;
+ keyp += BLOCKCIPHER_KEY_SIZE;
+
+ /* Set the hash key (K_H) */
+ poly1305_core_setkey(&tctx->header_hash_key, keyp);
+ keyp += POLY1305_BLOCK_SIZE;
+
+ crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
+ crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
+ crypto_skcipher_set_flags(tfm, crypto_shash_get_flags(tctx->hash) &
+ CRYPTO_TFM_RES_MASK);
+ keyp += NHPOLY1305_KEY_SIZE;
+ WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);
+out:
+ kzfree(data);
+ return err;
+}
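The derivation above leans on a basic stream cipher identity: C = P xor keystream, so encrypting an all-zero buffer yields the keystream itself. A self-contained illustration (toy keystream bytes standing in for XChaCha):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* any stream cipher reduces to: ciphertext = plaintext xor keystream */
static void xor_crypt(uint8_t *out, const uint8_t *in,
		      const uint8_t *keystream, size_t len)
{
	for (size_t i = 0; i < len; i++)
		out[i] = in[i] ^ keystream[i];
}

int main(void)
{
	uint8_t keystream[48], zeros[48] = { 0 }, out[48];

	for (size_t i = 0; i < sizeof(keystream); i++)
		keystream[i] = (uint8_t)(i * 31 + 7);	/* stand-in bytes */

	xor_crypt(out, zeros, keystream, sizeof(out));
	assert(!memcmp(out, keystream, sizeof(out)));	/* C == keystream */
	return 0;
}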
+
+/* Addition in Z/(2^{128}Z) */
+static inline void le128_add(le128 *r, const le128 *v1, const le128 *v2)
+{
+ u64 x = le64_to_cpu(v1->b);
+ u64 y = le64_to_cpu(v2->b);
+
+ r->b = cpu_to_le64(x + y);
+ r->a = cpu_to_le64(le64_to_cpu(v1->a) + le64_to_cpu(v2->a) +
+ (x + y < x));
+}
+
+/* Subtraction in Z/(2^{128}Z) */
+static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
+{
+ u64 x = le64_to_cpu(v1->b);
+ u64 y = le64_to_cpu(v2->b);
+
+ r->b = cpu_to_le64(x - y);
+ r->a = cpu_to_le64(le64_to_cpu(v1->a) - le64_to_cpu(v2->a) -
+ (x - y > x));
+}
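The carry and borrow above come from plain unsigned overflow tests: after x + y, a carry occurred iff the sum wrapped to less than x; after x - y, a borrow occurred iff the difference is greater than x. A standalone userspace check of the same logic (raw uint64_t halves; the le64 conversions only matter on big-endian hosts and are dropped here):

#include <stdint.h>
#include <stdio.h>

struct u128 { uint64_t a, b; };	/* a = high 64 bits, b = low 64 bits */

static void u128_add(struct u128 *r, const struct u128 *v1,
		     const struct u128 *v2)
{
	uint64_t x = v1->b, y = v2->b;

	r->b = x + y;
	r->a = v1->a + v2->a + (x + y < x);	/* carry out of the low half */
}

static void u128_sub(struct u128 *r, const struct u128 *v1,
		     const struct u128 *v2)
{
	uint64_t x = v1->b, y = v2->b;

	r->b = x - y;
	r->a = v1->a - v2->a - (x - y > x);	/* borrow from the high half */
}

int main(void)
{
	struct u128 one = { 0, 1 }, low_max = { 0, ~0ULL }, r;

	u128_add(&r, &low_max, &one);	/* must carry into the high half */
	printf("%016llx %016llx\n", (unsigned long long)r.a,
	       (unsigned long long)r.b);	/* 0...01 0...00 */
	u128_sub(&r, &r, &one);		/* and borrow straight back */
	printf("%016llx %016llx\n", (unsigned long long)r.a,
	       (unsigned long long)r.b);	/* 0...00 f...ff */
	return 0;
}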
+
+/*
+ * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
+ * result to rctx->header_hash. This is the calculation
+ *
+ * H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
+ *
+ * from the procedure in section 6.4 of the Adiantum paper. The resulting value
+ * is reused in both the first and second hash steps. Specifically, it's added
+ * to the result of an independently keyed ε-∆U hash function (for equal-length
+ * inputs only) taken over the left-hand part (the "bulk") of the message, to
+ * give the overall Adiantum hash of the (tweak, left-hand part) pair.
+ */
+static void adiantum_hash_header(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+ struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
+ const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+ struct {
+ __le64 message_bits;
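+ /* implicitly zeroed; pads the header to one Poly1305 block */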
+ __le64 padding;
+ } header = {
+ .message_bits = cpu_to_le64((u64)bulk_len * 8)
+ };
+ struct poly1305_state state;
+
+ poly1305_core_init(&state);
+
+ BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
+ poly1305_core_blocks(&state, &tctx->header_hash_key,
+ &header, sizeof(header) / POLY1305_BLOCK_SIZE);
+
+ BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
+ poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
+ TWEAK_SIZE / POLY1305_BLOCK_SIZE);
+
+ poly1305_core_emit(&state, &rctx->header_hash);
+}
+
+/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
+static int adiantum_hash_message(struct skcipher_request *req,
+ struct scatterlist *sgl, le128 *digest)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+ struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
+ const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+ struct shash_desc *hash_desc = &rctx->u.hash_desc;
+ struct sg_mapping_iter miter;
+ unsigned int i, n;
+ int err;
+
+ hash_desc->tfm = tctx->hash;
+ hash_desc->flags = 0;
+
+ err = crypto_shash_init(hash_desc);
+ if (err)
+ return err;
+
+ sg_miter_start(&miter, sgl, sg_nents(sgl),
+ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
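+ /* hash exactly bulk_len bytes; the last sg entry may extend past them */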
+ for (i = 0; i < bulk_len; i += n) {
+ sg_miter_next(&miter);
+ n = min_t(unsigned int, miter.length, bulk_len - i);
+ err = crypto_shash_update(hash_desc, miter.addr, n);
+ if (err)
+ break;
+ }
+ sg_miter_stop(&miter);
+ if (err)
+ return err;
+
+ return crypto_shash_final(hash_desc, (u8 *)digest);
+}
+
+/* Continue Adiantum encryption/decryption after the stream cipher step */
+static int adiantum_finish(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+ struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
+ const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+ le128 digest;
+ int err;
+
+ /* If decrypting, decrypt C_M with the block cipher to get P_M */
+ if (!rctx->enc)
+ crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
+ rctx->rbuf.bytes);
+
+ /*
+ * Second hash step
+ * enc: C_R = C_M - H_{K_H}(T, C_L)
+ * dec: P_R = P_M - H_{K_H}(T, P_L)
+ */
+ err = adiantum_hash_message(req, req->dst, &digest);
+ if (err)
+ return err;
+ le128_add(&digest, &digest, &rctx->header_hash);
+ le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
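+ /* store C_R/P_R as the final 16 bytes of dst */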
+ scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->dst,
+ bulk_len, BLOCKCIPHER_BLOCK_SIZE, 1);
+ return 0;
+}
+
+static void adiantum_streamcipher_done(struct crypto_async_request *areq,
+ int err)
+{
+ struct skcipher_request *req = areq->data;
+
+ if (!err)
+ err = adiantum_finish(req);
+
+ skcipher_request_complete(req, err);
+}
+
+static int adiantum_crypt(struct skcipher_request *req, bool enc)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+ struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
+ const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
+ unsigned int stream_len;
+ le128 digest;
+ int err;
+
+ if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
+ return -EINVAL;
+
+ rctx->enc = enc;
+
+ /*
+ * First hash step
+ * enc: P_M = P_R + H_{K_H}(T, P_L)
+ * dec: C_M = C_R + H_{K_H}(T, C_L)
+ */
+ adiantum_hash_header(req);
+ err = adiantum_hash_message(req, req->src, &digest);
+ if (err)
+ return err;
+ le128_add(&digest, &digest, &rctx->header_hash);
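+ /* read the final 16 bytes of src (P_R or C_R) into rbuf */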
+ scatterwalk_map_and_copy(&rctx->rbuf.bignum, req->src,
+ bulk_len, BLOCKCIPHER_BLOCK_SIZE, 0);
+ le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
+
+ /* If encrypting, encrypt P_M with the block cipher to get C_M */
+ if (enc)
+ crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
+ rctx->rbuf.bytes);
+
+ /* Initialize the rest of the XChaCha IV (first part is C_M) */
+ BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
+ BUILD_BUG_ON(XCHACHA_IV_SIZE != 32); /* nonce || stream position */
+ rctx->rbuf.words[4] = cpu_to_le32(1);
+ rctx->rbuf.words[5] = 0;
+ rctx->rbuf.words[6] = 0;
+ rctx->rbuf.words[7] = 0;
+
+ /*
+ * XChaCha needs to be done on all the data except the last 16 bytes;
+ * for disk encryption that usually means 4080 or 496 bytes. But ChaCha
+ * implementations tend to be most efficient when passed a whole number
+ * of 64-byte ChaCha blocks, or sometimes even a multiple of 256 bytes.
+ * And here it doesn't matter whether the last 16 bytes are written to,
+ * as the second hash step will overwrite them. Thus, round the XChaCha
+ * length up to the next 64-byte boundary if possible.
+ */
+ stream_len = bulk_len;
+ if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
+ stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);
+
+ skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
+ skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
+ req->dst, stream_len, &rctx->rbuf);
+ skcipher_request_set_callback(&rctx->u.streamcipher_req,
+ req->base.flags,
+ adiantum_streamcipher_done, req);
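+ /*
+ * 0 means the stream cipher ran synchronously, so finish inline; for
+ * async completion (-EINPROGRESS), adiantum_streamcipher_done() does it.
+ */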
+ return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
+ adiantum_finish(req);
+}
+
+static int adiantum_encrypt(struct skcipher_request *req)
+{
+ return adiantum_crypt(req, true);
+}
+
+static int adiantum_decrypt(struct skcipher_request *req)
+{
+ return adiantum_crypt(req, false);
+}
+
+static int adiantum_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
+ struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+ struct crypto_skcipher *streamcipher;
+ struct crypto_cipher *blockcipher;
+ struct crypto_shash *hash;
+ unsigned int subreq_size;
+ int err;
+
+ streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
+ if (IS_ERR(streamcipher))
+ return PTR_ERR(streamcipher);
+
+ blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
+ if (IS_ERR(blockcipher)) {
+ err = PTR_ERR(blockcipher);
+ goto err_free_streamcipher;
+ }
+
+ hash = crypto_spawn_shash(&ictx->hash_spawn);
+ if (IS_ERR(hash)) {
+ err = PTR_ERR(hash);
+ goto err_free_blockcipher;
+ }
+
+ tctx->streamcipher = streamcipher;
+ tctx->blockcipher = blockcipher;
+ tctx->hash = hash;
+
+ BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
+ sizeof(struct adiantum_request_ctx));
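+ /*
+ * The two sub-requests live in a union and are never needed at the same
+ * time, so size the request context for the larger of the two.
+ */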
+ subreq_size = max(FIELD_SIZEOF(struct adiantum_request_ctx,
+ u.hash_desc) +
+ crypto_shash_descsize(hash),
+ FIELD_SIZEOF(struct adiantum_request_ctx,
+ u.streamcipher_req) +
+ crypto_skcipher_reqsize(streamcipher));
+
+ crypto_skcipher_set_reqsize(tfm,
+ offsetof(struct adiantum_request_ctx, u) +
+ subreq_size);
+ return 0;
+
+err_free_blockcipher:
+ crypto_free_cipher(blockcipher);
+err_free_streamcipher:
+ crypto_free_skcipher(streamcipher);
+ return err;
+}
+
+static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_skcipher(tctx->streamcipher);
+ crypto_free_cipher(tctx->blockcipher);
+ crypto_free_shash(tctx->hash);
+}
+
+static void adiantum_free_instance(struct skcipher_instance *inst)
+{
+ struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
+
+ crypto_drop_skcipher(&ictx->streamcipher_spawn);
+ crypto_drop_spawn(&ictx->blockcipher_spawn);
+ crypto_drop_shash(&ictx->hash_spawn);
+ kfree(inst);
+}
+
+/*
+ * Check for a supported set of inner algorithms.
+ * See the comment at the beginning of this file.
+ */
+static bool adiantum_supported_algorithms(struct skcipher_alg *streamcipher_alg,
+ struct crypto_alg *blockcipher_alg,
+ struct shash_alg *hash_alg)
+{
+ if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
+ strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
+ return false;
+
+ if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
+ blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
+ return false;
+ if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
+ return false;
+
+ if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
+ return false;
+
+ return true;
+}
+
+static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+ const char *streamcipher_name;
+ const char *blockcipher_name;
+ const char *nhpoly1305_name;
+ struct skcipher_instance *inst;
+ struct adiantum_instance_ctx *ictx;
+ struct skcipher_alg *streamcipher_alg;
+ struct crypto_alg *blockcipher_alg;
+ struct crypto_alg *_hash_alg;
+ struct shash_alg *hash_alg;
+ int err;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return PTR_ERR(algt);
+
+ if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
+ return -EINVAL;
+
+ streamcipher_name = crypto_attr_alg_name(tb[1]);
+ if (IS_ERR(streamcipher_name))
+ return PTR_ERR(streamcipher_name);
+
+ blockcipher_name = crypto_attr_alg_name(tb[2]);
+ if (IS_ERR(blockcipher_name))
+ return PTR_ERR(blockcipher_name);
+
+ nhpoly1305_name = crypto_attr_alg_name(tb[3]);
+ if (nhpoly1305_name == ERR_PTR(-ENOENT))
+ nhpoly1305_name = "nhpoly1305";
+ if (IS_ERR(nhpoly1305_name))
+ return PTR_ERR(nhpoly1305_name);
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+ ictx = skcipher_instance_ctx(inst);
+
+ /* Stream cipher, e.g. "xchacha12" */
+ err = crypto_grab_skcipher(&ictx->streamcipher_spawn, streamcipher_name,
+ 0, crypto_requires_sync(algt->type,
+ algt->mask));
+ if (err)
+ goto out_free_inst;
+ streamcipher_alg = crypto_spawn_skcipher_alg(&ictx->streamcipher_spawn);
+
+ /* Block cipher, e.g. "aes" */
+ err = crypto_grab_spawn(&ictx->blockcipher_spawn, blockcipher_name,
+ CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK);
+ if (err)
+ goto out_drop_streamcipher;
+ blockcipher_alg = ictx->blockcipher_spawn.alg;
+
+ /* NHPoly1305 ε-∆U hash function */
+ _hash_alg = crypto_alg_mod_lookup(nhpoly1305_name,
+ CRYPTO_ALG_TYPE_SHASH,
+ CRYPTO_ALG_TYPE_MASK);
+ if (IS_ERR(_hash_alg)) {
+ err = PTR_ERR(_hash_alg);
+ goto out_drop_blockcipher;
+ }
+ hash_alg = __crypto_shash_alg(_hash_alg);
+ err = crypto_init_shash_spawn(&ictx->hash_spawn, hash_alg,
+ skcipher_crypto_instance(inst));
+ if (err)
+ goto out_put_hash;
+
+ /* Check the set of algorithms */
+ if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
+ hash_alg)) {
+ pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
+ streamcipher_alg->base.cra_name,
+ blockcipher_alg->cra_name, hash_alg->base.cra_name);
+ err = -EINVAL;
+ goto out_drop_hash;
+ }
+
+ /* Instance fields */
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "adiantum(%s,%s)", streamcipher_alg->base.cra_name,
+ blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_drop_hash;
+ if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "adiantum(%s,%s,%s)",
+ streamcipher_alg->base.cra_driver_name,
+ blockcipher_alg->cra_driver_name,
+ hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_drop_hash;
+
+ inst->alg.base.cra_flags = streamcipher_alg->base.cra_flags &
+ CRYPTO_ALG_ASYNC;
+ inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
+ inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
+ inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask |
+ hash_alg->base.cra_alignmask;
+ /*
+ * The block cipher is only invoked once per message, so for long
+ * messages (e.g. sectors for disk encryption) its performance doesn't
+ * matter as much as that of the stream cipher and hash function. Thus,
+ * give the block cipher's ->cra_priority less weight.
+ */
+ inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
+ 2 * hash_alg->base.cra_priority +
+ blockcipher_alg->cra_priority) / 7;
+
+ inst->alg.setkey = adiantum_setkey;
+ inst->alg.encrypt = adiantum_encrypt;
+ inst->alg.decrypt = adiantum_decrypt;
+ inst->alg.init = adiantum_init_tfm;
+ inst->alg.exit = adiantum_exit_tfm;
+ inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(streamcipher_alg);
+ inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(streamcipher_alg);
+ inst->alg.ivsize = TWEAK_SIZE;
+
+ inst->free = adiantum_free_instance;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto out_drop_hash;
+
+ crypto_mod_put(_hash_alg);
+ return 0;
+
+out_drop_hash:
+ crypto_drop_shash(&ictx->hash_spawn);
+out_put_hash:
+ crypto_mod_put(_hash_alg);
+out_drop_blockcipher:
+ crypto_drop_spawn(&ictx->blockcipher_spawn);
+out_drop_streamcipher:
+ crypto_drop_skcipher(&ictx->streamcipher_spawn);
+out_free_inst:
+ kfree(inst);
+ return err;
+}
+
+/* adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name]) */
+static struct crypto_template adiantum_tmpl = {
+ .name = "adiantum",
+ .create = adiantum_create,
+ .module = THIS_MODULE,
+};
+
+static int __init adiantum_module_init(void)
+{
+ return crypto_register_template(&adiantum_tmpl);
+}
+
+static void __exit adiantum_module_exit(void)
+{
+ crypto_unregister_template(&adiantum_tmpl);
+}
+
+module_init(adiantum_module_init);
+module_exit(adiantum_module_exit);
+
+MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("adiantum");
diff --git a/crypto/aead.c b/crypto/aead.c
index 60b3bbe973e7..189c52d1f63a 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -119,20 +119,16 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
struct crypto_report_aead raead;
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
- strncpy(raead.type, "aead", sizeof(raead.type));
- strncpy(raead.geniv, "<none>", sizeof(raead.geniv));
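+ /* zero-fill first: strscpy(), unlike strncpy(), does not pad, and this struct is copied to userspace */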
+ memset(&raead, 0, sizeof(raead));
+
+ strscpy(raead.type, "aead", sizeof(raead.type));
+ strscpy(raead.geniv, "<none>", sizeof(raead.geniv));
raead.blocksize = alg->cra_blocksize;
raead.maxauthsize = aead->maxauthsize;
raead.ivsize = aead->ivsize;
- if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
- sizeof(struct crypto_report_aead), &raead))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(raead), &raead);
}
#else
static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index ca554d57d01e..13df33aca463 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -63,7 +63,8 @@ static inline u8 byte(const u32 x, const unsigned n)
static const u32 rco_tab[10] = { 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
-__visible const u32 crypto_ft_tab[4][256] = {
+/* cacheline-aligned to facilitate prefetching into cache */
+__visible const u32 crypto_ft_tab[4][256] __cacheline_aligned = {
{
0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
@@ -327,7 +328,7 @@ __visible const u32 crypto_ft_tab[4][256] = {
}
};
-__visible const u32 crypto_fl_tab[4][256] = {
+__visible const u32 crypto_fl_tab[4][256] __cacheline_aligned = {
{
0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
@@ -591,7 +592,7 @@ __visible const u32 crypto_fl_tab[4][256] = {
}
};
-__visible const u32 crypto_it_tab[4][256] = {
+__visible const u32 crypto_it_tab[4][256] __cacheline_aligned = {
{
0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
@@ -855,7 +856,7 @@ __visible const u32 crypto_it_tab[4][256] = {
}
};
-__visible const u32 crypto_il_tab[4][256] = {
+__visible const u32 crypto_il_tab[4][256] __cacheline_aligned = {
{
0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
0x00000030, 0x00000036, 0x000000a5, 0x00000038,
diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
index 03023b2290e8..1ff9785b30f5 100644
--- a/crypto/aes_ti.c
+++ b/crypto/aes_ti.c
@@ -269,6 +269,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
const u32 *rkp = ctx->key_enc + 4;
int rounds = 6 + ctx->key_length / 4;
u32 st0[4], st1[4];
+ unsigned long flags;
int round;
st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
@@ -276,6 +277,12 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
+ /*
+ * Temporarily disable interrupts to avoid races where cachelines are
+ * evicted when the CPU is interrupted to do something else.
+ */
+ local_irq_save(flags);
+
st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
@@ -300,6 +307,8 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
+
+ local_irq_restore(flags);
}
static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
@@ -308,6 +317,7 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
const u32 *rkp = ctx->key_dec + 4;
int rounds = 6 + ctx->key_length / 4;
u32 st0[4], st1[4];
+ unsigned long flags;
int round;
st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
@@ -315,6 +325,12 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
+ /*
+ * Temporarily disable interrupts to avoid races where cachelines are
+ * evicted when the CPU is interrupted to do something else.
+ */
+ local_irq_save(flags);
+
st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
@@ -339,6 +355,8 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
+
+ local_irq_restore(flags);
}
static struct crypto_alg aes_alg = {
diff --git a/crypto/ahash.c b/crypto/ahash.c
index e21667b4e10a..5d320a811f75 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -364,20 +364,28 @@ static int crypto_ahash_op(struct ahash_request *req,
int crypto_ahash_final(struct ahash_request *req)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+ crypto_stats_get(alg);
ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
- crypto_stat_ahash_final(req, ret);
+ crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+ crypto_stats_get(alg);
ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
- crypto_stat_ahash_final(req, ret);
+ crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
@@ -385,13 +393,16 @@ EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+ crypto_stats_get(alg);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = crypto_ahash_op(req, tfm->digest);
- crypto_stat_ahash_final(req, ret);
+ crypto_stats_ahash_final(nbytes, ret, alg);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -498,18 +509,14 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
- strncpy(rhash.type, "ahash", sizeof(rhash.type));
+ memset(&rhash, 0, sizeof(rhash));
+
+ strscpy(rhash.type, "ahash", sizeof(rhash.type));
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
- if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
- sizeof(struct crypto_report_hash), &rhash))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
diff --git a/crypto/akcipher.c b/crypto/akcipher.c
index cfbdb06d8ca8..0cbeae137e0a 100644
--- a/crypto/akcipher.c
+++ b/crypto/akcipher.c
@@ -30,15 +30,12 @@ static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_akcipher rakcipher;
- strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+ memset(&rakcipher, 0, sizeof(rakcipher));
- if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
- sizeof(struct crypto_report_akcipher), &rakcipher))
- goto nla_put_failure;
- return 0;
+ strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
+ sizeof(rakcipher), &rakcipher);
}
#else
static int crypto_akcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 2545c5f89c4c..8b65ada33e5d 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -258,13 +258,7 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
list_add(&alg->cra_list, &crypto_alg_list);
list_add(&larval->alg.cra_list, &crypto_alg_list);
- atomic_set(&alg->encrypt_cnt, 0);
- atomic_set(&alg->decrypt_cnt, 0);
- atomic64_set(&alg->encrypt_tlen, 0);
- atomic64_set(&alg->decrypt_tlen, 0);
- atomic_set(&alg->verify_cnt, 0);
- atomic_set(&alg->cipher_err_cnt, 0);
- atomic_set(&alg->sign_cnt, 0);
+ crypto_stats_init(alg);
out:
return larval;
@@ -1076,6 +1070,245 @@ int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
}
EXPORT_SYMBOL_GPL(crypto_type_has_alg);
+#ifdef CONFIG_CRYPTO_STATS
+void crypto_stats_init(struct crypto_alg *alg)
+{
+ memset(&alg->stats, 0, sizeof(alg->stats));
+}
+EXPORT_SYMBOL_GPL(crypto_stats_init);
+
+void crypto_stats_get(struct crypto_alg *alg)
+{
+ crypto_alg_get(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_get);
+
+void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret,
+ struct crypto_alg *alg)
+{
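+ /* -EINPROGRESS and -EBUSY mean the request was queued, not that it failed */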
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.cipher.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.cipher.encrypt_cnt);
+ atomic64_add(nbytes, &alg->stats.cipher.encrypt_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_encrypt);
+
+void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret,
+ struct crypto_alg *alg)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.cipher.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.cipher.decrypt_cnt);
+ atomic64_add(nbytes, &alg->stats.cipher.decrypt_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ablkcipher_decrypt);
+
+void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg,
+ int ret)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.aead.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.aead.encrypt_cnt);
+ atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_aead_encrypt);
+
+void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg,
+ int ret)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.aead.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.aead.decrypt_cnt);
+ atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_aead_decrypt);
+
+void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret,
+ struct crypto_alg *alg)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.akcipher.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.akcipher.encrypt_cnt);
+ atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_encrypt);
+
+void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret,
+ struct crypto_alg *alg)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.akcipher.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.akcipher.decrypt_cnt);
+ atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_decrypt);
+
+void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic64_inc(&alg->stats.akcipher.err_cnt);
+ else
+ atomic64_inc(&alg->stats.akcipher.sign_cnt);
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_sign);
+
+void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic64_inc(&alg->stats.akcipher.err_cnt);
+ else
+ atomic64_inc(&alg->stats.akcipher.verify_cnt);
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_akcipher_verify);
+
+void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.compress.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.compress.compress_cnt);
+ atomic64_add(slen, &alg->stats.compress.compress_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_compress);
+
+void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.compress.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.compress.decompress_cnt);
+ atomic64_add(slen, &alg->stats.compress.decompress_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_decompress);
+
+void crypto_stats_ahash_update(unsigned int nbytes, int ret,
+ struct crypto_alg *alg)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic64_inc(&alg->stats.hash.err_cnt);
+ else
+ atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ahash_update);
+
+void crypto_stats_ahash_final(unsigned int nbytes, int ret,
+ struct crypto_alg *alg)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.hash.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.hash.hash_cnt);
+ atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_ahash_final);
+
+void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
+{
+ if (ret)
+ atomic64_inc(&alg->stats.kpp.err_cnt);
+ else
+ atomic64_inc(&alg->stats.kpp.setsecret_cnt);
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_kpp_set_secret);
+
+void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
+{
+ if (ret)
+ atomic64_inc(&alg->stats.kpp.err_cnt);
+ else
+ atomic64_inc(&alg->stats.kpp.generate_public_key_cnt);
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_kpp_generate_public_key);
+
+void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
+{
+ if (ret)
+ atomic64_inc(&alg->stats.kpp.err_cnt);
+ else
+ atomic64_inc(&alg->stats.kpp.compute_shared_secret_cnt);
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_kpp_compute_shared_secret);
+
+void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic64_inc(&alg->stats.rng.err_cnt);
+ else
+ atomic64_inc(&alg->stats.rng.seed_cnt);
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_rng_seed);
+
+void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen,
+ int ret)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.rng.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.rng.generate_cnt);
+ atomic64_add(dlen, &alg->stats.rng.generate_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_rng_generate);
+
+void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret,
+ struct crypto_alg *alg)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.cipher.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.cipher.encrypt_cnt);
+ atomic64_add(cryptlen, &alg->stats.cipher.encrypt_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_skcipher_encrypt);
+
+void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret,
+ struct crypto_alg *alg)
+{
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic64_inc(&alg->stats.cipher.err_cnt);
+ } else {
+ atomic64_inc(&alg->stats.cipher.decrypt_cnt);
+ atomic64_add(cryptlen, &alg->stats.cipher.decrypt_tlen);
+ }
+ crypto_alg_put(alg);
+}
+EXPORT_SYMBOL_GPL(crypto_stats_skcipher_decrypt);
+#endif
+
static int __init crypto_algapi_init(void)
{
crypto_init_proc();
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index f93abf13b5d4..c5398bd54942 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -507,23 +507,18 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_blkcipher rblkcipher;
- strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
- strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
- sizeof(rblkcipher.geniv));
- rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
+ memset(&rblkcipher, 0, sizeof(rblkcipher));
+
+ strscpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
+ strscpy(rblkcipher.geniv, "<default>", sizeof(rblkcipher.geniv));
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
- if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
- sizeof(struct crypto_report_blkcipher), &rblkcipher))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+ sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
@@ -541,8 +536,7 @@ static void crypto_blkcipher_show(struct seq_file *m, struct crypto_alg *alg)
seq_printf(m, "min keysize : %u\n", alg->cra_blkcipher.min_keysize);
seq_printf(m, "max keysize : %u\n", alg->cra_blkcipher.max_keysize);
seq_printf(m, "ivsize : %u\n", alg->cra_blkcipher.ivsize);
- seq_printf(m, "geniv : %s\n", alg->cra_blkcipher.geniv ?:
- "<default>");
+ seq_printf(m, "geniv : <default>\n");
}
const struct crypto_type crypto_blkcipher_type = {
diff --git a/crypto/cbc.c b/crypto/cbc.c
index b761b1f9c6ca..dd5f332fd566 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -140,9 +140,8 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = skcipher_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
- crypto_mod_put(alg);
if (err)
- goto err_free_inst;
+ goto err_put_alg;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
if (err)
@@ -174,12 +173,15 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
err = skcipher_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
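+ /* alg was still in use above (e.g. by crypto_inst_setname()), so drop it only now */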
+ crypto_mod_put(alg);
out:
return err;
err_drop_spawn:
crypto_drop_spawn(spawn);
+err_put_alg:
+ crypto_mod_put(alg);
err_free_inst:
kfree(inst);
goto out;
diff --git a/crypto/cfb.c b/crypto/cfb.c
index a0d68c09e1b9..e81e45673498 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -144,7 +144,7 @@ static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
do {
crypto_cfb_encrypt_one(tfm, iv, dst);
- crypto_xor(dst, iv, bsize);
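+ /* CFB decrypt: P_i = E_k(C_{i-1}) ^ C_i, so XOR with the ciphertext (src), not the IV */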
+ crypto_xor(dst, src, bsize);
iv = src;
src += bsize;
@@ -286,9 +286,8 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = skcipher_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
- crypto_mod_put(alg);
if (err)
- goto err_free_inst;
+ goto err_put_alg;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
if (err)
@@ -317,12 +316,15 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
err = skcipher_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
+ crypto_mod_put(alg);
out:
return err;
err_drop_spawn:
crypto_drop_spawn(spawn);
+err_put_alg:
+ crypto_mod_put(alg);
err_free_inst:
kfree(inst);
goto out;
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
deleted file mode 100644
index 3ae96587caf9..000000000000
--- a/crypto/chacha20_generic.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * ChaCha20 256-bit cipher algorithm, RFC7539
- *
- * Copyright (C) 2015 Martin Willi
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <asm/unaligned.h>
-#include <crypto/algapi.h>
-#include <crypto/chacha20.h>
-#include <crypto/internal/skcipher.h>
-#include <linux/module.h>
-
-static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
- unsigned int bytes)
-{
- /* aligned to potentially speed up crypto_xor() */
- u8 stream[CHACHA20_BLOCK_SIZE] __aligned(sizeof(long));
-
- if (dst != src)
- memcpy(dst, src, bytes);
-
- while (bytes >= CHACHA20_BLOCK_SIZE) {
- chacha20_block(state, stream);
- crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE);
- bytes -= CHACHA20_BLOCK_SIZE;
- dst += CHACHA20_BLOCK_SIZE;
- }
- if (bytes) {
- chacha20_block(state, stream);
- crypto_xor(dst, stream, bytes);
- }
-}
-
-void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv)
-{
- state[0] = 0x61707865; /* "expa" */
- state[1] = 0x3320646e; /* "nd 3" */
- state[2] = 0x79622d32; /* "2-by" */
- state[3] = 0x6b206574; /* "te k" */
- state[4] = ctx->key[0];
- state[5] = ctx->key[1];
- state[6] = ctx->key[2];
- state[7] = ctx->key[3];
- state[8] = ctx->key[4];
- state[9] = ctx->key[5];
- state[10] = ctx->key[6];
- state[11] = ctx->key[7];
- state[12] = get_unaligned_le32(iv + 0);
- state[13] = get_unaligned_le32(iv + 4);
- state[14] = get_unaligned_le32(iv + 8);
- state[15] = get_unaligned_le32(iv + 12);
-}
-EXPORT_SYMBOL_GPL(crypto_chacha20_init);
-
-int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize)
-{
- struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
- int i;
-
- if (keysize != CHACHA20_KEY_SIZE)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
- ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_chacha20_setkey);
-
-int crypto_chacha20_crypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- u32 state[16];
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
-
- crypto_chacha20_init(state, ctx, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
-
- if (nbytes < walk.total)
- nbytes = round_down(nbytes, walk.stride);
-
- chacha20_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
- nbytes);
- err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(crypto_chacha20_crypt);
-
-static struct skcipher_alg alg = {
- .base.cra_name = "chacha20",
- .base.cra_driver_name = "chacha20-generic",
- .base.cra_priority = 100,
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct chacha20_ctx),
- .base.cra_module = THIS_MODULE,
-
- .min_keysize = CHACHA20_KEY_SIZE,
- .max_keysize = CHACHA20_KEY_SIZE,
- .ivsize = CHACHA20_IV_SIZE,
- .chunksize = CHACHA20_BLOCK_SIZE,
- .setkey = crypto_chacha20_setkey,
- .encrypt = crypto_chacha20_crypt,
- .decrypt = crypto_chacha20_crypt,
-};
-
-static int __init chacha20_generic_mod_init(void)
-{
- return crypto_register_skcipher(&alg);
-}
-
-static void __exit chacha20_generic_mod_fini(void)
-{
- crypto_unregister_skcipher(&alg);
-}
-
-module_init(chacha20_generic_mod_init);
-module_exit(chacha20_generic_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
-MODULE_DESCRIPTION("chacha20 cipher algorithm");
-MODULE_ALIAS_CRYPTO("chacha20");
-MODULE_ALIAS_CRYPTO("chacha20-generic");
diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
index 600afa99941f..fef11446ab1b 100644
--- a/crypto/chacha20poly1305.c
+++ b/crypto/chacha20poly1305.c
@@ -13,7 +13,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
-#include <crypto/chacha20.h>
+#include <crypto/chacha.h>
#include <crypto/poly1305.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -22,8 +22,6 @@
#include "internal.h"
-#define CHACHAPOLY_IV_SIZE 12
-
struct chachapoly_instance_ctx {
struct crypto_skcipher_spawn chacha;
struct crypto_ahash_spawn poly;
@@ -51,7 +49,7 @@ struct poly_req {
};
struct chacha_req {
- u8 iv[CHACHA20_IV_SIZE];
+ u8 iv[CHACHA_IV_SIZE];
struct scatterlist src[1];
struct skcipher_request req; /* must be last member */
};
@@ -91,7 +89,7 @@ static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb)
memcpy(iv, &leicb, sizeof(leicb));
memcpy(iv + sizeof(leicb), ctx->salt, ctx->saltlen);
memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv,
- CHACHA20_IV_SIZE - sizeof(leicb) - ctx->saltlen);
+ CHACHA_IV_SIZE - sizeof(leicb) - ctx->saltlen);
}
static int poly_verify_tag(struct aead_request *req)
@@ -494,7 +492,7 @@ static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
struct chachapoly_ctx *ctx = crypto_aead_ctx(aead);
int err;
- if (keylen != ctx->saltlen + CHACHA20_KEY_SIZE)
+ if (keylen != ctx->saltlen + CHACHA_KEY_SIZE)
return -EINVAL;
keylen -= ctx->saltlen;
@@ -639,7 +637,7 @@ static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb,
err = -EINVAL;
/* Need 16-byte IV size, including Initial Block Counter value */
- if (crypto_skcipher_alg_ivsize(chacha) != CHACHA20_IV_SIZE)
+ if (crypto_skcipher_alg_ivsize(chacha) != CHACHA_IV_SIZE)
goto out_drop_chacha;
/* Not a stream cipher? */
if (chacha->base.cra_blocksize != 1)
diff --git a/crypto/chacha_generic.c b/crypto/chacha_generic.c
new file mode 100644
index 000000000000..35b583101f4f
--- /dev/null
+++ b/crypto/chacha_generic.c
@@ -0,0 +1,217 @@
+/*
+ * ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539)
+ *
+ * Copyright (C) 2015 Martin Willi
+ * Copyright (C) 2018 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/algapi.h>
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/module.h>
+
+static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ /* aligned to potentially speed up crypto_xor() */
+ u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long));
+
+ if (dst != src)
+ memcpy(dst, src, bytes);
+
+ while (bytes >= CHACHA_BLOCK_SIZE) {
+ chacha_block(state, stream, nrounds);
+ crypto_xor(dst, stream, CHACHA_BLOCK_SIZE);
+ bytes -= CHACHA_BLOCK_SIZE;
+ dst += CHACHA_BLOCK_SIZE;
+ }
+ if (bytes) {
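+ /* partial final block: generate a whole keystream block, XOR in only 'bytes' */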
+ chacha_block(state, stream, nrounds);
+ crypto_xor(dst, stream, bytes);
+ }
+}
+
+static int chacha_stream_xor(struct skcipher_request *req,
+ struct chacha_ctx *ctx, u8 *iv)
+{
+ struct skcipher_walk walk;
+ u32 state[16];
+ int err;
+
+ err = skcipher_walk_virt(&walk, req, false);
+
+ crypto_chacha_init(state, ctx, iv);
+
+ while (walk.nbytes > 0) {
+ unsigned int nbytes = walk.nbytes;
+
+ if (nbytes < walk.total)
+ nbytes = round_down(nbytes, walk.stride);
+
+ chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr,
+ nbytes, ctx->nrounds);
+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+
+ return err;
+}
+
+void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv)
+{
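+ /* state: 4 constant words, 8 key words, then the 16-byte IV in words 12..15 */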
+ state[0] = 0x61707865; /* "expa" */
+ state[1] = 0x3320646e; /* "nd 3" */
+ state[2] = 0x79622d32; /* "2-by" */
+ state[3] = 0x6b206574; /* "te k" */
+ state[4] = ctx->key[0];
+ state[5] = ctx->key[1];
+ state[6] = ctx->key[2];
+ state[7] = ctx->key[3];
+ state[8] = ctx->key[4];
+ state[9] = ctx->key[5];
+ state[10] = ctx->key[6];
+ state[11] = ctx->key[7];
+ state[12] = get_unaligned_le32(iv + 0);
+ state[13] = get_unaligned_le32(iv + 4);
+ state[14] = get_unaligned_le32(iv + 8);
+ state[15] = get_unaligned_le32(iv + 12);
+}
+EXPORT_SYMBOL_GPL(crypto_chacha_init);
+
+static int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize, int nrounds)
+{
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int i;
+
+ if (keysize != CHACHA_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
+
+ ctx->nrounds = nrounds;
+ return 0;
+}
+
+int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 20);
+}
+EXPORT_SYMBOL_GPL(crypto_chacha20_setkey);
+
+int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 12);
+}
+EXPORT_SYMBOL_GPL(crypto_chacha12_setkey);
+
+int crypto_chacha_crypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ return chacha_stream_xor(req, ctx, req->iv);
+}
+EXPORT_SYMBOL_GPL(crypto_chacha_crypt);
+
+int crypto_xchacha_crypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct chacha_ctx subctx;
+ u32 state[16];
+ u8 real_iv[16];
+
+ /* Compute the subkey given the original key and first 128 nonce bits */
+ crypto_chacha_init(state, ctx, req->iv);
+ hchacha_block(state, subctx.key, ctx->nrounds);
+ subctx.nrounds = ctx->nrounds;
+
+ /* Build the real IV */
+ memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
+ memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
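+ /* crypto_chacha_init() will load these 16 bytes into state words 12..15 */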
+
+ /* Generate the stream and XOR it with the data */
+ return chacha_stream_xor(req, &subctx, real_iv);
+}
+EXPORT_SYMBOL_GPL(crypto_xchacha_crypt);
+
+static struct skcipher_alg algs[] = {
+ {
+ .base.cra_name = "chacha20",
+ .base.cra_driver_name = "chacha20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha20_setkey,
+ .encrypt = crypto_chacha_crypt,
+ .decrypt = crypto_chacha_crypt,
+ }, {
+ .base.cra_name = "xchacha20",
+ .base.cra_driver_name = "xchacha20-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha20_setkey,
+ .encrypt = crypto_xchacha_crypt,
+ .decrypt = crypto_xchacha_crypt,
+ }, {
+ .base.cra_name = "xchacha12",
+ .base.cra_driver_name = "xchacha12-generic",
+ .base.cra_priority = 100,
+ .base.cra_blocksize = 1,
+ .base.cra_ctxsize = sizeof(struct chacha_ctx),
+ .base.cra_module = THIS_MODULE,
+
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = XCHACHA_IV_SIZE,
+ .chunksize = CHACHA_BLOCK_SIZE,
+ .setkey = crypto_chacha12_setkey,
+ .encrypt = crypto_xchacha_crypt,
+ .decrypt = crypto_xchacha_crypt,
+ }
+};
+
+static int __init chacha_generic_mod_init(void)
+{
+ return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit chacha_generic_mod_fini(void)
+{
+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
+}
+
+module_init(chacha_generic_mod_init);
+module_exit(chacha_generic_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Willi <martin@strongswan.org>");
+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)");
+MODULE_ALIAS_CRYPTO("chacha20");
+MODULE_ALIAS_CRYPTO("chacha20-generic");
+MODULE_ALIAS_CRYPTO("xchacha20");
+MODULE_ALIAS_CRYPTO("xchacha20-generic");
+MODULE_ALIAS_CRYPTO("xchacha12");
+MODULE_ALIAS_CRYPTO("xchacha12-generic");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 7118fb5efbaa..5640e5db7bdb 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -422,8 +422,6 @@ static int cryptd_create_blkcipher(struct crypto_template *tmpl,
inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
- inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
-
inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
inst->alg.cra_init = cryptd_blkcipher_init_tfm;
@@ -1174,7 +1172,7 @@ struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
return ERR_PTR(-EINVAL);
type = crypto_skcipher_type(type);
mask &= ~CRYPTO_ALG_TYPE_MASK;
- mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
+ mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c
index 784748dbb19f..f25d3f32c9c2 100644
--- a/crypto/crypto_user_base.c
+++ b/crypto/crypto_user_base.c
@@ -84,87 +84,38 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_cipher rcipher;
- strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
+ memset(&rcipher, 0, sizeof(rcipher));
+
+ strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
rcipher.blocksize = alg->cra_blocksize;
rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
- if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
- sizeof(struct crypto_report_cipher), &rcipher))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
+ sizeof(rcipher), &rcipher);
}
static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rcomp;
- strncpy(rcomp.type, "compression", sizeof(rcomp.type));
- if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
- sizeof(struct crypto_report_comp), &rcomp))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-
-static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_report_acomp racomp;
+ memset(&rcomp, 0, sizeof(rcomp));
- strncpy(racomp.type, "acomp", sizeof(racomp.type));
+ strscpy(rcomp.type, "compression", sizeof(rcomp.type));
- if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
- sizeof(struct crypto_report_acomp), &racomp))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-
-static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_report_akcipher rakcipher;
-
- strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
-
- if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
- sizeof(struct crypto_report_akcipher), &rakcipher))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
-}
-
-static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
-{
- struct crypto_report_kpp rkpp;
-
- strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
-
- if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
- sizeof(struct crypto_report_kpp), &rkpp))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
}
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
- strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
- strncpy(ualg->cru_driver_name, alg->cra_driver_name,
+ memset(ualg, 0, sizeof(*ualg));
+
+ strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+ strscpy(ualg->cru_driver_name, alg->cra_driver_name,
sizeof(ualg->cru_driver_name));
- strncpy(ualg->cru_module_name, module_name(alg->cra_module),
+ strscpy(ualg->cru_module_name, module_name(alg->cra_module),
sizeof(ualg->cru_module_name));
ualg->cru_type = 0;
@@ -177,9 +128,9 @@ static int crypto_report_one(struct crypto_alg *alg,
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
struct crypto_report_larval rl;
- strncpy(rl.type, "larval", sizeof(rl.type));
- if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
- sizeof(struct crypto_report_larval), &rl))
+ memset(&rl, 0, sizeof(rl));
+ strscpy(rl.type, "larval", sizeof(rl.type));
+ if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(rl), &rl))
goto nla_put_failure;
goto out;
}
@@ -202,20 +153,6 @@ static int crypto_report_one(struct crypto_alg *alg,
goto nla_put_failure;
break;
- case CRYPTO_ALG_TYPE_ACOMPRESS:
- if (crypto_report_acomp(skb, alg))
- goto nla_put_failure;
-
- break;
- case CRYPTO_ALG_TYPE_AKCIPHER:
- if (crypto_report_akcipher(skb, alg))
- goto nla_put_failure;
-
- break;
- case CRYPTO_ALG_TYPE_KPP:
- if (crypto_report_kpp(skb, alg))
- goto nla_put_failure;
- break;
}
out:
@@ -294,30 +231,33 @@ drop_alg:
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct crypto_alg *alg;
+ const size_t start_pos = cb->args[0];
+ size_t pos = 0;
struct crypto_dump_info info;
- int err;
-
- if (cb->args[0])
- goto out;
-
- cb->args[0] = 1;
+ struct crypto_alg *alg;
+ int res;
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
+ down_read(&crypto_alg_sem);
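+ /* resume where the previous dump invocation stopped (cb->args[0]) */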
list_for_each_entry(alg, &crypto_alg_list, cra_list) {
- err = crypto_report_alg(alg, &info);
- if (err)
- goto out_err;
+ if (pos >= start_pos) {
+ res = crypto_report_alg(alg, &info);
+ if (res == -EMSGSIZE)
+ break;
+ if (res)
+ goto out;
+ }
+ pos++;
}
-
+ cb->args[0] = pos;
+ res = skb->len;
out:
- return skb->len;
-out_err:
- return err;
+ up_read(&crypto_alg_sem);
+ return res;
}
static int crypto_dump_report_done(struct netlink_callback *cb)
@@ -483,9 +423,7 @@ static const struct crypto_link {
.dump = crypto_dump_report,
.done = crypto_dump_report_done},
[CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
- [CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = { .doit = crypto_reportstat,
- .dump = crypto_dump_reportstat,
- .done = crypto_dump_reportstat_done},
+ [CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = { .doit = crypto_reportstat},
};
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -505,7 +443,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
(nlh->nlmsg_flags & NLM_F_DUMP))) {
struct crypto_alg *alg;
- u16 dump_alloc = 0;
+ unsigned long dump_alloc = 0;
if (link->dump == NULL)
return -EINVAL;
@@ -513,16 +451,16 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
down_read(&crypto_alg_sem);
list_for_each_entry(alg, &crypto_alg_list, cra_list)
dump_alloc += CRYPTO_REPORT_MAXSIZE;
+ up_read(&crypto_alg_sem);
{
struct netlink_dump_control c = {
.dump = link->dump,
.done = link->done,
- .min_dump_alloc = dump_alloc,
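+ /* cap the hint at 65535, the most netlink's u16 min_dump_alloc can carry */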
+ .min_dump_alloc = min(dump_alloc, 65535UL),
};
err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
}
- up_read(&crypto_alg_sem);
return err;
}
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
index 1dfaa0ccd555..3e9a53233d80 100644
--- a/crypto/crypto_user_stat.c
+++ b/crypto/crypto_user_stat.c
@@ -33,260 +33,149 @@ struct crypto_dump_info {
static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
{
- struct crypto_stat raead;
- u64 v64;
- u32 v32;
+ struct crypto_stat_aead raead;
memset(&raead, 0, sizeof(raead));
- strncpy(raead.type, "aead", sizeof(raead.type));
-
- v32 = atomic_read(&alg->encrypt_cnt);
- raead.stat_encrypt_cnt = v32;
- v64 = atomic64_read(&alg->encrypt_tlen);
- raead.stat_encrypt_tlen = v64;
- v32 = atomic_read(&alg->decrypt_cnt);
- raead.stat_decrypt_cnt = v32;
- v64 = atomic64_read(&alg->decrypt_tlen);
- raead.stat_decrypt_tlen = v64;
- v32 = atomic_read(&alg->aead_err_cnt);
- raead.stat_aead_err_cnt = v32;
-
- if (nla_put(skb, CRYPTOCFGA_STAT_AEAD,
- sizeof(struct crypto_stat), &raead))
- goto nla_put_failure;
- return 0;
+ strscpy(raead.type, "aead", sizeof(raead.type));
-nla_put_failure:
- return -EMSGSIZE;
+ raead.stat_encrypt_cnt = atomic64_read(&alg->stats.aead.encrypt_cnt);
+ raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen);
+ raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt);
+ raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen);
+ raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
}
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
- struct crypto_stat rcipher;
- u64 v64;
- u32 v32;
+ struct crypto_stat_cipher rcipher;
memset(&rcipher, 0, sizeof(rcipher));
- strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
-
- v32 = atomic_read(&alg->encrypt_cnt);
- rcipher.stat_encrypt_cnt = v32;
- v64 = atomic64_read(&alg->encrypt_tlen);
- rcipher.stat_encrypt_tlen = v64;
- v32 = atomic_read(&alg->decrypt_cnt);
- rcipher.stat_decrypt_cnt = v32;
- v64 = atomic64_read(&alg->decrypt_tlen);
- rcipher.stat_decrypt_tlen = v64;
- v32 = atomic_read(&alg->cipher_err_cnt);
- rcipher.stat_cipher_err_cnt = v32;
-
- if (nla_put(skb, CRYPTOCFGA_STAT_CIPHER,
- sizeof(struct crypto_stat), &rcipher))
- goto nla_put_failure;
- return 0;
+ strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
-nla_put_failure:
- return -EMSGSIZE;
+ rcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.cipher.encrypt_cnt);
+ rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen);
+ rcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.cipher.decrypt_cnt);
+ rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen);
+ rcipher.stat_err_cnt = atomic64_read(&alg->stats.cipher.err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}
static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
- struct crypto_stat rcomp;
- u64 v64;
- u32 v32;
+ struct crypto_stat_compress rcomp;
memset(&rcomp, 0, sizeof(rcomp));
- strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
- v32 = atomic_read(&alg->compress_cnt);
- rcomp.stat_compress_cnt = v32;
- v64 = atomic64_read(&alg->compress_tlen);
- rcomp.stat_compress_tlen = v64;
- v32 = atomic_read(&alg->decompress_cnt);
- rcomp.stat_decompress_cnt = v32;
- v64 = atomic64_read(&alg->decompress_tlen);
- rcomp.stat_decompress_tlen = v64;
- v32 = atomic_read(&alg->cipher_err_cnt);
- rcomp.stat_compress_err_cnt = v32;
-
- if (nla_put(skb, CRYPTOCFGA_STAT_COMPRESS,
- sizeof(struct crypto_stat), &rcomp))
- goto nla_put_failure;
- return 0;
+ strscpy(rcomp.type, "compression", sizeof(rcomp.type));
+ rcomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
+ rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
+ rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
+ rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
+ rcomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
}
static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
{
- struct crypto_stat racomp;
- u64 v64;
- u32 v32;
+ struct crypto_stat_compress racomp;
memset(&racomp, 0, sizeof(racomp));
- strlcpy(racomp.type, "acomp", sizeof(racomp.type));
- v32 = atomic_read(&alg->compress_cnt);
- racomp.stat_compress_cnt = v32;
- v64 = atomic64_read(&alg->compress_tlen);
- racomp.stat_compress_tlen = v64;
- v32 = atomic_read(&alg->decompress_cnt);
- racomp.stat_decompress_cnt = v32;
- v64 = atomic64_read(&alg->decompress_tlen);
- racomp.stat_decompress_tlen = v64;
- v32 = atomic_read(&alg->cipher_err_cnt);
- racomp.stat_compress_err_cnt = v32;
-
- if (nla_put(skb, CRYPTOCFGA_STAT_ACOMP,
- sizeof(struct crypto_stat), &racomp))
- goto nla_put_failure;
- return 0;
+ strscpy(racomp.type, "acomp", sizeof(racomp.type));
+ racomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
+ racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
+ racomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
+ racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
+ racomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
}
static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
{
- struct crypto_stat rakcipher;
- u64 v64;
- u32 v32;
+ struct crypto_stat_akcipher rakcipher;
memset(&rakcipher, 0, sizeof(rakcipher));
- strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
- v32 = atomic_read(&alg->encrypt_cnt);
- rakcipher.stat_encrypt_cnt = v32;
- v64 = atomic64_read(&alg->encrypt_tlen);
- rakcipher.stat_encrypt_tlen = v64;
- v32 = atomic_read(&alg->decrypt_cnt);
- rakcipher.stat_decrypt_cnt = v32;
- v64 = atomic64_read(&alg->decrypt_tlen);
- rakcipher.stat_decrypt_tlen = v64;
- v32 = atomic_read(&alg->sign_cnt);
- rakcipher.stat_sign_cnt = v32;
- v32 = atomic_read(&alg->verify_cnt);
- rakcipher.stat_verify_cnt = v32;
- v32 = atomic_read(&alg->akcipher_err_cnt);
- rakcipher.stat_akcipher_err_cnt = v32;
-
- if (nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
- sizeof(struct crypto_stat), &rakcipher))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
+ strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+ rakcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.akcipher.encrypt_cnt);
+ rakcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.akcipher.encrypt_tlen);
+ rakcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.akcipher.decrypt_cnt);
+ rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen);
+ rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt);
+ rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt);
+ rakcipher.stat_err_cnt = atomic64_read(&alg->stats.akcipher.err_cnt);
+
+ return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
+ sizeof(rakcipher), &rakcipher);
}
static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
{
- struct crypto_stat rkpp;
- u32 v;
+ struct crypto_stat_kpp rkpp;
memset(&rkpp, 0, sizeof(rkpp));
- strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
-
- v = atomic_read(&alg->setsecret_cnt);
- rkpp.stat_setsecret_cnt = v;
- v = atomic_read(&alg->generate_public_key_cnt);
- rkpp.stat_generate_public_key_cnt = v;
- v = atomic_read(&alg->compute_shared_secret_cnt);
- rkpp.stat_compute_shared_secret_cnt = v;
- v = atomic_read(&alg->kpp_err_cnt);
- rkpp.stat_kpp_err_cnt = v;
+ strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
- if (nla_put(skb, CRYPTOCFGA_STAT_KPP,
- sizeof(struct crypto_stat), &rkpp))
- goto nla_put_failure;
- return 0;
+ rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt);
+ rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt);
+ rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt);
+ rkpp.stat_err_cnt = atomic64_read(&alg->stats.kpp.err_cnt);
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
}
static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
{
- struct crypto_stat rhash;
- u64 v64;
- u32 v32;
+ struct crypto_stat_hash rhash;
memset(&rhash, 0, sizeof(rhash));
- strncpy(rhash.type, "ahash", sizeof(rhash.type));
+ strscpy(rhash.type, "ahash", sizeof(rhash.type));
- v32 = atomic_read(&alg->hash_cnt);
- rhash.stat_hash_cnt = v32;
- v64 = atomic64_read(&alg->hash_tlen);
- rhash.stat_hash_tlen = v64;
- v32 = atomic_read(&alg->hash_err_cnt);
- rhash.stat_hash_err_cnt = v32;
+ rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
+ rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
+ rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
- if (nla_put(skb, CRYPTOCFGA_STAT_HASH,
- sizeof(struct crypto_stat), &rhash))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
}
static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
{
- struct crypto_stat rhash;
- u64 v64;
- u32 v32;
+ struct crypto_stat_hash rhash;
memset(&rhash, 0, sizeof(rhash));
- strncpy(rhash.type, "shash", sizeof(rhash.type));
-
- v32 = atomic_read(&alg->hash_cnt);
- rhash.stat_hash_cnt = v32;
- v64 = atomic64_read(&alg->hash_tlen);
- rhash.stat_hash_tlen = v64;
- v32 = atomic_read(&alg->hash_err_cnt);
- rhash.stat_hash_err_cnt = v32;
+ strscpy(rhash.type, "shash", sizeof(rhash.type));
- if (nla_put(skb, CRYPTOCFGA_STAT_HASH,
- sizeof(struct crypto_stat), &rhash))
- goto nla_put_failure;
- return 0;
+ rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
+ rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
+ rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
}
static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
{
- struct crypto_stat rrng;
- u64 v64;
- u32 v32;
+ struct crypto_stat_rng rrng;
memset(&rrng, 0, sizeof(rrng));
- strncpy(rrng.type, "rng", sizeof(rrng.type));
+ strscpy(rrng.type, "rng", sizeof(rrng.type));
- v32 = atomic_read(&alg->generate_cnt);
- rrng.stat_generate_cnt = v32;
- v64 = atomic64_read(&alg->generate_tlen);
- rrng.stat_generate_tlen = v64;
- v32 = atomic_read(&alg->seed_cnt);
- rrng.stat_seed_cnt = v32;
- v32 = atomic_read(&alg->hash_err_cnt);
- rrng.stat_rng_err_cnt = v32;
-
- if (nla_put(skb, CRYPTOCFGA_STAT_RNG,
- sizeof(struct crypto_stat), &rrng))
- goto nla_put_failure;
- return 0;
+ rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt);
+ rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen);
+ rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt);
+ rrng.stat_err_cnt = atomic64_read(&alg->stats.rng.err_cnt);
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
}
static int crypto_reportstat_one(struct crypto_alg *alg,
@@ -295,10 +184,10 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
{
memset(ualg, 0, sizeof(*ualg));
- strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
- strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
+ strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+ strscpy(ualg->cru_driver_name, alg->cra_driver_name,
sizeof(ualg->cru_driver_name));
- strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
+ strscpy(ualg->cru_module_name, module_name(alg->cra_module),
sizeof(ualg->cru_module_name));
ualg->cru_type = 0;
@@ -309,12 +198,11 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
goto nla_put_failure;
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
- struct crypto_stat rl;
+ struct crypto_stat_larval rl;
memset(&rl, 0, sizeof(rl));
- strlcpy(rl.type, "larval", sizeof(rl.type));
- if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL,
- sizeof(struct crypto_stat), &rl))
+ strscpy(rl.type, "larval", sizeof(rl.type));
+ if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl))
goto nla_put_failure;
goto out;
}
@@ -448,37 +336,4 @@ drop_alg:
return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}
-int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb)
-{
- struct crypto_alg *alg;
- struct crypto_dump_info info;
- int err;
-
- if (cb->args[0])
- goto out;
-
- cb->args[0] = 1;
-
- info.in_skb = cb->skb;
- info.out_skb = skb;
- info.nlmsg_seq = cb->nlh->nlmsg_seq;
- info.nlmsg_flags = NLM_F_MULTI;
-
- list_for_each_entry(alg, &crypto_alg_list, cra_list) {
- err = crypto_reportstat_alg(alg, &info);
- if (err)
- goto out_err;
- }
-
-out:
- return skb->len;
-out_err:
- return err;
-}
-
-int crypto_dump_reportstat_done(struct netlink_callback *cb)
-{
- return 0;
-}
-
MODULE_LICENSE("GPL");
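
The crypto_user_stat.c hunks above all converge on one shape: zero the per-type report structure, copy the type string with strscpy(), pull each counter out of the new alg->stats sub-structure with atomic64_read(), and return nla_put() directly, since nla_put() already yields -EMSGSIZE on failure and makes the old nla_put_failure label redundant. A minimal sketch of that shape; crypto_stat_foo, stats.foo and CRYPTOCFGA_STAT_FOO are hypothetical stand-ins for the per-type cases:

static int crypto_report_foo(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_stat_foo rfoo;

	/* Zero first so no uninitialized padding reaches userspace. */
	memset(&rfoo, 0, sizeof(rfoo));

	strscpy(rfoo.type, "foo", sizeof(rfoo.type));
	rfoo.stat_err_cnt = atomic64_read(&alg->stats.foo.err_cnt);

	/* nla_put() returns -EMSGSIZE itself on failure. */
	return nla_put(skb, CRYPTOCFGA_STAT_FOO, sizeof(rfoo), &rfoo);
}
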
diff --git a/crypto/ctr.c b/crypto/ctr.c
index 435b75bd619e..30f3946efc6d 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -233,8 +233,6 @@ static struct crypto_instance *crypto_ctr_alloc(struct rtattr **tb)
inst->alg.cra_blkcipher.encrypt = crypto_ctr_crypt;
inst->alg.cra_blkcipher.decrypt = crypto_ctr_crypt;
- inst->alg.cra_blkcipher.geniv = "chainiv";
-
out:
crypto_mod_put(alg);
return inst;
diff --git a/crypto/ecc.c b/crypto/ecc.c
index 8facafd67802..ed1237115066 100644
--- a/crypto/ecc.c
+++ b/crypto/ecc.c
@@ -842,15 +842,23 @@ static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
static void ecc_point_mult(struct ecc_point *result,
const struct ecc_point *point, const u64 *scalar,
- u64 *initial_z, u64 *curve_prime,
+ u64 *initial_z, const struct ecc_curve *curve,
unsigned int ndigits)
{
/* R0 and R1 */
u64 rx[2][ECC_MAX_DIGITS];
u64 ry[2][ECC_MAX_DIGITS];
u64 z[ECC_MAX_DIGITS];
+ u64 sk[2][ECC_MAX_DIGITS];
+ u64 *curve_prime = curve->p;
int i, nb;
- int num_bits = vli_num_bits(scalar, ndigits);
+ int num_bits;
+ int carry;
+
+ carry = vli_add(sk[0], scalar, curve->n, ndigits);
+ vli_add(sk[1], sk[0], curve->n, ndigits);
+ scalar = sk[!carry];
+ num_bits = sizeof(u64) * ndigits * 8 + 1;
vli_set(rx[1], point->x, ndigits);
vli_set(ry[1], point->y, ndigits);
@@ -904,30 +912,43 @@ static inline void ecc_swap_digits(const u64 *in, u64 *out,
out[i] = __swab64(in[ndigits - 1 - i]);
}
-int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
- const u64 *private_key, unsigned int private_key_len)
+static int __ecc_is_key_valid(const struct ecc_curve *curve,
+ const u64 *private_key, unsigned int ndigits)
{
- int nbytes;
- const struct ecc_curve *curve = ecc_get_curve(curve_id);
+ u64 one[ECC_MAX_DIGITS] = { 1, };
+ u64 res[ECC_MAX_DIGITS];
if (!private_key)
return -EINVAL;
- nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
-
- if (private_key_len != nbytes)
+ if (curve->g.ndigits != ndigits)
return -EINVAL;
- if (vli_is_zero(private_key, ndigits))
+ /* Make sure the private key is in the range [2, n-3]. */
+ if (vli_cmp(one, private_key, ndigits) != -1)
return -EINVAL;
-
- /* Make sure the private key is in the range [1, n-1]. */
- if (vli_cmp(curve->n, private_key, ndigits) != 1)
+ vli_sub(res, curve->n, one, ndigits);
+ vli_sub(res, res, one, ndigits);
+ if (vli_cmp(res, private_key, ndigits) != 1)
return -EINVAL;
return 0;
}
+int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
+ const u64 *private_key, unsigned int private_key_len)
+{
+ int nbytes;
+ const struct ecc_curve *curve = ecc_get_curve(curve_id);
+
+ nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
+
+ if (private_key_len != nbytes)
+ return -EINVAL;
+
+ return __ecc_is_key_valid(curve, private_key, ndigits);
+}
+
/*
* ECC private keys are generated using the method of extra random bits,
* equivalent to that described in FIPS 186-4, Appendix B.4.1.
@@ -971,11 +992,8 @@ int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
if (err)
return err;
- if (vli_is_zero(priv, ndigits))
- return -EINVAL;
-
- /* Make sure the private key is in the range [1, n-1]. */
- if (vli_cmp(curve->n, priv, ndigits) != 1)
+ /* Make sure the private key is in the valid range. */
+ if (__ecc_is_key_valid(curve, priv, ndigits))
return -EINVAL;
ecc_swap_digits(priv, privkey, ndigits);
@@ -1004,7 +1022,7 @@ int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
goto out;
}
- ecc_point_mult(pk, &curve->g, priv, NULL, curve->p, ndigits);
+ ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
if (ecc_point_is_zero(pk)) {
ret = -EAGAIN;
goto err_free_point;
@@ -1090,7 +1108,7 @@ int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
goto err_alloc_product;
}
- ecc_point_mult(product, pk, priv, rand_z, curve->p, ndigits);
+ ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
ecc_swap_digits(product->x, secret, ndigits);
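
The ecc_point_mult() change above regularizes the scalar before the Montgomery ladder: it adds the curve order n once or twice and picks whichever sum carries into the extra top bit, so the value actually walked always has exactly 64 * ndigits + 1 bits. Since nG is the point at infinity, (k + n)G = (k + 2n)G = kG and the result is unchanged; what changes is that the ladder's iteration count no longer leaks the bit length of k through timing. __ecc_is_key_valid() is tightened in the same spirit, rejecting private keys outside [2, n-3] instead of [1, n-1]. A toy model of the selection, with a made-up 4-bit digit and group order n = 11 in place of the vli_*() multiprecision calls:

#include <stdio.h>

static int bit_length(unsigned int v)
{
	int bits = 0;

	while (v) {
		v >>= 1;
		bits++;
	}
	return bits;
}

int main(void)
{
	const unsigned int n = 11;	/* made-up group order */
	const int digit_bits = 4;	/* stands in for 64 * ndigits */
	unsigned int k;

	for (k = 1; k < n; k++) {
		/* carry = vli_add(sk[0], scalar, curve->n, ndigits); */
		int carry = (k + n) >> digit_bits;
		/* scalar = sk[!carry]; */
		unsigned int used = carry ? k + n : k + 2 * n;

		/* prints bits=5 for every k */
		printf("k=%2u  scalar=%2u  bits=%d\n",
		       k, used, bit_length(used));
	}
	return 0;
}
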
diff --git a/crypto/hash_info.c b/crypto/hash_info.c
index 7b1e0b188ce6..1dd095e4b451 100644
--- a/crypto/hash_info.c
+++ b/crypto/hash_info.c
@@ -32,6 +32,8 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = {
[HASH_ALGO_TGR_160] = "tgr160",
[HASH_ALGO_TGR_192] = "tgr192",
[HASH_ALGO_SM3_256] = "sm3-256",
+ [HASH_ALGO_STREEBOG_256] = "streebog256",
+ [HASH_ALGO_STREEBOG_512] = "streebog512",
};
EXPORT_SYMBOL_GPL(hash_algo_name);
@@ -54,5 +56,7 @@ const int hash_digest_size[HASH_ALGO__LAST] = {
[HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE,
[HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE,
[HASH_ALGO_SM3_256] = SM3256_DIGEST_SIZE,
+ [HASH_ALGO_STREEBOG_256] = STREEBOG256_DIGEST_SIZE,
+ [HASH_ALGO_STREEBOG_512] = STREEBOG512_DIGEST_SIZE,
};
EXPORT_SYMBOL_GPL(hash_digest_size);
diff --git a/crypto/kpp.c b/crypto/kpp.c
index a90edc27af77..bc2f1006a2f7 100644
--- a/crypto/kpp.c
+++ b/crypto/kpp.c
@@ -30,15 +30,11 @@ static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_kpp rkpp;
- strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
+ memset(&rkpp, 0, sizeof(rkpp));
- if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
- sizeof(struct crypto_report_kpp), &rkpp))
- goto nla_put_failure;
- return 0;
+ strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(rkpp), &rkpp);
}
#else
static int crypto_kpp_report(struct sk_buff *skb, struct crypto_alg *alg)
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 2ce2660d3519..c160dfdbf2e0 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -122,7 +122,6 @@ static struct crypto_alg alg_lz4 = {
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct lz4_ctx),
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(alg_lz4.cra_list),
.cra_init = lz4_init,
.cra_exit = lz4_exit,
.cra_u = { .compress = {
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 2be14f054daf..583b5e013d7a 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -123,7 +123,6 @@ static struct crypto_alg alg_lz4hc = {
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct lz4hc_ctx),
.cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(alg_lz4hc.cra_list),
.cra_init = lz4hc_init,
.cra_exit = lz4hc_exit,
.cra_u = { .compress = {
diff --git a/crypto/nhpoly1305.c b/crypto/nhpoly1305.c
new file mode 100644
index 000000000000..ec831a5594d8
--- /dev/null
+++ b/crypto/nhpoly1305.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
+ *
+ * Copyright 2018 Google LLC
+ */
+
+/*
+ * "NHPoly1305" is the main component of Adiantum hashing.
+ * Specifically, it is the calculation
+ *
+ * H_L ← Poly1305_{K_L}(NH_{K_N}(pad_{128}(L)))
+ *
+ * from the procedure in section 6.4 of the Adiantum paper [1]. It is an
+ * ε-almost-∆-universal (ε-∆U) hash function for equal-length inputs over
+ * Z/(2^{128}Z), where the "∆" operation is addition. It hashes 1024-byte
+ * chunks of the input with the NH hash function [2], reducing the input length
+ * by 32x. The resulting NH digests are evaluated as a polynomial in
+ * GF(2^{130}-5), like in the Poly1305 MAC [3]. Note that the polynomial
+ * evaluation by itself would suffice to achieve the ε-∆U property; NH is used
+ * for performance since it's over twice as fast as Poly1305.
+ *
+ * This is *not* a cryptographic hash function; do not use it as such!
+ *
+ * [1] Adiantum: length-preserving encryption for entry-level processors
+ * (https://eprint.iacr.org/2018/720.pdf)
+ * [2] UMAC: Fast and Secure Message Authentication
+ * (https://fastcrypto.org/umac/umac_proc.pdf)
+ * [3] The Poly1305-AES message-authentication code
+ * (https://cr.yp.to/mac/poly1305-20050329.pdf)
+ */
+
+#include <asm/unaligned.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/nhpoly1305.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+static void nh_generic(const u32 *key, const u8 *message, size_t message_len,
+ __le64 hash[NH_NUM_PASSES])
+{
+ u64 sums[4] = { 0, 0, 0, 0 };
+
+ BUILD_BUG_ON(NH_PAIR_STRIDE != 2);
+ BUILD_BUG_ON(NH_NUM_PASSES != 4);
+
+ while (message_len) {
+ u32 m0 = get_unaligned_le32(message + 0);
+ u32 m1 = get_unaligned_le32(message + 4);
+ u32 m2 = get_unaligned_le32(message + 8);
+ u32 m3 = get_unaligned_le32(message + 12);
+
+ sums[0] += (u64)(u32)(m0 + key[ 0]) * (u32)(m2 + key[ 2]);
+ sums[1] += (u64)(u32)(m0 + key[ 4]) * (u32)(m2 + key[ 6]);
+ sums[2] += (u64)(u32)(m0 + key[ 8]) * (u32)(m2 + key[10]);
+ sums[3] += (u64)(u32)(m0 + key[12]) * (u32)(m2 + key[14]);
+ sums[0] += (u64)(u32)(m1 + key[ 1]) * (u32)(m3 + key[ 3]);
+ sums[1] += (u64)(u32)(m1 + key[ 5]) * (u32)(m3 + key[ 7]);
+ sums[2] += (u64)(u32)(m1 + key[ 9]) * (u32)(m3 + key[11]);
+ sums[3] += (u64)(u32)(m1 + key[13]) * (u32)(m3 + key[15]);
+ key += NH_MESSAGE_UNIT / sizeof(key[0]);
+ message += NH_MESSAGE_UNIT;
+ message_len -= NH_MESSAGE_UNIT;
+ }
+
+ hash[0] = cpu_to_le64(sums[0]);
+ hash[1] = cpu_to_le64(sums[1]);
+ hash[2] = cpu_to_le64(sums[2]);
+ hash[3] = cpu_to_le64(sums[3]);
+}
+
+/* Pass the next NH hash value through Poly1305 */
+static void process_nh_hash_value(struct nhpoly1305_state *state,
+ const struct nhpoly1305_key *key)
+{
+ BUILD_BUG_ON(NH_HASH_BYTES % POLY1305_BLOCK_SIZE != 0);
+
+ poly1305_core_blocks(&state->poly_state, &key->poly_key, state->nh_hash,
+ NH_HASH_BYTES / POLY1305_BLOCK_SIZE);
+}
+
+/*
+ * Feed the next portion of the source data, as a whole number of 16-byte
+ * "NH message units", through NH and Poly1305. Each NH hash is taken over
+ * 1024 bytes, except possibly the final one which is taken over a multiple of
+ * 16 bytes up to 1024. Also, in the case where data is passed in misaligned
+ * chunks, we combine partial hashes; the end result is the same either way.
+ */
+static void nhpoly1305_units(struct nhpoly1305_state *state,
+ const struct nhpoly1305_key *key,
+ const u8 *src, unsigned int srclen, nh_t nh_fn)
+{
+ do {
+ unsigned int bytes;
+
+ if (state->nh_remaining == 0) {
+ /* Starting a new NH message */
+ bytes = min_t(unsigned int, srclen, NH_MESSAGE_BYTES);
+ nh_fn(key->nh_key, src, bytes, state->nh_hash);
+ state->nh_remaining = NH_MESSAGE_BYTES - bytes;
+ } else {
+ /* Continuing a previous NH message */
+ __le64 tmp_hash[NH_NUM_PASSES];
+ unsigned int pos;
+ int i;
+
+ pos = NH_MESSAGE_BYTES - state->nh_remaining;
+ bytes = min(srclen, state->nh_remaining);
+ nh_fn(&key->nh_key[pos / 4], src, bytes, tmp_hash);
+ for (i = 0; i < NH_NUM_PASSES; i++)
+ le64_add_cpu(&state->nh_hash[i],
+ le64_to_cpu(tmp_hash[i]));
+ state->nh_remaining -= bytes;
+ }
+ if (state->nh_remaining == 0)
+ process_nh_hash_value(state, key);
+ src += bytes;
+ srclen -= bytes;
+ } while (srclen);
+}
+
+int crypto_nhpoly1305_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ struct nhpoly1305_key *ctx = crypto_shash_ctx(tfm);
+ int i;
+
+ if (keylen != NHPOLY1305_KEY_SIZE)
+ return -EINVAL;
+
+ poly1305_core_setkey(&ctx->poly_key, key);
+ key += POLY1305_BLOCK_SIZE;
+
+ for (i = 0; i < NH_KEY_WORDS; i++)
+ ctx->nh_key[i] = get_unaligned_le32(key + i * sizeof(u32));
+
+ return 0;
+}
+EXPORT_SYMBOL(crypto_nhpoly1305_setkey);
+
+int crypto_nhpoly1305_init(struct shash_desc *desc)
+{
+ struct nhpoly1305_state *state = shash_desc_ctx(desc);
+
+ poly1305_core_init(&state->poly_state);
+ state->buflen = 0;
+ state->nh_remaining = 0;
+ return 0;
+}
+EXPORT_SYMBOL(crypto_nhpoly1305_init);
+
+int crypto_nhpoly1305_update_helper(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen,
+ nh_t nh_fn)
+{
+ struct nhpoly1305_state *state = shash_desc_ctx(desc);
+ const struct nhpoly1305_key *key = crypto_shash_ctx(desc->tfm);
+ unsigned int bytes;
+
+ if (state->buflen) {
+ bytes = min(srclen, (int)NH_MESSAGE_UNIT - state->buflen);
+ memcpy(&state->buffer[state->buflen], src, bytes);
+ state->buflen += bytes;
+ if (state->buflen < NH_MESSAGE_UNIT)
+ return 0;
+ nhpoly1305_units(state, key, state->buffer, NH_MESSAGE_UNIT,
+ nh_fn);
+ state->buflen = 0;
+ src += bytes;
+ srclen -= bytes;
+ }
+
+ if (srclen >= NH_MESSAGE_UNIT) {
+ bytes = round_down(srclen, NH_MESSAGE_UNIT);
+ nhpoly1305_units(state, key, src, bytes, nh_fn);
+ src += bytes;
+ srclen -= bytes;
+ }
+
+ if (srclen) {
+ memcpy(state->buffer, src, srclen);
+ state->buflen = srclen;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(crypto_nhpoly1305_update_helper);
+
+int crypto_nhpoly1305_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen)
+{
+ return crypto_nhpoly1305_update_helper(desc, src, srclen, nh_generic);
+}
+EXPORT_SYMBOL(crypto_nhpoly1305_update);
+
+int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst, nh_t nh_fn)
+{
+ struct nhpoly1305_state *state = shash_desc_ctx(desc);
+ const struct nhpoly1305_key *key = crypto_shash_ctx(desc->tfm);
+
+ if (state->buflen) {
+ memset(&state->buffer[state->buflen], 0,
+ NH_MESSAGE_UNIT - state->buflen);
+ nhpoly1305_units(state, key, state->buffer, NH_MESSAGE_UNIT,
+ nh_fn);
+ }
+
+ if (state->nh_remaining)
+ process_nh_hash_value(state, key);
+
+ poly1305_core_emit(&state->poly_state, dst);
+ return 0;
+}
+EXPORT_SYMBOL(crypto_nhpoly1305_final_helper);
+
+int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst)
+{
+ return crypto_nhpoly1305_final_helper(desc, dst, nh_generic);
+}
+EXPORT_SYMBOL(crypto_nhpoly1305_final);
+
+static struct shash_alg nhpoly1305_alg = {
+ .base.cra_name = "nhpoly1305",
+ .base.cra_driver_name = "nhpoly1305-generic",
+ .base.cra_priority = 100,
+ .base.cra_ctxsize = sizeof(struct nhpoly1305_key),
+ .base.cra_module = THIS_MODULE,
+ .digestsize = POLY1305_DIGEST_SIZE,
+ .init = crypto_nhpoly1305_init,
+ .update = crypto_nhpoly1305_update,
+ .final = crypto_nhpoly1305_final,
+ .setkey = crypto_nhpoly1305_setkey,
+ .descsize = sizeof(struct nhpoly1305_state),
+};
+
+static int __init nhpoly1305_mod_init(void)
+{
+ return crypto_register_shash(&nhpoly1305_alg);
+}
+
+static void __exit nhpoly1305_mod_exit(void)
+{
+ crypto_unregister_shash(&nhpoly1305_alg);
+}
+
+module_init(nhpoly1305_mod_init);
+module_exit(nhpoly1305_mod_exit);
+
+MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_ALIAS_CRYPTO("nhpoly1305");
+MODULE_ALIAS_CRYPTO("nhpoly1305-generic");
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index ef802f6e9642..8aa10144407c 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -244,9 +244,8 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
spawn = skcipher_instance_ctx(inst);
err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
CRYPTO_ALG_TYPE_MASK);
- crypto_mod_put(alg);
if (err)
- goto err_free_inst;
+ goto err_put_alg;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg);
if (err)
@@ -275,12 +274,15 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
err = skcipher_register_instance(tmpl, inst);
if (err)
goto err_drop_spawn;
+ crypto_mod_put(alg);
out:
return err;
err_drop_spawn:
crypto_drop_spawn(spawn);
+err_put_alg:
+ crypto_mod_put(alg);
err_free_inst:
kfree(inst);
goto out;
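
The pcbc.c fix above is purely about reference lifetime: crypto_init_spawn() records alg in the spawn, but it is the caller's reference that keeps the underlying module alive until the instance is registered, so the old early crypto_mod_put(alg) opened a use-after-free window. The rewrite holds the reference across registration and adds err_put_alg so every exit drops it exactly once. A userspace toy of the balanced paths, with invented get_alg()/put_alg() helpers standing in for the module refcount:

#include <stdio.h>

static int refs;

static void get_alg(void) { refs++; }	/* crypto_get_attr_alg() */
static void put_alg(void) { refs--; }	/* crypto_mod_put() */

static int create(int fail_spawn, int fail_register)
{
	int err = 0;

	get_alg();
	if (fail_spawn) {
		err = -1;
		goto err_put_alg;
	}
	if (fail_register) {
		err = -1;
		goto err_drop_spawn;
	}
	put_alg();	/* success: the registered instance pins alg now */
	return 0;

err_drop_spawn:	/* crypto_drop_spawn() would go here */
err_put_alg:
	put_alg();
	return err;
}

int main(void)
{
	create(0, 0);
	create(1, 0);
	create(0, 1);
	printf("unbalanced refs: %d\n", refs);	/* prints 0 */
	return 0;
}
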
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index f8ec3d4ba4a8..d47cfc47b1b1 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -382,7 +382,7 @@ static int pcrypt_cpumask_change_notify(struct notifier_block *self,
cpumask_copy(new_mask->mask, cpumask->cbcpu);
rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
- synchronize_rcu_bh();
+ synchronize_rcu();
free_cpumask_var(old_mask->mask);
kfree(old_mask);
@@ -394,7 +394,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
int ret;
pinst->kobj.kset = pcrypt_kset;
- ret = kobject_add(&pinst->kobj, NULL, name);
+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
if (!ret)
kobject_uevent(&pinst->kobj, KOBJ_ADD);
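
Two independent fixes in pcrypt.c: synchronize_rcu_bh() becomes synchronize_rcu() as part of this cycle's RCU flavor consolidation, and kobject_add() stops receiving the instance name as its format string. The second one matters because a '%' in the name would be parsed as a conversion; a userspace illustration with printf(), using an invented name:

#include <stdio.h>

int main(void)
{
	const char *name = "pcrypt-50%s";	/* invented name with '%' */

	/*
	 * printf(name) would parse the "%s" and read a nonexistent
	 * argument - undefined behavior, the same bug class as
	 * kobject_add(&pinst->kobj, NULL, name).
	 */
	printf("%s\n", name);	/* the "%s", name form: used verbatim */
	return 0;
}
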
diff --git a/crypto/poly1305_generic.c b/crypto/poly1305_generic.c
index 47d3a6b83931..2a06874204e8 100644
--- a/crypto/poly1305_generic.c
+++ b/crypto/poly1305_generic.c
@@ -38,7 +38,7 @@ int crypto_poly1305_init(struct shash_desc *desc)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
- memset(dctx->h, 0, sizeof(dctx->h));
+ poly1305_core_init(&dctx->h);
dctx->buflen = 0;
dctx->rset = false;
dctx->sset = false;
@@ -47,23 +47,16 @@ int crypto_poly1305_init(struct shash_desc *desc)
}
EXPORT_SYMBOL_GPL(crypto_poly1305_init);
-static void poly1305_setrkey(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key)
{
/* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
- dctx->r[0] = (get_unaligned_le32(key + 0) >> 0) & 0x3ffffff;
- dctx->r[1] = (get_unaligned_le32(key + 3) >> 2) & 0x3ffff03;
- dctx->r[2] = (get_unaligned_le32(key + 6) >> 4) & 0x3ffc0ff;
- dctx->r[3] = (get_unaligned_le32(key + 9) >> 6) & 0x3f03fff;
- dctx->r[4] = (get_unaligned_le32(key + 12) >> 8) & 0x00fffff;
-}
-
-static void poly1305_setskey(struct poly1305_desc_ctx *dctx, const u8 *key)
-{
- dctx->s[0] = get_unaligned_le32(key + 0);
- dctx->s[1] = get_unaligned_le32(key + 4);
- dctx->s[2] = get_unaligned_le32(key + 8);
- dctx->s[3] = get_unaligned_le32(key + 12);
+ key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff;
+ key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03;
+ key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff;
+ key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff;
+ key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff;
}
+EXPORT_SYMBOL_GPL(poly1305_core_setkey);
/*
* Poly1305 requires a unique key for each tag, which implies that we can't set
@@ -75,13 +68,16 @@ unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
{
if (!dctx->sset) {
if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_setrkey(dctx, src);
+ poly1305_core_setkey(&dctx->r, src);
src += POLY1305_BLOCK_SIZE;
srclen -= POLY1305_BLOCK_SIZE;
dctx->rset = true;
}
if (srclen >= POLY1305_BLOCK_SIZE) {
- poly1305_setskey(dctx, src);
+ dctx->s[0] = get_unaligned_le32(src + 0);
+ dctx->s[1] = get_unaligned_le32(src + 4);
+ dctx->s[2] = get_unaligned_le32(src + 8);
+ dctx->s[3] = get_unaligned_le32(src + 12);
src += POLY1305_BLOCK_SIZE;
srclen -= POLY1305_BLOCK_SIZE;
dctx->sset = true;
@@ -91,41 +87,37 @@ unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
}
EXPORT_SYMBOL_GPL(crypto_poly1305_setdesckey);
-static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen,
- u32 hibit)
+static void poly1305_blocks_internal(struct poly1305_state *state,
+ const struct poly1305_key *key,
+ const void *src, unsigned int nblocks,
+ u32 hibit)
{
u32 r0, r1, r2, r3, r4;
u32 s1, s2, s3, s4;
u32 h0, h1, h2, h3, h4;
u64 d0, d1, d2, d3, d4;
- unsigned int datalen;
- if (unlikely(!dctx->sset)) {
- datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
- src += srclen - datalen;
- srclen = datalen;
- }
+ if (!nblocks)
+ return;
- r0 = dctx->r[0];
- r1 = dctx->r[1];
- r2 = dctx->r[2];
- r3 = dctx->r[3];
- r4 = dctx->r[4];
+ r0 = key->r[0];
+ r1 = key->r[1];
+ r2 = key->r[2];
+ r3 = key->r[3];
+ r4 = key->r[4];
s1 = r1 * 5;
s2 = r2 * 5;
s3 = r3 * 5;
s4 = r4 * 5;
- h0 = dctx->h[0];
- h1 = dctx->h[1];
- h2 = dctx->h[2];
- h3 = dctx->h[3];
- h4 = dctx->h[4];
-
- while (likely(srclen >= POLY1305_BLOCK_SIZE)) {
+ h0 = state->h[0];
+ h1 = state->h[1];
+ h2 = state->h[2];
+ h3 = state->h[3];
+ h4 = state->h[4];
+ do {
/* h += m[i] */
h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff;
h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff;
@@ -154,16 +146,36 @@ static unsigned int poly1305_blocks(struct poly1305_desc_ctx *dctx,
h1 += h0 >> 26; h0 = h0 & 0x3ffffff;
src += POLY1305_BLOCK_SIZE;
- srclen -= POLY1305_BLOCK_SIZE;
- }
+ } while (--nblocks);
- dctx->h[0] = h0;
- dctx->h[1] = h1;
- dctx->h[2] = h2;
- dctx->h[3] = h3;
- dctx->h[4] = h4;
+ state->h[0] = h0;
+ state->h[1] = h1;
+ state->h[2] = h2;
+ state->h[3] = h3;
+ state->h[4] = h4;
+}
- return srclen;
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_key *key,
+ const void *src, unsigned int nblocks)
+{
+ poly1305_blocks_internal(state, key, src, nblocks, 1 << 24);
+}
+EXPORT_SYMBOL_GPL(poly1305_core_blocks);
+
+static void poly1305_blocks(struct poly1305_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen, u32 hibit)
+{
+ unsigned int datalen;
+
+ if (unlikely(!dctx->sset)) {
+ datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
+ src += srclen - datalen;
+ srclen = datalen;
+ }
+
+ poly1305_blocks_internal(&dctx->h, &dctx->r,
+ src, srclen / POLY1305_BLOCK_SIZE, hibit);
}
int crypto_poly1305_update(struct shash_desc *desc,
@@ -187,9 +199,9 @@ int crypto_poly1305_update(struct shash_desc *desc,
}
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
- bytes = poly1305_blocks(dctx, src, srclen, 1 << 24);
- src += srclen - bytes;
- srclen = bytes;
+ poly1305_blocks(dctx, src, srclen, 1 << 24);
+ src += srclen - (srclen % POLY1305_BLOCK_SIZE);
+ srclen %= POLY1305_BLOCK_SIZE;
}
if (unlikely(srclen)) {
@@ -201,30 +213,18 @@ int crypto_poly1305_update(struct shash_desc *desc,
}
EXPORT_SYMBOL_GPL(crypto_poly1305_update);
-int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
+void poly1305_core_emit(const struct poly1305_state *state, void *dst)
{
- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
u32 h0, h1, h2, h3, h4;
u32 g0, g1, g2, g3, g4;
u32 mask;
- u64 f = 0;
-
- if (unlikely(!dctx->sset))
- return -ENOKEY;
-
- if (unlikely(dctx->buflen)) {
- dctx->buf[dctx->buflen++] = 1;
- memset(dctx->buf + dctx->buflen, 0,
- POLY1305_BLOCK_SIZE - dctx->buflen);
- poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 0);
- }
/* fully carry h */
- h0 = dctx->h[0];
- h1 = dctx->h[1];
- h2 = dctx->h[2];
- h3 = dctx->h[3];
- h4 = dctx->h[4];
+ h0 = state->h[0];
+ h1 = state->h[1];
+ h2 = state->h[2];
+ h3 = state->h[3];
+ h4 = state->h[4];
h2 += (h1 >> 26); h1 = h1 & 0x3ffffff;
h3 += (h2 >> 26); h2 = h2 & 0x3ffffff;
@@ -254,16 +254,40 @@ int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
h4 = (h4 & mask) | g4;
/* h = h % (2^128) */
- h0 = (h0 >> 0) | (h1 << 26);
- h1 = (h1 >> 6) | (h2 << 20);
- h2 = (h2 >> 12) | (h3 << 14);
- h3 = (h3 >> 18) | (h4 << 8);
+ put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0);
+ put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4);
+ put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8);
+ put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12);
+}
+EXPORT_SYMBOL_GPL(poly1305_core_emit);
+
+int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
+{
+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
+ __le32 digest[4];
+ u64 f = 0;
+
+ if (unlikely(!dctx->sset))
+ return -ENOKEY;
+
+ if (unlikely(dctx->buflen)) {
+ dctx->buf[dctx->buflen++] = 1;
+ memset(dctx->buf + dctx->buflen, 0,
+ POLY1305_BLOCK_SIZE - dctx->buflen);
+ poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 0);
+ }
+
+ poly1305_core_emit(&dctx->h, digest);
/* mac = (h + s) % (2^128) */
- f = (f >> 32) + h0 + dctx->s[0]; put_unaligned_le32(f, dst + 0);
- f = (f >> 32) + h1 + dctx->s[1]; put_unaligned_le32(f, dst + 4);
- f = (f >> 32) + h2 + dctx->s[2]; put_unaligned_le32(f, dst + 8);
- f = (f >> 32) + h3 + dctx->s[3]; put_unaligned_le32(f, dst + 12);
+ f = (f >> 32) + le32_to_cpu(digest[0]) + dctx->s[0];
+ put_unaligned_le32(f, dst + 0);
+ f = (f >> 32) + le32_to_cpu(digest[1]) + dctx->s[1];
+ put_unaligned_le32(f, dst + 4);
+ f = (f >> 32) + le32_to_cpu(digest[2]) + dctx->s[2];
+ put_unaligned_le32(f, dst + 8);
+ f = (f >> 32) + le32_to_cpu(digest[3]) + dctx->s[3];
+ put_unaligned_le32(f, dst + 12);
return 0;
}
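
The poly1305_generic.c refactor above carves the arithmetic out into poly1305_core_{setkey,init,blocks,emit}() precisely so nhpoly1305.c can evaluate the raw polynomial with no shash descriptor, no 's' half of the key, and no final padding or '+ s' step. A minimal sketch built only on the newly exported helpers; poly1305_raw() is an invented wrapper name:

#include <crypto/poly1305.h>

static void poly1305_raw(const u8 raw_r[POLY1305_BLOCK_SIZE],
			 const void *blocks, unsigned int nblocks,
			 u8 acc[POLY1305_DIGEST_SIZE])
{
	struct poly1305_key key;
	struct poly1305_state state;

	poly1305_core_setkey(&key, raw_r);	/* clamps r as above */
	poly1305_core_init(&state);
	poly1305_core_blocks(&state, &key, blocks, nblocks);
	poly1305_core_emit(&state, acc);	/* h mod 2^128, no 's' added */
}
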
diff --git a/crypto/rng.c b/crypto/rng.c
index 547f16ecbfb0..33c38a72bff5 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -35,9 +35,11 @@ static int crypto_default_rng_refcnt;
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
+ struct crypto_alg *alg = tfm->base.__crt_alg;
u8 *buf = NULL;
int err;
+ crypto_stats_get(alg);
if (!seed && slen) {
buf = kmalloc(slen, GFP_KERNEL);
if (!buf)
@@ -50,7 +52,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
}
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
- crypto_stat_rng_seed(tfm, err);
+ crypto_stats_rng_seed(alg, err);
out:
kzfree(buf);
return err;
@@ -74,17 +76,13 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_rng rrng;
- strncpy(rrng.type, "rng", sizeof(rrng.type));
+ memset(&rrng, 0, sizeof(rrng));
- rrng.seedsize = seedsize(alg);
+ strscpy(rrng.type, "rng", sizeof(rrng.type));
- if (nla_put(skb, CRYPTOCFGA_REPORT_RNG,
- sizeof(struct crypto_report_rng), &rrng))
- goto nla_put_failure;
- return 0;
+ rrng.seedsize = seedsize(alg);
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng);
}
#else
static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 8c77bc78a09f..00fce32ae17a 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -159,7 +159,7 @@ static int salsa20_crypt(struct skcipher_request *req)
u32 state[16];
int err;
- err = skcipher_walk_virt(&walk, req, true);
+ err = skcipher_walk_virt(&walk, req, false);
salsa20_init(state, ctx, walk.iv);
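
Flipping the third argument of skcipher_walk_virt() from true to false stops salsa20_crypt() from unconditionally demanding atomic behavior: with false, the walk may sleep exactly when the request carries CRYPTO_TFM_REQ_MAY_SLEEP, which the might_sleep_if() added to skcipher.c later in this diff also asserts. A sketch of the walk pattern with an invented pass-through handler; a real cipher would transform the data instead of copying it:

#include <crypto/internal/skcipher.h>
#include <linux/string.h>

static int demo_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	/* false: sleeping allowed iff the request sets MAY_SLEEP */
	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes > 0) {
		if (walk.src.virt.addr != walk.dst.virt.addr)
			memcpy(walk.dst.virt.addr, walk.src.virt.addr,
			       walk.nbytes);
		/* 0 = no bytes left unprocessed in this chunk */
		err = skcipher_walk_done(&walk, 0);
	}
	return err;
}
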
diff --git a/crypto/scompress.c b/crypto/scompress.c
index 968bbcf65c94..6f8305f8c300 100644
--- a/crypto/scompress.c
+++ b/crypto/scompress.c
@@ -40,15 +40,12 @@ static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rscomp;
- strncpy(rscomp.type, "scomp", sizeof(rscomp.type));
+ memset(&rscomp, 0, sizeof(rscomp));
- if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
- sizeof(struct crypto_report_comp), &rscomp))
- goto nla_put_failure;
- return 0;
+ strscpy(rscomp.type, "scomp", sizeof(rscomp.type));
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
+ sizeof(rscomp), &rscomp);
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
diff --git a/crypto/shash.c b/crypto/shash.c
index d21f04d70dce..44d297b82a8f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -408,18 +408,14 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
struct crypto_report_hash rhash;
struct shash_alg *salg = __crypto_shash_alg(alg);
- strncpy(rhash.type, "shash", sizeof(rhash.type));
+ memset(&rhash, 0, sizeof(rhash));
+
+ strscpy(rhash.type, "shash", sizeof(rhash.type));
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = salg->digestsize;
- if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
- sizeof(struct crypto_report_hash), &rhash))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 4caab81d2d02..2a969296bc24 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -474,6 +474,8 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
{
int err;
+ might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+
walk->flags &= ~SKCIPHER_WALK_PHYS;
err = skcipher_walk_skcipher(walk, req);
@@ -577,8 +579,7 @@ static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
if (alg->cra_type == &crypto_blkcipher_type)
return sizeof(struct crypto_blkcipher *);
- if (alg->cra_type == &crypto_ablkcipher_type ||
- alg->cra_type == &crypto_givcipher_type)
+ if (alg->cra_type == &crypto_ablkcipher_type)
return sizeof(struct crypto_ablkcipher *);
return crypto_alg_extsize(alg);
@@ -842,8 +843,7 @@ static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
return crypto_init_skcipher_ops_blkcipher(tfm);
- if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type ||
- tfm->__crt_alg->cra_type == &crypto_givcipher_type)
+ if (tfm->__crt_alg->cra_type == &crypto_ablkcipher_type)
return crypto_init_skcipher_ops_ablkcipher(tfm);
skcipher->setkey = skcipher_setkey;
@@ -897,21 +897,18 @@ static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
base);
- strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
- strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
+ memset(&rblkcipher, 0, sizeof(rblkcipher));
+
+ strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
+ strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = skcipher->min_keysize;
rblkcipher.max_keysize = skcipher->max_keysize;
rblkcipher.ivsize = skcipher->ivsize;
- if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
- sizeof(struct crypto_report_blkcipher), &rblkcipher))
- goto nla_put_failure;
- return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
+ return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
+ sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
diff --git a/crypto/streebog_generic.c b/crypto/streebog_generic.c
new file mode 100644
index 000000000000..03272a22afce
--- /dev/null
+++ b/crypto/streebog_generic.c
@@ -0,0 +1,1140 @@
+// SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause
+/*
+ * Streebog hash function as specified by GOST R 34.11-2012 and
+ * described at https://tools.ietf.org/html/rfc6986
+ *
+ * Copyright (c) 2013 Alexey Degtyarev <alexey@renatasystems.org>
+ * Copyright (c) 2018 Vitaly Chikunov <vt@altlinux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <crypto/streebog.h>
+
+static const struct streebog_uint512 buffer0 = { {
+ 0, 0, 0, 0, 0, 0, 0, 0
+} };
+
+static const struct streebog_uint512 buffer512 = { {
+ cpu_to_le64(0x200), 0, 0, 0, 0, 0, 0, 0
+} };
+
+static const struct streebog_uint512 C[12] = {
+ { {
+ cpu_to_le64(0xdd806559f2a64507ULL),
+ cpu_to_le64(0x05767436cc744d23ULL),
+ cpu_to_le64(0xa2422a08a460d315ULL),
+ cpu_to_le64(0x4b7ce09192676901ULL),
+ cpu_to_le64(0x714eb88d7585c4fcULL),
+ cpu_to_le64(0x2f6a76432e45d016ULL),
+ cpu_to_le64(0xebcb2f81c0657c1fULL),
+ cpu_to_le64(0xb1085bda1ecadae9ULL)
+ } },
+ { {
+ cpu_to_le64(0xe679047021b19bb7ULL),
+ cpu_to_le64(0x55dda21bd7cbcd56ULL),
+ cpu_to_le64(0x5cb561c2db0aa7caULL),
+ cpu_to_le64(0x9ab5176b12d69958ULL),
+ cpu_to_le64(0x61d55e0f16b50131ULL),
+ cpu_to_le64(0xf3feea720a232b98ULL),
+ cpu_to_le64(0x4fe39d460f70b5d7ULL),
+ cpu_to_le64(0x6fa3b58aa99d2f1aULL)
+ } },
+ { {
+ cpu_to_le64(0x991e96f50aba0ab2ULL),
+ cpu_to_le64(0xc2b6f443867adb31ULL),
+ cpu_to_le64(0xc1c93a376062db09ULL),
+ cpu_to_le64(0xd3e20fe490359eb1ULL),
+ cpu_to_le64(0xf2ea7514b1297b7bULL),
+ cpu_to_le64(0x06f15e5f529c1f8bULL),
+ cpu_to_le64(0x0a39fc286a3d8435ULL),
+ cpu_to_le64(0xf574dcac2bce2fc7ULL)
+ } },
+ { {
+ cpu_to_le64(0x220cbebc84e3d12eULL),
+ cpu_to_le64(0x3453eaa193e837f1ULL),
+ cpu_to_le64(0xd8b71333935203beULL),
+ cpu_to_le64(0xa9d72c82ed03d675ULL),
+ cpu_to_le64(0x9d721cad685e353fULL),
+ cpu_to_le64(0x488e857e335c3c7dULL),
+ cpu_to_le64(0xf948e1a05d71e4ddULL),
+ cpu_to_le64(0xef1fdfb3e81566d2ULL)
+ } },
+ { {
+ cpu_to_le64(0x601758fd7c6cfe57ULL),
+ cpu_to_le64(0x7a56a27ea9ea63f5ULL),
+ cpu_to_le64(0xdfff00b723271a16ULL),
+ cpu_to_le64(0xbfcd1747253af5a3ULL),
+ cpu_to_le64(0x359e35d7800fffbdULL),
+ cpu_to_le64(0x7f151c1f1686104aULL),
+ cpu_to_le64(0x9a3f410c6ca92363ULL),
+ cpu_to_le64(0x4bea6bacad474799ULL)
+ } },
+ { {
+ cpu_to_le64(0xfa68407a46647d6eULL),
+ cpu_to_le64(0xbf71c57236904f35ULL),
+ cpu_to_le64(0x0af21f66c2bec6b6ULL),
+ cpu_to_le64(0xcffaa6b71c9ab7b4ULL),
+ cpu_to_le64(0x187f9ab49af08ec6ULL),
+ cpu_to_le64(0x2d66c4f95142a46cULL),
+ cpu_to_le64(0x6fa4c33b7a3039c0ULL),
+ cpu_to_le64(0xae4faeae1d3ad3d9ULL)
+ } },
+ { {
+ cpu_to_le64(0x8886564d3a14d493ULL),
+ cpu_to_le64(0x3517454ca23c4af3ULL),
+ cpu_to_le64(0x06476983284a0504ULL),
+ cpu_to_le64(0x0992abc52d822c37ULL),
+ cpu_to_le64(0xd3473e33197a93c9ULL),
+ cpu_to_le64(0x399ec6c7e6bf87c9ULL),
+ cpu_to_le64(0x51ac86febf240954ULL),
+ cpu_to_le64(0xf4c70e16eeaac5ecULL)
+ } },
+ { {
+ cpu_to_le64(0xa47f0dd4bf02e71eULL),
+ cpu_to_le64(0x36acc2355951a8d9ULL),
+ cpu_to_le64(0x69d18d2bd1a5c42fULL),
+ cpu_to_le64(0xf4892bcb929b0690ULL),
+ cpu_to_le64(0x89b4443b4ddbc49aULL),
+ cpu_to_le64(0x4eb7f8719c36de1eULL),
+ cpu_to_le64(0x03e7aa020c6e4141ULL),
+ cpu_to_le64(0x9b1f5b424d93c9a7ULL)
+ } },
+ { {
+ cpu_to_le64(0x7261445183235adbULL),
+ cpu_to_le64(0x0e38dc92cb1f2a60ULL),
+ cpu_to_le64(0x7b2b8a9aa6079c54ULL),
+ cpu_to_le64(0x800a440bdbb2ceb1ULL),
+ cpu_to_le64(0x3cd955b7e00d0984ULL),
+ cpu_to_le64(0x3a7d3a1b25894224ULL),
+ cpu_to_le64(0x944c9ad8ec165fdeULL),
+ cpu_to_le64(0x378f5a541631229bULL)
+ } },
+ { {
+ cpu_to_le64(0x74b4c7fb98459cedULL),
+ cpu_to_le64(0x3698fad1153bb6c3ULL),
+ cpu_to_le64(0x7a1e6c303b7652f4ULL),
+ cpu_to_le64(0x9fe76702af69334bULL),
+ cpu_to_le64(0x1fffe18a1b336103ULL),
+ cpu_to_le64(0x8941e71cff8a78dbULL),
+ cpu_to_le64(0x382ae548b2e4f3f3ULL),
+ cpu_to_le64(0xabbedea680056f52ULL)
+ } },
+ { {
+ cpu_to_le64(0x6bcaa4cd81f32d1bULL),
+ cpu_to_le64(0xdea2594ac06fd85dULL),
+ cpu_to_le64(0xefbacd1d7d476e98ULL),
+ cpu_to_le64(0x8a1d71efea48b9caULL),
+ cpu_to_le64(0x2001802114846679ULL),
+ cpu_to_le64(0xd8fa6bbbebab0761ULL),
+ cpu_to_le64(0x3002c6cd635afe94ULL),
+ cpu_to_le64(0x7bcd9ed0efc889fbULL)
+ } },
+ { {
+ cpu_to_le64(0x48bc924af11bd720ULL),
+ cpu_to_le64(0xfaf417d5d9b21b99ULL),
+ cpu_to_le64(0xe71da4aa88e12852ULL),
+ cpu_to_le64(0x5d80ef9d1891cc86ULL),
+ cpu_to_le64(0xf82012d430219f9bULL),
+ cpu_to_le64(0xcda43c32bcdf1d77ULL),
+ cpu_to_le64(0xd21380b00449b17aULL),
+ cpu_to_le64(0x378ee767f11631baULL)
+ } }
+};
+
+static const u8 Tau[64] = {
+ 0, 8, 16, 24, 32, 40, 48, 56,
+ 1, 9, 17, 25, 33, 41, 49, 57,
+ 2, 10, 18, 26, 34, 42, 50, 58,
+ 3, 11, 19, 27, 35, 43, 51, 59,
+ 4, 12, 20, 28, 36, 44, 52, 60,
+ 5, 13, 21, 29, 37, 45, 53, 61,
+ 6, 14, 22, 30, 38, 46, 54, 62,
+ 7, 15, 23, 31, 39, 47, 55, 63
+};
+
+static const u8 Pi[256] = {
+ 252, 238, 221, 17, 207, 110, 49, 22,
+ 251, 196, 250, 218, 35, 197, 4, 77,
+ 233, 119, 240, 219, 147, 46, 153, 186,
+ 23, 54, 241, 187, 20, 205, 95, 193,
+ 249, 24, 101, 90, 226, 92, 239, 33,
+ 129, 28, 60, 66, 139, 1, 142, 79,
+ 5, 132, 2, 174, 227, 106, 143, 160,
+ 6, 11, 237, 152, 127, 212, 211, 31,
+ 235, 52, 44, 81, 234, 200, 72, 171,
+ 242, 42, 104, 162, 253, 58, 206, 204,
+ 181, 112, 14, 86, 8, 12, 118, 18,
+ 191, 114, 19, 71, 156, 183, 93, 135,
+ 21, 161, 150, 41, 16, 123, 154, 199,
+ 243, 145, 120, 111, 157, 158, 178, 177,
+ 50, 117, 25, 61, 255, 53, 138, 126,
+ 109, 84, 198, 128, 195, 189, 13, 87,
+ 223, 245, 36, 169, 62, 168, 67, 201,
+ 215, 121, 214, 246, 124, 34, 185, 3,
+ 224, 15, 236, 222, 122, 148, 176, 188,
+ 220, 232, 40, 80, 78, 51, 10, 74,
+ 167, 151, 96, 115, 30, 0, 98, 68,
+ 26, 184, 56, 130, 100, 159, 38, 65,
+ 173, 69, 70, 146, 39, 94, 85, 47,
+ 140, 163, 165, 125, 105, 213, 149, 59,
+ 7, 88, 179, 64, 134, 172, 29, 247,
+ 48, 55, 107, 228, 136, 217, 231, 137,
+ 225, 27, 131, 73, 76, 63, 248, 254,
+ 141, 83, 170, 144, 202, 216, 133, 97,
+ 32, 113, 103, 164, 45, 43, 9, 91,
+ 203, 155, 37, 208, 190, 229, 108, 82,
+ 89, 166, 116, 210, 230, 244, 180, 192,
+ 209, 102, 175, 194, 57, 75, 99, 182
+};
+
+static const unsigned long long Ax[8][256] = {
+ {
+ 0xd01f715b5c7ef8e6ULL, 0x16fa240980778325ULL, 0xa8a42e857ee049c8ULL,
+ 0x6ac1068fa186465bULL, 0x6e417bd7a2e9320bULL, 0x665c8167a437daabULL,
+ 0x7666681aa89617f6ULL, 0x4b959163700bdcf5ULL, 0xf14be6b78df36248ULL,
+ 0xc585bd689a625cffULL, 0x9557d7fca67d82cbULL, 0x89f0b969af6dd366ULL,
+ 0xb0833d48749f6c35ULL, 0xa1998c23b1ecbc7cULL, 0x8d70c431ac02a736ULL,
+ 0xd6dfbc2fd0a8b69eULL, 0x37aeb3e551fa198bULL, 0x0b7d128a40b5cf9cULL,
+ 0x5a8f2008b5780cbcULL, 0xedec882284e333e5ULL, 0xd25fc177d3c7c2ceULL,
+ 0x5e0f5d50b61778ecULL, 0x1d873683c0c24cb9ULL, 0xad040bcbb45d208cULL,
+ 0x2f89a0285b853c76ULL, 0x5732fff6791b8d58ULL, 0x3e9311439ef6ec3fULL,
+ 0xc9183a809fd3c00fULL, 0x83adf3f5260a01eeULL, 0xa6791941f4e8ef10ULL,
+ 0x103ae97d0ca1cd5dULL, 0x2ce948121dee1b4aULL, 0x39738421dbf2bf53ULL,
+ 0x093da2a6cf0cf5b4ULL, 0xcd9847d89cbcb45fULL, 0xf9561c078b2d8ae8ULL,
+ 0x9c6a755a6971777fULL, 0xbc1ebaa0712ef0c5ULL, 0x72e61542abf963a6ULL,
+ 0x78bb5fde229eb12eULL, 0x14ba94250fceb90dULL, 0x844d6697630e5282ULL,
+ 0x98ea08026a1e032fULL, 0xf06bbea144217f5cULL, 0xdb6263d11ccb377aULL,
+ 0x641c314b2b8ee083ULL, 0x320e96ab9b4770cfULL, 0x1ee7deb986a96b85ULL,
+ 0xe96cf57a878c47b5ULL, 0xfdd6615f8842feb8ULL, 0xc83862965601dd1bULL,
+ 0x2ea9f83e92572162ULL, 0xf876441142ff97fcULL, 0xeb2c455608357d9dULL,
+ 0x5612a7e0b0c9904cULL, 0x6c01cbfb2d500823ULL, 0x4548a6a7fa037a2dULL,
+ 0xabc4c6bf388b6ef4ULL, 0xbade77d4fdf8bebdULL, 0x799b07c8eb4cac3aULL,
+ 0x0c9d87e805b19cf0ULL, 0xcb588aac106afa27ULL, 0xea0c1d40c1e76089ULL,
+ 0x2869354a1e816f1aULL, 0xff96d17307fbc490ULL, 0x9f0a9d602f1a5043ULL,
+ 0x96373fc6e016a5f7ULL, 0x5292dab8b3a6e41cULL, 0x9b8ae0382c752413ULL,
+ 0x4f15ec3b7364a8a5ULL, 0x3fb349555724f12bULL, 0xc7c50d4415db66d7ULL,
+ 0x92b7429ee379d1a7ULL, 0xd37f99611a15dfdaULL, 0x231427c05e34a086ULL,
+ 0xa439a96d7b51d538ULL, 0xb403401077f01865ULL, 0xdda2aea5901d7902ULL,
+ 0x0a5d4a9c8967d288ULL, 0xc265280adf660f93ULL, 0x8bb0094520d4e94eULL,
+ 0x2a29856691385532ULL, 0x42a833c5bf072941ULL, 0x73c64d54622b7eb2ULL,
+ 0x07e095624504536cULL, 0x8a905153e906f45aULL, 0x6f6123c16b3b2f1fULL,
+ 0xc6e55552dc097bc3ULL, 0x4468feb133d16739ULL, 0xe211e7f0c7398829ULL,
+ 0xa2f96419f7879b40ULL, 0x19074bdbc3ad38e9ULL, 0xf4ebc3f9474e0b0cULL,
+ 0x43886bd376d53455ULL, 0xd8028beb5aa01046ULL, 0x51f23282f5cdc320ULL,
+ 0xe7b1c2be0d84e16dULL, 0x081dfab006dee8a0ULL, 0x3b33340d544b857bULL,
+ 0x7f5bcabc679ae242ULL, 0x0edd37c48a08a6d8ULL, 0x81ed43d9a9b33bc6ULL,
+ 0xb1a3655ebd4d7121ULL, 0x69a1eeb5e7ed6167ULL, 0xf6ab73d5c8f73124ULL,
+ 0x1a67a3e185c61fd5ULL, 0x2dc91004d43c065eULL, 0x0240b02c8fb93a28ULL,
+ 0x90f7f2b26cc0eb8fULL, 0x3cd3a16f114fd617ULL, 0xaae49ea9f15973e0ULL,
+ 0x06c0cd748cd64e78ULL, 0xda423bc7d5192a6eULL, 0xc345701c16b41287ULL,
+ 0x6d2193ede4821537ULL, 0xfcf639494190e3acULL, 0x7c3b228621f1c57eULL,
+ 0xfb16ac2b0494b0c0ULL, 0xbf7e529a3745d7f9ULL, 0x6881b6a32e3f7c73ULL,
+ 0xca78d2bad9b8e733ULL, 0xbbfe2fc2342aa3a9ULL, 0x0dbddffecc6381e4ULL,
+ 0x70a6a56e2440598eULL, 0xe4d12a844befc651ULL, 0x8c509c2765d0ba22ULL,
+ 0xee8c6018c28814d9ULL, 0x17da7c1f49a59e31ULL, 0x609c4c1328e194d3ULL,
+ 0xb3e3d57232f44b09ULL, 0x91d7aaa4a512f69bULL, 0x0ffd6fd243dabbccULL,
+ 0x50d26a943c1fde34ULL, 0x6be15e9968545b4fULL, 0x94778fea6faf9fdfULL,
+ 0x2b09dd7058ea4826ULL, 0x677cd9716de5c7bfULL, 0x49d5214fffb2e6ddULL,
+ 0x0360e83a466b273cULL, 0x1fc786af4f7b7691ULL, 0xa0b9d435783ea168ULL,
+ 0xd49f0c035f118cb6ULL, 0x01205816c9d21d14ULL, 0xac2453dd7d8f3d98ULL,
+ 0x545217cc3f70aa64ULL, 0x26b4028e9489c9c2ULL, 0xdec2469fd6765e3eULL,
+ 0x04807d58036f7450ULL, 0xe5f17292823ddb45ULL, 0xf30b569b024a5860ULL,
+ 0x62dcfc3fa758aefbULL, 0xe84cad6c4e5e5aa1ULL, 0xccb81fce556ea94bULL,
+ 0x53b282ae7a74f908ULL, 0x1b47fbf74c1402c1ULL, 0x368eebf39828049fULL,
+ 0x7afbeff2ad278b06ULL, 0xbe5e0a8cfe97caedULL, 0xcfd8f7f413058e77ULL,
+ 0xf78b2bc301252c30ULL, 0x4d555c17fcdd928dULL, 0x5f2f05467fc565f8ULL,
+ 0x24f4b2a21b30f3eaULL, 0x860dd6bbecb768aaULL, 0x4c750401350f8f99ULL,
+ 0x0000000000000000ULL, 0xecccd0344d312ef1ULL, 0xb5231806be220571ULL,
+ 0xc105c030990d28afULL, 0x653c695de25cfd97ULL, 0x159acc33c61ca419ULL,
+ 0xb89ec7f872418495ULL, 0xa9847693b73254dcULL, 0x58cf90243ac13694ULL,
+ 0x59efc832f3132b80ULL, 0x5c4fed7c39ae42c4ULL, 0x828dabe3efd81cfaULL,
+ 0xd13f294d95ace5f2ULL, 0x7d1b7a90e823d86aULL, 0xb643f03cf849224dULL,
+ 0x3df3f979d89dcb03ULL, 0x7426d836272f2ddeULL, 0xdfe21e891fa4432aULL,
+ 0x3a136c1b9d99986fULL, 0xfa36f43dcd46add4ULL, 0xc025982650df35bbULL,
+ 0x856d3e81aadc4f96ULL, 0xc4a5e57e53b041ebULL, 0x4708168b75ba4005ULL,
+ 0xaf44bbe73be41aa4ULL, 0x971767d029c4b8e3ULL, 0xb9be9feebb939981ULL,
+ 0x215497ecd18d9aaeULL, 0x316e7e91dd2c57f3ULL, 0xcef8afe2dad79363ULL,
+ 0x3853dc371220a247ULL, 0x35ee03c9de4323a3ULL, 0xe6919aa8c456fc79ULL,
+ 0xe05157dc4880b201ULL, 0x7bdbb7e464f59612ULL, 0x127a59518318f775ULL,
+ 0x332ecebd52956ddbULL, 0x8f30741d23bb9d1eULL, 0xd922d3fd93720d52ULL,
+ 0x7746300c61440ae2ULL, 0x25d4eab4d2e2eefeULL, 0x75068020eefd30caULL,
+ 0x135a01474acaea61ULL, 0x304e268714fe4ae7ULL, 0xa519f17bb283c82cULL,
+ 0xdc82f6b359cf6416ULL, 0x5baf781e7caa11a8ULL, 0xb2c38d64fb26561dULL,
+ 0x34ce5bdf17913eb7ULL, 0x5d6fb56af07c5fd0ULL, 0x182713cd0a7f25fdULL,
+ 0x9e2ac576e6c84d57ULL, 0x9aaab82ee5a73907ULL, 0xa3d93c0f3e558654ULL,
+ 0x7e7b92aaae48ff56ULL, 0x872d8ead256575beULL, 0x41c8dbfff96c0e7dULL,
+ 0x99ca5014a3cc1e3bULL, 0x40e883e930be1369ULL, 0x1ca76e95091051adULL,
+ 0x4e35b42dbab6b5b1ULL, 0x05a0254ecabd6944ULL, 0xe1710fca8152af15ULL,
+ 0xf22b0e8dcb984574ULL, 0xb763a82a319b3f59ULL, 0x63fca4296e8ab3efULL,
+ 0x9d4a2d4ca0a36a6bULL, 0xe331bfe60eeb953dULL, 0xd5bf541596c391a2ULL,
+ 0xf5cb9bef8e9c1618ULL, 0x46284e9dbc685d11ULL, 0x2074cffa185f87baULL,
+ 0xbd3ee2b6b8fcedd1ULL, 0xae64e3f1f23607b0ULL, 0xfeb68965ce29d984ULL,
+ 0x55724fdaf6a2b770ULL, 0x29496d5cd753720eULL, 0xa75941573d3af204ULL,
+ 0x8e102c0bea69800aULL, 0x111ab16bc573d049ULL, 0xd7ffe439197aab8aULL,
+ 0xefac380e0b5a09cdULL, 0x48f579593660fbc9ULL, 0x22347fd697e6bd92ULL,
+ 0x61bc1405e13389c7ULL, 0x4ab5c975b9d9c1e1ULL, 0x80cd1bcf606126d2ULL,
+ 0x7186fd78ed92449aULL, 0x93971a882aabccb3ULL, 0x88d0e17f66bfce72ULL,
+ 0x27945a985d5bd4d6ULL
+ }, {
+ 0xde553f8c05a811c8ULL, 0x1906b59631b4f565ULL, 0x436e70d6b1964ff7ULL,
+ 0x36d343cb8b1e9d85ULL, 0x843dfacc858aab5aULL, 0xfdfc95c299bfc7f9ULL,
+ 0x0f634bdea1d51fa2ULL, 0x6d458b3b76efb3cdULL, 0x85c3f77cf8593f80ULL,
+ 0x3c91315fbe737cb2ULL, 0x2148b03366ace398ULL, 0x18f8b8264c6761bfULL,
+ 0xc830c1c495c9fb0fULL, 0x981a76102086a0aaULL, 0xaa16012142f35760ULL,
+ 0x35cc54060c763cf6ULL, 0x42907d66cc45db2dULL, 0x8203d44b965af4bcULL,
+ 0x3d6f3cefc3a0e868ULL, 0xbc73ff69d292bda7ULL, 0x8722ed0102e20a29ULL,
+ 0x8f8185e8cd34deb7ULL, 0x9b0561dda7ee01d9ULL, 0x5335a0193227fad6ULL,
+ 0xc9cecc74e81a6fd5ULL, 0x54f5832e5c2431eaULL, 0x99e47ba05d553470ULL,
+ 0xf7bee756acd226ceULL, 0x384e05a5571816fdULL, 0xd1367452a47d0e6aULL,
+ 0xf29fde1c386ad85bULL, 0x320c77316275f7caULL, 0xd0c879e2d9ae9ab0ULL,
+ 0xdb7406c69110ef5dULL, 0x45505e51a2461011ULL, 0xfc029872e46c5323ULL,
+ 0xfa3cb6f5f7bc0cc5ULL, 0x031f17cd8768a173ULL, 0xbd8df2d9af41297dULL,
+ 0x9d3b4f5ab43e5e3fULL, 0x4071671b36feee84ULL, 0x716207e7d3e3b83dULL,
+ 0x48d20ff2f9283a1aULL, 0x27769eb4757cbc7eULL, 0x5c56ebc793f2e574ULL,
+ 0xa48b474f9ef5dc18ULL, 0x52cbada94ff46e0cULL, 0x60c7da982d8199c6ULL,
+ 0x0e9d466edc068b78ULL, 0x4eec2175eaf865fcULL, 0x550b8e9e21f7a530ULL,
+ 0x6b7ba5bc653fec2bULL, 0x5eb7f1ba6949d0ddULL, 0x57ea94e3db4c9099ULL,
+ 0xf640eae6d101b214ULL, 0xdd4a284182c0b0bbULL, 0xff1d8fbf6304f250ULL,
+ 0xb8accb933bf9d7e8ULL, 0xe8867c478eb68c4dULL, 0x3f8e2692391bddc1ULL,
+ 0xcb2fd60912a15a7cULL, 0xaec935dbab983d2fULL, 0xf55ffd2b56691367ULL,
+ 0x80e2ce366ce1c115ULL, 0x179bf3f8edb27e1dULL, 0x01fe0db07dd394daULL,
+ 0xda8a0b76ecc37b87ULL, 0x44ae53e1df9584cbULL, 0xb310b4b77347a205ULL,
+ 0xdfab323c787b8512ULL, 0x3b511268d070b78eULL, 0x65e6e3d2b9396753ULL,
+ 0x6864b271e2574d58ULL, 0x259784c98fc789d7ULL, 0x02e11a7dfabb35a9ULL,
+ 0x8841a6dfa337158bULL, 0x7ade78c39b5dcdd0ULL, 0xb7cf804d9a2cc84aULL,
+ 0x20b6bd831b7f7742ULL, 0x75bd331d3a88d272ULL, 0x418f6aab4b2d7a5eULL,
+ 0xd9951cbb6babdaf4ULL, 0xb6318dfde7ff5c90ULL, 0x1f389b112264aa83ULL,
+ 0x492c024284fbaec0ULL, 0xe33a0363c608f9a0ULL, 0x2688930408af28a4ULL,
+ 0xc7538a1a341ce4adULL, 0x5da8e677ee2171aeULL, 0x8c9e92254a5c7fc4ULL,
+ 0x63d8cd55aae938b5ULL, 0x29ebd8daa97a3706ULL, 0x959827b37be88aa1ULL,
+ 0x1484e4356adadf6eULL, 0xa7945082199d7d6bULL, 0xbf6ce8a455fa1cd4ULL,
+ 0x9cc542eac9edcae5ULL, 0x79c16f0e1c356ca3ULL, 0x89bfab6fdee48151ULL,
+ 0xd4174d1830c5f0ffULL, 0x9258048415eb419dULL, 0x6139d72850520d1cULL,
+ 0x6a85a80c18ec78f1ULL, 0xcd11f88e0171059aULL, 0xcceff53e7ca29140ULL,
+ 0xd229639f2315af19ULL, 0x90b91ef9ef507434ULL, 0x5977d28d074a1be1ULL,
+ 0x311360fce51d56b9ULL, 0xc093a92d5a1f2f91ULL, 0x1a19a25bb6dc5416ULL,
+ 0xeb996b8a09de2d3eULL, 0xfee3820f1ed7668aULL, 0xd7085ad5b7ad518cULL,
+ 0x7fff41890fe53345ULL, 0xec5948bd67dde602ULL, 0x2fd5f65dbaaa68e0ULL,
+ 0xa5754affe32648c2ULL, 0xf8ddac880d07396cULL, 0x6fa491468c548664ULL,
+ 0x0c7c5c1326bdbed1ULL, 0x4a33158f03930fb3ULL, 0x699abfc19f84d982ULL,
+ 0xe4fa2054a80b329cULL, 0x6707f9af438252faULL, 0x08a368e9cfd6d49eULL,
+ 0x47b1442c58fd25b8ULL, 0xbbb3dc5ebc91769bULL, 0x1665fe489061eac7ULL,
+ 0x33f27a811fa66310ULL, 0x93a609346838d547ULL, 0x30ed6d4c98cec263ULL,
+ 0x1dd9816cd8df9f2aULL, 0x94662a03063b1e7bULL, 0x83fdd9fbeb896066ULL,
+ 0x7b207573e68e590aULL, 0x5f49fc0a149a4407ULL, 0x343259b671a5a82cULL,
+ 0xfbc2bb458a6f981fULL, 0xc272b350a0a41a38ULL, 0x3aaf1fd8ada32354ULL,
+ 0x6cbb868b0b3c2717ULL, 0xa2b569c88d2583feULL, 0xf180c9d1bf027928ULL,
+ 0xaf37386bd64ba9f5ULL, 0x12bacab2790a8088ULL, 0x4c0d3b0810435055ULL,
+ 0xb2eeb9070e9436dfULL, 0xc5b29067cea7d104ULL, 0xdcb425f1ff132461ULL,
+ 0x4f122cc5972bf126ULL, 0xac282fa651230886ULL, 0xe7e537992f6393efULL,
+ 0xe61b3a2952b00735ULL, 0x709c0a57ae302ce7ULL, 0xe02514ae416058d3ULL,
+ 0xc44c9dd7b37445deULL, 0x5a68c5408022ba92ULL, 0x1c278cdca50c0bf0ULL,
+ 0x6e5a9cf6f18712beULL, 0x86dce0b17f319ef3ULL, 0x2d34ec2040115d49ULL,
+ 0x4bcd183f7e409b69ULL, 0x2815d56ad4a9a3dcULL, 0x24698979f2141d0dULL,
+ 0x0000000000000000ULL, 0x1ec696a15fb73e59ULL, 0xd86b110b16784e2eULL,
+ 0x8e7f8858b0e74a6dULL, 0x063e2e8713d05fe6ULL, 0xe2c40ed3bbdb6d7aULL,
+ 0xb1f1aeca89fc97acULL, 0xe1db191e3cb3cc09ULL, 0x6418ee62c4eaf389ULL,
+ 0xc6ad87aa49cf7077ULL, 0xd6f65765ca7ec556ULL, 0x9afb6c6dda3d9503ULL,
+ 0x7ce05644888d9236ULL, 0x8d609f95378feb1eULL, 0x23a9aa4e9c17d631ULL,
+ 0x6226c0e5d73aac6fULL, 0x56149953a69f0443ULL, 0xeeb852c09d66d3abULL,
+ 0x2b0ac2a753c102afULL, 0x07c023376e03cb3cULL, 0x2ccae1903dc2c993ULL,
+ 0xd3d76e2f5ec63bc3ULL, 0x9e2458973356ff4cULL, 0xa66a5d32644ee9b1ULL,
+ 0x0a427294356de137ULL, 0x783f62be61e6f879ULL, 0x1344c70204d91452ULL,
+ 0x5b96c8f0fdf12e48ULL, 0xa90916ecc59bf613ULL, 0xbe92e5142829880eULL,
+ 0x727d102a548b194eULL, 0x1be7afebcb0fc0ccULL, 0x3e702b2244c8491bULL,
+ 0xd5e940a84d166425ULL, 0x66f9f41f3e51c620ULL, 0xabe80c913f20c3baULL,
+ 0xf07ec461c2d1edf2ULL, 0xf361d3ac45b94c81ULL, 0x0521394a94b8fe95ULL,
+ 0xadd622162cf09c5cULL, 0xe97871f7f3651897ULL, 0xf4a1f09b2bba87bdULL,
+ 0x095d6559b2054044ULL, 0x0bbc7f2448be75edULL, 0x2af4cf172e129675ULL,
+ 0x157ae98517094bb4ULL, 0x9fda55274e856b96ULL, 0x914713499283e0eeULL,
+ 0xb952c623462a4332ULL, 0x74433ead475b46a8ULL, 0x8b5eb112245fb4f8ULL,
+ 0xa34b6478f0f61724ULL, 0x11a5dd7ffe6221fbULL, 0xc16da49d27ccbb4bULL,
+ 0x76a224d0bde07301ULL, 0x8aa0bca2598c2022ULL, 0x4df336b86d90c48fULL,
+ 0xea67663a740db9e4ULL, 0xef465f70e0b54771ULL, 0x39b008152acb8227ULL,
+ 0x7d1e5bf4f55e06ecULL, 0x105bd0cf83b1b521ULL, 0x775c2960c033e7dbULL,
+ 0x7e014c397236a79fULL, 0x811cc386113255cfULL, 0xeda7450d1a0e72d8ULL,
+ 0x5889df3d7a998f3bULL, 0x2e2bfbedc779fc3aULL, 0xce0eef438619a4e9ULL,
+ 0x372d4e7bf6cd095fULL, 0x04df34fae96b6a4fULL, 0xf923a13870d4adb6ULL,
+ 0xa1aa7e050a4d228dULL, 0xa8f71b5cb84862c9ULL, 0xb52e9a306097fde3ULL,
+ 0x0d8251a35b6e2a0bULL, 0x2257a7fee1c442ebULL, 0x73831d9a29588d94ULL,
+ 0x51d4ba64c89ccf7fULL, 0x502ab7d4b54f5ba5ULL, 0x97793dce8153bf08ULL,
+ 0xe5042de4d5d8a646ULL, 0x9687307efc802bd2ULL, 0xa05473b5779eb657ULL,
+ 0xb4d097801d446939ULL, 0xcff0e2f3fbca3033ULL, 0xc38cbee0dd778ee2ULL,
+ 0x464f499c252eb162ULL, 0xcad1dbb96f72cea6ULL, 0xba4dd1eec142e241ULL,
+ 0xb00fa37af42f0376ULL
+ }, {
+ 0xcce4cd3aa968b245ULL, 0x089d5484e80b7fafULL, 0x638246c1b3548304ULL,
+ 0xd2fe0ec8c2355492ULL, 0xa7fbdf7ff2374eeeULL, 0x4df1600c92337a16ULL,
+ 0x84e503ea523b12fbULL, 0x0790bbfd53ab0c4aULL, 0x198a780f38f6ea9dULL,
+ 0x2ab30c8f55ec48cbULL, 0xe0f7fed6b2c49db5ULL, 0xb6ecf3f422cadbdcULL,
+ 0x409c9a541358df11ULL, 0xd3ce8a56dfde3fe3ULL, 0xc3e9224312c8c1a0ULL,
+ 0x0d6dfa58816ba507ULL, 0xddf3e1b179952777ULL, 0x04c02a42748bb1d9ULL,
+ 0x94c2abff9f2decb8ULL, 0x4f91752da8f8acf4ULL, 0x78682befb169bf7bULL,
+ 0xe1c77a48af2ff6c4ULL, 0x0c5d7ec69c80ce76ULL, 0x4cc1e4928fd81167ULL,
+ 0xfeed3d24d9997b62ULL, 0x518bb6dfc3a54a23ULL, 0x6dbf2d26151f9b90ULL,
+ 0xb5bc624b05ea664fULL, 0xe86aaa525acfe21aULL, 0x4801ced0fb53a0beULL,
+ 0xc91463e6c00868edULL, 0x1027a815cd16fe43ULL, 0xf67069a0319204cdULL,
+ 0xb04ccc976c8abce7ULL, 0xc0b9b3fc35e87c33ULL, 0xf380c77c58f2de65ULL,
+ 0x50bb3241de4e2152ULL, 0xdf93f490435ef195ULL, 0xf1e0d25d62390887ULL,
+ 0xaf668bfb1a3c3141ULL, 0xbc11b251f00a7291ULL, 0x73a5eed47e427d47ULL,
+ 0x25bee3f6ee4c3b2eULL, 0x43cc0beb34786282ULL, 0xc824e778dde3039cULL,
+ 0xf97d86d98a327728ULL, 0xf2b043e24519b514ULL, 0xe297ebf7880f4b57ULL,
+ 0x3a94a49a98fab688ULL, 0x868516cb68f0c419ULL, 0xeffa11af0964ee50ULL,
+ 0xa4ab4ec0d517f37dULL, 0xa9c6b498547c567aULL, 0x8e18424f80fbbbb6ULL,
+ 0x0bcdc53bcf2bc23cULL, 0x137739aaea3643d0ULL, 0x2c1333ec1bac2ff0ULL,
+ 0x8d48d3f0a7db0625ULL, 0x1e1ac3f26b5de6d7ULL, 0xf520f81f16b2b95eULL,
+ 0x9f0f6ec450062e84ULL, 0x0130849e1deb6b71ULL, 0xd45e31ab8c7533a9ULL,
+ 0x652279a2fd14e43fULL, 0x3209f01e70f1c927ULL, 0xbe71a770cac1a473ULL,
+ 0x0e3d6be7a64b1894ULL, 0x7ec8148cff29d840ULL, 0xcb7476c7fac3be0fULL,
+ 0x72956a4a63a91636ULL, 0x37f95ec21991138fULL, 0x9e3fea5a4ded45f5ULL,
+ 0x7b38ba50964902e8ULL, 0x222e580bbde73764ULL, 0x61e253e0899f55e6ULL,
+ 0xfc8d2805e352ad80ULL, 0x35994be3235ac56dULL, 0x09add01af5e014deULL,
+ 0x5e8659a6780539c6ULL, 0xb17c48097161d796ULL, 0x026015213acbd6e2ULL,
+ 0xd1ae9f77e515e901ULL, 0xb7dc776a3f21b0adULL, 0xaba6a1b96eb78098ULL,
+ 0x9bcf4486248d9f5dULL, 0x582666c536455efdULL, 0xfdbdac9bfeb9c6f1ULL,
+ 0xc47999be4163cdeaULL, 0x765540081722a7efULL, 0x3e548ed8ec710751ULL,
+ 0x3d041f67cb51bac2ULL, 0x7958af71ac82d40aULL, 0x36c9da5c047a78feULL,
+ 0xed9a048e33af38b2ULL, 0x26ee7249c96c86bdULL, 0x900281bdeba65d61ULL,
+ 0x11172c8bd0fd9532ULL, 0xea0abf73600434f8ULL, 0x42fc8f75299309f3ULL,
+ 0x34a9cf7d3eb1ae1cULL, 0x2b838811480723baULL, 0x5ce64c8742ceef24ULL,
+ 0x1adae9b01fd6570eULL, 0x3c349bf9d6bad1b3ULL, 0x82453c891c7b75c0ULL,
+ 0x97923a40b80d512bULL, 0x4a61dbf1c198765cULL, 0xb48ce6d518010d3eULL,
+ 0xcfb45c858e480fd6ULL, 0xd933cbf30d1e96aeULL, 0xd70ea014ab558e3aULL,
+ 0xc189376228031742ULL, 0x9262949cd16d8b83ULL, 0xeb3a3bed7def5f89ULL,
+ 0x49314a4ee6b8cbcfULL, 0xdcc3652f647e4c06ULL, 0xda635a4c2a3e2b3dULL,
+ 0x470c21a940f3d35bULL, 0x315961a157d174b4ULL, 0x6672e81dda3459acULL,
+ 0x5b76f77a1165e36eULL, 0x445cb01667d36ec8ULL, 0xc5491d205c88a69bULL,
+ 0x456c34887a3805b9ULL, 0xffddb9bac4721013ULL, 0x99af51a71e4649bfULL,
+ 0xa15be01cbc7729d5ULL, 0x52db2760e485f7b0ULL, 0x8c78576eba306d54ULL,
+ 0xae560f6507d75a30ULL, 0x95f22f6182c687c9ULL, 0x71c5fbf54489aba5ULL,
+ 0xca44f259e728d57eULL, 0x88b87d2ccebbdc8dULL, 0xbab18d32be4a15aaULL,
+ 0x8be8ec93e99b611eULL, 0x17b713e89ebdf209ULL, 0xb31c5d284baa0174ULL,
+ 0xeeca9531148f8521ULL, 0xb8d198138481c348ULL, 0x8988f9b2d350b7fcULL,
+ 0xb9e11c8d996aa839ULL, 0x5a4673e40c8e881fULL, 0x1687977683569978ULL,
+ 0xbf4123eed72acf02ULL, 0x4ea1f1b3b513c785ULL, 0xe767452be16f91ffULL,
+ 0x7505d1b730021a7cULL, 0xa59bca5ec8fc980cULL, 0xad069eda20f7e7a3ULL,
+ 0x38f4b1bba231606aULL, 0x60d2d77e94743e97ULL, 0x9affc0183966f42cULL,
+ 0x248e6768f3a7505fULL, 0xcdd449a4b483d934ULL, 0x87b59255751baf68ULL,
+ 0x1bea6d2e023d3c7fULL, 0x6b1f12455b5ffcabULL, 0x743555292de9710dULL,
+ 0xd8034f6d10f5fddfULL, 0xc6198c9f7ba81b08ULL, 0xbb8109aca3a17edbULL,
+ 0xfa2d1766ad12cabbULL, 0xc729080166437079ULL, 0x9c5fff7b77269317ULL,
+ 0x0000000000000000ULL, 0x15d706c9a47624ebULL, 0x6fdf38072fd44d72ULL,
+ 0x5fb6dd3865ee52b7ULL, 0xa33bf53d86bcff37ULL, 0xe657c1b5fc84fa8eULL,
+ 0xaa962527735cebe9ULL, 0x39c43525bfda0b1bULL, 0x204e4d2a872ce186ULL,
+ 0x7a083ece8ba26999ULL, 0x554b9c9db72efbfaULL, 0xb22cd9b656416a05ULL,
+ 0x96a2bedea5e63a5aULL, 0x802529a826b0a322ULL, 0x8115ad363b5bc853ULL,
+ 0x8375b81701901eb1ULL, 0x3069e53f4a3a1fc5ULL, 0xbd2136cfede119e0ULL,
+ 0x18bafc91251d81ecULL, 0x1d4a524d4c7d5b44ULL, 0x05f0aedc6960daa8ULL,
+ 0x29e39d3072ccf558ULL, 0x70f57f6b5962c0d4ULL, 0x989fd53903ad22ceULL,
+ 0xf84d024797d91c59ULL, 0x547b1803aac5908bULL, 0xf0d056c37fd263f6ULL,
+ 0xd56eb535919e58d8ULL, 0x1c7ad6d351963035ULL, 0x2e7326cd2167f912ULL,
+ 0xac361a443d1c8cd2ULL, 0x697f076461942a49ULL, 0x4b515f6fdc731d2dULL,
+ 0x8ad8680df4700a6fULL, 0x41ac1eca0eb3b460ULL, 0x7d988533d80965d3ULL,
+ 0xa8f6300649973d0bULL, 0x7765c4960ac9cc9eULL, 0x7ca801adc5e20ea2ULL,
+ 0xdea3700e5eb59ae4ULL, 0xa06b6482a19c42a4ULL, 0x6a2f96db46b497daULL,
+ 0x27def6d7d487edccULL, 0x463ca5375d18b82aULL, 0xa6cb5be1efdc259fULL,
+ 0x53eba3fef96e9cc1ULL, 0xce84d81b93a364a7ULL, 0xf4107c810b59d22fULL,
+ 0x333974806d1aa256ULL, 0x0f0def79bba073e5ULL, 0x231edc95a00c5c15ULL,
+ 0xe437d494c64f2c6cULL, 0x91320523f64d3610ULL, 0x67426c83c7df32ddULL,
+ 0x6eefbc99323f2603ULL, 0x9d6f7be56acdf866ULL, 0x5916e25b2bae358cULL,
+ 0x7ff89012e2c2b331ULL, 0x035091bf2720bd93ULL, 0x561b0d22900e4669ULL,
+ 0x28d319ae6f279e29ULL, 0x2f43a2533c8c9263ULL, 0xd09e1be9f8fe8270ULL,
+ 0xf740ed3e2c796fbcULL, 0xdb53ded237d5404cULL, 0x62b2c25faebfe875ULL,
+ 0x0afd41a5d2c0a94dULL, 0x6412fd3ce0ff8f4eULL, 0xe3a76f6995e42026ULL,
+ 0x6c8fa9b808f4f0e1ULL, 0xc2d9a6dd0f23aad1ULL, 0x8f28c6d19d10d0c7ULL,
+ 0x85d587744fd0798aULL, 0xa20b71a39b579446ULL, 0x684f83fa7c7f4138ULL,
+ 0xe507500adba4471dULL, 0x3f640a46f19a6c20ULL, 0x1247bd34f7dd28a1ULL,
+ 0x2d23b77206474481ULL, 0x93521002cc86e0f2ULL, 0x572b89bc8de52d18ULL,
+ 0xfb1d93f8b0f9a1caULL, 0xe95a2ecc4724896bULL, 0x3ba420048511ddf9ULL,
+ 0xd63e248ab6bee54bULL, 0x5dd6c8195f258455ULL, 0x06a03f634e40673bULL,
+ 0x1f2a476c76b68da6ULL, 0x217ec9b49ac78af7ULL, 0xecaa80102e4453c3ULL,
+ 0x14e78257b99d4f9aULL
+ }, {
+ 0x20329b2cc87bba05ULL, 0x4f5eb6f86546a531ULL, 0xd4f44775f751b6b1ULL,
+ 0x8266a47b850dfa8bULL, 0xbb986aa15a6ca985ULL, 0xc979eb08f9ae0f99ULL,
+ 0x2da6f447a2375ea1ULL, 0x1e74275dcd7d8576ULL, 0xbc20180a800bc5f8ULL,
+ 0xb4a2f701b2dc65beULL, 0xe726946f981b6d66ULL, 0x48e6c453bf21c94cULL,
+ 0x42cad9930f0a4195ULL, 0xefa47b64aacccd20ULL, 0x71180a8960409a42ULL,
+ 0x8bb3329bf6a44e0cULL, 0xd34c35de2d36daccULL, 0xa92f5b7cbc23dc96ULL,
+ 0xb31a85aa68bb09c3ULL, 0x13e04836a73161d2ULL, 0xb24dfc4129c51d02ULL,
+ 0x8ae44b70b7da5acdULL, 0xe671ed84d96579a7ULL, 0xa4bb3417d66f3832ULL,
+ 0x4572ab38d56d2de8ULL, 0xb1b47761ea47215cULL, 0xe81c09cf70aba15dULL,
+ 0xffbdb872ce7f90acULL, 0xa8782297fd5dc857ULL, 0x0d946f6b6a4ce4a4ULL,
+ 0xe4df1f4f5b995138ULL, 0x9ebc71edca8c5762ULL, 0x0a2c1dc0b02b88d9ULL,
+ 0x3b503c115d9d7b91ULL, 0xc64376a8111ec3a2ULL, 0xcec199a323c963e4ULL,
+ 0xdc76a87ec58616f7ULL, 0x09d596e073a9b487ULL, 0x14583a9d7d560dafULL,
+ 0xf4c6dc593f2a0cb4ULL, 0xdd21d19584f80236ULL, 0x4a4836983ddde1d3ULL,
+ 0xe58866a41ae745f9ULL, 0xf591a5b27e541875ULL, 0x891dc05074586693ULL,
+ 0x5b068c651810a89eULL, 0xa30346bc0c08544fULL, 0x3dbf3751c684032dULL,
+ 0x2a1e86ec785032dcULL, 0xf73f5779fca830eaULL, 0xb60c05ca30204d21ULL,
+ 0x0cc316802b32f065ULL, 0x8770241bdd96be69ULL, 0xb861e18199ee95dbULL,
+ 0xf805cad91418fcd1ULL, 0x29e70dccbbd20e82ULL, 0xc7140f435060d763ULL,
+ 0x0f3a9da0e8b0cc3bULL, 0xa2543f574d76408eULL, 0xbd7761e1c175d139ULL,
+ 0x4b1f4f737ca3f512ULL, 0x6dc2df1f2fc137abULL, 0xf1d05c3967b14856ULL,
+ 0xa742bf3715ed046cULL, 0x654030141d1697edULL, 0x07b872abda676c7dULL,
+ 0x3ce84eba87fa17ecULL, 0xc1fb0403cb79afdfULL, 0x3e46bc7105063f73ULL,
+ 0x278ae987121cd678ULL, 0xa1adb4778ef47cd0ULL, 0x26dd906c5362c2b9ULL,
+ 0x05168060589b44e2ULL, 0xfbfc41f9d79ac08fULL, 0x0e6de44ba9ced8faULL,
+ 0x9feb08068bf243a3ULL, 0x7b341749d06b129bULL, 0x229c69e74a87929aULL,
+ 0xe09ee6c4427c011bULL, 0x5692e30e725c4c3aULL, 0xda99a33e5e9f6e4bULL,
+ 0x353dd85af453a36bULL, 0x25241b4c90e0fee7ULL, 0x5de987258309d022ULL,
+ 0xe230140fc0802984ULL, 0x93281e86a0c0b3c6ULL, 0xf229d719a4337408ULL,
+ 0x6f6c2dd4ad3d1f34ULL, 0x8ea5b2fbae3f0aeeULL, 0x8331dd90c473ee4aULL,
+ 0x346aa1b1b52db7aaULL, 0xdf8f235e06042aa9ULL, 0xcc6f6b68a1354b7bULL,
+ 0x6c95a6f46ebf236aULL, 0x52d31a856bb91c19ULL, 0x1a35ded6d498d555ULL,
+ 0xf37eaef2e54d60c9ULL, 0x72e181a9a3c2a61cULL, 0x98537aad51952fdeULL,
+ 0x16f6c856ffaa2530ULL, 0xd960281e9d1d5215ULL, 0x3a0745fa1ce36f50ULL,
+ 0x0b7b642bf1559c18ULL, 0x59a87eae9aec8001ULL, 0x5e100c05408bec7cULL,
+ 0x0441f98b19e55023ULL, 0xd70dcc5534d38aefULL, 0x927f676de1bea707ULL,
+ 0x9769e70db925e3e5ULL, 0x7a636ea29115065aULL, 0x468b201816ef11b6ULL,
+ 0xab81a9b73edff409ULL, 0xc0ac7de88a07bb1eULL, 0x1f235eb68c0391b7ULL,
+ 0x6056b074458dd30fULL, 0xbe8eeac102f7ed67ULL, 0xcd381283e04b5fbaULL,
+ 0x5cbefecec277c4e3ULL, 0xd21b4c356c48ce0dULL, 0x1019c31664b35d8cULL,
+ 0x247362a7d19eea26ULL, 0xebe582efb3299d03ULL, 0x02aef2cb82fc289fULL,
+ 0x86275df09ce8aaa8ULL, 0x28b07427faac1a43ULL, 0x38a9b7319e1f47cfULL,
+ 0xc82e92e3b8d01b58ULL, 0x06ef0b409b1978bcULL, 0x62f842bfc771fb90ULL,
+ 0x9904034610eb3b1fULL, 0xded85ab5477a3e68ULL, 0x90d195a663428f98ULL,
+ 0x5384636e2ac708d8ULL, 0xcbd719c37b522706ULL, 0xae9729d76644b0ebULL,
+ 0x7c8c65e20a0c7ee6ULL, 0x80c856b007f1d214ULL, 0x8c0b40302cc32271ULL,
+ 0xdbcedad51fe17a8aULL, 0x740e8ae938dbdea0ULL, 0xa615c6dc549310adULL,
+ 0x19cc55f6171ae90bULL, 0x49b1bdb8fe5fdd8dULL, 0xed0a89af2830e5bfULL,
+ 0x6a7aadb4f5a65bd6ULL, 0x7e22972988f05679ULL, 0xf952b3325566e810ULL,
+ 0x39fecedadf61530eULL, 0x6101c99f04f3c7ceULL, 0x2e5f7f6761b562ffULL,
+ 0xf08725d226cf5c97ULL, 0x63af3b54860fef51ULL, 0x8ff2cb10ef411e2fULL,
+ 0x884ab9bb35267252ULL, 0x4df04433e7ba8daeULL, 0x9afd8866d3690741ULL,
+ 0x66b9bb34de94abb3ULL, 0x9baaf18d92171380ULL, 0x543c11c5f0a064a5ULL,
+ 0x17a1b1bdbed431f1ULL, 0xb5f58eeaf3a2717fULL, 0xc355f6c849858740ULL,
+ 0xec5df044694ef17eULL, 0xd83751f5dc6346d4ULL, 0xfc4433520dfdacf2ULL,
+ 0x0000000000000000ULL, 0x5a51f58e596ebc5fULL, 0x3285aaf12e34cf16ULL,
+ 0x8d5c39db6dbd36b0ULL, 0x12b731dde64f7513ULL, 0x94906c2d7aa7dfbbULL,
+ 0x302b583aacc8e789ULL, 0x9d45facd090e6b3cULL, 0x2165e2c78905aec4ULL,
+ 0x68d45f7f775a7349ULL, 0x189b2c1d5664fdcaULL, 0xe1c99f2f030215daULL,
+ 0x6983269436246788ULL, 0x8489af3b1e148237ULL, 0xe94b702431d5b59cULL,
+ 0x33d2d31a6f4adbd7ULL, 0xbfd9932a4389f9a6ULL, 0xb0e30e8aab39359dULL,
+ 0xd1e2c715afcaf253ULL, 0x150f43763c28196eULL, 0xc4ed846393e2eb3dULL,
+ 0x03f98b20c3823c5eULL, 0xfd134ab94c83b833ULL, 0x556b682eb1de7064ULL,
+ 0x36c4537a37d19f35ULL, 0x7559f30279a5ca61ULL, 0x799ae58252973a04ULL,
+ 0x9c12832648707ffdULL, 0x78cd9c6913e92ec5ULL, 0x1d8dac7d0effb928ULL,
+ 0x439da0784e745554ULL, 0x413352b3cc887dcbULL, 0xbacf134a1b12bd44ULL,
+ 0x114ebafd25cd494dULL, 0x2f08068c20cb763eULL, 0x76a07822ba27f63fULL,
+ 0xeab2fb04f25789c2ULL, 0xe3676de481fe3d45ULL, 0x1b62a73d95e6c194ULL,
+ 0x641749ff5c68832cULL, 0xa5ec4dfc97112cf3ULL, 0xf6682e92bdd6242bULL,
+ 0x3f11c59a44782bb2ULL, 0x317c21d1edb6f348ULL, 0xd65ab5be75ad9e2eULL,
+ 0x6b2dd45fb4d84f17ULL, 0xfaab381296e4d44eULL, 0xd0b5befeeeb4e692ULL,
+ 0x0882ef0b32d7a046ULL, 0x512a91a5a83b2047ULL, 0x963e9ee6f85bf724ULL,
+ 0x4e09cf132438b1f0ULL, 0x77f701c9fb59e2feULL, 0x7ddb1c094b726a27ULL,
+ 0x5f4775ee01f5f8bdULL, 0x9186ec4d223c9b59ULL, 0xfeeac1998f01846dULL,
+ 0xac39db1ce4b89874ULL, 0xb75b7c21715e59e0ULL, 0xafc0503c273aa42aULL,
+ 0x6e3b543fec430bf5ULL, 0x704f7362213e8e83ULL, 0x58ff0745db9294c0ULL,
+ 0x67eec2df9feabf72ULL, 0xa0facd9ccf8a6811ULL, 0xb936986ad890811aULL,
+ 0x95c715c63bd9cb7aULL, 0xca8060283a2c33c7ULL, 0x507de84ee9453486ULL,
+ 0x85ded6d05f6a96f6ULL, 0x1cdad5964f81ade9ULL, 0xd5a33e9eb62fa270ULL,
+ 0x40642b588df6690aULL, 0x7f75eec2c98e42b8ULL, 0x2cf18dace3494a60ULL,
+ 0x23cb100c0bf9865bULL, 0xeef3028febb2d9e1ULL, 0x4425d2d394133929ULL,
+ 0xaad6d05c7fa1e0c8ULL, 0xad6ea2f7a5c68cb5ULL, 0xc2028f2308fb9381ULL,
+ 0x819f2f5b468fc6d5ULL, 0xc5bafd88d29cfffcULL, 0x47dc59f357910577ULL,
+ 0x2b49ff07392e261dULL, 0x57c59ae5332258fbULL, 0x73b6f842e2bcb2ddULL,
+ 0xcf96e04862b77725ULL, 0x4ca73dd8a6c4996fULL, 0x015779eb417e14c1ULL,
+ 0x37932a9176af8bf4ULL
+ }, {
+ 0x190a2c9b249df23eULL, 0x2f62f8b62263e1e9ULL, 0x7a7f754740993655ULL,
+ 0x330b7ba4d5564d9fULL, 0x4c17a16a46672582ULL, 0xb22f08eb7d05f5b8ULL,
+ 0x535f47f40bc148ccULL, 0x3aec5d27d4883037ULL, 0x10ed0a1825438f96ULL,
+ 0x516101f72c233d17ULL, 0x13cc6f949fd04eaeULL, 0x739853c441474bfdULL,
+ 0x653793d90d3f5b1bULL, 0x5240647b96b0fc2fULL, 0x0c84890ad27623e0ULL,
+ 0xd7189b32703aaea3ULL, 0x2685de3523bd9c41ULL, 0x99317c5b11bffefaULL,
+ 0x0d9baa854f079703ULL, 0x70b93648fbd48ac5ULL, 0xa80441fce30bc6beULL,
+ 0x7287704bdc36ff1eULL, 0xb65384ed33dc1f13ULL, 0xd36417343ee34408ULL,
+ 0x39cd38ab6e1bf10fULL, 0x5ab861770a1f3564ULL, 0x0ebacf09f594563bULL,
+ 0xd04572b884708530ULL, 0x3cae9722bdb3af47ULL, 0x4a556b6f2f5cbaf2ULL,
+ 0xe1704f1f76c4bd74ULL, 0x5ec4ed7144c6dfcfULL, 0x16afc01d4c7810e6ULL,
+ 0x283f113cd629ca7aULL, 0xaf59a8761741ed2dULL, 0xeed5a3991e215facULL,
+ 0x3bf37ea849f984d4ULL, 0xe413e096a56ce33cULL, 0x2c439d3a98f020d1ULL,
+ 0x637559dc6404c46bULL, 0x9e6c95d1e5f5d569ULL, 0x24bb9836045fe99aULL,
+ 0x44efa466dac8ecc9ULL, 0xc6eab2a5c80895d6ULL, 0x803b50c035220cc4ULL,
+ 0x0321658cba93c138ULL, 0x8f9ebc465dc7ee1cULL, 0xd15a5137190131d3ULL,
+ 0x0fa5ec8668e5e2d8ULL, 0x91c979578d1037b1ULL, 0x0642ca05693b9f70ULL,
+ 0xefca80168350eb4fULL, 0x38d21b24f36a45ecULL, 0xbeab81e1af73d658ULL,
+ 0x8cbfd9cae7542f24ULL, 0xfd19cc0d81f11102ULL, 0x0ac6430fbb4dbc90ULL,
+ 0x1d76a09d6a441895ULL, 0x2a01573ff1cbbfa1ULL, 0xb572e161894fde2bULL,
+ 0x8124734fa853b827ULL, 0x614b1fdf43e6b1b0ULL, 0x68ac395c4238cc18ULL,
+ 0x21d837bfd7f7b7d2ULL, 0x20c714304a860331ULL, 0x5cfaab726324aa14ULL,
+ 0x74c5ba4eb50d606eULL, 0xf3a3030474654739ULL, 0x23e671bcf015c209ULL,
+ 0x45f087e947b9582aULL, 0xd8bd77b418df4c7bULL, 0xe06f6c90ebb50997ULL,
+ 0x0bd96080263c0873ULL, 0x7e03f9410e40dcfeULL, 0xb8e94be4c6484928ULL,
+ 0xfb5b0608e8ca8e72ULL, 0x1a2b49179e0e3306ULL, 0x4e29e76961855059ULL,
+ 0x4f36c4e6fcf4e4baULL, 0x49740ee395cf7bcaULL, 0xc2963ea386d17f7dULL,
+ 0x90d65ad810618352ULL, 0x12d34c1b02a1fa4dULL, 0xfa44258775bb3a91ULL,
+ 0x18150f14b9ec46ddULL, 0x1491861e6b9a653dULL, 0x9a1019d7ab2c3fc2ULL,
+ 0x3668d42d06fe13d7ULL, 0xdcc1fbb25606a6d0ULL, 0x969490dd795a1c22ULL,
+ 0x3549b1a1bc6dd2efULL, 0xc94f5e23a0ed770eULL, 0xb9f6686b5b39fdcbULL,
+ 0xc4d4f4a6efeae00dULL, 0xe732851a1fff2204ULL, 0x94aad6de5eb869f9ULL,
+ 0x3f8ff2ae07206e7fULL, 0xfe38a9813b62d03aULL, 0xa7a1ad7a8bee2466ULL,
+ 0x7b6056c8dde882b6ULL, 0x302a1e286fc58ca7ULL, 0x8da0fa457a259bc7ULL,
+ 0xb3302b64e074415bULL, 0x5402ae7eff8b635fULL, 0x08f8050c9cafc94bULL,
+ 0xae468bf98a3059ceULL, 0x88c355cca98dc58fULL, 0xb10e6d67c7963480ULL,
+ 0xbad70de7e1aa3cf3ULL, 0xbfb4a26e320262bbULL, 0xcb711820870f02d5ULL,
+ 0xce12b7a954a75c9dULL, 0x563ce87dd8691684ULL, 0x9f73b65e7884618aULL,
+ 0x2b1e74b06cba0b42ULL, 0x47cec1ea605b2df1ULL, 0x1c698312f735ac76ULL,
+ 0x5fdbcefed9b76b2cULL, 0x831a354c8fb1cdfcULL, 0x820516c312c0791fULL,
+ 0xb74ca762aeadabf0ULL, 0xfc06ef821c80a5e1ULL, 0x5723cbf24518a267ULL,
+ 0x9d4df05d5f661451ULL, 0x588627742dfd40bfULL, 0xda8331b73f3d39a0ULL,
+ 0x17b0e392d109a405ULL, 0xf965400bcf28fba9ULL, 0x7c3dbf4229a2a925ULL,
+ 0x023e460327e275dbULL, 0x6cd0b55a0ce126b3ULL, 0xe62da695828e96e7ULL,
+ 0x42ad6e63b3f373b9ULL, 0xe50cc319381d57dfULL, 0xc5cbd729729b54eeULL,
+ 0x46d1e265fd2a9912ULL, 0x6428b056904eeff8ULL, 0x8be23040131e04b7ULL,
+ 0x6709d5da2add2ec0ULL, 0x075de98af44a2b93ULL, 0x8447dcc67bfbe66fULL,
+ 0x6616f655b7ac9a23ULL, 0xd607b8bded4b1a40ULL, 0x0563af89d3a85e48ULL,
+ 0x3db1b4ad20c21ba4ULL, 0x11f22997b8323b75ULL, 0x292032b34b587e99ULL,
+ 0x7f1cdace9331681dULL, 0x8e819fc9c0b65affULL, 0xa1e3677fe2d5bb16ULL,
+ 0xcd33d225ee349da5ULL, 0xd9a2543b85aef898ULL, 0x795e10cbfa0af76dULL,
+ 0x25a4bbb9992e5d79ULL, 0x78413344677b438eULL, 0xf0826688cef68601ULL,
+ 0xd27b34bba392f0ebULL, 0x551d8df162fad7bcULL, 0x1e57c511d0d7d9adULL,
+ 0xdeffbdb171e4d30bULL, 0xf4feea8e802f6caaULL, 0xa480c8f6317de55eULL,
+ 0xa0fc44f07fa40ff5ULL, 0x95b5f551c3c9dd1aULL, 0x22f952336d6476eaULL,
+ 0x0000000000000000ULL, 0xa6be8ef5169f9085ULL, 0xcc2cf1aa73452946ULL,
+ 0x2e7ddb39bf12550aULL, 0xd526dd3157d8db78ULL, 0x486b2d6c08becf29ULL,
+ 0x9b0f3a58365d8b21ULL, 0xac78cdfaadd22c15ULL, 0xbc95c7e28891a383ULL,
+ 0x6a927f5f65dab9c3ULL, 0xc3891d2c1ba0cb9eULL, 0xeaa92f9f50f8b507ULL,
+ 0xcf0d9426c9d6e87eULL, 0xca6e3baf1a7eb636ULL, 0xab25247059980786ULL,
+ 0x69b31ad3df4978fbULL, 0xe2512a93cc577c4cULL, 0xff278a0ea61364d9ULL,
+ 0x71a615c766a53e26ULL, 0x89dc764334fc716cULL, 0xf87a638452594f4aULL,
+ 0xf2bc208be914f3daULL, 0x8766b94ac1682757ULL, 0xbbc82e687cdb8810ULL,
+ 0x626a7a53f9757088ULL, 0xa2c202f358467a2eULL, 0x4d0882e5db169161ULL,
+ 0x09e7268301de7da8ULL, 0xe897699c771ac0dcULL, 0xc8507dac3d9cc3edULL,
+ 0xc0a878a0a1330aa6ULL, 0x978bb352e42ba8c1ULL, 0xe9884a13ea6b743fULL,
+ 0x279afdbabecc28a2ULL, 0x047c8c064ed9eaabULL, 0x507e2278b15289f4ULL,
+ 0x599904fbb08cf45cULL, 0xbd8ae46d15e01760ULL, 0x31353da7f2b43844ULL,
+ 0x8558ff49e68a528cULL, 0x76fbfc4d92ef15b5ULL, 0x3456922e211c660cULL,
+ 0x86799ac55c1993b4ULL, 0x3e90d1219a51da9cULL, 0x2d5cbeb505819432ULL,
+ 0x982e5fd48cce4a19ULL, 0xdb9c1238a24c8d43ULL, 0xd439febecaa96f9bULL,
+ 0x418c0bef0960b281ULL, 0x158ea591f6ebd1deULL, 0x1f48e69e4da66d4eULL,
+ 0x8afd13cf8e6fb054ULL, 0xf5e1c9011d5ed849ULL, 0xe34e091c5126c8afULL,
+ 0xad67ee7530a398f6ULL, 0x43b24dec2e82c75aULL, 0x75da99c1287cd48dULL,
+ 0x92e81cdb3783f689ULL, 0xa3dd217cc537cecdULL, 0x60543c50de970553ULL,
+ 0x93f73f54aaf2426aULL, 0xa91b62737e7a725dULL, 0xf19d4507538732e2ULL,
+ 0x77e4dfc20f9ea156ULL, 0x7d229ccdb4d31dc6ULL, 0x1b346a98037f87e5ULL,
+ 0xedf4c615a4b29e94ULL, 0x4093286094110662ULL, 0xb0114ee85ae78063ULL,
+ 0x6ff1d0d6b672e78bULL, 0x6dcf96d591909250ULL, 0xdfe09e3eec9567e8ULL,
+ 0x3214582b4827f97cULL, 0xb46dc2ee143e6ac8ULL, 0xf6c0ac8da7cd1971ULL,
+ 0xebb60c10cd8901e4ULL, 0xf7df8f023abcad92ULL, 0x9c52d3d2c217a0b2ULL,
+ 0x6b8d5cd0f8ab0d20ULL, 0x3777f7a29b8fa734ULL, 0x011f238f9d71b4e3ULL,
+ 0xc1b75b2f3c42be45ULL, 0x5de588fdfe551ef7ULL, 0x6eeef3592b035368ULL,
+ 0xaa3a07ffc4e9b365ULL, 0xecebe59a39c32a77ULL, 0x5ba742f8976e8187ULL,
+ 0x4b4a48e0b22d0e11ULL, 0xddded83dcb771233ULL, 0xa59feb79ac0c51bdULL,
+ 0xc7f5912a55792135ULL
+ }, {
+ 0x6d6ae04668a9b08aULL, 0x3ab3f04b0be8c743ULL, 0xe51e166b54b3c908ULL,
+ 0xbe90a9eb35c2f139ULL, 0xb2c7066637f2bec1ULL, 0xaa6945613392202cULL,
+ 0x9a28c36f3b5201ebULL, 0xddce5a93ab536994ULL, 0x0e34133ef6382827ULL,
+ 0x52a02ba1ec55048bULL, 0xa2f88f97c4b2a177ULL, 0x8640e513ca2251a5ULL,
+ 0xcdf1d36258137622ULL, 0xfe6cb708dedf8ddbULL, 0x8a174a9ec8121e5dULL,
+ 0x679896036b81560eULL, 0x59ed033395795feeULL, 0x1dd778ab8b74edafULL,
+ 0xee533ef92d9f926dULL, 0x2a8c79baf8a8d8f5ULL, 0x6bcf398e69b119f6ULL,
+ 0xe20491742fafdd95ULL, 0x276488e0809c2aecULL, 0xea955b82d88f5cceULL,
+ 0x7102c63a99d9e0c4ULL, 0xf9763017a5c39946ULL, 0x429fa2501f151b3dULL,
+ 0x4659c72bea05d59eULL, 0x984b7fdccf5a6634ULL, 0xf742232953fbb161ULL,
+ 0x3041860e08c021c7ULL, 0x747bfd9616cd9386ULL, 0x4bb1367192312787ULL,
+ 0x1b72a1638a6c44d3ULL, 0x4a0e68a6e8359a66ULL, 0x169a5039f258b6caULL,
+ 0xb98a2ef44edee5a4ULL, 0xd9083fe85e43a737ULL, 0x967f6ce239624e13ULL,
+ 0x8874f62d3c1a7982ULL, 0x3c1629830af06e3fULL, 0x9165ebfd427e5a8eULL,
+ 0xb5dd81794ceeaa5cULL, 0x0de8f15a7834f219ULL, 0x70bd98ede3dd5d25ULL,
+ 0xaccc9ca9328a8950ULL, 0x56664eda1945ca28ULL, 0x221db34c0f8859aeULL,
+ 0x26dbd637fa98970dULL, 0x1acdffb4f068f932ULL, 0x4585254f64090fa0ULL,
+ 0x72de245e17d53afaULL, 0x1546b25d7c546cf4ULL, 0x207e0ffffb803e71ULL,
+ 0xfaaad2732bcf4378ULL, 0xb462dfae36ea17bdULL, 0xcf926fd1ac1b11fdULL,
+ 0xe0672dc7dba7ba4aULL, 0xd3fa49ad5d6b41b3ULL, 0x8ba81449b216a3bcULL,
+ 0x14f9ec8a0650d115ULL, 0x40fc1ee3eb1d7ce2ULL, 0x23a2ed9b758ce44fULL,
+ 0x782c521b14fddc7eULL, 0x1c68267cf170504eULL, 0xbcf31558c1ca96e6ULL,
+ 0xa781b43b4ba6d235ULL, 0xf6fd7dfe29ff0c80ULL, 0xb0a4bad5c3fad91eULL,
+ 0xd199f51ea963266cULL, 0x414340349119c103ULL, 0x5405f269ed4dadf7ULL,
+ 0xabd61bb649969dcdULL, 0x6813dbeae7bdc3c8ULL, 0x65fb2ab09f8931d1ULL,
+ 0xf1e7fae152e3181dULL, 0xc1a67cef5a2339daULL, 0x7a4feea8e0f5bba1ULL,
+ 0x1e0b9acf05783791ULL, 0x5b8ebf8061713831ULL, 0x80e53cdbcb3af8d9ULL,
+ 0x7e898bd315e57502ULL, 0xc6bcfbf0213f2d47ULL, 0x95a38e86b76e942dULL,
+ 0x092e94218d243cbaULL, 0x8339debf453622e7ULL, 0xb11be402b9fe64ffULL,
+ 0x57d9100d634177c9ULL, 0xcc4e8db52217cbc3ULL, 0x3b0cae9c71ec7aa2ULL,
+ 0xfb158ca451cbfe99ULL, 0x2b33276d82ac6514ULL, 0x01bf5ed77a04bde1ULL,
+ 0xc5601994af33f779ULL, 0x75c4a3416cc92e67ULL, 0xf3844652a6eb7fc2ULL,
+ 0x3487e375fdd0ef64ULL, 0x18ae430704609eedULL, 0x4d14efb993298efbULL,
+ 0x815a620cb13e4538ULL, 0x125c354207487869ULL, 0x9eeea614ce42cf48ULL,
+ 0xce2d3106d61fac1cULL, 0xbbe99247bad6827bULL, 0x071a871f7b1c149dULL,
+ 0x2e4a1cc10db81656ULL, 0x77a71ff298c149b8ULL, 0x06a5d9c80118a97cULL,
+ 0xad73c27e488e34b1ULL, 0x443a7b981e0db241ULL, 0xe3bbcfa355ab6074ULL,
+ 0x0af276450328e684ULL, 0x73617a896dd1871bULL, 0x58525de4ef7de20fULL,
+ 0xb7be3dcab8e6cd83ULL, 0x19111dd07e64230cULL, 0x842359a03e2a367aULL,
+ 0x103f89f1f3401fb6ULL, 0xdc710444d157d475ULL, 0xb835702334da5845ULL,
+ 0x4320fc876511a6dcULL, 0xd026abc9d3679b8dULL, 0x17250eee885c0b2bULL,
+ 0x90dab52a387ae76fULL, 0x31fed8d972c49c26ULL, 0x89cba8fa461ec463ULL,
+ 0x2ff5421677bcabb7ULL, 0x396f122f85e41d7dULL, 0xa09b332430bac6a8ULL,
+ 0xc888e8ced7070560ULL, 0xaeaf201ac682ee8fULL, 0x1180d7268944a257ULL,
+ 0xf058a43628e7a5fcULL, 0xbd4c4b8fbbce2b07ULL, 0xa1246df34abe7b49ULL,
+ 0x7d5569b79be9af3cULL, 0xa9b5a705bd9efa12ULL, 0xdb6b835baa4bc0e8ULL,
+ 0x05793bac8f147342ULL, 0x21c1512881848390ULL, 0xfdb0556c50d357e5ULL,
+ 0x613d4fcb6a99ff72ULL, 0x03dce2648e0cda3eULL, 0xe949b9e6568386f0ULL,
+ 0xfc0f0bbb2ad7ea04ULL, 0x6a70675913b5a417ULL, 0x7f36d5046fe1c8e3ULL,
+ 0x0c57af8d02304ff8ULL, 0x32223abdfcc84618ULL, 0x0891caf6f720815bULL,
+ 0xa63eeaec31a26fd4ULL, 0x2507345374944d33ULL, 0x49d28ac266394058ULL,
+ 0xf5219f9aa7f3d6beULL, 0x2d96fea583b4cc68ULL, 0x5a31e1571b7585d0ULL,
+ 0x8ed12fe53d02d0feULL, 0xdfade6205f5b0e4bULL, 0x4cabb16ee92d331aULL,
+ 0x04c6657bf510cea3ULL, 0xd73c2cd6a87b8f10ULL, 0xe1d87310a1a307abULL,
+ 0x6cd5be9112ad0d6bULL, 0x97c032354366f3f2ULL, 0xd4e0ceb22677552eULL,
+ 0x0000000000000000ULL, 0x29509bde76a402cbULL, 0xc27a9e8bd42fe3e4ULL,
+ 0x5ef7842cee654b73ULL, 0xaf107ecdbc86536eULL, 0x3fcacbe784fcb401ULL,
+ 0xd55f90655c73e8cfULL, 0xe6c2f40fdabf1336ULL, 0xe8f6e7312c873b11ULL,
+ 0xeb2a0555a28be12fULL, 0xe4a148bc2eb774e9ULL, 0x9b979db84156bc0aULL,
+ 0x6eb60222e6a56ab4ULL, 0x87ffbbc4b026ec44ULL, 0xc703a5275b3b90a6ULL,
+ 0x47e699fc9001687fULL, 0x9c8d1aa73a4aa897ULL, 0x7cea3760e1ed12ddULL,
+ 0x4ec80ddd1d2554c5ULL, 0x13e36b957d4cc588ULL, 0x5d2b66486069914dULL,
+ 0x92b90999cc7280b0ULL, 0x517cc9c56259deb5ULL, 0xc937b619ad03b881ULL,
+ 0xec30824ad997f5b2ULL, 0xa45d565fc5aa080bULL, 0xd6837201d27f32f1ULL,
+ 0x635ef3789e9198adULL, 0x531f75769651b96aULL, 0x4f77530a6721e924ULL,
+ 0x486dd4151c3dfdb9ULL, 0x5f48dafb9461f692ULL, 0x375b011173dc355aULL,
+ 0x3da9775470f4d3deULL, 0x8d0dcd81b30e0ac0ULL, 0x36e45fc609d888bbULL,
+ 0x55baacbe97491016ULL, 0x8cb29356c90ab721ULL, 0x76184125e2c5f459ULL,
+ 0x99f4210bb55edbd5ULL, 0x6f095cf59ca1d755ULL, 0x9f51f8c3b44672a9ULL,
+ 0x3538bda287d45285ULL, 0x50c39712185d6354ULL, 0xf23b1885dcefc223ULL,
+ 0x79930ccc6ef9619fULL, 0xed8fdc9da3934853ULL, 0xcb540aaa590bdf5eULL,
+ 0x5c94389f1a6d2cacULL, 0xe77daad8a0bbaed7ULL, 0x28efc5090ca0bf2aULL,
+ 0xbf2ff73c4fc64cd8ULL, 0xb37858b14df60320ULL, 0xf8c96ec0dfc724a7ULL,
+ 0x828680683f329f06ULL, 0x941cd051cd6a29ccULL, 0xc3c5c05cae2b5e05ULL,
+ 0xb601631dc2e27062ULL, 0xc01922382027843bULL, 0x24b86a840e90f0d2ULL,
+ 0xd245177a276ffc52ULL, 0x0f8b4de98c3c95c6ULL, 0x3e759530fef809e0ULL,
+ 0x0b4d2892792c5b65ULL, 0xc4df4743d5374a98ULL, 0xa5e20888bfaeb5eaULL,
+ 0xba56cc90c0d23f9aULL, 0x38d04cf8ffe0a09cULL, 0x62e1adafe495254cULL,
+ 0x0263bcb3f40867dfULL, 0xcaeb547d230f62bfULL, 0x6082111c109d4293ULL,
+ 0xdad4dd8cd04f7d09ULL, 0xefec602e579b2f8cULL, 0x1fb4c4187f7c8a70ULL,
+ 0xffd3e9dfa4db303aULL, 0x7bf0b07f9af10640ULL, 0xf49ec14dddf76b5fULL,
+ 0x8f6e713247066d1fULL, 0x339d646a86ccfbf9ULL, 0x64447467e58d8c30ULL,
+ 0x2c29a072f9b07189ULL, 0xd8b7613f24471ad6ULL, 0x6627c8d41185ebefULL,
+ 0xa347d140beb61c96ULL, 0xde12b8f7255fb3aaULL, 0x9d324470404e1576ULL,
+ 0x9306574eb6763d51ULL, 0xa80af9d2c79a47f3ULL, 0x859c0777442e8b9bULL,
+ 0x69ac853d9db97e29ULL
+ }, {
+ 0xc3407dfc2de6377eULL, 0x5b9e93eea4256f77ULL, 0xadb58fdd50c845e0ULL,
+ 0x5219ff11a75bed86ULL, 0x356b61cfd90b1de9ULL, 0xfb8f406e25abe037ULL,
+ 0x7a5a0231c0f60796ULL, 0x9d3cd216e1f5020bULL, 0x0c6550fb6b48d8f3ULL,
+ 0xf57508c427ff1c62ULL, 0x4ad35ffa71cb407dULL, 0x6290a2da1666aa6dULL,
+ 0xe284ec2349355f9fULL, 0xb3c307c53d7c84ecULL, 0x05e23c0468365a02ULL,
+ 0x190bac4d6c9ebfa8ULL, 0x94bbbee9e28b80faULL, 0xa34fc777529cb9b5ULL,
+ 0xcc7b39f095bcd978ULL, 0x2426addb0ce532e3ULL, 0x7e79329312ce4fc7ULL,
+ 0xab09a72eebec2917ULL, 0xf8d15499f6b9d6c2ULL, 0x1a55b8babf8c895dULL,
+ 0xdb8add17fb769a85ULL, 0xb57f2f368658e81bULL, 0x8acd36f18f3f41f6ULL,
+ 0x5ce3b7bba50f11d3ULL, 0x114dcc14d5ee2f0aULL, 0xb91a7fcded1030e8ULL,
+ 0x81d5425fe55de7a1ULL, 0xb6213bc1554adeeeULL, 0x80144ef95f53f5f2ULL,
+ 0x1e7688186db4c10cULL, 0x3b912965db5fe1bcULL, 0xc281715a97e8252dULL,
+ 0x54a5d7e21c7f8171ULL, 0x4b12535ccbc5522eULL, 0x1d289cefbea6f7f9ULL,
+ 0x6ef5f2217d2e729eULL, 0xe6a7dc819b0d17ceULL, 0x1b94b41c05829b0eULL,
+ 0x33d7493c622f711eULL, 0xdcf7f942fa5ce421ULL, 0x600fba8b7f7a8ecbULL,
+ 0x46b60f011a83988eULL, 0x235b898e0dcf4c47ULL, 0x957ab24f588592a9ULL,
+ 0x4354330572b5c28cULL, 0xa5f3ef84e9b8d542ULL, 0x8c711e02341b2d01ULL,
+ 0x0b1874ae6a62a657ULL, 0x1213d8e306fc19ffULL, 0xfe6d7c6a4d9dba35ULL,
+ 0x65ed868f174cd4c9ULL, 0x88522ea0e6236550ULL, 0x899322065c2d7703ULL,
+ 0xc01e690bfef4018bULL, 0x915982ed8abddaf8ULL, 0xbe675b98ec3a4e4cULL,
+ 0xa996bf7f82f00db1ULL, 0xe1daf8d49a27696aULL, 0x2effd5d3dc8986e7ULL,
+ 0xd153a51f2b1a2e81ULL, 0x18caa0ebd690adfbULL, 0x390e3134b243c51aULL,
+ 0x2778b92cdff70416ULL, 0x029f1851691c24a6ULL, 0x5e7cafeacc133575ULL,
+ 0xfa4e4cc89fa5f264ULL, 0x5a5f9f481e2b7d24ULL, 0x484c47ab18d764dbULL,
+ 0x400a27f2a1a7f479ULL, 0xaeeb9b2a83da7315ULL, 0x721c626879869734ULL,
+ 0x042330a2d2384851ULL, 0x85f672fd3765aff0ULL, 0xba446b3a3e02061dULL,
+ 0x73dd6ecec3888567ULL, 0xffac70ccf793a866ULL, 0xdfa9edb5294ed2d4ULL,
+ 0x6c6aea7014325638ULL, 0x834a5a0e8c41c307ULL, 0xcdba35562fb2cb2bULL,
+ 0x0ad97808d06cb404ULL, 0x0f3b440cb85aee06ULL, 0xe5f9c876481f213bULL,
+ 0x98deee1289c35809ULL, 0x59018bbfcd394bd1ULL, 0xe01bf47220297b39ULL,
+ 0xde68e1139340c087ULL, 0x9fa3ca4788e926adULL, 0xbb85679c840c144eULL,
+ 0x53d8f3b71d55ffd5ULL, 0x0da45c5dd146caa0ULL, 0x6f34fe87c72060cdULL,
+ 0x57fbc315cf6db784ULL, 0xcee421a1fca0fddeULL, 0x3d2d0196607b8d4bULL,
+ 0x642c8a29ad42c69aULL, 0x14aff010bdd87508ULL, 0xac74837beac657b3ULL,
+ 0x3216459ad821634dULL, 0x3fb219c70967a9edULL, 0x06bc28f3bb246cf7ULL,
+ 0xf2082c9126d562c6ULL, 0x66b39278c45ee23cULL, 0xbd394f6f3f2878b9ULL,
+ 0xfd33689d9e8f8cc0ULL, 0x37f4799eb017394fULL, 0x108cc0b26fe03d59ULL,
+ 0xda4bd1b1417888d6ULL, 0xb09d1332ee6eb219ULL, 0x2f3ed975668794b4ULL,
+ 0x58c0871977375982ULL, 0x7561463d78ace990ULL, 0x09876cff037e82f1ULL,
+ 0x7fb83e35a8c05d94ULL, 0x26b9b58a65f91645ULL, 0xef20b07e9873953fULL,
+ 0x3148516d0b3355b8ULL, 0x41cb2b541ba9e62aULL, 0x790416c613e43163ULL,
+ 0xa011d380818e8f40ULL, 0x3a5025c36151f3efULL, 0xd57095bdf92266d0ULL,
+ 0x498d4b0da2d97688ULL, 0x8b0c3a57353153a5ULL, 0x21c491df64d368e1ULL,
+ 0x8f2f0af5e7091bf4ULL, 0x2da1c1240f9bb012ULL, 0xc43d59a92ccc49daULL,
+ 0xbfa6573e56345c1fULL, 0x828b56a8364fd154ULL, 0x9a41f643e0df7cafULL,
+ 0xbcf843c985266aeaULL, 0x2b1de9d7b4bfdce5ULL, 0x20059d79dedd7ab2ULL,
+ 0x6dabe6d6ae3c446bULL, 0x45e81bf6c991ae7bULL, 0x6351ae7cac68b83eULL,
+ 0xa432e32253b6c711ULL, 0xd092a9b991143cd2ULL, 0xcac711032e98b58fULL,
+ 0xd8d4c9e02864ac70ULL, 0xc5fc550f96c25b89ULL, 0xd7ef8dec903e4276ULL,
+ 0x67729ede7e50f06fULL, 0xeac28c7af045cf3dULL, 0xb15c1f945460a04aULL,
+ 0x9cfddeb05bfb1058ULL, 0x93c69abce3a1fe5eULL, 0xeb0380dc4a4bdd6eULL,
+ 0xd20db1e8f8081874ULL, 0x229a8528b7c15e14ULL, 0x44291750739fbc28ULL,
+ 0xd3ccbd4e42060a27ULL, 0xf62b1c33f4ed2a97ULL, 0x86a8660ae4779905ULL,
+ 0xd62e814a2a305025ULL, 0x477703a7a08d8addULL, 0x7b9b0e977af815c5ULL,
+ 0x78c51a60a9ea2330ULL, 0xa6adfb733aaae3b7ULL, 0x97e5aa1e3199b60fULL,
+ 0x0000000000000000ULL, 0xf4b404629df10e31ULL, 0x5564db44a6719322ULL,
+ 0x9207961a59afec0dULL, 0x9624a6b88b97a45cULL, 0x363575380a192b1cULL,
+ 0x2c60cd82b595a241ULL, 0x7d272664c1dc7932ULL, 0x7142769faa94a1c1ULL,
+ 0xa1d0df263b809d13ULL, 0x1630e841d4c451aeULL, 0xc1df65ad44fa13d8ULL,
+ 0x13d2d445bcf20bacULL, 0xd915c546926abe23ULL, 0x38cf3d92084dd749ULL,
+ 0xe766d0272103059dULL, 0xc7634d5effde7f2fULL, 0x077d2455012a7ea4ULL,
+ 0xedbfa82ff16fb199ULL, 0xaf2a978c39d46146ULL, 0x42953fa3c8bbd0dfULL,
+ 0xcb061da59496a7dcULL, 0x25e7a17db6eb20b0ULL, 0x34aa6d6963050fbaULL,
+ 0xa76cf7d580a4f1e4ULL, 0xf7ea10954ee338c4ULL, 0xfcf2643b24819e93ULL,
+ 0xcf252d0746aeef8dULL, 0x4ef06f58a3f3082cULL, 0x563acfb37563a5d7ULL,
+ 0x5086e740ce47c920ULL, 0x2982f186dda3f843ULL, 0x87696aac5e798b56ULL,
+ 0x5d22bb1d1f010380ULL, 0x035e14f7d31236f5ULL, 0x3cec0d30da759f18ULL,
+ 0xf3c920379cdb7095ULL, 0xb8db736b571e22bbULL, 0xdd36f5e44052f672ULL,
+ 0xaac8ab8851e23b44ULL, 0xa857b3d938fe1fe2ULL, 0x17f1e4e76eca43fdULL,
+ 0xec7ea4894b61a3caULL, 0x9e62c6e132e734feULL, 0xd4b1991b432c7483ULL,
+ 0x6ad6c283af163acfULL, 0x1ce9904904a8e5aaULL, 0x5fbda34c761d2726ULL,
+ 0xf910583f4cb7c491ULL, 0xc6a241f845d06d7cULL, 0x4f3163fe19fd1a7fULL,
+ 0xe99c988d2357f9c8ULL, 0x8eee06535d0709a7ULL, 0x0efa48aa0254fc55ULL,
+ 0xb4be23903c56fa48ULL, 0x763f52caabbedf65ULL, 0xeee1bcd8227d876cULL,
+ 0xe345e085f33b4dccULL, 0x3e731561b369bbbeULL, 0x2843fd2067adea10ULL,
+ 0x2adce5710eb1ceb6ULL, 0xb7e03767ef44ccbdULL, 0x8db012a48e153f52ULL,
+ 0x61ceb62dc5749c98ULL, 0xe85d942b9959eb9bULL, 0x4c6f7709caef2c8aULL,
+ 0x84377e5b8d6bbda3ULL, 0x30895dcbb13d47ebULL, 0x74a04a9bc2a2fbc3ULL,
+ 0x6b17ce251518289cULL, 0xe438c4d0f2113368ULL, 0x1fb784bed7bad35fULL,
+ 0x9b80fae55ad16efcULL, 0x77fe5e6c11b0cd36ULL, 0xc858095247849129ULL,
+ 0x08466059b97090a2ULL, 0x01c10ca6ba0e1253ULL, 0x6988d6747c040c3aULL,
+ 0x6849dad2c60a1e69ULL, 0x5147ebe67449db73ULL, 0xc99905f4fd8a837aULL,
+ 0x991fe2b433cd4a5aULL, 0xf09734c04fc94660ULL, 0xa28ecbd1e892abe6ULL,
+ 0xf1563866f5c75433ULL, 0x4dae7baf70e13ed9ULL, 0x7ce62ac27bd26b61ULL,
+ 0x70837a39109ab392ULL, 0x90988e4b30b3c8abULL, 0xb2020b63877296bfULL,
+ 0x156efcb607d6675bULL
+ }, {
+ 0xe63f55ce97c331d0ULL, 0x25b506b0015bba16ULL, 0xc8706e29e6ad9ba8ULL,
+ 0x5b43d3775d521f6aULL, 0x0bfa3d577035106eULL, 0xab95fc172afb0e66ULL,
+ 0xf64b63979e7a3276ULL, 0xf58b4562649dad4bULL, 0x48f7c3dbae0c83f1ULL,
+ 0xff31916642f5c8c5ULL, 0xcbb048dc1c4a0495ULL, 0x66b8f83cdf622989ULL,
+ 0x35c130e908e2b9b0ULL, 0x7c761a61f0b34fa1ULL, 0x3601161cf205268dULL,
+ 0x9e54ccfe2219b7d6ULL, 0x8b7d90a538940837ULL, 0x9cd403588ea35d0bULL,
+ 0xbc3c6fea9ccc5b5aULL, 0xe5ff733b6d24aeedULL, 0xceed22de0f7eb8d2ULL,
+ 0xec8581cab1ab545eULL, 0xb96105e88ff8e71dULL, 0x8ca03501871a5eadULL,
+ 0x76ccce65d6db2a2fULL, 0x5883f582a7b58057ULL, 0x3f7be4ed2e8adc3eULL,
+ 0x0fe7be06355cd9c9ULL, 0xee054e6c1d11be83ULL, 0x1074365909b903a6ULL,
+ 0x5dde9f80b4813c10ULL, 0x4a770c7d02b6692cULL, 0x5379c8d5d7809039ULL,
+ 0xb4067448161ed409ULL, 0x5f5e5026183bd6cdULL, 0xe898029bf4c29df9ULL,
+ 0x7fb63c940a54d09cULL, 0xc5171f897f4ba8bcULL, 0xa6f28db7b31d3d72ULL,
+ 0x2e4f3be7716eaa78ULL, 0x0d6771a099e63314ULL, 0x82076254e41bf284ULL,
+ 0x2f0fd2b42733df98ULL, 0x5c9e76d3e2dc49f0ULL, 0x7aeb569619606cdbULL,
+ 0x83478b07b2468764ULL, 0xcfadcb8d5923cd32ULL, 0x85dac7f05b95a41eULL,
+ 0xb5469d1b4043a1e9ULL, 0xb821ecbbd9a592fdULL, 0x1b8e0b0e798c13c8ULL,
+ 0x62a57b6d9a0be02eULL, 0xfcf1b793b81257f8ULL, 0x9d94ea0bd8fe28ebULL,
+ 0x4cea408aeb654a56ULL, 0x23284a47e888996cULL, 0x2d8f1d128b893545ULL,
+ 0xf4cbac3132c0d8abULL, 0xbd7c86b9ca912ebaULL, 0x3a268eef3dbe6079ULL,
+ 0xf0d62f6077a9110cULL, 0x2735c916ade150cbULL, 0x89fd5f03942ee2eaULL,
+ 0x1acee25d2fd16628ULL, 0x90f39bab41181bffULL, 0x430dfe8cde39939fULL,
+ 0xf70b8ac4c8274796ULL, 0x1c53aeaac6024552ULL, 0x13b410acf35e9c9bULL,
+ 0xa532ab4249faa24fULL, 0x2b1251e5625a163fULL, 0xd7e3e676da4841c7ULL,
+ 0xa7b264e4e5404892ULL, 0xda8497d643ae72d3ULL, 0x861ae105a1723b23ULL,
+ 0x38a6414991048aa4ULL, 0x6578dec92585b6b4ULL, 0x0280cfa6acbaeaddULL,
+ 0x88bdb650c273970aULL, 0x9333bd5ebbff84c2ULL, 0x4e6a8f2c47dfa08bULL,
+ 0x321c954db76cef2aULL, 0x418d312a72837942ULL, 0xb29b38bfffcdf773ULL,
+ 0x6c022c38f90a4c07ULL, 0x5a033a240b0f6a8aULL, 0x1f93885f3ce5da6fULL,
+ 0xc38a537e96988bc6ULL, 0x39e6a81ac759ff44ULL, 0x29929e43cee0fce2ULL,
+ 0x40cdd87924de0ca2ULL, 0xe9d8ebc8a29fe819ULL, 0x0c2798f3cfbb46f4ULL,
+ 0x55e484223e53b343ULL, 0x4650948ecd0d2fd8ULL, 0x20e86cb2126f0651ULL,
+ 0x6d42c56baf5739e7ULL, 0xa06fc1405ace1e08ULL, 0x7babbfc54f3d193bULL,
+ 0x424d17df8864e67fULL, 0xd8045870ef14980eULL, 0xc6d7397c85ac3781ULL,
+ 0x21a885e1443273b1ULL, 0x67f8116f893f5c69ULL, 0x24f5efe35706cff6ULL,
+ 0xd56329d076f2ab1aULL, 0x5e1eb9754e66a32dULL, 0x28d2771098bd8902ULL,
+ 0x8f6013f47dfdc190ULL, 0x17a993fdb637553cULL, 0xe0a219397e1012aaULL,
+ 0x786b9930b5da8606ULL, 0x6e82e39e55b0a6daULL, 0x875a0856f72f4ec3ULL,
+ 0x3741ff4fa458536dULL, 0xac4859b3957558fcULL, 0x7ef6d5c75c09a57cULL,
+ 0xc04a758b6c7f14fbULL, 0xf9acdd91ab26ebbfULL, 0x7391a467c5ef9668ULL,
+ 0x335c7c1ee1319acaULL, 0xa91533b18641e4bbULL, 0xe4bf9a683b79db0dULL,
+ 0x8e20faa72ba0b470ULL, 0x51f907737b3a7ae4ULL, 0x2268a314bed5ec8cULL,
+ 0xd944b123b949edeeULL, 0x31dcb3b84d8b7017ULL, 0xd3fe65279f218860ULL,
+ 0x097af2f1dc8ffab3ULL, 0x9b09a6fc312d0b91ULL, 0xcc6ded78a3c4520fULL,
+ 0x3481d9ba5ebfcc50ULL, 0x4f2a667f1182d56bULL, 0xdfd9fdd4509ace94ULL,
+ 0x26752045fbbc252bULL, 0xbffc491f662bc467ULL, 0xdd593272fc202449ULL,
+ 0x3cbbc218d46d4303ULL, 0x91b372f817456e1fULL, 0x681faf69bc6385a0ULL,
+ 0xb686bbeebaa43ed4ULL, 0x1469b5084cd0ca01ULL, 0x98c98009cbca94acULL,
+ 0x6438379a73d8c354ULL, 0xc2caba2dc0c5fe26ULL, 0x3e3b0dbe78d7a9deULL,
+ 0x50b9ee202d670f04ULL, 0x4590b27b37eab0e5ULL, 0x6025b4cb36b10af3ULL,
+ 0xfb2c1237079c0162ULL, 0xa12f28130c936be8ULL, 0x4b37e52e54eb1cccULL,
+ 0x083a1ba28ad28f53ULL, 0xc10a9cd83a22611bULL, 0x9f1425ad7444c236ULL,
+ 0x069d4cf7e9d3237aULL, 0xedc56899e7f621beULL, 0x778c273680865fcfULL,
+ 0x309c5aeb1bd605f7ULL, 0x8de0dc52d1472b4dULL, 0xf8ec34c2fd7b9e5fULL,
+ 0xea18cd3d58787724ULL, 0xaad515447ca67b86ULL, 0x9989695a9d97e14cULL,
+ 0x0000000000000000ULL, 0xf196c63321f464ecULL, 0x71116bc169557cb5ULL,
+ 0xaf887f466f92c7c1ULL, 0x972e3e0ffe964d65ULL, 0x190ec4a8d536f915ULL,
+ 0x95aef1a9522ca7b8ULL, 0xdc19db21aa7d51a9ULL, 0x94ee18fa0471d258ULL,
+ 0x8087adf248a11859ULL, 0xc457f6da2916dd5cULL, 0xfa6cfb6451c17482ULL,
+ 0xf256e0c6db13fbd1ULL, 0x6a9f60cf10d96f7dULL, 0x4daaa9d9bd383fb6ULL,
+ 0x03c026f5fae79f3dULL, 0xde99148706c7bb74ULL, 0x2a52b8b6340763dfULL,
+ 0x6fc20acd03edd33aULL, 0xd423c08320afdefaULL, 0xbbe1ca4e23420dc0ULL,
+ 0x966ed75ca8cb3885ULL, 0xeb58246e0e2502c4ULL, 0x055d6a021334bc47ULL,
+ 0xa47242111fa7d7afULL, 0xe3623fcc84f78d97ULL, 0x81c744a11efc6db9ULL,
+ 0xaec8961539cfb221ULL, 0xf31609958d4e8e31ULL, 0x63e5923ecc5695ceULL,
+ 0x47107ddd9b505a38ULL, 0xa3afe7b5a0298135ULL, 0x792b7063e387f3e6ULL,
+ 0x0140e953565d75e0ULL, 0x12f4f9ffa503e97bULL, 0x750ce8902c3cb512ULL,
+ 0xdbc47e8515f30733ULL, 0x1ed3610c6ab8af8fULL, 0x5239218681dde5d9ULL,
+ 0xe222d69fd2aaf877ULL, 0xfe71783514a8bd25ULL, 0xcaf0a18f4a177175ULL,
+ 0x61655d9860ec7f13ULL, 0xe77fbc9dc19e4430ULL, 0x2ccff441ddd440a5ULL,
+ 0x16e97aaee06a20dcULL, 0xa855dae2d01c915bULL, 0x1d1347f9905f30b2ULL,
+ 0xb7c652bdecf94b34ULL, 0xd03e43d265c6175dULL, 0xfdb15ec0ee4f2218ULL,
+ 0x57644b8492e9599eULL, 0x07dda5a4bf8e569aULL, 0x54a46d71680ec6a3ULL,
+ 0x5624a2d7c4b42c7eULL, 0xbebca04c3076b187ULL, 0x7d36f332a6ee3a41ULL,
+ 0x3b6667bc6be31599ULL, 0x695f463aea3ef040ULL, 0xad08b0e0c3282d1cULL,
+ 0xb15b1e4a052a684eULL, 0x44d05b2861b7c505ULL, 0x15295c5b1a8dbfe1ULL,
+ 0x744c01c37a61c0f2ULL, 0x59c31cd1f1e8f5b7ULL, 0xef45a73f4b4ccb63ULL,
+ 0x6bdf899c46841a9dULL, 0x3dfb2b4b823036e3ULL, 0xa2ef0ee6f674f4d5ULL,
+ 0x184e2dfb836b8cf5ULL, 0x1134df0a5fe47646ULL, 0xbaa1231d751f7820ULL,
+ 0xd17eaa81339b62bdULL, 0xb01bf71953771daeULL, 0x849a2ea30dc8d1feULL,
+ 0x705182923f080955ULL, 0x0ea757556301ac29ULL, 0x041d83514569c9a7ULL,
+ 0x0abad4042668658eULL, 0x49b72a88f851f611ULL, 0x8a3d79f66ec97dd7ULL,
+ 0xcd2d042bf59927efULL, 0xc930877ab0f0ee48ULL, 0x9273540deda2f122ULL,
+ 0xc797d02fd3f14261ULL, 0xe1e2f06a284d674aULL, 0xd2be8c74c97cfd80ULL,
+ 0x9a494faf67707e71ULL, 0xb3dbd1eca9908293ULL, 0x72d14d3493b2e388ULL,
+ 0xd6a30f258c153427ULL
+ }
+}; /* Ax */
+
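+/* 512-bit XOR: z = x ^ y, one qword at a time. */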
+static void streebog_xor(const struct streebog_uint512 *x,
+ const struct streebog_uint512 *y,
+ struct streebog_uint512 *z)
+{
+ z->qword[0] = x->qword[0] ^ y->qword[0];
+ z->qword[1] = x->qword[1] ^ y->qword[1];
+ z->qword[2] = x->qword[2] ^ y->qword[2];
+ z->qword[3] = x->qword[3] ^ y->qword[3];
+ z->qword[4] = x->qword[4] ^ y->qword[4];
+ z->qword[5] = x->qword[5] ^ y->qword[5];
+ z->qword[6] = x->qword[6] ^ y->qword[6];
+ z->qword[7] = x->qword[7] ^ y->qword[7];
+}
+
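+/*
+ * data = LPS(x ^ y).  The Ax[8][256] tables above fold Streebog's S
+ * (substitution), P (byte transposition) and L (linear) transforms into
+ * eight 64-bit lookup tables, so each output qword is built from eight
+ * table lookups XORed together, one per byte of the XORed input.
+ */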
+static void streebog_xlps(const struct streebog_uint512 *x,
+ const struct streebog_uint512 *y,
+ struct streebog_uint512 *data)
+{
+ u64 r0, r1, r2, r3, r4, r5, r6, r7;
+ int i;
+
+ r0 = le64_to_cpu(x->qword[0] ^ y->qword[0]);
+ r1 = le64_to_cpu(x->qword[1] ^ y->qword[1]);
+ r2 = le64_to_cpu(x->qword[2] ^ y->qword[2]);
+ r3 = le64_to_cpu(x->qword[3] ^ y->qword[3]);
+ r4 = le64_to_cpu(x->qword[4] ^ y->qword[4]);
+ r5 = le64_to_cpu(x->qword[5] ^ y->qword[5]);
+ r6 = le64_to_cpu(x->qword[6] ^ y->qword[6]);
+ r7 = le64_to_cpu(x->qword[7] ^ y->qword[7]);
+
+ for (i = 0; i < 8; i++) {
+ data->qword[i] = cpu_to_le64(Ax[0][r0 & 0xFF]);
+ data->qword[i] ^= cpu_to_le64(Ax[1][r1 & 0xFF]);
+ data->qword[i] ^= cpu_to_le64(Ax[2][r2 & 0xFF]);
+ data->qword[i] ^= cpu_to_le64(Ax[3][r3 & 0xFF]);
+ data->qword[i] ^= cpu_to_le64(Ax[4][r4 & 0xFF]);
+ data->qword[i] ^= cpu_to_le64(Ax[5][r5 & 0xFF]);
+ data->qword[i] ^= cpu_to_le64(Ax[6][r6 & 0xFF]);
+ data->qword[i] ^= cpu_to_le64(Ax[7][r7 & 0xFF]);
+ r0 >>= 8;
+ r1 >>= 8;
+ r2 >>= 8;
+ r3 >>= 8;
+ r4 >>= 8;
+ r5 >>= 8;
+ r6 >>= 8;
+ r7 >>= 8;
+ }
+}
+
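+/* One round of E(): Ki = LPS(Ki ^ C[i]), then data = LPS(Ki ^ data). */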
+static void streebog_round(int i, struct streebog_uint512 *Ki,
+ struct streebog_uint512 *data)
+{
+ streebog_xlps(Ki, &C[i], Ki);
+ streebog_xlps(Ki, data, data);
+}
+
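+/*
+ * GOST R 34.11-2012 specifies two IVs: all-zero for the 512-bit digest
+ * and 64 repeated 0x01 bytes for the 256-bit digest.
+ */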
+static int streebog_init(struct shash_desc *desc)
+{
+ struct streebog_state *ctx = shash_desc_ctx(desc);
+ unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
+ unsigned int i;
+
+ memset(ctx, 0, sizeof(struct streebog_state));
+ if (digest_size == STREEBOG256_DIGEST_SIZE) {
+ for (i = 0; i < 8; i++)
+ ctx->h.qword[i] = 0x0101010101010101ULL;
+ }
+ return 0;
+}
+
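+/* Pad the last block with a single 0x01 byte followed by zeroes. */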
+static void streebog_pad(struct streebog_state *ctx)
+{
+ if (ctx->fillsize >= STREEBOG_BLOCK_SIZE)
+ return;
+
+ memset(ctx->buffer + ctx->fillsize, 0,
+ sizeof(ctx->buffer) - ctx->fillsize);
+
+ ctx->buffer[ctx->fillsize] = 1;
+}
+
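+/*
+ * r = x + y mod 2^512 over little-endian qwords.  When sum == left,
+ * y->qword[i] plus the incoming carry wrapped to exactly 2^64 (or was
+ * zero), so the carry flag is deliberately left unchanged.
+ */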
+static void streebog_add512(const struct streebog_uint512 *x,
+ const struct streebog_uint512 *y,
+ struct streebog_uint512 *r)
+{
+ u64 carry = 0;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ const u64 left = le64_to_cpu(x->qword[i]);
+ u64 sum;
+
+ sum = left + le64_to_cpu(y->qword[i]) + carry;
+ if (sum != left)
+ carry = (sum < left);
+ r->qword[i] = cpu_to_le64(sum);
+ }
+}
+
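+/*
+ * Compression function g_N(h, m) = E(LPS(h ^ N), m) ^ h ^ m, where E is
+ * a 12-round XSPL cipher whose round keys are derived from the C[]
+ * constants.
+ */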
+static void streebog_g(struct streebog_uint512 *h,
+ const struct streebog_uint512 *N,
+ const u8 *m)
+{
+ struct streebog_uint512 Ki, data;
+ unsigned int i;
+
+ streebog_xlps(h, N, &data);
+
+ /* Starting E() */
+ Ki = data;
+ streebog_xlps(&Ki, (const struct streebog_uint512 *)&m[0], &data);
+
+ for (i = 0; i < 11; i++)
+ streebog_round(i, &Ki, &data);
+
+ streebog_xlps(&Ki, &C[11], &Ki);
+ streebog_xor(&Ki, &data, &data);
+ /* E() done */
+
+ streebog_xor(&data, h, &data);
+ streebog_xor(&data, (const struct streebog_uint512 *)&m[0], h);
+}
+
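+/*
+ * Absorb one full 64-byte block, then advance the bit counter N by 512
+ * (buffer512) and add the block into the running checksum Sigma.
+ */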
+static void streebog_stage2(struct streebog_state *ctx, const u8 *data)
+{
+ streebog_g(&ctx->h, &ctx->N, data);
+
+ streebog_add512(&ctx->N, &buffer512, &ctx->N);
+ streebog_add512(&ctx->Sigma, (const struct streebog_uint512 *)data,
+ &ctx->Sigma);
+}
+
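+/*
+ * Finalize: hash the padded tail, add the leftover bit count to N and
+ * the tail to Sigma, then chain g_0(N) and g_0(Sigma) to produce the
+ * final state.
+ */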
+static void streebog_stage3(struct streebog_state *ctx)
+{
+ struct streebog_uint512 buf = { { 0 } };
+
+ buf.qword[0] = cpu_to_le64(ctx->fillsize << 3);
+ streebog_pad(ctx);
+
+ streebog_g(&ctx->h, &ctx->N, (const u8 *)&ctx->buffer);
+ streebog_add512(&ctx->N, &buf, &ctx->N);
+ streebog_add512(&ctx->Sigma,
+ (const struct streebog_uint512 *)&ctx->buffer[0],
+ &ctx->Sigma);
+ streebog_g(&ctx->h, &buffer0, (const u8 *)&ctx->N);
+ streebog_g(&ctx->h, &buffer0, (const u8 *)&ctx->Sigma);
+ memcpy(&ctx->hash, &ctx->h, sizeof(struct streebog_uint512));
+}
+
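+/*
+ * Buffer partial input until a full 64-byte block is available; full
+ * blocks are consumed straight from the caller's buffer, which assumes
+ * u64 loads from that pointer are acceptable on the target architecture.
+ */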
+static int streebog_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
+{
+ struct streebog_state *ctx = shash_desc_ctx(desc);
+ size_t chunksize;
+
+ if (ctx->fillsize) {
+ chunksize = STREEBOG_BLOCK_SIZE - ctx->fillsize;
+ if (chunksize > len)
+ chunksize = len;
+ memcpy(&ctx->buffer[ctx->fillsize], data, chunksize);
+ ctx->fillsize += chunksize;
+ len -= chunksize;
+ data += chunksize;
+
+ if (ctx->fillsize == STREEBOG_BLOCK_SIZE) {
+ streebog_stage2(ctx, ctx->buffer);
+ ctx->fillsize = 0;
+ }
+ }
+
+ while (len >= STREEBOG_BLOCK_SIZE) {
+ streebog_stage2(ctx, data);
+ data += STREEBOG_BLOCK_SIZE;
+ len -= STREEBOG_BLOCK_SIZE;
+ }
+
+ if (len) {
+ memcpy(&ctx->buffer, data, len);
+ ctx->fillsize = len;
+ }
+ return 0;
+}
+
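+/* Streebog-256 is the upper 256 bits (qwords 4-7) of the final state. */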
+static int streebog_final(struct shash_desc *desc, u8 *digest)
+{
+ struct streebog_state *ctx = shash_desc_ctx(desc);
+
+ streebog_stage3(ctx);
+ ctx->fillsize = 0;
+ if (crypto_shash_digestsize(desc->tfm) == STREEBOG256_DIGEST_SIZE)
+ memcpy(digest, &ctx->hash.qword[4], STREEBOG256_DIGEST_SIZE);
+ else
+ memcpy(digest, &ctx->hash.qword[0], STREEBOG512_DIGEST_SIZE);
+ return 0;
+}
+
+static struct shash_alg algs[2] = { {
+ .digestsize = STREEBOG256_DIGEST_SIZE,
+ .init = streebog_init,
+ .update = streebog_update,
+ .final = streebog_final,
+ .descsize = sizeof(struct streebog_state),
+ .base = {
+ .cra_name = "streebog256",
+ .cra_driver_name = "streebog256-generic",
+ .cra_blocksize = STREEBOG_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ },
+}, {
+ .digestsize = STREEBOG512_DIGEST_SIZE,
+ .init = streebog_init,
+ .update = streebog_update,
+ .final = streebog_final,
+ .descsize = sizeof(struct streebog_state),
+ .base = {
+ .cra_name = "streebog512",
+ .cra_driver_name = "streebog512-generic",
+ .cra_blocksize = STREEBOG_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ }
+} };
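+
+/*
+ * Minimal usage sketch (illustration only, not part of this patch;
+ * error handling omitted): hashing a buffer with the synchronous
+ * shash API once these algorithms are registered.
+ *
+ *   struct crypto_shash *tfm = crypto_alloc_shash("streebog256", 0, 0);
+ *   SHASH_DESC_ON_STACK(desc, tfm);
+ *   u8 out[STREEBOG256_DIGEST_SIZE];
+ *
+ *   desc->tfm = tfm;
+ *   desc->flags = 0;
+ *   crypto_shash_digest(desc, data, len, out);
+ *   crypto_free_shash(tfm);
+ */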
+
+static int __init streebog_mod_init(void)
+{
+ return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit streebog_mod_fini(void)
+{
+ crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_init(streebog_mod_init);
+module_exit(streebog_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Chikunov <vt@altlinux.org>");
+MODULE_DESCRIPTION("Streebog Hash Function");
+
+MODULE_ALIAS_CRYPTO("streebog256");
+MODULE_ALIAS_CRYPTO("streebog256-generic");
+MODULE_ALIAS_CRYPTO("streebog512");
+MODULE_ALIAS_CRYPTO("streebog512-generic");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index c20c9f5c18f2..e7fb87e114a5 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -76,10 +76,12 @@ static char *check[] = {
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
- "lzo", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", NULL
+ "lzo", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
+ "streebog256", "streebog512",
+ NULL
};
-static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
+static u32 block_sizes[] = { 16, 64, 256, 1024, 1472, 8192, 0 };
static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
#define XBUFSIZE 8
@@ -1736,6 +1738,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("ctr(aes)");
ret += tcrypt_test("rfc3686(ctr(aes))");
ret += tcrypt_test("ofb(aes)");
+ ret += tcrypt_test("cfb(aes)");
break;
case 11:
@@ -1913,6 +1916,14 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("sm3");
break;
+ case 53:
+ ret += tcrypt_test("streebog256");
+ break;
+
+ case 54:
+ ret += tcrypt_test("streebog512");
+ break;
+
case 100:
ret += tcrypt_test("hmac(md5)");
break;
@@ -1969,6 +1980,14 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("hmac(sha3-512)");
break;
+ case 115:
+ ret += tcrypt_test("hmac(streebog256)");
+ break;
+
+ case 116:
+ ret += tcrypt_test("hmac(streebog512)");
+ break;
+
case 150:
ret += tcrypt_test("ansi_cprng");
break;
@@ -2060,6 +2079,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
+ test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+ test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
break;
case 201:
@@ -2297,6 +2320,18 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
break;
+
+ case 219:
+ test_cipher_speed("adiantum(xchacha12,aes)", ENCRYPT, sec, NULL,
+ 0, speed_template_32);
+ test_cipher_speed("adiantum(xchacha12,aes)", DECRYPT, sec, NULL,
+ 0, speed_template_32);
+ test_cipher_speed("adiantum(xchacha20,aes)", ENCRYPT, sec, NULL,
+ 0, speed_template_32);
+ test_cipher_speed("adiantum(xchacha20,aes)", DECRYPT, sec, NULL,
+ 0, speed_template_32);
+ break;
+
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
@@ -2407,6 +2442,16 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
test_hash_speed("sm3", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
/* fall through */
+ case 327:
+ test_hash_speed("streebog256", sec,
+ generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+ /* fall through */
+ case 328:
+ test_hash_speed("streebog512", sec,
+ generic_hash_speed_template);
+ if (mode > 300 && mode < 400) break;
+ /* fall through */
case 399:
break;
@@ -2520,6 +2565,16 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
num_mb);
if (mode > 400 && mode < 500) break;
/* fall through */
+ case 426:
+ test_mb_ahash_speed("streebog256", sec,
+ generic_hash_speed_template, num_mb);
+ if (mode > 400 && mode < 500) break;
+ /* fall through */
+ case 427:
+ test_mb_ahash_speed("streebog512", sec,
+ generic_hash_speed_template, num_mb);
+ if (mode > 400 && mode < 500) break;
+ /* fall through */
case 499:
break;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index b1f79c6bf409..0f684a414acb 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2404,6 +2404,18 @@ static int alg_test_null(const struct alg_test_desc *desc,
/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
{
+ .alg = "adiantum(xchacha12,aes)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(adiantum_xchacha12_aes_tv_template)
+ },
+ }, {
+ .alg = "adiantum(xchacha20,aes)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(adiantum_xchacha20_aes_tv_template)
+ },
+ }, {
.alg = "aegis128",
.test = alg_test_aead,
.suite = {
@@ -2691,6 +2703,13 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "cfb(aes)",
+ .test = alg_test_skcipher,
+ .fips_allowed = 1,
+ .suite = {
+ .cipher = __VECS(aes_cfb_tv_template)
+ },
+ }, {
.alg = "chacha20",
.test = alg_test_skcipher,
.suite = {
@@ -2805,6 +2824,7 @@ static const struct alg_test_desc alg_test_descs[] = {
}, {
.alg = "cts(cbc(aes))",
.test = alg_test_skcipher,
+ .fips_allowed = 1,
.suite = {
.cipher = __VECS(cts_mode_tv_template)
}
@@ -3185,6 +3205,18 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(hmac_sha512_tv_template)
}
}, {
+ .alg = "hmac(streebog256)",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(hmac_streebog256_tv_template)
+ }
+ }, {
+ .alg = "hmac(streebog512)",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(hmac_streebog512_tv_template)
+ }
+ }, {
.alg = "jitterentropy_rng",
.fips_allowed = 1,
.test = alg_test_null,
@@ -3292,6 +3324,12 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "nhpoly1305",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(nhpoly1305_tv_template)
+ }
+ }, {
.alg = "ofb(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -3497,6 +3535,18 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(sm3_tv_template)
}
}, {
+ .alg = "streebog256",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(streebog256_tv_template)
+ }
+ }, {
+ .alg = "streebog512",
+ .test = alg_test_hash,
+ .suite = {
+ .hash = __VECS(streebog512_tv_template)
+ }
+ }, {
.alg = "tgr128",
.test = alg_test_hash,
.suite = {
@@ -3545,6 +3595,18 @@ static const struct alg_test_desc alg_test_descs[] = {
.hash = __VECS(aes_xcbc128_tv_template)
}
}, {
+ .alg = "xchacha12",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(xchacha12_tv_template)
+ },
+ }, {
+ .alg = "xchacha20",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(xchacha20_tv_template)
+ },
+ }, {
.alg = "xts(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 1fe7b97ba03f..e8f47d7b92cd 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -27,7 +27,7 @@
#define MAX_DIGEST_SIZE 64
#define MAX_TAP 8
-#define MAX_KEYLEN 160
+#define MAX_KEYLEN 1088
#define MAX_IVLEN 32
struct hash_testvec {
@@ -35,10 +35,10 @@ struct hash_testvec {
const char *key;
const char *plaintext;
const char *digest;
- unsigned char tap[MAX_TAP];
+ unsigned short tap[MAX_TAP];
+ unsigned short np;
unsigned short psize;
- unsigned char np;
- unsigned char ksize;
+ unsigned short ksize;
};
/*
@@ -2307,6 +2307,122 @@ static const struct hash_testvec crct10dif_tv_template[] = {
}
};
+/*
+ * Streebog test vectors from RFC 6986 and GOST R 34.11-2012
+ */
+static const struct hash_testvec streebog256_tv_template[] = {
+ { /* M1 */
+ .plaintext = "012345678901234567890123456789012345678901234567890123456789012",
+ .psize = 63,
+ .digest =
+ "\x9d\x15\x1e\xef\xd8\x59\x0b\x89"
+ "\xda\xa6\xba\x6c\xb7\x4a\xf9\x27"
+ "\x5d\xd0\x51\x02\x6b\xb1\x49\xa4"
+ "\x52\xfd\x84\xe5\xe5\x7b\x55\x00",
+ },
+ { /* M2 */
+ .plaintext =
+ "\xd1\xe5\x20\xe2\xe5\xf2\xf0\xe8"
+ "\x2c\x20\xd1\xf2\xf0\xe8\xe1\xee"
+ "\xe6\xe8\x20\xe2\xed\xf3\xf6\xe8"
+ "\x2c\x20\xe2\xe5\xfe\xf2\xfa\x20"
+ "\xf1\x20\xec\xee\xf0\xff\x20\xf1"
+ "\xf2\xf0\xe5\xeb\xe0\xec\xe8\x20"
+ "\xed\xe0\x20\xf5\xf0\xe0\xe1\xf0"
+ "\xfb\xff\x20\xef\xeb\xfa\xea\xfb"
+ "\x20\xc8\xe3\xee\xf0\xe5\xe2\xfb",
+ .psize = 72,
+ .digest =
+ "\x9d\xd2\xfe\x4e\x90\x40\x9e\x5d"
+ "\xa8\x7f\x53\x97\x6d\x74\x05\xb0"
+ "\xc0\xca\xc6\x28\xfc\x66\x9a\x74"
+ "\x1d\x50\x06\x3c\x55\x7e\x8f\x50",
+ },
+};
+
+static const struct hash_testvec streebog512_tv_template[] = {
+ { /* M1 */
+ .plaintext = "012345678901234567890123456789012345678901234567890123456789012",
+ .psize = 63,
+ .digest =
+ "\x1b\x54\xd0\x1a\x4a\xf5\xb9\xd5"
+ "\xcc\x3d\x86\xd6\x8d\x28\x54\x62"
+ "\xb1\x9a\xbc\x24\x75\x22\x2f\x35"
+ "\xc0\x85\x12\x2b\xe4\xba\x1f\xfa"
+ "\x00\xad\x30\xf8\x76\x7b\x3a\x82"
+ "\x38\x4c\x65\x74\xf0\x24\xc3\x11"
+ "\xe2\xa4\x81\x33\x2b\x08\xef\x7f"
+ "\x41\x79\x78\x91\xc1\x64\x6f\x48",
+ },
+ { /* M2 */
+ .plaintext =
+ "\xd1\xe5\x20\xe2\xe5\xf2\xf0\xe8"
+ "\x2c\x20\xd1\xf2\xf0\xe8\xe1\xee"
+ "\xe6\xe8\x20\xe2\xed\xf3\xf6\xe8"
+ "\x2c\x20\xe2\xe5\xfe\xf2\xfa\x20"
+ "\xf1\x20\xec\xee\xf0\xff\x20\xf1"
+ "\xf2\xf0\xe5\xeb\xe0\xec\xe8\x20"
+ "\xed\xe0\x20\xf5\xf0\xe0\xe1\xf0"
+ "\xfb\xff\x20\xef\xeb\xfa\xea\xfb"
+ "\x20\xc8\xe3\xee\xf0\xe5\xe2\xfb",
+ .psize = 72,
+ .digest =
+ "\x1e\x88\xe6\x22\x26\xbf\xca\x6f"
+ "\x99\x94\xf1\xf2\xd5\x15\x69\xe0"
+ "\xda\xf8\x47\x5a\x3b\x0f\xe6\x1a"
+ "\x53\x00\xee\xe4\x6d\x96\x13\x76"
+ "\x03\x5f\xe8\x35\x49\xad\xa2\xb8"
+ "\x62\x0f\xcd\x7c\x49\x6c\xe5\xb3"
+ "\x3f\x0c\xb9\xdd\xdc\x2b\x64\x60"
+ "\x14\x3b\x03\xda\xba\xc9\xfb\x28",
+ },
+};
+
+/*
+ * Two HMAC-Streebog test vectors from RFC 7836 and R 50.1.113-2016, Appendix A
+ */
+static const struct hash_testvec hmac_streebog256_tv_template[] = {
+ {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .ksize = 32,
+ .plaintext =
+ "\x01\x26\xbd\xb8\x78\x00\xaf\x21"
+ "\x43\x41\x45\x65\x63\x78\x01\x00",
+ .psize = 16,
+ .digest =
+ "\xa1\xaa\x5f\x7d\xe4\x02\xd7\xb3"
+ "\xd3\x23\xf2\x99\x1c\x8d\x45\x34"
+ "\x01\x31\x37\x01\x0a\x83\x75\x4f"
+ "\xd0\xaf\x6d\x7c\xd4\x92\x2e\xd9",
+ },
+};
+
+static const struct hash_testvec hmac_streebog512_tv_template[] = {
+ {
+ .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+ "\x10\x11\x12\x13\x14\x15\x16\x17"
+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
+ .ksize = 32,
+ .plaintext =
+ "\x01\x26\xbd\xb8\x78\x00\xaf\x21"
+ "\x43\x41\x45\x65\x63\x78\x01\x00",
+ .psize = 16,
+ .digest =
+ "\xa5\x9b\xab\x22\xec\xae\x19\xc6"
+ "\x5f\xbd\xe6\xe5\xf4\xe9\xf5\xd8"
+ "\x54\x9d\x31\xf0\x37\xf9\xdf\x9b"
+ "\x90\x55\x00\xe1\x71\x92\x3a\x77"
+ "\x3d\x5f\x15\x30\xf2\xed\x7e\x96"
+ "\x4c\xb2\xee\xdc\x29\xe9\xad\x2f"
+ "\x3a\xfe\x93\xb2\x81\x4f\x79\xf5"
+ "\x00\x0f\xfc\x03\x66\xc2\x51\xe6",
+ },
+};
+
/* Example vectors below taken from
* http://www.oscca.gov.cn/UpFile/20101222141857786.pdf
*
@@ -5593,6 +5709,1238 @@ static const struct hash_testvec poly1305_tv_template[] = {
},
};
+/* NHPoly1305 test vectors from https://github.com/google/adiantum */
+static const struct hash_testvec nhpoly1305_tv_template[] = {
+ {
+ .key = "\xd2\x5d\x4c\xdd\x8d\x2b\x7f\x7a"
+ "\xd9\xbe\x71\xec\xd1\x83\x52\xe3"
+ "\xe1\xad\xd7\x5c\x0a\x75\x9d\xec"
+ "\x1d\x13\x7e\x5d\x71\x07\xc9\xe4"
+ "\x57\x2d\x44\x68\xcf\xd8\xd6\xc5"
+ "\x39\x69\x7d\x32\x75\x51\x4f\x7e"
+ "\xb2\x4c\xc6\x90\x51\x6e\xd9\xd6"
+ "\xa5\x8b\x2d\xf1\x94\xf9\xf7\x5e"
+ "\x2c\x84\x7b\x41\x0f\x88\x50\x89"
+ "\x30\xd9\xa1\x38\x46\x6c\xc0\x4f"
+ "\xe8\xdf\xdc\x66\xab\x24\x43\x41"
+ "\x91\x55\x29\x65\x86\x28\x5e\x45"
+ "\xd5\x2d\xb7\x80\x08\x9a\xc3\xd4"
+ "\x9a\x77\x0a\xd4\xef\x3e\xe6\x3f"
+ "\x6f\x2f\x9b\x3a\x7d\x12\x1e\x80"
+ "\x6c\x44\xa2\x25\xe1\xf6\x60\xe9"
+ "\x0d\xaf\xc5\x3c\xa5\x79\xae\x64"
+ "\xbc\xa0\x39\xa3\x4d\x10\xe5\x4d"
+ "\xd5\xe7\x89\x7a\x13\xee\x06\x78"
+ "\xdc\xa4\xdc\x14\x27\xe6\x49\x38"
+ "\xd0\xe0\x45\x25\x36\xc5\xf4\x79"
+ "\x2e\x9a\x98\x04\xe4\x2b\x46\x52"
+ "\x7c\x33\xca\xe2\x56\x51\x50\xe2"
+ "\xa5\x9a\xae\x18\x6a\x13\xf8\xd2"
+ "\x21\x31\x66\x02\xe2\xda\x8d\x7e"
+ "\x41\x19\xb2\x61\xee\x48\x8f\xf1"
+ "\x65\x24\x2e\x1e\x68\xce\x05\xd9"
+ "\x2a\xcf\xa5\x3a\x57\xdd\x35\x91"
+ "\x93\x01\xca\x95\xfc\x2b\x36\x04"
+ "\xe6\x96\x97\x28\xf6\x31\xfe\xa3"
+ "\x9d\xf6\x6a\x1e\x80\x8d\xdc\xec"
+ "\xaf\x66\x11\x13\x02\x88\xd5\x27"
+ "\x33\xb4\x1a\xcd\xa3\xf6\xde\x31"
+ "\x8e\xc0\x0e\x6c\xd8\x5a\x97\x5e"
+ "\xdd\xfd\x60\x69\x38\x46\x3f\x90"
+ "\x5e\x97\xd3\x32\x76\xc7\x82\x49"
+ "\xfe\xba\x06\x5f\x2f\xa2\xfd\xff"
+ "\x80\x05\x40\xe4\x33\x03\xfb\x10"
+ "\xc0\xde\x65\x8c\xc9\x8d\x3a\x9d"
+ "\xb5\x7b\x36\x4b\xb5\x0c\xcf\x00"
+ "\x9c\x87\xe4\x49\xad\x90\xda\x4a"
+ "\xdd\xbd\xff\xe2\x32\x57\xd6\x78"
+ "\x36\x39\x6c\xd3\x5b\x9b\x88\x59"
+ "\x2d\xf0\x46\xe4\x13\x0e\x2b\x35"
+ "\x0d\x0f\x73\x8a\x4f\x26\x84\x75"
+ "\x88\x3c\xc5\x58\x66\x18\x1a\xb4"
+ "\x64\x51\x34\x27\x1b\xa4\x11\xc9"
+ "\x6d\x91\x8a\xfa\x32\x60\x9d\xd7"
+ "\x87\xe5\xaa\x43\x72\xf8\xda\xd1"
+ "\x48\x44\x13\x61\xdc\x8c\x76\x17"
+ "\x0c\x85\x4e\xf3\xdd\xa2\x42\xd2"
+ "\x74\xc1\x30\x1b\xeb\x35\x31\x29"
+ "\x5b\xd7\x4c\x94\x46\x35\xa1\x23"
+ "\x50\xf2\xa2\x8e\x7e\x4f\x23\x4f"
+ "\x51\xff\xe2\xc9\xa3\x7d\x56\x8b"
+ "\x41\xf2\xd0\xc5\x57\x7e\x59\xac"
+ "\xbb\x65\xf3\xfe\xf7\x17\xef\x63"
+ "\x7c\x6f\x23\xdd\x22\x8e\xed\x84"
+ "\x0e\x3b\x09\xb3\xf3\xf4\x8f\xcd"
+ "\x37\xa8\xe1\xa7\x30\xdb\xb1\xa2"
+ "\x9c\xa2\xdf\x34\x17\x3e\x68\x44"
+ "\xd0\xde\x03\x50\xd1\x48\x6b\x20"
+ "\xe2\x63\x45\xa5\xea\x87\xc2\x42"
+ "\x95\x03\x49\x05\xed\xe0\x90\x29"
+ "\x1a\xb8\xcf\x9b\x43\xcf\x29\x7a"
+ "\x63\x17\x41\x9f\xe0\xc9\x10\xfd"
+ "\x2c\x56\x8c\x08\x55\xb4\xa9\x27"
+ "\x0f\x23\xb1\x05\x6a\x12\x46\xc7"
+ "\xe1\xfe\x28\x93\x93\xd7\x2f\xdc"
+ "\x98\x30\xdb\x75\x8a\xbe\x97\x7a"
+ "\x02\xfb\x8c\xba\xbe\x25\x09\xbe"
+ "\xce\xcb\xa2\xef\x79\x4d\x0e\x9d"
+ "\x1b\x9d\xb6\x39\x34\x38\xfa\x07"
+ "\xec\xe8\xfc\x32\x85\x1d\xf7\x85"
+ "\x63\xc3\x3c\xc0\x02\x75\xd7\x3f"
+ "\xb2\x68\x60\x66\x65\x81\xc6\xb1"
+ "\x42\x65\x4b\x4b\x28\xd7\xc7\xaa"
+ "\x9b\xd2\xdc\x1b\x01\xe0\x26\x39"
+ "\x01\xc1\x52\x14\xd1\x3f\xb7\xe6"
+ "\x61\x41\xc7\x93\xd2\xa2\x67\xc6"
+ "\xf7\x11\xb5\xf5\xea\xdd\x19\xfb"
+ "\x4d\x21\x12\xd6\x7d\xf1\x10\xb0"
+ "\x89\x07\xc7\x5a\x52\x73\x70\x2f"
+ "\x32\xef\x65\x2b\x12\xb2\xf0\xf5"
+ "\x20\xe0\x90\x59\x7e\x64\xf1\x4c"
+ "\x41\xb3\xa5\x91\x08\xe6\x5e\x5f"
+ "\x05\x56\x76\xb4\xb0\xcd\x70\x53"
+ "\x10\x48\x9c\xff\xc2\x69\x55\x24"
+ "\x87\xef\x84\xea\xfb\xa7\xbf\xa0"
+ "\x91\x04\xad\x4f\x8b\x57\x54\x4b"
+ "\xb6\xe9\xd1\xac\x37\x2f\x1d\x2e"
+ "\xab\xa5\xa4\xe8\xff\xfb\xd9\x39"
+ "\x2f\xb7\xac\xd1\xfe\x0b\x9a\x80"
+ "\x0f\xb6\xf4\x36\x39\x90\x51\xe3"
+ "\x0a\x2f\xb6\x45\x76\x89\xcd\x61"
+ "\xfe\x48\x5f\x75\x1d\x13\x00\x62"
+ "\x80\x24\x47\xe7\xbc\x37\xd7\xe3"
+ "\x15\xe8\x68\x22\xaf\x80\x6f\x4b"
+ "\xa8\x9f\x01\x10\x48\x14\xc3\x02"
+ "\x52\xd2\xc7\x75\x9b\x52\x6d\x30"
+ "\xac\x13\x85\xc8\xf7\xa3\x58\x4b"
+ "\x49\xf7\x1c\x45\x55\x8c\x39\x9a"
+ "\x99\x6d\x97\x27\x27\xe6\xab\xdd"
+ "\x2c\x42\x1b\x35\xdd\x9d\x73\xbb"
+ "\x6c\xf3\x64\xf1\xfb\xb9\xf7\xe6"
+ "\x4a\x3c\xc0\x92\xc0\x2e\xb7\x1a"
+ "\xbe\xab\xb3\x5a\xe5\xea\xb1\x48"
+ "\x58\x13\x53\x90\xfd\xc3\x8e\x54"
+ "\xf9\x18\x16\x73\xe8\xcb\x6d\x39"
+ "\x0e\xd7\xe0\xfe\xb6\x9f\x43\x97"
+ "\xe8\xd0\x85\x56\x83\x3e\x98\x68"
+ "\x7f\xbd\x95\xa8\x9a\x61\x21\x8f"
+ "\x06\x98\x34\xa6\xc8\xd6\x1d\xf3"
+ "\x3d\x43\xa4\x9a\x8c\xe5\xd3\x5a"
+ "\x32\xa2\x04\x22\xa4\x19\x1a\x46"
+ "\x42\x7e\x4d\xe5\xe0\xe6\x0e\xca"
+ "\xd5\x58\x9d\x2c\xaf\xda\x33\x5c"
+ "\xb0\x79\x9e\xc9\xfc\xca\xf0\x2f"
+ "\xa8\xb2\x77\xeb\x7a\xa2\xdd\x37"
+ "\x35\x83\x07\xd6\x02\x1a\xb6\x6c"
+ "\x24\xe2\x59\x08\x0e\xfd\x3e\x46"
+ "\xec\x40\x93\xf4\x00\x26\x4f\x2a"
+ "\xff\x47\x2f\xeb\x02\x92\x26\x5b"
+ "\x53\x17\xc2\x8d\x2a\xc7\xa3\x1b"
+ "\xcd\xbc\xa7\xe8\xd1\x76\xe3\x80"
+ "\x21\xca\x5d\x3b\xe4\x9c\x8f\xa9"
+ "\x5b\x7f\x29\x7f\x7c\xd8\xed\x6d"
+ "\x8c\xb2\x86\x85\xe7\x77\xf2\x85"
+ "\xab\x38\xa9\x9d\xc1\x4e\xc5\x64"
+ "\x33\x73\x8b\x59\x03\xad\x05\xdf"
+ "\x25\x98\x31\xde\xef\x13\xf1\x9b"
+ "\x3c\x91\x9d\x7b\xb1\xfa\xe6\xbf"
+ "\x5b\xed\xa5\x55\xe6\xea\x6c\x74"
+ "\xf4\xb9\xe4\x45\x64\x72\x81\xc2"
+ "\x4c\x28\xd4\xcd\xac\xe2\xde\xf9"
+ "\xeb\x5c\xeb\x61\x60\x5a\xe5\x28",
+ .ksize = 1088,
+ .plaintext = "",
+ .psize = 0,
+ .digest = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ }, {
+ .key = "\x29\x21\x43\xcb\xcb\x13\x07\xde"
+ "\xbf\x48\xdf\x8a\x7f\xa2\x84\xde"
+ "\x72\x23\x9d\xf5\xf0\x07\xf2\x4c"
+ "\x20\x3a\x93\xb9\xcd\x5d\xfe\xcb"
+ "\x99\x2c\x2b\x58\xc6\x50\x5f\x94"
+ "\x56\xc3\x7c\x0d\x02\x3f\xb8\x5e"
+ "\x7b\xc0\x6c\x51\x34\x76\xc0\x0e"
+ "\xc6\x22\xc8\x9e\x92\xa0\x21\xc9"
+ "\x85\x5c\x7c\xf8\xe2\x64\x47\xc9"
+ "\xe4\xa2\x57\x93\xf8\xa2\x69\xcd"
+ "\x62\x98\x99\xf4\xd7\x7b\x14\xb1"
+ "\xd8\x05\xff\x04\x15\xc9\xe1\x6e"
+ "\x9b\xe6\x50\x6b\x0b\x3f\x22\x1f"
+ "\x08\xde\x0c\x5b\x08\x7e\xc6\x2f"
+ "\x6c\xed\xd6\xb2\x15\xa4\xb3\xf9"
+ "\xa7\x46\x38\x2a\xea\x69\xa5\xde"
+ "\x02\xc3\x96\x89\x4d\x55\x3b\xed"
+ "\x3d\x3a\x85\x77\xbf\x97\x45\x5c"
+ "\x9e\x02\x69\xe2\x1b\x68\xbe\x96"
+ "\xfb\x64\x6f\x0f\xf6\x06\x40\x67"
+ "\xfa\x04\xe3\x55\xfa\xbe\xa4\x60"
+ "\xef\x21\x66\x97\xe6\x9d\x5c\x1f"
+ "\x62\x37\xaa\x31\xde\xe4\x9c\x28"
+ "\x95\xe0\x22\x86\xf4\x4d\xf3\x07"
+ "\xfd\x5f\x3a\x54\x2c\x51\x80\x71"
+ "\xba\x78\x69\x5b\x65\xab\x1f\x81"
+ "\xed\x3b\xff\x34\xa3\xfb\xbc\x73"
+ "\x66\x7d\x13\x7f\xdf\x6e\xe2\xe2"
+ "\xeb\x4f\x6c\xda\x7d\x33\x57\xd0"
+ "\xd3\x7c\x95\x4f\x33\x58\x21\xc7"
+ "\xc0\xe5\x6f\x42\x26\xc6\x1f\x5e"
+ "\x85\x1b\x98\x9a\xa2\x1e\x55\x77"
+ "\x23\xdf\x81\x5e\x79\x55\x05\xfc"
+ "\xfb\xda\xee\xba\x5a\xba\xf7\x77"
+ "\x7f\x0e\xd3\xe1\x37\xfe\x8d\x2b"
+ "\xd5\x3f\xfb\xd0\xc0\x3c\x0b\x3f"
+ "\xcf\x3c\x14\xcf\xfb\x46\x72\x4c"
+ "\x1f\x39\xe2\xda\x03\x71\x6d\x23"
+ "\xef\x93\xcd\x39\xd9\x37\x80\x4d"
+ "\x65\x61\xd1\x2c\x03\xa9\x47\x72"
+ "\x4d\x1e\x0e\x16\x33\x0f\x21\x17"
+ "\xec\x92\xea\x6f\x37\x22\xa4\xd8"
+ "\x03\x33\x9e\xd8\x03\x69\x9a\xe8"
+ "\xb2\x57\xaf\x78\x99\x05\x12\xab"
+ "\x48\x90\x80\xf0\x12\x9b\x20\x64"
+ "\x7a\x1d\x47\x5f\xba\x3c\xf9\xc3"
+ "\x0a\x0d\x8d\xa1\xf9\x1b\x82\x13"
+ "\x3e\x0d\xec\x0a\x83\xc0\x65\xe1"
+ "\xe9\x95\xff\x97\xd6\xf2\xe4\xd5"
+ "\x86\xc0\x1f\x29\x27\x63\xd7\xde"
+ "\xb7\x0a\x07\x99\x04\x2d\xa3\x89"
+ "\xa2\x43\xcf\xf3\xe1\x43\xac\x4a"
+ "\x06\x97\xd0\x05\x4f\x87\xfa\xf9"
+ "\x9b\xbf\x52\x70\xbd\xbc\x6c\xf3"
+ "\x03\x13\x60\x41\x28\x09\xec\xcc"
+ "\xb1\x1a\xec\xd6\xfb\x6f\x2a\x89"
+ "\x5d\x0b\x53\x9c\x59\xc1\x84\x21"
+ "\x33\x51\x47\x19\x31\x9c\xd4\x0a"
+ "\x4d\x04\xec\x50\x90\x61\xbd\xbc"
+ "\x7e\xc8\xd9\x6c\x98\x1d\x45\x41"
+ "\x17\x5e\x97\x1c\xc5\xa8\xe8\xea"
+ "\x46\x58\x53\xf7\x17\xd5\xad\x11"
+ "\xc8\x54\xf5\x7a\x33\x90\xf5\x19"
+ "\xba\x36\xb4\xfc\x52\xa5\x72\x3d"
+ "\x14\xbb\x55\xa7\xe9\xe3\x12\xf7"
+ "\x1c\x30\xa2\x82\x03\xbf\x53\x91"
+ "\x2e\x60\x41\x9f\x5b\x69\x39\xf6"
+ "\x4d\xc8\xf8\x46\x7a\x7f\xa4\x98"
+ "\x36\xff\x06\xcb\xca\xe7\x33\xf2"
+ "\xc0\x4a\xf4\x3c\x14\x44\x5f\x6b"
+ "\x75\xef\x02\x36\x75\x08\x14\xfd"
+ "\x10\x8e\xa5\x58\xd0\x30\x46\x49"
+ "\xaf\x3a\xf8\x40\x3d\x35\xdb\x84"
+ "\x11\x2e\x97\x6a\xb7\x87\x7f\xad"
+ "\xf1\xfa\xa5\x63\x60\xd8\x5e\xbf"
+ "\x41\x78\x49\xcf\x77\xbb\x56\xbb"
+ "\x7d\x01\x67\x05\x22\xc8\x8f\x41"
+ "\xba\x81\xd2\xca\x2c\x38\xac\x76"
+ "\x06\xc1\x1a\xc2\xce\xac\x90\x67"
+ "\x57\x3e\x20\x12\x5b\xd9\x97\x58"
+ "\x65\x05\xb7\x04\x61\x7e\xd8\x3a"
+ "\xbf\x55\x3b\x13\xe9\x34\x5a\x37"
+ "\x36\xcb\x94\x45\xc5\x32\xb3\xa0"
+ "\x0c\x3e\x49\xc5\xd3\xed\xa7\xf0"
+ "\x1c\x69\xcc\xea\xcc\x83\xc9\x16"
+ "\x95\x72\x4b\xf4\x89\xd5\xb9\x10"
+ "\xf6\x2d\x60\x15\xea\x3c\x06\x66"
+ "\x9f\x82\xad\x17\xce\xd2\xa4\x48"
+ "\x7c\x65\xd9\xf8\x02\x4d\x9b\x4c"
+ "\x89\x06\x3a\x34\x85\x48\x89\x86"
+ "\xf9\x24\xa9\x54\x72\xdb\x44\x95"
+ "\xc7\x44\x1c\x19\x11\x4c\x04\xdc"
+ "\x13\xb9\x67\xc8\xc3\x3a\x6a\x50"
+ "\xfa\xd1\xfb\xe1\x88\xb6\xf1\xa3"
+ "\xc5\x3b\xdc\x38\x45\x16\x26\x02"
+ "\x3b\xb8\x8f\x8b\x58\x7d\x23\x04"
+ "\x50\x6b\x81\x9f\xae\x66\xac\x6f"
+ "\xcf\x2a\x9d\xf1\xfd\x1d\x57\x07"
+ "\xbe\x58\xeb\x77\x0c\xe3\xc2\x19"
+ "\x14\x74\x1b\x51\x1c\x4f\x41\xf3"
+ "\x32\x89\xb3\xe7\xde\x62\xf6\x5f"
+ "\xc7\x6a\x4a\x2a\x5b\x0f\x5f\x87"
+ "\x9c\x08\xb9\x02\x88\xc8\x29\xb7"
+ "\x94\x52\xfa\x52\xfe\xaa\x50\x10"
+ "\xba\x48\x75\x5e\x11\x1b\xe6\x39"
+ "\xd7\x82\x2c\x87\xf1\x1e\xa4\x38"
+ "\x72\x3e\x51\xe7\xd8\x3e\x5b\x7b"
+ "\x31\x16\x89\xba\xd6\xad\x18\x5e"
+ "\xba\xf8\x12\xb3\xf4\x6c\x47\x30"
+ "\xc0\x38\x58\xb3\x10\x8d\x58\x5d"
+ "\xb4\xfb\x19\x7e\x41\xc3\x66\xb8"
+ "\xd6\x72\x84\xe1\x1a\xc2\x71\x4c"
+ "\x0d\x4a\x21\x7a\xab\xa2\xc0\x36"
+ "\x15\xc5\xe9\x46\xd7\x29\x17\x76"
+ "\x5e\x47\x36\x7f\x72\x05\xa7\xcc"
+ "\x36\x63\xf9\x47\x7d\xe6\x07\x3c"
+ "\x8b\x79\x1d\x96\x61\x8d\x90\x65"
+ "\x7c\xf5\xeb\x4e\x6e\x09\x59\x6d"
+ "\x62\x50\x1b\x0f\xe0\xdc\x78\xf2"
+ "\x5b\x83\x1a\xa1\x11\x75\xfd\x18"
+ "\xd7\xe2\x8d\x65\x14\x21\xce\xbe"
+ "\xb5\x87\xe3\x0a\xda\x24\x0a\x64"
+ "\xa9\x9f\x03\x8d\x46\x5d\x24\x1a"
+ "\x8a\x0c\x42\x01\xca\xb1\x5f\x7c"
+ "\xa5\xac\x32\x4a\xb8\x07\x91\x18"
+ "\x6f\xb0\x71\x3c\xc9\xb1\xa8\xf8"
+ "\x5f\x69\xa5\xa1\xca\x9e\x7a\xaa"
+ "\xac\xe9\xc7\x47\x41\x75\x25\xc3"
+ "\x73\xe2\x0b\xdd\x6d\x52\x71\xbe"
+ "\xc5\xdc\xb4\xe7\x01\x26\x53\x77"
+ "\x86\x90\x85\x68\x6b\x7b\x03\x53"
+ "\xda\x52\x52\x51\x68\xc8\xf3\xec"
+ "\x6c\xd5\x03\x7a\xa3\x0e\xb4\x02"
+ "\x5f\x1a\xab\xee\xca\x67\x29\x7b"
+ "\xbd\x96\x59\xb3\x8b\x32\x7a\x92"
+ "\x9f\xd8\x25\x2b\xdf\xc0\x4c\xda",
+ .ksize = 1088,
+ .plaintext = "\xbc\xda\x81\xa8\x78\x79\x1c\xbf"
+ "\x77\x53\xba\x4c\x30\x5b\xb8\x33",
+ .psize = 16,
+ .digest = "\x04\xbf\x7f\x6a\xce\x72\xea\x6a"
+ "\x79\xdb\xb0\xc9\x60\xf6\x12\xcc",
+ .np = 6,
+ .tap = { 4, 4, 1, 1, 1, 5 },
+ }, {
+ .key = "\x65\x4d\xe3\xf8\xd2\x4c\xac\x28"
+ "\x68\xf5\xb3\x81\x71\x4b\xa1\xfa"
+ "\x04\x0e\xd3\x81\x36\xbe\x0c\x81"
+ "\x5e\xaf\xbc\x3a\xa4\xc0\x8e\x8b"
+ "\x55\x63\xd3\x52\x97\x88\xd6\x19"
+ "\xbc\x96\xdf\x49\xff\x04\x63\xf5"
+ "\x0c\x11\x13\xaa\x9e\x1f\x5a\xf7"
+ "\xdd\xbd\x37\x80\xc3\xd0\xbe\xa7"
+ "\x05\xc8\x3c\x98\x1e\x05\x3c\x84"
+ "\x39\x61\xc4\xed\xed\x71\x1b\xc4"
+ "\x74\x45\x2c\xa1\x56\x70\x97\xfd"
+ "\x44\x18\x07\x7d\xca\x60\x1f\x73"
+ "\x3b\x6d\x21\xcb\x61\x87\x70\x25"
+ "\x46\x21\xf1\x1f\x21\x91\x31\x2d"
+ "\x5d\xcc\xb7\xd1\x84\x3e\x3d\xdb"
+ "\x03\x53\x2a\x82\xa6\x9a\x95\xbc"
+ "\x1a\x1e\x0a\x5e\x07\x43\xab\x43"
+ "\xaf\x92\x82\x06\x91\x04\x09\xf4"
+ "\x17\x0a\x9a\x2c\x54\xdb\xb8\xf4"
+ "\xd0\xf0\x10\x66\x24\x8d\xcd\xda"
+ "\xfe\x0e\x45\x9d\x6f\xc4\x4e\xf4"
+ "\x96\xaf\x13\xdc\xa9\xd4\x8c\xc4"
+ "\xc8\x57\x39\x3c\xc2\xd3\x0a\x76"
+ "\x4a\x1f\x75\x83\x44\xc7\xd1\x39"
+ "\xd8\xb5\x41\xba\x73\x87\xfa\x96"
+ "\xc7\x18\x53\xfb\x9b\xda\xa0\x97"
+ "\x1d\xee\x60\x85\x9e\x14\xc3\xce"
+ "\xc4\x05\x29\x3b\x95\x30\xa3\xd1"
+ "\x9f\x82\x6a\x04\xf5\xa7\x75\x57"
+ "\x82\x04\xfe\x71\x51\x71\xb1\x49"
+ "\x50\xf8\xe0\x96\xf1\xfa\xa8\x88"
+ "\x3f\xa0\x86\x20\xd4\x60\x79\x59"
+ "\x17\x2d\xd1\x09\xf4\xec\x05\x57"
+ "\xcf\x62\x7e\x0e\x7e\x60\x78\xe6"
+ "\x08\x60\x29\xd8\xd5\x08\x1a\x24"
+ "\xc4\x6c\x24\xe7\x92\x08\x3d\x8a"
+ "\x98\x7a\xcf\x99\x0a\x65\x0e\xdc"
+ "\x8c\x8a\xbe\x92\x82\x91\xcc\x62"
+ "\x30\xb6\xf4\x3f\xc6\x8a\x7f\x12"
+ "\x4a\x8a\x49\xfa\x3f\x5c\xd4\x5a"
+ "\xa6\x82\xa3\xe6\xaa\x34\x76\xb2"
+ "\xab\x0a\x30\xef\x6c\x77\x58\x3f"
+ "\x05\x6b\xcc\x5c\xae\xdc\xd7\xb9"
+ "\x51\x7e\x8d\x32\x5b\x24\x25\xbe"
+ "\x2b\x24\x01\xcf\x80\xda\x16\xd8"
+ "\x90\x72\x2c\xad\x34\x8d\x0c\x74"
+ "\x02\xcb\xfd\xcf\x6e\xef\x97\xb5"
+ "\x4c\xf2\x68\xca\xde\x43\x9e\x8a"
+ "\xc5\x5f\x31\x7f\x14\x71\x38\xec"
+ "\xbd\x98\xe5\x71\xc4\xb5\xdb\xef"
+ "\x59\xd2\xca\xc0\xc1\x86\x75\x01"
+ "\xd4\x15\x0d\x6f\xa4\xf7\x7b\x37"
+ "\x47\xda\x18\x93\x63\xda\xbe\x9e"
+ "\x07\xfb\xb2\x83\xd5\xc4\x34\x55"
+ "\xee\x73\xa1\x42\x96\xf9\x66\x41"
+ "\xa4\xcc\xd2\x93\x6e\xe1\x0a\xbb"
+ "\xd2\xdd\x18\x23\xe6\x6b\x98\x0b"
+ "\x8a\x83\x59\x2c\xc3\xa6\x59\x5b"
+ "\x01\x22\x59\xf7\xdc\xb0\x87\x7e"
+ "\xdb\x7d\xf4\x71\x41\xab\xbd\xee"
+ "\x79\xbe\x3c\x01\x76\x0b\x2d\x0a"
+ "\x42\xc9\x77\x8c\xbb\x54\x95\x60"
+ "\x43\x2e\xe0\x17\x52\xbd\x90\xc9"
+ "\xc2\x2c\xdd\x90\x24\x22\x76\x40"
+ "\x5c\xb9\x41\xc9\xa1\xd5\xbd\xe3"
+ "\x44\xe0\xa4\xab\xcc\xb8\xe2\x32"
+ "\x02\x15\x04\x1f\x8c\xec\x5d\x14"
+ "\xac\x18\xaa\xef\x6e\x33\x19\x6e"
+ "\xde\xfe\x19\xdb\xeb\x61\xca\x18"
+ "\xad\xd8\x3d\xbf\x09\x11\xc7\xa5"
+ "\x86\x0b\x0f\xe5\x3e\xde\xe8\xd9"
+ "\x0a\x69\x9e\x4c\x20\xff\xf9\xc5"
+ "\xfa\xf8\xf3\x7f\xa5\x01\x4b\x5e"
+ "\x0f\xf0\x3b\x68\xf0\x46\x8c\x2a"
+ "\x7a\xc1\x8f\xa0\xfe\x6a\x5b\x44"
+ "\x70\x5c\xcc\x92\x2c\x6f\x0f\xbd"
+ "\x25\x3e\xb7\x8e\x73\x58\xda\xc9"
+ "\xa5\xaa\x9e\xf3\x9b\xfd\x37\x3e"
+ "\xe2\x88\xa4\x7b\xc8\x5c\xa8\x93"
+ "\x0e\xe7\x9a\x9c\x2e\x95\x18\x9f"
+ "\xc8\x45\x0c\x88\x9e\x53\x4f\x3a"
+ "\x76\xc1\x35\xfa\x17\xd8\xac\xa0"
+ "\x0c\x2d\x47\x2e\x4f\x69\x9b\xf7"
+ "\xd0\xb6\x96\x0c\x19\xb3\x08\x01"
+ "\x65\x7a\x1f\xc7\x31\x86\xdb\xc8"
+ "\xc1\x99\x8f\xf8\x08\x4a\x9d\x23"
+ "\x22\xa8\xcf\x27\x01\x01\x88\x93"
+ "\x9c\x86\x45\xbd\xe0\x51\xca\x52"
+ "\x84\xba\xfe\x03\xf7\xda\xc5\xce"
+ "\x3e\x77\x75\x86\xaf\x84\xc8\x05"
+ "\x44\x01\x0f\x02\xf3\x58\xb0\x06"
+ "\x5a\xd7\x12\x30\x8d\xdf\x1f\x1f"
+ "\x0a\xe6\xd2\xea\xf6\x3a\x7a\x99"
+ "\x63\xe8\xd2\xc1\x4a\x45\x8b\x40"
+ "\x4d\x0a\xa9\x76\x92\xb3\xda\x87"
+ "\x36\x33\xf0\x78\xc3\x2f\x5f\x02"
+ "\x1a\x6a\x2c\x32\xcd\x76\xbf\xbd"
+ "\x5a\x26\x20\x28\x8c\x8c\xbc\x52"
+ "\x3d\x0a\xc9\xcb\xab\xa4\x21\xb0"
+ "\x54\x40\x81\x44\xc7\xd6\x1c\x11"
+ "\x44\xc6\x02\x92\x14\x5a\xbf\x1a"
+ "\x09\x8a\x18\xad\xcd\x64\x3d\x53"
+ "\x4a\xb6\xa5\x1b\x57\x0e\xef\xe0"
+ "\x8c\x44\x5f\x7d\xbd\x6c\xfd\x60"
+ "\xae\x02\x24\xb6\x99\xdd\x8c\xaf"
+ "\x59\x39\x75\x3c\xd1\x54\x7b\x86"
+ "\xcc\x99\xd9\x28\x0c\xb0\x94\x62"
+ "\xf9\x51\xd1\x19\x96\x2d\x66\xf5"
+ "\x55\xcf\x9e\x59\xe2\x6b\x2c\x08"
+ "\xc0\x54\x48\x24\x45\xc3\x8c\x73"
+ "\xea\x27\x6e\x66\x7d\x1d\x0e\x6e"
+ "\x13\xe8\x56\x65\x3a\xb0\x81\x5c"
+ "\xf0\xe8\xd8\x00\x6b\xcd\x8f\xad"
+ "\xdd\x53\xf3\xa4\x6c\x43\xd6\x31"
+ "\xaf\xd2\x76\x1e\x91\x12\xdb\x3c"
+ "\x8c\xc2\x81\xf0\x49\xdb\xe2\x6b"
+ "\x76\x62\x0a\x04\xe4\xaa\x8a\x7c"
+ "\x08\x0b\x5d\xd0\xee\x1d\xfb\xc4"
+ "\x02\x75\x42\xd6\xba\xa7\x22\xa8"
+ "\x47\x29\xb7\x85\x6d\x93\x3a\xdb"
+ "\x00\x53\x0b\xa2\xeb\xf8\xfe\x01"
+ "\x6f\x8a\x31\xd6\x17\x05\x6f\x67"
+ "\x88\x95\x32\xfe\x4f\xa6\x4b\xf8"
+ "\x03\xe4\xcd\x9a\x18\xe8\x4e\x2d"
+ "\xf7\x97\x9a\x0c\x7d\x9f\x7e\x44"
+ "\x69\x51\xe0\x32\x6b\x62\x86\x8f"
+ "\xa6\x8e\x0b\x21\x96\xe5\xaf\x77"
+ "\xc0\x83\xdf\xa5\x0e\xd0\xa1\x04"
+ "\xaf\xc1\x10\xcb\x5a\x40\xe4\xe3"
+ "\x38\x7e\x07\xe8\x4d\xfa\xed\xc5"
+ "\xf0\x37\xdf\xbb\x8a\xcf\x3d\xdc"
+ "\x61\xd2\xc6\x2b\xff\x07\xc9\x2f"
+ "\x0c\x2d\x5c\x07\xa8\x35\x6a\xfc"
+ "\xae\x09\x03\x45\x74\x51\x4d\xc4"
+ "\xb8\x23\x87\x4a\x99\x27\x20\x87"
+ "\x62\x44\x0a\x4a\xce\x78\x47\x22",
+ .ksize = 1088,
+ .plaintext = "\x8e\xb0\x4c\xde\x9c\x4a\x04\x5a"
+ "\xf6\xa9\x7f\x45\x25\xa5\x7b\x3a"
+ "\xbc\x4d\x73\x39\x81\xb5\xbd\x3d"
+ "\x21\x6f\xd7\x37\x50\x3c\x7b\x28"
+ "\xd1\x03\x3a\x17\xed\x7b\x7c\x2a"
+ "\x16\xbc\xdf\x19\x89\x52\x71\x31"
+ "\xb6\xc0\xfd\xb5\xd3\xba\x96\x99"
+ "\xb6\x34\x0b\xd0\x99\x93\xfc\x1a"
+ "\x01\x3c\x85\xc6\x9b\x78\x5c\x8b"
+ "\xfe\xae\xd2\xbf\xb2\x6f\xf9\xed"
+ "\xc8\x25\x17\xfe\x10\x3b\x7d\xda"
+ "\xf4\x8d\x35\x4b\x7c\x7b\x82\xe7"
+ "\xc2\xb3\xee\x60\x4a\x03\x86\xc9"
+ "\x4e\xb5\xc4\xbe\xd2\xbd\x66\xf1"
+ "\x13\xf1\x09\xab\x5d\xca\x63\x1f"
+ "\xfc\xfb\x57\x2a\xfc\xca\x66\xd8"
+ "\x77\x84\x38\x23\x1d\xac\xd3\xb3"
+ "\x7a\xad\x4c\x70\xfa\x9c\xc9\x61"
+ "\xa6\x1b\xba\x33\x4b\x4e\x33\xec"
+ "\xa0\xa1\x64\x39\x40\x05\x1c\xc2"
+ "\x3f\x49\x9d\xae\xf2\xc5\xf2\xc5"
+ "\xfe\xe8\xf4\xc2\xf9\x96\x2d\x28"
+ "\x92\x30\x44\xbc\xd2\x7f\xe1\x6e"
+ "\x62\x02\x8f\x3d\x1c\x80\xda\x0e"
+ "\x6a\x90\x7e\x75\xff\xec\x3e\xc4"
+ "\xcd\x16\x34\x3b\x05\x6d\x4d\x20"
+ "\x1c\x7b\xf5\x57\x4f\xfa\x3d\xac"
+ "\xd0\x13\x55\xe8\xb3\xe1\x1b\x78"
+ "\x30\xe6\x9f\x84\xd4\x69\xd1\x08"
+ "\x12\x77\xa7\x4a\xbd\xc0\xf2\xd2"
+ "\x78\xdd\xa3\x81\x12\xcb\x6c\x14"
+ "\x90\x61\xe2\x84\xc6\x2b\x16\xcc"
+ "\x40\x99\x50\x88\x01\x09\x64\x4f"
+ "\x0a\x80\xbe\x61\xae\x46\xc9\x0a"
+ "\x5d\xe0\xfb\x72\x7a\x1a\xdd\x61"
+ "\x63\x20\x05\xa0\x4a\xf0\x60\x69"
+ "\x7f\x92\xbc\xbf\x4e\x39\x4d\xdd"
+ "\x74\xd1\xb7\xc0\x5a\x34\xb7\xae"
+ "\x76\x65\x2e\xbc\x36\xb9\x04\x95"
+ "\x42\xe9\x6f\xca\x78\xb3\x72\x07"
+ "\xa3\xba\x02\x94\x67\x4c\xb1\xd7"
+ "\xe9\x30\x0d\xf0\x3b\xb8\x10\x6d"
+ "\xea\x2b\x21\xbf\x74\x59\x82\x97"
+ "\x85\xaa\xf1\xd7\x54\x39\xeb\x05"
+ "\xbd\xf3\x40\xa0\x97\xe6\x74\xfe"
+ "\xb4\x82\x5b\xb1\x36\xcb\xe8\x0d"
+ "\xce\x14\xd9\xdf\xf1\x94\x22\xcd"
+ "\xd6\x00\xba\x04\x4c\x05\x0c\xc0"
+ "\xd1\x5a\xeb\x52\xd5\xa8\x8e\xc8"
+ "\x97\xa1\xaa\xc1\xea\xc1\xbe\x7c"
+ "\x36\xb3\x36\xa0\xc6\x76\x66\xc5"
+ "\xe2\xaf\xd6\x5c\xe2\xdb\x2c\xb3"
+ "\x6c\xb9\x99\x7f\xff\x9f\x03\x24"
+ "\xe1\x51\x44\x66\xd8\x0c\x5d\x7f"
+ "\x5c\x85\x22\x2a\xcf\x6d\x79\x28"
+ "\xab\x98\x01\x72\xfe\x80\x87\x5f"
+ "\x46\xba\xef\x81\x24\xee\xbf\xb0"
+ "\x24\x74\xa3\x65\x97\x12\xc4\xaf"
+ "\x8b\xa0\x39\xda\x8a\x7e\x74\x6e"
+ "\x1b\x42\xb4\x44\x37\xfc\x59\xfd"
+ "\x86\xed\xfb\x8c\x66\x33\xda\x63"
+ "\x75\xeb\xe1\xa4\x85\x4f\x50\x8f"
+ "\x83\x66\x0d\xd3\x37\xfa\xe6\x9c"
+ "\x4f\x30\x87\x35\x18\xe3\x0b\xb7"
+ "\x6e\x64\x54\xcd\x70\xb3\xde\x54"
+ "\xb7\x1d\xe6\x4c\x4d\x55\x12\x12"
+ "\xaf\x5f\x7f\x5e\xee\x9d\xe8\x8e"
+ "\x32\x9d\x4e\x75\xeb\xc6\xdd\xaa"
+ "\x48\x82\xa4\x3f\x3c\xd7\xd3\xa8"
+ "\x63\x9e\x64\xfe\xe3\x97\x00\x62"
+ "\xe5\x40\x5d\xc3\xad\x72\xe1\x28"
+ "\x18\x50\xb7\x75\xef\xcd\x23\xbf"
+ "\x3f\xc0\x51\x36\xf8\x41\xc3\x08"
+ "\xcb\xf1\x8d\x38\x34\xbd\x48\x45"
+ "\x75\xed\xbc\x65\x7b\xb5\x0c\x9b"
+ "\xd7\x67\x7d\x27\xb4\xc4\x80\xd7"
+ "\xa9\xb9\xc7\x4a\x97\xaa\xda\xc8"
+ "\x3c\x74\xcf\x36\x8f\xe4\x41\xe3"
+ "\xd4\xd3\x26\xa7\xf3\x23\x9d\x8f"
+ "\x6c\x20\x05\x32\x3e\xe0\xc3\xc8"
+ "\x56\x3f\xa7\x09\xb7\xfb\xc7\xf7"
+ "\xbe\x2a\xdd\x0f\x06\x7b\x0d\xdd"
+ "\xb0\xb4\x86\x17\xfd\xb9\x04\xe5"
+ "\xc0\x64\x5d\xad\x2a\x36\x38\xdb"
+ "\x24\xaf\x5b\xff\xca\xf9\x41\xe8"
+ "\xf9\x2f\x1e\x5e\xf9\xf5\xd5\xf2"
+ "\xb2\x88\xca\xc9\xa1\x31\xe2\xe8"
+ "\x10\x95\x65\xbf\xf1\x11\x61\x7a"
+ "\x30\x1a\x54\x90\xea\xd2\x30\xf6"
+ "\xa5\xad\x60\xf9\x4d\x84\x21\x1b"
+ "\xe4\x42\x22\xc8\x12\x4b\xb0\x58"
+ "\x3e\x9c\x2d\x32\x95\x0a\x8e\xb0"
+ "\x0a\x7e\x77\x2f\xe8\x97\x31\x6a"
+ "\xf5\x59\xb4\x26\xe6\x37\x12\xc9"
+ "\xcb\xa0\x58\x33\x6f\xd5\x55\x55"
+ "\x3c\xa1\x33\xb1\x0b\x7e\x2e\xb4"
+ "\x43\x2a\x84\x39\xf0\x9c\xf4\x69"
+ "\x4f\x1e\x79\xa6\x15\x1b\x87\xbb"
+ "\xdb\x9b\xe0\xf1\x0b\xba\xe3\x6e"
+ "\xcc\x2f\x49\x19\x22\x29\xfc\x71"
+ "\xbb\x77\x38\x18\x61\xaf\x85\x76"
+ "\xeb\xd1\x09\xcc\x86\x04\x20\x9a"
+ "\x66\x53\x2f\x44\x8b\xc6\xa3\xd2"
+ "\x5f\xc7\x79\x82\x66\xa8\x6e\x75"
+ "\x7d\x94\xd1\x86\x75\x0f\xa5\x4f"
+ "\x3c\x7a\x33\xce\xd1\x6e\x9d\x7b"
+ "\x1f\x91\x37\xb8\x37\x80\xfb\xe0"
+ "\x52\x26\xd0\x9a\xd4\x48\x02\x41"
+ "\x05\xe3\x5a\x94\xf1\x65\x61\x19"
+ "\xb8\x88\x4e\x2b\xea\xba\x8b\x58"
+ "\x8b\x42\x01\x00\xa8\xfe\x00\x5c"
+ "\xfe\x1c\xee\x31\x15\x69\xfa\xb3"
+ "\x9b\x5f\x22\x8e\x0d\x2c\xe3\xa5"
+ "\x21\xb9\x99\x8a\x8e\x94\x5a\xef"
+ "\x13\x3e\x99\x96\x79\x6e\xd5\x42"
+ "\x36\x03\xa9\xe2\xca\x65\x4e\x8a"
+ "\x8a\x30\xd2\x7d\x74\xe7\xf0\xaa"
+ "\x23\x26\xdd\xcb\x82\x39\xfc\x9d"
+ "\x51\x76\x21\x80\xa2\xbe\x93\x03"
+ "\x47\xb0\xc1\xb6\xdc\x63\xfd\x9f"
+ "\xca\x9d\xa5\xca\x27\x85\xe2\xd8"
+ "\x15\x5b\x7e\x14\x7a\xc4\x89\xcc"
+ "\x74\x14\x4b\x46\xd2\xce\xac\x39"
+ "\x6b\x6a\x5a\xa4\x0e\xe3\x7b\x15"
+ "\x94\x4b\x0f\x74\xcb\x0c\x7f\xa9"
+ "\xbe\x09\x39\xa3\xdd\x56\x5c\xc7"
+ "\x99\x56\x65\x39\xf4\x0b\x7d\x87"
+ "\xec\xaa\xe3\x4d\x22\x65\x39\x4e",
+ .psize = 1024,
+ .digest = "\x64\x3a\xbc\xc3\x3f\x74\x40\x51"
+ "\x6e\x56\x01\x1a\x51\xec\x36\xde",
+ .np = 8,
+ .tap = { 64, 203, 267, 28, 263, 62, 54, 83 },
+ }, {
+ .key = "\x1b\x82\x2e\x1b\x17\x23\xb9\x6d"
+ "\xdc\x9c\xda\x99\x07\xe3\x5f\xd8"
+ "\xd2\xf8\x43\x80\x8d\x86\x7d\x80"
+ "\x1a\xd0\xcc\x13\xb9\x11\x05\x3f"
+ "\x7e\xcf\x7e\x80\x0e\xd8\x25\x48"
+ "\x8b\xaa\x63\x83\x92\xd0\x72\xf5"
+ "\x4f\x67\x7e\x50\x18\x25\xa4\xd1"
+ "\xe0\x7e\x1e\xba\xd8\xa7\x6e\xdb"
+ "\x1a\xcc\x0d\xfe\x9f\x6d\x22\x35"
+ "\xe1\xe6\xe0\xa8\x7b\x9c\xb1\x66"
+ "\xa3\xf8\xff\x4d\x90\x84\x28\xbc"
+ "\xdc\x19\xc7\x91\x49\xfc\xf6\x33"
+ "\xc9\x6e\x65\x7f\x28\x6f\x68\x2e"
+ "\xdf\x1a\x75\xe9\xc2\x0c\x96\xb9"
+ "\x31\x22\xc4\x07\xc6\x0a\x2f\xfd"
+ "\x36\x06\x5f\x5c\xc5\xb1\x3a\xf4"
+ "\x5e\x48\xa4\x45\x2b\x88\xa7\xee"
+ "\xa9\x8b\x52\xcc\x99\xd9\x2f\xb8"
+ "\xa4\x58\x0a\x13\xeb\x71\x5a\xfa"
+ "\xe5\x5e\xbe\xf2\x64\xad\x75\xbc"
+ "\x0b\x5b\x34\x13\x3b\x23\x13\x9a"
+ "\x69\x30\x1e\x9a\xb8\x03\xb8\x8b"
+ "\x3e\x46\x18\x6d\x38\xd9\xb3\xd8"
+ "\xbf\xf1\xd0\x28\xe6\x51\x57\x80"
+ "\x5e\x99\xfb\xd0\xce\x1e\x83\xf7"
+ "\xe9\x07\x5a\x63\xa9\xef\xce\xa5"
+ "\xfb\x3f\x37\x17\xfc\x0b\x37\x0e"
+ "\xbb\x4b\x21\x62\xb7\x83\x0e\xa9"
+ "\x9e\xb0\xc4\xad\x47\xbe\x35\xe7"
+ "\x51\xb2\xf2\xac\x2b\x65\x7b\x48"
+ "\xe3\x3f\x5f\xb6\x09\x04\x0c\x58"
+ "\xce\x99\xa9\x15\x2f\x4e\xc1\xf2"
+ "\x24\x48\xc0\xd8\x6c\xd3\x76\x17"
+ "\x83\x5d\xe6\xe3\xfd\x01\x8e\xf7"
+ "\x42\xa5\x04\x29\x30\xdf\xf9\x00"
+ "\x4a\xdc\x71\x22\x1a\x33\x15\xb6"
+ "\xd7\x72\xfb\x9a\xb8\xeb\x2b\x38"
+ "\xea\xa8\x61\xa8\x90\x11\x9d\x73"
+ "\x2e\x6c\xce\x81\x54\x5a\x9f\xcd"
+ "\xcf\xd5\xbd\x26\x5d\x66\xdb\xfb"
+ "\xdc\x1e\x7c\x10\xfe\x58\x82\x10"
+ "\x16\x24\x01\xce\x67\x55\x51\xd1"
+ "\xdd\x6b\x44\xa3\x20\x8e\xa9\xa6"
+ "\x06\xa8\x29\x77\x6e\x00\x38\x5b"
+ "\xde\x4d\x58\xd8\x1f\x34\xdf\xf9"
+ "\x2c\xac\x3e\xad\xfb\x92\x0d\x72"
+ "\x39\xa4\xac\x44\x10\xc0\x43\xc4"
+ "\xa4\x77\x3b\xfc\xc4\x0d\x37\xd3"
+ "\x05\x84\xda\x53\x71\xf8\x80\xd3"
+ "\x34\x44\xdb\x09\xb4\x2b\x8e\xe3"
+ "\x00\x75\x50\x9e\x43\x22\x00\x0b"
+ "\x7c\x70\xab\xd4\x41\xf1\x93\xcd"
+ "\x25\x2d\x84\x74\xb5\xf2\x92\xcd"
+ "\x0a\x28\xea\x9a\x49\x02\x96\xcb"
+ "\x85\x9e\x2f\x33\x03\x86\x1d\xdc"
+ "\x1d\x31\xd5\xfc\x9d\xaa\xc5\xe9"
+ "\x9a\xc4\x57\xf5\x35\xed\xf4\x4b"
+ "\x3d\x34\xc2\x29\x13\x86\x36\x42"
+ "\x5d\xbf\x90\x86\x13\x77\xe5\xc3"
+ "\x62\xb4\xfe\x0b\x70\x39\x35\x65"
+ "\x02\xea\xf6\xce\x57\x0c\xbb\x74"
+ "\x29\xe3\xfd\x60\x90\xfd\x10\x38"
+ "\xd5\x4e\x86\xbd\x37\x70\xf0\x97"
+ "\xa6\xab\x3b\x83\x64\x52\xca\x66"
+ "\x2f\xf9\xa4\xca\x3a\x55\x6b\xb0"
+ "\xe8\x3a\x34\xdb\x9e\x48\x50\x2f"
+ "\x3b\xef\xfd\x08\x2d\x5f\xc1\x37"
+ "\x5d\xbe\x73\xe4\xd8\xe9\xac\xca"
+ "\x8a\xaa\x48\x7c\x5c\xf4\xa6\x96"
+ "\x5f\xfa\x70\xa6\xb7\x8b\x50\xcb"
+ "\xa6\xf5\xa9\xbd\x7b\x75\x4c\x22"
+ "\x0b\x19\x40\x2e\xc9\x39\x39\x32"
+ "\x83\x03\xa8\xa4\x98\xe6\x8e\x16"
+ "\xb9\xde\x08\xc5\xfc\xbf\xad\x39"
+ "\xa8\xc7\x93\x6c\x6f\x23\xaf\xc1"
+ "\xab\xe1\xdf\xbb\x39\xae\x93\x29"
+ "\x0e\x7d\x80\x8d\x3e\x65\xf3\xfd"
+ "\x96\x06\x65\x90\xa1\x28\x64\x4b"
+ "\x69\xf9\xa8\x84\x27\x50\xfc\x87"
+ "\xf7\xbf\x55\x8e\x56\x13\x58\x7b"
+ "\x85\xb4\x6a\x72\x0f\x40\xf1\x4f"
+ "\x83\x81\x1f\x76\xde\x15\x64\x7a"
+ "\x7a\x80\xe4\xc7\x5e\x63\x01\x91"
+ "\xd7\x6b\xea\x0b\x9b\xa2\x99\x3b"
+ "\x6c\x88\xd8\xfd\x59\x3c\x8d\x22"
+ "\x86\x56\xbe\xab\xa1\x37\x08\x01"
+ "\x50\x85\x69\x29\xee\x9f\xdf\x21"
+ "\x3e\x20\x20\xf5\xb0\xbb\x6b\xd0"
+ "\x9c\x41\x38\xec\x54\x6f\x2d\xbd"
+ "\x0f\xe1\xbd\xf1\x2b\x6e\x60\x56"
+ "\x29\xe5\x7a\x70\x1c\xe2\xfc\x97"
+ "\x82\x68\x67\xd9\x3d\x1f\xfb\xd8"
+ "\x07\x9f\xbf\x96\x74\xba\x6a\x0e"
+ "\x10\x48\x20\xd8\x13\x1e\xb5\x44"
+ "\xf2\xcc\xb1\x8b\xfb\xbb\xec\xd7"
+ "\x37\x70\x1f\x7c\x55\xd2\x4b\xb9"
+ "\xfd\x70\x5e\xa3\x91\x73\x63\x52"
+ "\x13\x47\x5a\x06\xfb\x01\x67\xa5"
+ "\xc0\xd0\x49\x19\x56\x66\x9a\x77"
+ "\x64\xaf\x8c\x25\x91\x52\x87\x0e"
+ "\x18\xf3\x5f\x97\xfd\x71\x13\xf8"
+ "\x05\xa5\x39\xcc\x65\xd3\xcc\x63"
+ "\x5b\xdb\x5f\x7e\x5f\x6e\xad\xc4"
+ "\xf4\xa0\xc5\xc2\x2b\x4d\x97\x38"
+ "\x4f\xbc\xfa\x33\x17\xb4\x47\xb9"
+ "\x43\x24\x15\x8d\xd2\xed\x80\x68"
+ "\x84\xdb\x04\x80\xca\x5e\x6a\x35"
+ "\x2c\x2c\xe7\xc5\x03\x5f\x54\xb0"
+ "\x5e\x4f\x1d\x40\x54\x3d\x78\x9a"
+ "\xac\xda\x80\x27\x4d\x15\x4c\x1a"
+ "\x6e\x80\xc9\xc4\x3b\x84\x0e\xd9"
+ "\x2e\x93\x01\x8c\xc3\xc8\x91\x4b"
+ "\xb3\xaa\x07\x04\x68\x5b\x93\xa5"
+ "\xe7\xc4\x9d\xe7\x07\xee\xf5\x3b"
+ "\x40\x89\xcc\x60\x34\x9d\xb4\x06"
+ "\x1b\xef\x92\xe6\xc1\x2a\x7d\x0f"
+ "\x81\xaa\x56\xe3\xd7\xed\xa7\xd4"
+ "\xa7\x3a\x49\xc4\xad\x81\x5c\x83"
+ "\x55\x8e\x91\x54\xb7\x7d\x65\xa5"
+ "\x06\x16\xd5\x9a\x16\xc1\xb0\xa2"
+ "\x06\xd8\x98\x47\x73\x7e\x73\xa0"
+ "\xb8\x23\xb1\x52\xbf\x68\x74\x5d"
+ "\x0b\xcb\xfa\x8c\x46\xe3\x24\xe6"
+ "\xab\xd4\x69\x8d\x8c\xf2\x8a\x59"
+ "\xbe\x48\x46\x50\x8c\x9a\xe8\xe3"
+ "\x31\x55\x0a\x06\xed\x4f\xf8\xb7"
+ "\x4f\xe3\x85\x17\x30\xbd\xd5\x20"
+ "\xe7\x5b\xb2\x32\xcf\x6b\x16\x44"
+ "\xd2\xf5\x7e\xd7\xd1\x2f\xee\x64"
+ "\x3e\x9d\x10\xef\x27\x35\x43\x64"
+ "\x67\xfb\x7a\x7b\xe0\x62\x31\x9a"
+ "\x4d\xdf\xa5\xab\xc0\x20\xbb\x01"
+ "\xe9\x7b\x54\xf1\xde\xb2\x79\x50"
+ "\x6c\x4b\x91\xdb\x7f\xbb\x50\xc1"
+ "\x55\x44\x38\x9a\xe0\x9f\xe8\x29"
+ "\x6f\x15\xf8\x4e\xa6\xec\xa0\x60",
+ .ksize = 1088,
+ .plaintext = "\x15\x68\x9e\x2f\xad\x15\x52\xdf"
+ "\xf0\x42\x62\x24\x2a\x2d\xea\xbf"
+ "\xc7\xf3\xb4\x1a\xf5\xed\xb2\x08"
+ "\x15\x60\x1c\x00\x77\xbf\x0b\x0e"
+ "\xb7\x2c\xcf\x32\x3a\xc7\x01\x77"
+ "\xef\xa6\x75\xd0\x29\xc7\x68\x20"
+ "\xb2\x92\x25\xbf\x12\x34\xe9\xa4"
+ "\xfd\x32\x7b\x3f\x7c\xbd\xa5\x02"
+ "\x38\x41\xde\xc9\xc1\x09\xd9\xfc"
+ "\x6e\x78\x22\x83\x18\xf7\x50\x8d"
+ "\x8f\x9c\x2d\x02\xa5\x30\xac\xff"
+ "\xea\x63\x2e\x80\x37\x83\xb0\x58"
+ "\xda\x2f\xef\x21\x55\xba\x7b\xb1"
+ "\xb6\xed\xf5\xd2\x4d\xaa\x8c\xa9"
+ "\xdd\xdb\x0f\xb4\xce\xc1\x9a\xb1"
+ "\xc1\xdc\xbd\xab\x86\xc2\xdf\x0b"
+ "\xe1\x2c\xf9\xbe\xf6\xd8\xda\x62"
+ "\x72\xdd\x98\x09\x52\xc0\xc4\xb6"
+ "\x7b\x17\x5c\xf5\xd8\x4b\x88\xd6"
+ "\x6b\xbf\x84\x4a\x3f\xf5\x4d\xd2"
+ "\x94\xe2\x9c\xff\xc7\x3c\xd9\xc8"
+ "\x37\x38\xbc\x8c\xf3\xe7\xb7\xd0"
+ "\x1d\x78\xc4\x39\x07\xc8\x5e\x79"
+ "\xb6\x5a\x90\x5b\x6e\x97\xc9\xd4"
+ "\x82\x9c\xf3\x83\x7a\xe7\x97\xfc"
+ "\x1d\xbb\xef\xdb\xce\xe0\x82\xad"
+ "\xca\x07\x6c\x54\x62\x6f\x81\xe6"
+ "\x7a\x5a\x96\x6e\x80\x3a\xa2\x37"
+ "\x6f\xc6\xa4\x29\xc3\x9e\x19\x94"
+ "\x9f\xb0\x3e\x38\xfb\x3c\x2b\x7d"
+ "\xaa\xb8\x74\xda\x54\x23\x51\x12"
+ "\x4b\x96\x36\x8f\x91\x4f\x19\x37"
+ "\x83\xc9\xdd\xc7\x1a\x32\x2d\xab"
+ "\xc7\x89\xe2\x07\x47\x6c\xe8\xa6"
+ "\x70\x6b\x8e\x0c\xda\x5c\x6a\x59"
+ "\x27\x33\x0e\xe1\xe1\x20\xe8\xc8"
+ "\xae\xdc\xd0\xe3\x6d\xa8\xa6\x06"
+ "\x41\xb4\xd4\xd4\xcf\x91\x3e\x06"
+ "\xb0\x9a\xf7\xf1\xaa\xa6\x23\x92"
+ "\x10\x86\xf0\x94\xd1\x7c\x2e\x07"
+ "\x30\xfb\xc5\xd8\xf3\x12\xa9\xe8"
+ "\x22\x1c\x97\x1a\xad\x96\xb0\xa1"
+ "\x72\x6a\x6b\xb4\xfd\xf7\xe8\xfa"
+ "\xe2\x74\xd8\x65\x8d\x35\x17\x4b"
+ "\x00\x23\x5c\x8c\x70\xad\x71\xa2"
+ "\xca\xc5\x6c\x59\xbf\xb4\xc0\x6d"
+ "\x86\x98\x3e\x19\x5a\x90\x92\xb1"
+ "\x66\x57\x6a\x91\x68\x7c\xbc\xf3"
+ "\xf1\xdb\x94\xf8\x48\xf1\x36\xd8"
+ "\x78\xac\x1c\xa9\xcc\xd6\x27\xba"
+ "\x91\x54\x22\xf5\xe6\x05\x3f\xcc"
+ "\xc2\x8f\x2c\x3b\x2b\xc3\x2b\x2b"
+ "\x3b\xb8\xb6\x29\xb7\x2f\x94\xb6"
+ "\x7b\xfc\x94\x3e\xd0\x7a\x41\x59"
+ "\x7b\x1f\x9a\x09\xa6\xed\x4a\x82"
+ "\x9d\x34\x1c\xbd\x4e\x1c\x3a\x66"
+ "\x80\x74\x0e\x9a\x4f\x55\x54\x47"
+ "\x16\xba\x2a\x0a\x03\x35\x99\xa3"
+ "\x5c\x63\x8d\xa2\x72\x8b\x17\x15"
+ "\x68\x39\x73\xeb\xec\xf2\xe8\xf5"
+ "\x95\x32\x27\xd6\xc4\xfe\xb0\x51"
+ "\xd5\x0c\x50\xc5\xcd\x6d\x16\xb3"
+ "\xa3\x1e\x95\x69\xad\x78\x95\x06"
+ "\xb9\x46\xf2\x6d\x24\x5a\x99\x76"
+ "\x73\x6a\x91\xa6\xac\x12\xe1\x28"
+ "\x79\xbc\x08\x4e\x97\x00\x98\x63"
+ "\x07\x1c\x4e\xd1\x68\xf3\xb3\x81"
+ "\xa8\xa6\x5f\xf1\x01\xc9\xc1\xaf"
+ "\x3a\x96\xf9\x9d\xb5\x5a\x5f\x8f"
+ "\x7e\xc1\x7e\x77\x0a\x40\xc8\x8e"
+ "\xfc\x0e\xed\xe1\x0d\xb0\xe5\x5e"
+ "\x5e\x6f\xf5\x7f\xab\x33\x7d\xcd"
+ "\xf0\x09\x4b\xb2\x11\x37\xdc\x65"
+ "\x97\x32\x62\x71\x3a\x29\x54\xb9"
+ "\xc7\xa4\xbf\x75\x0f\xf9\x40\xa9"
+ "\x8d\xd7\x8b\xa7\xe0\x9a\xbe\x15"
+ "\xc6\xda\xd8\x00\x14\x69\x1a\xaf"
+ "\x5f\x79\xc3\xf5\xbb\x6c\x2a\x9d"
+ "\xdd\x3c\x5f\x97\x21\xe1\x3a\x03"
+ "\x84\x6a\xe9\x76\x11\x1f\xd3\xd5"
+ "\xf0\x54\x20\x4d\xc2\x91\xc3\xa4"
+ "\x36\x25\xbe\x1b\x2a\x06\xb7\xf3"
+ "\xd1\xd0\x55\x29\x81\x4c\x83\xa3"
+ "\xa6\x84\x1e\x5c\xd1\xd0\x6c\x90"
+ "\xa4\x11\xf0\xd7\x63\x6a\x48\x05"
+ "\xbc\x48\x18\x53\xcd\xb0\x8d\xdb"
+ "\xdc\xfe\x55\x11\x5c\x51\xb3\xab"
+ "\xab\x63\x3e\x31\x5a\x8b\x93\x63"
+ "\x34\xa9\xba\x2b\x69\x1a\xc0\xe3"
+ "\xcb\x41\xbc\xd7\xf5\x7f\x82\x3e"
+ "\x01\xa3\x3c\x72\xf4\xfe\xdf\xbe"
+ "\xb1\x67\x17\x2b\x37\x60\x0d\xca"
+ "\x6f\xc3\x94\x2c\xd2\x92\x6d\x9d"
+ "\x75\x18\x77\xaa\x29\x38\x96\xed"
+ "\x0e\x20\x70\x92\xd5\xd0\xb4\x00"
+ "\xc0\x31\xf2\xc9\x43\x0e\x75\x1d"
+ "\x4b\x64\xf2\x1f\xf2\x29\x6c\x7b"
+ "\x7f\xec\x59\x7d\x8c\x0d\xd4\xd3"
+ "\xac\x53\x4c\xa3\xde\x42\x92\x95"
+ "\x6d\xa3\x4f\xd0\xe6\x3d\xe7\xec"
+ "\x7a\x4d\x68\xf1\xfe\x67\x66\x09"
+ "\x83\x22\xb1\x98\x43\x8c\xab\xb8"
+ "\x45\xe6\x6d\xdf\x5e\x50\x71\xce"
+ "\xf5\x4e\x40\x93\x2b\xfa\x86\x0e"
+ "\xe8\x30\xbd\x82\xcc\x1c\x9c\x5f"
+ "\xad\xfd\x08\x31\xbe\x52\xe7\xe6"
+ "\xf2\x06\x01\x62\x25\x15\x99\x74"
+ "\x33\x51\x52\x57\x3f\x57\x87\x61"
+ "\xb9\x7f\x29\x3d\xcd\x92\x5e\xa6"
+ "\x5c\x3b\xf1\xed\x5f\xeb\x82\xed"
+ "\x56\x7b\x61\xe7\xfd\x02\x47\x0e"
+ "\x2a\x15\xa4\xce\x43\x86\x9b\xe1"
+ "\x2b\x4c\x2a\xd9\x42\x97\xf7\x9a"
+ "\xe5\x47\x46\x48\xd3\x55\x6f\x4d"
+ "\xd9\xeb\x4b\xdd\x7b\x21\x2f\xb3"
+ "\xa8\x36\x28\xdf\xca\xf1\xf6\xd9"
+ "\x10\xf6\x1c\xfd\x2e\x0c\x27\xe0"
+ "\x01\xb3\xff\x6d\x47\x08\x4d\xd4"
+ "\x00\x25\xee\x55\x4a\xe9\xe8\x5b"
+ "\xd8\xf7\x56\x12\xd4\x50\xb2\xe5"
+ "\x51\x6f\x34\x63\x69\xd2\x4e\x96"
+ "\x4e\xbc\x79\xbf\x18\xae\xc6\x13"
+ "\x80\x92\x77\xb0\xb4\x0f\x29\x94"
+ "\x6f\x4c\xbb\x53\x11\x36\xc3\x9f"
+ "\x42\x8e\x96\x8a\x91\xc8\xe9\xfc"
+ "\xfe\xbf\x7c\x2d\x6f\xf9\xb8\x44"
+ "\x89\x1b\x09\x53\x0a\x2a\x92\xc3"
+ "\x54\x7a\x3a\xf9\xe2\xe4\x75\x87"
+ "\xa0\x5e\x4b\x03\x7a\x0d\x8a\xf4"
+ "\x55\x59\x94\x2b\x63\x96\x0e\xf5",
+ .psize = 1040,
+ .digest = "\xb5\xb9\x08\xb3\x24\x3e\x03\xf0"
+ "\xd6\x0b\x57\xbc\x0a\x6d\x89\x59",
+ }, {
+ .key = "\xf6\x34\x42\x71\x35\x52\x8b\x58"
+ "\x02\x3a\x8e\x4a\x8d\x41\x13\xe9"
+ "\x7f\xba\xb9\x55\x9d\x73\x4d\xf8"
+ "\x3f\x5d\x73\x15\xff\xd3\x9e\x7f"
+ "\x20\x2a\x6a\xa8\xd1\xf0\x8f\x12"
+ "\x6b\x02\xd8\x6c\xde\xba\x80\x22"
+ "\x19\x37\xc8\xd0\x4e\x89\x17\x7c"
+ "\x7c\xdd\x88\xfd\x41\xc0\x04\xb7"
+ "\x1d\xac\x19\xe3\x20\xc7\x16\xcf"
+ "\x58\xee\x1d\x7a\x61\x69\xa9\x12"
+ "\x4b\xef\x4f\xb6\x38\xdd\x78\xf8"
+ "\x28\xee\x70\x08\xc7\x7c\xcc\xc8"
+ "\x1e\x41\xf5\x80\x86\x70\xd0\xf0"
+ "\xa3\x87\x6b\x0a\x00\xd2\x41\x28"
+ "\x74\x26\xf1\x24\xf3\xd0\x28\x77"
+ "\xd7\xcd\xf6\x2d\x61\xf4\xa2\x13"
+ "\x77\xb4\x6f\xa0\xf4\xfb\xd6\xb5"
+ "\x38\x9d\x5a\x0c\x51\xaf\xad\x63"
+ "\x27\x67\x8c\x01\xea\x42\x1a\x66"
+ "\xda\x16\x7c\x3c\x30\x0c\x66\x53"
+ "\x1c\x88\xa4\x5c\xb2\xe3\x78\x0a"
+ "\x13\x05\x6d\xe2\xaf\xb3\xe4\x75"
+ "\x00\x99\x58\xee\x76\x09\x64\xaa"
+ "\xbb\x2e\xb1\x81\xec\xd8\x0e\xd3"
+ "\x0c\x33\x5d\xb7\x98\xef\x36\xb6"
+ "\xd2\x65\x69\x41\x70\x12\xdc\x25"
+ "\x41\x03\x99\x81\x41\x19\x62\x13"
+ "\xd1\x0a\x29\xc5\x8c\xe0\x4c\xf3"
+ "\xd6\xef\x4c\xf4\x1d\x83\x2e\x6d"
+ "\x8e\x14\x87\xed\x80\xe0\xaa\xd3"
+ "\x08\x04\x73\x1a\x84\x40\xf5\x64"
+ "\xbd\x61\x32\x65\x40\x42\xfb\xb0"
+ "\x40\xf6\x40\x8d\xc7\x7f\x14\xd0"
+ "\x83\x99\xaa\x36\x7e\x60\xc6\xbf"
+ "\x13\x8a\xf9\x21\xe4\x7e\x68\x87"
+ "\xf3\x33\x86\xb4\xe0\x23\x7e\x0a"
+ "\x21\xb1\xf5\xad\x67\x3c\x9c\x9d"
+ "\x09\xab\xaf\x5f\xba\xe0\xd0\x82"
+ "\x48\x22\x70\xb5\x6d\x53\xd6\x0e"
+ "\xde\x64\x92\x41\xb0\xd3\xfb\xda"
+ "\x21\xfe\xab\xea\x20\xc4\x03\x58"
+ "\x18\x2e\x7d\x2f\x03\xa9\x47\x66"
+ "\xdf\x7b\xa4\x6b\x34\x6b\x55\x9c"
+ "\x4f\xd7\x9c\x47\xfb\xa9\x42\xec"
+ "\x5a\x12\xfd\xfe\x76\xa0\x92\x9d"
+ "\xfe\x1e\x16\xdd\x24\x2a\xe4\x27"
+ "\xd5\xa9\xf2\x05\x4f\x83\xa2\xaf"
+ "\xfe\xee\x83\x7a\xad\xde\xdf\x9a"
+ "\x80\xd5\x81\x14\x93\x16\x7e\x46"
+ "\x47\xc2\x14\xef\x49\x6e\xb9\xdb"
+ "\x40\xe8\x06\x6f\x9c\x2a\xfd\x62"
+ "\x06\x46\xfd\x15\x1d\x36\x61\x6f"
+ "\x77\x77\x5e\x64\xce\x78\x1b\x85"
+ "\xbf\x50\x9a\xfd\x67\xa6\x1a\x65"
+ "\xad\x5b\x33\x30\xf1\x71\xaa\xd9"
+ "\x23\x0d\x92\x24\x5f\xae\x57\xb0"
+ "\x24\x37\x0a\x94\x12\xfb\xb5\xb1"
+ "\xd3\xb8\x1d\x12\x29\xb0\x80\x24"
+ "\x2d\x47\x9f\x96\x1f\x95\xf1\xb1"
+ "\xda\x35\xf6\x29\xe0\xe1\x23\x96"
+ "\xc7\xe8\x22\x9b\x7c\xac\xf9\x41"
+ "\x39\x01\xe5\x73\x15\x5e\x99\xec"
+ "\xb4\xc1\xf4\xe7\xa7\x97\x6a\xd5"
+ "\x90\x9a\xa0\x1d\xf3\x5a\x8b\x5f"
+ "\xdf\x01\x52\xa4\x93\x31\x97\xb0"
+ "\x93\x24\xb5\xbc\xb2\x14\x24\x98"
+ "\x4a\x8f\x19\x85\xc3\x2d\x0f\x74"
+ "\x9d\x16\x13\x80\x5e\x59\x62\x62"
+ "\x25\xe0\xd1\x2f\x64\xef\xba\xac"
+ "\xcd\x09\x07\x15\x8a\xcf\x73\xb5"
+ "\x8b\xc9\xd8\x24\xb0\x53\xd5\x6f"
+ "\xe1\x2b\x77\xb1\xc5\xe4\xa7\x0e"
+ "\x18\x45\xab\x36\x03\x59\xa8\xbd"
+ "\x43\xf0\xd8\x2c\x1a\x69\x96\xbb"
+ "\x13\xdf\x6c\x33\x77\xdf\x25\x34"
+ "\x5b\xa5\x5b\x8c\xf9\x51\x05\xd4"
+ "\x8b\x8b\x44\x87\x49\xfc\xa0\x8f"
+ "\x45\x15\x5b\x40\x42\xc4\x09\x92"
+ "\x98\x0c\x4d\xf4\x26\x37\x1b\x13"
+ "\x76\x01\x93\x8d\x4f\xe6\xed\x18"
+ "\xd0\x79\x7b\x3f\x44\x50\xcb\xee"
+ "\xf7\x4a\xc9\x9e\xe0\x96\x74\xa7"
+ "\xe6\x93\xb2\x53\xca\x55\xa8\xdc"
+ "\x1e\x68\x07\x87\xb7\x2e\xc1\x08"
+ "\xb2\xa4\x5b\xaf\xc6\xdb\x5c\x66"
+ "\x41\x1c\x51\xd9\xb0\x07\x00\x0d"
+ "\xf0\x4c\xdc\x93\xde\xa9\x1e\x8e"
+ "\xd3\x22\x62\xd8\x8b\x88\x2c\xea"
+ "\x5e\xf1\x6e\x14\x40\xc7\xbe\xaa"
+ "\x42\x28\xd0\x26\x30\x78\x01\x9b"
+ "\x83\x07\xbc\x94\xc7\x57\xa2\x9f"
+ "\x03\x07\xff\x16\xff\x3c\x6e\x48"
+ "\x0a\xd0\xdd\x4c\xf6\x64\x9a\xf1"
+ "\xcd\x30\x12\x82\x2c\x38\xd3\x26"
+ "\x83\xdb\xab\x3e\xc6\xf8\xe6\xfa"
+ "\x77\x0a\x78\x82\x75\xf8\x63\x51"
+ "\x59\xd0\x8d\x24\x9f\x25\xe6\xa3"
+ "\x4c\xbc\x34\xfc\xe3\x10\xc7\x62"
+ "\xd4\x23\xc8\x3d\xa7\xc6\xa6\x0a"
+ "\x4f\x7e\x29\x9d\x6d\xbe\xb5\xf1"
+ "\xdf\xa4\x53\xfa\xc0\x23\x0f\x37"
+ "\x84\x68\xd0\xb5\xc8\xc6\xae\xf8"
+ "\xb7\x8d\xb3\x16\xfe\x8f\x87\xad"
+ "\xd0\xc1\x08\xee\x12\x1c\x9b\x1d"
+ "\x90\xf8\xd1\x63\xa4\x92\x3c\xf0"
+ "\xc7\x34\xd8\xf1\x14\xed\xa3\xbc"
+ "\x17\x7e\xd4\x62\x42\x54\x57\x2c"
+ "\x3e\x7a\x35\x35\x17\x0f\x0b\x7f"
+ "\x81\xa1\x3f\xd0\xcd\xc8\x3b\x96"
+ "\xe9\xe0\x4a\x04\xe1\xb6\x3c\xa1"
+ "\xd6\xca\xc4\xbd\xb6\xb5\x95\x34"
+ "\x12\x9d\xc5\x96\xf2\xdf\xba\x54"
+ "\x76\xd1\xb2\x6b\x3b\x39\xe0\xb9"
+ "\x18\x62\xfb\xf7\xfc\x12\xf1\x5f"
+ "\x7e\xc7\xe3\x59\x4c\xa6\xc2\x3d"
+ "\x40\x15\xf9\xa3\x95\x64\x4c\x74"
+ "\x8b\x73\x77\x33\x07\xa7\x04\x1d"
+ "\x33\x5a\x7e\x8f\xbd\x86\x01\x4f"
+ "\x3e\xb9\x27\x6f\xe2\x41\xf7\x09"
+ "\x67\xfd\x29\x28\xc5\xe4\xf6\x18"
+ "\x4c\x1b\x49\xb2\x9c\x5b\xf6\x81"
+ "\x4f\xbb\x5c\xcc\x0b\xdf\x84\x23"
+ "\x58\xd6\x28\x34\x93\x3a\x25\x97"
+ "\xdf\xb2\xc3\x9e\x97\x38\x0b\x7d"
+ "\x10\xb3\x54\x35\x23\x8c\x64\xee"
+ "\xf0\xd8\x66\xff\x8b\x22\xd2\x5b"
+ "\x05\x16\x3c\x89\xf7\xb1\x75\xaf"
+ "\xc0\xae\x6a\x4f\x3f\xaf\x9a\xf4"
+ "\xf4\x9a\x24\xd9\x80\x82\xc0\x12"
+ "\xde\x96\xd1\xbe\x15\x0b\x8d\x6a"
+ "\xd7\x12\xe4\x85\x9f\x83\xc9\xc3"
+ "\xff\x0b\xb5\xaf\x3b\xd8\x6d\x67"
+ "\x81\x45\xe6\xac\xec\xc1\x7b\x16"
+ "\x18\x0a\xce\x4b\xc0\x2e\x76\xbc"
+ "\x1b\xfa\xb4\x34\xb8\xfc\x3e\xc8"
+ "\x5d\x90\x71\x6d\x7a\x79\xef\x06",
+ .ksize = 1088,
+ .plaintext = "\xaa\x5d\x54\xcb\xea\x1e\x46\x0f"
+ "\x45\x87\x70\x51\x8a\x66\x7a\x33"
+ "\xb4\x18\xff\xa9\x82\xf9\x45\x4b"
+ "\x93\xae\x2e\x7f\xab\x98\xfe\xbf"
+ "\x01\xee\xe5\xa0\x37\x8f\x57\xa6"
+ "\xb0\x76\x0d\xa4\xd6\x28\x2b\x5d"
+ "\xe1\x03\xd6\x1c\x6f\x34\x0d\xe7"
+ "\x61\x2d\x2e\xe5\xae\x5d\x47\xc7"
+ "\x80\x4b\x18\x8f\xa8\x99\xbc\x28"
+ "\xed\x1d\x9d\x86\x7d\xd7\x41\xd1"
+ "\xe0\x2b\xe1\x8c\x93\x2a\xa7\x80"
+ "\xe1\x07\xa0\xa9\x9f\x8c\x8d\x1a"
+ "\x55\xfc\x6b\x24\x7a\xbd\x3e\x51"
+ "\x68\x4b\x26\x59\xc8\xa7\x16\xd9"
+ "\xb9\x61\x13\xde\x8b\x63\x1c\xf6"
+ "\x60\x01\xfb\x08\xb3\x5b\x0a\xbf"
+ "\x34\x73\xda\x87\x87\x3d\x6f\x97"
+ "\x4a\x0c\xa3\x58\x20\xa2\xc0\x81"
+ "\x5b\x8c\xef\xa9\xc2\x01\x1e\x64"
+ "\x83\x8c\xbc\x03\xb6\xd0\x29\x9f"
+ "\x54\xe2\xce\x8b\xc2\x07\x85\x78"
+ "\x25\x38\x96\x4c\xb4\xbe\x17\x4a"
+ "\x65\xa6\xfa\x52\x9d\x66\x9d\x65"
+ "\x4a\xd1\x01\x01\xf0\xcb\x13\xcc"
+ "\xa5\x82\xf3\xf2\x66\xcd\x3f\x9d"
+ "\xd1\xaa\xe4\x67\xea\xf2\xad\x88"
+ "\x56\x76\xa7\x9b\x59\x3c\xb1\x5d"
+ "\x78\xfd\x69\x79\x74\x78\x43\x26"
+ "\x7b\xde\x3f\xf1\xf5\x4e\x14\xd9"
+ "\x15\xf5\x75\xb5\x2e\x19\xf3\x0c"
+ "\x48\x72\xd6\x71\x6d\x03\x6e\xaa"
+ "\xa7\x08\xf9\xaa\x70\xa3\x0f\x4d"
+ "\x12\x8a\xdd\xe3\x39\x73\x7e\xa7"
+ "\xea\x1f\x6d\x06\x26\x2a\xf2\xc5"
+ "\x52\xb4\xbf\xfd\x52\x0c\x06\x60"
+ "\x90\xd1\xb2\x7b\x56\xae\xac\x58"
+ "\x5a\x6b\x50\x2a\xf5\xe0\x30\x3c"
+ "\x2a\x98\x0f\x1b\x5b\x0a\x84\x6c"
+ "\x31\xae\x92\xe2\xd4\xbb\x7f\x59"
+ "\x26\x10\xb9\x89\x37\x68\x26\xbf"
+ "\x41\xc8\x49\xc4\x70\x35\x7d\xff"
+ "\x2d\x7f\xf6\x8a\x93\x68\x8c\x78"
+ "\x0d\x53\xce\x7d\xff\x7d\xfb\xae"
+ "\x13\x1b\x75\xc4\x78\xd7\x71\xd8"
+ "\xea\xd3\xf4\x9d\x95\x64\x8e\xb4"
+ "\xde\xb8\xe4\xa6\x68\xc8\xae\x73"
+ "\x58\xaf\xa8\xb0\x5a\x20\xde\x87"
+ "\x43\xb9\x0f\xe3\xad\x41\x4b\xd5"
+ "\xb7\xad\x16\x00\xa6\xff\xf6\x74"
+ "\xbf\x8c\x9f\xb3\x58\x1b\xb6\x55"
+ "\xa9\x90\x56\x28\xf0\xb5\x13\x4e"
+ "\x9e\xf7\x25\x86\xe0\x07\x7b\x98"
+ "\xd8\x60\x5d\x38\x95\x3c\xe4\x22"
+ "\x16\x2f\xb2\xa2\xaf\xe8\x90\x17"
+ "\xec\x11\x83\x1a\xf4\xa9\x26\xda"
+ "\x39\x72\xf5\x94\x61\x05\x51\xec"
+ "\xa8\x30\x8b\x2c\x13\xd0\x72\xac"
+ "\xb9\xd2\xa0\x4c\x4b\x78\xe8\x6e"
+ "\x04\x85\xe9\x04\x49\x82\x91\xff"
+ "\x89\xe5\xab\x4c\xaa\x37\x03\x12"
+ "\xca\x8b\x74\x10\xfd\x9e\xd9\x7b"
+ "\xcb\xdb\x82\x6e\xce\x2e\x33\x39"
+ "\xce\xd2\x84\x6e\x34\x71\x51\x6e"
+ "\x0d\xd6\x01\x87\xc7\xfa\x0a\xd3"
+ "\xad\x36\xf3\x4c\x9f\x96\x5e\x62"
+ "\x62\x54\xc3\x03\x78\xd6\xab\xdd"
+ "\x89\x73\x55\x25\x30\xf8\xa7\xe6"
+ "\x4f\x11\x0c\x7c\x0a\xa1\x2b\x7b"
+ "\x3d\x0d\xde\x81\xd4\x9d\x0b\xae"
+ "\xdf\x00\xf9\x4c\xb6\x90\x8e\x16"
+ "\xcb\x11\xc8\xd1\x2e\x73\x13\x75"
+ "\x75\x3e\xaa\xf5\xee\x02\xb3\x18"
+ "\xa6\x2d\xf5\x3b\x51\xd1\x1f\x47"
+ "\x6b\x2c\xdb\xc4\x10\xe0\xc8\xba"
+ "\x9d\xac\xb1\x9d\x75\xd5\x41\x0e"
+ "\x7e\xbe\x18\x5b\xa4\x1f\xf8\x22"
+ "\x4c\xc1\x68\xda\x6d\x51\x34\x6c"
+ "\x19\x59\xec\xb5\xb1\xec\xa7\x03"
+ "\xca\x54\x99\x63\x05\x6c\xb1\xac"
+ "\x9c\x31\xd6\xdb\xba\x7b\x14\x12"
+ "\x7a\xc3\x2f\xbf\x8d\xdc\x37\x46"
+ "\xdb\xd2\xbc\xd4\x2f\xab\x30\xd5"
+ "\xed\x34\x99\x8e\x83\x3e\xbe\x4c"
+ "\x86\x79\x58\xe0\x33\x8d\x9a\xb8"
+ "\xa9\xa6\x90\x46\xa2\x02\xb8\xdd"
+ "\xf5\xf9\x1a\x5c\x8c\x01\xaa\x6e"
+ "\xb4\x22\x12\xf5\x0c\x1b\x9b\x7a"
+ "\xc3\x80\xf3\x06\x00\x5f\x30\xd5"
+ "\x06\xdb\x7d\x82\xc2\xd4\x0b\x4c"
+ "\x5f\xe9\xc5\xf5\xdf\x97\x12\xbf"
+ "\x56\xaf\x9b\x69\xcd\xee\x30\xb4"
+ "\xa8\x71\xff\x3e\x7d\x73\x7a\xb4"
+ "\x0d\xa5\x46\x7a\xf3\xf4\x15\x87"
+ "\x5d\x93\x2b\x8c\x37\x64\xb5\xdd"
+ "\x48\xd1\xe5\x8c\xae\xd4\xf1\x76"
+ "\xda\xf4\xba\x9e\x25\x0e\xad\xa3"
+ "\x0d\x08\x7c\xa8\x82\x16\x8d\x90"
+ "\x56\x40\x16\x84\xe7\x22\x53\x3a"
+ "\x58\xbc\xb9\x8f\x33\xc8\xc2\x84"
+ "\x22\xe6\x0d\xe7\xb3\xdc\x5d\xdf"
+ "\xd7\x2a\x36\xe4\x16\x06\x07\xd2"
+ "\x97\x60\xb2\xf5\x5e\x14\xc9\xfd"
+ "\x8b\x05\xd1\xce\xee\x9a\x65\x99"
+ "\xb7\xae\x19\xb7\xc8\xbc\xd5\xa2"
+ "\x7b\x95\xe1\xcc\xba\x0d\xdc\x8a"
+ "\x1d\x59\x52\x50\xaa\x16\x02\x82"
+ "\xdf\x61\x33\x2e\x44\xce\x49\xc7"
+ "\xe5\xc6\x2e\x76\xcf\x80\x52\xf0"
+ "\x3d\x17\x34\x47\x3f\xd3\x80\x48"
+ "\xa2\xba\xd5\xc7\x7b\x02\x28\xdb"
+ "\xac\x44\xc7\x6e\x05\x5c\xc2\x79"
+ "\xb3\x7d\x6a\x47\x77\x66\xf1\x38"
+ "\xf0\xf5\x4f\x27\x1a\x31\xca\x6c"
+ "\x72\x95\x92\x8e\x3f\xb0\xec\x1d"
+ "\xc7\x2a\xff\x73\xee\xdf\x55\x80"
+ "\x93\xd2\xbd\x34\xd3\x9f\x00\x51"
+ "\xfb\x2e\x41\xba\x6c\x5a\x7c\x17"
+ "\x7f\xe6\x70\xac\x8d\x39\x3f\x77"
+ "\xe2\x23\xac\x8f\x72\x4e\xe4\x53"
+ "\xcc\xf1\x1b\xf1\x35\xfe\x52\xa4"
+ "\xd6\xb8\x40\x6b\xc1\xfd\xa0\xa1"
+ "\xf5\x46\x65\xc2\x50\xbb\x43\xe2"
+ "\xd1\x43\x28\x34\x74\xf5\x87\xa0"
+ "\xf2\x5e\x27\x3b\x59\x2b\x3e\x49"
+ "\xdf\x46\xee\xaf\x71\xd7\x32\x36"
+ "\xc7\x14\x0b\x58\x6e\x3e\x2d\x41"
+ "\xfa\x75\x66\x3a\x54\xe0\xb2\xb9"
+ "\xaf\xdd\x04\x80\x15\x19\x3f\x6f"
+ "\xce\x12\xb4\xd8\xe8\x89\x3c\x05"
+ "\x30\xeb\xf3\x3d\xcd\x27\xec\xdc"
+ "\x56\x70\x12\xcf\x78\x2b\x77\xbf"
+ "\x22\xf0\x1b\x17\x9c\xcc\xd6\x1b"
+ "\x2d\x3d\xa0\x3b\xd8\xc9\x70\xa4"
+ "\x7a\x3e\x07\xb9\x06\xc3\xfa\xb0"
+ "\x33\xee\xc1\xd8\xf6\xe0\xf0\xb2"
+ "\x61\x12\x69\xb0\x5f\x28\x99\xda"
+ "\xc3\x61\x48\xfa\x07\x16\x03\xc4"
+ "\xa8\xe1\x3c\xe8\x0e\x64\x15\x30"
+ "\xc1\x9d\x84\x2f\x73\x98\x0e\x3a"
+ "\xf2\x86\x21\xa4\x9e\x1d\xb5\x86"
+ "\x16\xdb\x2b\x9a\x06\x64\x8e\x79"
+ "\x8d\x76\x3e\xc3\xc2\x64\x44\xe3"
+ "\xda\xbc\x1a\x52\xd7\x61\x03\x65"
+ "\x54\x32\x77\x01\xed\x9d\x8a\x43"
+ "\x25\x24\xe3\xc1\xbe\xb8\x2f\xcb"
+ "\x89\x14\x64\xab\xf6\xa0\x6e\x02"
+ "\x57\xe4\x7d\xa9\x4e\x9a\x03\x36"
+ "\xad\xf1\xb1\xfc\x0b\xe6\x79\x51"
+ "\x9f\x81\x77\xc4\x14\x78\x9d\xbf"
+ "\xb6\xd6\xa3\x8c\xba\x0b\x26\xe7"
+ "\xc8\xb9\x5c\xcc\xe1\x5f\xd5\xc6"
+ "\xc4\xca\xc2\xa3\x45\xba\x94\x13"
+ "\xb2\x8f\xc3\x54\x01\x09\xe7\x8b"
+ "\xda\x2a\x0a\x11\x02\x43\xcb\x57"
+ "\xc9\xcc\xb5\x5c\xab\xc4\xec\x54"
+ "\x00\x06\x34\xe1\x6e\x03\x89\x7c"
+ "\xc6\xfb\x6a\xc7\x60\x43\xd6\xc5"
+ "\xb5\x68\x72\x89\x8f\x42\xc3\x74"
+ "\xbd\x25\xaa\x9f\x67\xb5\xdf\x26"
+ "\x20\xe8\xb7\x01\x3c\xe4\x77\xce"
+ "\xc4\x65\xa7\x23\x79\xea\x33\xc7"
+ "\x82\x14\x5c\x82\xf2\x4e\x3d\xf6"
+ "\xc6\x4a\x0e\x29\xbb\xec\x44\xcd"
+ "\x2f\xd1\x4f\x21\x71\xa9\xce\x0f"
+ "\x5c\xf2\x72\x5c\x08\x2e\x21\xd2"
+ "\xc3\x29\x13\xd8\xac\xc3\xda\x13"
+ "\x1a\x9d\xa7\x71\x1d\x27\x1d\x27"
+ "\x1d\xea\xab\x44\x79\xad\xe5\xeb"
+ "\xef\x1f\x22\x0a\x44\x4f\xcb\x87"
+ "\xa7\x58\x71\x0e\x66\xf8\x60\xbf"
+ "\x60\x74\x4a\xb4\xec\x2e\xfe\xd3"
+ "\xf5\xb8\xfe\x46\x08\x50\x99\x6c"
+ "\x66\xa5\xa8\x34\x44\xb5\xe5\xf0"
+ "\xdd\x2c\x67\x4e\x35\x96\x8e\x67"
+ "\x48\x3f\x5f\x37\x44\x60\x51\x2e"
+ "\x14\x91\x5e\x57\xc3\x0e\x79\x77"
+ "\x2f\x03\xf4\xe2\x1c\x72\xbf\x85"
+ "\x5d\xd3\x17\xdf\x6c\xc5\x70\x24"
+ "\x42\xdf\x51\x4e\x2a\xb2\xd2\x5b"
+ "\x9e\x69\x83\x41\x11\xfe\x73\x22"
+ "\xde\x8a\x9e\xd8\x8a\xfb\x20\x38"
+ "\xd8\x47\x6f\xd5\xed\x8f\x41\xfd"
+ "\x13\x7a\x18\x03\x7d\x0f\xcd\x7d"
+ "\xa6\x7d\x31\x9e\xf1\x8f\x30\xa3"
+ "\x8b\x4c\x24\xb7\xf5\x48\xd7\xd9"
+ "\x12\xe7\x84\x97\x5c\x31\x6d\xfb"
+ "\xdf\xf3\xd3\xd1\xd5\x0c\x30\x06"
+ "\x01\x6a\xbc\x6c\x78\x7b\xa6\x50"
+ "\xfa\x0f\x3c\x42\x2d\xa5\xa3\x3b"
+ "\xcf\x62\x50\xff\x71\x6d\xe7\xda"
+ "\x27\xab\xc6\x67\x16\x65\x68\x64"
+ "\xc7\xd5\x5f\x81\xa9\xf6\x65\xb3"
+ "\x5e\x43\x91\x16\xcd\x3d\x55\x37"
+ "\x55\xb3\xf0\x28\xc5\x54\x19\xc0"
+ "\xe0\xd6\x2a\x61\xd4\xc8\x72\x51"
+ "\xe9\xa1\x7b\x48\x21\xad\x44\x09"
+ "\xe4\x01\x61\x3c\x8a\x5b\xf9\xa1"
+ "\x6e\x1b\xdf\xc0\x04\xa8\x8b\xf2"
+ "\x21\xbe\x34\x7b\xfc\xa1\xcd\xc9"
+ "\xa9\x96\xf4\xa4\x4c\xf7\x4e\x8f"
+ "\x84\xcc\xd3\xa8\x92\x77\x8f\x36"
+ "\xe2\x2e\x8c\x33\xe8\x84\xa6\x0c"
+ "\x6c\x8a\xda\x14\x32\xc2\x96\xff"
+ "\xc6\x4a\xc2\x9b\x30\x7f\xd1\x29"
+ "\xc0\xd5\x78\x41\x00\x80\x80\x03"
+ "\x2a\xb1\xde\x26\x03\x48\x49\xee"
+ "\x57\x14\x76\x51\x3c\x36\x5d\x0a"
+ "\x5c\x9f\xe8\xd8\x53\xdb\x4f\xd4"
+ "\x38\xbf\x66\xc9\x75\x12\x18\x75"
+ "\x34\x2d\x93\x22\x96\x51\x24\x6e"
+ "\x4e\xd9\x30\xea\x67\xff\x92\x1c"
+ "\x16\x26\xe9\xb5\x33\xab\x8c\x22"
+ "\x47\xdb\xa0\x2c\x08\xf0\x12\x69"
+ "\x7e\x93\x52\xda\xa5\xe5\xca\xc1"
+ "\x0f\x55\x2a\xbd\x09\x30\x88\x1b"
+ "\x9c\xc6\x9f\xe6\xdb\xa6\x92\xeb"
+ "\xf4\xbd\x5c\xc4\xdb\xc6\x71\x09"
+ "\xab\x5e\x48\x0c\xed\x6f\xda\x8e"
+ "\x8d\x0c\x98\x71\x7d\x10\xd0\x9c"
+ "\x20\x9b\x79\x53\x26\x5d\xb9\x85"
+ "\x8a\x31\xb8\xc5\x1c\x97\xde\x88"
+ "\x61\x55\x7f\x7c\x21\x06\xea\xc4"
+ "\x5f\xaf\xf2\xf0\xd5\x5e\x7d\xb4"
+ "\x6e\xcf\xe9\xae\x1b\x0e\x11\x80"
+ "\xc1\x9a\x74\x7e\x52\x6f\xa0\xb7"
+ "\x24\xcd\x8d\x0a\x11\x40\x63\x72"
+ "\xfa\xe2\xc5\xb3\x94\xef\x29\xa2"
+ "\x1a\x23\x43\x04\x37\x55\x0d\xe9"
+ "\x83\xb2\x29\x51\x49\x64\xa0\xbd"
+ "\xde\x73\xfd\xa5\x7c\x95\x70\x62"
+ "\x58\xdc\xe2\xd0\xbf\x98\xf5\x8a"
+ "\x6a\xfd\xce\xa8\x0e\x42\x2a\xeb"
+ "\xd2\xff\x83\x27\x53\x5c\xa0\x6e"
+ "\x93\xef\xe2\xb9\x5d\x35\xd6\x98"
+ "\xf6\x71\x19\x7a\x54\xa1\xa7\xe8"
+ "\x09\xfe\xf6\x9e\xc7\xbd\x3e\x29"
+ "\xbd\x6b\x17\xf4\xe7\x3e\x10\x5c"
+ "\xc1\xd2\x59\x4f\x4b\x12\x1a\x5b"
+ "\x50\x80\x59\xb9\xec\x13\x66\xa8"
+ "\xd2\x31\x7b\x6a\x61\x22\xdd\x7d"
+ "\x61\xee\x87\x16\x46\x9f\xf9\xc7"
+ "\x41\xee\x74\xf8\xd0\x96\x2c\x76"
+ "\x2a\xac\x7d\x6e\x9f\x0e\x7f\x95"
+ "\xfe\x50\x16\xb2\x23\xca\x62\xd5"
+ "\x68\xcf\x07\x3f\x3f\x97\x85\x2a"
+ "\x0c\x25\x45\xba\xdb\x32\xcb\x83"
+ "\x8c\x4f\xe0\x6d\x9a\x99\xf9\xc9"
+ "\xda\xd4\x19\x31\xc1\x7c\x6d\xd9"
+ "\x9c\x56\xd3\xec\xc1\x81\x4c\xed"
+ "\x28\x9d\x87\xeb\x19\xd7\x1a\x4f"
+ "\x04\x6a\xcb\x1f\xcf\x1f\xa2\x16"
+ "\xfc\x2a\x0d\xa1\x14\x2d\xfa\xc5"
+ "\x5a\xd2\xc5\xf9\x19\x7c\x20\x1f"
+ "\x2d\x10\xc0\x66\x7c\xd9\x2d\xe5"
+ "\x88\x70\x59\xa7\x85\xd5\x2e\x7c"
+ "\x5c\xe3\xb7\x12\xd6\x97\x3f\x29",
+ .psize = 2048,
+ .digest = "\x37\x90\x92\xc2\xeb\x01\x87\xd9"
+ "\x95\xc7\x91\xc3\x17\x8b\x38\x52",
+ }
+};
+
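
For reference, a minimal sketch of how one of the vectors above could be checked through the kernel's shash API, assuming an "nhpoly1305" implementation is registered and using the struct hash_testvec layout from this file; testmgr itself drives these vectors, additionally re-running each one with the input split into .np scatterlist chunks of the sizes listed in .tap. The 1088-byte .ksize appears to correspond to a 16-byte Poly1305 key followed by a 1072-byte NH key, but that split is handled internally by the algorithm. The helper name below is hypothetical and not part of this patch:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/string.h>

/* Illustrative only -- not part of this patch. */
static int check_nhpoly1305(const struct hash_testvec *vec)
{
	struct crypto_shash *tfm;
	u8 out[16];	/* NHPoly1305 digests are 16 bytes */
	int err;

	tfm = crypto_alloc_shash("nhpoly1305", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, vec->key, vec->ksize);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, vec->plaintext,
					  vec->psize, out);
		if (!err && memcmp(out, vec->digest, sizeof(out)))
			err = -EINVAL;	/* digest mismatch */
	}
	crypto_free_shash(tfm);
	return err;
}
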
/*
* DES test vectors.
*/
@@ -11449,6 +12797,82 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
},
};
+static const struct cipher_testvec aes_cfb_tv_template[] = {
+ { /* From NIST SP800-38A */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+ "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
+ "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
+ "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
+ "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
+ "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
+ "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
+ .len = 64,
+ }, {
+ .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
+ "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+ .klen = 24,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
+ "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
+ "\x67\xce\x7f\x7f\x81\x17\x36\x21"
+ "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
+ "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
+ "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
+ "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
+ "\x42\xae\x8f\xba\x58\x4b\x09\xff",
+ .len = 64,
+ }, {
+ .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+ .klen = 32,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
+ "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
+ "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
+ "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
+ "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
+ "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
+ "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
+ "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
+ .len = 64,
+ },
+};
+
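
As a userspace cross-check, the first SP800-38A vector above can be reproduced with OpenSSL's legacy AES_* primitives. The sketch below (file name and build line are illustrative: cc cfb_check.c -lcrypto) implements the full-block CFB recurrence C_i = P_i XOR E_K(C_{i-1}), with the IV as the initial feedback value, and compares the result against the first two .ctext blocks:

#include <stdio.h>
#include <string.h>
#include <openssl/aes.h>

int main(void)
{
	static const unsigned char key[16] = {
		0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
		0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c };
	static const unsigned char iv[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
	static const unsigned char ptext[32] = {
		0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96,
		0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a,
		0xae, 0x2d, 0x8a, 0x57, 0x1e, 0x03, 0xac, 0x9c,
		0x9e, 0xb7, 0x6f, 0xac, 0x45, 0xaf, 0x8e, 0x51 };
	static const unsigned char expect[32] = {
		0x3b, 0x3f, 0xd9, 0x2e, 0xb7, 0x2d, 0xad, 0x20,
		0x33, 0x34, 0x49, 0xf8, 0xe8, 0x3c, 0xfb, 0x4a,
		0xc8, 0xa6, 0x45, 0x37, 0xa0, 0xb3, 0xa9, 0x3f,
		0xcd, 0xe3, 0xcd, 0xad, 0x9f, 0x1c, 0xe5, 0x8b };
	unsigned char fb[16], ks[16], ct[32];
	AES_KEY ek;
	int i, j;

	AES_set_encrypt_key(key, 128, &ek);
	memcpy(fb, iv, 16);
	for (i = 0; i < 32; i += 16) {
		AES_encrypt(fb, ks, &ek);	/* keystream = E_K(feedback) */
		for (j = 0; j < 16; j++)
			ct[i + j] = ptext[i + j] ^ ks[j];
		memcpy(fb, &ct[i], 16);		/* ciphertext feeds back */
	}
	printf("%s\n", memcmp(ct, expect, 32) ? "MISMATCH" : "matches .ctext");
	return 0;
}
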
static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
{ /* Input data from RFC 2410 Case 1 */
#ifdef __LITTLE_ENDIAN
@@ -30802,6 +32226,1794 @@ static const struct cipher_testvec chacha20_tv_template[] = {
},
};
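
The 32-byte .iv values in the template below appear to pack the 24-byte XChaCha20 nonce first, followed by the 64-bit starting stream position in little-endian order (zero in most vectors; the vectors derived from ChaCha20 tests keep that test's original block counter). A small hypothetical helper illustrating that assumed layout:

#include <stdint.h>
#include <string.h>

/* Illustrative only: pack nonce + starting block counter into a 32-byte
 * IV of the form the vectors below appear to use. */
static void xchacha20_pack_iv(uint8_t iv[32], const uint8_t nonce[24],
			      uint64_t stream_pos)
{
	int i;

	memcpy(iv, nonce, 24);
	for (i = 0; i < 8; i++)		/* little-endian stream position */
		iv[24 + i] = (uint8_t)(stream_pos >> (8 * i));
}
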
+static const struct cipher_testvec xchacha20_tv_template[] = {
+ { /* from libsodium test/default/xchacha20.c */
+ .key = "\x79\xc9\x97\x98\xac\x67\x30\x0b"
+ "\xbb\x27\x04\xc9\x5c\x34\x1e\x32"
+ "\x45\xf3\xdc\xb2\x17\x61\xb9\x8e"
+ "\x52\xff\x45\xb2\x4f\x30\x4f\xc4",
+ .klen = 32,
+ .iv = "\xb3\x3f\xfd\x30\x96\x47\x9b\xcf"
+ "\xbc\x9a\xee\x49\x41\x76\x88\xa0"
+ "\xa2\x55\x4f\x8d\x95\x38\x94\x19"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00",
+ .ctext = "\xc6\xe9\x75\x81\x60\x08\x3a\xc6"
+ "\x04\xef\x90\xe7\x12\xce\x6e\x75"
+ "\xd7\x79\x75\x90\x74\x4e\x0c\xf0"
+ "\x60\xf0\x13\x73\x9c",
+ .len = 29,
+ }, { /* from libsodium test/default/xchacha20.c */
+ .key = "\x9d\x23\xbd\x41\x49\xcb\x97\x9c"
+ "\xcf\x3c\x5c\x94\xdd\x21\x7e\x98"
+ "\x08\xcb\x0e\x50\xcd\x0f\x67\x81"
+ "\x22\x35\xea\xaf\x60\x1d\x62\x32",
+ .klen = 32,
+ .iv = "\xc0\x47\x54\x82\x66\xb7\xc3\x70"
+ "\xd3\x35\x66\xa2\x42\x5c\xbf\x30"
+ "\xd8\x2d\x1e\xaf\x52\x94\x10\x9e"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00",
+ .ctext = "\xa2\x12\x09\x09\x65\x94\xde\x8c"
+ "\x56\x67\xb1\xd1\x3a\xd9\x3f\x74"
+ "\x41\x06\xd0\x54\xdf\x21\x0e\x47"
+ "\x82\xcd\x39\x6f\xec\x69\x2d\x35"
+ "\x15\xa2\x0b\xf3\x51\xee\xc0\x11"
+ "\xa9\x2c\x36\x78\x88\xbc\x46\x4c"
+ "\x32\xf0\x80\x7a\xcd\x6c\x20\x3a"
+ "\x24\x7e\x0d\xb8\x54\x14\x84\x68"
+ "\xe9\xf9\x6b\xee\x4c\xf7\x18\xd6"
+ "\x8d\x5f\x63\x7c\xbd\x5a\x37\x64"
+ "\x57\x78\x8e\x6f\xae\x90\xfc\x31"
+ "\x09\x7c\xfc",
+ .len = 91,
+ }, { /* Taken from the ChaCha20 test vectors; appended 12 random bytes
+ to the nonce, zero-padded the stream position from 4 to 8 bytes,
+ and recomputed the ciphertext using libsodium's XChaCha20 */
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x67\xc6\x69\x73"
+ "\x51\xff\x4a\xec\x29\xcd\xba\xab"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ctext = "\x9c\x49\x2a\xe7\x8a\x2f\x93\xc7"
+ "\xb3\x33\x6f\x82\x17\xd8\xc4\x1e"
+ "\xad\x80\x11\x11\x1d\x4c\x16\x18"
+ "\x07\x73\x9b\x4f\xdb\x7c\xcb\x47"
+ "\xfd\xef\x59\x74\xfa\x3f\xe5\x4c"
+ "\x9b\xd0\xea\xbc\xba\x56\xad\x32"
+ "\x03\xdc\xf8\x2b\xc1\xe1\x75\x67"
+ "\x23\x7b\xe6\xfc\xd4\x03\x86\x54",
+ .len = 64,
+ }, { /* Derived from a ChaCha20 test vector, via the process above */
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x01",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x02\xf2\xfb\xe3\x46"
+ "\x7c\xc2\x54\xf8\x1b\xe8\xe7\x8d"
+ "\x01\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x41\x6e\x79\x20\x73\x75\x62\x6d"
+ "\x69\x73\x73\x69\x6f\x6e\x20\x74"
+ "\x6f\x20\x74\x68\x65\x20\x49\x45"
+ "\x54\x46\x20\x69\x6e\x74\x65\x6e"
+ "\x64\x65\x64\x20\x62\x79\x20\x74"
+ "\x68\x65\x20\x43\x6f\x6e\x74\x72"
+ "\x69\x62\x75\x74\x6f\x72\x20\x66"
+ "\x6f\x72\x20\x70\x75\x62\x6c\x69"
+ "\x63\x61\x74\x69\x6f\x6e\x20\x61"
+ "\x73\x20\x61\x6c\x6c\x20\x6f\x72"
+ "\x20\x70\x61\x72\x74\x20\x6f\x66"
+ "\x20\x61\x6e\x20\x49\x45\x54\x46"
+ "\x20\x49\x6e\x74\x65\x72\x6e\x65"
+ "\x74\x2d\x44\x72\x61\x66\x74\x20"
+ "\x6f\x72\x20\x52\x46\x43\x20\x61"
+ "\x6e\x64\x20\x61\x6e\x79\x20\x73"
+ "\x74\x61\x74\x65\x6d\x65\x6e\x74"
+ "\x20\x6d\x61\x64\x65\x20\x77\x69"
+ "\x74\x68\x69\x6e\x20\x74\x68\x65"
+ "\x20\x63\x6f\x6e\x74\x65\x78\x74"
+ "\x20\x6f\x66\x20\x61\x6e\x20\x49"
+ "\x45\x54\x46\x20\x61\x63\x74\x69"
+ "\x76\x69\x74\x79\x20\x69\x73\x20"
+ "\x63\x6f\x6e\x73\x69\x64\x65\x72"
+ "\x65\x64\x20\x61\x6e\x20\x22\x49"
+ "\x45\x54\x46\x20\x43\x6f\x6e\x74"
+ "\x72\x69\x62\x75\x74\x69\x6f\x6e"
+ "\x22\x2e\x20\x53\x75\x63\x68\x20"
+ "\x73\x74\x61\x74\x65\x6d\x65\x6e"
+ "\x74\x73\x20\x69\x6e\x63\x6c\x75"
+ "\x64\x65\x20\x6f\x72\x61\x6c\x20"
+ "\x73\x74\x61\x74\x65\x6d\x65\x6e"
+ "\x74\x73\x20\x69\x6e\x20\x49\x45"
+ "\x54\x46\x20\x73\x65\x73\x73\x69"
+ "\x6f\x6e\x73\x2c\x20\x61\x73\x20"
+ "\x77\x65\x6c\x6c\x20\x61\x73\x20"
+ "\x77\x72\x69\x74\x74\x65\x6e\x20"
+ "\x61\x6e\x64\x20\x65\x6c\x65\x63"
+ "\x74\x72\x6f\x6e\x69\x63\x20\x63"
+ "\x6f\x6d\x6d\x75\x6e\x69\x63\x61"
+ "\x74\x69\x6f\x6e\x73\x20\x6d\x61"
+ "\x64\x65\x20\x61\x74\x20\x61\x6e"
+ "\x79\x20\x74\x69\x6d\x65\x20\x6f"
+ "\x72\x20\x70\x6c\x61\x63\x65\x2c"
+ "\x20\x77\x68\x69\x63\x68\x20\x61"
+ "\x72\x65\x20\x61\x64\x64\x72\x65"
+ "\x73\x73\x65\x64\x20\x74\x6f",
+ .ctext = "\xf9\xab\x7a\x4a\x60\xb8\x5f\xa0"
+ "\x50\xbb\x57\xce\xef\x8c\xc1\xd9"
+ "\x24\x15\xb3\x67\x5e\x7f\x01\xf6"
+ "\x1c\x22\xf6\xe5\x71\xb1\x43\x64"
+ "\x63\x05\xd5\xfc\x5c\x3d\xc0\x0e"
+ "\x23\xef\xd3\x3b\xd9\xdc\x7f\xa8"
+ "\x58\x26\xb3\xd0\xc2\xd5\x04\x3f"
+ "\x0a\x0e\x8f\x17\xe4\xcd\xf7\x2a"
+ "\xb4\x2c\x09\xe4\x47\xec\x8b\xfb"
+ "\x59\x37\x7a\xa1\xd0\x04\x7e\xaa"
+ "\xf1\x98\x5f\x24\x3d\x72\x9a\x43"
+ "\xa4\x36\x51\x92\x22\x87\xff\x26"
+ "\xce\x9d\xeb\x59\x78\x84\x5e\x74"
+ "\x97\x2e\x63\xc0\xef\x29\xf7\x8a"
+ "\xb9\xee\x35\x08\x77\x6a\x35\x9a"
+ "\x3e\xe6\x4f\x06\x03\x74\x1b\xc1"
+ "\x5b\xb3\x0b\x89\x11\x07\xd3\xb7"
+ "\x53\xd6\x25\x04\xd9\x35\xb4\x5d"
+ "\x4c\x33\x5a\xc2\x42\x4c\xe6\xa4"
+ "\x97\x6e\x0e\xd2\xb2\x8b\x2f\x7f"
+ "\x28\xe5\x9f\xac\x4b\x2e\x02\xab"
+ "\x85\xfa\xa9\x0d\x7c\x2d\x10\xe6"
+ "\x91\xab\x55\x63\xf0\xde\x3a\x94"
+ "\x25\x08\x10\x03\xc2\x68\xd1\xf4"
+ "\xaf\x7d\x9c\x99\xf7\x86\x96\x30"
+ "\x60\xfc\x0b\xe6\xa8\x80\x15\xb0"
+ "\x81\xb1\x0c\xbe\xb9\x12\x18\x25"
+ "\xe9\x0e\xb1\xe7\x23\xb2\xef\x4a"
+ "\x22\x8f\xc5\x61\x89\xd4\xe7\x0c"
+ "\x64\x36\x35\x61\xb6\x34\x60\xf7"
+ "\x7b\x61\x37\x37\x12\x10\xa2\xf6"
+ "\x7e\xdb\x7f\x39\x3f\xb6\x8e\x89"
+ "\x9e\xf3\xfe\x13\x98\xbb\x66\x5a"
+ "\xec\xea\xab\x3f\x9c\x87\xc4\x8c"
+ "\x8a\x04\x18\x49\xfc\x77\x11\x50"
+ "\x16\xe6\x71\x2b\xee\xc0\x9c\xb6"
+ "\x87\xfd\x80\xff\x0b\x1d\x73\x38"
+ "\xa4\x1d\x6f\xae\xe4\x12\xd7\x93"
+ "\x9d\xcd\x38\x26\x09\x40\x52\xcd"
+ "\x67\x01\x67\x26\xe0\x3e\x98\xa8"
+ "\xe8\x1a\x13\x41\xbb\x90\x4d\x87"
+ "\xbb\x42\x82\x39\xce\x3a\xd0\x18"
+ "\x6d\x7b\x71\x8f\xbb\x2c\x6a\xd1"
+ "\xbd\xf5\xc7\x8a\x7e\xe1\x1e\x0f"
+ "\x0d\x0d\x13\x7c\xd9\xd8\x3c\x91"
+ "\xab\xff\x1f\x12\xc3\xee\xe5\x65"
+ "\x12\x8d\x7b\x61\xe5\x1f\x98",
+ .len = 375,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 375 - 20, 4, 16 },
+ }, { /* Derived from a ChaCha20 test vector, via the process above */
+ .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
+ "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
+ "\x47\x39\x17\xc1\x40\x2b\x80\x09"
+ "\x9d\xca\x5c\xbc\x20\x70\x75\xc0",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x02\x76\x5a\x2e\x63"
+ "\x33\x9f\xc9\x9a\x66\x32\x0d\xb7"
+ "\x2a\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x27\x54\x77\x61\x73\x20\x62\x72"
+ "\x69\x6c\x6c\x69\x67\x2c\x20\x61"
+ "\x6e\x64\x20\x74\x68\x65\x20\x73"
+ "\x6c\x69\x74\x68\x79\x20\x74\x6f"
+ "\x76\x65\x73\x0a\x44\x69\x64\x20"
+ "\x67\x79\x72\x65\x20\x61\x6e\x64"
+ "\x20\x67\x69\x6d\x62\x6c\x65\x20"
+ "\x69\x6e\x20\x74\x68\x65\x20\x77"
+ "\x61\x62\x65\x3a\x0a\x41\x6c\x6c"
+ "\x20\x6d\x69\x6d\x73\x79\x20\x77"
+ "\x65\x72\x65\x20\x74\x68\x65\x20"
+ "\x62\x6f\x72\x6f\x67\x6f\x76\x65"
+ "\x73\x2c\x0a\x41\x6e\x64\x20\x74"
+ "\x68\x65\x20\x6d\x6f\x6d\x65\x20"
+ "\x72\x61\x74\x68\x73\x20\x6f\x75"
+ "\x74\x67\x72\x61\x62\x65\x2e",
+ .ctext = "\x95\xb9\x51\xe7\x8f\xb4\xa4\x03"
+ "\xca\x37\xcc\xde\x60\x1d\x8c\xe2"
+ "\xf1\xbb\x8a\x13\x7f\x61\x85\xcc"
+ "\xad\xf4\xf0\xdc\x86\xa6\x1e\x10"
+ "\xbc\x8e\xcb\x38\x2b\xa5\xc8\x8f"
+ "\xaa\x03\x3d\x53\x4a\x42\xb1\x33"
+ "\xfc\xd3\xef\xf0\x8e\x7e\x10\x9c"
+ "\x6f\x12\x5e\xd4\x96\xfe\x5b\x08"
+ "\xb6\x48\xf0\x14\x74\x51\x18\x7c"
+ "\x07\x92\xfc\xac\x9d\xf1\x94\xc0"
+ "\xc1\x9d\xc5\x19\x43\x1f\x1d\xbb"
+ "\x07\xf0\x1b\x14\x25\x45\xbb\xcb"
+ "\x5c\xe2\x8b\x28\xf3\xcf\x47\x29"
+ "\x27\x79\x67\x24\xa6\x87\xc2\x11"
+ "\x65\x03\xfa\x45\xf7\x9e\x53\x7a"
+ "\x99\xf1\x82\x25\x4f\x8d\x07",
+ .len = 127,
+ }, { /* Derived from a ChaCha20 test vector, via the process above */
+ .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
+ "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
+ "\x47\x39\x17\xc1\x40\x2b\x80\x09"
+ "\x9d\xca\x5c\xbc\x20\x70\x75\xc0",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x01\x31\x58\xa3\x5a"
+ "\x25\x5d\x05\x17\x58\xe9\x5e\xd4"
+ "\x1c\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x49\xee\xe0\xdc\x24\x90\x40\xcd"
+ "\xc5\x40\x8f\x47\x05\xbc\xdd\x81"
+ "\x47\xc6\x8d\xe6\xb1\x8f\xd7\xcb"
+ "\x09\x0e\x6e\x22\x48\x1f\xbf\xb8"
+ "\x5c\xf7\x1e\x8a\xc1\x23\xf2\xd4"
+ "\x19\x4b\x01\x0f\x4e\xa4\x43\xce"
+ "\x01\xc6\x67\xda\x03\x91\x18\x90"
+ "\xa5\xa4\x8e\x45\x03\xb3\x2d\xac"
+ "\x74\x92\xd3\x53\x47\xc8\xdd\x25"
+ "\x53\x6c\x02\x03\x87\x0d\x11\x0c"
+ "\x58\xe3\x12\x18\xfd\x2a\x5b\x40"
+ "\x0c\x30\xf0\xb8\x3f\x43\xce\xae"
+ "\x65\x3a\x7d\x7c\xf4\x54\xaa\xcc"
+ "\x33\x97\xc3\x77\xba\xc5\x70\xde"
+ "\xd7\xd5\x13\xa5\x65\xc4\x5f\x0f"
+ "\x46\x1a\x0d\x97\xb5\xf3\xbb\x3c"
+ "\x84\x0f\x2b\xc5\xaa\xea\xf2\x6c"
+ "\xc9\xb5\x0c\xee\x15\xf3\x7d\xbe"
+ "\x9f\x7b\x5a\xa6\xae\x4f\x83\xb6"
+ "\x79\x49\x41\xf4\x58\x18\xcb\x86"
+ "\x7f\x30\x0e\xf8\x7d\x44\x36\xea"
+ "\x75\xeb\x88\x84\x40\x3c\xad\x4f"
+ "\x6f\x31\x6b\xaa\x5d\xe5\xa5\xc5"
+ "\x21\x66\xe9\xa7\xe3\xb2\x15\x88"
+ "\x78\xf6\x79\xa1\x59\x47\x12\x4e"
+ "\x9f\x9f\x64\x1a\xa0\x22\x5b\x08"
+ "\xbe\x7c\x36\xc2\x2b\x66\x33\x1b"
+ "\xdd\x60\x71\xf7\x47\x8c\x61\xc3"
+ "\xda\x8a\x78\x1e\x16\xfa\x1e\x86"
+ "\x81\xa6\x17\x2a\xa7\xb5\xc2\xe7"
+ "\xa4\xc7\x42\xf1\xcf\x6a\xca\xb4"
+ "\x45\xcf\xf3\x93\xf0\xe7\xea\xf6"
+ "\xf4\xe6\x33\x43\x84\x93\xa5\x67"
+ "\x9b\x16\x58\x58\x80\x0f\x2b\x5c"
+ "\x24\x74\x75\x7f\x95\x81\xb7\x30"
+ "\x7a\x33\xa7\xf7\x94\x87\x32\x27"
+ "\x10\x5d\x14\x4c\x43\x29\xdd\x26"
+ "\xbd\x3e\x3c\x0e\xfe\x0e\xa5\x10"
+ "\xea\x6b\x64\xfd\x73\xc6\xed\xec"
+ "\xa8\xc9\xbf\xb3\xba\x0b\x4d\x07"
+ "\x70\xfc\x16\xfd\x79\x1e\xd7\xc5"
+ "\x49\x4e\x1c\x8b\x8d\x79\x1b\xb1"
+ "\xec\xca\x60\x09\x4c\x6a\xd5\x09"
+ "\x49\x46\x00\x88\x22\x8d\xce\xea"
+ "\xb1\x17\x11\xde\x42\xd2\x23\xc1"
+ "\x72\x11\xf5\x50\x73\x04\x40\x47"
+ "\xf9\x5d\xe7\xa7\x26\xb1\x7e\xb0"
+ "\x3f\x58\xc1\x52\xab\x12\x67\x9d"
+ "\x3f\x43\x4b\x68\xd4\x9c\x68\x38"
+ "\x07\x8a\x2d\x3e\xf3\xaf\x6a\x4b"
+ "\xf9\xe5\x31\x69\x22\xf9\xa6\x69"
+ "\xc6\x9c\x96\x9a\x12\x35\x95\x1d"
+ "\x95\xd5\xdd\xbe\xbf\x93\x53\x24"
+ "\xfd\xeb\xc2\x0a\x64\xb0\x77\x00"
+ "\x6f\x88\xc4\x37\x18\x69\x7c\xd7"
+ "\x41\x92\x55\x4c\x03\xa1\x9a\x4b"
+ "\x15\xe5\xdf\x7f\x37\x33\x72\xc1"
+ "\x8b\x10\x67\xa3\x01\x57\x94\x25"
+ "\x7b\x38\x71\x7e\xdd\x1e\xcc\x73"
+ "\x55\xd2\x8e\xeb\x07\xdd\xf1\xda"
+ "\x58\xb1\x47\x90\xfe\x42\x21\x72"
+ "\xa3\x54\x7a\xa0\x40\xec\x9f\xdd"
+ "\xc6\x84\x6e\xca\xae\xe3\x68\xb4"
+ "\x9d\xe4\x78\xff\x57\xf2\xf8\x1b"
+ "\x03\xa1\x31\xd9\xde\x8d\xf5\x22"
+ "\x9c\xdd\x20\xa4\x1e\x27\xb1\x76"
+ "\x4f\x44\x55\xe2\x9b\xa1\x9c\xfe"
+ "\x54\xf7\x27\x1b\xf4\xde\x02\xf5"
+ "\x1b\x55\x48\x5c\xdc\x21\x4b\x9e"
+ "\x4b\x6e\xed\x46\x23\xdc\x65\xb2"
+ "\xcf\x79\x5f\x28\xe0\x9e\x8b\xe7"
+ "\x4c\x9d\x8a\xff\xc1\xa6\x28\xb8"
+ "\x65\x69\x8a\x45\x29\xef\x74\x85"
+ "\xde\x79\xc7\x08\xae\x30\xb0\xf4"
+ "\xa3\x1d\x51\x41\xab\xce\xcb\xf6"
+ "\xb5\xd8\x6d\xe0\x85\xe1\x98\xb3"
+ "\x43\xbb\x86\x83\x0a\xa0\xf5\xb7"
+ "\x04\x0b\xfa\x71\x1f\xb0\xf6\xd9"
+ "\x13\x00\x15\xf0\xc7\xeb\x0d\x5a"
+ "\x9f\xd7\xb9\x6c\x65\x14\x22\x45"
+ "\x6e\x45\x32\x3e\x7e\x60\x1a\x12"
+ "\x97\x82\x14\xfb\xaa\x04\x22\xfa"
+ "\xa0\xe5\x7e\x8c\x78\x02\x48\x5d"
+ "\x78\x33\x5a\x7c\xad\xdb\x29\xce"
+ "\xbb\x8b\x61\xa4\xb7\x42\xe2\xac"
+ "\x8b\x1a\xd9\x2f\x0b\x8b\x62\x21"
+ "\x83\x35\x7e\xad\x73\xc2\xb5\x6c"
+ "\x10\x26\x38\x07\xe5\xc7\x36\x80"
+ "\xe2\x23\x12\x61\xf5\x48\x4b\x2b"
+ "\xc5\xdf\x15\xd9\x87\x01\xaa\xac"
+ "\x1e\x7c\xad\x73\x78\x18\x63\xe0"
+ "\x8b\x9f\x81\xd8\x12\x6a\x28\x10"
+ "\xbe\x04\x68\x8a\x09\x7c\x1b\x1c"
+ "\x83\x66\x80\x47\x80\xe8\xfd\x35"
+ "\x1c\x97\x6f\xae\x49\x10\x66\xcc"
+ "\xc6\xd8\xcc\x3a\x84\x91\x20\x77"
+ "\x72\xe4\x24\xd2\x37\x9f\xc5\xc9"
+ "\x25\x94\x10\x5f\x40\x00\x64\x99"
+ "\xdc\xae\xd7\x21\x09\x78\x50\x15"
+ "\xac\x5f\xc6\x2c\xa2\x0b\xa9\x39"
+ "\x87\x6e\x6d\xab\xde\x08\x51\x16"
+ "\xc7\x13\xe9\xea\xed\x06\x8e\x2c"
+ "\xf8\x37\x8c\xf0\xa6\x96\x8d\x43"
+ "\xb6\x98\x37\xb2\x43\xed\xde\xdf"
+ "\x89\x1a\xe7\xeb\x9d\xa1\x7b\x0b"
+ "\x77\xb0\xe2\x75\xc0\xf1\x98\xd9"
+ "\x80\x55\xc9\x34\x91\xd1\x59\xe8"
+ "\x4b\x0f\xc1\xa9\x4b\x7a\x84\x06"
+ "\x20\xa8\x5d\xfa\xd1\xde\x70\x56"
+ "\x2f\x9e\x91\x9c\x20\xb3\x24\xd8"
+ "\x84\x3d\xe1\x8c\x7e\x62\x52\xe5"
+ "\x44\x4b\x9f\xc2\x93\x03\xea\x2b"
+ "\x59\xc5\xfa\x3f\x91\x2b\xbb\x23"
+ "\xf5\xb2\x7b\xf5\x38\xaf\xb3\xee"
+ "\x63\xdc\x7b\xd1\xff\xaa\x8b\xab"
+ "\x82\x6b\x37\x04\xeb\x74\xbe\x79"
+ "\xb9\x83\x90\xef\x20\x59\x46\xff"
+ "\xe9\x97\x3e\x2f\xee\xb6\x64\x18"
+ "\x38\x4c\x7a\x4a\xf9\x61\xe8\x9a"
+ "\xa1\xb5\x01\xa6\x47\xd3\x11\xd4"
+ "\xce\xd3\x91\x49\x88\xc7\xb8\x4d"
+ "\xb1\xb9\x07\x6d\x16\x72\xae\x46"
+ "\x5e\x03\xa1\x4b\xb6\x02\x30\xa8"
+ "\x3d\xa9\x07\x2a\x7c\x19\xe7\x62"
+ "\x87\xe3\x82\x2f\x6f\xe1\x09\xd9"
+ "\x94\x97\xea\xdd\x58\x9e\xae\x76"
+ "\x7e\x35\xe5\xb4\xda\x7e\xf4\xde"
+ "\xf7\x32\x87\xcd\x93\xbf\x11\x56"
+ "\x11\xbe\x08\x74\xe1\x69\xad\xe2"
+ "\xd7\xf8\x86\x75\x8a\x3c\xa4\xbe"
+ "\x70\xa7\x1b\xfc\x0b\x44\x2a\x76"
+ "\x35\xea\x5d\x85\x81\xaf\x85\xeb"
+ "\xa0\x1c\x61\xc2\xf7\x4f\xa5\xdc"
+ "\x02\x7f\xf6\x95\x40\x6e\x8a\x9a"
+ "\xf3\x5d\x25\x6e\x14\x3a\x22\xc9"
+ "\x37\x1c\xeb\x46\x54\x3f\xa5\x91"
+ "\xc2\xb5\x8c\xfe\x53\x08\x97\x32"
+ "\x1b\xb2\x30\x27\xfe\x25\x5d\xdc"
+ "\x08\x87\xd0\xe5\x94\x1a\xd4\xf1"
+ "\xfe\xd6\xb4\xa3\xe6\x74\x81\x3c"
+ "\x1b\xb7\x31\xa7\x22\xfd\xd4\xdd"
+ "\x20\x4e\x7c\x51\xb0\x60\x73\xb8"
+ "\x9c\xac\x91\x90\x7e\x01\xb0\xe1"
+ "\x8a\x2f\x75\x1c\x53\x2a\x98\x2a"
+ "\x06\x52\x95\x52\xb2\xe9\x25\x2e"
+ "\x4c\xe2\x5a\x00\xb2\x13\x81\x03"
+ "\x77\x66\x0d\xa5\x99\xda\x4e\x8c"
+ "\xac\xf3\x13\x53\x27\x45\xaf\x64"
+ "\x46\xdc\xea\x23\xda\x97\xd1\xab"
+ "\x7d\x6c\x30\x96\x1f\xbc\x06\x34"
+ "\x18\x0b\x5e\x21\x35\x11\x8d\x4c"
+ "\xe0\x2d\xe9\x50\x16\x74\x81\xa8"
+ "\xb4\x34\xb9\x72\x42\xa6\xcc\xbc"
+ "\xca\x34\x83\x27\x10\x5b\x68\x45"
+ "\x8f\x52\x22\x0c\x55\x3d\x29\x7c"
+ "\xe3\xc0\x66\x05\x42\x91\x5f\x58"
+ "\xfe\x4a\x62\xd9\x8c\xa9\x04\x19"
+ "\x04\xa9\x08\x4b\x57\xfc\x67\x53"
+ "\x08\x7c\xbc\x66\x8a\xb0\xb6\x9f"
+ "\x92\xd6\x41\x7c\x5b\x2a\x00\x79"
+ "\x72",
+ .ctext = "\x3a\x92\xee\x53\x31\xaf\x2b\x60"
+ "\x5f\x55\x8d\x00\x5d\xfc\x74\x97"
+ "\x28\x54\xf4\xa5\x75\xf1\x9b\x25"
+ "\x62\x1c\xc0\xe0\x13\xc8\x87\x53"
+ "\xd0\xf3\xa7\x97\x1f\x3b\x1e\xea"
+ "\xe0\xe5\x2a\xd1\xdd\xa4\x3b\x50"
+ "\x45\xa3\x0d\x7e\x1b\xc9\xa0\xad"
+ "\xb9\x2c\x54\xa6\xc7\x55\x16\xd0"
+ "\xc5\x2e\x02\x44\x35\xd0\x7e\x67"
+ "\xf2\xc4\x9b\xcd\x95\x10\xcc\x29"
+ "\x4b\xfa\x86\x87\xbe\x40\x36\xbe"
+ "\xe1\xa3\x52\x89\x55\x20\x9b\xc2"
+ "\xab\xf2\x31\x34\x16\xad\xc8\x17"
+ "\x65\x24\xc0\xff\x12\x37\xfe\x5a"
+ "\x62\x3b\x59\x47\x6c\x5f\x3a\x8e"
+ "\x3b\xd9\x30\xc8\x7f\x2f\x88\xda"
+ "\x80\xfd\x02\xda\x7f\x9a\x7a\x73"
+ "\x59\xc5\x34\x09\x9a\x11\xcb\xa7"
+ "\xfc\xf6\xa1\xa0\x60\xfb\x43\xbb"
+ "\xf1\xe9\xd7\xc6\x79\x27\x4e\xff"
+ "\x22\xb4\x24\xbf\x76\xee\x47\xb9"
+ "\x6d\x3f\x8b\xb0\x9c\x3c\x43\xdd"
+ "\xff\x25\x2e\x6d\xa4\x2b\xfb\x5d"
+ "\x1b\x97\x6c\x55\x0a\x82\x7a\x7b"
+ "\x94\x34\xc2\xdb\x2f\x1f\xc1\xea"
+ "\xd4\x4d\x17\x46\x3b\x51\x69\x09"
+ "\xe4\x99\x32\x25\xfd\x94\xaf\xfb"
+ "\x10\xf7\x4f\xdd\x0b\x3c\x8b\x41"
+ "\xb3\x6a\xb7\xd1\x33\xa8\x0c\x2f"
+ "\x62\x4c\x72\x11\xd7\x74\xe1\x3b"
+ "\x38\x43\x66\x7b\x6c\x36\x48\xe7"
+ "\xe3\xe7\x9d\xb9\x42\x73\x7a\x2a"
+ "\x89\x20\x1a\x41\x80\x03\xf7\x8f"
+ "\x61\x78\x13\xbf\xfe\x50\xf5\x04"
+ "\x52\xf9\xac\x47\xf8\x62\x4b\xb2"
+ "\x24\xa9\xbf\x64\xb0\x18\x69\xd2"
+ "\xf5\xe4\xce\xc8\xb1\x87\x75\xd6"
+ "\x2c\x24\x79\x00\x7d\x26\xfb\x44"
+ "\xe7\x45\x7a\xee\x58\xa5\x83\xc1"
+ "\xb4\x24\xab\x23\x2f\x4d\xd7\x4f"
+ "\x1c\xc7\xaa\xa9\x50\xf4\xa3\x07"
+ "\x12\x13\x89\x74\xdc\x31\x6a\xb2"
+ "\xf5\x0f\x13\x8b\xb9\xdb\x85\x1f"
+ "\xf5\xbc\x88\xd9\x95\xea\x31\x6c"
+ "\x36\x60\xb6\x49\xdc\xc4\xf7\x55"
+ "\x3f\x21\xc1\xb5\x92\x18\x5e\xbc"
+ "\x9f\x87\x7f\xe7\x79\x25\x40\x33"
+ "\xd6\xb9\x33\xd5\x50\xb3\xc7\x89"
+ "\x1b\x12\xa0\x46\xdd\xa7\xd8\x3e"
+ "\x71\xeb\x6f\x66\xa1\x26\x0c\x67"
+ "\xab\xb2\x38\x58\x17\xd8\x44\x3b"
+ "\x16\xf0\x8e\x62\x8d\x16\x10\x00"
+ "\x32\x8b\xef\xb9\x28\xd3\xc5\xad"
+ "\x0a\x19\xa2\xe4\x03\x27\x7d\x94"
+ "\x06\x18\xcd\xd6\x27\x00\xf9\x1f"
+ "\xb6\xb3\xfe\x96\x35\x5f\xc4\x1c"
+ "\x07\x62\x10\x79\x68\x50\xf1\x7e"
+ "\x29\xe7\xc4\xc4\xe7\xee\x54\xd6"
+ "\x58\x76\x84\x6d\x8d\xe4\x59\x31"
+ "\xe9\xf4\xdc\xa1\x1f\xe5\x1a\xd6"
+ "\xe6\x64\x46\xf5\x77\x9c\x60\x7a"
+ "\x5e\x62\xe3\x0a\xd4\x9f\x7a\x2d"
+ "\x7a\xa5\x0a\x7b\x29\x86\x7a\x74"
+ "\x74\x71\x6b\xca\x7d\x1d\xaa\xba"
+ "\x39\x84\x43\x76\x35\xfe\x4f\x9b"
+ "\xbb\xbb\xb5\x6a\x32\xb5\x5d\x41"
+ "\x51\xf0\x5b\x68\x03\x47\x4b\x8a"
+ "\xca\x88\xf6\x37\xbd\x73\x51\x70"
+ "\x66\xfe\x9e\x5f\x21\x9c\xf3\xdd"
+ "\xc3\xea\x27\xf9\x64\x94\xe1\x19"
+ "\xa0\xa9\xab\x60\xe0\x0e\xf7\x78"
+ "\x70\x86\xeb\xe0\xd1\x5c\x05\xd3"
+ "\xd7\xca\xe0\xc0\x47\x47\x34\xee"
+ "\x11\xa3\xa3\x54\x98\xb7\x49\x8e"
+ "\x84\x28\x70\x2c\x9e\xfb\x55\x54"
+ "\x4d\xf8\x86\xf7\x85\x7c\xbd\xf3"
+ "\x17\xd8\x47\xcb\xac\xf4\x20\x85"
+ "\x34\x66\xad\x37\x2d\x5e\x52\xda"
+ "\x8a\xfe\x98\x55\x30\xe7\x2d\x2b"
+ "\x19\x10\x8e\x7b\x66\x5e\xdc\xe0"
+ "\x45\x1f\x7b\xb4\x08\xfb\x8f\xf6"
+ "\x8c\x89\x21\x34\x55\x27\xb2\x76"
+ "\xb2\x07\xd9\xd6\x68\x9b\xea\x6b"
+ "\x2d\xb4\xc4\x35\xdd\xd2\x79\xae"
+ "\xc7\xd6\x26\x7f\x12\x01\x8c\xa7"
+ "\xe3\xdb\xa8\xf4\xf7\x2b\xec\x99"
+ "\x11\x00\xf1\x35\x8c\xcf\xd5\xc9"
+ "\xbd\x91\x36\x39\x70\xcf\x7d\x70"
+ "\x47\x1a\xfc\x6b\x56\xe0\x3f\x9c"
+ "\x60\x49\x01\x72\xa9\xaf\x2c\x9c"
+ "\xe8\xab\xda\x8c\x14\x19\xf3\x75"
+ "\x07\x17\x9d\x44\x67\x7a\x2e\xef"
+ "\xb7\x83\x35\x4a\xd1\x3d\x1c\x84"
+ "\x32\xdd\xaa\xea\xca\x1d\xdc\x72"
+ "\x2c\xcc\x43\xcd\x5d\xe3\x21\xa4"
+ "\xd0\x8a\x4b\x20\x12\xa3\xd5\x86"
+ "\x76\x96\xff\x5f\x04\x57\x0f\xe6"
+ "\xba\xe8\x76\x50\x0c\x64\x1d\x83"
+ "\x9c\x9b\x9a\x9a\x58\x97\x9c\x5c"
+ "\xb4\xa4\xa6\x3e\x19\xeb\x8f\x5a"
+ "\x61\xb2\x03\x7b\x35\x19\xbe\xa7"
+ "\x63\x0c\xfd\xdd\xf9\x90\x6c\x08"
+ "\x19\x11\xd3\x65\x4a\xf5\x96\x92"
+ "\x59\xaa\x9c\x61\x0c\x29\xa7\xf8"
+ "\x14\x39\x37\xbf\x3c\xf2\x16\x72"
+ "\x02\xfa\xa2\xf3\x18\x67\x5d\xcb"
+ "\xdc\x4d\xbb\x96\xff\x70\x08\x2d"
+ "\xc2\xa8\x52\xe1\x34\x5f\x72\xfe"
+ "\x64\xbf\xca\xa7\x74\x38\xfb\x74"
+ "\x55\x9c\xfa\x8a\xed\xfb\x98\xeb"
+ "\x58\x2e\x6c\xe1\x52\x76\x86\xd7"
+ "\xcf\xa1\xa4\xfc\xb2\x47\x41\x28"
+ "\xa3\xc1\xe5\xfd\x53\x19\x28\x2b"
+ "\x37\x04\x65\x96\x99\x7a\x28\x0f"
+ "\x07\x68\x4b\xc7\x52\x0a\x55\x35"
+ "\x40\x19\x95\x61\xe8\x59\x40\x1f"
+ "\x9d\xbf\x78\x7d\x8f\x84\xff\x6f"
+ "\xd0\xd5\x63\xd2\x22\xbd\xc8\x4e"
+ "\xfb\xe7\x9f\x06\xe6\xe7\x39\x6d"
+ "\x6a\x96\x9f\xf0\x74\x7e\xc9\x35"
+ "\xb7\x26\xb8\x1c\x0a\xa6\x27\x2c"
+ "\xa2\x2b\xfe\xbe\x0f\x07\x73\xae"
+ "\x7f\x7f\x54\xf5\x7c\x6a\x0a\x56"
+ "\x49\xd4\x81\xe5\x85\x53\x99\x1f"
+ "\x95\x05\x13\x58\x8d\x0e\x1b\x90"
+ "\xc3\x75\x48\x64\x58\x98\x67\x84"
+ "\xae\xe2\x21\xa2\x8a\x04\x0a\x0b"
+ "\x61\xaa\xb0\xd4\x28\x60\x7a\xf8"
+ "\xbc\x52\xfb\x24\x7f\xed\x0d\x2a"
+ "\x0a\xb2\xf9\xc6\x95\xb5\x11\xc9"
+ "\xf4\x0f\x26\x11\xcf\x2a\x57\x87"
+ "\x7a\xf3\xe7\x94\x65\xc2\xb5\xb3"
+ "\xab\x98\xe3\xc1\x2b\x59\x19\x7c"
+ "\xd6\xf3\xf9\xbf\xff\x6d\xc6\x82"
+ "\x13\x2f\x4a\x2e\xcd\x26\xfe\x2d"
+ "\x01\x70\xf4\xc2\x7f\x1f\x4c\xcb"
+ "\x47\x77\x0c\xa0\xa3\x03\xec\xda"
+ "\xa9\xbf\x0d\x2d\xae\xe4\xb8\x7b"
+ "\xa9\xbc\x08\xb4\x68\x2e\xc5\x60"
+ "\x8d\x87\x41\x2b\x0f\x69\xf0\xaf"
+ "\x5f\xba\x72\x20\x0f\x33\xcd\x6d"
+ "\x36\x7d\x7b\xd5\x05\xf1\x4b\x05"
+ "\xc4\xfc\x7f\x80\xb9\x4d\xbd\xf7"
+ "\x7c\x84\x07\x01\xc2\x40\x66\x5b"
+ "\x98\xc7\x2c\xe3\x97\xfa\xdf\x87"
+ "\xa0\x1f\xe9\x21\x42\x0f\x3b\xeb"
+ "\x89\x1c\x3b\xca\x83\x61\x77\x68"
+ "\x84\xbb\x60\x87\x38\x2e\x25\xd5"
+ "\x9e\x04\x41\x70\xac\xda\xc0\x9c"
+ "\x9c\x69\xea\x8d\x4e\x55\x2a\x29"
+ "\xed\x05\x4b\x7b\x73\x71\x90\x59"
+ "\x4d\xc8\xd8\x44\xf0\x4c\xe1\x5e"
+ "\x84\x47\x55\xcc\x32\x3f\xe7\x97"
+ "\x42\xc6\x32\xac\x40\xe5\xa5\xc7"
+ "\x8b\xed\xdb\xf7\x83\xd6\xb1\xc2"
+ "\x52\x5e\x34\xb7\xeb\x6e\xd9\xfc"
+ "\xe5\x93\x9a\x97\x3e\xb0\xdc\xd9"
+ "\xd7\x06\x10\xb6\x1d\x80\x59\xdd"
+ "\x0d\xfe\x64\x35\xcd\x5d\xec\xf0"
+ "\xba\xd0\x34\xc9\x2d\x91\xc5\x17"
+ "\x11",
+ .len = 1281,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 1200, 1, 80 },
+ }, { /* test vector from https://tools.ietf.org/html/draft-arciszewski-xchacha-02#appendix-A.3.2 */
+ .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f",
+ .klen = 32,
+ .iv = "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x58"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x54\x68\x65\x20\x64\x68\x6f\x6c"
+ "\x65\x20\x28\x70\x72\x6f\x6e\x6f"
+ "\x75\x6e\x63\x65\x64\x20\x22\x64"
+ "\x6f\x6c\x65\x22\x29\x20\x69\x73"
+ "\x20\x61\x6c\x73\x6f\x20\x6b\x6e"
+ "\x6f\x77\x6e\x20\x61\x73\x20\x74"
+ "\x68\x65\x20\x41\x73\x69\x61\x74"
+ "\x69\x63\x20\x77\x69\x6c\x64\x20"
+ "\x64\x6f\x67\x2c\x20\x72\x65\x64"
+ "\x20\x64\x6f\x67\x2c\x20\x61\x6e"
+ "\x64\x20\x77\x68\x69\x73\x74\x6c"
+ "\x69\x6e\x67\x20\x64\x6f\x67\x2e"
+ "\x20\x49\x74\x20\x69\x73\x20\x61"
+ "\x62\x6f\x75\x74\x20\x74\x68\x65"
+ "\x20\x73\x69\x7a\x65\x20\x6f\x66"
+ "\x20\x61\x20\x47\x65\x72\x6d\x61"
+ "\x6e\x20\x73\x68\x65\x70\x68\x65"
+ "\x72\x64\x20\x62\x75\x74\x20\x6c"
+ "\x6f\x6f\x6b\x73\x20\x6d\x6f\x72"
+ "\x65\x20\x6c\x69\x6b\x65\x20\x61"
+ "\x20\x6c\x6f\x6e\x67\x2d\x6c\x65"
+ "\x67\x67\x65\x64\x20\x66\x6f\x78"
+ "\x2e\x20\x54\x68\x69\x73\x20\x68"
+ "\x69\x67\x68\x6c\x79\x20\x65\x6c"
+ "\x75\x73\x69\x76\x65\x20\x61\x6e"
+ "\x64\x20\x73\x6b\x69\x6c\x6c\x65"
+ "\x64\x20\x6a\x75\x6d\x70\x65\x72"
+ "\x20\x69\x73\x20\x63\x6c\x61\x73"
+ "\x73\x69\x66\x69\x65\x64\x20\x77"
+ "\x69\x74\x68\x20\x77\x6f\x6c\x76"
+ "\x65\x73\x2c\x20\x63\x6f\x79\x6f"
+ "\x74\x65\x73\x2c\x20\x6a\x61\x63"
+ "\x6b\x61\x6c\x73\x2c\x20\x61\x6e"
+ "\x64\x20\x66\x6f\x78\x65\x73\x20"
+ "\x69\x6e\x20\x74\x68\x65\x20\x74"
+ "\x61\x78\x6f\x6e\x6f\x6d\x69\x63"
+ "\x20\x66\x61\x6d\x69\x6c\x79\x20"
+ "\x43\x61\x6e\x69\x64\x61\x65\x2e",
+ .ctext = "\x45\x59\xab\xba\x4e\x48\xc1\x61"
+ "\x02\xe8\xbb\x2c\x05\xe6\x94\x7f"
+ "\x50\xa7\x86\xde\x16\x2f\x9b\x0b"
+ "\x7e\x59\x2a\x9b\x53\xd0\xd4\xe9"
+ "\x8d\x8d\x64\x10\xd5\x40\xa1\xa6"
+ "\x37\x5b\x26\xd8\x0d\xac\xe4\xfa"
+ "\xb5\x23\x84\xc7\x31\xac\xbf\x16"
+ "\xa5\x92\x3c\x0c\x48\xd3\x57\x5d"
+ "\x4d\x0d\x2c\x67\x3b\x66\x6f\xaa"
+ "\x73\x10\x61\x27\x77\x01\x09\x3a"
+ "\x6b\xf7\xa1\x58\xa8\x86\x42\x92"
+ "\xa4\x1c\x48\xe3\xa9\xb4\xc0\xda"
+ "\xec\xe0\xf8\xd9\x8d\x0d\x7e\x05"
+ "\xb3\x7a\x30\x7b\xbb\x66\x33\x31"
+ "\x64\xec\x9e\x1b\x24\xea\x0d\x6c"
+ "\x3f\xfd\xdc\xec\x4f\x68\xe7\x44"
+ "\x30\x56\x19\x3a\x03\xc8\x10\xe1"
+ "\x13\x44\xca\x06\xd8\xed\x8a\x2b"
+ "\xfb\x1e\x8d\x48\xcf\xa6\xbc\x0e"
+ "\xb4\xe2\x46\x4b\x74\x81\x42\x40"
+ "\x7c\x9f\x43\x1a\xee\x76\x99\x60"
+ "\xe1\x5b\xa8\xb9\x68\x90\x46\x6e"
+ "\xf2\x45\x75\x99\x85\x23\x85\xc6"
+ "\x61\xf7\x52\xce\x20\xf9\xda\x0c"
+ "\x09\xab\x6b\x19\xdf\x74\xe7\x6a"
+ "\x95\x96\x74\x46\xf8\xd0\xfd\x41"
+ "\x5e\x7b\xee\x2a\x12\xa1\x14\xc2"
+ "\x0e\xb5\x29\x2a\xe7\xa3\x49\xae"
+ "\x57\x78\x20\xd5\x52\x0a\x1f\x3f"
+ "\xb6\x2a\x17\xce\x6a\x7e\x68\xfa"
+ "\x7c\x79\x11\x1d\x88\x60\x92\x0b"
+ "\xc0\x48\xef\x43\xfe\x84\x48\x6c"
+ "\xcb\x87\xc2\x5f\x0a\xe0\x45\xf0"
+ "\xcc\xe1\xe7\x98\x9a\x9a\xa2\x20"
+ "\xa2\x8b\xdd\x48\x27\xe7\x51\xa2"
+ "\x4a\x6d\x5c\x62\xd7\x90\xa6\x63"
+ "\x93\xb9\x31\x11\xc1\xa5\x5d\xd7"
+ "\x42\x1a\x10\x18\x49\x74\xc7\xc5",
+ .len = 304,
+ }
+};
+
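+/*
+ * Note on fields used throughout these templates: as interpreted by
+ * testmgr.c, .np is the number of scatterlist fragments the buffer is
+ * split into for the chunked test, .tap gives the size in bytes of each
+ * fragment (the entries sum to .len), and .also_non_np additionally runs
+ * the test on a single contiguous buffer. The 32-byte .iv of the XChaCha
+ * templates is assumed to be the 24-byte XChaCha nonce followed by the
+ * initial block counter in little-endian order, based on the generic
+ * chacha implementation.
+ */
+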
+/*
+ * Same as the XChaCha20 test vectors above, but with the ciphertext
+ * recomputed using XChaCha12 (via a modified libsodium).
+ */
+static const struct cipher_testvec xchacha12_tv_template[] = {
+ {
+ .key = "\x79\xc9\x97\x98\xac\x67\x30\x0b"
+ "\xbb\x27\x04\xc9\x5c\x34\x1e\x32"
+ "\x45\xf3\xdc\xb2\x17\x61\xb9\x8e"
+ "\x52\xff\x45\xb2\x4f\x30\x4f\xc4",
+ .klen = 32,
+ .iv = "\xb3\x3f\xfd\x30\x96\x47\x9b\xcf"
+ "\xbc\x9a\xee\x49\x41\x76\x88\xa0"
+ "\xa2\x55\x4f\x8d\x95\x38\x94\x19"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00",
+ .ctext = "\x1b\x78\x7f\xd7\xa1\x41\x68\xab"
+ "\x3d\x3f\xd1\x7b\x69\x56\xb2\xd5"
+ "\x43\xce\xeb\xaf\x36\xf0\x29\x9d"
+ "\x3a\xfb\x18\xae\x1b",
+ .len = 29,
+ }, {
+ .key = "\x9d\x23\xbd\x41\x49\xcb\x97\x9c"
+ "\xcf\x3c\x5c\x94\xdd\x21\x7e\x98"
+ "\x08\xcb\x0e\x50\xcd\x0f\x67\x81"
+ "\x22\x35\xea\xaf\x60\x1d\x62\x32",
+ .klen = 32,
+ .iv = "\xc0\x47\x54\x82\x66\xb7\xc3\x70"
+ "\xd3\x35\x66\xa2\x42\x5c\xbf\x30"
+ "\xd8\x2d\x1e\xaf\x52\x94\x10\x9e"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00",
+ .ctext = "\xfb\x32\x09\x1d\x83\x05\xae\x4c"
+ "\x13\x1f\x12\x71\xf2\xca\xb2\xeb"
+ "\x5b\x83\x14\x7d\x83\xf6\x57\x77"
+ "\x2e\x40\x1f\x92\x2c\xf9\xec\x35"
+ "\x34\x1f\x93\xdf\xfb\x30\xd7\x35"
+ "\x03\x05\x78\xc1\x20\x3b\x7a\xe3"
+ "\x62\xa3\x89\xdc\x11\x11\x45\xa8"
+ "\x82\x89\xa0\xf1\x4e\xc7\x0f\x11"
+ "\x69\xdd\x0c\x84\x2b\x89\x5c\xdc"
+ "\xf0\xde\x01\xef\xc5\x65\x79\x23"
+ "\x87\x67\xd6\x50\xd9\x8d\xd9\x92"
+ "\x54\x5b\x0e",
+ .len = 91,
+ }, {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x67\xc6\x69\x73"
+ "\x51\xff\x4a\xec\x29\xcd\xba\xab"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ctext = "\xdf\x2d\xc6\x21\x2a\x9d\xa1\xbb"
+ "\xc2\x77\x66\x0c\x5c\x46\xef\xa7"
+ "\x79\x1b\xb9\xdf\x55\xe2\xf9\x61"
+ "\x4c\x7b\xa4\x52\x24\xaf\xa2\xda"
+ "\xd1\x8f\x8f\xa2\x9e\x53\x4d\xc4"
+ "\xb8\x55\x98\x08\x7c\x08\xd4\x18"
+ "\x67\x8f\xef\x50\xb1\x5f\xa5\x77"
+ "\x4c\x25\xe7\x86\x26\x42\xca\x44",
+ .len = 64,
+ }, {
+ .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x01",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x02\xf2\xfb\xe3\x46"
+ "\x7c\xc2\x54\xf8\x1b\xe8\xe7\x8d"
+ "\x01\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x41\x6e\x79\x20\x73\x75\x62\x6d"
+ "\x69\x73\x73\x69\x6f\x6e\x20\x74"
+ "\x6f\x20\x74\x68\x65\x20\x49\x45"
+ "\x54\x46\x20\x69\x6e\x74\x65\x6e"
+ "\x64\x65\x64\x20\x62\x79\x20\x74"
+ "\x68\x65\x20\x43\x6f\x6e\x74\x72"
+ "\x69\x62\x75\x74\x6f\x72\x20\x66"
+ "\x6f\x72\x20\x70\x75\x62\x6c\x69"
+ "\x63\x61\x74\x69\x6f\x6e\x20\x61"
+ "\x73\x20\x61\x6c\x6c\x20\x6f\x72"
+ "\x20\x70\x61\x72\x74\x20\x6f\x66"
+ "\x20\x61\x6e\x20\x49\x45\x54\x46"
+ "\x20\x49\x6e\x74\x65\x72\x6e\x65"
+ "\x74\x2d\x44\x72\x61\x66\x74\x20"
+ "\x6f\x72\x20\x52\x46\x43\x20\x61"
+ "\x6e\x64\x20\x61\x6e\x79\x20\x73"
+ "\x74\x61\x74\x65\x6d\x65\x6e\x74"
+ "\x20\x6d\x61\x64\x65\x20\x77\x69"
+ "\x74\x68\x69\x6e\x20\x74\x68\x65"
+ "\x20\x63\x6f\x6e\x74\x65\x78\x74"
+ "\x20\x6f\x66\x20\x61\x6e\x20\x49"
+ "\x45\x54\x46\x20\x61\x63\x74\x69"
+ "\x76\x69\x74\x79\x20\x69\x73\x20"
+ "\x63\x6f\x6e\x73\x69\x64\x65\x72"
+ "\x65\x64\x20\x61\x6e\x20\x22\x49"
+ "\x45\x54\x46\x20\x43\x6f\x6e\x74"
+ "\x72\x69\x62\x75\x74\x69\x6f\x6e"
+ "\x22\x2e\x20\x53\x75\x63\x68\x20"
+ "\x73\x74\x61\x74\x65\x6d\x65\x6e"
+ "\x74\x73\x20\x69\x6e\x63\x6c\x75"
+ "\x64\x65\x20\x6f\x72\x61\x6c\x20"
+ "\x73\x74\x61\x74\x65\x6d\x65\x6e"
+ "\x74\x73\x20\x69\x6e\x20\x49\x45"
+ "\x54\x46\x20\x73\x65\x73\x73\x69"
+ "\x6f\x6e\x73\x2c\x20\x61\x73\x20"
+ "\x77\x65\x6c\x6c\x20\x61\x73\x20"
+ "\x77\x72\x69\x74\x74\x65\x6e\x20"
+ "\x61\x6e\x64\x20\x65\x6c\x65\x63"
+ "\x74\x72\x6f\x6e\x69\x63\x20\x63"
+ "\x6f\x6d\x6d\x75\x6e\x69\x63\x61"
+ "\x74\x69\x6f\x6e\x73\x20\x6d\x61"
+ "\x64\x65\x20\x61\x74\x20\x61\x6e"
+ "\x79\x20\x74\x69\x6d\x65\x20\x6f"
+ "\x72\x20\x70\x6c\x61\x63\x65\x2c"
+ "\x20\x77\x68\x69\x63\x68\x20\x61"
+ "\x72\x65\x20\x61\x64\x64\x72\x65"
+ "\x73\x73\x65\x64\x20\x74\x6f",
+ .ctext = "\xe4\xa6\xc8\x30\xc4\x23\x13\xd6"
+ "\x08\x4d\xc9\xb7\xa5\x64\x7c\xb9"
+ "\x71\xe2\xab\x3e\xa8\x30\x8a\x1c"
+ "\x4a\x94\x6d\x9b\xe0\xb3\x6f\xf1"
+ "\xdc\xe3\x1b\xb3\xa9\x6d\x0d\xd6"
+ "\xd0\xca\x12\xef\xe7\x5f\xd8\x61"
+ "\x3c\x82\xd3\x99\x86\x3c\x6f\x66"
+ "\x02\x06\xdc\x55\xf9\xed\xdf\x38"
+ "\xb4\xa6\x17\x00\x7f\xef\xbf\x4f"
+ "\xf8\x36\xf1\x60\x7e\x47\xaf\xdb"
+ "\x55\x9b\x12\xcb\x56\x44\xa7\x1f"
+ "\xd3\x1a\x07\x3b\x00\xec\xe6\x4c"
+ "\xa2\x43\x27\xdf\x86\x19\x4f\x16"
+ "\xed\xf9\x4a\xf3\x63\x6f\xfa\x7f"
+ "\x78\x11\xf6\x7d\x97\x6f\xec\x6f"
+ "\x85\x0f\x5c\x36\x13\x8d\x87\xe0"
+ "\x80\xb1\x69\x0b\x98\x89\x9c\x4e"
+ "\xf8\xdd\xee\x5c\x0a\x85\xce\xd4"
+ "\xea\x1b\x48\xbe\x08\xf8\xe2\xa8"
+ "\xa5\xb0\x3c\x79\xb1\x15\xb4\xb9"
+ "\x75\x10\x95\x35\x81\x7e\x26\xe6"
+ "\x78\xa4\x88\xcf\xdb\x91\x34\x18"
+ "\xad\xd7\x8e\x07\x7d\xab\x39\xf9"
+ "\xa3\x9e\xa5\x1d\xbb\xed\x61\xfd"
+ "\xdc\xb7\x5a\x27\xfc\xb5\xc9\x10"
+ "\xa8\xcc\x52\x7f\x14\x76\x90\xe7"
+ "\x1b\x29\x60\x74\xc0\x98\x77\xbb"
+ "\xe0\x54\xbb\x27\x49\x59\x1e\x62"
+ "\x3d\xaf\x74\x06\xa4\x42\x6f\xc6"
+ "\x52\x97\xc4\x1d\xc4\x9f\xe2\xe5"
+ "\x38\x57\x91\xd1\xa2\x28\xcc\x40"
+ "\xcc\x70\x59\x37\xfc\x9f\x4b\xda"
+ "\xa0\xeb\x97\x9a\x7d\xed\x14\x5c"
+ "\x9c\xb7\x93\x26\x41\xa8\x66\xdd"
+ "\x87\x6a\xc0\xd3\xc2\xa9\x3e\xae"
+ "\xe9\x72\xfe\xd1\xb3\xac\x38\xea"
+ "\x4d\x15\xa9\xd5\x36\x61\xe9\x96"
+ "\x6c\x23\xf8\x43\xe4\x92\x29\xd9"
+ "\x8b\x78\xf7\x0a\x52\xe0\x19\x5b"
+ "\x59\x69\x5b\x5d\xa1\x53\xc4\x68"
+ "\xe1\xbb\xac\x89\x14\xe2\xe2\x85"
+ "\x41\x18\xf5\xb3\xd1\xfa\x68\x19"
+ "\x44\x78\xdc\xcf\xe7\x88\x2d\x52"
+ "\x5f\x40\xb5\x7e\xf8\x88\xa2\xae"
+ "\x4a\xb2\x07\x35\x9d\x9b\x07\x88"
+ "\xb7\x00\xd0\x0c\xb6\xa0\x47\x59"
+ "\xda\x4e\xc9\xab\x9b\x8a\x7b",
+ .len = 375,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 375 - 20, 4, 16 },
+ }, {
+ .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
+ "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
+ "\x47\x39\x17\xc1\x40\x2b\x80\x09"
+ "\x9d\xca\x5c\xbc\x20\x70\x75\xc0",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x02\x76\x5a\x2e\x63"
+ "\x33\x9f\xc9\x9a\x66\x32\x0d\xb7"
+ "\x2a\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x27\x54\x77\x61\x73\x20\x62\x72"
+ "\x69\x6c\x6c\x69\x67\x2c\x20\x61"
+ "\x6e\x64\x20\x74\x68\x65\x20\x73"
+ "\x6c\x69\x74\x68\x79\x20\x74\x6f"
+ "\x76\x65\x73\x0a\x44\x69\x64\x20"
+ "\x67\x79\x72\x65\x20\x61\x6e\x64"
+ "\x20\x67\x69\x6d\x62\x6c\x65\x20"
+ "\x69\x6e\x20\x74\x68\x65\x20\x77"
+ "\x61\x62\x65\x3a\x0a\x41\x6c\x6c"
+ "\x20\x6d\x69\x6d\x73\x79\x20\x77"
+ "\x65\x72\x65\x20\x74\x68\x65\x20"
+ "\x62\x6f\x72\x6f\x67\x6f\x76\x65"
+ "\x73\x2c\x0a\x41\x6e\x64\x20\x74"
+ "\x68\x65\x20\x6d\x6f\x6d\x65\x20"
+ "\x72\x61\x74\x68\x73\x20\x6f\x75"
+ "\x74\x67\x72\x61\x62\x65\x2e",
+ .ctext = "\xb9\x68\xbc\x6a\x24\xbc\xcc\xd8"
+ "\x9b\x2a\x8d\x5b\x96\xaf\x56\xe3"
+ "\x11\x61\xe7\xa7\x9b\xce\x4e\x7d"
+ "\x60\x02\x48\xac\xeb\xd5\x3a\x26"
+ "\x9d\x77\x3b\xb5\x32\x13\x86\x8e"
+ "\x20\x82\x26\x72\xae\x64\x1b\x7e"
+ "\x2e\x01\x68\xb4\x87\x45\xa1\x24"
+ "\xe4\x48\x40\xf0\xaa\xac\xee\xa9"
+ "\xfc\x31\xad\x9d\x89\xa3\xbb\xd2"
+ "\xe4\x25\x13\xad\x0f\x5e\xdf\x3c"
+ "\x27\xab\xb8\x62\x46\x22\x30\x48"
+ "\x55\x2c\x4e\x84\x78\x1d\x0d\x34"
+ "\x8d\x3c\x91\x0a\x7f\x5b\x19\x9f"
+ "\x97\x05\x4c\xa7\x62\x47\x8b\xc5"
+ "\x44\x2e\x20\x33\xdd\xa0\x82\xa9"
+ "\x25\x76\x37\xe6\x3c\x67\x5b",
+ .len = 127,
+ }, {
+ .key = "\x1c\x92\x40\xa5\xeb\x55\xd3\x8a"
+ "\xf3\x33\x88\x86\x04\xf6\xb5\xf0"
+ "\x47\x39\x17\xc1\x40\x2b\x80\x09"
+ "\x9d\xca\x5c\xbc\x20\x70\x75\xc0",
+ .klen = 32,
+ .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x01\x31\x58\xa3\x5a"
+ "\x25\x5d\x05\x17\x58\xe9\x5e\xd4"
+ "\x1c\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x49\xee\xe0\xdc\x24\x90\x40\xcd"
+ "\xc5\x40\x8f\x47\x05\xbc\xdd\x81"
+ "\x47\xc6\x8d\xe6\xb1\x8f\xd7\xcb"
+ "\x09\x0e\x6e\x22\x48\x1f\xbf\xb8"
+ "\x5c\xf7\x1e\x8a\xc1\x23\xf2\xd4"
+ "\x19\x4b\x01\x0f\x4e\xa4\x43\xce"
+ "\x01\xc6\x67\xda\x03\x91\x18\x90"
+ "\xa5\xa4\x8e\x45\x03\xb3\x2d\xac"
+ "\x74\x92\xd3\x53\x47\xc8\xdd\x25"
+ "\x53\x6c\x02\x03\x87\x0d\x11\x0c"
+ "\x58\xe3\x12\x18\xfd\x2a\x5b\x40"
+ "\x0c\x30\xf0\xb8\x3f\x43\xce\xae"
+ "\x65\x3a\x7d\x7c\xf4\x54\xaa\xcc"
+ "\x33\x97\xc3\x77\xba\xc5\x70\xde"
+ "\xd7\xd5\x13\xa5\x65\xc4\x5f\x0f"
+ "\x46\x1a\x0d\x97\xb5\xf3\xbb\x3c"
+ "\x84\x0f\x2b\xc5\xaa\xea\xf2\x6c"
+ "\xc9\xb5\x0c\xee\x15\xf3\x7d\xbe"
+ "\x9f\x7b\x5a\xa6\xae\x4f\x83\xb6"
+ "\x79\x49\x41\xf4\x58\x18\xcb\x86"
+ "\x7f\x30\x0e\xf8\x7d\x44\x36\xea"
+ "\x75\xeb\x88\x84\x40\x3c\xad\x4f"
+ "\x6f\x31\x6b\xaa\x5d\xe5\xa5\xc5"
+ "\x21\x66\xe9\xa7\xe3\xb2\x15\x88"
+ "\x78\xf6\x79\xa1\x59\x47\x12\x4e"
+ "\x9f\x9f\x64\x1a\xa0\x22\x5b\x08"
+ "\xbe\x7c\x36\xc2\x2b\x66\x33\x1b"
+ "\xdd\x60\x71\xf7\x47\x8c\x61\xc3"
+ "\xda\x8a\x78\x1e\x16\xfa\x1e\x86"
+ "\x81\xa6\x17\x2a\xa7\xb5\xc2\xe7"
+ "\xa4\xc7\x42\xf1\xcf\x6a\xca\xb4"
+ "\x45\xcf\xf3\x93\xf0\xe7\xea\xf6"
+ "\xf4\xe6\x33\x43\x84\x93\xa5\x67"
+ "\x9b\x16\x58\x58\x80\x0f\x2b\x5c"
+ "\x24\x74\x75\x7f\x95\x81\xb7\x30"
+ "\x7a\x33\xa7\xf7\x94\x87\x32\x27"
+ "\x10\x5d\x14\x4c\x43\x29\xdd\x26"
+ "\xbd\x3e\x3c\x0e\xfe\x0e\xa5\x10"
+ "\xea\x6b\x64\xfd\x73\xc6\xed\xec"
+ "\xa8\xc9\xbf\xb3\xba\x0b\x4d\x07"
+ "\x70\xfc\x16\xfd\x79\x1e\xd7\xc5"
+ "\x49\x4e\x1c\x8b\x8d\x79\x1b\xb1"
+ "\xec\xca\x60\x09\x4c\x6a\xd5\x09"
+ "\x49\x46\x00\x88\x22\x8d\xce\xea"
+ "\xb1\x17\x11\xde\x42\xd2\x23\xc1"
+ "\x72\x11\xf5\x50\x73\x04\x40\x47"
+ "\xf9\x5d\xe7\xa7\x26\xb1\x7e\xb0"
+ "\x3f\x58\xc1\x52\xab\x12\x67\x9d"
+ "\x3f\x43\x4b\x68\xd4\x9c\x68\x38"
+ "\x07\x8a\x2d\x3e\xf3\xaf\x6a\x4b"
+ "\xf9\xe5\x31\x69\x22\xf9\xa6\x69"
+ "\xc6\x9c\x96\x9a\x12\x35\x95\x1d"
+ "\x95\xd5\xdd\xbe\xbf\x93\x53\x24"
+ "\xfd\xeb\xc2\x0a\x64\xb0\x77\x00"
+ "\x6f\x88\xc4\x37\x18\x69\x7c\xd7"
+ "\x41\x92\x55\x4c\x03\xa1\x9a\x4b"
+ "\x15\xe5\xdf\x7f\x37\x33\x72\xc1"
+ "\x8b\x10\x67\xa3\x01\x57\x94\x25"
+ "\x7b\x38\x71\x7e\xdd\x1e\xcc\x73"
+ "\x55\xd2\x8e\xeb\x07\xdd\xf1\xda"
+ "\x58\xb1\x47\x90\xfe\x42\x21\x72"
+ "\xa3\x54\x7a\xa0\x40\xec\x9f\xdd"
+ "\xc6\x84\x6e\xca\xae\xe3\x68\xb4"
+ "\x9d\xe4\x78\xff\x57\xf2\xf8\x1b"
+ "\x03\xa1\x31\xd9\xde\x8d\xf5\x22"
+ "\x9c\xdd\x20\xa4\x1e\x27\xb1\x76"
+ "\x4f\x44\x55\xe2\x9b\xa1\x9c\xfe"
+ "\x54\xf7\x27\x1b\xf4\xde\x02\xf5"
+ "\x1b\x55\x48\x5c\xdc\x21\x4b\x9e"
+ "\x4b\x6e\xed\x46\x23\xdc\x65\xb2"
+ "\xcf\x79\x5f\x28\xe0\x9e\x8b\xe7"
+ "\x4c\x9d\x8a\xff\xc1\xa6\x28\xb8"
+ "\x65\x69\x8a\x45\x29\xef\x74\x85"
+ "\xde\x79\xc7\x08\xae\x30\xb0\xf4"
+ "\xa3\x1d\x51\x41\xab\xce\xcb\xf6"
+ "\xb5\xd8\x6d\xe0\x85\xe1\x98\xb3"
+ "\x43\xbb\x86\x83\x0a\xa0\xf5\xb7"
+ "\x04\x0b\xfa\x71\x1f\xb0\xf6\xd9"
+ "\x13\x00\x15\xf0\xc7\xeb\x0d\x5a"
+ "\x9f\xd7\xb9\x6c\x65\x14\x22\x45"
+ "\x6e\x45\x32\x3e\x7e\x60\x1a\x12"
+ "\x97\x82\x14\xfb\xaa\x04\x22\xfa"
+ "\xa0\xe5\x7e\x8c\x78\x02\x48\x5d"
+ "\x78\x33\x5a\x7c\xad\xdb\x29\xce"
+ "\xbb\x8b\x61\xa4\xb7\x42\xe2\xac"
+ "\x8b\x1a\xd9\x2f\x0b\x8b\x62\x21"
+ "\x83\x35\x7e\xad\x73\xc2\xb5\x6c"
+ "\x10\x26\x38\x07\xe5\xc7\x36\x80"
+ "\xe2\x23\x12\x61\xf5\x48\x4b\x2b"
+ "\xc5\xdf\x15\xd9\x87\x01\xaa\xac"
+ "\x1e\x7c\xad\x73\x78\x18\x63\xe0"
+ "\x8b\x9f\x81\xd8\x12\x6a\x28\x10"
+ "\xbe\x04\x68\x8a\x09\x7c\x1b\x1c"
+ "\x83\x66\x80\x47\x80\xe8\xfd\x35"
+ "\x1c\x97\x6f\xae\x49\x10\x66\xcc"
+ "\xc6\xd8\xcc\x3a\x84\x91\x20\x77"
+ "\x72\xe4\x24\xd2\x37\x9f\xc5\xc9"
+ "\x25\x94\x10\x5f\x40\x00\x64\x99"
+ "\xdc\xae\xd7\x21\x09\x78\x50\x15"
+ "\xac\x5f\xc6\x2c\xa2\x0b\xa9\x39"
+ "\x87\x6e\x6d\xab\xde\x08\x51\x16"
+ "\xc7\x13\xe9\xea\xed\x06\x8e\x2c"
+ "\xf8\x37\x8c\xf0\xa6\x96\x8d\x43"
+ "\xb6\x98\x37\xb2\x43\xed\xde\xdf"
+ "\x89\x1a\xe7\xeb\x9d\xa1\x7b\x0b"
+ "\x77\xb0\xe2\x75\xc0\xf1\x98\xd9"
+ "\x80\x55\xc9\x34\x91\xd1\x59\xe8"
+ "\x4b\x0f\xc1\xa9\x4b\x7a\x84\x06"
+ "\x20\xa8\x5d\xfa\xd1\xde\x70\x56"
+ "\x2f\x9e\x91\x9c\x20\xb3\x24\xd8"
+ "\x84\x3d\xe1\x8c\x7e\x62\x52\xe5"
+ "\x44\x4b\x9f\xc2\x93\x03\xea\x2b"
+ "\x59\xc5\xfa\x3f\x91\x2b\xbb\x23"
+ "\xf5\xb2\x7b\xf5\x38\xaf\xb3\xee"
+ "\x63\xdc\x7b\xd1\xff\xaa\x8b\xab"
+ "\x82\x6b\x37\x04\xeb\x74\xbe\x79"
+ "\xb9\x83\x90\xef\x20\x59\x46\xff"
+ "\xe9\x97\x3e\x2f\xee\xb6\x64\x18"
+ "\x38\x4c\x7a\x4a\xf9\x61\xe8\x9a"
+ "\xa1\xb5\x01\xa6\x47\xd3\x11\xd4"
+ "\xce\xd3\x91\x49\x88\xc7\xb8\x4d"
+ "\xb1\xb9\x07\x6d\x16\x72\xae\x46"
+ "\x5e\x03\xa1\x4b\xb6\x02\x30\xa8"
+ "\x3d\xa9\x07\x2a\x7c\x19\xe7\x62"
+ "\x87\xe3\x82\x2f\x6f\xe1\x09\xd9"
+ "\x94\x97\xea\xdd\x58\x9e\xae\x76"
+ "\x7e\x35\xe5\xb4\xda\x7e\xf4\xde"
+ "\xf7\x32\x87\xcd\x93\xbf\x11\x56"
+ "\x11\xbe\x08\x74\xe1\x69\xad\xe2"
+ "\xd7\xf8\x86\x75\x8a\x3c\xa4\xbe"
+ "\x70\xa7\x1b\xfc\x0b\x44\x2a\x76"
+ "\x35\xea\x5d\x85\x81\xaf\x85\xeb"
+ "\xa0\x1c\x61\xc2\xf7\x4f\xa5\xdc"
+ "\x02\x7f\xf6\x95\x40\x6e\x8a\x9a"
+ "\xf3\x5d\x25\x6e\x14\x3a\x22\xc9"
+ "\x37\x1c\xeb\x46\x54\x3f\xa5\x91"
+ "\xc2\xb5\x8c\xfe\x53\x08\x97\x32"
+ "\x1b\xb2\x30\x27\xfe\x25\x5d\xdc"
+ "\x08\x87\xd0\xe5\x94\x1a\xd4\xf1"
+ "\xfe\xd6\xb4\xa3\xe6\x74\x81\x3c"
+ "\x1b\xb7\x31\xa7\x22\xfd\xd4\xdd"
+ "\x20\x4e\x7c\x51\xb0\x60\x73\xb8"
+ "\x9c\xac\x91\x90\x7e\x01\xb0\xe1"
+ "\x8a\x2f\x75\x1c\x53\x2a\x98\x2a"
+ "\x06\x52\x95\x52\xb2\xe9\x25\x2e"
+ "\x4c\xe2\x5a\x00\xb2\x13\x81\x03"
+ "\x77\x66\x0d\xa5\x99\xda\x4e\x8c"
+ "\xac\xf3\x13\x53\x27\x45\xaf\x64"
+ "\x46\xdc\xea\x23\xda\x97\xd1\xab"
+ "\x7d\x6c\x30\x96\x1f\xbc\x06\x34"
+ "\x18\x0b\x5e\x21\x35\x11\x8d\x4c"
+ "\xe0\x2d\xe9\x50\x16\x74\x81\xa8"
+ "\xb4\x34\xb9\x72\x42\xa6\xcc\xbc"
+ "\xca\x34\x83\x27\x10\x5b\x68\x45"
+ "\x8f\x52\x22\x0c\x55\x3d\x29\x7c"
+ "\xe3\xc0\x66\x05\x42\x91\x5f\x58"
+ "\xfe\x4a\x62\xd9\x8c\xa9\x04\x19"
+ "\x04\xa9\x08\x4b\x57\xfc\x67\x53"
+ "\x08\x7c\xbc\x66\x8a\xb0\xb6\x9f"
+ "\x92\xd6\x41\x7c\x5b\x2a\x00\x79"
+ "\x72",
+ .ctext = "\xe1\xb6\x8b\x5c\x80\xb8\xcc\x08"
+ "\x1b\x84\xb2\xd1\xad\xa4\x70\xac"
+ "\x67\xa9\x39\x27\xac\xb4\x5b\xb7"
+ "\x4c\x26\x77\x23\x1d\xce\x0a\xbe"
+ "\x18\x9e\x42\x8b\xbd\x7f\xd6\xf1"
+ "\xf1\x6b\xe2\x6d\x7f\x92\x0e\xcb"
+ "\xb8\x79\xba\xb4\xac\x7e\x2d\xc0"
+ "\x9e\x83\x81\x91\xd5\xea\xc3\x12"
+ "\x8d\xa4\x26\x70\xa4\xf9\x71\x0b"
+ "\xbd\x2e\xe1\xb3\x80\x42\x25\xb3"
+ "\x0b\x31\x99\xe1\x0d\xde\xa6\x90"
+ "\xf2\xa3\x10\xf7\xe5\xf3\x83\x1e"
+ "\x2c\xfb\x4d\xf0\x45\x3d\x28\x3c"
+ "\xb8\xf1\xcb\xbf\x67\xd8\x43\x5a"
+ "\x9d\x7b\x73\x29\x88\x0f\x13\x06"
+ "\x37\x50\x0d\x7c\xe6\x9b\x07\xdd"
+ "\x7e\x01\x1f\x81\x90\x10\x69\xdb"
+ "\xa4\xad\x8a\x5e\xac\x30\x72\xf2"
+ "\x36\xcd\xe3\x23\x49\x02\x93\xfa"
+ "\x3d\xbb\xe2\x98\x83\xeb\xe9\x8d"
+ "\xb3\x8f\x11\xaa\x53\xdb\xaf\x2e"
+ "\x95\x13\x99\x3d\x71\xbd\x32\x92"
+ "\xdd\xfc\x9d\x5e\x6f\x63\x2c\xee"
+ "\x91\x1f\x4c\x64\x3d\x87\x55\x0f"
+ "\xcc\x3d\x89\x61\x53\x02\x57\x8f"
+ "\xe4\x77\x29\x32\xaf\xa6\x2f\x0a"
+ "\xae\x3c\x3f\x3f\xf4\xfb\x65\x52"
+ "\xc5\xc1\x78\x78\x53\x28\xad\xed"
+ "\xd1\x67\x37\xc7\x59\x70\xcd\x0a"
+ "\xb8\x0f\x80\x51\x9f\xc0\x12\x5e"
+ "\x06\x0a\x7e\xec\x24\x5f\x73\x00"
+ "\xb1\x0b\x31\x47\x4f\x73\x8d\xb4"
+ "\xce\xf3\x55\x45\x6c\x84\x27\xba"
+ "\xb9\x6f\x03\x4a\xeb\x98\x88\x6e"
+ "\x53\xed\x25\x19\x0d\x8f\xfe\xca"
+ "\x60\xe5\x00\x93\x6e\x3c\xff\x19"
+ "\xae\x08\x3b\x8a\xa6\x84\x05\xfe"
+ "\x9b\x59\xa0\x8c\xc8\x05\x45\xf5"
+ "\x05\x37\xdc\x45\x6f\x8b\x95\x8c"
+ "\x4e\x11\x45\x7a\xce\x21\xa5\xf7"
+ "\x71\x67\xb9\xce\xd7\xf9\xe9\x5e"
+ "\x60\xf5\x53\x7a\xa8\x85\x14\x03"
+ "\xa0\x92\xec\xf3\x51\x80\x84\xc4"
+ "\xdc\x11\x9e\x57\xce\x4b\x45\xcf"
+ "\x90\x95\x85\x0b\x96\xe9\xee\x35"
+ "\x10\xb8\x9b\xf2\x59\x4a\xc6\x7e"
+ "\x85\xe5\x6f\x38\x51\x93\x40\x0c"
+ "\x99\xd7\x7f\x32\xa8\x06\x27\xd1"
+ "\x2b\xd5\xb5\x3a\x1a\xe1\x5e\xda"
+ "\xcd\x5a\x50\x30\x3c\xc7\xe7\x65"
+ "\xa6\x07\x0b\x98\x91\xc6\x20\x27"
+ "\x2a\x03\x63\x1b\x1e\x3d\xaf\xc8"
+ "\x71\x48\x46\x6a\x64\x28\xf9\x3d"
+ "\xd1\x1d\xab\xc8\x40\x76\xc2\x39"
+ "\x4e\x00\x75\xd2\x0e\x82\x58\x8c"
+ "\xd3\x73\x5a\xea\x46\x89\xbe\xfd"
+ "\x4e\x2c\x0d\x94\xaa\x9b\x68\xac"
+ "\x86\x87\x30\x7e\xa9\x16\xcd\x59"
+ "\xd2\xa6\xbe\x0a\xd8\xf5\xfd\x2d"
+ "\x49\x69\xd2\x1a\x90\xd2\x1b\xed"
+ "\xff\x71\x04\x87\x87\x21\xc4\xb8"
+ "\x1f\x5b\x51\x33\xd0\xd6\x59\x9a"
+ "\x03\x0e\xd3\x8b\xfb\x57\x73\xfd"
+ "\x5a\x52\x63\x82\xc8\x85\x2f\xcb"
+ "\x74\x6d\x4e\xd9\x68\x37\x85\x6a"
+ "\xd4\xfb\x94\xed\x8d\xd1\x1a\xaf"
+ "\x76\xa7\xb7\x88\xd0\x2b\x4e\xda"
+ "\xec\x99\x94\x27\x6f\x87\x8c\xdf"
+ "\x4b\x5e\xa6\x66\xdd\xcb\x33\x7b"
+ "\x64\x94\x31\xa8\x37\xa6\x1d\xdb"
+ "\x0d\x5c\x93\xa4\x40\xf9\x30\x53"
+ "\x4b\x74\x8d\xdd\xf6\xde\x3c\xac"
+ "\x5c\x80\x01\x3a\xef\xb1\x9a\x02"
+ "\x0c\x22\x8e\xe7\x44\x09\x74\x4c"
+ "\xf2\x9a\x27\x69\x7f\x12\x32\x36"
+ "\xde\x92\xdf\xde\x8f\x5b\x31\xab"
+ "\x4a\x01\x26\xe0\xb1\xda\xe8\x37"
+ "\x21\x64\xe8\xff\x69\xfc\x9e\x41"
+ "\xd2\x96\x2d\x18\x64\x98\x33\x78"
+ "\x24\x61\x73\x9b\x47\x29\xf1\xa7"
+ "\xcb\x27\x0f\xf0\x85\x6d\x8c\x9d"
+ "\x2c\x95\x9e\xe5\xb2\x8e\x30\x29"
+ "\x78\x8a\x9d\x65\xb4\x8e\xde\x7b"
+ "\xd9\x00\x50\xf5\x7f\x81\xc3\x1b"
+ "\x25\x85\xeb\xc2\x8c\x33\x22\x1e"
+ "\x68\x38\x22\x30\xd8\x2e\x00\x98"
+ "\x85\x16\x06\x56\xb4\x81\x74\x20"
+ "\x95\xdb\x1c\x05\x19\xe8\x23\x4d"
+ "\x65\x5d\xcc\xd8\x7f\xc4\x2d\x0f"
+ "\x57\x26\x71\x07\xad\xaa\x71\x9f"
+ "\x19\x76\x2f\x25\x51\x88\xe4\xc0"
+ "\x82\x6e\x08\x05\x37\x04\xee\x25"
+ "\x23\x90\xe9\x4e\xce\x9b\x16\xc1"
+ "\x31\xe7\x6e\x2c\x1b\xe1\x85\x9a"
+ "\x0c\x8c\xbb\x12\x1e\x68\x7b\x93"
+ "\xa9\x3c\x39\x56\x23\x3e\x6e\xc7"
+ "\x77\x84\xd3\xe0\x86\x59\xaa\xb9"
+ "\xd5\x53\x58\xc9\x0a\x83\x5f\x85"
+ "\xd8\x47\x14\x67\x8a\x3c\x17\xe0"
+ "\xab\x02\x51\xea\xf1\xf0\x4f\x30"
+ "\x7d\xe0\x92\xc2\x5f\xfb\x19\x5a"
+ "\x3f\xbd\xf4\x39\xa4\x31\x0c\x39"
+ "\xd1\xae\x4e\xf7\x65\x7f\x1f\xce"
+ "\xc2\x39\xd1\x84\xd4\xe5\x02\xe0"
+ "\x58\xaa\xf1\x5e\x81\xaf\x7f\x72"
+ "\x0f\x08\x99\x43\xb9\xd8\xac\x41"
+ "\x35\x55\xf2\xb2\xd4\x98\xb8\x3b"
+ "\x2b\x3c\x3e\x16\x06\x31\xfc\x79"
+ "\x47\x38\x63\x51\xc5\xd0\x26\xd7"
+ "\x43\xb4\x2b\xd9\xc5\x05\xf2\x9d"
+ "\x18\xc9\x26\x82\x56\xd2\x11\x05"
+ "\xb6\x89\xb4\x43\x9c\xb5\x9d\x11"
+ "\x6c\x83\x37\x71\x27\x1c\xae\xbf"
+ "\xcd\x57\xd2\xee\x0d\x5a\x15\x26"
+ "\x67\x88\x80\x80\x1b\xdc\xc1\x62"
+ "\xdd\x4c\xff\x92\x5c\x6c\xe1\xa0"
+ "\xe3\x79\xa9\x65\x8c\x8c\x14\x42"
+ "\xe5\x11\xd2\x1a\xad\xa9\x56\x6f"
+ "\x98\xfc\x8a\x7b\x56\x1f\xc6\xc1"
+ "\x52\x12\x92\x9b\x41\x0f\x4b\xae"
+ "\x1b\x4a\xbc\xfe\x23\xb6\x94\x70"
+ "\x04\x30\x9e\x69\x47\xbe\xb8\x8f"
+ "\xca\x45\xd7\x8a\xf4\x78\x3e\xaa"
+ "\x71\x17\xd8\x1e\xb8\x11\x8f\xbc"
+ "\xc8\x1a\x65\x7b\x41\x89\x72\xc7"
+ "\x5f\xbe\xc5\x2a\xdb\x5c\x54\xf9"
+ "\x25\xa3\x7a\x80\x56\x9c\x8c\xab"
+ "\x26\x19\x10\x36\xa6\xf3\x14\x79"
+ "\x40\x98\x70\x68\xb7\x35\xd9\xb9"
+ "\x27\xd4\xe7\x74\x5b\x3d\x97\xb4"
+ "\xd9\xaa\xd9\xf2\xb5\x14\x84\x1f"
+ "\xa9\xde\x12\x44\x5b\x00\xc0\xbc"
+ "\xc8\x11\x25\x1b\x67\x7a\x15\x72"
+ "\xa6\x31\x6f\xf4\x68\x7a\x86\x9d"
+ "\x43\x1c\x5f\x16\xd3\xad\x2e\x52"
+ "\xf3\xb4\xc3\xfa\x27\x2e\x68\x6c"
+ "\x06\xe7\x4c\x4f\xa2\xe0\xe4\x21"
+ "\x5d\x9e\x33\x58\x8d\xbf\xd5\x70"
+ "\xf8\x80\xa5\xdd\xe7\x18\x79\xfa"
+ "\x7b\xfd\x09\x69\x2c\x37\x32\xa8"
+ "\x65\xfa\x8d\x8b\x5c\xcc\xe8\xf3"
+ "\x37\xf6\xa6\xc6\x5c\xa2\x66\x79"
+ "\xfa\x8a\xa7\xd1\x0b\x2e\x1b\x5e"
+ "\x95\x35\x00\x76\xae\x42\xf7\x50"
+ "\x51\x78\xfb\xb4\x28\x24\xde\x1a"
+ "\x70\x8b\xed\xca\x3c\x5e\xe4\xbd"
+ "\x28\xb5\xf3\x76\x4f\x67\x5d\x81"
+ "\xb2\x60\x87\xd9\x7b\x19\x1a\xa7"
+ "\x79\xa2\xfa\x3f\x9e\xa9\xd7\x25"
+ "\x61\xe1\x74\x31\xa2\x77\xa0\x1b"
+ "\xf6\xf7\xcb\xc5\xaa\x9e\xce\xf9"
+ "\x9b\x96\xef\x51\xc3\x1a\x44\x96"
+ "\xae\x17\x50\xab\x29\x08\xda\xcc"
+ "\x1a\xb3\x12\xd0\x24\xe4\xe2\xe0"
+ "\xc6\xe3\xcc\x82\xd0\xba\x47\x4c"
+ "\x3f\x49\xd7\xe8\xb6\x61\xaa\x65"
+ "\x25\x18\x40\x2d\x62\x25\x02\x71"
+ "\x61\xa2\xc1\xb2\x13\xd2\x71\x3f"
+ "\x43\x1a\xc9\x09\x92\xff\xd5\x57"
+ "\xf0\xfc\x5e\x1c\xf1\xf5\xf9\xf3"
+ "\x5b",
+ .len = 1281,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 1200, 1, 80 },
+ }, {
+ .key = "\x80\x81\x82\x83\x84\x85\x86\x87"
+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
+ "\x90\x91\x92\x93\x94\x95\x96\x97"
+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f",
+ .klen = 32,
+ .iv = "\x40\x41\x42\x43\x44\x45\x46\x47"
+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
+ "\x50\x51\x52\x53\x54\x55\x56\x58"
+ "\x00\x00\x00\x00\x00\x00\x00\x00",
+ .ptext = "\x54\x68\x65\x20\x64\x68\x6f\x6c"
+ "\x65\x20\x28\x70\x72\x6f\x6e\x6f"
+ "\x75\x6e\x63\x65\x64\x20\x22\x64"
+ "\x6f\x6c\x65\x22\x29\x20\x69\x73"
+ "\x20\x61\x6c\x73\x6f\x20\x6b\x6e"
+ "\x6f\x77\x6e\x20\x61\x73\x20\x74"
+ "\x68\x65\x20\x41\x73\x69\x61\x74"
+ "\x69\x63\x20\x77\x69\x6c\x64\x20"
+ "\x64\x6f\x67\x2c\x20\x72\x65\x64"
+ "\x20\x64\x6f\x67\x2c\x20\x61\x6e"
+ "\x64\x20\x77\x68\x69\x73\x74\x6c"
+ "\x69\x6e\x67\x20\x64\x6f\x67\x2e"
+ "\x20\x49\x74\x20\x69\x73\x20\x61"
+ "\x62\x6f\x75\x74\x20\x74\x68\x65"
+ "\x20\x73\x69\x7a\x65\x20\x6f\x66"
+ "\x20\x61\x20\x47\x65\x72\x6d\x61"
+ "\x6e\x20\x73\x68\x65\x70\x68\x65"
+ "\x72\x64\x20\x62\x75\x74\x20\x6c"
+ "\x6f\x6f\x6b\x73\x20\x6d\x6f\x72"
+ "\x65\x20\x6c\x69\x6b\x65\x20\x61"
+ "\x20\x6c\x6f\x6e\x67\x2d\x6c\x65"
+ "\x67\x67\x65\x64\x20\x66\x6f\x78"
+ "\x2e\x20\x54\x68\x69\x73\x20\x68"
+ "\x69\x67\x68\x6c\x79\x20\x65\x6c"
+ "\x75\x73\x69\x76\x65\x20\x61\x6e"
+ "\x64\x20\x73\x6b\x69\x6c\x6c\x65"
+ "\x64\x20\x6a\x75\x6d\x70\x65\x72"
+ "\x20\x69\x73\x20\x63\x6c\x61\x73"
+ "\x73\x69\x66\x69\x65\x64\x20\x77"
+ "\x69\x74\x68\x20\x77\x6f\x6c\x76"
+ "\x65\x73\x2c\x20\x63\x6f\x79\x6f"
+ "\x74\x65\x73\x2c\x20\x6a\x61\x63"
+ "\x6b\x61\x6c\x73\x2c\x20\x61\x6e"
+ "\x64\x20\x66\x6f\x78\x65\x73\x20"
+ "\x69\x6e\x20\x74\x68\x65\x20\x74"
+ "\x61\x78\x6f\x6e\x6f\x6d\x69\x63"
+ "\x20\x66\x61\x6d\x69\x6c\x79\x20"
+ "\x43\x61\x6e\x69\x64\x61\x65\x2e",
+ .ctext = "\x9f\x1a\xab\x8a\x95\xf4\x7e\xcd"
+ "\xee\x34\xc0\x39\xd6\x23\x43\x94"
+ "\xf6\x01\xc1\x7f\x60\x91\xa5\x23"
+ "\x4a\x8a\xe6\xb1\x14\x8b\xd7\x58"
+ "\xee\x02\xad\xab\xce\x1e\x7d\xdf"
+ "\xf9\x49\x27\x69\xd0\x8d\x0c\x20"
+ "\x6e\x17\xc4\xae\x87\x7a\xc6\x61"
+ "\x91\xe2\x8e\x0a\x1d\x61\xcc\x38"
+ "\x02\x64\x43\x49\xc6\xb2\x59\x59"
+ "\x42\xe7\x9d\x83\x00\x60\x90\xd2"
+ "\xb9\xcd\x97\x6e\xc7\x95\x71\xbc"
+ "\x23\x31\x58\x07\xb3\xb4\xac\x0b"
+ "\x87\x64\x56\xe5\xe3\xec\x63\xa1"
+ "\x71\x8c\x08\x48\x33\x20\x29\x81"
+ "\xea\x01\x25\x20\xc3\xda\xe6\xee"
+ "\x6a\x03\xf6\x68\x4d\x26\xa0\x91"
+ "\x9e\x44\xb8\xc1\xc0\x8f\x5a\x6a"
+ "\xc0\xcd\xbf\x24\x5e\x40\x66\xd2"
+ "\x42\x24\xb5\xbf\xc1\xeb\x12\x60"
+ "\x56\xbe\xb1\xa6\xc4\x0f\xfc\x49"
+ "\x69\x9f\xcc\x06\x5c\xe3\x26\xd7"
+ "\x52\xc0\x42\xe8\xb4\x76\xc3\xee"
+ "\xb2\x97\xe3\x37\x61\x29\x5a\xb5"
+ "\x8e\xe8\x8c\xc5\x38\xcc\xcb\xec"
+ "\x64\x1a\xa9\x12\x5f\xf7\x79\xdf"
+ "\x64\xca\x77\x4e\xbd\xf9\x83\xa0"
+ "\x13\x27\x3f\x31\x03\x63\x30\x26"
+ "\x27\x0b\x3e\xb3\x23\x13\x61\x0b"
+ "\x70\x1d\xd4\xad\x85\x1e\xbf\xdf"
+ "\xc6\x8e\x4d\x08\xcc\x7e\x77\xbd"
+ "\x1e\x18\x77\x38\x3a\xfe\xc0\x5d"
+ "\x16\xfc\xf0\xa9\x2f\xe9\x17\xc7"
+ "\xd3\x23\x17\x18\xa3\xe6\x54\x77"
+ "\x6f\x1b\xbe\x8a\x6e\x7e\xca\x97"
+ "\x08\x05\x36\x76\xaf\x12\x7a\x42"
+ "\xf7\x7a\xc2\x35\xc3\xb4\x93\x40"
+ "\x54\x14\x90\xa0\x4d\x65\x1c\x37"
+ "\x50\x70\x44\x29\x6d\x6e\x62\x68",
+ .len = 304,
+ }
+};
+
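+/*
+ * Background: Adiantum (Crowley and Biggers, "Adiantum: length-preserving
+ * encryption for entry-level processors") is a length-preserving, wide-block
+ * construction built from a stream cipher (XChaCha12 or XChaCha20), a single
+ * AES-256 block cipher invocation per message, and NH/Poly1305 hashing; the
+ * templates below cover both stream cipher instantiations.
+ */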
+/* Adiantum test vectors from https://github.com/google/adiantum */
+static const struct cipher_testvec adiantum_xchacha12_aes_tv_template[] = {
+ {
+ .key = "\x9e\xeb\xb2\x49\x3c\x1c\xf5\xf4"
+ "\x6a\x99\xc2\xc4\xdf\xb1\xf4\xdd"
+ "\x75\x20\x57\xea\x2c\x4f\xcd\xb2"
+ "\xa5\x3d\x7b\x49\x1e\xab\xfd\x0f",
+ .klen = 32,
+ .iv = "\xdf\x63\xd4\xab\xd2\x49\xf3\xd8"
+ "\x33\x81\x37\x60\x7d\xfa\x73\x08"
+ "\xd8\x49\x6d\x80\xe8\x2f\x62\x54"
+ "\xeb\x0e\xa9\x39\x5b\x45\x7f\x8a",
+ .ptext = "\x67\xc9\xf2\x30\x84\x41\x8e\x43"
+ "\xfb\xf3\xb3\x3e\x79\x36\x7f\xe8",
+ .ctext = "\x6d\x32\x86\x18\x67\x86\x0f\x3f"
+ "\x96\x7c\x9d\x28\x0d\x53\xec\x9f",
+ .len = 16,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 14, 2 },
+ }, {
+ .key = "\x36\x2b\x57\x97\xf8\x5d\xcd\x99"
+ "\x5f\x1a\x5a\x44\x1d\x92\x0f\x27"
+ "\xcc\x16\xd7\x2b\x85\x63\x99\xd3"
+ "\xba\x96\xa1\xdb\xd2\x60\x68\xda",
+ .klen = 32,
+ .iv = "\xef\x58\x69\xb1\x2c\x5e\x9a\x47"
+ "\x24\xc1\xb1\x69\xe1\x12\x93\x8f"
+ "\x43\x3d\x6d\x00\xdb\x5e\xd8\xd9"
+ "\x12\x9a\xfe\xd9\xff\x2d\xaa\xc4",
+ .ptext = "\x5e\xa8\x68\x19\x85\x98\x12\x23"
+ "\x26\x0a\xcc\xdb\x0a\x04\xb9\xdf"
+ "\x4d\xb3\x48\x7b\xb0\xe3\xc8\x19"
+ "\x43\x5a\x46\x06\x94\x2d\xf2",
+ .ctext = "\xc7\xc6\xf1\x73\x8f\xc4\xff\x4a"
+ "\x39\xbe\x78\xbe\x8d\x28\xc8\x89"
+ "\x46\x63\xe7\x0c\x7d\x87\xe8\x4e"
+ "\xc9\x18\x7b\xbe\x18\x60\x50",
+ .len = 31,
+ }, {
+ .key = "\xa5\x28\x24\x34\x1a\x3c\xd8\xf7"
+ "\x05\x91\x8f\xee\x85\x1f\x35\x7f"
+ "\x80\x3d\xfc\x9b\x94\xf6\xfc\x9e"
+ "\x19\x09\x00\xa9\x04\x31\x4f\x11",
+ .klen = 32,
+ .iv = "\xa1\xba\x49\x95\xff\x34\x6d\xb8"
+ "\xcd\x87\x5d\x5e\xfd\xea\x85\xdb"
+ "\x8a\x7b\x5e\xb2\x5d\x57\xdd\x62"
+ "\xac\xa9\x8c\x41\x42\x94\x75\xb7",
+ .ptext = "\x69\xb4\xe8\x8c\x37\xe8\x67\x82"
+ "\xf1\xec\x5d\x04\xe5\x14\x91\x13"
+ "\xdf\xf2\x87\x1b\x69\x81\x1d\x71"
+ "\x70\x9e\x9c\x3b\xde\x49\x70\x11"
+ "\xa0\xa3\xdb\x0d\x54\x4f\x66\x69"
+ "\xd7\xdb\x80\xa7\x70\x92\x68\xce"
+ "\x81\x04\x2c\xc6\xab\xae\xe5\x60"
+ "\x15\xe9\x6f\xef\xaa\x8f\xa7\xa7"
+ "\x63\x8f\xf2\xf0\x77\xf1\xa8\xea"
+ "\xe1\xb7\x1f\x9e\xab\x9e\x4b\x3f"
+ "\x07\x87\x5b\x6f\xcd\xa8\xaf\xb9"
+ "\xfa\x70\x0b\x52\xb8\xa8\xa7\x9e"
+ "\x07\x5f\xa6\x0e\xb3\x9b\x79\x13"
+ "\x79\xc3\x3e\x8d\x1c\x2c\x68\xc8"
+ "\x51\x1d\x3c\x7b\x7d\x79\x77\x2a"
+ "\x56\x65\xc5\x54\x23\x28\xb0\x03",
+ .ctext = "\x9e\x16\xab\xed\x4b\xa7\x42\x5a"
+ "\xc6\xfb\x4e\x76\xff\xbe\x03\xa0"
+ "\x0f\xe3\xad\xba\xe4\x98\x2b\x0e"
+ "\x21\x48\xa0\xb8\x65\x48\x27\x48"
+ "\x84\x54\x54\xb2\x9a\x94\x7b\xe6"
+ "\x4b\x29\xe9\xcf\x05\x91\x80\x1a"
+ "\x3a\xf3\x41\x96\x85\x1d\x9f\x74"
+ "\x51\x56\x63\xfa\x7c\x28\x85\x49"
+ "\xf7\x2f\xf9\xf2\x18\x46\xf5\x33"
+ "\x80\xa3\x3c\xce\xb2\x57\x93\xf5"
+ "\xae\xbd\xa9\xf5\x7b\x30\xc4\x93"
+ "\x66\xe0\x30\x77\x16\xe4\xa0\x31"
+ "\xba\x70\xbc\x68\x13\xf5\xb0\x9a"
+ "\xc1\xfc\x7e\xfe\x55\x80\x5c\x48"
+ "\x74\xa6\xaa\xa3\xac\xdc\xc2\xf5"
+ "\x8d\xde\x34\x86\x78\x60\x75\x8d",
+ .len = 128,
+ .also_non_np = 1,
+ .np = 4,
+ .tap = { 104, 16, 4, 4 },
+ }, {
+ .key = "\xd3\x81\x72\x18\x23\xff\x6f\x4a"
+ "\x25\x74\x29\x0d\x51\x8a\x0e\x13"
+ "\xc1\x53\x5d\x30\x8d\xee\x75\x0d"
+ "\x14\xd6\x69\xc9\x15\xa9\x0c\x60",
+ .klen = 32,
+ .iv = "\x65\x9b\xd4\xa8\x7d\x29\x1d\xf4"
+ "\xc4\xd6\x9b\x6a\x28\xab\x64\xe2"
+ "\x62\x81\x97\xc5\x81\xaa\xf9\x44"
+ "\xc1\x72\x59\x82\xaf\x16\xc8\x2c",
+ .ptext = "\xc7\x6b\x52\x6a\x10\xf0\xcc\x09"
+ "\xc1\x12\x1d\x6d\x21\xa6\x78\xf5"
+ "\x05\xa3\x69\x60\x91\x36\x98\x57"
+ "\xba\x0c\x14\xcc\xf3\x2d\x73\x03"
+ "\xc6\xb2\x5f\xc8\x16\x27\x37\x5d"
+ "\xd0\x0b\x87\xb2\x50\x94\x7b\x58"
+ "\x04\xf4\xe0\x7f\x6e\x57\x8e\xc9"
+ "\x41\x84\xc1\xb1\x7e\x4b\x91\x12"
+ "\x3a\x8b\x5d\x50\x82\x7b\xcb\xd9"
+ "\x9a\xd9\x4e\x18\x06\x23\x9e\xd4"
+ "\xa5\x20\x98\xef\xb5\xda\xe5\xc0"
+ "\x8a\x6a\x83\x77\x15\x84\x1e\xae"
+ "\x78\x94\x9d\xdf\xb7\xd1\xea\x67"
+ "\xaa\xb0\x14\x15\xfa\x67\x21\x84"
+ "\xd3\x41\x2a\xce\xba\x4b\x4a\xe8"
+ "\x95\x62\xa9\x55\xf0\x80\xad\xbd"
+ "\xab\xaf\xdd\x4f\xa5\x7c\x13\x36"
+ "\xed\x5e\x4f\x72\xad\x4b\xf1\xd0"
+ "\x88\x4e\xec\x2c\x88\x10\x5e\xea"
+ "\x12\xc0\x16\x01\x29\xa3\xa0\x55"
+ "\xaa\x68\xf3\xe9\x9d\x3b\x0d\x3b"
+ "\x6d\xec\xf8\xa0\x2d\xf0\x90\x8d"
+ "\x1c\xe2\x88\xd4\x24\x71\xf9\xb3"
+ "\xc1\x9f\xc5\xd6\x76\x70\xc5\x2e"
+ "\x9c\xac\xdb\x90\xbd\x83\x72\xba"
+ "\x6e\xb5\xa5\x53\x83\xa9\xa5\xbf"
+ "\x7d\x06\x0e\x3c\x2a\xd2\x04\xb5"
+ "\x1e\x19\x38\x09\x16\xd2\x82\x1f"
+ "\x75\x18\x56\xb8\x96\x0b\xa6\xf9"
+ "\xcf\x62\xd9\x32\x5d\xa9\xd7\x1d"
+ "\xec\xe4\xdf\x1b\xbe\xf1\x36\xee"
+ "\xe3\x7b\xb5\x2f\xee\xf8\x53\x3d"
+ "\x6a\xb7\x70\xa9\xfc\x9c\x57\x25"
+ "\xf2\x89\x10\xd3\xb8\xa8\x8c\x30"
+ "\xae\x23\x4f\x0e\x13\x66\x4f\xe1"
+ "\xb6\xc0\xe4\xf8\xef\x93\xbd\x6e"
+ "\x15\x85\x6b\xe3\x60\x81\x1d\x68"
+ "\xd7\x31\x87\x89\x09\xab\xd5\x96"
+ "\x1d\xf3\x6d\x67\x80\xca\x07\x31"
+ "\x5d\xa7\xe4\xfb\x3e\xf2\x9b\x33"
+ "\x52\x18\xc8\x30\xfe\x2d\xca\x1e"
+ "\x79\x92\x7a\x60\x5c\xb6\x58\x87"
+ "\xa4\x36\xa2\x67\x92\x8b\xa4\xb7"
+ "\xf1\x86\xdf\xdc\xc0\x7e\x8f\x63"
+ "\xd2\xa2\xdc\x78\xeb\x4f\xd8\x96"
+ "\x47\xca\xb8\x91\xf9\xf7\x94\x21"
+ "\x5f\x9a\x9f\x5b\xb8\x40\x41\x4b"
+ "\x66\x69\x6a\x72\xd0\xcb\x70\xb7"
+ "\x93\xb5\x37\x96\x05\x37\x4f\xe5"
+ "\x8c\xa7\x5a\x4e\x8b\xb7\x84\xea"
+ "\xc7\xfc\x19\x6e\x1f\x5a\xa1\xac"
+ "\x18\x7d\x52\x3b\xb3\x34\x62\x99"
+ "\xe4\x9e\x31\x04\x3f\xc0\x8d\x84"
+ "\x17\x7c\x25\x48\x52\x67\x11\x27"
+ "\x67\xbb\x5a\x85\xca\x56\xb2\x5c"
+ "\xe6\xec\xd5\x96\x3d\x15\xfc\xfb"
+ "\x22\x25\xf4\x13\xe5\x93\x4b\x9a"
+ "\x77\xf1\x52\x18\xfa\x16\x5e\x49"
+ "\x03\x45\xa8\x08\xfa\xb3\x41\x92"
+ "\x79\x50\x33\xca\xd0\xd7\x42\x55"
+ "\xc3\x9a\x0c\x4e\xd9\xa4\x3c\x86"
+ "\x80\x9f\x53\xd1\xa4\x2e\xd1\xbc"
+ "\xf1\x54\x6e\x93\xa4\x65\x99\x8e"
+ "\xdf\x29\xc0\x64\x63\x07\xbb\xea",
+ .ctext = "\x15\x97\xd0\x86\x18\x03\x9c\x51"
+ "\xc5\x11\x36\x62\x13\x92\xe6\x73"
+ "\x29\x79\xde\xa1\x00\x3e\x08\x64"
+ "\x17\x1a\xbc\xd5\xfe\x33\x0e\x0c"
+ "\x7c\x94\xa7\xc6\x3c\xbe\xac\xa2"
+ "\x89\xe6\xbc\xdf\x0c\x33\x27\x42"
+ "\x46\x73\x2f\xba\x4e\xa6\x46\x8f"
+ "\xe4\xee\x39\x63\x42\x65\xa3\x88"
+ "\x7a\xad\x33\x23\xa9\xa7\x20\x7f"
+ "\x0b\xe6\x6a\xc3\x60\xda\x9e\xb4"
+ "\xd6\x07\x8a\x77\x26\xd1\xab\x44"
+ "\x99\x55\x03\x5e\xed\x8d\x7b\xbd"
+ "\xc8\x21\xb7\x21\x30\x3f\xc0\xb5"
+ "\xc8\xec\x6c\x23\xa6\xa3\x6d\xf1"
+ "\x30\x0a\xd0\xa6\xa9\x28\x69\xae"
+ "\x2a\xe6\x54\xac\x82\x9d\x6a\x95"
+ "\x6f\x06\x44\xc5\x5a\x77\x6e\xec"
+ "\xf8\xf8\x63\xb2\xe6\xaa\xbd\x8e"
+ "\x0e\x8a\x62\x00\x03\xc8\x84\xdd"
+ "\x47\x4a\xc3\x55\xba\xb7\xe7\xdf"
+ "\x08\xbf\x62\xf5\xe8\xbc\xb6\x11"
+ "\xe4\xcb\xd0\x66\x74\x32\xcf\xd4"
+ "\xf8\x51\x80\x39\x14\x05\x12\xdb"
+ "\x87\x93\xe2\x26\x30\x9c\x3a\x21"
+ "\xe5\xd0\x38\x57\x80\x15\xe4\x08"
+ "\x58\x05\x49\x7d\xe6\x92\x77\x70"
+ "\xfb\x1e\x2d\x6a\x84\x00\xc8\x68"
+ "\xf7\x1a\xdd\xf0\x7b\x38\x1e\xd8"
+ "\x2c\x78\x78\x61\xcf\xe3\xde\x69"
+ "\x1f\xd5\x03\xd5\x1a\xb4\xcf\x03"
+ "\xc8\x7a\x70\x68\x35\xb4\xf6\xbe"
+ "\x90\x62\xb2\x28\x99\x86\xf5\x44"
+ "\x99\xeb\x31\xcf\xca\xdf\xd0\x21"
+ "\xd6\x60\xf7\x0f\x40\xb4\x80\xb7"
+ "\xab\xe1\x9b\x45\xba\x66\xda\xee"
+ "\xdd\x04\x12\x40\x98\xe1\x69\xe5"
+ "\x2b\x9c\x59\x80\xe7\x7b\xcc\x63"
+ "\xa6\xc0\x3a\xa9\xfe\x8a\xf9\x62"
+ "\x11\x34\x61\x94\x35\xfe\xf2\x99"
+ "\xfd\xee\x19\xea\x95\xb6\x12\xbf"
+ "\x1b\xdf\x02\x1a\xcc\x3e\x7e\x65"
+ "\x78\x74\x10\x50\x29\x63\x28\xea"
+ "\x6b\xab\xd4\x06\x4d\x15\x24\x31"
+ "\xc7\x0a\xc9\x16\xb6\x48\xf0\xbf"
+ "\x49\xdb\x68\x71\x31\x8f\x87\xe2"
+ "\x13\x05\x64\xd6\x22\x0c\xf8\x36"
+ "\x84\x24\x3e\x69\x5e\xb8\x9e\x16"
+ "\x73\x6c\x83\x1e\xe0\x9f\x9e\xba"
+ "\xe5\x59\x21\x33\x1b\xa9\x26\xc2"
+ "\xc7\xd9\x30\x73\xb6\xa6\x73\x82"
+ "\x19\xfa\x44\x4d\x40\x8b\x69\x04"
+ "\x94\x74\xea\x6e\xb3\x09\x47\x01"
+ "\x2a\xb9\x78\x34\x43\x11\xed\xd6"
+ "\x8c\x95\x65\x1b\x85\x67\xa5\x40"
+ "\xac\x9c\x05\x4b\x57\x4a\xa9\x96"
+ "\x0f\xdd\x4f\xa1\xe0\xcf\x6e\xc7"
+ "\x1b\xed\xa2\xb4\x56\x8c\x09\x6e"
+ "\xa6\x65\xd7\x55\x81\xb7\xed\x11"
+ "\x9b\x40\x75\xa8\x6b\x56\xaf\x16"
+ "\x8b\x3d\xf4\xcb\xfe\xd5\x1d\x3d"
+ "\x85\xc2\xc0\xde\x43\x39\x4a\x96"
+ "\xba\x88\x97\xc0\xd6\x00\x0e\x27"
+ "\x21\xb0\x21\x52\xba\xa7\x37\xaa"
+ "\xcc\xbf\x95\xa8\xf4\xd0\x91\xf6",
+ .len = 512,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 144, 368 },
+ }
+};
+
+/*
+ * Adiantum with XChaCha20 instead of XChaCha12; test vectors from
+ * https://github.com/google/adiantum
+ */
+static const struct cipher_testvec adiantum_xchacha20_aes_tv_template[] = {
+ {
+ .key = "\x9e\xeb\xb2\x49\x3c\x1c\xf5\xf4"
+ "\x6a\x99\xc2\xc4\xdf\xb1\xf4\xdd"
+ "\x75\x20\x57\xea\x2c\x4f\xcd\xb2"
+ "\xa5\x3d\x7b\x49\x1e\xab\xfd\x0f",
+ .klen = 32,
+ .iv = "\xdf\x63\xd4\xab\xd2\x49\xf3\xd8"
+ "\x33\x81\x37\x60\x7d\xfa\x73\x08"
+ "\xd8\x49\x6d\x80\xe8\x2f\x62\x54"
+ "\xeb\x0e\xa9\x39\x5b\x45\x7f\x8a",
+ .ptext = "\x67\xc9\xf2\x30\x84\x41\x8e\x43"
+ "\xfb\xf3\xb3\x3e\x79\x36\x7f\xe8",
+ .ctext = "\xf6\x78\x97\xd6\xaa\x94\x01\x27"
+ "\x2e\x4d\x83\xe0\x6e\x64\x9a\xdf",
+ .len = 16,
+ .also_non_np = 1,
+ .np = 3,
+ .tap = { 5, 2, 9 },
+ }, {
+ .key = "\x36\x2b\x57\x97\xf8\x5d\xcd\x99"
+ "\x5f\x1a\x5a\x44\x1d\x92\x0f\x27"
+ "\xcc\x16\xd7\x2b\x85\x63\x99\xd3"
+ "\xba\x96\xa1\xdb\xd2\x60\x68\xda",
+ .klen = 32,
+ .iv = "\xef\x58\x69\xb1\x2c\x5e\x9a\x47"
+ "\x24\xc1\xb1\x69\xe1\x12\x93\x8f"
+ "\x43\x3d\x6d\x00\xdb\x5e\xd8\xd9"
+ "\x12\x9a\xfe\xd9\xff\x2d\xaa\xc4",
+ .ptext = "\x5e\xa8\x68\x19\x85\x98\x12\x23"
+ "\x26\x0a\xcc\xdb\x0a\x04\xb9\xdf"
+ "\x4d\xb3\x48\x7b\xb0\xe3\xc8\x19"
+ "\x43\x5a\x46\x06\x94\x2d\xf2",
+ .ctext = "\x4b\xb8\x90\x10\xdf\x7f\x64\x08"
+ "\x0e\x14\x42\x5f\x00\x74\x09\x36"
+ "\x57\x72\xb5\xfd\xb5\x5d\xb8\x28"
+ "\x0c\x04\x91\x14\x91\xe9\x37",
+ .len = 31,
+ .also_non_np = 1,
+ .np = 2,
+ .tap = { 16, 15 },
+ }, {
+ .key = "\xa5\x28\x24\x34\x1a\x3c\xd8\xf7"
+ "\x05\x91\x8f\xee\x85\x1f\x35\x7f"
+ "\x80\x3d\xfc\x9b\x94\xf6\xfc\x9e"
+ "\x19\x09\x00\xa9\x04\x31\x4f\x11",
+ .klen = 32,
+ .iv = "\xa1\xba\x49\x95\xff\x34\x6d\xb8"
+ "\xcd\x87\x5d\x5e\xfd\xea\x85\xdb"
+ "\x8a\x7b\x5e\xb2\x5d\x57\xdd\x62"
+ "\xac\xa9\x8c\x41\x42\x94\x75\xb7",
+ .ptext = "\x69\xb4\xe8\x8c\x37\xe8\x67\x82"
+ "\xf1\xec\x5d\x04\xe5\x14\x91\x13"
+ "\xdf\xf2\x87\x1b\x69\x81\x1d\x71"
+ "\x70\x9e\x9c\x3b\xde\x49\x70\x11"
+ "\xa0\xa3\xdb\x0d\x54\x4f\x66\x69"
+ "\xd7\xdb\x80\xa7\x70\x92\x68\xce"
+ "\x81\x04\x2c\xc6\xab\xae\xe5\x60"
+ "\x15\xe9\x6f\xef\xaa\x8f\xa7\xa7"
+ "\x63\x8f\xf2\xf0\x77\xf1\xa8\xea"
+ "\xe1\xb7\x1f\x9e\xab\x9e\x4b\x3f"
+ "\x07\x87\x5b\x6f\xcd\xa8\xaf\xb9"
+ "\xfa\x70\x0b\x52\xb8\xa8\xa7\x9e"
+ "\x07\x5f\xa6\x0e\xb3\x9b\x79\x13"
+ "\x79\xc3\x3e\x8d\x1c\x2c\x68\xc8"
+ "\x51\x1d\x3c\x7b\x7d\x79\x77\x2a"
+ "\x56\x65\xc5\x54\x23\x28\xb0\x03",
+ .ctext = "\xb1\x8b\xa0\x05\x77\xa8\x4d\x59"
+ "\x1b\x8e\x21\xfc\x3a\x49\xfa\xd4"
+ "\xeb\x36\xf3\xc4\xdf\xdc\xae\x67"
+ "\x07\x3f\x70\x0e\xe9\x66\xf5\x0c"
+ "\x30\x4d\x66\xc9\xa4\x2f\x73\x9c"
+ "\x13\xc8\x49\x44\xcc\x0a\x90\x9d"
+ "\x7c\xdd\x19\x3f\xea\x72\x8d\x58"
+ "\xab\xe7\x09\x2c\xec\xb5\x44\xd2"
+ "\xca\xa6\x2d\x7a\x5c\x9c\x2b\x15"
+ "\xec\x2a\xa6\x69\x91\xf9\xf3\x13"
+ "\xf7\x72\xc1\xc1\x40\xd5\xe1\x94"
+ "\xf4\x29\xa1\x3e\x25\x02\xa8\x3e"
+ "\x94\xc1\x91\x14\xa1\x14\xcb\xbe"
+ "\x67\x4c\xb9\x38\xfe\xa7\xaa\x32"
+ "\x29\x62\x0d\xb2\xf6\x3c\x58\x57"
+ "\xc1\xd5\x5a\xbb\xd6\xa6\x2a\xe5",
+ .len = 128,
+ .also_non_np = 1,
+ .np = 4,
+ .tap = { 112, 7, 8, 1 },
+ }, {
+ .key = "\xd3\x81\x72\x18\x23\xff\x6f\x4a"
+ "\x25\x74\x29\x0d\x51\x8a\x0e\x13"
+ "\xc1\x53\x5d\x30\x8d\xee\x75\x0d"
+ "\x14\xd6\x69\xc9\x15\xa9\x0c\x60",
+ .klen = 32,
+ .iv = "\x65\x9b\xd4\xa8\x7d\x29\x1d\xf4"
+ "\xc4\xd6\x9b\x6a\x28\xab\x64\xe2"
+ "\x62\x81\x97\xc5\x81\xaa\xf9\x44"
+ "\xc1\x72\x59\x82\xaf\x16\xc8\x2c",
+ .ptext = "\xc7\x6b\x52\x6a\x10\xf0\xcc\x09"
+ "\xc1\x12\x1d\x6d\x21\xa6\x78\xf5"
+ "\x05\xa3\x69\x60\x91\x36\x98\x57"
+ "\xba\x0c\x14\xcc\xf3\x2d\x73\x03"
+ "\xc6\xb2\x5f\xc8\x16\x27\x37\x5d"
+ "\xd0\x0b\x87\xb2\x50\x94\x7b\x58"
+ "\x04\xf4\xe0\x7f\x6e\x57\x8e\xc9"
+ "\x41\x84\xc1\xb1\x7e\x4b\x91\x12"
+ "\x3a\x8b\x5d\x50\x82\x7b\xcb\xd9"
+ "\x9a\xd9\x4e\x18\x06\x23\x9e\xd4"
+ "\xa5\x20\x98\xef\xb5\xda\xe5\xc0"
+ "\x8a\x6a\x83\x77\x15\x84\x1e\xae"
+ "\x78\x94\x9d\xdf\xb7\xd1\xea\x67"
+ "\xaa\xb0\x14\x15\xfa\x67\x21\x84"
+ "\xd3\x41\x2a\xce\xba\x4b\x4a\xe8"
+ "\x95\x62\xa9\x55\xf0\x80\xad\xbd"
+ "\xab\xaf\xdd\x4f\xa5\x7c\x13\x36"
+ "\xed\x5e\x4f\x72\xad\x4b\xf1\xd0"
+ "\x88\x4e\xec\x2c\x88\x10\x5e\xea"
+ "\x12\xc0\x16\x01\x29\xa3\xa0\x55"
+ "\xaa\x68\xf3\xe9\x9d\x3b\x0d\x3b"
+ "\x6d\xec\xf8\xa0\x2d\xf0\x90\x8d"
+ "\x1c\xe2\x88\xd4\x24\x71\xf9\xb3"
+ "\xc1\x9f\xc5\xd6\x76\x70\xc5\x2e"
+ "\x9c\xac\xdb\x90\xbd\x83\x72\xba"
+ "\x6e\xb5\xa5\x53\x83\xa9\xa5\xbf"
+ "\x7d\x06\x0e\x3c\x2a\xd2\x04\xb5"
+ "\x1e\x19\x38\x09\x16\xd2\x82\x1f"
+ "\x75\x18\x56\xb8\x96\x0b\xa6\xf9"
+ "\xcf\x62\xd9\x32\x5d\xa9\xd7\x1d"
+ "\xec\xe4\xdf\x1b\xbe\xf1\x36\xee"
+ "\xe3\x7b\xb5\x2f\xee\xf8\x53\x3d"
+ "\x6a\xb7\x70\xa9\xfc\x9c\x57\x25"
+ "\xf2\x89\x10\xd3\xb8\xa8\x8c\x30"
+ "\xae\x23\x4f\x0e\x13\x66\x4f\xe1"
+ "\xb6\xc0\xe4\xf8\xef\x93\xbd\x6e"
+ "\x15\x85\x6b\xe3\x60\x81\x1d\x68"
+ "\xd7\x31\x87\x89\x09\xab\xd5\x96"
+ "\x1d\xf3\x6d\x67\x80\xca\x07\x31"
+ "\x5d\xa7\xe4\xfb\x3e\xf2\x9b\x33"
+ "\x52\x18\xc8\x30\xfe\x2d\xca\x1e"
+ "\x79\x92\x7a\x60\x5c\xb6\x58\x87"
+ "\xa4\x36\xa2\x67\x92\x8b\xa4\xb7"
+ "\xf1\x86\xdf\xdc\xc0\x7e\x8f\x63"
+ "\xd2\xa2\xdc\x78\xeb\x4f\xd8\x96"
+ "\x47\xca\xb8\x91\xf9\xf7\x94\x21"
+ "\x5f\x9a\x9f\x5b\xb8\x40\x41\x4b"
+ "\x66\x69\x6a\x72\xd0\xcb\x70\xb7"
+ "\x93\xb5\x37\x96\x05\x37\x4f\xe5"
+ "\x8c\xa7\x5a\x4e\x8b\xb7\x84\xea"
+ "\xc7\xfc\x19\x6e\x1f\x5a\xa1\xac"
+ "\x18\x7d\x52\x3b\xb3\x34\x62\x99"
+ "\xe4\x9e\x31\x04\x3f\xc0\x8d\x84"
+ "\x17\x7c\x25\x48\x52\x67\x11\x27"
+ "\x67\xbb\x5a\x85\xca\x56\xb2\x5c"
+ "\xe6\xec\xd5\x96\x3d\x15\xfc\xfb"
+ "\x22\x25\xf4\x13\xe5\x93\x4b\x9a"
+ "\x77\xf1\x52\x18\xfa\x16\x5e\x49"
+ "\x03\x45\xa8\x08\xfa\xb3\x41\x92"
+ "\x79\x50\x33\xca\xd0\xd7\x42\x55"
+ "\xc3\x9a\x0c\x4e\xd9\xa4\x3c\x86"
+ "\x80\x9f\x53\xd1\xa4\x2e\xd1\xbc"
+ "\xf1\x54\x6e\x93\xa4\x65\x99\x8e"
+ "\xdf\x29\xc0\x64\x63\x07\xbb\xea",
+ .ctext = "\xe0\x33\xf6\xe0\xb4\xa5\xdd\x2b"
+ "\xdd\xce\xfc\x12\x1e\xfc\x2d\xf2"
+ "\x8b\xc7\xeb\xc1\xc4\x2a\xe8\x44"
+ "\x0f\x3d\x97\x19\x2e\x6d\xa2\x38"
+ "\x9d\xa6\xaa\xe1\x96\xb9\x08\xe8"
+ "\x0b\x70\x48\x5c\xed\xb5\x9b\xcb"
+ "\x8b\x40\x88\x7e\x69\x73\xf7\x16"
+ "\x71\xbb\x5b\xfc\xa3\x47\x5d\xa6"
+ "\xae\x3a\x64\xc4\xe7\xb8\xa8\xe7"
+ "\xb1\x32\x19\xdb\xe3\x01\xb8\xf0"
+ "\xa4\x86\xb4\x4c\xc2\xde\x5c\xd2"
+ "\x6c\x77\xd2\xe8\x18\xb7\x0a\xc9"
+ "\x3d\x53\xb5\xc4\x5c\xf0\x8c\x06"
+ "\xdc\x90\xe0\x74\x47\x1b\x0b\xf6"
+ "\xd2\x71\x6b\xc4\xf1\x97\x00\x2d"
+ "\x63\x57\x44\x1f\x8c\xf4\xe6\x9b"
+ "\xe0\x7a\xdd\xec\x32\x73\x42\x32"
+ "\x7f\x35\x67\x60\x0d\xcf\x10\x52"
+ "\x61\x22\x53\x8d\x8e\xbb\x33\x76"
+ "\x59\xd9\x10\xce\xdf\xef\xc0\x41"
+ "\xd5\x33\x29\x6a\xda\x46\xa4\x51"
+ "\xf0\x99\x3d\x96\x31\xdd\xb5\xcb"
+ "\x3e\x2a\x1f\xc7\x5c\x79\xd3\xc5"
+ "\x20\xa1\xb1\x39\x1b\xc6\x0a\x70"
+ "\x26\x39\x95\x07\xad\x7a\xc9\x69"
+ "\xfe\x81\xc7\x88\x08\x38\xaf\xad"
+ "\x9e\x8d\xfb\xe8\x24\x0d\x22\xb8"
+ "\x0e\xed\xbe\x37\x53\x7c\xa6\xc6"
+ "\x78\x62\xec\xa3\x59\xd9\xc6\x9d"
+ "\xb8\x0e\x69\x77\x84\x2d\x6a\x4c"
+ "\xc5\xd9\xb2\xa0\x2b\xa8\x80\xcc"
+ "\xe9\x1e\x9c\x5a\xc4\xa1\xb2\x37"
+ "\x06\x9b\x30\x32\x67\xf7\xe7\xd2"
+ "\x42\xc7\xdf\x4e\xd4\xcb\xa0\x12"
+ "\x94\xa1\x34\x85\x93\x50\x4b\x0a"
+ "\x3c\x7d\x49\x25\x01\x41\x6b\x96"
+ "\xa9\x12\xbb\x0b\xc0\xd7\xd0\x93"
+ "\x1f\x70\x38\xb8\x21\xee\xf6\xa7"
+ "\xee\xeb\xe7\x81\xa4\x13\xb4\x87"
+ "\xfa\xc1\xb0\xb5\x37\x8b\x74\xa2"
+ "\x4e\xc7\xc2\xad\x3d\x62\x3f\xf8"
+ "\x34\x42\xe5\xae\x45\x13\x63\xfe"
+ "\xfc\x2a\x17\x46\x61\xa9\xd3\x1c"
+ "\x4c\xaf\xf0\x09\x62\x26\x66\x1e"
+ "\x74\xcf\xd6\x68\x3d\x7d\xd8\xb7"
+ "\xe7\xe6\xf8\xf0\x08\x20\xf7\x47"
+ "\x1c\x52\xaa\x0f\x3e\x21\xa3\xf2"
+ "\xbf\x2f\x95\x16\xa8\xc8\xc8\x8c"
+ "\x99\x0f\x5d\xfb\xfa\x2b\x58\x8a"
+ "\x7e\xd6\x74\x02\x60\xf0\xd0\x5b"
+ "\x65\xa8\xac\xea\x8d\x68\x46\x34"
+ "\x26\x9d\x4f\xb1\x9a\x8e\xc0\x1a"
+ "\xf1\xed\xc6\x7a\x83\xfd\x8a\x57"
+ "\xf2\xe6\xe4\xba\xfc\xc6\x3c\xad"
+ "\x5b\x19\x50\x2f\x3a\xcc\x06\x46"
+ "\x04\x51\x3f\x91\x97\xf0\xd2\x07"
+ "\xe7\x93\x89\x7e\xb5\x32\x0f\x03"
+ "\xe5\x58\x9e\x74\x72\xeb\xc2\x38"
+ "\x00\x0c\x91\x72\x69\xed\x7d\x6d"
+ "\xc8\x71\xf0\xec\xff\x80\xd9\x1c"
+ "\x9e\xd2\xfa\x15\xfc\x6c\x4e\xbc"
+ "\xb1\xa6\xbd\xbd\x70\x40\xca\x20"
+ "\xb8\x78\xd2\xa3\xc6\xf3\x79\x9c"
+ "\xc7\x27\xe1\x6a\x29\xad\xa4\x03",
+ .len = 512,
+ }
+};
+
/*
* CTS (Cipher Text Stealing) mode tests
*/
diff --git a/drivers/Kconfig b/drivers/Kconfig
index ab4d43923c4d..8395bc515996 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -57,6 +57,8 @@ source "drivers/char/Kconfig"
source "drivers/i2c/Kconfig"
+source "drivers/i3c/Kconfig"
+
source "drivers/spi/Kconfig"
source "drivers/spmi/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 578f469f72fb..e1ce029d28fd 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -111,7 +111,7 @@ obj-$(CONFIG_SERIO) += input/serio/
obj-$(CONFIG_GAMEPORT) += input/gameport/
obj-$(CONFIG_INPUT) += input/
obj-$(CONFIG_RTC_LIB) += rtc/
-obj-y += i2c/ media/
+obj-y += i2c/ i3c/ media/
obj-$(CONFIG_PPS) += pps/
obj-y += ptp/
obj-$(CONFIG_W1) += w1/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7cea769c37df..7b65a807b3dd 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -9,7 +9,6 @@ config ARCH_SUPPORTS_ACPI
menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
depends on ARCH_SUPPORTS_ACPI
- depends on PCI
select PNP
default y if X86
help
@@ -336,7 +335,7 @@ config ACPI_CUSTOM_DSDT_FILE
See Documentation/acpi/dsdt-override.txt
Enter the full path name to the file which includes the AmlCode
- declaration.
+ or dsdt_aml_code declaration.
If unsure, don't enter a file name.
@@ -370,7 +369,7 @@ config ACPI_DEBUG
config ACPI_PCI_SLOT
bool "PCI slot detection driver"
- depends on SYSFS
+ depends on SYSFS && PCI
help
This driver creates entries in /sys/bus/pci/slots/ for all PCI
slots in the system. This can help correlate PCI bus addresses,
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index edc039313cd6..7c6afc111d76 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -39,7 +39,7 @@ acpi-y += processor_core.o
acpi-$(CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC) += processor_pdc.o
acpi-y += ec.o
acpi-$(CONFIG_ACPI_DOCK) += dock.o
-acpi-y += pci_root.o pci_link.o pci_irq.o
+acpi-$(CONFIG_PCI) += pci_root.o pci_link.o pci_irq.o
obj-$(CONFIG_ACPI_MCFG) += pci_mcfg.o
acpi-y += acpi_lpss.o acpi_apd.o
acpi-y += acpi_platform.o
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 2664452fa112..ddf598ae8b6b 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -166,6 +166,11 @@ static const struct apd_device_desc thunderx2_i2c_desc = {
.setup = acpi_apd_setup,
.fixed_clk_rate = 125000000,
};
+
+static const struct apd_device_desc hip08_spi_desc = {
+ .setup = acpi_apd_setup,
+ .fixed_clk_rate = 250000000,
+};
#endif
#else
@@ -234,6 +239,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = {
{ "CAV9007", APD_ADDR(thunderx2_i2c_desc) },
{ "HISI02A1", APD_ADDR(hip07_i2c_desc) },
{ "HISI02A2", APD_ADDR(hip08_i2c_desc) },
+ { "HISI0173", APD_ADDR(hip08_spi_desc) },
#endif
{ }
};
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index b9bda06d344d..5f94c35d165f 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -673,12 +673,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
* have _PS0 and _PS3 without _PSC (and no power resources), so
* acpi_bus_init_power() will assume that the BIOS has put them into D0.
*/
- ret = acpi_device_fix_up_power(adev);
- if (ret) {
- /* Skip the device, but continue the namespace scan. */
- ret = 0;
- goto err_out;
- }
+ acpi_device_fix_up_power(adev);
adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties);
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index b14621da5413..59700433a96e 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -77,13 +77,13 @@ acpi-y += \
hwacpi.o \
hwesleep.o \
hwgpe.o \
- hwpci.o \
hwregs.o \
hwsleep.o \
hwvalid.o \
hwxface.o \
hwxfsleep.o
+acpi-$(CONFIG_PCI) += hwpci.o
acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
acpi-y += \
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 1e6204518496..87d6eb01beaf 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -172,11 +172,7 @@ ACPI_GLOBAL(u8, acpi_gbl_disable_mem_tracking);
*
****************************************************************************/
-#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
#define NUM_PREDEFINED_NAMES 10
-#else
-#define NUM_PREDEFINED_NAMES 9
-#endif
ACPI_GLOBAL(struct acpi_namespace_node, acpi_gbl_root_node_struct);
ACPI_GLOBAL(struct acpi_namespace_node *, acpi_gbl_root_node);
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 43ce67a9da1f..ef99e2fc37f8 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -106,11 +106,20 @@ acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
void *context);
+#ifdef ACPI_PCI_CONFIGURED
/*
* hwpci - PCI configuration support
*/
acpi_status
acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id,
acpi_handle root_pci_device, acpi_handle pci_region);
+#else
+static inline acpi_status
+acpi_hw_derive_pci_id(struct acpi_pci_id *pci_id, acpi_handle root_pci_device,
+ acpi_handle pci_region)
+{
+ return AE_SUPPORT;
+}
+#endif
#endif /* __ACHWARE_H__ */
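
The configured/stub split above is the same pattern this series uses wherever PCI support becomes optional: the real prototype is compiled when ACPI_PCI_CONFIGURED is set, and a static inline stub keeps every caller building (and returning a sane status) otherwise. A minimal sketch of the idiom, with hypothetical names:

    /* Hypothetical header-stub pattern: callers invoke foo_read()
     * unconditionally; without FOO_CONFIGURED it collapses to a
     * compile-time "unsupported" status, mirroring AE_SUPPORT above. */
    #ifdef FOO_CONFIGURED
    int foo_read(u32 reg, u64 *value);
    #else
    static inline int foo_read(u32 reg, u64 *value)
    {
            return -EOPNOTSUPP;
    }
    #endif
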
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index bbb3b4d1e796..9bd25f36c608 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -34,6 +34,7 @@
#define ACPI_NS_TEMPORARY 0x0040
#define ACPI_NS_OVERRIDE_IF_FOUND 0x0080
#define ACPI_NS_EARLY_INIT 0x0100
+#define ACPI_NS_PREFIX_MUST_EXIST 0x0200
/* Flags for acpi_ns_walk_namespace */
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index acf27156dbd4..14be32961b4c 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -60,6 +60,8 @@ struct acpi_walk_state {
struct acpi_parse_state parser_state; /* Current state of parser */
u32 prev_arg_types;
u32 arg_count; /* push for fixed or var args */
+ u16 method_nesting_depth;
+ u8 method_is_nested;
struct acpi_namespace_node arguments[ACPI_METHOD_NUM_ARGS]; /* Control method arguments */
struct acpi_namespace_node local_variables[ACPI_METHOD_NUM_LOCALS]; /* Control method locals */
@@ -74,7 +76,8 @@ struct acpi_walk_state {
struct acpi_namespace_node *method_call_node; /* Called method Node */
union acpi_parse_object *method_call_op; /* method_call Op if running a method */
union acpi_operand_object *method_desc; /* Method descriptor if running a method */
- struct acpi_namespace_node *method_node; /* Method node if running a method. */
+ struct acpi_namespace_node *method_node; /* Method node if running a method */
+ char *method_pathname; /* Full pathname of running method */
union acpi_parse_object *op; /* Current parser op */
const struct acpi_opcode_info *op_info; /* Info on current opcode */
union acpi_parse_object *origin; /* Start of walk [Obsolete] */
diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c
index f2526726daf6..3eb45ea93e5e 100644
--- a/drivers/acpi/acpica/dbxface.c
+++ b/drivers/acpi/acpica/dbxface.c
@@ -24,6 +24,13 @@ acpi_db_start_command(struct acpi_walk_state *walk_state,
void acpi_db_method_end(struct acpi_walk_state *walk_state);
#endif
+#ifdef ACPI_DISASSEMBLER
+static union acpi_parse_object *
+acpi_db_get_display_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op);
+#endif
+
/*******************************************************************************
*
* FUNCTION: acpi_db_start_command
@@ -113,6 +120,70 @@ void acpi_db_signal_break_point(struct acpi_walk_state *walk_state)
acpi_os_printf("**break** Executed AML BreakPoint opcode\n");
}
+#ifdef ACPI_DISASSEMBLER
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_db_get_display_op
+ *
+ * PARAMETERS: walk_state - Current walk
+ * op - Current executing op (from aml interpreter)
+ *
+ * RETURN: Opcode to display
+ *
+ * DESCRIPTION: Find the opcode to display during single stepping
+ *
+ ******************************************************************************/
+
+static union acpi_parse_object *
+acpi_db_get_display_op(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op)
+{
+ union acpi_parse_object *display_op;
+ union acpi_parse_object *parent_op;
+
+ display_op = op;
+ parent_op = op->common.parent;
+ if (parent_op) {
+ if ((walk_state->control_state) &&
+ (walk_state->control_state->common.state ==
+ ACPI_CONTROL_PREDICATE_EXECUTING)) {
+ /*
+ * We are executing the predicate of an IF or WHILE statement
+ * Search upwards for the containing IF or WHILE so that the
+ * entire predicate can be displayed.
+ */
+ while (parent_op) {
+ if ((parent_op->common.aml_opcode == AML_IF_OP)
+ || (parent_op->common.aml_opcode ==
+ AML_WHILE_OP)) {
+ display_op = parent_op;
+ break;
+ }
+ parent_op = parent_op->common.parent;
+ }
+ } else {
+ while (parent_op) {
+ if ((parent_op->common.aml_opcode == AML_IF_OP)
+ || (parent_op->common.aml_opcode ==
+ AML_ELSE_OP)
+ || (parent_op->common.aml_opcode ==
+ AML_SCOPE_OP)
+ || (parent_op->common.aml_opcode ==
+ AML_METHOD_OP)
+ || (parent_op->common.aml_opcode ==
+ AML_WHILE_OP)) {
+ break;
+ }
+ display_op = parent_op;
+ parent_op = parent_op->common.parent;
+ }
+ }
+ }
+ return display_op;
+}
+#endif
+
/*******************************************************************************
*
* FUNCTION: acpi_db_single_step
@@ -134,8 +205,6 @@ acpi_db_single_step(struct acpi_walk_state *walk_state,
union acpi_parse_object *next;
acpi_status status = AE_OK;
u32 original_debug_level;
- union acpi_parse_object *display_op;
- union acpi_parse_object *parent_op;
u32 aml_offset;
ACPI_FUNCTION_ENTRY();
@@ -222,51 +291,12 @@ acpi_db_single_step(struct acpi_walk_state *walk_state,
next = op->common.next;
op->common.next = NULL;
- display_op = op;
- parent_op = op->common.parent;
- if (parent_op) {
- if ((walk_state->control_state) &&
- (walk_state->control_state->common.state ==
- ACPI_CONTROL_PREDICATE_EXECUTING)) {
- /*
- * We are executing the predicate of an IF or WHILE statement
- * Search upwards for the containing IF or WHILE so that the
- * entire predicate can be displayed.
- */
- while (parent_op) {
- if ((parent_op->common.aml_opcode ==
- AML_IF_OP)
- || (parent_op->common.aml_opcode ==
- AML_WHILE_OP)) {
- display_op = parent_op;
- break;
- }
- parent_op = parent_op->common.parent;
- }
- } else {
- while (parent_op) {
- if ((parent_op->common.aml_opcode ==
- AML_IF_OP)
- || (parent_op->common.aml_opcode ==
- AML_ELSE_OP)
- || (parent_op->common.aml_opcode ==
- AML_SCOPE_OP)
- || (parent_op->common.aml_opcode ==
- AML_METHOD_OP)
- || (parent_op->common.aml_opcode ==
- AML_WHILE_OP)) {
- break;
- }
- display_op = parent_op;
- parent_op = parent_op->common.parent;
- }
- }
- }
-
/* Now we can disassemble and display it */
#ifdef ACPI_DISASSEMBLER
- acpi_dm_disassemble(walk_state, display_op, ACPI_UINT32_MAX);
+ acpi_dm_disassemble(walk_state,
+ acpi_db_get_display_op(walk_state, op),
+ ACPI_UINT32_MAX);
#else
/*
* The AML Disassembler is not configured - at least we can
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index dd4deb678d13..c1a4d02fafd5 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -532,6 +532,9 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
goto cleanup;
}
+ next_walk_state->method_nesting_depth =
+ this_walk_state->method_nesting_depth + 1;
+
/*
* Delete the operands on the previous walkstate operand stack
* (they were copied to new objects)
@@ -549,6 +552,17 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
"**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
method_node->name.ascii, next_walk_state));
+ this_walk_state->method_pathname =
+ acpi_ns_get_normalized_pathname(method_node, TRUE);
+ this_walk_state->method_is_nested = TRUE;
+
+ /* Optional object evaluation log */
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
+ "%-26s: %*s%s\n", " Nested method call",
+ next_walk_state->method_nesting_depth * 3, " ",
+ &this_walk_state->method_pathname[1]));
+
/* Invoke an internal method if necessary */
if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
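
The evaluation log above indents nested calls with printf's dynamic field width: "%*s" consumes method_nesting_depth * 3 as a minimum width for the single-space string, giving three columns per nesting level. A standalone illustration (the method pathname is made up):

    #include <stdio.h>

    int main(void)
    {
            /* Depth 2 -> width 2 * 3 = 6, so " " is padded to six
             * leading spaces before the (hypothetical) pathname. */
            printf("%-26s: %*s%s\n", " Nested method call", 2 * 3, " ",
                   "_SB.PCI0.MTH2");
            return 0;
    }
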
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 6992c8d5ab43..6a9cc613adaa 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -18,7 +18,6 @@
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsobject")
-#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
* FUNCTION: acpi_ds_build_internal_object
@@ -299,8 +298,6 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
-#endif /* ACPI_NO_METHOD_EXECUTION */
-
/*******************************************************************************
*
* FUNCTION: acpi_ds_init_object_from_op
@@ -404,9 +401,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
/* Truncate value if we are executing from a 32-bit ACPI table */
-#ifndef ACPI_NO_METHOD_EXECUTION
(void)acpi_ex_truncate_for32bit_table(obj_desc);
-#endif
break;
case AML_REVISION_OP:
@@ -428,7 +423,6 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
obj_desc->integer.value = op->common.value.integer;
-#ifndef ACPI_NO_METHOD_EXECUTION
if (acpi_ex_truncate_for32bit_table(obj_desc)) {
/* Warn if we found a 64-bit constant in a 32-bit table */
@@ -439,7 +433,6 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
value.integer),
(u32)obj_desc->integer.value));
}
-#endif
break;
default:
@@ -477,7 +470,6 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
((u32)opcode) - AML_FIRST_LOCAL_OP;
obj_desc->reference.class = ACPI_REFCLASS_LOCAL;
-#ifndef ACPI_NO_METHOD_EXECUTION
status =
acpi_ds_method_data_get_node(ACPI_REFCLASS_LOCAL,
obj_desc->reference.
@@ -487,7 +479,6 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
acpi_namespace_node,
&obj_desc->reference.
object));
-#endif
break;
case AML_TYPE_METHOD_ARGUMENT:
@@ -498,7 +489,6 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
((u32)opcode) - AML_FIRST_ARG_OP;
obj_desc->reference.class = ACPI_REFCLASS_ARG;
-#ifndef ACPI_NO_METHOD_EXECUTION
status = acpi_ds_method_data_get_node(ACPI_REFCLASS_ARG,
obj_desc->
reference.value,
@@ -509,7 +499,6 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
&obj_desc->
reference.
object));
-#endif
break;
default: /* Object name or Debug object */
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
index d703a5594a02..584853385268 100644
--- a/drivers/acpi/acpica/dspkginit.c
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -152,6 +152,32 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
*/
for (i = 0; arg && (i < element_count); i++) {
if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+ if (!arg->common.node) {
+ /*
+ * This is the case where an expression has returned a value.
+ * The use of expressions (term_args) within individual
+ * package elements is not supported by the AML interpreter,
+ * even though the ASL grammar supports it. Example:
+ *
+ * Name (INT1, 0x1234)
+ *
+ * Name (PKG3, Package () {
+ * Add (INT1, 0xAAAA0000)
+ * })
+ *
+ * 1) No known AML interpreter supports this type of construct
+ * 2) This fixes a fault if the construct is encountered
+ */
+ ACPI_EXCEPTION((AE_INFO, AE_SUPPORT,
+ "Expressions within package elements are not supported"));
+
+ /* Cleanup the return object, it is not needed */
+
+ acpi_ut_remove_reference(walk_state->results->
+ results.obj_desc[0]);
+ return_ACPI_STATUS(AE_SUPPORT);
+ }
+
if (arg->common.node->type == ACPI_TYPE_METHOD) {
/*
* A method reference "looks" to the parser to be a method
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 8d1b75400515..fb9ed5e1da89 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -57,7 +57,6 @@ void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state)
}
}
-#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
* FUNCTION: acpi_ds_do_implicit_return
@@ -401,7 +400,6 @@ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state)
walk_state->num_operands = 0;
return_VOID;
}
-#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index d06c41446282..e2ef09643d50 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -73,12 +73,10 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
/* Execution pass */
-#ifndef ACPI_NO_METHOD_EXECUTION
walk_state->parse_flags |= ACPI_PARSE_EXECUTE |
ACPI_PARSE_DELETE_TREE;
walk_state->descending_callback = acpi_ds_exec_begin_op;
walk_state->ascending_callback = acpi_ds_exec_end_op;
-#endif
break;
default:
@@ -364,7 +362,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
/* Initialize the op */
-#if (defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY))
+#ifdef ACPI_CONSTANT_EVAL_ONLY
op->named.path = path;
#endif
@@ -422,7 +420,6 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
object_type = walk_state->op_info->object_type;
-#ifndef ACPI_NO_METHOD_EXECUTION
if (walk_state->op_info->flags & AML_FIELD) {
/*
* If we are executing a method, do not create any namespace objects
@@ -466,7 +463,6 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
}
}
}
-#endif
if (op->common.aml_opcode == AML_NAME_OP) {
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index b4685bb5f071..9a309f5c4de8 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -296,6 +296,14 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
}
#endif
+ /*
+ * For name creation opcodes, the full namepath prefix must
+ * exist, except for the final (new) nameseg.
+ */
+ if (walk_state->op_info->flags & AML_NAMED) {
+ flags |= ACPI_NS_PREFIX_MUST_EXIST;
+ }
+
/* Add new entry or lookup existing entry */
status =
@@ -363,10 +371,8 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
struct acpi_namespace_node *node;
union acpi_parse_object *arg;
struct acpi_namespace_node *new_node;
-#ifndef ACPI_NO_METHOD_EXECUTION
u32 i;
u8 region_space;
-#endif
ACPI_FUNCTION_TRACE(ds_load2_end_op);
@@ -453,7 +459,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
arg = op->common.value.arg;
switch (walk_state->op_info->type) {
-#ifndef ACPI_NO_METHOD_EXECUTION
case AML_TYPE_CREATE_FIELD:
/*
@@ -550,12 +555,10 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
}
break;
-#endif /* ACPI_NO_METHOD_EXECUTION */
case AML_TYPE_NAMED_COMPLEX:
switch (op->common.aml_opcode) {
-#ifndef ACPI_NO_METHOD_EXECUTION
case AML_REGION_OP:
case AML_DATA_REGION_OP:
@@ -643,8 +646,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
}
break;
-#endif /* ACPI_NO_METHOD_EXECUTION */
-
default:
/* All NAMED_COMPLEX opcodes must be handled above */
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index c879380e5ce1..4c1ec202d5ab 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -530,7 +530,7 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
/* Init the method args/local */
-#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
+#ifndef ACPI_CONSTANT_EVAL_ONLY
acpi_ds_method_data_init(walk_state);
#endif
diff --git a/drivers/acpi/acpica/evhandler.c b/drivers/acpi/acpica/evhandler.c
index d319ee33d040..4ed1e67db6be 100644
--- a/drivers/acpi/acpica/evhandler.c
+++ b/drivers/acpi/acpica/evhandler.c
@@ -364,25 +364,25 @@ acpi_ev_install_space_handler(struct acpi_namespace_node *node,
handler = acpi_ex_system_io_space_handler;
setup = acpi_ev_io_space_region_setup;
break;
-
+#ifdef ACPI_PCI_CONFIGURED
case ACPI_ADR_SPACE_PCI_CONFIG:
handler = acpi_ex_pci_config_space_handler;
setup = acpi_ev_pci_config_region_setup;
break;
-
+#endif
case ACPI_ADR_SPACE_CMOS:
handler = acpi_ex_cmos_space_handler;
setup = acpi_ev_cmos_region_setup;
break;
-
+#ifdef ACPI_PCI_CONFIGURED
case ACPI_ADR_SPACE_PCI_BAR_TARGET:
handler = acpi_ex_pci_bar_space_handler;
setup = acpi_ev_pci_bar_region_setup;
break;
-
+#endif
case ACPI_ADR_SPACE_DATA_TABLE:
handler = acpi_ex_data_table_space_handler;
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 98de48481776..1a70b80cc406 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -323,7 +323,7 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
/* hex_length: 2 ascii hex chars per data byte */
- hex_length = ACPI_MUL_2(data_width);
+ hex_length = (data_width * 2);
for (i = 0, j = (hex_length - 1); i < hex_length; i++, j--) {
/* Get one hex digit, most significant digits first */
@@ -364,7 +364,8 @@ acpi_ex_convert_to_ascii(u64 integer, u16 base, u8 *string, u8 data_width)
*
* RETURN: Status
*
- * DESCRIPTION: Convert an ACPI Object to a string
+ * DESCRIPTION: Convert an ACPI Object to a string. Supports both implicit
+ * and explicit conversions and related rules.
*
******************************************************************************/
@@ -393,9 +394,11 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
switch (type) {
case ACPI_EXPLICIT_CONVERT_DECIMAL:
-
- /* Make room for maximum decimal number */
-
+ /*
+ * From to_decimal_string, integer source.
+ *
+ * Make room for the maximum decimal number size
+ */
string_length = ACPI_MAX_DECIMAL_DIGITS;
base = 10;
break;
@@ -440,8 +443,10 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
switch (type) {
case ACPI_EXPLICIT_CONVERT_DECIMAL: /* Used by to_decimal_string */
/*
- * From ACPI: "If Data is a buffer, it is converted to a string of
- * decimal values separated by commas."
+ * Explicit conversion from the to_decimal_string ASL operator.
+ *
+ * From ACPI: "If the input is a buffer, it is converted to a
+ * string of decimal values separated by commas."
*/
base = 10;
@@ -462,20 +467,29 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
case ACPI_IMPLICIT_CONVERT_HEX:
/*
+ * Implicit buffer-to-string conversion
+ *
* From the ACPI spec:
- *"The entire contents of the buffer are converted to a string of
+ * "The entire contents of the buffer are converted to a string of
* two-character hexadecimal numbers, each separated by a space."
+ *
+ * Each hex number is prefixed with 0x (11/2018)
*/
separator = ' ';
- string_length = (obj_desc->buffer.length * 3);
+ string_length = (obj_desc->buffer.length * 5);
break;
- case ACPI_EXPLICIT_CONVERT_HEX: /* Used by to_hex_string */
+ case ACPI_EXPLICIT_CONVERT_HEX:
/*
+ * Explicit conversion from the to_hex_string ASL operator.
+ *
* From ACPI: "If Data is a buffer, it is converted to a string of
* hexadecimal values separated by commas."
+ *
+ * Each hex number is prefixed with 0x (11/2018)
*/
- string_length = (obj_desc->buffer.length * 3);
+ separator = ',';
+ string_length = (obj_desc->buffer.length * 5);
break;
default:
@@ -504,10 +518,21 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
* (separated by commas or spaces)
*/
for (i = 0; i < obj_desc->buffer.length; i++) {
+ if (base == 16) {
+
+ /* Emit 0x prefix for explicit/implicit hex conversion */
+
+ *new_buf++ = '0';
+ *new_buf++ = 'x';
+ }
+
new_buf += acpi_ex_convert_to_ascii((u64) obj_desc->
buffer.pointer[i],
base, new_buf, 1);
- *new_buf++ = separator; /* each separated by a comma or space */
+
+ /* Each digit is separated by either a comma or space */
+
+ *new_buf++ = separator;
}
/*
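
The new sizing follows from the output format: each source byte renders as "0x", two hex digits and one separator, i.e. five characters per byte, with the terminator taking the final separator's slot. A quick check, assuming a four-byte buffer:

    /* Hypothetical input {0xDE, 0xAD, 0xBE, 0xEF} formats as
     * "0xDE,0xAD,0xBE,0xEF": 4 * 5 = 20 characters, the final
     * separator being overwritten by '\0'. */
    u32 buffer_length = 4;
    u32 string_length = buffer_length * 5;      /* matches the code above */
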
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index e49fa3c1321a..3304c6b1e8a7 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -15,7 +15,6 @@
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("excreate")
-#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
* FUNCTION: acpi_ex_create_alias
@@ -390,7 +389,6 @@ acpi_status acpi_ex_create_power_resource(struct acpi_walk_state *walk_state)
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
-#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index d5b3efd35a5b..3a477566ba1b 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -287,9 +287,9 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
* NOTE: A length of zero is ok, and will create a zero-length, null
* terminated string.
*/
- while ((length < operand[0]->buffer.length) &&
- (length < operand[1]->integer.value) &&
- (operand[0]->buffer.pointer[length])) {
+ while ((length < operand[0]->buffer.length) && /* Length of input buffer */
+ (length < operand[1]->integer.value) && /* Length operand */
+ (operand[0]->buffer.pointer[length])) { /* Null terminator */
length++;
}
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 97bbfd07fcf7..2c58f5e00b1a 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -311,6 +311,7 @@ acpi_ex_system_io_space_handler(u32 function,
return_ACPI_STATUS(status);
}
+#ifdef ACPI_PCI_CONFIGURED
/*******************************************************************************
*
* FUNCTION: acpi_ex_pci_config_space_handler
@@ -387,6 +388,7 @@ acpi_ex_pci_config_space_handler(u32 function,
return_ACPI_STATUS(status);
}
+#endif
/*******************************************************************************
*
@@ -420,6 +422,7 @@ acpi_ex_cmos_space_handler(u32 function,
return_ACPI_STATUS(status);
}
+#ifdef ACPI_PCI_CONFIGURED
/*******************************************************************************
*
* FUNCTION: acpi_ex_pci_bar_space_handler
@@ -451,6 +454,7 @@ acpi_ex_pci_bar_space_handler(u32 function,
return_ACPI_STATUS(status);
}
+#endif
/*******************************************************************************
*
diff --git a/drivers/acpi/acpica/exserial.c b/drivers/acpi/acpica/exserial.c
index 0d42f30e5b25..ec61553c4483 100644
--- a/drivers/acpi/acpica/exserial.c
+++ b/drivers/acpi/acpica/exserial.c
@@ -282,14 +282,12 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
case ACPI_ADR_SPACE_SMBUS:
buffer_length = ACPI_SMBUS_BUFFER_SIZE;
- data_length = ACPI_SMBUS_DATA_SIZE;
function = ACPI_WRITE | (obj_desc->field.attribute << 16);
break;
case ACPI_ADR_SPACE_IPMI:
buffer_length = ACPI_IPMI_BUFFER_SIZE;
- data_length = ACPI_IPMI_DATA_SIZE;
function = ACPI_WRITE;
break;
@@ -310,7 +308,6 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
/* Add header length to get the full size of the buffer */
buffer_length += ACPI_SERIAL_HEADER_SIZE;
- data_length = source_desc->buffer.pointer[1];
function = ACPI_WRITE | (accessor_type << 16);
break;
@@ -318,20 +315,6 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
}
-#if 0
- OBSOLETE ?
- /* Check for possible buffer overflow */
- if (data_length > source_desc->buffer.length) {
- ACPI_ERROR((AE_INFO,
- "Length in buffer header (%u)(%u) is greater than "
- "the physical buffer length (%u) and will overflow",
- data_length, buffer_length,
- source_desc->buffer.length));
-
- return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
- }
-#endif
-
/* Create the transfer/bidirectional/return buffer */
buffer_desc = acpi_ut_create_buffer_object(buffer_length);
@@ -342,6 +325,8 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
/* Copy the input buffer data to the transfer buffer */
buffer = buffer_desc->buffer.pointer;
+ data_length = (buffer_length < source_desc->buffer.length ?
+ buffer_length : source_desc->buffer.length);
memcpy(buffer, source_desc->buffer.pointer, data_length);
/* Lock entire transaction if requested */
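
The new assignment clamps the copy to the smaller of the transfer buffer and the source buffer, replacing the #if 0 overflow check removed above. Outside ACPICA (which avoids kernel helpers for portability) the same clamp would normally be written with min(); a sketch:

    /* Equivalent clamp with the kernel's min() helper; both
     * operands are u32 in this function. */
    data_length = min(buffer_length, source_desc->buffer.length);
    memcpy(buffer, source_desc->buffer.pointer, data_length);
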
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 6ce307d5ce2a..bd22e27adf9b 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -34,7 +34,6 @@ ACPI_MODULE_NAME("exutils")
/* Local prototypes */
static u32 acpi_ex_digits_needed(u64 value, u32 base);
-#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
* FUNCTION: acpi_ex_enter_interpreter
@@ -409,5 +408,3 @@ u8 acpi_is_valid_space_id(u8 space_id)
return (TRUE);
}
-
-#endif
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index e3f10afde5ff..75192b958544 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -267,6 +267,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
acpi_object_type this_search_type;
u32 search_parent_flag = ACPI_NS_SEARCH_PARENT;
u32 local_flags;
+ acpi_interpreter_mode local_interpreter_mode;
ACPI_FUNCTION_TRACE(ns_lookup);
@@ -506,6 +507,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
*/
this_search_type = ACPI_TYPE_ANY;
current_node = this_node;
+
while (num_segments && current_node) {
num_segments--;
if (!num_segments) {
@@ -536,6 +538,16 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
}
}
+ /* Handle opcodes that create a new name_seg via a full name_path */
+
+ local_interpreter_mode = interpreter_mode;
+ if ((flags & ACPI_NS_PREFIX_MUST_EXIST) && (num_segments > 0)) {
+
+ /* Every element of the path must exist (except for the final name_seg) */
+
+ local_interpreter_mode = ACPI_IMODE_EXECUTE;
+ }
+
/* Extract one ACPI name from the front of the pathname */
ACPI_MOVE_32_TO_32(&simple_name, path);
@@ -544,12 +556,19 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
status =
acpi_ns_search_and_enter(simple_name, walk_state,
- current_node, interpreter_mode,
+ current_node,
+ local_interpreter_mode,
this_search_type, local_flags,
&this_node);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_FOUND) {
-
+#if !defined ACPI_ASL_COMPILER /* Note: iASL reports this error by itself, not needed here */
+ if (flags & ACPI_NS_PREFIX_MUST_EXIST) {
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR
+ "Object does not exist: %4.4s\n",
+ &simple_name);
+ }
+#endif
/* Name not found in ACPI namespace */
ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index 64ba80ede0ad..6390b7951ebf 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -104,6 +104,13 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
return_ACPI_STATUS(AE_NO_MEMORY);
}
+ /* Optional object evaluation log */
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
+ "%-26s: %s (%s)\n", " Enter evaluation",
+ &info->full_pathname[1],
+ acpi_ut_get_type_name(info->node->type)));
+
/* Count the number of arguments being passed in */
info->param_count = 0;
@@ -289,6 +296,12 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
info->relative_pathname));
cleanup:
+ /* Optional object evaluation log */
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
+ "%-26s: %s\n", " Exit evaluation",
+ &info->full_pathname[1]));
+
/*
* Namespace was unlocked by the handling acpi_ns* function, so we
* just free the pathname and return
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index e291bb8cd369..04bc73e82aed 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -24,7 +24,6 @@ acpi_status acpi_ns_unload_namespace(acpi_handle handle);
static acpi_status acpi_ns_delete_subtree(acpi_handle start_handle);
#endif
-#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
* FUNCTION: acpi_ns_load_table
@@ -297,4 +296,3 @@ acpi_status acpi_ns_unload_namespace(acpi_handle handle)
return_ACPI_STATUS(status);
}
#endif
-#endif
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index c9ef4949869f..488ff39d86f7 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -107,8 +107,20 @@ acpi_ns_execute_table(u32 table_index, struct acpi_namespace_node *start_node)
goto cleanup;
}
+ /* Optional object evaluation log */
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
+ "%-26s: (Definition Block level)\n",
+ "Module-level evaluation"));
+
status = acpi_ps_execute_table(info);
+ /* Optional object evaluation log */
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
+ "%-26s: (Definition Block level)\n",
+ "Module-level complete"));
+
cleanup:
if (info) {
ACPI_FREE(info->full_pathname);
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 0fa01c9e353e..e00d1af6fa80 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -428,7 +428,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
parser_state = &walk_state->parser_state;
walk_state->arg_types = 0;
-#if (!defined (ACPI_NO_METHOD_EXECUTION) && !defined (ACPI_CONSTANT_EVAL_ONLY))
+#ifndef ACPI_CONSTANT_EVAL_ONLY
if (walk_state->walk_type & ACPI_WALK_METHOD_RESTART) {
@@ -508,7 +508,8 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
*/
if ((walk_state->
parse_flags & ACPI_PARSE_MODULE_LEVEL)
- && status == AE_ALREADY_EXISTS) {
+ && ((status == AE_ALREADY_EXISTS)
+ || (status == AE_NOT_FOUND))) {
status = AE_OK;
}
if (status == AE_CTRL_PARSE_CONTINUE) {
@@ -537,10 +538,7 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
* the scope op because the parse failure indicates that
* the device may not exist.
*/
- ACPI_ERROR((AE_INFO,
- "Skip parsing opcode %s",
- acpi_ps_get_opcode_name
- (walk_state->opcode)));
+ ACPI_INFO(("Skipping parse of AML opcode: %s (0x%4.4X)", acpi_ps_get_opcode_name(walk_state->opcode), walk_state->opcode));
/*
* Determine the opcode length before skipping the opcode.
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index 3138e7a00da8..e1fd819a2955 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -600,8 +600,7 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
* because there could be correct AML beyond the parts that caused
* the runtime error.
*/
- ACPI_ERROR((AE_INFO,
- "Ignore error and continue table load"));
+ ACPI_INFO(("Ignoring error and continuing table load"));
return_ACPI_STATUS(AE_OK);
}
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index a16a6ea5ae02..65603473b6cb 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -479,6 +479,21 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
"Completed one call to walk loop, %s State=%p\n",
acpi_format_exception(status), walk_state));
+ if (walk_state->method_pathname && walk_state->method_is_nested) {
+
+ /* Optional object evaluation log */
+
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
+ "%-26s: %*s%s\n",
+ " Exit nested method",
+ (walk_state->method_nesting_depth + 1) * 3,
+ " ",
+ &walk_state->method_pathname[1]));
+
+ ACPI_FREE(walk_state->method_pathname);
+ walk_state->method_is_nested = FALSE;
+ }
if (status == AE_CTRL_TRANSFER) {
/*
* A method call was detected.
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index f26bcbbc2c27..5743b22399a0 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -147,6 +147,9 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
goto cleanup;
}
+ walk_state->method_pathname = info->full_pathname;
+ walk_state->method_is_nested = FALSE;
+
if (info->obj_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) {
walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL;
}
@@ -267,6 +270,9 @@ acpi_status acpi_ps_execute_table(struct acpi_evaluate_info *info)
goto cleanup;
}
+ walk_state->method_pathname = info->full_pathname;
+ walk_state->method_is_nested = FALSE;
+
if (info->obj_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL) {
walk_state->parse_flags |= ACPI_PARSE_MODULE_LEVEL;
}
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index fa674e9b0e62..f8c5b49344df 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -83,10 +83,7 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
{"_REV", ACPI_TYPE_INTEGER, ACPI_CAST_PTR(char, 2)},
{"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
{"_GL_", ACPI_TYPE_MUTEX, ACPI_CAST_PTR(char, 1)},
-
-#if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
{"_OSI", ACPI_TYPE_METHOD, ACPI_CAST_PTR(char, 1)},
-#endif
/* Table terminator */
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index ed73d79b500e..afaadc73196b 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -62,7 +62,8 @@ u8 acpi_ut_is_aml_table(struct acpi_table_header *table)
if (ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) ||
ACPI_COMPARE_NAME(table->signature, ACPI_SIG_PSDT) ||
ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT) ||
- ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT)) {
+ ACPI_COMPARE_NAME(table->signature, ACPI_SIG_OSDT) ||
+ ACPI_IS_OEM_SIG(table->signature)) {
return (TRUE);
}
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 64b63c81994b..902a47463abf 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -70,6 +70,8 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2016", NULL, 0, ACPI_OSI_WIN_10_RS1}, /* Windows 10 version 1607 - Added 12/2017 */
{"Windows 2017", NULL, 0, ACPI_OSI_WIN_10_RS2}, /* Windows 10 version 1703 - Added 12/2017 */
{"Windows 2017.2", NULL, 0, ACPI_OSI_WIN_10_RS3}, /* Windows 10 version 1709 - Added 02/2018 */
+ {"Windows 2018", NULL, 0, ACPI_OSI_WIN_10_RS4}, /* Windows 10 version 1803 - Added 11/2018 */
+ {"Windows 2018.2", NULL, 0, ACPI_OSI_WIN_10_RS5}, /* Windows 10 version 1809 - Added 11/2018 */
/* Feature Group Strings */
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index b38737c83a24..fcccbfdbdd1a 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -607,17 +607,7 @@ static int available_error_type_show(struct seq_file *m, void *v)
return 0;
}
-static int available_error_type_open(struct inode *inode, struct file *file)
-{
- return single_open(file, available_error_type_show, NULL);
-}
-
-static const struct file_operations available_error_type_fops = {
- .open = available_error_type_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(available_error_type);
static int error_type_get(void *data, u64 *val)
{
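
DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the open helper and file_operations that the removed lines spelled out by hand. Its expansion is roughly the following; note that it passes inode->i_private to single_open() where the old code passed NULL, and that it sets .owner:

    /* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(available_error_type) */
    static int available_error_type_open(struct inode *inode, struct file *file)
    {
            return single_open(file, available_error_type_show, inode->i_private);
    }

    static const struct file_operations available_error_type_fops = {
            .owner   = THIS_MODULE,
            .open    = available_error_type_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };
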
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 3c5ea7cb693e..9953e50667ec 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -1035,7 +1035,7 @@ skip:
CPER_SECTION_TYPE_MCE) == 0)
record->type = PSTORE_TYPE_MCE;
else
- record->type = PSTORE_TYPE_UNKNOWN;
+ record->type = PSTORE_TYPE_MAX;
if (rcd->hdr.validation_bits & CPER_VALID_TIMESTAMP)
record->time.tv_sec = rcd->hdr.timestamp;
@@ -1176,7 +1176,6 @@ static int __init erst_init(void)
"Error Record Serialization Table (ERST) support is initialized.\n");
buf = kmalloc(erst_erange.size, GFP_KERNEL);
- spin_lock_init(&erst_info.buf_lock);
if (buf) {
erst_info.buf = buf + sizeof(struct cper_pstore_record);
erst_info.bufsize = erst_erange.size -
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 02c6fd9caff7..f008ba7c9ced 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -691,6 +691,8 @@ static void __ghes_panic(struct ghes *ghes)
{
__ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);
+ ghes_clear_estatus(ghes);
+
/* reboot to log the error! */
if (!panic_timeout)
panic_timeout = ghes_panic_timeout;
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 2a361e22d38d..2159ad9bf9ed 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -700,7 +700,7 @@ static void iort_set_device_domain(struct device *dev,
*/
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
- struct acpi_iort_node *node, *msi_parent;
+ struct acpi_iort_node *node, *msi_parent = NULL;
struct fwnode_handle *iort_fwnode;
struct acpi_iort_its_group *its;
int i;
@@ -1435,8 +1435,14 @@ dev_put:
return ret;
}
-static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
+#ifdef CONFIG_PCI
+static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
+ static bool acs_enabled __initdata;
+
+ if (acs_enabled)
+ return;
+
if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
struct acpi_iort_node *parent;
struct acpi_iort_id_mapping *map;
@@ -1458,13 +1464,15 @@ static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
if ((parent->type == ACPI_IORT_NODE_SMMU) ||
(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
pci_request_acs();
- return true;
+ acs_enabled = true;
+ return;
}
}
}
-
- return false;
}
+#else
+static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
+#endif
static void __init iort_init_platform_devices(void)
{
@@ -1472,7 +1480,6 @@ static void __init iort_init_platform_devices(void)
struct acpi_table_iort *iort;
struct fwnode_handle *fwnode;
int i, ret;
- bool acs_enabled = false;
const struct iort_dev_config *ops;
/*
@@ -1493,8 +1500,7 @@ static void __init iort_init_platform_devices(void)
return;
}
- if (!acs_enabled)
- acs_enabled = iort_enable_acs(iort_node);
+ iort_enable_acs(iort_node);
ops = iort_get_dev_cfg(iort_node);
if (ops) {
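
Folding the guard into iort_enable_acs() relies on a function-local static: it keeps the run-once state across the per-node loop without threading a flag through the caller, and __initdata lets the flag be discarded with the rest of the init-only code after boot. The bare shape of the idiom:

    /* Run-once guard local to an __init function; the flag is only
     * set once the one-time work actually succeeds. */
    static void __init enable_feature_once(void)
    {
            static bool enabled __initdata;

            if (enabled)
                    return;
            /* ... attempt the one-time enable; on success: ... */
            enabled = true;
    }
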
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index bb3d96dea6db..99d820a693a8 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1237,7 +1237,6 @@ static int __init acpi_init(void)
acpi_kobj = NULL;
}
- init_acpi_device_notify();
result = acpi_bus_init();
if (result) {
disable_acpi();
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d4e5610e09c5..9d66a47d32fb 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1034,6 +1034,18 @@ void acpi_ec_unblock_transactions(void)
acpi_ec_start(first_ec, true);
}
+void acpi_ec_mark_gpe_for_wake(void)
+{
+ if (first_ec && !ec_no_wakeup)
+ acpi_mark_gpe_for_wake(NULL, first_ec->gpe);
+}
+
+void acpi_ec_set_gpe_wake_mask(u8 action)
+{
+ if (first_ec && !ec_no_wakeup)
+ acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
+}
+
void acpi_ec_dispatch_gpe(void)
{
if (first_ec)
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 12ba2bee8789..edd10b3c7ec8 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -296,7 +296,7 @@ int acpi_unbind_one(struct device *dev)
}
EXPORT_SYMBOL_GPL(acpi_unbind_one);
-static int acpi_platform_notify(struct device *dev)
+static int acpi_device_notify(struct device *dev)
{
struct acpi_bus_type *type = acpi_get_bus_type(dev);
struct acpi_device *adev;
@@ -343,7 +343,7 @@ static int acpi_platform_notify(struct device *dev)
return ret;
}
-static int acpi_platform_notify_remove(struct device *dev)
+static int acpi_device_notify_remove(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
struct acpi_bus_type *type;
@@ -361,12 +361,17 @@ static int acpi_platform_notify_remove(struct device *dev)
return 0;
}
-void __init init_acpi_device_notify(void)
+int acpi_platform_notify(struct device *dev, enum kobject_action action)
{
- if (platform_notify || platform_notify_remove) {
- printk(KERN_ERR PREFIX "Can't use platform_notify\n");
- return;
+ switch (action) {
+ case KOBJ_ADD:
+ acpi_device_notify(dev);
+ break;
+ case KOBJ_REMOVE:
+ acpi_device_notify_remove(dev);
+ break;
+ default:
+ break;
}
- platform_notify = acpi_platform_notify;
- platform_notify_remove = acpi_platform_notify_remove;
+ return 0;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 530a3f675490..7e6952edb5b0 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -23,10 +23,14 @@
int early_acpi_osi_init(void);
int acpi_osi_init(void);
acpi_status acpi_os_initialize1(void);
-void init_acpi_device_notify(void);
int acpi_scan_init(void);
+#ifdef CONFIG_PCI
void acpi_pci_root_init(void);
void acpi_pci_link_init(void);
+#else
+static inline void acpi_pci_root_init(void) {}
+static inline void acpi_pci_link_init(void) {}
+#endif
void acpi_processor_init(void);
void acpi_platform_init(void);
void acpi_pnp_init(void);
@@ -188,6 +192,8 @@ int acpi_ec_ecdt_probe(void);
int acpi_ec_dsdt_probe(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
+void acpi_ec_mark_gpe_for_wake(void);
+void acpi_ec_set_gpe_wake_mask(u8 action);
void acpi_ec_dispatch_gpe(void);
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 14d9f5bea015..5912d30020c7 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1308,7 +1308,7 @@ static ssize_t scrub_store(struct device *dev,
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
- rc = acpi_nfit_ars_rescan(acpi_desc, 0);
+ rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
}
device_unlock(dev);
if (rc)
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index b2a16ed7e81a..efd2ce099893 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -74,6 +74,13 @@ osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
* a BIOS workaround.
*/
{"Linux-Lenovo-NV-HDMI-Audio", true},
+ /*
+ * Linux-HPI-Hybrid-Graphics is used by the BIOS to enable the dGPU
+ * to output video directly to external monitors on HP Inc. mobile
+ * workstations, as the Nvidia and AMD VGA drivers provide only
+ * limited hybrid graphics support.
+ */
+ {"Linux-HPI-Hybrid-Graphics", true},
};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index b48874b8e1ea..f29e427d0d1d 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -769,6 +769,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
return AE_OK;
}
+#ifdef CONFIG_PCI
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
u64 *value, u32 width)
@@ -827,6 +828,7 @@ acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
return (result ? AE_ERROR : AE_OK);
}
+#endif
static void acpi_os_execute_deferred(struct work_struct *work)
{
diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c
index 6fa9c2a4cfe9..ca707f5b521d 100644
--- a/drivers/acpi/reboot.c
+++ b/drivers/acpi/reboot.c
@@ -4,11 +4,35 @@
#include <linux/acpi.h>
#include <acpi/reboot.h>
+#ifdef CONFIG_PCI
+static void acpi_pci_reboot(struct acpi_generic_address *rr, u8 reset_value)
+{
+ unsigned int devfn;
+ struct pci_bus *bus0;
+
+ /* The reset register can only live on bus 0. */
+ bus0 = pci_find_bus(0, 0);
+ if (!bus0)
+ return;
+ /* Form PCI device/function pair. */
+ devfn = PCI_DEVFN((rr->address >> 32) & 0xffff,
+ (rr->address >> 16) & 0xffff);
+ pr_debug("Resetting with ACPI PCI RESET_REG.\n");
+ /* Write the value that resets us. */
+ pci_bus_write_config_byte(bus0, devfn,
+ (rr->address & 0xffff), reset_value);
+}
+#else
+static inline void acpi_pci_reboot(struct acpi_generic_address *rr,
+ u8 reset_value)
+{
+ pr_warn_once("PCI configuration space access is not supported\n");
+}
+#endif
+
void acpi_reboot(void)
{
struct acpi_generic_address *rr;
- struct pci_bus *bus0;
- unsigned int devfn;
u8 reset_value;
if (acpi_disabled)
@@ -33,17 +57,7 @@ void acpi_reboot(void)
* on a device on bus 0. */
switch (rr->space_id) {
case ACPI_ADR_SPACE_PCI_CONFIG:
- /* The reset register can only live on bus 0. */
- bus0 = pci_find_bus(0, 0);
- if (!bus0)
- return;
- /* Form PCI device/function pair. */
- devfn = PCI_DEVFN((rr->address >> 32) & 0xffff,
- (rr->address >> 16) & 0xffff);
- printk(KERN_DEBUG "Resetting with ACPI PCI RESET_REG.\n");
- /* Write the value that resets us. */
- pci_bus_write_config_byte(bus0, devfn,
- (rr->address & 0xffff), reset_value);
+ acpi_pci_reboot(rr, reset_value);
break;
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
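
Per the shifts in acpi_pci_reboot(), the 64-bit generic-address value packs the config-space offset into bits [15:0], the PCI function into bits [31:16] and the device into bits [47:32], always on bus 0. Decoding a made-up reset register address:

    /* Hypothetical RESET_REG address 0x0000000200010064 decodes to
     * device 0x02, function 0x01, config offset 0x64 on bus 0. */
    u64 addr = 0x0000000200010064ULL;
    unsigned int devfn = PCI_DEVFN((addr >> 32) & 0xffff,
                                   (addr >> 16) & 0xffff);   /* 02.1 */
    u16 offset = addr & 0xffff;                              /* 0x64 */
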
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b75ae34ed188..5efd4219f112 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1546,6 +1546,7 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
{"BSG1160", },
{"INT33FE", },
+ {"INT3515", },
{}
};
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 754d59f95500..403c4ff15349 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -940,6 +940,8 @@ static int lps0_device_attach(struct acpi_device *adev,
acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
bitmask);
+
+ acpi_ec_mark_gpe_for_wake();
} else {
acpi_handle_debug(adev->handle,
"_DSM function 0 evaluation failed\n");
@@ -968,16 +970,23 @@ static int acpi_s2idle_prepare(void)
if (lps0_device_handle) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
+
+ acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
}
if (acpi_sci_irq_valid())
enable_irq_wake(acpi_sci_irq);
+ /* Change the configuration of GPEs to avoid spurious wakeup. */
+ acpi_enable_all_wakeup_gpes();
+ acpi_os_wait_events_complete();
return 0;
}
static void acpi_s2idle_wake(void)
{
+ if (!lps0_device_handle)
+ return;
if (pm_debug_messages_on)
lpi_check_constraints();
@@ -996,8 +1005,7 @@ static void acpi_s2idle_wake(void)
* takes too much time for EC wakeup events to survive, so look
* for them now.
*/
- if (lps0_device_handle)
- acpi_ec_dispatch_gpe();
+ acpi_ec_dispatch_gpe();
}
}
@@ -1017,10 +1025,14 @@ static void acpi_s2idle_sync(void)
static void acpi_s2idle_restore(void)
{
+ acpi_enable_all_runtime_gpes();
+
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
if (lps0_device_handle) {
+ acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
+
acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
}
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 9d52743080a4..c336784d0bcb 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -148,6 +148,13 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
}
switch (table->baud_rate) {
+ case 0:
+ /*
+ * SPCR 1.04 defines 0 as a preconfigured state of the UART.
+ * Assume the firmware or bootloader has configured the console correctly.
+ */
+ baud_rate = 0;
+ break;
case 3:
baud_rate = 9600;
break;
@@ -196,6 +203,10 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
* UART, so don't attempt to switch to the baud rate specified
* in the table because the driver cannot calculate the dividers
*/
+ baud_rate = 0;
+ }
+
+ if (!baud_rate) {
snprintf(opts, sizeof(opts), "%s,%s,0x%llx", uart, iotype,
table->serial_port.address);
} else {
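
With baud_rate forced to 0 the option string is built without a baud field, so the console driver keeps whatever line settings the firmware already programmed; the else branch (elided here) presumably appends the numeric rate instead. An illustration with made-up values:

    /* Hypothetical PL011 at 0xfe201000 with a preconfigured baud:
     * yields "pl011,mmio32,0xfe201000" -- no trailing rate field. */
    char opts[64];
    snprintf(opts, sizeof(opts), "%s,%s,0x%llx", "pl011", "mmio32",
             0xfe201000ULL);
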
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 61203eebf3a1..48eabb6c2d4f 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -712,6 +712,11 @@ acpi_os_physical_table_override(struct acpi_table_header *existing_table,
table_length);
}
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+static void *amlcode __attribute__ ((weakref("AmlCode")));
+static void *dsdt_amlcode __attribute__ ((weakref("dsdt_aml_code")));
+#endif
+
acpi_status
acpi_os_table_override(struct acpi_table_header *existing_table,
struct acpi_table_header **new_table)
@@ -722,8 +727,11 @@ acpi_os_table_override(struct acpi_table_header *existing_table,
*new_table = NULL;
#ifdef CONFIG_ACPI_CUSTOM_DSDT
- if (strncmp(existing_table->signature, "DSDT", 4) == 0)
- *new_table = (struct acpi_table_header *)AmlCode;
+ if (!strncmp(existing_table->signature, "DSDT", 4)) {
+ *new_table = (struct acpi_table_header *)&amlcode;
+ if (!(*new_table))
+ *new_table = (struct acpi_table_header *)&dsdt_amlcode;
+ }
#endif
if (*new_table != NULL)
acpi_table_taint(existing_table);
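
The weakref attribute is what lets the override accept either symbol name produced by the ACPICA tools (AmlCode historically, dsdt_aml_code in newer releases): a weak reference that is never satisfied at link time resolves to address 0, so taking the alias's address tells which symbol the custom DSDT object actually defines. A stripped-down sketch with a hypothetical symbol:

    /* Hypothetical symbol: &alias is NULL when "real_table" is
     * defined nowhere in the link, non-NULL when it is. */
    static void *alias __attribute__((weakref("real_table")));

    struct acpi_table_header *table = (struct acpi_table_header *)&alias;
    if (!table)
            /* try the alternate symbol name, as the code above does */;
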
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index cb30a524d16d..9f1000d2a40c 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2974,7 +2974,6 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
- t->buffer->allow_user_free = 0;
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
@@ -3510,14 +3509,18 @@ static int binder_thread_write(struct binder_proc *proc,
buffer = binder_alloc_prepare_to_free(&proc->alloc,
data_ptr);
- if (buffer == NULL) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
- proc->pid, thread->pid, (u64)data_ptr);
- break;
- }
- if (!buffer->allow_user_free) {
- binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
- proc->pid, thread->pid, (u64)data_ptr);
+ if (IS_ERR_OR_NULL(buffer)) {
+ if (PTR_ERR(buffer) == -EPERM) {
+ binder_user_error(
+ "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+ proc->pid, thread->pid,
+ (u64)data_ptr);
+ } else {
+ binder_user_error(
+ "%d:%d BC_FREE_BUFFER u%016llx no match\n",
+ proc->pid, thread->pid,
+ (u64)data_ptr);
+ }
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
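
The rework leans on the kernel's error-pointer convention so a single return value can mean three things: a valid buffer, NULL for "no match", or an encoded errno for "matched but not freeable". A sketch of the producer side, with an invented helper:

    /* ERR_PTR() folds a negative errno into an (invalid) pointer,
     * PTR_ERR() recovers it, and IS_ERR_OR_NULL() catches both
     * failure encodings. lookup() is hypothetical. */
    static struct binder_buffer *prepare_to_free(unsigned long user_ptr)
    {
            struct binder_buffer *buf = lookup(user_ptr);

            if (!buf)
                    return NULL;                 /* no match */
            if (!buf->allow_user_free)
                    return ERR_PTR(-EPERM);      /* match, not freeable */
            buf->allow_user_free = 0;
            return buf;
    }
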
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 64fd96eada31..030c98f35cca 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -151,16 +151,12 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
else {
/*
* Guard against user threads attempting to
- * free the buffer twice
+ * free the buffer while it is in use by the kernel or
+ * after it has already been freed.
*/
- if (buffer->free_in_progress) {
- binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
- "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
- alloc->pid, current->pid,
- (u64)user_ptr);
- return NULL;
- }
- buffer->free_in_progress = 1;
+ if (!buffer->allow_user_free)
+ return ERR_PTR(-EPERM);
+ buffer->allow_user_free = 0;
return buffer;
}
}
@@ -500,7 +496,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
rb_erase(best_fit, &alloc->free_buffers);
buffer->free = 0;
- buffer->free_in_progress = 0;
+ buffer->allow_user_free = 0;
binder_insert_allocated_buffer_locked(alloc, buffer);
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %pK\n",
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 9ef64e563856..fb3238c74c8a 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -50,8 +50,7 @@ struct binder_buffer {
unsigned free:1;
unsigned allow_user_free:1;
unsigned async_transaction:1;
- unsigned free_in_progress:1;
- unsigned debug_id:28;
+ unsigned debug_id:29;
struct binder_transaction *transaction;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a7f5202a4815..b8c3f9e6af89 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4602,6 +4602,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
/*
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 01306c018398..938ed513b070 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -919,8 +919,6 @@ static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct request_queue *q = qc->scsicmd->device->request_queue;
- unsigned long flags;
WARN_ON(!ap->ops->error_handler);
@@ -932,9 +930,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
* Note that ATA_QCFLAG_FAILED is unconditionally set after
* this function completes.
*/
- spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(qc->scsicmd->request);
- spin_unlock_irqrestore(q->queue_lock, flags);
}
/**
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index d071ab6864a8..26817fd91700 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -26,16 +26,17 @@
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <scsi/scsi_host.h>
#include <mach/palmld.h>
#define DRV_NAME "pata_palmld"
-static struct gpio palmld_hdd_gpios[] = {
- { GPIO_NR_PALMLD_IDE_PWEN, GPIOF_INIT_HIGH, "HDD Power" },
- { GPIO_NR_PALMLD_IDE_RESET, GPIOF_INIT_LOW, "HDD Reset" },
+struct palmld_pata {
+ struct ata_host *host;
+ struct gpio_desc *power;
+ struct gpio_desc *reset;
};
static struct scsi_host_template palmld_sht = {
@@ -50,39 +51,44 @@ static struct ata_port_operations palmld_port_ops = {
static int palmld_pata_probe(struct platform_device *pdev)
{
- struct ata_host *host;
+ struct palmld_pata *lda;
struct ata_port *ap;
void __iomem *mem;
+ struct device *dev = &pdev->dev;
int ret;
+ lda = devm_kzalloc(dev, sizeof(*lda), GFP_KERNEL);
+ if (!lda)
+ return -ENOMEM;
+
/* allocate host */
- host = ata_host_alloc(&pdev->dev, 1);
- if (!host) {
- ret = -ENOMEM;
- goto err1;
- }
+ lda->host = ata_host_alloc(dev, 1);
+ if (!lda->host)
+ return -ENOMEM;
/* remap drive's physical memory address */
- mem = devm_ioremap(&pdev->dev, PALMLD_IDE_PHYS, 0x1000);
- if (!mem) {
- ret = -ENOMEM;
- goto err1;
+ mem = devm_ioremap(dev, PALMLD_IDE_PHYS, 0x1000);
+ if (!mem)
+ return -ENOMEM;
+
+ /* request and activate power and reset GPIOs */
+ lda->power = devm_gpiod_get(dev, "power", GPIOD_OUT_HIGH);
+ if (IS_ERR(lda->power))
+ return PTR_ERR(lda->power);
+ lda->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(lda->reset)) {
+ gpiod_set_value(lda->power, 0);
+ return PTR_ERR(lda->reset);
}
- /* request and activate power GPIO, IRQ GPIO */
- ret = gpio_request_array(palmld_hdd_gpios,
- ARRAY_SIZE(palmld_hdd_gpios));
- if (ret)
- goto err1;
-
- /* reset the drive */
- gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 0);
+ /* Assert reset to reset the drive */
+ gpiod_set_value(lda->reset, 1);
msleep(30);
- gpio_set_value(GPIO_NR_PALMLD_IDE_RESET, 1);
+ gpiod_set_value(lda->reset, 0);
msleep(30);
/* setup the ata port */
- ap = host->ports[0];
+ ap = lda->host->ports[0];
ap->ops = &palmld_port_ops;
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_PIO_POLLING;
@@ -96,27 +102,26 @@ static int palmld_pata_probe(struct platform_device *pdev)
ata_sff_std_ports(&ap->ioaddr);
/* activate host */
- ret = ata_host_activate(host, 0, NULL, IRQF_TRIGGER_RISING,
- &palmld_sht);
- if (ret)
- goto err2;
-
- return ret;
+ ret = ata_host_activate(lda->host, 0, NULL, IRQF_TRIGGER_RISING,
+ &palmld_sht);
+ /* power down on failure */
+ if (ret) {
+ gpiod_set_value(lda->power, 0);
+ return ret;
+ }
-err2:
- gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
-err1:
- return ret;
+ platform_set_drvdata(pdev, lda);
+ return 0;
}
-static int palmld_pata_remove(struct platform_device *dev)
+static int palmld_pata_remove(struct platform_device *pdev)
{
- ata_platform_remove_one(dev);
+ struct palmld_pata *lda = platform_get_drvdata(pdev);
- /* power down the HDD */
- gpio_set_value(GPIO_NR_PALMLD_IDE_PWEN, 0);
+ ata_platform_remove_one(pdev);
- gpio_free_array(palmld_hdd_gpios, ARRAY_SIZE(palmld_hdd_gpios));
+ /* power down the HDD */
+ gpiod_set_value(lda->power, 0);
return 0;
}
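
The gpiod conversion also moves the driver to logical polarity: with descriptor-based GPIOs, the active level comes from the machine description, so the driver asserts reset with 1 and releases it with 0 regardless of how the line is wired. The consumer-side shape, assuming a "reset" function label:

    /* Request as output, initially asserted; any active-low inversion
     * is handled by the gpiod layer, not by the driver. */
    struct gpio_desc *reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);

    if (IS_ERR(reset))
            return PTR_ERR(reset);
    gpiod_set_value(reset, 1);      /* assert reset */
    msleep(30);
    gpiod_set_value(reset, 0);      /* release reset */
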
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index e8b6a2e464c9..4b9b9e120188 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -25,7 +25,6 @@
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
-#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/completion.h>
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 653b9a0bf727..66bb5bff126b 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -27,7 +27,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>
@@ -49,7 +49,7 @@
struct rb532_cf_info {
void __iomem *iobase;
- unsigned int gpio_line;
+ struct gpio_desc *gpio_line;
unsigned int irq;
};
@@ -60,7 +60,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
struct ata_host *ah = dev_instance;
struct rb532_cf_info *info = ah->private_data;
- if (gpio_get_value(info->gpio_line)) {
+ if (gpiod_get_value(info->gpio_line)) {
irq_set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
ata_sff_interrupt(info->irq, dev_instance);
} else {
@@ -106,10 +106,9 @@ static void rb532_pata_setup_ports(struct ata_host *ah)
static int rb532_pata_driver_probe(struct platform_device *pdev)
{
int irq;
- int gpio;
+ struct gpio_desc *gpiod;
struct resource *res;
struct ata_host *ah;
- struct cf_device *pdata;
struct rb532_cf_info *info;
int ret;
@@ -125,23 +124,12 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
return -ENOENT;
}
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data specified\n");
- return -EINVAL;
- }
-
- gpio = pdata->gpio_pin;
- if (gpio < 0) {
+ gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
+ if (IS_ERR(gpiod)) {
dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
- return -ENOENT;
- }
-
- ret = gpio_request(gpio, DRV_NAME);
- if (ret) {
- dev_err(&pdev->dev, "GPIO request failed\n");
- return ret;
+ return PTR_ERR(gpiod);
}
+ gpiod_set_consumer_name(gpiod, DRV_NAME);
/* allocate host */
ah = ata_host_alloc(&pdev->dev, RB500_CF_MAXPORTS);
@@ -153,7 +141,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
return -ENOMEM;
ah->private_data = info;
- info->gpio_line = gpio;
+ info->gpio_line = gpiod;
info->irq = irq;
info->iobase = devm_ioremap_nocache(&pdev->dev, res->start,
@@ -161,26 +149,14 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
if (!info->iobase)
return -ENOMEM;
- ret = gpio_direction_input(gpio);
- if (ret) {
- dev_err(&pdev->dev, "unable to set GPIO direction, err=%d\n",
- ret);
- goto err_free_gpio;
- }
-
rb532_pata_setup_ports(ah);
ret = ata_host_activate(ah, irq, rb532_pata_irq_handler,
IRQF_TRIGGER_LOW, &rb532_pata_sht);
if (ret)
- goto err_free_gpio;
+ return ret;
return 0;
-
-err_free_gpio:
- gpio_free(gpio);
-
- return ret;
}
static int rb532_pata_driver_remove(struct platform_device *pdev)
@@ -189,7 +165,6 @@ static int rb532_pata_driver_remove(struct platform_device *pdev)
struct rb532_cf_info *info = ah->private_data;
ata_host_detach(ah);
- gpio_free(info->gpio_line);
return 0;
}
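For rb532 there is no con_id: passing NULL to devm_gpiod_get() matches a bare "gpios" property on the device node. A short sketch of that lookup plus labeling, assuming the device carries such a property (names invented):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int demo_get_irq_gpio(struct device *dev)
{
	/* NULL con_id: matches an unnamed "gpios" firmware property */
	struct gpio_desc *g = devm_gpiod_get(dev, NULL, GPIOD_IN);

	if (IS_ERR(g))
		return PTR_ERR(g);

	/* label shown in /sys/kernel/debug/gpio instead of the default */
	gpiod_set_consumer_name(g, "demo-cf");
	return 0;
}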
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index e67815b896fc..c8fc9280d6e4 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -31,8 +31,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include "ahci.h"
@@ -85,7 +84,7 @@ struct ecx_plat_data {
/* number of extra clocks that the SGPIO PIC controller expects */
u32 pre_clocks;
u32 post_clocks;
- unsigned sgpio_gpio[SGPIO_PINS];
+ struct gpio_desc *sgpio_gpiod[SGPIO_PINS];
u32 sgpio_pattern;
u32 port_to_sgpio[SGPIO_PORTS];
};
@@ -131,9 +130,9 @@ static void ecx_parse_sgpio(struct ecx_plat_data *pdata, u32 port, u32 state)
*/
static void ecx_led_cycle_clock(struct ecx_plat_data *pdata)
{
- gpio_set_value(pdata->sgpio_gpio[SCLOCK], 1);
+ gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 1);
udelay(50);
- gpio_set_value(pdata->sgpio_gpio[SCLOCK], 0);
+ gpiod_set_value(pdata->sgpio_gpiod[SCLOCK], 0);
udelay(50);
}
@@ -164,15 +163,15 @@ static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
for (i = 0; i < pdata->pre_clocks; i++)
ecx_led_cycle_clock(pdata);
- gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
+ gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 1);
ecx_led_cycle_clock(pdata);
- gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);
+ gpiod_set_value(pdata->sgpio_gpiod[SLOAD], 0);
/*
* bit-bang out the SGPIO pattern, by consuming a bit and then
* clocking it out.
*/
for (i = 0; i < (SGPIO_SIGNALS * pdata->n_ports); i++) {
- gpio_set_value(pdata->sgpio_gpio[SDATA], sgpio_out & 1);
+ gpiod_set_value(pdata->sgpio_gpiod[SDATA], sgpio_out & 1);
sgpio_out >>= 1;
ecx_led_cycle_clock(pdata);
}
@@ -193,21 +192,19 @@ static void highbank_set_em_messages(struct device *dev,
struct device_node *np = dev->of_node;
struct ecx_plat_data *pdata = hpriv->plat_data;
int i;
- int err;
for (i = 0; i < SGPIO_PINS; i++) {
- err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
- if (err < 0)
- return;
-
- pdata->sgpio_gpio[i] = err;
- err = gpio_request(pdata->sgpio_gpio[i], "CX SGPIO");
- if (err) {
- pr_err("sata_highbank gpio_request %d failed: %d\n",
- i, err);
- return;
+ struct gpio_desc *gpiod;
+
+ gpiod = devm_gpiod_get_index(dev, "calxeda,sgpio", i,
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(gpiod)) {
+ dev_err(dev, "failed to get GPIO %d\n", i);
+ continue;
}
- gpio_direction_output(pdata->sgpio_gpio[i], 1);
+ gpiod_set_consumer_name(gpiod, "CX SGPIO");
+
+ pdata->sgpio_gpiod[i] = gpiod;
}
of_property_read_u32_array(np, "calxeda,led-order",
pdata->port_to_sgpio,
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 4b1ff5bc256a..59b2317acea9 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -891,7 +891,9 @@ static int sata_rcar_probe(struct platform_device *pdev)
int ret = 0;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
+ if (irq < 0)
+ return irq;
+ if (!irq)
return -EINVAL;
priv = devm_kzalloc(dev, sizeof(struct sata_rcar_priv), GFP_KERNEL);
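The sata_rcar change encodes the platform_get_irq() return convention: negative values are errors (including -EPROBE_DEFER) and must be propagated as-is, while 0 is not a valid Linux IRQ number. The idiom as used in probe functions:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* real error or -EPROBE_DEFER: propagate */
	if (!irq)
		return -EINVAL;	/* IRQ 0 is not a valid Linux interrupt */

	return 0;
}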
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 4e46dc9e41ad..11e1663bdc4d 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -1410,7 +1410,7 @@ static int init_q(struct fs_dev *dev, struct queue *txq, int queue,
func_enter ();
- fs_dprintk (FS_DEBUG_INIT, "Inititing queue at %x: %d entries:\n",
+ fs_dprintk (FS_DEBUG_INIT, "Initializing queue at %x: %d entries:\n",
queue, nentries);
p = aligned_kmalloc (sz, GFP_KERNEL, 0x10);
@@ -1443,7 +1443,7 @@ static int init_fp(struct fs_dev *dev, struct freepool *fp, int queue,
{
func_enter ();
- fs_dprintk (FS_DEBUG_INIT, "Inititing free pool at %x:\n", queue);
+ fs_dprintk (FS_DEBUG_INIT, "Initializing free pool at %x:\n", queue);
write_fs (dev, FP_CNF(queue), (bufsize * RBFP_RBS) | RBFP_RBSVAL | RBFP_CME);
write_fs (dev, FP_SA(queue), 0);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f55ffde877b5..14053e01a2cc 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -754,8 +754,8 @@ static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
regs = of_get_property(op->dev.of_node, "reg", NULL);
- return sprintf(page, " SBUS slot/device:\t\t%d/'%s'\n",
- (regs ? regs->which_io : 0), op->dev.of_node->name);
+ return sprintf(page, " SBUS slot/device:\t\t%d/'%pOFn'\n",
+ (regs ? regs->which_io : 0), op->dev.of_node);
}
static const struct fore200e_bus fore200e_sbus_ops = {
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 81c22d20d9d9..60e0b772673f 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -538,6 +538,9 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
}
case 'x': /* gotoxy : LxXXX[yYYY]; */
case 'y': /* gotoxy : LyYYY[xXXX]; */
+ if (priv->esc_seq.buf[priv->esc_seq.len - 1] != ';')
+ break;
+
/* If the command is valid, move to the new address */
if (parse_xy(esc, &priv->addr.x, &priv->addr.y))
charlcd_gotoxy(lcd);
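The new charlcd check refuses to parse a gotoxy escape until its terminating ';' has been received, instead of acting on a partially buffered sequence. A hypothetical userspace illustration, assuming the documented LxXXX[yYYY]; syntax and a panel at /dev/lcd:

#include <fcntl.h>
#include <unistd.h>

int demo(void)
{
	int fd = open("/dev/lcd", O_WRONLY);

	if (fd < 0)
		return -1;
	/* terminated with ';': parsed, the cursor moves to x=10, y=5 */
	write(fd, "\033[Lx010y005;", 12);
	/* no ';' yet: the new check defers parsing until one arrives */
	write(fd, "\033[Lx020y001", 11);
	return close(fd);
}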
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 704f44295810..157452080f3d 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
topology.o container.o property.o cacheinfo.o \
- devcon.o
+ devcon.o swnode.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
obj-$(CONFIG_ISA_BUS_API) += isa.o
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 04bbcd779e11..a2f14098663f 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -8,6 +8,7 @@
* Copyright (c) 2006 Novell, Inc.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fwnode.h>
@@ -728,6 +729,26 @@ static inline int device_is_not_partition(struct device *dev)
}
#endif
+static int
+device_platform_notify(struct device *dev, enum kobject_action action)
+{
+ int ret;
+
+ ret = acpi_platform_notify(dev, action);
+ if (ret)
+ return ret;
+
+ ret = software_node_notify(dev, action);
+ if (ret)
+ return ret;
+
+ if (platform_notify && action == KOBJ_ADD)
+ platform_notify(dev);
+ else if (platform_notify_remove && action == KOBJ_REMOVE)
+ platform_notify_remove(dev);
+ return 0;
+}
+
/**
* dev_driver_string - Return a device's driver name, if at all possible
* @dev: struct device to get the name of
@@ -1883,8 +1904,9 @@ int device_add(struct device *dev)
}
/* notify platform of device entry */
- if (platform_notify)
- platform_notify(dev);
+ error = device_platform_notify(dev, KOBJ_ADD);
+ if (error)
+ goto platform_error;
error = device_create_file(dev, &dev_attr_uevent);
if (error)
@@ -1960,6 +1982,8 @@ done:
SymlinkError:
device_remove_file(dev, &dev_attr_uevent);
attrError:
+ device_platform_notify(dev, KOBJ_REMOVE);
+platform_error:
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
@@ -2077,14 +2101,10 @@ void device_del(struct device *dev)
bus_remove_device(dev);
device_pm_remove(dev);
driver_deferred_probe_del(dev);
+ device_platform_notify(dev, KOBJ_REMOVE);
device_remove_properties(dev);
device_links_purge(dev);
- /* Notify the platform of the removal, in case they
- * need to do anything...
- */
- if (platform_notify_remove)
- platform_notify_remove(dev);
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_REMOVED_DEVICE, dev);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 4aaf00d2098b..e038e2b3b7ea 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -26,8 +26,14 @@ struct devres_node {
struct devres {
struct devres_node node;
- /* -- 3 pointers */
- unsigned long long data[]; /* guarantee ull alignment */
+ /*
+ * Some archs want to perform DMA into kmalloc caches
+ * and need a guaranteed alignment larger than
+ * the alignment of a 64-bit integer.
+ * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
+ * buffer alignment as if it was allocated by plain kmalloc().
+ */
+ u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};
struct devres_group {
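The point of the __aligned() flexible array is that the data[] payload, and therefore every devm_kmalloc() buffer, starts on an ARCH_KMALLOC_MINALIGN boundary just as a plain kmalloc() allocation would. A compile-time sketch of that invariant (demo struct, not the kernel's):

#include <linux/build_bug.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct demo_devres {
	void *node[3];	/* stand-in for the devres_node header */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

static inline void demo_check(void)
{
	/* payload offset is a multiple of the minimum kmalloc alignment */
	BUILD_BUG_ON(offsetof(struct demo_devres, data) %
		     ARCH_KMALLOC_MINALIGN);
}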
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index f39a920496fb..8da314b81eab 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -368,14 +368,16 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nvec)
{
struct platform_msi_priv_data *data = domain->host_data;
- struct msi_desc *desc;
- for_each_msi_entry(desc, data->dev) {
+ struct msi_desc *desc, *tmp;
+ for_each_msi_entry_safe(desc, tmp, data->dev) {
if (WARN_ON(!desc->irq || desc->nvec_used != 1))
return;
if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
continue;
irq_domain_free_irqs_common(domain, desc->irq, 1);
+ list_del(&desc->list);
+ free_msi_entry(desc);
}
}
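Deleting and freeing entries while walking a list is only safe with the _safe iterator, which caches the next pointer before the loop body can free the current node; that is what the platform-msi fix above switches to. The generic pattern, sketched with a stand-alone list:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
};

static void free_all(struct list_head *head)
{
	struct item *it, *tmp;

	/* _safe caches the next pointer, so the current entry may be freed */
	list_for_each_entry_safe(it, tmp, head, list) {
		list_del(&it->list);
		kfree(it);
	}
}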
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index c1ddf191711e..445cbd8266ca 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -448,7 +448,6 @@ void platform_device_del(struct platform_device *pdev)
int i;
if (pdev) {
- device_remove_properties(&pdev->dev);
device_del(&pdev->dev);
if (pdev->id_auto) {
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 7f38a92b444a..500de1dee967 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -239,6 +239,127 @@ static void genpd_update_accounting(struct generic_pm_domain *genpd)
static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
#endif
+static int _genpd_reeval_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state)
+{
+ struct generic_pm_domain_data *pd_data;
+ struct pm_domain_data *pdd;
+ struct gpd_link *link;
+
+ /* New requested state is the same as the max requested state */
+ if (state == genpd->performance_state)
+ return state;
+
+ /* New requested state is higher than the max requested state */
+ if (state > genpd->performance_state)
+ return state;
+
+ /* Traverse all devices within the domain */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ pd_data = to_gpd_data(pdd);
+
+ if (pd_data->performance_state > state)
+ state = pd_data->performance_state;
+ }
+
+ /*
+ * Traverse all sub-domains within the domain. This can be
+ * done without any additional locking as the link->performance_state
+ * field is protected by the master genpd->lock, which is already taken.
+ *
+ * Also note that link->performance_state (the subdomain's performance
+ * state requirement on its master domain) is distinct from
+ * link->slave->performance_state (the aggregate requirement of the
+ * subdomain's own devices and sub-domains), so the two may hold
+ * different values.
+ *
+ * Note that votes from powered-off sub-domains are also taken into
+ * account, as is currently done for devices.
+ */
+ list_for_each_entry(link, &genpd->master_links, master_node) {
+ if (link->performance_state > state)
+ state = link->performance_state;
+ }
+
+ return state;
+}
+
+static int _genpd_set_performance_state(struct generic_pm_domain *genpd,
+ unsigned int state, int depth)
+{
+ struct generic_pm_domain *master;
+ struct gpd_link *link;
+ int master_state, ret;
+
+ if (state == genpd->performance_state)
+ return 0;
+
+ /* Propagate to masters of genpd */
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ master = link->master;
+
+ if (!master->set_performance_state)
+ continue;
+
+ /* Find master's performance state */
+ ret = dev_pm_opp_xlate_performance_state(genpd->opp_table,
+ master->opp_table,
+ state);
+ if (unlikely(ret < 0))
+ goto err;
+
+ master_state = ret;
+
+ genpd_lock_nested(master, depth + 1);
+
+ link->prev_performance_state = link->performance_state;
+ link->performance_state = master_state;
+ master_state = _genpd_reeval_performance_state(master,
+ master_state);
+ ret = _genpd_set_performance_state(master, master_state, depth + 1);
+ if (ret)
+ link->performance_state = link->prev_performance_state;
+
+ genpd_unlock(master);
+
+ if (ret)
+ goto err;
+ }
+
+ ret = genpd->set_performance_state(genpd, state);
+ if (ret)
+ goto err;
+
+ genpd->performance_state = state;
+ return 0;
+
+err:
+ /* Encountered an error, let's roll back */
+ list_for_each_entry_continue_reverse(link, &genpd->slave_links,
+ slave_node) {
+ master = link->master;
+
+ if (!master->set_performance_state)
+ continue;
+
+ genpd_lock_nested(master, depth + 1);
+
+ master_state = link->prev_performance_state;
+ link->performance_state = master_state;
+
+ master_state = _genpd_reeval_performance_state(master,
+ master_state);
+ if (_genpd_set_performance_state(master, master_state, depth + 1)) {
+ pr_err("%s: Failed to roll back to %d performance state\n",
+ master->name, master_state);
+ }
+
+ genpd_unlock(master);
+ }
+
+ return ret;
+}
+
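Stripped of the locking and list plumbing, _genpd_reeval_performance_state() above is a max-reduction: the effective domain level is the largest of the new request, every device vote, and every subdomain vote. A hypothetical stand-alone rendering of that reduction (names invented):

#include <stddef.h>

static unsigned int demo_reeval(unsigned int state,
				const unsigned int *dev_votes, size_t ndev,
				const unsigned int *link_votes, size_t nlink)
{
	size_t i;

	/* max over the request, all device votes, all subdomain votes */
	for (i = 0; i < ndev; i++)
		if (dev_votes[i] > state)
			state = dev_votes[i];
	for (i = 0; i < nlink; i++)
		if (link_votes[i] > state)
			state = link_votes[i];
	return state;
}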
/**
 * dev_pm_genpd_set_performance_state - Set performance state of device's power
* domain.
@@ -257,10 +378,9 @@ static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
{
struct generic_pm_domain *genpd;
- struct generic_pm_domain_data *gpd_data, *pd_data;
- struct pm_domain_data *pdd;
+ struct generic_pm_domain_data *gpd_data;
unsigned int prev;
- int ret = 0;
+ int ret;
genpd = dev_to_genpd(dev);
if (IS_ERR(genpd))
@@ -281,47 +401,11 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state)
prev = gpd_data->performance_state;
gpd_data->performance_state = state;
- /* New requested state is same as Max requested state */
- if (state == genpd->performance_state)
- goto unlock;
-
- /* New requested state is higher than Max requested state */
- if (state > genpd->performance_state)
- goto update_state;
-
- /* Traverse all devices within the domain */
- list_for_each_entry(pdd, &genpd->dev_list, list_node) {
- pd_data = to_gpd_data(pdd);
-
- if (pd_data->performance_state > state)
- state = pd_data->performance_state;
- }
-
- if (state == genpd->performance_state)
- goto unlock;
-
- /*
- * We aren't propagating performance state changes of a subdomain to its
- * masters as we don't have hardware that needs it. Over that, the
- * performance states of subdomain and its masters may not have
- * one-to-one mapping and would require additional information. We can
- * get back to this once we have hardware that needs it. For that
- * reason, we don't have to consider performance state of the subdomains
- * of genpd here.
- */
-
-update_state:
- if (genpd_status_on(genpd)) {
- ret = genpd->set_performance_state(genpd, state);
- if (ret) {
- gpd_data->performance_state = prev;
- goto unlock;
- }
- }
-
- genpd->performance_state = state;
+ state = _genpd_reeval_performance_state(genpd, state);
+ ret = _genpd_set_performance_state(genpd, state, 0);
+ if (ret)
+ gpd_data->performance_state = prev;
-unlock:
genpd_unlock(genpd);
return ret;
@@ -347,15 +431,6 @@ static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
return ret;
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
-
- if (unlikely(genpd->set_performance_state)) {
- ret = genpd->set_performance_state(genpd, genpd->performance_state);
- if (ret) {
- pr_warn("%s: Failed to set performance state %d (%d)\n",
- genpd->name, genpd->performance_state, ret);
- }
- }
-
if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
return ret;
@@ -1907,12 +1982,21 @@ int of_genpd_add_provider_simple(struct device_node *np,
ret);
goto unlock;
}
+
+ /*
+ * Save table for faster processing while setting performance
+ * state.
+ */
+ genpd->opp_table = dev_pm_opp_get_opp_table(&genpd->dev);
+ WARN_ON(!genpd->opp_table);
}
ret = genpd_add_provider(np, genpd_xlate_simple, genpd);
if (ret) {
- if (genpd->set_performance_state)
+ if (genpd->set_performance_state) {
+ dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
+ }
goto unlock;
}
@@ -1965,6 +2049,13 @@ int of_genpd_add_provider_onecell(struct device_node *np,
i, ret);
goto error;
}
+
+ /*
+ * Save table for faster processing while setting
+ * performance state.
+ */
+ genpd->opp_table = dev_pm_opp_get_opp_table_indexed(&genpd->dev, i);
+ WARN_ON(!genpd->opp_table);
}
genpd->provider = &np->fwnode;
@@ -1989,8 +2080,10 @@ error:
genpd->provider = NULL;
genpd->has_provider = false;
- if (genpd->set_performance_state)
+ if (genpd->set_performance_state) {
+ dev_pm_opp_put_opp_table(genpd->opp_table);
dev_pm_opp_of_remove_table(&genpd->dev);
+ }
}
mutex_unlock(&gpd_list_lock);
@@ -2024,6 +2117,7 @@ void of_genpd_del_provider(struct device_node *np)
if (!gpd->set_performance_state)
continue;
+ dev_pm_opp_put_opp_table(gpd->opp_table);
dev_pm_opp_of_remove_table(&gpd->dev);
}
}
@@ -2338,7 +2432,7 @@ EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
unsigned int index)
{
- struct device *genpd_dev;
+ struct device *virt_dev;
int num_domains;
int ret;
@@ -2352,31 +2446,31 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
return NULL;
/* Allocate and register device on the genpd bus. */
- genpd_dev = kzalloc(sizeof(*genpd_dev), GFP_KERNEL);
- if (!genpd_dev)
+ virt_dev = kzalloc(sizeof(*virt_dev), GFP_KERNEL);
+ if (!virt_dev)
return ERR_PTR(-ENOMEM);
- dev_set_name(genpd_dev, "genpd:%u:%s", index, dev_name(dev));
- genpd_dev->bus = &genpd_bus_type;
- genpd_dev->release = genpd_release_dev;
+ dev_set_name(virt_dev, "genpd:%u:%s", index, dev_name(dev));
+ virt_dev->bus = &genpd_bus_type;
+ virt_dev->release = genpd_release_dev;
- ret = device_register(genpd_dev);
+ ret = device_register(virt_dev);
if (ret) {
- kfree(genpd_dev);
+ kfree(virt_dev);
return ERR_PTR(ret);
}
/* Try to attach the device to the PM domain at the specified index. */
- ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
+ ret = __genpd_dev_pm_attach(virt_dev, dev->of_node, index, false);
if (ret < 1) {
- device_unregister(genpd_dev);
+ device_unregister(virt_dev);
return ret ? ERR_PTR(ret) : NULL;
}
- pm_runtime_enable(genpd_dev);
- genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
+ pm_runtime_enable(virt_dev);
+ genpd_queue_power_off_work(dev_to_genpd(virt_dev));
- return genpd_dev;
+ return virt_dev;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach_by_id);
@@ -2521,52 +2615,36 @@ int of_genpd_parse_idle_states(struct device_node *dn,
EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
/**
- * of_genpd_opp_to_performance_state- Gets performance state of device's
- * power domain corresponding to a DT node's "required-opps" property.
+ * pm_genpd_opp_to_performance_state - Gets performance state of the genpd from its OPP node.
*
- * @dev: Device for which the performance-state needs to be found.
- * @np: DT node where the "required-opps" property is present. This can be
- * the device node itself (if it doesn't have an OPP table) or a node
- * within the OPP table of a device (if device has an OPP table).
+ * @genpd_dev: Genpd's device for which the performance-state needs to be found.
+ * @opp: struct dev_pm_opp of the OPP for which we need to find performance
+ * state.
*
- * Returns performance state corresponding to the "required-opps" property of
- * a DT node. This calls platform specific genpd->opp_to_performance_state()
- * callback to translate power domain OPP to performance state.
+ * Returns performance state encoded in the OPP of the genpd. This calls
+ * platform specific genpd->opp_to_performance_state() callback to translate
+ * power domain OPP to performance state.
*
* Returns performance state on success and 0 on failure.
*/
-unsigned int of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *np)
+unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp)
{
- struct generic_pm_domain *genpd;
- struct dev_pm_opp *opp;
- int state = 0;
+ struct generic_pm_domain *genpd = NULL;
+ int state;
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return 0;
+ genpd = container_of(genpd_dev, struct generic_pm_domain, dev);
- if (unlikely(!genpd->set_performance_state))
+ if (unlikely(!genpd->opp_to_performance_state))
return 0;
genpd_lock(genpd);
-
- opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
- if (IS_ERR(opp)) {
- dev_err(dev, "Failed to find required OPP: %ld\n",
- PTR_ERR(opp));
- goto unlock;
- }
-
state = genpd->opp_to_performance_state(genpd, opp);
- dev_pm_opp_put(opp);
-
-unlock:
genpd_unlock(genpd);
return state;
}
-EXPORT_SYMBOL_GPL(of_genpd_opp_to_performance_state);
+EXPORT_SYMBOL_GPL(pm_genpd_opp_to_performance_state);
static int __init genpd_bus_init(void)
{
@@ -2671,7 +2749,7 @@ exit:
return 0;
}
-static int genpd_summary_show(struct seq_file *s, void *data)
+static int summary_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -2694,7 +2772,7 @@ static int genpd_summary_show(struct seq_file *s, void *data)
return ret;
}
-static int genpd_status_show(struct seq_file *s, void *data)
+static int status_show(struct seq_file *s, void *data)
{
static const char * const status_lookup[] = {
[GPD_STATE_ACTIVE] = "on",
@@ -2721,7 +2799,7 @@ exit:
return ret;
}
-static int genpd_sub_domains_show(struct seq_file *s, void *data)
+static int sub_domains_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct gpd_link *link;
@@ -2738,7 +2816,7 @@ static int genpd_sub_domains_show(struct seq_file *s, void *data)
return ret;
}
-static int genpd_idle_states_show(struct seq_file *s, void *data)
+static int idle_states_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
unsigned int i;
@@ -2767,7 +2845,7 @@ static int genpd_idle_states_show(struct seq_file *s, void *data)
return ret;
}
-static int genpd_active_time_show(struct seq_file *s, void *data)
+static int active_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
ktime_t delta = 0;
@@ -2787,7 +2865,7 @@ static int genpd_active_time_show(struct seq_file *s, void *data)
return ret;
}
-static int genpd_total_idle_time_show(struct seq_file *s, void *data)
+static int total_idle_time_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
ktime_t delta = 0, total = 0;
@@ -2815,7 +2893,7 @@ static int genpd_total_idle_time_show(struct seq_file *s, void *data)
}
-static int genpd_devices_show(struct seq_file *s, void *data)
+static int devices_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
struct pm_domain_data *pm_data;
@@ -2841,7 +2919,7 @@ static int genpd_devices_show(struct seq_file *s, void *data)
return ret;
}
-static int genpd_perf_state_show(struct seq_file *s, void *data)
+static int perf_state_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd = s->private;
@@ -2854,37 +2932,14 @@ static int genpd_perf_state_show(struct seq_file *s, void *data)
return 0;
}
-#define define_genpd_open_function(name) \
-static int genpd_##name##_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, genpd_##name##_show, inode->i_private); \
-}
-
-define_genpd_open_function(summary);
-define_genpd_open_function(status);
-define_genpd_open_function(sub_domains);
-define_genpd_open_function(idle_states);
-define_genpd_open_function(active_time);
-define_genpd_open_function(total_idle_time);
-define_genpd_open_function(devices);
-define_genpd_open_function(perf_state);
-
-#define define_genpd_debugfs_fops(name) \
-static const struct file_operations genpd_##name##_fops = { \
- .open = genpd_##name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
-}
-
-define_genpd_debugfs_fops(summary);
-define_genpd_debugfs_fops(status);
-define_genpd_debugfs_fops(sub_domains);
-define_genpd_debugfs_fops(idle_states);
-define_genpd_debugfs_fops(active_time);
-define_genpd_debugfs_fops(total_idle_time);
-define_genpd_debugfs_fops(devices);
-define_genpd_debugfs_fops(perf_state);
+DEFINE_SHOW_ATTRIBUTE(summary);
+DEFINE_SHOW_ATTRIBUTE(status);
+DEFINE_SHOW_ATTRIBUTE(sub_domains);
+DEFINE_SHOW_ATTRIBUTE(idle_states);
+DEFINE_SHOW_ATTRIBUTE(active_time);
+DEFINE_SHOW_ATTRIBUTE(total_idle_time);
+DEFINE_SHOW_ATTRIBUTE(devices);
+DEFINE_SHOW_ATTRIBUTE(perf_state);
static int __init genpd_debug_init(void)
{
@@ -2897,7 +2952,7 @@ static int __init genpd_debug_init(void)
return -ENOMEM;
d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- genpd_debugfs_dir, NULL, &genpd_summary_fops);
+ genpd_debugfs_dir, NULL, &summary_fops);
if (!d)
return -ENOMEM;
@@ -2907,20 +2962,20 @@ static int __init genpd_debug_init(void)
return -ENOMEM;
debugfs_create_file("current_state", 0444,
- d, genpd, &genpd_status_fops);
+ d, genpd, &status_fops);
debugfs_create_file("sub_domains", 0444,
- d, genpd, &genpd_sub_domains_fops);
+ d, genpd, &sub_domains_fops);
debugfs_create_file("idle_states", 0444,
- d, genpd, &genpd_idle_states_fops);
+ d, genpd, &idle_states_fops);
debugfs_create_file("active_time", 0444,
- d, genpd, &genpd_active_time_fops);
+ d, genpd, &active_time_fops);
debugfs_create_file("total_idle_time", 0444,
- d, genpd, &genpd_total_idle_time_fops);
+ d, genpd, &total_idle_time_fops);
debugfs_create_file("devices", 0444,
- d, genpd, &genpd_devices_fops);
+ d, genpd, &devices_fops);
if (genpd->set_performance_state)
debugfs_create_file("perf_state", 0444,
- d, genpd, &genpd_perf_state_fops);
+ d, genpd, &perf_state_fops);
}
return 0;
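The debugfs conversions above lean on the naming contract of DEFINE_SHOW_ATTRIBUTE(name): given a name_show(struct seq_file *, void *), the macro generates name_open() and a name_fops wired through single_open()/seq_read(), which is why every show routine first had to shed its genpd_ prefix. A minimal sketch with an invented attribute:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_show(struct seq_file *s, void *data)
{
	seq_puts(s, "hello\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);	/* emits foo_open() and foo_fops */

/* usage: debugfs_create_file("foo", 0444, parent, NULL, &foo_fops); */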
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index beb85c31f3fa..70624695b6d5 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -8,6 +8,8 @@
*/
#include <linux/sched/mm.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
@@ -93,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
static void pm_runtime_deactivate_timer(struct device *dev)
{
if (dev->power.timer_expires > 0) {
- del_timer(&dev->power.suspend_timer);
+ hrtimer_cancel(&dev->power.suspend_timer);
dev->power.timer_expires = 0;
}
}
@@ -124,12 +126,11 @@ static void pm_runtime_cancel_pending(struct device *dev)
* This function may be called either with or without dev->power.lock held.
* Either way it can be racy, since power.last_busy may be updated at any time.
*/
-unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
+u64 pm_runtime_autosuspend_expiration(struct device *dev)
{
int autosuspend_delay;
- long elapsed;
- unsigned long last_busy;
- unsigned long expires = 0;
+ u64 last_busy, expires = 0;
+ u64 now = ktime_to_ns(ktime_get());
if (!dev->power.use_autosuspend)
goto out;
@@ -139,19 +140,9 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
goto out;
last_busy = READ_ONCE(dev->power.last_busy);
- elapsed = jiffies - last_busy;
- if (elapsed < 0)
- goto out; /* jiffies has wrapped around. */
- /*
- * If the autosuspend_delay is >= 1 second, align the timer by rounding
- * up to the nearest second.
- */
- expires = last_busy + msecs_to_jiffies(autosuspend_delay);
- if (autosuspend_delay >= 1000)
- expires = round_jiffies(expires);
- expires += !expires;
- if (elapsed >= expires - last_busy)
+ expires = last_busy + autosuspend_delay * NSEC_PER_MSEC;
+ if (expires <= now)
expires = 0; /* Already expired. */
out:
@@ -515,7 +506,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
/* If the autosuspend_delay time hasn't expired yet, reschedule. */
if ((rpmflags & RPM_AUTO)
&& dev->power.runtime_status != RPM_SUSPENDING) {
- unsigned long expires = pm_runtime_autosuspend_expiration(dev);
+ u64 expires = pm_runtime_autosuspend_expiration(dev);
if (expires != 0) {
/* Pending requests need to be canceled. */
@@ -528,10 +519,20 @@ static int rpm_suspend(struct device *dev, int rpmflags)
* expire; pm_suspend_timer_fn() will take care of the
* rest.
*/
- if (!(dev->power.timer_expires && time_before_eq(
- dev->power.timer_expires, expires))) {
+ if (!(dev->power.timer_expires &&
+ dev->power.timer_expires <= expires)) {
+ /*
+ * We add a slack of 25% to gather wakeups
+ * without sacrificing granularity.
+ */
+ u64 slack = READ_ONCE(dev->power.autosuspend_delay) *
+ (NSEC_PER_MSEC >> 2);
+
dev->power.timer_expires = expires;
- mod_timer(&dev->power.suspend_timer, expires);
+ hrtimer_start_range_ns(&dev->power.suspend_timer,
+ ns_to_ktime(expires),
+ slack,
+ HRTIMER_MODE_ABS);
}
dev->power.timer_autosuspends = 1;
goto out;
@@ -895,23 +896,25 @@ static void pm_runtime_work(struct work_struct *work)
*
* Check if the time is right and queue a suspend request.
*/
-static void pm_suspend_timer_fn(struct timer_list *t)
+static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
{
- struct device *dev = from_timer(dev, t, power.suspend_timer);
+ struct device *dev = container_of(timer, struct device, power.suspend_timer);
unsigned long flags;
- unsigned long expires;
+ u64 expires;
spin_lock_irqsave(&dev->power.lock, flags);
expires = dev->power.timer_expires;
- /* If 'expire' is after 'jiffies' we've been called too early. */
+ /* If 'expires' is still in the future, we've been called too early. */
- if (expires > 0 && !time_after(expires, jiffies)) {
+ if (expires > 0 && expires < ktime_to_ns(ktime_get())) {
dev->power.timer_expires = 0;
rpm_suspend(dev, dev->power.timer_autosuspends ?
(RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
}
spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return HRTIMER_NORESTART;
}
/**
@@ -922,6 +925,7 @@ static void pm_suspend_timer_fn(struct timer_list *t)
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
unsigned long flags;
+ ktime_t expires;
int retval;
spin_lock_irqsave(&dev->power.lock, flags);
@@ -938,10 +942,10 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
/* Other scheduled or pending requests need to be canceled. */
pm_runtime_cancel_pending(dev);
- dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
- dev->power.timer_expires += !dev->power.timer_expires;
+ expires = ktime_add(ktime_get(), ms_to_ktime(delay));
+ dev->power.timer_expires = ktime_to_ns(expires);
dev->power.timer_autosuspends = 0;
- mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
+ hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -1491,7 +1495,8 @@ void pm_runtime_init(struct device *dev)
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
- timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0);
+ hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ dev->power.suspend_timer.function = pm_suspend_timer_fn;
init_waitqueue_head(&dev->power.wait_queue);
}
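The runtime PM conversion swaps the jiffies-based timer_list for a CLOCK_MONOTONIC hrtimer armed with an absolute nanosecond deadline; hrtimer_start_range_ns() additionally takes a slack window so nearby expirations can be coalesced. The bare pattern, as a hypothetical stand-alone sketch:

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_fn(struct hrtimer *t)
{
	/* one-shot: do the work here, do not rearm */
	return HRTIMER_NORESTART;
}

static void demo_arm(u64 expires_ns, u64 slack_ns)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	demo_timer.function = demo_fn;
	/* absolute deadline, with slack_ns of coalescing headroom */
	hrtimer_start_range_ns(&demo_timer, ns_to_ktime(expires_ns),
			       slack_ns, HRTIMER_MODE_ABS);
}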
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 240ab5230ff6..8b91ab380d14 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -18,236 +18,6 @@
#include <linux/etherdevice.h>
#include <linux/phy.h>
-struct property_set {
- struct device *dev;
- struct fwnode_handle fwnode;
- const struct property_entry *properties;
-};
-
-static const struct fwnode_operations pset_fwnode_ops;
-
-static inline bool is_pset_node(const struct fwnode_handle *fwnode)
-{
- return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &pset_fwnode_ops;
-}
-
-#define to_pset_node(__fwnode) \
- ({ \
- typeof(__fwnode) __to_pset_node_fwnode = __fwnode; \
- \
- is_pset_node(__to_pset_node_fwnode) ? \
- container_of(__to_pset_node_fwnode, \
- struct property_set, fwnode) : \
- NULL; \
- })
-
-static const struct property_entry *
-pset_prop_get(const struct property_set *pset, const char *name)
-{
- const struct property_entry *prop;
-
- if (!pset || !pset->properties)
- return NULL;
-
- for (prop = pset->properties; prop->name; prop++)
- if (!strcmp(name, prop->name))
- return prop;
-
- return NULL;
-}
-
-static const void *property_get_pointer(const struct property_entry *prop)
-{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- return prop->pointer.u8_data;
- return &prop->value.u8_data;
- case DEV_PROP_U16:
- if (prop->is_array)
- return prop->pointer.u16_data;
- return &prop->value.u16_data;
- case DEV_PROP_U32:
- if (prop->is_array)
- return prop->pointer.u32_data;
- return &prop->value.u32_data;
- case DEV_PROP_U64:
- if (prop->is_array)
- return prop->pointer.u64_data;
- return &prop->value.u64_data;
- case DEV_PROP_STRING:
- if (prop->is_array)
- return prop->pointer.str;
- return &prop->value.str;
- default:
- return NULL;
- }
-}
-
-static void property_set_pointer(struct property_entry *prop, const void *pointer)
-{
- switch (prop->type) {
- case DEV_PROP_U8:
- if (prop->is_array)
- prop->pointer.u8_data = pointer;
- else
- prop->value.u8_data = *((u8 *)pointer);
- break;
- case DEV_PROP_U16:
- if (prop->is_array)
- prop->pointer.u16_data = pointer;
- else
- prop->value.u16_data = *((u16 *)pointer);
- break;
- case DEV_PROP_U32:
- if (prop->is_array)
- prop->pointer.u32_data = pointer;
- else
- prop->value.u32_data = *((u32 *)pointer);
- break;
- case DEV_PROP_U64:
- if (prop->is_array)
- prop->pointer.u64_data = pointer;
- else
- prop->value.u64_data = *((u64 *)pointer);
- break;
- case DEV_PROP_STRING:
- if (prop->is_array)
- prop->pointer.str = pointer;
- else
- prop->value.str = pointer;
- break;
- default:
- break;
- }
-}
-
-static const void *pset_prop_find(const struct property_set *pset,
- const char *propname, size_t length)
-{
- const struct property_entry *prop;
- const void *pointer;
-
- prop = pset_prop_get(pset, propname);
- if (!prop)
- return ERR_PTR(-EINVAL);
- pointer = property_get_pointer(prop);
- if (!pointer)
- return ERR_PTR(-ENODATA);
- if (length > prop->length)
- return ERR_PTR(-EOVERFLOW);
- return pointer;
-}
-
-static int pset_prop_read_u8_array(const struct property_set *pset,
- const char *propname,
- u8 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = pset_prop_find(pset, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int pset_prop_read_u16_array(const struct property_set *pset,
- const char *propname,
- u16 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = pset_prop_find(pset, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int pset_prop_read_u32_array(const struct property_set *pset,
- const char *propname,
- u32 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = pset_prop_find(pset, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int pset_prop_read_u64_array(const struct property_set *pset,
- const char *propname,
- u64 *values, size_t nval)
-{
- const void *pointer;
- size_t length = nval * sizeof(*values);
-
- pointer = pset_prop_find(pset, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(values, pointer, length);
- return 0;
-}
-
-static int pset_prop_count_elems_of_size(const struct property_set *pset,
- const char *propname, size_t length)
-{
- const struct property_entry *prop;
-
- prop = pset_prop_get(pset, propname);
- if (!prop)
- return -EINVAL;
-
- return prop->length / length;
-}
-
-static int pset_prop_read_string_array(const struct property_set *pset,
- const char *propname,
- const char **strings, size_t nval)
-{
- const struct property_entry *prop;
- const void *pointer;
- size_t array_len, length;
-
- /* Find out the array length. */
- prop = pset_prop_get(pset, propname);
- if (!prop)
- return -EINVAL;
-
- if (!prop->is_array)
- /* The array length for a non-array string property is 1. */
- array_len = 1;
- else
- /* Find the length of an array. */
- array_len = pset_prop_count_elems_of_size(pset, propname,
- sizeof(const char *));
-
- /* Return how many there are if strings is NULL. */
- if (!strings)
- return array_len;
-
- array_len = min(nval, array_len);
- length = array_len * sizeof(*strings);
-
- pointer = pset_prop_find(pset, propname, length);
- if (IS_ERR(pointer))
- return PTR_ERR(pointer);
-
- memcpy(strings, pointer, length);
-
- return array_len;
-}
-
struct fwnode_handle *dev_fwnode(struct device *dev)
{
return IS_ENABLED(CONFIG_OF) && dev->of_node ?
@@ -255,51 +25,6 @@ struct fwnode_handle *dev_fwnode(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_fwnode);
-static bool pset_fwnode_property_present(const struct fwnode_handle *fwnode,
- const char *propname)
-{
- return !!pset_prop_get(to_pset_node(fwnode), propname);
-}
-
-static int pset_fwnode_read_int_array(const struct fwnode_handle *fwnode,
- const char *propname,
- unsigned int elem_size, void *val,
- size_t nval)
-{
- const struct property_set *node = to_pset_node(fwnode);
-
- if (!val)
- return pset_prop_count_elems_of_size(node, propname, elem_size);
-
- switch (elem_size) {
- case sizeof(u8):
- return pset_prop_read_u8_array(node, propname, val, nval);
- case sizeof(u16):
- return pset_prop_read_u16_array(node, propname, val, nval);
- case sizeof(u32):
- return pset_prop_read_u32_array(node, propname, val, nval);
- case sizeof(u64):
- return pset_prop_read_u64_array(node, propname, val, nval);
- }
-
- return -ENXIO;
-}
-
-static int
-pset_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
- const char *propname,
- const char **val, size_t nval)
-{
- return pset_prop_read_string_array(to_pset_node(fwnode), propname,
- val, nval);
-}
-
-static const struct fwnode_operations pset_fwnode_ops = {
- .property_present = pset_fwnode_property_present,
- .property_read_int_array = pset_fwnode_read_int_array,
- .property_read_string_array = pset_fwnode_property_read_string_array,
-};
-
/**
* device_property_present - check if a property of a device is present
* @dev: Device whose property is being checked
@@ -759,223 +484,25 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
-static void property_entry_free_data(const struct property_entry *p)
-{
- const void *pointer = property_get_pointer(p);
- size_t i, nval;
-
- if (p->is_array) {
- if (p->type == DEV_PROP_STRING && p->pointer.str) {
- nval = p->length / sizeof(const char *);
- for (i = 0; i < nval; i++)
- kfree(p->pointer.str[i]);
- }
- kfree(pointer);
- } else if (p->type == DEV_PROP_STRING) {
- kfree(p->value.str);
- }
- kfree(p->name);
-}
-
-static int property_copy_string_array(struct property_entry *dst,
- const struct property_entry *src)
-{
- const char **d;
- size_t nval = src->length / sizeof(*d);
- int i;
-
- d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
- if (!d)
- return -ENOMEM;
-
- for (i = 0; i < nval; i++) {
- d[i] = kstrdup(src->pointer.str[i], GFP_KERNEL);
- if (!d[i] && src->pointer.str[i]) {
- while (--i >= 0)
- kfree(d[i]);
- kfree(d);
- return -ENOMEM;
- }
- }
-
- dst->pointer.str = d;
- return 0;
-}
-
-static int property_entry_copy_data(struct property_entry *dst,
- const struct property_entry *src)
-{
- const void *pointer = property_get_pointer(src);
- const void *new;
- int error;
-
- if (src->is_array) {
- if (!src->length)
- return -ENODATA;
-
- if (src->type == DEV_PROP_STRING) {
- error = property_copy_string_array(dst, src);
- if (error)
- return error;
- new = dst->pointer.str;
- } else {
- new = kmemdup(pointer, src->length, GFP_KERNEL);
- if (!new)
- return -ENOMEM;
- }
- } else if (src->type == DEV_PROP_STRING) {
- new = kstrdup(src->value.str, GFP_KERNEL);
- if (!new && src->value.str)
- return -ENOMEM;
- } else {
- new = pointer;
- }
-
- dst->length = src->length;
- dst->is_array = src->is_array;
- dst->type = src->type;
-
- property_set_pointer(dst, new);
-
- dst->name = kstrdup(src->name, GFP_KERNEL);
- if (!dst->name)
- goto out_free_data;
-
- return 0;
-
-out_free_data:
- property_entry_free_data(dst);
- return -ENOMEM;
-}
-
-/**
- * property_entries_dup - duplicate array of properties
- * @properties: array of properties to copy
- *
- * This function creates a deep copy of the given NULL-terminated array
- * of property entries.
- */
-struct property_entry *
-property_entries_dup(const struct property_entry *properties)
-{
- struct property_entry *p;
- int i, n = 0;
-
- while (properties[n].name)
- n++;
-
- p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < n; i++) {
- int ret = property_entry_copy_data(&p[i], &properties[i]);
- if (ret) {
- while (--i >= 0)
- property_entry_free_data(&p[i]);
- kfree(p);
- return ERR_PTR(ret);
- }
- }
-
- return p;
-}
-EXPORT_SYMBOL_GPL(property_entries_dup);
-
-/**
- * property_entries_free - free previously allocated array of properties
- * @properties: array of properties to destroy
- *
- * This function frees given NULL-terminated array of property entries,
- * along with their data.
- */
-void property_entries_free(const struct property_entry *properties)
-{
- const struct property_entry *p;
-
- for (p = properties; p->name; p++)
- property_entry_free_data(p);
-
- kfree(properties);
-}
-EXPORT_SYMBOL_GPL(property_entries_free);
-
-/**
- * pset_free_set - releases memory allocated for copied property set
- * @pset: Property set to release
- *
- * Function takes previously copied property set and releases all the
- * memory allocated to it.
- */
-static void pset_free_set(struct property_set *pset)
-{
- if (!pset)
- return;
-
- property_entries_free(pset->properties);
- kfree(pset);
-}
-
-/**
- * pset_copy_set - copies property set
- * @pset: Property set to copy
- *
- * This function takes a deep copy of the given property set and returns
- * pointer to the copy. Call device_free_property_set() to free resources
- * allocated in this function.
- *
- * Return: Pointer to the new property set or error pointer.
- */
-static struct property_set *pset_copy_set(const struct property_set *pset)
-{
- struct property_entry *properties;
- struct property_set *p;
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- properties = property_entries_dup(pset->properties);
- if (IS_ERR(properties)) {
- kfree(p);
- return ERR_CAST(properties);
- }
-
- p->properties = properties;
- return p;
-}
-
/**
* device_remove_properties - Remove properties from a device object.
* @dev: Device whose properties to remove.
*
* The function removes properties previously associated to the device
- * secondary firmware node with device_add_properties(). Memory allocated
- * to the properties will also be released.
+ * firmware node with device_add_properties(). Memory allocated to the
+ * properties will also be released.
*/
void device_remove_properties(struct device *dev)
{
- struct fwnode_handle *fwnode;
- struct property_set *pset;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
- fwnode = dev_fwnode(dev);
if (!fwnode)
return;
- /*
- * Pick either primary or secondary node depending which one holds
- * the pset. If there is no real firmware node (ACPI/DT) primary
- * will hold the pset.
- */
- pset = to_pset_node(fwnode);
- if (pset) {
- set_primary_fwnode(dev, NULL);
- } else {
- pset = to_pset_node(fwnode->secondary);
- if (pset && dev == pset->dev)
- set_secondary_fwnode(dev, NULL);
+
+ if (is_software_node(fwnode->secondary)) {
+ fwnode_remove_software_node(fwnode->secondary);
+ set_secondary_fwnode(dev, NULL);
}
- if (pset && dev == pset->dev)
- pset_free_set(pset);
}
EXPORT_SYMBOL_GPL(device_remove_properties);
@@ -985,26 +512,22 @@ EXPORT_SYMBOL_GPL(device_remove_properties);
* @properties: Collection of properties to add.
*
* Associate a collection of device properties represented by @properties with
- * @dev as its secondary firmware node. The function takes a copy of
- * @properties.
+ * @dev. The function takes a copy of @properties.
+ *
+ * WARNING: Callers should not use this function if they know that there
+ * is no real firmware node associated with @dev! In that case they
+ * should create a software node and assign it to @dev directly.
*/
int device_add_properties(struct device *dev,
const struct property_entry *properties)
{
- struct property_set *p, pset;
-
- if (!properties)
- return -EINVAL;
-
- pset.properties = properties;
+ struct fwnode_handle *fwnode;
- p = pset_copy_set(&pset);
- if (IS_ERR(p))
- return PTR_ERR(p);
+ fwnode = fwnode_create_software_node(properties, NULL);
+ if (IS_ERR(fwnode))
+ return PTR_ERR(fwnode);
- p->fwnode.ops = &pset_fwnode_ops;
- set_secondary_fwnode(dev, &p->fwnode);
- p->dev = dev;
+ set_secondary_fwnode(dev, fwnode);
return 0;
}
EXPORT_SYMBOL_GPL(device_add_properties);
@@ -1341,7 +864,7 @@ int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index)
EXPORT_SYMBOL(fwnode_irq_get);
/**
- * device_graph_get_next_endpoint - Get next endpoint firmware node
+ * fwnode_graph_get_next_endpoint - Get next endpoint firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
*
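After this rework, device_add_properties() is a thin wrapper: it builds a software node from the entries and installs it as the device's secondary fwnode. A roughly equivalent open-coded sketch, with invented property names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/property.h>

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_U32("max-speed", 100),
	PROPERTY_ENTRY_STRING("label", "demo"),
	{ }	/* the array must be NULL-terminated */
};

static int demo_attach(struct device *dev)
{
	struct fwnode_handle *fwnode;

	fwnode = fwnode_create_software_node(demo_props, NULL);
	if (IS_ERR(fwnode))
		return PTR_ERR(fwnode);

	set_secondary_fwnode(dev, fwnode);
	return 0;
}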
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index b1e9aae9a5d0..2e8f0144f9ab 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -177,17 +177,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
return 0;
}
-static int rbtree_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rbtree_show, inode->i_private);
-}
-
-static const struct file_operations rbtree_fops = {
- .open = rbtree_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(rbtree);
static void rbtree_debugfs_init(struct regmap *map)
{
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 87b562e49a43..19eb454f26c3 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -435,17 +435,7 @@ static int regmap_access_show(struct seq_file *s, void *ignored)
return 0;
}
-static int access_open(struct inode *inode, struct file *file)
-{
- return single_open(file, regmap_access_show, inode->i_private);
-}
-
-static const struct file_operations regmap_access_fops = {
- .open = access_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(regmap_access);
static ssize_t regmap_cache_only_write_file(struct file *file,
const char __user *user_buf,
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 429ca8ed7e51..1bd1145ad8b5 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -44,6 +44,8 @@ struct regmap_irq_chip_data {
unsigned int irq_reg_stride;
unsigned int type_reg_stride;
+
+ bool clear_status:1;
};
static inline const
@@ -77,6 +79,7 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
int i, ret;
u32 reg;
u32 unmask_offset;
+ u32 val;
if (d->chip->runtime_pm) {
ret = pm_runtime_get_sync(map->dev);
@@ -85,6 +88,20 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
ret);
}
+ if (d->clear_status) {
+ for (i = 0; i < d->chip->num_regs; i++) {
+ reg = d->chip->status_base +
+ (i * map->reg_stride * d->irq_reg_stride);
+
+ ret = regmap_read(map, reg, &val);
+ if (ret)
+ dev_err(d->map->dev,
+ "Failed to clear the interrupt status bits\n");
+ }
+
+ d->clear_status = false;
+ }
+
/*
* If there's been a change in the mask write it back to the
* hardware. We rely on the use of the regmap core cache to
@@ -157,20 +174,23 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
}
}
- for (i = 0; i < d->chip->num_type_reg; i++) {
- if (!d->type_buf_def[i])
- continue;
- reg = d->chip->type_base +
- (i * map->reg_stride * d->type_reg_stride);
- if (d->chip->type_invert)
- ret = regmap_irq_update_bits(d, reg,
- d->type_buf_def[i], ~d->type_buf[i]);
- else
- ret = regmap_irq_update_bits(d, reg,
- d->type_buf_def[i], d->type_buf[i]);
- if (ret != 0)
- dev_err(d->map->dev, "Failed to sync type in %x\n",
- reg);
+ /* Don't update the type bits if we're using mask bits for irq type. */
+ if (!d->chip->type_in_mask) {
+ for (i = 0; i < d->chip->num_type_reg; i++) {
+ if (!d->type_buf_def[i])
+ continue;
+ reg = d->chip->type_base +
+ (i * map->reg_stride * d->type_reg_stride);
+ if (d->chip->type_invert)
+ ret = regmap_irq_update_bits(d, reg,
+ d->type_buf_def[i], ~d->type_buf[i]);
+ else
+ ret = regmap_irq_update_bits(d, reg,
+ d->type_buf_def[i], d->type_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev, "Failed to sync type in %x\n",
+ reg);
+ }
}
if (d->chip->runtime_pm)
@@ -194,8 +214,30 @@ static void regmap_irq_enable(struct irq_data *data)
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+ unsigned int mask, type;
+
+ type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;
- d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
+ /*
+ * The type_in_mask flag means that the underlying hardware uses
+ * separate mask bits for rising and falling edge interrupts, but
+ * we want to make them into a single virtual interrupt with
+ * configurable edge.
+ *
+ * If the interrupt we're enabling defines the falling or rising
+ * masks then instead of using the regular mask bits for this
+ * interrupt, use the value previously written to the type buffer
+ * at the corresponding offset in regmap_irq_set_type().
+ */
+ if (d->chip->type_in_mask && type)
+ mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
+ else
+ mask = irq_data->mask;
+
+ if (d->chip->clear_on_unmask)
+ d->clear_status = true;
+
+ d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}
static void regmap_irq_disable(struct irq_data *data)
@@ -212,27 +254,42 @@ static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
struct regmap *map = d->map;
const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
- int reg = irq_data->type_reg_offset / map->reg_stride;
+ int reg;
+ const struct regmap_irq_type *t = &irq_data->type;
- if (!(irq_data->type_rising_mask | irq_data->type_falling_mask))
- return 0;
+ if ((t->types_supported & type) != type)
+ return -ENOTSUPP;
- d->type_buf[reg] &= ~(irq_data->type_falling_mask |
- irq_data->type_rising_mask);
+ reg = t->type_reg_offset / map->reg_stride;
+
+ if (t->type_reg_mask)
+ d->type_buf[reg] &= ~t->type_reg_mask;
+ else
+ d->type_buf[reg] &= ~(t->type_falling_val |
+ t->type_rising_val |
+ t->type_level_low_val |
+ t->type_level_high_val);
switch (type) {
case IRQ_TYPE_EDGE_FALLING:
- d->type_buf[reg] |= irq_data->type_falling_mask;
+ d->type_buf[reg] |= t->type_falling_val;
break;
case IRQ_TYPE_EDGE_RISING:
- d->type_buf[reg] |= irq_data->type_rising_mask;
+ d->type_buf[reg] |= t->type_rising_val;
break;
case IRQ_TYPE_EDGE_BOTH:
- d->type_buf[reg] |= (irq_data->type_falling_mask |
- irq_data->type_rising_mask);
+ d->type_buf[reg] |= (t->type_falling_val |
+ t->type_rising_val);
break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ d->type_buf[reg] |= t->type_level_high_val;
+ break;
+
+ case IRQ_TYPE_LEVEL_LOW:
+ d->type_buf[reg] |= t->type_level_low_val;
+ break;
default:
return -EINVAL;
}
@@ -430,12 +487,16 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
struct regmap_irq_chip_data *d;
int i;
int ret = -ENOMEM;
+ int num_type_reg;
u32 reg;
u32 unmask_offset;
if (chip->num_regs <= 0)
return -EINVAL;
+ if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
+ return -EINVAL;
+
for (i = 0; i < chip->num_irqs; i++) {
if (chip->irqs[i].reg_offset % map->reg_stride)
return -EINVAL;
@@ -479,13 +540,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
goto err_alloc;
}
- if (chip->num_type_reg) {
- d->type_buf_def = kcalloc(chip->num_type_reg,
- sizeof(unsigned int), GFP_KERNEL);
+ num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
+ if (num_type_reg) {
+ d->type_buf_def = kcalloc(num_type_reg,
+ sizeof(unsigned int), GFP_KERNEL);
if (!d->type_buf_def)
goto err_alloc;
- d->type_buf = kcalloc(chip->num_type_reg, sizeof(unsigned int),
+ d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
GFP_KERNEL);
if (!d->type_buf)
goto err_alloc;
@@ -600,27 +662,21 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
}
}
- if (chip->num_type_reg) {
- for (i = 0; i < chip->num_irqs; i++) {
- reg = chip->irqs[i].type_reg_offset / map->reg_stride;
- d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
- chip->irqs[i].type_falling_mask;
- }
+ if (chip->num_type_reg && !chip->type_in_mask) {
for (i = 0; i < chip->num_type_reg; ++i) {
if (!d->type_buf_def[i])
continue;
reg = chip->type_base +
(i * map->reg_stride * d->type_reg_stride);
- if (chip->type_invert)
- ret = regmap_irq_update_bits(d, reg,
- d->type_buf_def[i], 0xFF);
- else
- ret = regmap_irq_update_bits(d, reg,
- d->type_buf_def[i], 0x0);
- if (ret != 0) {
- dev_err(map->dev,
- "Failed to set type in 0x%x: %x\n",
+
+ ret = regmap_read(map, reg, &d->type_buf_def[i]);
+
+ if (d->chip->type_invert)
+ d->type_buf_def[i] = ~d->type_buf_def[i];
+
+ if (ret) {
+ dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
reg, ret);
goto err_alloc;
}
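The regmap-irq rework above moves type programming into a per-interrupt struct regmap_irq_type, keyed by the values to write rather than bare rising/falling masks. A hypothetical single-interrupt chip description using the new fields (register layout invented):

#include <linux/bits.h>
#include <linux/irq.h>
#include <linux/regmap.h>

/* IRQ 0 lives in bit 0 of the mask register; its edge-select bits are
 * bits 0/1 of a type register at offset 0x10. */
static const struct regmap_irq demo_irqs[] = {
	{
		.reg_offset = 0,
		.mask = BIT(0),
		.type = {
			.type_reg_offset  = 0x10,
			.types_supported  = IRQ_TYPE_EDGE_BOTH,
			.type_rising_val  = BIT(0),
			.type_falling_val = BIT(1),
			.type_reg_mask    = BIT(0) | BIT(1),
		},
	},
};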
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
new file mode 100644
index 000000000000..306bb93287af
--- /dev/null
+++ b/drivers/base/swnode.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Software nodes for the firmware node framework.
+ *
+ * Copyright (C) 2018, Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+struct software_node {
+ int id;
+ struct kobject kobj;
+ struct fwnode_handle fwnode;
+
+ /* hierarchy */
+ struct ida child_ids;
+ struct list_head entry;
+ struct list_head children;
+ struct software_node *parent;
+
+ /* properties */
+ const struct property_entry *properties;
+};
+
+static DEFINE_IDA(swnode_root_ids);
+static struct kset *swnode_kset;
+
+#define kobj_to_swnode(_kobj_) container_of(_kobj_, struct software_node, kobj)
+
+static const struct fwnode_operations software_node_ops;
+
+bool is_software_node(const struct fwnode_handle *fwnode)
+{
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &software_node_ops;
+}
+
+#define to_software_node(__fwnode) \
+ ({ \
+ typeof(__fwnode) __to_software_node_fwnode = __fwnode; \
+ \
+ is_software_node(__to_software_node_fwnode) ? \
+ container_of(__to_software_node_fwnode, \
+ struct software_node, fwnode) : \
+ NULL; \
+ })
+
+/* -------------------------------------------------------------------------- */
+/* property_entry processing */
+
+static const struct property_entry *
+property_entry_get(const struct property_entry *prop, const char *name)
+{
+ if (!prop)
+ return NULL;
+
+ for (; prop->name; prop++)
+ if (!strcmp(name, prop->name))
+ return prop;
+
+ return NULL;
+}
+
+static void
+property_set_pointer(struct property_entry *prop, const void *pointer)
+{
+ switch (prop->type) {
+ case DEV_PROP_U8:
+ if (prop->is_array)
+ prop->pointer.u8_data = pointer;
+ else
+ prop->value.u8_data = *((u8 *)pointer);
+ break;
+ case DEV_PROP_U16:
+ if (prop->is_array)
+ prop->pointer.u16_data = pointer;
+ else
+ prop->value.u16_data = *((u16 *)pointer);
+ break;
+ case DEV_PROP_U32:
+ if (prop->is_array)
+ prop->pointer.u32_data = pointer;
+ else
+ prop->value.u32_data = *((u32 *)pointer);
+ break;
+ case DEV_PROP_U64:
+ if (prop->is_array)
+ prop->pointer.u64_data = pointer;
+ else
+ prop->value.u64_data = *((u64 *)pointer);
+ break;
+ case DEV_PROP_STRING:
+ if (prop->is_array)
+ prop->pointer.str = pointer;
+ else
+ prop->value.str = pointer;
+ break;
+ default:
+ break;
+ }
+}
+
+static const void *property_get_pointer(const struct property_entry *prop)
+{
+ switch (prop->type) {
+ case DEV_PROP_U8:
+ if (prop->is_array)
+ return prop->pointer.u8_data;
+ return &prop->value.u8_data;
+ case DEV_PROP_U16:
+ if (prop->is_array)
+ return prop->pointer.u16_data;
+ return &prop->value.u16_data;
+ case DEV_PROP_U32:
+ if (prop->is_array)
+ return prop->pointer.u32_data;
+ return &prop->value.u32_data;
+ case DEV_PROP_U64:
+ if (prop->is_array)
+ return prop->pointer.u64_data;
+ return &prop->value.u64_data;
+ case DEV_PROP_STRING:
+ if (prop->is_array)
+ return prop->pointer.str;
+ return &prop->value.str;
+ default:
+ return NULL;
+ }
+}
+
+static const void *property_entry_find(const struct property_entry *props,
+ const char *propname, size_t length)
+{
+ const struct property_entry *prop;
+ const void *pointer;
+
+ prop = property_entry_get(props, propname);
+ if (!prop)
+ return ERR_PTR(-EINVAL);
+ pointer = property_get_pointer(prop);
+ if (!pointer)
+ return ERR_PTR(-ENODATA);
+ if (length > prop->length)
+ return ERR_PTR(-EOVERFLOW);
+ return pointer;
+}
+
+static int property_entry_read_u8_array(const struct property_entry *props,
+ const char *propname,
+ u8 *values, size_t nval)
+{
+ const void *pointer;
+ size_t length = nval * sizeof(*values);
+
+ pointer = property_entry_find(props, propname, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(values, pointer, length);
+ return 0;
+}
+
+static int property_entry_read_u16_array(const struct property_entry *props,
+ const char *propname,
+ u16 *values, size_t nval)
+{
+ const void *pointer;
+ size_t length = nval * sizeof(*values);
+
+ pointer = property_entry_find(props, propname, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(values, pointer, length);
+ return 0;
+}
+
+static int property_entry_read_u32_array(const struct property_entry *props,
+ const char *propname,
+ u32 *values, size_t nval)
+{
+ const void *pointer;
+ size_t length = nval * sizeof(*values);
+
+ pointer = property_entry_find(props, propname, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(values, pointer, length);
+ return 0;
+}
+
+static int property_entry_read_u64_array(const struct property_entry *props,
+ const char *propname,
+ u64 *values, size_t nval)
+{
+ const void *pointer;
+ size_t length = nval * sizeof(*values);
+
+ pointer = property_entry_find(props, propname, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(values, pointer, length);
+ return 0;
+}
+
+static int
+property_entry_count_elems_of_size(const struct property_entry *props,
+ const char *propname, size_t length)
+{
+ const struct property_entry *prop;
+
+ prop = property_entry_get(props, propname);
+ if (!prop)
+ return -EINVAL;
+
+ return prop->length / length;
+}
+
+static int property_entry_read_int_array(const struct property_entry *props,
+ const char *name,
+ unsigned int elem_size, void *val,
+ size_t nval)
+{
+ if (!val)
+ return property_entry_count_elems_of_size(props, name,
+ elem_size);
+ switch (elem_size) {
+ case sizeof(u8):
+ return property_entry_read_u8_array(props, name, val, nval);
+ case sizeof(u16):
+ return property_entry_read_u16_array(props, name, val, nval);
+ case sizeof(u32):
+ return property_entry_read_u32_array(props, name, val, nval);
+ case sizeof(u64):
+ return property_entry_read_u64_array(props, name, val, nval);
+ }
+
+ return -ENXIO;
+}
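+
+/*
+ * Example (illustrative): reading four u32 elements dispatches through the
+ * sizeof(u32) case above:
+ *
+ *	u32 vals[4];
+ *	int ret = property_entry_read_int_array(props, "pins", sizeof(u32),
+ *						vals, ARRAY_SIZE(vals));
+ */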
+
+static int property_entry_read_string_array(const struct property_entry *props,
+ const char *propname,
+ const char **strings, size_t nval)
+{
+ const struct property_entry *prop;
+ const void *pointer;
+ size_t array_len, length;
+
+ /* Find out the array length. */
+ prop = property_entry_get(props, propname);
+ if (!prop)
+ return -EINVAL;
+
+ if (prop->is_array)
+ /* Find the length of an array. */
+ array_len = property_entry_count_elems_of_size(props, propname,
+ sizeof(const char *));
+ else
+ /* The array length for a non-array string property is 1. */
+ array_len = 1;
+
+ /* Return how many there are if strings is NULL. */
+ if (!strings)
+ return array_len;
+
+ array_len = min(nval, array_len);
+ length = array_len * sizeof(*strings);
+
+ pointer = property_entry_find(props, propname, length);
+ if (IS_ERR(pointer))
+ return PTR_ERR(pointer);
+
+ memcpy(strings, pointer, length);
+
+ return array_len;
+}
+
+static void property_entry_free_data(const struct property_entry *p)
+{
+ const void *pointer = property_get_pointer(p);
+ size_t i, nval;
+
+ if (p->is_array) {
+ if (p->type == DEV_PROP_STRING && p->pointer.str) {
+ nval = p->length / sizeof(const char *);
+ for (i = 0; i < nval; i++)
+ kfree(p->pointer.str[i]);
+ }
+ kfree(pointer);
+ } else if (p->type == DEV_PROP_STRING) {
+ kfree(p->value.str);
+ }
+ kfree(p->name);
+}
+
+static int property_copy_string_array(struct property_entry *dst,
+ const struct property_entry *src)
+{
+ const char **d;
+ size_t nval = src->length / sizeof(*d);
+ int i;
+
+ d = kcalloc(nval, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ for (i = 0; i < nval; i++) {
+ d[i] = kstrdup(src->pointer.str[i], GFP_KERNEL);
+ if (!d[i] && src->pointer.str[i]) {
+ while (--i >= 0)
+ kfree(d[i]);
+ kfree(d);
+ return -ENOMEM;
+ }
+ }
+
+ dst->pointer.str = d;
+ return 0;
+}
+
+static int property_entry_copy_data(struct property_entry *dst,
+ const struct property_entry *src)
+{
+ const void *pointer = property_get_pointer(src);
+ const void *new;
+ int error;
+
+ if (src->is_array) {
+ if (!src->length)
+ return -ENODATA;
+
+ if (src->type == DEV_PROP_STRING) {
+ error = property_copy_string_array(dst, src);
+ if (error)
+ return error;
+ new = dst->pointer.str;
+ } else {
+ new = kmemdup(pointer, src->length, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ }
+ } else if (src->type == DEV_PROP_STRING) {
+ new = kstrdup(src->value.str, GFP_KERNEL);
+ if (!new && src->value.str)
+ return -ENOMEM;
+ } else {
+ new = pointer;
+ }
+
+ dst->length = src->length;
+ dst->is_array = src->is_array;
+ dst->type = src->type;
+
+ property_set_pointer(dst, new);
+
+ dst->name = kstrdup(src->name, GFP_KERNEL);
+ if (!dst->name)
+ goto out_free_data;
+
+ return 0;
+
+out_free_data:
+ property_entry_free_data(dst);
+ return -ENOMEM;
+}
+
+/**
+ * property_entries_dup - duplicate array of properties
+ * @properties: array of properties to copy
+ *
+ * This function creates a deep copy of the given NULL-terminated array
+ * of property entries.
+ */
+struct property_entry *
+property_entries_dup(const struct property_entry *properties)
+{
+ struct property_entry *p;
+ int i, n = 0;
+ int ret;
+
+ while (properties[n].name)
+ n++;
+
+ p = kcalloc(n + 1, sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < n; i++) {
+ ret = property_entry_copy_data(&p[i], &properties[i]);
+ if (ret) {
+ while (--i >= 0)
+ property_entry_free_data(&p[i]);
+ kfree(p);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(property_entries_dup);
+
+/**
+ * property_entries_free - free previously allocated array of properties
+ * @properties: array of properties to destroy
+ *
+ * This function frees the given NULL-terminated array of property entries,
+ * along with their data.
+ */
+void property_entries_free(const struct property_entry *properties)
+{
+ const struct property_entry *p;
+
+ if (!properties)
+ return;
+
+ for (p = properties; p->name; p++)
+ property_entry_free_data(p);
+
+ kfree(properties);
+}
+EXPORT_SYMBOL_GPL(property_entries_free);
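+
+/*
+ * Example (illustrative): a copy made with property_entries_dup() is owned
+ * by the caller and must eventually be released with property_entries_free():
+ *
+ *	struct property_entry *copy = property_entries_dup(props);
+ *	if (IS_ERR(copy))
+ *		return PTR_ERR(copy);
+ *	...
+ *	property_entries_free(copy);
+ */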
+
+/* -------------------------------------------------------------------------- */
+/* fwnode operations */
+
+static struct fwnode_handle *software_node_get(struct fwnode_handle *fwnode)
+{
+ struct software_node *swnode = to_software_node(fwnode);
+
+ kobject_get(&swnode->kobj);
+
+ return &swnode->fwnode;
+}
+
+static void software_node_put(struct fwnode_handle *fwnode)
+{
+ struct software_node *swnode = to_software_node(fwnode);
+
+ kobject_put(&swnode->kobj);
+}
+
+static bool software_node_property_present(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ return !!property_entry_get(to_software_node(fwnode)->properties,
+ propname);
+}
+
+static int software_node_read_int_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ unsigned int elem_size, void *val,
+ size_t nval)
+{
+ struct software_node *swnode = to_software_node(fwnode);
+
+ return property_entry_read_int_array(swnode->properties, propname,
+ elem_size, val, nval);
+}
+
+static int software_node_read_string_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ const char **val, size_t nval)
+{
+ struct software_node *swnode = to_software_node(fwnode);
+
+ return property_entry_read_string_array(swnode->properties, propname,
+ val, nval);
+}
+
+static struct fwnode_handle *
+software_node_get_parent(const struct fwnode_handle *fwnode)
+{
+ struct software_node *swnode = to_software_node(fwnode);
+
+ return swnode->parent ? &swnode->parent->fwnode : NULL;
+}
+
+static struct fwnode_handle *
+software_node_get_next_child(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ struct software_node *p = to_software_node(fwnode);
+ struct software_node *c = to_software_node(child);
+
+ if (list_empty(&p->children) ||
+ (c && list_is_last(&c->entry, &p->children)))
+ return NULL;
+
+ if (c)
+ c = list_next_entry(c, entry);
+ else
+ c = list_first_entry(&p->children, struct software_node, entry);
+ return &c->fwnode;
+}
+
+static const struct fwnode_operations software_node_ops = {
+ .get = software_node_get,
+ .put = software_node_put,
+ .property_present = software_node_property_present,
+ .property_read_int_array = software_node_read_int_array,
+ .property_read_string_array = software_node_read_string_array,
+ .get_parent = software_node_get_parent,
+ .get_next_child_node = software_node_get_next_child,
+};
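+
+/*
+ * With this table installed, generic accessors such as
+ * device_property_read_u32() and fwnode_property_present() resolve to the
+ * property_entry_* readers above whenever the handle is a software node.
+ */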
+
+/* -------------------------------------------------------------------------- */
+
+static int
+software_node_register_properties(struct software_node *swnode,
+ const struct property_entry *properties)
+{
+ struct property_entry *props;
+
+ props = property_entries_dup(properties);
+ if (IS_ERR(props))
+ return PTR_ERR(props);
+
+ swnode->properties = props;
+
+ return 0;
+}
+
+static void software_node_release(struct kobject *kobj)
+{
+ struct software_node *swnode = kobj_to_swnode(kobj);
+
+ if (swnode->parent) {
+ ida_simple_remove(&swnode->parent->child_ids, swnode->id);
+ list_del(&swnode->entry);
+ } else {
+ ida_simple_remove(&swnode_root_ids, swnode->id);
+ }
+
+ ida_destroy(&swnode->child_ids);
+ property_entries_free(swnode->properties);
+ kfree(swnode);
+}
+
+static struct kobj_type software_node_type = {
+ .release = software_node_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+struct fwnode_handle *
+fwnode_create_software_node(const struct property_entry *properties,
+ const struct fwnode_handle *parent)
+{
+ struct software_node *p = NULL;
+ struct software_node *swnode;
+ int ret;
+
+ if (parent) {
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+ if (!is_software_node(parent))
+ return ERR_PTR(-EINVAL);
+ p = to_software_node(parent);
+ }
+
+ swnode = kzalloc(sizeof(*swnode), GFP_KERNEL);
+ if (!swnode)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ida_simple_get(p ? &p->child_ids : &swnode_root_ids, 0, 0,
+ GFP_KERNEL);
+ if (ret < 0) {
+ kfree(swnode);
+ return ERR_PTR(ret);
+ }
+
+ swnode->id = ret;
+ swnode->kobj.kset = swnode_kset;
+ swnode->fwnode.ops = &software_node_ops;
+
+ ida_init(&swnode->child_ids);
+ INIT_LIST_HEAD(&swnode->entry);
+ INIT_LIST_HEAD(&swnode->children);
+ swnode->parent = p;
+
+ if (p)
+ list_add_tail(&swnode->entry, &p->children);
+
+ ret = kobject_init_and_add(&swnode->kobj, &software_node_type,
+ p ? &p->kobj : NULL, "node%d", swnode->id);
+ if (ret) {
+ kobject_put(&swnode->kobj);
+ return ERR_PTR(ret);
+ }
+
+ ret = software_node_register_properties(swnode, properties);
+ if (ret) {
+ kobject_put(&swnode->kobj);
+ return ERR_PTR(ret);
+ }
+
+ kobject_uevent(&swnode->kobj, KOBJ_ADD);
+ return &swnode->fwnode;
+}
+EXPORT_SYMBOL_GPL(fwnode_create_software_node);
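+
+/*
+ * Example (illustrative): a platform setup file could attach two properties
+ * to a root software node using the PROPERTY_ENTRY_* helpers from
+ * <linux/property.h>:
+ *
+ *	static const struct property_entry props[] = {
+ *		PROPERTY_ENTRY_U32("max-speed", 100),
+ *		PROPERTY_ENTRY_STRING("label", "demo"),
+ *		{ }
+ *	};
+ *
+ *	fwnode = fwnode_create_software_node(props, NULL);
+ */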
+
+void fwnode_remove_software_node(struct fwnode_handle *fwnode)
+{
+ struct software_node *swnode = to_software_node(fwnode);
+
+ if (!swnode)
+ return;
+
+ kobject_put(&swnode->kobj);
+}
+EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
+
+int software_node_notify(struct device *dev, unsigned long action)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct software_node *swnode;
+ int ret;
+
+ if (!fwnode)
+ return 0;
+
+ if (!is_software_node(fwnode))
+ fwnode = fwnode->secondary;
+ if (!is_software_node(fwnode))
+ return 0;
+
+ swnode = to_software_node(fwnode);
+
+ switch (action) {
+ case KOBJ_ADD:
+ ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
+ "software_node");
+ if (ret)
+ break;
+
+ ret = sysfs_create_link(&swnode->kobj, &dev->kobj,
+ dev_name(dev));
+ if (ret) {
+ sysfs_remove_link(&dev->kobj, "software_node");
+ break;
+ }
+ kobject_get(&swnode->kobj);
+ break;
+ case KOBJ_REMOVE:
+ sysfs_remove_link(&swnode->kobj, dev_name(dev));
+ sysfs_remove_link(&dev->kobj, "software_node");
+ kobject_put(&swnode->kobj);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int __init software_node_init(void)
+{
+ swnode_kset = kset_create_and_add("software_nodes", NULL, kernel_kobj);
+ if (!swnode_kset)
+ return -ENOMEM;
+ return 0;
+}
+postcore_initcall(software_node_init);
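+
+/*
+ * Registered at postcore_initcall() time, which runs before the device and
+ * driver initcall levels, so the "software_nodes" kset exists by the time
+ * ordinary drivers start creating nodes.
+ */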
+
+static void __exit software_node_exit(void)
+{
+ ida_destroy(&swnode_root_ids);
+ kset_unregister(swnode_kset);
+}
+__exitcall(software_node_exit);
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 7ca76ed2e71a..84d0fcebd6af 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -100,6 +100,10 @@ enum {
MAX_TAINT = 1000, /* cap on aoetgt taint */
};
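+/*
+ * Per-request driver data, allocated by blk-mq alongside each request once
+ * set->cmd_size is set in aoeblk_gdalloc() and retrieved with
+ * blk_mq_rq_to_pdu().
+ */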
+struct aoe_req {
+ unsigned long nr_bios;
+};
+
struct buf {
ulong nframesout;
struct bio *bio;
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index ed26b7287256..e2c6aae2d636 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -387,6 +387,7 @@ aoeblk_gdalloc(void *vp)
set = &d->tag_set;
set->ops = &aoeblk_mq_ops;
+ set->cmd_size = sizeof(struct aoe_req);
set->nr_hw_queues = 1;
set->queue_depth = 128;
set->numa_node = NUMA_NO_NODE;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index bb2fba651bd2..3cf9bc5d8d95 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -822,17 +822,6 @@ out:
spin_unlock_irqrestore(&d->lock, flags);
}
-static unsigned long
-rqbiocnt(struct request *r)
-{
- struct bio *bio;
- unsigned long n = 0;
-
- __rq_for_each_bio(bio, r)
- n++;
- return n;
-}
-
static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
@@ -847,6 +836,7 @@ nextbuf(struct aoedev *d)
{
struct request *rq;
struct request_queue *q;
+ struct aoe_req *req;
struct buf *buf;
struct bio *bio;
@@ -865,7 +855,11 @@ nextbuf(struct aoedev *d)
blk_mq_start_request(rq);
d->ip.rq = rq;
d->ip.nxbio = rq->bio;
- rq->special = (void *) rqbiocnt(rq);
+
+ req = blk_mq_rq_to_pdu(rq);
+ req->nr_bios = 0;
+ __rq_for_each_bio(bio, rq)
+ req->nr_bios++;
}
buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
if (buf == NULL) {
@@ -1069,16 +1063,13 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
static void
aoe_end_buf(struct aoedev *d, struct buf *buf)
{
- struct request *rq;
- unsigned long n;
+ struct request *rq = buf->rq;
+ struct aoe_req *req = blk_mq_rq_to_pdu(rq);
if (buf == d->ip.buf)
d->ip.buf = NULL;
- rq = buf->rq;
mempool_free(buf, d->bufpool);
- n = (unsigned long) rq->special;
- rq->special = (void *) --n;
- if (n == 0)
+ if (--req->nr_bios == 0)
aoe_end_request(d, rq, 0);
}
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 9063f8efbd3b..5b49f1b33ebe 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -160,21 +160,22 @@ static void
aoe_failip(struct aoedev *d)
{
struct request *rq;
+ struct aoe_req *req;
struct bio *bio;
- unsigned long n;
aoe_failbuf(d, d->ip.buf);
-
rq = d->ip.rq;
if (rq == NULL)
return;
+
+ req = blk_mq_rq_to_pdu(rq);
while ((bio = d->ip.nxbio)) {
bio->bi_status = BLK_STS_IOERR;
d->ip.nxbio = bio->bi_next;
- n = (unsigned long) rq->special;
- rq->special = (void *) --n;
+ req->nr_bios--;
}
- if ((unsigned long) rq->special == 0)
+
+ if (!req->nr_bios)
aoe_end_request(d, rq, 0);
}
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index 251482066977..1e4e2971171c 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -24,7 +24,7 @@ static void discover_timer(struct timer_list *t)
aoecmd_cfg(0xffff, 0xff);
}
-static void
+static void __exit
aoe_exit(void)
{
del_timer_sync(&timer);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index f88b4c26d422..b0dbbdfeb33e 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1471,6 +1471,15 @@ static void setup_req_params( int drive )
ReqTrack, ReqSector, (unsigned long)ReqData ));
}
+static void ataflop_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ spin_lock_irq(&ataflop_lock);
+ atari_disable_irq(IRQ_MFP_FDC);
+ finish_fdc();
+ atari_enable_irq(IRQ_MFP_FDC);
+ spin_unlock_irq(&ataflop_lock);
+}
+
static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1947,6 +1956,7 @@ static const struct block_device_operations floppy_fops = {
static const struct blk_mq_ops ataflop_mq_ops = {
.queue_rq = ataflop_queue_rq,
+ .commit_rqs = ataflop_commit_rqs,
};
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
@@ -1982,6 +1992,7 @@ static int __init atari_floppy_init (void)
&ataflop_mq_ops, 2,
BLK_MQ_F_SHOULD_MERGE);
if (IS_ERR(unit[i].disk->queue)) {
ret = PTR_ERR(unit[i].disk->queue);
unit[i].disk->queue = NULL;
+ put_disk(unit[i].disk);
goto err;
@@ -2033,18 +2044,13 @@ static int __init atari_floppy_init (void)
return 0;
err:
- do {
+ while (--i >= 0) {
struct gendisk *disk = unit[i].disk;
- if (disk) {
- if (disk->queue) {
- blk_cleanup_queue(disk->queue);
- disk->queue = NULL;
- }
- blk_mq_free_tag_set(&unit[i].tag_set);
- put_disk(unit[i].disk);
- }
- } while (i--);
+ blk_cleanup_queue(disk->queue);
+ blk_mq_free_tag_set(&unit[i].tag_set);
+ put_disk(unit[i].disk);
+ }
unregister_blkdev(FLOPPY_MAJOR, "fd");
return ret;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index fa8204214ac0..f973a2a845c8 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2792,7 +2792,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_init_set_defaults(device);
- q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, &resource->req_lock);
+ q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
if (!q)
goto out_no_q;
device->rq_queue = q;
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 61c392752fe4..ccfcf00f2798 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3623,7 +3623,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
* change.
*/
- peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+ peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, 0);
if (IS_ERR(peer_integrity_tfm)) {
peer_integrity_tfm = NULL;
drbd_err(connection, "peer data-integrity-alg %s not supported\n",
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index fb23578e9a41..6f2856c6d0f2 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2231,7 +2231,6 @@ static void request_done(int uptodate)
{
struct request *req = current_req;
struct request_queue *q;
- unsigned long flags;
int block;
char msg[sizeof("request done ") + sizeof(int) * 3];
@@ -2254,10 +2253,7 @@ static void request_done(int uptodate)
if (block > _floppy->sect)
DRS->maxtrack = 1;
- /* unlock chained buffers */
- spin_lock_irqsave(q->queue_lock, flags);
floppy_end_request(req, 0);
- spin_unlock_irqrestore(q->queue_lock, flags);
} else {
if (rq_data_dir(req) == WRITE) {
/* record write error information */
@@ -2269,9 +2265,7 @@ static void request_done(int uptodate)
DRWE->last_error_sector = blk_rq_pos(req);
DRWE->last_error_generation = DRS->generation;
}
- spin_lock_irqsave(q->queue_lock, flags);
floppy_end_request(req, BLK_STS_IOERR);
- spin_unlock_irqrestore(q->queue_lock, flags);
}
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cb0cc8685076..0939f36548c9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -77,13 +77,14 @@
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
+#include <linux/blk-cgroup.h>
#include "loop.h"
#include <linux/uaccess.h>
static DEFINE_IDR(loop_index_idr);
-static DEFINE_MUTEX(loop_index_mutex);
+static DEFINE_MUTEX(loop_ctl_mutex);
static int max_part;
static int part_shift;
@@ -630,18 +631,7 @@ static void loop_reread_partitions(struct loop_device *lo,
{
int rc;
- /*
- * bd_mutex has been held already in release path, so don't
- * acquire it if this function is called in such case.
- *
- * If the reread partition isn't from release path, lo_refcnt
- * must be at least one and it can only become zero when the
- * current holder is released.
- */
- if (!atomic_read(&lo->lo_refcnt))
- rc = __blkdev_reread_part(bdev);
- else
- rc = blkdev_reread_part(bdev);
+ rc = blkdev_reread_part(bdev);
if (rc)
pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
__func__, lo->lo_number, lo->lo_file_name, rc);
@@ -688,26 +678,30 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
unsigned int arg)
{
- struct file *file, *old_file;
+ struct file *file = NULL, *old_file;
int error;
+ bool partscan;
+ error = mutex_lock_killable(&loop_ctl_mutex);
+ if (error)
+ return error;
error = -ENXIO;
if (lo->lo_state != Lo_bound)
- goto out;
+ goto out_err;
/* the loop device has to be read-only */
error = -EINVAL;
if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
- goto out;
+ goto out_err;
error = -EBADF;
file = fget(arg);
if (!file)
- goto out;
+ goto out_err;
error = loop_validate_file(file, bdev);
if (error)
- goto out_putf;
+ goto out_err;
old_file = lo->lo_backing_file;
@@ -715,7 +709,7 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
/* size of the new backing store needs to be the same */
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
- goto out_putf;
+ goto out_err;
/* and ... switch */
blk_mq_freeze_queue(lo->lo_queue);
@@ -726,15 +720,22 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
loop_update_dio(lo);
blk_mq_unfreeze_queue(lo->lo_queue);
-
+ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
+ mutex_unlock(&loop_ctl_mutex);
+ /*
+ * We must drop file reference outside of loop_ctl_mutex as dropping
+ * the file ref can take bd_mutex which creates circular locking
+ * dependency.
+ */
fput(old_file);
- if (lo->lo_flags & LO_FLAGS_PARTSCAN)
+ if (partscan)
loop_reread_partitions(lo, bdev);
return 0;
- out_putf:
- fput(file);
- out:
+out_err:
+ mutex_unlock(&loop_ctl_mutex);
+ if (file)
+ fput(file);
return error;
}
@@ -909,6 +910,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
int lo_flags = 0;
int error;
loff_t size;
+ bool partscan;
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
@@ -918,13 +920,17 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (!file)
goto out;
+ error = mutex_lock_killable(&loop_ctl_mutex);
+ if (error)
+ goto out_putf;
+
error = -EBUSY;
if (lo->lo_state != Lo_unbound)
- goto out_putf;
+ goto out_unlock;
error = loop_validate_file(file, bdev);
if (error)
- goto out_putf;
+ goto out_unlock;
mapping = file->f_mapping;
inode = mapping->host;
@@ -936,10 +942,10 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
error = -EFBIG;
size = get_loop_size(lo, file);
if ((loff_t)(sector_t)size != size)
- goto out_putf;
+ goto out_unlock;
error = loop_prepare_queue(lo);
if (error)
- goto out_putf;
+ goto out_unlock;
error = 0;
@@ -971,18 +977,22 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->lo_state = Lo_bound;
if (part_shift)
lo->lo_flags |= LO_FLAGS_PARTSCAN;
- if (lo->lo_flags & LO_FLAGS_PARTSCAN)
- loop_reread_partitions(lo, bdev);
+ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
/* Grab the block_device to prevent its destruction after we
- * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
+ * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
*/
bdgrab(bdev);
+ mutex_unlock(&loop_ctl_mutex);
+ if (partscan)
+ loop_reread_partitions(lo, bdev);
return 0;
- out_putf:
+out_unlock:
+ mutex_unlock(&loop_ctl_mutex);
+out_putf:
fput(file);
- out:
+out:
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
return error;
@@ -1025,39 +1035,31 @@ loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
return err;
}
-static int loop_clr_fd(struct loop_device *lo)
+static int __loop_clr_fd(struct loop_device *lo, bool release)
{
- struct file *filp = lo->lo_backing_file;
+ struct file *filp = NULL;
gfp_t gfp = lo->old_gfp_mask;
struct block_device *bdev = lo->lo_device;
+ int err = 0;
+ bool partscan = false;
+ int lo_number;
- if (lo->lo_state != Lo_bound)
- return -ENXIO;
-
- /*
- * If we've explicitly asked to tear down the loop device,
- * and it has an elevated reference count, set it for auto-teardown when
- * the last reference goes away. This stops $!~#$@ udev from
- * preventing teardown because it decided that it needs to run blkid on
- * the loopback device whenever they appear. xfstests is notorious for
- * failing tests because blkid via udev races with a losetup
- * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
- * command to fail with EBUSY.
- */
- if (atomic_read(&lo->lo_refcnt) > 1) {
- lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
- mutex_unlock(&lo->lo_ctl_mutex);
- return 0;
+ mutex_lock(&loop_ctl_mutex);
+ if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
+ err = -ENXIO;
+ goto out_unlock;
}
- if (filp == NULL)
- return -EINVAL;
+ filp = lo->lo_backing_file;
+ if (filp == NULL) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
/* freeze request queue during the transition */
blk_mq_freeze_queue(lo->lo_queue);
spin_lock_irq(&lo->lo_lock);
- lo->lo_state = Lo_rundown;
lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock);
@@ -1093,21 +1095,73 @@ static int loop_clr_fd(struct loop_device *lo)
module_put(THIS_MODULE);
blk_mq_unfreeze_queue(lo->lo_queue);
- if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
- loop_reread_partitions(lo, bdev);
+ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
+ lo_number = lo->lo_number;
lo->lo_flags = 0;
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
loop_unprepare_queue(lo);
- mutex_unlock(&lo->lo_ctl_mutex);
+out_unlock:
+ mutex_unlock(&loop_ctl_mutex);
+ if (partscan) {
+ /*
+ * bd_mutex has been held already in release path, so don't
+ * acquire it if this function is called in such case.
+ *
+ * If the reread partition isn't from release path, lo_refcnt
+ * must be at least one and it can only become zero when the
+ * current holder is released.
+ */
+ if (release)
+ err = __blkdev_reread_part(bdev);
+ else
+ err = blkdev_reread_part(bdev);
+ if (err)
+ pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
+ __func__, lo_number, err);
+ /* Device is gone, no point in returning error */
+ err = 0;
+ }
/*
- * Need not hold lo_ctl_mutex to fput backing file.
- * Calling fput holding lo_ctl_mutex triggers a circular
+ * Need not hold loop_ctl_mutex to fput backing file.
+ * Calling fput holding loop_ctl_mutex triggers a circular
* lock dependency possibility warning as fput can take
- * bd_mutex which is usually taken before lo_ctl_mutex.
+ * bd_mutex which is usually taken before loop_ctl_mutex.
*/
- fput(filp);
- return 0;
+ if (filp)
+ fput(filp);
+ return err;
+}
+
+static int loop_clr_fd(struct loop_device *lo)
+{
+ int err;
+
+ err = mutex_lock_killable(&loop_ctl_mutex);
+ if (err)
+ return err;
+ if (lo->lo_state != Lo_bound) {
+ mutex_unlock(&loop_ctl_mutex);
+ return -ENXIO;
+ }
+ /*
+ * If we've explicitly asked to tear down the loop device,
+ * and it has an elevated reference count, set it for auto-teardown when
+ * the last reference goes away. This stops $!~#$@ udev from
+ * preventing teardown because it decided that it needs to run blkid on
+ * the loopback device whenever they appear. xfstests is notorious for
+ * failing tests because blkid via udev races with a losetup
+ * <dev>/do something like mkfs/losetup -d <dev> causing the losetup -d
+ * command to fail with EBUSY.
+ */
+ if (atomic_read(&lo->lo_refcnt) > 1) {
+ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+ mutex_unlock(&loop_ctl_mutex);
+ return 0;
+ }
+ lo->lo_state = Lo_rundown;
+ mutex_unlock(&loop_ctl_mutex);
+
+ return __loop_clr_fd(lo, false);
}
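+
+/*
+ * Note the ordering: loop_clr_fd() marks the device Lo_rundown while still
+ * holding loop_ctl_mutex, so once the lock is dropped, __loop_clr_fd() above
+ * is the only path that can complete the teardown.
+ */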
static int
@@ -1116,47 +1170,58 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
int err;
struct loop_func_table *xfer;
kuid_t uid = current_uid();
+ struct block_device *bdev;
+ bool partscan = false;
+ err = mutex_lock_killable(&loop_ctl_mutex);
+ if (err)
+ return err;
if (lo->lo_encrypt_key_size &&
!uid_eq(lo->lo_key_owner, uid) &&
- !capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (lo->lo_state != Lo_bound)
- return -ENXIO;
- if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
- return -EINVAL;
+ !capable(CAP_SYS_ADMIN)) {
+ err = -EPERM;
+ goto out_unlock;
+ }
+ if (lo->lo_state != Lo_bound) {
+ err = -ENXIO;
+ goto out_unlock;
+ }
+ if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
err = loop_release_xfer(lo);
if (err)
- goto exit;
+ goto out_unfreeze;
if (info->lo_encrypt_type) {
unsigned int type = info->lo_encrypt_type;
if (type >= MAX_LO_CRYPT) {
err = -EINVAL;
- goto exit;
+ goto out_unfreeze;
}
xfer = xfer_funcs[type];
if (xfer == NULL) {
err = -EINVAL;
- goto exit;
+ goto out_unfreeze;
}
} else
xfer = NULL;
err = loop_init_xfer(lo, xfer, info);
if (err)
- goto exit;
+ goto out_unfreeze;
if (lo->lo_offset != info->lo_offset ||
lo->lo_sizelimit != info->lo_sizelimit) {
if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
err = -EFBIG;
- goto exit;
+ goto out_unfreeze;
}
}
@@ -1188,15 +1253,20 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
/* update dio if lo_offset or transfer is changed */
__loop_update_dio(lo, lo->use_dio);
- exit:
+out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue);
if (!err && (info->lo_flags & LO_FLAGS_PARTSCAN) &&
!(lo->lo_flags & LO_FLAGS_PARTSCAN)) {
lo->lo_flags |= LO_FLAGS_PARTSCAN;
lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
- loop_reread_partitions(lo, lo->lo_device);
+ bdev = lo->lo_device;
+ partscan = true;
}
+out_unlock:
+ mutex_unlock(&loop_ctl_mutex);
+ if (partscan)
+ loop_reread_partitions(lo, bdev);
return err;
}
@@ -1204,12 +1274,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
- struct file *file;
+ struct path path;
struct kstat stat;
int ret;
+ ret = mutex_lock_killable(&loop_ctl_mutex);
+ if (ret)
+ return ret;
if (lo->lo_state != Lo_bound) {
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
return -ENXIO;
}
@@ -1228,17 +1301,17 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
lo->lo_encrypt_key_size);
}
- /* Drop lo_ctl_mutex while we call into the filesystem. */
- file = get_file(lo->lo_backing_file);
- mutex_unlock(&lo->lo_ctl_mutex);
- ret = vfs_getattr(&file->f_path, &stat, STATX_INO,
- AT_STATX_SYNC_AS_STAT);
+ /* Drop loop_ctl_mutex while we call into the filesystem. */
+ path = lo->lo_backing_file->f_path;
+ path_get(&path);
+ mutex_unlock(&loop_ctl_mutex);
+ ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
if (!ret) {
info->lo_device = huge_encode_dev(stat.dev);
info->lo_inode = stat.ino;
info->lo_rdevice = huge_encode_dev(stat.rdev);
}
- fput(file);
+ path_put(&path);
return ret;
}
@@ -1322,10 +1395,8 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
struct loop_info64 info64;
int err;
- if (!arg) {
- mutex_unlock(&lo->lo_ctl_mutex);
+ if (!arg)
return -EINVAL;
- }
err = loop_get_status(lo, &info64);
if (!err)
err = loop_info64_to_old(&info64, &info);
@@ -1340,10 +1411,8 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
struct loop_info64 info64;
int err;
- if (!arg) {
- mutex_unlock(&lo->lo_ctl_mutex);
+ if (!arg)
return -EINVAL;
- }
err = loop_get_status(lo, &info64);
if (!err && copy_to_user(arg, &info64, sizeof(info64)))
err = -EFAULT;
@@ -1393,70 +1462,73 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
return 0;
}
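+/*
+ * Ioctls that need nothing beyond loop_ctl_mutex and take no extra
+ * references are funneled through this helper; LOOP_SET_FD and friends
+ * above manage their own locking.
+ */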
+static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
+ unsigned long arg)
+{
+ int err;
+
+ err = mutex_lock_killable(&loop_ctl_mutex);
+ if (err)
+ return err;
+ switch (cmd) {
+ case LOOP_SET_CAPACITY:
+ err = loop_set_capacity(lo);
+ break;
+ case LOOP_SET_DIRECT_IO:
+ err = loop_set_dio(lo, arg);
+ break;
+ case LOOP_SET_BLOCK_SIZE:
+ err = loop_set_block_size(lo, arg);
+ break;
+ default:
+ err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+ }
+ mutex_unlock(&loop_ctl_mutex);
+ return err;
+}
+
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct loop_device *lo = bdev->bd_disk->private_data;
int err;
- err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1);
- if (err)
- goto out_unlocked;
-
switch (cmd) {
case LOOP_SET_FD:
- err = loop_set_fd(lo, mode, bdev, arg);
- break;
+ return loop_set_fd(lo, mode, bdev, arg);
case LOOP_CHANGE_FD:
- err = loop_change_fd(lo, bdev, arg);
- break;
+ return loop_change_fd(lo, bdev, arg);
case LOOP_CLR_FD:
- /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
- err = loop_clr_fd(lo);
- if (!err)
- goto out_unlocked;
- break;
+ return loop_clr_fd(lo);
case LOOP_SET_STATUS:
err = -EPERM;
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
err = loop_set_status_old(lo,
(struct loop_info __user *)arg);
+ }
break;
case LOOP_GET_STATUS:
- err = loop_get_status_old(lo, (struct loop_info __user *) arg);
- /* loop_get_status() unlocks lo_ctl_mutex */
- goto out_unlocked;
+ return loop_get_status_old(lo, (struct loop_info __user *) arg);
case LOOP_SET_STATUS64:
err = -EPERM;
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
err = loop_set_status64(lo,
(struct loop_info64 __user *) arg);
+ }
break;
case LOOP_GET_STATUS64:
- err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
- /* loop_get_status() unlocks lo_ctl_mutex */
- goto out_unlocked;
+ return loop_get_status64(lo, (struct loop_info64 __user *) arg);
case LOOP_SET_CAPACITY:
- err = -EPERM;
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
- err = loop_set_capacity(lo);
- break;
case LOOP_SET_DIRECT_IO:
- err = -EPERM;
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
- err = loop_set_dio(lo, arg);
- break;
case LOOP_SET_BLOCK_SIZE:
- err = -EPERM;
- if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
- err = loop_set_block_size(lo, arg);
- break;
+ if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /* Fall through */
default:
- err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+ err = lo_simple_ioctl(lo, cmd, arg);
+ break;
}
- mutex_unlock(&lo->lo_ctl_mutex);
-out_unlocked:
return err;
}
@@ -1570,10 +1642,8 @@ loop_get_status_compat(struct loop_device *lo,
struct loop_info64 info64;
int err;
- if (!arg) {
- mutex_unlock(&lo->lo_ctl_mutex);
+ if (!arg)
return -EINVAL;
- }
err = loop_get_status(lo, &info64);
if (!err)
err = loop_info64_to_compat(&info64, arg);
@@ -1588,20 +1658,12 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
switch(cmd) {
case LOOP_SET_STATUS:
- err = mutex_lock_killable(&lo->lo_ctl_mutex);
- if (!err) {
- err = loop_set_status_compat(lo,
- (const struct compat_loop_info __user *)arg);
- mutex_unlock(&lo->lo_ctl_mutex);
- }
+ err = loop_set_status_compat(lo,
+ (const struct compat_loop_info __user *)arg);
break;
case LOOP_GET_STATUS:
- err = mutex_lock_killable(&lo->lo_ctl_mutex);
- if (!err) {
- err = loop_get_status_compat(lo,
- (struct compat_loop_info __user *)arg);
- /* loop_get_status() unlocks lo_ctl_mutex */
- }
+ err = loop_get_status_compat(lo,
+ (struct compat_loop_info __user *)arg);
break;
case LOOP_SET_CAPACITY:
case LOOP_CLR_FD:
@@ -1625,9 +1687,11 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
static int lo_open(struct block_device *bdev, fmode_t mode)
{
struct loop_device *lo;
- int err = 0;
+ int err;
- mutex_lock(&loop_index_mutex);
+ err = mutex_lock_killable(&loop_ctl_mutex);
+ if (err)
+ return err;
lo = bdev->bd_disk->private_data;
if (!lo) {
err = -ENXIO;
@@ -1636,26 +1700,30 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&lo->lo_refcnt);
out:
- mutex_unlock(&loop_index_mutex);
+ mutex_unlock(&loop_ctl_mutex);
return err;
}
-static void __lo_release(struct loop_device *lo)
+static void lo_release(struct gendisk *disk, fmode_t mode)
{
- int err;
+ struct loop_device *lo;
+ mutex_lock(&loop_ctl_mutex);
+ lo = disk->private_data;
if (atomic_dec_return(&lo->lo_refcnt))
- return;
+ goto out_unlock;
- mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
+ if (lo->lo_state != Lo_bound)
+ goto out_unlock;
+ lo->lo_state = Lo_rundown;
+ mutex_unlock(&loop_ctl_mutex);
/*
* In autoclear mode, stop the loop thread
* and remove configuration after last close.
*/
- err = loop_clr_fd(lo);
- if (!err)
- return;
+ __loop_clr_fd(lo, true);
+ return;
} else if (lo->lo_state == Lo_bound) {
/*
* Otherwise keep thread (if running) and config,
@@ -1665,14 +1733,8 @@ static void __lo_release(struct loop_device *lo)
blk_mq_unfreeze_queue(lo->lo_queue);
}
- mutex_unlock(&lo->lo_ctl_mutex);
-}
-
-static void lo_release(struct gendisk *disk, fmode_t mode)
-{
- mutex_lock(&loop_index_mutex);
- __lo_release(disk->private_data);
- mutex_unlock(&loop_index_mutex);
+out_unlock:
+ mutex_unlock(&loop_ctl_mutex);
}
static const struct block_device_operations lo_fops = {
@@ -1711,10 +1773,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
struct loop_device *lo = ptr;
struct loop_func_table *xfer = data;
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&loop_ctl_mutex);
if (lo->lo_encryption == xfer)
loop_release_xfer(lo);
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
return 0;
}
@@ -1759,8 +1821,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* always use the first bio's css */
#ifdef CONFIG_BLK_CGROUP
- if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
- cmd->css = rq->bio->bi_css;
+ if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
+ cmd->css = &bio_blkcg(rq->bio)->css;
css_get(cmd->css);
} else
#endif
@@ -1853,7 +1915,7 @@ static int loop_add(struct loop_device **l, int i)
goto out_free_idr;
lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
- if (IS_ERR_OR_NULL(lo->lo_queue)) {
+ if (IS_ERR(lo->lo_queue)) {
err = PTR_ERR(lo->lo_queue);
goto out_cleanup_tags;
}
@@ -1895,7 +1957,6 @@ static int loop_add(struct loop_device **l, int i)
if (!part_shift)
disk->flags |= GENHD_FL_NO_PART_SCAN;
disk->flags |= GENHD_FL_EXT_DEVT;
- mutex_init(&lo->lo_ctl_mutex);
atomic_set(&lo->lo_refcnt, 0);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
@@ -1974,7 +2035,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
struct kobject *kobj;
int err;
- mutex_lock(&loop_index_mutex);
+ mutex_lock(&loop_ctl_mutex);
err = loop_lookup(&lo, MINOR(dev) >> part_shift);
if (err < 0)
err = loop_add(&lo, MINOR(dev) >> part_shift);
@@ -1982,7 +2043,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data)
kobj = NULL;
else
kobj = get_disk_and_module(lo->lo_disk);
- mutex_unlock(&loop_index_mutex);
+ mutex_unlock(&loop_ctl_mutex);
*part = 0;
return kobj;
@@ -1992,9 +2053,13 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
unsigned long parm)
{
struct loop_device *lo;
- int ret = -ENOSYS;
+ int ret;
- mutex_lock(&loop_index_mutex);
+ ret = mutex_lock_killable(&loop_ctl_mutex);
+ if (ret)
+ return ret;
+
+ ret = -ENOSYS;
switch (cmd) {
case LOOP_CTL_ADD:
ret = loop_lookup(&lo, parm);
@@ -2008,21 +2073,15 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
ret = loop_lookup(&lo, parm);
if (ret < 0)
break;
- ret = mutex_lock_killable(&lo->lo_ctl_mutex);
- if (ret)
- break;
if (lo->lo_state != Lo_unbound) {
ret = -EBUSY;
- mutex_unlock(&lo->lo_ctl_mutex);
break;
}
if (atomic_read(&lo->lo_refcnt) > 0) {
ret = -EBUSY;
- mutex_unlock(&lo->lo_ctl_mutex);
break;
}
lo->lo_disk->private_data = NULL;
- mutex_unlock(&lo->lo_ctl_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
break;
@@ -2032,7 +2091,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
break;
ret = loop_add(&lo, -1);
}
- mutex_unlock(&loop_index_mutex);
+ mutex_unlock(&loop_ctl_mutex);
return ret;
}
@@ -2116,10 +2175,10 @@ static int __init loop_init(void)
THIS_MODULE, loop_probe, NULL, NULL);
/* pre-create number of devices given by config or max_loop */
- mutex_lock(&loop_index_mutex);
+ mutex_lock(&loop_ctl_mutex);
for (i = 0; i < nr; i++)
loop_add(&lo, i);
- mutex_unlock(&loop_index_mutex);
+ mutex_unlock(&loop_ctl_mutex);
printk(KERN_INFO "loop: module loaded\n");
return 0;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 4d42c7af7de7..af75a5ee4094 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -54,7 +54,6 @@ struct loop_device {
spinlock_t lo_lock;
int lo_state;
- struct mutex lo_ctl_mutex;
struct kthread_worker worker;
struct task_struct *worker_task;
bool use_dio;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index a7daa8acbab3..88e8440e75c3 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -168,41 +168,6 @@ static bool mtip_check_surprise_removal(struct pci_dev *pdev)
return false; /* device present */
}
-/* we have to use runtime tag to setup command header */
-static void mtip_init_cmd_header(struct request *rq)
-{
- struct driver_data *dd = rq->q->queuedata;
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
-
- /* Point the command headers at the command tables. */
- cmd->command_header = dd->port->command_list +
- (sizeof(struct mtip_cmd_hdr) * rq->tag);
- cmd->command_header_dma = dd->port->command_list_dma +
- (sizeof(struct mtip_cmd_hdr) * rq->tag);
-
- if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
- cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16);
-
- cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
-}
-
-static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
-{
- struct request *rq;
-
- if (mtip_check_surprise_removal(dd->pdev))
- return NULL;
-
- rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
- if (IS_ERR(rq))
- return NULL;
-
- /* Internal cmd isn't submitted via .queue_rq */
- mtip_init_cmd_header(rq);
-
- return blk_mq_rq_to_pdu(rq);
-}
-
static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd,
unsigned int tag)
{
@@ -1023,13 +988,14 @@ static int mtip_exec_internal_command(struct mtip_port *port,
return -EFAULT;
}
- int_cmd = mtip_get_int_command(dd);
- if (!int_cmd) {
+ if (mtip_check_surprise_removal(dd->pdev))
+ return -EFAULT;
+
+ rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
+ if (IS_ERR(rq)) {
dbg_printk(MTIP_DRV_NAME "Unable to allocate tag for PIO cmd\n");
return -EFAULT;
}
- rq = blk_mq_rq_from_pdu(int_cmd);
- rq->special = &icmd;
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1050,6 +1016,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
/* Copy the command to the command table */
+ int_cmd = blk_mq_rq_to_pdu(rq);
+ int_cmd->icmd = &icmd;
memcpy(int_cmd->command, fis, fis_len*4);
rq->timeout = timeout;
@@ -1423,23 +1391,19 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
* @dd pointer to driver_data structure
* @lba starting lba
* @len # of 512b sectors to trim
- *
- * return value
- * -ENOMEM Out of dma memory
- * -EINVAL Invalid parameters passed in, trim not supported
- * -EIO Error submitting trim request to hw
*/
-static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
- unsigned int len)
+static blk_status_t mtip_send_trim(struct driver_data *dd, unsigned int lba,
+ unsigned int len)
{
- int i, rv = 0;
u64 tlba, tlen, sect_left;
struct mtip_trim_entry *buf;
dma_addr_t dma_addr;
struct host_to_dev_fis fis;
+ blk_status_t ret = BLK_STS_OK;
+ int i;
if (!len || dd->trim_supp == false)
- return -EINVAL;
+ return BLK_STS_IOERR;
/* Trim request too big */
WARN_ON(len > (MTIP_MAX_TRIM_ENTRY_LEN * MTIP_MAX_TRIM_ENTRIES));
@@ -1454,7 +1418,7 @@ static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
buf = dmam_alloc_coherent(&dd->pdev->dev, ATA_SECT_SIZE, &dma_addr,
GFP_KERNEL);
if (!buf)
- return -ENOMEM;
+ return BLK_STS_RESOURCE;
memset(buf, 0, ATA_SECT_SIZE);
for (i = 0, sect_left = len, tlba = lba;
@@ -1463,8 +1427,8 @@ static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
tlen = (sect_left >= MTIP_MAX_TRIM_ENTRY_LEN ?
MTIP_MAX_TRIM_ENTRY_LEN :
sect_left);
- buf[i].lba = __force_bit2int cpu_to_le32(tlba);
- buf[i].range = __force_bit2int cpu_to_le16(tlen);
+ buf[i].lba = cpu_to_le32(tlba);
+ buf[i].range = cpu_to_le16(tlen);
tlba += tlen;
sect_left -= tlen;
}
@@ -1486,10 +1450,10 @@ static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
ATA_SECT_SIZE,
0,
MTIP_TRIM_TIMEOUT_MS) < 0)
- rv = -EIO;
+ ret = BLK_STS_IOERR;
dmam_free_coherent(&dd->pdev->dev, ATA_SECT_SIZE, buf, dma_addr);
- return rv;
+ return ret;
}
/*
@@ -1585,23 +1549,20 @@ static inline void fill_command_sg(struct driver_data *dd,
int n;
unsigned int dma_len;
struct mtip_cmd_sg *command_sg;
- struct scatterlist *sg = command->sg;
+ struct scatterlist *sg;
command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
- for (n = 0; n < nents; n++) {
+ for_each_sg(command->sg, sg, nents, n) {
dma_len = sg_dma_len(sg);
if (dma_len > 0x400000)
dev_err(&dd->pdev->dev,
"DMA segment length truncated\n");
- command_sg->info = __force_bit2int
- cpu_to_le32((dma_len-1) & 0x3FFFFF);
- command_sg->dba = __force_bit2int
- cpu_to_le32(sg_dma_address(sg));
- command_sg->dba_upper = __force_bit2int
+ command_sg->info = cpu_to_le32((dma_len-1) & 0x3FFFFF);
+ command_sg->dba = cpu_to_le32(sg_dma_address(sg));
+ command_sg->dba_upper =
cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
command_sg++;
- sg++;
}
}
@@ -2171,7 +2132,6 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
* @dd Pointer to the driver data structure.
* @start First sector to read.
* @nsect Number of sectors to read.
- * @nents Number of entries in scatter list for the read command.
* @tag The tag of this read command.
* @callback Pointer to the function that should be called
* when the read completes.
@@ -2183,16 +2143,20 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
* None
*/
static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
- struct mtip_cmd *command, int nents,
+ struct mtip_cmd *command,
struct blk_mq_hw_ctx *hctx)
{
+ struct mtip_cmd_hdr *hdr =
+ dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
struct host_to_dev_fis *fis;
struct mtip_port *port = dd->port;
int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
u64 start = blk_rq_pos(rq);
unsigned int nsect = blk_rq_sectors(rq);
+ unsigned int nents;
/* Map the scatter list for DMA access */
+ nents = blk_rq_map_sg(hctx->queue, rq, command->sg);
nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
prefetch(&port->flags);
@@ -2233,10 +2197,11 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq,
fis->device |= 1 << 7;
/* Populate the command header */
- command->command_header->opts =
- __force_bit2int cpu_to_le32(
- (nents << 16) | 5 | AHCI_CMD_PREFETCH);
- command->command_header->byte_count = 0;
+ hdr->ctba = cpu_to_le32(command->command_dma & 0xFFFFFFFF);
+ if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
+ hdr->ctbau = cpu_to_le32((command->command_dma >> 16) >> 16);
+ hdr->opts = cpu_to_le32((nents << 16) | 5 | AHCI_CMD_PREFETCH);
+ hdr->byte_count = 0;
command->direction = dma_dir;
@@ -2715,12 +2680,12 @@ static void mtip_softirq_done_fn(struct request *rq)
cmd->direction);
if (unlikely(cmd->unaligned))
- up(&dd->port->cmd_slot_unal);
+ atomic_inc(&dd->port->cmd_slot_unal);
blk_mq_end_request(rq, cmd->status);
}
-static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_abort_cmd(struct request *req, void *data, bool reserved)
{
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req);
struct driver_data *dd = data;
@@ -2730,14 +2695,16 @@ static void mtip_abort_cmd(struct request *req, void *data, bool reserved)
clear_bit(req->tag, dd->port->cmds_to_issue);
cmd->status = BLK_STS_IOERR;
mtip_softirq_done_fn(req);
+ return true;
}
-static void mtip_queue_cmd(struct request *req, void *data, bool reserved)
+static bool mtip_queue_cmd(struct request *req, void *data, bool reserved)
{
struct driver_data *dd = data;
set_bit(req->tag, dd->port->cmds_to_issue);
blk_abort_request(req);
+ return true;
}
/*
@@ -2803,10 +2770,7 @@ restart_eh:
blk_mq_quiesce_queue(dd->queue);
- spin_lock(dd->queue->queue_lock);
- blk_mq_tagset_busy_iter(&dd->tags,
- mtip_queue_cmd, dd);
- spin_unlock(dd->queue->queue_lock);
+ blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd);
set_bit(MTIP_PF_ISSUE_CMDS_BIT, &dd->port->flags);
@@ -3026,7 +2990,7 @@ static int mtip_hw_init(struct driver_data *dd)
else
dd->unal_qdepth = 0;
- sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth);
+ atomic_set(&dd->port->cmd_slot_unal, dd->unal_qdepth);
/* Spinlock to prevent concurrent issue */
for (i = 0; i < MTIP_MAX_SLOT_GROUPS; i++)
@@ -3531,58 +3495,24 @@ static inline bool is_se_active(struct driver_data *dd)
return false;
}
-/*
- * Block layer make request function.
- *
- * This function is called by the kernel to process a BIO for
- * the P320 device.
- *
- * @queue Pointer to the request queue. Unused other than to obtain
- * the driver data structure.
- * @rq Pointer to the request.
- *
- */
-static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static inline bool is_stopped(struct driver_data *dd, struct request *rq)
{
- struct driver_data *dd = hctx->queue->queuedata;
- struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
- unsigned int nents;
-
- if (is_se_active(dd))
- return -ENODATA;
-
- if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
- if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &dd->dd_flag))) {
- return -ENXIO;
- }
- if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
- return -ENODATA;
- }
- if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
- &dd->dd_flag) &&
- rq_data_dir(rq))) {
- return -ENODATA;
- }
- if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
- test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
- return -ENODATA;
- }
-
- if (req_op(rq) == REQ_OP_DISCARD) {
- int err;
-
- err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
- blk_mq_end_request(rq, err ? BLK_STS_IOERR : BLK_STS_OK);
- return 0;
- }
+ if (likely(!(dd->dd_flag & MTIP_DDF_STOP_IO)))
+ return false;
- /* Create the scatter list for this request. */
- nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg);
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
+ return true;
+ if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+ return true;
+ if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) &&
+ rq_data_dir(rq))
+ return true;
+ if (test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
+ return true;
+ if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+ return true;
- /* Issue the read/write. */
- mtip_hw_submit_io(dd, rq, cmd, nents, hctx);
- return 0;
+ return false;
}
static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
@@ -3603,7 +3533,7 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
cmd->unaligned = 1;
}
- if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal))
+ if (cmd->unaligned && atomic_dec_if_positive(&dd->port->cmd_slot_unal) < 0)
return true;
return false;
@@ -3613,32 +3543,33 @@ static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
struct driver_data *dd = hctx->queue->queuedata;
- struct mtip_int_cmd *icmd = rq->special;
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
+ struct mtip_int_cmd *icmd = cmd->icmd;
+ struct mtip_cmd_hdr *hdr =
+ dd->port->command_list + sizeof(struct mtip_cmd_hdr) * rq->tag;
struct mtip_cmd_sg *command_sg;
if (mtip_commands_active(dd->port))
- return BLK_STS_RESOURCE;
+ return BLK_STS_DEV_RESOURCE;
+ hdr->ctba = cpu_to_le32(cmd->command_dma & 0xFFFFFFFF);
+ if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags))
+ hdr->ctbau = cpu_to_le32((cmd->command_dma >> 16) >> 16);
/* Populate the SG list */
- cmd->command_header->opts =
- __force_bit2int cpu_to_le32(icmd->opts | icmd->fis_len);
+ hdr->opts = cpu_to_le32(icmd->opts | icmd->fis_len);
if (icmd->buf_len) {
command_sg = cmd->command + AHCI_CMD_TBL_HDR_SZ;
- command_sg->info =
- __force_bit2int cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
- command_sg->dba =
- __force_bit2int cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
+ command_sg->info = cpu_to_le32((icmd->buf_len-1) & 0x3FFFFF);
+ command_sg->dba = cpu_to_le32(icmd->buffer & 0xFFFFFFFF);
command_sg->dba_upper =
- __force_bit2int cpu_to_le32((icmd->buffer >> 16) >> 16);
+ cpu_to_le32((icmd->buffer >> 16) >> 16);
- cmd->command_header->opts |=
- __force_bit2int cpu_to_le32((1 << 16));
+ hdr->opts |= cpu_to_le32((1 << 16));
}
/* Populate the command header */
- cmd->command_header->byte_count = 0;
+ hdr->byte_count = 0;
blk_mq_start_request(rq);
mtip_issue_non_ncq_command(dd->port, rq->tag);
@@ -3648,23 +3579,25 @@ static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
+ struct driver_data *dd = hctx->queue->queuedata;
struct request *rq = bd->rq;
- int ret;
-
- mtip_init_cmd_header(rq);
+ struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
if (blk_rq_is_passthrough(rq))
return mtip_issue_reserved_cmd(hctx, rq);
if (unlikely(mtip_check_unal_depth(hctx, rq)))
- return BLK_STS_RESOURCE;
+ return BLK_STS_DEV_RESOURCE;
+
+ if (is_se_active(dd) || is_stopped(dd, rq))
+ return BLK_STS_IOERR;
blk_mq_start_request(rq);
- ret = mtip_submit_request(hctx, rq);
- if (likely(!ret))
- return BLK_STS_OK;
- return BLK_STS_IOERR;
+ if (req_op(rq) == REQ_OP_DISCARD)
+ return mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq));
+ mtip_hw_submit_io(dd, rq, cmd, hctx);
+ return BLK_STS_OK;
}
static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
@@ -3920,12 +3853,13 @@ protocol_init_error:
return rv;
}
-static void mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
+static bool mtip_no_dev_cleanup(struct request *rq, void *data, bool reserv)
{
struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->status = BLK_STS_IOERR;
blk_mq_complete_request(rq);
+ return true;
}
/*
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index e20e55dab443..abce25f27f57 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -126,8 +126,6 @@
#define MTIP_DFS_MAX_BUF_SIZE 1024
-#define __force_bit2int (unsigned int __force)
-
enum {
/* below are bit numbers in 'flags' defined in mtip_port */
MTIP_PF_IC_ACTIVE_BIT = 0, /* pio/ioctl */
@@ -174,10 +172,10 @@ enum {
struct smart_attr {
u8 attr_id;
- u16 flags;
+ __le16 flags;
u8 cur;
u8 worst;
- u32 data;
+ __le32 data;
u8 res[3];
} __packed;
@@ -200,9 +198,9 @@ struct mtip_work {
#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
struct mtip_trim_entry {
- u32 lba; /* starting lba of region */
- u16 rsvd; /* unused */
- u16 range; /* # of 512b blocks to trim */
+ __le32 lba; /* starting lba of region */
+ __le16 rsvd; /* unused */
+ __le16 range; /* # of 512b blocks to trim */
} __packed;
struct mtip_trim {
@@ -278,24 +276,24 @@ struct mtip_cmd_hdr {
* - Bit 5 Unused in this implementation.
* - Bits 4:0 Length of the command FIS in DWords (DWord = 4 bytes).
*/
- unsigned int opts;
+ __le32 opts;
/* This field is unused when using NCQ. */
union {
- unsigned int byte_count;
- unsigned int status;
+ __le32 byte_count;
+ __le32 status;
};
/*
* Lower 32 bits of the command table address associated with this
* header. The command table addresses must be 128 byte aligned.
*/
- unsigned int ctba;
+ __le32 ctba;
/*
* If 64 bit addressing is used this field is the upper 32 bits
* of the command table address associated with this command.
*/
- unsigned int ctbau;
+ __le32 ctbau;
/* Reserved and unused. */
- unsigned int res[4];
+ u32 res[4];
};
/* Command scatter gather structure (PRD). */
@@ -305,31 +303,28 @@ struct mtip_cmd_sg {
* address must be 8 byte aligned signified by bits 2:0 being
* set to 0.
*/
- unsigned int dba;
+ __le32 dba;
/*
* When 64 bit addressing is used this field is the upper
* 32 bits of the data buffer address.
*/
- unsigned int dba_upper;
+ __le32 dba_upper;
/* Unused. */
- unsigned int reserved;
+ __le32 reserved;
/*
* Bit 31: interrupt when this data block has been transferred.
* Bits 30..22: reserved
* Bits 21..0: byte count (minus 1). For P320 the byte count must be
* 8 byte aligned signified by bits 2:0 being set to 1.
*/
- unsigned int info;
+ __le32 info;
};
struct mtip_port;
+struct mtip_int_cmd;
+
/* Structure used to describe a command. */
struct mtip_cmd {
-
- struct mtip_cmd_hdr *command_header; /* ptr to command header entry */
-
- dma_addr_t command_header_dma; /* corresponding physical address */
-
void *command; /* ptr to command table entry */
dma_addr_t command_dma; /* corresponding physical address */
@@ -338,7 +333,10 @@ struct mtip_cmd {
int unaligned; /* command is unaligned on 4k boundary */
- struct scatterlist sg[MTIP_MAX_SG]; /* Scatter list entries */
+ union {
+ struct scatterlist sg[MTIP_MAX_SG]; /* Scatter list entries */
+ struct mtip_int_cmd *icmd;
+ };
int retries; /* The number of retries left for this command. */
@@ -435,8 +433,8 @@ struct mtip_port {
*/
unsigned long ic_pause_timer;
- /* Semaphore to control queue depth of unaligned IOs */
- struct semaphore cmd_slot_unal;
+ /* Counter to control queue depth of unaligned IOs */
+ atomic_t cmd_slot_unal;
/* Spinlock for working around command-issue bug. */
spinlock_t cmd_issue_lock[MTIP_MAX_SLOT_GROUPS];
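
The mtip32xx conversion above follows the pattern repeated throughout this series: with rq->special gone, per-request driver state lives in the PDU that blk-mq allocates next to each request. A minimal sketch of that pattern, with illustrative names (my_cmd and my_queue_rq are not from the driver):

    /* Sketch only: per-request driver state in the blk-mq PDU. */
    struct my_cmd {
            int retries;                    /* driver-private state */
    };

    static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
    {
            struct my_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);  /* request -> PDU */

            cmd->retries = 0;
            blk_mq_start_request(bd->rq);
            return BLK_STS_OK;
    }

The PDU space is reserved by setting tag_set.cmd_size = sizeof(struct my_cmd) before blk_mq_alloc_tag_set(); blk_mq_rq_from_pdu() performs the reverse mapping when only the PDU is at hand, as the sx8 completion path further down does.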
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4d4d6129ff66..08696f5f00bb 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -734,12 +734,13 @@ static void recv_work(struct work_struct *work)
kfree(args);
}
-static void nbd_clear_req(struct request *req, void *data, bool reserved)
+static bool nbd_clear_req(struct request *req, void *data, bool reserved)
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
cmd->status = BLK_STS_IOERR;
blk_mq_complete_request(req);
+ return true;
}
static void nbd_clear_que(struct nbd_device *nbd)
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index 7685df43f1ef..b3df2793e7cd 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -49,6 +49,7 @@ struct nullb_device {
unsigned long completion_nsec; /* time in ns to complete a request */
unsigned long cache_size; /* disk cache size in MB */
unsigned long zone_size; /* zone size in MB if device is zoned */
+ unsigned int zone_nr_conv; /* number of conventional zones */
unsigned int submit_queues; /* number of submission queues */
unsigned int home_node; /* home node for the device */
unsigned int queue_mode; /* block interface */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 09339203dfba..62c9654b9ce8 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -188,6 +188,10 @@ static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two. Default: 256");
+static unsigned int g_zone_nr_conv;
+module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
+MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
+
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
@@ -293,6 +297,7 @@ NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
NULLB_DEVICE_ATTR(zoned, bool);
NULLB_DEVICE_ATTR(zone_size, ulong);
+NULLB_DEVICE_ATTR(zone_nr_conv, uint);
static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
@@ -407,6 +412,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_badblocks,
&nullb_device_attr_zoned,
&nullb_device_attr_zone_size,
+ &nullb_device_attr_zone_nr_conv,
NULL,
};
@@ -520,6 +526,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->use_per_node_hctx = g_use_per_node_hctx;
dev->zoned = g_zoned;
dev->zone_size = g_zone_size;
+ dev->zone_nr_conv = g_zone_nr_conv;
return dev;
}
@@ -635,14 +642,9 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
-static void null_softirq_done_fn(struct request *rq)
+static void null_complete_rq(struct request *rq)
{
- struct nullb *nullb = rq->q->queuedata;
-
- if (nullb->dev->queue_mode == NULL_Q_MQ)
- end_cmd(blk_mq_rq_to_pdu(rq));
- else
- end_cmd(rq->special);
+ end_cmd(blk_mq_rq_to_pdu(rq));
}
static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
@@ -1350,7 +1352,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
- .complete = null_softirq_done_fn,
+ .complete = null_complete_rq,
.timeout = null_timeout_rq,
};
@@ -1657,8 +1659,7 @@ static int null_add_dev(struct nullb_device *dev)
}
null_init_queues(nullb);
} else if (dev->queue_mode == NULL_Q_BIO) {
- nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node,
- NULL);
+ nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
if (!nullb->q) {
rv = -ENOMEM;
goto out_cleanup_queues;
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index c0b0e4a3fa8f..5d1c261a2cfd 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -29,7 +29,25 @@ int null_zone_init(struct nullb_device *dev)
if (!dev->zones)
return -ENOMEM;
- for (i = 0; i < dev->nr_zones; i++) {
+ if (dev->zone_nr_conv >= dev->nr_zones) {
+ dev->zone_nr_conv = dev->nr_zones - 1;
+ pr_info("null_blk: changed the number of conventional zones to %u\n",
+ dev->zone_nr_conv);
+ }
+
+ for (i = 0; i < dev->zone_nr_conv; i++) {
+ struct blk_zone *zone = &dev->zones[i];
+
+ zone->start = sector;
+ zone->len = dev->zone_size_sects;
+ zone->wp = zone->start + zone->len;
+ zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
+ zone->cond = BLK_ZONE_COND_NOT_WP;
+
+ sector += dev->zone_size_sects;
+ }
+
+ for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
struct blk_zone *zone = &dev->zones[i];
zone->start = zone->wp = sector;
@@ -98,6 +116,8 @@ void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
if (zone->wp == zone->start + zone->len)
zone->cond = BLK_ZONE_COND_FULL;
break;
+ case BLK_ZONE_COND_NOT_WP:
+ break;
default:
/* Invalid zone condition */
cmd->error = BLK_STS_IOERR;
@@ -111,6 +131,11 @@ void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
unsigned int zno = null_zone_no(dev, sector);
struct blk_zone *zone = &dev->zones[zno];
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
+ cmd->error = BLK_STS_IOERR;
+ return;
+ }
+
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
}
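
The conventional zones initialized above carry no write pointer: their condition stays BLK_ZONE_COND_NOT_WP, the write path accepts writes anywhere inside them (the new NOT_WP case simply breaks), and resetting them fails with BLK_STS_IOERR. A hypothetical helper, not part of null_blk, summarizing that rule:

    /* Sketch: only write-pointer zones can be reset. */
    static bool zone_supports_reset(const struct blk_zone *z)
    {
            return z->type != BLK_ZONE_TYPE_CONVENTIONAL;
    }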
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index ae4971e5d9a8..0ff9b12d0e35 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -242,6 +242,11 @@ struct pd_unit {
static struct pd_unit pd[PD_UNITS];
+struct pd_req {
+ /* for REQ_OP_DRV_IN: */
+ enum action (*func)(struct pd_unit *disk);
+};
+
static char pd_scratch[512]; /* scratch block buffer */
static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
@@ -502,8 +507,9 @@ static enum action do_pd_io_start(void)
static enum action pd_special(void)
{
- enum action (*func)(struct pd_unit *) = pd_req->special;
- return func(pd_current);
+ struct pd_req *req = blk_mq_rq_to_pdu(pd_req);
+
+ return req->func(pd_current);
}
static int pd_next_buf(void)
@@ -767,12 +773,14 @@ static int pd_special_command(struct pd_unit *disk,
enum action (*func)(struct pd_unit *disk))
{
struct request *rq;
+ struct pd_req *req;
rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
+ req = blk_mq_rq_to_pdu(rq);
- rq->special = func;
+ req->func = func;
blk_execute_rq(disk->gd->queue, disk->gd, rq, 0);
blk_put_request(rq);
return 0;
@@ -892,9 +900,21 @@ static void pd_probe_drive(struct pd_unit *disk)
disk->gd = p;
p->private_data = disk;
- p->queue = blk_mq_init_sq_queue(&disk->tag_set, &pd_mq_ops, 2,
- BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ memset(&disk->tag_set, 0, sizeof(disk->tag_set));
+ disk->tag_set.ops = &pd_mq_ops;
+ disk->tag_set.cmd_size = sizeof(struct pd_req);
+ disk->tag_set.nr_hw_queues = 1;
+ disk->tag_set.nr_maps = 1;
+ disk->tag_set.queue_depth = 2;
+ disk->tag_set.numa_node = NUMA_NO_NODE;
+ disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+
+ if (blk_mq_alloc_tag_set(&disk->tag_set))
+ return;
+
+ p->queue = blk_mq_init_queue(&disk->tag_set);
if (IS_ERR(p->queue)) {
+ blk_mq_free_tag_set(&disk->tag_set);
p->queue = NULL;
return;
}
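
pd stops using blk_mq_init_sq_queue() above because that helper provides no way to reserve PDU space; the open-coded sequence is the helper's body plus cmd_size. Condensed to its essentials (a sketch, error handling elided):

    /* Sketch: single-queue tag set with per-request PDU space. */
    memset(&set, 0, sizeof(set));
    set.ops = &pd_mq_ops;
    set.cmd_size = sizeof(struct pd_req);  /* the field the helper cannot set */
    set.nr_hw_queues = 1;
    set.nr_maps = 1;
    set.queue_depth = 2;
    set.numa_node = NUMA_NO_NODE;
    set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;

    if (blk_mq_alloc_tag_set(&set) == 0)
            q = blk_mq_init_queue(&set);   /* free the tag set if this fails */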
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 9381f4e3b221..f5a71023f76c 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2203,9 +2203,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
* Some CDRW drives cannot handle writes larger than one packet,
* even if the size is a multiple of the packet size.
*/
- spin_lock_irq(q->queue_lock);
blk_queue_max_hw_sectors(q, pd->settings.size);
- spin_unlock_irq(q->queue_lock);
set_bit(PACKET_WRITABLE, &pd->flags);
} else {
pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 2459dcc04b1c..a10d5736d8f7 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -181,6 +181,7 @@ struct skd_request_context {
struct fit_completion_entry_v1 completion;
struct fit_comp_error_info err_info;
+ int retries;
blk_status_t status;
};
@@ -382,11 +383,12 @@ static void skd_log_skreq(struct skd_device *skdev,
* READ/WRITE REQUESTS
*****************************************************************************
*/
-static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
+static bool skd_inc_in_flight(struct request *rq, void *data, bool reserved)
{
int *count = data;
(*count)++;	/* bump the count itself, not the local pointer */
+ return true;
}
static int skd_in_flight(struct skd_device *skdev)
@@ -494,6 +496,11 @@ static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
+ if (!(req->rq_flags & RQF_DONTPREP)) {
+ skreq->retries = 0;
+ req->rq_flags |= RQF_DONTPREP;
+ }
+
blk_mq_start_request(req);
WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
@@ -1425,7 +1432,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
break;
case SKD_CHECK_STATUS_REQUEUE_REQUEST:
- if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
+ if (++skreq->retries < SKD_MAX_RETRIES) {
skd_log_skreq(skdev, skreq, "retry");
blk_mq_requeue_request(req, true);
break;
@@ -1887,13 +1894,13 @@ static void skd_isr_fwstate(struct skd_device *skdev)
skd_skdev_state_to_str(skdev->state), skdev->state);
}
-static void skd_recover_request(struct request *req, void *data, bool reserved)
+static bool skd_recover_request(struct request *req, void *data, bool reserved)
{
struct skd_device *const skdev = data;
struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
if (skreq->state != SKD_REQ_STATE_BUSY)
- return;
+ return true;
skd_log_skreq(skdev, skreq, "recover");
@@ -1904,6 +1911,7 @@ static void skd_recover_request(struct request *req, void *data, bool reserved)
skreq->state = SKD_REQ_STATE_IDLE;
skreq->status = BLK_STS_IOERR;
blk_mq_complete_request(req);
+ return true;
}
static void skd_recover_requests(struct skd_device *skdev)
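
mtip32xx, nbd, and skd all update their tag-iteration callbacks above for the busy_tag_iter_fn signature, which now returns bool: true to keep iterating, false to stop early. A minimal counting sketch, assuming an initialized tag set (names are illustrative):

    /* Sketch: count in-flight requests with the bool-returning iterator. */
    static bool count_inflight(struct request *rq, void *data, bool reserved)
    {
            unsigned int *count = data;

            (*count)++;     /* dereference; a bare count++ would only move the pointer */
            return true;    /* continue the iteration */
    }

    static unsigned int inflight(struct blk_mq_tag_set *set)
    {
            unsigned int count = 0;

            blk_mq_tagset_busy_iter(set, count_inflight, &count);
            return count;
    }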
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index b54fa6726303..9c0553dd13e7 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
@@ -45,6 +45,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1
+#define VDC_MAX_RETRIES 10
+
static struct workqueue_struct *sunvdc_wq;
struct vdc_req_entry {
@@ -66,9 +68,10 @@ struct vdc_port {
u64 max_xfer_size;
u32 vdisk_block_size;
+ u32 drain;
u64 ldc_timeout;
- struct timer_list ldc_reset_timer;
+ struct delayed_work ldc_reset_timer_work;
struct work_struct ldc_reset_work;
/* The server fills these in for us in the disk attribute
@@ -80,12 +83,14 @@ struct vdc_port {
u8 vdisk_mtype;
u32 vdisk_phys_blksz;
+ struct blk_mq_tag_set tag_set;
+
char disk_name[32];
};
static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
-static void vdc_ldc_reset_timer(struct timer_list *t);
+static void vdc_ldc_reset_timer_work(struct work_struct *work);
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
@@ -175,11 +180,8 @@ static void vdc_blk_queue_start(struct vdc_port *port)
* handshake completes, so check for initial handshake before we've
* allocated a disk.
*/
- if (port->disk && blk_queue_stopped(port->disk->queue) &&
- vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) {
- blk_start_queue(port->disk->queue);
- }
-
+ if (port->disk && vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
+ blk_mq_start_hw_queues(port->disk->queue);
}
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
@@ -197,7 +199,7 @@ static void vdc_handshake_complete(struct vio_driver_state *vio)
{
struct vdc_port *port = to_vdc_port(vio);
- del_timer(&port->ldc_reset_timer);
+ cancel_delayed_work(&port->ldc_reset_timer_work);
vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
vdc_blk_queue_start(port);
}
@@ -320,7 +322,7 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
rqe->req = NULL;
- __blk_end_request(req, (desc->status ? BLK_STS_IOERR : 0), desc->size);
+ blk_mq_end_request(req, desc->status ? BLK_STS_IOERR : 0);
vdc_blk_queue_start(port);
}
@@ -431,6 +433,7 @@ static int __vdc_tx_trigger(struct vdc_port *port)
.end_idx = dr->prod,
};
int err, delay;
+ int retries = 0;
hdr.seq = dr->snd_nxt;
delay = 1;
@@ -443,6 +446,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
udelay(delay);
if ((delay <<= 1) > 128)
delay = 128;
+ if (retries++ > VDC_MAX_RETRIES)
+ break;
} while (err == -EAGAIN);
if (err == -ENOTCONN)
@@ -525,29 +530,40 @@ static int __send_request(struct request *req)
return err;
}
-static void do_vdc_request(struct request_queue *rq)
+static blk_status_t vdc_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request *req;
+ struct vdc_port *port = hctx->queue->queuedata;
+ struct vio_dring_state *dr;
+ unsigned long flags;
- while ((req = blk_peek_request(rq)) != NULL) {
- struct vdc_port *port;
- struct vio_dring_state *dr;
+ dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- port = req->rq_disk->private_data;
- dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- if (unlikely(vdc_tx_dring_avail(dr) < 1))
- goto wait;
+ blk_mq_start_request(bd->rq);
- blk_start_request(req);
+ spin_lock_irqsave(&port->vio.lock, flags);
- if (__send_request(req) < 0) {
- blk_requeue_request(rq, req);
-wait:
- /* Avoid pointless unplugs. */
- blk_stop_queue(rq);
- break;
- }
+ /*
+ * A drain is in progress, so just end the request with an error
+ */
+ if (unlikely(port->drain)) {
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ return BLK_STS_IOERR;
+ }
+
+ if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ blk_mq_stop_hw_queue(hctx);
+ return BLK_STS_DEV_RESOURCE;
+ }
+
+ if (__send_request(bd->rq) < 0) {
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ return BLK_STS_IOERR;
}
+
+ spin_unlock_irqrestore(&port->vio.lock, flags);
+ return BLK_STS_OK;
}
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
@@ -759,6 +775,31 @@ static void vdc_port_down(struct vdc_port *port)
vio_ldc_free(&port->vio);
}
+static const struct blk_mq_ops vdc_mq_ops = {
+ .queue_rq = vdc_queue_rq,
+};
+
+static void cleanup_queue(struct request_queue *q)
+{
+ struct vdc_port *port = q->queuedata;
+
+ blk_cleanup_queue(q);
+ blk_mq_free_tag_set(&port->tag_set);
+}
+
+static struct request_queue *init_queue(struct vdc_port *port)
+{
+ struct request_queue *q;
+
+ q = blk_mq_init_sq_queue(&port->tag_set, &vdc_mq_ops, VDC_TX_RING_SIZE,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(q))
+ return q;
+
+ q->queuedata = port;
+ return q;
+}
+
static int probe_disk(struct vdc_port *port)
{
struct request_queue *q;
@@ -796,17 +837,17 @@ static int probe_disk(struct vdc_port *port)
(u64)geom.num_sec);
}
- q = blk_init_queue(do_vdc_request, &port->vio.lock);
- if (!q) {
+ q = init_queue(port);
+ if (IS_ERR(q)) {
printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
port->vio.name);
- return -ENOMEM;
+ return PTR_ERR(q);
}
g = alloc_disk(1 << PARTITION_SHIFT);
if (!g) {
printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
port->vio.name);
- blk_cleanup_queue(q);
+ cleanup_queue(q);
return -ENOMEM;
}
@@ -981,7 +1022,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
*/
ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
- timer_setup(&port->ldc_reset_timer, vdc_ldc_reset_timer, 0);
+ INIT_DELAYED_WORK(&port->ldc_reset_timer_work, vdc_ldc_reset_timer_work);
INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
@@ -1034,18 +1075,14 @@ static int vdc_port_remove(struct vio_dev *vdev)
struct vdc_port *port = dev_get_drvdata(&vdev->dev);
if (port) {
- unsigned long flags;
-
- spin_lock_irqsave(&port->vio.lock, flags);
- blk_stop_queue(port->disk->queue);
- spin_unlock_irqrestore(&port->vio.lock, flags);
+ blk_mq_stop_hw_queues(port->disk->queue);
flush_work(&port->ldc_reset_work);
- del_timer_sync(&port->ldc_reset_timer);
+ cancel_delayed_work_sync(&port->ldc_reset_timer_work);
del_timer_sync(&port->vio.timer);
del_gendisk(port->disk);
- blk_cleanup_queue(port->disk->queue);
+ cleanup_queue(port->disk->queue);
put_disk(port->disk);
port->disk = NULL;
@@ -1080,32 +1117,46 @@ static void vdc_requeue_inflight(struct vdc_port *port)
}
rqe->req = NULL;
- blk_requeue_request(port->disk->queue, req);
+ blk_mq_requeue_request(req, false);
}
}
static void vdc_queue_drain(struct vdc_port *port)
{
- struct request *req;
+ struct request_queue *q = port->disk->queue;
+
+ /*
+ * Mark the queue as draining, then freeze/quiesce to ensure
+ * that all existing requests are seen in ->queue_rq() and killed
+ */
+ port->drain = 1;
+ spin_unlock_irq(&port->vio.lock);
- while ((req = blk_fetch_request(port->disk->queue)) != NULL)
- __blk_end_request_all(req, BLK_STS_IOERR);
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
+
+ spin_lock_irq(&port->vio.lock);
+ port->drain = 0;
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
}
-static void vdc_ldc_reset_timer(struct timer_list *t)
+static void vdc_ldc_reset_timer_work(struct work_struct *work)
{
- struct vdc_port *port = from_timer(port, t, ldc_reset_timer);
- struct vio_driver_state *vio = &port->vio;
- unsigned long flags;
+ struct vdc_port *port;
+ struct vio_driver_state *vio;
- spin_lock_irqsave(&vio->lock, flags);
+ port = container_of(work, struct vdc_port, ldc_reset_timer_work.work);
+ vio = &port->vio;
+
+ spin_lock_irq(&vio->lock);
if (!(port->vio.hs_state & VIO_HS_COMPLETE)) {
pr_warn(PFX "%s ldc down %llu seconds, draining queue\n",
port->disk_name, port->ldc_timeout);
vdc_queue_drain(port);
vdc_blk_queue_start(port);
}
- spin_unlock_irqrestore(&vio->lock, flags);
+ spin_unlock_irq(&vio->lock);
}
static void vdc_ldc_reset_work(struct work_struct *work)
@@ -1129,7 +1180,7 @@ static void vdc_ldc_reset(struct vdc_port *port)
assert_spin_locked(&port->vio.lock);
pr_warn(PFX "%s ldc link reset\n", port->disk_name);
- blk_stop_queue(port->disk->queue);
+ blk_mq_stop_hw_queues(port->disk->queue);
vdc_requeue_inflight(port);
vdc_port_down(port);
@@ -1146,7 +1197,7 @@ static void vdc_ldc_reset(struct vdc_port *port)
}
if (port->ldc_timeout)
- mod_timer(&port->ldc_reset_timer,
+ mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
round_jiffies(jiffies + HZ * port->ldc_timeout));
mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
return;
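
vdc_queue_drain() above composes three generic blk-mq operations: a driver flag makes ->queue_rq() fail each request, a freeze waits out everything in flight, and a quiesce blocks further dispatch. The core ordering, as a sketch (the driver additionally drops vio.lock around the blocking calls):

    /* Sketch: flush all queued I/O out through ->queue_rq() as errors. */
    port->drain = 1;             /* ->queue_rq() now returns BLK_STS_IOERR */
    blk_mq_freeze_queue(q);      /* wait for outstanding requests to finish */
    blk_mq_quiesce_queue(q);     /* stop new ->queue_rq() invocations */

    port->drain = 0;
    blk_mq_unquiesce_queue(q);
    blk_mq_unfreeze_queue(q);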
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 064b8c5c7a32..4478eb7efee0 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -243,7 +243,6 @@ struct carm_port {
unsigned int port_no;
struct gendisk *disk;
struct carm_host *host;
- struct blk_mq_tag_set tag_set;
/* attached device characteristics */
u64 capacity;
@@ -254,13 +253,10 @@ struct carm_port {
};
struct carm_request {
- unsigned int tag;
int n_elem;
unsigned int msg_type;
unsigned int msg_subtype;
unsigned int msg_bucket;
- struct request *rq;
- struct carm_port *port;
struct scatterlist sg[CARM_MAX_REQ_SG];
};
@@ -291,9 +287,6 @@ struct carm_host {
unsigned int wait_q_cons;
struct request_queue *wait_q[CARM_MAX_WAIT_Q];
- unsigned int n_msgs;
- u64 msg_alloc;
- struct carm_request req[CARM_MAX_REQ];
void *msg_base;
dma_addr_t msg_dma;
@@ -478,10 +471,10 @@ static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
}
static int carm_send_msg(struct carm_host *host,
- struct carm_request *crq)
+ struct carm_request *crq, unsigned tag)
{
void __iomem *mmio = host->mmio;
- u32 msg = (u32) carm_ref_msg_dma(host, crq->tag);
+ u32 msg = (u32) carm_ref_msg_dma(host, tag);
u32 cm_bucket = crq->msg_bucket;
u32 tmp;
int rc = 0;
@@ -506,99 +499,24 @@ static int carm_send_msg(struct carm_host *host,
return rc;
}
-static struct carm_request *carm_get_request(struct carm_host *host)
-{
- unsigned int i;
-
- /* obey global hardware limit on S/G entries */
- if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG))
- return NULL;
-
- for (i = 0; i < max_queue; i++)
- if ((host->msg_alloc & (1ULL << i)) == 0) {
- struct carm_request *crq = &host->req[i];
- crq->port = NULL;
- crq->n_elem = 0;
-
- host->msg_alloc |= (1ULL << i);
- host->n_msgs++;
-
- assert(host->n_msgs <= CARM_MAX_REQ);
- sg_init_table(crq->sg, CARM_MAX_REQ_SG);
- return crq;
- }
-
- DPRINTK("no request available, returning NULL\n");
- return NULL;
-}
-
-static int carm_put_request(struct carm_host *host, struct carm_request *crq)
-{
- assert(crq->tag < max_queue);
-
- if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0))
- return -EINVAL; /* tried to clear a tag that was not active */
-
- assert(host->hw_sg_used >= crq->n_elem);
-
- host->msg_alloc &= ~(1ULL << crq->tag);
- host->hw_sg_used -= crq->n_elem;
- host->n_msgs--;
-
- return 0;
-}
-
-static struct carm_request *carm_get_special(struct carm_host *host)
-{
- unsigned long flags;
- struct carm_request *crq = NULL;
- struct request *rq;
- int tries = 5000;
-
- while (tries-- > 0) {
- spin_lock_irqsave(&host->lock, flags);
- crq = carm_get_request(host);
- spin_unlock_irqrestore(&host->lock, flags);
-
- if (crq)
- break;
- msleep(10);
- }
-
- if (!crq)
- return NULL;
-
- rq = blk_get_request(host->oob_q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(rq)) {
- spin_lock_irqsave(&host->lock, flags);
- carm_put_request(host, crq);
- spin_unlock_irqrestore(&host->lock, flags);
- return NULL;
- }
-
- crq->rq = rq;
- return crq;
-}
-
static int carm_array_info (struct carm_host *host, unsigned int array_idx)
{
struct carm_msg_ioctl *ioc;
- unsigned int idx;
u32 msg_data;
dma_addr_t msg_dma;
struct carm_request *crq;
+ struct request *rq;
int rc;
- crq = carm_get_special(host);
- if (!crq) {
+ rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(rq)) {
rc = -ENOMEM;
goto err_out;
}
+ crq = blk_mq_rq_to_pdu(rq);
- idx = crq->tag;
-
- ioc = carm_ref_msg(host, idx);
- msg_dma = carm_ref_msg_dma(host, idx);
+ ioc = carm_ref_msg(host, rq->tag);
+ msg_dma = carm_ref_msg_dma(host, rq->tag);
msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));
crq->msg_type = CARM_MSG_ARRAY;
@@ -612,7 +530,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
ioc->type = CARM_MSG_ARRAY;
ioc->subtype = CARM_ARRAY_INFO;
ioc->array_id = (u8) array_idx;
- ioc->handle = cpu_to_le32(TAG_ENCODE(idx));
+ ioc->handle = cpu_to_le32(TAG_ENCODE(rq->tag));
ioc->data_addr = cpu_to_le32(msg_data);
spin_lock_irq(&host->lock);
@@ -620,9 +538,8 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
host->state == HST_DEV_SCAN);
spin_unlock_irq(&host->lock);
- DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
- crq->rq->special = crq;
- blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
+ DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
+ blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
return 0;
@@ -637,21 +554,21 @@ typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);
static int carm_send_special (struct carm_host *host, carm_sspc_t func)
{
+ struct request *rq;
struct carm_request *crq;
struct carm_msg_ioctl *ioc;
void *mem;
- unsigned int idx, msg_size;
+ unsigned int msg_size;
int rc;
- crq = carm_get_special(host);
- if (!crq)
+ rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
+ if (IS_ERR(rq))
return -ENOMEM;
+ crq = blk_mq_rq_to_pdu(rq);
- idx = crq->tag;
+ mem = carm_ref_msg(host, rq->tag);
- mem = carm_ref_msg(host, idx);
-
- msg_size = func(host, idx, mem);
+ msg_size = func(host, rq->tag, mem);
ioc = mem;
crq->msg_type = ioc->type;
@@ -660,9 +577,8 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
BUG_ON(rc < 0);
crq->msg_bucket = (u32) rc;
- DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
- crq->rq->special = crq;
- blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
+ DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
+ blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);
return 0;
}
@@ -744,19 +660,6 @@ static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
sizeof(struct carm_fw_ver);
}
-static inline void carm_end_request_queued(struct carm_host *host,
- struct carm_request *crq,
- blk_status_t error)
-{
- struct request *req = crq->rq;
- int rc;
-
- blk_mq_end_request(req, error);
-
- rc = carm_put_request(host, crq);
- assert(rc == 0);
-}
-
static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
{
unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
@@ -791,101 +694,50 @@ static inline void carm_round_robin(struct carm_host *host)
}
}
-static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
- blk_status_t error)
-{
- carm_end_request_queued(host, crq, error);
- if (max_queue == 1)
- carm_round_robin(host);
- else if ((host->n_msgs <= CARM_MSG_LOW_WATER) &&
- (host->hw_sg_used <= CARM_SG_LOW_WATER)) {
- carm_round_robin(host);
- }
-}
-
-static blk_status_t carm_oob_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static inline enum dma_data_direction carm_rq_dir(struct request *rq)
{
- struct request_queue *q = hctx->queue;
- struct carm_host *host = q->queuedata;
- struct carm_request *crq;
- int rc;
-
- blk_mq_start_request(bd->rq);
-
- spin_lock_irq(&host->lock);
-
- crq = bd->rq->special;
- assert(crq != NULL);
- assert(crq->rq == bd->rq);
-
- crq->n_elem = 0;
-
- DPRINTK("send req\n");
- rc = carm_send_msg(host, crq);
- if (rc) {
- carm_push_q(host, q);
- spin_unlock_irq(&host->lock);
- return BLK_STS_DEV_RESOURCE;
- }
-
- spin_unlock_irq(&host->lock);
- return BLK_STS_OK;
+ return op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}
static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request_queue *q = hctx->queue;
+ struct request *rq = bd->rq;
struct carm_port *port = q->queuedata;
struct carm_host *host = port->host;
+ struct carm_request *crq = blk_mq_rq_to_pdu(rq);
struct carm_msg_rw *msg;
- struct carm_request *crq;
- struct request *rq = bd->rq;
struct scatterlist *sg;
- int writing = 0, pci_dir, i, n_elem, rc;
- u32 tmp;
+ int i, n_elem = 0, rc;
unsigned int msg_size;
+ u32 tmp;
+
+ crq->n_elem = 0;
+ sg_init_table(crq->sg, CARM_MAX_REQ_SG);
blk_mq_start_request(rq);
spin_lock_irq(&host->lock);
-
- crq = carm_get_request(host);
- if (!crq) {
- carm_push_q(host, q);
- spin_unlock_irq(&host->lock);
- return BLK_STS_DEV_RESOURCE;
- }
- crq->rq = rq;
-
- if (rq_data_dir(rq) == WRITE) {
- writing = 1;
- pci_dir = DMA_TO_DEVICE;
- } else {
- pci_dir = DMA_FROM_DEVICE;
- }
+ if (req_op(rq) == REQ_OP_DRV_OUT)
+ goto send_msg;
/* get scatterlist from block layer */
sg = &crq->sg[0];
n_elem = blk_rq_map_sg(q, rq, sg);
- if (n_elem <= 0) {
- /* request with no s/g entries? */
- carm_end_rq(host, crq, BLK_STS_IOERR);
- spin_unlock_irq(&host->lock);
- return BLK_STS_IOERR;
- }
+ if (n_elem <= 0)
+ goto out_ioerr;
/* map scatterlist to PCI bus addresses */
- n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, pci_dir);
- if (n_elem <= 0) {
- /* request with no s/g entries? */
- carm_end_rq(host, crq, BLK_STS_IOERR);
- spin_unlock_irq(&host->lock);
- return BLK_STS_IOERR;
- }
+ n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, carm_rq_dir(rq));
+ if (n_elem <= 0)
+ goto out_ioerr;
+
+ /* obey global hardware limit on S/G entries */
+ if (host->hw_sg_used >= CARM_MAX_HOST_SG - n_elem)
+ goto out_resource;
+
crq->n_elem = n_elem;
- crq->port = port;
host->hw_sg_used += n_elem;
/*
@@ -893,9 +745,9 @@ static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
*/
VPRINTK("build msg\n");
- msg = (struct carm_msg_rw *) carm_ref_msg(host, crq->tag);
+ msg = (struct carm_msg_rw *) carm_ref_msg(host, rq->tag);
- if (writing) {
+ if (rq_data_dir(rq) == WRITE) {
msg->type = CARM_MSG_WRITE;
crq->msg_type = CARM_MSG_WRITE;
} else {
@@ -906,7 +758,7 @@ static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
msg->id = port->port_no;
msg->sg_count = n_elem;
msg->sg_type = SGT_32BIT;
- msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag));
+ msg->handle = cpu_to_le32(TAG_ENCODE(rq->tag));
msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
tmp = (blk_rq_pos(rq) >> 16) >> 16;
msg->lba_high = cpu_to_le16( (u16) tmp );
@@ -923,22 +775,28 @@ static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
rc = carm_lookup_bucket(msg_size);
BUG_ON(rc < 0);
crq->msg_bucket = (u32) rc;
-
+send_msg:
/*
* queue read/write message to hardware
*/
-
- VPRINTK("send msg, tag == %u\n", crq->tag);
- rc = carm_send_msg(host, crq);
+ VPRINTK("send msg, tag == %u\n", rq->tag);
+ rc = carm_send_msg(host, crq, rq->tag);
if (rc) {
- carm_put_request(host, crq);
- carm_push_q(host, q);
- spin_unlock_irq(&host->lock);
- return BLK_STS_DEV_RESOURCE;
+ host->hw_sg_used -= n_elem;
+ goto out_resource;
}
spin_unlock_irq(&host->lock);
return BLK_STS_OK;
+out_resource:
+ dma_unmap_sg(&host->pdev->dev, &crq->sg[0], n_elem, carm_rq_dir(rq));
+ carm_push_q(host, q);
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_DEV_RESOURCE;
+out_ioerr:
+ carm_round_robin(host);
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_IOERR;
}
static void carm_handle_array_info(struct carm_host *host,
@@ -954,8 +812,6 @@ static void carm_handle_array_info(struct carm_host *host,
DPRINTK("ENTER\n");
- carm_end_rq(host, crq, error);
-
if (error)
goto out;
if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
@@ -1011,8 +867,6 @@ static void carm_handle_scan_chan(struct carm_host *host,
DPRINTK("ENTER\n");
- carm_end_rq(host, crq, error);
-
if (error) {
new_state = HST_ERROR;
goto out;
@@ -1040,8 +894,6 @@ static void carm_handle_generic(struct carm_host *host,
{
DPRINTK("ENTER\n");
- carm_end_rq(host, crq, error);
-
assert(host->state == cur_state);
if (error)
host->state = HST_ERROR;
@@ -1050,28 +902,12 @@ static void carm_handle_generic(struct carm_host *host,
schedule_work(&host->fsm_task);
}
-static inline void carm_handle_rw(struct carm_host *host,
- struct carm_request *crq, blk_status_t error)
-{
- int pci_dir;
-
- VPRINTK("ENTER\n");
-
- if (rq_data_dir(crq->rq) == WRITE)
- pci_dir = DMA_TO_DEVICE;
- else
- pci_dir = DMA_FROM_DEVICE;
-
- dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem, pci_dir);
-
- carm_end_rq(host, crq, error);
-}
-
static inline void carm_handle_resp(struct carm_host *host,
__le32 ret_handle_le, u32 status)
{
u32 handle = le32_to_cpu(ret_handle_le);
unsigned int msg_idx;
+ struct request *rq;
struct carm_request *crq;
blk_status_t error = (status == RMSG_OK) ? 0 : BLK_STS_IOERR;
u8 *mem;
@@ -1087,13 +923,15 @@ static inline void carm_handle_resp(struct carm_host *host,
msg_idx = TAG_DECODE(handle);
VPRINTK("tag == %u\n", msg_idx);
- crq = &host->req[msg_idx];
+ rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx);
+ crq = blk_mq_rq_to_pdu(rq);
/* fast path */
if (likely(crq->msg_type == CARM_MSG_READ ||
crq->msg_type == CARM_MSG_WRITE)) {
- carm_handle_rw(host, crq, error);
- return;
+ dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem,
+ carm_rq_dir(rq));
+ goto done;
}
mem = carm_ref_msg(host, msg_idx);
@@ -1103,7 +941,7 @@ static inline void carm_handle_resp(struct carm_host *host,
switch (crq->msg_subtype) {
case CARM_IOC_SCAN_CHAN:
carm_handle_scan_chan(host, crq, mem, error);
- break;
+ goto done;
default:
/* unknown / invalid response */
goto err_out;
@@ -1116,11 +954,11 @@ static inline void carm_handle_resp(struct carm_host *host,
case MISC_ALLOC_MEM:
carm_handle_generic(host, crq, error,
HST_ALLOC_BUF, HST_SYNC_TIME);
- break;
+ goto done;
case MISC_SET_TIME:
carm_handle_generic(host, crq, error,
HST_SYNC_TIME, HST_GET_FW_VER);
- break;
+ goto done;
case MISC_GET_FW_VER: {
struct carm_fw_ver *ver = (struct carm_fw_ver *)
(mem + sizeof(struct carm_msg_get_fw_ver));
@@ -1130,7 +968,7 @@ static inline void carm_handle_resp(struct carm_host *host,
}
carm_handle_generic(host, crq, error,
HST_GET_FW_VER, HST_PORT_SCAN);
- break;
+ goto done;
}
default:
/* unknown / invalid response */
@@ -1161,7 +999,13 @@ static inline void carm_handle_resp(struct carm_host *host,
err_out:
printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
- carm_end_rq(host, crq, BLK_STS_IOERR);
+ error = BLK_STS_IOERR;
+done:
+ host->hw_sg_used -= crq->n_elem;
+ blk_mq_end_request(blk_mq_rq_from_pdu(crq), error);
+
+ if (host->hw_sg_used <= CARM_SG_LOW_WATER)
+ carm_round_robin(host);
}
static inline void carm_handle_responses(struct carm_host *host)
@@ -1491,78 +1335,56 @@ static int carm_init_host(struct carm_host *host)
return 0;
}
-static const struct blk_mq_ops carm_oob_mq_ops = {
- .queue_rq = carm_oob_queue_rq,
-};
-
static const struct blk_mq_ops carm_mq_ops = {
.queue_rq = carm_queue_rq,
};
-static int carm_init_disks(struct carm_host *host)
+static int carm_init_disk(struct carm_host *host, unsigned int port_no)
{
- unsigned int i;
- int rc = 0;
+ struct carm_port *port = &host->port[port_no];
+ struct gendisk *disk;
+ struct request_queue *q;
- for (i = 0; i < CARM_MAX_PORTS; i++) {
- struct gendisk *disk;
- struct request_queue *q;
- struct carm_port *port;
+ port->host = host;
+ port->port_no = port_no;
- port = &host->port[i];
- port->host = host;
- port->port_no = i;
+ disk = alloc_disk(CARM_MINORS_PER_MAJOR);
+ if (!disk)
+ return -ENOMEM;
- disk = alloc_disk(CARM_MINORS_PER_MAJOR);
- if (!disk) {
- rc = -ENOMEM;
- break;
- }
+ port->disk = disk;
+ sprintf(disk->disk_name, DRV_NAME "/%u",
+ (unsigned int)host->id * CARM_MAX_PORTS + port_no);
+ disk->major = host->major;
+ disk->first_minor = port_no * CARM_MINORS_PER_MAJOR;
+ disk->fops = &carm_bd_ops;
+ disk->private_data = port;
- port->disk = disk;
- sprintf(disk->disk_name, DRV_NAME "/%u",
- (unsigned int) (host->id * CARM_MAX_PORTS) + i);
- disk->major = host->major;
- disk->first_minor = i * CARM_MINORS_PER_MAJOR;
- disk->fops = &carm_bd_ops;
- disk->private_data = port;
-
- q = blk_mq_init_sq_queue(&port->tag_set, &carm_mq_ops,
- max_queue, BLK_MQ_F_SHOULD_MERGE);
- if (IS_ERR(q)) {
- rc = PTR_ERR(q);
- break;
- }
- disk->queue = q;
- blk_queue_max_segments(q, CARM_MAX_REQ_SG);
- blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
+ q = blk_mq_init_queue(&host->tag_set);
+ if (IS_ERR(q))
+ return PTR_ERR(q);
- q->queuedata = port;
- }
+ blk_queue_max_segments(q, CARM_MAX_REQ_SG);
+ blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
- return rc;
+ q->queuedata = port;
+ disk->queue = q;
+ return 0;
}
-static void carm_free_disks(struct carm_host *host)
+static void carm_free_disk(struct carm_host *host, unsigned int port_no)
{
- unsigned int i;
-
- for (i = 0; i < CARM_MAX_PORTS; i++) {
- struct carm_port *port = &host->port[i];
- struct gendisk *disk = port->disk;
+ struct carm_port *port = &host->port[port_no];
+ struct gendisk *disk = port->disk;
- if (disk) {
- struct request_queue *q = disk->queue;
+ if (!disk)
+ return;
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- if (q) {
- blk_mq_free_tag_set(&port->tag_set);
- blk_cleanup_queue(q);
- }
- put_disk(disk);
- }
- }
+ if (disk->flags & GENHD_FL_UP)
+ del_gendisk(disk);
+ if (disk->queue)
+ blk_cleanup_queue(disk->queue);
+ put_disk(disk);
}
static int carm_init_shm(struct carm_host *host)
@@ -1618,9 +1440,6 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&host->fsm_task, carm_fsm_task);
init_completion(&host->probe_comp);
- for (i = 0; i < ARRAY_SIZE(host->req); i++)
- host->req[i].tag = i;
-
host->mmio = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!host->mmio) {
@@ -1637,14 +1456,26 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
- q = blk_mq_init_sq_queue(&host->tag_set, &carm_oob_mq_ops, 1,
- BLK_MQ_F_NO_SCHED);
+ memset(&host->tag_set, 0, sizeof(host->tag_set));
+ host->tag_set.ops = &carm_mq_ops;
+ host->tag_set.cmd_size = sizeof(struct carm_request);
+ host->tag_set.nr_hw_queues = 1;
+ host->tag_set.nr_maps = 1;
+ host->tag_set.queue_depth = max_queue;
+ host->tag_set.numa_node = NUMA_NO_NODE;
+ host->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+
+ rc = blk_mq_alloc_tag_set(&host->tag_set);
+ if (rc)
+ goto err_out_dma_free;
+
+ q = blk_mq_init_queue(&host->tag_set);
if (IS_ERR(q)) {
- printk(KERN_ERR DRV_NAME "(%s): OOB queue alloc failure\n",
- pci_name(pdev));
rc = PTR_ERR(q);
+ blk_mq_free_tag_set(&host->tag_set);
goto err_out_dma_free;
}
+
host->oob_q = q;
q->queuedata = host;
@@ -1667,9 +1498,11 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (host->flags & FL_DYN_MAJOR)
host->major = rc;
- rc = carm_init_disks(host);
- if (rc)
- goto err_out_blkdev_disks;
+ for (i = 0; i < CARM_MAX_PORTS; i++) {
+ rc = carm_init_disk(host, i);
+ if (rc)
+ goto err_out_blkdev_disks;
+ }
pci_set_master(pdev);
@@ -1699,7 +1532,8 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_free_irq:
free_irq(pdev->irq, host);
err_out_blkdev_disks:
- carm_free_disks(host);
+ for (i = 0; i < CARM_MAX_PORTS; i++)
+ carm_free_disk(host, i);
unregister_blkdev(host->major, host->name);
err_out_free_majors:
if (host->major == 160)
@@ -1724,6 +1558,7 @@ err_out:
static void carm_remove_one (struct pci_dev *pdev)
{
struct carm_host *host = pci_get_drvdata(pdev);
+ unsigned int i;
if (!host) {
printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
@@ -1732,7 +1567,8 @@ static void carm_remove_one (struct pci_dev *pdev)
}
free_irq(pdev->irq, host);
- carm_free_disks(host);
+ for (i = 0; i < CARM_MAX_PORTS; i++)
+ carm_free_disk(host, i);
unregister_blkdev(host->major, host->name);
if (host->major == 160)
clear_bit(0, &carm_major_alloc);
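
With sx8's private tag allocator removed, the handle in each hardware response decodes to a blk-mq tag, and the shared tag set maps it back to the request and its PDU. The lookup in isolation:

    /* Sketch: hardware tag -> request -> driver PDU on completion. */
    struct request *rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx);
    struct carm_request *crq = blk_mq_rq_to_pdu(rq);

    blk_mq_end_request(rq, error);  /* completes the request and frees the tag */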
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index be3e3ab79950..aa035cf8a51d 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -888,8 +888,7 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
card->biotail = &card->bio;
spin_lock_init(&card->lock);
- card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE,
- &card->lock);
+ card->queue = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
if (!card->queue)
goto failed_alloc;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 086c6bb12baa..912c4265e592 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -214,6 +214,20 @@ static void virtblk_done(struct virtqueue *vq)
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
+static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct virtio_blk *vblk = hctx->queue->queuedata;
+ struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
+ bool kick;
+
+ spin_lock_irq(&vq->lock);
+ kick = virtqueue_kick_prepare(vq->vq);
+ spin_unlock_irq(&vq->lock);
+
+ if (kick)
+ virtqueue_notify(vq->vq);
+}
+
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -624,7 +638,7 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
- return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
+ return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
}
#ifdef CONFIG_VIRTIO_BLK_SCSI
@@ -638,6 +652,7 @@ static void virtblk_initialize_rq(struct request *req)
static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
+ .commit_rqs = virtio_commit_rqs,
.complete = virtblk_request_done,
.init_request = virtblk_init_request,
#ifdef CONFIG_VIRTIO_BLK_SCSI
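
The new ->commit_rqs hook pairs with bd->last batching: a driver that skips its doorbell while more requests are expected needs blk-mq to flush the batch when a dispatch run ends early, and that is what ->commit_rqs is called for. A sketch of the pairing, where my_ring_doorbell() is a hypothetical helper:

    /* Sketch: batch doorbell writes across a dispatch run. */
    static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
    {
            /* ...hand bd->rq to the hardware queue here... */
            if (bd->last)
                    my_ring_doorbell(hctx);  /* hypothetical doorbell kick */
            return BLK_STS_OK;
    }

    static void my_commit_rqs(struct blk_mq_hw_ctx *hctx)
    {
            /* Called when a run ended without a bd->last dispatch. */
            my_ring_doorbell(hctx);
    }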
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index e3e4d929e74f..d5d6e6e5da3b 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -33,6 +33,8 @@
#define VERSION "0.1"
#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
+#define BDADDR_BCM20702A1 (&(bdaddr_t) {{0x00, 0x00, 0xa0, 0x02, 0x70, 0x20}})
+#define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}})
#define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
@@ -64,15 +66,23 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
* The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller
* with no configured address.
*
+ * The address 20:70:02:A0:00:00 indicates a BCM20702A1 controller
+ * with no configured address.
+ *
* The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller
* with waiting for configuration state.
*
* The address 43:30:B1:00:00:00 indicates a BCM4330B1 controller
* with waiting for configuration state.
+ *
+ * The address 43:43:A0:12:1F:AC indicates a BCM43430A0 controller
+ * with no configured address.
*/
if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM20702A1) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
- !bacmp(&bda->bdaddr, BDADDR_BCM4330B1)) {
+ !bacmp(&bda->bdaddr, BDADDR_BCM4330B1) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM43430A0)) {
bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
&bda->bdaddr);
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
@@ -330,6 +340,8 @@ static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x2209, "BCM43430A1" }, /* 001.002.009 */
{ 0x6119, "BCM4345C0" }, /* 003.001.025 */
{ 0x230f, "BCM4356A2" }, /* 001.003.015 */
+ { 0x220e, "BCM20702A1" }, /* 001.002.014 */
+ { 0x4217, "BCM4329B1" }, /* 002.002.023 */
{ }
};
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7439a7eb50ac..4761499db9ee 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -344,6 +344,7 @@ static const struct usb_device_id blacklist_table[] = {
/* Intel Bluetooth devices */
{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
+ { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@@ -1935,10 +1936,8 @@ static void btusb_intel_bootup(struct btusb_data *data, const void *ptr,
if (len != sizeof(*evt))
return;
- if (test_and_clear_bit(BTUSB_BOOTING, &data->flags)) {
- smp_mb__after_atomic();
+ if (test_and_clear_bit(BTUSB_BOOTING, &data->flags))
wake_up_bit(&data->flags, BTUSB_BOOTING);
- }
}
static void btusb_intel_secure_send_result(struct btusb_data *data,
@@ -1953,10 +1952,8 @@ static void btusb_intel_secure_send_result(struct btusb_data *data,
set_bit(BTUSB_FIRMWARE_FAILED, &data->flags);
if (test_and_clear_bit(BTUSB_DOWNLOADING, &data->flags) &&
- test_bit(BTUSB_FIRMWARE_LOADED, &data->flags)) {
- smp_mb__after_atomic();
+ test_bit(BTUSB_FIRMWARE_LOADED, &data->flags))
wake_up_bit(&data->flags, BTUSB_DOWNLOADING);
- }
}
static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -2055,6 +2052,35 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return -EILSEQ;
}
+static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
+ struct intel_boot_params *params,
+ char *fw_name, size_t len,
+ const char *suffix)
+{
+ switch (ver->hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ snprintf(fw_name, len, "intel/ibt-%u-%u.%s",
+ le16_to_cpu(ver->hw_variant),
+ le16_to_cpu(params->dev_revid),
+ suffix);
+ break;
+ case 0x11: /* JfP */
+ case 0x12: /* ThP */
+ case 0x13: /* HrP */
+ case 0x14: /* CcP */
+ snprintf(fw_name, len, "intel/ibt-%u-%u-%u.%s",
+ le16_to_cpu(ver->hw_variant),
+ le16_to_cpu(ver->hw_revision),
+ le16_to_cpu(ver->fw_revision),
+ suffix);
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
static int btusb_setup_intel_new(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -2106,7 +2132,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
case 0x11: /* JfP */
case 0x12: /* ThP */
case 0x13: /* HrP */
- case 0x14: /* QnJ, IcP */
+ case 0x14: /* CcP */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
@@ -2190,23 +2216,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
* ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
*
*/
- switch (ver.hw_variant) {
- case 0x0b: /* SfP */
- case 0x0c: /* WsP */
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params.dev_revid));
- break;
- case 0x11: /* JfP */
- case 0x12: /* ThP */
- case 0x13: /* HrP */
- case 0x14: /* QnJ, IcP */
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(ver.hw_revision),
- le16_to_cpu(ver.fw_revision));
- break;
- default:
+ err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
+ sizeof(fwname), "sfi");
+ if (!err) {
bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
@@ -2222,23 +2234,9 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Save the DDC file name for later use to apply once the firmware
* downloading is done.
*/
- switch (ver.hw_variant) {
- case 0x0b: /* SfP */
- case 0x0c: /* WsP */
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params.dev_revid));
- break;
- case 0x11: /* JfP */
- case 0x12: /* ThP */
- case 0x13: /* HrP */
- case 0x14: /* QnJ, IcP */
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(ver.hw_revision),
- le16_to_cpu(ver.fw_revision));
- break;
- default:
+ err = btusb_setup_intel_new_get_fw_name(&ver, &params, fwname,
+ sizeof(fwname), "ddc");
+ if (!err) {
bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index ddbd8c6a0ceb..ddbe518c3e5b 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -31,6 +31,7 @@
#include <linux/property.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/tty.h>
@@ -51,8 +52,16 @@
#define BCM_LM_DIAG_PKT 0x07
#define BCM_LM_DIAG_SIZE 63
+#define BCM_TYPE49_PKT 0x31
+#define BCM_TYPE49_SIZE 0
+
+#define BCM_TYPE52_PKT 0x34
+#define BCM_TYPE52_SIZE 0
+
#define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */
+#define BCM_NUM_SUPPLIES 2
+
/**
* struct bcm_device - device driver resources
* @serdev_hu: HCI UART controller struct
@@ -71,8 +80,10 @@
* @btlp: Apple ACPI method to toggle BT_WAKE pin ("Bluetooth Low Power")
* @btpu: Apple ACPI method to drive BT_REG_ON pin high ("Bluetooth Power Up")
* @btpd: Apple ACPI method to drive BT_REG_ON pin low ("Bluetooth Power Down")
- * @clk: clock used by Bluetooth device
- * @clk_enabled: whether @clk is prepared and enabled
+ * @txco_clk: external reference frequency clock used by Bluetooth device
+ * @lpo_clk: external LPO clock used by Bluetooth device
+ * @supplies: VBAT and VDDIO supplies used by Bluetooth device
+ * @res_enabled: whether clocks and supplies are prepared and enabled
* @init_speed: default baudrate of Bluetooth device;
* the host UART is initially set to this baudrate so that
* it can configure the Bluetooth device for @oper_speed
@@ -102,8 +113,10 @@ struct bcm_device {
int gpio_int_idx;
#endif
- struct clk *clk;
- bool clk_enabled;
+ struct clk *txco_clk;
+ struct clk *lpo_clk;
+ struct regulator_bulk_data supplies[BCM_NUM_SUPPLIES];
+ bool res_enabled;
u32 init_speed;
u32 oper_speed;
@@ -214,32 +227,59 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
{
int err;
- if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled) {
- err = clk_prepare_enable(dev->clk);
+ if (powered && !dev->res_enabled) {
+ err = regulator_bulk_enable(BCM_NUM_SUPPLIES, dev->supplies);
if (err)
return err;
+
+ /* LPO clock needs to be 32.768 kHz */
+ err = clk_set_rate(dev->lpo_clk, 32768);
+ if (err) {
+ dev_err(dev->dev, "Could not set LPO clock rate\n");
+ goto err_regulator_disable;
+ }
+
+ err = clk_prepare_enable(dev->lpo_clk);
+ if (err)
+ goto err_regulator_disable;
+
+ err = clk_prepare_enable(dev->txco_clk);
+ if (err)
+ goto err_lpo_clk_disable;
}
err = dev->set_shutdown(dev, powered);
if (err)
- goto err_clk_disable;
+ goto err_txco_clk_disable;
err = dev->set_device_wakeup(dev, powered);
if (err)
goto err_revert_shutdown;
- if (!powered && !IS_ERR(dev->clk) && dev->clk_enabled)
- clk_disable_unprepare(dev->clk);
+ if (!powered && dev->res_enabled) {
+ clk_disable_unprepare(dev->txco_clk);
+ clk_disable_unprepare(dev->lpo_clk);
+ regulator_bulk_disable(BCM_NUM_SUPPLIES, dev->supplies);
+ }
+
+ /* wait for device to power on and come out of reset */
+ usleep_range(10000, 20000);
- dev->clk_enabled = powered;
+ dev->res_enabled = powered;
return 0;
err_revert_shutdown:
dev->set_shutdown(dev, !powered);
-err_clk_disable:
- if (powered && !IS_ERR(dev->clk) && !dev->clk_enabled)
- clk_disable_unprepare(dev->clk);
+err_txco_clk_disable:
+ if (powered && !dev->res_enabled)
+ clk_disable_unprepare(dev->txco_clk);
+err_lpo_clk_disable:
+ if (powered && !dev->res_enabled)
+ clk_disable_unprepare(dev->lpo_clk);
+err_regulator_disable:
+ if (powered && !dev->res_enabled)
+ regulator_bulk_disable(BCM_NUM_SUPPLIES, dev->supplies);
return err;
}
@@ -561,12 +601,28 @@ finalize:
.lsize = 0, \
.maxlen = BCM_NULL_SIZE
+#define BCM_RECV_TYPE49 \
+ .type = BCM_TYPE49_PKT, \
+ .hlen = BCM_TYPE49_SIZE, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = BCM_TYPE49_SIZE
+
+#define BCM_RECV_TYPE52 \
+ .type = BCM_TYPE52_PKT, \
+ .hlen = BCM_TYPE52_SIZE, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = BCM_TYPE52_SIZE
+
static const struct h4_recv_pkt bcm_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag },
{ BCM_RECV_NULL, .recv = hci_recv_diag },
+ { BCM_RECV_TYPE49, .recv = hci_recv_diag },
+ { BCM_RECV_TYPE52, .recv = hci_recv_diag },
};
static int bcm_recv(struct hci_uart *hu, const void *data, int count)
@@ -896,16 +952,57 @@ static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered)
return 0;
}
+/* Try a bunch of names for TXCO */
+static struct clk *bcm_get_txco(struct device *dev)
+{
+ struct clk *clk;
+
+ /* New explicit name */
+ clk = devm_clk_get(dev, "txco");
+ if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
+ return clk;
+
+ /* Deprecated name */
+ clk = devm_clk_get(dev, "extclk");
+ if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
+ return clk;
+
+ /* Original code used no name at all */
+ return devm_clk_get(dev, NULL);
+}
+
static int bcm_get_resources(struct bcm_device *dev)
{
const struct dmi_system_id *dmi_id;
+ int err;
dev->name = dev_name(dev->dev);
if (x86_apple_machine && !bcm_apple_get_resources(dev))
return 0;
- dev->clk = devm_clk_get(dev->dev, NULL);
+ dev->txco_clk = bcm_get_txco(dev->dev);
+
+ /* Handle deferred probing */
+ if (dev->txco_clk == ERR_PTR(-EPROBE_DEFER))
+ return PTR_ERR(dev->txco_clk);
+
+ /* Ignore all other errors as before */
+ if (IS_ERR(dev->txco_clk))
+ dev->txco_clk = NULL;
+
+ dev->lpo_clk = devm_clk_get(dev->dev, "lpo");
+ if (dev->lpo_clk == ERR_PTR(-EPROBE_DEFER))
+ return PTR_ERR(dev->lpo_clk);
+
+ if (IS_ERR(dev->lpo_clk))
+ dev->lpo_clk = NULL;
+
+ /* Check if we accidentally fetched the lpo clock twice */
+ if (dev->lpo_clk && clk_is_match(dev->lpo_clk, dev->txco_clk)) {
+ devm_clk_put(dev->dev, dev->txco_clk);
+ dev->txco_clk = NULL;
+ }
dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
GPIOD_OUT_LOW);
@@ -920,6 +1017,13 @@ static int bcm_get_resources(struct bcm_device *dev)
dev->set_device_wakeup = bcm_gpio_set_device_wakeup;
dev->set_shutdown = bcm_gpio_set_shutdown;
+ dev->supplies[0].supply = "vbat";
+ dev->supplies[1].supply = "vddio";
+ err = devm_regulator_bulk_get(dev->dev, BCM_NUM_SUPPLIES,
+ dev->supplies);
+ if (err)
+ return err;
+
/* IRQ can be declared in ACPI table as Interrupt or GpioInt */
if (dev->irq <= 0) {
struct gpio_desc *gpio;
@@ -1314,6 +1418,8 @@ static void bcm_serdev_remove(struct serdev_device *serdev)
#ifdef CONFIG_OF
static const struct of_device_id bcm_bluetooth_of_match[] = {
+ { .compatible = "brcm,bcm20702a1" },
+ { .compatible = "brcm,bcm4330-bt" },
{ .compatible = "brcm,bcm43438-bt" },
{ },
};
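
bcm_get_resources() treats both clocks as optional while still honouring probe deferral, a common devm pattern: only -EPROBE_DEFER propagates, any other lookup error degrades to running without the clock. Condensed:

    /* Sketch: optional clock lookup that preserves -EPROBE_DEFER. */
    clk = devm_clk_get(dev, "lpo");
    if (clk == ERR_PTR(-EPROBE_DEFER))
            return -EPROBE_DEFER;  /* provider not ready; probe will retry */
    if (IS_ERR(clk))
            clk = NULL;            /* absent clock: proceed without it */

This is safe because the clk consumer API accepts a NULL clock as a no-op, so the later clk_prepare_enable()/clk_disable_unprepare() calls need no extra guards.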
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 8eede1197cd2..069d1c8fde73 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -115,6 +115,8 @@ struct h5_vnd {
int (*setup)(struct h5 *h5);
void (*open)(struct h5 *h5);
void (*close)(struct h5 *h5);
+ int (*suspend)(struct h5 *h5);
+ int (*resume)(struct h5 *h5);
const struct acpi_gpio_mapping *acpi_gpio_map;
};
@@ -841,6 +843,28 @@ static void h5_serdev_remove(struct serdev_device *serdev)
hci_uart_unregister_device(&h5->serdev_hu);
}
+static int __maybe_unused h5_serdev_suspend(struct device *dev)
+{
+ struct h5 *h5 = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (h5->vnd && h5->vnd->suspend)
+ ret = h5->vnd->suspend(h5);
+
+ return ret;
+}
+
+static int __maybe_unused h5_serdev_resume(struct device *dev)
+{
+ struct h5 *h5 = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (h5->vnd && h5->vnd->resume)
+ ret = h5->vnd->resume(h5);
+
+ return ret;
+}
+
#ifdef CONFIG_BT_HCIUART_RTL
static int h5_btrtl_setup(struct h5 *h5)
{
@@ -907,6 +931,56 @@ static void h5_btrtl_close(struct h5 *h5)
gpiod_set_value_cansleep(h5->enable_gpio, 0);
}
+/* Suspend/resume support. On many devices the RTL BT device loses power during
+ * suspend/resume, causing it to lose its firmware and all state. So we simply
+ * turn it off on suspend and reprobe on resume. This mirrors how RTL devices
+ * are handled in the USB driver, where the USB_QUIRK_RESET_RESUME is used which
+ * also causes a reprobe on resume.
+ */
+static int h5_btrtl_suspend(struct h5 *h5)
+{
+ serdev_device_set_flow_control(h5->hu->serdev, false);
+ gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
+ gpiod_set_value_cansleep(h5->enable_gpio, 0);
+ return 0;
+}
+
+struct h5_btrtl_reprobe {
+ struct device *dev;
+ struct work_struct work;
+};
+
+static void h5_btrtl_reprobe_worker(struct work_struct *work)
+{
+ struct h5_btrtl_reprobe *reprobe =
+ container_of(work, struct h5_btrtl_reprobe, work);
+ int ret;
+
+ ret = device_reprobe(reprobe->dev);
+ if (ret && ret != -EPROBE_DEFER)
+ dev_err(reprobe->dev, "Reprobe error %d\n", ret);
+
+ put_device(reprobe->dev);
+ kfree(reprobe);
+ module_put(THIS_MODULE);
+}
+
+static int h5_btrtl_resume(struct h5 *h5)
+{
+ struct h5_btrtl_reprobe *reprobe;
+
+ reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL);
+ if (!reprobe)
+ return -ENOMEM;
+
+ __module_get(THIS_MODULE);
+
+ INIT_WORK(&reprobe->work, h5_btrtl_reprobe_worker);
+ reprobe->dev = get_device(&h5->hu->serdev->dev);
+ queue_work(system_long_wq, &reprobe->work);
+ return 0;
+}
+
static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
@@ -921,6 +995,8 @@ static struct h5_vnd rtl_vnd = {
.setup = h5_btrtl_setup,
.open = h5_btrtl_open,
.close = h5_btrtl_close,
+ .suspend = h5_btrtl_suspend,
+ .resume = h5_btrtl_resume,
.acpi_gpio_map = acpi_btrtl_gpios,
};
#endif
@@ -935,12 +1011,17 @@ static const struct acpi_device_id h5_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
#endif
+static const struct dev_pm_ops h5_serdev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(h5_serdev_suspend, h5_serdev_resume)
+};
+
static struct serdev_device_driver h5_serdev_driver = {
.probe = h5_serdev_probe,
.remove = h5_serdev_remove,
.driver = {
.name = "hci_uart_h5",
.acpi_match_table = ACPI_PTR(h5_acpi_match),
+ .pm = &h5_serdev_pm_ops,
},
};
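Two lifetime details make the reprobe-on-resume scheme above safe: get_device() keeps the serdev alive until device_reprobe() has run, and __module_get() pins this module while the work sits on system_long_wq; the worker drops both references when it finishes. A condensed sketch of the same defer-reprobe idiom for any driver whose hardware loses state across suspend (all names illustrative):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct async_reprobe {
	struct device *dev;
	struct work_struct work;
};

static void async_reprobe_fn(struct work_struct *work)
{
	struct async_reprobe *r = container_of(work, struct async_reprobe,
					       work);
	int ret = device_reprobe(r->dev);

	if (ret && ret != -EPROBE_DEFER)	/* deferral retries itself */
		dev_err(r->dev, "reprobe failed: %d\n", ret);

	put_device(r->dev);		/* pairs with get_device() below */
	kfree(r);
	module_put(THIS_MODULE);	/* pairs with __module_get() */
}

static int schedule_async_reprobe(struct device *dev)
{
	struct async_reprobe *r = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return -ENOMEM;

	__module_get(THIS_MODULE);
	INIT_WORK(&r->work, async_reprobe_fn);
	r->dev = get_device(dev);
	/* system_long_wq: a full reprobe (firmware load) can take a while */
	queue_work(system_long_wq, &r->work);
	return 0;
}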
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 46ace321bf60..f31410526c57 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -596,8 +596,8 @@ static int intel_setup(struct hci_uart *hu)
* is in bootloader mode or if it already has operational firmware
* loaded.
*/
- err = btintel_read_version(hdev, &ver);
- if (err)
+ err = btintel_read_version(hdev, &ver);
+ if (err)
return err;
/* The hardware platform number has a fixed value of 0x37 and
@@ -909,10 +909,8 @@ static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
set_bit(STATE_FIRMWARE_FAILED, &intel->flags);
if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) &&
- test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) {
- smp_mb__after_atomic();
+ test_bit(STATE_FIRMWARE_LOADED, &intel->flags))
wake_up_bit(&intel->flags, STATE_DOWNLOADING);
- }
/* When switching to the operational firmware the device
* sends a vendor specific event indicating that the bootup
@@ -920,10 +918,8 @@ static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
*/
} else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 &&
skb->data[2] == 0x02) {
- if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) {
- smp_mb__after_atomic();
+ if (test_and_clear_bit(STATE_BOOTING, &intel->flags))
wake_up_bit(&intel->flags, STATE_BOOTING);
- }
}
recv:
return hci_recv_frame(hdev, skb);
@@ -960,17 +956,13 @@ static int intel_recv_lpm(struct hci_dev *hdev, struct sk_buff *skb)
break;
case LPM_OP_SUSPEND_ACK:
set_bit(STATE_SUSPENDED, &intel->flags);
- if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) {
- smp_mb__after_atomic();
+ if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags))
wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
- }
break;
case LPM_OP_RESUME_ACK:
clear_bit(STATE_SUSPENDED, &intel->flags);
- if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) {
- smp_mb__after_atomic();
+ if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags))
wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION);
- }
break;
default:
bt_dev_err(hdev, "Unknown LPM opcode (%02x)", lpm->opcode);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index c445aa9ac511..490abba94363 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -333,9 +333,6 @@ int hci_uart_register_device(struct hci_uart *hu,
if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags))
set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);
- if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
- set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
-
if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
hdev->dev_type = HCI_AMP;
else
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index 6767d965c36c..256b0b1d0f26 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -1,10 +1,7 @@
-/**
+// SPDX-License-Identifier: GPL-2.0
+/*
* Copyright (c) 2010-2012 Broadcom. All rights reserved.
* Copyright (c) 2013 Lubomir Rintel
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License ("GPL")
- * version 2, as published by the Free Software Foundation.
*/
#include <linux/hw_random.h>
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 677618e6f1f7..dc8603d34320 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2187,7 +2187,7 @@ static void shutdown_smi(void *send_info)
* handlers might have been running before we freed the
* interrupt.
*/
- synchronize_sched();
+ synchronize_rcu();
/*
* Timeouts are stopped, now make sure the interrupts are off
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 2eb70e76ed35..38c6d1af6d1c 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -265,7 +265,7 @@
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
-#include <crypto/chacha20.h>
+#include <crypto/chacha.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
@@ -431,11 +431,10 @@ static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
-#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
-static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE]);
+#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
+static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
+ __u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -863,7 +862,7 @@ static int crng_fast_load(const char *cp, size_t len)
}
p = (unsigned char *) &primary_crng.state[4];
while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
- p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
+ p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
cp++; crng_init_cnt++; len--;
}
spin_unlock_irqrestore(&primary_crng.lock, flags);
@@ -895,7 +894,7 @@ static int crng_slow_load(const char *cp, size_t len)
unsigned long flags;
static unsigned char lfsr = 1;
unsigned char tmp;
- unsigned i, max = CHACHA20_KEY_SIZE;
+ unsigned i, max = CHACHA_KEY_SIZE;
const char * src_buf = cp;
char * dest_buf = (char *) &primary_crng.state[4];
@@ -913,8 +912,8 @@ static int crng_slow_load(const char *cp, size_t len)
lfsr >>= 1;
if (tmp & 1)
lfsr ^= 0xE1;
- tmp = dest_buf[i % CHACHA20_KEY_SIZE];
- dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
+ tmp = dest_buf[i % CHACHA_KEY_SIZE];
+ dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
lfsr += (tmp << 3) | (tmp >> 5);
}
spin_unlock_irqrestore(&primary_crng.lock, flags);
@@ -926,7 +925,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
- __u8 block[CHACHA20_BLOCK_SIZE];
+ __u8 block[CHACHA_BLOCK_SIZE];
__u32 key[8];
} buf;
@@ -937,7 +936,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
} else {
_extract_crng(&primary_crng, buf.block);
_crng_backtrack_protect(&primary_crng, buf.block,
- CHACHA20_KEY_SIZE);
+ CHACHA_KEY_SIZE);
}
spin_lock_irqsave(&crng->lock, flags);
for (i = 0; i < 8; i++) {
@@ -973,7 +972,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA20_BLOCK_SIZE])
+ __u8 out[CHACHA_BLOCK_SIZE])
{
unsigned long v, flags;
@@ -990,7 +989,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
+static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
struct crng_state *crng = NULL;
@@ -1008,14 +1007,14 @@ static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+ __u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
unsigned long flags;
__u32 *s, *d;
int i;
used = round_up(used, sizeof(__u32));
- if (used + CHACHA20_KEY_SIZE > CHACHA20_BLOCK_SIZE) {
+ if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
extract_crng(tmp);
used = 0;
}
@@ -1027,7 +1026,7 @@ static void _crng_backtrack_protect(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
+static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
struct crng_state *crng = NULL;
@@ -1042,8 +1041,8 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
- ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
- __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
+ ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
+ __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1057,7 +1056,7 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
}
extract_crng(tmp);
- i = min_t(int, nbytes, CHACHA20_BLOCK_SIZE);
+ i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
if (copy_to_user(buf, tmp, i)) {
ret = -EFAULT;
break;
@@ -1622,14 +1621,14 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
+ __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
trace_get_random_bytes(nbytes, _RET_IP_);
- while (nbytes >= CHACHA20_BLOCK_SIZE) {
+ while (nbytes >= CHACHA_BLOCK_SIZE) {
extract_crng(buf);
- buf += CHACHA20_BLOCK_SIZE;
- nbytes -= CHACHA20_BLOCK_SIZE;
+ buf += CHACHA_BLOCK_SIZE;
+ nbytes -= CHACHA_BLOCK_SIZE;
}
if (nbytes > 0) {
@@ -1637,7 +1636,7 @@ static void _get_random_bytes(void *buf, int nbytes)
memcpy(buf, tmp, nbytes);
crng_backtrack_protect(tmp, nbytes);
} else
- crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
+ crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
memzero_explicit(tmp, sizeof(tmp));
}
@@ -2208,8 +2207,8 @@ struct ctl_table random_table[] = {
struct batched_entropy {
union {
- u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
- u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
+ u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
+ u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
};
unsigned int position;
};
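The header rename is mechanical: include/crypto/chacha.h is now shared with the other ChaCha variants, but the constants keep their ChaCha20 values, so none of the arithmetic in this file changes. A compile-time sanity sketch of the sizes the code above relies on (values taken to be those of include/crypto/chacha.h):

#include <crypto/chacha.h>
#include <linux/build_bug.h>

static inline void chacha_rename_sanity(void)
{
	BUILD_BUG_ON(CHACHA_KEY_SIZE != 32);	/* 256-bit key */
	BUILD_BUG_ON(CHACHA_BLOCK_SIZE != 64);	/* keystream block, bytes */
	/* Hence CRNG_INIT_CNT_THRESH = 2 * 32 = 64 bytes of fast-load
	 * input, and one batched block yields 64 / 8 = 8 u64s or
	 * 64 / 4 = 16 u32s of entropy.
	 */
}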
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 81cdb4eaca07..e5b2fe80eab4 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -283,10 +283,19 @@ config COMMON_CLK_STM32H7
---help---
Support for stm32h7 SoC family clocks
+config COMMON_CLK_BD718XX
+ tristate "Clock driver for ROHM BD718x7 PMIC"
+ depends on MFD_ROHM_BD718XX
+ help
+ This driver supports ROHM BD71837 and ROHM BD71847
+ PMICs clock gates.
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
+source "drivers/clk/imx/Kconfig"
source "drivers/clk/imgtec/Kconfig"
+source "drivers/clk/imx/Kconfig"
source "drivers/clk/ingenic/Kconfig"
source "drivers/clk/keystone/Kconfig"
source "drivers/clk/mediatek/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 72be7a38cff1..8a9440a97500 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -21,6 +21,7 @@ endif
obj-$(CONFIG_MACH_ASM9260) += clk-asm9260.o
obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o
obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o
+obj-$(CONFIG_COMMON_CLK_BD718XX) += clk-bd718x7.o
obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o
obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o
@@ -71,7 +72,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += davinci/
obj-$(CONFIG_H8300) += h8300/
obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-y += imgtec/
-obj-$(CONFIG_ARCH_MXC) += imx/
+obj-y += imx/
obj-y += ingenic/
obj-$(CONFIG_ARCH_K3) += keystone/
obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c
index f225ad29b110..2a2c7569336a 100644
--- a/drivers/clk/bcm/clk-bcm2835-aux.c
+++ b/drivers/clk/bcm/clk-bcm2835-aux.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -79,4 +70,4 @@ builtin_platform_driver(bcm2835_aux_clk_driver);
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_DESCRIPTION("BCM2835 auxiliary peripheral clock driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 7bef0666ae7e..9fcae932e082 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2010,2015 Broadcom
* Copyright (C) 2012 Stephen Warren
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
/**
@@ -2206,4 +2196,4 @@ builtin_platform_driver(bcm2835_clk_driver);
MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
MODULE_DESCRIPTION("BCM2835 clock driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-axm5516.c b/drivers/clk/clk-axm5516.c
index 5d7ae333257e..98e0c9ba7b61 100644
--- a/drivers/clk/clk-axm5516.c
+++ b/drivers/clk/clk-axm5516.c
@@ -311,7 +311,6 @@ static struct axxia_divclk clk_per_div = {
"clk_sm1_pll"
},
.num_parents = 1,
- .flags = CLK_IS_BASIC,
.ops = &axxia_divclk_ops,
},
.reg = 0x1000c,
@@ -326,7 +325,6 @@ static struct axxia_divclk clk_mmc_div = {
"clk_sm1_pll"
},
.num_parents = 1,
- .flags = CLK_IS_BASIC,
.ops = &axxia_divclk_ops,
},
.reg = 0x1000c,
diff --git a/drivers/clk/clk-bd718x7.c b/drivers/clk/clk-bd718x7.c
new file mode 100644
index 000000000000..60422c72d142
--- /dev/null
+++ b/drivers/clk/clk-bd718x7.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 ROHM Semiconductors
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mfd/rohm-bd718x7.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/regmap.h>
+
+struct bd718xx_clk {
+ struct clk_hw hw;
+ u8 reg;
+ u8 mask;
+ struct platform_device *pdev;
+ struct bd718xx *mfd;
+};
+
+static int bd71837_clk_set(struct clk_hw *hw, int status)
+{
+ struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
+
+ return regmap_update_bits(c->mfd->regmap, c->reg, c->mask, status);
+}
+
+static void bd71837_clk_disable(struct clk_hw *hw)
+{
+ int rv;
+ struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
+
+ rv = bd71837_clk_set(hw, 0);
+ if (rv)
+ dev_dbg(&c->pdev->dev, "Failed to disable 32K clk (%d)\n", rv);
+}
+
+static int bd71837_clk_enable(struct clk_hw *hw)
+{
+ return bd71837_clk_set(hw, 1);
+}
+
+static int bd71837_clk_is_enabled(struct clk_hw *hw)
+{
+ int enabled;
+ int rval;
+ struct bd718xx_clk *c = container_of(hw, struct bd718xx_clk, hw);
+
+ rval = regmap_read(c->mfd->regmap, c->reg, &enabled);
+
+ if (rval)
+ return rval;
+
+ return enabled & c->mask;
+}
+
+static const struct clk_ops bd71837_clk_ops = {
+ .prepare = &bd71837_clk_enable,
+ .unprepare = &bd71837_clk_disable,
+ .is_prepared = &bd71837_clk_is_enabled,
+};
+
+static int bd71837_clk_probe(struct platform_device *pdev)
+{
+ struct bd718xx_clk *c;
+ int rval = -ENOMEM;
+ const char *parent_clk;
+ struct device *parent = pdev->dev.parent;
+ struct bd718xx *mfd = dev_get_drvdata(parent);
+ struct clk_init_data init = {
+ .name = "bd718xx-32k-out",
+ .ops = &bd71837_clk_ops,
+ };
+
+ c = devm_kzalloc(&pdev->dev, sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return -ENOMEM;
+
+ init.num_parents = 1;
+ parent_clk = of_clk_get_parent_name(parent->of_node, 0);
+
+ init.parent_names = &parent_clk;
+ if (!parent_clk) {
+ dev_err(&pdev->dev, "No parent clk found\n");
+ return -EINVAL;
+ }
+
+ c->reg = BD718XX_REG_OUT32K;
+ c->mask = BD718XX_OUT32K_EN;
+ c->mfd = mfd;
+ c->pdev = pdev;
+ c->hw.init = &init;
+
+ of_property_read_string_index(parent->of_node,
+ "clock-output-names", 0, &init.name);
+
+ rval = devm_clk_hw_register(&pdev->dev, &c->hw);
+ if (rval) {
+ dev_err(&pdev->dev, "failed to register 32K clk");
+ return rval;
+ }
+ rval = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
+ &c->hw);
+ if (rval)
+ dev_err(&pdev->dev, "adding clk provider failed\n");
+
+ return rval;
+}
+
+static struct platform_driver bd71837_clk = {
+ .driver = {
+ .name = "bd718xx-clk",
+ },
+ .probe = bd71837_clk_probe,
+};
+
+module_platform_driver(bd71837_clk);
+
+MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+MODULE_DESCRIPTION("BD71837 chip clk driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
index 6a7118d4250a..06499568cf07 100644
--- a/drivers/clk/clk-bulk.c
+++ b/drivers/clk/clk-bulk.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2017 NXP
*
* Dong Aisheng <aisheng.dong@nxp.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 00269de2f390..46604214bba0 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -1,17 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2013 NVIDIA CORPORATION. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 49819b546134..2ef819606c41 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki <s.nawrocki@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c
index 12c87457eca1..c9a86156ced8 100644
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -1,9 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
+// SPDX-License-Identifier: GPL-2.0
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/export.h>
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index b6234a5da12d..e5a17265cfaf 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
* Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
* Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Adjustable divider clock implementation
*/
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index ff83e899df71..241b3f8c61a9 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -1,11 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Standard functionality for the common clock API.
*/
#include <linux/module.h>
#include <linux/clk-provider.h>
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 6d6475c32ee5..00ef4f5e53fe 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Fixed rate clock implementation
*/
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index fdf625fb10fa..545dceec0bbf 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Intel Corporation
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Adjustable fractional divider clock implementation.
* Output rate = (m / n) * parent_rate.
* Uses rational best approximation algorithm.
@@ -40,6 +37,11 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
m = (val & fd->mmask) >> fd->mshift;
n = (val & fd->nmask) >> fd->nshift;
+ if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
+ m++;
+ n++;
+ }
+
if (!n || !m)
return parent_rate;
@@ -103,6 +105,11 @@ static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
&m, &n);
+ if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
+ m--;
+ n--;
+ }
+
if (fd->lock)
spin_lock_irqsave(fd->lock, flags);
else
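With CLK_FRAC_DIVIDER_ZERO_BASED the register stores m - 1 and n - 1, hence the increments on the read path and the decrements on the write path above. A worked decode, hedged as a sketch (parent rate and raw field values are invented):

#include <linux/math64.h>
#include <linux/types.h>

/* Mirrors clk_fd_recalc_rate() for a zero-based divider.
 * Example: parent = 48 MHz, raw m field = 1, raw n field = 2
 *   -> m = 2, n = 3 -> rate = 48 MHz * 2 / 3 = 32 MHz.
 * Without the flag, raw fields of 0 would make m zero and the code
 * above would fall back to returning parent_rate.
 */
static unsigned long fd_rate_zero_based(unsigned long parent_rate,
					u32 raw_m, u32 raw_n)
{
	u32 m = raw_m + 1, n = raw_n + 1;	/* CLK_FRAC_DIVIDER_ZERO_BASED */
	u64 rate = (u64)parent_rate * m;

	do_div(rate, n);
	return rate;
}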
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index dd82485e09a1..f05823cd9b21 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Gated clock implementation
*/
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 6a43ce420492..25eed3e0251f 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013 - 2014 Texas Instruments Incorporated - http://www.ti.com
*
@@ -5,10 +6,6 @@
* Jyri Sarha <jsarha@ti.com>
* Sergej Sawazki <ce3a@gmx.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Gpio controlled clock implementation
*/
diff --git a/drivers/clk/clk-hi655x.c b/drivers/clk/clk-hi655x.c
index 403a0188634a..a0de3315df2e 100644
--- a/drivers/clk/clk-hi655x.c
+++ b/drivers/clk/clk-hi655x.c
@@ -107,8 +107,8 @@ static int hi655x_clk_probe(struct platform_device *pdev)
if (ret)
return ret;
- return of_clk_add_hw_provider(parent->of_node, of_clk_hw_simple_get,
- &hi655x_clk->clk_hw);
+ return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
+ &hi655x_clk->clk_hw);
}
static struct platform_driver hi655x_clk_driver = {
diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c
index 02551fe4b87c..22c937644c93 100644
--- a/drivers/clk/clk-max77686.c
+++ b/drivers/clk/clk-max77686.c
@@ -137,7 +137,7 @@ static unsigned long max77686_recalc_rate(struct clk_hw *hw,
return 32768;
}
-static struct clk_ops max77686_clk_ops = {
+static const struct clk_ops max77686_clk_ops = {
.prepare = max77686_clk_prepare,
.unprepare = max77686_clk_unprepare,
.is_prepared = max77686_clk_is_prepared,
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index dc037c957acd..3c86f859c199 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/bitops.h>
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 1628b93655ed..2ad2df2e8909 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
* Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
* Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Simple multiplexer clock implementation
*/
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 84a24875c629..a95aa96f4a68 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -455,7 +455,7 @@ static const char * const src_clk_names[] = {
"RNGCCLK ",
};
-static int nomadik_src_clk_show(struct seq_file *s, void *what)
+static int nomadik_src_clk_debugfs_show(struct seq_file *s, void *what)
{
int i;
u32 src_pcksr0 = readl(src_base + SRC_PCKSR0);
@@ -479,17 +479,7 @@ static int nomadik_src_clk_show(struct seq_file *s, void *what)
return 0;
}
-static int nomadik_src_clk_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nomadik_src_clk_show, NULL);
-}
-
-static const struct file_operations nomadik_src_clk_debugfs_ops = {
- .open = nomadik_src_clk_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(nomadik_src_clk_debugfs);
static int __init nomadik_src_clk_init_debugfs(void)
{
@@ -499,7 +489,7 @@ static int __init nomadik_src_clk_init_debugfs(void)
src_pcksr0_boot = readl(src_base + SRC_PCKSR0);
src_pcksr1_boot = readl(src_base + SRC_PCKSR1);
debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
- NULL, NULL, &nomadik_src_clk_debugfs_ops);
+ NULL, NULL, &nomadik_src_clk_debugfs_fops);
return 0;
}
device_initcall(nomadik_src_clk_init_debugfs);
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
index e9612e7068e9..e41a3a9f7528 100644
--- a/drivers/clk/clk-palmas.c
+++ b/drivers/clk/clk-palmas.c
@@ -115,7 +115,7 @@ static int palmas_clks_is_prepared(struct clk_hw *hw)
return !!(val & cinfo->clk_desc->enable_mask);
}
-static struct clk_ops palmas_clks_ops = {
+static const struct clk_ops palmas_clks_ops = {
.prepare = palmas_clks_prepare,
.unprepare = palmas_clks_unprepare,
.is_prepared = palmas_clks_is_prepared,
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 4c30b6e799ed..5baa9e051110 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -1418,12 +1418,23 @@ err:
CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_b4860, "fsl,b4860-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_p2041, "fsl,p2041-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_p3041, "fsl,p3041-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_p4080, "fsl,p4080-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_p5020, "fsl,p5020-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_p5040, "fsl,p5040-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_t1023, "fsl,t1023-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_t1040, "fsl,t1040-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_t2080, "fsl,t2080-clockgen", clockgen_init);
+CLK_OF_DECLARE(qoriq_clockgen_t4240, "fsl,t4240-clockgen", clockgen_init);
/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
diff --git a/drivers/clk/clk-rk808.c b/drivers/clk/clk-rk808.c
index 6461f2820a5b..8d90bdf5b946 100644
--- a/drivers/clk/clk-rk808.c
+++ b/drivers/clk/clk-rk808.c
@@ -138,23 +138,12 @@ static int rk808_clkout_probe(struct platform_device *pdev)
if (ret)
return ret;
- return of_clk_add_hw_provider(node, of_clk_rk808_get, rk808_clkout);
-}
-
-static int rk808_clkout_remove(struct platform_device *pdev)
-{
- struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
- struct i2c_client *client = rk808->i2c;
- struct device_node *node = client->dev.of_node;
-
- of_clk_del_provider(node);
-
- return 0;
+ return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_rk808_get,
+ rk808_clkout);
}
static struct platform_driver rk808_clkout_driver = {
.probe = rk808_clkout_probe,
- .remove = rk808_clkout_remove,
.driver = {
.name = "rk808-clkout",
},
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 5b419b82f7ca..2ce370c804aa 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -71,7 +71,7 @@ static unsigned long s2mps11_clk_recalc_rate(struct clk_hw *hw,
return 32768;
}
-static struct clk_ops s2mps11_clk_ops = {
+static const struct clk_ops s2mps11_clk_ops = {
.prepare = s2mps11_clk_prepare,
.unprepare = s2mps11_clk_unprepare,
.is_prepared = s2mps11_clk_is_prepared,
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index 4f48342bc280..6a31f7f434ce 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -2015,7 +2015,7 @@ static int stm32_register_hw_clk(struct device *dev,
void __iomem *base, spinlock_t *lock,
const struct clock_config *cfg)
{
- static struct clk_hw **hws;
+ struct clk_hw **hws;
struct clk_hw *hw = ERR_PTR(-ENOENT);
hws = clk_data->hws;
diff --git a/drivers/clk/clk-twl6040.c b/drivers/clk/clk-twl6040.c
index 25dfe050ae9f..ea846f77750b 100644
--- a/drivers/clk/clk-twl6040.c
+++ b/drivers/clk/clk-twl6040.c
@@ -108,9 +108,8 @@ static int twl6040_pdmclk_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, clkdata);
- return of_clk_add_hw_provider(pdev->dev.parent->of_node,
- of_clk_hw_simple_get,
- &clkdata->pdmclk_hw);
+ return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
+ &clkdata->pdmclk_hw);
}
static struct platform_driver twl6040_pdmclk_driver = {
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index decffb3826ec..5b393e711e94 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -906,6 +906,28 @@ static int vc5_remove(struct i2c_client *client)
return 0;
}
+static int __maybe_unused vc5_suspend(struct device *dev)
+{
+ struct vc5_driver_data *vc5 = dev_get_drvdata(dev);
+
+ regcache_cache_only(vc5->regmap, true);
+ regcache_mark_dirty(vc5->regmap);
+
+ return 0;
+}
+
+static int __maybe_unused vc5_resume(struct device *dev)
+{
+ struct vc5_driver_data *vc5 = dev_get_drvdata(dev);
+ int ret;
+
+ regcache_cache_only(vc5->regmap, false);
+ ret = regcache_sync(vc5->regmap);
+ if (ret)
+ dev_err(dev, "Failed to restore register map: %d\n", ret);
+ return ret;
+}
+
static const struct vc5_chip_info idt_5p49v5923_info = {
.model = IDT_VC5_5P49V5923,
.clk_fod_cnt = 2,
@@ -961,9 +983,12 @@ static const struct of_device_id clk_vc5_of_match[] = {
};
MODULE_DEVICE_TABLE(of, clk_vc5_of_match);
+static SIMPLE_DEV_PM_OPS(vc5_pm_ops, vc5_suspend, vc5_resume);
+
static struct i2c_driver vc5_driver = {
.driver = {
.name = "vc5",
+ .pm = &vc5_pm_ops,
.of_match_table = clk_vc5_of_match,
},
.probe = vc5_probe,
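The suspend ordering matters: regcache_cache_only() first diverts any further writes into the cache while the chip is powering down, then regcache_mark_dirty() tells the core the hardware will come back with its power-on defaults, so the regcache_sync() on resume knows what it must rewrite. The same skeleton fits any regmap device that loses register contents across suspend; sketch (driver names illustrative):

#include <linux/device.h>
#include <linux/regmap.h>

static int foo_suspend(struct device *dev)
{
	struct regmap *map = dev_get_regmap(dev, NULL);

	regcache_cache_only(map, true);	/* writes land in the cache only */
	regcache_mark_dirty(map);	/* hw will reset to defaults */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct regmap *map = dev_get_regmap(dev, NULL);

	regcache_cache_only(map, false);
	return regcache_sync(map);	/* replay the cached registers */
}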
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index af011974d4ec..75d13c0eff12 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
*/
@@ -3893,6 +3890,39 @@ static void devm_of_clk_release_provider(struct device *dev, void *res)
of_clk_del_provider(*(struct device_node **)res);
}
+/*
+ * We allow a child device to use its parent device as the clock provider node
+ * for cases like MFD sub-devices where the child device driver wants to use
+ * devm_*() APIs but not list the device in DT as a sub-node.
+ */
+static struct device_node *get_clk_provider_node(struct device *dev)
+{
+ struct device_node *np, *parent_np;
+
+ np = dev->of_node;
+ parent_np = dev->parent ? dev->parent->of_node : NULL;
+
+ if (!of_find_property(np, "#clock-cells", NULL))
+ if (of_find_property(parent_np, "#clock-cells", NULL))
+ np = parent_np;
+
+ return np;
+}
+
+/**
+ * devm_of_clk_add_hw_provider() - Managed clk provider node registration
+ * @dev: Device acting as the clock provider (used for DT node and lifetime)
+ * @get: callback for decoding clk_hw
+ * @data: context pointer for @get callback
+ *
+ * Registers a clock provider for the given device's node. If the device has
+ * no DT node, or if its node lacks clock provider information (#clock-cells),
+ * the parent device's node is scanned instead; if the parent node carries
+ * #clock-cells, it is used for the registration. The provider is
+ * automatically released at device exit.
+ *
+ * Return: 0 on success or an errno on failure.
+ */
int devm_of_clk_add_hw_provider(struct device *dev,
struct clk_hw *(*get)(struct of_phandle_args *clkspec,
void *data),
@@ -3906,7 +3936,7 @@ int devm_of_clk_add_hw_provider(struct device *dev,
if (!ptr)
return -ENOMEM;
- np = dev->of_node;
+ np = get_clk_provider_node(dev);
ret = of_clk_add_hw_provider(np, get, data);
if (!ret) {
*ptr = np;
@@ -3950,12 +3980,17 @@ static int devm_clk_provider_match(struct device *dev, void *res, void *data)
return *np == data;
}
+/**
+ * devm_of_clk_del_provider() - Remove clock provider registered using devm
+ * @dev: Device to whose lifetime the clock provider was bound
+ */
void devm_of_clk_del_provider(struct device *dev)
{
int ret;
+ struct device_node *np = get_clk_provider_node(dev);
ret = devres_release(dev, devm_of_clk_release_provider,
- devm_clk_provider_match, dev->of_node);
+ devm_clk_provider_match, np);
WARN_ON(ret);
}
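This parent-node fallback is what lets the MFD sub-device conversions in this series (hi655x, rk808, twl6040, bd718x7) call devm_of_clk_add_hw_provider() directly even though their platform device has no of_node of its own. The resulting child-driver shape, sketched with hypothetical names:

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int pmic_clk_probe(struct platform_device *pdev)
{
	/* pmic_clk_register() is a stand-in for the driver's own setup */
	struct clk_hw *hw = pmic_clk_register(pdev);

	if (IS_ERR(hw))
		return PTR_ERR(hw);

	/* pdev has no of_node; the provider lands on the parent's node,
	 * which carries #clock-cells */
	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
					   hw);
}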
diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h
index 70c0ba6336c1..b02f5e604e69 100644
--- a/drivers/clk/clk.h
+++ b/drivers/clk/clk.h
@@ -1,12 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * linux/drivers/clk/clk.h
- *
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki <s.nawrocki@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
struct clk_hw;
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index b68045d8b921..c7ae653c8a16 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -117,7 +117,7 @@ static void __init h8s2678_pll_clk_setup(struct device_node *node)
parent_name = of_clk_get_parent_name(node, 0);
init.name = clk_name;
init.ops = &pll_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
init.parent_names = &parent_name;
init.num_parents = 1;
pll_clock->hw.init = &init;
diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c
index 77072c7778b9..2eda9bdf6d03 100644
--- a/drivers/clk/hisilicon/clk-hi3620.c
+++ b/drivers/clk/hisilicon/clk-hi3620.c
@@ -435,7 +435,7 @@ static struct clk *hisi_register_clk_mmc(struct hisi_mmc_clock *mmc_clk,
init.name = mmc_clk->name;
init.ops = &clk_mmc_ops;
- init.flags = mmc_clk->flags | CLK_IS_BASIC;
+ init.flags = mmc_clk->flags;
init.parent_names = (mmc_clk->parent_name ? &mmc_clk->parent_name : NULL);
init.num_parents = (mmc_clk->parent_name ? 1 : 0);
mclk->hw.init = &init;
diff --git a/drivers/clk/hisilicon/clk-hisi-phase.c b/drivers/clk/hisilicon/clk-hisi-phase.c
index 5bce9297b78b..5fdc267bb2da 100644
--- a/drivers/clk/hisilicon/clk-hisi-phase.c
+++ b/drivers/clk/hisilicon/clk-hisi-phase.c
@@ -103,7 +103,7 @@ struct clk *clk_register_hisi_phase(struct device *dev,
init.name = clks->name;
init.ops = &clk_phase_ops;
- init.flags = clks->flags | CLK_IS_BASIC;
+ init.flags = clks->flags;
init.parent_names = clks->parent_names ? &clks->parent_names : NULL;
init.num_parents = clks->parent_names ? 1 : 0;
diff --git a/drivers/clk/hisilicon/clk-hix5hd2.c b/drivers/clk/hisilicon/clk-hix5hd2.c
index 9584f0c32dda..659bd5f493b8 100644
--- a/drivers/clk/hisilicon/clk-hix5hd2.c
+++ b/drivers/clk/hisilicon/clk-hix5hd2.c
@@ -274,7 +274,7 @@ hix5hd2_clk_register_complex(struct hix5hd2_complex_clock *clks, int nums,
else
init.ops = &clk_complex_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
init.parent_names =
(clks[i].parent_name ? &clks[i].parent_name : NULL);
init.num_parents = (clks[i].parent_name ? 1 : 0);
diff --git a/drivers/clk/hisilicon/clkgate-separated.c b/drivers/clk/hisilicon/clkgate-separated.c
index f36bdef91831..ae84884dc749 100644
--- a/drivers/clk/hisilicon/clkgate-separated.c
+++ b/drivers/clk/hisilicon/clkgate-separated.c
@@ -110,7 +110,7 @@ struct clk *hisi_register_clkgate_sep(struct device *dev, const char *name,
init.name = name;
init.ops = &clkgate_separated_ops;
- init.flags = flags | CLK_IS_BASIC;
+ init.flags = flags;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
index 15af423cc0c9..dddda45127a8 100644
--- a/drivers/clk/imgtec/clk-boston.c
+++ b/drivers/clk/imgtec/clk-boston.c
@@ -73,27 +73,40 @@ static void __init clk_boston_setup(struct device_node *np)
hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
if (IS_ERR(hw)) {
pr_err("failed to register input clock: %ld\n", PTR_ERR(hw));
- return;
+ goto fail_input;
}
onecell->hws[BOSTON_CLK_INPUT] = hw;
hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq);
if (IS_ERR(hw)) {
pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw));
- return;
+ goto fail_sys;
}
onecell->hws[BOSTON_CLK_SYS] = hw;
hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq);
if (IS_ERR(hw)) {
pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw));
- return;
+ goto fail_cpu;
}
onecell->hws[BOSTON_CLK_CPU] = hw;
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, onecell);
- if (err)
+ if (err) {
pr_err("failed to add DT provider: %d\n", err);
+ goto fail_clk_add;
+ }
+
+ return;
+
+fail_clk_add:
+ clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_CPU]);
+fail_cpu:
+ clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_SYS]);
+fail_sys:
+ clk_hw_unregister_fixed_rate(onecell->hws[BOSTON_CLK_INPUT]);
+fail_input:
+ kfree(onecell);
}
/*
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
new file mode 100644
index 000000000000..4aae31a23449
--- /dev/null
+++ b/drivers/clk/imx/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+# common clock support for NXP i.MX SoC family.
+config MXC_CLK
+ bool
+ def_bool ARCH_MXC
+
+config MXC_CLK_SCU
+ bool
+ depends on IMX_SCU
+
+config CLK_IMX8MQ
+ bool "IMX8MQ CCM Clock Driver"
+ depends on ARCH_MXC && ARM64
+ help
+ Build the clock driver for the i.MX8MQ Clock Control Module (CCM).
+
+config CLK_IMX8QXP
+ bool "IMX8QXP SCU Clock"
+ depends on ARCH_MXC && IMX_SCU && ARM64
+ select MXC_CLK_SCU
+ help
+ Build the driver for IMX8QXP SCU-based clocks.
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 8c3baa7e6496..73119fbfa547 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -1,17 +1,31 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += \
+obj-$(CONFIG_MXC_CLK) += \
clk.o \
clk-busy.o \
+ clk-composite-8m.o \
clk-cpu.o \
+ clk-composite-7ulp.o \
+ clk-divider-gate.o \
clk-fixup-div.o \
clk-fixup-mux.o \
+ clk-frac-pll.o \
clk-gate-exclusive.o \
clk-gate2.o \
+ clk-pfd.o \
+ clk-pfdv2.o \
clk-pllv1.o \
clk-pllv2.o \
clk-pllv3.o \
- clk-pfd.o
+ clk-pllv4.o \
+ clk-sccg-pll.o
+
+obj-$(CONFIG_MXC_CLK_SCU) += \
+ clk-scu.o \
+ clk-lpcg-scu.o
+
+obj-$(CONFIG_CLK_IMX8MQ) += clk-imx8mq.o
+obj-$(CONFIG_CLK_IMX8QXP) += clk-imx8qxp.o clk-imx8qxp-lpcg.o
obj-$(CONFIG_SOC_IMX1) += clk-imx1.o
obj-$(CONFIG_SOC_IMX21) += clk-imx21.o
@@ -26,4 +40,5 @@ obj-$(CONFIG_SOC_IMX6SLL) += clk-imx6sll.o
obj-$(CONFIG_SOC_IMX6SX) += clk-imx6sx.o
obj-$(CONFIG_SOC_IMX6UL) += clk-imx6ul.o
obj-$(CONFIG_SOC_IMX7D) += clk-imx7d.o
+obj-$(CONFIG_SOC_IMX7ULP) += clk-imx7ulp.o
obj-$(CONFIG_SOC_VF610) += clk-vf610.o
diff --git a/drivers/clk/imx/clk-busy.c b/drivers/clk/imx/clk-busy.c
index 99036527eb0d..e695622c5aa5 100644
--- a/drivers/clk/imx/clk-busy.c
+++ b/drivers/clk/imx/clk-busy.c
@@ -154,7 +154,7 @@ static const struct clk_ops clk_busy_mux_ops = {
struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
u8 width, void __iomem *busy_reg, u8 busy_shift,
- const char **parent_names, int num_parents)
+ const char * const *parent_names, int num_parents)
{
struct clk_busy_mux *busy;
struct clk *clk;
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
new file mode 100644
index 000000000000..060f8600ea0d
--- /dev/null
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+#define PCG_PCS_SHIFT 24
+#define PCG_PCS_MASK 0x7
+#define PCG_CGC_SHIFT 30
+#define PCG_FRAC_SHIFT 3
+#define PCG_FRAC_WIDTH 1
+#define PCG_FRAC_MASK BIT(3)
+#define PCG_PCD_SHIFT 0
+#define PCG_PCD_WIDTH 3
+#define PCG_PCD_MASK 0x7
+
+struct clk_hw *imx7ulp_clk_composite(const char *name,
+ const char * const *parent_names,
+ int num_parents, bool mux_present,
+ bool rate_present, bool gate_present,
+ void __iomem *reg)
+{
+ struct clk_hw *mux_hw = NULL, *fd_hw = NULL, *gate_hw = NULL;
+ struct clk_fractional_divider *fd = NULL;
+ struct clk_gate *gate = NULL;
+ struct clk_mux *mux = NULL;
+ struct clk_hw *hw;
+
+ if (mux_present) {
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+ mux_hw = &mux->hw;
+ mux->reg = reg;
+ mux->shift = PCG_PCS_SHIFT;
+ mux->mask = PCG_PCS_MASK;
+ }
+
+ if (rate_present) {
+ fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+ if (!fd) {
+ kfree(mux);
+ return ERR_PTR(-ENOMEM);
+ }
+ fd_hw = &fd->hw;
+ fd->reg = reg;
+ fd->mshift = PCG_FRAC_SHIFT;
+ fd->mwidth = PCG_FRAC_WIDTH;
+ fd->mmask = PCG_FRAC_MASK;
+ fd->nshift = PCG_PCD_SHIFT;
+ fd->nwidth = PCG_PCD_WIDTH;
+ fd->nmask = PCG_PCD_MASK;
+ fd->flags = CLK_FRAC_DIVIDER_ZERO_BASED;
+ }
+
+ if (gate_present) {
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate) {
+ kfree(mux);
+ kfree(fd);
+ return ERR_PTR(-ENOMEM);
+ }
+ gate_hw = &gate->hw;
+ gate->reg = reg;
+ gate->bit_idx = PCG_CGC_SHIFT;
+ }
+
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ mux_hw, &clk_mux_ops, fd_hw,
+ &clk_fractional_divider_ops, gate_hw,
+ &clk_gate_ops, CLK_SET_RATE_GATE |
+ CLK_SET_PARENT_GATE);
+ if (IS_ERR(hw)) {
+ kfree(mux);
+ kfree(fd);
+ kfree(gate);
+ }
+
+ return hw;
+}
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
new file mode 100644
index 000000000000..527ade1d6933
--- /dev/null
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 NXP
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+
+#include "clk.h"
+
+#define PCG_PREDIV_SHIFT 16
+#define PCG_PREDIV_WIDTH 3
+#define PCG_PREDIV_MAX 8
+
+#define PCG_DIV_SHIFT 0
+#define PCG_DIV_WIDTH 6
+#define PCG_DIV_MAX 64
+
+#define PCG_PCS_SHIFT 24
+#define PCG_PCS_MASK 0x7
+
+#define PCG_CGC_SHIFT 28
+
+static unsigned long imx8m_clk_composite_divider_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned long prediv_rate;
+ unsigned int prediv_value;
+ unsigned int div_value;
+
+ prediv_value = readl(divider->reg) >> divider->shift;
+ prediv_value &= clk_div_mask(divider->width);
+
+ prediv_rate = divider_recalc_rate(hw, parent_rate, prediv_value,
+ NULL, divider->flags,
+ divider->width);
+
+ div_value = readl(divider->reg) >> PCG_DIV_SHIFT;
+ div_value &= clk_div_mask(PCG_DIV_WIDTH);
+
+ return divider_recalc_rate(hw, prediv_rate, div_value, NULL,
+ divider->flags, PCG_DIV_WIDTH);
+}
+
+static int imx8m_clk_composite_compute_dividers(unsigned long rate,
+ unsigned long parent_rate,
+ int *prediv, int *postdiv)
+{
+ int div1, div2;
+ int error = INT_MAX;
+ int ret = -EINVAL;
+
+ *prediv = 1;
+ *postdiv = 1;
+
+ for (div1 = 1; div1 <= PCG_PREDIV_MAX; div1++) {
+ for (div2 = 1; div2 <= PCG_DIV_MAX; div2++) {
+ int new_error = ((parent_rate / div1) / div2) - rate;
+
+ if (abs(new_error) < abs(error)) {
+ *prediv = div1;
+ *postdiv = div2;
+ error = new_error;
+ ret = 0;
+ }
+ }
+ }
+ return ret;
+}
+
+static long imx8m_clk_composite_divider_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *prate)
+{
+ int prediv_value;
+ int div_value;
+
+ imx8m_clk_composite_compute_dividers(rate, *prate,
+ &prediv_value, &div_value);
+ rate = DIV_ROUND_UP(*prate, prediv_value);
+
+ return DIV_ROUND_UP(rate, div_value);
+}
+
+static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider *divider = to_clk_divider(hw);
+ unsigned long flags = 0;
+ int prediv_value;
+ int div_value;
+ int ret;
+ u32 val;
+
+ ret = imx8m_clk_composite_compute_dividers(rate, parent_rate,
+ &prediv_value, &div_value);
+ if (ret)
+ return -EINVAL;
+
+ spin_lock_irqsave(divider->lock, flags);
+
+ val = readl(divider->reg);
+ val &= ~((clk_div_mask(divider->width) << divider->shift) |
+ (clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT));
+
+ val |= (u32)(prediv_value - 1) << divider->shift;
+ val |= (u32)(div_value - 1) << PCG_DIV_SHIFT;
+ writel(val, divider->reg);
+
+ spin_unlock_irqrestore(divider->lock, flags);
+
+ return ret;
+}
+
+static const struct clk_ops imx8m_clk_composite_divider_ops = {
+ .recalc_rate = imx8m_clk_composite_divider_recalc_rate,
+ .round_rate = imx8m_clk_composite_divider_round_rate,
+ .set_rate = imx8m_clk_composite_divider_set_rate,
+};
+
+struct clk *imx8m_clk_composite_flags(const char *name,
+ const char **parent_names,
+ int num_parents, void __iomem *reg,
+ unsigned long flags)
+{
+ struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
+ struct clk_hw *div_hw, *gate_hw;
+ struct clk_divider *div = NULL;
+ struct clk_gate *gate = NULL;
+ struct clk_mux *mux = NULL;
+
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ goto fail;
+
+ mux_hw = &mux->hw;
+ mux->reg = reg;
+ mux->shift = PCG_PCS_SHIFT;
+ mux->mask = PCG_PCS_MASK;
+
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div)
+ goto fail;
+
+ div_hw = &div->hw;
+ div->reg = reg;
+ div->shift = PCG_PREDIV_SHIFT;
+ div->width = PCG_PREDIV_WIDTH;
+ div->lock = &imx_ccm_lock;
+ div->flags = CLK_DIVIDER_ROUND_CLOSEST;
+
+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ goto fail;
+
+ gate_hw = &gate->hw;
+ gate->reg = reg;
+ gate->bit_idx = PCG_CGC_SHIFT;
+
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ mux_hw, &clk_mux_ops, div_hw,
+ &imx8m_clk_composite_divider_ops,
+ gate_hw, &clk_gate_ops, flags);
+ if (IS_ERR(hw))
+ goto fail;
+
+ return hw->clk;
+
+fail:
+ kfree(gate);
+ kfree(div);
+ kfree(mux);
+ return ERR_CAST(hw);
+}
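The divider search is a brute-force best approximation over the 3-bit pre-divider (1..8) and the 6-bit post-divider (1..64). A worked case with an invented 800 MHz parent:

/* parent = 800 MHz, requested rate = 100 MHz
 *   -> div1 = 1, div2 = 8: 800 / 1 / 8 = 100 MHz, error 0
 * parent = 800 MHz, requested rate = 135 MHz
 *   -> best reachable is div1 = 1, div2 = 6: 800 / 6 = 133 MHz
 *      (|133 - 135| = 2 beats every other div1 * div2 combination)
 * The scan is only 8 * 64 = 512 iterations, so exhaustive search is
 * cheaper than trying to be clever about factoring the ratio.
 */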
diff --git a/drivers/clk/imx/clk-divider-gate.c b/drivers/clk/imx/clk-divider-gate.c
new file mode 100644
index 000000000000..df1f8429fe16
--- /dev/null
+++ b/drivers/clk/imx/clk-divider-gate.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP.
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+struct clk_divider_gate {
+ struct clk_divider divider;
+ u32 cached_val;
+};
+
+static inline struct clk_divider_gate *to_clk_divider_gate(struct clk_hw *hw)
+{
+ struct clk_divider *div = to_clk_divider(hw);
+
+ return container_of(div, struct clk_divider_gate, divider);
+}
+
+static unsigned long clk_divider_gate_recalc_rate_ro(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider *div = to_clk_divider(hw);
+ unsigned int val;
+
+ val = clk_readl(div->reg) >> div->shift;
+ val &= clk_div_mask(div->width);
+ if (!val)
+ return 0;
+
+ return divider_recalc_rate(hw, parent_rate, val, div->table,
+ div->flags, div->width);
+}
+
+static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
+ struct clk_divider *div = to_clk_divider(hw);
+ unsigned long flags = 0;
+ unsigned int val;
+
+ spin_lock_irqsave(div->lock, flags);
+
+ if (!clk_hw_is_enabled(hw)) {
+ val = div_gate->cached_val;
+ } else {
+ val = clk_readl(div->reg) >> div->shift;
+ val &= clk_div_mask(div->width);
+ }
+
+ spin_unlock_irqrestore(div->lock, flags);
+
+ if (!val)
+ return 0;
+
+ return divider_recalc_rate(hw, parent_rate, val, div->table,
+ div->flags, div->width);
+}
+
+static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clk_divider_ops.round_rate(hw, rate, prate);
+}
+
+static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
+ struct clk_divider *div = to_clk_divider(hw);
+ unsigned long flags = 0;
+ int value;
+ u32 val;
+
+ value = divider_get_val(rate, parent_rate, div->table,
+ div->width, div->flags);
+ if (value < 0)
+ return value;
+
+ spin_lock_irqsave(div->lock, flags);
+
+ if (clk_hw_is_enabled(hw)) {
+ val = clk_readl(div->reg);
+ val &= ~(clk_div_mask(div->width) << div->shift);
+ val |= (u32)value << div->shift;
+ clk_writel(val, div->reg);
+ } else {
+ div_gate->cached_val = value;
+ }
+
+ spin_unlock_irqrestore(div->lock, flags);
+
+ return 0;
+}
+
+static int clk_divider_enable(struct clk_hw *hw)
+{
+ struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
+ struct clk_divider *div = to_clk_divider(hw);
+ unsigned long flags = 0;
+ u32 val;
+
+ if (!div_gate->cached_val) {
+ pr_err("%s: no valid preset rate\n", clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(div->lock, flags);
+ /* restore div val */
+ val = clk_readl(div->reg);
+ val |= div_gate->cached_val << div->shift;
+ clk_writel(val, div->reg);
+
+ spin_unlock_irqrestore(div->lock, flags);
+
+ return 0;
+}
+
+static void clk_divider_disable(struct clk_hw *hw)
+{
+ struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
+ struct clk_divider *div = to_clk_divider(hw);
+ unsigned long flags = 0;
+ u32 val;
+
+ spin_lock_irqsave(div->lock, flags);
+
+ /* store the current div val */
+ val = clk_readl(div->reg) >> div->shift;
+ val &= clk_div_mask(div->width);
+ div_gate->cached_val = val;
+ clk_writel(0, div->reg);
+
+ spin_unlock_irqrestore(div->lock, flags);
+}
+
+static int clk_divider_is_enabled(struct clk_hw *hw)
+{
+ struct clk_divider *div = to_clk_divider(hw);
+ u32 val;
+
+ val = clk_readl(div->reg) >> div->shift;
+ val &= clk_div_mask(div->width);
+
+ return val ? 1 : 0;
+}
+
+static const struct clk_ops clk_divider_gate_ro_ops = {
+ .recalc_rate = clk_divider_gate_recalc_rate_ro,
+ .round_rate = clk_divider_round_rate,
+};
+
+static const struct clk_ops clk_divider_gate_ops = {
+ .recalc_rate = clk_divider_gate_recalc_rate,
+ .round_rate = clk_divider_round_rate,
+ .set_rate = clk_divider_gate_set_rate,
+ .enable = clk_divider_enable,
+ .disable = clk_divider_disable,
+ .is_enabled = clk_divider_is_enabled,
+};
+
+/*
+ * NOTE: In order to reuse as much code as possible from the common
+ * divider, this divider also takes an extra clk_divider_flags argument.
+ * It is, however, fixed to CLK_DIVIDER_ONE_BASED by default, matching
+ * the hardware. Beyond that, only the CLK_DIVIDER_READ_ONLY flag may be
+ * specified freely by the caller.
+ */
+struct clk_hw *imx_clk_divider_gate(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ u8 shift, u8 width, u8 clk_divider_flags,
+ const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ struct clk_init_data init;
+ struct clk_divider_gate *div_gate;
+ struct clk_hw *hw;
+ u32 val;
+ int ret;
+
+ div_gate = kzalloc(sizeof(*div_gate), GFP_KERNEL);
+ if (!div_gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
+ init.ops = &clk_divider_gate_ro_ops;
+ else
+ init.ops = &clk_divider_gate_ops;
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ div_gate->divider.reg = reg;
+ div_gate->divider.shift = shift;
+ div_gate->divider.width = width;
+ div_gate->divider.lock = lock;
+ div_gate->divider.table = table;
+ div_gate->divider.hw.init = &init;
+ div_gate->divider.flags = CLK_DIVIDER_ONE_BASED | clk_divider_flags;
+ /* cache the current divider value */
+ val = clk_readl(reg) >> shift;
+ val &= clk_div_mask(width);
+ div_gate->cached_val = val;
+
+ hw = &div_gate->divider.hw;
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ kfree(div_gate);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
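
A minimal usage sketch of the new helper (illustration only, not part of the patch; the clock names, register offset, and table are made up):

static DEFINE_SPINLOCK(example_lock);

static const struct clk_div_table example_div_table[] = {
	{ .val = 1, .div = 1, },
	{ .val = 2, .div = 2, },
	{ .val = 3, .div = 4, },
	{ /* sentinel */ },
};

static void example_register(void __iomem *base)
{
	struct clk_hw *hw;

	/*
	 * A zero divider field in the register means the clock is gated;
	 * the last non-zero divider value is cached by the code above and
	 * written back when the clock is re-enabled.
	 */
	hw = imx_clk_divider_gate("example_div", "example_parent", 0,
				  base + 0x104, 8, 3, 0,
				  example_div_table, &example_lock);
	if (IS_ERR(hw))
		pr_err("failed to register example_div: %ld\n", PTR_ERR(hw));
}
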
diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c
index c9b327e0a8dd..44817c1b0b88 100644
--- a/drivers/clk/imx/clk-fixup-mux.c
+++ b/drivers/clk/imx/clk-fixup-mux.c
@@ -70,7 +70,7 @@ static const struct clk_ops clk_fixup_mux_ops = {
};
struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
- u8 shift, u8 width, const char **parents,
+ u8 shift, u8 width, const char * const *parents,
int num_parents, void (*fixup)(u32 *val))
{
struct clk_fixup_mux *fixup_mux;
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c
new file mode 100644
index 000000000000..0026c3969b1e
--- /dev/null
+++ b/drivers/clk/imx/clk-frac-pll.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 NXP.
+ *
+ * This driver supports the fractional PLLs found in the i.MX8M SoCs.
+ *
+ * Documentation for this fractional PLL can be found at:
+ * https://www.nxp.com/docs/en/reference-manual/IMX8MDQLQRM.pdf#page=834
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+
+#include "clk.h"
+
+#define PLL_CFG0 0x0
+#define PLL_CFG1 0x4
+
+#define PLL_LOCK_STATUS BIT(31)
+#define PLL_PD_MASK BIT(19)
+#define PLL_BYPASS_MASK BIT(14)
+#define PLL_NEWDIV_VAL BIT(12)
+#define PLL_NEWDIV_ACK BIT(11)
+#define PLL_FRAC_DIV_MASK GENMASK(30, 7)
+#define PLL_INT_DIV_MASK GENMASK(6, 0)
+#define PLL_OUTPUT_DIV_MASK GENMASK(4, 0)
+#define PLL_FRAC_DENOM 0x1000000
+
+#define PLL_FRAC_LOCK_TIMEOUT 10000
+#define PLL_FRAC_ACK_TIMEOUT 500000
+
+struct clk_frac_pll {
+ struct clk_hw hw;
+ void __iomem *base;
+};
+
+#define to_clk_frac_pll(_hw) container_of(_hw, struct clk_frac_pll, hw)
+
+static int clk_wait_lock(struct clk_frac_pll *pll)
+{
+ u32 val;
+
+ return readl_poll_timeout(pll->base, val, val & PLL_LOCK_STATUS, 0,
+ PLL_FRAC_LOCK_TIMEOUT);
+}
+
+static int clk_wait_ack(struct clk_frac_pll *pll)
+{
+ u32 val;
+
+ /* Return directly if the PLL is in power-down or bypass mode */
+ if (readl_relaxed(pll->base) & (PLL_PD_MASK | PLL_BYPASS_MASK))
+ return 0;
+
+ /* Wait for the pll's divfi and divff to be reloaded */
+ return readl_poll_timeout(pll->base, val, val & PLL_NEWDIV_ACK, 0,
+ PLL_FRAC_ACK_TIMEOUT);
+}
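
For reference, readl_poll_timeout() from <linux/iopoll.h> re-reads the register until the condition holds or the timeout (in microseconds) expires, doing one final read after the deadline, and returns 0 or -ETIMEDOUT. A rough open-coded equivalent of clk_wait_lock(), for illustration only:

static int clk_wait_lock_open_coded(struct clk_frac_pll *pll)
{
	ktime_t timeout = ktime_add_us(ktime_get(), PLL_FRAC_LOCK_TIMEOUT);
	u32 val;

	for (;;) {
		val = readl(pll->base);
		if (val & PLL_LOCK_STATUS)
			return 0;
		if (ktime_after(ktime_get(), timeout))
			break;
		cpu_relax();
	}

	/* One last read, in case we were preempted past the deadline. */
	val = readl(pll->base);
	return (val & PLL_LOCK_STATUS) ? 0 : -ETIMEDOUT;
}
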
+
+static int clk_pll_prepare(struct clk_hw *hw)
+{
+ struct clk_frac_pll *pll = to_clk_frac_pll(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base + PLL_CFG0);
+ val &= ~PLL_PD_MASK;
+ writel_relaxed(val, pll->base + PLL_CFG0);
+
+ return clk_wait_lock(pll);
+}
+
+static void clk_pll_unprepare(struct clk_hw *hw)
+{
+ struct clk_frac_pll *pll = to_clk_frac_pll(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base + PLL_CFG0);
+ val |= PLL_PD_MASK;
+ writel_relaxed(val, pll->base + PLL_CFG0);
+}
+
+static int clk_pll_is_prepared(struct clk_hw *hw)
+{
+ struct clk_frac_pll *pll = to_clk_frac_pll(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base + PLL_CFG0);
+ return (val & PLL_PD_MASK) ? 0 : 1;
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_frac_pll *pll = to_clk_frac_pll(hw);
+ u32 val, divff, divfi, divq;
+ u64 temp64 = parent_rate;
+ u64 rate;
+
+ val = readl_relaxed(pll->base + PLL_CFG0);
+ divq = (FIELD_GET(PLL_OUTPUT_DIV_MASK, val) + 1) * 2;
+ val = readl_relaxed(pll->base + PLL_CFG1);
+ divff = FIELD_GET(PLL_FRAC_DIV_MASK, val);
+ divfi = FIELD_GET(PLL_INT_DIV_MASK, val);
+
+ temp64 *= 8;
+ temp64 *= divff;
+ do_div(temp64, PLL_FRAC_DENOM);
+ do_div(temp64, divq);
+
+ rate = parent_rate * 8 * (divfi + 1);
+ do_div(rate, divq);
+ rate += temp64;
+
+ return rate;
+}
+
+static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u64 parent_rate = *prate;
+ u32 divff, divfi;
+ u64 temp64;
+
+ parent_rate *= 8;
+ rate *= 2;
+ temp64 = rate;
+ do_div(temp64, parent_rate);
+ divfi = temp64;
+ temp64 = rate - divfi * parent_rate;
+ temp64 *= PLL_FRAC_DENOM;
+ do_div(temp64, parent_rate);
+ divff = temp64;
+
+ temp64 = parent_rate;
+ temp64 *= divff;
+ do_div(temp64, PLL_FRAC_DENOM);
+
+ rate = parent_rate * divfi + temp64;
+
+ return rate / 2;
+}
+
+/*
+ * To simplify the clock calculation, we keep the 'PLL_OUTPUT_VAL' at zero
+ * (which means the PLL output is divided by 2). The PLL output rate is
+ * then given by the formula:
+ * pllout = parent_rate * 8 / 2 * DIVF_VAL;
+ * where DIVF_VAL = 1 + DIVFI + DIVFF / 2^24.
+ */
+static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_frac_pll *pll = to_clk_frac_pll(hw);
+ u32 val, divfi, divff;
+ u64 temp64;
+ int ret;
+
+ parent_rate *= 8;
+ rate *= 2;
+ divfi = rate / parent_rate;
+ temp64 = rate - divfi * parent_rate;
+ temp64 *= PLL_FRAC_DENOM;
+ do_div(temp64, parent_rate);
+ divff = temp64;
+
+ val = readl_relaxed(pll->base + PLL_CFG1);
+ val &= ~(PLL_FRAC_DIV_MASK | PLL_INT_DIV_MASK);
+ val |= (divff << 7) | (divfi - 1);
+ writel_relaxed(val, pll->base + PLL_CFG1);
+
+ val = readl_relaxed(pll->base + PLL_CFG0);
+ val &= ~0x1f;
+ writel_relaxed(val, pll->base + PLL_CFG0);
+
+ /* Set the NEWDIV_VAL bit to reload DIVFI and DIVFF */
+ val = readl_relaxed(pll->base + PLL_CFG0);
+ val |= PLL_NEWDIV_VAL;
+ writel_relaxed(val, pll->base + PLL_CFG0);
+
+ ret = clk_wait_ack(pll);
+
+ /* Clear the NEWDIV_VAL bit */
+ val = readl_relaxed(pll->base + PLL_CFG0);
+ val &= ~PLL_NEWDIV_VAL;
+ writel_relaxed(val, pll->base + PLL_CFG0);
+
+ return ret;
+}
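
As a sanity check of the formula above, a standalone sketch (plain userspace C; the 25 MHz reference and 1600 MHz target are hypothetical) reproducing the DIVFI/DIVFF calculation from clk_pll_set_rate():

#include <stdint.h>
#include <stdio.h>

#define FRAC_DENOM (1ULL << 24)

int main(void)
{
	uint64_t parent_rate = 25000000ULL * 8;	/* 200 MHz */
	uint64_t rate = 1600000000ULL * 2;	/* 3200 MHz */
	uint64_t divfi = rate / parent_rate;	/* 16 */
	uint64_t divff = (rate - divfi * parent_rate) * FRAC_DENOM
			 / parent_rate;		/* 0: exact integer ratio */

	/* The DIVFI register field is stored minus one. */
	printf("DIVFI=%llu DIVFF=%llu\n",
	       (unsigned long long)(divfi - 1), (unsigned long long)divff);
	/* pllout = 25 MHz * 8 / 2 * (1 + 15 + 0/2^24) = 1600 MHz */
	return 0;
}
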
+
+static const struct clk_ops clk_frac_pll_ops = {
+ .prepare = clk_pll_prepare,
+ .unprepare = clk_pll_unprepare,
+ .is_prepared = clk_pll_is_prepared,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
+};
+
+struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
+ void __iomem *base)
+{
+ struct clk_init_data init;
+ struct clk_frac_pll *pll;
+ struct clk_hw *hw;
+ int ret;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_frac_pll_ops;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll->base = base;
+ pll->hw.init = &init;
+
+ hw = &pll->hw;
+
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ kfree(pll);
+ return ERR_PTR(ret);
+ }
+
+ return hw->clk;
+}
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index bbe0c60f4d09..716eac3136b4 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -225,6 +225,41 @@ static void of_assigned_ldb_sels(struct device_node *node,
}
}
+static bool pll6_bypassed(struct device_node *node)
+{
+ int index, ret, num_clocks;
+ struct of_phandle_args clkspec;
+
+ num_clocks = of_count_phandle_with_args(node, "assigned-clocks",
+ "#clock-cells");
+ if (num_clocks < 0)
+ return false;
+
+ for (index = 0; index < num_clocks; index++) {
+ ret = of_parse_phandle_with_args(node, "assigned-clocks",
+ "#clock-cells", index,
+ &clkspec);
+ if (ret < 0)
+ return false;
+
+ if (clkspec.np == node &&
+ clkspec.args[0] == IMX6QDL_PLL6_BYPASS)
+ break;
+ }
+
+ /* PLL6 bypass is not part of the assigned clock list */
+ if (index == num_clocks)
+ return false;
+
+ ret = of_parse_phandle_with_args(node, "assigned-clock-parents",
+ "#clock-cells", index, &clkspec);
+
+ if (clkspec.args[0] != IMX6QDL_CLK_PLL6)
+ return true;
+
+ return false;
+}
+
#define CCM_CCDR 0x04
#define CCM_CCSR 0x0c
#define CCM_CS2CDR 0x2c
@@ -414,12 +449,24 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
int ret;
clk[IMX6QDL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
- clk[IMX6QDL_CLK_CKIL] = imx_obtain_fixed_clock("ckil", 0);
- clk[IMX6QDL_CLK_CKIH] = imx_obtain_fixed_clock("ckih1", 0);
- clk[IMX6QDL_CLK_OSC] = imx_obtain_fixed_clock("osc", 0);
+ clk[IMX6QDL_CLK_CKIL] = of_clk_get_by_name(ccm_node, "ckil");
+ if (IS_ERR(clk[IMX6QDL_CLK_CKIL]))
+ clk[IMX6QDL_CLK_CKIL] = imx_obtain_fixed_clock("ckil", 0);
+ clk[IMX6QDL_CLK_CKIH] = of_clk_get_by_name(ccm_node, "ckih1");
+ if (IS_ERR(clk[IMX6QDL_CLK_CKIH]))
+ clk[IMX6QDL_CLK_CKIH] = imx_obtain_fixed_clock("ckih1", 0);
+ clk[IMX6QDL_CLK_OSC] = of_clk_get_by_name(ccm_node, "osc");
+ if (IS_ERR(clk[IMX6QDL_CLK_OSC]))
+ clk[IMX6QDL_CLK_OSC] = imx_obtain_fixed_clock("osc", 0);
+
/* Clock source from external clock via CLK1/2 PADs */
- clk[IMX6QDL_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0);
- clk[IMX6QDL_CLK_ANACLK2] = imx_obtain_fixed_clock("anaclk2", 0);
+ clk[IMX6QDL_CLK_ANACLK1] = of_clk_get_by_name(ccm_node, "anaclk1");
+ if (IS_ERR(clk[IMX6QDL_CLK_ANACLK1]))
+ clk[IMX6QDL_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0);
+
+ clk[IMX6QDL_CLK_ANACLK2] = of_clk_get_by_name(ccm_node, "anaclk2");
+ if (IS_ERR(clk[IMX6QDL_CLK_ANACLK2]))
+ clk[IMX6QDL_CLK_ANACLK2] = imx_obtain_fixed_clock("anaclk2", 0);
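
The try-the-DT-clock-then-fall-back pattern above repeats five times; a small local helper along these lines (the name is invented here) would express it once:

static struct clk *imx6q_obtain_fixed_clk_fallback(struct device_node *np,
						   const char *name)
{
	struct clk *clk = of_clk_get_by_name(np, name);

	/* Prefer a clock described in the device tree; fall back to the
	 * legacy board-level fixed clock when the DT provides none. */
	if (IS_ERR(clk))
		clk = imx_obtain_fixed_clock(name, 0);

	return clk;
}
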
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
anatop_base = base = of_iomap(np, 0);
@@ -491,16 +538,32 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_USBPHY1_GATE] = imx_clk_gate("usbphy1_gate", "dummy", base + 0x10, 6);
clk[IMX6QDL_CLK_USBPHY2_GATE] = imx_clk_gate("usbphy2_gate", "dummy", base + 0x20, 6);
- clk[IMX6QDL_CLK_SATA_REF] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 5);
- clk[IMX6QDL_CLK_PCIE_REF] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 4);
+ /*
+ * The ENET PLL is special in that it has multiple outputs with
+ * different post-dividers that are all affected by the single bypass
+ * bit, so a single mux bit affects 3 independent branches of the clock
+ * tree. There is no good way to model this in the clock framework and
+ * dynamically changing the bypass bit will yield unexpected results.
+ * So we treat any configuration that bypasses the ENET PLL as
+ * essentially static, with the divider ratios reflecting the bypass
+ * status.
+ */
+ if (!pll6_bypassed(ccm_node)) {
+ clk[IMX6QDL_CLK_SATA_REF] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 5);
+ clk[IMX6QDL_CLK_PCIE_REF] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 4);
+ clk[IMX6QDL_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0,
+ base + 0xe0, 0, 2, 0, clk_enet_ref_table,
+ &imx_ccm_lock);
+ } else {
+ clk[IMX6QDL_CLK_SATA_REF] = imx_clk_fixed_factor("sata_ref", "pll6_enet", 1, 1);
+ clk[IMX6QDL_CLK_PCIE_REF] = imx_clk_fixed_factor("pcie_ref", "pll6_enet", 1, 1);
+ clk[IMX6QDL_CLK_ENET_REF] = imx_clk_fixed_factor("enet_ref", "pll6_enet", 1, 1);
+ }
clk[IMX6QDL_CLK_SATA_REF_100M] = imx_clk_gate("sata_ref_100m", "sata_ref", base + 0xe0, 20);
clk[IMX6QDL_CLK_PCIE_REF_125M] = imx_clk_gate("pcie_ref_125m", "pcie_ref", base + 0xe0, 19);
- clk[IMX6QDL_CLK_ENET_REF] = clk_register_divider_table(NULL, "enet_ref", "pll6_enet", 0,
- base + 0xe0, 0, 2, 0, clk_enet_ref_table,
- &imx_ccm_lock);
-
clk[IMX6QDL_CLK_LVDS1_SEL] = imx_clk_mux("lvds1_sel", base + 0x160, 0, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
clk[IMX6QDL_CLK_LVDS2_SEL] = imx_clk_mux("lvds2_sel", base + 0x160, 5, 5, lvds_sels, ARRAY_SIZE(lvds_sels));
@@ -508,8 +571,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
* lvds1_gate and lvds2_gate are pseudo-gates. Both can be
* independently configured as clock inputs or outputs. We treat
* the "output_enable" bit as a gate, even though it's really just
- * enabling clock output.
+ * enabling clock output. Initially the gate bits are cleared, as
+ * otherwise the exclusive configuration gets locked in by the setup
+ * done by software running before the clock driver, with no way to
+ * change it.
*/
+ writel(readl(base + 0x160) & ~0x3c00, base + 0x160);
clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12));
clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13));
@@ -737,6 +804,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[IMX6QDL_CLK_CAN1_SERIAL] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16);
clk[IMX6QDL_CLK_CAN2_IPG] = imx_clk_gate2("can2_ipg", "ipg", base + 0x68, 18);
clk[IMX6QDL_CLK_CAN2_SERIAL] = imx_clk_gate2("can2_serial", "can_root", base + 0x68, 20);
+ clk[IMX6QDL_CLK_DCIC1] = imx_clk_gate2("dcic1", "ipu1_podf", base + 0x68, 24);
+ clk[IMX6QDL_CLK_DCIC2] = imx_clk_gate2("dcic2", "ipu2_podf", base + 0x68, 26);
clk[IMX6QDL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_root", base + 0x6c, 0);
clk[IMX6QDL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2);
clk[IMX6QDL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4);
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 6fcfbbd907a5..e13d8814cfa4 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -17,6 +17,8 @@
#include "clk.h"
+#define CCDR 0x4
+#define BM_CCM_CCDR_MMDC_CH0_MASK (1 << 17)
#define CCSR 0xc
#define BM_CCSR_PLL1_SW_CLK_SEL (1 << 2)
#define CACRR 0x10
@@ -411,6 +413,10 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
clks[IMX6SL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
clks[IMX6SL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);
+ /* Ensure the MMDC CH0 handshake is bypassed */
+ writel_relaxed(readl_relaxed(base + CCDR) |
+ BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
+
imx_check_clocks(clks, ARRAY_SIZE(clks));
clk_data.clks = clks;
diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
index adb08f64c691..06c105d580a4 100644
--- a/drivers/clk/imx/clk-imx7d.c
+++ b/drivers/clk/imx/clk-imx7d.c
@@ -886,9 +886,6 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
/* use old gpt clk setting, gpt1 root clk must be twice as gpt counter freq */
clk_set_parent(clks[IMX7D_GPT1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
- /* set uart module clock's parent clock source that must be great then 80MHz */
- clk_set_parent(clks[IMX7D_UART1_ROOT_SRC], clks[IMX7D_OSC_24M_CLK]);
-
/* Set clock rate for USBPHY, the USB_PLL at CCM is from USBOTG2 */
clks[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
clks[IMX7D_USB_MAIN_480M_CLK] = imx_clk_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
new file mode 100644
index 000000000000..4e18f629f823
--- /dev/null
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Author: Dong Aisheng <aisheng.dong@nxp.com>
+ *
+ */
+
+#include <dt-bindings/clock/imx7ulp-clock.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+static const char * const pll_pre_sels[] = { "sosc", "firc", };
+static const char * const spll_pfd_sels[] = { "spll_pfd0", "spll_pfd1", "spll_pfd2", "spll_pfd3", };
+static const char * const spll_sels[] = { "spll", "spll_pfd_sel", };
+static const char * const apll_pfd_sels[] = { "apll_pfd0", "apll_pfd1", "apll_pfd2", "apll_pfd3", };
+static const char * const apll_sels[] = { "apll", "apll_pfd_sel", };
+static const char * const scs_sels[] = { "dummy", "sosc", "sirc", "firc", "dummy", "apll_sel", "spll_sel", "upll", };
+static const char * const ddr_sels[] = { "apll_pfd_sel", "upll", };
+static const char * const nic_sels[] = { "firc", "ddr_clk", };
+static const char * const periph_plat_sels[] = { "dummy", "nic1_bus_clk", "nic1_clk", "ddr_clk", "apll_pfd2", "apll_pfd1", "apll_pfd0", "upll", };
+static const char * const periph_bus_sels[] = { "dummy", "sosc_bus_clk", "mpll", "firc_bus_clk", "rosc", "nic1_bus_clk", "nic1_clk", "spll_bus_clk", };
+static const char * const arm_sels[] = { "divcore", "dummy", "dummy", "hsrun_divcore", };
+
+/* used by sosc/sirc/firc/ddr/spll/apll dividers */
+static const struct clk_div_table ulp_div_table[] = {
+ { .val = 1, .div = 1, },
+ { .val = 2, .div = 2, },
+ { .val = 3, .div = 4, },
+ { .val = 4, .div = 8, },
+ { .val = 5, .div = 16, },
+ { .val = 6, .div = 32, },
+ { .val = 7, .div = 64, },
+ { /* sentinel */ },
+};
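
The table maps a register value v in 1..7 to a divider of 2^(v-1); a value of 0 means the clock is gated off. A tiny lookup sketch over this table (illustration only):

static unsigned long ulp_div_to_rate(unsigned long parent_rate, u32 val)
{
	const struct clk_div_table *t;

	for (t = ulp_div_table; t->div; t++)
		if (t->val == val)
			return parent_rate / t->div;	/* e.g. val 3 -> /4 */

	return 0;	/* val == 0: gated, no output */
}
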
+
+static void __init imx7ulp_clk_scg1_init(struct device_node *np)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+
+ clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
+ IMX7ULP_CLK_SCG1_END, GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->num = IMX7ULP_CLK_SCG1_END;
+ clks = clk_data->hws;
+
+ clks[IMX7ULP_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+
+ clks[IMX7ULP_CLK_ROSC] = imx_obtain_fixed_clk_hw(np, "rosc");
+ clks[IMX7ULP_CLK_SOSC] = imx_obtain_fixed_clk_hw(np, "sosc");
+ clks[IMX7ULP_CLK_SIRC] = imx_obtain_fixed_clk_hw(np, "sirc");
+ clks[IMX7ULP_CLK_FIRC] = imx_obtain_fixed_clk_hw(np, "firc");
+ clks[IMX7ULP_CLK_MIPI_PLL] = imx_obtain_fixed_clk_hw(np, "mpll");
+ clks[IMX7ULP_CLK_UPLL] = imx_obtain_fixed_clk_hw(np, "upll");
+
+ /* SCG1 */
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ /* NOTE: xPLL config can't be changed when xPLL is enabled */
+ clks[IMX7ULP_CLK_APLL_PRE_SEL] = imx_clk_hw_mux_flags("apll_pre_sel", base + 0x508, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+ clks[IMX7ULP_CLK_SPLL_PRE_SEL] = imx_clk_hw_mux_flags("spll_pre_sel", base + 0x608, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+
+ /* name parent_name reg shift width flags */
+ clks[IMX7ULP_CLK_APLL_PRE_DIV] = imx_clk_hw_divider_flags("apll_pre_div", "apll_pre_sel", base + 0x508, 8, 3, CLK_SET_RATE_GATE);
+ clks[IMX7ULP_CLK_SPLL_PRE_DIV] = imx_clk_hw_divider_flags("spll_pre_div", "spll_pre_sel", base + 0x608, 8, 3, CLK_SET_RATE_GATE);
+
+ /* name parent_name base */
+ clks[IMX7ULP_CLK_APLL] = imx_clk_pllv4("apll", "apll_pre_div", base + 0x500);
+ clks[IMX7ULP_CLK_SPLL] = imx_clk_pllv4("spll", "spll_pre_div", base + 0x600);
+
+ /* APLL PFDs */
+ clks[IMX7ULP_CLK_APLL_PFD0] = imx_clk_pfdv2("apll_pfd0", "apll", base + 0x50c, 0);
+ clks[IMX7ULP_CLK_APLL_PFD1] = imx_clk_pfdv2("apll_pfd1", "apll", base + 0x50c, 1);
+ clks[IMX7ULP_CLK_APLL_PFD2] = imx_clk_pfdv2("apll_pfd2", "apll", base + 0x50c, 2);
+ clks[IMX7ULP_CLK_APLL_PFD3] = imx_clk_pfdv2("apll_pfd3", "apll", base + 0x50c, 3);
+
+ /* SPLL PFDs */
+ clks[IMX7ULP_CLK_SPLL_PFD0] = imx_clk_pfdv2("spll_pfd0", "spll", base + 0x60C, 0);
+ clks[IMX7ULP_CLK_SPLL_PFD1] = imx_clk_pfdv2("spll_pfd1", "spll", base + 0x60C, 1);
+ clks[IMX7ULP_CLK_SPLL_PFD2] = imx_clk_pfdv2("spll_pfd2", "spll", base + 0x60C, 2);
+ clks[IMX7ULP_CLK_SPLL_PFD3] = imx_clk_pfdv2("spll_pfd3", "spll", base + 0x60C, 3);
+
+ /* PLL Mux */
+ clks[IMX7ULP_CLK_APLL_PFD_SEL] = imx_clk_hw_mux_flags("apll_pfd_sel", base + 0x508, 14, 2, apll_pfd_sels, ARRAY_SIZE(apll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+ clks[IMX7ULP_CLK_SPLL_PFD_SEL] = imx_clk_hw_mux_flags("spll_pfd_sel", base + 0x608, 14, 2, spll_pfd_sels, ARRAY_SIZE(spll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+ clks[IMX7ULP_CLK_APLL_SEL] = imx_clk_hw_mux_flags("apll_sel", base + 0x508, 1, 1, apll_sels, ARRAY_SIZE(apll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+ clks[IMX7ULP_CLK_SPLL_SEL] = imx_clk_hw_mux_flags("spll_sel", base + 0x608, 1, 1, spll_sels, ARRAY_SIZE(spll_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
+
+ clks[IMX7ULP_CLK_SPLL_BUS_CLK] = imx_clk_divider_gate("spll_bus_clk", "spll_sel", CLK_SET_RATE_GATE, base + 0x604, 8, 3, 0, ulp_div_table, &imx_ccm_lock);
+
+ /* Selecting a different clock source for scs/ddr/nic requires that source clock to be enabled first */
+ clks[IMX7ULP_CLK_SYS_SEL] = imx_clk_hw_mux2("scs_sel", base + 0x14, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
+ clks[IMX7ULP_CLK_HSRUN_SYS_SEL] = imx_clk_hw_mux2("hsrun_scs_sel", base + 0x1c, 24, 4, scs_sels, ARRAY_SIZE(scs_sels));
+ clks[IMX7ULP_CLK_NIC_SEL] = imx_clk_hw_mux2("nic_sel", base + 0x40, 28, 1, nic_sels, ARRAY_SIZE(nic_sels));
+ clks[IMX7ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x30, 24, 1, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+
+ clks[IMX7ULP_CLK_CORE_DIV] = imx_clk_hw_divider_flags("divcore", "scs_sel", base + 0x14, 16, 4, CLK_SET_RATE_PARENT);
+ clks[IMX7ULP_CLK_HSRUN_CORE_DIV] = imx_clk_hw_divider_flags("hsrun_divcore", "hsrun_scs_sel", base + 0x1c, 16, 4, CLK_SET_RATE_PARENT);
+
+ clks[IMX7ULP_CLK_DDR_DIV] = imx_clk_divider_gate("ddr_clk", "ddr_sel", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, base + 0x30, 0, 3,
+ 0, ulp_div_table, &imx_ccm_lock);
+
+ clks[IMX7ULP_CLK_NIC0_DIV] = imx_clk_hw_divider_flags("nic0_clk", "nic_sel", base + 0x40, 24, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX7ULP_CLK_NIC1_DIV] = imx_clk_hw_divider_flags("nic1_clk", "nic0_clk", base + 0x40, 16, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX7ULP_CLK_NIC1_BUS_DIV] = imx_clk_hw_divider_flags("nic1_bus_clk", "nic1_clk", base + 0x40, 4, 4, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+ clks[IMX7ULP_CLK_GPU_DIV] = imx_clk_hw_divider("gpu_clk", "nic0_clk", base + 0x40, 20, 4);
+
+ clks[IMX7ULP_CLK_SOSC_BUS_CLK] = imx_clk_divider_gate("sosc_bus_clk", "sosc", 0, base + 0x104, 8, 3,
+ CLK_DIVIDER_READ_ONLY, ulp_div_table, &imx_ccm_lock);
+ clks[IMX7ULP_CLK_FIRC_BUS_CLK] = imx_clk_divider_gate("firc_bus_clk", "firc", 0, base + 0x304, 8, 3,
+ CLK_DIVIDER_READ_ONLY, ulp_div_table, &imx_ccm_lock);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+}
+CLK_OF_DECLARE(imx7ulp_clk_scg1, "fsl,imx7ulp-scg1", imx7ulp_clk_scg1_init);
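
With the provider registered through of_clk_add_hw_provider() and of_clk_hw_onecell_get(), peripheral drivers resolve these clocks by index from their DT "clocks" property using the standard clk API; a minimal consumer sketch (device, connection name, and binding are hypothetical):

static int example_probe(struct platform_device *pdev)
{
	struct clk *ipg;
	int ret;

	/* Resolves e.g. clocks = <&scg1 IMX7ULP_CLK_NIC1_BUS_DIV>; */
	ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(ipg))
		return PTR_ERR(ipg);

	ret = clk_prepare_enable(ipg);
	if (ret)
		return ret;

	return 0;
}
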
+
+static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+
+ clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
+ IMX7ULP_CLK_PCC2_END, GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->num = IMX7ULP_CLK_PCC2_END;
+ clks = clk_data->hws;
+
+ /* PCC2 */
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ clks[IMX7ULP_CLK_DMA1] = imx_clk_hw_gate("dma1", "nic1_clk", base + 0x20, 30);
+ clks[IMX7ULP_CLK_RGPIO2P1] = imx_clk_hw_gate("rgpio2p1", "nic1_bus_clk", base + 0x3c, 30);
+ clks[IMX7ULP_CLK_DMA_MUX1] = imx_clk_hw_gate("dma_mux1", "nic1_bus_clk", base + 0x84, 30);
+ clks[IMX7ULP_CLK_SNVS] = imx_clk_hw_gate("snvs", "nic1_bus_clk", base + 0x8c, 30);
+ clks[IMX7ULP_CLK_CAAM] = imx_clk_hw_gate("caam", "nic1_clk", base + 0x90, 30);
+ clks[IMX7ULP_CLK_LPTPM4] = imx7ulp_clk_composite("lptpm4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
+ clks[IMX7ULP_CLK_LPTPM5] = imx7ulp_clk_composite("lptpm5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
+ clks[IMX7ULP_CLK_LPIT1] = imx7ulp_clk_composite("lpit1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
+ clks[IMX7ULP_CLK_LPSPI2] = imx7ulp_clk_composite("lpspi2", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa4);
+ clks[IMX7ULP_CLK_LPSPI3] = imx7ulp_clk_composite("lpspi3", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xa8);
+ clks[IMX7ULP_CLK_LPI2C4] = imx7ulp_clk_composite("lpi2c4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xac);
+ clks[IMX7ULP_CLK_LPI2C5] = imx7ulp_clk_composite("lpi2c5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb0);
+ clks[IMX7ULP_CLK_LPUART4] = imx7ulp_clk_composite("lpuart4", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb4);
+ clks[IMX7ULP_CLK_LPUART5] = imx7ulp_clk_composite("lpuart5", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xb8);
+ clks[IMX7ULP_CLK_FLEXIO1] = imx7ulp_clk_composite("flexio1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0xc4);
+ clks[IMX7ULP_CLK_USB0] = imx7ulp_clk_composite("usb0", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xcc);
+ clks[IMX7ULP_CLK_USB1] = imx7ulp_clk_composite("usb1", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xd0);
+ clks[IMX7ULP_CLK_USB_PHY] = imx_clk_hw_gate("usb_phy", "nic1_bus_clk", base + 0xd4, 30);
+ clks[IMX7ULP_CLK_USDHC0] = imx7ulp_clk_composite("usdhc0", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xdc);
+ clks[IMX7ULP_CLK_USDHC1] = imx7ulp_clk_composite("usdhc1", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xe0);
+ clks[IMX7ULP_CLK_WDG1] = imx7ulp_clk_composite("wdg1", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true, true, base + 0xf4);
+ clks[IMX7ULP_CLK_WDG2] = imx7ulp_clk_composite("wdg2", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true, true, base + 0x10c);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+}
+CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
+
+static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+
+ clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
+ IMX7ULP_CLK_PCC3_END, GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->num = IMX7ULP_CLK_PCC3_END;
+ clks = clk_data->hws;
+
+ /* PCC3 */
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ clks[IMX7ULP_CLK_LPTPM6] = imx7ulp_clk_composite("lptpm6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x84);
+ clks[IMX7ULP_CLK_LPTPM7] = imx7ulp_clk_composite("lptpm7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x88);
+
+ clks[IMX7ULP_CLK_MMDC] = clk_hw_register_gate(NULL, "mmdc", "nic1_clk", CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ base + 0xac, 30, 0, &imx_ccm_lock);
+ clks[IMX7ULP_CLK_LPI2C6] = imx7ulp_clk_composite("lpi2c6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x90);
+ clks[IMX7ULP_CLK_LPI2C7] = imx7ulp_clk_composite("lpi2c7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x94);
+ clks[IMX7ULP_CLK_LPUART6] = imx7ulp_clk_composite("lpuart6", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x98);
+ clks[IMX7ULP_CLK_LPUART7] = imx7ulp_clk_composite("lpuart7", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, false, true, base + 0x9c);
+ clks[IMX7ULP_CLK_DSI] = imx7ulp_clk_composite("dsi", periph_bus_sels, ARRAY_SIZE(periph_bus_sels), true, true, true, base + 0xa4);
+ clks[IMX7ULP_CLK_LCDIF] = imx7ulp_clk_composite("lcdif", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, true, true, base + 0xa8);
+
+ clks[IMX7ULP_CLK_VIU] = imx_clk_hw_gate("viu", "nic1_clk", base + 0xa0, 30);
+ clks[IMX7ULP_CLK_PCTLC] = imx_clk_hw_gate("pctlc", "nic1_bus_clk", base + 0xb8, 30);
+ clks[IMX7ULP_CLK_PCTLD] = imx_clk_hw_gate("pctld", "nic1_bus_clk", base + 0xbc, 30);
+ clks[IMX7ULP_CLK_PCTLE] = imx_clk_hw_gate("pctle", "nic1_bus_clk", base + 0xc0, 30);
+ clks[IMX7ULP_CLK_PCTLF] = imx_clk_hw_gate("pctlf", "nic1_bus_clk", base + 0xc4, 30);
+
+ clks[IMX7ULP_CLK_GPU3D] = imx7ulp_clk_composite("gpu3d", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x140);
+ clks[IMX7ULP_CLK_GPU2D] = imx7ulp_clk_composite("gpu2d", periph_plat_sels, ARRAY_SIZE(periph_plat_sels), true, false, true, base + 0x144);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+}
+CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
+
+static void __init imx7ulp_clk_smc1_init(struct device_node *np)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+
+ clk_data = kzalloc(sizeof(*clk_data) + sizeof(*clk_data->hws) *
+ IMX7ULP_CLK_SMC1_END, GFP_KERNEL);
+ if (!clk_data)
+ return;
+
+ clk_data->num = IMX7ULP_CLK_SMC1_END;
+ clks = clk_data->hws;
+
+ /* SMC1 */
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
+
+ clks[IMX7ULP_CLK_ARM] = imx_clk_hw_mux_flags("arm", base + 0x10, 8, 2, arm_sels, ARRAY_SIZE(arm_sels), CLK_IS_CRITICAL);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+}
+CLK_OF_DECLARE(imx7ulp_clk_smc1, "fsl,imx7ulp-smc1", imx7ulp_clk_smc1_init);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
new file mode 100644
index 000000000000..26b57f43ccc3
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -0,0 +1,589 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 NXP.
+ * Copyright (C) 2017 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <dt-bindings/clock/imx8mq-clock.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+#include "clk.h"
+
+static u32 share_count_sai1;
+static u32 share_count_sai2;
+static u32 share_count_sai3;
+static u32 share_count_sai4;
+static u32 share_count_sai5;
+static u32 share_count_sai6;
+static u32 share_count_dcss;
+static u32 share_count_nand;
+
+static struct clk *clks[IMX8MQ_CLK_END];
+
+static const char *pll_ref_sels[] = { "osc_25m", "osc_27m", "dummy", "dummy", };
+static const char *arm_pll_bypass_sels[] = {"arm_pll", "arm_pll_ref_sel", };
+static const char *gpu_pll_bypass_sels[] = {"gpu_pll", "gpu_pll_ref_sel", };
+static const char *vpu_pll_bypass_sels[] = {"vpu_pll", "vpu_pll_ref_sel", };
+static const char *audio_pll1_bypass_sels[] = {"audio_pll1", "audio_pll1_ref_sel", };
+static const char *audio_pll2_bypass_sels[] = {"audio_pll2", "audio_pll2_ref_sel", };
+static const char *video_pll1_bypass_sels[] = {"video_pll1", "video_pll1_ref_sel", };
+
+static const char *sys1_pll1_out_sels[] = {"sys1_pll1", "sys1_pll1_ref_sel", };
+static const char *sys2_pll1_out_sels[] = {"sys2_pll1", "sys2_pll1_ref_sel", };
+static const char *sys3_pll1_out_sels[] = {"sys3_pll1", "sys3_pll1_ref_sel", };
+static const char *dram_pll1_out_sels[] = {"dram_pll1", "dram_pll1_ref_sel", };
+
+static const char *sys1_pll2_out_sels[] = {"sys1_pll2_div", "sys1_pll1_ref_sel", };
+static const char *sys2_pll2_out_sels[] = {"sys2_pll2_div", "sys2_pll1_ref_sel", };
+static const char *sys3_pll2_out_sels[] = {"sys3_pll2_div", "sys3_pll1_ref_sel", };
+static const char *dram_pll2_out_sels[] = {"dram_pll2_div", "dram_pll1_ref_sel", };
+
+/* CCM ROOT */
+static const char *imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
+ "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll2_out", };
+
+static const char *imx8mq_vpu_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m",
+ "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "vpu_pll_out", };
+
+static const char *imx8mq_gpu_core_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
+ "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mq_gpu_shader_sels[] = {"osc_25m", "gpu_pll_out", "sys1_pll_800m", "sys3_pll2_out",
+ "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mq_main_axi_sels[] = {"osc_25m", "sys2_pll_333m", "sys1_pll_800m", "sys2_pll_250m",
+ "sys2_pll_1000m", "audio_pll1_out", "video_pll1_out", "sys1_pll_100m",};
+
+static const char *imx8mq_enet_axi_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_250m",
+ "sys2_pll_200m", "audio_pll1_out", "video_pll1_out", "sys3_pll2_out", };
+
+static const char *imx8mq_nand_usdhc_sels[] = {"osc_25m", "sys1_pll_266m", "sys1_pll_800m", "sys2_pll_200m",
+ "sys1_pll_133m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll1_out", };
+
+static const char *imx8mq_vpu_bus_sels[] = {"osc_25m", "sys1_pll_800m", "vpu_pll_out", "audio_pll2_out", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_200m", "sys1_pll_100m", };
+
+static const char *imx8mq_disp_axi_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out", "sys1_pll_400m", "audio_pll2_out", "clk_ext1", "clk_ext4", };
+
+static const char *imx8mq_disp_apb_sels[] = {"osc_25m", "sys2_pll_125m", "sys1_pll_800m", "sys3_pll2_out",
+ "sys1_pll_40m", "audio_pll2_out", "clk_ext1", "clk_ext3", };
+
+static const char *imx8mq_disp_rtrm_sels[] = {"osc_25m", "sys1_pll_800m", "sys2_pll_200m", "sys1_pll_400m",
+ "audio_pll1_out", "video_pll1_out", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mq_usb_bus_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_100m",
+ "sys2_pll_200m", "clk_ext2", "clk_ext4", "audio_pll2_out", };
+
+static const char *imx8mq_gpu_axi_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mq_gpu_ahb_sels[] = {"osc_25m", "sys1_pll_800m", "gpu_pll_out", "sys3_pll2_out", "sys2_pll_1000m",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mq_noc_sels[] = {"osc_25m", "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_1000m", "sys2_pll_500m",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mq_noc_apb_sels[] = {"osc_25m", "sys1_pll_400m", "sys3_pll2_out", "sys2_pll_333m", "sys2_pll_200m",
+ "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", };
+
+static const char *imx8mq_ahb_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_800m", "sys1_pll_400m",
+ "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", };
+
+static const char *imx8mq_audio_ahb_sels[] = {"osc_25m", "sys2_pll_500m", "sys1_pll_800m", "sys2_pll_1000m",
+ "sys2_pll_166m", "sys3_pll2_out", "audio_pll1_out", "video_pll1_out", };
+
+static const char *imx8mq_dsi_ahb_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+ "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out"};
+
+static const char *imx8mq_dram_alt_sels[] = {"osc_25m", "sys1_pll_800m", "sys1_pll_100m", "sys2_pll_500m",
+ "sys2_pll_250m", "sys1_pll_400m", "audio_pll1_out", "sys1_pll_266m", };
+
+static const char *imx8mq_dram_apb_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+ "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
+
+static const char *imx8mq_vpu_g1_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
+
+static const char *imx8mq_vpu_g2_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_100m", "sys2_pll_125m", "sys3_pll2_out", "audio_pll1_out", };
+
+static const char *imx8mq_disp_dtrc_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
+
+static const char *imx8mq_disp_dc8000_sels[] = {"osc_25m", "vpu_pll_out", "sys1_pll_800m", "sys2_pll_1000m", "sys1_pll_160m", "sys2_pll_100m", "sys3_pll2_out", "audio_pll2_out", };
+
+static const char *imx8mq_pcie1_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
+ "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_250m", "sys3_pll2_out", };
+
+static const char *imx8mq_pcie1_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4", };
+
+static const char *imx8mq_pcie1_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_500m", "sys3_pll2_out",
+ "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", };
+
+static const char *imx8mq_dc_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
+
+static const char *imx8mq_lcdif_pixel_sels[] = {"osc_25m", "video_pll1_out", "audio_pll2_out", "audio_pll1_out", "sys1_pll_800m", "sys2_pll_1000m", "sys3_pll2_out", "clk_ext4", };
+
+static const char *imx8mq_sai1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
+
+static const char *imx8mq_sai2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mq_sai3_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
+
+static const char *imx8mq_sai4_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext1", "clk_ext2", };
+
+static const char *imx8mq_sai5_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mq_sai6_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
+
+static const char *imx8mq_spdif1_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext2", "clk_ext3", };
+
+static const char *imx8mq_spdif2_sels[] = {"osc_25m", "audio_pll1_out", "audio_pll2_out", "video_pll1_out", "sys1_pll_133m", "osc_27m", "clk_ext3", "clk_ext4", };
+
+static const char *imx8mq_enet_ref_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_500m", "sys2_pll_100m",
+ "sys1_pll_160m", "audio_pll1_out", "video_pll1_out", "clk_ext4", };
+
+static const char *imx8mq_enet_timer_sels[] = {"osc_25m", "sys2_pll_100m", "audio_pll1_out", "clk_ext1", "clk_ext2",
+ "clk_ext3", "clk_ext4", "video_pll1_out", };
+
+static const char *imx8mq_enet_phy_sels[] = {"osc_25m", "sys2_pll_50m", "sys2_pll_125m", "sys2_pll_500m",
+ "audio_pll1_out", "video_pll1_out", "audio_pll2_out", };
+
+static const char *imx8mq_nand_sels[] = {"osc_25m", "sys2_pll_500m", "audio_pll1_out", "sys1_pll_400m",
+ "audio_pll2_out", "sys3_pll2_out", "sys2_pll_250m", "video_pll1_out", };
+
+static const char *imx8mq_qspi_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+ "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
+
+static const char *imx8mq_usdhc1_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+ "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
+
+static const char *imx8mq_usdhc2_sels[] = {"osc_25m", "sys1_pll_400m", "sys1_pll_800m", "sys2_pll_500m",
+ "audio_pll2_out", "sys1_pll_266m", "sys3_pll2_out", "sys1_pll_100m", };
+
+static const char *imx8mq_i2c1_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
+
+static const char *imx8mq_i2c2_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
+
+static const char *imx8mq_i2c3_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
+
+static const char *imx8mq_i2c4_sels[] = {"osc_25m", "sys1_pll_160m", "sys2_pll_50m", "sys3_pll2_out", "audio_pll1_out",
+ "video_pll1_out", "audio_pll2_out", "sys1_pll_133m", };
+
+static const char *imx8mq_uart1_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+ "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
+
+static const char *imx8mq_uart2_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+ "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mq_uart3_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+ "sys3_pll2_out", "clk_ext2", "clk_ext4", "audio_pll2_out", };
+
+static const char *imx8mq_uart4_sels[] = {"osc_25m", "sys1_pll_80m", "sys2_pll_200m", "sys2_pll_100m",
+ "sys3_pll2_out", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mq_usb_core_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
+ "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mq_usb_phy_sels[] = {"osc_25m", "sys1_pll_100m", "sys1_pll_40m", "sys2_pll_100m",
+ "sys2_pll_200m", "clk_ext2", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mq_ecspi1_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+ "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
+
+static const char *imx8mq_ecspi2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+ "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
+
+static const char *imx8mq_pwm1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+ "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", };
+
+static const char *imx8mq_pwm2_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+ "sys3_pll2_out", "clk_ext1", "sys1_pll_80m", "video_pll1_out", };
+
+static const char *imx8mq_pwm3_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+ "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", };
+
+static const char *imx8mq_pwm4_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_160m", "sys1_pll_40m",
+ "sys3_pll2_out", "clk_ext2", "sys1_pll_80m", "video_pll1_out", };
+
+static const char *imx8mq_gpt1_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_400m", "sys1_pll_40m",
+ "sys1_pll_80m", "audio_pll1_out", "clk_ext1", };
+
+static const char *imx8mq_wdog_sels[] = {"osc_25m", "sys1_pll_133m", "sys1_pll_160m", "vpu_pll_out",
+ "sys2_pll_125m", "sys3_pll2_out", "sys1_pll_80m", "sys2_pll_166m", };
+
+static const char *imx8mq_wrclk_sels[] = {"osc_25m", "sys1_pll_40m", "vpu_pll_out", "sys3_pll2_out", "sys2_pll_200m",
+ "sys1_pll_266m", "sys2_pll_500m", "sys1_pll_100m", };
+
+static const char *imx8mq_dsi_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
+ "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mq_dsi_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
+ "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mq_dsi_dbi_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_100m", "sys1_pll_800m",
+ "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mq_dsi_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+ "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mq_csi1_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
+ "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mq_csi1_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
+ "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mq_csi1_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+ "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mq_csi2_core_sels[] = {"osc_25m", "sys1_pll_266m", "sys2_pll_250m", "sys1_pll_800m",
+ "sys2_pll_1000m", "sys3_pll2_out", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mq_csi2_phy_sels[] = {"osc_25m", "sys2_pll_125m", "sys2_pll_100m", "sys1_pll_800m",
+ "sys2_pll_1000m", "clk_ext2", "audio_pll2_out", "video_pll1_out", };
+
+static const char *imx8mq_csi2_esc_sels[] = {"osc_25m", "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_800m",
+ "sys2_pll_1000m", "sys3_pll2_out", "clk_ext3", "audio_pll2_out", };
+
+static const char *imx8mq_pcie2_ctrl_sels[] = {"osc_25m", "sys2_pll_250m", "sys2_pll_200m", "sys1_pll_266m",
+ "sys1_pll_800m", "sys2_pll_500m", "sys2_pll_333m", "sys3_pll2_out", };
+
+static const char *imx8mq_pcie2_phy_sels[] = {"osc_25m", "sys2_pll_100m", "sys2_pll_500m", "clk_ext1",
+ "clk_ext2", "clk_ext3", "clk_ext4", "sys1_pll_400m", };
+
+static const char *imx8mq_pcie2_aux_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_50m", "sys3_pll2_out",
+ "sys2_pll_100m", "sys1_pll_80m", "sys1_pll_160m", "sys1_pll_200m", };
+
+static const char *imx8mq_ecspi3_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_40m", "sys1_pll_160m",
+ "sys1_pll_800m", "sys3_pll2_out", "sys2_pll_250m", "audio_pll2_out", };
+static const char *imx8mq_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
+
+static const char *imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sys1_pll_400m", "sys2_pll_166m", "audio_pll1_out",
+ "video_pll1_out", "ckil", };
+
+static struct clk_onecell_data clk_data;
+
+static int imx8mq_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ void __iomem *base;
+ int err;
+ int i;
+
+ clks[IMX8MQ_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
+ clks[IMX8MQ_CLK_32K] = of_clk_get_by_name(np, "ckil");
+ clks[IMX8MQ_CLK_25M] = of_clk_get_by_name(np, "osc_25m");
+ clks[IMX8MQ_CLK_27M] = of_clk_get_by_name(np, "osc_27m");
+ clks[IMX8MQ_CLK_EXT1] = of_clk_get_by_name(np, "clk_ext1");
+ clks[IMX8MQ_CLK_EXT2] = of_clk_get_by_name(np, "clk_ext2");
+ clks[IMX8MQ_CLK_EXT3] = of_clk_get_by_name(np, "clk_ext3");
+ clks[IMX8MQ_CLK_EXT4] = of_clk_get_by_name(np, "clk_ext4");
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop");
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base))
+ return -ENOMEM;
+
+ clks[IMX8MQ_ARM_PLL_REF_SEL] = imx_clk_mux("arm_pll_ref_sel", base + 0x28, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MQ_GPU_PLL_REF_SEL] = imx_clk_mux("gpu_pll_ref_sel", base + 0x18, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MQ_VPU_PLL_REF_SEL] = imx_clk_mux("vpu_pll_ref_sel", base + 0x20, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MQ_AUDIO_PLL1_REF_SEL] = imx_clk_mux("audio_pll1_ref_sel", base + 0x0, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MQ_AUDIO_PLL2_REF_SEL] = imx_clk_mux("audio_pll2_ref_sel", base + 0x8, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MQ_VIDEO_PLL1_REF_SEL] = imx_clk_mux("video_pll1_ref_sel", base + 0x10, 16, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MQ_SYS1_PLL1_REF_SEL] = imx_clk_mux("sys1_pll1_ref_sel", base + 0x30, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MQ_SYS2_PLL1_REF_SEL] = imx_clk_mux("sys2_pll1_ref_sel", base + 0x3c, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MQ_SYS3_PLL1_REF_SEL] = imx_clk_mux("sys3_pll1_ref_sel", base + 0x48, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+ clks[IMX8MQ_DRAM_PLL1_REF_SEL] = imx_clk_mux("dram_pll1_ref_sel", base + 0x60, 0, 2, pll_ref_sels, ARRAY_SIZE(pll_ref_sels));
+
+ clks[IMX8MQ_ARM_PLL_REF_DIV] = imx_clk_divider("arm_pll_ref_div", "arm_pll_ref_sel", base + 0x28, 5, 6);
+ clks[IMX8MQ_GPU_PLL_REF_DIV] = imx_clk_divider("gpu_pll_ref_div", "gpu_pll_ref_sel", base + 0x18, 5, 6);
+ clks[IMX8MQ_VPU_PLL_REF_DIV] = imx_clk_divider("vpu_pll_ref_div", "vpu_pll_ref_sel", base + 0x20, 5, 6);
+ clks[IMX8MQ_AUDIO_PLL1_REF_DIV] = imx_clk_divider("audio_pll1_ref_div", "audio_pll1_ref_sel", base + 0x0, 5, 6);
+ clks[IMX8MQ_AUDIO_PLL2_REF_DIV] = imx_clk_divider("audio_pll2_ref_div", "audio_pll2_ref_sel", base + 0x8, 5, 6);
+ clks[IMX8MQ_VIDEO_PLL1_REF_DIV] = imx_clk_divider("video_pll1_ref_div", "video_pll1_ref_sel", base + 0x10, 5, 6);
+ clks[IMX8MQ_SYS1_PLL1_REF_DIV] = imx_clk_divider("sys1_pll1_ref_div", "sys1_pll1_ref_sel", base + 0x38, 25, 3);
+ clks[IMX8MQ_SYS2_PLL1_REF_DIV] = imx_clk_divider("sys2_pll1_ref_div", "sys2_pll1_ref_sel", base + 0x44, 25, 3);
+ clks[IMX8MQ_SYS3_PLL1_REF_DIV] = imx_clk_divider("sys3_pll1_ref_div", "sys3_pll1_ref_sel", base + 0x50, 25, 3);
+ clks[IMX8MQ_DRAM_PLL1_REF_DIV] = imx_clk_divider("dram_pll1_ref_div", "dram_pll1_ref_sel", base + 0x68, 25, 3);
+
+ clks[IMX8MQ_ARM_PLL] = imx_clk_frac_pll("arm_pll", "arm_pll_ref_div", base + 0x28);
+ clks[IMX8MQ_GPU_PLL] = imx_clk_frac_pll("gpu_pll", "gpu_pll_ref_div", base + 0x18);
+ clks[IMX8MQ_VPU_PLL] = imx_clk_frac_pll("vpu_pll", "vpu_pll_ref_div", base + 0x20);
+ clks[IMX8MQ_AUDIO_PLL1] = imx_clk_frac_pll("audio_pll1", "audio_pll1_ref_div", base + 0x0);
+ clks[IMX8MQ_AUDIO_PLL2] = imx_clk_frac_pll("audio_pll2", "audio_pll2_ref_div", base + 0x8);
+ clks[IMX8MQ_VIDEO_PLL1] = imx_clk_frac_pll("video_pll1", "video_pll1_ref_div", base + 0x10);
+ clks[IMX8MQ_SYS1_PLL1] = imx_clk_sccg_pll("sys1_pll1", "sys1_pll1_ref_div", base + 0x30, SCCG_PLL1);
+ clks[IMX8MQ_SYS2_PLL1] = imx_clk_sccg_pll("sys2_pll1", "sys2_pll1_ref_div", base + 0x3c, SCCG_PLL1);
+ clks[IMX8MQ_SYS3_PLL1] = imx_clk_sccg_pll("sys3_pll1", "sys3_pll1_ref_div", base + 0x48, SCCG_PLL1);
+ clks[IMX8MQ_DRAM_PLL1] = imx_clk_sccg_pll("dram_pll1", "dram_pll1_ref_div", base + 0x60, SCCG_PLL1);
+
+ clks[IMX8MQ_SYS1_PLL2] = imx_clk_sccg_pll("sys1_pll2", "sys1_pll1_out_div", base + 0x30, SCCG_PLL2);
+ clks[IMX8MQ_SYS2_PLL2] = imx_clk_sccg_pll("sys2_pll2", "sys2_pll1_out_div", base + 0x3c, SCCG_PLL2);
+ clks[IMX8MQ_SYS3_PLL2] = imx_clk_sccg_pll("sys3_pll2", "sys3_pll1_out_div", base + 0x48, SCCG_PLL2);
+ clks[IMX8MQ_DRAM_PLL2] = imx_clk_sccg_pll("dram_pll2", "dram_pll1_out_div", base + 0x60, SCCG_PLL2);
+
+ /* PLL divs */
+ clks[IMX8MQ_SYS1_PLL1_OUT_DIV] = imx_clk_divider("sys1_pll1_out_div", "sys1_pll1_out", base + 0x38, 19, 6);
+ clks[IMX8MQ_SYS2_PLL1_OUT_DIV] = imx_clk_divider("sys2_pll1_out_div", "sys2_pll1_out", base + 0x44, 19, 6);
+ clks[IMX8MQ_SYS3_PLL1_OUT_DIV] = imx_clk_divider("sys3_pll1_out_div", "sys3_pll1_out", base + 0x50, 19, 6);
+ clks[IMX8MQ_DRAM_PLL1_OUT_DIV] = imx_clk_divider("dram_pll1_out_div", "dram_pll1_out", base + 0x68, 19, 6);
+ clks[IMX8MQ_SYS1_PLL2_DIV] = imx_clk_divider("sys1_pll2_div", "sys1_pll2", base + 0x38, 1, 6);
+ clks[IMX8MQ_SYS2_PLL2_DIV] = imx_clk_divider("sys2_pll2_div", "sys2_pll2", base + 0x44, 1, 6);
+ clks[IMX8MQ_SYS3_PLL2_DIV] = imx_clk_divider("sys3_pll2_div", "sys3_pll2", base + 0x50, 1, 6);
+ clks[IMX8MQ_DRAM_PLL2_DIV] = imx_clk_divider("dram_pll2_div", "dram_pll2", base + 0x68, 1, 6);
+
+ /* PLL bypass out */
+ clks[IMX8MQ_ARM_PLL_BYPASS] = imx_clk_mux("arm_pll_bypass", base + 0x28, 14, 1, arm_pll_bypass_sels, ARRAY_SIZE(arm_pll_bypass_sels));
+ clks[IMX8MQ_GPU_PLL_BYPASS] = imx_clk_mux("gpu_pll_bypass", base + 0x18, 14, 1, gpu_pll_bypass_sels, ARRAY_SIZE(gpu_pll_bypass_sels));
+ clks[IMX8MQ_VPU_PLL_BYPASS] = imx_clk_mux("vpu_pll_bypass", base + 0x20, 14, 1, vpu_pll_bypass_sels, ARRAY_SIZE(vpu_pll_bypass_sels));
+ clks[IMX8MQ_AUDIO_PLL1_BYPASS] = imx_clk_mux("audio_pll1_bypass", base + 0x0, 14, 1, audio_pll1_bypass_sels, ARRAY_SIZE(audio_pll1_bypass_sels));
+ clks[IMX8MQ_AUDIO_PLL2_BYPASS] = imx_clk_mux("audio_pll2_bypass", base + 0x8, 14, 1, audio_pll2_bypass_sels, ARRAY_SIZE(audio_pll2_bypass_sels));
+ clks[IMX8MQ_VIDEO_PLL1_BYPASS] = imx_clk_mux("video_pll1_bypass", base + 0x10, 14, 1, video_pll1_bypass_sels, ARRAY_SIZE(video_pll1_bypass_sels));
+
+ clks[IMX8MQ_SYS1_PLL1_OUT] = imx_clk_mux("sys1_pll1_out", base + 0x30, 5, 1, sys1_pll1_out_sels, ARRAY_SIZE(sys1_pll1_out_sels));
+ clks[IMX8MQ_SYS2_PLL1_OUT] = imx_clk_mux("sys2_pll1_out", base + 0x3c, 5, 1, sys2_pll1_out_sels, ARRAY_SIZE(sys2_pll1_out_sels));
+ clks[IMX8MQ_SYS3_PLL1_OUT] = imx_clk_mux("sys3_pll1_out", base + 0x48, 5, 1, sys3_pll1_out_sels, ARRAY_SIZE(sys3_pll1_out_sels));
+ clks[IMX8MQ_DRAM_PLL1_OUT] = imx_clk_mux("dram_pll1_out", base + 0x60, 5, 1, dram_pll1_out_sels, ARRAY_SIZE(dram_pll1_out_sels));
+ clks[IMX8MQ_SYS1_PLL2_OUT] = imx_clk_mux("sys1_pll2_out", base + 0x30, 4, 1, sys1_pll2_out_sels, ARRAY_SIZE(sys1_pll2_out_sels));
+ clks[IMX8MQ_SYS2_PLL2_OUT] = imx_clk_mux("sys2_pll2_out", base + 0x3c, 4, 1, sys2_pll2_out_sels, ARRAY_SIZE(sys2_pll2_out_sels));
+ clks[IMX8MQ_SYS3_PLL2_OUT] = imx_clk_mux("sys3_pll2_out", base + 0x48, 4, 1, sys3_pll2_out_sels, ARRAY_SIZE(sys3_pll2_out_sels));
+ clks[IMX8MQ_DRAM_PLL2_OUT] = imx_clk_mux("dram_pll2_out", base + 0x60, 4, 1, dram_pll2_out_sels, ARRAY_SIZE(dram_pll2_out_sels));
+
+ /* PLL OUT GATE */
+ clks[IMX8MQ_ARM_PLL_OUT] = imx_clk_gate("arm_pll_out", "arm_pll_bypass", base + 0x28, 21);
+ clks[IMX8MQ_GPU_PLL_OUT] = imx_clk_gate("gpu_pll_out", "gpu_pll_bypass", base + 0x18, 21);
+ clks[IMX8MQ_VPU_PLL_OUT] = imx_clk_gate("vpu_pll_out", "vpu_pll_bypass", base + 0x20, 21);
+ clks[IMX8MQ_AUDIO_PLL1_OUT] = imx_clk_gate("audio_pll1_out", "audio_pll1_bypass", base + 0x0, 21);
+ clks[IMX8MQ_AUDIO_PLL2_OUT] = imx_clk_gate("audio_pll2_out", "audio_pll2_bypass", base + 0x8, 21);
+ clks[IMX8MQ_VIDEO_PLL1_OUT] = imx_clk_gate("video_pll1_out", "video_pll1_bypass", base + 0x10, 21);
+ clks[IMX8MQ_SYS1_PLL_OUT] = imx_clk_gate("sys1_pll_out", "sys1_pll2_out", base + 0x30, 9);
+ clks[IMX8MQ_SYS2_PLL_OUT] = imx_clk_gate("sys2_pll_out", "sys2_pll2_out", base + 0x3c, 9);
+ clks[IMX8MQ_SYS3_PLL_OUT] = imx_clk_gate("sys3_pll_out", "sys3_pll2_out", base + 0x48, 9);
+ clks[IMX8MQ_DRAM_PLL_OUT] = imx_clk_gate("dram_pll_out", "dram_pll2_out", base + 0x60, 9);
+
+ /* SYS PLL fixed output */
+ clks[IMX8MQ_SYS1_PLL_40M] = imx_clk_fixed_factor("sys1_pll_40m", "sys1_pll_out", 1, 20);
+ clks[IMX8MQ_SYS1_PLL_80M] = imx_clk_fixed_factor("sys1_pll_80m", "sys1_pll_out", 1, 10);
+ clks[IMX8MQ_SYS1_PLL_100M] = imx_clk_fixed_factor("sys1_pll_100m", "sys1_pll_out", 1, 8);
+ clks[IMX8MQ_SYS1_PLL_133M] = imx_clk_fixed_factor("sys1_pll_133m", "sys1_pll_out", 1, 6);
+ clks[IMX8MQ_SYS1_PLL_160M] = imx_clk_fixed_factor("sys1_pll_160m", "sys1_pll_out", 1, 5);
+ clks[IMX8MQ_SYS1_PLL_200M] = imx_clk_fixed_factor("sys1_pll_200m", "sys1_pll_out", 1, 4);
+ clks[IMX8MQ_SYS1_PLL_266M] = imx_clk_fixed_factor("sys1_pll_266m", "sys1_pll_out", 1, 3);
+ clks[IMX8MQ_SYS1_PLL_400M] = imx_clk_fixed_factor("sys1_pll_400m", "sys1_pll_out", 1, 2);
+ clks[IMX8MQ_SYS1_PLL_800M] = imx_clk_fixed_factor("sys1_pll_800m", "sys1_pll_out", 1, 1);
+
+ clks[IMX8MQ_SYS2_PLL_50M] = imx_clk_fixed_factor("sys2_pll_50m", "sys2_pll_out", 1, 20);
+ clks[IMX8MQ_SYS2_PLL_100M] = imx_clk_fixed_factor("sys2_pll_100m", "sys2_pll_out", 1, 10);
+ clks[IMX8MQ_SYS2_PLL_125M] = imx_clk_fixed_factor("sys2_pll_125m", "sys2_pll_out", 1, 8);
+ clks[IMX8MQ_SYS2_PLL_166M] = imx_clk_fixed_factor("sys2_pll_166m", "sys2_pll_out", 1, 6);
+ clks[IMX8MQ_SYS2_PLL_200M] = imx_clk_fixed_factor("sys2_pll_200m", "sys2_pll_out", 1, 5);
+ clks[IMX8MQ_SYS2_PLL_250M] = imx_clk_fixed_factor("sys2_pll_250m", "sys2_pll_out", 1, 4);
+ clks[IMX8MQ_SYS2_PLL_333M] = imx_clk_fixed_factor("sys2_pll_333m", "sys2_pll_out", 1, 3);
+ clks[IMX8MQ_SYS2_PLL_500M] = imx_clk_fixed_factor("sys2_pll_500m", "sys2_pll_out", 1, 2);
+ clks[IMX8MQ_SYS2_PLL_1000M] = imx_clk_fixed_factor("sys2_pll_1000m", "sys2_pll_out", 1, 1);
+
+ np = dev->of_node;
+ base = of_iomap(np, 0);
+ if (WARN_ON(!base))
+ return -ENOMEM;
+
+ /* CORE */
+ clks[IMX8MQ_CLK_A53_SRC] = imx_clk_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels));
+ clks[IMX8MQ_CLK_VPU_SRC] = imx_clk_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels));
+ clks[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels));
+ clks[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels));
+ clks[IMX8MQ_CLK_A53_CG] = imx_clk_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL);
+ clks[IMX8MQ_CLK_VPU_CG] = imx_clk_gate3("vpu_cg", "vpu_src", base + 0x8100, 28);
+ clks[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28);
+ clks[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28);
+
+ clks[IMX8MQ_CLK_A53_DIV] = imx_clk_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3);
+ clks[IMX8MQ_CLK_VPU_DIV] = imx_clk_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3);
+ clks[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3);
+ clks[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3);
+
+ /* BUS */
+ clks[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800);
+ clks[IMX8MQ_CLK_ENET_AXI] = imx8m_clk_composite("enet_axi", imx8mq_enet_axi_sels, base + 0x8880);
+ clks[IMX8MQ_CLK_NAND_USDHC_BUS] = imx8m_clk_composite("nand_usdhc_bus", imx8mq_nand_usdhc_sels, base + 0x8900);
+ clks[IMX8MQ_CLK_VPU_BUS] = imx8m_clk_composite("vpu_bus", imx8mq_vpu_bus_sels, base + 0x8980);
+ clks[IMX8MQ_CLK_DISP_AXI] = imx8m_clk_composite("disp_axi", imx8mq_disp_axi_sels, base + 0x8a00);
+ clks[IMX8MQ_CLK_DISP_APB] = imx8m_clk_composite("disp_apb", imx8mq_disp_apb_sels, base + 0x8a80);
+ clks[IMX8MQ_CLK_DISP_RTRM] = imx8m_clk_composite("disp_rtrm", imx8mq_disp_rtrm_sels, base + 0x8b00);
+ clks[IMX8MQ_CLK_USB_BUS] = imx8m_clk_composite("usb_bus", imx8mq_usb_bus_sels, base + 0x8b80);
+ clks[IMX8MQ_CLK_GPU_AXI] = imx8m_clk_composite("gpu_axi", imx8mq_gpu_axi_sels, base + 0x8c00);
+ clks[IMX8MQ_CLK_GPU_AHB] = imx8m_clk_composite("gpu_ahb", imx8mq_gpu_ahb_sels, base + 0x8c80);
+ clks[IMX8MQ_CLK_NOC] = imx8m_clk_composite_critical("noc", imx8mq_noc_sels, base + 0x8d00);
+ clks[IMX8MQ_CLK_NOC_APB] = imx8m_clk_composite_critical("noc_apb", imx8mq_noc_apb_sels, base + 0x8d80);
+
+ /* AHB */
+ clks[IMX8MQ_CLK_AHB] = imx8m_clk_composite("ahb", imx8mq_ahb_sels, base + 0x9000);
+ clks[IMX8MQ_CLK_AUDIO_AHB] = imx8m_clk_composite("audio_ahb", imx8mq_audio_ahb_sels, base + 0x9100);
+
+ /* IPG */
+ clks[IMX8MQ_CLK_IPG_ROOT] = imx_clk_divider2("ipg_root", "ahb", base + 0x9080, 0, 1);
+ clks[IMX8MQ_CLK_IPG_AUDIO_ROOT] = imx_clk_divider2("ipg_audio_root", "audio_ahb", base + 0x9180, 0, 1);
+
+ /* IP */
+ clks[IMX8MQ_CLK_DRAM_CORE] = imx_clk_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mq_dram_core_sels, ARRAY_SIZE(imx8mq_dram_core_sels), CLK_IS_CRITICAL);
+
+ clks[IMX8MQ_CLK_DRAM_ALT] = imx8m_clk_composite("dram_alt", imx8mq_dram_alt_sels, base + 0xa000);
+ clks[IMX8MQ_CLK_DRAM_APB] = imx8m_clk_composite_critical("dram_apb", imx8mq_dram_apb_sels, base + 0xa080);
+ clks[IMX8MQ_CLK_VPU_G1] = imx8m_clk_composite("vpu_g1", imx8mq_vpu_g1_sels, base + 0xa100);
+ clks[IMX8MQ_CLK_VPU_G2] = imx8m_clk_composite("vpu_g2", imx8mq_vpu_g2_sels, base + 0xa180);
+ clks[IMX8MQ_CLK_DISP_DTRC] = imx8m_clk_composite("disp_dtrc", imx8mq_disp_dtrc_sels, base + 0xa200);
+ clks[IMX8MQ_CLK_DISP_DC8000] = imx8m_clk_composite("disp_dc8000", imx8mq_disp_dc8000_sels, base + 0xa280);
+ clks[IMX8MQ_CLK_PCIE1_CTRL] = imx8m_clk_composite("pcie1_ctrl", imx8mq_pcie1_ctrl_sels, base + 0xa300);
+ clks[IMX8MQ_CLK_PCIE1_PHY] = imx8m_clk_composite("pcie1_phy", imx8mq_pcie1_phy_sels, base + 0xa380);
+ clks[IMX8MQ_CLK_PCIE1_AUX] = imx8m_clk_composite("pcie1_aux", imx8mq_pcie1_aux_sels, base + 0xa400);
+ clks[IMX8MQ_CLK_DC_PIXEL] = imx8m_clk_composite("dc_pixel", imx8mq_dc_pixel_sels, base + 0xa480);
+ clks[IMX8MQ_CLK_LCDIF_PIXEL] = imx8m_clk_composite("lcdif_pixel", imx8mq_lcdif_pixel_sels, base + 0xa500);
+ clks[IMX8MQ_CLK_SAI1] = imx8m_clk_composite("sai1", imx8mq_sai1_sels, base + 0xa580);
+ clks[IMX8MQ_CLK_SAI2] = imx8m_clk_composite("sai2", imx8mq_sai2_sels, base + 0xa600);
+ clks[IMX8MQ_CLK_SAI3] = imx8m_clk_composite("sai3", imx8mq_sai3_sels, base + 0xa680);
+ clks[IMX8MQ_CLK_SAI4] = imx8m_clk_composite("sai4", imx8mq_sai4_sels, base + 0xa700);
+ clks[IMX8MQ_CLK_SAI5] = imx8m_clk_composite("sai5", imx8mq_sai5_sels, base + 0xa780);
+ clks[IMX8MQ_CLK_SAI6] = imx8m_clk_composite("sai6", imx8mq_sai6_sels, base + 0xa800);
+ clks[IMX8MQ_CLK_SPDIF1] = imx8m_clk_composite("spdif1", imx8mq_spdif1_sels, base + 0xa880);
+ clks[IMX8MQ_CLK_SPDIF2] = imx8m_clk_composite("spdif2", imx8mq_spdif2_sels, base + 0xa900);
+ clks[IMX8MQ_CLK_ENET_REF] = imx8m_clk_composite("enet_ref", imx8mq_enet_ref_sels, base + 0xa980);
+ clks[IMX8MQ_CLK_ENET_TIMER] = imx8m_clk_composite("enet_timer", imx8mq_enet_timer_sels, base + 0xaa00);
+ clks[IMX8MQ_CLK_ENET_PHY_REF] = imx8m_clk_composite("enet_phy", imx8mq_enet_phy_sels, base + 0xaa80);
+ clks[IMX8MQ_CLK_NAND] = imx8m_clk_composite("nand", imx8mq_nand_sels, base + 0xab00);
+ clks[IMX8MQ_CLK_QSPI] = imx8m_clk_composite("qspi", imx8mq_qspi_sels, base + 0xab80);
+ clks[IMX8MQ_CLK_USDHC1] = imx8m_clk_composite("usdhc1", imx8mq_usdhc1_sels, base + 0xac00);
+ clks[IMX8MQ_CLK_USDHC2] = imx8m_clk_composite("usdhc2", imx8mq_usdhc2_sels, base + 0xac80);
+ clks[IMX8MQ_CLK_I2C1] = imx8m_clk_composite("i2c1", imx8mq_i2c1_sels, base + 0xad00);
+ clks[IMX8MQ_CLK_I2C2] = imx8m_clk_composite("i2c2", imx8mq_i2c2_sels, base + 0xad80);
+ clks[IMX8MQ_CLK_I2C3] = imx8m_clk_composite("i2c3", imx8mq_i2c3_sels, base + 0xae00);
+ clks[IMX8MQ_CLK_I2C4] = imx8m_clk_composite("i2c4", imx8mq_i2c4_sels, base + 0xae80);
+ clks[IMX8MQ_CLK_UART1] = imx8m_clk_composite("uart1", imx8mq_uart1_sels, base + 0xaf00);
+ clks[IMX8MQ_CLK_UART2] = imx8m_clk_composite("uart2", imx8mq_uart2_sels, base + 0xaf80);
+ clks[IMX8MQ_CLK_UART3] = imx8m_clk_composite("uart3", imx8mq_uart3_sels, base + 0xb000);
+ clks[IMX8MQ_CLK_UART4] = imx8m_clk_composite("uart4", imx8mq_uart4_sels, base + 0xb080);
+ clks[IMX8MQ_CLK_USB_CORE_REF] = imx8m_clk_composite("usb_core_ref", imx8mq_usb_core_sels, base + 0xb100);
+ clks[IMX8MQ_CLK_USB_PHY_REF] = imx8m_clk_composite("usb_phy_ref", imx8mq_usb_phy_sels, base + 0xb180);
+ clks[IMX8MQ_CLK_ECSPI1] = imx8m_clk_composite("ecspi1", imx8mq_ecspi1_sels, base + 0xb280);
+ clks[IMX8MQ_CLK_ECSPI2] = imx8m_clk_composite("ecspi2", imx8mq_ecspi2_sels, base + 0xb300);
+ clks[IMX8MQ_CLK_PWM1] = imx8m_clk_composite("pwm1", imx8mq_pwm1_sels, base + 0xb380);
+ clks[IMX8MQ_CLK_PWM2] = imx8m_clk_composite("pwm2", imx8mq_pwm2_sels, base + 0xb400);
+ clks[IMX8MQ_CLK_PWM3] = imx8m_clk_composite("pwm3", imx8mq_pwm3_sels, base + 0xb480);
+ clks[IMX8MQ_CLK_PWM4] = imx8m_clk_composite("pwm4", imx8mq_pwm4_sels, base + 0xb500);
+ clks[IMX8MQ_CLK_GPT1] = imx8m_clk_composite("gpt1", imx8mq_gpt1_sels, base + 0xb580);
+ clks[IMX8MQ_CLK_WDOG] = imx8m_clk_composite("wdog", imx8mq_wdog_sels, base + 0xb900);
+ clks[IMX8MQ_CLK_WRCLK] = imx8m_clk_composite("wrclk", imx8mq_wrclk_sels, base + 0xb980);
+ clks[IMX8MQ_CLK_CLKO2] = imx8m_clk_composite("clko2", imx8mq_clko2_sels, base + 0xba80);
+ clks[IMX8MQ_CLK_DSI_CORE] = imx8m_clk_composite("dsi_core", imx8mq_dsi_core_sels, base + 0xbb00);
+ clks[IMX8MQ_CLK_DSI_PHY_REF] = imx8m_clk_composite("dsi_phy_ref", imx8mq_dsi_phy_sels, base + 0xbb80);
+ clks[IMX8MQ_CLK_DSI_DBI] = imx8m_clk_composite("dsi_dbi", imx8mq_dsi_dbi_sels, base + 0xbc00);
+ clks[IMX8MQ_CLK_DSI_ESC] = imx8m_clk_composite("dsi_esc", imx8mq_dsi_esc_sels, base + 0xbc80);
+ clks[IMX8MQ_CLK_DSI_AHB] = imx8m_clk_composite("dsi_ahb", imx8mq_dsi_ahb_sels, base + 0x9200);
+ clks[IMX8MQ_CLK_CSI1_CORE] = imx8m_clk_composite("csi1_core", imx8mq_csi1_core_sels, base + 0xbd00);
+ clks[IMX8MQ_CLK_CSI1_PHY_REF] = imx8m_clk_composite("csi1_phy_ref", imx8mq_csi1_phy_sels, base + 0xbd80);
+ clks[IMX8MQ_CLK_CSI1_ESC] = imx8m_clk_composite("csi1_esc", imx8mq_csi1_esc_sels, base + 0xbe00);
+ clks[IMX8MQ_CLK_CSI2_CORE] = imx8m_clk_composite("csi2_core", imx8mq_csi2_core_sels, base + 0xbe80);
+ clks[IMX8MQ_CLK_CSI2_PHY_REF] = imx8m_clk_composite("csi2_phy_ref", imx8mq_csi2_phy_sels, base + 0xbf00);
+ clks[IMX8MQ_CLK_CSI2_ESC] = imx8m_clk_composite("csi2_esc", imx8mq_csi2_esc_sels, base + 0xbf80);
+ clks[IMX8MQ_CLK_PCIE2_CTRL] = imx8m_clk_composite("pcie2_ctrl", imx8mq_pcie2_ctrl_sels, base + 0xc000);
+ clks[IMX8MQ_CLK_PCIE2_PHY] = imx8m_clk_composite("pcie2_phy", imx8mq_pcie2_phy_sels, base + 0xc080);
+ clks[IMX8MQ_CLK_PCIE2_AUX] = imx8m_clk_composite("pcie2_aux", imx8mq_pcie2_aux_sels, base + 0xc100);
+ clks[IMX8MQ_CLK_ECSPI3] = imx8m_clk_composite("ecspi3", imx8mq_ecspi3_sels, base + 0xc180);
+
+ clks[IMX8MQ_CLK_ECSPI1_ROOT] = imx_clk_gate4("ecspi1_root_clk", "ecspi1", base + 0x4070, 0);
+ clks[IMX8MQ_CLK_ECSPI2_ROOT] = imx_clk_gate4("ecspi2_root_clk", "ecspi2", base + 0x4080, 0);
+ clks[IMX8MQ_CLK_ECSPI3_ROOT] = imx_clk_gate4("ecspi3_root_clk", "ecspi3", base + 0x4090, 0);
+ clks[IMX8MQ_CLK_ENET1_ROOT] = imx_clk_gate4("enet1_root_clk", "enet_axi", base + 0x40a0, 0);
+ clks[IMX8MQ_CLK_GPT1_ROOT] = imx_clk_gate4("gpt1_root_clk", "gpt1", base + 0x4100, 0);
+ clks[IMX8MQ_CLK_I2C1_ROOT] = imx_clk_gate4("i2c1_root_clk", "i2c1", base + 0x4170, 0);
+ clks[IMX8MQ_CLK_I2C2_ROOT] = imx_clk_gate4("i2c2_root_clk", "i2c2", base + 0x4180, 0);
+ clks[IMX8MQ_CLK_I2C3_ROOT] = imx_clk_gate4("i2c3_root_clk", "i2c3", base + 0x4190, 0);
+ clks[IMX8MQ_CLK_I2C4_ROOT] = imx_clk_gate4("i2c4_root_clk", "i2c4", base + 0x41a0, 0);
+ clks[IMX8MQ_CLK_MU_ROOT] = imx_clk_gate4("mu_root_clk", "ipg_root", base + 0x4210, 0);
+ clks[IMX8MQ_CLK_OCOTP_ROOT] = imx_clk_gate4("ocotp_root_clk", "ipg_root", base + 0x4220, 0);
+ clks[IMX8MQ_CLK_PCIE1_ROOT] = imx_clk_gate4("pcie1_root_clk", "pcie1_ctrl", base + 0x4250, 0);
+ clks[IMX8MQ_CLK_PCIE2_ROOT] = imx_clk_gate4("pcie2_root_clk", "pcie2_ctrl", base + 0x4640, 0);
+ clks[IMX8MQ_CLK_PWM1_ROOT] = imx_clk_gate4("pwm1_root_clk", "pwm1", base + 0x4280, 0);
+ clks[IMX8MQ_CLK_PWM2_ROOT] = imx_clk_gate4("pwm2_root_clk", "pwm2", base + 0x4290, 0);
+ clks[IMX8MQ_CLK_PWM3_ROOT] = imx_clk_gate4("pwm3_root_clk", "pwm3", base + 0x42a0, 0);
+ clks[IMX8MQ_CLK_PWM4_ROOT] = imx_clk_gate4("pwm4_root_clk", "pwm4", base + 0x42b0, 0);
+ clks[IMX8MQ_CLK_QSPI_ROOT] = imx_clk_gate4("qspi_root_clk", "qspi", base + 0x42f0, 0);
+ clks[IMX8MQ_CLK_RAWNAND_ROOT] = imx_clk_gate2_shared2("nand_root_clk", "nand", base + 0x4300, 0, &share_count_nand);
+ clks[IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK] = imx_clk_gate2_shared2("nand_usdhc_rawnand_clk", "nand_usdhc_bus", base + 0x4300, 0, &share_count_nand);
+ clks[IMX8MQ_CLK_SAI1_ROOT] = imx_clk_gate2_shared2("sai1_root_clk", "sai1", base + 0x4330, 0, &share_count_sai1);
+ clks[IMX8MQ_CLK_SAI1_IPG] = imx_clk_gate2_shared2("sai1_ipg_clk", "ipg_audio_root", base + 0x4330, 0, &share_count_sai1);
+ clks[IMX8MQ_CLK_SAI2_ROOT] = imx_clk_gate2_shared2("sai2_root_clk", "sai2", base + 0x4340, 0, &share_count_sai2);
+ clks[IMX8MQ_CLK_SAI2_IPG] = imx_clk_gate2_shared2("sai2_ipg_clk", "ipg_root", base + 0x4340, 0, &share_count_sai2);
+ clks[IMX8MQ_CLK_SAI3_ROOT] = imx_clk_gate2_shared2("sai3_root_clk", "sai3", base + 0x4350, 0, &share_count_sai3);
+ clks[IMX8MQ_CLK_SAI3_IPG] = imx_clk_gate2_shared2("sai3_ipg_clk", "ipg_root", base + 0x4350, 0, &share_count_sai3);
+ clks[IMX8MQ_CLK_SAI4_ROOT] = imx_clk_gate2_shared2("sai4_root_clk", "sai4", base + 0x4360, 0, &share_count_sai4);
+ clks[IMX8MQ_CLK_SAI4_IPG] = imx_clk_gate2_shared2("sai4_ipg_clk", "ipg_audio_root", base + 0x4360, 0, &share_count_sai4);
+ clks[IMX8MQ_CLK_SAI5_ROOT] = imx_clk_gate2_shared2("sai5_root_clk", "sai5", base + 0x4370, 0, &share_count_sai5);
+ clks[IMX8MQ_CLK_SAI5_IPG] = imx_clk_gate2_shared2("sai5_ipg_clk", "ipg_audio_root", base + 0x4370, 0, &share_count_sai5);
+ clks[IMX8MQ_CLK_SAI6_ROOT] = imx_clk_gate2_shared2("sai6_root_clk", "sai6", base + 0x4380, 0, &share_count_sai6);
+ clks[IMX8MQ_CLK_SAI6_IPG] = imx_clk_gate2_shared2("sai6_ipg_clk", "ipg_audio_root", base + 0x4380, 0, &share_count_sai6);
+ clks[IMX8MQ_CLK_UART1_ROOT] = imx_clk_gate4("uart1_root_clk", "uart1", base + 0x4490, 0);
+ clks[IMX8MQ_CLK_UART2_ROOT] = imx_clk_gate4("uart2_root_clk", "uart2", base + 0x44a0, 0);
+ clks[IMX8MQ_CLK_UART3_ROOT] = imx_clk_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0);
+ clks[IMX8MQ_CLK_UART4_ROOT] = imx_clk_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0);
+ clks[IMX8MQ_CLK_USB1_CTRL_ROOT] = imx_clk_gate4("usb1_ctrl_root_clk", "usb_core_ref", base + 0x44d0, 0);
+ clks[IMX8MQ_CLK_USB2_CTRL_ROOT] = imx_clk_gate4("usb2_ctrl_root_clk", "usb_core_ref", base + 0x44e0, 0);
+ clks[IMX8MQ_CLK_USB1_PHY_ROOT] = imx_clk_gate4("usb1_phy_root_clk", "usb_phy_ref", base + 0x44f0, 0);
+ clks[IMX8MQ_CLK_USB2_PHY_ROOT] = imx_clk_gate4("usb2_phy_root_clk", "usb_phy_ref", base + 0x4500, 0);
+ clks[IMX8MQ_CLK_USDHC1_ROOT] = imx_clk_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0);
+ clks[IMX8MQ_CLK_USDHC2_ROOT] = imx_clk_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0);
+ clks[IMX8MQ_CLK_WDOG1_ROOT] = imx_clk_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0);
+ clks[IMX8MQ_CLK_WDOG2_ROOT] = imx_clk_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0);
+ clks[IMX8MQ_CLK_WDOG3_ROOT] = imx_clk_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0);
+ clks[IMX8MQ_CLK_VPU_G1_ROOT] = imx_clk_gate2_flags("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+ clks[IMX8MQ_CLK_GPU_ROOT] = imx_clk_gate4("gpu_root_clk", "gpu_core_div", base + 0x4570, 0);
+ clks[IMX8MQ_CLK_VPU_G2_ROOT] = imx_clk_gate2_flags("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+ clks[IMX8MQ_CLK_DISP_ROOT] = imx_clk_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MQ_CLK_DISP_AXI_ROOT] = imx_clk_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MQ_CLK_DISP_APB_ROOT] = imx_clk_gate2_shared2("disp_apb_root_clk", "disp_apb", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MQ_CLK_DISP_RTRM_ROOT] = imx_clk_gate2_shared2("disp_rtrm_root_clk", "disp_rtrm", base + 0x45d0, 0, &share_count_dcss);
+ clks[IMX8MQ_CLK_TMU_ROOT] = imx_clk_gate4_flags("tmu_root_clk", "ipg_root", base + 0x4620, 0, CLK_IS_CRITICAL);
+ clks[IMX8MQ_CLK_VPU_DEC_ROOT] = imx_clk_gate2_flags("vpu_dec_root_clk", "vpu_bus", base + 0x4630, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE);
+ clks[IMX8MQ_CLK_CSI1_ROOT] = imx_clk_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0);
+ clks[IMX8MQ_CLK_CSI2_ROOT] = imx_clk_gate4("csi2_root_clk", "csi2_core", base + 0x4660, 0);
+ clks[IMX8MQ_CLK_SDMA1_ROOT] = imx_clk_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0);
+ clks[IMX8MQ_CLK_SDMA2_ROOT] = imx_clk_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0);
+
+ clks[IMX8MQ_GPT_3M_CLK] = imx_clk_fixed_factor("gpt_3m", "osc_25m", 1, 8);
+ clks[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_fixed_factor("dram_alt_root", "dram_alt", 1, 4);
+
+ for (i = 0; i < IMX8MQ_CLK_END; i++)
+ if (IS_ERR(clks[i]))
+ pr_err("i.MX8MQ clk %u: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+
+ clk_data.clks = clks;
+ clk_data.clk_num = ARRAY_SIZE(clks);
+
+ err = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+ WARN_ON(err);
+
+ return err;
+}
+
+static const struct of_device_id imx8mq_clk_of_match[] = {
+ { .compatible = "fsl,imx8mq-ccm" },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx8mq_clk_of_match);
+
+static struct platform_driver imx8mq_clk_driver = {
+ .probe = imx8mq_clocks_probe,
+ .driver = {
+ .name = "imx8mq-ccm",
+ .of_match_table = of_match_ptr(imx8mq_clk_of_match),
+ },
+};
+module_platform_driver(imx8mq_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
new file mode 100644
index 000000000000..dcae1dd85e43
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk-scu.h"
+#include "clk-imx8qxp-lpcg.h"
+
+#include <dt-bindings/clock/imx8qxp-clock.h>
+
+/*
+ * struct imx8qxp_lpcg_data - Description of one LPCG clock
+ * @id: clock ID
+ * @name: clock name
+ * @parent: parent clock name
+ * @flags: common clock flags
+ * @offset: offset of this LPCG clock
+ * @bit_idx: bit index of this LPCG clock
+ * @hw_gate: whether HW autogating is supported
+ *
+ * This structure describes one LPCG clock
+ */
+struct imx8qxp_lpcg_data {
+ int id;
+ char *name;
+ char *parent;
+ unsigned long flags;
+ u32 offset;
+ u8 bit_idx;
+ bool hw_gate;
+};
+
+/*
+ * struct imx8qxp_ss_lpcg - Description of one subsystem's LPCG clocks
+ * @lpcg: LPCG clocks array of one subsystem
+ * @num_lpcg: the number of LPCG clocks
+ * @num_max: the maximum number of LPCG clocks
+ *
+ * This structure describes the LPCG clocks of one subsystem, from which
+ * the respective LPCG clocks are then registered
+ */
+struct imx8qxp_ss_lpcg {
+ const struct imx8qxp_lpcg_data *lpcg;
+ u8 num_lpcg;
+ u8 num_max;
+};
+
+static const struct imx8qxp_lpcg_data imx8qxp_lpcg_adma[] = {
+ { IMX8QXP_ADMA_LPCG_UART0_IPG_CLK, "uart0_lpcg_ipg_clk", "dma_ipg_clk_root", 0, ADMA_LPUART_0_LPCG, 16, 0, },
+ { IMX8QXP_ADMA_LPCG_UART0_BAUD_CLK, "uart0_lpcg_baud_clk", "uart0_clk", 0, ADMA_LPUART_0_LPCG, 0, 0, },
+ { IMX8QXP_ADMA_LPCG_UART1_IPG_CLK, "uart1_lpcg_ipg_clk", "dma_ipg_clk_root", 0, ADMA_LPUART_1_LPCG, 16, 0, },
+ { IMX8QXP_ADMA_LPCG_UART1_BAUD_CLK, "uart1_lpcg_baud_clk", "uart1_clk", 0, ADMA_LPUART_1_LPCG, 0, 0, },
+ { IMX8QXP_ADMA_LPCG_UART2_IPG_CLK, "uart2_lpcg_ipg_clk", "dma_ipg_clk_root", 0, ADMA_LPUART_2_LPCG, 16, 0, },
+ { IMX8QXP_ADMA_LPCG_UART2_BAUD_CLK, "uart2_lpcg_baud_clk", "uart2_clk", 0, ADMA_LPUART_2_LPCG, 0, 0, },
+ { IMX8QXP_ADMA_LPCG_UART3_IPG_CLK, "uart3_lpcg_ipg_clk", "dma_ipg_clk_root", 0, ADMA_LPUART_3_LPCG, 16, 0, },
+ { IMX8QXP_ADMA_LPCG_UART3_BAUD_CLK, "uart3_lpcg_baud_clk", "uart3_clk", 0, ADMA_LPUART_3_LPCG, 0, 0, },
+ { IMX8QXP_ADMA_LPCG_I2C0_IPG_CLK, "i2c0_lpcg_ipg_clk", "dma_ipg_clk_root", 0, ADMA_LPI2C_0_LPCG, 16, 0, },
+ { IMX8QXP_ADMA_LPCG_I2C0_CLK, "i2c0_lpcg_clk", "i2c0_clk", 0, ADMA_LPI2C_0_LPCG, 0, 0, },
+ { IMX8QXP_ADMA_LPCG_I2C1_IPG_CLK, "i2c1_lpcg_ipg_clk", "dma_ipg_clk_root", 0, ADMA_LPI2C_1_LPCG, 16, 0, },
+ { IMX8QXP_ADMA_LPCG_I2C1_CLK, "i2c1_lpcg_clk", "i2c1_clk", 0, ADMA_LPI2C_1_LPCG, 0, 0, },
+ { IMX8QXP_ADMA_LPCG_I2C2_IPG_CLK, "i2c2_lpcg_ipg_clk", "dma_ipg_clk_root", 0, ADMA_LPI2C_2_LPCG, 16, 0, },
+ { IMX8QXP_ADMA_LPCG_I2C2_CLK, "i2c2_lpcg_clk", "i2c2_clk", 0, ADMA_LPI2C_2_LPCG, 0, 0, },
+ { IMX8QXP_ADMA_LPCG_I2C3_IPG_CLK, "i2c3_lpcg_ipg_clk", "dma_ipg_clk_root", 0, ADMA_LPI2C_3_LPCG, 16, 0, },
+ { IMX8QXP_ADMA_LPCG_I2C3_CLK, "i2c3_lpcg_clk", "i2c3_clk", 0, ADMA_LPI2C_3_LPCG, 0, 0, },
+};
+
+static const struct imx8qxp_ss_lpcg imx8qxp_ss_adma = {
+ .lpcg = imx8qxp_lpcg_adma,
+ .num_lpcg = ARRAY_SIZE(imx8qxp_lpcg_adma),
+ .num_max = IMX8QXP_ADMA_LPCG_CLK_END,
+};
+
+static const struct imx8qxp_lpcg_data imx8qxp_lpcg_conn[] = {
+ { IMX8QXP_CONN_LPCG_SDHC0_PER_CLK, "sdhc0_lpcg_per_clk", "sdhc0_clk", 0, CONN_USDHC_0_LPCG, 0, 0, },
+ { IMX8QXP_CONN_LPCG_SDHC0_IPG_CLK, "sdhc0_lpcg_ipg_clk", "conn_ipg_clk_root", 0, CONN_USDHC_0_LPCG, 16, 0, },
+ { IMX8QXP_CONN_LPCG_SDHC0_HCLK, "sdhc0_lpcg_ahb_clk", "conn_axi_clk_root", 0, CONN_USDHC_0_LPCG, 20, 0, },
+ { IMX8QXP_CONN_LPCG_SDHC1_PER_CLK, "sdhc1_lpcg_per_clk", "sdhc1_clk", 0, CONN_USDHC_1_LPCG, 0, 0, },
+ { IMX8QXP_CONN_LPCG_SDHC1_IPG_CLK, "sdhc1_lpcg_ipg_clk", "conn_ipg_clk_root", 0, CONN_USDHC_1_LPCG, 16, 0, },
+ { IMX8QXP_CONN_LPCG_SDHC1_HCLK, "sdhc1_lpcg_ahb_clk", "conn_axi_clk_root", 0, CONN_USDHC_1_LPCG, 20, 0, },
+ { IMX8QXP_CONN_LPCG_SDHC2_PER_CLK, "sdhc2_lpcg_per_clk", "sdhc2_clk", 0, CONN_USDHC_2_LPCG, 0, 0, },
+ { IMX8QXP_CONN_LPCG_SDHC2_IPG_CLK, "sdhc2_lpcg_ipg_clk", "conn_ipg_clk_root", 0, CONN_USDHC_2_LPCG, 16, 0, },
+ { IMX8QXP_CONN_LPCG_SDHC2_HCLK, "sdhc2_lpcg_ahb_clk", "conn_axi_clk_root", 0, CONN_USDHC_2_LPCG, 20, 0, },
+ { IMX8QXP_CONN_LPCG_ENET0_ROOT_CLK, "enet0_ipg_root_clk", "enet0_clk", 0, CONN_ENET_0_LPCG, 0, 0, },
+ { IMX8QXP_CONN_LPCG_ENET0_TX_CLK, "enet0_tx_clk", "enet0_clk", 0, CONN_ENET_0_LPCG, 4, 0, },
+ { IMX8QXP_CONN_LPCG_ENET0_AHB_CLK, "enet0_ahb_clk", "conn_axi_clk_root", 0, CONN_ENET_0_LPCG, 8, 0, },
+ { IMX8QXP_CONN_LPCG_ENET0_IPG_S_CLK, "enet0_ipg_s_clk", "conn_ipg_clk_root", 0, CONN_ENET_0_LPCG, 20, 0, },
+ { IMX8QXP_CONN_LPCG_ENET0_IPG_CLK, "enet0_ipg_clk", "enet0_ipg_s_clk", 0, CONN_ENET_0_LPCG, 16, 0, },
+ { IMX8QXP_CONN_LPCG_ENET1_ROOT_CLK, "enet1_ipg_root_clk", "enet1_clk", 0, CONN_ENET_1_LPCG, 0, 0, },
+ { IMX8QXP_CONN_LPCG_ENET1_TX_CLK, "enet1_tx_clk", "enet1_clk", 0, CONN_ENET_1_LPCG, 4, 0, },
+ { IMX8QXP_CONN_LPCG_ENET1_AHB_CLK, "enet1_ahb_clk", "conn_axi_clk_root", 0, CONN_ENET_1_LPCG, 8, 0, },
+ { IMX8QXP_CONN_LPCG_ENET1_IPG_S_CLK, "enet1_ipg_s_clk", "conn_ipg_clk_root", 0, CONN_ENET_1_LPCG, 20, 0, },
+ { IMX8QXP_CONN_LPCG_ENET1_IPG_CLK, "enet1_ipg_clk", "enet0_ipg_s_clk", 0, CONN_ENET_1_LPCG, 16, 0, },
+};
+
+static const struct imx8qxp_ss_lpcg imx8qxp_ss_conn = {
+ .lpcg = imx8qxp_lpcg_conn,
+ .num_lpcg = ARRAY_SIZE(imx8qxp_lpcg_conn),
+ .num_max = IMX8QXP_CONN_LPCG_CLK_END,
+};
+
+static const struct imx8qxp_lpcg_data imx8qxp_lpcg_lsio[] = {
+ { IMX8QXP_LSIO_LPCG_PWM0_IPG_CLK, "pwm0_lpcg_ipg_clk", "pwm0_clk", 0, LSIO_PWM_0_LPCG, 0, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM0_IPG_HF_CLK, "pwm0_lpcg_ipg_hf_clk", "pwm0_clk", 0, LSIO_PWM_0_LPCG, 4, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM0_IPG_S_CLK, "pwm0_lpcg_ipg_s_clk", "pwm0_clk", 0, LSIO_PWM_0_LPCG, 16, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM0_IPG_SLV_CLK, "pwm0_lpcg_ipg_slv_clk", "lsio_bus_clk_root", 0, LSIO_PWM_0_LPCG, 20, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM0_IPG_MSTR_CLK, "pwm0_lpcg_ipg_mstr_clk", "pwm0_clk", 0, LSIO_PWM_0_LPCG, 24, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM1_IPG_CLK, "pwm1_lpcg_ipg_clk", "pwm1_clk", 0, LSIO_PWM_1_LPCG, 0, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM1_IPG_HF_CLK, "pwm1_lpcg_ipg_hf_clk", "pwm1_clk", 0, LSIO_PWM_1_LPCG, 4, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM1_IPG_S_CLK, "pwm1_lpcg_ipg_s_clk", "pwm1_clk", 0, LSIO_PWM_1_LPCG, 16, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM1_IPG_SLV_CLK, "pwm1_lpcg_ipg_slv_clk", "lsio_bus_clk_root", 0, LSIO_PWM_1_LPCG, 20, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM1_IPG_MSTR_CLK, "pwm1_lpcg_ipg_mstr_clk", "pwm1_clk", 0, LSIO_PWM_1_LPCG, 24, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM2_IPG_CLK, "pwm2_lpcg_ipg_clk", "pwm2_clk", 0, LSIO_PWM_2_LPCG, 0, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM2_IPG_HF_CLK, "pwm2_lpcg_ipg_hf_clk", "pwm2_clk", 0, LSIO_PWM_2_LPCG, 4, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM2_IPG_S_CLK, "pwm2_lpcg_ipg_s_clk", "pwm2_clk", 0, LSIO_PWM_2_LPCG, 16, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM2_IPG_SLV_CLK, "pwm2_lpcg_ipg_slv_clk", "lsio_bus_clk_root", 0, LSIO_PWM_2_LPCG, 20, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM2_IPG_MSTR_CLK, "pwm2_lpcg_ipg_mstr_clk", "pwm2_clk", 0, LSIO_PWM_2_LPCG, 24, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM3_IPG_CLK, "pwm3_lpcg_ipg_clk", "pwm3_clk", 0, LSIO_PWM_3_LPCG, 0, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM3_IPG_HF_CLK, "pwm3_lpcg_ipg_hf_clk", "pwm3_clk", 0, LSIO_PWM_3_LPCG, 4, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM3_IPG_S_CLK, "pwm3_lpcg_ipg_s_clk", "pwm3_clk", 0, LSIO_PWM_3_LPCG, 16, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM3_IPG_SLV_CLK, "pwm3_lpcg_ipg_slv_clk", "lsio_bus_clk_root", 0, LSIO_PWM_3_LPCG, 20, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM3_IPG_MSTR_CLK, "pwm3_lpcg_ipg_mstr_clk", "pwm3_clk", 0, LSIO_PWM_3_LPCG, 24, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM4_IPG_CLK, "pwm4_lpcg_ipg_clk", "pwm4_clk", 0, LSIO_PWM_4_LPCG, 0, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM4_IPG_HF_CLK, "pwm4_lpcg_ipg_hf_clk", "pwm4_clk", 0, LSIO_PWM_4_LPCG, 4, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM4_IPG_S_CLK, "pwm4_lpcg_ipg_s_clk", "pwm4_clk", 0, LSIO_PWM_4_LPCG, 16, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM4_IPG_SLV_CLK, "pwm4_lpcg_ipg_slv_clk", "lsio_bus_clk_root", 0, LSIO_PWM_4_LPCG, 20, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM4_IPG_MSTR_CLK, "pwm4_lpcg_ipg_mstr_clk", "pwm4_clk", 0, LSIO_PWM_4_LPCG, 24, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM5_IPG_CLK, "pwm5_lpcg_ipg_clk", "pwm5_clk", 0, LSIO_PWM_5_LPCG, 0, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM5_IPG_HF_CLK, "pwm5_lpcg_ipg_hf_clk", "pwm5_clk", 0, LSIO_PWM_5_LPCG, 4, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM5_IPG_S_CLK, "pwm5_lpcg_ipg_s_clk", "pwm5_clk", 0, LSIO_PWM_5_LPCG, 16, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM5_IPG_SLV_CLK, "pwm5_lpcg_ipg_slv_clk", "lsio_bus_clk_root", 0, LSIO_PWM_5_LPCG, 20, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM5_IPG_MSTR_CLK, "pwm5_lpcg_ipg_mstr_clk", "pwm5_clk", 0, LSIO_PWM_5_LPCG, 24, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM6_IPG_CLK, "pwm6_lpcg_ipg_clk", "pwm6_clk", 0, LSIO_PWM_6_LPCG, 0, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM6_IPG_HF_CLK, "pwm6_lpcg_ipg_hf_clk", "pwm6_clk", 0, LSIO_PWM_6_LPCG, 4, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM6_IPG_S_CLK, "pwm6_lpcg_ipg_s_clk", "pwm6_clk", 0, LSIO_PWM_6_LPCG, 16, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM6_IPG_SLV_CLK, "pwm6_lpcg_ipg_slv_clk", "lsio_bus_clk_root", 0, LSIO_PWM_6_LPCG, 20, 0, },
+ { IMX8QXP_LSIO_LPCG_PWM6_IPG_MSTR_CLK, "pwm6_lpcg_ipg_mstr_clk", "pwm6_clk", 0, LSIO_PWM_6_LPCG, 24, 0, },
+};
+
+static const struct imx8qxp_ss_lpcg imx8qxp_ss_lsio = {
+ .lpcg = imx8qxp_lpcg_lsio,
+ .num_lpcg = ARRAY_SIZE(imx8qxp_lpcg_lsio),
+ .num_max = IMX8QXP_LSIO_LPCG_CLK_END,
+};
+
+static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct clk_hw_onecell_data *clk_data;
+ const struct imx8qxp_ss_lpcg *ss_lpcg;
+ const struct imx8qxp_lpcg_data *lpcg;
+ struct resource *res;
+ struct clk_hw **clks;
+ void __iomem *base;
+ int i;
+
+ ss_lpcg = of_device_get_match_data(dev);
+ if (!ss_lpcg)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!base)
+ return -ENOMEM;
+
+ clk_data = devm_kzalloc(&pdev->dev, struct_size(clk_data, hws,
+ ss_lpcg->num_max), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = ss_lpcg->num_max;
+ clks = clk_data->hws;
+
+ for (i = 0; i < ss_lpcg->num_lpcg; i++) {
+ lpcg = ss_lpcg->lpcg + i;
+ clks[lpcg->id] = imx_clk_lpcg_scu(lpcg->name, lpcg->parent,
+ lpcg->flags, base + lpcg->offset,
+ lpcg->bit_idx, lpcg->hw_gate);
+ }
+
+ for (i = 0; i < clk_data->num; i++) {
+ if (IS_ERR(clks[i]))
+ pr_warn("i.MX clk %u: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ }
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+}
+
+static const struct of_device_id imx8qxp_lpcg_match[] = {
+ { .compatible = "fsl,imx8qxp-lpcg-adma", &imx8qxp_ss_adma, },
+ { .compatible = "fsl,imx8qxp-lpcg-conn", &imx8qxp_ss_conn, },
+ { .compatible = "fsl,imx8qxp-lpcg-lsio", &imx8qxp_ss_lsio, },
+ { /* sentinel */ }
+};
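+
+/*
+ * Consumers then pick up an LPCG clock by provider phandle plus the
+ * dt-binding index registered above, e.g. (node and label names here
+ * are hypothetical):
+ *
+ *	uart0: serial@5a060000 {
+ *		clocks = <&adma_lpcg IMX8QXP_ADMA_LPCG_UART0_IPG_CLK>,
+ *			 <&adma_lpcg IMX8QXP_ADMA_LPCG_UART0_BAUD_CLK>;
+ *		clock-names = "ipg", "baud";
+ *	};
+ */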
+
+static struct platform_driver imx8qxp_lpcg_clk_driver = {
+ .driver = {
+ .name = "imx8qxp-lpcg-clk",
+ .of_match_table = imx8qxp_lpcg_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = imx8qxp_lpcg_clk_probe,
+};
+
+builtin_platform_driver(imx8qxp_lpcg_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.h b/drivers/clk/imx/clk-imx8qxp-lpcg.h
new file mode 100644
index 000000000000..2a37ce57c500
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#ifndef _IMX8QXP_LPCG_H
+#define _IMX8QXP_LPCG_H
+
+/* LSIO SS */
+#define LSIO_PWM_0_LPCG 0x00000
+#define LSIO_PWM_1_LPCG 0x10000
+#define LSIO_PWM_2_LPCG 0x20000
+#define LSIO_PWM_3_LPCG 0x30000
+#define LSIO_PWM_4_LPCG 0x40000
+#define LSIO_PWM_5_LPCG 0x50000
+#define LSIO_PWM_6_LPCG 0x60000
+#define LSIO_PWM_7_LPCG 0x70000
+#define LSIO_GPIO_0_LPCG 0x80000
+#define LSIO_GPIO_1_LPCG 0x90000
+#define LSIO_GPIO_2_LPCG 0xa0000
+#define LSIO_GPIO_3_LPCG 0xb0000
+#define LSIO_GPIO_4_LPCG 0xc0000
+#define LSIO_GPIO_5_LPCG 0xd0000
+#define LSIO_GPIO_6_LPCG 0xe0000
+#define LSIO_GPIO_7_LPCG 0xf0000
+#define LSIO_FSPI_0_LPCG 0x120000
+#define LSIO_FSPI_1_LPCG 0x130000
+#define LSIO_GPT_0_LPCG 0x140000
+#define LSIO_GPT_1_LPCG 0x150000
+#define LSIO_GPT_2_LPCG 0x160000
+#define LSIO_GPT_3_LPCG 0x170000
+#define LSIO_GPT_4_LPCG 0x180000
+#define LSIO_OCRAM_LPCG 0x190000
+#define LSIO_KPP_LPCG 0x1a0000
+#define LSIO_ROMCP_LPCG 0x100000
+
+/* Connectivity SS */
+#define CONN_USDHC_0_LPCG 0x00000
+#define CONN_USDHC_1_LPCG 0x10000
+#define CONN_USDHC_2_LPCG 0x20000
+#define CONN_ENET_0_LPCG 0x30000
+#define CONN_ENET_1_LPCG 0x40000
+#define CONN_DTCP_LPCG 0x50000
+#define CONN_MLB_LPCG 0x60000
+#define CONN_USB_2_LPCG 0x70000
+#define CONN_USB_3_LPCG 0x80000
+#define CONN_NAND_LPCG 0x90000
+#define CONN_EDMA_LPCG 0xa0000
+
+/* ADMA SS */
+#define ADMA_ASRC_0_LPCG 0x400000
+#define ADMA_ESAI_0_LPCG 0x410000
+#define ADMA_SPDIF_0_LPCG 0x420000
+#define ADMA_SAI_0_LPCG 0x440000
+#define ADMA_SAI_1_LPCG 0x450000
+#define ADMA_SAI_2_LPCG 0x460000
+#define ADMA_SAI_3_LPCG 0x470000
+#define ADMA_GPT_5_LPCG 0x4b0000
+#define ADMA_GPT_6_LPCG 0x4c0000
+#define ADMA_GPT_7_LPCG 0x4d0000
+#define ADMA_GPT_8_LPCG 0x4e0000
+#define ADMA_GPT_9_LPCG 0x4f0000
+#define ADMA_GPT_10_LPCG 0x500000
+#define ADMA_HIFI_LPCG 0x580000
+#define ADMA_OCRAM_LPCG 0x590000
+#define ADMA_EDMA_0_LPCG 0x5f0000
+#define ADMA_ASRC_1_LPCG 0xc00000
+#define ADMA_SAI_4_LPCG 0xc20000
+#define ADMA_SAI_5_LPCG 0xc30000
+#define ADMA_AMIX_LPCG 0xc40000
+#define ADMA_MQS_LPCG 0xc50000
+#define ADMA_ACM_LPCG 0xc60000
+#define ADMA_REC_CLK0_LPCG 0xd00000
+#define ADMA_REC_CLK1_LPCG 0xd10000
+#define ADMA_PLL_CLK0_LPCG 0xd20000
+#define ADMA_PLL_CLK1_LPCG 0xd30000
+#define ADMA_MCLKOUT0_LPCG 0xd50000
+#define ADMA_MCLKOUT1_LPCG 0xd60000
+#define ADMA_EDMA_1_LPCG 0xdf0000
+#define ADMA_LPSPI_0_LPCG 0x1400000
+#define ADMA_LPSPI_1_LPCG 0x1410000
+#define ADMA_LPSPI_2_LPCG 0x1420000
+#define ADMA_LPSPI_3_LPCG 0x1430000
+#define ADMA_LPUART_0_LPCG 0x1460000
+#define ADMA_LPUART_1_LPCG 0x1470000
+#define ADMA_LPUART_2_LPCG 0x1480000
+#define ADMA_LPUART_3_LPCG 0x1490000
+#define ADMA_LCD_LPCG 0x1580000
+#define ADMA_PWM_LPCG 0x1590000
+#define ADMA_LPI2C_0_LPCG 0x1c00000
+#define ADMA_LPI2C_1_LPCG 0x1c10000
+#define ADMA_LPI2C_2_LPCG 0x1c20000
+#define ADMA_LPI2C_3_LPCG 0x1c30000
+#define ADMA_ADC_0_LPCG 0x1c80000
+#define ADMA_FTM_0_LPCG 0x1ca0000
+#define ADMA_FTM_1_LPCG 0x1cb0000
+#define ADMA_FLEXCAN_0_LPCG 0x1cd0000
+#define ADMA_FLEXCAN_1_LPCG 0x1ce0000
+#define ADMA_FLEXCAN_2_LPCG 0x1cf0000
+
+#endif /* _IMX8QXP_LPCG_H */
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
new file mode 100644
index 000000000000..33c9396b08f1
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "clk-scu.h"
+
+#include <dt-bindings/clock/imx8qxp-clock.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+static int imx8qxp_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *ccm_node = pdev->dev.of_node;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ int ret, i;
+
+ ret = imx_clk_scu_init();
+ if (ret)
+ return ret;
+
+ clk_data = devm_kzalloc(&pdev->dev, struct_size(clk_data, hws,
+ IMX8QXP_SCU_CLK_END), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX8QXP_SCU_CLK_END;
+ clks = clk_data->hws;
+
+ /* Fixed clocks */
+ clks[IMX8QXP_CLK_DUMMY] = clk_hw_register_fixed_rate(NULL, "dummy", NULL, 0, 0);
+ clks[IMX8QXP_ADMA_IPG_CLK_ROOT] = clk_hw_register_fixed_rate(NULL, "dma_ipg_clk_root", NULL, 0, 120000000);
+ clks[IMX8QXP_CONN_AXI_CLK_ROOT] = clk_hw_register_fixed_rate(NULL, "conn_axi_clk_root", NULL, 0, 333333333);
+ clks[IMX8QXP_CONN_AHB_CLK_ROOT] = clk_hw_register_fixed_rate(NULL, "conn_ahb_clk_root", NULL, 0, 166666666);
+ clks[IMX8QXP_CONN_IPG_CLK_ROOT] = clk_hw_register_fixed_rate(NULL, "conn_ipg_clk_root", NULL, 0, 83333333);
+ clks[IMX8QXP_DC_AXI_EXT_CLK] = clk_hw_register_fixed_rate(NULL, "dc_axi_ext_clk_root", NULL, 0, 800000000);
+ clks[IMX8QXP_DC_AXI_INT_CLK] = clk_hw_register_fixed_rate(NULL, "dc_axi_int_clk_root", NULL, 0, 400000000);
+ clks[IMX8QXP_DC_CFG_CLK] = clk_hw_register_fixed_rate(NULL, "dc_cfg_clk_root", NULL, 0, 100000000);
+ clks[IMX8QXP_MIPI_IPG_CLK] = clk_hw_register_fixed_rate(NULL, "mipi_ipg_clk_root", NULL, 0, 120000000);
+ clks[IMX8QXP_IMG_AXI_CLK] = clk_hw_register_fixed_rate(NULL, "img_axi_clk_root", NULL, 0, 400000000);
+ clks[IMX8QXP_IMG_IPG_CLK] = clk_hw_register_fixed_rate(NULL, "img_ipg_clk_root", NULL, 0, 200000000);
+ clks[IMX8QXP_IMG_PXL_CLK] = clk_hw_register_fixed_rate(NULL, "img_pxl_clk_root", NULL, 0, 600000000);
+ clks[IMX8QXP_HSIO_AXI_CLK] = clk_hw_register_fixed_rate(NULL, "hsio_axi_clk_root", NULL, 0, 400000000);
+ clks[IMX8QXP_HSIO_PER_CLK] = clk_hw_register_fixed_rate(NULL, "hsio_per_clk_root", NULL, 0, 133333333);
+ clks[IMX8QXP_LSIO_MEM_CLK] = clk_hw_register_fixed_rate(NULL, "lsio_mem_clk_root", NULL, 0, 200000000);
+ clks[IMX8QXP_LSIO_BUS_CLK] = clk_hw_register_fixed_rate(NULL, "lsio_bus_clk_root", NULL, 0, 100000000);
+
+ /* ARM core */
+ clks[IMX8QXP_A35_CLK] = imx_clk_scu("a35_clk", IMX_SC_R_A35, IMX_SC_PM_CLK_CPU);
+
+ /* LSIO SS */
+ clks[IMX8QXP_LSIO_PWM0_CLK] = imx_clk_scu("pwm0_clk", IMX_SC_R_PWM_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_PWM1_CLK] = imx_clk_scu("pwm1_clk", IMX_SC_R_PWM_1, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_PWM2_CLK] = imx_clk_scu("pwm2_clk", IMX_SC_R_PWM_2, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_PWM3_CLK] = imx_clk_scu("pwm3_clk", IMX_SC_R_PWM_3, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_PWM4_CLK] = imx_clk_scu("pwm4_clk", IMX_SC_R_PWM_4, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_PWM5_CLK] = imx_clk_scu("pwm5_clk", IMX_SC_R_PWM_5, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_PWM6_CLK] = imx_clk_scu("pwm6_clk", IMX_SC_R_PWM_6, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_PWM7_CLK] = imx_clk_scu("pwm7_clk", IMX_SC_R_PWM_7, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_GPT0_CLK] = imx_clk_scu("gpt0_clk", IMX_SC_R_GPT_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_GPT1_CLK] = imx_clk_scu("gpt1_clk", IMX_SC_R_GPT_1, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_GPT2_CLK] = imx_clk_scu("gpt2_clk", IMX_SC_R_GPT_2, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_GPT3_CLK] = imx_clk_scu("gpt3_clk", IMX_SC_R_GPT_3, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_GPT4_CLK] = imx_clk_scu("gpt4_clk", IMX_SC_R_GPT_4, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_FSPI0_CLK] = imx_clk_scu("fspi0_clk", IMX_SC_R_FSPI_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_LSIO_FSPI1_CLK] = imx_clk_scu("fspi1_clk", IMX_SC_R_FSPI_1, IMX_SC_PM_CLK_PER);
+
+ /* ADMA SS */
+ clks[IMX8QXP_ADMA_UART0_CLK] = imx_clk_scu("uart0_clk", IMX_SC_R_UART_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_UART1_CLK] = imx_clk_scu("uart1_clk", IMX_SC_R_UART_1, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_UART2_CLK] = imx_clk_scu("uart2_clk", IMX_SC_R_UART_2, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_UART3_CLK] = imx_clk_scu("uart3_clk", IMX_SC_R_UART_3, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_SPI0_CLK] = imx_clk_scu("spi0_clk", IMX_SC_R_SPI_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_SPI1_CLK] = imx_clk_scu("spi1_clk", IMX_SC_R_SPI_1, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_SPI2_CLK] = imx_clk_scu("spi2_clk", IMX_SC_R_SPI_2, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_SPI3_CLK] = imx_clk_scu("spi3_clk", IMX_SC_R_SPI_3, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_CAN0_CLK] = imx_clk_scu("can0_clk", IMX_SC_R_CAN_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_I2C0_CLK] = imx_clk_scu("i2c0_clk", IMX_SC_R_I2C_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_I2C1_CLK] = imx_clk_scu("i2c1_clk", IMX_SC_R_I2C_1, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_I2C2_CLK] = imx_clk_scu("i2c2_clk", IMX_SC_R_I2C_2, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_I2C3_CLK] = imx_clk_scu("i2c3_clk", IMX_SC_R_I2C_3, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_FTM0_CLK] = imx_clk_scu("ftm0_clk", IMX_SC_R_FTM_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_FTM1_CLK] = imx_clk_scu("ftm1_clk", IMX_SC_R_FTM_1, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_ADC0_CLK] = imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_PWM_CLK] = imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_ADMA_LCD_CLK] = imx_clk_scu("lcd_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
+
+ /* Connectivity */
+ clks[IMX8QXP_CONN_SDHC0_CLK] = imx_clk_scu("sdhc0_clk", IMX_SC_R_SDHC_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_CONN_SDHC1_CLK] = imx_clk_scu("sdhc1_clk", IMX_SC_R_SDHC_1, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_CONN_SDHC2_CLK] = imx_clk_scu("sdhc2_clk", IMX_SC_R_SDHC_2, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_CONN_ENET0_ROOT_CLK] = imx_clk_scu("enet0_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_CONN_ENET0_BYPASS_CLK] = imx_clk_scu("enet0_bypass_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_BYPASS);
+ clks[IMX8QXP_CONN_ENET0_RGMII_CLK] = imx_clk_scu("enet0_rgmii_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_MISC0);
+ clks[IMX8QXP_CONN_ENET1_ROOT_CLK] = imx_clk_scu("enet1_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_CONN_ENET1_BYPASS_CLK] = imx_clk_scu("enet1_bypass_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_BYPASS);
+ clks[IMX8QXP_CONN_ENET1_RGMII_CLK] = imx_clk_scu("enet1_rgmii_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_MISC0);
+ clks[IMX8QXP_CONN_GPMI_BCH_IO_CLK] = imx_clk_scu("gpmi_io_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_MST_BUS);
+ clks[IMX8QXP_CONN_GPMI_BCH_CLK] = imx_clk_scu("gpmi_bch_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_CONN_USB2_ACLK] = imx_clk_scu("usb3_aclk_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_CONN_USB2_BUS_CLK] = imx_clk_scu("usb3_bus_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MST_BUS);
+ clks[IMX8QXP_CONN_USB2_LPM_CLK] = imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);
+
+ /* Display controller SS */
+ clks[IMX8QXP_DC0_DISP0_CLK] = imx_clk_scu("dc0_disp0_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
+ clks[IMX8QXP_DC0_DISP1_CLK] = imx_clk_scu("dc0_disp1_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
+
+ /* MIPI-LVDS SS */
+ clks[IMX8QXP_MIPI0_I2C0_CLK] = imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2);
+ clks[IMX8QXP_MIPI0_I2C1_CLK] = imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2);
+
+ /* MIPI CSI SS */
+ clks[IMX8QXP_CSI0_CORE_CLK] = imx_clk_scu("mipi_csi0_core_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_CSI0_ESC_CLK] = imx_clk_scu("mipi_csi0_esc_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_MISC);
+ clks[IMX8QXP_CSI0_I2C0_CLK] = imx_clk_scu("mipi_csi0_i2c0_clk", IMX_SC_R_CSI_0_I2C_0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_CSI0_PWM0_CLK] = imx_clk_scu("mipi_csi0_pwm0_clk", IMX_SC_R_CSI_0_PWM_0, IMX_SC_PM_CLK_PER);
+
+ /* GPU SS */
+ clks[IMX8QXP_GPU0_CORE_CLK] = imx_clk_scu("gpu_core0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_PER);
+ clks[IMX8QXP_GPU0_SHADER_CLK] = imx_clk_scu("gpu_shader0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_MISC);
+
+ for (i = 0; i < clk_data->num; i++) {
+ if (IS_ERR(clks[i]))
+ pr_warn("i.MX clk %u: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+ }
+
+ return of_clk_add_hw_provider(ccm_node, of_clk_hw_onecell_get, clk_data);
+}
+
+static const struct of_device_id imx8qxp_match[] = {
+ { .compatible = "fsl,imx8qxp-clk", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver imx8qxp_clk_driver = {
+ .driver = {
+ .name = "imx8qxp-clk",
+ .of_match_table = imx8qxp_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = imx8qxp_clk_probe,
+};
+builtin_platform_driver(imx8qxp_clk_driver);
diff --git a/drivers/clk/imx/clk-lpcg-scu.c b/drivers/clk/imx/clk-lpcg-scu.c
new file mode 100644
index 000000000000..a73a799fb777
--- /dev/null
+++ b/drivers/clk/imx/clk-lpcg-scu.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "clk-scu.h"
+
+static DEFINE_SPINLOCK(imx_lpcg_scu_lock);
+
+#define CLK_GATE_SCU_LPCG_MASK 0x3
+#define CLK_GATE_SCU_LPCG_HW_SEL BIT(0)
+#define CLK_GATE_SCU_LPCG_SW_SEL BIT(1)
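+
+/*
+ * Each LPCG clock owns a 2-bit control field at bit_idx in its LPCG
+ * register: clk_lpcg_scu_enable() below writes SW_SEL (and additionally
+ * HW_SEL when hardware autogating is requested) into that field, while
+ * clk_lpcg_scu_disable() clears the field to gate the clock off.
+ */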
+
+/*
+ * struct clk_lpcg_scu - Description of LPCG clock
+ *
+ * @hw: clk_hw of this LPCG
+ * @reg: register of this LPCG clock
+ * @bit_idx: bit index of this LPCG clock
+ * @hw_gate: HW auto gate enable
+ *
+ * This structure describes one LPCG clock
+ */
+struct clk_lpcg_scu {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 bit_idx;
+ bool hw_gate;
+};
+
+#define to_clk_lpcg_scu(_hw) container_of(_hw, struct clk_lpcg_scu, hw)
+
+static int clk_lpcg_scu_enable(struct clk_hw *hw)
+{
+ struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
+ unsigned long flags;
+ u32 reg, val;
+
+ spin_lock_irqsave(&imx_lpcg_scu_lock, flags);
+
+ reg = readl_relaxed(clk->reg);
+ reg &= ~(CLK_GATE_SCU_LPCG_MASK << clk->bit_idx);
+
+ val = CLK_GATE_SCU_LPCG_SW_SEL;
+ if (clk->hw_gate)
+ val |= CLK_GATE_SCU_LPCG_HW_SEL;
+
+ reg |= val << clk->bit_idx;
+ writel(reg, clk->reg);
+
+ spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
+
+ return 0;
+}
+
+static void clk_lpcg_scu_disable(struct clk_hw *hw)
+{
+ struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&imx_lpcg_scu_lock, flags);
+
+ reg = readl_relaxed(clk->reg);
+ reg &= ~(CLK_GATE_SCU_LPCG_MASK << clk->bit_idx);
+ writel(reg, clk->reg);
+
+ spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags);
+}
+
+static const struct clk_ops clk_lpcg_scu_ops = {
+ .enable = clk_lpcg_scu_enable,
+ .disable = clk_lpcg_scu_disable,
+};
+
+struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ u8 bit_idx, bool hw_gate)
+{
+ struct clk_lpcg_scu *clk;
+ struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return ERR_PTR(-ENOMEM);
+
+ clk->reg = reg;
+ clk->bit_idx = bit_idx;
+ clk->hw_gate = hw_gate;
+
+ init.name = name;
+ init.ops = &clk_lpcg_scu_ops;
+ init.flags = CLK_SET_RATE_PARENT | flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
+
+ clk->hw.init = &init;
+
+ hw = &clk->hw;
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ kfree(clk);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
new file mode 100644
index 000000000000..7e9134b205ab
--- /dev/null
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Author: Dong Aisheng <aisheng.dong@nxp.com>
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+/**
+ * struct clk_pfdv2 - IMX PFD clock
+ * @hw: clk_hw of this PFD
+ * @reg: PFD register address
+ * @gate_bit: Gate bit offset
+ * @vld_bit: Valid bit offset
+ * @frac_off: PLL Fractional Divider offset
+ */
+struct clk_pfdv2 {
+ struct clk_hw hw;
+ void __iomem *reg;
+ u8 gate_bit;
+ u8 vld_bit;
+ u8 frac_off;
+};
+
+#define to_clk_pfdv2(_hw) container_of(_hw, struct clk_pfdv2, hw)
+
+#define CLK_PFDV2_FRAC_MASK 0x3f
+
+#define LOCK_TIMEOUT_US USEC_PER_MSEC
+
+static DEFINE_SPINLOCK(pfd_lock);
+
+static int clk_pfdv2_wait(struct clk_pfdv2 *pfd)
+{
+ u32 val;
+
+ return readl_poll_timeout(pfd->reg, val, val & BIT(pfd->vld_bit),
+ 0, LOCK_TIMEOUT_US);
+}
+
+static int clk_pfdv2_enable(struct clk_hw *hw)
+{
+ struct clk_pfdv2 *pfd = to_clk_pfdv2(hw);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&pfd_lock, flags);
+ val = readl_relaxed(pfd->reg);
+ val &= ~BIT(pfd->gate_bit);
+ writel_relaxed(val, pfd->reg);
+ spin_unlock_irqrestore(&pfd_lock, flags);
+
+ return clk_pfdv2_wait(pfd);
+}
+
+static void clk_pfdv2_disable(struct clk_hw *hw)
+{
+ struct clk_pfdv2 *pfd = to_clk_pfdv2(hw);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&pfd_lock, flags);
+ val = readl_relaxed(pfd->reg);
+ val |= BIT(pfd->gate_bit);
+ writel_relaxed(val, pfd->reg);
+ spin_unlock_irqrestore(&pfd_lock, flags);
+}
+
+static unsigned long clk_pfdv2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pfdv2 *pfd = to_clk_pfdv2(hw);
+ u64 tmp = parent_rate;
+ u8 frac;
+
+ frac = (readl_relaxed(pfd->reg) >> pfd->frac_off)
+ & CLK_PFDV2_FRAC_MASK;
+
+ if (!frac) {
+ pr_debug("clk_pfdv2: %s invalid pfd frac value 0\n",
+ clk_hw_get_name(hw));
+ return 0;
+ }
+
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
+
+static long clk_pfdv2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u64 tmp = *prate;
+ u8 frac;
+
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+
+ if (frac < 12)
+ frac = 12;
+ else if (frac > 35)
+ frac = 35;
+
+ tmp = *prate;
+ tmp *= 18;
+ do_div(tmp, frac);
+
+ return tmp;
+}
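+
+/*
+ * The PFD rate math implemented above is rate = parent_rate * 18 / frac
+ * with frac clamped to [12, 35]. For an assumed 480 MHz parent PLL
+ * (illustrative only) that spans roughly 480 * 18 / 35 ~= 246.9 MHz up
+ * to 480 * 18 / 12 = 720 MHz.
+ */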
+
+static int clk_pfdv2_is_enabled(struct clk_hw *hw)
+{
+ struct clk_pfdv2 *pfd = to_clk_pfdv2(hw);
+
+ if (readl_relaxed(pfd->reg) & BIT(pfd->gate_bit))
+ return 0;
+
+ return 1;
+}
+
+static int clk_pfdv2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pfdv2 *pfd = to_clk_pfdv2(hw);
+ unsigned long flags;
+ u64 tmp = parent_rate;
+ u32 val;
+ u8 frac;
+
+ tmp = tmp * 18 + rate / 2;
+ do_div(tmp, rate);
+ frac = tmp;
+ if (frac < 12)
+ frac = 12;
+ else if (frac > 35)
+ frac = 35;
+
+ spin_lock_irqsave(&pfd_lock, flags);
+ val = readl_relaxed(pfd->reg);
+ val &= ~(CLK_PFDV2_FRAC_MASK << pfd->frac_off);
+ val |= frac << pfd->frac_off;
+ writel_relaxed(val, pfd->reg);
+ spin_unlock_irqrestore(&pfd_lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pfdv2_ops = {
+ .enable = clk_pfdv2_enable,
+ .disable = clk_pfdv2_disable,
+ .recalc_rate = clk_pfdv2_recalc_rate,
+ .round_rate = clk_pfdv2_round_rate,
+ .set_rate = clk_pfdv2_set_rate,
+ .is_enabled = clk_pfdv2_is_enabled,
+};
+
+struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx)
+{
+ struct clk_init_data init;
+ struct clk_pfdv2 *pfd;
+ struct clk_hw *hw;
+ int ret;
+
+ WARN_ON(idx > 3);
+
+ pfd = kzalloc(sizeof(*pfd), GFP_KERNEL);
+ if (!pfd)
+ return ERR_PTR(-ENOMEM);
+
+ pfd->reg = reg;
+ /* store bit positions, not masks: a mask for idx > 0 would overflow u8 */
+ pfd->gate_bit = (idx + 1) * 8 - 1;
+ pfd->vld_bit = pfd->gate_bit - 1;
+ pfd->frac_off = idx * 8;
+
+ init.name = name;
+ init.ops = &clk_pfdv2_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE;
+
+ pfd->hw.init = &init;
+
+ hw = &pfd->hw;
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ kfree(pfd);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
new file mode 100644
index 000000000000..d38bc9f87c1d
--- /dev/null
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ * Author: Dong Aisheng <aisheng.dong@nxp.com>
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+/* PLL Control Status Register (xPLLCSR) */
+#define PLL_CSR_OFFSET 0x0
+#define PLL_VLD BIT(24)
+#define PLL_EN BIT(0)
+
+/* PLL Configuration Register (xPLLCFG) */
+#define PLL_CFG_OFFSET 0x08
+#define BP_PLL_MULT 16
+#define BM_PLL_MULT (0x7f << 16)
+
+/* PLL Numerator Register (xPLLNUM) */
+#define PLL_NUM_OFFSET 0x10
+
+/* PLL Denominator Register (xPLLDENOM) */
+#define PLL_DENOM_OFFSET 0x14
+
+struct clk_pllv4 {
+ struct clk_hw hw;
+ void __iomem *base;
+};
+
+/* Valid PLL MULT Table */
+static const int pllv4_mult_table[] = {33, 27, 22, 20, 17, 16};
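+
+/*
+ * The PLL output is parent_rate multiplied by one of the entries above;
+ * clk_pllv4_set_rate() rejects any other multiplier with -EINVAL. With
+ * an assumed 24 MHz reference (illustrative only), valid rates include
+ * 24 MHz * 20 = 480 MHz and 24 MHz * 22 = 528 MHz.
+ */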
+
+#define to_clk_pllv4(__hw) container_of(__hw, struct clk_pllv4, hw)
+
+#define LOCK_TIMEOUT_US USEC_PER_MSEC
+
+static inline int clk_pllv4_wait_lock(struct clk_pllv4 *pll)
+{
+ u32 csr;
+
+ return readl_poll_timeout(pll->base + PLL_CSR_OFFSET,
+ csr, csr & PLL_VLD, 0, LOCK_TIMEOUT_US);
+}
+
+static int clk_pllv4_is_enabled(struct clk_hw *hw)
+{
+ struct clk_pllv4 *pll = to_clk_pllv4(hw);
+
+ if (readl_relaxed(pll->base) & PLL_EN)
+ return 1;
+
+ return 0;
+}
+
+static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_pllv4 *pll = to_clk_pllv4(hw);
+ u32 div;
+
+ div = readl_relaxed(pll->base + PLL_CFG_OFFSET);
+ div &= BM_PLL_MULT;
+ div >>= BP_PLL_MULT;
+
+ return parent_rate * div;
+}
+
+static long clk_pllv4_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long parent_rate = *prate;
+ unsigned long round_rate, i;
+
+ for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
+ round_rate = parent_rate * pllv4_mult_table[i];
+ if (rate >= round_rate)
+ return round_rate;
+ }
+
+ return round_rate;
+}
+
+static bool clk_pllv4_is_valid_mult(unsigned int mult)
+{
+ int i;
+
+ /* check if mult is in valid MULT table */
+ for (i = 0; i < ARRAY_SIZE(pllv4_mult_table); i++) {
+ if (pllv4_mult_table[i] == mult)
+ return true;
+ }
+
+ return false;
+}
+
+static int clk_pllv4_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_pllv4 *pll = to_clk_pllv4(hw);
+ u32 val, mult;
+
+ mult = rate / parent_rate;
+
+ if (!clk_pllv4_is_valid_mult(mult))
+ return -EINVAL;
+
+ val = readl_relaxed(pll->base + PLL_CFG_OFFSET);
+ val &= ~BM_PLL_MULT;
+ val |= mult << BP_PLL_MULT;
+ writel_relaxed(val, pll->base + PLL_CFG_OFFSET);
+
+ return 0;
+}
+
+static int clk_pllv4_enable(struct clk_hw *hw)
+{
+ u32 val;
+ struct clk_pllv4 *pll = to_clk_pllv4(hw);
+
+ val = readl_relaxed(pll->base);
+ val |= PLL_EN;
+ writel_relaxed(val, pll->base);
+
+ return clk_pllv4_wait_lock(pll);
+}
+
+static void clk_pllv4_disable(struct clk_hw *hw)
+{
+ u32 val;
+ struct clk_pllv4 *pll = to_clk_pllv4(hw);
+
+ val = readl_relaxed(pll->base);
+ val &= ~PLL_EN;
+ writel_relaxed(val, pll->base);
+}
+
+static const struct clk_ops clk_pllv4_ops = {
+ .recalc_rate = clk_pllv4_recalc_rate,
+ .round_rate = clk_pllv4_round_rate,
+ .set_rate = clk_pllv4_set_rate,
+ .enable = clk_pllv4_enable,
+ .disable = clk_pllv4_disable,
+ .is_enabled = clk_pllv4_is_enabled,
+};
+
+struct clk_hw *imx_clk_pllv4(const char *name, const char *parent_name,
+ void __iomem *base)
+{
+ struct clk_pllv4 *pll;
+ struct clk_hw *hw;
+ struct clk_init_data init;
+ int ret;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ pll->base = base;
+
+ init.name = name;
+ init.ops = &clk_pllv4_ops;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = CLK_SET_RATE_GATE;
+
+ pll->hw.init = &init;
+
+ hw = &pll->hw;
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ kfree(pll);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
diff --git a/drivers/clk/imx/clk-sccg-pll.c b/drivers/clk/imx/clk-sccg-pll.c
new file mode 100644
index 000000000000..ee7752bace89
--- /dev/null
+++ b/drivers/clk/imx/clk-sccg-pll.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2018 NXP.
+ *
+ * This driver supports the SCCG PLLs found in the i.MX8M SoCs.
+ *
+ * Documentation for this SCCG PLL can be found at:
+ * https://www.nxp.com/docs/en/reference-manual/IMX8MDQLQRM.pdf#page=834
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <linux/bitfield.h>
+
+#include "clk.h"
+
+/* PLL CFGs */
+#define PLL_CFG0 0x0
+#define PLL_CFG1 0x4
+#define PLL_CFG2 0x8
+
+#define PLL_DIVF1_MASK GENMASK(18, 13)
+#define PLL_DIVF2_MASK GENMASK(12, 7)
+#define PLL_DIVR1_MASK GENMASK(27, 25)
+#define PLL_DIVR2_MASK GENMASK(24, 19)
+#define PLL_REF_MASK GENMASK(2, 0)
+
+#define PLL_LOCK_MASK BIT(31)
+#define PLL_PD_MASK BIT(7)
+
+#define OSC_25M 25000000
+#define OSC_27M 27000000
+
+#define PLL_SCCG_LOCK_TIMEOUT 70
+
+struct clk_sccg_pll {
+ struct clk_hw hw;
+ void __iomem *base;
+};
+
+#define to_clk_sccg_pll(_hw) container_of(_hw, struct clk_sccg_pll, hw)
+
+static int clk_pll_wait_lock(struct clk_sccg_pll *pll)
+{
+ u32 val;
+
+ return readl_poll_timeout(pll->base, val, val & PLL_LOCK_MASK, 0,
+ PLL_SCCG_LOCK_TIMEOUT);
+}
+
+static int clk_pll1_is_prepared(struct clk_hw *hw)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base + PLL_CFG0);
+ return (val & PLL_PD_MASK) ? 0 : 1;
+}
+
+static unsigned long clk_pll1_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ u32 val, divf;
+
+ val = readl_relaxed(pll->base + PLL_CFG2);
+ divf = FIELD_GET(PLL_DIVF1_MASK, val);
+
+ return parent_rate * 2 * (divf + 1);
+}
+
+static long clk_pll1_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ unsigned long parent_rate = *prate;
+ u32 div;
+
+ if (!parent_rate)
+ return 0;
+
+ div = rate / (parent_rate * 2);
+
+ return parent_rate * div * 2;
+}
+
+static int clk_pll1_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ u32 val;
+ u32 divf;
+
+ if (!parent_rate)
+ return -EINVAL;
+
+ divf = rate / (parent_rate * 2);
+
+ val = readl_relaxed(pll->base + PLL_CFG2);
+ val &= ~PLL_DIVF1_MASK;
+ val |= FIELD_PREP(PLL_DIVF1_MASK, divf - 1);
+ writel_relaxed(val, pll->base + PLL_CFG2);
+
+ return clk_pll_wait_lock(pll);
+}
+
+static int clk_pll1_prepare(struct clk_hw *hw)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base + PLL_CFG0);
+ val &= ~PLL_PD_MASK;
+ writel_relaxed(val, pll->base + PLL_CFG0);
+
+ return clk_pll_wait_lock(pll);
+}
+
+static void clk_pll1_unprepare(struct clk_hw *hw)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ u32 val;
+
+ val = readl_relaxed(pll->base + PLL_CFG0);
+ val |= PLL_PD_MASK;
+ writel_relaxed(val, pll->base + PLL_CFG0);
+}
+
+static unsigned long clk_pll2_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+ u32 val, ref, divr1, divf1, divr2, divf2;
+ u64 temp64;
+
+ val = readl_relaxed(pll->base + PLL_CFG0);
+ switch (FIELD_GET(PLL_REF_MASK, val)) {
+ case 0:
+ ref = OSC_25M;
+ break;
+ case 1:
+ ref = OSC_27M;
+ break;
+ default:
+ ref = OSC_25M;
+ break;
+ }
+
+ val = readl_relaxed(pll->base + PLL_CFG2);
+ divr1 = FIELD_GET(PLL_DIVR1_MASK, val);
+ divr2 = FIELD_GET(PLL_DIVR2_MASK, val);
+ divf1 = FIELD_GET(PLL_DIVF1_MASK, val);
+ divf2 = FIELD_GET(PLL_DIVF2_MASK, val);
+
+ temp64 = ref * 2;
+ temp64 *= (divf1 + 1) * (divf2 + 1);
+
+ do_div(temp64, (divr1 + 1) * (divr2 + 1));
+
+ return temp64;
+}
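+
+/*
+ * Per the computation above, the PLL2 output is
+ *   rate = ref * 2 * (divf1 + 1) * (divf2 + 1) /
+ *          ((divr1 + 1) * (divr2 + 1))
+ * e.g. an assumed 25 MHz reference with divr1 = 0, divf1 = 29,
+ * divr2 = 29 and divf2 = 0 (illustrative divider values) gives
+ * 25 MHz * 2 * 30 * 1 / (1 * 30) = 50 MHz.
+ */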
+
+static long clk_pll2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ u32 div;
+ unsigned long parent_rate = *prate;
+
+ if (!parent_rate)
+ return 0;
+
+ div = rate / parent_rate;
+
+ return parent_rate * div;
+}
+
+static int clk_pll2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u32 val;
+ u32 divf;
+ struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
+
+ if (!parent_rate)
+ return -EINVAL;
+
+ divf = rate / parent_rate;
+
+ val = readl_relaxed(pll->base + PLL_CFG2);
+ val &= ~PLL_DIVF2_MASK;
+ val |= FIELD_PREP(PLL_DIVF2_MASK, divf - 1);
+ writel_relaxed(val, pll->base + PLL_CFG2);
+
+ return clk_pll_wait_lock(pll);
+}
+
+static const struct clk_ops clk_sccg_pll1_ops = {
+ .is_prepared = clk_pll1_is_prepared,
+ .recalc_rate = clk_pll1_recalc_rate,
+ .round_rate = clk_pll1_round_rate,
+ .set_rate = clk_pll1_set_rate,
+};
+
+static const struct clk_ops clk_sccg_pll2_ops = {
+ .prepare = clk_pll1_prepare,
+ .unprepare = clk_pll1_unprepare,
+ .recalc_rate = clk_pll2_recalc_rate,
+ .round_rate = clk_pll2_round_rate,
+ .set_rate = clk_pll2_set_rate,
+};
+
+struct clk *imx_clk_sccg_pll(const char *name,
+ const char *parent_name,
+ void __iomem *base,
+ enum imx_sccg_pll_type pll_type)
+{
+ struct clk_sccg_pll *pll;
+ struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
+
+ switch (pll_type) {
+ case SCCG_PLL1:
+ init.ops = &clk_sccg_pll1_ops;
+ break;
+ case SCCG_PLL2:
+ init.ops = &clk_sccg_pll2_ops;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = 0;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ pll->base = base;
+ pll->hw.init = &init;
+
+ hw = &pll->hw;
+
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ kfree(pll);
+ return ERR_PTR(ret);
+ }
+
+ return hw->clk;
+}
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
new file mode 100644
index 000000000000..7ccf7edfe11c
--- /dev/null
+++ b/drivers/clk/imx/clk-scu.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include "clk-scu.h"
+
+static struct imx_sc_ipc *ccm_ipc_handle;
+
+/*
+ * struct clk_scu - Description of one SCU clock
+ * @hw: the common clk_hw
+ * @rsrc_id: resource ID of this SCU clock
+ * @clk_type: type of this clock resource
+ */
+struct clk_scu {
+ struct clk_hw hw;
+ u16 rsrc_id;
+ u8 clk_type;
+};
+
+/*
+ * struct imx_sc_msg_req_set_clock_rate - clock set rate protocol
+ * @hdr: SCU protocol header
+ * @rate: rate to set
+ * @resource: clock resource to set rate
+ * @clk: clk type of this resource
+ *
+ * This structure describes the SCU protocol message used to set a clock rate
+ */
+struct imx_sc_msg_req_set_clock_rate {
+ struct imx_sc_rpc_msg hdr;
+ __le32 rate;
+ __le16 resource;
+ u8 clk;
+} __packed;
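+
+/*
+ * The hdr.size values used with these messages appear to count the
+ * whole message in 32-bit words, header included: size = 3 for the
+ * set-rate call covers the header word, the rate word and the packed
+ * resource/clk word (an inference from this file, not from SCU
+ * firmware documentation).
+ */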
+
+struct req_get_clock_rate {
+ __le16 resource;
+ u8 clk;
+} __packed;
+
+struct resp_get_clock_rate {
+ __le32 rate;
+};
+
+/*
+ * struct imx_sc_msg_get_clock_rate - clock get rate protocol
+ * @hdr: SCU protocol header
+ * @req: get rate request protocol
+ * @resp: get rate response protocol
+ *
+ * This structure describes the SCU protocol message used to get a clock rate
+ */
+struct imx_sc_msg_get_clock_rate {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct req_get_clock_rate req;
+ struct resp_get_clock_rate resp;
+ } data;
+};
+
+/*
+ * struct imx_sc_msg_req_clock_enable - clock gate protocol
+ * @hdr: SCU protocol header
+ * @resource: clock resource to gate
+ * @clk: clk type of this resource
+ * @enable: whether to enable (or gate off) the clock
+ * @autog: HW auto gate enable
+ *
+ * This structure describes the SCU protocol for clock gating
+ */
+struct imx_sc_msg_req_clock_enable {
+ struct imx_sc_rpc_msg hdr;
+ __le16 resource;
+ u8 clk;
+ u8 enable;
+ u8 autog;
+} __packed;
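One detail worth calling out: the hdr->size values used by the functions below appear to count 32-bit words, including the 4-byte header itself. This is an inference from the values in this patch, not something the patch states:

/* Hedged reading: hdr->size counts 32-bit words, header included. */
static u8 example_msg_size_words(size_t payload_bytes)
{
	return DIV_ROUND_UP(sizeof(struct imx_sc_rpc_msg) + payload_bytes, 4);
}
/*
 * clock_enable payload is 2 + 1 + 1 + 1 = 5 bytes, so
 * DIV_ROUND_UP(4 + 5, 4) == 3, matching the hdr->size = 3 set below.
 */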
+
+static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
+{
+ return container_of(hw, struct clk_scu, hw);
+}
+
+int imx_clk_scu_init(void)
+{
+ return imx_scu_get_handle(&ccm_ipc_handle);
+}
+
+/*
+ * clk_scu_recalc_rate - Get clock rate for a SCU clock
+ * @hw: clock to get rate for
+ * @parent_rate: parent rate provided by common clock framework, not used
+ *
+ * Gets the current clock rate of a SCU clock. Returns the current
+ * clock rate, or zero on failure.
+ */
+static unsigned long clk_scu_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_scu *clk = to_clk_scu(hw);
+ struct imx_sc_msg_get_clock_rate msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ int ret;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_GET_CLOCK_RATE;
+ hdr->size = 2;
+
+ msg.data.req.resource = cpu_to_le16(clk->rsrc_id);
+ msg.data.req.clk = clk->clk_type;
+
+ ret = imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
+ if (ret) {
+ pr_err("%s: failed to get clock rate %d\n",
+ clk_hw_get_name(hw), ret);
+ return 0;
+ }
+
+ return le32_to_cpu(msg.data.resp.rate);
+}
+
+/*
+ * clk_scu_round_rate - Round clock rate for a SCU clock
+ * @hw: clock to round rate for
+ * @rate: rate to round
+ * @parent_rate: parent rate provided by common clock framework, not used
+ *
+ * Returns the rounded rate; the requested rate is returned unchanged,
+ * leaving the actual rounding to the SCU firmware.
+ */
+static long clk_scu_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * Assume all requested rates are supported and let the SCU firmware
+ * handle the remaining work
+ */
+ return rate;
+}
+
+/*
+ * clk_scu_set_rate - Set rate for a SCU clock
+ * @hw: clock to change rate for
+ * @rate: target rate for the clock
+ * @parent_rate: rate of the clock parent, not used for SCU clocks
+ *
+ * Sets a clock frequency for a SCU clock. Returns the SCU
+ * protocol status.
+ */
+static int clk_scu_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_scu *clk = to_clk_scu(hw);
+ struct imx_sc_msg_req_set_clock_rate msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_SET_CLOCK_RATE;
+ hdr->size = 3;
+
+ msg.rate = cpu_to_le32(rate);
+ msg.resource = cpu_to_le16(clk->rsrc_id);
+ msg.clk = clk->clk_type;
+
+ return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
+}
+
+static int sc_pm_clock_enable(struct imx_sc_ipc *ipc, u16 resource,
+ u8 clk, bool enable, bool autog)
+{
+ struct imx_sc_msg_req_clock_enable msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_PM;
+ hdr->func = IMX_SC_PM_FUNC_CLOCK_ENABLE;
+ hdr->size = 3;
+
+ msg.resource = cpu_to_le16(resource);
+ msg.clk = clk;
+ msg.enable = enable;
+ msg.autog = autog;
+
+ return imx_scu_call_rpc(ccm_ipc_handle, &msg, true);
+}
+
+/*
+ * clk_scu_prepare - Enable a SCU clock
+ * @hw: clock to enable
+ *
+ * Enable the clock at the DSC slice level
+ */
+static int clk_scu_prepare(struct clk_hw *hw)
+{
+ struct clk_scu *clk = to_clk_scu(hw);
+
+ return sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
+ clk->clk_type, true, false);
+}
+
+/*
+ * clk_scu_unprepare - Disable a SCU clock
+ * @hw: clock to disable
+ *
+ * Disable the clock at the DSC slice level
+ */
+static void clk_scu_unprepare(struct clk_hw *hw)
+{
+ struct clk_scu *clk = to_clk_scu(hw);
+ int ret;
+
+ ret = sc_pm_clock_enable(ccm_ipc_handle, clk->rsrc_id,
+ clk->clk_type, false, false);
+ if (ret)
+ pr_warn("%s: clk unprepare failed %d\n", clk_hw_get_name(hw),
+ ret);
+}
+
+static const struct clk_ops clk_scu_ops = {
+ .recalc_rate = clk_scu_recalc_rate,
+ .round_rate = clk_scu_round_rate,
+ .set_rate = clk_scu_set_rate,
+ .prepare = clk_scu_prepare,
+ .unprepare = clk_scu_unprepare,
+};
+
+struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type)
+{
+ struct clk_init_data init;
+ struct clk_scu *clk;
+ struct clk_hw *hw;
+ int ret;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return ERR_PTR(-ENOMEM);
+
+ clk->rsrc_id = rsrc_id;
+ clk->clk_type = clk_type;
+
+ init.name = name;
+ init.ops = &clk_scu_ops;
+ init.num_parents = 0;
+ /*
+ * Note: on MX8, clocks are tightly coupled with their power domain,
+ * so once the power domain is off the clock status may be lost.
+ * Mark the clock NOCACHE so users retrieve the real clock status
+ * from the hardware instead of a possibly stale cached rate.
+ */
+ init.flags = CLK_GET_RATE_NOCACHE;
+ clk->hw.init = &init;
+
+ hw = &clk->hw;
+ ret = clk_hw_register(NULL, hw);
+ if (ret) {
+ kfree(clk);
+ hw = ERR_PTR(ret);
+ }
+
+ return hw;
+}
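A minimal usage sketch, assuming hypothetical resource and clock-type IDs (real callers would use the IMX_SC_R_* and IMX_SC_PM_CLK_* constants from the SCU firmware headers):

/* Sketch only: 47 and 2 stand in for real SCU resource/clock-type IDs. */
static int example_scu_clk_setup(void)
{
	struct clk_hw *hw;
	int ret;

	ret = imx_clk_scu_init();	/* fetch the SCU IPC handle first */
	if (ret)
		return ret;

	hw = imx_clk_scu("uart0_clk", 47, 2);
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	/* NOCACHE means reads go back to the firmware for the real rate */
	return clk_set_rate(hw->clk, 80000000);
}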
diff --git a/drivers/clk/imx/clk-scu.h b/drivers/clk/imx/clk-scu.h
new file mode 100644
index 000000000000..52c1746ec988
--- /dev/null
+++ b/drivers/clk/imx/clk-scu.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#ifndef __IMX_CLK_SCU_H
+#define __IMX_CLK_SCU_H
+
+#include <linux/firmware/imx/sci.h>
+
+int imx_clk_scu_init(void);
+struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id, u8 clk_type);
+
+struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ u8 bit_idx, bool hw_gate);
+#endif
diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
index 9074e6974b6d..1efed86217f7 100644
--- a/drivers/clk/imx/clk.c
+++ b/drivers/clk/imx/clk.c
@@ -18,6 +18,16 @@ void __init imx_check_clocks(struct clk *clks[], unsigned int count)
i, PTR_ERR(clks[i]));
}
+void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ if (IS_ERR(clks[i]))
+ pr_err("i.MX clk %u: register failed with %ld\n",
+ i, PTR_ERR(clks[i]));
+}
+
static struct clk * __init imx_obtain_fixed_clock_from_dt(const char *name)
{
struct of_phandle_args phandle;
@@ -49,6 +59,18 @@ struct clk * __init imx_obtain_fixed_clock(
return clk;
}
+struct clk_hw * __init imx_obtain_fixed_clk_hw(struct device_node *np,
+ const char *name)
+{
+ struct clk *clk;
+
+ clk = of_clk_get_by_name(np, name);
+ if (IS_ERR(clk))
+ return ERR_PTR(-ENOENT);
+
+ return __clk_get_hw(clk);
+}
+
/*
* This fixups the register CCM_CSCMR1 write value.
* The write/read/divider values of the aclk_podf field
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 5895e2237b6c..028312de21b8 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -8,6 +8,7 @@
extern spinlock_t imx_ccm_lock;
void imx_check_clocks(struct clk *clks[], unsigned int count);
+void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
void imx_register_uart_clocks(struct clk ** const clks[]);
extern void imx_cscmr1_fixup(u32 *val);
@@ -21,12 +22,24 @@ enum imx_pllv1_type {
IMX_PLLV1_IMX35,
};
+enum imx_sccg_pll_type {
+ SCCG_PLL1,
+ SCCG_PLL2,
+};
+
struct clk *imx_clk_pllv1(enum imx_pllv1_type type, const char *name,
const char *parent, void __iomem *base);
struct clk *imx_clk_pllv2(const char *name, const char *parent,
void __iomem *base);
+struct clk *imx_clk_frac_pll(const char *name, const char *parent_name,
+ void __iomem *base);
+
+struct clk *imx_clk_sccg_pll(const char *name, const char *parent_name,
+ void __iomem *base,
+ enum imx_sccg_pll_type pll_type);
+
enum imx_pllv3_type {
IMX_PLLV3_GENERIC,
IMX_PLLV3_SYS,
@@ -42,6 +55,9 @@ enum imx_pllv3_type {
struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
const char *parent_name, void __iomem *base, u32 div_mask);
+struct clk_hw *imx_clk_pllv4(const char *name, const char *parent_name,
+ void __iomem *base);
+
struct clk *clk_register_gate2(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx, u8 cgr_val,
@@ -51,26 +67,38 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
struct clk * imx_obtain_fixed_clock(
const char *name, unsigned long rate);
+struct clk_hw *imx_obtain_fixed_clk_hw(struct device_node *np,
+ const char *name);
+
struct clk *imx_clk_gate_exclusive(const char *name, const char *parent,
void __iomem *reg, u8 shift, u32 exclusive_mask);
struct clk *imx_clk_pfd(const char *name, const char *parent_name,
void __iomem *reg, u8 idx);
+struct clk_hw *imx_clk_pfdv2(const char *name, const char *parent_name,
+ void __iomem *reg, u8 idx);
+
struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
void __iomem *reg, u8 shift, u8 width,
void __iomem *busy_reg, u8 busy_shift);
struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
u8 width, void __iomem *busy_reg, u8 busy_shift,
- const char **parent_names, int num_parents);
+ const char * const *parent_names, int num_parents);
+
+struct clk_hw *imx7ulp_clk_composite(const char *name,
+ const char * const *parent_names,
+ int num_parents, bool mux_present,
+ bool rate_present, bool gate_present,
+ void __iomem *reg);
struct clk *imx_clk_fixup_divider(const char *name, const char *parent,
void __iomem *reg, u8 shift, u8 width,
void (*fixup)(u32 *val));
struct clk *imx_clk_fixup_mux(const char *name, void __iomem *reg,
- u8 shift, u8 width, const char **parents,
+ u8 shift, u8 width, const char * const *parents,
int num_parents, void (*fixup)(u32 *val));
static inline struct clk *imx_clk_fixed(const char *name, int rate)
@@ -78,8 +106,19 @@ static inline struct clk *imx_clk_fixed(const char *name, int rate)
return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}
+static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate)
+{
+ return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate);
+}
+
+static inline struct clk_hw *imx_get_clk_hw_fixed(const char *name, int rate)
+{
+ return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate);
+}
+
static inline struct clk *imx_clk_mux_ldb(const char *name, void __iomem *reg,
- u8 shift, u8 width, const char **parents, int num_parents)
+ u8 shift, u8 width, const char * const *parents,
+ int num_parents)
{
return clk_register_mux(NULL, name, parents, num_parents,
CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg,
@@ -100,6 +139,15 @@ static inline struct clk *imx_clk_divider(const char *name, const char *parent,
reg, shift, width, 0, &imx_ccm_lock);
}
+static inline struct clk_hw *imx_clk_hw_divider(const char *name,
+ const char *parent,
+ void __iomem *reg, u8 shift,
+ u8 width)
+{
+ return clk_hw_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_divider_flags(const char *name,
const char *parent, void __iomem *reg, u8 shift, u8 width,
unsigned long flags)
@@ -108,6 +156,15 @@ static inline struct clk *imx_clk_divider_flags(const char *name,
reg, shift, width, 0, &imx_ccm_lock);
}
+static inline struct clk_hw *imx_clk_hw_divider_flags(const char *name,
+ const char *parent,
+ void __iomem *reg, u8 shift,
+ u8 width, unsigned long flags)
+{
+ return clk_hw_register_divider(NULL, name, parent, flags,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_divider2(const char *name, const char *parent,
void __iomem *reg, u8 shift, u8 width)
{
@@ -116,6 +173,15 @@ static inline struct clk *imx_clk_divider2(const char *name, const char *parent,
reg, shift, width, 0, &imx_ccm_lock);
}
+static inline struct clk *imx_clk_divider2_flags(const char *name,
+ const char *parent, void __iomem *reg, u8 shift, u8 width,
+ unsigned long flags)
+{
+ return clk_register_divider(NULL, name, parent,
+ flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_gate(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
@@ -130,6 +196,13 @@ static inline struct clk *imx_clk_gate_flags(const char *name, const char *paren
shift, 0, &imx_ccm_lock);
}
+static inline struct clk_hw *imx_clk_hw_gate(const char *name, const char *parent,
+ void __iomem *reg, u8 shift)
+{
+ return clk_hw_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
+ shift, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_gate_dis(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
@@ -190,6 +263,15 @@ static inline struct clk *imx_clk_gate3(const char *name, const char *parent,
reg, shift, 0, &imx_ccm_lock);
}
+static inline struct clk *imx_clk_gate3_flags(const char *name,
+ const char *parent, void __iomem *reg, u8 shift,
+ unsigned long flags)
+{
+ return clk_register_gate(NULL, name, parent,
+ flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ reg, shift, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_gate4(const char *name, const char *parent,
void __iomem *reg, u8 shift)
{
@@ -198,8 +280,18 @@ static inline struct clk *imx_clk_gate4(const char *name, const char *parent,
reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
}
+static inline struct clk *imx_clk_gate4_flags(const char *name,
+ const char *parent, void __iomem *reg, u8 shift,
+ unsigned long flags)
+{
+ return clk_register_gate2(NULL, name, parent,
+ flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
+}
+
static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
- u8 shift, u8 width, const char **parents, int num_parents)
+ u8 shift, u8 width, const char * const *parents,
+ int num_parents)
{
return clk_register_mux(NULL, name, parents, num_parents,
CLK_SET_RATE_NO_REPARENT, reg, shift,
@@ -207,24 +299,78 @@ static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
}
static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
- u8 shift, u8 width, const char **parents, int num_parents)
+ u8 shift, u8 width, const char * const *parents,
+ int num_parents)
{
return clk_register_mux(NULL, name, parents, num_parents,
CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
reg, shift, width, 0, &imx_ccm_lock);
}
+static inline struct clk_hw *imx_clk_hw_mux2(const char *name, void __iomem *reg,
+ u8 shift, u8 width,
+ const char * const *parents,
+ int num_parents)
+{
+ return clk_hw_register_mux(NULL, name, parents, num_parents,
+ CLK_SET_RATE_NO_REPARENT |
+ CLK_OPS_PARENT_ENABLE,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
static inline struct clk *imx_clk_mux_flags(const char *name,
- void __iomem *reg, u8 shift, u8 width, const char **parents,
- int num_parents, unsigned long flags)
+ void __iomem *reg, u8 shift, u8 width,
+ const char * const *parents, int num_parents,
+ unsigned long flags)
{
return clk_register_mux(NULL, name, parents, num_parents,
flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0,
&imx_ccm_lock);
}
+static inline struct clk *imx_clk_mux2_flags(const char *name,
+ void __iomem *reg, u8 shift, u8 width, const char **parents,
+ int num_parents, unsigned long flags)
+{
+ return clk_register_mux(NULL, name, parents, num_parents,
+ flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
+static inline struct clk_hw *imx_clk_hw_mux_flags(const char *name,
+ void __iomem *reg, u8 shift,
+ u8 width,
+ const char * const *parents,
+ int num_parents,
+ unsigned long flags)
+{
+ return clk_hw_register_mux(NULL, name, parents, num_parents,
+ flags | CLK_SET_RATE_NO_REPARENT,
+ reg, shift, width, 0, &imx_ccm_lock);
+}
+
struct clk *imx_clk_cpu(const char *name, const char *parent_name,
struct clk *div, struct clk *mux, struct clk *pll,
struct clk *step);
+struct clk *imx8m_clk_composite_flags(const char *name,
+ const char **parent_names,
+ int num_parents, void __iomem *reg,
+ unsigned long flags);
+
+#define __imx8m_clk_composite(name, parent_names, reg, flags) \
+ imx8m_clk_composite_flags(name, parent_names, \
+ ARRAY_SIZE(parent_names), reg, \
+ flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+
+#define imx8m_clk_composite(name, parent_names, reg) \
+ __imx8m_clk_composite(name, parent_names, reg, 0)
+
+#define imx8m_clk_composite_critical(name, parent_names, reg) \
+ __imx8m_clk_composite(name, parent_names, reg, CLK_IS_CRITICAL)
+
+struct clk_hw *imx_clk_divider_gate(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, const struct clk_div_table *table,
+ spinlock_t *lock);
#endif
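To make the new clk_hw-based helpers concrete, here is a small sketch composing a three-clock tree; the names and register offsets are invented for illustration:

/* Sketch only: the clock names and 0x100/0x104 offsets are hypothetical. */
static const char * const example_sels[] = { "osc_24m", "pll_a", };

static void example_build_tree(void __iomem *ccm_base)
{
	struct clk_hw *hws[3];

	hws[0] = imx_clk_hw_fixed("osc_24m", 24000000);
	hws[1] = imx_clk_hw_mux2("per_sel", ccm_base + 0x100, 0, 1,
				 example_sels, ARRAY_SIZE(example_sels));
	hws[2] = imx_clk_hw_divider("per_div", "per_sel",
				    ccm_base + 0x104, 0, 3);

	imx_check_clk_hws(hws, ARRAY_SIZE(hws));
}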
diff --git a/drivers/clk/loongson1/clk.c b/drivers/clk/loongson1/clk.c
index cfcfd143fccb..983ce9f6edbb 100644
--- a/drivers/clk/loongson1/clk.c
+++ b/drivers/clk/loongson1/clk.c
@@ -10,6 +10,8 @@
#include <linux/clk-provider.h>
#include <linux/slab.h>
+#include "clk.h"
+
struct clk_hw *__init clk_hw_register_pll(struct device *dev,
const char *name,
const char *parent_name,
@@ -27,9 +29,9 @@ struct clk_hw *__init clk_hw_register_pll(struct device *dev,
init.name = name;
init.ops = ops;
- init.flags = flags | CLK_IS_BASIC;
- init.parent_names = (parent_name ? &parent_name : NULL);
- init.num_parents = (parent_name ? 1 : 0);
+ init.flags = flags;
+ init.parent_names = parent_name ? &parent_name : NULL;
+ init.num_parents = parent_name ? 1 : 0;
hw->init = &init;
/* register the clock */
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 3dd1dab92223..53edade25a1d 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -178,6 +178,29 @@ config COMMON_CLK_MT7622_AUDSYS
This driver supports MediaTek MT7622 AUDSYS clocks providing
to audio consumers such as I2S and TDM.
+config COMMON_CLK_MT7629
+ bool "Clock driver for MediaTek MT7629"
+ depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK && ARM
+ ---help---
+ This driver supports MediaTek MT7629 basic clocks and clocks
+ required for various peripherals found on the MediaTek MT7629 SoC.
+
+config COMMON_CLK_MT7629_ETHSYS
+ bool "Clock driver for MediaTek MT7629 ETHSYS"
+ depends on COMMON_CLK_MT7629
+ ---help---
+ This driver adds support for the Ethernet and SGMII clocks
+ required on the MediaTek MT7629 SoC.
+
+config COMMON_CLK_MT7629_HIFSYS
+ bool "Clock driver for MediaTek MT7629 HIFSYS"
+ depends on COMMON_CLK_MT7629
+ ---help---
+ This driver supports the MediaTek MT7629 HIFSYS clocks provided
+ to PCI-E and USB.
+
config COMMON_CLK_MT8135
bool "Clock driver for MediaTek MT8135"
depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 844b55d2770d..ee4410ff43ab 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -26,5 +26,8 @@ obj-$(CONFIG_COMMON_CLK_MT7622) += clk-mt7622.o
obj-$(CONFIG_COMMON_CLK_MT7622_ETHSYS) += clk-mt7622-eth.o
obj-$(CONFIG_COMMON_CLK_MT7622_HIFSYS) += clk-mt7622-hif.o
obj-$(CONFIG_COMMON_CLK_MT7622_AUDSYS) += clk-mt7622-aud.o
+obj-$(CONFIG_COMMON_CLK_MT7629) += clk-mt7629.o
+obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
+obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c
index 16e56772d280..6c7eaa21e662 100644
--- a/drivers/clk/mediatek/clk-cpumux.c
+++ b/drivers/clk/mediatek/clk-cpumux.c
@@ -53,7 +53,7 @@ static const struct clk_ops clk_cpumux_ops = {
.set_parent = clk_cpumux_set_parent,
};
-static struct clk __init *
+static struct clk *
mtk_clk_register_cpumux(const struct mtk_composite *mux,
struct regmap *regmap)
{
@@ -84,9 +84,9 @@ mtk_clk_register_cpumux(const struct mtk_composite *mux,
return clk;
}
-int __init mtk_clk_register_cpumuxes(struct device_node *node,
- const struct mtk_composite *clks, int num,
- struct clk_onecell_data *clk_data)
+int mtk_clk_register_cpumuxes(struct device_node *node,
+ const struct mtk_composite *clks, int num,
+ struct clk_onecell_data *clk_data)
{
int i;
struct clk *clk;
diff --git a/drivers/clk/mediatek/clk-mt7622.c b/drivers/clk/mediatek/clk-mt7622.c
index 92f7e32770c6..a8aecef1ba89 100644
--- a/drivers/clk/mediatek/clk-mt7622.c
+++ b/drivers/clk/mediatek/clk-mt7622.c
@@ -513,7 +513,7 @@ static const struct mtk_gate peri_clks[] = {
GATE_PERI1(CLK_PERI_IRTX_PD, "peri_irtx_pd", "irtx_sel", 2),
};
-static struct mtk_composite infra_muxes[] __initdata = {
+static struct mtk_composite infra_muxes[] = {
MUX(CLK_INFRA_MUX1_SEL, "infra_mux1_sel", infra_mux1_parents,
0x000, 2, 2),
};
@@ -652,7 +652,7 @@ static int mtk_topckgen_init(struct platform_device *pdev)
return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
-static int __init mtk_infrasys_init(struct platform_device *pdev)
+static int mtk_infrasys_init(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct clk_onecell_data *clk_data;
diff --git a/drivers/clk/mediatek/clk-mt7629-eth.c b/drivers/clk/mediatek/clk-mt7629-eth.c
new file mode 100644
index 000000000000..88279d0ea1a7
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7629-eth.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ * Author: Wenzhen Yu <Wenzhen Yu@mediatek.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7629-clk.h>
+
+#define GATE_ETH(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &eth_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate_regs eth_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate eth_clks[] = {
+ GATE_ETH(CLK_ETH_FE_EN, "eth_fe_en", "eth2pll", 6),
+ GATE_ETH(CLK_ETH_GP2_EN, "eth_gp2_en", "txclk_src_pre", 7),
+ GATE_ETH(CLK_ETH_GP1_EN, "eth_gp1_en", "txclk_src_pre", 8),
+ GATE_ETH(CLK_ETH_GP0_EN, "eth_gp0_en", "txclk_src_pre", 9),
+ GATE_ETH(CLK_ETH_ESW_EN, "eth_esw_en", "eth_500m", 16),
+};
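Each GATE_ETH entry is just a designated initializer for struct mtk_gate; expanding the first entry above gives:

/* GATE_ETH(CLK_ETH_FE_EN, "eth_fe_en", "eth2pll", 6) expands to: */
{
	.id = CLK_ETH_FE_EN,
	.name = "eth_fe_en",
	.parent_name = "eth2pll",
	.regs = &eth_cg_regs,
	.shift = 6,
	.ops = &mtk_clk_gate_ops_no_setclr_inv,
}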
+
+static const struct mtk_gate_regs sgmii_cg_regs = {
+ .set_ofs = 0xE4,
+ .clr_ofs = 0xE4,
+ .sta_ofs = 0xE4,
+};
+
+#define GATE_SGMII(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &sgmii_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate sgmii_clks[2][4] = {
+ {
+ GATE_SGMII(CLK_SGMII_TX_EN, "sgmii_tx_en",
+ "ssusb_tx250m", 2),
+ GATE_SGMII(CLK_SGMII_RX_EN, "sgmii_rx_en",
+ "ssusb_eq_rx250m", 3),
+ GATE_SGMII(CLK_SGMII_CDR_REF, "sgmii_cdr_ref",
+ "ssusb_cdr_ref", 4),
+ GATE_SGMII(CLK_SGMII_CDR_FB, "sgmii_cdr_fb",
+ "ssusb_cdr_fb", 5),
+ }, {
+ GATE_SGMII(CLK_SGMII_TX_EN, "sgmii_tx_en1",
+ "ssusb_tx250m", 2),
+ GATE_SGMII(CLK_SGMII_RX_EN, "sgmii_rx_en1",
+ "ssusb_eq_rx250m", 3),
+ GATE_SGMII(CLK_SGMII_CDR_REF, "sgmii_cdr_ref1",
+ "ssusb_cdr_ref", 4),
+ GATE_SGMII(CLK_SGMII_CDR_FB, "sgmii_cdr_fb1",
+ "ssusb_cdr_fb", 5),
+ }
+};
+
+static int clk_mt7629_ethsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_ETH_NR_CLK);
+
+ mtk_clk_register_gates(node, eth_clks, CLK_ETH_NR_CLK, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ mtk_register_reset_controller(node, 1, 0x34);
+
+ return r;
+}
+
+static int clk_mt7629_sgmiisys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ static int id;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_SGMII_NR_CLK);
+
+ mtk_clk_register_gates(node, sgmii_clks[id++], CLK_SGMII_NR_CLK,
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
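Note the static int id above: the init function runs once per matching sgmiisys node, so the first node to probe gets sgmii_clks[0] and the second gets sgmii_clks[1], making correctness depend on probe order following DT order. A sketch of an order-independent alternative, assuming a hypothetical "sgmii" DT alias:

/* Sketch: derive the instance index from a DT alias, not probe order. */
static int example_sgmii_instance(struct device_node *node)
{
	int id = of_alias_get_id(node, "sgmii");	/* hypothetical alias */

	return (id >= 0 && id < 2) ? id : 0;
}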
+
+static const struct of_device_id of_match_clk_mt7629_eth[] = {
+ {
+ .compatible = "mediatek,mt7629-ethsys",
+ .data = clk_mt7629_ethsys_init,
+ }, {
+ .compatible = "mediatek,mt7629-sgmiisys",
+ .data = clk_mt7629_sgmiisys_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt7629_eth_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt7629_eth_drv = {
+ .probe = clk_mt7629_eth_probe,
+ .driver = {
+ .name = "clk-mt7629-eth",
+ .of_match_table = of_match_clk_mt7629_eth,
+ },
+};
+
+builtin_platform_driver(clk_mt7629_eth_drv);
diff --git a/drivers/clk/mediatek/clk-mt7629-hif.c b/drivers/clk/mediatek/clk-mt7629-hif.c
new file mode 100644
index 000000000000..5c5b37207afb
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7629-hif.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ * Author: Wenzhen Yu <Wenzhen Yu@mediatek.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7629-clk.h>
+
+#define GATE_PCIE(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &pcie_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+#define GATE_SSUSB(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ssusb_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate_regs pcie_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate_regs ssusb_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+static const struct mtk_gate ssusb_clks[] = {
+ GATE_SSUSB(CLK_SSUSB_U2_PHY_1P_EN, "ssusb_u2_phy_1p",
+ "to_u2_phy_1p", 0),
+ GATE_SSUSB(CLK_SSUSB_U2_PHY_EN, "ssusb_u2_phy_en", "to_u2_phy", 1),
+ GATE_SSUSB(CLK_SSUSB_REF_EN, "ssusb_ref_en", "to_usb3_ref", 5),
+ GATE_SSUSB(CLK_SSUSB_SYS_EN, "ssusb_sys_en", "to_usb3_sys", 6),
+ GATE_SSUSB(CLK_SSUSB_MCU_EN, "ssusb_mcu_en", "to_usb3_mcu", 7),
+ GATE_SSUSB(CLK_SSUSB_DMA_EN, "ssusb_dma_en", "to_usb3_dma", 8),
+};
+
+static const struct mtk_gate pcie_clks[] = {
+ GATE_PCIE(CLK_PCIE_P1_AUX_EN, "pcie_p1_aux_en", "p1_1mhz", 12),
+ GATE_PCIE(CLK_PCIE_P1_OBFF_EN, "pcie_p1_obff_en", "free_run_4mhz", 13),
+ GATE_PCIE(CLK_PCIE_P1_AHB_EN, "pcie_p1_ahb_en", "from_top_ahb", 14),
+ GATE_PCIE(CLK_PCIE_P1_AXI_EN, "pcie_p1_axi_en", "from_top_axi", 15),
+ GATE_PCIE(CLK_PCIE_P1_MAC_EN, "pcie_p1_mac_en", "pcie1_mac_en", 16),
+ GATE_PCIE(CLK_PCIE_P1_PIPE_EN, "pcie_p1_pipe_en", "pcie1_pipe_en", 17),
+ GATE_PCIE(CLK_PCIE_P0_AUX_EN, "pcie_p0_aux_en", "p0_1mhz", 18),
+ GATE_PCIE(CLK_PCIE_P0_OBFF_EN, "pcie_p0_obff_en", "free_run_4mhz", 19),
+ GATE_PCIE(CLK_PCIE_P0_AHB_EN, "pcie_p0_ahb_en", "from_top_ahb", 20),
+ GATE_PCIE(CLK_PCIE_P0_AXI_EN, "pcie_p0_axi_en", "from_top_axi", 21),
+ GATE_PCIE(CLK_PCIE_P0_MAC_EN, "pcie_p0_mac_en", "pcie0_mac_en", 22),
+ GATE_PCIE(CLK_PCIE_P0_PIPE_EN, "pcie_p0_pipe_en", "pcie0_pipe_en", 23),
+};
+
+static int clk_mt7629_ssusbsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_SSUSB_NR_CLK);
+
+ mtk_clk_register_gates(node, ssusb_clks, ARRAY_SIZE(ssusb_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ mtk_register_reset_controller(node, 1, 0x34);
+
+ return r;
+}
+
+static int clk_mt7629_pciesys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_PCIE_NR_CLK);
+
+ mtk_clk_register_gates(node, pcie_clks, ARRAY_SIZE(pcie_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ mtk_register_reset_controller(node, 1, 0x34);
+
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt7629_hif[] = {
+ {
+ .compatible = "mediatek,mt7629-pciesys",
+ .data = clk_mt7629_pciesys_init,
+ }, {
+ .compatible = "mediatek,mt7629-ssusbsys",
+ .data = clk_mt7629_ssusbsys_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt7629_hif_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt7629_hif_drv = {
+ .probe = clk_mt7629_hif_probe,
+ .driver = {
+ .name = "clk-mt7629-hif",
+ .of_match_table = of_match_clk_mt7629_hif,
+ },
+};
+
+builtin_platform_driver(clk_mt7629_hif_drv);
diff --git a/drivers/clk/mediatek/clk-mt7629.c b/drivers/clk/mediatek/clk-mt7629.c
new file mode 100644
index 000000000000..d6233994af5a
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7629.c
@@ -0,0 +1,723 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ * Author: Wenzhen Yu <Wenzhen Yu@mediatek.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-cpumux.h"
+
+#include <dt-bindings/clock/mt7629-clk.h>
+
+#define MT7629_PLL_FMAX (2500UL * MHZ)
+#define CON0_MT7629_RST_BAR BIT(24)
+
+#define PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift, _div_table, _parent_name) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = CON0_MT7629_RST_BAR, \
+ .fmax = MT7629_PLL_FMAX, \
+ .pcwbits = _pcwbits, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .div_table = _div_table, \
+ .parent_name = _parent_name, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, \
+ _pcw_shift) \
+ PLL_B(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ NULL, "clk20m")
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &apmixed_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+#define GATE_INFRA(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &infra_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static DEFINE_SPINLOCK(mt7629_clk_lock);
+
+static const char * const axi_parents[] = {
+ "clkxtal",
+ "syspll1_d2",
+ "syspll_d5",
+ "syspll1_d4",
+ "univpll_d5",
+ "univpll2_d2",
+ "univpll_d7",
+ "dmpll_ck"
+};
+
+static const char * const mem_parents[] = {
+ "clkxtal",
+ "dmpll_ck"
+};
+
+static const char * const ddrphycfg_parents[] = {
+ "clkxtal",
+ "syspll1_d8"
+};
+
+static const char * const eth_parents[] = {
+ "clkxtal",
+ "syspll1_d2",
+ "univpll1_d2",
+ "syspll1_d4",
+ "univpll_d5",
+ "sgmiipll_d2",
+ "univpll_d7",
+ "dmpll_ck"
+};
+
+static const char * const pwm_parents[] = {
+ "clkxtal",
+ "univpll2_d4"
+};
+
+static const char * const f10m_ref_parents[] = {
+ "clkxtal",
+ "sgmiipll_d2"
+};
+
+static const char * const nfi_infra_parents[] = {
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "clkxtal",
+ "univpll2_d8",
+ "univpll3_d4",
+ "syspll1_d8",
+ "univpll1_d8",
+ "syspll4_d2",
+ "syspll2_d4",
+ "univpll2_d4",
+ "univpll3_d2",
+ "syspll1_d4",
+ "syspll_d7"
+};
+
+static const char * const flash_parents[] = {
+ "clkxtal",
+ "univpll_d80_d4",
+ "syspll2_d8",
+ "syspll3_d4",
+ "univpll3_d4",
+ "univpll1_d8",
+ "syspll2_d4",
+ "univpll2_d4"
+};
+
+static const char * const uart_parents[] = {
+ "clkxtal",
+ "univpll2_d8"
+};
+
+static const char * const spi0_parents[] = {
+ "clkxtal",
+ "syspll3_d2",
+ "clkxtal",
+ "syspll2_d4",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll1_d8",
+ "clkxtal"
+};
+
+static const char * const spi1_parents[] = {
+ "clkxtal",
+ "syspll3_d2",
+ "clkxtal",
+ "syspll4_d4",
+ "syspll4_d2",
+ "univpll2_d4",
+ "univpll1_d8",
+ "clkxtal"
+};
+
+static const char * const msdc30_0_parents[] = {
+ "clkxtal",
+ "univpll2_d16",
+ "univ48m"
+};
+
+static const char * const msdc30_1_parents[] = {
+ "clkxtal",
+ "univpll2_d16",
+ "univ48m",
+ "syspll2_d4",
+ "univpll2_d4",
+ "syspll_d7",
+ "syspll2_d2",
+ "univpll2_d2"
+};
+
+static const char * const ap2wbmcu_parents[] = {
+ "clkxtal",
+ "syspll1_d2",
+ "univ48m",
+ "syspll1_d8",
+ "univpll2_d4",
+ "syspll_d7",
+ "syspll2_d2",
+ "univpll2_d2"
+};
+
+static const char * const audio_parents[] = {
+ "clkxtal",
+ "syspll3_d4",
+ "syspll4_d4",
+ "syspll1_d16"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clkxtal",
+ "syspll1_d4",
+ "syspll4_d2",
+ "dmpll_d4"
+};
+
+static const char * const pmicspi_parents[] = {
+ "clkxtal",
+ "syspll1_d8",
+ "syspll3_d4",
+ "syspll1_d16",
+ "univpll3_d4",
+ "clkxtal",
+ "univpll2_d4",
+ "dmpll_d8"
+};
+
+static const char * const scp_parents[] = {
+ "clkxtal",
+ "syspll1_d8",
+ "univpll2_d2",
+ "univpll2_d4"
+};
+
+static const char * const atb_parents[] = {
+ "clkxtal",
+ "syspll1_d2",
+ "syspll_d5"
+};
+
+static const char * const hif_parents[] = {
+ "clkxtal",
+ "syspll1_d2",
+ "univpll1_d2",
+ "syspll1_d4",
+ "univpll_d5",
+ "clk_null",
+ "univpll_d7"
+};
+
+static const char * const sata_parents[] = {
+ "clkxtal",
+ "univpll2_d4"
+};
+
+static const char * const usb20_parents[] = {
+ "clkxtal",
+ "univpll3_d4",
+ "syspll1_d8"
+};
+
+static const char * const aud1_parents[] = {
+ "clkxtal"
+};
+
+static const char * const irrx_parents[] = {
+ "clkxtal",
+ "syspll4_d16"
+};
+
+static const char * const crypto_parents[] = {
+ "clkxtal",
+ "univpll_d3",
+ "univpll1_d2",
+ "syspll1_d2",
+ "univpll_d5",
+ "syspll_d5",
+ "univpll2_d2",
+ "syspll_d2"
+};
+
+static const char * const gpt10m_parents[] = {
+ "clkxtal",
+ "clkxtal_d4"
+};
+
+static const char * const peribus_ck_parents[] = {
+ "syspll1_d8",
+ "syspll1_d4"
+};
+
+static const char * const infra_mux1_parents[] = {
+ "clkxtal",
+ "armpll",
+ "main_core_en",
+ "armpll"
+};
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x8,
+};
+
+static const struct mtk_gate_regs infra_cg_regs = {
+ .set_ofs = 0x40,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x48,
+};
+
+static const struct mtk_gate_regs peri0_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x10,
+ .sta_ofs = 0x18,
+};
+
+static const struct mtk_gate_regs peri1_cg_regs = {
+ .set_ofs = 0xC,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x1C,
+};
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001,
+ 0, 21, 0x0204, 24, 0, 0x0204, 0),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x0210, 0x021C, 0x00000001,
+ HAVE_RST_BAR, 21, 0x0214, 24, 0, 0x0214, 0),
+ PLL(CLK_APMIXED_UNIV2PLL, "univ2pll", 0x0220, 0x022C, 0x00000001,
+ HAVE_RST_BAR, 7, 0x0224, 24, 0, 0x0224, 14),
+ PLL(CLK_APMIXED_ETH1PLL, "eth1pll", 0x0300, 0x0310, 0x00000001,
+ 0, 21, 0x0300, 1, 0, 0x0304, 0),
+ PLL(CLK_APMIXED_ETH2PLL, "eth2pll", 0x0314, 0x0320, 0x00000001,
+ 0, 21, 0x0314, 1, 0, 0x0318, 0),
+ PLL(CLK_APMIXED_SGMIPLL, "sgmipll", 0x0358, 0x0368, 0x00000001,
+ 0, 21, 0x0358, 1, 0, 0x035C, 0),
+};
+
+static const struct mtk_gate apmixed_clks[] = {
+ GATE_APMIXED(CLK_APMIXED_MAIN_CORE_EN, "main_core_en", "mainpll", 5),
+};
+
+static const struct mtk_gate infra_clks[] = {
+ GATE_INFRA(CLK_INFRA_DBGCLK_PD, "infra_dbgclk_pd", "hd_faxi", 0),
+ GATE_INFRA(CLK_INFRA_TRNG_PD, "infra_trng_pd", "hd_faxi", 2),
+ GATE_INFRA(CLK_INFRA_DEVAPC_PD, "infra_devapc_pd", "hd_faxi", 4),
+ GATE_INFRA(CLK_INFRA_APXGPT_PD, "infra_apxgpt_pd", "infrao_10m", 18),
+ GATE_INFRA(CLK_INFRA_SEJ_PD, "infra_sej_pd", "infrao_10m", 19),
+};
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_TO_U2_PHY, "to_u2_phy", "clkxtal",
+ 31250000),
+ FIXED_CLK(CLK_TOP_TO_U2_PHY_1P, "to_u2_phy_1p", "clkxtal",
+ 31250000),
+ FIXED_CLK(CLK_TOP_PCIE0_PIPE_EN, "pcie0_pipe_en", "clkxtal",
+ 125000000),
+ FIXED_CLK(CLK_TOP_PCIE1_PIPE_EN, "pcie1_pipe_en", "clkxtal",
+ 125000000),
+ FIXED_CLK(CLK_TOP_SSUSB_TX250M, "ssusb_tx250m", "clkxtal",
+ 250000000),
+ FIXED_CLK(CLK_TOP_SSUSB_EQ_RX250M, "ssusb_eq_rx250m", "clkxtal",
+ 250000000),
+ FIXED_CLK(CLK_TOP_SSUSB_CDR_REF, "ssusb_cdr_ref", "clkxtal",
+ 33333333),
+ FIXED_CLK(CLK_TOP_SSUSB_CDR_FB, "ssusb_cdr_fb", "clkxtal",
+ 50000000),
+ FIXED_CLK(CLK_TOP_SATA_ASIC, "sata_asic", "clkxtal",
+ 50000000),
+ FIXED_CLK(CLK_TOP_SATA_RBC, "sata_rbc", "clkxtal",
+ 50000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_TO_USB3_SYS, "to_usb3_sys", "eth1pll", 1, 4),
+ FACTOR(CLK_TOP_P1_1MHZ, "p1_1mhz", "eth1pll", 1, 500),
+ FACTOR(CLK_TOP_4MHZ, "free_run_4mhz", "eth1pll", 1, 125),
+ FACTOR(CLK_TOP_P0_1MHZ, "p0_1mhz", "eth1pll", 1, 500),
+ FACTOR(CLK_TOP_ETH_500M, "eth_500m", "eth1pll", 1, 1),
+ FACTOR(CLK_TOP_TXCLK_SRC_PRE, "txclk_src_pre", "sgmiipll_d2", 1, 1),
+ FACTOR(CLK_TOP_RTC, "rtc", "clkxtal", 1, 1024),
+ FACTOR(CLK_TOP_PWM_QTR_26M, "pwm_qtr_26m", "clkxtal", 1, 1),
+ FACTOR(CLK_TOP_CPUM_TCK_IN, "cpum_tck_in", "cpum_tck", 1, 1),
+ FACTOR(CLK_TOP_TO_USB3_DA_TOP, "to_usb3_da_top", "clkxtal", 1, 1),
+ FACTOR(CLK_TOP_MEMPLL, "mempll", "clkxtal", 32, 1),
+ FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "mempll", 1, 1),
+ FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "mempll", 1, 4),
+ FACTOR(CLK_TOP_DMPLL_D8, "dmpll_d8", "mempll", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll", 1, 2),
+ FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "mainpll", 1, 32),
+ FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_SYSPLL2_D8, "syspll2_d8", "mainpll", 1, 24),
+ FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "mainpll", 1, 28),
+ FACTOR(CLK_TOP_SYSPLL4_D16, "syspll4_d16", "mainpll", 1, 112),
+ FACTOR(CLK_TOP_UNIVPLL, "univpll", "univ2pll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL2_D16, "univpll2_d16", "univpll", 1, 48),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univpll", 1, 10),
+ FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univpll", 1, 20),
+ FACTOR(CLK_TOP_UNIVPLL3_D16, "univpll3_d16", "univpll", 1, 80),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
+ FACTOR(CLK_TOP_UNIVPLL_D80_D4, "univpll_d80_d4", "univpll", 1, 320),
+ FACTOR(CLK_TOP_UNIV48M, "univ48m", "univpll", 1, 25),
+ FACTOR(CLK_TOP_SGMIIPLL_D2, "sgmiipll_d2", "sgmipll", 1, 2),
+ FACTOR(CLK_TOP_CLKXTAL_D4, "clkxtal_d4", "clkxtal", 1, 4),
+ FACTOR(CLK_TOP_HD_FAXI, "hd_faxi", "axi_sel", 1, 1),
+ FACTOR(CLK_TOP_FAXI, "faxi", "axi_sel", 1, 1),
+ FACTOR(CLK_TOP_F_FAUD_INTBUS, "f_faud_intbus", "aud_intbus_sel", 1, 1),
+ FACTOR(CLK_TOP_AP2WBHIF_HCLK, "ap2wbhif_hclk", "syspll1_d8", 1, 1),
+ FACTOR(CLK_TOP_10M_INFRAO, "infrao_10m", "gpt10m_sel", 1, 1),
+ FACTOR(CLK_TOP_MSDC30_1, "msdc30_1", "msdc30_1_sel", 1, 1),
+ FACTOR(CLK_TOP_SPI, "spi", "spi0_sel", 1, 1),
+ FACTOR(CLK_TOP_SF, "sf", "nfi_infra_sel", 1, 1),
+ FACTOR(CLK_TOP_FLASH, "flash", "flash_sel", 1, 1),
+ FACTOR(CLK_TOP_TO_USB3_REF, "to_usb3_ref", "sata_sel", 1, 4),
+ FACTOR(CLK_TOP_TO_USB3_MCU, "to_usb3_mcu", "axi_sel", 1, 1),
+ FACTOR(CLK_TOP_TO_USB3_DMA, "to_usb3_dma", "hif_sel", 1, 1),
+ FACTOR(CLK_TOP_FROM_TOP_AHB, "from_top_ahb", "axi_sel", 1, 1),
+ FACTOR(CLK_TOP_FROM_TOP_AXI, "from_top_axi", "hif_sel", 1, 1),
+ FACTOR(CLK_TOP_PCIE1_MAC_EN, "pcie1_mac_en", "sata_sel", 1, 1),
+ FACTOR(CLK_TOP_PCIE0_MAC_EN, "pcie0_mac_en", "sata_sel", 1, 1),
+};
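Each FACTOR entry registers a fixed-factor clock whose rate is parent_rate * mult / div, which explains the naming above: "syspll1_d2" runs at mainpll / 4, apparently because syspll1 already denotes mainpll / 2. A trivial model of that arithmetic:

/* FACTOR(id, name, parent, mult, div) yields parent_rate * mult / div. */
static unsigned long example_factor_rate(unsigned long parent_rate,
					 unsigned int mult, unsigned int div)
{
	return parent_rate * mult / div;
}
/* e.g. example_factor_rate(mainpll_rate, 1, 4) models "syspll1_d2" above. */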
+
+static const struct mtk_gate peri_clks[] = {
+ /* PERI0 */
+ GATE_PERI0(CLK_PERI_PWM1_PD, "peri_pwm1_pd", "pwm_qtr_26m", 2),
+ GATE_PERI0(CLK_PERI_PWM2_PD, "peri_pwm2_pd", "pwm_qtr_26m", 3),
+ GATE_PERI0(CLK_PERI_PWM3_PD, "peri_pwm3_pd", "pwm_qtr_26m", 4),
+ GATE_PERI0(CLK_PERI_PWM4_PD, "peri_pwm4_pd", "pwm_qtr_26m", 5),
+ GATE_PERI0(CLK_PERI_PWM5_PD, "peri_pwm5_pd", "pwm_qtr_26m", 6),
+ GATE_PERI0(CLK_PERI_PWM6_PD, "peri_pwm6_pd", "pwm_qtr_26m", 7),
+ GATE_PERI0(CLK_PERI_PWM7_PD, "peri_pwm7_pd", "pwm_qtr_26m", 8),
+ GATE_PERI0(CLK_PERI_PWM_PD, "peri_pwm_pd", "pwm_qtr_26m", 9),
+ GATE_PERI0(CLK_PERI_AP_DMA_PD, "peri_ap_dma_pd", "faxi", 12),
+ GATE_PERI0(CLK_PERI_MSDC30_1_PD, "peri_msdc30_1", "msdc30_1", 14),
+ GATE_PERI0(CLK_PERI_UART0_PD, "peri_uart0_pd", "faxi", 17),
+ GATE_PERI0(CLK_PERI_UART1_PD, "peri_uart1_pd", "faxi", 18),
+ GATE_PERI0(CLK_PERI_UART2_PD, "peri_uart2_pd", "faxi", 19),
+ GATE_PERI0(CLK_PERI_UART3_PD, "peri_uart3_pd", "faxi", 20),
+ GATE_PERI0(CLK_PERI_BTIF_PD, "peri_btif_pd", "faxi", 22),
+ GATE_PERI0(CLK_PERI_I2C0_PD, "peri_i2c0_pd", "faxi", 23),
+ GATE_PERI0(CLK_PERI_SPI0_PD, "peri_spi0_pd", "spi", 28),
+ GATE_PERI0(CLK_PERI_SNFI_PD, "peri_snfi_pd", "sf", 29),
+ GATE_PERI0(CLK_PERI_NFI_PD, "peri_nfi_pd", "faxi", 30),
+ GATE_PERI0(CLK_PERI_NFIECC_PD, "peri_nfiecc_pd", "faxi", 31),
+ /* PERI1 */
+ GATE_PERI1(CLK_PERI_FLASH_PD, "peri_flash_pd", "flash", 1),
+};
+
+static struct mtk_composite infra_muxes[] = {
+ /* INFRA_TOPCKGEN_CKMUXSEL */
+ MUX(CLK_INFRA_MUX1_SEL, "infra_mux1_sel", infra_mux1_parents, 0x000,
+ 2, 2),
+};
+
+static struct mtk_composite top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE(CLK_TOP_AXI_SEL, "axi_sel", axi_parents,
+ 0x040, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MEM_SEL, "mem_sel", mem_parents,
+ 0x040, 8, 1, 15),
+ MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents,
+ 0x040, 16, 1, 23),
+ MUX_GATE(CLK_TOP_ETH_SEL, "eth_sel", eth_parents,
+ 0x040, 24, 3, 31),
+ /* CLK_CFG_1 */
+ MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents,
+ 0x050, 0, 2, 7),
+ MUX_GATE(CLK_TOP_F10M_REF_SEL, "f10m_ref_sel", f10m_ref_parents,
+ 0x050, 8, 1, 15),
+ MUX_GATE(CLK_TOP_NFI_INFRA_SEL, "nfi_infra_sel", nfi_infra_parents,
+ 0x050, 16, 4, 23),
+ MUX_GATE(CLK_TOP_FLASH_SEL, "flash_sel", flash_parents,
+ 0x050, 24, 3, 31),
+ /* CLK_CFG_2 */
+ MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents,
+ 0x060, 0, 1, 7),
+ MUX_GATE(CLK_TOP_SPI0_SEL, "spi0_sel", spi0_parents,
+ 0x060, 8, 3, 15),
+ MUX_GATE(CLK_TOP_SPI1_SEL, "spi1_sel", spi1_parents,
+ 0x060, 16, 3, 23),
+ MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", uart_parents,
+ 0x060, 24, 3, 31),
+ /* CLK_CFG_3 */
+ MUX_GATE(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_0_parents,
+ 0x070, 0, 3, 7),
+ MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_parents,
+ 0x070, 8, 3, 15),
+ MUX_GATE(CLK_TOP_AP2WBMCU_SEL, "ap2wbmcu_sel", ap2wbmcu_parents,
+ 0x070, 16, 3, 23),
+ MUX_GATE(CLK_TOP_AP2WBHIF_SEL, "ap2wbhif_sel", ap2wbmcu_parents,
+ 0x070, 24, 3, 31),
+ /* CLK_CFG_4 */
+ MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents,
+ 0x080, 0, 2, 7),
+ MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents,
+ 0x080, 8, 2, 15),
+ MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents,
+ 0x080, 16, 3, 23),
+ MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", scp_parents,
+ 0x080, 24, 2, 31),
+ /* CLK_CFG_5 */
+ MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents,
+ 0x090, 0, 2, 7),
+ MUX_GATE(CLK_TOP_HIF_SEL, "hif_sel", hif_parents,
+ 0x090, 8, 3, 15),
+ MUX_GATE(CLK_TOP_SATA_SEL, "sata_sel", sata_parents,
+ 0x090, 16, 1, 23),
+ MUX_GATE(CLK_TOP_U2_SEL, "usb20_sel", usb20_parents,
+ 0x090, 24, 2, 31),
+ /* CLK_CFG_6 */
+ MUX_GATE(CLK_TOP_AUD1_SEL, "aud1_sel", aud1_parents,
+ 0x0A0, 0, 1, 7),
+ MUX_GATE(CLK_TOP_AUD2_SEL, "aud2_sel", aud1_parents,
+ 0x0A0, 8, 1, 15),
+ MUX_GATE(CLK_TOP_IRRX_SEL, "irrx_sel", irrx_parents,
+ 0x0A0, 16, 1, 23),
+ MUX_GATE(CLK_TOP_IRTX_SEL, "irtx_sel", irrx_parents,
+ 0x0A0, 24, 1, 31),
+ /* CLK_CFG_7 */
+ MUX_GATE(CLK_TOP_SATA_MCU_SEL, "sata_mcu_sel", scp_parents,
+ 0x0B0, 0, 2, 7),
+ MUX_GATE(CLK_TOP_PCIE0_MCU_SEL, "pcie0_mcu_sel", scp_parents,
+ 0x0B0, 8, 2, 15),
+ MUX_GATE(CLK_TOP_PCIE1_MCU_SEL, "pcie1_mcu_sel", scp_parents,
+ 0x0B0, 16, 2, 23),
+ MUX_GATE(CLK_TOP_SSUSB_MCU_SEL, "ssusb_mcu_sel", scp_parents,
+ 0x0B0, 24, 2, 31),
+ /* CLK_CFG_8 */
+ MUX_GATE(CLK_TOP_CRYPTO_SEL, "crypto_sel", crypto_parents,
+ 0x0C0, 0, 3, 7),
+ MUX_GATE(CLK_TOP_SGMII_REF_1_SEL, "sgmii_ref_1_sel", f10m_ref_parents,
+ 0x0C0, 8, 1, 15),
+ MUX_GATE(CLK_TOP_10M_SEL, "gpt10m_sel", gpt10m_parents,
+ 0x0C0, 16, 1, 23),
+};
+
+static struct mtk_composite peri_muxes[] = {
+ /* PERI_GLOBALCON_CKSEL */
+ MUX(CLK_PERIBUS_SEL, "peribus_ck_sel", peribus_ck_parents, 0x05C, 0, 1),
+};
+
+static int mtk_topckgen_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs),
+ clk_data);
+
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes),
+ base, &mt7629_clk_lock, clk_data);
+
+ clk_prepare_enable(clk_data->clks[CLK_TOP_AXI_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_MEM_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_DDRPHYCFG_SEL]);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+static int mtk_infrasys_init(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK);
+
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ clk_data);
+
+ mtk_clk_register_cpumuxes(node, infra_muxes, ARRAY_SIZE(infra_muxes),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get,
+ clk_data);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int mtk_pericfg_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ void __iomem *base;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK);
+
+ mtk_clk_register_gates(node, peri_clks, ARRAY_SIZE(peri_clks),
+ clk_data);
+
+ mtk_clk_register_composites(peri_muxes, ARRAY_SIZE(peri_muxes), base,
+ &mt7629_clk_lock, clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ return r;
+
+ clk_prepare_enable(clk_data->clks[CLK_PERI_UART0_PD]);
+
+ return 0;
+}
+
+static int mtk_apmixedsys_init(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls),
+ clk_data);
+
+ mtk_clk_register_gates(node, apmixed_clks,
+ ARRAY_SIZE(apmixed_clks), clk_data);
+
+ clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]);
+ clk_prepare_enable(clk_data->clks[CLK_APMIXED_MAIN_CORE_EN]);
+
+ return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+}
+
+
+static const struct of_device_id of_match_clk_mt7629[] = {
+ {
+ .compatible = "mediatek,mt7629-apmixedsys",
+ .data = mtk_apmixedsys_init,
+ }, {
+ .compatible = "mediatek,mt7629-infracfg",
+ .data = mtk_infrasys_init,
+ }, {
+ .compatible = "mediatek,mt7629-topckgen",
+ .data = mtk_topckgen_init,
+ }, {
+ .compatible = "mediatek,mt7629-pericfg",
+ .data = mtk_pericfg_init,
+ }, {
+ /* sentinel */
+ }
+};
+
+static int clk_mt7629_probe(struct platform_device *pdev)
+{
+ int (*clk_init)(struct platform_device *);
+ int r;
+
+ clk_init = of_device_get_match_data(&pdev->dev);
+ if (!clk_init)
+ return -EINVAL;
+
+ r = clk_init(pdev);
+ if (r)
+ dev_err(&pdev->dev,
+ "could not register clock provider: %s: %d\n",
+ pdev->name, r);
+
+ return r;
+}
+
+static struct platform_driver clk_mt7629_drv = {
+ .probe = clk_mt7629_probe,
+ .driver = {
+ .name = "clk-mt7629",
+ .of_match_table = of_match_clk_mt7629,
+ },
+};
+
+static int clk_mt7629_init(void)
+{
+ return platform_driver_register(&clk_mt7629_drv);
+}
+
+arch_initcall(clk_mt7629_init);
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index 72ec8c40d848..a849aa809825 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -2,7 +2,8 @@
# Makefile for Meson specific clk
#
-obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-phase.o
+obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-pll.o clk-mpll.o clk-phase.o vid-pll-div.o
+obj-$(CONFIG_COMMON_CLK_AMLOGIC) += clk-input.o
obj-$(CONFIG_COMMON_CLK_AMLOGIC_AUDIO) += clk-triphase.o sclk-div.o
obj-$(CONFIG_COMMON_CLK_MESON_AO) += meson-aoclk.o
obj-$(CONFIG_COMMON_CLK_MESON8B) += meson8b.o
diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
index 5f6c860aa122..8ac3a2295473 100644
--- a/drivers/clk/meson/axg-audio.c
+++ b/drivers/clk/meson/axg-audio.c
@@ -631,22 +631,23 @@ static struct clk_regmap *const axg_audio_clk_regmaps[] = {
&axg_tdmout_c_lrclk,
};
-static struct clk *devm_clk_get_enable(struct device *dev, char *id)
+static int devm_clk_get_enable(struct device *dev, char *id)
{
struct clk *clk;
int ret;
clk = devm_clk_get(dev, id);
if (IS_ERR(clk)) {
- if (PTR_ERR(clk) != -EPROBE_DEFER)
+ ret = PTR_ERR(clk);
+ if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get %s", id);
- return clk;
+ return ret;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(dev, "failed to enable %s", id);
- return ERR_PTR(ret);
+ return ret;
}
ret = devm_add_action_or_reset(dev,
@@ -654,74 +655,40 @@ static struct clk *devm_clk_get_enable(struct device *dev, char *id)
clk);
if (ret) {
dev_err(dev, "failed to add reset action on %s", id);
- return ERR_PTR(ret);
+ return ret;
}
- return clk;
-}
-
-static const struct clk_ops axg_clk_no_ops = {};
-
-static struct clk_hw *axg_clk_hw_register_bypass(struct device *dev,
- const char *name,
- const char *parent_name)
-{
- struct clk_hw *hw;
- struct clk_init_data init;
- char *clk_name;
- int ret;
-
- hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
- if (!hw)
- return ERR_PTR(-ENOMEM);
-
- clk_name = kasprintf(GFP_KERNEL, "axg_%s", name);
- if (!clk_name)
- return ERR_PTR(-ENOMEM);
-
- init.name = clk_name;
- init.ops = &axg_clk_no_ops;
- init.flags = 0;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
- hw->init = &init;
-
- ret = devm_clk_hw_register(dev, hw);
- kfree(clk_name);
-
- return ret ? ERR_PTR(ret) : hw;
+ return 0;
}
static int axg_register_clk_hw_input(struct device *dev,
const char *name,
unsigned int clkid)
{
- struct clk *parent_clk = devm_clk_get(dev, name);
- struct clk_hw *hw = NULL;
+ char *clk_name;
+ struct clk_hw *hw;
+ int err = 0;
- if (IS_ERR(parent_clk)) {
- int err = PTR_ERR(parent_clk);
+ clk_name = kasprintf(GFP_KERNEL, "axg_%s", name);
+ if (!clk_name)
+ return -ENOMEM;
+ hw = meson_clk_hw_register_input(dev, name, clk_name, 0);
+ if (IS_ERR(hw)) {
/* It is ok if an input clock is missing */
- if (err == -ENOENT) {
+ if (PTR_ERR(hw) == -ENOENT) {
dev_dbg(dev, "%s not provided", name);
} else {
+ err = PTR_ERR(hw);
if (err != -EPROBE_DEFER)
dev_err(dev, "failed to get %s clock", name);
- return err;
}
} else {
- hw = axg_clk_hw_register_bypass(dev, name,
- __clk_get_name(parent_clk));
- }
-
- if (IS_ERR(hw)) {
- dev_err(dev, "failed to register %s clock", name);
- return PTR_ERR(hw);
+ axg_audio_hw_onecell_data.hws[clkid] = hw;
}
- axg_audio_hw_onecell_data.hws[clkid] = hw;
- return 0;
+ kfree(clk_name);
+ return err;
}
static int axg_register_clk_hw_inputs(struct device *dev,
@@ -759,7 +726,6 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
struct regmap *map;
struct resource *res;
void __iomem *regs;
- struct clk *clk;
struct clk_hw *hw;
int ret, i;
@@ -775,9 +741,9 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
/* Get the mandatory peripheral clock */
- clk = devm_clk_get_enable(dev, "pclk");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ ret = devm_clk_get_enable(dev, "pclk");
+ if (ret)
+ return ret;
ret = device_reset(dev);
if (ret) {
@@ -786,8 +752,7 @@ static int axg_audio_clkc_probe(struct platform_device *pdev)
}
/* Register the peripheral input clock */
- hw = axg_clk_hw_register_bypass(dev, "audio_pclk",
- __clk_get_name(clk));
+ hw = meson_clk_hw_register_input(dev, "pclk", "axg_audio_pclk", 0);
if (IS_ERR(hw))
return PTR_ERR(hw);
diff --git a/drivers/clk/meson/clk-input.c b/drivers/clk/meson/clk-input.c
new file mode 100644
index 000000000000..06b3e3bb6a66
--- /dev/null
+++ b/drivers/clk/meson/clk-input.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include "clkc.h"
+
+static const struct clk_ops meson_clk_no_ops = {};
+
+struct clk_hw *meson_clk_hw_register_input(struct device *dev,
+ const char *of_name,
+ const char *clk_name,
+ unsigned long flags)
+{
+ struct clk *parent_clk = devm_clk_get(dev, of_name);
+ struct clk_init_data init;
+ const char *parent_name;
+ struct clk_hw *hw;
+ int ret;
+
+ if (IS_ERR(parent_clk))
+ return (struct clk_hw *)parent_clk;
+
+ hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return ERR_PTR(-ENOMEM);
+
+ parent_name = __clk_get_name(parent_clk);
+ init.name = clk_name;
+ init.ops = &meson_clk_no_ops;
+ init.flags = flags;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, hw);
+
+ return ret ? ERR_PTR(ret) : hw;
+}
+EXPORT_SYMBOL_GPL(meson_clk_hw_register_input);
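A usage sketch mirroring the axg-audio call site converted earlier in this diff; error handling stays with the caller, since an error may simply mean the optional input is absent:

static int example_register_pclk_input(struct device *dev)
{
	struct clk_hw *hw;

	/* Same names as the axg-audio conversion above */
	hw = meson_clk_hw_register_input(dev, "pclk", "axg_audio_pclk", 0);
	if (IS_ERR(hw))
		return PTR_ERR(hw);	/* may be -ENOENT if "pclk" is absent */

	return 0;
}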
diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
index f5b5b3fabe3c..afffc1547e20 100644
--- a/drivers/clk/meson/clk-pll.c
+++ b/drivers/clk/meson/clk-pll.c
@@ -200,11 +200,28 @@ static void meson_clk_pll_init(struct clk_hw *hw)
}
}
+static int meson_clk_pll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+
+ if (meson_parm_read(clk->map, &pll->rst) ||
+ !meson_parm_read(clk->map, &pll->en) ||
+ !meson_parm_read(clk->map, &pll->l))
+ return 0;
+
+ return 1;
+}
+
static int meson_clk_pll_enable(struct clk_hw *hw)
{
struct clk_regmap *clk = to_clk_regmap(hw);
struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+ /* do nothing if the PLL is already enabled */
+ if (clk_hw_is_enabled(hw))
+ return 0;
+
/* Make sure the pll is in reset */
meson_parm_write(clk->map, &pll->rst, 1);
@@ -288,10 +305,12 @@ const struct clk_ops meson_clk_pll_ops = {
.recalc_rate = meson_clk_pll_recalc_rate,
.round_rate = meson_clk_pll_round_rate,
.set_rate = meson_clk_pll_set_rate,
+ .is_enabled = meson_clk_pll_is_enabled,
.enable = meson_clk_pll_enable,
.disable = meson_clk_pll_disable
};
const struct clk_ops meson_clk_pll_ro_ops = {
.recalc_rate = meson_clk_pll_recalc_rate,
+ .is_enabled = meson_clk_pll_is_enabled,
};
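The new is_enabled callback derives the PLL state purely from the rst, en and l (lock) fields declared in each PLL's parm table, and enable now returns early when the PLL is already up, so a PLL left running by the bootloader is not needlessly reset. A small userspace model of the check, using the bit positions declared for the HDMI PLL and assuming the usual (reg >> shift) & ((1 << width) - 1) extraction behind meson_parm_read (register offsets and the regmap access are elided):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct parm { unsigned shift, width; };

static uint32_t parm_read(uint32_t reg, struct parm p)
{
	return (reg >> p.shift) & ((1u << p.width) - 1);
}

static int pll_is_enabled(uint32_t cntl)
{
	struct parm rst = { 28, 1 }, en = { 30, 1 }, l = { 31, 1 };

	/* same predicate as meson_clk_pll_is_enabled() above */
	return !parm_read(cntl, rst) &&
	       parm_read(cntl, en) &&
	       parm_read(cntl, l);
}

int main(void)
{
	uint32_t cntl = (1u << 31) | (1u << 30);	/* locked + enabled */

	assert(pll_is_enabled(cntl));
	assert(!pll_is_enabled(cntl | (1u << 28)));	/* held in reset */
	printf("is_enabled model OK\n");
	return 0;
}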
diff --git a/drivers/clk/meson/clk-regmap.c b/drivers/clk/meson/clk-regmap.c
index 305ee307c003..c515f67322a3 100644
--- a/drivers/clk/meson/clk-regmap.c
+++ b/drivers/clk/meson/clk-regmap.c
@@ -50,6 +50,11 @@ const struct clk_ops clk_regmap_gate_ops = {
};
EXPORT_SYMBOL_GPL(clk_regmap_gate_ops);
+const struct clk_ops clk_regmap_gate_ro_ops = {
+ .is_enabled = clk_regmap_gate_is_enabled,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_gate_ro_ops);
+
static unsigned long clk_regmap_div_recalc_rate(struct clk_hw *hw,
unsigned long prate)
{
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index ed2d4348dbe2..e9c5728d40eb 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -51,6 +51,7 @@ clk_get_regmap_gate_data(struct clk_regmap *clk)
}
extern const struct clk_ops clk_regmap_gate_ops;
+extern const struct clk_ops clk_regmap_gate_ro_ops;
/**
* struct clk_regmap_div_data - regmap backed adjustable divider specific data
diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h
index 6b96d55c047d..6183b22c4bf2 100644
--- a/drivers/clk/meson/clkc.h
+++ b/drivers/clk/meson/clkc.h
@@ -90,6 +90,11 @@ struct meson_clk_phase_data {
int meson_clk_degrees_from_val(unsigned int val, unsigned int width);
unsigned int meson_clk_degrees_to_val(int degrees, unsigned int width);
+struct meson_vid_pll_div_data {
+ struct parm val;
+ struct parm sel;
+};
+
#define MESON_GATE(_name, _reg, _bit) \
struct clk_regmap _name = { \
.data = &(struct clk_regmap_gate_data){ \
@@ -112,5 +117,11 @@ extern const struct clk_ops meson_clk_cpu_ops;
extern const struct clk_ops meson_clk_mpll_ro_ops;
extern const struct clk_ops meson_clk_mpll_ops;
extern const struct clk_ops meson_clk_phase_ops;
+extern const struct clk_ops meson_vid_pll_div_ro_ops;
+
+struct clk_hw *meson_clk_hw_register_input(struct device *dev,
+ const char *of_name,
+ const char *clk_name,
+ unsigned long flags);
#endif /* __CLKC_H */
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 4ada9668fd49..65f2599e5243 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -199,6 +199,58 @@ static struct clk_regmap gxbb_hdmi_pll_dco = {
},
};
+static struct clk_regmap gxl_hdmi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 30,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 0,
+ .width = 9,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 9,
+ .width = 5,
+ },
+ /*
+ * On gxl, the register layout is shifted by the extra
+ * HHI_HDMI_PLL_CNTL1, which does not exist on gxbb, so we
+ * reuse GXBB's HHI_HDMI_PLL_CNTL2 define, which sits at
+ * the same offset as the gxl frac register.
+ */
+ .frac = {
+ .reg_off = HHI_HDMI_PLL_CNTL2,
+ .shift = 0,
+ .width = 10,
+ },
+ .l = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HDMI_PLL_CNTL,
+ .shift = 28,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ /*
+ * The display driver handles the HDMI PLL registers directly
+ * at the moment, so we need NOCACHE to keep our view of the
+ * clock as accurate as possible
+ */
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
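Assuming the usual Amlogic fractional-PLL relation rate = xtal * (m + frac / 2^frac_width) / n, which is consistent with the m, n and frac parms declared above, here is a worked example for a typical 2.97 GHz HDMI DCO; the m/frac values are illustrative, not taken from this patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed DCO formula: rate = xtal * (m + frac / 2^frac_width) / n */
	uint64_t xtal = 24000000, m = 123, frac = 768, n = 1;
	unsigned frac_width = 10;	/* matches the .frac width above */
	uint64_t rate = (xtal * m + ((xtal * frac) >> frac_width)) / n;

	/* 24 MHz * 123.75 = 2.97 GHz, a typical HDMI DCO rate */
	printf("hdmi_pll_dco: %llu Hz\n", (unsigned long long)rate);
	return 0;
}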
+
static struct clk_regmap gxbb_hdmi_pll_od = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_HDMI_PLL_CNTL2,
@@ -1524,6 +1576,616 @@ static struct clk_regmap gxbb_vapb = {
},
};
+/* Video Clocks */
+
+static struct clk_regmap gxbb_vid_pll_div = {
+ .data = &(struct meson_vid_pll_div_data){
+ .val = {
+ .reg_off = HHI_VID_PLL_CLK_DIV,
+ .shift = 0,
+ .width = 15,
+ },
+ .sel = {
+ .reg_off = HHI_VID_PLL_CLK_DIV,
+ .shift = 16,
+ .width = 2,
+ },
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vid_pll_div",
+ .ops = &meson_vid_pll_div_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const char * const gxbb_vid_pll_parent_names[] = { "vid_pll_div", "hdmi_pll" };
+
+static struct clk_regmap gxbb_vid_pll_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_PLL_CLK_DIV,
+ .mask = 0x1,
+ .shift = 18,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bit 18 selects from 2 possible parents:
+ * vid_pll_div or hdmi_pll
+ */
+ .parent_names = gxbb_vid_pll_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vid_pll_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_vid_pll = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_PLL_CLK_DIV,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vid_pll",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vid_pll_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static const char * const gxbb_vclk_parent_names[] = {
+ "vid_pll", "fclk_div4", "fclk_div3", "fclk_div5", "vid_pll",
+ "fclk_div7", "mpll1",
+};
+
+static struct clk_regmap gxbb_vclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bits 16:18 select from 8 mux inputs; the known
+ * parents are: vid_pll, fclk_div4, fclk_div3,
+ * fclk_div5, vid_pll, fclk_div7, mpll1
+ */
+ .parent_names = gxbb_vclk_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_vclk2_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bits 16:18 select from 8 mux inputs; the known
+ * parents are: vid_pll, fclk_div4, fclk_div3,
+ * fclk_div5, vid_pll, fclk_div7, mpll1
+ */
+ .parent_names = gxbb_vclk_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_vclk_input = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_input",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk2_input = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_input",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vclk_input" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_vclk2_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "vclk2_input" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_vclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk_div1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk_div2_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div2_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk_div4_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div4_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk_div6_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div6_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk_div12_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div12_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk2_div1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk2_div2_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div2_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk2_div4_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div4_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk2_div6_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div6_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_vclk2_div12_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div12_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "vclk2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_fixed_factor gxbb_vclk_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div2_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_vclk_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div4_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_vclk_div6 = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div6_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_vclk_div12 = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_div12_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_vclk2_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div2_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_vclk2_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div4_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_vclk2_div6 = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div6_en" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor gxbb_vclk2_div12 = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_div12_en" },
+ .num_parents = 1,
+ },
+};
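Each divided vclk tap is modeled as a gate (vclk_divN_en) feeding a fixed factor. A quick sketch of the resulting tap rates, assuming an illustrative 297 MHz vclk:

#include <stdio.h>

int main(void)
{
	const unsigned divs[] = { 1, 2, 4, 6, 12 };
	const unsigned long vclk = 297000000;	/* assumed example rate */

	for (unsigned i = 0; i < sizeof(divs) / sizeof(divs[0]); i++)
		printf("vclk_div%u: %lu Hz\n", divs[i], vclk / divs[i]);
	return 0;
}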
+
+static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const char * const gxbb_cts_parent_names[] = {
+ "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
+ "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
+ "vclk2_div6", "vclk2_div12"
+};
+
+static struct clk_regmap gxbb_cts_enci_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 28,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_enci_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = gxbb_cts_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_cts_encp_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 20,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_encp_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = gxbb_cts_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_cts_vdac_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 28,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_vdac_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = gxbb_cts_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+/* TOFIX: add support for cts_tcon */
+static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const char * const gxbb_cts_hdmi_tx_parent_names[] = {
+ "vclk_div1", "vclk_div2", "vclk_div4", "vclk_div6",
+ "vclk_div12", "vclk2_div1", "vclk2_div2", "vclk2_div4",
+ "vclk2_div6", "vclk2_div12"
+};
+
+static struct clk_regmap gxbb_hdmi_tx_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 16,
+ .table = mux_table_hdmi_tx_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_tx_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bits 19:16 select from 16 mux inputs; the known parents are:
+ * vclk_div1, vclk_div2, vclk_div4, vclk_div6, vclk_div12,
+ * vclk2_div1, vclk2_div2, vclk2_div4, vclk2_div6, vclk2_div12,
+ * cts_tcon (not supported yet, see TOFIX above)
+ */
+ .parent_names = gxbb_cts_hdmi_tx_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_cts_hdmi_tx_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
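The mux table above maps parent index to register value, leaving holes where the hardware input has no known parent (values 5-7). A minimal model of that translation, assuming clk_regmap's table semantics of writing table[i] to select parent i:

#include <stdio.h>

static const unsigned table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };

static int val_to_index(unsigned val)
{
	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i] == val)
			return (int)i;
	return -1;	/* register value with no known parent */
}

int main(void)
{
	printf("reg val 8 -> parent index %d (vclk2_div1)\n", val_to_index(8));
	printf("reg val 5 -> parent index %d (hole)\n", val_to_index(5));
	return 0;
}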
+
+static struct clk_regmap gxbb_cts_enci = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_enci",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_enci_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_cts_encp = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_encp",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_encp_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_cts_vdac = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_vdac",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "cts_vdac_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap gxbb_hdmi_tx = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 5,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "hdmi_tx",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "hdmi_tx_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/* HDMI Clocks */
+
+static const char * const gxbb_hdmi_parent_names[] = {
+ "xtal", "fclk_div4", "fclk_div3", "fclk_div5"
+};
+
+static struct clk_regmap gxbb_hdmi_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = gxbb_hdmi_parent_names,
+ .num_parents = ARRAY_SIZE(gxbb_hdmi_parent_names),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_hdmi_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_names = (const char *[]){ "hdmi_sel" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap gxbb_hdmi = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "hdmi",
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "hdmi_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
/* VDEC clocks */
static const char * const gxbb_vdec_parent_names[] = {
@@ -1935,6 +2597,46 @@ static struct clk_hw_onecell_data gxbb_hw_onecell_data = {
[CLKID_HDMI_PLL_OD2] = &gxbb_hdmi_pll_od2.hw,
[CLKID_SYS_PLL_DCO] = &gxbb_sys_pll_dco.hw,
[CLKID_GP0_PLL_DCO] = &gxbb_gp0_pll_dco.hw,
+ [CLKID_VID_PLL_DIV] = &gxbb_vid_pll_div.hw,
+ [CLKID_VID_PLL_SEL] = &gxbb_vid_pll_sel.hw,
+ [CLKID_VID_PLL] = &gxbb_vid_pll.hw,
+ [CLKID_VCLK_SEL] = &gxbb_vclk_sel.hw,
+ [CLKID_VCLK2_SEL] = &gxbb_vclk2_sel.hw,
+ [CLKID_VCLK_INPUT] = &gxbb_vclk_input.hw,
+ [CLKID_VCLK2_INPUT] = &gxbb_vclk2_input.hw,
+ [CLKID_VCLK_DIV] = &gxbb_vclk_div.hw,
+ [CLKID_VCLK2_DIV] = &gxbb_vclk2_div.hw,
+ [CLKID_VCLK] = &gxbb_vclk.hw,
+ [CLKID_VCLK2] = &gxbb_vclk2.hw,
+ [CLKID_VCLK_DIV1] = &gxbb_vclk_div1.hw,
+ [CLKID_VCLK_DIV2_EN] = &gxbb_vclk_div2_en.hw,
+ [CLKID_VCLK_DIV2] = &gxbb_vclk_div2.hw,
+ [CLKID_VCLK_DIV4_EN] = &gxbb_vclk_div4_en.hw,
+ [CLKID_VCLK_DIV4] = &gxbb_vclk_div4.hw,
+ [CLKID_VCLK_DIV6_EN] = &gxbb_vclk_div6_en.hw,
+ [CLKID_VCLK_DIV6] = &gxbb_vclk_div6.hw,
+ [CLKID_VCLK_DIV12_EN] = &gxbb_vclk_div12_en.hw,
+ [CLKID_VCLK_DIV12] = &gxbb_vclk_div12.hw,
+ [CLKID_VCLK2_DIV1] = &gxbb_vclk2_div1.hw,
+ [CLKID_VCLK2_DIV2_EN] = &gxbb_vclk2_div2_en.hw,
+ [CLKID_VCLK2_DIV2] = &gxbb_vclk2_div2.hw,
+ [CLKID_VCLK2_DIV4_EN] = &gxbb_vclk2_div4_en.hw,
+ [CLKID_VCLK2_DIV4] = &gxbb_vclk2_div4.hw,
+ [CLKID_VCLK2_DIV6_EN] = &gxbb_vclk2_div6_en.hw,
+ [CLKID_VCLK2_DIV6] = &gxbb_vclk2_div6.hw,
+ [CLKID_VCLK2_DIV12_EN] = &gxbb_vclk2_div12_en.hw,
+ [CLKID_VCLK2_DIV12] = &gxbb_vclk2_div12.hw,
+ [CLKID_CTS_ENCI_SEL] = &gxbb_cts_enci_sel.hw,
+ [CLKID_CTS_ENCP_SEL] = &gxbb_cts_encp_sel.hw,
+ [CLKID_CTS_VDAC_SEL] = &gxbb_cts_vdac_sel.hw,
+ [CLKID_HDMI_TX_SEL] = &gxbb_hdmi_tx_sel.hw,
+ [CLKID_CTS_ENCI] = &gxbb_cts_enci.hw,
+ [CLKID_CTS_ENCP] = &gxbb_cts_encp.hw,
+ [CLKID_CTS_VDAC] = &gxbb_cts_vdac.hw,
+ [CLKID_HDMI_TX] = &gxbb_hdmi_tx.hw,
+ [CLKID_HDMI_SEL] = &gxbb_hdmi_sel.hw,
+ [CLKID_HDMI_DIV] = &gxbb_hdmi_div.hw,
+ [CLKID_HDMI] = &gxbb_hdmi.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -2101,11 +2803,51 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_GEN_CLK_DIV] = &gxbb_gen_clk_div.hw,
[CLKID_GEN_CLK] = &gxbb_gen_clk.hw,
[CLKID_FIXED_PLL_DCO] = &gxbb_fixed_pll_dco.hw,
- [CLKID_HDMI_PLL_DCO] = &gxbb_hdmi_pll_dco.hw,
+ [CLKID_HDMI_PLL_DCO] = &gxl_hdmi_pll_dco.hw,
[CLKID_HDMI_PLL_OD] = &gxl_hdmi_pll_od.hw,
[CLKID_HDMI_PLL_OD2] = &gxl_hdmi_pll_od2.hw,
[CLKID_SYS_PLL_DCO] = &gxbb_sys_pll_dco.hw,
[CLKID_GP0_PLL_DCO] = &gxl_gp0_pll_dco.hw,
+ [CLKID_VID_PLL_DIV] = &gxbb_vid_pll_div.hw,
+ [CLKID_VID_PLL_SEL] = &gxbb_vid_pll_sel.hw,
+ [CLKID_VID_PLL] = &gxbb_vid_pll.hw,
+ [CLKID_VCLK_SEL] = &gxbb_vclk_sel.hw,
+ [CLKID_VCLK2_SEL] = &gxbb_vclk2_sel.hw,
+ [CLKID_VCLK_INPUT] = &gxbb_vclk_input.hw,
+ [CLKID_VCLK2_INPUT] = &gxbb_vclk2_input.hw,
+ [CLKID_VCLK_DIV] = &gxbb_vclk_div.hw,
+ [CLKID_VCLK2_DIV] = &gxbb_vclk2_div.hw,
+ [CLKID_VCLK] = &gxbb_vclk.hw,
+ [CLKID_VCLK2] = &gxbb_vclk2.hw,
+ [CLKID_VCLK_DIV1] = &gxbb_vclk_div1.hw,
+ [CLKID_VCLK_DIV2_EN] = &gxbb_vclk_div2_en.hw,
+ [CLKID_VCLK_DIV2] = &gxbb_vclk_div2.hw,
+ [CLKID_VCLK_DIV4_EN] = &gxbb_vclk_div4_en.hw,
+ [CLKID_VCLK_DIV4] = &gxbb_vclk_div4.hw,
+ [CLKID_VCLK_DIV6_EN] = &gxbb_vclk_div6_en.hw,
+ [CLKID_VCLK_DIV6] = &gxbb_vclk_div6.hw,
+ [CLKID_VCLK_DIV12_EN] = &gxbb_vclk_div12_en.hw,
+ [CLKID_VCLK_DIV12] = &gxbb_vclk_div12.hw,
+ [CLKID_VCLK2_DIV1] = &gxbb_vclk2_div1.hw,
+ [CLKID_VCLK2_DIV2_EN] = &gxbb_vclk2_div2_en.hw,
+ [CLKID_VCLK2_DIV2] = &gxbb_vclk2_div2.hw,
+ [CLKID_VCLK2_DIV4_EN] = &gxbb_vclk2_div4_en.hw,
+ [CLKID_VCLK2_DIV4] = &gxbb_vclk2_div4.hw,
+ [CLKID_VCLK2_DIV6_EN] = &gxbb_vclk2_div6_en.hw,
+ [CLKID_VCLK2_DIV6] = &gxbb_vclk2_div6.hw,
+ [CLKID_VCLK2_DIV12_EN] = &gxbb_vclk2_div12_en.hw,
+ [CLKID_VCLK2_DIV12] = &gxbb_vclk2_div12.hw,
+ [CLKID_CTS_ENCI_SEL] = &gxbb_cts_enci_sel.hw,
+ [CLKID_CTS_ENCP_SEL] = &gxbb_cts_encp_sel.hw,
+ [CLKID_CTS_VDAC_SEL] = &gxbb_cts_vdac_sel.hw,
+ [CLKID_HDMI_TX_SEL] = &gxbb_hdmi_tx_sel.hw,
+ [CLKID_CTS_ENCI] = &gxbb_cts_enci.hw,
+ [CLKID_CTS_ENCP] = &gxbb_cts_encp.hw,
+ [CLKID_CTS_VDAC] = &gxbb_cts_vdac.hw,
+ [CLKID_HDMI_TX] = &gxbb_hdmi_tx.hw,
+ [CLKID_HDMI_SEL] = &gxbb_hdmi_sel.hw,
+ [CLKID_HDMI_DIV] = &gxbb_hdmi_div.hw,
+ [CLKID_HDMI] = &gxbb_hdmi.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -2116,6 +2858,7 @@ static struct clk_regmap *const gxbb_clk_regmaps[] = {
&gxbb_hdmi_pll,
&gxbb_hdmi_pll_od,
&gxbb_hdmi_pll_od2,
+ &gxbb_hdmi_pll_dco,
};
static struct clk_regmap *const gxl_clk_regmaps[] = {
@@ -2123,6 +2866,7 @@ static struct clk_regmap *const gxl_clk_regmaps[] = {
&gxl_hdmi_pll,
&gxl_hdmi_pll_od,
&gxl_hdmi_pll_od2,
+ &gxl_hdmi_pll_dco,
};
static struct clk_regmap *const gx_clk_regmaps[] = {
@@ -2278,9 +3022,40 @@ static struct clk_regmap *const gx_clk_regmaps[] = {
&gxbb_gen_clk_div,
&gxbb_gen_clk,
&gxbb_fixed_pll_dco,
- &gxbb_hdmi_pll_dco,
&gxbb_sys_pll_dco,
&gxbb_gp0_pll,
+ &gxbb_vid_pll,
+ &gxbb_vid_pll_sel,
+ &gxbb_vid_pll_div,
+ &gxbb_vclk,
+ &gxbb_vclk_sel,
+ &gxbb_vclk_div,
+ &gxbb_vclk_input,
+ &gxbb_vclk_div1,
+ &gxbb_vclk_div2_en,
+ &gxbb_vclk_div4_en,
+ &gxbb_vclk_div6_en,
+ &gxbb_vclk_div12_en,
+ &gxbb_vclk2,
+ &gxbb_vclk2_sel,
+ &gxbb_vclk2_div,
+ &gxbb_vclk2_input,
+ &gxbb_vclk2_div1,
+ &gxbb_vclk2_div2_en,
+ &gxbb_vclk2_div4_en,
+ &gxbb_vclk2_div6_en,
+ &gxbb_vclk2_div12_en,
+ &gxbb_cts_enci,
+ &gxbb_cts_enci_sel,
+ &gxbb_cts_encp,
+ &gxbb_cts_encp_sel,
+ &gxbb_cts_vdac,
+ &gxbb_cts_vdac_sel,
+ &gxbb_hdmi_tx,
+ &gxbb_hdmi_tx_sel,
+ &gxbb_hdmi_sel,
+ &gxbb_hdmi_div,
+ &gxbb_hdmi,
};
struct clkc_data {
diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h
index 72bc077d9663..b53584fe66cf 100644
--- a/drivers/clk/meson/gxbb.h
+++ b/drivers/clk/meson/gxbb.h
@@ -165,8 +165,30 @@
#define CLKID_HDMI_PLL_OD2 163
#define CLKID_SYS_PLL_DCO 164
#define CLKID_GP0_PLL_DCO 165
-
-#define NR_CLKS 166
+#define CLKID_VID_PLL_SEL 167
+#define CLKID_VID_PLL_DIV 168
+#define CLKID_VCLK_SEL 169
+#define CLKID_VCLK2_SEL 170
+#define CLKID_VCLK_INPUT 171
+#define CLKID_VCLK2_INPUT 172
+#define CLKID_VCLK_DIV 173
+#define CLKID_VCLK2_DIV 174
+#define CLKID_VCLK_DIV2_EN 177
+#define CLKID_VCLK_DIV4_EN 178
+#define CLKID_VCLK_DIV6_EN 179
+#define CLKID_VCLK_DIV12_EN 180
+#define CLKID_VCLK2_DIV2_EN 181
+#define CLKID_VCLK2_DIV4_EN 182
+#define CLKID_VCLK2_DIV6_EN 183
+#define CLKID_VCLK2_DIV12_EN 184
+#define CLKID_CTS_ENCI_SEL 195
+#define CLKID_CTS_ENCP_SEL 196
+#define CLKID_CTS_VDAC_SEL 197
+#define CLKID_HDMI_TX_SEL 198
+#define CLKID_HDMI_SEL 203
+#define CLKID_HDMI_DIV 204
+
+#define NR_CLKS 206
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/gxbb-clkc.h>
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 346b9e165b7a..950d0e548c75 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
+#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
@@ -42,6 +43,11 @@ static const struct pll_params_table sys_pll_params_table[] = {
PLL_PARAMS(62, 1),
PLL_PARAMS(63, 1),
PLL_PARAMS(64, 1),
+ PLL_PARAMS(65, 1),
+ PLL_PARAMS(66, 1),
+ PLL_PARAMS(67, 1),
+ PLL_PARAMS(68, 1),
+ PLL_PARAMS(84, 1),
{ /* sentinel */ },
};
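Assuming the integer-PLL relation rate = xtal * m / n used by these parm tables, the new entries extend sys_pll coverage as follows (24 MHz xtal assumed):

#include <stdio.h>

int main(void)
{
	/* Assumed sys_pll DCO relation: rate = xtal * m / n */
	const unsigned long xtal = 24000000;
	const unsigned m[] = { 65, 66, 67, 68, 84 }, n = 1;

	for (unsigned i = 0; i < sizeof(m) / sizeof(m[0]); i++)
		printf("PLL_PARAMS(%u, %u) -> %lu Hz\n", m[i], n, xtal * m[i] / n);
	return 0;
}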
@@ -114,7 +120,7 @@ static struct clk_regmap meson8b_fixed_pll = {
},
};
-static struct clk_regmap meson8b_vid_pll_dco = {
+static struct clk_regmap meson8b_hdmi_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
.reg_off = HHI_VID_PLL_CNTL,
@@ -128,9 +134,14 @@ static struct clk_regmap meson8b_vid_pll_dco = {
},
.n = {
.reg_off = HHI_VID_PLL_CNTL,
- .shift = 9,
+ .shift = 10,
.width = 5,
},
+ .frac = {
+ .reg_off = HHI_VID_PLL_CNTL2,
+ .shift = 0,
+ .width = 12,
+ },
.l = {
.reg_off = HHI_VID_PLL_CNTL,
.shift = 31,
@@ -143,14 +154,15 @@ static struct clk_regmap meson8b_vid_pll_dco = {
},
},
.hw.init = &(struct clk_init_data){
- .name = "vid_pll_dco",
+ /* sometimes also called "HPLL" or "HPLL PLL" */
+ .name = "hdmi_pll_dco",
.ops = &meson_clk_pll_ro_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
},
};
-static struct clk_regmap meson8b_vid_pll = {
+static struct clk_regmap meson8b_hdmi_pll_lvds_out = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_VID_PLL_CNTL,
.shift = 16,
@@ -158,9 +170,25 @@ static struct clk_regmap meson8b_vid_pll = {
.flags = CLK_DIVIDER_POWER_OF_TWO,
},
.hw.init = &(struct clk_init_data){
- .name = "vid_pll",
+ .name = "hdmi_pll_lvds_out",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_hdmi_pll_hdmi_out = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VID_PLL_CNTL,
+ .shift = 18,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_hdmi_out",
.ops = &clk_regmap_divider_ro_ops,
- .parent_names = (const char *[]){ "vid_pll_dco" },
+ .parent_names = (const char *[]){ "hdmi_pll_dco" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
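Both output dividers use CLK_DIVIDER_POWER_OF_TWO, so the 2-bit field divides the DCO by 1, 2, 4 or 8. A quick model, with an illustrative DCO rate:

#include <stdio.h>

int main(void)
{
	/* CLK_DIVIDER_POWER_OF_TWO: out = parent >> field value */
	const unsigned long dco = 2970000000UL;	/* assumed hdmi_pll_dco rate */

	for (unsigned val = 0; val < 4; val++)
		printf("od=%u -> %lu Hz\n", val, dco >> val);
	return 0;
}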
@@ -197,7 +225,7 @@ static struct clk_regmap meson8b_sys_pll_dco = {
},
.hw.init = &(struct clk_init_data){
.name = "sys_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
+ .ops = &meson_clk_pll_ops,
.parent_names = (const char *[]){ "xtal" },
.num_parents = 1,
},
@@ -212,7 +240,7 @@ static struct clk_regmap meson8b_sys_pll = {
},
.hw.init = &(struct clk_init_data){
.name = "sys_pll",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "sys_pll_dco" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -546,7 +574,7 @@ static struct clk_regmap meson8b_cpu_in_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "cpu_in_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "xtal", "sys_pll" },
.num_parents = 2,
.flags = (CLK_SET_RATE_PARENT |
@@ -554,11 +582,11 @@ static struct clk_regmap meson8b_cpu_in_sel = {
},
};
-static struct clk_fixed_factor meson8b_cpu_div2 = {
+static struct clk_fixed_factor meson8b_cpu_in_div2 = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "cpu_div2",
+ .name = "cpu_in_div2",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "cpu_in_sel" },
.num_parents = 1,
@@ -566,11 +594,11 @@ static struct clk_fixed_factor meson8b_cpu_div2 = {
},
};
-static struct clk_fixed_factor meson8b_cpu_div3 = {
+static struct clk_fixed_factor meson8b_cpu_in_div3 = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
- .name = "cpu_div3",
+ .name = "cpu_in_div3",
.ops = &clk_fixed_factor_ops,
.parent_names = (const char *[]){ "cpu_in_sel" },
.num_parents = 1,
@@ -579,13 +607,14 @@ static struct clk_fixed_factor meson8b_cpu_div3 = {
};
static const struct clk_div_table cpu_scale_table[] = {
- { .val = 2, .div = 4 },
- { .val = 3, .div = 6 },
- { .val = 4, .div = 8 },
- { .val = 5, .div = 10 },
- { .val = 6, .div = 12 },
- { .val = 7, .div = 14 },
- { .val = 8, .div = 16 },
+ { .val = 1, .div = 4 },
+ { .val = 2, .div = 6 },
+ { .val = 3, .div = 8 },
+ { .val = 4, .div = 10 },
+ { .val = 5, .div = 12 },
+ { .val = 6, .div = 14 },
+ { .val = 7, .div = 16 },
+ { .val = 8, .div = 18 },
{ /* sentinel */ },
};
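The corrected table shifts every register value down by one and adds the missing /18 entry, so the mapping is now div = (val + 1) * 2. A self-checking sketch of that relation:

#include <assert.h>
#include <stdio.h>

struct clk_div_entry { unsigned val, div; };

int main(void)
{
	/* The corrected cpu_scale_table above follows div = (val + 1) * 2 */
	const struct clk_div_entry t[] = {
		{ 1, 4 }, { 2, 6 }, { 3, 8 }, { 4, 10 },
		{ 5, 12 }, { 6, 14 }, { 7, 16 }, { 8, 18 },
	};

	for (unsigned i = 0; i < sizeof(t) / sizeof(t[0]); i++)
		assert(t[i].div == (t[i].val + 1) * 2);
	printf("cpu_scale_table mapping verified\n");
	return 0;
}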
@@ -593,33 +622,40 @@ static struct clk_regmap meson8b_cpu_scale_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.shift = 20,
- .width = 9,
+ .width = 10,
.table = cpu_scale_table,
.flags = CLK_DIVIDER_ALLOW_ZERO,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_scale_div",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_names = (const char *[]){ "cpu_in_sel" },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
+static u32 mux_table_cpu_scale_out_sel[] = { 0, 1, 3 };
static struct clk_regmap meson8b_cpu_scale_out_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x3,
.shift = 2,
+ .table = mux_table_cpu_scale_out_sel,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_scale_out_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * NOTE: We are skipping the parent with value 0x2 (which is
+ * "cpu_in_div3") because it results in a 33% duty cycle,
+ * which makes the system unstable and can lock up the
+ * whole system.
+ */
.parent_names = (const char *[]) { "cpu_in_sel",
- "cpu_div2",
- "cpu_div3",
+ "cpu_in_div2",
"cpu_scale_div" },
- .num_parents = 4,
+ .num_parents = 3,
.flags = CLK_SET_RATE_PARENT,
},
};
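With the table in place, parent index i is written to the register as mux_table_cpu_scale_out_sel[i], so value 0x2 is simply never produced. A tiny model of the resulting index-to-value mapping (parent list as declared above):

#include <stdio.h>

static const unsigned table[] = { 0, 1, 3 };	/* 0x2 intentionally absent */
static const char *const parents[] = {
	"cpu_in_sel", "cpu_in_div2", "cpu_scale_div"
};

int main(void)
{
	for (unsigned i = 0; i < 3; i++)
		printf("%-13s -> mux value %u\n", parents[i], table[i]);
	return 0;
}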
@@ -632,12 +668,13 @@ static struct clk_regmap meson8b_cpu_clk = {
},
.hw.init = &(struct clk_init_data){
.name = "cpu_clk",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_names = (const char *[]){ "xtal",
"cpu_scale_out_sel" },
.num_parents = 2,
.flags = (CLK_SET_RATE_PARENT |
- CLK_SET_RATE_NO_REPARENT),
+ CLK_SET_RATE_NO_REPARENT |
+ CLK_IS_CRITICAL),
},
};
@@ -689,6 +726,853 @@ static struct clk_regmap meson8b_nand_clk_gate = {
},
};
+static struct clk_fixed_factor meson8b_cpu_clk_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_cpu_clk_div3 = {
+ .mult = 1,
+ .div = 3,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_div3",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_cpu_clk_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_cpu_clk_div5 = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_div5",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_cpu_clk_div6 = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_cpu_clk_div7 = {
+ .mult = 1,
+ .div = 7,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_div7",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor meson8b_cpu_clk_div8 = {
+ .mult = 1,
+ .div = 8,
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_div8",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "cpu_clk" },
+ .num_parents = 1,
+ },
+};
+
+static u32 mux_table_abp[] = { 1, 2, 3, 4, 5, 6, 7 };
+static struct clk_regmap meson8b_abp_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .mask = 0x7,
+ .shift = 3,
+ .table = mux_table_abp,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "abp_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "cpu_clk_div2",
+ "cpu_clk_div3",
+ "cpu_clk_div4",
+ "cpu_clk_div5",
+ "cpu_clk_div6",
+ "cpu_clk_div7",
+ "cpu_clk_div8", },
+ .num_parents = 7,
+ },
+};
+
+static struct clk_regmap meson8b_abp_clk_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 16,
+ .flags = CLK_GATE_SET_TO_DISABLE,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "abp_clk_dis",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "abp_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_periph_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .mask = 0x7,
+ .shift = 6,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "periph_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "cpu_clk_div2",
+ "cpu_clk_div3",
+ "cpu_clk_div4",
+ "cpu_clk_div5",
+ "cpu_clk_div6",
+ "cpu_clk_div7",
+ "cpu_clk_div8", },
+ .num_parents = 7,
+ },
+};
+
+static struct clk_regmap meson8b_periph_clk_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 17,
+ .flags = CLK_GATE_SET_TO_DISABLE,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "periph_clk_dis",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "periph_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static u32 mux_table_axi[] = { 1, 2, 3, 4, 5, 6, 7 };
+static struct clk_regmap meson8b_axi_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .mask = 0x7,
+ .shift = 9,
+ .table = mux_table_axi,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axi_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "cpu_clk_div2",
+ "cpu_clk_div3",
+ "cpu_clk_div4",
+ "cpu_clk_div5",
+ "cpu_clk_div6",
+ "cpu_clk_div7",
+ "cpu_clk_div8", },
+ .num_parents = 7,
+ },
+};
+
+static struct clk_regmap meson8b_axi_clk_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 18,
+ .flags = CLK_GATE_SET_TO_DISABLE,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "axi_clk_dis",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "axi_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_l2_dram_clk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .mask = 0x7,
+ .shift = 12,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "l2_dram_clk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_names = (const char *[]){ "cpu_clk_div2",
+ "cpu_clk_div3",
+ "cpu_clk_div4",
+ "cpu_clk_div5",
+ "cpu_clk_div6",
+ "cpu_clk_div7",
+ "cpu_clk_div8", },
+ .num_parents = 7,
+ },
+};
+
+static struct clk_regmap meson8b_l2_dram_clk_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL1,
+ .bit_idx = 19,
+ .flags = CLK_GATE_SET_TO_DISABLE,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "l2_dram_clk_dis",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "l2_dram_clk_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vid_pll_in_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_DIVIDER_CNTL,
+ .mask = 0x1,
+ .shift = 15,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll_in_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ /*
+ * TODO: depending on the SoC there is also a second parent:
+ * Meson8: unknown
+ * Meson8b: hdmi_pll_dco
+ * Meson8m2: vid2_pll
+ */
+ .parent_names = (const char *[]){ "hdmi_pll_dco" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vid_pll_in_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_DIVIDER_CNTL,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll_in_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vid_pll_in_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vid_pll_pre_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VID_DIVIDER_CNTL,
+ .shift = 4,
+ .width = 3,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll_pre_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "vid_pll_in_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vid_pll_post_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VID_DIVIDER_CNTL,
+ .shift = 12,
+ .width = 3,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll_post_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "vid_pll_pre_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vid_pll = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_DIVIDER_CNTL,
+ .mask = 0x3,
+ .shift = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll",
+ .ops = &clk_regmap_mux_ro_ops,
+ /* TODO: parent 0x2 is vid_pll_pre_div_mult7_div2 */
+ .parent_names = (const char *[]){ "vid_pll_pre_div",
+ "vid_pll_post_div" },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vid_pll_final_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll_final_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "vid_pll" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const char * const meson8b_vclk_mux_parents[] = {
+ "vid_pll_final_div", "fclk_div4", "fclk_div3", "fclk_div5",
+ "vid_pll_final_div", "fclk_div7", "mpll1"
+};
+
+static struct clk_regmap meson8b_vclk_in_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_in_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = meson8b_vclk_mux_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parents),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vclk_in_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_in_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk_in_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vclk_div1_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_DIV,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div1_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk_in_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor meson8b_vclk_div2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_in_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_regmap meson8b_vclk_div2_div_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_DIV,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div2_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk_div2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor meson8b_vclk_div4_div = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_in_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_regmap meson8b_vclk_div4_div_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_DIV,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div4_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk_div4" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor meson8b_vclk_div6_div = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_in_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_regmap meson8b_vclk_div6_div_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_DIV,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div6_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk_div6" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor meson8b_vclk_div12_div = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk_in_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_regmap meson8b_vclk_div12_div_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_DIV,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div12_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk_div12" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vclk2_in_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_in_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = meson8b_vclk_mux_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parents),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vclk2_clk_in_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_in_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk2_in_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_vclk2_div1_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div1_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk2_in_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor meson8b_vclk2_div2_div = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_in_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_regmap meson8b_vclk2_div2_div_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div2_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk2_div2" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor meson8b_vclk2_div4_div = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_in_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_regmap meson8b_vclk2_div4_div_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div4_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk2_div4" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor meson8b_vclk2_div6_div = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_in_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_regmap meson8b_vclk2_div6_div_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div6_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk2_div6" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor meson8b_vclk2_div12_div = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_names = (const char *[]){ "vclk2_in_en" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ }
+};
+
+static struct clk_regmap meson8b_vclk2_div12_div_gate = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div12_en",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "vclk2_div12" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const char * const meson8b_vclk_enc_mux_parents[] = {
+ "vclk_div1_en", "vclk_div2_en", "vclk_div4_en", "vclk_div6_en",
+ "vclk_div12_en",
+};
+
+static struct clk_regmap meson8b_cts_enct_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 20,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_enct_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = meson8b_vclk_enc_mux_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parents),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cts_enct = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_enct",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cts_enct_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cts_encp_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 24,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_encp_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = meson8b_vclk_enc_mux_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parents),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cts_encp = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_encp",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cts_encp_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cts_enci_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 28,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_enci_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = meson8b_vclk_enc_mux_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parents),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cts_enci = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_enci",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cts_enci_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_hdmi_tx_pixel_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_tx_pixel_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = meson8b_vclk_enc_mux_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parents),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_hdmi_tx_pixel = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 5,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_tx_pixel",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_tx_pixel_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const char * const meson8b_vclk2_enc_mux_parents[] = {
+ "vclk2_div1_en", "vclk2_div2_en", "vclk2_div4_en", "vclk2_div6_en",
+ "vclk2_div12_en",
+};
+
+static struct clk_regmap meson8b_cts_encl_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 12,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_encl_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = meson8b_vclk2_enc_mux_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parents),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cts_encl = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_encl",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cts_encl_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cts_vdac0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 28,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_vdac0_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_names = meson8b_vclk2_enc_mux_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parents),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_cts_vdac0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_vdac0",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "cts_vdac0_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_hdmi_sys_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_sys_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ /* FIXME: all other parents are unknown */
+ .parent_names = (const char *[]){ "xtal" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap meson8b_hdmi_sys_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_sys_div",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_sys_sel" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap meson8b_hdmi_sys = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "hdmi_sys",
+ .ops = &clk_regmap_gate_ro_ops,
+ .parent_names = (const char *[]){ "hdmi_sys_div" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* Everything Else (EE) domain gates */
static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
@@ -874,8 +1758,8 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_MPLL1_DIV] = &meson8b_mpll1_div.hw,
[CLKID_MPLL2_DIV] = &meson8b_mpll2_div.hw,
[CLKID_CPU_IN_SEL] = &meson8b_cpu_in_sel.hw,
- [CLKID_CPU_DIV2] = &meson8b_cpu_div2.hw,
- [CLKID_CPU_DIV3] = &meson8b_cpu_div3.hw,
+ [CLKID_CPU_IN_DIV2] = &meson8b_cpu_in_div2.hw,
+ [CLKID_CPU_IN_DIV3] = &meson8b_cpu_in_div3.hw,
[CLKID_CPU_SCALE_DIV] = &meson8b_cpu_scale_div.hw,
[CLKID_CPU_SCALE_OUT_SEL] = &meson8b_cpu_scale_out_sel.hw,
[CLKID_MPLL_PREDIV] = &meson8b_mpll_prediv.hw,
@@ -888,8 +1772,67 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
[CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
- [CLKID_PLL_VID_DCO] = &meson8b_vid_pll_dco.hw,
+ [CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
+ [CLKID_CPU_CLK_DIV2] = &meson8b_cpu_clk_div2.hw,
+ [CLKID_CPU_CLK_DIV3] = &meson8b_cpu_clk_div3.hw,
+ [CLKID_CPU_CLK_DIV4] = &meson8b_cpu_clk_div4.hw,
+ [CLKID_CPU_CLK_DIV5] = &meson8b_cpu_clk_div5.hw,
+ [CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
+ [CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
+ [CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
+ [CLKID_ABP_SEL] = &meson8b_abp_clk_sel.hw,
+ [CLKID_ABP] = &meson8b_abp_clk_gate.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
+ [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
+ [CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
+ [CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
+ [CLKID_VID_PLL_IN_EN] = &meson8b_vid_pll_in_en.hw,
+ [CLKID_VID_PLL_PRE_DIV] = &meson8b_vid_pll_pre_div.hw,
+ [CLKID_VID_PLL_POST_DIV] = &meson8b_vid_pll_post_div.hw,
+ [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw,
+ [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
+ [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
+ [CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
+ [CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
+ [CLKID_CTS_ENCP] = &meson8b_cts_encp.hw,
+ [CLKID_CTS_ENCI_SEL] = &meson8b_cts_enci_sel.hw,
+ [CLKID_CTS_ENCI] = &meson8b_cts_enci.hw,
+ [CLKID_HDMI_TX_PIXEL_SEL] = &meson8b_hdmi_tx_pixel_sel.hw,
+ [CLKID_HDMI_TX_PIXEL] = &meson8b_hdmi_tx_pixel.hw,
+ [CLKID_CTS_ENCL_SEL] = &meson8b_cts_encl_sel.hw,
+ [CLKID_CTS_ENCL] = &meson8b_cts_encl.hw,
+ [CLKID_CTS_VDAC0_SEL] = &meson8b_cts_vdac0_sel.hw,
+ [CLKID_CTS_VDAC0] = &meson8b_cts_vdac0.hw,
+ [CLKID_HDMI_SYS_SEL] = &meson8b_hdmi_sys_sel.hw,
+ [CLKID_HDMI_SYS_DIV] = &meson8b_hdmi_sys_div.hw,
+ [CLKID_HDMI_SYS] = &meson8b_hdmi_sys.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -983,7 +1926,6 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_mpll1_div,
&meson8b_mpll2_div,
&meson8b_fixed_pll,
- &meson8b_vid_pll,
&meson8b_sys_pll,
&meson8b_cpu_in_sel,
&meson8b_cpu_scale_div,
@@ -999,8 +1941,53 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_nand_clk_div,
&meson8b_nand_clk_gate,
&meson8b_fixed_pll_dco,
- &meson8b_vid_pll_dco,
+ &meson8b_hdmi_pll_dco,
&meson8b_sys_pll_dco,
+ &meson8b_abp_clk_sel,
+ &meson8b_abp_clk_gate,
+ &meson8b_periph_clk_sel,
+ &meson8b_periph_clk_gate,
+ &meson8b_axi_clk_sel,
+ &meson8b_axi_clk_gate,
+ &meson8b_l2_dram_clk_sel,
+ &meson8b_l2_dram_clk_gate,
+ &meson8b_hdmi_pll_lvds_out,
+ &meson8b_hdmi_pll_hdmi_out,
+ &meson8b_vid_pll_in_sel,
+ &meson8b_vid_pll_in_en,
+ &meson8b_vid_pll_pre_div,
+ &meson8b_vid_pll_post_div,
+ &meson8b_vid_pll,
+ &meson8b_vid_pll_final_div,
+ &meson8b_vclk_in_sel,
+ &meson8b_vclk_in_en,
+ &meson8b_vclk_div1_gate,
+ &meson8b_vclk_div2_div_gate,
+ &meson8b_vclk_div4_div_gate,
+ &meson8b_vclk_div6_div_gate,
+ &meson8b_vclk_div12_div_gate,
+ &meson8b_vclk2_in_sel,
+ &meson8b_vclk2_clk_in_en,
+ &meson8b_vclk2_div1_gate,
+ &meson8b_vclk2_div2_div_gate,
+ &meson8b_vclk2_div4_div_gate,
+ &meson8b_vclk2_div6_div_gate,
+ &meson8b_vclk2_div12_div_gate,
+ &meson8b_cts_enct_sel,
+ &meson8b_cts_enct,
+ &meson8b_cts_encp_sel,
+ &meson8b_cts_encp,
+ &meson8b_cts_enci_sel,
+ &meson8b_cts_enci,
+ &meson8b_hdmi_tx_pixel_sel,
+ &meson8b_hdmi_tx_pixel,
+ &meson8b_cts_encl_sel,
+ &meson8b_cts_encl,
+ &meson8b_cts_vdac0_sel,
+ &meson8b_cts_vdac0,
+ &meson8b_hdmi_sys_sel,
+ &meson8b_hdmi_sys_div,
+ &meson8b_hdmi_sys,
};
static const struct meson8b_clk_reset_line {
@@ -1101,6 +2088,53 @@ static const struct reset_control_ops meson8b_clk_reset_ops = {
.deassert = meson8b_clk_reset_deassert,
};
+struct meson8b_nb_data {
+ struct notifier_block nb;
+ struct clk_hw_onecell_data *onecell_data;
+};
+
+static int meson8b_cpu_clk_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct meson8b_nb_data *nb_data =
+ container_of(nb, struct meson8b_nb_data, nb);
+ struct clk_hw **hws = nb_data->onecell_data->hws;
+ struct clk_hw *cpu_clk_hw, *parent_clk_hw;
+ struct clk *cpu_clk, *parent_clk;
+ int ret;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ parent_clk_hw = hws[CLKID_XTAL];
+ break;
+
+ case POST_RATE_CHANGE:
+ parent_clk_hw = hws[CLKID_CPU_SCALE_OUT_SEL];
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ cpu_clk_hw = hws[CLKID_CPUCLK];
+ cpu_clk = __clk_lookup(clk_hw_get_name(cpu_clk_hw));
+
+ parent_clk = __clk_lookup(clk_hw_get_name(parent_clk_hw));
+
+ ret = clk_set_parent(cpu_clk, parent_clk);
+ if (ret)
+ return notifier_from_errno(ret);
+
+ udelay(100);
+
+ return NOTIFY_OK;
+}
+
+static struct meson8b_nb_data meson8b_cpu_nb_data = {
+ .nb.notifier_call = meson8b_cpu_clk_notifier_cb,
+ .onecell_data = &meson8b_hw_onecell_data,
+};
+
static const struct regmap_config clkc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -1110,20 +2144,27 @@ static const struct regmap_config clkc_regmap_config = {
static void __init meson8b_clkc_init(struct device_node *np)
{
struct meson8b_clk_reset *rstc;
+ const char *notifier_clk_name;
+ struct clk *notifier_clk;
void __iomem *clk_base;
struct regmap *map;
int i, ret;
- /* Generic clocks, PLLs and some of the reset-bits */
- clk_base = of_iomap(np, 1);
- if (!clk_base) {
- pr_err("%s: Unable to map clk base\n", __func__);
- return;
- }
+ map = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(map)) {
+ pr_info("failed to get HHI regmap - Trying obsolete regs\n");
- map = regmap_init_mmio(NULL, clk_base, &clkc_regmap_config);
- if (IS_ERR(map))
- return;
+ /* Generic clocks, PLLs and some of the reset-bits */
+ clk_base = of_iomap(np, 1);
+ if (!clk_base) {
+ pr_err("%s: Unable to map clk base\n", __func__);
+ return;
+ }
+
+ map = regmap_init_mmio(NULL, clk_base, &clkc_regmap_config);
+ if (IS_ERR(map))
+ return;
+ }
rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
if (!rstc)
@@ -1159,6 +2200,20 @@ static void __init meson8b_clkc_init(struct device_node *np)
return;
}
+ /*
+ * FIXME: we shouldn't program the muxes in notifier handlers. The
+ * tricky programming sequence will be handled by the forthcoming
+ * coordinated clock rates mechanism once that feature is released.
+ */
+ notifier_clk_name = clk_hw_get_name(&meson8b_cpu_scale_out_sel.hw);
+ notifier_clk = __clk_lookup(notifier_clk_name);
+ ret = clk_notifier_register(notifier_clk, &meson8b_cpu_nb_data.nb);
+ if (ret) {
+ pr_err("%s: failed to register the CPU clock notifier\n",
+ __func__);
+ return;
+ }
+
ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
&meson8b_hw_onecell_data);
if (ret)
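
The notifier registered above uses the standard clk rate-change notifier machinery:
clk_notifier_register() attaches a notifier_block to a struct clk, and the callback
is invoked with PRE_RATE_CHANGE/POST_RATE_CHANGE (or ABORT_RATE_CHANGE) along with a
struct clk_notifier_data payload. A minimal consumer-side sketch of that API - hypothetical
code, not part of this patch; demo_rate_cb and the "cpu_clk" name are invented for
illustration:

#include <linux/clk.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Log rate transitions on a clock obtained elsewhere, e.g. with clk_get(). */
static int demo_rate_cb(struct notifier_block *nb, unsigned long event,
			void *data)
{
	struct clk_notifier_data *cnd = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		pr_info("rate change pending: %lu -> %lu\n",
			cnd->old_rate, cnd->new_rate);
		return NOTIFY_OK;
	case POST_RATE_CHANGE:
	case ABORT_RATE_CHANGE:
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_rate_cb,
};

/* after clk = clk_get(dev, "cpu_clk"): */
/*	ret = clk_notifier_register(clk, &demo_nb); */
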
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index 1c6fb180e6a2..87fba739af81 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -19,20 +19,26 @@
*
* [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf
*/
+#define HHI_VIID_CLK_DIV 0x128 /* 0x4a offset in data sheet */
+#define HHI_VIID_CLK_CNTL 0x12c /* 0x4b offset in data sheet */
#define HHI_GCLK_MPEG0 0x140 /* 0x50 offset in data sheet */
#define HHI_GCLK_MPEG1 0x144 /* 0x51 offset in data sheet */
#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
#define HHI_GCLK_OTHER 0x150 /* 0x54 offset in data sheet */
#define HHI_GCLK_AO 0x154 /* 0x55 offset in data sheet */
#define HHI_SYS_CPU_CLK_CNTL1 0x15c /* 0x57 offset in data sheet */
+#define HHI_VID_CLK_DIV 0x164 /* 0x59 offset in data sheet */
#define HHI_MPEG_CLK_CNTL 0x174 /* 0x5d offset in data sheet */
#define HHI_VID_CLK_CNTL 0x17c /* 0x5f offset in data sheet */
+#define HHI_VID_CLK_CNTL2 0x194 /* 0x65 offset in data sheet */
#define HHI_VID_DIVIDER_CNTL 0x198 /* 0x66 offset in data sheet */
#define HHI_SYS_CPU_CLK_CNTL0 0x19c /* 0x67 offset in data sheet */
+#define HHI_HDMI_CLK_CNTL 0x1cc /* 0x73 offset in data sheet */
#define HHI_NAND_CLK_CNTL 0x25c /* 0x97 offset in data sheet */
#define HHI_MPLL_CNTL 0x280 /* 0xa0 offset in data sheet */
#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
#define HHI_VID_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
+#define HHI_VID_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
/*
* MPLL register offset taken from the S905 datasheet. Vendor kernel source
@@ -63,8 +69,8 @@
#define CLKID_MPLL1_DIV 97
#define CLKID_MPLL2_DIV 98
#define CLKID_CPU_IN_SEL 99
-#define CLKID_CPU_DIV2 100
-#define CLKID_CPU_DIV3 101
+#define CLKID_CPU_IN_DIV2 100
+#define CLKID_CPU_IN_DIV3 101
#define CLKID_CPU_SCALE_DIV 102
#define CLKID_CPU_SCALE_OUT_SEL 103
#define CLKID_MPLL_PREDIV 104
@@ -76,10 +82,65 @@
#define CLKID_NAND_SEL 110
#define CLKID_NAND_DIV 111
#define CLKID_PLL_FIXED_DCO 113
-#define CLKID_PLL_VID_DCO 114
+#define CLKID_HDMI_PLL_DCO 114
#define CLKID_PLL_SYS_DCO 115
+#define CLKID_CPU_CLK_DIV2 116
+#define CLKID_CPU_CLK_DIV3 117
+#define CLKID_CPU_CLK_DIV4 118
+#define CLKID_CPU_CLK_DIV5 119
+#define CLKID_CPU_CLK_DIV6 120
+#define CLKID_CPU_CLK_DIV7 121
+#define CLKID_CPU_CLK_DIV8 122
+#define CLKID_ABP_SEL 123
+#define CLKID_PERIPH_SEL 125
+#define CLKID_AXI_SEL 127
+#define CLKID_L2_DRAM_SEL 129
+#define CLKID_HDMI_PLL_LVDS_OUT 131
+#define CLKID_HDMI_PLL_HDMI_OUT 132
+#define CLKID_VID_PLL_IN_SEL 133
+#define CLKID_VID_PLL_IN_EN 134
+#define CLKID_VID_PLL_PRE_DIV 135
+#define CLKID_VID_PLL_POST_DIV 136
+#define CLKID_VID_PLL_FINAL_DIV 137
+#define CLKID_VCLK_IN_SEL 138
+#define CLKID_VCLK_IN_EN 139
+#define CLKID_VCLK_DIV1 140
+#define CLKID_VCLK_DIV2_DIV 141
+#define CLKID_VCLK_DIV2 142
+#define CLKID_VCLK_DIV4_DIV 143
+#define CLKID_VCLK_DIV4 144
+#define CLKID_VCLK_DIV6_DIV 145
+#define CLKID_VCLK_DIV6 146
+#define CLKID_VCLK_DIV12_DIV 147
+#define CLKID_VCLK_DIV12 148
+#define CLKID_VCLK2_IN_SEL 149
+#define CLKID_VCLK2_IN_EN 150
+#define CLKID_VCLK2_DIV1 151
+#define CLKID_VCLK2_DIV2_DIV 152
+#define CLKID_VCLK2_DIV2 153
+#define CLKID_VCLK2_DIV4_DIV 154
+#define CLKID_VCLK2_DIV4 155
+#define CLKID_VCLK2_DIV6_DIV 156
+#define CLKID_VCLK2_DIV6 157
+#define CLKID_VCLK2_DIV12_DIV 158
+#define CLKID_VCLK2_DIV12 159
+#define CLKID_CTS_ENCT_SEL 160
+#define CLKID_CTS_ENCT 161
+#define CLKID_CTS_ENCP_SEL 162
+#define CLKID_CTS_ENCP 163
+#define CLKID_CTS_ENCI_SEL 164
+#define CLKID_CTS_ENCI 165
+#define CLKID_HDMI_TX_PIXEL_SEL 166
+#define CLKID_HDMI_TX_PIXEL 167
+#define CLKID_CTS_ENCL_SEL 168
+#define CLKID_CTS_ENCL 169
+#define CLKID_CTS_VDAC0_SEL 170
+#define CLKID_CTS_VDAC0 171
+#define CLKID_HDMI_SYS_SEL 172
+#define CLKID_HDMI_SYS_DIV 173
+#define CLKID_HDMI_SYS 174
-#define CLK_NR_CLKS 116
+#define CLK_NR_CLKS 175
/*
* include the CLKID and RESETID that have
diff --git a/drivers/clk/meson/vid-pll-div.c b/drivers/clk/meson/vid-pll-div.c
new file mode 100644
index 000000000000..88af0e282ea0
--- /dev/null
+++ b/drivers/clk/meson/vid-pll-div.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 BayLibre, SAS.
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/clk-provider.h>
+#include "clkc.h"
+
+static inline struct meson_vid_pll_div_data *
+meson_vid_pll_div_data(struct clk_regmap *clk)
+{
+ return (struct meson_vid_pll_div_data *)clk->data;
+}
+
+/*
+ * This vid_pll divider is a fully programmable fractional divider used to
+ * achieve complex video clock rates.
+ *
+ * The table below lists the commonly used fraction values provided by Amlogic.
+ */
+
+struct vid_pll_div {
+ unsigned int shift_val;
+ unsigned int shift_sel;
+ unsigned int divider;
+ unsigned int multiplier;
+};
+
+#define VID_PLL_DIV(_val, _sel, _ft, _fb) \
+ { \
+ .shift_val = (_val), \
+ .shift_sel = (_sel), \
+ .divider = (_ft), \
+ .multiplier = (_fb), \
+ }
+
+static const struct vid_pll_div vid_pll_div_table[] = {
+ VID_PLL_DIV(0x0aaa, 0, 2, 1), /* 2/1 => /2 */
+ VID_PLL_DIV(0x5294, 2, 5, 2), /* 5/2 => /2.5 */
+ VID_PLL_DIV(0x0db6, 0, 3, 1), /* 3/1 => /3 */
+ VID_PLL_DIV(0x36cc, 1, 7, 2), /* 7/2 => /3.5 */
+ VID_PLL_DIV(0x6666, 2, 15, 4), /* 15/4 => /3.75 */
+ VID_PLL_DIV(0x0ccc, 0, 4, 1), /* 4/1 => /4 */
+ VID_PLL_DIV(0x739c, 2, 5, 1), /* 5/1 => /5 */
+ VID_PLL_DIV(0x0e38, 0, 6, 1), /* 6/1 => /6 */
+ VID_PLL_DIV(0x0000, 3, 25, 4), /* 25/4 => /6.25 */
+ VID_PLL_DIV(0x3c78, 1, 7, 1), /* 7/1 => /7 */
+ VID_PLL_DIV(0x78f0, 2, 15, 2), /* 15/2 => /7.5 */
+ VID_PLL_DIV(0x0fc0, 0, 12, 1), /* 12/1 => /12 */
+ VID_PLL_DIV(0x3f80, 1, 14, 1), /* 14/1 => /14 */
+ VID_PLL_DIV(0x7f80, 2, 15, 1), /* 15/1 => /15 */
+};
+
+#define to_meson_vid_pll_div(_hw) \
+ container_of(_hw, struct meson_vid_pll_div, hw)
+
+static const struct vid_pll_div *_get_table_val(unsigned int shift_val,
+ unsigned int shift_sel)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vid_pll_div_table); i++) {
+ if (vid_pll_div_table[i].shift_val == shift_val &&
+ vid_pll_div_table[i].shift_sel == shift_sel)
+ return &vid_pll_div_table[i];
+ }
+
+ return NULL;
+}
+
+static unsigned long meson_vid_pll_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_vid_pll_div_data *pll_div = meson_vid_pll_div_data(clk);
+ const struct vid_pll_div *div;
+
+ div = _get_table_val(meson_parm_read(clk->map, &pll_div->val),
+ meson_parm_read(clk->map, &pll_div->sel));
+ if (!div || !div->divider) {
+ pr_info("%s: Invalid config value for vid_pll_div\n", __func__);
+ return parent_rate;
+ }
+
+ return DIV_ROUND_UP_ULL(parent_rate * div->multiplier, div->divider);
+}
+
+const struct clk_ops meson_vid_pll_div_ro_ops = {
+ .recalc_rate = meson_vid_pll_div_recalc_rate,
+};
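
Each table entry above scales the parent rate by multiplier/divider, so e.g. the
25/4 entry implements the /6.25 division noted in its comment. A standalone sanity
check of the same math as meson_vid_pll_div_recalc_rate(); the userspace
DIV_ROUND_UP_ULL stand-in and the 2.97 GHz parent rate are assumptions for
illustration only:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's DIV_ROUND_UP_ULL() */
#define DIV_ROUND_UP_ULL(x, d)	(((x) + (d) - 1) / (d))

int main(void)
{
	uint64_t parent_rate = 2970000000ULL;		/* assumed hdmi_pll rate */
	unsigned int multiplier = 4, divider = 25;	/* the "25/4 => /6.25" entry */

	/* same computation as meson_vid_pll_div_recalc_rate() */
	printf("%llu Hz\n", (unsigned long long)
	       DIV_ROUND_UP_ULL(parent_rate * multiplier, divider));
	/* prints 475200000 Hz, i.e. 2.97 GHz / 6.25 */
	return 0;
}
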
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
index ad8d483a35cd..ca7d37e2c7be 100644
--- a/drivers/clk/mmp/clk.c
+++ b/drivers/clk/mmp/clk.c
@@ -183,7 +183,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
pr_err("CLK %d has invalid pointer %p\n", id, clk);
return;
}
- if (id > unit->nr_clks) {
+ if (id >= unit->nr_clks) {
pr_err("CLK %d is invalid\n", id);
return;
}
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index 9781b1bf5998..9235a331b588 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -200,11 +200,11 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec,
unsigned int idx = clkspec->args[1];
if (type == CP110_CLK_TYPE_CORE) {
- if (idx > CP110_MAX_CORE_CLOCKS)
+ if (idx >= CP110_MAX_CORE_CLOCKS)
return ERR_PTR(-EINVAL);
return clk_data->hws[idx];
} else if (type == CP110_CLK_TYPE_GATABLE) {
- if (idx > CP110_MAX_GATABLE_CLOCKS)
+ if (idx >= CP110_MAX_GATABLE_CLOCKS)
return ERR_PTR(-EINVAL);
return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx];
}
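
Both hunks fix the same off-by-one bound check: with N table entries the valid
indices are 0..N-1, so an index equal to N must be rejected as well. A trivial
standalone illustration:

#include <stdio.h>

#define NR_CLKS 4

static const char *names[NR_CLKS] = { "core", "axi", "ahb", "apb" };

static const char *lookup(unsigned int idx)
{
	if (idx >= NR_CLKS)	/* "idx > NR_CLKS" would let idx == 4 through */
		return NULL;
	return names[idx];
}

int main(void)
{
	printf("%s\n", lookup(3) ? lookup(3) : "invalid");	/* apb */
	printf("%s\n", lookup(4) ? lookup(4) : "invalid");	/* invalid */
	return 0;
}
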
diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
index 7e8daab9025b..312c3580187f 100644
--- a/drivers/clk/pistachio/clk-pll.c
+++ b/drivers/clk/pistachio/clk-pll.c
@@ -298,7 +298,7 @@ static unsigned long pll_gf40lp_frac_recalc_rate(struct clk_hw *hw,
return rate;
}
-static struct clk_ops pll_gf40lp_frac_ops = {
+static const struct clk_ops pll_gf40lp_frac_ops = {
.enable = pll_gf40lp_frac_enable,
.disable = pll_gf40lp_frac_disable,
.is_enabled = pll_gf40lp_frac_is_enabled,
@@ -307,7 +307,7 @@ static struct clk_ops pll_gf40lp_frac_ops = {
.set_rate = pll_gf40lp_frac_set_rate,
};
-static struct clk_ops pll_gf40lp_frac_fixed_ops = {
+static const struct clk_ops pll_gf40lp_frac_fixed_ops = {
.enable = pll_gf40lp_frac_enable,
.disable = pll_gf40lp_frac_disable,
.is_enabled = pll_gf40lp_frac_is_enabled,
@@ -430,7 +430,7 @@ static unsigned long pll_gf40lp_laint_recalc_rate(struct clk_hw *hw,
return rate;
}
-static struct clk_ops pll_gf40lp_laint_ops = {
+static const struct clk_ops pll_gf40lp_laint_ops = {
.enable = pll_gf40lp_laint_enable,
.disable = pll_gf40lp_laint_disable,
.is_enabled = pll_gf40lp_laint_is_enabled,
@@ -439,7 +439,7 @@ static struct clk_ops pll_gf40lp_laint_ops = {
.set_rate = pll_gf40lp_laint_set_rate,
};
-static struct clk_ops pll_gf40lp_laint_fixed_ops = {
+static const struct clk_ops pll_gf40lp_laint_fixed_ops = {
.enable = pll_gf40lp_laint_enable,
.disable = pll_gf40lp_laint_disable,
.is_enabled = pll_gf40lp_laint_is_enabled,
diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c
index b80dc9d5855c..42627bf8a09e 100644
--- a/drivers/clk/pxa/clk-pxa.c
+++ b/drivers/clk/pxa/clk-pxa.c
@@ -70,7 +70,7 @@ static unsigned long cken_recalc_rate(struct clk_hw *hw,
return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
}
-static struct clk_ops cken_rate_ops = {
+static const struct clk_ops cken_rate_ops = {
.recalc_rate = cken_recalc_rate,
};
@@ -83,7 +83,7 @@ static u8 cken_get_parent(struct clk_hw *hw)
return pclk->is_in_low_power() ? 0 : 1;
}
-static struct clk_ops cken_mux_ops = {
+static const struct clk_ops cken_mux_ops = {
.get_parent = cken_get_parent,
.set_parent = dummy_clk_set_parent,
};
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index a611531df115..1b1ba54e33dd 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -9,16 +9,17 @@ config QCOM_GDSC
config QCOM_RPMCC
bool
-config COMMON_CLK_QCOM
+menuconfig COMMON_CLK_QCOM
tristate "Support for Qualcomm's clock controllers"
depends on OF
depends on ARCH_QCOM || COMPILE_TEST
select REGMAP_MMIO
select RESET_CONTROLLER
+if COMMON_CLK_QCOM
+
config QCOM_A53PLL
tristate "MSM8916 A53 PLL"
- depends on COMMON_CLK_QCOM
default ARCH_QCOM
help
Support for the A53 PLL on MSM8916 devices. It provides
@@ -28,7 +29,6 @@ config QCOM_A53PLL
config QCOM_CLK_APCS_MSM8916
tristate "MSM8916 APCS Clock Controller"
- depends on COMMON_CLK_QCOM
depends on QCOM_APCS_IPC || COMPILE_TEST
default ARCH_QCOM
help
@@ -39,7 +39,7 @@ config QCOM_CLK_APCS_MSM8916
config QCOM_CLK_RPM
tristate "RPM based Clock Controller"
- depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
+ depends on MFD_QCOM_RPM
select QCOM_RPMCC
help
The RPM (Resource Power Manager) is a dedicated hardware engine for
@@ -52,7 +52,7 @@ config QCOM_CLK_RPM
config QCOM_CLK_SMD_RPM
tristate "RPM over SMD based Clock Controller"
- depends on COMMON_CLK_QCOM && QCOM_SMD_RPM
+ depends on QCOM_SMD_RPM
select QCOM_RPMCC
help
The RPM (Resource Power Manager) is a dedicated hardware engine for
@@ -65,7 +65,7 @@ config QCOM_CLK_SMD_RPM
config QCOM_CLK_RPMH
tristate "RPMh Clock Driver"
- depends on COMMON_CLK_QCOM && QCOM_RPMH
+ depends on QCOM_RPMH
help
RPMh manages shared resources on some Qualcomm Technologies, Inc.
SoCs. It accepts requests from other hardware subsystems via RSC.
@@ -75,7 +75,6 @@ config QCOM_CLK_RPMH
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
select QCOM_GDSC
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on apq8084 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -85,7 +84,6 @@ config APQ_MMCC_8084
tristate "APQ8084 Multimedia Clock Controller"
select APQ_GCC_8084
select QCOM_GDSC
- depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on apq8084 devices.
Say Y if you want to support multimedia devices such as display,
@@ -93,7 +91,6 @@ config APQ_MMCC_8084
config IPQ_GCC_4019
tristate "IPQ4019 Global Clock Controller"
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on ipq4019 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -101,7 +98,6 @@ config IPQ_GCC_4019
config IPQ_GCC_806X
tristate "IPQ806x Global Clock Controller"
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on ipq806x devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -110,7 +106,6 @@ config IPQ_GCC_806X
config IPQ_LCC_806X
tristate "IPQ806x LPASS Clock Controller"
select IPQ_GCC_806X
- depends on COMMON_CLK_QCOM
help
Support for the LPASS clock controller on ipq806x devices.
Say Y if you want to use audio devices such as i2s, pcm,
@@ -118,7 +113,6 @@ config IPQ_LCC_806X
config IPQ_GCC_8074
tristate "IPQ8074 Global Clock Controller"
- depends on COMMON_CLK_QCOM
help
Support for global clock controller on ipq8074 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -127,7 +121,6 @@ config IPQ_GCC_8074
config MSM_GCC_8660
tristate "MSM8660 Global Clock Controller"
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8660 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -136,7 +129,6 @@ config MSM_GCC_8660
config MSM_GCC_8916
tristate "MSM8916 Global Clock Controller"
select QCOM_GDSC
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8916 devices.
Say Y if you want to use devices such as UART, SPI, I2C, USB,
@@ -144,7 +136,6 @@ config MSM_GCC_8916
config MSM_GCC_8960
tristate "APQ8064/MSM8960 Global Clock Controller"
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on apq8064/msm8960 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -153,7 +144,6 @@ config MSM_GCC_8960
config MSM_LCC_8960
tristate "APQ8064/MSM8960 LPASS Clock Controller"
select MSM_GCC_8960
- depends on COMMON_CLK_QCOM
help
Support for the LPASS clock controller on apq8064/msm8960 devices.
Say Y if you want to use audio devices such as i2s, pcm,
@@ -161,7 +151,6 @@ config MSM_LCC_8960
config MDM_GCC_9615
tristate "MDM9615 Global Clock Controller"
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on mdm9615 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -170,7 +159,6 @@ config MDM_GCC_9615
config MDM_LCC_9615
tristate "MDM9615 LPASS Clock Controller"
select MDM_GCC_9615
- depends on COMMON_CLK_QCOM
help
Support for the LPASS clock controller on mdm9615 devices.
Say Y if you want to use audio devices such as i2s, pcm,
@@ -179,7 +167,6 @@ config MDM_LCC_9615
config MSM_MMCC_8960
tristate "MSM8960 Multimedia Clock Controller"
select MSM_GCC_8960
- depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on msm8960 devices.
Say Y if you want to support multimedia devices such as display,
@@ -188,7 +175,6 @@ config MSM_MMCC_8960
config MSM_GCC_8974
tristate "MSM8974 Global Clock Controller"
select QCOM_GDSC
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8974 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -198,7 +184,6 @@ config MSM_MMCC_8974
tristate "MSM8974 Multimedia Clock Controller"
select MSM_GCC_8974
select QCOM_GDSC
- depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on msm8974 devices.
Say Y if you want to support multimedia devices such as display,
@@ -206,7 +191,6 @@ config MSM_MMCC_8974
config MSM_GCC_8994
tristate "MSM8994 Global Clock Controller"
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8994 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -215,7 +199,6 @@ config MSM_GCC_8994
config MSM_GCC_8996
tristate "MSM8996 Global Clock Controller"
select QCOM_GDSC
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8996 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -225,7 +208,6 @@ config MSM_MMCC_8996
tristate "MSM8996 Multimedia Clock Controller"
select MSM_GCC_8996
select QCOM_GDSC
- depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on msm8996 devices.
Say Y if you want to support multimedia devices such as display,
@@ -233,7 +215,6 @@ config MSM_MMCC_8996
config MSM_GCC_8998
tristate "MSM8998 Global Clock Controller"
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8998 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -241,7 +222,6 @@ config MSM_GCC_8998
config QCS_GCC_404
tristate "QCS404 Global Clock Controller"
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on QCS404 devices.
Say Y if you want to use multimedia devices or peripheral
@@ -249,7 +229,6 @@ config QCS_GCC_404
config SDM_CAMCC_845
tristate "SDM845 Camera Clock Controller"
- depends on COMMON_CLK_QCOM
select SDM_GCC_845
help
Support for the camera clock controller on SDM845 devices.
@@ -258,7 +237,6 @@ config SDM_CAMCC_845
config SDM_GCC_660
tristate "SDM660 Global Clock Controller"
select QCOM_GDSC
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on SDM660 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
@@ -267,15 +245,21 @@ config SDM_GCC_660
config SDM_GCC_845
tristate "SDM845 Global Clock Controller"
select QCOM_GDSC
- depends on COMMON_CLK_QCOM
help
Support for the global clock controller on SDM845 devices.
Say Y if you want to use peripheral devices such as UART, SPI,
I2C, USB, UFS, SDCC, PCIe, etc.
+config SDM_GPUCC_845
+ tristate "SDM845 Graphics Clock Controller"
+ select SDM_GCC_845
+ help
+ Support for the graphics clock controller on SDM845 devices.
+ Say Y if you want to support graphics controller devices and
+ functionality such as 3D graphics.
+
config SDM_VIDEOCC_845
tristate "SDM845 Video Clock Controller"
- depends on COMMON_CLK_QCOM
select SDM_GCC_845
select QCOM_GDSC
help
@@ -286,16 +270,23 @@ config SDM_VIDEOCC_845
config SDM_DISPCC_845
tristate "SDM845 Display Clock Controller"
select SDM_GCC_845
- depends on COMMON_CLK_QCOM
help
Support for the display clock controller on Qualcomm Technologies, Inc
SDM845 devices.
Say Y if you want to support display devices and functionality such as
splash screen.
+config SDM_LPASSCC_845
+ tristate "SDM845 Low Power Audio Subsystem (LPAAS) Clock Controller"
+ select SDM_GCC_845
+ help
+ Support for the LPASS clock controller on SDM845 devices.
+ Say Y if you want to use the LPASS branch clocks of the LPASS clock
+ controller to reset the LPASS subsystem.
+
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
- depends on (COMMON_CLK_QCOM && SPMI) || COMPILE_TEST
+ depends on SPMI || COMPILE_TEST
help
This driver supports the clkdiv functionality on the Qualcomm
Technologies, Inc. SPMI PMIC. It configures the frequency of
@@ -304,7 +295,6 @@ config SPMI_PMIC_CLKDIV
config QCOM_HFPLL
tristate "High-Frequency PLL (HFPLL) Clock Controller"
- depends on COMMON_CLK_QCOM
help
Support for the high-frequency PLLs present on Qualcomm devices.
Say Y if you want to support CPU frequency scaling on devices
@@ -312,7 +302,6 @@ config QCOM_HFPLL
config KPSS_XCC
tristate "KPSS Clock Controller"
- depends on COMMON_CLK_QCOM
help
Support for the Krait ACC and GCC clock controllers. Say Y
if you want to support CPU frequency scaling on devices such
@@ -320,8 +309,10 @@ config KPSS_XCC
config KRAITCC
tristate "Krait Clock Controller"
- depends on COMMON_CLK_QCOM && ARM
+ depends on ARM
select KRAIT_CLOCKS
help
Support for the Krait CPU clocks on Qualcomm devices.
Say Y if you want to support CPU frequency scaling.
+
+endif
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 981882e16189..ee8d0698e370 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -46,6 +46,8 @@ obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o
obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o
obj-$(CONFIG_SDM_GCC_660) += gcc-sdm660.o
obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
+obj-$(CONFIG_SDM_GPUCC_845) += gpucc-sdm845.o
+obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c
index b1cc8dbcd327..a6c89a310b18 100644
--- a/drivers/clk/qcom/apcs-msm8916.c
+++ b/drivers/clk/qcom/apcs-msm8916.c
@@ -96,8 +96,8 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
goto err;
}
- ret = of_clk_add_hw_provider(parent->of_node, of_clk_hw_simple_get,
- &a53cc->clkr.hw);
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &a53cc->clkr.hw);
if (ret) {
dev_err(dev, "failed to add clock provider: %d\n", ret);
goto err;
@@ -115,10 +115,8 @@ err:
static int qcom_apcs_msm8916_clk_remove(struct platform_device *pdev)
{
struct clk_regmap_mux_div *a53cc = platform_get_drvdata(pdev);
- struct device *parent = pdev->dev.parent;
clk_notifier_unregister(a53cc->pclk, &a53cc->clk_nb);
- of_clk_del_provider(parent->of_node);
return 0;
}
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 850c02a52248..d3aadaeb2903 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -611,10 +611,55 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
.num_clks = ARRAY_SIZE(msm8996_clks),
};
+/* QCS404 */
+DEFINE_CLK_SMD_RPM_QDSS(qcs404, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
+
+DEFINE_CLK_SMD_RPM(qcs404, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcs404, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+
+DEFINE_CLK_SMD_RPM(qcs404, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcs404, bimc_gpu_clk, bimc_gpu_a_clk, QCOM_SMD_RPM_MEM_CLK, 2);
+
+DEFINE_CLK_SMD_RPM(qcs404, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcs404, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
+
+DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs404, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs404, rf_clk1_pin, rf_clk1_a_pin, 4);
+
+DEFINE_CLK_SMD_RPM_XO_BUFFER(qcs404, ln_bb_clk, ln_bb_a_clk, 8);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs404, ln_bb_clk_pin, ln_bb_clk_a_pin, 8);
+
+static struct clk_smd_rpm *qcs404_clks[] = {
+ [RPM_SMD_QDSS_CLK] = &qcs404_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &qcs404_qdss_a_clk,
+ [RPM_SMD_PNOC_CLK] = &qcs404_pnoc_clk,
+ [RPM_SMD_PNOC_A_CLK] = &qcs404_pnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &qcs404_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &qcs404_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &qcs404_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &qcs404_bimc_a_clk,
+ [RPM_SMD_BIMC_GPU_CLK] = &qcs404_bimc_gpu_clk,
+ [RPM_SMD_BIMC_GPU_A_CLK] = &qcs404_bimc_gpu_a_clk,
+ [RPM_SMD_QPIC_CLK] = &qcs404_qpic_clk,
+ [RPM_SMD_QPIC_CLK_A] = &qcs404_qpic_a_clk,
+ [RPM_SMD_CE1_CLK] = &qcs404_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &qcs404_ce1_a_clk,
+ [RPM_SMD_RF_CLK1] = &qcs404_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &qcs404_rf_clk1_a,
+ [RPM_SMD_LN_BB_CLK] = &qcs404_ln_bb_clk,
+ [RPM_SMD_LN_BB_A_CLK] = &qcs404_ln_bb_a_clk,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
+ .clks = qcs404_clks,
+ .num_clks = ARRAY_SIZE(qcs404_clks),
+};
+
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 },
{ .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 },
{ .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
+ { .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 },
{ }
};
MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index db9b2471ac40..0a48ed56833b 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -191,6 +191,22 @@ int qcom_cc_register_sleep_clk(struct device *dev)
}
EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
+/* Drop 'protected-clocks' from the list of clocks to register */
+static void qcom_cc_drop_protected(struct device *dev, struct qcom_cc *cc)
+{
+ struct device_node *np = dev->of_node;
+ struct property *prop;
+ const __be32 *p;
+ u32 i;
+
+ of_property_for_each_u32(np, "protected-clocks", prop, p, i) {
+ if (i >= cc->num_rclks)
+ continue;
+
+ cc->rclks[i] = NULL;
+ }
+}
+
static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -251,6 +267,8 @@ int qcom_cc_really_probe(struct platform_device *pdev,
cc->rclks = rclks;
cc->num_rclks = num_clks;
+ qcom_cc_drop_protected(dev, cc);
+
for (i = 0; i < num_clks; i++) {
if (!rclks[i])
continue;
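
qcom_cc_drop_protected() NULLs the regmap-clock slots named by a firmware-reserved
"protected-clocks" DT property, and the registration loop above then skips them. A
minimal standalone sketch of the same drop-by-index pattern, with invented data
standing in for the DT property values and the rclks array:

#include <stdio.h>
#include <stddef.h>

#define NUM_RCLKS 4

int main(void)
{
	/* stand-ins: rclks[] mirrors cc->rclks, prot[] mirrors the u32
	 * values read from a "protected-clocks" DT property */
	const char *rclks[NUM_RCLKS] = { "gcc_a", "gcc_b", "gcc_c", "gcc_d" };
	unsigned int prot[] = { 1, 3, 99 };	/* 99 is out of range */

	for (size_t i = 0; i < sizeof(prot) / sizeof(prot[0]); i++) {
		if (prot[i] >= NUM_RCLKS)
			continue;	/* ignore bogus indices, like the driver */
		rclks[prot[i]] = NULL;
	}

	for (size_t i = 0; i < NUM_RCLKS; i++)
		printf("%zu: %s\n", i, rclks[i] ? rclks[i] : "(protected)");
	return 0;
}
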
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index ac2b0aa1e8b5..7d9647cc29f9 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -544,7 +544,11 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
};
static const struct freq_tbl ftbl_gcc_blsp1_qup1_6_spi_apps_clk[] = {
+ F(100000, P_XO, 16, 2, 24),
+ F(250000, P_XO, 16, 5, 24),
+ F(500000, P_XO, 8, 5, 24),
F(960000, P_XO, 10, 1, 2),
+ F(1000000, P_XO, 4, 5, 24),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
F(16000000, P_GPLL0, 10, 1, 5),
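
The new low SPI rates all derive from the 19.2 MHz XO as rate = xo / pre_div * m / n,
reading the F() fields in their human-readable form. A quick standalone check under
that assumption:

#include <stdio.h>

int main(void)
{
	const unsigned long xo = 19200000;	/* assumed XO rate */
	const struct { unsigned int div, m, n; } f[] = {
		{ 16, 2, 24 },	/* 100000 */
		{ 16, 5, 24 },	/* 250000 */
		{  8, 5, 24 },	/* 500000 */
		{  4, 5, 24 },	/* 1000000 */
	};

	for (unsigned int i = 0; i < sizeof(f) / sizeof(f[0]); i++)
		printf("%lu Hz\n", xo / f[i].div * f[i].m / f[i].n);
	return 0;
}
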
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 9f0ae403d5f5..1b779396e04f 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -117,6 +117,17 @@ static const char * const gcc_parent_names_5[] = {
"core_bi_pll_test_se",
};
+static struct clk_fixed_factor xo = {
+ .mult = 1,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "xo",
+ .parent_names = (const char *[]){ "xo_board" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
static struct pll_vco fabia_vco[] = {
{ 250000000, 2000000000, 0 },
{ 125000000, 1000000000, 1 },
@@ -1964,19 +1975,6 @@ static struct clk_branch gcc_hmss_at_clk = {
},
};
-static struct clk_branch gcc_hmss_dvm_bus_clk = {
- .halt_reg = 0x4808c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x4808c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_hmss_dvm_bus_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_hmss_rbcpr_clk = {
.halt_reg = 0x48008,
.halt_check = BRANCH_HALT,
@@ -2007,32 +2005,6 @@ static struct clk_branch gcc_hmss_trig_clk = {
},
};
-static struct clk_branch gcc_lpass_at_clk = {
- .halt_reg = 0x47020,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x47020,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_lpass_at_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_lpass_trig_clk = {
- .halt_reg = 0x4701c,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x4701c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_lpass_trig_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
.halt_reg = 0x9004,
.halt_check = BRANCH_HALT,
@@ -2042,6 +2014,12 @@ static struct clk_branch gcc_mmss_noc_cfg_ahb_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_mmss_noc_cfg_ahb_clk",
.ops = &clk_branch2_ops,
+ /*
+ * Any access to mmss depends on this clock.
+ * Gating this clock has been shown to crash the system
+ * when mmssnoc_axi_rpm_clk is inited in rpmcc.
+ */
+ .flags = CLK_IS_CRITICAL,
},
},
};
@@ -2401,7 +2379,7 @@ static struct clk_branch gcc_ufs_phy_aux_clk = {
static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
.halt_reg = 0x75014,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x75014,
.enable_mask = BIT(0),
@@ -2414,7 +2392,7 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
.halt_reg = 0x7605c,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x7605c,
.enable_mask = BIT(0),
@@ -2427,7 +2405,7 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.halt_reg = 0x75010,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x75010,
.enable_mask = BIT(0),
@@ -2541,6 +2519,76 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
},
};
+static struct clk_branch gcc_hdmi_clkref_clk = {
+ .halt_reg = 0x88000,
+ .clkr = {
+ .enable_reg = 0x88000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_hdmi_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_clkref_clk = {
+ .halt_reg = 0x88004,
+ .clkr = {
+ .enable_reg = 0x88004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_clkref_clk = {
+ .halt_reg = 0x88008,
+ .clkr = {
+ .enable_reg = 0x88008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_clkref_clk = {
+ .halt_reg = 0x8800c,
+ .clkr = {
+ .enable_reg = 0x8800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx1_usb2_clkref_clk = {
+ .halt_reg = 0x88014,
+ .clkr = {
+ .enable_reg = 0x88014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx1_usb2_clkref_clk",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc pcie_0_gdsc = {
.gdscr = 0x6b004,
.gds_hw_ctrl = 0x0,
@@ -2653,11 +2701,8 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
[GCC_HMSS_AHB_CLK] = &gcc_hmss_ahb_clk.clkr,
[GCC_HMSS_AT_CLK] = &gcc_hmss_at_clk.clkr,
- [GCC_HMSS_DVM_BUS_CLK] = &gcc_hmss_dvm_bus_clk.clkr,
[GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr,
[GCC_HMSS_TRIG_CLK] = &gcc_hmss_trig_clk.clkr,
- [GCC_LPASS_AT_CLK] = &gcc_lpass_at_clk.clkr,
- [GCC_LPASS_TRIG_CLK] = &gcc_lpass_trig_clk.clkr,
[GCC_MMSS_NOC_CFG_AHB_CLK] = &gcc_mmss_noc_cfg_ahb_clk.clkr,
[GCC_MMSS_QM_AHB_CLK] = &gcc_mmss_qm_ahb_clk.clkr,
[GCC_MMSS_QM_CORE_CLK] = &gcc_mmss_qm_core_clk.clkr,
@@ -2733,6 +2778,11 @@ static struct clk_regmap *gcc_msm8998_clocks[] = {
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
[USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+ [GCC_HDMI_CLKREF_CLK] = &gcc_hdmi_clkref_clk.clkr,
+ [GCC_UFS_CLKREF_CLK] = &gcc_ufs_clkref_clk.clkr,
+ [GCC_USB3_CLKREF_CLK] = &gcc_usb3_clkref_clk.clkr,
+ [GCC_PCIE_CLKREF_CLK] = &gcc_pcie_clkref_clk.clkr,
+ [GCC_RX1_USB2_CLKREF_CLK] = &gcc_rx1_usb2_clkref_clk.clkr,
};
static struct gdsc *gcc_msm8998_gdscs[] = {
@@ -2742,25 +2792,114 @@ static struct gdsc *gcc_msm8998_gdscs[] = {
};
static const struct qcom_reset_map gcc_msm8998_resets[] = {
- [GCC_BLSP1_QUP1_BCR] = { 0x102400 },
- [GCC_BLSP1_QUP2_BCR] = { 0x110592 },
- [GCC_BLSP1_QUP3_BCR] = { 0x118784 },
- [GCC_BLSP1_QUP4_BCR] = { 0x126976 },
- [GCC_BLSP1_QUP5_BCR] = { 0x135168 },
- [GCC_BLSP1_QUP6_BCR] = { 0x143360 },
- [GCC_BLSP2_QUP1_BCR] = { 0x155648 },
- [GCC_BLSP2_QUP2_BCR] = { 0x163840 },
- [GCC_BLSP2_QUP3_BCR] = { 0x172032 },
- [GCC_BLSP2_QUP4_BCR] = { 0x180224 },
- [GCC_BLSP2_QUP5_BCR] = { 0x188416 },
- [GCC_BLSP2_QUP6_BCR] = { 0x196608 },
- [GCC_PCIE_0_BCR] = { 0x438272 },
- [GCC_PDM_BCR] = { 0x208896 },
- [GCC_SDCC2_BCR] = { 0x81920 },
- [GCC_SDCC4_BCR] = { 0x90112 },
- [GCC_TSIF_BCR] = { 0x221184 },
- [GCC_UFS_BCR] = { 0x479232 },
- [GCC_USB_30_BCR] = { 0x61440 },
+ [GCC_BLSP1_QUP1_BCR] = { 0x19000 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x1b000 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x1d000 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x1f000 },
+ [GCC_BLSP1_QUP5_BCR] = { 0x21000 },
+ [GCC_BLSP1_QUP6_BCR] = { 0x23000 },
+ [GCC_BLSP2_QUP1_BCR] = { 0x26000 },
+ [GCC_BLSP2_QUP2_BCR] = { 0x28000 },
+ [GCC_BLSP2_QUP3_BCR] = { 0x2a000 },
+ [GCC_BLSP2_QUP4_BCR] = { 0x2c000 },
+ [GCC_BLSP2_QUP5_BCR] = { 0x2e000 },
+ [GCC_BLSP2_QUP6_BCR] = { 0x30000 },
+ [GCC_PCIE_0_BCR] = { 0x6b000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_SDCC2_BCR] = { 0x14000 },
+ [GCC_SDCC4_BCR] = { 0x16000 },
+ [GCC_TSIF_BCR] = { 0x36000 },
+ [GCC_UFS_BCR] = { 0x75000 },
+ [GCC_USB_30_BCR] = { 0xf000 },
+ [GCC_SYSTEM_NOC_BCR] = { 0x4000 },
+ [GCC_CONFIG_NOC_BCR] = { 0x5000 },
+ [GCC_AHB2PHY_EAST_BCR] = { 0x7000 },
+ [GCC_IMEM_BCR] = { 0x8000 },
+ [GCC_PIMEM_BCR] = { 0xa000 },
+ [GCC_MMSS_BCR] = { 0xb000 },
+ [GCC_QDSS_BCR] = { 0xc000 },
+ [GCC_WCSS_BCR] = { 0x11000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 },
+ [GCC_BLSP1_BCR] = { 0x17000 },
+ [GCC_BLSP1_UART1_BCR] = { 0x1a000 },
+ [GCC_BLSP1_UART2_BCR] = { 0x1c000 },
+ [GCC_BLSP1_UART3_BCR] = { 0x1e000 },
+ [GCC_CM_PHY_REFGEN1_BCR] = { 0x22000 },
+ [GCC_CM_PHY_REFGEN2_BCR] = { 0x24000 },
+ [GCC_BLSP2_BCR] = { 0x25000 },
+ [GCC_BLSP2_UART1_BCR] = { 0x27000 },
+ [GCC_BLSP2_UART2_BCR] = { 0x29000 },
+ [GCC_BLSP2_UART3_BCR] = { 0x2b000 },
+ [GCC_SRAM_SENSOR_BCR] = { 0x2d000 },
+ [GCC_PRNG_BCR] = { 0x34000 },
+ [GCC_TSIF_0_RESET] = { 0x36024 },
+ [GCC_TSIF_1_RESET] = { 0x36028 },
+ [GCC_TCSR_BCR] = { 0x37000 },
+ [GCC_BOOT_ROM_BCR] = { 0x38000 },
+ [GCC_MSG_RAM_BCR] = { 0x39000 },
+ [GCC_TLMM_BCR] = { 0x3a000 },
+ [GCC_MPM_BCR] = { 0x3b000 },
+ [GCC_SEC_CTRL_BCR] = { 0x3d000 },
+ [GCC_SPMI_BCR] = { 0x3f000 },
+ [GCC_SPDM_BCR] = { 0x40000 },
+ [GCC_CE1_BCR] = { 0x41000 },
+ [GCC_BIMC_BCR] = { 0x44000 },
+ [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x49000 },
+ [GCC_SNOC_BUS_TIMEOUT1_BCR] = { 0x49008 },
+ [GCC_SNOC_BUS_TIMEOUT3_BCR] = { 0x49010 },
+ [GCC_SNOC_BUS_TIMEOUT_EXTREF_BCR] = { 0x49018 },
+ [GCC_PNOC_BUS_TIMEOUT0_BCR] = { 0x4a000 },
+ [GCC_CNOC_PERIPH_BUS_TIMEOUT1_BCR] = { 0x4a004 },
+ [GCC_CNOC_PERIPH_BUS_TIMEOUT2_BCR] = { 0x4a00c },
+ [GCC_CNOC_BUS_TIMEOUT0_BCR] = { 0x4b000 },
+ [GCC_CNOC_BUS_TIMEOUT1_BCR] = { 0x4b008 },
+ [GCC_CNOC_BUS_TIMEOUT2_BCR] = { 0x4b010 },
+ [GCC_CNOC_BUS_TIMEOUT3_BCR] = { 0x4b018 },
+ [GCC_CNOC_BUS_TIMEOUT4_BCR] = { 0x4b020 },
+ [GCC_CNOC_BUS_TIMEOUT5_BCR] = { 0x4b028 },
+ [GCC_CNOC_BUS_TIMEOUT6_BCR] = { 0x4b030 },
+ [GCC_CNOC_BUS_TIMEOUT7_BCR] = { 0x4b038 },
+ [GCC_APB2JTAG_BCR] = { 0x4c000 },
+ [GCC_RBCPR_CX_BCR] = { 0x4e000 },
+ [GCC_RBCPR_MX_BCR] = { 0x4f000 },
+ [GCC_USB3_PHY_BCR] = { 0x50020 },
+ [GCC_USB3PHY_PHY_BCR] = { 0x50024 },
+ [GCC_USB3_DP_PHY_BCR] = { 0x50028 },
+ [GCC_SSC_BCR] = { 0x63000 },
+ [GCC_SSC_RESET] = { 0x63020 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x6c01c },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_NOCSR_COM_PHY_BCR] = { 0x6f00c },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f010 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x6f014 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_SPSS_BCR] = { 0x72000 },
+ [GCC_OBT_ODT_BCR] = { 0x73000 },
+ [GCC_VS_BCR] = { 0x7a000 },
+ [GCC_MSS_VS_RESET] = { 0x7a100 },
+ [GCC_GPU_VS_RESET] = { 0x7a104 },
+ [GCC_APC0_VS_RESET] = { 0x7a108 },
+ [GCC_APC1_VS_RESET] = { 0x7a10c },
+ [GCC_CNOC_BUS_TIMEOUT8_BCR] = { 0x80000 },
+ [GCC_CNOC_BUS_TIMEOUT9_BCR] = { 0x80008 },
+ [GCC_CNOC_BUS_TIMEOUT10_BCR] = { 0x80010 },
+ [GCC_CNOC_BUS_TIMEOUT11_BCR] = { 0x80018 },
+ [GCC_CNOC_BUS_TIMEOUT12_BCR] = { 0x80020 },
+ [GCC_CNOC_BUS_TIMEOUT13_BCR] = { 0x80028 },
+ [GCC_CNOC_BUS_TIMEOUT14_BCR] = { 0x80030 },
+ [GCC_CNOC_BUS_TIMEOUT_EXTREF_BCR] = { 0x80038 },
+ [GCC_AGGRE1_NOC_BCR] = { 0x82000 },
+ [GCC_AGGRE2_NOC_BCR] = { 0x83000 },
+ [GCC_DCC_BCR] = { 0x84000 },
+ [GCC_QREFS_VBG_CAL_BCR] = { 0x88028 },
+ [GCC_IPA_BCR] = { 0x89000 },
+ [GCC_GLM_BCR] = { 0x8b000 },
+ [GCC_SKL_BCR] = { 0x8c000 },
+ [GCC_MSMPU_BCR] = { 0x8d000 },
};
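
The replaced reset offsets appear to be the intended hex values accidentally written
as decimal: every old constant is exactly the decimal reading of its new hex
counterpart. Spot-checking a few pairs:

#include <stdio.h>

int main(void)
{
	/* old (decimal) value == new (hex) offset? */
	printf("%d\n", 102400 == 0x19000);	/* GCC_BLSP1_QUP1_BCR: 1 */
	printf("%d\n", 110592 == 0x1b000);	/* GCC_BLSP1_QUP2_BCR: 1 */
	printf("%d\n", 438272 == 0x6b000);	/* GCC_PCIE_0_BCR:     1 */
	printf("%d\n", 61440 == 0xf000);	/* GCC_USB_30_BCR:     1 */
	return 0;
}
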
static const struct regmap_config gcc_msm8998_regmap_config = {
@@ -2798,6 +2937,10 @@ static int gcc_msm8998_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_clk_hw_register(&pdev->dev, &xo.hw);
+ if (ret)
+ return ret;
+
return qcom_cc_really_probe(pdev, &gcc_msm8998_desc, regmap);
}
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index ef1b267cb058..64da032bb9ed 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -297,7 +297,7 @@ static struct clk_alpha_pll gpll0_out_main = {
.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
.parent_names = (const char *[])
- { "gpll0_sleep_clk_src" },
+ { "cxo" },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index f133b7f5652f..c782e62dd98b 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -3153,6 +3153,37 @@ static struct clk_branch gcc_cpuss_gnoc_clk = {
},
};
+/* TODO: Remove after DTS is updated to protect these */
+#ifdef CONFIG_SDM_LPASSCC_845
+static struct clk_branch gcc_lpass_q6_axi_clk = {
+ .halt_reg = 0x47000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_q6_axi_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_lpass_sway_clk = {
+ .halt_reg = 0x47008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_lpass_sway_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+#endif
+
static struct gdsc pcie_0_gdsc = {
.gdscr = 0x6b004,
.pd = {
@@ -3453,6 +3484,10 @@ static struct clk_regmap *gcc_sdm845_clocks[] = {
[GCC_QSPI_CORE_CLK_SRC] = &gcc_qspi_core_clk_src.clkr,
[GCC_QSPI_CORE_CLK] = &gcc_qspi_core_clk.clkr,
[GCC_QSPI_CNOC_PERIPH_AHB_CLK] = &gcc_qspi_cnoc_periph_ahb_clk.clkr,
+#ifdef CONFIG_SDM_LPASSCC_845
+ [GCC_LPASS_Q6_AXI_CLK] = &gcc_lpass_q6_axi_clk.clkr,
+ [GCC_LPASS_SWAY_CLK] = &gcc_lpass_sway_clk.clkr,
+#endif
};
static const struct qcom_reset_map gcc_sdm845_resets[] = {
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index a077133c7ce3..dd63aa36b092 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -350,8 +350,10 @@ static int gdsc_init(struct gdsc *sc)
else
gdsc_clear_mem_on(sc);
- sc->pd.power_off = gdsc_disable;
- sc->pd.power_on = gdsc_enable;
+ if (!sc->pd.power_off)
+ sc->pd.power_off = gdsc_disable;
+ if (!sc->pd.power_on)
+ sc->pd.power_on = gdsc_enable;
pm_genpd_init(&sc->pd, NULL, !on);
return 0;
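
This turns the genpd callbacks into overridable defaults; the gpucc-sdm845 driver
added below relies on it to install gx_gdsc_enable() as a dummy power_on while
keeping the stock disable path. The idiom in isolation, as a standalone sketch with
invented names:

#include <stdio.h>

struct pd {
	int (*power_on)(void);
	int (*power_off)(void);
};

static int default_on(void)  { return 0; }
static int default_off(void) { return 0; }
static int dummy_on(void)    { return 0; }	/* cf. gx_gdsc_enable() */

static void pd_init(struct pd *pd)
{
	/* only fill in callbacks the descriptor left unset */
	if (!pd->power_on)
		pd->power_on = default_on;
	if (!pd->power_off)
		pd->power_off = default_off;
}

int main(void)
{
	struct pd gx = { .power_on = dummy_on };

	pd_init(&gx);
	printf("%d\n", gx.power_on == dummy_on);	/* 1: override kept */
	printf("%d\n", gx.power_off == default_off);	/* 1: default used */
	return 0;
}
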
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
new file mode 100644
index 000000000000..e40efba1bf7d
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-sdm845.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "gdsc.h"
+
+#define CX_GMU_CBCR_SLEEP_MASK 0xf
+#define CX_GMU_CBCR_SLEEP_SHIFT 4
+#define CX_GMU_CBCR_WAKE_MASK 0xf
+#define CX_GMU_CBCR_WAKE_SHIFT 8
+#define CLK_DIS_WAIT_SHIFT 12
+#define CLK_DIS_WAIT_MASK (0xf << CLK_DIS_WAIT_SHIFT)
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL1_OUT_EVEN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_ODD,
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_0[] = {
+ "bi_tcxo",
+ "gpu_cc_pll1",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src",
+ "core_bi_pll_test_se",
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1a,
+ .alpha = 0xaab,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_names = gpu_cc_parent_names_0,
+ .num_parents = 5,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_names = (const char *[]){
+ "gpu_cc_gmu_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gpu_cx_gdsc = {
+ .gdscr = 0x106c,
+ .gds_hw_ctrl = 0x1540,
+ .pd = {
+ .name = "gpu_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+/*
+ * On SDM845 the GPU GX domain is *almost* entirely controlled by the GMU
+ * running in the CX domain, so the CPU doesn't need to know anything about
+ * the GX domain EXCEPT...
+ *
+ * Hardware constraints dictate that the GX be powered down before the CX. If
+ * the GMU crashes, it could leave the GX on. In order to successfully bring
+ * back the device, the CPU needs to disable the GX headswitch. There being
+ * no sane way to reach in and touch that register from deep inside the GPU
+ * driver, we need to set up the infrastructure to ensure that the GX is off
+ * during this super special case. We do this by
+ * defining a GX gdsc with a dummy enable function and a "default" disable
+ * function.
+ *
+ * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
+ * driver. During power up, nothing will happen from the CPU (and the GMU
+ * will power up normally), but during power down this ensures that the GX
+ * domain is *really* off - this gives us a semi-standard way of doing what
+ * we need.
+ */
+static int gx_gdsc_enable(struct generic_pm_domain *domain)
+{
+ /* Do nothing but give genpd the impression that we were successful */
+ return 0;
+}
+
+static struct gdsc gpu_gx_gdsc = {
+ .gdscr = 0x100c,
+ .clamp_io_ctrl = 0x1508,
+ .pd = {
+ .name = "gpu_gx_gdsc",
+ .power_on = gx_gdsc_enable,
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR,
+};
+
+static struct clk_regmap *gpu_cc_sdm845_clocks[] = {
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+};
+
+static struct gdsc *gpu_cc_sdm845_gdscs[] = {
+ [GPU_CX_GDSC] = &gpu_cx_gdsc,
+ [GPU_GX_GDSC] = &gpu_gx_gdsc,
+};
+
+static const struct regmap_config gpu_cc_sdm845_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8008,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_sdm845_desc = {
+ .config = &gpu_cc_sdm845_regmap_config,
+ .clks = gpu_cc_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_sdm845_clocks),
+ .gdscs = gpu_cc_sdm845_gdscs,
+ .num_gdscs = ARRAY_SIZE(gpu_cc_sdm845_gdscs),
+};
+
+static const struct of_device_id gpu_cc_sdm845_match_table[] = {
+ { .compatible = "qcom,sdm845-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_sdm845_match_table);
+
+static int gpu_cc_sdm845_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ unsigned int value, mask;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_sdm845_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_fabia_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /*
+ * Configure gpu_cc_cx_gmu_clk with recommended
+ * wakeup/sleep settings
+ */
+ mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+ mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+ value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
+ regmap_update_bits(regmap, 0x1098, mask, value);
+
+ /* Configure clk_dis_wait for gpu_cx_gdsc */
+ regmap_update_bits(regmap, 0x106c, CLK_DIS_WAIT_MASK,
+ 8 << CLK_DIS_WAIT_SHIFT);
+
+ return qcom_cc_really_probe(pdev, &gpu_cc_sdm845_desc, regmap);
+}
+
+static struct platform_driver gpu_cc_sdm845_driver = {
+ .probe = gpu_cc_sdm845_probe,
+ .driver = {
+ .name = "sdm845-gpucc",
+ .of_match_table = gpu_cc_sdm845_match_table,
+ },
+};
+
+static int __init gpu_cc_sdm845_init(void)
+{
+ return platform_driver_register(&gpu_cc_sdm845_driver);
+}
+subsys_initcall(gpu_cc_sdm845_init);
+
+static void __exit gpu_cc_sdm845_exit(void)
+{
+ platform_driver_unregister(&gpu_cc_sdm845_driver);
+}
+module_exit(gpu_cc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI GPUCC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
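
For the register pokes in gpu_cc_sdm845_probe() above, the computed masks and values
work out as follows; a standalone check reusing the same shift definitions:

#include <stdio.h>

#define CX_GMU_CBCR_SLEEP_SHIFT	4
#define CX_GMU_CBCR_WAKE_SHIFT	8
#define CLK_DIS_WAIT_SHIFT	12

int main(void)
{
	unsigned int mask = (0xf << CX_GMU_CBCR_WAKE_SHIFT) |
			    (0xf << CX_GMU_CBCR_SLEEP_SHIFT);
	unsigned int val = (0xf << CX_GMU_CBCR_WAKE_SHIFT) |
			   (0xf << CX_GMU_CBCR_SLEEP_SHIFT);

	printf("gmu  mask=%#x val=%#x\n", mask, val);	/* 0xff0 0xff0 */
	printf("gdsc mask=%#x val=%#x\n",
	       0xf << CLK_DIS_WAIT_SHIFT,		/* 0xf000 */
	       8 << CLK_DIS_WAIT_SHIFT);		/* 0x8000 */
	return 0;
}
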
diff --git a/drivers/clk/qcom/lpasscc-sdm845.c b/drivers/clk/qcom/lpasscc-sdm845.c
new file mode 100644
index 000000000000..e246b99dfbc6
--- /dev/null
+++ b/drivers/clk/qcom/lpasscc-sdm845.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lpass-sdm845.h>
+
+#include "clk-regmap.h"
+#include "clk-branch.h"
+#include "common.h"
+
+static struct clk_branch lpass_q6ss_ahbm_aon_clk = {
+ .halt_reg = 0x12000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x12000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_q6ss_ahbm_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_q6ss_ahbs_aon_clk = {
+ .halt_reg = 0x1f000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x1f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_q6ss_ahbs_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_qdsp6ss_core_clk = {
+ .halt_reg = 0x20,
+ /* CLK_OFF would not toggle until LPASS is out of reset */
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x20,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_qdsp6ss_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_qdsp6ss_xo_clk = {
+ .halt_reg = 0x38,
+ /* CLK_OFF would not toggle until LPASS is out of reset */
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x38,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_qdsp6ss_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_qdsp6ss_sleep_clk = {
+ .halt_reg = 0x3c,
+ /* CLK_OFF would not toggle until LPASS is out of reset */
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x3c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_qdsp6ss_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct regmap_config lpass_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static struct clk_regmap *lpass_cc_sdm845_clocks[] = {
+ [LPASS_Q6SS_AHBM_AON_CLK] = &lpass_q6ss_ahbm_aon_clk.clkr,
+ [LPASS_Q6SS_AHBS_AON_CLK] = &lpass_q6ss_ahbs_aon_clk.clkr,
+};
+
+static const struct qcom_cc_desc lpass_cc_sdm845_desc = {
+ .config = &lpass_regmap_config,
+ .clks = lpass_cc_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(lpass_cc_sdm845_clocks),
+};
+
+static struct clk_regmap *lpass_qdsp6ss_sdm845_clocks[] = {
+ [LPASS_QDSP6SS_XO_CLK] = &lpass_qdsp6ss_xo_clk.clkr,
+ [LPASS_QDSP6SS_SLEEP_CLK] = &lpass_qdsp6ss_sleep_clk.clkr,
+ [LPASS_QDSP6SS_CORE_CLK] = &lpass_qdsp6ss_core_clk.clkr,
+};
+
+static const struct qcom_cc_desc lpass_qdsp6ss_sdm845_desc = {
+ .config = &lpass_regmap_config,
+ .clks = lpass_qdsp6ss_sdm845_clocks,
+ .num_clks = ARRAY_SIZE(lpass_qdsp6ss_sdm845_clocks),
+};
+
+static int lpass_clocks_sdm845_probe(struct platform_device *pdev, int index,
+ const struct qcom_cc_desc *desc)
+{
+ struct regmap *regmap;
+ struct resource *res;
+ void __iomem *base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, desc->config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return qcom_cc_really_probe(pdev, desc, regmap);
+}
+
+static int lpass_cc_sdm845_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc;
+ int ret;
+
+ lpass_regmap_config.name = "cc";
+ desc = &lpass_cc_sdm845_desc;
+
+ ret = lpass_clocks_sdm845_probe(pdev, 0, desc);
+ if (ret)
+ return ret;
+
+ lpass_regmap_config.name = "qdsp6ss";
+ desc = &lpass_qdsp6ss_sdm845_desc;
+
+ return lpass_clocks_sdm845_probe(pdev, 1, desc);
+}
+
+static const struct of_device_id lpass_cc_sdm845_match_table[] = {
+ { .compatible = "qcom,sdm845-lpasscc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_cc_sdm845_match_table);
+
+static struct platform_driver lpass_cc_sdm845_driver = {
+ .probe = lpass_cc_sdm845_probe,
+ .driver = {
+ .name = "sdm845-lpasscc",
+ .of_match_table = lpass_cc_sdm845_match_table,
+ },
+};
+
+static int __init lpass_cc_sdm845_init(void)
+{
+ return platform_driver_register(&lpass_cc_sdm845_driver);
+}
+subsys_initcall(lpass_cc_sdm845_init);
+
+static void __exit lpass_cc_sdm845_exit(void)
+{
+ platform_driver_unregister(&lpass_cc_sdm845_driver);
+}
+module_exit(lpass_cc_sdm845_exit);
+
+MODULE_DESCRIPTION("QTI LPASS_CC SDM845 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 57c934164306..e98a9f5b3c90 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -274,7 +274,7 @@ struct clk * __init cpg_div6_register(const char *name,
/* Register the clock. */
init.name = name;
init.ops = &cpg_div6_clock_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
init.parent_names = parent_names;
init.num_parents = valid_parents;
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 1c1768c2cc82..92ece221b0d4 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -158,7 +158,7 @@ static struct clk * __init cpg_mstp_clock_register(const char *name,
init.name = name;
init.ops = &cpg_mstp_clock_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.flags = CLK_SET_RATE_PARENT;
/* INTC-SYS is the module clock of the GIC, and must not be disabled */
if (!strcmp(name, "intc-sys")) {
pr_debug("MSTP %s setting CLK_IS_CRITICAL\n", name);
@@ -280,7 +280,7 @@ int cpg_mstp_attach_dev(struct generic_pm_domain *unused, struct device *dev)
goto found;
/* BSC on r8a73a4/sh73a0 uses zb_clk instead of an mstp clock */
- if (!strcmp(clkspec.np->name, "zb_clk"))
+ if (of_node_name_eq(clkspec.np, "zb_clk"))
goto found;
of_node_put(clkspec.np);
diff --git a/drivers/clk/renesas/r7s9210-cpg-mssr.c b/drivers/clk/renesas/r7s9210-cpg-mssr.c
index 5135f13ec628..57c49fe88295 100644
--- a/drivers/clk/renesas/r7s9210-cpg-mssr.c
+++ b/drivers/clk/renesas/r7s9210-cpg-mssr.c
@@ -87,6 +87,8 @@ static const struct mssr_mod_clk r7s9210_mod_clks[] __initconst = {
DEF_MOD_STB("scif1", 46, R7S9210_CLK_P1C),
DEF_MOD_STB("scif0", 47, R7S9210_CLK_P1C),
+ DEF_MOD_STB("usb1", 60, R7S9210_CLK_B),
+ DEF_MOD_STB("usb0", 61, R7S9210_CLK_B),
DEF_MOD_STB("ether1", 64, R7S9210_CLK_B),
DEF_MOD_STB("ether0", 65, R7S9210_CLK_B),
@@ -98,6 +100,11 @@ static const struct mssr_mod_clk r7s9210_mod_clks[] __initconst = {
DEF_MOD_STB("spi2", 95, R7S9210_CLK_P1),
DEF_MOD_STB("spi1", 96, R7S9210_CLK_P1),
DEF_MOD_STB("spi0", 97, R7S9210_CLK_P1),
+
+ DEF_MOD_STB("sdhi11", 100, R7S9210_CLK_B),
+ DEF_MOD_STB("sdhi10", 101, R7S9210_CLK_B),
+ DEF_MOD_STB("sdhi01", 102, R7S9210_CLK_B),
+ DEF_MOD_STB("sdhi00", 103, R7S9210_CLK_B),
};
/* The clock dividers in the table vary based on DT and register settings */
@@ -148,7 +155,7 @@ static void __init r7s9210_update_clk_table(struct clk *extal_clk,
}
}
-struct clk * __init rza2_cpg_clk_register(struct device *dev,
+static struct clk * __init rza2_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
struct clk **clks, void __iomem *base,
struct raw_notifier_head *notifiers)
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index b0da34217bdf..10e852518870 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -100,6 +100,7 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
DEF_FIXED("cl", R8A774A1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A774A1_CLK_CPEX, CLK_EXTAL, 2, 1),
DEF_DIV6P1("csi0", R8A774A1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
DEF_DIV6P1("mso", R8A774A1_CLK_MSO, CLK_PLL1_DIV4, 0x014),
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index 119c02440726..86842c9fd314 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -104,6 +104,7 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A7795_CLK_CR, CLK_PLL1_DIV4, 2, 1),
DEF_FIXED("cp", R8A7795_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A7795_CLK_CPEX, CLK_EXTAL, 2, 1),
DEF_DIV6P1("canfd", R8A7795_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
DEF_DIV6P1("csi0", R8A7795_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 10567386e6dd..12c455859f2c 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -103,6 +103,7 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A7796_CLK_CPEX, CLK_EXTAL, 2, 1),
DEF_DIV6P1("canfd", R8A7796_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
DEF_DIV6P1("csi0", R8A7796_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index 1fcc411502da..eb1cca58a1e1 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -100,6 +100,7 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A77965_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A77965_CLK_CPEX, CLK_EXTAL, 2, 1),
DEF_DIV6P1("canfd", R8A77965_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
DEF_DIV6P1("csi0", R8A77965_CLK_CSI0, CLK_PLL1_DIV4, 0x00c),
diff --git a/drivers/clk/renesas/r8a77970-cpg-mssr.c b/drivers/clk/renesas/r8a77970-cpg-mssr.c
index 2015e45543e9..cbed3769a100 100644
--- a/drivers/clk/renesas/r8a77970-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77970-cpg-mssr.c
@@ -91,8 +91,12 @@ static const struct cpg_core_clk r8a77970_core_clks[] __initconst = {
CLK_PLL1_DIV2),
DEF_BASE("sd0", R8A77970_CLK_SD0, CLK_TYPE_R8A77970_SD0, CLK_PLL1_DIV2),
+ DEF_FIXED("rpc", R8A77970_CLK_RPC, CLK_PLL1_DIV2, 5, 1),
+ DEF_FIXED("rpcd2", R8A77970_CLK_RPCD2, CLK_PLL1_DIV2, 10, 1),
+
DEF_FIXED("cl", R8A77970_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A77970_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A77970_CLK_CPEX, CLK_EXTAL, 2, 1),
DEF_DIV6P1("canfd", R8A77970_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
DEF_DIV6P1("mso", R8A77970_CLK_MSO, CLK_PLL1_DIV4, 0x014),
@@ -152,6 +156,7 @@ static const struct mssr_mod_clk r8a77970_mod_clks[] __initconst = {
DEF_MOD("gpio1", 911, R8A77970_CLK_CP),
DEF_MOD("gpio0", 912, R8A77970_CLK_CP),
DEF_MOD("can-fd", 914, R8A77970_CLK_S2D2),
+ DEF_MOD("rpc-if", 917, R8A77970_CLK_RPC),
DEF_MOD("i2c4", 927, R8A77970_CLK_S2D2),
DEF_MOD("i2c3", 928, R8A77970_CLK_S2D2),
DEF_MOD("i2c2", 929, R8A77970_CLK_S2D2),
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
index 9eb80180eea0..9a278c75c918 100644
--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -183,8 +183,8 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = {
DEF_MOD("ehci0", 703, R8A77990_CLK_S3D4),
DEF_MOD("hsusb", 704, R8A77990_CLK_S3D4),
DEF_MOD("csi40", 716, R8A77990_CLK_CSI0),
- DEF_MOD("du1", 723, R8A77990_CLK_S2D1),
- DEF_MOD("du0", 724, R8A77990_CLK_S2D1),
+ DEF_MOD("du1", 723, R8A77990_CLK_S1D1),
+ DEF_MOD("du0", 724, R8A77990_CLK_S1D1),
DEF_MOD("lvds", 727, R8A77990_CLK_S2D1),
DEF_MOD("vin5", 806, R8A77990_CLK_S1D2),
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index 47e60e3dbe05..eee3874865a9 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -22,7 +22,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R8A77995_CLK_CP,
+ LAST_DT_CORE_CLK = R8A77995_CLK_CPEX,
/* External Input Clocks */
CLK_EXTAL,
@@ -42,7 +42,6 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
- CLK_SSPSRC,
CLK_RINT,
CLK_OCO,
@@ -93,6 +92,7 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_FIXED("cl", R8A77995_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("cp", R8A77995_CLK_CP, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A77995_CLK_CPEX, CLK_EXTAL, 4, 1),
DEF_DIV6_RO("osc", R8A77995_CLK_OSC, CLK_EXTAL, CPG_RCKCR, 8),
@@ -146,12 +146,9 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = {
DEF_MOD("vspbs", 627, R8A77995_CLK_S0D1),
DEF_MOD("ehci0", 703, R8A77995_CLK_S3D2),
DEF_MOD("hsusb", 704, R8A77995_CLK_S3D2),
- DEF_MOD("du1", 723, R8A77995_CLK_S2D1),
- DEF_MOD("du0", 724, R8A77995_CLK_S2D1),
+ DEF_MOD("du1", 723, R8A77995_CLK_S1D1),
+ DEF_MOD("du0", 724, R8A77995_CLK_S1D1),
DEF_MOD("lvds", 727, R8A77995_CLK_S2D1),
- DEF_MOD("vin7", 804, R8A77995_CLK_S1D2),
- DEF_MOD("vin6", 805, R8A77995_CLK_S1D2),
- DEF_MOD("vin5", 806, R8A77995_CLK_S1D2),
DEF_MOD("vin4", 807, R8A77995_CLK_S1D2),
DEF_MOD("etheravb", 812, R8A77995_CLK_S3D2),
DEF_MOD("imr0", 823, R8A77995_CLK_S1D2),
@@ -194,14 +191,14 @@ static const unsigned int r8a77995_crit_mod_clks[] __initconst = {
* MD19 EXTAL (MHz) PLL0 PLL1 PLL3
*--------------------------------------------------------------------
* 0 48 x 1 x250/4 x100/3 x100/3
- * 1 48 x 1 x250/4 x100/3 x116/6
+ * 1 48 x 1 x250/4 x100/3 x58/3
*/
#define CPG_PLL_CONFIG_INDEX(md) (((md) & BIT(19)) >> 19)
static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[2] __initconst = {
/* EXTAL div PLL1 mult/div PLL3 mult/div */
{ 1, 100, 3, 100, 3, },
- { 1, 100, 3, 116, 6, },
+ { 1, 100, 3, 58, 3, },
};
static int __init r8a77995_cpg_mssr_init(struct device *dev)
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 6d2b56891559..658cb11b6f55 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -424,7 +424,7 @@ r9a06g032_register_gate(struct r9a06g032_priv *clocks,
init.name = desc->name;
init.ops = &r9a06g032_clk_gate_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.flags = CLK_SET_RATE_PARENT;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
@@ -595,7 +595,7 @@ r9a06g032_register_div(struct r9a06g032_priv *clocks,
init.name = desc->name;
init.ops = &r9a06g032_clk_div_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.flags = CLK_SET_RATE_PARENT;
init.parent_names = parent_name ? &parent_name : NULL;
init.num_parents = parent_name ? 1 : 0;
@@ -683,7 +683,7 @@ r9a06g032_register_bitsel(struct r9a06g032_priv *clocks,
init.name = desc->name;
init.ops = &clk_bitselect_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.flags = CLK_SET_RATE_PARENT;
init.parent_names = names;
init.num_parents = 2;
@@ -777,7 +777,7 @@ r9a06g032_register_dualgate(struct r9a06g032_priv *clocks,
init.name = desc->name;
init.ops = &r9a06g032_clk_dualgate_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.flags = CLK_SET_RATE_PARENT;
init.parent_names = &parent_name;
init.num_parents = 1;
g->hw.init = &init;
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 4ba38f98cc7b..be2ccbd6d623 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -232,16 +232,20 @@ struct sd_clock {
* sd_srcfc sd_fc div
* stp_hck stp_ck (div) (div) = sd_srcfc x sd_fc
*-------------------------------------------------------------------
- * 0 0 0 (1) 1 (4) 4
- * 0 0 1 (2) 1 (4) 8
- * 1 0 2 (4) 1 (4) 16
- * 1 0 3 (8) 1 (4) 32
+ * 0 0 0 (1) 1 (4) 4 : SDR104 / HS200 / HS400 (8 TAP)
+ * 0 0 1 (2) 1 (4) 8 : SDR50
+ * 1 0 2 (4) 1 (4) 16 : HS / SDR25
+ * 1 0 3 (8) 1 (4) 32 : NS / SDR12
* 1 0 4 (16) 1 (4) 64
* 0 0 0 (1) 0 (2) 2
- * 0 0 1 (2) 0 (2) 4
+ * 0 0 1 (2) 0 (2) 4 : SDR104 / HS200 / HS400 (4 TAP)
* 1 0 2 (4) 0 (2) 8
* 1 0 3 (8) 0 (2) 16
* 1 0 4 (16) 0 (2) 32
+ *
+ * NOTE: There is a quirk option to ignore the first row of the dividers
+ * table when searching for suitable settings. This is because HS400 on
+ * early ES versions of H3 and M3-W requires a specific setting to work.
*/
static const struct sd_div_table cpg_sd_div_table[] = {
/* CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) */
@@ -352,6 +356,12 @@ static const struct clk_ops cpg_sd_clock_ops = {
.set_rate = cpg_sd_clock_set_rate,
};
+static u32 cpg_quirks __initdata;
+
+#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
+#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
+#define SD_SKIP_FIRST BIT(2) /* Skip first clock in SD table */
+
static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
void __iomem *base, const char *parent_name,
struct raw_notifier_head *notifiers)
@@ -360,7 +370,7 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
struct sd_clock *clock;
struct clk *clk;
unsigned int i;
- u32 sd_fc;
+ u32 val;
clock = kzalloc(sizeof(*clock), GFP_KERNEL);
if (!clock)
@@ -368,7 +378,7 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
init.name = core->name;
init.ops = &cpg_sd_clock_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.flags = CLK_SET_RATE_PARENT;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -377,17 +387,14 @@ static struct clk * __init cpg_sd_clk_register(const struct cpg_core_clk *core,
clock->div_table = cpg_sd_div_table;
clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
- sd_fc = readl(clock->csn.reg) & CPG_SD_FC_MASK;
- for (i = 0; i < clock->div_num; i++)
- if (sd_fc == (clock->div_table[i].val & CPG_SD_FC_MASK))
- break;
-
- if (WARN_ON(i >= clock->div_num)) {
- kfree(clock);
- return ERR_PTR(-EINVAL);
+ if (cpg_quirks & SD_SKIP_FIRST) {
+ clock->div_table++;
+ clock->div_num--;
}
- clock->cur_div_idx = i;
+ val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
+ val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
+ writel(val, clock->csn.reg);
clock->div_max = clock->div_table[0].div;
clock->div_min = clock->div_max;
@@ -412,23 +419,27 @@ free_clock:
static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
-static u32 cpg_quirks __initdata;
-
-#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
-#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
{
.soc_id = "r8a7795", .revision = "ES1.0",
- .data = (void *)(PLL_ERRATA | RCKCR_CKSEL),
+ .data = (void *)(PLL_ERRATA | RCKCR_CKSEL | SD_SKIP_FIRST),
},
{
.soc_id = "r8a7795", .revision = "ES1.*",
- .data = (void *)RCKCR_CKSEL,
+ .data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
+ },
+ {
+ .soc_id = "r8a7795", .revision = "ES2.0",
+ .data = (void *)SD_SKIP_FIRST,
},
{
.soc_id = "r8a7796", .revision = "ES1.0",
- .data = (void *)RCKCR_CKSEL,
+ .data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
+ },
+ {
+ .soc_id = "r8a7796", .revision = "ES1.1",
+ .data = (void *)SD_SKIP_FIRST,
},
{ /* sentinel */ }
};
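For readers tracing the SD_SKIP_FIRST plumbing: cpg_quirks_match[] is a standard soc_device_attribute table, and elsewhere in this driver's init path (outside this hunk) the matching entry's .data is decoded back into the cpg_quirks bitmask. A hedged sketch of that idiom:

/* Hedged sketch -- the real call site is not shown in this hunk.
 * soc_device_match() comes from <linux/sys_soc.h>.
 */
static void __init example_detect_quirks(void)
{
	const struct soc_device_attribute *attr;

	attr = soc_device_match(cpg_quirks_match);
	if (attr)
		cpg_quirks = (uintptr_t)attr->data; /* stored as (void *) above */
}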
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index f7bb817420b4..30df0dc853f0 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -412,7 +412,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
init.name = mod->name;
init.ops = &cpg_mstp_clock_ops;
- init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
+ init.flags = CLK_SET_RATE_PARENT;
for (i = 0; i < info->num_crit_mod_clks; i++)
if (id == info->crit_mod_clks[i]) {
dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n",
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index fa25e35ce7d5..7ea20341e870 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -362,8 +362,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(2), 5, GFLAGS),
MUX(SCLK_MAC, "sclk_macref", mux_sclk_macref_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(21), 4, 1, MFLAGS),
- GATE(0, "sclk_mac_lbtest", "sclk_macref",
- RK2928_CLKGATE_CON(2), 12, 0, GFLAGS),
+ GATE(0, "sclk_mac_lbtest", "sclk_macref", 0,
+ RK2928_CLKGATE_CON(2), 12, GFLAGS),
COMPOSITE(0, "hsadc_src", mux_pll_src_gpll_cpll_p, 0,
RK2928_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
@@ -382,7 +382,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 13, GFLAGS),
- COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pll", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(9), 0,
RK2928_CLKGATE_CON(0), 14, GFLAGS,
&common_spdif_fracmux),
@@ -391,8 +391,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
* Clock-Architecture Diagram 4
*/
- GATE(SCLK_SMC, "sclk_smc", "hclk_peri",
- RK2928_CLKGATE_CON(2), 4, 0, GFLAGS),
+ GATE(SCLK_SMC, "sclk_smc", "hclk_peri", 0,
+ RK2928_CLKGATE_CON(2), 4, GFLAGS),
COMPOSITE_NOMUX(SCLK_SPI0, "sclk_spi0", "pclk_peri", 0,
RK2928_CLKSEL_CON(25), 0, 7, DFLAGS,
@@ -757,7 +757,8 @@ static const char *const rk3188_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_cpu",
"pclk_peri",
- "hclk_cpubus"
+ "hclk_cpubus",
+ "hclk_vio_bus",
};
static struct rockchip_clk_provider *__init rk3188_common_clk_init(struct device_node *np)
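A note on the two GATE() changes above: they realign arguments rather than change behaviour. The Rockchip branch macro takes a flags argument between the parent name and the gate register, and the broken entries had omitted it, shifting every later argument by one slot. Paraphrased shape of the macro (see drivers/clk/rockchip/clk.h for the authoritative definition):

/*
 *   GATE(id, name, parent_name, flags, gate_reg, bit, gate_flags)
 *
 * With 'flags' missing, RK2928_CLKGATE_CON(2) was parsed as flags,
 * 12 as the gate register, and a stray 0 as the bit; inserting the
 * explicit 0 flags argument restores the intended meaning.
 */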
diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
index 2c5426607790..faa94adb2a37 100644
--- a/drivers/clk/rockchip/clk-rk3328.c
+++ b/drivers/clk/rockchip/clk-rk3328.c
@@ -392,7 +392,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
RK3328_CLKGATE_CON(1), 5, GFLAGS,
&rk3328_i2s1_fracmux),
GATE(SCLK_I2S1, "clk_i2s1", "i2s1_pre", CLK_SET_RATE_PARENT,
- RK3328_CLKGATE_CON(0), 6, GFLAGS),
+ RK3328_CLKGATE_CON(1), 6, GFLAGS),
COMPOSITE_NODIV(SCLK_I2S1_OUT, "i2s1_out", mux_i2s1out_p, 0,
RK3328_CLKSEL_CON(8), 12, 1, MFLAGS,
RK3328_CLKGATE_CON(1), 7, GFLAGS),
@@ -804,7 +804,7 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = {
GATE(PCLK_USB3_GRF, "pclk_usb3_grf", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 2, GFLAGS),
GATE(PCLK_USB2_GRF, "pclk_usb2_grf", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 14, GFLAGS),
GATE(0, "pclk_ddrphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 13, GFLAGS),
- GATE(0, "pclk_acodecphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 5, GFLAGS),
+ GATE(PCLK_ACODECPHY, "pclk_acodecphy", "pclk_phy_pre", 0, RK3328_CLKGATE_CON(17), 5, GFLAGS),
GATE(PCLK_HDMIPHY, "pclk_hdmiphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 7, GFLAGS),
GATE(0, "pclk_vdacphy", "pclk_phy_pre", CLK_IGNORE_UNUSED, RK3328_CLKGATE_CON(17), 8, GFLAGS),
GATE(0, "pclk_phy_niu", "pclk_phy_pre", 0, RK3328_CLKGATE_CON(15), 15, GFLAGS),
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
index 0d92f3e5e3d9..82f8ae22fd34 100644
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -105,7 +105,7 @@ static struct clk_hw *s3c24xx_register_clkout(struct device *dev,
init.name = name;
init.ops = &s3c24xx_clkout_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
init.parent_names = parent_names;
init.num_parents = num_parents;
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 918ba3164da9..ca8d5350f94e 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -210,7 +210,7 @@ static struct clk *clk_register_flexgen(const char *name,
init.name = name;
init.ops = &flexgen_ops;
- init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE | flexgen_flags;
+ init.flags = CLK_GET_RATE_NOCACHE | flexgen_flags;
init.parent_names = parent_names;
init.num_parents = num_parents;
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index cfa000007622..946ceb14dbf7 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -404,7 +404,7 @@ static struct clk * __init st_clk_register_quadfs_pll(
init.name = name;
init.ops = quadfs->pll_ops;
- init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
+ init.flags = CLK_GET_RATE_NOCACHE;
init.parent_names = &parent_name;
init.num_parents = 1;
@@ -843,7 +843,7 @@ static struct clk * __init st_clk_register_quadfs_fsynth(
init.name = name;
init.ops = &st_quadfs_ops;
- init.flags = flags | CLK_GET_RATE_NOCACHE | CLK_IS_BASIC;
+ init.flags = flags | CLK_GET_RATE_NOCACHE;
init.parent_names = &parent_name;
init.num_parents = 1;
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 7a7106dc80bf..6930348ce843 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -613,7 +613,7 @@ static struct clk * __init clkgen_pll_register(const char *parent_name,
init.name = clk_name;
init.ops = pll_data->ops;
- init.flags = pll_flags | CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
+ init.flags = pll_flags | CLK_GET_RATE_NOCACHE;
init.parent_names = &parent_name;
init.num_parents = 1;
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 826674d090fd..ecd1b6b2bfaf 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -6,6 +6,11 @@ config SUNXI_CCU
if SUNXI_CCU
+config SUNIV_F1C100S_CCU
+ bool "Support for the Allwinner newer F1C100s CCU"
+ default MACH_SUNIV
+ depends on MACH_SUNIV || COMPILE_TEST
+
config SUN50I_A64_CCU
bool "Support for the Allwinner A64 CCU"
default ARM64 && ARCH_SUNXI
@@ -63,6 +68,7 @@ config SUN8I_V3S_CCU
config SUN8I_DE2_CCU
bool "Support for the Allwinner SoCs DE2 CCU"
+ default MACH_SUN8I || (ARM64 && ARCH_SUNXI)
config SUN8I_R40_CCU
bool "Support for the Allwinner R40 CCU"
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 49454700f2e5..4c7bee883f2f 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -21,6 +21,7 @@ obj-y += ccu_nm.o
obj-y += ccu_mp.o
# SoC support
+obj-$(CONFIG_SUNIV_F1C100S_CCU) += ccu-suniv-f1c100s.o
obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
obj-$(CONFIG_SUN50I_H6_CCU) += ccu-sun50i-h6.o
obj-$(CONFIG_SUN50I_H6_R_CCU) += ccu-sun50i-h6-r.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 5f80eb018014..932836d26e2b 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -51,18 +51,29 @@ static struct ccu_nkmp pll_cpux_clk = {
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hardcode the
+ * variable divider to 1, which means the clock rates will no longer
+ * match the clock names.
*/
#define SUN50I_A64_PLL_AUDIO_REG 0x008
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
- "osc24M", 0x008,
- 8, 7, /* N */
- 0, 5, /* M */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ pll_audio_sdm_table, BIT(24),
+ 0x284, BIT(31),
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK_MIN_MAX(pll_video0_clk, "pll-video0",
"osc24M", 0x010,
@@ -162,7 +173,12 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_gpu_clk, "pll-gpu",
#define SUN50I_A64_PLL_MIPI_REG 0x040
static struct ccu_nkm pll_mipi_clk = {
- .enable = BIT(31),
+ /*
+ * Bits 23 and 22 are called "LDO{1,2}_EN" in the SoC's user
+ * manual, and experiments show that the PLL doesn't work with
+ * these bits cleared.
+ */
+ .enable = BIT(31) | BIT(23) | BIT(22),
.lock = BIT(28),
.n = _SUNXI_CCU_MULT(8, 4),
.k = _SUNXI_CCU_MULT_MIN(4, 2, 2),
@@ -554,7 +570,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents,
0x134, 0, 5, 8, 3, BIT(15), 0);
static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
- 0x13c, 16, 3, BIT(31), 0);
+ 0x13c, 16, 3, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
0x140, BIT(31), CLK_SET_RATE_PARENT);
@@ -581,7 +597,7 @@ static const char * const dsi_dphy_parents[] = { "pll-video0", "pll-periph0" };
static const u8 dsi_dphy_table[] = { 0, 2, };
static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(dsi_dphy_clk, "dsi-dphy",
dsi_dphy_parents, dsi_dphy_table,
- 0x168, 0, 4, 8, 2, BIT(31), CLK_SET_RATE_PARENT);
+ 0x168, 0, 4, 8, 2, BIT(15), CLK_SET_RATE_PARENT);
static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT);
@@ -589,9 +605,9 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu",
/* Fixed Factor clocks */
static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0);
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -911,10 +927,10 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN50I_A64_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
- writel(val | (3 << 16), reg + SUN50I_A64_PLL_AUDIO_REG);
+ writel(val | (0 << 16), reg + SUN50I_A64_PLL_AUDIO_REG);
writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
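A worked check of the numbers in this file's audio PLL changes (illustrative, not part of the patch):

/*
 *   44100 Hz * 512 = 22579200 Hz
 *   48000 Hz * 512 = 24576000 Hz
 *
 * i.e. the two SDM table entries are exactly 512x the standard
 * 44.1 kHz and 48 kHz audio base rates, so the common sample rates
 * divide out evenly.  Bits [19:16] of the PLL register hold the
 * variable post-divider encoded as (divider - 1): the old code wrote
 * 3 for /4, the new code writes 0 for /1, matching the fixed-factor
 * change from 4:1 to 1:1 for "pll-audio".
 */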
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index 2193e1495086..139e8389615c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -120,6 +120,8 @@ static struct ccu_nm pll_video0_clk = {
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
.fixed_post_div = 4,
+ .min_rate = 288000000,
+ .max_rate = 2400000000UL,
.common = {
.reg = 0x040,
.features = CCU_FEATURE_FIXED_POSTDIV,
@@ -136,6 +138,8 @@ static struct ccu_nm pll_video1_clk = {
.n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
.m = _SUNXI_CCU_DIV(1, 1), /* input divider */
.fixed_post_div = 4,
+ .min_rate = 288000000,
+ .max_rate = 2400000000UL,
.common = {
.reg = 0x048,
.features = CCU_FEATURE_FIXED_POSTDIV,
@@ -411,7 +415,7 @@ static const char * const mmc_parents[] = { "osc24M", "pll-periph0-2x",
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
0, 4, /* M */
8, 2, /* N */
- 24, 3, /* mux */
+ 24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
@@ -419,7 +423,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830,
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
0, 4, /* M */
8, 2, /* N */
- 24, 3, /* mux */
+ 24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
@@ -427,7 +431,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834,
static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838,
0, 4, /* M */
8, 2, /* N */
- 24, 3, /* mux */
+ 24, 2, /* mux */
BIT(31), /* gate */
2, /* post-div */
0);
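On the three mux-width changes above: mmc_parents has three entries, so a two-bit select field is sufficient, and the arithmetic is simply:

/* Illustrative: a mux field of width w addresses 2^w inputs.
 *
 *   width 2 -> values 0..3, enough for the 3 entries of mmc_parents
 *   width 3 -> would also cover bit 26, which does not belong to the
 *              mux select field in these registers (hence the fix)
 */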
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 13eb5b23c5e7..c7bf814dfd2b 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -51,18 +51,29 @@ static struct ccu_nkmp pll_cpux_clk = {
* the base (2x, 4x and 8x), and one variable divider (the one true
* pll audio).
*
- * We don't have any need for the variable divider for now, so we just
- * hardcode it to match with the clock names
+ * With sigma-delta modulation for fractional-N on the audio PLL,
+ * we have to use specific dividers. This means the variable divider
+ * can no longer be used, as the audio codec requests the exact clock
+ * rates we support through this mechanism. So we now hardcode the
+ * variable divider to 1, which means the clock rates will no longer
+ * match the clock names.
*/
#define SUN8I_A33_PLL_AUDIO_REG 0x008
-static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
- "osc24M", 0x008,
- 8, 7, /* N */
- 0, 5, /* M */
- BIT(31), /* gate */
- BIT(28), /* lock */
- CLK_SET_RATE_UNGATE);
+static struct ccu_sdm_setting pll_audio_sdm_table[] = {
+ { .rate = 22579200, .pattern = 0xc0010d84, .m = 8, .n = 7 },
+ { .rate = 24576000, .pattern = 0xc000ac02, .m = 14, .n = 14 },
+};
+
+static SUNXI_CCU_NM_WITH_SDM_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ pll_audio_sdm_table, BIT(24),
+ 0x284, BIT(31),
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
"osc24M", 0x010,
@@ -366,10 +377,10 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
"pll-audio-2x", "pll-audio" };
static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
- 0x0b0, 16, 2, BIT(31), 0);
+ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
- 0x0b4, 16, 2, BIT(31), 0);
+ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
/* TODO: the parent for most of the USB clocks is not known */
static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
@@ -446,7 +457,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
0x140, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(ac_dig_4x_clk, "ac-dig-4x", "pll-audio-4x",
- 0x140, BIT(30), 0);
+ 0x140, BIT(30), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
0x144, BIT(31), 0);
@@ -576,9 +587,9 @@ static struct ccu_common *sun8i_a33_ccu_clks[] = {
&ats_clk.common,
};
-/* We hardcode the divider to 4 for now */
+/* We hardcode the divider to 1 for now */
static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
- "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
"pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
@@ -781,10 +792,10 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
return;
}
- /* Force the PLL-Audio-1x divider to 4 */
+ /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_A33_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
- writel(val | (3 << 16), reg + SUN8I_A33_PLL_AUDIO_REG);
+ writel(val | (0 << 16), reg + SUN8I_A33_PLL_AUDIO_REG);
/* Force PLL-MIPI to MIPI mode */
val = readl(reg + SUN8I_A33_PLL_MIPI_REG);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index bae5ee67a797..1c9ae0a319c1 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -31,6 +31,8 @@ static SUNXI_CCU_GATE(bus_mixer1_clk, "bus-mixer1", "bus-de",
0x04, BIT(1), 0);
static SUNXI_CCU_GATE(bus_wb_clk, "bus-wb", "bus-de",
0x04, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_rot_clk, "bus-rot", "bus-de",
+ 0x04, BIT(3), 0);
static SUNXI_CCU_GATE(mixer0_clk, "mixer0", "mixer0-div",
0x00, BIT(0), CLK_SET_RATE_PARENT);
@@ -38,6 +40,8 @@ static SUNXI_CCU_GATE(mixer1_clk, "mixer1", "mixer1-div",
0x00, BIT(1), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(wb_clk, "wb", "wb-div",
0x00, BIT(2), CLK_SET_RATE_PARENT);
+static SUNXI_CCU_GATE(rot_clk, "rot", "rot-div",
+ 0x00, BIT(3), CLK_SET_RATE_PARENT);
static SUNXI_CCU_M(mixer0_div_clk, "mixer0-div", "de", 0x0c, 0, 4,
CLK_SET_RATE_PARENT);
@@ -45,6 +49,8 @@ static SUNXI_CCU_M(mixer1_div_clk, "mixer1-div", "de", 0x0c, 4, 4,
CLK_SET_RATE_PARENT);
static SUNXI_CCU_M(wb_div_clk, "wb-div", "de", 0x0c, 8, 4,
CLK_SET_RATE_PARENT);
+static SUNXI_CCU_M(rot_div_clk, "rot-div", "de", 0x0c, 0x0c, 4,
+ CLK_SET_RATE_PARENT);
static SUNXI_CCU_M(mixer0_div_a83_clk, "mixer0-div", "pll-de", 0x0c, 0, 4,
CLK_SET_RATE_PARENT);
@@ -53,6 +59,24 @@ static SUNXI_CCU_M(mixer1_div_a83_clk, "mixer1-div", "pll-de", 0x0c, 4, 4,
static SUNXI_CCU_M(wb_div_a83_clk, "wb-div", "pll-de", 0x0c, 8, 4,
CLK_SET_RATE_PARENT);
+static struct ccu_common *sun50i_h6_de3_clks[] = {
+ &mixer0_clk.common,
+ &mixer1_clk.common,
+ &wb_clk.common,
+
+ &bus_mixer0_clk.common,
+ &bus_mixer1_clk.common,
+ &bus_wb_clk.common,
+
+ &mixer0_div_clk.common,
+ &mixer1_div_clk.common,
+ &wb_div_clk.common,
+
+ &bus_rot_clk.common,
+ &rot_clk.common,
+ &rot_div_clk.common,
+};
+
static struct ccu_common *sun8i_a83t_de2_clks[] = {
&mixer0_clk.common,
&mixer1_clk.common,
@@ -106,7 +130,7 @@ static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = {
[CLK_MIXER1_DIV] = &mixer1_div_a83_clk.common.hw,
[CLK_WB_DIV] = &wb_div_a83_clk.common.hw,
},
- .num = CLK_NUMBER,
+ .num = CLK_NUMBER_WITHOUT_ROT,
};
static struct clk_hw_onecell_data sun8i_h3_de2_hw_clks = {
@@ -123,7 +147,7 @@ static struct clk_hw_onecell_data sun8i_h3_de2_hw_clks = {
[CLK_MIXER1_DIV] = &mixer1_div_clk.common.hw,
[CLK_WB_DIV] = &wb_div_clk.common.hw,
},
- .num = CLK_NUMBER,
+ .num = CLK_NUMBER_WITHOUT_ROT,
};
static struct clk_hw_onecell_data sun8i_v3s_de2_hw_clks = {
@@ -137,7 +161,27 @@ static struct clk_hw_onecell_data sun8i_v3s_de2_hw_clks = {
[CLK_MIXER0_DIV] = &mixer0_div_clk.common.hw,
[CLK_WB_DIV] = &wb_div_clk.common.hw,
},
- .num = CLK_NUMBER,
+ .num = CLK_NUMBER_WITHOUT_ROT,
+};
+
+static struct clk_hw_onecell_data sun50i_h6_de3_hw_clks = {
+ .hws = {
+ [CLK_MIXER0] = &mixer0_clk.common.hw,
+ [CLK_MIXER1] = &mixer1_clk.common.hw,
+ [CLK_WB] = &wb_clk.common.hw,
+ [CLK_ROT] = &rot_clk.common.hw,
+
+ [CLK_BUS_MIXER0] = &bus_mixer0_clk.common.hw,
+ [CLK_BUS_MIXER1] = &bus_mixer1_clk.common.hw,
+ [CLK_BUS_WB] = &bus_wb_clk.common.hw,
+ [CLK_BUS_ROT] = &bus_rot_clk.common.hw,
+
+ [CLK_MIXER0_DIV] = &mixer0_div_clk.common.hw,
+ [CLK_MIXER1_DIV] = &mixer1_div_clk.common.hw,
+ [CLK_WB_DIV] = &wb_div_clk.common.hw,
+ [CLK_ROT_DIV] = &rot_div_clk.common.hw,
+ },
+ .num = CLK_NUMBER_WITH_ROT,
};
static struct ccu_reset_map sun8i_a83t_de2_resets[] = {
@@ -156,6 +200,13 @@ static struct ccu_reset_map sun50i_a64_de2_resets[] = {
[RST_WB] = { 0x08, BIT(2) },
};
+static struct ccu_reset_map sun50i_h6_de3_resets[] = {
+ [RST_MIXER0] = { 0x08, BIT(0) },
+ [RST_MIXER1] = { 0x08, BIT(1) },
+ [RST_WB] = { 0x08, BIT(2) },
+ [RST_ROT] = { 0x08, BIT(3) },
+};
+
static const struct sunxi_ccu_desc sun8i_a83t_de2_clk_desc = {
.ccu_clks = sun8i_a83t_de2_clks,
.num_ccu_clks = ARRAY_SIZE(sun8i_a83t_de2_clks),
@@ -186,6 +237,16 @@ static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = {
.num_resets = ARRAY_SIZE(sun50i_a64_de2_resets),
};
+static const struct sunxi_ccu_desc sun50i_h6_de3_clk_desc = {
+ .ccu_clks = sun50i_h6_de3_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun50i_h6_de3_clks),
+
+ .hw_clks = &sun50i_h6_de3_hw_clks,
+
+ .resets = sun50i_h6_de3_resets,
+ .num_resets = ARRAY_SIZE(sun50i_h6_de3_resets),
+};
+
static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = {
.ccu_clks = sun8i_v3s_de2_clks,
.num_ccu_clks = ARRAY_SIZE(sun8i_v3s_de2_clks),
@@ -296,6 +357,10 @@ static const struct of_device_id sunxi_de2_clk_ids[] = {
.compatible = "allwinner,sun50i-h5-de2-clk",
.data = &sun50i_a64_de2_clk_desc,
},
+ {
+ .compatible = "allwinner,sun50i-h6-de3-clk",
+ .data = &sun50i_h6_de3_clk_desc,
+ },
{ }
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.h b/drivers/clk/sunxi-ng/ccu-sun8i-de2.h
index 530c006e0ae9..fc9c6b4c89a8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.h
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.h
@@ -22,7 +22,9 @@
#define CLK_MIXER0_DIV 3
#define CLK_MIXER1_DIV 4
#define CLK_WB_DIV 5
+#define CLK_ROT_DIV 11
-#define CLK_NUMBER (CLK_WB + 1)
+#define CLK_NUMBER_WITH_ROT (CLK_ROT_DIV + 1)
+#define CLK_NUMBER_WITHOUT_ROT (CLK_WB + 1)
#endif /* _CCU_SUN8I_DE2_H_ */
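The split into two bounds follows from how clk_hw_onecell_data is consumed; a short note:

/* Illustrative: .num in clk_hw_onecell_data is an array bound, and
 * of_clk lookups with index >= .num are rejected, so it must be one
 * past the highest populated index.  DE3 populates the rotation
 * entries up to CLK_ROT_DIV (index 11) and needs the larger bound;
 * the older DE2 variants stop at CLK_WB.
 */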
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index eb5c608428fa..e71e2451c2e3 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -476,12 +476,12 @@ static const char * const csi_sclk_parents[] = { "pll-periph0", "pll-periph1" };
static SUNXI_CCU_M_WITH_MUX_GATE(csi_sclk_clk, "csi-sclk", csi_sclk_parents,
0x134, 16, 4, 24, 3, BIT(31), 0);
-static const char * const csi_mclk_parents[] = { "osc24M", "pll-video", "pll-periph0" };
+static const char * const csi_mclk_parents[] = { "osc24M", "pll-video", "pll-periph1" };
static SUNXI_CCU_M_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents,
0x134, 0, 5, 8, 3, BIT(15), 0);
static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
- 0x13c, 16, 3, BIT(31), 0);
+ 0x13c, 16, 3, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
0x140, BIT(31), CLK_SET_RATE_PARENT);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 582ebd41d20d..a22d11aa38ba 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -1284,6 +1284,9 @@ static struct regmap_config sun8i_r40_ccu_regmap_config = {
.writeable_reg = sun8i_r40_ccu_regmap_accessible_reg,
};
+#define SUN8I_R40_SYS_32K_CLK_REG 0x310
+#define SUN8I_R40_SYS_32K_CLK_KEY (0x16AA << 16)
+
static int sun8i_r40_ccu_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -1312,6 +1315,14 @@ static int sun8i_r40_ccu_probe(struct platform_device *pdev)
val &= ~GENMASK(25, 20);
writel(val, reg + SUN8I_R40_USB_CLK_REG);
+ /*
+ * Force the SYS 32k clock (known as LOSC throughout the CCU) to
+ * be parented to the LOSC output of the RTC module instead of
+ * the divided output of the CCU's internal RC oscillator.
+ */
+ writel(SUN8I_R40_SYS_32K_CLK_KEY | BIT(8),
+ reg + SUN8I_R40_SYS_32K_CLK_REG);
+
regmap = devm_regmap_init_mmio(&pdev->dev, reg,
&sun8i_r40_ccu_regmap_config);
if (IS_ERR(regmap))
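The magic write composes as follows (illustrative):

/*
 *   SUN8I_R40_SYS_32K_CLK_KEY = 0x16AA << 16 = 0x16AA0000
 *   BIT(8)                                   = 0x00000100
 *   value written                            = 0x16AA0100
 *
 * The 0x16AA half-word presumably acts as a write key that must
 * accompany any update of this register, while bit 8 selects the
 * RTC's LOSC output as the 32 kHz parent.
 */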
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
new file mode 100644
index 000000000000..a09dfbe36402
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016 Icenowy Zheng <icenowy@aosc.io>
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+#include "ccu_phase.h"
+
+#include "ccu-suniv-f1c100s.h"
+
+static struct ccu_nkmp pll_cpu_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .m = _SUNXI_CCU_DIV(0, 2),
+ /* MAX is guessed from the BSP table */
+ .p = _SUNXI_CCU_DIV_MAX(16, 2, 4),
+
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT("pll-cpu", "osc24M",
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * The Audio PLL is supposed to have 4 outputs: 3 fixed factors from
+ * the base (2x, 4x and 8x), and one variable divider (the one true
+ * pll audio).
+ *
+ * We don't have any need for the variable divider for now, so we just
+ * hardcode it to match the clock names.
+ */
+#define SUNIV_PLL_AUDIO_REG 0x008
+
+static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_audio_base_clk, "pll-audio-base",
+ "osc24M", 0x008,
+ 8, 7, /* N */
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_video_clk, "pll-video",
+ "osc24M", 0x010,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
+ "osc24M", 0x018,
+ 8, 7, /* N */
+ 0, 4, /* M */
+ BIT(24), /* frac enable */
+ BIT(25), /* frac select */
+ 270000000, /* frac rate 0 */
+ 297000000, /* frac rate 1 */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr0_clk, "pll-ddr",
+ "osc24M", 0x020,
+ 8, 5, /* N */
+ 4, 2, /* K */
+ 0, 2, /* M */
+ BIT(31), /* gate */
+ BIT(28), /* lock */
+ CLK_IS_CRITICAL);
+
+static struct ccu_nk pll_periph_clk = {
+ .enable = BIT(31),
+ .lock = BIT(28),
+ .k = _SUNXI_CCU_MULT(4, 2),
+ .n = _SUNXI_CCU_MULT(8, 5),
+ .common = {
+ .reg = 0x028,
+ .hw.init = CLK_HW_INIT("pll-periph", "osc24M",
+ &ccu_nk_ops, 0),
+ },
+};
+
+static const char * const cpu_parents[] = { "osc32k", "osc24M",
+ "pll-cpu", "pll-cpu" };
+static SUNXI_CCU_MUX(cpu_clk, "cpu", cpu_parents,
+ 0x050, 16, 2, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT);
+
+static const char * const ahb_parents[] = { "osc32k", "osc24M",
+ "cpu", "pll-periph" };
+static const struct ccu_mux_var_prediv ahb_predivs[] = {
+ { .index = 3, .shift = 6, .width = 2 },
+};
+static struct ccu_div ahb_clk = {
+ .div = _SUNXI_CCU_DIV_FLAGS(4, 2, CLK_DIVIDER_POWER_OF_TWO),
+
+ .mux = {
+ .shift = 12,
+ .width = 2,
+
+ .var_predivs = ahb_predivs,
+ .n_var_predivs = ARRAY_SIZE(ahb_predivs),
+ },
+
+ .common = {
+ .reg = 0x054,
+ .features = CCU_FEATURE_VARIABLE_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("ahb",
+ ahb_parents,
+ &ccu_div_ops,
+ 0),
+ },
+};
+
+static struct clk_div_table apb_div_table[] = {
+ { .val = 0, .div = 2 },
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 4 },
+ { .val = 3, .div = 8 },
+ { /* Sentinel */ },
+};
+static SUNXI_CCU_DIV_TABLE(apb_clk, "apb", "ahb",
+ 0x054, 8, 2, apb_div_table, 0);
+
+static SUNXI_CCU_GATE(bus_dma_clk, "bus-dma", "ahb",
+ 0x060, BIT(6), 0);
+static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb",
+ 0x060, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb",
+ 0x060, BIT(9), 0);
+static SUNXI_CCU_GATE(bus_dram_clk, "bus-dram", "ahb",
+ 0x060, BIT(14), 0);
+static SUNXI_CCU_GATE(bus_spi0_clk, "bus-spi0", "ahb",
+ 0x060, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_spi1_clk, "bus-spi1", "ahb",
+ 0x060, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_otg_clk, "bus-otg", "ahb",
+ 0x060, BIT(24), 0);
+
+static SUNXI_CCU_GATE(bus_ve_clk, "bus-ve", "ahb",
+ 0x064, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_lcd_clk, "bus-lcd", "ahb",
+ 0x064, BIT(4), 0);
+static SUNXI_CCU_GATE(bus_deinterlace_clk, "bus-deinterlace", "ahb",
+ 0x064, BIT(5), 0);
+static SUNXI_CCU_GATE(bus_csi_clk, "bus-csi", "ahb",
+ 0x064, BIT(8), 0);
+static SUNXI_CCU_GATE(bus_tvd_clk, "bus-tvd", "ahb",
+ 0x064, BIT(9), 0);
+static SUNXI_CCU_GATE(bus_tve_clk, "bus-tve", "ahb",
+ 0x064, BIT(10), 0);
+static SUNXI_CCU_GATE(bus_de_be_clk, "bus-de-be", "ahb",
+ 0x064, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_de_fe_clk, "bus-de-fe", "ahb",
+ 0x064, BIT(14), 0);
+
+static SUNXI_CCU_GATE(bus_codec_clk, "bus-codec", "apb",
+ 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE(bus_spdif_clk, "bus-spdif", "apb",
+ 0x068, BIT(1), 0);
+static SUNXI_CCU_GATE(bus_ir_clk, "bus-ir", "apb",
+ 0x068, BIT(2), 0);
+static SUNXI_CCU_GATE(bus_rsb_clk, "bus-rsb", "apb",
+ 0x068, BIT(3), 0);
+static SUNXI_CCU_GATE(bus_i2s0_clk, "bus-i2s0", "apb",
+ 0x068, BIT(12), 0);
+static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb",
+ 0x068, BIT(16), 0);
+static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb",
+ 0x068, BIT(17), 0);
+static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb",
+ 0x068, BIT(18), 0);
+static SUNXI_CCU_GATE(bus_pio_clk, "bus-pio", "apb",
+ 0x068, BIT(19), 0);
+static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb",
+ 0x068, BIT(20), 0);
+static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb",
+ 0x068, BIT(21), 0);
+static SUNXI_CCU_GATE(bus_uart2_clk, "bus-uart2", "apb",
+ 0x068, BIT(22), 0);
+
+static const char * const mod0_default_parents[] = { "osc24M", "pll-periph" };
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", mod0_default_parents, 0x088,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc0_sample_clk, "mmc0_sample", "mmc0",
+ 0x088, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc0_output_clk, "mmc0_output", "mmc0",
+ 0x088, 8, 3, 0);
+
+static SUNXI_CCU_MP_WITH_MUX_GATE(mmc1_clk, "mmc1", mod0_default_parents, 0x08c,
+ 0, 4, /* M */
+ 16, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_PHASE(mmc1_sample_clk, "mmc1_sample", "mmc1",
+ 0x08c, 20, 3, 0);
+static SUNXI_CCU_PHASE(mmc1_output_clk, "mmc1_output", "mmc1",
+ 0x08c, 8, 3, 0);
+
+static const char * const i2s_spdif_parents[] = { "pll-audio-8x",
+ "pll-audio-4x",
+ "pll-audio-2x",
+ "pll-audio" };
+
+static SUNXI_CCU_MUX_WITH_GATE(i2s_clk, "i2s", i2s_spdif_parents,
+ 0x0b0, 16, 2, BIT(31), 0);
+
+static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", i2s_spdif_parents,
+ 0x0b4, 16, 2, BIT(31), 0);
+
+/* The BSP header file has a CIR_CFG, but no mod clock uses this definition */
+
+static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
+ 0x0cc, BIT(8), 0);
+
+static SUNXI_CCU_GATE(dram_ve_clk, "dram-ve", "pll-ddr",
+ 0x100, BIT(0), 0);
+static SUNXI_CCU_GATE(dram_csi_clk, "dram-csi", "pll-ddr",
+ 0x100, BIT(1), 0);
+static SUNXI_CCU_GATE(dram_deinterlace_clk, "dram-deinterlace",
+ "pll-ddr", 0x100, BIT(2), 0);
+static SUNXI_CCU_GATE(dram_tvd_clk, "dram-tvd", "pll-ddr",
+ 0x100, BIT(3), 0);
+static SUNXI_CCU_GATE(dram_de_fe_clk, "dram-de-fe", "pll-ddr",
+ 0x100, BIT(24), 0);
+static SUNXI_CCU_GATE(dram_de_be_clk, "dram-de-be", "pll-ddr",
+ 0x100, BIT(26), 0);
+
+static const char * const de_parents[] = { "pll-video", "pll-periph" };
+static const u8 de_table[] = { 0, 2, };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(de_be_clk, "de-be",
+ de_parents, de_table,
+ 0x104, 0, 4, 24, 3, BIT(31), 0);
+
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(de_fe_clk, "de-fe",
+ de_parents, de_table,
+ 0x10c, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const tcon_parents[] = { "pll-video", "pll-video-2x" };
+static const u8 tcon_table[] = { 0, 2, };
+static SUNXI_CCU_MUX_TABLE_WITH_GATE(tcon_clk, "tcon",
+ tcon_parents, tcon_table,
+ 0x118, 24, 3, BIT(31),
+ CLK_SET_RATE_PARENT);
+
+static const char * const deinterlace_parents[] = { "pll-video",
+ "pll-video-2x" };
+static const u8 deinterlace_table[] = { 0, 2, };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(deinterlace_clk, "deinterlace",
+ deinterlace_parents, deinterlace_table,
+ 0x11c, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const tve_clk2_parents[] = { "pll-video",
+ "pll-video-2x" };
+static const u8 tve_clk2_table[] = { 0, 2, };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(tve_clk2_clk, "tve-clk2",
+ tve_clk2_parents, tve_clk2_table,
+ 0x120, 0, 4, 24, 3, BIT(31), 0);
+static SUNXI_CCU_M_WITH_GATE(tve_clk1_clk, "tve-clk1", "tve-clk2",
+ 0x120, 8, 1, BIT(15), 0);
+
+static const char * const tvd_parents[] = { "pll-video", "osc24M",
+ "pll-video-2x" };
+static SUNXI_CCU_M_WITH_MUX_GATE(tvd_clk, "tvd", tvd_parents,
+ 0x124, 0, 4, 24, 3, BIT(31), 0);
+
+static const char * const csi_parents[] = { "pll-video", "osc24M" };
+static const u8 csi_table[] = { 0, 5, };
+static SUNXI_CCU_M_WITH_MUX_TABLE_GATE(csi_clk, "csi", csi_parents, csi_table,
+ 0x120, 0, 4, 8, 3, BIT(15), 0);
+
+/*
+ * TODO: the BSP says the parent is pll-audio, but common sense and
+ * experience suggest it should be pll-ve; pll-ve is not used anywhere
+ * in the BSP code.
+ */
+static SUNXI_CCU_GATE(ve_clk, "ve", "pll-audio", 0x13c, BIT(31), 0);
+
+static SUNXI_CCU_GATE(codec_clk, "codec", "pll-audio", 0x140, BIT(31), 0);
+
+static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M", 0x144, BIT(31), 0);
+
+static struct ccu_common *suniv_ccu_clks[] = {
+ &pll_cpu_clk.common,
+ &pll_audio_base_clk.common,
+ &pll_video_clk.common,
+ &pll_ve_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_periph_clk.common,
+ &cpu_clk.common,
+ &ahb_clk.common,
+ &apb_clk.common,
+ &bus_dma_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_dram_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &bus_otg_clk.common,
+ &bus_ve_clk.common,
+ &bus_lcd_clk.common,
+ &bus_deinterlace_clk.common,
+ &bus_csi_clk.common,
+ &bus_tve_clk.common,
+ &bus_tvd_clk.common,
+ &bus_de_be_clk.common,
+ &bus_de_fe_clk.common,
+ &bus_codec_clk.common,
+ &bus_spdif_clk.common,
+ &bus_ir_clk.common,
+ &bus_rsb_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_pio_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &mmc0_clk.common,
+ &mmc0_sample_clk.common,
+ &mmc0_output_clk.common,
+ &mmc1_clk.common,
+ &mmc1_sample_clk.common,
+ &mmc1_output_clk.common,
+ &i2s_clk.common,
+ &spdif_clk.common,
+ &usb_phy0_clk.common,
+ &dram_ve_clk.common,
+ &dram_csi_clk.common,
+ &dram_deinterlace_clk.common,
+ &dram_tvd_clk.common,
+ &dram_de_fe_clk.common,
+ &dram_de_be_clk.common,
+ &de_be_clk.common,
+ &de_fe_clk.common,
+ &tcon_clk.common,
+ &deinterlace_clk.common,
+ &tve_clk2_clk.common,
+ &tve_clk1_clk.common,
+ &tvd_clk.common,
+ &csi_clk.common,
+ &ve_clk.common,
+ &codec_clk.common,
+ &avs_clk.common,
+};
+
+static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio",
+ "pll-audio-base", 4, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_2x_clk, "pll-audio-2x",
+ "pll-audio-base", 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_4x_clk, "pll-audio-4x",
+ "pll-audio-base", 1, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_audio_8x_clk, "pll-audio-8x",
+ "pll-audio-base", 1, 2, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR(pll_video_2x_clk, "pll-video-2x",
+ "pll-video", 1, 2, 0);
+
+static struct clk_hw_onecell_data suniv_hw_clks = {
+ .hws = {
+ [CLK_PLL_CPU] = &pll_cpu_clk.common.hw,
+ [CLK_PLL_AUDIO_BASE] = &pll_audio_base_clk.common.hw,
+ [CLK_PLL_AUDIO] = &pll_audio_clk.hw,
+ [CLK_PLL_AUDIO_2X] = &pll_audio_2x_clk.hw,
+ [CLK_PLL_AUDIO_4X] = &pll_audio_4x_clk.hw,
+ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
+ [CLK_PLL_VIDEO] = &pll_video_clk.common.hw,
+ [CLK_PLL_VIDEO_2X] = &pll_video_2x_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_PERIPH] = &pll_periph_clk.common.hw,
+ [CLK_CPU] = &cpu_clk.common.hw,
+ [CLK_AHB] = &ahb_clk.common.hw,
+ [CLK_APB] = &apb_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_LCD] = &bus_lcd_clk.common.hw,
+ [CLK_BUS_DEINTERLACE] = &bus_deinterlace_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_BUS_TVD] = &bus_tvd_clk.common.hw,
+ [CLK_BUS_TVE] = &bus_tve_clk.common.hw,
+ [CLK_BUS_DE_BE] = &bus_de_be_clk.common.hw,
+ [CLK_BUS_DE_FE] = &bus_de_fe_clk.common.hw,
+ [CLK_BUS_CODEC] = &bus_codec_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_BUS_IR] = &bus_ir_clk.common.hw,
+ [CLK_BUS_RSB] = &bus_rsb_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_PIO] = &bus_pio_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC0_SAMPLE] = &mmc0_sample_clk.common.hw,
+ [CLK_MMC0_OUTPUT] = &mmc0_output_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC1_SAMPLE] = &mmc1_sample_clk.common.hw,
+ [CLK_MMC1_OUTPUT] = &mmc1_output_clk.common.hw,
+ [CLK_I2S] = &i2s_clk.common.hw,
+ [CLK_SPDIF] = &spdif_clk.common.hw,
+ [CLK_USB_PHY0] = &usb_phy0_clk.common.hw,
+ [CLK_DRAM_VE] = &dram_ve_clk.common.hw,
+ [CLK_DRAM_CSI] = &dram_csi_clk.common.hw,
+ [CLK_DRAM_DEINTERLACE] = &dram_deinterlace_clk.common.hw,
+ [CLK_DRAM_TVD] = &dram_tvd_clk.common.hw,
+ [CLK_DRAM_DE_FE] = &dram_de_fe_clk.common.hw,
+ [CLK_DRAM_DE_BE] = &dram_de_be_clk.common.hw,
+ [CLK_DE_BE] = &de_be_clk.common.hw,
+ [CLK_DE_FE] = &de_fe_clk.common.hw,
+ [CLK_TCON] = &tcon_clk.common.hw,
+ [CLK_DEINTERLACE] = &deinterlace_clk.common.hw,
+ [CLK_TVE2_CLK] = &tve_clk2_clk.common.hw,
+ [CLK_TVE1_CLK] = &tve_clk1_clk.common.hw,
+ [CLK_TVD] = &tvd_clk.common.hw,
+ [CLK_CSI] = &csi_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_CODEC] = &codec_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ },
+ .num = CLK_NUMBER,
+};
+
+static struct ccu_reset_map suniv_ccu_resets[] = {
+ [RST_USB_PHY0] = { 0x0cc, BIT(0) },
+
+ [RST_BUS_DMA] = { 0x2c0, BIT(6) },
+ [RST_BUS_MMC0] = { 0x2c0, BIT(8) },
+ [RST_BUS_MMC1] = { 0x2c0, BIT(9) },
+ [RST_BUS_DRAM] = { 0x2c0, BIT(14) },
+ [RST_BUS_SPI0] = { 0x2c0, BIT(20) },
+ [RST_BUS_SPI1] = { 0x2c0, BIT(21) },
+ [RST_BUS_OTG] = { 0x2c0, BIT(24) },
+ [RST_BUS_VE] = { 0x2c4, BIT(0) },
+ [RST_BUS_LCD] = { 0x2c4, BIT(4) },
+ [RST_BUS_DEINTERLACE] = { 0x2c4, BIT(5) },
+ [RST_BUS_CSI] = { 0x2c4, BIT(8) },
+ [RST_BUS_TVD] = { 0x2c4, BIT(9) },
+ [RST_BUS_TVE] = { 0x2c4, BIT(10) },
+ [RST_BUS_DE_BE] = { 0x2c4, BIT(12) },
+ [RST_BUS_DE_FE] = { 0x2c4, BIT(14) },
+ [RST_BUS_CODEC] = { 0x2d0, BIT(0) },
+ [RST_BUS_SPDIF] = { 0x2d0, BIT(1) },
+ [RST_BUS_IR] = { 0x2d0, BIT(2) },
+ [RST_BUS_RSB] = { 0x2d0, BIT(3) },
+ [RST_BUS_I2S0] = { 0x2d0, BIT(12) },
+ [RST_BUS_I2C0] = { 0x2d0, BIT(16) },
+ [RST_BUS_I2C1] = { 0x2d0, BIT(17) },
+ [RST_BUS_I2C2] = { 0x2d0, BIT(18) },
+ [RST_BUS_UART0] = { 0x2d0, BIT(20) },
+ [RST_BUS_UART1] = { 0x2d0, BIT(21) },
+ [RST_BUS_UART2] = { 0x2d0, BIT(22) },
+};
+
+static const struct sunxi_ccu_desc suniv_ccu_desc = {
+ .ccu_clks = suniv_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(suniv_ccu_clks),
+
+ .hw_clks = &suniv_hw_clks,
+
+ .resets = suniv_ccu_resets,
+ .num_resets = ARRAY_SIZE(suniv_ccu_resets),
+};
+
+static struct ccu_pll_nb suniv_pll_cpu_nb = {
+ .common = &pll_cpu_clk.common,
+ /* copied from pll_cpu_clk */
+ .enable = BIT(31),
+ .lock = BIT(28),
+};
+
+static struct ccu_mux_nb suniv_cpu_nb = {
+ .common = &cpu_clk.common,
+ .cm = &cpu_clk.mux,
+ .delay_us = 1, /* > 8 clock cycles at 24 MHz */
+ .bypass_index = 1, /* index of 24 MHz oscillator */
+};
+
+static void __init suniv_f1c100s_ccu_setup(struct device_node *node)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+ if (IS_ERR(reg)) {
+ pr_err("%pOF: Could not map the clock registers\n", node);
+ return;
+ }
+
+ /* Force the PLL-Audio-1x divider to 4 */
+ val = readl(reg + SUNIV_PLL_AUDIO_REG);
+ val &= ~GENMASK(19, 16);
+ writel(val | (3 << 16), reg + SUNIV_PLL_AUDIO_REG);
+
+ sunxi_ccu_probe(node, reg, &suniv_ccu_desc);
+
+ /* Gate then ungate PLL CPU after any rate changes */
+ ccu_pll_notifier_register(&suniv_pll_cpu_nb);
+
+ /* Reparent CPU during PLL CPU rate changes */
+ ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
+ &suniv_cpu_nb);
+}
+CLK_OF_DECLARE(suniv_f1c100s_ccu, "allwinner,suniv-f1c100s-ccu",
+ suniv_f1c100s_ccu_setup);
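The two notifiers registered at the end of setup implement the usual sunxi safe-reclock dance; a hedged sketch of the sequence (the actual logic lives in the shared ccu notifier helpers):

/*
 *   PRE_RATE_CHANGE:  the cpu mux notifier switches "cpu" over to
 *                     bypass_index (the 24 MHz oscillator) and waits
 *                     delay_us for the switch to settle
 *   ...PLL-CPU is reprogrammed at the new rate...
 *   POST_RATE_CHANGE: the mux notifier switches "cpu" back, and the
 *                     PLL notifier gates then ungates PLL-CPU so it
 *                     relocks cleanly, as the comments above note.
 */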
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h
new file mode 100644
index 000000000000..39d06fed55b2
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright 2017 Icenowy Zheng <icenowy@aosc.io>
+ *
+ */
+
+#ifndef _CCU_SUNIV_F1C100S_H_
+#define _CCU_SUNIV_F1C100S_H_
+
+#include <dt-bindings/clock/suniv-ccu-f1c100s.h>
+#include <dt-bindings/reset/suniv-ccu-f1c100s.h>
+
+#define CLK_PLL_CPU 0
+#define CLK_PLL_AUDIO_BASE 1
+#define CLK_PLL_AUDIO 2
+#define CLK_PLL_AUDIO_2X 3
+#define CLK_PLL_AUDIO_4X 4
+#define CLK_PLL_AUDIO_8X 5
+#define CLK_PLL_VIDEO 6
+#define CLK_PLL_VIDEO_2X 7
+#define CLK_PLL_VE 8
+#define CLK_PLL_DDR0 9
+#define CLK_PLL_PERIPH 10
+
+/* CPU clock is exported */
+
+#define CLK_AHB 12
+#define CLK_APB 13
+
+/* All bus gates, DRAM gates and mod clocks are exported */
+
+#define CLK_NUMBER (CLK_AVS + 1)
+
+#endif /* _CCU_SUNIV_F1C100S_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 5d0af4051737..0357349eb767 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -40,6 +40,61 @@ static void ccu_mp_find_best(unsigned long parent, unsigned long rate,
*p = best_p;
}
+static unsigned long ccu_mp_find_best_with_parent_adj(struct clk_hw *hw,
+ unsigned long *parent,
+ unsigned long rate,
+ unsigned int max_m,
+ unsigned int max_p)
+{
+ unsigned long parent_rate_saved;
+ unsigned long parent_rate, now;
+ unsigned long best_rate = 0;
+ unsigned int _m, _p, div;
+ unsigned long maxdiv;
+
+ parent_rate_saved = *parent;
+
+ /*
+ * The maximum divider we can use without overflowing
+ * unsigned long in rate * m * p below
+ */
+ maxdiv = max_m * max_p;
+ maxdiv = min(ULONG_MAX / rate, maxdiv);
+
+ for (_p = 1; _p <= max_p; _p <<= 1) {
+ for (_m = 1; _m <= max_m; _m++) {
+ div = _m * _p;
+
+ if (div > maxdiv)
+ break;
+
+ if (rate * div == parent_rate_saved) {
+ /*
+ * This is the ideal case: the requested
+ * rate divides evenly from the parent clock
+ * without changing the parent rate, so return
+ * the divider immediately.
+ */
+ *parent = parent_rate_saved;
+ return rate;
+ }
+
+ parent_rate = clk_hw_round_rate(hw, rate * div);
+ now = parent_rate / div;
+
+ if (now <= rate && now > best_rate) {
+ best_rate = now;
+ *parent = parent_rate;
+
+ if (now == rate)
+ return rate;
+ }
+ }
+ }
+
+ return best_rate;
+}
+
static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
struct clk_hw *hw,
unsigned long *parent_rate,
@@ -56,8 +111,13 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
max_m = cmp->m.max ?: 1 << cmp->m.width;
max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
- ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
- rate = *parent_rate / p / m;
+ if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
+ rate = *parent_rate / p / m;
+ } else {
+ rate = ccu_mp_find_best_with_parent_adj(hw, parent_rate, rate,
+ max_m, max_p);
+ }
if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= cmp->fixed_post_div;
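The maxdiv clamp in the new helper exists because the search computes rate * div in unsigned long, which can overflow for large requested rates on 32-bit systems. A standalone sketch of the same guard, in plain C rather than kernel code:

#include <limits.h>

/*
 * Largest divider that keeps rate * div representable in unsigned
 * long; the same clamp ccu_mp_find_best_with_parent_adj() applies
 * before starting its m/p search.
 */
static unsigned long safe_maxdiv(unsigned long rate,
				 unsigned long max_m, unsigned long max_p)
{
	unsigned long maxdiv = max_m * max_p;

	if (maxdiv > ULONG_MAX / rate)	/* rate * maxdiv would overflow */
		maxdiv = ULONG_MAX / rate;

	return maxdiv;
}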
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index 6fe3c14f7b2d..424d8635b053 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -19,6 +19,17 @@ struct _ccu_nm {
unsigned long m, min_m, max_m;
};
+static unsigned long ccu_nm_calc_rate(unsigned long parent,
+ unsigned long n, unsigned long m)
+{
+ u64 rate = parent;
+
+ rate *= n;
+ do_div(rate, m);
+
+ return rate;
+}
+
static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
struct _ccu_nm *nm)
{
@@ -28,7 +39,8 @@ static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
for (_n = nm->min_n; _n <= nm->max_n; _n++) {
for (_m = nm->min_m; _m <= nm->max_m; _m++) {
- unsigned long tmp_rate = parent * _n / _m;
+ unsigned long tmp_rate = ccu_nm_calc_rate(parent,
+ _n, _m);
if (tmp_rate > rate)
continue;
@@ -100,7 +112,7 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
else
- rate = parent_rate * n / m;
+ rate = ccu_nm_calc_rate(parent_rate, n, m);
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= nm->fixed_post_div;
@@ -149,7 +161,7 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
_nm.max_m = nm->m.max ?: 1 << nm->m.width;
ccu_nm_find_best(*parent_rate, rate, &_nm);
- rate = *parent_rate * _nm.n / _nm.m;
+ rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= nm->fixed_post_div;
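ccu_nm_calc_rate() exists so that parent * n is computed in 64 bits before dividing; with a parent in the hundreds of MHz and n in the tens, a 32-bit unsigned long multiply would already wrap. do_div() is the kernel's in-place u64 divide; a plain C sketch of the same widen-then-divide idea (the '/' below stands in for do_div()):

#include <stdint.h>

static unsigned long nm_calc_rate(unsigned long parent,
				  unsigned long n, unsigned long m)
{
	uint64_t rate = (uint64_t)parent * n;	/* widen before multiplying */

	return (unsigned long)(rate / m);	/* do_div() in the patch */
}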
diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
index 92d04ce2dee6..53cdc0ec40f3 100644
--- a/drivers/clk/tegra/clk-audio-sync.c
+++ b/drivers/clk/tegra/clk-audio-sync.c
@@ -55,7 +55,7 @@ const struct clk_ops tegra_clk_sync_source_ops = {
};
struct clk *tegra_clk_register_sync_source(const char *name,
- unsigned long rate, unsigned long max_rate)
+ unsigned long max_rate)
{
struct tegra_clk_sync_source *sync;
struct clk_init_data init;
@@ -67,7 +67,6 @@ struct clk *tegra_clk_register_sync_source(const char *name,
return ERR_PTR(-ENOMEM);
}
- sync->rate = rate;
sync->max_rate = max_rate;
init.ops = &tegra_clk_sync_source_ops;
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index ebb0e1b6bf01..609e363dabf8 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1184,17 +1184,7 @@ static int attr_registers_show(struct seq_file *s, void *data)
return 0;
}
-static int attr_registers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, attr_registers_show, inode->i_private);
-}
-
-static const struct file_operations attr_registers_fops = {
- .open = attr_registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(attr_registers);
static void dfll_debug_init(struct tegra_dfll *td)
{
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 830d1c87fa7c..b50b7460014b 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -590,12 +590,13 @@ static int _calc_rate(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
cfg->n = cfg->output_rate / cfreq;
cfg->cpcon = OUT_OF_TABLE_CPCON;
- if (cfg->m > divm_max(pll) || cfg->n > divn_max(pll) ||
- (1 << p_div) > divp_max(pll)
- || cfg->output_rate > pll->params->vco_max) {
+ if (cfg->m == 0 || cfg->m > divm_max(pll) ||
+ cfg->n > divn_max(pll) || (1 << p_div) > divp_max(pll) ||
+ cfg->output_rate > pll->params->vco_max) {
return -EINVAL;
}
+ cfg->output_rate = cfg->n * DIV_ROUND_UP(parent_rate, cfg->m);
cfg->output_rate >>= p_div;
if (pll->params->pdiv_tohw) {
diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c
index b37cae7af26d..02dd6487d855 100644
--- a/drivers/clk/tegra/clk-tegra-audio.c
+++ b/drivers/clk/tegra/clk-tegra-audio.c
@@ -49,8 +49,6 @@ struct tegra_sync_source_initdata {
#define SYNC(_name) \
{\
.name = #_name,\
- .rate = 24000000,\
- .max_rate = 24000000,\
.clk_id = tegra_clk_ ## _name,\
}
@@ -176,7 +174,7 @@ static void __init tegra_audio_sync_clk_init(void __iomem *clk_base,
void __init tegra_audio_clk_init(void __iomem *clk_base,
void __iomem *pmc_base, struct tegra_clk *tegra_clks,
struct tegra_audio_clk_info *audio_info,
- unsigned int num_plls)
+ unsigned int num_plls, unsigned long sync_max_rate)
{
struct clk *clk;
struct clk **dt_clk;
@@ -221,8 +219,7 @@ void __init tegra_audio_clk_init(void __iomem *clk_base,
if (!dt_clk)
continue;
- clk = tegra_clk_register_sync_source(data->name,
- data->rate, data->max_rate);
+ clk = tegra_clk_register_sync_source(data->name, sync_max_rate);
*dt_clk = clk;
}
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 38c4eb28c8bf..cc5275ec2c01 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -79,7 +79,6 @@
#define CLK_SOURCE_3D 0x158
#define CLK_SOURCE_2D 0x15c
#define CLK_SOURCE_MPE 0x170
-#define CLK_SOURCE_UARTE 0x1c4
#define CLK_SOURCE_VI_SENSOR 0x1a8
#define CLK_SOURCE_VI 0x148
#define CLK_SOURCE_EPP 0x16c
@@ -117,8 +116,6 @@
#define CLK_SOURCE_ISP 0x144
#define CLK_SOURCE_SOR0 0x414
#define CLK_SOURCE_DPAUX 0x418
-#define CLK_SOURCE_SATA_OOB 0x420
-#define CLK_SOURCE_SATA 0x424
#define CLK_SOURCE_ENTROPY 0x628
#define CLK_SOURCE_VI_SENSOR2 0x658
#define CLK_SOURCE_HDMI_AUDIO 0x668
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 1824f014202b..625d11091330 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1190,6 +1190,13 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA114_CLK_XUSB_FALCON_SRC, TEGRA114_CLK_PLL_P, 204000000, 0 },
{ TEGRA114_CLK_XUSB_HOST_SRC, TEGRA114_CLK_PLL_P, 102000000, 0 },
{ TEGRA114_CLK_VDE, TEGRA114_CLK_CLK_MAX, 600000000, 0 },
+ { TEGRA114_CLK_SPDIF_IN_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA114_CLK_I2S0_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA114_CLK_I2S1_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA114_CLK_I2S2_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA114_CLK_I2S3_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA114_CLK_I2S4_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA114_CLK_VIMCLK_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
/* must be the last entry */
{ TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0 },
};
@@ -1362,7 +1369,7 @@ static void __init tegra114_clock_init(struct device_node *np)
tegra114_periph_clk_init(clk_base, pmc_base);
tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks,
tegra114_audio_plls,
- ARRAY_SIZE(tegra114_audio_plls));
+ ARRAY_SIZE(tegra114_audio_plls), 24000000);
tegra_pmc_clk_init(pmc_base, tegra114_clks);
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
&pll_x_params);
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index b6cf28ca2ed2..df0018f7bf7e 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1291,6 +1291,13 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
{ TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1 },
{ TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0 },
{ TEGRA124_CLK_VIC03, TEGRA124_CLK_PLL_C3, 0, 0 },
+ { TEGRA124_CLK_SPDIF_IN_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA124_CLK_I2S0_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA124_CLK_I2S1_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA124_CLK_I2S2_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA124_CLK_I2S3_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA124_CLK_I2S4_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA124_CLK_VIMCLK_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
/* must be the last entry */
{ TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 },
};
@@ -1455,7 +1462,7 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
tegra124_periph_clk_init(clk_base, pmc_base);
tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks,
tegra124_audio_plls,
- ARRAY_SIZE(tegra124_audio_plls));
+ ARRAY_SIZE(tegra124_audio_plls), 24576000);
tegra_pmc_clk_init(pmc_base, tegra124_clks);
/* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index cc857d4d4a86..c71b61162a32 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -578,7 +578,6 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
[tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
[tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
[tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
- [tegra_clk_emc] = { .dt_id = TEGRA20_CLK_EMC, .present = true },
};
static unsigned long tegra20_clk_measure_input_freq(void)
@@ -799,6 +798,41 @@ static struct tegra_periph_init_data tegra_periph_nodiv_clk_list[] = {
TEGRA_INIT_DATA_NODIV("disp2", mux_pllpdc_clkm, CLK_SOURCE_DISP2, 30, 2, 26, 0, TEGRA20_CLK_DISP2),
};
+static void __init tegra20_emc_clk_init(void)
+{
+ const u32 use_pllm_ud = BIT(29);
+ struct clk *clk;
+ u32 emc_reg;
+
+ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
+ ARRAY_SIZE(mux_pllmcp_clkm),
+ CLK_SET_RATE_NO_REPARENT,
+ clk_base + CLK_SOURCE_EMC,
+ 30, 2, 0, &emc_lock);
+
+ clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
+ &emc_lock);
+ clks[TEGRA20_CLK_MC] = clk;
+
+ /* un-divided pll_m_out0 is currently unsupported */
+ emc_reg = readl_relaxed(clk_base + CLK_SOURCE_EMC);
+ if (emc_reg & use_pllm_ud) {
+ pr_err("%s: un-divided PllM_out0 used as clock source\n",
+ __func__);
+ return;
+ }
+
+ /*
+ * Note that the 'emc_mux' source and the 'emc' rate shouldn't be
+ * changed at the same time due to a HW bug; this can't happen here
+ * because 'emc_mux' and 'emc' are defined as distinct clocks.
+ */
+ clk = tegra_clk_register_divider("emc", "emc_mux",
+ clk_base + CLK_SOURCE_EMC, CLK_IS_CRITICAL,
+ TEGRA_DIVIDER_INT, 0, 8, 1, &emc_lock);
+ clks[TEGRA20_CLK_EMC] = clk;
+}
+
static void __init tegra20_periph_clk_init(void)
{
struct tegra_periph_init_data *data;
@@ -812,15 +846,7 @@ static void __init tegra20_periph_clk_init(void)
clks[TEGRA20_CLK_AC97] = clk;
/* emc */
- clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
- ARRAY_SIZE(mux_pllmcp_clkm),
- CLK_SET_RATE_NO_REPARENT,
- clk_base + CLK_SOURCE_EMC,
- 30, 2, 0, &emc_lock);
-
- clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
- &emc_lock);
- clks[TEGRA20_CLK_MC] = clk;
+ tegra20_emc_clk_init();
/* dsi */
clk = tegra_clk_register_periph_gate("dsi", "pll_d", 0, clk_base, 0,
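For orientation, the calls above carve the single CLK_SOURCE_EMC register into non-overlapping fields (positions read off the registration arguments; the macro names below are illustrative, not from the driver):

/* CLK_SOURCE_EMC layout implied by the registrations above */
#define EMC_SRC_SHIFT		30	/* 2-bit parent mux, owned by 'emc_mux' */
#define EMC_USE_PLLM_UD		BIT(29)	/* un-divided pll_m_out0 bypass */
#define EMC_DIV_SHIFT		0	/* 8-bit divider field, owned by 'emc' */
#define EMC_DIV_WIDTH		8

Because 'emc_mux' touches only bits [31:30] and 'emc' only the divider field, no single clk operation rewrites both, which is exactly what the comment about the HW bug relies on.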
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 88f1943bd2b5..7545af763d7a 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3370,6 +3370,13 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 },
{ TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
{ TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
+ { TEGRA210_CLK_SPDIF_IN_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA210_CLK_I2S0_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA210_CLK_I2S1_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA210_CLK_I2S2_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
+ { TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
/* This MUST be the last entry. */
{ TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
};
@@ -3563,7 +3570,7 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra210_periph_clk_init(clk_base, pmc_base);
tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks,
tegra210_audio_plls,
- ARRAY_SIZE(tegra210_audio_plls));
+ ARRAY_SIZE(tegra210_audio_plls), 24576000);
tegra_pmc_clk_init(pmc_base, tegra210_clks);
/* For Tegra210, PLLD is the only source for DSIA & DSIB */
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index acfe661b2ae7..fa8d573ac626 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1148,9 +1148,9 @@ static bool tegra30_cpu_rail_off_ready(void)
cpu_rst_status = readl(clk_base +
TEGRA30_CLK_RST_CONTROLLER_CPU_CMPLX_STATUS);
- cpu_pwr_status = tegra_powergate_is_powered(TEGRA_POWERGATE_CPU1) ||
- tegra_powergate_is_powered(TEGRA_POWERGATE_CPU2) ||
- tegra_powergate_is_powered(TEGRA_POWERGATE_CPU3);
+ cpu_pwr_status = tegra_pmc_cpu_is_powered(1) ||
+ tegra_pmc_cpu_is_powered(2) ||
+ tegra_pmc_cpu_is_powered(3);
if (((cpu_rst_status & 0xE) != 0xE) || cpu_pwr_status)
return false;
@@ -1267,6 +1267,13 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
{ TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
{ TEGRA30_CLK_VDE, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
+ { TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_I2S2_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
@@ -1344,7 +1351,7 @@ static void __init tegra30_clock_init(struct device_node *np)
tegra30_periph_clk_init();
tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks,
tegra30_audio_plls,
- ARRAY_SIZE(tegra30_audio_plls));
+ ARRAY_SIZE(tegra30_audio_plls), 24000000);
tegra_pmc_clk_init(pmc_base, tegra30_clks);
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index d2c3a010f8e9..09bccbb9640c 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -41,7 +41,7 @@ extern const struct clk_ops tegra_clk_sync_source_ops;
extern int *periph_clk_enb_refcnt;
struct clk *tegra_clk_register_sync_source(const char *name,
- unsigned long fixed_rate, unsigned long max_rate);
+ unsigned long max_rate);
/**
* struct tegra_clk_frac_div - fractional divider clock
@@ -796,7 +796,7 @@ void tegra_register_devclks(struct tegra_devclk *dev_clks, int num);
void tegra_audio_clk_init(void __iomem *clk_base,
void __iomem *pmc_base, struct tegra_clk *tegra_clks,
struct tegra_audio_clk_info *audio_info,
- unsigned int num_plls);
+ unsigned int num_plls, unsigned long sync_max_rate);
void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base,
struct tegra_clk *tegra_clks,
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index 469f560ae1cf..40630eb950fc 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -448,7 +448,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
char *c;
if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
- !strcmp(node->name, "clk"))
+ of_node_name_eq(node, "clk"))
ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;
addrp = of_get_address(node, 0, NULL, NULL);
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 92e28af7afba..6c3329bc116f 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -410,7 +410,7 @@ static void __init of_ti_omap3_dpll_setup(struct device_node *node)
if ((of_machine_is_compatible("ti,omap3630") ||
of_machine_is_compatible("ti,omap36xx")) &&
- !strcmp(node->name, "dpll5_ck"))
+ of_node_name_eq(node, "dpll5_ck"))
of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
else
of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index d5888591e1a9..18a3c4522831 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -545,21 +545,21 @@ static void u8500_clk_init(struct device_node *np)
for_each_child_of_node(np, child) {
static struct clk_onecell_data clk_data;
- if (!of_node_cmp(child->name, "prcmu-clock")) {
+ if (of_node_name_eq(child, "prcmu-clock")) {
clk_data.clks = prcmu_clk;
clk_data.clk_num = ARRAY_SIZE(prcmu_clk);
of_clk_add_provider(child, of_clk_src_onecell_get, &clk_data);
}
- if (!of_node_cmp(child->name, "prcc-periph-clock"))
+ if (of_node_name_eq(child, "prcc-periph-clock"))
of_clk_add_provider(child, ux500_twocell_get, prcc_pclk);
- if (!of_node_cmp(child->name, "prcc-kernel-clock"))
+ if (of_node_name_eq(child, "prcc-kernel-clock"))
of_clk_add_provider(child, ux500_twocell_get, prcc_kclk);
- if (!of_node_cmp(child->name, "rtc32k-clock"))
+ if (of_node_name_eq(child, "rtc32k-clock"))
of_clk_add_provider(child, of_clk_src_simple_get, rtc_clk);
- if (!of_node_cmp(child->name, "smp-twd-clock"))
+ if (of_node_name_eq(child, "smp-twd-clock"))
of_clk_add_provider(child, of_clk_src_simple_get, twd_clk);
}
}
diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
index 1fe1e8d970cf..c2b6bb814742 100644
--- a/drivers/clk/versatile/clk-sp810.c
+++ b/drivers/clk/versatile/clk-sp810.c
@@ -111,7 +111,7 @@ static void __init clk_sp810_of_setup(struct device_node *node)
init.name = name;
init.ops = &clk_sp810_timerclken_ops;
- init.flags = CLK_IS_BASIC;
+ init.flags = 0;
init.parent_names = parent_names;
init.num_parents = num;
diff --git a/drivers/clk/zynqmp/clkc.c b/drivers/clk/zynqmp/clkc.c
index 9d7d297f0ea8..f65cc0ff76ab 100644
--- a/drivers/clk/zynqmp/clkc.c
+++ b/drivers/clk/zynqmp/clkc.c
@@ -128,7 +128,7 @@ static const struct zynqmp_eemi_ops *eemi_ops;
*/
static inline int zynqmp_is_valid_clock(u32 clk_id)
{
- if (clk_id > clock_max_idx)
+ if (clk_id >= clock_max_idx)
return -ENODEV;
return clock[clk_id].valid;
@@ -279,6 +279,9 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
qdata.arg1 = clk_id;
ret = eemi_ops->query_data(qdata, ret_payload);
+ if (ret)
+ return ERR_PTR(ret);
+
mult = ret_payload[1];
div = ret_payload[2];
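The guard change above is the classic off-by-one: a table with clock_max_idx entries has valid indices 0 through clock_max_idx - 1, so clk_id == clock_max_idx must be rejected as well. In isolation:

#include <stdbool.h>
#include <stdint.h>

/* valid indices for a table with nr_entries slots are 0 .. nr_entries - 1 */
static bool clk_id_in_range(uint32_t clk_id, uint32_t nr_entries)
{
	return clk_id < nr_entries;	/* '>' alone lets nr_entries slip through */
}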
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 55c77e44bb2d..a9e26f6a81a1 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -105,6 +105,14 @@ config OWL_TIMER
help
Enables the support for the Actions Semi Owl timer driver.
+config RDA_TIMER
+ bool "RDA timer driver" if COMPILE_TEST
+ depends on GENERIC_CLOCKEVENTS
+ select CLKSRC_MMIO
+ select TIMER_OF
+ help
+ Enables the support for the RDA Micro timer driver.
+
config SUN4I_TIMER
bool "Sun4i timer driver" if COMPILE_TEST
depends on HAS_IOMEM
@@ -163,12 +171,6 @@ config CLKSRC_NOMADIK_MTU
to multiple interrupt generating programmable
32-bit free running decrementing counters.
-config CLKSRC_NOMADIK_MTU_SCHED_CLOCK
- bool
- depends on CLKSRC_NOMADIK_MTU
- help
- Use the Multi Timer Unit as the sched_clock.
-
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer" if COMPILE_TEST
depends on HAS_IOMEM
@@ -226,13 +228,6 @@ config INTEGRATOR_AP_TIMER
help
Enables support for the Integrator-ap timer.
-config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
- bool "Clocksource PRCMU Timer sched_clock"
- depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK)
- default y
- help
- Use the always on PRCMU Timer as sched_clock
-
config CLKSRC_EFM32
bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
@@ -290,6 +285,7 @@ config CLKSRC_MPS2
config ARC_TIMERS
bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
+ depends on GENERIC_SCHED_CLOCK
select TIMER_OF
help
These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
@@ -580,7 +576,7 @@ config H8300_TPU
config CLKSRC_IMX_GPT
bool "Clocksource using i.MX GPT" if COMPILE_TEST
- depends on ARM && CLKDEV_LOOKUP
+ depends on (ARM || ARM64) && CLKDEV_LOOKUP
select CLKSRC_MMIO
config CLKSRC_IMX_TPM
@@ -611,7 +607,7 @@ config ATCPIT100_TIMER
config RISCV_TIMER
bool "Timer for the RISC-V platform"
- depends on RISCV
+ depends on GENERIC_SCHED_CLOCK && RISCV
default y
select TIMER_PROBE
select TIMER_OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index dd9138104568..cdd210ff89ea 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -20,7 +20,7 @@ obj-$(CONFIG_OMAP_DM_TIMER) += timer-ti-dm.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
-obj-$(CONFIG_ROCKCHIP_TIMER) += rockchip_timer.o
+obj-$(CONFIG_ROCKCHIP_TIMER) += timer-rockchip.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
obj-$(CONFIG_ARMADA_370_XP_TIMER) += timer-armada-370-xp.o
@@ -32,10 +32,10 @@ obj-$(CONFIG_MXS_TIMER) += mxs_timer.o
obj-$(CONFIG_CLKSRC_PXA) += pxa_timer.o
obj-$(CONFIG_PRIMA2_TIMER) += timer-prima2.o
obj-$(CONFIG_U300_TIMER) += timer-u300.o
-obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
+obj-$(CONFIG_SUN4I_TIMER) += timer-sun4i.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
-obj-$(CONFIG_MESON6_TIMER) += meson6_timer.o
-obj-$(CONFIG_TEGRA_TIMER) += tegra20_timer.o
+obj-$(CONFIG_MESON6_TIMER) += timer-meson6.o
+obj-$(CONFIG_TEGRA_TIMER) += timer-tegra20.o
obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o
obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o
obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
@@ -57,6 +57,7 @@ obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += timer-owl.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
+obj-$(CONFIG_RDA_TIMER) += timer-rda.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
@@ -78,6 +79,6 @@ obj-$(CONFIG_H8300_TPU) += h8300_tpu.o
obj-$(CONFIG_CLKSRC_ST_LPC) += clksrc_st_lpc.o
obj-$(CONFIG_X86_NUMACHIP) += numachip.o
obj-$(CONFIG_ATCPIT100_TIMER) += timer-atcpit100.o
-obj-$(CONFIG_RISCV_TIMER) += riscv_timer.o
+obj-$(CONFIG_RISCV_TIMER) += timer-riscv.o
obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index 20da9b1d7f7d..b28970ca4a7a 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -23,6 +23,7 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
#include <soc/arc/timers.h>
#include <soc/arc/mcip.h>
@@ -88,6 +89,11 @@ static u64 arc_read_gfrc(struct clocksource *cs)
return (((u64)h) << 32) | l;
}
+static notrace u64 arc_gfrc_clock_read(void)
+{
+ return arc_read_gfrc(NULL);
+}
+
static struct clocksource arc_counter_gfrc = {
.name = "ARConnect GFRC",
.rating = 400,
@@ -111,6 +117,8 @@ static int __init arc_cs_setup_gfrc(struct device_node *node)
if (ret)
return ret;
+ sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);
+
return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
@@ -139,6 +147,11 @@ static u64 arc_read_rtc(struct clocksource *cs)
return (((u64)h) << 32) | l;
}
+static notrace u64 arc_rtc_clock_read(void)
+{
+ return arc_read_rtc(NULL);
+}
+
static struct clocksource arc_counter_rtc = {
.name = "ARCv2 RTC",
.rating = 350,
@@ -170,6 +183,8 @@ static int __init arc_cs_setup_rtc(struct device_node *node)
write_aux_reg(AUX_RTC_CTRL, 1);
+ sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);
+
return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
@@ -185,6 +200,11 @@ static u64 arc_read_timer1(struct clocksource *cs)
return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
}
+static notrace u64 arc_timer1_clock_read(void)
+{
+ return arc_read_timer1(NULL);
+}
+
static struct clocksource arc_counter_timer1 = {
.name = "ARC Timer1",
.rating = 300,
@@ -209,6 +229,8 @@ static int __init arc_cs_setup_timer1(struct device_node *node)
write_aux_reg(ARC_REG_TIMER1_CNT, 0);
write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+ sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
+
return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}
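The pattern repeated for all three ARC counters adapts a clocksource read callback, which takes a struct clocksource *, to the argument-less u64 (*)(void) that sched_clock_register() expects; the notrace annotation keeps ftrace from instrumenting a function the tracer itself relies on. In outline (a kernel-style sketch with illustrative names):

/* the clocksource reader; the 'cs' argument is unused by this hardware */
static u64 my_counter_read(struct clocksource *cs)
{
	return my_hw_read_counter();	/* hypothetical register read */
}

/* argument-less trampoline for the scheduler clock */
static notrace u64 my_sched_clock_read(void)
{
	return my_counter_read(NULL);
}

/* at probe time, alongside clocksource_register_hz(): */
sched_clock_register(my_sched_clock_read, 64, timer_freq);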
diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c
index 60da2537bef9..2b196cbfadb6 100644
--- a/drivers/clocksource/bcm2835_timer.c
+++ b/drivers/clocksource/bcm2835_timer.c
@@ -1,19 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2012 Simon Arlott
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/bitops.h>
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index c1b96dc5f444..51d53c4e646f 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clockchips.h>
-#include <linux/sched_clock.h>
#define RATE_32K 32768
@@ -26,8 +25,6 @@
#define PRCMU_TIMER_DOWNCOUNT 0x4
#define PRCMU_TIMER_MODE 0x8
-#define SCHED_CLOCK_MIN_WRAP 131072 /* 2^32 / 32768 */
-
static void __iomem *clksrc_dbx500_timer_base;
static u64 notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
@@ -46,24 +43,12 @@ static u64 notrace clksrc_dbx500_prcmu_read(struct clocksource *cs)
static struct clocksource clocksource_dbx500_prcmu = {
.name = "dbx500-prcmu-timer",
- .rating = 300,
+ .rating = 100,
.read = clksrc_dbx500_prcmu_read,
.mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
-#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
-
-static u64 notrace dbx500_prcmu_sched_clock_read(void)
-{
- if (unlikely(!clksrc_dbx500_timer_base))
- return 0;
-
- return clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
-}
-
-#endif
-
static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
{
clksrc_dbx500_timer_base = of_iomap(node, 0);
@@ -81,9 +66,6 @@ static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
writel(TIMER_DOWNCOUNT_VAL,
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
}
-#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
- sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);
-#endif
return clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
}
TIMER_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4",
diff --git a/drivers/clocksource/meson6_timer.c b/drivers/clocksource/meson6_timer.c
deleted file mode 100644
index 92f20991a937..000000000000
--- a/drivers/clocksource/meson6_timer.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Amlogic Meson6 SoCs timer handling.
- *
- * Copyright (C) 2014 Carlo Caione <carlo@caione.org>
- *
- * Based on code from Amlogic, Inc
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clk.h>
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqreturn.h>
-#include <linux/sched_clock.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-
-#define CED_ID 0
-#define CSD_ID 4
-
-#define TIMER_ISA_MUX 0
-#define TIMER_ISA_VAL(t) (((t) + 1) << 2)
-
-#define TIMER_INPUT_BIT(t) (2 * (t))
-#define TIMER_ENABLE_BIT(t) (16 + (t))
-#define TIMER_PERIODIC_BIT(t) (12 + (t))
-
-#define TIMER_CED_INPUT_MASK (3UL << TIMER_INPUT_BIT(CED_ID))
-#define TIMER_CSD_INPUT_MASK (7UL << TIMER_INPUT_BIT(CSD_ID))
-
-#define TIMER_CED_UNIT_1US 0
-#define TIMER_CSD_UNIT_1US 1
-
-static void __iomem *timer_base;
-
-static u64 notrace meson6_timer_sched_read(void)
-{
- return (u64)readl(timer_base + TIMER_ISA_VAL(CSD_ID));
-}
-
-static void meson6_clkevt_time_stop(unsigned char timer)
-{
- u32 val = readl(timer_base + TIMER_ISA_MUX);
-
- writel(val & ~TIMER_ENABLE_BIT(timer), timer_base + TIMER_ISA_MUX);
-}
-
-static void meson6_clkevt_time_setup(unsigned char timer, unsigned long delay)
-{
- writel(delay, timer_base + TIMER_ISA_VAL(timer));
-}
-
-static void meson6_clkevt_time_start(unsigned char timer, bool periodic)
-{
- u32 val = readl(timer_base + TIMER_ISA_MUX);
-
- if (periodic)
- val |= TIMER_PERIODIC_BIT(timer);
- else
- val &= ~TIMER_PERIODIC_BIT(timer);
-
- writel(val | TIMER_ENABLE_BIT(timer), timer_base + TIMER_ISA_MUX);
-}
-
-static int meson6_shutdown(struct clock_event_device *evt)
-{
- meson6_clkevt_time_stop(CED_ID);
- return 0;
-}
-
-static int meson6_set_oneshot(struct clock_event_device *evt)
-{
- meson6_clkevt_time_stop(CED_ID);
- meson6_clkevt_time_start(CED_ID, false);
- return 0;
-}
-
-static int meson6_set_periodic(struct clock_event_device *evt)
-{
- meson6_clkevt_time_stop(CED_ID);
- meson6_clkevt_time_setup(CED_ID, USEC_PER_SEC / HZ - 1);
- meson6_clkevt_time_start(CED_ID, true);
- return 0;
-}
-
-static int meson6_clkevt_next_event(unsigned long evt,
- struct clock_event_device *unused)
-{
- meson6_clkevt_time_stop(CED_ID);
- meson6_clkevt_time_setup(CED_ID, evt);
- meson6_clkevt_time_start(CED_ID, false);
-
- return 0;
-}
-
-static struct clock_event_device meson6_clockevent = {
- .name = "meson6_tick",
- .rating = 400,
- .features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT,
- .set_state_shutdown = meson6_shutdown,
- .set_state_periodic = meson6_set_periodic,
- .set_state_oneshot = meson6_set_oneshot,
- .tick_resume = meson6_shutdown,
- .set_next_event = meson6_clkevt_next_event,
-};
-
-static irqreturn_t meson6_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = (struct clock_event_device *)dev_id;
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction meson6_timer_irq = {
- .name = "meson6_timer",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = meson6_timer_interrupt,
- .dev_id = &meson6_clockevent,
-};
-
-static int __init meson6_timer_init(struct device_node *node)
-{
- u32 val;
- int ret, irq;
-
- timer_base = of_io_request_and_map(node, 0, "meson6-timer");
- if (IS_ERR(timer_base)) {
- pr_err("Can't map registers\n");
- return -ENXIO;
- }
-
- irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0) {
- pr_err("Can't parse IRQ\n");
- return -EINVAL;
- }
-
- /* Set 1us for timer E */
- val = readl(timer_base + TIMER_ISA_MUX);
- val &= ~TIMER_CSD_INPUT_MASK;
- val |= TIMER_CSD_UNIT_1US << TIMER_INPUT_BIT(CSD_ID);
- writel(val, timer_base + TIMER_ISA_MUX);
-
- sched_clock_register(meson6_timer_sched_read, 32, USEC_PER_SEC);
- clocksource_mmio_init(timer_base + TIMER_ISA_VAL(CSD_ID), node->name,
- 1000 * 1000, 300, 32, clocksource_mmio_readl_up);
-
- /* Timer A base 1us */
- val &= ~TIMER_CED_INPUT_MASK;
- val |= TIMER_CED_UNIT_1US << TIMER_INPUT_BIT(CED_ID);
- writel(val, timer_base + TIMER_ISA_MUX);
-
- /* Stop the timer A */
- meson6_clkevt_time_stop(CED_ID);
-
- ret = setup_irq(irq, &meson6_timer_irq);
- if (ret) {
- pr_warn("failed to setup irq %d\n", irq);
- return ret;
- }
-
- meson6_clockevent.cpumask = cpu_possible_mask;
- meson6_clockevent.irq = irq;
-
- clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC,
- 1, 0xfffe);
- return 0;
-}
-TIMER_OF_DECLARE(meson6, "amlogic,meson6-timer",
- meson6_timer_init);
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c
index 8e4ddb9420c6..19b336c9b417 100644
--- a/drivers/clocksource/nomadik-mtu.c
+++ b/drivers/clocksource/nomadik-mtu.c
@@ -69,7 +69,6 @@ static u32 clk_prescale;
static u32 nmdk_cycle; /* write-once */
static struct delay_timer mtu_delay_timer;
-#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
/*
* Override the global weak sched_clock symbol with this
* local implementation which uses the clocksource to get some
@@ -82,7 +81,6 @@ static u64 notrace nomadik_read_sched_clock(void)
return -readl(mtu_base + MTU_VAL(0));
}
-#endif
static unsigned long nmdk_timer_read_current_timer(void)
{
@@ -234,9 +232,7 @@ static int __init nmdk_timer_init(void __iomem *base, int irq,
return ret;
}
-#ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
sched_clock_register(nomadik_read_sched_clock, 32, rate);
-#endif
/* Timer 1 is used for events, register irq and clockevents */
setup_irq(irq, &nmdk_timer_irq);
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
index cf93f6419b51..fadff7915dd9 100644
--- a/drivers/clocksource/timer-fttmr010.c
+++ b/drivers/clocksource/timer-fttmr010.c
@@ -21,7 +21,7 @@
#include <linux/delay.h>
/*
- * Register definitions for the timers
+ * Register definitions common for all the timer variants.
*/
#define TIMER1_COUNT (0x00)
#define TIMER1_LOAD (0x04)
@@ -36,9 +36,10 @@
#define TIMER3_MATCH1 (0x28)
#define TIMER3_MATCH2 (0x2c)
#define TIMER_CR (0x30)
-#define TIMER_INTR_STATE (0x34)
-#define TIMER_INTR_MASK (0x38)
+/*
+ * Control register (TMC30) bit fields for fttmr010/gemini/moxart timers.
+ */
#define TIMER_1_CR_ENABLE BIT(0)
#define TIMER_1_CR_CLOCK BIT(1)
#define TIMER_1_CR_INT BIT(2)
@@ -53,8 +54,9 @@
#define TIMER_3_CR_UPDOWN BIT(11)
/*
- * The Aspeed AST2400 moves bits around in the control register
- * and lacks bits for setting the timer to count upwards.
+ * Control register (TMC30) bit fields for aspeed ast2400/ast2500 timers.
+ * The aspeed timers move bits around in the control register and lack
+ * the bits for setting the timer to count upwards.
*/
#define TIMER_1_CR_ASPEED_ENABLE BIT(0)
#define TIMER_1_CR_ASPEED_CLOCK BIT(1)
@@ -66,6 +68,18 @@
#define TIMER_3_CR_ASPEED_CLOCK BIT(9)
#define TIMER_3_CR_ASPEED_INT BIT(10)
+/*
+ * Interrupt status/mask register definitions for fttmr010/gemini/moxart
+ * timers.
+ * These registers don't exist on the aspeed timers and are not needed
+ * there because:
+ * - the aspeed timer overflow interrupt is controlled by bits in the
+ * Control Register (TMC30).
+ * - the aspeed timers always generate an interrupt when one of the
+ * Match registers equals the Status register.
+ */
+#define TIMER_INTR_STATE (0x34)
+#define TIMER_INTR_MASK (0x38)
#define TIMER_1_INT_MATCH1 BIT(0)
#define TIMER_1_INT_MATCH2 BIT(1)
#define TIMER_1_INT_OVERFLOW BIT(2)
@@ -80,7 +94,7 @@
struct fttmr010 {
void __iomem *base;
unsigned int tick_rate;
- bool count_down;
+ bool is_aspeed;
u32 t1_enable_val;
struct clock_event_device clkevt;
#ifdef CONFIG_ARM
@@ -130,7 +144,7 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
cr &= ~fttmr010->t1_enable_val;
writel(cr, fttmr010->base + TIMER_CR);
- if (fttmr010->count_down) {
+ if (fttmr010->is_aspeed) {
/*
* ASPEED Timer Controller will load TIMER1_LOAD register
* into TIMER1_COUNT register when the timer is re-enabled.
@@ -175,16 +189,17 @@ static int fttmr010_timer_set_oneshot(struct clock_event_device *evt)
/* Setup counter start from 0 or ~0 */
writel(0, fttmr010->base + TIMER1_COUNT);
- if (fttmr010->count_down)
+ if (fttmr010->is_aspeed) {
writel(~0, fttmr010->base + TIMER1_LOAD);
- else
+ } else {
writel(0, fttmr010->base + TIMER1_LOAD);
- /* Enable interrupt */
- cr = readl(fttmr010->base + TIMER_INTR_MASK);
- cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2);
- cr |= TIMER_1_INT_MATCH1;
- writel(cr, fttmr010->base + TIMER_INTR_MASK);
+ /* Enable interrupt */
+ cr = readl(fttmr010->base + TIMER_INTR_MASK);
+ cr &= ~(TIMER_1_INT_OVERFLOW | TIMER_1_INT_MATCH2);
+ cr |= TIMER_1_INT_MATCH1;
+ writel(cr, fttmr010->base + TIMER_INTR_MASK);
+ }
return 0;
}
@@ -201,9 +216,8 @@ static int fttmr010_timer_set_periodic(struct clock_event_device *evt)
writel(cr, fttmr010->base + TIMER_CR);
/* Setup timer to fire at 1/HZ intervals. */
- if (fttmr010->count_down) {
+ if (fttmr010->is_aspeed) {
writel(period, fttmr010->base + TIMER1_LOAD);
- writel(0, fttmr010->base + TIMER1_MATCH1);
} else {
cr = 0xffffffff - (period - 1);
writel(cr, fttmr010->base + TIMER1_COUNT);
@@ -281,23 +295,21 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
}
/*
- * The Aspeed AST2400 moves bits around in the control register,
- * otherwise it works the same.
+ * The Aspeed timers move bits around in the control register.
*/
if (is_aspeed) {
fttmr010->t1_enable_val = TIMER_1_CR_ASPEED_ENABLE |
TIMER_1_CR_ASPEED_INT;
- /* Downward not available */
- fttmr010->count_down = true;
+ fttmr010->is_aspeed = true;
} else {
fttmr010->t1_enable_val = TIMER_1_CR_ENABLE | TIMER_1_CR_INT;
- }
- /*
- * Reset the interrupt mask and status
- */
- writel(TIMER_INT_ALL_MASK, fttmr010->base + TIMER_INTR_MASK);
- writel(0, fttmr010->base + TIMER_INTR_STATE);
+ /*
+ * Reset the interrupt mask and status
+ */
+ writel(TIMER_INT_ALL_MASK, fttmr010->base + TIMER_INTR_MASK);
+ writel(0, fttmr010->base + TIMER_INTR_STATE);
+ }
/*
* Enable timer 1 count up, timer 2 count up, except on Aspeed,
@@ -306,9 +318,8 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
if (is_aspeed)
val = TIMER_2_CR_ASPEED_ENABLE;
else {
- val = TIMER_2_CR_ENABLE;
- if (!fttmr010->count_down)
- val |= TIMER_1_CR_UPDOWN | TIMER_2_CR_UPDOWN;
+ val = TIMER_2_CR_ENABLE | TIMER_1_CR_UPDOWN |
+ TIMER_2_CR_UPDOWN;
}
writel(val, fttmr010->base + TIMER_CR);
@@ -321,7 +332,7 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
writel(0, fttmr010->base + TIMER2_MATCH1);
writel(0, fttmr010->base + TIMER2_MATCH2);
- if (fttmr010->count_down) {
+ if (fttmr010->is_aspeed) {
writel(~0, fttmr010->base + TIMER2_LOAD);
clocksource_mmio_init(fttmr010->base + TIMER2_COUNT,
"FTTMR010-TIMER2",
@@ -371,7 +382,7 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed)
#ifdef CONFIG_ARM
/* Also use this timer for delays */
- if (fttmr010->count_down)
+ if (fttmr010->is_aspeed)
fttmr010->delay_timer.read_current_timer =
fttmr010_read_current_timer_down;
else
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 165fbbb1c9a0..706c0d0ff56c 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -141,21 +141,25 @@ static u64 notrace mxc_read_sched_clock(void)
return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}
+#if defined(CONFIG_ARM)
static struct delay_timer imx_delay_timer;
static unsigned long imx_read_current_timer(void)
{
return readl_relaxed(sched_clock_reg);
}
+#endif
static int __init mxc_clocksource_init(struct imx_timer *imxtm)
{
unsigned int c = clk_get_rate(imxtm->clk_per);
void __iomem *reg = imxtm->base + imxtm->gpt->reg_tcn;
+#if defined(CONFIG_ARM)
imx_delay_timer.read_current_timer = &imx_read_current_timer;
imx_delay_timer.freq = c;
register_current_timer_delay(&imx_delay_timer);
+#endif
sched_clock_reg = reg;
@@ -198,15 +202,8 @@ static int v2_set_next_event(unsigned long evt,
static int mxc_shutdown(struct clock_event_device *ced)
{
struct imx_timer *imxtm = to_imx_timer(ced);
- unsigned long flags;
u32 tcn;
- /*
- * The timer interrupt generation is disabled at least
- * for enough time to call mxc_set_next_event()
- */
- local_irq_save(flags);
-
/* Disable interrupt in GPT module */
imxtm->gpt->gpt_irq_disable(imxtm);
@@ -221,21 +218,12 @@ static int mxc_shutdown(struct clock_event_device *ced)
printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */
- local_irq_restore(flags);
-
return 0;
}
static int mxc_set_oneshot(struct clock_event_device *ced)
{
struct imx_timer *imxtm = to_imx_timer(ced);
- unsigned long flags;
-
- /*
- * The timer interrupt generation is disabled at least
- * for enough time to call mxc_set_next_event()
- */
- local_irq_save(flags);
/* Disable interrupt in GPT module */
imxtm->gpt->gpt_irq_disable(imxtm);
@@ -260,7 +248,6 @@ static int mxc_set_oneshot(struct clock_event_device *ced)
* mode switching
*/
imxtm->gpt->gpt_irq_enable(imxtm);
- local_irq_restore(flags);
return 0;
}
diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
index b7aa2b817078..c1d52d5264c2 100644
--- a/drivers/clocksource/timer-imx-tpm.c
+++ b/drivers/clocksource/timer-imx-tpm.c
@@ -12,6 +12,8 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#include "timer-of.h"
+
#define TPM_PARAM 0x4
#define TPM_PARAM_WIDTH_SHIFT 16
#define TPM_PARAM_WIDTH_MASK (0xff << 16)
@@ -33,9 +35,7 @@
#define TPM_C0V 0x24
static int counter_width;
-static int rating;
static void __iomem *timer_base;
-static struct clock_event_device clockevent_tpm;
static inline void tpm_timer_disable(void)
{
@@ -80,19 +80,6 @@ static u64 notrace tpm_read_sched_clock(void)
return tpm_read_counter();
}
-static int __init tpm_clocksource_init(unsigned long rate)
-{
- tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
- tpm_delay_timer.freq = rate;
- register_current_timer_delay(&tpm_delay_timer);
-
- sched_clock_register(tpm_read_sched_clock, counter_width, rate);
-
- return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm",
- rate, rating, counter_width,
- clocksource_mmio_readl_up);
-}
-
static int tpm_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -137,74 +124,80 @@ static irqreturn_t tpm_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static struct clock_event_device clockevent_tpm = {
- .name = "i.MX7ULP TPM Timer",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_state_oneshot = tpm_set_state_oneshot,
- .set_next_event = tpm_set_next_event,
- .set_state_shutdown = tpm_set_state_shutdown,
+static struct timer_of to_tpm = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+ .clkevt = {
+ .name = "i.MX7ULP TPM Timer",
+ .rating = 200,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = tpm_set_state_shutdown,
+ .set_state_oneshot = tpm_set_state_oneshot,
+ .set_next_event = tpm_set_next_event,
+ .cpumask = cpu_possible_mask,
+ },
+ .of_irq = {
+ .handler = tpm_timer_interrupt,
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ },
+ .of_clk = {
+ .name = "per",
+ },
};
-static int __init tpm_clockevent_init(unsigned long rate, int irq)
+static int __init tpm_clocksource_init(void)
{
- int ret;
+ tpm_delay_timer.read_current_timer = &tpm_read_current_timer;
+ tpm_delay_timer.freq = timer_of_rate(&to_tpm) >> 3;
+ register_current_timer_delay(&tpm_delay_timer);
- ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
- "i.MX7ULP TPM Timer", &clockevent_tpm);
+ sched_clock_register(tpm_read_sched_clock, counter_width,
+ timer_of_rate(&to_tpm) >> 3);
- clockevent_tpm.rating = rating;
- clockevent_tpm.cpumask = cpumask_of(0);
- clockevent_tpm.irq = irq;
- clockevents_config_and_register(&clockevent_tpm, rate, 300,
- GENMASK(counter_width - 1, 1));
+ return clocksource_mmio_init(timer_base + TPM_CNT,
+ "imx-tpm",
+ timer_of_rate(&to_tpm) >> 3,
+ to_tpm.clkevt.rating,
+ counter_width,
+ clocksource_mmio_readl_up);
+}
- return ret;
+static void __init tpm_clockevent_init(void)
+{
+ clockevents_config_and_register(&to_tpm.clkevt,
+ timer_of_rate(&to_tpm) >> 3,
+ 300,
+ GENMASK(counter_width - 1,
+ 1));
}
static int __init tpm_timer_init(struct device_node *np)
{
- struct clk *ipg, *per;
- int irq, ret;
- u32 rate;
-
- timer_base = of_iomap(np, 0);
- if (!timer_base) {
- pr_err("tpm: failed to get base address\n");
- return -ENXIO;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (!irq) {
- pr_err("tpm: failed to get irq\n");
- ret = -ENOENT;
- goto err_iomap;
- }
+ struct clk *ipg;
+ int ret;
ipg = of_clk_get_by_name(np, "ipg");
- per = of_clk_get_by_name(np, "per");
- if (IS_ERR(ipg) || IS_ERR(per)) {
- pr_err("tpm: failed to get ipg or per clk\n");
- ret = -ENODEV;
- goto err_clk_get;
+ if (IS_ERR(ipg)) {
+ pr_err("tpm: failed to get ipg clk\n");
+ return -ENODEV;
}
-
/* enable clk before accessing registers */
ret = clk_prepare_enable(ipg);
if (ret) {
pr_err("tpm: ipg clock enable failed (%d)\n", ret);
- goto err_clk_get;
+ clk_put(ipg);
+ return ret;
}
- ret = clk_prepare_enable(per);
- if (ret) {
- pr_err("tpm: per clock enable failed (%d)\n", ret);
- goto err_per_clk_enable;
- }
+ ret = timer_of_init(np, &to_tpm);
+ if (ret)
+ return ret;
+
+ timer_base = timer_of_base(&to_tpm);
- counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK)
- >> TPM_PARAM_WIDTH_SHIFT;
+ counter_width = (readl(timer_base + TPM_PARAM)
+ & TPM_PARAM_WIDTH_MASK) >> TPM_PARAM_WIDTH_SHIFT;
/* use rating 200 for 32-bit counter and 150 for 16-bit counter */
- rating = counter_width == 0x20 ? 200 : 150;
+ to_tpm.clkevt.rating = counter_width == 0x20 ? 200 : 150;
/*
* Initialize tpm module to a known state
@@ -229,29 +222,13 @@ static int __init tpm_timer_init(struct device_node *np)
writel(TPM_SC_CMOD_INC_PER_CNT |
(counter_width == 0x20 ?
TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX),
- timer_base + TPM_SC);
+ timer_base + TPM_SC);
/* set MOD register to maximum for free running mode */
writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD);
- rate = clk_get_rate(per) >> 3;
- ret = tpm_clocksource_init(rate);
- if (ret)
- goto err_per_clk_enable;
-
- ret = tpm_clockevent_init(rate, irq);
- if (ret)
- goto err_per_clk_enable;
-
- return 0;
+ tpm_clockevent_init();
-err_per_clk_enable:
- clk_disable_unprepare(ipg);
-err_clk_get:
- clk_put(per);
- clk_put(ipg);
-err_iomap:
- iounmap(timer_base);
- return ret;
+ return tpm_clocksource_init();
}
TIMER_OF_DECLARE(imx7ulp, "fsl,imx7ulp-tpm", tpm_timer_init);
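The TPM rework is a conversion to the timer-of helper (drivers/clocksource/timer-of.c): one struct timer_of bundles the clockevent, the interrupt, the MMIO base and the clock, and timer_of_init() performs the iomap/IRQ/clk acquisition and error unwinding that the removed goto chain did by hand. The skeleton of such a conversion, with illustrative names:

#include <linux/interrupt.h>
#include "timer-of.h"

static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	evt->event_handler(evt);
	return IRQ_HANDLED;
}

static struct timer_of my_to = {
	.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
	.clkevt = {
		.name = "my-timer",
		.rating = 200,
		.features = CLOCK_EVT_FEAT_ONESHOT,
		/* .set_next_event and friends go here */
		.cpumask = cpu_possible_mask,
	},
	.of_irq = {
		.handler = my_timer_interrupt,
		.flags = IRQF_TIMER,
	},
	.of_clk = {
		.name = "per",	/* matches a DT clock-names entry */
	},
};

static int __init my_timer_init(struct device_node *np)
{
	int ret = timer_of_init(np, &my_to);

	if (ret)
		return ret;

	/* base, rate and irq are now reachable via the accessors */
	clockevents_config_and_register(&my_to.clkevt,
					timer_of_rate(&my_to), 1, ~0U);
	return 0;
}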
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 76e526f58620..19fb7de4b928 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -181,8 +181,7 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
int irq;
struct clk *clk;
unsigned long rate;
- struct device_node *pri_node;
- struct device_node *sec_node;
+ struct device_node *alias_node;
base = of_io_request_and_map(node, 0, "integrator-timer");
if (IS_ERR(base))
@@ -204,7 +203,18 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
return err;
}
- pri_node = of_find_node_by_path(path);
+ alias_node = of_find_node_by_path(path);
+
+ /*
+ * The pointer is used as an identifier, not dereferenced, so we
+ * can drop the refcount on the OF node immediately after
+ * getting it.
+ */
+ of_node_put(alias_node);
+
+ if (node == alias_node)
+ /* The primary timer lacks IRQ, use as clocksource */
+ return integrator_clocksource_init(rate, base);
err = of_property_read_string(of_aliases,
"arm,timer-secondary", &path);
@@ -213,14 +223,11 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
return err;
}
+ alias_node = of_find_node_by_path(path);
- sec_node = of_find_node_by_path(path);
-
- if (node == pri_node)
- /* The primary timer lacks IRQ, use as clocksource */
- return integrator_clocksource_init(rate, base);
+ of_node_put(alias_node);
- if (node == sec_node) {
+ if (node == alias_node) {
/* The secondary timer will drive the clock event */
irq = irq_of_parse_and_map(node, 0);
return integrator_clockevent_init(rate, base, irq);
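The integrator change leans on a small OF refcounting rule: of_find_node_by_path() returns its node with an elevated refcount, but when the pointer is only compared for identity and never dereferenced, the reference may be dropped immediately. The pattern in isolation (sketch):

struct device_node *alias;

alias = of_find_node_by_path(path);	/* takes a reference */
of_node_put(alias);			/* safe: pointer used only as an identity */

if (node == alias) {
	/* 'node' is the timer named by the alias */
}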
diff --git a/drivers/clocksource/timer-meson6.c b/drivers/clocksource/timer-meson6.c
new file mode 100644
index 000000000000..84bd9479c3f8
--- /dev/null
+++ b/drivers/clocksource/timer-meson6.c
@@ -0,0 +1,220 @@
+/*
+ * Amlogic Meson6 SoCs timer handling.
+ *
+ * Copyright (C) 2014 Carlo Caione <carlo@caione.org>
+ *
+ * Based on code from Amlogic, Inc
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#ifdef CONFIG_ARM
+#include <linux/delay.h>
+#endif
+
+#define MESON_ISA_TIMER_MUX 0x00
+#define MESON_ISA_TIMER_MUX_TIMERD_EN BIT(19)
+#define MESON_ISA_TIMER_MUX_TIMERC_EN BIT(18)
+#define MESON_ISA_TIMER_MUX_TIMERB_EN BIT(17)
+#define MESON_ISA_TIMER_MUX_TIMERA_EN BIT(16)
+#define MESON_ISA_TIMER_MUX_TIMERD_MODE BIT(15)
+#define MESON_ISA_TIMER_MUX_TIMERC_MODE BIT(14)
+#define MESON_ISA_TIMER_MUX_TIMERB_MODE BIT(13)
+#define MESON_ISA_TIMER_MUX_TIMERA_MODE BIT(12)
+#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_MASK GENMASK(10, 8)
+#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_SYSTEM_CLOCK 0x0
+#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_1US 0x1
+#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_10US 0x2
+#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_100US 0x3
+#define MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_1MS 0x4
+#define MESON_ISA_TIMER_MUX_TIMERD_INPUT_CLOCK_MASK GENMASK(7, 6)
+#define MESON_ISA_TIMER_MUX_TIMERC_INPUT_CLOCK_MASK GENMASK(5, 4)
+#define MESON_ISA_TIMER_MUX_TIMERB_INPUT_CLOCK_MASK GENMASK(3, 2)
+#define MESON_ISA_TIMER_MUX_TIMERA_INPUT_CLOCK_MASK GENMASK(1, 0)
+#define MESON_ISA_TIMER_MUX_TIMERABCD_INPUT_CLOCK_1US 0x0
+#define MESON_ISA_TIMER_MUX_TIMERABCD_INPUT_CLOCK_10US 0x1
+#define MESON_ISA_TIMER_MUX_TIMERABCD_INPUT_CLOCK_100US 0x2
+#define MESON_ISA_TIMER_MUX_TIMERABCD_INPUT_CLOCK_1MS 0x3
+
+#define MESON_ISA_TIMERA 0x04
+#define MESON_ISA_TIMERB 0x08
+#define MESON_ISA_TIMERC 0x0c
+#define MESON_ISA_TIMERD 0x10
+#define MESON_ISA_TIMERE 0x14
+
+static void __iomem *timer_base;
+
+#ifdef CONFIG_ARM
+static unsigned long meson6_read_current_timer(void)
+{
+ return readl_relaxed(timer_base + MESON_ISA_TIMERE);
+}
+
+static struct delay_timer meson6_delay_timer = {
+ .read_current_timer = meson6_read_current_timer,
+ .freq = 1000 * 1000,
+};
+#endif
+
+static u64 notrace meson6_timer_sched_read(void)
+{
+ return (u64)readl(timer_base + MESON_ISA_TIMERE);
+}
+
+static void meson6_clkevt_time_stop(void)
+{
+ u32 val = readl(timer_base + MESON_ISA_TIMER_MUX);
+
+ writel(val & ~MESON_ISA_TIMER_MUX_TIMERA_EN,
+ timer_base + MESON_ISA_TIMER_MUX);
+}
+
+static void meson6_clkevt_time_setup(unsigned long delay)
+{
+ writel(delay, timer_base + MESON_ISA_TIMERA);
+}
+
+static void meson6_clkevt_time_start(bool periodic)
+{
+ u32 val = readl(timer_base + MESON_ISA_TIMER_MUX);
+
+ if (periodic)
+ val |= MESON_ISA_TIMER_MUX_TIMERA_MODE;
+ else
+ val &= ~MESON_ISA_TIMER_MUX_TIMERA_MODE;
+
+ writel(val | MESON_ISA_TIMER_MUX_TIMERA_EN,
+ timer_base + MESON_ISA_TIMER_MUX);
+}
+
+static int meson6_shutdown(struct clock_event_device *evt)
+{
+ meson6_clkevt_time_stop();
+ return 0;
+}
+
+static int meson6_set_oneshot(struct clock_event_device *evt)
+{
+ meson6_clkevt_time_stop();
+ meson6_clkevt_time_start(false);
+ return 0;
+}
+
+static int meson6_set_periodic(struct clock_event_device *evt)
+{
+ meson6_clkevt_time_stop();
+ meson6_clkevt_time_setup(USEC_PER_SEC / HZ - 1);
+ meson6_clkevt_time_start(true);
+ return 0;
+}
+
+static int meson6_clkevt_next_event(unsigned long evt,
+ struct clock_event_device *unused)
+{
+ meson6_clkevt_time_stop();
+ meson6_clkevt_time_setup(evt);
+ meson6_clkevt_time_start(false);
+
+ return 0;
+}
+
+static struct clock_event_device meson6_clockevent = {
+ .name = "meson6_tick",
+ .rating = 400,
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = meson6_shutdown,
+ .set_state_periodic = meson6_set_periodic,
+ .set_state_oneshot = meson6_set_oneshot,
+ .tick_resume = meson6_shutdown,
+ .set_next_event = meson6_clkevt_next_event,
+};
+
+static irqreturn_t meson6_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction meson6_timer_irq = {
+ .name = "meson6_timer",
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = meson6_timer_interrupt,
+ .dev_id = &meson6_clockevent,
+};
+
+static int __init meson6_timer_init(struct device_node *node)
+{
+ u32 val;
+ int ret, irq;
+
+ timer_base = of_io_request_and_map(node, 0, "meson6-timer");
+ if (IS_ERR(timer_base)) {
+ pr_err("Can't map registers\n");
+ return -ENXIO;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ pr_err("Can't parse IRQ\n");
+ return -EINVAL;
+ }
+
+ /* Set 1us for timer E */
+ val = readl(timer_base + MESON_ISA_TIMER_MUX);
+ val &= ~MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_MASK;
+ val |= FIELD_PREP(MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_MASK,
+ MESON_ISA_TIMER_MUX_TIMERE_INPUT_CLOCK_1US);
+ writel(val, timer_base + MESON_ISA_TIMER_MUX);
+
+ sched_clock_register(meson6_timer_sched_read, 32, USEC_PER_SEC);
+ clocksource_mmio_init(timer_base + MESON_ISA_TIMERE, node->name,
+ 1000 * 1000, 300, 32, clocksource_mmio_readl_up);
+
+ /* Timer A base 1us */
+ val &= ~MESON_ISA_TIMER_MUX_TIMERA_INPUT_CLOCK_MASK;
+ val |= FIELD_PREP(MESON_ISA_TIMER_MUX_TIMERA_INPUT_CLOCK_MASK,
+ MESON_ISA_TIMER_MUX_TIMERABCD_INPUT_CLOCK_1US);
+ writel(val, timer_base + MESON_ISA_TIMER_MUX);
+
+ /* Stop the timer A */
+ meson6_clkevt_time_stop();
+
+ ret = setup_irq(irq, &meson6_timer_irq);
+ if (ret) {
+ pr_warn("failed to setup irq %d\n", irq);
+ return ret;
+ }
+
+ meson6_clockevent.cpumask = cpu_possible_mask;
+ meson6_clockevent.irq = irq;
+
+ clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC,
+ 1, 0xfffe);
+
+#ifdef CONFIG_ARM
+ /* Also use MESON_ISA_TIMERE for delays */
+ register_current_timer_delay(&meson6_delay_timer);
+#endif
+
+ return 0;
+}
+TIMER_OF_DECLARE(meson6, "amlogic,meson6-timer",
+ meson6_timer_init);
diff --git a/drivers/clocksource/timer-rda.c b/drivers/clocksource/timer-rda.c
new file mode 100644
index 000000000000..fd1199c189bf
--- /dev/null
+++ b/drivers/clocksource/timer-rda.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDA8810PL SoC timer driver
+ *
+ * Copyright RDA Microelectronics Company Limited
+ * Copyright (c) 2017 Andreas Färber
+ * Copyright (c) 2018 Manivannan Sadhasivam
+ *
+ * RDA8810PL has two independent timers: OSTIMER (56-bit) and HWTIMER (64-bit).
+ * Each timer provides optional interrupt support. In this driver, OSTIMER is
+ * used for clockevents and HWTIMER is used for clocksource.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+#include "timer-of.h"
+
+#define RDA_OSTIMER_LOADVAL_L 0x000
+#define RDA_OSTIMER_CTRL 0x004
+#define RDA_HWTIMER_LOCKVAL_L 0x024
+#define RDA_HWTIMER_LOCKVAL_H 0x028
+#define RDA_TIMER_IRQ_MASK_SET 0x02c
+#define RDA_TIMER_IRQ_MASK_CLR 0x030
+#define RDA_TIMER_IRQ_CLR 0x034
+
+#define RDA_OSTIMER_CTRL_ENABLE BIT(24)
+#define RDA_OSTIMER_CTRL_REPEAT BIT(28)
+#define RDA_OSTIMER_CTRL_LOAD BIT(30)
+
+#define RDA_TIMER_IRQ_MASK_OSTIMER BIT(0)
+
+#define RDA_TIMER_IRQ_CLR_OSTIMER BIT(0)
+
+static int rda_ostimer_start(void __iomem *base, bool periodic, u64 cycles)
+{
+ u32 ctrl, load_l;
+
+ load_l = (u32)cycles;
+ ctrl = ((cycles >> 32) & 0xffffff);
+ ctrl |= RDA_OSTIMER_CTRL_LOAD | RDA_OSTIMER_CTRL_ENABLE;
+ if (periodic)
+ ctrl |= RDA_OSTIMER_CTRL_REPEAT;
+
+ /* Enable ostimer interrupt first */
+ writel_relaxed(RDA_TIMER_IRQ_MASK_OSTIMER,
+ base + RDA_TIMER_IRQ_MASK_SET);
+
+ /* Write the low 32 bits first; the high 24 bits go in ctrl */
+ writel_relaxed(load_l, base + RDA_OSTIMER_LOADVAL_L);
+ writel_relaxed(ctrl, base + RDA_OSTIMER_CTRL);
+
+ return 0;
+}
+
+static int rda_ostimer_stop(void __iomem *base)
+{
+ /* Disable ostimer interrupt first */
+ writel_relaxed(RDA_TIMER_IRQ_MASK_OSTIMER,
+ base + RDA_TIMER_IRQ_MASK_CLR);
+
+ writel_relaxed(0, base + RDA_OSTIMER_CTRL);
+
+ return 0;
+}
+
+static int rda_ostimer_set_state_shutdown(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+
+ rda_ostimer_stop(timer_of_base(to));
+
+ return 0;
+}
+
+static int rda_ostimer_set_state_oneshot(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+
+ rda_ostimer_stop(timer_of_base(to));
+
+ return 0;
+}
+
+static int rda_ostimer_set_state_periodic(struct clock_event_device *evt)
+{
+ struct timer_of *to = to_timer_of(evt);
+ unsigned long cycles_per_jiffy;
+
+ rda_ostimer_stop(timer_of_base(to));
+
+ cycles_per_jiffy = ((unsigned long long)NSEC_PER_SEC / HZ *
+ evt->mult) >> evt->shift;
+ rda_ostimer_start(timer_of_base(to), true, cycles_per_jiffy);
+
+ return 0;
+}
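The periodic path above converts one jiffy of nanoseconds into timer cycles through the clockevent's mult/shift pair. A minimal sketch of that fixed-point conversion, with evt_mult and evt_shift standing in for evt->mult and evt->shift:

static u64 ns_to_timer_cycles(u64 ns, u32 evt_mult, u32 evt_shift)
{
	/* clockevents encode the ns->cycles conversion as a fixed-point multiply */
	return (ns * evt_mult) >> evt_shift;
}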
+
+static int rda_ostimer_tick_resume(struct clock_event_device *evt)
+{
+ return 0;
+}
+
+static int rda_ostimer_set_next_event(unsigned long evt,
+ struct clock_event_device *ev)
+{
+ struct timer_of *to = to_timer_of(ev);
+
+ rda_ostimer_start(timer_of_base(to), false, evt);
+
+ return 0;
+}
+
+static irqreturn_t rda_ostimer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ struct timer_of *to = to_timer_of(evt);
+
+ /* clear timer int */
+ writel_relaxed(RDA_TIMER_IRQ_CLR_OSTIMER,
+ timer_of_base(to) + RDA_TIMER_IRQ_CLR);
+
+ if (evt->event_handler)
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static struct timer_of rda_ostimer_of = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE,
+
+ .clkevt = {
+ .name = "rda-ostimer",
+ .rating = 250,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ,
+ .set_state_shutdown = rda_ostimer_set_state_shutdown,
+ .set_state_oneshot = rda_ostimer_set_state_oneshot,
+ .set_state_periodic = rda_ostimer_set_state_periodic,
+ .tick_resume = rda_ostimer_tick_resume,
+ .set_next_event = rda_ostimer_set_next_event,
+ },
+
+ .of_base = {
+ .name = "rda-timer",
+ .index = 0,
+ },
+
+ .of_irq = {
+ .name = "ostimer",
+ .handler = rda_ostimer_interrupt,
+ .flags = IRQF_TIMER,
+ },
+};
+
+static u64 rda_hwtimer_read(struct clocksource *cs)
+{
+ void __iomem *base = timer_of_base(&rda_ostimer_of);
+ u32 lo, hi;
+
+ /* Always read low 32 bits first */
+ do {
+ lo = readl_relaxed(base + RDA_HWTIMER_LOCKVAL_L);
+ hi = readl_relaxed(base + RDA_HWTIMER_LOCKVAL_H);
+ } while (hi != readl_relaxed(base + RDA_HWTIMER_LOCKVAL_H));
+
+ return ((u64)hi << 32) | lo;
+}
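rda_hwtimer_read presumably relies on the hardware latching a consistent 64-bit value when the low word is read (hence "Always read low 32 bits first"), with the high-word re-read as a consistency check. On hardware without such latching, the portable pattern reads high, then low, then re-reads high and retries on mismatch; a sketch with hypothetical read_hi()/read_lo() accessors:

static u64 read_split_counter(u32 (*read_hi)(void), u32 (*read_lo)(void))
{
	u32 lo, hi;

	do {
		hi = read_hi();
		lo = read_lo();
	} while (hi != read_hi());	/* low word wrapped; retry */

	return ((u64)hi << 32) | lo;
}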
+
+static struct clocksource rda_hwtimer_clocksource = {
+ .name = "rda-timer",
+ .rating = 400,
+ .read = rda_hwtimer_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init rda_timer_init(struct device_node *np)
+{
+ unsigned long rate = 2000000;
+ int ret;
+
+ ret = timer_of_init(np, &rda_ostimer_of);
+ if (ret)
+ return ret;
+
+ clocksource_register_hz(&rda_hwtimer_clocksource, rate);
+
+ clockevents_config_and_register(&rda_ostimer_of.clkevt, rate,
+ 0x2, UINT_MAX);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(rda8810pl, "rda,8810pl-timer", rda_timer_init);
diff --git a/drivers/clocksource/riscv_timer.c b/drivers/clocksource/timer-riscv.c
index 084e97dc10ed..431892200a08 100644
--- a/drivers/clocksource/riscv_timer.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -8,6 +8,7 @@
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
+#include <linux/sched_clock.h>
#include <asm/smp.h>
#include <asm/sbi.h>
@@ -49,6 +50,11 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
return get_cycles64();
}
+static u64 riscv_sched_clock(void)
+{
+ return get_cycles64();
+}
+
static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
.name = "riscv_clocksource",
.rating = 300,
@@ -97,6 +103,9 @@ static int __init riscv_timer_init_dt(struct device_node *n)
cs = per_cpu_ptr(&riscv_clocksource, cpuid);
clocksource_register_hz(cs, riscv_timebase);
+ sched_clock_register(riscv_sched_clock,
+ BITS_PER_LONG, riscv_timebase);
+
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
"clockevents/riscv/timer:starting",
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/timer-rockchip.c
index 33f370dbd0d6..33f370dbd0d6 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/timer-rockchip.c
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/timer-sun4i.c
index 6e0180aaf784..6e0180aaf784 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/timer-sun4i.c
diff --git a/drivers/clocksource/tegra20_timer.c b/drivers/clocksource/timer-tegra20.c
index aa624885e0e2..4293943f4e2b 100644
--- a/drivers/clocksource/tegra20_timer.c
+++ b/drivers/clocksource/timer-tegra20.c
@@ -30,7 +30,6 @@
#include <linux/delay.h>
#include <asm/mach/time.h>
-#include <asm/smp_twd.h>
#define RTC_SECONDS 0x08
#define RTC_SHADOW_SECONDS 0x0c
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 4cce6b224b87..595124074821 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -991,7 +991,6 @@ static struct platform_driver omap_dm_timer_driver = {
},
};
-early_platform_init("earlytimer", &omap_dm_timer_driver);
module_platform_driver(omap_dm_timer_driver);
MODULE_DESCRIPTION("OMAP Dual-Mode Timer Driver");
diff --git a/drivers/clocksource/timer-vt8500.c b/drivers/clocksource/timer-vt8500.c
index e0f7489cfc8e..c3aff1a8f7d5 100644
--- a/drivers/clocksource/timer-vt8500.c
+++ b/drivers/clocksource/timer-vt8500.c
@@ -145,7 +145,7 @@ static int __init vt8500_timer_init(struct device_node *np)
ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ);
if (ret) {
- pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
+ pr_err("%s: clocksource_register failed for %s\n",
__func__, clocksource.name);
return ret;
}
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 4e1131ef85ae..688f10227793 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -114,6 +114,17 @@ config ARM_QCOM_CPUFREQ_KRYO
If in doubt, say N.
+config ARM_QCOM_CPUFREQ_HW
+ tristate "QCOM CPUFreq HW driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ help
+ Support for the CPUFreq HW driver.
+ Some QCOM chipsets have a HW engine to offload the steps
+ necessary for changing the frequency of the CPUs. Firmware loaded
+ in this engine exposes a programming interface to the OS.
+ The driver implements the cpufreq interface for this HW engine.
+ Say Y if you want to support CPUFreq HW.
+
config ARM_S3C_CPUFREQ
bool
help
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index d5ee4562ed06..08c071be2491 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW) += qcom-cpufreq-hw.o
obj-$(CONFIG_ARM_QCOM_CPUFREQ_KRYO) += qcom-cpufreq-kryo.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index dbf82f36d270..33c309a08c64 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -123,8 +123,6 @@ static void nforce2_write_pll(int pll)
/* Now write the value in all 64 registers */
for (temp = 0; temp <= 0x3f; temp++)
pci_write_config_dword(nforce2_dev, NFORCE2_PLLREG, pll);
-
- return;
}
/**
@@ -438,4 +436,3 @@ static void __exit nforce2_exit(void)
module_init(nforce2_init);
module_exit(nforce2_exit);
-
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7aa3dcad2175..6f23ebb395f1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2277,6 +2277,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
ret = cpufreq_start_governor(policy);
if (!ret) {
pr_debug("cpufreq: governor change\n");
+ sched_cpufreq_governor_change(policy, old_gov);
return 0;
}
cpufreq_exit_governor(policy);
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 6d53f7d9fc7a..ffa9adeaba31 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -346,7 +346,7 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
for_each_cpu(i, policy->cpus)
cpufreq_remove_update_util_hook(i);
- synchronize_sched();
+ synchronize_rcu();
}
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index dd5440d3372d..80c5bf590acb 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -16,7 +16,6 @@
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/pal.h>
@@ -28,7 +27,6 @@ MODULE_AUTHOR("Venkatesh Pallipadi");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");
-
struct cpufreq_acpi_io {
struct acpi_processor_performance acpi_data;
unsigned int resume;
@@ -348,10 +346,7 @@ acpi_cpufreq_exit (void)
pr_debug("acpi_cpufreq_exit\n");
cpufreq_unregister_driver(&acpi_cpufreq_driver);
- return;
}
-
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
-
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index d8c3595e9023..9fedf627e000 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -177,22 +177,16 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* scaling down? scale voltage after frequency */
if (new_freq < old_freq) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
- if (ret) {
+ if (ret)
dev_warn(cpu_dev,
"failed to scale vddarm down: %d\n", ret);
- ret = 0;
- }
ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
- if (ret) {
+ if (ret)
dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
- ret = 0;
- }
if (!IS_ERR(pu_reg)) {
ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
- if (ret) {
+ if (ret)
dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
- ret = 0;
- }
}
}
@@ -411,9 +405,10 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
if (of_machine_is_compatible("fsl,imx6ul") ||
of_machine_is_compatible("fsl,imx6ull")) {
ret = imx6ul_opp_check_speed_grading(cpu_dev);
- if (ret == -EPROBE_DEFER)
- return ret;
if (ret) {
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
dev_err(cpu_dev, "failed to read ocotp: %d\n",
ret);
return ret;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 9578312e43f2..dd66decf2087 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -830,6 +830,28 @@ skip_epp:
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
+static void intel_pstate_hwp_force_min_perf(int cpu)
+{
+ u64 value;
+ int min_perf;
+
+ value = all_cpu_data[cpu]->hwp_req_cached;
+ value &= ~GENMASK_ULL(31, 0);
+ min_perf = HWP_LOWEST_PERF(all_cpu_data[cpu]->hwp_cap_cached);
+
+ /* Set hwp_max = hwp_min */
+ value |= HWP_MAX_PERF(min_perf);
+ value |= HWP_MIN_PERF(min_perf);
+
+ /* Set EPP/EPB to min */
+ if (static_cpu_has(X86_FEATURE_HWP_EPP))
+ value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
+ else
+ intel_pstate_set_epb(cpu, HWP_EPP_BALANCE_POWERSAVE);
+
+ wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
+}
+
static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
{
struct cpudata *cpu_data = all_cpu_data[policy->cpu];
@@ -1930,7 +1952,7 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
cpufreq_remove_update_util_hook(cpu);
cpu_data->update_util_set = false;
- synchronize_sched();
+ synchronize_rcu();
}
static int intel_pstate_get_max_freq(struct cpudata *cpu)
@@ -2084,10 +2106,12 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
pr_debug("CPU %d exiting\n", policy->cpu);
intel_pstate_clear_update_util_hook(policy->cpu);
- if (hwp_active)
+ if (hwp_active) {
intel_pstate_hwp_save_state(policy);
- else
+ intel_pstate_hwp_force_min_perf(policy->cpu);
+ } else {
intel_cpufreq_stop_cpu(policy);
+ }
}
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 61ae06ca008e..52f0d91d30c1 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -128,7 +128,7 @@ static int cpu_750fx_cpu_speed(int low_speed)
mtspr(SPRN_HID2, hid2);
}
}
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
low_choose_750fx_pll(low_speed);
#endif
if (low_speed == 1) {
@@ -166,7 +166,7 @@ static int dfs_set_cpu_speed(int low_speed)
}
/* set frequency */
-#ifdef CONFIG_6xx
+#ifdef CONFIG_PPC_BOOK3S_32
low_choose_7447a_dfs(low_speed);
#endif
udelay(100);
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index be623dd7b9f2..1d32a863332d 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -411,6 +411,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
pfunc_vdnap0_complete =
pmf_find_function(root, "slewing-done");
+ of_node_put(root);
if (pfunc_set_vdnap0 == NULL ||
pfunc_vdnap0_complete == NULL) {
pr_err("Can't find required platform function\n");
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index bf6519cf64bc..7e7ad3879c4e 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -253,18 +253,18 @@ static int init_powernv_pstates(void)
if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
pr_warn("ibm,pstate-min node not found\n");
- return -ENODEV;
+ goto out;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
pr_warn("ibm,pstate-max node not found\n");
- return -ENODEV;
+ goto out;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
&pstate_nominal)) {
pr_warn("ibm,pstate-nominal not found\n");
- return -ENODEV;
+ goto out;
}
if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo",
@@ -293,14 +293,14 @@ next:
pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
if (!pstate_ids) {
pr_warn("ibm,pstate-ids not found\n");
- return -ENODEV;
+ goto out;
}
pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
&len_freqs);
if (!pstate_freqs) {
pr_warn("ibm,pstate-frequencies-mhz not found\n");
- return -ENODEV;
+ goto out;
}
if (len_ids != len_freqs) {
@@ -311,7 +311,7 @@ next:
nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
if (!nr_pstates) {
pr_warn("No PStates found\n");
- return -ENODEV;
+ goto out;
}
powernv_pstate_info.nr_pstates = nr_pstates;
@@ -352,7 +352,12 @@ next:
/* End of list marker entry */
powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
+
+ of_node_put(power_mgt);
return 0;
+out:
+ of_node_put(power_mgt);
+ return -ENODEV;
}
/* Returns the CPU frequency corresponding to the pstate_id. */
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
new file mode 100644
index 000000000000..d83939a1b3d4
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+
+#define LUT_MAX_ENTRIES 40U
+#define LUT_SRC GENMASK(31, 30)
+#define LUT_L_VAL GENMASK(7, 0)
+#define LUT_CORE_COUNT GENMASK(18, 16)
+#define LUT_ROW_SIZE 32
+#define CLK_HW_DIV 2
+
+/* Register offsets */
+#define REG_ENABLE 0x0
+#define REG_LUT_TABLE 0x110
+#define REG_PERF_STATE 0x920
+
+static unsigned long cpu_hw_rate, xo_rate;
+static struct platform_device *global_pdev;
+
+static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ void __iomem *perf_state_reg = policy->driver_data;
+
+ writel_relaxed(index, perf_state_reg);
+
+ return 0;
+}
+
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+{
+ void __iomem *perf_state_reg;
+ struct cpufreq_policy *policy;
+ unsigned int index;
+
+ policy = cpufreq_cpu_get_raw(cpu);
+ if (!policy)
+ return 0;
+
+ perf_state_reg = policy->driver_data;
+
+ index = readl_relaxed(perf_state_reg);
+ index = min(index, LUT_MAX_ENTRIES - 1);
+
+ return policy->freq_table[index].frequency;
+}
+
+static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
+ unsigned int target_freq)
+{
+ void __iomem *perf_state_reg = policy->driver_data;
+ int index;
+
+ index = policy->cached_resolved_idx;
+ if (index < 0)
+ return 0;
+
+ writel_relaxed(index, perf_state_reg);
+
+ return policy->freq_table[index].frequency;
+}
+
+static int qcom_cpufreq_hw_read_lut(struct device *dev,
+ struct cpufreq_policy *policy,
+ void __iomem *base)
+{
+ u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, freq;
+ unsigned int max_cores = cpumask_weight(policy->cpus);
+ struct cpufreq_frequency_table *table;
+
+ table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ for (i = 0; i < LUT_MAX_ENTRIES; i++) {
+ data = readl_relaxed(base + REG_LUT_TABLE + i * LUT_ROW_SIZE);
+ src = FIELD_GET(LUT_SRC, data);
+ lval = FIELD_GET(LUT_L_VAL, data);
+ core_count = FIELD_GET(LUT_CORE_COUNT, data);
+
+ if (src)
+ freq = xo_rate * lval / 1000;
+ else
+ freq = cpu_hw_rate / 1000;
+
+ /* Ignore boosts in the middle of the table */
+ if (core_count != max_cores) {
+ table[i].frequency = CPUFREQ_ENTRY_INVALID;
+ } else {
+ table[i].frequency = freq;
+ dev_dbg(dev, "index=%d freq=%d, core_count %d\n", i,
+ freq, core_count);
+ }
+
+ /*
+ * Two consecutive entries with the same frequency and core
+ * count mark the end of the table.
+ */
+ if (i > 0 && prev_freq == freq && prev_cc == core_count) {
+ struct cpufreq_frequency_table *prev = &table[i - 1];
+
+ /*
+ * Only the last frequency that might be a boost is treated
+ * as the boost frequency.
+ */
+ if (prev_cc != max_cores) {
+ prev->frequency = prev_freq;
+ prev->flags = CPUFREQ_BOOST_FREQ;
+ }
+
+ break;
+ }
+
+ prev_cc = core_count;
+ prev_freq = freq;
+ }
+
+ table[i].frequency = CPUFREQ_TABLE_END;
+ policy->freq_table = table;
+
+ return 0;
+}
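For LUT rows clocked from the PLL (non-zero source field), the frequency is the XO reference multiplied by the PLL L-value; cpufreq tables are in kHz, hence the division by 1000. A minimal sketch of that derivation, with xo_hz as an assumed reference rate:

static unsigned int lut_entry_khz(u64 xo_hz, u32 lval)
{
	/* PLL output = XO reference * L-value; cpufreq tables are in kHz */
	return div_u64(xo_hz * lval, 1000);
}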
+
+static void qcom_get_related_cpus(int index, struct cpumask *m)
+{
+ struct device_node *cpu_np;
+ struct of_phandle_args args;
+ int cpu, ret;
+
+ for_each_possible_cpu(cpu) {
+ cpu_np = of_cpu_device_node_get(cpu);
+ if (!cpu_np)
+ continue;
+
+ ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
+ "#freq-domain-cells", 0,
+ &args);
+ of_node_put(cpu_np);
+ if (ret < 0)
+ continue;
+
+ if (index == args.args[0])
+ cpumask_set_cpu(cpu, m);
+ }
+}
+
+static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+{
+ struct device *dev = &global_pdev->dev;
+ struct of_phandle_args args;
+ struct device_node *cpu_np;
+ struct resource *res;
+ void __iomem *base;
+ int ret, index;
+
+ cpu_np = of_cpu_device_node_get(policy->cpu);
+ if (!cpu_np)
+ return -EINVAL;
+
+ ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
+ "#freq-domain-cells", 0, &args);
+ of_node_put(cpu_np);
+ if (ret)
+ return ret;
+
+ index = args.args[0];
+
+ res = platform_get_resource(global_pdev, IORESOURCE_MEM, index);
+ if (!res)
+ return -ENODEV;
+
+ base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!base)
+ return -ENOMEM;
+
+ /* The HW must be enabled before we can proceed */
+ if (!(readl_relaxed(base + REG_ENABLE) & 0x1)) {
+ dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ qcom_get_related_cpus(index, policy->cpus);
+ if (!cpumask_weight(policy->cpus)) {
+ dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
+ ret = -ENOENT;
+ goto error;
+ }
+
+ policy->driver_data = base + REG_PERF_STATE;
+
+ ret = qcom_cpufreq_hw_read_lut(dev, policy, base);
+ if (ret) {
+ dev_err(dev, "Domain-%d failed to read LUT\n", index);
+ goto error;
+ }
+
+ policy->fast_switch_possible = true;
+
+ return 0;
+error:
+ devm_iounmap(dev, base);
+ return ret;
+}
+
+static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
+{
+ void __iomem *base = policy->driver_data - REG_PERF_STATE;
+
+ kfree(policy->freq_table);
+ devm_iounmap(&global_pdev->dev, base);
+
+ return 0;
+}
+
+static struct freq_attr *qcom_cpufreq_hw_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &cpufreq_freq_attr_scaling_boost_freqs,
+ NULL
+};
+
+static struct cpufreq_driver cpufreq_qcom_hw_driver = {
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = qcom_cpufreq_hw_target_index,
+ .get = qcom_cpufreq_hw_get,
+ .init = qcom_cpufreq_hw_cpu_init,
+ .exit = qcom_cpufreq_hw_cpu_exit,
+ .fast_switch = qcom_cpufreq_hw_fast_switch,
+ .name = "qcom-cpufreq-hw",
+ .attr = qcom_cpufreq_hw_attr,
+};
+
+static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = clk_get(&pdev->dev, "xo");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ xo_rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ clk = clk_get(&pdev->dev, "alternate");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
+ clk_put(clk);
+
+ global_pdev = pdev;
+
+ ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
+ if (ret)
+ dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
+ else
+ dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");
+
+ return ret;
+}
+
+static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
+{
+ return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
+}
+
+static const struct of_device_id qcom_cpufreq_hw_match[] = {
+ { .compatible = "qcom,cpufreq-hw" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
+
+static struct platform_driver qcom_cpufreq_hw_driver = {
+ .probe = qcom_cpufreq_hw_driver_probe,
+ .remove = qcom_cpufreq_hw_driver_remove,
+ .driver = {
+ .name = "qcom-cpufreq-hw",
+ .of_match_table = qcom_cpufreq_hw_match,
+ },
+};
+
+static int __init qcom_cpufreq_hw_init(void)
+{
+ return platform_driver_register(&qcom_cpufreq_hw_driver);
+}
+subsys_initcall(qcom_cpufreq_hw_init);
+
+static void __exit qcom_cpufreq_hw_exit(void)
+{
+ platform_driver_unregister(&qcom_cpufreq_hw_driver);
+}
+module_exit(qcom_cpufreq_hw_exit);
+
+MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
index 4d976e8dbb2f..0df87b6480fe 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
@@ -63,18 +63,7 @@ static int board_show(struct seq_file *seq, void *p)
return 0;
}
-static int fops_board_open(struct inode *inode, struct file *file)
-{
- return single_open(file, board_show, NULL);
-}
-
-static const struct file_operations fops_board = {
- .open = fops_board_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(board);
static int info_show(struct seq_file *seq, void *p)
{
@@ -105,18 +94,7 @@ static int info_show(struct seq_file *seq, void *p)
return 0;
}
-static int fops_info_open(struct inode *inode, struct file *file)
-{
- return single_open(file, info_show, NULL);
-}
-
-static const struct file_operations fops_info = {
- .open = fops_info_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(info);
static int io_show(struct seq_file *seq, void *p)
{
@@ -162,19 +140,7 @@ static int io_show(struct seq_file *seq, void *p)
return 0;
}
-static int fops_io_open(struct inode *inode, struct file *file)
-{
- return single_open(file, io_show, NULL);
-}
-
-static const struct file_operations fops_io = {
- .open = fops_io_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
+DEFINE_SHOW_ATTRIBUTE(io);
static int __init s3c_freq_debugfs_init(void)
{
@@ -185,13 +151,13 @@ static int __init s3c_freq_debugfs_init(void)
}
dbgfs_file_io = debugfs_create_file("io-timing", S_IRUGO, dbgfs_root,
- NULL, &fops_io);
+ NULL, &io_fops);
dbgfs_file_info = debugfs_create_file("info", S_IRUGO, dbgfs_root,
- NULL, &fops_info);
+ NULL, &info_fops);
dbgfs_file_board = debugfs_create_file("board", S_IRUGO, dbgfs_root,
- NULL, &fops_board);
+ NULL, &board_fops);
return 0;
}
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index db2ede565f1a..b44476a1b7ad 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -167,6 +167,7 @@ static int __init bl_idle_init(void)
{
int ret;
struct device_node *root = of_find_node_by_path("/");
+ const struct of_device_id *match_id;
if (!root)
return -ENODEV;
@@ -174,7 +175,11 @@ static int __init bl_idle_init(void)
/*
* Initialize the driver just for a compliant set of machines
*/
- if (!of_match_node(compatible_machine_match, root))
+ match_id = of_match_node(compatible_machine_match, root);
+
+ of_node_put(root);
+
+ if (!match_id)
return -ENODEV;
if (!mcpm_is_available())
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 9e56bc411061..74c247972bb3 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -247,7 +247,13 @@ static int pseries_idle_probe(void)
return -ENODEV;
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
- if (lppaca_shared_proc(get_lppaca())) {
+ /*
+ * Use local_paca instead of get_lppaca() since preemption is
+ * not disabled here; in fact it is not required, because
+ * lppaca_ptr does not need to be the value associated with
+ * the current CPU and can be from any CPU.
+ */
+ if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
cpuidle_state_table = shared_states;
max_idle_state = ARRAY_SIZE(shared_states);
} else {
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 4a97446f66d8..7f108309e871 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -202,7 +202,6 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
struct cpuidle_state *target_state = &drv->states[index];
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
ktime_t time_start, time_end;
- s64 diff;
/*
* Tell the time framework to switch to a broadcast timer because our
@@ -248,6 +247,9 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
local_irq_enable();
if (entered_state >= 0) {
+ s64 diff, delay = drv->states[entered_state].exit_latency;
+ int i;
+
/*
* Update cpuidle counters
* This can be moved to within driver enter routine,
@@ -260,6 +262,33 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
dev->last_residency = (int)diff;
dev->states_usage[entered_state].time += dev->last_residency;
dev->states_usage[entered_state].usage++;
+
+ if (diff < drv->states[entered_state].target_residency) {
+ for (i = entered_state - 1; i >= 0; i--) {
+ if (drv->states[i].disabled ||
+ dev->states_usage[i].disable)
+ continue;
+
+ /* An enabled shallower state exists, so update the counter. */
+ dev->states_usage[entered_state].above++;
+ break;
+ }
+ } else if (diff > delay) {
+ for (i = entered_state + 1; i < drv->state_count; i++) {
+ if (drv->states[i].disabled ||
+ dev->states_usage[i].disable)
+ continue;
+
+ /*
+ * Update if a deeper state would have been a
+ * better match for the observed idle duration.
+ */
+ if (diff - delay >= drv->states[i].target_residency)
+ dev->states_usage[entered_state].below++;
+
+ break;
+ }
+ }
} else {
dev->last_residency = 0;
}
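The new counters classify each wakeup after the fact: "above" counts exits whose measured residency fell short of the entered state's target (a shallower enabled state would have fit), while "below" counts exits where even a deeper enabled state's target residency would have been met after subtracting the exit latency. A condensed sketch of that decision, leaving out the disabled-state scan the real code performs:

enum idle_fit { FIT_OK, FIT_TOO_DEEP, FIT_TOO_SHALLOW };

static enum idle_fit classify_idle(s64 residency, s64 target_entered,
				   s64 exit_latency, s64 target_deeper)
{
	if (residency < target_entered)
		return FIT_TOO_DEEP;		/* counted in "above" */
	if (residency > exit_latency &&
	    residency - exit_latency >= target_deeper)
		return FIT_TOO_SHALLOW;		/* counted in "below" */
	return FIT_OK;
}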
@@ -702,4 +731,5 @@ static int __init cpuidle_init(void)
}
module_param(off, int, 0444);
+module_param_string(governor, param_governor, CPUIDLE_NAME_LEN, 0444);
core_initcall(cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 2965ab32a583..d6613101af92 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -7,6 +7,7 @@
#define __DRIVER_CPUIDLE_H
/* For internal use only */
+extern char param_governor[];
extern struct cpuidle_governor *cpuidle_curr_governor;
extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 9fed1b829292..bb93e5cf6a4a 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -11,10 +11,13 @@
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/mutex.h>
+#include <linux/module.h>
#include <linux/pm_qos.h>
#include "cpuidle.h"
+char param_governor[CPUIDLE_NAME_LEN];
+
LIST_HEAD(cpuidle_governors);
struct cpuidle_governor *cpuidle_curr_governor;
@@ -86,9 +89,11 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
mutex_lock(&cpuidle_lock);
if (__cpuidle_find_governor(gov->name) == NULL) {
ret = 0;
- list_add_tail(&gov->governor_list, &cpuidle_governors);
if (!cpuidle_curr_governor ||
- cpuidle_curr_governor->rating < gov->rating)
+ !strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) ||
+ (cpuidle_curr_governor->rating < gov->rating &&
+ strncasecmp(param_governor, cpuidle_curr_governor->name,
+ CPUIDLE_NAME_LEN)))
cpuidle_switch_governor(gov);
}
mutex_unlock(&cpuidle_lock);
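With the new module parameter, registration switches governors when there is no current governor, when the newcomer matches the requested name, or when it outrates a current governor that was not itself requested by name. A sketch of that predicate in isolation (function name hypothetical):

static bool should_switch_governor(const char *param, const char *cur,
				   int cur_rating, const char *new,
				   int new_rating)
{
	if (!cur)
		return true;
	/* the governor named on the command line always wins */
	if (!strncasecmp(param, new, CPUIDLE_NAME_LEN))
		return true;
	/* otherwise prefer ratings, unless the current one was requested */
	return new_rating > cur_rating &&
	       strncasecmp(param, cur, CPUIDLE_NAME_LEN);
}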
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index 85792d371add..b17d153e724f 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -20,8 +20,17 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
local_irq_enable();
if (!current_set_polling_and_test()) {
- u64 limit = (u64)drv->states[1].target_residency * NSEC_PER_USEC;
unsigned int loop_count = 0;
+ u64 limit = TICK_USEC;
+ int i;
+
+ for (i = 1; i < drv->state_count; i++) {
+ if (drv->states[i].disabled || dev->states_usage[i].disable)
+ continue;
+
+ limit = (u64)drv->states[i].target_residency * NSEC_PER_USEC;
+ break;
+ }
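Rather than hard-coding state 1, the polling limit now comes from the target residency of the shallowest enabled state, falling back to TICK_USEC when every deeper state is disabled. A standalone sketch of that selection, with disabled[] standing in for the drv->states[i].disabled and dev->states_usage[i].disable checks:

static u64 poll_limit_ns(const unsigned int *target_us,
			 const bool *disabled, int count, u64 fallback_us)
{
	int i;

	for (i = 1; i < count; i++) {
		if (disabled[i])
			continue;
		return (u64)target_us[i] * 1000;	/* NSEC_PER_USEC */
	}

	return fallback_us * 1000;
}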
while (!need_resched()) {
cpu_relax();
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index e754c7aae7f7..eb20adb5de23 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -301,6 +301,8 @@ define_show_state_str_function(name)
define_show_state_str_function(desc)
define_show_state_ull_function(disable)
define_store_state_ull_function(disable)
+define_show_state_ull_function(above)
+define_show_state_ull_function(below)
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
@@ -310,6 +312,8 @@ define_one_state_ro(power, show_state_power_usage);
define_one_state_ro(usage, show_state_usage);
define_one_state_ro(time, show_state_time);
define_one_state_rw(disable, show_state_disable, store_state_disable);
+define_one_state_ro(above, show_state_above);
+define_one_state_ro(below, show_state_below);
static struct attribute *cpuidle_state_default_attrs[] = {
&attr_name.attr,
@@ -320,6 +324,8 @@ static struct attribute *cpuidle_state_default_attrs[] = {
&attr_usage.attr,
&attr_time.attr,
&attr_disable.attr,
+ &attr_above.attr,
+ &attr_below.attr,
NULL
};
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index caa98a7fe392..d80751d48cf1 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -762,10 +762,12 @@ config CRYPTO_DEV_CCREE
select CRYPTO_ECB
select CRYPTO_CTR
select CRYPTO_XTS
+ select CRYPTO_SM4
+ select CRYPTO_SM3
help
Say 'Y' to enable a driver for the REE interface of the Arm
TrustZone CryptoCell family of processors. Currently the
- CryptoCell 712, 710 and 630 are supported.
+ CryptoCell 713, 703, 712, 710 and 630 are supported.
Choose this if you wish to use hardware acceleration of
cryptographic operations on the system REE.
If unsure say Y.
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index f5c07498ea4f..4092c2aad8e2 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -520,8 +520,7 @@ static int crypto4xx_compute_gcm_hash_key_sw(__le32 *hash_start, const u8 *key,
uint8_t src[16] = { 0 };
int rc = 0;
- aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ aes_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(aes_tfm)) {
rc = PTR_ERR(aes_tfm);
pr_warn("could not load aes cipher driver: %d\n", rc);
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 6eaec9ba0f68..63cb6956c948 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -596,7 +596,7 @@ static void crypto4xx_aead_done(struct crypto4xx_device *dev,
pd->pd_ctl_len.bf.pkt_len,
dst);
} else {
- __dma_sync_page(sg_page(dst), dst->offset, dst->length,
+ dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
DMA_FROM_DEVICE);
}
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 2d1f1db9f807..c9393ffb70ed 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3868,7 +3868,6 @@ static struct iproc_alg_s driver_algs[] = {
.cra_driver_name = "ctr-aes-iproc",
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ablkcipher = {
- /* .geniv = "chainiv", */
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
@@ -4605,7 +4604,6 @@ static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg)
crypto->cra_priority = cipher_pri;
crypto->cra_alignmask = 0;
crypto->cra_ctxsize = sizeof(struct iproc_ctx_s);
- INIT_LIST_HEAD(&crypto->cra_list);
crypto->cra_init = ablkcipher_cra_init;
crypto->cra_exit = generic_cra_exit;
@@ -4652,12 +4650,16 @@ static int spu_register_ahash(struct iproc_alg_s *driver_alg)
hash->halg.statesize = sizeof(struct spu_hash_export_s);
if (driver_alg->auth_info.mode != HASH_MODE_HMAC) {
- hash->setkey = ahash_setkey;
hash->init = ahash_init;
hash->update = ahash_update;
hash->final = ahash_final;
hash->finup = ahash_finup;
hash->digest = ahash_digest;
+ if ((driver_alg->auth_info.alg == HASH_ALG_AES) &&
+ ((driver_alg->auth_info.mode == HASH_MODE_XCBC) ||
+ (driver_alg->auth_info.mode == HASH_MODE_CMAC))) {
+ hash->setkey = ahash_setkey;
+ }
} else {
hash->setkey = ahash_hmac_setkey;
hash->init = ahash_hmac_init;
@@ -4687,7 +4689,6 @@ static int spu_register_aead(struct iproc_alg_s *driver_alg)
aead->base.cra_priority = aead_pri;
aead->base.cra_alignmask = 0;
aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s);
- INIT_LIST_HEAD(&aead->base.cra_list);
aead->base.cra_flags |= CRYPTO_ALG_ASYNC;
/* setkey set in alg initialization */
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 869f092432de..92e593e2069a 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -72,6 +72,8 @@
#define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \
CAAM_CMD_SZ * 5)
+#define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
+
#define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -513,6 +515,61 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
return 0;
}
+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ u32 *desc;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, true, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), ctx->dir);
+
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, false, false);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), ctx->dir);
+
+ return 0;
+}
+
+static int chachapoly_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+
+ if (authsize != POLY1305_DIGEST_SIZE)
+ return -EINVAL;
+
+ ctx->authsize = authsize;
+ return chachapoly_set_sh_desc(aead);
+}
+
+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+
+ if (keylen != CHACHA_KEY_SIZE + saltlen) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->cdata.key_virt = key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
+}
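For rfc7539esp the 4-byte salt travels appended to the 32-byte ChaCha20 key, so setkey derives the salt length from the transform's IV size (the 12-byte nonce minus the 8-byte explicit IV). A small sketch of the resulting length check, with the sizes written out as assumed constants:

#define CHACHA_KEY_SIZE		32
#define CHACHAPOLY_IV_SIZE	12

static bool chachapoly_keylen_ok(unsigned int keylen, unsigned int ivsize)
{
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	/* rfc7539: ivsize 12, saltlen 0; rfc7539esp: ivsize 8, saltlen 4 */
	return keylen == CHACHA_KEY_SIZE + saltlen;
}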
+
static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
@@ -1031,6 +1088,40 @@ static void init_gcm_job(struct aead_request *req,
/* End of blank commands */
}
+static void init_chachapoly_job(struct aead_request *req,
+ struct aead_edesc *edesc, bool all_contig,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int assoclen = req->assoclen;
+ u32 *desc = edesc->hw_desc;
+ u32 ctx_iv_off = 4;
+
+ init_aead_job(req, edesc, all_contig, encrypt);
+
+ if (ivsize != CHACHAPOLY_IV_SIZE) {
+ /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
+ ctx_iv_off += 4;
+
+ /*
+ * The associated data already includes the IV, but we need
+ * to skip it when we authenticate or encrypt...
+ */
+ assoclen -= ivsize;
+ }
+
+ append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
+
+ /*
+ * For IPsec, load the IV at a further offset in the same register.
+ * For RFC7539, simply load the 12-byte nonce in a single operation.
+ */
+ append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ctx_iv_off << LDST_OFFSET_SHIFT);
+}
+
static void init_authenc_job(struct aead_request *req,
struct aead_edesc *edesc,
bool all_contig, bool encrypt)
@@ -1289,6 +1380,72 @@ static int gcm_encrypt(struct aead_request *req)
return ret;
}
+static int chachapoly_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret;
+
+ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
+ true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ desc = edesc->hw_desc;
+
+ init_chachapoly_job(req, edesc, all_contig, true);
+ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+static int chachapoly_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool all_contig;
+ u32 *desc;
+ int ret;
+
+ edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
+ false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ desc = edesc->hw_desc;
+
+ init_chachapoly_job(req, edesc, all_contig, false);
+ print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
static int ipsec_gcm_encrypt(struct aead_request *req)
{
if (req->assoclen < 8)
@@ -3002,6 +3159,50 @@ static struct caam_aead_alg driver_aeads[] = {
.geniv = true,
},
},
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc7539(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539-chacha20-poly1305-"
+ "caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = chachapoly_encrypt,
+ .decrypt = chachapoly_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc7539esp(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539esp-chacha20-"
+ "poly1305-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = chachapoly_encrypt,
+ .decrypt = chachapoly_decrypt,
+ .ivsize = 8,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ },
+ },
};
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
@@ -3135,7 +3336,7 @@ static int __init caam_algapi_init(void)
struct device *ctrldev;
struct caam_drv_private *priv;
int i = 0, err = 0;
- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
@@ -3168,14 +3369,38 @@ static int __init caam_algapi_init(void)
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ if (priv->era < 10) {
+ u32 cha_vid, cha_inst;
+
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
+ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
+ CHA_ID_LS_DES_SHIFT;
+ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ ccha_inst = 0;
+ ptha_inst = 0;
+ } else {
+ u32 aesa, mdha;
+
+ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
+ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+
+ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+ aes_inst = aesa & CHA_VER_NUM_MASK;
+ md_inst = mdha & CHA_VER_NUM_MASK;
+ ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
+ ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
+ }
/* If MD is present, limit digest size based on LP256 */
- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
@@ -3196,10 +3421,10 @@ static int __init caam_algapi_init(void)
* Check support for AES modes not available
* on LP devices.
*/
- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
- if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_XTS)
- continue;
+ if (aes_vid == CHA_VER_VID_AES_LP &&
+ (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_XTS)
+ continue;
caam_skcipher_alg_init(t_alg);
@@ -3232,21 +3457,28 @@ static int __init caam_algapi_init(void)
if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
continue;
+ /* Skip CHACHA20 algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
+ continue;
+
+ /* Skip POLY1305 algorithms if not supported by device */
+ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
+ continue;
+
/*
* Check support for AES algorithms not available
* on LP devices.
*/
- if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
- if (alg_aai == OP_ALG_AAI_GCM)
- continue;
+ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
+ continue;
/*
* Skip algorithms requiring message digests
* if MD or MD size is not supported by device.
*/
- if (c2_alg_sel &&
- (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
- continue;
+ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+ (!md_inst || t_alg->aead.maxauthsize > md_limit))
+ continue;
caam_aead_alg_init(t_alg);
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 1a6f0da14106..7db1640d3577 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1213,6 +1213,139 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
+/**
+ * cnstr_shdsc_chachapoly - ChaCha20 + Poly1305 generic AEAD (rfc7539) and
+ * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared
+ * descriptor (non-protocol).
+ * @desc: pointer to buffer used for descriptor construction
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with
+ * OP_ALG_AAI_AEAD.
+ * @adata: pointer to authentication transform definitions
+ * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with
+ * OP_ALG_AAI_AEAD.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @encap: true if encapsulation, false if decapsulation
+ * @is_qi: true when called from caam/qi
+ */
+void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool encap,
+ const bool is_qi)
+{
+ u32 *key_jump_cmd, *wait_cmd;
+ u32 nfifo;
+ const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE);
+
+ /* Note: Context registers are saved. */
+ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+
+ /* skip key loading if they are loaded due to sharing */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+
+ /* For IPsec load the salt from keymat in the context register */
+ if (is_ipsec)
+ append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4,
+ LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT |
+ 4 << LDST_OFFSET_SHIFT);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 and 1 operations: Poly & ChaCha */
+ if (encap) {
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_ENCRYPT);
+ } else {
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ }
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+ u32 ctx1_iv_off = is_ipsec ? 8 : 4;
+
+ /* REG3 = assoclen */
+ append_seq_load(desc, 4, LDST_CLASS_DECO |
+ LDST_SRCDST_WORD_DECO_MATH3 |
+ 4 << LDST_OFFSET_SHIFT);
+
+ wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_CALM | JUMP_COND_NCP |
+ JUMP_COND_NOP | JUMP_COND_NIP |
+ JUMP_COND_NIFP);
+ set_jump_tgt_here(desc, wait_load_cmd);
+
+ append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+ LDST_SRCDST_BYTE_CONTEXT |
+ ctx1_iv_off << LDST_OFFSET_SHIFT);
+ }
+
+ /*
+ * MAGIC with NFIFO
+ * Read the associated data from the input and send it to the
+ * class1 and class2 alignment blocks. From class1, send the data
+ * to the output fifo and then write it to memory, since the AD
+ * does not need to be encrypted.
+ */
+ nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 |
+ NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND;
+ append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3);
+
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO |
+ FIFOLD_CLASS_CLASS1 | LDST_VLF);
+ append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK |
+ MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
+
+ /* IPsec - copy IV at the output */
+ if (is_ipsec)
+ append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA |
+ 0x2 << 25);
+
+ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
+ JUMP_COND_NOP | JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, wait_cmd);
+
+ if (encap) {
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0,
+ CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+ } else {
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0,
+ CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0,
+ CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV for verification */
+ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ }
+
+ print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+}
+EXPORT_SYMBOL(cnstr_shdsc_chachapoly);
+
/* For skcipher encrypt and decrypt, read from req->src and write to req->dst */
static inline void skcipher_append_src_dst(u32 *desc)
{
@@ -1228,7 +1361,8 @@ static inline void skcipher_append_src_dst(u32 *desc)
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
+ * - OP_ALG_ALGSEL_CHACHA20
* @ivsize: initialization vector size
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
@@ -1293,7 +1427,8 @@ EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128
+ * - OP_ALG_ALGSEL_CHACHA20
* @ivsize: initialization vector size
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index 1315c8f6f951..d5ca42ff961a 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -96,6 +96,11 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);
+void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool encap,
+ const bool is_qi);
+
void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, const bool is_rfc3686,
const u32 ctx1_iv_off);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index 23c9fc4975f8..c0d55310aade 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -2462,7 +2462,7 @@ static int __init caam_qi_algapi_init(void)
struct device *ctrldev;
struct caam_drv_private *priv;
int i = 0, err = 0;
- u32 cha_vid, cha_inst, des_inst, aes_inst, md_inst;
+ u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
unsigned int md_limit = SHA512_DIGEST_SIZE;
bool registered = false;
@@ -2497,14 +2497,34 @@ static int __init caam_qi_algapi_init(void)
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
*/
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
- des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT;
- aes_inst = (cha_inst & CHA_ID_LS_AES_MASK) >> CHA_ID_LS_AES_SHIFT;
- md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ if (priv->era < 10) {
+ u32 cha_vid, cha_inst;
+
+ cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
+ aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
+ md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+
+ cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
+ CHA_ID_LS_DES_SHIFT;
+ aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
+ md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ } else {
+ u32 aesa, mdha;
+
+ aesa = rd_reg32(&priv->ctrl->vreg.aesa);
+ mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+ aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+
+ des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
+ aes_inst = aesa & CHA_VER_NUM_MASK;
+ md_inst = mdha & CHA_VER_NUM_MASK;
+ }
/* If MD is present, limit digest size based on LP256 */
- if (md_inst && ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256))
+ if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
@@ -2556,8 +2576,7 @@ static int __init caam_qi_algapi_init(void)
* Check support for AES algorithms not available
* on LP devices.
*/
- if (((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) &&
- (alg_aai == OP_ALG_AAI_GCM))
+ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
continue;
/*
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 7d8ac0222fa3..425d5d974613 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -462,7 +462,15 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
edesc->dst_nents = dst_nents;
edesc->iv_dma = iv_dma;
- edesc->assoclen = cpu_to_caam32(req->assoclen);
+ if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+ OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
+ /*
+ * The associated data already includes the IV, but we need
+ * to skip it when we authenticate or encrypt...
+ */
+ edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
+ else
+ edesc->assoclen = cpu_to_caam32(req->assoclen);
edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, edesc->assoclen_dma)) {
@@ -532,6 +540,68 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
return edesc;
}
+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ u32 *desc;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, true, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, false, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int chachapoly_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+
+ if (authsize != POLY1305_DIGEST_SIZE)
+ return -EINVAL;
+
+ ctx->authsize = authsize;
+ return chachapoly_set_sh_desc(aead);
+}
+
+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+
+ if (keylen != CHACHA_KEY_SIZE + saltlen) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->cdata.key_virt = key;
+ ctx->cdata.keylen = keylen - saltlen;
+
+ return chachapoly_set_sh_desc(aead);
+}
+
static int gcm_set_sh_desc(struct crypto_aead *aead)
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
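For context on chachapoly_setkey() above: the rfc7539 and rfc7539esp templates mandate different key layouts, and saltlen = CHACHAPOLY_IV_SIZE - ivsize captures both. A worked example with the sizes implied by the patch (CHACHAPOLY_IV_SIZE = 12, CHACHA_KEY_SIZE = 32):

/*
 * rfc7539(chacha20,poly1305):    ivsize = 12 -> saltlen = 0
 *   key = 32-byte ChaCha20 key                 (keylen = 32)
 * rfc7539esp(chacha20,poly1305): ivsize = 8  -> saltlen = 4
 *   key = 32-byte ChaCha20 key || 4-byte salt  (keylen = 36)
 */

In the ESP case the 4 salt bytes stay at the tail of the key material, and only the first keylen - saltlen bytes are accounted as the cipher key (ctx->cdata.keylen).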
@@ -816,7 +886,9 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
u32 *desc;
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
- OP_ALG_AAI_CTR_MOD128);
+ OP_ALG_AAI_CTR_MOD128) &&
+ ((ctx->cdata.algtype & OP_ALG_ALGSEL_MASK) !=
+ OP_ALG_ALGSEL_CHACHA20);
const bool is_rfc3686 = alg->caam.rfc3686;
print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
@@ -1494,7 +1566,23 @@ static struct caam_skcipher_alg driver_algs[] = {
.ivsize = AES_BLOCK_SIZE,
},
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
- }
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "chacha20",
+ .cra_driver_name = "chacha20-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = CHACHA_KEY_SIZE,
+ .max_keysize = CHACHA_KEY_SIZE,
+ .ivsize = CHACHA_IV_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
+ },
};
static struct caam_aead_alg driver_aeads[] = {
@@ -2611,6 +2699,50 @@ static struct caam_aead_alg driver_aeads[] = {
{
.aead = {
.base = {
+ .cra_name = "rfc7539(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539-chacha20-poly1305-"
+ "caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CHACHAPOLY_IV_SIZE,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc7539esp(chacha20,poly1305)",
+ .cra_driver_name = "rfc7539esp-chacha20-"
+ "poly1305-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = chachapoly_setkey,
+ .setauthsize = chachapoly_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = 8,
+ .maxauthsize = POLY1305_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+ OP_ALG_AAI_AEAD,
+ .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+ OP_ALG_AAI_AEAD,
+ },
+ },
+ {
+ .aead = {
+ .base = {
.cra_name = "authenc(hmac(sha512),"
"rfc3686(ctr(aes)))",
.cra_driver_name = "authenc-hmac-sha512-"
@@ -4908,6 +5040,11 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
alg_sel == OP_ALG_ALGSEL_AES)
continue;
+ /* Skip CHACHA20 algorithms if not supported by device */
+ if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
+ !priv->sec_attr.ccha_acc_num)
+ continue;
+
t_alg->caam.dev = dev;
caam_skcipher_alg_init(t_alg);
@@ -4940,11 +5077,22 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
c1_alg_sel == OP_ALG_ALGSEL_AES)
continue;
+ /* Skip CHACHA20 algorithms if not supported by device */
+ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
+ !priv->sec_attr.ccha_acc_num)
+ continue;
+
+ /* Skip POLY1305 algorithms if not supported by device */
+ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
+ !priv->sec_attr.ptha_acc_num)
+ continue;
+
/*
* Skip algorithms requiring message digests
* if MD not supported by device.
*/
- if (!priv->sec_attr.md_acc_num && c2_alg_sel)
+ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+ !priv->sec_attr.md_acc_num)
continue;
t_alg->caam.dev = dev;
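Looking back at the aead_edesc_alloc() change at the top of this file: for the ESP variant, the generic AEAD layer presents an assoclen that still includes the 8-byte IV appended to the associated data, while the CAAM descriptor wants the AAD length without it. A worked example with illustrative ESP values:

/*
 * rfc7539esp, no extended sequence numbers:
 *   assoc data    = 8-byte ESP header (SPI || seqno) + 8-byte IV
 *   req->assoclen = 16
 *   ivsize        = 8  (!= CHACHAPOLY_IV_SIZE, so the ESP branch is taken)
 *   edesc->assoclen = 16 - 8 = 8  (only SPI || seqno is authenticated
 *                                  as AAD; the IV is fed separately)
 */

The plain rfc7539 template has ivsize == CHACHAPOLY_IV_SIZE, so it falls through to the unmodified assignment.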
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 46924affa0bd..81712aa5d0f2 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -3,6 +3,7 @@
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
* Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -1801,7 +1802,7 @@ static int __init caam_algapi_hash_init(void)
int i = 0, err = 0;
struct caam_drv_private *priv;
unsigned int md_limit = SHA512_DIGEST_SIZE;
- u32 cha_inst, cha_vid;
+ u32 md_inst, md_vid;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
if (!dev_node) {
@@ -1831,18 +1832,27 @@ static int __init caam_algapi_hash_init(void)
* Register crypto algorithms the device supports. First, identify
* presence and attributes of MD block.
*/
- cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
+ if (priv->era < 10) {
+ md_vid = (rd_reg32(&priv->ctrl->perfmon.cha_id_ls) &
+ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ md_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+ } else {
+ u32 mdha = rd_reg32(&priv->ctrl->vreg.mdha);
+
+ md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
+ md_inst = mdha & CHA_VER_NUM_MASK;
+ }
/*
* Skip registration of any hashing algorithms if MD block
* is not present.
*/
- if (!((cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT))
+ if (!md_inst)
return -ENODEV;
/* Limit digest size based on LP256 */
- if ((cha_vid & CHA_ID_LS_MD_MASK) == CHA_ID_LS_MD_LP256)
+ if (md_vid == CHA_VER_VID_MD_LP256)
md_limit = SHA256_DIGEST_SIZE;
INIT_LIST_HEAD(&hash_list);
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index 4fc209cbbeab..77ab28a2811a 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -3,6 +3,7 @@
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
* Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*
* There is no Shared Descriptor for PKC, so the Job Descriptor must carry
* all the desired key parameters, input and output pointers.
@@ -1017,7 +1018,7 @@ static int __init caam_pkc_init(void)
struct platform_device *pdev;
struct device *ctrldev;
struct caam_drv_private *priv;
- u32 cha_inst, pk_inst;
+ u32 pk_inst;
int err;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -1045,8 +1046,11 @@ static int __init caam_pkc_init(void)
return -ENODEV;
/* Determine public key hardware accelerator presence. */
- cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
- pk_inst = (cha_inst & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+ if (priv->era < 10)
+ pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
+ else
+ pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
/* Do not register algorithms if PKHA is not present. */
if (!pk_inst)
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 4318b0aa6fb9..a387c8d49a62 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -3,6 +3,7 @@
* caam - Freescale FSL CAAM support for hw_random
*
* Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*
* Based on caamalg.c crypto API driver.
*
@@ -309,6 +310,7 @@ static int __init caam_rng_init(void)
struct platform_device *pdev;
struct device *ctrldev;
struct caam_drv_private *priv;
+ u32 rng_inst;
int err;
dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
@@ -336,7 +338,13 @@ static int __init caam_rng_init(void)
return -ENODEV;
/* Check for an instantiated RNG before registration */
- if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
+ if (priv->era < 10)
+ rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ else
+ rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;
+
+ if (!rng_inst)
return -ENODEV;
dev = caam_jr_alloc();
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 9604ff7a335e..87d9efe4c7aa 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -36,6 +36,8 @@
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
+#include <crypto/chacha.h>
+#include <crypto/poly1305.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/akcipher.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 3fc793193821..16bbc72f041a 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -3,6 +3,7 @@
* Controller-level driver, kernel property detection, initialization
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*/
#include <linux/device.h>
@@ -106,7 +107,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
struct caam_deco __iomem *deco = ctrlpriv->deco;
unsigned int timeout = 100000;
- u32 deco_dbg_reg, flags;
+ u32 deco_dbg_reg, deco_state, flags;
int i;
@@ -149,13 +150,22 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
timeout = 10000000;
do {
deco_dbg_reg = rd_reg32(&deco->desc_dbg);
+
+ if (ctrlpriv->era < 10)
+ deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
+ DESC_DBG_DECO_STAT_SHIFT;
+ else
+ deco_state = (rd_reg32(&deco->dbg_exec) &
+ DESC_DER_DECO_STAT_MASK) >>
+ DESC_DER_DECO_STAT_SHIFT;
+
/*
* If an error occurred in the descriptor, then
* the DECO status field will be set to 0x0D
*/
- if ((deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) ==
- DESC_DBG_DECO_STAT_HOST_ERR)
+ if (deco_state == DECO_STAT_HOST_ERR)
break;
+
cpu_relax();
} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);
@@ -491,7 +501,7 @@ static int caam_probe(struct platform_device *pdev)
struct caam_perfmon *perfmon;
#endif
u32 scfgr, comp_params;
- u32 cha_vid_ls;
+ u8 rng_vid;
int pg_size;
int BLOCK_OFFSET = 0;
@@ -733,15 +743,19 @@ static int caam_probe(struct platform_device *pdev)
goto caam_remove;
}
- cha_vid_ls = rd_reg32(&ctrl->perfmon.cha_id_ls);
+ if (ctrlpriv->era < 10)
+ rng_vid = (rd_reg32(&ctrl->perfmon.cha_id_ls) &
+ CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+ else
+ rng_vid = (rd_reg32(&ctrl->vreg.rng) & CHA_VER_VID_MASK) >>
+ CHA_VER_VID_SHIFT;
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
* In case of SoCs with Management Complex, RNG is managed by MC f/w.
*/
- if (!ctrlpriv->mc_en &&
- (cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
+ if (!ctrlpriv->mc_en && rng_vid >= 4) {
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index f76ff160a02c..ec10230178c5 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -4,6 +4,7 @@
* Definitions to support CAAM descriptor instruction generation
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*/
#ifndef DESC_H
@@ -242,6 +243,7 @@
#define LDST_SRCDST_WORD_DESCBUF_SHARED (0x42 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DESCBUF_JOB_WE (0x45 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_DESCBUF_SHARED_WE (0x46 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO_SM (0x71 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
/* Offset in source/destination */
@@ -284,6 +286,12 @@
#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+/* Special Length definitions when dst=sm, nfifo-{sm,m} */
+#define LDLEN_MATH0 0
+#define LDLEN_MATH1 1
+#define LDLEN_MATH2 2
+#define LDLEN_MATH3 3
+
/*
* FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
* Command Constructs
@@ -408,6 +416,7 @@
#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_METADATA (0x3e << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
/*
@@ -1133,6 +1142,12 @@
#define OP_ALG_TYPE_CLASS1 (2 << OP_ALG_TYPE_SHIFT)
#define OP_ALG_TYPE_CLASS2 (4 << OP_ALG_TYPE_SHIFT)
+/* version register fields */
+#define OP_VER_CCHA_NUM 0x000000ff /* Number CCHAs instantiated */
+#define OP_VER_CCHA_MISC 0x0000ff00 /* CCHA Miscellaneous Information */
+#define OP_VER_CCHA_REV 0x00ff0000 /* CCHA Revision Number */
+#define OP_VER_CCHA_VID 0xff000000 /* CCHA Version ID */
+
#define OP_ALG_ALGSEL_SHIFT 16
#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SUBMASK (0x0f << OP_ALG_ALGSEL_SHIFT)
@@ -1152,6 +1167,8 @@
#define OP_ALG_ALGSEL_KASUMI (0x70 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_CRC (0x90 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SNOW_F9 (0xA0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_CHACHA20 (0xD0 << OP_ALG_ALGSEL_SHIFT)
+#define OP_ALG_ALGSEL_POLY1305 (0xE0 << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_AAI_SHIFT 4
#define OP_ALG_AAI_MASK (0x1ff << OP_ALG_AAI_SHIFT)
@@ -1199,6 +1216,11 @@
#define OP_ALG_AAI_RNG4_AI (0x80 << OP_ALG_AAI_SHIFT)
#define OP_ALG_AAI_RNG4_SK (0x100 << OP_ALG_AAI_SHIFT)
+/* Chacha20 AAI set */
+#define OP_ALG_AAI_AEAD (0x002 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_KEYSTREAM (0x001 << OP_ALG_AAI_SHIFT)
+#define OP_ALG_AAI_BC8 (0x008 << OP_ALG_AAI_SHIFT)
+
/* hmac/smac AAI set */
#define OP_ALG_AAI_HASH (0x00 << OP_ALG_AAI_SHIFT)
#define OP_ALG_AAI_HMAC (0x01 << OP_ALG_AAI_SHIFT)
@@ -1387,6 +1409,7 @@
#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_AUX_ABLK (0x0a << MOVE_SRC_SHIFT)
#define MOVE_DEST_SHIFT 16
#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
@@ -1413,6 +1436,10 @@
#define MOVELEN_MRSEL_SHIFT 0
#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
+#define MOVELEN_MRSEL_MATH0 (0 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH1 (1 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH2 (2 << MOVELEN_MRSEL_SHIFT)
+#define MOVELEN_MRSEL_MATH3 (3 << MOVELEN_MRSEL_SHIFT)
/*
* MATH Command Constructs
@@ -1589,6 +1616,7 @@
#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_POLY (0xB << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
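The new selectors combine with the existing OPERATION command encoding: ALGSEL occupies bits 23:16 and AAI bits 12:4 of the command word. As a sketch, the class 1 ChaCha20-Poly1305 operation registered by caamalg_qi2.c would be assembled from defines in this header roughly as:

/* OP_ALG_ALGSEL_CHACHA20 = 0xD0 << 16, OP_ALG_AAI_AEAD = 0x002 << 4 */
u32 c1_op = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_CHACHA20 |
	    OP_ALG_AAI_AEAD | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT;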
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index d4256fa4a1d6..2980b8ef1fb1 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -189,6 +189,7 @@ static inline u32 *append_##cmd(u32 * const desc, u32 options) \
}
APPEND_CMD_RET(jump, JUMP)
APPEND_CMD_RET(move, MOVE)
+APPEND_CMD_RET(move_len, MOVE_LEN)
static inline void set_jump_tgt_here(u32 * const desc, u32 *jump_cmd)
{
@@ -327,7 +328,11 @@ static inline void append_##cmd##_imm_##type(u32 * const desc, type immediate, \
u32 options) \
{ \
PRINT_POS; \
- append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
+ if (options & LDST_LEN_MASK) \
+ append_cmd(desc, CMD_##op | IMMEDIATE | options); \
+ else \
+ append_cmd(desc, CMD_##op | IMMEDIATE | options | \
+ sizeof(type)); \
append_cmd(desc, immediate); \
}
APPEND_CMD_RAW_IMM(load, LOAD, u32);
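The APPEND_CMD_RAW_IMM change above keeps immediate loads working with the special length selectors added in desc.h: when the caller already encodes a value in LDST_LEN_MASK (e.g. LDLEN_MATH1, meaning "take the length from the MATH1 register"), the macro must not OR sizeof(type) on top of it. An illustrative call under that assumption (not lifted from the patch; nfifo_entry is a prebuilt u32 NFIFO entry):

/* Load an immediate NFIFO entry; data length comes from MATH1. */
append_load_imm_u32(desc, nfifo_entry,
		    LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO_SM |
		    LDLEN_MATH1);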
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 457815f965c0..3cd0822ea819 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -3,6 +3,7 @@
* CAAM hardware register-level view
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2018 NXP
*/
#ifndef REGS_H
@@ -211,6 +212,47 @@ struct jr_outentry {
u32 jrstatus; /* Status for completed descriptor */
} __packed;
+/* Version registers (Era 10+) e80-eff */
+struct version_regs {
+ u32 crca; /* CRCA_VERSION */
+ u32 afha; /* AFHA_VERSION */
+ u32 kfha; /* KFHA_VERSION */
+ u32 pkha; /* PKHA_VERSION */
+ u32 aesa; /* AESA_VERSION */
+ u32 mdha; /* MDHA_VERSION */
+ u32 desa; /* DESA_VERSION */
+ u32 snw8a; /* SNW8A_VERSION */
+ u32 snw9a; /* SNW9A_VERSION */
+ u32 zuce; /* ZUCE_VERSION */
+ u32 zuca; /* ZUCA_VERSION */
+ u32 ccha; /* CCHA_VERSION */
+ u32 ptha; /* PTHA_VERSION */
+ u32 rng; /* RNG_VERSION */
+ u32 trng; /* TRNG_VERSION */
+ u32 aaha; /* AAHA_VERSION */
+ u32 rsvd[10];
+ u32 sr; /* SR_VERSION */
+ u32 dma; /* DMA_VERSION */
+ u32 ai; /* AI_VERSION */
+ u32 qi; /* QI_VERSION */
+ u32 jr; /* JR_VERSION */
+ u32 deco; /* DECO_VERSION */
+};
+
+/* Version registers bitfields */
+
+/* Number of CHAs instantiated */
+#define CHA_VER_NUM_MASK 0xffull
+/* CHA Miscellaneous Information */
+#define CHA_VER_MISC_SHIFT 8
+#define CHA_VER_MISC_MASK (0xffull << CHA_VER_MISC_SHIFT)
+/* CHA Revision Number */
+#define CHA_VER_REV_SHIFT 16
+#define CHA_VER_REV_MASK (0xffull << CHA_VER_REV_SHIFT)
+/* CHA Version ID */
+#define CHA_VER_VID_SHIFT 24
+#define CHA_VER_VID_MASK (0xffull << CHA_VER_VID_SHIFT)
+
/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs
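The bitfield layout behind the CHA_VER_* masks, for reference:

/*
 * Era 10+ *_VERSION register (one per accelerator):
 *
 *  31      24 23      16 15       8 7        0
 * +----------+----------+----------+----------+
 * |   VID    |   REV    |   MISC   |   NUM    |
 * +----------+----------+----------+----------+
 *
 * NUM == 0 means the block is not instantiated.
 */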
@@ -223,15 +265,13 @@ struct jr_outentry {
#define CHA_NUM_MS_DECONUM_MASK (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
/*
- * CHA version IDs / instantiation bitfields
+ * CHA version IDs / instantiation bitfields (< Era 10)
* Defined for use with the cha_id fields in perfmon, but the same shift/mask
* selectors can be used to pull out the number of instantiated blocks within
* cha_num fields in perfmon because the locations are the same.
*/
#define CHA_ID_LS_AES_SHIFT 0
#define CHA_ID_LS_AES_MASK (0xfull << CHA_ID_LS_AES_SHIFT)
-#define CHA_ID_LS_AES_LP (0x3ull << CHA_ID_LS_AES_SHIFT)
-#define CHA_ID_LS_AES_HP (0x4ull << CHA_ID_LS_AES_SHIFT)
#define CHA_ID_LS_DES_SHIFT 4
#define CHA_ID_LS_DES_MASK (0xfull << CHA_ID_LS_DES_SHIFT)
@@ -241,9 +281,6 @@ struct jr_outentry {
#define CHA_ID_LS_MD_SHIFT 12
#define CHA_ID_LS_MD_MASK (0xfull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_LP256 (0x0ull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_LP512 (0x1ull << CHA_ID_LS_MD_SHIFT)
-#define CHA_ID_LS_MD_HP (0x2ull << CHA_ID_LS_MD_SHIFT)
#define CHA_ID_LS_RNG_SHIFT 16
#define CHA_ID_LS_RNG_MASK (0xfull << CHA_ID_LS_RNG_SHIFT)
@@ -269,6 +306,13 @@ struct jr_outentry {
#define CHA_ID_MS_JR_SHIFT 28
#define CHA_ID_MS_JR_MASK (0xfull << CHA_ID_MS_JR_SHIFT)
+/* Specific CHA version IDs */
+#define CHA_VER_VID_AES_LP 0x3ull
+#define CHA_VER_VID_AES_HP 0x4ull
+#define CHA_VER_VID_MD_LP256 0x0ull
+#define CHA_VER_VID_MD_LP512 0x1ull
+#define CHA_VER_VID_MD_HP 0x2ull
+
struct sec_vid {
u16 ip_id;
u8 maj_rev;
@@ -479,8 +523,10 @@ struct caam_ctrl {
struct rng4tst r4tst[2];
};
- u32 rsvd9[448];
+ u32 rsvd9[416];
+ /* Version registers - introduced with era 10 e80-eff */
+ struct version_regs vreg;
/* Performance Monitor f00-fff */
struct caam_perfmon perfmon;
};
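The pad arithmetic checks out: struct version_regs covers 16 + 10 + 6 = 32 u32s (128 bytes, the e80-eff window), so carving it out of the reserved area before perfmon shrinks rsvd9 from 448 to 416 words here, and rsvd12 from 932 to 900 words in caam_job_ring below. A compile-time guard along these lines (not part of the patch) would pin the invariant:

BUILD_BUG_ON(sizeof(struct version_regs) != 32 * sizeof(u32));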
@@ -570,8 +616,10 @@ struct caam_job_ring {
u32 rsvd11;
u32 jrcommand; /* JRCRx - JobR command */
- u32 rsvd12[932];
+ u32 rsvd12[900];
+ /* Version registers - introduced with era 10 e80-eff */
+ struct version_regs vreg;
/* Performance Monitor f00-fff */
struct caam_perfmon perfmon;
};
@@ -878,13 +926,19 @@ struct caam_deco {
u32 rsvd29[48];
u32 descbuf[64]; /* DxDESB - Descriptor buffer */
u32 rscvd30[193];
-#define DESC_DBG_DECO_STAT_HOST_ERR 0x00D00000
#define DESC_DBG_DECO_STAT_VALID 0x80000000
#define DESC_DBG_DECO_STAT_MASK 0x00F00000
+#define DESC_DBG_DECO_STAT_SHIFT 20
u32 desc_dbg; /* DxDDR - DECO Debug Register */
- u32 rsvd31[126];
+ u32 rsvd31[13];
+#define DESC_DER_DECO_STAT_MASK 0x000F0000
+#define DESC_DER_DECO_STAT_SHIFT 16
+ u32 dbg_exec; /* DxDER - DECO Debug Exec Register */
+ u32 rsvd32[112];
};
+#define DECO_STAT_HOST_ERR 0xD
+
#define DECO_JQCR_WHL 0x20000000
#define DECO_JQCR_FOUR 0x10000000
diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile
index e12954791673..f83991aaf820 100644
--- a/drivers/crypto/cavium/nitrox/Makefile
+++ b/drivers/crypto/cavium/nitrox/Makefile
@@ -6,7 +6,10 @@ n5pf-objs := nitrox_main.o \
nitrox_lib.o \
nitrox_hal.o \
nitrox_reqmgr.o \
- nitrox_algs.o
+ nitrox_algs.o \
+ nitrox_mbx.o \
+ nitrox_skcipher.o \
+ nitrox_aead.o
n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o
n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
new file mode 100644
index 000000000000..4f43eacd2557
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/crypto.h>
+#include <linux/rtnetlink.h>
+
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/internal/aead.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/gcm.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_common.h"
+#include "nitrox_req.h"
+
+#define GCM_AES_SALT_SIZE 4
+
+/**
+ * struct nitrox_crypt_params - Parameters to set up a NITROX crypto request.
+ * @cryptlen: Encryption/Decryption data length
+ * @authlen: Assoc data length + Cryptlen
+ * @srclen: Input buffer length
+ * @dstlen: Output buffer length
+ * @iv: IV data
+ * @ivsize: IV data length
+ * @ctrl_arg: Identifies the request type (ENCRYPT/DECRYPT)
+ */
+struct nitrox_crypt_params {
+ unsigned int cryptlen;
+ unsigned int authlen;
+ unsigned int srclen;
+ unsigned int dstlen;
+ u8 *iv;
+ int ivsize;
+ u8 ctrl_arg;
+};
+
+union gph_p3 {
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u16 iv_offset : 8;
+ u16 auth_offset : 8;
+#else
+ u16 auth_offset : 8;
+ u16 iv_offset : 8;
+#endif
+ };
+ u16 param;
+};
+
+static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ int aes_keylen;
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct flexi_crypto_context *fctx;
+ union fc_ctx_flags flags;
+
+ aes_keylen = flexi_aes_keylen(keylen);
+ if (aes_keylen < 0) {
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* fill crypto context */
+ fctx = nctx->u.fctx;
+ flags.f = be64_to_cpu(fctx->flags.f);
+ flags.w0.aes_keylen = aes_keylen;
+ fctx->flags.f = cpu_to_be64(flags.f);
+
+ /* copy enc key to context */
+ memset(&fctx->crypto, 0, sizeof(fctx->crypto));
+ memcpy(fctx->crypto.u.key, key, keylen);
+
+ return 0;
+}
+
+static int nitrox_aead_setauthsize(struct crypto_aead *aead,
+ unsigned int authsize)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+ union fc_ctx_flags flags;
+
+ flags.f = be64_to_cpu(fctx->flags.f);
+ flags.w0.mac_len = authsize;
+ fctx->flags.f = cpu_to_be64(flags.f);
+
+ aead->authsize = authsize;
+
+ return 0;
+}
+
+static int alloc_src_sglist(struct aead_request *areq, char *iv, int ivsize,
+ int buflen)
+{
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+ int nents = sg_nents_for_len(areq->src, buflen) + 1;
+ int ret;
+
+ if (nents < 0)
+ return nents;
+
+ /* Allocate buffer to hold IV and input scatterlist array */
+ ret = alloc_src_req_buf(nkreq, nents, ivsize);
+ if (ret)
+ return ret;
+
+ nitrox_creq_copy_iv(nkreq->src, iv, ivsize);
+ nitrox_creq_set_src_sg(nkreq, nents, ivsize, areq->src, buflen);
+
+ return 0;
+}
+
+static int alloc_dst_sglist(struct aead_request *areq, int ivsize, int buflen)
+{
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+ int nents = sg_nents_for_len(areq->dst, buflen) + 3;
+ int ret;
+
+ if (nents < 0)
+ return nents;
+
+ /* Allocate buffer to hold ORH, COMPLETION and output scatterlist
+ * array
+ */
+ ret = alloc_dst_req_buf(nkreq, nents);
+ if (ret)
+ return ret;
+
+ nitrox_creq_set_orh(nkreq);
+ nitrox_creq_set_comp(nkreq);
+ nitrox_creq_set_dst_sg(nkreq, nents, ivsize, areq->dst, buflen);
+
+ return 0;
+}
+
+static void free_src_sglist(struct aead_request *areq)
+{
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+
+ kfree(nkreq->src);
+}
+
+static void free_dst_sglist(struct aead_request *areq)
+{
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+
+ kfree(nkreq->dst);
+}
+
+static int nitrox_set_creq(struct aead_request *areq,
+ struct nitrox_crypt_params *params)
+{
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &nkreq->creq;
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ union gph_p3 param3;
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ int ret;
+
+ creq->flags = areq->base.flags;
+ creq->gfp = (areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ creq->ctrl.value = 0;
+ creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
+ creq->ctrl.s.arg = params->ctrl_arg;
+
+ creq->gph.param0 = cpu_to_be16(params->cryptlen);
+ creq->gph.param1 = cpu_to_be16(params->authlen);
+ creq->gph.param2 = cpu_to_be16(params->ivsize + areq->assoclen);
+ param3.iv_offset = 0;
+ param3.auth_offset = params->ivsize;
+ creq->gph.param3 = cpu_to_be16(param3.param);
+
+ creq->ctx_handle = nctx->u.ctx_handle;
+ creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
+
+ ret = alloc_src_sglist(areq, params->iv, params->ivsize,
+ params->srclen);
+ if (ret)
+ return ret;
+
+ ret = alloc_dst_sglist(areq, params->ivsize, params->dstlen);
+ if (ret) {
+ free_src_sglist(areq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nitrox_aead_callback(void *arg, int err)
+{
+ struct aead_request *areq = arg;
+
+ free_src_sglist(areq);
+ free_dst_sglist(areq);
+ if (err) {
+ pr_err_ratelimited("request failed status 0x%0x\n", err);
+ err = -EINVAL;
+ }
+
+ areq->base.complete(&areq->base, err);
+}
+
+static int nitrox_aes_gcm_enc(struct aead_request *areq)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &nkreq->creq;
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+ struct nitrox_crypt_params params;
+ int ret;
+
+ memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
+
+ memset(&params, 0, sizeof(params));
+ params.cryptlen = areq->cryptlen;
+ params.authlen = areq->assoclen + params.cryptlen;
+ params.srclen = params.authlen;
+ params.dstlen = params.srclen + aead->authsize;
+ params.iv = &areq->iv[GCM_AES_SALT_SIZE];
+ params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+ params.ctrl_arg = ENCRYPT;
+ ret = nitrox_set_creq(areq, &params);
+ if (ret)
+ return ret;
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback,
+ areq);
+}
+
+static int nitrox_aes_gcm_dec(struct aead_request *areq)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct nitrox_kcrypt_request *nkreq = aead_request_ctx(areq);
+ struct se_crypto_request *creq = &nkreq->creq;
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+ struct nitrox_crypt_params params;
+ int ret;
+
+ memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE);
+
+ memset(&params, 0, sizeof(params));
+ params.cryptlen = areq->cryptlen - aead->authsize;
+ params.authlen = areq->assoclen + params.cryptlen;
+ params.srclen = areq->cryptlen + areq->assoclen;
+ params.dstlen = params.srclen - aead->authsize;
+ params.iv = &areq->iv[GCM_AES_SALT_SIZE];
+ params.ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE;
+ params.ctrl_arg = DECRYPT;
+ ret = nitrox_set_creq(areq, &params);
+ if (ret)
+ return ret;
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback,
+ areq);
+}
+
+static int nitrox_aead_init(struct crypto_aead *aead)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ struct crypto_ctx_hdr *chdr;
+
+ /* get the first device */
+ nctx->ndev = nitrox_get_first_device();
+ if (!nctx->ndev)
+ return -ENODEV;
+
+ /* allocate nitrox crypto context */
+ chdr = crypto_alloc_context(nctx->ndev);
+ if (!chdr) {
+ nitrox_put_device(nctx->ndev);
+ return -ENOMEM;
+ }
+ nctx->chdr = chdr;
+ nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
+ sizeof(struct ctx_hdr));
+ nctx->u.fctx->flags.f = 0;
+
+ return 0;
+}
+
+static int nitrox_aes_gcm_init(struct crypto_aead *aead)
+{
+ int ret;
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+ union fc_ctx_flags *flags;
+
+ ret = nitrox_aead_init(aead);
+ if (ret)
+ return ret;
+
+ flags = &nctx->u.fctx->flags;
+ flags->w0.cipher_type = CIPHER_AES_GCM;
+ flags->w0.hash_type = AUTH_NULL;
+ flags->w0.iv_source = IV_FROM_DPTR;
+ /* ask microcode to calculate ipad/opad */
+ flags->w0.auth_input_type = 1;
+ flags->f = be64_to_cpu(flags->f);
+
+ crypto_aead_set_reqsize(aead, sizeof(struct aead_request) +
+ sizeof(struct nitrox_kcrypt_request));
+
+ return 0;
+}
+
+static void nitrox_aead_exit(struct crypto_aead *aead)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead);
+
+ /* free the nitrox crypto context */
+ if (nctx->u.ctx_handle) {
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+
+ memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys));
+ memzero_explicit(&fctx->auth, sizeof(struct auth_keys));
+ crypto_free_context((void *)nctx->chdr);
+ }
+ nitrox_put_device(nctx->ndev);
+
+ nctx->u.ctx_handle = 0;
+ nctx->ndev = NULL;
+}
+
+static struct aead_alg nitrox_aeads[] = { {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "n5_aes_gcm",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .setkey = nitrox_aes_gcm_setkey,
+ .setauthsize = nitrox_aead_setauthsize,
+ .encrypt = nitrox_aes_gcm_enc,
+ .decrypt = nitrox_aes_gcm_dec,
+ .init = nitrox_aes_gcm_init,
+ .exit = nitrox_aead_exit,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+} };
+
+int nitrox_register_aeads(void)
+{
+ return crypto_register_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads));
+}
+
+void nitrox_unregister_aeads(void)
+{
+ crypto_unregister_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads));
+}
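The length bookkeeping in nitrox_aes_gcm_enc()/nitrox_aes_gcm_dec() follows straight from the AES-GCM message layout; a worked example with illustrative sizes (assoclen = 16, 64-byte payload, 16-byte tag):

/*
 * encrypt: cryptlen = 64              (plaintext)
 *          authlen  = 16 + 64 =  80   (AAD + plaintext)
 *          srclen   = authlen =  80
 *          dstlen   = 80 + 16 =  96   (output carries the tag)
 *
 * decrypt: req->cryptlen = 80         (ciphertext + tag)
 *          cryptlen = 80 - 16 =  64
 *          authlen  = 16 + 64 =  80
 *          srclen   = 80 + 16 =  96   (input carries the tag)
 *          dstlen   = 96 - 16 =  80
 */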
diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
index 2ae6124e5da6..d646ae5f29b0 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_algs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
@@ -1,458 +1,24 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/printk.h>
-
-#include <crypto/aes.h>
-#include <crypto/skcipher.h>
-#include <crypto/ctr.h>
-#include <crypto/des.h>
-#include <crypto/xts.h>
-
-#include "nitrox_dev.h"
#include "nitrox_common.h"
-#include "nitrox_req.h"
-
-#define PRIO 4001
-
-struct nitrox_cipher {
- const char *name;
- enum flexi_cipher value;
-};
-
-/**
- * supported cipher list
- */
-static const struct nitrox_cipher flexi_cipher_table[] = {
- { "null", CIPHER_NULL },
- { "cbc(des3_ede)", CIPHER_3DES_CBC },
- { "ecb(des3_ede)", CIPHER_3DES_ECB },
- { "cbc(aes)", CIPHER_AES_CBC },
- { "ecb(aes)", CIPHER_AES_ECB },
- { "cfb(aes)", CIPHER_AES_CFB },
- { "rfc3686(ctr(aes))", CIPHER_AES_CTR },
- { "xts(aes)", CIPHER_AES_XTS },
- { "cts(cbc(aes))", CIPHER_AES_CBC_CTS },
- { NULL, CIPHER_INVALID }
-};
-
-static enum flexi_cipher flexi_cipher_type(const char *name)
-{
- const struct nitrox_cipher *cipher = flexi_cipher_table;
-
- while (cipher->name) {
- if (!strcmp(cipher->name, name))
- break;
- cipher++;
- }
- return cipher->value;
-}
-
-static int flexi_aes_keylen(int keylen)
-{
- int aes_keylen;
-
- switch (keylen) {
- case AES_KEYSIZE_128:
- aes_keylen = 1;
- break;
- case AES_KEYSIZE_192:
- aes_keylen = 2;
- break;
- case AES_KEYSIZE_256:
- aes_keylen = 3;
- break;
- default:
- aes_keylen = -EINVAL;
- break;
- }
- return aes_keylen;
-}
-
-static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
-{
- struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
- void *fctx;
-
- /* get the first device */
- nctx->ndev = nitrox_get_first_device();
- if (!nctx->ndev)
- return -ENODEV;
-
- /* allocate nitrox crypto context */
- fctx = crypto_alloc_context(nctx->ndev);
- if (!fctx) {
- nitrox_put_device(nctx->ndev);
- return -ENOMEM;
- }
- nctx->u.ctx_handle = (uintptr_t)fctx;
- crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
- sizeof(struct nitrox_kcrypt_request));
- return 0;
-}
-
-static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
-{
- struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
-
- /* free the nitrox crypto context */
- if (nctx->u.ctx_handle) {
- struct flexi_crypto_context *fctx = nctx->u.fctx;
-
- memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
- memset(&fctx->auth, 0, sizeof(struct auth_keys));
- crypto_free_context((void *)fctx);
- }
- nitrox_put_device(nctx->ndev);
-
- nctx->u.ctx_handle = 0;
- nctx->ndev = NULL;
-}
-static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher,
- int aes_keylen, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
- struct flexi_crypto_context *fctx;
- enum flexi_cipher cipher_type;
- const char *name;
-
- name = crypto_tfm_alg_name(tfm);
- cipher_type = flexi_cipher_type(name);
- if (unlikely(cipher_type == CIPHER_INVALID)) {
- pr_err("unsupported cipher: %s\n", name);
- return -EINVAL;
- }
-
- /* fill crypto context */
- fctx = nctx->u.fctx;
- fctx->flags = 0;
- fctx->w0.cipher_type = cipher_type;
- fctx->w0.aes_keylen = aes_keylen;
- fctx->w0.iv_source = IV_FROM_DPTR;
- fctx->flags = cpu_to_be64(*(u64 *)&fctx->w0);
- /* copy the key to context */
- memcpy(fctx->crypto.u.key, key, keylen);
-
- return 0;
-}
-
-static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
- unsigned int keylen)
+int nitrox_crypto_register(void)
{
- int aes_keylen;
+ int err;
- aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
-}
+ err = nitrox_register_skciphers();
+ if (err)
+ return err;
-static void nitrox_skcipher_callback(struct skcipher_request *skreq,
- int err)
-{
+ err = nitrox_register_aeads();
if (err) {
- pr_err_ratelimited("request failed status 0x%0x\n", err);
- err = -EINVAL;
- }
- skcipher_request_complete(skreq, err);
-}
-
-static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
-{
- struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
- struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher);
- struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
- int ivsize = crypto_skcipher_ivsize(cipher);
- struct se_crypto_request *creq;
-
- creq = &nkreq->creq;
- creq->flags = skreq->base.flags;
- creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
-
- /* fill the request */
- creq->ctrl.value = 0;
- creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
- creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT);
- /* param0: length of the data to be encrypted */
- creq->gph.param0 = cpu_to_be16(skreq->cryptlen);
- creq->gph.param1 = 0;
- /* param2: encryption data offset */
- creq->gph.param2 = cpu_to_be16(ivsize);
- creq->gph.param3 = 0;
-
- creq->ctx_handle = nctx->u.ctx_handle;
- creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
-
- /* copy the iv */
- memcpy(creq->iv, skreq->iv, ivsize);
- creq->ivsize = ivsize;
- creq->src = skreq->src;
- creq->dst = skreq->dst;
-
- nkreq->nctx = nctx;
- nkreq->skreq = skreq;
-
- /* send the crypto request */
- return nitrox_process_se_request(nctx->ndev, creq,
- nitrox_skcipher_callback, skreq);
-}
-
-static int nitrox_aes_encrypt(struct skcipher_request *skreq)
-{
- return nitrox_skcipher_crypt(skreq, true);
-}
-
-static int nitrox_aes_decrypt(struct skcipher_request *skreq)
-{
- return nitrox_skcipher_crypt(skreq, false);
-}
-
-static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- if (keylen != DES3_EDE_KEY_SIZE) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return nitrox_skcipher_setkey(cipher, 0, key, keylen);
-}
-
-static int nitrox_3des_encrypt(struct skcipher_request *skreq)
-{
- return nitrox_skcipher_crypt(skreq, true);
-}
-
-static int nitrox_3des_decrypt(struct skcipher_request *skreq)
-{
- return nitrox_skcipher_crypt(skreq, false);
-}
-
-static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
- struct flexi_crypto_context *fctx;
- int aes_keylen, ret;
-
- ret = xts_check_key(tfm, key, keylen);
- if (ret)
- return ret;
-
- keylen /= 2;
-
- aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
+ nitrox_unregister_skciphers();
+ return err;
}
- fctx = nctx->u.fctx;
- /* copy KEY2 */
- memcpy(fctx->auth.u.key2, (key + keylen), keylen);
-
- return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
-}
-
-static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
- struct flexi_crypto_context *fctx;
- int aes_keylen;
-
- if (keylen < CTR_RFC3686_NONCE_SIZE)
- return -EINVAL;
-
- fctx = nctx->u.fctx;
-
- memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE),
- CTR_RFC3686_NONCE_SIZE);
-
- keylen -= CTR_RFC3686_NONCE_SIZE;
-
- aes_keylen = flexi_aes_keylen(keylen);
- if (aes_keylen < 0) {
- crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
-}
-
-static struct skcipher_alg nitrox_skciphers[] = { {
- .base = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "n5_cbc(aes)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "n5_ecb(aes)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "n5_cfb(aes)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "xts(aes)",
- .cra_driver_name = "n5_xts(aes)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = 2 * AES_MIN_KEY_SIZE,
- .max_keysize = 2 * AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_xts_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "rfc3686(ctr(aes))",
- .cra_driver_name = "n5_rfc3686(ctr(aes))",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
- .ivsize = CTR_RFC3686_IV_SIZE,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
- .setkey = nitrox_aes_ctr_rfc3686_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
-}, {
- .base = {
- .cra_name = "cts(cbc(aes))",
- .cra_driver_name = "n5_cts(cbc(aes))",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_type = &crypto_ablkcipher_type,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "cbc(des3_ede)",
- .cra_driver_name = "n5_cbc(des3_ede)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = nitrox_3des_setkey,
- .encrypt = nitrox_3des_encrypt,
- .decrypt = nitrox_3des_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
- .cra_name = "ecb(des3_ede)",
- .cra_driver_name = "n5_ecb(des3_ede)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- .setkey = nitrox_3des_setkey,
- .encrypt = nitrox_3des_encrypt,
- .decrypt = nitrox_3des_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}
-
-};
-
-int nitrox_crypto_register(void)
-{
- return crypto_register_skciphers(nitrox_skciphers,
- ARRAY_SIZE(nitrox_skciphers));
+ return 0;
}
void nitrox_crypto_unregister(void)
{
- crypto_unregister_skciphers(nitrox_skciphers,
- ARRAY_SIZE(nitrox_skciphers));
+ nitrox_unregister_aeads();
+ nitrox_unregister_skciphers();
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h
index 863143a8336b..e4be69d7e6e5 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_common.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_common.h
@@ -7,6 +7,10 @@
int nitrox_crypto_register(void);
void nitrox_crypto_unregister(void);
+int nitrox_register_aeads(void);
+void nitrox_unregister_aeads(void);
+int nitrox_register_skciphers(void);
+void nitrox_unregister_skciphers(void);
void *crypto_alloc_context(struct nitrox_device *ndev);
void crypto_free_context(void *ctx);
struct nitrox_device *nitrox_get_first_device(void);
@@ -19,7 +23,7 @@ void pkt_slc_resp_tasklet(unsigned long data);
int nitrox_process_se_request(struct nitrox_device *ndev,
struct se_crypto_request *req,
completion_t cb,
- struct skcipher_request *skreq);
+ void *cb_arg);
void backlog_qflush_work(struct work_struct *work);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h
index 1ad27b1a87c5..a2a452642b38 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_csr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h
@@ -54,7 +54,13 @@
#define NPS_STATS_PKT_DMA_WR_CNT 0x1000190
/* NPS packet registers */
-#define NPS_PKT_INT 0x1040018
+#define NPS_PKT_INT 0x1040018
+#define NPS_PKT_MBOX_INT_LO 0x1040020
+#define NPS_PKT_MBOX_INT_LO_ENA_W1C 0x1040030
+#define NPS_PKT_MBOX_INT_LO_ENA_W1S 0x1040038
+#define NPS_PKT_MBOX_INT_HI 0x1040040
+#define NPS_PKT_MBOX_INT_HI_ENA_W1C 0x1040050
+#define NPS_PKT_MBOX_INT_HI_ENA_W1S 0x1040058
#define NPS_PKT_IN_RERR_HI 0x1040108
#define NPS_PKT_IN_RERR_HI_ENA_W1S 0x1040120
#define NPS_PKT_IN_RERR_LO 0x1040128
@@ -74,6 +80,10 @@
#define NPS_PKT_SLC_RERR_LO_ENA_W1S 0x1040240
#define NPS_PKT_SLC_ERR_TYPE 0x1040248
#define NPS_PKT_SLC_ERR_TYPE_ENA_W1S 0x1040260
+/* Mailbox PF->VF and VF->PF data registers */
+#define NPS_PKT_MBOX_PF_VF_PFDATAX(_i) (0x1040800 + ((_i) * 0x8))
+#define NPS_PKT_MBOX_VF_PF_PFDATAX(_i) (0x1040C00 + ((_i) * 0x8))
+
#define NPS_PKT_SLC_CTLX(_i) (0x10000 + ((_i) * 0x40000))
#define NPS_PKT_SLC_CNTSX(_i) (0x10008 + ((_i) * 0x40000))
#define NPS_PKT_SLC_INT_LEVELSX(_i) (0x10010 + ((_i) * 0x40000))
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
index 5f3cd5fafe04..0196b992280f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
@@ -13,18 +13,7 @@ static int firmware_show(struct seq_file *s, void *v)
return 0;
}
-static int firmware_open(struct inode *inode, struct file *file)
-{
- return single_open(file, firmware_show, inode->i_private);
-}
-
-static const struct file_operations firmware_fops = {
- .owner = THIS_MODULE,
- .open = firmware_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(firmware);
static int device_show(struct seq_file *s, void *v)
{
@@ -41,18 +30,7 @@ static int device_show(struct seq_file *s, void *v)
return 0;
}
-static int nitrox_open(struct inode *inode, struct file *file)
-{
- return single_open(file, device_show, inode->i_private);
-}
-
-static const struct file_operations nitrox_fops = {
- .owner = THIS_MODULE,
- .open = nitrox_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device);
static int stats_show(struct seq_file *s, void *v)
{
@@ -69,18 +47,7 @@ static int stats_show(struct seq_file *s, void *v)
return 0;
}
-static int nitrox_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, stats_show, inode->i_private);
-}
-
-static const struct file_operations nitrox_stats_fops = {
- .owner = THIS_MODULE,
- .open = nitrox_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(stats);
void nitrox_debugfs_exit(struct nitrox_device *ndev)
{
@@ -97,13 +64,16 @@ int nitrox_debugfs_init(struct nitrox_device *ndev)
return -ENOMEM;
ndev->debugfs_dir = dir;
- f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
+ f = debugfs_create_file("firmware", 0400, dir, ndev,
+ &firmware_fops);
if (!f)
goto err;
- f = debugfs_create_file("device", 0400, dir, ndev, &nitrox_fops);
+ f = debugfs_create_file("device", 0400, dir, ndev,
+ &device_fops);
if (!f)
goto err;
- f = debugfs_create_file("stats", 0400, dir, ndev, &nitrox_stats_fops);
+ f = debugfs_create_file("stats", 0400, dir, ndev,
+ &stats_fops);
if (!f)
goto err;
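DEFINE_SHOW_ATTRIBUTE() is the <linux/seq_file.h> helper that generates exactly the boilerplate the removed lines spelled out by hand; its expansion is roughly:

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}

This is why the fops symbols change from nitrox_fops/nitrox_stats_fops to device_fops/stats_fops: the generated names are derived from the _show functions.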
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.h b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
new file mode 100644
index 000000000000..a8d85ffa619c
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __NITROX_DEBUGFS_H
+#define __NITROX_DEBUGFS_H
+
+#include "nitrox_dev.h"
+
+#ifdef CONFIG_DEBUG_FS
+int nitrox_debugfs_init(struct nitrox_device *ndev);
+void nitrox_debugfs_exit(struct nitrox_device *ndev);
+#else
+static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+ return 0;
+}
+
+static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{
+}
+#endif /* !CONFIG_DEBUG_FS */
+
+#endif /* __NITROX_DEBUGFS_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index 283e252385fb..0338877b828f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -8,6 +8,8 @@
#include <linux/if.h>
#define VERSION_LEN 32
+/* Maximum queues in PF mode */
+#define MAX_PF_QUEUES 64
/**
* struct nitrox_cmdq - NITROX command queue
@@ -103,6 +105,61 @@ struct nitrox_q_vector {
};
};
+/**
+ * union mbox_msg - Mailbox message data
+ * @type: message type
+ * @opcode: message opcode
+ * @data: message data
+ */
+union mbox_msg {
+ u64 value;
+ struct {
+ u64 type: 2;
+ u64 opcode: 6;
+ u64 data: 58;
+ };
+ struct {
+ u64 type: 2;
+ u64 opcode: 6;
+ u64 chipid: 8;
+ u64 vfid: 8;
+ } id;
+};
+
+/**
+ * struct nitrox_vfdev - NITROX VF device instance in PF
+ * @state: VF device state
+ * @vfno: VF number
+ * @nr_queues: number of queues enabled in VF
+ * @ring: ring to communicate with VF
+ * @msg: Mailbox message data from VF
+ * @mbx_resp: Mailbox counters
+ */
+struct nitrox_vfdev {
+ atomic_t state;
+ int vfno;
+ int nr_queues;
+ int ring;
+ union mbox_msg msg;
+ atomic64_t mbx_resp;
+};
+
+/**
+ * struct nitrox_iov - SR-IOV information
+ * @num_vfs: number of VF(s) enabled
+ * @max_vf_queues: Maximum number of queues allowed for VF
+ * @vfdev: VF(s) devices
+ * @pf2vf_wq: workqueue for PF2VF communication
+ * @msix: MSI-X entry for PF in SR-IOV case
+ */
+struct nitrox_iov {
+ int num_vfs;
+ int max_vf_queues;
+ struct nitrox_vfdev *vfdev;
+ struct workqueue_struct *pf2vf_wq;
+ struct msix_entry msix;
+};
+
/*
* NITROX Device states
*/
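union mbox_msg lets the PF and VF exchange one 64-bit register value while naming the type/opcode/data fields symbolically. A minimal sketch of composing a message (the opcode value is hypothetical, not defined by this patch):

union mbox_msg msg;

msg.value = 0;
msg.type = 1;		/* e.g. a request */
msg.opcode = 2;		/* hypothetical opcode */
msg.data = 8;		/* payload, e.g. a queue count */

/*
 * The PF would write msg.value to NPS_PKT_MBOX_PF_VF_PFDATAX(vf);
 * the VF reads the register back into .value and picks the same
 * bitfields apart.
 */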
@@ -150,6 +207,9 @@ enum vf_mode {
* @ctx_pool: DMA pool for crypto context
* @pkt_inq: Packet input rings
* @qvec: MSI-X queue vectors information
+ * @iov: SR-IOV information
+ * @num_vecs: number of MSI-X vectors
+ * @stats: request statistics
* @hw: hardware information
* @debugfs_dir: debugfs directory
*/
@@ -168,13 +228,13 @@ struct nitrox_device {
int node;
u16 qlen;
u16 nr_queues;
- int num_vfs;
enum vf_mode mode;
struct dma_pool *ctx_pool;
struct nitrox_cmdq *pkt_inq;
struct nitrox_q_vector *qvec;
+ struct nitrox_iov iov;
int num_vecs;
struct nitrox_stats stats;
@@ -213,17 +273,9 @@ static inline bool nitrox_ready(struct nitrox_device *ndev)
return atomic_read(&ndev->state) == __NDEV_READY;
}
-#ifdef CONFIG_DEBUG_FS
-int nitrox_debugfs_init(struct nitrox_device *ndev);
-void nitrox_debugfs_exit(struct nitrox_device *ndev);
-#else
-static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
+static inline bool nitrox_vfdev_ready(struct nitrox_vfdev *vfdev)
{
- return 0;
+ return atomic_read(&vfdev->state) == __NDEV_READY;
}
-static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
-{ }
-#endif
-
#endif /* __NITROX_DEV_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index a9b82387cf53..c08d9f33a3b1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -5,10 +5,11 @@
#include "nitrox_csr.h"
#define PLL_REF_CLK 50
+#define MAX_CSR_RETRIES 10
/**
* emu_enable_cores - Enable EMU cluster cores.
- * @ndev: N5 device
+ * @ndev: NITROX device
*/
static void emu_enable_cores(struct nitrox_device *ndev)
{
@@ -33,7 +34,7 @@ static void emu_enable_cores(struct nitrox_device *ndev)
/**
* nitrox_config_emu_unit - configure EMU unit.
- * @ndev: N5 device
+ * @ndev: NITROX device
*/
void nitrox_config_emu_unit(struct nitrox_device *ndev)
{
@@ -63,29 +64,26 @@ void nitrox_config_emu_unit(struct nitrox_device *ndev)
static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
union nps_pkt_in_instr_ctl pkt_in_ctl;
- union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
union nps_pkt_in_done_cnts pkt_in_cnts;
+ int max_retries = MAX_CSR_RETRIES;
u64 offset;
+ /* step 1: disable the ring, clear enable bit */
offset = NPS_PKT_IN_INSTR_CTLX(ring);
- /* disable the ring */
pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
pkt_in_ctl.s.enb = 0;
nitrox_write_csr(ndev, offset, pkt_in_ctl.value);
- usleep_range(100, 150);
- /* wait to clear [ENB] */
+ /* step 2: wait to clear [ENB] */
+ usleep_range(100, 150);
do {
pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
- } while (pkt_in_ctl.s.enb);
-
- /* clear off door bell counts */
- offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(ring);
- pkt_in_dbell.value = 0;
- pkt_in_dbell.s.dbell = 0xffffffff;
- nitrox_write_csr(ndev, offset, pkt_in_dbell.value);
+ if (!pkt_in_ctl.s.enb)
+ break;
+ udelay(50);
+ } while (max_retries--);
- /* clear done counts */
+ /* step 3: clear done counts */
offset = NPS_PKT_IN_DONE_CNTSX(ring);
pkt_in_cnts.value = nitrox_read_csr(ndev, offset);
nitrox_write_csr(ndev, offset, pkt_in_cnts.value);
@@ -95,6 +93,7 @@ static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
{
union nps_pkt_in_instr_ctl pkt_in_ctl;
+ int max_retries = MAX_CSR_RETRIES;
u64 offset;
/* 64-byte instruction size */
@@ -107,12 +106,15 @@ void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
/* wait for set [ENB] */
do {
pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
- } while (!pkt_in_ctl.s.enb);
+ if (pkt_in_ctl.s.enb)
+ break;
+ udelay(50);
+ } while (max_retries--);
}
/**
* nitrox_config_pkt_input_rings - configure Packet Input Rings
- * @ndev: N5 device
+ * @ndev: NITROX device
*/
void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
{
@@ -121,11 +123,14 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
for (i = 0; i < ndev->nr_queues; i++) {
struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
union nps_pkt_in_instr_rsize pkt_in_rsize;
+ union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
u64 offset;
reset_pkt_input_ring(ndev, i);
- /* configure ring base address 16-byte aligned,
+ /**
+ * step 4:
+ * configure ring base address 16-byte aligned,
* size and interrupt threshold.
*/
offset = NPS_PKT_IN_INSTR_BADDRX(i);
@@ -141,6 +146,13 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
offset = NPS_PKT_IN_INT_LEVELSX(i);
nitrox_write_csr(ndev, offset, 0xffffffff);
+ /* step 5: clear off door bell counts */
+ offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
+ pkt_in_dbell.value = 0;
+ pkt_in_dbell.s.dbell = 0xffffffff;
+ nitrox_write_csr(ndev, offset, pkt_in_dbell.value);
+
+ /* enable the ring */
enable_pkt_input_ring(ndev, i);
}
}
@@ -149,21 +161,26 @@ static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
union nps_pkt_slc_ctl pkt_slc_ctl;
union nps_pkt_slc_cnts pkt_slc_cnts;
+ int max_retries = MAX_CSR_RETRIES;
u64 offset;
- /* disable slc port */
+ /* step 1: disable slc port */
offset = NPS_PKT_SLC_CTLX(port);
pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
pkt_slc_ctl.s.enb = 0;
nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);
- usleep_range(100, 150);
+ /* step 2 */
+ usleep_range(100, 150);
/* wait to clear [ENB] */
do {
pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
- } while (pkt_slc_ctl.s.enb);
+ if (!pkt_slc_ctl.s.enb)
+ break;
+ udelay(50);
+ } while (max_retries--);
- /* clear slc counters */
+ /* step 3: clear slc counters */
offset = NPS_PKT_SLC_CNTSX(port);
pkt_slc_cnts.value = nitrox_read_csr(ndev, offset);
nitrox_write_csr(ndev, offset, pkt_slc_cnts.value);
@@ -173,12 +190,12 @@ static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
union nps_pkt_slc_ctl pkt_slc_ctl;
+ int max_retries = MAX_CSR_RETRIES;
u64 offset;
offset = NPS_PKT_SLC_CTLX(port);
pkt_slc_ctl.value = 0;
pkt_slc_ctl.s.enb = 1;
-
/*
* 8 trailing 0x00 bytes will be added
* to the end of the outgoing packet.
@@ -191,23 +208,27 @@ void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
/* wait to set [ENB] */
do {
pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
- } while (!pkt_slc_ctl.s.enb);
+ if (pkt_slc_ctl.s.enb)
+ break;
+ udelay(50);
+ } while (max_retries--);
}
-static void config_single_pkt_solicit_port(struct nitrox_device *ndev,
- int port)
+static void config_pkt_solicit_port(struct nitrox_device *ndev, int port)
{
union nps_pkt_slc_int_levels pkt_slc_int;
u64 offset;
reset_pkt_solicit_port(ndev, port);
+ /* step 4: configure interrupt levels */
offset = NPS_PKT_SLC_INT_LEVELSX(port);
pkt_slc_int.value = 0;
/* time interrupt threshold */
pkt_slc_int.s.timet = 0x3fffff;
nitrox_write_csr(ndev, offset, pkt_slc_int.value);
+ /* enable the solicit port */
enable_pkt_solicit_port(ndev, port);
}
@@ -216,12 +237,12 @@ void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
int i;
for (i = 0; i < ndev->nr_queues; i++)
- config_single_pkt_solicit_port(ndev, i);
+ config_pkt_solicit_port(ndev, i);
}
/**
* enable_nps_interrupts - enable NPS interrupts
- * @ndev: N5 device.
+ * @ndev: NITROX device.
*
* This includes NPS core, packet in and slc interrupts.
*/
@@ -284,8 +305,8 @@ void nitrox_config_pom_unit(struct nitrox_device *ndev)
}
/**
- * nitrox_config_rand_unit - enable N5 random number unit
- * @ndev: N5 device
+ * nitrox_config_rand_unit - enable NITROX random number unit
+ * @ndev: NITROX device
*/
void nitrox_config_rand_unit(struct nitrox_device *ndev)
{
@@ -361,6 +382,7 @@ void invalidate_lbc(struct nitrox_device *ndev)
{
union lbc_inval_ctl lbc_ctl;
union lbc_inval_status lbc_stat;
+ int max_retries = MAX_CSR_RETRIES;
u64 offset;
/* invalidate LBC */
@@ -370,10 +392,12 @@ void invalidate_lbc(struct nitrox_device *ndev)
nitrox_write_csr(ndev, offset, lbc_ctl.value);
offset = LBC_INVAL_STATUS;
-
do {
lbc_stat.value = nitrox_read_csr(ndev, offset);
- } while (!lbc_stat.s.done);
+ if (lbc_stat.s.done)
+ break;
+ udelay(50);
+ } while (max_retries--);
}
void nitrox_config_lbc_unit(struct nitrox_device *ndev)
@@ -467,3 +491,31 @@ void nitrox_get_hwinfo(struct nitrox_device *ndev)
/* copy partname */
strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
}
+
+void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
+{
+ u64 value = ~0ULL;
+ u64 reg_addr;
+
+ /* Mailbox interrupt low enable set register */
+ reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1S;
+ nitrox_write_csr(ndev, reg_addr, value);
+
+ /* Mailbox interrupt high enable set register */
+ reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1S;
+ nitrox_write_csr(ndev, reg_addr, value);
+}
+
+void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
+{
+ u64 value = ~0ULL;
+ u64 reg_addr;
+
+ /* Mailbox interrupt low enable clear register */
+ reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1C;
+ nitrox_write_csr(ndev, reg_addr, value);
+
+ /* Mailbox interrupt high enable clear register */
+ reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1C;
+ nitrox_write_csr(ndev, reg_addr, value);
+}
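
[Illustration: not part of the patch] These two helpers rely on the
write-1-to-set (W1S) / write-1-to-clear (W1C) register pairs, so enable
bits can be flipped without a read-modify-write cycle or any locking.
A hedged sketch toggling a single ring's enable bit, reusing the
register names from this file:

	/* Set then clear the mailbox interrupt enable for ring 5 only.
	 * Writing 1 to a W1S register sets that bit; writing 1 to the
	 * matching W1C register clears it; 0 bits are left untouched.
	 */
	static void toggle_mbox_ring5(struct nitrox_device *ndev)
	{
		u64 bit = BIT_ULL(5);

		nitrox_write_csr(ndev, NPS_PKT_MBOX_INT_LO_ENA_W1S, bit);
		nitrox_write_csr(ndev, NPS_PKT_MBOX_INT_LO_ENA_W1C, bit);
	}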
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h
index 489ee64c119e..d6606418ba38 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h
@@ -19,5 +19,7 @@ void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode);
void nitrox_get_hwinfo(struct nitrox_device *ndev);
+void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev);
+void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev);
#endif /* __NITROX_HAL_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index 88a77b8fb3fb..3dec570a190a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -7,12 +7,14 @@
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"
+#include "nitrox_mbx.h"
/**
* One vector for each type of ring
* - NPS packet ring, AQMQ ring and ZQMQ ring
*/
#define NR_RING_VECTORS 3
+#define NR_NON_RING_VECTORS 1
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192
@@ -219,7 +221,8 @@ static void nps_core_int_tasklet(unsigned long data)
*/
static irqreturn_t nps_core_int_isr(int irq, void *data)
{
- struct nitrox_device *ndev = data;
+ struct nitrox_q_vector *qvec = data;
+ struct nitrox_device *ndev = qvec->ndev;
union nps_core_int_active core_int;
core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
@@ -245,6 +248,10 @@ static irqreturn_t nps_core_int_isr(int irq, void *data)
if (core_int.s.bmi)
clear_bmi_err_intr(ndev);
+ /* Mailbox interrupt */
+ if (core_int.s.mbox)
+ nitrox_pf2vf_mbox_handler(ndev);
+
/* If more work is pending, set resend so the ISR runs again */
core_int.s.resend = 1;
nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);
@@ -275,6 +282,7 @@ void nitrox_unregister_interrupts(struct nitrox_device *ndev)
qvec->valid = false;
}
kfree(ndev->qvec);
+ ndev->qvec = NULL;
pci_free_irq_vectors(pdev);
}
@@ -321,6 +329,7 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
if (qvec->ring >= ndev->nr_queues)
break;
+ qvec->cmdq = &ndev->pkt_inq[qvec->ring];
snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
/* get the vector number */
vec = pci_irq_vector(pdev, i);
@@ -335,13 +344,13 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
(unsigned long)qvec);
- qvec->cmdq = &ndev->pkt_inq[qvec->ring];
qvec->valid = true;
}
/* request irqs for non ring vectors */
i = NON_RING_MSIX_BASE;
qvec = &ndev->qvec[i];
+ qvec->ndev = ndev;
snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
/* get the vector number */
@@ -356,7 +365,6 @@ int nitrox_register_interrupts(struct nitrox_device *ndev)
tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
(unsigned long)qvec);
- qvec->ndev = ndev;
qvec->valid = true;
return 0;
@@ -365,3 +373,81 @@ irq_fail:
nitrox_unregister_interrupts(ndev);
return ret;
}
+
+void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ int i;
+
+ for (i = 0; i < ndev->num_vecs; i++) {
+ struct nitrox_q_vector *qvec;
+ int vec;
+
+ qvec = ndev->qvec + i;
+ if (!qvec->valid)
+ continue;
+
+ vec = ndev->iov.msix.vector;
+ irq_set_affinity_hint(vec, NULL);
+ free_irq(vec, qvec);
+
+ tasklet_disable(&qvec->resp_tasklet);
+ tasklet_kill(&qvec->resp_tasklet);
+ qvec->valid = false;
+ }
+ kfree(ndev->qvec);
+ ndev->qvec = NULL;
+ pci_disable_msix(pdev);
+}
+
+int nitrox_sriov_register_interrupts(struct nitrox_device *ndev)
+{
+ struct pci_dev *pdev = ndev->pdev;
+ struct nitrox_q_vector *qvec;
+ int vec, cpu;
+ int ret;
+
+ /*
+ * Only the non-ring vector (entry 192) is available
+ * to the PF in SR-IOV mode.
+ */
+ ndev->iov.msix.entry = NON_RING_MSIX_BASE;
+ ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS);
+ if (ret) {
+ dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n",
+ NON_RING_MSIX_BASE);
+ return ret;
+ }
+
+ qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL);
+ if (!qvec) {
+ pci_disable_msix(pdev);
+ return -ENOMEM;
+ }
+ qvec->ndev = ndev;
+
+ ndev->qvec = qvec;
+ ndev->num_vecs = NR_NON_RING_VECTORS;
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d",
+ NON_RING_MSIX_BASE);
+
+ vec = ndev->iov.msix.vector;
+ ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
+ if (ret) {
+ dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n",
+ NON_RING_MSIX_BASE);
+ goto iov_irq_fail;
+ }
+ cpu = num_online_cpus();
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
+
+ tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
+ (unsigned long)qvec);
+ qvec->valid = true;
+
+ return 0;
+
+iov_irq_fail:
+ nitrox_sriov_unregister_interrupts(ndev);
+ return ret;
+}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.h b/drivers/crypto/cavium/nitrox/nitrox_isr.h
index 63418a6cc52c..1062c9336c1f 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.h
@@ -6,5 +6,7 @@
int nitrox_register_interrupts(struct nitrox_device *ndev);
void nitrox_unregister_interrupts(struct nitrox_device *ndev);
+int nitrox_sriov_register_interrupts(struct nitrox_device *ndev);
+void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev);
#endif /* __NITROX_ISR_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 2260efa42308..9138bae12521 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -158,12 +158,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
void *crypto_alloc_context(struct nitrox_device *ndev)
{
struct ctx_hdr *ctx;
+ struct crypto_ctx_hdr *chdr;
void *vaddr;
dma_addr_t dma;
+ chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
+ if (!chdr)
+ return NULL;
+
vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
- if (!vaddr)
+ if (!vaddr) {
+ kfree(chdr);
return NULL;
+ }
/* fill meta data */
ctx = vaddr;
@@ -171,7 +178,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
ctx->dma = dma;
ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
- return ((u8 *)vaddr + sizeof(struct ctx_hdr));
+ chdr->pool = ndev->ctx_pool;
+ chdr->dma = dma;
+ chdr->vaddr = vaddr;
+
+ return chdr;
}
/**
@@ -180,13 +191,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
*/
void crypto_free_context(void *ctx)
{
- struct ctx_hdr *ctxp;
+ struct crypto_ctx_hdr *ctxp;
if (!ctx)
return;
- ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
- dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
+ ctxp = ctx;
+ dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
+ kfree(ctxp);
}
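
[Illustration: not part of the patch] crypto_alloc_context() now hands
back a struct crypto_ctx_hdr rather than a pointer into the DMA pool, so
crypto_free_context() no longer recovers the pool entry by pointer
arithmetic on the payload. A sketch of the new pairing, mirroring how
nitrox_skcipher_init() later in this patch consumes it:

	struct crypto_ctx_hdr *chdr = crypto_alloc_context(ndev);

	if (chdr) {
		/* device-visible handle: payload after the ctx header */
		u64 handle = (uintptr_t)((u8 *)chdr->vaddr +
					 sizeof(struct ctx_hdr));

		/* ... use @handle in requests ... */
		crypto_free_context(chdr);  /* frees pool entry + header */
	}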
/**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index 6595c95af9f1..014e9863c20e 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -1,6 +1,5 @@
#include <linux/aer.h>
#include <linux/delay.h>
-#include <linux/debugfs.h>
#include <linux/firmware.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -13,9 +12,9 @@
#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_isr.h"
+#include "nitrox_debugfs.h"
#define CNN55XX_DEV_ID 0x12
-#define MAX_PF_QUEUES 64
#define UCODE_HLEN 48
#define SE_GROUP 0
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
new file mode 100644
index 000000000000..02ee95064841
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/workqueue.h>
+
+#include "nitrox_csr.h"
+#include "nitrox_hal.h"
+#include "nitrox_dev.h"
+
+#define RING_TO_VFNO(_x, _y) ((_x) / (_y))
+
+/**
+ * enum mbx_msg_type - Mailbox message types
+ */
+enum mbx_msg_type {
+ MBX_MSG_TYPE_NOP,
+ MBX_MSG_TYPE_REQ,
+ MBX_MSG_TYPE_ACK,
+ MBX_MSG_TYPE_NACK,
+};
+
+/**
+ * enum mbx_msg_opcode - Mailbox message opcodes
+ */
+enum mbx_msg_opcode {
+ MSG_OP_VF_MODE = 1,
+ MSG_OP_VF_UP,
+ MSG_OP_VF_DOWN,
+ MSG_OP_CHIPID_VFID,
+};
+
+struct pf2vf_work {
+ struct nitrox_vfdev *vfdev;
+ struct nitrox_device *ndev;
+ struct work_struct pf2vf_resp;
+};
+
+static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring)
+{
+ u64 reg_addr;
+
+ reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring);
+ return nitrox_read_csr(ndev, reg_addr);
+}
+
+static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value,
+ int ring)
+{
+ u64 reg_addr;
+
+ reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring);
+ nitrox_write_csr(ndev, reg_addr, value);
+}
+
+static void pf2vf_send_response(struct nitrox_device *ndev,
+ struct nitrox_vfdev *vfdev)
+{
+ union mbox_msg msg;
+
+ msg.value = vfdev->msg.value;
+
+ switch (vfdev->msg.opcode) {
+ case MSG_OP_VF_MODE:
+ msg.data = ndev->mode;
+ break;
+ case MSG_OP_VF_UP:
+ vfdev->nr_queues = vfdev->msg.data;
+ atomic_set(&vfdev->state, __NDEV_READY);
+ break;
+ case MSG_OP_CHIPID_VFID:
+ msg.id.chipid = ndev->idx;
+ msg.id.vfid = vfdev->vfno;
+ break;
+ case MSG_OP_VF_DOWN:
+ vfdev->nr_queues = 0;
+ atomic_set(&vfdev->state, __NDEV_NOT_READY);
+ break;
+ default:
+ msg.type = MBX_MSG_TYPE_NOP;
+ break;
+ }
+
+ if (msg.type == MBX_MSG_TYPE_NOP)
+ return;
+
+ /* send ACK to VF */
+ msg.type = MBX_MSG_TYPE_ACK;
+ pf2vf_write_mbox(ndev, msg.value, vfdev->ring);
+
+ vfdev->msg.value = 0;
+ atomic64_inc(&vfdev->mbx_resp);
+}
+
+static void pf2vf_resp_handler(struct work_struct *work)
+{
+ struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work,
+ pf2vf_resp);
+ struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev;
+ struct nitrox_device *ndev = pf2vf_resp->ndev;
+
+ switch (vfdev->msg.type) {
+ case MBX_MSG_TYPE_REQ:
+ /* process the request from VF */
+ pf2vf_send_response(ndev, vfdev);
+ break;
+ case MBX_MSG_TYPE_ACK:
+ case MBX_MSG_TYPE_NACK:
+ break;
+ }
+
+ kfree(pf2vf_resp);
+}
+
+void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev)
+{
+ struct nitrox_vfdev *vfdev;
+ struct pf2vf_work *pfwork;
+ u64 value, reg_addr;
+ u32 i;
+ int vfno;
+
+ /* loop for VF(0..63) */
+ reg_addr = NPS_PKT_MBOX_INT_LO;
+ value = nitrox_read_csr(ndev, reg_addr);
+ for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
+ /* get the vfno from ring */
+ vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues);
+ vfdev = ndev->iov.vfdev + vfno;
+ vfdev->ring = i;
+ /* fill the vf mailbox data */
+ vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
+ pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
+ if (!pfwork)
+ continue;
+
+ pfwork->vfdev = vfdev;
+ pfwork->ndev = ndev;
+ INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
+ queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
+ /* clear the corresponding vf bit */
+ nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
+ }
+
+ /* loop for VF(64..127) */
+ reg_addr = NPS_PKT_MBOX_INT_HI;
+ value = nitrox_read_csr(ndev, reg_addr);
+ for_each_set_bit(i, (const unsigned long *)&value, BITS_PER_LONG) {
+ /* get the vfno from ring */
+ vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues);
+ vfdev = ndev->iov.vfdev + vfno;
+ vfdev->ring = (i + 64);
+ /* fill the vf mailbox data */
+ vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
+
+ pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC);
+ if (!pfwork)
+ continue;
+
+ pfwork->vfdev = vfdev;
+ pfwork->ndev = ndev;
+ INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler);
+ queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp);
+ /* clear the corresponding vf bit */
+ nitrox_write_csr(ndev, reg_addr, BIT_ULL(i));
+ }
+}
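+
[Illustration: not part of the patch] The handler scans two 64-bit
status CSRs and maps each set bit (a ring number) back to its owning
VF. A sketch of the arithmetic, using the RING_TO_VFNO() macro defined
at the top of this file:

	/* With 2 queues per VF (the 64-VF mode):
	 *   bit 9 of NPS_PKT_MBOX_INT_LO -> ring 9  -> VF 9 / 2  = 4
	 *   bit 1 of NPS_PKT_MBOX_INT_HI -> ring 65 -> VF 65 / 2 = 32
	 */
	int vfno = RING_TO_VFNO(9, 2);		/* == 4 */
	int vfno_hi = RING_TO_VFNO(1 + 64, 2);	/* == 32 */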
+
+int nitrox_mbox_init(struct nitrox_device *ndev)
+{
+ struct nitrox_vfdev *vfdev;
+ int i;
+
+ ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs,
+ sizeof(struct nitrox_vfdev), GFP_KERNEL);
+ if (!ndev->iov.vfdev)
+ return -ENOMEM;
+
+ for (i = 0; i < ndev->iov.num_vfs; i++) {
+ vfdev = ndev->iov.vfdev + i;
+ vfdev->vfno = i;
+ }
+
+ /* allocate pf2vf response workqueue */
+ ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
+ if (!ndev->iov.pf2vf_wq) {
+ kfree(ndev->iov.vfdev);
+ return -ENOMEM;
+ }
+ /* enable pf2vf mailbox interrupts */
+ enable_pf2vf_mbox_interrupts(ndev);
+
+ return 0;
+}
+
+void nitrox_mbox_cleanup(struct nitrox_device *ndev)
+{
+ /* disable pf2vf mailbox interrupts */
+ disable_pf2vf_mbox_interrupts(ndev);
+ /* destroy workqueue */
+ if (ndev->iov.pf2vf_wq)
+ destroy_workqueue(ndev->iov.pf2vf_wq);
+
+ kfree(ndev->iov.vfdev);
+ ndev->iov.pf2vf_wq = NULL;
+ ndev->iov.vfdev = NULL;
+}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.h b/drivers/crypto/cavium/nitrox/nitrox_mbx.h
new file mode 100644
index 000000000000..5008399775a9
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_MBX_H
+#define __NITROX_MBX_H
+
+int nitrox_mbox_init(struct nitrox_device *ndev);
+void nitrox_mbox_cleanup(struct nitrox_device *ndev);
+void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev);
+
+#endif /* __NITROX_MBX_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index d091b6f5f5dd..76c0f0be7233 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -7,6 +7,9 @@
#include "nitrox_dev.h"
+#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
+#define PRIO 4001
+
/**
* struct gphdr - General purpose Header
* @param0: first parameter.
@@ -46,13 +49,6 @@ union se_req_ctrl {
} s;
};
-struct nitrox_sglist {
- u16 len;
- u16 raz0;
- u32 raz1;
- dma_addr_t dma;
-};
-
#define MAX_IV_LEN 16
/**
@@ -62,8 +58,10 @@ struct nitrox_sglist {
* @ctx_handle: Crypto context handle.
* @gph: GP Header
* @ctrl: Request Information.
- * @in: Input sglist
- * @out: Output sglist
+ * @orh: ORH address
+ * @comp: completion address
+ * @src: Input sglist
+ * @dst: Output sglist
*/
struct se_crypto_request {
u8 opcode;
@@ -73,9 +71,8 @@ struct se_crypto_request {
struct gphdr gph;
union se_req_ctrl ctrl;
-
- u8 iv[MAX_IV_LEN];
- u16 ivsize;
+ u64 *orh;
+ u64 *comp;
struct scatterlist *src;
struct scatterlist *dst;
@@ -110,6 +107,18 @@ enum flexi_cipher {
CIPHER_INVALID
};
+enum flexi_auth {
+ AUTH_NULL = 0,
+ AUTH_MD5,
+ AUTH_SHA1,
+ AUTH_SHA2_SHA224,
+ AUTH_SHA2_SHA256,
+ AUTH_SHA2_SHA384,
+ AUTH_SHA2_SHA512,
+ AUTH_GMAC,
+ AUTH_INVALID
+};
+
/**
* struct crypto_keys - Crypto keys
* @key: Encryption key or KEY1 for AES-XTS
@@ -136,6 +145,32 @@ struct auth_keys {
u8 opad[64];
};
+union fc_ctx_flags {
+ __be64 f;
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cipher_type : 4;
+ u64 reserved_59 : 1;
+ u64 aes_keylen : 2;
+ u64 iv_source : 1;
+ u64 hash_type : 4;
+ u64 reserved_49_51 : 3;
+ u64 auth_input_type: 1;
+ u64 mac_len : 8;
+ u64 reserved_0_39 : 40;
+#else
+ u64 reserved_0_39 : 40;
+ u64 mac_len : 8;
+ u64 auth_input_type: 1;
+ u64 reserved_49_51 : 3;
+ u64 hash_type : 4;
+ u64 iv_source : 1;
+ u64 aes_keylen : 2;
+ u64 reserved_59 : 1;
+ u64 cipher_type : 4;
+#endif
+ } w0;
+};
/**
* struct flexi_crypto_context - Crypto context
* @cipher_type: Encryption cipher type
@@ -150,49 +185,30 @@ struct auth_keys {
* @auth: Authentication keys
*/
struct flexi_crypto_context {
- union {
- __be64 flags;
- struct {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 cipher_type : 4;
- u64 reserved_59 : 1;
- u64 aes_keylen : 2;
- u64 iv_source : 1;
- u64 hash_type : 4;
- u64 reserved_49_51 : 3;
- u64 auth_input_type: 1;
- u64 mac_len : 8;
- u64 reserved_0_39 : 40;
-#else
- u64 reserved_0_39 : 40;
- u64 mac_len : 8;
- u64 auth_input_type: 1;
- u64 reserved_49_51 : 3;
- u64 hash_type : 4;
- u64 iv_source : 1;
- u64 aes_keylen : 2;
- u64 reserved_59 : 1;
- u64 cipher_type : 4;
-#endif
- } w0;
- };
-
+ union fc_ctx_flags flags;
struct crypto_keys crypto;
struct auth_keys auth;
};
+struct crypto_ctx_hdr {
+ struct dma_pool *pool;
+ dma_addr_t dma;
+ void *vaddr;
+};
+
struct nitrox_crypto_ctx {
struct nitrox_device *ndev;
union {
u64 ctx_handle;
struct flexi_crypto_context *fctx;
} u;
+ struct crypto_ctx_hdr *chdr;
};
struct nitrox_kcrypt_request {
struct se_crypto_request creq;
- struct nitrox_crypto_ctx *nctx;
- struct skcipher_request *skreq;
+ u8 *src;
+ u8 *dst;
};
/**
@@ -369,26 +385,19 @@ struct nitrox_sgcomp {
/*
* struct nitrox_sgtable - SG list information
- * @map_cnt: Number of buffers mapped
- * @nr_comp: Number of sglist components
+ * @sgmap_cnt: Number of buffers mapped
* @total_bytes: Total bytes in sglist.
- * @len: Total sglist components length.
- * @dma: DMA address of sglist component.
- * @dir: DMA direction.
- * @buf: crypto request buffer.
- * @sglist: SG list of input/output buffers.
+ * @sgcomp_len: Total sglist components length.
+ * @sgcomp_dma: DMA address of sglist component.
+ * @sg: crypto request buffer.
* @sgcomp: sglist component for NITROX.
*/
struct nitrox_sgtable {
- u8 map_bufs_cnt;
- u8 nr_sgcomp;
+ u8 sgmap_cnt;
u16 total_bytes;
- u32 len;
- dma_addr_t dma;
- enum dma_data_direction dir;
-
- struct scatterlist *buf;
- struct nitrox_sglist *sglist;
+ u32 sgcomp_len;
+ dma_addr_t sgcomp_dma;
+ struct scatterlist *sg;
struct nitrox_sgcomp *sgcomp;
};
@@ -398,13 +407,11 @@ struct nitrox_sgtable {
#define COMP_HLEN 8
struct resp_hdr {
- u64 orh;
- dma_addr_t orh_dma;
- u64 completion;
- dma_addr_t completion_dma;
+ u64 *orh;
+ u64 *completion;
};
-typedef void (*completion_t)(struct skcipher_request *skreq, int err);
+typedef void (*completion_t)(void *arg, int err);
/**
* struct nitrox_softreq - Represents the NITROX Request.
@@ -427,7 +434,6 @@ struct nitrox_softreq {
u32 flags;
gfp_t gfp;
atomic_t status;
- bool inplace;
struct nitrox_device *ndev;
struct nitrox_cmdq *cmdq;
@@ -440,7 +446,201 @@ struct nitrox_softreq {
unsigned long tstamp;
completion_t callback;
- struct skcipher_request *skreq;
+ void *cb_arg;
};
+static inline int flexi_aes_keylen(int keylen)
+{
+ int aes_keylen;
+
+ switch (keylen) {
+ case AES_KEYSIZE_128:
+ aes_keylen = 1;
+ break;
+ case AES_KEYSIZE_192:
+ aes_keylen = 2;
+ break;
+ case AES_KEYSIZE_256:
+ aes_keylen = 3;
+ break;
+ default:
+ aes_keylen = -EINVAL;
+ break;
+ }
+ return aes_keylen;
+}
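+
[Illustration: not part of the patch] The values returned above are the
hardware's 2-bit key-length encoding (consumed by
fc_ctx_flags.w0.aes_keylen later in this header), not a byte count:

	int k128 = flexi_aes_keylen(AES_KEYSIZE_128);	/* == 1 */
	int k256 = flexi_aes_keylen(AES_KEYSIZE_256);	/* == 3 */
	int bad  = flexi_aes_keylen(20);		/* == -EINVAL */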
+
+static inline void *alloc_req_buf(int nents, int extralen, gfp_t gfp)
+{
+ size_t size;
+
+ size = sizeof(struct scatterlist) * nents;
+ size += extralen;
+
+ return kzalloc(size, gfp);
+}
+
+/**
+ * create_single_sg - Point SG entry to the data
+ * @sg: Destination SG list
+ * @buf: Data
+ * @buflen: Data length
+ *
+ * Returns next free entry in the destination SG list
+ */
+static inline struct scatterlist *create_single_sg(struct scatterlist *sg,
+ void *buf, int buflen)
+{
+ sg_set_buf(sg, buf, buflen);
+ sg++;
+ return sg;
+}
+
+/**
+ * create_multi_sg - Create multiple sg entries with buflen data length from
+ * source sglist
+ * @to_sg: Destination SG list
+ * @from_sg: Source SG list
+ * @buflen: Data length
+ *
+ * Returns next free entry in the destination SG list
+ */
+static inline struct scatterlist *create_multi_sg(struct scatterlist *to_sg,
+ struct scatterlist *from_sg,
+ int buflen)
+{
+ struct scatterlist *sg = to_sg;
+ unsigned int sglen;
+
+ for (; buflen; buflen -= sglen) {
+ sglen = from_sg->length;
+ if (sglen > buflen)
+ sglen = buflen;
+
+ sg_set_buf(sg, sg_virt(from_sg), sglen);
+ from_sg = sg_next(from_sg);
+ sg++;
+ }
+
+ return sg;
+}
+
+static inline void set_orh_value(u64 *orh)
+{
+ WRITE_ONCE(*orh, PENDING_SIG);
+}
+
+static inline void set_comp_value(u64 *comp)
+{
+ WRITE_ONCE(*comp, PENDING_SIG);
+}
+
+static inline int alloc_src_req_buf(struct nitrox_kcrypt_request *nkreq,
+ int nents, int ivsize)
+{
+ struct se_crypto_request *creq = &nkreq->creq;
+
+ nkreq->src = alloc_req_buf(nents, ivsize, creq->gfp);
+ if (!nkreq->src)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void nitrox_creq_copy_iv(char *dst, char *src, int size)
+{
+ memcpy(dst, src, size);
+}
+
+static inline struct scatterlist *nitrox_creq_src_sg(char *iv, int ivsize)
+{
+ return (struct scatterlist *)(iv + ivsize);
+}
+
+static inline void nitrox_creq_set_src_sg(struct nitrox_kcrypt_request *nkreq,
+ int nents, int ivsize,
+ struct scatterlist *src, int buflen)
+{
+ char *iv = nkreq->src;
+ struct scatterlist *sg;
+ struct se_crypto_request *creq = &nkreq->creq;
+
+ creq->src = nitrox_creq_src_sg(iv, ivsize);
+ sg = creq->src;
+ sg_init_table(sg, nents);
+
+ /* Input format:
+ * +----+----------------+
+ * | IV | SRC sg entries |
+ * +----+----------------+
+ */
+
+ /* IV */
+ sg = create_single_sg(sg, iv, ivsize);
+ /* SRC entries */
+ create_multi_sg(sg, src, buflen);
+}
+
+static inline int alloc_dst_req_buf(struct nitrox_kcrypt_request *nkreq,
+ int nents)
+{
+ int extralen = ORH_HLEN + COMP_HLEN;
+ struct se_crypto_request *creq = &nkreq->creq;
+
+ nkreq->dst = alloc_req_buf(nents, extralen, creq->gfp);
+ if (!nkreq->dst)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static inline void nitrox_creq_set_orh(struct nitrox_kcrypt_request *nkreq)
+{
+ struct se_crypto_request *creq = &nkreq->creq;
+
+ creq->orh = (u64 *)(nkreq->dst);
+ set_orh_value(creq->orh);
+}
+
+static inline void nitrox_creq_set_comp(struct nitrox_kcrypt_request *nkreq)
+{
+ struct se_crypto_request *creq = &nkreq->creq;
+
+ creq->comp = (u64 *)(nkreq->dst + ORH_HLEN);
+ set_comp_value(creq->comp);
+}
+
+static inline struct scatterlist *nitrox_creq_dst_sg(char *dst)
+{
+ return (struct scatterlist *)(dst + ORH_HLEN + COMP_HLEN);
+}
+
+static inline void nitrox_creq_set_dst_sg(struct nitrox_kcrypt_request *nkreq,
+ int nents, int ivsize,
+ struct scatterlist *dst, int buflen)
+{
+ struct se_crypto_request *creq = &nkreq->creq;
+ struct scatterlist *sg;
+ char *iv = nkreq->src;
+
+ creq->dst = nitrox_creq_dst_sg(nkreq->dst);
+ sg = creq->dst;
+ sg_init_table(sg, nents);
+
+ /* Output format:
+ * +-----+----+----------------+-----------------+
+ * | ORH | IV | DST sg entries | COMPLETION Bytes|
+ * +-----+----+----------------+-----------------+
+ */
+
+ /* ORH */
+ sg = create_single_sg(sg, creq->orh, ORH_HLEN);
+ /* IV */
+ sg = create_single_sg(sg, iv, ivsize);
+ /* DST entries */
+ sg = create_multi_sg(sg, dst, buflen);
+ /* COMPLETION Bytes */
+ create_single_sg(sg, creq->comp, COMP_HLEN);
+}
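+
[Illustration: not part of the patch] The helpers above pack the
request into two flat allocations whose layouts match the diagrams in
the code. A sketch of the resulting sizes, assuming the skreq/ivsize
names used by nitrox_skcipher.c further down in this patch:

	/* nkreq->src: | IV (ivsize) | SG array, sg_nents(src) + 1 entries |
	 * nkreq->dst: | ORH (8) | COMP (8) | SG array, sg_nents(dst) + 3 |
	 * (+1 covers the IV entry; +3 covers ORH, IV and completion)
	 */
	size_t src_sz = sizeof(struct scatterlist) *
			(sg_nents(skreq->src) + 1) + ivsize;
	size_t dst_sz = sizeof(struct scatterlist) *
			(sg_nents(skreq->dst) + 3) + ORH_HLEN + COMP_HLEN;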
+
#endif /* __NITROX_REQ_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 3987cd84c033..e34e4df8fd24 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -13,7 +13,6 @@
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
-#define PENDING_SIG 0xFFFFFFFFFFFFFFFFUL
#define REQ_NOT_POSTED 1
#define REQ_BACKLOG 2
@@ -52,58 +51,26 @@ static inline int incr_index(int index, int count, int max)
return index;
}
-/**
- * dma_free_sglist - unmap and free the sg lists.
- * @ndev: N5 device
- * @sgtbl: SG table
- */
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
struct nitrox_device *ndev = sr->ndev;
struct device *dev = DEV(ndev);
- struct nitrox_sglist *sglist;
-
- /* unmap in sgbuf */
- sglist = sr->in.sglist;
- if (!sglist)
- goto out_unmap;
-
- /* unmap iv */
- dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
- /* unmpa src sglist */
- dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
- /* unamp gather component */
- dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
- kfree(sr->in.sglist);
+
+ dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
+ DMA_TO_DEVICE);
kfree(sr->in.sgcomp);
- sr->in.sglist = NULL;
- sr->in.buf = NULL;
- sr->in.map_bufs_cnt = 0;
-
-out_unmap:
- /* unmap out sgbuf */
- sglist = sr->out.sglist;
- if (!sglist)
- return;
-
- /* unmap orh */
- dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
-
- /* unmap dst sglist */
- if (!sr->inplace) {
- dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
- sr->out.dir);
- }
- /* unmap completion */
- dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
+ sr->in.sg = NULL;
+ sr->in.sgmap_cnt = 0;
- /* unmap scatter component */
- dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
- kfree(sr->out.sglist);
+ dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
+ DMA_BIDIRECTIONAL);
+ dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
+ DMA_TO_DEVICE);
kfree(sr->out.sgcomp);
- sr->out.sglist = NULL;
- sr->out.buf = NULL;
- sr->out.map_bufs_cnt = 0;
+ sr->out.sg = NULL;
+ sr->out.sgmap_cnt = 0;
}
static void softreq_destroy(struct nitrox_softreq *sr)
@@ -116,7 +83,7 @@ static void softreq_destroy(struct nitrox_softreq *sr)
* create_sg_component - create SG components for N5 device.
* @sr: Request structure
* @sgtbl: SG table
- * @nr_comp: total number of components required
+ * @map_nents: number of DMA-mapped entries
*
* Component structure
*
@@ -140,7 +107,7 @@ static int create_sg_component(struct nitrox_softreq *sr,
{
struct nitrox_device *ndev = sr->ndev;
struct nitrox_sgcomp *sgcomp;
- struct nitrox_sglist *sglist;
+ struct scatterlist *sg;
dma_addr_t dma;
size_t sz_comp;
int i, j, nr_sgcomp;
@@ -154,17 +121,15 @@ static int create_sg_component(struct nitrox_softreq *sr,
return -ENOMEM;
sgtbl->sgcomp = sgcomp;
- sgtbl->nr_sgcomp = nr_sgcomp;
- sglist = sgtbl->sglist;
+ sg = sgtbl->sg;
/* populate device sg component */
for (i = 0; i < nr_sgcomp; i++) {
- for (j = 0; j < 4; j++) {
- sgcomp->len[j] = cpu_to_be16(sglist->len);
- sgcomp->dma[j] = cpu_to_be64(sglist->dma);
- sglist++;
+ for (j = 0; j < 4 && sg; j++) {
+ sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg));
+ sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg));
+ sg = sg_next(sg);
}
- sgcomp++;
}
/* map the device sg component */
dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
@@ -174,8 +139,8 @@ static int create_sg_component(struct nitrox_softreq *sr,
return -ENOMEM;
}
- sgtbl->dma = dma;
- sgtbl->len = sz_comp;
+ sgtbl->sgcomp_dma = dma;
+ sgtbl->sgcomp_len = sz_comp;
return 0;
}
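
[Illustration: not part of the patch] Each struct nitrox_sgcomp carries
four (length, address) pairs, so the loop above walks the mapped
scatterlist four entries at a time. A sketch of the sizing math,
assuming the roundup-to-four computation this function performs just
before the lines shown:

	/* e.g. map_nents = 6 -> nr_sgcomp = 2; the two unused slots of
	 * the second component stay zeroed.
	 */
	int nr_sgcomp = roundup(map_nents, 4) / 4;
	size_t sz_comp = nr_sgcomp * sizeof(struct nitrox_sgcomp);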
@@ -193,66 +158,27 @@ static int dma_map_inbufs(struct nitrox_softreq *sr,
{
struct device *dev = DEV(sr->ndev);
struct scatterlist *sg = req->src;
- struct nitrox_sglist *glist;
int i, nents, ret = 0;
- dma_addr_t dma;
- size_t sz;
- nents = sg_nents(req->src);
+ nents = dma_map_sg(dev, req->src, sg_nents(req->src),
+ DMA_BIDIRECTIONAL);
+ if (!nents)
+ return -EINVAL;
- /* creater gather list IV and src entries */
- sz = roundup((1 + nents), 4) * sizeof(*glist);
- glist = kzalloc(sz, sr->gfp);
- if (!glist)
- return -ENOMEM;
+ for_each_sg(req->src, sg, nents, i)
+ sr->in.total_bytes += sg_dma_len(sg);
- sr->in.sglist = glist;
- /* map IV */
- dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, dma)) {
- ret = -EINVAL;
- goto iv_map_err;
- }
-
- sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
- /* map src entries */
- nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
- if (!nents) {
- ret = -EINVAL;
- goto src_map_err;
- }
- sr->in.buf = req->src;
-
- /* store the mappings */
- glist->len = req->ivsize;
- glist->dma = dma;
- glist++;
- sr->in.total_bytes += req->ivsize;
-
- for_each_sg(req->src, sg, nents, i) {
- glist->len = sg_dma_len(sg);
- glist->dma = sg_dma_address(sg);
- sr->in.total_bytes += glist->len;
- glist++;
- }
- /* roundup map count to align with entires in sg component */
- sr->in.map_bufs_cnt = (1 + nents);
-
- /* create NITROX gather component */
- ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
+ sr->in.sg = req->src;
+ sr->in.sgmap_cnt = nents;
+ ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt);
if (ret)
goto incomp_err;
return 0;
incomp_err:
- dma_unmap_sg(dev, req->src, nents, sr->in.dir);
- sr->in.map_bufs_cnt = 0;
-src_map_err:
- dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
-iv_map_err:
- kfree(sr->in.sglist);
- sr->in.sglist = NULL;
+ dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
+ sr->in.sgmap_cnt = 0;
return ret;
}
@@ -260,104 +186,25 @@ static int dma_map_outbufs(struct nitrox_softreq *sr,
struct se_crypto_request *req)
{
struct device *dev = DEV(sr->ndev);
- struct nitrox_sglist *glist = sr->in.sglist;
- struct nitrox_sglist *slist;
- struct scatterlist *sg;
- int i, nents, map_bufs_cnt, ret = 0;
- size_t sz;
-
- nents = sg_nents(req->dst);
-
- /* create scatter list ORH, IV, dst entries and Completion header */
- sz = roundup((3 + nents), 4) * sizeof(*slist);
- slist = kzalloc(sz, sr->gfp);
- if (!slist)
- return -ENOMEM;
-
- sr->out.sglist = slist;
- sr->out.dir = DMA_BIDIRECTIONAL;
- /* map ORH */
- sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
- sr->out.dir);
- if (dma_mapping_error(dev, sr->resp.orh_dma)) {
- ret = -EINVAL;
- goto orh_map_err;
- }
+ int nents, ret = 0;
- /* map completion */
- sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
- COMP_HLEN, sr->out.dir);
- if (dma_mapping_error(dev, sr->resp.completion_dma)) {
- ret = -EINVAL;
- goto compl_map_err;
- }
+ nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),
+ DMA_BIDIRECTIONAL);
+ if (!nents)
+ return -EINVAL;
- sr->inplace = (req->src == req->dst) ? true : false;
- /* out place */
- if (!sr->inplace) {
- nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
- if (!nents) {
- ret = -EINVAL;
- goto dst_map_err;
- }
- }
- sr->out.buf = req->dst;
-
- /* store the mappings */
- /* orh */
- slist->len = ORH_HLEN;
- slist->dma = sr->resp.orh_dma;
- slist++;
-
- /* copy the glist mappings */
- if (sr->inplace) {
- nents = sr->in.map_bufs_cnt - 1;
- map_bufs_cnt = sr->in.map_bufs_cnt;
- while (map_bufs_cnt--) {
- slist->len = glist->len;
- slist->dma = glist->dma;
- slist++;
- glist++;
- }
- } else {
- /* copy iv mapping */
- slist->len = glist->len;
- slist->dma = glist->dma;
- slist++;
- /* copy remaining maps */
- for_each_sg(req->dst, sg, nents, i) {
- slist->len = sg_dma_len(sg);
- slist->dma = sg_dma_address(sg);
- slist++;
- }
- }
-
- /* completion */
- slist->len = COMP_HLEN;
- slist->dma = sr->resp.completion_dma;
-
- sr->out.map_bufs_cnt = (3 + nents);
-
- ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
+ sr->out.sg = req->dst;
+ sr->out.sgmap_cnt = nents;
+ ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt);
if (ret)
goto outcomp_map_err;
return 0;
outcomp_map_err:
- if (!sr->inplace)
- dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
- sr->out.map_bufs_cnt = 0;
- sr->out.buf = NULL;
-dst_map_err:
- dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
- sr->resp.completion_dma = 0;
-compl_map_err:
- dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
- sr->resp.orh_dma = 0;
-orh_map_err:
- kfree(sr->out.sglist);
- sr->out.sglist = NULL;
+ dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
+ sr->out.sgmap_cnt = 0;
+ sr->out.sg = NULL;
return ret;
}
@@ -422,6 +269,8 @@ static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
smp_mb__after_atomic();
return true;
}
+ /* sync with other cpus */
+ smp_mb__after_atomic();
return false;
}
@@ -477,8 +326,6 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
spin_lock_bh(&cmdq->backlog_qlock);
list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
- struct skcipher_request *skreq;
-
/* submit until space available */
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
ret = -ENOSPC;
@@ -490,12 +337,8 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
/* sync with other cpus */
smp_mb__after_atomic();
- skreq = sr->skreq;
/* post the command */
post_se_instr(sr, cmdq);
-
- /* backlog requests are posted, wakeup with -EINPROGRESS */
- skcipher_request_complete(skreq, -EINPROGRESS);
}
spin_unlock_bh(&cmdq->backlog_qlock);
@@ -518,7 +361,7 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
}
/* add to backlog list */
backlog_list_add(sr, cmdq);
- return -EBUSY;
+ return -EINPROGRESS;
}
post_se_instr(sr, cmdq);
@@ -535,7 +378,7 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
int nitrox_process_se_request(struct nitrox_device *ndev,
struct se_crypto_request *req,
completion_t callback,
- struct skcipher_request *skreq)
+ void *cb_arg)
{
struct nitrox_softreq *sr;
dma_addr_t ctx_handle = 0;
@@ -552,12 +395,12 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
sr->flags = req->flags;
sr->gfp = req->gfp;
sr->callback = callback;
- sr->skreq = skreq;
+ sr->cb_arg = cb_arg;
atomic_set(&sr->status, REQ_NOT_POSTED);
- WRITE_ONCE(sr->resp.orh, PENDING_SIG);
- WRITE_ONCE(sr->resp.completion, PENDING_SIG);
+ sr->resp.orh = req->orh;
+ sr->resp.completion = req->comp;
ret = softreq_map_iobuf(sr, req);
if (ret) {
@@ -598,13 +441,13 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
/* fill the packet instruction */
/* word 0 */
- sr->instr.dptr0 = cpu_to_be64(sr->in.dma);
+ sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);
/* word 1 */
sr->instr.ih.value = 0;
sr->instr.ih.s.g = 1;
- sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
- sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
+ sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
+ sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);
@@ -626,11 +469,11 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
/* word 4 */
sr->instr.slc.value[0] = 0;
- sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
+ sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);
/* word 5 */
- sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);
+ sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);
/*
* No conversion for front data,
@@ -664,6 +507,24 @@ void backlog_qflush_work(struct work_struct *work)
post_backlog_cmds(cmdq);
}
+static bool sr_completed(struct nitrox_softreq *sr)
+{
+ u64 orh = READ_ONCE(*sr->resp.orh);
+ unsigned long timeout = jiffies + msecs_to_jiffies(1);
+
+ if ((orh != PENDING_SIG) && (orh & 0xff))
+ return true;
+
+ while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
+ if (time_after(jiffies, timeout)) {
+ pr_err("comp not done\n");
+ return false;
+ }
+ }
+
+ return true;
+}
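+
[Illustration: not part of the patch] Both response words are seeded
with PENDING_SIG (all ones) before the command is posted, so completion
is detected purely by the device overwriting them. A sketch of the
handshake sr_completed() implements:

	/* post:   *orh = *comp = PENDING_SIG (set_orh_value() /
	 *         set_comp_value()); both words live inside the
	 *         DMA-mapped output buffer
	 * device: writes the real ORH (low byte = error code), then
	 *         the completion bytes
	 * poll:   ORH changed and low byte non-zero -> done with error;
	 *         otherwise spin up to ~1ms for the completion word
	 */
	u64 orh = READ_ONCE(*sr->resp.orh);
	bool failed = (orh != PENDING_SIG) && (orh & 0xff);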
+
/**
* process_response_list - process completed requests
* @ndev: N5 device
@@ -675,8 +536,6 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = cmdq->ndev;
struct nitrox_softreq *sr;
- struct skcipher_request *skreq;
- completion_t callback;
int req_completed = 0, err = 0, budget;
/* check all pending requests */
@@ -691,13 +550,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
break;
/* check orh and completion bytes updates */
- if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
+ if (!sr_completed(sr)) {
/* request not completed, check for timeout */
if (!cmd_timeout(sr->tstamp, ndev->timeout))
break;
dev_err_ratelimited(DEV(ndev),
"Request timeout, orh 0x%016llx\n",
- READ_ONCE(sr->resp.orh));
+ READ_ONCE(*sr->resp.orh));
}
atomic_dec(&cmdq->pending_count);
atomic64_inc(&ndev->stats.completed);
@@ -706,15 +565,12 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
/* remove from response list */
response_list_del(sr, cmdq);
- callback = sr->callback;
- skreq = sr->skreq;
-
/* ORH error code */
- err = READ_ONCE(sr->resp.orh) & 0xff;
+ err = READ_ONCE(*sr->resp.orh) & 0xff;
softreq_destroy(sr);
- if (callback)
- callback(skreq, err);
+ if (sr->callback)
+ sr->callback(sr->cb_arg, err);
req_completed++;
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
new file mode 100644
index 000000000000..d4935d6cefdd
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#include <crypto/aes.h>
+#include <crypto/skcipher.h>
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/xts.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_common.h"
+#include "nitrox_req.h"
+
+struct nitrox_cipher {
+ const char *name;
+ enum flexi_cipher value;
+};
+
+/* supported cipher list */
+static const struct nitrox_cipher flexi_cipher_table[] = {
+ { "null", CIPHER_NULL },
+ { "cbc(des3_ede)", CIPHER_3DES_CBC },
+ { "ecb(des3_ede)", CIPHER_3DES_ECB },
+ { "cbc(aes)", CIPHER_AES_CBC },
+ { "ecb(aes)", CIPHER_AES_ECB },
+ { "cfb(aes)", CIPHER_AES_CFB },
+ { "rfc3686(ctr(aes))", CIPHER_AES_CTR },
+ { "xts(aes)", CIPHER_AES_XTS },
+ { "cts(cbc(aes))", CIPHER_AES_CBC_CTS },
+ { NULL, CIPHER_INVALID }
+};
+
+static enum flexi_cipher flexi_cipher_type(const char *name)
+{
+ const struct nitrox_cipher *cipher = flexi_cipher_table;
+
+ while (cipher->name) {
+ if (!strcmp(cipher->name, name))
+ break;
+ cipher++;
+ }
+ return cipher->value;
+}
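+
[Illustration: not part of the patch] The lookup walks the
NULL-terminated table above and falls out on the sentinel row, so any
unknown algorithm name maps to CIPHER_INVALID:

	enum flexi_cipher a = flexi_cipher_type("cbc(aes)"); /* CIPHER_AES_CBC */
	enum flexi_cipher b = flexi_cipher_type("chacha20"); /* CIPHER_INVALID */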
+
+static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+ struct crypto_ctx_hdr *chdr;
+
+ /* get the first device */
+ nctx->ndev = nitrox_get_first_device();
+ if (!nctx->ndev)
+ return -ENODEV;
+
+ /* allocate nitrox crypto context */
+ chdr = crypto_alloc_context(nctx->ndev);
+ if (!chdr) {
+ nitrox_put_device(nctx->ndev);
+ return -ENOMEM;
+ }
+ nctx->chdr = chdr;
+ nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
+ sizeof(struct ctx_hdr));
+ crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
+ sizeof(struct nitrox_kcrypt_request));
+ return 0;
+}
+
+static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
+{
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
+
+ /* free the nitrox crypto context */
+ if (nctx->u.ctx_handle) {
+ struct flexi_crypto_context *fctx = nctx->u.fctx;
+
+ memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys));
+ memzero_explicit(&fctx->auth, sizeof(struct auth_keys));
+ crypto_free_context((void *)nctx->chdr);
+ }
+ nitrox_put_device(nctx->ndev);
+
+ nctx->u.ctx_handle = 0;
+ nctx->ndev = NULL;
+}
+
+static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher,
+ int aes_keylen, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+ struct flexi_crypto_context *fctx;
+ union fc_ctx_flags *flags;
+ enum flexi_cipher cipher_type;
+ const char *name;
+
+ name = crypto_tfm_alg_name(tfm);
+ cipher_type = flexi_cipher_type(name);
+ if (unlikely(cipher_type == CIPHER_INVALID)) {
+ pr_err("unsupported cipher: %s\n", name);
+ return -EINVAL;
+ }
+
+ /* fill crypto context */
+ fctx = nctx->u.fctx;
+ flags = &fctx->flags;
+ flags->f = 0;
+ flags->w0.cipher_type = cipher_type;
+ flags->w0.aes_keylen = aes_keylen;
+ flags->w0.iv_source = IV_FROM_DPTR;
+ flags->f = cpu_to_be64(*(u64 *)&flags->w0);
+ /* copy the key to context */
+ memcpy(fctx->crypto.u.key, key, keylen);
+
+ return 0;
+}
+
+static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ int aes_keylen;
+
+ aes_keylen = flexi_aes_keylen(keylen);
+ if (aes_keylen < 0) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static int alloc_src_sglist(struct skcipher_request *skreq, int ivsize)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ int nents = sg_nents(skreq->src) + 1;
+ int ret;
+
+ /* Allocate buffer to hold IV and input scatterlist array */
+ ret = alloc_src_req_buf(nkreq, nents, ivsize);
+ if (ret)
+ return ret;
+
+ nitrox_creq_copy_iv(nkreq->src, skreq->iv, ivsize);
+ nitrox_creq_set_src_sg(nkreq, nents, ivsize, skreq->src,
+ skreq->cryptlen);
+
+ return 0;
+}
+
+static int alloc_dst_sglist(struct skcipher_request *skreq, int ivsize)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ int nents = sg_nents(skreq->dst) + 3;
+ int ret;
+
+ /* Allocate buffer to hold ORH, COMPLETION and output scatterlist
+ * array
+ */
+ ret = alloc_dst_req_buf(nkreq, nents);
+ if (ret)
+ return ret;
+
+ nitrox_creq_set_orh(nkreq);
+ nitrox_creq_set_comp(nkreq);
+ nitrox_creq_set_dst_sg(nkreq, nents, ivsize, skreq->dst,
+ skreq->cryptlen);
+
+ return 0;
+}
+
+static void free_src_sglist(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+
+ kfree(nkreq->src);
+}
+
+static void free_dst_sglist(struct skcipher_request *skreq)
+{
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+
+ kfree(nkreq->dst);
+}
+
+static void nitrox_skcipher_callback(void *arg, int err)
+{
+ struct skcipher_request *skreq = arg;
+
+ free_src_sglist(skreq);
+ free_dst_sglist(skreq);
+ if (err) {
+ pr_err_ratelimited("request failed status 0x%x\n", err);
+ err = -EINVAL;
+ }
+
+ skcipher_request_complete(skreq, err);
+}
+
+static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc)
+{
+ struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq);
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher);
+ struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq);
+ int ivsize = crypto_skcipher_ivsize(cipher);
+ struct se_crypto_request *creq;
+ int ret;
+
+ creq = &nkreq->creq;
+ creq->flags = skreq->base.flags;
+ creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+
+ /* fill the request */
+ creq->ctrl.value = 0;
+ creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC;
+ creq->ctrl.s.arg = (enc ? ENCRYPT : DECRYPT);
+ /* param0: length of the data to be encrypted */
+ creq->gph.param0 = cpu_to_be16(skreq->cryptlen);
+ creq->gph.param1 = 0;
+ /* param2: encryption data offset */
+ creq->gph.param2 = cpu_to_be16(ivsize);
+ creq->gph.param3 = 0;
+
+ creq->ctx_handle = nctx->u.ctx_handle;
+ creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context);
+
+ ret = alloc_src_sglist(skreq, ivsize);
+ if (ret)
+ return ret;
+
+ ret = alloc_dst_sglist(skreq, ivsize);
+ if (ret) {
+ free_src_sglist(skreq);
+ return ret;
+ }
+
+ /* send the crypto request */
+ return nitrox_process_se_request(nctx->ndev, creq,
+ nitrox_skcipher_callback, skreq);
+}
+
+static int nitrox_aes_encrypt(struct skcipher_request *skreq)
+{
+ return nitrox_skcipher_crypt(skreq, true);
+}
+
+static int nitrox_aes_decrypt(struct skcipher_request *skreq)
+{
+ return nitrox_skcipher_crypt(skreq, false);
+}
+
+static int nitrox_3des_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ if (keylen != DES3_EDE_KEY_SIZE) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ return nitrox_skcipher_setkey(cipher, 0, key, keylen);
+}
+
+static int nitrox_3des_encrypt(struct skcipher_request *skreq)
+{
+ return nitrox_skcipher_crypt(skreq, true);
+}
+
+static int nitrox_3des_decrypt(struct skcipher_request *skreq)
+{
+ return nitrox_skcipher_crypt(skreq, false);
+}
+
+static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+ struct flexi_crypto_context *fctx;
+ int aes_keylen, ret;
+
+ ret = xts_check_key(tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ keylen /= 2;
+
+ aes_keylen = flexi_aes_keylen(keylen);
+ if (aes_keylen < 0) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ fctx = nctx->u.fctx;
+ /* copy KEY2 */
+ memcpy(fctx->auth.u.key2, (key + keylen), keylen);
+
+ return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
+ struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+ struct flexi_crypto_context *fctx;
+ int aes_keylen;
+
+ if (keylen < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+ fctx = nctx->u.fctx;
+
+ memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE),
+ CTR_RFC3686_NONCE_SIZE);
+
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+
+ aes_keylen = flexi_aes_keylen(keylen);
+ if (aes_keylen < 0) {
+ crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+ return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen);
+}
+
+static struct skcipher_alg nitrox_skciphers[] = { {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "n5_cbc(aes)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = nitrox_aes_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "n5_ecb(aes)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = nitrox_aes_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "n5_cfb(aes)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = nitrox_aes_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "n5_xts(aes)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = nitrox_aes_xts_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "n5_rfc3686(ctr(aes))",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+ .setkey = nitrox_aes_ctr_rfc3686_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+}, {
+ .base = {
+ .cra_name = "cts(cbc(aes))",
+ .cra_driver_name = "n5_cts(cbc(aes))",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = nitrox_aes_setkey,
+ .encrypt = nitrox_aes_encrypt,
+ .decrypt = nitrox_aes_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "n5_cbc(des3_ede)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = nitrox_3des_setkey,
+ .encrypt = nitrox_3des_encrypt,
+ .decrypt = nitrox_3des_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}, {
+ .base = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "n5_ecb(des3_ede)",
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = nitrox_3des_setkey,
+ .encrypt = nitrox_3des_encrypt,
+ .decrypt = nitrox_3des_decrypt,
+ .init = nitrox_skcipher_init,
+ .exit = nitrox_skcipher_exit,
+}
+};
+
+int nitrox_register_skciphers(void)
+{
+ return crypto_register_skciphers(nitrox_skciphers,
+ ARRAY_SIZE(nitrox_skciphers));
+}
+
+void nitrox_unregister_skciphers(void)
+{
+ crypto_unregister_skciphers(nitrox_skciphers,
+ ARRAY_SIZE(nitrox_skciphers));
+}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
index 30c0aa874583..bf439d8256ba 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_sriov.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
@@ -6,7 +6,12 @@
#include "nitrox_hal.h"
#include "nitrox_common.h"
#include "nitrox_isr.h"
+#include "nitrox_mbx.h"
+/**
+ * num_vfs_valid - validate VF count
+ * @num_vfs: number of VF(s)
+ */
static inline bool num_vfs_valid(int num_vfs)
{
bool valid = false;
@@ -48,7 +53,32 @@ static inline enum vf_mode num_vfs_to_mode(int num_vfs)
return mode;
}
-static void pf_sriov_cleanup(struct nitrox_device *ndev)
+static inline int vf_mode_to_nr_queues(enum vf_mode mode)
+{
+ int nr_queues = 0;
+
+ switch (mode) {
+ case __NDEV_MODE_PF:
+ nr_queues = MAX_PF_QUEUES;
+ break;
+ case __NDEV_MODE_VF16:
+ nr_queues = 8;
+ break;
+ case __NDEV_MODE_VF32:
+ nr_queues = 4;
+ break;
+ case __NDEV_MODE_VF64:
+ nr_queues = 2;
+ break;
+ case __NDEV_MODE_VF128:
+ nr_queues = 1;
+ break;
+ }
+
+ return nr_queues;
+}
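+
[Illustration: not part of the patch] The VF-visible packet rings are
split evenly across the enabled VFs, which is all this helper encodes
(each product of VF count and queues per VF is 128):

	/*   __NDEV_MODE_VF16  -> 8 queues per VF   (16 * 8 = 128)
	 *   __NDEV_MODE_VF32  -> 4 queues per VF   (32 * 4 = 128)
	 *   __NDEV_MODE_VF64  -> 2 queues per VF   (64 * 2 = 128)
	 *   __NDEV_MODE_VF128 -> 1 queue per VF    (128 * 1 = 128)
	 */
	int per_vf = vf_mode_to_nr_queues(__NDEV_MODE_VF64);	/* == 2 */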
+
+static void nitrox_pf_cleanup(struct nitrox_device *ndev)
{
/* PF has no queues in SR-IOV mode */
atomic_set(&ndev->state, __NDEV_NOT_READY);
@@ -60,7 +90,11 @@ static void pf_sriov_cleanup(struct nitrox_device *ndev)
nitrox_common_sw_cleanup(ndev);
}
-static int pf_sriov_init(struct nitrox_device *ndev)
+/**
+ * nitrox_pf_reinit - re-initialize PF resources once SR-IOV is disabled
+ * @ndev: NITROX device
+ */
+static int nitrox_pf_reinit(struct nitrox_device *ndev)
{
int err;
@@ -86,6 +120,33 @@ static int pf_sriov_init(struct nitrox_device *ndev)
return nitrox_crypto_register();
}
+static void nitrox_sriov_cleanup(struct nitrox_device *ndev)
+{
+ /* unregister interrupts for PF in SR-IOV */
+ nitrox_sriov_unregister_interrupts(ndev);
+ nitrox_mbox_cleanup(ndev);
+}
+
+static int nitrox_sriov_init(struct nitrox_device *ndev)
+{
+ int ret;
+
+ /* register interrupts for PF in SR-IOV */
+ ret = nitrox_sriov_register_interrupts(ndev);
+ if (ret)
+ return ret;
+
+ ret = nitrox_mbox_init(ndev);
+ if (ret)
+ goto sriov_init_fail;
+
+ return 0;
+
+sriov_init_fail:
+ nitrox_sriov_cleanup(ndev);
+ return ret;
+}
+
static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct nitrox_device *ndev = pci_get_drvdata(pdev);
@@ -106,17 +167,32 @@ static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
}
dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);
- ndev->num_vfs = num_vfs;
ndev->mode = num_vfs_to_mode(num_vfs);
+ ndev->iov.num_vfs = num_vfs;
+ ndev->iov.max_vf_queues = vf_mode_to_nr_queues(ndev->mode);
/* set bit in flags */
set_bit(__NDEV_SRIOV_BIT, &ndev->flags);
/* cleanup PF resources */
- pf_sriov_cleanup(ndev);
+ nitrox_pf_cleanup(ndev);
- config_nps_core_vfcfg_mode(ndev, ndev->mode);
+ /* PF SR-IOV mode initialization */
+ err = nitrox_sriov_init(ndev);
+ if (err)
+ goto iov_fail;
+ config_nps_core_vfcfg_mode(ndev, ndev->mode);
return num_vfs;
+
+iov_fail:
+ pci_disable_sriov(pdev);
+ /* clear bit in flags */
+ clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
+ ndev->iov.num_vfs = 0;
+ ndev->mode = __NDEV_MODE_PF;
+ /* reset back to working mode in PF */
+ nitrox_pf_reinit(ndev);
+ return err;
}
static int nitrox_sriov_disable(struct pci_dev *pdev)
@@ -134,12 +210,16 @@ static int nitrox_sriov_disable(struct pci_dev *pdev)
/* clear bit in flags */
clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
- ndev->num_vfs = 0;
+ ndev->iov.num_vfs = 0;
+ ndev->iov.max_vf_queues = 0;
ndev->mode = __NDEV_MODE_PF;
+ /* cleanup PF SR-IOV resources */
+ nitrox_sriov_cleanup(ndev);
+
config_nps_core_vfcfg_mode(ndev, ndev->mode);
- return pf_sriov_init(ndev);
+ return nitrox_pf_reinit(ndev);
}
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3c6fe57f91f8..9108015e56cc 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -346,9 +346,7 @@ static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));
- cipher_tfm = crypto_alloc_cipher("aes", 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ cipher_tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(cipher_tfm)) {
pr_warn("could not load aes cipher driver\n");
return PTR_ERR(cipher_tfm);
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 01b82b82f8b8..f2643cda45db 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -58,6 +58,7 @@ struct cc_aead_ctx {
unsigned int enc_keylen;
unsigned int auth_keylen;
unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
+ unsigned int hash_len;
enum drv_cipher_mode cipher_mode;
enum cc_flow_mode flow_mode;
enum drv_hash_mode auth_mode;
@@ -122,6 +123,13 @@ static void cc_aead_exit(struct crypto_aead *tfm)
}
}
+static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
+{
+ struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
+
+ return cc_get_default_hash_len(ctx->drvdata);
+}
+
static int cc_aead_init(struct crypto_aead *tfm)
{
struct aead_alg *alg = crypto_aead_alg(tfm);
@@ -196,6 +204,7 @@ static int cc_aead_init(struct crypto_aead *tfm)
ctx->auth_state.hmac.ipad_opad = NULL;
ctx->auth_state.hmac.padded_authkey = NULL;
}
+ ctx->hash_len = cc_get_aead_hash_len(tfm);
return 0;
@@ -327,7 +336,7 @@ static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
/* Load the hash current length */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -465,7 +474,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
/* Load the hash current length */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hashmode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -1001,7 +1010,7 @@ static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -1098,7 +1107,7 @@ static void cc_proc_scheme_desc(struct aead_request *req,
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
set_cipher_do(&desc[idx], DO_PAD);
@@ -1128,7 +1137,7 @@ static void cc_proc_scheme_desc(struct aead_request *req,
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hash_mode);
set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -2358,6 +2367,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA1,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha1),cbc(des3_ede))",
@@ -2377,6 +2387,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_DES,
.auth_mode = DRV_HASH_SHA1,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha256),cbc(aes))",
@@ -2396,6 +2407,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA256,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha256),cbc(des3_ede))",
@@ -2415,6 +2427,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_DES,
.auth_mode = DRV_HASH_SHA256,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(xcbc(aes),cbc(aes))",
@@ -2434,6 +2447,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_XCBC_MAC,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
@@ -2453,6 +2467,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA1,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
@@ -2472,6 +2487,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_SHA256,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
@@ -2491,6 +2507,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_XCBC_MAC,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ccm(aes)",
@@ -2510,6 +2527,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "rfc4309(ccm(aes))",
@@ -2529,6 +2547,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "gcm(aes)",
@@ -2548,6 +2567,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "rfc4106(gcm(aes))",
@@ -2567,6 +2587,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "rfc4543(gcm(aes))",
@@ -2586,6 +2607,7 @@ static struct cc_alg_template aead_algs[] = {
.flow_mode = S_DIN_to_AES,
.auth_mode = DRV_HASH_NULL,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
};
@@ -2670,7 +2692,8 @@ int cc_aead_alloc(struct cc_drvdata *drvdata)
/* Linux crypto */
for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
- if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
+ if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & aead_algs[alg].std_body))
continue;
t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 7623b29911af..cc92b031fad1 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -7,6 +7,7 @@
#include <crypto/internal/skcipher.h>
#include <crypto/des.h>
#include <crypto/xts.h>
+#include <crypto/sm4.h>
#include <crypto/scatterwalk.h>
#include "cc_driver.h"
@@ -83,6 +84,9 @@ static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
return 0;
break;
+ case S_DIN_to_SM4:
+ if (size == SM4_KEY_SIZE)
+ return 0;
default:
break;
}
@@ -122,6 +126,17 @@ static int validate_data_size(struct cc_cipher_ctx *ctx_p,
if (IS_ALIGNED(size, DES_BLOCK_SIZE))
return 0;
break;
+ case S_DIN_to_SM4:
+ switch (ctx_p->cipher_mode) {
+ case DRV_CIPHER_CTR:
+ return 0;
+ case DRV_CIPHER_ECB:
+ case DRV_CIPHER_CBC:
+ if (IS_ALIGNED(size, SM4_BLOCK_SIZE))
+ return 0;
+ default:
+ break;
+ }
default:
break;
}
@@ -522,6 +537,9 @@ static void cc_setup_cipher_data(struct crypto_tfm *tfm,
case S_DIN_to_DES:
flow_mode = DIN_DES_DOUT;
break;
+ case S_DIN_to_SM4:
+ flow_mode = DIN_SM4_DOUT;
+ break;
default:
dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
return;
@@ -815,6 +833,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_XTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts512(paes)",
@@ -832,6 +851,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts4096(paes)",
@@ -849,6 +869,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv(paes)",
@@ -865,6 +886,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ESSIV,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv512(paes)",
@@ -882,6 +904,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv4096(paes)",
@@ -899,6 +922,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker(paes)",
@@ -915,6 +939,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_BITLOCKER,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker512(paes)",
@@ -932,6 +957,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker4096(paes)",
@@ -949,6 +975,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "ecb(paes)",
@@ -965,6 +992,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "cbc(paes)",
@@ -981,6 +1009,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "ofb(paes)",
@@ -997,6 +1026,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_OFB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "cts(cbc(paes))",
@@ -1013,6 +1043,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC_CTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "ctr(paes)",
@@ -1029,6 +1060,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CTR,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts(aes)",
@@ -1045,6 +1077,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_XTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts512(aes)",
@@ -1062,6 +1095,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "xts4096(aes)",
@@ -1079,6 +1113,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv(aes)",
@@ -1095,6 +1130,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ESSIV,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv512(aes)",
@@ -1112,6 +1148,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "essiv4096(aes)",
@@ -1129,6 +1166,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker(aes)",
@@ -1145,6 +1183,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_BITLOCKER,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker512(aes)",
@@ -1162,6 +1201,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 512,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "bitlocker4096(aes)",
@@ -1179,6 +1219,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.flow_mode = S_DIN_to_AES,
.data_unit = 4096,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "ecb(aes)",
@@ -1195,6 +1236,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cbc(aes)",
@@ -1211,6 +1253,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ofb(aes)",
@@ -1227,6 +1270,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_OFB,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cts(cbc(aes))",
@@ -1243,6 +1287,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC_CTS,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ctr(aes)",
@@ -1259,6 +1304,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CTR,
.flow_mode = S_DIN_to_AES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cbc(des3_ede)",
@@ -1275,6 +1321,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ecb(des3_ede)",
@@ -1291,6 +1338,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "cbc(des)",
@@ -1307,6 +1355,7 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_CBC,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "ecb(des)",
@@ -1323,6 +1372,58 @@ static const struct cc_alg_template skcipher_algs[] = {
.cipher_mode = DRV_CIPHER_ECB,
.flow_mode = S_DIN_to_DES,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "cbc(sm4)",
+ .driver_name = "cbc-sm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CBC,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ },
+ {
+ .name = "ecb(sm4)",
+ .driver_name = "ecb-sm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = 0,
+ },
+ .cipher_mode = DRV_CIPHER_ECB,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
+ },
+ {
+ .name = "ctr(sm4)",
+ .driver_name = "ctr-sm4-ccree",
+ .blocksize = SM4_BLOCK_SIZE,
+ .template_skcipher = {
+ .setkey = cc_cipher_setkey,
+ .encrypt = cc_cipher_encrypt,
+ .decrypt = cc_cipher_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ },
+ .cipher_mode = DRV_CIPHER_CTR,
+ .flow_mode = S_DIN_to_SM4,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
},
};
@@ -1398,7 +1499,8 @@ int cc_cipher_alloc(struct cc_drvdata *drvdata)
dev_dbg(dev, "Number of algorithms = %zu\n",
ARRAY_SIZE(skcipher_algs));
for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
- if (skcipher_algs[alg].min_hw_rev > drvdata->hw_rev)
+ if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & skcipher_algs[alg].std_body))
continue;
dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
diff --git a/drivers/crypto/ccree/cc_crypto_ctx.h b/drivers/crypto/ccree/cc_crypto_ctx.h
index e032544f4e31..c8dac273c563 100644
--- a/drivers/crypto/ccree/cc_crypto_ctx.h
+++ b/drivers/crypto/ccree/cc_crypto_ctx.h
@@ -115,7 +115,8 @@ enum drv_hash_mode {
DRV_HASH_CBC_MAC = 6,
DRV_HASH_XCBC_MAC = 7,
DRV_HASH_CMAC = 8,
- DRV_HASH_MODE_NUM = 9,
+ DRV_HASH_SM3 = 9,
+ DRV_HASH_MODE_NUM = 10,
DRV_HASH_RESERVE32B = S32_MAX
};
@@ -127,6 +128,7 @@ enum drv_hash_hw_mode {
DRV_HASH_HW_SHA512 = 4,
DRV_HASH_HW_SHA384 = 12,
DRV_HASH_HW_GHASH = 6,
+ DRV_HASH_HW_SM3 = 14,
DRV_HASH_HW_RESERVE32B = S32_MAX
};
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 1ff229c2aeab..8ada308d72ee 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -39,23 +39,38 @@ struct cc_hw_data {
char *name;
enum cc_hw_rev rev;
u32 sig;
+ int std_bodies;
};
/* Hardware revisions defs. */
+/* The 703 is an OSCCA-only variant of the 713 */
+static const struct cc_hw_data cc703_hw = {
+ .name = "703", .rev = CC_HW_REV_713, .std_bodies = CC_STD_OSCCA
+};
+
+static const struct cc_hw_data cc713_hw = {
+ .name = "713", .rev = CC_HW_REV_713, .std_bodies = CC_STD_ALL
+};
+
static const struct cc_hw_data cc712_hw = {
- .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U
+ .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U,
+ .std_bodies = CC_STD_ALL
};
static const struct cc_hw_data cc710_hw = {
- .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U
+ .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U,
+ .std_bodies = CC_STD_ALL
};
static const struct cc_hw_data cc630p_hw = {
- .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U
+ .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U,
+ .std_bodies = CC_STD_ALL
};
static const struct of_device_id arm_ccree_dev_of_match[] = {
+ { .compatible = "arm,cryptocell-703-ree", .data = &cc703_hw },
+ { .compatible = "arm,cryptocell-713-ree", .data = &cc713_hw },
{ .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
{ .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
{ .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
@@ -204,14 +219,13 @@ static int init_cc_resources(struct platform_device *plat_dev)
hw_rev = (struct cc_hw_data *)dev_id->data;
new_drvdata->hw_rev_name = hw_rev->name;
new_drvdata->hw_rev = hw_rev->rev;
+ new_drvdata->std_bodies = hw_rev->std_bodies;
if (hw_rev->rev >= CC_HW_REV_712) {
- new_drvdata->hash_len_sz = HASH_LEN_SIZE_712;
new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
} else {
- new_drvdata->hash_len_sz = HASH_LEN_SIZE_630;
new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
@@ -297,15 +311,17 @@ static int init_cc_resources(struct platform_device *plat_dev)
return rc;
}
- /* Verify correct mapping */
- signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
- if (signature_val != hw_rev->sig) {
- dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
- signature_val, hw_rev->sig);
- rc = -EINVAL;
- goto post_clk_err;
+ if (hw_rev->rev <= CC_HW_REV_712) {
+ /* Verify correct mapping */
+ signature_val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
+ if (signature_val != hw_rev->sig) {
+ dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
+ signature_val, hw_rev->sig);
+ rc = -EINVAL;
+ goto post_clk_err;
+ }
+ dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
}
- dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);
/* Display HW versions */
dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
@@ -461,6 +477,14 @@ int cc_clk_on(struct cc_drvdata *drvdata)
return 0;
}
+unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
+{
+ if (drvdata->hw_rev >= CC_HW_REV_712)
+ return HASH_LEN_SIZE_712;
+ else
+ return HASH_LEN_SIZE_630;
+}
+
void cc_clk_off(struct cc_drvdata *drvdata)
{
struct clk *clk = drvdata->clk;
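
Dropping the cached drvdata->hash_len_sz in favour of cc_get_default_hash_len() lets each tfm compute its own length-field size: the default depends on the HW revision, and the cc_hash.c hunks below override it for SM3. A standalone model of the selection (the two HASH_LEN_SIZE_* byte counts are assumed from the driver headers):

#define HASH_LEN_SIZE_712 16	/* assumed value for revs >= 712 */
#define HASH_LEN_SIZE_630 8	/* assumed value for older revs */
#define CC_SM3_HASH_LEN_SIZE 8	/* as defined in cc_hash.c below */

static unsigned int hash_len(unsigned int hw_rev, int is_sm3)
{
	if (is_sm3)
		return CC_SM3_HASH_LEN_SIZE;
	return hw_rev >= 712 ? HASH_LEN_SIZE_712 : HASH_LEN_SIZE_630;
}
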
diff --git a/drivers/crypto/ccree/cc_driver.h b/drivers/crypto/ccree/cc_driver.h
index d608a4faf662..5be7fd431b05 100644
--- a/drivers/crypto/ccree/cc_driver.h
+++ b/drivers/crypto/ccree/cc_driver.h
@@ -36,12 +36,19 @@
extern bool cc_dump_desc;
extern bool cc_dump_bytes;
-#define DRV_MODULE_VERSION "4.0"
+#define DRV_MODULE_VERSION "5.0"
enum cc_hw_rev {
CC_HW_REV_630 = 630,
CC_HW_REV_710 = 710,
- CC_HW_REV_712 = 712
+ CC_HW_REV_712 = 712,
+ CC_HW_REV_713 = 713
+};
+
+enum cc_std_body {
+ CC_STD_NIST = 0x1,
+ CC_STD_OSCCA = 0x2,
+ CC_STD_ALL = 0x3
};
#define CC_COHERENT_CACHE_PARAMS 0xEEE
@@ -127,10 +134,10 @@ struct cc_drvdata {
bool coherent;
char *hw_rev_name;
enum cc_hw_rev hw_rev;
- u32 hash_len_sz;
u32 axim_mon_offset;
u32 sig_offset;
u32 ver_offset;
+ int std_bodies;
};
struct cc_crypto_alg {
@@ -156,6 +163,7 @@ struct cc_alg_template {
int flow_mode; /* Note: currently, refers to the cipher mode only. */
int auth_mode;
u32 min_hw_rev;
+ enum cc_std_body std_body;
unsigned int data_unit;
struct cc_drvdata *drvdata;
};
@@ -182,6 +190,7 @@ int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
int cc_clk_on(struct cc_drvdata *drvdata);
void cc_clk_off(struct cc_drvdata *drvdata);
+unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);
static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
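
Because the cc_std_body values are single bits and std_bodies is a mask, one AND covers the whole policy: an algorithm registers only if the HW revision is high enough and the part implements the algorithm's standards body. A standalone, compilable model of the filter used by cc_aead_alloc(), cc_cipher_alloc() and cc_hash_alloc():

#include <stdio.h>

enum cc_std_body { CC_STD_NIST = 0x1, CC_STD_OSCCA = 0x2, CC_STD_ALL = 0x3 };

struct alg {
	const char *name;
	unsigned int min_hw_rev;
	enum cc_std_body std_body;
};

static int alg_supported(const struct alg *a, unsigned int hw_rev,
			 int std_bodies)
{
	return a->min_hw_rev <= hw_rev && (std_bodies & a->std_body);
}

int main(void)
{
	const struct alg algs[] = {
		{ "cbc(aes)", 630, CC_STD_NIST },
		{ "cbc(sm4)", 713, CC_STD_OSCCA },
	};
	unsigned int i;

	/* A CryptoCell 703 reports rev 713 but is OSCCA-only,
	 * so cbc(aes) is skipped while cbc(sm4) registers.
	 */
	for (i = 0; i < 2; i++)
		if (alg_supported(&algs[i], 713, CC_STD_OSCCA))
			printf("registering %s\n", algs[i].name);
	return 0;
}
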
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index b9313306c36f..2c4ddc8fb76b 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -6,6 +6,7 @@
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
+#include <crypto/sm3.h>
#include <crypto/internal/hash.h>
#include "cc_driver.h"
@@ -16,6 +17,7 @@
#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
+#define CC_SM3_HASH_LEN_SIZE 8
struct cc_hash_handle {
cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
@@ -43,6 +45,9 @@ static u64 sha384_init[] = {
static u64 sha512_init[] = {
SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+static const u32 sm3_init[] = {
+ SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
+ SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
unsigned int *seq_size);
@@ -82,6 +87,7 @@ struct cc_hash_ctx {
int hash_mode;
int hw_mode;
int inter_digestsize;
+ unsigned int hash_len;
struct completion setkey_comp;
bool is_hmac;
};
@@ -138,10 +144,10 @@ static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
ctx->hash_mode == DRV_HASH_SHA384)
memcpy(state->digest_bytes_len,
digest_len_sha512_init,
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
else
memcpy(state->digest_bytes_len, digest_len_init,
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
}
if (ctx->hash_mode != DRV_HASH_NULL) {
@@ -321,7 +327,7 @@ static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
/* Get final MAC result */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
/* TODO */
set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
NS_BIT, 1);
@@ -367,7 +373,7 @@ static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_din_sram(&desc[idx],
cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
- ctx->drvdata->hash_len_sz);
+ ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -440,7 +446,7 @@ static int cc_hash_digest(struct ahash_request *req)
* digest
*/
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
if (is_hmac) {
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT);
@@ -454,14 +460,14 @@ static int cc_hash_digest(struct ahash_request *req)
/* Load the hash current length */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
if (is_hmac) {
set_din_type(&desc[idx], DMA_DLLI,
state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT);
+ ctx->hash_len, NS_BIT);
} else {
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
if (nbytes)
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
else
@@ -478,7 +484,7 @@ static int cc_hash_digest(struct ahash_request *req)
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT, 0);
+ ctx->hash_len, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
set_cipher_do(&desc[idx], DO_PAD);
@@ -504,7 +510,7 @@ static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
{
/* Restore hash digest */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
@@ -513,10 +519,10 @@ static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
/* Restore hash current length */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT);
+ ctx->hash_len, NS_BIT);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -576,7 +582,7 @@ static int cc_hash_update(struct ahash_request *req)
/* store the hash digest result in context */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
ctx->inter_digestsize, NS_BIT, 0);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
@@ -585,9 +591,9 @@ static int cc_hash_update(struct ahash_request *req)
/* store current hash length in context */
hw_desc_init(&desc[idx]);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT, 1);
+ ctx->hash_len, NS_BIT, 1);
set_queue_last_ind(ctx->drvdata, &desc[idx]);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
@@ -649,9 +655,9 @@ static int cc_do_finup(struct ahash_request *req, bool update)
/* Pad the hash */
hw_desc_init(&desc[idx]);
set_cipher_do(&desc[idx], DO_PAD);
- set_cipher_mode(&desc[idx], ctx->hw_mode);
+ set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
- ctx->drvdata->hash_len_sz, NS_BIT, 0);
+ ctx->hash_len, NS_BIT, 0);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
idx++;
@@ -749,7 +755,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
/* Load the hash current length */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
@@ -831,7 +837,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
/* Load the hash current length */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
- set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
+ set_din_const(&desc[idx], 0, ctx->hash_len);
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
@@ -1069,6 +1075,16 @@ fail:
return -ENOMEM;
}
+static int cc_get_hash_len(struct crypto_tfm *tfm)
+{
+ struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->hash_mode == DRV_HASH_SM3)
+ return CC_SM3_HASH_LEN_SIZE;
+ else
+ return cc_get_default_hash_len(ctx->drvdata);
+}
+
static int cc_cra_init(struct crypto_tfm *tfm)
{
struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -1086,7 +1102,7 @@ static int cc_cra_init(struct crypto_tfm *tfm)
ctx->hw_mode = cc_alg->hw_mode;
ctx->inter_digestsize = cc_alg->inter_digestsize;
ctx->drvdata = cc_alg->drvdata;
-
+ ctx->hash_len = cc_get_hash_len(tfm);
return cc_alloc_ctx(ctx);
}
@@ -1465,8 +1481,8 @@ static int cc_hash_export(struct ahash_request *req, void *out)
memcpy(out, state->digest_buff, ctx->inter_digestsize);
out += ctx->inter_digestsize;
- memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz);
- out += ctx->drvdata->hash_len_sz;
+ memcpy(out, state->digest_bytes_len, ctx->hash_len);
+ out += ctx->hash_len;
memcpy(out, &curr_buff_cnt, sizeof(u32));
out += sizeof(u32);
@@ -1494,8 +1510,8 @@ static int cc_hash_import(struct ahash_request *req, const void *in)
memcpy(state->digest_buff, in, ctx->inter_digestsize);
in += ctx->inter_digestsize;
- memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz);
- in += ctx->drvdata->hash_len_sz;
+ memcpy(state->digest_bytes_len, in, ctx->hash_len);
+ in += ctx->hash_len;
/* Sanity check the data as much as possible */
memcpy(&tmp, in, sizeof(u32));
@@ -1515,6 +1531,7 @@ struct cc_hash_template {
char mac_name[CRYPTO_MAX_ALG_NAME];
char mac_driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
+ bool is_mac;
bool synchronize;
struct ahash_alg template_ahash;
int hash_mode;
@@ -1522,6 +1539,7 @@ struct cc_hash_template {
int inter_digestsize;
struct cc_drvdata *drvdata;
u32 min_hw_rev;
+ enum cc_std_body std_body;
};
#define CC_STATE_SIZE(_x) \
@@ -1536,6 +1554,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(sha1)",
.mac_driver_name = "hmac-sha1-ccree",
.blocksize = SHA1_BLOCK_SIZE,
+ .is_mac = true,
.synchronize = false,
.template_ahash = {
.init = cc_hash_init,
@@ -1555,6 +1574,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA1,
.inter_digestsize = SHA1_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha256",
@@ -1562,6 +1582,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(sha256)",
.mac_driver_name = "hmac-sha256-ccree",
.blocksize = SHA256_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1580,6 +1601,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA256,
.inter_digestsize = SHA256_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha224",
@@ -1587,6 +1609,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(sha224)",
.mac_driver_name = "hmac-sha224-ccree",
.blocksize = SHA224_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1605,6 +1628,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA256,
.inter_digestsize = SHA256_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha384",
@@ -1612,6 +1636,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(sha384)",
.mac_driver_name = "hmac-sha384-ccree",
.blocksize = SHA384_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1630,6 +1655,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA512,
.inter_digestsize = SHA512_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "sha512",
@@ -1637,6 +1663,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(sha512)",
.mac_driver_name = "hmac-sha512-ccree",
.blocksize = SHA512_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1655,6 +1682,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_SHA512,
.inter_digestsize = SHA512_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_712,
+ .std_body = CC_STD_NIST,
},
{
.name = "md5",
@@ -1662,6 +1690,7 @@ static struct cc_hash_template driver_hash[] = {
.mac_name = "hmac(md5)",
.mac_driver_name = "hmac-md5-ccree",
.blocksize = MD5_HMAC_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_hash_update,
@@ -1680,11 +1709,38 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_HASH_HW_MD5,
.inter_digestsize = MD5_DIGEST_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
+ },
+ {
+ .name = "sm3",
+ .driver_name = "sm3-ccree",
+ .blocksize = SM3_BLOCK_SIZE,
+ .is_mac = false,
+ .template_ahash = {
+ .init = cc_hash_init,
+ .update = cc_hash_update,
+ .final = cc_hash_final,
+ .finup = cc_hash_finup,
+ .digest = cc_hash_digest,
+ .export = cc_hash_export,
+ .import = cc_hash_import,
+ .setkey = cc_hash_setkey,
+ .halg = {
+ .digestsize = SM3_DIGEST_SIZE,
+ .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
+ },
+ },
+ .hash_mode = DRV_HASH_SM3,
+ .hw_mode = DRV_HASH_HW_SM3,
+ .inter_digestsize = SM3_DIGEST_SIZE,
+ .min_hw_rev = CC_HW_REV_713,
+ .std_body = CC_STD_OSCCA,
},
{
.mac_name = "xcbc(aes)",
.mac_driver_name = "xcbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_mac_update,
@@ -1703,11 +1759,13 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_CIPHER_XCBC_MAC,
.inter_digestsize = AES_BLOCK_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
{
.mac_name = "cmac(aes)",
.mac_driver_name = "cmac-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
+ .is_mac = true,
.template_ahash = {
.init = cc_hash_init,
.update = cc_mac_update,
@@ -1726,6 +1784,7 @@ static struct cc_hash_template driver_hash[] = {
.hw_mode = DRV_CIPHER_CMAC,
.inter_digestsize = AES_BLOCK_SIZE,
.min_hw_rev = CC_HW_REV_630,
+ .std_body = CC_STD_NIST,
},
};
@@ -1780,6 +1839,7 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
unsigned int larval_seq_len = 0;
struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
+ bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
int rc = 0;
/* Copy-to-sram digest-len */
@@ -1845,6 +1905,17 @@ int cc_init_hash_sram(struct cc_drvdata *drvdata)
sram_buff_ofs += sizeof(sha256_init);
larval_seq_len = 0;
+ if (sm3_supported) {
+ cc_set_sram_desc(sm3_init, sram_buff_ofs,
+ ARRAY_SIZE(sm3_init), larval_seq,
+ &larval_seq_len);
+ rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+ if (rc)
+ goto init_digest_const_err;
+ sram_buff_ofs += sizeof(sm3_init);
+ larval_seq_len = 0;
+ }
+
if (large_sha_supported) {
cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
(ARRAY_SIZE(sha384_init) * 2), larval_seq,
@@ -1911,6 +1982,9 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
sizeof(sha224_init) +
sizeof(sha256_init);
+ if (drvdata->hw_rev >= CC_HW_REV_713)
+ sram_size_to_alloc += sizeof(sm3_init);
+
if (drvdata->hw_rev >= CC_HW_REV_712)
sram_size_to_alloc += sizeof(digest_len_sha512_init) +
sizeof(sha384_init) + sizeof(sha512_init);
@@ -1937,30 +2011,33 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
struct cc_hash_alg *t_alg;
int hw_mode = driver_hash[alg].hw_mode;
- /* We either support both HASH and MAC or none */
- if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
+ /* Check that the HW revision and variants are suitable */
+ if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
+ !(drvdata->std_bodies & driver_hash[alg].std_body))
continue;
- /* register hmac version */
- t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
- if (IS_ERR(t_alg)) {
- rc = PTR_ERR(t_alg);
- dev_err(dev, "%s alg allocation failed\n",
- driver_hash[alg].driver_name);
- goto fail;
- }
- t_alg->drvdata = drvdata;
-
- rc = crypto_register_ahash(&t_alg->ahash_alg);
- if (rc) {
- dev_err(dev, "%s alg registration failed\n",
- driver_hash[alg].driver_name);
- kfree(t_alg);
- goto fail;
- } else {
- list_add_tail(&t_alg->entry, &hash_handle->hash_list);
+ if (driver_hash[alg].is_mac) {
+ /* register hmac version */
+ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
+ if (IS_ERR(t_alg)) {
+ rc = PTR_ERR(t_alg);
+ dev_err(dev, "%s alg allocation failed\n",
+ driver_hash[alg].driver_name);
+ goto fail;
+ }
+ t_alg->drvdata = drvdata;
+
+ rc = crypto_register_ahash(&t_alg->ahash_alg);
+ if (rc) {
+ dev_err(dev, "%s alg registration failed\n",
+ driver_hash[alg].driver_name);
+ kfree(t_alg);
+ goto fail;
+ } else {
+ list_add_tail(&t_alg->entry,
+ &hash_handle->hash_list);
+ }
}
-
if (hw_mode == DRV_CIPHER_XCBC_MAC ||
hw_mode == DRV_CIPHER_CMAC)
continue;
@@ -2027,7 +2104,7 @@ static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
XCBC_MAC_K1_OFFSET),
CC_AES_128_BIT_KEY_SIZE, NS_BIT);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
- set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
+ set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
set_flow_mode(&desc[idx], S_DIN_to_AES);
@@ -2162,6 +2239,8 @@ static const void *cc_larval_digest(struct device *dev, u32 mode)
return sha384_init;
case DRV_HASH_SHA512:
return sha512_init;
+ case DRV_HASH_SM3:
+ return sm3_init;
default:
dev_err(dev, "Invalid hash mode (%d)\n", mode);
return md5_init;
@@ -2182,6 +2261,8 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
struct device *dev = drvdata_to_dev(_drvdata);
+ bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
+ cc_sram_addr_t addr;
switch (mode) {
case DRV_HASH_NULL:
@@ -2200,19 +2281,31 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
sizeof(md5_init) +
sizeof(sha1_init) +
sizeof(sha224_init));
- case DRV_HASH_SHA384:
+ case DRV_HASH_SM3:
return (hash_handle->larval_digest_sram_addr +
sizeof(md5_init) +
sizeof(sha1_init) +
sizeof(sha224_init) +
sizeof(sha256_init));
+ case DRV_HASH_SHA384:
+ addr = (hash_handle->larval_digest_sram_addr +
+ sizeof(md5_init) +
+ sizeof(sha1_init) +
+ sizeof(sha224_init) +
+ sizeof(sha256_init));
+ if (sm3_supported)
+ addr += sizeof(sm3_init);
+ return addr;
case DRV_HASH_SHA512:
- return (hash_handle->larval_digest_sram_addr +
+ addr = (hash_handle->larval_digest_sram_addr +
sizeof(md5_init) +
sizeof(sha1_init) +
sizeof(sha224_init) +
sizeof(sha256_init) +
sizeof(sha384_init));
+ if (sm3_supported)
+ addr += sizeof(sm3_init);
+ return addr;
default:
dev_err(dev, "Invalid hash mode (%d)\n", mode);
}
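
The SM3 larval digest is slotted between sha256 and sha384 in SRAM, which is why the sha384/sha512 cases above grow a conditional sm3_init offset while the earlier digests keep fixed ones. A standalone model of the offset arithmetic (array byte sizes assumed for illustration):

#include <stddef.h>

/* Byte sizes of the larval init arrays, assumed for illustration. */
#define MD5_SZ		16
#define SHA1_SZ		20
#define SHA224_SZ	32
#define SHA256_SZ	32
#define SM3_SZ		32
#define SHA384_SZ	64

static size_t sha384_off(int sm3_supported)
{
	size_t off = MD5_SZ + SHA1_SZ + SHA224_SZ + SHA256_SZ;

	if (sm3_supported)	/* rev >= 713 carves out an SM3 slot first */
		off += SM3_SZ;
	return off;
}

static size_t sha512_off(int sm3_supported)
{
	return sha384_off(sm3_supported) + SHA384_SZ;
}
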
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index 45985b955d2c..7a9b90db7db7 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -42,6 +42,7 @@
#define WORD3_QUEUE_LAST_IND CC_GENMASK(3, QUEUE_LAST_IND)
#define WORD4_ACK_NEEDED CC_GENMASK(4, ACK_NEEDED)
#define WORD4_AES_SEL_N_HASH CC_GENMASK(4, AES_SEL_N_HASH)
+#define WORD4_AES_XOR_CRYPTO_KEY CC_GENMASK(4, AES_XOR_CRYPTO_KEY)
#define WORD4_BYTES_SWAP CC_GENMASK(4, BYTES_SWAP)
#define WORD4_CIPHER_CONF0 CC_GENMASK(4, CIPHER_CONF0)
#define WORD4_CIPHER_CONF1 CC_GENMASK(4, CIPHER_CONF1)
@@ -107,6 +108,7 @@ enum cc_flow_mode {
AES_to_AES_to_HASH_and_DOUT = 13,
AES_to_AES_to_HASH = 14,
AES_to_HASH_and_AES = 15,
+ DIN_SM4_DOUT = 16,
DIN_AES_AESMAC = 17,
HASH_to_DOUT = 18,
/* setup flows */
@@ -114,9 +116,11 @@ enum cc_flow_mode {
S_DIN_to_AES2 = 33,
S_DIN_to_DES = 34,
S_DIN_to_RC4 = 35,
+ S_DIN_to_SM4 = 36,
S_DIN_to_HASH = 37,
S_AES_to_DOUT = 38,
S_AES2_to_DOUT = 39,
+ S_SM4_to_DOUT = 40,
S_RC4_to_DOUT = 41,
S_DES_to_DOUT = 42,
S_HASH_to_DOUT = 43,
@@ -394,6 +398,16 @@ static inline void set_aes_not_hash_mode(struct cc_hw_desc *pdesc)
}
/*
+ * Set the AES XOR crypto key bit; in some scenarios this selects the SM3 engine
+ *
+ * @pdesc: pointer HW descriptor struct
+ */
+static inline void set_aes_xor_crypto_key(struct cc_hw_desc *pdesc)
+{
+ pdesc->word[4] |= FIELD_PREP(WORD4_AES_XOR_CRYPTO_KEY, 1);
+}
+
+/*
* Set the DOUT field of a HW descriptor to SRAM mode
* Note: No need to check SRAM alignment since host requests do not use SRAM and
* adaptor will enforce alignment check.
@@ -455,6 +469,22 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
}
/*
+ * Set the cipher mode for hash algorithms.
+ *
+ * @pdesc: pointer HW descriptor struct
+ * @cipher_mode: Any one of the modes defined in [CC7x-DESC]
+ * @hash_mode: specifies which hash is being handled
+ */
+static inline void set_hash_cipher_mode(struct cc_hw_desc *pdesc,
+ enum drv_cipher_mode cipher_mode,
+ enum drv_hash_mode hash_mode)
+{
+ set_cipher_mode(pdesc, cipher_mode);
+ if (hash_mode == DRV_HASH_SM3)
+ set_aes_xor_crypto_key(pdesc);
+}
+
+/*
* Set the cipher configuration fields.
*
* @pdesc: pointer HW descriptor struct
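
set_hash_cipher_mode() lets every hash call site stay a one-liner: it performs the usual set_cipher_mode() and, only for SM3, ORs the new AES_XOR_CRYPTO_KEY select bit into word 4. A standalone model of that packing (the bit position is assumed for illustration; the driver derives the real mask via CC_GENMASK()/FIELD_PREP()):

#include <stdint.h>

#define W4_AES_XOR_CRYPTO_KEY	(1u << 11)	/* assumed bit position */

struct hw_desc { uint32_t word[6]; };

static void model_set_hash_cipher_mode(struct hw_desc *d, uint32_t mode_bits,
				       int is_sm3)
{
	d->word[4] |= mode_bits;		/* set_cipher_mode() equivalent */
	if (is_sm3)				/* SM3 rides the select bit */
		d->word[4] |= W4_AES_XOR_CRYPTO_KEY;
}
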
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index db203f8be429..bcef76508dfa 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -123,7 +123,7 @@ static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
- return ctx->dev->u_ctx;
+ return container_of(ctx->dev, struct uld_ctx, dev);
}
static inline int is_ofld_imm(const struct sk_buff *skb)
@@ -198,18 +198,43 @@ void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
*err = 0;
}
-static inline void chcr_handle_aead_resp(struct aead_request *req,
+static int chcr_inc_wrcount(struct chcr_dev *dev)
+{
+ int err = 0;
+
+ spin_lock_bh(&dev->lock_chcr_dev);
+ if (dev->state == CHCR_DETACH)
+ err = 1;
+ else
+ atomic_inc(&dev->inflight);
+
+ spin_unlock_bh(&dev->lock_chcr_dev);
+
+ return err;
+}
+
+static inline void chcr_dec_wrcount(struct chcr_dev *dev)
+{
+ atomic_dec(&dev->inflight);
+}
+
+static inline int chcr_handle_aead_resp(struct aead_request *req,
unsigned char *input,
int err)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_dev *dev = a_ctx(tfm)->dev;
chcr_aead_common_exit(req);
if (reqctx->verify == VERIFY_SW) {
chcr_verify_tag(req, input, &err);
reqctx->verify = VERIFY_HW;
}
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
+
+ return err;
}
static void get_aes_decrypt_key(unsigned char *dec_key,
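
The inc/dec pair above is a gate-then-count scheme: once the device flips to CHCR_DETACH no new work is admitted, while completions keep decrementing so teardown can wait for inflight to drain. A standalone model of the same idea (a pthread mutex standing in for spin_lock_bh, a plain counter for the atomic):

#include <pthread.h>

enum chcr_state { CHCR_ATTACHED, CHCR_DETACH };

struct chcr_dev_model {
	pthread_mutex_t lock;
	enum chcr_state state;
	int inflight;
};

static int inc_wrcount(struct chcr_dev_model *d)
{
	int detaching = 0;

	pthread_mutex_lock(&d->lock);
	if (d->state == CHCR_DETACH)
		detaching = 1;		/* refuse new work */
	else
		d->inflight++;
	pthread_mutex_unlock(&d->lock);
	return detaching;
}

static void dec_wrcount(struct chcr_dev_model *d)
{
	pthread_mutex_lock(&d->lock);	/* real driver: lock-free atomic_dec() */
	d->inflight--;
	pthread_mutex_unlock(&d->lock);
}
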
@@ -391,7 +416,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
size_t size,
- dma_addr_t *addr)
+ dma_addr_t addr)
{
int j;
@@ -399,7 +424,7 @@ static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
return;
j = walk->nents;
walk->to->len[j % 8] = htons(size);
- walk->to->addr[j % 8] = cpu_to_be64(*addr);
+ walk->to->addr[j % 8] = cpu_to_be64(addr);
j++;
if ((j % 8) == 0)
walk->to++;
@@ -473,16 +498,16 @@ static inline void ulptx_walk_end(struct ulptx_walk *walk)
static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
size_t size,
- dma_addr_t *addr)
+ dma_addr_t addr)
{
if (!size)
return;
if (walk->nents == 0) {
walk->sgl->len0 = cpu_to_be32(size);
- walk->sgl->addr0 = cpu_to_be64(*addr);
+ walk->sgl->addr0 = cpu_to_be64(addr);
} else {
- walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
+ walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
walk->pair_idx = !walk->pair_idx;
if (!walk->pair_idx)
@@ -717,7 +742,7 @@ static inline void create_wreq(struct chcr_context *ctx,
htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
chcr_req->wreq.rx_chid_to_rx_q_id =
- FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
+ FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
!!lcb, ctx->tx_qidx);
chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
@@ -773,7 +798,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
}
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
+ FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1100,6 +1125,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
struct cipher_wr_param wrparam;
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
int bytes;
if (err)
@@ -1161,6 +1187,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
unmap:
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
return err;
}
@@ -1187,7 +1214,10 @@ static int process_cipher(struct ablkcipher_request *req,
ablkctx->enckey_len, req->nbytes, ivsize);
goto error;
}
- chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+
+ err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+ if (err)
+ goto error;
if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
AES_MIN_KEY_SIZE +
sizeof(struct cpl_rx_phys_dsgl) +
@@ -1276,15 +1306,21 @@ error:
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ err = chcr_inc_wrcount(dev);
+ if (err)
+ return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
c_ctx(tfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ err = -ENOSPC;
+ goto error;
+ }
}
err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
@@ -1295,15 +1331,23 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
chcr_send_wr(skb);
return isfull ? -EBUSY : -EINPROGRESS;
+error:
+ chcr_dec_wrcount(dev);
+ return err;
}
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+ struct chcr_dev *dev = c_ctx(tfm)->dev;
struct sk_buff *skb = NULL;
int err, isfull = 0;
+ err = chcr_inc_wrcount(dev);
+ if (err)
+ return -ENXIO;
+
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
c_ctx(tfm)->tx_qidx))) {
isfull = 1;
@@ -1311,8 +1355,8 @@ static int chcr_aes_decrypt(struct ablkcipher_request *req)
return -ENOSPC;
}
- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
- &skb, CHCR_DECRYPT_OP);
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+ &skb, CHCR_DECRYPT_OP);
if (err || !skb)
return err;
skb->dev = u_ctx->lldi.ports[0];
@@ -1333,10 +1377,11 @@ static int chcr_device_init(struct chcr_context *ctx)
if (!ctx->dev) {
u_ctx = assign_chcr_device();
if (!u_ctx) {
+ err = -ENXIO;
pr_err("chcr device assignment fails\n");
goto out;
}
- ctx->dev = u_ctx->dev;
+ ctx->dev = &u_ctx->dev;
adap = padap(ctx->dev);
ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
@@ -1344,7 +1389,6 @@ static int chcr_device_init(struct chcr_context *ctx)
spin_lock(&ctx->dev->lock_chcr_dev);
ctx->tx_chan_id = ctx->dev->tx_channel_id;
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
- ctx->dev->rx_channel_id = 0;
spin_unlock(&ctx->dev->lock_chcr_dev);
rxq_idx = ctx->tx_chan_id * rxq_perchan;
rxq_idx += id % rxq_perchan;
@@ -1498,7 +1542,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
chcr_req = __skb_put_zero(skb, transhdr_len);
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
+ FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
chcr_req->sec_cpl.aadstart_cipherstop_hi =
@@ -1562,6 +1606,7 @@ static int chcr_ahash_update(struct ahash_request *req)
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
struct uld_ctx *u_ctx = NULL;
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct sk_buff *skb;
u8 remainder = 0, bs;
unsigned int nbytes = req->nbytes;
@@ -1570,12 +1615,6 @@ static int chcr_ahash_update(struct ahash_request *req)
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(h_ctx(rtfm));
- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
- h_ctx(rtfm)->tx_qidx))) {
- isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
- }
if (nbytes + req_ctx->reqlen >= bs) {
remainder = (nbytes + req_ctx->reqlen) % bs;
@@ -1586,10 +1625,27 @@ static int chcr_ahash_update(struct ahash_request *req)
req_ctx->reqlen += nbytes;
return 0;
}
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
+ /* Detach state for CHCR means lldi or padap is freed. Increasing
+ * the inflight count for the dev guarantees that lldi and padap stay valid
+ */
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ h_ctx(rtfm)->tx_qidx))) {
+ isfull = 1;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
+ }
+
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
@@ -1629,6 +1685,8 @@ static int chcr_ahash_update(struct ahash_request *req)
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1646,10 +1704,16 @@ static int chcr_ahash_final(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct hash_wr_param params;
struct sk_buff *skb;
struct uld_ctx *u_ctx = NULL;
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ int error = -EINVAL;
+
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
chcr_init_hctx_per_wr(req_ctx);
u_ctx = ULD_CTX(h_ctx(rtfm));
@@ -1686,19 +1750,25 @@ static int chcr_ahash_final(struct ahash_request *req)
}
params.hash_size = crypto_ahash_digestsize(rtfm);
skb = create_hash_wr(req, &params);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (IS_ERR(skb)) {
+ error = PTR_ERR(skb);
+ goto err;
+ }
req_ctx->reqlen = 0;
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
chcr_send_wr(skb);
return -EINPROGRESS;
+err:
+ chcr_dec_wrcount(dev);
+ return error;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
@@ -1707,17 +1777,24 @@ static int chcr_ahash_finup(struct ahash_request *req)
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
u_ctx = ULD_CTX(h_ctx(rtfm));
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
}
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1774,6 +1851,8 @@ static int chcr_ahash_finup(struct ahash_request *req)
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1781,6 +1860,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct chcr_dev *dev = h_ctx(rtfm)->dev;
struct uld_ctx *u_ctx = NULL;
struct sk_buff *skb;
struct hash_wr_param params;
@@ -1789,19 +1869,26 @@ static int chcr_ahash_digest(struct ahash_request *req)
rtfm->init(req);
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ error = chcr_inc_wrcount(dev);
+ if (error)
+ return -ENXIO;
u_ctx = ULD_CTX(h_ctx(rtfm));
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
h_ctx(rtfm)->tx_qidx))) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
- return -ENOSPC;
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ error = -ENOSPC;
+ goto err;
+ }
}
chcr_init_hctx_per_wr(req_ctx);
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
- if (error)
- return -ENOMEM;
+ if (error) {
+ error = -ENOMEM;
+ goto err;
+ }
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1854,6 +1941,8 @@ static int chcr_ahash_digest(struct ahash_request *req)
return isfull ? -EBUSY : -EINPROGRESS;
unmap:
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+ chcr_dec_wrcount(dev);
return error;
}
@@ -1925,6 +2014,7 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
int digestsize, updated_digestsize;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+ struct chcr_dev *dev = h_ctx(tfm)->dev;
if (input == NULL)
goto out;
@@ -1967,6 +2057,7 @@ unmap:
out:
+ chcr_dec_wrcount(dev);
req->base.complete(&req->base, err);
}
@@ -1983,14 +2074,13 @@ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
- chcr_handle_aead_resp(aead_request_cast(req), input, err);
+ err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
break;
case CRYPTO_ALG_TYPE_ABLKCIPHER:
- err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+ chcr_handle_cipher_resp(ablkcipher_request_cast(req),
input, err);
break;
-
case CRYPTO_ALG_TYPE_AHASH:
chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
}
@@ -2008,7 +2098,7 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
memcpy(state->partial_hash, req_ctx->partial_hash,
CHCR_HASH_MAX_DIGEST_SIZE);
chcr_init_hctx_per_wr(state);
- return 0;
+ return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
@@ -2215,10 +2305,7 @@ static int chcr_aead_common_init(struct aead_request *req)
error = -ENOMEM;
goto err;
}
- reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
- CHCR_SRC_SG_SIZE, 0);
- reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
- CHCR_SRC_SG_SIZE, req->assoclen);
+
return 0;
err:
return error;
@@ -2249,7 +2336,7 @@ static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
- aead_request_set_ad(subreq, req->assoclen);
+ aead_request_set_ad(subreq, req->assoclen);
return op_type ? crypto_aead_decrypt(subreq) :
crypto_aead_encrypt(subreq);
}
@@ -2268,10 +2355,10 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
- unsigned int kctx_len = 0, dnents;
- unsigned int assoclen = req->assoclen;
+ unsigned int kctx_len = 0, dnents, snents;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
int null = 0;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
@@ -2288,24 +2375,20 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
null = 1;
- assoclen = 0;
- reqctx->aad_nents = 0;
}
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
- (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
- req->assoclen);
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
dnents += MIN_AUTH_SG; // For IV
-
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
- sizeof(chcr_req->key_ctx);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
- : (sgl_len(reqctx->src_nents + reqctx->aad_nents
- + MIN_GCM_SG) * 8);
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
+ : (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
@@ -2315,7 +2398,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
@@ -2331,16 +2414,16 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
* to the hardware spec
*/
chcr_req->sec_cpl.op_ivinsrtofst =
- FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
- assoclen + 1);
- chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
+ FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- assoclen ? 1 : 0, assoclen,
- assoclen + IV + 1,
+ null ? 0 : 1 + IV,
+ null ? 0 : IV + req->assoclen,
+ req->assoclen + IV + 1,
(temp & 0x1F0) >> 4);
chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
temp & 0xF,
- null ? 0 : assoclen + IV + 1,
+ null ? 0 : req->assoclen + IV + 1,
temp, temp);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
@@ -2367,23 +2450,24 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
- memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
+ memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+ memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
CTR_RFC3686_IV_SIZE);
- *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
+ *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
} else {
- memcpy(reqctx->iv, req->iv, IV);
+ memcpy(ivptr, req->iv, IV);
}
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.cipher_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, 0);
reqctx->skb = skb;
@@ -2470,8 +2554,7 @@ void chcr_aead_dma_unmap(struct device *dev,
}
void chcr_add_aead_src_ent(struct aead_request *req,
- struct ulptx_sgl *ulptx,
- unsigned int assoclen)
+ struct ulptx_sgl *ulptx)
{
struct ulptx_walk ulp_walk;
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2484,28 +2567,20 @@ void chcr_add_aead_src_ent(struct aead_request *req,
buf += reqctx->b0_len;
}
sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, assoclen, 0);
- buf += assoclen;
- memcpy(buf, reqctx->iv, IV);
- buf += IV;
- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
- buf, req->cryptlen, req->assoclen);
+ buf, req->cryptlen + req->assoclen, 0);
} else {
ulptx_walk_init(&ulp_walk, ulptx);
if (reqctx->b0_len)
ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
- &reqctx->b0_dma);
- ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
- ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
- ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
- req->assoclen);
+ reqctx->b0_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
+ req->assoclen, 0);
ulptx_walk_end(&ulp_walk);
}
}
void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
unsigned short qid)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
@@ -2516,12 +2591,10 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
u32 temp;
dsgl_walk_init(&dsgl_walk, phys_cpl);
- if (reqctx->b0_len)
- dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
- dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
- dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
- temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
- dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
+ dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
+ temp = req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
@@ -2589,7 +2662,7 @@ void chcr_add_hash_src_ent(struct ahash_request *req,
ulptx_walk_init(&ulp_walk, ulptx);
if (param->bfr_len)
ulptx_walk_add_page(&ulp_walk, param->bfr_len,
- &reqctx->hctx_wr.dma_addr);
+ reqctx->hctx_wr.dma_addr);
ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
param->sg_len, reqctx->hctx_wr.src_ofst);
reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
@@ -2689,8 +2762,7 @@ static int set_msg_len(u8 *block, unsigned int msglen, int csize)
return 0;
}
-static void generate_b0(struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
+static void generate_b0(struct aead_request *req, u8 *ivptr,
unsigned short op_type)
{
unsigned int l, lp, m;
@@ -2701,7 +2773,7 @@ static void generate_b0(struct aead_request *req,
m = crypto_aead_authsize(aead);
- memcpy(b0, reqctx->iv, 16);
+ memcpy(b0, ivptr, 16);
lp = b0[0];
l = lp + 1;
@@ -2727,29 +2799,31 @@ static inline int crypto_ccm_check_iv(const u8 *iv)
}
static int ccm_format_packet(struct aead_request *req,
- struct chcr_aead_ctx *aeadctx,
+ u8 *ivptr,
unsigned int sub_type,
unsigned short op_type,
unsigned int assoclen)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
int rc = 0;
if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
- reqctx->iv[0] = 3;
- memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
- memcpy(reqctx->iv + 4, req->iv, 8);
- memset(reqctx->iv + 12, 0, 4);
+ ivptr[0] = 3;
+ memcpy(ivptr + 1, &aeadctx->salt[0], 3);
+ memcpy(ivptr + 4, req->iv, 8);
+ memset(ivptr + 12, 0, 4);
} else {
- memcpy(reqctx->iv, req->iv, 16);
+ memcpy(ivptr, req->iv, 16);
}
if (assoclen)
*((unsigned short *)(reqctx->scratch_pad + 16)) =
htons(assoclen);
- generate_b0(req, aeadctx, op_type);
+ generate_b0(req, ivptr, op_type);
/* zero the ctr value */
- memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
+ memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
return rc;
}
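For reference, the RFC4309 nonce built above can be modelled in a few lines of standalone C. This is a hedged sketch of the byte layout only; the salt and IV values are illustrative, not driver values:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Model of the RFC4309 CCM nonce from ccm_format_packet(): flags octet 3
 * (a 4-byte length field), 3 salt bytes, 8 IV bytes, and the last
 * flags+1 bytes zeroed as the initial counter. Salt/IV bytes are made up.
 */
int main(void)
{
	uint8_t iv[16];
	const uint8_t salt[3] = { 0xaa, 0xbb, 0xcc };      /* aeadctx->salt analog */
	const uint8_t req_iv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* req->iv analog */

	iv[0] = 3;                      /* L' = 3 -> 4-byte length field */
	memcpy(iv + 1, salt, 3);
	memcpy(iv + 4, req_iv, 8);
	memset(iv + 12, 0, 4);
	/* zero the ctr value: last iv[0] + 1 = 4 bytes */
	memset(iv + 15 - iv[0], 0, iv[0] + 1);

	for (int i = 0; i < 16; i++)
		printf("%02x%s", iv[i], i == 15 ? "\n" : " ");
	return 0;
}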
@@ -2762,7 +2836,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
- unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
+ unsigned int c_id = a_ctx(tfm)->tx_chan_id;
unsigned int ccm_xtra;
unsigned char tag_offset = 0, auth_offset = 0;
unsigned int assoclen;
@@ -2775,7 +2849,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
auth_offset = req->cryptlen ?
- (assoclen + IV + 1 + ccm_xtra) : 0;
+ (req->assoclen + IV + 1 + ccm_xtra) : 0;
if (op_type == CHCR_DECRYPT_OP) {
if (crypto_aead_authsize(tfm) != req->cryptlen)
tag_offset = crypto_aead_authsize(tfm);
@@ -2785,13 +2859,13 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
- 2, assoclen + 1 + ccm_xtra);
+ 2, 1);
sec_cpl->pldlen =
- htonl(assoclen + IV + req->cryptlen + ccm_xtra);
+ htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will be b0 always. So AAD start will be 1 always */
sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- 1, assoclen + ccm_xtra, assoclen
- + IV + 1 + ccm_xtra, 0);
+ 1 + IV, IV + assoclen + ccm_xtra,
+ req->assoclen + IV + 1 + ccm_xtra, 0);
sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
auth_offset, tag_offset,
@@ -2838,10 +2912,11 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
unsigned int transhdr_len;
- unsigned int dst_size = 0, kctx_len, dnents, temp;
+ unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
unsigned int sub_type, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(a_ctx(tfm)->dev);
@@ -2857,37 +2932,38 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
if (error)
goto err;
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
+ (reqctx->op ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
+ CHCR_DST_SG_SIZE, 0);
dnents += MIN_CCM_SG; // For IV and B0
dst_size = get_space_for_phys_dsgl(dnents);
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
+	snents += MIN_CCM_SG; // For B0
kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
reqctx->b0_len) <= SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
reqctx->b0_len, 16) :
- (sgl_len(reqctx->src_nents + reqctx->aad_nents +
- MIN_CCM_SG) * 8);
+ (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
- reqctx->b0_len, transhdr_len, reqctx->op)) {
+ reqctx->b0_len, transhdr_len, reqctx->op)) {
atomic_inc(&adap->chcr_stats.fallback);
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
}
- chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
+ chcr_req = __skb_put_zero(skb, transhdr_len);
fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
@@ -2897,16 +2973,17 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
aeadctx->key, aeadctx->enckey_len);
phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
- error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+ ulptx = (struct ulptx_sgl *)(ivptr + IV);
+ error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
if (error)
goto dstmap_fail;
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.aead_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
reqctx->b0_len) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
transhdr_len, temp, 0);
@@ -2931,10 +3008,11 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
struct chcr_wr *chcr_req;
struct cpl_rx_phys_dsgl *phys_cpl;
struct ulptx_sgl *ulptx;
- unsigned int transhdr_len, dnents = 0;
+ unsigned int transhdr_len, dnents = 0, snents;
unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
unsigned int authsize = crypto_aead_authsize(tfm);
int error = -EINVAL;
+ u8 *ivptr;
gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC;
struct adapter *adap = padap(a_ctx(tfm)->dev);
@@ -2946,19 +3024,19 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
error = chcr_aead_common_init(req);
if (error)
return ERR_PTR(error);
- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
- dnents += sg_nents_xlen(req->dst, req->cryptlen +
+ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
(reqctx->op ? -authsize : authsize),
- CHCR_DST_SG_SIZE, req->assoclen);
+ CHCR_DST_SG_SIZE, 0);
+ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
+ CHCR_SRC_SG_SIZE, 0);
dnents += MIN_GCM_SG; // For IV
dst_size = get_space_for_phys_dsgl(dnents);
kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
+ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
SGE_MAX_WR_LEN;
- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
- (sgl_len(reqctx->src_nents +
- reqctx->aad_nents + MIN_GCM_SG) * 8);
+ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
+ (sgl_len(snents) * 8);
transhdr_len += temp;
transhdr_len = roundup(transhdr_len, 16);
if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
@@ -2968,7 +3046,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
chcr_aead_common_exit(req);
return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
}
- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+ skb = alloc_skb(transhdr_len, flags);
if (!skb) {
error = -ENOMEM;
goto err;
@@ -2979,15 +3057,15 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
//Offset of tag from end
temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
- a_ctx(tfm)->dev->rx_channel_id, 2,
- (assoclen + 1));
+ a_ctx(tfm)->tx_chan_id, 2, 1);
chcr_req->sec_cpl.pldlen =
- htonl(assoclen + IV + req->cryptlen);
+ htonl(req->assoclen + IV + req->cryptlen);
chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- assoclen ? 1 : 0, assoclen,
- assoclen + IV + 1, 0);
+ assoclen ? 1 + IV : 0,
+ assoclen ? IV + assoclen : 0,
+ req->assoclen + IV + 1, 0);
chcr_req->sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
+ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
temp, temp);
chcr_req->sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
@@ -3002,25 +3080,26 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
/* prepare a 16 byte iv */
/* S A L T | IV | 0x00000001 */
if (get_aead_subtype(tfm) ==
CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
- memcpy(reqctx->iv, aeadctx->salt, 4);
- memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
+ memcpy(ivptr, aeadctx->salt, 4);
+ memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
} else {
- memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
+ memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
}
- *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
+ *((unsigned int *)(ivptr + 12)) = htonl(0x01);
- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+ ulptx = (struct ulptx_sgl *)(ivptr + 16);
- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
- chcr_add_aead_src_ent(req, ulptx, assoclen);
+ chcr_add_aead_dst_ent(req, phys_cpl, qid);
+ chcr_add_aead_src_ent(req, ulptx);
atomic_inc(&adap->chcr_stats.aead_rqst);
- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
+ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
+ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
transhdr_len, temp, reqctx->verify);
reqctx->skb = skb;
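The GCM path above writes the 16-byte nonce straight into the work request: a 4-byte salt plus 8-byte IV for RFC4106 (or the raw 12-byte IV for plain GCM), followed by a 32-bit big-endian counter of 1. A minimal standalone model of that memcpy sequence, with made-up salt and IV bytes:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Model of the 16-byte GCM nonce laid out above: SALT | IV | 0x00000001. */
int main(void)
{
	uint8_t ivblock[16];
	const uint8_t salt[4] = { 0xde, 0xad, 0xbe, 0xef };   /* aeadctx->salt analog */
	const uint8_t req_iv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* req->iv analog */
	uint32_t ctr = htonl(0x01);                           /* counter starts at 1 */

	memcpy(ivblock, salt, 4);        /* S A L T      */
	memcpy(ivblock + 4, req_iv, 8);  /* | IV         */
	memcpy(ivblock + 12, &ctr, 4);   /* | 0x00000001 */

	for (int i = 0; i < 16; i++)
		printf("%02x%s", ivblock[i], i == 15 ? "\n" : " ");
	return 0;
}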
@@ -3118,12 +3197,12 @@ static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
aeadctx->mayverify = VERIFY_HW;
break;
case ICV_12:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
- aeadctx->mayverify = VERIFY_HW;
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+ aeadctx->mayverify = VERIFY_HW;
break;
case ICV_14:
- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
- aeadctx->mayverify = VERIFY_HW;
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+ aeadctx->mayverify = VERIFY_HW;
break;
case ICV_16:
aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
@@ -3565,27 +3644,42 @@ static int chcr_aead_op(struct aead_request *req,
create_wr_t create_wr_fn)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
struct uld_ctx *u_ctx;
struct sk_buff *skb;
int isfull = 0;
+ struct chcr_dev *cdev;
- if (!a_ctx(tfm)->dev) {
+ cdev = a_ctx(tfm)->dev;
+ if (!cdev) {
pr_err("chcr : %s : No crypto device.\n", __func__);
return -ENXIO;
}
+
+ if (chcr_inc_wrcount(cdev)) {
+		/* A detached CHCR device means lldi or padap is already freed,
+		 * so we cannot even bump the fallback counter here.
+ */
+ return chcr_aead_fallback(req, reqctx->op);
+ }
+
u_ctx = ULD_CTX(a_ctx(tfm));
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
a_ctx(tfm)->tx_qidx)) {
isfull = 1;
- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ chcr_dec_wrcount(cdev);
return -ENOSPC;
+ }
}
/* Form a WR from req */
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
- if (IS_ERR(skb) || !skb)
+ if (IS_ERR(skb) || !skb) {
+ chcr_dec_wrcount(cdev);
return PTR_ERR(skb);
+ }
skb->dev = u_ctx->lldi.ports[0];
set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
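The pattern above gates every submission on a per-device inflight counter: chcr_inc_wrcount() fails once the device is detaching, the request is then routed to the software fallback, and every error and completion path drops the count with chcr_dec_wrcount(). The helpers' bodies are not part of this hunk, so the following is a hedged userspace model of the idea, not the driver functions:

#include <stdatomic.h>
#include <stdio.h>
#include <stdbool.h>

/* Userspace model of the inflight gate; names mirror chcr_inc_wrcount()
 * and chcr_dec_wrcount(), but the bodies are assumptions.
 */
struct dev_model {
	atomic_int inflight;
	atomic_bool detaching;          /* analog of state == CHCR_DETACH */
};

static int inc_wrcount(struct dev_model *d)
{
	if (atomic_load(&d->detaching))
		return -1;              /* caller must take the fallback path */
	atomic_fetch_add(&d->inflight, 1);
	return 0;
}

static void dec_wrcount(struct dev_model *d)
{
	atomic_fetch_sub(&d->inflight, 1);
}

int main(void)
{
	struct dev_model d = { 0 };

	if (!inc_wrcount(&d)) {         /* submit */
		/* ... hardware processes the WR ... */
		dec_wrcount(&d);        /* completion handler */
	}
	atomic_store(&d.detaching, true);
	printf("submit while detaching: %s\n",
	       inc_wrcount(&d) ? "rejected, fallback" : "accepted");
	return 0;
}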
@@ -3722,7 +3816,6 @@ static struct chcr_alg_template driver_algs[] = {
.setkey = chcr_aes_rfc3686_setkey,
.encrypt = chcr_aes_encrypt,
.decrypt = chcr_aes_decrypt,
- .geniv = "seqiv",
}
}
},
@@ -4178,7 +4271,6 @@ static struct chcr_alg_template driver_algs[] = {
.setauthsize = chcr_authenc_null_setauthsize,
}
},
-
};
/*
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index 1871500309e2..ee20dd899e83 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -262,7 +262,7 @@
#define MIN_AUTH_SG 1 /* IV */
#define MIN_GCM_SG 1 /* IV */
#define MIN_DIGEST_SG 1 /*Partial Buffer*/
-#define MIN_CCM_SG 2 /*IV+B0*/
+#define MIN_CCM_SG 1 /*IV+B0*/
#define CIP_SPACE_LEFT(len) \
((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len)))
#define HASH_SPACE_LEFT(len) \
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 2c472e3c6aeb..239b933d6df6 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -26,10 +26,7 @@
#include "chcr_core.h"
#include "cxgb4_uld.h"
-static LIST_HEAD(uld_ctx_list);
-static DEFINE_MUTEX(dev_mutex);
-static atomic_t dev_count;
-static struct uld_ctx *ctx_rr;
+static struct chcr_driver_data drv_data;
typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
@@ -53,6 +50,29 @@ static struct cxgb4_uld_info chcr_uld_info = {
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};
+static void detach_work_fn(struct work_struct *work)
+{
+ struct chcr_dev *dev;
+
+ dev = container_of(work, struct chcr_dev, detach_work.work);
+
+ if (atomic_read(&dev->inflight)) {
+ dev->wqretry--;
+ if (dev->wqretry) {
+ pr_debug("Request Inflight Count %d\n",
+ atomic_read(&dev->inflight));
+
+ schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+ } else {
+			WARN(1, "CHCR: %d requests still pending\n",
+ atomic_read(&dev->inflight));
+ complete(&dev->detach_comp);
+ }
+ } else {
+ complete(&dev->detach_comp);
+ }
+}
+
struct uld_ctx *assign_chcr_device(void)
{
struct uld_ctx *u_ctx = NULL;
@@ -63,56 +83,74 @@ struct uld_ctx *assign_chcr_device(void)
* Although one session must use the same device to
* maintain request-response ordering.
*/
- mutex_lock(&dev_mutex);
- if (!list_empty(&uld_ctx_list)) {
- u_ctx = ctx_rr;
- if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
- ctx_rr = list_first_entry(&uld_ctx_list,
- struct uld_ctx,
- entry);
+ mutex_lock(&drv_data.drv_mutex);
+ if (!list_empty(&drv_data.act_dev)) {
+ u_ctx = drv_data.last_dev;
+ if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+ drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+ struct uld_ctx, entry);
else
- ctx_rr = list_next_entry(ctx_rr, entry);
+ drv_data.last_dev =
+ list_next_entry(drv_data.last_dev, entry);
}
- mutex_unlock(&dev_mutex);
+ mutex_unlock(&drv_data.drv_mutex);
return u_ctx;
}
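assign_chcr_device() keeps a round-robin cursor (last_dev) over the active-device list: the current device is handed out and the cursor advances, wrapping at the end of the list. A trivial standalone model of that selection policy:

#include <stdio.h>

/* Model of the round-robin cursor in assign_chcr_device(): last_dev is
 * returned and then advanced, wrapping at the end of the active list.
 */
int main(void)
{
	const char *act_dev[] = { "dev0", "dev1", "dev2" };
	int ndev = 3, last_dev = 0;

	for (int req = 0; req < 5; req++) {
		const char *picked = act_dev[last_dev];
		last_dev = (last_dev + 1) % ndev;   /* list_is_last -> wrap */
		printf("request %d -> %s\n", req, picked);
	}
	return 0;
}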
-static int chcr_dev_add(struct uld_ctx *u_ctx)
+static void chcr_dev_add(struct uld_ctx *u_ctx)
{
struct chcr_dev *dev;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENXIO;
+ dev = &u_ctx->dev;
+ dev->state = CHCR_ATTACH;
+ atomic_set(&dev->inflight, 0);
+ mutex_lock(&drv_data.drv_mutex);
+ list_move(&u_ctx->entry, &drv_data.act_dev);
+ if (!drv_data.last_dev)
+ drv_data.last_dev = u_ctx;
+ mutex_unlock(&drv_data.drv_mutex);
+}
+
+static void chcr_dev_init(struct uld_ctx *u_ctx)
+{
+ struct chcr_dev *dev;
+ dev = &u_ctx->dev;
spin_lock_init(&dev->lock_chcr_dev);
- u_ctx->dev = dev;
- dev->u_ctx = u_ctx;
- atomic_inc(&dev_count);
- mutex_lock(&dev_mutex);
- list_add_tail(&u_ctx->entry, &uld_ctx_list);
- if (!ctx_rr)
- ctx_rr = u_ctx;
- mutex_unlock(&dev_mutex);
- return 0;
+ INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
+ init_completion(&dev->detach_comp);
+ dev->state = CHCR_INIT;
+ dev->wqretry = WQ_RETRY;
+ atomic_inc(&drv_data.dev_count);
+ atomic_set(&dev->inflight, 0);
+ mutex_lock(&drv_data.drv_mutex);
+ list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
+ if (!drv_data.last_dev)
+ drv_data.last_dev = u_ctx;
+ mutex_unlock(&drv_data.drv_mutex);
}
-static int chcr_dev_remove(struct uld_ctx *u_ctx)
+static int chcr_dev_move(struct uld_ctx *u_ctx)
{
- if (ctx_rr == u_ctx) {
- if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
- ctx_rr = list_first_entry(&uld_ctx_list,
- struct uld_ctx,
- entry);
+ struct adapter *adap;
+
+ mutex_lock(&drv_data.drv_mutex);
+ if (drv_data.last_dev == u_ctx) {
+ if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+ drv_data.last_dev = list_first_entry(&drv_data.act_dev,
+ struct uld_ctx, entry);
else
- ctx_rr = list_next_entry(ctx_rr, entry);
+ drv_data.last_dev =
+ list_next_entry(drv_data.last_dev, entry);
}
- list_del(&u_ctx->entry);
- if (list_empty(&uld_ctx_list))
- ctx_rr = NULL;
- kfree(u_ctx->dev);
- u_ctx->dev = NULL;
- atomic_dec(&dev_count);
+ list_move(&u_ctx->entry, &drv_data.inact_dev);
+ if (list_empty(&drv_data.act_dev))
+ drv_data.last_dev = NULL;
+ adap = padap(&u_ctx->dev);
+ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+ atomic_dec(&drv_data.dev_count);
+ mutex_unlock(&drv_data.drv_mutex);
+
return 0;
}
@@ -131,12 +169,8 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
ack_err_status =
ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
- if (ack_err_status) {
- if (CHK_MAC_ERR_BIT(ack_err_status) ||
- CHK_PAD_ERR_BIT(ack_err_status))
- error_status = -EBADMSG;
- atomic_inc(&adap->chcr_stats.error);
- }
+ if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
+ error_status = -EBADMSG;
/* call completion callback with failure status */
if (req) {
error_status = chcr_handle_resp(req, input, error_status);
@@ -144,6 +178,9 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
pr_err("Incorrect request address from the firmware\n");
return -EFAULT;
}
+ if (error_status)
+ atomic_inc(&adap->chcr_stats.error);
+
return 0;
}
@@ -167,6 +204,7 @@ static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
goto out;
}
u_ctx->lldi = *lld;
+ chcr_dev_init(u_ctx);
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
chcr_add_xfrmops(lld);
@@ -179,7 +217,7 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
const struct pkt_gl *pgl)
{
struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
- struct chcr_dev *dev = u_ctx->dev;
+ struct chcr_dev *dev = &u_ctx->dev;
const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
if (rpl->opcode != CPL_FW6_PLD) {
@@ -201,6 +239,28 @@ int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+static void chcr_detach_device(struct uld_ctx *u_ctx)
+{
+ struct chcr_dev *dev = &u_ctx->dev;
+
+ spin_lock_bh(&dev->lock_chcr_dev);
+ if (dev->state == CHCR_DETACH) {
+ spin_unlock_bh(&dev->lock_chcr_dev);
+ pr_debug("Detached Event received for already detach device\n");
+ return;
+ }
+ dev->state = CHCR_DETACH;
+ spin_unlock_bh(&dev->lock_chcr_dev);
+
+ if (atomic_read(&dev->inflight) != 0) {
+ schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+ wait_for_completion(&dev->detach_comp);
+ }
+
+	// Move u_ctx to the inact_dev list
+ chcr_dev_move(u_ctx);
+}
+
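chcr_detach_device() blocks in wait_for_completion() while detach_work_fn() polls the inflight count; with the values defined later in this patch (WQ_DETACH_TM = 50 ms, WQ_RETRY = 5), the drain is bounded at roughly 250 ms before the WARN fires. A small standalone model of that bounded retry budget (the "completions trickle in" step is simulated):

#include <stdio.h>

/* Model of the detach drain budget: WQ_RETRY attempts spaced WQ_DETACH_TM
 * apart (values from this patch: 5 retries x 50 ms).
 */
int main(void)
{
	const int wq_retry = 5;        /* dev->wqretry initial value */
	const int detach_tm_ms = 50;   /* WQ_DETACH_TM */
	int inflight = 3;              /* pretend 3 WRs are still pending */

	for (int try = wq_retry; try > 0 && inflight; try--) {
		printf("inflight=%d, rescheduling in %d ms\n",
		       inflight, detach_tm_ms);
		inflight--;            /* completions trickle in */
	}
	printf(inflight ? "WARN: requests still pending\n"
			: "drained, completing detach\n");
	return 0;
}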
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
struct uld_ctx *u_ctx = handle;
@@ -208,23 +268,16 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
switch (state) {
case CXGB4_STATE_UP:
- if (!u_ctx->dev) {
- ret = chcr_dev_add(u_ctx);
- if (ret != 0)
- return ret;
+ if (u_ctx->dev.state != CHCR_INIT) {
+			// Already initialised.
+ return 0;
}
- if (atomic_read(&dev_count) == 1)
- ret = start_crypto();
+ chcr_dev_add(u_ctx);
+ ret = start_crypto();
break;
case CXGB4_STATE_DETACH:
- if (u_ctx->dev) {
- mutex_lock(&dev_mutex);
- chcr_dev_remove(u_ctx);
- mutex_unlock(&dev_mutex);
- }
- if (!atomic_read(&dev_count))
- stop_crypto();
+ chcr_detach_device(u_ctx);
break;
case CXGB4_STATE_START_RECOVERY:
@@ -237,7 +290,13 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
static int __init chcr_crypto_init(void)
{
+ INIT_LIST_HEAD(&drv_data.act_dev);
+ INIT_LIST_HEAD(&drv_data.inact_dev);
+ atomic_set(&drv_data.dev_count, 0);
+ mutex_init(&drv_data.drv_mutex);
+ drv_data.last_dev = NULL;
cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
+
return 0;
}
@@ -245,18 +304,20 @@ static void __exit chcr_crypto_exit(void)
{
struct uld_ctx *u_ctx, *tmp;
- if (atomic_read(&dev_count))
- stop_crypto();
+ stop_crypto();
+ cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
/* Remove all devices from list */
- mutex_lock(&dev_mutex);
- list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
- if (u_ctx->dev)
- chcr_dev_remove(u_ctx);
+ mutex_lock(&drv_data.drv_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
+ list_del(&u_ctx->entry);
kfree(u_ctx);
}
- mutex_unlock(&dev_mutex);
- cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
+ list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
+ list_del(&u_ctx->entry);
+ kfree(u_ctx);
+ }
+ mutex_unlock(&drv_data.drv_mutex);
}
module_init(chcr_crypto_init);
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index de3a9c085daf..1159dee964ed 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -47,7 +47,7 @@
#define MAX_PENDING_REQ_TO_HW 20
#define CHCR_TEST_RESPONSE_TIMEOUT 1000
-
+#define WQ_DETACH_TM (msecs_to_jiffies(50))
#define PAD_ERROR_BIT 1
#define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1)
@@ -61,9 +61,6 @@
#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
DUMMY_BYTES + \
sizeof(struct ulptx_sgl))
-
-#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
-
struct uld_ctx;
struct _key_ctx {
@@ -121,6 +118,20 @@ struct _key_ctx {
#define KEYCTX_TX_WR_AUTHIN_G(x) \
(((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
+#define WQ_RETRY 5
+struct chcr_driver_data {
+ struct list_head act_dev;
+ struct list_head inact_dev;
+ atomic_t dev_count;
+ struct mutex drv_mutex;
+ struct uld_ctx *last_dev;
+};
+
+enum chcr_state {
+ CHCR_INIT = 0,
+ CHCR_ATTACH,
+ CHCR_DETACH,
+};
struct chcr_wr {
struct fw_crypto_lookaside_wr wreq;
struct ulp_txpkt ulptx;
@@ -131,15 +142,18 @@ struct chcr_wr {
struct chcr_dev {
spinlock_t lock_chcr_dev;
- struct uld_ctx *u_ctx;
+ enum chcr_state state;
+ atomic_t inflight;
+ int wqretry;
+ struct delayed_work detach_work;
+ struct completion detach_comp;
unsigned char tx_channel_id;
- unsigned char rx_channel_id;
};
struct uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
- struct chcr_dev *dev;
+ struct chcr_dev dev;
};
struct sge_opaque_hdr {
@@ -159,8 +173,17 @@ struct chcr_ipsec_wr {
struct chcr_ipsec_req req;
};
+#define ESN_IV_INSERT_OFFSET 12
+struct chcr_ipsec_aadiv {
+ __be32 spi;
+ u8 seq_no[8];
+ u8 iv[8];
+};
+
struct ipsec_sa_entry {
int hmac_ctrl;
+ u16 esn;
+ u16 imm;
unsigned int enckey_len;
unsigned int kctx_len;
unsigned int authsize;
@@ -181,6 +204,13 @@ static inline unsigned int sgl_len(unsigned int n)
return (3 * n) / 2 + (n & 1) + 2;
}
+static inline void *padap(struct chcr_dev *dev)
+{
+ struct uld_ctx *u_ctx = container_of(dev, struct uld_ctx, dev);
+
+ return pci_get_drvdata(u_ctx->lldi.pdev);
+}
+
struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index d37ef41f9ebe..655606f2e4d0 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -41,7 +41,8 @@
#define CCM_B0_SIZE 16
#define CCM_AAD_FIELD_SIZE 2
-#define T6_MAX_AAD_SIZE 511
+// 511 - 16 (for the IV)
+#define T6_MAX_AAD_SIZE 495
/* Define following if h/w is not dropping the AAD and IV data before
@@ -185,9 +186,6 @@ struct chcr_aead_reqctx {
dma_addr_t b0_dma;
unsigned int b0_len;
unsigned int op;
- short int aad_nents;
- short int src_nents;
- short int dst_nents;
u16 imm;
u16 verify;
u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
@@ -322,10 +320,8 @@ void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req,
unsigned short op_type);
void chcr_add_aead_dst_ent(struct aead_request *req,
struct cpl_rx_phys_dsgl *phys_cpl,
- unsigned int assoclen,
unsigned short qid);
-void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
- unsigned int assoclen);
+void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx);
void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
void *ulptx,
struct cipher_wr_param *wrparam);
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 461b97e2f1fd..2fb48cce4462 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -76,12 +76,14 @@ static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+static void chcr_advance_esn_state(struct xfrm_state *x);
static const struct xfrmdev_ops chcr_xfrmdev_ops = {
.xdo_dev_state_add = chcr_xfrm_add_state,
.xdo_dev_state_delete = chcr_xfrm_del_state,
.xdo_dev_state_free = chcr_xfrm_free_state,
.xdo_dev_offload_ok = chcr_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = chcr_advance_esn_state,
};
/* Add offload xfrms to Chelsio Interface */
@@ -210,10 +212,6 @@ static int chcr_xfrm_add_state(struct xfrm_state *x)
pr_debug("CHCR: Cannot offload compressed xfrm states\n");
return -EINVAL;
}
- if (x->props.flags & XFRM_STATE_ESN) {
- pr_debug("CHCR: Cannot offload ESN xfrm states\n");
- return -EINVAL;
- }
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
@@ -266,6 +264,8 @@ static int chcr_xfrm_add_state(struct xfrm_state *x)
}
sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ if (x->props.flags & XFRM_STATE_ESN)
+ sa_entry->esn = 1;
chcr_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
try_module_get(THIS_MODULE);
@@ -294,28 +294,57 @@ static void chcr_xfrm_free_state(struct xfrm_state *x)
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
- /* Offload with IP options is not supported yet */
- if (ip_hdr(skb)->ihl > 5)
- return false;
-
+ if (x->props.family == AF_INET) {
+ /* Offload with IP options is not supported yet */
+ if (ip_hdr(skb)->ihl > 5)
+ return false;
+ } else {
+		/* Offload with IPv6 extension headers is not supported yet */
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
return true;
}
-static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+static void chcr_advance_esn_state(struct xfrm_state *x)
{
- int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+ /* do nothing */
+ if (!x->xso.offload_handle)
+ return;
+}
+
+static inline int is_eth_imm(const struct sk_buff *skb,
+ struct ipsec_sa_entry *sa_entry)
+{
+ unsigned int kctx_len;
+ int hdrlen;
+
+ kctx_len = sa_entry->kctx_len;
+ hdrlen = sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) + kctx_len;
hdrlen += sizeof(struct cpl_tx_pkt);
+ if (sa_entry->esn)
+ hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
+ << 4);
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
return hdrlen;
return 0;
}
static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
- unsigned int kctx_len)
+ struct ipsec_sa_entry *sa_entry)
{
+ unsigned int kctx_len;
unsigned int flits;
- int hdrlen = is_eth_imm(skb, kctx_len);
+ int aadivlen;
+ int hdrlen;
+
+ kctx_len = sa_entry->kctx_len;
+ hdrlen = is_eth_imm(skb, sa_entry);
+ aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+ 16) : 0;
+ aadivlen <<= 4;
/* If the skb is small enough, we can pump it out as a work request
* with only immediate data. In that case we just have to have the
@@ -338,13 +367,69 @@ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
flits += (sizeof(struct fw_ulptx_wr) +
sizeof(struct chcr_ipsec_req) +
kctx_len +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ sizeof(struct cpl_tx_pkt_core) +
+ aadivlen) / sizeof(__be64);
return flits;
}
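Both helpers now take the SA so they can account for the optional ESN block: struct chcr_ipsec_aadiv is 4 + 8 + 8 = 20 bytes, rounded up to a 16-byte multiple, so ESN adds 32 bytes of header, and a packet goes out as immediate data only if skb->len still fits under the immediate-packet limit. A hedged model of the is_eth_imm() decision; MAX_IMM_TX_PKT_LEN is assumed to be 256 (cxgb4's value) and the fixed header sizes are illustrative stand-ins, not the real structure sizes:

#include <stdio.h>

#define ROUND_UP_16(x) ((((x) + 15) / 16) * 16)

/* Model of the immediate-data decision above. */
int main(void)
{
	const int max_imm = 256;            /* assumed MAX_IMM_TX_PKT_LEN */
	const int fixed_hdr = 16 + 32 + 16; /* wr + ipsec req + cpl_tx_pkt, made up */
	const int kctx_len = 48;            /* illustrative key-context size */
	const int aadiv = 20;               /* 4B spi + 8B seq_no + 8B iv */

	for (int esn = 0; esn <= 1; esn++) {
		int hdrlen = fixed_hdr + kctx_len +
			     (esn ? ROUND_UP_16(aadiv) : 0);
		printf("esn=%d: hdrlen=%d, immediate if skb->len <= %d\n",
		       esn, hdrlen, max_imm - hdrlen);
	}
	return 0;
}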
+inline void *copy_esn_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
+{
+ struct chcr_ipsec_aadiv *aadiv;
+ struct ulptx_idata *sc_imm;
+ struct ip_esp_hdr *esphdr;
+ struct xfrm_offload *xo;
+ struct sge_eth_txq *q;
+ struct adapter *adap;
+ struct port_info *pi;
+ __be64 seqno;
+ u32 qidx;
+ u32 seqlo;
+ u8 *iv;
+ int eoq;
+ int len;
+
+ pi = netdev_priv(dev);
+ adap = pi->adapter;
+ qidx = skb->queue_mapping;
+ q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+ /* end of queue, reset pos to start of queue */
+ eoq = (void *)q->q.stat - pos;
+ if (!eoq)
+ pos = q->q.desc;
+
+ len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
+ memset(pos, 0, len);
+ aadiv = (struct chcr_ipsec_aadiv *)pos;
+ esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
+ iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
+ xo = xfrm_offload(skb);
+
+ aadiv->spi = (esphdr->spi);
+ seqlo = htonl(esphdr->seq_no);
+ seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
+ memcpy(aadiv->seq_no, &seqno, 8);
+ memcpy(aadiv->iv, iv, 8);
+
+ if (sa_entry->imm) {
+ sc_imm = (struct ulptx_idata *)(pos +
+ (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+ sizeof(__be64)) << 3));
+ sc_imm->cmd_more = FILL_CMD_MORE(!sa_entry->imm);
+ sc_imm->len = cpu_to_be32(sa_entry->imm);
+ }
+ pos += len;
+ return pos;
+}
+
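copy_esn_pktxt() rebuilds the 64-bit ESN from the 32-bit sequence number on the wire (low half) and the high half kept in the xfrm offload state, then stores it big-endian in the AAD block. A standalone sketch of that composition, where glibc's htobe64() stands in for cpu_to_be64() and the sample values are made up:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <endian.h>

/* Model of the seqno assembly above: low 32 bits from the ESP header,
 * high 32 bits from xo->seq.hi. Values are illustrative.
 */
int main(void)
{
	uint32_t seqlo = 0x00000005;     /* host-order ESP sequence number */
	uint32_t seqhi = 0x00000001;     /* xo->seq.hi analog */
	uint64_t seqno = htobe64(seqlo + ((uint64_t)seqhi << 32));
	uint8_t aad_seq[8];

	memcpy(aad_seq, &seqno, 8);      /* as memcpy(aadiv->seq_no, &seqno, 8) */
	for (int i = 0; i < 8; i++)
		printf("%02x%s", aad_seq[i], i == 7 ? "\n" : " ");
	/* prints 00 00 00 01 00 00 00 05: the 64-bit ESN, big-endian */
	return 0;
}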
inline void *copy_cpltx_pktxt(struct sk_buff *skb,
- struct net_device *dev,
- void *pos)
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
{
struct cpl_tx_pkt_core *cpl;
struct sge_eth_txq *q;
@@ -379,6 +464,9 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
cpl->ctrl1 = cpu_to_be64(cntrl);
pos += sizeof(struct cpl_tx_pkt_core);
+ /* Copy ESN info for HW */
+ if (sa_entry->esn)
+ pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
return pos;
}
@@ -425,7 +513,7 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
pos = (u8 *)q->q.desc + (key_len - left);
}
/* Copy CPL TX PKT XT */
- pos = copy_cpltx_pktxt(skb, dev, pos);
+ pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);
return pos;
}
@@ -438,10 +526,16 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- unsigned int immdatalen = 0;
unsigned int ivsize = GCM_ESP_IV_SIZE;
struct chcr_ipsec_wr *wr;
+ u16 immdatalen = 0;
unsigned int flits;
+ u32 ivinoffset;
+ u32 aadstart;
+ u32 aadstop;
+ u32 ciphstart;
+ u32 ivdrop = 0;
+ u32 esnlen = 0;
u32 wr_mid;
int qidx = skb_get_queue_mapping(skb);
struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
@@ -450,10 +544,17 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
atomic_inc(&adap->chcr_stats.ipsec_cnt);
- flits = calc_tx_sec_flits(skb, kctx_len);
+ flits = calc_tx_sec_flits(skb, sa_entry);
+ if (sa_entry->esn)
+ ivdrop = 1;
- if (is_eth_imm(skb, kctx_len))
+ if (is_eth_imm(skb, sa_entry)) {
immdatalen = skb->len;
+ sa_entry->imm = immdatalen;
+ }
+
+ if (sa_entry->esn)
+ esnlen = sizeof(struct chcr_ipsec_aadiv);
/* WR Header */
wr = (struct chcr_ipsec_wr *)pos;
@@ -478,33 +579,38 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
sizeof(wr->req.key_ctx) +
kctx_len +
sizeof(struct cpl_tx_pkt_core) +
- immdatalen);
+ esnlen +
+ (esnlen ? 0 : immdatalen));
/* CPL_SEC_PDU */
+ ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr) + 1);
wr->req.sec_cpl.op_ivinsrtofst = htonl(
CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
CPL_TX_SEC_PDU_CPLLEN_V(2) |
CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(
- (skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr) + 1)));
+ ivinoffset));
- wr->req.sec_cpl.pldlen = htonl(skb->len);
+ wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
+ aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
+ aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
+ (skb_transport_offset(skb) +
+ sizeof(struct ip_esp_hdr));
+ ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
+ GCM_ESP_IV_SIZE + 1;
+ ciphstart += sa_entry->esn ? esnlen : 0;
wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
- (skb_transport_offset(skb) + 1),
- (skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr)),
- (skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr) +
- GCM_ESP_IV_SIZE + 1), 0);
+ aadstart,
+ aadstop,
+ ciphstart, 0);
wr->req.sec_cpl.cipherstop_lo_authinsert =
- FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
- sizeof(struct ip_esp_hdr) +
- GCM_ESP_IV_SIZE + 1,
- sa_entry->authsize,
- sa_entry->authsize);
+ FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
+ sa_entry->authsize,
+ sa_entry->authsize);
wr->req.sec_cpl.seqno_numivs =
FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
CHCR_SCMD_CIPHER_MODE_AES_GCM,
@@ -512,7 +618,7 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
sa_entry->hmac_ctrl,
ivsize >> 1);
wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
- 0, 0, 0);
+ 0, ivdrop, 0);
pos += sizeof(struct fw_ulptx_wr) +
sizeof(struct ulp_txpkt) +
@@ -565,20 +671,21 @@ int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
struct ipsec_sa_entry *sa_entry;
u64 *pos, *end, *before, *sgl;
int qidx, left, credits;
- unsigned int flits = 0, ndesc, kctx_len;
+ unsigned int flits = 0, ndesc;
struct adapter *adap;
struct sge_eth_txq *q;
struct port_info *pi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ struct sec_path *sp;
bool immediate = false;
if (!x->xso.offload_handle)
return NETDEV_TX_BUSY;
sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
- kctx_len = sa_entry->kctx_len;
- if (skb->sp->len != 1) {
+ sp = skb_sec_path(skb);
+ if (sp->len != 1) {
out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -590,7 +697,7 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_reclaim_completed_tx(adap, &q->q, true);
- flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
+ flits = calc_tx_sec_flits(skb, sa_entry);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
@@ -603,7 +710,7 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
- if (is_eth_imm(skb, kctx_len))
+ if (is_eth_imm(skb, sa_entry))
immediate = true;
if (!immediate &&
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/crypto/chelsio/chtls/chtls.h
index 7725b6ee14ef..59bb67d5a7ce 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/crypto/chelsio/chtls/chtls.h
@@ -153,6 +153,11 @@ struct chtls_dev {
unsigned int cdev_state;
};
+struct chtls_listen {
+ struct chtls_dev *cdev;
+ struct sock *sk;
+};
+
struct chtls_hws {
struct sk_buff_head sk_recv_queue;
u8 txqid;
@@ -215,6 +220,8 @@ struct chtls_sock {
u16 resv2;
u32 delack_mode;
u32 delack_seq;
+ u32 snd_win;
+ u32 rcv_win;
void *passive_reap_next; /* placeholder for passive */
struct chtls_hws tlshws;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 20209e29f814..59b75299fcbc 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -21,6 +21,7 @@
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/if_vlan.h>
+#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/dst.h>
@@ -887,24 +888,6 @@ static unsigned int chtls_select_mss(const struct chtls_sock *csk,
return mtu_idx;
}
-static unsigned int select_rcv_wnd(struct chtls_sock *csk)
-{
- unsigned int rcvwnd;
- unsigned int wnd;
- struct sock *sk;
-
- sk = csk->sk;
- wnd = tcp_full_space(sk);
-
- if (wnd < MIN_RCV_WND)
- wnd = MIN_RCV_WND;
-
- rcvwnd = MAX_RCV_WND;
-
- csk_set_flag(csk, CSK_UPDATE_RCV_WND);
- return min(wnd, rcvwnd);
-}
-
static unsigned int select_rcv_wscale(int space, int wscale_ok, int win_clamp)
{
int wscale = 0;
@@ -951,7 +934,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
csk->mtu_idx = chtls_select_mss(csk, dst_mtu(__sk_dst_get(sk)),
req);
opt0 = TCAM_BYPASS_F |
- WND_SCALE_V((tp)->rx_opt.rcv_wscale) |
+ WND_SCALE_V(RCV_WSCALE(tp)) |
MSS_IDX_V(csk->mtu_idx) |
L2T_IDX_V(csk->l2t_entry->idx) |
NAGLE_V(!(tp->nonagle & TCP_NAGLE_OFF)) |
@@ -1005,6 +988,25 @@ static int chtls_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
+static void chtls_set_tcp_window(struct chtls_sock *csk)
+{
+ struct net_device *ndev = csk->egress_dev;
+ struct port_info *pi = netdev_priv(ndev);
+ unsigned int linkspeed;
+ u8 scale;
+
+ linkspeed = pi->link_cfg.speed;
+ scale = linkspeed / SPEED_10000;
+#define CHTLS_10G_RCVWIN (256 * 1024)
+ csk->rcv_win = CHTLS_10G_RCVWIN;
+ if (scale)
+ csk->rcv_win *= scale;
+#define CHTLS_10G_SNDWIN (256 * 1024)
+ csk->snd_win = CHTLS_10G_SNDWIN;
+ if (scale)
+ csk->snd_win *= scale;
+}
+
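chtls_set_tcp_window() scales both windows linearly with link speed in 10 Gb/s units from a 256 KB default: scale 0 (below 10G) and scale 1 both keep 256 KB, 25G doubles it to 512 KB, 100G yields 2560 KB. A quick standalone check of that arithmetic:

#include <stdio.h>

/* Model of chtls_set_tcp_window(): windows scale with link speed in
 * 10 Gb/s units, starting from a 256 KB default.
 */
int main(void)
{
	const unsigned int base = 256 * 1024;  /* CHTLS_10G_RCVWIN/SNDWIN */
	const unsigned int speeds[] = { 1000, 10000, 25000, 40000, 100000 };

	for (unsigned int i = 0; i < 5; i++) {
		unsigned int scale = speeds[i] / 10000; /* SPEED_10000 units */
		unsigned int win = base * (scale ? scale : 1);
		printf("%6u Mb/s -> %4u KB window\n", speeds[i], win / 1024);
	}
	return 0;
}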
static struct sock *chtls_recv_sock(struct sock *lsk,
struct request_sock *oreq,
void *network_hdr,
@@ -1067,6 +1069,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->port_id = port_id;
csk->egress_dev = ndev;
csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ chtls_set_tcp_window(csk);
+ tp->rcv_wnd = csk->rcv_win;
+ csk->sndbuf = csk->snd_win;
csk->ulp_mode = ULP_MODE_TLS;
step = cdev->lldi->nrxq / cdev->lldi->nchan;
csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
@@ -1074,11 +1079,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
port_id * step;
csk->sndbuf = newsk->sk_sndbuf;
- csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi->adapter_type,
- cxgb4_port_viid(ndev));
- tp->rcv_wnd = select_rcv_wnd(csk);
+ csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk),
- WSCALE_OK(tp),
+ sock_net(newsk)->
+ ipv4.sysctl_tcp_window_scaling,
tp->window_clamp);
neigh_release(n);
inet_inherit_port(&tcp_hashinfo, lsk, newsk);
@@ -1130,6 +1134,7 @@ static void chtls_pass_accept_request(struct sock *sk,
struct cpl_t5_pass_accept_rpl *rpl;
struct cpl_pass_accept_req *req;
struct listen_ctx *listen_ctx;
+ struct vlan_ethhdr *vlan_eh;
struct request_sock *oreq;
struct sk_buff *reply_skb;
struct chtls_sock *csk;
@@ -1142,6 +1147,10 @@ static void chtls_pass_accept_request(struct sock *sk,
unsigned int stid;
unsigned int len;
unsigned int tid;
+ bool th_ecn, ect;
+ __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
+ u16 eth_hdr_len;
+ bool ecn_ok;
req = cplhdr(skb) + RSS_HDR;
tid = GET_TID(req);
@@ -1180,24 +1189,40 @@ static void chtls_pass_accept_request(struct sock *sk,
oreq->mss = 0;
oreq->ts_recent = 0;
- eh = (struct ethhdr *)(req + 1);
- iph = (struct iphdr *)(eh + 1);
+ eth_hdr_len = T6_ETH_HDR_LEN_G(ntohl(req->hdr_len));
+ if (eth_hdr_len == ETH_HLEN) {
+ eh = (struct ethhdr *)(req + 1);
+ iph = (struct iphdr *)(eh + 1);
+ network_hdr = (void *)(eh + 1);
+ } else {
+ vlan_eh = (struct vlan_ethhdr *)(req + 1);
+ iph = (struct iphdr *)(vlan_eh + 1);
+ network_hdr = (void *)(vlan_eh + 1);
+ }
if (iph->version != 0x4)
goto free_oreq;
- network_hdr = (void *)(eh + 1);
tcph = (struct tcphdr *)(iph + 1);
+ skb_set_network_header(skb, (void *)iph - (void *)req);
tcp_rsk(oreq)->tfo_listener = false;
tcp_rsk(oreq)->rcv_isn = ntohl(tcph->seq);
chtls_set_req_port(oreq, tcph->source, tcph->dest);
- inet_rsk(oreq)->ecn_ok = 0;
chtls_set_req_addr(oreq, iph->daddr, iph->saddr);
- if (req->tcpopt.wsf <= 14) {
+ ip_dsfield = ipv4_get_dsfield(iph);
+ if (req->tcpopt.wsf <= 14 &&
+ sock_net(sk)->ipv4.sysctl_tcp_window_scaling) {
inet_rsk(oreq)->wscale_ok = 1;
inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf;
}
inet_rsk(oreq)->ir_iif = sk->sk_bound_dev_if;
+ th_ecn = tcph->ece && tcph->cwr;
+ if (th_ecn) {
+ ect = !INET_ECN_is_not_ect(ip_dsfield);
+ ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn;
+ if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))
+ inet_rsk(oreq)->ecn_ok = 1;
+ }
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index afebbd87c4aa..18f553fcc167 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -397,7 +397,7 @@ static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb,
req_wr->lsodisable_to_flags =
htonl(TX_ULP_MODE_V(ULP_MODE_TLS) |
- FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
+ TX_URG_V(skb_urgent(skb)) |
T6_TX_FORCE_F | wr_ulp_mode_force |
TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
skb_queue_empty(&csk->txq)));
@@ -534,10 +534,9 @@ static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb,
FW_OFLD_TX_DATA_WR_SHOVE_F);
req->tunnel_to_proxy = htonl(wr_ulp_mode_force |
- FW_OFLD_TX_DATA_WR_URGENT_V(skb_urgent(skb)) |
- FW_OFLD_TX_DATA_WR_SHOVE_V((!csk_flag
- (sk, CSK_TX_MORE_DATA)) &&
- skb_queue_empty(&csk->txq)));
+ TX_URG_V(skb_urgent(skb)) |
+ TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) &&
+ skb_queue_empty(&csk->txq)));
req->plen = htonl(len);
}
@@ -995,7 +994,6 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
int mss, flags, err;
int recordsz = 0;
int copied = 0;
- int hdrlen = 0;
long timeo;
lock_sock(sk);
@@ -1032,7 +1030,7 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
recordsz = tls_header_read(&hdr, &msg->msg_iter);
size -= TLS_HEADER_LENGTH;
- hdrlen += TLS_HEADER_LENGTH;
+ copied += TLS_HEADER_LENGTH;
csk->tlshws.txleft = recordsz;
csk->tlshws.type = hdr.type;
if (skb)
@@ -1083,10 +1081,8 @@ new_buf:
int off = TCP_OFF(sk);
bool merge;
- if (!page)
- goto wait_for_memory;
-
- pg_size <<= compound_order(page);
+ if (page)
+ pg_size <<= compound_order(page);
if (off < pg_size &&
skb_can_coalesce(skb, i, page, off)) {
merge = 1;
@@ -1187,7 +1183,7 @@ out:
chtls_tcp_push(sk, flags);
done:
release_sock(sk);
- return copied + hdrlen;
+ return copied;
do_fault:
if (!skb->len) {
__skb_unlink(skb, &csk->txq);
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index f472c51abe56..563f8fe7686a 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -55,24 +55,19 @@ static void unregister_listen_notifier(struct notifier_block *nb)
static int listen_notify_handler(struct notifier_block *this,
unsigned long event, void *data)
{
- struct chtls_dev *cdev;
- struct sock *sk;
- int ret;
+ struct chtls_listen *clisten;
+ int ret = NOTIFY_DONE;
- sk = data;
- ret = NOTIFY_DONE;
+ clisten = (struct chtls_listen *)data;
switch (event) {
case CHTLS_LISTEN_START:
+ ret = chtls_listen_start(clisten->cdev, clisten->sk);
+ kfree(clisten);
+ break;
case CHTLS_LISTEN_STOP:
- mutex_lock(&cdev_list_lock);
- list_for_each_entry(cdev, &cdev_list, list) {
- if (event == CHTLS_LISTEN_START)
- ret = chtls_listen_start(cdev, sk);
- else
- chtls_listen_stop(cdev, sk);
- }
- mutex_unlock(&cdev_list_lock);
+ chtls_listen_stop(clisten->cdev, clisten->sk);
+ kfree(clisten);
break;
}
return ret;
@@ -90,8 +85,9 @@ static int listen_backlog_rcv(struct sock *sk, struct sk_buff *skb)
return 0;
}
-static int chtls_start_listen(struct sock *sk)
+static int chtls_start_listen(struct chtls_dev *cdev, struct sock *sk)
{
+ struct chtls_listen *clisten;
int err;
if (sk->sk_protocol != IPPROTO_TCP)
@@ -102,21 +98,33 @@ static int chtls_start_listen(struct sock *sk)
return -EADDRNOTAVAIL;
sk->sk_backlog_rcv = listen_backlog_rcv;
+ clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+ if (!clisten)
+ return -ENOMEM;
+ clisten->cdev = cdev;
+ clisten->sk = sk;
mutex_lock(&notify_mutex);
err = raw_notifier_call_chain(&listen_notify_list,
- CHTLS_LISTEN_START, sk);
+ CHTLS_LISTEN_START, clisten);
mutex_unlock(&notify_mutex);
return err;
}
-static void chtls_stop_listen(struct sock *sk)
+static void chtls_stop_listen(struct chtls_dev *cdev, struct sock *sk)
{
+ struct chtls_listen *clisten;
+
if (sk->sk_protocol != IPPROTO_TCP)
return;
+ clisten = kmalloc(sizeof(*clisten), GFP_KERNEL);
+ if (!clisten)
+ return;
+ clisten->cdev = cdev;
+ clisten->sk = sk;
mutex_lock(&notify_mutex);
raw_notifier_call_chain(&listen_notify_list,
- CHTLS_LISTEN_STOP, sk);
+ CHTLS_LISTEN_STOP, clisten);
mutex_unlock(&notify_mutex);
}
@@ -138,15 +146,43 @@ static int chtls_inline_feature(struct tls_device *dev)
static int chtls_create_hash(struct tls_device *dev, struct sock *sk)
{
+ struct chtls_dev *cdev = to_chtls_dev(dev);
+
if (sk->sk_state == TCP_LISTEN)
- return chtls_start_listen(sk);
+ return chtls_start_listen(cdev, sk);
return 0;
}
static void chtls_destroy_hash(struct tls_device *dev, struct sock *sk)
{
+ struct chtls_dev *cdev = to_chtls_dev(dev);
+
if (sk->sk_state == TCP_LISTEN)
- chtls_stop_listen(sk);
+ chtls_stop_listen(cdev, sk);
+}
+
+static void chtls_free_uld(struct chtls_dev *cdev)
+{
+ int i;
+
+ tls_unregister_device(&cdev->tlsdev);
+ kvfree(cdev->kmap.addr);
+ idr_destroy(&cdev->hwtid_idr);
+ for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
+ kfree_skb(cdev->rspq_skb_cache[i]);
+ kfree(cdev->lldi);
+ kfree_skb(cdev->askb);
+ kfree(cdev);
+}
+
+static inline void chtls_dev_release(struct kref *kref)
+{
+ struct chtls_dev *cdev;
+ struct tls_device *dev;
+
+ dev = container_of(kref, struct tls_device, kref);
+ cdev = to_chtls_dev(dev);
+ chtls_free_uld(cdev);
}
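Teardown now goes through the tls_device kref: the last kref_put() (see the state-change and chtls_free_all_uld() paths below) invokes chtls_dev_release(), which recovers the chtls_dev via container_of and frees it, so unregistering can no longer free memory another path still references. A minimal plain-C model of the refcount-with-release-callback pattern (not the kernel kref API):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Minimal model of the kref pattern adopted here: the object is freed by a
 * release callback when the last reference drops.
 */
struct kref_model {
	int refcount;
	void (*release)(struct kref_model *kref);
};

struct dev_model {
	int id;
	struct kref_model kref;          /* embedded, as tlsdev.kref is */
};

static void dev_release(struct kref_model *kref)
{
	struct dev_model *d = (void *)((char *)kref -
				       offsetof(struct dev_model, kref));
	printf("releasing dev %d\n", d->id);   /* chtls_free_uld() analog */
	free(d);
}

static void kref_put_model(struct kref_model *kref)
{
	if (--kref->refcount == 0)
		kref->release(kref);
}

int main(void)
{
	struct dev_model *d = malloc(sizeof(*d));
	d->id = 1;
	d->kref = (struct kref_model){ .refcount = 2, .release = dev_release };

	kref_put_model(&d->kref);   /* e.g. the state-change DETACH path */
	kref_put_model(&d->kref);   /* last user: release fires now */
	return 0;
}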
static void chtls_register_dev(struct chtls_dev *cdev)
@@ -159,15 +195,12 @@ static void chtls_register_dev(struct chtls_dev *cdev)
tlsdev->feature = chtls_inline_feature;
tlsdev->hash = chtls_create_hash;
tlsdev->unhash = chtls_destroy_hash;
- tls_register_device(&cdev->tlsdev);
+ tlsdev->release = chtls_dev_release;
+ kref_init(&tlsdev->kref);
+ tls_register_device(tlsdev);
cdev->cdev_state = CHTLS_CDEV_STATE_UP;
}
-static void chtls_unregister_dev(struct chtls_dev *cdev)
-{
- tls_unregister_device(&cdev->tlsdev);
-}
-
static void process_deferq(struct work_struct *task_param)
{
struct chtls_dev *cdev = container_of(task_param,
@@ -262,28 +295,16 @@ out:
return NULL;
}
-static void chtls_free_uld(struct chtls_dev *cdev)
-{
- int i;
-
- chtls_unregister_dev(cdev);
- kvfree(cdev->kmap.addr);
- idr_destroy(&cdev->hwtid_idr);
- for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
- kfree_skb(cdev->rspq_skb_cache[i]);
- kfree(cdev->lldi);
- kfree_skb(cdev->askb);
- kfree(cdev);
-}
-
static void chtls_free_all_uld(void)
{
struct chtls_dev *cdev, *tmp;
mutex_lock(&cdev_mutex);
list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
- if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
- chtls_free_uld(cdev);
+ if (cdev->cdev_state == CHTLS_CDEV_STATE_UP) {
+ list_del(&cdev->list);
+ kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
+ }
}
mutex_unlock(&cdev_mutex);
}
@@ -304,7 +325,7 @@ static int chtls_uld_state_change(void *handle, enum cxgb4_state new_state)
mutex_lock(&cdev_mutex);
list_del(&cdev->list);
mutex_unlock(&cdev_mutex);
- chtls_free_uld(cdev);
+ kref_put(&cdev->tlsdev.kref, cdev->tlsdev.release);
break;
default:
break;
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index eb2a0a73cbed..b4c24a35b3d0 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -261,7 +261,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
struct geode_aes_op *op = crypto_tfm_ctx(tfm);
op->fallback.cip = crypto_alloc_cipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(op->fallback.cip)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 3aef1d43e435..d531c14020dc 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -970,7 +970,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des = {
.cra_name = "cbc(des)",
.cra_driver_name = "safexcel-cbc-des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1010,7 +1010,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des = {
.cra_name = "ecb(des)",
.cra_driver_name = "safexcel-ecb-des",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1074,7 +1074,7 @@ struct safexcel_alg_template safexcel_alg_cbc_des3_ede = {
.cra_name = "cbc(des3_ede)",
.cra_driver_name = "safexcel-cbc-des3_ede",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
@@ -1114,7 +1114,7 @@ struct safexcel_alg_template safexcel_alg_ecb_des3_ede = {
.cra_name = "ecb(des3_ede)",
.cra_driver_name = "safexcel-ecb-des3_ede",
.cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 27f7dad2d45d..19fba998b86b 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1194,7 +1194,6 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- .geniv = "eseqiv",
}
}
},
@@ -1221,7 +1220,6 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
- .geniv = "eseqiv",
}
}
},
@@ -1247,7 +1245,6 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .geniv = "eseqiv",
}
}
},
@@ -1273,7 +1270,6 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .geniv = "eseqiv",
}
}
},
@@ -1287,7 +1283,6 @@ static struct ixp_alg ixp4xx_algos[] = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- .geniv = "eseqiv",
.setkey = ablk_rfc3686_setkey,
.encrypt = ablk_rfc3686_crypt,
.decrypt = ablk_rfc3686_crypt }
diff --git a/drivers/crypto/mxc-scc.c b/drivers/crypto/mxc-scc.c
index e01c46387df8..519086730791 100644
--- a/drivers/crypto/mxc-scc.c
+++ b/drivers/crypto/mxc-scc.c
@@ -178,12 +178,12 @@ static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
else
from = scc->black_memory;
- dev_dbg(scc->dev, "pcopy: from 0x%p %d bytes\n", from,
+ dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
ctx->dst_nents * 8);
len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
from, ctx->size, ctx->offset);
if (!len) {
- dev_err(scc->dev, "pcopy err from 0x%p (len=%d)\n", from, len);
+ dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
return -EINVAL;
}
@@ -274,7 +274,7 @@ static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
to, len, ctx->offset);
if (!len) {
- dev_err(scc->dev, "pcopy err to 0x%p (len=%d)\n", to, len);
+ dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
return -EINVAL;
}
@@ -335,9 +335,9 @@ static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
return;
}
- dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n",
- (void *)readl(scc->base + SCC_SCM_RED_START),
- (void *)readl(scc->base + SCC_SCM_BLACK_START));
+ dev_dbg(scc->dev, "Start encryption (0x%x/0x%x)\n",
+ readl(scc->base + SCC_SCM_RED_START),
+ readl(scc->base + SCC_SCM_BLACK_START));
/* clear interrupt control registers */
writel(SCC_SCM_INTR_CTRL_CLR_INTR,
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 4e6ff32f8a7e..a2105cf33abb 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stmp_device.h>
+#include <linux/clk.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
@@ -82,6 +83,7 @@ struct dcp {
spinlock_t lock[DCP_MAX_CHANS];
struct task_struct *thread[DCP_MAX_CHANS];
struct crypto_queue queue[DCP_MAX_CHANS];
+ struct clk *dcp_clk;
};
enum dcp_chan {
@@ -1053,11 +1055,24 @@ static int mxs_dcp_probe(struct platform_device *pdev)
/* Re-align the structure so it fits the DCP constraints. */
sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
- /* Restart the DCP block. */
- ret = stmp_reset_block(sdcp->base);
+ /* DCP clock is optional, only used on some SoCs */
+ sdcp->dcp_clk = devm_clk_get(dev, "dcp");
+ if (IS_ERR(sdcp->dcp_clk)) {
+ if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
+ return PTR_ERR(sdcp->dcp_clk);
+ sdcp->dcp_clk = NULL;
+ }
+ ret = clk_prepare_enable(sdcp->dcp_clk);
if (ret)
return ret;
+ /* Restart the DCP block. */
+ ret = stmp_reset_block(sdcp->base);
+ if (ret) {
+ dev_err(dev, "Failed reset\n");
+ goto err_disable_unprepare_clk;
+ }
+
/* Initialize control register. */
writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
@@ -1094,7 +1109,8 @@ static int mxs_dcp_probe(struct platform_device *pdev)
NULL, "mxs_dcp_chan/sha");
if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
dev_err(dev, "Error starting SHA thread!\n");
- return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+ ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
+ goto err_disable_unprepare_clk;
}
sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
@@ -1151,6 +1167,10 @@ err_destroy_aes_thread:
err_destroy_sha_thread:
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
+
+err_disable_unprepare_clk:
+ clk_disable_unprepare(sdcp->dcp_clk);
+
return ret;
}
@@ -1170,6 +1190,8 @@ static int mxs_dcp_remove(struct platform_device *pdev)
kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
+ clk_disable_unprepare(sdcp->dcp_clk);
+
platform_set_drvdata(pdev, NULL);
global_sdcp = NULL;
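
The mxs-dcp probe above treats the "dcp" clock as optional: -ENOENT means the SoC simply has no such clock and is tolerated, while any other error (including -EPROBE_DEFER) aborts the probe. A hedged sketch of the same idiom, with "myclk" as a placeholder consumer id; it relies on clk_prepare_enable(NULL) being a no-op, so a missing optional clock needs no special-casing later.

/* Optional-clock idiom from the hunk above; names are illustrative. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_optional_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "myclk");
	if (IS_ERR(*clk)) {
		if (*clk != ERR_PTR(-ENOENT))
			return PTR_ERR(*clk);	/* real error, e.g. -EPROBE_DEFER */
		*clk = NULL;			/* clock simply not wired up */
	}
	return clk_prepare_enable(*clk);	/* no-op when *clk is NULL */
}
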
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index 898c0a280511..5a26fcd75d2d 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -159,7 +159,6 @@ struct crypto_alg nx_ctr3686_aes_alg = {
.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
- .geniv = "seqiv",
.setkey = ctr3686_aes_nx_set_key,
.encrypt = ctr3686_aes_nx_crypt,
.decrypt = ctr3686_aes_nx_crypt,
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index a553ffddb11b..0120feb2d746 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -749,7 +749,6 @@ static struct crypto_alg algs_ctr[] = {
.cra_u.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .geniv = "eseqiv",
.ivsize = AES_BLOCK_SIZE,
.setkey = omap_aes_setkey,
.encrypt = omap_aes_ctr_encrypt,
@@ -1222,7 +1221,6 @@ static int omap_aes_probe(struct platform_device *pdev)
algp = &dd->pdata->algs_info[i].algs_list[j];
pr_debug("reg alg: %s\n", algp->cra_name);
- INIT_LIST_HEAD(&algp->cra_list);
err = crypto_register_alg(algp);
if (err)
@@ -1240,7 +1238,6 @@ static int omap_aes_probe(struct platform_device *pdev)
algp = &aalg->base;
pr_debug("reg alg: %s\n", algp->cra_name);
- INIT_LIST_HEAD(&algp->cra_list);
err = crypto_register_aead(aalg);
if (err)
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index eb95b0d7f184..6369019219d4 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -1069,7 +1069,6 @@ static int omap_des_probe(struct platform_device *pdev)
algp = &dd->pdata->algs_info[i].algs_list[j];
pr_debug("reg alg: %s\n", algp->cra_name);
- INIT_LIST_HEAD(&algp->cra_list);
err = crypto_register_alg(algp);
if (err)
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index a28f1d18fe01..17068b55fea5 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -1585,8 +1585,7 @@ static struct spacc_alg l2_engine_algs[] = {
.cra_name = "f8(kasumi)",
.cra_driver_name = "f8-kasumi-picoxcell",
.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_GIVCIPHER |
- CRYPTO_ALG_ASYNC |
+ .cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = 8,
.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index 585e1cab9ae3..25c13e26d012 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -376,7 +376,6 @@ static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
alg->cra_module = THIS_MODULE;
alg->cra_init = qce_ablkcipher_init;
alg->cra_exit = qce_ablkcipher_exit;
- INIT_LIST_HEAD(&alg->cra_list);
INIT_LIST_HEAD(&tmpl->entry);
tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index d8a5db11b7ea..fc45f5ea6fdd 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -508,7 +508,6 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def,
base->cra_alignmask = 0;
base->cra_module = THIS_MODULE;
base->cra_init = qce_ahash_cra_init;
- INIT_LIST_HEAD(&base->cra_list);
snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index bbf166a97ad3..8c32a3059b4a 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1321,7 +1321,6 @@ static int sahara_register_algs(struct sahara_dev *dev)
unsigned int i, j, k, l;
for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- INIT_LIST_HEAD(&aes_algs[i].cra_list);
err = crypto_register_alg(&aes_algs[i]);
if (err)
goto err_aes_algs;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 6988012deca4..45e20707cef8 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -3155,7 +3155,6 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_ablkcipher.setkey = ablkcipher_setkey;
alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
- alg->cra_ablkcipher.geniv = "eseqiv";
break;
case CRYPTO_ALG_TYPE_AEAD:
alg = &t_alg->algt.alg.aead.base;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index d2663a4e1f5e..a92a66b1ff46 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -556,7 +556,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg_src,
ctx->device->dma.sg_src_len,
- direction, DMA_CTRL_ACK);
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK);
break;
case DMA_FROM_DEVICE:
@@ -580,7 +580,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg_dst,
ctx->device->dma.sg_dst_len,
- direction,
+ DMA_DEV_TO_MEM,
DMA_CTRL_ACK |
DMA_PREP_INTERRUPT);
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 633321a8dd03..a0bb8a6eec3f 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -166,7 +166,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
__func__);
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg, ctx->device->dma.sg_len,
- direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(ctx->device->dev,
"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 141413067b5c..0ae3de76833b 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -285,6 +285,44 @@ static int devfreq_notify_transition(struct devfreq *devfreq,
return 0;
}
+static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
+ u32 flags)
+{
+ struct devfreq_freqs freqs;
+ unsigned long cur_freq;
+ int err = 0;
+
+ if (devfreq->profile->get_cur_freq)
+ devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
+ else
+ cur_freq = devfreq->previous_freq;
+
+ freqs.old = cur_freq;
+ freqs.new = new_freq;
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
+
+ err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
+ if (err) {
+ freqs.new = cur_freq;
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
+ return err;
+ }
+
+ freqs.new = new_freq;
+ devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
+
+ if (devfreq_update_status(devfreq, new_freq))
+ dev_err(&devfreq->dev,
+ "Couldn't update frequency transition information.\n");
+
+ devfreq->previous_freq = new_freq;
+
+ if (devfreq->suspend_freq)
+ devfreq->resume_freq = cur_freq;
+
+ return err;
+}
+
/* Load monitoring helper functions for governors use */
/**
@@ -296,8 +334,7 @@ static int devfreq_notify_transition(struct devfreq *devfreq,
*/
int update_devfreq(struct devfreq *devfreq)
{
- struct devfreq_freqs freqs;
- unsigned long freq, cur_freq, min_freq, max_freq;
+ unsigned long freq, min_freq, max_freq;
int err = 0;
u32 flags = 0;
@@ -333,31 +370,8 @@ int update_devfreq(struct devfreq *devfreq)
flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
}
- if (devfreq->profile->get_cur_freq)
- devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
- else
- cur_freq = devfreq->previous_freq;
+ return devfreq_set_target(devfreq, freq, flags);
- freqs.old = cur_freq;
- freqs.new = freq;
- devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
-
- err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
- if (err) {
- freqs.new = cur_freq;
- devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
- return err;
- }
-
- freqs.new = freq;
- devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
-
- if (devfreq_update_status(devfreq, freq))
- dev_err(&devfreq->dev,
- "Couldn't update frequency transition information.\n");
-
- devfreq->previous_freq = freq;
- return err;
}
EXPORT_SYMBOL(update_devfreq);
@@ -657,6 +671,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
}
devfreq->max_freq = devfreq->scaling_max_freq;
+ devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
+ atomic_set(&devfreq->suspend_count, 0);
+
dev_set_name(&devfreq->dev, "devfreq%d",
atomic_inc_return(&devfreq_no));
err = device_register(&devfreq->dev);
@@ -857,14 +874,28 @@ EXPORT_SYMBOL(devm_devfreq_remove_device);
*/
int devfreq_suspend_device(struct devfreq *devfreq)
{
+ int ret;
+
if (!devfreq)
return -EINVAL;
- if (!devfreq->governor)
+ if (atomic_inc_return(&devfreq->suspend_count) > 1)
return 0;
- return devfreq->governor->event_handler(devfreq,
- DEVFREQ_GOV_SUSPEND, NULL);
+ if (devfreq->governor) {
+ ret = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_SUSPEND, NULL);
+ if (ret)
+ return ret;
+ }
+
+ if (devfreq->suspend_freq) {
+ ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(devfreq_suspend_device);
@@ -878,18 +909,76 @@ EXPORT_SYMBOL(devfreq_suspend_device);
*/
int devfreq_resume_device(struct devfreq *devfreq)
{
+ int ret;
+
if (!devfreq)
return -EINVAL;
- if (!devfreq->governor)
+ if (atomic_dec_return(&devfreq->suspend_count) >= 1)
return 0;
- return devfreq->governor->event_handler(devfreq,
- DEVFREQ_GOV_RESUME, NULL);
+ if (devfreq->resume_freq) {
+ ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
+ if (ret)
+ return ret;
+ }
+
+ if (devfreq->governor) {
+ ret = devfreq->governor->event_handler(devfreq,
+ DEVFREQ_GOV_RESUME, NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(devfreq_resume_device);
/**
+ * devfreq_suspend() - Suspend devfreq governors and devices
+ *
+ * Called during system-wide suspend/hibernate cycles to suspend the
+ * governors and devices, preserving their state for resume. On some
+ * platforms the devfreq device must be restored to a precise state
+ * (frequency) after resume in order to be fully operational.
+ */
+void devfreq_suspend(void)
+{
+ struct devfreq *devfreq;
+ int ret;
+
+ mutex_lock(&devfreq_list_lock);
+ list_for_each_entry(devfreq, &devfreq_list, node) {
+ ret = devfreq_suspend_device(devfreq);
+ if (ret)
+ dev_err(&devfreq->dev,
+ "failed to suspend devfreq device\n");
+ }
+ mutex_unlock(&devfreq_list_lock);
+}
+
+/**
+ * devfreq_resume() - Resume devfreq governors and devices
+ *
+ * Called during system-wide suspend/hibernate cycles to resume the governors
+ * and devices that were suspended with devfreq_suspend().
+ */
+void devfreq_resume(void)
+{
+ struct devfreq *devfreq;
+ int ret;
+
+ mutex_lock(&devfreq_list_lock);
+ list_for_each_entry(devfreq, &devfreq_list, node) {
+ ret = devfreq_resume_device(devfreq);
+ if (ret)
+ dev_warn(&devfreq->dev,
+ "failed to resume devfreq device\n");
+ }
+ mutex_unlock(&devfreq_list_lock);
+}
+
+/**
* devfreq_add_governor() - Add devfreq governor
* @governor: the devfreq governor to be added
*/
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 1551ca7df394..136ec04d683f 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -30,13 +30,16 @@
EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
+static DEFINE_SPINLOCK(dma_fence_stub_lock);
+static struct dma_fence dma_fence_stub;
+
/*
* fence context counter: each execution context should have its own
* fence context, this allows checking if fences belong to the same
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
-static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
/**
* DOC: DMA fences overview
@@ -68,6 +71,37 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(0);
* &dma_buf.resv pointer.
*/
+static const char *dma_fence_stub_get_name(struct dma_fence *fence)
+{
+ return "stub";
+}
+
+static const struct dma_fence_ops dma_fence_stub_ops = {
+ .get_driver_name = dma_fence_stub_get_name,
+ .get_timeline_name = dma_fence_stub_get_name,
+};
+
+/**
+ * dma_fence_get_stub - return a signaled fence
+ *
+ * Return a stub fence which is already signaled.
+ */
+struct dma_fence *dma_fence_get_stub(void)
+{
+ spin_lock(&dma_fence_stub_lock);
+ if (!dma_fence_stub.ops) {
+ dma_fence_init(&dma_fence_stub,
+ &dma_fence_stub_ops,
+ &dma_fence_stub_lock,
+ 0, 0);
+ dma_fence_signal_locked(&dma_fence_stub);
+ }
+ spin_unlock(&dma_fence_stub_lock);
+
+ return dma_fence_get(&dma_fence_stub);
+}
+EXPORT_SYMBOL(dma_fence_get_stub);
+
/**
* dma_fence_context_alloc - allocate an array of fence contexts
* @num: amount of contexts to allocate
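
dma_fence_get_stub() above lazily initializes a singleton fence under a spinlock and signals it before first use, so callers always receive an already-signaled fence (context 0, which is why the context counter now starts at 1). A hedged usage sketch:

/* A driver that must hand back *some* fence even when no work is in
 * flight can return the signaled stub, so waiters complete at once.
 * "my_get_last_fence" is hypothetical. */
#include <linux/dma-fence.h>

static struct dma_fence *my_get_last_fence(struct dma_fence *last)
{
	if (last)
		return dma_fence_get(last);	/* real in-flight work */

	return dma_fence_get_stub();		/* already-signaled placeholder */
}
/* Either way the caller owns a reference and must dma_fence_put() it. */
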
diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 6c95f61a32e7..c1618335ca99 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -56,9 +56,10 @@ const char reservation_seqcount_string[] = "reservation_seqcount";
EXPORT_SYMBOL(reservation_seqcount_string);
/**
- * reservation_object_reserve_shared - Reserve space to add a shared
- * fence to a reservation_object.
+ * reservation_object_reserve_shared - Reserve space to add shared fences to
+ * a reservation_object.
* @obj: reservation object
+ * @num_fences: number of fences we want to add
*
* Should be called before reservation_object_add_shared_fence(). Must
* be called with obj->lock held.
@@ -66,107 +67,27 @@ EXPORT_SYMBOL(reservation_seqcount_string);
* RETURNS
* Zero for success, or -errno
*/
-int reservation_object_reserve_shared(struct reservation_object *obj)
+int reservation_object_reserve_shared(struct reservation_object *obj,
+ unsigned int num_fences)
{
- struct reservation_object_list *fobj, *old;
- u32 max;
+ struct reservation_object_list *old, *new;
+ unsigned int i, j, k, max;
old = reservation_object_get_list(obj);
if (old && old->shared_max) {
- if (old->shared_count < old->shared_max) {
- /* perform an in-place update */
- kfree(obj->staged);
- obj->staged = NULL;
+ if ((old->shared_count + num_fences) <= old->shared_max)
return 0;
- } else
- max = old->shared_max * 2;
- } else
- max = 4;
-
- /*
- * resize obj->staged or allocate if it doesn't exist,
- * noop if already correct size
- */
- fobj = krealloc(obj->staged, offsetof(typeof(*fobj), shared[max]),
- GFP_KERNEL);
- if (!fobj)
- return -ENOMEM;
-
- obj->staged = fobj;
- fobj->shared_max = max;
- return 0;
-}
-EXPORT_SYMBOL(reservation_object_reserve_shared);
-
-static void
-reservation_object_add_shared_inplace(struct reservation_object *obj,
- struct reservation_object_list *fobj,
- struct dma_fence *fence)
-{
- struct dma_fence *signaled = NULL;
- u32 i, signaled_idx;
-
- dma_fence_get(fence);
-
- preempt_disable();
- write_seqcount_begin(&obj->seq);
-
- for (i = 0; i < fobj->shared_count; ++i) {
- struct dma_fence *old_fence;
-
- old_fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(obj));
-
- if (old_fence->context == fence->context) {
- /* memory barrier is added by write_seqcount_begin */
- RCU_INIT_POINTER(fobj->shared[i], fence);
- write_seqcount_end(&obj->seq);
- preempt_enable();
-
- dma_fence_put(old_fence);
- return;
- }
-
- if (!signaled && dma_fence_is_signaled(old_fence)) {
- signaled = old_fence;
- signaled_idx = i;
- }
- }
-
- /*
- * memory barrier is added by write_seqcount_begin,
- * fobj->shared_count is protected by this lock too
- */
- if (signaled) {
- RCU_INIT_POINTER(fobj->shared[signaled_idx], fence);
+ else
+ max = max(old->shared_count + num_fences,
+ old->shared_max * 2);
} else {
- BUG_ON(fobj->shared_count >= fobj->shared_max);
- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
- fobj->shared_count++;
+ max = 4;
}
- write_seqcount_end(&obj->seq);
- preempt_enable();
-
- dma_fence_put(signaled);
-}
-
-static void
-reservation_object_add_shared_replace(struct reservation_object *obj,
- struct reservation_object_list *old,
- struct reservation_object_list *fobj,
- struct dma_fence *fence)
-{
- unsigned i, j, k;
-
- dma_fence_get(fence);
-
- if (!old) {
- RCU_INIT_POINTER(fobj->shared[0], fence);
- fobj->shared_count = 1;
- goto done;
- }
+ new = kmalloc(offsetof(typeof(*new), shared[max]), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
/*
* no need to bump fence refcounts, rcu_read access
@@ -174,46 +95,45 @@ reservation_object_add_shared_replace(struct reservation_object *obj,
* references from the old struct are carried over to
* the new.
*/
- for (i = 0, j = 0, k = fobj->shared_max; i < old->shared_count; ++i) {
- struct dma_fence *check;
-
- check = rcu_dereference_protected(old->shared[i],
- reservation_object_held(obj));
+ for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
+ struct dma_fence *fence;
- if (check->context == fence->context ||
- dma_fence_is_signaled(check))
- RCU_INIT_POINTER(fobj->shared[--k], check);
+ fence = rcu_dereference_protected(old->shared[i],
+ reservation_object_held(obj));
+ if (dma_fence_is_signaled(fence))
+ RCU_INIT_POINTER(new->shared[--k], fence);
else
- RCU_INIT_POINTER(fobj->shared[j++], check);
+ RCU_INIT_POINTER(new->shared[j++], fence);
}
- fobj->shared_count = j;
- RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
- fobj->shared_count++;
+ new->shared_count = j;
+ new->shared_max = max;
-done:
preempt_disable();
write_seqcount_begin(&obj->seq);
/*
* RCU_INIT_POINTER can be used here,
* seqcount provides the necessary barriers
*/
- RCU_INIT_POINTER(obj->fence, fobj);
+ RCU_INIT_POINTER(obj->fence, new);
write_seqcount_end(&obj->seq);
preempt_enable();
if (!old)
- return;
+ return 0;
/* Drop the references to the signaled fences */
- for (i = k; i < fobj->shared_max; ++i) {
- struct dma_fence *f;
+ for (i = k; i < new->shared_max; ++i) {
+ struct dma_fence *fence;
- f = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(obj));
- dma_fence_put(f);
+ fence = rcu_dereference_protected(new->shared[i],
+ reservation_object_held(obj));
+ dma_fence_put(fence);
}
kfree_rcu(old, rcu);
+
+ return 0;
}
+EXPORT_SYMBOL(reservation_object_reserve_shared);
/**
* reservation_object_add_shared_fence - Add a fence to a shared slot
@@ -226,15 +146,39 @@ done:
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct dma_fence *fence)
{
- struct reservation_object_list *old, *fobj = obj->staged;
+ struct reservation_object_list *fobj;
+ unsigned int i, count;
- old = reservation_object_get_list(obj);
- obj->staged = NULL;
+ dma_fence_get(fence);
- if (!fobj)
- reservation_object_add_shared_inplace(obj, old, fence);
- else
- reservation_object_add_shared_replace(obj, old, fobj, fence);
+ fobj = reservation_object_get_list(obj);
+ count = fobj->shared_count;
+
+ preempt_disable();
+ write_seqcount_begin(&obj->seq);
+
+ for (i = 0; i < count; ++i) {
+ struct dma_fence *old_fence;
+
+ old_fence = rcu_dereference_protected(fobj->shared[i],
+ reservation_object_held(obj));
+ if (old_fence->context == fence->context ||
+ dma_fence_is_signaled(old_fence)) {
+ dma_fence_put(old_fence);
+ goto replace;
+ }
+ }
+
+ BUG_ON(fobj->shared_count >= fobj->shared_max);
+ count++;
+
+replace:
+ RCU_INIT_POINTER(fobj->shared[i], fence);
+ /* pointer update must be visible before we extend the shared_count */
+ smp_store_mb(fobj->shared_count, count);
+
+ write_seqcount_end(&obj->seq);
+ preempt_enable();
}
EXPORT_SYMBOL(reservation_object_add_shared_fence);
@@ -343,9 +287,6 @@ retry:
new = dma_fence_get_rcu_safe(&src->fence_excl);
rcu_read_unlock();
- kfree(dst->staged);
- dst->staged = NULL;
-
src_list = reservation_object_get_list(dst);
old = reservation_object_get_excl(dst);
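
With obj->staged gone, callers of the reworked API reserve slots explicitly for the number of shared fences they intend to add, then add them while holding the reservation lock. A hedged sketch of the new calling convention ("my_attach_fence" is hypothetical):

#include <linux/dma-fence.h>
#include <linux/reservation.h>

static int my_attach_fence(struct reservation_object *resv,
			   struct dma_fence *fence)
{
	int ret;

	ret = reservation_object_lock(resv, NULL);
	if (ret)
		return ret;

	/* Reserve one slot, then add under the held lock. */
	ret = reservation_object_reserve_shared(resv, 1);
	if (!ret)
		reservation_object_add_shared_fence(resv, fence);

	reservation_object_unlock(resv);
	return ret;
}
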
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7cbac6e8c113..01d936c9fe89 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1641,6 +1641,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
atchan->descs_allocated = 0;
atchan->status = 0;
+ /*
+ * Free atslave allocated in at_dma_xlate()
+ */
+ kfree(chan->private);
+ chan->private = NULL;
+
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
@@ -1675,7 +1681,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
+ atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
if (!atslave)
return NULL;
@@ -2000,6 +2006,8 @@ static int at_dma_remove(struct platform_device *pdev)
struct resource *io;
at_dma_off(atdma);
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&atdma->dma_common);
dma_pool_destroy(atdma->memset_pool);
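
The at_hdmac hunks establish an ownership rule: the filter data allocated in the xlate callback belongs to the channel, so it is allocated with plain kzalloc() and freed when the channel's resources are released, rather than via devm_* (which would pin it to the controller's lifetime and leak across repeated xlate calls). A short sketch with illustrative names:

#include <linux/dmaengine.h>
#include <linux/slab.h>

static void my_free_chan_resources(struct dma_chan *chan)
{
	kfree(chan->private);	/* allocated with kzalloc() in xlate */
	chan->private = NULL;
}
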
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index d0c3e50b39fb..1fc488e90f36 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1059,12 +1059,12 @@ static void dwc_issue_pending(struct dma_chan *chan)
/*
* Program FIFO size of channels.
*
- * By default full FIFO (1024 bytes) is assigned to channel 0. Here we
+ * By default full FIFO (512 bytes) is assigned to channel 0. Here we
* slice FIFO on equal parts between channels.
*/
static void idma32_fifo_partition(struct dw_dma *dw)
{
- u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
+ u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
IDMA32C_FP_UPDATE;
u64 fifo_partition = 0;
@@ -1077,7 +1077,7 @@ static void idma32_fifo_partition(struct dw_dma *dw)
/* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
fifo_partition |= value << 32;
- /* Program FIFO Partition registers - 128 bytes for each channel */
+ /* Program FIFO Partition registers - 64 bytes per channel */
idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
}
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b4ec2d20e661..cb1b44d78a1f 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -33,6 +32,7 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
+#include <linux/workqueue.h>
#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
@@ -376,7 +376,7 @@ struct sdma_channel {
u32 shp_addr, per_addr;
enum dma_status status;
struct imx_dma_data data;
- struct dma_pool *bd_pool;
+ struct work_struct terminate_worker;
};
#define IMX_DMA_SG_LOOP BIT(0)
@@ -1027,31 +1027,49 @@ static int sdma_disable_channel(struct dma_chan *chan)
return 0;
}
-
-static int sdma_disable_channel_with_delay(struct dma_chan *chan)
+static void sdma_channel_terminate_work(struct work_struct *work)
{
- struct sdma_channel *sdmac = to_sdma_chan(chan);
+ struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
+ terminate_worker);
unsigned long flags;
LIST_HEAD(head);
- sdma_disable_channel(chan);
- spin_lock_irqsave(&sdmac->vc.lock, flags);
- vchan_get_all_descriptors(&sdmac->vc, &head);
- sdmac->desc = NULL;
- spin_unlock_irqrestore(&sdmac->vc.lock, flags);
- vchan_dma_desc_free_list(&sdmac->vc, &head);
-
/*
* According to the NXP R&D team, a delay of one BD's SDMA processing
* time (at most 1 ms) should be added after disabling the channel
* bit, to ensure the SDMA core has really stopped after SDMA clients
* call .device_terminate_all.
*/
- mdelay(1);
+ usleep_range(1000, 2000);
+
+ spin_lock_irqsave(&sdmac->vc.lock, flags);
+ vchan_get_all_descriptors(&sdmac->vc, &head);
+ sdmac->desc = NULL;
+ spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+ vchan_dma_desc_free_list(&sdmac->vc, &head);
+}
+
+static int sdma_disable_channel_async(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+ sdma_disable_channel(chan);
+
+ if (sdmac->desc)
+ schedule_work(&sdmac->terminate_worker);
return 0;
}
+static void sdma_channel_synchronize(struct dma_chan *chan)
+{
+ struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+ vchan_synchronize(&sdmac->vc);
+
+ flush_work(&sdmac->terminate_worker);
+}
+
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
struct sdma_engine *sdma = sdmac->sdma;
@@ -1192,10 +1210,11 @@ out:
static int sdma_alloc_bd(struct sdma_desc *desc)
{
+ u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
int ret = 0;
- desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
- &desc->bd_phys);
+ desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
+ GFP_NOWAIT);
if (!desc->bd) {
ret = -ENOMEM;
goto out;
@@ -1206,7 +1225,9 @@ out:
static void sdma_free_bd(struct sdma_desc *desc)
{
- dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
+ u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+
+ dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
}
static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -1272,10 +1293,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
if (ret)
goto disable_clk_ahb;
- sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
- sizeof(struct sdma_buffer_descriptor),
- 32, 0);
-
return 0;
disable_clk_ahb:
@@ -1290,7 +1307,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- sdma_disable_channel_with_delay(chan);
+ sdma_disable_channel_async(chan);
+
+ sdma_channel_synchronize(chan);
if (sdmac->event_id0)
sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1304,9 +1323,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
clk_disable(sdma->clk_ipg);
clk_disable(sdma->clk_ahb);
-
- dma_pool_destroy(sdmac->bd_pool);
- sdmac->bd_pool = NULL;
}
static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
@@ -1999,6 +2015,8 @@ static int sdma_probe(struct platform_device *pdev)
sdmac->channel = i;
sdmac->vc.desc_free = sdma_desc_free;
+ INIT_WORK(&sdmac->terminate_worker,
+ sdma_channel_terminate_work);
/*
* Add the channel to the DMAC list. Do not add channel 0 though
* because we need it internally in the SDMA driver. This also means
@@ -2050,7 +2068,8 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
- sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
+ sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+ sdma->dma_device.device_synchronize = sdma_channel_synchronize;
sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
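
The imx-sdma rework splits termination into an asynchronous stop plus a synchronize step, matching the dmaengine contract: clients pair dmaengine_terminate_async() with dmaengine_synchronize() before freeing memory the channel might still touch. A client-side sketch ("chan" is a hypothetical previously-requested channel):

#include <linux/dmaengine.h>

static void my_stop_dma(struct dma_chan *chan)
{
	dmaengine_terminate_async(chan);	/* may return before HW stops */
	dmaengine_synchronize(chan);		/* flushes the terminate worker */
	/* now safe to free DMA buffers and client state */
}
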
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 1497da367710..e507ec36c0d3 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_chan *chan)
desc_phys = lower_32_bits(c->desc_phys);
desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
- if (!cdd->chan_busy[desc_num])
+ if (!cdd->chan_busy[desc_num]) {
+ struct cppi41_channel *cc, *_ct;
+
+ /*
+ * the channel might still be on the pending list if
+ * cppi41_dma_issue_pending() was called after
+ * cppi41_runtime_suspend()
+ */
+ list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
+ if (cc != c)
+ continue;
+ list_del(&cc->node);
+ break;
+ }
return 0;
+ }
ret = cppi41_tear_down_chan(c);
if (ret)
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 41c9ccdd20d6..e286b5b99003 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -231,10 +231,10 @@ config EDAC_SBRIDGE
config EDAC_SKX
tristate "Intel Skylake server Integrated MC"
- depends on PCI && X86_64 && X86_MCE_INTEL && PCI_MMCONFIG
+ depends on PCI && X86_64 && X86_MCE_INTEL && PCI_MMCONFIG && ACPI
depends on ACPI_NFIT || !ACPI_NFIT # if ACPI_NFIT=m, EDAC_SKX can't be y
select DMI
- select ACPI_ADXL if ACPI
+ select ACPI_ADXL
help
Support for error detection and correction on the Intel
Skylake server Integrated Memory Controllers. If your
@@ -442,7 +442,7 @@ config EDAC_ALTERA_SDMMC
config EDAC_SYNOPSYS
tristate "Synopsys DDR Memory Controller"
- depends on ARCH_ZYNQ
+ depends on ARCH_ZYNQ || ARCH_ZYNQMP
help
Support for error detection and correction on the Synopsys DDR
memory controller.
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index b5de9a13ea3f..de732dc2ef33 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1446,8 +1446,8 @@ static int __init e752x_init(void)
edac_dbg(3, "\n");
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
pci_rc = pci_register_driver(&e752x_driver);
return (pci_rc < 0) ? pci_rc : 0;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 7d3edd713932..13594ffadcb3 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -55,8 +55,6 @@ static LIST_HEAD(mc_devices);
*/
static const char *edac_mc_owner;
-static struct bus_type mc_bus[EDAC_MAX_MCS];
-
int edac_get_report_status(void)
{
return edac_report;
@@ -716,11 +714,6 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
int ret = -EINVAL;
edac_dbg(0, "\n");
- if (mci->mc_idx >= EDAC_MAX_MCS) {
- pr_warn_once("Too many memory controllers: %d\n", mci->mc_idx);
- return -ENODEV;
- }
-
#ifdef CONFIG_EDAC_DEBUG
if (edac_debug_level >= 3)
edac_mc_dump_mci(mci);
@@ -760,7 +753,7 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
/* set load time so that error rate can be tracked */
mci->start_time = jiffies;
- mci->bus = &mc_bus[mci->mc_idx];
+ mci->bus = edac_get_sysfs_subsys();
if (edac_create_sysfs_mci_device(mci, groups)) {
edac_mc_printk(mci, KERN_WARNING,
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 20374b8248f0..464174685589 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -405,7 +405,6 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
struct csrow_info *csrow, int index)
{
csrow->dev.type = &csrow_attr_type;
- csrow->dev.bus = mci->bus;
csrow->dev.groups = csrow_dev_groups;
device_initialize(&csrow->dev);
csrow->dev.parent = &mci->dev;
@@ -636,7 +635,6 @@ static int edac_create_dimm_object(struct mem_ctl_info *mci,
dimm->mci = mci;
dimm->dev.type = &dimm_attr_type;
- dimm->dev.bus = mci->bus;
device_initialize(&dimm->dev);
dimm->dev.parent = &mci->dev;
@@ -914,33 +912,13 @@ static const struct device_type mci_attr_type = {
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
const struct attribute_group **groups)
{
- char *name;
int i, err;
- /*
- * The memory controller needs its own bus, in order to avoid
- * namespace conflicts at /sys/bus/edac.
- */
- name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
- if (!name)
- return -ENOMEM;
-
- mci->bus->name = name;
-
- edac_dbg(0, "creating bus %s\n", mci->bus->name);
-
- err = bus_register(mci->bus);
- if (err < 0) {
- kfree(name);
- return err;
- }
-
/* get the /sys/devices/system/edac subsys reference */
mci->dev.type = &mci_attr_type;
device_initialize(&mci->dev);
mci->dev.parent = mci_pdev;
- mci->dev.bus = mci->bus;
mci->dev.groups = groups;
dev_set_name(&mci->dev, "mc%d", mci->mc_idx);
dev_set_drvdata(&mci->dev, mci);
@@ -950,7 +928,7 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
err = device_add(&mci->dev);
if (err < 0) {
edac_dbg(1, "failure: create device %s\n", dev_name(&mci->dev));
- goto fail_unregister_bus;
+ goto out;
}
/*
@@ -998,10 +976,8 @@ fail_unregister_dimm:
device_unregister(&dimm->dev);
}
device_unregister(&mci->dev);
-fail_unregister_bus:
- bus_unregister(mci->bus);
- kfree(name);
+out:
return err;
}
@@ -1032,13 +1008,8 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
void edac_unregister_sysfs(struct mem_ctl_info *mci)
{
- struct bus_type *bus = mci->bus;
- const char *name = mci->bus->name;
-
edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
device_unregister(&mci->dev);
- bus_unregister(bus);
- kfree(name);
}
static void mc_attr_release(struct device *dev)
diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c
index efc8276d1d9c..6d8ea226010d 100644
--- a/drivers/edac/fsl_ddr_edac.c
+++ b/drivers/edac/fsl_ddr_edac.c
@@ -2,8 +2,8 @@
* Freescale Memory Controller kernel module
*
* Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
- * ARM-based Layerscape SoCs including LS2xxx. Originally split
- * out from mpc85xx_edac EDAC driver.
+ * ARM-based Layerscape SoCs including LS2xxx and LS1021A. Originally
+ * split out from mpc85xx_edac EDAC driver.
*
* Parts Copyrighted (c) 2013 by Freescale Semiconductor, Inc.
*
@@ -51,6 +51,7 @@ static inline void ddr_out32(void __iomem *addr, u32 value)
iowrite32be(value, addr);
}
+#ifdef CONFIG_EDAC_DEBUG
/************************ MC SYSFS parts ***********************************/
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
@@ -151,11 +152,14 @@ static DEVICE_ATTR(inject_data_lo, S_IRUGO | S_IWUSR,
fsl_mc_inject_data_lo_show, fsl_mc_inject_data_lo_store);
static DEVICE_ATTR(inject_ctrl, S_IRUGO | S_IWUSR,
fsl_mc_inject_ctrl_show, fsl_mc_inject_ctrl_store);
+#endif /* CONFIG_EDAC_DEBUG */
static struct attribute *fsl_ddr_dev_attrs[] = {
+#ifdef CONFIG_EDAC_DEBUG
&dev_attr_inject_data_hi.attr,
&dev_attr_inject_data_lo.attr,
&dev_attr_inject_ctrl.attr,
+#endif
NULL
};
diff --git a/drivers/edac/fsl_ddr_edac.h b/drivers/edac/fsl_ddr_edac.h
index 4ccee292eff1..589b9b4a5e8a 100644
--- a/drivers/edac/fsl_ddr_edac.h
+++ b/drivers/edac/fsl_ddr_edac.h
@@ -2,8 +2,8 @@
* Freescale Memory Controller kernel module
*
* Support Power-based SoCs including MPC85xx, MPC86xx, MPC83xx and
- * ARM-based Layerscape SoCs including LS2xxx. Originally split
- * out from mpc85xx_edac EDAC driver.
+ * ARM-based Layerscape SoCs including LS2xxx and LS1021A. Originally
+ * split out from mpc85xx_edac EDAC driver.
*
* Author: Dave Jiang <djiang@mvista.com>
*
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 8085a32ec3bd..f564a4a8a4ae 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -508,8 +508,8 @@ static int __init i3000_init(void)
edac_dbg(3, "MC:\n");
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
pci_rc = pci_register_driver(&i3000_driver);
if (pci_rc < 0)
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 53f24b18cd61..078a7351bf05 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -1134,8 +1134,6 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
u32 actual_tolm;
u16 limit;
int slot_row;
- int maxch;
- int maxdimmperch;
int way0, way1;
pvt = mci->pvt_info;
@@ -1145,9 +1143,6 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
pci_read_config_dword(pvt->system_address, AMBASE + sizeof(u32),
&pvt->u.ambase_top);
- maxdimmperch = pvt->maxdimmperch;
- maxch = pvt->maxch;
-
edac_dbg(2, "AMBASE= 0x%lx MAXCH= %d MAX-DIMM-Per-CH= %d\n",
(long unsigned int)pvt->ambase, pvt->maxch, pvt->maxdimmperch);
@@ -1253,7 +1248,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
{
struct i5000_pvt *pvt;
struct dimm_info *dimm;
- int empty, channel_count;
+ int empty;
int max_csrows;
int mtr;
int csrow_megs;
@@ -1261,8 +1256,6 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
int slot;
pvt = mci->pvt_info;
-
- channel_count = pvt->maxch;
max_csrows = pvt->maxdimmperch * 2;
empty = 1; /* Assume NO memory */
@@ -1559,8 +1552,8 @@ static int __init i5000_init(void)
edac_dbg(2, "MC:\n");
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
pci_rc = pci_register_driver(&i5000_driver);
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 9ef448fef12f..40297550313a 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -724,7 +724,7 @@ static ssize_t i7core_inject_type_store(struct device *dev,
const char *data, size_t count)
{
struct mem_ctl_info *mci = to_mci(dev);
-struct i7core_pvt *pvt = mci->pvt_info;
+ struct i7core_pvt *pvt = mci->pvt_info;
unsigned long value;
int rc;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 892815eaa97b..7c6a2d4d2360 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -104,7 +104,7 @@ NOTE: Only ONE of the three must be enabled
*
* 31:14 Base Addr of 16K memory-mapped
* configuration space
- * 13:1 reserverd
+ * 13:1 reserved
* 0 mem-mapped config space enable
*/
@@ -358,14 +358,6 @@ static int dual_channel_active(void __iomem *mch_window)
return dualch;
}
-static enum dev_type i82975x_dram_type(void __iomem *mch_window, int rank)
-{
- /*
- * ECC is possible on i92975x ONLY with DEV_X8
- */
- return DEV_X8;
-}
-
static void i82975x_init_csrows(struct mem_ctl_info *mci,
struct pci_dev *pdev, void __iomem *mch_window)
{
@@ -375,7 +367,6 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
u32 cumul_size, nr_pages;
int index, chan;
struct dimm_info *dimm;
- enum dev_type dtype;
last_cumul_size = 0;
@@ -413,7 +404,6 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
* [0-7] for single-channel; i.e. csrow->nr_channels = 1
* [0-3] for dual-channel; i.e. csrow->nr_channels = 2
*/
- dtype = i82975x_dram_type(mch_window, index);
for (chan = 0; chan < csrow->nr_channels; chan++) {
dimm = mci->csrows[index]->channels[chan]->dimm;
@@ -423,7 +413,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
(chan == 0) ? 'A' : 'B',
index);
dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
- dimm->dtype = i82975x_dram_type(mch_window, index);
+
+ /* ECC is possible on i92975x ONLY with DEV_X8. */
+ dimm->dtype = DEV_X8;
+
dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
dimm->edac_mode = EDAC_SECDED; /* only supported */
}
@@ -655,8 +648,8 @@ static int __init i82975x_init(void)
edac_dbg(3, "\n");
- /* Ensure that the OPSTATE is set correctly for POLL or NMI */
- opstate_init();
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
pci_rc = pci_register_driver(&i82975x_driver);
if (pci_rc < 0)
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 82bd775124f2..97a27e42dd61 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -292,7 +292,6 @@ llcc_ecc_irq_handler(int irq, void *edev_ctl)
struct llcc_drv_data *drv = edac_dev_ctl->pvt_info;
irqreturn_t irq_rc = IRQ_NONE;
u32 drp_error, trp_error, i;
- bool irq_handled;
int ret;
/* Iterate over the banks and look for Tag RAM or Data RAM errors */
@@ -311,7 +310,7 @@ llcc_ecc_irq_handler(int irq, void *edev_ctl)
ret = dump_syn_reg(edev_ctl, LLCC_DRAM_UE, i);
}
if (!ret)
- irq_handled = true;
+ irq_rc = IRQ_HANDLED;
ret = regmap_read(drv->regmap,
drv->offsets[i] + TRP_INTERRUPT_0_STATUS,
@@ -327,12 +326,9 @@ llcc_ecc_irq_handler(int irq, void *edev_ctl)
ret = dump_syn_reg(edev_ctl, LLCC_TRAM_UE, i);
}
if (!ret)
- irq_handled = true;
+ irq_rc = IRQ_HANDLED;
}
- if (irq_handled)
- irq_rc = IRQ_HANDLED;
-
return irq_rc;
}
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index a99ea61dad32..93ef161bb5e1 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -209,7 +209,7 @@ static int get_all_bus_mappings(void)
d->bus[1] = GET_BITFIELD(reg, 8, 15);
d->bus[2] = GET_BITFIELD(reg, 16, 23);
d->bus[3] = GET_BITFIELD(reg, 24, 31);
- edac_dbg(2, "busses: %x, %x, %x, %x\n",
+ edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x\n",
d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
list_add_tail(&d->list, &skx_edac_list);
skx_num_sockets++;
@@ -245,8 +245,8 @@ static int get_all_munits(const struct munit *m)
/* Be sure that the device is enabled */
if (unlikely(pci_enable_device(pdev) < 0)) {
- skx_printk(KERN_ERR,
- "Couldn't enable %04x:%04x\n", PCI_VENDOR_ID_INTEL, m->did);
+ skx_printk(KERN_ERR, "Couldn't enable device %04x:%04x\n",
+ PCI_VENDOR_ID_INTEL, m->did);
goto fail;
}
@@ -323,7 +323,7 @@ static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval,
u32 val = GET_BITFIELD(reg, lobit, hibit);
if (val < minval || val > maxval) {
- edac_dbg(2, "bad %s = %d (raw=%x)\n", name, val, reg);
+ edac_dbg(2, "bad %s = %d (raw=0x%x)\n", name, val, reg);
return -EINVAL;
}
return val + add;
@@ -368,7 +368,7 @@ static int skx_get_hi_lo(void)
skx_tohm |= (u64)reg << 32;
pci_dev_put(pdev);
- edac_dbg(2, "tolm=%llx tohm=%llx\n", skx_tolm, skx_tohm);
+ edac_dbg(2, "tolm=0x%llx tohm=0x%llx\n", skx_tolm, skx_tohm);
return 0;
}
@@ -389,7 +389,7 @@ static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
- edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: 0x%x, col: 0x%x\n",
imc->mc, chan, dimmno, size, npages,
banks, 1 << ranks, rows, cols);
@@ -430,18 +430,18 @@ static int get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
}
if (smbios_handle < 0) {
- skx_printk(KERN_ERR, "Can't find handle for NVDIMM ADR=%x\n", dev_handle);
+ skx_printk(KERN_ERR, "Can't find handle for NVDIMM ADR=0x%x\n", dev_handle);
goto unknown_size;
}
if (flags & ACPI_NFIT_MEM_MAP_FAILED) {
- skx_printk(KERN_ERR, "NVDIMM ADR=%x is not mapped\n", dev_handle);
+ skx_printk(KERN_ERR, "NVDIMM ADR=0x%x is not mapped\n", dev_handle);
goto unknown_size;
}
size = dmi_memdev_size(smbios_handle);
if (size == ~0ull)
- skx_printk(KERN_ERR, "Can't find size for NVDIMM ADR=%x/SMBIOS=%x\n",
+ skx_printk(KERN_ERR, "Can't find size for NVDIMM ADR=0x%x/SMBIOS=0x%x\n",
dev_handle, smbios_handle);
unknown_size:
@@ -616,7 +616,7 @@ static bool skx_sad_decode(struct decoded_addr *res)
/* Simple sanity check for I/O space or out of range */
if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
- edac_dbg(0, "Address %llx out of range\n", addr);
+ edac_dbg(0, "Address 0x%llx out of range\n", addr);
return false;
}
@@ -631,7 +631,7 @@ restart:
}
prev_limit = limit + 1;
}
- edac_dbg(0, "No SAD entry for %llx\n", addr);
+ edac_dbg(0, "No SAD entry for 0x%llx\n", addr);
return false;
sad_found:
@@ -709,7 +709,7 @@ sad_found:
res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);
- edac_dbg(2, "%llx: socket=%d imc=%d channel=%d\n",
+ edac_dbg(2, "0x%llx: socket=%d imc=%d channel=%d\n",
res->addr, res->socket, res->imc, res->channel);
return true;
}
@@ -756,7 +756,7 @@ static bool skx_tad_decode(struct decoded_addr *res)
if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
goto tad_found;
}
- edac_dbg(0, "No TAD entry for %llx\n", res->addr);
+ edac_dbg(0, "No TAD entry for 0x%llx\n", res->addr);
return false;
tad_found:
@@ -784,7 +784,7 @@ tad_found:
res->chan_addr = channel_addr;
- edac_dbg(2, "%llx: chan_addr=%llx sktways=%d chanways=%d\n",
+ edac_dbg(2, "0x%llx: chan_addr=0x%llx sktways=%d chanways=%d\n",
res->addr, res->chan_addr, res->sktways, res->chanways);
return true;
}
@@ -826,7 +826,7 @@ static bool skx_rir_decode(struct decoded_addr *res)
}
prev_limit = limit;
}
- edac_dbg(0, "No RIR entry for %llx\n", res->addr);
+ edac_dbg(0, "No RIR entry for 0x%llx\n", res->addr);
return false;
rir_found:
@@ -845,7 +845,7 @@ rir_found:
res->dimm = chan_rank / 4;
res->rank = chan_rank % 4;
- edac_dbg(2, "%llx: dimm=%d rank=%d chan_rank=%d rank_addr=%llx\n",
+ edac_dbg(2, "0x%llx: dimm=%d rank=%d chan_rank=%d rank_addr=0x%llx\n",
res->addr, res->dimm, res->rank,
res->channel_rank, res->rank_address);
return true;
@@ -908,7 +908,7 @@ static bool skx_mad_decode(struct decoded_addr *r)
}
r->row &= (1u << dimm->rowbits) - 1;
- edac_dbg(2, "%llx: row=%x col=%x bank_addr=%d bank_group=%d\n",
+ edac_dbg(2, "0x%llx: row=0x%x col=0x%x bank_addr=%d bank_group=%d\n",
r->addr, r->row, r->column, r->bank_address,
r->bank_group);
return true;
@@ -921,53 +921,6 @@ static bool skx_decode(struct decoded_addr *res)
skx_rir_decode(res) && skx_mad_decode(res);
}
-#ifdef CONFIG_EDAC_DEBUG
-/*
- * Debug feature. Make /sys/kernel/debug/skx_edac_test/addr.
- * Write an address to this file to exercise the address decode
- * logic in this driver.
- */
-static struct dentry *skx_test;
-static u64 skx_fake_addr;
-
-static int debugfs_u64_set(void *data, u64 val)
-{
- struct decoded_addr res;
-
- res.addr = val;
- skx_decode(&res);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
-
-static struct dentry *mydebugfs_create(const char *name, umode_t mode,
- struct dentry *parent, u64 *value)
-{
- return debugfs_create_file(name, mode, parent, value, &fops_u64_wo);
-}
-
-static void setup_skx_debug(void)
-{
- skx_test = debugfs_create_dir("skx_edac_test", NULL);
- mydebugfs_create("addr", S_IWUSR, skx_test, &skx_fake_addr);
-}
-
-static void teardown_skx_debug(void)
-{
- debugfs_remove_recursive(skx_test);
-}
-#else
-static void setup_skx_debug(void)
-{
-}
-
-static void teardown_skx_debug(void)
-{
-}
-#endif /*CONFIG_EDAC_DEBUG*/
-
static bool skx_adxl_decode(struct decoded_addr *res)
{
@@ -1069,13 +1022,13 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
}
}
if (adxl_component_count) {
- snprintf(skx_msg, MSG_SIZE, "%s%s err_code:%04x:%04x %s",
+ snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode, adxl_msg);
} else {
snprintf(skx_msg, MSG_SIZE,
- "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
+ "%s%s err_code:0x%04x:0x%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:0x%x col:0x%x",
overflow ? " OVERFLOW" : "",
(uncorrected_error && recoverable) ? " recoverable" : "",
mscod, errcode,
@@ -1151,15 +1104,15 @@ static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
skx_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
- skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
+ skx_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: 0x%llx "
"Bank %d: %016Lx\n", mce->extcpu, type,
mce->mcgstatus, mce->bank, mce->status);
- skx_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
- skx_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
- skx_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
+ skx_mc_printk(mci, KERN_DEBUG, "TSC 0x%llx ", mce->tsc);
+ skx_mc_printk(mci, KERN_DEBUG, "ADDR 0x%llx ", mce->addr);
+ skx_mc_printk(mci, KERN_DEBUG, "MISC 0x%llx ", mce->misc);
- skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
- "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
+ skx_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:0x%x TIME %llu SOCKET "
+ "%u APIC 0x%x\n", mce->cpuvendor, mce->cpuid,
mce->time, mce->socketid, mce->apicid);
skx_mce_output_error(mci, mce, &res);
@@ -1172,6 +1125,54 @@ static struct notifier_block skx_mce_dec = {
.priority = MCE_PRIO_EDAC,
};
+#ifdef CONFIG_EDAC_DEBUG
+/*
+ * Debug feature.
+ * Exercise the address decode logic by writing an address to
+ * /sys/kernel/debug/edac/skx_test/addr.
+ */
+static struct dentry *skx_test;
+
+static int debugfs_u64_set(void *data, u64 val)
+{
+ struct mce m;
+
+ pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
+
+ memset(&m, 0, sizeof(m));
+ /* ADDRV + MemRd + Unknown channel */
+ m.status = MCI_STATUS_ADDRV + 0x90;
+ /* One corrected error */
+ m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
+ m.addr = val;
+ skx_mce_check_error(NULL, 0, &m);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
+
+static void setup_skx_debug(void)
+{
+ skx_test = edac_debugfs_create_dir("skx_test");
+ if (!skx_test)
+ return;
+
+ if (!edac_debugfs_create_file("addr", 0200, skx_test,
+ NULL, &fops_u64_wo)) {
+ debugfs_remove(skx_test);
+ skx_test = NULL;
+ }
+}
+
+static void teardown_skx_debug(void)
+{
+ debugfs_remove_recursive(skx_test);
+}
+#else
+static void setup_skx_debug(void) {}
+static void teardown_skx_debug(void) {}
+#endif /*CONFIG_EDAC_DEBUG*/
+
static void skx_remove(void)
{
int i, j;
@@ -1291,7 +1292,7 @@ static int __init skx_init(void)
if (rc < 0)
goto fail;
if (rc != m->per_socket * skx_num_sockets) {
- edac_dbg(2, "Expected %d, got %d of %x\n",
+ edac_dbg(2, "Expected %d, got %d of 0x%x\n",
m->per_socket * skx_num_sockets, rc, m->did);
rc = -ENODEV;
goto fail;
@@ -1339,11 +1340,11 @@ static void __exit skx_exit(void)
{
edac_dbg(2, "\n");
mce_unregister_decode_chain(&skx_mce_dec);
- skx_remove();
+ teardown_skx_debug();
if (nvdimm_count)
skx_adxl_put();
kfree(skx_msg);
- teardown_skx_debug();
+ skx_remove();
}
module_init(skx_init);
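
The relocated debugfs hook above injects a fake corrected error through the full MCE decode path. A hedged user-space sketch of exercising it, assuming CONFIG_EDAC_DEBUG is set and debugfs is mounted at /sys/kernel/debug; the attribute parses a decimal value per its "%llu" format.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char buf[] = "1234567890\n";	/* decimal address to decode */
	int fd = open("/sys/kernel/debug/edac/skx_test/addr", O_WRONLY);

	if (fd < 0 || write(fd, buf, sizeof(buf) - 1) < 0)
		perror("skx_test/addr");
	if (fd >= 0)
		close(fd);
	return 0;
}
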
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 0c9c59e2b5a3..2d263382d797 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -22,86 +22,259 @@
#include <linux/edac.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "edac_module.h"
/* Number of cs_rows needed per memory controller */
-#define SYNPS_EDAC_NR_CSROWS 1
+#define SYNPS_EDAC_NR_CSROWS 1
/* Number of channels per memory controller */
-#define SYNPS_EDAC_NR_CHANS 1
+#define SYNPS_EDAC_NR_CHANS 1
/* Granularity of reported error in bytes */
-#define SYNPS_EDAC_ERR_GRAIN 1
+#define SYNPS_EDAC_ERR_GRAIN 1
-#define SYNPS_EDAC_MSG_SIZE 256
+#define SYNPS_EDAC_MSG_SIZE 256
-#define SYNPS_EDAC_MOD_STRING "synps_edac"
-#define SYNPS_EDAC_MOD_VER "1"
+#define SYNPS_EDAC_MOD_STRING "synps_edac"
+#define SYNPS_EDAC_MOD_VER "1"
/* Synopsys DDR memory controller registers that are relevant to ECC */
-#define CTRL_OFST 0x0
-#define T_ZQ_OFST 0xA4
+#define CTRL_OFST 0x0
+#define T_ZQ_OFST 0xA4
/* ECC control register */
-#define ECC_CTRL_OFST 0xC4
+#define ECC_CTRL_OFST 0xC4
/* ECC log register */
-#define CE_LOG_OFST 0xC8
+#define CE_LOG_OFST 0xC8
/* ECC address register */
-#define CE_ADDR_OFST 0xCC
+#define CE_ADDR_OFST 0xCC
/* ECC data[31:0] register */
-#define CE_DATA_31_0_OFST 0xD0
+#define CE_DATA_31_0_OFST 0xD0
/* Uncorrectable error info registers */
-#define UE_LOG_OFST 0xDC
-#define UE_ADDR_OFST 0xE0
-#define UE_DATA_31_0_OFST 0xE4
+#define UE_LOG_OFST 0xDC
+#define UE_ADDR_OFST 0xE0
+#define UE_DATA_31_0_OFST 0xE4
-#define STAT_OFST 0xF0
-#define SCRUB_OFST 0xF4
+#define STAT_OFST 0xF0
+#define SCRUB_OFST 0xF4
/* Control register bit field definitions */
-#define CTRL_BW_MASK 0xC
-#define CTRL_BW_SHIFT 2
+#define CTRL_BW_MASK 0xC
+#define CTRL_BW_SHIFT 2
-#define DDRCTL_WDTH_16 1
-#define DDRCTL_WDTH_32 0
+#define DDRCTL_WDTH_16 1
+#define DDRCTL_WDTH_32 0
/* ZQ register bit field definitions */
-#define T_ZQ_DDRMODE_MASK 0x2
+#define T_ZQ_DDRMODE_MASK 0x2
/* ECC control register bit field definitions */
-#define ECC_CTRL_CLR_CE_ERR 0x2
-#define ECC_CTRL_CLR_UE_ERR 0x1
+#define ECC_CTRL_CLR_CE_ERR 0x2
+#define ECC_CTRL_CLR_UE_ERR 0x1
/* ECC correctable/uncorrectable error log register definitions */
-#define LOG_VALID 0x1
-#define CE_LOG_BITPOS_MASK 0xFE
-#define CE_LOG_BITPOS_SHIFT 1
+#define LOG_VALID 0x1
+#define CE_LOG_BITPOS_MASK 0xFE
+#define CE_LOG_BITPOS_SHIFT 1
/* ECC correctable/uncorrectable error address register definitions */
-#define ADDR_COL_MASK 0xFFF
-#define ADDR_ROW_MASK 0xFFFF000
-#define ADDR_ROW_SHIFT 12
-#define ADDR_BANK_MASK 0x70000000
-#define ADDR_BANK_SHIFT 28
+#define ADDR_COL_MASK 0xFFF
+#define ADDR_ROW_MASK 0xFFFF000
+#define ADDR_ROW_SHIFT 12
+#define ADDR_BANK_MASK 0x70000000
+#define ADDR_BANK_SHIFT 28
/* ECC statistic register definitions */
-#define STAT_UECNT_MASK 0xFF
-#define STAT_CECNT_MASK 0xFF00
-#define STAT_CECNT_SHIFT 8
+#define STAT_UECNT_MASK 0xFF
+#define STAT_CECNT_MASK 0xFF00
+#define STAT_CECNT_SHIFT 8
/* ECC scrub register definitions */
-#define SCRUB_MODE_MASK 0x7
-#define SCRUB_MODE_SECDED 0x4
+#define SCRUB_MODE_MASK 0x7
+#define SCRUB_MODE_SECDED 0x4
+
+/* DDR ECC Quirks */
+#define DDR_ECC_INTR_SUPPORT BIT(0)
+#define DDR_ECC_DATA_POISON_SUPPORT BIT(1)
+
+/* ZynqMP Enhanced DDR memory controller registers that are relevant to ECC */
+/* ECC Configuration Registers */
+#define ECC_CFG0_OFST 0x70
+#define ECC_CFG1_OFST 0x74
+
+/* ECC Status Register */
+#define ECC_STAT_OFST 0x78
+
+/* ECC Clear Register */
+#define ECC_CLR_OFST 0x7C
+
+/* ECC Error count Register */
+#define ECC_ERRCNT_OFST 0x80
+
+/* ECC Corrected Error Address Register */
+#define ECC_CEADDR0_OFST 0x84
+#define ECC_CEADDR1_OFST 0x88
+
+/* ECC Syndrome Registers */
+#define ECC_CSYND0_OFST 0x8C
+#define ECC_CSYND1_OFST 0x90
+#define ECC_CSYND2_OFST 0x94
+
+/* ECC Bit Mask0 Address Register */
+#define ECC_BITMASK0_OFST 0x98
+#define ECC_BITMASK1_OFST 0x9C
+#define ECC_BITMASK2_OFST 0xA0
+
+/* ECC UnCorrected Error Address Register */
+#define ECC_UEADDR0_OFST 0xA4
+#define ECC_UEADDR1_OFST 0xA8
+
+/* ECC UnCorrected Error Syndrome Registers */
+#define ECC_UESYND0_OFST 0xAC
+#define ECC_UESYND1_OFST 0xB0
+#define ECC_UESYND2_OFST 0xB4
+
+/* ECC Poison Address Reg */
+#define ECC_POISON0_OFST 0xB8
+#define ECC_POISON1_OFST 0xBC
+
+#define ECC_ADDRMAP0_OFFSET 0x200
+
+/* Control register bitfield definitions */
+#define ECC_CTRL_BUSWIDTH_MASK 0x3000
+#define ECC_CTRL_BUSWIDTH_SHIFT 12
+#define ECC_CTRL_CLR_CE_ERRCNT BIT(2)
+#define ECC_CTRL_CLR_UE_ERRCNT BIT(3)
+
+/* DDR Control Register width definitions */
+#define DDRCTL_EWDTH_16 2
+#define DDRCTL_EWDTH_32 1
+#define DDRCTL_EWDTH_64 0
+
+/* ECC status register definitions */
+#define ECC_STAT_UECNT_MASK 0xF0000
+#define ECC_STAT_UECNT_SHIFT 16
+#define ECC_STAT_CECNT_MASK 0xF00
+#define ECC_STAT_CECNT_SHIFT 8
+#define ECC_STAT_BITNUM_MASK 0x7F
+
+/* DDR QOS Interrupt register definitions */
+#define DDR_QOS_IRQ_STAT_OFST 0x20200
+#define DDR_QOSUE_MASK 0x4
+#define DDR_QOSCE_MASK 0x2
+#define ECC_CE_UE_INTR_MASK 0x6
+#define DDR_QOS_IRQ_EN_OFST 0x20208
+#define DDR_QOS_IRQ_DB_OFST 0x2020C
+
+/* ECC Corrected Error Register Mask and Shifts */
+#define ECC_CEADDR0_RW_MASK 0x3FFFF
+#define ECC_CEADDR0_RNK_MASK BIT(24)
+#define ECC_CEADDR1_BNKGRP_MASK 0x3000000
+#define ECC_CEADDR1_BNKNR_MASK 0x70000
+#define ECC_CEADDR1_BLKNR_MASK 0xFFF
+#define ECC_CEADDR1_BNKGRP_SHIFT 24
+#define ECC_CEADDR1_BNKNR_SHIFT 16
+
+/* ECC Poison register shifts */
+#define ECC_POISON0_RANK_SHIFT 24
+#define ECC_POISON0_RANK_MASK BIT(24)
+#define ECC_POISON0_COLUMN_SHIFT 0
+#define ECC_POISON0_COLUMN_MASK 0xFFF
+#define ECC_POISON1_BG_SHIFT 28
+#define ECC_POISON1_BG_MASK 0x30000000
+#define ECC_POISON1_BANKNR_SHIFT 24
+#define ECC_POISON1_BANKNR_MASK 0x7000000
+#define ECC_POISON1_ROW_SHIFT 0
+#define ECC_POISON1_ROW_MASK 0x3FFFF
+
+/* DDR Memory type defines */
+#define MEM_TYPE_DDR3 0x1
+#define MEM_TYPE_LPDDR3 0x8
+#define MEM_TYPE_DDR2 0x4
+#define MEM_TYPE_DDR4 0x10
+#define MEM_TYPE_LPDDR4 0x20
+
+/* DDRC Software control register */
+#define DDRC_SWCTL 0x320
+
+/* DDRC ECC CE & UE poison mask */
+#define ECC_CEPOISON_MASK 0x3
+#define ECC_UEPOISON_MASK 0x1
+
+/* DDRC Device config masks */
+#define DDRC_MSTR_CFG_MASK 0xC0000000
+#define DDRC_MSTR_CFG_SHIFT 30
+#define DDRC_MSTR_CFG_X4_MASK 0x0
+#define DDRC_MSTR_CFG_X8_MASK 0x1
+#define DDRC_MSTR_CFG_X16_MASK 0x2
+#define DDRC_MSTR_CFG_X32_MASK 0x3
+
+#define DDR_MAX_ROW_SHIFT 18
+#define DDR_MAX_COL_SHIFT 14
+#define DDR_MAX_BANK_SHIFT 3
+#define DDR_MAX_BANKGRP_SHIFT 2
+
+#define ROW_MAX_VAL_MASK 0xF
+#define COL_MAX_VAL_MASK 0xF
+#define BANK_MAX_VAL_MASK 0x1F
+#define BANKGRP_MAX_VAL_MASK 0x1F
+#define RANK_MAX_VAL_MASK 0x1F
+
+#define ROW_B0_BASE 6
+#define ROW_B1_BASE 7
+#define ROW_B2_BASE 8
+#define ROW_B3_BASE 9
+#define ROW_B4_BASE 10
+#define ROW_B5_BASE 11
+#define ROW_B6_BASE 12
+#define ROW_B7_BASE 13
+#define ROW_B8_BASE 14
+#define ROW_B9_BASE 15
+#define ROW_B10_BASE 16
+#define ROW_B11_BASE 17
+#define ROW_B12_BASE 18
+#define ROW_B13_BASE 19
+#define ROW_B14_BASE 20
+#define ROW_B15_BASE 21
+#define ROW_B16_BASE 22
+#define ROW_B17_BASE 23
+
+#define COL_B2_BASE 2
+#define COL_B3_BASE 3
+#define COL_B4_BASE 4
+#define COL_B5_BASE 5
+#define COL_B6_BASE 6
+#define COL_B7_BASE 7
+#define COL_B8_BASE 8
+#define COL_B9_BASE 9
+#define COL_B10_BASE 10
+#define COL_B11_BASE 11
+#define COL_B12_BASE 12
+#define COL_B13_BASE 13
+
+#define BANK_B0_BASE 2
+#define BANK_B1_BASE 3
+#define BANK_B2_BASE 4
+
+#define BANKGRP_B0_BASE 2
+#define BANKGRP_B1_BASE 3
+
+#define RANK_B0_BASE 6
/**
- * struct ecc_error_info - ECC error log information
- * @row: Row number
- * @col: Column number
- * @bank: Bank number
- * @bitpos: Bit position
- * @data: Data causing the error
+ * struct ecc_error_info - ECC error log information.
+ * @row: Row number.
+ * @col: Column number.
+ * @bank: Bank number.
+ * @bitpos: Bit position.
+ * @data: Data causing the error.
+ * @bankgrpnr: Bank group number.
+ * @blknr: Block number.
*/
struct ecc_error_info {
u32 row;
@@ -109,14 +282,16 @@ struct ecc_error_info {
u32 bank;
u32 bitpos;
u32 data;
+ u32 bankgrpnr;
+ u32 blknr;
};
/**
- * struct synps_ecc_status - ECC status information to report
- * @ce_cnt: Correctable error count
- * @ue_cnt: Uncorrectable error count
- * @ceinfo: Correctable error log information
- * @ueinfo: Uncorrectable error log information
+ * struct synps_ecc_status - ECC status information to report.
+ * @ce_cnt: Correctable error count.
+ * @ue_cnt: Uncorrectable error count.
+ * @ceinfo: Correctable error log information.
+ * @ueinfo: Uncorrectable error log information.
*/
struct synps_ecc_status {
u32 ce_cnt;
@@ -126,34 +301,67 @@ struct synps_ecc_status {
};
/**
- * struct synps_edac_priv - DDR memory controller private instance data
- * @baseaddr: Base address of the DDR controller
- * @message: Buffer for framing the event specific info
- * @stat: ECC status information
- * @ce_cnt: Correctable Error count
- * @ue_cnt: Uncorrectable Error count
+ * struct synps_edac_priv - DDR memory controller private instance data.
+ * @baseaddr: Base address of the DDR controller.
+ * @message: Buffer for framing the event specific info.
+ * @stat: ECC status information.
+ * @p_data: Platform data.
+ * @ce_cnt: Correctable Error count.
+ * @ue_cnt: Uncorrectable Error count.
+ * @poison_addr: Data poison address.
+ * @row_shift: Bit shifts for row bit.
+ * @col_shift: Bit shifts for column bit.
+ * @bank_shift: Bit shifts for bank bit.
+ * @bankgrp_shift: Bit shifts for bank group bit.
+ * @rank_shift: Bit shifts for rank bit.
*/
struct synps_edac_priv {
void __iomem *baseaddr;
char message[SYNPS_EDAC_MSG_SIZE];
struct synps_ecc_status stat;
+ const struct synps_platform_data *p_data;
u32 ce_cnt;
u32 ue_cnt;
+#ifdef CONFIG_EDAC_DEBUG
+ ulong poison_addr;
+ u32 row_shift[18];
+ u32 col_shift[14];
+ u32 bank_shift[3];
+ u32 bankgrp_shift[2];
+ u32 rank_shift[1];
+#endif
};
/**
- * synps_edac_geterror_info - Get the current ecc error info
- * @base: Pointer to the base address of the ddr memory controller
- * @p: Pointer to the synopsys ecc status structure
- *
- * Determines there is any ecc error or not
+ * struct synps_platform_data - synps platform data structure.
+ * @get_error_info: Get EDAC error info.
+ * @get_mtype: Get mtype.
+ * @get_dtype: Get dtype.
+ * @get_ecc_state: Get ECC state.
+ * @quirks: Quirk flags to differentiate hardware variants.
+ */
+struct synps_platform_data {
+ int (*get_error_info)(struct synps_edac_priv *priv);
+ enum mem_type (*get_mtype)(const void __iomem *base);
+ enum dev_type (*get_dtype)(const void __iomem *base);
+ bool (*get_ecc_state)(void __iomem *base);
+ int quirks;
+};
+
+/**
+ * zynq_get_error_info - Get the current ECC error info.
+ * @priv: DDR memory controller private instance data.
*
- * Return: one if there is no error otherwise returns zero
+ * Return: one if there is no error, otherwise zero.
*/
-static int synps_edac_geterror_info(void __iomem *base,
- struct synps_ecc_status *p)
+static int zynq_get_error_info(struct synps_edac_priv *priv)
{
+ struct synps_ecc_status *p;
u32 regval, clearval = 0;
+ void __iomem *base;
+
+ base = priv->baseaddr;
+ p = &priv->stat;
regval = readl(base + STAT_OFST);
if (!regval)
@@ -172,7 +380,7 @@ static int synps_edac_geterror_info(void __iomem *base,
p->ceinfo.col = regval & ADDR_COL_MASK;
p->ceinfo.bank = (regval & ADDR_BANK_MASK) >> ADDR_BANK_SHIFT;
p->ceinfo.data = readl(base + CE_DATA_31_0_OFST);
- edac_dbg(3, "ce bit position: %d data: %d\n", p->ceinfo.bitpos,
+ edac_dbg(3, "CE bit position: %d data: %d\n", p->ceinfo.bitpos,
p->ceinfo.data);
clearval = ECC_CTRL_CLR_CE_ERR;
@@ -196,23 +404,98 @@ out:
}
/**
- * synps_edac_handle_error - Handle controller error types CE and UE
- * @mci: Pointer to the edac memory controller instance
- * @p: Pointer to the synopsys ecc status structure
+ * zynqmp_get_error_info - Get the current ECC error info.
+ * @priv: DDR memory controller private instance data.
*
- * Handles the controller ECC correctable and un correctable error.
+ * Return: one if there is no error, otherwise zero.
*/
-static void synps_edac_handle_error(struct mem_ctl_info *mci,
- struct synps_ecc_status *p)
+static int zynqmp_get_error_info(struct synps_edac_priv *priv)
+{
+ struct synps_ecc_status *p;
+ u32 regval, clearval = 0;
+ void __iomem *base;
+
+ base = priv->baseaddr;
+ p = &priv->stat;
+
+ regval = readl(base + ECC_STAT_OFST);
+ if (!regval)
+ return 1;
+
+ p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
+ p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
+ if (!p->ce_cnt)
+ goto ue_err;
+
+ p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
+
+ regval = readl(base + ECC_CEADDR0_OFST);
+ p->ceinfo.row = (regval & ECC_CEADDR0_RW_MASK);
+ regval = readl(base + ECC_CEADDR1_OFST);
+ p->ceinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
+ ECC_CEADDR1_BNKNR_SHIFT;
+ p->ceinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
+ ECC_CEADDR1_BNKGRP_SHIFT;
+ p->ceinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
+ p->ceinfo.data = readl(base + ECC_CSYND0_OFST);
+ edac_dbg(2, "ECCCSYN0: 0x%08X ECCCSYN1: 0x%08X ECCCSYN2: 0x%08X\n",
+ readl(base + ECC_CSYND0_OFST), readl(base + ECC_CSYND1_OFST),
+ readl(base + ECC_CSYND2_OFST));
+ue_err:
+ if (!p->ue_cnt)
+ goto out;
+
+ regval = readl(base + ECC_UEADDR0_OFST);
+ p->ueinfo.row = (regval & ECC_CEADDR0_RW_MASK);
+ regval = readl(base + ECC_UEADDR1_OFST);
+ p->ueinfo.bankgrpnr = (regval & ECC_CEADDR1_BNKGRP_MASK) >>
+ ECC_CEADDR1_BNKGRP_SHIFT;
+ p->ueinfo.bank = (regval & ECC_CEADDR1_BNKNR_MASK) >>
+ ECC_CEADDR1_BNKNR_SHIFT;
+ p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
+ p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
+out:
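+ /* Clear CE/UE status and counters, then write zero to release the clear bits. */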
+ clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
+ clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
+ writel(clearval, base + ECC_CLR_OFST);
+ writel(0x0, base + ECC_CLR_OFST);
+
+ return 0;
+}
+
+/**
+ * handle_error - Handle Correctable and Uncorrectable errors.
+ * @mci: EDAC memory controller instance.
+ * @p: Synopsys ECC status structure.
+ *
+ * Handles ECC correctable and uncorrectable errors.
+ */
+static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
{
struct synps_edac_priv *priv = mci->pvt_info;
struct ecc_error_info *pinf;
if (p->ce_cnt) {
pinf = &p->ceinfo;
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type :%s Row %d Bank %d Col %d ",
- "CE", pinf->row, pinf->bank, pinf->col);
+ /* Format the whole message in one call; back-to-back snprintf()s would each restart at the buffer head. */
+ if (!priv->p_data->quirks) {
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+ "DDR ECC error type:%s Row %d Bank %d Col %d "
+ "Bit Position: %d Data: 0x%08x\n",
+ "CE", pinf->row, pinf->bank, pinf->col,
+ pinf->bitpos, pinf->data);
+ } else {
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+ "DDR ECC error type:%s Row %d Bank %d Col %d "
+ "BankGroup Number %d Block Number %d "
+ "Bit Position: %d Data: 0x%08x\n",
+ "CE", pinf->row, pinf->bank, pinf->col,
+ pinf->bankgrpnr, pinf->blknr,
+ pinf->bitpos, pinf->data);
+ }
+
+
edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
p->ce_cnt, 0, 0, 0, 0, 0, -1,
priv->message, "");
@@ -220,9 +503,19 @@ static void synps_edac_handle_error(struct mem_ctl_info *mci,
if (p->ue_cnt) {
pinf = &p->ueinfo;
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type :%s Row %d Bank %d Col %d ",
- "UE", pinf->row, pinf->bank, pinf->col);
+ if (!priv->p_data->quirks) {
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+ "DDR ECC error type:%s Row %d Bank %d Col %d ",
+ "UE", pinf->row, pinf->bank, pinf->col);
+ } else {
+ snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
+ "DDR ECC error type:%s Row %d Bank %d Col %d "
+ "BankGroup Number %d Block Number %d",
+ "UE", pinf->row, pinf->bank, pinf->col,
+ pinf->bankgrpnr, pinf->blknr);
+ }
+
edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
p->ue_cnt, 0, 0, 0, 0, 0, -1,
priv->message, "");
@@ -232,38 +525,78 @@ static void synps_edac_handle_error(struct mem_ctl_info *mci,
}
/**
- * synps_edac_check - Check controller for ECC errors
- * @mci: Pointer to the edac memory controller instance
+ * intr_handler - Interrupt Handler for ECC interrupts.
+ * @irq: IRQ number.
+ * @dev_id: Device ID.
*
- * Used to check and post ECC errors. Called by the polling thread
+ * Return: IRQ_NONE if the interrupt is not set, otherwise IRQ_HANDLED.
*/
-static void synps_edac_check(struct mem_ctl_info *mci)
+static irqreturn_t intr_handler(int irq, void *dev_id)
{
- struct synps_edac_priv *priv = mci->pvt_info;
+ const struct synps_platform_data *p_data;
+ struct mem_ctl_info *mci = dev_id;
+ struct synps_edac_priv *priv;
+ int status, regval;
+
+ priv = mci->pvt_info;
+ p_data = priv->p_data;
+
+ regval = readl(priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
+ regval &= (DDR_QOSCE_MASK | DDR_QOSUE_MASK);
+ if (!(regval & ECC_CE_UE_INTR_MASK))
+ return IRQ_NONE;
+
+ status = p_data->get_error_info(priv);
+ if (status)
+ return IRQ_NONE;
+
+ priv->ce_cnt += priv->stat.ce_cnt;
+ priv->ue_cnt += priv->stat.ue_cnt;
+ handle_error(mci, &priv->stat);
+
+ edac_dbg(3, "Total error count CE %d UE %d\n",
+ priv->ce_cnt, priv->ue_cnt);
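+ /* Acknowledge only the CE/UE interrupt bits sampled at entry. */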
+ writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
+ return IRQ_HANDLED;
+}
+
+/**
+ * check_errors - Check controller for ECC errors.
+ * @mci: EDAC memory controller instance.
+ *
+ * Check and post ECC errors. Called by the polling thread.
+ */
+static void check_errors(struct mem_ctl_info *mci)
+{
+ const struct synps_platform_data *p_data;
+ struct synps_edac_priv *priv;
int status;
- status = synps_edac_geterror_info(priv->baseaddr, &priv->stat);
+ priv = mci->pvt_info;
+ p_data = priv->p_data;
+
+ status = p_data->get_error_info(priv);
if (status)
return;
priv->ce_cnt += priv->stat.ce_cnt;
priv->ue_cnt += priv->stat.ue_cnt;
- synps_edac_handle_error(mci, &priv->stat);
+ handle_error(mci, &priv->stat);
- edac_dbg(3, "Total error count ce %d ue %d\n",
+ edac_dbg(3, "Total error count CE %d UE %d\n",
priv->ce_cnt, priv->ue_cnt);
}
/**
- * synps_edac_get_dtype - Return the controller memory width
- * @base: Pointer to the ddr memory controller base address
+ * zynq_get_dtype - Return the controller memory width.
+ * @base: DDR memory controller base address.
*
* Get the EDAC device type width appropriate for the current controller
* configuration.
*
* Return: a device type width enumeration.
*/
-static enum dev_type synps_edac_get_dtype(const void __iomem *base)
+static enum dev_type zynq_get_dtype(const void __iomem *base)
{
enum dev_type dt;
u32 width;
@@ -286,36 +619,93 @@ static enum dev_type synps_edac_get_dtype(const void __iomem *base)
}
/**
- * synps_edac_get_eccstate - Return the controller ecc enable/disable status
- * @base: Pointer to the ddr memory controller base address
+ * zynqmp_get_dtype - Return the controller memory width.
+ * @base: DDR memory controller base address.
+ *
+ * Get the EDAC device type width appropriate for the current controller
+ * configuration.
+ *
+ * Return: a device type width enumeration.
+ */
+static enum dev_type zynqmp_get_dtype(const void __iomem *base)
+{
+ enum dev_type dt;
+ u32 width;
+
+ width = readl(base + CTRL_OFST);
+ width = (width & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
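+ /* Bus width field: 0 = full (64-bit), 1 = half (32-bit), 2 = quarter (16-bit). */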
+ switch (width) {
+ case DDRCTL_EWDTH_16:
+ dt = DEV_X2;
+ break;
+ case DDRCTL_EWDTH_32:
+ dt = DEV_X4;
+ break;
+ case DDRCTL_EWDTH_64:
+ dt = DEV_X8;
+ break;
+ default:
+ dt = DEV_UNKNOWN;
+ }
+
+ return dt;
+}
+
+/**
+ * zynq_get_ecc_state - Return the controller ECC enable/disable status.
+ * @base: DDR memory controller base address.
*
- * Get the ECC enable/disable status for the controller
+ * Get the ECC enable/disable status of the controller.
*
- * Return: a ecc status boolean i.e true/false - enabled/disabled.
+ * Return: true if enabled, otherwise false.
*/
-static bool synps_edac_get_eccstate(void __iomem *base)
+static bool zynq_get_ecc_state(void __iomem *base)
{
enum dev_type dt;
u32 ecctype;
- bool state = false;
- dt = synps_edac_get_dtype(base);
+ dt = zynq_get_dtype(base);
if (dt == DEV_UNKNOWN)
- return state;
+ return false;
ecctype = readl(base + SCRUB_OFST) & SCRUB_MODE_MASK;
if ((ecctype == SCRUB_MODE_SECDED) && (dt == DEV_X2))
- state = true;
+ return true;
- return state;
+ return false;
}
/**
- * synps_edac_get_memsize - reads the size of the attached memory device
+ * zynqmp_get_ecc_state - Return the controller ECC enable/disable status.
+ * @base: DDR memory controller base address.
*
- * Return: the memory size in bytes
+ * Get the ECC enable/disable status for the controller.
+ *
+ * Return: true if enabled, otherwise false.
*/
-static u32 synps_edac_get_memsize(void)
+static bool zynqmp_get_ecc_state(void __iomem *base)
+{
+ enum dev_type dt;
+ u32 ecctype;
+
+ dt = zynqmp_get_dtype(base);
+ if (dt == DEV_UNKNOWN)
+ return false;
+
+ ecctype = readl(base + ECC_CFG0_OFST) & SCRUB_MODE_MASK;
+ if ((ecctype == SCRUB_MODE_SECDED) &&
+ ((dt == DEV_X2) || (dt == DEV_X4) || (dt == DEV_X8)))
+ return true;
+
+ return false;
+}
+
+/**
+ * get_memsize - Read the size of the attached memory device.
+ *
+ * Return: the memory size in bytes.
+ */
+static u32 get_memsize(void)
{
struct sysinfo inf;
@@ -325,15 +715,15 @@ static u32 synps_edac_get_memsize(void)
}
/**
- * synps_edac_get_mtype - Returns controller memory type
- * @base: pointer to the synopsys ecc status structure
+ * zynq_get_mtype - Return the controller memory type.
+ * @base: DDR memory controller base address.
*
* Get the EDAC memory type appropriate for the current controller
* configuration.
*
* Return: a memory type enumeration.
*/
-static enum mem_type synps_edac_get_mtype(const void __iomem *base)
+static enum mem_type zynq_get_mtype(const void __iomem *base)
{
enum mem_type mt;
u32 memtype;
@@ -349,54 +739,77 @@ static enum mem_type synps_edac_get_mtype(const void __iomem *base)
}
/**
- * synps_edac_init_csrows - Initialize the cs row data
- * @mci: Pointer to the edac memory controller instance
+ * zynqmp_get_mtype - Return the controller memory type.
+ * @base: DDR memory controller base address.
*
- * Initializes the chip select rows associated with the EDAC memory
- * controller instance
+ * Get the EDAC memory type appropriate for the current controller
+ * configuration.
*
- * Return: Unconditionally 0.
+ * Return: a memory type enumeration.
*/
-static int synps_edac_init_csrows(struct mem_ctl_info *mci)
+static enum mem_type zynqmp_get_mtype(const void __iomem *base)
{
+ enum mem_type mt;
+ u32 memtype;
+
+ memtype = readl(base + CTRL_OFST);
+
+ if ((memtype & MEM_TYPE_DDR3) || (memtype & MEM_TYPE_LPDDR3))
+ mt = MEM_DDR3;
+ else if (memtype & MEM_TYPE_DDR2)
+ mt = MEM_RDDR2;
+ else if ((memtype & MEM_TYPE_LPDDR4) || (memtype & MEM_TYPE_DDR4))
+ mt = MEM_DDR4;
+ else
+ mt = MEM_EMPTY;
+
+ return mt;
+}
+
+/**
+ * init_csrows - Initialize the csrow data.
+ * @mci: EDAC memory controller instance.
+ *
+ * Initialize the chip select rows associated with the EDAC memory
+ * controller instance.
+ */
+static void init_csrows(struct mem_ctl_info *mci)
+{
+ struct synps_edac_priv *priv = mci->pvt_info;
+ const struct synps_platform_data *p_data;
struct csrow_info *csi;
struct dimm_info *dimm;
- struct synps_edac_priv *priv = mci->pvt_info;
- u32 size;
- int row, j;
+ u32 size, row;
+ int j;
+
+ p_data = priv->p_data;
for (row = 0; row < mci->nr_csrows; row++) {
csi = mci->csrows[row];
- size = synps_edac_get_memsize();
+ size = get_memsize();
for (j = 0; j < csi->nr_channels; j++) {
- dimm = csi->channels[j]->dimm;
- dimm->edac_mode = EDAC_FLAG_SECDED;
- dimm->mtype = synps_edac_get_mtype(priv->baseaddr);
- dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
- dimm->grain = SYNPS_EDAC_ERR_GRAIN;
- dimm->dtype = synps_edac_get_dtype(priv->baseaddr);
+ dimm = csi->channels[j]->dimm;
+ dimm->edac_mode = EDAC_FLAG_SECDED;
+ dimm->mtype = p_data->get_mtype(priv->baseaddr);
+ dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
+ dimm->grain = SYNPS_EDAC_ERR_GRAIN;
+ dimm->dtype = p_data->get_dtype(priv->baseaddr);
}
}
-
- return 0;
}
/**
- * synps_edac_mc_init - Initialize driver instance
- * @mci: Pointer to the edac memory controller instance
- * @pdev: Pointer to the platform_device struct
+ * mc_init - Initialize one driver instance.
+ * @mci: EDAC memory controller instance.
+ * @pdev: platform device.
*
- * Performs initialization of the EDAC memory controller instance and
+ * Perform initialization of the EDAC memory controller instance and
* related driver-private data associated with the memory controller the
* instance is bound to.
- *
- * Return: Always zero.
*/
-static int synps_edac_mc_init(struct mem_ctl_info *mci,
- struct platform_device *pdev)
+static void mc_init(struct mem_ctl_info *mci, struct platform_device *pdev)
{
- int status;
struct synps_edac_priv *priv;
mci->pdev = &pdev->dev;
@@ -414,39 +827,491 @@ static int synps_edac_mc_init(struct mem_ctl_info *mci,
mci->dev_name = SYNPS_EDAC_MOD_STRING;
mci->mod_name = SYNPS_EDAC_MOD_VER;
- edac_op_state = EDAC_OPSTATE_POLL;
- mci->edac_check = synps_edac_check;
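+ /* Interrupt-capable controllers report errors from the IRQ handler; others fall back to polling. */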
+ if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
+ edac_op_state = EDAC_OPSTATE_INT;
+ } else {
+ edac_op_state = EDAC_OPSTATE_POLL;
+ mci->edac_check = check_errors;
+ }
+
mci->ctl_page_to_phys = NULL;
- status = synps_edac_init_csrows(mci);
+ init_csrows(mci);
+}
+
+static void enable_intr(struct synps_edac_priv *priv)
+{
+ /* Enable UE/CE Interrupts */
+ writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+ priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+}
+
+static void disable_intr(struct synps_edac_priv *priv)
+{
+ /* Disable UE/CE Interrupts */
+ writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
+ priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
+}
+
+static int setup_irq(struct mem_ctl_info *mci,
+ struct platform_device *pdev)
+{
+ struct synps_edac_priv *priv = mci->pvt_info;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed to get IRQ from DT: %d\n", irq);
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, intr_handler,
+ 0, dev_name(&pdev->dev), mci);
+ if (ret < 0) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ enable_intr(priv);
+
+ return 0;
+}
+
+static const struct synps_platform_data zynq_edac_def = {
+ .get_error_info = zynq_get_error_info,
+ .get_mtype = zynq_get_mtype,
+ .get_dtype = zynq_get_dtype,
+ .get_ecc_state = zynq_get_ecc_state,
+ .quirks = 0,
+};
+
+static const struct synps_platform_data zynqmp_edac_def = {
+ .get_error_info = zynqmp_get_error_info,
+ .get_mtype = zynqmp_get_mtype,
+ .get_dtype = zynqmp_get_dtype,
+ .get_ecc_state = zynqmp_get_ecc_state,
+ .quirks = (DDR_ECC_INTR_SUPPORT
+#ifdef CONFIG_EDAC_DEBUG
+ | DDR_ECC_DATA_POISON_SUPPORT
+#endif
+ ),
+};
+
+static const struct of_device_id synps_edac_match[] = {
+ {
+ .compatible = "xlnx,zynq-ddrc-a05",
+ .data = (void *)&zynq_edac_def
+ },
+ {
+ .compatible = "xlnx,zynqmp-ddrc-2.40a",
+ .data = (void *)&zynqmp_edac_def
+ },
+ {
+ /* end of table */
+ }
+};
+
+MODULE_DEVICE_TABLE(of, synps_edac_match);
+
+#ifdef CONFIG_EDAC_DEBUG
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+/**
+ * ddr_poison_setup - Update poison registers.
+ * @priv: DDR memory controller private instance data.
+ *
+ * Update poison registers as per DDR mapping.
+ * Return: none.
+ */
+static void ddr_poison_setup(struct synps_edac_priv *priv)
+{
+ int col = 0, row = 0, bank = 0, bankgrp = 0, rank = 0, regval;
+ int index;
+ ulong hif_addr = 0;
+
+ hif_addr = priv->poison_addr >> 3;
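+ /* The HIF address drops the three byte-offset bits of the 64-bit bus; each row/col/bank bit is then picked out with the ADDRMAP-derived shifts below. */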
+
+ for (index = 0; index < DDR_MAX_ROW_SHIFT; index++) {
+ if (priv->row_shift[index])
+ row |= (((hif_addr >> priv->row_shift[index]) &
+ BIT(0)) << index);
+ else
+ break;
+ }
+
+ for (index = 0; index < DDR_MAX_COL_SHIFT; index++) {
+ if (priv->col_shift[index] || index < 3)
+ col |= (((hif_addr >> priv->col_shift[index]) &
+ BIT(0)) << index);
+ else
+ break;
+ }
+
+ for (index = 0; index < DDR_MAX_BANK_SHIFT; index++) {
+ if (priv->bank_shift[index])
+ bank |= (((hif_addr >> priv->bank_shift[index]) &
+ BIT(0)) << index);
+ else
+ break;
+ }
+
+ for (index = 0; index < DDR_MAX_BANKGRP_SHIFT; index++) {
+ if (priv->bankgrp_shift[index])
+ bankgrp |= (((hif_addr >> priv->bankgrp_shift[index])
+ & BIT(0)) << index);
+ else
+ break;
+ }
+
+ if (priv->rank_shift[0])
+ rank = (hif_addr >> priv->rank_shift[0]) & BIT(0);
+
+ regval = (rank << ECC_POISON0_RANK_SHIFT) & ECC_POISON0_RANK_MASK;
+ regval |= (col << ECC_POISON0_COLUMN_SHIFT) & ECC_POISON0_COLUMN_MASK;
+ writel(regval, priv->baseaddr + ECC_POISON0_OFST);
+
+ regval = (bankgrp << ECC_POISON1_BG_SHIFT) & ECC_POISON1_BG_MASK;
+ regval |= (bank << ECC_POISON1_BANKNR_SHIFT) & ECC_POISON1_BANKNR_MASK;
+ regval |= (row << ECC_POISON1_ROW_SHIFT) & ECC_POISON1_ROW_MASK;
+ writel(regval, priv->baseaddr + ECC_POISON1_OFST);
+}
+
+static ssize_t inject_data_error_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct synps_edac_priv *priv = mci->pvt_info;
+
+ return sprintf(data, "Poison0 Addr: 0x%08x\n\rPoison1 Addr: 0x%08x\n\r"
+ "Error injection Address: 0x%lx\n\r",
+ readl(priv->baseaddr + ECC_POISON0_OFST),
+ readl(priv->baseaddr + ECC_POISON1_OFST),
+ priv->poison_addr);
+}
+
+static ssize_t inject_data_error_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct synps_edac_priv *priv = mci->pvt_info;
+
+ if (kstrtoul(data, 0, &priv->poison_addr))
+ return -EINVAL;
+
+ ddr_poison_setup(priv);
+
+ return count;
+}
+
+static ssize_t inject_data_poison_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct synps_edac_priv *priv = mci->pvt_info;
+
+ return sprintf(data, "Data Poisoning: %s\n\r",
+ (((readl(priv->baseaddr + ECC_CFG1_OFST)) & 0x3) == 0x3)
+ ? ("Correctable Error") : ("UnCorrectable Error"));
+}
+
+static ssize_t inject_data_poison_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
+{
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct synps_edac_priv *priv = mci->pvt_info;
+
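+ /* Clear SWCTL.sw_done so the quasi-dynamic ECC_CFG1 field can be reprogrammed; it is set again below. */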
+ writel(0, priv->baseaddr + DDRC_SWCTL);
+ if (strncmp(data, "CE", 2) == 0)
+ writel(ECC_CEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
+ else
+ writel(ECC_UEPOISON_MASK, priv->baseaddr + ECC_CFG1_OFST);
+ writel(1, priv->baseaddr + DDRC_SWCTL);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(inject_data_error);
+static DEVICE_ATTR_RW(inject_data_poison);
+
+static int edac_create_sysfs_attributes(struct mem_ctl_info *mci)
+{
+ int rc;
+
+ rc = device_create_file(&mci->dev, &dev_attr_inject_data_error);
+ if (rc < 0)
+ return rc;
+ rc = device_create_file(&mci->dev, &dev_attr_inject_data_poison);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+
+static void edac_remove_sysfs_attributes(struct mem_ctl_info *mci)
+{
+ device_remove_file(&mci->dev, &dev_attr_inject_data_error);
+ device_remove_file(&mci->dev, &dev_attr_inject_data_poison);
+}
+
+static void setup_row_address_map(struct synps_edac_priv *priv, u32 *addrmap)
+{
+ u32 addrmap_row_b2_10;
+ int index;
+
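+ /* An ADDRMAP field equal to its mask (0xF) marks the row bit as unused; otherwise it gives the bit's offset from the base HIF position. */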
+ priv->row_shift[0] = (addrmap[5] & ROW_MAX_VAL_MASK) + ROW_B0_BASE;
+ priv->row_shift[1] = ((addrmap[5] >> 8) &
+ ROW_MAX_VAL_MASK) + ROW_B1_BASE;
+
+ addrmap_row_b2_10 = (addrmap[5] >> 16) & ROW_MAX_VAL_MASK;
+ if (addrmap_row_b2_10 != ROW_MAX_VAL_MASK) {
+ for (index = 2; index < 11; index++)
+ priv->row_shift[index] = addrmap_row_b2_10 +
+ index + ROW_B0_BASE;
+
+ } else {
+ priv->row_shift[2] = (addrmap[9] &
+ ROW_MAX_VAL_MASK) + ROW_B2_BASE;
+ priv->row_shift[3] = ((addrmap[9] >> 8) &
+ ROW_MAX_VAL_MASK) + ROW_B3_BASE;
+ priv->row_shift[4] = ((addrmap[9] >> 16) &
+ ROW_MAX_VAL_MASK) + ROW_B4_BASE;
+ priv->row_shift[5] = ((addrmap[9] >> 24) &
+ ROW_MAX_VAL_MASK) + ROW_B5_BASE;
+ priv->row_shift[6] = (addrmap[10] &
+ ROW_MAX_VAL_MASK) + ROW_B6_BASE;
+ priv->row_shift[7] = ((addrmap[10] >> 8) &
+ ROW_MAX_VAL_MASK) + ROW_B7_BASE;
+ priv->row_shift[8] = ((addrmap[10] >> 16) &
+ ROW_MAX_VAL_MASK) + ROW_B8_BASE;
+ priv->row_shift[9] = ((addrmap[10] >> 24) &
+ ROW_MAX_VAL_MASK) + ROW_B9_BASE;
+ priv->row_shift[10] = (addrmap[11] &
+ ROW_MAX_VAL_MASK) + ROW_B10_BASE;
+ }
- return status;
+ priv->row_shift[11] = (((addrmap[5] >> 24) & ROW_MAX_VAL_MASK) ==
+ ROW_MAX_VAL_MASK) ? 0 : (((addrmap[5] >> 24) &
+ ROW_MAX_VAL_MASK) + ROW_B11_BASE);
+ priv->row_shift[12] = ((addrmap[6] & ROW_MAX_VAL_MASK) ==
+ ROW_MAX_VAL_MASK) ? 0 : ((addrmap[6] &
+ ROW_MAX_VAL_MASK) + ROW_B12_BASE);
+ priv->row_shift[13] = (((addrmap[6] >> 8) & ROW_MAX_VAL_MASK) ==
+ ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 8) &
+ ROW_MAX_VAL_MASK) + ROW_B13_BASE);
+ priv->row_shift[14] = (((addrmap[6] >> 16) & ROW_MAX_VAL_MASK) ==
+ ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 16) &
+ ROW_MAX_VAL_MASK) + ROW_B14_BASE);
+ priv->row_shift[15] = (((addrmap[6] >> 24) & ROW_MAX_VAL_MASK) ==
+ ROW_MAX_VAL_MASK) ? 0 : (((addrmap[6] >> 24) &
+ ROW_MAX_VAL_MASK) + ROW_B15_BASE);
+ priv->row_shift[16] = ((addrmap[7] & ROW_MAX_VAL_MASK) ==
+ ROW_MAX_VAL_MASK) ? 0 : ((addrmap[7] &
+ ROW_MAX_VAL_MASK) + ROW_B16_BASE);
+ priv->row_shift[17] = (((addrmap[7] >> 8) & ROW_MAX_VAL_MASK) ==
+ ROW_MAX_VAL_MASK) ? 0 : (((addrmap[7] >> 8) &
+ ROW_MAX_VAL_MASK) + ROW_B17_BASE);
+}
+
+static void setup_column_address_map(struct synps_edac_priv *priv, u32 *addrmap)
+{
+ u32 width, memtype;
+ int index;
+
+ memtype = readl(priv->baseaddr + CTRL_OFST);
+ width = (memtype & ECC_CTRL_BUSWIDTH_MASK) >> ECC_CTRL_BUSWIDTH_SHIFT;
+
+ priv->col_shift[0] = 0;
+ priv->col_shift[1] = 1;
+ priv->col_shift[2] = (addrmap[2] & COL_MAX_VAL_MASK) + COL_B2_BASE;
+ priv->col_shift[3] = ((addrmap[2] >> 8) &
+ COL_MAX_VAL_MASK) + COL_B3_BASE;
+ priv->col_shift[4] = (((addrmap[2] >> 16) & COL_MAX_VAL_MASK) ==
+ COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 16) &
+ COL_MAX_VAL_MASK) + COL_B4_BASE);
+ priv->col_shift[5] = (((addrmap[2] >> 24) & COL_MAX_VAL_MASK) ==
+ COL_MAX_VAL_MASK) ? 0 : (((addrmap[2] >> 24) &
+ COL_MAX_VAL_MASK) + COL_B5_BASE);
+ priv->col_shift[6] = ((addrmap[3] & COL_MAX_VAL_MASK) ==
+ COL_MAX_VAL_MASK) ? 0 : ((addrmap[3] &
+ COL_MAX_VAL_MASK) + COL_B6_BASE);
+ priv->col_shift[7] = (((addrmap[3] >> 8) & COL_MAX_VAL_MASK) ==
+ COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 8) &
+ COL_MAX_VAL_MASK) + COL_B7_BASE);
+ priv->col_shift[8] = (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) ==
+ COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 16) &
+ COL_MAX_VAL_MASK) + COL_B8_BASE);
+ priv->col_shift[9] = (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) ==
+ COL_MAX_VAL_MASK) ? 0 : (((addrmap[3] >> 24) &
+ COL_MAX_VAL_MASK) + COL_B9_BASE);
+ if (width == DDRCTL_EWDTH_64) {
+ if (memtype & MEM_TYPE_LPDDR3) {
+ priv->col_shift[10] = ((addrmap[4] &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ ((addrmap[4] & COL_MAX_VAL_MASK) +
+ COL_B10_BASE);
+ priv->col_shift[11] = (((addrmap[4] >> 8) &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ (((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
+ COL_B11_BASE);
+ } else {
+ priv->col_shift[11] = ((addrmap[4] &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ ((addrmap[4] & COL_MAX_VAL_MASK) +
+ COL_B10_BASE);
+ priv->col_shift[13] = (((addrmap[4] >> 8) &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ (((addrmap[4] >> 8) & COL_MAX_VAL_MASK) +
+ COL_B11_BASE);
+ }
+ } else if (width == DDRCTL_EWDTH_32) {
+ if (memtype & MEM_TYPE_LPDDR3) {
+ priv->col_shift[10] = (((addrmap[3] >> 24) &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
+ COL_B9_BASE);
+ priv->col_shift[11] = ((addrmap[4] &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ ((addrmap[4] & COL_MAX_VAL_MASK) +
+ COL_B10_BASE);
+ } else {
+ priv->col_shift[11] = (((addrmap[3] >> 24) &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
+ COL_B9_BASE);
+ priv->col_shift[13] = ((addrmap[4] &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ ((addrmap[4] & COL_MAX_VAL_MASK) +
+ COL_B10_BASE);
+ }
+ } else {
+ if (memtype & MEM_TYPE_LPDDR3) {
+ priv->col_shift[10] = (((addrmap[3] >> 16) &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
+ COL_B8_BASE);
+ priv->col_shift[11] = (((addrmap[3] >> 24) &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
+ COL_B9_BASE);
+ priv->col_shift[13] = ((addrmap[4] &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ ((addrmap[4] & COL_MAX_VAL_MASK) +
+ COL_B10_BASE);
+ } else {
+ priv->col_shift[11] = (((addrmap[3] >> 16) &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ (((addrmap[3] >> 16) & COL_MAX_VAL_MASK) +
+ COL_B8_BASE);
+ priv->col_shift[13] = (((addrmap[3] >> 24) &
+ COL_MAX_VAL_MASK) == COL_MAX_VAL_MASK) ? 0 :
+ (((addrmap[3] >> 24) & COL_MAX_VAL_MASK) +
+ COL_B9_BASE);
+ }
+ }
+
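+ /* On half/quarter bus widths the HIF address is shifted, so move the recorded column positions up by the width code and clear the vacated slots. */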
+ if (width) {
+ for (index = 9; index > width; index--) {
+ priv->col_shift[index] = priv->col_shift[index - width];
+ priv->col_shift[index - width] = 0;
+ }
+ }
+}
+
+static void setup_bank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
+{
+ priv->bank_shift[0] = (addrmap[1] & BANK_MAX_VAL_MASK) + BANK_B0_BASE;
+ priv->bank_shift[1] = ((addrmap[1] >> 8) &
+ BANK_MAX_VAL_MASK) + BANK_B1_BASE;
+ priv->bank_shift[2] = (((addrmap[1] >> 16) &
+ BANK_MAX_VAL_MASK) == BANK_MAX_VAL_MASK) ? 0 :
+ (((addrmap[1] >> 16) & BANK_MAX_VAL_MASK) +
+ BANK_B2_BASE);
+}
+
+static void setup_bg_address_map(struct synps_edac_priv *priv, u32 *addrmap)
+{
+ priv->bankgrp_shift[0] = (addrmap[8] &
+ BANKGRP_MAX_VAL_MASK) + BANKGRP_B0_BASE;
+ priv->bankgrp_shift[1] = (((addrmap[8] >> 8) & BANKGRP_MAX_VAL_MASK) ==
+ BANKGRP_MAX_VAL_MASK) ? 0 : (((addrmap[8] >> 8)
+ & BANKGRP_MAX_VAL_MASK) + BANKGRP_B1_BASE);
+}
+
+static void setup_rank_address_map(struct synps_edac_priv *priv, u32 *addrmap)
+{
+ priv->rank_shift[0] = ((addrmap[0] & RANK_MAX_VAL_MASK) ==
+ RANK_MAX_VAL_MASK) ? 0 : ((addrmap[0] &
+ RANK_MAX_VAL_MASK) + RANK_B0_BASE);
}
/**
- * synps_edac_mc_probe - Check controller and bind driver
- * @pdev: Pointer to the platform_device struct
+ * setup_address_map - Set Address Map by querying ADDRMAP registers.
+ * @priv: DDR memory controller private instance data.
*
- * Probes a specific controller instance for binding with the driver.
+ * Read the ADDRMAP registers and record the HIF position of every row,
+ * column, bank, bank group and rank bit.
+ *
+ * Return: none.
+ */
+static void setup_address_map(struct synps_edac_priv *priv)
+{
+ u32 addrmap[12];
+ int index;
+
+ for (index = 0; index < 12; index++) {
+ u32 addrmap_offset;
+
+ addrmap_offset = ECC_ADDRMAP0_OFFSET + (index * 4);
+ addrmap[index] = readl(priv->baseaddr + addrmap_offset);
+ }
+
+ setup_row_address_map(priv, addrmap);
+
+ setup_column_address_map(priv, addrmap);
+
+ setup_bank_address_map(priv, addrmap);
+
+ setup_bg_address_map(priv, addrmap);
+
+ setup_rank_address_map(priv, addrmap);
+}
+#endif /* CONFIG_EDAC_DEBUG */
+
+/**
+ * mc_probe - Check controller and bind driver.
+ * @pdev: platform device.
+ *
+ * Probe a specific controller instance for binding with the driver.
*
* Return: 0 if the controller instance was successfully bound to the
* driver; otherwise, < 0 on error.
*/
-static int synps_edac_mc_probe(struct platform_device *pdev)
+static int mc_probe(struct platform_device *pdev)
{
- struct mem_ctl_info *mci;
+ const struct synps_platform_data *p_data;
struct edac_mc_layer layers[2];
struct synps_edac_priv *priv;
- int rc;
- struct resource *res;
+ struct mem_ctl_info *mci;
void __iomem *baseaddr;
+ struct resource *res;
+ int rc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
baseaddr = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(baseaddr))
return PTR_ERR(baseaddr);
- if (!synps_edac_get_eccstate(baseaddr)) {
+ p_data = of_device_get_match_data(&pdev->dev);
+ if (!p_data)
+ return -ENODEV;
+
+ if (!p_data->get_ecc_state(baseaddr)) {
edac_printk(KERN_INFO, EDAC_MC, "ECC not enabled\n");
return -ENXIO;
}
@@ -468,11 +1333,14 @@ static int synps_edac_mc_probe(struct platform_device *pdev)
priv = mci->pvt_info;
priv->baseaddr = baseaddr;
- rc = synps_edac_mc_init(mci, pdev);
- if (rc) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Failed to initialize instance\n");
- goto free_edac_mc;
+ priv->p_data = p_data;
+
+ mc_init(mci, pdev);
+
+ if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT) {
+ rc = setup_irq(mci, pdev);
+ if (rc)
+ goto free_edac_mc;
}
rc = edac_mc_add_mc(mci);
@@ -482,11 +1350,27 @@ static int synps_edac_mc_probe(struct platform_device *pdev)
goto free_edac_mc;
}
+#ifdef CONFIG_EDAC_DEBUG
+ if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT) {
+ if (edac_create_sysfs_attributes(mci)) {
+ edac_printk(KERN_ERR, EDAC_MC,
+ "Failed to create sysfs entries\n");
+ goto free_edac_mc;
+ }
+ }
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,zynqmp-ddrc-2.40a"))
+ setup_address_map(priv);
+#endif
+
/*
* Start capturing the correctable and uncorrectable errors. A write of
* 0 starts the counters.
*/
- writel(0x0, baseaddr + ECC_CTRL_OFST);
+ if (!(priv->p_data->quirks & DDR_ECC_INTR_SUPPORT))
+ writel(0x0, baseaddr + ECC_CTRL_OFST);
+
return rc;
free_edac_mc:
@@ -496,14 +1380,23 @@ free_edac_mc:
}
/**
- * synps_edac_mc_remove - Unbind driver from controller
- * @pdev: Pointer to the platform_device struct
+ * mc_remove - Unbind driver from controller.
+ * @pdev: Platform device.
*
* Return: Unconditionally 0
*/
-static int synps_edac_mc_remove(struct platform_device *pdev)
+static int mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
+ struct synps_edac_priv *priv = mci->pvt_info;
+
+ if (priv->p_data->quirks & DDR_ECC_INTR_SUPPORT)
+ disable_intr(priv);
+
+#ifdef CONFIG_EDAC_DEBUG
+ if (priv->p_data->quirks & DDR_ECC_DATA_POISON_SUPPORT)
+ edac_remove_sysfs_attributes(mci);
+#endif
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
@@ -511,20 +1404,13 @@ static int synps_edac_mc_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id synps_edac_match[] = {
- { .compatible = "xlnx,zynq-ddrc-a05", },
- { /* end of table */ }
-};
-
-MODULE_DEVICE_TABLE(of, synps_edac_match);
-
static struct platform_driver synps_edac_mc_driver = {
.driver = {
.name = "synopsys-edac",
.of_match_table = synps_edac_match,
},
- .probe = synps_edac_mc_probe,
- .remove = synps_edac_mc_remove,
+ .probe = mc_probe,
+ .remove = mc_remove,
};
module_platform_driver(synps_edac_mc_driver);
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index a00934d263c5..23ea1ed409d1 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -38,7 +38,7 @@ static struct ptdump_info efi_ptdump_info = {
.mm = &efi_mm,
.markers = (struct addr_marker[]){
{ 0, "UEFI runtime start" },
- { TASK_SIZE_64, "UEFI runtime end" }
+ { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" }
},
.base_addr = 0,
};
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index cfe87b465819..0f7d97917197 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -259,8 +259,7 @@ static int efi_pstore_write(struct pstore_record *record)
efi_name[i] = name[i];
ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
- !pstore_cannot_block_path(record->reason),
- record->size, record->psi->buf);
+ preemptible(), record->size, record->psi->buf);
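+ /* Only block when the context allows sleeping; panic/NMI dumps take the nonblocking path. */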
if (record->reason == KMSG_DUMP_OOPS)
efivar_run_worker();
@@ -369,7 +368,6 @@ static __init int efivars_pstore_init(void)
return -ENOMEM;
efi_pstore_info.bufsize = 1024;
- spin_lock_init(&efi_pstore_info.buf_lock);
if (pstore_register(&efi_pstore_info)) {
kfree(efi_pstore_info.buf);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index fad7c62cfc0e..4c46ff6f2242 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -602,21 +602,33 @@ int __init efi_apply_persistent_mem_reservations(void)
while (prsv) {
struct linux_efi_memreserve *rsv;
-
- /* reserve the entry itself */
- memblock_reserve(prsv, sizeof(*rsv));
-
- rsv = early_memremap(prsv, sizeof(*rsv));
- if (rsv == NULL) {
+ u8 *p;
+ int i;
+
+ /*
+ * Just map a full page: that is what we will get
+ * anyway, and it permits us to map the entire entry
+ * before knowing its size.
+ */
+ p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
+ PAGE_SIZE);
+ if (p == NULL) {
pr_err("Could not map UEFI memreserve entry!\n");
return -ENOMEM;
}
- if (rsv->size)
- memblock_reserve(rsv->base, rsv->size);
+ rsv = (void *)(p + prsv % PAGE_SIZE);
+
+ /* reserve the entry itself */
+ memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));
+
+ for (i = 0; i < atomic_read(&rsv->count); i++) {
+ memblock_reserve(rsv->entry[i].base,
+ rsv->entry[i].size);
+ }
prsv = rsv->next;
- early_memunmap(rsv, sizeof(*rsv));
+ early_memunmap(p, PAGE_SIZE);
}
}
@@ -969,19 +981,55 @@ bool efi_is_table_address(unsigned long phys_addr)
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
-int efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+static int __init efi_memreserve_map_root(void)
+{
+ if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+ return -ENODEV;
+
+ efi_memreserve_root = memremap(efi.mem_reserve,
+ sizeof(*efi_memreserve_root),
+ MEMREMAP_WB);
+ if (WARN_ON_ONCE(!efi_memreserve_root))
+ return -ENOMEM;
+ return 0;
+}
+
+int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
struct linux_efi_memreserve *rsv;
+ unsigned long prsv;
+ int rc, index;
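+ /* ULONG_MAX is the sentinel for a failed mapping of the memreserve table; see efi_memreserve_root_init(). */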
- if (!efi_memreserve_root)
+ if (efi_memreserve_root == (void *)ULONG_MAX)
return -ENODEV;
- rsv = kmalloc(sizeof(*rsv), GFP_ATOMIC);
+ if (!efi_memreserve_root) {
+ rc = efi_memreserve_map_root();
+ if (rc)
+ return rc;
+ }
+
+ /* first try to find a slot in an existing linked list entry */
+ for (prsv = efi_memreserve_root->next; prsv; prsv = rsv->next) {
+ rsv = __va(prsv);
+ index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
+ if (index < rsv->size) {
+ rsv->entry[index].base = addr;
+ rsv->entry[index].size = size;
+
+ return 0;
+ }
+ }
+
+ /* no slot found - allocate a new linked list entry */
+ rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
if (!rsv)
return -ENOMEM;
- rsv->base = addr;
- rsv->size = size;
+ rsv->size = EFI_MEMRESERVE_COUNT(PAGE_SIZE);
+ atomic_set(&rsv->count, 1);
+ rsv->entry[0].base = addr;
+ rsv->entry[0].size = size;
spin_lock(&efi_mem_reserve_persistent_lock);
rsv->next = efi_memreserve_root->next;
@@ -993,14 +1041,10 @@ int efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
static int __init efi_memreserve_root_init(void)
{
- if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
- return -ENODEV;
-
- efi_memreserve_root = memremap(efi.mem_reserve,
- sizeof(*efi_memreserve_root),
- MEMREMAP_WB);
- if (!efi_memreserve_root)
- return -ENOMEM;
+ if (efi_memreserve_root)
+ return 0;
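+ /* Poison the pointer on failure so later callers bail out with -ENODEV. */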
+ if (efi_memreserve_map_root())
+ efi_memreserve_root = (void *)ULONG_MAX;
return 0;
}
early_initcall(efi_memreserve_root_init);
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index c51627660dbb..d9845099635e 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -9,7 +9,10 @@ cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
- -mno-mmx -mno-sse -fshort-wchar
+ -mno-mmx -mno-sse -fshort-wchar \
+ -Wno-pointer-sign \
+ $(call cc-disable-warning, address-of-packed-member) \
+ $(call cc-disable-warning, gnu)
# arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
# disable the stackleak plugin
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 3d36142cf812..eee42d5e25ee 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -33,7 +33,7 @@
#define EFI_RT_VIRTUAL_SIZE SZ_512M
#ifdef CONFIG_ARM64
-# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE_64
+# define EFI_RT_VIRTUAL_LIMIT DEFAULT_MAP_WINDOW_64
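+/* Stay inside the default 48-bit map window so kernels built with 52-bit VA support still map the runtime region. */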
#else
# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE
#endif
@@ -86,8 +86,8 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
}
rsv->next = 0;
- rsv->base = 0;
rsv->size = 0;
+ atomic_set(&rsv->count, 0);
status = efi_call_early(install_configuration_table,
&memreserve_table_guid,
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 0c0d2312f4a8..0dc7b4987cc2 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -370,22 +370,24 @@ void *get_fdt(efi_system_table_t *sys_table, unsigned long *fdt_size)
{
efi_guid_t fdt_guid = DEVICE_TREE_GUID;
efi_config_table_t *tables;
- void *fdt;
int i;
- tables = (efi_config_table_t *) sys_table->tables;
- fdt = NULL;
+ tables = (efi_config_table_t *)sys_table->tables;
- for (i = 0; i < sys_table->nr_tables; i++)
- if (efi_guidcmp(tables[i].guid, fdt_guid) == 0) {
- fdt = (void *) tables[i].table;
- if (fdt_check_header(fdt) != 0) {
- pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
- return NULL;
- }
- *fdt_size = fdt_totalsize(fdt);
- break;
- }
+ for (i = 0; i < sys_table->nr_tables; i++) {
+ void *fdt;
+
+ if (efi_guidcmp(tables[i].guid, fdt_guid) != 0)
+ continue;
+
+ fdt = (void *)tables[i].table;
+ if (fdt_check_header(fdt) != 0) {
+ pr_efi_err(sys_table, "Invalid header detected on UEFI supplied FDT, ignoring ...\n");
+ return NULL;
+ }
+ *fdt_size = fdt_totalsize(fdt);
+ return fdt;
+ }
- return fdt;
+ return NULL;
}
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 9336ffdf6e2c..fceaafd67ec6 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -318,7 +318,12 @@ EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
static efi_status_t
check_var_size(u32 attributes, unsigned long size)
{
- const struct efivar_operations *fops = __efivars->ops;
+ const struct efivar_operations *fops;
+
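+ /* __efivars stays NULL until a backend registers, and is cleared again on unregister. */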
+ if (!__efivars)
+ return EFI_UNSUPPORTED;
+
+ fops = __efivars->ops;
if (!fops->query_variable_store)
return EFI_UNSUPPORTED;
@@ -329,7 +334,12 @@ check_var_size(u32 attributes, unsigned long size)
static efi_status_t
check_var_size_nonblocking(u32 attributes, unsigned long size)
{
- const struct efivar_operations *fops = __efivars->ops;
+ const struct efivar_operations *fops;
+
+ if (!__efivars)
+ return EFI_UNSUPPORTED;
+
+ fops = __efivars->ops;
if (!fops->query_variable_store)
return EFI_UNSUPPORTED;
@@ -429,13 +439,18 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
void *data, bool duplicates, struct list_head *head)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
unsigned long variable_name_size = 1024;
efi_char16_t *variable_name;
efi_status_t status;
efi_guid_t vendor_guid;
int err = 0;
+ if (!__efivars)
+ return -EFAULT;
+
+ ops = __efivars->ops;
+
variable_name = kzalloc(variable_name_size, GFP_KERNEL);
if (!variable_name) {
printk(KERN_ERR "efivars: Memory allocation failed.\n");
@@ -583,12 +598,14 @@ static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
*/
int __efivar_entry_delete(struct efivar_entry *entry)
{
- const struct efivar_operations *ops = __efivars->ops;
efi_status_t status;
- status = ops->set_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- 0, 0, NULL);
+ if (!__efivars)
+ return -EINVAL;
+
+ status = __efivars->ops->set_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ 0, 0, NULL);
return efi_status_to_err(status);
}
@@ -607,12 +624,17 @@ EXPORT_SYMBOL_GPL(__efivar_entry_delete);
*/
int efivar_entry_delete(struct efivar_entry *entry)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_status_t status;
if (down_interruptible(&efivars_lock))
return -EINTR;
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+ ops = __efivars->ops;
status = ops->set_variable(entry->var.VariableName,
&entry->var.VendorGuid,
0, 0, NULL);
@@ -650,13 +672,19 @@ EXPORT_SYMBOL_GPL(efivar_entry_delete);
int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
unsigned long size, void *data, struct list_head *head)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_status_t status;
efi_char16_t *name = entry->var.VariableName;
efi_guid_t vendor = entry->var.VendorGuid;
if (down_interruptible(&efivars_lock))
return -EINTR;
+
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+ ops = __efivars->ops;
if (head && efivar_entry_find(name, vendor, head, false)) {
up(&efivars_lock);
return -EEXIST;
@@ -687,12 +715,17 @@ static int
efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
u32 attributes, unsigned long size, void *data)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_status_t status;
if (down_trylock(&efivars_lock))
return -EBUSY;
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+
status = check_var_size_nonblocking(attributes,
size + ucs2_strsize(name, 1024));
if (status != EFI_SUCCESS) {
@@ -700,6 +733,7 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
return -ENOSPC;
}
+ ops = __efivars->ops;
status = ops->set_variable_nonblocking(name, &vendor, attributes,
size, data);
@@ -727,9 +761,13 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
bool block, unsigned long size, void *data)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_status_t status;
+ if (!__efivars)
+ return -EINVAL;
+
+ ops = __efivars->ops;
if (!ops->query_variable_store)
return -ENOSYS;
@@ -829,13 +867,18 @@ EXPORT_SYMBOL_GPL(efivar_entry_find);
*/
int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_status_t status;
*size = 0;
if (down_interruptible(&efivars_lock))
return -EINTR;
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+ ops = __efivars->ops;
status = ops->get_variable(entry->var.VariableName,
&entry->var.VendorGuid, NULL, size, NULL);
up(&efivars_lock);
@@ -861,12 +904,14 @@ EXPORT_SYMBOL_GPL(efivar_entry_size);
int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
unsigned long *size, void *data)
{
- const struct efivar_operations *ops = __efivars->ops;
efi_status_t status;
- status = ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- attributes, size, data);
+ if (!__efivars)
+ return -EINVAL;
+
+ status = __efivars->ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ attributes, size, data);
return efi_status_to_err(status);
}
@@ -882,14 +927,19 @@ EXPORT_SYMBOL_GPL(__efivar_entry_get);
int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
unsigned long *size, void *data)
{
- const struct efivar_operations *ops = __efivars->ops;
efi_status_t status;
if (down_interruptible(&efivars_lock))
return -EINTR;
- status = ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- attributes, size, data);
+
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+
+ status = __efivars->ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ attributes, size, data);
up(&efivars_lock);
return efi_status_to_err(status);
@@ -921,7 +971,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_get);
int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
unsigned long *size, void *data, bool *set)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_char16_t *name = entry->var.VariableName;
efi_guid_t *vendor = &entry->var.VendorGuid;
efi_status_t status;
@@ -940,6 +990,11 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
if (down_interruptible(&efivars_lock))
return -EINTR;
+ if (!__efivars) {
+ err = -EINVAL;
+ goto out;
+ }
+
/*
* Ensure that the available space hasn't shrunk below the safe level
*/
@@ -956,6 +1011,8 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
}
}
+ ops = __efivars->ops;
+
status = ops->set_variable(name, vendor, attributes, *size, data);
if (status != EFI_SUCCESS) {
err = efi_status_to_err(status);
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index af3a20dd5aa4..99c99a5d57fe 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -46,6 +46,7 @@ config FSI_MASTER_AST_CF
tristate "FSI master based on Aspeed ColdFire coprocessor"
depends on GPIOLIB
depends on GPIO_ASPEED
+ select GENERIC_ALLOCATOR
---help---
This option enables a FSI master using the AST2400 and AST2500 GPIO
lines driven by the internal ColdFire coprocessor. This requires
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index df94021dd9d1..81dc01ac2351 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -20,7 +20,6 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <linux/cdev.h>
#include <linux/list.h>
#include <uapi/linux/fsi.h>
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 71d014edd167..2c22836d3ffd 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -168,7 +168,7 @@ static int sirf_set_active(struct sirf_data *data, bool active)
else
timeout = SIRF_HIBERNATE_TIMEOUT;
- while (retries-- > 0) {
+ do {
sirf_pulse_on_off(data);
ret = sirf_wait_for_power_state(data, active, timeout);
if (ret < 0) {
@@ -179,9 +179,9 @@ static int sirf_set_active(struct sirf_data *data, bool active)
}
break;
- }
+ } while (retries--);
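+ /* The do-while guarantees at least one pulse; retries only goes negative when every attempt timed out. */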
- if (retries == 0)
+ if (retries < 0)
return -ETIMEDOUT;
return 0;
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 5c1564fcc24e..bdb29e51b417 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -258,7 +258,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.set = davinci_gpio_set;
chips->chip.ngpio = ngpio;
- chips->chip.base = -1;
+ chips->chip.base = pdata->no_auto_base ? pdata->base : -1;
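+ /* Legacy board code can pin the GPIO base; otherwise gpiolib assigns it dynamically. */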
#ifdef CONFIG_OF_GPIO
chips->chip.of_gpio_n_cells = 2;
diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
index 05813fbf3daf..647dfbbc4e1c 100644
--- a/drivers/gpio/gpio-max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -25,7 +25,7 @@ static int max7301_spi_write(struct device *dev, unsigned int reg,
struct spi_device *spi = to_spi_device(dev);
u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
- return spi_write(spi, (const u8 *)&word, sizeof(word));
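+ /* spi_write_then_read() bounces through DMA-safe buffers; word lives on the stack. */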
+ return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
}
/* A read from the MAX7301 means two transfers; here, one message each */
@@ -37,14 +37,8 @@ static int max7301_spi_read(struct device *dev, unsigned int reg)
struct spi_device *spi = to_spi_device(dev);
word = 0x8000 | (reg << 8);
- ret = spi_write(spi, (const u8 *)&word, sizeof(word));
- if (ret)
- return ret;
- /*
- * This relies on the fact, that a transfer with NULL tx_buf shifts out
- * zero bytes (=NOOP for MAX7301)
- */
- ret = spi_read(spi, (u8 *)&word, sizeof(word));
+ ret = spi_write_then_read(spi, &word, sizeof(word), &word,
+ sizeof(word));
if (ret)
return ret;
return word & 0xff;
diff --git a/drivers/gpio/gpio-max77620.c b/drivers/gpio/gpio-max77620.c
index 538bce4b5b42..65fa3a198ebd 100644
--- a/drivers/gpio/gpio-max77620.c
+++ b/drivers/gpio/gpio-max77620.c
@@ -25,60 +25,92 @@ struct max77620_gpio {
static const struct regmap_irq max77620_gpio_irqs[] = {
[0] = {
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE0,
- .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
.reg_offset = 0,
- .type_reg_offset = 0,
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE0,
+ .type = {
+ .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
+ .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
+ .type_reg_offset = 0,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ },
},
[1] = {
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE1,
- .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
.reg_offset = 0,
- .type_reg_offset = 1,
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE1,
+ .type = {
+ .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
+ .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
+ .type_reg_offset = 1,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ },
},
[2] = {
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE2,
- .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
.reg_offset = 0,
- .type_reg_offset = 2,
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE2,
+ .type = {
+ .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
+ .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
+ .type_reg_offset = 2,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ },
},
[3] = {
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE3,
- .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
.reg_offset = 0,
- .type_reg_offset = 3,
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE3,
+ .type = {
+ .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
+ .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
+ .type_reg_offset = 3,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ },
},
[4] = {
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE4,
- .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
.reg_offset = 0,
- .type_reg_offset = 4,
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE4,
+ .type = {
+ .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
+ .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
+ .type_reg_offset = 4,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ },
},
[5] = {
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE5,
- .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
.reg_offset = 0,
- .type_reg_offset = 5,
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE5,
+ .type = {
+ .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
+ .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
+ .type_reg_offset = 5,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ },
},
[6] = {
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE6,
- .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
.reg_offset = 0,
- .type_reg_offset = 6,
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE6,
+ .type = {
+ .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
+ .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
+ .type_reg_offset = 6,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ },
},
[7] = {
- .mask = MAX77620_IRQ_LVL2_GPIO_EDGE7,
- .type_rising_mask = MAX77620_CNFG_GPIO_INT_RISING,
- .type_falling_mask = MAX77620_CNFG_GPIO_INT_FALLING,
.reg_offset = 0,
- .type_reg_offset = 7,
+ .mask = MAX77620_IRQ_LVL2_GPIO_EDGE7,
+ .type = {
+ .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,
+ .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING,
+ .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,
+ .type_reg_offset = 7,
+ .types_supported = IRQ_TYPE_EDGE_BOTH,
+ },
},
};
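
The rewrite above migrates the driver to regmap-irq's per-IRQ type description: the rising/falling values, the type register mask and offset, and the supported trigger types now live in the embedded .type struct instead of top-level fields. Since the eight entries differ only in their edge bit and type register offset, the table could equally be generated by a helper macro; a hypothetical condensation (not part of the patch, and relying only on the defines already used above):

#define MAX77620_GPIO_IRQ(n)                                            \
        [n] = {                                                         \
                .reg_offset = 0,                                        \
                .mask = MAX77620_IRQ_LVL2_GPIO_EDGE##n,                 \
                .type = {                                               \
                        .type_rising_val = MAX77620_CNFG_GPIO_INT_RISING,   \
                        .type_falling_val = MAX77620_CNFG_GPIO_INT_FALLING, \
                        .type_reg_mask = MAX77620_CNFG_GPIO_INT_MASK,   \
                        .type_reg_offset = n,                           \
                        .types_supported = IRQ_TYPE_EDGE_BOTH,          \
                },                                                      \
        }

static const struct regmap_irq max77620_gpio_irqs[] = {
        MAX77620_GPIO_IRQ(0), MAX77620_GPIO_IRQ(1),
        MAX77620_GPIO_IRQ(2), MAX77620_GPIO_IRQ(3),
        MAX77620_GPIO_IRQ(4), MAX77620_GPIO_IRQ(5),
        MAX77620_GPIO_IRQ(6), MAX77620_GPIO_IRQ(7),
};
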
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 6e02148c208b..adc768f908f1 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -773,9 +773,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
"marvell,armada-370-gpio"))
return 0;
- if (IS_ERR(mvchip->clk))
- return PTR_ERR(mvchip->clk);
-
/*
* There are only two sets of PWM configuration registers for
* all the GPIO lines on those SoCs which this driver reserves
@@ -786,6 +783,9 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
if (!res)
return 0;
+ if (IS_ERR(mvchip->clk))
+ return PTR_ERR(mvchip->clk);
+
/*
* Use set A for lines of GPIO chip with id 0, B for GPIO chip
* with id 1. Don't allow further GPIO chips to be used for PWM.
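
Moving the clock check below the resource lookup matters because some supported SoC variants have no PWM register range at all: probing them used to fail on a missing clock they never needed. The general shape of the fix, as a sketch (names other than the platform API are illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Gate on the optional register range first, then validate the
 * dependency that only the optional feature needs.
 */
static int probe_optional_pwm(struct platform_device *pdev, struct clk *clk)
{
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res)
                return 0;               /* no PWM registers: GPIO-only variant */

        if (IS_ERR(clk))
                return PTR_ERR(clk);    /* PWM present: clock is mandatory */

        /* ... set up PWM ... */
        return 0;
}
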
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 9887c3db6e16..5b3e83cd7137 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -32,7 +32,6 @@
#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF
#define OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER BIT(2)
-#define OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN BIT(1)
struct gpio_regs {
u32 irqenable1;
@@ -379,18 +378,9 @@ static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
readl_relaxed(bank->base + bank->regs->fallingdetect);
if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
- /* Defer wkup_en register update until we idle? */
- if (bank->quirks & OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN) {
- if (trigger)
- bank->context.wake_en |= gpio_bit;
- else
- bank->context.wake_en &= ~gpio_bit;
- } else {
- omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit,
- trigger != 0);
- bank->context.wake_en =
- readl_relaxed(bank->base + bank->regs->wkup_en);
- }
+ omap_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
+ bank->context.wake_en =
+ readl_relaxed(bank->base + bank->regs->wkup_en);
}
/* This part needs to be executed always for OMAP{34xx, 44xx} */
@@ -942,44 +932,6 @@ omap2_gpio_disable_level_quirk(struct gpio_bank *bank)
bank->base + bank->regs->risingdetect);
}
-/*
- * On omap4 and later SoC variants a level interrupt with wkup_en
- * enabled blocks the GPIO functional clock from idling until the GPIO
- * instance has been reset. To avoid that, we must set wkup_en only for
- * idle for level interrupts, and clear level registers for the duration
- * of idle. The level interrupts will be still there on wakeup by their
- * nature.
- */
-static void __maybe_unused
-omap4_gpio_enable_level_quirk(struct gpio_bank *bank)
-{
- /* Update wake register for idle, edge bits might be already set */
- writel_relaxed(bank->context.wake_en,
- bank->base + bank->regs->wkup_en);
-
- /* Clear level registers for idle */
- writel_relaxed(0, bank->base + bank->regs->leveldetect0);
- writel_relaxed(0, bank->base + bank->regs->leveldetect1);
-}
-
-static void __maybe_unused
-omap4_gpio_disable_level_quirk(struct gpio_bank *bank)
-{
- /* Restore level registers after idle */
- writel_relaxed(bank->context.leveldetect0,
- bank->base + bank->regs->leveldetect0);
- writel_relaxed(bank->context.leveldetect1,
- bank->base + bank->regs->leveldetect1);
-
- /* Clear saved wkup_en for level, it will be set for next idle again */
- bank->context.wake_en &= ~(bank->context.leveldetect0 |
- bank->context.leveldetect1);
-
- /* Update wake with only edge configuration */
- writel_relaxed(bank->context.wake_en,
- bank->base + bank->regs->wkup_en);
-}
-
/*---------------------------------------------------------------------*/
static int omap_mpuio_suspend_noirq(struct device *dev)
@@ -1412,12 +1364,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
omap_set_gpio_dataout_mask_multiple;
}
- if (bank->quirks & OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN) {
- bank->funcs.idle_enable_level_quirk =
- omap4_gpio_enable_level_quirk;
- bank->funcs.idle_disable_level_quirk =
- omap4_gpio_disable_level_quirk;
- } else if (bank->quirks & OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER) {
+ if (bank->quirks & OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER) {
bank->funcs.idle_enable_level_quirk =
omap2_gpio_enable_level_quirk;
bank->funcs.idle_disable_level_quirk =
@@ -1806,8 +1753,7 @@ static const struct omap_gpio_platform_data omap4_pdata = {
.regs = &omap4_gpio_regs,
.bank_width = 32,
.dbck_flag = true,
- .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER |
- OMAP_GPIO_QUIRK_DEFERRED_WKUP_EN,
+ .quirks = OMAP_GPIO_QUIRK_IDLE_REMOVE_TRIGGER,
};
static const struct of_device_id omap_gpio_match[] = {
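
With the deferred-wkup_en quirk reverted, wkup_en is again kept in sync with the trigger via a single read-modify-write plus a read-back into the context cache. A generic sketch of that RMW helper pattern (the driver's own omap_gpio_rmw() is assumed to follow the same shape; the name rmw32 is illustrative):

#include <linux/io.h>

static void rmw32(void __iomem *base, u32 reg, u32 mask, bool set)
{
        u32 v = readl_relaxed(base + reg);

        if (set)
                v |= mask;
        else
                v &= ~mask;
        writel_relaxed(v, base + reg);
}
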
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 55b72fbe1631..7f93954c58ea 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -19,11 +19,28 @@
#include "gpiolib.h"
+/**
+ * struct acpi_gpio_event - ACPI GPIO event handler data
+ *
+ * @node: list-entry of the events list of the struct acpi_gpio_chip
+ * @handle: handle of ACPI method to execute when the IRQ triggers
+ * @handler: irq_handler to pass to request_irq when requesting the IRQ
+ * @pin: GPIO pin number on the gpio_chip
+ * @irq: Linux IRQ number for the event, for request_ / free_irq
+ * @irqflags: flags to pass to request_irq when requesting the IRQ
+ * @irq_is_wake: True if the ACPI flags indicate the IRQ is a wakeup source
+ * @irq_requested: True if request_irq has been done
+ * @desc: gpio_desc for the GPIO pin for this event
+ */
struct acpi_gpio_event {
struct list_head node;
acpi_handle handle;
+ irq_handler_t handler;
unsigned int pin;
unsigned int irq;
+ unsigned long irqflags;
+ bool irq_is_wake;
+ bool irq_requested;
struct gpio_desc *desc;
};
@@ -49,10 +66,10 @@ struct acpi_gpio_chip {
/*
* For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
- * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
* late_initcall_sync handler, so that other builtin drivers can register their
* OpRegions before the event handlers can run. This list contains gpiochips
- * for which the acpi_gpiochip_request_interrupts() has been deferred.
+ * for which the acpi_gpiochip_request_irqs() call has been deferred.
*/
static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
@@ -133,8 +150,42 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
}
EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
-static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
- void *context)
+static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
+ struct acpi_gpio_event *event)
+{
+ int ret, value;
+
+ ret = request_threaded_irq(event->irq, NULL, event->handler,
+ event->irqflags, "ACPI:Event", event);
+ if (ret) {
+ dev_err(acpi_gpio->chip->parent,
+ "Failed to setup interrupt handler for %d\n",
+ event->irq);
+ return;
+ }
+
+ if (event->irq_is_wake)
+ enable_irq_wake(event->irq);
+
+ event->irq_requested = true;
+
+ /* Make sure we trigger the initial state of edge-triggered IRQs */
+ value = gpiod_get_raw_value_cansleep(event->desc);
+ if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+ ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+ event->handler(event->irq, event);
+}
+
+static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
+{
+ struct acpi_gpio_event *event;
+
+ list_for_each_entry(event, &acpi_gpio->events, node)
+ acpi_gpiochip_request_irq(acpi_gpio, event);
+}
+
+static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
+ void *context)
{
struct acpi_gpio_chip *acpi_gpio = context;
struct gpio_chip *chip = acpi_gpio->chip;
@@ -143,8 +194,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
struct acpi_gpio_event *event;
irq_handler_t handler = NULL;
struct gpio_desc *desc;
- unsigned long irqflags;
- int ret, pin, irq, value;
+ int ret, pin, irq;
if (!acpi_gpio_get_irq_resource(ares, &agpio))
return AE_OK;
@@ -175,8 +225,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
gpiod_direction_input(desc);
- value = gpiod_get_value_cansleep(desc);
-
ret = gpiochip_lock_as_irq(chip, pin);
if (ret) {
dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -189,64 +237,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
goto fail_unlock_irq;
}
- irqflags = IRQF_ONESHOT;
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ goto fail_unlock_irq;
+
+ event->irqflags = IRQF_ONESHOT;
if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
if (agpio->polarity == ACPI_ACTIVE_HIGH)
- irqflags |= IRQF_TRIGGER_HIGH;
+ event->irqflags |= IRQF_TRIGGER_HIGH;
else
- irqflags |= IRQF_TRIGGER_LOW;
+ event->irqflags |= IRQF_TRIGGER_LOW;
} else {
switch (agpio->polarity) {
case ACPI_ACTIVE_HIGH:
- irqflags |= IRQF_TRIGGER_RISING;
+ event->irqflags |= IRQF_TRIGGER_RISING;
break;
case ACPI_ACTIVE_LOW:
- irqflags |= IRQF_TRIGGER_FALLING;
+ event->irqflags |= IRQF_TRIGGER_FALLING;
break;
default:
- irqflags |= IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING;
+ event->irqflags |= IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING;
break;
}
}
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- goto fail_unlock_irq;
-
event->handle = evt_handle;
+ event->handler = handler;
event->irq = irq;
+ event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
event->pin = pin;
event->desc = desc;
- ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
- "ACPI:Event", event);
- if (ret) {
- dev_err(chip->parent,
- "Failed to setup interrupt handler for %d\n",
- event->irq);
- goto fail_free_event;
- }
-
- if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
- enable_irq_wake(irq);
-
list_add_tail(&event->node, &acpi_gpio->events);
- /*
- * Make sure we trigger the initial state of the IRQ when using RISING
- * or FALLING. Note we run the handlers on late_init, the AML code
- * may refer to OperationRegions from other (builtin) drivers which
- * may be probed after us.
- */
- if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
- ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
- handler(event->irq, event);
-
return AE_OK;
-fail_free_event:
- kfree(event);
fail_unlock_irq:
gpiochip_unlock_as_irq(chip, pin);
fail_free_desc:
@@ -283,6 +309,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
+ acpi_walk_resources(handle, "_AEI",
+ acpi_gpiochip_alloc_event, acpi_gpio);
+
mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
defer = !acpi_gpio_deferred_req_irqs_done;
if (defer)
@@ -293,8 +322,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (defer)
return;
- acpi_walk_resources(handle, "_AEI",
- acpi_gpiochip_request_interrupt, acpi_gpio);
+ acpi_gpiochip_request_irqs(acpi_gpio);
}
EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
@@ -331,10 +359,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
struct gpio_desc *desc;
- if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
- disable_irq_wake(event->irq);
+ if (event->irq_requested) {
+ if (event->irq_is_wake)
+ disable_irq_wake(event->irq);
+
+ free_irq(event->irq, event);
+ }
- free_irq(event->irq, event);
desc = event->desc;
if (WARN_ON(IS_ERR(desc)))
continue;
@@ -1200,23 +1231,16 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
return con_id == NULL;
}
-/* Run deferred acpi_gpiochip_request_interrupts() */
-static int acpi_gpio_handle_deferred_request_interrupts(void)
+/* Run deferred acpi_gpiochip_request_irqs() */
+static int acpi_gpio_handle_deferred_request_irqs(void)
{
struct acpi_gpio_chip *acpi_gpio, *tmp;
mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
list_for_each_entry_safe(acpi_gpio, tmp,
&acpi_gpio_deferred_req_irqs_list,
- deferred_req_irqs_list_entry) {
- acpi_handle handle;
-
- handle = ACPI_HANDLE(acpi_gpio->chip->parent);
- acpi_walk_resources(handle, "_AEI",
- acpi_gpiochip_request_interrupt, acpi_gpio);
-
- list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
- }
+ deferred_req_irqs_list_entry)
+ acpi_gpiochip_request_irqs(acpi_gpio);
acpi_gpio_deferred_req_irqs_done = true;
mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
@@ -1224,4 +1248,4 @@ static int acpi_gpio_handle_deferred_request_interrupts(void)
return 0;
}
/* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
+late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
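
The restructuring above splits event setup in two: acpi_gpiochip_alloc_event() records everything the IRQ request will need while walking the _AEI resources, and acpi_gpiochip_request_irqs() performs the actual request_threaded_irq(), possibly from the late_initcall_sync handler so that builtin drivers can register their OpRegions first. A stripped-down sketch of the two-phase pattern (simplified types; not the exact driver code):

#include <linux/interrupt.h>
#include <linux/list.h>

/* Phase 1 data: everything request_irq() will need later on. */
struct deferred_irq {
        struct list_head node;
        unsigned int irq;
        unsigned long irqflags;
        irq_handler_t handler;
        void *data;
};

static LIST_HEAD(deferred_irqs);

/* Phase 1: record the event while walking the resources. */
static void defer_irq(struct deferred_irq *d)
{
        list_add_tail(&d->node, &deferred_irqs);
}

/* Phase 2: request everything once OpRegion providers have probed. */
static void request_deferred_irqs(void)
{
        struct deferred_irq *d;

        list_for_each_entry(d, &deferred_irqs, node)
                if (request_threaded_irq(d->irq, NULL, d->handler,
                                         d->irqflags, "deferred", d->data))
                        pr_err("failed to request irq %u\n", d->irq);
}
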
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 01959369360b..0acc2cc6e868 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -98,15 +98,28 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
struct gpio_desc **dr;
struct gpio_desc *desc;
+ desc = gpiod_get_index(dev, con_id, idx, flags);
+ if (IS_ERR(desc))
+ return desc;
+
+ /*
+ * For non-exclusive GPIO descriptors, check if this descriptor is
+ * already under resource management by this device.
+ */
+ if (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
+ struct devres *dres;
+
+ dres = devres_find(dev, devm_gpiod_release,
+ devm_gpiod_match, &desc);
+ if (dres)
+ return desc;
+ }
+
dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
GFP_KERNEL);
- if (!dr)
+ if (!dr) {
+ gpiod_put(desc);
return ERR_PTR(-ENOMEM);
-
- desc = gpiod_get_index(dev, con_id, idx, flags);
- if (IS_ERR(desc)) {
- devres_free(dr);
- return desc;
}
*dr = desc;
@@ -140,15 +153,28 @@ struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
struct gpio_desc **dr;
struct gpio_desc *desc;
+ desc = gpiod_get_from_of_node(node, propname, index, dflags, label);
+ if (IS_ERR(desc))
+ return desc;
+
+ /*
+ * For non-exclusive GPIO descriptors, check if this descriptor is
+ * already under resource management by this device.
+ */
+ if (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
+ struct devres *dres;
+
+ dres = devres_find(dev, devm_gpiod_release,
+ devm_gpiod_match, &desc);
+ if (dres)
+ return desc;
+ }
+
dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
GFP_KERNEL);
- if (!dr)
+ if (!dr) {
+ gpiod_put(desc);
return ERR_PTR(-ENOMEM);
-
- desc = gpiod_get_from_of_node(node, propname, index, dflags, label);
- if (IS_ERR(desc)) {
- devres_free(dr);
- return desc;
}
*dr = desc;
@@ -321,6 +347,36 @@ void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
EXPORT_SYMBOL(devm_gpiod_put);
/**
+ * devm_gpiod_unhinge - Remove resource management from a GPIO descriptor
+ * @dev: GPIO consumer
+ * @desc: GPIO descriptor to remove resource management from
+ *
+ * Remove resource management from a GPIO descriptor. This is needed when
+ * you want to hand over lifecycle management of a descriptor to another
+ * mechanism.
+ */
+
+void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(desc))
+ return;
+ ret = devres_destroy(dev, devm_gpiod_release,
+ devm_gpiod_match, &desc);
+ /*
+ * If the GPIO descriptor is requested as nonexclusive, we
+ * may call this function several times on the same descriptor
+ * so it is OK if devres_destroy() returns -ENOENT.
+ */
+ if (ret == -ENOENT)
+ return;
+ /* Anything else we should warn about */
+ WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_gpiod_unhinge);
+
+/**
* devm_gpiod_put_array - Resource-managed gpiod_put_array()
* @dev: GPIO consumer
* @descs: GPIO descriptor array to dispose of
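
Together with the -EBUSY handling added to gpiod_get_from_of_node() further down, these changes make nonexclusive descriptors safe under devres: the getters now acquire the descriptor first and reuse an existing devres entry when the same device requests it again, and devm_gpiod_unhinge() tolerates -ENOENT so it can run more than once per descriptor. A hypothetical consumer exercising that flow:

#include <linux/gpio/consumer.h>

static int foo_probe(struct device *dev)
{
        struct gpio_desc *a, *b;

        a = devm_gpiod_get(dev, "enable",
                           GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
        if (IS_ERR(a))
                return PTR_ERR(a);

        /* same con_id again: devres_find() reuses the first entry */
        b = devm_gpiod_get(dev, "enable",
                           GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
        if (IS_ERR(b))
                return PTR_ERR(b);

        /* hand the descriptor's lifecycle over to another mechanism */
        devm_gpiod_unhinge(dev, a);
        return 0;
}
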
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a2cbb474901c..985c09ce80fb 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -4205,6 +4205,8 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
transitory = flags & OF_GPIO_TRANSITORY;
ret = gpiod_request(desc, label);
+ if (ret == -EBUSY && (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
+ return desc;
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 087d865286a0..bc57f0dc5953 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -201,12 +201,6 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
struct gpio_array *array_info,
unsigned long *value_bitmap);
-/* This is just passed between gpiolib and devres */
-struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label);
-
extern struct spinlock gpio_lock;
extern struct list_head gpio_devices;
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index bc6a16a3c36e..ce8d1d384319 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -10,8 +10,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_scatter.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
- drm_info.o drm_encoder_slave.o \
- drm_trace_points.o drm_global.o drm_prime.o \
+ drm_encoder_slave.o \
+ drm_trace_points.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
drm_framebuffer.o drm_connector.o drm_blend.o \
@@ -32,11 +32,12 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
+drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
- drm_scdc_helper.o drm_gem_framebuffer_helper.o
+ drm_scdc_helper.o drm_gem_framebuffer_helper.o \
+ drm_atomic_state_helper.o drm_damage_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 138cb787d27e..f76bcb9c45e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,7 +53,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_xgmi.o
+ amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@@ -105,6 +105,7 @@ amdgpu-y += \
# add GFX block
amdgpu-y += \
amdgpu_gfx.o \
+ amdgpu_rlc.o \
gfx_v8_0.o \
gfx_v9_0.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 104b2e0d893b..bcef6ea4bcf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -75,11 +75,14 @@
#include "amdgpu_sdma.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
+#include "amdgpu_csa.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_gem.h"
+#include "amdgpu_doorbell.h"
+#include "amdgpu_amdkfd.h"
#define MAX_GPU_INSTANCE 16
@@ -161,6 +164,7 @@ extern int amdgpu_si_support;
extern int amdgpu_cik_support;
#endif
+#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
@@ -233,7 +237,7 @@ enum amdgpu_kiq_irq {
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 20
+#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
enum amd_ip_block_type block_type,
@@ -360,123 +364,6 @@ int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);
/*
- * GPU doorbell structures, functions & helpers
- */
-typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
-{
- AMDGPU_DOORBELL_KIQ = 0x000,
- AMDGPU_DOORBELL_HIQ = 0x001,
- AMDGPU_DOORBELL_DIQ = 0x002,
- AMDGPU_DOORBELL_MEC_RING0 = 0x010,
- AMDGPU_DOORBELL_MEC_RING1 = 0x011,
- AMDGPU_DOORBELL_MEC_RING2 = 0x012,
- AMDGPU_DOORBELL_MEC_RING3 = 0x013,
- AMDGPU_DOORBELL_MEC_RING4 = 0x014,
- AMDGPU_DOORBELL_MEC_RING5 = 0x015,
- AMDGPU_DOORBELL_MEC_RING6 = 0x016,
- AMDGPU_DOORBELL_MEC_RING7 = 0x017,
- AMDGPU_DOORBELL_GFX_RING0 = 0x020,
- AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
- AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
- AMDGPU_DOORBELL_IH = 0x1E8,
- AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
- AMDGPU_DOORBELL_INVALID = 0xFFFF
-} AMDGPU_DOORBELL_ASSIGNMENT;
-
-struct amdgpu_doorbell {
- /* doorbell mmio */
- resource_size_t base;
- resource_size_t size;
- u32 __iomem *ptr;
- u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
-};
-
-/*
- * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
- */
-typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
-{
- /*
- * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
- * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
- * Compute related doorbells are allocated from 0x00 to 0x8a
- */
-
-
- /* kernel scheduling */
- AMDGPU_DOORBELL64_KIQ = 0x00,
-
- /* HSA interface queue and debug queue */
- AMDGPU_DOORBELL64_HIQ = 0x01,
- AMDGPU_DOORBELL64_DIQ = 0x02,
-
- /* Compute engines */
- AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
- AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
- AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
- AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
- AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
- AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
- AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
- AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
-
- /* User queue doorbell range (128 doorbells) */
- AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
- AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
-
- /* Graphics engine */
- AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
-
- /*
- * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
- * Graphics voltage island aperture 1
- * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
- */
-
- /* sDMA engines reserved from 0xe0 -oxef */
- AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xE0,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xE1,
- AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xE8,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xE9,
-
- /* For vega10 sriov, the sdma doorbell must be fixed as follow
- * to keep the same setting with host driver, or it will
- * happen conflicts
- */
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 = 0xF0,
- AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 = 0xF2,
- AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
-
- /* Interrupt handler */
- AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
- AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
- AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
-
- /* VCN engine use 32 bits doorbell */
- AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */
- AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
- AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
- AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
-
- /* overlap the doorbell assignment with VCN as they are mutually exclusive
- * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
- */
- AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
- AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
- AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
- AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
-
- AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
- AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
- AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
- AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
-
- AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
- AMDGPU_DOORBELL64_INVALID = 0xFFFF
-} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-/*
* IRQS.
*/
@@ -653,6 +540,8 @@ struct amdgpu_asic_funcs {
struct amdgpu_ring *ring);
/* check if the asic needs a full reset or if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
+ /* initialize doorbell layout for a specific asic */
+ void (*init_doorbell_index)(struct amdgpu_device *adev);
};
/*
@@ -831,7 +720,6 @@ struct amdgpu_device {
bool need_dma32;
bool need_swiotlb;
bool accel_working;
- struct work_struct reset_work;
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
@@ -976,6 +864,9 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* KFD */
+ struct amdgpu_kfd_dev kfd;
+
/* display related functionality */
struct amdgpu_display_manager dm;
@@ -989,9 +880,6 @@ struct amdgpu_device {
atomic64_t visible_pin_size;
atomic64_t gart_pin_size;
- /* amdkfd interface */
- struct kfd_dev *kfd;
-
/* soc15 register offset based on ip, instance and segment */
uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
@@ -1023,6 +911,10 @@ struct amdgpu_device {
unsigned long last_mm_index;
bool in_gpu_reset;
struct mutex lock_reset;
+ struct amdgpu_doorbell_index doorbell_index;
+
+ int asic_reset_res;
+ struct work_struct xgmi_reset_work;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1047,11 +939,6 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
-u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
-u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
-
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
@@ -1113,11 +1000,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
-#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
-#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
-#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
-#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
-
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
@@ -1159,6 +1041,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
+#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
/* Common functions */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@ -1219,12 +1102,6 @@ void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-
-/*
- * functions used by amdgpu_xgmi.c
- */
-int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-
/*
* functions used by amdgpu_encoder.c
*/
@@ -1252,6 +1129,9 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+ struct amdgpu_dm_backlight_caps *caps);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
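
The doorbell enums removed here move into the new amdgpu_doorbell.h, and the layout is now chosen per ASIC through the init_doorbell_index() hook added to amdgpu_asic_funcs. A sketch of what an implementation might look like (field names follow the amdgpu_amdkfd.c usage later in this diff, and the values mirror the removed DOORBELL64 defaults):

static void example_init_doorbell_index(struct amdgpu_device *adev)
{
        adev->doorbell_index.sdma_engine0 = 0xe0;
        adev->doorbell_index.sdma_engine1 = 0xe8;
}

static const struct amdgpu_asic_funcs example_asic_funcs = {
        /* ... the other mandatory callbacks ... */
        .init_doorbell_index = example_init_doorbell_index,
};
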
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 7f0afc526419..4376b17ca594 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -41,28 +41,21 @@ struct amdgpu_atif_notification_cfg {
};
struct amdgpu_atif_notifications {
- bool display_switch;
- bool expansion_mode_change;
bool thermal_state;
bool forced_power_state;
bool system_power_state;
- bool display_conf_change;
- bool px_gfx_switch;
bool brightness_change;
bool dgpu_display_event;
+ bool gpu_package_power_limit;
};
struct amdgpu_atif_functions {
bool system_params;
bool sbios_requests;
- bool select_active_disp;
- bool lid_state;
- bool get_tv_standard;
- bool set_tv_standard;
- bool get_panel_expansion_mode;
- bool set_panel_expansion_mode;
bool temperature_change;
- bool graphics_device_types;
+ bool query_backlight_transfer_characteristics;
+ bool ready_to_undock;
+ bool external_gpu_information;
};
struct amdgpu_atif {
@@ -72,6 +65,7 @@ struct amdgpu_atif {
struct amdgpu_atif_functions functions;
struct amdgpu_atif_notification_cfg notification_cfg;
struct amdgpu_encoder *encoder_for_bl;
+ struct amdgpu_dm_backlight_caps backlight_caps;
};
/* Call the ATIF method
@@ -137,15 +131,12 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
*/
static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
{
- n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
- n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
- n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
- n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+ n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED;
}
/**
@@ -162,14 +153,11 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
{
f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
- f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
- f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
- f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
- f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
- f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
- f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
- f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+ f->query_backlight_transfer_characteristics =
+ mask & ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED;
+ f->ready_to_undock = mask & ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED;
+ f->external_gpu_information = mask & ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED;
}
/**
@@ -311,6 +299,65 @@ out:
}
/**
+ * amdgpu_atif_query_backlight_caps - get min and max backlight input signal
+ *
+ * @atif: ATIF instance to query
+ *
+ * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function
+ * to determine the acceptable range of backlight values
+ *
+ * Backlight_caps.caps_valid will be set to true if the query is successful
+ *
+ * The input signals are in range 0-255
+ *
+ * This function assumes the display with backlight is the first LCD
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
+{
+ union acpi_object *info;
+ struct atif_qbtc_output characteristics;
+ struct atif_qbtc_arguments arguments;
+ struct acpi_buffer params;
+ size_t size;
+ int err = 0;
+
+ arguments.size = sizeof(arguments);
+ arguments.requested_display = ATIF_QBTC_REQUEST_LCD1;
+
+ params.length = sizeof(arguments);
+ params.pointer = (void *)&arguments;
+
+ info = amdgpu_atif_call(atif,
+ ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS,
+ &params);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(&characteristics, 0, sizeof(characteristics));
+ size = min(sizeof(characteristics), size);
+ memcpy(&characteristics, info->buffer.pointer, size);
+
+ atif->backlight_caps.caps_valid = true;
+ atif->backlight_caps.min_input_signal =
+ characteristics.min_input_signal;
+ atif->backlight_caps.max_input_signal =
+ characteristics.max_input_signal;
+out:
+ kfree(info);
+ return err;
+}
+
+/**
* amdgpu_atif_get_sbios_requests - get requested sbios event
*
* @handle: acpi handle
@@ -799,6 +846,17 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
}
+ if (atif->functions.query_backlight_transfer_characteristics) {
+ ret = amdgpu_atif_query_backlight_caps(atif);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n",
+ ret);
+ atif->backlight_caps.caps_valid = false;
+ }
+ } else {
+ atif->backlight_caps.caps_valid = false;
+ }
+
out:
adev->acpi_nb.notifier_call = amdgpu_acpi_event;
register_acpi_notifier(&adev->acpi_nb);
@@ -806,6 +864,18 @@ out:
return ret;
}
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
+ struct amdgpu_dm_backlight_caps *caps)
+{
+ if (!adev->atif) {
+ caps->caps_valid = false;
+ return;
+ }
+ caps->caps_valid = adev->atif->backlight_caps.caps_valid;
+ caps->min_input_signal = adev->atif->backlight_caps.min_input_signal;
+ caps->max_input_signal = adev->atif->backlight_caps.max_input_signal;
+}
+
/**
* amdgpu_acpi_fini - tear down driver acpi support
*
@@ -816,6 +886,5 @@ out:
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
- if (adev->atif)
- kfree(adev->atif);
+ kfree(adev->atif);
}
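
amdgpu_acpi_get_backlight_caps() gives display code a safe way to read the ATIF-reported range without touching ACPI internals. A hypothetical consumer scaling a 0-255 brightness level into the panel's input-signal window, with a full-range fallback when the query failed (caps fields are the ones filled in above):

#include <linux/kernel.h>

static u32 scale_brightness(struct amdgpu_device *adev, u32 level)
{
        struct amdgpu_dm_backlight_caps caps;
        int min = 0, max = 255;         /* full range if the query failed */

        amdgpu_acpi_get_backlight_caps(adev, &caps);
        if (caps.caps_valid) {
                min = caps.min_input_signal;
                max = caps.max_input_signal;
        }

        return min + DIV_ROUND_CLOSEST((max - min) * level, 255);
}
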
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1580ec60b89f..2dfaf158ef07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -26,15 +26,26 @@
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>
+#include <linux/dma-buf.h>
const struct kgd2kfd_calls *kgd2kfd;
static const unsigned int compute_vmid_bitmap = 0xFF00;
+/* Total memory size in system memory and all GPU VRAM. Used to
+ * estimate the worst-case amount of memory to reserve for page tables
+ */
+uint64_t amdgpu_amdkfd_total_mem_size;
+
int amdgpu_amdkfd_init(void)
{
+ struct sysinfo si;
int ret;
+ si_meminfo(&si);
+ amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
+ amdgpu_amdkfd_total_mem_size *= si.mem_unit;
+
#ifdef CONFIG_HSA_AMD
ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
if (ret)
@@ -73,9 +84,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
@@ -85,8 +98,11 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
return;
}
- adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
- adev->pdev, kfd2kgd);
+ adev->kfd.dev = kgd2kfd->probe((struct kgd_dev *)adev,
+ adev->pdev, kfd2kgd);
+
+ if (adev->kfd.dev)
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}
/**
@@ -126,7 +142,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
int i, n;
int last_valid_bit;
- if (adev->kfd) {
+
+ if (adev->kfd.dev) {
struct kgd2kfd_shared_resources gpu_resources = {
.compute_vmid_bitmap = compute_vmid_bitmap,
.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
@@ -144,7 +161,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
KGD_MAX_QUEUES);
/* remove the KIQ bit as well */
- if (adev->gfx.kiq.ring.ready)
+ if (adev->gfx.kiq.ring.sched.ready)
clear_bit(amdgpu_gfx_queue_to_bit(adev,
adev->gfx.kiq.ring.me - 1,
adev->gfx.kiq.ring.pipe,
@@ -165,7 +182,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
&gpu_resources.doorbell_start_offset);
if (adev->asic_type < CHIP_VEGA10) {
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
return;
}
@@ -179,25 +196,14 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
* process in case of 64-bit doorbells so we
* can use each doorbell assignment twice.
*/
- if (adev->asic_type == CHIP_VEGA10) {
- gpu_resources.sdma_doorbell[0][i] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
- } else {
- gpu_resources.sdma_doorbell[0][i] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
- gpu_resources.sdma_doorbell[0][i+1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
- gpu_resources.sdma_doorbell[1][i+1] =
- AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
- }
+ gpu_resources.sdma_doorbell[0][i] =
+ adev->doorbell_index.sdma_engine0 + (i >> 1);
+ gpu_resources.sdma_doorbell[0][i+1] =
+ adev->doorbell_index.sdma_engine0 + 0x200 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i] =
+ adev->doorbell_index.sdma_engine1 + (i >> 1);
+ gpu_resources.sdma_doorbell[1][i+1] =
+ adev->doorbell_index.sdma_engine1 + 0x200 + (i >> 1);
}
/* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
* SDMA, IH and VCN. So don't use them for the CP.
@@ -205,37 +211,37 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
gpu_resources.reserved_doorbell_mask = 0x1e0;
gpu_resources.reserved_doorbell_val = 0x0e0;
- kgd2kfd->device_init(adev->kfd, &gpu_resources);
+ kgd2kfd->device_init(adev->kfd.dev, &gpu_resources);
}
}
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
- if (adev->kfd) {
- kgd2kfd->device_exit(adev->kfd);
- adev->kfd = NULL;
+ if (adev->kfd.dev) {
+ kgd2kfd->device_exit(adev->kfd.dev);
+ adev->kfd.dev = NULL;
}
}
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry)
{
- if (adev->kfd)
- kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
+ if (adev->kfd.dev)
+ kgd2kfd->interrupt(adev->kfd.dev, ih_ring_entry);
}
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
- if (adev->kfd)
- kgd2kfd->suspend(adev->kfd);
+ if (adev->kfd.dev)
+ kgd2kfd->suspend(adev->kfd.dev);
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->resume(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->resume(adev->kfd.dev);
return r;
}
@@ -244,8 +250,8 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->pre_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->pre_reset(adev->kfd.dev);
return r;
}
@@ -254,8 +260,8 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (adev->kfd)
- r = kgd2kfd->post_reset(adev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd->post_reset(adev->kfd.dev);
return r;
}
@@ -268,9 +274,9 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
amdgpu_device_gpu_recover(adev, NULL);
}
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9)
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr, bool mqd_gfx9)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
@@ -340,7 +346,7 @@ allocate_mem_reserve_bo_failed:
return r;
}
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
@@ -351,8 +357,8 @@ void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
amdgpu_bo_unref(&(bo));
}
-void get_local_mem_info(struct kgd_dev *kgd,
- struct kfd_local_mem_info *mem_info)
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
@@ -383,7 +389,7 @@ void get_local_mem_info(struct kgd_dev *kgd,
mem_info->mem_clk_max = 100;
}
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -392,7 +398,7 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
return 0;
}
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -405,7 +411,7 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
return 100;
}
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_cu_info acu_info = adev->gfx.cu_info;
@@ -428,6 +434,62 @@ void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
cu_info->lds_size = acu_info.lds_size;
}
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dma_buf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint64_t metadata_flags;
+ int r = -EINVAL;
+
+ dma_buf = dma_buf_get(dma_buf_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ goto out_put;
+
+ obj = dma_buf->priv;
+ if (obj->dev->driver != adev->ddev->driver)
+ /* Can't handle buffers from different drivers */
+ goto out_put;
+
+ adev = obj->dev->dev_private;
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ goto out_put;
+
+ r = 0;
+ if (dma_buf_kgd)
+ *dma_buf_kgd = (struct kgd_dev *)adev;
+ if (bo_size)
+ *bo_size = amdgpu_bo_size(bo);
+ if (metadata_size)
+ *metadata_size = bo->metadata_size;
+ if (metadata_buffer)
+ r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
+ metadata_size, &metadata_flags);
+ if (flags) {
+ *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+ }
+
+out_put:
+ dma_buf_put(dma_buf);
+ return r;
+}
+
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -510,7 +572,7 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
- if (adev->kfd) {
+ if (adev->kfd.dev) {
if ((1 << vmid) & compute_vmid_bitmap)
return true;
}
@@ -524,7 +586,7 @@ bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
return false;
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
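
Two themes run through this file: the kfd pointer becomes an embedded struct amdgpu_kfd_dev so per-device KFD state (such as vram_used) has a home, and the new total-memory estimate sums low system memory with every probed GPU's VRAM. A sketch of just the sampling step, matching the si.totalram - si.totalhigh computation above:

#include <linux/mm.h>

/* Sampling step only; each probed GPU then adds its VRAM size, as in
 * amdgpu_amdkfd_device_probe() above.
 */
static uint64_t kfd_system_mem_size(void)
{
        struct sysinfo si;

        si_meminfo(&si);
        /* pages -> bytes; highmem pages are deliberately excluded */
        return (uint64_t)(si.totalram - si.totalhigh) * si.mem_unit;
}
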
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 8e0d4f7196b4..70429f7aa9a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -35,6 +34,7 @@
#include "amdgpu_vm.h"
extern const struct kgd2kfd_calls *kgd2kfd;
+extern uint64_t amdgpu_amdkfd_total_mem_size;
struct amdgpu_device;
@@ -77,6 +77,11 @@ struct amdgpu_amdkfd_fence {
char timeline_name[TASK_COMM_LEN];
};
+struct amdgpu_kfd_dev {
+ struct kfd_dev *dev;
+ uint64_t vram_used;
+};
+
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -134,16 +139,21 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
/* Shared API */
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9);
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-void get_local_mem_info(struct kgd_dev *kgd,
- struct kfd_local_mem_info *mem_info);
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
-
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
-void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr, bool mqd_gfx9);
+void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+ struct kfd_local_mem_info *mem_info);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
+
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
+int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
+ struct kgd_dev **dmabuf_kgd,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
@@ -195,7 +205,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
struct kfd_vm_fault_info *info);
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dmabuf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset);
+
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 244d9834a381..ff7fac7df34b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -23,6 +23,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
@@ -173,13 +174,6 @@ static int get_tile_config(struct kgd_dev *kgd,
}
static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_local_mem_info = get_local_mem_info,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_pasid_alloc,
- .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
@@ -200,28 +194,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
- .get_cu_info = get_cu_info,
- .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
- .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
- .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
- .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
- .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
- .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
- .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
- .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
- .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
- .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
- .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .submit_ib = amdgpu_amdkfd_submit_ib,
- .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
- .gpu_recover = amdgpu_amdkfd_gpu_reset,
- .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 9f149914ad6c..56ea929f524b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -24,6 +24,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
@@ -128,13 +129,6 @@ static int get_tile_config(struct kgd_dev *kgd,
}
static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_local_mem_info = get_local_mem_info,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_pasid_alloc,
- .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
@@ -157,27 +151,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
- .get_cu_info = get_cu_info,
- .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
- .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
- .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
- .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
- .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
- .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
- .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
- .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
- .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
- .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
- .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .submit_ib = amdgpu_amdkfd_submit_ib,
- .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
- .gpu_recover = amdgpu_amdkfd_gpu_reset,
- .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 42cb4c4e0929..5c51d4910650 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -26,6 +26,7 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
+#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
@@ -46,38 +47,9 @@
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
+#include "mmhub_v1_0.h"
+#include "gfxhub_v1_0.h"
-/* HACK: MMHUB and GC both have VM-related register with the same
- * names but different offsets. Define the MMHUB register we need here
- * with a prefix. A proper solution would be to move the functions
- * programming these registers into gfx_v9_0.c and mmhub_v1_0.c
- * respectively.
- */
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ 0x06f3
-#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX 0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK 0x0705
-#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX 0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x072b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x072c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x074b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x074c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0
-
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x076b
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x076c
-#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0
-
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0727
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0728
-#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0
#define V9_PIPE_PER_MEC (4)
#define V9_QUEUES_PER_PIPE_MEC (8)
@@ -167,13 +139,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
}
static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_local_mem_info = get_local_mem_info,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .alloc_pasid = amdgpu_pasid_alloc,
- .free_pasid = amdgpu_pasid_free,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_interrupts = kgd_init_interrupts,
@@ -196,26 +161,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = amdgpu_amdkfd_get_tile_config,
- .get_cu_info = get_cu_info,
- .get_vram_usage = amdgpu_amdkfd_get_vram_usage,
- .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
- .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
- .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
- .release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
- .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
- .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
- .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
- .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
- .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
- .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
- .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
- .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
- .submit_ib = amdgpu_amdkfd_submit_ib,
- .gpu_recover = amdgpu_amdkfd_gpu_reset,
- .set_compute_idle = amdgpu_amdkfd_set_compute_idle,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
@@ -785,15 +733,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- uint32_t req = (1 << vmid) |
- (0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK |
- VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK;
-
- mutex_lock(&adev->srbm_mutex);
/* Use legacy mode tlb invalidation.
*
@@ -810,34 +749,7 @@ static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
 * TODO 2: support range-based invalidation, requires kfd2kgd
* interface change
*/
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
- 0xffffffff);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
- 0x0000001f);
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0,
- mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32),
- 0xffffffff);
- WREG32(SOC15_REG_OFFSET(MMHUB, 0,
- mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32),
- 0x0000001f);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req);
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ),
- req);
-
- while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) &
- (1 << vmid)))
- cpu_relax();
-
- while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0,
- mmMMHUB_VM_INVALIDATE_ENG16_ACK)) &
- (1 << vmid)))
- cpu_relax();
-
- mutex_unlock(&adev->srbm_mutex);
-
+ amdgpu_gmc_flush_gpu_tlb(adev, vmid, 0);
}
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid)
@@ -876,7 +788,7 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
if (adev->in_gpu_reset)
return -EIO;
- if (ring->ready)
+ if (ring->sched.ready)
return invalidate_tlbs_with_kiq(adev, pasid);
for (vmid = 0; vmid < 16; vmid++) {
@@ -1016,7 +928,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint64_t base = page_table_base | AMDGPU_PTE_VALID;
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
pr_err("trying to set page table base for wrong VMID %u\n",
@@ -1028,25 +939,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
* now, all processes share the same address space size, like
* on GFX8 and older.
*/
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
- upper_32_bits(adev->vm_manager.max_pfn - 1));
-
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
- WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0);
-
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2),
- lower_32_bits(adev->vm_manager.max_pfn - 1));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2),
- upper_32_bits(adev->vm_manager.max_pfn - 1));
+ mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base));
- WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base));
+ gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index df0a059565f9..be1ab43473c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
+#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
@@ -46,9 +47,9 @@
/* Impose limit on how much memory KFD can use */
static struct {
uint64_t max_system_mem_limit;
- uint64_t max_userptr_mem_limit;
+ uint64_t max_ttm_mem_limit;
int64_t system_mem_used;
- int64_t userptr_mem_used;
+ int64_t ttm_mem_used;
spinlock_t mem_limit_lock;
} kfd_mem_limit;
@@ -90,8 +91,8 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm,
}
/* Set memory usage limits. Currently, the limits are
- * System (kernel) memory - 3/8th System RAM
- * Userptr memory - 3/4th System RAM
+ * System (TTM + userptr) memory - 3/4th System RAM
+ * TTM memory - 3/8th System RAM
*/
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
@@ -103,48 +104,61 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
- kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3);
- kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2);
- pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n",
+ kfd_mem_limit.max_system_mem_limit = (mem >> 1) + (mem >> 2);
+ kfd_mem_limit.max_ttm_mem_limit = (mem >> 1) - (mem >> 3);
+ pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
(kfd_mem_limit.max_system_mem_limit >> 20),
- (kfd_mem_limit.max_userptr_mem_limit >> 20));
+ (kfd_mem_limit.max_ttm_mem_limit >> 20));
}
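/*
 * A quick standalone check (userspace C, not kernel code; the 16 GiB
 * figure is an assumption for illustration) that the shift expressions
 * above really evaluate to 3/4 and 3/8 of system RAM:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mem = 16ULL << 30;				/* assume 16 GiB of RAM */
	uint64_t system_limit = (mem >> 1) + (mem >> 2);	/* 3/4 of mem: 12 GiB */
	uint64_t ttm_limit = (mem >> 1) - (mem >> 3);		/* 3/8 of mem: 6 GiB */

	printf("system limit %lluM, TTM limit %lluM\n",
	       (unsigned long long)(system_limit >> 20),
	       (unsigned long long)(ttm_limit >> 20));
	return 0;
}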
-static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain)
+static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain, bool sg)
{
- size_t acc_size;
+ size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
+ uint64_t reserved_for_pt = amdgpu_amdkfd_total_mem_size >> 9;
int ret = 0;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
sizeof(struct amdgpu_bo));
- spin_lock(&kfd_mem_limit.mem_limit_lock);
+ vram_needed = 0;
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- if (kfd_mem_limit.system_mem_used + (acc_size + size) >
- kfd_mem_limit.max_system_mem_limit) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
- kfd_mem_limit.system_mem_used += (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
- if ((kfd_mem_limit.system_mem_used + acc_size >
- kfd_mem_limit.max_system_mem_limit) ||
- (kfd_mem_limit.userptr_mem_used + (size + acc_size) >
- kfd_mem_limit.max_userptr_mem_limit)) {
- ret = -ENOMEM;
- goto err_no_mem;
- }
- kfd_mem_limit.system_mem_used += acc_size;
- kfd_mem_limit.userptr_mem_used += size;
+ /* TTM GTT memory */
+ system_mem_needed = acc_size + size;
+ ttm_mem_needed = acc_size + size;
+ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ /* Userptr */
+ system_mem_needed = acc_size + size;
+ ttm_mem_needed = acc_size;
+ } else {
+ /* VRAM and SG */
+ system_mem_needed = acc_size;
+ ttm_mem_needed = acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM)
+ vram_needed = size;
+ }
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+
+ if ((kfd_mem_limit.system_mem_used + system_mem_needed >
+ kfd_mem_limit.max_system_mem_limit) ||
+ (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) ||
+ (adev->kfd.vram_used + vram_needed >
+ adev->gmc.real_vram_size - reserved_for_pt)) {
+ ret = -ENOMEM;
+ } else {
+ kfd_mem_limit.system_mem_used += system_mem_needed;
+ kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+ adev->kfd.vram_used += vram_needed;
}
-err_no_mem:
+
spin_unlock(&kfd_mem_limit.mem_limit_lock);
return ret;
}
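/*
 * The reserve path above boils down to a small decision table keyed on
 * (domain, sg). A userspace sketch of the same accounting rules; the
 * enum names and byte sizes are stand-ins, not the real
 * AMDGPU_GEM_DOMAIN_* values:
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

enum domain { DOM_GTT, DOM_CPU, DOM_VRAM };

static void account(enum domain d, bool sg, uint64_t size, uint64_t acc,
		    uint64_t *sys, uint64_t *ttm, uint64_t *vram)
{
	if (d == DOM_GTT) {			/* TTM GTT memory */
		*sys = acc + size;
		*ttm = acc + size;
	} else if (d == DOM_CPU && !sg) {	/* userptr */
		*sys = acc + size;
		*ttm = acc;
	} else {				/* VRAM and SG (doorbell) BOs */
		*sys = acc;
		*ttm = acc;
	}
	*vram = (d == DOM_VRAM) ? size : 0;
}

int main(void)
{
	uint64_t sys, ttm, vram;

	account(DOM_CPU, false, 1 << 20, 4096, &sys, &ttm, &vram);	/* 1 MiB userptr BO */
	printf("userptr: sys=%llu ttm=%llu vram=%llu\n",
	       (unsigned long long)sys, (unsigned long long)ttm,
	       (unsigned long long)vram);
	return 0;
}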
-static void unreserve_system_mem_limit(struct amdgpu_device *adev,
- uint64_t size, u32 domain)
+static void unreserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 domain, bool sg)
{
size_t acc_size;
@@ -154,35 +168,39 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev,
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
kfd_mem_limit.system_mem_used -= (acc_size + size);
- } else if (domain == AMDGPU_GEM_DOMAIN_CPU) {
+ kfd_mem_limit.ttm_mem_used -= (acc_size + size);
+ } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) {
+ kfd_mem_limit.system_mem_used -= (acc_size + size);
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ } else {
kfd_mem_limit.system_mem_used -= acc_size;
- kfd_mem_limit.userptr_mem_used -= size;
+ kfd_mem_limit.ttm_mem_used -= acc_size;
+ if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ adev->kfd.vram_used -= size;
+ WARN_ONCE(adev->kfd.vram_used < 0,
+ "kfd VRAM memory accounting unbalanced");
+ }
}
WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
"kfd system memory accounting unbalanced");
- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
- "kfd userptr memory accounting unbalanced");
+ WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
+ "kfd TTM memory accounting unbalanced");
spin_unlock(&kfd_mem_limit.mem_limit_lock);
}
-void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
- spin_lock(&kfd_mem_limit.mem_limit_lock);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ u32 domain = bo->preferred_domains;
+ bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU);
if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) {
- kfd_mem_limit.system_mem_used -= bo->tbo.acc_size;
- kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo);
- } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) {
- kfd_mem_limit.system_mem_used -=
- (bo->tbo.acc_size + amdgpu_bo_size(bo));
+ domain = AMDGPU_GEM_DOMAIN_CPU;
+ sg = false;
}
- WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
- "kfd system memory accounting unbalanced");
- WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0,
- "kfd userptr memory accounting unbalanced");
- spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg);
}
@@ -395,23 +413,6 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
return 0;
}
-static int sync_vm_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f)
-{
- int ret = amdgpu_sync_fence(adev, sync, f, false);
-
- /* Sync objects can't handle multiple GPUs (contexts) updating
- * sync->last_vm_update. Fortunately we don't need it for
- * KFD's purposes, so we can just drop that fence.
- */
- if (sync->last_vm_update) {
- dma_fence_put(sync->last_vm_update);
- sync->last_vm_update = NULL;
- }
-
- return ret;
-}
-
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
struct amdgpu_bo *pd = vm->root.base.bo;
@@ -422,7 +423,7 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
if (ret)
return ret;
- return sync_vm_fence(adev, sync, vm->last_update);
+ return amdgpu_sync_fence(NULL, sync, vm->last_update, false);
}
/* add_bo_to_vm - Add a BO to a VM
@@ -536,7 +537,7 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
struct amdgpu_bo *bo = mem->bo;
INIT_LIST_HEAD(&entry->head);
- entry->shared = true;
+ entry->num_shared = 1;
entry->bo = &bo->tbo;
mutex_lock(&process_info->lock);
if (userptr)
@@ -677,7 +678,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -741,7 +742,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
ctx->kfd_bo.priority = 0;
ctx->kfd_bo.tv.bo = &bo->tbo;
- ctx->kfd_bo.tv.shared = true;
+ ctx->kfd_bo.tv.num_shared = 1;
ctx->kfd_bo.user_pages = NULL;
list_add(&ctx->kfd_bo.tv.head, &ctx->list);
@@ -826,7 +827,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
/* Add the eviction fence back */
amdgpu_bo_fence(pd, &vm->process_info->eviction_fence->base, true);
- sync_vm_fence(adev, sync, bo_va->last_pt_update);
+ amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
return 0;
}
@@ -851,7 +852,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
return ret;
}
- return sync_vm_fence(adev, sync, bo_va->last_pt_update);
+ return amdgpu_sync_fence(NULL, sync, bo_va->last_pt_update, false);
}
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
@@ -886,6 +887,24 @@ update_gpuvm_pte_failed:
return ret;
}
+static struct sg_table *create_doorbell_sg(uint64_t addr, uint32_t size)
+{
+ struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+
+ if (!sg)
+ return NULL;
+ if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
+ kfree(sg);
+ return NULL;
+ }
+ sg->sgl->dma_address = addr;
+ sg->sgl->length = size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->sgl->dma_length = size;
+#endif
+ return sg;
+}
+
static int process_validate_vms(struct amdkfd_process_info *process_info)
{
struct amdgpu_vm *peer_vm;
@@ -901,6 +920,26 @@ static int process_validate_vms(struct amdkfd_process_info *process_info)
return 0;
}
+static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
+ struct amdgpu_sync *sync)
+{
+ struct amdgpu_vm *peer_vm;
+ int ret;
+
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ struct amdgpu_bo *pd = peer_vm->root.base.bo;
+
+ ret = amdgpu_sync_resv(NULL,
+ sync, pd->tbo.resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int process_update_pds(struct amdkfd_process_info *process_info,
struct amdgpu_sync *sync)
{
@@ -1149,6 +1188,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct amdgpu_bo_param bp;
@@ -1177,13 +1218,25 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
if (!offset || !*offset)
return -EINVAL;
user_addr = *offset;
+ } else if (flags & ALLOC_MEM_FLAGS_DOORBELL) {
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+ bo_type = ttm_bo_type_sg;
+ alloc_flags = 0;
+ if (size > UINT_MAX)
+ return -EINVAL;
+ sg = create_doorbell_sg(*offset, size);
+ if (!sg)
+ return -ENOMEM;
} else {
return -EINVAL;
}
*mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if (!*mem)
- return -ENOMEM;
+ if (!*mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
(*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
@@ -1199,7 +1252,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
byte_align = (adev->family == AMDGPU_FAMILY_VI &&
adev->asic_type != CHIP_FIJI &&
adev->asic_type != CHIP_POLARIS10 &&
- adev->asic_type != CHIP_POLARIS11) ?
+ adev->asic_type != CHIP_POLARIS11 &&
+ adev->asic_type != CHIP_POLARIS12) ?
VI_BO_SIZE_ALIGN : 1;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
@@ -1215,10 +1269,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_system_mem_limit(adev, size, alloc_domain);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
if (ret) {
pr_debug("Insufficient system memory\n");
- goto err_reserve_system_mem;
+ goto err_reserve_limit;
}
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
@@ -1229,7 +1283,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
bp.byte_align = byte_align;
bp.domain = alloc_domain;
bp.flags = alloc_flags;
- bp.type = ttm_bo_type_device;
+ bp.type = bo_type;
bp.resv = NULL;
ret = amdgpu_bo_create(adev, &bp, &bo);
if (ret) {
@@ -1237,6 +1291,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
domain_string(alloc_domain), ret);
goto err_bo_create;
}
+ if (bo_type == ttm_bo_type_sg) {
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ }
bo->kfd_bo = *mem;
(*mem)->bo = bo;
if (user_addr)
@@ -1266,12 +1324,17 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
allocate_init_user_pages_failed:
amdgpu_bo_unref(&bo);
/* Don't unreserve system mem limit twice */
- goto err_reserve_system_mem;
+ goto err_reserve_limit;
err_bo_create:
- unreserve_system_mem_limit(adev, size, alloc_domain);
-err_reserve_system_mem:
+ unreserve_mem_limit(adev, size, alloc_domain, !!sg);
+err_reserve_limit:
mutex_destroy(&(*mem)->lock);
kfree(*mem);
+err:
+ if (sg) {
+ sg_free_table(sg);
+ kfree(sg);
+ }
return ret;
}
@@ -1341,6 +1404,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
/* Free the sync object */
amdgpu_sync_free(&mem->sync);
+ /* If the SG is not NULL, it's one we created for a doorbell
+ * BO. We need to free it.
+ */
+ if (mem->bo->tbo.sg) {
+ sg_free_table(mem->bo->tbo.sg);
+ kfree(mem->bo->tbo.sg);
+ }
+
	/* Free the BO */
amdgpu_bo_unref(&mem->bo);
mutex_destroy(&mem->lock);
@@ -1405,7 +1476,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* the queues are still stopped and we can leave mapping for
* the next restore worker
*/
- if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
+ bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
is_invalid_userptr = true;
if (check_if_add_bo_to_vm(avm, mem)) {
@@ -1642,6 +1714,60 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
return 0;
}
+int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
+ struct dma_buf *dma_buf,
+ uint64_t va, void *vm,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ return -EINVAL;
+
+ obj = dma_buf->priv;
+ if (obj->dev->dev_private != adev)
+ /* Can't handle buffers from other devices */
+ return -EINVAL;
+
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ return -EINVAL;
+
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem)
+ return -ENOMEM;
+
+ if (size)
+ *size = amdgpu_bo_size(bo);
+
+ if (mmap_offset)
+ *mmap_offset = amdgpu_bo_mmap_offset(bo);
+
+ INIT_LIST_HEAD(&(*mem)->bo_va_list);
+ mutex_init(&(*mem)->lock);
+ (*mem)->mapping_flags =
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
+ AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC;
+
+ (*mem)->bo = amdgpu_bo_ref(bo);
+ (*mem)->va = va;
+ (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+ (*mem)->mapped_to_gpu_memory = 0;
+ (*mem)->process_info = avm->process_info;
+ add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
+ amdgpu_sync_create(&(*mem)->sync);
+
+ return 0;
+}
+
/* Evict a userptr BO by stopping the queues if necessary
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1808,7 +1934,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
validate_list.head) {
list_add_tail(&mem->resv_list.head, &resv_list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
/* Reserve all BOs and page tables for validation */
@@ -2027,7 +2153,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
list_add_tail(&mem->resv_list.head, &ctx.list);
mem->resv_list.bo = mem->validate_list.bo;
- mem->resv_list.shared = mem->validate_list.shared;
+ mem->resv_list.num_shared = mem->validate_list.num_shared;
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
@@ -2044,13 +2170,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
if (ret)
goto validate_map_fail;
- /* Wait for PD/PTs validate to finish */
- /* FIXME: I think this isn't needed */
- list_for_each_entry(peer_vm, &process_info->vm_list_head,
- vm_list_node) {
- struct amdgpu_bo *bo = peer_vm->root.base.bo;
-
- ttm_bo_wait(&bo->tbo, false, false);
+ ret = process_sync_pds_resv(process_info, &sync_obj);
+ if (ret) {
+ pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
+ goto validate_map_fail;
}
/* Validate BOs and map them to GPUVM (update VM page tables). */
@@ -2066,7 +2189,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
pr_debug("Memory eviction: Validate BOs failed. Try again\n");
goto validate_map_fail;
}
-
+ ret = amdgpu_sync_fence(NULL, &sync_obj, bo->tbo.moving, false);
+ if (ret) {
+ pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+ goto validate_map_fail;
+ }
list_for_each_entry(bo_va_entry, &mem->bo_va_list,
bo_list) {
ret = update_gpuvm_pte((struct amdgpu_device *)
@@ -2087,6 +2214,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
goto validate_map_fail;
}
+ /* Wait for validate and PT updates to finish */
amdgpu_sync_wait(&sync_obj, false);
/* Release old eviction fence and create new one, because fence only
@@ -2105,10 +2233,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
process_info->eviction_fence = new_fence;
*ef = dma_fence_get(&new_fence->base);
- /* Wait for validate to finish and attach new eviction fence */
- list_for_each_entry(mem, &process_info->kfd_bo_list,
- validate_list.head)
- ttm_bo_wait(&mem->bo->tbo, false, false);
+ /* Attach new eviction fence to all BOs */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list.head)
amdgpu_bo_fence(mem->bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 14d2982a47cc..5c79da8e1150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -118,7 +118,6 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
entry->tv.bo = &bo->tbo;
- entry->tv.shared = !bo->prime_shared_count;
if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
list->gds_obj = bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 8816c697b205..387f1cf1dc20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -330,7 +330,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
- ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+ ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
@@ -351,7 +353,6 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (type == CGS_UCODE_ID_SMU) {
if (((adev->pdev->device == 0x67ef) &&
((adev->pdev->revision == 0xe0) ||
- (adev->pdev->revision == 0xe2) ||
(adev->pdev->revision == 0xe5))) ||
((adev->pdev->device == 0x67ff) &&
((adev->pdev->revision == 0xcf) ||
@@ -359,8 +360,13 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
- } else
+ } else if ((adev->pdev->device == 0x67ef) &&
+ (adev->pdev->revision == 0xe2)) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+ } else {
strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ }
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
@@ -375,17 +381,35 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
(adev->pdev->revision == 0xe7) ||
(adev->pdev->revision == 0xef))) ||
((adev->pdev->device == 0x6fdf) &&
- (adev->pdev->revision == 0xef))) {
+ ((adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
- } else
+ } else if ((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe1) ||
+ (adev->pdev->revision == 0xf7))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+ } else {
strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ }
} else if (type == CGS_UCODE_ID_SMU_SK) {
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ if (((adev->pdev->device == 0x6987) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc3))) ||
+ ((adev->pdev->device == 0x6981) &&
+ ((adev->pdev->revision == 0x00) ||
+ (adev->pdev->revision == 0x01) ||
+ (adev->pdev->revision == 0x10)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+ } else {
+ strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ }
break;
case CHIP_VEGAM:
strcpy(fw_name, "amdgpu/vegam_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 663043c8f0f5..cf4e190c0a72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -50,7 +50,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &bo->tbo;
- p->uf_entry.tv.shared = true;
+ /* One for TTM and one for the CS job */
+ p->uf_entry.tv.num_shared = 2;
p->uf_entry.user_pages = NULL;
drm_gem_object_put_unlocked(gobj);
@@ -124,14 +125,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
goto free_chunk;
}
+ mutex_lock(&p->ctx->lock);
+
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
goto free_chunk;
}
- mutex_lock(&p->ctx->lock);
-
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
@@ -598,6 +599,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return r;
}
+ /* One for TTM and one for the CS job */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->tv.num_shared = 2;
+
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
if (p->bo_list->first_userptr != p->bo_list->num_entries)
p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
@@ -717,8 +722,14 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
gws = p->bo_list->gws_obj;
oa = p->bo_list->oa_obj;
- amdgpu_bo_list_for_each_entry(e, p->bo_list)
- e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+ /* Make sure we use the exclusive slot for shared BOs */
+ if (bo->prime_shared_count)
+ e->tv.num_shared = 0;
+ e->bo_va = amdgpu_vm_bo_find(vm, bo);
+ }
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
@@ -955,10 +966,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
- if (r)
- return r;
-
p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);
if (amdgpu_vm_debug) {
@@ -1104,7 +1111,7 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
{
int r;
struct dma_fence *fence;
- r = drm_syncobj_find_fence(p->filp, handle, 0, &fence);
+ r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
if (r)
return r;
@@ -1193,7 +1200,7 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
int i;
for (i = 0; i < p->num_post_dep_syncobjs; ++i)
- drm_syncobj_replace_fence(p->post_dep_syncobjs[i], 0, p->fence);
+ drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
@@ -1260,8 +1267,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
return 0;
error_abort:
- dma_fence_put(&job->base.s_fence->finished);
- job->base.s_fence = NULL;
+ drm_sched_job_cleanup(&job->base);
amdgpu_mn_unlock(p->mn);
error_unlock:
@@ -1285,7 +1291,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, data);
if (r) {
- DRM_ERROR("Failed to initialize parser !\n");
+ DRM_ERROR("Failed to initialize parser %d!\n", r);
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
new file mode 100644
index 000000000000..7e22be7ca68a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#include "amdgpu.h"
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+ uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+ addr -= AMDGPU_VA_RESERVED_SIZE;
+ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
+}
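/*
 * amdgpu_gmc_sign_extend() canonicalizes the computed top-of-VA address
 * into the upper half of the 64-bit space. A userspace sketch of that
 * kind of sign extension; the 48-bit VA width and the sample address are
 * assumptions for illustration:
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t sign_extend_48(uint64_t addr)
{
	/* If bit 47 is set, replicate it through the upper 16 bits. */
	if (addr & (1ULL << 47))
		addr |= 0xFFFF000000000000ULL;
	return addr;
}

int main(void)
{
	uint64_t top = 0x0000FFFFFFFFF000ULL;	/* hypothetical top-of-VA page */

	printf("0x%016llx -> 0x%016llx\n",
	       (unsigned long long)top,
	       (unsigned long long)sign_extend_48(top));
	return 0;
}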
+
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+ u32 domain, uint32_t size)
+{
+ int r;
+ void *ptr;
+
+ r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ domain, bo,
+ NULL, &ptr);
+ if (!*bo)
+ return -ENOMEM;
+
+ memset(ptr, 0, size);
+ return 0;
+}
+
+void amdgpu_free_static_csa(struct amdgpu_bo **bo)
+{
+ amdgpu_bo_free_kernel(bo, NULL, NULL);
+}
+
+/*
+ * amdgpu_map_static_csa should be called during amdgpu_vm_init; it maps
+ * the virtual address amdgpu_csa_vaddr() into this VM, and each GFX
+ * command submission should use this virtual address within its META_DATA
+ * init package to support SRIOV gfx preemption.
+ */
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+ uint64_t csa_addr, uint32_t size)
+{
+ struct ww_acquire_ctx ticket;
+ struct list_head list;
+ struct amdgpu_bo_list_entry pd;
+ struct ttm_validate_buffer csa_tv;
+ int r;
+
+ INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&csa_tv.head);
+ csa_tv.bo = &bo->tbo;
+ csa_tv.num_shared = 1;
+
+ list_add(&csa_tv.head, &list);
+ amdgpu_vm_get_pd_bo(vm, &list, &pd);
+
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ if (r) {
+		DRM_ERROR("failed to reserve CSA, PD BOs: err=%d\n", r);
+ return r;
+ }
+
+ *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+ if (!*bo_va) {
+ ttm_eu_backoff_reservation(&ticket, &list);
+ DRM_ERROR("failed to create bo_va for static CSA\n");
+ return -ENOMEM;
+ }
+
+ r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
+ size);
+ if (r) {
+ DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return r;
+ }
+
+ r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
+ AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+ AMDGPU_PTE_EXECUTABLE);
+
+ if (r) {
+ DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+ amdgpu_vm_bo_rmv(adev, *bo_va);
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return r;
+ }
+
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
new file mode 100644
index 000000000000..524b4437a021
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#ifndef AMDGPU_CSA_MANAGER_H
+#define AMDGPU_CSA_MANAGER_H
+
+#define AMDGPU_CSA_SIZE (128 * 1024)
+
+uint32_t amdgpu_get_total_csa_size(struct amdgpu_device *adev);
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+ u32 domain, uint32_t size);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+ uint64_t csa_addr, uint32_t size);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f9b54236102d..d85184b5b35c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_UVD_ENC] = 1,
[AMDGPU_HW_IP_VCN_DEC] = 1,
[AMDGPU_HW_IP_VCN_ENC] = 1,
+ [AMDGPU_HW_IP_VCN_JPEG] = 1,
};
static int amdgput_ctx_total_num_entities(void)
@@ -247,7 +248,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
return -ENOMEM;
mutex_lock(&mgr->lock);
- r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+ r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
if (r < 0) {
mutex_unlock(&mgr->lock);
kfree(ctx);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 30bc345d6fdf..b60afeade50a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -59,6 +59,8 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
+#include "amdgpu_xgmi.h"
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -513,6 +515,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
+
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
adev->doorbell.base = 0;
@@ -525,15 +528,26 @@ static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
return -EINVAL;
+ amdgpu_asic_init_doorbell_index(adev);
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
- AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
+ adev->doorbell_index.max_assignment+1);
if (adev->doorbell.num_doorbells == 0)
return -EINVAL;
+	/* For Vega, reserve and map two pages on the doorbell BAR since the
+	 * SDMA paging queue doorbell uses the second page. The
+	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+	 * doorbells are in the first page, so with the paging queue enabled
+	 * the max num_doorbells grows by one page (0x400 in dwords).
+ */
+ if (adev->asic_type >= CHIP_VEGA10)
+ adev->doorbell.num_doorbells += 0x400;
+
adev->doorbell.ptr = ioremap(adev->doorbell.base,
adev->doorbell.num_doorbells *
sizeof(u32));
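/*
 * Sanity check for the "+= 0x400" above: one extra 4 KiB doorbell page
 * holds exactly 0x400 32-bit doorbells (userspace sketch):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	printf("0x%x\n", (unsigned int)(4096 / sizeof(uint32_t)));	/* prints 0x400 */
	return 0;
}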
@@ -1656,7 +1670,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* right after GMC hw init, we create CSA */
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_allocate_static_csa(adev);
+ r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("allocate CSA failed %d\n", r);
return r;
@@ -1681,7 +1697,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
return r;
- amdgpu_xgmi_add_device(adev);
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
if (amdgpu_sriov_vf(adev))
@@ -1848,6 +1865,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_remove_device(adev);
+
amdgpu_amdkfd_device_fini(adev);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
@@ -1890,7 +1910,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_ucode_free_bo(adev);
- amdgpu_free_static_csa(adev);
+ amdgpu_free_static_csa(&adev->virt.csa_obj);
amdgpu_device_wb_fini(adev);
amdgpu_device_vram_scratch_fini(adev);
}
@@ -2337,6 +2357,19 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
+
+static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
+{
+ struct amdgpu_device *adev =
+ container_of(__work, struct amdgpu_device, xgmi_reset_work);
+
+ adev->asic_reset_res = amdgpu_asic_reset(adev);
+ if (adev->asic_reset_res)
+		DRM_WARN("ASIC reset failed with error %d for drm dev %s",
+ adev->asic_reset_res, adev->ddev->unique);
+}
+
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -2435,6 +2468,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
amdgpu_device_delay_enable_gfx_off);
+ INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+
adev->gfx.gfx_off_req_count = 1;
adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
@@ -2455,9 +2490,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* doorbell bar mapping */
- amdgpu_device_doorbell_init(adev);
-
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
@@ -2476,6 +2508,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+	/* doorbell bar mapping and doorbell index init */
+ amdgpu_device_doorbell_init(adev);
+
/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
@@ -3148,86 +3183,6 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
return 0;
}
-/**
- * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
- *
- * @adev: amdgpu device pointer
- *
- * attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means succeeded otherwise failed
- */
-static int amdgpu_device_reset(struct amdgpu_device *adev)
-{
- bool need_full_reset, vram_lost = 0;
- int r;
-
- need_full_reset = amdgpu_device_ip_need_full_reset(adev);
-
- if (!need_full_reset) {
- amdgpu_device_ip_pre_soft_reset(adev);
- r = amdgpu_device_ip_soft_reset(adev);
- amdgpu_device_ip_post_soft_reset(adev);
- if (r || amdgpu_device_ip_check_soft_reset(adev)) {
- DRM_INFO("soft reset failed, will fallback to full reset!\n");
- need_full_reset = true;
- }
- }
-
- if (need_full_reset) {
- r = amdgpu_device_ip_suspend(adev);
-
-retry:
- r = amdgpu_asic_reset(adev);
- /* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
-
- if (!r) {
- dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_device_ip_resume_phase1(adev);
- if (r)
- goto out;
-
- vram_lost = amdgpu_device_check_vram_lost(adev);
- if (vram_lost) {
- DRM_ERROR("VRAM is lost!\n");
- atomic_inc(&adev->vram_lost_counter);
- }
-
- r = amdgpu_gtt_mgr_recover(
- &adev->mman.bdev.man[TTM_PL_TT]);
- if (r)
- goto out;
-
- r = amdgpu_device_fw_loading(adev);
- if (r)
- return r;
-
- r = amdgpu_device_ip_resume_phase2(adev);
- if (r)
- goto out;
-
- if (vram_lost)
- amdgpu_device_fill_reset_magic(adev);
- }
- }
-
-out:
- if (!r) {
- amdgpu_irq_gpu_reset_resume_helper(adev);
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_device_ip_suspend(adev);
- need_full_reset = true;
- goto retry;
- }
- }
-
- if (!r)
- r = amdgpu_device_recover_vram(adev);
-
- return r;
-}
/**
* amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
@@ -3295,40 +3250,46 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
return false;
}
- if (amdgpu_gpu_recovery == 0 || (amdgpu_gpu_recovery == -1 &&
- !amdgpu_sriov_vf(adev))) {
- DRM_INFO("GPU recovery disabled.\n");
- return false;
- }
+ if (amdgpu_gpu_recovery == 0)
+ goto disabled;
- return true;
-}
+ if (amdgpu_sriov_vf(adev))
+ return true;
-/**
- * amdgpu_device_gpu_recover - reset the asic and recover scheduler
- *
- * @adev: amdgpu device pointer
- * @job: which job trigger hang
- *
- * Attempt to reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
- struct amdgpu_job *job)
-{
- int i, r, resched;
+ if (amdgpu_gpu_recovery == -1) {
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA20:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ break;
+ default:
+ goto disabled;
+ }
+ }
- dev_info(adev->dev, "GPU reset begin!\n");
+ return true;
- mutex_lock(&adev->lock_reset);
- atomic_inc(&adev->gpu_reset_counter);
- adev->in_gpu_reset = 1;
+disabled:
+ DRM_INFO("GPU recovery disabled.\n");
+ return false;
+}
- /* Block kfd */
- amdgpu_amdkfd_pre_reset(adev);
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ bool *need_full_reset_arg)
+{
+ int i, r = 0;
+ bool need_full_reset = *need_full_reset_arg;
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -3348,10 +3309,144 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_fence_driver_force_completion(ring);
}
- if (amdgpu_sriov_vf(adev))
- r = amdgpu_device_reset_sriov(adev, job ? false : true);
- else
- r = amdgpu_device_reset(adev);
+
+
+ if (!amdgpu_sriov_vf(adev)) {
+
+ if (!need_full_reset)
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
+
+ if (!need_full_reset) {
+ amdgpu_device_ip_pre_soft_reset(adev);
+ r = amdgpu_device_ip_soft_reset(adev);
+ amdgpu_device_ip_post_soft_reset(adev);
+ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+ DRM_INFO("soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
+ }
+ }
+
+ if (need_full_reset)
+ r = amdgpu_device_ip_suspend(adev);
+
+ *need_full_reset_arg = need_full_reset;
+ }
+
+ return r;
+}
+
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+ struct list_head *device_list_handle,
+ bool *need_full_reset_arg)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ bool need_full_reset = *need_full_reset_arg, vram_lost = false;
+ int r = 0;
+
+ /*
+	 * ASIC reset has to be done on all XGMI hive nodes ASAP
+ * to allow proper links negotiation in FW (within 1 sec)
+ */
+ if (need_full_reset) {
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ } else
+ r = amdgpu_asic_reset(tmp_adev);
+
+ if (r) {
+				DRM_ERROR("ASIC reset failed with error %d for drm dev %s",
+ r, tmp_adev->ddev->unique);
+ break;
+ }
+ }
+
+		/* For XGMI wait for all PSP resets to complete before proceeding */
+ if (!r) {
+ list_for_each_entry(tmp_adev, device_list_handle,
+ gmc.xgmi.head) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+ }
+
+
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ if (need_full_reset) {
+ /* post card */
+ if (amdgpu_atom_asic_init(tmp_adev->mode_info.atom_context))
+ DRM_WARN("asic atom init failed!");
+
+ if (!r) {
+ dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+ r = amdgpu_device_ip_resume_phase1(tmp_adev);
+ if (r)
+ goto out;
+
+ vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
+ if (vram_lost) {
+ DRM_ERROR("VRAM is lost!\n");
+ atomic_inc(&tmp_adev->vram_lost_counter);
+ }
+
+ r = amdgpu_gtt_mgr_recover(
+ &tmp_adev->mman.bdev.man[TTM_PL_TT]);
+ if (r)
+ goto out;
+
+ r = amdgpu_device_fw_loading(tmp_adev);
+ if (r)
+ return r;
+
+ r = amdgpu_device_ip_resume_phase2(tmp_adev);
+ if (r)
+ goto out;
+
+ if (vram_lost)
+ amdgpu_device_fill_reset_magic(tmp_adev);
+
+ /* Update PSP FW topology after reset */
+ if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ }
+ }
+
+
+out:
+ if (!r) {
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
+ r = amdgpu_device_ip_suspend(tmp_adev);
+ need_full_reset = true;
+ r = -EAGAIN;
+ goto end;
+ }
+ }
+
+ if (!r)
+ r = amdgpu_device_recover_vram(tmp_adev);
+ else
+ tmp_adev->asic_reset_res = r;
+ }
+
+end:
+ *need_full_reset_arg = need_full_reset;
+ return r;
+}
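/*
 * The XGMI path above fans the resets out to a per-device work item and
 * then waits on each with flush_work(). A userspace analogue of that
 * fan-out/fan-in pattern (pthreads; the node count and the reset body
 * are invented for illustration):
 */
#include <pthread.h>
#include <stdio.h>

#define NUM_NODES 4

static int reset_res[NUM_NODES];

static void *do_reset(void *arg)
{
	int node = (int)(long)arg;

	reset_res[node] = 0;	/* pretend the per-node ASIC reset succeeded */
	return NULL;
}

int main(void)
{
	pthread_t workers[NUM_NODES];
	int i, r = 0;

	for (i = 0; i < NUM_NODES; i++)		/* queue_work() equivalent */
		pthread_create(&workers[i], NULL, do_reset, (void *)(long)i);

	for (i = 0; i < NUM_NODES; i++) {	/* flush_work() equivalent */
		pthread_join(workers[i], NULL);
		if (!r)
			r = reset_res[i];	/* keep the first error */
	}

	printf("combined reset result: %d\n", r);
	return 0;
}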
+
+static void amdgpu_device_post_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -3363,7 +3458,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* or all rings (in the case @job is NULL)
* after above amdgpu_reset accomplished
*/
- if ((!job || job->base.sched == &ring->sched) && !r)
+ if ((!job || job->base.sched == &ring->sched) && !adev->asic_reset_res)
drm_sched_job_recovery(&ring->sched);
kthread_unpark(ring->sched.thread);
@@ -3373,21 +3468,142 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
drm_helper_resume_force_mode(adev->ddev);
}
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
+ adev->asic_reset_res = 0;
+}
- if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
- } else {
- dev_info(adev->dev, "GPU reset(%d) succeeded!\n",atomic_read(&adev->gpu_reset_counter));
- }
+static void amdgpu_device_lock_adev(struct amdgpu_device *adev)
+{
+ mutex_lock(&adev->lock_reset);
+ atomic_inc(&adev->gpu_reset_counter);
+ adev->in_gpu_reset = 1;
+ /* Block kfd */
+ amdgpu_amdkfd_pre_reset(adev);
+}
+static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
+{
/*unlock kfd */
amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
+}
+
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu device pointer
+ * @job: which job trigger hang
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt to do soft-reset or full-reset and reinitialize Asic
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int r;
+ struct amdgpu_hive_info *hive = NULL;
+ bool need_full_reset = false;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct list_head device_list, *device_list_handle = NULL;
+
+ INIT_LIST_HEAD(&device_list);
+
+ dev_info(adev->dev, "GPU reset begin!\n");
+
+ /*
+	 * In case of an XGMI hive, disallow concurrent resets triggered by
+	 * different nodes; there is no point anyway, since the one node already
+	 * executing the reset will reset all the other nodes in the hive.
+ */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1 &&
+ !mutex_trylock(&hive->hive_lock))
+ return 0;
+
+	/* Start with adev pre asic reset first for soft reset check. */
+ amdgpu_device_lock_adev(adev);
+ r = amdgpu_device_pre_asic_reset(adev,
+ job,
+ &need_full_reset);
+ if (r) {
+		/* TODO: should we stop? */
+		DRM_ERROR("GPU pre asic reset failed with error %d for drm dev %s",
+ r, adev->ddev->unique);
+ adev->asic_reset_res = r;
+ }
+
+ /* Build list of devices to reset */
+ if (need_full_reset && adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!hive) {
+ amdgpu_device_unlock_adev(adev);
+ return -ENODEV;
+ }
+
+ /*
+ * In case we are in XGMI hive mode device reset is done for all the
+ * nodes in the hive to retrain all XGMI links and hence the reset
+ * sequence is executed in loop on all nodes.
+ */
+ device_list_handle = &hive->device_list;
+ } else {
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
+
+retry: /* Rest of adevs pre asic reset from XGMI hive. */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+
+ if (tmp_adev == adev)
+ continue;
+
+ amdgpu_device_lock_adev(tmp_adev);
+ r = amdgpu_device_pre_asic_reset(tmp_adev,
+ NULL,
+ &need_full_reset);
+		/* TODO: should we stop? */
+ if (r) {
+			DRM_ERROR("GPU pre asic reset failed with error %d for drm dev %s",
+ r, tmp_adev->ddev->unique);
+ tmp_adev->asic_reset_res = r;
+ }
+ }
+
+ /* Actual ASIC resets if needed.*/
+ /* TODO Implement XGMI hive reset logic for SRIOV */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_reset_sriov(adev, job ? false : true);
+ if (r)
+ adev->asic_reset_res = r;
+ } else {
+ r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
+ if (r && r == -EAGAIN)
+ goto retry;
+ }
+
+	/* Post ASIC reset for all devs. */
+ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ amdgpu_device_post_asic_reset(tmp_adev, tmp_adev == adev ? job : NULL);
+
+ if (r) {
+			/* bad news, how to tell it to userspace? */
+ dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
+ amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ } else {
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
+ }
+
+ amdgpu_device_unlock_adev(tmp_adev);
+ }
+
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ mutex_unlock(&hive->hive_lock);
+
+ if (r)
+ dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
return r;
}
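/*
 * The hive_lock trylock above means only one node ever drives the hive
 * reset; racing callers bail out instead of queueing up. A pthreads
 * analogue of that pattern (the second call runs while the lock is
 * deliberately held to simulate a reset already in flight):
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hive_lock = PTHREAD_MUTEX_INITIALIZER;

static void recover(int node)
{
	if (pthread_mutex_trylock(&hive_lock) != 0) {
		printf("node %d: reset already in progress, backing off\n", node);
		return;
	}
	printf("node %d: resetting the whole hive\n", node);
	pthread_mutex_unlock(&hive_lock);
}

int main(void)
{
	recover(0);			/* lock is free: node 0 resets */
	pthread_mutex_lock(&hive_lock);	/* simulate a reset in flight */
	recover(1);			/* node 1 backs off */
	pthread_mutex_unlock(&hive_lock);
	return 0;
}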
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 686a26de50f9..15ce7e681d67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -631,6 +631,11 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
if (!adev->mode_info.max_bpc_property)
return -ENOMEM;
+ adev->mode_info.abm_level_property =
+ drm_property_create_range(adev->ddev, 0,
+ "abm level", 0, 4);
+ if (!adev->mode_info.abm_level_property)
+ return -ENOMEM;
}
return 0;
@@ -857,7 +862,12 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
vtotal = mode->crtc_vtotal;
- *vpos = *vpos - vtotal;
+
+		/* With variable refresh rate displays, the vpos can exceed
+		 * the vtotal value. Clamp it to 0 to return -vbl_end instead
+ * of guessing the remaining number of lines until scanout.
+ */
+ *vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
}
/* Correct for shifted end of vbl at vbl_end. */
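/*
 * Quick userspace check of the clamp above; vtotal and the VRR-extended
 * vpos values are invented for illustration:
 */
#include <stdio.h>

static int correct_vpos(int vpos, int vtotal)
{
	return (vpos < vtotal) ? (vpos - vtotal) : 0;
}

int main(void)
{
	int vtotal = 1125;

	printf("%d\n", correct_vpos(1120, vtotal));	/* -5: inside upper vblank */
	printf("%d\n", correct_vpos(1300, vtotal));	/* 0: VRR extended the frame */
	return 0;
}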
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
new file mode 100644
index 000000000000..be620b29f4aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * GPU doorbell structures, functions & helpers
+ */
+struct amdgpu_doorbell {
+ /* doorbell mmio */
+ resource_size_t base;
+ resource_size_t size;
+ u32 __iomem *ptr;
+ u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
+};
+
+/* Reserved doorbells for amdgpu (including multimedia).
+ * KFD can use all the rest in the 2M doorbell bar.
+ * For ASICs before vega10, doorbells are 32-bit, so the
+ * index/offset is in dwords. For vega10 and later, doorbells
+ * can be 64-bit, so the index is defined in qwords.
+ */
+struct amdgpu_doorbell_index {
+ uint32_t kiq;
+ uint32_t mec_ring0;
+ uint32_t mec_ring1;
+ uint32_t mec_ring2;
+ uint32_t mec_ring3;
+ uint32_t mec_ring4;
+ uint32_t mec_ring5;
+ uint32_t mec_ring6;
+ uint32_t mec_ring7;
+ uint32_t userqueue_start;
+ uint32_t userqueue_end;
+ uint32_t gfx_ring0;
+ uint32_t sdma_engine0;
+ uint32_t sdma_engine1;
+ uint32_t sdma_engine2;
+ uint32_t sdma_engine3;
+ uint32_t sdma_engine4;
+ uint32_t sdma_engine5;
+ uint32_t sdma_engine6;
+ uint32_t sdma_engine7;
+ uint32_t ih;
+ union {
+ struct {
+ uint32_t vcn_ring0_1;
+ uint32_t vcn_ring2_3;
+ uint32_t vcn_ring4_5;
+ uint32_t vcn_ring6_7;
+ } vcn;
+ struct {
+ uint32_t uvd_ring0_1;
+ uint32_t uvd_ring2_3;
+ uint32_t uvd_ring4_5;
+ uint32_t uvd_ring6_7;
+ uint32_t vce_ring0_1;
+ uint32_t vce_ring2_3;
+ uint32_t vce_ring4_5;
+ uint32_t vce_ring6_7;
+ } uvd_vce;
+ };
+ uint32_t max_assignment;
+};
+
+typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
+{
+ AMDGPU_DOORBELL_KIQ = 0x000,
+ AMDGPU_DOORBELL_HIQ = 0x001,
+ AMDGPU_DOORBELL_DIQ = 0x002,
+ AMDGPU_DOORBELL_MEC_RING0 = 0x010,
+ AMDGPU_DOORBELL_MEC_RING1 = 0x011,
+ AMDGPU_DOORBELL_MEC_RING2 = 0x012,
+ AMDGPU_DOORBELL_MEC_RING3 = 0x013,
+ AMDGPU_DOORBELL_MEC_RING4 = 0x014,
+ AMDGPU_DOORBELL_MEC_RING5 = 0x015,
+ AMDGPU_DOORBELL_MEC_RING6 = 0x016,
+ AMDGPU_DOORBELL_MEC_RING7 = 0x017,
+ AMDGPU_DOORBELL_GFX_RING0 = 0x020,
+ AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
+ AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
+ AMDGPU_DOORBELL_IH = 0x1E8,
+ AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
+ AMDGPU_DOORBELL_INVALID = 0xFFFF
+} AMDGPU_DOORBELL_ASSIGNMENT;
+
+typedef enum _AMDGPU_VEGA20_DOORBELL_ASSIGNMENT
+{
+ /* Compute + GFX: 0~255 */
+ AMDGPU_VEGA20_DOORBELL_KIQ = 0x000,
+ AMDGPU_VEGA20_DOORBELL_HIQ = 0x001,
+ AMDGPU_VEGA20_DOORBELL_DIQ = 0x002,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING0 = 0x003,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING1 = 0x004,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING2 = 0x005,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING3 = 0x006,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING4 = 0x007,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING5 = 0x008,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING6 = 0x009,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING7 = 0x00A,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_START = 0x00B,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_END = 0x08A,
+ AMDGPU_VEGA20_DOORBELL_GFX_RING0 = 0x08B,
+ /* SDMA: 256~335 */
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0 = 0x100,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1 = 0x10A,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2 = 0x114,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3 = 0x11E,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4 = 0x128,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5 = 0x132,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6 = 0x13C,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7 = 0x146,
+ /* IH: 376~391 */
+ AMDGPU_VEGA20_DOORBELL_IH = 0x178,
+ /* MMSCH: 392~407
+ * overlap the doorbell assignment with VCN as they are mutually exclusive
+ * VCE engine's doorbell is 32-bit and two VCE rings share one QWORD
+ */
+ AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_VEGA20_DOORBELL64_VCN2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_VCN4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_VCN6_7 = 0x18B,
+
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1 = 0x188,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7 = 0x18B,
+
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1 = 0x18C,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3 = 0x18D,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5 = 0x18E,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7 = 0x18F,
+ AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x18F,
+ AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF
+} AMDGPU_VEGA20_DOORBELL_ASSIGNMENT;
+
+/*
+ * 64-bit doorbells; offsets are in QWORDs and occupy 2KB of doorbell space
+ */
+typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
+{
+ /*
+ * All compute-related doorbells (kiq, hiq, diq, traditional compute queues, user queues) should be located in
+ * a contiguous range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover the whole range.
+ * Compute-related doorbells are allocated from 0x00 to 0x8a
+ */
+
+
+ /* kernel scheduling */
+ AMDGPU_DOORBELL64_KIQ = 0x00,
+
+ /* HSA interface queue and debug queue */
+ AMDGPU_DOORBELL64_HIQ = 0x01,
+ AMDGPU_DOORBELL64_DIQ = 0x02,
+
+ /* Compute engines */
+ AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
+ AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
+ AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
+ AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
+ AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
+ AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
+ AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
+ AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
+
+ /* User queue doorbell range (128 doorbells) */
+ AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
+ AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
+
+ /* Graphics engine */
+ AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
+
+ /*
+ * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
+ * Graphics voltage island aperture 1
+ * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
+ */
+
+ /* For vega10 SRIOV, the SDMA doorbells must be fixed as follows
+ * to keep the same settings as the host driver, or conflicts
+ * will occur
+ */
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
+
+ /* Interrupt handler */
+ AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
+ AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
+ AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
+
+ /* VCN engine use 32 bits doorbell */
+ AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
+ AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
+ AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
+
+ /* overlap the doorbell assignment with VCN as they are mutually exclusive
+ * VCE engine's doorbell is 32-bit and two VCE rings share one QWORD
+ */
+ AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
+ AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
+ AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
+ AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
+
+ AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
+ AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
+ AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
+ AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
+
+ AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
+ AMDGPU_DOORBELL64_INVALID = 0xFFFF
+} AMDGPU_DOORBELL64_ASSIGNMENT;
+
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+
+#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
+#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
+#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
+#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
+
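With the index table and accessors collected in one header, a ring that uses doorbells updates its write pointer through the new macros rather than a global enum. A sketch modeled on the existing set_wptr callbacks (the example_ name is ours, not a function in this patch):

	/* Sketch: a set_wptr callback using the per-device doorbell index. */
	static void example_ring_set_wptr(struct amdgpu_ring *ring)
	{
		struct amdgpu_device *adev = ring->adev;

		if (ring->use_doorbell)
			WDOORBELL64(ring->doorbell_index, ring->wptr);
	}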
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8de55f7f1a3a..9c77eaa45982 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -454,9 +454,10 @@ module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
/**
* DOC: param_buf_per_se (int)
- * Override the size of Off-Chip Pramater Cache per Shader Engine in Byte. The default is 0 (depending on gfx).
+ * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte.
+ * The default is 0 (depending on gfx).
*/
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
+MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)");
module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
/**
@@ -872,7 +873,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Vega 12 */
{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
@@ -885,6 +892,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
/* Raven */
@@ -1220,9 +1228,6 @@ static struct drm_driver kms_driver = {
.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
-static struct drm_driver *driver;
-static struct pci_driver *pdriver;
-
static struct pci_driver amdgpu_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
@@ -1252,16 +1257,14 @@ static int __init amdgpu_init(void)
goto error_fence;
DRM_INFO("amdgpu kernel modesetting enabled.\n");
- driver = &kms_driver;
- pdriver = &amdgpu_kms_pci_driver;
- driver->num_ioctls = amdgpu_max_kms_ioctl;
+ kms_driver.num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
amdgpu_amdkfd_init();
/* let modprobe override vga console setting */
- return pci_register_driver(pdriver);
+ return pci_register_driver(&amdgpu_kms_pci_driver);
error_fence:
amdgpu_sync_fini();
@@ -1273,7 +1276,7 @@ error_sync:
static void __exit amdgpu_exit(void)
{
amdgpu_amdkfd_fini();
- pci_unregister_driver(pdriver);
+ pci_unregister_driver(&amdgpu_kms_pci_driver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
amdgpu_fence_slab_fini();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5448cf27654e..ee47c11e92ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -398,9 +398,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
ring->fence_drv.irq_type = irq_type;
ring->fence_drv.initialized = true;
- dev_dbg(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
- "cpu addr 0x%p\n", ring->idx,
- ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
+ DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr "
+ "0x%016llx, cpu addr 0x%p\n", ring->name,
+ ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 11fea28f8ad3..6d11e1721147 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -248,7 +248,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
}
mb();
amdgpu_asic_flush_hdp(adev, NULL);
- amdgpu_gmc_flush_gpu_tlb(adev, 0);
+ amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
return 0;
}
@@ -259,6 +259,8 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to bind
* @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ * @dst: CPU address of the gart table
*
* Map the dma_addresses into GART entries (all asics).
* Returns 0 for success, -EINVAL for failure.
@@ -331,7 +333,7 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
mb();
amdgpu_asic_flush_hdp(adev, NULL);
- amdgpu_gmc_flush_gpu_tlb(adev, 0);
+ amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 9ff62887e4e3..afa2e2877d87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -41,6 +41,7 @@ struct amdgpu_bo;
struct amdgpu_gart {
struct amdgpu_bo *bo;
+ /* CPU kmapped address of gart table */
void *ptr;
unsigned num_gpu_pages;
unsigned num_cpu_pages;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7b3d1ebda9df..f4f00217546e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -169,7 +169,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
- tv.shared = true;
+ tv.num_shared = 1;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -604,7 +604,10 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
tv.bo = &abo->tbo;
- tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
+ if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+ tv.num_shared = 1;
+ else
+ tv.num_shared = 0;
list_add(&tv.head, &list);
} else {
gobj = NULL;
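The ttm_validate_buffer field changes from a boolean `shared` to a count `num_shared`, i.e. the number of shared-fence slots to reserve up front, so the old true/false callers map to 1/0. A reservation sketch under that reading (the example_ helper is ours; a real caller pairs this with ttm_eu_backoff_reservation() when done):

	/* Sketch: reserve a BO with one shared-fence slot pre-allocated. */
	static int example_reserve_bo(struct amdgpu_bo *bo,
				      struct ww_acquire_ctx *ticket,
				      struct list_head *list,
				      struct list_head *duplicates,
				      struct ttm_validate_buffer *tv)
	{
		tv->bo = &bo->tbo;
		tv->num_shared = 1;	/* was: tv->shared = true */
		list_add(&tv->head, list);

		return ttm_eu_reserve_buffers(ticket, list, true, duplicates);
	}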
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index d63daba9b17c..f1ddfc50bcc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -54,6 +54,8 @@ void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+extern const struct dma_buf_ops amdgpu_dmabuf_ops;
+
/*
* GEM objects.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 1a656b8657f7..97a60da62004 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -25,6 +25,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
@@ -249,7 +250,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
ring->adev = NULL;
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_KIQ;
+ ring->doorbell_index = adev->doorbell_index.kiq;
r = amdgpu_gfx_kiq_acquire(adev, ring);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b61b5c11aead..f790e15bcd08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -29,6 +29,7 @@
*/
#include "clearstate_defs.h"
#include "amdgpu_ring.h"
+#include "amdgpu_rlc.h"
/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
@@ -37,59 +38,6 @@
#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
-
-struct amdgpu_rlc_funcs {
- void (*enter_safe_mode)(struct amdgpu_device *adev);
- void (*exit_safe_mode)(struct amdgpu_device *adev);
-};
-
-struct amdgpu_rlc {
- /* for power gating */
- struct amdgpu_bo *save_restore_obj;
- uint64_t save_restore_gpu_addr;
- volatile uint32_t *sr_ptr;
- const u32 *reg_list;
- u32 reg_list_size;
- /* for clear state */
- struct amdgpu_bo *clear_state_obj;
- uint64_t clear_state_gpu_addr;
- volatile uint32_t *cs_ptr;
- const struct cs_section_def *cs_data;
- u32 clear_state_size;
- /* for cp tables */
- struct amdgpu_bo *cp_table_obj;
- uint64_t cp_table_gpu_addr;
- volatile uint32_t *cp_table_ptr;
- u32 cp_table_size;
-
- /* safe mode for updating CG/PG state */
- bool in_safe_mode;
- const struct amdgpu_rlc_funcs *funcs;
-
- /* for firmware data */
- u32 save_and_restore_offset;
- u32 clear_state_descriptor_offset;
- u32 avail_scratch_ram_locations;
- u32 reg_restore_list_size;
- u32 reg_list_format_start;
- u32 reg_list_format_separate_start;
- u32 starting_offsets_start;
- u32 reg_list_format_size_bytes;
- u32 reg_list_size_bytes;
- u32 reg_list_format_direct_reg_list_length;
- u32 save_restore_list_cntl_size_bytes;
- u32 save_restore_list_gpm_size_bytes;
- u32 save_restore_list_srm_size_bytes;
-
- u32 *register_list_format;
- u32 *register_restore;
- u8 *save_restore_list_cntl;
- u8 *save_restore_list_gpm;
- u8 *save_restore_list_srm;
-
- bool is_rlc_v2_1;
-};
-
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
struct amdgpu_mec {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6fa7ef446e46..81e6070d255b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -64,7 +64,7 @@ struct amdgpu_vmhub {
struct amdgpu_gmc_funcs {
/* flush the vm tlb via mmio */
void (*flush_gpu_tlb)(struct amdgpu_device *adev,
- uint32_t vmid);
+ uint32_t vmid, uint32_t flush_type);
/* flush the vm tlb via ring */
uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
@@ -89,7 +89,7 @@ struct amdgpu_gmc_funcs {
struct amdgpu_xgmi {
/* from psp */
- u64 device_id;
+ u64 node_id;
u64 hive_id;
/* fixed per family */
u64 node_segment_size;
@@ -99,6 +99,7 @@ struct amdgpu_xgmi {
unsigned num_physical_nodes;
/* gpu list in the same hive */
struct list_head head;
+ bool supported;
};
struct amdgpu_gmc {
@@ -151,7 +152,7 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
};
-#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
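Every flush_gpu_tlb implementation and caller gains a flush_type argument; the callers converted in this series pass 0, which keeps the previous behaviour, and backends with a single flush mode can simply ignore it. The call is otherwise unchanged:

	/* Flush the VMID-0 TLB with the default flush type, as the GART code above does. */
	amdgpu_gmc_flush_gpu_tlb(adev, 0, 0);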
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index b8963b725dfa..c48207b377bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -146,7 +146,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
fence_ctx = 0;
}
- if (!ring->ready) {
+ if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
@@ -221,8 +221,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
- amdgpu_ring_emit_ib(ring, ib, job ? job->vmid : 0,
- need_ctx_switch);
+ amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch);
need_ctx_switch = false;
}
@@ -347,19 +346,14 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
}
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ for (i = 0; i < adev->num_rings; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
long tmo;
- if (!ring || !ring->ready)
- continue;
-
- /* skip IB tests for KIQ in general for the below reasons:
- * 1. We never submit IBs to the KIQ
- * 2. KIQ doesn't use the EOP interrupts,
- * we use some other CP interrupt.
+ /* KIQ rings don't have an IB test because we never submit IBs
+ * to them and they have no interrupt support.
*/
- if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ if (!ring->sched.ready || !ring->funcs->test_ib)
continue;
/* MM engine need more time */
@@ -374,20 +368,23 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
tmo = tmo_gfx;
r = amdgpu_ring_test_ib(ring, tmo);
- if (r) {
- ring->ready = false;
-
- if (ring == &adev->gfx.gfx_ring[0]) {
- /* oh, oh, that's really bad */
- DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
- adev->accel_working = false;
- return r;
-
- } else {
- /* still not good, but we can live with it */
- DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
- ret = r;
- }
+ if (!r) {
+ DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
+ ring->name);
+ continue;
+ }
+
+ ring->sched.ready = false;
+ DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
+ ring->name, r);
+
+ if (ring == &adev->gfx.gfx_ring[0]) {
+ /* oh, oh, that's really bad */
+ adev->accel_working = false;
+ return r;
+
+ } else {
+ ret = r;
}
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 9ce8c93ec19b..f877bb78d10a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -51,14 +51,12 @@ struct amdgpu_ih_ring {
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev);
- bool (*prescreen_iv)(struct amdgpu_device *adev);
void (*decode_iv)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
void (*set_rptr)(struct amdgpu_device *adev);
};
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
-#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 52c17f6219a7..b7968f426862 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -94,23 +94,6 @@ static void amdgpu_hotplug_work_func(struct work_struct *work)
}
/**
- * amdgpu_irq_reset_work_func - execute GPU reset
- *
- * @work: work struct pointer
- *
- * Execute scheduled GPU reset (Cayman+).
- * This function is called when the IRQ handler thinks we need a GPU reset.
- */
-static void amdgpu_irq_reset_work_func(struct work_struct *work)
-{
- struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
- reset_work);
-
- if (!amdgpu_sriov_vf(adev) && amdgpu_device_should_recover_gpu(adev))
- amdgpu_device_gpu_recover(adev, NULL);
-}
-
-/**
* amdgpu_irq_disable_all - disable *all* interrupts
*
* @adev: amdgpu device pointer
@@ -162,13 +145,6 @@ static void amdgpu_irq_callback(struct amdgpu_device *adev,
u32 ring_index = ih->rptr >> 2;
struct amdgpu_iv_entry entry;
- /* Prescreening of high-frequency interrupts */
- if (!amdgpu_ih_prescreen_iv(adev))
- return;
-
- /* Before dispatching irq to IP blocks, send it to amdkfd */
- amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
-
entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
amdgpu_ih_decode_iv(adev, &entry);
@@ -262,15 +238,12 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
amdgpu_hotplug_work_func);
}
- INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
-
adev->irq.installed = true;
r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
if (r) {
adev->irq.installed = false;
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
- cancel_work_sync(&adev->reset_work);
return r;
}
adev->ddev->max_vblank_count = 0x00ffffff;
@@ -299,7 +272,6 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
pci_disable_msi(adev->pdev);
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
- cancel_work_sync(&adev->reset_work);
}
for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
@@ -392,39 +364,38 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
unsigned client_id = entry->client_id;
unsigned src_id = entry->src_id;
struct amdgpu_irq_src *src;
+ bool handled = false;
int r;
trace_amdgpu_iv(entry);
if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
- return;
- }
- if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
+ } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
- return;
- }
- if (adev->irq.virq[src_id]) {
+ } else if (adev->irq.virq[src_id]) {
generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
- } else {
- if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
- return;
- }
- src = adev->irq.client[client_id].sources[src_id];
- if (!src) {
- DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
- return;
- }
+ } else if (!adev->irq.client[client_id].sources) {
+ DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
+ } else if ((src = adev->irq.client[client_id].sources[src_id])) {
r = src->funcs->process(adev, src, entry);
- if (r)
+ if (r < 0)
DRM_ERROR("error processing interrupt (%d)\n", r);
+ else if (r)
+ handled = true;
+
+ } else {
+ DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
}
+
+ /* Send it to amdkfd as well if it isn't already handled */
+ if (!handled)
+ amdgpu_amdkfd_interrupt(adev, entry->iv_entry);
}
/**
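The reworked dispatch gives ->process callbacks a three-way return convention: negative is an error, 0 means "not mine" (the IV is then forwarded to amdkfd), and positive means handled. A hypothetical source under that convention (the function name and EXAMPLE_SRC_DATA are ours):

	/* Sketch: an IRQ source that claims its interrupts by returning 1. */
	static int example_irq_process(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
	{
		if (entry->src_data[0] != EXAMPLE_SRC_DATA)
			return 0;	/* not handled; amdkfd gets a look */

		/* ... acknowledge and handle the event ... */
		return 1;		/* handled; amdkfd is skipped */
	}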
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 755f733bf0d9..e0af44fd6a0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -112,6 +112,8 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
+ drm_sched_job_cleanup(s_job);
+
amdgpu_ring_priority_put(ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 57cfe78a262b..e1b46a6703de 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -33,6 +33,8 @@
#define to_amdgpu_job(sched_job) \
container_of((sched_job), struct amdgpu_job, base)
+#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
+
struct amdgpu_fence;
struct amdgpu_job {
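AMDGPU_JOB_GET_VMID folds the NULL check that emit_ib callbacks need for direct (job-less) submissions into one expression; the emit_ib rework later in this series passes the job pointer through. The intended call-site shape (sketch):

	/* Sketch: an emit_ib callback using the NULL-safe vmid accessor. */
	static void example_ring_emit_ib(struct amdgpu_ring *ring,
					 struct amdgpu_job *job,
					 struct amdgpu_ib *ib,
					 bool ctx_switch)
	{
		unsigned vmid = AMDGPU_JOB_GET_VMID(job); /* 0 when job == NULL */

		/* ... emit the IB packet with this vmid ... */
	}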
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 81732a84c2ab..bc62bf41b7e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -336,7 +336,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- if (adev->gfx.gfx_ring[i].ready)
+ if (adev->gfx.gfx_ring[i].sched.ready)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -344,7 +344,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- if (adev->gfx.compute_ring[i].ready)
+ if (adev->gfx.compute_ring[i].sched.ready)
++num_rings;
ib_start_alignment = 32;
ib_size_alignment = 32;
@@ -352,7 +352,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (adev->sdma.instance[i].ring.ready)
+ if (adev->sdma.instance[i].ring.sched.ready)
++num_rings;
ib_start_alignment = 256;
ib_size_alignment = 4;
@@ -363,7 +363,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
if (adev->uvd.harvest_config & (1 << i))
continue;
- if (adev->uvd.inst[i].ring.ready)
+ if (adev->uvd.inst[i].ring.sched.ready)
++num_rings;
}
ib_start_alignment = 64;
@@ -372,7 +372,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- if (adev->vce.ring[i].ready)
+ if (adev->vce.ring[i].sched.ready)
++num_rings;
ib_start_alignment = 4;
ib_size_alignment = 1;
@@ -384,7 +384,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- if (adev->uvd.inst[i].ring_enc[j].ready)
+ if (adev->uvd.inst[i].ring_enc[j].sched.ready)
++num_rings;
}
ib_start_alignment = 64;
@@ -392,7 +392,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
break;
case AMDGPU_HW_IP_VCN_DEC:
type = AMD_IP_BLOCK_TYPE_VCN;
- if (adev->vcn.ring_dec.ready)
+ if (adev->vcn.ring_dec.sched.ready)
++num_rings;
ib_start_alignment = 16;
ib_size_alignment = 16;
@@ -400,14 +400,14 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
case AMDGPU_HW_IP_VCN_ENC:
type = AMD_IP_BLOCK_TYPE_VCN;
for (i = 0; i < adev->vcn.num_enc_rings; i++)
- if (adev->vcn.ring_enc[i].ready)
+ if (adev->vcn.ring_enc[i].sched.ready)
++num_rings;
ib_start_alignment = 64;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_VCN_JPEG:
type = AMD_IP_BLOCK_TYPE_VCN;
- if (adev->vcn.ring_jpeg.ready)
+ if (adev->vcn.ring_jpeg.sched.ready)
++num_rings;
ib_start_alignment = 16;
ib_size_alignment = 16;
@@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
- /* Ensure IB tests are run on ring */
- flush_delayed_work(&adev->late_init_work);
-
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
ui32 = adev->accel_working;
@@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
struct amdgpu_fpriv *fpriv;
int r, pasid;
+ /* Ensure IB tests are run on ring */
+ flush_delayed_work(&adev->late_init_work);
+
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
@@ -978,7 +978,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
+ uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+ &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
if (r)
goto error_vm;
}
@@ -1048,8 +1051,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
pasid = fpriv->vm.pasid;
pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
- amdgpu_vm_fini(adev, &fpriv->vm);
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+ amdgpu_vm_fini(adev, &fpriv->vm);
if (pasid)
amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index d1b4d9b6aae0..aadd0fa42e43 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -38,7 +38,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drm_fb_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
@@ -57,7 +56,6 @@ struct amdgpu_hpd;
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
-#define to_amdgpu_plane(x) container_of(x, struct amdgpu_plane, base)
#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base);
@@ -295,13 +293,6 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
- /* it is used to enter or exit into free sync mode */
- int (*notify_freesync)(struct drm_device *dev, void *data,
- struct drm_file *filp);
- /* it is used to allow enablement of freesync mode */
- int (*set_freesync_property)(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val);
};
@@ -325,7 +316,7 @@ struct amdgpu_mode_info {
struct card_info *atom_card_info;
bool mode_config_initialized;
struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
- struct amdgpu_plane *planes[AMDGPU_MAX_PLANES];
+ struct drm_plane *planes[AMDGPU_MAX_PLANES];
struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
@@ -341,6 +332,8 @@ struct amdgpu_mode_info {
struct drm_property *dither_property;
/* maximum number of bits per channel for monitor color */
struct drm_property *max_bpc_property;
+ /* Adaptive Backlight Modulation (power feature) */
+ struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
int bios_hardcoded_edid_size;
@@ -436,11 +429,6 @@ struct amdgpu_crtc {
struct drm_pending_vblank_event *event;
};
-struct amdgpu_plane {
- struct drm_plane base;
- enum drm_plane_type plane_type;
-};
-
struct amdgpu_encoder_atom_dig {
bool linkb;
/* atom dig */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 904014dc5915..fd271f9746a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -81,7 +81,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
amdgpu_bo_subtract_pin_size(bo);
if (bo->kfd_bo)
- amdgpu_amdkfd_unreserve_system_memory_limit(bo);
+ amdgpu_amdkfd_unreserve_memory_limit(bo);
amdgpu_bo_kunmap(bo);
@@ -608,53 +608,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
}
/**
- * amdgpu_bo_backup_to_shadow - Backs up an &amdgpu_bo buffer object
- * @adev: amdgpu device object
- * @ring: amdgpu_ring for the engine handling the buffer operations
- * @bo: &amdgpu_bo buffer to be backed up
- * @resv: reservation object with embedded fence
- * @fence: dma_fence associated with the operation
- * @direct: whether to submit the job directly
- *
- * Copies an &amdgpu_bo buffer object to its shadow object.
- * Not used for now.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct)
-
-{
- struct amdgpu_bo *shadow = bo->shadow;
- uint64_t bo_addr, shadow_addr;
- int r;
-
- if (!shadow)
- return -EINVAL;
-
- bo_addr = amdgpu_bo_gpu_offset(bo);
- shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
- r = reservation_object_reserve_shared(bo->tbo.resv);
- if (r)
- goto err;
-
- r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
- amdgpu_bo_size(bo), resv, fence,
- direct, false);
- if (!r)
- amdgpu_bo_fence(bo, *fence, true);
-
-err:
- return r;
-}
-
-/**
* amdgpu_bo_validate - validate an &amdgpu_bo buffer object
* @bo: pointer to the buffer object
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 7d3312d0da11..9291c2f837e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -267,11 +267,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence, bool direct);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 59cc678de8c1..1f61ed95727c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -33,6 +33,8 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
+#include "hwmgr.h"
+#define WIDTH_4K 3840
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
@@ -1642,6 +1644,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
+ /* Skip fan attributes on APU */
+ if ((adev->flags & AMD_IS_APU) &&
+ (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
+ return 0;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
@@ -1956,6 +1971,17 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
mutex_unlock(&adev->pm.mutex);
}
+ /* enable/disable Low Memory PState for UVD (4k videos) */
+ if (adev->asic_type == CHIP_STONEY &&
+ adev->uvd.decode_image_width >= WIDTH_4K) {
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+
+ if (hwmgr && hwmgr->hwmgr_func &&
+ hwmgr->hwmgr_func->update_nbdpm_pstate)
+ hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
+ !enable,
+ true);
+ }
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
@@ -2129,7 +2155,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
+ if (ring && ring->sched.ready)
amdgpu_fence_wait_empty(ring);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index e45e929aaab5..71913a18d142 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -39,8 +39,6 @@
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
-static const struct dma_buf_ops amdgpu_dmabuf_ops;
-
/**
* amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table
* implementation
@@ -332,15 +330,13 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
}
-static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_gem_map_attach,
.detach = amdgpu_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_gem_begin_cpu_access,
- .map = drm_gem_dmabuf_kmap,
- .unmap = drm_gem_dmabuf_kunmap,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 25d2f3e757f1..6759d898b3ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -90,6 +90,8 @@ static int psp_sw_fini(void *handle)
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
+ release_firmware(adev->psp.ta_fw);
+ adev->psp.ta_fw = NULL;
return 0;
}
@@ -118,21 +120,25 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
static int
psp_cmd_submit_buf(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
- struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr,
- int index)
+ struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
int ret;
+ int index;
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
+ index = atomic_inc_return(&psp->fence_value);
ret = psp_cmd_submit(psp, ucode, psp->cmd_buf_mc_addr,
fence_mc_addr, index);
+ if (ret) {
+ atomic_dec(&psp->fence_value);
+ return ret;
+ }
- while (*((unsigned int *)psp->fence_buf) != index) {
+ while (*((unsigned int *)psp->fence_buf) != index)
msleep(1);
- }
/* the status field must be 0 after FW is loaded */
if (ucode && psp->cmd_buf_mem->resp.status) {
@@ -149,10 +155,22 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
-static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+bool psp_support_vmr_ring(struct psp_context *psp)
+{
+ if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
+ return true;
+ else
+ return false;
+}
+
+static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
+ struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, uint32_t size)
{
- cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
+ if (psp_support_vmr_ring(psp))
+ cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
+ else
+ cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
@@ -186,12 +204,12 @@ static int psp_tmr_load(struct psp_context *psp)
if (!cmd)
return -ENOMEM;
- psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
+ psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, PSP_TMR_SIZE);
DRM_INFO("reserve 0x%x from 0x%llx for PSP TMR SIZE\n",
PSP_TMR_SIZE, psp->tmr_mc_addr);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr, 1);
+ psp->fence_buf_mc_addr);
if (ret)
goto failed;
@@ -258,13 +276,194 @@ static int psp_asd_load(struct psp_context *psp)
psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr, 2);
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared,
+ uint32_t xgmi_ta_size, uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_xgmi_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16KB of memory, aligned to 4KB, from the frame buffer
+ * (local physical memory) for the XGMI TA <-> driver channel
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->xgmi_context.xgmi_shared_bo,
+ &psp->xgmi_context.xgmi_shared_mc_addr,
+ &psp->xgmi_context.xgmi_shared_buf);
+
+ return ret;
+}
+
+static int psp_xgmi_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
+
+ psp_prep_xgmi_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->xgmi_context.xgmi_shared_mc_addr,
+ psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->xgmi_context.initialized = 1;
+ psp->xgmi_context.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t xgmi_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id;
+}
+
+static int psp_xgmi_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
kfree(cmd);
return ret;
}
+static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t xgmi_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->xgmi_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_xgmi_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->xgmi_context.initialized)
+ return 0;
+
+ ret = psp_xgmi_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->xgmi_context.initialized = 0;
+
+ /* free xgmi shared memory */
+ amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
+ &psp->xgmi_context.xgmi_shared_mc_addr,
+ &psp->xgmi_context.xgmi_shared_buf);
+
+ return 0;
+}
+
+static int psp_xgmi_initialize(struct psp_context *psp)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ if (!psp->xgmi_context.initialized) {
+ ret = psp_xgmi_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ /* Load XGMI TA */
+ ret = psp_xgmi_load(psp);
+ if (ret)
+ return ret;
+
+ /* Initialize XGMI session */
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
+
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+
+ return ret;
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -292,6 +491,15 @@ static int psp_hw_start(struct psp_context *psp)
if (ret)
return ret;
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ ret = psp_xgmi_initialize(psp);
+ /* Warn on XGMI session initialization failure
+ * instead of stopping driver initialization
+ */
+ if (ret)
+ dev_err(psp->adev->dev,
+ "XGMI: Failed to initialize XGMI session\n");
+ }
return 0;
}
@@ -321,7 +529,7 @@ static int psp_np_fw_load(struct psp_context *psp)
return ret;
ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
- psp->fence_buf_mc_addr, i + 3);
+ psp->fence_buf_mc_addr);
if (ret)
return ret;
@@ -340,8 +548,10 @@ static int psp_load_fw(struct amdgpu_device *adev)
int ret;
struct psp_context *psp = &adev->psp;
- if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset != 0)
+ if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset) {
+ psp_ring_destroy(psp, PSP_RING_TYPE__KM);
goto skip_memalloc;
+ }
psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
if (!psp->cmd)
@@ -452,6 +662,10 @@ static int psp_hw_fini(void *handle)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ psp->xgmi_context.initialized == 1)
+ psp_xgmi_terminate(psp);
+
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -479,6 +693,15 @@ static int psp_suspend(void *handle)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
return 0;
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ psp->xgmi_context.initialized == 1) {
+ ret = psp_xgmi_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate xgmi ta\n");
+ return ret;
+ }
+ }
+
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8b8720e9c3f0..10decf70c9aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -27,14 +27,17 @@
#include "amdgpu.h"
#include "psp_gfx_if.h"
+#include "ta_xgmi_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
-#define PSP_ASD_SHARED_MEM_SIZE 0x4000
+#define PSP_ASD_SHARED_MEM_SIZE 0x4000
+#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE 0x400000
struct psp_context;
+struct psp_xgmi_node_info;
struct psp_xgmi_topology_info;
enum psp_ring_type
@@ -80,12 +83,20 @@ struct psp_funcs
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
- uint64_t (*xgmi_get_device_id)(struct psp_context *psp);
+ uint64_t (*xgmi_get_node_id)(struct psp_context *psp);
uint64_t (*xgmi_get_hive_id)(struct psp_context *psp);
int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
+ struct psp_xgmi_topology_info *topology);
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
+ struct psp_xgmi_topology_info *topology);
+};
+
+struct psp_xgmi_context {
+ uint8_t initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *xgmi_shared_bo;
+ uint64_t xgmi_shared_mc_addr;
+ void *xgmi_shared_buf;
};
struct psp_context
@@ -96,7 +107,7 @@ struct psp_context
const struct psp_funcs *funcs;
- /* fence buffer */
+ /* firmware buffer */
struct amdgpu_bo *fw_pri_bo;
uint64_t fw_pri_mc_addr;
void *fw_pri_buf;
@@ -134,6 +145,16 @@ struct psp_context
struct amdgpu_bo *cmd_buf_bo;
uint64_t cmd_buf_mc_addr;
struct psp_gfx_cmd_resp *cmd_buf_mem;
+
+ /* fence value associated with cmd buffer */
+ atomic_t fence_value;
+
+ /* xgmi ta firmware and buffer */
+ const struct firmware *ta_fw;
+ uint32_t ta_xgmi_ucode_version;
+ uint32_t ta_xgmi_ucode_size;
+ uint8_t *ta_xgmi_start_addr;
+ struct psp_xgmi_context xgmi_context;
};
struct amdgpu_psp_funcs {
@@ -141,21 +162,17 @@ struct amdgpu_psp_funcs {
enum AMDGPU_UCODE_ID);
};
+#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
+struct psp_xgmi_node_info {
+ uint64_t node_id;
+ uint8_t num_hops;
+ uint8_t is_sharing_enabled;
+ enum ta_xgmi_assigned_sdma_engine sdma_engine;
+};
+
struct psp_xgmi_topology_info {
- /* Generated by PSP to identify the GPU instance within xgmi connection */
- uint64_t device_id;
- /*
- * If all bits set to 0 , driver indicates it wants to retrieve the xgmi
- * connection vector topology, but not access enable the connections
- * if some or all bits are set to 1, driver indicates it want to retrieve the
- * current xgmi topology and access enable the link to GPU[i] associated
- * with the bit position in the vector.
- * On return,: bits indicated which xgmi links are present/active depending
- * on the value passed in. The relative bit offset for the relative GPU index
- * within the hive is always marked active.
- */
- uint32_t connection_mask;
- uint32_t reserved; /* must be 0 */
+ uint32_t num_nodes;
+ struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
};
#define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
@@ -177,8 +194,8 @@ struct psp_xgmi_topology_info {
((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_device_id(psp) \
- ((psp)->funcs->xgmi_get_device_id ? (psp)->funcs->xgmi_get_device_id((psp)) : 0)
+#define psp_xgmi_get_node_id(psp) \
+ ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp)) : 0)
#define psp_xgmi_get_hive_id(psp) \
((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp)) : 0)
#define psp_xgmi_get_topology_info(psp, num_device, topology) \
@@ -199,6 +216,9 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+bool psp_support_vmr_ring(struct psp_context *psp);
+
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
#endif
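The topology info is now an explicit node array rather than a device-id/connection-mask pair, so consumers simply iterate it. A read-back sketch, assuming the psp_xgmi_get_topology_info() wrapper above succeeds (the dump helper is ours):

	/* Sketch: walk the XGMI topology reported by the TA. */
	static void example_dump_topology(struct psp_context *psp, int count)
	{
		struct psp_xgmi_topology_info topology = {};
		int i;

		if (psp_xgmi_get_topology_info(psp, count, &topology))
			return;

		for (i = 0; i < topology.num_nodes; i++)
			dev_info(psp->adev->dev, "node 0x%llx: %u hops\n",
				 topology.nodes[i].node_id,
				 topology.nodes[i].num_hops);
	}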
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index b70e85ec147d..335a0edf114b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -338,7 +338,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
- ring->ready = false;
+ ring->sched.ready = false;
/* Not to finish a ring which is not initialized */
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
@@ -397,7 +397,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
{
ktime_t deadline = ktime_add_us(ktime_get(), 10000);
- if (!ring->funcs->soft_recovery)
+ if (!ring->funcs->soft_recovery || !fence)
return false;
atomic_inc(&ring->adev->gpu_reset_counter);
@@ -500,3 +500,29 @@ static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
debugfs_remove(ring->ent);
#endif
}
+
+/**
+ * amdgpu_ring_test_helper - test a ring and set its scheduler readiness status
+ *
+ * @ring: ring to test
+ *
+ * Tests the ring and sets ring->sched.ready accordingly.
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
+ ring->name, r);
+ else
+ DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
+ ring->name);
+
+ ring->sched.ready = !r;
+ return r;
+}
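amdgpu_ring_test_helper() centralizes the test-ring-then-set-ready dance that each IP block previously open-coded; later patches in this series convert the callers. The intended shape (sketch):

	/* Sketch: an IP-block hw_init path using the shared ring-test helper. */
	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;	/* ring->sched.ready already cleared and the failure logged */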
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 4caa301ce454..0beb01fef83f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -129,8 +129,9 @@ struct amdgpu_ring_funcs {
unsigned emit_ib_size;
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch);
+ bool ctx_switch);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@@ -189,7 +190,6 @@ struct amdgpu_ring {
uint64_t gpu_addr;
uint64_t ptr_mask;
uint32_t buf_mask;
- bool ready;
u32 idx;
u32 me;
u32 pipe;
@@ -229,7 +229,7 @@ struct amdgpu_ring {
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
+#define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c)))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@@ -313,4 +313,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
ring->count_dw -= count_dw;
}
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
new file mode 100644
index 000000000000..c8793e6cc3c5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
+
+/**
+ * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Put the RLC into safe mode if it is enabled and not already in safe mode.
+ */
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
+{
+ if (adev->gfx.rlc.in_safe_mode)
+ return;
+
+ /* if RLC is not enabled, do nothing */
+ if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+ return;
+
+ if (adev->cg_flags &
+ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ adev->gfx.rlc.funcs->set_safe_mode(adev);
+ adev->gfx.rlc.in_safe_mode = true;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Take the RLC out of safe mode if it is enabled and currently in safe mode.
+ */
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
+{
+ if (!(adev->gfx.rlc.in_safe_mode))
+ return;
+
+ /* if RLC is not enabled, do nothing */
+ if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+ return;
+
+ if (adev->cg_flags &
+ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ adev->gfx.rlc.funcs->unset_safe_mode(adev);
+ adev->gfx.rlc.in_safe_mode = false;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_init_sr - Init save restore block
+ *
+ * @adev: amdgpu_device pointer
+ * @dws: the size of the save/restore block, in dwords
+ *
+ * Allocate the RLC save/restore block and fill it with the register list.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
+{
+ const u32 *src_ptr;
+ volatile u32 *dst_ptr;
+ u32 i;
+ int r;
+
+ /* allocate save restore block */
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ /* write the sr buffer */
+ src_ptr = adev->gfx.rlc.reg_list;
+ dst_ptr = adev->gfx.rlc.sr_ptr;
+ for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+ amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_csb - Init clear state block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the RLC clear state block and fill it with the CS buffer contents.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
+{
+ volatile u32 *dst_ptr;
+ u32 dws;
+ int r;
+
+ /* allocate clear state block */
+ adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ /* set up the cs buffer */
+ dst_ptr = adev->gfx.rlc.cs_ptr;
+ adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_cpt - Init cp table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the RLC CP table and fill it with the CP firmware jump tables.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ /* set up the cp table */
+ amdgpu_gfx_rlc_setup_cp_table(adev);
+ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_setup_cp_table - set up the buffer of the CP table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Write the CP firmware jump tables into the CP table buffer.
+ */
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
+{
+ const __le32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me;
+ u32 bo_offset = 0;
+ u32 table_offset, table_size;
+
+ max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
+
+ /* write the cp table buffer */
+ dst_ptr = adev->gfx.rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 4) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+
+ bo_offset += table_size;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the three BOs used for the RLC save/restore block, clear state block
+ * and jump table block.
+ */
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
+{
+ /* save restore block */
+ if (adev->gfx.rlc.save_restore_obj) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
+ }
+
+ /* clear state block */
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+
+ /* jump table block */
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+}
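
Note: the safe-mode helpers above are meant to bracket clockgating register updates in per-ASIC code; the ci_dpm.c hunk later in this patch shows one such conversion. A hedged sketch of the expected calling pattern (the enclosing function and the elided register writes are illustrative, not part of this patch):

static void gfx_update_coarse_grain_cg(struct amdgpu_device *adev,
				       bool enable)
{
	/* quiesce the RLC before touching the CG control registers */
	amdgpu_gfx_rlc_enter_safe_mode(adev);

	if (enable) {
		/* ... WREG32() updates enabling CGCG/MGCG ... */
	} else {
		/* ... WREG32() updates disabling CGCG/MGCG ... */
	}

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}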
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
new file mode 100644
index 000000000000..49a8ab52113b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RLC_H__
+#define __AMDGPU_RLC_H__
+
+#include "clearstate_defs.h"
+
+struct amdgpu_rlc_funcs {
+ bool (*is_rlc_enabled)(struct amdgpu_device *adev);
+ void (*set_safe_mode)(struct amdgpu_device *adev);
+ void (*unset_safe_mode)(struct amdgpu_device *adev);
+ int (*init)(struct amdgpu_device *adev);
+ u32 (*get_csb_size)(struct amdgpu_device *adev);
+ void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+ int (*get_cp_table_num)(struct amdgpu_device *adev);
+ int (*resume)(struct amdgpu_device *adev);
+ void (*stop)(struct amdgpu_device *adev);
+ void (*reset)(struct amdgpu_device *adev);
+ void (*start)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_rlc {
+ /* for power gating */
+ struct amdgpu_bo *save_restore_obj;
+ uint64_t save_restore_gpu_addr;
+ volatile uint32_t *sr_ptr;
+ const u32 *reg_list;
+ u32 reg_list_size;
+ /* for clear state */
+ struct amdgpu_bo *clear_state_obj;
+ uint64_t clear_state_gpu_addr;
+ volatile uint32_t *cs_ptr;
+ const struct cs_section_def *cs_data;
+ u32 clear_state_size;
+ /* for cp tables */
+ struct amdgpu_bo *cp_table_obj;
+ uint64_t cp_table_gpu_addr;
+ volatile uint32_t *cp_table_ptr;
+ u32 cp_table_size;
+
+ /* safe mode for updating CG/PG state */
+ bool in_safe_mode;
+ const struct amdgpu_rlc_funcs *funcs;
+
+ /* for firmware data */
+ u32 save_and_restore_offset;
+ u32 clear_state_descriptor_offset;
+ u32 avail_scratch_ram_locations;
+ u32 reg_restore_list_size;
+ u32 reg_list_format_start;
+ u32 reg_list_format_separate_start;
+ u32 starting_offsets_start;
+ u32 reg_list_format_size_bytes;
+ u32 reg_list_size_bytes;
+ u32 reg_list_format_direct_reg_list_length;
+ u32 save_restore_list_cntl_size_bytes;
+ u32 save_restore_list_gpm_size_bytes;
+ u32 save_restore_list_srm_size_bytes;
+
+ u32 *register_list_format;
+ u32 *register_restore;
+ u8 *save_restore_list_cntl;
+ u8 *save_restore_list_gpm;
+ u8 *save_restore_list_srm;
+
+ bool is_rlc_v2_1;
+};
+
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
+
+#endif
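
Note: each GFX IP version is expected to provide an amdgpu_rlc_funcs table so the common helpers can drive it. A sketch of such a table; the gfx_v9_0_* callback names are placeholders for whatever the ASIC file actually defines:

static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
	.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
	.set_safe_mode = gfx_v9_0_set_safe_mode,
	.unset_safe_mode = gfx_v9_0_unset_safe_mode,
	.init = gfx_v9_0_rlc_init,
	.get_csb_size = gfx_v9_0_get_csb_size,
	.get_csb_buffer = gfx_v9_0_get_csb_buffer,
	.get_cp_table_num = gfx_v9_0_cp_jump_table_num,
	.resume = gfx_v9_0_rlc_resume,
	.stop = gfx_v9_0_rlc_stop,
	.reset = gfx_v9_0_rlc_reset,
	.start = gfx_v9_0_rlc_start,
};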
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index bc9244b429ef..115bb0c99b0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -28,17 +28,31 @@
 * GPU SDMA IP block helper functions.
*/
-struct amdgpu_sdma_instance * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
- if (&adev->sdma.instance[i].ring == ring)
- break;
+ if (ring == &adev->sdma.instance[i].ring ||
+ ring == &adev->sdma.instance[i].page)
+ return &adev->sdma.instance[i];
- if (i < AMDGPU_MAX_SDMA_INSTANCES)
- return &adev->sdma.instance[i];
- else
- return NULL;
+ return NULL;
+}
+
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring ||
+ ring == &adev->sdma.instance[i].page) {
+ *index = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
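
Note: amdgpu_sdma_get_index_from_ring() gives per-ASIC SDMA code a uniform way to turn either the gfx or the new page ring back into an instance index, typically to compute per-instance register offsets. A hedged usage sketch (the stride constant is hypothetical):

static u32 sdma_get_instance_offset(struct amdgpu_ring *ring)
{
	uint32_t index = 0;

	/* fails only if the ring is neither an SDMA gfx nor page ring */
	if (amdgpu_sdma_get_index_from_ring(ring, &index))
		return 0;

	return index * 0x200;	/* hypothetical per-instance stride */
}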
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 500113ec65ca..16b1a6ae5ba6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -41,6 +41,7 @@ struct amdgpu_sdma_instance {
uint32_t feature_version;
struct amdgpu_ring ring;
+ struct amdgpu_ring page;
bool burst_nop;
};
@@ -50,6 +51,7 @@ struct amdgpu_sdma {
struct amdgpu_irq_src illegal_inst_irq;
int num_instances;
uint32_t srbm_soft_reset;
+ bool has_page_queue;
};
/*
@@ -92,6 +94,7 @@ struct amdgpu_buffer_funcs {
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
struct amdgpu_sdma_instance *
-amdgpu_get_sdma_instance(struct amdgpu_ring *ring);
+amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
#endif
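
Note: the new page ring and has_page_queue flag let an SDMA instance expose a second queue dedicated to page-table updates. A hedged sketch of how init code would walk both queues (setup_ring() stands in for whatever the ASIC file does per ring):

int i;

for (i = 0; i < adev->sdma.num_instances; i++) {
	struct amdgpu_sdma_instance *sdma = &adev->sdma.instance[i];

	setup_ring(adev, &sdma->ring);		/* the usual gfx queue */
	if (adev->sdma.has_page_queue)
		setup_ring(adev, &sdma->page);	/* the new page queue */
}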
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index e9bf70e2ac51..626abca770a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -218,6 +218,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_ARGS(vm, ring, job),
TP_STRUCT__entry(
__field(u32, pasid)
+ __string(ring, ring->name)
- __field(u32, ring)
__field(u32, vmid)
__field(u32, vm_hub)
@@ -227,14 +228,14 @@ TRACE_EVENT(amdgpu_vm_grab_id,
TP_fast_assign(
__entry->pasid = vm->pasid;
- __entry->ring = ring->idx;
+ __assign_str(ring, ring->name);
__entry->vmid = job->vmid;
__entry->vm_hub = ring->funcs->vmhub;
__entry->pd_addr = job->vm_pd_addr;
__entry->needs_flush = job->vm_needs_flush;
),
- TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
- __entry->pasid, __entry->ring, __entry->vmid,
+ TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
+ __entry->pasid, __get_str(ring), __entry->vmid,
__entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
);
@@ -366,20 +367,20 @@ TRACE_EVENT(amdgpu_vm_flush,
uint64_t pd_addr),
TP_ARGS(ring, vmid, pd_addr),
TP_STRUCT__entry(
- __field(u32, ring)
+ __string(ring, ring->name)
__field(u32, vmid)
__field(u32, vm_hub)
__field(u64, pd_addr)
),
TP_fast_assign(
- __entry->ring = ring->idx;
+ __assign_str(ring, ring->name);
__entry->vmid = vmid;
__entry->vm_hub = ring->funcs->vmhub;
__entry->pd_addr = pd_addr;
),
- TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
- __entry->ring, __entry->vmid,
+ TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
+ __get_str(ring), __entry->vmid,
__entry->vm_hub, __entry->pd_addr)
);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a44fc12ae1f9..c91ec3101d00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -61,100 +61,6 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-/*
- * Global memory.
- */
-
-/**
- * amdgpu_ttm_mem_global_init - Initialize and acquire reference to
- * memory object
- *
- * @ref: Object for initialization.
- *
- * This is called by drm_global_item_ref() when an object is being
- * initialized.
- */
-static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-/**
- * amdgpu_ttm_mem_global_release - Drop reference to a memory object
- *
- * @ref: Object being removed
- *
- * This is called by drm_global_item_unref() when an object is being
- * released.
- */
-static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-/**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
- *
- * @adev: AMDGPU device for which the global structures need to be registered.
- *
- * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
- * during bring up.
- */
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- /* ensure reference is false in case init fails */
- adev->mman.mem_global_referenced = false;
-
- global_ref = &adev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &amdgpu_ttm_mem_global_init;
- global_ref->release = &amdgpu_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- goto error_mem;
- }
-
- adev->mman.bo_global_ref.mem_glob =
- adev->mman.mem_global_ref.object;
- global_ref = &adev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- goto error_bo;
- }
-
- mutex_init(&adev->mman.gtt_window_lock);
-
- adev->mman.mem_global_referenced = true;
-
- return 0;
-
-error_bo:
- drm_global_item_unref(&adev->mman.mem_global_ref);
-error_mem:
- return r;
-}
-
-static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
-{
- if (adev->mman.mem_global_referenced) {
- mutex_destroy(&adev->mman.gtt_window_lock);
- drm_global_item_unref(&adev->mman.bo_global_ref.ref);
- drm_global_item_unref(&adev->mman.mem_global_ref);
- adev->mman.mem_global_referenced = false;
- }
-}
-
static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -1758,14 +1664,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
int r;
u64 vis_vram_limit;
- /* initialize global references for vram/gtt */
- r = amdgpu_ttm_global_init(adev);
- if (r) {
- return r;
- }
+ mutex_init(&adev->mman.gtt_window_lock);
+
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
- adev->mman.bo_global_ref.ref.object,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -1922,7 +1824,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_bo_device_release(&adev->mman.bdev);
- amdgpu_ttm_global_fini(adev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}
@@ -2069,7 +1970,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
unsigned i;
int r;
- if (direct_submit && !ring->ready) {
+ if (direct_submit && !ring->sched.ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index fe8f276e9811..b5b2d101f7db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -39,8 +39,6 @@
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
struct amdgpu_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index aa6641b944a0..7ac25a1c7853 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -58,6 +58,17 @@ struct psp_firmware_header_v1_0 {
};
/* version_major=1, version_minor=0 */
+struct ta_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ta_xgmi_ucode_version;
+ uint32_t ta_xgmi_offset_bytes;
+ uint32_t ta_xgmi_size_bytes;
+ uint32_t ta_ras_ucode_version;
+ uint32_t ta_ras_offset_bytes;
+ uint32_t ta_ras_size_bytes;
+};
+
+/* version_major=1, version_minor=0 */
struct gfx_firmware_header_v1_0 {
struct common_firmware_header header;
uint32_t ucode_feature_version;
@@ -170,6 +181,7 @@ union amdgpu_firmware_header {
struct mc_firmware_header_v1_0 mc;
struct smc_firmware_header_v1_0 smc;
struct psp_firmware_header_v1_0 psp;
+ struct ta_firmware_header_v1_0 ta;
struct gfx_firmware_header_v1_0 gfx;
struct rlc_firmware_header_v1_0 rlc;
struct rlc_firmware_header_v2_0 rlc_v2_0;
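
Note: ta_firmware_header_v1_0 describes a firmware image carrying both the XGMI and RAS trusted applications. A hedged sketch of how PSP code might locate the XGMI TA inside such an image, assuming the loaded blob hangs off adev->psp.ta_fw and that byte order is handled like the other header parsers:

const struct ta_firmware_header_v1_0 *ta_hdr =
	(const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
const uint8_t *xgmi_ta;
uint32_t xgmi_ta_size;

/* the ucode array starts after the common header; the per-TA
 * offset/size fields then locate each application inside it */
xgmi_ta = (const uint8_t *)adev->psp.ta_fw->data +
	  le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes) +
	  le32_to_cpu(ta_hdr->ta_xgmi_offset_bytes);
xgmi_ta_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);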
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e5a6db6beab7..4e5d13e41f6a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -692,6 +692,8 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
buf_sizes[0x1] = dpb_size;
buf_sizes[0x2] = image_size;
buf_sizes[0x4] = min_ctx_size;
+ /* store image width to adjust NB memory pstate */
+ adev->uvd.decode_image_width = width;
return 0;
}
@@ -1243,30 +1245,20 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence;
long r;
- uint32_t ip_instance = ring->me;
r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r);
+ if (r)
goto error;
- }
r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance);
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r);
- } else {
- DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx);
+ else if (r > 0)
r = 0;
- }
dma_fence_put(fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index a3ab1a41060f..5eb63288d157 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -65,6 +65,8 @@ struct amdgpu_uvd {
struct drm_sched_entity entity;
struct delayed_work idle_work;
unsigned harvest_config;
+ /* store image width to adjust NB memory pstate */
+ unsigned decode_image_width;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 5f3f54073818..98a1b2ce2b9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1032,8 +1032,10 @@ out:
* @ib: the IB to execute
*
*/
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
amdgpu_ring_write(ring, VCE_CMD_IB);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1079,11 +1081,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
return 0;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, VCE_CMD_END);
amdgpu_ring_commit(ring);
@@ -1093,14 +1093,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed\n",
- ring->idx);
+ if (i >= timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -1121,27 +1115,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
return 0;
r = amdgpu_vce_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index a1f209eed4c4..50293652af14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -65,8 +65,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ struct amdgpu_ib *ib, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 27da13df2f11..e2e42e3fbcf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -425,11 +425,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -441,14 +439,9 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -570,30 +563,20 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
dma_fence_put(fence);
-
error:
return r;
}
@@ -606,11 +589,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: vcn enc failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, VCN_ENC_CMD_END);
amdgpu_ring_commit(ring);
@@ -620,14 +601,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed\n",
- ring->idx);
+ if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -742,27 +717,19 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
@@ -778,11 +745,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0));
@@ -796,14 +760,8 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
return r;
}
@@ -856,21 +814,18 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r = 0;
r = amdgpu_vcn_jpeg_set_reg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to set jpeg register (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto error;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto error;
- } else
+ } else {
r = 0;
+ }
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9));
@@ -879,15 +834,10 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout)
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
- else {
- DRM_ERROR("ib test failed (0x%08X)\n", tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
dma_fence_put(fence);
-
error:
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f2f358aa0597..462a04e0f5e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -23,16 +23,6 @@
#include "amdgpu.h"
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
-{
- uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
-
- addr -= AMDGPU_VA_RESERVED_SIZE;
- addr = amdgpu_gmc_sign_extend(addr);
-
- return addr;
-}
-
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
/* By now all MMIO pages except mailbox are blocked */
@@ -41,88 +31,6 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
-{
- int r;
- void *ptr;
-
- r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
- &adev->virt.csa_vmid0_addr, &ptr);
- if (r)
- return r;
-
- memset(ptr, 0, AMDGPU_CSA_SIZE);
- return 0;
-}
-
-void amdgpu_free_static_csa(struct amdgpu_device *adev) {
- amdgpu_bo_free_kernel(&adev->virt.csa_obj,
- &adev->virt.csa_vmid0_addr,
- NULL);
-}
-
-/*
- * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
- * submission of GFX should use this virtual address within META_DATA init
- * package to support SRIOV gfx preemption.
- */
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo_va **bo_va)
-{
- uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
- struct ww_acquire_ctx ticket;
- struct list_head list;
- struct amdgpu_bo_list_entry pd;
- struct ttm_validate_buffer csa_tv;
- int r;
-
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&csa_tv.head);
- csa_tv.bo = &adev->virt.csa_obj->tbo;
- csa_tv.shared = true;
-
- list_add(&csa_tv.head, &list);
- amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
- if (r) {
- DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
- return r;
- }
-
- *bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
- if (!*bo_va) {
- ttm_eu_backoff_reservation(&ticket, &list);
- DRM_ERROR("failed to create bo_va for static CSA\n");
- return -ENOMEM;
- }
-
- r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
- AMDGPU_CSA_SIZE);
- if (r) {
- DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, *bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
- }
-
- r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r) {
- DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, *bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
- }
-
- ttm_eu_backoff_reservation(&ticket, &list);
- return 0;
-}
-
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
/* enable virtual display */
@@ -162,9 +70,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
goto failed_kiq_read;
- if (in_interrupt())
- might_sleep();
-
+ might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -210,9 +116,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
goto failed_kiq_write;
- if (in_interrupt())
- might_sleep();
-
+ might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
@@ -228,6 +132,46 @@ failed_kiq_write:
pr_err("failed to write reg:%x\n", reg);
}
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+ ref, mask);
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* don't wait anymore for IRQ context */
+ if (r < 1 && in_interrupt())
+ goto failed_kiq;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq;
+
+ return;
+
+failed_kiq:
+ pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
/**
* amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device pointer.
@@ -390,7 +334,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
if (adev->fw_vram_usage.va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
- (struct amdgim_pf2vf_info_header *)(
+ (struct amd_sriov_msg_pf2vf_info_header *)(
adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
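
Note: amdgpu_virt_kiq_reg_write_reg_wait() lets SRIOV guests issue the "write the invalidation request, poll the ack" pair as a single KIQ submission instead of two trapped MMIO accesses. A hedged sketch of a GMC TLB-flush caller; hub, eng, inv_req and vmid are assumed to come from the surrounding flush routine:

if (adev->gfx.kiq.ring.sched.ready && amdgpu_sriov_runtime(adev)) {
	uint32_t req = hub->vm_inv_eng0_req + eng;
	uint32_t ack = hub->vm_inv_eng0_ack + eng;

	amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack,
					   inv_req, 1 << vmid);
	return;
}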
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 880ac113a3a9..722deefc0a7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -63,8 +63,8 @@ struct amdgpu_virt_ops {
* Firmware Reserve Frame buffer
*/
struct amdgpu_virt_fw_reserve {
- struct amdgim_pf2vf_info_header *p_pf2vf;
- struct amdgim_vf2pf_info_header *p_vf2pf;
+ struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
+ struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
unsigned int checksum_key;
};
/*
@@ -85,15 +85,17 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
};
-struct amdgim_pf2vf_info_header {
+struct amd_sriov_msg_pf2vf_info_header {
/* the total structure size in byte. */
uint32_t size;
/* version of this structure, written by the GIM */
uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
} __aligned(4);
struct amdgim_pf2vf_info_v1 {
/* header contains size and version */
- struct amdgim_pf2vf_info_header header;
+ struct amd_sriov_msg_pf2vf_info_header header;
/* max_width * max_height */
unsigned int uvd_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
@@ -112,7 +114,7 @@ struct amdgim_pf2vf_info_v1 {
struct amdgim_pf2vf_info_v2 {
/* header contains size and version */
- struct amdgim_pf2vf_info_header header;
+ struct amd_sriov_msg_pf2vf_info_header header;
/* use private key from mailbox 2 to create checksum */
uint32_t checksum;
/* The features flags of the GIM driver supports. */
@@ -137,20 +139,22 @@ struct amdgim_pf2vf_info_v2 {
uint64_t vcefw_kboffset;
/* VCE FW size in KB */
uint32_t vcefw_ksize;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 3)];
} __aligned(4);
-struct amdgim_vf2pf_info_header {
+struct amd_sriov_msg_vf2pf_info_header {
/* the total structure size in byte. */
uint32_t size;
/* version of this structure, written by the guest */
uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
} __aligned(4);
struct amdgim_vf2pf_info_v1 {
/* header contains size and version */
- struct amdgim_vf2pf_info_header header;
+ struct amd_sriov_msg_vf2pf_info_header header;
/* driver version */
char driver_version[64];
/* driver certification, 1=WHQL, 0=None */
@@ -180,7 +184,7 @@ struct amdgim_vf2pf_info_v1 {
struct amdgim_vf2pf_info_v2 {
/* header contains size and version */
- struct amdgim_vf2pf_info_header header;
+ struct amd_sriov_msg_vf2pf_info_header header;
uint32_t checksum;
/* driver version */
uint8_t driver_version[64];
@@ -206,7 +210,7 @@ struct amdgim_vf2pf_info_v2 {
uint32_t uvd_enc_usage;
/* guest uvd engine usage percentage. 0xffff means N/A. */
uint32_t uvd_enc_health;
- uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);
#define AMDGPU_FW_VRAM_VF2PF_VER 2
@@ -238,7 +242,6 @@ typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
struct amdgpu_virt {
uint32_t caps;
struct amdgpu_bo *csa_obj;
- uint64_t csa_vmid0_addr;
bool chained_ib_support;
uint32_t reg_val_offs;
struct amdgpu_irq_src ack_irq;
@@ -251,8 +254,6 @@ struct amdgpu_virt {
uint32_t gim_feature;
};
-#define AMDGPU_CSA_SIZE (8 * 1024)
-
#define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
@@ -277,17 +278,13 @@ static inline bool is_virtual_machine(void)
#endif
}
-struct amdgpu_vm;
-
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_bo_va **bo_va);
-void amdgpu_free_static_csa(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index dad0e2342df9..e73d152659a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -181,7 +181,7 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == adev->vm_manager.root_level)
/* For the root directory */
- return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
+ return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
else if (level != AMDGPU_VM_PTB)
/* Everything in between */
return 512;
@@ -617,7 +617,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- entry->tv.shared = true;
+ /* One for the VM updates, one for TTM and one for the CS job */
+ entry->tv.num_shared = 3;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -773,10 +774,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
- r = reservation_object_reserve_shared(bo->tbo.resv);
- if (r)
- return r;
-
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto error;
@@ -1656,9 +1653,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if (!amdgpu_vm_pt_descendant(adev, &cursor))
return -ENOENT;
continue;
- } else if (frag >= parent_shift) {
+ } else if (frag >= parent_shift &&
+ cursor.level - 1 != adev->vm_manager.root_level) {
/* If the fragment size is even larger than the parent
- * shift we should go up one level and check it again.
+ * shift we should go up one level and check it again
+ * unless one level up is the root level.
*/
if (!amdgpu_vm_pt_ancestor(&cursor))
return -ENOENT;
@@ -1666,10 +1665,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
}
/* Looks good so far, calculate parameters for the update */
- incr = AMDGPU_GPU_PAGE_SIZE << shift;
+ incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = (mask + 1) << shift;
+ entry_end = (uint64_t)(mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end);
@@ -1682,7 +1681,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
flags | AMDGPU_PTE_FRAG(frag));
pe_start += nptes * 8;
- dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift;
+ dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
frag_start = upd_end;
if (frag_start >= frag_end) {
@@ -1842,10 +1841,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
- if (r)
- goto error_free;
-
r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
goto error_free;
@@ -3026,6 +3021,10 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
goto error_free_root;
+ r = reservation_object_reserve_shared(root->tbo.resv, 1);
+ if (r)
+ goto error_unreserve;
+
r = amdgpu_vm_clear_bo(adev, vm, root,
adev->vm_manager.root_level,
vm->pte_support_ats);
@@ -3055,7 +3054,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
INIT_KFIFO(vm->faults);
- vm->fault_credit = 16;
return 0;
@@ -3268,42 +3266,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
/**
- * amdgpu_vm_pasid_fault_credit - Check fault credit for given PASID
- *
- * @adev: amdgpu_device pointer
- * @pasid: PASID do identify the VM
- *
- * This function is expected to be called in interrupt context.
- *
- * Returns:
- * True if there was fault credit, false otherwise
- */
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid)
-{
- struct amdgpu_vm *vm;
-
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- if (!vm) {
- /* VM not found, can't track fault credit */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- }
-
- /* No lock needed. only accessed by IRQ handler */
- if (!vm->fault_credit) {
- /* Too many faults in this VM */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
-
- vm->fault_credit--;
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
-}
-
-/**
* amdgpu_vm_manager_init - init the VM manager
*
* @adev: amdgpu_device pointer
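
Note: several vm.c hunks above widen shift operands to 64 bits before shifting. The point is that expressions like "1 << shift" and "(mask + 1) << shift" are evaluated in 32-bit int arithmetic, which overflows once the shift reaches the upper page-directory levels of a large address space. A minimal standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int shift = 36;	/* plausible for a high PD level */

	uint64_t bad  = 1 << shift;	/* int shift: overflows, undefined */
	uint64_t good = 1ULL << shift;	/* 64-bit shift: what the fix does */

	printf("bad=%#llx good=%#llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}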
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 2a8898d19c8b..e8dcfd59fc93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -229,9 +229,6 @@ struct amdgpu_vm {
/* Up to 128 pending retry page faults */
DECLARE_KFIFO(faults, u64, 128);
- /* Limit non-retry fault storms */
- unsigned int fault_credit;
-
/* Points to the KFD process VM info */
struct amdkfd_process_info *process_info;
@@ -299,8 +296,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
- unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 897afbb348c1..0b263a9857c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -23,7 +23,7 @@
*/
#include <linux/list.h>
#include "amdgpu.h"
-#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -31,15 +31,16 @@ static DEFINE_MUTEX(xgmi_mutex);
#define AMDGPU_MAX_XGMI_HIVE 8
#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
-struct amdgpu_hive_info {
- uint64_t hive_id;
- struct list_head device_list;
-};
-
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
-static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+
+void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
+{
+ return &hive->device_list;
+}
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
int i;
struct amdgpu_hive_info *tmp;
@@ -58,62 +59,99 @@ static struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
tmp = &xgmi_hives[hive_count++];
tmp->hive_id = adev->gmc.xgmi.hive_id;
INIT_LIST_HEAD(&tmp->device_list);
+ mutex_init(&tmp->hive_lock);
+
return tmp;
}
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
+{
+ int ret = -EINVAL;
+
+ /* Each PSP needs to set the latest topology */
+ ret = psp_xgmi_set_topology_info(&adev->psp,
+ hive->number_devices,
+ &hive->topology_info);
+ if (ret)
+ dev_err(adev->dev,
+ "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+ else
+ dev_info(adev->dev, "XGMI: Set topology for node %d, hive 0x%llx.\n",
+ adev->gmc.xgmi.physical_node_id,
+ adev->gmc.xgmi.hive_id);
+
+ return ret;
+}
+
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
- struct psp_xgmi_topology_info tmp_topology[AMDGPU_MAX_XGMI_DEVICE_PER_HIVE];
+ struct psp_xgmi_topology_info *hive_topology;
struct amdgpu_hive_info *hive;
struct amdgpu_xgmi *entry;
- struct amdgpu_device *tmp_adev;
+ struct amdgpu_device *tmp_adev = NULL;
int count = 0, ret = -EINVAL;
- if ((adev->asic_type < CHIP_VEGA20) ||
- (adev->flags & AMD_IS_APU) )
+ if (!adev->gmc.xgmi.supported)
return 0;
- adev->gmc.xgmi.device_id = psp_xgmi_get_device_id(&adev->psp);
+
+ adev->gmc.xgmi.node_id = psp_xgmi_get_node_id(&adev->psp);
adev->gmc.xgmi.hive_id = psp_xgmi_get_hive_id(&adev->psp);
- memset(&tmp_topology[0], 0, sizeof(tmp_topology));
mutex_lock(&xgmi_mutex);
hive = amdgpu_get_xgmi_hive(adev);
if (!hive)
goto exit;
+ hive_topology = &hive->topology_info;
+
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
list_for_each_entry(entry, &hive->device_list, head)
- tmp_topology[count++].device_id = entry->device_id;
+ hive_topology->nodes[count++].node_id = entry->node_id;
+ hive->number_devices = count;
- ret = psp_xgmi_get_topology_info(&adev->psp, count, tmp_topology);
- if (ret) {
- dev_err(adev->dev,
- "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
- adev->gmc.xgmi.device_id,
- adev->gmc.xgmi.hive_id, ret);
- goto exit;
- }
- /* Each psp need to set the latest topology */
+ /* Each PSP needs to get the latest topology */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- ret = psp_xgmi_set_topology_info(&tmp_adev->psp, count, tmp_topology);
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count, hive_topology);
if (ret) {
dev_err(tmp_adev->dev,
- "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
- tmp_adev->gmc.xgmi.device_id,
+ "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
+ tmp_adev->gmc.xgmi.node_id,
tmp_adev->gmc.xgmi.hive_id, ret);
- /* To do : continue with some node failed or disable the whole hive */
+ /* To do : continue with some node failed or disable the whole hive */
break;
}
}
- if (!ret)
- dev_info(adev->dev, "XGMI: Add node %d to hive 0x%llx.\n",
- adev->gmc.xgmi.physical_node_id,
- adev->gmc.xgmi.hive_id);
+
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ if (ret)
+ break;
+ }
exit:
mutex_unlock(&xgmi_mutex);
return ret;
}
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive;
+
+ if (!adev->gmc.xgmi.supported)
+ return;
+
+ mutex_lock(&xgmi_mutex);
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (!hive)
+ goto exit;
+
+ if (!(hive->number_devices--))
+ mutex_destroy(&hive->hive_lock);
+
+exit:
+ mutex_unlock(&xgmi_mutex);
+}
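
Note: together, add/remove give device bring-up and teardown a symmetric way to join and leave a hive. A hedged sketch of where these calls are expected to land in the common init/fini paths (simplified):

/* during IP init, once GMC has populated the XGMI info */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
	r = amdgpu_xgmi_add_device(adev);
	if (r)
		return r;
}

/* and the mirror image during teardown */
if (adev->gmc.xgmi.num_physical_nodes > 1)
	amdgpu_xgmi_remove_device(adev);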
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
new file mode 100644
index 000000000000..6151eb9c8ad3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_XGMI_H__
+#define __AMDGPU_XGMI_H__
+
+#include "amdgpu_psp.h"
+
+struct amdgpu_hive_info {
+ uint64_t hive_id;
+ struct list_head device_list;
+ struct psp_xgmi_topology_info topology_info;
+ int number_devices;
+ struct mutex hive_lock;
+};
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
+int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
+void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 79220a91abe3..86e14c754dd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
if (pi->caps_sq_ramping || pi->caps_db_ramping ||
pi->caps_td_ramping || pi->caps_tcp_ramping) {
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (enable) {
ret = ci_program_pt_config_registers(adev, didt_config_ci);
if (ret) {
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return ret;
}
}
ci_do_enable_didt(adev, enable);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index f41f5f57e9f3..71c50d8900e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1755,6 +1755,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.flush_hdp = &cik_flush_hdp,
.invalidate_hdp = &cik_invalidate_hdp,
.need_full_reset = &cik_need_full_reset,
+ .init_doorbell_index = &legacy_doorbell_index_init,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index e49c6f15a0a0..54c625a2e570 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -30,4 +30,5 @@ void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
#endif
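
Note: legacy_doorbell_index_init() backs the new .init_doorbell_index callback wired into cik_asic_funcs above, letting common code ask each ASIC family to lay out its doorbell range before any ring claims a slot. A hedged sketch of the expected call site in common init code:

/* early in device init, before any ring allocates a doorbell */
if (adev->asic_funcs->init_doorbell_index)
	adev->asic_funcs->init_doorbell_index(adev);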
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index b5775c6a857b..8a8b4967a101 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -228,34 +228,6 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* [127:96] - reserved
*/
-/**
- * cik_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
/**
* cik_ih_decode_iv - decode an interrupt vector
*
@@ -461,7 +433,6 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cik_ih_funcs = {
.get_wptr = cik_ih_get_wptr,
- .prescreen_iv = cik_ih_prescreen_iv,
.decode_iv = cik_ih_decode_iv,
.set_rptr = cik_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index b918c8886b75..45795191de1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -198,7 +198,7 @@ static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -218,9 +218,11 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (CIK).
*/
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 extra_bits = vmid & 0xf;
/* IB packet must end on a 8 DW boundary */
@@ -316,8 +318,8 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -494,18 +496,16 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- ring->ready = true;
+ ring->sched.ready = true;
}
cik_sdma_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -618,21 +618,17 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
+
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
@@ -647,15 +643,11 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
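
Note: the ring-test rework in this and the matching hunks below trades per-branch free-plus-return sequences for a single unwind label, distinguishes a timeout (-ETIMEDOUT) from a bad readback (-EINVAL), and drops the per-ASIC log chatter now that a shared helper reports failures once. A standalone sketch of the unwind pattern with stubbed resources:

    #include <errno.h>
    #include <stdio.h>

    static int acquire_resource(void) { return 0; } /* stub */
    static void release_resource(void) { }          /* stub */
    static int do_work(void) { return 0; }          /* stub */
    static int wait_for_result(void) { return 1; }  /* stub: 1 = success */

    static int ring_test_pattern(void)
    {
            int r = acquire_resource();
            if (r)
                    return r;          /* nothing to unwind yet */

            r = do_work();
            if (r)
                    goto error_free;   /* unwind through one label */

            r = wait_for_result() ? 0 : -ETIMEDOUT;

    error_free:
            release_resource();        /* single cleanup path for all exits */
            return r;
    }

    int main(void) { printf("%d\n", ring_test_pattern()); return 0; }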
@@ -678,20 +670,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -706,21 +694,16 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -822,7 +805,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
*/
static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1214,8 +1197,11 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ u8 instance_id;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+ instance_id = (entry->ring_id & 0x3) >> 0;
+ drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
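
Note: schedule_work(&adev->reset_work) kicked off a device-wide reset for any illegal SDMA instruction; drm_sched_fault() instead wakes the DRM scheduler's timeout handling on just the faulting ring, so recovery stays scoped to the queue that misbehaved. The low two bits of the IV ring_id select the SDMA instance here; the redundant ">> 0" simply keeps the same bitfield style as the gfx decoders later in the patch.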
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index df5ac4d85a00..9d3ea298e116 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -208,34 +208,6 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * cz_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* cz_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -442,7 +414,6 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
- .prescreen_iv = cz_ih_prescreen_iv,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index d76eb27945dc..1dc3013ea1d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1775,18 +1775,15 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1798,13 +1795,11 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -1845,9 +1840,11 @@ static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
}
static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -1892,17 +1889,15 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
ib.ptr[2] = 0xDEADBEEF;
@@ -1914,22 +1909,16 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -1950,9 +1939,9 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
CP_ME_CNTL__CE_HALT_MASK));
WREG32(mmSCRATCH_UMSK, 0);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -2124,12 +2113,9 @@ static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the rings */
gfx_v6_0_cp_gfx_start(adev);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
return 0;
}
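
Note: the repeated "ready = true, test, clear ready on failure" dance at each resume site collapses into amdgpu_ring_test_helper(), and ring readiness moves into the scheduler (ring->sched.ready). A hedged sketch of what such a helper does; the real one lives in the common ring code and may differ in detail:

    /* Hedged sketch, not the verbatim kernel helper. */
    int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
    {
            int r = amdgpu_ring_test_ring(ring);

            if (r)
                    DRM_DEV_ERROR(ring->adev->dev,
                                  "ring %s test failed (%d)\n",
                                  ring->name, r);

            ring->sched.ready = !r; /* readiness now lives in the scheduler */
            return r;
    }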
@@ -2227,14 +2213,11 @@ static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
WREG32(mmCP_RB2_CNTL, tmp);
WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);
- adev->gfx.compute_ring[0].ready = false;
- adev->gfx.compute_ring[1].ready = false;
for (i = 0; i < 2; i++) {
- r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]);
+ r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
if (r)
return r;
- adev->gfx.compute_ring[i].ready = true;
}
return 0;
@@ -2368,18 +2351,11 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, val);
}
-static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
volatile u32 *dst_ptr;
- u32 dws, i;
+ u32 dws;
u64 reg_list_mc_addr;
const struct cs_section_def *cs_data;
int r;
@@ -2394,26 +2370,10 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (src_ptr) {
- /* save restore block */
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_obj,
- &adev->gfx.rlc.save_restore_gpu_addr,
- (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
- r);
- gfx_v6_0_rlc_fini(adev);
+ /* init save restore block */
+ r = amdgpu_gfx_rlc_init_sr(adev, dws);
+ if (r)
return r;
- }
-
- /* write the sr buffer */
- dst_ptr = adev->gfx.rlc.sr_ptr;
- for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
- dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-
- amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
}
if (cs_data) {
@@ -2428,7 +2388,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
(void **)&adev->gfx.rlc.cs_ptr);
if (r) {
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v6_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
return r;
}
@@ -2549,8 +2509,8 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
if (!adev->gfx.rlc_fw)
return -EINVAL;
- gfx_v6_0_rlc_stop(adev);
- gfx_v6_0_rlc_reset(adev);
+ adev->gfx.rlc.funcs->stop(adev);
+ adev->gfx.rlc.funcs->reset(adev);
gfx_v6_0_init_pg(adev);
gfx_v6_0_init_cg(adev);
@@ -2578,7 +2538,7 @@ static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
WREG32(mmRLC_UCODE_ADDR, 0);
gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
- gfx_v6_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -3075,6 +3035,14 @@ static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
.select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
};
+static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
+ .init = gfx_v6_0_rlc_init,
+ .resume = gfx_v6_0_rlc_resume,
+ .stop = gfx_v6_0_rlc_stop,
+ .reset = gfx_v6_0_rlc_reset,
+ .start = gfx_v6_0_rlc_start
+};
+
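
Note: with gfx_v6_0_rlc_funcs in place, generation-independent code (and the common amdgpu_gfx_rlc_* helpers) can drive the RLC through adev->gfx.rlc.funcs without knowing which gfx IP it is talking to; the hunks below convert every direct gfx_v6_0_rlc_* call to go through the table.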
static int gfx_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3082,6 +3050,7 @@ static int gfx_v6_0_early_init(void *handle)
adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
+ adev->gfx.rlc.funcs = &gfx_v6_0_rlc_funcs;
gfx_v6_0_set_ring_funcs(adev);
gfx_v6_0_set_irq_funcs(adev);
@@ -3114,7 +3083,7 @@ static int gfx_v6_0_sw_init(void *handle)
return r;
}
- r = gfx_v6_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -3165,7 +3134,7 @@ static int gfx_v6_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- gfx_v6_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
return 0;
}
@@ -3177,7 +3146,7 @@ static int gfx_v6_0_hw_init(void *handle)
gfx_v6_0_constants_init(adev);
- r = gfx_v6_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -3195,7 +3164,7 @@ static int gfx_v6_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gfx_v6_0_cp_enable(adev, false);
- gfx_v6_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
gfx_v6_0_fini_pg(adev);
return 0;
@@ -3393,12 +3362,31 @@ static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v6_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ struct amdgpu_ring *ring;
+
+ switch (entry->ring_id) {
+ case 0:
+ ring = &adev->gfx.gfx_ring[0];
+ break;
+ case 1:
+ case 2:
+ ring = &adev->gfx.compute_ring[entry->ring_id - 1];
+ break;
+ default:
+ return;
+ }
+ drm_sched_fault(&ring->sched);
+}
+
static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v6_0_fault(adev, entry);
return 0;
}
@@ -3407,7 +3395,7 @@ static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal instruction in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v6_0_fault(adev, entry);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0e72bc09939a..3a9fb6018c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] =
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
@@ -2064,17 +2063,14 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -2086,13 +2082,10 @@ static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -2233,9 +2226,11 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
* on the gfx ring for execution by the GPU.
*/
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
/* insert SWITCH_BUFFER packet before first IB in the ring frame */
@@ -2262,9 +2257,11 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -2316,17 +2313,15 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
ib.ptr[2] = 0xDEADBEEF;
@@ -2338,22 +2333,16 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
tmp = RREG32(scratch);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -2403,7 +2392,7 @@ static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -2613,12 +2602,9 @@ static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
gfx_v7_0_cp_gfx_start(adev);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
return 0;
}
@@ -2675,7 +2661,7 @@ static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -2781,7 +2767,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
* GFX7_MEC_HPD_SIZE * 2;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
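
Note: the MEC HPD/EOP buffer moves from GTT to VRAM here and in the matching gfx_v8/gfx_v9 hunks below. The commit message is not part of this excerpt, so the motivation is inferred rather than quoted: the CP touches these buffers constantly, and local memory sidesteps GTT mapping hazards across resets.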
@@ -3106,10 +3092,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ amdgpu_ring_test_helper(ring);
}
return 0;
@@ -3268,18 +3251,10 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
* The RLC is a multi-purpose microengine that handles a
* variety of functions.
*/
-static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
-}
-
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
- u32 dws, i;
+ u32 dws;
const struct cs_section_def *cs_data;
int r;
@@ -3306,66 +3281,23 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (src_ptr) {
- /* save restore block */
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.save_restore_obj,
- &adev->gfx.rlc.save_restore_gpu_addr,
- (void **)&adev->gfx.rlc.sr_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
+ /* init save restore block */
+ r = amdgpu_gfx_rlc_init_sr(adev, dws);
+ if (r)
return r;
- }
-
- /* write the sr buffer */
- dst_ptr = adev->gfx.rlc.sr_ptr;
- for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
- dst_ptr[i] = cpu_to_le32(src_ptr[i]);
- amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
}
if (cs_data) {
- /* clear state block */
- adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
-
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
+ /* init clear state block */
+ r = amdgpu_gfx_rlc_init_csb(adev);
+ if (r)
return r;
- }
-
- /* set up the cs buffer */
- dst_ptr = adev->gfx.rlc.cs_ptr;
- gfx_v7_0_get_csb_buffer(adev, dst_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
if (adev->gfx.rlc.cp_table_size) {
-
- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
- gfx_v7_0_rlc_fini(adev);
+ r = amdgpu_gfx_rlc_init_cpt(adev);
+ if (r)
return r;
- }
-
- gfx_v7_0_init_cp_pg_table(adev);
-
- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-
}
return 0;
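
Note: the duplicated BO bring-up for the save/restore block, clear-state block and CP jump table is hoisted into common helpers (amdgpu_gfx_rlc_init_sr, _init_csb, _init_cpt), with the per-generation pieces supplied through the new get_csb_size/get_csb_buffer/get_cp_table_num callbacks. A hedged reconstruction of the clear-state helper, inferred from the code deleted above rather than copied from amdgpu_rlc.c:

    int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
    {
            volatile u32 *dst_ptr;
            u32 dws;
            int r;

            /* the generation reports the size and fills the buffer */
            adev->gfx.rlc.clear_state_size = dws =
                    adev->gfx.rlc.funcs->get_csb_size(adev);

            r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
                                          AMDGPU_GEM_DOMAIN_VRAM,
                                          &adev->gfx.rlc.clear_state_obj,
                                          &adev->gfx.rlc.clear_state_gpu_addr,
                                          (void **)&adev->gfx.rlc.cs_ptr);
            if (r) {
                    dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
                    amdgpu_gfx_rlc_fini(adev);
                    return r;
            }

            dst_ptr = adev->gfx.rlc.cs_ptr;
            adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
            amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
            amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

            return 0;
    }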
@@ -3446,7 +3378,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
return orig;
}
-static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
+{
+ return true;
+}
+
+static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
{
u32 tmp, i, mask;
@@ -3468,7 +3405,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
}
}
-static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
{
u32 tmp;
@@ -3545,13 +3482,13 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
adev->gfx.rlc_feature_version = le32_to_cpu(
hdr->ucode_feature_version);
- gfx_v7_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* disable CG */
tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
- gfx_v7_0_rlc_reset(adev);
+ adev->gfx.rlc.funcs->reset(adev);
gfx_v7_0_init_pg(adev);
@@ -3582,7 +3519,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_BONAIRE)
WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);
- gfx_v7_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -3784,72 +3721,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
WREG32(mmRLC_PG_CNTL, data);
}
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
+static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
{
- const __le32 *fw_data;
- volatile u32 *dst_ptr;
- int me, i, max_me = 4;
- u32 bo_offset = 0;
- u32 table_offset, table_size;
-
if (adev->asic_type == CHIP_KAVERI)
- max_me = 5;
-
- if (adev->gfx.rlc.cp_table_ptr == NULL)
- return;
-
- /* write the cp table buffer */
- dst_ptr = adev->gfx.rlc.cp_table_ptr;
- for (me = 0; me < max_me; me++) {
- if (me == 0) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.ce_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 1) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.pfp_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 2) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.me_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 3) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec2_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- }
-
- for (i = 0; i < table_size; i ++) {
- dst_ptr[bo_offset + i] =
- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
- }
-
- bo_offset += table_size;
- }
+ return 5;
+ else
+ return 4;
}
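
Note: the near-identical firmware-header walks deleted here (and their cz/rv twins below) differed only in how many MEs carry a jump table (5 where MEC2 has its own, as on Kaveri, Carrizo and Raven; otherwise 4), so the copy loop now lives once in common code and merely asks the generation for the count. A fragmentary sketch with illustrative variable names:

    /* Sketch of the shared jump-table copy the per-ASIC loops
     * collapse into; fw_data/table_offset/table_size come from the
     * per-ME firmware header exactly as in the deleted code. */
    max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
    for (me = 0; me < max_me; me++) {
            /* ...select CE/PFP/ME/MEC/MEC2 header for this me... */
            for (i = 0; i < table_size; i++)
                    dst_ptr[bo_offset + i] =
                            cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
            bo_offset += table_size;
    }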
static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
@@ -4288,8 +4165,17 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
};
static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
- .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
- .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
+ .is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v7_0_set_safe_mode,
+ .unset_safe_mode = gfx_v7_0_unset_safe_mode,
+ .init = gfx_v7_0_rlc_init,
+ .get_csb_size = gfx_v7_0_get_csb_size,
+ .get_csb_buffer = gfx_v7_0_get_csb_buffer,
+ .get_cp_table_num = gfx_v7_0_cp_pg_table_num,
+ .resume = gfx_v7_0_rlc_resume,
+ .stop = gfx_v7_0_rlc_stop,
+ .reset = gfx_v7_0_rlc_reset,
+ .start = gfx_v7_0_rlc_start
};
static int gfx_v7_0_early_init(void *handle)
@@ -4477,7 +4363,7 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+ ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
@@ -4540,7 +4426,7 @@ static int gfx_v7_0_sw_init(void *handle)
return r;
}
- r = gfx_v7_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -4604,7 +4490,7 @@ static int gfx_v7_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
gfx_v7_0_cp_compute_fini(adev);
- gfx_v7_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
@@ -4627,7 +4513,7 @@ static int gfx_v7_0_hw_init(void *handle)
gfx_v7_0_constants_init(adev);
/* init rlc */
- r = gfx_v7_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -4645,7 +4531,7 @@ static int gfx_v7_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
gfx_v7_0_cp_enable(adev, false);
- gfx_v7_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
gfx_v7_0_fini_pg(adev);
return 0;
@@ -4730,7 +4616,7 @@ static int gfx_v7_0_soft_reset(void *handle)
gfx_v7_0_update_cg(adev, false);
/* stop the rlc */
- gfx_v7_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* Disable GFX parsing/prefetching */
WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);
@@ -4959,12 +4845,36 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v7_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ struct amdgpu_ring *ring;
+ u8 me_id, pipe_id;
+ int i;
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ switch (me_id) {
+ case 0:
+ drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if ((ring->me == me_id) && (ring->pipe == pipe_id))
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ }
+}
+
static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v7_0_fault(adev, entry);
return 0;
}
@@ -4974,7 +4884,7 @@ static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
{
DRM_ERROR("Illegal instruction in command stream\n");
// XXX soft reset the gfx block only
- schedule_work(&adev->reset_work);
+ gfx_v7_0_fault(adev, entry);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 617b0c8908a3..381f593b0cda 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -44,7 +44,6 @@
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
@@ -54,7 +53,7 @@
#include "ivsrcid/ivsrcid_vislands30.h"
#define GFX8_NUM_GFX_RINGS 1
-#define GFX8_MEC_HPD_SIZE 2048
+#define GFX8_MEC_HPD_SIZE 4096
#define TOPAZ_GB_ADDR_CONFIG_GOLDEN 0x22010001
#define CARRIZO_GB_ADDR_CONFIG_GOLDEN 0x22010001
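
Note: GFX8_MEC_HPD_SIZE (and GFX9_MEC_HPD_SIZE below) doubles from 2048 to 4096 bytes per queue. The rationale is not visible in this excerpt; the constant sizes both the total hpd_eop_obj allocation in mec_init and the per-ring EOP slice in the compute ring init hunks that follow, so the two stay consistent.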
@@ -839,18 +838,14 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -862,14 +857,11 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -886,19 +878,16 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -912,22 +901,17 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
tmp = adev->wb.wb[index];
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("ib test on ring %d failed\n", ring->idx);
+ else
r = -EINVAL;
- }
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -1298,81 +1282,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
buffer[count++] = cpu_to_le32(0);
}
-static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
{
- const __le32 *fw_data;
- volatile u32 *dst_ptr;
- int me, i, max_me = 4;
- u32 bo_offset = 0;
- u32 table_offset, table_size;
-
if (adev->asic_type == CHIP_CARRIZO)
- max_me = 5;
-
- /* write the cp table buffer */
- dst_ptr = adev->gfx.rlc.cp_table_ptr;
- for (me = 0; me < max_me; me++) {
- if (me == 0) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.ce_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 1) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.pfp_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 2) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.me_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 3) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 4) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec2_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- }
-
- for (i = 0; i < table_size; i ++) {
- dst_ptr[bo_offset + i] =
- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
- }
-
- bo_offset += table_size;
- }
-}
-
-static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
-{
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
+ return 5;
+ else
+ return 4;
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
{
- volatile u32 *dst_ptr;
- u32 dws;
const struct cs_section_def *cs_data;
int r;
@@ -1381,44 +1300,18 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (cs_data) {
- /* clear state block */
- adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
-
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
- gfx_v8_0_rlc_fini(adev);
+ /* init clear state block */
+ r = amdgpu_gfx_rlc_init_csb(adev);
+ if (r)
return r;
- }
-
- /* set up the cs buffer */
- dst_ptr = adev->gfx.rlc.cs_ptr;
- gfx_v8_0_get_csb_buffer(adev, dst_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
if ((adev->asic_type == CHIP_CARRIZO) ||
(adev->asic_type == CHIP_STONEY)) {
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+ r = amdgpu_gfx_rlc_init_cpt(adev);
+ if (r)
return r;
- }
-
- cz_init_cp_jump_table(adev);
-
- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
}
return 0;
@@ -1443,7 +1336,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
@@ -1629,7 +1522,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
return 0;
/* bail if the compute ring is not ready */
- if (!ring->ready)
+ if (!ring->sched.ready)
return 0;
tmp = RREG32(mmGB_EDC_MODE);
@@ -1997,7 +1890,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
+ ring->doorbell_index = adev->doorbell_index.mec_ring0 + ring_id;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX8_MEC_HPD_SIZE);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
@@ -2088,7 +1981,7 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
}
- r = gfx_v8_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -2108,7 +2001,7 @@ static int gfx_v8_0_sw_init(void *handle)
/* no gfx doorbells on iceland */
if (adev->asic_type != CHIP_TOPAZ) {
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL_GFX_RING0;
+ ring->doorbell_index = adev->doorbell_index.gfx_ring0;
}
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
@@ -2181,7 +2074,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_gfx_kiq_fini(adev);
gfx_v8_0_mec_fini(adev);
- gfx_v8_0_rlc_fini(adev);
+ amdgpu_gfx_rlc_fini(adev);
amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
@@ -4175,10 +4068,15 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
{
- gfx_v8_0_rlc_stop(adev);
- gfx_v8_0_rlc_reset(adev);
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v8_0_init_csb(adev);
+ return 0;
+ }
+
+ adev->gfx.rlc.funcs->stop(adev);
+ adev->gfx.rlc.funcs->reset(adev);
gfx_v8_0_init_pg(adev);
- gfx_v8_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
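
Note: under SR-IOV the hypervisor owns the RLC, so the guest's resume path must not stop, reset or start it; re-pointing the clear-state buffer via gfx_v8_0_init_csb is the only step the guest still performs before returning.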
@@ -4197,7 +4095,7 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32(mmCP_ME_CNTL, tmp);
udelay(50);
@@ -4322,7 +4220,7 @@ static void gfx_v8_0_set_cpg_door_bell(struct amdgpu_device *adev, struct amdgpu
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER,
- AMDGPU_DOORBELL_GFX_RING0);
+ adev->doorbell_index.gfx_ring0);
WREG32(mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
WREG32(mmCP_RB_DOORBELL_RANGE_UPPER,
@@ -4379,10 +4277,8 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
amdgpu_ring_clear_ring(ring);
gfx_v8_0_cp_gfx_start(adev);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ ring->sched.ready = true;
+ r = amdgpu_ring_test_helper(ring);
return r;
}
@@ -4396,8 +4292,8 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
- adev->gfx.kiq.ring.ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
+ adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
}
@@ -4473,11 +4369,9 @@ static int gfx_v8_0_kiq_kcq_enable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
+ r = amdgpu_ring_test_helper(kiq_ring);
+ if (r)
DRM_ERROR("KCQ enable failed\n");
- kiq_ring->ready = false;
- }
return r;
}
@@ -4755,8 +4649,8 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
static void gfx_v8_0_set_mec_doorbell_range(struct amdgpu_device *adev)
{
if (adev->asic_type > CHIP_TONGA) {
- WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2);
- WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, AMDGPU_DOORBELL_MEC_RING7 << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, adev->doorbell_index.kiq << 2);
+ WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, adev->doorbell_index.mec_ring7 << 2);
}
/* enable doorbells */
WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
@@ -4781,7 +4675,7 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
- ring->ready = true;
+ ring->sched.ready = true;
return 0;
}
@@ -4820,10 +4714,7 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
*/
for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
ring = &adev->gfx.compute_ring[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
}
done:
@@ -4867,7 +4758,7 @@ static int gfx_v8_0_hw_init(void *handle)
gfx_v8_0_init_golden_registers(adev);
gfx_v8_0_constants_init(adev);
- r = gfx_v8_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -4899,7 +4790,7 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
}
- r = amdgpu_ring_test_ring(kiq_ring);
+ r = amdgpu_ring_test_helper(kiq_ring);
if (r)
DRM_ERROR("KCQ disable failed\n");
@@ -4973,16 +4864,16 @@ static int gfx_v8_0_hw_fini(void *handle)
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
}
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (!gfx_v8_0_wait_for_idle(adev))
gfx_v8_0_cp_enable(adev, false);
else
pr_err("cp is busy, skip halt cp\n");
if (!gfx_v8_0_wait_for_rlc_idle(adev))
- gfx_v8_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
else
pr_err("rlc is busy, skip halt rlc\n");
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
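
Note: callers switch from the per-ASIC enter/exit callbacks to the amdgpu_gfx_rlc_enter/exit_safe_mode() wrappers; the cg_flags check and in_safe_mode bookkeeping that iceland_enter_rlc_safe_mode() carried (deleted further down) migrate into the wrapper, leaving the ASIC hooks with just the RLC_SAFE_MODE register handshake. A hedged sketch of the wrapper, reconstructed from the deleted logic rather than quoted from amdgpu_rlc.c:

    void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
    {
            if (adev->gfx.rlc.in_safe_mode)
                    return;

            /* do nothing if the RLC F32 core is not running */
            if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
                    return;

            if (adev->cg_flags &
                (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
                    adev->gfx.rlc.funcs->set_safe_mode(adev);
                    adev->gfx.rlc.in_safe_mode = true;
            }
    }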
@@ -5061,17 +4952,16 @@ static bool gfx_v8_0_check_soft_reset(void *handle)
static int gfx_v8_0_pre_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
/* stop the rlc */
- gfx_v8_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
@@ -5165,14 +5055,13 @@ static int gfx_v8_0_soft_reset(void *handle)
static int gfx_v8_0_post_soft_reset(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+ u32 grbm_soft_reset = 0;
if ((!adev->gfx.grbm_soft_reset) &&
(!adev->gfx.srbm_soft_reset))
return 0;
grbm_soft_reset = adev->gfx.grbm_soft_reset;
- srbm_soft_reset = adev->gfx.srbm_soft_reset;
if (REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CP) ||
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_CPF) ||
@@ -5197,7 +5086,7 @@ static int gfx_v8_0_post_soft_reset(void *handle)
REG_GET_FIELD(grbm_soft_reset, GRBM_SOFT_RESET, SOFT_RESET_GFX))
gfx_v8_0_cp_gfx_resume(adev);
- gfx_v8_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
@@ -5445,7 +5334,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -5499,7 +5388,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
AMD_PG_SUPPORT_RLC_SMU_HS |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_GFX_DMG))
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -5593,57 +5482,53 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e
-static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
{
- u32 data;
- unsigned i;
+ uint32_t rlc_setting;
- data = RREG32(mmRLC_CNTL);
- if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
+ rlc_setting = RREG32(mmRLC_CNTL);
+ if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+ return false;
- if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
- data |= RLC_SAFE_MODE__CMD_MASK;
- data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
- data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- WREG32(mmRLC_SAFE_MODE, data);
+ return true;
+}
- for (i = 0; i < adev->usec_timeout; i++) {
- if ((RREG32(mmRLC_GPM_STAT) &
- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
- RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
- (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
- RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
- break;
- udelay(1);
- }
+static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+{
+ uint32_t data;
+ unsigned i;
+ data = RREG32(mmRLC_CNTL);
+ data |= RLC_SAFE_MODE__CMD_MASK;
+ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+ WREG32(mmRLC_SAFE_MODE, data);
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
- break;
- udelay(1);
- }
- adev->gfx.rlc.in_safe_mode = true;
+ /* wait for RLC_SAFE_MODE */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if ((RREG32(mmRLC_GPM_STAT) &
+ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+ RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) ==
+ (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
+ RLC_GPM_STAT__GFX_POWER_STATUS_MASK))
+ break;
+ udelay(1);
+ }
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ break;
+ udelay(1);
}
}
-static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
{
- u32 data = 0;
+ uint32_t data;
unsigned i;
data = RREG32(mmRLC_CNTL);
- if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
-
- if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
- if (adev->gfx.rlc.in_safe_mode) {
- data |= RLC_SAFE_MODE__CMD_MASK;
- data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
- WREG32(mmRLC_SAFE_MODE, data);
- adev->gfx.rlc.in_safe_mode = false;
- }
- }
+ data |= RLC_SAFE_MODE__CMD_MASK;
+ data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
+ WREG32(mmRLC_SAFE_MODE, data);
for (i = 0; i < adev->usec_timeout; i++) {
if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
@@ -5653,8 +5538,17 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
}
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
- .enter_safe_mode = iceland_enter_rlc_safe_mode,
- .exit_safe_mode = iceland_exit_rlc_safe_mode
+ .is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v8_0_set_safe_mode,
+ .unset_safe_mode = gfx_v8_0_unset_safe_mode,
+ .init = gfx_v8_0_rlc_init,
+ .get_csb_size = gfx_v8_0_get_csb_size,
+ .get_csb_buffer = gfx_v8_0_get_csb_buffer,
+ .get_cp_table_num = gfx_v8_0_cp_jump_table_num,
+ .resume = gfx_v8_0_rlc_resume,
+ .stop = gfx_v8_0_rlc_stop,
+ .reset = gfx_v8_0_rlc_reset,
+ .start = gfx_v8_0_rlc_start
};
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -5662,7 +5556,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t temp, data;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
@@ -5758,7 +5652,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
gfx_v8_0_wait_for_rlc_serdes(adev);
}
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -5768,7 +5662,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
@@ -5851,7 +5745,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
gfx_v8_0_wait_for_rlc_serdes(adev);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
bool enable)
@@ -6131,9 +6025,11 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
}
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -6161,9 +6057,11 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
@@ -6738,12 +6636,39 @@ static int gfx_v8_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v8_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ u8 me_id, pipe_id, queue_id;
+ struct amdgpu_ring *ring;
+ int i;
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ queue_id = (entry->ring_id & 0x70) >> 4;
+
+ switch (me_id) {
+ case 0:
+ drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ }
+}
+
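
Note: gfx_v8_0_fault() extends the gfx_v7 variant with a queue_id because VI packs me/pipe/queue into the IV ring_id. A standalone demonstration of the unpacking, reusing the masks from the hunk above:

    #include <stdio.h>

    /* Unpack an interrupt-vector ring_id the way gfx_v8_0_fault() does. */
    int main(void)
    {
            unsigned int ring_id = 0x56;                /* example value  */
            unsigned int me    = (ring_id & 0x0c) >> 2; /* micro engine   */
            unsigned int pipe  = (ring_id & 0x03) >> 0; /* pipe within me */
            unsigned int queue = (ring_id & 0x70) >> 4; /* queue in pipe  */

            printf("me %u pipe %u queue %u\n", me, pipe, queue);
            return 0;
    }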
static int gfx_v8_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v8_0_fault(adev, entry);
return 0;
}
@@ -6752,7 +6677,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal instruction in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v8_0_fault(adev, entry);
return 0;
}
@@ -6976,10 +6901,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
17 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
- .emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
.test_ring = gfx_v8_0_ring_test_ring,
- .test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v8_0_ring_emit_rreg,
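
Note: the KIQ ring drops its .emit_ib and .test_ib hooks; the kernel interface queue only executes packet sequences the driver builds directly (queue map/unmap, register access), never scheduled IBs, so the callbacks were dead weight once readiness moved into the scheduler.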
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 6d7baf59d6e1..7556716038d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -41,7 +41,7 @@
#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
#define GFX9_NUM_GFX_RINGS 1
-#define GFX9_MEC_HPD_SIZE 2048
+#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
@@ -86,6 +86,7 @@ MODULE_FIRMWARE("amdgpu/picasso_me.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec.bin");
MODULE_FIRMWARE("amdgpu/picasso_mec2.bin");
MODULE_FIRMWARE("amdgpu/picasso_rlc.bin");
+MODULE_FIRMWARE("amdgpu/picasso_rlc_am4.bin");
MODULE_FIRMWARE("amdgpu/raven2_ce.bin");
MODULE_FIRMWARE("amdgpu/raven2_pfp.bin");
@@ -396,18 +397,14 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
+ if (r)
return r;
- }
+
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
- amdgpu_gfx_scratch_free(adev, scratch);
- return r;
- }
+ if (r)
+ goto error_free_scratch;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -419,14 +416,11 @@ static int gfx_v9_0_ring_test_ring(struct amdgpu_ring *ring)
break;
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
- ring->idx, scratch, tmp);
- r = -EINVAL;
- }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+error_free_scratch:
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
@@ -443,19 +437,16 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 16, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err1;
- }
+
ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
ib.ptr[2] = lower_32_bits(gpu_addr);
@@ -469,22 +460,17 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
- r = -ETIMEDOUT;
- goto err2;
+ r = -ETIMEDOUT;
+ goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- goto err2;
+ goto err2;
}
tmp = adev->wb.wb[index];
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
- r = 0;
- } else {
- DRM_ERROR("ib test on ring %d failed\n", ring->idx);
- r = -EINVAL;
- }
+ if (tmp == 0xDEADBEEF)
+ r = 0;
+ else
+ r = -EINVAL;
err2:
amdgpu_ib_free(adev, &ib, NULL);
@@ -660,7 +646,20 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
+ /*
+ * For Picasso on an AM4 socket board, use picasso_rlc_am4.bin
+ * instead of picasso_rlc.bin.
+ * Detection method:
+ * PCO AM4: revision >= 0xC8 && revision <= 0xCF
+ * or revision >= 0xD8 && revision <= 0xDF
+ * otherwise it is PCO FP5
+ */
+ if (!strcmp(chip_name, "picasso") &&
+ (((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
+ ((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
if (err)
goto out;
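
Note: the PCI revision windows above separate AM4 (desktop socket) Picasso parts from FP5 (mobile) ones so the right RLC firmware is loaded. A standalone predicate with the same ranges, handy for eyeballing the logic:

    #include <stdbool.h>
    #include <stdio.h>

    /* Same windows as the hunk above: 0xC8-0xCF and 0xD8-0xDF mean
     * Picasso on an AM4 socket; anything else is treated as FP5. */
    static bool picasso_is_am4(unsigned char rev)
    {
            return (rev >= 0xC8 && rev <= 0xCF) ||
                   (rev >= 0xD8 && rev <= 0xDF);
    }

    int main(void)
    {
            printf("0xC8 -> %d, 0xD0 -> %d\n",
                   picasso_is_am4(0xC8), picasso_is_am4(0xD0));
            return 0;
    }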
@@ -1065,85 +1064,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}
-static void rv_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
- const __le32 *fw_data;
- volatile u32 *dst_ptr;
- int me, i, max_me = 5;
- u32 bo_offset = 0;
- u32 table_offset, table_size;
-
- /* write the cp table buffer */
- dst_ptr = adev->gfx.rlc.cp_table_ptr;
- for (me = 0; me < max_me; me++) {
- if (me == 0) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.ce_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 1) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.pfp_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 2) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.me_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 3) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- } else if (me == 4) {
- const struct gfx_firmware_header_v1_0 *hdr =
- (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
- fw_data = (const __le32 *)
- (adev->gfx.mec2_fw->data +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- table_offset = le32_to_cpu(hdr->jt_offset);
- table_size = le32_to_cpu(hdr->jt_size);
- }
-
- for (i = 0; i < table_size; i ++) {
- dst_ptr[bo_offset + i] =
- cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
- }
-
- bo_offset += table_size;
- }
-}
-
-static void gfx_v9_0_rlc_fini(struct amdgpu_device *adev)
-{
- /* clear state block */
- amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
-
- /* jump table block */
- amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
+ return 5;
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
- volatile u32 *dst_ptr;
- u32 dws;
const struct cs_section_def *cs_data;
int r;
@@ -1152,45 +1079,18 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
cs_data = adev->gfx.rlc.cs_data;
if (cs_data) {
- /* clear state block */
- adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
- r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.clear_state_obj,
- &adev->gfx.rlc.clear_state_gpu_addr,
- (void **)&adev->gfx.rlc.cs_ptr);
- if (r) {
- dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
- r);
- gfx_v9_0_rlc_fini(adev);
+ /* init clear state block */
+ r = amdgpu_gfx_rlc_init_csb(adev);
+ if (r)
return r;
- }
- /* set up the cs buffer */
- dst_ptr = adev->gfx.rlc.cs_ptr;
- gfx_v9_0_get_csb_buffer(adev, dst_ptr);
- amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
}
if (adev->asic_type == CHIP_RAVEN) {
/* TODO: double check the cp_table_size for RV */
adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
- r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.cp_table_obj,
- &adev->gfx.rlc.cp_table_gpu_addr,
- (void **)&adev->gfx.rlc.cp_table_ptr);
- if (r) {
- dev_err(adev->dev,
- "(%d) failed to create cp table bo\n", r);
- gfx_v9_0_rlc_fini(adev);
+ r = amdgpu_gfx_rlc_init_cpt(adev);
+ if (r)
return r;
- }
-
- rv_init_cp_jump_table(adev);
- amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
- amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
}
switch (adev->asic_type) {
@@ -1264,7 +1164,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
@@ -1635,8 +1535,8 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
/* Clear GDS reserved memory */
r = amdgpu_ring_alloc(ring, 17);
if (r) {
- DRM_ERROR("amdgpu: NGG failed to lock ring %d (%d).\n",
- ring->idx, r);
+ DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n",
+ ring->name, r);
return r;
}
@@ -1680,7 +1580,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
ring->ring_obj = NULL;
ring->use_doorbell = true;
- ring->doorbell_index = (AMDGPU_DOORBELL_MEC_RING0 + ring_id) << 1;
+ ring->doorbell_index = (adev->doorbell_index.mec_ring0 + ring_id) << 1;
ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
+ (ring_id * GFX9_MEC_HPD_SIZE);
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
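
A note on the << 1 above: the doorbell index bookkeeping appears to be in 64-bit doorbell units while the ring consumes 32-bit slots, hence the doubling. A sketch under that assumption (the helper name is ours):

#include <stdint.h>

static uint32_t mec_doorbell_dword(uint32_t mec_ring0_qword, uint32_t ring_id)
{
	/* one 64-bit doorbell occupies two 32-bit doorbell slots */
	return (mec_ring0_qword + ring_id) << 1;
}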
@@ -1748,7 +1648,7 @@ static int gfx_v9_0_sw_init(void *handle)
return r;
}
- r = gfx_v9_0_rlc_init(adev);
+ r = adev->gfx.rlc.funcs->init(adev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -1769,7 +1669,7 @@ static int gfx_v9_0_sw_init(void *handle)
else
sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
- ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
+ ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
&adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
if (r)
@@ -2440,12 +2340,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
#endif
WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
+ udelay(50);
 	/* carrizo enables the cp interrupt only after cp is initialized */
- if (!(adev->flags & AMD_IS_APU))
+ if (!(adev->flags & AMD_IS_APU)) {
gfx_v9_0_enable_gui_idle_interrupt(adev, true);
-
- udelay(50);
+ udelay(50);
+ }
#ifdef AMDGPU_RLC_DEBUG_RETRY
/* RLC_GPM_GENERAL_6 : RLC Ucode version */
@@ -2498,12 +2399,12 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
return 0;
}
- gfx_v9_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* disable CG */
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
- gfx_v9_0_rlc_reset(adev);
+ adev->gfx.rlc.funcs->reset(adev);
gfx_v9_0_init_pg(adev);
@@ -2514,15 +2415,24 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- if (adev->asic_type == CHIP_RAVEN ||
- adev->asic_type == CHIP_VEGA20) {
- if (amdgpu_lbpw != 0)
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ if (amdgpu_lbpw == 0)
+ gfx_v9_0_enable_lbpw(adev, false);
+ else
+ gfx_v9_0_enable_lbpw(adev, true);
+ break;
+ case CHIP_VEGA20:
+ if (amdgpu_lbpw > 0)
gfx_v9_0_enable_lbpw(adev, true);
else
gfx_v9_0_enable_lbpw(adev, false);
+ break;
+ default:
+ break;
}
- gfx_v9_0_rlc_start(adev);
+ adev->gfx.rlc.funcs->start(adev);
return 0;
}
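
The switch above encodes two slightly different policies for the amdgpu_lbpw module parameter (assumed here to default to -1, i.e. auto). Schematically:

#include <stdbool.h>

static bool lbpw_should_enable(bool is_vega20, int amdgpu_lbpw)
{
	if (is_vega20)
		return amdgpu_lbpw > 0;  /* only an explicit positive value */
	return amdgpu_lbpw != 0;         /* Raven: auto (-1) also enables it */
}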
@@ -2537,7 +2447,7 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
if (!enable) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].ready = false;
+ adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
udelay(50);
@@ -2727,7 +2637,7 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
/* start the ring */
gfx_v9_0_cp_gfx_start(adev);
- ring->ready = true;
+ ring->sched.ready = true;
return 0;
}
@@ -2742,8 +2652,8 @@ static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].ready = false;
- adev->gfx.kiq.ring.ready = false;
+ adev->gfx.compute_ring[i].sched.ready = false;
+ adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
}
@@ -2866,11 +2776,9 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
}
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
+ r = amdgpu_ring_test_helper(kiq_ring);
+ if (r)
DRM_ERROR("KCQ enable failed\n");
- kiq_ring->ready = false;
- }
return r;
}
@@ -3088,9 +2996,9 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
/* enable the doorbell if requested */
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
- (AMDGPU_DOORBELL64_KIQ *2) << 2);
+ (adev->doorbell_index.kiq * 2) << 2);
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
- (AMDGPU_DOORBELL64_USERQUEUE_END * 2) << 2);
+ (adev->doorbell_index.userqueue_end * 2) << 2);
}
WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
@@ -3249,7 +3157,7 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
amdgpu_bo_unreserve(ring->mqd_obj);
- ring->ready = true;
+ ring->sched.ready = true;
return 0;
}
@@ -3314,19 +3222,13 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
return r;
ring = &adev->gfx.gfx_ring[0];
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
-
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r)
- ring->ready = false;
+ amdgpu_ring_test_helper(ring);
}
gfx_v9_0_enable_gui_idle_interrupt(adev, true);
@@ -3353,7 +3255,7 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
- r = gfx_v9_0_rlc_resume(adev);
+ r = adev->gfx.rlc.funcs->resume(adev);
if (r)
return r;
@@ -3391,7 +3293,7 @@ static int gfx_v9_0_kcq_disable(struct amdgpu_device *adev)
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
}
- r = amdgpu_ring_test_ring(kiq_ring);
+ r = amdgpu_ring_test_helper(kiq_ring);
if (r)
DRM_ERROR("KCQ disable failed\n");
@@ -3433,7 +3335,7 @@ static int gfx_v9_0_hw_fini(void *handle)
}
gfx_v9_0_cp_enable(adev, false);
- gfx_v9_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
gfx_v9_0_csb_vram_unpin(adev);
@@ -3508,7 +3410,7 @@ static int gfx_v9_0_soft_reset(void *handle)
if (grbm_soft_reset) {
/* stop the rlc */
- gfx_v9_0_rlc_stop(adev);
+ adev->gfx.rlc.funcs->stop(adev);
/* Disable GFX parsing/prefetching */
gfx_v9_0_cp_gfx_enable(adev, false);
@@ -3607,64 +3509,47 @@ static int gfx_v9_0_late_init(void *handle)
return 0;
}
-static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
{
- uint32_t rlc_setting, data;
- unsigned i;
-
- if (adev->gfx.rlc.in_safe_mode)
- return;
+ uint32_t rlc_setting;
/* if RLC is not enabled, do nothing */
rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
-
- if (adev->cg_flags &
- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
- AMD_CG_SUPPORT_GFX_3D_CGCG)) {
- data = RLC_SAFE_MODE__CMD_MASK;
- data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+ return false;
- /* wait for RLC_SAFE_MODE */
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
- break;
- udelay(1);
- }
- adev->gfx.rlc.in_safe_mode = true;
- }
+ return true;
}
-static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
{
- uint32_t rlc_setting, data;
+ uint32_t data;
+ unsigned i;
- if (!adev->gfx.rlc.in_safe_mode)
- return;
+ data = RLC_SAFE_MODE__CMD_MASK;
+ data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
- /* if RLC is not enabled, do nothing */
- rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
- if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
- return;
-
- if (adev->cg_flags &
- (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
- /*
- * Try to exit safe mode only if it is already in safe
- * mode.
- */
- data = RLC_SAFE_MODE__CMD_MASK;
- WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
- adev->gfx.rlc.in_safe_mode = false;
+ /* wait for RLC_SAFE_MODE */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ break;
+ udelay(1);
}
}
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ data = RLC_SAFE_MODE__CMD_MASK;
+ WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
+}
+
static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- gfx_v9_0_enter_rlc_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
@@ -3675,7 +3560,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
}
- gfx_v9_0_exit_rlc_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
@@ -3773,7 +3658,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
{
uint32_t data, def;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
/* Enable 3D CGCG/CGLS */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
@@ -3813,7 +3698,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
}
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
@@ -3821,7 +3706,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
{
uint32_t def, data;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
@@ -3861,7 +3746,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
}
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -3890,8 +3775,17 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
}
static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
- .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
- .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode
+ .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v9_0_set_safe_mode,
+ .unset_safe_mode = gfx_v9_0_unset_safe_mode,
+ .init = gfx_v9_0_rlc_init,
+ .get_csb_size = gfx_v9_0_get_csb_size,
+ .get_csb_buffer = gfx_v9_0_get_csb_buffer,
+ .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
+ .resume = gfx_v9_0_rlc_resume,
+ .stop = gfx_v9_0_rlc_stop,
+ .reset = gfx_v9_0_rlc_reset,
+ .start = gfx_v9_0_rlc_start
};
static int gfx_v9_0_set_powergating_state(void *handle,
@@ -4072,9 +3966,11 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
}
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
if (ib->flags & AMDGPU_IB_FLAG_CE)
@@ -4103,20 +3999,22 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
}
static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
- u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+ u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
- amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
BUG_ON(ib->gpu_addr & 0x3); /* Dword align */
- amdgpu_ring_write(ring,
+ amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
- (2 << 0) |
+ (2 << 0) |
#endif
- lower_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
- amdgpu_ring_write(ring, control);
+ lower_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, control);
}
static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
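
AMDGPU_JOB_GET_VMID presumably guards against a NULL job, since direct submissions carry no job; something along these lines (an assumption on our part, the patch only uses the macro):

/* assumed definition, not shown in this patch */
#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)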
@@ -4695,12 +4593,39 @@ static int gfx_v9_0_eop_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_fault(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ u8 me_id, pipe_id, queue_id;
+ struct amdgpu_ring *ring;
+ int i;
+
+ me_id = (entry->ring_id & 0x0c) >> 2;
+ pipe_id = (entry->ring_id & 0x03) >> 0;
+ queue_id = (entry->ring_id & 0x70) >> 4;
+
+ switch (me_id) {
+ case 0:
+ drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
+ break;
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ }
+}
+
static int gfx_v9_0_priv_reg_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal register access in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v9_0_fault(adev, entry);
return 0;
}
@@ -4709,7 +4634,7 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
DRM_ERROR("Illegal instruction in command stream\n");
- schedule_work(&adev->reset_work);
+ gfx_v9_0_fault(adev, entry);
return 0;
}
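
gfx_v9_0_fault() routes a fault to the scheduler of the ring named by the IV entry. The bitfield decode can be modelled standalone:

#include <stdint.h>

struct gfx_fault_src { uint8_t me, pipe, queue; };

static struct gfx_fault_src decode_gfx_ring_id(uint32_t ring_id)
{
	struct gfx_fault_src s;

	s.me    = (ring_id & 0x0c) >> 2;  /* 0 = gfx, 1/2 = compute MEs */
	s.pipe  = (ring_id & 0x03) >> 0;
	s.queue = (ring_id & 0x70) >> 4;
	return s;
}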
@@ -4836,10 +4761,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
- .emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
.test_ring = gfx_v9_0_ring_test_ring,
- .test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v9_0_ring_emit_rreg,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index bfa317ad20a9..f5edddf3b29d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -35,20 +35,25 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev)
return (u64)RREG32_SOC15(GC, 0, mmMC_VM_FB_OFFSET) << 24;
}
-static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+ /* two registers distance between mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */
+ int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- gfxhub_v1_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
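
Generalizing from VMID 0 to an arbitrary VMID works because the per-context register blocks sit at a fixed stride; the computed offset is that stride times the VMID. An addressing sketch (reg_write stands in for WREG32_SOC15_OFFSET):

#include <stdint.h>

static void setup_vm_pt_regs(void (*reg_write)(uint32_t reg, uint32_t val),
			     uint32_t ctx0_lo, uint32_t ctx0_hi,
			     uint32_t stride, uint32_t vmid, uint64_t pt_base)
{
	reg_write(ctx0_lo + stride * vmid, (uint32_t)pt_base);
	reg_write(ctx0_hi + stride * vmid, (uint32_t)(pt_base >> 32));
}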
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
index 206e29cad753..92d3a70cd9b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
@@ -30,5 +30,7 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
bool value);
void gfxhub_v1_0_init(struct amdgpu_device *adev);
u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
+void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 73ad02aea2b2..9fc3296592fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -359,7 +359,8 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
return 0;
}
-static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ uint32_t vmid, uint32_t flush_type)
{
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
@@ -581,7 +582,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
else
gmc_v6_0_set_fault_enable_default(adev, true);
- gmc_v6_0_flush_gpu_tlb(adev, 0);
+ gmc_v6_0_flush_gpu_tlb(adev, 0, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 910c4ce19cb3..761dcfb2fec0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -430,7 +430,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
*
* Flush the TLB for the requested page table (CIK).
*/
-static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev,
+ uint32_t vmid, uint32_t flush_type)
{
/* bits 0-15 are the VM contexts0-15 */
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -698,7 +699,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmCHUB_CONTROL, tmp);
}
- gmc_v7_0_flush_gpu_tlb(adev, 0);
+ gmc_v7_0_flush_gpu_tlb(adev, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1d3265c97b70..1ad7e6b8ed1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -224,13 +227,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
chip_name = "tonga";
break;
case CHIP_POLARIS11:
- chip_name = "polaris11";
+ if (((adev->pdev->device == 0x67ef) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe5))) ||
+ ((adev->pdev->device == 0x67ff) &&
+ ((adev->pdev->revision == 0xcf) ||
+ (adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff))))
+ chip_name = "polaris11_k";
+ else if ((adev->pdev->device == 0x67ef) &&
+ (adev->pdev->revision == 0xe2))
+ chip_name = "polaris11_k";
+ else
+ chip_name = "polaris11";
break;
case CHIP_POLARIS10:
- chip_name = "polaris10";
+ if ((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe1) ||
+ (adev->pdev->revision == 0xf7)))
+ chip_name = "polaris10_k";
+ else
+ chip_name = "polaris10";
break;
case CHIP_POLARIS12:
- chip_name = "polaris12";
+ if (((adev->pdev->device == 0x6987) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc3))) ||
+ ((adev->pdev->device == 0x6981) &&
+ ((adev->pdev->revision == 0x00) ||
+ (adev->pdev->revision == 0x01) ||
+ (adev->pdev->revision == 0x10))))
+ chip_name = "polaris12_k";
+ else
+ chip_name = "polaris12";
break;
case CHIP_FIJI:
case CHIP_CARRIZO:
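
The nested device/revision conditionals above are equivalent to a small quirk table; a sketch of that alternative shape (the patch itself keeps explicit if-chains; data shown only for the Polaris12 case):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct mc_fw_quirk { uint16_t device; uint8_t revision; };

static const struct mc_fw_quirk polaris12_k[] = {
	{ 0x6987, 0xc0 }, { 0x6987, 0xc3 },
	{ 0x6981, 0x00 }, { 0x6981, 0x01 }, { 0x6981, 0x10 },
};

static bool needs_k_mc(const struct mc_fw_quirk *t, size_t n,
		       uint16_t dev, uint8_t rev)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (t[i].device == dev && t[i].revision == rev)
			return true;
	return false;
}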
@@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
const struct mc_firmware_header_v1_0 *hdr;
const __le32 *fw_data = NULL;
const __le32 *io_mc_regs = NULL;
- u32 data, vbios_version;
+ u32 data;
int i, ucode_size, regs_size;
/* Skip MC ucode loading on SR-IOV capable boards.
@@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
if (amdgpu_sriov_bios(adev))
return 0;
- WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
- data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
- vbios_version = data & 0xf;
-
- if (vbios_version == 0)
- return 0;
-
if (!adev->gmc.fw)
return -EINVAL;
@@ -611,7 +633,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
* Flush the TLB for the requested page table (CIK).
*/
static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid)
+ uint32_t vmid, uint32_t flush_type)
{
/* bits 0-15 are the VM contexts0-15 */
WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
@@ -920,7 +942,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
else
gmc_v8_0_set_fault_enable_default(adev, true);
- gmc_v8_0_flush_gpu_tlb(adev, 0);
+ gmc_v8_0_flush_gpu_tlb(adev, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
(unsigned long long)table_addr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index f35d7a554ad5..ce150de723c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -244,6 +244,62 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
return 0;
}
+/**
+ * gmc_v9_0_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @entry: interrupt vector entry to prescreen
+ * @addr: decoded page-fault address
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry,
+ uint64_t addr)
+{
+ struct amdgpu_vm *vm;
+ u64 key;
+ int r;
+
+ /* No PASID, can't identify faulting process */
+ if (!entry->pasid)
+ return true;
+
+ /* Not a retry fault */
+ if (!(entry->src_data[1] & 0x80))
+ return true;
+
+ /* Track retry faults in per-VM fault FIFO. */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
+ if (!vm) {
+ /* VM not found, process it normally */
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return true;
+ }
+
+ key = AMDGPU_VM_FAULT(entry->pasid, addr);
+ r = amdgpu_vm_add_fault(vm->fault_hash, key);
+
+ /* Hash table is full or the fault is already being processed,
+ * ignore further page faults
+ */
+ if (r != 0) {
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+ /* No locking required with single writer and single reader */
+ r = kfifo_put(&vm->faults, key);
+ if (!r) {
+ /* FIFO is full. Ignore it until there is space */
+ amdgpu_vm_clear_fault(vm->fault_hash, key);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ return false;
+ }
+
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ /* It's the first fault for this address, process it normally */
+ return true;
+}
+
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -255,6 +311,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+ if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
+ return 1; /* This also prevents sending it to KFD */
+
if (!amdgpu_sriov_vf(adev)) {
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
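
The prescreen path reduces to a dedup decision per (pasid, address) key. Schematically, with hypothetical set/FIFO helpers standing in for amdgpu_vm_add_fault() and kfifo_put():

#include <stdbool.h>
#include <stdint.h>

/* hypothetical helpers; the driver uses a fault hash plus a kfifo */
bool fault_set_add(uint64_t key);   /* false if full or already present */
bool fault_fifo_put(uint64_t key);  /* false if the FIFO is full */
void fault_set_clear(uint64_t key);

static bool prescreen_retry_fault(uint64_t key)
{
	if (!fault_set_add(key))
		return false;            /* duplicate or table full: drop */
	if (!fault_fifo_put(key)) {
		fault_set_clear(key);    /* FIFO full: retry the fault later */
		return false;
	}
	return true;                     /* first occurrence: process it */
}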
@@ -293,14 +352,14 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
-static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
+ uint32_t flush_type)
{
u32 req = 0;
- /* invalidate using legacy mode on vmid*/
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
PER_VMID_INVALIDATE_REQ, 1 << vmid);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
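
Only the FLUSH_TYPE field is new here; the rest of the request word is built as before. A standalone model with illustrative field placement (the real VM_INVALIDATE_ENG0_REQ layout comes from the register headers):

#include <stdint.h>

static uint32_t invalidate_req(unsigned int vmid, uint32_t flush_type)
{
	uint32_t req = 0;

	req |= 1u << vmid;               /* PER_VMID_INVALIDATE_REQ */
	req |= (flush_type & 0x7) << 16; /* FLUSH_TYPE, offset assumed */
	/* plus the INVALIDATE_L2_PTES/PDE0/PDE1/... bits set above */
	return req;
}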
@@ -312,48 +371,6 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
return req;
}
-static signed long amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
- uint32_t reg0, uint32_t reg1,
- uint32_t ref, uint32_t mask)
-{
- signed long r, cnt = 0;
- unsigned long flags;
- uint32_t seq;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *ring = &kiq->ring;
-
- spin_lock_irqsave(&kiq->ring_lock, flags);
-
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
- ref, mask);
- amdgpu_fence_emit_polling(ring, &seq);
- amdgpu_ring_commit(ring);
- spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
- /* don't wait anymore for IRQ context */
- if (r < 1 && in_interrupt())
- goto failed_kiq;
-
- might_sleep();
-
- while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
- msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
- r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
- }
-
- if (cnt > MAX_KIQ_REG_TRY)
- goto failed_kiq;
-
- return 0;
-
-failed_kiq:
- pr_err("failed to invalidate tlb with kiq\n");
- return r;
-}
-
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -362,64 +379,50 @@ failed_kiq:
*/
/**
- * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v9_0_flush_gpu_tlb - TLB flush with a given flush type
*
* @adev: amdgpu_device pointer
* @vmid: vm instance to flush
+ * @flush_type: the flush type
*
- * Flush the TLB for the requested page table.
+ * Flush the TLB for the requested page table using the given flush type.
*/
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
- uint32_t vmid)
+ uint32_t vmid, uint32_t flush_type)
{
- /* Use register 17 for GART */
const unsigned eng = 17;
unsigned i, j;
- int r;
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &adev->vmhub[i];
- u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
+ u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
- if (adev->gfx.kiq.ring.ready &&
+ /* This is necessary for a HW workaround under SRIOV as well
+ * as GFXOFF under bare metal
+ */
+ if (adev->gfx.kiq.ring.sched.ready &&
(amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
!adev->in_gpu_reset) {
- r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
- hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
- if (!r)
- continue;
- }
-
- spin_lock(&adev->gmc.invalidate_lock);
-
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
+ uint32_t req = hub->vm_inv_eng0_req + eng;
+ uint32_t ack = hub->vm_inv_eng0_ack + eng;
- /* Busy wait for ACK.*/
- for (j = 0; j < 100; j++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
- tmp &= 1 << vmid;
- if (tmp)
- break;
- cpu_relax();
- }
- if (j < 100) {
- spin_unlock(&adev->gmc.invalidate_lock);
+ amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+ 1 << vmid);
continue;
}
- /* Wait for ACK with a delay.*/
+ spin_lock(&adev->gmc.invalidate_lock);
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
for (j = 0; j < adev->usec_timeout; j++) {
tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
- tmp &= 1 << vmid;
- if (tmp)
+ if (tmp & (1 << vmid))
break;
udelay(1);
}
- if (j < adev->usec_timeout) {
- spin_unlock(&adev->gmc.invalidate_lock);
- continue;
- }
spin_unlock(&adev->gmc.invalidate_lock);
+ if (j < adev->usec_timeout)
+ continue;
+
DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
}
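
When the KIQ path is unavailable, the rewritten loop polls the ACK register under the invalidate lock. The restructured wait is easier to see in isolation (rreg models RREG32_NO_KIQ, timeout_us models adev->usec_timeout):

#include <errno.h>
#include <stdint.h>

static int wait_for_inv_ack(uint32_t (*rreg)(uint32_t), uint32_t ack_reg,
			    unsigned int vmid, unsigned int timeout_us)
{
	unsigned int j;

	for (j = 0; j < timeout_us; j++) {
		if (rreg(ack_reg) & (1u << vmid))
			return 0;
		/* udelay(1) between polls in the driver */
	}
	return -ETIMEDOUT;
}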
@@ -429,7 +432,7 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
- uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
+ uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
@@ -739,9 +742,8 @@ static int gmc_v9_0_late_init(void *handle)
unsigned vmhub = ring->funcs->vmhub;
ring->vm_inv_eng = vm_inv_eng[vmhub]++;
- dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
- ring->idx, ring->name, ring->vm_inv_eng,
- ring->funcs->vmhub);
+ dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+ ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
}
/* Engine 16 is used for KFD and 17 for GART flushes */
@@ -959,6 +961,9 @@ static int gmc_v9_0_sw_init(void *handle)
 	/* This interrupt is a VMC page fault. */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
+ if (r)
+ return r;
+
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
&adev->gmc.vm_fault);
@@ -991,7 +996,7 @@ static int gmc_v9_0_sw_init(void *handle)
}
adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
- if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->gmc.xgmi.supported) {
r = gfxhub_v1_1_get_xgmi_info(adev);
if (r)
return r;
@@ -1122,7 +1127,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
gfxhub_v1_0_set_fault_enable_default(adev, value);
mmhub_v1_0_set_fault_enable_default(adev, value);
- gmc_v9_0_flush_gpu_tlb(adev, 0);
+ gmc_v9_0_flush_gpu_tlb(adev, 0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index cf0fc61aebe6..a3984d10b604 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -208,34 +208,6 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * iceland_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* iceland_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -440,7 +412,6 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
.get_wptr = iceland_ih_get_wptr,
- .prescreen_iv = iceland_ih_prescreen_iv,
.decode_iv = iceland_ih_decode_iv,
.set_rptr = iceland_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index d0e478f43443..0c9a2c03504e 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
pi->caps_db_ramping ||
pi->caps_td_ramping ||
pi->caps_tcp_ramping) {
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
if (enable) {
ret = kv_program_pt_config_registers(adev, didt_config_kv);
if (ret) {
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return ret;
}
}
kv_do_enable_didt(adev, enable);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index a0db67adc34c..d0d966d6080a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -52,20 +52,25 @@ u64 mmhub_v1_0_get_fb_location(struct amdgpu_device *adev)
return base;
}
-static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
{
- uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo);
+ /* two registers distance between mmVM_CONTEXT0_* to mmVM_CONTEXT1_* */
+ int offset = mmVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32
+ - mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
- WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
- lower_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ offset * vmid, lower_32_bits(page_table_base));
- WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
- upper_32_bits(value));
+ WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ offset * vmid, upper_32_bits(page_table_base));
}
static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- mmhub_v1_0_init_gart_pt_regs(adev);
+ uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
(u32)(adev->gmc.gart_start >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
index bef3d0c0c117..0de0fdf98c00 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h
@@ -34,5 +34,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev,
void mmhub_v1_0_get_clockgating(struct amdgpu_device *adev, u32 *flags);
void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
bool enable);
+void mmhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 64e875d528dd..6a0fcd67662a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -37,7 +37,6 @@
#include "gmc/gmc_8_2_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"
-#include "gca/gfx_8_0_sh_mask.h"
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 882bd83a28c4..0de00fbe9233 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -43,6 +43,8 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
+	GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000,   /* send an interrupt to PSP to pick up the updated VF write pointer */
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
};
@@ -89,7 +91,8 @@ enum psp_gfx_cmd_id
GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */
GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */
GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */
-
+ GFX_CMD_ID_SETUP_VMR = 0x00000009, /* setup VMR region */
+ GFX_CMD_ID_DESTROY_VMR = 0x0000000A, /* destroy VMR region */
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 295c2205485a..d78b4306a36f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -240,12 +240,9 @@ static int psp_v10_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- ring = &psp->km_ring;
-
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 3f3fac2d50cd..6c9a1b748ca7 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -34,6 +34,7 @@
#include "nbio/nbio_7_4_offset.h"
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
+MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
@@ -98,7 +99,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
const char *chip_name;
char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *hdr;
+ const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
@@ -119,16 +121,32 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
if (err)
goto out;
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
- adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
- le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+ adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+ adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
+ adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
+ adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(sos_hdr->sos_size_bytes);
+ adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
+ le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(hdr->sos_offset_bytes);
+ le32_to_cpu(sos_hdr->sos_offset_bytes);
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.ta_fw);
+ if (err)
+ goto out;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
+ adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
+ adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
+ adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
return 0;
out:
if (err) {
@@ -153,8 +171,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
  * have already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg)
+ if (sol_reg) {
+ psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
+ printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
return 0;
+ }
/* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -167,7 +188,7 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
/* Copy PSP System Driver binary to memory */
memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
- /* Provide the sys driver to bootrom */
+ /* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 1 << 16;
@@ -208,7 +229,7 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
/* Copy Secure OS binary to PSP memory */
memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
- /* Provide the PSP secure OS to bootrom */
+ /* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 2 << 16;
@@ -278,26 +299,47 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- /* Write low address of the ring to C2PMSG_69 */
- psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
- /* Write high address of the ring to C2PMSG_70 */
- psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
- /* Write size of ring to C2PMSG_71 */
- psp_ring_reg = ring->ring_size;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
- /* Write the ring initialization command to C2PMSG_64 */
- psp_ring_reg = ring_type;
- psp_ring_reg = psp_ring_reg << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
-
- /* there might be handshake issue with hardware which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x8000FFFF, false);
+ if (psp_support_vmr_ring(psp)) {
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+		/* there might be a handshake issue with the hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+		/* there might be a handshake issue with the hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
return ret;
}
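
The VMR and GPCOM paths differ only in which C2PMSG mailboxes carry the ring address/size and the command/response word. A table makes the pairing explicit (register suffixes taken from the patch; the struct itself is illustrative, with -1 marking the size register the VMR path does not use):

struct psp_ring_mbox {
	int addr_lo, addr_hi, ring_size, cmd_resp; /* mmMP0_SMN_C2PMSG_* suffixes */
};

static const struct psp_ring_mbox vmr_mbox   = { 102, 103, -1, 101 };
static const struct psp_ring_mbox gpcom_mbox = {  69,  70, 71,  64 };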
@@ -308,15 +350,24 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
int ret = 0;
struct amdgpu_device *adev = psp->adev;
- /* Write the ring destroy command to C2PMSG_64 */
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS);
+	/* Write the ring destroy command */
+ if (psp_support_vmr_ring(psp))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
 	/* there might be a handshake issue with the hardware which needs a delay */
mdelay(20);
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
+ /* Wait for response flag (bit 31) */
+ if (psp_support_vmr_ring(psp))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
return ret;
}
@@ -355,7 +406,10 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
- psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
+ if (psp_support_vmr_ring(psp))
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+ else
+ psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
/* Update KM RB frame pointer to new frame */
/* write_frame ptr increments by size of rb_frame in bytes */
@@ -384,7 +438,11 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp,
/* Update the write Pointer in DWORDs */
psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
+ if (psp_support_vmr_ring(psp)) {
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_write_ptr_reg);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
return 0;
}
@@ -529,7 +587,7 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
 	/* send the mode 1 reset command */
WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
@@ -552,24 +610,110 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
int number_devices, struct psp_xgmi_topology_info *topology)
{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+ int i;
+ int ret;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ /* Fill in the shared memory with topology information as input */
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to get the topology information */
+ ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
+ if (ret)
+ return ret;
+
+ /* Read the output topology information from the shared memory */
+ topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+ topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+ for (i = 0; i < topology->num_nodes; i++) {
+ topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+ topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+ topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
+ topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+ }
+
return 0;
}
static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
int number_devices, struct psp_xgmi_topology_info *topology)
{
- return 0;
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ int i;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to set topology information */
+ return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
}
static u64 psp_v11_0_xgmi_get_hive_id(struct psp_context *psp)
{
- u64 hive_id = 0;
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
- /* Remove me when we can get correct hive_id through PSP */
- if (psp->adev->gmc.xgmi.num_physical_nodes)
- hive_id = 0x123456789abcdef;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
- return hive_id;
+ /* Invoke xgmi ta to get hive id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return 0;
+ else
+ return xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+}
+
+static u64 psp_v11_0_xgmi_get_node_id(struct psp_context *psp)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+	xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
+
+ /* Invoke xgmi ta to get the node id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return 0;
+ else
+ return xgmi_cmd->xgmi_out_message.get_node_id.node_id;
}
static const struct psp_funcs psp_v11_0_funcs = {
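
get_hive_id and get_node_id share one request shape: zero the shared buffer, set cmd_id, invoke the TA, read the typed output, returning 0 on failure. A condensed sketch reusing the patch's types (not a drop-in replacement):

static u64 psp_v11_0_xgmi_query_id(struct psp_context *psp, int cmd_id,
				   bool want_node_id)
{
	struct ta_xgmi_shared_memory *xgmi_cmd =
		(struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;

	memset(xgmi_cmd, 0, sizeof(*xgmi_cmd));
	xgmi_cmd->cmd_id = cmd_id;

	if (psp_xgmi_invoke(psp, cmd_id))
		return 0;

	return want_node_id ?
		xgmi_cmd->xgmi_out_message.get_node_id.node_id :
		xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
}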
@@ -587,6 +731,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
.xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
.xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
+ .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index e1ebf770c303..7357fd56e614 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -194,7 +194,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
/* Copy PSP System Driver binary to memory */
memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
- /* Provide the sys driver to bootrom */
+ /* Provide the sys driver to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 1 << 16;
@@ -254,7 +254,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
/* Copy Secure OS binary to PSP memory */
memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
- /* Provide the PSP secure OS to bootrom */
+ /* Provide the PSP secure OS to bootloader */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36,
(uint32_t)(psp->fw_pri_mc_addr >> 20));
psp_gfxdrv_command_reg = 2 << 16;
@@ -356,12 +356,9 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- struct psp_ring *ring;
unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- ring = &psp->km_ring;
-
/* Write the ring destroy command to C2PMSG_64 */
psp_ring_reg = 3 << 16;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
@@ -593,9 +590,9 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
}
 	/* send the mode 1 reset command */
- WREG32(offset, 0x70000);
+ WREG32(offset, GFX_CTRL_CMD_ID_MODE1_RST);
- mdelay(1000);
+ msleep(500);
offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 2d4770e173dd..9f3cb2aec7c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -225,7 +225,7 @@ static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -245,9 +245,12 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
/* IB packet must end on a 8 DW boundary */
sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -349,8 +352,8 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -471,17 +474,15 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- ring->ready = true;
+ ring->sched.ready = true;
}
sdma_v2_4_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -550,21 +551,16 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -581,15 +577,11 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -612,20 +604,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -644,21 +632,16 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -760,7 +743,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
*/
static void sdma_v2_4_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1105,8 +1088,14 @@ static int sdma_v2_4_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ u8 instance_id, queue_id;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+ instance_id = (entry->ring_id & 0x3) >> 0;
+ queue_id = (entry->ring_id & 0xc) >> 2;
+
+ if (instance_id <= 1 && queue_id == 0)
+ drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
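
The ring_id decode above packs the SDMA instance into bits 1:0 and the queue into bits 3:2 (field layout taken from the patch itself, not from hardware documentation). A standalone, user-space sketch of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
    	for (unsigned int ring_id = 0; ring_id < 16; ring_id++) {
    		unsigned int instance_id = ring_id & 0x3;      /* bits 1:0 */
    		unsigned int queue_id = (ring_id & 0xc) >> 2;  /* bits 3:2 */

    		printf("ring_id %2u -> instance %u queue %u%s\n",
    		       ring_id, instance_id, queue_id,
    		       (instance_id <= 1 && queue_id == 0) ?
    		       " (faults the gfx ring scheduler)" : "");
    	}
    	return 0;
    }
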
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 6fb3edaba0ec..1bccc5fe2d9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -399,7 +399,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -419,9 +419,12 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI).
*/
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
/* IB packet must end on an 8 DW boundary */
sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -523,8 +526,8 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -739,7 +742,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
- ring->ready = true;
+ ring->sched.ready = true;
}
/* unhalt the MEs */
@@ -749,11 +752,9 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -822,21 +823,16 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -853,15 +849,11 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -884,20 +876,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -916,21 +904,16 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -1031,7 +1014,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
*/
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1163,7 +1146,7 @@ static int sdma_v3_0_sw_init(void *handle)
if (!amdgpu_sriov_vf(adev)) {
ring->use_doorbell = true;
ring->doorbell_index = (i == 0) ?
- AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.sdma_engine0 : adev->doorbell_index.sdma_engine1;
} else {
ring->use_pollmem = true;
}
@@ -1440,8 +1423,14 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ u8 instance_id, queue_id;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+ instance_id = (entry->ring_id & 0x3) >> 0;
+ queue_id = (entry->ring_id & 0xc) >> 2;
+
+ if (instance_id <= 1 && queue_id == 0)
+ drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7a8c9172d30a..4b6d3e5c821f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -54,6 +54,11 @@ MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
+#define WREG32_SDMA(instance, offset, value) \
+ WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)
+#define RREG32_SDMA(instance, offset) \
+ RREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)))
+
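
The two helper macros above exist to shorten the per-instance register accesses that dominate this file; note that they capture adev from the caller's scope rather than taking it as a parameter. A user-space mock of the pattern (the offset layout and register values below are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    struct amdgpu_device { int dummy; };

    static uint32_t sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
    					     unsigned int instance, uint32_t offset)
    {
    	(void)adev;			/* real driver derives per-ASIC bases */
    	return instance * 0x1000 + offset;	/* made-up layout */
    }

    static void WREG32(uint32_t reg, uint32_t value)
    {
    	printf("write reg 0x%05x = 0x%08x\n", (unsigned)reg, (unsigned)value);
    }

    /* as in the patch, the macro picks up 'adev' from the enclosing scope */
    #define WREG32_SDMA(instance, offset, value) \
    	WREG32(sdma_v4_0_get_reg_offset(adev, (instance), (offset)), value)

    int main(void)
    {
    	struct amdgpu_device dev = { 0 }, *adev = &dev;

    	WREG32_SDMA(0, 0x40, 0xCAFEDEAD);
    	WREG32_SDMA(1, 0x40, 0xCAFEDEAD);
    	return 0;
    }
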
static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
@@ -367,16 +372,11 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
- u32 lowbit, highbit;
-
- lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
- highbit = RREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
-
- DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
- ring->me, highbit, lowbit);
- wptr = highbit;
+ wptr = RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI);
wptr = wptr << 32;
- wptr |= lowbit;
+ wptr |= RREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR);
+ DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
+ ring->me, wptr);
}
return wptr >> 2;
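
Composing the 64-bit write pointer before the single >> 2 is more than cosmetic: the removed code shifted each 32-bit half separately and then combined them, which both misplaced the high half and dropped the two bits straddling the word boundary. A minimal demonstration of the difference (the register values are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t hi = 0x1, lo = 0x80000004;	/* hypothetical WPTR_HI/WPTR reads */

    	uint64_t new = (((uint64_t)hi << 32) | lo) >> 2;	 /* combine, then shift */
    	uint64_t old = ((uint64_t)(hi >> 2) << 32) | (lo >> 2);  /* shift halves first */

    	printf("combined-then-shifted 0x%016llx vs halves-shifted 0x%016llx\n",
    	       (unsigned long long)new, (unsigned long long)old);
    	return 0;
    }
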
@@ -417,14 +417,67 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring)
lower_32_bits(ring->wptr << 2),
ring->me,
upper_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
- WREG32(sdma_v4_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR,
+ lower_32_bits(ring->wptr << 2));
+ WREG32_SDMA(ring->me, mmSDMA0_GFX_RB_WPTR_HI,
+ upper_32_bits(ring->wptr << 2));
+ }
+}
+
+/**
+ * sdma_v4_0_page_ring_get_wptr - get the current write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Get the current wptr from the hardware (VEGA10+).
+ */
+static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u64 wptr;
+
+ if (ring->use_doorbell) {
+ /* XXX check if swapping is necessary on BE */
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ } else {
+ wptr = RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI);
+ wptr = wptr << 32;
+ wptr |= RREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR);
+ }
+
+ return wptr >> 2;
+}
+
+/**
+ * sdma_v4_0_page_ring_set_wptr - commit the write pointer
+ *
+ * @ring: amdgpu ring pointer
+ *
+ * Write the wptr back to the hardware (VEGA10+).
+ */
+static void sdma_v4_0_page_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell) {
+ u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
+
+ /* XXX check if swapping is necessary on BE */
+ WRITE_ONCE(*wb, (ring->wptr << 2));
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ } else {
+ uint64_t wptr = ring->wptr << 2;
+
+ WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR,
+ lower_32_bits(wptr));
+ WREG32_SDMA(ring->me, mmSDMA0_PAGE_RB_WPTR_HI,
+ upper_32_bits(wptr));
}
}
static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -444,9 +497,12 @@ static void sdma_v4_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VEGA10).
*/
static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
/* IB packet must end on an 8 DW boundary */
sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8);
@@ -568,16 +624,16 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
for (i = 0; i < adev->sdma.num_instances; i++) {
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
}
- sdma0->ready = false;
- sdma1->ready = false;
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
}
/**
@@ -593,6 +649,39 @@ static void sdma_v4_0_rlc_stop(struct amdgpu_device *adev)
}
/**
+ * sdma_v4_0_page_stop - stop the page async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Stop the page async dma ring buffers (VEGA10).
+ */
+static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].page;
+ struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].page;
+ u32 rb_cntl, ib_cntl;
+ int i;
+
+ if ((adev->mman.buffer_funcs_ring == sdma0) ||
+ (adev->mman.buffer_funcs_ring == sdma1))
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+ RB_ENABLE, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
+ IB_ENABLE, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
+ }
+
+ sdma0->sched.ready = false;
+ sdma1->sched.ready = false;
+}
+
+/**
* sdma_v4_0_ctx_switch_enable - stop the async dma engines context switch
*
* @adev: amdgpu_device pointer
@@ -630,18 +719,15 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ f32_cntl = RREG32_SDMA(i, mmSDMA0_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
AUTO_CTXSW_ENABLE, enable ? 1 : 0);
if (enable && amdgpu_sdma_phase_quantum) {
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
- phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
- phase_quantum);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
- phase_quantum);
+ WREG32_SDMA(i, mmSDMA0_PHASE0_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, mmSDMA0_PHASE1_QUANTUM, phase_quantum);
+ WREG32_SDMA(i, mmSDMA0_PHASE2_QUANTUM, phase_quantum);
}
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+ WREG32_SDMA(i, mmSDMA0_CNTL, f32_cntl);
}
}
@@ -662,156 +748,215 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
if (enable == false) {
sdma_v4_0_gfx_stop(adev);
sdma_v4_0_rlc_stop(adev);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_0_page_stop(adev);
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
+ f32_cntl = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
+ WREG32_SDMA(i, mmSDMA0_F32_CNTL, f32_cntl);
}
}
/**
+ * sdma_v4_0_rb_cntl - get parameters for rb_cntl
+ */
+static uint32_t sdma_v4_0_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
+{
+ /* Set ring buffer size in dwords */
+ uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
+
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
+#ifdef __BIG_ENDIAN
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+ RPTR_WRITEBACK_SWAP_ENABLE, 1);
+#endif
+ return rb_cntl;
+}
+
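
sdma_v4_0_rb_cntl() factors out the RB_SIZE/endian setup that was previously duplicated per instance. order_base_2() is the kernel's ceil(log2(n)); a user-space stand-in shows the value that ends up in the RB_SIZE field (the ring size below is hypothetical):

    #include <stdio.h>

    /* user-space stand-in for the kernel's order_base_2(), i.e. ceil(log2(n)) */
    static unsigned int order_base_2(unsigned long n)
    {
    	unsigned int order = 0;

    	while ((1UL << order) < n)
    		order++;
    	return order;
    }

    int main(void)
    {
    	unsigned long ring_size = 4096;	/* bytes; hypothetical */

    	/* 4096 bytes = 1024 dwords -> RB_SIZE field of 10 */
    	printf("rb_bufsz = %u\n", order_base_2(ring_size / 4));
    	return 0;
    }
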
+/**
* sdma_v4_0_gfx_resume - setup and start the async dma engines
*
* @adev: amdgpu_device pointer
+ * @i: instance to resume
*
* Set up the gfx DMA ring buffers and enable them (VEGA10).
*/
-static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
+static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
{
- struct amdgpu_ring *ring;
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
u32 rb_cntl, ib_cntl, wptr_poll_cntl;
- u32 rb_bufsz;
u32 wb_offset;
u32 doorbell;
u32 doorbell_offset;
- u32 temp;
u64 wptr_gpu_addr;
- int i, r;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
- wb_offset = (ring->rptr_offs * 4);
+ wb_offset = (ring->rptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
+ rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
- /* Set ring buffer size in dwords */
- rb_bufsz = order_base_2(ring->ring_size / 4);
- rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
-#ifdef __BIG_ENDIAN
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
- RPTR_WRITEBACK_SWAP_ENABLE, 1);
-#endif
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_HI, 0);
- /* Initialize the ring buffer's read and write pointers */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
- /* set the wb address whether it's enabled or not */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
- upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
- lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+ ring->wptr = 0;
- ring->wptr = 0;
+ /* before programming wptr to a smaller value, minor_ptr_update must be set first */
+ WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 1);
- /* before programing wptr to a less value, need set minor_ptr_update first */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
-
- if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
- }
+ doorbell = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET);
- doorbell = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
- doorbell_offset = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
-
- if (ring->use_doorbell) {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
- doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA0_GFX_DOORBELL_OFFSET,
OFFSET, ring->doorbell_index);
- } else {
- doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
- }
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
- adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
- ring->doorbell_index);
+ WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL, doorbell);
+ WREG32_SDMA(i, mmSDMA0_GFX_DOORBELL_OFFSET, doorbell_offset);
+ adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
+ ring->doorbell_index);
+
+ sdma_v4_0_ring_set_wptr(ring);
+
+ /* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SDMA(i, mmSDMA0_GFX_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_GFX_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
- if (amdgpu_sriov_vf(adev))
- sdma_v4_0_ring_set_wptr(ring);
+ ring->sched.ready = true;
+}
- /* set minor_ptr_update to 0 after wptr programed */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+/**
+ * sdma_v4_0_page_resume - setup and start the async dma engines
+ *
+ * @adev: amdgpu_device pointer
+ * @i: instance to resume
+ *
+ * Set up the page DMA ring buffers and enable them (VEGA10).
+ */
+static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
+{
+ struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
+ u32 wb_offset;
+ u32 doorbell;
+ u32 doorbell_offset;
+ u64 wptr_gpu_addr;
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+ wb_offset = (ring->rptr_offs * 4);
- if (!amdgpu_sriov_vf(adev)) {
- /* unhalt engine */
- temp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
- }
+ rb_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL);
+ rb_cntl = sdma_v4_0_rb_cntl(ring, rb_cntl);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
- /* setup the wptr shadow polling */
- wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
- lower_32_bits(wptr_gpu_addr));
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
- upper_32_bits(wptr_gpu_addr));
- wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
- if (amdgpu_sriov_vf(adev))
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
- else
- wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl);
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_HI, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR, 0);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_HI, 0);
- /* enable DMA RB */
- rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+ /* set the wb address whether it's enabled or not */
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_HI,
+ upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_RPTR_ADDR_LO,
+ lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
- ib_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
-#ifdef __BIG_ENDIAN
- ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
-#endif
- /* enable DMA IBs */
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL,
+ RPTR_WRITEBACK_ENABLE, 1);
- ring->ready = true;
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE, ring->gpu_addr >> 8);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
- if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */
- sdma_v4_0_ctx_switch_enable(adev, true);
- sdma_v4_0_enable(adev, true);
- }
+ ring->wptr = 0;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
- return r;
- }
+ /* before programming wptr to a smaller value, minor_ptr_update must be set first */
+ WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 1);
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ doorbell = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL);
+ doorbell_offset = RREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET);
- }
+ doorbell = REG_SET_FIELD(doorbell, SDMA0_PAGE_DOORBELL, ENABLE,
+ ring->use_doorbell);
+ doorbell_offset = REG_SET_FIELD(doorbell_offset,
+ SDMA0_PAGE_DOORBELL_OFFSET,
+ OFFSET, ring->doorbell_index);
+ WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL, doorbell);
+ WREG32_SDMA(i, mmSDMA0_PAGE_DOORBELL_OFFSET, doorbell_offset);
+
+ /* the paging queue doorbell range is set up in sdma_v4_0_gfx_resume */
+ sdma_v4_0_page_ring_set_wptr(ring);
+
+ /* set minor_ptr_update to 0 after wptr is programmed */
+ WREG32_SDMA(i, mmSDMA0_PAGE_MINOR_PTR_UPDATE, 0);
+
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
+ SDMA0_PAGE_RB_WPTR_POLL_CNTL,
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
+
+ /* enable DMA RB */
+ rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_PAGE_RB_CNTL, RB_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_PAGE_RB_CNTL, rb_cntl);
+
+ ib_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL);
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_ENABLE, 1);
+#ifdef __BIG_ENDIAN
+ ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
+#endif
+ /* enable DMA IBs */
+ WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
- return 0;
+ ring->sched.ready = true;
}
static void
@@ -922,12 +1067,14 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
(adev->sdma.instance[i].fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
+ WREG32_SDMA(i, mmSDMA0_UCODE_ADDR, 0);
for (j = 0; j < fw_size; j++)
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
+ WREG32_SDMA(i, mmSDMA0_UCODE_DATA,
+ le32_to_cpup(fw_data++));
- WREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
+ WREG32_SDMA(i, mmSDMA0_UCODE_ADDR,
+ adev->sdma.instance[i].fw_version);
}
return 0;
@@ -943,33 +1090,78 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev)
*/
static int sdma_v4_0_start(struct amdgpu_device *adev)
{
- int r = 0;
+ struct amdgpu_ring *ring;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
+ } else {
- /* set RB registers */
- r = sdma_v4_0_gfx_resume(adev);
- return r;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ r = sdma_v4_0_load_microcode(adev);
+ if (r)
+ return r;
+ }
+
+ /* unhalt the MEs */
+ sdma_v4_0_enable(adev, true);
+ /* enable sdma ring preemption */
+ sdma_v4_0_ctx_switch_enable(adev, true);
}
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- r = sdma_v4_0_load_microcode(adev);
+ /* start the gfx rings and rlc compute queues */
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ uint32_t temp;
+
+ WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+ sdma_v4_0_gfx_resume(adev, i);
+ if (adev->sdma.has_page_queue)
+ sdma_v4_0_page_resume(adev, i);
+
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32_SDMA(i, mmSDMA0_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+ WREG32_SDMA(i, mmSDMA0_CNTL, temp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ /* unhalt engine */
+ temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
+ temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
+ WREG32_SDMA(i, mmSDMA0_F32_CNTL, temp);
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ sdma_v4_0_ctx_switch_enable(adev, true);
+ sdma_v4_0_enable(adev, true);
+ } else {
+ r = sdma_v4_0_rlc_resume(adev);
if (r)
return r;
}
- /* unhalt the MEs */
- sdma_v4_0_enable(adev, true);
- /* enable sdma ring preemption */
- sdma_v4_0_ctx_switch_enable(adev, true);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ ring = &adev->sdma.instance[i].ring;
+
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
+ return r;
- /* start the gfx rings and rlc compute queues */
- r = sdma_v4_0_gfx_resume(adev);
- if (r)
- return r;
- r = sdma_v4_0_rlc_resume(adev);
+ if (adev->sdma.has_page_queue) {
+ struct amdgpu_ring *page = &adev->sdma.instance[i].page;
+
+ r = amdgpu_ring_test_helper(page);
+ if (r)
+ return r;
+
+ if (adev->mman.buffer_funcs_ring == page)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+
+ if (adev->mman.buffer_funcs_ring == ring)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
return r;
}
@@ -993,21 +1185,16 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
@@ -1024,15 +1211,11 @@ static int sdma_v4_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -1055,20 +1238,16 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
@@ -1087,21 +1266,17 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
+
err1:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -1206,7 +1381,7 @@ static void sdma_v4_0_vm_set_pte_pde(struct amdgpu_ib *ib,
*/
static void sdma_v4_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
@@ -1272,15 +1447,46 @@ static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
}
+static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
+{
+ uint fw_version = adev->sdma.instance[0].fw_version;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return fw_version >= 430;
+ case CHIP_VEGA12:
+ /*return fw_version >= 31;*/
+ return false;
+ case CHIP_VEGA20:
+ /*return fw_version >= 115;*/
+ return false;
+ default:
+ return false;
+ }
+}
+
static int sdma_v4_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
if (adev->asic_type == CHIP_RAVEN)
adev->sdma.num_instances = 1;
else
adev->sdma.num_instances = 2;
+ r = sdma_v4_0_init_microcode(adev);
+ if (r) {
+ DRM_ERROR("Failed to load sdma firmware!\n");
+ return r;
+ }
+
+ /* TODO: Page queue breaks driver reload under SRIOV */
+ if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf(adev))
+ adev->sdma.has_page_queue = false;
+ else if (sdma_v4_0_fw_support_paging_queue(adev))
+ adev->sdma.has_page_queue = true;
+
sdma_v4_0_set_ring_funcs(adev);
sdma_v4_0_set_buffer_funcs(adev);
sdma_v4_0_set_vm_pte_funcs(adev);
@@ -1289,7 +1495,6 @@ static int sdma_v4_0_early_init(void *handle)
return 0;
}
-
static int sdma_v4_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
@@ -1308,12 +1513,6 @@ static int sdma_v4_0_sw_init(void *handle)
if (r)
return r;
- r = sdma_v4_0_init_microcode(adev);
- if (r) {
- DRM_ERROR("Failed to load sdma firmware!\n");
- return r;
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1322,15 +1521,10 @@ static int sdma_v4_0_sw_init(void *handle)
DRM_INFO("use_doorbell being set to: [%s]\n",
ring->use_doorbell?"true":"false");
- if (adev->asic_type == CHIP_VEGA10)
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
- else
- ring->doorbell_index = (i == 0) ?
- (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset
- : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset
-
+ /* doorbell size is 2 dwords, get DWORD offset */
+ ring->doorbell_index = (i == 0) ?
+ (adev->doorbell_index.sdma_engine0 << 1)
+ : (adev->doorbell_index.sdma_engine1 << 1);
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1340,6 +1534,29 @@ static int sdma_v4_0_sw_init(void *handle)
AMDGPU_SDMA_IRQ_TRAP1);
if (r)
return r;
+
+ if (adev->sdma.has_page_queue) {
+ ring = &adev->sdma.instance[i].page;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+
+ /* paging queue uses the same doorbell index/routing as the gfx queue
+ * with a 0x400 (1024 dword, i.e. 4 KiB) offset onto the second doorbell page
+ */
+ ring->doorbell_index = (i == 0) ?
+ (adev->doorbell_index.sdma_engine0 << 1)
+ : (adev->doorbell_index.sdma_engine1 << 1);
+ ring->doorbell_index += 0x400;
+
+ sprintf(ring->name, "page%d", i);
+ r = amdgpu_ring_init(adev, ring, 1024,
+ &adev->sdma.trap_irq,
+ (i == 0) ?
+ AMDGPU_SDMA_IRQ_TRAP0 :
+ AMDGPU_SDMA_IRQ_TRAP1);
+ if (r)
+ return r;
+ }
}
return r;
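
The paging queue thus reuses the gfx queue's doorbell routing, bumped by 0x400 dword-sized slots (one 4 KiB doorbell page). The index arithmetic in isolation, with hypothetical engine indices standing in for adev->doorbell_index.sdma_engine{0,1}:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int sdma_engine0 = 0xe0, sdma_engine1 = 0xe8;	/* hypothetical */

    	for (int i = 0; i < 2; i++) {
    		unsigned int gfx = ((i == 0) ? sdma_engine0 : sdma_engine1) << 1;
    		unsigned int page = gfx + 0x400;	/* next 4 KiB doorbell page */

    		printf("sdma%d: gfx doorbell 0x%x, page doorbell 0x%x\n",
    		       i, gfx, page);
    	}
    	return 0;
    }
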
@@ -1350,8 +1567,11 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ if (adev->sdma.has_page_queue)
+ amdgpu_ring_fini(&adev->sdma.instance[i].page);
+ }
for (i = 0; i < adev->sdma.num_instances; i++) {
release_firmware(adev->sdma.instance[i].fw);
@@ -1414,7 +1634,7 @@ static bool sdma_v4_0_is_idle(void *handle)
u32 i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- u32 tmp = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
+ u32 tmp = RREG32_SDMA(i, mmSDMA0_STATUS_REG);
if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
return false;
@@ -1430,8 +1650,8 @@ static int sdma_v4_0_wait_for_idle(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- sdma0 = RREG32(sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
- sdma1 = RREG32(sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
+ sdma0 = RREG32_SDMA(0, mmSDMA0_STATUS_REG);
+ sdma1 = RREG32_SDMA(1, mmSDMA0_STATUS_REG);
if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
return 0;
@@ -1452,16 +1672,13 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
+ unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1;
u32 sdma_cntl;
- u32 reg_offset = (type == AMDGPU_SDMA_IRQ_TRAP0) ?
- sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
- sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
-
- sdma_cntl = RREG32(reg_offset);
+ sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(reg_offset, sdma_cntl);
+ WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl);
return 0;
}
@@ -1470,39 +1687,32 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ uint32_t instance;
+
DRM_DEBUG("IH: SDMA trap\n");
switch (entry->client_id) {
case SOC15_IH_CLIENTID_SDMA0:
- switch (entry->ring_id) {
- case 0:
- amdgpu_fence_process(&adev->sdma.instance[0].ring);
- break;
- case 1:
- /* XXX compute */
- break;
- case 2:
- /* XXX compute */
- break;
- case 3:
- /* XXX page queue*/
- break;
- }
+ instance = 0;
break;
case SOC15_IH_CLIENTID_SDMA1:
- switch (entry->ring_id) {
- case 0:
- amdgpu_fence_process(&adev->sdma.instance[1].ring);
- break;
- case 1:
- /* XXX compute */
- break;
- case 2:
- /* XXX compute */
- break;
- case 3:
- /* XXX page queue*/
- break;
- }
+ instance = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (entry->ring_id) {
+ case 0:
+ amdgpu_fence_process(&adev->sdma.instance[instance].ring);
+ break;
+ case 1:
+ /* XXX compute */
+ break;
+ case 2:
+ /* XXX compute */
+ break;
+ case 3:
+ amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
}
return 0;
@@ -1512,12 +1722,29 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ int instance;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_SDMA0:
+ instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_SDMA1:
+ instance = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (entry->ring_id) {
+ case 0:
+ drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
+ break;
+ }
return 0;
}
-
static void sdma_v4_0_update_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
@@ -1730,6 +1957,38 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static const struct amdgpu_ring_funcs sdma_v4_0_page_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_SDMA,
+ .align_mask = 0xf,
+ .nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
+ .support_64bit_ptrs = true,
+ .vmhub = AMDGPU_MMHUB,
+ .get_rptr = sdma_v4_0_ring_get_rptr,
+ .get_wptr = sdma_v4_0_page_ring_get_wptr,
+ .set_wptr = sdma_v4_0_page_ring_set_wptr,
+ .emit_frame_size =
+ 6 + /* sdma_v4_0_ring_emit_hdp_flush */
+ 3 + /* hdp invalidate */
+ 6 + /* sdma_v4_0_ring_emit_pipeline_sync */
+ /* sdma_v4_0_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
+ 10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
+ .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
+ .emit_ib = sdma_v4_0_ring_emit_ib,
+ .emit_fence = sdma_v4_0_ring_emit_fence,
+ .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
+ .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
+ .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
+ .test_ring = sdma_v4_0_ring_test_ring,
+ .test_ib = sdma_v4_0_ring_test_ib,
+ .insert_nop = sdma_v4_0_ring_insert_nop,
+ .pad_ib = sdma_v4_0_ring_pad_ib,
+ .emit_wreg = sdma_v4_0_ring_emit_wreg,
+ .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1737,6 +1996,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
adev->sdma.instance[i].ring.me = i;
+ if (adev->sdma.has_page_queue) {
+ adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
+ adev->sdma.instance[i].page.me = i;
+ }
}
}
@@ -1818,7 +2081,10 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
+ if (adev->sdma.has_page_queue)
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+ else
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
@@ -1836,7 +2102,10 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
for (i = 0; i < adev->sdma.num_instances; i++) {
- sched = &adev->sdma.instance[i].ring.sched;
+ if (adev->sdma.has_page_queue)
+ sched = &adev->sdma.instance[i].page.sched;
+ else
+ sched = &adev->sdma.instance[i].ring.sched;
adev->vm_manager.vm_pte_rqs[i] =
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index adbaea6da0d7..b6e473134e19 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -61,9 +61,11 @@ static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
}
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
* Pad as necessary with NOPs.
*/
@@ -122,7 +124,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
- ring->ready = false;
+ ring->sched.ready = false;
}
}
@@ -175,13 +177,11 @@ static int si_dma_start(struct amdgpu_device *adev)
WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);
- ring->ready = true;
+ ring->sched.ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
@@ -209,21 +209,16 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
u64 gpu_addr;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 4);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_device_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
@@ -238,15 +233,11 @@ static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_device_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -269,20 +260,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = amdgpu_device_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ if (r)
goto err0;
- }
ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
ib.ptr[1] = lower_32_bits(gpu_addr);
@@ -295,21 +282,16 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
amdgpu_ib_free(adev, &ib, NULL);
@@ -658,15 +640,6 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
-static int si_dma_process_illegal_inst_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
- return 0;
-}
-
static int si_dma_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -781,15 +754,10 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
.process = si_dma_process_trap_irq,
};
-static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = {
- .process = si_dma_process_illegal_inst_irq,
-};
-
static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
- adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index b3d7d9f83202..2938fb9f17cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -118,19 +118,6 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev)
return (wptr & adev->irq.ih.ptr_mask);
}
-/**
- * si_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- /* Process all interrupts */
- return true;
-}
-
static void si_ih_decode_iv(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
@@ -301,7 +288,6 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
static const struct amdgpu_ih_funcs si_ih_funcs = {
.get_wptr = si_ih_get_wptr,
- .prescreen_iv = si_ih_prescreen_iv,
.decode_iv = si_ih_decode_iv,
.set_rptr = si_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 4cc0dcb1a187..8849b74078d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -507,6 +507,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
return -EINVAL;
}
+ if (adev->asic_type == CHIP_VEGA20)
+ adev->gmc.xgmi.supported = true;
+
if (adev->flags & AMD_IS_APU)
adev->nbio_funcs = &nbio_v7_0_funcs;
else if (adev->asic_type == CHIP_VEGA20)
@@ -613,6 +616,24 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.flush_hdp = &soc15_flush_hdp,
.invalidate_hdp = &soc15_invalidate_hdp,
.need_full_reset = &soc15_need_full_reset,
+ .init_doorbell_index = &vega10_doorbell_index_init,
+};
+
+static const struct amdgpu_asic_funcs vega20_asic_funcs =
+{
+ .read_disabled_bios = &soc15_read_disabled_bios,
+ .read_bios_from_rom = &soc15_read_bios_from_rom,
+ .read_register = &soc15_read_register,
+ .reset = &soc15_asic_reset,
+ .set_vga_state = &soc15_vga_set_state,
+ .get_xclk = &soc15_get_xclk,
+ .set_uvd_clocks = &soc15_set_uvd_clocks,
+ .set_vce_clocks = &soc15_set_vce_clocks,
+ .get_config_memsize = &soc15_get_config_memsize,
+ .flush_hdp = &soc15_flush_hdp,
+ .invalidate_hdp = &soc15_invalidate_hdp,
+ .need_full_reset = &soc15_need_full_reset,
+ .init_doorbell_index = &vega20_doorbell_index_init,
};
static int soc15_common_early_init(void *handle)
@@ -632,11 +653,11 @@ static int soc15_common_early_init(void *handle)
adev->se_cac_rreg = &soc15_se_cac_rreg;
adev->se_cac_wreg = &soc15_se_cac_wreg;
- adev->asic_funcs = &soc15_asic_funcs;
adev->external_rev_id = 0xFF;
switch (adev->asic_type) {
case CHIP_VEGA10:
+ adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
@@ -660,6 +681,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = 0x1;
break;
case CHIP_VEGA12:
+ adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -682,6 +704,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_VEGA20:
+ adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -704,6 +727,7 @@ static int soc15_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x28;
break;
case CHIP_RAVEN:
+ adev->asic_funcs = &soc15_asic_funcs;
if (adev->rev_id >= 0x8)
adev->external_rev_id = adev->rev_id + 0x81;
else if (adev->pdev->device == 0x15d8)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index f8ad7804dc40..a66c8bfbbaa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -58,4 +58,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
+void vega10_doorbell_index_init(struct amdgpu_device *adev);
+void vega20_doorbell_index_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
new file mode 100644
index 000000000000..ac2c27b7630c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_XGMI_IF_H
+#define _TA_XGMI_IF_H
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+enum ta_command_xgmi {
+ TA_COMMAND_XGMI__INITIALIZE = 0x00,
+ TA_COMMAND_XGMI__GET_NODE_ID = 0x01,
+ TA_COMMAND_XGMI__GET_HIVE_ID = 0x02,
+ TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO = 0x03,
+ TA_COMMAND_XGMI__SET_TOPOLOGY_INFO = 0x04
+};
+
+/* XGMI related enumerations */
+/**********************************************************/
+enum ta_xgmi_connected_nodes {
+ TA_XGMI__MAX_CONNECTED_NODES = 64
+};
+
+enum ta_xgmi_status {
+ TA_XGMI_STATUS__SUCCESS = 0x00,
+ TA_XGMI_STATUS__GENERIC_FAILURE = 0x01,
+ TA_XGMI_STATUS__NULL_POINTER = 0x02,
+ TA_XGMI_STATUS__INVALID_PARAMETER = 0x03,
+ TA_XGMI_STATUS__NOT_INITIALIZED = 0x04,
+ TA_XGMI_STATUS__INVALID_NODE_NUM = 0x05,
+ TA_XGMI_STATUS__INVALID_NODE_ID = 0x06,
+ TA_XGMI_STATUS__INVALID_TOPOLOGY = 0x07,
+ TA_XGMI_STATUS__FAILED_ID_GEN = 0x08,
+ TA_XGMI_STATUS__FAILED_TOPOLOGY_INIT = 0x09,
+ TA_XGMI_STATUS__SET_SHARING_ERROR = 0x0A
+};
+
+enum ta_xgmi_assigned_sdma_engine {
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__NOT_ASSIGNED = -1,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA0 = 0,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA1 = 1,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA2 = 2,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA3 = 3,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA4 = 4,
+ TA_XGMI_ASSIGNED_SDMA_ENGINE__SDMA5 = 5
+};
+
+/* input/output structures for XGMI commands */
+/**********************************************************/
+struct ta_xgmi_node_info {
+ uint64_t node_id;
+ uint8_t num_hops;
+ uint8_t is_sharing_enabled;
+ enum ta_xgmi_assigned_sdma_engine sdma_engine;
+};
+
+struct ta_xgmi_cmd_initialize_output {
+ uint32_t status;
+};
+
+struct ta_xgmi_cmd_get_node_id_output {
+ uint64_t node_id;
+};
+
+struct ta_xgmi_cmd_get_hive_id_output {
+ uint64_t hive_id;
+};
+
+struct ta_xgmi_cmd_get_topology_info_input {
+ uint32_t num_nodes;
+ struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_get_topology_info_output {
+ uint32_t num_nodes;
+ struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+struct ta_xgmi_cmd_set_topology_info_input {
+ uint32_t num_nodes;
+ struct ta_xgmi_node_info nodes[TA_XGMI__MAX_CONNECTED_NODES];
+};
+
+/**********************************************************/
+/* Common input structure for XGMI callbacks */
+union ta_xgmi_cmd_input {
+ struct ta_xgmi_cmd_get_topology_info_input get_topology_info;
+ struct ta_xgmi_cmd_set_topology_info_input set_topology_info;
+};
+
+/* Common output structure for XGMI callbacks */
+union ta_xgmi_cmd_output {
+ struct ta_xgmi_cmd_initialize_output initialize;
+ struct ta_xgmi_cmd_get_node_id_output get_node_id;
+ struct ta_xgmi_cmd_get_hive_id_output get_hive_id;
+ struct ta_xgmi_cmd_get_topology_info_output get_topology_info;
+};
+/**********************************************************/
+
+struct ta_xgmi_shared_memory {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ enum ta_xgmi_status xgmi_status;
+ uint32_t reserved;
+ union ta_xgmi_cmd_input xgmi_in_message;
+ union ta_xgmi_cmd_output xgmi_out_message;
+};
+
+#endif //_TA_XGMI_IF_H
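
The RSP_ID convention in the new header means a response carries the originating command ID with bit 31 set, so command and response IDs pair up by masking. A standalone illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define RSP_ID_MASK (1U << 31)
    #define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)

    int main(void)
    {
    	uint32_t cmd = 0x03;	/* e.g. the topology-info command */
    	uint32_t rsp = RSP_ID(cmd);

    	printf("cmd 0x%08x -> rsp 0x%08x, recovered cmd 0x%08x\n",
    	       cmd, rsp, rsp & ~RSP_ID_MASK);
    	return 0;
    }
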
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 3abffd06b5c7..15da06ddeb75 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,34 +219,6 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * tonga_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u16 pasid;
-
- switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
- case 146:
- case 147:
- pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
- if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
- return true;
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- adev->irq.ih.rptr += 16;
- return false;
-}
-
-/**
* tonga_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -322,7 +294,7 @@ static int tonga_ih_sw_init(void *handle)
return r;
adev->irq.ih.use_doorbell = true;
- adev->irq.ih.doorbell_index = AMDGPU_DOORBELL_IH;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
r = amdgpu_irq_init(adev);
@@ -506,7 +478,6 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
.get_wptr = tonga_ih_get_wptr,
- .prescreen_iv = tonga_ih_prescreen_iv,
.decode_iv = tonga_ih_decode_iv,
.set_rptr = tonga_ih_set_rptr
};
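Dropping .prescreen_iv here (and for vega10 further below) means the common interrupt path no longer gives each ASIC a veto before decoding; entries go straight from get_wptr to decode_iv and dispatch. A rough sketch of the resulting common loop, with all names illustrative rather than the exact common-code implementation:

/* Illustrative shape of the common IH loop after prescreen_iv is gone. */
static void example_ih_process(struct amdgpu_device *adev)
{
	u32 wptr = amdgpu_ih_get_wptr(adev);	/* per-ASIC .get_wptr */

	while (adev->irq.ih.rptr != wptr) {
		struct amdgpu_iv_entry entry;

		amdgpu_ih_decode_iv(adev, &entry);	/* per-ASIC .decode_iv */
		amdgpu_irq_dispatch(adev, &entry);	/* common dispatch */
	}
	amdgpu_ih_set_rptr(adev);		/* per-ASIC .set_rptr */
}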
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 1fc17bf39fed..d69c8f6daaf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -116,16 +116,16 @@ static int uvd_v4_2_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
return r;
@@ -162,12 +162,9 @@ static int uvd_v4_2_hw_init(void *handle)
uvd_v4_2_enable_mgcg(adev, true);
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -218,7 +215,7 @@ static int uvd_v4_2_hw_fini(void *handle)
if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -484,11 +481,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
@@ -499,14 +494,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -519,8 +509,9 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
amdgpu_ring_write(ring, ib->gpu_addr);
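The hw_init/hw_fini churn above follows one pattern repeated across every engine in this series: the open-coded ready-flag-plus-test dance becomes amdgpu_ring_test_helper(), and readiness moves from ring->ready to ring->sched.ready. A sketch of the helper consistent with those call sites (the exact logging in the real helper may differ):

/* Sketch implied by the call sites; error reporting is illustrative. */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);

	/* Readiness now lives on the GPU scheduler, not the ring itself */
	ring->sched.ready = !r;
	return r;
}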
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index fde6ad5ac9ab..ee8cd06ddc38 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -113,16 +113,16 @@ static int uvd_v5_0_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
return r;
@@ -158,12 +158,9 @@ static int uvd_v5_0_hw_init(void *handle)
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -215,7 +212,7 @@ static int uvd_v5_0_hw_fini(void *handle)
if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -500,11 +497,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
@@ -515,14 +509,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -535,8 +524,9 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 7a5b40275e8e..d4f4a66f8324 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -175,11 +175,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
int r;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);
@@ -189,14 +186,8 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed\n",
- ring->idx);
+ if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -336,31 +327,24 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
}
+
static int uvd_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -416,16 +400,16 @@ static int uvd_v6_0_sw_init(void *handle)
DRM_INFO("UVD ENC is disabled\n");
}
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
if (r)
return r;
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
@@ -476,12 +460,9 @@ static int uvd_v6_0_hw_init(void *handle)
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v6_0_enable_mgcg(adev, true);
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -513,12 +494,9 @@ static int uvd_v6_0_hw_init(void *handle)
if (uvd_v6_0_enc_support(adev)) {
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
}
@@ -548,7 +526,7 @@ static int uvd_v6_0_hw_fini(void *handle)
if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -969,11 +947,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
@@ -984,14 +960,9 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -1004,9 +975,12 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer
*/
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
amdgpu_ring_write(ring, vmid);
@@ -1027,8 +1001,12 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
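From uvd_v6_0 onward the ->emit_ib hooks take the amdgpu_job and recover the VMID through AMDGPU_JOB_GET_VMID() instead of receiving it as a parameter. A plausible definition, assuming the macro only needs to tolerate a NULL job (direct submissions without a job context):

/* Assumed definition: a NULL job means no VM is involved, i.e. VMID 0. */
#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)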
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 58b39afcfb86..089645e78f98 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -183,11 +183,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
return 0;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n",
- ring->me, ring->idx, r);
+ if (r)
return r;
- }
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);
@@ -197,14 +194,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
- ring->me, ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: (%d)ring %d test failed\n",
- ring->me, ring->idx);
+ if (i >= adev->usec_timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -343,27 +334,19 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
long r;
r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r);
+ if (r)
goto error;
- }
r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
- if (r) {
- DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me);
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r);
- } else {
- DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
@@ -447,10 +430,6 @@ static int uvd_v7_0_sw_init(void *handle)
DRM_INFO("PSP loading UVD firmware\n");
}
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
if (adev->uvd.harvest_config & (1 << j))
continue;
@@ -472,9 +451,9 @@ static int uvd_v7_0_sw_init(void *handle)
* sriov, so set unused location for other unused rings.
*/
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
if (r)
@@ -482,6 +461,10 @@ static int uvd_v7_0_sw_init(void *handle)
}
}
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
r = amdgpu_uvd_entity_init(adev);
if (r)
return r;
@@ -540,12 +523,9 @@ static int uvd_v7_0_hw_init(void *handle)
ring = &adev->uvd.inst[j].ring;
if (!amdgpu_sriov_vf(adev)) {
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
r = amdgpu_ring_alloc(ring, 10);
if (r) {
@@ -582,12 +562,9 @@ static int uvd_v7_0_hw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst[j].ring_enc[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
}
done:
@@ -619,7 +596,7 @@ static int uvd_v7_0_hw_fini(void *handle)
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
if (adev->uvd.harvest_config & (1 << i))
continue;
- adev->uvd.inst[i].ring.ready = false;
+ adev->uvd.inst[i].ring.sched.ready = false;
}
return 0;
@@ -1235,11 +1212,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
- if (r) {
- DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n",
- ring->me, ring->idx, r);
+ if (r)
return r;
- }
+
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0));
amdgpu_ring_write(ring, 0xDEADBEEF);
@@ -1251,14 +1226,9 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
DRM_UDELAY(1);
}
- if (i < adev->usec_timeout) {
- DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n",
- ring->me, ring->idx, i);
- } else {
- DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n",
- ring->me, ring->idx, tmp);
- r = -EINVAL;
- }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
return r;
}
@@ -1300,10 +1270,12 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
* Write ring commands to execute the indirect buffer
*/
static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ bool ctx_switch)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1329,8 +1301,12 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
* Write enc ring commands to execute the indirect buffer
*/
static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
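The IB-test cleanups above lean on dma_fence_wait_timeout()'s return convention: 0 means the wait timed out, a negative value is an error, and a positive value is the remaining timeout on success. That is why the success branch collapses to r = 0. In isolation:

/* Normalizing dma_fence_wait_timeout()'s tri-state return to 0 / -errno. */
long r = dma_fence_wait_timeout(fence, false, timeout);

if (r == 0)		/* wait ran out */
	r = -ETIMEDOUT;
else if (r > 0)		/* fence signaled with time to spare */
	r = 0;
/* else: r already holds a negative errno */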
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index ea28828360d3..bed78a778e3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -463,15 +463,11 @@ static int vce_v2_0_hw_init(void *handle)
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
vce_v2_0_enable_mgcg(adev, true, false);
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
for (i = 0; i < adev->vce.num_rings; i++) {
- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
- else
- adev->vce.ring[i].ready = true;
}
DRM_INFO("VCE initialized successfully.\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6dbd39730070..2668effadd27 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -37,7 +37,6 @@
#include "gca/gfx_8_0_d.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"
-#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -474,15 +473,10 @@ static int vce_v3_0_hw_init(void *handle)
amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
-
for (i = 0; i < adev->vce.num_rings; i++) {
- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
- else
- adev->vce.ring[i].ready = true;
}
DRM_INFO("VCE initialized successfully.\n");
@@ -838,8 +832,12 @@ out:
}
static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 1c9471890bf7..9fb34b7d8e03 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -466,9 +466,9 @@ static int vce_v4_0_sw_init(void *handle)
* so set unused location for other unused rings.
*/
if (i == 0)
- ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring0_1 * 2;
else
- ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1;
+ ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
if (r)
@@ -519,15 +519,10 @@ static int vce_v4_0_hw_init(void *handle)
if (r)
return r;
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
-
for (i = 0; i < adev->vce.num_rings; i++) {
- r = amdgpu_ring_test_ring(&adev->vce.ring[i]);
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
if (r)
return r;
- else
- adev->vce.ring[i].ready = true;
}
DRM_INFO("VCE initialized successfully.\n");
@@ -549,7 +544,7 @@ static int vce_v4_0_hw_fini(void *handle)
}
for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].ready = false;
+ adev->vce.ring[i].sched.ready = false;
return 0;
}
@@ -951,9 +946,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
}
#endif
-static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ struct amdgpu_ib *ib, bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, VCE_CMD_IB_VM);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index eae90922fdbe..4f8352044563 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
+static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
/**
* vcn_v1_0_early_init - set function pointers
@@ -176,30 +177,22 @@ static int vcn_v1_0_hw_init(void *handle)
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
int i, r;
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.ring_enc[i];
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ ring->sched.ready = true;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
}
ring = &adev->vcn.ring_jpeg;
- ring->ready = true;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
goto done;
- }
done:
if (!r)
@@ -222,9 +215,9 @@ static int vcn_v1_0_hw_fini(void *handle)
struct amdgpu_ring *ring = &adev->vcn.ring_dec;
if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
- vcn_v1_0_stop(adev);
+ vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->ready = false;
+ ring->sched.ready = false;
return 0;
}
@@ -1366,10 +1359,12 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
* Write ring commands to execute the indirect buffer
*/
static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0));
@@ -1524,8 +1519,12 @@ static void vcn_v1_0_enc_ring_insert_end(struct amdgpu_ring *ring)
* Write enc ring commands to execute the indirect buffer
*/
static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib, unsigned int vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+
amdgpu_ring_write(ring, VCN_ENC_CMD_IB);
amdgpu_ring_write(ring, vmid);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -1725,10 +1724,12 @@ static void vcn_v1_0_jpeg_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u6
* Write ring commands to execute the indirect buffer.
*/
static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring,
- struct amdgpu_ib *ib,
- unsigned vmid, bool ctx_switch)
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ bool ctx_switch)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
amdgpu_ring_write(ring,
PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_JRBC_IB_VMID), 0, 0, PACKETJ_TYPE0));
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index a0fda6f9252a..2c250b01a903 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -220,90 +220,6 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
}
/**
- * vega10_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
-{
- u32 ring_index = adev->irq.ih.rptr >> 2;
- u32 dw0, dw3, dw4, dw5;
- u16 pasid;
- u64 addr, key;
- struct amdgpu_vm *vm;
- int r;
-
- dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
- dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
- dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
-
- /* Filter retry page faults, let only the first one pass. If
- * there are too many outstanding faults, ignore them until
- * some faults get cleared.
- */
- switch (dw0 & 0xff) {
- case SOC15_IH_CLIENTID_VMC:
- case SOC15_IH_CLIENTID_UTCL2:
- break;
- default:
- /* Not a VM fault */
- return true;
- }
-
- pasid = dw3 & 0xffff;
- /* No PASID, can't identify faulting process */
- if (!pasid)
- return true;
-
- /* Not a retry fault, check fault credit */
- if (!(dw5 & 0x80)) {
- if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
- goto ignore_iv;
- return true;
- }
-
- /* Track retry faults in per-VM fault FIFO. */
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
- addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
- key = AMDGPU_VM_FAULT(pasid, addr);
- if (!vm) {
- /* VM not found, process it normally */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- } else {
- r = amdgpu_vm_add_fault(vm->fault_hash, key);
-
- /* Hash table is full or the fault is already being processed,
- * ignore further page faults
- */
- if (r != 0) {
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
- }
- /* No locking required with single writer and single reader */
- r = kfifo_put(&vm->faults, key);
- if (!r) {
- /* FIFO is full. Ignore it until there is space */
- amdgpu_vm_clear_fault(vm->fault_hash, key);
- spin_unlock(&adev->vm_manager.pasid_lock);
- goto ignore_iv;
- }
-
- spin_unlock(&adev->vm_manager.pasid_lock);
- /* It's the first fault for this address, process it normally */
- return true;
-
-ignore_iv:
- adev->irq.ih.rptr += 32;
- return false;
-}
-
-/**
* vega10_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -385,7 +301,7 @@ static int vega10_ih_sw_init(void *handle)
return r;
adev->irq.ih.use_doorbell = true;
- adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
+ adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
r = amdgpu_irq_init(adev);
@@ -487,7 +403,6 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
- .prescreen_iv = vega10_ih_prescreen_iv,
.decode_iv = vega10_ih_decode_iv,
.set_rptr = vega10_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
index c5c9b2bc190d..422674bb3cdf 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c
@@ -56,4 +56,32 @@ int vega10_reg_base_init(struct amdgpu_device *adev)
return 0;
}
+void vega10_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL64_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL64_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL64_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL64_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL64_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL64_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL64_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL64_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL64_MEC_RING7;
+ adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL64_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL64_USERQUEUE_END;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL64_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL64_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL64_sDMA_ENGINE1;
+ adev->doorbell_index.ih = AMDGPU_DOORBELL64_IH;
+ adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_DOORBELL64_UVD_RING0_1;
+ adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_DOORBELL64_UVD_RING2_3;
+ adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_DOORBELL64_UVD_RING4_5;
+ adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_DOORBELL64_UVD_RING6_7;
+ adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_DOORBELL64_VCE_RING0_1;
+ adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_DOORBELL64_VCE_RING2_3;
+ adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_DOORBELL64_VCE_RING4_5;
+ adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_DOORBELL64_VCE_RING6_7;
+ /* In units of dword doorbells */
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL64_MAX_ASSIGNMENT << 1;
+}
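This table (and the vega20 and VI variants below) decouples the IP blocks from the AMDGPU_DOORBELL64_* constants: each block now reads adev->doorbell_index, which the ASIC fills in exactly once through the new init_doorbell_index hook. A minimal sketch of how common code might invoke it (the wrapper name is an assumption):

/* Assumed wrapper: run the per-ASIC doorbell-index init exactly once. */
static void example_doorbell_index_init(struct amdgpu_device *adev)
{
	if (adev->asic_funcs->init_doorbell_index)
		adev->asic_funcs->init_doorbell_index(adev);
}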
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index d13fc4fcb517..edce413fda9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -54,4 +54,37 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
return 0;
}
+void vega20_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_VEGA20_DOORBELL_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_VEGA20_DOORBELL_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_VEGA20_DOORBELL_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_VEGA20_DOORBELL_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_VEGA20_DOORBELL_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_VEGA20_DOORBELL_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_VEGA20_DOORBELL_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_VEGA20_DOORBELL_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_VEGA20_DOORBELL_MEC_RING7;
+ adev->doorbell_index.userqueue_start = AMDGPU_VEGA20_DOORBELL_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_VEGA20_DOORBELL_USERQUEUE_END;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_VEGA20_DOORBELL_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.sdma_engine2 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2;
+ adev->doorbell_index.sdma_engine3 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3;
+ adev->doorbell_index.sdma_engine4 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4;
+ adev->doorbell_index.sdma_engine5 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5;
+ adev->doorbell_index.sdma_engine6 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6;
+ adev->doorbell_index.sdma_engine7 = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7;
+ adev->doorbell_index.ih = AMDGPU_VEGA20_DOORBELL_IH;
+ adev->doorbell_index.uvd_vce.uvd_ring0_1 = AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1;
+ adev->doorbell_index.uvd_vce.uvd_ring2_3 = AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3;
+ adev->doorbell_index.uvd_vce.uvd_ring4_5 = AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5;
+ adev->doorbell_index.uvd_vce.uvd_ring6_7 = AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7;
+ adev->doorbell_index.uvd_vce.vce_ring0_1 = AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1;
+ adev->doorbell_index.uvd_vce.vce_ring2_3 = AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3;
+ adev->doorbell_index.uvd_vce.vce_ring4_5 = AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5;
+ adev->doorbell_index.uvd_vce.vce_ring6_7 = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7;
+ adev->doorbell_index.max_assignment = AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT << 1;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 07880d35e9de..ff2906c215fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -955,6 +955,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.flush_hdp = &vi_flush_hdp,
.invalidate_hdp = &vi_invalidate_hdp,
.need_full_reset = &vi_need_full_reset,
+ .init_doorbell_index = &legacy_doorbell_index_init,
};
#define CZ_REV_BRISTOL(rev) \
@@ -1712,3 +1713,21 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
return 0;
}
+
+void legacy_doorbell_index_init(struct amdgpu_device *adev)
+{
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL_KIQ;
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_MEC_RING0;
+ adev->doorbell_index.mec_ring1 = AMDGPU_DOORBELL_MEC_RING1;
+ adev->doorbell_index.mec_ring2 = AMDGPU_DOORBELL_MEC_RING2;
+ adev->doorbell_index.mec_ring3 = AMDGPU_DOORBELL_MEC_RING3;
+ adev->doorbell_index.mec_ring4 = AMDGPU_DOORBELL_MEC_RING4;
+ adev->doorbell_index.mec_ring5 = AMDGPU_DOORBELL_MEC_RING5;
+ adev->doorbell_index.mec_ring6 = AMDGPU_DOORBELL_MEC_RING6;
+ adev->doorbell_index.mec_ring7 = AMDGPU_DOORBELL_MEC_RING7;
+ adev->doorbell_index.gfx_ring0 = AMDGPU_DOORBELL_GFX_RING0;
+ adev->doorbell_index.sdma_engine0 = AMDGPU_DOORBELL_sDMA_ENGINE0;
+ adev->doorbell_index.sdma_engine1 = AMDGPU_DOORBELL_sDMA_ENGINE1;
+ adev->doorbell_index.ih = AMDGPU_DOORBELL_IH;
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_MAX_ASSIGNMENT;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h
index 0429fe332269..8de0772f986c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/vi.h
@@ -30,4 +30,5 @@ void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev);
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 5d2475d5392c..177d1e5329a5 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -23,6 +23,7 @@
#include "kfd_priv.h"
#include "kfd_events.h"
#include "cik_int.h"
+#include "amdgpu_amdkfd.h"
static bool cik_event_interrupt_isr(struct kfd_dev *dev,
const uint32_t *ih_ring_entry,
@@ -107,7 +108,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
kfd_process_vm_fault(dev->dqm, pasid);
memset(&info, 0, sizeof(info));
- dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+ amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->kgd, &info);
if (!info.page_addr && !info.status)
return;
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
index 37ce6dd65391..8e2a1663c4db 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -68,6 +68,4 @@
#define GRBM_GFX_INDEX 0x30800
-#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
-
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 14d5b5fa822d..3623538baf6f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -33,10 +33,12 @@
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/mman.h>
+#include <linux/dma-buf.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
+#include "amdgpu_amdkfd.h"
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
@@ -834,8 +836,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
dev = kfd_device_by_id(args->gpu_id);
if (dev)
/* Reading GPU clock counter from KGD */
- args->gpu_clock_counter =
- dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);
+ args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
else
/* Node without GPU resource */
args->gpu_clock_counter = 0;
@@ -1042,7 +1043,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
}
mutex_unlock(&p->mutex);
- err = kfd->kfd2kgd->map_gtt_bo_to_kernel(kfd->kgd,
+ err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
mem, &kern_addr, &size);
if (err) {
pr_err("Failed to map event page to kernel\n");
@@ -1240,7 +1241,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev)
if (dev->device_info->needs_iommu_device)
return false;
- dev->kfd2kgd->get_local_mem_info(dev->kgd, &mem_info);
+ amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
if (mem_info.local_mem_size_private == 0 &&
mem_info.local_mem_size_public > 0)
return true;
@@ -1273,6 +1274,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return -EINVAL;
}
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+ if (args->size != kfd_doorbell_process_slice(dev))
+ return -EINVAL;
+ offset = kfd_get_process_doorbells(dev, p);
+ }
+
mutex_lock(&p->mutex);
pdd = kfd_bind_process_to_device(dev, p);
@@ -1281,7 +1288,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_unlock;
}
- err = dev->kfd2kgd->alloc_memory_of_gpu(
+ err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
dev->kgd, args->va_addr, args->size,
pdd->vm, (struct kgd_mem **) &mem, &offset,
flags);
@@ -1303,7 +1310,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return 0;
err_free:
- dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
err_unlock:
mutex_unlock(&p->mutex);
return err;
@@ -1338,7 +1345,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
goto err_unlock;
}
- ret = dev->kfd2kgd->free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
+ (struct kgd_mem *)mem);
/* If freeing the buffer failed, leave the handle in place for
* clean-up during process tear-down.
@@ -1418,7 +1426,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
err = PTR_ERR(peer_pdd);
goto get_mem_obj_from_handle_failed;
}
- err = peer->kfd2kgd->map_memory_to_gpu(
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
if (err) {
pr_err("Failed to map to gpu %d/%d\n",
@@ -1430,7 +1438,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
mutex_unlock(&p->mutex);
- err = dev->kfd2kgd->sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
+ err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
@@ -1525,7 +1533,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
err = -ENODEV;
goto get_mem_obj_from_handle_failed;
}
- err = dev->kfd2kgd->unmap_memory_to_gpu(
+ err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
if (err) {
pr_err("Failed to unmap from gpu %d/%d\n",
@@ -1549,6 +1557,115 @@ copy_from_user_failed:
return err;
}
+static int kfd_ioctl_get_dmabuf_info(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_get_dmabuf_info_args *args = data;
+ struct kfd_dev *dev = NULL;
+ struct kgd_dev *dma_buf_kgd;
+ void *metadata_buffer = NULL;
+ uint32_t flags;
+ unsigned int i;
+ int r;
+
+ /* Find a KFD GPU device that supports the get_dmabuf_info query */
+ for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++)
+ if (dev)
+ break;
+ if (!dev)
+ return -EINVAL;
+
+ if (args->metadata_ptr) {
+ metadata_buffer = kzalloc(args->metadata_size, GFP_KERNEL);
+ if (!metadata_buffer)
+ return -ENOMEM;
+ }
+
+ /* Get dmabuf info from KGD */
+ r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
+ &dma_buf_kgd, &args->size,
+ metadata_buffer, args->metadata_size,
+ &args->metadata_size, &flags);
+ if (r)
+ goto exit;
+
+ /* Reverse-lookup gpu_id from kgd pointer */
+ dev = kfd_device_by_kgd(dma_buf_kgd);
+ if (!dev) {
+ r = -EINVAL;
+ goto exit;
+ }
+ args->gpu_id = dev->id;
+ args->flags = flags;
+
+ /* Copy metadata buffer to user mode */
+ if (metadata_buffer) {
+ r = copy_to_user((void __user *)args->metadata_ptr,
+ metadata_buffer, args->metadata_size);
+ if (r != 0)
+ r = -EFAULT;
+ }
+
+exit:
+ kfree(metadata_buffer);
+
+ return r;
+}
+
+static int kfd_ioctl_import_dmabuf(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ struct kfd_ioctl_import_dmabuf_args *args = data;
+ struct kfd_process_device *pdd;
+ struct dma_buf *dmabuf;
+ struct kfd_dev *dev;
+ int idr_handle;
+ uint64_t size;
+ void *mem;
+ int r;
+
+ dev = kfd_device_by_id(args->gpu_id);
+ if (!dev)
+ return -EINVAL;
+
+ dmabuf = dma_buf_get(args->dmabuf_fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_bind_process_to_device(dev, p);
+ if (IS_ERR(pdd)) {
+ r = PTR_ERR(pdd);
+ goto err_unlock;
+ }
+
+ r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
+ args->va_addr, pdd->vm,
+ (struct kgd_mem **)&mem, &size,
+ NULL);
+ if (r)
+ goto err_unlock;
+
+ idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
+ if (idr_handle < 0) {
+ r = -EFAULT;
+ goto err_free;
+ }
+
+ mutex_unlock(&p->mutex);
+
+ args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
+
+ return 0;
+
+err_free:
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+err_unlock:
+ mutex_unlock(&p->mutex);
+ return r;
+}
+
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, \
.cmd_drv = 0, .name = #ioctl}
@@ -1634,7 +1751,13 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
kfd_ioctl_set_cu_mask, 0),
AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE,
- kfd_ioctl_get_queue_wave_state, 0)
+ kfd_ioctl_get_queue_wave_state, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_DMABUF_INFO,
+ kfd_ioctl_get_dmabuf_info, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
+ kfd_ioctl_import_dmabuf, 0),
};
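Seen from user space, the two new ioctls compose: GET_DMABUF_INFO resolves the exporting GPU and size for a dma-buf fd, and IMPORT_DMABUF maps it into the process at a caller-chosen VA. A hedged sketch of that flow, assuming the usual <linux/kfd_ioctl.h> argument layout and eliding metadata handling:

/* Userspace sketch; assumes the standard kfd_ioctl.h argument structs. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int example_import_dmabuf(int kfd_fd, int dmabuf_fd, uint64_t va_addr)
{
	struct kfd_ioctl_get_dmabuf_info_args info = {
		.dmabuf_fd = dmabuf_fd,	/* no metadata buffer supplied */
	};
	struct kfd_ioctl_import_dmabuf_args imp = {
		.dmabuf_fd = dmabuf_fd,
		.va_addr = va_addr,
	};

	if (ioctl(kfd_fd, AMDKFD_IOC_GET_DMABUF_INFO, &info))
		return -1;

	imp.gpu_id = info.gpu_id;	/* import on the exporting GPU */
	if (ioctl(kfd_fd, AMDKFD_IOC_IMPORT_DMABUF, &imp))
		return -1;

	return 0;	/* imp.handle now identifies the imported buffer */
}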
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 56412b0e7e1c..c02adbbeef2a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -26,6 +26,7 @@
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
* GPU processor ID are expressed with Bit[31]=1.
@@ -132,6 +133,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
#define fiji_cache_info carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
+#define polaris12_cache_info carrizo_cache_info
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
@@ -646,7 +648,12 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = polaris11_cache_info;
num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
break;
+ case CHIP_POLARIS12:
+ pcache_info = polaris12_cache_info;
+ num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
+ break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
pcache_info = vega10_cache_info;
num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
@@ -753,12 +760,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
return -ENODATA;
}
- pcrat_image = kmalloc(crat_table->length, GFP_KERNEL);
+ pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL);
if (!pcrat_image)
return -ENOMEM;
- memcpy(pcrat_image, crat_table, crat_table->length);
-
*crat_image = pcrat_image;
*size = crat_table->length;
@@ -1161,7 +1166,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
cu->proximity_domain = proximity_domain;
- kdev->kfd2kgd->get_cu_info(kdev->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
cu->num_simd_per_cu = cu_info.simd_per_cu;
cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
cu->max_waves_simd = cu_info.max_waves_per_simd;
@@ -1192,7 +1197,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
* report the total FB size (public+private) as a single
* private heap.
*/
- kdev->kfd2kgd->get_local_mem_info(kdev->kgd, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
sub_type_hdr->length);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index a9f18ea7e354..8be9677c0c07 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -28,6 +28,7 @@
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
#define MQD_SIZE_ALIGNED 768
@@ -204,6 +205,22 @@ static const struct kfd_device_info polaris11_device_info = {
.num_sdma_queues_per_engine = 2,
};
+static const struct kfd_device_info polaris12_device_info = {
+ .asic_family = CHIP_POLARIS12,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 4,
+ .ih_ring_entry_size = 4 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_cik,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info vega10_device_info = {
.asic_family = CHIP_VEGA10,
.max_pasid_bits = 16,
@@ -236,6 +253,22 @@ static const struct kfd_device_info vega10_vf_device_info = {
.num_sdma_queues_per_engine = 2,
};
+static const struct kfd_device_info vega12_device_info = {
+ .asic_family = CHIP_VEGA12,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info vega20_device_info = {
.asic_family = CHIP_VEGA20,
.max_pasid_bits = 16,
@@ -330,6 +363,14 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x67EB, &polaris11_device_info }, /* Polaris11 */
{ 0x67EF, &polaris11_device_info }, /* Polaris11 */
{ 0x67FF, &polaris11_device_info }, /* Polaris11 */
+ { 0x6980, &polaris12_device_info }, /* Polaris12 */
+ { 0x6981, &polaris12_device_info }, /* Polaris12 */
+ { 0x6985, &polaris12_device_info }, /* Polaris12 */
+ { 0x6986, &polaris12_device_info }, /* Polaris12 */
+ { 0x6987, &polaris12_device_info }, /* Polaris12 */
+ { 0x6995, &polaris12_device_info }, /* Polaris12 */
+ { 0x6997, &polaris12_device_info }, /* Polaris12 */
+ { 0x699F, &polaris12_device_info }, /* Polaris12 */
{ 0x6860, &vega10_device_info }, /* Vega10 */
{ 0x6861, &vega10_device_info }, /* Vega10 */
{ 0x6862, &vega10_device_info }, /* Vega10 */
@@ -337,12 +378,24 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x6864, &vega10_device_info }, /* Vega10 */
{ 0x6867, &vega10_device_info }, /* Vega10 */
{ 0x6868, &vega10_device_info }, /* Vega10 */
+ { 0x6869, &vega10_device_info }, /* Vega10 */
+ { 0x686A, &vega10_device_info }, /* Vega10 */
+ { 0x686B, &vega10_device_info }, /* Vega10 */
{ 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
+ { 0x686D, &vega10_device_info }, /* Vega10 */
+ { 0x686E, &vega10_device_info }, /* Vega10 */
+ { 0x686F, &vega10_device_info }, /* Vega10 */
{ 0x687F, &vega10_device_info }, /* Vega10 */
+ { 0x69A0, &vega12_device_info }, /* Vega12 */
+ { 0x69A1, &vega12_device_info }, /* Vega12 */
+ { 0x69A2, &vega12_device_info }, /* Vega12 */
+ { 0x69A3, &vega12_device_info }, /* Vega12 */
+ { 0x69AF, &vega12_device_info }, /* Vega12 */
{ 0x66a0, &vega20_device_info }, /* Vega20 */
{ 0x66a1, &vega20_device_info }, /* Vega20 */
{ 0x66a2, &vega20_device_info }, /* Vega20 */
{ 0x66a3, &vega20_device_info }, /* Vega20 */
+ { 0x66a4, &vega20_device_info }, /* Vega20 */
{ 0x66a7, &vega20_device_info }, /* Vega20 */
{ 0x66af, &vega20_device_info } /* Vega20 */
};
@@ -478,7 +531,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
/* add another 512KB for all other allocations on gart (HPD, fences) */
size += 512 * 1024;
- if (kfd->kfd2kgd->init_gtt_mem_allocation(
+ if (amdgpu_amdkfd_alloc_gtt_mem(
kfd->kgd, size, &kfd->gtt_mem,
&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
false)) {
@@ -552,7 +605,7 @@ kfd_topology_add_device_error:
kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
- kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
@@ -569,7 +622,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_topology_remove_device(kfd);
kfd_doorbell_fini(kfd);
kfd_gtt_sa_fini(kfd);
- kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
}
kfree(kfd);
@@ -681,6 +734,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
bool is_patched = false;
+ unsigned long flags;
if (!kfd->init_complete)
return;
@@ -690,7 +744,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
return;
}
- spin_lock(&kfd->interrupt_lock);
+ spin_lock_irqsave(&kfd->interrupt_lock, flags);
if (kfd->interrupts_active
&& interrupt_is_wanted(kfd, ih_ring_entry,
@@ -699,7 +753,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
is_patched ? patched_ihre : ih_ring_entry))
queue_work(kfd->ih_wq, &kfd->interrupt_work);
- spin_unlock(&kfd->interrupt_lock);
+ spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
int kgd2kfd_quiesce_mm(struct mm_struct *mm)
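The interrupt_lock change above is the classic irqsave pattern: kgd2kfd_interrupt() can be called with interrupts already disabled, so the lock must save and restore the IRQ state rather than unconditionally re-enable interrupts on unlock. The shape of the pattern:

/* Save/restore IRQ state so the lock is safe from any calling context. */
unsigned long flags;

spin_lock_irqsave(&kfd->interrupt_lock, flags);
/* ... inspect and forward the IH ring entry ... */
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);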
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index a3b933967171..8372556b52eb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -33,6 +33,7 @@
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
+#include "amdgpu_amdkfd.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
@@ -219,7 +220,7 @@ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
if (ret)
return ret;
- return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
+ return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
pmf->release_mem_size / sizeof(uint32_t));
}
@@ -672,7 +673,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
pdd = qpd_to_pdd(qpd);
/* Retrieve PD base */
- pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+ pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -743,7 +744,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
pdd = qpd_to_pdd(qpd);
/* Retrieve PD base */
- pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+ pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
@@ -793,7 +794,7 @@ static int register_process(struct device_queue_manager *dqm,
pdd = qpd_to_pdd(qpd);
/* Retrieve PD base */
- pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
+ pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
dqm_lock(dqm);
list_add(&n->list, &dqm->queues);
@@ -805,7 +806,7 @@ static int register_process(struct device_queue_manager *dqm,
retval = dqm->asic_ops.update_qpd(dqm, qpd);
if (dqm->processes_count++ == 0)
- dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
+ amdgpu_amdkfd_set_compute_idle(dqm->dev->kgd, false);
dqm_unlock(dqm);
@@ -829,7 +830,7 @@ static int unregister_process(struct device_queue_manager *dqm,
list_del(&cur->list);
kfree(cur);
if (--dqm->processes_count == 0)
- dqm->dev->kfd2kgd->set_compute_idle(
+ amdgpu_amdkfd_set_compute_idle(
dqm->dev->kgd, true);
goto out;
}
@@ -845,15 +846,8 @@ static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
unsigned int vmid)
{
- uint32_t pasid_mapping;
-
- pasid_mapping = (pasid == 0) ? 0 :
- (uint32_t)pasid |
- ATC_VMID_PASID_MAPPING_VALID;
-
return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
- dqm->dev->kgd, pasid_mapping,
- vmid);
+ dqm->dev->kgd, pasid, vmid);
}
static void init_interrupts(struct device_queue_manager *dqm)
@@ -1553,7 +1547,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
{
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
int r;
dqm_lock(dqm);
@@ -1564,19 +1558,19 @@ static int get_wave_state(struct device_queue_manager *dqm,
goto dqm_unlock;
}
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (!mqd) {
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (!mqd_mgr) {
r = -ENOMEM;
goto dqm_unlock;
}
- if (!mqd->get_wave_state) {
+ if (!mqd_mgr->get_wave_state) {
r = -EINVAL;
goto dqm_unlock;
}
- r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size,
- save_area_used_size);
+ r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
+ ctl_stack_used_size, save_area_used_size);
dqm_unlock:
dqm_unlock(dqm);
@@ -1747,10 +1741,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
device_queue_manager_init_vi_tonga(&dqm->asic_ops);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
device_queue_manager_init_v9(&dqm->asic_ops);
@@ -1796,7 +1792,7 @@ static void kfd_process_hw_exception(struct work_struct *work)
{
struct device_queue_manager *dqm = container_of(work,
struct device_queue_manager, hw_exception_work);
- dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+ amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
index fd60a116be37..c3a5dcfe877a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c
@@ -24,7 +24,6 @@
#include "kfd_device_queue_manager.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"
-#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 3d66cec414af..213ea5454d11 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -397,9 +397,11 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
kfd_init_apertures_vi(pdd, id);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
kfd_init_apertures_v9(pdd, id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index f836897bbf58..a85904ad0d5f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -23,7 +23,7 @@
#include "kfd_priv.h"
#include "kfd_events.h"
#include "soc15_int.h"
-
+#include "kfd_device_queue_manager.h"
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
const uint32_t *ih_ring_entry,
@@ -39,20 +39,39 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
vmid > dev->vm_info.last_vmid_kfd)
return 0;
- /* If there is no valid PASID, it's likely a firmware bug */
- pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
- if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt"))
- return 0;
-
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
+ pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
+
+ /* This is a known issue for gfx9. Under non-HWS, the pasid is not set
+ * in the interrupt payload, so we need to determine it on our own.
+ */
+ if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ const uint32_t pasid_mask = 0xffff;
- pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n",
- client_id, source_id, pasid);
+ *patched_flag = true;
+ memcpy(patched_ihre, ih_ring_entry,
+ dev->device_info->ih_ring_entry_size);
+
+ pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid(
+ dev->kgd, vmid);
+
+ /* Patch the pasid field */
+ patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
+ & ~pasid_mask) | pasid);
+ }
+
+ pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
+ client_id, source_id, vmid, pasid);
pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
data[0], data[1], data[2], data[3],
data[4], data[5], data[6], data[7]);
+ /* If there is no valid PASID, it's likely a bug */
+ if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
+ return 0;
+
/* Interrupt types we care about: various signals and faults.
* They will be forwarded to a work queue (see below).
*/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 6c31f7370193..f1596881f20a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -313,6 +313,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
kernel_queue_init_vi(&kq->ops_asic_specific);
break;
@@ -322,6 +323,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
kernel_queue_init_v9(&kq->ops_asic_specific);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index e33019a7a883..aed9b9b82213 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -22,6 +22,7 @@
*/
#include "kfd_mqd_manager.h"
+#include "amdgpu_amdkfd.h"
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
@@ -37,8 +38,10 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
return mqd_manager_init_vi_tonga(type, dev);
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
return mqd_manager_init_v9(type, dev);
@@ -58,7 +61,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
uint32_t cu_per_sh[4] = {0};
int i, se, cu = 0;
- mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
if (cu_mask_count > cu_info.cu_active_number)
cu_mask_count = cu_info.cu_active_number;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index f381c1cb27bd..9dbba609450e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -30,6 +30,7 @@
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
+#include "amdgpu_amdkfd.h"
static inline struct v9_mqd *get_mqd(void *mqd)
{
@@ -83,7 +84,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
*mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!*mqd_mem_obj)
return -ENOMEM;
- retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
+ retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
ALIGN(q->ctl_stack_size, PAGE_SIZE) +
ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
&((*mqd_mem_obj)->gtt_mem),
@@ -250,7 +251,7 @@ static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_dev *kfd = mm->dev;
if (mqd_mem_obj->gtt_mem) {
- kfd->kfd2kgd->free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
+ amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
kfree(mqd_mem_obj);
} else {
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index c6080ed3b6a7..045a229436a0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -226,9 +226,11 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
pm->pmf = &kfd_vi_pm_funcs;
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
pm->pmf = &kfd_v9_pm_funcs;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 15fff4420e53..33b08ff00b50 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include "kfd_priv.h"
+#include "amdgpu_ids.h"
static unsigned int pasid_bits = 16;
static const struct kfd2kgd_calls *kfd2kgd;
@@ -71,7 +72,7 @@ unsigned int kfd_pasid_alloc(void)
return false;
}
- r = kfd2kgd->alloc_pasid(pasid_bits);
+ r = amdgpu_pasid_alloc(pasid_bits);
return r > 0 ? r : 0;
}
@@ -79,5 +80,5 @@ unsigned int kfd_pasid_alloc(void)
void kfd_pasid_free(unsigned int pasid)
{
if (kfd2kgd)
- kfd2kgd->free_pasid(pasid);
+ amdgpu_pasid_free(pasid);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 53ff86d45d91..0689d4ccbbc0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -507,6 +507,7 @@ struct qcm_process_device {
* All the memory management data should be here too
*/
uint64_t gds_context_area;
+ /* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
uint64_t page_table_base;
uint32_t sh_mem_config;
uint32_t sh_mem_bases;
@@ -792,6 +793,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 0039e451d9af..80b36e860a0a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -31,6 +31,7 @@
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
+#include "amdgpu_amdkfd.h"
struct mm_struct;
@@ -100,8 +101,8 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
{
struct kfd_dev *dev = pdd->dev;
- dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm);
- dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -119,16 +120,16 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
int handle;
int err;
- err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
+ err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
pdd->vm, &mem, NULL, flags);
if (err)
goto err_alloc_mem;
- err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
if (err)
goto err_map_mem;
- err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true);
+ err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
@@ -147,7 +148,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
}
if (kptr) {
- err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd,
+ err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
(struct kgd_mem *)mem, kptr, NULL);
if (err) {
pr_debug("Map GTT BO to kernel failed\n");
@@ -165,7 +166,7 @@ sync_memory_failed:
return err;
err_map_mem:
- kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
*kptr = NULL;
return err;
@@ -296,11 +297,11 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
per_device_list) {
if (!peer_pdd->vm)
continue;
- peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu(
+ amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
peer_pdd->dev->kgd, mem, peer_pdd->vm);
}
- pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
kfd_process_device_remove_obj_handle(pdd, id);
}
}
@@ -323,11 +324,12 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
pdd->dev->id, p->pasid);
if (pdd->drm_file) {
- pdd->dev->kfd2kgd->release_process_vm(pdd->dev->kgd, pdd->vm);
+ amdgpu_amdkfd_gpuvm_release_process_vm(
+ pdd->dev->kgd, pdd->vm);
fput(pdd->drm_file);
}
else if (pdd->vm)
- pdd->dev->kfd2kgd->destroy_process_vm(
+ amdgpu_amdkfd_gpuvm_destroy_process_vm(
pdd->dev->kgd, pdd->vm);
list_del(&pdd->per_device_list);
@@ -688,12 +690,12 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
dev = pdd->dev;
if (drm_file)
- ret = dev->kfd2kgd->acquire_process_vm(
+ ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
dev->kgd, drm_file, p->pasid,
&pdd->vm, &p->kgd_process_info, &p->ef);
else
- ret = dev->kfd2kgd->create_process_vm(
- dev->kgd, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef);
+ ret = amdgpu_amdkfd_gpuvm_create_process_vm(dev->kgd, p->pasid,
+ &pdd->vm, &p->kgd_process_info, &p->ef);
if (ret) {
pr_err("Failed to create process VM object\n");
return ret;
@@ -714,7 +716,7 @@ err_init_cwsr:
err_reserve_ib_mem:
kfd_process_device_free_bos(pdd);
if (!drm_file)
- dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm);
+ amdgpu_amdkfd_gpuvm_destroy_process_vm(dev->kgd, pdd->vm);
pdd->vm = NULL;
return ret;
@@ -972,7 +974,7 @@ static void restore_process_worker(struct work_struct *work)
*/
p->last_restore_timestamp = get_jiffies_64();
- ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
+ ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
&p->ef);
if (ret) {
pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e3843c5929ed..5f5b2acedbac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -36,6 +36,7 @@
#include "kfd_topology.h"
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
+#include "amdgpu_amdkfd.h"
/* topology_device_list - Master list of all topology devices */
static struct list_head topology_device_list;
@@ -100,7 +101,25 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
- if (top_dev->gpu->pdev == pdev) {
+ if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
+ device = top_dev->gpu;
+ break;
+ }
+
+ up_read(&topology_lock);
+
+ return device;
+}
+
+struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd)
+{
+ struct kfd_topology_device *top_dev;
+ struct kfd_dev *device = NULL;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list)
+ if (top_dev->gpu && top_dev->gpu->kgd == kgd) {
device = top_dev->gpu;
break;
}
@@ -1052,7 +1071,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
if (!gpu)
return 0;
- gpu->kfd2kgd->get_local_mem_info(gpu->kgd, &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info);
local_mem_size = local_mem_info.local_mem_size_private +
local_mem_info.local_mem_size_public;
@@ -1118,8 +1137,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev)
* for APUs - If CRAT from ACPI reports more than one bank, then
* all the banks will report the same mem_clk_max information
*/
- dev->gpu->kfd2kgd->get_local_mem_info(dev->gpu->kgd,
- &local_mem_info);
+ amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info);
list_for_each_entry(mem, &dev->mem_props, list)
mem->mem_clk_max = local_mem_info.mem_clk_max;
@@ -1240,7 +1258,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
* needed for the topology
*/
- dev->gpu->kfd2kgd->get_cu_info(dev->gpu->kgd, &cu_info);
+ amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info);
dev->node_props.simd_arrays_per_engine =
cu_info.num_shader_arrays_per_engine;
@@ -1249,7 +1267,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
gpu->pdev->devfn);
dev->node_props.max_engine_clk_fcompute =
- dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(dev->gpu->kgd);
+ amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
dev->node_props.max_engine_clk_ccompute =
cpufreq_quick_get_max(0) / 1000;
dev->node_props.drm_render_minor =
@@ -1272,12 +1290,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_FIJI:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
pr_debug("Adding doorbell packet type capability\n");
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
break;
case CHIP_VEGA10:
+ case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index c97dc9613325..cfde1568c79a 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -32,11 +32,12 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power
#TODO: remove when Timing Sync feature is complete
subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0
-DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet
+DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS)))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ca925200fe09..d01315965af0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -23,6 +23,9 @@
*
*/
+/* The caprices of the preprocessor require that this be declared right here */
+#define CREATE_TRACE_POINTS
+
#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
@@ -38,7 +41,6 @@
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
-#include "dm_services_types.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
@@ -55,6 +57,7 @@
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
@@ -72,10 +75,22 @@
#endif
#include "modules/inc/mod_freesync.h"
+#include "modules/power/power_helpers.h"
+#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+/**
+ * DOC: overview
+ *
+ * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
+ * requests into DC requests, and DC responses into DRM responses.
+ *
+ * The root control structure is &struct amdgpu_display_manager.
+ */
+
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
@@ -95,7 +110,7 @@ static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
- struct amdgpu_plane *aplane,
+ struct drm_plane *plane,
unsigned long possible_crtcs);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
@@ -119,6 +134,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
+static void handle_cursor_update(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state);
@@ -379,11 +396,6 @@ static void amdgpu_dm_fbc_init(struct drm_connector *connector)
}
-/*
- * Init display KMS
- *
- * Returns 0 on success
- */
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -393,6 +405,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
+ mutex_init(&adev->dm.dc_lock);
+
if(amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
goto error;
@@ -507,6 +521,9 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
+
+ mutex_destroy(&adev->dm.dc_lock);
+
return;
}
@@ -638,6 +655,26 @@ static int dm_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct dmcu_iram_parameters params;
+ unsigned int linear_lut[16];
+ int i;
+ struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+ bool ret;
+
+ for (i = 0; i < 16; i++)
+ linear_lut[i] = 0xFFFF * i / 15;
+
+ params.set = 0;
+ params.backlight_ramping_start = 0xCCCC;
+ params.backlight_ramping_reduction = 0xCCCCCCCC;
+ params.backlight_lut_array_size = 16;
+ params.backlight_lut_array = linear_lut;
+
+ ret = dmcu_load_iram(dmcu, params);
+
+ if (!ret)
+ return -EINVAL;
+
return detect_mst_link_for_all_connectors(adev->ddev);
}
@@ -663,6 +700,26 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
+/**
+ * dm_hw_init() - Initialize DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
+ * the initializers of each DM component, then populating the struct with them.
+ *
+ * Although the function implies hardware initialization, both hardware and
+ * software are initialized here. Splitting them out to their relevant init
+ * hooks is a future TODO item.
+ *
+ * Some notable things that are initialized here:
+ *
+ * - Display Core, both software and hardware
+ * - DC modules that we need (freesync and color management)
+ * - DRM software states
+ * - Interrupt sources and handlers
+ * - Vblank support
+ * - Debug FS entries, if enabled
+ */
static int dm_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -673,6 +730,14 @@ static int dm_hw_init(void *handle)
return 0;
}
+/**
+ * dm_hw_fini() - Teardown DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Teardown components within &struct amdgpu_display_manager that require
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+ * were loaded. Also flush IRQ workqueues and disable them.
+ */
static int dm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -898,6 +963,16 @@ static int dm_resume(void *handle)
return ret;
}
+/**
+ * DOC: DM Lifecycle
+ *
+ * DM (and consequently DC) is registered in the amdgpu base driver as an IP
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+ * the base driver's device list to be initialized and torn down accordingly.
+ *
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
+ */
+
static const struct amd_ip_funcs amdgpu_dm_funcs = {
.name = "dm",
.early_init = dm_early_init,
@@ -926,53 +1001,17 @@ const struct amdgpu_ip_block_version dm_ip_block =
};
-static struct drm_atomic_state *
-dm_atomic_state_alloc(struct drm_device *dev)
-{
- struct dm_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
- if (!state)
- return NULL;
-
- if (drm_atomic_state_init(dev, &state->base) < 0)
- goto fail;
-
- return &state->base;
-
-fail:
- kfree(state);
- return NULL;
-}
-
-static void
-dm_atomic_state_clear(struct drm_atomic_state *state)
-{
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
-
- if (dm_state->context) {
- dc_release_state(dm_state->context);
- dm_state->context = NULL;
- }
-
- drm_atomic_state_default_clear(state);
-}
-
-static void
-dm_atomic_state_alloc_free(struct drm_atomic_state *state)
-{
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
- drm_atomic_state_default_release(state);
- kfree(dm_state);
-}
+/**
+ * DOC: atomic
+ *
+ * *WIP*
+ */
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = amdgpu_dm_atomic_commit,
- .atomic_state_alloc = dm_atomic_state_alloc,
- .atomic_state_clear = dm_atomic_state_clear,
- .atomic_state_free = dm_atomic_state_alloc_free
};
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
@@ -1494,8 +1533,117 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
}
#endif
+/*
+ * Acquires the lock for the atomic state object and returns
+ * the new atomic state.
+ *
+ * This should only be called during atomic check.
+ */
+static int dm_atomic_get_state(struct drm_atomic_state *state,
+ struct dm_atomic_state **dm_state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_state *priv_state;
+ int ret;
+
+ if (*dm_state)
+ return 0;
+
+ ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ *dm_state = to_dm_atomic_state(priv_state);
+
+ return 0;
+}
+
+struct dm_atomic_state *
+dm_atomic_get_new_state(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_obj *obj;
+ struct drm_private_state *new_obj_state;
+ int i;
+
+ for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
+ if (obj->funcs == dm->atomic_obj.funcs)
+ return to_dm_atomic_state(new_obj_state);
+ }
+
+ return NULL;
+}
+
+struct dm_atomic_state *
+dm_atomic_get_old_state(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_obj *obj;
+ struct drm_private_state *old_obj_state;
+ int i;
+
+ for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
+ if (obj->funcs == dm->atomic_obj.funcs)
+ return to_dm_atomic_state(old_obj_state);
+ }
+
+ return NULL;
+}
+
+static struct drm_private_state *
+dm_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dm_atomic_state *old_state, *new_state;
+
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
+
+ new_state->context = dc_create_state();
+ if (!new_state->context) {
+ kfree(new_state);
+ return NULL;
+ }
+
+ old_state = to_dm_atomic_state(obj->state);
+ if (old_state && old_state->context)
+ dc_resource_state_copy_construct(old_state->context,
+ new_state->context);
+
+ return &new_state->base;
+}
+
+static void dm_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+ if (dm_state && dm_state->context)
+ dc_release_state(dm_state->context);
+
+ kfree(dm_state);
+}
+
+static struct drm_private_state_funcs dm_atomic_state_funcs = {
+ .atomic_duplicate_state = dm_atomic_duplicate_state,
+ .atomic_destroy_state = dm_atomic_destroy_state,
+};
+
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
+ struct dm_atomic_state *state;
int r;
adev->mode_info.mode_config_initialized = true;
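The conversion above from subclassing drm_atomic_state to a DRM private object is a reusable pattern: any driver-private state can ride along in the atomic state this way. A minimal sketch of the boilerplate, matching the three-argument drm_atomic_private_obj_init() signature used in this patch (the foo names are hypothetical):

struct foo_private_state {
	struct drm_private_state base;	/* must be embedded for container_of */
	int bar;			/* driver-private payload */
};

#define to_foo_private_state(s) \
	container_of(s, struct foo_private_state, base)

static struct drm_private_state *
foo_duplicate_state(struct drm_private_obj *obj)
{
	struct foo_private_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return NULL;

	/* Copies the base, then copy the payload from the current state. */
	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
	state->bar = to_foo_private_state(obj->state)->bar;

	return &state->base;
}

static void foo_destroy_state(struct drm_private_obj *obj,
			      struct drm_private_state *state)
{
	kfree(to_foo_private_state(state));
}

static const struct drm_private_state_funcs foo_state_funcs = {
	.atomic_duplicate_state = foo_duplicate_state,
	.atomic_destroy_state = foo_destroy_state,
};

At init time the object is seeded with an initial state, exactly as amdgpu_dm_mode_config_init() does below with drm_atomic_private_obj_init(&obj, &state->base, &foo_state_funcs).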
@@ -1513,6 +1661,24 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
+ drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->context = dc_create_state();
+ if (!state->context) {
+ kfree(state);
+ return -ENOMEM;
+ }
+
+ dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
+
+ drm_atomic_private_obj_init(&adev->dm.atomic_obj,
+ &state->base,
+ &dm_atomic_state_funcs);
+
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
@@ -1520,15 +1686,63 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
return 0;
}
+#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
+#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
+{
+#if defined(CONFIG_ACPI)
+ struct amdgpu_dm_backlight_caps caps;
+
+ if (dm->backlight_caps.caps_valid)
+ return;
+
+ amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
+ if (caps.caps_valid) {
+ dm->backlight_caps.min_input_signal = caps.min_input_signal;
+ dm->backlight_caps.max_input_signal = caps.max_input_signal;
+ dm->backlight_caps.caps_valid = true;
+ } else {
+ dm->backlight_caps.min_input_signal =
+ AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps.max_input_signal =
+ AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ }
+#else
+ dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+#endif
+}
+
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
+ struct amdgpu_dm_backlight_caps caps;
+ uint32_t brightness = bd->props.brightness;
+
+ amdgpu_dm_update_backlight_caps(dm);
+ caps = dm->backlight_caps;
+ /*
+ * The brightness input is in the range 0-255. It needs to be
+ * rescaled to be between the requested min and max input signal.
+ *
+ * It also needs to be scaled up by 0x101 to match the DC
+ * interface, which has a range of 0 to 0xffff.
+ */
+ brightness =
+ brightness
+ * 0x101
+ * (caps.max_input_signal - caps.min_input_signal)
+ / AMDGPU_MAX_BL_LEVEL
+ + caps.min_input_signal * 0x101;
if (dc_link_set_backlight_level(dm->backlight_link,
- bd->props.brightness, 0, 0))
+ brightness, 0, 0))
return 0;
else
return 1;
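With the default caps (min 12, max 255) the rescaling maps the endpoints exactly onto the DC range. A quick standalone check of the arithmetic, assuming those defaults (illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Same formula as amdgpu_dm_backlight_update_status() above. */
static uint32_t rescale(uint32_t brightness, uint32_t min, uint32_t max)
{
	return brightness * 0x101 * (max - min) / 255 + min * 0x101;
}

int main(void)
{
	assert(rescale(255, 12, 255) == 0xffff);   /* full scale hits DC max */
	assert(rescale(0, 12, 255) == 12 * 0x101); /* floor is min * 0x101 */
	assert(rescale(128, 12, 255) == 34431);    /* integer-math midpoint */
	return 0;
}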
@@ -1555,6 +1769,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
char bl_name[16];
struct backlight_properties props = { 0 };
+ amdgpu_dm_update_backlight_caps(dm);
+
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
@@ -1580,18 +1796,18 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
struct amdgpu_mode_info *mode_info,
int plane_id)
{
- struct amdgpu_plane *plane;
+ struct drm_plane *plane;
unsigned long possible_crtcs;
int ret = 0;
- plane = kzalloc(sizeof(struct amdgpu_plane), GFP_KERNEL);
+ plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
mode_info->planes[plane_id] = plane;
if (!plane) {
DRM_ERROR("KMS: Failed to allocate plane\n");
return -ENOMEM;
}
- plane->base.type = mode_info->plane_type[plane_id];
+ plane->type = mode_info->plane_type[plane_id];
/*
* HACK: IGT tests expect that each plane can only have
@@ -1682,7 +1898,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
- if (amdgpu_dm_crtc_init(dm, &mode_info->planes[i]->base, i)) {
+ if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
DRM_ERROR("KMS: Failed to initialize crtc\n");
goto fail;
}
@@ -1786,6 +2002,7 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
drm_mode_config_cleanup(dm->ddev);
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
return;
}
@@ -1805,73 +2022,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
/* TODO: implement later */
}
-static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
- struct drm_file *filp)
-{
- struct drm_atomic_state *state;
- struct drm_modeset_acquire_ctx ctx;
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
- int ret = 0;
- uint8_t i;
- bool enable = false;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- state = drm_atomic_state_alloc(dev);
- if (!state) {
- ret = -ENOMEM;
- goto out;
- }
- state->acquire_ctx = &ctx;
-
-retry:
- drm_for_each_crtc(crtc, dev) {
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- goto fail;
-
- /* TODO rework amdgpu_dm_commit_planes so we don't need this */
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- goto fail;
- }
-
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
- struct drm_crtc_state *new_crtc_state;
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct dm_crtc_state *dm_new_crtc_state;
-
- if (!acrtc) {
- ASSERT(0);
- continue;
- }
-
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-
- dm_new_crtc_state->freesync_enabled = enable;
- }
-
- ret = drm_atomic_commit(state);
-
-fail:
- if (ret == -EDEADLK) {
- drm_atomic_state_clear(state);
- drm_modeset_backoff(&ctx);
- goto retry;
- }
-
- drm_atomic_state_put(state);
-
-out:
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
- return ret;
-}
-
static const struct amdgpu_display_funcs dm_display_funcs = {
.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
@@ -1884,8 +2034,6 @@ static const struct amdgpu_display_funcs dm_display_funcs = {
dm_crtc_get_scanoutpos,/* called unconditionally */
.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
.add_connector = NULL, /* VBIOS parsing. DAL does it. */
- .notify_freesync = amdgpu_notify_freesync,
-
};
#if defined(CONFIG_DEBUG_KERNEL_DC)
@@ -2486,7 +2634,8 @@ static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_
static void
fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
const struct drm_display_mode *mode_in,
- const struct drm_connector *connector)
+ const struct drm_connector *connector,
+ const struct dc_stream_state *old_stream)
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
@@ -2512,7 +2661,18 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
connector);
timing_out->scan_type = SCANNING_TYPE_NODATA;
timing_out->hdmi_vic = 0;
- timing_out->vic = drm_match_cea_mode(mode_in);
+
+ if (old_stream) {
+ timing_out->vic = old_stream->timing.vic;
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
+ } else {
+ timing_out->vic = drm_match_cea_mode(mode_in);
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+ }
timing_out->h_addressable = mode_in->crtc_hdisplay;
timing_out->h_total = mode_in->crtc_htotal;
@@ -2528,10 +2688,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
timing_out->pix_clk_khz = mode_in->crtc_clock;
timing_out->aspect_ratio = get_aspect_ratio(mode_in);
- if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
- timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
- if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
- timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
stream->output_color_space = get_output_color_space(timing_out);
@@ -2554,9 +2710,9 @@ static void fill_audio_info(struct audio_info *audio_info,
cea_revision = drm_connector->display_info.cea_rev;
- strncpy(audio_info->display_name,
+ strscpy(audio_info->display_name,
edid_caps->display_name,
- AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
if (cea_revision >= 3) {
audio_info->mode_count = edid_caps->audio_mode_count;
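The strncpy() → strscpy() conversions here (and for mode->name below) are more than style: strncpy() does not NUL-terminate when the source fills the buffer, which is why the old call had to pass the size minus one, while strscpy() always terminates and returns -E2BIG on truncation. A short sketch of the difference (illustrative):

char buf[8];

/* strncpy: may leave buf unterminated; the caller must fix it up. */
strncpy(buf, "overlong-name", sizeof(buf) - 1);
buf[sizeof(buf) - 1] = '\0';

/* strscpy: always NUL-terminates and reports truncation. */
if (strscpy(buf, "overlong-name", sizeof(buf)) < 0)
	pr_debug("name truncated\n");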
@@ -2694,13 +2850,18 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_display_mode *drm_mode,
- const struct dm_connector_state *dm_state)
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream)
{
struct drm_display_mode *preferred_mode = NULL;
struct drm_connector *drm_connector;
struct dc_stream_state *stream = NULL;
struct drm_display_mode mode = *drm_mode;
bool native_mode_found = false;
+ bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+ int mode_refresh;
+ int preferred_refresh = 0;
+
struct dc_sink *sink = NULL;
if (aconnector == NULL) {
DRM_ERROR("aconnector is NULL!\n");
@@ -2739,6 +2900,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct drm_display_mode,
head);
+ mode_refresh = drm_mode_vrefresh(&mode);
+
if (preferred_mode == NULL) {
/*
* This may not be an error, the use case is when we have no
@@ -2751,13 +2914,23 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode,
dm_state ? (dm_state->scaling != RMX_OFF) : false);
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
}
if (!dm_state)
drm_mode_set_crtcinfo(&mode, 0);
- fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base);
+ /*
+ * If scaling is enabled and refresh rate didn't change
+ * we copy the vic and polarities of the old timings
+ */
+ if (!scale || mode_refresh != preferred_refresh)
+ fill_stream_properties_from_drm_display_mode(stream,
+ &mode, &aconnector->base, NULL);
+ else
+ fill_stream_properties_from_drm_display_mode(stream,
+ &mode, &aconnector->base, old_stream);
+
update_stream_scaling_settings(&mode, dm_state, stream);
fill_audio_info(
@@ -2769,6 +2942,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (dm_state && dm_state->freesync_capable)
stream->ignore_msa_timing_param = true;
+
finish:
if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
dc_sink_release(sink);
@@ -2837,7 +3011,10 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->adjust = cur->adjust;
state->vrr_infopacket = cur->vrr_infopacket;
- state->freesync_enabled = cur->freesync_enabled;
+ state->abm_level = cur->abm_level;
+ state->vrr_supported = cur->vrr_supported;
+ state->freesync_config = cur->freesync_config;
+ state->crc_enabled = cur->crc_enabled;
/* TODO Duplicate dc_stream after objects are stream object is flattened */
@@ -2953,6 +3130,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.max_bpc_property) {
dm_new_state->max_bpc = val;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ dm_new_state->abm_level = val;
+ ret = 0;
}
return ret;
@@ -2998,7 +3178,11 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.max_bpc_property) {
*val = dm_state->max_bpc;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ *val = dm_state->abm_level;
+ ret = 0;
}
+
return ret;
}
@@ -3042,6 +3226,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
state->underscan_enable = false;
state->underscan_hborder = 0;
state->underscan_vborder = 0;
+ state->max_bpc = 8;
__drm_atomic_helper_connector_reset(connector, &state->base);
}
@@ -3062,7 +3247,12 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
new_state->freesync_capable = state->freesync_capable;
- new_state->freesync_enable = state->freesync_enable;
+ new_state->abm_level = state->abm_level;
+ new_state->scaling = state->scaling;
+ new_state->underscan_enable = state->underscan_enable;
+ new_state->underscan_hborder = state->underscan_hborder;
+ new_state->underscan_vborder = state->underscan_vborder;
+ new_state->max_bpc = state->max_bpc;
return &new_state->base;
}
@@ -3164,7 +3354,7 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
goto fail;
}
- stream = create_stream_for_sink(aconnector, mode, NULL);
+ stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
if (stream == NULL) {
DRM_ERROR("Failed to create stream for sink!\n");
goto fail;
@@ -3198,7 +3388,6 @@ amdgpu_dm_connector_helper_funcs = {
*/
.get_modes = get_modes,
.mode_valid = amdgpu_dm_connector_mode_valid,
- .best_encoder = drm_atomic_helper_best_encoder
};
static void dm_crtc_helper_disable(struct drm_crtc *crtc)
@@ -3436,10 +3625,43 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+static int dm_plane_atomic_async_check(struct drm_plane *plane,
+ struct drm_plane_state *new_plane_state)
+{
+ /* Only support async updates on cursor planes. */
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void dm_plane_atomic_async_update(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_plane_state *old_state =
+ drm_atomic_get_old_plane_state(new_state->state, plane);
+
+ if (plane->state->fb != new_state->fb)
+ drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
+
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_w = new_state->src_w;
+ plane->state->src_h = new_state->src_h;
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->crtc_h = new_state->crtc_h;
+
+ handle_cursor_update(plane, old_state);
+}
+
static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
.prepare_fb = dm_plane_helper_prepare_fb,
.cleanup_fb = dm_plane_helper_cleanup_fb,
.atomic_check = dm_plane_atomic_check,
+ .atomic_async_check = dm_plane_atomic_async_check,
+ .atomic_async_update = dm_plane_atomic_async_update
};
/*
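The new async hooks let the DRM core fast-path cursor movement: atomic_async_check() accepts only cursor planes, and atomic_async_update() applies the new coordinates immediately instead of scheduling a full vblank-synchronized commit. From userspace this path is normally reached through the legacy cursor calls; a hedged sketch using libdrm (fd, crtc_id and bo_handle are assumed to be set up elsewhere):

#include <stdint.h>
#include <xf86drmMode.h>

/* Show and move a 64x64 cursor; with the hooks above the kernel can
 * service these as async plane updates.
 */
static int move_cursor(int fd, uint32_t crtc_id, uint32_t bo_handle,
		       int x, int y)
{
	int ret = drmModeSetCursor(fd, crtc_id, bo_handle, 64, 64);

	if (ret)
		return ret;

	return drmModeMoveCursor(fd, crtc_id, x, y);
}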
@@ -3471,49 +3693,49 @@ static const u32 cursor_formats[] = {
};
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
- struct amdgpu_plane *aplane,
+ struct drm_plane *plane,
unsigned long possible_crtcs)
{
int res = -EPERM;
- switch (aplane->base.type) {
+ switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
res = drm_universal_plane_init(
dm->adev->ddev,
- &aplane->base,
+ plane,
possible_crtcs,
&dm_plane_funcs,
rgb_formats,
ARRAY_SIZE(rgb_formats),
- NULL, aplane->base.type, NULL);
+ NULL, plane->type, NULL);
break;
case DRM_PLANE_TYPE_OVERLAY:
res = drm_universal_plane_init(
dm->adev->ddev,
- &aplane->base,
+ plane,
possible_crtcs,
&dm_plane_funcs,
yuv_formats,
ARRAY_SIZE(yuv_formats),
- NULL, aplane->base.type, NULL);
+ NULL, plane->type, NULL);
break;
case DRM_PLANE_TYPE_CURSOR:
res = drm_universal_plane_init(
dm->adev->ddev,
- &aplane->base,
+ plane,
possible_crtcs,
&dm_plane_funcs,
cursor_formats,
ARRAY_SIZE(cursor_formats),
- NULL, aplane->base.type, NULL);
+ NULL, plane->type, NULL);
break;
}
- drm_plane_helper_add(&aplane->base, &dm_plane_helper_funcs);
+ drm_plane_helper_add(plane, &dm_plane_helper_funcs);
/* Create (reset) the plane state */
- if (aplane->base.funcs->reset)
- aplane->base.funcs->reset(&aplane->base);
+ if (plane->funcs->reset)
+ plane->funcs->reset(plane);
return res;
@@ -3524,7 +3746,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
uint32_t crtc_index)
{
struct amdgpu_crtc *acrtc = NULL;
- struct amdgpu_plane *cursor_plane;
+ struct drm_plane *cursor_plane;
int res = -ENOMEM;
@@ -3532,7 +3754,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
if (!cursor_plane)
goto fail;
- cursor_plane->base.type = DRM_PLANE_TYPE_CURSOR;
+ cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
@@ -3543,7 +3765,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
dm->ddev,
&acrtc->base,
plane,
- &cursor_plane->base,
+ cursor_plane,
&amdgpu_dm_crtc_funcs, NULL);
if (res)
@@ -3601,14 +3823,17 @@ static int to_drm_connector_type(enum signal_type st)
}
}
+static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
+{
+ return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+}
+
static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
{
- const struct drm_connector_helper_funcs *helper =
- connector->helper_private;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- encoder = helper->best_encoder(connector);
+ encoder = amdgpu_dm_connector_to_encoder(connector);
if (encoder == NULL)
return;
@@ -3650,7 +3875,7 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
mode->hdisplay = hdisplay;
mode->vdisplay = vdisplay;
mode->type &= ~DRM_MODE_TYPE_PREFERRED;
- strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+ strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
return mode;
@@ -3735,14 +3960,12 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
- const struct drm_connector_helper_funcs *helper =
- connector->helper_private;
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct drm_encoder *encoder;
struct edid *edid = amdgpu_dm_connector->edid;
- encoder = helper->best_encoder(connector);
+ encoder = amdgpu_dm_connector_to_encoder(connector);
if (!edid || !drm_edid_is_valid(edid)) {
amdgpu_dm_connector->num_modes =
@@ -3781,12 +4004,12 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
case DRM_MODE_CONNECTOR_HDMIA:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
aconnector->base.ycbcr_420_allowed =
- link->link_enc->features.ycbcr420_supported ? true : false;
+ link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
aconnector->base.ycbcr_420_allowed =
- link->link_enc->features.ycbcr420_supported ? true : false;
+ link->link_enc->features.dp_ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
@@ -3812,6 +4035,17 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
adev->mode_info.max_bpc_property,
0);
+ if (connector_type == DRM_MODE_CONNECTOR_eDP &&
+ dc_is_dmcu_initialized(adev->dm.dc)) {
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.abm_level_property, 0);
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ drm_connector_attach_vrr_capable_property(
+ &aconnector->base);
+ }
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -4116,6 +4350,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state)
{
+ struct amdgpu_device *adev = plane->dev->dev_private;
struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
@@ -4140,9 +4375,12 @@ static void handle_cursor_update(struct drm_plane *plane,
if (!position.enable) {
/* turn off cursor */
- if (crtc_state && crtc_state->stream)
+ if (crtc_state && crtc_state->stream) {
+ mutex_lock(&adev->dm.dc_lock);
dc_stream_set_cursor_position(crtc_state->stream,
&position);
+ mutex_unlock(&adev->dm.dc_lock);
+ }
return;
}
@@ -4160,6 +4398,7 @@ static void handle_cursor_update(struct drm_plane *plane,
attributes.pitch = attributes.width;
if (crtc_state->stream) {
+ mutex_lock(&adev->dm.dc_lock);
if (!dc_stream_set_cursor_attributes(crtc_state->stream,
&attributes))
DRM_ERROR("DC failed to set cursor attributes\n");
@@ -4167,6 +4406,7 @@ static void handle_cursor_update(struct drm_plane *plane,
if (!dc_stream_set_cursor_position(crtc_state->stream,
&position))
DRM_ERROR("DC failed to set cursor position\n");
+ mutex_unlock(&adev->dm.dc_lock);
}
}
@@ -4188,6 +4428,91 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
acrtc->crtc_id);
}
+struct dc_stream_status *dc_state_get_stream_status(
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (stream == state->streams[i])
+ return &state->stream_status[i];
+ }
+
+ return NULL;
+}
+
+static void update_freesync_state_on_stream(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state,
+ struct dc_stream_state *new_stream)
+{
+ struct mod_vrr_params vrr = {0};
+ struct dc_info_packet vrr_infopacket = {0};
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ if (new_crtc_state->vrr_supported &&
+ config.min_refresh_in_uhz &&
+ config.max_refresh_in_uhz) {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
+ }
+
+ mod_freesync_build_vrr_params(dm->freesync_module,
+ new_stream,
+ &config, &vrr);
+
+ mod_freesync_build_vrr_infopacket(
+ dm->freesync_module,
+ new_stream,
+ &vrr,
+ PACKET_TYPE_VRR,
+ TRANSFER_FUNC_UNKNOWN,
+ &vrr_infopacket);
+
+ new_crtc_state->freesync_timing_changed =
+ (memcmp(&new_crtc_state->adjust,
+ &vrr.adjust,
+ sizeof(vrr.adjust)) != 0);
+
+ new_crtc_state->freesync_vrr_info_changed =
+ (memcmp(&new_crtc_state->vrr_infopacket,
+ &vrr_infopacket,
+ sizeof(vrr_infopacket)) != 0);
+
+ new_crtc_state->adjust = vrr.adjust;
+ new_crtc_state->vrr_infopacket = vrr_infopacket;
+
+ new_stream->adjust = new_crtc_state->adjust;
+ new_stream->vrr_infopacket = vrr_infopacket;
+
+ if (new_crtc_state->freesync_vrr_info_changed)
+ DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
+ new_crtc_state->base.crtc->base.id,
+ (int)new_crtc_state->base.vrr_enabled,
+ (int)vrr.state);
+
+ if (new_crtc_state->freesync_timing_changed)
+ DRM_DEBUG_KMS("VRR timing update: crtc=%u min=%u max=%u\n",
+ new_crtc_state->base.crtc->base.id,
+ vrr.adjust.v_total_min,
+ vrr.adjust.v_total_max);
+}
+
/*
* Executes flip
*
@@ -4209,6 +4534,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
struct dc_flip_addrs addr = { {0} };
/* TODO eliminate or rename surface_update */
struct dc_surface_update surface_updates[1] = { {0} };
+ struct dc_stream_update stream_update = {0};
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
struct dc_stream_status *stream_status;
@@ -4281,13 +4607,30 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
}
surface_updates->flip_addr = &addr;
+ if (acrtc_state->stream) {
+ update_freesync_state_on_stream(
+ &adev->dm,
+ acrtc_state,
+ acrtc_state->stream);
+
+ if (acrtc_state->freesync_timing_changed)
+ stream_update.adjust =
+ &acrtc_state->stream->adjust;
+
+ if (acrtc_state->freesync_vrr_info_changed)
+ stream_update.vrr_infopacket =
+ &acrtc_state->stream->vrr_infopacket;
+ }
+
+ mutex_lock(&adev->dm.dc_lock);
dc_commit_updates_for_stream(adev->dm.dc,
surface_updates,
1,
acrtc_state->stream,
- NULL,
+ &stream_update,
&surface_updates->surface,
state);
+ mutex_unlock(&adev->dm.dc_lock);
DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x \n",
__func__,
@@ -4302,6 +4645,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
* with a dc_plane_state and follow the atomic model a bit more closely here.
*/
static bool commit_planes_to_stream(
+ struct amdgpu_display_manager *dm,
struct dc *dc,
struct dc_plane_state **plane_states,
uint8_t new_plane_count,
@@ -4318,6 +4662,7 @@ static bool commit_planes_to_stream(
struct dc_stream_state *dc_stream = dm_new_crtc_state->stream;
struct dc_stream_update *stream_update =
kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL);
+ unsigned int abm_level;
if (!stream_update) {
BREAK_TO_DEBUGGER();
@@ -4345,9 +4690,9 @@ static bool commit_planes_to_stream(
stream_update->dst = dc_stream->dst;
stream_update->out_transfer_func = dc_stream->out_transfer_func;
- if (dm_new_crtc_state->freesync_enabled != dm_old_crtc_state->freesync_enabled) {
- stream_update->vrr_infopacket = &dc_stream->vrr_infopacket;
- stream_update->adjust = &dc_stream->adjust;
+ if (dm_new_crtc_state->abm_level != dm_old_crtc_state->abm_level) {
+ abm_level = dm_new_crtc_state->abm_level;
+ stream_update->abm_level = &abm_level;
}
for (i = 0; i < new_plane_count; i++) {
@@ -4377,11 +4722,13 @@ static bool commit_planes_to_stream(
updates[i].scaling_info = &scaling_info[i];
}
+ mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(
dc,
updates,
new_plane_count,
dc_stream, stream_update, plane_states, state);
+ mutex_unlock(&dm->dc_lock);
kfree(flip_addr);
kfree(plane_info);
@@ -4391,6 +4738,7 @@ static bool commit_planes_to_stream(
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
struct drm_crtc *pcrtc,
@@ -4407,7 +4755,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
int planes_count = 0;
unsigned long flags;
@@ -4468,7 +4815,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
crtc,
fb,
(uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank,
- dm_state->context);
+ dc_state);
}
}
@@ -4485,15 +4832,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
- dc_stream_attach->adjust = acrtc_state->adjust;
- dc_stream_attach->vrr_infopacket = acrtc_state->vrr_infopacket;
+ dc_stream_attach->abm_level = acrtc_state->abm_level;
- if (false == commit_planes_to_stream(dm->dc,
+ if (false == commit_planes_to_stream(dm,
+ dm->dc,
plane_states_constructed,
planes_count,
acrtc_state,
dm_old_crtc_state,
- dm_state->context))
+ dc_state))
dm_error("%s: Failed to attach plane!\n", __func__);
} else {
/*TODO BUG Here should go disable planes on CRTC. */
@@ -4547,12 +4894,21 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
/*TODO Handle EINTR, reenable IRQ*/
}
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
uint32_t i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
@@ -4565,7 +4921,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_update_legacy_modeset_state(dev, state);
- dm_state = to_dm_atomic_state(state);
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ } else {
+ /* No state changes, retain current state. */
+ dc_state_temp = dc_create_state();
+ ASSERT(dc_state_temp);
+ dc_state = dc_state_temp;
+ dc_resource_state_copy_construct_current(dm->dc, dc_state);
+ }
/* update changed items */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -4638,9 +5003,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
} /* for_each_crtc_in_state() */
- if (dm_state->context) {
- dm_enable_per_frame_crtc_master_sync(dm_state->context);
- WARN_ON(!dc_commit_state(dm->dc, dm_state->context));
+ if (dc_state) {
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_state(dm->dc, dc_state));
+ mutex_unlock(&dm->dc_lock);
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -4653,13 +5020,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_stream_get_status(dm_new_crtc_state->stream);
if (!status)
+ status = dc_state_get_stream_status(dc_state,
+ dm_new_crtc_state->stream);
+
+ if (!status)
DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
else
acrtc->otg_inst = status->primary_otg_inst;
}
}
- /* Handle scaling and underscan changes*/
+ /* Handle scaling, underscan, and abm changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
@@ -4675,11 +5046,14 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
- /* Skip anything that is not scaling or underscan changes */
- if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
- continue;
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ /* Skip anything that is not scaling or underscan changes */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state) &&
+ (dm_new_crtc_state->abm_level == dm_old_crtc_state->abm_level))
+ continue;
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
@@ -4691,17 +5065,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
WARN_ON(!status);
WARN_ON(!status->plane_count);
- dm_new_crtc_state->stream->adjust = dm_new_crtc_state->adjust;
- dm_new_crtc_state->stream->vrr_infopacket = dm_new_crtc_state->vrr_infopacket;
+ dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
/*TODO How it works with MPO ?*/
if (!commit_planes_to_stream(
+ dm,
dm->dc,
status->plane_states,
status->plane_count,
dm_new_crtc_state,
to_dm_crtc_state(old_crtc_state),
- dm_state->context))
+ dc_state))
dm_error("%s: Failed to update stream scaling!\n", __func__);
}
@@ -4734,7 +5108,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream)
- amdgpu_dm_commit_planes(state, dev, dm, crtc, &wait_for_vblank);
+ amdgpu_dm_commit_planes(state, dc_state, dev,
+ dm, crtc, &wait_for_vblank);
}
@@ -4774,6 +5149,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for (i = 0; i < crtc_disable_count; i++)
pm_runtime_put_autosuspend(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
+
+ if (dc_state_temp)
+ dc_release_state(dc_state_temp);
}
@@ -4917,20 +5295,18 @@ static int do_aquire_global_lock(struct drm_device *dev,
return ret < 0 ? ret : 0;
}
-void set_freesync_on_stream(struct amdgpu_display_manager *dm,
- struct dm_crtc_state *new_crtc_state,
- struct dm_connector_state *new_con_state,
- struct dc_stream_state *new_stream)
+static void get_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state,
+ struct dm_connector_state *new_con_state)
{
struct mod_freesync_config config = {0};
- struct mod_vrr_params vrr = {0};
- struct dc_info_packet vrr_infopacket = {0};
struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(new_con_state->base.connector);
- if (new_con_state->freesync_capable &&
- new_con_state->freesync_enable) {
- config.state = new_crtc_state->freesync_enabled ?
+ new_crtc_state->vrr_supported = new_con_state->freesync_capable;
+
+ if (new_con_state->freesync_capable) {
+ config.state = new_crtc_state->base.vrr_enabled ?
VRR_STATE_ACTIVE_VARIABLE :
VRR_STATE_INACTIVE;
config.min_refresh_in_uhz =
@@ -4940,19 +5316,18 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm,
config.vsif_supported = true;
}
- mod_freesync_build_vrr_params(dm->freesync_module,
- new_stream,
- &config, &vrr);
+ new_crtc_state->freesync_config = config;
+}
- mod_freesync_build_vrr_infopacket(dm->freesync_module,
- new_stream,
- &vrr,
- packet_type_fs1,
- NULL,
- &vrr_infopacket);
+static void reset_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state)
+{
+ new_crtc_state->vrr_supported = false;
- new_crtc_state->adjust = vrr.adjust;
- new_crtc_state->vrr_infopacket = vrr_infopacket;
+ memset(&new_crtc_state->adjust, 0,
+ sizeof(new_crtc_state->adjust));
+ memset(&new_crtc_state->vrr_infopacket, 0,
+ sizeof(new_crtc_state->vrr_infopacket));
}
static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
@@ -4960,11 +5335,11 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
bool enable,
bool *lock_and_validation_needed)
{
+ struct dm_atomic_state *dm_state = NULL;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct dc_stream_state *new_stream;
int ret = 0;
@@ -5012,7 +5387,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
new_stream = create_stream_for_sink(aconnector,
&new_crtc_state->mode,
- dm_new_conn_state);
+ dm_new_conn_state,
+ dm_old_crtc_state->stream);
/*
* we can have no stream on ACTION_SET if a display
@@ -5027,8 +5403,7 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
break;
}
- set_freesync_on_stream(dm, dm_new_crtc_state,
- dm_new_conn_state, new_stream);
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
@@ -5038,9 +5413,6 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
}
}
- if (dm_old_crtc_state->freesync_enabled != dm_new_crtc_state->freesync_enabled)
- new_crtc_state->mode_changed = true;
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto next_crtc;
@@ -5062,6 +5434,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
if (!dm_old_crtc_state->stream)
goto next_crtc;
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
crtc->base.id);
@@ -5077,6 +5453,8 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
dc_stream_release(dm_old_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+
*lock_and_validation_needed = true;
} else {/* Add stream for any updated/enabled CRTC */
@@ -5096,6 +5474,10 @@ static int dm_update_crtcs_state(struct amdgpu_display_manager *dm,
WARN_ON(dm_new_crtc_state->stream);
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
dm_new_crtc_state->stream = new_stream;
dc_stream_retain(new_stream);
@@ -5154,7 +5536,9 @@ next_crtc:
amdgpu_dm_set_ctm(dm_new_crtc_state);
}
-
+ /* Update Freesync settings. */
+ get_freesync_config_for_crtc(dm_new_crtc_state,
+ dm_new_conn_state);
}
return ret;
@@ -5170,12 +5554,13 @@ static int dm_update_planes_state(struct dc *dc,
bool enable,
bool *lock_and_validation_needed)
{
+
+ struct dm_atomic_state *dm_state = NULL;
struct drm_crtc *new_plane_crtc, *old_plane_crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
int i ;
/* TODO return page_flip_needed() function */
@@ -5213,6 +5598,10 @@ static int dm_update_planes_state(struct dc *dc,
DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, old_plane_crtc->base.id);
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ return ret;
+
if (!dc_remove_plane_from_context(
dc,
dm_old_crtc_state->stream,
@@ -5267,6 +5656,12 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
+
/*
* Any atomic check errors that occur after this will
* not need a release. The plane state will be attached
@@ -5298,11 +5693,14 @@ static int dm_update_planes_state(struct dc *dc,
return ret;
}
-enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, struct drm_atomic_state *state)
-{
-
- int i, j, num_plane;
+static int
+dm_determine_update_type_for_commit(struct dc *dc,
+ struct drm_atomic_state *state,
+ enum surface_update_type *out_type)
+{
+ struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
+ int i, j, num_plane, ret = 0;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct dm_plane_state *new_dm_plane_state, *old_dm_plane_state;
struct drm_crtc *new_plane_crtc, *old_plane_crtc;
@@ -5318,6 +5716,12 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
struct dc_stream_update stream_update;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
+ if (!updates || !surface) {
+ DRM_ERROR("Plane or surface update failed to allocate");
+ /* Set type to FULL to avoid crashing in DC*/
+ update_type = UPDATE_TYPE_FULL;
+ goto cleanup;
+ }
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -5370,35 +5774,73 @@ enum surface_update_type dm_determine_update_type_for_commit(struct dc *dc, stru
}
if (num_plane > 0) {
- status = dc_stream_get_status(new_dm_crtc_state->stream);
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto cleanup;
+
+ old_dm_state = dm_atomic_get_old_state(state);
+ if (!old_dm_state) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ status = dc_state_get_stream_status(old_dm_state->context,
+ new_dm_crtc_state->stream);
+
update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
&stream_update, status);
if (update_type > UPDATE_TYPE_MED) {
update_type = UPDATE_TYPE_FULL;
- goto ret;
+ goto cleanup;
}
}
} else if (!new_dm_crtc_state->stream && old_dm_crtc_state->stream) {
update_type = UPDATE_TYPE_FULL;
- goto ret;
+ goto cleanup;
}
}
-ret:
+cleanup:
kfree(updates);
kfree(surface);
- return update_type;
+ *out_type = update_type;
+ return ret;
}
+/**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
+ * @dev: The DRM device
+ * @state: The atomic state to validate
+ *
+ * Validate that the given atomic state is programmable by DC into hardware.
+ * This involves constructing a &struct dc_state reflecting the new hardware
+ * state we wish to commit, then querying DC to see if it is programmable. It's
+ * important not to modify the existing DC state. Otherwise, atomic_check
+ * may unexpectedly commit hardware changes.
+ *
+ * When validating the DC state, it's important that the right locks are
+ * acquired. For the full-update case, where streams are removed, added, or
+ * updated on one CRTC while a flip occurs on another, acquiring the global
+ * lock guarantees that any such commit will wait for completion of any
+ * outstanding flip using DRM's synchronization events. See
+ * dm_determine_update_type_for_commit().
+ *
+ * Note that DM adds the affected connectors for all CRTCs in state, even when
+ * that might not seem necessary. This is because DC stream creation requires
+ * the DC sink, which is tied to the DRM connector state. Cleaning this up
+ * should be possible, but it is non-trivial - a potential TODO item.
+ *
+ * Return: 0 on success, negative error code if validation failed.
+ */
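
Reconstructed from the hunks that follow, the check proceeds roughly in this order (an editorial outline, not text from the patch):

	/*
	 * 1. dm_update_planes_state(dc, state, false, ...)  - remove planes
	 * 2. dm_update_crtcs_state(dm, state, false, ...)   - disable CRTCs
	 * 3. dm_update_crtcs_state(dm, state, true, ...)    - enable/update CRTCs
	 * 4. dm_update_planes_state(dc, state, true, ...)   - add planes
	 * 5. dm_determine_update_type_for_commit(dc, state, &update_type)
	 * 6. If the overall type exceeds UPDATE_TYPE_FAST:
	 *        dm_atomic_get_state() + do_aquire_global_lock(), then validate;
	 *    else if state->legacy_cursor_update:
	 *        try drm_atomic_helper_async_check() for an async cursor update.
	 */
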
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct amdgpu_device *adev = dev->dev_private;
+ struct dm_atomic_state *dm_state = NULL;
struct dc *dc = adev->dm.dc;
- struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
struct drm_connector *connector;
struct drm_connector_state *old_con_state, *new_con_state;
struct drm_crtc *crtc;
@@ -5419,12 +5861,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
- struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
!new_crtc_state->color_mgmt_changed &&
- (dm_old_crtc_state->freesync_enabled == dm_new_crtc_state->freesync_enabled))
+ !new_crtc_state->vrr_enabled)
continue;
if (!new_crtc_state->enable)
@@ -5439,10 +5878,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
- dm_state->context = dc_create_state();
- ASSERT(dm_state->context);
- dc_resource_state_copy_construct_current(dc, dm_state->context);
-
/* Remove existing planes if they are modified */
ret = dm_update_planes_state(dc, state, false, &lock_and_validation_needed);
if (ret) {
@@ -5495,16 +5930,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
- /*
- * For full updates case when
- * removing/adding/updating streams on one CRTC while flipping
- * on another CRTC,
- * acquiring global lock will guarantee that any such full
- * update commit
- * will wait for completion of any outstanding flip using DRMs
- * synchronization events.
- */
- update_type = dm_determine_update_type_for_commit(dc, state);
+ ret = dm_determine_update_type_for_commit(dc, state, &update_type);
+ if (ret)
+ goto fail;
if (overall_update_type < update_type)
overall_update_type = update_type;
@@ -5522,6 +5950,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (overall_update_type > UPDATE_TYPE_FAST) {
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
ret = do_aquire_global_lock(dev, state);
if (ret)
@@ -5531,6 +5962,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
ret = -EINVAL;
goto fail;
}
+ } else if (state->legacy_cursor_update) {
+ /*
+ * This is a fast cursor update coming from the plane update
+ * helper, check if it can be done asynchronously for better
+ * performance.
+ */
+ state->async_update = !drm_atomic_helper_async_check(dev, state);
}
/* Must be success */
@@ -5576,14 +6014,15 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct detailed_data_monitor_range *range;
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- struct dm_connector_state *dm_con_state;
+ struct dm_connector_state *dm_con_state = NULL;
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
+ bool freesync_capable = false;
if (!connector->state) {
DRM_ERROR("%s - Connector has no state", __func__);
- return;
+ goto update;
}
if (!edid) {
@@ -5593,9 +6032,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector->max_vfreq = 0;
amdgpu_dm_connector->pixel_clock_mhz = 0;
- dm_con_state->freesync_capable = false;
- dm_con_state->freesync_enable = false;
- return;
+ goto update;
}
dm_con_state = to_dm_connector_state(connector->state);
@@ -5603,10 +6040,10 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
edid_check_required = false;
if (!amdgpu_dm_connector->dc_sink) {
DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
- return;
+ goto update;
}
if (!adev->dm.freesync_module)
- return;
+ goto update;
/*
* if edid non zero restrict freesync only for dp and edp
*/
@@ -5618,7 +6055,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
amdgpu_dm_connector);
}
}
- dm_con_state->freesync_capable = false;
if (edid_check_required == true && (edid->version > 1 ||
(edid->version == 1 && edid->revision > 1))) {
for (i = 0; i < 4; i++) {
@@ -5650,8 +6086,16 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (amdgpu_dm_connector->max_vfreq -
amdgpu_dm_connector->min_vfreq > 10) {
- dm_con_state->freesync_capable = true;
+ freesync_capable = true;
}
}
+
+update:
+ if (dm_con_state)
+ dm_con_state->freesync_capable = freesync_capable;
+
+ if (connector->vrr_capable_property)
+ drm_connector_set_vrr_capable_property(connector,
+ freesync_capable);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 6e069d777ab2..25bb91ee80ba 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -59,60 +59,140 @@ struct common_irq_params {
enum dc_irq_source irq_src;
};
+/**
+ * struct irq_list_head - Linked-list for low context IRQ handlers.
+ *
+ * @head: The list_head within &struct handler_data
+ * @work: A work_struct containing the deferred handler work
+ */
struct irq_list_head {
struct list_head head;
/* In case this interrupt needs post-processing, 'work' will be queued*/
struct work_struct work;
};
+/**
+ * struct dm_comressor_info - Buffer info used by frame buffer compression
+ * @cpu_addr: MMIO cpu addr
+ * @bo_ptr: Pointer to the buffer object
+ * @gpu_addr: MMIO gpu addr
+ */
struct dm_comressor_info {
void *cpu_addr;
struct amdgpu_bo *bo_ptr;
uint64_t gpu_addr;
};
+/**
+ * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI
+ * @min_input_signal: minimum possible input in range 0-255
+ * @max_input_signal: maximum possible input in range 0-255
+ * @caps_valid: true if these values are from the ACPI interface
+ */
+struct amdgpu_dm_backlight_caps {
+ int min_input_signal;
+ int max_input_signal;
+ bool caps_valid;
+};
+
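
The caps only bound the valid input range; mapping a 0-255 user level into it is left to the caller. A minimal sketch, assuming a simple linear mapping (the helper name is hypothetical):

	static int example_scale_input_signal(const struct amdgpu_dm_backlight_caps *caps,
					      int user_level)
	{
		/* Without valid ACPI caps, pass the raw 0-255 level through. */
		if (!caps->caps_valid)
			return user_level;

		/* Linearly rescale 0-255 onto [min_input_signal, max_input_signal]. */
		return caps->min_input_signal +
		       user_level * (caps->max_input_signal - caps->min_input_signal) / 255;
	}
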
+/**
+ * struct amdgpu_display_manager - Central amdgpu display manager device
+ *
+ * @dc: Display Core control structure
+ * @adev: AMDGPU base driver structure
+ * @ddev: DRM base driver structure
+ * @display_indexes_num: Max number of display streams supported
+ * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
+ * @backlight_dev: Backlight control device
+ * @cached_state: Caches device atomic state for suspend/resume
+ * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
+ */
struct amdgpu_display_manager {
+
struct dc *dc;
+
+ /**
+ * @cgs_device:
+ *
+ * The Common Graphics Services device. It provides an interface for
+ * accessing registers.
+ */
struct cgs_device *cgs_device;
- struct amdgpu_device *adev; /*AMD base driver*/
- struct drm_device *ddev; /*DRM base driver*/
+ struct amdgpu_device *adev;
+ struct drm_device *ddev;
u16 display_indexes_num;
- /*
- * 'irq_source_handler_table' holds a list of handlers
- * per (DAL) IRQ source.
+ /**
+ * @atomic_obj:
+ *
+ * In combination with &dm_atomic_state it helps manage
+ * global atomic state that doesn't map cleanly into existing
+ * drm resources, like &dc_context.
+ */
+ struct drm_private_obj atomic_obj;
+
+ struct drm_modeset_lock atomic_obj_lock;
+
+ /**
+ * @dc_lock:
+ *
+ * Guards access to DC functions that can issue register write
+ * sequences.
+ */
+ struct mutex dc_lock;
+
+ /**
+ * @irq_handler_list_low_tab:
+ *
+ * Low priority IRQ handler table.
*
- * Each IRQ source may need to be handled at different contexts.
- * By 'context' we mean, for example:
- * - The ISR context, which is the direct interrupt handler.
- * - The 'deferred' context - this is the post-processing of the
- * interrupt, but at a lower priority.
+ * It is an n*m table consisting of n IRQ sources and m handlers per IRQ
+ * source. Low priority IRQ handlers are deferred to a workqueue to be
+ * processed. Hence, they can sleep.
*
* Note that handlers are called in the same order as they were
* registered (FIFO).
*/
struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+
+ /**
+ * @irq_handler_list_high_tab:
+ *
+ * High priority IRQ handler table.
+ *
+ * It is an n*m table, same as &irq_handler_list_low_tab. However,
+ * handlers in this table are not deferred and are called immediately.
+ */
struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
+ /**
+ * @pflip_params:
+ *
+ * Page flip IRQ parameters, passed to registered handlers when
+ * triggered.
+ */
struct common_irq_params
pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
+ /**
+ * @vblank_params:
+ *
+ * Vertical blanking IRQ parameters, passed to registered handlers when
+ * triggered.
+ */
struct common_irq_params
vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
- /* this spin lock synchronizes access to 'irq_handler_list_table' */
spinlock_t irq_handler_list_table_lock;
struct backlight_device *backlight_dev;
const struct dc_link *backlight_link;
+ struct amdgpu_dm_backlight_caps backlight_caps;
struct mod_freesync *freesync_module;
- /**
- * Caches device atomic state for suspend/resume
- */
struct drm_atomic_state *cached_state;
struct dm_comressor_info compressor;
@@ -183,15 +263,21 @@ struct dm_crtc_state {
int crc_skip_count;
bool crc_enabled;
- bool freesync_enabled;
+ bool freesync_timing_changed;
+ bool freesync_vrr_info_changed;
+
+ bool vrr_supported;
+ struct mod_freesync_config freesync_config;
struct dc_crtc_timing_adjust adjust;
struct dc_info_packet vrr_infopacket;
+
+ int abm_level;
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
struct dm_atomic_state {
- struct drm_atomic_state base;
+ struct drm_private_state base;
struct dc_state *context;
};
@@ -206,8 +292,8 @@ struct dm_connector_state {
uint8_t underscan_hborder;
uint8_t max_bpc;
bool underscan_enable;
- bool freesync_enable;
bool freesync_capable;
+ uint8_t abm_level;
};
#define to_dm_connector_state(x)\
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index be19e6861189..216e48cec716 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -164,7 +164,7 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
*/
stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
- gamma, true, adev->asic_type <= CHIP_RAVEN);
+ gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
dc_gamma_release(&gamma);
if (!ret) {
stream->out_transfer_func->type = old_type;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 01fc5717b657..f088ac585978 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -75,6 +75,11 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
return -EINVAL;
}
+ if (!stream_state) {
+ DRM_ERROR("No stream state for CRTC%d\n", crtc->index);
+ return -EINVAL;
+ }
+
/* When enabling CRC, we should also disable dithering. */
if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
if (dc_stream_configure_crc(stream_state->ctx->dc,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index a212178f2edc..cd10f77cdeb0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -32,16 +32,55 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
+/**
+ * DOC: overview
+ *
+ * DM provides another layer of IRQ management on top of what the base driver
+ * already provides. This is something that could be cleaned up, and is a
+ * future TODO item.
+ *
+ * The base driver provides IRQ source registration with DRM, handler
+ * registration into the base driver's IRQ table, and a handler callback
+ * amdgpu_irq_handler(), with which DRM calls on interrupts. This generic
+ * handler looks up the IRQ table, and calls the respective
+ * &amdgpu_irq_src_funcs.process hookups.
+ *
+ * What DM provides on top are two IRQ tables specifically for top-half and
+ * bottom-half IRQ handling, with the bottom half implemented via workqueues:
+ *
+ * - &amdgpu_display_manager.irq_handler_list_high_tab
+ * - &amdgpu_display_manager.irq_handler_list_low_tab
+ *
+ * They override the base driver's IRQ table, and the effect can be seen
+ * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They
+ * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks up
+ * DM's IRQ tables. However, in order for the base driver to recognize this
+ * hook, DM still needs to register the IRQ with the base driver. See
+ * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
+ *
+ * To expose DC's hardware interrupt toggle to the base driver, DM implements
+ * &amdgpu_irq_src_funcs.set hooks. Base driver calls it through
+ * amdgpu_irq_update() to enable or disable the interrupt.
+ */
+
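
Hooking into these tables goes through amdgpu_dm_irq_register_interrupt(), documented later in this file. A minimal sketch of registering a low-context (workqueue-deferred) handler; the struct dc_interrupt_params field names and the INTERRUPT_LOW_IRQ_CONTEXT/DC_IRQ_SOURCE_HPD1 constants follow the upstream driver but are assumptions as far as this patch is concerned:

	static void example_hpd_low_irq(void *arg)
	{
		/* Runs from the low-context workqueue, so it may sleep. */
	}

	static void example_register_hpd(struct amdgpu_device *adev)
	{
		struct dc_interrupt_params int_params = {0};

		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
		int_params.irq_source = DC_IRQ_SOURCE_HPD1;

		amdgpu_dm_irq_register_interrupt(adev, &int_params,
						 example_hpd_low_irq, adev);
	}
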
/******************************************************************************
* Private declarations.
*****************************************************************************/
+/**
+ * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
+ *
+ * @list: Linked list entry referencing the next/previous handler
+ * @handler: Handler function
+ * @handler_arg: Argument passed to the handler when triggered
+ * @dm: DM which this handler belongs to
+ * @irq_source: DC interrupt source that this handler is registered for
+ */
struct amdgpu_dm_irq_handler_data {
struct list_head list;
interrupt_handler handler;
void *handler_arg;
- /* DM which this handler belongs to */
struct amdgpu_display_manager *dm;
/* DAL irq source which registered for this interrupt. */
enum dc_irq_source irq_source;
@@ -68,7 +107,7 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
}
/**
- * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper.
+ * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper.
*
* @work: work struct
*/
@@ -99,8 +138,8 @@ static void dm_irq_work_func(struct work_struct *work)
* (The most common use is HPD interrupt) */
}
-/**
- * Remove a handler and return a pointer to hander list from which the
+/*
+ * Remove a handler and return a pointer to the handler list from which the
* handler was removed.
*/
static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
@@ -203,6 +242,24 @@ static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
* Note: caller is responsible for input validation.
*****************************************************************************/
+/**
+ * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
+ * @adev: The base driver device containing the DM device.
+ * @int_params: Interrupt parameters containing the source and handler context
+ * @ih: Function pointer to the interrupt handler to register
+ * @handler_args: Arguments passed to the handler when the interrupt occurs
+ *
+ * Register an interrupt handler for the given IRQ source, under the given
+ * context. The context can either be high or low. High context handlers are
+ * executed directly within ISR context, while low context handlers run from a
+ * workqueue, thereby allowing operations that sleep.
+ *
+ * Registered handlers are called in a FIFO manner, i.e. the earliest
+ * registered handler will be called first.
+ *
+ * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ
+ * source, handler function, and args
+ */
void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
struct dc_interrupt_params *int_params,
void (*ih)(void *),
@@ -261,6 +318,15 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
return handler_data;
}
+/**
+ * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table
+ * @adev: The base driver device containing the DM device
+ * @irq_source: IRQ source to remove the given handler from
+ * @ih: Function pointer to the interrupt handler to unregister
+ *
+ * Go through both low and high context IRQ tables, and find the given handler
+ * for the given irq source. If found, remove it. Otherwise, do nothing.
+ */
void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
enum dc_irq_source irq_source,
void *ih)
@@ -295,6 +361,20 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
}
}
+/**
+ * amdgpu_dm_irq_init() - Initialize DM IRQ management
+ * @adev: The base driver device containing the DM device
+ *
+ * Initialize DM's high and low context IRQ tables.
+ *
+ * The N by M table contains N IRQ sources, with M
+ * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The
+ * list_heads are initialized here. When an interrupt n is triggered, all m
+ * handlers are called in sequence, FIFO according to registration order.
+ *
+ * The low context table requires special steps to initialize, since handlers
+ * will be deferred to a workqueue. See &struct irq_list_head.
+ */
int amdgpu_dm_irq_init(struct amdgpu_device *adev)
{
int src;
@@ -317,7 +397,12 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
return 0;
}
-/* DM IRQ and timer resource release */
+/**
+ * amdgpu_dm_irq_fini() - Tear down DM IRQ management
+ * @adev: The base driver device containing the DM device
+ *
+ * Flush all work within the low context IRQ table.
+ */
void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
{
int src;
@@ -414,7 +499,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
return 0;
}
-/**
+/*
* amdgpu_dm_irq_schedule_work - schedule all work items registered for the
* "irq_source".
*/
@@ -439,8 +524,9 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
}
-/** amdgpu_dm_irq_immediate_work
- * Callback high irq work immediately, don't send to work queue
+/*
+ * amdgpu_dm_irq_immediate_work
+ * Call all high irq work immediately instead of sending it to the work queue.
*/
static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
enum dc_irq_source irq_source)
@@ -467,11 +553,14 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
}
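
The immediate path above amounts to walking one row of the high-context table and invoking each entry in place. A condensed sketch of that walk, using the fields documented in &struct amdgpu_dm_irq_handler_data (locking omitted):

	struct list_head *hnd_list =
		&adev->dm.irq_handler_list_high_tab[irq_source];
	struct amdgpu_dm_irq_handler_data *handler_data;

	/* Call every registered high-context handler, in FIFO order. */
	list_for_each_entry(handler_data, hnd_list, list)
		handler_data->handler(handler_data->handler_arg);
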
-/*
- * amdgpu_dm_irq_handler
+/**
+ * amdgpu_dm_irq_handler - Generic DM IRQ handler
+ * @adev: amdgpu base driver device containing the DM device
+ * @source: Unused
+ * @entry: Data about the triggered interrupt
*
- * Generic IRQ handler, calls all registered high irq work immediately, and
- * schedules work for low irq
+ * Calls all registered high irq work immediately, and schedules work for low
+ * irq. The DM IRQ table is used to find the corresponding handlers.
*/
static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
@@ -613,7 +702,7 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
adev->hpd_irq.funcs = &dm_hpd_irq_funcs;
}
-/*
+/**
* amdgpu_dm_hpd_init - hpd setup callback.
*
* @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index d02c32a1039c..1b0d209d8367 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -342,10 +342,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
master->connector_id);
aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
+ drm_connector_attach_encoder(&aconnector->base,
+ &aconnector->mst_encoder->base);
- /*
- * TODO: understand why this one is needed
- */
drm_object_attach_property(
&connector->base,
dev->mode_config.path_property,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 12001a006b2d..9d2d6986b983 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -485,11 +485,11 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
return;
clock.clock_type = amd_pp_dcf_clock;
- clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+ clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000;
pp_funcs->display_clock_voltage_request(pp_handle, &clock);
clock.clock_type = amd_pp_f_clock;
- clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+ clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000;
pp_funcs->display_clock_voltage_request(pp_handle, &clock);
}
@@ -518,13 +518,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
wm_dce_clocks[i].wm_set_id =
ranges->reader_wm_sets[i].wm_inst;
wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
- ranges->reader_wm_sets[i].max_drain_clk_khz;
+ ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
- ranges->reader_wm_sets[i].min_drain_clk_khz;
+ ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
wm_dce_clocks[i].wm_max_mem_clk_in_khz =
- ranges->reader_wm_sets[i].max_fill_clk_khz;
+ ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
wm_dce_clocks[i].wm_min_mem_clk_in_khz =
- ranges->reader_wm_sets[i].min_fill_clk_khz;
+ ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
}
for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
@@ -534,13 +534,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
wm_soc_clocks[i].wm_set_id =
ranges->writer_wm_sets[i].wm_inst;
wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
- ranges->writer_wm_sets[i].max_fill_clk_khz;
+ ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
- ranges->writer_wm_sets[i].min_fill_clk_khz;
+ ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
wm_soc_clocks[i].wm_max_mem_clk_in_khz =
- ranges->writer_wm_sets[i].max_drain_clk_khz;
+ ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
wm_soc_clocks[i].wm_min_mem_clk_in_khz =
- ranges->writer_wm_sets[i].min_drain_clk_khz;
+ ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
}
pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
new file mode 100644
index 000000000000..d898981684d5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM amdgpu_dm
+
+#if !defined(_AMDGPU_DM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _AMDGPU_DM_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(amdgpu_dc_rreg,
+ TP_PROTO(unsigned long *read_count, uint32_t reg, uint32_t value),
+ TP_ARGS(read_count, reg, value),
+ TP_STRUCT__entry(
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->value = value;
+ *read_count = *read_count + 1;
+ ),
+ TP_printk("reg=0x%08lx, value=0x%08lx",
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+TRACE_EVENT(amdgpu_dc_wreg,
+ TP_PROTO(unsigned long *write_count, uint32_t reg, uint32_t value),
+ TP_ARGS(write_count, reg, value),
+ TP_STRUCT__entry(
+ __field(uint32_t, reg)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->reg = reg;
+ __entry->value = value;
+ *write_count = *write_count + 1;
+ ),
+ TP_printk("reg=0x%08lx, value=0x%08lx",
+ (unsigned long)__entry->reg,
+ (unsigned long)__entry->value)
+);
+
+
+TRACE_EVENT(amdgpu_dc_performance,
+ TP_PROTO(unsigned long read_count, unsigned long write_count,
+ unsigned long *last_read, unsigned long *last_write,
+ const char *func, unsigned int line),
+ TP_ARGS(read_count, write_count, last_read, last_write, func, line),
+ TP_STRUCT__entry(
+ __field(uint32_t, reads)
+ __field(uint32_t, writes)
+ __field(uint32_t, read_delta)
+ __field(uint32_t, write_delta)
+ __string(func, func)
+ __field(uint32_t, line)
+ ),
+ TP_fast_assign(
+ __entry->reads = read_count;
+ __entry->writes = write_count;
+ __entry->read_delta = read_count - *last_read;
+ __entry->write_delta = write_count - *last_write;
+ __assign_str(func, func);
+ __entry->line = line;
+ *last_read = read_count;
+ *last_write = write_count;
+ ),
+ TP_printk("%s:%d reads=%08ld (%08ld total), writes=%08ld (%08ld total)",
+ __get_str(func), __entry->line,
+ (unsigned long)__entry->read_delta,
+ (unsigned long)__entry->reads,
+ (unsigned long)__entry->write_delta,
+ (unsigned long)__entry->writes)
+);
+#endif /* _AMDGPU_DM_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE amdgpu_dm_trace
+#include <trace/define_trace.h>
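
Each TRACE_EVENT() above expands to a trace_<event>() helper for use at the instrumentation site. A sketch of how a register-read wrapper might emit the rreg event - the wrapper itself and the perf_trace->read_count field are assumptions; only the tracepoint signature comes from this file:

	static inline uint32_t example_read_reg_traced(struct dc_context *ctx,
						       uint32_t address)
	{
		uint32_t value = cgs_read_register(ctx->cgs_device, address);

		/* Bumps the running read counter and logs reg/value. */
		trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
		return value;
	}
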
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 0e1dc1b1a48d..c2ab026aee91 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -2030,7 +2030,7 @@ static uint32_t get_src_obj_list(struct bios_parser *bp, ATOM_OBJECT *object,
static struct device_id device_type_from_device_id(uint16_t device_id)
{
- struct device_id result_device_id;
+ struct device_id result_device_id = {0};
switch (device_id) {
case ATOM_DEVICE_LCD1_SUPPORT:
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index ff764da21b6f..751bb614fc0e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1884,6 +1884,8 @@ static const struct dc_vbios_funcs vbios_funcs = {
.is_accelerated_mode = bios_parser_is_accelerated_mode,
+ .is_active_display = bios_is_active_display,
+
.set_scratch_critical_state = bios_parser_set_scratch_critical_state,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
index d4589470985c..fdda8aa8e303 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c
@@ -88,3 +88,96 @@ uint32_t bios_get_vga_enabled_displays(
return active_disp;
}
+bool bios_is_active_display(
+ struct dc_bios *bios,
+ enum signal_type signal,
+ const struct connector_device_tag_info *device_tag)
+{
+ uint32_t active = 0;
+ uint32_t connected = 0;
+ uint32_t bios_scratch_0 = 0;
+ uint32_t bios_scratch_3 = 0;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ {
+ if (device_tag->dev_id.device_type == DEVICE_TYPE_DFP) {
+ switch (device_tag->dev_id.enum_id) {
+ case 1:
+ {
+ active = ATOM_S3_DFP1_ACTIVE;
+ connected = 0x0008; //ATOM_DISPLAY_DFP1_CONNECT
+ }
+ break;
+
+ case 2:
+ {
+ active = ATOM_S3_DFP2_ACTIVE;
+ connected = 0x0080; //ATOM_DISPLAY_DFP2_CONNECT
+ }
+ break;
+
+ case 3:
+ {
+ active = ATOM_S3_DFP3_ACTIVE;
+ connected = 0x0200; //ATOM_DISPLAY_DFP3_CONNECT
+ }
+ break;
+
+ case 4:
+ {
+ active = ATOM_S3_DFP4_ACTIVE;
+ connected = 0x0400; //ATOM_DISPLAY_DFP4_CONNECT
+ }
+ break;
+
+ case 5:
+ {
+ active = ATOM_S3_DFP5_ACTIVE;
+ connected = 0x0800; //ATOM_DISPLAY_DFP5_CONNECT
+ }
+ break;
+
+ case 6:
+ {
+ active = ATOM_S3_DFP6_ACTIVE;
+ connected = 0x0040; //ATOM_DISPLAY_DFP6_CONNECT
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+ break;
+
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_EDP:
+ {
+ active = ATOM_S3_LCD1_ACTIVE;
+ connected = 0x0002; //ATOM_DISPLAY_LCD1_CONNECT
+ }
+ break;
+
+ default:
+ break;
+ }
+
+
+ if (bios->regs->BIOS_SCRATCH_0) /* TODO: follow up with other ASICs */
+ bios_scratch_0 = REG_READ(BIOS_SCRATCH_0);
+ if (bios->regs->BIOS_SCRATCH_3) /* TODO: follow up with other ASICs */
+ bios_scratch_3 = REG_READ(BIOS_SCRATCH_3);
+
+ bios_scratch_3 &= ATOM_S3_DEVICE_ACTIVE_MASK;
+ if ((active & bios_scratch_3) && (connected & bios_scratch_0))
+ return true;
+
+ return false;
+}
+
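
Callers reach this helper through the &dc_vbios_funcs.is_active_display hook wired up in bios_parser2.c above. A hedged usage sketch; link->device_tag is an assumption based on the upstream struct dc_link layout:

	static bool example_link_is_lit(struct dc_link *link)
	{
		struct dc_bios *bios = link->ctx->dc_bios;

		/* True when VBIOS scratch regs report the display connected and active. */
		return bios->funcs->is_active_display(bios, link->connector_signal,
						      &link->device_tag);
	}
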
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
index 75a29e68fb27..f33cac2147e3 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h
@@ -35,6 +35,10 @@ bool bios_is_accelerated_mode(struct dc_bios *bios);
void bios_set_scratch_acc_mode_change(struct dc_bios *bios);
void bios_set_scratch_critical_state(struct dc_bios *bios, bool state);
uint32_t bios_get_vga_enabled_displays(struct dc_bios *bios);
+bool bios_is_active_display(
+ struct dc_bios *bios,
+ enum signal_type signal,
+ const struct connector_device_tag_info *device_tag);
#define GET_IMAGE(type, offset) ((type *) bios_get_image(&bp->base, offset, sizeof(type)))
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 3208188b7ed4..43e4a2be0fa6 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1423,27 +1423,27 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
ranges.num_reader_wm_sets = WM_SET_COUNT;
ranges.num_writer_wm_sets = WM_SET_COUNT;
ranges.reader_wm_sets[0].wm_inst = WM_A;
- ranges.reader_wm_sets[0].min_drain_clk_khz = min_dcfclk_khz;
- ranges.reader_wm_sets[0].max_drain_clk_khz = overdrive;
- ranges.reader_wm_sets[0].min_fill_clk_khz = min_fclk_khz;
- ranges.reader_wm_sets[0].max_fill_clk_khz = overdrive;
+ ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000;
+ ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
+ ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000;
+ ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
ranges.writer_wm_sets[0].wm_inst = WM_A;
- ranges.writer_wm_sets[0].min_fill_clk_khz = socclk_khz;
- ranges.writer_wm_sets[0].max_fill_clk_khz = overdrive;
- ranges.writer_wm_sets[0].min_drain_clk_khz = min_fclk_khz;
- ranges.writer_wm_sets[0].max_drain_clk_khz = overdrive;
+ ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000;
+ ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000;
+ ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000;
+ ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000;
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
ranges.reader_wm_sets[0].wm_inst = WM_A;
- ranges.reader_wm_sets[0].min_drain_clk_khz = 300000;
- ranges.reader_wm_sets[0].max_drain_clk_khz = 5000000;
- ranges.reader_wm_sets[0].min_fill_clk_khz = 800000;
- ranges.reader_wm_sets[0].max_fill_clk_khz = 5000000;
+ ranges.reader_wm_sets[0].min_drain_clk_mhz = 300;
+ ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000;
+ ranges.reader_wm_sets[0].min_fill_clk_mhz = 800;
+ ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000;
ranges.writer_wm_sets[0].wm_inst = WM_A;
- ranges.writer_wm_sets[0].min_fill_clk_khz = 200000;
- ranges.writer_wm_sets[0].max_fill_clk_khz = 5000000;
- ranges.writer_wm_sets[0].min_drain_clk_khz = 800000;
- ranges.writer_wm_sets[0].max_drain_clk_khz = 5000000;
+ ranges.writer_wm_sets[0].min_fill_clk_mhz = 200;
+ ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000;
+ ranges.writer_wm_sets[0].min_drain_clk_mhz = 800;
+ ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000;
}
ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7c491c91465f..d9c57984394b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -62,6 +62,55 @@
const static char DC_BUILD_ID[] = "production-build";
+/**
+ * DOC: Overview
+ *
+ * DC is the OS-agnostic component of the amdgpu DC driver.
+ *
+ * DC maintains and validates a set of structs representing the state of the
+ * driver and writes that state to AMD hardware.
+ *
+ * Main DC HW structs:
+ *
+ * struct dc - The central struct. One per driver. Created on driver load,
+ * destroyed on driver unload.
+ *
+ * struct dc_context - One per driver.
+ * Used as a backpointer by most other structs in dc.
+ *
+ * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
+ * plugpoints). Created on driver load, destroyed on driver unload.
+ *
+ * struct dc_sink - One per display. Created on boot or hotplug.
+ * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
+ * (the display directly attached). It may also have one or more remote
+ * sinks (in the Multi-Stream Transport case).
+ *
+ * struct resource_pool - One per driver. Represents the hw blocks not in the
+ * main pipeline. Not directly accessible by dm.
+ *
+ * Main dc state structs:
+ *
+ * These structs can be created and destroyed as needed. There is a full set of
+ * these structs in dc->current_state representing the currently programmed state.
+ *
+ * struct dc_state - The global DC state to track global state information,
+ * such as bandwidth values.
+ *
+ * struct dc_stream_state - Represents the hw configuration for the pipeline from
+ * a framebuffer to a display. Maps one-to-one with dc_sink.
+ *
+ * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
+ * and may have more in the Multi-Plane Overlay case.
+ *
+ * struct resource_context - Represents the programmable state of everything in
+ * the resource_pool. Not directly accessible by dm.
+ *
+ * struct pipe_ctx - A member of struct resource_context. Represents the
+ * internal hardware pipeline components. Each dc_plane_state has either
+ * one or two (in the pipe-split case).
+ */
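
As a small illustration of how these structs relate, the committed state can be walked via dc->current_state. This sketch mirrors the loop in the notify_display_count_to_smu() helper removed later in this patch:

	static int example_count_active_streams(struct dc *dc)
	{
		struct dc_state *context = dc->current_state;
		int i, count = 0;

		/* Each dc_stream_state maps one-to-one with a dc_sink. */
		for (i = 0; i < context->stream_count; i++)
			if (!context->streams[i]->dpms_off)
				count++;

		return count;
	}
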
+
/*******************************************************************************
* Private functions
******************************************************************************/
@@ -175,6 +224,17 @@ failed_alloc:
return false;
}
+static struct dc_perf_trace *dc_perf_trace_create(void)
+{
+ return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
+}
+
+static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
+{
+ kfree(*perf_trace);
+ *perf_trace = NULL;
+}
+
/**
*****************************************************************************
* Function: dc_stream_adjust_vmin_vmax
@@ -240,7 +300,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
}
/**
- * dc_stream_configure_crc: Configure CRC capture for the given stream.
+ * dc_stream_configure_crc() - Configure CRC capture for the given stream.
* @dc: DC Object
* @stream: The stream to configure CRC on.
* @enable: Enable CRC if true, disable otherwise.
@@ -292,7 +352,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
}
/**
- * dc_stream_get_crc: Get CRC values for the given stream.
+ * dc_stream_get_crc() - Get CRC values for the given stream.
* @dc: DC object
* @stream: The DC stream state of the stream to get CRCs from.
* @r_cr, g_y, b_cb: CRC values for the three channels are stored here.
@@ -328,7 +388,7 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
enum dc_dither_option option)
{
struct bit_depth_reduction_params params;
- struct dc_link *link = stream->status.link;
+ struct dc_link *link = stream->sink->link;
struct pipe_ctx *pipes = NULL;
int i;
@@ -391,9 +451,11 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
== stream) {
pipes = &dc->current_state->res_ctx.pipe_ctx[i];
- dc->hwss.program_csc_matrix(pipes,
- stream->output_color_space,
- stream->csc_color_matrix.matrix);
+ dc->hwss.program_output_csc(dc,
+ pipes,
+ stream->output_color_space,
+ stream->csc_color_matrix.matrix,
+ pipes->plane_res.hubp->opp_id);
ret = true;
}
}
@@ -534,6 +596,8 @@ static void destruct(struct dc *dc)
if (dc->ctx->created_bios)
dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
+
kfree(dc->ctx);
dc->ctx = NULL;
@@ -657,6 +721,12 @@ static bool construct(struct dc *dc,
goto fail;
}
+ dc_ctx->perf_trace = dc_perf_trace_create();
+ if (!dc_ctx->perf_trace) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
/* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create(
dc_version,
@@ -941,7 +1011,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb))
dc->hwss.enable_accelerated_mode(dc, context);
- dc->hwss.set_bandwidth(dc, context, false);
+ dc->hwss.prepare_bandwidth(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
@@ -957,8 +1027,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program hardware */
- dc->hwss.ready_shared_resources(dc, context);
-
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
@@ -1012,7 +1080,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
/* pplib is notified if disp_num changed */
- dc->hwss.set_bandwidth(dc, context, true);
+ dc->hwss.optimize_bandwidth(dc, context);
dc_release_state(dc->current_state);
@@ -1020,8 +1088,6 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_retain_state(dc->current_state);
- dc->hwss.optimize_shared_resources(dc);
-
return result;
}
@@ -1063,7 +1129,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->optimized_required = false;
- dc->hwss.set_bandwidth(dc, context, true);
+ dc->hwss.optimize_bandwidth(dc, context);
return true;
}
@@ -1331,6 +1397,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
return overall_type;
}
+/**
+ * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for an explanation of the update types.
+ */
enum surface_update_type dc_check_update_surfaces_for_stream(
struct dc *dc,
struct dc_surface_update *updates,
@@ -1369,35 +1440,6 @@ static struct dc_stream_status *stream_get_status(
static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
-static void notify_display_count_to_smu(
- struct dc *dc,
- struct dc_state *context)
-{
- int i, display_count;
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
-
- /*
- * if function pointer not set up, this message is
- * sent as part of pplib_apply_display_requirements.
- * So just return.
- */
- if (!pp_smu || !pp_smu->set_display_count)
- return;
-
- display_count = 0;
- for (i = 0; i < context->stream_count; i++) {
- const struct dc_stream_state *stream = context->streams[i];
-
- /* only notify active stream */
- if (stream->dpms_off)
- continue;
-
- display_count++;
- }
-
- pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
-}
-
static void commit_planes_do_stream_update(struct dc *dc,
struct dc_stream_state *stream,
struct dc_stream_update *stream_update,
@@ -1422,7 +1464,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->adjust->v_total_max);
if (stream_update->periodic_fn_vsync_delta &&
- pipe_ctx->stream_res.tg &&
pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
@@ -1441,6 +1482,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->output_csc_transform)
dc_stream_program_csc_matrix(dc, stream);
+ if (stream_update->dither_option) {
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+ }
+
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
@@ -1448,19 +1497,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
- dc->hwss.pplib_apply_display_requirements(
- dc, dc->current_state);
- notify_display_count_to_smu(dc, dc->current_state);
+ dc->hwss.optimize_bandwidth(dc, dc->current_state);
} else {
- dc->hwss.pplib_apply_display_requirements(
- dc, dc->current_state);
- notify_display_count_to_smu(dc, dc->current_state);
+ dc->hwss.prepare_bandwidth(dc, dc->current_state);
core_link_enable_stream(dc->current_state, pipe_ctx);
}
}
-
-
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
// if otg funcs defined check if blanked before programming
@@ -1487,7 +1530,7 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
if (update_type == UPDATE_TYPE_FULL) {
- dc->hwss.set_bandwidth(dc, context, false);
+ dc->hwss.prepare_bandwidth(dc, context);
context_clock_trace(dc, context);
}
@@ -1669,6 +1712,9 @@ enum dc_irq_source dc_interrupt_to_irq_source(
return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}
+/**
+ * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
+ */
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{
@@ -1724,6 +1770,15 @@ void dc_resume(struct dc *dc)
core_link_resume(dc->links[i]);
}
+bool dc_is_dmcu_initialized(struct dc *dc)
+{
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (dmcu)
+ return dmcu->funcs->is_dmcu_initialized(dmcu);
+ return false;
+}
+
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
@@ -1753,6 +1808,11 @@ static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink
return true;
}
+/**
+ * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
+ *
+ * EDID length is in bytes
+ */
struct dc_sink *dc_link_add_remote_sink(
struct dc_link *link,
const uint8_t *edid,
@@ -1811,6 +1871,12 @@ fail_add_sink:
return NULL;
}
+/**
+ * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
+ *
+ * Note that this just removes the struct dc_sink - it doesn't
+ * program hardware or alter other members of dc_link.
+ */
void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
int i;
@@ -1848,4 +1914,4 @@ void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx
info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
-} \ No newline at end of file
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index e1ebdf7b5eaf..73d049506618 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -311,7 +311,7 @@ void context_timing_trace(
{
int i;
struct dc *core_dc = dc;
- int h_pos[MAX_PIPES], v_pos[MAX_PIPES];
+ int h_pos[MAX_PIPES] = {0}, v_pos[MAX_PIPES] = {0};
struct crtc_position position;
unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -322,8 +322,7 @@ void context_timing_trace(
/* get_position() returns CRTC vertical/horizontal counter
* hence not applicable for underlay pipe
*/
- if (pipe_ctx->stream == NULL
- || pipe_ctx->pipe_idx == underlay_idx)
+ if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
continue;
pipe_ctx->stream_res.tg->funcs->get_position(pipe_ctx->stream_res.tg, &position);
@@ -333,7 +332,7 @@ void context_timing_trace(
for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
- if (pipe_ctx->stream == NULL)
+ if (pipe_ctx->stream == NULL || pipe_ctx->pipe_idx == underlay_idx)
continue;
TIMING_TRACE("OTG_%d H_tot:%d V_tot:%d H_pos:%d V_pos:%d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 5da2186b3615..4dc5846de5c4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -198,6 +198,13 @@ static bool program_hpd_filter(
return result;
}
+/**
+ * dc_link_detect_sink() - Determine if there is a sink connected
+ *
+ * @type: Returned connection type
+ * Does not detect downstream devices, such as MST sinks
+ * or displays connected through active dongles.
+ */
bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
{
uint32_t is_hpd_high = 0;
@@ -324,9 +331,9 @@ static enum signal_type get_basic_signal_type(
return SIGNAL_TYPE_NONE;
}
-/*
- * @brief
- * Check whether there is a dongle on DP connector
+/**
+ * dc_link_is_dp_sink_present() - Check if there is a native DP
+ * or passive DP-HDMI dongle connected
*/
bool dc_link_is_dp_sink_present(struct dc_link *link)
{
@@ -593,6 +600,14 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
}
+/**
+ * dc_link_detect() - Detect if a sink is attached to a given link
+ *
+ * link->local_sink is created or destroyed as needed.
+ *
+ * This does not create remote sinks but will trigger DM
+ * to start MST detection if a branch is detected.
+ */
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
{
struct dc_sink_init_data sink_init_data = { 0 };
@@ -1357,28 +1372,13 @@ static enum dc_status enable_link_dp(
struct dc_link *link = stream->sink->link;
struct dc_link_settings link_settings = {0};
enum dp_panel_mode panel_mode;
- enum dc_link_rate max_link_rate = LINK_RATE_HIGH2;
/* get link settings for video mode timing */
decide_link_settings(stream, &link_settings);
- /* raise clock state for HBR3 if required. Confirmed with HW DCE/DPCS
- * logic for HBR3 still needs Nominal (0.8V) on VDDC rail
- */
- if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
- max_link_rate = LINK_RATE_HIGH3;
-
- if (link_settings.link_rate == max_link_rate) {
- struct dc_clocks clocks = state->bw.dcn.clk;
-
- /* dce/dcn compat, do not update dispclk */
- clocks.dispclk_khz = 0;
- /* 27mhz = 27000000hz= 27000khz */
- clocks.phyclk_khz = link_settings.link_rate * 27000;
-
- state->dis_clk->funcs->update_clocks(
- state->dis_clk, &clocks, false);
- }
+ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
+ link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+ state->dccg->funcs->update_clocks(state->dccg, state, false);
dp_enable_link_phy(
link,
@@ -1411,8 +1411,6 @@ static enum dc_status enable_link_dp(
else
status = DC_FAIL_DP_LINK_TRAINING;
- enable_stream_features(pipe_ctx);
-
return status;
}
@@ -2156,14 +2154,16 @@ int dc_link_get_backlight_level(const struct dc_link *link)
{
struct abm *abm = link->ctx->dc->res_pool->abm;
- if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL)
+ if (abm == NULL || abm->funcs->get_current_backlight == NULL)
return DC_ERROR_UNEXPECTED;
- return (int) abm->funcs->get_current_backlight_8_bit(abm);
+ return (int) abm->funcs->get_current_backlight(abm);
}
-bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
- uint32_t frame_ramp, const struct dc_stream_state *stream)
+bool dc_link_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp,
+ const struct dc_stream_state *stream)
{
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
@@ -2175,26 +2175,24 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
if ((dmcu == NULL) ||
(abm == NULL) ||
- (abm->funcs->set_backlight_level == NULL))
+ (abm->funcs->set_backlight_level_pwm == NULL))
return false;
- if (stream) {
- if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL)
- frame_ramp = 0;
-
- ((struct dc_stream_state *)stream)->bl_pwm_level = level;
- }
+ if (stream)
+ ((struct dc_stream_state *)stream)->bl_pwm_level =
+ backlight_pwm_u16_16;
use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
- DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", level, level);
+ DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+ backlight_pwm_u16_16, backlight_pwm_u16_16);
if (dc_is_embedded_signal(link->connector_signal)) {
- if (stream != NULL) {
- for (i = 0; i < MAX_PIPES; i++) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
if (core_dc->current_state->res_ctx.
- pipe_ctx[i].stream
- == stream)
+ pipe_ctx[i].stream->sink->link
+ == link)
/* DMCU -1 for all controller id values,
* therefore +1 here
*/
@@ -2204,9 +2202,9 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
1;
}
}
- abm->funcs->set_backlight_level(
+ abm->funcs->set_backlight_level_pwm(
abm,
- level,
+ backlight_pwm_u16_16,
frame_ramp,
controller_id,
use_smooth_brightness);
@@ -2220,7 +2218,7 @@ bool dc_link_set_abm_disable(const struct dc_link *link)
struct dc *core_dc = link->ctx->dc;
struct abm *abm = core_dc->res_pool->abm;
- if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL))
+ if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
return false;
abm->funcs->set_abm_immediate_disable(abm);
@@ -2233,7 +2231,7 @@ bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait)
struct dc *core_dc = link->ctx->dc;
struct dmcu *dmcu = core_dc->res_pool->dmcu;
- if (dmcu != NULL && link->psr_enabled)
+ if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled)
dmcu->funcs->set_psr_enable(dmcu, enable, wait);
return true;
@@ -2609,6 +2607,13 @@ void core_link_enable_stream(
core_dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->sink->link->cur_link_settings);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ enable_stream_features(pipe_ctx);
+
+ dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
+ pipe_ctx->stream->bl_pwm_level,
+ 0,
+ pipe_ctx->stream);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index d91df5ef0cb3..849a3a3032f7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2196,7 +2196,7 @@ static void get_active_converter_info(
}
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
- uint8_t det_caps[4];
+ uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info: 00080h-0008Fh. */
union dwnstream_port_caps_byte0 *port_caps =
(union dwnstream_port_caps_byte0 *)det_caps;
core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
@@ -2371,11 +2371,22 @@ static bool retrieve_link_cap(struct dc_link *link)
dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
- core_link_read_dpcd(
+ uint8_t ext_cap_data[16];
+
+ memset(ext_cap_data, '\0', sizeof(ext_cap_data));
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
link,
DP_DP13_DPCD_REV,
- dpcd_data,
- sizeof(dpcd_data));
+ ext_cap_data,
+ sizeof(ext_cap_data));
+ if (status == DC_OK) {
+ memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data));
+ break;
+ }
+ }
+ if (status != DC_OK)
+ dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__);
}
}
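The bounded-retry read added above is a reusable pattern: retry into a scratch buffer and commit to the destination only after a full successful read. A minimal standalone sketch of the same control flow — read_reg(), MAX_RETRIES and the status enum are stand-ins for core_link_read_dpcd(), read_dpcd_retry_cnt and DC_OK, and the flaky stub exists only to make the example runnable:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum status { STATUS_OK, STATUS_FAIL };

#define MAX_RETRIES 3 /* stand-in for read_dpcd_retry_cnt */

/* Hypothetical flaky transport: fails twice, then succeeds. */
static enum status read_reg(uint32_t addr, uint8_t *buf, size_t len)
{
	static int calls;

	(void)addr;
	if (++calls < 3)
		return STATUS_FAIL;
	memset(buf, 0x12, len); /* pretend register contents */
	return STATUS_OK;
}

/* Bounded retry into a scratch buffer; commit to dst only after a
 * full successful read, mirroring ext_cap_data and the memcpy above. */
static enum status read_reg_retry(uint32_t addr, uint8_t *dst, size_t len)
{
	uint8_t tmp[16] = { 0 };
	enum status status = STATUS_FAIL;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		status = read_reg(addr, tmp, sizeof(tmp));
		if (status == STATUS_OK) {
			memcpy(dst, tmp, len < sizeof(tmp) ? len : sizeof(tmp));
			break;
		}
	}
	return status;
}

int main(void)
{
	uint8_t caps[8];

	if (read_reg_retry(0x2200, caps, sizeof(caps)) == STATUS_OK)
		printf("read ok: 0x%02x\n", caps[0]);
	else
		printf("read failed after %d tries\n", MAX_RETRIES);
	return 0;
}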
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index b6fe29b9fb65..c347afd1030f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -478,10 +478,29 @@ static enum pixel_format convert_pixel_format_to_dalsurface(
return dal_pixel_format;
}
-static void rect_swap_helper(struct rect *rect)
-{
- swap(rect->height, rect->width);
- swap(rect->x, rect->y);
+static inline void get_vp_scan_direction(
+ enum dc_rotation_angle rotation,
+ bool horizontal_mirror,
+ bool *orthogonal_rotation,
+ bool *flip_vert_scan_dir,
+ bool *flip_horz_scan_dir)
+{
+ *orthogonal_rotation = false;
+ *flip_vert_scan_dir = false;
+ *flip_horz_scan_dir = false;
+ if (rotation == ROTATION_ANGLE_180) {
+ *flip_vert_scan_dir = true;
+ *flip_horz_scan_dir = true;
+ } else if (rotation == ROTATION_ANGLE_90) {
+ *orthogonal_rotation = true;
+ *flip_horz_scan_dir = true;
+ } else if (rotation == ROTATION_ANGLE_270) {
+ *orthogonal_rotation = true;
+ *flip_vert_scan_dir = true;
+ }
+
+ if (horizontal_mirror)
+ *flip_horz_scan_dir = !*flip_horz_scan_dir;
}
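The helper above collapses the previously scattered rotation checks into one table of scan-direction flags: a 90/270 rotation is "orthogonal" (x and y trade places) and mirroring simply inverts the horizontal flip. A standalone sketch that restates the same decision tree and prints the flags for every rotation/mirror combination (the enum is a local stand-in for dc_rotation_angle):

#include <stdio.h>
#include <stdbool.h>

enum rot { ROT_0, ROT_90, ROT_180, ROT_270 }; /* stand-in for dc_rotation_angle */

/* Same decision tree as get_vp_scan_direction() above. */
static void scan_dir(enum rot rotation, bool mirror,
		     bool *orthogonal, bool *flip_v, bool *flip_h)
{
	*orthogonal = false;
	*flip_v = false;
	*flip_h = false;
	if (rotation == ROT_180) {
		*flip_v = true;
		*flip_h = true;
	} else if (rotation == ROT_90) {
		*orthogonal = true;
		*flip_h = true;
	} else if (rotation == ROT_270) {
		*orthogonal = true;
		*flip_v = true;
	}
	if (mirror)
		*flip_h = !*flip_h;
}

int main(void)
{
	static const char *names[] = { "0", "90", "180", "270" };

	for (int r = ROT_0; r <= ROT_270; r++)
		for (int m = 0; m <= 1; m++) {
			bool o, v, h;

			scan_dir(r, m, &o, &v, &h);
			printf("rot %-3s mirror %d -> orthogonal %d flip_v %d flip_h %d\n",
			       names[r], m, o, v, h);
		}
	return 0;
}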
static void calculate_viewport(struct pipe_ctx *pipe_ctx)
@@ -490,25 +509,14 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
const struct dc_stream_state *stream = pipe_ctx->stream;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect surf_src = plane_state->src_rect;
- struct rect clip = { 0 };
+ struct rect clip, dest;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool pri_split = pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
bool sec_split = pipe_ctx->top_pipe &&
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
- bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
-
- /*
- * Need to calculate the scan direction for viewport to properly determine offset
- */
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
- flip_vert_scan_dir = true;
- flip_horz_scan_dir = true;
- } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
- flip_vert_scan_dir = true;
- else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- flip_horz_scan_dir = true;
+ bool orthogonal_rotation, flip_y_start, flip_x_start;
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
@@ -516,13 +524,10 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
sec_split = false;
}
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- rect_swap_helper(&surf_src);
-
/* The actual clip is an intersection between stream
* source and surface clip
*/
+ dest = plane_state->dst_rect;
clip.x = stream->src.x > plane_state->clip_rect.x ?
stream->src.x : plane_state->clip_rect.x;
@@ -539,84 +544,77 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
stream->src.y + stream->src.height - clip.y :
plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
+ /*
+ * Need to calculate how scan origin is shifted in vp space
+ * to correctly rotate clip and dst
+ */
+ get_vp_scan_direction(
+ plane_state->rotation,
+ plane_state->horizontal_mirror,
+ &orthogonal_rotation,
+ &flip_y_start,
+ &flip_x_start);
+
+ if (orthogonal_rotation) {
+ swap(clip.x, clip.y);
+ swap(clip.width, clip.height);
+ swap(dest.x, dest.y);
+ swap(dest.width, dest.height);
+ }
+ if (flip_x_start) {
+ clip.x = dest.x + dest.width - clip.x - clip.width;
+ dest.x = 0;
+ }
+ if (flip_y_start) {
+ clip.y = dest.y + dest.height - clip.y - clip.height;
+ dest.y = 0;
+ }
+
/* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
* num_pixels = clip.num_pix * scl_ratio
*/
- data->viewport.x = surf_src.x + (clip.x - plane_state->dst_rect.x) *
- surf_src.width / plane_state->dst_rect.width;
- data->viewport.width = clip.width *
- surf_src.width / plane_state->dst_rect.width;
-
- data->viewport.y = surf_src.y + (clip.y - plane_state->dst_rect.y) *
- surf_src.height / plane_state->dst_rect.height;
- data->viewport.height = clip.height *
- surf_src.height / plane_state->dst_rect.height;
-
- /* To transfer the x, y to correct coordinate on mirror image (camera).
- * deg 0 : transfer x,
- * deg 90 : don't need to transfer,
- * deg180 : transfer y,
- * deg270 : transfer x and y.
- * To transfer the x, y to correct coordinate on non-mirror image (video).
- * deg 0 : don't need to transfer,
- * deg 90 : transfer y,
- * deg180 : transfer x and y,
- * deg270 : transfer x.
- */
- if (pipe_ctx->plane_state->horizontal_mirror) {
- if (flip_horz_scan_dir && !flip_vert_scan_dir) {
- data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
- data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
- } else if (flip_horz_scan_dir && flip_vert_scan_dir)
- data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
- else {
- if (!flip_horz_scan_dir && !flip_vert_scan_dir)
- data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
+ data->viewport.x = surf_src.x + (clip.x - dest.x) * surf_src.width / dest.width;
+ data->viewport.width = clip.width * surf_src.width / dest.width;
+
+ data->viewport.y = surf_src.y + (clip.y - dest.y) * surf_src.height / dest.height;
+ data->viewport.height = clip.height * surf_src.height / dest.height;
+
+ /* Handle split */
+ if (pri_split || sec_split) {
+ if (orthogonal_rotation) {
+ if (flip_y_start != pri_split)
+ data->viewport.height /= 2;
+ else {
+ data->viewport.y += data->viewport.height / 2;
+ /* Ceil offset pipe */
+ data->viewport.height = (data->viewport.height + 1) / 2;
+ }
+ } else {
+ if (flip_x_start != pri_split)
+ data->viewport.width /= 2;
+ else {
+ data->viewport.x += data->viewport.width / 2;
+ /* Ceil offset pipe */
+ data->viewport.width = (data->viewport.width + 1) / 2;
+ }
}
- } else {
- if (flip_horz_scan_dir)
- data->viewport.x = surf_src.width - data->viewport.x - data->viewport.width;
- if (flip_vert_scan_dir)
- data->viewport.y = surf_src.height - data->viewport.y - data->viewport.height;
}
/* Round down, compensate in init */
data->viewport_c.x = data->viewport.x / vpc_div;
data->viewport_c.y = data->viewport.y / vpc_div;
- data->inits.h_c = (data->viewport.x % vpc_div) != 0 ?
- dc_fixpt_half : dc_fixpt_zero;
- data->inits.v_c = (data->viewport.y % vpc_div) != 0 ?
- dc_fixpt_half : dc_fixpt_zero;
+ data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+ data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
+
/* Round up, assume original video size always even dimensions */
data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
-
- /* Handle hsplit */
- if (sec_split) {
- data->viewport.x += data->viewport.width / 2;
- data->viewport_c.x += data->viewport_c.width / 2;
- /* Ceil offset pipe */
- data->viewport.width = (data->viewport.width + 1) / 2;
- data->viewport_c.width = (data->viewport_c.width + 1) / 2;
- } else if (pri_split) {
- if (data->viewport.width > 1)
- data->viewport.width /= 2;
- if (data->viewport_c.width > 1)
- data->viewport_c.width /= 2;
- }
-
- if (plane_state->rotation == ROTATION_ANGLE_90 ||
- plane_state->rotation == ROTATION_ANGLE_270) {
- rect_swap_helper(&data->viewport_c);
- rect_swap_helper(&data->viewport);
- }
}
-static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+static void calculate_recout(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
- struct rect surf_src = plane_state->src_rect;
struct rect surf_clip = plane_state->clip_rect;
bool pri_split = pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
@@ -624,10 +622,6 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- rect_swap_helper(&surf_src);
-
pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
if (stream->src.x < surf_clip.x)
pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
@@ -656,7 +650,7 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
stream->dst.y + stream->dst.height
- pipe_ctx->plane_res.scl_data.recout.y;
- /* Handle h & vsplit */
+ /* Handle h & v split, handle rotation using viewport */
if (sec_split && top_bottom_split) {
pipe_ctx->plane_res.scl_data.recout.y +=
pipe_ctx->plane_res.scl_data.recout.height / 2;
@@ -665,44 +659,14 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx, struct rect *recout_full
(pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
} else if (pri_split && top_bottom_split)
pipe_ctx->plane_res.scl_data.recout.height /= 2;
- else if (pri_split || sec_split) {
- /* HMirror XOR Secondary_pipe XOR Rotation_180 */
- bool right_view = (sec_split != plane_state->horizontal_mirror) !=
- (plane_state->rotation == ROTATION_ANGLE_180);
-
- if (plane_state->rotation == ROTATION_ANGLE_90
- || plane_state->rotation == ROTATION_ANGLE_270)
- /* Secondary_pipe XOR Rotation_270 */
- right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
-
- if (right_view) {
- pipe_ctx->plane_res.scl_data.recout.x +=
- pipe_ctx->plane_res.scl_data.recout.width / 2;
- /* Ceil offset pipe */
- pipe_ctx->plane_res.scl_data.recout.width =
- (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
- } else {
- if (pipe_ctx->plane_res.scl_data.recout.width > 1)
- pipe_ctx->plane_res.scl_data.recout.width /= 2;
- }
- }
- /* Unclipped recout offset = stream dst offset + ((surf dst offset - stream surf_src offset)
- * * 1/ stream scaling ratio) - (surf surf_src offset * 1/ full scl
- * ratio)
- */
- recout_full->x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
- * stream->dst.width / stream->src.width -
- surf_src.x * plane_state->dst_rect.width / surf_src.width
- * stream->dst.width / stream->src.width;
- recout_full->y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
- * stream->dst.height / stream->src.height -
- surf_src.y * plane_state->dst_rect.height / surf_src.height
- * stream->dst.height / stream->src.height;
-
- recout_full->width = plane_state->dst_rect.width
- * stream->dst.width / stream->src.width;
- recout_full->height = plane_state->dst_rect.height
- * stream->dst.height / stream->src.height;
+ else if (sec_split) {
+ pipe_ctx->plane_res.scl_data.recout.x +=
+ pipe_ctx->plane_res.scl_data.recout.width / 2;
+ /* Ceil offset pipe */
+ pipe_ctx->plane_res.scl_data.recout.width =
+ (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
+ } else if (pri_split)
+ pipe_ctx->plane_res.scl_data.recout.width /= 2;
}
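The split handling above rounds in opposite directions on purpose: the non-offset pipe takes width / 2 (floor), while the offset pipe starts at width / 2 and takes (width + 1) / 2 (ceil), so an odd width is covered with no gap and no overlap. A small standalone sketch of the arithmetic:

#include <stdio.h>

/* Split a span of `width` pixels between two pipes the way the recout
 * and viewport split code above does. */
static void split_span(int width, int *w_first, int *x_second, int *w_second)
{
	*w_first = width / 2;        /* floor: pipe without offset */
	*x_second = width / 2;       /* offset pipe starts here    */
	*w_second = (width + 1) / 2; /* ceil: offset pipe          */
}

int main(void)
{
	int w1, x2, w2;

	split_span(1919, &w1, &x2, &w2); /* odd width */
	printf("1919 -> [0..%d) + [%d..%d)\n", w1, x2, x2 + w2);
	/* prints: 1919 -> [0..959) + [959..1919) */
	return 0;
}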
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -715,9 +679,10 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
const int out_w = stream->dst.width;
const int out_h = stream->dst.height;
+ /* Swap surf_src height and width since scaling ratios are in recout rotation */
if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- rect_swap_helper(&surf_src);
+ swap(surf_src.height, surf_src.width);
pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction(
surf_src.width,
@@ -754,358 +719,202 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
}
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct rect *recout_full)
+static inline void adjust_vp_and_init_for_seamless_clip(
+ bool flip_scan_dir,
+ int recout_skip,
+ int src_size,
+ int taps,
+ struct fixed31_32 ratio,
+ struct fixed31_32 *init,
+ int *vp_offset,
+ int *vp_size)
{
- struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
- struct rect src = pipe_ctx->plane_state->src_rect;
- int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
- || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
- bool flip_vert_scan_dir = false, flip_horz_scan_dir = false;
-
- /*
- * Need to calculate the scan direction for viewport to make adjustments
- */
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_180) {
- flip_vert_scan_dir = true;
- flip_horz_scan_dir = true;
- } else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90)
- flip_vert_scan_dir = true;
- else if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270)
- flip_horz_scan_dir = true;
-
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
- rect_swap_helper(&src);
- rect_swap_helper(&data->viewport_c);
- rect_swap_helper(&data->viewport);
-
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270 &&
- pipe_ctx->plane_state->horizontal_mirror) {
- flip_vert_scan_dir = true;
- }
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 &&
- pipe_ctx->plane_state->horizontal_mirror) {
- flip_vert_scan_dir = false;
- }
- } else if (pipe_ctx->plane_state->horizontal_mirror)
- flip_horz_scan_dir = !flip_horz_scan_dir;
-
- /*
- * Init calculated according to formula:
- * init = (scaling_ratio + number_of_taps + 1) / 2
- * init_bot = init + scaling_ratio
- * init_c = init + truncated_vp_c_offset(from calculate viewport)
- */
- data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
-
- data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
-
- data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
-
- data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
- dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
-
- if (!flip_horz_scan_dir) {
+ if (!flip_scan_dir) {
/* Adjust for viewport end clip-off */
- if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
- int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.width += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
- int vp_clip = (src.x + src.width) / vpc_div -
- data->viewport_c.width - data->viewport_c.x;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+ if ((*vp_offset + *vp_size) < src_size) {
+ int vp_clip = src_size - *vp_size - *vp_offset;
+ int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip;
+ *vp_size += int_part < vp_clip ? int_part : vp_clip;
}
/* Adjust for non-0 viewport offset */
- if (data->viewport.x) {
+ if (*vp_offset) {
int int_part;
- data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
- data->ratios.horz, data->recout.x - recout_full->x));
- int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x;
- if (int_part < data->taps.h_taps) {
- int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ?
- (data->taps.h_taps - int_part) : data->viewport.x;
- data->viewport.x -= int_adj;
- data->viewport.width += int_adj;
+ *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
+ int_part = dc_fixpt_floor(*init) - *vp_offset;
+ if (int_part < taps) {
+ int int_adj = *vp_offset >= (taps - int_part) ?
+ (taps - int_part) : *vp_offset;
+ *vp_offset -= int_adj;
+ *vp_size += int_adj;
int_part += int_adj;
- } else if (int_part > data->taps.h_taps) {
- data->viewport.x += int_part - data->taps.h_taps;
- data->viewport.width -= int_part - data->taps.h_taps;
- int_part = data->taps.h_taps;
+ } else if (int_part > taps) {
+ *vp_offset += int_part - taps;
+ *vp_size -= int_part - taps;
+ int_part = taps;
}
- data->inits.h.value &= 0xffffffff;
- data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
- }
-
- if (data->viewport_c.x) {
- int int_part;
-
- data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
- data->ratios.horz_c, data->recout.x - recout_full->x));
- int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x;
- if (int_part < data->taps.h_taps_c) {
- int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ?
- (data->taps.h_taps_c - int_part) : data->viewport_c.x;
- data->viewport_c.x -= int_adj;
- data->viewport_c.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps_c) {
- data->viewport_c.x += int_part - data->taps.h_taps_c;
- data->viewport_c.width -= int_part - data->taps.h_taps_c;
- int_part = data->taps.h_taps_c;
- }
- data->inits.h_c.value &= 0xffffffff;
- data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
+ init->value &= 0xffffffff;
+ *init = dc_fixpt_add_int(*init, int_part);
}
} else {
/* Adjust for non-0 viewport offset */
- if (data->viewport.x) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h, data->ratios.horz));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.width += int_part < data->viewport.x ? int_part : data->viewport.x;
- data->viewport.x -= int_part < data->viewport.x ? int_part : data->viewport.x;
- }
- if (data->viewport_c.x) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c));
+ if (*vp_offset) {
+ int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.width += int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
- data->viewport_c.x -= int_part < data->viewport_c.x ? int_part : data->viewport_c.x;
+ *vp_size += int_part < *vp_offset ? int_part : *vp_offset;
+ *vp_offset -= int_part < *vp_offset ? int_part : *vp_offset;
}
/* Adjust for viewport end clip-off */
- if ((data->viewport.x + data->viewport.width) < (src.x + src.width)) {
+ if ((*vp_offset + *vp_size) < src_size) {
int int_part;
- int end_offset = src.x + src.width
- - data->viewport.x - data->viewport.width;
+ int end_offset = src_size - *vp_offset - *vp_size;
/*
* this is init if vp had no offset, keep in mind this is from the
* right side of vp due to scan direction
*/
- data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int(
- data->ratios.horz, data->recout.x - recout_full->x));
+ *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
/*
* this is the difference between first pixel of viewport available to read
* and init position, taking into account scan direction
*/
- int_part = dc_fixpt_floor(data->inits.h) - end_offset;
- if (int_part < data->taps.h_taps) {
- int int_adj = end_offset >= (data->taps.h_taps - int_part) ?
- (data->taps.h_taps - int_part) : end_offset;
- data->viewport.width += int_adj;
+ int_part = dc_fixpt_floor(*init) - end_offset;
+ if (int_part < taps) {
+ int int_adj = end_offset >= (taps - int_part) ?
+ (taps - int_part) : end_offset;
+ *vp_size += int_adj;
int_part += int_adj;
- } else if (int_part > data->taps.h_taps) {
- data->viewport.width += int_part - data->taps.h_taps;
- int_part = data->taps.h_taps;
+ } else if (int_part > taps) {
+ *vp_size += int_part - taps;
+ int_part = taps;
}
- data->inits.h.value &= 0xffffffff;
- data->inits.h = dc_fixpt_add_int(data->inits.h, int_part);
+ init->value &= 0xffffffff;
+ *init = dc_fixpt_add_int(*init, int_part);
}
-
- if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div) {
- int int_part;
- int end_offset = (src.x + src.width) / vpc_div
- - data->viewport_c.x - data->viewport_c.width;
-
- /*
- * this is init if vp had no offset, keep in mind this is from the
- * right side of vp due to scan direction
- */
- data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int(
- data->ratios.horz_c, data->recout.x - recout_full->x));
- /*
- * this is the difference between first pixel of viewport available to read
- * and init position, taking into account scan direction
- */
- int_part = dc_fixpt_floor(data->inits.h_c) - end_offset;
- if (int_part < data->taps.h_taps_c) {
- int int_adj = end_offset >= (data->taps.h_taps_c - int_part) ?
- (data->taps.h_taps_c - int_part) : end_offset;
- data->viewport_c.width += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.h_taps_c) {
- data->viewport_c.width += int_part - data->taps.h_taps_c;
- int_part = data->taps.h_taps_c;
- }
- data->inits.h_c.value &= 0xffffffff;
- data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part);
- }
-
}
- if (!flip_vert_scan_dir) {
- /* Adjust for viewport end clip-off */
- if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
- int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v, data->ratios.vert));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.height += int_part < vp_clip ? int_part : vp_clip;
- }
- if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
- int vp_clip = (src.y + src.height) / vpc_div -
- data->viewport_c.height - data->viewport_c.y;
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
-
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip;
- }
-
- /* Adjust for non-0 viewport offset */
- if (data->viewport.y) {
- int int_part;
-
- data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
- data->ratios.vert, data->recout.y - recout_full->y));
- int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y;
- if (int_part < data->taps.v_taps) {
- int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ?
- (data->taps.v_taps - int_part) : data->viewport.y;
- data->viewport.y -= int_adj;
- data->viewport.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps) {
- data->viewport.y += int_part - data->taps.v_taps;
- data->viewport.height -= int_part - data->taps.v_taps;
- int_part = data->taps.v_taps;
- }
- data->inits.v.value &= 0xffffffff;
- data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
- }
-
- if (data->viewport_c.y) {
- int int_part;
-
- data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
- data->ratios.vert_c, data->recout.y - recout_full->y));
- int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y;
- if (int_part < data->taps.v_taps_c) {
- int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ?
- (data->taps.v_taps_c - int_part) : data->viewport_c.y;
- data->viewport_c.y -= int_adj;
- data->viewport_c.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps_c) {
- data->viewport_c.y += int_part - data->taps.v_taps_c;
- data->viewport_c.height -= int_part - data->taps.v_taps_c;
- int_part = data->taps.v_taps_c;
- }
- data->inits.v_c.value &= 0xffffffff;
- data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
- }
- } else {
- /* Adjust for non-0 viewport offset */
- if (data->viewport.y) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v, data->ratios.vert));
+}
- int_part = int_part > 0 ? int_part : 0;
- data->viewport.height += int_part < data->viewport.y ? int_part : data->viewport.y;
- data->viewport.y -= int_part < data->viewport.y ? int_part : data->viewport.y;
- }
- if (data->viewport_c.y) {
- int int_part = dc_fixpt_floor(
- dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c));
+static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
+{
+ const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
+ struct rect src = pipe_ctx->plane_state->src_rect;
+ int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
+ int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
+ || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+ bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
- int_part = int_part > 0 ? int_part : 0;
- data->viewport_c.height += int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
- data->viewport_c.y -= int_part < data->viewport_c.y ? int_part : data->viewport_c.y;
- }
+ /*
+ * Need to calculate the scan direction for viewport to make adjustments
+ */
+ get_vp_scan_direction(
+ plane_state->rotation,
+ plane_state->horizontal_mirror,
+ &orthogonal_rotation,
+ &flip_vert_scan_dir,
+ &flip_horz_scan_dir);
+
+ /* Calculate src rect rotation adjusted to recout space */
+ surf_size_h = src.x + src.width;
+ surf_size_v = src.y + src.height;
+ if (flip_horz_scan_dir)
+ src.x = 0;
+ if (flip_vert_scan_dir)
+ src.y = 0;
+ if (orthogonal_rotation) {
+ swap(src.x, src.y);
+ swap(src.width, src.height);
+ }
+
+ /* Recout matching initial vp offset = recout_offset - (stream dst offset +
+ * ((surf dst offset - stream src offset) * 1 / stream scaling ratio)
+ * - (surf src offset * 1 / full scl ratio))
+ */
+ recout_skip_h = data->recout.x - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+ * stream->dst.width / stream->src.width -
+ src.x * plane_state->dst_rect.width / src.width
+ * stream->dst.width / stream->src.width);
+ recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+ * stream->dst.height / stream->src.height -
+ src.y * plane_state->dst_rect.height / src.height
+ * stream->dst.height / stream->src.height);
+ if (orthogonal_rotation)
+ swap(recout_skip_h, recout_skip_v);
+ /*
+ * Init calculated according to formula:
+ * init = (scaling_ratio + number_of_taps + 1) / 2
+ * init_bot = init + scaling_ratio
+ * init_c = init + truncated_vp_c_offset(from calculate viewport)
+ */
+ data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
- /* Adjust for viewport end clip-off */
- if ((data->viewport.y + data->viewport.height) < (src.y + src.height)) {
- int int_part;
- int end_offset = src.y + src.height
- - data->viewport.y - data->viewport.height;
+ data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
- /*
- * this is init if vp had no offset, keep in mind this is from the
- * right side of vp due to scan direction
- */
- data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int(
- data->ratios.vert, data->recout.y - recout_full->y));
- /*
- * this is the difference between first pixel of viewport available to read
- * and init position, taking into account scan direction
- */
- int_part = dc_fixpt_floor(data->inits.v) - end_offset;
- if (int_part < data->taps.v_taps) {
- int int_adj = end_offset >= (data->taps.v_taps - int_part) ?
- (data->taps.v_taps - int_part) : end_offset;
- data->viewport.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps) {
- data->viewport.height += int_part - data->taps.v_taps;
- int_part = data->taps.v_taps;
- }
- data->inits.v.value &= 0xffffffff;
- data->inits.v = dc_fixpt_add_int(data->inits.v, int_part);
- }
+ data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
- if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div) {
- int int_part;
- int end_offset = (src.y + src.height) / vpc_div
- - data->viewport_c.y - data->viewport_c.height;
+ data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
+ dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
- /*
- * this is init if vp had no offset, keep in mind this is from the
- * right side of vp due to scan direction
- */
- data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int(
- data->ratios.vert_c, data->recout.y - recout_full->y));
- /*
- * this is the difference between first pixel of viewport available to read
- * and init position, taking into account scan direction
- */
- int_part = dc_fixpt_floor(data->inits.v_c) - end_offset;
- if (int_part < data->taps.v_taps_c) {
- int int_adj = end_offset >= (data->taps.v_taps_c - int_part) ?
- (data->taps.v_taps_c - int_part) : end_offset;
- data->viewport_c.height += int_adj;
- int_part += int_adj;
- } else if (int_part > data->taps.v_taps_c) {
- data->viewport_c.height += int_part - data->taps.v_taps_c;
- int_part = data->taps.v_taps_c;
- }
- data->inits.v_c.value &= 0xffffffff;
- data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part);
- }
- }
+ /*
+ * Taps, inits and scaling ratios are in recout space; need to rotate
+ * to viewport rotation before adjustment
+ */
+ adjust_vp_and_init_for_seamless_clip(
+ flip_horz_scan_dir,
+ recout_skip_h,
+ surf_size_h,
+ orthogonal_rotation ? data->taps.v_taps : data->taps.h_taps,
+ orthogonal_rotation ? data->ratios.vert : data->ratios.horz,
+ orthogonal_rotation ? &data->inits.v : &data->inits.h,
+ &data->viewport.x,
+ &data->viewport.width);
+ adjust_vp_and_init_for_seamless_clip(
+ flip_horz_scan_dir,
+ recout_skip_h,
+ surf_size_h / vpc_div,
+ orthogonal_rotation ? data->taps.v_taps_c : data->taps.h_taps_c,
+ orthogonal_rotation ? data->ratios.vert_c : data->ratios.horz_c,
+ orthogonal_rotation ? &data->inits.v_c : &data->inits.h_c,
+ &data->viewport_c.x,
+ &data->viewport_c.width);
+ adjust_vp_and_init_for_seamless_clip(
+ flip_vert_scan_dir,
+ recout_skip_v,
+ surf_size_v,
+ orthogonal_rotation ? data->taps.h_taps : data->taps.v_taps,
+ orthogonal_rotation ? data->ratios.horz : data->ratios.vert,
+ orthogonal_rotation ? &data->inits.h : &data->inits.v,
+ &data->viewport.y,
+ &data->viewport.height);
+ adjust_vp_and_init_for_seamless_clip(
+ flip_vert_scan_dir,
+ recout_skip_v,
+ surf_size_v / vpc_div,
+ orthogonal_rotation ? data->taps.h_taps_c : data->taps.v_taps_c,
+ orthogonal_rotation ? data->ratios.horz_c : data->ratios.vert_c,
+ orthogonal_rotation ? &data->inits.h_c : &data->inits.v_c,
+ &data->viewport_c.y,
+ &data->viewport_c.height);
/* Interlaced inits based on final vert inits */
data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
- if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
- pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
- rect_swap_helper(&data->viewport_c);
- rect_swap_helper(&data->viewport);
- }
}
bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
- struct rect recout_full = { 0 };
bool res = false;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Important: scaling ratio calculation requires pixel format,
@@ -1115,9 +924,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
pipe_ctx->plane_state->format);
- if (pipe_ctx->stream->timing.flags.INTERLACE)
- pipe_ctx->stream->dst.height *= 2;
-
calculate_scaling_ratios(pipe_ctx);
calculate_viewport(pipe_ctx);
@@ -1125,7 +931,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16)
return false;
- calculate_recout(pipe_ctx, &recout_full);
+ calculate_recout(pipe_ctx);
/**
* Setting line buffer pixel depth to 24bpp yields banding
@@ -1138,9 +944,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
- if (pipe_ctx->stream->timing.flags.INTERLACE)
- pipe_ctx->plane_res.scl_data.v_active *= 2;
-
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL)
@@ -1169,7 +972,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
if (res)
/* May need to re-check lb size after this in some obscure scenario */
- calculate_inits_and_adj_vp(pipe_ctx, &recout_full);
+ calculate_inits_and_adj_vp(pipe_ctx);
DC_LOG_SCALER(
"%s: Viewport:\nheight:%d width:%d x:%d "
@@ -1185,9 +988,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
plane_state->dst_rect.x,
plane_state->dst_rect.y);
- if (pipe_ctx->stream->timing.flags.INTERLACE)
- pipe_ctx->stream->dst.height /= 2;
-
return res;
}
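The init formula quoted in the hunk above, init = (scaling_ratio + number_of_taps + 1) / 2 with init_bot = init + scaling_ratio, is easy to sanity-check with plain arithmetic. A floating-point model of it — the driver actually computes this in fixed31_32 fixed point truncated to 19 fractional bits via dc_fixpt_truncate(), so real results may differ in the lowest fractional bits:

#include <stdio.h>

/* Model of: init = (scaling_ratio + number_of_taps + 1) / 2
 * and:      init_bot = init + scaling_ratio
 * Doubles are close enough here to illustrate the shape of the math. */
static double scl_init(double ratio, int taps)
{
	return (ratio + taps + 1.0) / 2.0;
}

int main(void)
{
	double ratio = 1.5; /* e.g. 2880 src pixels scaled into 1920 dst pixels */
	int taps = 4;
	double init = scl_init(ratio, taps);

	printf("init = %.3f, init_bot = %.3f\n", init, init + ratio);
	/* prints: init = 3.250, init_bot = 4.750 */
	return 0;
}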
@@ -1382,6 +1182,9 @@ bool dc_add_plane_to_context(
return false;
}
+ tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
+ ASSERT(tail_pipe);
+
free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -1399,10 +1202,6 @@ bool dc_add_plane_to_context(
free_pipe->plane_state = plane_state;
if (head_pipe != free_pipe) {
-
- tail_pipe = resource_get_tail_pipe_for_stream(&context->res_ctx, stream);
- ASSERT(tail_pipe);
-
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
@@ -1648,6 +1447,14 @@ static bool are_stream_backends_same(
return true;
}
+/**
+ * dc_is_stream_unchanged() - Compare two stream states for equivalence.
+ *
+ * Checks if there is a difference between the two states
+ * that would require a mode change.
+ *
+ * Does not compare cursor position or attributes.
+ */
bool dc_is_stream_unchanged(
struct dc_stream_state *old_stream, struct dc_stream_state *stream)
{
@@ -1658,6 +1465,9 @@ bool dc_is_stream_unchanged(
return true;
}
+/**
+ * dc_is_stream_scaling_unchanged() - Compare scaling rectangles of two streams.
+ */
bool dc_is_stream_scaling_unchanged(
struct dc_stream_state *old_stream, struct dc_stream_state *stream)
{
@@ -1817,16 +1627,19 @@ bool resource_is_stream_unchanged(
return false;
}
+/**
+ * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
+ */
enum dc_status dc_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *stream)
{
- struct dc_context *dc_ctx = dc->ctx;
enum dc_status res;
+ DC_LOGGER_INIT(dc->ctx->logger);
if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
- DC_ERROR("Max streams reached, can't add stream %p !\n", stream);
+ DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
return DC_ERROR_UNEXPECTED;
}
@@ -1836,11 +1649,14 @@ enum dc_status dc_add_stream_to_ctx(
res = dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream);
if (res != DC_OK)
- DC_ERROR("Adding stream %p to context failed with err %d!\n", stream, res);
+ DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
return res;
}
+/**
+ * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
+ */
enum dc_status dc_remove_stream_from_ctx(
struct dc *dc,
struct dc_state *new_ctx,
@@ -2002,6 +1818,8 @@ enum dc_status resource_map_pool_resources(
}
*/
+ calculate_phy_pix_clks(stream);
+
/* acquire new resources */
pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
@@ -2059,6 +1877,12 @@ enum dc_status resource_map_pool_resources(
return DC_ERROR_UNEXPECTED;
}
+/**
+ * dc_resource_state_copy_construct_current() - Create a new dc_state from the current state
+ * The copy is shallow; refcounts on existing streams and planes are incremented.
+ * @dc: the copy is taken from dc->current_state
+ * @dst_ctx: the state is copied into this
+ */
void dc_resource_state_copy_construct_current(
const struct dc *dc,
struct dc_state *dst_ctx)
@@ -2071,9 +1895,17 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
- dst_ctx->dis_clk = dc->res_pool->dccg;
+ dst_ctx->dccg = dc->res_pool->clk_mgr;
}
+/**
+ * dc_validate_global_state() - Determine if HW can support a given state
+ * Checks HW resource availability and bandwidth requirements.
+ * @dc: dc struct for this driver
+ * @new_ctx: state to be validated
+ *
+ * Return: DC_OK if the result can be programmed. Otherwise, an error code.
+ */
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx)
@@ -2401,113 +2233,15 @@ static void set_vendor_info_packet(
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
{
- uint32_t length = 0;
- bool hdmi_vic_mode = false;
- uint8_t checksum = 0;
- uint32_t i = 0;
- enum dc_timing_3d_format format;
- // Can be different depending on packet content /*todo*/
- // unsigned int length = pPathMode->dolbyVision ? 24 : 5;
-
- info_packet->valid = false;
-
- format = stream->timing.timing_3d_format;
- if (stream->view_format == VIEW_3D_FORMAT_NONE)
- format = TIMING_3D_FORMAT_NONE;
-
- /* Can be different depending on packet content */
- length = 5;
-
- if (stream->timing.hdmi_vic != 0
- && stream->timing.h_total >= 3840
- && stream->timing.v_total >= 2160)
- hdmi_vic_mode = true;
-
- /* According to HDMI 1.4a CTS, VSIF should be sent
- * for both 3D stereo and HDMI VIC modes.
- * For all other modes, there is no VSIF sent. */
+ /* Vendor-specific info packet (VSIF) */
- if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+ /* The vendor-specific packet comes precomputed in stream->vsp_infopacket.
+ * Return if it is not valid.
+ */
+ if (!stream->vsp_infopacket.valid)
return;
- /* 24bit IEEE Registration identifier (0x000c03). LSB first. */
- info_packet->sb[1] = 0x03;
- info_packet->sb[2] = 0x0C;
- info_packet->sb[3] = 0x00;
-
- /*PB4: 5 lower bytes = 0 (reserved). 3 higher bits = HDMI_Video_Format.
- * The value for HDMI_Video_Format are:
- * 0x0 (0b000) - No additional HDMI video format is presented in this
- * packet
- * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
- * parameter follows
- * 0x2 (0b010) - 3D format indication present. 3D_Structure and
- * potentially 3D_Ext_Data follows
- * 0x3..0x7 (0b011..0b111) - reserved for future use */
- if (format != TIMING_3D_FORMAT_NONE)
- info_packet->sb[4] = (2 << 5);
- else if (hdmi_vic_mode)
- info_packet->sb[4] = (1 << 5);
-
- /* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
- * 4 lower bites = 0 (reserved). 4 higher bits = 3D_Structure.
- * The value for 3D_Structure are:
- * 0x0 - Frame Packing
- * 0x1 - Field Alternative
- * 0x2 - Line Alternative
- * 0x3 - Side-by-Side (full)
- * 0x4 - L + depth
- * 0x5 - L + depth + graphics + graphics-depth
- * 0x6 - Top-and-Bottom
- * 0x7 - Reserved for future use
- * 0x8 - Side-by-Side (Half)
- * 0x9..0xE - Reserved for future use
- * 0xF - Not used */
- switch (format) {
- case TIMING_3D_FORMAT_HW_FRAME_PACKING:
- case TIMING_3D_FORMAT_SW_FRAME_PACKING:
- info_packet->sb[5] = (0x0 << 4);
- break;
-
- case TIMING_3D_FORMAT_SIDE_BY_SIDE:
- case TIMING_3D_FORMAT_SBS_SW_PACKED:
- info_packet->sb[5] = (0x8 << 4);
- length = 6;
- break;
-
- case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
- case TIMING_3D_FORMAT_TB_SW_PACKED:
- info_packet->sb[5] = (0x6 << 4);
- break;
-
- default:
- break;
- }
-
- /*PB5: If PB4 is set to 0x1 (extended resolution format)
- * fill PB5 with the correct HDMI VIC code */
- if (hdmi_vic_mode)
- info_packet->sb[5] = stream->timing.hdmi_vic;
-
- /* Header */
- info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; /* VSIF packet type. */
- info_packet->hb1 = 0x01; /* Version */
-
- /* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
- info_packet->hb2 = (uint8_t) (length);
-
- /* Calculate checksum */
- checksum = 0;
- checksum += info_packet->hb0;
- checksum += info_packet->hb1;
- checksum += info_packet->hb2;
-
- for (i = 1; i <= length; i++)
- checksum += info_packet->sb[i];
-
- info_packet->sb[0] = (uint8_t) (0x100 - checksum);
-
- info_packet->valid = true;
+ *info_packet = stream->vsp_infopacket;
}
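The checksum logic the old VSIF builder carried (and which now lives with whoever fills in stream->vsp_infopacket) is the standard HDMI infoframe rule: the three header bytes plus all payload bytes, including the checksum byte itself, must sum to 0 mod 256. A standalone sketch using the same OUI and default length the removed code used — 0x81 as the vendor infoframe type byte is an assumption taken from the HDMI spec, not from this patch:

#include <stdio.h>
#include <stdint.h>

/* Standard HDMI infoframe checksum: hb0 + hb1 + hb2 + sb[0..length]
 * must sum to 0 mod 256, so sb[0] = 0x100 - (sum of the rest). */
static uint8_t infoframe_checksum(uint8_t hb0, uint8_t hb1, uint8_t hb2,
				  const uint8_t *sb, int length)
{
	uint32_t sum = hb0 + hb1 + hb2;

	for (int i = 1; i <= length; i++)
		sum += sb[i];
	return (uint8_t)(0x100 - (sum & 0xFF));
}

int main(void)
{
	/* Minimal VSIF-like payload: IEEE OUI 0x000c03, LSB first. */
	uint8_t sb[28] = { 0, 0x03, 0x0C, 0x00, 0x00, 0x00 };
	uint8_t hb0 = 0x81, hb1 = 0x01, hb2 = 5; /* type, version, length */

	sb[0] = infoframe_checksum(hb0, hb1, hb2, sb, hb2);
	printf("checksum byte sb[0] = 0x%02X\n", sb[0]); /* 0x6A here */
	return 0;
}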
static void set_spd_info_packet(
@@ -2563,10 +2297,6 @@ void dc_resource_state_destruct(struct dc_state *context)
}
}
-/*
- * Copy src_ctx into dst_ctx and retain all surfaces and streams referenced
- * by the src_ctx
- */
void dc_resource_state_copy_construct(
const struct dc_state *src_ctx,
struct dc_state *dst_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 2ac848a106ba..66e5c4623a49 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -100,12 +100,11 @@ static void construct(struct dc_stream_state *stream,
/* EDID CAP translation for HDMI 2.0 */
stream->timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
- stream->status.link = stream->sink->link;
-
update_stream_signal(stream);
stream->out_transfer_func = dc_create_transfer_func();
stream->out_transfer_func->type = TF_TYPE_BYPASS;
+ stream->out_transfer_func->ctx = stream->ctx;
}
static void destruct(struct dc_stream_state *stream)
@@ -171,7 +170,7 @@ struct dc_stream_status *dc_stream_get_status(
}
/**
- * Update the cursor attributes and set cursor surface address
+ * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 8fb3aefd195c..c60c9b4c3075 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -44,6 +44,7 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state
plane_state->in_transfer_func = dc_create_transfer_func();
plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+ plane_state->in_transfer_func->ctx = ctx;
}
static void destruct(struct dc_plane_state *plane_state)
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index b57fa61b3034..4b5bbb13ce7f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -36,9 +36,10 @@
#include "inc/hw_sequencer.h"
#include "inc/compressor.h"
+#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.68"
+#define DC_VER "3.2.08"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -47,13 +48,6 @@
/*******************************************************************************
* Display Core Interfaces
******************************************************************************/
-struct dmcu_version {
- unsigned int date;
- unsigned int month;
- unsigned int year;
- unsigned int interface_version;
-};
-
struct dc_versions {
const char *dc_ver;
struct dmcu_version dmcu_version;
@@ -250,8 +244,6 @@ struct dc_debug_options {
bool disable_dmcu;
bool disable_psr;
bool force_abm_enable;
- bool disable_hbup_pg;
- bool disable_dpp_pg;
bool disable_stereo_support;
bool vsr_support;
bool performance_trace;
@@ -305,11 +297,6 @@ struct dc {
struct hw_sequencer_funcs hwss;
struct dce_hwseq *hwseq;
- /* temp store of dm_pp_display_configuration
- * to compare to see if display config changed
- */
- struct dm_pp_display_configuration prev_display_config;
-
bool optimized_required;
/* FBC compressor */
@@ -755,5 +742,6 @@ void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state);
void dc_resume(struct dc *dc);
+bool dc_is_dmcu_initialized(struct dc *dc);
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 8130b95ccc53..a8b3cedf9431 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -86,6 +86,10 @@ struct dc_vbios_funcs {
bool (*is_accelerated_mode)(
struct dc_bios *bios);
+ bool (*is_active_display)(
+ struct dc_bios *bios,
+ enum signal_type signal,
+ const struct connector_device_tag_info *device_tag);
void (*set_scratch_critical_state)(
struct dc_bios *bios,
bool state);
@@ -141,6 +145,7 @@ struct dc_vbios_funcs {
};
struct bios_registers {
+ uint32_t BIOS_SCRATCH_0;
uint32_t BIOS_SCRATCH_3;
uint32_t BIOS_SCRATCH_6;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 7825e4b5e97c..9ddfe4c6938b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -358,15 +358,16 @@ union dc_tiling_info {
} gfx8;
struct {
+ enum swizzle_mode_values swizzle;
unsigned int num_pipes;
- unsigned int num_banks;
+ unsigned int max_compressed_frags;
unsigned int pipe_interleave;
+
+ unsigned int num_banks;
unsigned int num_shader_engines;
unsigned int num_rb_per_se;
- unsigned int max_compressed_frags;
bool shaderEnable;
- enum swizzle_mode_values swizzle;
bool meta_linear;
bool rb_aligned;
bool pipe_aligned;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 3bfdccceb524..29f19d57ff7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -128,8 +128,10 @@ struct dc_link {
const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
-/*
- * Return an enumerated dc_link. dc_link order is constant and determined at
+/**
+ * dc_get_link_at_index() - Return an enumerated dc_link.
+ *
+ * The dc_link order is constant and determined at
* boot time. Links cannot be created or destroyed.
* Use dc_get_caps() to get the number of links.
*/
@@ -138,9 +140,14 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
return dc->links[link_index];
}
-/* Set backlight level of an embedded panel (eDP, LVDS). */
-bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
- uint32_t frame_ramp, const struct dc_stream_state *stream);
+/* Set backlight level of an embedded panel (eDP, LVDS).
+ * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
+ * and 16 bit fractional, where 1.0 is max backlight value.
+ */
+bool dc_link_set_backlight_level(const struct dc_link *dc_link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp,
+ const struct dc_stream_state *stream);
int dc_link_get_backlight_level(const struct dc_link *dc_link);
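Callers that still start from an 8-bit brightness value can convert to this u16.16 format themselves using the same scheme the removed dce_abm helper used: multiply by 0x10101 (a 24-bit approximation of 256/255), then round off the low byte, so that 0xFF lands exactly on 0x10000 = 1.0. A standalone sketch:

#include <stdio.h>
#include <stdint.h>

/* Convert an 8-bit backlight level to unsigned 16.16 fixed point:
 * level * 0x10101 yields a 24-bit value; the top 16 bits are the
 * fraction, and the MSB of the discarded low byte is a rounding bit. */
static uint32_t backlight_8_to_u16_16(uint8_t level)
{
	uint32_t b24 = (uint32_t)level * 0x10101;
	uint32_t rounding_bit = (b24 >> 7) & 1;

	return (b24 >> 8) + rounding_bit;
}

int main(void)
{
	printf("0xFF -> 0x%05X\n", backlight_8_to_u16_16(0xFF)); /* 0x10000 */
	printf("0xEF -> 0x%05X\n", backlight_8_to_u16_16(0xEF)); /* 0x0EFF0 */
	printf("0x00 -> 0x%05X\n", backlight_8_to_u16_16(0x00)); /* 0x00000 */
	return 0;
}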
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index c5bd1fbb6982..be34d638e15d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -56,6 +56,7 @@ struct dc_stream_state {
struct dc_crtc_timing_adjust adjust;
struct dc_info_packet vrr_infopacket;
struct dc_info_packet vsc_infopacket;
+ struct dc_info_packet vsp_infopacket;
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -104,8 +105,6 @@ struct dc_stream_state {
bool dpms_off;
bool apply_edp_fast_boot_optimization;
- struct dc_stream_status status;
-
struct dc_cursor_attributes cursor_attributes;
struct dc_cursor_position cursor_position;
uint32_t sdr_white_level; // for boosting (SDR) cursor in HDR mode
@@ -131,11 +130,13 @@ struct dc_stream_update {
struct dc_crtc_timing_adjust *adjust;
struct dc_info_packet *vrr_infopacket;
struct dc_info_packet *vsc_infopacket;
+ struct dc_info_packet *vsp_infopacket;
bool *dpms_off;
struct colorspace_transform *gamut_remap;
enum dc_color_space *output_color_space;
+ enum dc_dither_option *dither_option;
struct dc_csc_transform *output_csc_transform;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 6e12d640d020..0b20ae23f169 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -73,10 +73,18 @@ struct hw_asic_id {
void *atombios_base_address;
};
+struct dc_perf_trace {
+ unsigned long read_count;
+ unsigned long write_count;
+ unsigned long last_entry_read;
+ unsigned long last_entry_write;
+};
+
struct dc_context {
struct dc *dc;
void *driver_context; /* e.g. amdgpu_device */
+ struct dc_perf_trace *perf_trace;
void *cgs_device;
enum dce_environment dce_environment;
@@ -191,7 +199,6 @@ union display_content_support {
};
struct dc_panel_patch {
- unsigned int disconnect_delay;
unsigned int dppowerup_delay;
unsigned int extra_t12_ms;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 8f7f0e8b341f..6d7b64a743ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -28,7 +28,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
+dce_clk_mgr.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 29294db1a96b..2a342eae80fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -54,7 +54,7 @@
#define MCP_DISABLE_ABM_IMMEDIATELY 255
-static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
+static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
{
uint64_t current_backlight;
uint32_t round_result;
@@ -103,45 +103,21 @@ static unsigned int get_current_backlight_16_bit(struct dce_abm *abm_dce)
return (uint32_t)(current_backlight);
}
-static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
+static void driver_set_backlight_level(struct dce_abm *abm_dce,
+ uint32_t backlight_pwm_u16_16)
{
- uint32_t backlight_24bit;
- uint32_t backlight_17bit;
uint32_t backlight_16bit;
uint32_t masked_pwm_period;
- uint8_t rounding_bit;
uint8_t bit_count;
uint64_t active_duty_cycle;
uint32_t pwm_period_bitcnt;
/*
- * 1. Convert 8-bit value to 17 bit U1.16 format
- * (1 integer, 16 fractional bits)
- */
-
- /* 1.1 multiply 8 bit value by 0x10101 to get a 24 bit value,
- * effectively multiplying value by 256/255
- * eg. for a level of 0xEF, backlight_24bit = 0xEF * 0x10101 = 0xEFEFEF
- */
- backlight_24bit = level * 0x10101;
-
- /* 1.2 The upper 16 bits of the 24 bit value is the fraction, lower 8
- * used for rounding, take most significant bit of fraction for
- * rounding, e.g. for 0xEFEFEF, rounding bit is 1
- */
- rounding_bit = (backlight_24bit >> 7) & 1;
-
- /* 1.3 Add the upper 16 bits of the 24 bit value with the rounding bit
- * resulting in a 17 bit value e.g. 0xEFF0 = (0xEFEFEF >> 8) + 1
- */
- backlight_17bit = (backlight_24bit >> 8) + rounding_bit;
-
- /*
- * 2. Find 16 bit backlight active duty cycle, where 0 <= backlight
+ * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight
* active duty cycle <= backlight period
*/
- /* 2.1 Apply bitmask for backlight period value based on value of BITCNT
+ /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
*/
REG_GET_2(BL_PWM_PERIOD_CNTL,
BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
@@ -155,13 +131,13 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
/* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
- /* 2.2 Calculate integer active duty cycle required upper 16 bits
+ /* 1.2 Calculate integer active duty cycle; the upper 16 bits
* contain the integer component, the lower 16 bits the fractional
* component of the active duty cycle, e.g. 0x21BDC0 = 0xEFF0 * 0x24
*/
- active_duty_cycle = backlight_17bit * masked_pwm_period;
+ active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
- /* 2.3 Calculate 16 bit active duty cycle from integer and fractional
+ /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
* components: shift by bitCount, then mask 16 bits and add the rounding
* bit from the MSB of the fraction, e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFFF) + 0
*/
@@ -170,23 +146,23 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
/*
- * 3. Program register with updated value
+ * 2. Program register with updated value
*/
- /* 3.1 Lock group 2 backlight registers */
+ /* 2.1 Lock group 2 backlight registers */
REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
BL_PWM_GRP1_REG_LOCK, 1);
- // 3.2 Write new active duty cycle
+ // 2.2 Write new active duty cycle
REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
- /* 3.3 Unlock group 2 backlight registers */
+ /* 2.3 Unlock group 2 backlight registers */
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
- /* 5.4.4 Wait for pending bit to be cleared */
+ /* 3 Wait for pending bit to be cleared */
REG_WAIT(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
1, 10000);
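Plugging the comments' example numbers through driver_set_backlight_level()'s arithmetic: 0xEFF0 * 0x24 = 0x21BDC0, shifted right by the 6-bit BITCNT and masked to 16 bits gives 0x86F7, with a rounding bit of 0. A standalone model of that computation, assuming only what the hunk above shows:

#include <stdio.h>
#include <stdint.h>

/* Model of the duty-cycle math above:
 * duty = (level_u16_16 * masked_period) >> bitcnt, masked to 16 bits,
 * plus a rounding bit taken from the MSB of the discarded fraction. */
static uint32_t duty_cycle(uint32_t level_u16_16, uint32_t masked_period,
			   unsigned int bit_count)
{
	uint64_t active = (uint64_t)level_u16_16 * masked_period;
	uint32_t duty = (uint32_t)(active >> bit_count) & 0xFFFF;

	duty += (uint32_t)(active >> (bit_count - 1)) & 0x1;
	return duty;
}

int main(void)
{
	/* 0xEFF0 * 0x24 = 0x21BDC0; (0x21BDC0 >> 6) & 0xFFFF = 0x86F7 */
	printf("duty = 0x%04X\n", duty_cycle(0xEFF0, 0x24, 6));
	return 0;
}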
@@ -194,16 +170,21 @@ static void driver_set_backlight_level(struct dce_abm *abm_dce, uint32_t level)
static void dmcu_set_backlight_level(
struct dce_abm *abm_dce,
- uint32_t level,
+ uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp,
uint32_t controller_id)
{
- unsigned int backlight_16_bit = (level * 0x10101) >> 8;
- unsigned int backlight_17_bit = backlight_16_bit +
- (((backlight_16_bit & 0x80) >> 7) & 1);
+ unsigned int backlight_8_bit = 0;
uint32_t rampingBoundary = 0xFFFF;
uint32_t s2;
+ if (backlight_pwm_u16_16 & 0x10000)
+ // Check for max backlight condition
+ backlight_8_bit = 0xFF;
+ else
+ // Take MSB of fractional part since backlight is not max
+ backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
+
/* set ramping boundary */
REG_WRITE(MASTER_COMM_DATA_REG1, rampingBoundary);
@@ -220,7 +201,7 @@ static void dmcu_set_backlight_level(
0, 1, 80000);
/* setDMCUParam_BL */
- REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_17_bit);
+ REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
/* write ramp */
if (controller_id == 0)
@@ -237,9 +218,9 @@ static void dmcu_set_backlight_level(
s2 = REG_READ(BIOS_SCRATCH_2);
s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
- level &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+ backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
- s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+ s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
REG_WRITE(BIOS_SCRATCH_2, s2);
}
@@ -247,7 +228,7 @@ static void dmcu_set_backlight_level(
static void dce_abm_init(struct abm *abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- unsigned int backlight = get_current_backlight_16_bit(abm_dce);
+ unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@@ -284,12 +265,26 @@ static void dce_abm_init(struct abm *abm)
ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
}
-static unsigned int dce_abm_get_current_backlight_8_bit(struct abm *abm)
+static unsigned int dce_abm_get_current_backlight(struct abm *abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
- return (backlight >> 8);
+ /* return backlight in hardware format which is unsigned 17 bits, with
+ * 1 bit integer and 16 bit fractional
+ */
+ return backlight;
+}
+
+static unsigned int dce_abm_get_target_backlight(struct abm *abm)
+{
+ struct dce_abm *abm_dce = TO_DCE_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+ /* return backlight in hardware format which is unsigned 17 bits, with
+ * 1 bit integer and 16 bit fractional
+ */
+ return backlight;
}
static bool dce_abm_set_level(struct abm *abm, uint32_t level)
@@ -396,9 +391,9 @@ static bool dce_abm_init_backlight(struct abm *abm)
return true;
}
-static bool dce_abm_set_backlight_level(
+static bool dce_abm_set_backlight_level_pwm(
struct abm *abm,
- unsigned int backlight_level,
+ unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
bool use_smooth_brightness)
@@ -406,16 +401,16 @@ static bool dce_abm_set_backlight_level(
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
- backlight_level, backlight_level);
+ backlight_pwm_u16_16, backlight_pwm_u16_16);
/* If DMCU is in reset state, DMCU is uninitialized */
if (use_smooth_brightness)
dmcu_set_backlight_level(abm_dce,
- backlight_level,
+ backlight_pwm_u16_16,
frame_ramp,
controller_id);
else
- driver_set_backlight_level(abm_dce, backlight_level);
+ driver_set_backlight_level(abm_dce, backlight_pwm_u16_16);
return true;
}
@@ -424,8 +419,9 @@ static const struct abm_funcs dce_funcs = {
.abm_init = dce_abm_init,
.set_abm_level = dce_abm_set_level,
.init_backlight = dce_abm_init_backlight,
- .set_backlight_level = dce_abm_set_backlight_level,
- .get_current_backlight_8_bit = dce_abm_get_current_backlight_8_bit,
+ .set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
+ .get_current_backlight = dce_abm_get_current_backlight,
+ .get_target_backlight = dce_abm_get_target_backlight,
.set_abm_immediate_disable = dce_abm_immediate_disable
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
new file mode 100644
index 000000000000..bd22f51813bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -0,0 +1,884 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "dmcu.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+
+#define TO_DCE_CLK_MGR(clocks)\
+ container_of(clocks, struct dce_clk_mgr, base)
+
+#define REG(reg) \
+ (clk_mgr_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
+
+#define CTX \
+ clk_mgr_dce->base.ctx
+#define DC_LOGGER \
+ clk_mgr->ctx->logger
+
+/* Max clock values for each state indexed by "enum clocks_state": */
+static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+/* ClocksStateInvalid - should not be used */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/* ClocksStateLow */
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+/* ClocksStateNominal */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
+/* ClocksStatePerformance */
+{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - per the HW design team, currently not supposed to be used*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
+
+static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - per the HW design team, currently not supposed to be used*/
+{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
+
+static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
+/*ClocksStateInvalid - should not be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateUltraLow - per the HW design team, currently not supposed to be used*/
+{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
+/*ClocksStateLow*/
+{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
+/*ClocksStateNominal*/
+{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
+/*ClocksStatePerformance*/
+{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
+
+int dentist_get_divider_from_did(int did)
+{
+ if (did < DENTIST_BASE_DID_1)
+ did = DENTIST_BASE_DID_1;
+ if (did > DENTIST_MAX_DID)
+ did = DENTIST_MAX_DID;
+
+ if (did < DENTIST_BASE_DID_2) {
+ return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
+ * (did - DENTIST_BASE_DID_1);
+ } else if (did < DENTIST_BASE_DID_3) {
+ return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+ * (did - DENTIST_BASE_DID_2);
+ } else if (did < DENTIST_BASE_DID_4) {
+ return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
+ * (did - DENTIST_BASE_DID_3);
+ } else {
+ return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
+ * (did - DENTIST_BASE_DID_4);
+ }
+}
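+
+/* Worked example for the mapping above (illustrative), using the DENTIST
+ * ranges from dce_clk_mgr.h: DID 0x41 falls in range 2, so this returns
+ * DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
+ * * (0x41 - DENTIST_BASE_DID_2) = 64 + 2 * 1 = 66, i.e. a real divider of
+ * 66 / 4 = 16.5 once DENTIST_DIVIDER_RANGE_SCALE_FACTOR is applied.
+ */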
+
+/* SW will adjust the DP REF Clock average value for all purposes
+ * (DP DTO / DP Audio DTO and DP GTC) if the clock is spread, for all cases:
+ *
+ * - SS enabled on DP Ref clock and HW de-spreading enabled, with SW
+ *   calculations for DS_INCR/DS_MODULO (planned to be the default case)
+ * - SS enabled on DP Ref clock and HW de-spreading enabled, with HW
+ *   calculations (not planned to be used, but the average clock should
+ *   still be valid)
+ * - SS enabled on DP Ref clock and HW de-spreading disabled (should not
+ *   be the case with CIK); SW should then program all rates according to
+ *   the average value (as with previous ASICs)
+ */
+static int clk_mgr_adjust_dp_ref_freq_for_ss(struct dce_clk_mgr *clk_mgr_dce, int dp_ref_clk_khz)
+{
+ if (clk_mgr_dce->ss_on_dprefclk && clk_mgr_dce->dprefclk_ss_divider != 0) {
+ struct fixed31_32 ss_percentage = dc_fixpt_div_int(
+ dc_fixpt_from_fraction(clk_mgr_dce->dprefclk_ss_percentage,
+ clk_mgr_dce->dprefclk_ss_divider), 200);
+ struct fixed31_32 adj_dp_ref_clk_khz;
+
+ ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
+ adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
+ dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
+ }
+ return dp_ref_clk_khz;
+}
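+
+/* Worked example for the adjustment above (illustrative values): with
+ * dprefclk_ss_percentage = 300 and the default dprefclk_ss_divider of 1000
+ * (i.e. 0.3% downspread), the factor is 1 - 300 / (1000 * 200) = 0.9985,
+ * so a 600000 kHz reference is reported as floor(600000 * 0.9985) =
+ * 599100 kHz.
+ */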
+
+static int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ int dprefclk_wdivider;
+ int dprefclk_src_sel;
+ int dp_ref_clk_khz = 600000;
+ int target_div;
+
+ /* ASSERT DP Reference Clock source is from DFS*/
+ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
+ ASSERT(dprefclk_src_sel == 0);
+
+ /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
+ * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
+ REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
+
+ /* Convert DENTIST_DPREFCLK_WDIVIDER to the actual divider */
+ target_div = dentist_get_divider_from_did(dprefclk_wdivider);
+
+ /* Calculate the current DFS clock, in kHz.*/
+ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr_dce->dentist_vco_freq_khz) / target_div;
+
+ return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, dp_ref_clk_khz);
+}
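+
+/* Worked example for the calculation above (illustrative): with
+ * dentist_vco_freq_khz = 3600000 (the fallback used in this file) and a
+ * DENTIST_DPREFCLK_WDIVIDER DID of 0x18, the divider is
+ * 8 + 1 * (0x18 - 0x08) = 24, so dp_ref_clk_khz = 4 * 3600000 / 24 =
+ * 600000 kHz before any SS adjustment.
+ */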
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+
+ return clk_mgr_adjust_dp_ref_freq_for_ss(clk_mgr_dce, clk_mgr_dce->dprefclk_khz);
+}
+
+/* unit: in_khz. Before mode set, get the pixel clock from the context, since
+ * the ASIC registers may not be programmed yet.
+ */
+static uint32_t get_max_pixel_clock_for_all_paths(struct dc_state *context)
+{
+ uint32_t max_pix_clk = 0;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ /* do not check underlay */
+ if (pipe_ctx->top_pipe)
+ continue;
+
+ if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
+ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
+
+ /* Raise the clock state for HBR3/2 if required; confirmed with HW that
+ * DCE/DPCS logic for HBR3 still needs Nominal (0.8V) on the VDDC rail.
+ */
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
+ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk > max_pix_clk)
+ max_pix_clk = pipe_ctx->stream_res.pix_clk_params.requested_sym_clk;
+ }
+
+ return max_pix_clk;
+}
+
+static enum dm_pp_clocks_state dce_get_required_clocks_state(
+ struct clk_mgr *clk_mgr,
+ struct dc_state *context)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ int i;
+ enum dm_pp_clocks_state low_req_clk;
+ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+
+ /* Iterate from highest supported to lowest valid state, and update
+ * lowest RequiredState with the lowest state that satisfies
+ * all required clocks
+ */
+ for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
+ if (context->bw.dce.dispclk_khz >
+ clk_mgr_dce->max_clks_by_state[i].display_clk_khz
+ || max_pix_clk >
+ clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
+ break;
+
+ low_req_clk = i + 1;
+ if (low_req_clk > clk_mgr_dce->max_clks_state) {
+ /* cap at the max clock state for a high phyclk; exceeding the display clock is invalid */
+ if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
+ < context->bw.dce.dispclk_khz)
+ low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
+ else
+ low_req_clk = clk_mgr_dce->max_clks_state;
+ }
+
+ return low_req_clk;
+}
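+
+/* Worked example for the state search above (illustrative), against the
+ * dce110 table in this file: for a required dispclk of 400000 kHz and a
+ * max_pix_clk of 350000 kHz, the loop breaks at ClocksStateLow
+ * (352000/330000 satisfies neither clock), so low_req_clk = Low + 1 =
+ * Nominal (467000/400000), the lowest state that satisfies both.
+ */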
+
+static int dce_set_clock(
+ struct clk_mgr *clk_mgr,
+ int requested_clk_khz)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
+ struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+ int actual_clock = requested_clk_khz;
+ struct dmcu *dmcu = clk_mgr_dce->base.ctx->dc->res_pool->dmcu;
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_mgr_dce->dentist_vco_freq_khz / 64);
+
+ /* Prepare to program display clock*/
+ pxl_clk_params.target_pixel_clock = requested_clk_khz;
+ pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+
+ if (clk_mgr_dce->dfs_bypass_active)
+ pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
+
+ bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
+
+ if (clk_mgr_dce->dfs_bypass_active) {
+ /* Cache the fixed display clock*/
+ clk_mgr_dce->dfs_bypass_disp_clk =
+ pxl_clk_params.dfs_bypass_display_clock;
+ actual_clock = pxl_clk_params.dfs_bypass_display_clock;
+ }
+
+ /* Coming out of power down / HW reset we need to mark the clock state as
+ * ClocksStateNominal, so that on resume we will call the pplib voltage
+ * regulator. */
+ if (requested_clk_khz == 0)
+ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
+ dmcu->funcs->set_psr_wait_loop(dmcu, actual_clock / 1000 / 7);
+
+ return actual_clock;
+}
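+
+/* Note (illustrative): with the 3600000 kHz VCO fallback, the minimum
+ * threshold enforced above is 3600000 / 64 = 56250 kHz, i.e. requests
+ * below roughly 56 MHz are clamped rather than programmed directly.
+ */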
+
+int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct bp_set_dce_clock_parameters dce_clk_params;
+ struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+ struct dc *core_dc = clk_mgr->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+ int actual_clock = requested_clk_khz;
+ /* Prepare to program display clock*/
+ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+
+ /* Make sure requested clock isn't lower than minimum threshold*/
+ if (requested_clk_khz > 0)
+ requested_clk_khz = max(requested_clk_khz,
+ clk_mgr_dce->dentist_vco_freq_khz / 62);
+
+ dce_clk_params.target_clock_frequency = requested_clk_khz;
+ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+ actual_clock = dce_clk_params.target_clock_frequency;
+
+ /* Coming out of power down / HW reset we need to mark the clock state as
+ * ClocksStateNominal, so that on resume we will call the pplib voltage
+ * regulator. */
+ if (requested_clk_khz == 0)
+ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+
+ /*Program DP ref Clock*/
+ /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
+ dce_clk_params.target_clock_frequency = 0;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
+ if (!ASICREV_IS_VEGA20_P(clk_mgr->ctx->asic_id.hw_internal_rev))
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
+ (dce_clk_params.pll_id ==
+ CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
+ else
+ dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
+
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+
+ if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
+ if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
+ if (clk_mgr_dce->dfs_bypass_disp_clk != actual_clock)
+ dmcu->funcs->set_psr_wait_loop(dmcu,
+ actual_clock / 1000 / 7);
+ }
+ }
+
+ clk_mgr_dce->dfs_bypass_disp_clk = actual_clock;
+ return actual_clock;
+}
+
+static void dce_clock_read_integrated_info(struct dce_clk_mgr *clk_mgr_dce)
+{
+ struct dc_debug_options *debug = &clk_mgr_dce->base.ctx->dc->debug;
+ struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+ struct integrated_info info = { { { 0 } } };
+ struct dc_firmware_info fw_info = { { 0 } };
+ int i;
+
+ if (bp->integrated_info)
+ info = *bp->integrated_info;
+
+ clk_mgr_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
+ if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+ clk_mgr_dce->dentist_vco_freq_khz =
+ fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr_dce->dentist_vco_freq_khz == 0)
+ clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ /*update the maximum display clock for each power state*/
+ for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
+ enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ switch (i) {
+ case 0:
+ clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
+ break;
+
+ case 1:
+ clk_state = DM_PP_CLOCKS_STATE_LOW;
+ break;
+
+ case 2:
+ clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ break;
+
+ case 3:
+ clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
+ break;
+
+ default:
+ clk_state = DM_PP_CLOCKS_STATE_INVALID;
+ break;
+ }
+
+ /* Do not allow a bad VBIOS/SBIOS to override with invalid values;
+ * sanity-check for > 100 MHz. */
+ if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
+ clk_mgr_dce->max_clks_by_state[clk_state].display_clk_khz =
+ info.disp_clk_voltage[i].max_supported_clk;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_mgr_dce->dfs_bypass_enabled = true;
+}
+
+void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce)
+{
+ struct dc_bios *bp = clk_mgr_dce->base.ctx->dc_bios;
+ int ss_info_num = bp->funcs->get_ss_entry_number(
+ bp, AS_SIGNAL_TYPE_GPU_PLL);
+
+ if (ss_info_num) {
+ struct spread_spectrum_info info = { { 0 } };
+ enum bp_result result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
+
+ /* VBIOS keeps an entry for GPU PLL SS even when SS is not enabled;
+ * a non-zero SSInfo.spreadSpectrumPercentage is the sign that SS is
+ * actually enabled.
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_mgr_dce->ss_on_dprefclk = true;
+ clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* TODO: Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_mgr_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+
+ return;
+ }
+
+ result = bp->funcs->get_spread_spectrum_info(
+ bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
+
+ /* VBIOS keeps an entry for DPREFCLK SS even when SS is not enabled;
+ * a non-zero SSInfo.spreadSpectrumPercentage is the sign that SS is
+ * actually enabled.
+ */
+ if (result == BP_RESULT_OK &&
+ info.spread_spectrum_percentage != 0) {
+ clk_mgr_dce->ss_on_dprefclk = true;
+ clk_mgr_dce->dprefclk_ss_divider = info.spread_percentage_divider;
+
+ if (info.type.CENTER_MODE == 0) {
+ /* Currently for DP Reference clock we
+ * need only SS percentage for
+ * downspread */
+ clk_mgr_dce->dprefclk_ss_percentage =
+ info.spread_spectrum_percentage;
+ }
+ }
+ }
+}
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg)
+{
+ int j;
+ int num_cfgs = 0;
+
+ for (j = 0; j < context->stream_count; j++) {
+ int k;
+
+ const struct dc_stream_state *stream = context->streams[j];
+ struct dm_pp_single_disp_config *cfg =
+ &pp_display_cfg->disp_configs[num_cfgs];
+ const struct pipe_ctx *pipe_ctx = NULL;
+
+ for (k = 0; k < MAX_PIPES; k++)
+ if (stream == context->res_ctx.pipe_ctx[k].stream) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[k];
+ break;
+ }
+
+ ASSERT(pipe_ctx != NULL);
+
+ /* only notify active streams */
+ if (stream->dpms_off)
+ continue;
+
+ num_cfgs++;
+ cfg->signal = pipe_ctx->stream->signal;
+ cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
+ cfg->src_height = stream->src.height;
+ cfg->src_width = stream->src.width;
+ cfg->ddi_channel_mapping =
+ stream->sink->link->ddi_channel_mapping.raw;
+ cfg->transmitter =
+ stream->sink->link->link_enc->transmitter;
+ cfg->link_settings.lane_count =
+ stream->sink->link->cur_link_settings.lane_count;
+ cfg->link_settings.link_rate =
+ stream->sink->link->cur_link_settings.link_rate;
+ cfg->link_settings.link_spread =
+ stream->sink->link->cur_link_settings.link_spread;
+ cfg->sym_clock = stream->phy_pix_clk;
+ /* Round v_refresh*/
+ cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
+ cfg->v_refresh /= stream->timing.h_total;
+ cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
+ / stream->timing.v_total;
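+ /* e.g. (illustrative) 1080p60 CEA timing: pix_clk_khz 148500,
+ * h_total 2200, v_total 1125: 148500000 / 2200 = 67500 lines/s,
+ * and (67500 + 562) / 1125 rounds to 60 Hz.
+ */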
+ }
+
+ pp_display_cfg->display_count = num_cfgs;
+}
+
+static uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
+{
+ uint8_t j;
+ uint32_t min_vertical_blank_time = -1;
+
+ for (j = 0; j < context->stream_count; j++) {
+ struct dc_stream_state *stream = context->streams[j];
+ uint32_t vertical_blank_in_pixels = 0;
+ uint32_t vertical_blank_time = 0;
+
+ vertical_blank_in_pixels = stream->timing.h_total *
+ (stream->timing.v_total
+ - stream->timing.v_addressable);
+
+ vertical_blank_time = vertical_blank_in_pixels
+ * 1000 / stream->timing.pix_clk_khz;
+
+ if (min_vertical_blank_time > vertical_blank_time)
+ min_vertical_blank_time = vertical_blank_time;
+ }
+
+ return min_vertical_blank_time;
+}
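+
+/* Worked example for the loop above (illustrative), again for 1080p60 CEA
+ * timing: blanking is 1125 - 1080 = 45 lines, i.e. 2200 * 45 = 99000
+ * pixels, which at a 148500 kHz pixel clock gives 99000 * 1000 / 148500 =
+ * 666 us of vblank.
+ */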
+
+static int determine_sclk_from_bounding_box(
+ const struct dc *dc,
+ int required_sclk)
+{
+ int i;
+
+ /*
+ * Some asics do not give us sclk levels, so we just report the actual
+ * required sclk
+ */
+ if (dc->sclk_lvls.num_levels == 0)
+ return required_sclk;
+
+ for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
+ if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
+ return dc->sclk_lvls.clocks_in_khz[i];
+ }
+ /*
+ * Even the maximum level could not satisfy the requirement; this is
+ * unexpected at this stage and should have been caught at validation
+ * time.
+ */
+ ASSERT(0);
+ return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
+}
+
+static void dce_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context);
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce11_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->all_displays_in_sync =
+ context->bw.dce.all_displays_in_sync;
+ pp_display_cfg->nb_pstate_switch_disable =
+ context->bw.dce.nbp_state_change_enable == false;
+ pp_display_cfg->cpu_cc6_disable =
+ context->bw.dce.cpuc_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_disable =
+ context->bw.dce.cpup_state_change_enable == false;
+ pp_display_cfg->cpu_pstate_separation_time =
+ context->bw.dce.blackout_recovery_time_us;
+
+ pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ / MEMORY_TYPE_MULTIPLIER_CZ;
+
+ pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
+ dc,
+ context->bw.dce.sclk_khz);
+
+ pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+
+ pp_display_cfg->min_engine_clock_deep_sleep_khz
+ = context->bw.dce.sclk_deep_sleep_khz;
+
+ pp_display_cfg->avail_mclk_switch_time_us =
+ dce110_get_min_vblank_time_us(context);
+ /* TODO: dce11.2*/
+ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
+
+ pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
+
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ /* TODO: is this still applicable?*/
+ if (pp_display_cfg->display_count == 1) {
+ const struct dc_crtc_timing *timing =
+ &context->streams[0]->timing;
+
+ pp_display_cfg->crtc_index =
+ pp_display_cfg->disp_configs[0].pipe_idx;
+ pp_display_cfg->line_time_in_us = timing->h_total * 1000 / timing->pix_clk_khz;
+ }
+
+ if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0)
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static void dce_update_clocks(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct dm_pp_power_level_change_request level_change_req;
+ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+ /*TODO: W/A for dal3 linux, investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+ dce_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+ context->bw.dce.dispclk_khz = unpatched_disp_clk;
+}
+
+static void dce11_update_clocks(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct dm_pp_power_level_change_request level_change_req;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+}
+
+static void dce112_update_clocks(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct dm_pp_power_level_change_request level_change_req;
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context);
+ /* get max clock state from PPLIB */
+ if ((level_change_req.power_level < clk_mgr_dce->cur_min_clks_state && safe_to_lower)
+ || level_change_req.power_level > clk_mgr_dce->cur_min_clks_state) {
+ if (dm_pp_apply_power_level_change_request(clk_mgr->ctx, &level_change_req))
+ clk_mgr_dce->cur_min_clks_state = level_change_req.power_level;
+ }
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+ }
+ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+}
+
+static void dce12_update_clocks(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
+ struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
+ int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
+ int unpatched_disp_clk = context->bw.dce.dispclk_khz;
+
+ /*TODO: W/A for dal3 linux, investigate why this works */
+ if (!clk_mgr_dce->dfs_bypass_active)
+ context->bw.dce.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+
+ if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
+ clock_voltage_req.clocks_in_khz = context->bw.dce.dispclk_khz;
+ context->bw.dce.dispclk_khz = dce112_set_clock(clk_mgr, context->bw.dce.dispclk_khz);
+ clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz;
+
+ dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
+ }
+
+ if (should_set_clock(safe_to_lower, max_pix_clk, clk_mgr->clks.phyclk_khz)) {
+ clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
+ clock_voltage_req.clocks_in_khz = max_pix_clk;
+ clk_mgr->clks.phyclk_khz = max_pix_clk;
+
+ dm_pp_apply_clock_for_voltage_request(clk_mgr->ctx, &clock_voltage_req);
+ }
+ dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
+
+ context->bw.dce.dispclk_khz = unpatched_disp_clk;
+}
+
+static const struct clk_mgr_funcs dce120_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dce12_update_clocks
+};
+
+static const struct clk_mgr_funcs dce112_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .update_clocks = dce112_update_clocks
+};
+
+static const struct clk_mgr_funcs dce110_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .update_clocks = dce11_update_clocks,
+};
+
+static const struct clk_mgr_funcs dce_funcs = {
+ .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
+ .update_clocks = dce_update_clocks
+};
+
+static void dce_clk_mgr_construct(
+ struct dce_clk_mgr *clk_mgr_dce,
+ struct dc_context *ctx,
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask)
+{
+ struct clk_mgr *base = &clk_mgr_dce->base;
+ struct dm_pp_static_clock_info static_clk_info = {0};
+
+ base->ctx = ctx;
+ base->funcs = &dce_funcs;
+
+ clk_mgr_dce->regs = regs;
+ clk_mgr_dce->clk_mgr_shift = clk_shift;
+ clk_mgr_dce->clk_mgr_mask = clk_mask;
+
+ clk_mgr_dce->dfs_bypass_disp_clk = 0;
+
+ clk_mgr_dce->dprefclk_ss_percentage = 0;
+ clk_mgr_dce->dprefclk_ss_divider = 1000;
+ clk_mgr_dce->ss_on_dprefclk = false;
+
+
+ if (dm_pp_get_static_clocks(ctx, &static_clk_info))
+ clk_mgr_dce->max_clks_state = static_clk_info.max_clocks_state;
+ else
+ clk_mgr_dce->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
+ clk_mgr_dce->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+
+ dce_clock_read_integrated_info(clk_mgr_dce);
+ dce_clock_read_ss_info(clk_mgr_dce);
+}
+
+struct clk_mgr *dce_clk_mgr_create(
+ struct dc_context *ctx,
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask)
+{
+ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+ if (clk_mgr_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_mgr_dce->max_clks_by_state,
+ dce80_max_clks_by_state,
+ sizeof(dce80_max_clks_by_state));
+
+ dce_clk_mgr_construct(
+ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+ return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce110_clk_mgr_create(
+ struct dc_context *ctx,
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask)
+{
+ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+ if (clk_mgr_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_mgr_dce->max_clks_by_state,
+ dce110_max_clks_by_state,
+ sizeof(dce110_max_clks_by_state));
+
+ dce_clk_mgr_construct(
+ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_mgr_dce->base.funcs = &dce110_funcs;
+
+ return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce112_clk_mgr_create(
+ struct dc_context *ctx,
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask)
+{
+ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+ if (clk_mgr_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_mgr_dce->max_clks_by_state,
+ dce112_max_clks_by_state,
+ sizeof(dce112_max_clks_by_state));
+
+ dce_clk_mgr_construct(
+ clk_mgr_dce, ctx, regs, clk_shift, clk_mask);
+
+ clk_mgr_dce->base.funcs = &dce112_funcs;
+
+ return &clk_mgr_dce->base;
+}
+
+struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx)
+{
+ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+ if (clk_mgr_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ memcpy(clk_mgr_dce->max_clks_by_state,
+ dce120_max_clks_by_state,
+ sizeof(dce120_max_clks_by_state));
+
+ dce_clk_mgr_construct(
+ clk_mgr_dce, ctx, NULL, NULL, NULL);
+
+ clk_mgr_dce->dprefclk_khz = 600000;
+ clk_mgr_dce->base.funcs = &dce120_funcs;
+
+ return &clk_mgr_dce->base;
+}
+
+void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr)
+{
+ struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(*clk_mgr);
+
+ kfree(clk_mgr_dce);
+ *clk_mgr = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
index 34fdb386c884..3bceb31d910d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.h
@@ -24,10 +24,13 @@
*/
-#ifndef _DCE_CLOCKS_H_
-#define _DCE_CLOCKS_H_
+#ifndef _DCE_CLK_MGR_H_
+#define _DCE_CLK_MGR_H_
-#include "display_clock.h"
+#include "clk_mgr.h"
+#include "dccg.h"
+
+#define MEMORY_TYPE_MULTIPLIER_CZ 4
#define CLK_COMMON_REG_LIST_DCE_BASE() \
.DPREFCLK_CNTL = mmDPREFCLK_CNTL, \
@@ -53,24 +56,31 @@
type DENTIST_DISPCLK_WDIVIDER; \
type DENTIST_DISPCLK_CHG_DONE;
-struct dccg_shift {
+struct clk_mgr_shift {
CLK_REG_FIELD_LIST(uint8_t)
};
-struct dccg_mask {
+struct clk_mgr_mask {
CLK_REG_FIELD_LIST(uint32_t)
};
-struct dccg_registers {
+struct clk_mgr_registers {
uint32_t DPREFCLK_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
};
-struct dce_dccg {
- struct dccg base;
- const struct dccg_registers *regs;
- const struct dccg_shift *clk_shift;
- const struct dccg_mask *clk_mask;
+struct state_dependent_clocks {
+ int display_clk_khz;
+ int pixel_clk_khz;
+};
+
+struct dce_clk_mgr {
+ struct clk_mgr base;
+ const struct clk_mgr_registers *regs;
+ const struct clk_mgr_shift *clk_mgr_shift;
+ const struct clk_mgr_mask *clk_mgr_mask;
+
+ struct dccg *dccg;
struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES];
@@ -91,33 +101,70 @@ struct dce_dccg {
/* DPREFCLK SS percentage Divider (100 or 1000) */
int dprefclk_ss_divider;
int dprefclk_khz;
+
+ enum dm_pp_clocks_state max_clks_state;
+ enum dm_pp_clocks_state cur_min_clks_state;
+};
+
+/* Starting DID for each range */
+enum dentist_base_divider_id {
+ DENTIST_BASE_DID_1 = 0x08,
+ DENTIST_BASE_DID_2 = 0x40,
+ DENTIST_BASE_DID_3 = 0x60,
+ DENTIST_BASE_DID_4 = 0x7e,
+ DENTIST_MAX_DID = 0x7f
};
+/* Starting point and step size for each divider range.*/
+enum dentist_divider_range {
+ DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
+ DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
+ DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
+ DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
+ DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
+ DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
+ DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
+ DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
+ DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
+};
+
+static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
+{
+ return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
+}
+
+void dce_clock_read_ss_info(struct dce_clk_mgr *clk_mgr_dce);
+
+int dce12_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr);
+
+void dce110_fill_display_configs(
+ const struct dc_state *context,
+ struct dm_pp_display_configuration *pp_display_cfg);
+
+int dce112_set_clock(struct clk_mgr *clk_mgr, int requested_clk_khz);
-struct dccg *dce_dccg_create(
+struct clk_mgr *dce_clk_mgr_create(
struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask);
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask);
-struct dccg *dce110_dccg_create(
+struct clk_mgr *dce110_clk_mgr_create(
struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask);
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask);
-struct dccg *dce112_dccg_create(
+struct clk_mgr *dce112_clk_mgr_create(
struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask);
+ const struct clk_mgr_registers *regs,
+ const struct clk_mgr_shift *clk_shift,
+ const struct clk_mgr_mask *clk_mask);
-struct dccg *dce120_dccg_create(struct dc_context *ctx);
+struct clk_mgr *dce120_clk_mgr_create(struct dc_context *ctx);
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-struct dccg *dcn1_dccg_create(struct dc_context *ctx);
-#endif
+void dce_clk_mgr_destroy(struct clk_mgr **clk_mgr);
-void dce_dccg_destroy(struct dccg **dccg);
+int dentist_get_divider_from_did(int did);
-#endif /* _DCE_CLOCKS_H_ */
+#endif /* _DCE_CLK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
deleted file mode 100644
index d89a097ba936..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ /dev/null
@@ -1,947 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "dce_clocks.h"
-#include "dm_services.h"
-#include "reg_helper.h"
-#include "fixed31_32.h"
-#include "bios_parser_interface.h"
-#include "dc.h"
-#include "dmcu.h"
-#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
-#include "dcn_calcs.h"
-#endif
-#include "core_types.h"
-#include "dc_types.h"
-#include "dal_asic_id.h"
-
-#define TO_DCE_CLOCKS(clocks)\
- container_of(clocks, struct dce_dccg, base)
-
-#define REG(reg) \
- (clk_dce->regs->reg)
-
-#undef FN
-#define FN(reg_name, field_name) \
- clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name
-
-#define CTX \
- clk_dce->base.ctx
-#define DC_LOGGER \
- clk->ctx->logger
-
-/* Max clock values for each state indexed by "enum clocks_state": */
-static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
-/* ClocksStateInvalid - should not be used */
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/* ClocksStateLow */
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
-/* ClocksStateNominal */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
-/* ClocksStatePerformance */
-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
-
-static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };
-
-static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };
-
-static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
-/*ClocksStateInvalid - should not be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
-{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
-/*ClocksStateLow*/
-{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
-/*ClocksStateNominal*/
-{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
-/*ClocksStatePerformance*/
-{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };
-
-/* Starting DID for each range */
-enum dentist_base_divider_id {
- DENTIST_BASE_DID_1 = 0x08,
- DENTIST_BASE_DID_2 = 0x40,
- DENTIST_BASE_DID_3 = 0x60,
- DENTIST_BASE_DID_4 = 0x7e,
- DENTIST_MAX_DID = 0x7f
-};
-
-/* Starting point and step size for each divider range.*/
-enum dentist_divider_range {
- DENTIST_DIVIDER_RANGE_1_START = 8, /* 2.00 */
- DENTIST_DIVIDER_RANGE_1_STEP = 1, /* 0.25 */
- DENTIST_DIVIDER_RANGE_2_START = 64, /* 16.00 */
- DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
- DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
- DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
- DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
- DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
- DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
-};
-
-static int dentist_get_divider_from_did(int did)
-{
- if (did < DENTIST_BASE_DID_1)
- did = DENTIST_BASE_DID_1;
- if (did > DENTIST_MAX_DID)
- did = DENTIST_MAX_DID;
-
- if (did < DENTIST_BASE_DID_2) {
- return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
- * (did - DENTIST_BASE_DID_1);
- } else if (did < DENTIST_BASE_DID_3) {
- return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
- * (did - DENTIST_BASE_DID_2);
- } else if (did < DENTIST_BASE_DID_4) {
- return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
- * (did - DENTIST_BASE_DID_3);
- } else {
- return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
- * (did - DENTIST_BASE_DID_4);
- }
-}
-
-/* SW will adjust DP REF Clock average value for all purposes
- * (DP DTO / DP Audio DTO and DP GTC)
- if clock is spread for all cases:
- -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
- calculations for DS_INCR/DS_MODULO (this is planned to be default case)
- -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
- calculations (not planned to be used, but average clock should still
- be valid)
- -if SS enabled on DP Ref clock and HW de-spreading disabled
- (should not be case with CIK) then SW should program all rates
- generated according to average value (case as with previous ASICs)
- */
-static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
-{
- if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
- struct fixed31_32 ss_percentage = dc_fixpt_div_int(
- dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
- clk_dce->dprefclk_ss_divider), 200);
- struct fixed31_32 adj_dp_ref_clk_khz;
-
- ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
- adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
- dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
- }
- return dp_ref_clk_khz;
-}
-
-static int dce_get_dp_ref_freq_khz(struct dccg *clk)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- int dprefclk_wdivider;
- int dprefclk_src_sel;
- int dp_ref_clk_khz = 600000;
- int target_div;
-
- /* ASSERT DP Reference Clock source is from DFS*/
- REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
- ASSERT(dprefclk_src_sel == 0);
-
- /* Read the mmDENTIST_DISPCLK_CNTL to get the currently
- * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
- REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);
-
- /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/
- target_div = dentist_get_divider_from_did(dprefclk_wdivider);
-
- /* Calculate the current DFS clock, in kHz.*/
- dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
- * clk_dce->dentist_vco_freq_khz) / target_div;
-
- return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
-}
-
-static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
-
- return dccg_adjust_dp_ref_freq_for_ss(clk_dce, clk_dce->dprefclk_khz);
-}
-
-static enum dm_pp_clocks_state dce_get_required_clocks_state(
- struct dccg *clk,
- struct dc_clocks *req_clocks)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- int i;
- enum dm_pp_clocks_state low_req_clk;
-
- /* Iterate from highest supported to lowest valid state, and update
- * lowest RequiredState with the lowest state that satisfies
- * all required clocks
- */
- for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
- if (req_clocks->dispclk_khz >
- clk_dce->max_clks_by_state[i].display_clk_khz
- || req_clocks->phyclk_khz >
- clk_dce->max_clks_by_state[i].pixel_clk_khz)
- break;
-
- low_req_clk = i + 1;
- if (low_req_clk > clk->max_clks_state) {
- /* set max clock state for high phyclock, invalid on exceeding display clock */
- if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
- < req_clocks->dispclk_khz)
- low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
- else
- low_req_clk = clk->max_clks_state;
- }
-
- return low_req_clk;
-}
-
-static int dce_set_clock(
- struct dccg *clk,
- int requested_clk_khz)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
- struct dc_bios *bp = clk->ctx->dc_bios;
- int actual_clock = requested_clk_khz;
-
- /* Make sure requested clock isn't lower than minimum threshold*/
- if (requested_clk_khz > 0)
- requested_clk_khz = max(requested_clk_khz,
- clk_dce->dentist_vco_freq_khz / 64);
-
- /* Prepare to program display clock*/
- pxl_clk_params.target_pixel_clock = requested_clk_khz;
- pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
-
- if (clk_dce->dfs_bypass_active)
- pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true;
-
- bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);
-
- if (clk_dce->dfs_bypass_active) {
- /* Cache the fixed display clock*/
- clk_dce->dfs_bypass_disp_clk =
- pxl_clk_params.dfs_bypass_display_clock;
- actual_clock = pxl_clk_params.dfs_bypass_display_clock;
- }
-
- /* from power down, we need mark the clock state as ClocksStateNominal
- * from HWReset, so when resume we will call pplib voltage regulator.*/
- if (requested_clk_khz == 0)
- clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
- return actual_clock;
-}
-
-static int dce_psr_set_clock(
- struct dccg *clk,
- int requested_clk_khz)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- struct dc_context *ctx = clk_dce->base.ctx;
- struct dc *core_dc = ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
- int actual_clk_khz = requested_clk_khz;
-
- actual_clk_khz = dce_set_clock(clk, requested_clk_khz);
-
- dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7);
- return actual_clk_khz;
-}
-
-static int dce112_set_clock(
- struct dccg *clk,
- int requested_clk_khz)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
- struct bp_set_dce_clock_parameters dce_clk_params;
- struct dc_bios *bp = clk->ctx->dc_bios;
- struct dc *core_dc = clk->ctx->dc;
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
- int actual_clock = requested_clk_khz;
- /* Prepare to program display clock*/
- memset(&dce_clk_params, 0, sizeof(dce_clk_params));
-
- /* Make sure requested clock isn't lower than minimum threshold*/
- if (requested_clk_khz > 0)
- requested_clk_khz = max(requested_clk_khz,
- clk_dce->dentist_vco_freq_khz / 62);
-
- dce_clk_params.target_clock_frequency = requested_clk_khz;
- dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
- dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
-
- bp->funcs->set_dce_clock(bp, &dce_clk_params);
- actual_clock = dce_clk_params.target_clock_frequency;
-
- /* from power down, we need mark the clock state as ClocksStateNominal
- * from HWReset, so when resume we will call pplib voltage regulator.*/
- if (requested_clk_khz == 0)
- clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
-
- /*Program DP ref Clock*/
- /*VBIOS will determine DPREFCLK frequency, so we don't set it*/
- dce_clk_params.target_clock_frequency = 0;
- dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
- if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
- dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
- (dce_clk_params.pll_id ==
- CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
- else
- dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
-
- bp->funcs->set_dce_clock(bp, &dce_clk_params);
-
- if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
- if (clk_dce->dfs_bypass_disp_clk != actual_clock)
- dmcu->funcs->set_psr_wait_loop(dmcu,
- actual_clock / 1000 / 7);
- }
-
- clk_dce->dfs_bypass_disp_clk = actual_clock;
- return actual_clock;
-}
-
-static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
-{
- struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
- struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
- struct integrated_info info = { { { 0 } } };
- struct dc_firmware_info fw_info = { { 0 } };
- int i;
-
- if (bp->integrated_info)
- info = *bp->integrated_info;
-
- clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
- if (clk_dce->dentist_vco_freq_khz == 0) {
- bp->funcs->get_firmware_info(bp, &fw_info);
- clk_dce->dentist_vco_freq_khz =
- fw_info.smu_gpu_pll_output_freq;
- if (clk_dce->dentist_vco_freq_khz == 0)
- clk_dce->dentist_vco_freq_khz = 3600000;
- }
-
- /*update the maximum display clock for each power state*/
- for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
- enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;
-
- switch (i) {
- case 0:
- clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
- break;
-
- case 1:
- clk_state = DM_PP_CLOCKS_STATE_LOW;
- break;
-
- case 2:
- clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
- break;
-
- case 3:
- clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
- break;
-
- default:
- clk_state = DM_PP_CLOCKS_STATE_INVALID;
- break;
- }
-
- /*Do not allow bad VBIOS/SBIOS to override with invalid values,
- * check for > 100MHz*/
- if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
- clk_dce->max_clks_by_state[clk_state].display_clk_khz =
- info.disp_clk_voltage[i].max_supported_clk;
- }
-
- if (!debug->disable_dfs_bypass && bp->integrated_info)
- if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
- clk_dce->dfs_bypass_enabled = true;
-}
-
-static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
-{
- struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
- int ss_info_num = bp->funcs->get_ss_entry_number(
- bp, AS_SIGNAL_TYPE_GPU_PLL);
-
- if (ss_info_num) {
- struct spread_spectrum_info info = { { 0 } };
- enum bp_result result = bp->funcs->get_spread_spectrum_info(
- bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);
-
- /* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
- * even if SS not enabled and in that case
- * SSInfo.spreadSpectrumPercentage !=0 would be sign
- * that SS is enabled
- */
- if (result == BP_RESULT_OK &&
- info.spread_spectrum_percentage != 0) {
- clk_dce->ss_on_dprefclk = true;
- clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
-
- if (info.type.CENTER_MODE == 0) {
- /* TODO: Currently for DP Reference clock we
- * need only SS percentage for
- * downspread */
- clk_dce->dprefclk_ss_percentage =
- info.spread_spectrum_percentage;
- }
-
- return;
- }
-
- result = bp->funcs->get_spread_spectrum_info(
- bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);
-
- /* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
- * even if SS not enabled and in that case
- * SSInfo.spreadSpectrumPercentage !=0 would be sign
- * that SS is enabled
- */
- if (result == BP_RESULT_OK &&
- info.spread_spectrum_percentage != 0) {
- clk_dce->ss_on_dprefclk = true;
- clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
-
- if (info.type.CENTER_MODE == 0) {
- /* Currently for DP Reference clock we
- * need only SS percentage for
- * downspread */
- clk_dce->dprefclk_ss_percentage =
- info.spread_spectrum_percentage;
- }
- }
- }
-}
-
-static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
-{
- return ((safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk);
-}
-
-static void dce12_update_clocks(struct dccg *dccg,
- struct dc_clocks *new_clocks,
- bool safe_to_lower)
-{
- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
-
- /* TODO: Investigate why this is needed to fix display corruption. */
- new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
-
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
- clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
- new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-
- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
- clock_voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
- dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
-
- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
- }
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
-{
- bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
- bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
- int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
- bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;
-
- /* increase clock, looking for div is 0 for current, request div is 1*/
- if (dispclk_increase) {
- /* already divided by 2, no need to reach target clk with 2 steps*/
- if (cur_dpp_div)
- return new_clocks->dispclk_khz;
-
- /* request disp clk is lower than maximum supported dpp clk,
- * no need to reach target clk with two steps.
- */
- if (new_clocks->dispclk_khz <= disp_clk_threshold)
- return new_clocks->dispclk_khz;
-
- /* target dpp clk not request divided by 2, still within threshold */
- if (!request_dpp_div)
- return new_clocks->dispclk_khz;
-
- } else {
- /* decrease clock, looking for current dppclk divided by 2,
- * request dppclk not divided by 2.
- */
-
- /* current dpp clk not divided by 2, no need to ramp*/
- if (!cur_dpp_div)
- return new_clocks->dispclk_khz;
-
- /* current disp clk is lower than current maximum dpp clk,
- * no need to ramp
- */
- if (dccg->clks.dispclk_khz <= disp_clk_threshold)
- return new_clocks->dispclk_khz;
-
- /* request dpp clk need to be divided by 2 */
- if (request_dpp_div)
- return new_clocks->dispclk_khz;
- }
-
- return disp_clk_threshold;
-}
-
-static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
-{
- struct dc *dc = dccg->ctx->dc;
- int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
- bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
- int i;
-
- /* set disp clk to dpp clk threshold */
- dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);
-
- /* update request dpp clk division option */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (!pipe_ctx->plane_state)
- continue;
-
- pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
- pipe_ctx->plane_res.dpp,
- request_dpp_div,
- true);
- }
-
- /* If target clk not same as dppclk threshold, set to target clock */
- if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
- dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
-
- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
- dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
- dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
-}
-
-static void dcn1_update_clocks(struct dccg *dccg,
- struct dc_clocks *new_clocks,
- bool safe_to_lower)
-{
- struct dc *dc = dccg->ctx->dc;
- struct pp_smu_display_requirement_rv *smu_req_cur =
- &dc->res_pool->pp_smu_req;
- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
- struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
- bool send_request_to_increase = false;
- bool send_request_to_lower = false;
-
- if (new_clocks->phyclk_khz)
- smu_req.display_count = 1;
- else
- smu_req.display_count = 0;
-
- if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
- || new_clocks->phyclk_khz > dccg->clks.phyclk_khz
- || new_clocks->fclk_khz > dccg->clks.fclk_khz
- || new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
- send_request_to_increase = true;
-
- if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
- dccg->clks.phyclk_khz = new_clocks->phyclk_khz;
-
- send_request_to_lower = true;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
- dccg->clks.fclk_khz = new_clocks->fclk_khz;
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
- clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
- smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;
-
- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
- send_request_to_lower = true;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
- dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
- smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;
-
- send_request_to_lower = true;
- }
-
- if (should_set_clock(safe_to_lower,
- new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
- dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
- smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;
-
- send_request_to_lower = true;
- }
-
- /* make sure dcf clk is before dpp clk to
- * make sure we have enough voltage to run dpp clk
- */
- if (send_request_to_increase) {
- /*use dcfclk to request voltage*/
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- }
-
- /* dcn1 dppclk is tied to dispclk */
- /* program dispclk on = as a w/a for sleep resume clock ramping issues */
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
- || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
- dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
-
- send_request_to_lower = true;
- }
-
- if (!send_request_to_increase && send_request_to_lower) {
- /*use dcfclk to request voltage*/
- clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
- clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
- dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- }
-
-
- *smu_req_cur = smu_req;
-}
-#endif
-
-static void dce_update_clocks(struct dccg *dccg,
- struct dc_clocks *new_clocks,
- bool safe_to_lower)
-{
- struct dm_pp_power_level_change_request level_change_req;
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
-
- /* TODO: Investigate why this is needed to fix display corruption. */
- if (!clk_dce->dfs_bypass_active)
- new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100;
-
- level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks);
- /* get max clock state from PPLIB */
- if ((level_change_req.power_level < dccg->cur_min_clks_state && safe_to_lower)
- || level_change_req.power_level > dccg->cur_min_clks_state) {
- if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
- dccg->cur_min_clks_state = level_change_req.power_level;
- }
-
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
- new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
- dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
- }
-}
-
-static bool dce_update_dfs_bypass(
- struct dccg *dccg,
- struct dc *dc,
- struct dc_state *context,
- int requested_clock_khz)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg);
- struct resource_context *res_ctx = &context->res_ctx;
- enum signal_type signal_type = SIGNAL_TYPE_NONE;
- bool was_active = clk_dce->dfs_bypass_active;
- int i;
-
- /* Disable DFS bypass by default. */
- clk_dce->dfs_bypass_active = false;
-
- /* Check that DFS bypass is available. */
- if (!clk_dce->dfs_bypass_enabled)
- goto update;
-
- /* Check if the requested display clock is below the threshold. */
- if (requested_clock_khz >= 400000)
- goto update;
-
- /* DFS-bypass should only be enabled on single stream setups */
- if (context->stream_count != 1)
- goto update;
-
- /* Check that the stream's signal type is an embedded panel */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (res_ctx->pipe_ctx[i].stream) {
- struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
-
- signal_type = pipe_ctx->stream->sink->link->connector_signal;
- break;
- }
- }
-
- if (signal_type == SIGNAL_TYPE_EDP ||
- signal_type == SIGNAL_TYPE_LVDS)
- clk_dce->dfs_bypass_active = true;
-
-update:
- /* Update the clock state. We don't need to respect safe_to_lower
- * because DFS bypass should always be greater than the current
- * display clock frequency.
- */
- if (was_active != clk_dce->dfs_bypass_active) {
- dccg->clks.dispclk_khz =
- dccg->funcs->set_dispclk(dccg, dccg->clks.dispclk_khz);
- return true;
- }
-
- return false;
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-static const struct display_clock_funcs dcn1_funcs = {
- .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
- .set_dispclk = dce112_set_clock,
- .update_clocks = dcn1_update_clocks
-};
-#endif
-
-static const struct display_clock_funcs dce120_funcs = {
- .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
- .set_dispclk = dce112_set_clock,
- .update_clocks = dce12_update_clocks
-};
-
-static const struct display_clock_funcs dce112_funcs = {
- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
- .set_dispclk = dce112_set_clock,
- .update_clocks = dce_update_clocks
-};
-
-static const struct display_clock_funcs dce110_funcs = {
- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
- .set_dispclk = dce_psr_set_clock,
- .update_clocks = dce_update_clocks,
- .update_dfs_bypass = dce_update_dfs_bypass
-};
-
-static const struct display_clock_funcs dce_funcs = {
- .get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
- .set_dispclk = dce_set_clock,
- .update_clocks = dce_update_clocks
-};
-
-static void dce_dccg_construct(
- struct dce_dccg *clk_dce,
- struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask)
-{
- struct dccg *base = &clk_dce->base;
-
- base->ctx = ctx;
- base->funcs = &dce_funcs;
-
- clk_dce->regs = regs;
- clk_dce->clk_shift = clk_shift;
- clk_dce->clk_mask = clk_mask;
-
- clk_dce->dfs_bypass_disp_clk = 0;
-
- clk_dce->dprefclk_ss_percentage = 0;
- clk_dce->dprefclk_ss_divider = 1000;
- clk_dce->ss_on_dprefclk = false;
-
- base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
- base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
-
- dce_clock_read_integrated_info(clk_dce);
- dce_clock_read_ss_info(clk_dce);
-}
-
-struct dccg *dce_dccg_create(
- struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask)
-{
- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
- if (clk_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- memcpy(clk_dce->max_clks_by_state,
- dce80_max_clks_by_state,
- sizeof(dce80_max_clks_by_state));
-
- dce_dccg_construct(
- clk_dce, ctx, regs, clk_shift, clk_mask);
-
- return &clk_dce->base;
-}
-
-struct dccg *dce110_dccg_create(
- struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask)
-{
- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
- if (clk_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- memcpy(clk_dce->max_clks_by_state,
- dce110_max_clks_by_state,
- sizeof(dce110_max_clks_by_state));
-
- dce_dccg_construct(
- clk_dce, ctx, regs, clk_shift, clk_mask);
-
- clk_dce->base.funcs = &dce110_funcs;
-
- return &clk_dce->base;
-}
-
-struct dccg *dce112_dccg_create(
- struct dc_context *ctx,
- const struct dccg_registers *regs,
- const struct dccg_shift *clk_shift,
- const struct dccg_mask *clk_mask)
-{
- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
- if (clk_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- memcpy(clk_dce->max_clks_by_state,
- dce112_max_clks_by_state,
- sizeof(dce112_max_clks_by_state));
-
- dce_dccg_construct(
- clk_dce, ctx, regs, clk_shift, clk_mask);
-
- clk_dce->base.funcs = &dce112_funcs;
-
- return &clk_dce->base;
-}
-
-struct dccg *dce120_dccg_create(struct dc_context *ctx)
-{
- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
- if (clk_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- memcpy(clk_dce->max_clks_by_state,
- dce120_max_clks_by_state,
- sizeof(dce120_max_clks_by_state));
-
- dce_dccg_construct(
- clk_dce, ctx, NULL, NULL, NULL);
-
- clk_dce->dprefclk_khz = 600000;
- clk_dce->base.funcs = &dce120_funcs;
-
- return &clk_dce->base;
-}
-
-#ifdef CONFIG_DRM_AMD_DC_DCN1_0
-struct dccg *dcn1_dccg_create(struct dc_context *ctx)
-{
- struct dc_debug_options *debug = &ctx->dc->debug;
- struct dc_bios *bp = ctx->dc_bios;
- struct dc_firmware_info fw_info = { { 0 } };
- struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
-
- if (clk_dce == NULL) {
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-
- clk_dce->base.ctx = ctx;
- clk_dce->base.funcs = &dcn1_funcs;
-
- clk_dce->dfs_bypass_disp_clk = 0;
-
- clk_dce->dprefclk_ss_percentage = 0;
- clk_dce->dprefclk_ss_divider = 1000;
- clk_dce->ss_on_dprefclk = false;
-
- clk_dce->dprefclk_khz = 600000;
- if (bp->integrated_info)
- clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
- if (clk_dce->dentist_vco_freq_khz == 0) {
- bp->funcs->get_firmware_info(bp, &fw_info);
- clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
- if (clk_dce->dentist_vco_freq_khz == 0)
- clk_dce->dentist_vco_freq_khz = 3600000;
- }
-
- if (!debug->disable_dfs_bypass && bp->integrated_info)
- if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
- clk_dce->dfs_bypass_enabled = true;
-
- dce_clock_read_ss_info(clk_dce);
-
- return &clk_dce->base;
-}
-#endif
-
-void dce_dccg_destroy(struct dccg **dccg)
-{
- struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
-
- kfree(clk_dce);
- *dccg = NULL;
-}
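
Annotation: the block removed above is the DCN1 dispclk/dppclk handling being folded into the new clk_mgr code elsewhere in this series. The key idea in the deleted dcn1_determine_dppclk_threshold()/dcn1_ramp_up_dispclk_with_dpp() pair: dppclk is dispclk with an optional divide-by-2, so a dispclk change that crosses max_supported_dppclk_khz must stop at that threshold, flip the per-pipe divider, and only then finish the ramp. A minimal C sketch of the threshold decision, assuming a hypothetical clk_req struct that mirrors the _khz fields used above:

#include <stdbool.h>

/* Sketch only -- not the kernel code. Condenses the early-return cases of
 * the removed dcn1_determine_dppclk_threshold(); the struct is hypothetical.
 */
struct clk_req {
	int cur_dispclk_khz;
	int cur_dppclk_khz;
	int new_dispclk_khz;
	int new_dppclk_khz;
	int max_supported_dppclk_khz;
};

static int dppclk_ramp_threshold(const struct clk_req *r)
{
	bool increase = r->new_dispclk_khz > r->cur_dispclk_khz;
	bool cur_div2 = r->cur_dispclk_khz > r->cur_dppclk_khz;
	bool req_div2 = r->new_dispclk_khz > r->new_dppclk_khz;

	if (increase) {
		/* one step suffices if already divided, below the cap,
		 * or the target does not need the divider
		 */
		if (cur_div2 ||
		    r->new_dispclk_khz <= r->max_supported_dppclk_khz ||
		    !req_div2)
			return r->new_dispclk_khz;
	} else {
		/* mirror image of the upward case */
		if (!cur_div2 ||
		    r->cur_dispclk_khz <= r->max_supported_dppclk_khz ||
		    req_div2)
			return r->new_dispclk_khz;
	}

	/* otherwise: stop at the threshold, flip the divider, finish ramp */
	return r->max_supported_dppclk_khz;
}

The removed dcn1_ramp_up_dispclk_with_dpp() then programmed dispclk to the returned value, updated each pipe's dpp divider, and, when the threshold differed from the target, set the final dispclk in a second step.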
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 64dc75378541..c83a7f05f14c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -233,6 +233,16 @@ struct dce_hwseq_registers {
uint32_t DOMAIN5_PG_CONFIG;
uint32_t DOMAIN6_PG_CONFIG;
uint32_t DOMAIN7_PG_CONFIG;
+ uint32_t DOMAIN8_PG_CONFIG;
+ uint32_t DOMAIN9_PG_CONFIG;
+ uint32_t DOMAIN10_PG_CONFIG;
+ uint32_t DOMAIN11_PG_CONFIG;
+ uint32_t DOMAIN16_PG_CONFIG;
+ uint32_t DOMAIN17_PG_CONFIG;
+ uint32_t DOMAIN18_PG_CONFIG;
+ uint32_t DOMAIN19_PG_CONFIG;
+ uint32_t DOMAIN20_PG_CONFIG;
+ uint32_t DOMAIN21_PG_CONFIG;
uint32_t DOMAIN0_PG_STATUS;
uint32_t DOMAIN1_PG_STATUS;
uint32_t DOMAIN2_PG_STATUS;
@@ -241,6 +251,16 @@ struct dce_hwseq_registers {
uint32_t DOMAIN5_PG_STATUS;
uint32_t DOMAIN6_PG_STATUS;
uint32_t DOMAIN7_PG_STATUS;
+ uint32_t DOMAIN8_PG_STATUS;
+ uint32_t DOMAIN9_PG_STATUS;
+ uint32_t DOMAIN10_PG_STATUS;
+ uint32_t DOMAIN11_PG_STATUS;
+ uint32_t DOMAIN16_PG_STATUS;
+ uint32_t DOMAIN17_PG_STATUS;
+ uint32_t DOMAIN18_PG_STATUS;
+ uint32_t DOMAIN19_PG_STATUS;
+ uint32_t DOMAIN20_PG_STATUS;
+ uint32_t DOMAIN21_PG_STATUS;
uint32_t DIO_MEM_PWR_CTRL;
uint32_t DCCG_GATE_DISABLE_CNTL;
uint32_t DCCG_GATE_DISABLE_CNTL2;
@@ -262,6 +282,8 @@ struct dce_hwseq_registers {
uint32_t D2VGA_CONTROL;
uint32_t D3VGA_CONTROL;
uint32_t D4VGA_CONTROL;
+ uint32_t D5VGA_CONTROL;
+ uint32_t D6VGA_CONTROL;
uint32_t VGA_TEST_CONTROL;
/* MMHUB registers. read only. temporary hack */
uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
@@ -489,6 +511,26 @@ struct dce_hwseq_registers {
type DOMAIN6_POWER_GATE; \
type DOMAIN7_POWER_FORCEON; \
type DOMAIN7_POWER_GATE; \
+ type DOMAIN8_POWER_FORCEON; \
+ type DOMAIN8_POWER_GATE; \
+ type DOMAIN9_POWER_FORCEON; \
+ type DOMAIN9_POWER_GATE; \
+ type DOMAIN10_POWER_FORCEON; \
+ type DOMAIN10_POWER_GATE; \
+ type DOMAIN11_POWER_FORCEON; \
+ type DOMAIN11_POWER_GATE; \
+ type DOMAIN16_POWER_FORCEON; \
+ type DOMAIN16_POWER_GATE; \
+ type DOMAIN17_POWER_FORCEON; \
+ type DOMAIN17_POWER_GATE; \
+ type DOMAIN18_POWER_FORCEON; \
+ type DOMAIN18_POWER_GATE; \
+ type DOMAIN19_POWER_FORCEON; \
+ type DOMAIN19_POWER_GATE; \
+ type DOMAIN20_POWER_FORCEON; \
+ type DOMAIN20_POWER_GATE; \
+ type DOMAIN21_POWER_FORCEON; \
+ type DOMAIN21_POWER_GATE; \
type DOMAIN0_PGFSM_PWR_STATUS; \
type DOMAIN1_PGFSM_PWR_STATUS; \
type DOMAIN2_PGFSM_PWR_STATUS; \
@@ -497,6 +539,16 @@ struct dce_hwseq_registers {
type DOMAIN5_PGFSM_PWR_STATUS; \
type DOMAIN6_PGFSM_PWR_STATUS; \
type DOMAIN7_PGFSM_PWR_STATUS; \
+ type DOMAIN8_PGFSM_PWR_STATUS; \
+ type DOMAIN9_PGFSM_PWR_STATUS; \
+ type DOMAIN10_PGFSM_PWR_STATUS; \
+ type DOMAIN11_PGFSM_PWR_STATUS; \
+ type DOMAIN16_PGFSM_PWR_STATUS; \
+ type DOMAIN17_PGFSM_PWR_STATUS; \
+ type DOMAIN18_PGFSM_PWR_STATUS; \
+ type DOMAIN19_PGFSM_PWR_STATUS; \
+ type DOMAIN20_PGFSM_PWR_STATUS; \
+ type DOMAIN21_PGFSM_PWR_STATUS; \
type DCFCLK_GATE_DIS; \
type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
type VGA_TEST_ENABLE; \
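
Annotation: the DOMAIN8..DOMAIN21 additions above extend both the register structs and the backslash-continued field list. For readers new to the pattern: the field list is an X-macro expanded twice, once with a narrow type for bit offsets and once with a wide type for masks. A simplified sketch (macro and struct names shortened here; the driver's real list is the HWSEQ field-list macro this hunk extends):

#include <stdint.h>

#define PG_FIELD_LIST(type) \
	type DOMAIN8_POWER_FORCEON; \
	type DOMAIN8_POWER_GATE; \
	type DOMAIN8_PGFSM_PWR_STATUS;

struct hwseq_shift { PG_FIELD_LIST(uint8_t) };   /* bit positions */
struct hwseq_mask  { PG_FIELD_LIST(uint32_t) };  /* bit masks */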
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 366bc8c2c643..3e18ea84b1f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -645,7 +645,7 @@ static bool dce110_link_encoder_validate_hdmi_output(
return false;
/* DCE11 HW does not support 420 */
- if (!enc110->base.features.ycbcr420_supported &&
+ if (!enc110->base.features.hdmi_ycbcr420_supported &&
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
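
Annotation: this hunk tracks the split of ycbcr420_supported into per-signal hdmi_ycbcr420_supported / dp_ycbcr420_supported capability bits (the resource files later in this patch set both). A hedged sketch of a per-signal lookup; the helper and the local type stand-ins are illustrative, only the flag names come from the patch:

#include <stdbool.h>

/* Minimal local stand-ins for the driver types (illustration only). */
enum signal_type {
	SIGNAL_TYPE_NONE,
	SIGNAL_TYPE_HDMI_TYPE_A,
	SIGNAL_TYPE_DISPLAY_PORT,
	SIGNAL_TYPE_DISPLAY_PORT_MST,
};

struct encoder_feature_support {
	bool hdmi_ycbcr420_supported;
	bool dp_ycbcr420_supported;
};

static bool supports_ycbcr420(const struct encoder_feature_support *f,
			      enum signal_type signal)
{
	switch (signal) {
	case SIGNAL_TYPE_HDMI_TYPE_A:
		return f->hdmi_ycbcr420_supported;
	case SIGNAL_TYPE_DISPLAY_PORT:
	case SIGNAL_TYPE_DISPLAY_PORT_MST:
		return f->dp_ycbcr420_supported;
	default:
		return false;
	}
}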
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index c47c81883d3c..cce0d18f91da 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -908,7 +908,6 @@ static void dce110_stream_encoder_dp_blank(
struct stream_encoder *enc)
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
- uint32_t retries = 0;
uint32_t reg1 = 0;
uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
@@ -926,30 +925,28 @@ static void dce110_stream_encoder_dp_blank(
* (2 = start of the next vertical blank) */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
/* Larger delay to wait until VBLANK - use max retry of
- * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
- * a little more because we may not trust delay accuracy.
- */
+ * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+ * a little more because we may not trust delay accuracy.
+ */
max_retries = DP_BLANK_MAX_RETRY * 150;
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
/* the encoder stops sending the video stream
- * at the start of the vertical blanking.
- * Poll for DP_VID_STREAM_STATUS == 0
- */
+ * at the start of the vertical blanking.
+ * Poll for DP_VID_STREAM_STATUS == 0
+ */
REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS,
0,
10, max_retries);
- ASSERT(retries <= max_retries);
-
/* Tell the DP encoder to ignore timing from CRTC, must be done after
- * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
- * complete, stream status will be stuck in video stream enabled state,
- * i.e. DP_VID_STREAM_STATUS stuck at 1.
- */
+ * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
+ * complete, stream status will be stuck in video stream enabled state,
+ * i.e. DP_VID_STREAM_STATUS stuck at 1.
+ */
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
}
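
Annotation: the dp_blank change above drops the local retries counter and the stale ASSERT. REG_WAIT already performs the bounded poll internally (10 us per attempt, up to max_retries attempts), so there was nothing left for the caller to assert on. A generic sketch of that poll shape, assuming REG_WAIT-like semantics rather than quoting the driver macro:

#include <linux/types.h>
#include <linux/delay.h>

/* Sketch of a REG_WAIT-style bounded poll (not the driver macro itself).
 * Returns false on timeout so the caller can log instead of asserting on
 * a counter it never owned.
 */
static bool poll_field(uint32_t (*read_field)(void), uint32_t expected,
		       unsigned int delay_us, unsigned int max_try)
{
	unsigned int i;

	for (i = 0; i < max_try; i++) {
		if (read_field() == expected)
			return true;
		udelay(delay_us);
	}
	return false; /* timed out */
}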
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index 74c05e878807..87771676acac 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -105,74 +105,30 @@ bool dce100_enable_display_power_gating(
return false;
}
-static void dce100_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context)
-{
- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
- pp_display_cfg->avail_mclk_switch_time_us =
- dce110_get_min_vblank_time_us(context);
- /*pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
- / MEMORY_TYPE_MULTIPLIER;*/
-
- dce110_fill_display_configs(context, pp_display_cfg);
-
- if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
- struct dm_pp_display_configuration)) != 0)
- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
- dc->prev_display_config = *pp_display_cfg;
-}
-
-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet
- */
-static uint32_t get_max_pixel_clock_for_all_paths(
- struct dc *dc,
- struct dc_state *context)
+void dce100_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
{
- uint32_t max_pix_clk = 0;
- int i;
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == NULL)
- continue;
-
- /* do not check under lay */
- if (pipe_ctx->top_pipe)
- continue;
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
- if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
- max_pix_clk =
- pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
- }
- return max_pix_clk;
+ dc->res_pool->clk_mgr->funcs->update_clocks(
+ dc->res_pool->clk_mgr,
+ context,
+ false);
}
-void dce100_set_bandwidth(
+void dce100_optimize_bandwidth(
struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed)
+ struct dc_state *context)
{
- struct dc_clocks req_clks;
-
- req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
- req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
-
dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
- dc->res_pool->dccg->funcs->update_clocks(
- dc->res_pool->dccg,
- &req_clks,
- decrease_allowed);
-
- dce100_pplib_apply_display_requirements(dc, context);
+ dc->res_pool->clk_mgr->funcs->update_clocks(
+ dc->res_pool->clk_mgr,
+ context,
+ true);
}
-
/**************************************************************************/
void dce100_hw_sequencer_construct(struct dc *dc)
@@ -180,8 +136,7 @@ void dce100_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
- dc->hwss.set_bandwidth = dce100_set_bandwidth;
- dc->hwss.pplib_apply_display_requirements =
- dce100_pplib_apply_display_requirements;
+ dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
+ dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth;
}
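
Annotation: this is the core interface change of the patch for DCE10: the single set_bandwidth(dc, context, decrease_allowed) hook becomes a prepare/optimize pair, with clock updates routed through res_pool->clk_mgr and the trailing bool of update_clocks (safe_to_lower) encoding the old decrease_allowed flag. A sketch of the intended calling order around a mode change; the caller shown is hypothetical (the real sequencing lives in core dc code) and the driver types are assumed:

static void apply_new_state(struct dc *dc, struct dc_state *context)
{
	/* raise clocks and set safe watermarks before reprogramming pipes */
	dc->hwss.prepare_bandwidth(dc, context);   /* safe_to_lower = false */

	/* ... program timings, surfaces and streams for `context` ... */

	/* then relax to the minimum the new state actually needs */
	dc->hwss.optimize_bandwidth(dc, context);  /* safe_to_lower = true */
}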
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
index c6ec0ed6ec3d..acd418515346 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h
@@ -33,10 +33,9 @@ struct dc_state;
void dce100_hw_sequencer_construct(struct dc *dc);
-void dce100_set_bandwidth(
+void dce100_prepare_bandwidth(
struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed);
+ struct dc_state *context);
bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id,
struct dc_bios *dcb,
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 14754a87156c..6ae51a5dfc04 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -36,11 +36,11 @@
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
+#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_ipp.h"
#include "dce/dce_transform.h"
#include "dce/dce_opp.h"
-#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
@@ -137,15 +137,15 @@ static const struct dce110_timing_generator_offsets dce100_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -722,8 +722,8 @@ static void destruct(struct dce110_resource_pool *pool)
dce_aud_destroy(&pool->base.audios[i]);
}
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.abm != NULL)
dce_abm_destroy(&pool->base.abm);
@@ -767,7 +767,7 @@ bool dce100_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO implement when needed but for now hardcode max value*/
context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
context->bw.dce.dispclk_khz = 0;
context->bw.dce.yclk_khz = 0;
@@ -860,7 +860,6 @@ static bool construct(
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -908,11 +907,11 @@ static bool construct(
}
}
- pool->base.dccg = dce_dccg_create(ctx,
+ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -938,12 +937,6 @@ static bool construct(
goto res_create_fail;
}
- /* get static clock information for PPLIB or firmware, save
- * max_clock_state
- */
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
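
Annotation: two things happen in the dce100_resource.c hunks above: the pool's dccg member becomes clk_mgr (create/destroy renamed to match), and MEMORY_TYPE_MULTIPLIER gains a _CZ suffix marking it as the Carrizo-class memory-type factor. That multiplier feeds the YCLK derivation quoted later in this patch as "YCLK = UMACLK*m_memoryTypeMultiplier". A sketch; the value 4 is an assumption taken from the DCE clock-manager header, not from this hunk:

#define MEMORY_TYPE_MULTIPLIER_CZ 4  /* assumed value; Carrizo-class memory */

static inline int yclk_khz_from_umaclk(int umaclk_khz)
{
	return umaclk_khz * MEMORY_TYPE_MULTIPLIER_CZ;
}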
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
index 1f7f25013217..52d50e24a995 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c
@@ -64,65 +64,37 @@ static const struct dce110_compressor_reg_offsets reg_offsets[] = {
static const uint32_t dce11_one_lpt_channel_max_resolution = 2560 * 1600;
-enum fbc_idle_force {
- /* Bit 0 - Display registers updated */
- FBC_IDLE_FORCE_DISPLAY_REGISTER_UPDATE = 0x00000001,
-
- /* Bit 2 - FBC_GRPH_COMP_EN register updated */
- FBC_IDLE_FORCE_GRPH_COMP_EN = 0x00000002,
- /* Bit 3 - FBC_SRC_SEL register updated */
- FBC_IDLE_FORCE_SRC_SEL_CHANGE = 0x00000004,
- /* Bit 4 - FBC_MIN_COMPRESSION register updated */
- FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE = 0x00000008,
- /* Bit 5 - FBC_ALPHA_COMP_EN register updated */
- FBC_IDLE_FORCE_ALPHA_COMP_EN = 0x00000010,
- /* Bit 6 - FBC_ZERO_ALPHA_CHUNK_SKIP_EN register updated */
- FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN = 0x00000020,
- /* Bit 7 - FBC_FORCE_COPY_TO_COMP_BUF register updated */
- FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF = 0x00000040,
-
- /* Bit 24 - Memory write to region 0 defined by MC registers. */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION0 = 0x01000000,
- /* Bit 25 - Memory write to region 1 defined by MC registers */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION1 = 0x02000000,
- /* Bit 26 - Memory write to region 2 defined by MC registers */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION2 = 0x04000000,
- /* Bit 27 - Memory write to region 3 defined by MC registers. */
- FBC_IDLE_FORCE_MEMORY_WRITE_TO_REGION3 = 0x08000000,
-
- /* Bit 28 - Memory write from any client other than MCIF */
- FBC_IDLE_FORCE_MEMORY_WRITE_OTHER_THAN_MCIF = 0x10000000,
- /* Bit 29 - CG statics screen signal is inactive */
- FBC_IDLE_FORCE_CG_STATIC_SCREEN_IS_INACTIVE = 0x20000000,
-};
-
-
static uint32_t align_to_chunks_number_per_line(uint32_t pixels)
{
return 256 * ((pixels + 255) / 256);
}
-static void reset_lb_on_vblank(struct dc_context *ctx)
+static void reset_lb_on_vblank(struct compressor *compressor, uint32_t crtc_inst)
{
- uint32_t value, frame_count;
+ uint32_t value;
+ uint32_t frame_count;
+ uint32_t status_pos;
uint32_t retry = 0;
- uint32_t status_pos =
- dm_read_reg(ctx, mmCRTC_STATUS_POSITION);
+ struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+
+ cp110->offsets = reg_offsets[crtc_inst];
+
+ status_pos = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION));
/* Only if CRTC is enabled and counter is moving we wait for one frame. */
- if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) {
+ if (status_pos != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_POSITION))) {
/* Resetting LB on VBlank */
- value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
- dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
+ dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
- frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT);
+ frame_count = dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT));
for (retry = 10000; retry > 0; retry--) {
- if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT))
+ if (frame_count != dm_read_reg(compressor->ctx, DCP_REG(mmCRTC_STATUS_FRAME_COUNT)))
break;
udelay(10);
}
@@ -130,13 +102,11 @@ static void reset_lb_on_vblank(struct dc_context *ctx)
dm_error("Frame count did not increase for 100ms.\n");
/* Resetting LB on VBlank */
- value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL);
+ value = dm_read_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL));
set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL);
set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2);
- dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value);
-
+ dm_write_reg(compressor->ctx, DCP_REG(mmLB_SYNC_RESET_SEL), value);
}
-
}
static void wait_for_fbc_state_changed(
@@ -226,10 +196,10 @@ void dce110_compressor_enable_fbc(
uint32_t addr;
uint32_t value, misc_value;
-
addr = mmFBC_CNTL;
value = dm_read_reg(compressor->ctx, addr);
set_reg_field_value(value, 1, FBC_CNTL, FBC_GRPH_COMP_EN);
+ /* params->inst is valid HW CRTC instance start from 0 */
+ /* params->inst is a valid HW CRTC instance, starting from 0 */
set_reg_field_value(
value,
params->inst,
@@ -238,8 +208,10 @@ void dce110_compressor_enable_fbc(
/* Keep track of enum controller_id FBC is attached to */
compressor->is_enabled = true;
- compressor->attached_inst = params->inst;
- cp110->offsets = reg_offsets[params->inst];
+ /* attached_inst is the SW CRTC instance, starting from 1;
+ * 0 = CONTROLLER_ID_UNDEFINED means no attached CRTC
+ */
+ compressor->attached_inst = params->inst + CONTROLLER_ID_D0;
/* Toggle it as there is bug in HW */
set_reg_field_value(value, 0, FBC_CNTL, FBC_GRPH_COMP_EN);
@@ -268,9 +240,10 @@ void dce110_compressor_enable_fbc(
void dce110_compressor_disable_fbc(struct compressor *compressor)
{
struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor);
+ uint32_t crtc_inst = 0;
if (compressor->options.bits.FBC_SUPPORT) {
- if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) {
+ if (dce110_compressor_is_fbc_enabled_in_hw(compressor, &crtc_inst)) {
uint32_t reg_data;
/* Turn off compression */
reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL);
@@ -284,8 +257,10 @@ void dce110_compressor_disable_fbc(struct compressor *compressor)
wait_for_fbc_state_changed(cp110, false);
}
- /* Sync line buffer - dce100/110 only*/
- reset_lb_on_vblank(compressor->ctx);
+ /* Sync the line buffer that FBC was attached to - dce100/110 only */
+ if (crtc_inst > CONTROLLER_ID_UNDEFINED && crtc_inst < CONTROLLER_ID_D3)
+ reset_lb_on_vblank(compressor,
+ crtc_inst - CONTROLLER_ID_D0);
}
}
@@ -328,6 +303,8 @@ void dce110_compressor_program_compressed_surface_address_and_pitch(
uint32_t compressed_surf_address_low_part =
compressor->compr_surface_address.addr.low_part;
+ cp110->offsets = reg_offsets[params->inst];
+
/* Clear content first. */
dm_write_reg(
compressor->ctx,
@@ -410,13 +387,7 @@ void dce110_compressor_set_fbc_invalidation_triggers(
value = dm_read_reg(compressor->ctx, addr);
set_reg_field_value(
value,
- fbc_trigger |
- FBC_IDLE_FORCE_GRPH_COMP_EN |
- FBC_IDLE_FORCE_SRC_SEL_CHANGE |
- FBC_IDLE_FORCE_MIN_COMPRESSION_CHANGE |
- FBC_IDLE_FORCE_ALPHA_COMP_EN |
- FBC_IDLE_FORCE_ZERO_ALPHA_CHUNK_SKIP_EN |
- FBC_IDLE_FORCE_FORCE_COPY_TO_COMP_BUF,
+ fbc_trigger,
FBC_IDLE_FORCE_CLEAR_MASK,
FBC_IDLE_FORCE_CLEAR_MASK);
dm_write_reg(compressor->ctx, addr, value);
@@ -549,7 +520,7 @@ void dce110_compressor_construct(struct dce110_compressor *compressor,
compressor->base.channel_interleave_size = 0;
compressor->base.dram_channels_num = 0;
compressor->base.lpt_channels_num = 0;
- compressor->base.attached_inst = 0;
+ compressor->base.attached_inst = CONTROLLER_ID_UNDEFINED;
compressor->base.is_enabled = false;
compressor->base.funcs = &dce110_compressor_funcs;
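
Annotation: the compressor rework above distinguishes HW CRTC instances (params->inst, 0-based) from the SW controller id stored in attached_inst (1-based, with 0 = CONTROLLER_ID_UNDEFINED meaning no attached CRTC). That is what lets disable_fbc() recover which CRTC's line buffer to resync. A sketch of the two mappings; the enum values here are illustrative stand-ins for the driver's controller-id enum:

enum {
	CONTROLLER_ID_UNDEFINED = 0,  /* "not attached" sentinel */
	CONTROLLER_ID_D0 = 1,
	CONTROLLER_ID_D3 = 4,
};

static int sw_inst_from_hw(int hw_crtc_inst)
{
	return hw_crtc_inst + CONTROLLER_ID_D0;   /* 0-based HW -> 1-based SW */
}

static int hw_inst_from_sw(int controller_id)
{
	return controller_id - CONTROLLER_ID_D0;  /* inverse mapping */
}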
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index b459867a05b2..6349ba7bec7c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -548,14 +548,14 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf,
regamma_params->hw_points_num = hw_points;
- i = 1;
- for (k = 0; k < 16 && i < 16; k++) {
+ k = 0;
+ for (i = 1; i < 16; i++) {
if (seg_distr[k] != -1) {
regamma_params->arr_curve_points[k].segments_num = seg_distr[k];
regamma_params->arr_curve_points[i].offset =
regamma_params->arr_curve_points[k].offset + (1 << seg_distr[k]);
}
- i++;
+ k++;
}
if (seg_distr[k] != -1)
@@ -1085,7 +1085,6 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
link->dc->hwss.edp_backlight_control(link, true);
- stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL;
}
}
void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
@@ -1192,8 +1191,8 @@ static void build_audio_output(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
- state->dis_clk->funcs->get_dp_ref_clk_frequency(
- state->dis_clk);
+ state->dccg->funcs->get_dp_ref_clk_frequency(
+ state->dccg);
}
audio_output->pll_info.feed_back_divider =
@@ -1547,6 +1546,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
int i;
struct dc_link *edp_link_to_turnoff = NULL;
struct dc_link *edp_link = get_link_for_edp(dc);
+ struct dc_bios *bios = dc->ctx->dc_bios;
bool can_edp_fast_boot_optimize = false;
bool apply_edp_fast_boot_optimization = false;
@@ -1573,6 +1573,20 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
if (context->streams[i]->signal == SIGNAL_TYPE_EDP) {
context->streams[i]->apply_edp_fast_boot_optimization = true;
apply_edp_fast_boot_optimization = true;
+
+ /* After resume from S4/S5, vbios may have posted eDP, so the
+ * previous dpms_off state no longer makes sense.
+ * Update dpms_off to align HW and SW state by checking the
+ * vBios scratch register.
+ */
+ if (bios->funcs->is_active_display) {
+ const struct connector_device_tag_info *device_tag = &(edp_link->device_tag);
+
+ if (bios->funcs->is_active_display(bios,
+ context->streams[i]->signal,
+ device_tag))
+ context->streams[i]->dpms_off = false;
+ }
}
}
}
@@ -1748,44 +1762,17 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
}
-/* unit: in_khz before mode set, get pixel clock from context. ASIC register
- * may not be programmed yet
- */
-static uint32_t get_max_pixel_clock_for_all_paths(
- struct dc *dc,
- struct dc_state *context)
-{
- uint32_t max_pix_clk = 0;
- int i;
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == NULL)
- continue;
-
- /* do not check under lay */
- if (pipe_ctx->top_pipe)
- continue;
-
- if (pipe_ctx->stream_res.pix_clk_params.requested_pix_clk > max_pix_clk)
- max_pix_clk =
- pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
- }
-
- return max_pix_clk;
-}
-
/*
* Check if FBC can be enabled
*/
static bool should_enable_fbc(struct dc *dc,
- struct dc_state *context,
- uint32_t *pipe_idx)
+ struct dc_state *context,
+ uint32_t *pipe_idx)
{
uint32_t i;
struct pipe_ctx *pipe_ctx = NULL;
struct resource_context *res_ctx = &context->res_ctx;
+ unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
ASSERT(dc->fbc_compressor);
@@ -1800,14 +1787,28 @@ static bool should_enable_fbc(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (res_ctx->pipe_ctx[i].stream) {
+
pipe_ctx = &res_ctx->pipe_ctx[i];
- *pipe_idx = i;
- break;
+
+ if (!pipe_ctx)
+ continue;
+
+ /* fbc not applicable on underlay pipe */
+ if (pipe_ctx->pipe_idx != underlay_idx) {
+ *pipe_idx = i;
+ break;
+ }
}
}
- /* Pipe context should be found */
- ASSERT(pipe_ctx);
+ if (i == dc->res_pool->pipe_count)
+ return false;
+
+ if (!pipe_ctx->stream->sink)
+ return false;
+
+ if (!pipe_ctx->stream->sink->link)
+ return false;
/* Only supports eDP */
if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
@@ -1831,8 +1832,9 @@ static bool should_enable_fbc(struct dc *dc,
/*
* Enable FBC
*/
-static void enable_fbc(struct dc *dc,
- struct dc_state *context)
+static void enable_fbc(
+ struct dc *dc,
+ struct dc_state *context)
{
uint32_t pipe_idx = 0;
@@ -1842,10 +1844,9 @@ static void enable_fbc(struct dc *dc,
struct compressor *compr = dc->fbc_compressor;
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
-
params.source_view_width = pipe_ctx->stream->timing.h_addressable;
params.source_view_height = pipe_ctx->stream->timing.v_addressable;
-
+ params.inst = pipe_ctx->stream_res.tg->inst;
compr->compr_surface_address.quad_part = dc->ctx->fbc_gpu_addr;
compr->funcs->surface_address_and_pitch(compr, &params);
@@ -2060,10 +2061,10 @@ enum dc_status dce110_apply_ctx_to_hw(
return status;
}
- dcb->funcs->set_scratch_critical_state(dcb, false);
-
if (dc->fbc_compressor)
- enable_fbc(dc, context);
+ enable_fbc(dc, dc->current_state);
+
+ dcb->funcs->set_scratch_critical_state(dcb, false);
return DC_OK;
}
@@ -2296,7 +2297,7 @@ static void dce110_enable_per_frame_crtc_position_reset(
int i;
gsl_params.gsl_group = 0;
- gsl_params.gsl_master = grouped_pipes[0]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst;
+ gsl_params.gsl_master = 0;
for (i = 0; i < group_size; i++)
grouped_pipes[i]->stream_res.tg->funcs->setup_global_swap_lock(
@@ -2385,191 +2386,33 @@ static void init_hw(struct dc *dc)
}
-void dce110_fill_display_configs(
- const struct dc_state *context,
- struct dm_pp_display_configuration *pp_display_cfg)
-{
- int j;
- int num_cfgs = 0;
-
- for (j = 0; j < context->stream_count; j++) {
- int k;
-
- const struct dc_stream_state *stream = context->streams[j];
- struct dm_pp_single_disp_config *cfg =
- &pp_display_cfg->disp_configs[num_cfgs];
- const struct pipe_ctx *pipe_ctx = NULL;
-
- for (k = 0; k < MAX_PIPES; k++)
- if (stream == context->res_ctx.pipe_ctx[k].stream) {
- pipe_ctx = &context->res_ctx.pipe_ctx[k];
- break;
- }
-
- ASSERT(pipe_ctx != NULL);
-
- /* only notify active stream */
- if (stream->dpms_off)
- continue;
-
- num_cfgs++;
- cfg->signal = pipe_ctx->stream->signal;
- cfg->pipe_idx = pipe_ctx->stream_res.tg->inst;
- cfg->src_height = stream->src.height;
- cfg->src_width = stream->src.width;
- cfg->ddi_channel_mapping =
- stream->sink->link->ddi_channel_mapping.raw;
- cfg->transmitter =
- stream->sink->link->link_enc->transmitter;
- cfg->link_settings.lane_count =
- stream->sink->link->cur_link_settings.lane_count;
- cfg->link_settings.link_rate =
- stream->sink->link->cur_link_settings.link_rate;
- cfg->link_settings.link_spread =
- stream->sink->link->cur_link_settings.link_spread;
- cfg->sym_clock = stream->phy_pix_clk;
- /* Round v_refresh*/
- cfg->v_refresh = stream->timing.pix_clk_khz * 1000;
- cfg->v_refresh /= stream->timing.h_total;
- cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2)
- / stream->timing.v_total;
- }
-
- pp_display_cfg->display_count = num_cfgs;
-}
-
-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context)
-{
- uint8_t j;
- uint32_t min_vertical_blank_time = -1;
-
- for (j = 0; j < context->stream_count; j++) {
- struct dc_stream_state *stream = context->streams[j];
- uint32_t vertical_blank_in_pixels = 0;
- uint32_t vertical_blank_time = 0;
-
- vertical_blank_in_pixels = stream->timing.h_total *
- (stream->timing.v_total
- - stream->timing.v_addressable);
-
- vertical_blank_time = vertical_blank_in_pixels
- * 1000 / stream->timing.pix_clk_khz;
-
- if (min_vertical_blank_time > vertical_blank_time)
- min_vertical_blank_time = vertical_blank_time;
- }
-
- return min_vertical_blank_time;
-}
-
-static int determine_sclk_from_bounding_box(
- const struct dc *dc,
- int required_sclk)
-{
- int i;
-
- /*
- * Some asics do not give us sclk levels, so we just report the actual
- * required sclk
- */
- if (dc->sclk_lvls.num_levels == 0)
- return required_sclk;
-
- for (i = 0; i < dc->sclk_lvls.num_levels; i++) {
- if (dc->sclk_lvls.clocks_in_khz[i] >= required_sclk)
- return dc->sclk_lvls.clocks_in_khz[i];
- }
- /*
- * even maximum level could not satisfy requirement, this
- * is unexpected at this stage, should have been caught at
- * validation time
- */
- ASSERT(0);
- return dc->sclk_lvls.clocks_in_khz[dc->sclk_lvls.num_levels - 1];
-}
-static void pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context)
+void dce110_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
{
- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
- pp_display_cfg->all_displays_in_sync =
- context->bw.dce.all_displays_in_sync;
- pp_display_cfg->nb_pstate_switch_disable =
- context->bw.dce.nbp_state_change_enable == false;
- pp_display_cfg->cpu_cc6_disable =
- context->bw.dce.cpuc_state_change_enable == false;
- pp_display_cfg->cpu_pstate_disable =
- context->bw.dce.cpup_state_change_enable == false;
- pp_display_cfg->cpu_pstate_separation_time =
- context->bw.dce.blackout_recovery_time_us;
-
- pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
- / MEMORY_TYPE_MULTIPLIER;
-
- pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
- dc,
- context->bw.dce.sclk_khz);
-
- pp_display_cfg->min_engine_clock_deep_sleep_khz
- = context->bw.dce.sclk_deep_sleep_khz;
-
- pp_display_cfg->avail_mclk_switch_time_us =
- dce110_get_min_vblank_time_us(context);
- /* TODO: dce11.2*/
- pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
-
- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
+ struct clk_mgr *dccg = dc->res_pool->clk_mgr;
- dce110_fill_display_configs(context, pp_display_cfg);
+ dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
- /* TODO: is this still applicable?*/
- if (pp_display_cfg->display_count == 1) {
- const struct dc_crtc_timing *timing =
- &context->streams[0]->timing;
-
- pp_display_cfg->crtc_index =
- pp_display_cfg->disp_configs[0].pipe_idx;
- pp_display_cfg->line_time_in_us = timing->h_total * 1000
- / timing->pix_clk_khz;
- }
-
- if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
- struct dm_pp_display_configuration)) != 0)
- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
- dc->prev_display_config = *pp_display_cfg;
+ dccg->funcs->update_clocks(
+ dccg,
+ context,
+ false);
}
-static void dce110_set_bandwidth(
+void dce110_optimize_bandwidth(
struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed)
+ struct dc_state *context)
{
- struct dc_clocks req_clks;
- struct dccg *dccg = dc->res_pool->dccg;
-
- req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
- req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
+ struct clk_mgr *dccg = dc->res_pool->clk_mgr;
- if (decrease_allowed)
- dce110_set_displaymarks(dc, context);
- else
- dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
- if (dccg->funcs->update_dfs_bypass)
- dccg->funcs->update_dfs_bypass(
- dccg,
- dc,
- context,
- req_clks.dispclk_khz);
+ dce110_set_displaymarks(dc, context);
dccg->funcs->update_clocks(
dccg,
- &req_clks,
- decrease_allowed);
- pplib_apply_display_requirements(dc, context);
+ context,
+ true);
}
static void dce110_program_front_end_for_pipe(
@@ -2580,7 +2423,6 @@ static void dce110_program_front_end_for_pipe(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct xfm_grph_csc_adjustment adjust;
struct out_csc_color_matrix tbl_entry;
- unsigned int underlay_idx = dc->res_pool->underlay_pipe_index;
unsigned int i;
DC_LOGGER_INIT();
memset(&tbl_entry, 0, sizeof(tbl_entry));
@@ -2621,15 +2463,6 @@ static void dce110_program_front_end_for_pipe(
program_scaler(dc, pipe_ctx);
- /* fbc not applicable on Underlay pipe */
- if (dc->fbc_compressor && old_pipe->stream &&
- pipe_ctx->pipe_idx != underlay_idx) {
- if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
- dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
- else
- enable_fbc(dc, dc->current_state);
- }
-
mi->funcs->mem_input_program_surface_config(
mi,
plane_state->format,
@@ -2706,6 +2539,9 @@ static void dce110_apply_ctx_for_surface(
if (num_planes == 0)
return;
+ if (dc->fbc_compressor)
+ dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -2748,6 +2584,9 @@ static void dce110_apply_ctx_for_surface(
(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
+
+ if (dc->fbc_compressor)
+ enable_fbc(dc, dc->current_state);
}
static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
@@ -2774,28 +2613,6 @@ static void dce110_wait_for_mpcc_disconnect(
/* do nothing*/
}
-static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
- enum dc_color_space colorspace,
- uint16_t *matrix)
-{
- int i;
- struct out_csc_color_matrix tbl_entry;
-
- if (pipe_ctx->stream->csc_color_matrix.enable_adjustment
- == true) {
- enum dc_color_space color_space =
- pipe_ctx->stream->output_color_space;
-
- //uint16_t matrix[12];
- for (i = 0; i < 12; i++)
- tbl_entry.regval[i] = pipe_ctx->stream->csc_color_matrix.matrix[i];
-
- tbl_entry.color_space = color_space;
- //tbl_entry.regval = matrix;
- pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment(pipe_ctx->plane_res.xfm, &tbl_entry);
- }
-}
-
void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
@@ -2844,13 +2661,8 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.xfm, attributes);
}
-static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}
-
-static void optimize_shared_resources(struct dc *dc) {}
-
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
- .program_csc_matrix = program_csc_matrix,
.init_hw = init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
@@ -2873,7 +2685,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.enable_display_power_gating = dce110_enable_display_power_gating,
.disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock,
- .set_bandwidth = dce110_set_bandwidth,
+ .prepare_bandwidth = dce110_prepare_bandwidth,
+ .optimize_bandwidth = dce110_optimize_bandwidth,
.set_drr = set_drr,
.get_position = get_position,
.set_static_screen_control = set_static_screen_control,
@@ -2882,9 +2695,6 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.setup_stereo = NULL,
.set_avmute = dce110_set_avmute,
.wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
- .ready_shared_resources = ready_shared_resources,
- .optimize_shared_resources = optimize_shared_resources,
- .pplib_apply_display_requirements = pplib_apply_display_requirements,
.edp_backlight_control = hwss_edp_backlight_control,
.edp_power_control = hwss_edp_power_control,
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
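
Annotation: besides the prepare/optimize switch, dce110_hw_sequencer.c picks up the eDP fast-boot fix quoted above: after S4/S5 the vbios may already be driving the panel, so the stale dpms_off flag is cleared when the scratch registers report the display active. A condensed, self-contained sketch of that guard; the struct stand-ins are trimmed illustrations, not the driver's full types:

#include <stdbool.h>

struct dc_stream_state { int signal; bool dpms_off; };
struct dc_bios;
struct bios_funcs {
	bool (*is_active_display)(struct dc_bios *bios, int signal,
				  const void *device_tag);
};
struct dc_bios { const struct bios_funcs *funcs; };

static void align_dpms_with_vbios(struct dc_bios *bios,
				  struct dc_stream_state *stream,
				  const void *device_tag)
{
	/* the callback is optional, hence the function-pointer guard */
	if (bios->funcs->is_active_display &&
	    bios->funcs->is_active_display(bios, stream->signal, device_tag))
		stream->dpms_off = false; /* vbios already lit the panel */
}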
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index d6db3dbd9015..cd3e36d52a52 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -40,7 +40,6 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc_state *context);
-
void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option);
@@ -64,11 +63,13 @@ void dce110_set_safe_displaymarks(
struct resource_context *res_ctx,
const struct resource_pool *pool);
-void dce110_fill_display_configs(
- const struct dc_state *context,
- struct dm_pp_display_configuration *pp_display_cfg);
+void dce110_prepare_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
-uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
+void dce110_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7c9fd9052ee2..e33d11785b1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -31,6 +31,7 @@
#include "resource.h"
#include "dce110/dce110_resource.h"
+#include "dce/dce_clk_mgr.h"
#include "include/irq_service_interface.h"
#include "dce/dce_audio.h"
#include "dce110/dce110_timing_generator.h"
@@ -45,7 +46,6 @@
#include "dce110/dce110_transform_v.h"
#include "dce/dce_opp.h"
#include "dce110/dce110_opp_v.h"
-#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce110_tg_offsets[] = {
#define SRI(reg_name, block, id)\
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -760,8 +760,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -1173,12 +1173,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
&clks);
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
}
@@ -1201,7 +1201,6 @@ static bool construct(
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -1257,11 +1256,11 @@ static bool construct(
}
}
- pool->base.dccg = dce110_dccg_create(ctx,
+ pool->base.clk_mgr = dce110_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1287,13 +1286,6 @@ static bool construct(
goto res_create_fail;
}
- /* get static clock information for PPLIB or firmware, save
- * max_clock_state
- */
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
-
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 3ce79c208ddf..969d4e72dc94 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -35,6 +35,7 @@
#include "irq/dce110/irq_service_dce110.h"
+#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_transform.h"
#include "dce/dce_link_encoder.h"
@@ -42,7 +43,6 @@
#include "dce/dce_audio.h"
#include "dce/dce_opp.h"
#include "dce/dce_ipp.h"
-#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_hwseq.h"
@@ -148,15 +148,15 @@ static const struct dce110_timing_generator_offsets dce112_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -551,7 +551,8 @@ static struct transform *dce112_transform_create(
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
- .ycbcr420_supported = true,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = false,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -749,8 +750,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -1015,12 +1016,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
&clks);
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+ clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+ clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+ clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
return;
@@ -1056,12 +1057,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1131,7 +1132,6 @@ static bool construct(
{
unsigned int i;
struct dc_context *ctx = dc->ctx;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -1199,11 +1199,11 @@ static bool construct(
}
}
- pool->base.dccg = dce112_dccg_create(ctx,
+ pool->base.clk_mgr = dce112_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1229,13 +1229,6 @@ static bool construct(
goto res_create_fail;
}
- /* get static clock information for PPLIB or firmware, save
- * max_clock_state
- */
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
-
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
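
Annotation: the repeated low/mid/high yclk updates in these resource files all sample the PPLIB clock list the same way: lowest level, middle level (num_levels >> 1), and top level, each scaled by the memory-type multiplier and converted to fixed point via bw_frc_to_fixed(n, d), which builds the fraction n/d (so dividing the kHz product by 1000 yields MHz-scaled values). A small sketch of the sampling with hypothetical names:

#include <stdint.h>

static void pick_yclk_levels(const uint32_t *clocks_in_khz, int num_levels,
			     uint32_t out[3])
{
	out[0] = clocks_in_khz[0];                /* low  */
	out[1] = clocks_in_khz[num_levels >> 1];  /* mid  */
	out[2] = clocks_in_khz[num_levels - 1];   /* high */
}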
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 79ab5f9f9115..f12696674eb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -31,6 +31,7 @@
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dce120_resource.h"
+
#include "dce112/dce112_resource.h"
#include "dce110/dce110_resource.h"
@@ -39,7 +40,6 @@
#include "irq/dce120/irq_service_dce120.h"
#include "dce/dce_opp.h"
#include "dce/dce_clock_source.h"
-#include "dce/dce_clocks.h"
#include "dce/dce_ipp.h"
#include "dce/dce_mem_input.h"
@@ -47,6 +47,7 @@
#include "dce120/dce120_hw_sequencer.h"
#include "dce/dce_transform.h"
+#include "dce/dce_clk_mgr.h"
#include "dce/dce_audio.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
@@ -573,8 +574,8 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
}
static void read_dce_straps(
@@ -606,7 +607,8 @@ static struct audio *create_audio(
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
- .ycbcr420_supported = true,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = false,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -834,12 +836,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER, 1000);
+ mem_clks.data[0].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER,
+ mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * MEMORY_TYPE_MULTIPLIER_CZ,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -973,8 +975,8 @@ static bool construct(
}
}
- pool->base.dccg = dce120_dccg_create(ctx);
- if (pool->base.dccg == NULL) {
+ pool->base.clk_mgr = dce120_clk_mgr_create(ctx);
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto dccg_create_fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index 6c6a1a16af19..a60a90e68d91 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -76,6 +76,7 @@ void dce80_hw_sequencer_construct(struct dc *dc)
dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating;
dc->hwss.pipe_control_lock = dce_pipe_control_lock;
- dc->hwss.set_bandwidth = dce100_set_bandwidth;
+ dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth;
+ dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth;
}
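
Annotation: note that the DCE8 hunk above wires both hooks to dce100_prepare_bandwidth, so on this family the optimize phase repeats the prepare path (update_clocks with safe_to_lower = false) instead of lowering clocks; the patch does not say whether this is deliberate or a placeholder.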
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index d68f951f9869..cdd1d6b7b9f2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -37,14 +37,13 @@
#include "dce110/dce110_timing_generator.h"
#include "dce110/dce110_resource.h"
#include "dce80/dce80_timing_generator.h"
+#include "dce/dce_clk_mgr.h"
#include "dce/dce_mem_input.h"
#include "dce/dce_link_encoder.h"
#include "dce/dce_stream_encoder.h"
-#include "dce/dce_mem_input.h"
#include "dce/dce_ipp.h"
#include "dce/dce_transform.h"
#include "dce/dce_opp.h"
-#include "dce/dce_clocks.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
@@ -155,15 +154,15 @@ static const struct dce110_timing_generator_offsets dce80_tg_offsets[] = {
.reg_name = mm ## block ## id ## _ ## reg_name
-static const struct dccg_registers disp_clk_regs = {
+static const struct clk_mgr_registers disp_clk_regs = {
CLK_COMMON_REG_LIST_DCE_BASE()
};
-static const struct dccg_shift disp_clk_shift = {
+static const struct clk_mgr_shift disp_clk_shift = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(__SHIFT)
};
-static const struct dccg_mask disp_clk_mask = {
+static const struct clk_mgr_mask disp_clk_mask = {
CLK_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(_MASK)
};
@@ -779,8 +778,8 @@ static void destruct(struct dce110_resource_pool *pool)
}
}
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
if (pool->base.irqs != NULL) {
dal_irq_service_destroy(&pool->base.irqs);
@@ -793,7 +792,7 @@ bool dce80_validate_bandwidth(
{
/* TODO implement when needed but for now hardcode max value*/
context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
return true;
}
@@ -855,7 +854,6 @@ static bool dce80_construct(
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -918,11 +916,11 @@ static bool dce80_construct(
}
}
- pool->base.dccg = dce_dccg_create(ctx,
+ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -948,10 +946,6 @@ static bool dce80_construct(
goto res_create_fail;
}
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
-
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
@@ -1065,7 +1059,6 @@ static bool dce81_construct(
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -1128,11 +1121,11 @@ static bool dce81_construct(
}
}
- pool->base.dccg = dce_dccg_create(ctx,
+ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1158,10 +1151,6 @@ static bool dce81_construct(
goto res_create_fail;
}
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
-
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
@@ -1275,7 +1264,6 @@ static bool dce83_construct(
struct dc_context *ctx = dc->ctx;
struct dc_firmware_info info;
struct dc_bios *bp;
- struct dm_pp_static_clock_info static_clk_info = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -1334,11 +1322,11 @@ static bool dce83_construct(
}
}
- pool->base.dccg = dce_dccg_create(ctx,
+ pool->base.clk_mgr = dce_clk_mgr_create(ctx,
&disp_clk_regs,
&disp_clk_shift,
&disp_clk_mask);
- if (pool->base.dccg == NULL) {
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto res_create_fail;
@@ -1364,10 +1352,6 @@ static bool dce83_construct(
goto res_create_fail;
}
- if (dm_pp_get_static_clocks(ctx, &static_clk_info))
- pool->base.dccg->max_clks_state =
- static_clk_info.max_clocks_state;
-
{
struct irq_service_init_data init_data;
init_data.ctx = dc->ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 032f872be89c..55f293c8a3c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -24,7 +24,7 @@
DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o dcn10_hw_sequencer_debug.o \
dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
- dcn10_hubp.o dcn10_mpc.o \
+ dcn10_hubp.o dcn10_mpc.o dcn10_clk_mgr.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
new file mode 100644
index 000000000000..54abedbf1b43
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn10_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "core_types.h"
+
+#define TO_DCE_CLK_MGR(clocks)\
+ container_of(clocks, struct dce_clk_mgr, base)
+
+#define REG(reg) \
+ (clk_mgr_dce->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr_dce->clk_mgr_shift->field_name, clk_mgr_dce->clk_mgr_mask->field_name
+
+#define CTX \
+ clk_mgr_dce->base.ctx
+#define DC_LOGGER \
+ clk_mgr->ctx->logger
+
+void dcn1_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
+
+ pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
+ pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
+ pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
+ pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
+ pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
+ dce110_fill_display_configs(context, pp_display_cfg);
+
+ dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
+}
+
+static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+{
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+ bool dispclk_increase = new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz;
+ int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
+ bool cur_dpp_div = clk_mgr->clks.dispclk_khz > clk_mgr->clks.dppclk_khz;
+
+ /* increasing clock: current divider is 0, requested divider is 1 */
+ if (dispclk_increase) {
+ /* already divided by 2, no need to reach target clk with 2 steps*/
+ if (cur_dpp_div)
+ return new_clocks->dispclk_khz;
+
+ /* requested disp clk is lower than maximum supported dpp clk,
+ * no need to reach target clk with two steps.
+ */
+ if (new_clocks->dispclk_khz <= disp_clk_threshold)
+ return new_clocks->dispclk_khz;
+
+ /* requested dpp clk does not need the divide-by-2, still within threshold */
+ if (!request_dpp_div)
+ return new_clocks->dispclk_khz;
+
+ } else {
+ /* decreasing clock: current dppclk is divided by 2,
+ * requested dppclk is not divided by 2.
+ */
+
+ /* current dpp clk not divided by 2, no need to ramp*/
+ if (!cur_dpp_div)
+ return new_clocks->dispclk_khz;
+
+ /* current disp clk is lower than current maximum dpp clk,
+ * no need to ramp
+ */
+ if (clk_mgr->clks.dispclk_khz <= disp_clk_threshold)
+ return new_clocks->dispclk_khz;
+
+ /* request dpp clk need to be divided by 2 */
+ if (request_dpp_div)
+ return new_clocks->dispclk_khz;
+ }
+
+ return disp_clk_threshold;
+}
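The branch structure above is easiest to follow with concrete numbers (hypothetical, chosen only to exercise the two-step path):

/*
 * Worked example: current dispclk = dppclk = 400000 kHz (divider off),
 * requested dispclk = 1200000 kHz with dppclk = 600000 kHz (so
 * request_dpp_div is true), and max_supported_dppclk_khz = 800000.
 * Jumping straight to 1200000 would momentarily drive dppclk at
 * 1200000 with the divider still off, past the 800000 limit; the
 * function therefore returns the 800000 threshold, letting the caller
 * flip the divider at a safe frequency before the final step to
 * 1200000.
 */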
+
+static void dcn1_ramp_up_dispclk_with_dpp(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
+{
+ struct dc *dc = clk_mgr->ctx->dc;
+ int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(clk_mgr, new_clocks);
+ bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
+ int i;
+
+ /* set disp clk to dpp clk threshold */
+ dce112_set_clock(clk_mgr, dispclk_to_dpp_threshold);
+
+ /* update request dpp clk division option */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
+ pipe_ctx->plane_res.dpp,
+ request_dpp_div,
+ true);
+ }
+
+ /* If target clk not same as dppclk threshold, set to target clock */
+ if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
+ dce112_set_clock(clk_mgr, new_clocks->dispclk_khz);
+
+ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+ clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
+ clk_mgr->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
+}
+
+static int get_active_display_cnt(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, display_count;
+
+ display_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_state *stream = context->streams[i];
+
+ /*
+ * Only notify SMU of active or virtual streams; virtual
+ * streams must be included to work around the headless
+ * case, since HPD does not fire when the system is in
+ * S0i2.
+ */
+ if (!stream->dpms_off || stream->signal == SIGNAL_TYPE_VIRTUAL)
+ display_count++;
+ }
+
+ return display_count;
+}
+
+static void notify_deep_sleep_dcfclk_to_smu(
+ struct pp_smu_funcs_rv *pp_smu, int min_dcef_deep_sleep_clk_khz)
+{
+ int min_dcef_deep_sleep_clk_mhz; //minimum required DCEF Deep Sleep clock in MHz
+ /*
+ * if function pointer not set up, this message is
+ * sent as part of pplib_apply_display_requirements.
+ * So just return.
+ */
+ if (!pp_smu || !pp_smu->set_min_deep_sleep_dcfclk)
+ return;
+
+ min_dcef_deep_sleep_clk_mhz = (min_dcef_deep_sleep_clk_khz + 999) / 1000; //Round up
+ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, min_dcef_deep_sleep_clk_mhz);
+}
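The + 999 makes the kHz-to-MHz conversion round up rather than truncate, so the SMU is never asked for less than the computed minimum. For instance:

/* (300001 + 999) / 1000 == 301, whereas a plain 300001 / 1000 would
 * truncate to 300 MHz and under-request the deep-sleep DCEF clock. */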
+
+static void notify_hard_min_dcfclk_to_smu(
+ struct pp_smu_funcs_rv *pp_smu, int min_dcf_clk_khz)
+{
+ int min_dcf_clk_mhz; //minimum required DCF clock in MHz
+
+ /*
+ * if function pointer not set up, this message is
+ * sent as part of pplib_apply_display_requirements.
+ * So just return.
+ */
+ if (!pp_smu || !pp_smu->set_hard_min_dcfclk_by_freq)
+ return;
+
+ min_dcf_clk_mhz = min_dcf_clk_khz / 1000;
+
+ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, min_dcf_clk_mhz);
+}
+
+static void notify_hard_min_fclk_to_smu(
+ struct pp_smu_funcs_rv *pp_smu, int min_f_clk_khz)
+{
+ int min_f_clk_mhz; //minimum required F clock in MHz
+
+ /*
+ * if function pointer not set up, this message is
+ * sent as part of pplib_apply_display_requirements.
+ * So just return.
+ */
+ if (!pp_smu || !pp_smu->set_hard_min_fclk_by_freq)
+ return;
+
+ min_f_clk_mhz = min_f_clk_khz / 1000;
+
+ pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, min_f_clk_mhz);
+}
+
+static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dc *dc = clk_mgr->ctx->dc;
+ struct dc_clocks *new_clocks = &context->bw.dcn.clk;
+ struct pp_smu_display_requirement_rv *smu_req_cur =
+ &dc->res_pool->pp_smu_req;
+ struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ uint32_t requested_dcf_clock_in_khz = 0;
+ bool send_request_to_increase = false;
+ bool send_request_to_lower = false;
+ int display_count;
+
+ bool enter_display_off = false;
+
+ display_count = get_active_display_cnt(dc, context);
+
+ if (display_count == 0)
+ enter_display_off = true;
+
+ if (enter_display_off == safe_to_lower) {
+ /*
+ * Notify SMU active displays
+ * if function pointer not set up, this message is
+ * sent as part of pplib_apply_display_requirements.
+ */
+ if (pp_smu->set_display_count)
+ pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
+ else
+ smu_req.display_count = display_count;
+
+ }
+
+ if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
+ || new_clocks->phyclk_khz > clk_mgr->clks.phyclk_khz
+ || new_clocks->fclk_khz > clk_mgr->clks.fclk_khz
+ || new_clocks->dcfclk_khz > clk_mgr->clks.dcfclk_khz)
+ send_request_to_increase = true;
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
+ clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ // F Clock
+ if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
+ clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
+ smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;
+
+ notify_hard_min_fclk_to_smu(pp_smu, new_clocks->fclk_khz);
+
+ send_request_to_lower = true;
+ }
+
+ //DCF Clock
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
+ clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000;
+
+ send_request_to_lower = true;
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz / 1000;
+
+ send_request_to_lower = true;
+ }
+
+ /* make sure dcfclk is raised before dppclk so there is
+ * enough voltage to run dppclk
+ */
+ if (send_request_to_increase) {
+ /*use dcfclk to request voltage*/
+ requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+
+ notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
+
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+ notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
+ dcn1_pplib_apply_display_requirements(dc, context);
+ }
+
+ /* dcn1 dppclk is tied to dispclk */
+ /* program dispclk even when unchanged, as a w/a for sleep/resume clock ramping issues */
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)
+ || new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
+ dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
+ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ send_request_to_lower = true;
+ }
+
+ if (!send_request_to_increase && send_request_to_lower) {
+ /*use dcfclk to request voltage*/
+ requested_dcf_clock_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
+
+ notify_hard_min_dcfclk_to_smu(pp_smu, requested_dcf_clock_in_khz);
+
+ if (pp_smu->set_display_requirement)
+ pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
+
+ notify_deep_sleep_dcfclk_to_smu(pp_smu, clk_mgr->clks.dcfclk_deep_sleep_khz);
+ dcn1_pplib_apply_display_requirements(dc, context);
+ }
+
+
+ *smu_req_cur = smu_req;
+}
+static const struct clk_mgr_funcs dcn1_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dcn1_update_clocks
+};
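dcn1_update_clocks leans heavily on should_set_clock(), which is defined outside this hunk. As relied on above, its contract amounts to the following (a sketch, not the verbatim definition):

/* Apply a clock change if it is an increase, or if it is a decrease
 * and the caller has said lowering is currently safe. */
static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	return (safe_to_lower && calc_clk < cur_clk) || calc_clk > cur_clk;
}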
+struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx)
+{
+ struct dc_debug_options *debug = &ctx->dc->debug;
+ struct dc_bios *bp = ctx->dc_bios;
+ struct dc_firmware_info fw_info = { { 0 } };
+ struct dce_clk_mgr *clk_mgr_dce = kzalloc(sizeof(*clk_mgr_dce), GFP_KERNEL);
+
+ if (clk_mgr_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ clk_mgr_dce->base.ctx = ctx;
+ clk_mgr_dce->base.funcs = &dcn1_funcs;
+
+ clk_mgr_dce->dfs_bypass_disp_clk = 0;
+
+ clk_mgr_dce->dprefclk_ss_percentage = 0;
+ clk_mgr_dce->dprefclk_ss_divider = 1000;
+ clk_mgr_dce->ss_on_dprefclk = false;
+
+ clk_mgr_dce->dprefclk_khz = 600000;
+ if (bp->integrated_info)
+ clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
+ if (clk_mgr_dce->dentist_vco_freq_khz == 0) {
+ bp->funcs->get_firmware_info(bp, &fw_info);
+ clk_mgr_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
+ if (clk_mgr_dce->dentist_vco_freq_khz == 0)
+ clk_mgr_dce->dentist_vco_freq_khz = 3600000;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_mgr_dce->dfs_bypass_enabled = true;
+
+ dce_clock_read_ss_info(clk_mgr_dce);
+
+ return &clk_mgr_dce->base;
+}
+
+
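A DCN1 resource-pool construct would be expected to consume this creator the same way the DCE paths earlier in the patch consume theirs. A sketch, mirroring the dce120 hunk above (error label hypothetical):

pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
if (pool->base.clk_mgr == NULL) {
	dm_error("DC: failed to create display clock!\n");
	BREAK_TO_DEBUGGER();
	goto fail; /* hypothetical error label */
}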
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
new file mode 100644
index 000000000000..a995eda443a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN10_CLK_MGR_H__
+#define __DCN10_CLK_MGR_H__
+
+#include "../dce/dce_clk_mgr.h"
+
+struct clk_bypass {
+ uint32_t dcfclk_bypass;
+ uint32_t dispclk_bypass;
+ uint32_t dprefclk_bypass;
+};
+
+void dcn1_pplib_apply_display_requirements(
+ struct dc *dc,
+ struct dc_state *context);
+
+struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
+
+#endif //__DCN10_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index 5d95a997fd9f..7469333a2c8a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -71,39 +71,39 @@ void cm_helper_program_xfer_func(
unsigned int i = 0;
REG_SET_2(reg->start_cntl_b, 0,
- exp_region_start, params->arr_points[0].custom_float_x,
+ exp_region_start, params->corner_points[0].blue.custom_float_x,
exp_resion_start_segment, 0);
REG_SET_2(reg->start_cntl_g, 0,
- exp_region_start, params->arr_points[0].custom_float_x,
+ exp_region_start, params->corner_points[0].green.custom_float_x,
exp_resion_start_segment, 0);
REG_SET_2(reg->start_cntl_r, 0,
- exp_region_start, params->arr_points[0].custom_float_x,
+ exp_region_start, params->corner_points[0].red.custom_float_x,
exp_resion_start_segment, 0);
REG_SET(reg->start_slope_cntl_b, 0,
- field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ field_region_linear_slope, params->corner_points[0].blue.custom_float_slope);
REG_SET(reg->start_slope_cntl_g, 0,
- field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ field_region_linear_slope, params->corner_points[0].green.custom_float_slope);
REG_SET(reg->start_slope_cntl_r, 0,
- field_region_linear_slope, params->arr_points[0].custom_float_slope);
+ field_region_linear_slope, params->corner_points[0].red.custom_float_slope);
REG_SET(reg->start_end_cntl1_b, 0,
- field_region_end, params->arr_points[1].custom_float_x);
+ field_region_end, params->corner_points[1].blue.custom_float_x);
REG_SET_2(reg->start_end_cntl2_b, 0,
- field_region_end_slope, params->arr_points[1].custom_float_slope,
- field_region_end_base, params->arr_points[1].custom_float_y);
+ field_region_end_slope, params->corner_points[1].blue.custom_float_slope,
+ field_region_end_base, params->corner_points[1].blue.custom_float_y);
REG_SET(reg->start_end_cntl1_g, 0,
- field_region_end, params->arr_points[1].custom_float_x);
+ field_region_end, params->corner_points[1].green.custom_float_x);
REG_SET_2(reg->start_end_cntl2_g, 0,
- field_region_end_slope, params->arr_points[1].custom_float_slope,
- field_region_end_base, params->arr_points[1].custom_float_y);
+ field_region_end_slope, params->corner_points[1].green.custom_float_slope,
+ field_region_end_base, params->corner_points[1].green.custom_float_y);
REG_SET(reg->start_end_cntl1_r, 0,
- field_region_end, params->arr_points[1].custom_float_x);
+ field_region_end, params->corner_points[1].red.custom_float_x);
REG_SET_2(reg->start_end_cntl2_r, 0,
- field_region_end_slope, params->arr_points[1].custom_float_slope,
- field_region_end_base, params->arr_points[1].custom_float_y);
+ field_region_end_slope, params->corner_points[1].red.custom_float_slope,
+ field_region_end_base, params->corner_points[1].red.custom_float_y);
for (reg_region_cur = reg->region_start;
reg_region_cur <= reg->region_end;
@@ -127,7 +127,7 @@ void cm_helper_program_xfer_func(
bool cm_helper_convert_to_custom_float(
struct pwl_result_data *rgb_resulted,
- struct curve_points *arr_points,
+ struct curve_points3 *corner_points,
uint32_t hw_points_num,
bool fixpoint)
{
@@ -141,20 +141,53 @@ bool cm_helper_convert_to_custom_float(
fmt.mantissa_bits = 12;
fmt.sign = false;
- if (!convert_to_custom_float_format(arr_points[0].x, &fmt,
- &arr_points[0].custom_float_x)) {
+ /* corner_points[0] - beginning base, slope offset for R,G,B
+ * corner_points[1] - end base, slope offset for R,G,B
+ */
+ if (!convert_to_custom_float_format(corner_points[0].red.x, &fmt,
+ &corner_points[0].red.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.x, &fmt,
+ &corner_points[0].green.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.x, &fmt,
+ &corner_points[0].blue.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(arr_points[0].offset, &fmt,
- &arr_points[0].custom_float_offset)) {
+ if (!convert_to_custom_float_format(corner_points[0].red.offset, &fmt,
+ &corner_points[0].red.custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.offset, &fmt,
+ &corner_points[0].green.custom_float_offset)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.offset, &fmt,
+ &corner_points[0].blue.custom_float_offset)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(arr_points[0].slope, &fmt,
- &arr_points[0].custom_float_slope)) {
+ if (!convert_to_custom_float_format(corner_points[0].red.slope, &fmt,
+ &corner_points[0].red.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].green.slope, &fmt,
+ &corner_points[0].green.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[0].blue.slope, &fmt,
+ &corner_points[0].blue.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
@@ -162,22 +195,59 @@ bool cm_helper_convert_to_custom_float(
fmt.mantissa_bits = 10;
fmt.sign = false;
- if (!convert_to_custom_float_format(arr_points[1].x, &fmt,
- &arr_points[1].custom_float_x)) {
+ if (!convert_to_custom_float_format(corner_points[1].red.x, &fmt,
+ &corner_points[1].red.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
-
- if (fixpoint == true)
- arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y);
- else if (!convert_to_custom_float_format(arr_points[1].y, &fmt,
- &arr_points[1].custom_float_y)) {
+ if (!convert_to_custom_float_format(corner_points[1].green.x, &fmt,
+ &corner_points[1].green.custom_float_x)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.x, &fmt,
+ &corner_points[1].blue.custom_float_x)) {
BREAK_TO_DEBUGGER();
return false;
}
- if (!convert_to_custom_float_format(arr_points[1].slope, &fmt,
- &arr_points[1].custom_float_slope)) {
+ if (fixpoint == true) {
+ corner_points[1].red.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].red.y);
+ corner_points[1].green.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].green.y);
+ corner_points[1].blue.custom_float_y =
+ dc_fixpt_clamp_u0d14(corner_points[1].blue.y);
+ } else {
+ if (!convert_to_custom_float_format(corner_points[1].red.y,
+ &fmt, &corner_points[1].red.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].green.y,
+ &fmt, &corner_points[1].green.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.y,
+ &fmt, &corner_points[1].blue.custom_float_y)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ }
+
+ if (!convert_to_custom_float_format(corner_points[1].red.slope, &fmt,
+ &corner_points[1].red.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].green.slope, &fmt,
+ &corner_points[1].green.custom_float_slope)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+ if (!convert_to_custom_float_format(corner_points[1].blue.slope, &fmt,
+ &corner_points[1].blue.custom_float_slope)) {
BREAK_TO_DEBUGGER();
return false;
}
@@ -242,15 +312,10 @@ bool cm_helper_translate_curve_to_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params, bool fixpoint)
{
- struct curve_points *arr_points;
+ struct curve_points3 *corner_points;
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
- struct fixed31_32 y_r;
- struct fixed31_32 y_g;
- struct fixed31_32 y_b;
- struct fixed31_32 y1_min;
- struct fixed31_32 y3_max;
int32_t region_start, region_end;
int32_t i;
@@ -259,16 +324,16 @@ bool cm_helper_translate_curve_to_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE();
+ PERF_TRACE_CTX(output_tf->ctx);
- arr_points = lut_params->arr_points;
+ corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
memset(lut_params, 0, sizeof(struct pwl_params));
memset(seg_distr, 0, sizeof(seg_distr));
- if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
+ if (output_tf->tf == TRANSFER_FUNCTION_PQ || output_tf->tf == TRANSFER_FUNCTION_GAMMA22) {
/* 32 segments
* segments are from 2^-25 to 2^7
*/
@@ -327,31 +392,37 @@ bool cm_helper_translate_curve_to_hw_format(
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
- arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ // All 3 color channels share the same x
+ corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
- arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
- dc_fixpt_from_int(region_end));
+ corner_points[0].green.x = corner_points[0].red.x;
+ corner_points[0].blue.x = corner_points[0].red.x;
- y_r = rgb_resulted[0].red;
- y_g = rgb_resulted[0].green;
- y_b = rgb_resulted[0].blue;
+ corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ dc_fixpt_from_int(region_end));
+ corner_points[1].green.x = corner_points[1].red.x;
+ corner_points[1].blue.x = corner_points[1].red.x;
- y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
+ corner_points[0].red.y = rgb_resulted[0].red;
+ corner_points[0].green.y = rgb_resulted[0].green;
+ corner_points[0].blue.y = rgb_resulted[0].blue;
- arr_points[0].y = y1_min;
- arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
- y_r = rgb_resulted[hw_points - 1].red;
- y_g = rgb_resulted[hw_points - 1].green;
- y_b = rgb_resulted[hw_points - 1].blue;
+ corner_points[0].red.slope = dc_fixpt_div(corner_points[0].red.y,
+ corner_points[0].red.x);
+ corner_points[0].green.slope = dc_fixpt_div(corner_points[0].green.y,
+ corner_points[0].green.x);
+ corner_points[0].blue.slope = dc_fixpt_div(corner_points[0].blue.y,
+ corner_points[0].blue.x);
/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
*/
- y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
-
- arr_points[1].y = y3_max;
-
- arr_points[1].slope = dc_fixpt_zero;
+ corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+ corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+ corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+ corner_points[1].red.slope = dc_fixpt_zero;
+ corner_points[1].green.slope = dc_fixpt_zero;
+ corner_points[1].blue.slope = dc_fixpt_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
@@ -360,9 +431,15 @@ bool cm_helper_translate_curve_to_hw_format(
const struct fixed31_32 end_value =
dc_fixpt_from_int(125);
- arr_points[1].slope = dc_fixpt_div(
- dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
- dc_fixpt_sub(end_value, arr_points[1].x));
+ corner_points[1].red.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+ dc_fixpt_sub(end_value, corner_points[1].red.x));
+ corner_points[1].green.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+ dc_fixpt_sub(end_value, corner_points[1].green.x));
+ corner_points[1].blue.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+ dc_fixpt_sub(end_value, corner_points[1].blue.x));
}
lut_params->hw_points_num = hw_points;
@@ -411,7 +488,7 @@ bool cm_helper_translate_curve_to_hw_format(
++i;
}
cm_helper_convert_to_custom_float(rgb_resulted,
- lut_params->arr_points,
+ lut_params->corner_points,
hw_points, fixpoint);
return true;
@@ -424,15 +501,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
const struct dc_transfer_func *output_tf,
struct pwl_params *lut_params)
{
- struct curve_points *arr_points;
+ struct curve_points3 *corner_points;
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
- struct fixed31_32 y_r;
- struct fixed31_32 y_g;
- struct fixed31_32 y_b;
- struct fixed31_32 y1_min;
- struct fixed31_32 y3_max;
int32_t region_start, region_end;
int32_t i;
@@ -441,9 +513,9 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
if (output_tf == NULL || lut_params == NULL || output_tf->type == TF_TYPE_BYPASS)
return false;
- PERF_TRACE();
+ PERF_TRACE_CTX(output_tf->ctx);
- arr_points = lut_params->arr_points;
+ corner_points = lut_params->corner_points;
rgb_resulted = lut_params->rgb_resulted;
hw_points = 0;
@@ -489,31 +561,28 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
- arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_start));
- arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2),
+ corner_points[0].green.x = corner_points[0].red.x;
+ corner_points[0].blue.x = corner_points[0].red.x;
+ corner_points[1].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
dc_fixpt_from_int(region_end));
+ corner_points[1].green.x = corner_points[1].red.x;
+ corner_points[1].blue.x = corner_points[1].red.x;
- y_r = rgb_resulted[0].red;
- y_g = rgb_resulted[0].green;
- y_b = rgb_resulted[0].blue;
-
- y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b));
-
- arr_points[0].y = y1_min;
- arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x);
- y_r = rgb_resulted[hw_points - 1].red;
- y_g = rgb_resulted[hw_points - 1].green;
- y_b = rgb_resulted[hw_points - 1].blue;
+ corner_points[0].red.y = rgb_resulted[0].red;
+ corner_points[0].green.y = rgb_resulted[0].green;
+ corner_points[0].blue.y = rgb_resulted[0].blue;
/* see comment above, m_arrPoints[1].y should be the Y value for the
* region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1)
*/
- y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b));
-
- arr_points[1].y = y3_max;
-
- arr_points[1].slope = dc_fixpt_zero;
+ corner_points[1].red.y = rgb_resulted[hw_points - 1].red;
+ corner_points[1].green.y = rgb_resulted[hw_points - 1].green;
+ corner_points[1].blue.y = rgb_resulted[hw_points - 1].blue;
+ corner_points[1].red.slope = dc_fixpt_zero;
+ corner_points[1].green.slope = dc_fixpt_zero;
+ corner_points[1].blue.slope = dc_fixpt_zero;
if (output_tf->tf == TRANSFER_FUNCTION_PQ) {
/* for PQ, we want to have a straight line from last HW X point,
@@ -522,9 +591,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
const struct fixed31_32 end_value =
dc_fixpt_from_int(125);
- arr_points[1].slope = dc_fixpt_div(
- dc_fixpt_sub(dc_fixpt_one, arr_points[1].y),
- dc_fixpt_sub(end_value, arr_points[1].x));
+ corner_points[1].red.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].red.y),
+ dc_fixpt_sub(end_value, corner_points[1].red.x));
+ corner_points[1].green.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].green.y),
+ dc_fixpt_sub(end_value, corner_points[1].green.x));
+ corner_points[1].blue.slope = dc_fixpt_div(
+ dc_fixpt_sub(dc_fixpt_one, corner_points[1].blue.y),
+ dc_fixpt_sub(end_value, corner_points[1].blue.x));
}
lut_params->hw_points_num = hw_points;
@@ -564,7 +639,7 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
++i;
}
cm_helper_convert_to_custom_float(rgb_resulted,
- lut_params->arr_points,
+ lut_params->corner_points,
hw_points, false);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
index 7a531b02871f..5ae4d69391a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h
@@ -98,7 +98,7 @@ void cm_helper_program_xfer_func(
bool cm_helper_convert_to_custom_float(
struct pwl_result_data *rgb_resulted,
- struct curve_points *arr_points,
+ struct curve_points3 *corner_points,
uint32_t hw_points_num,
bool fixpoint);
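struct curve_points3 is never defined in this diff; every access above goes through a .red/.green/.blue member that behaves exactly like the old struct curve_points. The layout this rewrite assumes is therefore presumably:

/* Assumed definition, inferred from usage in this patch: */
struct curve_points3 {
	struct curve_points red;
	struct curve_points green;
	struct curve_points blue;
};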
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 4254e7e1a509..c7d1e678ebf5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -100,7 +100,7 @@ bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub)
REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL,
DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable);
- return true ? false : enable;
+ return enable ? true : false;
}
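The one-line change above is a real bug fix, not a style cleanup: `true ? false : enable` constant-folds to `false`, so the helper always reported self-refresh as disallowed no matter what DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE read back. `enable ? true : false` (equivalently, `enable != 0`) actually forwards the register field.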
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 74132a1f3046..345af015d061 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -99,6 +99,14 @@ static unsigned int hubp1_get_underflow_status(struct hubp *hubp)
return hubp_underflow;
}
+
+void hubp1_clear_underflow(struct hubp *hubp)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, 1);
+}
+
static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
@@ -565,19 +573,6 @@ void hubp1_program_deadline(
REFCYC_X_AFTER_SCALER, dlg_attr->refcyc_x_after_scaler,
DST_Y_AFTER_SCALER, dlg_attr->dst_y_after_scaler);
- if (REG(PREFETCH_SETTINS))
- REG_SET_2(PREFETCH_SETTINS, 0,
- DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
- VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
- else
- REG_SET_2(PREFETCH_SETTINGS, 0,
- DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
- VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
-
- REG_SET_2(VBLANK_PARAMETERS_0, 0,
- DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
- DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
-
REG_SET(REF_FREQ_TO_PIX_FREQ, 0,
REF_FREQ_TO_PIX_FREQ, dlg_attr->ref_freq_to_pix_freq);
@@ -585,9 +580,6 @@ void hubp1_program_deadline(
REG_SET(VBLANK_PARAMETERS_1, 0,
REFCYC_PER_PTE_GROUP_VBLANK_L, dlg_attr->refcyc_per_pte_group_vblank_l);
- REG_SET(VBLANK_PARAMETERS_3, 0,
- REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
-
if (REG(NOM_PARAMETERS_0))
REG_SET(NOM_PARAMETERS_0, 0,
DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l);
@@ -602,27 +594,13 @@ void hubp1_program_deadline(
REG_SET(NOM_PARAMETERS_5, 0,
REFCYC_PER_META_CHUNK_NOM_L, dlg_attr->refcyc_per_meta_chunk_nom_l);
- REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
- REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
- REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
-
REG_SET_2(PER_LINE_DELIVERY, 0,
REFCYC_PER_LINE_DELIVERY_L, dlg_attr->refcyc_per_line_delivery_l,
REFCYC_PER_LINE_DELIVERY_C, dlg_attr->refcyc_per_line_delivery_c);
- if (REG(PREFETCH_SETTINS_C))
- REG_SET(PREFETCH_SETTINS_C, 0,
- VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
- else
- REG_SET(PREFETCH_SETTINGS_C, 0,
- VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
-
REG_SET(VBLANK_PARAMETERS_2, 0,
REFCYC_PER_PTE_GROUP_VBLANK_C, dlg_attr->refcyc_per_pte_group_vblank_c);
- REG_SET(VBLANK_PARAMETERS_4, 0,
- REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
-
if (REG(NOM_PARAMETERS_2))
REG_SET(NOM_PARAMETERS_2, 0,
DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c);
@@ -642,10 +620,6 @@ void hubp1_program_deadline(
QoS_LEVEL_LOW_WM, ttu_attr->qos_level_low_wm,
QoS_LEVEL_HIGH_WM, ttu_attr->qos_level_high_wm);
- REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
- MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
- QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
-
/* TTU - per luma/chroma */
/* Assumed surf0 is luma and 1 is chroma */
@@ -654,25 +628,15 @@ void hubp1_program_deadline(
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_l,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_l);
- REG_SET(DCN_SURF0_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_l);
-
REG_SET_3(DCN_SURF1_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_c,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_c,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_c);
- REG_SET(DCN_SURF1_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE,
- ttu_attr->refcyc_per_req_delivery_pre_c);
-
REG_SET_3(DCN_CUR0_TTU_CNTL0, 0,
REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0,
QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0,
QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0);
- REG_SET(DCN_CUR0_TTU_CNTL1, 0,
- REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
}
static void hubp1_setup(
@@ -690,6 +654,48 @@ static void hubp1_setup(
hubp1_vready_workaround(hubp, pipe_dest);
}
+static void hubp1_setup_interdependent(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_SET_2(PREFETCH_SETTINS, 0,
+ DST_Y_PREFETCH, dlg_attr->dst_y_prefetch,
+ VRATIO_PREFETCH, dlg_attr->vratio_prefetch);
+
+ REG_SET(PREFETCH_SETTINS_C, 0,
+ VRATIO_PREFETCH_C, dlg_attr->vratio_prefetch_c);
+
+ REG_SET_2(VBLANK_PARAMETERS_0, 0,
+ DST_Y_PER_VM_VBLANK, dlg_attr->dst_y_per_vm_vblank,
+ DST_Y_PER_ROW_VBLANK, dlg_attr->dst_y_per_row_vblank);
+
+ REG_SET(VBLANK_PARAMETERS_3, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+ REG_SET(VBLANK_PARAMETERS_4, 0,
+ REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+ REG_SET_2(PER_LINE_DELIVERY_PRE, 0,
+ REFCYC_PER_LINE_DELIVERY_PRE_L, dlg_attr->refcyc_per_line_delivery_pre_l,
+ REFCYC_PER_LINE_DELIVERY_PRE_C, dlg_attr->refcyc_per_line_delivery_pre_c);
+
+ REG_SET(DCN_SURF0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_l);
+ REG_SET(DCN_SURF1_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE,
+ ttu_attr->refcyc_per_req_delivery_pre_c);
+ REG_SET(DCN_CUR0_TTU_CNTL1, 0,
+ REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0);
+
+ REG_SET_2(DCN_GLOBAL_TTU_CNTL, 0,
+ MIN_TTU_VBLANK, ttu_attr->min_ttu_vblank,
+ QoS_LEVEL_FLIP, ttu_attr->qos_level_flip);
+}
+
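The register writes gathered into hubp1_setup_interdependent() are exactly the prefetch, vblank and pre-delivery fields deleted from hubp1_program_deadline() above. Splitting them out lets a full update on one pipe reprogram just these interdependent deadlines on the other active pipes, which is what the dcn10_apply_ctx_for_surface hunk later in this patch does under timing-generator lock. One visible difference: the old PREFETCH_SETTINS/PREFETCH_SETTINGS register-spelling fallback is not carried over into the new function.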
bool hubp1_is_flip_pending(struct hubp *hubp)
{
uint32_t flip_pending = 0;
@@ -1178,6 +1184,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
hubp1_program_surface_config,
.hubp_is_flip_pending = hubp1_is_flip_pending,
.hubp_setup = hubp1_setup,
+ .hubp_setup_interdependent = hubp1_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp1_set_vm_system_aperture_settings,
.hubp_set_vm_context0_settings = hubp1_set_vm_context0_settings,
.set_blank = hubp1_set_blank,
@@ -1190,6 +1197,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_clk_cntl = hubp1_clk_cntl,
.hubp_vtg_sel = hubp1_vtg_sel,
.hubp_read_state = hubp1_read_state,
+ .hubp_clear_underflow = hubp1_clear_underflow,
.hubp_disable_control = hubp1_disable_control,
.hubp_get_underflow_status = hubp1_get_underflow_status,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 4890273b632b..62d4232e7796 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -251,6 +251,7 @@
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_CLEAR, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
@@ -435,6 +436,7 @@
type HUBP_NO_OUTSTANDING_REQ;\
type HUBP_VTG_SEL;\
type HUBP_UNDERFLOW_STATUS;\
+ type HUBP_UNDERFLOW_CLEAR;\
type NUM_PIPES;\
type NUM_BANKS;\
type PIPE_INTERLEAVE;\
@@ -739,6 +741,7 @@ void dcn10_hubp_construct(
const struct dcn_mi_mask *hubp_mask);
void hubp1_read_state(struct hubp *hubp);
+void hubp1_clear_underflow(struct hubp *hubp);
enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
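HUBP_UNDERFLOW_CLEAR complements the existing sticky HUBP_UNDERFLOW_STATUS bit, which the debugfs hunk below stops clearing as a side effect of reads. A plausible debug sequence built on the two hooks (hypothetical wrapper, not part of this patch):

/* Hypothetical helper: report and reset a pipe's sticky underflow bit. */
static bool example_check_and_clear_underflow(struct hubp *hubp)
{
	bool underflowed = hubp->funcs->hubp_get_underflow_status(hubp) != 0;

	if (underflowed && hubp->funcs->hubp_clear_underflow)
		hubp->funcs->hubp_clear_underflow(hubp);
	return underflowed;
}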
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 193184affefb..0bd33a713836 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -45,6 +45,7 @@
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
#include "dc_link_dp.h"
+#include "dccg.h"
#define DC_LOGGER_INIT(logger)
@@ -786,7 +787,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
&dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
- if (hubp != NULL) {
+ if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
/* one pipe underflow, we will reset all the pipes*/
need_recover = true;
@@ -812,7 +813,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
- if (hubp != NULL)
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
hubp->funcs->set_hubp_blank_en(hubp, true);
}
}
@@ -825,7 +826,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_DISABLE=1*/
- if (hubp != NULL)
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
hubp->funcs->hubp_disable_control(hubp, true);
}
}
@@ -835,7 +836,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_DISABLE=0*/
- if (hubp != NULL)
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
hubp->funcs->hubp_disable_control(hubp, true);
}
}
@@ -847,7 +848,7 @@ static bool dcn10_hw_wa_force_recovery(struct dc *dc)
if (pipe_ctx != NULL) {
hubp = pipe_ctx->plane_res.hubp;
/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
- if (hubp != NULL)
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
hubp->funcs->set_hubp_blank_en(hubp, true);
}
}
@@ -1126,7 +1127,7 @@ static void dcn10_init_hw(struct dc *dc)
enable_power_gating_plane(dc->hwseq, true);
- memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
+ memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
}
static void reset_hw_ctx_wrap(
@@ -1226,7 +1227,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
tf = plane_state->in_transfer_func;
if (plane_state->gamma_correction &&
- !plane_state->gamma_correction->is_identity
+ !dpp_base->ctx->dc->debug.always_use_regamma
+ && !plane_state->gamma_correction->is_identity
&& dce_use_lut(plane_state->format))
dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
@@ -1399,7 +1401,7 @@ static void dcn10_enable_per_frame_crtc_position_reset(
if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
grouped_pipes[i]->stream_res.tg,
- grouped_pipes[i]->stream->triggered_crtc_reset.event_source->status.primary_otg_inst,
+ 0,
&grouped_pipes[i]->stream->triggered_crtc_reset);
DC_SYNC_INFO("Waiting for trigger\n");
@@ -1603,7 +1605,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
}
-static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
+void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
struct vm_system_aperture_param apt = { {{ 0 } } };
@@ -1703,33 +1705,22 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
}
-
-static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
+static void dcn10_program_output_csc(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
- uint16_t *matrix)
+ uint16_t *matrix,
+ int opp_id)
{
if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
- if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
- pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
+ if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
+ pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
} else {
if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
}
}
-static void dcn10_program_output_csc(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- enum dc_color_space colorspace,
- uint16_t *matrix,
- int opp_id)
-{
- if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL)
- program_csc_matrix(pipe_ctx,
- colorspace,
- matrix);
-}
-
-static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state->visible)
return true;
@@ -1738,7 +1729,7 @@ static bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
return false;
}
-static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state->visible)
return true;
@@ -1747,7 +1738,7 @@ static bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
return false;
}
-static bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx->plane_state->visible)
return true;
@@ -1780,7 +1771,7 @@ bool is_rgb_cspace(enum dc_color_space output_color_space)
}
}
-static void dcn10_get_surface_visual_confirm_color(
+void dcn10_get_surface_visual_confirm_color(
const struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
@@ -1816,7 +1807,7 @@ static void dcn10_get_surface_visual_confirm_color(
}
}
-static void dcn10_get_hdr_visual_confirm_color(
+void dcn10_get_hdr_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
@@ -1943,10 +1934,6 @@ static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
struct mpc *mpc = dc->res_pool->mpc;
struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
-
-
- /* TODO: proper fix once fpga works */
-
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
dcn10_get_hdr_visual_confirm_color(
pipe_ctx, &blnd_cfg.black_color);
@@ -2026,8 +2013,6 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
bool per_pixel_alpha =
pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
- /* TODO: proper fix once fpga works */
-
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
/* scaler configuration */
@@ -2035,7 +2020,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
}
-static void update_dchubp_dpp(
+void update_dchubp_dpp(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -2052,16 +2037,22 @@ static void update_dchubp_dpp(
*/
if (plane_state->update_flags.bits.full_update) {
bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
- dc->res_pool->dccg->clks.dispclk_khz / 2;
+ dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
dpp,
should_divided_by_2,
true);
- dc->res_pool->dccg->clks.dppclk_khz = should_divided_by_2 ?
- dc->res_pool->dccg->clks.dispclk_khz / 2 :
- dc->res_pool->dccg->clks.dispclk_khz;
+ if (dc->res_pool->dccg)
+ dc->res_pool->dccg->funcs->update_dpp_dto(
+ dc->res_pool->dccg,
+ dpp->inst,
+ pipe_ctx->plane_res.bw.calc.dppclk_khz);
+ else
+ dc->res_pool->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
+ dc->res_pool->clk_mgr->clks.dispclk_khz / 2 :
+ dc->res_pool->clk_mgr->clks.dispclk_khz;
}
/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
@@ -2077,6 +2068,10 @@ static void update_dchubp_dpp(
&pipe_ctx->ttu_regs,
&pipe_ctx->rq_regs,
&pipe_ctx->pipe_dlg_param);
+ hubp->funcs->hubp_setup_interdependent(
+ hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
}
size.grph.surface_size = pipe_ctx->plane_res.scl_data.viewport;
@@ -2182,7 +2177,7 @@ static void dcn10_blank_pixel_data(
}
}
-static void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
+void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
{
struct fixed31_32 multiplier = dc_fixpt_from_fraction(
pipe_ctx->plane_state->sdr_white_level, 80);
@@ -2257,47 +2252,7 @@ static void program_all_pipe_in_tree(
}
}
-static void dcn10_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context)
-{
- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
- pp_display_cfg->min_engine_clock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
- pp_display_cfg->min_memory_clock_khz = dc->res_pool->dccg->clks.fclk_khz;
- pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->dccg->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfclock_khz = dc->res_pool->dccg->clks.dcfclk_khz;
- pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
- dce110_fill_display_configs(context, pp_display_cfg);
-
- if (memcmp(&dc->prev_display_config, pp_display_cfg, sizeof(
- struct dm_pp_display_configuration)) != 0)
- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-
- dc->prev_display_config = *pp_display_cfg;
-}
-
-static void optimize_shared_resources(struct dc *dc)
-{
- if (dc->current_state->stream_count == 0) {
- /* S0i2 message */
- dcn10_pplib_apply_display_requirements(dc, dc->current_state);
- }
-
- if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
- dcn_bw_notify_pplib_of_wm_ranges(dc);
-}
-
-static void ready_shared_resources(struct dc *dc, struct dc_state *context)
-{
- /* S0i2 message */
- if (dc->current_state->stream_count == 0 &&
- context->stream_count != 0)
- dcn10_pplib_apply_display_requirements(dc, context);
-}
-
-static struct pipe_ctx *find_top_pipe_for_stream(
+struct pipe_ctx *find_top_pipe_for_stream(
struct dc *dc,
struct dc_state *context,
const struct dc_stream_state *stream)
@@ -2387,6 +2342,32 @@ static void dcn10_apply_ctx_for_surface(
dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+ if (top_pipe_to_program->plane_state &&
+ top_pipe_to_program->plane_state->update_flags.bits.full_update)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ /* Skip inactive pipes and ones already updated */
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+ continue;
+
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+
+ pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
+ pipe_ctx->plane_res.hubp,
+ &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs);
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream)
+ continue;
+
+ dcn10_pipe_control_lock(dc, pipe_ctx, false);
+ }
+
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
@@ -2398,10 +2379,9 @@ static void dcn10_apply_ctx_for_surface(
hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
-static void dcn10_set_bandwidth(
+static void dcn10_prepare_bandwidth(
struct dc *dc,
- struct dc_state *context,
- bool safe_to_lower)
+ struct dc_state *context)
{
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
@@ -2410,12 +2390,39 @@ static void dcn10_set_bandwidth(
if (context->stream_count == 0)
context->bw.dcn.clk.phyclk_khz = 0;
- dc->res_pool->dccg->funcs->update_clocks(
- dc->res_pool->dccg,
- &context->bw.dcn.clk,
- safe_to_lower);
+ dc->res_pool->clk_mgr->funcs->update_clocks(
+ dc->res_pool->clk_mgr,
+ context,
+ false);
+ }
+
+ hubbub1_program_watermarks(dc->res_pool->hubbub,
+ &context->bw.dcn.watermarks,
+ dc->res_pool->ref_clock_inKhz / 1000,
+ true);
+
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+ dcn_bw_notify_pplib_of_wm_ranges(dc);
+
+ if (dc->debug.sanity_checks)
+ dcn10_verify_allow_pstate_change_high(dc);
+}
+
+static void dcn10_optimize_bandwidth(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (dc->debug.sanity_checks)
+ dcn10_verify_allow_pstate_change_high(dc);
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (context->stream_count == 0)
+ context->bw.dcn.clk.phyclk_khz = 0;
- dcn10_pplib_apply_display_requirements(dc, context);
+ dc->res_pool->clk_mgr->funcs->update_clocks(
+ dc->res_pool->clk_mgr,
+ context,
+ true);
}
hubbub1_program_watermarks(dc->res_pool->hubbub,
@@ -2423,6 +2430,9 @@ static void dcn10_set_bandwidth(
dc->res_pool->ref_clock_inKhz / 1000,
true);
+ if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
+ dcn_bw_notify_pplib_of_wm_ranges(dc);
+
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
}
@@ -2694,7 +2704,6 @@ static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
- .program_csc_matrix = program_csc_matrix,
.init_hw = dcn10_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
@@ -2721,7 +2730,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.disable_plane = dcn10_disable_plane,
.blank_pixel_data = dcn10_blank_pixel_data,
.pipe_control_lock = dcn10_pipe_control_lock,
- .set_bandwidth = dcn10_set_bandwidth,
+ .prepare_bandwidth = dcn10_prepare_bandwidth,
+ .optimize_bandwidth = dcn10_optimize_bandwidth,
.reset_hw_ctx_wrap = reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.set_drr = set_drr,
@@ -2731,11 +2741,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_avmute = dce110_set_avmute,
.log_hw_state = dcn10_log_hw_state,
.get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
- .ready_shared_resources = ready_shared_resources,
- .optimize_shared_resources = optimize_shared_resources,
- .pplib_apply_display_requirements =
- dcn10_pplib_apply_display_requirements,
.edp_backlight_control = hwss_edp_backlight_control,
.edp_power_control = hwss_edp_power_control,
.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 84d461e0ed3e..f8eea10e4c64 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -51,4 +51,34 @@ void dcn10_get_hw_state(
char *pBuf, unsigned int bufSize,
unsigned int mask);
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask);
+
+bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx);
+
+void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp);
+
+void set_hdr_multiplier(struct pipe_ctx *pipe_ctx);
+
+void dcn10_get_surface_visual_confirm_color(
+ const struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
+void dcn10_get_hdr_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color);
+
+void update_dchubp_dpp(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
+
+struct pipe_ctx *find_top_pipe_for_stream(
+ struct dc *dc,
+ struct dc_state *context,
+ const struct dc_stream_state *stream);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 64158900730f..cd469014baa3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -44,6 +44,7 @@
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
+#include "dcn10_clk_mgr.h"
static unsigned int snprintf_count(char *pBuf, unsigned int bufSize, char *fmt, ...)
{
@@ -454,12 +455,6 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
remaining_buffer -= chars_printed;
pBuf += chars_printed;
-
- // Clear underflow for debug purposes
- // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
- // This function is called only from Windows or Diags test environment, hence it's safe to clear
- // it from here without affecting the original intent.
- tg->funcs->clear_optc_underflow(tg);
}
}
@@ -469,19 +464,75 @@ static unsigned int dcn10_get_otg_states(struct dc *dc, char *pBuf, unsigned int
static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned int bufSize)
{
unsigned int chars_printed = 0;
+ unsigned int remaining_buffer = bufSize;
- chars_printed = snprintf_count(pBuf, bufSize, "dcfclk_khz,dcfclk_deep_sleep_khz,dispclk_khz,"
- "dppclk_khz,max_supported_dppclk_khz,fclk_khz,socclk_khz\n"
- "%d,%d,%d,%d,%d,%d,%d\n",
+ chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
+ "dppclk,fclk,socclk\n"
+ "%d,%d,%d,%d,%d,%d\n",
dc->current_state->bw.dcn.clk.dcfclk_khz,
dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
dc->current_state->bw.dcn.clk.dispclk_khz,
dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
dc->current_state->bw.dcn.clk.fclk_khz,
dc->current_state->bw.dcn.clk.socclk_khz);
- return chars_printed;
+ remaining_buffer -= chars_printed;
+ pBuf += chars_printed;
+
+ return bufSize - remaining_buffer;
+}
+
+static void dcn10_clear_otpc_underflow(struct dc *dc)
+{
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ for (i = 0; i < pool->timing_generator_count; i++) {
+ struct timing_generator *tg = pool->timing_generators[i];
+ struct dcn_otg_state s = {0};
+
+ optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
+
+ if (s.otg_enabled & 1)
+ tg->funcs->clear_optc_underflow(tg);
+ }
+}
+
+static void dcn10_clear_hubp_underflow(struct dc *dc)
+{
+ struct resource_pool *pool = dc->res_pool;
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ struct hubp *hubp = pool->hubps[i];
+ struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
+
+ hubp->funcs->hubp_read_state(hubp);
+
+ if (!s->blank_en)
+ hubp->funcs->hubp_clear_underflow(hubp);
+ }
+}
+
+void dcn10_clear_status_bits(struct dc *dc, unsigned int mask)
+{
+ /*
+ * Mask Format
+ * Bit 0 - 31: Status bit to clear
+ *
+ * Mask = 0x0 means clear all status bits
+ */
+ const unsigned int DC_HW_STATE_MASK_HUBP_UNDERFLOW = 0x1;
+ const unsigned int DC_HW_STATE_MASK_OTPC_UNDERFLOW = 0x2;
+
+ if (mask == 0x0)
+ mask = 0xFFFFFFFF;
+
+ if (mask & DC_HW_STATE_MASK_HUBP_UNDERFLOW)
+ dcn10_clear_hubp_underflow(dc);
+
+ if (mask & DC_HW_STATE_MASK_OTPC_UNDERFLOW)
+ dcn10_clear_otpc_underflow(dc);
}
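Caller-side, the new hook is reached through the hw_sequencer funcs table (see .clear_status_bits below); a hypothetical debug invocation, with an illustrative mask value:

	/* clear only HUBP underflow status (bit 0); a mask of 0 clears everything */
	if (dc->hwss.clear_status_bits)
		dc->hwss.clear_status_bits(dc, 0x1);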
void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask)
@@ -491,16 +542,16 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
* Bit 0 - 15: Hardware block mask
* Bit 15: 1 = Invariant Only, 0 = All
*/
- const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1;
- const unsigned int DC_HW_STATE_MASK_HUBP = 0x2;
- const unsigned int DC_HW_STATE_MASK_RQ = 0x4;
- const unsigned int DC_HW_STATE_MASK_DLG = 0x8;
- const unsigned int DC_HW_STATE_MASK_TTU = 0x10;
- const unsigned int DC_HW_STATE_MASK_CM = 0x20;
- const unsigned int DC_HW_STATE_MASK_MPCC = 0x40;
- const unsigned int DC_HW_STATE_MASK_OTG = 0x80;
- const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100;
- const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000;
+ const unsigned int DC_HW_STATE_MASK_HUBBUB = 0x1;
+ const unsigned int DC_HW_STATE_MASK_HUBP = 0x2;
+ const unsigned int DC_HW_STATE_MASK_RQ = 0x4;
+ const unsigned int DC_HW_STATE_MASK_DLG = 0x8;
+ const unsigned int DC_HW_STATE_MASK_TTU = 0x10;
+ const unsigned int DC_HW_STATE_MASK_CM = 0x20;
+ const unsigned int DC_HW_STATE_MASK_MPCC = 0x40;
+ const unsigned int DC_HW_STATE_MASK_OTG = 0x80;
+ const unsigned int DC_HW_STATE_MASK_CLOCKS = 0x100;
+ const unsigned int DC_HW_STATE_INVAR_ONLY = 0x8000;
unsigned int chars_printed = 0;
unsigned int remaining_buf_size = bufSize;
@@ -556,6 +607,9 @@ void dcn10_get_hw_state(struct dc *dc, char *pBuf, unsigned int bufSize, unsigne
remaining_buf_size -= chars_printed;
}
- if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0)
+ if ((mask & DC_HW_STATE_MASK_CLOCKS) && remaining_buf_size > 0) {
chars_printed = dcn10_get_clock_states(dc, pBuf, remaining_buf_size);
+ pBuf += chars_printed;
+ remaining_buf_size -= chars_printed;
+ }
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index ba6a8686062f..477ab9222216 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -589,7 +589,7 @@ static bool dcn10_link_encoder_validate_hdmi_output(
return false;
/* DCE11 HW does not support 420 */
- if (!enc10->base.features.ycbcr420_supported &&
+ if (!enc10->base.features.hdmi_ycbcr420_supported &&
crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
return false;
@@ -606,8 +606,10 @@ bool dcn10_link_encoder_validate_dp_output(
const struct dcn10_link_encoder *enc10,
const struct dc_crtc_timing *crtc_timing)
{
- if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- return false;
+ if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (!enc10->base.features.dp_ycbcr420_supported)
+ return false;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 54626682bab2..7c138615f17d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -87,9 +87,8 @@ static void optc1_disable_stereo(struct timing_generator *optc)
REG_SET(OTG_STEREO_CONTROL, 0,
OTG_STEREO_EN, 0);
- REG_SET_3(OTG_3D_STRUCTURE_CONTROL, 0,
+ REG_SET_2(OTG_3D_STRUCTURE_CONTROL, 0,
OTG_3D_STRUCTURE_EN, 0,
- OTG_3D_STRUCTURE_V_UPDATE_MODE, 0,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0);
}
@@ -274,10 +273,12 @@ void optc1_program_timing(
* program the reg for interrupt position.
*/
vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1;
- if (vertical_line_start < 0) {
- ASSERT(0);
+ v_fp2 = 0;
+ if (vertical_line_start < 0)
+ v_fp2 = -vertical_line_start;
+ if (vertical_line_start < 0)
vertical_line_start = 0;
- }
+
REG_SET(OTG_VERTICAL_INTERRUPT2_POSITION, 0,
OTG_VERTICAL_INTERRUPT2_LINE_START, vertical_line_start);
@@ -296,9 +297,6 @@ void optc1_program_timing(
if (patched_crtc_timing.flags.INTERLACE == 1)
field_num = 1;
}
- v_fp2 = 0;
- if (optc->dlg_otg_param.vstartup_start > asic_blank_end)
- v_fp2 = optc->dlg_otg_param.vstartup_start > asic_blank_end;
/* Interlace */
if (patched_crtc_timing.flags.INTERLACE == 1) {
@@ -337,9 +335,8 @@ void optc1_program_timing(
/* Enable stereo - only when we need to pack 3D frame. Other types
* of stereo handled in explicit call
*/
- h_div_2 = (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) ?
- 1 : 0;
+ h_div_2 = optc1_is_two_pixels_per_containter(&patched_crtc_timing);
REG_UPDATE(OTG_H_TIMING_CNTL,
OTG_H_TIMING_DIV_BY2, h_div_2);
@@ -362,20 +359,19 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab
static void optc1_unblank_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t vertical_interrupt_enable = 0;
-
- REG_GET(OTG_VERTICAL_INTERRUPT2_CONTROL,
- OTG_VERTICAL_INTERRUPT2_INT_ENABLE, &vertical_interrupt_enable);
-
- /* temporary work around for vertical interrupt, once vertical interrupt enabled,
- * this check will be removed.
- */
- if (vertical_interrupt_enable)
- optc1_set_blank_data_double_buffer(optc, true);
REG_UPDATE_2(OTG_BLANK_CONTROL,
OTG_BLANK_DATA_EN, 0,
OTG_BLANK_DE_MODE, 0);
+
+ /* W/A for automated testing
+ * Automated testing will fail underflow test as there
+ * sporadic underflows which occur during the optc blank
+ * sequence. As a w/a, clear underflow on unblank.
+ * This prevents the failure, but will not mask actual
+ * underflow that affect real use cases.
+ */
+ optc1_clear_optc_underflow(optc);
}
/**
@@ -1155,9 +1151,8 @@ static void optc1_enable_stereo(struct timing_generator *optc,
OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP, 1);
if (flags->PROGRAM_STEREO)
- REG_UPDATE_3(OTG_3D_STRUCTURE_CONTROL,
+ REG_UPDATE_2(OTG_3D_STRUCTURE_CONTROL,
OTG_3D_STRUCTURE_EN, flags->FRAME_PACKED,
- OTG_3D_STRUCTURE_V_UPDATE_MODE, flags->FRAME_PACKED,
OTG_3D_STRUCTURE_STEREO_SEL_OVR, flags->FRAME_PACKED);
}
@@ -1425,3 +1420,9 @@ void dcn10_timing_generator_init(struct optc *optc1)
optc1->min_h_sync_width = 8;
optc1->min_v_sync_width = 1;
}
+
+bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
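+ /* YCbCr 4:2:0 packs two pixels per container, which is why
+ * OTG_H_TIMING_DIV_BY2 is driven from this check in
+ * optc1_program_timing() above. */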
+ return timing->pixel_encoding == PIXEL_ENCODING_YCBCR420;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index c1b114209fe8..8bacf0b6e27e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -565,4 +565,6 @@ bool optc1_configure_crc(struct timing_generator *optc,
bool optc1_get_crc(struct timing_generator *optc,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
+bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index a71453a15ae3..5d4772dec0ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -28,23 +28,23 @@
#include "resource.h"
#include "include/irq_service_interface.h"
-#include "dcn10/dcn10_resource.h"
+#include "dcn10_resource.h"
-#include "dcn10/dcn10_ipp.h"
-#include "dcn10/dcn10_mpc.h"
+#include "dcn10_ipp.h"
+#include "dcn10_mpc.h"
#include "irq/dcn10/irq_service_dcn10.h"
-#include "dcn10/dcn10_dpp.h"
+#include "dcn10_dpp.h"
#include "dcn10_optc.h"
-#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn10_hw_sequencer.h"
#include "dce110/dce110_hw_sequencer.h"
-#include "dcn10/dcn10_opp.h"
-#include "dcn10/dcn10_link_encoder.h"
-#include "dcn10/dcn10_stream_encoder.h"
-#include "dce/dce_clocks.h"
+#include "dcn10_opp.h"
+#include "dcn10_link_encoder.h"
+#include "dcn10_stream_encoder.h"
+#include "dcn10_clk_mgr.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
-#include "../virtual/virtual_stream_encoder.h"
+#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
#include "dcn10_hubp.h"
@@ -202,7 +202,6 @@ enum dcn10_clk_src_array_id {
#define MMHUB_SR(reg_name)\
.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
mm ## reg_name
-
/* macros to expand register list macro defined in HW object header file
* end *********************/
@@ -436,8 +435,8 @@ static const struct dcn_optc_mask tg_mask = {
TG_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
-
static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_0),
NBIO_SR(BIOS_SCRATCH_3),
NBIO_SR(BIOS_SCRATCH_6)
};
@@ -496,7 +495,6 @@ static const struct dce110_clk_src_mask cs_mask = {
CS_COMMON_MASK_SH_LIST_DCN1_0(_MASK)
};
-
static const struct resource_caps res_cap = {
.num_timing_generator = 4,
.num_opp = 4,
@@ -719,7 +717,8 @@ static struct timing_generator *dcn10_timing_generator_create(
static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_deep_color = COLOR_DEPTH_121212,
.max_hdmi_pixel_clock = 600000,
- .ycbcr420_supported = true,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = false,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -949,8 +948,8 @@ static void destruct(struct dcn10_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
- if (pool->base.dccg != NULL)
- dce_dccg_destroy(&pool->base.dccg);
+ if (pool->base.clk_mgr != NULL)
+ dce_clk_mgr_destroy(&pool->base.clk_mgr);
kfree(pool->base.pp_smu);
}
@@ -1275,9 +1274,8 @@ static bool construct(
goto fail;
}
}
-
- pool->base.dccg = dcn1_dccg_create(ctx);
- if (pool->base.dccg == NULL) {
+ pool->base.clk_mgr = dcn1_clk_mgr_create(ctx);
+ if (pool->base.clk_mgr == NULL) {
dm_error("DC: failed to create display clock!\n");
BREAK_TO_DEBUGGER();
goto fail;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 6f9078f3c4d3..b8b5525a389a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -766,7 +766,6 @@ void enc1_stream_encoder_dp_blank(
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t retries = 0;
uint32_t reg1 = 0;
uint32_t max_retries = DP_BLANK_MAX_RETRY * 10;
@@ -803,8 +802,6 @@ void enc1_stream_encoder_dp_blank(
0,
10, max_retries);
- ASSERT(retries <= max_retries);
-
/* Tell the DP encoder to ignore timing from CRTC, must be done after
* the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is
* complete, stream status will be stuck in video stream enabled state,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_event_log.h b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
index 34a701ca879e..65663f4d93e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_event_log.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_event_log.h
@@ -33,6 +33,7 @@
#define EVENT_LOG_AUX_REQ(ddc, type, action, address, len, data)
#define EVENT_LOG_AUX_REP(ddc, type, replyStatus, len, data)
+#define EVENT_LOG_CUST_MSG(tag, a, ...)
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index f2ea8452d48f..0029a39efb1c 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -55,10 +55,10 @@ struct pp_smu {
struct pp_smu_wm_set_range {
unsigned int wm_inst;
- uint32_t min_fill_clk_khz;
- uint32_t max_fill_clk_khz;
- uint32_t min_drain_clk_khz;
- uint32_t max_drain_clk_khz;
+ uint32_t min_fill_clk_mhz;
+ uint32_t max_fill_clk_mhz;
+ uint32_t min_drain_clk_mhz;
+ uint32_t max_drain_clk_mhz;
};
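With the unit change above, callers fill the range in MHz rather than kHz; a hypothetical initializer (all values illustrative):

	struct pp_smu_wm_set_range range = {
		.wm_inst = 0,
		.min_fill_clk_mhz = 400,	/* previously 400000 in kHz */
		.max_fill_clk_mhz = 800,
		.min_drain_clk_mhz = 300,
		.max_drain_clk_mhz = 600,
	};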
#define MAX_WATERMARK_SETS 4
@@ -77,15 +77,15 @@ struct pp_smu_display_requirement_rv {
*/
unsigned int display_count;
- /* PPSMC_MSG_SetHardMinFclkByFreq: khz
+ /* PPSMC_MSG_SetHardMinFclkByFreq: mhz
* FCLK will vary with DPM, but never below requested hard min
*/
- unsigned int hard_min_fclk_khz;
+ unsigned int hard_min_fclk_mhz;
- /* PPSMC_MSG_SetHardMinDcefclkByFreq: khz
+ /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz
* fixed clock at requested freq, either from FCH bypass or DFS
*/
- unsigned int hard_min_dcefclk_khz;
+ unsigned int hard_min_dcefclk_mhz;
/* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz
* when DF is in cstate, dcf clock is further divided down
@@ -102,14 +102,20 @@ struct pp_smu_funcs_rv {
*/
void (*set_display_count)(struct pp_smu *pp, int count);
- /* which SMU message? are reader and writer WM separate SMU msg? */
+ /* reader and writer WMs are sent together as part of one table */
+ /*
+ * PPSMC_MSG_SetDriverDramAddrHigh
+ * PPSMC_MSG_SetDriverDramAddrLow
+ * PPSMC_MSG_TransferTableDram2Smu
+ */
void (*set_wm_ranges)(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges);
/* PPSMC_MSG_SetHardMinDcfclkByFreq
* fixed clock at requested freq, either from FCH bypass or DFS
*/
- void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int khz);
+ void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int mhz);
/* PPSMC_MSG_SetMinDeepSleepDcfclk
* when DF is in cstate, dcf clock is further divided down
@@ -120,12 +126,12 @@ struct pp_smu_funcs_rv {
/* PPSMC_MSG_SetHardMinFclkByFreq
* FCLK will vary with DPM, but never below requested hard min
*/
- void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int khz);
+ void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int mhz);
/* PPSMC_MSG_SetHardMinSocclkByFreq
* Needed for DWB support
*/
- void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int khz);
+ void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int mhz);
/* PME w/a */
void (*set_pme_wa_enable)(struct pp_smu *pp);
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 28128c02de00..1961cc6d9143 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -31,6 +31,8 @@
#define __DM_SERVICES_H__
+#include "amdgpu_dm_trace.h"
+
/* TODO: remove when DC is complete. */
#include "dm_services_types.h"
#include "logger_interface.h"
@@ -70,6 +72,7 @@ static inline uint32_t dm_read_reg_func(
}
#endif
value = cgs_read_register(ctx->cgs_device, address);
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
return value;
}
@@ -90,6 +93,7 @@ static inline void dm_write_reg_func(
}
#endif
cgs_write_register(ctx->cgs_device, address, value);
+ trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
}
static inline uint32_t dm_read_index_reg(
@@ -351,8 +355,12 @@ unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
/*
* performance tracing
*/
-void dm_perf_trace_timestamp(const char *func_name, unsigned int line);
-#define PERF_TRACE() dm_perf_trace_timestamp(__func__, __LINE__)
+#define PERF_TRACE() trace_amdgpu_dc_performance(CTX->perf_trace->read_count,\
+ CTX->perf_trace->write_count, &CTX->perf_trace->last_entry_read,\
+ &CTX->perf_trace->last_entry_write, __func__, __LINE__)
+#define PERF_TRACE_CTX(__CTX) trace_amdgpu_dc_performance(__CTX->perf_trace->read_count,\
+ __CTX->perf_trace->write_count, &__CTX->perf_trace->last_entry_read,\
+ &__CTX->perf_trace->last_entry_write, __func__, __LINE__)
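A usage sketch (illustrative only; PERF_TRACE_CTX takes an explicit context, while PERF_TRACE assumes a CTX visible in the enclosing scope):

	static void example_step(struct dc_context *ctx)
	{
		/* emits the cumulative register read/write counts at this point */
		PERF_TRACE_CTX(ctx);
	}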
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index 2b83f922ac02..1af8c777b3ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -208,22 +208,20 @@ struct dm_bl_data_point {
/* Brightness level as effective value in range 0-255,
* corresponding to above percentage
*/
- uint8_t signalLevel;
+ uint8_t signal_level;
};
/* Total size of the structure should not exceed 256 bytes */
struct dm_acpi_atif_backlight_caps {
-
-
uint16_t size; /* Bytes 0-1 (2 bytes) */
uint16_t flags; /* Bytes 2-3 (2 bytes) */
- uint8_t errorCode; /* Byte 4 */
- uint8_t acLevelPercentage; /* Byte 5 */
- uint8_t dcLevelPercentage; /* Byte 6 */
- uint8_t minInputSignal; /* Byte 7 */
- uint8_t maxInputSignal; /* Byte 8 */
- uint8_t numOfDataPoints; /* Byte 9 */
- struct dm_bl_data_point dataPoints[99]; /* Bytes 10-207 (198 bytes)*/
+ uint8_t error_code; /* Byte 4 */
+ uint8_t ac_level_percentage; /* Byte 5 */
+ uint8_t dc_level_percentage; /* Byte 6 */
+ uint8_t min_input_signal; /* Byte 7 */
+ uint8_t max_input_signal; /* Byte 8 */
+ uint8_t num_data_points; /* Byte 9 */
+ struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/
};
enum dm_acpi_display_type {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index cbafce649e33..5dd04520ceca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -113,7 +113,8 @@ struct _vcs_dpi_soc_bounding_box_st {
int use_urgent_burst_bw;
double max_hscl_ratio;
double max_vscl_ratio;
- struct _vcs_dpi_voltage_scaling_st clock_limits[7];
+ unsigned int num_states;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[8];
};
struct _vcs_dpi_ip_params_st {
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
index f20161c5706d..dada04296025 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
@@ -56,7 +56,6 @@ struct gpio_service *dal_gpio_service_create(
struct dc_context *ctx)
{
struct gpio_service *service;
-
uint32_t index_of_id;
service = kzalloc(sizeof(struct gpio_service), GFP_KERNEL);
@@ -78,44 +77,33 @@ struct gpio_service *dal_gpio_service_create(
goto failure_1;
}
- /* allocate and initialize business storage */
+ /* allocate and initialize busyness storage */
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
index_of_id = 0;
service->ctx = ctx;
do {
uint32_t number_of_bits =
service->factory.number_of_pins[index_of_id];
+ uint32_t i = 0;
- uint32_t number_of_uints =
- (number_of_bits + bits_per_uint - 1) /
- bits_per_uint;
-
- uint32_t *slot;
-
- if (number_of_bits) {
- uint32_t index_of_uint = 0;
+ if (number_of_bits) {
+ service->busyness[index_of_id] =
+ kcalloc(number_of_bits, sizeof(char),
+ GFP_KERNEL);
- slot = kcalloc(number_of_uints,
- sizeof(uint32_t),
- GFP_KERNEL);
-
- if (!slot) {
+ if (!service->busyness[index_of_id]) {
BREAK_TO_DEBUGGER();
goto failure_2;
}
do {
- slot[index_of_uint] = 0;
-
- ++index_of_uint;
- } while (index_of_uint < number_of_uints);
- } else
- slot = NULL;
-
- service->busyness[index_of_id] = slot;
+ service->busyness[index_of_id][i] = 0;
+ ++i;
+ } while (i < number_of_bits);
+ } else {
+ service->busyness[index_of_id] = NULL;
+ }
++index_of_id;
} while (index_of_id < GPIO_ID_COUNT);
@@ -125,13 +113,8 @@ struct gpio_service *dal_gpio_service_create(
failure_2:
while (index_of_id) {
- uint32_t *slot;
-
--index_of_id;
-
- slot = service->busyness[index_of_id];
-
- kfree(slot);
+ kfree(service->busyness[index_of_id]);
}
failure_1:
@@ -169,9 +152,7 @@ void dal_gpio_service_destroy(
uint32_t index_of_id = 0;
do {
- uint32_t *slot = (*ptr)->busyness[index_of_id];
-
- kfree(slot);
+ kfree((*ptr)->busyness[index_of_id]);
++index_of_id;
} while (index_of_id < GPIO_ID_COUNT);
@@ -192,11 +173,7 @@ static bool is_pin_busy(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- const uint32_t *slot = service->busyness[id] + (en / bits_per_uint);
-
- return 0 != (*slot & (1 << (en % bits_per_uint)));
+ return service->busyness[id][en];
}
static void set_pin_busy(
@@ -204,10 +181,7 @@ static void set_pin_busy(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- service->busyness[id][en / bits_per_uint] |=
- (1 << (en % bits_per_uint));
+ service->busyness[id][en] = true;
}
static void set_pin_free(
@@ -215,10 +189,7 @@ static void set_pin_free(
enum gpio_id id,
uint32_t en)
{
- const uint32_t bits_per_uint = sizeof(uint32_t) << 3;
-
- service->busyness[id][en / bits_per_uint] &=
- ~(1 << (en % bits_per_uint));
+ service->busyness[id][en] = false;
}
enum gpio_result dal_gpio_service_open(
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
index c7f3081f59cc..1d501a43d13b 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h
@@ -36,10 +36,9 @@ struct gpio_service {
/*
* @brief
* Busyness storage.
- * For each member of 'enum gpio_id',
- * store array of bits (packed into uint32_t slots),
- * index individual bit by 'en' value */
- uint32_t *busyness[GPIO_ID_COUNT];
+ * one byte for each member of 'enum gpio_id'
+ */
+ char *busyness[GPIO_ID_COUNT];
};
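With one byte per pin, the busyness bookkeeping reduces to direct indexing; the reworked helpers in gpio_service.c above amount to, in essence:

	static bool pin_is_busy(const struct gpio_service *service,
			enum gpio_id id, uint32_t en)
	{
		return service->busyness[id][en] != 0;
	}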
enum gpio_result dal_gpio_service_open(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
index 39ee8eba3c31..d1656c9d50df 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h
@@ -126,7 +126,7 @@ static inline struct bw_fixed bw_div(const struct bw_fixed arg1, const struct bw
static inline struct bw_fixed bw_mod(const struct bw_fixed arg1, const struct bw_fixed arg2)
{
struct bw_fixed res;
- div64_u64_rem(arg1.value, arg2.value, &res.value);
+ div64_u64_rem(arg1.value, arg2.value, (uint64_t *)&res.value);
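+ /* cast above: div64_u64_rem() takes a u64 * for the remainder, while
+ * bw_fixed.value is a signed 64-bit field of the same width */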
return res;
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/compressor.h b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
index bcb18f5e1e60..7a147a9762a0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/compressor.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/compressor.h
@@ -77,6 +77,7 @@ struct compressor_funcs {
};
struct compressor {
struct dc_context *ctx;
+ /* CONTROLLER_ID_D0 + instance, CONTROLLER_ID_UNDEFINED = 0 */
uint32_t attached_inst;
bool is_enabled;
const struct compressor_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c1976c175b57..b168a5e9dd9d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -82,7 +82,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option);
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
/********** DAL Core*********************/
-#include "display_clock.h"
+#include "hw/clk_mgr.h"
#include "transform.h"
#include "dpp.h"
@@ -169,6 +169,7 @@ struct resource_pool {
unsigned int audio_count;
struct audio_support audio_support;
+ struct clk_mgr *clk_mgr;
struct dccg *dccg;
struct irq_service *irqs;
@@ -271,6 +272,17 @@ union bw_context {
struct dce_bw_output dce;
};
+/**
+ * struct dc_state - The full description of a state requested by a user
+ *
+ * @streams: Stream properties
+ * @stream_status: The planes on a given stream
+ * @res_ctx: Persistent state of resources
+ * @bw: The output from bandwidth and watermark calculations
+ * @pp_display_cfg: PowerPlay clocks and settings
+ * @dcn_bw_vars: non-stack memory to support bandwidth calculations
+ *
+ */
struct dc_state {
struct dc_stream_state *streams[MAX_PIPES];
struct dc_stream_status stream_status[MAX_PIPES];
@@ -278,7 +290,6 @@ struct dc_state {
struct resource_context res_ctx;
- /* The output from BW and WM calculations. */
union bw_context bw;
/* Note: these are big structures, do *not* put on stack! */
@@ -287,7 +298,7 @@ struct dc_state {
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
- struct dccg *dis_clk;
+ struct clk_mgr *dccg;
struct kref refcount;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index e688eb9b975c..ece954a40a8e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -31,8 +31,8 @@
#define __DCN_CALCS_H__
#include "bw_fixed.h"
-#include "display_clock.h"
#include "../dml/display_mode_lib.h"
+#include "hw/clk_mgr.h"
struct dc;
struct dc_state;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index a83a48494613..abc961c0906e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -47,12 +47,18 @@ struct abm_funcs {
bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
bool (*set_abm_immediate_disable)(struct abm *abm);
bool (*init_backlight)(struct abm *abm);
- bool (*set_backlight_level)(struct abm *abm,
- unsigned int backlight_level,
+
+ /* backlight_pwm_u16_16 is an unsigned 32-bit value:
+ * 16-bit integer + 16-bit fractional, where 1.0 is the max backlight value.
+ */
+ bool (*set_backlight_level_pwm)(struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
bool use_smooth_brightness);
- unsigned int (*get_current_backlight_8_bit)(struct abm *abm);
+
+ unsigned int (*get_current_backlight)(struct abm *abm);
+ unsigned int (*get_target_backlight)(struct abm *abm);
};
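For illustration, a hypothetical helper (not part of this patch) converting a legacy 8-bit level to the unsigned 16.16 format described above, where 0x00010000 represents 1.0:

	static unsigned int backlight_8_bit_to_pwm_u16_16(unsigned int level_8_bit)
	{
		/* 255 maps exactly to 1.0 == 0x00010000 */
		return (level_8_bit << 16) / 255;
	}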
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 689faa16c0ae..23a4b18e5fee 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/display_clock.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -23,41 +23,25 @@
*
*/
-#ifndef __DISPLAY_CLOCK_H__
-#define __DISPLAY_CLOCK_H__
+#ifndef __DAL_CLK_MGR_H__
+#define __DAL_CLK_MGR_H__
#include "dm_services_types.h"
#include "dc.h"
-/* Structure containing all state-dependent clocks
- * (dependent on "enum clocks_state") */
-struct state_dependent_clocks {
- int display_clk_khz;
- int pixel_clk_khz;
-};
-
-struct dccg {
+struct clk_mgr {
struct dc_context *ctx;
- const struct display_clock_funcs *funcs;
+ const struct clk_mgr_funcs *funcs;
- enum dm_pp_clocks_state max_clks_state;
- enum dm_pp_clocks_state cur_min_clks_state;
struct dc_clocks clks;
};
-struct display_clock_funcs {
- void (*update_clocks)(struct dccg *dccg,
- struct dc_clocks *new_clocks,
+struct clk_mgr_funcs {
+ void (*update_clocks)(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
bool safe_to_lower);
- int (*set_dispclk)(struct dccg *dccg,
- int requested_clock_khz);
-
- int (*get_dp_ref_clk_frequency)(struct dccg *dccg);
- bool (*update_dfs_bypass)(struct dccg *dccg,
- struct dc *dc,
- struct dc_state *context,
- int requested_clock_khz);
+ int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
};
-#endif /* __DISPLAY_CLOCK_H__ */
+#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
new file mode 100644
index 000000000000..95a56d012626
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCCG_H__
+#define __DAL_DCCG_H__
+
+#include "dc_types.h"
+
+struct dccg {
+ struct dc_context *ctx;
+ const struct dccg_funcs *funcs;
+
+ int ref_dppclk;
+};
+
+struct dccg_funcs {
+ void (*update_dpp_dto)(struct dccg *dccg,
+ int dpp_inst,
+ int req_dppclk);
+};
+
+#endif //__DAL_DCCG_H__
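The new dccg block carries only the per-DPP DTO hook; a caller sketch (function and argument names are placeholders):

	static void example_program_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
	{
		/* program the divider target output for one DPP instance */
		dccg->funcs->update_dpp_dto(dccg, dpp_inst, req_dppclk);
	}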
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index 4550747fb61c..cb85eaa9857f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -32,6 +32,13 @@ enum dmcu_state {
DMCU_RUNNING = 1
};
+struct dmcu_version {
+ unsigned int date;
+ unsigned int month;
+ unsigned int year;
+ unsigned int interface_version;
+};
+
struct dmcu {
struct dc_context *ctx;
const struct dmcu_funcs *funcs;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 334c48cdafdc..04c6989aac58 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -63,6 +63,11 @@ struct hubp_funcs {
struct _vcs_dpi_display_rq_regs_st *rq_regs,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+ void (*hubp_setup_interdependent)(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+
void (*dcc_control)(struct hubp *hubp, bool enable,
bool independent_64b_blks);
void (*mem_program_viewport)(
@@ -121,6 +126,7 @@ struct hubp_funcs {
void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
void (*hubp_read_state)(struct hubp *hubp);
+ void (*hubp_clear_underflow)(struct hubp *hubp);
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index cf7433ebf91a..da85537a4488 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -53,6 +53,12 @@ struct curve_points {
uint32_t custom_float_slope;
};
+struct curve_points3 {
+ struct curve_points red;
+ struct curve_points green;
+ struct curve_points blue;
+};
+
struct pwl_result_data {
struct fixed31_32 red;
struct fixed31_32 green;
@@ -71,9 +77,17 @@ struct pwl_result_data {
uint32_t delta_blue_reg;
};
+/* arr_curve_points - regamma regions/segments specification
+ * arr_points - beginning and end point specified separately (only one on DCE)
+ * corner_points - beginning and end point for all 3 colors (DCN)
+ * rgb_resulted - final curve
+ */
struct pwl_params {
struct gamma_curve arr_curve_points[34];
- struct curve_points arr_points[2];
+ union {
+ struct curve_points arr_points[2];
+ struct curve_points3 corner_points[2];
+ };
struct pwl_result_data rgb_resulted[256 + 3];
uint32_t hw_points_num;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index e28e9770e0a3..c20fdcaac53b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -65,7 +65,8 @@ struct encoder_feature_support {
enum dc_color_depth max_hdmi_deep_color;
unsigned int max_hdmi_pixel_clock;
- bool ycbcr420_supported;
+ bool hdmi_ycbcr420_supported;
+ bool dp_ycbcr420_supported;
};
union dpcd_psr_configuration {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index da89c2edb07c..06df02ddff6a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -31,7 +31,7 @@
#include "dml/display_mode_structs.h"
struct dchub_init_data;
-struct cstate_pstate_watermarks_st {
+struct cstate_pstate_watermarks_st1 {
uint32_t cstate_exit_ns;
uint32_t cstate_enter_plus_exit_ns;
uint32_t pstate_change_ns;
@@ -40,7 +40,7 @@ struct cstate_pstate_watermarks_st {
struct dcn_watermarks {
uint32_t pte_meta_urgent_ns;
uint32_t urgent_ns;
- struct cstate_pstate_watermarks_st cstate_pstate;
+ struct cstate_pstate_watermarks_st1 cstate_pstate;
};
struct dcn_watermark_set {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 26f29d5da3d8..d6a85f48b6d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -32,8 +32,6 @@
#include "inc/hw/link_encoder.h"
#include "core_status.h"
-#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF
-
enum pipe_gating_control {
PIPE_GATING_CONTROL_DISABLE = 0,
PIPE_GATING_CONTROL_ENABLE,
@@ -87,11 +85,6 @@ struct hw_sequencer_funcs {
void (*program_gamut_remap)(
struct pipe_ctx *pipe_ctx);
- void (*program_csc_matrix)(
- struct pipe_ctx *pipe_ctx,
- enum dc_color_space colorspace,
- uint16_t *matrix);
-
void (*program_output_csc)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
enum dc_color_space colorspace,
@@ -177,10 +170,12 @@ struct hw_sequencer_funcs {
struct pipe_ctx *pipe_ctx,
bool blank);
- void (*set_bandwidth)(
+ void (*prepare_bandwidth)(
struct dc *dc,
- struct dc_state *context,
- bool safe_to_lower);
+ struct dc_state *context);
+ void (*optimize_bandwidth)(
+ struct dc *dc,
+ struct dc_state *context);
void (*set_drr)(struct pipe_ctx **pipe_ctx, int num_pipes,
int vmin, int vmax);
@@ -205,16 +200,12 @@ struct hw_sequencer_funcs {
void (*log_hw_state)(struct dc *dc,
struct dc_log_buffer_ctx *log_ctx);
void (*get_hw_state)(struct dc *dc, char *pBuf, unsigned int bufSize, unsigned int mask);
+ void (*clear_status_bits)(struct dc *dc, unsigned int mask);
void (*wait_for_mpcc_disconnect)(struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
- void (*ready_shared_resources)(struct dc *dc, struct dc_state *context);
- void (*optimize_shared_resources)(struct dc *dc);
- void (*pplib_apply_display_requirements)(
- struct dc *dc,
- struct dc_state *context);
void (*edp_power_control)(
struct dc_link *link,
bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 33b99e3ab10d..0086a2f1d21a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -30,9 +30,6 @@
#include "dal_asic_id.h"
#include "dm_pp_smu.h"
-/* TODO unhardcode, 4 for CZ*/
-#define MEMORY_TYPE_MULTIPLIER 4
-
enum dce_version resource_parse_asic_id(
struct hw_asic_id asic_id);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index cdcefd087487..479b77c2e89e 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -306,6 +306,18 @@ static struct fixed31_32 translate_from_linear_space(
a1);
}
+static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg)
+{
+ struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10);
+
+ return translate_from_linear_space(arg,
+ dc_fixpt_zero,
+ dc_fixpt_zero,
+ dc_fixpt_zero,
+ dc_fixpt_zero,
+ gamma);
+}
+
static struct fixed31_32 translate_to_linear_space(
struct fixed31_32 arg,
struct fixed31_32 a0,
@@ -709,6 +721,175 @@ static void build_regamma(struct pwl_float_data_ex *rgb_regamma,
}
}
+static void hermite_spline_eetf(struct fixed31_32 input_x,
+ struct fixed31_32 max_display,
+ struct fixed31_32 min_display,
+ struct fixed31_32 max_content,
+ struct fixed31_32 *out_x)
+{
+ struct fixed31_32 min_lum_pq;
+ struct fixed31_32 max_lum_pq;
+ struct fixed31_32 max_content_pq;
+ struct fixed31_32 ks;
+ struct fixed31_32 E1;
+ struct fixed31_32 E2;
+ struct fixed31_32 E3;
+ struct fixed31_32 t;
+ struct fixed31_32 t2;
+ struct fixed31_32 t3;
+ struct fixed31_32 two;
+ struct fixed31_32 three;
+ struct fixed31_32 temp1;
+ struct fixed31_32 temp2;
+ struct fixed31_32 a = dc_fixpt_from_fraction(15, 10);
+ struct fixed31_32 b = dc_fixpt_from_fraction(5, 10);
+ struct fixed31_32 epsilon = dc_fixpt_from_fraction(1, 1000000); // dc_fixpt_epsilon is a bit too small
+
+ if (dc_fixpt_eq(max_content, dc_fixpt_zero)) {
+ *out_x = dc_fixpt_zero;
+ return;
+ }
+
+ compute_pq(input_x, &E1);
+ compute_pq(dc_fixpt_div(min_display, max_content), &min_lum_pq);
+ compute_pq(dc_fixpt_div(max_display, max_content), &max_lum_pq);
+ compute_pq(dc_fixpt_one, &max_content_pq); // always 1? DAL2 code is weird
+ a = dc_fixpt_div(dc_fixpt_add(dc_fixpt_one, b), max_content_pq); // (1+b)/maxContent
+ ks = dc_fixpt_sub(dc_fixpt_mul(a, max_lum_pq), b); // a * max_lum_pq - b
+
+ if (dc_fixpt_lt(E1, ks))
+ E2 = E1;
+ else if (dc_fixpt_le(ks, E1) && dc_fixpt_le(E1, dc_fixpt_one)) {
+ if (dc_fixpt_lt(epsilon, dc_fixpt_sub(dc_fixpt_one, ks)))
+ // t = (E1 - ks) / (1 - ks)
+ t = dc_fixpt_div(dc_fixpt_sub(E1, ks),
+ dc_fixpt_sub(dc_fixpt_one, ks));
+ else
+ t = dc_fixpt_zero;
+
+ two = dc_fixpt_from_int(2);
+ three = dc_fixpt_from_int(3);
+
+ t2 = dc_fixpt_mul(t, t);
+ t3 = dc_fixpt_mul(t2, t);
+ temp1 = dc_fixpt_mul(two, t3);
+ temp2 = dc_fixpt_mul(three, t2);
+
+ // (2t^3 - 3t^2 + 1) * ks
+ E2 = dc_fixpt_mul(ks, dc_fixpt_add(dc_fixpt_one,
+ dc_fixpt_sub(temp1, temp2)));
+
+ // (-2t^3 + 3t^2) * max_lum_pq
+ E2 = dc_fixpt_add(E2, dc_fixpt_mul(max_lum_pq,
+ dc_fixpt_sub(temp2, temp1)));
+
+ temp1 = dc_fixpt_mul(two, t2);
+ temp2 = dc_fixpt_sub(dc_fixpt_one, ks);
+
+ // (t^3 - 2t^2 + t) * (1-ks)
+ E2 = dc_fixpt_add(E2, dc_fixpt_mul(temp2,
+ dc_fixpt_add(t, dc_fixpt_sub(t3, temp1))));
+ } else
+ E2 = dc_fixpt_one;
+
+ temp1 = dc_fixpt_sub(dc_fixpt_one, E2);
+ temp2 = dc_fixpt_mul(temp1, temp1);
+ temp2 = dc_fixpt_mul(temp2, temp2);
+ // temp2 = (1-E2)^4
+
+ E3 = dc_fixpt_add(E2, dc_fixpt_mul(min_lum_pq, temp2));
+ compute_de_pq(E3, out_x);
+
+ *out_x = dc_fixpt_div(*out_x, dc_fixpt_div(max_display, max_content));
+}
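+
+/* The spline above appears to follow the BT.2390-style EETF in PQ space.
+ * With t = (E1 - ks) / (1 - ks) and ks = a * max_lum_pq - b:
+ *   E2 = ks*(2t^3 - 3t^2 + 1) + max_lum_pq*(3t^2 - 2t^3)
+ *        + (1 - ks)*(t^3 - 2t^2 + t)
+ *   E3 = E2 + min_lum_pq*(1 - E2)^4
+ *   out = dePQ(E3) / (max_display / max_content)
+ */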
+
+static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
+ uint32_t hw_points_num,
+ const struct hw_x_point *coordinate_x,
+ const struct freesync_hdr_tf_params *fs_params)
+{
+ uint32_t i;
+ struct pwl_float_data_ex *rgb = rgb_regamma;
+ const struct hw_x_point *coord_x = coordinate_x;
+ struct fixed31_32 scaledX = dc_fixpt_zero;
+ struct fixed31_32 scaledX1 = dc_fixpt_zero;
+ struct fixed31_32 max_display;
+ struct fixed31_32 min_display;
+ struct fixed31_32 max_content;
+ struct fixed31_32 min_content;
+ struct fixed31_32 clip = dc_fixpt_one;
+ struct fixed31_32 output;
+ bool use_eetf = false;
+ bool is_clipped = false;
+ struct fixed31_32 sdr_white_level;
+
+ if (fs_params == NULL || fs_params->max_content == 0 ||
+ fs_params->max_display == 0)
+ return false;
+
+ max_display = dc_fixpt_from_int(fs_params->max_display);
+ min_display = dc_fixpt_from_fraction(fs_params->min_display, 10000);
+ max_content = dc_fixpt_from_int(fs_params->max_content);
+ min_content = dc_fixpt_from_fraction(fs_params->min_content, 10000);
+ sdr_white_level = dc_fixpt_from_int(fs_params->sdr_white_level);
+
+ if (fs_params->min_display > 1000) // cap at 0.1 at the bottom
+ min_display = dc_fixpt_from_fraction(1, 10);
+ if (fs_params->max_display < 100) // cap at 100 at the top
+ max_display = dc_fixpt_from_int(100);
+
+ if (fs_params->min_content < fs_params->min_display)
+ use_eetf = true;
+ else
+ min_content = min_display;
+
+ if (fs_params->max_content > fs_params->max_display)
+ use_eetf = true;
+ else
+ max_content = max_display;
+
+ rgb += 32; // skip the first 32 points: they are too small to represent reliably in fixed point
+ coord_x += 32;
+ for (i = 32; i <= hw_points_num; i++) {
+ if (!is_clipped) {
+ if (use_eetf) {
+ /* max content is equal to 1 */
+ scaledX1 = dc_fixpt_div(coord_x->x,
+ dc_fixpt_div(max_content, sdr_white_level));
+ hermite_spline_eetf(scaledX1, max_display, min_display,
+ max_content, &scaledX);
+ } else
+ scaledX = dc_fixpt_div(coord_x->x,
+ dc_fixpt_div(max_display, sdr_white_level));
+
+ if (dc_fixpt_lt(scaledX, clip)) {
+ if (dc_fixpt_lt(scaledX, dc_fixpt_zero))
+ output = dc_fixpt_zero;
+ else
+ output = calculate_gamma22(scaledX);
+
+ rgb->r = output;
+ rgb->g = output;
+ rgb->b = output;
+ } else {
+ is_clipped = true;
+ rgb->r = clip;
+ rgb->g = clip;
+ rgb->b = clip;
+ }
+ } else {
+ rgb->r = clip;
+ rgb->g = clip;
+ rgb->b = clip;
+ }
+
+ ++coord_x;
+ ++rgb;
+ }
+
+ return true;
+}
+
static void build_degamma(struct pwl_float_data_ex *curve,
uint32_t hw_points_num,
const struct hw_x_point *coordinate_x, bool is_2_4)
@@ -1356,7 +1537,8 @@ static bool map_regamma_hw_to_x_user(
#define _EXTRA_POINTS 3
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
- const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed)
+ const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+ const struct freesync_hdr_tf_params *fs_params)
{
struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
struct dividers dividers;
@@ -1374,7 +1556,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
/* we can use hardcoded curve for plain SRGB TF */
if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
output_tf->tf == TRANSFER_FUNCTION_SRGB &&
- (!mapUserRamp && ramp->type == GAMMA_RGB_256))
+ (ramp->is_identity || (!mapUserRamp && ramp->type == GAMMA_RGB_256)))
return true;
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
@@ -1424,6 +1606,12 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
MAX_HW_POINTS,
coordinates_x,
output_tf->sdr_ref_white_level);
+ } else if (tf == TRANSFER_FUNCTION_GAMMA22 &&
+ fs_params != NULL) {
+ build_freesync_hdr(rgb_regamma,
+ MAX_HW_POINTS,
+ coordinates_x,
+ fs_params);
} else {
tf_pts->end_exponent = 0;
tf_pts->x_point_at_y1_red = 1;
@@ -1573,7 +1761,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
struct pwl_float_data *rgb_user = NULL;
struct pwl_float_data_ex *curve = NULL;
- struct gamma_pixel *axix_x = NULL;
+ struct gamma_pixel *axis_x = NULL;
struct pixel_gamma_point *coeff = NULL;
enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
bool ret = false;
@@ -1599,10 +1787,10 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
GFP_KERNEL);
if (!curve)
goto curve_alloc_fail;
- axix_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axix_x),
+ axis_x = kvcalloc(ramp->num_entries + _EXTRA_POINTS, sizeof(*axis_x),
GFP_KERNEL);
- if (!axix_x)
- goto axix_x_alloc_fail;
+ if (!axis_x)
+ goto axis_x_alloc_fail;
coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff),
GFP_KERNEL);
if (!coeff)
@@ -1615,7 +1803,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
tf = input_tf->tf;
build_evenly_distributed_points(
- axix_x,
+ axis_x,
ramp->num_entries,
dividers);
@@ -1640,7 +1828,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
tf_pts->x_point_at_y1_blue = 1;
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
- coordinates_x, axix_x, curve,
+ coordinates_x, axis_x, curve,
MAX_HW_POINTS, tf_pts,
mapUserRamp && ramp->type != GAMMA_CUSTOM);
if (ramp->type == GAMMA_CUSTOM)
@@ -1650,8 +1838,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
kvfree(coeff);
coeff_alloc_fail:
- kvfree(axix_x);
-axix_x_alloc_fail:
+ kvfree(axis_x);
+axis_x_alloc_fail:
kvfree(curve);
curve_alloc_fail:
kvfree(rgb_user);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 63ccb9c91224..a6e164df090a 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -73,12 +73,21 @@ struct regamma_lut {
};
};
+struct freesync_hdr_tf_params {
+ unsigned int sdr_white_level;
+ unsigned int min_content; // luminance in 1/10000 nits
+ unsigned int max_content; // luminance in nits
+ unsigned int min_display; // luminance in 1/10000 nits
+ unsigned int max_display; // luminance in nits
+};
+
void setup_x_points_distribution(void);
void precompute_pq(void);
void precompute_de_pq(void);
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
- const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed);
+ const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
+ const struct freesync_hdr_tf_params *fs_params);
bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp);
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 4018c7180d00..1544ed3f1747 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,6 +37,8 @@
#define RENDER_TIMES_MAX_COUNT 10
/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
#define BTR_EXIT_MARGIN 2000
+/* Threshold to exit fixed refresh rate */
+#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4
/* Number of consecutive frames to check before entering/exiting fixed refresh*/
#define FIXED_REFRESH_ENTER_FRAME_COUNT 5
#define FIXED_REFRESH_EXIT_FRAME_COUNT 5
@@ -257,40 +259,14 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
if (in_out_vrr->btr.btr_active) {
in_out_vrr->btr.frame_counter = 0;
in_out_vrr->btr.btr_active = false;
-
- /* Exit Fixed Refresh mode */
- } else if (in_out_vrr->fixed.fixed_active) {
-
- in_out_vrr->fixed.frame_counter++;
-
- if (in_out_vrr->fixed.frame_counter >
- FIXED_REFRESH_EXIT_FRAME_COUNT) {
- in_out_vrr->fixed.frame_counter = 0;
- in_out_vrr->fixed.fixed_active = false;
- }
}
} else if (last_render_time_in_us > max_render_time_in_us) {
/* Enter Below the Range */
- if (!in_out_vrr->btr.btr_active &&
- in_out_vrr->btr.btr_enabled) {
- in_out_vrr->btr.btr_active = true;
-
- /* Enter Fixed Refresh mode */
- } else if (!in_out_vrr->fixed.fixed_active &&
- !in_out_vrr->btr.btr_enabled) {
- in_out_vrr->fixed.frame_counter++;
-
- if (in_out_vrr->fixed.frame_counter >
- FIXED_REFRESH_ENTER_FRAME_COUNT) {
- in_out_vrr->fixed.frame_counter = 0;
- in_out_vrr->fixed.fixed_active = true;
- }
- }
+ in_out_vrr->btr.btr_active = true;
}
/* BTR set to "not active" so disengage */
if (!in_out_vrr->btr.btr_active) {
- in_out_vrr->btr.btr_active = false;
in_out_vrr->btr.inserted_duration_in_us = 0;
in_out_vrr->btr.frames_to_insert = 0;
in_out_vrr->btr.frame_counter = 0;
@@ -375,7 +351,12 @@ static void apply_fixed_refresh(struct core_freesync *core_freesync,
bool update = false;
unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
- if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) {
+ // Compute the exit refresh rate and exit frame duration
+ unsigned int exit_refresh_rate_in_milli_hz = ((1000000000 / max_render_time_in_us)
+ + (1000 * FIXED_REFRESH_EXIT_MARGIN_IN_HZ));
+ unsigned int exit_frame_duration_in_us = 1000000000 / exit_refresh_rate_in_milli_hz;
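+ /* Worked example (illustrative): max_render_time_in_us = 16666 (60 Hz)
+ * gives exit_refresh_rate_in_milli_hz = 60002 + 4000 = 64002, so
+ * exit_frame_duration_in_us = 15624; fixed refresh is exited only once
+ * frames complete faster than ~64 Hz, i.e. 4 Hz above the max rate. */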
+
+ if (last_render_time_in_us < exit_frame_duration_in_us) {
/* Exit Fixed Refresh mode */
if (in_out_vrr->fixed.fixed_active) {
in_out_vrr->fixed.frame_counter++;
@@ -627,12 +608,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf,
struct dc_info_packet *infopacket)
{
- if (app_tf != transfer_func_unknown) {
+ if (app_tf != TRANSFER_FUNC_UNKNOWN) {
infopacket->valid = true;
infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active]
- if (app_tf == transfer_func_gamma_22) {
+ if (app_tf == TRANSFER_FUNC_GAMMA_22) {
infopacket->sb[9] |= 0x04; // PB6 = [Bit 2 = Gamma 2.2 EOTF Active]
}
}
@@ -707,11 +688,11 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
return;
switch (packet_type) {
- case packet_type_fs2:
+ case PACKET_TYPE_FS2:
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
break;
- case packet_type_vrr:
- case packet_type_fs1:
+ case PACKET_TYPE_VRR:
+ case PACKET_TYPE_FS1:
default:
build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 786b34380f85..5b1c9a4c7643 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -26,15 +26,13 @@
#ifndef MOD_INFO_PACKET_H_
#define MOD_INFO_PACKET_H_
-struct info_packet_inputs {
- const struct dc_stream_state *pStream;
-};
+#include "mod_shared.h"
-struct info_packets {
- struct dc_info_packet *pVscInfoPacket;
-};
+// Forward declarations
+struct dc_stream_state;
+struct dc_info_packet;
-void mod_build_infopackets(struct info_packet_inputs *inputs,
- struct info_packets *info_packets);
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+ struct dc_info_packet *info_packet);
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index 238c431ae483..1bd02c0ac30c 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -23,27 +23,26 @@
*
*/
-
#ifndef MOD_SHARED_H_
#define MOD_SHARED_H_
enum color_transfer_func {
- transfer_func_unknown,
- transfer_func_srgb,
- transfer_func_bt709,
- transfer_func_pq2084,
- transfer_func_pq2084_interim,
- transfer_func_linear_0_1,
- transfer_func_linear_0_125,
- transfer_func_dolbyvision,
- transfer_func_gamma_22,
- transfer_func_gamma_26
+ TRANSFER_FUNC_UNKNOWN,
+ TRANSFER_FUNC_SRGB,
+ TRANSFER_FUNC_BT709,
+ TRANSFER_FUNC_PQ2084,
+ TRANSFER_FUNC_PQ2084_INTERIM,
+ TRANSFER_FUNC_LINEAR_0_1,
+ TRANSFER_FUNC_LINEAR_0_125,
+ TRANSFER_FUNC_GAMMA_22,
+ TRANSFER_FUNC_GAMMA_26
};
enum vrr_packet_type {
- packet_type_vrr,
- packet_type_fs1,
- packet_type_fs2
+ PACKET_TYPE_VRR,
+ PACKET_TYPE_FS1,
+ PACKET_TYPE_FS2
};
+
#endif /* MOD_SHARED_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index ff8bfb9b43b0..db06fab2ad5c 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -25,6 +25,10 @@
#include "mod_info_packet.h"
#include "core_types.h"
+#include "dc_types.h"
+#include "mod_shared.h"
+
+#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
enum ColorimetryRGBDP {
ColorimetryRGB_DP_sRGB = 0,
@@ -41,7 +45,7 @@ enum ColorimetryYCCDP {
ColorimetryYCC_DP_ITU2020YCbCr = 7,
};
-static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
+void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet)
{
unsigned int vscPacketRevision = 0;
@@ -159,7 +163,7 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
* DPCD register is exposed in the new Extended Receiver Capability field for DPCD Rev. 1.4
* (and higher). When MISC1. bit 6. is Set to 1, a Source device uses a VSC SDP to indicate
* the Pixel Encoding/Colorimetry Format and that a Sink device must ignore MISC1, bit 7, and
- * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become “don’t care”).)
+ * MISC0, bits 7:1 (MISC1, bit 7. and MISC0, bits 7:1 become "don't care").)
*/
if (vscPacketRevision == 0x5) {
/* Secondary-data Packet ID = 0 */
@@ -320,10 +324,3 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
-void mod_build_infopackets(struct info_packet_inputs *inputs,
- struct info_packets *info_packets)
-{
- if (info_packets->pVscInfoPacket != NULL)
- mod_build_vsc_infopacket(inputs->pStream, info_packets->pVscInfoPacket);
-}
-
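For orientation, the revision-5 path that the MISC1-bit-6 comment above refers to starts by writing a fixed SDP header; a hedged sketch of that step, assuming struct dc_info_packet exposes hb0-hb3 and taking the byte values from the DP 1.4 VSC SDP definition rather than from this patch:

/* Sketch only: header bytes for the revision-5 VSC SDP built inside
 * mod_build_vsc_infopacket(); the values are an assumption from the DP
 * spec, not quoted from this patch. */
info_packet->hb0 = 0x00;  /* secondary-data packet ID = 0 */
info_packet->hb1 = 0x07;  /* packet type: VSC */
info_packet->hb2 = 0x05;  /* revision 5: pixel encoding/colorimetry format */
info_packet->hb3 = 0x13;  /* 19 valid data bytes */
info_packet->valid = true;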
diff --git a/drivers/gpu/drm/amd/display/modules/power/Makefile b/drivers/gpu/drm/amd/display/modules/power/Makefile
new file mode 100644
index 000000000000..87851f892a52
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/Makefile
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
+# Makefile for the 'power' sub-module of DAL.
+#
+
+MOD_POWER = power_helpers.o
+
+AMD_DAL_MOD_POWER = $(addprefix $(AMDDALPATH)/modules/power/,$(MOD_POWER))
+#$(info ************ DAL POWER MODULE MAKEFILE ************)
+
+AMD_DISPLAY_FILES += $(AMD_DAL_MOD_POWER)
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
new file mode 100644
index 000000000000..00f63b7dd32f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -0,0 +1,326 @@
+/* Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "power_helpers.h"
+#include "dc/inc/hw/dmcu.h"
+
+#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
+
+/* Possible Min Reduction configs from least aggressive to most aggressive
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12
+ * 100 98.0 94.1 94.1 85.1 80.3 75.3 69.4 60.0 57.6 50.2 49.8 40.0 %
+ */
+static const unsigned char min_reduction_table[13] = {
+0xff, 0xfa, 0xf0, 0xf0, 0xd9, 0xcd, 0xc0, 0xb1, 0x99, 0x93, 0x80, 0x82, 0x66};
+
+/* Possible Max Reduction configs from least aggressive to most aggressive
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12
+ * 96.1 89.8 85.1 80.3 69.4 64.7 64.7 50.2 39.6 30.2 30.2 30.2 19.6 %
+ */
+static const unsigned char max_reduction_table[13] = {
+0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32};
+
+/* Predefined ABM configuration sets. We may have different configuration sets
+ * in order to satisfy different power/quality requirements.
+ */
+static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_level] = {
+/* ABM Level 1, ABM Level 2, ABM Level 3, ABM Level 4 */
+{ 2, 5, 7, 8 }, /* Default - Medium aggressiveness */
+{ 2, 5, 8, 11 }, /* Alt #1 - Increased aggressiveness */
+{ 0, 2, 4, 8 }, /* Alt #2 - Minimal aggressiveness */
+{ 3, 6, 10, 12 }, /* Alt #3 - Super aggressiveness */
+};
+
+#define NUM_AMBI_LEVEL 5
+#define NUM_AGGR_LEVEL 4
+#define NUM_POWER_FN_SEGS 8
+#define NUM_BL_CURVE_SEGS 16
+
+/* NOTE: iRAM is 256B in size */
+struct iram_table_v_2 {
+ /* flags */
+ uint16_t flags; /* 0x00 U16 */
+
+ /* parameters for ABM2.0 algorithm */
+ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */
+ uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */
+ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */
+ uint8_t bright_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */
+ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x52 U2.6 */
+ uint8_t dark_neg_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x66 U2.6 */
+ uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x7a U0.8 */
+ uint8_t deviation_gain; /* 0x7f U0.8 */
+
+ /* parameters for crgb conversion */
+ uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */
+ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */
+ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */
+
+ /* parameters for custom curve */
+ /* thresholds for brightness --> backlight */
+ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */
+ /* offsets for brightness --> backlight */
+ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */
+
+ /* For reading PSR State directly from IRAM */
+ uint8_t psr_state; /* 0xf0 */
+ uint8_t dmcu_interface_version; /* 0xf1 */
+ uint8_t dmcu_date_version_year_b0; /* 0xf2 */
+ uint8_t dmcu_date_version_year_b1; /* 0xf3 */
+ uint8_t dmcu_date_version_month; /* 0xf4 */
+ uint8_t dmcu_date_version_day; /* 0xf5 */
+ uint8_t dmcu_state; /* 0xf6 */
+
+ uint16_t blRampReduction; /* 0xf7 */
+ uint16_t blRampStart; /* 0xf9 */
+ uint8_t dummy5; /* 0xfb */
+ uint8_t dummy6; /* 0xfc */
+ uint8_t dummy7; /* 0xfd */
+ uint8_t dummy8; /* 0xfe */
+ uint8_t dummy9; /* 0xff */
+};
+
+static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
+{
+ return (uint16_t)(backlight_8bit * 0x101);
+}
+
+static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
+ struct iram_table_v_2 *table)
+{
+ unsigned int i;
+ unsigned int num_entries = NUM_BL_CURVE_SEGS;
+ unsigned int query_input_8bit;
+ unsigned int query_output_8bit;
+ unsigned int lut_index;
+
+ table->backlight_thresholds[0] = 0;
+ table->backlight_offsets[0] = params.backlight_lut_array[0];
+ table->backlight_thresholds[num_entries-1] = 0xFFFF;
+ table->backlight_offsets[num_entries-1] =
+ params.backlight_lut_array[params.backlight_lut_array_size - 1];
+
+ /* Set up all brightness levels between 0% and 100% exclusive.
+ * Fills the brightness-to-backlight transform table. The backlight
+ * custom curve describes the transform from brightness to backlight;
+ * it is defined as a set of thresholds and a set of offsets which
+ * together approximate the custom curve with 16 uniformly spaced
+ * linear segments. Each threshold/offset is represented by a 16-bit
+ * entry in format U4.10.
+ */
+ for (i = 1; i+1 < num_entries; i++) {
+ query_input_8bit = DIV_ROUNDUP((i * 256), num_entries);
+
+ lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
+ ASSERT(lut_index < params.backlight_lut_array_size);
+ query_output_8bit = params.backlight_lut_array[lut_index] >> 8;
+
+ table->backlight_thresholds[i] =
+ backlight_8_to_16(query_input_8bit);
+ table->backlight_offsets[i] =
+ backlight_8_to_16(query_output_8bit);
+ }
+}
+
+bool dmcu_load_iram(struct dmcu *dmcu,
+ struct dmcu_iram_parameters params)
+{
+ struct iram_table_v_2 ram_table;
+ unsigned int set = params.set;
+
+ if (dmcu == NULL)
+ return false;
+
+ if (!dmcu->funcs->is_dmcu_initialized(dmcu))
+ return true;
+
+ memset(&ram_table, 0, sizeof(ram_table));
+
+ ram_table.flags = 0x0;
+ ram_table.deviation_gain = 0xb3;
+
+ ram_table.blRampReduction =
+ cpu_to_be16(params.backlight_ramping_reduction);
+ ram_table.blRampStart =
+ cpu_to_be16(params.backlight_ramping_start);
+
+ ram_table.min_reduction[0][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[1][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[2][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[3][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.min_reduction[4][0] = min_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[0][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[1][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[2][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[3][0] = max_reduction_table[abm_config[set][0]];
+ ram_table.max_reduction[4][0] = max_reduction_table[abm_config[set][0]];
+
+ ram_table.min_reduction[0][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[1][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[2][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[3][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.min_reduction[4][1] = min_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[0][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[1][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[2][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[3][1] = max_reduction_table[abm_config[set][1]];
+ ram_table.max_reduction[4][1] = max_reduction_table[abm_config[set][1]];
+
+ ram_table.min_reduction[0][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[1][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[2][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[3][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.min_reduction[4][2] = min_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[0][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[1][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[2][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[3][2] = max_reduction_table[abm_config[set][2]];
+ ram_table.max_reduction[4][2] = max_reduction_table[abm_config[set][2]];
+
+ ram_table.min_reduction[0][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[1][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[2][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[3][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.min_reduction[4][3] = min_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[0][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[1][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[2][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[3][3] = max_reduction_table[abm_config[set][3]];
+ ram_table.max_reduction[4][3] = max_reduction_table[abm_config[set][3]];
+
+ ram_table.bright_pos_gain[0][0] = 0x20;
+ ram_table.bright_pos_gain[0][1] = 0x20;
+ ram_table.bright_pos_gain[0][2] = 0x20;
+ ram_table.bright_pos_gain[0][3] = 0x20;
+ ram_table.bright_pos_gain[1][0] = 0x20;
+ ram_table.bright_pos_gain[1][1] = 0x20;
+ ram_table.bright_pos_gain[1][2] = 0x20;
+ ram_table.bright_pos_gain[1][3] = 0x20;
+ ram_table.bright_pos_gain[2][0] = 0x20;
+ ram_table.bright_pos_gain[2][1] = 0x20;
+ ram_table.bright_pos_gain[2][2] = 0x20;
+ ram_table.bright_pos_gain[2][3] = 0x20;
+ ram_table.bright_pos_gain[3][0] = 0x20;
+ ram_table.bright_pos_gain[3][1] = 0x20;
+ ram_table.bright_pos_gain[3][2] = 0x20;
+ ram_table.bright_pos_gain[3][3] = 0x20;
+ ram_table.bright_pos_gain[4][0] = 0x20;
+ ram_table.bright_pos_gain[4][1] = 0x20;
+ ram_table.bright_pos_gain[4][2] = 0x20;
+ ram_table.bright_pos_gain[4][3] = 0x20;
+ ram_table.bright_neg_gain[0][1] = 0x00;
+ ram_table.bright_neg_gain[0][2] = 0x00;
+ ram_table.bright_neg_gain[0][3] = 0x00;
+ ram_table.bright_neg_gain[1][0] = 0x00;
+ ram_table.bright_neg_gain[1][1] = 0x00;
+ ram_table.bright_neg_gain[1][2] = 0x00;
+ ram_table.bright_neg_gain[1][3] = 0x00;
+ ram_table.bright_neg_gain[2][0] = 0x00;
+ ram_table.bright_neg_gain[2][1] = 0x00;
+ ram_table.bright_neg_gain[2][2] = 0x00;
+ ram_table.bright_neg_gain[2][3] = 0x00;
+ ram_table.bright_neg_gain[3][0] = 0x00;
+ ram_table.bright_neg_gain[3][1] = 0x00;
+ ram_table.bright_neg_gain[3][2] = 0x00;
+ ram_table.bright_neg_gain[3][3] = 0x00;
+ ram_table.bright_neg_gain[4][0] = 0x00;
+ ram_table.bright_neg_gain[4][1] = 0x00;
+ ram_table.bright_neg_gain[4][2] = 0x00;
+ ram_table.bright_neg_gain[4][3] = 0x00;
+ ram_table.dark_pos_gain[0][0] = 0x00;
+ ram_table.dark_pos_gain[0][1] = 0x00;
+ ram_table.dark_pos_gain[0][2] = 0x00;
+ ram_table.dark_pos_gain[0][3] = 0x00;
+ ram_table.dark_pos_gain[1][0] = 0x00;
+ ram_table.dark_pos_gain[1][1] = 0x00;
+ ram_table.dark_pos_gain[1][2] = 0x00;
+ ram_table.dark_pos_gain[1][3] = 0x00;
+ ram_table.dark_pos_gain[2][0] = 0x00;
+ ram_table.dark_pos_gain[2][1] = 0x00;
+ ram_table.dark_pos_gain[2][2] = 0x00;
+ ram_table.dark_pos_gain[2][3] = 0x00;
+ ram_table.dark_pos_gain[3][0] = 0x00;
+ ram_table.dark_pos_gain[3][1] = 0x00;
+ ram_table.dark_pos_gain[3][2] = 0x00;
+ ram_table.dark_pos_gain[3][3] = 0x00;
+ ram_table.dark_pos_gain[4][0] = 0x00;
+ ram_table.dark_pos_gain[4][1] = 0x00;
+ ram_table.dark_pos_gain[4][2] = 0x00;
+ ram_table.dark_pos_gain[4][3] = 0x00;
+ ram_table.dark_neg_gain[0][0] = 0x00;
+ ram_table.dark_neg_gain[0][1] = 0x00;
+ ram_table.dark_neg_gain[0][2] = 0x00;
+ ram_table.dark_neg_gain[0][3] = 0x00;
+ ram_table.dark_neg_gain[1][0] = 0x00;
+ ram_table.dark_neg_gain[1][1] = 0x00;
+ ram_table.dark_neg_gain[1][2] = 0x00;
+ ram_table.dark_neg_gain[1][3] = 0x00;
+ ram_table.dark_neg_gain[2][0] = 0x00;
+ ram_table.dark_neg_gain[2][1] = 0x00;
+ ram_table.dark_neg_gain[2][2] = 0x00;
+ ram_table.dark_neg_gain[2][3] = 0x00;
+ ram_table.dark_neg_gain[3][0] = 0x00;
+ ram_table.dark_neg_gain[3][1] = 0x00;
+ ram_table.dark_neg_gain[3][2] = 0x00;
+ ram_table.dark_neg_gain[3][3] = 0x00;
+ ram_table.dark_neg_gain[4][0] = 0x00;
+ ram_table.dark_neg_gain[4][1] = 0x00;
+ ram_table.dark_neg_gain[4][2] = 0x00;
+ ram_table.dark_neg_gain[4][3] = 0x00;
+ ram_table.iir_curve[0] = 0x65;
+ ram_table.iir_curve[1] = 0x65;
+ ram_table.iir_curve[2] = 0x65;
+ ram_table.iir_curve[3] = 0x65;
+ ram_table.iir_curve[4] = 0x65;
+ ram_table.crgb_thresh[0] = cpu_to_be16(0x13b6);
+ ram_table.crgb_thresh[1] = cpu_to_be16(0x1648);
+ ram_table.crgb_thresh[2] = cpu_to_be16(0x18e3);
+ ram_table.crgb_thresh[3] = cpu_to_be16(0x1b41);
+ ram_table.crgb_thresh[4] = cpu_to_be16(0x1d46);
+ ram_table.crgb_thresh[5] = cpu_to_be16(0x1f21);
+ ram_table.crgb_thresh[6] = cpu_to_be16(0x2167);
+ ram_table.crgb_thresh[7] = cpu_to_be16(0x2384);
+ ram_table.crgb_offset[0] = cpu_to_be16(0x2999);
+ ram_table.crgb_offset[1] = cpu_to_be16(0x3999);
+ ram_table.crgb_offset[2] = cpu_to_be16(0x4666);
+ ram_table.crgb_offset[3] = cpu_to_be16(0x5999);
+ ram_table.crgb_offset[4] = cpu_to_be16(0x6333);
+ ram_table.crgb_offset[5] = cpu_to_be16(0x7800);
+ ram_table.crgb_offset[6] = cpu_to_be16(0x8c00);
+ ram_table.crgb_offset[7] = cpu_to_be16(0xa000);
+ ram_table.crgb_slope[0] = cpu_to_be16(0x3147);
+ ram_table.crgb_slope[1] = cpu_to_be16(0x2978);
+ ram_table.crgb_slope[2] = cpu_to_be16(0x23a2);
+ ram_table.crgb_slope[3] = cpu_to_be16(0x1f55);
+ ram_table.crgb_slope[4] = cpu_to_be16(0x1c63);
+ ram_table.crgb_slope[5] = cpu_to_be16(0x1a0f);
+ ram_table.crgb_slope[6] = cpu_to_be16(0x178d);
+ ram_table.crgb_slope[7] = cpu_to_be16(0x15ab);
+
+ fill_backlight_transform_table(
+ params, &ram_table);
+
+ return dmcu->funcs->load_iram(
+ dmcu, 0, (char *)(&ram_table), sizeof(ram_table));
+}
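Two details of the table fill are easy to miss: multiplying by 0x101 replicates the 8-bit value into both bytes (so 0xFF maps to exactly 0xFFFF), and DIV_ROUNDUP as defined here actually rounds to nearest, not up, because it adds b/2 before dividing. A standalone, runnable sketch of the interior-point math, assuming a hypothetical 17-entry LUT:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUNDUP(a, b) (((a) + ((b) / 2)) / (b)) /* rounds to nearest */
#define NUM_BL_CURVE_SEGS 16

int main(void)
{
	unsigned int lut_size = 17; /* assumed LUT length, for illustration */
	unsigned int i;

	for (i = 1; i + 1 < NUM_BL_CURVE_SEGS; i++) {
		unsigned int in8 = DIV_ROUNDUP(i * 256, NUM_BL_CURVE_SEGS);
		unsigned int idx = (lut_size - 1) * i / (NUM_BL_CURVE_SEGS - 1);
		uint16_t thresh = (uint16_t)(in8 * 0x101); /* backlight_8_to_16() */

		assert(idx < lut_size); /* mirrors the ASSERT() in the patch */
		printf("entry %2u: input8=%3u threshold=0x%04x lut_index=%u\n",
		       i, in8, (unsigned int)thresh, idx);
	}
	return 0;
}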
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
new file mode 100644
index 000000000000..da5df00fedce
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -0,0 +1,47 @@
+/* Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef MODULES_POWER_POWER_HELPERS_H_
+#define MODULES_POWER_POWER_HELPERS_H_
+
+#include "dc/inc/hw/dmcu.h"
+
+
+enum abm_defines {
+ abm_defines_max_level = 4,
+ abm_defines_max_config = 4,
+};
+
+struct dmcu_iram_parameters {
+ unsigned int *backlight_lut_array;
+ unsigned int backlight_lut_array_size;
+ unsigned int backlight_ramping_reduction;
+ unsigned int backlight_ramping_start;
+ unsigned int set;
+};
+
+bool dmcu_load_iram(struct dmcu *dmcu,
+ struct dmcu_iram_parameters params);
+
+#endif /* MODULES_POWER_POWER_HELPERS_H_ */
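A minimal caller sketch for the new helper, assuming the display manager already holds a dmcu handle and a 16-bit backlight LUT (all names below except dmcu_load_iram and dmcu_iram_parameters are hypothetical):

/* Hypothetical DM-side caller, not part of this patch. */
static bool example_program_abm(struct dmcu *dmcu,
				unsigned int *lut, unsigned int lut_size)
{
	struct dmcu_iram_parameters params = {
		.backlight_lut_array = lut, /* 16-bit levels, 0..0xFFFF */
		.backlight_lut_array_size = lut_size,
		.backlight_ramping_reduction = 0, /* assumed defaults */
		.backlight_ramping_start = 0,
		.set = 0, /* abm_config[0]: medium aggressiveness */
	};

	/* Returns true on success, and also when the DMCU is not yet
	 * initialized, in which case nothing is written. */
	return dmcu_load_iram(dmcu, params);
}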
diff --git a/drivers/gpu/drm/amd/include/amd_acpi.h b/drivers/gpu/drm/amd/include/amd_acpi.h
index 9b9699fc433f..c72cbfe8f684 100644
--- a/drivers/gpu/drm/amd/include/amd_acpi.h
+++ b/drivers/gpu/drm/amd/include/amd_acpi.h
@@ -52,6 +52,30 @@ struct atif_sbios_requests {
u8 backlight_level; /* panel backlight level (0-255) */
} __packed;
+struct atif_qbtc_arguments {
+ u16 size; /* structure size in bytes (includes size field) */
+ u8 requested_display; /* which display is requested */
+} __packed;
+
+#define ATIF_QBTC_MAX_DATA_POINTS 99
+
+struct atif_qbtc_data_point {
+ u8 luminance; /* luminance in percent */
+ u8 input_signal; /* input signal in range 0-255 */
+} __packed;
+
+struct atif_qbtc_output {
+ u16 size; /* structure size in bytes (includes size field) */
+ u16 flags; /* all zeroes */
+ u8 error_code; /* error code */
+ u8 ac_level; /* default brightness on AC power */
+ u8 dc_level; /* default brightness on DC power */
+ u8 min_input_signal; /* min input signal in range 0-255 */
+ u8 max_input_signal; /* max input signal in range 0-255 */
+ u8 number_of_points; /* number of data points */
+ struct atif_qbtc_data_point data_points[ATIF_QBTC_MAX_DATA_POINTS];
+} __packed;
+
#define ATIF_NOTIFY_MASK 0x3
#define ATIF_NOTIFY_NONE 0
#define ATIF_NOTIFY_81 1
@@ -126,26 +150,18 @@ struct atcs_pref_req_output {
* DWORD - supported functions bit vector
*/
/* Notifications mask */
-# define ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED (1 << 0)
-# define ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED (1 << 1)
# define ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED (1 << 2)
# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED (1 << 3)
# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED (1 << 4)
-# define ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED (1 << 5)
-# define ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED (1 << 6)
# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED (1 << 7)
# define ATIF_DGPU_DISPLAY_EVENT_SUPPORTED (1 << 8)
+# define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED (1 << 12)
/* supported functions vector */
# define ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED (1 << 0)
# define ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED (1 << 1)
-# define ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED (1 << 2)
-# define ATIF_GET_LID_STATE_SUPPORTED (1 << 3)
-# define ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED (1 << 4)
-# define ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED (1 << 5)
-# define ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED (1 << 6)
-# define ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED (1 << 7)
# define ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED (1 << 12)
-# define ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED (1 << 14)
+# define ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED (1 << 15)
+# define ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED (1 << 16)
# define ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED (1 << 20)
#define ATIF_FUNCTION_GET_SYSTEM_PARAMETERS 0x1
/* ARG0: ATIF_FUNCTION_GET_SYSTEM_PARAMETERS
@@ -170,6 +186,10 @@ struct atcs_pref_req_output {
* n (0xd0-0xd9) is specified in notify command code.
* bit 2:
* 1 - lid changes not reported through int10
+ * bit 3:
+ * 1 - system bios controls overclocking
+ * bit 4:
+ * 1 - enable overclocking
*/
#define ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS 0x2
/* ARG0: ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS
@@ -177,28 +197,23 @@ struct atcs_pref_req_output {
* OUTPUT:
* WORD - structure size in bytes (includes size field)
* DWORD - pending sbios requests
- * BYTE - panel expansion mode
+ * BYTE - reserved (all zeroes)
* BYTE - thermal state: target gfx controller
* BYTE - thermal state: state id (0: exit state, non-0: state)
* BYTE - forced power state: target gfx controller
- * BYTE - forced power state: state id
+ * BYTE - forced power state: state id (0: forced state, non-0: state)
* BYTE - system power source
* BYTE - panel backlight level (0-255)
+ * BYTE - GPU package power limit: target gfx controller
+ * DWORD - GPU package power limit: value (24:8 fractional format, Watts)
*/
/* pending sbios requests */
-# define ATIF_DISPLAY_SWITCH_REQUEST (1 << 0)
-# define ATIF_EXPANSION_MODE_CHANGE_REQUEST (1 << 1)
# define ATIF_THERMAL_STATE_CHANGE_REQUEST (1 << 2)
# define ATIF_FORCED_POWER_STATE_CHANGE_REQUEST (1 << 3)
# define ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST (1 << 4)
-# define ATIF_DISPLAY_CONF_CHANGE_REQUEST (1 << 5)
-# define ATIF_PX_GFX_SWITCH_REQUEST (1 << 6)
# define ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST (1 << 7)
# define ATIF_DGPU_DISPLAY_EVENT (1 << 8)
-/* panel expansion mode */
-# define ATIF_PANEL_EXPANSION_DISABLE 0
-# define ATIF_PANEL_EXPANSION_FULL 1
-# define ATIF_PANEL_EXPANSION_ASPECT 2
+# define ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST (1 << 12)
/* target gfx controller */
# define ATIF_TARGET_GFX_SINGLE 0
# define ATIF_TARGET_GFX_PX_IGPU 1
@@ -208,76 +223,6 @@ struct atcs_pref_req_output {
# define ATIF_POWER_SOURCE_DC 2
# define ATIF_POWER_SOURCE_RESTRICTED_AC_1 3
# define ATIF_POWER_SOURCE_RESTRICTED_AC_2 4
-#define ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS 0x3
-/* ARG0: ATIF_FUNCTION_SELECT_ACTIVE_DISPLAYS
- * ARG1:
- * WORD - structure size in bytes (includes size field)
- * WORD - selected displays
- * WORD - connected displays
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * WORD - selected displays
- */
-# define ATIF_LCD1 (1 << 0)
-# define ATIF_CRT1 (1 << 1)
-# define ATIF_TV (1 << 2)
-# define ATIF_DFP1 (1 << 3)
-# define ATIF_CRT2 (1 << 4)
-# define ATIF_LCD2 (1 << 5)
-# define ATIF_DFP2 (1 << 7)
-# define ATIF_CV (1 << 8)
-# define ATIF_DFP3 (1 << 9)
-# define ATIF_DFP4 (1 << 10)
-# define ATIF_DFP5 (1 << 11)
-# define ATIF_DFP6 (1 << 12)
-#define ATIF_FUNCTION_GET_LID_STATE 0x4
-/* ARG0: ATIF_FUNCTION_GET_LID_STATE
- * ARG1: none
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * BYTE - lid state (0: open, 1: closed)
- *
- * GET_LID_STATE only works at boot and resume, for general lid
- * status, use the kernel provided status
- */
-#define ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS 0x5
-/* ARG0: ATIF_FUNCTION_GET_TV_STANDARD_FROM_CMOS
- * ARG1: none
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * BYTE - 0
- * BYTE - TV standard
- */
-# define ATIF_TV_STD_NTSC 0
-# define ATIF_TV_STD_PAL 1
-# define ATIF_TV_STD_PALM 2
-# define ATIF_TV_STD_PAL60 3
-# define ATIF_TV_STD_NTSCJ 4
-# define ATIF_TV_STD_PALCN 5
-# define ATIF_TV_STD_PALN 6
-# define ATIF_TV_STD_SCART_RGB 9
-#define ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS 0x6
-/* ARG0: ATIF_FUNCTION_SET_TV_STANDARD_IN_CMOS
- * ARG1:
- * WORD - structure size in bytes (includes size field)
- * BYTE - 0
- * BYTE - TV standard
- * OUTPUT: none
- */
-#define ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS 0x7
-/* ARG0: ATIF_FUNCTION_GET_PANEL_EXPANSION_MODE_FROM_CMOS
- * ARG1: none
- * OUTPUT:
- * WORD - structure size in bytes (includes size field)
- * BYTE - panel expansion mode
- */
-#define ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS 0x8
-/* ARG0: ATIF_FUNCTION_SET_PANEL_EXPANSION_MODE_IN_CMOS
- * ARG1:
- * WORD - structure size in bytes (includes size field)
- * BYTE - panel expansion mode
- * OUTPUT: none
- */
#define ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION 0xD
/* ARG0: ATIF_FUNCTION_TEMPERATURE_CHANGE_NOTIFICATION
* ARG1:
@@ -286,21 +231,43 @@ struct atcs_pref_req_output {
* BYTE - current temperature (degrees Celsius)
* OUTPUT: none
*/
-#define ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES 0xF
-/* ARG0: ATIF_FUNCTION_GET_GRAPHICS_DEVICE_TYPES
- * ARG1: none
+#define ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS 0x10
+/* ARG0: ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS
+ * ARG1:
+ * WORD - structure size in bytes (includes size field)
+ * BYTE - requested display
* OUTPUT:
- * WORD - number of gfx devices
- * WORD - device structure size in bytes (excludes device size field)
- * DWORD - flags \
- * WORD - bus number } repeated structure
- * WORD - device number /
+ * WORD - structure size in bytes (includes size field)
+ * WORD - flags (currently all 16 bits are reserved)
+ * BYTE - error code (on failure, disregard all below fields)
+ * BYTE - AC level (default brightness in percent when machine has full power)
+ * BYTE - DC level (default brightness in percent when machine is on battery)
+ * BYTE - min input signal, in range 0-255, corresponding to 0% backlight
+ * BYTE - max input signal, in range 0-255, corresponding to 100% backlight
+ * BYTE - number of reported data points
+ * BYTE - luminance level in percent \ repeated structure
+ * BYTE - input signal in range 0-255 / does not have entries for 0% and 100%
+ */
+/* requested display */
+# define ATIF_QBTC_REQUEST_LCD1 0
+# define ATIF_QBTC_REQUEST_CRT1 1
+# define ATIF_QBTC_REQUEST_DFP1 3
+# define ATIF_QBTC_REQUEST_CRT2 4
+# define ATIF_QBTC_REQUEST_LCD2 5
+# define ATIF_QBTC_REQUEST_DFP2 7
+# define ATIF_QBTC_REQUEST_DFP3 9
+# define ATIF_QBTC_REQUEST_DFP4 10
+# define ATIF_QBTC_REQUEST_DFP5 11
+# define ATIF_QBTC_REQUEST_DFP6 12
+/* error code */
+# define ATIF_QBTC_ERROR_CODE_SUCCESS 0
+# define ATIF_QBTC_ERROR_CODE_FAILURE 1
+# define ATIF_QBTC_ERROR_CODE_DEVICE_NOT_SUPPORTED 2
+#define ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION 0x11
+/* ARG0: ATIF_FUNCTION_READY_TO_UNDOCK_NOTIFICATION
+ * ARG1: none
+ * OUTPUT: none
*/
-/* flags */
-# define ATIF_PX_REMOVABLE_GRAPHICS_DEVICE (1 << 0)
-# define ATIF_XGP_PORT (1 << 1)
-# define ATIF_VGA_ENABLED_GRAPHICS_DEVICE (1 << 2)
-# define ATIF_XGP_PORT_IN_DOCK (1 << 3)
#define ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION 0x15
/* ARG0: ATIF_FUNCTION_GET_EXTERNAL_GPU_INFORMATION
* ARG1: none
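Since the QBTC reply omits data points for 0% and 100% luminance, a consumer typically synthesizes the endpoints from min_input_signal and max_input_signal; a hedged sketch of walking the reply (the helper name is hypothetical, the types and constants come from this header):

/* Hypothetical consumer: dump an ATIF QBTC reply as a monotonic
 * (input signal -> luminance %) list, adding the implicit endpoints. */
static int example_walk_qbtc(const struct atif_qbtc_output *out)
{
	unsigned int i;

	if (out->error_code != ATIF_QBTC_ERROR_CODE_SUCCESS)
		return -EIO;
	if (out->number_of_points > ATIF_QBTC_MAX_DATA_POINTS)
		return -EINVAL;

	pr_debug("0%% backlight at signal %u\n", out->min_input_signal);
	for (i = 0; i < out->number_of_points; i++)
		pr_debug("%u%% backlight at signal %u\n",
			 out->data_points[i].luminance,
			 out->data_points[i].input_signal);
	pr_debug("100%% backlight at signal %u\n", out->max_input_signal);
	return 0;
}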
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
new file mode 100644
index 000000000000..8f515875a34d
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_offset.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_OFFSET_HEADER
+#define _mmhub_9_4_0_OFFSET_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+// base address: 0x6a040
+#define mmMC_VM_XGMI_LFB_CNTL 0x0823
+#define mmMC_VM_XGMI_LFB_CNTL_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE 0x0824
+#define mmMC_VM_XGMI_LFB_SIZE_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
new file mode 100644
index 000000000000..0a6b072d191e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_0_sh_mask.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mmhub_9_4_0_SH_MASK_HEADER
+#define _mmhub_9_4_0_SH_MASK_HEADER
+
+
+// addressBlock: mmhub_utcl2_vmsharedpfdec
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT 0x4
+#define MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK 0x00000007L
+#define MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK 0x00000070L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK 0x0000FFFFL
+
+#endif
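The shift/mask pairs are consumed in the usual open-coded pattern; a hedged sketch of decoding the two XGMI local-framebuffer registers (the decode helper is hypothetical; only the macros come from this header):

/* Hypothetical decode of raw MC_VM_XGMI_LFB_CNTL/SIZE register values. */
static void example_decode_xgmi_lfb(uint32_t lfb_cntl, uint32_t lfb_size)
{
	uint32_t region = (lfb_cntl & MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION_MASK)
			  >> MC_VM_XGMI_LFB_CNTL__PF_LFB_REGION__SHIFT;
	uint32_t max_region = (lfb_cntl & MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION_MASK)
			      >> MC_VM_XGMI_LFB_CNTL__PF_MAX_REGION__SHIFT;
	uint32_t size = (lfb_size & MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE_MASK)
			>> MC_VM_XGMI_LFB_SIZE__PF_LFB_SIZE__SHIFT;

	pr_debug("XGMI: region %u of %u, LFB size %u\n",
		 region, max_region + 1, size);
}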
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 64ecffd52126..8154d67388cc 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -188,8 +188,8 @@ struct tile_config {
*/
#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
#define ALLOC_MEM_FLAGS_GTT (1 << 1)
-#define ALLOC_MEM_FLAGS_USERPTR (1 << 2) /* TODO */
-#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3) /* TODO */
+#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
+#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
/*
* Allocation flags attributes/access options.
@@ -205,20 +205,6 @@ struct tile_config {
/**
* struct kfd2kgd_calls
*
- * @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
- * The buffer can be used for mqds, hpds, kernel queue, fence and runlists
- *
- * @free_gtt_mem: Frees a buffer that was allocated on the gart aperture
- *
- * @get_local_mem_info: Retrieves information about GPU local memory
- *
- * @get_gpu_clock_counter: Retrieves GPU clock counter
- *
- * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
- *
- * @alloc_pasid: Allocate a PASID
- * @free_pasid: Free a PASID
- *
* @program_sh_mem_settings: A function that should initiate the memory
* properties such as main aperture memory type (cache / non cached) and
* secondary aperture base address, size and memory type.
@@ -255,64 +241,16 @@ struct tile_config {
*
* @get_tile_config: Returns GPU-specific tiling mode information
*
- * @get_cu_info: Retrieves activated cu info
- *
- * @get_vram_usage: Returns current VRAM usage
- *
- * @create_process_vm: Create a VM address space for a given process and GPU
- *
- * @destroy_process_vm: Destroy a VM
- *
- * @get_process_page_dir: Get physical address of a VM page directory
- *
* @set_vm_context_page_table_base: Program page table base for a VMID
*
- * @alloc_memory_of_gpu: Allocate GPUVM memory
- *
- * @free_memory_of_gpu: Free GPUVM memory
- *
- * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
- * space. Allocates and updates page tables and page directories as
- * needed. This function may return before all page table updates have
- * completed. This allows multiple map operations (on multiple GPUs)
- * to happen concurrently. Use sync_memory to synchronize with all
- * pending updates.
- *
- * @unmap_memor_to_gpu: Unmap GPUVM memory from a specific VM address space
- *
- * @sync_memory: Wait for pending page table updates to complete
- *
- * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access
- * Pins the BO, maps it to kernel address space. Such BOs are never evicted.
- * The kernel virtual address remains valid until the BO is freed.
- *
- * @restore_process_bos: Restore all BOs that belong to the
- * process. This is intended for restoring memory mappings after a TTM
- * eviction.
- *
* @invalidate_tlbs: Invalidate TLBs for a specific PASID
*
* @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID
*
- * @submit_ib: Submits an IB to the engine specified by inserting the
- * IB to the corresponding ring (ring type). The IB is executed with the
- * specified VMID in a user mode context.
- *
- * @get_vm_fault_info: Return information about a recent VM fault on
- * GFXv7 and v8. If multiple VM faults occurred since the last call of
- * this function, it will return information about the first of those
- * faults. On GFXv9 VM fault information is fully contained in the IH
- * packet and this function is not needed.
- *
* @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
* IH ring entry. This function allows the KFD ISR to get the VMID
* from the fault status register as early as possible.
*
- * @gpu_recover: let kgd reset gpu after kfd detect CPC hang
- *
- * @set_compute_idle: Indicates that compute is idle on a device. This
- * can be used to change power profiles depending on compute activity.
- *
* @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled
*
* This structure contains function pointers to services that the kgd driver
@@ -320,21 +258,6 @@ struct tile_config {
*
*/
struct kfd2kgd_calls {
- int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9);
-
- void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
-
- void (*get_local_mem_info)(struct kgd_dev *kgd,
- struct kfd_local_mem_info *mem_info);
- uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
-
- uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
-
- int (*alloc_pasid)(unsigned int bits);
- void (*free_pasid)(unsigned int pasid);
-
/* Register access functions */
void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
@@ -398,49 +321,11 @@ struct kfd2kgd_calls {
uint64_t va, uint32_t vmid);
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
- void (*get_cu_info)(struct kgd_dev *kgd,
- struct kfd_cu_info *cu_info);
- uint64_t (*get_vram_usage)(struct kgd_dev *kgd);
-
- int (*create_process_vm)(struct kgd_dev *kgd, unsigned int pasid, void **vm,
- void **process_info, struct dma_fence **ef);
- int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
- unsigned int pasid, void **vm, void **process_info,
- struct dma_fence **ef);
- void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
- void (*release_process_vm)(struct kgd_dev *kgd, void *vm);
- uint64_t (*get_process_page_dir)(void *vm);
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
- int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
- uint64_t size, void *vm,
- struct kgd_mem **mem, uint64_t *offset,
- uint32_t flags);
- int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
- int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
- void *vm);
- int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
- void *vm);
- int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
- int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
- void **kptr, uint64_t *size);
- int (*restore_process_bos)(void *process_info, struct dma_fence **ef);
-
int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);
-
- int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
- uint32_t vmid, uint64_t gpu_addr,
- uint32_t *ib_cmd, uint32_t ib_len);
-
- int (*get_vm_fault_info)(struct kgd_dev *kgd,
- struct kfd_vm_fault_info *info);
uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
-
- void (*gpu_recover)(struct kgd_dev *kgd);
-
- void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
-
uint64_t (*get_hive_id)(struct kgd_dev *kgd);
};
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 980e696989b1..1479ea1dc3e7 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -276,6 +276,10 @@ struct amd_pm_funcs {
struct amd_pp_simple_clock_info *clocks);
int (*notify_smu_enable_pwe)(void *handle);
int (*enable_mgpu_fan_boost)(void *handle);
+ int (*set_active_display_count)(void *handle, uint32_t count);
+ int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock);
+ int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock);
+ int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock);
};
#endif
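A hedged sketch of how display code might drive the four new hooks; the clock arguments are assumed to be in the same 10 kHz units that the smu10 backend converts to MHz, and everything except the amd_pm_funcs members is hypothetical:

/* Hypothetical DM-side helper: forward display constraints to powerplay,
 * tolerating backends that leave the new hooks unimplemented. */
static void example_notify_pp(const struct amd_pm_funcs *funcs, void *handle,
			      uint32_t displays, uint32_t dcefclk_10khz,
			      uint32_t fclk_10khz)
{
	if (funcs->set_active_display_count)
		funcs->set_active_display_count(handle, displays);
	if (funcs->set_hard_min_dcefclk_by_freq)
		funcs->set_hard_min_dcefclk_by_freq(handle, dcefclk_10khz);
	if (funcs->set_hard_min_fclk_by_freq)
		funcs->set_hard_min_fclk_by_freq(handle, fclk_10khz);
	if (funcs->set_min_deep_sleep_dcefclk)
		funcs->set_min_deep_sleep_dcefclk(handle, dcefclk_10khz);
}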
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index d6aa1d414320..9bc27f468d5b 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -300,7 +300,7 @@ static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
return -EINVAL;
if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -387,7 +387,7 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
return 0;
if (hwmgr->hwmgr_func->get_sclk == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -405,7 +405,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
return 0;
if (hwmgr->hwmgr_func->get_mclk == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -422,7 +422,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
@@ -438,7 +438,7 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
@@ -505,7 +505,7 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
return;
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
mutex_lock(&hwmgr->smu_lock);
@@ -522,7 +522,7 @@ static uint32_t pp_dpm_get_fan_control_mode(void *handle)
return 0;
if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -540,7 +540,7 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
return -EINVAL;
if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -558,7 +558,7 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
return -EINVAL;
if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -594,7 +594,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
return -EINVAL;
if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -720,12 +720,12 @@ static int pp_dpm_force_clock_level(void *handle,
return -EINVAL;
if (hwmgr->hwmgr_func->force_clock_level == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("force clock level is for dpm manual mode only.\n");
+ pr_debug("force clock level is for dpm manual mode only.\n");
return -EINVAL;
}
@@ -745,7 +745,7 @@ static int pp_dpm_print_clock_levels(void *handle,
return -EINVAL;
if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -763,7 +763,7 @@ static int pp_dpm_get_sclk_od(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -781,7 +781,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
return -EINVAL;
if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -800,7 +800,7 @@ static int pp_dpm_get_mclk_od(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -818,7 +818,7 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
return -EINVAL;
if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&hwmgr->smu_lock);
@@ -878,7 +878,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf)
return -EINVAL;
if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return snprintf(buf, PAGE_SIZE, "\n");
}
@@ -894,12 +894,12 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
return ret;
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return ret;
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
- pr_info("power profile setting is for manual dpm mode only.\n");
+ pr_debug("power profile setting is for manual dpm mode only.\n");
return ret;
}
@@ -917,7 +917,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
return -EINVAL;
if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
@@ -935,7 +935,7 @@ static int pp_dpm_switch_power_profile(void *handle,
return -EINVAL;
if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
@@ -972,7 +972,7 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
return -EINVAL;
if (hwmgr->hwmgr_func->set_power_limit == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
@@ -1072,7 +1072,7 @@ static int pp_get_current_clocks(void *handle,
&hw_clocks, PHM_PerformanceLevelDesignation_Activity);
if (ret) {
- pr_info("Error in phm_get_clock_info \n");
+ pr_debug("Error in phm_get_clock_info \n");
mutex_unlock(&hwmgr->smu_lock);
return -EINVAL;
}
@@ -1212,7 +1212,7 @@ static int pp_dpm_powergate_mmhub(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -1227,7 +1227,7 @@ static int pp_dpm_powergate_gfx(void *handle, bool gate)
return 0;
if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
@@ -1242,7 +1242,7 @@ static void pp_dpm_powergate_acp(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_acp == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
@@ -1257,7 +1257,7 @@ static void pp_dpm_powergate_sdma(void *handle, bool gate)
return;
if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
@@ -1303,7 +1303,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
return -EINVAL;
if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
- pr_info("%s was not implemented.\n", __func__);
+ pr_info_ratelimited("%s was not implemented.\n", __func__);
return -EINVAL;
}
@@ -1332,6 +1332,78 @@ static int pp_enable_mgpu_fan_boost(void *handle)
return 0;
}
+static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
+ pr_debug("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
+}
+
+static int pp_set_active_display_count(void *handle, uint32_t count)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ int ret = 0;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ mutex_lock(&hwmgr->smu_lock);
+ ret = phm_set_active_display_count(hwmgr, count);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return ret;
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1378,4 +1450,8 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
+ .set_active_display_count = pp_set_active_display_count,
+ .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
+ .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
+ .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
};
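The switch to pr_info_ratelimited matters because these stubs sit behind sysfs and per-frame display paths; the recurring guard shape, condensed into one hedged sketch (the function name is hypothetical, the fields are from pp_hwmgr as used above):

/* Pattern used throughout this file after the patch: validate the handle,
 * complain (ratelimited) about a missing backend hook, and hold smu_lock
 * only around the actual call. */
static int example_pp_call(struct pp_hwmgr *hwmgr, uint32_t limit)
{
	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&hwmgr->smu_lock);
	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	mutex_unlock(&hwmgr->smu_lock);
	return 0;
}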
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 85119c2bdcc8..1f92a9f4c9e3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -80,7 +80,9 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
adev = hwmgr->adev;
- if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) {
+ /* Skip for suspend/resume case */
+ if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
+ && adev->in_suspend) {
pr_info("dpm has been enabled\n");
return 0;
}
@@ -286,8 +288,8 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
if (display_config == NULL)
return -EINVAL;
- if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
- hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
+ if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+ hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);
for (index = 0; index < display_config->num_path_including_non_display; index++) {
if (display_config->displays[index].controller_id != 0)
@@ -478,3 +480,44 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
}
+
+int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_active_display_count)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
+}
+
+int phm_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
+}
+
+int phm_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
+}
+
+int phm_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ PHM_FUNC_CHECK(hwmgr);
+
+ if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
+}
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 47ac92369739..0173d0480024 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -352,6 +352,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ ret = phm_pre_display_configuration_changed(hwmgr);
+ if (ret)
+ return ret;
ret = phm_set_cpu_power_state(hwmgr);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index 91ffb7bc4ee7..56437866d120 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
if (skip)
return 0;
- phm_pre_display_configuration_changed(hwmgr);
-
phm_display_configuration_changed(hwmgr);
if (hwmgr->ps)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index dd18cb710391..f95c5f50eb0f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -216,12 +216,12 @@ static inline uint32_t convert_10k_to_mhz(uint32_t clock)
return (clock + 99) / 100;
}
-static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
+static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->need_min_deep_sleep_dcefclk &&
- smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
+ smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
@@ -230,6 +230,34 @@ static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
return 0;
}
+static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->dcf_actual_hard_min_freq &&
+ smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+ smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinDcefclkByFreq,
+ smu10_data->dcf_actual_hard_min_freq);
+ }
+ return 0;
+}
+
+static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+ if (smu10_data->f_actual_hard_min_freq &&
+ smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
+ smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinFclkByFreq,
+ smu10_data->f_actual_hard_min_freq);
+ }
+ return 0;
+}
+
static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
@@ -1206,7 +1234,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.get_max_high_clocks = smu10_get_max_high_clocks,
.read_sensor = smu10_read_sensor,
.set_active_display_count = smu10_set_active_display_count,
- .set_deep_sleep_dcefclk = smu10_set_deep_sleep_dcefclk,
+ .set_min_deep_sleep_dcefclk = smu10_set_min_deep_sleep_dcefclk,
.dynamic_state_management_enable = smu10_enable_dpm_tasks,
.power_off_asic = smu10_power_off_asic,
.asic_setup = smu10_setup_asic_task,
@@ -1217,6 +1245,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.display_clock_voltage_request = smu10_display_clock_voltage_request,
.powergate_gfx = smu10_gfx_off_control,
.powergate_sdma = smu10_powergate_sdma,
+ .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
+ .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
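convert_10k_to_mhz() is a ceiling division from 10 kHz units to MHz, so a requested hard minimum is never rounded below itself; a small runnable check of that property:

#include <assert.h>
#include <stdio.h>

/* Same rounding as smu10's convert_10k_to_mhz(): ceil(clock / 100). */
static unsigned int convert_10k_to_mhz(unsigned int clock_10khz)
{
	return (clock_10khz + 99) / 100;
}

int main(void)
{
	assert(convert_10k_to_mhz(30000) == 300); /* exactly 300 MHz */
	assert(convert_10k_to_mhz(30001) == 301); /* 300.01 MHz rounds up */
	assert(convert_10k_to_mhz(1) == 1); /* nonzero request never becomes 0 */
	printf("ok\n");
	return 0;
}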
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 88f6b35ea6fe..d91390459326 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -269,7 +269,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
hwmgr->dyn_state.mvdd_dependency_on_mclk);
PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 MVDD table from dependancy table.",
+ "Failed to retrieve SVI2 MVDD table from dependency table.",
return result;);
}
@@ -288,7 +288,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
hwmgr->dyn_state.vddci_dependency_on_mclk);
PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDCI table from dependancy table.",
+ "Failed to retrieve SVI2 VDDCI table from dependency table.",
return result);
}
@@ -317,7 +317,7 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
table_info->vddc_lookup_table);
PP_ASSERT_WITH_CODE((0 == result),
- "Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
+ "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
}
tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
@@ -2859,7 +2859,10 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+ if (hwmgr->is_kicker)
+ switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+ else
+ switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
break;
case CHIP_VEGAM:
switch_limit_us = 30;
@@ -3589,8 +3592,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= sclk_table->count) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- sclk_table->dpm_levels[i-1].value = sclk;
+ if (sclk > sclk_table->dpm_levels[i-1].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ sclk_table->dpm_levels[i-1].value = sclk;
+ }
} else {
/* TODO: Check SCLK in DAL's minimum clocks
* in case DeepSleep divider update is required.
@@ -3607,8 +3612,10 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
}
if (i >= mclk_table->count) {
- data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- mclk_table->dpm_levels[i-1].value = mclk;
+ if (mclk > mclk_table->dpm_levels[i-1].value) {
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ mclk_table->dpm_levels[i-1].value = mclk;
+ }
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
@@ -4219,9 +4226,17 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
if (tmp & (1 << 23)) {
data->mem_latency_high = MEM_LATENCY_HIGH;
data->mem_latency_low = MEM_LATENCY_LOW;
+ if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+ (hwmgr->chip_id == CHIP_POLARIS11) ||
+ (hwmgr->chip_id == CHIP_POLARIS12))
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
} else {
data->mem_latency_high = 330;
data->mem_latency_low = 330;
+ if ((hwmgr->chip_id == CHIP_POLARIS10) ||
+ (hwmgr->chip_id == CHIP_POLARIS11) ||
+ (hwmgr->chip_id == CHIP_POLARIS12))
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
}
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 5e19f5977eb1..d138ddae563d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -967,7 +967,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_CAP(PHM_PlatformCaps_TDRamping) ||
PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
mutex_lock(&adev->grbm_idx_mutex);
value = 0;
value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
@@ -1014,13 +1014,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
"Failed to enable DPM DIDT.", goto error);
}
mutex_unlock(&adev->grbm_idx_mutex);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
return 0;
error:
mutex_unlock(&adev->grbm_idx_mutex);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return result;
}
@@ -1034,7 +1034,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
PP_CAP(PHM_PlatformCaps_TDRamping) ||
PP_CAP(PHM_PlatformCaps_TCPRamping)) {
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
result = smu7_enable_didt(hwmgr, false);
PP_ASSERT_WITH_CODE((result == 0),
@@ -1046,12 +1046,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == result),
"Failed to disable DPM DIDT.", goto error);
}
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
}
return 0;
error:
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return result;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index fef111ddb736..553a203ac47c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -1228,17 +1228,14 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
- if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
- smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
+ if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
- }
return 0;
}
static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
- smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
return smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_UVDPowerON,
@@ -1995,6 +1992,7 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
.power_state_set = smu8_set_power_state_tasks,
.dynamic_state_management_disable = smu8_disable_dpm_tasks,
.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
+ .update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable,
.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index e2bc6e0c229f..79c86247d0ac 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3266,8 +3266,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= sclk_table->count) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- sclk_table->dpm_levels[i-1].value = sclk;
+ if (sclk > sclk_table->dpm_levels[i-1].value) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ sclk_table->dpm_levels[i-1].value = sclk;
+ }
}
for (i = 0; i < mclk_table->count; i++) {
@@ -3276,8 +3278,10 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
}
if (i >= mclk_table->count) {
- data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
- mclk_table->dpm_levels[i-1].value = mclk;
+ if (mclk > mclk_table->dpm_levels[i-1].value) {
+ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+ mclk_table->dpm_levels[i-1].value = mclk;
+ }
}
if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 2d88abf97e7b..6f26cb241ecc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -937,7 +937,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
@@ -962,7 +962,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -971,11 +971,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -988,7 +988,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
@@ -1007,7 +1007,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
if (PP_CAP(PHM_PlatformCaps_GCEDC))
@@ -1024,11 +1024,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t data;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
@@ -1049,7 +1049,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
mutex_lock(&adev->grbm_idx_mutex);
for (count = 0; count < num_se; count++) {
@@ -1070,7 +1070,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -1079,11 +1079,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
@@ -1097,7 +1097,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
num_se = adev->gfx.config.max_shader_engines;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
@@ -1118,7 +1118,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, true);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
@@ -1138,11 +1138,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
uint32_t data;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
vega10_didt_set_mask(hwmgr, false);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
@@ -1160,7 +1160,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
int result;
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
+ amdgpu_gfx_rlc_enter_safe_mode(adev);
mutex_lock(&adev->grbm_idx_mutex);
WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
@@ -1173,7 +1173,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
vega10_didt_set_mask(hwmgr, false);
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
+ amdgpu_gfx_rlc_exit_safe_mode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index b4eadd47f3a4..2e99ecf4ab76 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -130,7 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disable_auto_wattman = 1;
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
- data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
+ data->registry_data.fclk_gfxclk_ratio = 0;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;
@@ -1660,14 +1660,15 @@ static uint32_t vega20_find_highest_dpm_level(
return i;
}
-static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t min_freq;
int ret = 0;
- if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1676,7 +1677,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1692,7 +1694,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ if (data->smu_features[GNLD_DPM_UVD].enabled &&
+ (feature_mask & FEATURE_DPM_UVD_MASK)) {
min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1710,7 +1713,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ if (data->smu_features[GNLD_DPM_VCE].enabled &&
+ (feature_mask & FEATURE_DPM_VCE_MASK)) {
min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1720,7 +1724,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1733,14 +1738,15 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
return ret;
}
-static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
uint32_t max_freq;
int ret = 0;
- if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1750,7 +1756,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1760,7 +1767,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ if (data->smu_features[GNLD_DPM_UVD].enabled &&
+ (feature_mask & FEATURE_DPM_UVD_MASK)) {
max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1777,7 +1785,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ if (data->smu_features[GNLD_DPM_VCE].enabled &&
+ (feature_mask & FEATURE_DPM_VCE_MASK)) {
max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1787,7 +1796,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
return ret);
}
- if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -2126,12 +2136,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2158,12 +2168,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to highest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2176,12 +2186,12 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
int ret = 0;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Bootup Levels!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload DPM Max Levels!",
return ret);
@@ -2239,12 +2249,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2259,12 +2269,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
- ret = vega20_upload_dpm_min_level(hwmgr);
+ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
return ret);
- ret = vega20_upload_dpm_max_level(hwmgr);
+ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
return ret);
@@ -2767,7 +2777,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
case PP_MCLK:
@@ -2784,7 +2794,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
case PP_PCIE:
@@ -3466,109 +3476,64 @@ static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
/* init/fini related */
- .backend_init =
- vega20_hwmgr_backend_init,
- .backend_fini =
- vega20_hwmgr_backend_fini,
- .asic_setup =
- vega20_setup_asic_task,
- .power_off_asic =
- vega20_power_off_asic,
- .dynamic_state_management_enable =
- vega20_enable_dpm_tasks,
- .dynamic_state_management_disable =
- vega20_disable_dpm_tasks,
+ .backend_init = vega20_hwmgr_backend_init,
+ .backend_fini = vega20_hwmgr_backend_fini,
+ .asic_setup = vega20_setup_asic_task,
+ .power_off_asic = vega20_power_off_asic,
+ .dynamic_state_management_enable = vega20_enable_dpm_tasks,
+ .dynamic_state_management_disable = vega20_disable_dpm_tasks,
/* power state related */
- .apply_clocks_adjust_rules =
- vega20_apply_clocks_adjust_rules,
- .pre_display_config_changed =
- vega20_pre_display_configuration_changed_task,
- .display_config_changed =
- vega20_display_configuration_changed_task,
+ .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
+ .pre_display_config_changed = vega20_pre_display_configuration_changed_task,
+ .display_config_changed = vega20_display_configuration_changed_task,
.check_smc_update_required_for_display_configuration =
vega20_check_smc_update_required_for_display_configuration,
.notify_smc_display_config_after_ps_adjustment =
vega20_notify_smc_display_config_after_ps_adjustment,
/* export to DAL */
- .get_sclk =
- vega20_dpm_get_sclk,
- .get_mclk =
- vega20_dpm_get_mclk,
- .get_dal_power_level =
- vega20_get_dal_power_level,
- .get_clock_by_type_with_latency =
- vega20_get_clock_by_type_with_latency,
- .get_clock_by_type_with_voltage =
- vega20_get_clock_by_type_with_voltage,
- .set_watermarks_for_clocks_ranges =
- vega20_set_watermarks_for_clocks_ranges,
- .display_clock_voltage_request =
- vega20_display_clock_voltage_request,
- .get_performance_level =
- vega20_get_performance_level,
+ .get_sclk = vega20_dpm_get_sclk,
+ .get_mclk = vega20_dpm_get_mclk,
+ .get_dal_power_level = vega20_get_dal_power_level,
+ .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
+ .get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
+ .set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
+ .display_clock_voltage_request = vega20_display_clock_voltage_request,
+ .get_performance_level = vega20_get_performance_level,
/* UMD pstate, profile related */
- .force_dpm_level =
- vega20_dpm_force_dpm_level,
- .get_power_profile_mode =
- vega20_get_power_profile_mode,
- .set_power_profile_mode =
- vega20_set_power_profile_mode,
+ .force_dpm_level = vega20_dpm_force_dpm_level,
+ .get_power_profile_mode = vega20_get_power_profile_mode,
+ .set_power_profile_mode = vega20_set_power_profile_mode,
/* od related */
- .set_power_limit =
- vega20_set_power_limit,
- .get_sclk_od =
- vega20_get_sclk_od,
- .set_sclk_od =
- vega20_set_sclk_od,
- .get_mclk_od =
- vega20_get_mclk_od,
- .set_mclk_od =
- vega20_set_mclk_od,
- .odn_edit_dpm_table =
- vega20_odn_edit_dpm_table,
+ .set_power_limit = vega20_set_power_limit,
+ .get_sclk_od = vega20_get_sclk_od,
+ .set_sclk_od = vega20_set_sclk_od,
+ .get_mclk_od = vega20_get_mclk_od,
+ .set_mclk_od = vega20_set_mclk_od,
+ .odn_edit_dpm_table = vega20_odn_edit_dpm_table,
/* for sysfs to retrieve/set gfxclk/memclk */
- .force_clock_level =
- vega20_force_clock_level,
- .print_clock_levels =
- vega20_print_clock_levels,
- .read_sensor =
- vega20_read_sensor,
+ .force_clock_level = vega20_force_clock_level,
+ .print_clock_levels = vega20_print_clock_levels,
+ .read_sensor = vega20_read_sensor,
/* powergate related */
- .powergate_uvd =
- vega20_power_gate_uvd,
- .powergate_vce =
- vega20_power_gate_vce,
+ .powergate_uvd = vega20_power_gate_uvd,
+ .powergate_vce = vega20_power_gate_vce,
/* thermal related */
- .start_thermal_controller =
- vega20_start_thermal_controller,
- .stop_thermal_controller =
- vega20_thermal_stop_thermal_controller,
- .get_thermal_temperature_range =
- vega20_get_thermal_temperature_range,
- .register_irq_handlers =
- smu9_register_irq_handlers,
- .disable_smc_firmware_ctf =
- vega20_thermal_disable_alert,
+ .start_thermal_controller = vega20_start_thermal_controller,
+ .stop_thermal_controller = vega20_thermal_stop_thermal_controller,
+ .get_thermal_temperature_range = vega20_get_thermal_temperature_range,
+ .register_irq_handlers = smu9_register_irq_handlers,
+ .disable_smc_firmware_ctf = vega20_thermal_disable_alert,
/* fan control related */
- .get_fan_speed_percent =
- vega20_fan_ctrl_get_fan_speed_percent,
- .set_fan_speed_percent =
- vega20_fan_ctrl_set_fan_speed_percent,
- .get_fan_speed_info =
- vega20_fan_ctrl_get_fan_speed_info,
- .get_fan_speed_rpm =
- vega20_fan_ctrl_get_fan_speed_rpm,
- .set_fan_speed_rpm =
- vega20_fan_ctrl_set_fan_speed_rpm,
- .get_fan_control_mode =
- vega20_get_fan_control_mode,
- .set_fan_control_mode =
- vega20_set_fan_control_mode,
+ .get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
+ .set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
+ .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
+ .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
+ .set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
+ .get_fan_control_mode = vega20_get_fan_control_mode,
+ .set_fan_control_mode = vega20_set_fan_control_mode,
/* smu memory related */
- .notify_cac_buffer_info =
- vega20_notify_cac_buffer_info,
- .enable_mgpu_fan_boost =
- vega20_enable_mgpu_fan_boost,
+ .notify_cac_buffer_info = vega20_notify_cac_buffer_info,
+ .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
};
int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 54fd0125d9cf..f4dab979a3a1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -463,5 +463,8 @@ extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr);
+
+extern int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count);
+
#endif /* _HARDWARE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index e5a60aa44b5d..0d298a0409f5 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -28,7 +28,6 @@
#include "hardwaremanager.h"
#include "hwmgr_ppt.h"
#include "ppatomctrl.h"
-#include "hwmgr_ppt.h"
#include "power_state.h"
#include "smu_helper.h"
@@ -310,7 +309,7 @@ struct pp_hwmgr_func {
int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
- int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*set_min_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr,
uint32_t virtual_addr_low,
@@ -318,6 +317,9 @@ struct pp_hwmgr_func {
uint32_t mc_addr_low,
uint32_t mc_addr_hi,
uint32_t size);
+ int (*update_nbdpm_pstate)(struct pp_hwmgr *hwmgr,
+ bool enable,
+ bool lock);
int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range);
int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf);
@@ -330,6 +332,8 @@ struct pp_hwmgr_func {
int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate);
int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
+ int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
};
struct pp_table_func {
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
index 65eb630bfea3..94bf7b649c20 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_common.h
@@ -40,10 +40,6 @@
#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"
-
-#include "bif/bif_5_0_d.h"
-#include "bif/bif_5_0_sh_mask.h"
-
#include "dce/dce_10_0_d.h"
#include "dce/dce_10_0_sh_mask.h"
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
index 62f36ba2435b..6e19f4c7cf8f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h
@@ -386,6 +386,8 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403)
#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404)
+#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415)
+
#define PPSMC_MSG_GFX_CU_PG_ENABLE ((uint16_t) 0x280)
#define PPSMC_MSG_GFX_CU_PG_DISABLE ((uint16_t) 0x281)
#define PPSMC_MSG_GetCurrPkgPwr ((uint16_t) 0x282)
@@ -395,6 +397,9 @@ typedef uint16_t PPSMC_Result;
#define PPSMC_MSG_SetVBITimeout ((uint16_t) 0x306)
+#define PPSMC_MSG_EnableFFC ((uint16_t) 0x307)
+#define PPSMC_MSG_DisableFFC ((uint16_t) 0x308)
+
#define PPSMC_MSG_EnableDpmDidt ((uint16_t) 0x309)
#define PPSMC_MSG_DisableDpmDidt ((uint16_t) 0x30A)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 872d3824337b..52abca065764 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -44,7 +44,6 @@
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
-#include "ppatomctrl.h"
#include "atombios.h"
#include "pppcielanes.h"
@@ -1529,8 +1528,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
efuse = efuse >> 24;
if (hwmgr->chip_id == CHIP_POLARIS10) {
- min = 1000;
- max = 2300;
+ if (hwmgr->is_kicker) {
+ min = 1200;
+ max = 2500;
+ } else {
+ min = 1000;
+ max = 2300;
+ }
+ } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+ if (hwmgr->is_kicker) {
+ min = 900;
+ max = 2100;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
} else {
min = 1100;
max = 2100;
@@ -1627,6 +1639,7 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
int result = 0;
@@ -1647,6 +1660,59 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
if (0 == result) {
+ if (((adev->pdev->device == 0x67ef) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe5))) ||
+ ((adev->pdev->device == 0x67ff) &&
+ ((adev->pdev->revision == 0xcf) ||
+ (adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ if ((adev->pdev->device == 0x67ef && adev->pdev->revision == 0xe5) ||
+ (adev->pdev->device == 0x67ff && adev->pdev->revision == 0xef)) {
+ if ((avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 == 0xEA522DD3) &&
+ (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 == 0x5645A) &&
+ (avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 == 0x33F9E) &&
+ (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 == 0xFFFFC5CC) &&
+ (avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 == 0x1B1A) &&
+ (avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b == 0xFFFFFCED)) {
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF718F1D4;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x323FD;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0x1E455;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x23;
+ }
+ }
+ } else if (hwmgr->chip_id == CHIP_POLARIS12 && !hwmgr->is_kicker) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF6B024DD;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x3005E;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0x18A5F;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0x315;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFED1;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x3B;
+ } else if (((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe3) ||
+ (adev->pdev->revision == 0xe4) ||
+ (adev->pdev->revision == 0xe5) ||
+ (adev->pdev->revision == 0xe7) ||
+ (adev->pdev->revision == 0xef))) ||
+ ((adev->pdev->device == 0x6fdf) &&
+ ((adev->pdev->revision == 0xef) ||
+ (adev->pdev->revision == 0xff)))) {
+ avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage = 1;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0 = 0xF843B66B;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1 = 0x59CB5;
+ avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2 = 0xFFFF287F;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = 0;
+ avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 0xFF23;
+ avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b = 0x58;
+ }
+ }
+
+ if (0 == result) {
table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
@@ -1985,6 +2051,12 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ /* Apply avfs cks-off voltages to avoid the overshoot
+ * when switching to the highest sclk frequency
+ */
+ if (data->apply_avfs_cks_off_voltage)
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index d0eb8ab50148..d111dd4e03d7 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -29,7 +29,6 @@
#include "rv_ppsmc.h"
#include "smu10_driver_if.h"
#include "smu10.h"
-#include "ppatomctrl.h"
#include "pp_debug.h"
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 09b844ec3eab..e2787e14a500 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
+#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -61,9 +62,13 @@ static uint32_t smu8_get_argument(struct pp_hwmgr *hwmgr)
mmSMU_MP1_SRBM2P_ARG_0);
}
-static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
+/* Send a message to the SMC, and wait for its response. */
+static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
{
int result = 0;
+ ktime_t t_start;
+ s64 elapsed_us;
if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
@@ -74,28 +79,31 @@ static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
/* Read the last message to SMU, to report actual cause */
uint32_t val = cgs_read_register(hwmgr->device,
mmSMU_MP1_SRBM2P_MSG_0);
- pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
- pr_err("SMU still servicing msg (0x%04x)\n", val);
+ pr_err("%s(0x%04x) aborted; SMU still servicing msg (0x%04x)\n",
+ __func__, msg, val);
return result;
}
+ t_start = ktime_get();
+
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
- return 0;
+ result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
+ SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+
+ elapsed_us = ktime_us_delta(ktime_get(), t_start);
+
+ WARN(result, "%s(0x%04x, %#x) timed out after %lld us\n",
+ __func__, msg, parameter, elapsed_us);
+
+ return result;
}
-/* Send a message to the SMC, and wait for its response.*/
static int smu8_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- int result = 0;
-
- result = smu8_send_msg_to_smc_async(hwmgr, msg);
- if (result != 0)
- return result;
-
- return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
- SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
+ return smu8_send_msg_to_smc_with_parameter(hwmgr, msg, 0);
}
static int smu8_set_smc_sram_address(struct pp_hwmgr *hwmgr,
@@ -135,17 +143,6 @@ static int smu8_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
return result;
}
-static int smu8_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
-{
- if (hwmgr == NULL || hwmgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
-
- return smu8_send_msg_to_smc(hwmgr, msg);
-}
-
static int smu8_check_fw_load_finish(struct pp_hwmgr *hwmgr,
uint32_t firmware)
{
@@ -737,6 +734,10 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr)
cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA);
+ pr_info("smu version %02d.%02d.%02d\n",
+ ((hwmgr->smu_version >> 16) & 0xFF),
+ ((hwmgr->smu_version >> 8) & 0xFF),
+ (hwmgr->smu_version & 0xFF));
adev->pm.fw_version = hwmgr->smu_version >> 8;
return smu8_request_smu_load_fw(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 99d5e4f98f49..a6edd5df33b0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -37,10 +37,13 @@ MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k2_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k2_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_smc.bin");
MODULE_FIRMWARE("amdgpu/vegam_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 9f71512b2510..1e69300f6175 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -40,7 +40,6 @@
#include "smu7_hwmgr.h"
#include "hardwaremanager.h"
-#include "ppatomctrl.h"
#include "atombios.h"
#include "pppcielanes.h"
diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h
index e8fcf3ab1d9a..90ef76b19f8a 100644
--- a/drivers/gpu/drm/arc/arcpgu.h
+++ b/drivers/gpu/drm/arc/arcpgu.h
@@ -20,7 +20,6 @@
struct arcpgu_drm_private {
void __iomem *regs;
struct clk *clk;
- struct drm_fbdev_cma *fbdev;
struct drm_framebuffer *fb;
struct drm_crtc crtc;
struct drm_plane *plane;
@@ -43,8 +42,5 @@ static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
int arc_pgu_setup_crtc(struct drm_device *dev);
int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np);
-struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int num_crtc,
- unsigned int max_conn_count);
#endif
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 965cda48dc13..62f51f70606d 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -158,8 +158,6 @@ static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
.mode_valid = arc_pgu_crtc_mode_valid,
- .mode_set = drm_helper_crtc_mode_set,
- .mode_set_base = drm_helper_crtc_mode_set_base,
.mode_set_nofb = arc_pgu_crtc_mode_set_nofb,
.atomic_begin = arc_pgu_crtc_atomic_begin,
.atomic_enable = arc_pgu_crtc_atomic_enable,
@@ -186,7 +184,6 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
static void arc_pgu_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index f067de4e1e82..206a76abf771 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -25,16 +26,8 @@
#include "arcpgu.h"
#include "arcpgu_regs.h"
-static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
-{
- struct arcpgu_drm_private *arcpgu = dev->dev_private;
-
- drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
-}
-
static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = arcpgu_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -51,13 +44,6 @@ static void arcpgu_setup_mode_config(struct drm_device *drm)
DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops);
-static void arcpgu_lastclose(struct drm_device *drm)
-{
- struct arcpgu_drm_private *arcpgu = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(arcpgu->fbdev);
-}
-
static int arcpgu_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
@@ -113,27 +99,14 @@ static int arcpgu_load(struct drm_device *drm)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- arcpgu->fbdev = drm_fbdev_cma_init(drm, 16,
- drm->mode_config.num_connector);
- if (IS_ERR(arcpgu->fbdev)) {
- ret = PTR_ERR(arcpgu->fbdev);
- arcpgu->fbdev = NULL;
- return -ENODEV;
- }
-
platform_set_drvdata(pdev, drm);
return 0;
}
static int arcpgu_unload(struct drm_device *drm)
{
- struct arcpgu_drm_private *arcpgu = drm->dev_private;
-
- if (arcpgu->fbdev) {
- drm_fbdev_cma_fini(arcpgu->fbdev);
- arcpgu->fbdev = NULL;
- }
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
return 0;
@@ -167,7 +140,6 @@ static int arcpgu_debugfs_init(struct drm_minor *minor)
static struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = arcpgu_lastclose,
.name = "arcpgu",
.desc = "ARC PGU Controller",
.date = "20160219",
@@ -210,13 +182,15 @@ static int arcpgu_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
+ drm_fbdev_generic_setup(drm, 16);
+
return 0;
err_unload:
arcpgu_unload(drm);
err_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -227,7 +201,7 @@ static int arcpgu_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
arcpgu_unload(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return 0;
}
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 7aad7dd80d8c..b9bed1138fa3 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -77,12 +77,18 @@ static const struct malidp_format_id malidp500_de_formats[] = {
{ DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
{ DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
{ DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \
- { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
+ { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }, \
+ { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}
static const struct malidp_format_id malidp550_de_formats[] = {
MALIDP_COMMON_FORMATS,
};
+static const struct malidp_format_id malidp650_de_formats[] = {
+ MALIDP_COMMON_FORMATS,
+ { DRM_FORMAT_X0L0, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 4)},
+};
+
static const struct malidp_layer malidp500_layers[] = {
/* id, base address, fb pointer address base, stride offset,
* yuv2rgb matrix offset, mmu control register offset, rotation_features
@@ -630,6 +636,8 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
case DRM_FORMAT_BGR565:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_X0L0:
+ case DRM_FORMAT_X0L2:
bytes_per_col = 32;
break;
/* 16 lines at 1.5 bytes per pixel */
@@ -905,8 +913,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
MALIDP550_DC_IRQ_SE,
.vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
},
- .pixel_formats = malidp550_de_formats,
- .n_pixel_formats = ARRAY_SIZE(malidp550_de_formats),
+ .pixel_formats = malidp650_de_formats,
+ .n_pixel_formats = ARRAY_SIZE(malidp650_de_formats),
.bus_align_bytes = 16,
},
.query_hw = malidp650_query_hw,
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 837a24d56675..c9a6d3e0cada 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -398,6 +398,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
struct drm_framebuffer *fb;
u16 pixel_alpha = state->pixel_blend_mode;
int i, ret;
+ unsigned int block_w, block_h;
if (!state->crtc || !state->fb)
return 0;
@@ -413,13 +414,26 @@ static int malidp_de_plane_check(struct drm_plane *plane,
ms->n_planes = fb->format->num_planes;
for (i = 0; i < ms->n_planes; i++) {
u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);
- if (fb->pitches[i] & (alignment - 1)) {
+
+ if ((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
+ & (alignment - 1)) {
DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
fb->pitches[i], i);
return -EINVAL;
}
}
+ block_w = drm_format_info_block_width(fb->format, 0);
+ block_h = drm_format_info_block_height(fb->format, 0);
+ if (fb->width % block_w || fb->height % block_h) {
+ DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes");
+ return -EINVAL;
+ }
+ if ((state->src_x >> 16) % block_w || (state->src_y >> 16) % block_h) {
+ DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes");
+ return -EINVAL;
+ }
+
if ((state->crtc_w > mp->hwdev->max_line_size) ||
(state->crtc_h > mp->hwdev->max_line_size) ||
(state->crtc_w < mp->hwdev->min_line_size) ||
@@ -492,10 +506,18 @@ static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
num_strides = (mp->hwdev->hw->features &
MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
- for (i = 0; i < num_strides; ++i)
- malidp_hw_write(mp->hwdev, pitches[i],
+ /*
+ * The drm convention for pitch is that it needs to cover width * cpp,
+ * but our hardware wants the pitch/stride to cover all rows included
+ * in a tile.
+ */
+ for (i = 0; i < num_strides; ++i) {
+ unsigned int block_h = drm_format_info_block_height(mp->base.state->fb->format, i);
+
+ malidp_hw_write(mp->hwdev, pitches[i] * block_h,
mp->layer->base +
mp->layer->stride_offset + i * 4);
+ }
}
static const s16
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index e6c4cd3dc50e..bfc65040dfcb 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -104,8 +104,6 @@ struct ast_private {
int fb_mtrr;
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 0cd827e11fa2..de26df0c6044 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
{
struct ast_framebuffer *afb = &afbdev->afb;
+ drm_crtc_force_disable_all(dev);
drm_fb_helper_unregister_fbi(&afbdev->helper);
if (afb->obj) {
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index dac355812adc..373700c05a00 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -583,7 +583,8 @@ void ast_driver_unload(struct drm_device *dev)
drm_mode_config_cleanup(dev);
ast_mm_fini(ast);
- pci_iounmap(dev->pdev, ast->ioregs);
+ if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET)
+ pci_iounmap(dev->pdev, ast->ioregs);
pci_iounmap(dev->pdev, ast->regs);
kfree(ast);
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 7c6ac3cadb6b..8bb355d5d43d 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -973,9 +973,21 @@ static int get_clock(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = i2c->dev->dev_private;
- uint32_t val;
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
- val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
return val & 1 ? 1 : 0;
}
@@ -983,9 +995,21 @@ static int get_data(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = i2c->dev->dev_private;
- uint32_t val;
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
- val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
return val & 1 ? 1 : 0;
}
@@ -998,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock)
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((clock & 0x01) ? 0 : 1);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
if (ujcrb7 == jtemp)
break;
@@ -1014,7 +1038,7 @@ static void set_data(void *i2c_priv, int data)
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
if (ujcrb7 == jtemp)
break;
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index fe354ebf374d..c168d62fe8f9 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -36,63 +36,6 @@ ast_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct ast_private, ttm.bdev);
}
-static int
-ast_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void
-ast_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int ast_ttm_global_init(struct ast_private *ast)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- global_ref = &ast->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &ast_ttm_mem_global_init;
- global_ref->release = &ast_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- ast->ttm.bo_global_ref.mem_glob =
- ast->ttm.mem_global_ref.object;
- global_ref = &ast->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&ast->ttm.mem_global_ref);
- return r;
- }
- return 0;
-}
-
-static void
-ast_ttm_global_release(struct ast_private *ast)
-{
- if (ast->ttm.mem_global_ref.release == NULL)
- return;
-
- drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
- drm_global_item_unref(&ast->ttm.mem_global_ref);
- ast->ttm.mem_global_ref.release = NULL;
-}
-
-
static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct ast_bo *bo;
@@ -232,12 +175,7 @@ int ast_mm_init(struct ast_private *ast)
struct drm_device *dev = ast->dev;
struct ttm_bo_device *bdev = &ast->ttm.bdev;
- ret = ast_ttm_global_init(ast);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&ast->ttm.bdev,
- ast->ttm.bo_global_ref.ref.object,
&ast_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -268,8 +206,6 @@ void ast_mm_fini(struct ast_private *ast)
ttm_bo_device_release(&ast->ttm.bdev);
- ast_ttm_global_release(ast);
-
arch_phys_wc_del(ast->fb_mtrr);
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 9e34bce089d0..96f4082671fe 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -364,9 +364,7 @@ static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
.mode_valid = atmel_hlcdc_crtc_mode_valid,
- .mode_set = drm_helper_crtc_mode_set,
.mode_set_nofb = atmel_hlcdc_crtc_mode_set_nofb,
- .mode_set_base = drm_helper_crtc_mode_set_base,
.atomic_check = atmel_hlcdc_crtc_atomic_check,
.atomic_begin = atmel_hlcdc_crtc_atomic_begin,
.atomic_flush = atmel_hlcdc_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 843cac222e60..034a91112098 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -556,7 +556,6 @@ error:
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = atmel_hlcdc_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = atmel_hlcdc_dc_atomic_commit,
};
@@ -658,8 +657,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
platform_set_drvdata(pdev, dev);
- drm_fb_cma_fbdev_init(dev, 24, 0);
-
drm_kms_helper_poll_init(dev);
return 0;
@@ -678,7 +675,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
- drm_fb_cma_fbdev_fini(dev);
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
drm_atomic_helper_shutdown(dev);
@@ -727,7 +723,6 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = drm_fb_helper_lastclose,
.irq_handler = atmel_hlcdc_dc_irq_handler,
.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
.irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
@@ -763,19 +758,21 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
ret = atmel_hlcdc_dc_load(ddev);
if (ret)
- goto err_unref;
+ goto err_put;
ret = drm_dev_register(ddev, 0);
if (ret)
goto err_unload;
+ drm_fbdev_generic_setup(ddev, 24);
+
return 0;
err_unload:
atmel_hlcdc_dc_unload(ddev);
-err_unref:
- drm_dev_unref(ddev);
+err_put:
+ drm_dev_put(ddev);
return ret;
}
@@ -786,7 +783,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
atmel_hlcdc_dc_unload(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return 0;
}
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index e7a69077e45a..fb38c8b857b5 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -66,6 +66,7 @@ struct bochs_device {
u16 yres_virtual;
u32 stride;
u32 bpp;
+ struct edid *edid;
/* drm */
struct drm_device *dev;
@@ -76,8 +77,6 @@ struct bochs_device {
/* ttm */
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool initialized;
} ttm;
@@ -126,6 +125,7 @@ void bochs_hw_setmode(struct bochs_device *bochs,
const struct drm_format_info *format);
void bochs_hw_setbase(struct bochs_device *bochs,
int x, int y, u64 addr);
+int bochs_hw_load_edid(struct bochs_device *bochs);
/* bochs_mm.c */
int bochs_mm_init(struct bochs_device *bochs);
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index cacff73a64ab..c90a0d492fd5 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -69,6 +69,35 @@ static void bochs_hw_set_little_endian(struct bochs_device *bochs)
#define bochs_hw_set_native_endian(_b) bochs_hw_set_little_endian(_b)
#endif
+static int bochs_get_edid_block(void *data, u8 *buf,
+ unsigned int block, size_t len)
+{
+ struct bochs_device *bochs = data;
+ size_t i, start = block * EDID_LENGTH;
+
+ if (start + len > 0x400 /* vga register offset */)
+ return -1;
+
+ for (i = 0; i < len; i++) {
+ buf[i] = readb(bochs->mmio + start + i);
+ }
+ return 0;
+}
+
+int bochs_hw_load_edid(struct bochs_device *bochs)
+{
+ if (!bochs->mmio)
+ return -1;
+
+ kfree(bochs->edid);
+ bochs->edid = drm_do_get_edid(&bochs->connector,
+ bochs_get_edid_block, bochs);
+ if (bochs->edid == NULL)
+ return -1;
+
+ return 0;
+}
+
int bochs_hw_init(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
@@ -164,6 +193,7 @@ void bochs_hw_fini(struct drm_device *dev)
if (bochs->fb_map)
iounmap(bochs->fb_map);
pci_release_regions(dev->pdev);
+ kfree(bochs->edid);
}
void bochs_hw_setmode(struct bochs_device *bochs,
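
For context, drm_do_get_edid() is the core helper behind the hunk
above: it fetches EDID one 128-byte block at a time through the driver
callback, validates checksums, retries corrupt blocks, and follows
extension blocks, so the callback only has to copy raw bytes. The
returned EDID is kmalloc'ed and owned by the caller, which is why both
bochs_hw_load_edid() and bochs_hw_fini() kfree() the previous blob. A
minimal callback for a device that exposes the EDID in a memory window
might look like this (the foo_* names are hypothetical):

    static int foo_get_edid_block(void *data, u8 *buf,
                                  unsigned int block, size_t len)
    {
            struct foo_device *foo = data;
            size_t start = block * EDID_LENGTH;

            if (start + len > foo->edid_window_size)
                    return -1;      /* any non-zero value aborts */

            memcpy_fromio(buf, foo->mmio + start, len);
            return 0;
    }

    edid = drm_do_get_edid(connector, foo_get_edid_block, foo);
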
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 9bc5b438aefd..f87c284dd93d 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -213,10 +213,17 @@ static void bochs_encoder_init(struct drm_device *dev)
static int bochs_connector_get_modes(struct drm_connector *connector)
{
- int count;
+ struct bochs_device *bochs =
+ container_of(connector, struct bochs_device, connector);
+ int count = 0;
+
+ if (bochs->edid)
+ count = drm_add_edid_modes(connector, bochs->edid);
- count = drm_add_modes_noedid(connector, 8192, 8192);
- drm_set_preferred_mode(connector, defx, defy);
+ if (!count) {
+ count = drm_add_modes_noedid(connector, 8192, 8192);
+ drm_set_preferred_mode(connector, defx, defy);
+ }
return count;
}
@@ -271,6 +278,13 @@ static void bochs_connector_init(struct drm_device *dev)
drm_connector_helper_add(connector,
&bochs_connector_connector_helper_funcs);
drm_connector_register(connector);
+
+ bochs_hw_load_edid(bochs);
+ if (bochs->edid) {
+ DRM_INFO("Found EDID data blob.\n");
+ drm_connector_attach_edid_property(connector);
+ drm_connector_update_edid_property(connector, bochs->edid);
+ }
}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index a61c1ecb2bdc..0980411e41bf 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -16,61 +16,6 @@ static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct bochs_device, ttm.bdev);
}
-static int bochs_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void bochs_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int bochs_ttm_global_init(struct bochs_device *bochs)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- global_ref = &bochs->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &bochs_ttm_mem_global_init;
- global_ref->release = &bochs_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- bochs->ttm.bo_global_ref.mem_glob =
- bochs->ttm.mem_global_ref.object;
- global_ref = &bochs->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&bochs->ttm.mem_global_ref);
- return r;
- }
-
- return 0;
-}
-
-static void bochs_ttm_global_release(struct bochs_device *bochs)
-{
- if (bochs->ttm.mem_global_ref.release == NULL)
- return;
-
- drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
- drm_global_item_unref(&bochs->ttm.mem_global_ref);
- bochs->ttm.mem_global_ref.release = NULL;
-}
-
-
static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct bochs_bo *bo;
@@ -208,12 +153,7 @@ int bochs_mm_init(struct bochs_device *bochs)
struct ttm_bo_device *bdev = &bochs->ttm.bdev;
int ret;
- ret = bochs_ttm_global_init(bochs);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&bochs->ttm.bdev,
- bochs->ttm.bo_global_ref.ref.object,
&bochs_bo_driver,
bochs->dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -240,7 +180,6 @@ void bochs_mm_fini(struct bochs_device *bochs)
return;
ttm_bo_device_release(&bochs->ttm.bdev);
- bochs_ttm_global_release(bochs);
bochs->ttm.initialized = false;
}
@@ -414,7 +353,7 @@ int bochs_dumb_create(struct drm_file *file, struct drm_device *dev,
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -454,6 +393,6 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
bo = gem_to_bochs_bo(obj);
*offset = bochs_bo_mmap_offset(bo);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 9eeb8ef0b174..2fee47b0d50b 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -95,6 +95,7 @@ config DRM_SII902X
depends on OF
select DRM_KMS_HELPER
select REGMAP_I2C
+ select I2C_MUX
---help---
Silicon Image sii902x bridge chip driver.
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 2f21d3b6850b..753e96129ab7 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1219,12 +1219,12 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
* plat_data->attach returns; that's why we record the connector
* pointer after the plat attach.
*/
- if (dp->plat_data->attach) {
- ret = dp->plat_data->attach(dp->plat_data, bridge, connector);
- if (ret) {
- DRM_ERROR("Failed at platform attch func\n");
- return ret;
- }
+ if (dp->plat_data->attach) {
+ ret = dp->plat_data->attach(dp->plat_data, bridge, connector);
+ if (ret) {
+ DRM_ERROR("Failed at platform attach func\n");
+ return ret;
+ }
}
if (dp->plat_data->panel) {
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index e59a13542333..bfa902013aa4 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1,4 +1,6 @@
/*
+ * Copyright (C) 2018 Renesas Electronics
+ *
* Copyright (C) 2016 Atmel
* Bo Shen <voice.shen@atmel.com>
*
@@ -21,6 +23,7 @@
*/
#include <linux/gpio/consumer.h>
+#include <linux/i2c-mux.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -86,8 +89,49 @@ struct sii902x {
struct drm_bridge bridge;
struct drm_connector connector;
struct gpio_desc *reset_gpio;
+ struct i2c_mux_core *i2cmux;
};
+static int sii902x_read_unlocked(struct i2c_client *i2c, u8 reg, u8 *val)
+{
+ union i2c_smbus_data data;
+ int ret;
+
+ ret = __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_READ, reg, I2C_SMBUS_BYTE_DATA, &data);
+
+ if (ret < 0)
+ return ret;
+
+ *val = data.byte;
+ return 0;
+}
+
+static int sii902x_write_unlocked(struct i2c_client *i2c, u8 reg, u8 val)
+{
+ union i2c_smbus_data data;
+
+ data.byte = val;
+
+ return __i2c_smbus_xfer(i2c->adapter, i2c->addr, i2c->flags,
+ I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA,
+ &data);
+}
+
+static int sii902x_update_bits_unlocked(struct i2c_client *i2c, u8 reg, u8 mask,
+ u8 val)
+{
+ int ret;
+ u8 status;
+
+ ret = sii902x_read_unlocked(i2c, reg, &status);
+ if (ret)
+ return ret;
+ status &= ~mask;
+ status |= val & mask;
+ return sii902x_write_unlocked(i2c, reg, status);
+}
+
static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge)
{
return container_of(bridge, struct sii902x, bridge);
@@ -135,41 +179,11 @@ static const struct drm_connector_funcs sii902x_connector_funcs = {
static int sii902x_get_modes(struct drm_connector *connector)
{
struct sii902x *sii902x = connector_to_sii902x(connector);
- struct regmap *regmap = sii902x->regmap;
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- struct device *dev = &sii902x->i2c->dev;
- unsigned long timeout;
- unsigned int retries;
- unsigned int status;
struct edid *edid;
- int num = 0;
- int ret;
-
- ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
- SII902X_SYS_CTRL_DDC_BUS_REQ,
- SII902X_SYS_CTRL_DDC_BUS_REQ);
- if (ret)
- return ret;
-
- timeout = jiffies +
- msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
- do {
- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
- if (ret)
- return ret;
- } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
- time_before(jiffies, timeout));
+ int num = 0, ret;
- if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
- dev_err(dev, "failed to acquire the i2c bus\n");
- return -ETIMEDOUT;
- }
-
- ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status);
- if (ret)
- return ret;
-
- edid = drm_get_edid(connector, sii902x->i2c->adapter);
+ edid = drm_get_edid(connector, sii902x->i2cmux->adapter[0]);
drm_connector_update_edid_property(connector, edid);
if (edid) {
num = drm_add_edid_modes(connector, edid);
@@ -181,42 +195,6 @@ static int sii902x_get_modes(struct drm_connector *connector)
if (ret)
return ret;
- /*
- * Sometimes the I2C bus can stall after failure to use the
- * EDID channel. Retry a few times to see if things clear
- * up, else continue anyway.
- */
- retries = 5;
- do {
- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA,
- &status);
- retries--;
- } while (ret && retries);
- if (ret)
- dev_err(dev, "failed to read status (%d)\n", ret);
-
- ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
- SII902X_SYS_CTRL_DDC_BUS_REQ |
- SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
- if (ret)
- return ret;
-
- timeout = jiffies +
- msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
- do {
- ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
- if (ret)
- return ret;
- } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
- SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
- time_before(jiffies, timeout));
-
- if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
- SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
- dev_err(dev, "failed to release the i2c bus\n");
- return -ETIMEDOUT;
- }
-
return num;
}
@@ -366,6 +344,121 @@ static irqreturn_t sii902x_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
+/*
+ * The purpose of sii902x_i2c_bypass_select is to enable the pass-through
+ * mode of the HDMI transmitter. Do not use regmap from within this function;
+ * use only the sii902x_*_unlocked functions to read/modify/write registers.
+ * We are holding the parent adapter lock here; keep this in mind before
+ * adding more i2c transactions.
+ *
+ * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere
+ * in this driver, we need to make sure that we only touch 0x1A[2:1] from
+ * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that
+ * we leave the remaining bits as we have found them.
+ */
+static int sii902x_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct sii902x *sii902x = i2c_mux_priv(mux);
+ struct device *dev = &sii902x->i2c->dev;
+ unsigned long timeout;
+ u8 status;
+ int ret;
+
+ ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_DDC_BUS_REQ,
+ SII902X_SYS_CTRL_DDC_BUS_REQ);
+ if (ret)
+ return ret;
+
+ timeout = jiffies +
+ msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+ do {
+ ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ &status);
+ if (ret)
+ return ret;
+ } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+ time_before(jiffies, timeout));
+
+ if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+ dev_err(dev, "Failed to acquire the i2c bus\n");
+ return -ETIMEDOUT;
+ }
+
+ return sii902x_write_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ status);
+}
+
+/*
+ * The purpose of sii902x_i2c_bypass_deselect is to disable the pass-through
+ * mode of the HDMI transmitter. Do not use regmap from within this function;
+ * use only the sii902x_*_unlocked functions to read/modify/write registers.
+ * We are holding the parent adapter lock here; keep this in mind before
+ * adding more i2c transactions.
+ *
+ * Also, since SII902X_SYS_CTRL_DATA is used with regmap_update_bits elsewhere
+ * in this driver, we need to make sure that we only touch 0x1A[2:1] from
+ * within sii902x_i2c_bypass_select and sii902x_i2c_bypass_deselect, and that
+ * we leave the remaining bits as we have found them.
+ */
+static int sii902x_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
+{
+ struct sii902x *sii902x = i2c_mux_priv(mux);
+ struct device *dev = &sii902x->i2c->dev;
+ unsigned long timeout;
+ unsigned int retries;
+ u8 status;
+ int ret;
+
+ /*
+ * When the HDMI transmitter is in pass-through mode, we need an
+ * (undocumented) additional delay between STOP and START conditions
+ * to guarantee the bus won't get stuck.
+ */
+ udelay(30);
+
+ /*
+ * Sometimes the I2C bus can stall after failure to use the
+ * EDID channel. Retry a few times to see if things clear
+ * up, else continue anyway.
+ */
+ retries = 5;
+ do {
+ ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ &status);
+ retries--;
+ } while (ret && retries);
+ if (ret) {
+ dev_err(dev, "failed to read status (%d)\n", ret);
+ return ret;
+ }
+
+ ret = sii902x_update_bits_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
+ if (ret)
+ return ret;
+
+ timeout = jiffies +
+ msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
+ do {
+ ret = sii902x_read_unlocked(sii902x->i2c, SII902X_SYS_CTRL_DATA,
+ &status);
+ if (ret)
+ return ret;
+ } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
+ time_before(jiffies, timeout));
+
+ if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
+ SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
+ dev_err(dev, "failed to release the i2c bus\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int sii902x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -375,6 +468,13 @@ static int sii902x_probe(struct i2c_client *client,
u8 chipid[4];
int ret;
+ ret = i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA);
+ if (!ret) {
+ dev_err(dev, "I2C adapter not suitable\n");
+ return -EIO;
+ }
+
sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
if (!sii902x)
return -ENOMEM;
@@ -433,7 +533,15 @@ static int sii902x_probe(struct i2c_client *client,
i2c_set_clientdata(client, sii902x);
- return 0;
+ sii902x->i2cmux = i2c_mux_alloc(client->adapter, dev,
+ 1, 0, I2C_MUX_GATE,
+ sii902x_i2c_bypass_select,
+ sii902x_i2c_bypass_deselect);
+ if (!sii902x->i2cmux)
+ return -ENOMEM;
+
+ sii902x->i2cmux->priv = sii902x;
+ return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
}
static int sii902x_remove(struct i2c_client *client)
@@ -441,6 +549,7 @@ static int sii902x_remove(struct i2c_client *client)
{
struct sii902x *sii902x = i2c_get_clientdata(client);
+ i2c_mux_del_adapters(sii902x->i2cmux);
drm_bridge_remove(&sii902x->bridge);
return 0;
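
The reason for the sii902x_*_unlocked() helpers above: an i2c mux's
select/deselect callbacks run with the parent adapter lock already
held, so flipping the DDC gate through regmap or i2c_smbus_*() would
recurse on that lock and deadlock. Only the unlocked primitives such
as __i2c_smbus_xfer() are safe in that context. A stripped-down gate
select for a single-register mux could look like this (the FOO_* and
foo_* names are hypothetical):

    static int foo_mux_select(struct i2c_mux_core *muxc, u32 chan)
    {
            struct foo *foo = i2c_mux_priv(muxc);
            union i2c_smbus_data data = { .byte = FOO_GATE_OPEN };

            /* parent adapter is locked; use the unlocked xfer */
            return __i2c_smbus_xfer(foo->i2c->adapter, foo->i2c->addr,
                                    foo->i2c->flags, I2C_SMBUS_WRITE,
                                    FOO_GATE_REG, I2C_SMBUS_BYTE_DATA,
                                    &data);
    }

With the mux registered via i2c_mux_alloc()/i2c_mux_add_adapter() as
in the probe hunk, EDID reads simply target the child adapter
(sii902x->i2cmux->adapter[0]) and the gate handling happens behind the
scenes.
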
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 5971976284bf..64c3cf027518 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1664,6 +1664,7 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
case 0x131a:
case 0x132a:
case 0x201a:
+ case 0x212a:
count = 1;
break;
default:
@@ -1957,7 +1958,6 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
.get_modes = dw_hdmi_connector_get_modes,
- .best_encoder = drm_atomic_helper_best_encoder,
};
static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
@@ -2205,7 +2205,9 @@ static int dw_hdmi_detect_phy(struct dw_hdmi *hdmi)
unsigned int i;
u8 phy_type;
- phy_type = hdmi_readb(hdmi, HDMI_CONFIG2_ID);
+ phy_type = hdmi->plat_data->phy_force_vendor ?
+ DW_HDMI_PHY_VENDOR_PHY :
+ hdmi_readb(hdmi, HDMI_CONFIG2_ID);
if (phy_type == DW_HDMI_PHY_VENDOR_PHY) {
/* Vendor PHYs require support from the glue layer. */
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index fd7999642cf8..2f4b145b73af 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -230,10 +230,21 @@ struct dw_mipi_dsi {
u32 format;
unsigned long mode_flags;
+ struct dw_mipi_dsi *master; /* dual-dsi master ptr */
+ struct dw_mipi_dsi *slave; /* dual-dsi slave ptr */
+
const struct dw_mipi_dsi_plat_data *plat_data;
};
/*
+ * Check whether a link to a master or a slave is present
+ */
+static inline bool dw_mipi_is_dual_mode(struct dw_mipi_dsi *dsi)
+{
+ return dsi->slave || dsi->master;
+}
+
+/*
* The controller should generate 2 frames before
* preparing the peripheral.
*/
@@ -270,6 +281,7 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct dw_mipi_dsi *dsi = host_to_dsi(host);
+ const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
struct drm_bridge *bridge;
struct drm_panel *panel;
int ret;
@@ -300,6 +312,12 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
drm_bridge_add(&dsi->bridge);
+ if (pdata->host_ops && pdata->host_ops->attach) {
+ ret = pdata->host_ops->attach(pdata->priv_data, device);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
@@ -307,6 +325,14 @@ static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct dw_mipi_dsi *dsi = host_to_dsi(host);
+ const struct dw_mipi_dsi_plat_data *pdata = dsi->plat_data;
+ int ret;
+
+ if (pdata->host_ops && pdata->host_ops->detach) {
+ ret = pdata->host_ops->detach(pdata->priv_data, device);
+ if (ret < 0)
+ return ret;
+ }
drm_of_panel_bridge_remove(host->dev->of_node, 1, 0);
@@ -441,10 +467,17 @@ static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
}
dw_mipi_message_config(dsi, msg);
+ if (dsi->slave)
+ dw_mipi_message_config(dsi->slave, msg);
ret = dw_mipi_dsi_write(dsi, &packet);
if (ret)
return ret;
+ if (dsi->slave) {
+ ret = dw_mipi_dsi_write(dsi->slave, &packet);
+ if (ret)
+ return ret;
+ }
if (msg->rx_buf && msg->rx_len) {
ret = dw_mipi_dsi_read(dsi, msg);
@@ -583,7 +616,11 @@ static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
* DSI_VNPCR.NPSIZE... especially because this driver supports
* non-burst video modes, see dw_mipi_dsi_video_mode_config()...
*/
- dsi_write(dsi, DSI_VID_PKT_SIZE, VID_PKT_SIZE(mode->hdisplay));
+
+ dsi_write(dsi, DSI_VID_PKT_SIZE,
+ dw_mipi_is_dual_mode(dsi) ?
+ VID_PKT_SIZE(mode->hdisplay / 2) :
+ VID_PKT_SIZE(mode->hdisplay));
}
static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
@@ -755,24 +792,43 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
*/
dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+ if (dsi->slave) {
+ dw_mipi_dsi_disable(dsi->slave);
+ clk_disable_unprepare(dsi->slave->pclk);
+ pm_runtime_put(dsi->slave->dev);
+ }
dw_mipi_dsi_disable(dsi);
+
clk_disable_unprepare(dsi->pclk);
pm_runtime_put(dsi->dev);
}
-static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static unsigned int dw_mipi_dsi_get_lanes(struct dw_mipi_dsi *dsi)
+{
+ /* this instance is the slave, so add the master's lanes */
+ if (dsi->master)
+ return dsi->master->lanes + dsi->lanes;
+
+ /* this instance is the master, so add the slave's lanes */
+ if (dsi->slave)
+ return dsi->lanes + dsi->slave->lanes;
+
+ /* single-dsi, so no other instance to consider */
+ return dsi->lanes;
+}
+
+static void dw_mipi_dsi_mode_set(struct dw_mipi_dsi *dsi,
+ struct drm_display_mode *adjusted_mode)
{
- struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
void *priv_data = dsi->plat_data->priv_data;
int ret;
+ u32 lanes = dw_mipi_dsi_get_lanes(dsi);
clk_prepare_enable(dsi->pclk);
ret = phy_ops->get_lane_mbps(priv_data, adjusted_mode, dsi->mode_flags,
- dsi->lanes, dsi->format, &dsi->lane_mbps);
+ lanes, dsi->format, &dsi->lane_mbps);
if (ret)
DRM_DEBUG_DRIVER("Phy get_lane_mbps() failed\n");
@@ -804,12 +860,25 @@ static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
dw_mipi_dsi_set_mode(dsi, 0);
}
+static void dw_mipi_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
+
+ dw_mipi_dsi_mode_set(dsi, adjusted_mode);
+ if (dsi->slave)
+ dw_mipi_dsi_mode_set(dsi->slave, adjusted_mode);
+}
+
static void dw_mipi_dsi_bridge_enable(struct drm_bridge *bridge)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
/* Switch to video mode for panel-bridge enable & panel enable */
dw_mipi_dsi_set_mode(dsi, MIPI_DSI_MODE_VIDEO);
+ if (dsi->slave)
+ dw_mipi_dsi_set_mode(dsi->slave, MIPI_DSI_MODE_VIDEO);
}
static enum drm_mode_status
@@ -941,9 +1010,25 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
static void __dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
{
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+
pm_runtime_disable(dsi->dev);
}
+void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave)
+{
+ /* introduce controllers to each other */
+ dsi->slave = slave;
+ dsi->slave->master = dsi;
+
+ /* migrate settings for already attached displays */
+ dsi->slave->lanes = dsi->lanes;
+ dsi->slave->channel = dsi->channel;
+ dsi->slave->format = dsi->format;
+ dsi->slave->mode_flags = dsi->mode_flags;
+}
+EXPORT_SYMBOL_GPL(dw_mipi_dsi_set_slave);
+
/*
* Probe/remove API, used from platforms based on the DRM bridge API.
*/
@@ -957,8 +1042,6 @@ EXPORT_SYMBOL_GPL(dw_mipi_dsi_probe);
void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi)
{
- mipi_dsi_host_unregister(&dsi->dsi_host);
-
__dw_mipi_dsi_remove(dsi);
}
EXPORT_SYMBOL_GPL(dw_mipi_dsi_remove);
@@ -966,31 +1049,22 @@ EXPORT_SYMBOL_GPL(dw_mipi_dsi_remove);
/*
* Bind/unbind API, used from platforms based on the component framework.
*/
-struct dw_mipi_dsi *
-dw_mipi_dsi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
- const struct dw_mipi_dsi_plat_data *plat_data)
+int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder)
{
- struct dw_mipi_dsi *dsi;
int ret;
- dsi = __dw_mipi_dsi_probe(pdev, plat_data);
- if (IS_ERR(dsi))
- return dsi;
-
ret = drm_bridge_attach(encoder, &dsi->bridge, NULL);
if (ret) {
- dw_mipi_dsi_remove(dsi);
DRM_ERROR("Failed to initialize bridge with drm\n");
- return ERR_PTR(ret);
+ return ret;
}
- return dsi;
+ return ret;
}
EXPORT_SYMBOL_GPL(dw_mipi_dsi_bind);
void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi)
{
- __dw_mipi_dsi_remove(dsi);
}
EXPORT_SYMBOL_GPL(dw_mipi_dsi_unbind);
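
Taken together, the dual-DSI plumbing above expects a platform glue
layer to probe two controller instances and pair them before binding;
commands and mode_set/enable calls are then mirrored to the slave, the
lane count is summed for the PHY, and the video payload is halved per
link (see the VID_PKT_SIZE change). A hypothetical glue sketch, with
error handling trimmed:

    struct dw_mipi_dsi *dsi0, *dsi1;
    int ret;

    /* two controller instances, e.g. from two DT nodes */
    dsi0 = dw_mipi_dsi_probe(pdev0, &plat_data0);
    dsi1 = dw_mipi_dsi_probe(pdev1, &plat_data1);
    if (IS_ERR(dsi0) || IS_ERR(dsi1))
            return -ENODEV;

    /* dsi1 becomes the slave: it inherits lanes/channel/format
     * from dsi0 and receives mirrored writes from now on */
    dw_mipi_dsi_set_slave(dsi0, dsi1);

    /* only the master is bound to the encoder */
    ret = dw_mipi_dsi_bind(dsi0, encoder);
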
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index ee6b98efa9c2..afd491018bfc 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -379,7 +379,7 @@ static void tc358764_detach(struct drm_bridge *bridge)
drm_fb_helper_remove_one_connector(drm->fb_helper, &ctx->connector);
drm_panel_detach(ctx->panel);
ctx->panel = NULL;
- drm_connector_unreference(&ctx->connector);
+ drm_connector_put(&ctx->connector);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 680566d97adc..10243965ee7c 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -54,7 +54,7 @@
#define SN_AUX_ADDR_7_0_REG 0x76
#define SN_AUX_LENGTH_REG 0x77
#define SN_AUX_CMD_REG 0x78
-#define AUX_CMD_SEND BIT(1)
+#define AUX_CMD_SEND BIT(0)
#define AUX_CMD_REQ(x) ((x) << 4)
#define SN_AUX_RDATA_REG(x) (0x79 + (x))
#define SN_SSC_CONFIG_REG 0x93
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index a29f87e98d9d..f2b2e0d169fa 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -136,8 +136,6 @@ struct cirrus_device {
int fb_mtrr;
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
bool mm_inited;
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 68ab1821e15b..4dd499c7d1ba 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -169,7 +169,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
struct drm_mode_fb_cmd2 mode_cmd;
void *sysram;
struct drm_gem_object *gobj = NULL;
- struct cirrus_bo *bo = NULL;
int size, ret;
mode_cmd.width = sizes->surface_width;
@@ -185,8 +184,6 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
return ret;
}
- bo = gem_to_cirrus_bo(gobj);
-
sysram = vmalloc(size);
if (!sysram)
return -ENOMEM;
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index f21953243790..e075810b4bd4 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -36,63 +36,6 @@ cirrus_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct cirrus_device, ttm.bdev);
}
-static int
-cirrus_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void
-cirrus_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- global_ref = &cirrus->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &cirrus_ttm_mem_global_init;
- global_ref->release = &cirrus_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- cirrus->ttm.bo_global_ref.mem_glob =
- cirrus->ttm.mem_global_ref.object;
- global_ref = &cirrus->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&cirrus->ttm.mem_global_ref);
- return r;
- }
- return 0;
-}
-
-static void
-cirrus_ttm_global_release(struct cirrus_device *cirrus)
-{
- if (cirrus->ttm.mem_global_ref.release == NULL)
- return;
-
- drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
- drm_global_item_unref(&cirrus->ttm.mem_global_ref);
- cirrus->ttm.mem_global_ref.release = NULL;
-}
-
-
static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct cirrus_bo *bo;
@@ -232,12 +175,7 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
struct drm_device *dev = cirrus->dev;
struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
- ret = cirrus_ttm_global_init(cirrus);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&cirrus->ttm.bdev,
- cirrus->ttm.bo_global_ref.ref.object,
&cirrus_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -273,8 +211,6 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
ttm_bo_device_release(&cirrus->ttm.bdev);
- cirrus_ttm_global_release(cirrus);
-
arch_phys_wc_del(cirrus->fb_mtrr);
cirrus->fb_mtrr = 0;
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
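
The cirrus hunks mirror the bochs ones: with the TTM global-state
consolidation, ttm_bo_device_init() no longer takes the BO-global
object as its second argument, and the per-driver drm_global_reference
boilerplate disappears entirely. Memory-manager init collapses to
roughly this (foo_* names are hypothetical):

    int foo_mm_init(struct foo_device *foo)
    {
            /* the mem/BO globals are now managed inside TTM itself */
            return ttm_bo_device_init(&foo->ttm.bdev,
                                      &foo_bo_driver,
                                      foo->dev->anon_inode->i_mapping,
                                      DRM_FILE_PAGE_OFFSET,
                                      true);
    }

The matching fini side likewise drops the *_ttm_global_release() call,
as both hunks show.
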
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3dbfbddae7e6..48ec378fb27e 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -315,9 +315,11 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
-static int drm_atomic_crtc_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
+ const struct drm_crtc_state *new_crtc_state)
{
+ struct drm_crtc *crtc = new_crtc_state->crtc;
+
/* NOTE: we explicitly don't enforce constraints such as primary
* layer covering entire screen, since that is something we want
* to allow (on hw that supports it). For hw that does not, it
@@ -326,7 +328,7 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
* TODO: Add generic modeset state checks once we support those.
*/
- if (state->active && !state->enable) {
+ if (new_crtc_state->active && !new_crtc_state->enable) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] active without enabled\n",
crtc->base.id, crtc->name);
return -EINVAL;
@@ -336,14 +338,14 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
* as this is a kernel-internal detail that userspace should never
* be able to trigger. */
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
- WARN_ON(state->enable && !state->mode_blob)) {
+ WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
crtc->base.id, crtc->name);
return -EINVAL;
}
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
- WARN_ON(!state->enable && state->mode_blob)) {
+ WARN_ON(!new_crtc_state->enable && new_crtc_state->mode_blob)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] disabled with mode blob\n",
crtc->base.id, crtc->name);
return -EINVAL;
@@ -359,7 +361,8 @@ static int drm_atomic_crtc_check(struct drm_crtc *crtc,
* and legacy page_flip IOCTL which also reject service on a disabled
* pipe.
*/
- if (state->event && !state->active && !crtc->state->active) {
+ if (new_crtc_state->event &&
+ !new_crtc_state->active && !old_crtc_state->active) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requesting event but off\n",
crtc->base.id, crtc->name);
return -EINVAL;
@@ -395,6 +398,11 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
{
struct drm_crtc_state *crtc_state;
struct drm_writeback_job *writeback_job = state->writeback_job;
+ const struct drm_display_info *info = &connector->display_info;
+
+ state->max_bpc = info->bpc ? info->bpc : 8;
+ if (connector->max_bpc_property)
+ state->max_bpc = min(state->max_bpc, state->max_requested_bpc);
if ((connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) || !writeback_job)
return 0;
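
This is the core half of the new "max bpc" connector property: when a
driver has attached the property, the core clamps the sink's reported
depth against the value requested by userspace and stores the result
in state->max_bpc before driver atomic checks run. A hedged sketch of
a driver consuming it, assuming the property was attached with
drm_connector_attach_max_bpc_property() from the same series:

    /* hypothetical driver atomic_check code */
    struct drm_connector_state *conn_state =
            drm_atomic_get_new_connector_state(state, connector);

    /* already min(sink bpc, userspace "max bpc") */
    unsigned int bpc = conn_state->max_bpc;
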
@@ -489,14 +497,13 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_plane_state);
static bool
-plane_switching_crtc(struct drm_atomic_state *state,
- struct drm_plane *plane,
- struct drm_plane_state *plane_state)
+plane_switching_crtc(const struct drm_plane_state *old_plane_state,
+ const struct drm_plane_state *new_plane_state)
{
- if (!plane->state->crtc || !plane_state->crtc)
+ if (!old_plane_state->crtc || !new_plane_state->crtc)
return false;
- if (plane->state->crtc == plane_state->crtc)
+ if (old_plane_state->crtc == new_plane_state->crtc)
return false;
/* This could be refined, but currently there's no helper or driver code
@@ -509,88 +516,117 @@ plane_switching_crtc(struct drm_atomic_state *state,
/**
* drm_atomic_plane_check - check plane state
- * @plane: plane to check
- * @state: plane state to check
+ * @old_plane_state: old plane state to check
+ * @new_plane_state: new plane state to check
*
* Provides core sanity checks for plane state.
*
* RETURNS:
* Zero on success, error code on failure
*/
-static int drm_atomic_plane_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
+ const struct drm_plane_state *new_plane_state)
{
+ struct drm_plane *plane = new_plane_state->plane;
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ const struct drm_framebuffer *fb = new_plane_state->fb;
unsigned int fb_width, fb_height;
+ struct drm_mode_rect *clips;
+ uint32_t num_clips;
int ret;
/* either *both* CRTC and FB must be set, or neither */
- if (state->crtc && !state->fb) {
+ if (crtc && !fb) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] CRTC set but no FB\n",
plane->base.id, plane->name);
return -EINVAL;
- } else if (state->fb && !state->crtc) {
+ } else if (fb && !crtc) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] FB set but no CRTC\n",
plane->base.id, plane->name);
return -EINVAL;
}
/* if disabled, we don't care about the rest of the state: */
- if (!state->crtc)
+ if (!crtc)
return 0;
/* Check whether this plane is usable on this CRTC */
- if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
+ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) {
DRM_DEBUG_ATOMIC("Invalid [CRTC:%d:%s] for [PLANE:%d:%s]\n",
- state->crtc->base.id, state->crtc->name,
+ crtc->base.id, crtc->name,
plane->base.id, plane->name);
return -EINVAL;
}
/* Check whether this plane supports the fb pixel format. */
- ret = drm_plane_check_pixel_format(plane, state->fb->format->format,
- state->fb->modifier);
+ ret = drm_plane_check_pixel_format(plane, fb->format->format,
+ fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
plane->base.id, plane->name,
- drm_get_format_name(state->fb->format->format,
+ drm_get_format_name(fb->format->format,
&format_name),
- state->fb->modifier);
+ fb->modifier);
return ret;
}
/* Give drivers some help against integer overflows */
- if (state->crtc_w > INT_MAX ||
- state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
- state->crtc_h > INT_MAX ||
- state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
+ if (new_plane_state->crtc_w > INT_MAX ||
+ new_plane_state->crtc_x > INT_MAX - (int32_t) new_plane_state->crtc_w ||
+ new_plane_state->crtc_h > INT_MAX ||
+ new_plane_state->crtc_y > INT_MAX - (int32_t) new_plane_state->crtc_h) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid CRTC coordinates %ux%u+%d+%d\n",
plane->base.id, plane->name,
- state->crtc_w, state->crtc_h,
- state->crtc_x, state->crtc_y);
+ new_plane_state->crtc_w, new_plane_state->crtc_h,
+ new_plane_state->crtc_x, new_plane_state->crtc_y);
return -ERANGE;
}
- fb_width = state->fb->width << 16;
- fb_height = state->fb->height << 16;
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
/* Make sure source coordinates are inside the fb. */
- if (state->src_w > fb_width ||
- state->src_x > fb_width - state->src_w ||
- state->src_h > fb_height ||
- state->src_y > fb_height - state->src_h) {
+ if (new_plane_state->src_w > fb_width ||
+ new_plane_state->src_x > fb_width - new_plane_state->src_w ||
+ new_plane_state->src_h > fb_height ||
+ new_plane_state->src_y > fb_height - new_plane_state->src_h) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u (fb %ux%u)\n",
plane->base.id, plane->name,
- state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
- state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
- state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
- state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10,
- state->fb->width, state->fb->height);
+ new_plane_state->src_w >> 16,
+ ((new_plane_state->src_w & 0xffff) * 15625) >> 10,
+ new_plane_state->src_h >> 16,
+ ((new_plane_state->src_h & 0xffff) * 15625) >> 10,
+ new_plane_state->src_x >> 16,
+ ((new_plane_state->src_x & 0xffff) * 15625) >> 10,
+ new_plane_state->src_y >> 16,
+ ((new_plane_state->src_y & 0xffff) * 15625) >> 10,
+ fb->width, fb->height);
return -ENOSPC;
}
- if (plane_switching_crtc(state->state, plane, state)) {
+ clips = drm_plane_get_damage_clips(new_plane_state);
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+
+ /* Make sure damage clips are valid and inside the fb. */
+ while (num_clips > 0) {
+ if (clips->x1 >= clips->x2 ||
+ clips->y1 >= clips->y2 ||
+ clips->x1 < 0 ||
+ clips->y1 < 0 ||
+ clips->x2 > fb_width ||
+ clips->y2 > fb_height) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid damage clip %d %d %d %d\n",
+ plane->base.id, plane->name, clips->x1,
+ clips->y1, clips->x2, clips->y2);
+ return -EINVAL;
+ }
+ clips++;
+ num_clips--;
+ }
+
+ if (plane_switching_crtc(old_plane_state, new_plane_state)) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] switching CRTC directly\n",
plane->base.id, plane->name);
return -EINVAL;
@@ -927,6 +963,8 @@ int
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
+ const struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
struct drm_plane *plane;
WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
@@ -934,7 +972,7 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
DRM_DEBUG_ATOMIC("Adding all current planes for [CRTC:%d:%s] to %p\n",
crtc->base.id, crtc->name, state);
- drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+ drm_for_each_plane_mask(plane, state->dev, old_crtc_state->plane_mask) {
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
@@ -961,17 +999,19 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
struct drm_plane *plane;
- struct drm_plane_state *plane_state;
+ struct drm_plane_state *old_plane_state;
+ struct drm_plane_state *new_plane_state;
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc_state *new_crtc_state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
- for_each_new_plane_in_state(state, plane, plane_state, i) {
- ret = drm_atomic_plane_check(plane, plane_state);
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
if (ret) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic core check failed\n",
plane->base.id, plane->name);
@@ -979,8 +1019,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- ret = drm_atomic_crtc_check(crtc, crtc_state);
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = drm_atomic_crtc_check(old_crtc_state, new_crtc_state);
if (ret) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic core check failed\n",
crtc->base.id, crtc->name);
@@ -1008,8 +1048,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
if (!state->allow_modeset) {
- for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] requires full modeset\n",
crtc->base.id, crtc->name);
return -EINVAL;
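
The new loop in drm_atomic_plane_check() validates the optional
FB_DAMAGE_CLIPS blob: every rectangle must be well-formed and within
the framebuffer bounds, so drivers can trust the clips they read back.
On the consuming side, a driver's plane update might do something like
the following (foo_flush_rect() is a hypothetical flush primitive):

    struct drm_mode_rect *clip = drm_plane_get_damage_clips(new_state);
    uint32_t num = drm_plane_get_damage_clips_count(new_state);

    if (!num) {
            /* no damage hint: assume the whole framebuffer changed */
            foo_flush_rect(fb, 0, 0, fb->width, fb->height);
    } else {
            for (; num; num--, clip++)
                    foo_flush_rect(fb, clip->x1, clip->y1,
                                   clip->x2 - clip->x1,
                                   clip->y2 - clip->y1);
    }
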
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index d8b526b7932c..54e2ae614dcc 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_writeback.h>
+#include <drm/drm_damage_helper.h>
#include <linux/dma-fence.h>
#include "drm_crtc_helper_internal.h"
@@ -92,6 +93,17 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
}
}
+/*
+ * For connectors that support multiple encoders, either the
+ * .atomic_best_encoder() or .best_encoder() operation must be implemented.
+ */
+static struct drm_encoder *
+pick_single_encoder_for_connector(struct drm_connector *connector)
+{
+ WARN_ON(connector->encoder_ids[1]);
+ return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
+}
+
static int handle_conflicting_encoders(struct drm_atomic_state *state,
bool disable_conflicting_encoders)
{
@@ -119,7 +131,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = drm_atomic_helper_best_encoder(connector);
+ new_encoder = pick_single_encoder_for_connector(connector);
if (new_encoder) {
if (encoder_mask & drm_encoder_mask(new_encoder)) {
@@ -336,7 +348,7 @@ update_connector_routing(struct drm_atomic_state *state,
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
- new_encoder = drm_atomic_helper_best_encoder(connector);
+ new_encoder = pick_single_encoder_for_connector(connector);
if (!new_encoder) {
DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -658,6 +670,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
if (old_connector_state->link_status !=
new_connector_state->link_status)
new_crtc_state->connectors_changed = true;
+
+ if (old_connector_state->max_requested_bpc !=
+ new_connector_state->max_requested_bpc)
+ new_crtc_state->connectors_changed = true;
}
if (funcs->atomic_check)
@@ -847,6 +863,8 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
drm_atomic_helper_plane_changed(state, old_plane_state, new_plane_state, plane);
+ drm_atomic_helper_check_plane_damage(state, new_plane_state);
+
if (!funcs || !funcs->atomic_check)
continue;
@@ -1445,6 +1463,9 @@ void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
crtc->base.id, crtc->name);
}
+
+ if (old_state->fake_commit)
+ complete_all(&old_state->fake_commit->flip_done);
}
EXPORT_SYMBOL(drm_atomic_helper_wait_for_flip_done);
@@ -2202,8 +2223,10 @@ void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *old_state)
spin_unlock(&crtc->commit_lock);
}
- if (old_state->fake_commit)
+ if (old_state->fake_commit) {
complete_all(&old_state->fake_commit->cleanup_done);
+ WARN_ON(!try_wait_for_completion(&old_state->fake_commit->hw_done));
+ }
}
EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
@@ -3108,27 +3131,104 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
struct drm_modeset_acquire_ctx ctx;
int ret;
- drm_modeset_acquire_init(&ctx, 0);
- while (1) {
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
- if (!ret)
- ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
-
- if (ret != -EDEADLK)
- break;
-
- drm_modeset_backoff(&ctx);
- }
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
+ ret = __drm_atomic_helper_disable_all(dev, &ctx, true);
if (ret)
DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
}
EXPORT_SYMBOL(drm_atomic_helper_shutdown);
/**
+ * drm_atomic_helper_duplicate_state - duplicate an atomic state object
+ * @dev: DRM device
+ * @ctx: lock acquisition context
+ *
+ * Makes a copy of the current atomic state by looping over all objects and
+ * duplicating their respective states. This is used for example by suspend/
+ * resume support code to save the state prior to suspend such that it can
+ * be restored upon resume.
+ *
+ * Note that this treats atomic state as persistent between save and restore.
+ * Drivers must make sure that this is possible and won't result in confusion
+ * or erroneous behaviour.
+ *
+ * Note that if callers haven't already acquired all modeset locks this might
+ * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
+ *
+ * Returns:
+ * A pointer to the copy of the atomic state object on success or an
+ * ERR_PTR()-encoded error code on failure.
+ *
+ * See also:
+ * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
+ */
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ struct drm_atomic_state *state;
+ struct drm_connector *conn;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ int err = 0;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ state->acquire_ctx = ctx;
+
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ err = PTR_ERR(crtc_state);
+ goto free;
+ }
+ }
+
+ drm_for_each_plane(plane, dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ err = PTR_ERR(plane_state);
+ goto free;
+ }
+ }
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(conn, &conn_iter) {
+ struct drm_connector_state *conn_state;
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ if (IS_ERR(conn_state)) {
+ err = PTR_ERR(conn_state);
+ drm_connector_list_iter_end(&conn_iter);
+ goto free;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ /* clear the acquire context so that it isn't accidentally reused */
+ state->acquire_ctx = NULL;
+
+free:
+ if (err < 0) {
+ drm_atomic_state_put(state);
+ state = ERR_PTR(err);
+ }
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
+
+/**
* drm_atomic_helper_suspend - subsystem-level suspend helper
* @dev: DRM device
*
@@ -3159,14 +3259,10 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
struct drm_atomic_state *state;
int err;
- drm_modeset_acquire_init(&ctx, 0);
+ /* This can never be returned, but it makes the compiler happy */
+ state = ERR_PTR(-EINVAL);
-retry:
- err = drm_modeset_lock_all_ctx(dev, &ctx);
- if (err < 0) {
- state = ERR_PTR(err);
- goto unlock;
- }
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
state = drm_atomic_helper_duplicate_state(dev, &ctx);
if (IS_ERR(state))
@@ -3180,13 +3276,10 @@ retry:
}
unlock:
- if (PTR_ERR(state) == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
+ DRM_MODESET_LOCK_ALL_END(ctx, err);
+ if (err)
+ return ERR_PTR(err);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
return state;
}
EXPORT_SYMBOL(drm_atomic_helper_suspend);
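
All three conversions in this file (shutdown, suspend, resume) switch
to the DRM_MODESET_LOCK_ALL_BEGIN()/END() macro pair, which hides the
-EDEADLK backoff-and-retry dance previously open-coded around
drm_modeset_lock_all_ctx(). The shape of the pattern, reduced to its
essentials:

    struct drm_modeset_acquire_ctx ctx;
    int ret;

    DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

    /* runs with all modeset locks held; if ret ends up being
     * -EDEADLK, the END macro backs off and jumps back to BEGIN */
    state = drm_atomic_helper_duplicate_state(dev, &ctx);
    if (IS_ERR(state))
            ret = PTR_ERR(state);

    DRM_MODESET_LOCK_ALL_END(ctx, ret); /* drops locks, finishes ctx */
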
@@ -3209,7 +3302,7 @@ EXPORT_SYMBOL(drm_atomic_helper_suspend);
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- int i;
+ int i, ret;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct drm_connector *connector;
@@ -3228,7 +3321,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
for_each_new_connector_in_state(state, connector, new_conn_state, i)
state->connectors[i].old_state = connector->state;
- return drm_atomic_commit(state);
+ ret = drm_atomic_commit(state);
+
+ state->acquire_ctx = NULL;
+
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
@@ -3256,23 +3353,12 @@ int drm_atomic_helper_resume(struct drm_device *dev,
drm_mode_config_reset(dev);
- drm_modeset_acquire_init(&ctx, 0);
- while (1) {
- err = drm_modeset_lock_all_ctx(dev, &ctx);
- if (err)
- goto out;
-
- err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
-out:
- if (err != -EDEADLK)
- break;
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err);
- drm_modeset_backoff(&ctx);
- }
+ err = drm_atomic_helper_commit_duplicated_state(state, &ctx);
+ DRM_MODESET_LOCK_ALL_END(ctx, err);
drm_atomic_state_put(state);
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
return err;
}
@@ -3413,504 +3499,6 @@ fail:
EXPORT_SYMBOL(drm_atomic_helper_page_flip_target);
/**
- * drm_atomic_helper_best_encoder - Helper for
- * &drm_connector_helper_funcs.best_encoder callback
- * @connector: Connector control structure
- *
- * This is a &drm_connector_helper_funcs.best_encoder callback helper for
- * connectors that support exactly 1 encoder, statically determined at driver
- * init time.
- */
-struct drm_encoder *
-drm_atomic_helper_best_encoder(struct drm_connector *connector)
-{
- WARN_ON(connector->encoder_ids[1]);
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
-}
-EXPORT_SYMBOL(drm_atomic_helper_best_encoder);
-
-/**
- * DOC: atomic state reset and initialization
- *
- * Both the drm core and the atomic helpers assume that there is always the full
- * and correct atomic software state for all connectors, CRTCs and planes
- * available. Which is a bit a problem on driver load and also after system
- * suspend. One way to solve this is to have a hardware state read-out
- * infrastructure which reconstructs the full software state (e.g. the i915
- * driver).
- *
- * The simpler solution is to just reset the software state to everything off,
- * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
- * the atomic helpers provide default reset implementations for all hooks.
- *
- * On the upside the precise state tracking of atomic simplifies system suspend
- * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
- * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
- * For other drivers the building blocks are split out, see the documentation
- * for these functions.
- */
-
-/**
- * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
- * @crtc: drm CRTC
- *
- * Resets the atomic state for @crtc by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- */
-void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
-{
- if (crtc->state)
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
-
- kfree(crtc->state);
- crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
-
- if (crtc->state)
- crtc->state->crtc = crtc;
-}
-EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
-
-/**
- * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
- * @crtc: CRTC object
- * @state: atomic CRTC state
- *
- * Copies atomic state from a CRTC's current state and resets inferred values.
- * This is useful for drivers that subclass the CRTC state.
- */
-void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- memcpy(state, crtc->state, sizeof(*state));
-
- if (state->mode_blob)
- drm_property_blob_get(state->mode_blob);
- if (state->degamma_lut)
- drm_property_blob_get(state->degamma_lut);
- if (state->ctm)
- drm_property_blob_get(state->ctm);
- if (state->gamma_lut)
- drm_property_blob_get(state->gamma_lut);
- state->mode_changed = false;
- state->active_changed = false;
- state->planes_changed = false;
- state->connectors_changed = false;
- state->color_mgmt_changed = false;
- state->zpos_changed = false;
- state->commit = NULL;
- state->event = NULL;
- state->pageflip_flags = 0;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
-
-/**
- * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
- * @crtc: drm CRTC
- *
- * Default CRTC state duplicate hook for drivers which don't have their own
- * subclassed CRTC state structure.
- */
-struct drm_crtc_state *
-drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
-{
- struct drm_crtc_state *state;
-
- if (WARN_ON(!crtc->state))
- return NULL;
-
- state = kmalloc(sizeof(*state), GFP_KERNEL);
- if (state)
- __drm_atomic_helper_crtc_duplicate_state(crtc, state);
-
- return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
-
-/**
- * __drm_atomic_helper_crtc_destroy_state - release CRTC state
- * @state: CRTC state object to release
- *
- * Releases all resources stored in the CRTC state without actually freeing
- * the memory of the CRTC state. This is useful for drivers that subclass the
- * CRTC state.
- */
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
-{
- if (state->commit) {
- /*
- * In the event that a non-blocking commit returns
- * -ERESTARTSYS before the commit_tail work is queued, we will
- * have an extra reference to the commit object. Release it, if
- * the event has not been consumed by the worker.
- *
- * state->event may be freed, so we can't directly look at
- * state->event->base.completion.
- */
- if (state->event && state->commit->abort_completion)
- drm_crtc_commit_put(state->commit);
-
- kfree(state->commit->event);
- state->commit->event = NULL;
-
- drm_crtc_commit_put(state->commit);
- }
-
- drm_property_blob_put(state->mode_blob);
- drm_property_blob_put(state->degamma_lut);
- drm_property_blob_put(state->ctm);
- drm_property_blob_put(state->gamma_lut);
-}
-EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
-
-/**
- * drm_atomic_helper_crtc_destroy_state - default state destroy hook
- * @crtc: drm CRTC
- * @state: CRTC state object to release
- *
- * Default CRTC state destroy hook for drivers which don't have their own
- * subclassed CRTC state structure.
- */
-void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- __drm_atomic_helper_crtc_destroy_state(state);
- kfree(state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
-
-/**
- * __drm_atomic_helper_plane_reset - resets planes state to default values
- * @plane: plane object, must not be NULL
- * @state: atomic plane state, must not be NULL
- *
- * Initializes plane state to default. This is useful for drivers that subclass
- * the plane state.
- */
-void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- state->plane = plane;
- state->rotation = DRM_MODE_ROTATE_0;
-
- state->alpha = DRM_BLEND_ALPHA_OPAQUE;
- state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
-
- plane->state = state;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
-
-/**
- * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
- * @plane: drm plane
- *
- * Resets the atomic state for @plane by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- */
-void drm_atomic_helper_plane_reset(struct drm_plane *plane)
-{
- if (plane->state)
- __drm_atomic_helper_plane_destroy_state(plane->state);
-
- kfree(plane->state);
- plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
- if (plane->state)
- __drm_atomic_helper_plane_reset(plane, plane->state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
-
-/**
- * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
- * @plane: plane object
- * @state: atomic plane state
- *
- * Copies atomic state from a plane's current state. This is useful for
- * drivers that subclass the plane state.
- */
-void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- memcpy(state, plane->state, sizeof(*state));
-
- if (state->fb)
- drm_framebuffer_get(state->fb);
-
- state->fence = NULL;
- state->commit = NULL;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
-
-/**
- * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
- * @plane: drm plane
- *
- * Default plane state duplicate hook for drivers which don't have their own
- * subclassed plane state structure.
- */
-struct drm_plane_state *
-drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
-{
- struct drm_plane_state *state;
-
- if (WARN_ON(!plane->state))
- return NULL;
-
- state = kmalloc(sizeof(*state), GFP_KERNEL);
- if (state)
- __drm_atomic_helper_plane_duplicate_state(plane, state);
-
- return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
-
-/**
- * __drm_atomic_helper_plane_destroy_state - release plane state
- * @state: plane state object to release
- *
- * Releases all resources stored in the plane state without actually freeing
- * the memory of the plane state. This is useful for drivers that subclass the
- * plane state.
- */
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
-{
- if (state->fb)
- drm_framebuffer_put(state->fb);
-
- if (state->fence)
- dma_fence_put(state->fence);
-
- if (state->commit)
- drm_crtc_commit_put(state->commit);
-}
-EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
-
-/**
- * drm_atomic_helper_plane_destroy_state - default state destroy hook
- * @plane: drm plane
- * @state: plane state object to release
- *
- * Default plane state destroy hook for drivers which don't have their own
- * subclassed plane state structure.
- */
-void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- __drm_atomic_helper_plane_destroy_state(state);
- kfree(state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
-
-/**
- * __drm_atomic_helper_connector_reset - reset state on connector
- * @connector: drm connector
- * @conn_state: connector state to assign
- *
- * Initializes the newly allocated @conn_state and assigns it to
- * the &drm_conector->state pointer of @connector, usually required when
- * initializing the drivers or when called from the &drm_connector_funcs.reset
- * hook.
- *
- * This is useful for drivers that subclass the connector state.
- */
-void
-__drm_atomic_helper_connector_reset(struct drm_connector *connector,
- struct drm_connector_state *conn_state)
-{
- if (conn_state)
- conn_state->connector = connector;
-
- connector->state = conn_state;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
-
-/**
- * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
- * @connector: drm connector
- *
- * Resets the atomic state for @connector by freeing the state pointer (which
- * might be NULL, e.g. at driver load time) and allocating a new empty state
- * object.
- */
-void drm_atomic_helper_connector_reset(struct drm_connector *connector)
-{
- struct drm_connector_state *conn_state =
- kzalloc(sizeof(*conn_state), GFP_KERNEL);
-
- if (connector->state)
- __drm_atomic_helper_connector_destroy_state(connector->state);
-
- kfree(connector->state);
- __drm_atomic_helper_connector_reset(connector, conn_state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
-
-/**
- * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
- * @connector: connector object
- * @state: atomic connector state
- *
- * Copies atomic state from a connector's current state. This is useful for
- * drivers that subclass the connector state.
- */
-void
-__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
- struct drm_connector_state *state)
-{
- memcpy(state, connector->state, sizeof(*state));
- if (state->crtc)
- drm_connector_get(connector);
- state->commit = NULL;
-
- /* Don't copy over a writeback job, they are used only once */
- state->writeback_job = NULL;
-}
-EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
-
-/**
- * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
- * @connector: drm connector
- *
- * Default connector state duplicate hook for drivers which don't have their own
- * subclassed connector state structure.
- */
-struct drm_connector_state *
-drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
-{
- struct drm_connector_state *state;
-
- if (WARN_ON(!connector->state))
- return NULL;
-
- state = kmalloc(sizeof(*state), GFP_KERNEL);
- if (state)
- __drm_atomic_helper_connector_duplicate_state(connector, state);
-
- return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
-
-/**
- * drm_atomic_helper_duplicate_state - duplicate an atomic state object
- * @dev: DRM device
- * @ctx: lock acquisition context
- *
- * Makes a copy of the current atomic state by looping over all objects and
- * duplicating their respective states. This is used for example by suspend/
- * resume support code to save the state prior to suspend such that it can
- * be restored upon resume.
- *
- * Note that this treats atomic state as persistent between save and restore.
- * Drivers must make sure that this is possible and won't result in confusion
- * or erroneous behaviour.
- *
- * Note that if callers haven't already acquired all modeset locks this might
- * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
- *
- * Returns:
- * A pointer to the copy of the atomic state object on success or an
- * ERR_PTR()-encoded error code on failure.
- *
- * See also:
- * drm_atomic_helper_suspend(), drm_atomic_helper_resume()
- */
-struct drm_atomic_state *
-drm_atomic_helper_duplicate_state(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_atomic_state *state;
- struct drm_connector *conn;
- struct drm_connector_list_iter conn_iter;
- struct drm_plane *plane;
- struct drm_crtc *crtc;
- int err = 0;
-
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return ERR_PTR(-ENOMEM);
-
- state->acquire_ctx = ctx;
-
- drm_for_each_crtc(crtc, dev) {
- struct drm_crtc_state *crtc_state;
-
- crtc_state = drm_atomic_get_crtc_state(state, crtc);
- if (IS_ERR(crtc_state)) {
- err = PTR_ERR(crtc_state);
- goto free;
- }
- }
-
- drm_for_each_plane(plane, dev) {
- struct drm_plane_state *plane_state;
-
- plane_state = drm_atomic_get_plane_state(state, plane);
- if (IS_ERR(plane_state)) {
- err = PTR_ERR(plane_state);
- goto free;
- }
- }
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(conn, &conn_iter) {
- struct drm_connector_state *conn_state;
-
- conn_state = drm_atomic_get_connector_state(state, conn);
- if (IS_ERR(conn_state)) {
- err = PTR_ERR(conn_state);
- drm_connector_list_iter_end(&conn_iter);
- goto free;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
-
- /* clear the acquire context so that it isn't accidentally reused */
- state->acquire_ctx = NULL;
-
-free:
- if (err < 0) {
- drm_atomic_state_put(state);
- state = ERR_PTR(err);
- }
-
- return state;
-}
-EXPORT_SYMBOL(drm_atomic_helper_duplicate_state);
-
-/**
- * __drm_atomic_helper_connector_destroy_state - release connector state
- * @state: connector state object to release
- *
- * Releases all resources stored in the connector state without actually
- * freeing the memory of the connector state. This is useful for drivers that
- * subclass the connector state.
- */
-void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
-{
- if (state->crtc)
- drm_connector_put(state->connector);
-
- if (state->commit)
- drm_crtc_commit_put(state->commit);
-}
-EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
-
-/**
- * drm_atomic_helper_connector_destroy_state - default state destroy hook
- * @connector: drm connector
- * @state: connector state object to release
- *
- * Default connector state destroy hook for drivers which don't have their own
- * subclassed connector state structure.
- */
-void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
- struct drm_connector_state *state)
-{
- __drm_atomic_helper_connector_destroy_state(state);
- kfree(state);
-}
-EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
-
-/**
* drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table
* @crtc: CRTC object
* @red: red correction table
@@ -3979,18 +3567,3 @@ fail:
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
-
-/**
- * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state
- * @obj: private object
- * @state: new private object state
- *
- * Copies atomic state from a private object's current state.
- * This is useful for drivers that subclass the private state.
- */
-void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
- struct drm_private_state *state)
-{
- memcpy(state, obj->state, sizeof(*state));
-}
-EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
new file mode 100644
index 000000000000..60bd7d708e35
--- /dev/null
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -0,0 +1,444 @@
+/*
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_device.h>
+
+#include <linux/slab.h>
+#include <linux/dma-fence.h>
+
+/**
+ * DOC: atomic state reset and initialization
+ *
+ * Both the drm core and the atomic helpers assume that there is always the full
+ * and correct atomic software state for all connectors, CRTCs and planes
+ * available. This is a bit of a problem on driver load and also after system
+ * suspend. One way to solve this is to have a hardware state read-out
+ * infrastructure which reconstructs the full software state (e.g. the i915
+ * driver).
+ *
+ * The simpler solution is to just reset the software state to everything off,
+ * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
+ * the atomic helpers provide default reset implementations for all hooks.
+ *
+ * On the upside, the precise state tracking of atomic simplifies system suspend
+ * and resume a lot. For drivers using drm_mode_config_reset() a complete recipe
+ * is implemented in drm_atomic_helper_suspend() and drm_atomic_helper_resume().
+ * For other drivers the building blocks are split out, see the documentation
+ * for these functions.
+ */
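A quick illustration of how these defaults get consumed (an editorial sketch, not part of this patch; all "foo_" names are hypothetical): a driver that does not subclass any state objects can plug the helpers straight into its funcs tables, assuming the usual DRM headers.

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		/* default atomic state hooks provided by this file */
		.reset			= drm_atomic_helper_crtc_reset,
		.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
		/* .destroy, .set_config, .page_flip, ... omitted */
	};

	static const struct drm_plane_funcs foo_plane_funcs = {
		.reset			= drm_atomic_helper_plane_reset,
		.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
	};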
+
+/**
+ * drm_atomic_helper_crtc_reset - default &drm_crtc_funcs.reset hook for CRTCs
+ * @crtc: drm CRTC
+ *
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
+{
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ kfree(crtc->state);
+ crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
+
+ if (crtc->state)
+ crtc->state->crtc = crtc;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
+
+/**
+ * __drm_atomic_helper_crtc_duplicate_state - copy atomic CRTC state
+ * @crtc: CRTC object
+ * @state: atomic CRTC state
+ *
+ * Copies atomic state from a CRTC's current state and resets inferred values.
+ * This is useful for drivers that subclass the CRTC state.
+ */
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ memcpy(state, crtc->state, sizeof(*state));
+
+ if (state->mode_blob)
+ drm_property_blob_get(state->mode_blob);
+ if (state->degamma_lut)
+ drm_property_blob_get(state->degamma_lut);
+ if (state->ctm)
+ drm_property_blob_get(state->ctm);
+ if (state->gamma_lut)
+ drm_property_blob_get(state->gamma_lut);
+ state->mode_changed = false;
+ state->active_changed = false;
+ state->planes_changed = false;
+ state->connectors_changed = false;
+ state->color_mgmt_changed = false;
+ state->zpos_changed = false;
+ state->commit = NULL;
+ state->event = NULL;
+ state->pageflip_flags = 0;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_duplicate_state);
+
+/**
+ * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
+ * @crtc: drm CRTC
+ *
+ * Default CRTC state duplicate hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_crtc_duplicate_state(crtc, state);
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
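For drivers that do subclass the CRTC state, the usual pattern is to embed &drm_crtc_state as the first member and let __drm_atomic_helper_crtc_duplicate_state() copy the base. A hedged sketch (struct and member names hypothetical; the helper only copies the base, so driver-private members must be copied by hand):

	struct foo_crtc_state {
		struct drm_crtc_state base;
		u32 scaler_config;	/* hypothetical driver-private member */
	};

	static struct drm_crtc_state *
	foo_crtc_duplicate_state(struct drm_crtc *crtc)
	{
		struct foo_crtc_state *state, *old;

		if (WARN_ON(!crtc->state))
			return NULL;

		old = container_of(crtc->state, struct foo_crtc_state, base);
		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;

		/* copies the base and takes blob/fb references */
		__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
		state->scaler_config = old->scaler_config;

		return &state->base;
	}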
+
+/**
+ * __drm_atomic_helper_crtc_destroy_state - release CRTC state
+ * @state: CRTC state object to release
+ *
+ * Releases all resources stored in the CRTC state without actually freeing
+ * the memory of the CRTC state. This is useful for drivers that subclass the
+ * CRTC state.
+ */
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
+{
+ if (state->commit) {
+ /*
+ * In the event that a non-blocking commit returns
+ * -ERESTARTSYS before the commit_tail work is queued, we will
+ * have an extra reference to the commit object. Release it, if
+ * the event has not been consumed by the worker.
+ *
+ * state->event may be freed, so we can't directly look at
+ * state->event->base.completion.
+ */
+ if (state->event && state->commit->abort_completion)
+ drm_crtc_commit_put(state->commit);
+
+ kfree(state->commit->event);
+ state->commit->event = NULL;
+
+ drm_crtc_commit_put(state->commit);
+ }
+
+ drm_property_blob_put(state->mode_blob);
+ drm_property_blob_put(state->degamma_lut);
+ drm_property_blob_put(state->ctm);
+ drm_property_blob_put(state->gamma_lut);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_crtc_destroy_state);
+
+/**
+ * drm_atomic_helper_crtc_destroy_state - default state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ *
+ * Default CRTC state destroy hook for drivers which don't have their own
+ * subclassed CRTC state structure.
+ */
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
+
+/**
+ * __drm_atomic_helper_plane_reset - resets planes state to default values
+ * @plane: plane object, must not be NULL
+ * @state: atomic plane state, must not be NULL
+ *
+ * Initializes plane state to default. This is useful for drivers that subclass
+ * the plane state.
+ */
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ state->plane = plane;
+ state->rotation = DRM_MODE_ROTATE_0;
+
+ state->alpha = DRM_BLEND_ALPHA_OPAQUE;
+ state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+
+ plane->state = state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
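The matching reset pattern for a subclassed plane state might look like the sketch below (hypothetical "foo_" names; struct foo_plane_state is assumed to embed &drm_plane_state as its "base" member, and the driver's destroy hook is assumed to free the old state):

	struct foo_plane_state {
		struct drm_plane_state base;
		bool use_scaler;	/* hypothetical driver-private member */
	};

	static void foo_plane_reset(struct drm_plane *plane)
	{
		struct foo_plane_state *state;

		if (plane->state)
			plane->funcs->atomic_destroy_state(plane, plane->state);
		plane->state = NULL;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (state)
			/* fills in defaults and assigns plane->state */
			__drm_atomic_helper_plane_reset(plane, &state->base);
	}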
+
+/**
+ * drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
+ * @plane: drm plane
+ *
+ * Resets the atomic state for @plane by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ */
+void drm_atomic_helper_plane_reset(struct drm_plane *plane)
+{
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(plane->state);
+ plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
+ if (plane->state)
+ __drm_atomic_helper_plane_reset(plane, plane->state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
+
+/**
+ * __drm_atomic_helper_plane_duplicate_state - copy atomic plane state
+ * @plane: plane object
+ * @state: atomic plane state
+ *
+ * Copies atomic state from a plane's current state. This is useful for
+ * drivers that subclass the plane state.
+ */
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ memcpy(state, plane->state, sizeof(*state));
+
+ if (state->fb)
+ drm_framebuffer_get(state->fb);
+
+ state->fence = NULL;
+ state->commit = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_duplicate_state);
+
+/**
+ * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
+ * @plane: drm plane
+ *
+ * Default plane state duplicate hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_plane_state *state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_plane_duplicate_state(plane, state);
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
+
+/**
+ * __drm_atomic_helper_plane_destroy_state - release plane state
+ * @state: plane state object to release
+ *
+ * Releases all resources stored in the plane state without actually freeing
+ * the memory of the plane state. This is useful for drivers that subclass the
+ * plane state.
+ */
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state)
+{
+ if (state->fb)
+ drm_framebuffer_put(state->fb);
+
+ if (state->fence)
+ dma_fence_put(state->fence);
+
+ if (state->commit)
+ drm_crtc_commit_put(state->commit);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_destroy_state);
+
+/**
+ * drm_atomic_helper_plane_destroy_state - default state destroy hook
+ * @plane: drm plane
+ * @state: plane state object to release
+ *
+ * Default plane state destroy hook for drivers which don't have their own
+ * subclassed plane state structure.
+ */
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
+
+/**
+ * __drm_atomic_helper_connector_reset - reset state on connector
+ * @connector: drm connector
+ * @conn_state: connector state to assign
+ *
+ * Initializes the newly allocated @conn_state and assigns it to
+ * the &drm_connector->state pointer of @connector, usually required when
+ * initializing the drivers or when called from the &drm_connector_funcs.reset
+ * hook.
+ *
+ * This is useful for drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_reset(struct drm_connector *connector,
+ struct drm_connector_state *conn_state)
+{
+ if (conn_state)
+ conn_state->connector = connector;
+
+ connector->state = conn_state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_reset);
+
+/**
+ * drm_atomic_helper_connector_reset - default &drm_connector_funcs.reset hook for connectors
+ * @connector: drm connector
+ *
+ * Resets the atomic state for @connector by freeing the state pointer (which
+ * might be NULL, e.g. at driver load time) and allocating a new empty state
+ * object.
+ */
+void drm_atomic_helper_connector_reset(struct drm_connector *connector)
+{
+ struct drm_connector_state *conn_state =
+ kzalloc(sizeof(*conn_state), GFP_KERNEL);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(connector->state);
+ __drm_atomic_helper_connector_reset(connector, conn_state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
+
+/**
+ * __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
+ * @connector: connector object
+ * @state: atomic connector state
+ *
+ * Copies atomic state from a connector's current state. This is useful for
+ * drivers that subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ memcpy(state, connector->state, sizeof(*state));
+ if (state->crtc)
+ drm_connector_get(connector);
+ state->commit = NULL;
+
+ /* Don't copy over a writeback job; it is used only once */
+ state->writeback_job = NULL;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_duplicate_state);
+
+/**
+ * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
+ * @connector: drm connector
+ *
+ * Default connector state duplicate hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
+{
+ struct drm_connector_state *state;
+
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_connector_duplicate_state(connector, state);
+
+ return state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
+
+/**
+ * __drm_atomic_helper_connector_destroy_state - release connector state
+ * @state: connector state object to release
+ *
+ * Releases all resources stored in the connector state without actually
+ * freeing the memory of the connector state. This is useful for drivers that
+ * subclass the connector state.
+ */
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
+{
+ if (state->crtc)
+ drm_connector_put(state->connector);
+
+ if (state->commit)
+ drm_crtc_commit_put(state->commit);
+}
+EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
+
+/**
+ * drm_atomic_helper_connector_destroy_state - default state destroy hook
+ * @connector: drm connector
+ * @state: connector state object to release
+ *
+ * Default connector state destroy hook for drivers which don't have their own
+ * subclassed connector state structure.
+ */
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ __drm_atomic_helper_connector_destroy_state(state);
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
+
+/**
+ * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state
+ * @obj: private object
+ * @state: new private object state
+ *
+ * Copies atomic state from a private object's current state.
+ * This is useful for drivers that subclass the private state.
+ */
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ memcpy(state, obj->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
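A hedged sketch of how a driver-private object might use this helper in its &drm_private_state_funcs.atomic_duplicate_state hook (all "foo_" names hypothetical; as above, only the base is copied, so private members are copied by hand):

	struct foo_mux_state {
		struct drm_private_state base;
		unsigned int active_input;	/* hypothetical private member */
	};

	static struct drm_private_state *
	foo_mux_duplicate_state(struct drm_private_obj *obj)
	{
		struct foo_mux_state *old =
			container_of(obj->state, struct foo_mux_state, base);
		struct foo_mux_state *state;

		state = kzalloc(sizeof(*state), GFP_KERNEL);
		if (!state)
			return NULL;

		__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
		state->active_input = old->active_input;

		return &state->base;
	}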
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index d5b7f315098c..c40889888a16 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -433,6 +433,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
ret = drm_atomic_set_mode_prop_for_crtc(state, mode);
drm_property_blob_put(mode);
return ret;
+ } else if (property == config->prop_vrr_enabled) {
+ state->vrr_enabled = val;
} else if (property == config->degamma_lut_property) {
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->degamma_lut,
@@ -491,6 +493,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = state->active;
else if (property == config->prop_mode_id)
*val = (state->mode_blob) ? state->mode_blob->base.id : 0;
+ else if (property == config->prop_vrr_enabled)
+ *val = state->vrr_enabled;
else if (property == config->degamma_lut_property)
*val = (state->degamma_lut) ? state->degamma_lut->base.id : 0;
else if (property == config->ctm_property)
@@ -513,6 +517,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
+ bool replaced = false;
+ int ret;
if (property == config->prop_fb_id) {
struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
@@ -566,6 +572,14 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
state->color_encoding = val;
} else if (property == plane->color_range_property) {
state->color_range = val;
+ } else if (property == config->prop_fb_damage_clips) {
+ ret = drm_atomic_replace_property_blob_from_id(dev,
+ &state->fb_damage_clips,
+ val,
+ -1,
+ sizeof(struct drm_rect),
+ &replaced);
+ return ret;
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
@@ -621,6 +635,9 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->color_encoding;
} else if (property == plane->color_range_property) {
*val = state->color_range;
+ } else if (property == config->prop_fb_damage_clips) {
+ *val = (state->fb_damage_clips) ?
+ state->fb_damage_clips->base.id : 0;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
@@ -740,6 +757,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return set_out_fence_for_connector(state->state, connector,
fence_ptr);
+ } else if (property == connector->max_bpc_property) {
+ state->max_requested_bpc = val;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
@@ -804,6 +823,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = 0;
} else if (property == config->writeback_out_fence_ptr_property) {
*val = 0;
+ } else if (property == connector->max_bpc_property) {
+ *val = state->max_requested_bpc;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index d9c0f7573905..1669c42c40ed 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -142,6 +142,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
lockdep_assert_held_once(&dev->master_mutex);
+ WARN_ON(fpriv->is_master);
old_master = fpriv->master;
fpriv->master = drm_master_create(dev);
if (!fpriv->master) {
@@ -170,6 +171,7 @@ out_err:
/* drop references and restore old master on failure */
drm_master_put(&fpriv->master);
fpriv->master = old_master;
+ fpriv->is_master = 0;
return ret;
}
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 7412acaf3cde..d7d10cabb9bb 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -36,6 +36,8 @@
#include <drm/drmP.h>
#include "drm_legacy.h"
+#include <linux/nospec.h>
+
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
{
@@ -1417,6 +1419,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
idx, dma->buf_count - 1);
return -EINVAL;
}
+ idx = array_index_nospec(idx, dma->buf_count);
buf = dma->buflist[idx];
if (buf->file_priv != file_priv) {
DRM_ERROR("Process %d freeing buffer not owned\n",
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fc03d26fcacc..9b2bd28dde0a 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -81,8 +81,7 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
{
int ret;
- if (!drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev->driver->dumb_create || !dev->driver->gem_prime_vmap)
+ if (!drm_core_check_feature(dev, DRIVER_MODESET) || !dev->driver->dumb_create)
return -EOPNOTSUPP;
if (funcs && !try_module_get(funcs->owner))
@@ -229,8 +228,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
struct drm_device *dev = buffer->client->dev;
- if (buffer->vaddr && dev->driver->gem_prime_vunmap)
- dev->driver->gem_prime_vunmap(buffer->gem, buffer->vaddr);
+ drm_gem_vunmap(buffer->gem, buffer->vaddr);
if (buffer->gem)
drm_gem_object_put_unlocked(buffer->gem);
@@ -283,9 +281,9 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
* fd_install step out of the driver backend hooks, to make that
* final step optional for internal users.
*/
- vaddr = dev->driver->gem_prime_vmap(obj);
- if (!vaddr) {
- ret = -ENOMEM;
+ vaddr = drm_gem_vmap(obj);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
goto err_delete;
}
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 581cc3788223..07dcf47daafe 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -255,11 +255,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
if (crtc_lut->gamma_size != crtc->gamma_size)
return -EINVAL;
- drm_modeset_acquire_init(&ctx, 0);
-retry:
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
- if (ret)
- goto out;
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
size = crtc_lut->gamma_size * (sizeof(uint16_t));
r_base = crtc->gamma_store;
@@ -284,13 +280,7 @@ retry:
crtc->gamma_size, &ctx);
out:
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
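The DRM_MODESET_LOCK_ALL_BEGIN/END pair used above replaces the open-coded acquire/retry/backoff dance. A minimal sketch of the pattern (function name hypothetical); note that any state built up between the two macros must be safe to redo, since -EDEADLK jumps back to BEGIN:

	static int foo_with_all_locks(struct drm_device *dev)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

		/* ... read or modify modeset state here ... */

		DRM_MODESET_LOCK_ALL_END(ctx, ret);
		return ret;
	}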
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 4943cef178be..da8ae80c2750 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -260,9 +260,7 @@ int drm_connector_init(struct drm_device *dev,
if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL &&
connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
- drm_object_attach_property(&connector->base,
- config->edid_property,
- 0);
+ drm_connector_attach_edid_property(connector);
drm_object_attach_property(&connector->base,
config->dpms_property, 0);
@@ -295,6 +293,24 @@ out_put:
EXPORT_SYMBOL(drm_connector_init);
/**
+ * drm_connector_attach_edid_property - attach edid property.
+ * @connector: the connector
+ *
+ * Some connector types like DRM_MODE_CONNECTOR_VIRTUAL do not get an
+ * edid property attached by default. This function can be used to
+ * explicitly enable the edid property in these cases.
+ */
+void drm_connector_attach_edid_property(struct drm_connector *connector)
+{
+ struct drm_mode_config *config = &connector->dev->mode_config;
+
+ drm_object_attach_property(&connector->base,
+ config->edid_property,
+ 0);
+}
+EXPORT_SYMBOL(drm_connector_attach_edid_property);
+
+/**
* drm_connector_attach_encoder - attach a connector to an encoder
* @connector: connector to attach
* @encoder: encoder to attach @connector to
@@ -916,6 +932,13 @@ DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
* is no longer protected and userspace should take appropriate action
* (whatever that might be).
*
+ * max bpc:
+ * This range property is used by userspace to limit the bit depth. When
+ * used, the driver limits the bpc in accordance with the valid range
+ * supported by the hardware and sink. Drivers should use the function
+ * drm_connector_attach_max_bpc_property() to create and attach the
+ * property to the connector during initialization.
+ *
* Connectors also have one standardized atomic property:
*
* CRTC_ID:
@@ -1256,6 +1279,105 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
+ * DOC: Variable refresh properties
+ *
+ * Variable refresh rate capable displays can dynamically adjust their
+ * refresh rate by extending the duration of their vertical front porch
+ * until page flip or timeout occurs. This can reduce or remove stuttering
+ * and latency in scenarios where the page flip does not align with the
+ * vblank interval.
+ *
+ * An example scenario would be an application flipping at a constant rate
+ * of 48Hz on a 60Hz display. The page flip will frequently miss the vblank
+ * interval and the same contents will be displayed twice. This can be
+ * observed as stuttering for content with motion.
+ *
+ * If variable refresh rate was active on a display that supported a
+ * variable refresh range from 35Hz to 60Hz, no stuttering would be observable
+ * for the example scenario. The minimum supported variable refresh rate of
+ * 35Hz is below the page flip frequency and the vertical front porch can
+ * be extended until the page flip occurs. The vblank interval will be
+ * directly aligned to the page flip rate.
+ *
+ * Not all userspace content is suitable for use with variable refresh rate.
+ * Large and frequent changes in vertical front porch duration may worsen
+ * perceived stuttering for input sensitive applications.
+ *
+ * Panel brightness will also vary with vertical front porch duration. Some
+ * panels may have noticeable differences in brightness between the minimum
+ * vertical front porch duration and the maximum vertical front porch duration.
+ * Large and frequent changes in vertical front porch duration may produce
+ * observable flickering for such panels.
+ *
+ * Userspace control for variable refresh rate is supported via properties
+ * on the &drm_connector and &drm_crtc objects.
+ *
+ * "vrr_capable":
+ * Optional &drm_connector boolean property that drivers should attach
+ * with drm_connector_attach_vrr_capable_property() on connectors that
+ * could support variable refresh rates. Drivers should update the
+ * property value by calling drm_connector_set_vrr_capable_property().
+ *
+ * Absence of the property should indicate absence of support.
+ *
+ * "vrr_enabled":
+ * Default &drm_crtc boolean property that notifies the driver that the
+ * content on the CRTC is suitable for variable refresh rate presentation.
+ * The driver will take this property as a hint to enable variable
+ * refresh rate support if the receiver supports it, i.e. if the
+ * "vrr_capable" property is true on the &drm_connector object. The
+ * vertical front porch duration will be extended until page-flip or
+ * timeout when enabled.
+ *
+ * The minimum vertical front porch duration is defined as the vertical
+ * front porch duration for the current mode.
+ *
+ * The maximum vertical front porch duration is greater than or equal to
+ * the minimum vertical front porch duration. The duration is derived
+ * from the minimum supported variable refresh rate for the connector.
+ *
+ * The driver may place further restrictions within these minimum
+ * and maximum bounds.
+ *
+ * The semantics for the vertical blank timestamp differ when
+ * variable refresh rate is active. The vertical blank timestamp
+ * is defined to be an estimate using the current mode's fixed
+ * refresh rate timings. The semantics for the page-flip event
+ * timestamp remain the same.
+ */
+
+/**
+ * drm_connector_attach_vrr_capable_property - creates the
+ * vrr_capable property
+ * @connector: connector to create the vrr_capable property on.
+ *
+ * This is used by atomic drivers to add support for querying
+ * variable refresh rate capability for a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_vrr_capable_property(
+ struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop;
+
+ if (!connector->vrr_capable_property) {
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE,
+ "vrr_capable");
+ if (!prop)
+ return -ENOMEM;
+
+ connector->vrr_capable_property = prop;
+ drm_object_attach_property(&connector->base, prop, 0);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_vrr_capable_property);
+
+/**
* drm_connector_attach_scaling_mode_property - attach atomic scaling mode property
* @connector: connector to attach scaling mode property on.
* @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*).
@@ -1584,6 +1706,58 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_connector_set_link_status_property);
/**
+ * drm_connector_attach_max_bpc_property - attach "max bpc" property
+ * @connector: connector to attach max bpc property on.
+ * @min: The minimum bit depth supported by the connector.
+ * @max: The maximum bit depth supported by the connector.
+ *
+ * This is used to add support for limiting the bit depth on a connector.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+ int min, int max)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop;
+
+ prop = connector->max_bpc_property;
+ if (!prop) {
+ prop = drm_property_create_range(dev, 0, "max bpc", min, max);
+ if (!prop)
+ return -ENOMEM;
+
+ connector->max_bpc_property = prop;
+ }
+
+ drm_object_attach_property(&connector->base, prop, max);
+ connector->state->max_requested_bpc = max;
+ connector->state->max_bpc = max;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
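A driver would typically attach this from its connector init path. A hedged sketch (function name and range values hypothetical; a driver should pick the range the hardware can actually output or dither to):

	static int foo_connector_init_props(struct drm_connector *connector)
	{
		/* advertise 8..12 bpc; the driver clamps the effective bpc */
		return drm_connector_attach_max_bpc_property(connector, 8, 12);
	}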
+
+/**
+ * drm_connector_set_vrr_capable_property - sets the variable refresh rate
+ * capable property for a connector
+ * @connector: drm connector
+ * @capable: True if the connector is variable refresh rate capable
+ *
+ * Should be used by atomic drivers to update the indicated support for
+ * variable refresh rate over a connector.
+ */
+void drm_connector_set_vrr_capable_property(
+ struct drm_connector *connector, bool capable)
+{
+ drm_object_property_set_value(&connector->base,
+ connector->vrr_capable_property,
+ capable);
+}
+EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
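Taken together, the two VRR connector helpers are used as in this sketch ("foo_" names hypothetical): attach the immutable property once at init time, then update its value whenever the sink's capability is (re)probed, e.g. after reading the EDID:

	static int foo_connector_init(struct drm_connector *connector)
	{
		return drm_connector_attach_vrr_capable_property(connector);
	}

	static void foo_connector_update_vrr(struct drm_connector *connector,
					     bool sink_supports_vrr)
	{
		drm_connector_set_vrr_capable_property(connector,
						       sink_supports_vrr);
	}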
+
+/**
* drm_connector_init_panel_orientation_property -
 * initialize the connector's panel_orientation property
* @connector: connector for which to init the panel-orientation property.
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 268a182ae189..1593dd6cdfb7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -340,6 +340,8 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
drm_object_attach_property(&crtc->base,
config->prop_out_fence_ptr, 0);
+ drm_object_attach_property(&crtc->base,
+ config->prop_vrr_enabled, 0);
}
return 0;
@@ -570,9 +572,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_crtc *crtc_req = data;
struct drm_crtc *crtc;
struct drm_plane *plane;
- struct drm_connector **connector_set, *connector;
- struct drm_framebuffer *fb;
- struct drm_display_mode *mode;
+ struct drm_connector **connector_set = NULL, *connector;
+ struct drm_framebuffer *fb = NULL;
+ struct drm_display_mode *mode = NULL;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx;
@@ -599,15 +601,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
plane = crtc->primary;
mutex_lock(&crtc->dev->mode_config.mutex);
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-retry:
- connector_set = NULL;
- fb = NULL;
- mode = NULL;
-
- ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
- if (ret)
- goto out;
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
+ DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */
@@ -766,13 +761,13 @@ out:
}
kfree(connector_set);
drm_mode_destroy(dev, mode);
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+
+ /* In case we need to retry... */
+ connector_set = NULL;
+ fb = NULL;
+ mode = NULL;
+
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
mutex_unlock(&crtc->dev->mode_config.mutex);
return ret;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index ce75e9506e85..a3c81850e755 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -984,118 +984,3 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
-
-/**
- * drm_helper_crtc_mode_set - mode_set implementation for atomic plane helpers
- * @crtc: DRM CRTC
- * @mode: DRM display mode which userspace requested
- * @adjusted_mode: DRM display mode adjusted by ->mode_fixup callbacks
- * @x: x offset of the CRTC scanout area on the underlying framebuffer
- * @y: y offset of the CRTC scanout area on the underlying framebuffer
- * @old_fb: previous framebuffer
- *
- * This function implements a callback usable as the ->mode_set callback
- * required by the CRTC helpers. Besides the atomic plane helper functions for
- * the primary plane the driver must also provide the ->mode_set_nofb callback
- * to set up the CRTC.
- *
- * This is a transitional helper useful for converting drivers to the atomic
- * interfaces.
- */
-int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_crtc_state *crtc_state;
- const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- int ret;
-
- if (crtc->funcs->atomic_duplicate_state)
- crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
- else {
- if (!crtc->state)
- drm_atomic_helper_crtc_reset(crtc);
-
- crtc_state = drm_atomic_helper_crtc_duplicate_state(crtc);
- }
-
- if (!crtc_state)
- return -ENOMEM;
-
- crtc_state->planes_changed = true;
- crtc_state->mode_changed = true;
- ret = drm_atomic_set_mode_for_crtc(crtc_state, mode);
- if (ret)
- goto out;
- drm_mode_copy(&crtc_state->adjusted_mode, adjusted_mode);
-
- if (crtc_funcs->atomic_check) {
- ret = crtc_funcs->atomic_check(crtc, crtc_state);
- if (ret)
- goto out;
- }
-
- swap(crtc->state, crtc_state);
-
- crtc_funcs->mode_set_nofb(crtc);
-
- ret = drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
-
-out:
- if (crtc_state) {
- if (crtc->funcs->atomic_destroy_state)
- crtc->funcs->atomic_destroy_state(crtc, crtc_state);
- else
- drm_atomic_helper_crtc_destroy_state(crtc, crtc_state);
- }
-
- return ret;
-}
-EXPORT_SYMBOL(drm_helper_crtc_mode_set);
-
-/**
- * drm_helper_crtc_mode_set_base - mode_set_base implementation for atomic plane helpers
- * @crtc: DRM CRTC
- * @x: x offset of the CRTC scanout area on the underlying framebuffer
- * @y: y offset of the CRTC scanout area on the underlying framebuffer
- * @old_fb: previous framebuffer
- *
- * This function implements a callback usable as the ->mode_set_base callback
- * required by the CRTC helpers. The driver must provide the atomic plane helper
- * functions for the primary plane.
- *
- * This is a transitional helper useful for converting drivers to the atomic
- * interfaces.
- */
-int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_plane_state *plane_state;
- struct drm_plane *plane = crtc->primary;
-
- if (plane->funcs->atomic_duplicate_state)
- plane_state = plane->funcs->atomic_duplicate_state(plane);
- else {
- if (!plane->state)
- drm_atomic_helper_plane_reset(plane);
-
- plane_state = drm_atomic_helper_plane_duplicate_state(plane);
- }
- if (!plane_state)
- return -ENOMEM;
- plane_state->plane = plane;
-
- plane_state->crtc = crtc;
- drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
- plane_state->crtc_x = 0;
- plane_state->crtc_y = 0;
- plane_state->crtc_h = crtc->mode.vdisplay;
- plane_state->crtc_w = crtc->mode.hdisplay;
- plane_state->src_x = x << 16;
- plane_state->src_y = y << 16;
- plane_state->src_h = crtc->mode.vdisplay << 16;
- plane_state->src_w = crtc->mode.hdisplay << 16;
-
- return drm_plane_helper_commit(plane, plane_state, old_fb);
-}
-EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
new file mode 100644
index 000000000000..d2a1c7372f36
--- /dev/null
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/**************************************************************************
+ *
+ * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Deepak Rawat <drawat@vmware.com>
+ * Rob Clark <robdclark@gmail.com>
+ *
+ **************************************************************************/
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_damage_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * FB_DAMAGE_CLIPS is an optional plane property which provides a means to
+ * specify a list of damage rectangles on a plane in framebuffer coordinates of
+ * the framebuffer attached to the plane. In this context, damage is the area
+ * of the plane's framebuffer that has changed since the last plane update
+ * (also called a page-flip), irrespective of whether the currently attached
+ * framebuffer is the same as the one attached during the last plane update.
+ *
+ * FB_DAMAGE_CLIPS is a hint to the kernel which some drivers can use to
+ * optimize internally, especially virtual devices where each framebuffer
+ * change needs to be transmitted over the network, USB, etc.
+ *
+ * Since FB_DAMAGE_CLIPS is only a hint, it is an optional property. User-space
+ * can ignore the damage clips property, in which case the driver will do a
+ * full plane update. When damage clips are provided, it is guaranteed that the
+ * area inside the damage clips will be updated on the plane. For efficiency,
+ * the driver may do a full update or update more than what is specified in the
+ * damage clips. Since the driver is free to read more, user-space must always
+ * render the entire visible framebuffer, otherwise there can be corruption.
+ * Also, damage clips which don't encompass the actual damage to the
+ * framebuffer (since the last plane update) can result in incorrect rendering.
+ *
+ * FB_DAMAGE_CLIPS is a blob property whose blob data is simply an array of
+ * &drm_mode_rect. Unlike the plane &drm_plane_state.src coordinates, damage
+ * clips are not in 16.16 fixed point. Like the plane src in the framebuffer,
+ * damage clips cannot be negative. In a damage clip, x1/y1 are inclusive and
+ * x2/y2 are exclusive. While the kernel does not return an error for
+ * overlapping damage clips, they are strongly discouraged.
+ *
+ * Drivers interested in the damage interface for planes should enable the
+ * FB_DAMAGE_CLIPS property by calling drm_plane_enable_fb_damage_clips().
+ * Drivers implementing damage can use the drm_atomic_helper_damage_iter_init()
+ * and drm_atomic_helper_damage_iter_next() iterator helpers to get damage
+ * rectangles clipped to &drm_plane_state.src.
+ */
+
+static void convert_clip_rect_to_rect(const struct drm_clip_rect *src,
+ struct drm_mode_rect *dest,
+ uint32_t num_clips, uint32_t src_inc)
+{
+ while (num_clips > 0) {
+ dest->x1 = src->x1;
+ dest->y1 = src->y1;
+ dest->x2 = src->x2;
+ dest->y2 = src->y2;
+ src += src_inc;
+ dest++;
+ num_clips--;
+ }
+}
+
+/**
+ * drm_plane_enable_fb_damage_clips - Enables plane fb damage clips property.
+ * @plane: Plane on which to enable damage clips property.
+ *
+ * This function lets a driver enable the damage clips property on a plane.
+ */
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ drm_object_attach_property(&plane->base, config->prop_fb_damage_clips,
+ 0);
+}
+EXPORT_SYMBOL(drm_plane_enable_fb_damage_clips);
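Enabling the property is a one-liner in the plane init path. A sketch under the assumption of a standard drm_universal_plane_init() setup (the "foo_" funcs table and format array are hypothetical):

	ret = drm_universal_plane_init(dev, plane, possible_crtcs,
				       &foo_plane_funcs, foo_formats,
				       ARRAY_SIZE(foo_formats), NULL,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret)
		return ret;

	drm_plane_enable_fb_damage_clips(plane);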
+
+/**
+ * drm_atomic_helper_check_plane_damage - Verify plane damage on atomic_check.
+ * @state: The driver state object.
+ * @plane_state: Plane state for which to verify damage.
+ *
+ * This helper function makes sure that damage from plane state is discarded
+ * for full modeset. If there are more reasons a driver would want to do a full
+ * plane update rather than processing individual damage regions, then those
+ * cases should be taken care of here.
+ *
+ * Note that &drm_plane_state.fb_damage_clips == NULL in the plane state means
+ * that a full plane update should happen. It also ensures the helper iterator
+ * will return &drm_plane_state.src as the damage.
+ */
+void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (plane_state->crtc) {
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ plane_state->crtc);
+
+ if (WARN_ON(!crtc_state))
+ return;
+
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ drm_property_blob_put(plane_state->fb_damage_clips);
+ plane_state->fb_damage_clips = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_check_plane_damage);
+
+/**
+ * drm_atomic_helper_dirtyfb - Helper for dirtyfb.
+ * @fb: DRM framebuffer.
+ * @file_priv: Drm file for the ioctl call.
+ * @flags: Dirty fb annotate flags.
+ * @color: Color for annotate fill.
+ * @clips: Dirty region.
+ * @num_clips: Count of clip in clips.
+ *
+ * A helper to implement &drm_framebuffer_funcs.dirty using the damage
+ * interface during plane update. If num_clips is 0 then this helper will do a
+ * full plane update. This is the same behaviour expected by the DIRTYFB IOCTL.
+ *
+ * Note that this helper is a blocking implementation. This is what current
+ * drivers and userspace expect in their DIRTYFB IOCTL implementation, as a way
+ * to rate-limit userspace and make sure its rendering doesn't get ahead of
+ * uploading new data too much.
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_property_blob *damage = NULL;
+ struct drm_mode_rect *rects = NULL;
+ struct drm_atomic_state *state;
+ struct drm_plane *plane;
+ int ret = 0;
+
+ /*
+ * When called from an ioctl, we are interruptible, but not when called
+ * internally (i.e. from the defio worker).
+ */
+ drm_modeset_acquire_init(&ctx,
+ file_priv ? DRM_MODESET_ACQUIRE_INTERRUPTIBLE : 0);
+
+ state = drm_atomic_state_alloc(fb->dev);
+ if (!state) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ state->acquire_ctx = &ctx;
+
+ if (clips) {
+ uint32_t inc = 1;
+
+ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+ inc = 2;
+ num_clips /= 2;
+ }
+
+ rects = kcalloc(num_clips, sizeof(*rects), GFP_KERNEL);
+ if (!rects) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ convert_clip_rect_to_rect(clips, rects, num_clips, inc);
+ damage = drm_property_create_blob(fb->dev,
+ num_clips * sizeof(*rects),
+ rects);
+ if (IS_ERR(damage)) {
+ ret = PTR_ERR(damage);
+ damage = NULL;
+ goto out;
+ }
+ }
+
+retry:
+ drm_for_each_plane(plane, fb->dev) {
+ struct drm_plane_state *plane_state;
+
+ if (plane->state->fb != fb)
+ continue;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto out;
+ }
+
+ drm_property_replace_blob(&plane_state->fb_damage_clips,
+ damage);
+ }
+
+ ret = drm_atomic_commit(state);
+
+out:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry;
+ }
+
+ drm_property_blob_put(damage);
+ kfree(rects);
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_atomic_helper_dirtyfb);
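Wiring this helper up is just a matter of pointing the framebuffer's .dirty hook at it. A sketch assuming a GEM-backed framebuffer using the stock drm_gem_fb_* helpers (the "foo_" name is hypothetical):

	static const struct drm_framebuffer_funcs foo_fb_funcs = {
		.destroy	= drm_gem_fb_destroy,
		.create_handle	= drm_gem_fb_create_handle,
		.dirty		= drm_atomic_helper_dirtyfb,
	};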
+
+/**
+ * drm_atomic_helper_damage_iter_init - Initialize the damage iterator.
+ * @iter: The iterator to initialize.
+ * @old_state: Old plane state for validation.
+ * @state: Plane state from which to iterate the damage clips.
+ *
+ * Initializes an iterator which clips the plane damage
+ * &drm_plane_state.fb_damage_clips to the plane &drm_plane_state.src. The
+ * iterator returns the full plane src when damage is not present, either
+ * because user-space didn't send it or because the driver discarded it (e.g.
+ * to do a full plane update). Currently the iterator also returns the full
+ * plane src when the plane src changed, but that may change in the future to
+ * return only the damage.
+ *
+ * When the plane is not visible, or the plane update should not happen, the
+ * first call to iter_next will return false. Note that this helper uses the
+ * clipped &drm_plane_state.src, so drivers calling it should have called
+ * drm_atomic_helper_check_plane_state() earlier.
+ */
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+ const struct drm_plane_state *old_state,
+ const struct drm_plane_state *state)
+{
+ memset(iter, 0, sizeof(*iter));
+
+ if (!state || !state->crtc || !state->fb || !state->visible)
+ return;
+
+ iter->clips = drm_helper_get_plane_damage_clips(state);
+ iter->num_clips = drm_plane_get_damage_clips_count(state);
+
+ /* Round down for x1/y1 and round up for x2/y2 to catch all pixels */
+ iter->plane_src.x1 = state->src.x1 >> 16;
+ iter->plane_src.y1 = state->src.y1 >> 16;
+ iter->plane_src.x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
+ iter->plane_src.y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+
+ if (!iter->clips || !drm_rect_equals(&state->src, &old_state->src)) {
+ iter->clips = NULL;
+ iter->num_clips = 0;
+ iter->full_update = true;
+ }
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_init);
+
+/**
+ * drm_atomic_helper_damage_iter_next - Advance the damage iterator.
+ * @iter: The iterator to advance.
+ * @rect: Return a rectangle in fb coordinate clipped to plane src.
+ *
+ * Since the plane src is in 16.16 fixed point and damage clips are whole
+ * numbers, this iterator rounds off clips that intersect with the plane src:
+ * rounding down for x1/y1 and up for x2/y2 of the intersected coordinates.
+ * The full plane src is rounded off in the same way when it is returned as
+ * damage. Damage clips outside of the plane src are skipped.
+ *
+ * Return: True if the output is valid, false if the end was reached.
+ *
+ * If the first call to the iterator returns false, there is no need to update
+ * the plane.
+ */
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+ struct drm_rect *rect)
+{
+ bool ret = false;
+
+ if (iter->full_update) {
+ *rect = iter->plane_src;
+ iter->full_update = false;
+ return true;
+ }
+
+ while (iter->curr_clip < iter->num_clips) {
+ *rect = iter->clips[iter->curr_clip];
+ iter->curr_clip++;
+
+ if (drm_rect_intersect(rect, &iter->plane_src)) {
+ ret = true;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);
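Putting the iterator to work in a plane's atomic_update hook might look like this sketch (foo_flush_rect() is a hypothetical stand-in for whatever uploads one rectangle to the device):

	static void foo_plane_atomic_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
	{
		struct drm_atomic_helper_damage_iter iter;
		struct drm_rect clip;

		drm_atomic_helper_damage_iter_init(&iter, old_state,
						   plane->state);
		while (drm_atomic_helper_damage_iter_next(&iter, &clip))
			foo_flush_rect(plane, &clip);
	}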
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 373bd4c2b698..f8468eae0503 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -32,6 +32,8 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_edid.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_gem.h>
#include <drm/drmP.h>
#include "drm_internal.h"
@@ -43,6 +45,93 @@
* Initialization, etc.
**************************************************/
+static int drm_name_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_minor *minor = node->minor;
+ struct drm_device *dev = minor->dev;
+ struct drm_master *master;
+
+ mutex_lock(&dev->master_mutex);
+ master = dev->master;
+ seq_printf(m, "%s", dev->driver->name);
+ if (dev->dev)
+ seq_printf(m, " dev=%s", dev_name(dev->dev));
+ if (master && master->unique)
+ seq_printf(m, " master=%s", master->unique);
+ if (dev->unique)
+ seq_printf(m, " unique=%s", dev->unique);
+ seq_printf(m, "\n");
+ mutex_unlock(&dev->master_mutex);
+
+ return 0;
+}
+
+static int drm_clients_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_file *priv;
+ kuid_t uid;
+
+ seq_printf(m,
+ "%20s %5s %3s master a %5s %10s\n",
+ "command",
+ "pid",
+ "dev",
+ "uid",
+ "magic");
+
+ /* dev->filelist is sorted youngest first, but we want to present
+ * oldest first (i.e. kernel, servers, clients), so walk backwards.
+ */
+ mutex_lock(&dev->filelist_mutex);
+ list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
+ struct task_struct *task;
+
+ rcu_read_lock(); /* locks pid_task()->comm */
+ task = pid_task(priv->pid, PIDTYPE_PID);
+ uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
+ seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
+ task ? task->comm : "<unknown>",
+ pid_vnr(priv->pid),
+ priv->minor->index,
+ drm_is_current_master(priv) ? 'y' : 'n',
+ priv->authenticated ? 'y' : 'n',
+ from_kuid_munged(seq_user_ns(m), uid),
+ priv->magic);
+ rcu_read_unlock();
+ }
+ mutex_unlock(&dev->filelist_mutex);
+ return 0;
+}
+
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+ struct drm_gem_object *obj = ptr;
+ struct seq_file *m = data;
+
+ seq_printf(m, "%6d %8zd %7d %8d\n",
+ obj->name, obj->size,
+ obj->handle_count,
+ kref_read(&obj->refcount));
+ return 0;
+}
+
+static int drm_gem_name_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+
+ seq_printf(m, " name size handles refcount\n");
+
+ mutex_lock(&dev->object_name_lock);
+ idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+ mutex_unlock(&dev->object_name_lock);
+
+ return 0;
+}
+
static const struct drm_info_list drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"clients", drm_clients_info, 0},
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
index 8a718f85079a..b15cee85b702 100644
--- a/drivers/gpu/drm/drm_dp_cec.c
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -424,8 +424,6 @@ void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
aux->cec.parent = parent;
INIT_DELAYED_WORK(&aux->cec.unregister_work,
drm_dp_cec_unregister_work);
-
- drm_dp_cec_set_edid(aux, NULL);
}
EXPORT_SYMBOL(drm_dp_cec_register_connector);
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 37c01b6076ec..2d6c491a0542 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1352,3 +1352,95 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
return 0;
}
EXPORT_SYMBOL(drm_dp_read_desc);
+
+/**
+ * DRM DP Helpers for DSC
+ */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp)
+{
+ u8 slice_cap1 = dsc_dpcd[DP_DSC_SLICE_CAP_1 - DP_DSC_SUPPORT];
+
+ if (is_edp) {
+ /* For eDP, register DSC_SLICE_CAPABILITIES_1 gives slice count */
+ if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+ return 4;
+ if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+ return 2;
+ if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+ return 1;
+ } else {
+ /* For DP, use values from DSC_SLICE_CAP_1 and DSC_SLICE_CAP_2 */
+ u8 slice_cap2 = dsc_dpcd[DP_DSC_SLICE_CAP_2 - DP_DSC_SUPPORT];
+
+ if (slice_cap2 & DP_DSC_24_PER_DP_DSC_SINK)
+ return 24;
+ if (slice_cap2 & DP_DSC_20_PER_DP_DSC_SINK)
+ return 20;
+ if (slice_cap2 & DP_DSC_16_PER_DP_DSC_SINK)
+ return 16;
+ if (slice_cap1 & DP_DSC_12_PER_DP_DSC_SINK)
+ return 12;
+ if (slice_cap1 & DP_DSC_10_PER_DP_DSC_SINK)
+ return 10;
+ if (slice_cap1 & DP_DSC_8_PER_DP_DSC_SINK)
+ return 8;
+ if (slice_cap1 & DP_DSC_6_PER_DP_DSC_SINK)
+ return 6;
+ if (slice_cap1 & DP_DSC_4_PER_DP_DSC_SINK)
+ return 4;
+ if (slice_cap1 & DP_DSC_2_PER_DP_DSC_SINK)
+ return 2;
+ if (slice_cap1 & DP_DSC_1_PER_DP_DSC_SINK)
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_count);
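A hedged usage sketch: cache the sink's DSC capability block starting at
DP_DSC_SUPPORT, then query the slice count. The aux pointer is an assumed
driver-owned struct drm_dp_aux:

	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
	u8 max_slices;

	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
			     sizeof(dsc_dpcd)) == sizeof(dsc_dpcd)) {
		/* false = DP sink; a return of 0 means no usable slice cap */
		max_slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, false);
	}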
+
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ u8 line_buf_depth = dsc_dpcd[DP_DSC_LINE_BUF_BIT_DEPTH - DP_DSC_SUPPORT];
+
+ switch (line_buf_depth & DP_DSC_LINE_BUF_BIT_DEPTH_MASK) {
+ case DP_DSC_LINE_BUF_BIT_DEPTH_9:
+ return 9;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_10:
+ return 10;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_11:
+ return 11;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_12:
+ return 12;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_13:
+ return 13;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_14:
+ return 14;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_15:
+ return 15;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_16:
+ return 16;
+ case DP_DSC_LINE_BUF_BIT_DEPTH_8:
+ return 8;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_line_buf_depth);
+
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3])
+{
+ int num_bpc = 0;
+ u8 color_depth = dsc_dpcd[DP_DSC_DEC_COLOR_DEPTH_CAP - DP_DSC_SUPPORT];
+
+ if (color_depth & DP_DSC_12_BPC)
+ dsc_bpc[num_bpc++] = 12;
+ if (color_depth & DP_DSC_10_BPC)
+ dsc_bpc[num_bpc++] = 10;
+ if (color_depth & DP_DSC_8_BPC)
+ dsc_bpc[num_bpc++] = 8;
+
+ return num_bpc;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
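Continuing the sketch above with the same cached capability block; the array
is filled highest bit depth first:

	u8 dsc_bpc[3];
	int num_bpc, i;

	num_bpc = drm_dp_dsc_sink_supported_input_bpcs(dsc_dpcd, dsc_bpc);
	for (i = 0; i < num_bpc; i++) {
		/* e.g. 12, 10, 8 in that order; pick the first depth the
		 * source can also generate
		 */
	}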
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 0e0df398222d..529414556962 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2572,9 +2572,16 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
EXPORT_SYMBOL(drm_dp_mst_get_edid);
/**
- * drm_dp_find_vcpi_slots() - find slots for this PBN value
+ * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
* @mgr: manager to use
* @pbn: payload bandwidth to convert into slots.
+ *
+ * Calculate the number of VCPI slots that will be required for the given PBN
+ * value. This function is deprecated, and should not be used in atomic
+ * drivers.
+ *
+ * RETURNS:
+ * The total slots required for this port, or error.
*/
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
int pbn)
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 36e8e9cbec52..12e5e2be7890 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -476,8 +476,6 @@ static void drm_fs_inode_free(struct inode *inode)
* The initial ref-count of the object is 1. Use drm_dev_get() and
* drm_dev_put() to take and drop further ref-counts.
*
- * Note that for purely virtual devices @parent can be NULL.
- *
* Drivers that do not want to allocate their own device struct
* embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
* that do embed &struct drm_device it must be placed first in the overall
@@ -502,6 +500,8 @@ int drm_dev_init(struct drm_device *dev,
return -ENODEV;
}
+ BUG_ON(!parent);
+
kref_init(&dev->ref);
dev->dev = parent;
dev->driver = driver;
@@ -556,9 +556,7 @@ int drm_dev_init(struct drm_device *dev,
}
}
- /* Use the parent device name as DRM device unique identifier, but fall
- * back to the driver name for virtual devices like vgem. */
- ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
+ ret = drm_dev_set_unique(dev, dev_name(parent));
if (ret)
goto err_setunique;
@@ -706,19 +704,6 @@ void drm_dev_put(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_dev_put);
-/**
- * drm_dev_unref - Drop reference of a DRM device
- * @dev: device to drop reference of or NULL
- *
- * This is a compatibility alias for drm_dev_put() and should not be used by new
- * code.
- */
-void drm_dev_unref(struct drm_device *dev)
-{
- drm_dev_put(dev);
-}
-EXPORT_SYMBOL(drm_dev_unref);
-
static int create_compat_control_link(struct drm_device *dev)
{
struct drm_minor *minor;
@@ -975,14 +960,12 @@ static void drm_core_exit(void)
drm_sysfs_destroy();
idr_destroy(&drm_minors_idr);
drm_connector_ida_destroy();
- drm_global_release();
}
static int __init drm_core_init(void)
{
int ret;
- drm_global_init();
drm_connector_ida_init();
idr_init(&drm_minors_idr);
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
new file mode 100644
index 000000000000..bc2b23adb072
--- /dev/null
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corp
+ *
+ * Author:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/byteorder/generic.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_dsc.h>
+
+/**
+ * DOC: dsc helpers
+ *
+ * These functions contain some common logic and helpers to deal with the VESA
+ * Display Stream Compression standard, required for DSC on DisplayPort/eDP or
+ * MIPI display interfaces.
+ */
+
+/**
+ * drm_dsc_dp_pps_header_init() - Initializes the PPS Header
+ * for DisplayPort as per the DP 1.4 spec.
+ * @pps_sdp: Secondary data packet for DSC Picture Parameter Set
+ */
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
+{
+ memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header));
+
+ pps_sdp->pps_header.HB1 = DP_SDP_PPS;
+ pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
+}
+EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
+
+/**
+ * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
+ * using the DSC configuration parameters in the order expected
+ * by the DSC Display Sink device. For the DSC, the sink device
+ * expects the PPS payload in the big endian format for the fields
+ * that span more than 1 byte.
+ *
+ * @pps_sdp:
+ * Secondary data packet for DSC Picture Parameter Set
+ * @dsc_cfg:
+ * DSC Configuration data filled by driver
+ */
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg)
+{
+ int i;
+
+ /* Protect against someone accidentally changing struct size */
+ BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) !=
+ DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
+
+ memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload));
+
+ /* PPS 0 */
+ pps_sdp->pps_payload.dsc_version =
+ dsc_cfg->dsc_version_minor |
+ dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
+
+ /* PPS 1, 2 is 0 */
+
+ /* PPS 3 */
+ pps_sdp->pps_payload.pps_3 =
+ dsc_cfg->line_buf_depth |
+ dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
+
+ /* PPS 4 */
+ pps_sdp->pps_payload.pps_4 =
+ ((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT) |
+ dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
+ dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT |
+ dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
+ dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
+
+ /* PPS 5 */
+ pps_sdp->pps_payload.bits_per_pixel_low =
+ (dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
+
+ /*
+ * The DSC panel expects the PPS packet to have big endian format
+ * for data spanning 2 bytes. Use the cpu_to_be16() macro to convert:
+ * on a little endian host it swaps the bytes, on a big endian host
+ * the value is left unchanged.
+ */
+
+ /* PPS 6, 7 */
+ pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height);
+
+ /* PPS 8, 9 */
+ pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width);
+
+ /* PPS 10, 11 */
+ pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height);
+
+ /* PPS 12, 13 */
+ pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width);
+
+ /* PPS 14, 15 */
+ pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
+
+ /* PPS 16 */
+ pps_sdp->pps_payload.initial_xmit_delay_high =
+ ((dsc_cfg->initial_xmit_delay &
+ DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT);
+
+ /* PPS 17 */
+ pps_sdp->pps_payload.initial_xmit_delay_low =
+ (dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
+
+ /* PPS 18, 19 */
+ pps_sdp->pps_payload.initial_dec_delay =
+ cpu_to_be16(dsc_cfg->initial_dec_delay);
+
+ /* PPS 20 is 0 */
+
+ /* PPS 21 */
+ pps_sdp->pps_payload.initial_scale_value =
+ dsc_cfg->initial_scale_value;
+
+ /* PPS 22, 23 */
+ pps_sdp->pps_payload.scale_increment_interval =
+ cpu_to_be16(dsc_cfg->scale_increment_interval);
+
+ /* PPS 24 */
+ pps_sdp->pps_payload.scale_decrement_interval_high =
+ ((dsc_cfg->scale_decrement_interval &
+ DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
+ DSC_PPS_MSB_SHIFT);
+
+ /* PPS 25 */
+ pps_sdp->pps_payload.scale_decrement_interval_low =
+ (dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
+
+ /* PPS 26[7:0], PPS 27[7:5] RESERVED */
+
+ /* PPS 27 */
+ pps_sdp->pps_payload.first_line_bpg_offset =
+ dsc_cfg->first_line_bpg_offset;
+
+ /* PPS 28, 29 */
+ pps_sdp->pps_payload.nfl_bpg_offset =
+ cpu_to_be16(dsc_cfg->nfl_bpg_offset);
+
+ /* PPS 30, 31 */
+ pps_sdp->pps_payload.slice_bpg_offset =
+ cpu_to_be16(dsc_cfg->slice_bpg_offset);
+
+ /* PPS 32, 33 */
+ pps_sdp->pps_payload.initial_offset =
+ cpu_to_be16(dsc_cfg->initial_offset);
+
+ /* PPS 34, 35 */
+ pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset);
+
+ /* PPS 36 */
+ pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp;
+
+ /* PPS 37 */
+ pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp;
+
+ /* PPS 38, 39 */
+ pps_sdp->pps_payload.rc_model_size =
+ cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
+
+ /* PPS 40 */
+ pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+
+ /* PPS 41 */
+ pps_sdp->pps_payload.rc_quant_incr_limit0 =
+ dsc_cfg->rc_quant_incr_limit0;
+
+ /* PPS 42 */
+ pps_sdp->pps_payload.rc_quant_incr_limit1 =
+ dsc_cfg->rc_quant_incr_limit1;
+
+ /* PPS 43 */
+ pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
+ DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
+
+ /* PPS 44 - 57 */
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
+ pps_sdp->pps_payload.rc_buf_thresh[i] =
+ dsc_cfg->rc_buf_thresh[i];
+
+ /* PPS 58 - 87 */
+ /*
+ * For DSC sink programming the RC Range parameter fields
+ * are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
+ */
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ pps_sdp->pps_payload.rc_range_parameters[i] =
+ ((dsc_cfg->rc_range_params[i].range_min_qp <<
+ DSC_PPS_RC_RANGE_MINQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_max_qp <<
+ DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
+ (dsc_cfg->rc_range_params[i].range_bpg_offset));
+ pps_sdp->pps_payload.rc_range_parameters[i] =
+ cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]);
+ }
+
+ /* PPS 88 */
+ pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 |
+ dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
+
+ /* PPS 89 */
+ pps_sdp->pps_payload.second_line_bpg_offset =
+ dsc_cfg->second_line_bpg_offset;
+
+ /* PPS 90, 91 */
+ pps_sdp->pps_payload.nsl_bpg_offset =
+ cpu_to_be16(dsc_cfg->nsl_bpg_offset);
+
+ /* PPS 92, 93 */
+ pps_sdp->pps_payload.second_line_offset_adj =
+ cpu_to_be16(dsc_cfg->second_line_offset_adj);
+
+ /* PPS 94 - 127 are 0 */
+}
+EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack);
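Putting the two helpers together, a source driver would typically build the
SDP in two steps; dsc_cfg is an assumed driver-filled struct drm_dsc_config,
and the final transmission is driver specific:

	struct drm_dsc_pps_infoframe pps_sdp;

	drm_dsc_dp_pps_header_init(&pps_sdp);
	drm_dsc_pps_infoframe_pack(&pps_sdp, dsc_cfg);
	/* then send &pps_sdp to the sink via the driver's own SDP hooks */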
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index fb0dfc62b1b6..5b516615881a 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -72,7 +72,9 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
/**
- * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer
+ * drm_fb_cma_get_gem_addr() - Get physical address for framebuffer. For pixel
+ * formats where values are grouped in blocks this returns the beginning of
+ * the block.
* @fb: The framebuffer
* @state: Which state of drm plane
* @plane: Which plane
@@ -87,6 +89,13 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
struct drm_gem_cma_object *obj;
dma_addr_t paddr;
u8 h_div = 1, v_div = 1;
+ u32 block_w = drm_format_info_block_width(fb->format, plane);
+ u32 block_h = drm_format_info_block_height(fb->format, plane);
+ u32 block_size = fb->format->char_per_block[plane];
+ u32 sample_x;
+ u32 sample_y;
+ u32 block_start_y;
+ u32 num_hblocks;
obj = drm_fb_cma_get_gem_obj(fb, plane);
if (!obj)
@@ -99,8 +108,13 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
v_div = fb->format->vsub;
}
- paddr += (fb->format->cpp[plane] * (state->src_x >> 16)) / h_div;
- paddr += (fb->pitches[plane] * (state->src_y >> 16)) / v_div;
+ sample_x = (state->src_x >> 16) / h_div;
+ sample_y = (state->src_y >> 16) / v_div;
+ block_start_y = (sample_y / block_h) * block_h;
+ num_hblocks = sample_x / block_w;
+
+ paddr += fb->pitches[plane] * block_start_y;
+ paddr += block_size * num_hblocks;
return paddr;
}
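A worked example of the new block arithmetic with hypothetical numbers: a
2x2-block format with 8 bytes per block on plane 0 (h_div = v_div = 1),
src_x >> 16 = 5 and src_y >> 16 = 3:

	/* sample_x = 5, sample_y = 3
	 * block_start_y = (3 / 2) * 2 = 2
	 * num_hblocks  =  5 / 2      = 2
	 * paddr += fb->pitches[0] * 2 + 8 * 2
	 * i.e. the start of the block containing the sample, not the sample
	 * itself, as the updated kernel-doc above notes.
	 */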
@@ -124,10 +138,7 @@ int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
/* dev->fb_helper will indirectly point to fbdev_cma after this call */
fbdev_cma = drm_fbdev_cma_init(dev, preferred_bpp, max_conn_count);
- if (IS_ERR(fbdev_cma))
- return PTR_ERR(fbdev_cma);
-
- return 0;
+ return PTR_ERR_OR_ZERO(fbdev_cma);
}
EXPORT_SYMBOL_GPL(drm_fb_cma_fbdev_init);
@@ -226,21 +237,3 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
}
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
-
-/**
- * drm_fbdev_cma_set_suspend_unlocked - wrapper around
- * drm_fb_helper_set_suspend_unlocked
- * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
- * @state: desired state, zero to resume, non-zero to suspend
- *
- * Calls drm_fb_helper_set_suspend, which is a wrapper around
- * fb_set_suspend implemented by fbdev core.
- */
-void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
- bool state)
-{
- if (fbdev_cma)
- drm_fb_helper_set_suspend_unlocked(&fbdev_cma->fb_helper,
- state);
-}
-EXPORT_SYMBOL(drm_fbdev_cma_set_suspend_unlocked);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index dd852a25d375..d3af098b0922 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
static bool drm_leak_fbdev_smem = false;
module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
-MODULE_PARM_DESC(fbdev_emulation,
+MODULE_PARM_DESC(drm_leak_fbdev_smem,
"Allow unsafe leaking fbdev physical smem address [default=false]");
#endif
@@ -1635,6 +1635,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
+ if ((drm_format_info_block_width(fb->format, 0) > 1) ||
+ (drm_format_info_block_height(fb->format, 0) > 1))
+ return -EINVAL;
+
/*
* Changes struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
@@ -1952,6 +1956,8 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
{
struct drm_framebuffer *fb = fb_helper->fb;
+ WARN_ON((drm_format_info_block_width(fb->format, 0) > 1) ||
+ (drm_format_info_block_height(fb->format, 0) > 1));
info->pseudo_palette = fb_helper->pseudo_palette;
info->var.xres_virtual = fb->width;
info->var.yres_virtual = fb->height;
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 8aaa5e86a979..d90ee03a84c6 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -103,8 +103,8 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
*
* Computes a drm fourcc pixel format code for the given @bpp/@depth values.
* Unlike drm_mode_legacy_fb_format() this looks at the drivers mode_config,
- * and depending on the quirk_addfb_prefer_host_byte_order flag it returns
- * little endian byte order or host byte order framebuffer formats.
+ * and depending on the &drm_mode_config.quirk_addfb_prefer_host_byte_order flag
+ * it returns little endian byte order or host byte order framebuffer formats.
*/
uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
uint32_t bpp, uint32_t depth)
@@ -224,7 +224,20 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1,
+ .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+ .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_X0L0, .depth = 0, .num_planes = 1,
+ .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
+ { .format = DRM_FORMAT_Y0L2, .depth = 0, .num_planes = 1,
+ .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+ .hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_X0L2, .depth = 0, .num_planes = 1,
+ .char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
+ .hsub = 2, .vsub = 2, .is_yuv = true },
};
unsigned int i;
@@ -400,3 +413,65 @@ int drm_format_plane_height(int height, uint32_t format, int plane)
return height / info->vsub;
}
EXPORT_SYMBOL(drm_format_plane_height);
+
+/**
+ * drm_format_info_block_width - width in pixels of block.
+ * @info: pixel format info
+ * @plane: plane index
+ *
+ * Returns:
+ * The width in pixels of a block, depending on the plane index.
+ */
+unsigned int drm_format_info_block_width(const struct drm_format_info *info,
+ int plane)
+{
+ if (!info || plane < 0 || plane >= info->num_planes)
+ return 0;
+
+ if (!info->block_w[plane])
+ return 1;
+ return info->block_w[plane];
+}
+EXPORT_SYMBOL(drm_format_info_block_width);
+
+/**
+ * drm_format_info_block_height - height in pixels of a block
+ * @info: pixel format info
+ * @plane: plane index
+ *
+ * Returns:
+ * The height in pixels of a block, depending on the plane index.
+ */
+unsigned int drm_format_info_block_height(const struct drm_format_info *info,
+ int plane)
+{
+ if (!info || plane < 0 || plane >= info->num_planes)
+ return 0;
+
+ if (!info->block_h[plane])
+ return 1;
+ return info->block_h[plane];
+}
+EXPORT_SYMBOL(drm_format_info_block_height);
+
+/**
+ * drm_format_info_min_pitch - computes the minimum required pitch in bytes
+ * @info: pixel format info
+ * @plane: plane index
+ * @buffer_width: buffer width in pixels
+ *
+ * Returns:
+ * The minimum required pitch in bytes for a buffer by taking into consideration
+ * the pixel format information and the buffer width.
+ */
+uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
+ int plane, unsigned int buffer_width)
+{
+ if (!info || plane < 0 || plane >= info->num_planes)
+ return 0;
+
+ return DIV_ROUND_UP_ULL((u64)buffer_width * info->char_per_block[plane],
+ drm_format_info_block_width(info, plane) *
+ drm_format_info_block_height(info, plane));
+}
+EXPORT_SYMBOL(drm_format_info_min_pitch);
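For example, with the X0L0 entry added to the format table above (2x2 block,
8 chars per block) a 1366 pixel wide buffer needs
DIV_ROUND_UP(1366 * 8, 2 * 2) = 2732 bytes per row; for plain 1x1 formats the
result degenerates to width * cpp. A sketch:

	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0);
	u64 pitch = drm_format_info_min_pitch(info, 0, 1366); /* 2732 */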
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 3bf729d0aae5..fcaea8f50513 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -195,20 +195,26 @@ static int framebuffer_check(struct drm_device *dev,
for (i = 0; i < info->num_planes; i++) {
unsigned int width = fb_plane_width(r->width, info, i);
unsigned int height = fb_plane_height(r->height, info, i);
- unsigned int cpp = info->cpp[i];
+ unsigned int block_size = info->char_per_block[i];
+ u64 min_pitch = drm_format_info_min_pitch(info, i, width);
+
+ if (!block_size && (r->modifier[i] == DRM_FORMAT_MOD_LINEAR)) {
+ DRM_DEBUG_KMS("Format requires non-linear modifier for plane %d\n", i);
+ return -EINVAL;
+ }
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
return -EINVAL;
}
- if ((uint64_t) width * cpp > UINT_MAX)
+ if (min_pitch > UINT_MAX)
return -ERANGE;
if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
return -ERANGE;
- if (r->pitches[i] < width * cpp) {
+ if (block_size && r->pitches[i] < min_pitch) {
DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
return -EINVAL;
}
@@ -317,6 +323,7 @@ drm_internal_framebuffer_create(struct drm_device *dev,
return fb;
}
+EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_internal_framebuffer_create);
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 512078ebd97b..8b55ece97967 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -257,7 +257,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
- if (dev->driver->gem_close_object)
+ if (obj->funcs && obj->funcs->close)
+ obj->funcs->close(obj, file_priv);
+ else if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
if (drm_core_check_feature(dev, DRIVER_PRIME))
@@ -410,7 +412,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
if (ret)
goto err_remove;
- if (dev->driver->gem_open_object) {
+ if (obj->funcs && obj->funcs->open) {
+ ret = obj->funcs->open(obj, file_priv);
+ if (ret)
+ goto err_revoke;
+ } else if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
if (ret)
goto err_revoke;
@@ -835,7 +841,9 @@ drm_gem_object_free(struct kref *kref)
container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;
- if (dev->driver->gem_free_object_unlocked) {
+ if (obj->funcs) {
+ obj->funcs->free(obj);
+ } else if (dev->driver->gem_free_object_unlocked) {
dev->driver->gem_free_object_unlocked(obj);
} else if (dev->driver->gem_free_object) {
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -864,13 +872,13 @@ drm_gem_object_put_unlocked(struct drm_gem_object *obj)
dev = obj->dev;
- if (dev->driver->gem_free_object_unlocked) {
- kref_put(&obj->refcount, drm_gem_object_free);
- } else {
+ if (dev->driver->gem_free_object) {
might_lock(&dev->struct_mutex);
if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
&dev->struct_mutex))
mutex_unlock(&dev->struct_mutex);
+ } else {
+ kref_put(&obj->refcount, drm_gem_object_free);
}
}
EXPORT_SYMBOL(drm_gem_object_put_unlocked);
@@ -960,11 +968,14 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
- if (!dev->driver->gem_vm_ops)
+ if (obj->funcs && obj->funcs->vm_ops)
+ vma->vm_ops = obj->funcs->vm_ops;
+ else if (dev->driver->gem_vm_ops)
+ vma->vm_ops = dev->driver->gem_vm_ops;
+ else
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_ops = dev->driver->gem_vm_ops;
vma->vm_private_data = obj;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
@@ -1066,6 +1077,86 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
drm_printf_indent(p, indent, "imported=%s\n",
obj->import_attach ? "yes" : "no");
- if (obj->dev->driver->gem_print_info)
+ if (obj->funcs && obj->funcs->print_info)
+ obj->funcs->print_info(p, indent, obj);
+ else if (obj->dev->driver->gem_print_info)
obj->dev->driver->gem_print_info(p, indent, obj);
}
+
+/**
+ * drm_gem_pin - Pin backing buffer in memory
+ * @obj: GEM object
+ *
+ * Make sure the backing buffer is pinned in memory.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_pin(struct drm_gem_object *obj)
+{
+ if (obj->funcs && obj->funcs->pin)
+ return obj->funcs->pin(obj);
+ else if (obj->dev->driver->gem_prime_pin)
+ return obj->dev->driver->gem_prime_pin(obj);
+ else
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_pin);
+
+/**
+ * drm_gem_unpin - Unpin backing buffer from memory
+ * @obj: GEM object
+ *
+ * Relax the requirement that the backing buffer is pinned in memory.
+ */
+void drm_gem_unpin(struct drm_gem_object *obj)
+{
+ if (obj->funcs && obj->funcs->unpin)
+ obj->funcs->unpin(obj);
+ else if (obj->dev->driver->gem_prime_unpin)
+ obj->dev->driver->gem_prime_unpin(obj);
+}
+EXPORT_SYMBOL(drm_gem_unpin);
+
+/**
+ * drm_gem_vmap - Map buffer into kernel virtual address space
+ * @obj: GEM object
+ *
+ * Returns:
+ * The kernel virtual address of the buffer mapping, or an ERR_PTR-encoded
+ * negative error code on failure.
+ */
+void *drm_gem_vmap(struct drm_gem_object *obj)
+{
+ void *vaddr;
+
+ if (obj->funcs && obj->funcs->vmap)
+ vaddr = obj->funcs->vmap(obj);
+ else if (obj->dev->driver->gem_prime_vmap)
+ vaddr = obj->dev->driver->gem_prime_vmap(obj);
+ else
+ vaddr = ERR_PTR(-EOPNOTSUPP);
+
+ if (!vaddr)
+ vaddr = ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_vmap);
+
+/**
+ * drm_gem_vunmap - Remove buffer mapping from kernel virtual address space
+ * @obj: GEM object
+ * @vaddr: Virtual address (can be NULL)
+ */
+void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ if (!vaddr)
+ return;
+
+ if (obj->funcs && obj->funcs->vunmap)
+ obj->funcs->vunmap(obj, vaddr);
+ else if (obj->dev->driver->gem_prime_vunmap)
+ obj->dev->driver->gem_prime_vunmap(obj, vaddr);
+}
+EXPORT_SYMBOL(drm_gem_vunmap);
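A sketch of chaining these wrappers in an exporter path; obj is an assumed
struct drm_gem_object pointer and the error handling follows the ERR_PTR
convention documented above:

	void *vaddr;
	int ret;

	ret = drm_gem_pin(obj);
	if (ret)
		return ret;

	vaddr = drm_gem_vmap(obj);
	if (IS_ERR(vaddr)) {
		drm_gem_unpin(obj);
		return PTR_ERR(vaddr);
	}

	/* ... access the buffer through vaddr ... */

	drm_gem_vunmap(obj, vaddr);
	drm_gem_unpin(obj);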
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1d2ced882b66..cc26625b4b33 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -176,6 +176,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
*
* This function frees the backing memory of the CMA GEM object, cleans up the
* GEM object state and frees the memory used to store the object itself.
+ * If the buffer is imported and the virtual address is set, it is released.
* Drivers using the CMA helpers should set this as their
* &drm_driver.gem_free_object_unlocked callback.
*/
@@ -189,6 +190,8 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
cma_obj->vaddr, cma_obj->paddr);
} else if (gem_obj->import_attach) {
+ if (cma_obj->vaddr)
+ dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
}
@@ -575,3 +578,86 @@ void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
+
+static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = {
+ .free = drm_gem_cma_free_object,
+ .print_info = drm_gem_cma_print_info,
+ .get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .vmap = drm_gem_cma_prime_vmap,
+ .vm_ops = &drm_gem_cma_vm_ops,
+};
+
+/**
+ * drm_cma_gem_create_object_default_funcs - Create a CMA GEM object with a
+ * default function table
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This sets the GEM object functions to the default CMA helper functions.
+ * This function can be used as the &drm_driver.gem_create_object callback.
+ *
+ * Returns:
+ * A pointer to an allocated GEM object or NULL on failure.
+ */
+struct drm_gem_object *
+drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_cma_object *cma_obj;
+
+ cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ if (!cma_obj)
+ return NULL;
+
+ cma_obj->base.funcs = &drm_cma_gem_default_funcs;
+
+ return &cma_obj->base;
+}
+EXPORT_SYMBOL(drm_cma_gem_create_object_default_funcs);
+
+/**
+ * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
+ * scatter/gather table and get the virtual address of the buffer
+ * @dev: DRM device
+ * @attach: DMA-BUF attachment
+ * @sgt: Scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table using
+ * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
+ * virtual address. This ensures that a CMA GEM object always has its virtual
+ * address set. This address is released when the object is freed.
+ *
+ * This function can be used as the &drm_driver.gem_prime_import_sg_table
+ * callback. The DRM_GEM_CMA_VMAP_DRIVER_OPS() macro provides a shortcut to set
+ * the necessary DRM driver operations.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *obj;
+ void *vaddr;
+
+ vaddr = dma_buf_vmap(attach->dmabuf);
+ if (!vaddr) {
+ DRM_ERROR("Failed to vmap PRIME buffer\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj)) {
+ dma_buf_vunmap(attach->dmabuf, vaddr);
+ return obj;
+ }
+
+ cma_obj = to_drm_gem_cma_obj(obj);
+ cma_obj->vaddr = vaddr;
+
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
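As the kernel-doc above mentions, DRM_GEM_CMA_VMAP_DRIVER_OPS bundles this
import helper with the matching object functions; a minimal sketch with a
hypothetical driver name (the exact macro contents live in
drm_gem_cma_helper.h):

	static struct drm_driver example_cma_driver = {
		.driver_features = DRIVER_GEM | DRIVER_PRIME | DRIVER_MODESET,
		DRM_GEM_CMA_VMAP_DRIVER_OPS,
		/* ... name, fops, ioctls ... */
	};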
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index ded7a379ac35..acb466d25afc 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -171,7 +171,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
}
min_size = (height - 1) * mode_cmd->pitches[i]
- + width * info->cpp[i]
+ + drm_format_info_min_pitch(info, i, width)
+ mode_cmd->offsets[i];
if (objs[i]->size < min_size) {
diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
deleted file mode 100644
index 5799e2782dd1..000000000000
--- a/drivers/gpu/drm/drm_global.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <drm/drm_global.h>
-
-struct drm_global_item {
- struct mutex mutex;
- void *object;
- int refcount;
-};
-
-static struct drm_global_item glob[DRM_GLOBAL_NUM];
-
-void drm_global_init(void)
-{
- int i;
-
- for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
- struct drm_global_item *item = &glob[i];
- mutex_init(&item->mutex);
- item->object = NULL;
- item->refcount = 0;
- }
-}
-
-void drm_global_release(void)
-{
- int i;
- for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
- struct drm_global_item *item = &glob[i];
- BUG_ON(item->object != NULL);
- BUG_ON(item->refcount != 0);
- }
-}
-
-/**
- * drm_global_item_ref - Initialize and acquire reference to memory
- * object
- * @ref: Object for initialization
- *
- * This initializes a memory object, allocating memory and calling the
- * .init() hook. Further calls will increase the reference count for
- * that item.
- *
- * Returns:
- * Zero on success, non-zero otherwise.
- */
-int drm_global_item_ref(struct drm_global_reference *ref)
-{
- int ret = 0;
- struct drm_global_item *item = &glob[ref->global_type];
-
- mutex_lock(&item->mutex);
- if (item->refcount == 0) {
- ref->object = kzalloc(ref->size, GFP_KERNEL);
- if (unlikely(ref->object == NULL)) {
- ret = -ENOMEM;
- goto error_unlock;
- }
- ret = ref->init(ref);
- if (unlikely(ret != 0))
- goto error_free;
-
- item->object = ref->object;
- } else {
- ref->object = item->object;
- }
-
- ++item->refcount;
- mutex_unlock(&item->mutex);
- return 0;
-
-error_free:
- kfree(ref->object);
- ref->object = NULL;
-error_unlock:
- mutex_unlock(&item->mutex);
- return ret;
-}
-EXPORT_SYMBOL(drm_global_item_ref);
-
-/**
- * drm_global_item_unref - Drop reference to memory
- * object
- * @ref: Object being removed
- *
- * Drop a reference to the memory object and eventually call the
- * release() hook. The allocated object should be dropped in the
- * release() hook or before calling this function
- *
- */
-
-void drm_global_item_unref(struct drm_global_reference *ref)
-{
- struct drm_global_item *item = &glob[ref->global_type];
-
- mutex_lock(&item->mutex);
- BUG_ON(item->refcount == 0);
- BUG_ON(ref->object != item->object);
- if (--item->refcount == 0) {
- ref->release(ref);
- item->object = NULL;
- }
- mutex_unlock(&item->mutex);
-}
-EXPORT_SYMBOL(drm_global_item_unref);
-
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
deleted file mode 100644
index 6b68e9088436..000000000000
--- a/drivers/gpu/drm/drm_info.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * \file drm_info.c
- * DRM info file implementations
- *
- * \author Ben Gamari <bgamari@gmail.com>
- */
-
-/*
- * Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2008 Ben Gamari <bgamari@gmail.com>
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
-
-#include "drm_internal.h"
-#include "drm_legacy.h"
-
-/**
- * Called when "/proc/dri/.../name" is read.
- *
- * Prints the device name together with the bus id if available.
- */
-int drm_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_minor *minor = node->minor;
- struct drm_device *dev = minor->dev;
- struct drm_master *master;
-
- mutex_lock(&dev->master_mutex);
- master = dev->master;
- seq_printf(m, "%s", dev->driver->name);
- if (dev->dev)
- seq_printf(m, " dev=%s", dev_name(dev->dev));
- if (master && master->unique)
- seq_printf(m, " master=%s", master->unique);
- if (dev->unique)
- seq_printf(m, " unique=%s", dev->unique);
- seq_printf(m, "\n");
- mutex_unlock(&dev->master_mutex);
-
- return 0;
-}
-
-/**
- * Called when "/proc/dri/.../clients" is read.
- *
- */
-int drm_clients_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct drm_file *priv;
- kuid_t uid;
-
- seq_printf(m,
- "%20s %5s %3s master a %5s %10s\n",
- "command",
- "pid",
- "dev",
- "uid",
- "magic");
-
- /* dev->filelist is sorted youngest first, but we want to present
- * oldest first (i.e. kernel, servers, clients), so walk backwardss.
- */
- mutex_lock(&dev->filelist_mutex);
- list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
- struct task_struct *task;
-
- rcu_read_lock(); /* locks pid_task()->comm */
- task = pid_task(priv->pid, PIDTYPE_PID);
- uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
- seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
- task ? task->comm : "<unknown>",
- pid_vnr(priv->pid),
- priv->minor->index,
- drm_is_current_master(priv) ? 'y' : 'n',
- priv->authenticated ? 'y' : 'n',
- from_kuid_munged(seq_user_ns(m), uid),
- priv->magic);
- rcu_read_unlock();
- }
- mutex_unlock(&dev->filelist_mutex);
- return 0;
-}
-
-static int drm_gem_one_name_info(int id, void *ptr, void *data)
-{
- struct drm_gem_object *obj = ptr;
- struct seq_file *m = data;
-
- seq_printf(m, "%6d %8zd %7d %8d\n",
- obj->name, obj->size,
- obj->handle_count,
- kref_read(&obj->refcount));
- return 0;
-}
-
-int drm_gem_name_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
-
- seq_printf(m, " name size handles refcount\n");
-
- mutex_lock(&dev->object_name_lock);
- idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
- mutex_unlock(&dev->object_name_lock);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 0c4eb4a9ab31..d9caf205e0b3 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -56,11 +56,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
void drm_minor_release(struct drm_minor *minor);
-/* drm_info.c */
-int drm_name_info(struct seq_file *m, void *data);
-int drm_clients_info(struct seq_file *m, void* data);
-int drm_gem_name_info(struct seq_file *m, void *data);
-
/* drm_vblank.c */
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
void drm_vblank_cleanup(struct drm_device *dev);
@@ -104,6 +99,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
int drm_sysfs_connector_add(struct drm_connector *connector);
void drm_sysfs_connector_remove(struct drm_connector *connector);
+void drm_sysfs_lease_event(struct drm_device *dev);
+
/* drm_gem.c */
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 94bd872d56c4..7e6746b2d704 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -37,6 +37,7 @@
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/nospec.h>
/**
* DOC: getunique and setversion story
@@ -800,13 +801,17 @@ long drm_ioctl(struct file *filp,
if (is_driver_ioctl) {
/* driver ioctl */
- if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+ unsigned int index = nr - DRM_COMMAND_BASE;
+
+ if (index >= dev->driver->num_ioctls)
goto err_i1;
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ index = array_index_nospec(index, dev->driver->num_ioctls);
+ ioctl = &dev->driver->ioctls[index];
} else {
/* core ioctl */
if (nr >= DRM_CORE_IOCTL_COUNT)
goto err_i1;
+ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
ioctl = &drm_ioctls[nr];
}
@@ -888,6 +893,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
if (nr >= DRM_CORE_IOCTL_COUNT)
return false;
+ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
*flags = drm_ioctls[nr].flags;
return true;
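Both hunks follow the standard Spectre-v1 mitigation pattern: bounds-check
first, then clamp the index with array_index_nospec() so speculative
execution cannot proceed with an out-of-range value. A generic sketch, with
idx and table as placeholders:

	if (idx >= ARRAY_SIZE(table))
		return -EINVAL;
	idx = array_index_nospec(idx, ARRAY_SIZE(table));
	entry = &table[idx];	/* safe even under speculation */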
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 24a177ea5417..99cba8ea5d82 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -39,7 +39,6 @@ struct drm_master *drm_lease_owner(struct drm_master *master)
master = master->lessor;
return master;
}
-EXPORT_SYMBOL(drm_lease_owner);
/**
* _drm_find_lessee - find lessee by id (idr_mutex held)
@@ -117,7 +116,6 @@ bool _drm_lease_held(struct drm_file *file_priv, int id)
return _drm_lease_held_master(file_priv->master, id);
}
-EXPORT_SYMBOL(_drm_lease_held);
/**
* drm_lease_held - check drm_mode_object lease status (idr_mutex not held)
@@ -144,7 +142,6 @@ bool drm_lease_held(struct drm_file *file_priv, int id)
mutex_unlock(&master->dev->mode_config.idr_mutex);
return ret;
}
-EXPORT_SYMBOL(drm_lease_held);
/**
* drm_lease_filter_crtcs - restricted crtc set to leased values (idr_mutex not held)
@@ -184,7 +181,6 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
mutex_unlock(&master->dev->mode_config.idr_mutex);
return crtcs_out;
}
-EXPORT_SYMBOL(drm_lease_filter_crtcs);
/*
* drm_lease_create - create a new drm_master with leased objects (idr_mutex not held)
@@ -195,7 +191,7 @@ EXPORT_SYMBOL(drm_lease_filter_crtcs);
* make sure all of the desired objects can be leased, atomically
* leasing them to the new drmmaster.
*
- * ERR_PTR(-EACCESS) some other master holds the title to any object
+ * ERR_PTR(-EACCES) some other master holds the title to any object
* ERR_PTR(-ENOENT) some object is not a valid DRM object for this device
* ERR_PTR(-EBUSY) some other lessee holds title to this object
* ERR_PTR(-EEXIST) same object specified more than once in the provided list
@@ -296,7 +292,7 @@ void drm_lease_destroy(struct drm_master *master)
if (master->lessor) {
/* Tell the master to check the lessee list */
- drm_sysfs_hotplug_event(dev);
+ drm_sysfs_lease_event(dev);
drm_master_put(&master->lessor);
}
@@ -357,9 +353,9 @@ void drm_lease_revoke(struct drm_master *top)
}
static int validate_lease(struct drm_device *dev,
- struct drm_file *lessor_priv,
int object_count,
- struct drm_mode_object **objects)
+ struct drm_mode_object **objects,
+ bool universal_planes)
{
int o;
int has_crtc = -1;
@@ -376,14 +372,14 @@ static int validate_lease(struct drm_device *dev,
if (objects[o]->type == DRM_MODE_OBJECT_CONNECTOR && has_connector == -1)
has_connector = o;
- if (lessor_priv->universal_planes) {
+ if (universal_planes) {
if (objects[o]->type == DRM_MODE_OBJECT_PLANE && has_plane == -1)
has_plane = o;
}
}
if (has_crtc == -1 || has_connector == -1)
return -EINVAL;
- if (lessor_priv->universal_planes && has_plane == -1)
+ if (universal_planes && has_plane == -1)
return -EINVAL;
return 0;
}
@@ -397,6 +393,8 @@ static int fill_object_idr(struct drm_device *dev,
struct drm_mode_object **objects;
u32 o;
int ret;
+ bool universal_planes = READ_ONCE(lessor_priv->universal_planes);
+
objects = kcalloc(object_count, sizeof(struct drm_mode_object *),
GFP_KERNEL);
if (!objects)
@@ -419,14 +417,17 @@ static int fill_object_idr(struct drm_device *dev,
}
if (!drm_mode_object_lease_required(objects[o]->type)) {
+ DRM_DEBUG_KMS("invalid object for lease\n");
ret = -EINVAL;
goto out_free_objects;
}
}
- ret = validate_lease(dev, lessor_priv, object_count, objects);
- if (ret)
+ ret = validate_lease(dev, object_count, objects, universal_planes);
+ if (ret) {
+ DRM_DEBUG_LEASE("lease validation failed\n");
goto out_free_objects;
+ }
/* add their IDs to the lease request - taking into account
universal planes */
@@ -449,7 +450,7 @@ static int fill_object_idr(struct drm_device *dev,
object_id, ret);
goto out_free_objects;
}
- if (obj->type == DRM_MODE_OBJECT_CRTC && !lessor_priv->universal_planes) {
+ if (obj->type == DRM_MODE_OBJECT_CRTC && !universal_planes) {
struct drm_crtc *crtc = obj_to_crtc(obj);
ret = idr_alloc(leases, &drm_lease_idr_object, crtc->primary->base.id, crtc->primary->base.id + 1, GFP_KERNEL);
if (ret < 0) {
@@ -509,15 +510,21 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
/* Do not allow sub-leases */
- if (lessor->lessor)
+ if (lessor->lessor) {
+ DRM_DEBUG_LEASE("recursive leasing not allowed\n");
return -EINVAL;
+ }
/* need some objects */
- if (cl->object_count == 0)
+ if (cl->object_count == 0) {
+ DRM_DEBUG_LEASE("no objects in lease\n");
return -EINVAL;
+ }
- if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK)))
+ if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) {
+ DRM_DEBUG_LEASE("invalid flags\n");
return -EINVAL;
+ }
object_count = cl->object_count;
@@ -532,6 +539,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
object_count, object_ids);
kfree(object_ids);
if (ret) {
+ DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
idr_destroy(&leases);
return ret;
}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index d69e4fc1ee77..40c4349cb939 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -51,7 +51,7 @@
#endif
static void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device * dev)
+ struct drm_device *dev)
{
unsigned long i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
@@ -94,26 +94,26 @@ static void *agp_remap(unsigned long offset, unsigned long size,
}
/** Wrapper around agp_free_memory() */
-void drm_free_agp(struct agp_memory * handle, int pages)
+void drm_free_agp(struct agp_memory *handle, int pages)
{
agp_free_memory(handle);
}
/** Wrapper around agp_bind_memory() */
-int drm_bind_agp(struct agp_memory * handle, unsigned int start)
+int drm_bind_agp(struct agp_memory *handle, unsigned int start)
{
return agp_bind_memory(handle, start);
}
/** Wrapper around agp_unbind_memory() */
-int drm_unbind_agp(struct agp_memory * handle)
+int drm_unbind_agp(struct agp_memory *handle)
{
return agp_unbind_memory(handle);
}
#else /* CONFIG_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
- struct drm_device * dev)
+ struct drm_device *dev)
{
return NULL;
}
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index ee80788f2c40..703bfce975bb 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -297,6 +297,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_crtc_id = prop;
+ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "FB_DAMAGE_CLIPS",
+ 0);
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_fb_damage_clips = prop;
+
prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
"ACTIVE");
if (!prop)
@@ -310,6 +316,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_mode_id = prop;
+ prop = drm_property_create_bool(dev, 0,
+ "VRR_ENABLED");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.prop_vrr_enabled = prop;
+
prop = drm_property_create(dev,
DRM_MODE_PROP_BLOB,
"DEGAMMA_LUT", 0);
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index be8b754eaf60..cd9bc0ce9be0 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -38,7 +38,8 @@ int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
int ret;
mutex_lock(&dev->mode_config.idr_mutex);
- ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
+ ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL,
+ 1, 0, GFP_KERNEL);
if (ret >= 0) {
/*
* Set up the object linking under the protection of the idr
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 02db9ac82d7a..24a750436559 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -716,8 +716,8 @@ int of_get_drm_display_mode(struct device_node *np,
if (bus_flags)
drm_bus_flags_from_videomode(&vm, bus_flags);
- pr_debug("%pOF: got %dx%d display mode from %s\n",
- np, vm.hactive, vm.vactive, np->name);
+ pr_debug("%pOF: got %dx%d display mode\n",
+ np, vm.hactive, vm.vactive);
drm_mode_debug_printmodeline(dmode);
return 0;
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index f1c24ab0ef09..9150fa385bba 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -146,6 +146,21 @@ static struct drm_plane *create_primary_plane(struct drm_device *dev)
* Initialize a CRTC object with a default helper-provided primary plane and no
* cursor plane.
*
+ * Note that we make some assumptions about hardware limitations that may not be
+ * true for all hardware:
+ *
+ * 1. Primary plane cannot be repositioned.
+ * 2. Primary plane cannot be scaled.
+ * 3. Primary plane must cover the entire CRTC.
+ * 4. Subpixel positioning is not supported.
+ * 5. The primary plane must always be on if the CRTC is enabled.
+ *
+ * This is purely a backwards compatibility helper for old drivers. Drivers
+ * should instead implement their own primary plane. Atomic drivers must do so.
+ * Drivers with the above hardware restrictions can look into using &struct
+ * drm_simple_display_pipe, which encapsulates the above limitations into a nice
+ * interface.
+ *
* Returns:
* Zero on success, error code on failure.
*/
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 8a5100685875..51f534db9107 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -56,6 +56,10 @@
* drm_modeset_drop_locks(ctx);
* drm_modeset_acquire_fini(ctx);
*
+ * For convenience this control flow is implemented in
+ * DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END() for the case
+ * where all modeset locks need to be taken through drm_modeset_lock_all_ctx().
+ *
* If all that is needed is a single modeset lock, then the &struct
* drm_modeset_acquire_ctx is not needed and the locking can be simplified
* by passing a NULL instead of ctx in the drm_modeset_lock() call or
@@ -383,6 +387,8 @@ EXPORT_SYMBOL(drm_modeset_unlock);
* Locks acquired with this function should be released by calling the
* drm_modeset_drop_locks() function on @ctx.
*
+ * See also: DRM_MODESET_LOCK_ALL_BEGIN() and DRM_MODESET_LOCK_ALL_END()
+ *
* Returns: 0 on success or a negative error-code on failure.
*/
int drm_modeset_lock_all_ctx(struct drm_device *dev,
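The caller-side shape of the new macros, matching the setplane conversion
later in this patch:

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
				   DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
	/* ... modeset updates under all locks; -EDEADLK backoff and retry
	 * are handled inside the macros ...
	 */
	DRM_MODESET_LOCK_ALL_END(ctx, ret);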
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index ee4a5e1221f1..52e445bb1aa5 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -59,6 +59,14 @@ static const struct drm_dmi_panel_orientation_data gpd_win = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data gpd_win2 = {
+ .width = 720,
+ .height = 1280,
+ .bios_dates = (const char * const []){
+ "12/07/2017", "05/24/2018", "06/29/2018", NULL },
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
.width = 800,
.height = 1280,
@@ -106,6 +114,14 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win,
+ }, { /* GPD Win 2 (too generic strings, also match on bios date) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+ },
+ .driver_data = (void *)&gpd_win2,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 48f615d38931..a9d9df6c85ad 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -61,15 +61,14 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
return NULL;
dmah->size = size;
- dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+ dmah->vaddr = dma_zalloc_coherent(&dev->pdev->dev, size, &dmah->busaddr,
+ GFP_KERNEL | __GFP_COMP);
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
- memset(dmah->vaddr, 0, size);
-
/* XXX - Is virt_to_page() legal for consistent mem? */
/* Reserve */
for (addr = (unsigned long)dmah->vaddr, sz = size;
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 1fa98bd12003..5f650d8fc66b 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -636,6 +636,29 @@ static int __setplane_check(struct drm_plane *plane,
return 0;
}
+/**
+ * drm_any_plane_has_format - Check whether any plane supports this format and modifier combination
+ * @dev: DRM device
+ * @format: pixel format (DRM_FORMAT_*)
+ * @modifier: data layout modifier
+ *
+ * Returns:
+ * Whether at least one plane supports the specified format and modifier combination.
+ */
+bool drm_any_plane_has_format(struct drm_device *dev,
+ u32 format, u64 modifier)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, dev) {
+ if (drm_plane_check_pixel_format(plane, format, modifier) == 0)
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(drm_any_plane_has_format);
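A plausible caller is a driver's .fb_create hook, rejecting framebuffers no
plane can scan out; mode_cmd is the usual struct drm_mode_fb_cmd2 argument:

	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		DRM_DEBUG_KMS("unsupported format/modifier combination\n");
		return ERR_PTR(-EINVAL);
	}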
+
/*
* __setplane_internal - setplane handler for internal callers
*
@@ -744,11 +767,8 @@ static int setplane_internal(struct drm_plane *plane,
struct drm_modeset_acquire_ctx ctx;
int ret;
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-retry:
- ret = drm_modeset_lock_all_ctx(plane->dev, &ctx);
- if (ret)
- goto fail;
+ DRM_MODESET_LOCK_ALL_BEGIN(plane->dev, ctx,
+ DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
if (drm_drv_uses_atomic_modeset(plane->dev))
ret = __setplane_atomic(plane, crtc, fb,
@@ -759,14 +779,7 @@ retry:
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h, &ctx);
-fail:
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index a393756b664e..0fff72dcd06d 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -42,11 +42,8 @@
* primary plane support on top of the normal CRTC configuration interface.
* Since the legacy &drm_mode_config_funcs.set_config interface ties the primary
* plane together with the CRTC state this does not allow userspace to disable
- * the primary plane itself. To avoid too much duplicated code use
- * drm_plane_helper_check_update() which can be used to enforce the same
- * restrictions as primary planes had thus. The default primary plane only
- * expose XRBG8888 and ARGB8888 as valid pixel formats for the attached
- * framebuffer.
+ * the primary plane itself. The default primary plane only exposes XRGB8888
+ * and ARGB8888 as valid pixel formats for the attached framebuffer.
*
* Drivers are highly recommended to implement proper support for primary
* planes, and newly merged drivers must not rely upon these transitional
@@ -100,43 +97,17 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
return count;
}
-/**
- * drm_plane_helper_check_update() - Check plane update for validity
- * @plane: plane object to update
- * @crtc: owning CRTC of owning plane
- * @fb: framebuffer to flip onto plane
- * @src: source coordinates in 16.16 fixed point
- * @dst: integer destination coordinates
- * @rotation: plane rotation
- * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
- * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
- * @can_position: is it legal to position the plane such that it
- * doesn't cover the entire crtc? This will generally
- * only be false for primary planes.
- * @can_update_disabled: can the plane be updated while the crtc
- * is disabled?
- * @visible: output parameter indicating whether plane is still visible after
- * clipping
- *
- * Checks that a desired plane update is valid. Drivers that provide
- * their own plane handling rather than helper-provided implementations may
- * still wish to call this function to avoid duplication of error checking
- * code.
- *
- * RETURNS:
- * Zero if update appears valid, error code on failure
- */
-int drm_plane_helper_check_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_rect *src,
- struct drm_rect *dst,
- unsigned int rotation,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled,
- bool *visible)
+static int drm_plane_helper_check_update(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_rect *src,
+ struct drm_rect *dst,
+ unsigned int rotation,
+ int min_scale,
+ int max_scale,
+ bool can_position,
+ bool can_update_disabled,
+ bool *visible)
{
struct drm_plane_state plane_state = {
.plane = plane,
@@ -173,52 +144,14 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
return 0;
}
-EXPORT_SYMBOL(drm_plane_helper_check_update);
-/**
- * drm_primary_helper_update() - Helper for primary plane update
- * @plane: plane object to update
- * @crtc: owning CRTC of owning plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of primary plane on crtc
- * @crtc_y: y offset of primary plane on crtc
- * @crtc_w: width of primary plane rectangle on crtc
- * @crtc_h: height of primary plane rectangle on crtc
- * @src_x: x offset of @fb for panning
- * @src_y: y offset of @fb for panning
- * @src_w: width of source rectangle in @fb
- * @src_h: height of source rectangle in @fb
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane update handler for primary planes. This is handler
- * is called in response to a userspace SetPlane operation on the plane with a
- * non-NULL framebuffer. We call the driver's modeset handler to update the
- * framebuffer.
- *
- * SetPlane() on a primary plane of a disabled CRTC is not supported, and will
- * return an error.
- *
- * Note that we make some assumptions about hardware limitations that may not be
- * true for all hardware --
- *
- * 1. Primary plane cannot be repositioned.
- * 2. Primary plane cannot be scaled.
- * 3. Primary plane must cover the entire CRTC.
- * 4. Subpixel positioning is not supported.
- *
- * Drivers for hardware that don't have these restrictions can provide their
- * own implementation rather than using this helper.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
+static int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_mode_set set = {
.crtc = crtc,
@@ -285,35 +218,12 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
kfree(connector_list);
return ret;
}
-EXPORT_SYMBOL(drm_primary_helper_update);
-/**
- * drm_primary_helper_disable() - Helper for primary plane disable
- * @plane: plane to disable
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane disable handler for primary planes. This is handler
- * is called in response to a userspace SetPlane operation on the plane with a
- * NULL framebuffer parameter. It unconditionally fails the disable call with
- * -EINVAL the only way to disable the primary plane without driver support is
- * to disable the entire CRTC. Which does not match the plane
- * &drm_plane_funcs.disable_plane hook.
- *
- * Note that some hardware may be able to disable the primary plane without
- * disabling the whole CRTC. Drivers for such hardware should provide their
- * own disable handler that disables just the primary plane (and they'll likely
- * need to provide their own update handler as well to properly re-enable a
- * disabled primary plane).
- *
- * RETURNS:
- * Unconditionally returns -EINVAL.
- */
-int drm_primary_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
+static int drm_primary_helper_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx)
{
return -EINVAL;
}
-EXPORT_SYMBOL(drm_primary_helper_disable);
/**
* drm_primary_helper_destroy() - Helper for primary plane destruction
@@ -336,200 +246,3 @@ const struct drm_plane_funcs drm_primary_helper_funcs = {
.destroy = drm_primary_helper_destroy,
};
EXPORT_SYMBOL(drm_primary_helper_funcs);
-
-int drm_plane_helper_commit(struct drm_plane *plane,
- struct drm_plane_state *plane_state,
- struct drm_framebuffer *old_fb)
-{
- const struct drm_plane_helper_funcs *plane_funcs;
- struct drm_crtc *crtc[2];
- const struct drm_crtc_helper_funcs *crtc_funcs[2];
- int i, ret = 0;
-
- plane_funcs = plane->helper_private;
-
- /* Since this is a transitional helper we can't assume that plane->state
- * is always valid. Hence we need to use plane->crtc instead of
- * plane->state->crtc as the old crtc. */
- crtc[0] = plane->crtc;
- crtc[1] = crtc[0] != plane_state->crtc ? plane_state->crtc : NULL;
-
- for (i = 0; i < 2; i++)
- crtc_funcs[i] = crtc[i] ? crtc[i]->helper_private : NULL;
-
- if (plane_funcs->atomic_check) {
- ret = plane_funcs->atomic_check(plane, plane_state);
- if (ret)
- goto out;
- }
-
- if (plane_funcs->prepare_fb && plane_state->fb != old_fb) {
- ret = plane_funcs->prepare_fb(plane,
- plane_state);
- if (ret)
- goto out;
- }
-
- /* Point of no return, commit sw state. */
- swap(plane->state, plane_state);
-
- for (i = 0; i < 2; i++) {
- if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
- crtc_funcs[i]->atomic_begin(crtc[i], crtc[i]->state);
- }
-
- /*
- * Drivers may optionally implement the ->atomic_disable callback, so
- * special-case that here.
- */
- if (drm_atomic_plane_disabling(plane_state, plane->state) &&
- plane_funcs->atomic_disable)
- plane_funcs->atomic_disable(plane, plane_state);
- else
- plane_funcs->atomic_update(plane, plane_state);
-
- for (i = 0; i < 2; i++) {
- if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
- crtc_funcs[i]->atomic_flush(crtc[i], crtc[i]->state);
- }
-
- /*
- * If we only moved the plane and didn't change fb's, there's no need to
- * wait for vblank.
- */
- if (plane->state->fb == old_fb)
- goto out;
-
- for (i = 0; i < 2; i++) {
- if (!crtc[i])
- continue;
-
- if (crtc[i]->cursor == plane)
- continue;
-
- /* There's no other way to figure out whether the crtc is running. */
- ret = drm_crtc_vblank_get(crtc[i]);
- if (ret == 0) {
- drm_crtc_wait_one_vblank(crtc[i]);
- drm_crtc_vblank_put(crtc[i]);
- }
-
- ret = 0;
- }
-
- if (plane_funcs->cleanup_fb)
- plane_funcs->cleanup_fb(plane, plane_state);
-out:
- if (plane->funcs->atomic_destroy_state)
- plane->funcs->atomic_destroy_state(plane, plane_state);
- else
- drm_atomic_helper_plane_destroy_state(plane, plane_state);
-
- return ret;
-}
-
-/**
- * drm_plane_helper_update() - Transitional helper for plane update
- * @plane: plane object to update
- * @crtc: owning CRTC of owning plane
- * @fb: framebuffer to flip onto plane
- * @crtc_x: x offset of primary plane on crtc
- * @crtc_y: y offset of primary plane on crtc
- * @crtc_w: width of primary plane rectangle on crtc
- * @crtc_h: height of primary plane rectangle on crtc
- * @src_x: x offset of @fb for panning
- * @src_y: y offset of @fb for panning
- * @src_w: width of source rectangle in @fb
- * @src_h: height of source rectangle in @fb
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane update handler using the atomic plane update
- * functions. It is fully left to the driver to check plane constraints and
- * handle corner-cases like a fully occluded or otherwise invisible plane.
- *
- * This is useful for piecewise transitioning of a driver to the atomic helpers.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_plane_state *plane_state;
-
- if (plane->funcs->atomic_duplicate_state)
- plane_state = plane->funcs->atomic_duplicate_state(plane);
- else {
- if (!plane->state)
- drm_atomic_helper_plane_reset(plane);
-
- plane_state = drm_atomic_helper_plane_duplicate_state(plane);
- }
- if (!plane_state)
- return -ENOMEM;
- plane_state->plane = plane;
-
- plane_state->crtc = crtc;
- drm_atomic_set_fb_for_plane(plane_state, fb);
- plane_state->crtc_x = crtc_x;
- plane_state->crtc_y = crtc_y;
- plane_state->crtc_h = crtc_h;
- plane_state->crtc_w = crtc_w;
- plane_state->src_x = src_x;
- plane_state->src_y = src_y;
- plane_state->src_h = src_h;
- plane_state->src_w = src_w;
-
- return drm_plane_helper_commit(plane, plane_state, plane->fb);
-}
-EXPORT_SYMBOL(drm_plane_helper_update);
-
-/**
- * drm_plane_helper_disable() - Transitional helper for plane disable
- * @plane: plane to disable
- * @ctx: lock acquire context, not used here
- *
- * Provides a default plane disable handler using the atomic plane update
- * functions. It is fully left to the driver to check plane constraints and
- * handle corner-cases like a fully occluded or otherwise invisible plane.
- *
- * This is useful for piecewise transitioning of a driver to the atomic helpers.
- *
- * RETURNS:
- * Zero on success, error code on failure
- */
-int drm_plane_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct drm_plane_state *plane_state;
- struct drm_framebuffer *old_fb;
-
- /* crtc helpers love to call disable functions for already disabled hw
- * functions. So cope with that. */
- if (!plane->crtc)
- return 0;
-
- if (plane->funcs->atomic_duplicate_state)
- plane_state = plane->funcs->atomic_duplicate_state(plane);
- else {
- if (!plane->state)
- drm_atomic_helper_plane_reset(plane);
-
- plane_state = drm_atomic_helper_plane_duplicate_state(plane);
- }
- if (!plane_state)
- return -ENOMEM;
- plane_state->plane = plane;
-
- plane_state->crtc = NULL;
- old_fb = plane_state->fb;
- drm_atomic_set_fb_for_plane(plane_state, NULL);
-
- return drm_plane_helper_commit(plane, plane_state, old_fb);
-}
-EXPORT_SYMBOL(drm_plane_helper_disable);
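With drm_plane_helper_update()/drm_plane_helper_disable() gone and the check/update helpers made static, drivers are expected to be fully converted to the atomic helpers. A sketch of the replacement wiring (the foo_* name is illustrative):

	static const struct drm_plane_funcs foo_plane_funcs = {
		.update_plane		= drm_atomic_helper_update_plane,
		.disable_plane		= drm_atomic_helper_disable_plane,
		.destroy		= drm_plane_cleanup,
		.reset			= drm_atomic_helper_plane_reset,
		.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
	};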
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 3f0205fc0a1a..231e3f6d5f41 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -199,7 +199,6 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
{
struct drm_prime_attachment *prime_attach;
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
if (!prime_attach)
@@ -208,10 +207,7 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
prime_attach->dir = DMA_NONE;
attach->priv = prime_attach;
- if (!dev->driver->gem_prime_pin)
- return 0;
-
- return dev->driver->gem_prime_pin(obj);
+ return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);
@@ -228,7 +224,6 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
{
struct drm_prime_attachment *prime_attach = attach->priv;
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
if (prime_attach) {
struct sg_table *sgt = prime_attach->sgt;
@@ -247,8 +242,7 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
attach->priv = NULL;
}
- if (dev->driver->gem_prime_unpin)
- dev->driver->gem_prime_unpin(obj);
+ drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);
@@ -310,7 +304,10 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
if (WARN_ON(prime_attach->dir != DMA_NONE))
return ERR_PTR(-EBUSY);
- sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+ if (obj->funcs)
+ sgt = obj->funcs->get_sg_table(obj);
+ else
+ sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
if (!IS_ERR(sgt)) {
if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
@@ -406,12 +403,13 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
+ void *vaddr;
- if (dev->driver->gem_prime_vmap)
- return dev->driver->gem_prime_vmap(obj);
- else
- return NULL;
+ vaddr = drm_gem_vmap(obj);
+ if (IS_ERR(vaddr))
+ vaddr = NULL;
+
+ return vaddr;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
@@ -426,42 +424,12 @@ EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_gem_object *obj = dma_buf->priv;
- struct drm_device *dev = obj->dev;
- if (dev->driver->gem_prime_vunmap)
- dev->driver->gem_prime_vunmap(obj, vaddr);
+ drm_gem_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
/**
- * drm_gem_dmabuf_kmap - map implementation for GEM
- * @dma_buf: buffer to be mapped
- * @page_num: page number within the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.map callback.
- */
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
- return NULL;
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kmap);
-
-/**
- * drm_gem_dmabuf_kunmap - unmap implementation for GEM
- * @dma_buf: buffer to be unmapped
- * @page_num: page number within the buffer
- * @addr: virtual address of the buffer
- *
- * Not implemented. This can be used as the &dma_buf_ops.unmap callback.
- */
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
- void *addr)
-{
-
-}
-EXPORT_SYMBOL(drm_gem_dmabuf_kunmap);
-
-/**
* drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
* @dma_buf: buffer to be mapped
* @vma: virtual address range
@@ -489,8 +457,6 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .map = drm_gem_dmabuf_kmap,
- .unmap = drm_gem_dmabuf_kunmap,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
@@ -559,7 +525,12 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
return dmabuf;
}
- dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (obj->funcs && obj->funcs->export)
+ dmabuf = obj->funcs->export(obj, flags);
+ else if (dev->driver->gem_prime_export)
+ dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ else
+ dmabuf = drm_gem_prime_export(dev, obj, flags);
if (IS_ERR(dmabuf)) {
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
@@ -679,6 +650,52 @@ out_unlock:
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
/**
+ * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
+ * @obj: GEM object
+ * @vma: Virtual address range
+ *
+ * This function sets up a userspace mapping for PRIME exported buffers using
+ * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
+ * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
+ * called to set up the mapping.
+ *
+ * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
+ */
+int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct drm_file *priv;
+ struct file *fil;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ fil = kzalloc(sizeof(*fil), GFP_KERNEL);
+ if (!priv || !fil) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Used by drm_gem_mmap() to lookup the GEM object */
+ priv->minor = obj->dev->primary;
+ fil->private_data = priv;
+
+ ret = drm_vma_node_allow(&obj->vma_node, priv);
+ if (ret)
+ goto out;
+
+ vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);
+
+ ret = obj->dev->driver->fops->mmap(fil, vma);
+
+ drm_vma_node_revoke(&obj->vma_node, priv);
+out:
+ kfree(priv);
+ kfree(fil);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prime_mmap);
+
+/**
* drm_gem_prime_import_dev - core implementation of the import callback
* @dev: drm_device to import into
* @dma_buf: dma-buf object to import
@@ -792,7 +809,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
/* never seen this one, need to import */
mutex_lock(&dev->object_name_lock);
- obj = dev->driver->gem_prime_import(dev, dma_buf);
+ if (dev->driver->gem_prime_import)
+ obj = dev->driver->gem_prime_import(dev, dma_buf);
+ else
+ obj = drm_gem_prime_import(dev, dma_buf);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto out_unlock;
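The new drm_gem_prime_mmap() reuses the driver's regular GEM mmap path, so a driver can opt in by pointing its &drm_driver.gem_prime_mmap hook at it. A minimal sketch (foo_* names are illustrative; the driver's fops->mmap must be the GEM mmap handler for this to work):

	static const struct file_operations foo_fops = {
		.owner	= THIS_MODULE,
		.mmap	= drm_gem_mmap,
	};

	static struct drm_driver foo_driver = {
		.fops		= &foo_fops,
		.gem_prime_mmap	= drm_gem_prime_mmap,
	};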
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 51fa978f0d23..917812448d1b 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -190,6 +190,13 @@ static void drm_simple_kms_plane_cleanup_fb(struct drm_plane *plane,
pipe->funcs->cleanup_fb(pipe, state);
}
+static bool drm_simple_kms_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
.prepare_fb = drm_simple_kms_plane_prepare_fb,
.cleanup_fb = drm_simple_kms_plane_cleanup_fb,
@@ -204,6 +211,7 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .format_mod_supported = drm_simple_kms_format_mod_supported,
};
/**
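drm_simple_kms_format_mod_supported() restricts simple display pipes to linear buffers. A driver with additional layouts would supply its own hook instead; as a hypothetical sketch (FOO_FORMAT_MOD_TILED is illustrative, not a real modifier):

	static bool foo_format_mod_supported(struct drm_plane *plane,
					     uint32_t format, uint64_t modifier)
	{
		return modifier == DRM_FORMAT_MOD_LINEAR ||
		       modifier == FOO_FORMAT_MOD_TILED;
	}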
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 5c2091dbd230..db30a0e89db8 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -56,22 +56,6 @@
#include "drm_internal.h"
#include <drm/drm_syncobj.h>
-struct drm_syncobj_stub_fence {
- struct dma_fence base;
- spinlock_t lock;
-};
-
-static const char *drm_syncobj_stub_fence_get_name(struct dma_fence *fence)
-{
- return "syncobjstub";
-}
-
-static const struct dma_fence_ops drm_syncobj_stub_fence_ops = {
- .get_driver_name = drm_syncobj_stub_fence_get_name,
- .get_timeline_name = drm_syncobj_stub_fence_get_name,
-};
-
-
/**
* drm_syncobj_find - lookup and reference a sync object.
* @file_private: drm file private pointer
@@ -113,8 +97,6 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
{
int ret;
- WARN_ON(*fence);
-
*fence = drm_syncobj_fence_get(syncobj);
if (*fence)
return 1;
@@ -158,13 +140,11 @@ void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
- * @point: timeline point
* @fence: fence to install in sync file.
*
- * This replaces the fence on a sync object, or a timeline point fence.
+ * This replaces the fence on a sync object.
*/
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
- u64 point,
struct dma_fence *fence)
{
struct dma_fence *old_fence;
@@ -192,23 +172,18 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
}
EXPORT_SYMBOL(drm_syncobj_replace_fence);
-static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
+/**
+ * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
+ * @syncobj: sync object to assign the fence on
+ *
+ * Assign an already signaled stub fence to the sync object.
+ */
+static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
- struct drm_syncobj_stub_fence *fence;
- fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (fence == NULL)
- return -ENOMEM;
-
- spin_lock_init(&fence->lock);
- dma_fence_init(&fence->base, &drm_syncobj_stub_fence_ops,
- &fence->lock, 0, 0);
- dma_fence_signal(&fence->base);
-
- drm_syncobj_replace_fence(syncobj, 0, &fence->base);
-
- dma_fence_put(&fence->base);
+ struct dma_fence *fence = dma_fence_get_stub();
- return 0;
+ drm_syncobj_replace_fence(syncobj, fence);
+ dma_fence_put(fence);
}
/**
@@ -216,6 +191,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
* @file_private: drm file private pointer
* @handle: sync object handle to lookup.
* @point: timeline point
+ * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, or 0
* @fence: out parameter for the fence
*
* This is just a convenience function that combines drm_syncobj_find() and
@@ -226,7 +202,7 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
* dma_fence_put().
*/
int drm_syncobj_find_fence(struct drm_file *file_private,
- u32 handle, u64 point,
+ u32 handle, u64 point, u64 flags,
struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
@@ -255,7 +231,7 @@ void drm_syncobj_free(struct kref *kref)
struct drm_syncobj *syncobj = container_of(kref,
struct drm_syncobj,
refcount);
- drm_syncobj_replace_fence(syncobj, 0, NULL);
+ drm_syncobj_replace_fence(syncobj, NULL);
kfree(syncobj);
}
EXPORT_SYMBOL(drm_syncobj_free);
@@ -275,7 +251,6 @@ EXPORT_SYMBOL(drm_syncobj_free);
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
struct dma_fence *fence)
{
- int ret;
struct drm_syncobj *syncobj;
syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
@@ -286,16 +261,11 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
INIT_LIST_HEAD(&syncobj->cb_list);
spin_lock_init(&syncobj->lock);
- if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
- ret = drm_syncobj_assign_null_handle(syncobj);
- if (ret < 0) {
- drm_syncobj_put(syncobj);
- return ret;
- }
- }
+ if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
+ drm_syncobj_assign_null_handle(syncobj);
if (fence)
- drm_syncobj_replace_fence(syncobj, 0, fence);
+ drm_syncobj_replace_fence(syncobj, fence);
*out_syncobj = syncobj;
return 0;
@@ -480,7 +450,7 @@ static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
return -ENOENT;
}
- drm_syncobj_replace_fence(syncobj, 0, fence);
+ drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence);
drm_syncobj_put(syncobj);
return 0;
@@ -497,7 +467,7 @@ static int drm_syncobj_export_sync_file(struct drm_file *file_private,
if (fd < 0)
return fd;
- ret = drm_syncobj_find_fence(file_private, handle, 0, &fence);
+ ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
if (ret)
goto err_put_fd;
@@ -719,9 +689,6 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
for (i = 0; i < count; ++i) {
- if (entries[i].fence)
- continue;
-
drm_syncobj_fence_get_or_add_callback(syncobjs[i],
&entries[i].fence,
&entries[i].syncobj_cb,
@@ -954,7 +921,7 @@ drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
return ret;
for (i = 0; i < args->count_handles; i++)
- drm_syncobj_replace_fence(syncobjs[i], 0, NULL);
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
drm_syncobj_array_free(syncobjs, args->count_handles);
@@ -986,11 +953,8 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
- for (i = 0; i < args->count_handles; i++) {
- ret = drm_syncobj_assign_null_handle(syncobjs[i]);
- if (ret < 0)
- break;
- }
+ for (i = 0; i < args->count_handles; i++)
+ drm_syncobj_assign_null_handle(syncobjs[i]);
drm_syncobj_array_free(syncobjs, args->count_handles);
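For out-of-tree callers, the signature changes above boil down to dropping the unused timeline point from drm_syncobj_replace_fence() and passing wait flags to drm_syncobj_find_fence():

	/* before */
	drm_syncobj_replace_fence(syncobj, 0, fence);
	ret = drm_syncobj_find_fence(file_private, handle, 0, &fence);

	/* after */
	drm_syncobj_replace_fence(syncobj, fence);
	ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);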
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index b3c1daad1169..ecb7b33002bb 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
connector->kdev = NULL;
}
+void drm_sysfs_lease_event(struct drm_device *dev)
+{
+ char *event_string = "LEASE=1";
+ char *envp[] = { event_string, NULL };
+
+ DRM_DEBUG("generating lease event\n");
+
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+
/**
* drm_sysfs_hotplug_event - generate a DRM uevent
* @dev: DRM device
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 83c1f46670bf..52802e6049e0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -550,7 +550,7 @@ out_register:
out_bind:
kfree(priv);
out_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -567,7 +567,7 @@ static void etnaviv_unbind(struct device *dev)
drm->dev_private = NULL;
kfree(priv);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops etnaviv_master_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 9146e30e24a6..3fbb4855396c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -118,6 +118,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
+ unsigned long flags;
/* Only catch the first event, or when manually re-armed */
if (!etnaviv_dump_core)
@@ -134,13 +135,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
- spin_lock(&gpu->sched.job_list_lock);
+ spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
file_size += submit->cmdbuf.size;
n_obj++;
}
- spin_unlock(&gpu->sched.job_list_lock);
+ spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Add in the active buffer objects */
list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -182,14 +183,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer));
- spin_lock(&gpu->sched.job_list_lock);
+ spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf));
}
- spin_unlock(&gpu->sched.job_list_lock);
+ spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);
/* Reserve space for the bomap */
if (n_bomap_pages) {
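The irqsave conversion follows the usual rule for a lock that may also be taken from interrupt context (the scheduler touches job_list_lock from its completion path); the general pattern, as a sketch:

	unsigned long flags;

	spin_lock_irqsave(&gpu->sched.job_list_lock, flags);
	/* walk or modify the mirror list; local interrupts stay off */
	spin_unlock_irqrestore(&gpu->sched.job_list_lock, flags);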
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 983e67f19e45..30875f8f2933 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -179,7 +179,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
struct reservation_object *robj = bo->obj->resv;
if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
- ret = reservation_object_reserve_shared(robj);
+ ret = reservation_object_reserve_shared(robj, 1);
if (ret)
return ret;
}
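reservation_object_reserve_shared() gained a num_fences argument so callers can reserve several shared-fence slots at once; reserving a single slot before adding the fence remains the common pattern, roughly:

	ret = reservation_object_reserve_shared(robj, 1);
	if (ret)
		return ret;
	/* later, once the job is queued: */
	reservation_object_add_shared_fence(robj, submit->out_fence);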
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 9b476368aa31..49a6763693f1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -105,8 +105,6 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
change = dma_addr - gpu->hangcheck_dma_addr;
if (change < 0 || change > 16) {
gpu->hangcheck_dma_addr = dma_addr;
- schedule_delayed_work(&sched_job->sched->work_tdr,
- sched_job->sched->timeout);
return;
}
@@ -127,6 +125,8 @@ static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ drm_sched_job_cleanup(sched_job);
+
etnaviv_submit_put(submit);
}
@@ -159,6 +159,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
submit->out_fence, 0,
INT_MAX, GFP_KERNEL);
if (submit->out_fence_id < 0) {
+ drm_sched_job_cleanup(&submit->sched_job);
ret = -ENOMEM;
goto out_unlock;
}
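drm_sched_job_cleanup() must now balance every successful drm_sched_job_init(), both in the free_job callback and on any error path before the job is pushed; schematically:

	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
				 submit->ctx);
	if (ret)
		goto out_unlock;

	/* ... if anything fails after init but before the push: */
	drm_sched_job_cleanup(&submit->sched_job);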
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 208bc27be3cc..3691a140c950 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,11 +10,6 @@ config DRM_EXYNOS
if DRM_EXYNOS
-config DRM_EXYNOS_IOMMU
- bool
- depends on EXYNOS_IOMMU
- default y
-
comment "CRTCs"
config DRM_EXYNOS_FIMD
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 2ad146bbf4f5..2fd2f3ee4fcf 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -4,10 +4,9 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
- exynos_drm_gem.o exynos_drm_plane.o
+ exynos_drm_gem.o exynos_drm_plane.o exynos_drm_dma.o
exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
-exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index aef487dd8731..5b4e0e8b23bc 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -25,7 +25,6 @@
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
#include "regs-decon5433.h"
#define DSD_CFG_MUX 0x1004
@@ -84,6 +83,14 @@ static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
[CURSON_WIN] = DRM_PLANE_TYPE_CURSOR,
};
+static const unsigned int capabilities[WINDOWS_NR] = {
+ 0,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+ EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND,
+};
+
static inline void decon_set_bits(struct decon_context *ctx, u32 reg, u32 mask,
u32 val)
{
@@ -252,11 +259,76 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
}
+static void decon_win_set_bldeq(struct decon_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
+{
+ u32 mask = BLENDERQ_A_FUNC_F(0xf) | BLENDERQ_B_FUNC_F(0xf);
+ u32 val = 0;
+
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ case DRM_MODE_BLEND_COVERAGE:
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA_A);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ break;
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA0);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ } else {
+ val |= BLENDERQ_A_FUNC_F(BLENDERQ_ONE);
+ val |= BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);
+ }
+ break;
+ }
+ decon_set_bits(ctx, DECON_BLENDERQx(win), mask, val);
+}
+
+static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
+ unsigned int alpha, unsigned int pixel_alpha)
+{
+ u32 win_alpha = alpha >> 8;
+ u32 val = 0;
+
+ switch (pixel_alpha) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ break;
+ case DRM_MODE_BLEND_COVERAGE:
+ case DRM_MODE_BLEND_PREMULTI:
+ default:
+ val |= WINCONx_ALPHA_SEL_F;
+ val |= WINCONx_BLD_PIX_F;
+ val |= WINCONx_ALPHA_MUL_F;
+ break;
+ }
+ decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_BLEND_MODE_MASK, val);
+
+ if (alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ val = VIDOSD_Wx_ALPHA_R_F(win_alpha) |
+ VIDOSD_Wx_ALPHA_G_F(win_alpha) |
+ VIDOSD_Wx_ALPHA_B_F(win_alpha);
+ decon_set_bits(ctx, DECON_VIDOSDxC(win),
+ VIDOSDxC_ALPHA0_RGB_MASK, val);
+ decon_set_bits(ctx, DECON_BLENDCON, BLEND_NEW, BLEND_NEW);
+ }
+}
+
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
struct drm_framebuffer *fb)
{
+ struct exynos_drm_plane plane = ctx->planes[win];
+ struct exynos_drm_plane_state *state =
+ to_exynos_plane_state(plane.base.state);
+ unsigned int alpha = state->base.alpha;
+ unsigned int pixel_alpha;
unsigned long val;
+ if (fb->format->has_alpha)
+ pixel_alpha = state->base.pixel_blend_mode;
+ else
+ pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE;
+
val = readl(ctx->addr + DECON_WINCONx(win));
val &= WINCONx_ENWIN_F;
@@ -279,7 +351,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
case DRM_FORMAT_ARGB8888:
default:
val |= WINCONx_BPPMODE_32BPP_A8888;
- val |= WINCONx_WSWP_F | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
+ val |= WINCONx_WSWP_F;
val |= WINCONx_BURSTLEN_16WORD;
break;
}
@@ -298,8 +370,12 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
+ decon_set_bits(ctx, DECON_WINCONx(win), ~WINCONx_BLEND_MODE_MASK, val);
- writel(val, ctx->addr + DECON_WINCONx(win));
+ if (win > 0) {
+ decon_win_set_bldmod(ctx, win, alpha, pixel_alpha);
+ decon_win_set_bldeq(ctx, win, alpha, pixel_alpha);
+ }
}
static void decon_shadow_protect(struct decon_context *ctx, bool protect)
@@ -552,6 +628,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
ctx->configs[win].num_pixel_formats = ARRAY_SIZE(decon_formats);
ctx->configs[win].zpos = win - ctx->first_win;
ctx->configs[win].type = decon_win_types[win];
+ ctx->configs[win].capabilities = capabilities[win];
ret = exynos_plane_init(drm_dev, &ctx->planes[win], win,
&ctx->configs[win]);
@@ -569,7 +646,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
decon_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev);
}
static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -579,7 +656,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
decon_disable(ctx->crtc);
/* detach this sub driver from iommu mapping if supported. */
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
}
static const struct component_ops decon_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 88cbd000eb09..381aa3d60e37 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -30,7 +30,6 @@
#include "exynos_drm_plane.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_iommu.h"
#include "regs-decon7.h"
/*
@@ -133,13 +132,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
decon_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, ctx->dev);
+ return exynos_drm_register_dma(drm_dev, ctx->dev);
}
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
}
static u32 decon_calc_clkdiv(struct decon_context *ctx,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
new file mode 100644
index 000000000000..3432c5ee9f0c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd.
+// Author: Inki Dae <inki.dae@samsung.com>
+// Author: Andrzej Hajda <a.hajda@samsung.com>
+
+#include <drm/drmP.h>
+#include <drm/exynos_drm.h>
+#include <linux/dma-iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "exynos_drm_drv.h"
+
+#if defined(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#else
+#define arm_iommu_create_mapping(...) ({ NULL; })
+#define arm_iommu_attach_device(...) ({ -ENODEV; })
+#define arm_iommu_release_mapping(...) ({ })
+#define arm_iommu_detach_device(...) ({ })
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
+#if !defined(CONFIG_IOMMU_DMA)
+#define iommu_dma_init_domain(...) ({ -EINVAL; })
+#endif
+
+#define EXYNOS_DEV_ADDR_START 0x20000000
+#define EXYNOS_DEV_ADDR_SIZE 0x40000000
+
+static inline int configure_dma_max_seg_size(struct device *dev)
+{
+ if (!dev->dma_parms)
+ dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ return 0;
+}
+
+static inline void clear_dma_max_seg_size(struct device *dev)
+{
+ kfree(dev->dma_parms);
+ dev->dma_parms = NULL;
+}
+
+/*
+ * drm_iommu_attach_device - attach device to IOMMU mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach their device to
+ * the IOMMU mapping.
+ */
+static int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ int ret;
+
+ if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
+ DRM_ERROR("Device %s lacks support for IOMMU\n",
+ dev_name(subdrv_dev));
+ return -EINVAL;
+ }
+
+ ret = configure_dma_max_seg_size(subdrv_dev);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+ if (to_dma_iommu_mapping(subdrv_dev))
+ arm_iommu_detach_device(subdrv_dev);
+
+ ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
+ } else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
+ ret = iommu_attach_device(priv->mapping, subdrv_dev);
+ }
+
+ if (ret)
+ clear_dma_max_seg_size(subdrv_dev);
+
+ return ret;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach their device from
+ * the IOMMU mapping.
+ */
+static void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ arm_iommu_detach_device(subdrv_dev);
+ else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ iommu_detach_device(priv->mapping, subdrv_dev);
+
+ clear_dma_max_seg_size(subdrv_dev);
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+{
+ struct exynos_drm_private *priv = drm->dev_private;
+
+ if (!priv->dma_dev) {
+ priv->dma_dev = dev;
+ DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
+ dev_name(dev));
+ }
+
+ if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ return 0;
+
+ if (!priv->mapping) {
+ void *mapping;
+
+ if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ mapping = arm_iommu_create_mapping(&platform_bus_type,
+ EXYNOS_DEV_ADDR_START, EXYNOS_DEV_ADDR_SIZE);
+ else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+ mapping = iommu_get_domain_for_dev(priv->dma_dev);
+
+ if (IS_ERR(mapping))
+ return PTR_ERR(mapping);
+ priv->mapping = mapping;
+ }
+
+ return drm_iommu_attach_device(drm, dev);
+}
+
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+{
+ if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ drm_iommu_detach_device(drm, dev);
+}
+
+void exynos_drm_cleanup_dma(struct drm_device *drm)
+{
+ struct exynos_drm_private *priv = drm->dev_private;
+
+ if (!IS_ENABLED(CONFIG_EXYNOS_IOMMU))
+ return;
+
+ arm_iommu_release_mapping(priv->mapping);
+ priv->mapping = NULL;
+ priv->dma_dev = NULL;
+}
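Sub drivers now call this register/unregister pair from their component bind/unbind callbacks instead of the old drm_iommu_attach/detach helpers; a minimal sketch mirroring the fimd/decon conversions below (foo_* names are illustrative):

	static int foo_bind(struct device *dev, struct device *master, void *data)
	{
		struct drm_device *drm_dev = data;

		return exynos_drm_register_dma(drm_dev, dev);
	}

	static void foo_unbind(struct device *dev, struct device *master,
			       void *data)
	{
		struct drm_device *drm_dev = data;

		exynos_drm_unregister_dma(drm_dev, dev);
	}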
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 6f76baf4550a..2c75e789b2a7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -30,7 +30,6 @@
#include "exynos_drm_ipp.h"
#include "exynos_drm_vidi.h"
#include "exynos_drm_g2d.h"
-#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -175,8 +174,7 @@ struct exynos_drm_driver_info {
#define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */
#define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */
-#define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */
-#define DRM_FIMC_DEVICE BIT(3) /* devices shared with V4L2 subsystem */
+#define DRM_FIMC_DEVICE BIT(2) /* devices shared with V4L2 subsystem */
#define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? &drv : NULL)
@@ -187,16 +185,16 @@ struct exynos_drm_driver_info {
static struct exynos_drm_driver_info exynos_drm_drivers[] = {
{
DRV_PTR(fimd_driver, CONFIG_DRM_EXYNOS_FIMD),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(exynos5433_decon_driver, CONFIG_DRM_EXYNOS5433_DECON),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(decon_driver, CONFIG_DRM_EXYNOS7_DECON),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER),
- DRM_COMPONENT_DRIVER | DRM_DMA_DEVICE
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC),
DRM_COMPONENT_DRIVER
@@ -267,27 +265,6 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
return match ?: ERR_PTR(-ENODEV);
}
-static struct device *exynos_drm_get_dma_device(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(exynos_drm_drivers); ++i) {
- struct exynos_drm_driver_info *info = &exynos_drm_drivers[i];
- struct device *dev;
-
- if (!info->driver || !(info->flags & DRM_DMA_DEVICE))
- continue;
-
- while ((dev = bus_find_device(&platform_bus_type, NULL,
- &info->driver->driver,
- (void *)platform_bus_type.match))) {
- put_device(dev);
- return dev;
- }
- }
- return NULL;
-}
-
static int exynos_drm_bind(struct device *dev)
{
struct exynos_drm_private *private;
@@ -312,23 +289,6 @@ static int exynos_drm_bind(struct device *dev)
dev_set_drvdata(dev, drm);
drm->dev_private = (void *)private;
- /* the first real CRTC device is used for all dma mapping operations */
- private->dma_dev = exynos_drm_get_dma_device();
- if (!private->dma_dev) {
- DRM_ERROR("no device found for DMA mapping operations.\n");
- ret = -ENODEV;
- goto err_free_private;
- }
- DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
- dev_name(private->dma_dev));
-
- /* create common IOMMU mapping for all devices attached to Exynos DRM */
- ret = drm_create_iommu_mapping(drm);
- if (ret < 0) {
- DRM_ERROR("failed to create iommu mapping.\n");
- goto err_free_private;
- }
-
drm_mode_config_init(drm);
exynos_drm_mode_config_init(drm);
@@ -385,8 +345,7 @@ err_unbind_all:
component_unbind_all(drm->dev, drm);
err_mode_config_cleanup:
drm_mode_config_cleanup(drm);
- drm_release_iommu_mapping(drm);
-err_free_private:
+ exynos_drm_cleanup_dma(drm);
kfree(private);
err_free_drm:
drm_dev_put(drm);
@@ -405,7 +364,7 @@ static void exynos_drm_unbind(struct device *dev)
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
- drm_release_iommu_mapping(drm);
+ exynos_drm_cleanup_dma(drm);
kfree(drm->dev_private);
drm->dev_private = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 5e61e707f955..71eb240bc1f4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -214,6 +214,17 @@ static inline struct device *to_dma_dev(struct drm_device *dev)
return priv->dma_dev;
}
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ return priv->mapping ? true : false;
+}
+
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+void exynos_drm_cleanup_dma(struct drm_device *drm);
+
#ifdef CONFIG_DRM_EXYNOS_DPI
struct drm_encoder *exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 9f52382e19ee..31eb538a44ae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -24,7 +24,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_crtc.h"
static int check_fb_gem_memory_type(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 01d182289efa..ce9604ca8041 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -23,7 +23,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
-#include "exynos_drm_iommu.h"
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index e8d0670bb5f8..90dfea0aec4d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -25,7 +25,6 @@
#include <drm/exynos_drm.h>
#include "regs-fimc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -1129,7 +1128,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1149,7 +1148,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev);
}
static const struct component_ops fimc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index b7f56935a46b..e3d6a8584715 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -32,7 +32,6 @@
#include "exynos_drm_fb.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
/*
* FIMD stands for Fully Interactive Mobile Display and
@@ -1011,7 +1010,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
if (is_drm_iommu_supported(drm_dev))
fimd_clear_channels(ctx->crtc);
- return drm_iommu_attach_device(drm_dev, dev);
+ return exynos_drm_register_dma(drm_dev, dev);
}
static void fimd_unbind(struct device *dev, struct device *master,
@@ -1021,7 +1020,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
fimd_disable(ctx->crtc);
- drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
+ exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
if (ctx->encoder)
exynos_dpi_remove(ctx->encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f2481a2014bb..24c536d6d9cf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -25,7 +25,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_g2d.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
#define G2D_HW_MAJOR_VER 4
#define G2D_HW_MINOR_VER 1
@@ -1405,7 +1404,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = drm_iommu_attach_device(drm_dev, dev);
+ ret = exynos_drm_register_dma(drm_dev, dev);
if (ret < 0) {
dev_err(dev, "failed to enable iommu.\n");
g2d_fini_cmdlist(g2d);
@@ -1430,7 +1429,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
priv->g2d_dev = NULL;
cancel_work_sync(&g2d->runqueue_work);
- drm_iommu_detach_device(g2d->drm_dev, dev);
+ exynos_drm_unregister_dma(g2d->drm_dev, dev);
}
static const struct component_ops g2d_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 34ace85feb68..df66c383a877 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -19,7 +19,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_iommu.h"
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index ce15d46bfce8..f048d97fe9e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -24,7 +24,6 @@
#include <drm/exynos_drm.h>
#include "regs-gsc.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -1170,7 +1169,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1190,7 +1189,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &ctx->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(drm_dev, dev);
+ exynos_drm_unregister_dma(drm_dev, dev);
}
static const struct component_ops gsc_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
deleted file mode 100644
index 0f373702414e..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ /dev/null
@@ -1,111 +0,0 @@
-/* exynos_drm_iommu.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Author: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/exynos_drm.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/iommu.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
-
-static inline int configure_dma_max_seg_size(struct device *dev)
-{
- if (!dev->dma_parms)
- dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
- if (!dev->dma_parms)
- return -ENOMEM;
-
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
- return 0;
-}
-
-static inline void clear_dma_max_seg_size(struct device *dev)
-{
- kfree(dev->dma_parms);
- dev->dma_parms = NULL;
-}
-
-/*
- * drm_create_iommu_mapping - create a mapping structure
- *
- * @drm_dev: DRM device
- */
-int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
- EXYNOS_DEV_ADDR_SIZE);
-}
-
-/*
- * drm_release_iommu_mapping - release iommu mapping structure
- *
- * @drm_dev: DRM device
- */
-void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- __exynos_iommu_release_mapping(priv);
-}
-
-/*
- * drm_iommu_attach_device- attach device to iommu mapping
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be attach
- *
- * This function should be called by sub drivers to attach it to iommu
- * mapping.
- */
-int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
- int ret;
-
- if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
- DRM_ERROR("Device %s lacks support for IOMMU\n",
- dev_name(subdrv_dev));
- return -EINVAL;
- }
-
- ret = configure_dma_max_seg_size(subdrv_dev);
- if (ret)
- return ret;
-
- ret = __exynos_iommu_attach(priv, subdrv_dev);
- if (ret)
- clear_dma_max_seg_size(subdrv_dev);
-
- return 0;
-}
-
-/*
- * drm_iommu_detach_device -detach device address space mapping from device
- *
- * @drm_dev: DRM device
- * @subdrv_dev: device to be detached
- *
- * This function should be called by sub drivers to detach it from iommu
- * mapping
- */
-void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- __exynos_iommu_detach(priv, subdrv_dev);
- clear_dma_max_seg_size(subdrv_dev);
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
deleted file mode 100644
index 797d9ee5f15a..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* exynos_drm_iommu.h
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- * Authoer: Inki Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_IOMMU_H_
-#define _EXYNOS_DRM_IOMMU_H_
-
-#define EXYNOS_DEV_ADDR_START 0x20000000
-#define EXYNOS_DEV_ADDR_SIZE 0x40000000
-
-#ifdef CONFIG_DRM_EXYNOS_IOMMU
-
-#if defined(CONFIG_ARM_DMA_USE_IOMMU)
-#include <asm/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
- unsigned long start, unsigned long size)
-{
- priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
- size);
- return IS_ERR(priv->mapping);
-}
-
-static inline void
-__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
- arm_iommu_release_mapping(priv->mapping);
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- if (dev->archdata.mapping)
- arm_iommu_detach_device(dev);
-
- return arm_iommu_attach_device(dev, priv->mapping);
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- arm_iommu_detach_device(dev);
-}
-
-#elif defined(CONFIG_IOMMU_DMA)
-#include <linux/dma-iommu.h>
-
-static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
- unsigned long start, unsigned long size)
-{
- priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
- return 0;
-}
-
-static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
-{
- priv->mapping = NULL;
-}
-
-static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- struct iommu_domain *domain = priv->mapping;
-
- if (dev != priv->dma_dev)
- return iommu_attach_device(domain, dev);
- return 0;
-}
-
-static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
- struct device *dev)
-{
- struct iommu_domain *domain = priv->mapping;
-
- if (dev != priv->dma_dev)
- iommu_detach_device(domain, dev);
-}
-#else
-#error Unsupported architecture and IOMMU/DMA-mapping glue code
-#endif
-
-int drm_create_iommu_mapping(struct drm_device *drm_dev);
-
-void drm_release_iommu_mapping(struct drm_device *drm_dev);
-
-int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev);
-
-void drm_iommu_detach_device(struct drm_device *dev_dev,
- struct device *subdrv_dev);
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
- struct exynos_drm_private *priv = drm_dev->dev_private;
-
- return priv->mapping ? true : false;
-}
-
-#else
-
-static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
-{
- return 0;
-}
-
-static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
-{
-}
-
-static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
- return 0;
-}
-
-static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
- struct device *subdrv_dev)
-{
-}
-
-static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
-{
- return false;
-}
-
-#endif
-#endif
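The replacement helpers live outside the hunks shown here (a new exynos_drm_dma.c in the same series); a minimal sketch of the bind/unbind pattern the sub-drivers below convert to (example_bind/example_unbind are hypothetical names):

/* Sketch only: exynos_drm_register_dma() replaces drm_iommu_attach_device()
 * and hides the ARM-DMA vs IOMMU-DMA glue this deleted header open-coded. */
static int example_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm_dev = data;

	return exynos_drm_register_dma(drm_dev, dev);
}

static void example_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm_dev = data;

	exynos_drm_unregister_dma(drm_dev, dev);
}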
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index a820a68429b9..8d67b2a54be3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -23,7 +23,6 @@
#include <drm/exynos_drm.h>
#include "regs-rotator.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
/*
@@ -244,7 +243,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &rot->ipp;
rot->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@@ -263,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &rot->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(rot->drm_dev, rot->dev);
+ exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
}
static const struct component_ops rotator_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index cd66774e817d..71270efa64f3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -23,7 +23,6 @@
#include "regs-scaler.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_drv.h"
-#include "exynos_drm_iommu.h"
#include "exynos_drm_ipp.h"
#define scaler_read(offset) readl(scaler->regs + (offset))
@@ -452,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &scaler->ipp;
scaler->drm_dev = drm_dev;
- drm_iommu_attach_device(drm_dev, dev);
+ exynos_drm_register_dma(drm_dev, dev);
exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -473,7 +472,7 @@ static void scaler_unbind(struct device *dev, struct device *master,
struct exynos_drm_ipp *ipp = &scaler->ipp;
exynos_drm_ipp_unregister(drm_dev, ipp);
- drm_iommu_detach_device(scaler->drm_dev, scaler->dev);
+ exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
}
static const struct component_ops scaler_component_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e3a4ecbc503b..0573eab0e190 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -40,7 +40,6 @@
#include "exynos_drm_crtc.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
-#include "exynos_drm_iommu.h"
#define MIXER_WIN_NR 3
#define VP_DEFAULT_WIN 2
@@ -381,19 +380,16 @@ static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_SCAN_MASK);
}
-static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, struct drm_display_mode *mode)
{
+ enum hdmi_quantization_range range = drm_default_rgb_quant_range(mode);
u32 val;
- switch (height) {
- case 480:
- case 576:
- val = MXR_CFG_RGB601_0_255;
- break;
- case 720:
- case 1080:
- default:
- val = MXR_CFG_RGB709_16_235;
+ if (mode->vdisplay < 720) {
+ val = MXR_CFG_RGB601;
+ } else {
+ val = MXR_CFG_RGB709;
+
/* Configure the BT.709 CSC matrix for full range RGB. */
mixer_reg_write(ctx, MXR_CM_COEFF_Y,
MXR_CSC_CT( 0.184, 0.614, 0.063) |
@@ -402,9 +398,13 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
MXR_CSC_CT(-0.102, -0.338, 0.440));
mixer_reg_write(ctx, MXR_CM_COEFF_CR,
MXR_CSC_CT( 0.440, -0.399, -0.040));
- break;
}
+ if (range == HDMI_QUANTIZATION_RANGE_FULL)
+ val |= MXR_CFG_QUANT_RANGE_FULL;
+ else
+ val |= MXR_CFG_QUANT_RANGE_LIMITED;
+
mixer_reg_writemask(ctx, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
}
@@ -461,7 +461,7 @@ static void mixer_commit(struct mixer_context *ctx)
struct drm_display_mode *mode = &ctx->crtc->base.state->adjusted_mode;
mixer_cfg_scan(ctx, mode->hdisplay, mode->vdisplay);
- mixer_cfg_rgb_fmt(ctx, mode->vdisplay);
+ mixer_cfg_rgb_fmt(ctx, mode);
mixer_run(ctx);
}
@@ -878,12 +878,12 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
}
}
- return drm_iommu_attach_device(drm_dev, mixer_ctx->dev);
+ return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
}
static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
{
- drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+ exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
}
static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
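For reference, drm_default_rgb_quant_range() encodes the CTA-861 default that the old vdisplay switch only approximated: every CEA mode other than VIC 1 (640x480) uses limited range, everything else full range.

/* Illustration of the DRM core helper's behavior:
 *   1920x1080@60 (CEA mode) -> HDMI_QUANTIZATION_RANGE_LIMITED
 *   1024x768 (PC mode)      -> HDMI_QUANTIZATION_RANGE_FULL
 */
enum hdmi_quantization_range range = drm_default_rgb_quant_range(mode);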
diff --git a/drivers/gpu/drm/exynos/regs-decon5433.h b/drivers/gpu/drm/exynos/regs-decon5433.h
index 19ad9e47945e..63db6974bf14 100644
--- a/drivers/gpu/drm/exynos/regs-decon5433.h
+++ b/drivers/gpu/drm/exynos/regs-decon5433.h
@@ -104,6 +104,7 @@
#define WINCONx_BURSTLEN_16WORD (0x0 << 10)
#define WINCONx_BURSTLEN_8WORD (0x1 << 10)
#define WINCONx_BURSTLEN_4WORD (0x2 << 10)
+#define WINCONx_ALPHA_MUL_F (1 << 7)
#define WINCONx_BLD_PIX_F (1 << 6)
#define WINCONx_BPPMODE_MASK (0xf << 2)
#define WINCONx_BPPMODE_16BPP_565 (0x5 << 2)
@@ -116,11 +117,15 @@
#define WINCONx_BPPMODE_16BPP_A4444 (0xe << 2)
#define WINCONx_ALPHA_SEL_F (1 << 1)
#define WINCONx_ENWIN_F (1 << 0)
+#define WINCONx_BLEND_MODE_MASK (0xc2)
/* SHADOWCON */
#define SHADOWCON_PROTECT_MASK GENMASK(14, 10)
#define SHADOWCON_Wx_PROTECT(n) (1 << (10 + (n)))
+/* VIDOSDxC */
+#define VIDOSDxC_ALPHA0_RGB_MASK (0xffffff)
+
/* VIDOSDxD */
#define VIDOSD_Wx_ALPHA_R_F(n) (((n) & 0xff) << 16)
#define VIDOSD_Wx_ALPHA_G_F(n) (((n) & 0xff) << 8)
@@ -206,4 +211,18 @@
#define CRCCTRL_CRCEN (0x1 << 0)
#define CRCCTRL_MASK (0x7)
+/* BLENDCON */
+#define BLEND_NEW (1 << 0)
+
+/* BLENDERQx */
+#define BLENDERQ_ZERO 0x0
+#define BLENDERQ_ONE 0x1
+#define BLENDERQ_ALPHA_A 0x2
+#define BLENDERQ_ONE_MINUS_ALPHA_A 0x3
+#define BLENDERQ_ALPHA0 0x6
+#define BLENDERQ_Q_FUNC_F(n) (n << 18)
+#define BLENDERQ_P_FUNC_F(n) (n << 12)
+#define BLENDERQ_B_FUNC_F(n) (n << 6)
+#define BLENDERQ_A_FUNC_F(n) (n << 0)
+
#endif /* EXYNOS_REGS_DECON5433_H */
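An assumed illustration (not from this patch) of how the new BLENDERQx fields compose a blend equation; the coefficient assignment below sketches a straight-alpha source-over setup, with A/B taken to weight the source/destination color and P/Q the alphas:

u32 blendq = BLENDERQ_A_FUNC_F(BLENDERQ_ALPHA_A) |	     /* src * alpha */
	     BLENDERQ_B_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A) | /* dst * (1-a) */
	     BLENDERQ_P_FUNC_F(BLENDERQ_ONE) |
	     BLENDERQ_Q_FUNC_F(BLENDERQ_ONE_MINUS_ALPHA_A);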
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index d2b8194a07bf..5ff095b0c1b3 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -85,10 +85,11 @@
/* bits for MXR_CFG */
#define MXR_CFG_LAYER_UPDATE (1 << 31)
#define MXR_CFG_LAYER_UPDATE_COUNT_MASK (3 << 29)
-#define MXR_CFG_RGB601_0_255 (0 << 9)
-#define MXR_CFG_RGB601_16_235 (1 << 9)
-#define MXR_CFG_RGB709_0_255 (2 << 9)
-#define MXR_CFG_RGB709_16_235 (3 << 9)
+#define MXR_CFG_QUANT_RANGE_FULL (0 << 9)
+#define MXR_CFG_QUANT_RANGE_LIMITED (1 << 9)
+#define MXR_CFG_RGB601 (0 << 10)
+#define MXR_CFG_RGB709 (1 << 10)
+
#define MXR_CFG_RGB_FMT_MASK 0x600
#define MXR_CFG_OUT_YUV444 (0 << 8)
#define MXR_CFG_OUT_RGB888 (1 << 8)
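The four combined values become two independent bits: bit 10 selects colorimetry, bit 9 the quantization range, and MXR_CFG_RGB_FMT_MASK (0x600) still covers both. Worked values:

/* (1 << 10) | (1 << 9) == 0x600: BT.709, limited range (HD modes) */
u32 hd = MXR_CFG_RGB709 | MXR_CFG_QUANT_RANGE_LIMITED;
/* (0 << 10) | (0 << 9) == 0x000: BT.601, full range */
u32 sd = MXR_CFG_RGB601 | MXR_CFG_QUANT_RANGE_FULL;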
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 0e3752437e44..18afc94e4dff 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <video/videomode.h>
#include "fsl_dcu_drm_crtc.h"
#include "fsl_dcu_drm_drv.h"
@@ -85,40 +86,34 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
struct drm_connector *con = &fsl_dev->connector.base;
struct drm_display_mode *mode = &crtc->state->mode;
- unsigned int hbp, hfp, hsw, vbp, vfp, vsw, index, pol = 0;
+ unsigned int pol = 0;
+ struct videomode vm;
- index = drm_crtc_index(crtc);
clk_set_rate(fsl_dev->pix_clk, mode->clock * 1000);
- /* Configure timings: */
- hbp = mode->htotal - mode->hsync_end;
- hfp = mode->hsync_start - mode->hdisplay;
- hsw = mode->hsync_end - mode->hsync_start;
- vbp = mode->vtotal - mode->vsync_end;
- vfp = mode->vsync_start - mode->vdisplay;
- vsw = mode->vsync_end - mode->vsync_start;
+ drm_display_mode_to_videomode(mode, &vm);
/* INV_PXCK as default (most display sample data on rising edge) */
if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE))
pol |= DCU_SYN_POL_INV_PXCK;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
pol |= DCU_SYN_POL_INV_HS_LOW;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
pol |= DCU_SYN_POL_INV_VS_LOW;
regmap_write(fsl_dev->regmap, DCU_HSYN_PARA,
- DCU_HSYN_PARA_BP(hbp) |
- DCU_HSYN_PARA_PW(hsw) |
- DCU_HSYN_PARA_FP(hfp));
+ DCU_HSYN_PARA_BP(vm.hback_porch) |
+ DCU_HSYN_PARA_PW(vm.hsync_len) |
+ DCU_HSYN_PARA_FP(vm.hfront_porch));
regmap_write(fsl_dev->regmap, DCU_VSYN_PARA,
- DCU_VSYN_PARA_BP(vbp) |
- DCU_VSYN_PARA_PW(vsw) |
- DCU_VSYN_PARA_FP(vfp));
+ DCU_VSYN_PARA_BP(vm.vback_porch) |
+ DCU_VSYN_PARA_PW(vm.vsync_len) |
+ DCU_VSYN_PARA_FP(vm.vfront_porch));
regmap_write(fsl_dev->regmap, DCU_DISP_SIZE,
- DCU_DISP_SIZE_DELTA_Y(mode->vdisplay) |
- DCU_DISP_SIZE_DELTA_X(mode->hdisplay));
+ DCU_DISP_SIZE_DELTA_Y(vm.vactive) |
+ DCU_DISP_SIZE_DELTA_X(vm.hactive));
regmap_write(fsl_dev->regmap, DCU_SYN_POL, pol);
regmap_write(fsl_dev->regmap, DCU_BGND, DCU_BGND_R(0) |
DCU_BGND_G(0) | DCU_BGND_B(0));
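drm_display_mode_to_videomode() performs exactly the porch arithmetic deleted above; the mapping it applies, paraphrased from the DRM core:

struct videomode vm;

drm_display_mode_to_videomode(mode, &vm);
/* vm.hactive      = mode->hdisplay
 * vm.hfront_porch = mode->hsync_start - mode->hdisplay
 * vm.hsync_len    = mode->hsync_end   - mode->hsync_start
 * vm.hback_porch  = mode->htotal      - mode->hsync_end
 * (and likewise vertically); DRM_MODE_FLAG_N[HV]SYNC becomes
 * DISPLAY_FLAGS_[HV]SYNC_LOW in vm.flags. */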
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index 0496be5212e1..ceddc3e29258 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -89,20 +90,11 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags)
"Invalid legacyfb_depth. Defaulting to 24bpp\n");
legacyfb_depth = 24;
}
- fsl_dev->fbdev = drm_fbdev_cma_init(dev, legacyfb_depth, 1);
- if (IS_ERR(fsl_dev->fbdev)) {
- ret = PTR_ERR(fsl_dev->fbdev);
- fsl_dev->fbdev = NULL;
- goto done;
- }
return 0;
done:
drm_kms_helper_poll_fini(dev);
- if (fsl_dev->fbdev)
- drm_fbdev_cma_fini(fsl_dev->fbdev);
-
drm_mode_config_cleanup(dev);
drm_irq_uninstall(dev);
dev->dev_private = NULL;
@@ -112,14 +104,9 @@ done:
static void fsl_dcu_unload(struct drm_device *dev)
{
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
drm_atomic_helper_shutdown(dev);
drm_kms_helper_poll_fini(dev);
- if (fsl_dev->fbdev)
- drm_fbdev_cma_fini(fsl_dev->fbdev);
-
drm_mode_config_cleanup(dev);
drm_irq_uninstall(dev);
@@ -147,19 +134,11 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static void fsl_dcu_drm_lastclose(struct drm_device *dev)
-{
- struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(fsl_dev->fbdev);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(fsl_dcu_drm_fops);
static struct drm_driver fsl_dcu_drm_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
| DRIVER_PRIME | DRIVER_ATOMIC,
- .lastclose = fsl_dcu_drm_lastclose,
.load = fsl_dcu_load,
.unload = fsl_dcu_unload,
.irq_handler = fsl_dcu_drm_irq,
@@ -355,6 +334,8 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev)
if (ret < 0)
goto put;
+ drm_fbdev_generic_setup(drm, legacyfb_depth);
+
return 0;
put:
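The generic fbdev emulation registers itself as a DRM client after device registration and restores/tears itself down automatically, which is why the driver-owned fbdev pointer, the fini calls and the lastclose hook can all go. The resulting probe-path shape, sketched:

ret = drm_dev_register(drm, 0);
if (ret < 0)
	goto put;

/* no driver-owned fbdev state; restore and teardown are automatic */
drm_fbdev_generic_setup(drm, legacyfb_depth);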
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index 93bfb98012d4..cb87bb74cb87 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -191,7 +191,6 @@ struct fsl_dcu_drm_device {
/*protects hardware register*/
spinlock_t irq_lock;
struct drm_device *drm;
- struct drm_fbdev_cma *fbdev;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct fsl_dcu_drm_connector connector;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 45c25a488f42..3c168ae77b0c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -49,8 +49,6 @@ struct hibmc_drm_private {
bool mode_config_initialized;
/* ttm */
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool initialized;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 2e3e0bdb8932..dd383267884c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -29,55 +29,6 @@ hibmc_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct hibmc_drm_private, bdev);
}
-static int
-hibmc_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void
-hibmc_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
-{
- int ret;
-
- hibmc->mem_global_ref.global_type = DRM_GLOBAL_TTM_MEM;
- hibmc->mem_global_ref.size = sizeof(struct ttm_mem_global);
- hibmc->mem_global_ref.init = &hibmc_ttm_mem_global_init;
- hibmc->mem_global_ref.release = &hibmc_ttm_mem_global_release;
- ret = drm_global_item_ref(&hibmc->mem_global_ref);
- if (ret) {
- DRM_ERROR("could not get ref on ttm global: %d\n", ret);
- return ret;
- }
-
- hibmc->bo_global_ref.mem_glob =
- hibmc->mem_global_ref.object;
- hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
- hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
- hibmc->bo_global_ref.ref.init = &ttm_bo_global_init;
- hibmc->bo_global_ref.ref.release = &ttm_bo_global_release;
- ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
- if (ret) {
- DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
- drm_global_item_unref(&hibmc->mem_global_ref);
- return ret;
- }
- return 0;
-}
-
-static void
-hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
-{
- drm_global_item_unref(&hibmc->bo_global_ref.ref);
- drm_global_item_unref(&hibmc->mem_global_ref);
- hibmc->mem_global_ref.release = NULL;
-}
-
static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
@@ -237,18 +188,12 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
struct drm_device *dev = hibmc->dev;
struct ttm_bo_device *bdev = &hibmc->bdev;
- ret = hibmc_ttm_global_init(hibmc);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&hibmc->bdev,
- hibmc->bo_global_ref.ref.object,
&hibmc_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
- hibmc_ttm_global_release(hibmc);
DRM_ERROR("error initializing bo driver: %d\n", ret);
return ret;
}
@@ -256,7 +201,6 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
hibmc->fb_size >> PAGE_SHIFT);
if (ret) {
- hibmc_ttm_global_release(hibmc);
DRM_ERROR("failed ttm VRAM init: %d\n", ret);
return ret;
}
@@ -271,7 +215,6 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
return;
ttm_bo_device_release(&hibmc->bdev);
- hibmc_ttm_global_release(hibmc);
hibmc->mm_inited = false;
}
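Context for the deletions above: as of this series TTM creates and refcounts its global BO/memory state internally, so the per-driver drm_global_reference boilerplate collapses into the plain device init, with the signature used in this hunk:

ret = ttm_bo_device_init(&hibmc->bdev, &hibmc_bo_driver,
			 dev->anon_inode->i_mapping,
			 DRM_FILE_PAGE_OFFSET, true);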
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1c2857f13ad4..19b5fe5016bf 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -75,6 +75,7 @@ i915-y += i915_cmd_parser.o \
i915_gemfs.o \
i915_query.o \
i915_request.o \
+ i915_scheduler.o \
i915_timeline.o \
i915_trace_points.o \
i915_vma.o \
@@ -112,6 +113,8 @@ i915-y += intel_audio.o \
intel_bios.o \
intel_cdclk.o \
intel_color.o \
+ intel_combo_phy.o \
+ intel_connector.o \
intel_display.o \
intel_dpio_phy.o \
intel_dpll_mgr.o \
@@ -120,9 +123,9 @@ i915-y += intel_audio.o \
intel_frontbuffer.o \
intel_hdcp.o \
intel_hotplug.o \
- intel_modes.o \
intel_overlay.o \
intel_psr.o \
+ intel_quirks.o \
intel_sideband.o \
intel_sprite.o
i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
@@ -142,6 +145,7 @@ i915-y += dvo_ch7017.o \
intel_dp_link_training.o \
intel_dp_mst.o \
intel_dp.o \
+ intel_dsi.o \
intel_dsi_dcs_backlight.o \
intel_dsi_vbt.o \
intel_dvo.o \
@@ -153,14 +157,17 @@ i915-y += dvo_ch7017.o \
intel_sdvo.o \
intel_tv.o \
vlv_dsi.o \
- vlv_dsi_pll.o
+ vlv_dsi_pll.o \
+ intel_vdsc.o
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
selftests/i915_selftest.o \
- selftests/igt_flush_test.o
+ selftests/igt_flush_test.o \
+ selftests/igt_reset.o \
+ selftests/igt_spinner.o
# virtual gpu code
i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index fe754022e356..359d37d5c958 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -61,10 +61,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
}
mutex_lock(&dev_priv->drm.struct_mutex);
+ mmio_hw_access_pre(dev_priv);
ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
+ mmio_hw_access_post(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
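The new pre/post calls bracket the GGTT insertion because it may touch hardware registers and therefore needs a runtime-PM wakeref; at this point in the tree the gvt.h helpers are thin runtime-PM wrappers (sketch):

static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}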
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 481896fb712a..85e6736f0a32 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -235,7 +235,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->bpp = skl_pixel_formats[fmt].bpp;
plane->drm_format = skl_pixel_formats[fmt].drm_format;
} else {
- plane->tiled = !!(val & DISPPLANE_TILED);
+ plane->tiled = val & DISPPLANE_TILED;
fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK);
plane->bpp = bdw_pixel_formats[fmt].bpp;
plane->drm_format = bdw_pixel_formats[fmt].drm_format;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 58e166effa45..c7103dd2d8d5 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2447,10 +2447,11 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
- struct intel_gvt_partial_pte *pos;
+ struct intel_gvt_partial_pte *pos, *next;
- list_for_each_entry(pos,
- &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) {
+ list_for_each_entry_safe(pos, next,
+ &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
+ list) {
gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
pos->offset, pos->data);
kfree(pos);
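The _safe variant matters because kfree(pos) frees the node whose ->next the plain iterator would read on the following step; list_for_each_entry_safe() caches the successor first. The general teardown pattern, with &head standing in for the partial_pte_list above:

struct intel_gvt_partial_pte *pos, *next;

/* 'next' is fetched before 'pos' is freed, so iteration stays valid */
list_for_each_entry_safe(pos, next, &head, list) {
	list_del(&pos->list);	/* optional when destroying the whole list */
	kfree(pos);
}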
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 36a5147cd01e..d6e02c15ef97 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -158,6 +158,8 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
int ring_id, i;
for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+ if (!HAS_ENGINE(dev_priv, ring_id))
+ continue;
offset.reg = regs[ring_id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
gen9_render_mocs.control_table[ring_id][i] =
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index ea34003d6dd2..b8fbe3fabea3 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -334,6 +334,28 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
i915_gem_object_put(wa_ctx->indirect_ctx.obj);
}
+static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+ struct i915_gem_context *ctx)
+{
+ struct intel_vgpu_mm *mm = workload->shadow_mm;
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+ int i = 0;
+
+ if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
+ return -1;
+
+ if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
+ } else {
+ for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
+ px_dma(ppgtt->pdp.page_directory[i]) =
+ mm->ppgtt_mm.shadow_pdps[i];
+ }
+ }
+
+ return 0;
+}
+
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
* shadow it as well, including ringbuffer, wa_ctx and ctx.
@@ -358,6 +380,12 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->req)
return 0;
+ ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+ if (ret < 0) {
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ return ret;
+ }
+
/* pin shadow context by gvt even the shadow context will be pinned
* when i915 alloc request. That is because gvt will update the guest
* context from shadow context when workload is completed, and at that
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4f3ac0a12889..38dcee1ca062 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -943,30 +943,30 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
size_t count, loff_t *pos)
{
- struct i915_gpu_state *error = file->private_data;
- struct drm_i915_error_state_buf str;
+ struct i915_gpu_state *error;
ssize_t ret;
- loff_t tmp;
+ void *buf;
+ error = file->private_data;
if (!error)
return 0;
- ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
- if (ret)
- return ret;
+ /* Bounce buffer required because of kernfs __user API convenience. */
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- ret = i915_error_state_to_str(&str, error);
- if (ret)
+ ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
+ if (ret <= 0)
goto out;
- tmp = 0;
- ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
- if (ret < 0)
- goto out;
+ if (!copy_to_user(ubuf, buf, ret))
+ *pos += ret;
+ else
+ ret = -EFAULT;
- *pos = str.start + ret;
out:
- i915_error_state_buf_release(&str);
+ kfree(buf);
return ret;
}
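The generic shape of the bounce-buffered read introduced above, with a hypothetical fill_buffer() standing in for i915_gpu_state_copy_to_buffer():

static ssize_t bounce_read(struct file *file, char __user *ubuf,
			   size_t count, loff_t *pos)
{
	void *buf;
	ssize_t ret;

	buf = kmalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* fill_buffer() is hypothetical: any producer that can only
	 * write to kernel memory. */
	ret = fill_buffer(file->private_data, buf, *pos, count);
	if (ret > 0) {
		if (copy_to_user(ubuf, buf, ret))
			ret = -EFAULT;
		else
			*pos += ret;	/* advance only by what was copied */
	}

	kfree(buf);
	return ret;
}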
@@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
if (!IS_GEN5(dev_priv))
return -ENODEV;
+ intel_runtime_pm_get(dev_priv);
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
seq_printf(m, "GFX power: %ld\n", gfx);
seq_printf(m, "Total power: %ld\n", chipset + gfx);
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
@@ -2215,8 +2219,23 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ u32 act_freq = rps->cur_freq;
struct drm_file *file;
+ if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ mutex_lock(&dev_priv->pcu_lock);
+ act_freq = vlv_punit_read(dev_priv,
+ PUNIT_REG_GPU_FREQ_STS);
+ act_freq = (act_freq >> 8) & 0xff;
+ mutex_unlock(&dev_priv->pcu_lock);
+ } else {
+ act_freq = intel_get_cagf(dev_priv,
+ I915_READ(GEN6_RPSTAT1));
+ }
+ intel_runtime_pm_put(dev_priv);
+ }
+
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
seq_printf(m, "GPU busy? %s [%d requests]\n",
yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
@@ -2224,8 +2243,9 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
- seq_printf(m, "Frequency requested %d\n",
- intel_gpu_freq(dev_priv, rps->cur_freq));
+ seq_printf(m, "Frequency requested %d, actual %d\n",
+ intel_gpu_freq(dev_priv, rps->cur_freq),
+ intel_gpu_freq(dev_priv, act_freq));
seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
intel_gpu_freq(dev_priv, rps->min_freq),
intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
@@ -2900,16 +2920,15 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
- if (IS_KABYLAKE(dev_priv) ||
- (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
- seq_printf(m, "DC3 -> DC5 count: %d\n",
- I915_READ(SKL_CSR_DC3_DC5_COUNT));
+ if (WARN_ON(INTEL_GEN(dev_priv) > 11))
+ goto out;
+
+ seq_printf(m, "DC3 -> DC5 count: %d\n",
+ I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+ SKL_CSR_DC3_DC5_COUNT));
+ if (!IS_GEN9_LP(dev_priv))
seq_printf(m, "DC5 -> DC6 count: %d\n",
I915_READ(SKL_CSR_DC5_DC6_COUNT));
- } else if (IS_BROXTON(dev_priv) && csr->version >= CSR_VERSION(1, 4)) {
- seq_printf(m, "DC3 -> DC5 count: %d\n",
- I915_READ(BXT_CSR_DC3_DC5_COUNT));
- }
out:
seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
@@ -3049,16 +3068,17 @@ static void intel_connector_info(struct seq_file *m,
seq_printf(m, "connector %d: type %s, status: %s\n",
connector->base.id, connector->name,
drm_get_connector_status_name(connector->status));
- if (connector->status == connector_status_connected) {
- seq_printf(m, "\tname: %s\n", connector->display_info.name);
- seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
- connector->display_info.width_mm,
- connector->display_info.height_mm);
- seq_printf(m, "\tsubpixel order: %s\n",
- drm_get_subpixel_order_name(connector->display_info.subpixel_order));
- seq_printf(m, "\tCEA rev: %d\n",
- connector->display_info.cea_rev);
- }
+
+ if (connector->status == connector_status_disconnected)
+ return;
+
+ seq_printf(m, "\tname: %s\n", connector->display_info.name);
+ seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+ connector->display_info.width_mm,
+ connector->display_info.height_mm);
+ seq_printf(m, "\tsubpixel order: %s\n",
+ drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+ seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
if (!intel_encoder)
return;
@@ -3355,13 +3375,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
- struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
- int i;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
- seq_printf(m, "Workarounds applied: %d\n", wa->count);
- for (i = 0; i < wa->count; ++i)
+ seq_printf(m, "Workarounds applied: %u\n", wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
- wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
+ i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
return 0;
}
@@ -3421,31 +3443,32 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct skl_ddb_allocation *ddb;
struct skl_ddb_entry *entry;
- enum pipe pipe;
- int plane;
+ struct intel_crtc *crtc;
if (INTEL_GEN(dev_priv) < 9)
return -ENODEV;
drm_modeset_lock_all(dev);
- ddb = &dev_priv->wm.skl_hw.ddb;
-
seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
- for_each_pipe(dev_priv, pipe) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
- for_each_universal_plane(dev_priv, pipe, plane) {
- entry = &ddb->plane[pipe][plane];
- seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
entry->start, entry->end,
skl_ddb_entry_size(entry));
}
- entry = &ddb->plane[pipe][PLANE_CURSOR];
+ entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
entry->end, skl_ddb_entry_size(entry));
}
@@ -4172,6 +4195,7 @@ i915_drop_caches_set(void *data, u64 val)
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
+ intel_runtime_pm_get(i915);
if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
i915_gem_set_wedged(i915);
@@ -4181,7 +4205,7 @@ i915_drop_caches_set(void *data, u64 val)
if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
- return ret;
+ goto out;
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(i915,
@@ -4189,11 +4213,8 @@ i915_drop_caches_set(void *data, u64 val)
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (ret == 0 && val & DROP_RESET_SEQNO) {
- intel_runtime_pm_get(i915);
+ if (ret == 0 && val & DROP_RESET_SEQNO)
ret = i915_gem_set_global_seqno(&i915->drm, 1);
- intel_runtime_pm_put(i915);
- }
if (val & DROP_RETIRE)
i915_retire_requests(i915);
@@ -4231,6 +4252,9 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
+out:
+ intel_runtime_pm_put(i915);
+
return ret;
}
@@ -4331,7 +4355,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
for (s = 0; s < info->sseu.max_slices; s++) {
/*
* FIXME: Valid SS Mask respects the spec and read
- * only valid bits for those registers, excluding reserverd
+ * only valid bits for those registers, excluding reserved
* although this seems wrong because it would leave many
* subslices without ACK.
*/
@@ -4571,6 +4595,13 @@ static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = m->private;
struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ /* Synchronize with everything first in case there's been an HPD
+ * storm, but we haven't finished handling it in the kernel yet
+ */
+ synchronize_irq(dev_priv->drm.irq);
+ flush_work(&dev_priv->hotplug.dig_port_work);
+ flush_work(&dev_priv->hotplug.hotplug_work);
+
seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
seq_printf(m, "Detected: %s\n",
yesno(delayed_work_pending(&hotplug->reenable_work)));
@@ -4641,24 +4672,122 @@ static const struct file_operations i915_hpd_storm_ctl_fops = {
.write = i915_hpd_storm_ctl_write
};
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Enabled: %s\n",
+ yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+
+ return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_short_storm_ctl_show,
+ inode->i_private);
+}
+
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ char *newline;
+ char tmp[16];
+ int i;
+ bool new_state;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ /* Reset to the "default" state for this system */
+ if (strcmp(tmp, "reset") == 0)
+ new_state = !HAS_DP_MST(dev_priv);
+ else if (kstrtobool(tmp, &new_state) != 0)
+ return -EINVAL;
+
+ DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
+ new_state ? "En" : "Dis");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_short_storm_enabled = new_state;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+ return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_short_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_short_storm_ctl_write,
+};
+
static int i915_drrs_ctl_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *intel_crtc;
- struct intel_encoder *encoder;
- struct intel_dp *intel_dp;
+ struct intel_crtc *crtc;
if (INTEL_GEN(dev_priv) < 7)
return -ENODEV;
- drm_modeset_lock_all(dev);
- for_each_intel_crtc(dev, intel_crtc) {
- if (!intel_crtc->base.state->active ||
- !intel_crtc->config->has_drrs)
- continue;
+ for_each_intel_crtc(dev, crtc) {
+ struct drm_connector_list_iter conn_iter;
+ struct intel_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_crtc_commit *commit;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (!crtc_state->base.active ||
+ !crtc_state->has_drrs)
+ goto out;
- for_each_encoder_on_crtc(dev, &intel_crtc->base, encoder) {
+ commit = crtc_state->base.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (ret)
+ goto out;
+ }
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp;
+
+ if (!(crtc_state->base.connector_mask &
+ drm_connector_mask(connector)))
+ continue;
+
+ encoder = intel_attached_encoder(connector);
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
@@ -4668,13 +4797,18 @@ static int i915_drrs_ctl_set(void *data, u64 val)
intel_dp = enc_to_intel_dp(&encoder->base);
if (val)
intel_edp_drrs_enable(intel_dp,
- intel_crtc->config);
+ crtc_state);
else
intel_edp_drrs_disable(intel_dp,
- intel_crtc->config);
+ crtc_state);
}
+ drm_connector_list_iter_end(&conn_iter);
+
+out:
+ drm_modeset_unlock(&crtc->base.mutex);
+ if (ret)
+ return ret;
}
- drm_modeset_unlock_all(dev);
return 0;
}
@@ -4818,6 +4952,7 @@ static const struct i915_debugfs_files {
{"i915_guc_log_level", &i915_guc_log_level_fops},
{"i915_guc_log_relay", &i915_guc_log_relay_fops},
{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+ {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
{"i915_ipc_status", &i915_ipc_status_fops},
{"i915_drrs_ctl", &i915_drrs_ctl_fops},
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
@@ -4899,13 +5034,10 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
continue;
err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
- if (err <= 0) {
- DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n",
- size, b->offset, err);
- continue;
- }
-
- seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf);
+ if (err < 0)
+ seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
+ else
+ seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
}
return 0;
@@ -4934,6 +5066,28 @@ static int i915_panel_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ /* HDCP is supported by connector */
+ if (!intel_connector->hdcp.shim)
+ return -EINVAL;
+
+ seq_printf(m, "%s:%d HDCP version: ", connector->name,
+ connector->base.id);
+ seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
+ "None" : "HDCP1.4");
+ seq_puts(m, "\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
/**
* i915_debugfs_connector_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@@ -4963,5 +5117,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
connector, &i915_psr_sink_status_fops);
}
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+ connector, &i915_hdcp_sink_capability_fops);
+ }
+
return 0;
}
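DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability) is what lets the new file get by with only a _show function; the macro from linux/seq_file.h expands roughly to:

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner   = THIS_MODULE,						\
	.open    = __name ## _open,					\
	.read    = seq_read,						\
	.llseek  = seq_lseek,						\
	.release = single_release,					\
}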
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index ffdbbac4400e..b310a897a4ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -53,6 +53,7 @@
#include "i915_vgpu.h"
#include "intel_drv.h"
#include "intel_uc.h"
+#include "intel_workarounds.h"
static struct drm_driver driver;
@@ -287,7 +288,7 @@ static void intel_detect_pch(struct drm_i915_private *dev_priv)
* Use PCH_NOP (PCH but no South Display) for PCH platforms without
* display.
*/
- if (pch && INTEL_INFO(dev_priv)->num_pipes == 0) {
+ if (pch && !HAS_DISPLAY(dev_priv)) {
DRM_DEBUG_KMS("Display disabled, reverting to NOP PCH\n");
dev_priv->pch_type = PCH_NOP;
dev_priv->pch_id = 0;
@@ -345,7 +346,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_WT(dev_priv);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
- value = USES_PPGTT(dev_priv);
+ value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
break;
case I915_PARAM_HAS_SEMAPHORES:
value = HAS_LEGACY_SEMAPHORES(dev_priv);
@@ -645,6 +646,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (i915_inject_load_failure())
return -ENODEV;
+ if (HAS_DISPLAY(dev_priv)) {
+ ret = drm_vblank_init(&dev_priv->drm,
+ INTEL_INFO(dev_priv)->num_pipes);
+ if (ret)
+ goto out;
+ }
+
intel_bios_init(dev_priv);
/* If we have > 1 VGA cards, then we need to arbitrate access
@@ -687,9 +695,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_modeset;
- intel_setup_overlay(dev_priv);
+ intel_overlay_setup(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return 0;
ret = intel_fbdev_init(dev);
@@ -699,6 +707,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(dev_priv);
+ intel_init_ipc(dev_priv);
+
return 0;
cleanup_gem:
@@ -859,6 +869,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_HSW_EARLY_SDV(dev_priv);
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+ pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
if (pre) {
DRM_ERROR("This is a pre-production stepping. "
@@ -1030,6 +1041,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
err_uncore:
intel_uncore_fini(dev_priv);
+ i915_mmio_cleanup(dev_priv);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
@@ -1049,17 +1061,6 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
- /*
- * i915.enable_ppgtt is read-only, so do an early pass to validate the
- * user's requested state against the hardware/driver capabilities. We
- * do this now so that we can print out any log messages once rather
- * than every time we check intel_enable_ppgtt().
- */
- i915_modparams.enable_ppgtt =
- intel_sanitize_enable_ppgtt(dev_priv,
- i915_modparams.enable_ppgtt);
- DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);
-
intel_gvt_sanitize_options(dev_priv);
}
@@ -1340,7 +1341,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
/* Need to calculate bandwidth only for Gen9 */
if (IS_BROXTON(dev_priv))
ret = bxt_get_dram_info(dev_priv);
- else if (INTEL_GEN(dev_priv) == 9)
+ else if (IS_GEN9(dev_priv))
ret = skl_get_dram_info(dev_priv);
else
ret = skl_dram_get_channels_info(dev_priv);
@@ -1375,6 +1376,29 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
+ if (HAS_PPGTT(dev_priv)) {
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
+ i915_report_error(dev_priv,
+ "incompatible vGPU found, support for isolated ppGTT required\n");
+ return -ENXIO;
+ }
+ }
+
+ if (HAS_EXECLISTS(dev_priv)) {
+ /*
+ * Older GVT emulation depends upon intercepting CSB mmio,
+ * which we no longer use, preferring to use the HWSP cache
+ * instead.
+ */
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_hwsp_emulation(dev_priv)) {
+ i915_report_error(dev_priv,
+ "old vGPU host found, support for HWSP emulation required\n");
+ return -ENXIO;
+ }
+ }
+
intel_sanitize_options(dev_priv);
i915_perf_init(dev_priv);
@@ -1444,6 +1468,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_uncore_sanitize(dev_priv);
+ intel_gt_init_workarounds(dev_priv);
i915_gem_load_init_fences(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1543,7 +1568,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
} else
DRM_ERROR("Failed to register driver for userspace access!\n");
- if (INTEL_INFO(dev_priv)->num_pipes) {
+ if (HAS_DISPLAY(dev_priv)) {
/* Must be done after probing outputs */
intel_opregion_register(dev_priv);
acpi_video_register();
@@ -1567,7 +1592,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
* We need to coordinate the hotplugs with the asynchronous fbdev
* configuration, for which we use the fbdev->async_cookie.
*/
- if (INTEL_INFO(dev_priv)->num_pipes)
+ if (HAS_DISPLAY(dev_priv))
drm_kms_helper_poll_init(dev);
intel_power_domains_enable(dev_priv);
@@ -1630,14 +1655,16 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
(struct intel_device_info *)ent->driver_data;
struct intel_device_info *device_info;
struct drm_i915_private *i915;
+ int err;
i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
if (!i915)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- if (drm_dev_init(&i915->drm, &driver, &pdev->dev)) {
+ err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
+ if (err) {
kfree(i915);
- return NULL;
+ return ERR_PTR(err);
}
i915->drm.pdev = pdev;
@@ -1650,8 +1677,8 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
device_info->device_id = pdev->device;
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
- sizeof(device_info->platform_mask) * BITS_PER_BYTE);
- BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
+ BITS_PER_TYPE(device_info->platform_mask));
+ BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
return i915;
}
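The NULL-to-ERR_PTR conversion lets the caller see the real errno instead of guessing -ENOMEM; the standard consumption idiom, as adopted in the next hunk:

i915 = i915_driver_create(pdev, ent);
if (IS_ERR(i915))
	return PTR_ERR(i915);	/* -ENOMEM, or whatever drm_dev_init() failed with */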
@@ -1686,8 +1713,8 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
dev_priv = i915_driver_create(pdev, ent);
- if (!dev_priv)
- return -ENOMEM;
+ if (IS_ERR(dev_priv))
+ return PTR_ERR(dev_priv);
/* Disable nuclear pageflip by default on pre-ILK */
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
@@ -1711,26 +1738,12 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
- /*
- * TODO: move the vblank init and parts of modeset init steps into one
- * of the i915_driver_init_/i915_driver_register functions according
- * to the role/effect of the given init step.
- */
- if (INTEL_INFO(dev_priv)->num_pipes) {
- ret = drm_vblank_init(&dev_priv->drm,
- INTEL_INFO(dev_priv)->num_pipes);
- if (ret)
- goto out_cleanup_hw;
- }
-
ret = i915_load_modeset_init(&dev_priv->drm);
if (ret < 0)
goto out_cleanup_hw;
i915_driver_register(dev_priv);
- intel_init_ipc(dev_priv);
-
enable_rpm_wakeref_asserts(dev_priv);
i915_welcome_messages(dev_priv);
@@ -1782,7 +1795,6 @@ void i915_driver_unload(struct drm_device *dev)
i915_reset_error_state(dev_priv);
i915_gem_fini(dev_priv);
- intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini_hw(dev_priv);
@@ -1920,9 +1932,7 @@ static int i915_drm_suspend(struct drm_device *dev)
i915_save_state(dev_priv);
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
- intel_opregion_notify_adapter(dev_priv, opregion_target_state);
-
- intel_opregion_unregister(dev_priv);
+ intel_opregion_suspend(dev_priv, opregion_target_state);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1963,7 +1973,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
get_suspend_mode(dev_priv, hibernation));
ret = 0;
- if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv))
bxt_enable_dc9(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_enable_pc8(dev_priv);
@@ -2041,7 +2051,6 @@ static int i915_drm_resume(struct drm_device *dev)
i915_restore_state(dev_priv);
intel_pps_unlock_regs_wa(dev_priv);
- intel_opregion_setup(dev_priv);
intel_init_pch_refclk(dev_priv);
@@ -2083,12 +2092,10 @@ static int i915_drm_resume(struct drm_device *dev)
* */
intel_hpd_init(dev_priv);
- intel_opregion_register(dev_priv);
+ intel_opregion_resume(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
- intel_opregion_notify_adapter(dev_priv, PCI_D0);
-
intel_power_domains_enable(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2156,7 +2163,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_uncore_resume_early(dev_priv);
- if (IS_GEN9_LP(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2923,7 +2930,10 @@ static int intel_runtime_suspend(struct device *kdev)
intel_uncore_suspend(dev_priv);
ret = 0;
- if (IS_GEN9_LP(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ icl_display_core_uninit(dev_priv);
+ bxt_enable_dc9(dev_priv);
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_uninit(dev_priv);
bxt_enable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3008,7 +3018,18 @@ static int intel_runtime_resume(struct device *kdev)
if (intel_uncore_unclaimed_mmio(dev_priv))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
- if (IS_GEN9_LP(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
+ bxt_disable_dc9(dev_priv);
+ icl_display_core_init(dev_priv, true);
+ if (dev_priv->csr.dmc_payload) {
+ if (dev_priv->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC6)
+ skl_enable_dc6(dev_priv);
+ else if (dev_priv->csr.allowed_dc_mask &
+ DC_STATE_EN_UPTO_DC5)
+ gen9_enable_dc5(dev_priv);
+ }
+ } else if (IS_GEN9_LP(dev_priv)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9102571e9692..b1c31967194b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -53,7 +53,9 @@
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
+#include <drm/drm_dsc.h>
+#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -67,6 +69,7 @@
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
+#include "intel_workarounds.h"
#include "intel_uc.h"
#include "i915_gem.h"
@@ -87,8 +90,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20180921"
-#define DRIVER_TIMESTAMP 1537521997
+#define DRIVER_DATE "20181204"
+#define DRIVER_TIMESTAMP 1543944377
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -127,144 +130,6 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
-typedef struct {
- uint32_t val;
-} uint_fixed_16_16_t;
-
-#define FP_16_16_MAX ({ \
- uint_fixed_16_16_t fp; \
- fp.val = UINT_MAX; \
- fp; \
-})
-
-static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
-{
- if (val.val == 0)
- return true;
- return false;
-}
-
-static inline uint_fixed_16_16_t u32_to_fixed16(uint32_t val)
-{
- uint_fixed_16_16_t fp;
-
- WARN_ON(val > U16_MAX);
-
- fp.val = val << 16;
- return fp;
-}
-
-static inline uint32_t fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
-{
- return DIV_ROUND_UP(fp.val, 1 << 16);
-}
-
-static inline uint32_t fixed16_to_u32(uint_fixed_16_16_t fp)
-{
- return fp.val >> 16;
-}
-
-static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
- uint_fixed_16_16_t min2)
-{
- uint_fixed_16_16_t min;
-
- min.val = min(min1.val, min2.val);
- return min;
-}
-
-static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
- uint_fixed_16_16_t max2)
-{
- uint_fixed_16_16_t max;
-
- max.val = max(max1.val, max2.val);
- return max;
-}
-
-static inline uint_fixed_16_16_t clamp_u64_to_fixed16(uint64_t val)
-{
- uint_fixed_16_16_t fp;
- WARN_ON(val > U32_MAX);
- fp.val = (uint32_t) val;
- return fp;
-}
-
-static inline uint32_t div_round_up_fixed16(uint_fixed_16_16_t val,
- uint_fixed_16_16_t d)
-{
- return DIV_ROUND_UP(val.val, d.val);
-}
-
-static inline uint32_t mul_round_up_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t mul)
-{
- uint64_t intermediate_val;
-
- intermediate_val = (uint64_t) val * mul.val;
- intermediate_val = DIV_ROUND_UP_ULL(intermediate_val, 1 << 16);
- WARN_ON(intermediate_val > U32_MAX);
- return (uint32_t) intermediate_val;
-}
-
-static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
- uint_fixed_16_16_t mul)
-{
- uint64_t intermediate_val;
-
- intermediate_val = (uint64_t) val.val * mul.val;
- intermediate_val = intermediate_val >> 16;
- return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t div_fixed16(uint32_t val, uint32_t d)
-{
- uint64_t interm_val;
-
- interm_val = (uint64_t)val << 16;
- interm_val = DIV_ROUND_UP_ULL(interm_val, d);
- return clamp_u64_to_fixed16(interm_val);
-}
-
-static inline uint32_t div_round_up_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t d)
-{
- uint64_t interm_val;
-
- interm_val = (uint64_t)val << 16;
- interm_val = DIV_ROUND_UP_ULL(interm_val, d.val);
- WARN_ON(interm_val > U32_MAX);
- return (uint32_t) interm_val;
-}
-
-static inline uint_fixed_16_16_t mul_u32_fixed16(uint32_t val,
- uint_fixed_16_16_t mul)
-{
- uint64_t intermediate_val;
-
- intermediate_val = (uint64_t) val * mul.val;
- return clamp_u64_to_fixed16(intermediate_val);
-}
-
-static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
- uint_fixed_16_16_t add2)
-{
- uint64_t interm_sum;
-
- interm_sum = (uint64_t) add1.val + add2.val;
- return clamp_u64_to_fixed16(interm_sum);
-}
-
-static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
- uint32_t add2)
-{
- uint64_t interm_sum;
- uint_fixed_16_16_t interm_add2 = u32_to_fixed16(add2);
-
- interm_sum = (uint64_t) add1.val + interm_add2.val;
- return clamp_u64_to_fixed16(interm_sum);
-}
-
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
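The uint_fixed_16_16_t helpers removed above move wholesale into the new i915_fixed.h header included earlier. The representation, with worked values derived from the deleted code:

/* 16.16 fixed point: the real value is val / 65536. For example:
 *
 *   u32_to_fixed16(3).val == 3 << 16 == 0x30000
 *   div_fixed16(1, 2).val == DIV_ROUND_UP(1 << 16, 2) == 0x8000  (0.5)
 *   fixed16_to_u32_round_up(div_fixed16(1, 2)) == 1
 */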
@@ -283,7 +148,8 @@ enum hpd_pin {
#define for_each_hpd_pin(__pin) \
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
-#define HPD_STORM_DEFAULT_THRESHOLD 5
+/* Threshold == 5 for long IRQs, 50 for short */
+#define HPD_STORM_DEFAULT_THRESHOLD 50
struct i915_hotplug {
struct work_struct hotplug_work;
@@ -308,6 +174,8 @@ struct i915_hotplug {
bool poll_enabled;
unsigned int hpd_storm_threshold;
+ /* Whether or not to count short HPD IRQs in HPD storms */
+ u8 hpd_short_storm_enabled;
/*
* if we get a HPD irq from DP and a HPD irq from non-DP
@@ -465,8 +333,10 @@ struct drm_i915_display_funcs {
struct intel_csr {
struct work_struct work;
const char *fw_path;
+ uint32_t required_version;
+ uint32_t max_fw_size; /* bytes */
uint32_t *dmc_payload;
- uint32_t dmc_fw_size;
+ uint32_t dmc_fw_size; /* dwords */
uint32_t version;
uint32_t mmio_count;
i915_reg_t mmioaddr[8];
@@ -546,6 +416,8 @@ struct intel_fbc {
int adjusted_y;
int y;
+
+ uint16_t pixel_blend_mode;
} plane;
struct {
@@ -624,17 +496,19 @@ struct i915_psr {
bool sink_support;
bool prepared, enabled;
struct intel_dp *dp;
+ enum pipe pipe;
bool active;
struct work_struct work;
unsigned busy_frontbuffer_bits;
bool sink_psr2_support;
bool link_standby;
bool colorimetry_support;
- bool alpm;
bool psr2_enabled;
u8 sink_sync_latency;
ktime_t last_entry_attempt;
ktime_t last_exit;
+ bool sink_not_reliable;
+ bool irq_aux_error;
};
enum intel_pch {
@@ -918,6 +792,11 @@ struct i915_power_well_desc {
/* The pw is backing the VGA functionality */
bool has_vga:1;
bool has_fuses:1;
+ /*
+ * The pw is for an ICL+ TypeC PHY port in
+ * Thunderbolt mode.
+ */
+ bool is_tc_tbt:1;
} hsw;
};
const struct i915_power_well_ops *ops;
@@ -1042,17 +921,6 @@ struct i915_gem_mm {
#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */
-#define DP_AUX_A 0x40
-#define DP_AUX_B 0x10
-#define DP_AUX_C 0x20
-#define DP_AUX_D 0x30
-#define DP_AUX_E 0x50
-#define DP_AUX_F 0x60
-
-#define DDC_PIN_B 0x05
-#define DDC_PIN_C 0x04
-#define DDC_PIN_D 0x06
-
struct ddi_vbt_port_info {
int max_tmds_clock;
@@ -1099,6 +967,7 @@ struct intel_vbt_data {
unsigned int panel_type:4;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+ enum drm_panel_orientation orientation;
enum drrs_support_type drrs_type;
@@ -1144,6 +1013,7 @@ struct intel_vbt_data {
u8 *data;
const u8 *sequence[MIPI_SEQ_MAX];
u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
+ enum drm_panel_orientation orientation;
} dsi;
int crt_ddc_pin;
@@ -1228,9 +1098,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
- /* packed/y */
- struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
- struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
u8 enabled_slices; /* GEN11 has configurable 2 slices */
};
@@ -1240,9 +1107,9 @@ struct skl_ddb_values {
};
struct skl_wm_level {
- bool plane_en;
uint16_t plane_res_b;
uint8_t plane_res_l;
+ bool plane_en;
};
/* Stores plane specific WM parameters */
@@ -1323,20 +1190,6 @@ struct i915_frontbuffer_tracking {
unsigned flip_bits;
};
-struct i915_wa_reg {
- u32 addr;
- u32 value;
- /* bitmask representing WA bits */
- u32 mask;
-};
-
-#define I915_MAX_WA_REGS 16
-
-struct i915_workarounds {
- struct i915_wa_reg reg[I915_MAX_WA_REGS];
- u32 count;
-};
-
struct i915_virtual_gpu {
bool active;
u32 caps;
@@ -1520,30 +1373,12 @@ struct i915_oa_ops {
bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);
/**
- * @init_oa_buffer: Resets the head and tail pointers of the
- * circular buffer for periodic OA reports.
- *
- * Called when first opening a stream for OA metrics, but also may be
- * called in response to an OA buffer overflow or other error
- * condition.
- *
- * Note it may be necessary to clear the full OA buffer here as part of
- * maintaining the invariable that new reports must be written to
- * zeroed memory for us to be able to reliable detect if an expected
- * report has not yet landed in memory. (At least on Haswell the OA
- * buffer tail pointer is not synchronized with reports being visible
- * to the CPU)
- */
- void (*init_oa_buffer)(struct drm_i915_private *dev_priv);
-
- /**
* @enable_metric_set: Selects and applies any MUX configuration to set
* up the Boolean and Custom (B/C) counters that are part of the
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- int (*enable_metric_set)(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config);
+ int (*enable_metric_set)(struct i915_perf_stream *stream);
/**
* @disable_metric_set: Remove system constraints associated with using
@@ -1554,12 +1389,12 @@ struct i915_oa_ops {
/**
* @oa_enable: Enable periodic sampling
*/
- void (*oa_enable)(struct drm_i915_private *dev_priv);
+ void (*oa_enable)(struct i915_perf_stream *stream);
/**
* @oa_disable: Disable periodic sampling
*/
- void (*oa_disable)(struct drm_i915_private *dev_priv);
+ void (*oa_disable)(struct i915_perf_stream *stream);
/**
* @read: Copy data from the circular OA buffer into a given userspace
@@ -1804,7 +1639,7 @@ struct drm_i915_private {
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
- struct i915_workarounds workarounds;
+ struct i915_wa_list gt_wa_list;
struct i915_frontbuffer_tracking fb_tracking;
@@ -2148,6 +1983,8 @@ struct drm_i915_private {
struct delayed_work idle_work;
ktime_t last_init_time;
+
+ struct i915_vma *scratch;
} gt;
/* perform PHY state sanity checks? */
@@ -2322,6 +2159,8 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg)
(((__iter).curr += PAGE_SIZE) >= (__iter).max) ? \
(__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
+bool i915_sg_trim(struct sg_table *orig_st);
+
static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
{
unsigned int page_sizes;
@@ -2367,20 +2206,12 @@ intel_info(const struct drm_i915_private *dev_priv)
#define REVID_FOREVER 0xff
#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
-#define GEN_FOREVER (0)
-
#define INTEL_GEN_MASK(s, e) ( \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
- GENMASK((e) != GEN_FOREVER ? (e) - 1 : BITS_PER_LONG - 1, \
- (s) != GEN_FOREVER ? (s) - 1 : 0) \
-)
+ GENMASK((e) - 1, (s) - 1))
-/*
- * Returns true if Gen is in inclusive range [Start, End].
- *
- * Use GEN_FOREVER for unbound start and or end.
- */
+/* Returns true if Gen is in inclusive range [Start, End] */
#define IS_GEN(dev_priv, s, e) \
(!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
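The simplified INTEL_GEN_MASK()/IS_GEN() above reduce the gen-range test to a single GENMASK() intersection now that GEN_FOREVER is gone. A minimal userspace sketch of that test; GENMASK() is re-derived here for illustration (the kernel's lives in <linux/bits.h>), the 64-bit unsigned long is an assumption, and the device gens are made up:

#include <stdio.h>

/* assumes 64-bit unsigned long, like the kernel's GENMASK() on 64-bit */
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (64 - 1 - (h))))
#define GEN_MASK(s, e)	GENMASK((e) - 1, (s) - 1)	/* gen N -> bit N-1 */

int main(void)
{
	unsigned long gen9_mask = 1UL << (9 - 1);	/* a Gen9 device */

	printf("gen9 in [9,10]:  %d\n", !!(gen9_mask & GEN_MASK(9, 10)));
	printf("gen9 in [11,11]: %d\n", !!(gen9_mask & GEN_MASK(11, 11)));
	return 0;
}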
@@ -2461,6 +2292,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
+ INTEL_DEVID(dev_priv) == 0x87C0)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
(dev_priv)->info.gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2592,17 +2425,22 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
-#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
-#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)
+#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
+#define HAS_PPGTT(dev_priv) \
+ (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
+#define HAS_FULL_PPGTT(dev_priv) \
+ (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
+#define HAS_FULL_48BIT_PPGTT(dev_priv) \
+ (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
+
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
})
-#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.has_overlay)
+#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
- ((dev_priv)->info.overlay_needs_physical)
+ ((dev_priv)->info.display.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
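The INTEL_PPGTT()/HAS_*_PPGTT() macros above replace the old enable_ppgtt modparam checks with comparisons against an ordered capability level. A minimal sketch of that scheme; the enum values here are assumptions for illustration, not the kernel's definitions:

#include <stdio.h>

enum intel_ppgtt {
	INTEL_PPGTT_NONE = 0,
	INTEL_PPGTT_ALIASING,
	INTEL_PPGTT_FULL,
	INTEL_PPGTT_FULL_4LVL,
};

#define HAS_PPGTT(p)		((p) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(p)	((p) >= INTEL_PPGTT_FULL)
#define HAS_FULL_48BIT_PPGTT(p)	((p) >= INTEL_PPGTT_FULL_4LVL)

int main(void)
{
	enum intel_ppgtt p = INTEL_PPGTT_FULL;

	/* Ordering makes "at least this capable" a single comparison. */
	printf("%d %d %d\n", HAS_PPGTT(p), HAS_FULL_PPGTT(p),
	       HAS_FULL_48BIT_PPGTT(p));	/* -> 1 1 0 */
	return 0;
}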
@@ -2623,31 +2461,31 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
!(IS_I915G(dev_priv) || \
IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.has_hotplug)
+#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.display.has_hotplug)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc)
+#define HAS_FBC(dev_priv) ((dev_priv)->info.display.has_fbc)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_DP_MST(dev_priv) ((dev_priv)->info.has_dp_mst)
+#define HAS_DP_MST(dev_priv) ((dev_priv)->info.display.has_dp_mst)
-#define HAS_DDI(dev_priv) ((dev_priv)->info.has_ddi)
+#define HAS_DDI(dev_priv) ((dev_priv)->info.display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv) ((dev_priv)->info.has_psr)
+#define HAS_PSR(dev_priv) ((dev_priv)->info.display.has_psr)
#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
-#define HAS_CSR(dev_priv) ((dev_priv)->info.has_csr)
+#define HAS_CSR(dev_priv) ((dev_priv)->info.display.has_csr)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
-#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc)
+#define HAS_IPC(dev_priv) ((dev_priv)->info.display.has_ipc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
@@ -2708,7 +2546,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
@@ -2720,6 +2558,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)
+
#include "i915_trace.h"
static inline bool intel_vtd_active(void)
@@ -2742,9 +2582,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
return IS_BROXTON(dev_priv) && intel_vtd_active();
}
-int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
- int enable_ppgtt);
-
/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
@@ -3229,7 +3066,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
@@ -3461,6 +3298,7 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
enum port port);
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);
/* intel_acpi.c */
#ifdef CONFIG_ACPI
@@ -3482,8 +3320,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
-extern int intel_connector_register(struct drm_connector *);
-extern void intel_connector_unregister(struct drm_connector *);
extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
bool state);
extern void intel_display_resume(struct drm_device *dev);
@@ -3496,6 +3332,9 @@ extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
bool interactive);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -3583,6 +3422,12 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
+/* intel_combo_phy.c */
+void icl_combo_phys_init(struct drm_i915_private *dev_priv);
+void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
+void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
+void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);
+
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
@@ -3870,4 +3715,9 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
return I915_HWS_CSB_WRITE_INDEX;
}
+static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
+{
+ return i915_ggtt_offset(i915->gt.scratch);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_fixed.h b/drivers/gpu/drm/i915/i915_fixed.h
new file mode 100644
index 000000000000..591dd89ba7af
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_fixed.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_FIXED_H_
+#define _I915_FIXED_H_
+
+typedef struct {
+ u32 val;
+} uint_fixed_16_16_t;
+
+#define FP_16_16_MAX ((uint_fixed_16_16_t){ .val = UINT_MAX })
+
+static inline bool is_fixed16_zero(uint_fixed_16_16_t val)
+{
+ return val.val == 0;
+}
+
+static inline uint_fixed_16_16_t u32_to_fixed16(u32 val)
+{
+ uint_fixed_16_16_t fp = { .val = val << 16 };
+
+ WARN_ON(val > U16_MAX);
+
+ return fp;
+}
+
+static inline u32 fixed16_to_u32_round_up(uint_fixed_16_16_t fp)
+{
+ return DIV_ROUND_UP(fp.val, 1 << 16);
+}
+
+static inline u32 fixed16_to_u32(uint_fixed_16_16_t fp)
+{
+ return fp.val >> 16;
+}
+
+static inline uint_fixed_16_16_t min_fixed16(uint_fixed_16_16_t min1,
+ uint_fixed_16_16_t min2)
+{
+ uint_fixed_16_16_t min = { .val = min(min1.val, min2.val) };
+
+ return min;
+}
+
+static inline uint_fixed_16_16_t max_fixed16(uint_fixed_16_16_t max1,
+ uint_fixed_16_16_t max2)
+{
+ uint_fixed_16_16_t max = { .val = max(max1.val, max2.val) };
+
+ return max;
+}
+
+static inline uint_fixed_16_16_t clamp_u64_to_fixed16(u64 val)
+{
+ uint_fixed_16_16_t fp = { .val = (u32)val };
+
+ WARN_ON(val > U32_MAX);
+
+ return fp;
+}
+
+static inline u32 div_round_up_fixed16(uint_fixed_16_16_t val,
+ uint_fixed_16_16_t d)
+{
+ return DIV_ROUND_UP(val.val, d.val);
+}
+
+static inline u32 mul_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
+{
+ u64 tmp;
+
+ tmp = (u64)val * mul.val;
+ tmp = DIV_ROUND_UP_ULL(tmp, 1 << 16);
+ WARN_ON(tmp > U32_MAX);
+
+ return (u32)tmp;
+}
+
+static inline uint_fixed_16_16_t mul_fixed16(uint_fixed_16_16_t val,
+ uint_fixed_16_16_t mul)
+{
+ u64 tmp;
+
+ tmp = (u64)val.val * mul.val;
+ tmp = tmp >> 16;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t div_fixed16(u32 val, u32 d)
+{
+ u64 tmp;
+
+ tmp = (u64)val << 16;
+ tmp = DIV_ROUND_UP_ULL(tmp, d);
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline u32 div_round_up_u32_fixed16(u32 val, uint_fixed_16_16_t d)
+{
+ u64 tmp;
+
+ tmp = (u64)val << 16;
+ tmp = DIV_ROUND_UP_ULL(tmp, d.val);
+ WARN_ON(tmp > U32_MAX);
+
+ return (u32)tmp;
+}
+
+static inline uint_fixed_16_16_t mul_u32_fixed16(u32 val, uint_fixed_16_16_t mul)
+{
+ u64 tmp;
+
+ tmp = (u64)val * mul.val;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t add_fixed16(uint_fixed_16_16_t add1,
+ uint_fixed_16_16_t add2)
+{
+ u64 tmp;
+
+ tmp = (u64)add1.val + add2.val;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
+ u32 add2)
+{
+ uint_fixed_16_16_t tmp_add2 = u32_to_fixed16(add2);
+ u64 tmp;
+
+ tmp = (u64)add1.val + tmp_add2.val;
+
+ return clamp_u64_to_fixed16(tmp);
+}
+
+#endif /* _I915_FIXED_H_ */
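The new header keeps the watermark math in an explicit 16.16 fixed-point type: the low 16 bits carry the fraction, so 1.0 is 1 << 16. A standalone userspace sketch of the same arithmetic, mirroring div_fixed16() and mul_fixed16() without the kernel's WARN_ON clamping:

#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t val; } fp16_16;

static fp16_16 fp_from_u32(uint32_t v)
{
	return (fp16_16){ v << 16 };
}

static fp16_16 fp_mul(fp16_16 a, fp16_16 b)
{
	/* widen to 64 bits, then drop the extra 16 fraction bits */
	return (fp16_16){ (uint32_t)(((uint64_t)a.val * b.val) >> 16) };
}

static fp16_16 fp_div(uint32_t n, uint32_t d)
{
	/* rounds up, like DIV_ROUND_UP_ULL() in div_fixed16() */
	uint64_t t = (uint64_t)n << 16;

	return (fp16_16){ (uint32_t)((t + d - 1) / d) };
}

int main(void)
{
	fp16_16 x = fp_div(3, 2);		/* 1.5 */
	fp16_16 y = fp_mul(x, fp_from_u32(4));	/* 6.0 */

	printf("x = 0x%08x (%f)\n", x.val, x.val / 65536.0);
	printf("y = 0x%08x (%f)\n", y.val, y.val / 65536.0);
	return 0;
}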
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c8aa57ce83b..d36a9755ad91 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1740,6 +1740,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
*/
err = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_PRIORITY |
(write_domain ? I915_WAIT_ALL : 0),
MAX_SCHEDULE_TIMEOUT,
to_rps_client(file));
@@ -2381,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
+/*
+ * Move pages to the appropriate LRU list and release the pagevec, decrementing the
+ * ref count of those pages.
+ */
+static void check_release_pagevec(struct pagevec *pvec)
+{
+ check_move_unevictable_pages(pvec);
+ __pagevec_release(pvec);
+ cond_resched();
+}
+
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct sgt_iter sgt_iter;
+ struct pagevec pvec;
struct page *page;
__i915_gem_object_release_shmem(obj, pages, true);
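The hunk above stops releasing shmem pages one put_page() at a time and instead batches them through a pagevec, flushing whenever it fills and once more for the remainder. A userspace analogue of that flush-when-full pattern; the batch size here is an assumption, chosen only to be small like the kernel's PAGEVEC_SIZE:

#include <stdio.h>

#define BATCH 15	/* assumed; PAGEVEC_SIZE is similarly small */

struct batch { int n; int items[BATCH]; };

static void release_batch(struct batch *b)
{
	/* one expensive pass per batch, like check_release_pagevec() */
	printf("releasing %d items in one pass\n", b->n);
	b->n = 0;
}

static void release_one(struct batch *b, int item)
{
	b->items[b->n++] = item;
	if (b->n == BATCH)	/* pagevec full -> flush */
		release_batch(b);
}

int main(void)
{
	struct batch b = { 0 };

	for (int i = 0; i < 40; i++)
		release_one(&b, i);
	if (b.n)		/* final partial batch */
		release_batch(&b);
	return 0;
}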
@@ -2395,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj, pages);
+ mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
+
+ pagevec_init(&pvec);
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
set_page_dirty(page);
@@ -2402,8 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- put_page(page);
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
}
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
obj->mm.dirty = false;
sg_free_table(pages);
@@ -2483,7 +2502,7 @@ unlock:
mutex_unlock(&obj->mm.lock);
}
-static bool i915_sg_trim(struct sg_table *orig_st)
+bool i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
struct scatterlist *sg, *new_sg;
@@ -2524,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment = i915_sg_segment_size();
unsigned int sg_page_sizes;
+ struct pagevec pvec;
gfp_t noreclaim;
int ret;
@@ -2559,6 +2579,7 @@ rebuild_st:
* Fail silently without starting the shrinker
*/
mapping = obj->base.filp->f_mapping;
+ mapping_set_unevictable(mapping);
noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
@@ -2573,6 +2594,7 @@ rebuild_st:
gfp_t gfp = noreclaim;
do {
+ cond_resched();
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (likely(!IS_ERR(page)))
break;
@@ -2583,7 +2605,6 @@ rebuild_st:
}
i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
- cond_resched();
/*
* We've tried hard to allocate the memory by reaping
@@ -2673,8 +2694,14 @@ rebuild_st:
err_sg:
sg_mark_end(sg);
err_pages:
- for_each_sgt_page(page, sgt_iter, st)
- put_page(page);
+ mapping_clear_unevictable(mapping);
+ pagevec_init(&pvec);
+ for_each_sgt_page(page, sgt_iter, st) {
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
sg_free_table(st);
kfree(st);
@@ -3282,16 +3309,6 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
static void nop_submit_request(struct i915_request *request)
{
- GEM_TRACE("%s fence %llx:%d -> -EIO\n",
- request->engine->name,
- request->fence.context, request->fence.seqno);
- dma_fence_set_error(&request->fence, -EIO);
-
- i915_request_submit(request);
-}
-
-static void nop_complete_submit_request(struct i915_request *request)
-{
unsigned long flags;
GEM_TRACE("%s fence %llx:%d -> -EIO\n",
@@ -3327,57 +3344,33 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- for_each_engine(engine, i915, id) {
+ for_each_engine(engine, i915, id)
i915_gem_reset_prepare_engine(engine);
- engine->submit_request = nop_submit_request;
- engine->schedule = NULL;
- }
- i915->caps.scheduler = 0;
-
/* Even if the GPU reset fails, it should still stop the engines */
if (INTEL_GEN(i915) >= 5)
intel_gpu_reset(i915, ALL_ENGINES);
- /*
- * Make sure no one is running the old callback before we proceed with
- * cancelling requests and resetting the completion tracking. Otherwise
- * we might submit a request to the hardware which never completes.
- */
- synchronize_rcu();
-
for_each_engine(engine, i915, id) {
- /* Mark all executing requests as skipped */
- engine->cancel_requests(engine);
-
- /*
- * Only once we've force-cancelled all in-flight requests can we
- * start to complete all requests.
- */
- engine->submit_request = nop_complete_submit_request;
+ engine->submit_request = nop_submit_request;
+ engine->schedule = NULL;
}
+ i915->caps.scheduler = 0;
/*
* Make sure no request can slip through without getting completed by
* either this call here to intel_engine_init_global_seqno, or the one
- * in nop_complete_submit_request.
+ * in nop_submit_request.
*/
synchronize_rcu();
- for_each_engine(engine, i915, id) {
- unsigned long flags;
-
- /*
- * Mark all pending requests as complete so that any concurrent
- * (lockless) lookup doesn't try and wait upon the request as we
- * reset it.
- */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- intel_engine_init_global_seqno(engine,
- intel_engine_last_submit(engine));
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ /* Mark all executing requests as skipped */
+ for_each_engine(engine, i915, id)
+ engine->cancel_requests(engine);
+ for_each_engine(engine, i915, id) {
i915_gem_reset_finish_engine(engine);
+ intel_engine_wakeup(engine);
}
out:
@@ -3530,6 +3523,8 @@ static void __sleep_rcu(struct rcu_head *rcu)
struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
struct drm_i915_private *i915 = s->i915;
+ destroy_rcu_head(&s->rcu);
+
if (same_epoch(i915, s->epoch)) {
INIT_WORK(&s->work, __sleep_work);
queue_work(i915->wq, &s->work);
@@ -3646,6 +3641,7 @@ out_rearm:
if (same_epoch(dev_priv, epoch)) {
struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s) {
+ init_rcu_head(&s->rcu);
s->i915 = dev_priv;
s->epoch = epoch;
call_rcu(&s->rcu, __sleep_rcu);
@@ -3743,7 +3739,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
start = ktime_get();
ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_PRIORITY |
+ I915_WAIT_ALL,
to_wait_timeout(args->timeout_ns),
to_rps_client(file));
@@ -4710,6 +4708,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
INIT_LIST_HEAD(&obj->batch_pool_link);
+ init_rcu_head(&obj->rcu);
+
obj->ops = ops;
reservation_object_init(&obj->__builtin_resv);
@@ -4977,6 +4977,13 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
/*
+ * We reuse obj->rcu for the freed list, so we had better not treat
+ * it like a rcu_head from this point forwards. And we expect all
+ * objects to be freed via this path.
+ */
+ destroy_rcu_head(&obj->rcu);
+
+ /*
* Since we require blocking on struct_mutex to unbind the freed
* object from the GPU before releasing resources back to the
* system, we can not do that directly from the RCU callback (which may
@@ -5293,19 +5300,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
- if (HAS_PCH_NOP(dev_priv)) {
- if (IS_IVYBRIDGE(dev_priv)) {
- u32 temp = I915_READ(GEN7_MSG_CTL);
- temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
- I915_WRITE(GEN7_MSG_CTL, temp);
- } else if (INTEL_GEN(dev_priv) >= 7) {
- u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
- temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
- }
- }
-
- intel_gt_workarounds_apply(dev_priv);
+ /* Apply the GT workarounds... */
+ intel_gt_apply_workarounds(dev_priv);
+ /* ...and determine whether they are sticking. */
+ intel_gt_verify_workarounds(dev_priv, "init");
i915_gem_init_swizzling(dev_priv);
@@ -5500,6 +5498,44 @@ err_active:
goto out_ctx;
}
+static int
+i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ obj = i915_gem_object_create_stolen(i915, size);
+ if (!obj)
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj)) {
+ DRM_ERROR("Failed to allocate scratch page\n");
+ return PTR_ERR(obj);
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (ret)
+ goto err_unref;
+
+ i915->gt.scratch = vma;
+ return 0;
+
+err_unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
+static void i915_gem_fini_scratch(struct drm_i915_private *i915)
+{
+ i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+}
+
int i915_gem_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -5546,12 +5582,19 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
- ret = i915_gem_contexts_init(dev_priv);
+ ret = i915_gem_init_scratch(dev_priv,
+ IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
}
+ ret = i915_gem_contexts_init(dev_priv);
+ if (ret) {
+ GEM_BUG_ON(ret == -EIO);
+ goto err_scratch;
+ }
+
ret = intel_engines_init(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
@@ -5624,6 +5667,8 @@ err_pm:
err_context:
if (ret != -EIO)
i915_gem_contexts_fini(dev_priv);
+err_scratch:
+ i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -5675,8 +5720,11 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
intel_uc_fini(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_contexts_fini(dev_priv);
+ i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
+ intel_wa_list_free(&dev_priv->gt_wa_list);
+
intel_cleanup_gt_powersave(dev_priv);
intel_uc_fini_misc(dev_priv);
@@ -5951,7 +5999,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
* the bits.
*/
BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
- sizeof(atomic_t) * BITS_PER_BYTE);
+ BITS_PER_TYPE(atomic_t));
if (old) {
WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 599c4f6eb1ea..b0e4b976880c 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -47,17 +47,19 @@ struct drm_i915_private;
#define GEM_DEBUG_DECL(var) var
#define GEM_DEBUG_EXEC(expr) expr
#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
+#define GEM_DEBUG_WARN_ON(expr) GEM_WARN_ON(expr)
#else
#define GEM_SHOW_DEBUG() (0)
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
-#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
+#define GEM_WARN_ON(expr) ({ unlikely(!!(expr)); })
#define GEM_DEBUG_DECL(var)
#define GEM_DEBUG_EXEC(expr) do { } while (0)
#define GEM_DEBUG_BUG_ON(expr)
+#define GEM_DEBUG_WARN_ON(expr) ({ BUILD_BUG_ON_INVALID(expr); 0; })
#endif
#if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
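The change above makes GEM_WARN_ON() evaluate to the expression's value even in non-debug builds, so it can still sit as a predicate inside an if (); only the warning side effect is compiled out. A userspace sketch of the two flavors, using the same GNU statement-expression syntax the kernel relies on:

#include <stdio.h>

#ifdef DEBUG
#define WARN_ON(x) ({						\
	int __ret = !!(x);					\
	if (__ret)						\
		fprintf(stderr, "warning: %s\n", #x);		\
	__ret;							\
})
#else
#define WARN_ON(x) ({ !!(x); })	/* still evaluated, warning elided */
#endif

int main(void)
{
	int bad = 1;

	if (WARN_ON(bad))	/* usable as a predicate either way */
		printf("handled error path\n");
	return 0;
}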
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f772593b99ab..371c07087095 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
- ctx->sched.priority = I915_PRIORITY_NORMAL;
+ ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
struct intel_context *ce = &ctx->__engine[n];
@@ -414,7 +414,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
if (IS_ERR(ctx))
return ctx;
- if (USES_FULL_PPGTT(dev_priv)) {
+ if (HAS_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
ppgtt = i915_ppgtt_create(dev_priv, file_priv);
@@ -457,7 +457,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
if (ret)
return ERR_PTR(ret);
- ctx = __create_hw_context(to_i915(dev), NULL);
+ ctx = i915_gem_create_context(to_i915(dev), NULL);
if (IS_ERR(ctx))
goto out;
@@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
}
i915_gem_context_clear_bannable(ctx);
- ctx->sched.priority = prio;
+ ctx->sched.priority = I915_USER_PRIORITY(prio);
ctx->ring_size = PAGE_SIZE;
GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -535,16 +535,12 @@ static bool needs_preempt_context(struct drm_i915_private *i915)
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
- int ret;
/* Reassure ourselves we are only called once */
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
- ret = intel_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
-
+ intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
init_contexts(dev_priv);
/* lowest priority; idle task */
@@ -879,7 +875,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_bannable(ctx);
break;
case I915_CONTEXT_PARAM_PRIORITY:
- args->value = ctx->sched.priority;
+ args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
break;
default:
ret = -EINVAL;
@@ -948,7 +944,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
!capable(CAP_SYS_NICE))
ret = -EPERM;
else
- ctx->sched.priority = priority;
+ ctx->sched.priority =
+ I915_USER_PRIORITY(priority);
}
break;
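The hunks above route every context priority through I915_USER_PRIORITY(), shifting the user-supplied value up so the scheduler keeps the low bits free for internal bumps, and shifting back down in getparam. A sketch of that packing; the shift width here is an assumption for illustration:

#include <stdio.h>

#define USER_PRIORITY_SHIFT	2	/* assumed width of internal bits */
#define USER_PRIORITY(x)	((x) << USER_PRIORITY_SHIFT)

int main(void)
{
	int user = 512;				/* as passed via setparam */
	int sched = USER_PRIORITY(user);	/* value stored internally */

	sched |= 1;	/* internal priority bump in the low bits */

	/* getparam round-trips the user value, dropping internal bits */
	printf("user sees %d\n", sched >> USER_PRIORITY_SHIFT);
	return 0;
}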
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 08165f6a0a84..f6d870b1f73e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -163,6 +163,7 @@ struct i915_gem_context {
/** engine: per-engine logical HW state */
struct intel_context {
struct i915_gem_context *gem_context;
+ struct intel_engine_cs *active;
struct i915_vma *state;
struct intel_ring *ring;
u32 *lrc_reg_state;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d4fac09095f8..786d719e652d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
else if (gen >= 4)
len = 4;
else
- len = 6;
+ len = 3;
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
@@ -1309,11 +1309,6 @@ relocate_entry(struct i915_vma *vma,
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = addr;
*batch++ = target_offset;
-
- /* And again for good measure (blb/pnv) */
- *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
- *batch++ = addr;
- *batch++ = target_offset;
}
goto out;
@@ -2191,7 +2186,7 @@ signal_fence_array(struct i915_execbuffer *eb,
if (!(flags & I915_EXEC_FENCE_SIGNAL))
continue;
- drm_syncobj_replace_fence(syncobj, 0, fence);
+ drm_syncobj_replace_fence(syncobj, fence);
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 07999fe09ad2..add1fe7aeb93 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -133,55 +133,6 @@ static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
i915->ggtt.invalidate(i915);
}
-int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
- int enable_ppgtt)
-{
- bool has_full_ppgtt;
- bool has_full_48bit_ppgtt;
-
- if (!dev_priv->info.has_aliasing_ppgtt)
- return 0;
-
- has_full_ppgtt = dev_priv->info.has_full_ppgtt;
- has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
-
- if (intel_vgpu_active(dev_priv)) {
- /* GVT-g has no support for 32bit ppgtt */
- has_full_ppgtt = false;
- has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
- }
-
- /*
- * We don't allow disabling PPGTT for gen9+ as it's a requirement for
- * execlists, the sole mechanism available to submit work.
- */
- if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
- return 0;
-
- if (enable_ppgtt == 1)
- return 1;
-
- if (enable_ppgtt == 2 && has_full_ppgtt)
- return 2;
-
- if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
- return 3;
-
- /* Disable ppgtt on SNB if VT-d is on. */
- if (IS_GEN6(dev_priv) && intel_vtd_active()) {
- DRM_INFO("Disabling PPGTT because VT-d is on\n");
- return 0;
- }
-
- if (has_full_48bit_ppgtt)
- return 3;
-
- if (has_full_ppgtt)
- return 2;
-
- return 1;
-}
-
static int ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
@@ -235,9 +186,9 @@ static void clear_pages(struct i915_vma *vma)
memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}
-static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
+static u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
@@ -274,9 +225,9 @@ static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode
-static gen6_pte_t snb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -296,9 +247,9 @@ static gen6_pte_t snb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -320,9 +271,9 @@ static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t byt_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
+static u64 byt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -336,9 +287,9 @@ static gen6_pte_t byt_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 hsw_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -349,9 +300,9 @@ static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
return pte;
}
-static gen6_pte_t iris_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 unused)
+static u64 iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
{
gen6_pte_t pte = GEN6_PTE_VALID;
pte |= HSW_PTE_ADDR_ENCODE(addr);
@@ -629,10 +580,9 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* region, including any PTEs which happen to point to scratch.
*
* This is only relevant for the 48b PPGTT where we support
- * huge-gtt-pages, see also i915_vma_insert().
- *
- * TODO: we should really consider write-protecting the scratch-page and
- * sharing between ppgtt
+ * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
+ * scratch page (read-only) among all VMs, we create a single 64K
+ * scratch page for all of them.
*/
size = I915_GTT_PAGE_SIZE_4K;
if (i915_vm_is_48bit(vm) &&
@@ -715,14 +665,13 @@ static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
static void gen8_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
- fill_px(vm, pt,
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
+ fill_px(vm, pt, vm->scratch_pte);
}
-static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,
+static void gen6_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
- fill32_px(&ppgtt->base.vm, pt, ppgtt->scratch_pte);
+ fill32_px(vm, pt, vm->scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
@@ -856,15 +805,13 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
/* Removes entries from a single page table, releasing it if it's empty.
* Caller can use the return value to update higher-level entries.
*/
-static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
+static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
struct i915_page_table *pt,
u64 start, u64 length)
{
unsigned int num_entries = gen8_pte_count(start, length);
unsigned int pte = gen8_pte_index(start);
unsigned int pte_end = pte + num_entries;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -875,7 +822,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
vaddr = kmap_atomic_px(pt);
while (pte < pte_end)
- vaddr[pte++] = scratch_pte;
+ vaddr[pte++] = vm->scratch_pte;
kunmap_atomic(vaddr);
return false;
@@ -1208,7 +1155,7 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
u16 i;
- encode = pte_encode | vma->vm->scratch_page.daddr;
+ encode = vma->vm->scratch_pte;
vaddr = kmap_atomic_px(pd->page_table[idx.pde]);
for (i = 1; i < index; i += 16)
@@ -1261,10 +1208,35 @@ static int gen8_init_scratch(struct i915_address_space *vm)
{
int ret;
+ /*
+ * If everybody agrees not to write into the scratch page,
+ * we can reuse it for all vm, keeping contexts and processes separate.
+ */
+ if (vm->has_read_only &&
+ vm->i915->kernel_context &&
+ vm->i915->kernel_context->ppgtt) {
+ struct i915_address_space *clone =
+ &vm->i915->kernel_context->ppgtt->vm;
+
+ GEM_BUG_ON(!clone->has_read_only);
+
+ vm->scratch_page.order = clone->scratch_page.order;
+ vm->scratch_pte = clone->scratch_pte;
+ vm->scratch_pt = clone->scratch_pt;
+ vm->scratch_pd = clone->scratch_pd;
+ vm->scratch_pdp = clone->scratch_pdp;
+ return 0;
+ }
+
ret = setup_scratch_page(vm, __GFP_HIGHMEM);
if (ret)
return ret;
+ vm->scratch_pte =
+ gen8_pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_LLC,
+ PTE_READ_ONLY);
+
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
ret = PTR_ERR(vm->scratch_pt);
@@ -1336,6 +1308,9 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
+ if (!vm->scratch_page.daddr)
+ return;
+
if (use_4lvl(vm))
free_pdp(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
@@ -1573,8 +1548,7 @@ static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct i915_address_space *vm = &ppgtt->vm;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ const gen8_pte_t scratch_pte = vm->scratch_pte;
u64 start = 0, length = ppgtt->vm.total;
if (use_4lvl(vm)) {
@@ -1647,16 +1621,12 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = &i915->drm.pdev->dev;
- ppgtt->vm.total = USES_FULL_48BIT_PPGTT(i915) ?
+ ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
1ULL << 48 :
1ULL << 32;
- /*
- * From bdw, there is support for read-only pages in the PPGTT.
- *
- * XXX GVT is not honouring the lack of RW in the PTE bits.
- */
- ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+ /* From bdw, there is support for read-only pages in the PPGTT. */
+ ppgtt->vm.has_read_only = true;
i915_address_space_init(&ppgtt->vm, i915);
@@ -1721,7 +1691,7 @@ err_free:
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+ const gen6_pte_t scratch_pte = base->vm.scratch_pte;
struct i915_page_table *pt;
u32 pte, pde;
@@ -1782,19 +1752,6 @@ static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
ppgtt->pd_addr + pde);
}
-static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id) {
- u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
- GEN8_GFX_PPGTT_48B : 0;
- I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
- }
-}
-
static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
@@ -1834,7 +1791,8 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
- I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
+ I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
/* PPGTT support for Sandybridge/Gen6 and later */
@@ -1846,7 +1804,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned int pde = first_entry / GEN6_PTES;
unsigned int pte = first_entry % GEN6_PTES;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen6_pte_t scratch_pte = ppgtt->scratch_pte;
+ const gen6_pte_t scratch_pte = vm->scratch_pte;
while (num_entries) {
struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
@@ -1937,7 +1895,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
if (IS_ERR(pt))
goto unwind_out;
- gen6_initialize_pt(ppgtt, pt);
+ gen6_initialize_pt(vm, pt);
ppgtt->base.pd.page_table[pde] = pt;
if (i915_vma_is_bound(ppgtt->vma,
@@ -1975,9 +1933,9 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
if (ret)
return ret;
- ppgtt->scratch_pte =
- vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_NONE, PTE_READ_ONLY);
+ vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
+ I915_CACHE_NONE,
+ PTE_READ_ONLY);
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
@@ -1985,7 +1943,7 @@ static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
return PTR_ERR(vm->scratch_pt);
}
- gen6_initialize_pt(ppgtt, vm->scratch_pt);
+ gen6_initialize_pt(vm, vm->scratch_pt);
gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
@@ -2237,23 +2195,10 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
gtt_write_workarounds(dev_priv);
- /* In the case of execlists, PPGTT is enabled by the context descriptor
- * and the PDPs are contained within the context itself. We don't
- * need to do anything here. */
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
- return 0;
-
- if (!USES_PPGTT(dev_priv))
- return 0;
-
if (IS_GEN6(dev_priv))
gen6_ppgtt_enable(dev_priv);
else if (IS_GEN7(dev_priv))
gen7_ppgtt_enable(dev_priv);
- else if (INTEL_GEN(dev_priv) >= 8)
- gen8_ppgtt_enable(dev_priv);
- else
- MISSING_CASE(INTEL_GEN(dev_priv));
return 0;
}
@@ -2543,8 +2488,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start / I915_GTT_PAGE_SIZE;
unsigned num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte =
- gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
+ const gen8_pte_t scratch_pte = vm->scratch_pte;
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
@@ -2669,8 +2613,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, 0);
+ scratch_pte = vm->scratch_pte;
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
@@ -2952,7 +2895,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
/* And finally clear the reserved guard page */
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
- if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
+ if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
ret = i915_gem_init_aliasing_ppgtt(dev_priv);
if (ret)
goto err;
@@ -3076,6 +3019,10 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return ret;
}
+ ggtt->vm.scratch_pte =
+ ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
+ I915_CACHE_NONE, 0);
+
return 0;
}
@@ -3275,7 +3222,7 @@ static void bdw_setup_private_ppat(struct intel_ppat *ppat)
ppat->match = bdw_private_pat_match;
ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
- if (!USES_PPGTT(ppat->i915)) {
+ if (!HAS_PPGTT(ppat->i915)) {
/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
* so RTL will always use the value corresponding to
* pat_sel = 000".
@@ -3402,7 +3349,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->vm.insert_page = gen8_ggtt_insert_page;
ggtt->vm.clear_range = nop_clear_range;
- if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
+ if (intel_scanout_needs_vtd_wa(dev_priv))
ggtt->vm.clear_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
@@ -3427,6 +3374,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
+ ggtt->vm.pte_encode = gen8_pte_encode;
+
setup_private_pat(dev_priv);
return ggtt_probe_common(ggtt, size);
@@ -3614,7 +3563,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
/* Only VLV supports read-only GGTT mappings */
ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
- if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
+ if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3716,7 +3665,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
}
static struct scatterlist *
-rotate_pages(const dma_addr_t *in, unsigned int offset,
+rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
unsigned int width, unsigned int height,
unsigned int stride,
struct sg_table *st, struct scatterlist *sg)
@@ -3725,7 +3674,7 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
unsigned int src_idx;
for (column = 0; column < width; column++) {
- src_idx = stride * (height - 1) + column;
+ src_idx = stride * (height - 1) + column + offset;
for (row = 0; row < height; row++) {
st->nents++;
/* We don't need the pages, but need to initialize
@@ -3733,7 +3682,8 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
* The only thing we need are DMA addresses.
*/
sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) = in[offset + src_idx];
+ sg_dma_address(sg) =
+ i915_gem_object_get_dma_address(obj, src_idx);
sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
sg = sg_next(sg);
src_idx -= stride;
@@ -3747,22 +3697,11 @@ static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
- const unsigned long n_pages = obj->base.size / I915_GTT_PAGE_SIZE;
unsigned int size = intel_rotation_info_size(rot_info);
- struct sgt_iter sgt_iter;
- dma_addr_t dma_addr;
- unsigned long i;
- dma_addr_t *page_addr_list;
struct sg_table *st;
struct scatterlist *sg;
int ret = -ENOMEM;
-
- /* Allocate a temporary list of source pages for random access. */
- page_addr_list = kvmalloc_array(n_pages,
- sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!page_addr_list)
- return ERR_PTR(ret);
+ int i;
/* Allocate target SG list. */
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -3773,29 +3712,20 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
if (ret)
goto err_sg_alloc;
- /* Populate source page list from the object. */
- i = 0;
- for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
- page_addr_list[i++] = dma_addr;
-
- GEM_BUG_ON(i != n_pages);
st->nents = 0;
sg = st->sgl;
for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
- sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
+ sg = rotate_pages(obj, rot_info->plane[i].offset,
rot_info->plane[i].width, rot_info->plane[i].height,
rot_info->plane[i].stride, st, sg);
}
- kvfree(page_addr_list);
-
return st;
err_sg_alloc:
kfree(st);
err_st_alloc:
- kvfree(page_addr_list);
DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
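The reworked rotate_pages() above drops the temporary DMA-address array and instead walks each output column from the bottom source row upward, fetching addresses on demand via i915_gem_object_get_dma_address(). A tiny sketch of that index walk, with made-up dimensions:

#include <stdio.h>

int main(void)
{
	unsigned int width = 2, height = 3, stride = 2, offset = 0;

	for (unsigned int column = 0; column < width; column++) {
		/* start at the bottom source row of this column */
		unsigned int src_idx = stride * (height - 1) + column + offset;

		for (unsigned int row = 0; row < height; row++) {
			printf("out -> src page %u\n", src_idx);
			src_idx -= stride;	/* step up one source row */
		}
	}
	return 0;
}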
@@ -3840,6 +3770,8 @@ intel_partial_pages(const struct i915_ggtt_view *view,
count -= len >> PAGE_SHIFT;
if (count == 0) {
sg_mark_end(sg);
+ i915_sg_trim(st); /* Drop any unused tail entries. */
+
return st;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 28039290655c..4874da09a3c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -289,6 +289,7 @@ struct i915_address_space {
struct mutex mutex; /* protects vma and our lists */
+ u64 scratch_pte;
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -335,12 +336,11 @@ struct i915_address_space {
/* Some systems support read-only mappings for GGTT and/or PPGTT */
bool has_read_only:1;
- /* FIXME: Need a more generic return type */
- gen6_pte_t (*pte_encode)(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags); /* Create a valid PTE */
- /* flags for pte_encode */
+ u64 (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY (1<<0)
+
int (*allocate_va_range)(struct i915_address_space *vm,
u64 start, u64 length);
void (*clear_range)(struct i915_address_space *vm,
@@ -422,7 +422,6 @@ struct gen6_hw_ppgtt {
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
- gen6_pte_t scratch_pte;
unsigned int pin_count;
bool scan_for_unused_pt;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 3eb33e000d6f..07465123c166 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -27,11 +27,14 @@
*
*/
-#include <generated/utsrelease.h>
+#include <linux/ascii85.h>
+#include <linux/nmi.h>
+#include <linux/scatterlist.h>
#include <linux/stop_machine.h>
+#include <linux/utsname.h>
#include <linux/zlib.h>
+
#include <drm/drm_print.h>
-#include <linux/ascii85.h>
#include "i915_gpu_error.h"
#include "i915_drv.h"
@@ -77,112 +80,110 @@ static const char *purgeable_flag(int purgeable)
return purgeable ? " purgeable" : "";
}
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+static void __sg_set_buf(struct scatterlist *sg,
+ void *addr, unsigned int len, loff_t it)
{
-
- if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
- e->err = -ENOSPC;
- return false;
- }
-
- if (e->bytes == e->size - 1 || e->err)
- return false;
-
- return true;
+ sg->page_link = (unsigned long)virt_to_page(addr);
+ sg->offset = offset_in_page(addr);
+ sg->length = len;
+ sg->dma_address = it;
}
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
- unsigned len)
+static bool __i915_error_grow(struct drm_i915_error_state_buf *e, size_t len)
{
- if (e->pos + len <= e->start) {
- e->pos += len;
+ if (!len)
return false;
- }
- /* First vsnprintf needs to fit in its entirety for memmove */
- if (len >= e->size) {
- e->err = -EIO;
- return false;
- }
+ if (e->bytes + len + 1 <= e->size)
+ return true;
- return true;
-}
+ if (e->bytes) {
+ __sg_set_buf(e->cur++, e->buf, e->bytes, e->iter);
+ e->iter += e->bytes;
+ e->buf = NULL;
+ e->bytes = 0;
+ }
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
- unsigned len)
-{
- /* If this is first printf in this window, adjust it so that
- * start position matches start of the buffer
- */
+ if (e->cur == e->end) {
+ struct scatterlist *sgl;
- if (e->pos < e->start) {
- const size_t off = e->start - e->pos;
+ sgl = (typeof(sgl))__get_free_page(GFP_KERNEL);
+ if (!sgl) {
+ e->err = -ENOMEM;
+ return false;
+ }
- /* Should not happen but be paranoid */
- if (off > len || e->bytes) {
- e->err = -EIO;
- return;
+ if (e->cur) {
+ e->cur->offset = 0;
+ e->cur->length = 0;
+ e->cur->page_link =
+ (unsigned long)sgl | SG_CHAIN;
+ } else {
+ e->sgl = sgl;
}
- memmove(e->buf, e->buf + off, len - off);
- e->bytes = len - off;
- e->pos = e->start;
- return;
+ e->cur = sgl;
+ e->end = sgl + SG_MAX_SINGLE_ALLOC - 1;
}
- e->bytes += len;
- e->pos += len;
+ e->size = ALIGN(len + 1, SZ_64K);
+ e->buf = kmalloc(e->size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!e->buf) {
+ e->size = PAGE_ALIGN(len + 1);
+ e->buf = kmalloc(e->size, GFP_KERNEL);
+ }
+ if (!e->buf) {
+ e->err = -ENOMEM;
+ return false;
+ }
+
+ return true;
}
__printf(2, 0)
static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
- const char *f, va_list args)
+ const char *fmt, va_list args)
{
- unsigned len;
+ va_list ap;
+ int len;
- if (!__i915_error_ok(e))
+ if (e->err)
return;
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- va_list tmp;
-
- va_copy(tmp, args);
- len = vsnprintf(NULL, 0, f, tmp);
- va_end(tmp);
-
- if (!__i915_error_seek(e, len))
- return;
+ va_copy(ap, args);
+ len = vsnprintf(NULL, 0, fmt, ap);
+ va_end(ap);
+ if (len <= 0) {
+ e->err = len;
+ return;
}
- len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
+ if (!__i915_error_grow(e, len))
+ return;
- __i915_error_advance(e, len);
+ GEM_BUG_ON(e->bytes >= e->size);
+ len = vscnprintf(e->buf + e->bytes, e->size - e->bytes, fmt, args);
+ if (len < 0) {
+ e->err = len;
+ return;
+ }
+ e->bytes += len;
}
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
- const char *str)
+static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
{
unsigned len;
- if (!__i915_error_ok(e))
+ if (e->err || !str)
return;
len = strlen(str);
+ if (!__i915_error_grow(e, len))
+ return;
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- if (!__i915_error_seek(e, len))
- return;
- }
-
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
+ GEM_BUG_ON(e->bytes + len > e->size);
memcpy(e->buf + e->bytes, str, len);
-
- __i915_error_advance(e, len);
+ e->bytes += len;
}
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
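i915_error_vprintf() above now measures the output with a vsnprintf(NULL, 0, ...) pass on a va_copy (a va_list can only be consumed once), grows the buffer, then formats for real. A userspace sketch of the same two-pass pattern, with allocation error handling elided:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *grow_printf(char *buf, size_t *len, const char *fmt, ...)
{
	va_list args, ap;
	int need;

	va_start(args, fmt);
	va_copy(ap, args);
	need = vsnprintf(NULL, 0, fmt, ap);	/* measuring pass */
	va_end(ap);
	if (need < 0) {
		va_end(args);
		return buf;
	}

	buf = realloc(buf, *len + need + 1);
	vsnprintf(buf + *len, need + 1, fmt, args);	/* real pass */
	va_end(args);

	*len += need;
	return buf;
}

int main(void)
{
	size_t len = 0;
	char *buf = grow_printf(NULL, &len, "GPU hang on %s, seqno %d\n",
				"rcs0", 42);

	fputs(buf, stdout);
	free(buf);
	return 0;
}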
@@ -268,6 +269,8 @@ static int compress_page(struct compress *c,
if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
+
+ touch_nmi_watchdog();
} while (zstream->avail_in);
/* Fallback to uncompressed if we increase size? */
@@ -512,7 +515,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
err_printf(m, " SYNC_2: 0x%08x\n",
ee->semaphore_mboxes[2]);
}
- if (USES_PPGTT(m->i915)) {
+ if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
if (INTEL_GEN(m->i915) >= 8) {
@@ -635,25 +638,33 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
print_error_obj(m, NULL, "GuC log buffer", error_uc->guc_log);
}
-int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
- const struct i915_gpu_state *error)
+static void err_free_sgl(struct scatterlist *sgl)
{
- struct drm_i915_private *dev_priv = m->i915;
- struct drm_i915_error_object *obj;
- struct timespec64 ts;
- int i, j;
+ while (sgl) {
+ struct scatterlist *sg;
- if (!error) {
- err_printf(m, "No error state collected\n");
- return 0;
+ for (sg = sgl; !sg_is_chain(sg); sg++) {
+ kfree(sg_virt(sg));
+ if (sg_is_last(sg))
+ break;
+ }
+
+ sg = sg_is_last(sg) ? NULL : sg_chain_ptr(sg);
+ free_page((unsigned long)sgl);
+ sgl = sg;
}
+}
- if (IS_ERR(error))
- return PTR_ERR(error);
+static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
+ struct i915_gpu_state *error)
+{
+ struct drm_i915_error_object *obj;
+ struct timespec64 ts;
+ int i, j;
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
- err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ err_printf(m, "Kernel: %s\n", init_utsname()->release);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -683,12 +694,12 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
- err_print_pciid(m, error->i915);
+ err_print_pciid(m, m->i915);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
- if (HAS_CSR(dev_priv)) {
- struct intel_csr *csr = &dev_priv->csr;
+ if (HAS_CSR(m->i915)) {
+ struct intel_csr *csr = &m->i915->csr;
err_printf(m, "DMC loaded: %s\n",
yesno(csr->dmc_payload != NULL));
@@ -708,22 +719,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
- err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
+ err_printf(m, "Missed interrupts: 0x%08lx\n",
+ m->i915->gpu_error.missed_irq_rings);
for (i = 0; i < error->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, "ERROR: 0x%08x\n", error->error);
- if (INTEL_GEN(dev_priv) >= 8)
+ if (INTEL_GEN(m->i915) >= 8)
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
error->fault_data1, error->fault_data0);
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (IS_GEN7(dev_priv))
+ if (IS_GEN7(m->i915))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -745,7 +757,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
len += scnprintf(buf + len, sizeof(buf), "%s%s",
first ? "" : ", ",
- dev_priv->engine[j]->name);
+ m->i915->engine[j]->name);
first = 0;
}
scnprintf(buf + len, sizeof(buf), ")");
@@ -763,7 +775,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
obj = ee->batchbuffer;
if (obj) {
- err_puts(m, dev_priv->engine[i]->name);
+ err_puts(m, m->i915->engine[i]->name);
if (ee->context.pid)
err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
ee->context.comm,
@@ -775,16 +787,16 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
- print_error_obj(m, dev_priv->engine[i], NULL, obj);
+ print_error_obj(m, m->i915->engine[i], NULL, obj);
}
for (j = 0; j < ee->user_bo_count; j++)
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"user", ee->user_bo[j]);
if (ee->num_requests) {
err_printf(m, "%s --- %d requests\n",
- dev_priv->engine[i]->name,
+ m->i915->engine[i]->name,
ee->num_requests);
for (j = 0; j < ee->num_requests; j++)
error_print_request(m, " ",
@@ -794,10 +806,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
if (IS_ERR(ee->waiters)) {
err_printf(m, "%s --- ? waiters [unable to acquire spinlock]\n",
- dev_priv->engine[i]->name);
+ m->i915->engine[i]->name);
} else if (ee->num_waiters) {
err_printf(m, "%s --- %d waiters\n",
- dev_priv->engine[i]->name,
+ m->i915->engine[i]->name,
ee->num_waiters);
for (j = 0; j < ee->num_waiters; j++) {
err_printf(m, " seqno 0x%08x for %s [%d]\n",
@@ -807,22 +819,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
}
}
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"ringbuffer", ee->ringbuffer);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"HW Status", ee->hws_page);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"HW context", ee->ctx);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"WA context", ee->wa_ctx);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"WA batchbuffer", ee->wa_batchbuffer);
- print_error_obj(m, dev_priv->engine[i],
+ print_error_obj(m, m->i915->engine[i],
"NULL context", ee->default_state);
}
@@ -835,43 +847,107 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
err_print_capabilities(m, &error->device_info, &error->driver_caps);
err_print_params(m, &error->params);
err_print_uc(m, &error->uc);
+}
+
+static int err_print_to_sgl(struct i915_gpu_state *error)
+{
+ struct drm_i915_error_state_buf m;
+
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ if (READ_ONCE(error->sgl))
+ return 0;
+
+ memset(&m, 0, sizeof(m));
+ m.i915 = error->i915;
- if (m->bytes == 0 && m->err)
- return m->err;
+ __err_print_to_sgl(&m, error);
+
+ if (m.buf) {
+ __sg_set_buf(m.cur++, m.buf, m.bytes, m.iter);
+ m.bytes = 0;
+ m.buf = NULL;
+ }
+ if (m.cur) {
+ GEM_BUG_ON(m.end < m.cur);
+ sg_mark_end(m.cur - 1);
+ }
+ GEM_BUG_ON(m.sgl && !m.cur);
+
+ if (m.err) {
+ err_free_sgl(m.sgl);
+ return m.err;
+ }
+
+ if (cmpxchg(&error->sgl, NULL, m.sgl))
+ err_free_sgl(m.sgl);
return 0;
}
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
- struct drm_i915_private *i915,
- size_t count, loff_t pos)
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+ char *buf, loff_t off, size_t rem)
{
- memset(ebuf, 0, sizeof(*ebuf));
- ebuf->i915 = i915;
+ struct scatterlist *sg;
+ size_t count;
+ loff_t pos;
+ int err;
- /* We need to have enough room to store any i915_error_state printf
- * so that we can move it to start position.
- */
- ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
- ebuf->buf = kmalloc(ebuf->size,
- GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+ if (!error || !rem)
+ return 0;
- if (ebuf->buf == NULL) {
- ebuf->size = PAGE_SIZE;
- ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
- }
+ err = err_print_to_sgl(error);
+ if (err)
+ return err;
- if (ebuf->buf == NULL) {
- ebuf->size = 128;
- ebuf->buf = kmalloc(ebuf->size, GFP_KERNEL);
- }
+ sg = READ_ONCE(error->fit);
+ if (!sg || off < sg->dma_address)
+ sg = error->sgl;
+ if (!sg)
+ return 0;
- if (ebuf->buf == NULL)
- return -ENOMEM;
+ pos = sg->dma_address;
+ count = 0;
+ do {
+ size_t len, start;
- ebuf->start = pos;
+ if (sg_is_chain(sg)) {
+ sg = sg_chain_ptr(sg);
+ GEM_BUG_ON(sg_is_chain(sg));
+ }
- return 0;
+ len = sg->length;
+ if (pos + len <= off) {
+ pos += len;
+ continue;
+ }
+
+ start = sg->offset;
+ if (pos < off) {
+ GEM_BUG_ON(off - pos > len);
+ len -= off - pos;
+ start += off - pos;
+ pos = off;
+ }
+
+ len = min(len, rem);
+ GEM_BUG_ON(!len || len > sg->length);
+
+ memcpy(buf, page_address(sg_page(sg)) + start, len);
+
+ count += len;
+ pos += len;
+
+ buf += len;
+ rem -= len;
+ if (!rem) {
+ WRITE_ONCE(error->fit, sg);
+ break;
+ }
+ } while (!sg_is_last(sg++));
+
+ return count;
}
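
[Editor's note: the new copy routine gives readers pread()-like semantics over the cached chain — whole segments before off are skipped, a copy may straddle segment boundaries, and the last segment touched is stashed in error->fit so a sequential reader resumes without rescanning from the head. A minimal userspace model of the offset walk; the segment layout and names are invented:]

#include <stdio.h>
#include <string.h>

struct seg { const char *data; size_t len; };

/* Copy up to rem bytes starting at logical offset off from a segment list. */
static size_t copy_range(const struct seg *segs, size_t nsegs,
			 char *buf, size_t off, size_t rem)
{
	size_t pos = 0, count = 0;

	for (size_t i = 0; i < nsegs && rem; i++) {
		size_t len = segs[i].len, start = 0;

		if (pos + len <= off) {	/* segment entirely before off */
			pos += len;
			continue;
		}
		if (pos < off) {	/* read starts inside this segment */
			start = off - pos;
			len -= start;
			pos = off;
		}
		if (len > rem)
			len = rem;
		memcpy(buf + count, segs[i].data + start, len);
		count += len;
		pos += len;
		rem -= len;
	}
	return count;
}

int main(void)
{
	struct seg segs[] = { { "hello ", 6 }, { "world", 5 } };
	char buf[8] = {};

	copy_range(segs, 2, buf, 4, 5);	/* copies "o wor" across the seam */
	printf("%s\n", buf);
	return 0;
}
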
static void i915_error_object_free(struct drm_i915_error_object *obj)
@@ -944,6 +1020,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
cleanup_params(error);
cleanup_uc_state(error);
+ err_free_sgl(error->sgl);
kfree(error);
}
@@ -1002,7 +1079,6 @@ i915_error_object_create(struct drm_i915_private *i915,
}
compress_fini(&compress, dst);
- ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
}
@@ -1271,7 +1347,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
engine);
- if (USES_PPGTT(dev_priv)) {
+ if (HAS_PPGTT(dev_priv)) {
int i;
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
@@ -1495,7 +1571,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
if (HAS_BROKEN_CS_TLB(i915))
ee->wa_batchbuffer =
i915_error_object_create(i915,
- engine->scratch);
+ i915->gt.scratch);
request_record_user_bo(request, ee);
ee->ctx =
@@ -1788,6 +1864,14 @@ static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
return epoch;
}
+static void capture_finish(struct i915_gpu_state *error)
+{
+ struct i915_ggtt *ggtt = &error->i915->ggtt;
+ const u64 slot = ggtt->error_capture.start;
+
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+}
+
static int capture(void *data)
{
struct i915_gpu_state *error = data;
@@ -1812,6 +1896,7 @@ static int capture(void *data)
error->epoch = capture_find_epoch(error);
+ capture_finish(error);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 3ec89a504de5..ff2652bbb0b0 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -192,6 +192,8 @@ struct i915_gpu_state {
} *active_bo[I915_NUM_ENGINES], *pinned_bo;
u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
struct i915_address_space *active_vm[I915_NUM_ENGINES];
+
+ struct scatterlist *sgl, *fit;
};
struct i915_gpu_error {
@@ -298,29 +300,20 @@ struct i915_gpu_error {
struct drm_i915_error_state_buf {
struct drm_i915_private *i915;
- unsigned int bytes;
- unsigned int size;
+ struct scatterlist *sgl, *cur, *end;
+
+ char *buf;
+ size_t bytes;
+ size_t size;
+ loff_t iter;
+
int err;
- u8 *buf;
- loff_t start;
- loff_t pos;
};
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
- const struct i915_gpu_state *gpu);
-int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
- struct drm_i915_private *i915,
- size_t count, loff_t pos);
-
-static inline void
-i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
-{
- kfree(eb->buf);
-}
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
@@ -334,6 +327,9 @@ i915_gpu_state_get(struct i915_gpu_state *gpu)
return gpu;
}
+ssize_t i915_gpu_state_copy_to_buffer(struct i915_gpu_state *error,
+ char *buf, loff_t offset, size_t count);
+
void __i915_gpu_state_free(struct kref *kref);
static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
{
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 2e242270e270..d447d7d508f4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2887,21 +2887,39 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
return ret;
}
+static inline u32 gen8_master_intr_disable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
+
+ /*
+ * Now with master disabled, get a sample of level indications
+ * for this interrupt. Indications will be cleared on related acks.
+ * New indications can and will light up during processing,
+ * and will generate a new interrupt once the master is re-enabled.
+ */
+ return raw_reg_read(regs, GEN8_MASTER_IRQ);
+}
+
+static inline void gen8_master_intr_enable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+}
+
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = to_i915(arg);
+ void __iomem * const regs = dev_priv->regs;
u32 master_ctl;
u32 gt_iir[4];
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
- master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
- master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
- if (!master_ctl)
+ master_ctl = gen8_master_intr_disable(regs);
+ if (!master_ctl) {
+ gen8_master_intr_enable(regs);
return IRQ_NONE;
-
- I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+ }
/* Find, clear, then process each source of interrupt */
gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
@@ -2913,7 +2931,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(dev_priv);
}
- I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
+ gen8_master_intr_enable(regs);
gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
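
[Editor's note: the same refactor is repeated for gen11 below; both handlers now follow one disable/sample/re-enable pattern. Writing 0 to the master enable and then reading the register back flushes the posted write and snapshots the pending source bits in a single access; once the sources are acked and handled, re-enabling the master lets anything that arrived in the meantime raise a fresh interrupt instead of being lost. A crude userspace model of the pattern — the register layout is invented and the "MMIO" is just a variable:]

#include <stdint.h>
#include <stdio.h>

#define MASTER_ENABLE	0x80000000u

static volatile uint32_t master_reg = MASTER_ENABLE | 0x3;	/* fake MMIO */

static uint32_t master_intr_disable(void)
{
	master_reg &= ~MASTER_ENABLE;	/* mask further interrupts */
	return master_reg;		/* read back: flush + sample sources */
}

static void master_intr_enable(void)
{
	master_reg |= MASTER_ENABLE;
}

static void irq_handler(void)
{
	uint32_t master_ctl = master_intr_disable();

	if (!(master_ctl & ~MASTER_ENABLE)) {
		master_intr_enable();	/* spurious wakeup: nothing pending */
		return;
	}
	printf("handling sources 0x%08x\n", master_ctl & ~MASTER_ENABLE);
	master_reg &= ~master_ctl;	/* "ack" the sampled sources */
	master_intr_enable();		/* new events re-raise the line */
}

int main(void)
{
	irq_handler();
	return 0;
}
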
@@ -3111,6 +3129,24 @@ gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
intel_opregion_asle_intr(dev_priv);
}
+static inline u32 gen11_master_intr_disable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+
+ /*
+ * Now with master disabled, get a sample of level indications
+ * for this interrupt. Indications will be cleared on related acks.
+ * New indications can and will light up during processing,
+ * and will generate a new interrupt once the master is re-enabled.
+ */
+ return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
+}
+
+static inline void gen11_master_intr_enable(void __iomem * const regs)
+{
+ raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
+}
+
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
@@ -3121,13 +3157,11 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
if (!intel_irqs_enabled(i915))
return IRQ_NONE;
- master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
- master_ctl &= ~GEN11_MASTER_IRQ;
- if (!master_ctl)
+ master_ctl = gen11_master_intr_disable(regs);
+ if (!master_ctl) {
+ gen11_master_intr_enable(regs);
return IRQ_NONE;
-
- /* Disable interrupts. */
- raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
+ }
/* Find, clear, then process each source of interrupt. */
gen11_gt_irq_handler(i915, master_ctl);
@@ -3147,8 +3181,7 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
- /* Acknowledge and enable interrupts. */
- raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
+ gen11_master_intr_enable(regs);
gen11_gu_misc_irq_handler(i915, gu_misc_iir);
@@ -3598,8 +3631,7 @@ static void gen8_irq_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
int pipe;
- I915_WRITE(GEN8_MASTER_IRQ, 0);
- POSTING_READ(GEN8_MASTER_IRQ);
+ gen8_master_intr_disable(dev_priv->regs);
gen8_gt_irq_reset(dev_priv);
@@ -3641,13 +3673,15 @@ static void gen11_irq_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- I915_WRITE(GEN11_GFX_MSTR_IRQ, 0);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ gen11_master_intr_disable(dev_priv->regs);
gen11_gt_irq_reset(dev_priv);
I915_WRITE(GEN11_DISPLAY_INT_CTL, 0);
+ I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+ I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
@@ -4244,8 +4278,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
- I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- POSTING_READ(GEN8_MASTER_IRQ);
+ gen8_master_intr_enable(dev_priv->regs);
return 0;
}
@@ -4307,8 +4340,7 @@ static int gen11_irq_postinstall(struct drm_device *dev)
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- I915_WRITE(GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
- POSTING_READ(GEN11_GFX_MSTR_IRQ);
+ gen11_master_intr_enable(dev_priv->regs);
return 0;
}
@@ -4834,6 +4866,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev_priv->display_irqs_enabled = false;
dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ /* If we have MST support, we want to avoid doing short HPD IRQ storm
+ * detection, as short HPD storms will occur as a natural part of
+ * sideband messaging with MST.
+ * On older platforms, however, IRQ storms can occur with both long and
+ * short pulses, as seen on some G4x systems.
+ */
+ dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
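
[Editor's note: hpd_short_storm_enabled is only initialized here. A plausible — hypothetical, not taken from the driver — way for storm accounting to consume it is to count long pulses unconditionally while ignoring short pulses on MST hardware, where sideband messaging produces them by design:]

#include <stdbool.h>
#include <stdio.h>

struct hotplug_state {
	bool hpd_short_storm_enabled;
	int storm_count;
};

/* Hypothetical accounting: long pulses always count toward a storm;
 * short pulses count only when short-storm detection is enabled. */
static void record_hpd_pulse(struct hotplug_state *hp, bool long_pulse)
{
	if (long_pulse || hp->hpd_short_storm_enabled)
		hp->storm_count++;
}

int main(void)
{
	struct hotplug_state hp = { .hpd_short_storm_enabled = false };

	record_hpd_pulse(&hp, false);	/* MST sideband short pulse: ignored */
	record_hpd_pulse(&hp, true);	/* long pulse: counted */
	printf("storm_count=%d\n", hp.storm_count);	/* prints 1 */
	return 0;
}
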
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.c b/drivers/gpu/drm/i915/i915_oa_bdw.c
index 4abd2e8b5083..4acdb94555b7 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.c
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_bdw.h b/drivers/gpu/drm/i915/i915_oa_bdw.h
index b812d16162ac..0e667f1a8aa1 100644
--- a/drivers/gpu/drm/i915/i915_oa_bdw.h
+++ b/drivers/gpu/drm/i915/i915_oa_bdw.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_BDW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.c b/drivers/gpu/drm/i915/i915_oa_bxt.c
index cb6f304ec16a..a44195c39923 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.c
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_bxt.h b/drivers/gpu/drm/i915/i915_oa_bxt.h
index 690b963a2383..679e92cf4f1d 100644
--- a/drivers/gpu/drm/i915/i915_oa_bxt.h
+++ b/drivers/gpu/drm/i915/i915_oa_bxt.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_BXT_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.c b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
index 8641ae30e343..7f60d51b8761 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt2.h b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
index 1f3268ef2ea2..4d6025559bbe 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt2.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CFLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 792facdb6702..a92c38e3a0ce 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.h b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
index c13b5aac01b9..0697f4077402 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CFLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.c b/drivers/gpu/drm/i915/i915_oa_chv.c
index 556febb2c3c8..71ec889a0114 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.c
+++ b/drivers/gpu/drm/i915/i915_oa_chv.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_chv.h b/drivers/gpu/drm/i915/i915_oa_chv.h
index b9622496979e..0986eae3135f 100644
--- a/drivers/gpu/drm/i915/i915_oa_chv.h
+++ b/drivers/gpu/drm/i915/i915_oa_chv.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CHV_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ba9140c87cc0..5c23d883d6c9 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.h b/drivers/gpu/drm/i915/i915_oa_cnl.h
index fb918b131105..e830a406aff2 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.h
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_CNL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.c b/drivers/gpu/drm/i915/i915_oa_glk.c
index 971db587957c..4bdda66df7d2 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.c
+++ b/drivers/gpu/drm/i915/i915_oa_glk.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_glk.h b/drivers/gpu/drm/i915/i915_oa_glk.h
index 63bd113f4bc9..06dedf991edb 100644
--- a/drivers/gpu/drm/i915/i915_oa_glk.h
+++ b/drivers/gpu/drm/i915/i915_oa_glk.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_GLK_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.c b/drivers/gpu/drm/i915/i915_oa_hsw.c
index 434a9b96d7ab..cc6526fdd2bd 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.c
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_hsw.h b/drivers/gpu/drm/i915/i915_oa_hsw.h
index 74d03439c157..3d0c870cd0bd 100644
--- a/drivers/gpu/drm/i915/i915_oa_hsw.h
+++ b/drivers/gpu/drm/i915/i915_oa_hsw.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_HSW_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.c b/drivers/gpu/drm/i915/i915_oa_icl.c
index a5667926e3de..baa51427a543 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.c
+++ b/drivers/gpu/drm/i915/i915_oa_icl.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_icl.h b/drivers/gpu/drm/i915/i915_oa_icl.h
index ae1c24aafe4f..24eaa97d61ba 100644
--- a/drivers/gpu/drm/i915/i915_oa_icl.h
+++ b/drivers/gpu/drm/i915/i915_oa_icl.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_ICL_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.c b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
index 2fa98a40bbc8..168e49ab0d4d 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt2.h b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
index 25b803546dc1..a55398a904de 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt2.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_KBLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.c b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
index f3cb6679a1bc..6ffa553c388e 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_kblgt3.h b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
index d5b5b5c1923e..3ddd3483b7cc 100644
--- a/drivers/gpu/drm/i915/i915_oa_kblgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_kblgt3.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_KBLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.c b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
index bf8b8cd8a50d..7ce6ee851d43 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt2.h b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
index fe1aa2c03958..be6256037239 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt2.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt2.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_SKLGT2_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.c b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
index ae534c7c8135..086ca2631e1c 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt3.h b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
index 06746b2616c8..650beb068e56 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt3.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt3.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_SKLGT3_H__
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.c b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
index 817fba2d82df..b291a6eb8a87 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.c
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.c
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#include <linux/sysfs.h>
diff --git a/drivers/gpu/drm/i915/i915_oa_sklgt4.h b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
index 944fd525c8b1..8dcf849d131e 100644
--- a/drivers/gpu/drm/i915/i915_oa_sklgt4.h
+++ b/drivers/gpu/drm/i915/i915_oa_sklgt4.h
@@ -1,29 +1,10 @@
/*
- * Autogenerated file by GPU Top : https://github.com/rib/gputop
- * DO NOT EDIT manually!
- *
- *
- * Copyright (c) 2015 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
+ * Copyright © 2018 Intel Corporation
*
+ * Autogenerated file by GPU Top : https://github.com/rib/gputop
+ * DO NOT EDIT manually!
*/
#ifndef __I915_OA_SKLGT4_H__
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 295e981e4a39..2e0356561839 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -82,10 +82,6 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0644,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-i915_param_named_unsafe(enable_ppgtt, int, 0400,
- "Override PPGTT usage. "
- "(-1=auto [default], 0=disabled, 1=aliasing, 2=full, 3=full with extended address space)");
-
i915_param_named_unsafe(enable_psr, int, 0600,
"Enable PSR "
"(0=disabled, 1=enabled) "
@@ -171,8 +167,10 @@ i915_param_named_unsafe(inject_load_failure, uint, 0400,
i915_param_named(enable_dpcd_backlight, bool, 0600,
"Enable support for DPCD backlight control (default:false)");
+#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+#endif
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
@@ -188,7 +186,8 @@ static __always_inline void _print_param(struct drm_printer *p,
else if (!__builtin_strcmp(type, "char *"))
drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
else
- BUILD_BUG();
+ WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n",
+ type, name);
}
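
The hunk above trades a compile-time BUILD_BUG() for a runtime WARN_ONCE() when _print_param() meets a parameter type it has no printer for. Below is a minimal user-space sketch of the underlying dispatch idiom, comparing a stringified type name (which the compiler can fold to a constant when it is a literal); the function name and printers here are illustrative, not the driver's.

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-in for drm_printf()/WARN_ONCE() in this sketch. */
	static void print_param(const char *type, const char *name, const void *x)
	{
		if (!strcmp(type, "bool"))
			printf("i915.%s=%s\n", name, *(const bool *)x ? "yes" : "no");
		else if (!strcmp(type, "int"))
			printf("i915.%s=%d\n", name, *(const int *)x);
		else if (!strcmp(type, "unsigned int"))
			printf("i915.%s=%u\n", name, *(const unsigned int *)x);
		else if (!strcmp(type, "char *"))
			printf("i915.%s=%s\n", name, *(const char *const *)x);
		else
			fprintf(stderr, "no printer defined for param type %s (i915.%s)\n",
				type, name);
	}

	int main(void)
	{
		int psr = 1;
		print_param("int", "enable_psr", &psr);	/* i915.enable_psr=1 */
		return 0;
	}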
/**
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 6c4d4a21474b..7e56c516c815 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -41,7 +41,6 @@ struct drm_printer;
param(int, vbt_sdvo_panel_type, -1) \
param(int, enable_dc, -1) \
param(int, enable_fbc, -1) \
- param(int, enable_ppgtt, -1) \
param(int, enable_psr, -1) \
param(int, disable_power_well, -1) \
param(int, enable_ips, 1) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index d6f7b9fe1d26..6350db5503cd 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -33,19 +33,30 @@
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
#define GEN_DEFAULT_PIPEOFFSETS \
- .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
- PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
- .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
- TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
- .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+ }
#define GEN_CHV_PIPEOFFSETS \
- .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
- CHV_PIPE_C_OFFSET }, \
- .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
- CHV_TRANSCODER_C_OFFSET, }, \
- .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
- CHV_PALETTE_C_OFFSET }
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = CHV_PIPE_C_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
+ }
#define CURSOR_OFFSETS \
.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
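
The rewritten offset macros above switch to C99 designated initializers indexed by the transcoder enum, so each slot is tied to its transcoder by name and any index not listed (for example the DSI transcoders on platforms without them) is zero-filled by the language. A stand-alone sketch of the pattern; the eDP value matches PIPE_EDP_OFFSET later in this patch, the others are illustrative:

	#include <stdio.h>

	enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C,
			  TRANSCODER_EDP, I915_MAX_TRANSCODERS };

	static const unsigned int pipe_offsets[I915_MAX_TRANSCODERS] = {
		[TRANSCODER_A]   = 0x70000,
		[TRANSCODER_B]   = 0x71000,
		[TRANSCODER_C]   = 0x72000,
		[TRANSCODER_EDP] = 0x7f000,
		/* unnamed slots (e.g. DSI) stay zero */
	};

	int main(void)
	{
		printf("eDP pipe base: 0x%x\n", pipe_offsets[TRANSCODER_EDP]);
		return 0;
	}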
@@ -68,8 +79,9 @@
#define GEN2_FEATURES \
GEN(2), \
.num_pipes = 1, \
- .has_overlay = 1, .overlay_needs_physical = 1, \
- .has_gmch_display = 1, \
+ .display.has_overlay = 1, \
+ .display.overlay_needs_physical = 1, \
+ .display.has_gmch_display = 1, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
@@ -82,7 +94,8 @@
static const struct intel_device_info intel_i830_info = {
GEN2_FEATURES,
PLATFORM(INTEL_I830),
- .is_mobile = 1, .cursor_needs_physical = 1,
+ .is_mobile = 1,
+ .display.cursor_needs_physical = 1,
.num_pipes = 2, /* legal, last one wins */
};
@@ -96,8 +109,8 @@ static const struct intel_device_info intel_i85x_info = {
PLATFORM(INTEL_I85X),
.is_mobile = 1,
.num_pipes = 2, /* legal, last one wins */
- .cursor_needs_physical = 1,
- .has_fbc = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_fbc = 1,
};
static const struct intel_device_info intel_i865g_info = {
@@ -108,7 +121,7 @@ static const struct intel_device_info intel_i865g_info = {
#define GEN3_FEATURES \
GEN(3), \
.num_pipes = 2, \
- .has_gmch_display = 1, \
+ .display.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -120,8 +133,9 @@ static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915G),
.has_coherent_ggtt = false,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -130,10 +144,11 @@ static const struct intel_device_info intel_i915gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I915GM),
.is_mobile = 1,
- .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
+ .display.supports_tv = 1,
+ .display.has_fbc = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -141,8 +156,10 @@ static const struct intel_device_info intel_i915gm_info = {
static const struct intel_device_info intel_i945g_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945G),
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
+ .display.has_hotplug = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -151,10 +168,12 @@ static const struct intel_device_info intel_i945gm_info = {
GEN3_FEATURES,
PLATFORM(INTEL_I945GM),
.is_mobile = 1,
- .has_hotplug = 1, .cursor_needs_physical = 1,
- .has_overlay = 1, .overlay_needs_physical = 1,
- .supports_tv = 1,
- .has_fbc = 1,
+ .display.has_hotplug = 1,
+ .display.cursor_needs_physical = 1,
+ .display.has_overlay = 1,
+ .display.overlay_needs_physical = 1,
+ .display.supports_tv = 1,
+ .display.has_fbc = 1,
.hws_needs_physical = 1,
.unfenced_needs_alignment = 1,
};
@@ -162,23 +181,23 @@ static const struct intel_device_info intel_i945gm_info = {
static const struct intel_device_info intel_g33_info = {
GEN3_FEATURES,
PLATFORM(INTEL_G33),
- .has_hotplug = 1,
- .has_overlay = 1,
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
};
static const struct intel_device_info intel_pineview_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
- .has_hotplug = 1,
- .has_overlay = 1,
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
};
#define GEN4_FEATURES \
GEN(4), \
.num_pipes = 2, \
- .has_hotplug = 1, \
- .has_gmch_display = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_gmch_display = 1, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -189,7 +208,7 @@ static const struct intel_device_info intel_pineview_info = {
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965G),
- .has_overlay = 1,
+ .display.has_overlay = 1,
.hws_needs_physical = 1,
.has_snoop = false,
};
@@ -197,9 +216,10 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
GEN4_FEATURES,
PLATFORM(INTEL_I965GM),
- .is_mobile = 1, .has_fbc = 1,
- .has_overlay = 1,
- .supports_tv = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
+ .display.has_overlay = 1,
+ .display.supports_tv = 1,
.hws_needs_physical = 1,
.has_snoop = false,
};
@@ -213,15 +233,16 @@ static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_gm45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_GM45),
- .is_mobile = 1, .has_fbc = 1,
- .supports_tv = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
+ .display.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
};
#define GEN5_FEATURES \
GEN(5), \
.num_pipes = 2, \
- .has_hotplug = 1, \
+ .display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -239,20 +260,21 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
GEN5_FEATURES,
PLATFORM(INTEL_IRONLAKE),
- .is_mobile = 1, .has_fbc = 1,
+ .is_mobile = 1,
+ .display.has_fbc = 1,
};
#define GEN6_FEATURES \
GEN(6), \
.num_pipes = 2, \
- .has_hotplug = 1, \
- .has_fbc = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .has_aliasing_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_ALIASING, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
CURSOR_OFFSETS
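
Throughout this file the three independent booleans (has_aliasing_ppgtt, has_full_ppgtt, has_full_48bit_ppgtt) collapse into a single ordered .ppgtt level, so "at least this much PPGTT" becomes one comparison instead of flag juggling. A sketch of that encoding; the enum names mirror the patch, the helper is illustrative:

	#include <stdbool.h>

	enum intel_ppgtt {
		INTEL_PPGTT_NONE = 0,
		INTEL_PPGTT_ALIASING,
		INTEL_PPGTT_FULL,
		INTEL_PPGTT_FULL_4LVL,
	};

	static inline bool has_full_ppgtt(enum intel_ppgtt level)
	{
		/* Ordering makes capability checks a single comparison. */
		return level >= INTEL_PPGTT_FULL;
	}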
@@ -290,15 +312,14 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
#define GEN7_FEATURES \
GEN(7), \
.num_pipes = 3, \
- .has_hotplug = 1, \
- .has_fbc = 1, \
+ .display.has_hotplug = 1, \
+ .display.has_fbc = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .has_aliasing_ppgtt = 1, \
- .has_full_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_FULL, \
GEN_DEFAULT_PIPEOFFSETS, \
GEN_DEFAULT_PAGE_SIZES, \
IVB_CURSOR_OFFSETS
@@ -349,10 +370,9 @@ static const struct intel_device_info intel_valleyview_info = {
.num_pipes = 2,
.has_runtime_pm = 1,
.has_rc6 = 1,
- .has_gmch_display = 1,
- .has_hotplug = 1,
- .has_aliasing_ppgtt = 1,
- .has_full_ppgtt = 1,
+ .display.has_gmch_display = 1,
+ .display.has_hotplug = 1,
+ .ppgtt = INTEL_PPGTT_FULL,
.has_snoop = true,
.has_coherent_ggtt = false,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
@@ -365,10 +385,10 @@ static const struct intel_device_info intel_valleyview_info = {
#define G75_FEATURES \
GEN7_FEATURES, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
- .has_ddi = 1, \
+ .display.has_ddi = 1, \
.has_fpga_dbg = 1, \
- .has_psr = 1, \
- .has_dp_mst = 1, \
+ .display.has_psr = 1, \
+ .display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed by HSW */, \
.has_runtime_pm = 1
@@ -399,7 +419,7 @@ static const struct intel_device_info intel_haswell_gt3_info = {
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
- .has_full_48bit_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_FULL_4LVL, \
.has_64bit_reloc = 1, \
.has_reset_engine = 1
@@ -435,16 +455,15 @@ static const struct intel_device_info intel_cherryview_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
.num_pipes = 3,
- .has_hotplug = 1,
+ .display.has_hotplug = 1,
.is_lp = 1,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
- .has_gmch_display = 1,
- .has_aliasing_ppgtt = 1,
- .has_full_ppgtt = 1,
+ .display.has_gmch_display = 1,
+ .ppgtt = INTEL_PPGTT_FULL,
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -465,13 +484,15 @@ static const struct intel_device_info intel_cherryview_info = {
GEN(9), \
GEN9_DEFAULT_PAGE_SIZES, \
.has_logical_ring_preemption = 1, \
- .has_csr = 1, \
+ .display.has_csr = 1, \
.has_guc = 1, \
- .has_ipc = 1, \
+ .display.has_ipc = 1, \
.ddb_size = 896
#define SKL_PLATFORM \
GEN9_FEATURES, \
+ /* Display WA #0477 WaDisableIPC: skl */ \
+ .display.has_ipc = 0, \
PLATFORM(INTEL_SKYLAKE)
static const struct intel_device_info intel_skylake_gt1_info = {
@@ -502,29 +523,27 @@ static const struct intel_device_info intel_skylake_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
- .has_hotplug = 1, \
+ .display.has_hotplug = 1, \
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
.num_pipes = 3, \
.has_64bit_reloc = 1, \
- .has_ddi = 1, \
+ .display.has_ddi = 1, \
.has_fpga_dbg = 1, \
- .has_fbc = 1, \
- .has_psr = 1, \
+ .display.has_fbc = 1, \
+ .display.has_psr = 1, \
.has_runtime_pm = 1, \
.has_pooled_eu = 0, \
- .has_csr = 1, \
+ .display.has_csr = 1, \
.has_rc6 = 1, \
- .has_dp_mst = 1, \
+ .display.has_dp_mst = 1, \
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_guc = 1, \
- .has_aliasing_ppgtt = 1, \
- .has_full_ppgtt = 1, \
- .has_full_48bit_ppgtt = 1, \
+ .ppgtt = INTEL_PPGTT_FULL_4LVL, \
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
- .has_ipc = 1, \
+ .display.has_ipc = 1, \
GEN9_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \
@@ -598,6 +617,22 @@ static const struct intel_device_info intel_cannonlake_info = {
#define GEN11_FEATURES \
GEN10_FEATURES, \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ [TRANSCODER_EDP] = PIPE_EDP_OFFSET, \
+ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ [TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
+ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
+ }, \
GEN(11), \
.ddb_size = 2048, \
.has_logical_ring_elsq = 1
@@ -663,7 +698,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
- INTEL_AML_GT2_IDS(&intel_kabylake_gt2_info),
+ INTEL_AML_KBL_GT2_IDS(&intel_kabylake_gt2_info),
INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
@@ -671,6 +706,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_WHL_U_GT1_IDS(&intel_coffeelake_gt1_info),
INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
+ INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 664b96bb65a3..4529edfdcfc8 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -890,8 +890,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
dev_priv->perf.oa.period_exponent);
- dev_priv->perf.oa.ops.oa_disable(dev_priv);
- dev_priv->perf.oa.ops.oa_enable(dev_priv);
+ dev_priv->perf.oa.ops.oa_disable(stream);
+ dev_priv->perf.oa.ops.oa_enable(stream);
/*
* Note: .oa_enable() is expected to re-init the oabuffer and
@@ -1114,8 +1114,8 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n",
dev_priv->perf.oa.period_exponent);
- dev_priv->perf.oa.ops.oa_disable(dev_priv);
- dev_priv->perf.oa.ops.oa_enable(dev_priv);
+ dev_priv->perf.oa.ops.oa_disable(stream);
+ dev_priv->perf.oa.ops.oa_enable(stream);
oastatus1 = I915_READ(GEN7_OASTATUS1);
}
@@ -1528,8 +1528,6 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
goto err_unpin;
}
- dev_priv->perf.oa.ops.init_oa_buffer(dev_priv);
-
DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n",
i915_ggtt_offset(dev_priv->perf.oa.oa_buffer.vma),
dev_priv->perf.oa.oa_buffer.vaddr);
@@ -1563,9 +1561,11 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
}
}
-static int hsw_enable_metric_set(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config)
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ const struct i915_oa_config *oa_config = stream->oa_config;
+
/* PRM:
*
* OA unit is using “crclk” for its functionality. When trunk
@@ -1767,9 +1767,10 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
return 0;
}
-static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
- const struct i915_oa_config *oa_config)
+static int gen8_enable_metric_set(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ const struct i915_oa_config *oa_config = stream->oa_config;
int ret;
/*
@@ -1837,10 +1838,10 @@ static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE);
}
-static void gen7_oa_enable(struct drm_i915_private *dev_priv)
+static void gen7_oa_enable(struct i915_perf_stream *stream)
{
- struct i915_gem_context *ctx =
- dev_priv->perf.oa.exclusive_stream->ctx;
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct i915_gem_context *ctx = stream->ctx;
u32 ctx_id = dev_priv->perf.oa.specific_ctx_id;
bool periodic = dev_priv->perf.oa.periodic;
u32 period_exponent = dev_priv->perf.oa.period_exponent;
@@ -1867,8 +1868,9 @@ static void gen7_oa_enable(struct drm_i915_private *dev_priv)
GEN7_OACONTROL_ENABLE);
}
-static void gen8_oa_enable(struct drm_i915_private *dev_priv)
+static void gen8_oa_enable(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
u32 report_format = dev_priv->perf.oa.oa_buffer.format;
/*
@@ -1905,7 +1907,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- dev_priv->perf.oa.ops.oa_enable(dev_priv);
+ dev_priv->perf.oa.ops.oa_enable(stream);
if (dev_priv->perf.oa.periodic)
hrtimer_start(&dev_priv->perf.oa.poll_check_timer,
@@ -1913,8 +1915,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
HRTIMER_MODE_REL_PINNED);
}
-static void gen7_oa_disable(struct drm_i915_private *dev_priv)
+static void gen7_oa_disable(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
I915_WRITE(GEN7_OACONTROL, 0);
if (intel_wait_for_register(dev_priv,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
@@ -1922,8 +1926,10 @@ static void gen7_oa_disable(struct drm_i915_private *dev_priv)
DRM_ERROR("wait for OA to be disabled timed out\n");
}
-static void gen8_oa_disable(struct drm_i915_private *dev_priv)
+static void gen8_oa_disable(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+
I915_WRITE(GEN8_OACONTROL, 0);
if (intel_wait_for_register(dev_priv,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
@@ -1943,7 +1949,7 @@ static void i915_oa_stream_disable(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
- dev_priv->perf.oa.ops.oa_disable(dev_priv);
+ dev_priv->perf.oa.ops.oa_disable(stream);
if (dev_priv->perf.oa.periodic)
hrtimer_cancel(&dev_priv->perf.oa.poll_check_timer);
@@ -1998,7 +2004,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return -EINVAL;
}
- if (!dev_priv->perf.oa.ops.init_oa_buffer) {
+ if (!dev_priv->perf.oa.ops.enable_metric_set) {
DRM_DEBUG("OA unit not supported\n");
return -ENODEV;
}
@@ -2092,8 +2098,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
if (ret)
goto err_lock;
- ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
- stream->oa_config);
+ ret = dev_priv->perf.oa.ops.enable_metric_set(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
@@ -3387,7 +3392,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.is_valid_mux_reg =
hsw_is_valid_mux_addr;
dev_priv->perf.oa.ops.is_valid_flex_reg = NULL;
- dev_priv->perf.oa.ops.init_oa_buffer = gen7_init_oa_buffer;
dev_priv->perf.oa.ops.enable_metric_set = hsw_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = hsw_disable_metric_set;
dev_priv->perf.oa.ops.oa_enable = gen7_oa_enable;
@@ -3406,7 +3410,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
*/
dev_priv->perf.oa.oa_formats = gen8_plus_oa_formats;
- dev_priv->perf.oa.ops.init_oa_buffer = gen8_init_oa_buffer;
dev_priv->perf.oa.ops.oa_enable = gen8_oa_enable;
dev_priv->perf.oa.ops.oa_disable = gen8_oa_disable;
dev_priv->perf.oa.ops.read = gen8_oa_read;
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 3f502eef2431..6fc4b8eeab42 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -27,8 +27,7 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
slice_length = sizeof(sseu->slice_mask);
subslice_length = sseu->max_slices *
- DIV_ROUND_UP(sseu->max_subslices,
- sizeof(sseu->subslice_mask[0]) * BITS_PER_BYTE);
+ DIV_ROUND_UP(sseu->max_subslices, BITS_PER_BYTE);
eu_length = sseu->max_slices * sseu->max_subslices *
DIV_ROUND_UP(sseu->max_eus_per_subslice, BITS_PER_BYTE);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e31c27e45734..0a7d60509ca7 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -157,20 +157,37 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/*
* Named helper wrappers around _PICK_EVEN() and _PICK().
*/
-#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
-#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
-#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
-#define _MMIO_PLANE(plane, a, b) _MMIO_PIPE(plane, a, b)
-#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
-#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
-#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
-#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
-#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
-#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
-#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
-#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
-#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+#define _PIPE(pipe, a, b) _PICK_EVEN(pipe, a, b)
+#define _PLANE(plane, a, b) _PICK_EVEN(plane, a, b)
+#define _TRANS(tran, a, b) _PICK_EVEN(tran, a, b)
+#define _PORT(port, a, b) _PICK_EVEN(port, a, b)
+#define _PLL(pll, a, b) _PICK_EVEN(pll, a, b)
+
+#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
+#define _MMIO_PLANE(plane, a, b) _MMIO(_PLANE(plane, a, b))
+#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
+#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
+#define _MMIO_PLL(pll, a, b) _MMIO(_PLL(pll, a, b))
+
+#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
+
+#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PICK(pipe, a, b, c))
+#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
+
+/*
+ * Device info offset array based helpers for groups of registers with unevenly
+ * spaced base offsets.
+ */
+#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
+ dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
+ dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
+#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
+ dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
+ dev_priv->info.display_mmio_offset)
#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({ \
@@ -1631,35 +1648,6 @@ enum i915_power_well_id {
#define PHY_RESERVED (1 << 7)
#define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC)
-#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
-#define CL_POWER_DOWN_ENABLE (1 << 4)
-#define SUS_CLOCK_CONFIG (3 << 0)
-
-#define _ICL_PORT_CL_DW5_A 0x162014
-#define _ICL_PORT_CL_DW5_B 0x6C014
-#define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \
- _ICL_PORT_CL_DW5_B)
-
-#define _CNL_PORT_CL_DW10_A 0x162028
-#define _ICL_PORT_CL_DW10_B 0x6c028
-#define ICL_PORT_CL_DW10(port) _MMIO_PORT(port, \
- _CNL_PORT_CL_DW10_A, \
- _ICL_PORT_CL_DW10_B)
-#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
-#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
-#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
-#define PWR_UP_ALL_LANES (0x0 << 4)
-#define PWR_DOWN_LN_3_2_1 (0xe << 4)
-#define PWR_DOWN_LN_3_2 (0xc << 4)
-#define PWR_DOWN_LN_3 (0x8 << 4)
-#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
-#define PWR_DOWN_LN_1_0 (0x3 << 4)
-#define PWR_DOWN_LN_1 (0x2 << 4)
-#define PWR_DOWN_LN_3_1 (0xa << 4)
-#define PWR_DOWN_LN_3_1_0 (0xb << 4)
-#define PWR_DOWN_LN_MASK (0xf << 4)
-#define PWR_DOWN_LN_SHIFT 4
-
#define _PORT_CL1CM_DW9_A 0x162024
#define _PORT_CL1CM_DW9_BC 0x6C024
#define IREF0RC_OFFSET_SHIFT 8
@@ -1672,13 +1660,6 @@ enum i915_power_well_id {
#define IREF1RC_OFFSET_MASK (0xFF << IREF1RC_OFFSET_SHIFT)
#define BXT_PORT_CL1CM_DW10(phy) _BXT_PHY((phy), _PORT_CL1CM_DW10_BC)
-#define _ICL_PORT_CL_DW12_A 0x162030
-#define _ICL_PORT_CL_DW12_B 0x6C030
-#define ICL_LANE_ENABLE_AUX (1 << 0)
-#define ICL_PORT_CL_DW12(port) _MMIO_PORT((port), \
- _ICL_PORT_CL_DW12_A, \
- _ICL_PORT_CL_DW12_B)
-
#define _PORT_CL1CM_DW28_A 0x162070
#define _PORT_CL1CM_DW28_BC 0x6C070
#define OCL1_POWER_DOWN_EN (1 << 23)
@@ -1691,6 +1672,74 @@ enum i915_power_well_id {
#define OCL2_LDOFUSE_PWR_DIS (1 << 6)
#define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
+/*
+ * CNL/ICL Port/COMBO-PHY Registers
+ */
+#define _ICL_COMBOPHY_A 0x162000
+#define _ICL_COMBOPHY_B 0x6C000
+#define _ICL_COMBOPHY(port) _PICK(port, _ICL_COMBOPHY_A, \
+ _ICL_COMBOPHY_B)
+
+/* CNL/ICL Port CL_DW registers */
+#define _ICL_PORT_CL_DW(dw, port) (_ICL_COMBOPHY(port) + \
+ 4 * (dw))
+
+#define CNL_PORT_CL1CM_DW5 _MMIO(0x162014)
+#define ICL_PORT_CL_DW5(port) _MMIO(_ICL_PORT_CL_DW(5, port))
+#define CL_POWER_DOWN_ENABLE (1 << 4)
+#define SUS_CLOCK_CONFIG (3 << 0)
+
+#define ICL_PORT_CL_DW10(port) _MMIO(_ICL_PORT_CL_DW(10, port))
+#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25)
+#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25
+#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24)
+#define PWR_UP_ALL_LANES (0x0 << 4)
+#define PWR_DOWN_LN_3_2_1 (0xe << 4)
+#define PWR_DOWN_LN_3_2 (0xc << 4)
+#define PWR_DOWN_LN_3 (0x8 << 4)
+#define PWR_DOWN_LN_2_1_0 (0x7 << 4)
+#define PWR_DOWN_LN_1_0 (0x3 << 4)
+#define PWR_DOWN_LN_1 (0x2 << 4)
+#define PWR_DOWN_LN_3_1 (0xa << 4)
+#define PWR_DOWN_LN_3_1_0 (0xb << 4)
+#define PWR_DOWN_LN_MASK (0xf << 4)
+#define PWR_DOWN_LN_SHIFT 4
+
+#define ICL_PORT_CL_DW12(port) _MMIO(_ICL_PORT_CL_DW(12, port))
+#define ICL_LANE_ENABLE_AUX (1 << 0)
+
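The new scheme derives every combo-PHY register from one per-port base plus a dword index, replacing a pair of hand-maintained constants per register. A quick check that it reproduces the constants being deleted (both results appear verbatim in the removed lines above):

	#include <stdio.h>

	#define ICL_COMBOPHY_A	0x162000
	#define ICL_COMBOPHY_B	0x06C000
	#define PORT_CL_DW(dw, base)	((base) + 4 * (dw))

	int main(void)
	{
		printf("CL_DW5(A) = 0x%x\n", PORT_CL_DW(5, ICL_COMBOPHY_A)); /* 0x162014 */
		printf("CL_DW5(B) = 0x%x\n", PORT_CL_DW(5, ICL_COMBOPHY_B)); /* 0x06c014 */
		return 0;
	}
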
+/* CNL/ICL Port COMP_DW registers */
+#define _ICL_PORT_COMP 0x100
+#define _ICL_PORT_COMP_DW(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_COMP + 4 * (dw))
+
+#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
+#define ICL_PORT_COMP_DW0(port) _MMIO(_ICL_PORT_COMP_DW(0, port))
+#define COMP_INIT (1 << 31)
+
+#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
+#define ICL_PORT_COMP_DW1(port) _MMIO(_ICL_PORT_COMP_DW(1, port))
+
+#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
+#define ICL_PORT_COMP_DW3(port) _MMIO(_ICL_PORT_COMP_DW(3, port))
+#define PROCESS_INFO_DOT_0 (0 << 26)
+#define PROCESS_INFO_DOT_1 (1 << 26)
+#define PROCESS_INFO_DOT_4 (2 << 26)
+#define PROCESS_INFO_MASK (7 << 26)
+#define PROCESS_INFO_SHIFT 26
+#define VOLTAGE_INFO_0_85V (0 << 24)
+#define VOLTAGE_INFO_0_95V (1 << 24)
+#define VOLTAGE_INFO_1_05V (2 << 24)
+#define VOLTAGE_INFO_MASK (3 << 24)
+#define VOLTAGE_INFO_SHIFT 24
+
+#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
+#define ICL_PORT_COMP_DW9(port) _MMIO(_ICL_PORT_COMP_DW(9, port))
+
+#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
+#define ICL_PORT_COMP_DW10(port) _MMIO(_ICL_PORT_COMP_DW(10, port))
+
+/* CNL/ICL Port PCS registers */
#define _CNL_PORT_PCS_DW1_GRP_AE 0x162304
#define _CNL_PORT_PCS_DW1_GRP_B 0x162384
#define _CNL_PORT_PCS_DW1_GRP_C 0x162B04
@@ -1708,7 +1757,6 @@ enum i915_power_well_id {
_CNL_PORT_PCS_DW1_GRP_D, \
_CNL_PORT_PCS_DW1_GRP_AE, \
_CNL_PORT_PCS_DW1_GRP_F))
-
#define CNL_PORT_PCS_DW1_LN0(port) _MMIO(_PICK(port, \
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_B, \
@@ -1717,24 +1765,21 @@ enum i915_power_well_id {
_CNL_PORT_PCS_DW1_LN0_AE, \
_CNL_PORT_PCS_DW1_LN0_F))
-#define _ICL_PORT_PCS_DW1_GRP_A 0x162604
-#define _ICL_PORT_PCS_DW1_GRP_B 0x6C604
-#define _ICL_PORT_PCS_DW1_LN0_A 0x162804
-#define _ICL_PORT_PCS_DW1_LN0_B 0x6C804
-#define _ICL_PORT_PCS_DW1_AUX_A 0x162304
-#define _ICL_PORT_PCS_DW1_AUX_B 0x6c304
-#define ICL_PORT_PCS_DW1_GRP(port) _MMIO_PORT(port,\
- _ICL_PORT_PCS_DW1_GRP_A, \
- _ICL_PORT_PCS_DW1_GRP_B)
-#define ICL_PORT_PCS_DW1_LN0(port) _MMIO_PORT(port, \
- _ICL_PORT_PCS_DW1_LN0_A, \
- _ICL_PORT_PCS_DW1_LN0_B)
-#define ICL_PORT_PCS_DW1_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_PCS_DW1_AUX_A, \
- _ICL_PORT_PCS_DW1_AUX_B)
+#define _ICL_PORT_PCS_AUX 0x300
+#define _ICL_PORT_PCS_GRP 0x600
+#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100)
+#define _ICL_PORT_PCS_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_PCS_AUX + 4 * (dw))
+#define _ICL_PORT_PCS_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_PCS_GRP + 4 * (dw))
+#define _ICL_PORT_PCS_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_PCS_LN(ln) + 4 * (dw))
+#define ICL_PORT_PCS_DW1_AUX(port) _MMIO(_ICL_PORT_PCS_DW_AUX(1, port))
+#define ICL_PORT_PCS_DW1_GRP(port) _MMIO(_ICL_PORT_PCS_DW_GRP(1, port))
+#define ICL_PORT_PCS_DW1_LN0(port) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, port))
#define COMMON_KEEPER_EN (1 << 26)
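
The PCS registers add a second level: a fixed sub-block offset (0x300 for AUX, 0x600 for GRP, 0x800 + 0x100 * lane per lane) on top of the combo-PHY base. Checking DW1 lane 0 on port A against the removed constant 0x162804:

	#include <stdio.h>

	int main(void)
	{
		unsigned int combophy_a = 0x162000;
		unsigned int pcs_ln0 = 0x800 + 0 * 0x100;	/* _ICL_PORT_PCS_LN(0) */

		/* _ICL_PORT_PCS_DW_LN(1, 0, PORT_A) */
		printf("PCS_DW1_LN0(A) = 0x%x\n", combophy_a + pcs_ln0 + 4 * 1);
		return 0;	/* 0x162804, matching the removed _ICL_PORT_PCS_DW1_LN0_A */
	}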
-/* CNL Port TX registers */
+/* CNL/ICL Port TX registers */
#define _CNL_PORT_TX_AE_GRP_OFFSET 0x162340
#define _CNL_PORT_TX_B_GRP_OFFSET 0x1623C0
#define _CNL_PORT_TX_C_GRP_OFFSET 0x162B40
@@ -1762,23 +1807,22 @@ enum i915_power_well_id {
_CNL_PORT_TX_F_LN0_OFFSET) + \
4 * (dw))
-#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 2))
-#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 2))
-#define _ICL_PORT_TX_DW2_GRP_A 0x162688
-#define _ICL_PORT_TX_DW2_GRP_B 0x6C688
-#define _ICL_PORT_TX_DW2_LN0_A 0x162888
-#define _ICL_PORT_TX_DW2_LN0_B 0x6C888
-#define _ICL_PORT_TX_DW2_AUX_A 0x162388
-#define _ICL_PORT_TX_DW2_AUX_B 0x6c388
-#define ICL_PORT_TX_DW2_GRP(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW2_GRP_A, \
- _ICL_PORT_TX_DW2_GRP_B)
-#define ICL_PORT_TX_DW2_LN0(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW2_LN0_A, \
- _ICL_PORT_TX_DW2_LN0_B)
-#define ICL_PORT_TX_DW2_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW2_AUX_A, \
- _ICL_PORT_TX_DW2_AUX_B)
+#define _ICL_PORT_TX_AUX 0x380
+#define _ICL_PORT_TX_GRP 0x680
+#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100)
+
+#define _ICL_PORT_TX_DW_AUX(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_TX_AUX + 4 * (dw))
+#define _ICL_PORT_TX_DW_GRP(dw, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_TX_GRP + 4 * (dw))
+#define _ICL_PORT_TX_DW_LN(dw, ln, port) (_ICL_COMBOPHY(port) + \
+ _ICL_PORT_TX_LN(ln) + 4 * (dw))
+
+#define CNL_PORT_TX_DW2_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(2, port))
+#define CNL_PORT_TX_DW2_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(2, port))
+#define ICL_PORT_TX_DW2_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(2, port))
+#define ICL_PORT_TX_DW2_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(2, port))
+#define ICL_PORT_TX_DW2_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, port))
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
@@ -1795,24 +1839,10 @@ enum i915_power_well_id {
#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
-#define _ICL_PORT_TX_DW4_GRP_A 0x162690
-#define _ICL_PORT_TX_DW4_GRP_B 0x6C690
-#define _ICL_PORT_TX_DW4_LN0_A 0x162890
-#define _ICL_PORT_TX_DW4_LN1_A 0x162990
-#define _ICL_PORT_TX_DW4_LN0_B 0x6C890
-#define _ICL_PORT_TX_DW4_AUX_A 0x162390
-#define _ICL_PORT_TX_DW4_AUX_B 0x6c390
-#define ICL_PORT_TX_DW4_GRP(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW4_GRP_A, \
- _ICL_PORT_TX_DW4_GRP_B)
-#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_PORT(port, \
- _ICL_PORT_TX_DW4_LN0_A, \
- _ICL_PORT_TX_DW4_LN0_B) + \
- ((ln) * (_ICL_PORT_TX_DW4_LN1_A - \
- _ICL_PORT_TX_DW4_LN0_A)))
-#define ICL_PORT_TX_DW4_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW4_AUX_A, \
- _ICL_PORT_TX_DW4_AUX_B)
+#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
+#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
+#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
+#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1821,23 +1851,11 @@ enum i915_power_well_id {
#define CURSOR_COEFF(x) ((x) << 0)
#define CURSOR_COEFF_MASK (0x3F << 0)
-#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 5))
-#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 5))
-#define _ICL_PORT_TX_DW5_GRP_A 0x162694
-#define _ICL_PORT_TX_DW5_GRP_B 0x6C694
-#define _ICL_PORT_TX_DW5_LN0_A 0x162894
-#define _ICL_PORT_TX_DW5_LN0_B 0x6C894
-#define _ICL_PORT_TX_DW5_AUX_A 0x162394
-#define _ICL_PORT_TX_DW5_AUX_B 0x6c394
-#define ICL_PORT_TX_DW5_GRP(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW5_GRP_A, \
- _ICL_PORT_TX_DW5_GRP_B)
-#define ICL_PORT_TX_DW5_LN0(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW5_LN0_A, \
- _ICL_PORT_TX_DW5_LN0_B)
-#define ICL_PORT_TX_DW5_AUX(port) _MMIO_PORT(port, \
- _ICL_PORT_TX_DW5_AUX_A, \
- _ICL_PORT_TX_DW5_AUX_B)
+#define CNL_PORT_TX_DW5_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(5, port))
+#define CNL_PORT_TX_DW5_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(5, port))
+#define ICL_PORT_TX_DW5_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(5, port))
+#define ICL_PORT_TX_DW5_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(5, port))
+#define ICL_PORT_TX_DW5_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, port))
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -2054,47 +2072,10 @@ enum i915_power_well_id {
#define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC)
#define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28)
-#define CNL_PORT_COMP_DW0 _MMIO(0x162100)
-#define COMP_INIT (1 << 31)
-#define CNL_PORT_COMP_DW1 _MMIO(0x162104)
-#define CNL_PORT_COMP_DW3 _MMIO(0x16210c)
-#define PROCESS_INFO_DOT_0 (0 << 26)
-#define PROCESS_INFO_DOT_1 (1 << 26)
-#define PROCESS_INFO_DOT_4 (2 << 26)
-#define PROCESS_INFO_MASK (7 << 26)
-#define PROCESS_INFO_SHIFT 26
-#define VOLTAGE_INFO_0_85V (0 << 24)
-#define VOLTAGE_INFO_0_95V (1 << 24)
-#define VOLTAGE_INFO_1_05V (2 << 24)
-#define VOLTAGE_INFO_MASK (3 << 24)
-#define VOLTAGE_INFO_SHIFT 24
-#define CNL_PORT_COMP_DW9 _MMIO(0x162124)
-#define CNL_PORT_COMP_DW10 _MMIO(0x162128)
-
-#define _ICL_PORT_COMP_DW0_A 0x162100
-#define _ICL_PORT_COMP_DW0_B 0x6C100
-#define ICL_PORT_COMP_DW0(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \
- _ICL_PORT_COMP_DW0_B)
-#define _ICL_PORT_COMP_DW1_A 0x162104
-#define _ICL_PORT_COMP_DW1_B 0x6C104
-#define ICL_PORT_COMP_DW1(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \
- _ICL_PORT_COMP_DW1_B)
-#define _ICL_PORT_COMP_DW3_A 0x16210C
-#define _ICL_PORT_COMP_DW3_B 0x6C10C
-#define ICL_PORT_COMP_DW3(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \
- _ICL_PORT_COMP_DW3_B)
-#define _ICL_PORT_COMP_DW9_A 0x162124
-#define _ICL_PORT_COMP_DW9_B 0x6C124
-#define ICL_PORT_COMP_DW9(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \
- _ICL_PORT_COMP_DW9_B)
-#define _ICL_PORT_COMP_DW10_A 0x162128
-#define _ICL_PORT_COMP_DW10_B 0x6C128
-#define ICL_PORT_COMP_DW10(port) _MMIO_PORT(port, \
- _ICL_PORT_COMP_DW10_A, \
- _ICL_PORT_COMP_DW10_B)
+#define FIA1_BASE 0x163000
/* ICL PHY DFLEX registers */
-#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
+#define PORT_TX_DFLEXDPMLE1 _MMIO(FIA1_BASE + 0x008C0)
#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
@@ -2417,6 +2398,7 @@ enum i915_power_well_id {
#define GEN8_GAMW_ECO_DEV_RW_IA _MMIO(0x4080)
#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF
+#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
#define GAMT_CHKN_DISABLE_L3_COH_PIPE (1 << 31)
@@ -2577,6 +2559,7 @@ enum i915_power_well_id {
/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
#define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+#define GEN11_ENABLE_32_PLANE_MODE (1 << 7)
/* WaClearTdlStateAckDirtyBits */
#define GEN8_STATE_ACK _MMIO(0x20F0)
@@ -3479,11 +3462,13 @@ enum i915_power_well_id {
/*
* Palette regs
*/
-#define PALETTE_A_OFFSET 0xa000
-#define PALETTE_B_OFFSET 0xa800
-#define CHV_PALETTE_C_OFFSET 0xc000
-#define PALETTE(pipe, i) _MMIO(dev_priv->info.palette_offsets[pipe] + \
- dev_priv->info.display_mmio_offset + (i) * 4)
+#define _PALETTE_A 0xa000
+#define _PALETTE_B 0xa800
+#define _CHV_PALETTE_C 0xc000
+#define PALETTE(pipe, i) _MMIO(dev_priv->info.display_mmio_offset + \
+ _PICK((pipe), _PALETTE_A, \
+ _PALETTE_B, _CHV_PALETTE_C) + \
+ (i) * 4)
/* MCH MMIO space */
@@ -4065,15 +4050,27 @@ enum {
#define _VSYNCSHIFT_B 0x61028
#define _PIPE_MULT_B 0x6102c
+/* DSI 0 timing regs */
+#define _HTOTAL_DSI0 0x6b000
+#define _HSYNC_DSI0 0x6b008
+#define _VTOTAL_DSI0 0x6b00c
+#define _VSYNC_DSI0 0x6b014
+#define _VSYNCSHIFT_DSI0 0x6b028
+
+/* DSI 1 timing regs */
+#define _HTOTAL_DSI1 0x6b800
+#define _HSYNC_DSI1 0x6b808
+#define _VTOTAL_DSI1 0x6b80c
+#define _VSYNC_DSI1 0x6b814
+#define _VSYNCSHIFT_DSI1 0x6b828
+
#define TRANSCODER_A_OFFSET 0x60000
#define TRANSCODER_B_OFFSET 0x61000
#define TRANSCODER_C_OFFSET 0x62000
#define CHV_TRANSCODER_C_OFFSET 0x63000
#define TRANSCODER_EDP_OFFSET 0x6f000
-
-#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
- dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
+#define TRANSCODER_DSI0_OFFSET 0x6b000
+#define TRANSCODER_DSI1_OFFSET 0x6b800
#define HTOTAL(trans) _MMIO_TRANS2(trans, _HTOTAL_A)
#define HBLANK(trans) _MMIO_TRANS2(trans, _HBLANK_A)
@@ -4153,9 +4150,13 @@ enum {
/* Bspec claims those aren't shifted but stay at 0x64800 */
#define EDP_PSR_IMR _MMIO(0x64834)
#define EDP_PSR_IIR _MMIO(0x64838)
-#define EDP_PSR_ERROR(trans) (1 << (((trans) * 8 + 10) & 31))
-#define EDP_PSR_POST_EXIT(trans) (1 << (((trans) * 8 + 9) & 31))
-#define EDP_PSR_PRE_ENTRY(trans) (1 << (((trans) * 8 + 8) & 31))
+#define EDP_PSR_ERROR(shift) (1 << ((shift) + 2))
+#define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1))
+#define EDP_PSR_PRE_ENTRY(shift) (1 << (shift))
+#define EDP_PSR_TRANSCODER_C_SHIFT 24
+#define EDP_PSR_TRANSCODER_B_SHIFT 16
+#define EDP_PSR_TRANSCODER_A_SHIFT 8
+#define EDP_PSR_TRANSCODER_EDP_SHIFT 0
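
The PSR interrupt bits keep their positions; only the macro encoding changes, from a per-transcoder formula to explicit shift constants the caller looks up per transcoder. For transcoder B (enum value 1 in the old formula) both forms select bit 18:

	#include <stdio.h>

	int main(void)
	{
		int trans_b = 1;			/* TRANSCODER_B, old formula */
		int old_bit = (trans_b * 8 + 10) & 31;	/* removed encoding */
		int new_bit = 16 + 2;			/* EDP_PSR_TRANSCODER_B_SHIFT + 2 */

		printf("old=%d new=%d\n", old_bit, new_bit);	/* both 18 */
		return 0;
	}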
#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
@@ -4199,7 +4200,7 @@ enum {
#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
#define EDP_PSR_DEBUG_MASK_HPD (1 << 25)
-#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16)
+#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */
#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */
#define EDP_PSR2_CTL _MMIO(0x6f900)
@@ -4236,7 +4237,7 @@ enum {
#define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9)
#define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8)
#define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6)
-#define PSR_EVENT_REGISTER_UPDATE (1 << 5)
+#define PSR_EVENT_REGISTER_UPDATE (1 << 5) /* Reserved in ICL+ */
#define PSR_EVENT_HDCP_ENABLE (1 << 4)
#define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3)
#define PSR_EVENT_VBI_ENABLE (1 << 2)
@@ -4569,6 +4570,7 @@ enum {
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
#define VIDEO_DIP_VSC_DATA_SIZE 36
+#define VIDEO_DIP_PPS_DATA_SIZE 132
#define VIDEO_DIP_CTL _MMIO(0x61170)
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
@@ -4588,6 +4590,15 @@ enum {
#define VIDEO_DIP_FREQ_2VSYNC (2 << 16)
#define VIDEO_DIP_FREQ_MASK (3 << 16)
/* HSW and later: */
+#define DRM_DIP_ENABLE (1 << 28)
+#define PSR_VSC_BIT_7_SET (1 << 27)
+#define VSC_SELECT_MASK (0x3 << 25)
+#define VSC_SELECT_SHIFT 25
+#define VSC_DIP_HW_HEA_DATA (0 << 25)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
+#define VSC_DIP_SW_HEA_DATA (3 << 25)
+#define VDIP_ENABLE_PPS (1 << 24)
#define VIDEO_DIP_ENABLE_VSC_HSW (1 << 20)
#define VIDEO_DIP_ENABLE_GCP_HSW (1 << 16)
#define VIDEO_DIP_ENABLE_AVI_HSW (1 << 12)
@@ -4595,16 +4606,6 @@ enum {
#define VIDEO_DIP_ENABLE_GMP_HSW (1 << 4)
#define VIDEO_DIP_ENABLE_SPD_HSW (1 << 0)
-#define DRM_DIP_ENABLE (1 << 28)
-#define PSR_VSC_BIT_7_SET (1 << 27)
-#define VSC_SELECT_MASK (0x3 << 25)
-#define VSC_SELECT_SHIFT 25
-#define VSC_DIP_HW_HEA_DATA (0 << 25)
-#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
-#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
-#define VSC_DIP_SW_HEA_DATA (3 << 25)
-#define VDIP_ENABLE_PPS (1 << 24)
-
/* Panel power sequencing */
#define PPS_BASE 0x61200
#define VLV_PPS_BASE (VLV_DISPLAY_BASE + PPS_BASE)
@@ -4617,6 +4618,17 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
#define PP_ON (1 << 31)
+
+#define _PP_CONTROL_1 0xc7204
+#define _PP_CONTROL_2 0xc7304
+#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
+ _PP_CONTROL_2)
+#define POWER_CYCLE_DELAY_MASK (0x1f << 4)
+#define POWER_CYCLE_DELAY_SHIFT 4
+#define VDD_OVERRIDE_FORCE (1 << 3)
+#define BACKLIGHT_ENABLE (1 << 2)
+#define PWR_DOWN_ON_RESET (1 << 1)
+#define PWR_STATE_TARGET (1 << 0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -5640,9 +5652,9 @@ enum {
*/
#define PIPE_EDP_OFFSET 0x7f000
-#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
- dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
+/* ICL DSI 0 and 1 */
+#define PIPE_DSI0_OFFSET 0x7b000
+#define PIPE_DSI1_OFFSET 0x7b800
#define PIPECONF(pipe) _MMIO_PIPE2(pipe, _PIPEACONF)
#define PIPEDSL(pipe) _MMIO_PIPE2(pipe, _PIPEADSL)
@@ -6091,10 +6103,6 @@ enum {
#define _CURBBASE_IVB 0x71084
#define _CURBPOS_IVB 0x71088
-#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
- dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
-
#define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR)
#define CURBASE(pipe) _CURSOR2(pipe, _CURABASE)
#define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS)
@@ -6228,6 +6236,10 @@ enum {
#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4)
#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC)
+/* ICL DSI 0 and 1 */
+#define _PIPEDSI0CONF 0x7b008
+#define _PIPEDSI1CONF 0x7b808
+
/* Sprite A control */
#define _DVSACNTR 0x72180
#define DVS_ENABLE (1 << 31)
@@ -6515,6 +6527,7 @@ enum {
#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
#define PLANE_CTL_ORDER_BGRX (0 << 20)
#define PLANE_CTL_ORDER_RGBX (1 << 20)
+#define PLANE_CTL_YUV420_Y_PLANE (1 << 19)
#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18)
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
#define PLANE_CTL_YUV422_YUYV (0 << 16)
@@ -6558,17 +6571,33 @@ enum {
#define _PLANE_KEYVAL_2_A 0x70294
#define _PLANE_KEYMSK_1_A 0x70198
#define _PLANE_KEYMSK_2_A 0x70298
+#define PLANE_KEYMSK_ALPHA_ENABLE (1 << 31)
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
+#define PLANE_KEYMAX_ALPHA(a) ((a) << 24)
#define _PLANE_AUX_DIST_1_A 0x701c0
#define _PLANE_AUX_DIST_2_A 0x702c0
#define _PLANE_AUX_OFFSET_1_A 0x701c4
#define _PLANE_AUX_OFFSET_2_A 0x702c4
+#define _PLANE_CUS_CTL_1_A 0x701c8
+#define _PLANE_CUS_CTL_2_A 0x702c8
+#define PLANE_CUS_ENABLE (1 << 31)
+#define PLANE_CUS_PLANE_6 (0 << 30)
+#define PLANE_CUS_PLANE_7 (1 << 30)
+#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19)
+#define PLANE_CUS_HPHASE_0 (0 << 16)
+#define PLANE_CUS_HPHASE_0_25 (1 << 16)
+#define PLANE_CUS_HPHASE_0_5 (2 << 16)
+#define PLANE_CUS_VPHASE_SIGN_NEGATIVE (1 << 15)
+#define PLANE_CUS_VPHASE_0 (0 << 12)
+#define PLANE_CUS_VPHASE_0_25 (1 << 12)
+#define PLANE_CUS_VPHASE_0_5 (2 << 12)
#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */
#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */
#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */
#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
+#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */
#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */
#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17)
#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709 (1 << 17)
@@ -6585,6 +6614,55 @@ enum {
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
+/* Input CSC Register Definitions */
+#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0
+#define _PLANE_INPUT_CSC_RY_GY_2_A 0x702E0
+
+#define _PLANE_INPUT_CSC_RY_GY_1_B 0x711E0
+#define _PLANE_INPUT_CSC_RY_GY_2_B 0x712E0
+
+#define _PLANE_INPUT_CSC_RY_GY_1(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_1_A, \
+ _PLANE_INPUT_CSC_RY_GY_1_B)
+#define _PLANE_INPUT_CSC_RY_GY_2(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \
+ _PLANE_INPUT_CSC_RY_GY_2_B)
+
+#define PLANE_INPUT_CSC_COEFF(pipe, plane, index) \
+ _MMIO_PLANE(plane, _PLANE_INPUT_CSC_RY_GY_1(pipe) + (index) * 4, \
+ _PLANE_INPUT_CSC_RY_GY_2(pipe) + (index) * 4)
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1_A 0x701F8
+#define _PLANE_INPUT_CSC_PREOFF_HI_2_A 0x702F8
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1_B 0x711F8
+#define _PLANE_INPUT_CSC_PREOFF_HI_2_B 0x712F8
+
+#define _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_1_A, \
+ _PLANE_INPUT_CSC_PREOFF_HI_1_B)
+#define _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_PREOFF_HI_2_A, \
+ _PLANE_INPUT_CSC_PREOFF_HI_2_B)
+#define PLANE_INPUT_CSC_PREOFF(pipe, plane, index) \
+ _MMIO_PLANE(plane, _PLANE_INPUT_CSC_PREOFF_HI_1(pipe) + (index) * 4, \
+ _PLANE_INPUT_CSC_PREOFF_HI_2(pipe) + (index) * 4)
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1_A 0x70204
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2_A 0x70304
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1_B 0x71204
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2_B 0x71304
+
+#define _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_1_A, \
+ _PLANE_INPUT_CSC_POSTOFF_HI_1_B)
+#define _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) \
+ _PIPE(pipe, _PLANE_INPUT_CSC_POSTOFF_HI_2_A, \
+ _PLANE_INPUT_CSC_POSTOFF_HI_2_B)
+#define PLANE_INPUT_CSC_POSTOFF(pipe, plane, index) \
+ _MMIO_PLANE(plane, _PLANE_INPUT_CSC_POSTOFF_HI_1(pipe) + (index) * 4, \
+ _PLANE_INPUT_CSC_POSTOFF_HI_2(pipe) + (index) * 4)
#define _PLANE_CTL_1_B 0x71180
#define _PLANE_CTL_2_B 0x71280
@@ -6701,6 +6779,15 @@ enum {
#define PLANE_AUX_OFFSET(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_AUX_OFFSET_1(pipe), _PLANE_AUX_OFFSET_2(pipe))
+#define _PLANE_CUS_CTL_1_B 0x711c8
+#define _PLANE_CUS_CTL_2_B 0x712c8
+#define _PLANE_CUS_CTL_1(pipe) \
+ _PIPE(pipe, _PLANE_CUS_CTL_1_A, _PLANE_CUS_CTL_1_B)
+#define _PLANE_CUS_CTL_2(pipe) \
+ _PIPE(pipe, _PLANE_CUS_CTL_2_A, _PLANE_CUS_CTL_2_B)
+#define PLANE_CUS_CTL(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_CUS_CTL_1(pipe), _PLANE_CUS_CTL_2(pipe))
+
#define _PLANE_COLOR_CTL_1_B 0x711CC
#define _PLANE_COLOR_CTL_2_B 0x712CC
#define _PLANE_COLOR_CTL_3_B 0x713CC
@@ -6854,11 +6941,12 @@ enum {
#define _PS_2B_CTRL 0x68A80
#define _PS_1C_CTRL 0x69180
#define PS_SCALER_EN (1 << 31)
-#define PS_SCALER_MODE_MASK (3 << 28)
-#define PS_SCALER_MODE_DYN (0 << 28)
-#define PS_SCALER_MODE_HQ (1 << 28)
+#define SKL_PS_SCALER_MODE_MASK (3 << 28)
+#define SKL_PS_SCALER_MODE_DYN (0 << 28)
+#define SKL_PS_SCALER_MODE_HQ (1 << 28)
#define SKL_PS_SCALER_MODE_NV12 (2 << 28)
#define PS_SCALER_MODE_PLANAR (1 << 29)
+#define PS_SCALER_MODE_NORMAL (0 << 29)
#define PS_PLANE_SEL_MASK (7 << 25)
#define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
#define PS_FILTER_MASK (3 << 23)
@@ -6875,6 +6963,8 @@ enum {
#define PS_VADAPT_MODE_LEAST_ADAPT (0 << 5)
#define PS_VADAPT_MODE_MOD_ADAPT (1 << 5)
#define PS_VADAPT_MODE_MOST_ADAPT (3 << 5)
+#define PS_PLANE_Y_SEL_MASK (7 << 5)
+#define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5)
#define _PS_PWR_GATE_1A 0x68160
#define _PS_PWR_GATE_2A 0x68260
@@ -7321,9 +7411,10 @@ enum {
#define BDW_DPRS_MASK_VBLANK_SRD (1 << 0)
#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
-#define CHICKEN_TRANS_A 0x420c0
-#define CHICKEN_TRANS_B 0x420c4
-#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
+#define CHICKEN_TRANS_A _MMIO(0x420c0)
+#define CHICKEN_TRANS_B _MMIO(0x420c4)
+#define CHICKEN_TRANS_C _MMIO(0x420c8)
+#define CHICKEN_TRANS_EDP _MMIO(0x420cc)
#define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
#define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
#define DDI_TRAINING_OVERRIDE_VALUE (1 << 18)
@@ -7413,6 +7504,10 @@ enum {
#define GEN9_SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731c)
#define GEN11_STATE_CACHE_REDIRECT_TO_CS (1 << 11)
+#define GEN7_SARCHKMD _MMIO(0xB000)
+#define GEN7_DISABLE_DEMAND_PREFETCH (1 << 31)
+#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
+
#define GEN7_L3SQCREG1 _MMIO(0xB010)
#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
@@ -7667,6 +7762,7 @@ enum {
#define ICP_DDIB_HPD_LONG_DETECT (2 << 4)
#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4)
#define ICP_DDIA_HPD_ENABLE (1 << 3)
+#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2)
#define ICP_DDIA_HPD_STATUS_MASK (3 << 0)
#define ICP_DDIA_HPD_NO_DETECT (0 << 0)
#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0)
@@ -7828,8 +7924,7 @@ enum {
#define CNP_RAWCLK_DIV_MASK (0x3ff << 16)
#define CNP_RAWCLK_DIV(div) ((div) << 16)
#define CNP_RAWCLK_FRAC_MASK (0xf << 26)
-#define CNP_RAWCLK_FRAC(frac) ((frac) << 26)
-#define ICP_RAWCLK_DEN(den) ((den) << 26)
+#define CNP_RAWCLK_DEN(den) ((den) << 26)
#define ICP_RAWCLK_NUM(num) ((num) << 11)
#define PCH_DPLL_TMR_CFG _MMIO(0xc6208)
@@ -8629,8 +8724,7 @@ enum {
#define GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC (1 << 9)
#define GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC (1 << 7)
-#define GAMW_ECO_DEV_RW_IA_REG _MMIO(0x4080)
-#define GAMW_ECO_DEV_CTX_RELOAD_DISABLE (1 << 7)
+#define GEN10_SAMPLER_MODE _MMIO(0xE18C)
/* IVYBRIDGE DPF */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
@@ -8931,6 +9025,15 @@ enum skl_power_gate {
#define CNL_AUX_ANAOVRD1_ENABLE (1 << 16)
#define CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23)
+#define _ICL_AUX_REG_IDX(pw_idx) ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
+#define _ICL_AUX_ANAOVRD1_A 0x162398
+#define _ICL_AUX_ANAOVRD1_B 0x6C398
+#define ICL_AUX_ANAOVRD1(pw_idx) _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
+ _ICL_AUX_ANAOVRD1_A, \
+ _ICL_AUX_ANAOVRD1_B))
+#define ICL_AUX_ANAOVRD1_LDO_BYPASS (1 << 7)
+#define ICL_AUX_ANAOVRD1_ENABLE (1 << 0)
+
/* HDCP Key Registers */
#define HDCP_KEY_CONF _MMIO(0x66c00)
#define HDCP_AKSV_SEND_TRIGGER BIT(31)
@@ -9013,11 +9116,45 @@ enum skl_power_gate {
#define HDCP_STATUS_CIPHER BIT(16)
#define HDCP_STATUS_FRAME_CNT(x) (((x) >> 8) & 0xff)
+/* HDCP2.2 Registers */
+#define _PORTA_HDCP2_BASE 0x66800
+#define _PORTB_HDCP2_BASE 0x66500
+#define _PORTC_HDCP2_BASE 0x66600
+#define _PORTD_HDCP2_BASE 0x66700
+#define _PORTE_HDCP2_BASE 0x66A00
+#define _PORTF_HDCP2_BASE 0x66900
+#define _PORT_HDCP2_BASE(port, x) _MMIO(_PICK((port), \
+ _PORTA_HDCP2_BASE, \
+ _PORTB_HDCP2_BASE, \
+ _PORTC_HDCP2_BASE, \
+ _PORTD_HDCP2_BASE, \
+ _PORTE_HDCP2_BASE, \
+ _PORTF_HDCP2_BASE) + (x))
+
+#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98)
+#define AUTH_LINK_AUTHENTICATED BIT(31)
+#define AUTH_LINK_TYPE BIT(30)
+#define AUTH_FORCE_CLR_INPUTCTR BIT(19)
+#define AUTH_CLR_KEYS BIT(18)
+
+#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0)
+#define CTL_LINK_ENCRYPTION_REQ BIT(31)
+
+#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4)
+#define STREAM_ENCRYPTION_STATUS_A BIT(31)
+#define STREAM_ENCRYPTION_STATUS_B BIT(30)
+#define STREAM_ENCRYPTION_STATUS_C BIT(29)
+#define LINK_TYPE_STATUS BIT(22)
+#define LINK_AUTH_STATUS BIT(21)
+#define LINK_ENCRYPTION_STATUS BIT(20)
+
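The HDCP 2.2 registers follow the same base-plus-offset pattern seen earlier: _PICK() indexes a per-port base table and the per-register offset is added on top. For example, the link status register for port C (assuming PORT_C == 2 in the port enum):

	#include <stdio.h>

	/* Bases from this patch; _PICK() amounts to indexing this table. */
	static const unsigned int hdcp2_base[] = {
		0x66800, 0x66500, 0x66600, 0x66700, 0x66A00, 0x66900,
	};

	int main(void)
	{
		enum { PORT_A, PORT_B, PORT_C };	/* assumed enum values */

		printf("HDCP2_STATUS_DDI(C) = 0x%x\n", hdcp2_base[PORT_C] + 0xB4);
		return 0;	/* 0x666b4 */
	}
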
/* Per-pipe DDI Function Control */
#define _TRANS_DDI_FUNC_CTL_A 0x60400
#define _TRANS_DDI_FUNC_CTL_B 0x61400
#define _TRANS_DDI_FUNC_CTL_C 0x62400
#define _TRANS_DDI_FUNC_CTL_EDP 0x6F400
+#define _TRANS_DDI_FUNC_CTL_DSI0 0x6b400
+#define _TRANS_DDI_FUNC_CTL_DSI1 0x6bc00
#define TRANS_DDI_FUNC_CTL(tran) _MMIO_TRANS2(tran, _TRANS_DDI_FUNC_CTL_A)
#define TRANS_DDI_FUNC_ENABLE (1 << 31)
@@ -9055,11 +9192,25 @@ enum skl_power_gate {
| TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
| TRANS_DDI_HDMI_SCRAMBLING)
+#define _TRANS_DDI_FUNC_CTL2_A 0x60404
+#define _TRANS_DDI_FUNC_CTL2_B 0x61404
+#define _TRANS_DDI_FUNC_CTL2_C 0x62404
+#define _TRANS_DDI_FUNC_CTL2_EDP 0x6f404
+#define _TRANS_DDI_FUNC_CTL2_DSI0 0x6b404
+#define _TRANS_DDI_FUNC_CTL2_DSI1 0x6bc04
+#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
+ _TRANS_DDI_FUNC_CTL2_A)
+#define PORT_SYNC_MODE_ENABLE (1 << 4)
+#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
+#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
+#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
+
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
#define _DP_TP_CTL_B 0x64140
#define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B)
#define DP_TP_CTL_ENABLE (1 << 31)
+#define DP_TP_CTL_FEC_ENABLE (1 << 30)
#define DP_TP_CTL_MODE_SST (0 << 27)
#define DP_TP_CTL_MODE_MST (1 << 27)
#define DP_TP_CTL_FORCE_ACT (1 << 25)
@@ -9078,6 +9229,7 @@ enum skl_power_gate {
#define _DP_TP_STATUS_A 0x64044
#define _DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B)
+#define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28)
#define DP_TP_STATUS_IDLE_DONE (1 << 25)
#define DP_TP_STATUS_ACT_SENT (1 << 24)
#define DP_TP_STATUS_MODE_STATUS_MST (1 << 23)
@@ -9226,6 +9378,8 @@ enum skl_power_gate {
#define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
#define TRANS_MSA_SYNC_CLK (1 << 0)
+#define TRANS_MSA_SAMPLING_444 (2 << 1)
+#define TRANS_MSA_CLRSP_YCBCR (2 << 3)
#define TRANS_MSA_6_BPC (0 << 5)
#define TRANS_MSA_8_BPC (1 << 5)
#define TRANS_MSA_10_BPC (2 << 5)
@@ -9793,6 +9947,10 @@ enum skl_power_gate {
#define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
+/* Gen11 DSI */
+#define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \
+ dsi0, dsi1)
+
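_MMIO_DSI builds on _MMIO_TRANS, whose _PICK_EVEN(i, a, b) expands to (a) + (i) * ((b) - (a)): rebasing the transcoder enum at TRANSCODER_DSI_0 gives index 0 or 1, selecting the DSI 0 or DSI 1 register instance. A sketch with the enum values assumed and the register pair taken from the DSI_TRANS_FUNC_CONF definitions later in this patch:

	#include <stdio.h>

	static unsigned int pick_even(int i, unsigned int a, unsigned int b)
	{
		return a + i * (b - a);	/* _PICK_EVEN() */
	}

	int main(void)
	{
		enum { TRANSCODER_DSI_0 = 6, TRANSCODER_DSI_1 };	/* assumed */
		unsigned int conf0 = 0x6b030, conf1 = 0x6b830;		/* from patch */

		printf("DSI1 FUNC_CONF = 0x%x\n",
		       pick_even(TRANSCODER_DSI_1 - TRANSCODER_DSI_0, conf0, conf1));
		return 0;	/* 0x6b830 */
	}
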
#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF
#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
@@ -9956,6 +10114,39 @@ enum skl_power_gate {
_ICL_DSI_IO_MODECTL_1)
#define COMBO_PHY_MODE_DSI (1 << 0)
+/* Display Stream Splitter Control */
+#define DSS_CTL1 _MMIO(0x67400)
+#define SPLITTER_ENABLE (1 << 31)
+#define JOINER_ENABLE (1 << 30)
+#define DUAL_LINK_MODE_INTERLEAVE (1 << 24)
+#define DUAL_LINK_MODE_FRONTBACK (0 << 24)
+#define OVERLAP_PIXELS_MASK (0xf << 16)
+#define OVERLAP_PIXELS(pixels) ((pixels) << 16)
+#define LEFT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define LEFT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+#define MAX_DL_BUFFER_TARGET_DEPTH 0x5a0
+
+#define DSS_CTL2 _MMIO(0x67404)
+#define LEFT_BRANCH_VDSC_ENABLE (1 << 31)
+#define RIGHT_BRANCH_VDSC_ENABLE (1 << 15)
+#define RIGHT_DL_BUF_TARGET_DEPTH_MASK (0xfff << 0)
+#define RIGHT_DL_BUF_TARGET_DEPTH(pixels) ((pixels) << 0)
+
+#define _ICL_PIPE_DSS_CTL1_PB 0x78200
+#define _ICL_PIPE_DSS_CTL1_PC 0x78400
+#define ICL_PIPE_DSS_CTL1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL1_PB, \
+ _ICL_PIPE_DSS_CTL1_PC)
+#define BIG_JOINER_ENABLE (1 << 29)
+#define MASTER_BIG_JOINER_ENABLE (1 << 28)
+#define VGA_CENTERING_ENABLE (1 << 27)
+
+#define _ICL_PIPE_DSS_CTL2_PB 0x78204
+#define _ICL_PIPE_DSS_CTL2_PC 0x78404
+#define ICL_PIPE_DSS_CTL2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
+ _ICL_PIPE_DSS_CTL2_PB, \
+ _ICL_PIPE_DSS_CTL2_PC)
+
#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
#define STAP_SELECT (1 << 0)
@@ -10292,6 +10483,235 @@ enum skl_power_gate {
_ICL_DSI_T_INIT_MASTER_0,\
_ICL_DSI_T_INIT_MASTER_1)
+#define _DPHY_CLK_TIMING_PARAM_0 0x162180
+#define _DPHY_CLK_TIMING_PARAM_1 0x6c180
+#define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_CLK_TIMING_PARAM_0,\
+ _DPHY_CLK_TIMING_PARAM_1)
+#define _DSI_CLK_TIMING_PARAM_0 0x6b080
+#define _DSI_CLK_TIMING_PARAM_1 0x6b880
+#define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_CLK_TIMING_PARAM_0,\
+ _DSI_CLK_TIMING_PARAM_1)
+#define CLK_PREPARE_OVERRIDE (1 << 31)
+#define CLK_PREPARE(x) ((x) << 28)
+#define CLK_PREPARE_MASK (0x7 << 28)
+#define CLK_PREPARE_SHIFT 28
+#define CLK_ZERO_OVERRIDE (1 << 27)
+#define CLK_ZERO(x) ((x) << 20)
+#define CLK_ZERO_MASK (0xf << 20)
+#define CLK_ZERO_SHIFT 20
+#define CLK_PRE_OVERRIDE (1 << 19)
+#define CLK_PRE(x) ((x) << 16)
+#define CLK_PRE_MASK (0x3 << 16)
+#define CLK_PRE_SHIFT 16
+#define CLK_POST_OVERRIDE (1 << 15)
+#define CLK_POST(x) ((x) << 8)
+#define CLK_POST_MASK (0x7 << 8)
+#define CLK_POST_SHIFT 8
+#define CLK_TRAIL_OVERRIDE (1 << 7)
+#define CLK_TRAIL(x) ((x) << 0)
+#define CLK_TRAIL_MASK (0xf << 0)
+#define CLK_TRAIL_SHIFT 0
+
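A sketch of how the clock-lane fields compose into a single register value (the counts here are hypothetical; the real value arrives pre-packed from the VBT as intel_dsi->dphy_reg, see gen11_dsi_setup_dphy_timings() below):

    static void program_clk_timings_sketch(struct drm_i915_private *dev_priv)
    {
        u32 dphy_clk = CLK_PREPARE_OVERRIDE | CLK_PREPARE(5) |
                       CLK_ZERO_OVERRIDE | CLK_ZERO(10) |
                       CLK_PRE_OVERRIDE | CLK_PRE(1) |
                       CLK_POST_OVERRIDE | CLK_POST(4) |
                       CLK_TRAIL_OVERRIDE | CLK_TRAIL(6);

        I915_WRITE(DPHY_CLK_TIMING_PARAM(PORT_A), dphy_clk);
        /* the display core keeps a shadow copy */
        I915_WRITE(DSI_CLK_TIMING_PARAM(PORT_A), dphy_clk);
    }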
+#define _DPHY_DATA_TIMING_PARAM_0 0x162184
+#define _DPHY_DATA_TIMING_PARAM_1 0x6c184
+#define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_DATA_TIMING_PARAM_0,\
+ _DPHY_DATA_TIMING_PARAM_1)
+#define _DSI_DATA_TIMING_PARAM_0 0x6B084
+#define _DSI_DATA_TIMING_PARAM_1 0x6B884
+#define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_DATA_TIMING_PARAM_0,\
+ _DSI_DATA_TIMING_PARAM_1)
+#define HS_PREPARE_OVERRIDE (1 << 31)
+#define HS_PREPARE(x) ((x) << 24)
+#define HS_PREPARE_MASK (0x7 << 24)
+#define HS_PREPARE_SHIFT 24
+#define HS_ZERO_OVERRIDE (1 << 23)
+#define HS_ZERO(x) ((x) << 16)
+#define HS_ZERO_MASK (0xf << 16)
+#define HS_ZERO_SHIFT 16
+#define HS_TRAIL_OVERRIDE (1 << 15)
+#define HS_TRAIL(x) ((x) << 8)
+#define HS_TRAIL_MASK (0x7 << 8)
+#define HS_TRAIL_SHIFT 8
+#define HS_EXIT_OVERRIDE (1 << 7)
+#define HS_EXIT(x) ((x) << 0)
+#define HS_EXIT_MASK (0x7 << 0)
+#define HS_EXIT_SHIFT 0
+
+#define _DPHY_TA_TIMING_PARAM_0 0x162188
+#define _DPHY_TA_TIMING_PARAM_1 0x6c188
+#define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DPHY_TA_TIMING_PARAM_0,\
+ _DPHY_TA_TIMING_PARAM_1)
+#define _DSI_TA_TIMING_PARAM_0 0x6b098
+#define _DSI_TA_TIMING_PARAM_1 0x6b898
+#define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \
+ _DSI_TA_TIMING_PARAM_0,\
+ _DSI_TA_TIMING_PARAM_1)
+#define TA_SURE_OVERRIDE (1 << 31)
+#define TA_SURE(x) ((x) << 16)
+#define TA_SURE_MASK (0x1f << 16)
+#define TA_SURE_SHIFT 16
+#define TA_GO_OVERRIDE (1 << 15)
+#define TA_GO(x) ((x) << 8)
+#define TA_GO_MASK (0xf << 8)
+#define TA_GO_SHIFT 8
+#define TA_GET_OVERRIDE (1 << 7)
+#define TA_GET(x) ((x) << 0)
+#define TA_GET_MASK (0xf << 0)
+#define TA_GET_SHIFT 0
+
+/* DSI transcoder configuration */
+#define _DSI_TRANS_FUNC_CONF_0 0x6b030
+#define _DSI_TRANS_FUNC_CONF_1 0x6b830
+#define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \
+ _DSI_TRANS_FUNC_CONF_0,\
+ _DSI_TRANS_FUNC_CONF_1)
+#define OP_MODE_MASK (0x3 << 28)
+#define OP_MODE_SHIFT 28
+#define CMD_MODE_NO_GATE (0x0 << 28)
+#define CMD_MODE_TE_GATE (0x1 << 28)
+#define VIDEO_MODE_SYNC_EVENT (0x2 << 28)
+#define VIDEO_MODE_SYNC_PULSE (0x3 << 28)
+#define LINK_READY (1 << 20)
+#define PIX_FMT_MASK (0x3 << 16)
+#define PIX_FMT_SHIFT 16
+#define PIX_FMT_RGB565 (0x0 << 16)
+#define PIX_FMT_RGB666_PACKED (0x1 << 16)
+#define PIX_FMT_RGB666_LOOSE (0x2 << 16)
+#define PIX_FMT_RGB888 (0x3 << 16)
+#define PIX_FMT_RGB101010 (0x4 << 16)
+#define PIX_FMT_RGB121212 (0x5 << 16)
+#define PIX_FMT_COMPRESSED (0x6 << 16)
+#define BGR_TRANSMISSION (1 << 15)
+#define PIX_VIRT_CHAN(x) ((x) << 12)
+#define PIX_VIRT_CHAN_MASK (0x3 << 12)
+#define PIX_VIRT_CHAN_SHIFT 12
+#define PIX_BUF_THRESHOLD_MASK (0x3 << 10)
+#define PIX_BUF_THRESHOLD_SHIFT 10
+#define PIX_BUF_THRESHOLD_1_4 (0x0 << 10)
+#define PIX_BUF_THRESHOLD_1_2 (0x1 << 10)
+#define PIX_BUF_THRESHOLD_3_4 (0x2 << 10)
+#define PIX_BUF_THRESHOLD_FULL (0x3 << 10)
+#define CONTINUOUS_CLK_MASK (0x3 << 8)
+#define CONTINUOUS_CLK_SHIFT 8
+#define CLK_ENTER_LP_AFTER_DATA (0x0 << 8)
+#define CLK_HS_OR_LP (0x2 << 8)
+#define CLK_HS_CONTINUOUS (0x3 << 8)
+#define LINK_CALIBRATION_MASK (0x3 << 4)
+#define LINK_CALIBRATION_SHIFT 4
+#define CALIBRATION_DISABLED (0x0 << 4)
+#define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4)
+#define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4)
+#define S3D_ORIENTATION_LANDSCAPE (1 << 1)
+#define EOTP_DISABLED (1 << 0)
+
+#define _DSI_CMD_RXCTL_0 0x6b0d4
+#define _DSI_CMD_RXCTL_1 0x6b8d4
+#define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_RXCTL_0,\
+ _DSI_CMD_RXCTL_1)
+#define READ_UNLOADS_DW (1 << 16)
+#define RECEIVED_UNASSIGNED_TRIGGER (1 << 15)
+#define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14)
+#define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13)
+#define RECEIVED_RESET_TRIGGER (1 << 12)
+#define RECEIVED_PAYLOAD_WAS_LOST (1 << 11)
+#define RECEIVED_CRC_WAS_LOST (1 << 10)
+#define NUMBER_RX_PLOAD_DW_MASK (0xff << 0)
+#define NUMBER_RX_PLOAD_DW_SHIFT 0
+
+#define _DSI_CMD_TXCTL_0 0x6b0d0
+#define _DSI_CMD_TXCTL_1 0x6b8d0
+#define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXCTL_0,\
+ _DSI_CMD_TXCTL_1)
+#define KEEP_LINK_IN_HS (1 << 24)
+#define FREE_HEADER_CREDIT_MASK (0x1f << 8)
+#define  FREE_HEADER_CREDIT_SHIFT	8
+#define FREE_PLOAD_CREDIT_MASK (0xff << 0)
+#define FREE_PLOAD_CREDIT_SHIFT 0
+#define MAX_HEADER_CREDIT 0x10
+#define MAX_PLOAD_CREDIT 0x40
+
+#define _DSI_CMD_TXHDR_0 0x6b100
+#define _DSI_CMD_TXHDR_1 0x6b900
+#define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXHDR_0,\
+ _DSI_CMD_TXHDR_1)
+#define PAYLOAD_PRESENT (1 << 31)
+#define LP_DATA_TRANSFER (1 << 30)
+#define VBLANK_FENCE (1 << 29)
+#define PARAM_WC_MASK (0xffff << 8)
+#define PARAM_WC_LOWER_SHIFT 8
+#define PARAM_WC_UPPER_SHIFT 16
+#define VC_MASK (0x3 << 6)
+#define VC_SHIFT 6
+#define DT_MASK (0x3f << 0)
+#define DT_SHIFT 0
+
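For reference, the header bytes that land in this register follow the standard MIPI DSI packet layout (this mapping comes from drm_mipi_dsi.h, not from this patch):

    /*
     * mipi_dsi_packet.header[] as built by mipi_dsi_create_packet():
     *   header[0] = virtual channel (bits 7:6) | data type (bits 5:0)
     *   header[1] = word count low byte  -> bits 15:8  (PARAM_WC_LOWER_SHIFT)
     *   header[2] = word count high byte -> bits 23:16 (PARAM_WC_UPPER_SHIFT)
     * dsi_send_pkt_hdr() in icl_dsi.c, later in this diff, packs these
     * together with PAYLOAD_PRESENT and LP_DATA_TRANSFER.
     */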
+#define _DSI_CMD_TXPYLD_0 0x6b104
+#define _DSI_CMD_TXPYLD_1 0x6b904
+#define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \
+ _DSI_CMD_TXPYLD_0,\
+ _DSI_CMD_TXPYLD_1)
+
+#define _DSI_LP_MSG_0 0x6b0d8
+#define _DSI_LP_MSG_1 0x6b8d8
+#define DSI_LP_MSG(tc) _MMIO_DSI(tc, \
+ _DSI_LP_MSG_0,\
+ _DSI_LP_MSG_1)
+#define LPTX_IN_PROGRESS (1 << 17)
+#define LINK_IN_ULPS (1 << 16)
+#define LINK_ULPS_TYPE_LP11 (1 << 8)
+#define LINK_ENTER_ULPS (1 << 0)
+
+/* DSI timeout registers */
+#define _DSI_HSTX_TO_0 0x6b044
+#define _DSI_HSTX_TO_1 0x6b844
+#define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \
+ _DSI_HSTX_TO_0,\
+ _DSI_HSTX_TO_1)
+#define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16)
+#define HSTX_TIMEOUT_VALUE_SHIFT 16
+#define HSTX_TIMEOUT_VALUE(x) ((x) << 16)
+#define HSTX_TIMED_OUT (1 << 0)
+
+#define _DSI_LPRX_HOST_TO_0 0x6b048
+#define _DSI_LPRX_HOST_TO_1 0x6b848
+#define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \
+ _DSI_LPRX_HOST_TO_0,\
+ _DSI_LPRX_HOST_TO_1)
+#define LPRX_TIMED_OUT (1 << 16)
+#define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define LPRX_TIMEOUT_VALUE_SHIFT 0
+#define LPRX_TIMEOUT_VALUE(x) ((x) << 0)
+
+#define _DSI_PWAIT_TO_0 0x6b040
+#define _DSI_PWAIT_TO_1 0x6b840
+#define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \
+ _DSI_PWAIT_TO_0,\
+ _DSI_PWAIT_TO_1)
+#define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16)
+#define PRESET_TIMEOUT_VALUE_SHIFT 16
+#define PRESET_TIMEOUT_VALUE(x) ((x) << 16)
+#define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define PRESPONSE_TIMEOUT_VALUE_SHIFT 0
+#define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0)
+
+#define _DSI_TA_TO_0 0x6b04c
+#define _DSI_TA_TO_1 0x6b84c
+#define DSI_TA_TO(tc) _MMIO_DSI(tc, \
+ _DSI_TA_TO_0,\
+ _DSI_TA_TO_1)
+#define TA_TIMED_OUT (1 << 16)
+#define TA_TIMEOUT_VALUE_MASK (0xffff << 0)
+#define TA_TIMEOUT_VALUE_SHIFT 0
+#define TA_TIMEOUT_VALUE(x) ((x) << 0)
+
/* bits 31:0 */
#define _MIPIA_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb084)
#define _MIPIC_DBI_BW_CTRL (dev_priv->mipi_mmio_base + 0xb884)
@@ -10404,10 +10824,6 @@ enum skl_power_gate {
#define MIPI_READ_DATA_VALID(port) _MMIO_MIPI(port, _MIPIA_READ_DATA_VALID, _MIPIC_READ_DATA_VALID)
#define READ_DATA_VALID(n) (1 << (n))
-/* For UMS only (deprecated): */
-#define _PALETTE_A (dev_priv->info.display_mmio_offset + 0xa000)
-#define _PALETTE_B (dev_priv->info.display_mmio_offset + 0xa800)
-
/* MOCS (Memory Object Control State) registers */
#define GEN9_LNCFCMOCS(i) _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
@@ -10693,6 +11109,7 @@ enum skl_power_gate {
#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
_ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
_ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define DSC_SLICE_ROW_PER_FRAME(slice_row_per_frame) ((slice_row_per_frame) << 20)
#define DSC_SLICE_PER_LINE(slice_per_line) ((slice_per_line) << 16)
#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size) ((slice_chunk_size) << 0)
@@ -10747,17 +11164,17 @@ enum skl_power_gate {
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
_ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)
-#define PORT_TX_DFLEXDPSP _MMIO(0x1638A0)
+#define PORT_TX_DFLEXDPSP _MMIO(FIA1_BASE + 0x008A0)
#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6))
#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5))
#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8)
#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8))
#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8))
-#define PORT_TX_DFLEXDPPMS _MMIO(0x163890)
+#define PORT_TX_DFLEXDPPMS _MMIO(FIA1_BASE + 0x00890)
#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port))
-#define PORT_TX_DFLEXDPCSSS _MMIO(0x163894)
+#define PORT_TX_DFLEXDPCSSS _MMIO(FIA1_BASE + 0x00894)
#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port))
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index a492385b2089..ca95ab2f4cfa 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -111,91 +111,6 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
-{
- return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
-}
-
-static void
-i915_dependency_free(struct drm_i915_private *i915,
- struct i915_dependency *dep)
-{
- kmem_cache_free(i915->dependencies, dep);
-}
-
-static void
-__i915_sched_node_add_dependency(struct i915_sched_node *node,
- struct i915_sched_node *signal,
- struct i915_dependency *dep,
- unsigned long flags)
-{
- INIT_LIST_HEAD(&dep->dfs_link);
- list_add(&dep->wait_link, &signal->waiters_list);
- list_add(&dep->signal_link, &node->signalers_list);
- dep->signaler = signal;
- dep->flags = flags;
-}
-
-static int
-i915_sched_node_add_dependency(struct drm_i915_private *i915,
- struct i915_sched_node *node,
- struct i915_sched_node *signal)
-{
- struct i915_dependency *dep;
-
- dep = i915_dependency_alloc(i915);
- if (!dep)
- return -ENOMEM;
-
- __i915_sched_node_add_dependency(node, signal, dep,
- I915_DEPENDENCY_ALLOC);
- return 0;
-}
-
-static void
-i915_sched_node_fini(struct drm_i915_private *i915,
- struct i915_sched_node *node)
-{
- struct i915_dependency *dep, *tmp;
-
- GEM_BUG_ON(!list_empty(&node->link));
-
- /*
- * Everyone we depended upon (the fences we wait to be signaled)
- * should retire before us and remove themselves from our list.
- * However, retirement is run independently on each timeline and
- * so we may be called out-of-order.
- */
- list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
- GEM_BUG_ON(!i915_sched_node_signaled(dep->signaler));
- GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
- list_del(&dep->wait_link);
- if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
- }
-
- /* Remove ourselves from everyone who depends upon us */
- list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
- GEM_BUG_ON(dep->signaler != node);
- GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
- list_del(&dep->signal_link);
- if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
- }
-}
-
-static void
-i915_sched_node_init(struct i915_sched_node *node)
-{
- INIT_LIST_HEAD(&node->signalers_list);
- INIT_LIST_HEAD(&node->waiters_list);
- INIT_LIST_HEAD(&node->link);
- node->attr.priority = I915_PRIORITY_INVALID;
-}
-
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
struct intel_engine_cs *engine;
@@ -221,6 +136,11 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
intel_engine_get_seqno(engine),
seqno);
+ if (seqno == engine->timeline.seqno)
+ continue;
+
+ kthread_park(engine->breadcrumbs.signaler);
+
if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
/* Flush any waiters before we reuse the seqno */
intel_engine_disarm_breadcrumbs(engine);
@@ -235,6 +155,8 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* Finally reset hw state */
intel_engine_init_global_seqno(engine, seqno);
engine->timeline.seqno = seqno;
+
+ kthread_unpark(engine->breadcrumbs.signaler);
}
list_for_each_entry(timeline, &i915->gt.timelines, link)
@@ -740,17 +662,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
if (rq)
cond_synchronize_rcu(rq->rcustate);
- /*
- * We've forced the client to stall and catch up with whatever
- * backlog there might have been. As we are assuming that we
- * caused the mempressure, now is an opportune time to
- * recover as much memory from the request pool as is possible.
- * Having already penalized the client to stall, we spend
- * a little extra time to re-optimise page allocation.
- */
- kmem_cache_shrink(i915->requests);
- rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */
-
rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
if (!rq) {
ret = -ENOMEM;
@@ -1127,8 +1038,20 @@ void i915_request_add(struct i915_request *request)
*/
local_bh_disable();
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
- if (engine->schedule)
- engine->schedule(request, &request->gem_context->sched);
+ if (engine->schedule) {
+ struct i915_sched_attr attr = request->gem_context->sched;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (!prev || i915_request_completed(prev))
+ attr.priority |= I915_PRIORITY_NEWCLIENT;
+
+ engine->schedule(request, &attr);
+ }
rcu_read_unlock();
i915_sw_fence_commit(&request->submit);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

@@ -1310,6 +1233,8 @@ long i915_request_wait(struct i915_request *rq,
add_wait_queue(errq, &reset);
intel_wait_init(&wait);
+ if (flags & I915_WAIT_PRIORITY)
+ i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
restart:
do {
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 7fa94b024968..90e9d170a0cd 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -277,8 +277,9 @@ long i915_request_wait(struct i915_request *rq,
__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE BIT(0)
#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */
-#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */
-#define I915_WAIT_FOR_IDLE_BOOST BIT(3)
+#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */
+#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
+#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
u32 seqno);
@@ -332,14 +333,6 @@ static inline bool i915_request_completed(const struct i915_request *rq)
return __i915_request_completed(rq, seqno);
}
-static inline bool i915_sched_node_signaled(const struct i915_sched_node *node)
-{
- const struct i915_request *rq =
- container_of(node, const struct i915_request, sched);
-
- return i915_request_completed(rq);
-}
-
void i915_retire_requests(struct drm_i915_private *i915);
/*
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
new file mode 100644
index 000000000000..340faea6c08a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -0,0 +1,399 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/mutex.h>
+
+#include "i915_drv.h"
+#include "i915_request.h"
+#include "i915_scheduler.h"
+
+static DEFINE_SPINLOCK(schedule_lock);
+
+static const struct i915_request *
+node_to_request(const struct i915_sched_node *node)
+{
+ return container_of(node, const struct i915_request, sched);
+}
+
+static inline bool node_signaled(const struct i915_sched_node *node)
+{
+ return i915_request_completed(node_to_request(node));
+}
+
+void i915_sched_node_init(struct i915_sched_node *node)
+{
+ INIT_LIST_HEAD(&node->signalers_list);
+ INIT_LIST_HEAD(&node->waiters_list);
+ INIT_LIST_HEAD(&node->link);
+ node->attr.priority = I915_PRIORITY_INVALID;
+}
+
+static struct i915_dependency *
+i915_dependency_alloc(struct drm_i915_private *i915)
+{
+ return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct drm_i915_private *i915,
+ struct i915_dependency *dep)
+{
+ kmem_cache_free(i915->dependencies, dep);
+}
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ struct i915_sched_node *signal,
+ struct i915_dependency *dep,
+ unsigned long flags)
+{
+ bool ret = false;
+
+ spin_lock(&schedule_lock);
+
+ if (!node_signaled(signal)) {
+ INIT_LIST_HEAD(&dep->dfs_link);
+ list_add(&dep->wait_link, &signal->waiters_list);
+ list_add(&dep->signal_link, &node->signalers_list);
+ dep->signaler = signal;
+ dep->flags = flags;
+
+ ret = true;
+ }
+
+ spin_unlock(&schedule_lock);
+
+ return ret;
+}
+
+int i915_sched_node_add_dependency(struct drm_i915_private *i915,
+ struct i915_sched_node *node,
+ struct i915_sched_node *signal)
+{
+ struct i915_dependency *dep;
+
+ dep = i915_dependency_alloc(i915);
+ if (!dep)
+ return -ENOMEM;
+
+ if (!__i915_sched_node_add_dependency(node, signal, dep,
+ I915_DEPENDENCY_ALLOC))
+ i915_dependency_free(i915, dep);
+
+ return 0;
+}
+
+void i915_sched_node_fini(struct drm_i915_private *i915,
+ struct i915_sched_node *node)
+{
+ struct i915_dependency *dep, *tmp;
+
+ GEM_BUG_ON(!list_empty(&node->link));
+
+ spin_lock(&schedule_lock);
+
+ /*
+ * Everyone we depended upon (the fences we wait to be signaled)
+ * should retire before us and remove themselves from our list.
+ * However, retirement is run independently on each timeline and
+ * so we may be called out-of-order.
+ */
+ list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
+ GEM_BUG_ON(!node_signaled(dep->signaler));
+ GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+ list_del(&dep->wait_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+
+ /* Remove ourselves from everyone who depends upon us */
+ list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
+ GEM_BUG_ON(dep->signaler != node);
+ GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+ list_del(&dep->signal_link);
+ if (dep->flags & I915_DEPENDENCY_ALLOC)
+ i915_dependency_free(i915, dep);
+ }
+
+ spin_unlock(&schedule_lock);
+}
+
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+ return rb_entry(rb, struct i915_priolist, node);
+}
+
+static void assert_priolists(struct intel_engine_execlists * const execlists,
+ long queue_priority)
+{
+ struct rb_node *rb;
+ long last_prio, i;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
+ rb_first(&execlists->queue.rb_root));
+
+ last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ const struct i915_priolist *p = to_priolist(rb);
+
+ GEM_BUG_ON(p->priority >= last_prio);
+ last_prio = p->priority;
+
+ GEM_BUG_ON(!p->used);
+ for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
+ if (list_empty(&p->requests[i]))
+ continue;
+
+ GEM_BUG_ON(!(p->used & BIT(i)));
+ }
+ }
+}
+
+struct list_head *
+i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_priolist *p;
+ struct rb_node **parent, *rb;
+ bool first = true;
+ int idx, i;
+
+ lockdep_assert_held(&engine->timeline.lock);
+ assert_priolists(execlists, INT_MAX);
+
+ /* buckets sorted from highest [in slot 0] to lowest priority */
+ idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
+ prio >>= I915_USER_PRIORITY_SHIFT;
+ if (unlikely(execlists->no_priolist))
+ prio = I915_PRIORITY_NORMAL;
+
+find_priolist:
+ /* most positive priority is scheduled first, equal priorities fifo */
+ rb = NULL;
+ parent = &execlists->queue.rb_root.rb_node;
+ while (*parent) {
+ rb = *parent;
+ p = to_priolist(rb);
+ if (prio > p->priority) {
+ parent = &rb->rb_left;
+ } else if (prio < p->priority) {
+ parent = &rb->rb_right;
+ first = false;
+ } else {
+ goto out;
+ }
+ }
+
+ if (prio == I915_PRIORITY_NORMAL) {
+ p = &execlists->default_priolist;
+ } else {
+ p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+ /* Convert an allocation failure to a priority bump */
+ if (unlikely(!p)) {
+ prio = I915_PRIORITY_NORMAL; /* recurses just once */
+
+ /* To maintain ordering with all rendering, after an
+ * allocation failure we have to disable all scheduling.
+ * Requests will then be executed in fifo, and schedule
+ * will ensure that dependencies are emitted in fifo.
+ * There will be still some reordering with existing
+ * requests, so if userspace lied about their
+ * dependencies that reordering may be visible.
+ */
+ execlists->no_priolist = true;
+ goto find_priolist;
+ }
+ }
+
+ p->priority = prio;
+ for (i = 0; i < ARRAY_SIZE(p->requests); i++)
+ INIT_LIST_HEAD(&p->requests[i]);
+ rb_link_node(&p->node, rb, parent);
+ rb_insert_color_cached(&p->node, &execlists->queue, first);
+ p->used = 0;
+
+out:
+ p->used |= BIT(idx);
+ return &p->requests[idx];
+}
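A worked example of the bucket arithmetic above, assuming I915_USER_PRIORITY_SHIFT == 2 as defined in i915_scheduler.h later in this diff:

    /*
     * prio = I915_USER_PRIORITY(0) | I915_PRIORITY_NEWCLIENT = 2
     * idx  = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1
     *      = 4 - 2 - 1 = 1
     * prio >>= I915_USER_PRIORITY_SHIFT  ->  0   (keys the rbtree)
     *
     * All internal bumps within one user level therefore share a single
     * i915_priolist, with slot 0 holding the most-boosted requests.
     */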
+
+static struct intel_engine_cs *
+sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
+{
+ struct intel_engine_cs *engine = node_to_request(node)->engine;
+
+ GEM_BUG_ON(!locked);
+
+ if (engine != locked) {
+ spin_unlock(&locked->timeline.lock);
+ spin_lock(&engine->timeline.lock);
+ }
+
+ return engine;
+}
+
+static void __i915_schedule(struct i915_request *rq,
+ const struct i915_sched_attr *attr)
+{
+ struct list_head *uninitialized_var(pl);
+ struct intel_engine_cs *engine, *last;
+ struct i915_dependency *dep, *p;
+ struct i915_dependency stack;
+ const int prio = attr->priority;
+ LIST_HEAD(dfs);
+
+ /* Needed in order to use the temporary link inside i915_dependency */
+ lockdep_assert_held(&schedule_lock);
+ GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
+
+ if (i915_request_completed(rq))
+ return;
+
+ if (prio <= READ_ONCE(rq->sched.attr.priority))
+ return;
+
+ stack.signaler = &rq->sched;
+ list_add(&stack.dfs_link, &dfs);
+
+ /*
+ * Recursively bump all dependent priorities to match the new request.
+ *
+ * A naive approach would be to use recursion:
+ * static void update_priorities(struct i915_sched_node *node, prio) {
+ * list_for_each_entry(dep, &node->signalers_list, signal_link)
+ * update_priorities(dep->signal, prio)
+ * queue_request(node);
+ * }
+ * but that may have unlimited recursion depth and so runs a very
+	 * real risk of overrunning the kernel stack. Instead, we build
+ * a flat list of all dependencies starting with the current request.
+ * As we walk the list of dependencies, we add all of its dependencies
+ * to the end of the list (this may include an already visited
+ * request) and continue to walk onwards onto the new dependencies. The
+ * end result is a topological list of requests in reverse order, the
+ * last element in the list is the request we must execute first.
+ */
+ list_for_each_entry(dep, &dfs, dfs_link) {
+ struct i915_sched_node *node = dep->signaler;
+
+ /*
+ * Within an engine, there can be no cycle, but we may
+ * refer to the same dependency chain multiple times
+ * (redundant dependencies are not eliminated) and across
+ * engines.
+ */
+ list_for_each_entry(p, &node->signalers_list, signal_link) {
+ GEM_BUG_ON(p == dep); /* no cycles! */
+
+ if (node_signaled(p->signaler))
+ continue;
+
+ GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
+ if (prio > READ_ONCE(p->signaler->attr.priority))
+ list_move_tail(&p->dfs_link, &dfs);
+ }
+ }
+
+ /*
+ * If we didn't need to bump any existing priorities, and we haven't
+ * yet submitted this request (i.e. there is no potential race with
+ * execlists_submit_request()), we can set our own priority and skip
+ * acquiring the engine locks.
+ */
+ if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ rq->sched.attr = *attr;
+
+ if (stack.dfs_link.next == stack.dfs_link.prev)
+ return;
+
+ __list_del_entry(&stack.dfs_link);
+ }
+
+ last = NULL;
+ engine = rq->engine;
+ spin_lock_irq(&engine->timeline.lock);
+
+ /* Fifo and depth-first replacement ensure our deps execute before us */
+ list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+ struct i915_sched_node *node = dep->signaler;
+
+ INIT_LIST_HEAD(&dep->dfs_link);
+
+ engine = sched_lock_engine(node, engine);
+
+ /* Recheck after acquiring the engine->timeline.lock */
+ if (prio <= node->attr.priority || node_signaled(node))
+ continue;
+
+ node->attr.priority = prio;
+ if (!list_empty(&node->link)) {
+ if (last != engine) {
+ pl = i915_sched_lookup_priolist(engine, prio);
+ last = engine;
+ }
+ list_move_tail(&node->link, pl);
+ } else {
+ /*
+ * If the request is not in the priolist queue because
+ * it is not yet runnable, then it doesn't contribute
+ * to our preemption decisions. On the other hand,
+ * if the request is on the HW, it too is not in the
+ * queue; but in that case we may still need to reorder
+ * the inflight requests.
+ */
+ if (!i915_sw_fence_done(&node_to_request(node)->submit))
+ continue;
+ }
+
+ if (prio <= engine->execlists.queue_priority)
+ continue;
+
+ /*
+ * If we are already the currently executing context, don't
+ * bother evaluating if we should preempt ourselves.
+ */
+ if (node_to_request(node)->global_seqno &&
+ i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
+ node_to_request(node)->global_seqno))
+ continue;
+
+ /* Defer (tasklet) submission until after all of our updates. */
+ engine->execlists.queue_priority = prio;
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+
+ spin_unlock_irq(&engine->timeline.lock);
+}
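The same recursion-to-worklist idea in miniature (a generic, simplified sketch, not driver code; unlike __i915_schedule(), which re-queues already-visited edges with list_move_tail() to keep the ordering exact, this version merely skips them):

    struct node {
        struct list_head dfs_link;  /* must be INIT_LIST_HEAD()ed */
        struct node *in[4];         /* NULL-terminated dependencies */
        int prio;
    };

    static void bump_priority(struct node *root, int prio)
    {
        LIST_HEAD(dfs);
        struct node *n;
        int i;

        list_add(&root->dfs_link, &dfs);

        /* the list grows at its tail while we walk it: O(1) stack */
        list_for_each_entry(n, &dfs, dfs_link)
            for (i = 0; n->in[i]; i++)
                if (n->in[i]->prio < prio &&
                    list_empty(&n->in[i]->dfs_link))
                    list_add_tail(&n->in[i]->dfs_link, &dfs);

        /* walk backwards: dependencies receive their bump first */
        list_for_each_entry_reverse(n, &dfs, dfs_link)
            n->prio = prio;
    }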
+
+void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
+{
+ spin_lock(&schedule_lock);
+ __i915_schedule(rq, attr);
+ spin_unlock(&schedule_lock);
+}
+
+void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
+{
+ struct i915_sched_attr attr;
+
+ GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
+
+ if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
+ return;
+
+ spin_lock_bh(&schedule_lock);
+
+ attr = rq->sched.attr;
+ attr.priority |= bump;
+ __i915_schedule(rq, &attr);
+
+ spin_unlock_bh(&schedule_lock);
+}
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 70a42220358d..dbe9cb7ecd82 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -8,9 +8,14 @@
#define _I915_SCHEDULER_H_
#include <linux/bitops.h>
+#include <linux/kernel.h>
#include <uapi/drm/i915_drm.h>
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
enum {
I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
@@ -19,6 +24,15 @@ enum {
I915_PRIORITY_INVALID = INT_MIN
};
+#define I915_USER_PRIORITY_SHIFT 2
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
+#define I915_PRIORITY_WAIT ((u8)BIT(0))
+#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
+
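Spelled out, the encoding these definitions produce (with the shift of 2 above): the user-visible priority lives in the bits above the two internal bump bits, so a bump can never promote a request past the next user level:

    /*
     *   upper bits : I915_USER_PRIORITY(user_prio)
     *   bit  1     : I915_PRIORITY_NEWCLIENT (fresh request flow)
     *   bit  0     : I915_PRIORITY_WAIT      (someone is waiting on it)
     *
     *   e.g. I915_USER_PRIORITY(1) | I915_PRIORITY_WAIT == 5,
     *   which still compares below I915_USER_PRIORITY(2) == 8.
     */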
struct i915_sched_attr {
/**
* @priority: execution and service priority
@@ -69,4 +83,26 @@ struct i915_dependency {
#define I915_DEPENDENCY_ALLOC BIT(0)
};
+void i915_sched_node_init(struct i915_sched_node *node);
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ struct i915_sched_node *signal,
+ struct i915_dependency *dep,
+ unsigned long flags);
+
+int i915_sched_node_add_dependency(struct drm_i915_private *i915,
+ struct i915_sched_node *node,
+ struct i915_sched_node *signal);
+
+void i915_sched_node_fini(struct drm_i915_private *i915,
+ struct i915_sched_node *node);
+
+void i915_schedule(struct i915_request *request,
+ const struct i915_sched_attr *attr);
+
+void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
+
+struct list_head *
+i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
+
#endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 6dbeed079ae5..fc2eeab823b7 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -1,10 +1,7 @@
/*
- * (C) Copyright 2016 Intel Corporation
+ * SPDX-License-Identifier: MIT
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
+ * (C) Copyright 2016 Intel Corporation
*/
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index fe2ef4dadfc6..0e055ea0179f 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -1,10 +1,9 @@
/*
+ * SPDX-License-Identifier: MIT
+ *
* i915_sw_fence.h - library routines for N:M synchronisation points
*
* Copyright (C) 2016 Intel Corporation
- *
- * This file is released under the GPLv2.
- *
*/
#ifndef _I915_SW_FENCE_H_
diff --git a/drivers/gpu/drm/i915/i915_syncmap.c b/drivers/gpu/drm/i915/i915_syncmap.c
index 58f8d0cc125c..60404dbb2e9f 100644
--- a/drivers/gpu/drm/i915/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/i915_syncmap.c
@@ -92,7 +92,7 @@ void i915_syncmap_init(struct i915_syncmap **root)
{
BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP);
BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT);
- BUILD_BUG_ON(KSYNCMAP > BITS_PER_BYTE * sizeof((*root)->bitmap));
+ BUILD_BUG_ON(KSYNCMAP > BITS_PER_TYPE((*root)->bitmap));
*root = NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e5e6f6bb2b05..535caebd9813 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -483,7 +483,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
-static const struct attribute *gen6_attrs[] = {
+static const struct attribute * const gen6_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_boost_freq_mhz.attr,
@@ -495,7 +495,7 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
-static const struct attribute *vlv_attrs[] = {
+static const struct attribute * const vlv_attrs[] = {
&dev_attr_gt_act_freq_mhz.attr,
&dev_attr_gt_cur_freq_mhz.attr,
&dev_attr_gt_boost_freq_mhz.attr,
@@ -516,26 +516,21 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_i915_error_state_buf error_str;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gpu_state *gpu;
ssize_t ret;
- ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
- if (ret)
- return ret;
-
- gpu = i915_first_error_state(dev_priv);
- ret = i915_error_state_to_str(&error_str, gpu);
- if (ret)
- goto out;
-
- ret = count < error_str.bytes ? count : error_str.bytes;
- memcpy(buf, error_str.buf, ret);
+ gpu = i915_first_error_state(i915);
+ if (gpu) {
+ ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_state_put(gpu);
+ } else {
+ const char *str = "No error state collected\n";
+ size_t len = strlen(str);
-out:
- i915_gpu_state_put(gpu);
- i915_error_state_buf_release(&error_str);
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index a2c2c3ab5fb0..ebd71b487220 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -83,6 +83,25 @@ void i915_timeline_init(struct drm_i915_private *i915,
const char *name);
void i915_timeline_fini(struct i915_timeline *tl);
+static inline void
+i915_timeline_set_subclass(struct i915_timeline *timeline,
+ unsigned int subclass)
+{
+ lockdep_set_subclass(&timeline->lock, subclass);
+
+ /*
+ * Due to an interesting quirk in lockdep's internal debug tracking,
+ * after setting a subclass we must ensure the lock is used. Otherwise,
+ * nr_unused_locks is incremented once too often.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ local_irq_disable();
+ lock_map_acquire(&timeline->lock.dep_map);
+ lock_map_release(&timeline->lock.dep_map);
+ local_irq_enable();
+#endif
+}
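A minimal illustration of what the subclass buys (a generic sketch with hypothetical tl_a/tl_b): lockdep treats two locks of one class taken together as a self-deadlock unless the inner acquisition is annotated with a distinct subclass:

    spin_lock(&tl_a->lock);                              /* subclass 0 */
    spin_lock_nested(&tl_b->lock, SINGLE_DEPTH_NESTING); /* subclass 1 */
    /* ... move work between the two timelines ... */
    spin_unlock(&tl_b->lock);
    spin_unlock(&tl_a->lock);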
+
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915, const char *name);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 395dd2511568..9726df37c4c4 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -44,16 +44,19 @@
__stringify(x), (long)(x))
#if defined(GCC_VERSION) && GCC_VERSION >= 70000
-#define add_overflows(A, B) \
- __builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
+#define add_overflows_t(T, A, B) \
+ __builtin_add_overflow_p((A), (B), (T)0)
#else
-#define add_overflows(A, B) ({ \
+#define add_overflows_t(T, A, B) ({ \
typeof(A) a = (A); \
typeof(B) b = (B); \
- a + b < a; \
+ (T)(a + b) < a; \
})
#endif
+#define add_overflows(A, B) \
+ add_overflows_t(typeof((A) + (B)), (A), (B))
+
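Usage sketch for the widened form (hypothetical caller): naming the type explicitly matters when the sum will be stored in something narrower than the promoted arithmetic type:

    static int check_range(u32 offset, u32 length)
    {
        /* reject ranges whose end would wrap the 32-bit space */
        if (add_overflows_t(u32, offset, length))
            return -EINVAL;

        return 0;
    }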
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \
@@ -68,7 +71,7 @@
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
- (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
+ (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
#define ptr_mask_bits(ptr, n) ({ \
unsigned long __v = (unsigned long)(ptr); \
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 31efc971a3a8..5b4d78cdb4ca 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -305,12 +305,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
GEM_BUG_ON(vma->size > vma->node.size);
- if (GEM_WARN_ON(range_overflows(vma->node.start,
- vma->node.size,
- vma->vm->total)))
+ if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
+ vma->node.size,
+ vma->vm->total)))
return -ENODEV;
- if (GEM_WARN_ON(!flags))
+ if (GEM_DEBUG_WARN_ON(!flags))
return -EINVAL;
bind_flags = 0;
@@ -892,7 +892,7 @@ static void export_fence(struct i915_vma *vma,
reservation_object_lock(resv, NULL);
if (flags & EXEC_OBJECT_WRITE)
reservation_object_add_excl_fence(resv, &rq->fence);
- else if (reservation_object_reserve_shared(resv) == 0)
+ else if (reservation_object_reserve_shared(resv, 1) == 0)
reservation_object_add_shared_fence(resv, &rq->fence);
reservation_object_unlock(resv);
}
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 13830e43a4d1..4dd793b78996 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -25,8 +25,277 @@
* Jani Nikula <jani.nikula@intel.com>
*/
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_atomic_helper.h>
#include "intel_dsi.h"
+static inline int header_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+ >> FREE_HEADER_CREDIT_SHIFT;
+}
+
+static inline int payload_credits_available(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+ >> FREE_PLOAD_CREDIT_SHIFT;
+}
+
+static void wait_for_header_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
+ MAX_HEADER_CREDIT, 100))
+ DRM_ERROR("DSI header credits not released\n");
+}
+
+static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans)
+{
+ if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
+ MAX_PLOAD_CREDIT, 100))
+ DRM_ERROR("DSI payload credits not released\n");
+}
+
+static enum transcoder dsi_port_to_transcoder(enum port port)
+{
+ if (port == PORT_A)
+ return TRANSCODER_DSI_0;
+ else
+ return TRANSCODER_DSI_1;
+}
+
+static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi;
+ enum port port;
+ enum transcoder dsi_trans;
+ int ret;
+
+ /* wait for header/payload credits to be released */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ wait_for_header_credits(dev_priv, dsi_trans);
+ wait_for_payload_credits(dev_priv, dsi_trans);
+ }
+
+ /* send nop DCS command */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi = intel_dsi->dsi_hosts[port]->device;
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+ dsi->channel = 0;
+ ret = mipi_dsi_dcs_nop(dsi);
+ if (ret < 0)
+ DRM_ERROR("error sending DCS NOP command\n");
+ }
+
+ /* wait for header credits to be released */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ wait_for_header_credits(dev_priv, dsi_trans);
+ }
+
+ /* wait for LP TX in progress bit to be cleared */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
+ LPTX_IN_PROGRESS), 20))
+ DRM_ERROR("LPTX bit not cleared\n");
+ }
+}
+
+static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
+ u32 len)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ int free_credits;
+ int i, j;
+
+ for (i = 0; i < len; i += 4) {
+ u32 tmp = 0;
+
+ free_credits = payload_credits_available(dev_priv, dsi_trans);
+ if (free_credits < 1) {
+ DRM_ERROR("Payload credit not available\n");
+ return false;
+ }
+
+ for (j = 0; j < min_t(u32, len - i, 4); j++)
+ tmp |= *data++ << 8 * j;
+
+ I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp);
+ }
+
+ return true;
+}
+
+static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
+ struct mipi_dsi_packet pkt, bool enable_lpdt)
+{
+ struct intel_dsi *intel_dsi = host->intel_dsi;
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
+ u32 tmp;
+ int free_credits;
+
+ /* check if header credit available */
+ free_credits = header_credits_available(dev_priv, dsi_trans);
+ if (free_credits < 1) {
+ DRM_ERROR("send pkt header failed, not enough hdr credits\n");
+ return -1;
+ }
+
+ tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans));
+
+ if (pkt.payload)
+ tmp |= PAYLOAD_PRESENT;
+ else
+ tmp &= ~PAYLOAD_PRESENT;
+
+ tmp &= ~VBLANK_FENCE;
+
+ if (enable_lpdt)
+ tmp |= LP_DATA_TRANSFER;
+
+ tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
+ tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT);
+ tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
+ tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
+ tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
+ I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp);
+
+ return 0;
+}
+
+static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+ struct mipi_dsi_packet pkt)
+{
+	/* the payload queue can accept at most 256 bytes, check the limit */
+ if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
+ DRM_ERROR("payload size exceeds max queue limit\n");
+ return -1;
+ }
+
+ /* load data into command payload queue */
+ if (!add_payld_to_queue(host, pkt.payload,
+ pkt.payload_length)) {
+ DRM_ERROR("adding payload to queue failed\n");
+ return -1;
+ }
+
+ return 0;
+}
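A sketch of how the two helpers above could pair up in a mipi_dsi_host_ops ->transfer() hook (assumed plumbing: to_intel_dsi_host() from intel_dsi.h; queueing the payload before the header is one plausible ordering, since the header write is what the hardware acts on):

    static ssize_t dsi_host_transfer_sketch(struct mipi_dsi_host *host,
                                            const struct mipi_dsi_msg *msg)
    {
        struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
        bool enable_lpdt = msg->flags & MIPI_DSI_MSG_USE_LPM;
        struct mipi_dsi_packet pkt;
        int ret;

        ret = mipi_dsi_create_packet(&pkt, msg);
        if (ret < 0)
            return ret;

        /* queue the payload first, then the header that triggers it */
        if (pkt.payload_length > 0) {
            ret = dsi_send_pkt_payld(intel_dsi_host, pkt);
            if (ret < 0)
                return ret;
        }

        ret = dsi_send_pkt_hdr(intel_dsi_host, pkt, enable_lpdt);
        if (ret < 0)
            return ret;

        return 4 + pkt.payload_length;  /* 4-byte header + payload */
    }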
+
+static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+ int lane;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /*
+ * Program voltage swing and pre-emphasis level values as per
+		 * table in BSPEC under DDI buffer programming
+ */
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+ tmp |= SCALING_MODE_SEL(0x2);
+ tmp |= TAP2_DISABLE | TAP3_DISABLE;
+ tmp |= RTERM_SELECT(0x6);
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
+ tmp |= SCALING_MODE_SEL(0x2);
+ tmp |= TAP2_DISABLE | TAP3_DISABLE;
+ tmp |= RTERM_SELECT(0x6);
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK);
+ tmp |= SWING_SEL_UPPER(0x2);
+ tmp |= SWING_SEL_LOWER(0x2);
+ tmp |= RCOMP_SCALAR(0x98);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
+ RCOMP_SCALAR_MASK);
+ tmp |= SWING_SEL_UPPER(0x2);
+ tmp |= SWING_SEL_LOWER(0x2);
+ tmp |= RCOMP_SCALAR(0x98);
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK);
+ tmp |= POST_CURSOR_1(0x0);
+ tmp |= POST_CURSOR_2(0x0);
+ tmp |= CURSOR_COEFF(0x3f);
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+
+ for (lane = 0; lane <= 3; lane++) {
+ /* Bspec: must not use GRP register for write */
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
+ CURSOR_COEFF_MASK);
+ tmp |= POST_CURSOR_1(0x0);
+ tmp |= POST_CURSOR_2(0x0);
+ tmp |= CURSOR_COEFF(0x3f);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ }
+ }
+}
+
+static void configure_dual_link_mode(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 dss_ctl1;
+
+ dss_ctl1 = I915_READ(DSS_CTL1);
+ dss_ctl1 |= SPLITTER_ENABLE;
+ dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
+ dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
+
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ u32 dss_ctl2;
+ u16 hactive = adjusted_mode->crtc_hdisplay;
+ u16 dl_buffer_depth;
+
+ dss_ctl1 &= ~DUAL_LINK_MODE_INTERLEAVE;
+ dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
+
+ if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
+			DRM_ERROR("DL buffer depth exceeds max value\n");
+
+ dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ dss_ctl2 = I915_READ(DSS_CTL2);
+ dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
+ dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
+ I915_WRITE(DSS_CTL2, dss_ctl2);
+ } else {
+ /* Interleave */
+ dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
+ }
+
+ I915_WRITE(DSS_CTL1, dss_ctl1);
+}
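A quick sanity check of the arithmetic above, with hypothetical panel numbers:

    /*
     * 1920-wide mode, front-back split, 4-pixel overlap:
     *   dl_buffer_depth = 1920 / 2 + 4 = 964
     *   MAX_DL_BUFFER_TARGET_DEPTH = 0x5a0 = 1440  ->  within range
     */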
+
static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -105,23 +374,1079 @@ static void gen11_dsi_power_up_lanes(struct intel_encoder *encoder)
}
}
-static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder)
+static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+ int lane;
+
+ /* Step 4b(i) set loadgen select for transmit and aux lanes */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW4_AUX(port));
+ tmp &= ~LOADGEN_SELECT;
+ I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
+ for (lane = 0; lane <= 3; lane++) {
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp &= ~LOADGEN_SELECT;
+ if (lane != 2)
+ tmp |= LOADGEN_SELECT;
+ I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ }
+ }
+
+ /* Step 4b(ii) set latency optimization for transmit and aux lanes */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW2_AUX(port));
+ tmp &= ~FRC_LATENCY_OPTIM_MASK;
+ tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+ I915_WRITE(ICL_PORT_TX_DW2_AUX(port), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW2_LN0(port));
+ tmp &= ~FRC_LATENCY_OPTIM_MASK;
+ tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
+ I915_WRITE(ICL_PORT_TX_DW2_GRP(port), tmp);
+ }
+}
+
+static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ /* clear common keeper enable bit */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
+ tmp &= ~COMMON_KEEPER_EN;
+ I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), tmp);
+ tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(port));
+ tmp &= ~COMMON_KEEPER_EN;
+ I915_WRITE(ICL_PORT_PCS_DW1_AUX(port), tmp);
+ }
+
+ /*
+ * Set SUS Clock Config bitfield to 11b
+	 * Note: loadgen select programming is done
+	 * as part of the lane phy sequence configuration
+ */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_CL_DW5(port));
+ tmp |= SUS_CLOCK_CONFIG;
+ I915_WRITE(ICL_PORT_CL_DW5(port), tmp);
+ }
+
+ /* Clear training enable to change swing values */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp &= ~TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp &= ~TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ }
+
+ /* Program swing and de-emphasis */
+ dsi_program_swing_and_deemphasis(encoder);
+
+ /* Set training enable to trigger update */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_PORT_TX_DW5_LN0(port));
+ tmp |= TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_GRP(port), tmp);
+ tmp = I915_READ(ICL_PORT_TX_DW5_AUX(port));
+ tmp |= TX_TRAINING_EN;
+ I915_WRITE(ICL_PORT_TX_DW5_AUX(port), tmp);
+ }
+}
+
+static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp |= DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+ if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE),
+ 500))
+			DRM_ERROR("DDI port:%c buffer still idle\n", port_name(port));
+ }
+}
+
+static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ /* Program T-INIT master registers */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
+ tmp &= ~MASTER_INIT_TIMER_MASK;
+ tmp |= intel_dsi->init_count;
+ I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
+ }
+
+ /* Program DPHY clock lanes timings */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+
+ /* shadow register inside display core */
+ I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+ }
+
+ /* Program DPHY data lanes timings */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
+
+ /* shadow register inside display core */
+ I915_WRITE(DSI_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
+ }
+
+ /*
+	 * If the DSI link is operating at or below 800 MHz,
+	 * TA_SURE should be overridden and programmed to
+	 * '0' inside TA_PARAM_REGISTERS; otherwise
+	 * leave all fields at their HW default values.
+ */
+ if (intel_dsi_bitrate(intel_dsi) <= 800000) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+
+ /* shadow register inside display core */
+ tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+ tmp &= ~TA_SURE_MASK;
+ tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
+ I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ }
+ }
+}
+
+static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+	for_each_dsi_port(port, intel_dsi->ports)
+		tmp |= DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+
+ I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ mutex_lock(&dev_priv->dpll_lock);
+ tmp = I915_READ(DPCLKA_CFGCR0_ICL);
+	for_each_dsi_port(port, intel_dsi->ports)
+		tmp &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
+
+ I915_WRITE(DPCLKA_CFGCR0_ICL, tmp);
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void gen11_dsi_map_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ enum port port;
+ u32 val;
+
+ mutex_lock(&dev_priv->dpll_lock);
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ }
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ POSTING_READ(DPCLKA_CFGCR0_ICL);
+
+ mutex_unlock(&dev_priv->dpll_lock);
+}
+
+static void
+gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
+ enum pipe pipe = intel_crtc->pipe;
+ u32 tmp;
+ enum port port;
+ enum transcoder dsi_trans;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+
+ if (intel_dsi->eotp_pkt)
+ tmp &= ~EOTP_DISABLED;
+ else
+ tmp |= EOTP_DISABLED;
+
+		/* enable link calibration if freq >= 1.5 Gbps */
+ if (intel_dsi_bitrate(intel_dsi) >= 1500 * 1000) {
+ tmp &= ~LINK_CALIBRATION_MASK;
+ tmp |= CALIBRATION_ENABLED_INITIAL_ONLY;
+ }
+
+ /* configure continuous clock */
+ tmp &= ~CONTINUOUS_CLK_MASK;
+ if (intel_dsi->clock_stop)
+ tmp |= CLK_ENTER_LP_AFTER_DATA;
+ else
+ tmp |= CLK_HS_CONTINUOUS;
+
+ /* configure buffer threshold limit to minimum */
+ tmp &= ~PIX_BUF_THRESHOLD_MASK;
+ tmp |= PIX_BUF_THRESHOLD_1_4;
+
+ /* set virtual channel to '0' */
+ tmp &= ~PIX_VIRT_CHAN_MASK;
+ tmp |= PIX_VIRT_CHAN(0);
+
+ /* program BGR transmission */
+ if (intel_dsi->bgr_enabled)
+ tmp |= BGR_TRANSMISSION;
+
+ /* select pixel format */
+ tmp &= ~PIX_FMT_MASK;
+ switch (intel_dsi->pixel_format) {
+ default:
+ MISSING_CASE(intel_dsi->pixel_format);
+ /* fallthrough */
+ case MIPI_DSI_FMT_RGB565:
+ tmp |= PIX_FMT_RGB565;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ tmp |= PIX_FMT_RGB666_PACKED;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ tmp |= PIX_FMT_RGB666_LOOSE;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ tmp |= PIX_FMT_RGB888;
+ break;
+ }
+
+ /* program DSI operation mode */
+ if (is_vid_mode(intel_dsi)) {
+ tmp &= ~OP_MODE_MASK;
+ switch (intel_dsi->video_mode_format) {
+ default:
+ MISSING_CASE(intel_dsi->video_mode_format);
+ /* fallthrough */
+ case VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS:
+ tmp |= VIDEO_MODE_SYNC_EVENT;
+ break;
+ case VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE:
+ tmp |= VIDEO_MODE_SYNC_PULSE;
+ break;
+ }
+ }
+
+ I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+ }
+
+ /* enable port sync mode if dual link */
+ if (intel_dsi->dual_link) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp |= PORT_SYNC_MODE_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ }
+
+ /* configure stream splitting */
+ configure_dual_link_mode(encoder, pipe_config);
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* select data lane width */
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp &= ~DDI_PORT_WIDTH_MASK;
+ tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
+
+ /* select input pipe */
+ tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
+ switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ /* fallthrough */
+ case PIPE_A:
+ tmp |= TRANS_DDI_EDP_INPUT_A_ON;
+ break;
+ case PIPE_B:
+ tmp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
+ break;
+ case PIPE_C:
+ tmp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
+ break;
+ }
+
+ /* enable DDI buffer */
+ tmp |= TRANS_DDI_FUNC_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ }
+
+ /* wait for link ready */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
+ LINK_READY), 2500))
+ DRM_ERROR("DSI link not ready\n");
+ }
+}
+
+static void
+gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ enum port port;
+ enum transcoder dsi_trans;
+ /* horizontal timings */
+ u16 htotal, hactive, hsync_start, hsync_end, hsync_size;
+ u16 hfront_porch, hback_porch;
+ /* vertical timings */
+ u16 vtotal, vactive, vsync_start, vsync_end, vsync_shift;
+
+ hactive = adjusted_mode->crtc_hdisplay;
+ htotal = adjusted_mode->crtc_htotal;
+ hsync_start = adjusted_mode->crtc_hsync_start;
+ hsync_end = adjusted_mode->crtc_hsync_end;
+ hsync_size = hsync_end - hsync_start;
+ hfront_porch = (adjusted_mode->crtc_hsync_start -
+ adjusted_mode->crtc_hdisplay);
+ hback_porch = (adjusted_mode->crtc_htotal -
+ adjusted_mode->crtc_hsync_end);
+ vactive = adjusted_mode->crtc_vdisplay;
+ vtotal = adjusted_mode->crtc_vtotal;
+ vsync_start = adjusted_mode->crtc_vsync_start;
+ vsync_end = adjusted_mode->crtc_vsync_end;
+ vsync_shift = hsync_start - htotal / 2;
+
+ if (intel_dsi->dual_link) {
+ hactive /= 2;
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
+ hactive += intel_dsi->pixel_overlap;
+ htotal /= 2;
+ }
+
+ /* minimum hactive as per bspec: 256 pixels */
+ if (adjusted_mode->crtc_hdisplay < 256)
+		DRM_ERROR("hactive is less than 256 pixels\n");
+
+ /* if RGB666 format, then hactive must be multiple of 4 pixels */
+ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
+		DRM_ERROR("hactive pixels are not a multiple of 4\n");
+
+ /* program TRANS_HTOTAL register */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(HTOTAL(dsi_trans),
+ (hactive - 1) | ((htotal - 1) << 16));
+ }
+
+ /* TRANS_HSYNC register to be programmed only for video mode */
+ if (intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE) {
+ if (intel_dsi->video_mode_format ==
+ VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
+			/* BSPEC: hsync size should be at least 16 pixels */
+ if (hsync_size < 16)
+ DRM_ERROR("hsync size < 16 pixels\n");
+ }
+
+ if (hback_porch < 16)
+ DRM_ERROR("hback porch < 16 pixels\n");
+
+ if (intel_dsi->dual_link) {
+ hsync_start /= 2;
+ hsync_end /= 2;
+ }
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(HSYNC(dsi_trans),
+ (hsync_start - 1) | ((hsync_end - 1) << 16));
+ }
+ }
+
+ /* program TRANS_VTOTAL register */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ /*
+		 * FIXME: Programming this by assuming progressive mode, since
+		 * non-interlaced info from VBT is not saved inside
+		 * struct drm_display_mode.
+		 * For interlaced modes: program the required pixel count minus 2.
+ */
+ I915_WRITE(VTOTAL(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
+ }
+
+ if (vsync_end < vsync_start || vsync_end > vtotal)
+ DRM_ERROR("Invalid vsync_end value\n");
+
+ if (vsync_start < vactive)
+ DRM_ERROR("vsync_start less than vactive\n");
+
+ /* program TRANS_VSYNC register */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(VSYNC(dsi_trans),
+ (vsync_start - 1) | ((vsync_end - 1) << 16));
+ }
+
+ /*
+ * FIXME: It has to be programmed only for interlaced
+ * modes. Put the check condition here once interlaced
+ * info available as described above.
+ * program TRANS_VSYNCSHIFT register
+ */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
+ }
+}
+
+static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp |= PIPECONF_ENABLE;
+ I915_WRITE(PIPECONF(dsi_trans), tmp);
+
+ /* wait for transcoder to be enabled */
+ if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE,
+ I965_PIPECONF_ACTIVE, 10))
+ DRM_ERROR("DSI transcoder not enabled\n");
+ }
+}
+
+static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp, hs_tx_timeout, lp_rx_timeout, ta_timeout, divisor, mul;
+
+ /*
+ * escape clock count calculation:
+ * BYTE_CLK_COUNT = TIME_NS/(8 * UI)
+ * UI (nsec) = (10^6)/Bitrate
+ * TIME_NS = (BYTE_CLK_COUNT * 8 * 10^6)/ Bitrate
+ * ESCAPE_CLK_COUNT = TIME_NS/ESC_CLK_NS
+ */
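+ /*
+ * Combining the above (a sketch of the arithmetic below, with Bitrate
+ * from intel_dsi_bitrate() and ESC_CLK_NS from intel_dsi_tlpx_ns()):
+ * ESCAPE_CLK_COUNT = (BYTE_CLK_COUNT * 8 * 10^6) /
+ * (ESC_CLK_NS * Bitrate * 1000),
+ * hence mul = 8 * 10^6 and divisor = ESC_CLK_NS * Bitrate * 1000.
+ */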
+ divisor = intel_dsi_tlpx_ns(intel_dsi) * intel_dsi_bitrate(intel_dsi) * 1000;
+ mul = 8 * 1000000;
+ hs_tx_timeout = DIV_ROUND_UP(intel_dsi->hs_tx_timeout * mul,
+ divisor);
+ lp_rx_timeout = DIV_ROUND_UP(intel_dsi->lp_rx_timeout * mul, divisor);
+ ta_timeout = DIV_ROUND_UP(intel_dsi->turn_arnd_val * mul, divisor);
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* program hst_tx_timeout */
+ tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
+ tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
+ tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
+ I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
+
+ /* FIXME: DSI_CALIB_TO */
+
+ /* program lp_rx_host timeout */
+ tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
+ tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
+ tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
+ I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
+
+ /* FIXME: DSI_PWAIT_TO */
+
+ /* program turn around timeout */
+ tmp = I915_READ(DSI_TA_TO(dsi_trans));
+ tmp &= ~TA_TIMEOUT_VALUE_MASK;
+ tmp |= TA_TIMEOUT_VALUE(ta_timeout);
+ I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
+ }
+}
+
+static void
+gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
{
/* step 4a: power up all lanes of the DDI used by DSI */
gen11_dsi_power_up_lanes(encoder);
+
+ /* step 4b: configure lane sequencing of the Combo-PHY transmitters */
+ gen11_dsi_config_phy_lanes_sequence(encoder);
+
+ /* step 4c: configure voltage swing and skew */
+ gen11_dsi_voltage_swing_program_seq(encoder);
+
+ /* enable DDI buffer */
+ gen11_dsi_enable_ddi_buffer(encoder);
+
+ /* setup D-PHY timings */
+ gen11_dsi_setup_dphy_timings(encoder);
+
+ /* step 4h: setup DSI protocol timeouts */
+ gen11_dsi_setup_timeouts(encoder);
+
+ /* Step (4h, 4i, 4j, 4k): Configure transcoder */
+ gen11_dsi_configure_transcoder(encoder, pipe_config);
+
+ /* Step 4l: Gate DDI clocks */
+ gen11_dsi_gate_clocks(encoder);
}
-static void __attribute__((unused))
-gen11_dsi_pre_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ struct mipi_dsi_device *dsi;
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+ int ret;
+
+ /* set maximum return packet size */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /*
+ * FIXME: This uses the number of DWs currently in the payload
+ * receive queue. This is probably not what we want here.
+ */
+ tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
+ tmp &= NUMBER_RX_PLOAD_DW_MASK;
+ /* multiply "Number Rx Payload DW" by 4 to get max value */
+ tmp = tmp * 4;
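+ /* e.g. (illustrative) 8 DWs in the RX queue -> 32 byte max packet */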
+ dsi = intel_dsi->dsi_hosts[port]->device;
+ ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
+ if (ret < 0)
+ DRM_ERROR("error setting max return pkt size%d\n", tmp);
+ }
+
+ /* panel power on related mipi dsi vbt sequences */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
+ /* ensure all panel commands dispatched before enabling transcoder */
+ wait_for_cmds_dispatched_to_panel(encoder);
+}
+
+static void gen11_dsi_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
/* step2: enable IO power */
gen11_dsi_enable_io_power(encoder);
/* step3: enable DSI PLL */
gen11_dsi_program_esc_clk_div(encoder);
+}
+
+static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ /* step3b */
+ gen11_dsi_map_pll(encoder, pipe_config);
/* step4: enable DSI port and DPHY */
- gen11_dsi_enable_port_and_phy(encoder);
+ gen11_dsi_enable_port_and_phy(encoder, pipe_config);
+
+ /* step5: program and powerup panel */
+ gen11_dsi_powerup_panel(encoder);
+
+ /* step6c: configure transcoder timings */
+ gen11_dsi_set_transcoder_timings(encoder, pipe_config);
+
+ /* step6d: enable dsi transcoder */
+ gen11_dsi_enable_transcoder(encoder);
+
+ /* step7: enable backlight */
+ intel_panel_enable_backlight(pipe_config, conn_state);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+}
+
+static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+
+ /* disable transcoder */
+ tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp &= ~PIPECONF_ENABLE;
+ I915_WRITE(PIPECONF(dsi_trans), tmp);
+
+ /* wait for transcoder to be disabled */
+ if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ I965_PIPECONF_ACTIVE, 0, 50))
+ DRM_ERROR("DSI trancoder not disabled\n");
+ }
+}
+
+static void gen11_dsi_powerdown_panel(struct intel_encoder *encoder)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
+
+ /* ensure cmds dispatched to panel */
+ wait_for_cmds_dispatched_to_panel(encoder);
+}
+
+static void gen11_dsi_deconfigure_transcoder(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ enum transcoder dsi_trans;
+ u32 tmp;
+
+ /* put dsi link in ULPS */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(DSI_LP_MSG(dsi_trans));
+ tmp |= LINK_ENTER_ULPS;
+ tmp &= ~LINK_ULPS_TYPE_LP11;
+ I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
+
+ if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
+ LINK_IN_ULPS),
+ 10))
+ DRM_ERROR("DSI link not in ULPS\n");
+ }
+
+ /* disable ddi function */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp &= ~TRANS_DDI_FUNC_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ }
+
+ /* disable port sync mode if dual link */
+ if (intel_dsi->dual_link) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp &= ~PORT_SYNC_MODE_ENABLE;
+ I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ }
+ }
+}
+
+static void gen11_dsi_disable_port(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+
+ gen11_dsi_ungate_clocks(encoder);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp &= ~DDI_BUF_CTL_ENABLE;
+ I915_WRITE(DDI_BUF_CTL(port), tmp);
+
+ if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_IS_IDLE),
+ 8))
+ DRM_ERROR("DDI port:%c buffer not idle\n",
+ port_name(port));
+ }
+ gen11_dsi_gate_clocks(encoder);
+}
+
+static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
+
+ if (intel_dsi->dual_link)
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
+
+ /* set mode to DDI */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp &= ~COMBO_PHY_MODE_DSI;
+ I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ }
+}
+
+static void gen11_dsi_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+
+ /* step1: turn off backlight */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
+ intel_panel_disable_backlight(old_conn_state);
+
+ /* step2d,e: disable transcoder and wait */
+ gen11_dsi_disable_transcoder(encoder);
+
+ /* step2f,g: powerdown panel */
+ gen11_dsi_powerdown_panel(encoder);
+
+ /* step2h,i,j: deconfigure transcoder */
+ gen11_dsi_deconfigure_transcoder(encoder);
+
+ /* step3: disable port */
+ gen11_dsi_disable_port(encoder);
+
+ /* step4: disable IO power */
+ gen11_dsi_disable_io_power(encoder);
+}
+
+static void gen11_dsi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 pll_id;
+
+ /* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
+ pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+}
+
+static bool gen11_dsi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
+ base);
+ struct intel_connector *intel_connector = intel_dsi->attached_connector;
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
+ const struct drm_display_mode *fixed_mode =
+ intel_connector->panel.fixed_mode;
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ intel_pch_panel_fitting(crtc, pipe_config, conn_state->scaling_mode);
+
+ adjusted_mode->flags = 0;
+
+ /* Dual link goes to transcoder DSI '0' */
+ if (intel_dsi->ports == BIT(PORT_B))
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_1;
+ else
+ pipe_config->cpu_transcoder = TRANSCODER_DSI_0;
+
+ pipe_config->clock_set = true;
+ pipe_config->port_clock = intel_dsi_bitrate(intel_dsi) / 5;
+
+ return true;
+}
+
+static u64 gen11_dsi_get_power_domains(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u64 domains = 0;
+ enum port port;
+
+ for_each_dsi_port(port, intel_dsi->ports)
+ if (port == PORT_A)
+ domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO);
+ else
+ domains |= BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO);
+
+ return domains;
+}
+
+static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ u32 tmp;
+ enum port port;
+ enum transcoder dsi_trans;
+ bool ret = false;
+
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
+ return false;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ dsi_trans = dsi_port_to_transcoder(port);
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ case TRANS_DDI_EDP_INPUT_A_ON:
+ *pipe = PIPE_A;
+ break;
+ case TRANS_DDI_EDP_INPUT_B_ONOFF:
+ *pipe = PIPE_B;
+ break;
+ case TRANS_DDI_EDP_INPUT_C_ONOFF:
+ *pipe = PIPE_C;
+ break;
+ default:
+ DRM_ERROR("Invalid PIPE input\n");
+ goto out;
+ }
+
+ tmp = I915_READ(PIPECONF(dsi_trans));
+ ret = tmp & PIPECONF_ENABLE;
+ }
+out:
+ intel_display_power_put(dev_priv, encoder->power_domain);
+ return ret;
+}
+
+static void gen11_dsi_encoder_destroy(struct drm_encoder *encoder)
+{
+ intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs gen11_dsi_encoder_funcs = {
+ .destroy = gen11_dsi_encoder_destroy,
+};
+
+static const struct drm_connector_funcs gen11_dsi_connector_funcs = {
+ .late_register = intel_connector_register,
+ .early_unregister = intel_connector_unregister,
+ .destroy = intel_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
+};
+
+static const struct drm_connector_helper_funcs gen11_dsi_connector_helper_funcs = {
+ .get_modes = intel_dsi_get_modes,
+ .mode_valid = intel_dsi_mode_valid,
+ .atomic_check = intel_digital_connector_atomic_check,
+};
+
+static int gen11_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static int gen11_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
+ struct mipi_dsi_packet dsi_pkt;
+ ssize_t ret;
+ bool enable_lpdt = false;
+
+ ret = mipi_dsi_create_packet(&dsi_pkt, msg);
+ if (ret < 0)
+ return ret;
+
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+ enable_lpdt = true;
+
+ /* send packet header */
+ ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt);
+ if (ret < 0)
+ return ret;
+
+ /* only long packet contains payload */
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* TODO: add payload receive code if needed */
+
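+ /* on success, report bytes sent: 4 byte packet header plus payload */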
+ ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length;
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops gen11_dsi_host_ops = {
+ .attach = gen11_dsi_host_attach,
+ .detach = gen11_dsi_host_detach,
+ .transfer = gen11_dsi_host_transfer,
+};
+
+void icl_dsi_init(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_dsi *intel_dsi;
+ struct intel_encoder *encoder;
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+ struct drm_display_mode *scan, *fixed_mode = NULL;
+ enum port port;
+
+ if (!intel_bios_is_dsi_present(dev_priv, &port))
+ return;
+
+ intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL);
+ if (!intel_dsi)
+ return;
+
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
+ kfree(intel_dsi);
+ return;
+ }
+
+ encoder = &intel_dsi->base;
+ intel_dsi->attached_connector = intel_connector;
+ connector = &intel_connector->base;
+
+ /* register DSI encoder with DRM subsystem */
+ drm_encoder_init(dev, &encoder->base, &gen11_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port));
+
+ encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
+ encoder->pre_enable = gen11_dsi_pre_enable;
+ encoder->disable = gen11_dsi_disable;
+ encoder->port = port;
+ encoder->get_config = gen11_dsi_get_config;
+ encoder->compute_config = gen11_dsi_compute_config;
+ encoder->get_hw_state = gen11_dsi_get_hw_state;
+ encoder->type = INTEL_OUTPUT_DSI;
+ encoder->cloneable = 0;
+ encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
+ encoder->power_domain = POWER_DOMAIN_PORT_DSI;
+ encoder->get_power_domains = gen11_dsi_get_power_domains;
+
+ /* register DSI connector with DRM subsystem */
+ drm_connector_init(dev, connector, &gen11_dsi_connector_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_connector_helper_add(connector, &gen11_dsi_connector_helper_funcs);
+ connector->display_info.subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+ intel_connector->get_hw_state = intel_connector_get_hw_state;
+
+ /* attach connector to encoder */
+ intel_connector_attach_encoder(intel_connector, encoder);
+
+ /* fill mode info from VBT */
+ mutex_lock(&dev->mode_config.mutex);
+ intel_dsi_vbt_get_modes(intel_dsi);
+ list_for_each_entry(scan, &connector->probed_modes, head) {
+ if (scan->type & DRM_MODE_TYPE_PREFERRED) {
+ fixed_mode = drm_mode_duplicate(dev, scan);
+ break;
+ }
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (!fixed_mode) {
+ DRM_ERROR("DSI fixed mode info missing\n");
+ goto err;
+ }
+
+ connector->display_info.width_mm = fixed_mode->width_mm;
+ connector->display_info.height_mm = fixed_mode->height_mm;
+ intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
+ intel_panel_setup_backlight(connector, INVALID_PIPE);
+
+ if (dev_priv->vbt.dsi.config->dual_link)
+ intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
+ else
+ intel_dsi->ports = BIT(port);
+
+ intel_dsi->dcs_backlight_ports = dev_priv->vbt.dsi.bl_ports;
+ intel_dsi->dcs_cabc_ports = dev_priv->vbt.dsi.cabc_ports;
+
+ for_each_dsi_port(port, intel_dsi->ports) {
+ struct intel_dsi_host *host;
+
+ host = intel_dsi_host_init(intel_dsi, &gen11_dsi_host_ops, port);
+ if (!host)
+ goto err;
+
+ intel_dsi->dsi_hosts[port] = host;
+ }
+
+ if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
+ DRM_DEBUG_KMS("no device found\n");
+ goto err;
+ }
+
+ return;
+
+err:
+ drm_encoder_cleanup(&encoder->base);
+ kfree(intel_dsi);
+ kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index b04952bacf77..8cb02f28d30c 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -184,6 +184,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->fifo_changed = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
+ crtc_state->update_planes = 0;
return &crtc_state->base;
}
@@ -203,6 +204,72 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
drm_atomic_helper_crtc_destroy_state(crtc, state);
}
+static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+ int num_scalers_need, struct intel_crtc *intel_crtc,
+ const char *name, int idx,
+ struct intel_plane_state *plane_state,
+ int *scaler_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ int j;
+ u32 mode;
+
+ if (*scaler_id < 0) {
+ /* find a free scaler */
+ for (j = 0; j < intel_crtc->num_scalers; j++) {
+ if (scaler_state->scalers[j].in_use)
+ continue;
+
+ *scaler_id = j;
+ scaler_state->scalers[*scaler_id].in_use = 1;
+ break;
+ }
+ }
+
+ if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
+ return;
+
+ /* set scaler mode */
+ if (plane_state && plane_state->base.fb &&
+ plane_state->base.fb->format->is_yuv &&
+ plane_state->base.fb->format->num_planes > 1) {
+ if (IS_GEN9(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv)) {
+ mode = SKL_PS_SCALER_MODE_NV12;
+ } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
+ /*
+ * On gen11+'s HDR planes we only use the scaler for
+ * scaling. They have a dedicated chroma upsampler, so
+ * we don't need the scaler to upsample the UV plane.
+ */
+ mode = PS_SCALER_MODE_NORMAL;
+ } else {
+ mode = PS_SCALER_MODE_PLANAR;
+
+ if (plane_state->linked_plane)
+ mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id);
+ }
+ } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) {
+ mode = PS_SCALER_MODE_NORMAL;
+ } else if (num_scalers_need == 1 && intel_crtc->num_scalers > 1) {
+ /*
+ * When only 1 scaler is in use on a pipe with 2 scalers,
+ * scaler 0 operates in high quality (HQ) mode. In this
+ * case use scaler 0 to take advantage of HQ mode.
+ */
+ scaler_state->scalers[*scaler_id].in_use = 0;
+ *scaler_id = 0;
+ scaler_state->scalers[0].in_use = 1;
+ mode = SKL_PS_SCALER_MODE_HQ;
+ } else {
+ mode = SKL_PS_SCALER_MODE_DYN;
+ }
+
+ DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
+ intel_crtc->pipe, *scaler_id, name, idx);
+ scaler_state->scalers[*scaler_id].mode = mode;
+}
+
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
* @dev_priv: i915 device
@@ -232,7 +299,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct drm_atomic_state *drm_state = crtc_state->base.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(drm_state);
int num_scalers_need;
- int i, j;
+ int i;
num_scalers_need = hweight32(scaler_state->scaler_users);
@@ -304,59 +371,17 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
idx = plane->base.id;
/* plane on different crtc cannot be a scaler user of this crtc */
- if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
+ if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
continue;
- }
plane_state = intel_atomic_get_new_plane_state(intel_state,
intel_plane);
scaler_id = &plane_state->scaler_id;
}
- if (*scaler_id < 0) {
- /* find a free scaler */
- for (j = 0; j < intel_crtc->num_scalers; j++) {
- if (!scaler_state->scalers[j].in_use) {
- scaler_state->scalers[j].in_use = 1;
- *scaler_id = j;
- DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe, *scaler_id, name, idx);
- break;
- }
- }
- }
-
- if (WARN_ON(*scaler_id < 0)) {
- DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
- continue;
- }
-
- /* set scaler mode */
- if ((INTEL_GEN(dev_priv) >= 9) &&
- plane_state && plane_state->base.fb &&
- plane_state->base.fb->format->format ==
- DRM_FORMAT_NV12) {
- if (INTEL_GEN(dev_priv) == 9 &&
- !IS_GEMINILAKE(dev_priv) &&
- !IS_SKYLAKE(dev_priv))
- scaler_state->scalers[*scaler_id].mode =
- SKL_PS_SCALER_MODE_NV12;
- else
- scaler_state->scalers[*scaler_id].mode =
- PS_SCALER_MODE_PLANAR;
- } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
- /*
- * when only 1 scaler is in use on either pipe A or B,
- * scaler 0 operates in high quality (HQ) mode.
- * In this case use scaler 0 to take advantage of HQ mode
- */
- *scaler_id = 0;
- scaler_state->scalers[0].in_use = 1;
- scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
- scaler_state->scalers[1].in_use = 0;
- } else {
- scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
- }
+ intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+ intel_crtc, name, idx,
+ plane_state, scaler_id);
}
return 0;
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index aabebe0d2e9b..0a73e6e65c20 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -36,28 +36,31 @@
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
-/**
- * intel_create_plane_state - create plane state object
- * @plane: drm plane
- *
- * Allocates a fresh plane state for the given plane and sets some of
- * the state values to sensible initial values.
- *
- * Returns: A newly allocated plane state, or NULL on failure
- */
-struct intel_plane_state *
-intel_create_plane_state(struct drm_plane *plane)
+struct intel_plane *intel_plane_alloc(void)
{
- struct intel_plane_state *state;
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
- return NULL;
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
- state->base.plane = plane;
- state->base.rotation = DRM_MODE_ROTATE_0;
+ plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
+ if (!plane_state) {
+ kfree(plane);
+ return ERR_PTR(-ENOMEM);
+ }
- return state;
+ __drm_atomic_helper_plane_reset(&plane->base, &plane_state->base);
+ plane_state->scaler_id = -1;
+
+ return plane;
+}
+
+void intel_plane_free(struct intel_plane *plane)
+{
+ intel_plane_destroy_state(&plane->base, plane->base.state);
+ kfree(plane);
}
/**
@@ -117,10 +120,14 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret;
+ crtc_state->active_planes &= ~BIT(intel_plane->id);
+ crtc_state->nv12_planes &= ~BIT(intel_plane->id);
+ intel_state->base.visible = false;
+
+ /* If the plane is on no crtc before or after, no further checks are needed. */
if (!intel_state->base.crtc && !old_plane_state->base.crtc)
return 0;
- intel_state->base.visible = false;
ret = intel_plane->check_plane(crtc_state, intel_state);
if (ret)
return ret;
@@ -128,13 +135,12 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
/* FIXME pre-g4x don't work like this */
if (state->visible)
crtc_state->active_planes |= BIT(intel_plane->id);
- else
- crtc_state->active_planes &= ~BIT(intel_plane->id);
if (state->visible && state->fb->format->format == DRM_FORMAT_NV12)
crtc_state->nv12_planes |= BIT(intel_plane->id);
- else
- crtc_state->nv12_planes &= ~BIT(intel_plane->id);
+
+ if (state->visible || old_plane_state->base.visible)
+ crtc_state->update_planes |= BIT(intel_plane->id);
return intel_plane_atomic_calc_changes(old_crtc_state,
&crtc_state->base,
@@ -152,6 +158,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
const struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
+ new_plane_state->visible = false;
if (!crtc)
return 0;
@@ -164,29 +171,123 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
to_intel_plane_state(new_plane_state));
}
-static void intel_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static struct intel_plane *
+skl_next_plane_to_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct skl_ddb_entry entries_y[I915_MAX_PLANES],
+ struct skl_ddb_entry entries_uv[I915_MAX_PLANES],
+ unsigned int *update_mask)
{
- struct intel_atomic_state *state = to_intel_atomic_state(old_state->state);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- const struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(state, intel_plane);
- struct drm_crtc *crtc = new_plane_state->base.crtc ?: old_state->crtc;
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ int i;
+
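+ /*
+ * Pick the next plane whose new DDB allocation does not overlap any
+ * allocation still live in hardware; entries_y/entries_uv track what
+ * is currently programmed. Committing in this order avoids two planes
+ * fetching from the same DDB region mid-update.
+ */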
+ if (*update_mask == 0)
+ return NULL;
- if (new_plane_state->base.visible) {
- const struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(state, to_intel_crtc(crtc));
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ enum plane_id plane_id = plane->id;
- trace_intel_update_plane(plane,
- to_intel_crtc(crtc));
+ if (crtc->pipe != plane->pipe ||
+ !(*update_mask & BIT(plane_id)))
+ continue;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_y[plane_id],
+ entries_y,
+ I915_MAX_PLANES, plane_id) ||
+ skl_ddb_allocation_overlaps(&crtc_state->wm.skl.plane_ddb_uv[plane_id],
+ entries_uv,
+ I915_MAX_PLANES, plane_id))
+ continue;
+
+ *update_mask &= ~BIT(plane_id);
+ entries_y[plane_id] = crtc_state->wm.skl.plane_ddb_y[plane_id];
+ entries_uv[plane_id] = crtc_state->wm.skl.plane_ddb_uv[plane_id];
+
+ return plane;
+ }
- intel_plane->update_plane(intel_plane,
- new_crtc_state, new_plane_state);
- } else {
- trace_intel_disable_plane(plane,
- to_intel_crtc(crtc));
+ /* should never happen */
+ WARN_ON(1);
- intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+ return NULL;
+}
+
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct skl_ddb_entry entries_y[I915_MAX_PLANES];
+ struct skl_ddb_entry entries_uv[I915_MAX_PLANES];
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane *plane;
+
+ memcpy(entries_y, old_crtc_state->wm.skl.plane_ddb_y,
+ sizeof(old_crtc_state->wm.skl.plane_ddb_y));
+ memcpy(entries_uv, old_crtc_state->wm.skl.plane_ddb_uv,
+ sizeof(old_crtc_state->wm.skl.plane_ddb_uv));
+
+ while ((plane = skl_next_plane_to_commit(state, crtc,
+ entries_y, entries_uv,
+ &update_mask))) {
+ struct intel_plane_state *new_plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
+
+ if (new_plane_state->base.visible) {
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_plane(plane, new_crtc_state, new_plane_state);
+ } else if (new_plane_state->slave) {
+ struct intel_plane *master =
+ new_plane_state->linked_plane;
+
+ /*
+ * We update the slave plane from this function because
+ * programming it from the master plane's update_plane
+ * callback runs into issues when the Y plane is
+ * reassigned, disabled or used by a different plane.
+ *
+ * The slave plane is updated with the master plane's
+ * plane_state.
+ */
+ new_plane_state =
+ intel_atomic_get_new_plane_state(state, master);
+
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_slave(plane, new_crtc_state, new_plane_state);
+ } else {
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, new_crtc_state);
+ }
+ }
+}
+
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+
+ if (new_plane_state->base.visible) {
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_plane(plane, new_crtc_state, new_plane_state);
+ } else {
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, new_crtc_state);
+ }
}
}
@@ -194,7 +295,6 @@ const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
.atomic_check = intel_plane_atomic_check,
- .atomic_update = intel_plane_atomic_update,
};
/**
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ee3ca2de983b..ae55a6865d5c 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -153,32 +153,32 @@ static const struct {
int n;
int cts;
} hdmi_aud_ncts[] = {
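+ /*
+ * Per the HDMI spec, f_audio = f_TMDS * N / (128 * CTS); e.g.
+ * (illustrative) 296.703 MHz * 5824 / (128 * 421875) ~= 32 kHz,
+ * matching the first entry below.
+ */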
- { 44100, TMDS_296M, 4459, 234375 },
- { 44100, TMDS_297M, 4704, 247500 },
- { 48000, TMDS_296M, 5824, 281250 },
- { 48000, TMDS_297M, 5120, 247500 },
{ 32000, TMDS_296M, 5824, 421875 },
{ 32000, TMDS_297M, 3072, 222750 },
+ { 32000, TMDS_593M, 5824, 843750 },
+ { 32000, TMDS_594M, 3072, 445500 },
+ { 44100, TMDS_296M, 4459, 234375 },
+ { 44100, TMDS_297M, 4704, 247500 },
+ { 44100, TMDS_593M, 8918, 937500 },
+ { 44100, TMDS_594M, 9408, 990000 },
{ 88200, TMDS_296M, 8918, 234375 },
{ 88200, TMDS_297M, 9408, 247500 },
- { 96000, TMDS_296M, 11648, 281250 },
- { 96000, TMDS_297M, 10240, 247500 },
+ { 88200, TMDS_593M, 17836, 937500 },
+ { 88200, TMDS_594M, 18816, 990000 },
{ 176400, TMDS_296M, 17836, 234375 },
{ 176400, TMDS_297M, 18816, 247500 },
- { 192000, TMDS_296M, 23296, 281250 },
- { 192000, TMDS_297M, 20480, 247500 },
- { 44100, TMDS_593M, 8918, 937500 },
- { 44100, TMDS_594M, 9408, 990000 },
+ { 176400, TMDS_593M, 35672, 937500 },
+ { 176400, TMDS_594M, 37632, 990000 },
+ { 48000, TMDS_296M, 5824, 281250 },
+ { 48000, TMDS_297M, 5120, 247500 },
{ 48000, TMDS_593M, 5824, 562500 },
{ 48000, TMDS_594M, 6144, 594000 },
- { 32000, TMDS_593M, 5824, 843750 },
- { 32000, TMDS_594M, 3072, 445500 },
- { 88200, TMDS_593M, 17836, 937500 },
- { 88200, TMDS_594M, 18816, 990000 },
+ { 96000, TMDS_296M, 11648, 281250 },
+ { 96000, TMDS_297M, 10240, 247500 },
{ 96000, TMDS_593M, 11648, 562500 },
{ 96000, TMDS_594M, 12288, 594000 },
- { 176400, TMDS_593M, 35672, 937500 },
- { 176400, TMDS_594M, 37632, 990000 },
+ { 192000, TMDS_296M, 23296, 281250 },
+ { 192000, TMDS_297M, 20480, 247500 },
{ 192000, TMDS_593M, 23296, 562500 },
{ 192000, TMDS_594M, 24576, 594000 },
};
@@ -929,6 +929,9 @@ static int i915_audio_component_bind(struct device *i915_kdev,
if (WARN_ON(acomp->base.ops || acomp->base.dev))
return -EEXIST;
+ if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
+ return -ENOMEM;
+
drm_modeset_lock_all(&dev_priv->drm);
acomp->base.ops = &i915_audio_component_ops;
acomp->base.dev = i915_kdev;
@@ -952,6 +955,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
acomp->base.dev = NULL;
dev_priv->audio_component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
+
+ device_link_remove(hda_kdev, i915_kdev);
}
static const struct component_ops i915_audio_component_bind_ops = {
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1faa494e2bc9..6d3e0260d49c 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -420,6 +420,13 @@ parse_general_features(struct drm_i915_private *dev_priv,
intel_bios_ssc_frequency(dev_priv, general->ssc_freq);
dev_priv->vbt.display_clock_mode = general->display_clock_mode;
dev_priv->vbt.fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ if (bdb->version >= 181) {
+ dev_priv->vbt.orientation = general->rotate_180 ?
+ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP :
+ DRM_MODE_PANEL_ORIENTATION_NORMAL;
+ } else {
+ dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ }
DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
dev_priv->vbt.int_tv_support,
dev_priv->vbt.int_crt_support,
@@ -852,6 +859,30 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
parse_dsi_backlight_ports(dev_priv, bdb->version, port);
+ /* FIXME is the 90 vs. 270 correct? */
+ switch (config->rotation) {
+ case ENABLE_ROTATION_0:
+ /*
+ * Most (all?) VBTs claim 0 degrees despite having
+ * an upside down panel, thus we do not trust this.
+ */
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ break;
+ case ENABLE_ROTATION_90:
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_RIGHT_UP;
+ break;
+ case ENABLE_ROTATION_180:
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ break;
+ case ENABLE_ROTATION_270:
+ dev_priv->vbt.dsi.orientation =
+ DRM_MODE_PANEL_ORIENTATION_LEFT_UP;
+ break;
+ }
+
/* We have mandatory mipi config blocks. Initialize as generic panel */
dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
}
@@ -1721,7 +1752,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (INTEL_INFO(dev_priv)->num_pipes == 0) {
+ if (!HAS_DISPLAY(dev_priv)) {
DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
@@ -2039,17 +2070,17 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
dvo_port = child->dvo_port;
- switch (dvo_port) {
- case DVO_PORT_MIPIA:
- case DVO_PORT_MIPIC:
+ if (dvo_port == DVO_PORT_MIPIA ||
+ (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
+ (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
if (port)
*port = dvo_port - DVO_PORT_MIPIA;
return true;
- case DVO_PORT_MIPIB:
- case DVO_PORT_MIPID:
+ } else if (dvo_port == DVO_PORT_MIPIB ||
+ dvo_port == DVO_PORT_MIPIC ||
+ dvo_port == DVO_PORT_MIPID) {
DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
- break;
}
}
@@ -2159,3 +2190,49 @@ intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
return false;
}
+
+enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
+ enum aux_ch aux_ch;
+
+ if (!info->alternate_aux_channel) {
+ aux_ch = (enum aux_ch)port;
+
+ DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
+ aux_ch_name(aux_ch), port_name(port));
+ return aux_ch;
+ }
+
+ switch (info->alternate_aux_channel) {
+ case DP_AUX_A:
+ aux_ch = AUX_CH_A;
+ break;
+ case DP_AUX_B:
+ aux_ch = AUX_CH_B;
+ break;
+ case DP_AUX_C:
+ aux_ch = AUX_CH_C;
+ break;
+ case DP_AUX_D:
+ aux_ch = AUX_CH_D;
+ break;
+ case DP_AUX_E:
+ aux_ch = AUX_CH_E;
+ break;
+ case DP_AUX_F:
+ aux_ch = AUX_CH_F;
+ break;
+ default:
+ MISSING_CASE(info->alternate_aux_channel);
+ aux_ch = AUX_CH_A;
+ break;
+ }
+
+ DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
+ aux_ch_name(aux_ch), port_name(port));
+
+ return aux_ch;
+}
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 84bf8d827136..447c5256f63a 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,11 +27,7 @@
#include "i915_drv.h"
-#ifdef CONFIG_SMP
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
-#else
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
-#endif
+#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 8d74276029e6..25e3aba9cded 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -2660,37 +2660,18 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
fraction = 200;
}
- rawclk = CNP_RAWCLK_DIV((divider / 1000) - 1);
- if (fraction)
- rawclk |= CNP_RAWCLK_FRAC(DIV_ROUND_CLOSEST(1000,
- fraction) - 1);
+ rawclk = CNP_RAWCLK_DIV(divider / 1000);
+ if (fraction) {
+ int numerator = 1;
- I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
- return divider + fraction;
-}
-
-static int icp_rawclk(struct drm_i915_private *dev_priv)
-{
- u32 rawclk;
- int divider, numerator, denominator, frequency;
-
- if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
- frequency = 24000;
- divider = 23;
- numerator = 0;
- denominator = 0;
- } else {
- frequency = 19200;
- divider = 18;
- numerator = 1;
- denominator = 4;
+ rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
+ fraction) - 1);
+ if (HAS_PCH_ICP(dev_priv))
+ rawclk |= ICP_RAWCLK_NUM(numerator);
}
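+ /*
+ * Illustrative: a 19.2 MHz rawclk gives divider = 19000 and
+ * fraction = 200, so DIV = 19, NUM = 1 (ICP) and the DEN field is
+ * 1000 / 200 - 1 = 4, i.e. 19 + 1/5 MHz.
+ */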
- rawclk = CNP_RAWCLK_DIV(divider) | ICP_RAWCLK_NUM(numerator) |
- ICP_RAWCLK_DEN(denominator);
-
I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
- return frequency;
+ return divider + fraction;
}
static int pch_rawclk(struct drm_i915_private *dev_priv)
@@ -2740,9 +2721,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
*/
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_ICP(dev_priv))
- dev_priv->rawclk_freq = icp_rawclk(dev_priv);
- else if (HAS_PCH_CNP(dev_priv))
+ if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = pch_rawclk(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index c6a7beabd58d..5127da286a2b 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -149,7 +149,8 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
limited_color_range = intel_crtc_state->limited_color_range;
- if (intel_crtc_state->ycbcr420) {
+ if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
ilk_load_ycbcr_conversion_matrix(intel_crtc);
return;
} else if (crtc_state->ctm) {
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
new file mode 100644
index 000000000000..3d0271cebf99
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_combo_phy.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "intel_drv.h"
+
+#define for_each_combo_port(__dev_priv, __port) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ for_each_if(intel_port_is_combophy(__dev_priv, __port))
+
+#define for_each_combo_port_reverse(__dev_priv, __port) \
+ for ((__port) = I915_MAX_PORTS; (__port)-- > PORT_A;) \
+ for_each_if(intel_port_is_combophy(__dev_priv, __port))
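+/*
+ * e.g. on ICL, where ports A and B are the combo PHY ports, these
+ * iterate PORT_A and PORT_B (in reverse order for the _reverse variant).
+ */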
+
+enum {
+ PROCMON_0_85V_DOT_0,
+ PROCMON_0_95V_DOT_0,
+ PROCMON_0_95V_DOT_1,
+ PROCMON_1_05V_DOT_0,
+ PROCMON_1_05V_DOT_1,
+};
+
+static const struct cnl_procmon {
+ u32 dw1, dw9, dw10;
+} cnl_procmon_values[] = {
+ [PROCMON_0_85V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
+ [PROCMON_0_95V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
+ [PROCMON_0_95V_DOT_1] =
+ { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
+ [PROCMON_1_05V_DOT_0] =
+ { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
+ [PROCMON_1_05V_DOT_1] =
+ { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
+};
+
+/*
+ * CNL has just one set of registers, while ICL has two sets: one for port A and
+ * the other for port B. The CNL registers are equivalent to the ICL port A
+ * registers, which is why we use the ICL macros even though the function has
+ * CNL in its name.
+ */
+static const struct cnl_procmon *
+cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum port port)
+{
+ const struct cnl_procmon *procmon;
+ u32 val;
+
+ val = I915_READ(ICL_PORT_COMP_DW3(port));
+ switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
+ default:
+ MISSING_CASE(val);
+ /* fall through */
+ case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
+ break;
+ case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
+ procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
+ procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
+ break;
+ case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
+ procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
+ break;
+ }
+
+ return procmon;
+}
+
+static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct cnl_procmon *procmon;
+ u32 val;
+
+ procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+ val = I915_READ(ICL_PORT_COMP_DW1(port));
+ val &= ~((0xff << 16) | 0xff);
+ val |= procmon->dw1;
+ I915_WRITE(ICL_PORT_COMP_DW1(port), val);
+
+ I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
+ I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
+}
+
+static bool check_phy_reg(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t reg, u32 mask,
+ u32 expected_val)
+{
+ u32 val = I915_READ(reg);
+
+ if ((val & mask) != expected_val) {
+ DRM_DEBUG_DRIVER("Port %c combo PHY reg %08x state mismatch: "
+ "current %08x mask %08x expected %08x\n",
+ port_name(port),
+ reg.reg, val, mask, expected_val);
+ return false;
+ }
+
+ return true;
+}
+
+static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ const struct cnl_procmon *procmon;
+ bool ret;
+
+ procmon = cnl_get_procmon_ref_values(dev_priv, port);
+
+ ret = check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW1(port),
+ (0xff << 16) | 0xff, procmon->dw1);
+ ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW9(port),
+ -1U, procmon->dw9);
+ ret &= check_phy_reg(dev_priv, port, ICL_PORT_COMP_DW10(port),
+ -1U, procmon->dw10);
+
+ return ret;
+}
+
+static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
+{
+ return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
+ (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
+}
+
+static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
+{
+ enum port port = PORT_A;
+ bool ret;
+
+ if (!cnl_combo_phy_enabled(dev_priv))
+ return false;
+
+ ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+ ret &= check_phy_reg(dev_priv, port, CNL_PORT_CL1CM_DW5,
+ CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+ return ret;
+}
+
+void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(CHICKEN_MISC_2);
+ val &= ~CNL_COMP_PWR_DOWN;
+ I915_WRITE(CHICKEN_MISC_2, val);
+
+ /* Dummy PORT_A to get the correct CNL register from the ICL macro */
+ cnl_set_procmon_ref_values(dev_priv, PORT_A);
+
+ val = I915_READ(CNL_PORT_COMP_DW0);
+ val |= COMP_INIT;
+ I915_WRITE(CNL_PORT_COMP_DW0, val);
+
+ val = I915_READ(CNL_PORT_CL1CM_DW5);
+ val |= CL_POWER_DOWN_ENABLE;
+ I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+}
+
+void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ if (!cnl_combo_phy_verify_state(dev_priv))
+ DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
+
+ val = I915_READ(CHICKEN_MISC_2);
+ val |= CNL_COMP_PWR_DOWN;
+ I915_WRITE(CHICKEN_MISC_2, val);
+}
+
+static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ return !(I915_READ(ICL_PHY_MISC(port)) &
+ ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
+ (I915_READ(ICL_PORT_COMP_DW0(port)) & COMP_INIT);
+}
+
+static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ bool ret;
+
+ if (!icl_combo_phy_enabled(dev_priv, port))
+ return false;
+
+ ret = cnl_verify_procmon_ref_values(dev_priv, port);
+
+ ret &= check_phy_reg(dev_priv, port, ICL_PORT_CL_DW5(port),
+ CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
+
+ return ret;
+}
+
+void icl_combo_phys_init(struct drm_i915_private *dev_priv)
+{
+ enum port port;
+
+ for_each_combo_port(dev_priv, port) {
+ u32 val;
+
+ if (icl_combo_phy_verify_state(dev_priv, port)) {
+ DRM_DEBUG_DRIVER("Port %c combo PHY already enabled, won't reprogram it.\n",
+ port_name(port));
+ continue;
+ }
+
+ val = I915_READ(ICL_PHY_MISC(port));
+ val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ I915_WRITE(ICL_PHY_MISC(port), val);
+
+ cnl_set_procmon_ref_values(dev_priv, port);
+
+ val = I915_READ(ICL_PORT_COMP_DW0(port));
+ val |= COMP_INIT;
+ I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+
+ val = I915_READ(ICL_PORT_CL_DW5(port));
+ val |= CL_POWER_DOWN_ENABLE;
+ I915_WRITE(ICL_PORT_CL_DW5(port), val);
+ }
+}
+
+void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
+{
+ enum port port;
+
+ for_each_combo_port_reverse(dev_priv, port) {
+ u32 val;
+
+ if (!icl_combo_phy_verify_state(dev_priv, port))
+ DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
+ port_name(port));
+
+ val = I915_READ(ICL_PHY_MISC(port));
+ val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
+ I915_WRITE(ICL_PHY_MISC(port), val);
+
+ val = I915_READ(ICL_PORT_COMP_DW0(port));
+ val &= ~COMP_INIT;
+ I915_WRITE(ICL_PORT_COMP_DW0(port), val);
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_connector.c
index ca44bf368e24..18e370f607bc 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -25,11 +25,140 @@
#include <linux/slab.h>
#include <linux/i2c.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
+int intel_connector_init(struct intel_connector *connector)
+{
+ struct intel_digital_connector_state *conn_state;
+
+ /*
+ * Allocate enough memory to hold intel_digital_connector_state.
+ * This might be a few bytes too many, but for connectors that don't
+ * need it we'll free the state and allocate a smaller one on the first
+ * successful commit anyway.
+ */
+ conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
+ if (!conn_state)
+ return -ENOMEM;
+
+ __drm_atomic_helper_connector_reset(&connector->base,
+ &conn_state->base);
+
+ return 0;
+}
+
+struct intel_connector *intel_connector_alloc(void)
+{
+ struct intel_connector *connector;
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return NULL;
+
+ if (intel_connector_init(connector) < 0) {
+ kfree(connector);
+ return NULL;
+ }
+
+ return connector;
+}
+
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free (see intel_connector_destroy).
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+ kfree(to_intel_digital_connector_state(connector->base.state));
+ kfree(connector);
+}
+
+/*
+ * Connector type independent destroy hook for drm_connector_funcs.
+ */
+void intel_connector_destroy(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ kfree(intel_connector->detect_edid);
+
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ kfree(intel_connector->edid);
+
+ intel_panel_fini(&intel_connector->panel);
+
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+int intel_connector_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = intel_backlight_device_register(intel_connector);
+ if (ret)
+ goto err;
+
+ if (i915_inject_load_failure()) {
+ ret = -EFAULT;
+ goto err_backlight;
+ }
+
+ return 0;
+
+err_backlight:
+ intel_backlight_device_unregister(intel_connector);
+err:
+ return ret;
+}
+
+void intel_connector_unregister(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_backlight_device_unregister(intel_connector);
+}
+
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_connector_attach_encoder(&connector->base, &encoder->base);
+}
+
+/*
+ * Simple connector->get_hw_state implementation for encoders that support only
+ * one connector and no cloning and hence the encoder state determines the state
+ * of the connector.
+ */
+bool intel_connector_get_hw_state(struct intel_connector *connector)
+{
+ enum pipe pipe = 0;
+ struct intel_encoder *encoder = connector->encoder;
+
+ return encoder->get_hw_state(encoder, &pipe);
+}
+
+enum pipe intel_connector_get_pipe(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ if (!connector->base.state->crtc)
+ return INVALID_PIPE;
+
+ return to_intel_crtc(connector->base.state->crtc)->pipe;
+}
+
/**
* intel_connector_update_modes - update connector from edid
* @connector: DRM connector device to use
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 0c6bf82bb059..68f2fb89ece3 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -354,6 +354,7 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@@ -368,6 +369,7 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
return false;
pipe_config->has_pch_encoder = true;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@@ -389,6 +391,7 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
return false;
pipe_config->has_pch_encoder = true;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
@@ -849,12 +852,6 @@ out:
return status;
}
-static void intel_crt_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static int intel_crt_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -909,7 +906,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_crt_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index d48186e9ddad..a516697bf57d 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,34 +34,38 @@
* low-power state and comes back to normal.
*/
-#define I915_CSR_ICL "i915/icl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_ICL);
-#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
-#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
-#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
+#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin"
+#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define ICL_CSR_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(ICL_CSR_PATH);
-#define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_CNL);
+#define CNL_CSR_PATH "i915/cnl_dmc_ver1_07.bin"
#define CNL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
+#define CNL_CSR_MAX_FW_SIZE GLK_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(CNL_CSR_PATH);
+
+#define GLK_CSR_PATH "i915/glk_dmc_ver1_04.bin"
+#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
+#define GLK_CSR_MAX_FW_SIZE 0x4000
+MODULE_FIRMWARE(GLK_CSR_PATH);
-#define I915_CSR_KBL "i915/kbl_dmc_ver1_04.bin"
-MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_PATH "i915/kbl_dmc_ver1_04.bin"
#define KBL_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
+#define KBL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(KBL_CSR_PATH);
-#define I915_CSR_SKL "i915/skl_dmc_ver1_27.bin"
-MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_PATH "i915/skl_dmc_ver1_27.bin"
#define SKL_CSR_VERSION_REQUIRED CSR_VERSION(1, 27)
+#define SKL_CSR_MAX_FW_SIZE BXT_CSR_MAX_FW_SIZE
+MODULE_FIRMWARE(SKL_CSR_PATH);
-#define I915_CSR_BXT "i915/bxt_dmc_ver1_07.bin"
-MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_PATH "i915/bxt_dmc_ver1_07.bin"
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-
-
#define BXT_CSR_MAX_FW_SIZE 0x3000
-#define GLK_CSR_MAX_FW_SIZE 0x4000
-#define ICL_CSR_MAX_FW_SIZE 0x6000
+MODULE_FIRMWARE(BXT_CSR_PATH);
+
#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
struct intel_css_header {
@@ -190,6 +194,12 @@ static const struct stepping_info bxt_stepping_info[] = {
{'B', '0'}, {'B', '1'}, {'B', '2'}
};
+static const struct stepping_info icl_stepping_info[] = {
+ {'A', '0'}, {'A', '1'}, {'A', '2'},
+ {'B', '0'}, {'B', '2'},
+ {'C', '0'}
+};
+
static const struct stepping_info no_stepping_info = { '*', '*' };
static const struct stepping_info *
@@ -198,7 +208,10 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
const struct stepping_info *si;
unsigned int size;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_ICELAKE(dev_priv)) {
+ size = ARRAY_SIZE(icl_stepping_info);
+ si = icl_stepping_info;
+ } else if (IS_SKYLAKE(dev_priv)) {
size = ARRAY_SIZE(skl_stepping_info);
si = skl_stepping_info;
} else if (IS_BROXTON(dev_priv)) {
@@ -285,10 +298,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
struct intel_csr *csr = &dev_priv->csr;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
- uint32_t max_fw_size = 0;
uint32_t i;
uint32_t *dmc_payload;
- uint32_t required_version;
if (!fw)
return NULL;
@@ -303,38 +314,19 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
return NULL;
}
- csr->version = css_header->version;
-
- if (csr->fw_path == i915_modparams.dmc_firmware_path) {
- /* Bypass version check for firmware override. */
- required_version = csr->version;
- } else if (IS_ICELAKE(dev_priv)) {
- required_version = ICL_CSR_VERSION_REQUIRED;
- } else if (IS_CANNONLAKE(dev_priv)) {
- required_version = CNL_CSR_VERSION_REQUIRED;
- } else if (IS_GEMINILAKE(dev_priv)) {
- required_version = GLK_CSR_VERSION_REQUIRED;
- } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
- required_version = KBL_CSR_VERSION_REQUIRED;
- } else if (IS_SKYLAKE(dev_priv)) {
- required_version = SKL_CSR_VERSION_REQUIRED;
- } else if (IS_BROXTON(dev_priv)) {
- required_version = BXT_CSR_VERSION_REQUIRED;
- } else {
- MISSING_CASE(INTEL_REVID(dev_priv));
- required_version = 0;
- }
-
- if (csr->version != required_version) {
+ if (csr->required_version &&
+ css_header->version != csr->required_version) {
DRM_INFO("Refusing to load DMC firmware v%u.%u,"
" please use v%u.%u\n",
- CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version),
- CSR_VERSION_MAJOR(required_version),
- CSR_VERSION_MINOR(required_version));
+ CSR_VERSION_MAJOR(css_header->version),
+ CSR_VERSION_MINOR(css_header->version),
+ CSR_VERSION_MAJOR(csr->required_version),
+ CSR_VERSION_MINOR(csr->required_version));
return NULL;
}
+ csr->version = css_header->version;
+
readcount += sizeof(struct intel_css_header);
	/* Extract Package Header information */
@@ -402,15 +394,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4;
- if (INTEL_GEN(dev_priv) >= 11)
- max_fw_size = ICL_CSR_MAX_FW_SIZE;
- else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
- max_fw_size = GLK_CSR_MAX_FW_SIZE;
- else if (IS_GEN9(dev_priv))
- max_fw_size = BXT_CSR_MAX_FW_SIZE;
- else
- MISSING_CASE(INTEL_REVID(dev_priv));
- if (nbytes > max_fw_size) {
+ if (nbytes > csr->max_fw_size) {
DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
return NULL;
}
@@ -475,27 +459,57 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (!HAS_CSR(dev_priv))
return;
- if (i915_modparams.dmc_firmware_path)
- csr->fw_path = i915_modparams.dmc_firmware_path;
- else if (IS_ICELAKE(dev_priv))
- csr->fw_path = I915_CSR_ICL;
- else if (IS_CANNONLAKE(dev_priv))
- csr->fw_path = I915_CSR_CNL;
- else if (IS_GEMINILAKE(dev_priv))
- csr->fw_path = I915_CSR_GLK;
- else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
- csr->fw_path = I915_CSR_KBL;
- else if (IS_SKYLAKE(dev_priv))
- csr->fw_path = I915_CSR_SKL;
- else if (IS_BROXTON(dev_priv))
- csr->fw_path = I915_CSR_BXT;
-
/*
- * Obtain a runtime pm reference, until CSR is loaded,
- * to avoid entering runtime-suspend.
+ * Obtain a runtime pm reference, until CSR is loaded, to avoid entering
+ * runtime-suspend.
+ *
+ * On error, we return with the rpm wakeref held to prevent runtime
+ * suspend as runtime suspend *requires* a working CSR for whatever
+ * reason.
*/
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ if (INTEL_GEN(dev_priv) >= 12) {
+		/* Allow loading fw via parameter using the last known size */
+ csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
+ } else if (IS_ICELAKE(dev_priv)) {
+ csr->fw_path = ICL_CSR_PATH;
+ csr->required_version = ICL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ csr->fw_path = CNL_CSR_PATH;
+ csr->required_version = CNL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = CNL_CSR_MAX_FW_SIZE;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ csr->fw_path = GLK_CSR_PATH;
+ csr->required_version = GLK_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = GLK_CSR_MAX_FW_SIZE;
+ } else if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) {
+ csr->fw_path = KBL_CSR_PATH;
+ csr->required_version = KBL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = KBL_CSR_MAX_FW_SIZE;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ csr->fw_path = SKL_CSR_PATH;
+ csr->required_version = SKL_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = SKL_CSR_MAX_FW_SIZE;
+ } else if (IS_BROXTON(dev_priv)) {
+ csr->fw_path = BXT_CSR_PATH;
+ csr->required_version = BXT_CSR_VERSION_REQUIRED;
+ csr->max_fw_size = BXT_CSR_MAX_FW_SIZE;
+ }
+
+ if (i915_modparams.dmc_firmware_path) {
+ if (strlen(i915_modparams.dmc_firmware_path) == 0) {
+ csr->fw_path = NULL;
+ DRM_INFO("Disabling CSR firmware and runtime PM\n");
+ return;
+ }
+
+ csr->fw_path = i915_modparams.dmc_firmware_path;
+ /* Bypass version check for firmware override. */
+ csr->required_version = 0;
+ }
+
if (csr->fw_path == NULL) {
DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
WARN_ON(!IS_ALPHA_SUPPORT(INTEL_INFO(dev_priv)));
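
The net effect of the intel_csr.c hunks above: the firmware path, the pinned version, and the size cap now travel together per platform in struct intel_csr, and parse_csr_fw() only compares against csr->required_version, with 0 meaning "accept anything" for the dmc_firmware_path override. A minimal userspace sketch of that pattern, assuming the major/minor packing implied by the driver's CSR_VERSION_MAJOR()/CSR_VERSION_MINOR() split (major in the high 16 bits):

#include <stdint.h>
#include <stdio.h>

/* Same packing as i915's CSR_VERSION() helpers (assumed layout). */
#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(v)		((v) >> 16)
#define CSR_VERSION_MINOR(v)		((v) & 0xffff)

struct csr_fw_params {
	const char *path;
	uint32_t required_version;	/* 0 = accept any (override) */
	uint32_t max_size;		/* bytes */
};

/* Reject a blob unless it matches the pinned version and size cap. */
static int csr_fw_acceptable(const struct csr_fw_params *p,
			     uint32_t blob_version, uint32_t blob_size)
{
	if (p->required_version && blob_version != p->required_version) {
		fprintf(stderr, "refusing v%u.%u, want v%u.%u\n",
			CSR_VERSION_MAJOR(blob_version),
			CSR_VERSION_MINOR(blob_version),
			CSR_VERSION_MAJOR(p->required_version),
			CSR_VERSION_MINOR(p->required_version));
		return 0;
	}
	return blob_size <= p->max_size;
}

int main(void)
{
	struct csr_fw_params icl = {
		.path = "i915/icl_dmc_ver1_07.bin",
		.required_version = CSR_VERSION(1, 7),
		.max_size = 0x6000,
	};

	printf("%d\n", csr_fw_acceptable(&icl, CSR_VERSION(1, 7), 0x5800));
	printf("%d\n", csr_fw_acceptable(&icl, CSR_VERSION(1, 4), 0x5800));
	return 0;
}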
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 5186cd7075f9..f3e1d6a0b7dd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -28,6 +28,7 @@
#include <drm/drm_scdc_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_dsi.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -642,7 +643,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_KBL_ULX(dev_priv)) {
+ if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
} else if (IS_KBL_ULT(dev_priv) || IS_CFL_ULT(dev_priv)) {
@@ -658,7 +659,7 @@ static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv) ||
@@ -680,7 +681,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
@@ -1060,10 +1061,10 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
}
static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
- const struct intel_shared_dpll *pll)
+ const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
- int clock = crtc->config->port_clock;
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
+ int clock = crtc_state->port_clock;
const enum intel_dpll_id id = pll->info->id;
switch (id) {
@@ -1363,8 +1364,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
return dco_freq / (p0 * p1 * p2 * 5);
}
-static int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id pll_id)
{
uint32_t cfgcr0, cfgcr1;
uint32_t p0, p1, p2, dco_freq, ref_clock;
@@ -1517,7 +1518,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
else
dotclock = pipe_config->port_clock;
- if (pipe_config->ycbcr420)
+ if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
dotclock *= 2;
if (pipe_config->pixel_multiplier)
@@ -1737,16 +1738,16 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (INTEL_GEN(dev_priv) <= 8)
- hsw_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_BC(dev_priv))
- skl_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_clock_get(encoder, pipe_config);
+ if (IS_ICELAKE(dev_priv))
+ icl_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
- else if (IS_ICELAKE(dev_priv))
- icl_ddi_clock_get(encoder, pipe_config);
+ else if (IS_GEN9_LP(dev_priv))
+ bxt_ddi_clock_get(encoder, pipe_config);
+ else if (IS_GEN9_BC(dev_priv))
+ skl_ddi_clock_get(encoder, pipe_config);
+ else if (INTEL_GEN(dev_priv) <= 8)
+ hsw_ddi_clock_get(encoder, pipe_config);
}
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
@@ -1784,6 +1785,13 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
break;
}
+ /*
+	 * As per DP 1.2 spec section 2.3.4.3, while sending
+ * YCBCR 444 signals we should program MSA MISC1/0 fields with
+ * colorspace information. The output colorspace encoding is BT601.
+ */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR;
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
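
The MSA hunk above programs colorspace into the MSA MISC fields only for YCbCr 4:4:4 output, per the cited DP 1.2 rule. A sketch of that selection; the two bit values are placeholders for TRANS_MSA_SAMPLING_444 and TRANS_MSA_CLRSP_YCBCR, whose real positions live in i915_reg.h:

#include <stdio.h>

enum output_format { OUTPUT_RGB, OUTPUT_YCBCR420, OUTPUT_YCBCR444 };

#define MSA_SAMPLING_444	(1u << 1)	/* placeholder bit layout */
#define MSA_CLRSP_YCBCR		(1u << 3)

/* MSA MISC carries colorspace only for YCbCr 4:4:4 (DP 1.2, 2.3.4.3). */
static unsigned int msa_misc_bits(enum output_format fmt)
{
	return fmt == OUTPUT_YCBCR444 ? MSA_SAMPLING_444 | MSA_CLRSP_YCBCR : 0;
}

int main(void)
{
	printf("%#x\n", msa_misc_bits(OUTPUT_YCBCR444));	/* 0xa */
	printf("%#x\n", msa_misc_bits(OUTPUT_RGB));		/* 0 */
	return 0;
}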
@@ -1998,24 +2006,24 @@ out:
return ret;
}
-bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
- enum pipe *pipe)
+static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
+ u8 *pipe_mask, bool *is_dp_mst)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = encoder->port;
enum pipe p;
u32 tmp;
- bool ret;
+ u8 mst_pipe_mask;
+
+ *pipe_mask = 0;
+ *is_dp_mst = false;
if (!intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain))
- return false;
-
- ret = false;
+ return;
tmp = I915_READ(DDI_BUF_CTL(port));
-
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
@@ -2023,44 +2031,58 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+ default:
+ MISSING_CASE(tmp & TRANS_DDI_EDP_INPUT_MASK);
+ /* fallthrough */
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
- *pipe = PIPE_A;
+ *pipe_mask = BIT(PIPE_A);
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
- *pipe = PIPE_B;
+ *pipe_mask = BIT(PIPE_B);
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
- *pipe = PIPE_C;
+ *pipe_mask = BIT(PIPE_C);
break;
}
- ret = true;
-
goto out;
}
+ mst_pipe_mask = 0;
for_each_pipe(dev_priv, p) {
- enum transcoder cpu_transcoder = (enum transcoder) p;
+ enum transcoder cpu_transcoder = (enum transcoder)p;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
- TRANS_DDI_MODE_SELECT_DP_MST)
- goto out;
+ if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
+ continue;
- *pipe = p;
- ret = true;
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+ TRANS_DDI_MODE_SELECT_DP_MST)
+ mst_pipe_mask |= BIT(p);
- goto out;
- }
+ *pipe_mask |= BIT(p);
+ }
+
+ if (!*pipe_mask)
+ DRM_DEBUG_KMS("No pipe for ddi port %c found\n",
+ port_name(port));
+
+ if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) {
+ DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n",
+ port_name(port), *pipe_mask);
+ *pipe_mask = BIT(ffs(*pipe_mask) - 1);
}
- DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
+ if (mst_pipe_mask && mst_pipe_mask != *pipe_mask)
+ DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n",
+ port_name(port), *pipe_mask, mst_pipe_mask);
+ else
+ *is_dp_mst = mst_pipe_mask;
out:
- if (ret && IS_GEN9_LP(dev_priv)) {
+ if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
tmp = I915_READ(BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
@@ -2070,12 +2092,26 @@ out:
}
intel_display_power_put(dev_priv, encoder->power_domain);
+}
- return ret;
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
+ enum pipe *pipe)
+{
+ u8 pipe_mask;
+ bool is_mst;
+
+ intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+
+ if (is_mst || !pipe_mask)
+ return false;
+
+ *pipe = ffs(pipe_mask) - 1;
+
+ return true;
}
static inline enum intel_display_power_domain
-intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
+intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
{
/* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
* DC states enabled at the same time, while for driver initiated AUX
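
The readout rework above returns a pipe bitmask instead of a single pipe; intel_ddi_get_hw_state() then rejects MST, collapses a multi-pipe mask to its lowest bit, and converts with ffs(). A sketch of that collapse, with the compiler builtins standing in for the kernel's ffs()/hweight8():

#include <stdint.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

/* Collapse a readout mask to one pipe, mirroring intel_ddi_get_hw_state(). */
static int mask_to_pipe(uint8_t pipe_mask, int is_mst, enum pipe *pipe)
{
	if (is_mst || !pipe_mask)
		return 0;			/* no single pipe to report */

	if (__builtin_popcount(pipe_mask) > 1)	/* kernel: hweight8() */
		pipe_mask = 1u << (__builtin_ffs(pipe_mask) - 1);

	*pipe = (enum pipe)(__builtin_ffs(pipe_mask) - 1); /* kernel: ffs() */
	return 1;
}

int main(void)
{
	enum pipe p;

	if (mask_to_pipe((1u << PIPE_B) | (1u << PIPE_C), 0, &p))
		printf("pipe %c\n", 'A' + p);	/* prints "pipe B" */
	return 0;
}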
@@ -2089,13 +2125,14 @@ intel_ddi_main_link_aux_domain(struct intel_dp *intel_dp)
* Note that PSR is enabled only on Port A even though this function
* returns the correct domain for other ports too.
*/
- return intel_dp->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
- intel_dp->aux_power_domain;
+ return dig_port->aux_ch == AUX_CH_A ? POWER_DOMAIN_AUX_IO_A :
+ intel_aux_power_domain(dig_port);
}
static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port;
u64 domains;
@@ -2110,12 +2147,19 @@ static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder,
dig_port = enc_to_dig_port(&encoder->base);
domains = BIT_ULL(dig_port->ddi_io_power_domain);
- /* AUX power is only needed for (e)DP mode, not for HDMI. */
- if (intel_crtc_has_dp_encoder(crtc_state)) {
- struct intel_dp *intel_dp = &dig_port->dp;
+ /*
+ * AUX power is only needed for (e)DP mode, and for HDMI mode on TC
+ * ports.
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) ||
+ intel_port_is_tc(dev_priv, encoder->port))
+ domains |= BIT_ULL(intel_ddi_main_link_aux_domain(dig_port));
- domains |= BIT_ULL(intel_ddi_main_link_aux_domain(intel_dp));
- }
+ /*
+ * VDSC power is needed when DSC is enabled
+ */
+ if (crtc_state->dsc_params.compression_enable)
+ domains |= BIT_ULL(intel_dsc_power_domain(crtc_state));
return domains;
}
@@ -2748,77 +2792,130 @@ uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
return 0;
}
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+static void icl_map_plls_to_ports(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_connector_state *conn_state;
- struct drm_connector *conn;
- int i;
+ enum port port = encoder->port;
+ u32 val;
- for_each_new_connector_in_state(old_state, conn, conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(conn_state->best_encoder);
- enum port port;
- uint32_t val;
+ mutex_lock(&dev_priv->dpll_lock);
- if (conn_state->crtc != crtc)
- continue;
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
- port = encoder->port;
- mutex_lock(&dev_priv->dpll_lock);
+ if (intel_port_is_combophy(dev_priv, port)) {
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+ POSTING_READ(DPCLKA_CFGCR0_ICL);
+ }
- val = I915_READ(DPCLKA_CFGCR0_ICL);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
+ val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- if (intel_port_is_combophy(dev_priv, port)) {
- val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
- val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
- POSTING_READ(DPCLKA_CFGCR0_ICL);
- }
+ mutex_unlock(&dev_priv->dpll_lock);
+}
- val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
- I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
- mutex_unlock(&dev_priv->dpll_lock);
- }
+ mutex_lock(&dev_priv->dpll_lock);
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
+
+ mutex_unlock(&dev_priv->dpll_lock);
}
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state)
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_connector_state *old_conn_state;
- struct drm_connector *conn;
- int i;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val;
+ enum port port;
+ u32 port_mask;
+ bool ddi_clk_needed;
+
+ /*
+ * In case of DP MST, we sanitize the primary encoder only, not the
+ * virtual ones.
+ */
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ return;
- for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(old_conn_state->best_encoder);
- enum port port;
+ if (!encoder->base.crtc && intel_encoder_is_dp(encoder)) {
+ u8 pipe_mask;
+ bool is_mst;
- if (old_conn_state->crtc != crtc)
+ intel_ddi_get_encoder_pipes(encoder, &pipe_mask, &is_mst);
+ /*
+ * In the unlikely case that BIOS enables DP in MST mode, just
+ * warn since our MST HW readout is incomplete.
+ */
+ if (WARN_ON(is_mst))
+ return;
+ }
+
+ port_mask = BIT(encoder->port);
+ ddi_clk_needed = encoder->base.crtc;
+
+ if (encoder->type == INTEL_OUTPUT_DSI) {
+ struct intel_encoder *other_encoder;
+
+ port_mask = intel_dsi_encoder_ports(encoder);
+ /*
+ * Sanity check that we haven't incorrectly registered another
+ * encoder using any of the ports of this DSI encoder.
+ */
+ for_each_intel_encoder(&dev_priv->drm, other_encoder) {
+ if (other_encoder == encoder)
+ continue;
+
+ if (WARN_ON(port_mask & BIT(other_encoder->port)))
+ return;
+ }
+ /*
+ * DSI ports should have their DDI clock ungated when disabled
+ * and gated when enabled.
+ */
+ ddi_clk_needed = !encoder->base.crtc;
+ }
+
+ val = I915_READ(DPCLKA_CFGCR0_ICL);
+ for_each_port_masked(port, port_mask) {
+ bool ddi_clk_ungated = !(val &
+ icl_dpclka_cfgcr0_clk_off(dev_priv,
+ port));
+
+ if (ddi_clk_needed == ddi_clk_ungated)
continue;
- port = encoder->port;
- mutex_lock(&dev_priv->dpll_lock);
- I915_WRITE(DPCLKA_CFGCR0_ICL,
- I915_READ(DPCLKA_CFGCR0_ICL) |
- icl_dpclka_cfgcr0_clk_off(dev_priv, port));
- mutex_unlock(&dev_priv->dpll_lock);
+ /*
+	 * Punt for now on the case where the clock is gated but would
+	 * be needed by the port. Something else is really broken then.
+ */
+ if (WARN_ON(ddi_clk_needed))
+ continue;
+
+ DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
+ port_name(port));
+ val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
+ I915_WRITE(DPCLKA_CFGCR0_ICL, val);
}
}
static void intel_ddi_clk_select(struct intel_encoder *encoder,
- const struct intel_shared_dpll *pll)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
uint32_t val;
+ const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(!pll))
return;
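
The sanitize pass above hinges on one inverted predicate: a normal encoder needs its DDI clock ungated exactly while it drives a CRTC, while DSI ports want the clock ungated only while disabled. A tiny sketch of that predicate:

#include <stdbool.h>
#include <stdio.h>

/*
 * Whether a port's DDI clock should be ungated, per the sanitize logic
 * above: normal encoders need the clock exactly when they drive a CRTC;
 * DSI is the inverse (clock ungated only while the encoder is disabled).
 */
static bool ddi_clk_needed(bool has_crtc, bool is_dsi)
{
	return is_dsi ? !has_crtc : has_crtc;
}

int main(void)
{
	printf("HDMI enabled:  %d\n", ddi_clk_needed(true, false));	/* 1 */
	printf("DSI enabled:   %d\n", ddi_clk_needed(true, true));	/* 0 */
	printf("DSI disabled:  %d\n", ddi_clk_needed(false, true));	/* 1 */
	return 0;
}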
@@ -2828,7 +2925,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
if (IS_ICELAKE(dev_priv)) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port),
- icl_pll_to_ddi_pll_sel(encoder, pll));
+ icl_pll_to_ddi_pll_sel(encoder, crtc_state));
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
val = I915_READ(DPCLKA_CFGCR0);
@@ -2881,6 +2978,184 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
}
}
+static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+ u32 val;
+ int i;
+
+ if (tc_port == PORT_TC_NONE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+ val = I915_READ(mg_regs[i]);
+ val |= MG_DP_MODE_CFG_TR2PWR_GATING |
+ MG_DP_MODE_CFG_TRPWR_GATING |
+ MG_DP_MODE_CFG_CLNPWR_GATING |
+ MG_DP_MODE_CFG_DIGPWR_GATING |
+ MG_DP_MODE_CFG_GAONPWR_GATING;
+ I915_WRITE(mg_regs[i], val);
+ }
+
+ val = I915_READ(MG_MISC_SUS0(tc_port));
+ val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
+ MG_MISC_SUS0_CFG_TR2PWR_GATING |
+ MG_MISC_SUS0_CFG_CL2PWR_GATING |
+ MG_MISC_SUS0_CFG_GAONPWR_GATING |
+ MG_MISC_SUS0_CFG_TRPWR_GATING |
+ MG_MISC_SUS0_CFG_CL1PWR_GATING |
+ MG_MISC_SUS0_CFG_DGPWR_GATING;
+ I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
+ u32 val;
+ int i;
+
+ if (tc_port == PORT_TC_NONE)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
+ val = I915_READ(mg_regs[i]);
+ val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
+ MG_DP_MODE_CFG_TRPWR_GATING |
+ MG_DP_MODE_CFG_CLNPWR_GATING |
+ MG_DP_MODE_CFG_DIGPWR_GATING |
+ MG_DP_MODE_CFG_GAONPWR_GATING);
+ I915_WRITE(mg_regs[i], val);
+ }
+
+ val = I915_READ(MG_MISC_SUS0(tc_port));
+ val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
+ MG_MISC_SUS0_CFG_TR2PWR_GATING |
+ MG_MISC_SUS0_CFG_CL2PWR_GATING |
+ MG_MISC_SUS0_CFG_GAONPWR_GATING |
+ MG_MISC_SUS0_CFG_TRPWR_GATING |
+ MG_MISC_SUS0_CFG_CL1PWR_GATING |
+ MG_MISC_SUS0_CFG_DGPWR_GATING);
+ I915_WRITE(MG_MISC_SUS0(tc_port), val);
+}
+
+static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ enum port port = intel_dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ u32 ln0, ln1, lane_info;
+
+ if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
+ return;
+
+ ln0 = I915_READ(MG_DP_MODE(port, 0));
+ ln1 = I915_READ(MG_DP_MODE(port, 1));
+
+ switch (intel_dig_port->tc_type) {
+ case TC_PORT_TYPEC:
+ ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+ ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
+
+ lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
+ DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
+ DP_LANE_ASSIGNMENT_SHIFT(tc_port);
+
+ switch (lane_info) {
+ case 0x1:
+ case 0x4:
+ break;
+ case 0x2:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ break;
+ case 0x3:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ case 0x8:
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
+ break;
+ case 0xC:
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ case 0xF:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
+ MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+ default:
+ MISSING_CASE(lane_info);
+ }
+ break;
+
+ case TC_PORT_LEGACY:
+ ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
+ break;
+
+ default:
+ MISSING_CASE(intel_dig_port->tc_type);
+ return;
+ }
+
+ I915_WRITE(MG_DP_MODE(port, 0), ln0);
+ I915_WRITE(MG_DP_MODE(port, 1), ln1);
+}
+
+static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->fec_enable)
+ return;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
+ DRM_DEBUG_KMS("Failed to set FEC_READY in the sink\n");
+}
+
+static void intel_ddi_enable_fec(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val |= DP_TP_CTL_FEC_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+
+ if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+ DP_TP_STATUS_FEC_ENABLE_LIVE,
+ DP_TP_STATUS_FEC_ENABLE_LIVE,
+ 1))
+ DRM_ERROR("Timed out waiting for FEC Enable Status\n");
+}
+
+static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
+ u32 val;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ val = I915_READ(DP_TP_CTL(port));
+ val &= ~DP_TP_CTL_FEC_ENABLE;
+ I915_WRITE(DP_TP_CTL(port), val);
+ POSTING_READ(DP_TP_CTL(port));
+}
+
static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
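
The lane_info switch in icl_program_mg_dp_mode() above is a small decision table: the Type-C lane-assignment nibble has one bit per lane, ln0 covers the low lane pair and ln1 the high pair. A standalone sketch of the same table; the DP_X1_MODE/DP_X2_MODE values here are placeholders, not the real MG_DP_MODE_CFG_* bit positions:

#include <stdio.h>

#define DP_X1_MODE	0x1	/* placeholder for MG_DP_MODE_CFG_DP_X1_MODE */
#define DP_X2_MODE	0x2	/* placeholder for MG_DP_MODE_CFG_DP_X2_MODE */

/*
 * Decode a Type-C lane-assignment nibble (one bit per lane) into the
 * x1/x2 mode flags for each MG_DP_MODE instance, as in the switch above.
 * Returns -1 for assignments the hardware table does not name.
 */
static int decode_lane_info(unsigned int lane_info,
			    unsigned int *ln0, unsigned int *ln1)
{
	*ln0 = *ln1 = 0;

	switch (lane_info) {
	case 0x1: case 0x4:				/* one lane, default mode */
		break;
	case 0x2: *ln0 = DP_X1_MODE; break;
	case 0x3: *ln0 = DP_X1_MODE | DP_X2_MODE; break;
	case 0x8: *ln1 = DP_X1_MODE; break;
	case 0xC: *ln1 = DP_X1_MODE | DP_X2_MODE; break;
	case 0xF:					/* all four lanes */
		*ln0 = *ln1 = DP_X1_MODE | DP_X2_MODE;
		break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int ln0, ln1;

	decode_lane_info(0xC, &ln0, &ln1);
	printf("ln0=%#x ln1=%#x\n", ln0, ln1);	/* ln0=0 ln1=0x3 */
	return 0;
}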
@@ -2894,19 +3169,16 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
- intel_display_power_get(dev_priv,
- intel_ddi_main_link_aux_domain(intel_dp));
-
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
intel_edp_panel_on(intel_dp);
- intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+ intel_ddi_clk_select(encoder, crtc_state);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
- icl_program_mg_dp_mode(intel_dp);
+ icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
if (IS_ICELAKE(dev_priv))
@@ -2922,14 +3194,21 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_init_dp_buf_reg(encoder);
if (!is_mst)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
+ true);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
+ intel_ddi_enable_fec(encoder, crtc_state);
+
icl_enable_phy_clock_gating(dig_port);
if (!is_mst)
intel_ddi_enable_pipe_clock(crtc_state);
+
+ intel_dsc_enable(encoder, crtc_state);
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2944,10 +3223,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
- intel_ddi_clk_select(encoder, crtc_state->shared_dpll);
+ intel_ddi_clk_select(encoder, crtc_state);
intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+ icl_program_mg_dp_mode(dig_port);
+ icl_disable_phy_clock_gating(dig_port);
+
if (IS_ICELAKE(dev_priv))
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
@@ -2958,12 +3240,14 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
else
intel_prepare_hdmi_ddi_buffers(encoder, level);
+ icl_enable_phy_clock_gating(dig_port);
+
if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI);
intel_ddi_enable_pipe_clock(crtc_state);
- intel_dig_port->set_infoframes(&encoder->base,
+ intel_dig_port->set_infoframes(encoder,
crtc_state->has_infoframe,
crtc_state, conn_state);
}
@@ -2991,15 +3275,31 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
WARN_ON(crtc_state->has_pch_encoder);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_map_plls_to_ports(encoder, crtc_state);
+
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
intel_ddi_pre_enable_hdmi(encoder, crtc_state, conn_state);
- else
+ } else {
+ struct intel_lspcon *lspcon =
+ enc_to_intel_lspcon(&encoder->base);
+
intel_ddi_pre_enable_dp(encoder, crtc_state, conn_state);
+ if (lspcon->active) {
+ struct intel_digital_port *dig_port =
+ enc_to_dig_port(&encoder->base);
+
+ dig_port->set_infoframes(encoder,
+ crtc_state->has_infoframe,
+ crtc_state, conn_state);
+ }
+ }
}
-static void intel_disable_ddi_buf(struct intel_encoder *encoder)
+static void intel_disable_ddi_buf(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -3018,6 +3318,9 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder)
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
I915_WRITE(DP_TP_CTL(port), val);
+	/* Disable FEC on the source side (DP_TP_CTL) */
+ intel_ddi_disable_fec_state(encoder, crtc_state);
+
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
}
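
FEC bring-up above is a set-then-poll sequence: set DP_TP_CTL_FEC_ENABLE, then wait up to 1 ms for DP_TP_STATUS_FEC_ENABLE_LIVE; teardown clears the bit before the buffer-idle wait. A userspace sketch of the pattern, with fake registers standing in for MMIO and placeholder bit positions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEC_ENABLE		(1u << 30)	/* placeholder bit positions */
#define FEC_ENABLE_LIVE		(1u << 28)

static uint32_t dp_tp_ctl, dp_tp_status;	/* stand-ins for MMIO */

static void hw_tick(void)			/* fake hardware response */
{
	if (dp_tp_ctl & FEC_ENABLE)
		dp_tp_status |= FEC_ENABLE_LIVE;
}

/* Set the enable bit, then poll the status bit with a bounded retry. */
static bool enable_fec(void)
{
	int retries = 1000;			/* ~1 ms budget in the driver */

	dp_tp_ctl |= FEC_ENABLE;
	while (retries--) {
		hw_tick();
		if (dp_tp_status & FEC_ENABLE_LIVE)
			return true;
	}
	return false;				/* driver logs a DRM_ERROR */
}

int main(void)
{
	printf("FEC live: %d\n", enable_fec());
	return 0;
}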
@@ -3041,7 +3344,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
}
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
@@ -3049,9 +3352,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
-
- intel_display_power_put(dev_priv,
- intel_ddi_main_link_aux_domain(intel_dp));
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3062,12 +3362,12 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
- dig_port->set_infoframes(&encoder->base, false,
+ dig_port->set_infoframes(encoder, false,
old_crtc_state, old_conn_state);
intel_ddi_disable_pipe_clock(old_crtc_state);
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
@@ -3080,6 +3380,8 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
/*
* When called from DP MST code:
* - old_conn_state will be NULL
@@ -3099,6 +3401,9 @@ static void intel_ddi_post_disable(struct intel_encoder *encoder,
else
intel_ddi_post_disable_dp(encoder,
old_crtc_state, old_conn_state);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_unmap_plls_to_ports(encoder);
}
void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
@@ -3118,7 +3423,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
- intel_disable_ddi_buf(encoder);
+ intel_disable_ddi_buf(encoder, old_crtc_state);
intel_ddi_clk_disable(encoder);
val = I915_READ(FDI_RX_MISC(PIPE_A));
@@ -3154,6 +3459,26 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder,
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
+static i915_reg_t
+gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
+ enum port port)
+{
+ static const i915_reg_t regs[] = {
+ [PORT_A] = CHICKEN_TRANS_EDP,
+ [PORT_B] = CHICKEN_TRANS_A,
+ [PORT_C] = CHICKEN_TRANS_B,
+ [PORT_D] = CHICKEN_TRANS_C,
+ [PORT_E] = CHICKEN_TRANS_A,
+ };
+
+ WARN_ON(INTEL_GEN(dev_priv) < 9);
+
+ if (WARN_ON(port < PORT_A || port > PORT_E))
+ port = PORT_A;
+
+ return regs[port];
+}
+
static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
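
gen9_chicken_trans_reg_by_port() above replaces the open-coded transcoder table with a port-indexed register lookup plus an explicit clamp, so a bogus port warns and falls back to PORT_A instead of indexing out of bounds. A sketch of the same shape with placeholder offsets (the real CHICKEN_TRANS_* values live in i915_reg.h):

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, NUM_PORTS };

/* Placeholder register offsets; the real CHICKEN_TRANS_* values differ. */
static const unsigned int chicken_trans_reg[NUM_PORTS] = {
	[PORT_A] = 0x420cc,	/* CHICKEN_TRANS_EDP */
	[PORT_B] = 0x420c0,	/* CHICKEN_TRANS_A */
	[PORT_C] = 0x420c4,	/* CHICKEN_TRANS_B */
	[PORT_D] = 0x420c8,	/* CHICKEN_TRANS_C */
	[PORT_E] = 0x420c0,	/* CHICKEN_TRANS_A (shared with port B) */
};

/* Clamp bad input to PORT_A instead of indexing out of bounds. */
static unsigned int reg_by_port(enum port port)
{
	if (port < PORT_A || port > PORT_E) {
		fprintf(stderr, "bad port %d\n", port);
		port = PORT_A;
	}
	return chicken_trans_reg[port];
}

int main(void)
{
	printf("%#x\n", reg_by_port(PORT_E));
	return 0;
}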
@@ -3177,17 +3502,10 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
* the bits affect a specific DDI port rather than
* a specific transcoder.
*/
- static const enum transcoder port_to_transcoder[] = {
- [PORT_A] = TRANSCODER_EDP,
- [PORT_B] = TRANSCODER_A,
- [PORT_C] = TRANSCODER_B,
- [PORT_D] = TRANSCODER_C,
- [PORT_E] = TRANSCODER_A,
- };
- enum transcoder transcoder = port_to_transcoder[port];
+ i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
u32 val;
- val = I915_READ(CHICKEN_TRANS(transcoder));
+ val = I915_READ(reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3196,8 +3514,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- I915_WRITE(CHICKEN_TRANS(transcoder), val);
- POSTING_READ(CHICKEN_TRANS(transcoder));
+ I915_WRITE(reg, val);
+ POSTING_READ(reg);
udelay(1);
@@ -3208,7 +3526,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- I915_WRITE(CHICKEN_TRANS(transcoder), val);
+ I915_WRITE(reg, val);
}
	/* In HDMI/DVI mode, the port width and swing/emphasis values
@@ -3252,6 +3570,9 @@ static void intel_disable_ddi_dp(struct intel_encoder *encoder,
intel_edp_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
+ /* Disable the decompression in DP Sink */
+ intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
+ false);
}
static void intel_disable_ddi_hdmi(struct intel_encoder *encoder,
@@ -3282,13 +3603,76 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
}
-static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ enum port port)
{
- uint8_t mask = pipe_config->lane_lat_optim_mask;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
+ u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
+ bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+ val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
+ switch (pipe_config->lane_count) {
+ case 1:
+ val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
+ DFLEXDPMLE1_DPMLETC_ML0(tc_port);
+ break;
+ case 2:
+ val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
+ DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
+ break;
+ case 4:
+ val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
+ break;
+ default:
+ MISSING_CASE(pipe_config->lane_count);
+ }
+ I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
+}
- bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
+static void
+intel_ddi_pre_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum port port = encoder->port;
+
+ if (intel_crtc_has_dp_encoder(crtc_state) ||
+ intel_port_is_tc(dev_priv, encoder->port))
+ intel_display_power_get(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
+
+ if (IS_GEN9_LP(dev_priv))
+ bxt_ddi_phy_set_lane_optim_mask(encoder,
+ crtc_state->lane_lat_optim_mask);
+
+ /*
+ * Program the lane count for static/dynamic connections on Type-C ports.
+ * Skip this step for TBT.
+ */
+ if (dig_port->tc_type == TC_PORT_UNKNOWN ||
+ dig_port->tc_type == TC_PORT_TBT)
+ return;
+
+ intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
+}
+
+static void
+intel_ddi_post_pll_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+
+ if (intel_crtc_has_dp_encoder(crtc_state) ||
+ intel_port_is_tc(dev_priv, encoder->port))
+ intel_display_power_put(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3353,10 +3737,10 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
- if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 2;
- else if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+ if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
+ else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 2;
}
void intel_ddi_get_config(struct intel_encoder *encoder,
@@ -3406,7 +3790,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
intel_dig_port = enc_to_dig_port(&encoder->base);
- if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
+ if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
pipe_config->has_infoframe = true;
if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
@@ -3767,6 +4151,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
+ enum pipe pipe;
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
@@ -3805,8 +4190,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->compute_output_type = intel_ddi_compute_output_type;
intel_encoder->compute_config = intel_ddi_compute_config;
intel_encoder->enable = intel_enable_ddi;
- if (IS_GEN9_LP(dev_priv))
- intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
+ intel_encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
+ intel_encoder->post_pll_disable = intel_ddi_post_pll_disable;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->post_disable = intel_ddi_post_disable;
@@ -3817,8 +4202,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->type = INTEL_OUTPUT_DDI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
+ for_each_pipe(dev_priv, pipe)
+ intel_encoder->crtc_mask |= BIT(pipe);
if (INTEL_GEN(dev_priv) >= 11)
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
@@ -3828,6 +4214,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
+ intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
switch (port) {
case PORT_A:
@@ -3858,8 +4245,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
MISSING_CASE(port);
}
- intel_infoframe_init(intel_dig_port);
-
if (init_dp) {
if (!intel_ddi_init_dp_connector(intel_dig_port))
goto err;
@@ -3888,6 +4273,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
port_name(port));
}
+ intel_infoframe_init(intel_dig_port);
return;
err:
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 01fa98299bae..1e56319334f3 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -77,6 +77,10 @@ void intel_device_info_dump_flags(const struct intel_device_info *info,
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
+
+#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->display.name));
+ DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG);
+#undef PRINT_FLAG
}
static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
@@ -744,27 +748,30 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
if (INTEL_GEN(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
info->num_scalers[pipe] = 2;
- } else if (INTEL_GEN(dev_priv) == 9) {
+ } else if (IS_GEN9(dev_priv)) {
info->num_scalers[PIPE_A] = 2;
info->num_scalers[PIPE_B] = 2;
info->num_scalers[PIPE_C] = 1;
}
- BUILD_BUG_ON(I915_NUM_ENGINES >
- sizeof(intel_ring_mask_t) * BITS_PER_BYTE);
+ BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
- /*
- * Skylake and Broxton currently don't expose the topmost plane as its
- * use is exclusive with the legacy cursor and we only want to expose
- * one of those, not both. Until we can safely expose the topmost plane
- * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
- * we don't expose the topmost plane at all to prevent ABI breakage
- * down the line.
- */
- if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
+ if (IS_GEN11(dev_priv))
+ for_each_pipe(dev_priv, pipe)
+ info->num_sprites[pipe] = 6;
+ else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
for_each_pipe(dev_priv, pipe)
info->num_sprites[pipe] = 3;
else if (IS_BROXTON(dev_priv)) {
+ /*
+ * Skylake and Broxton currently don't expose the topmost plane as its
+ * use is exclusive with the legacy cursor and we only want to expose
+ * one of those, not both. Until we can safely expose the topmost plane
+ * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
+ * we don't expose the topmost plane at all to prevent ABI breakage
+ * down the line.
+ */
+
info->num_sprites[PIPE_A] = 2;
info->num_sprites[PIPE_B] = 2;
info->num_sprites[PIPE_C] = 1;
@@ -779,7 +786,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
if (i915_modparams.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
- } else if (info->num_pipes > 0 &&
+ } else if (HAS_DISPLAY(dev_priv) &&
(IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
@@ -804,7 +811,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1;
}
- } else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
+ } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0;
bool invalid;
@@ -844,13 +851,18 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) == 9)
+ else if (IS_GEN9(dev_priv))
gen9_sseu_info_init(dev_priv);
- else if (INTEL_GEN(dev_priv) == 10)
+ else if (IS_GEN10(dev_priv))
gen10_sseu_info_init(dev_priv);
else if (INTEL_GEN(dev_priv) >= 11)
gen11_sseu_info_init(dev_priv);
+ if (IS_GEN6(dev_priv) && intel_vtd_active()) {
+ DRM_INFO("Disabling ppGTT for VT-d support\n");
+ info->ppgtt = INTEL_PPGTT_NONE;
+ }
+
/* Initialize command stream timestamp frequency */
info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
@@ -872,40 +884,37 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
- u8 vdbox_disable, vebox_disable;
u32 media_fuse;
- int i;
+ unsigned int i;
if (INTEL_GEN(dev_priv) < 11)
return;
- media_fuse = I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
+ media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
- vdbox_disable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- vebox_disable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
+ info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
- DRM_DEBUG_DRIVER("vdbox disable: %04x\n", vdbox_disable);
+ DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(dev_priv, _VCS(i)))
continue;
- if (!(BIT(i) & vdbox_disable))
- continue;
-
- info->ring_mask &= ~ENGINE_MASK(_VCS(i));
- DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ if (!(BIT(i) & info->vdbox_enable)) {
+ info->ring_mask &= ~ENGINE_MASK(_VCS(i));
+ DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ }
}
- DRM_DEBUG_DRIVER("vebox disable: %04x\n", vebox_disable);
+ DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(dev_priv, _VECS(i)))
continue;
- if (!(BIT(i) & vebox_disable))
- continue;
-
- info->ring_mask &= ~ENGINE_MASK(_VECS(i));
- DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+ if (!(BIT(i) & info->vebox_enable)) {
+ info->ring_mask &= ~ENGINE_MASK(_VECS(i));
+ DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+ }
}
}
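
The media fuse decode above inverts the register once so both fields become enable masks, then prunes ring_mask by whichever engines stay unset. A sketch of the decode; the field layout used here (vdbox in the low byte, vebox above the shift) mirrors the masks named in the diff, but the exact widths are assumed:

#include <stdint.h>
#include <stdio.h>

#define VDBOX_DISABLE_MASK	0x000000ffu	/* assumed field widths */
#define VEBOX_DISABLE_SHIFT	16
#define VEBOX_DISABLE_MASK	(0x000000ffu << VEBOX_DISABLE_SHIFT)

struct media_info {
	uint8_t vdbox_enable;
	uint8_t vebox_enable;
};

/* One inversion turns the HW "disable" fuse into enable bitmasks. */
static void decode_media_fuse(uint32_t fuse, struct media_info *info)
{
	uint32_t enable = ~fuse;

	info->vdbox_enable = enable & VDBOX_DISABLE_MASK;
	info->vebox_enable = (enable & VEBOX_DISABLE_MASK) >>
			     VEBOX_DISABLE_SHIFT;
}

int main(void)
{
	struct media_info info;

	/* Fuse says: vdbox2/3 and vebox1 fused off. */
	decode_media_fuse(0x0002000cu, &info);
	printf("vdbox %#x vebox %#x\n", info.vdbox_enable, info.vebox_enable);
	return 0;
}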
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 6eecd64734d5..1caf24e2cf0b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -25,6 +25,8 @@
#ifndef _INTEL_DEVICE_INFO_H_
#define _INTEL_DEVICE_INFO_H_
+#include <uapi/drm/i915_drm.h>
+
#include "intel_display.h"
struct drm_printer;
@@ -74,51 +76,58 @@ enum intel_platform {
INTEL_MAX_PLATFORMS
};
+enum intel_ppgtt {
+ INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
+ INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
+ INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
+ INTEL_PPGTT_FULL_4LVL,
+};
+
#define DEV_INFO_FOR_EACH_FLAG(func) \
func(is_mobile); \
func(is_lp); \
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
- func(has_aliasing_ppgtt); \
- func(has_csr); \
- func(has_ddi); \
- func(has_dp_mst); \
func(has_reset_engine); \
- func(has_fbc); \
func(has_fpga_dbg); \
- func(has_full_ppgtt); \
- func(has_full_48bit_ppgtt); \
- func(has_gmch_display); \
func(has_guc); \
func(has_guc_ct); \
- func(has_hotplug); \
func(has_l3_dpf); \
func(has_llc); \
func(has_logical_ring_contexts); \
func(has_logical_ring_elsq); \
func(has_logical_ring_preemption); \
- func(has_overlay); \
func(has_pooled_eu); \
- func(has_psr); \
func(has_rc6); \
func(has_rc6p); \
func(has_runtime_pm); \
func(has_snoop); \
func(has_coherent_ggtt); \
func(unfenced_needs_alignment); \
+ func(hws_needs_physical);
+
+#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
+ /* Keep in alphabetical order */ \
func(cursor_needs_physical); \
- func(hws_needs_physical); \
+ func(has_csr); \
+ func(has_ddi); \
+ func(has_dp_mst); \
+ func(has_fbc); \
+ func(has_gmch_display); \
+ func(has_hotplug); \
+ func(has_ipc); \
+ func(has_overlay); \
+ func(has_psr); \
func(overlay_needs_physical); \
- func(supports_tv); \
- func(has_ipc);
+ func(supports_tv);
#define GEN_MAX_SLICES (6) /* CNL upper bound */
#define GEN_MAX_SUBSLICES (8) /* ICL upper bound */
struct sseu_dev_info {
u8 slice_mask;
- u8 subslice_mask[GEN_MAX_SUBSLICES];
+ u8 subslice_mask[GEN_MAX_SLICES];
u16 eu_total;
u8 eu_per_subslice;
u8 min_eu_in_pool;
@@ -154,6 +163,7 @@ struct intel_device_info {
enum intel_platform platform;
u32 platform_mask;
+ enum intel_ppgtt ppgtt;
unsigned int page_sizes; /* page sizes supported by the HW */
u32 display_mmio_offset;
@@ -165,12 +175,18 @@ struct intel_device_info {
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
+
+ struct {
+#define DEFINE_FLAG(name) u8 name:1
+ DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
+#undef DEFINE_FLAG
+ } display;
+
u16 ddb_size; /* in blocks */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
int trans_offsets[I915_MAX_TRANSCODERS];
- int palette_offsets[I915_MAX_PIPES];
int cursor_offsets[I915_MAX_PIPES];
/* Slice/subslice/EU info */
@@ -178,6 +194,10 @@ struct intel_device_info {
u32 cs_timestamp_frequency_khz;
+ /* Enabled (not fused off) media engine bitmasks. */
+ u8 vdbox_enable;
+ u8 vebox_enable;
+
struct color_luts {
u16 degamma_lut_size;
u16 gamma_lut_size;
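
The header change above generates the new display-only flags from a second X-macro list into a nested struct, which is what lets intel_device_info_dump_flags() reuse the same PRINT_FLAG body with an info->display. prefix. A compilable sketch of the pattern, using a few flags picked from the lists in the diff:

#include <stdio.h>

/* X-macro lists: one expansion defines the bitfields, another prints them. */
#define DEV_INFO_FOR_EACH_FLAG(func) \
	func(has_llc); \
	func(has_snoop);

#define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
	func(has_fbc); \
	func(has_psr);

struct device_info {
#define DEFINE_FLAG(name) unsigned char name:1
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG)
	struct {
		DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG)
	} display;
#undef DEFINE_FLAG
};

static void dump_flags(const struct device_info *info)
{
#define PRINT_FLAG(name) printf("%s: %s\n", #name, info->name ? "yes" : "no")
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG)
#undef PRINT_FLAG
#define PRINT_FLAG(name) printf("%s: %s\n", #name, info->display.name ? "yes" : "no")
	DEV_INFO_DISPLAY_FOR_EACH_FLAG(PRINT_FLAG)
#undef PRINT_FLAG
}

int main(void)
{
	struct device_info info = { .has_llc = 1, .display.has_fbc = 1 };

	dump_flags(&info);
	return 0;
}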
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c9878dd1f7cd..07c861884c70 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,7 +24,6 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -74,55 +73,6 @@ static const uint64_t i9xx_format_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
-static const uint32_t skl_primary_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
-};
-
-static const uint32_t skl_pri_planar_formats[] = {
- DRM_FORMAT_C8,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_NV12,
-};
-
-static const uint64_t skl_format_modifiers_noccs[] = {
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const uint64_t skl_format_modifiers_ccs[] = {
- I915_FORMAT_MOD_Yf_TILED_CCS,
- I915_FORMAT_MOD_Y_TILED_CCS,
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
@@ -141,15 +91,15 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
static int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
-static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
-static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
-static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n,
- struct intel_link_m_n *m2_n2);
-static void ironlake_set_pipeconf(struct drm_crtc *crtc);
-static void haswell_set_pipeconf(struct drm_crtc *crtc);
-static void haswell_set_pipemisc(struct drm_crtc *crtc);
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_link_m_n *m_n,
+ const struct intel_link_m_n *m2_n2);
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
@@ -158,9 +108,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-static void skylake_pfit_enable(struct intel_crtc *crtc);
-static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
-static void ironlake_pfit_enable(struct intel_crtc *crtc);
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
@@ -506,23 +456,8 @@ static const struct intel_limit intel_limits_bxt = {
};
static void
-skl_wa_528(struct drm_i915_private *dev_priv, int pipe, bool enable)
-{
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return;
-
- if (enable)
- I915_WRITE(CHICKEN_PIPESL_1(pipe), HSW_FBCQ_DIS);
- else
- I915_WRITE(CHICKEN_PIPESL_1(pipe), 0);
-}
-
-static void
skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return;
-
if (enable)
I915_WRITE(CLKGATE_DIS_PSL(pipe),
DUPS1_GATING_DIS | DUPS2_GATING_DIS);
@@ -1381,6 +1316,7 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
"PCH LVDS enabled on transcoder %c, should be disabled\n",
pipe_name(pipe));
+	/* PCH SDVOB is multiplexed with HDMIB */
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
@@ -1565,14 +1501,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
}
}
-static void i9xx_disable_pll(struct intel_crtc *crtc)
+static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
/* Disable DVO 2x clock on both PLLs if necessary */
if (IS_I830(dev_priv) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
!intel_num_dvo_pipes(dev_priv)) {
I915_WRITE(DPLL(PIPE_B),
I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1666,16 +1603,16 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
I915_READ(dpll_reg) & port_mask, expected_mask);
}
-static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
- pipe);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
i915_reg_t reg;
uint32_t val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
- assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
+ assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
@@ -1701,7 +1638,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
* here for both 8bpc and 12bpc.
*/
val &= ~PIPECONF_BPC_MASK;
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val |= PIPECONF_8BPC;
else
val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1710,7 +1647,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_INTERLACE_MASK;
if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
if (HAS_PCH_IBX(dev_priv) &&
- intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
@@ -2254,6 +2191,11 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
return new_offset;
}
+static bool is_surface_linear(u64 modifier, int color_plane)
+{
+ return modifier == DRM_FORMAT_MOD_LINEAR;
+}
+
static u32 intel_adjust_aligned_offset(int *x, int *y,
const struct drm_framebuffer *fb,
int color_plane,
@@ -2266,7 +2208,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
WARN_ON(new_offset > old_offset);
- if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ if (!is_surface_linear(fb->modifier, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
@@ -2330,14 +2272,13 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
unsigned int rotation,
u32 alignment)
{
- uint64_t fb_modifier = fb->modifier;
unsigned int cpp = fb->format->cpp[color_plane];
u32 offset, offset_aligned;
if (alignment)
alignment--;
- if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
+ if (!is_surface_linear(fb->modifier, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles, pitch_tiles;
@@ -2400,10 +2341,26 @@ static int intel_fb_offset_to_xy(int *x, int *y,
int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ unsigned int height;
if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
- fb->offsets[color_plane] % intel_tile_size(dev_priv))
+ fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
+ DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
+ fb->offsets[color_plane], color_plane);
return -EINVAL;
+ }
+
+ height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
+ height = ALIGN(height, intel_tile_height(fb, color_plane));
+
+ /* Catch potential overflows early */
+ if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
+ fb->offsets[color_plane])) {
+ DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
+ fb->offsets[color_plane], fb->pitches[color_plane],
+ color_plane);
+ return -ERANGE;
+ }
*x = 0;
*y = 0;
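
The new -ERANGE check guards against u32 wraparound when the tile-aligned plane height, pitch, and offset are combined. A minimal standalone sketch of the same arithmetic, widened to 64 bits (plain C; fb_plane_overflows_u32() is a hypothetical stand-in for the kernel's mul_u32_u32() + add_overflows_t(u32, ...) pair):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical equivalent of mul_u32_u32() + add_overflows_t(u32, ...). */
static bool fb_plane_overflows_u32(uint32_t height, uint32_t pitch,
				   uint32_t offset)
{
	/* Widen to 64 bits so the intermediate product cannot wrap. */
	uint64_t end = (uint64_t)height * pitch + offset;

	return end > UINT32_MAX;
}

int main(void)
{
	/* A tall plane with a 1 MiB pitch trips the check. */
	printf("%d\n", fb_plane_overflows_u32(4352, 1024 * 1024, 16));
	return 0;
}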
@@ -2574,7 +2531,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
tile_size);
offset /= tile_size;
- if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ if (!is_surface_linear(fb->modifier, i)) {
unsigned int tile_width, tile_height;
unsigned int pitch_tiles;
struct drm_rect r;
@@ -2788,10 +2745,6 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state,
crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
else
crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
-
- DRM_DEBUG_KMS("%s active planes 0x%x\n",
- crtc_state->base.crtc->name,
- crtc_state->active_planes);
}
static void fixup_active_planes(struct intel_crtc_state *crtc_state)
@@ -2819,6 +2772,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
+ DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
+ plane->base.base.id, plane->base.name,
+ crtc->base.base.id, crtc->base.name);
+
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
@@ -2826,7 +2783,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_pre_disable_primary_noatomic(&crtc->base);
trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc);
+ plane->disable_plane(plane, crtc_state);
}
static void
@@ -3099,28 +3056,6 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
return 0;
}
-static int
-skl_check_nv12_surface(struct intel_plane_state *plane_state)
-{
- /* Display WA #1106 */
- if (plane_state->base.rotation !=
- (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90) &&
- plane_state->base.rotation != DRM_MODE_ROTATE_270)
- return 0;
-
- /*
- * src coordinates are rotated here.
- * We check height but report it as width
- */
- if (((drm_rect_height(&plane_state->base.src) >> 16) % 4) != 0) {
- DRM_DEBUG_KMS("src width must be multiple "
- "of 4 for rotated NV12\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
@@ -3199,9 +3134,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* the main surface setup depends on it.
*/
if (fb->format->format == DRM_FORMAT_NV12) {
- ret = skl_check_nv12_surface(plane_state);
- if (ret)
- return ret;
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3399,7 +3331,6 @@ static void i9xx_update_plane(struct intel_plane *plane,
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 linear_offset;
u32 dspcntr = plane_state->ctl;
- i915_reg_t reg = DSPCNTR(i9xx_plane);
int x = plane_state->color_plane[0].x;
int y = plane_state->color_plane[0].y;
unsigned long irqflags;
@@ -3414,48 +3345,51 @@ static void i9xx_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+
if (INTEL_GEN(dev_priv) < 4) {
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
+ I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
I915_WRITE_FW(DSPSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
+ I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
I915_WRITE_FW(PRIMSIZE(i9xx_plane),
((crtc_state->pipe_src_h - 1) << 16) |
(crtc_state->pipe_src_w - 1));
- I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
}
- I915_WRITE_FW(reg, dspcntr);
-
- I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) +
- dspaddr_offset);
I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
+ I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
+ I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+ }
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
+ I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+ if (INTEL_GEN(dev_priv) >= 4)
I915_WRITE_FW(DSPSURF(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
- I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
- } else {
+ else
I915_WRITE_FW(DSPADDR(i9xx_plane),
intel_plane_ggtt_offset(plane_state) +
dspaddr_offset);
- }
- POSTING_READ_FW(reg);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
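
The reordering above relies on the display registers being double-buffered: most writes are latched and only take effect when the surface register arms them, while the control register self-arms if the plane was off. A toy model of that latching, to show why DSPCNTR is now written immediately before DSPSURF (all names and the enable-bit position are hypothetical):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct plane_regs {
	uint32_t cntr_staged, cntr_live;
	uint32_t surf_live;
	bool enabled;
};

/* DSPCNTR-style write: latched, unless the plane is off (self-arming). */
static void write_cntr(struct plane_regs *p, uint32_t val)
{
	p->cntr_staged = val;
	if (!p->enabled)
		p->cntr_live = val;
}

/* DSPSURF-style write: commits every staged register at once. */
static void write_surf(struct plane_regs *p, uint32_t addr)
{
	p->surf_live = addr;
	p->cntr_live = p->cntr_staged;
	p->enabled = p->cntr_live & 0x80000000u;
}

int main(void)
{
	struct plane_regs p = {0};

	/* Writing CNTR just before SURF keeps the enable effectively atomic. */
	write_cntr(&p, 0x80000001u);
	write_surf(&p, 0x1000u);
	printf("enabled=%d cntr=%08x surf=%08x\n",
	       p.enabled, p.cntr_live, p.surf_live);
	return 0;
}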
static void i9xx_disable_plane(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
@@ -3468,7 +3402,6 @@ static void i9xx_disable_plane(struct intel_plane *plane,
I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
else
I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
- POSTING_READ_FW(DSPCNTR(i9xx_plane));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -3528,13 +3461,13 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
/*
 * This function detaches (a.k.a. unbinds) unused scalers in hardware
*/
-static void skl_detach_scalers(struct intel_crtc *intel_crtc)
+static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc_scaler_state *scaler_state;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
int i;
- scaler_state = &intel_crtc->config->scaler_state;
-
/* loop through and disable scalers that aren't in use */
for (i = 0; i < intel_crtc->num_scalers; i++) {
if (!scaler_state->scalers[i].in_use)
@@ -3542,6 +3475,21 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc)
}
}
+static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int rotation)
+{
+ /*
+ * The stride is either expressed as a multiple of 64-byte chunks
+ * for linear buffers or as a number of tiles for tiled buffers.
+ */
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ return 64;
+ else if (drm_rotation_90_or_270(rotation))
+ return intel_tile_height(fb, color_plane);
+ else
+ return intel_tile_width_bytes(fb, color_plane);
+}
+
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
int color_plane)
{
@@ -3552,16 +3500,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state,
if (color_plane >= fb->format->num_planes)
return 0;
- /*
- * The stride is either expressed as a multiple of 64-byte chunks
- * for linear buffers or as a number of tiles for tiled buffers.
- */
- if (drm_rotation_90_or_270(rotation))
- stride /= intel_tile_height(fb, color_plane);
- else
- stride /= intel_fb_stride_alignment(fb, color_plane);
-
- return stride;
+ return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}
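
skl_plane_stride() now reduces to one division: the byte pitch over a single multiplier, which is 64 for linear surfaces, the tile height for 90/270 rotation, or the tile width in bytes otherwise. A self-contained sketch of that selection, using X-tile geometry (512 bytes x 8 rows) for concreteness:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of skl_plane_stride_mult() with fixed tile geometry. */
static uint32_t stride_mult(bool linear, bool rotated_90_270)
{
	const uint32_t tile_width_bytes = 512, tile_height = 8; /* X-tiling */

	if (linear)
		return 64;
	return rotated_90_270 ? tile_height : tile_width_bytes;
}

int main(void)
{
	uint32_t pitch = 16384; /* bytes */

	printf("linear: %u\n", pitch / stride_mult(true, false));  /* 256 chunks */
	printf("tiled:  %u\n", pitch / stride_mult(false, false)); /* 32 tiles */
	printf("rot90:  %u\n", pitch / stride_mult(false, true));  /* 2048 rows */
	return 0;
}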
static u32 skl_plane_ctl_format(uint32_t pixel_format)
@@ -3598,29 +3537,38 @@ static u32 skl_plane_ctl_format(uint32_t pixel_format)
return 0;
}
-/*
- * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
- * to be already pre-multiplied. We need to add a knob (or a different
- * DRM_FORMAT) for user-space to configure that.
- */
-static u32 skl_plane_ctl_alpha(uint32_t pixel_format)
+static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
{
- switch (pixel_format) {
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB8888:
+ if (!plane_state->base.fb->format->has_alpha)
+ return PLANE_CTL_ALPHA_DISABLE;
+
+ switch (plane_state->base.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_CTL_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
default:
+ MISSING_CASE(plane_state->base.pixel_blend_mode);
return PLANE_CTL_ALPHA_DISABLE;
}
}
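
For reference, the distinction the new switch encodes: a pre-multiplied framebuffer already carries alpha folded into its color channels, while a coverage framebuffer needs the hardware to multiply by alpha on scanout (hence PLANE_CTL_ALPHA_HW_PREMULTIPLY). A one-pixel demonstration using standard alpha-blending arithmetic, not driver code:

#include <stdio.h>

/* One channel, floats for clarity. Premultiplied: src already scaled by a. */
static float blend_premultiplied(float src_premul, float dst, float a)
{
	return src_premul + (1.0f - a) * dst;
}

/* Coverage: the hardware multiplies src by alpha first. */
static float blend_coverage(float src, float dst, float a)
{
	return a * src + (1.0f - a) * dst;
}

int main(void)
{
	float a = 0.25f, src = 0.8f, dst = 0.4f;

	/* Both print 0.500: the conventions agree once src is pre-scaled. */
	printf("premultiplied: %.3f\n", blend_premultiplied(a * src, dst, a));
	printf("coverage:      %.3f\n", blend_coverage(src, dst, a));
	return 0;
}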
-static u32 glk_plane_color_ctl_alpha(uint32_t pixel_format)
+static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
{
- switch (pixel_format) {
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_ARGB8888:
+ if (!plane_state->base.fb->format->has_alpha)
+ return PLANE_COLOR_ALPHA_DISABLE;
+
+ switch (plane_state->base.pixel_blend_mode) {
+ case DRM_MODE_BLEND_PIXEL_NONE:
+ return PLANE_COLOR_ALPHA_DISABLE;
+ case DRM_MODE_BLEND_PREMULTI:
return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
+ case DRM_MODE_BLEND_COVERAGE:
+ return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
default:
+ MISSING_CASE(plane_state->base.pixel_blend_mode);
return PLANE_COLOR_ALPHA_DISABLE;
}
}
@@ -3697,7 +3645,7 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
plane_ctl = PLANE_CTL_ENABLE;
if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
- plane_ctl |= skl_plane_ctl_alpha(fb->format->format);
+ plane_ctl |= skl_plane_ctl_alpha(plane_state);
plane_ctl |=
PLANE_CTL_PIPE_GAMMA_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE |
@@ -3732,6 +3680,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv =
to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 plane_color_ctl = 0;
if (INTEL_GEN(dev_priv) < 11) {
@@ -3739,9 +3688,9 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
}
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
- plane_color_ctl |= glk_plane_color_ctl_alpha(fb->format->format);
+ plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
- if (fb->format->is_yuv) {
+ if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
@@ -3749,6 +3698,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
+ } else if (fb->format->is_yuv) {
+ plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
}
return plane_color_ctl;
@@ -3933,15 +3884,15 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
/* on skylake this is done by detaching scalers */
if (INTEL_GEN(dev_priv) >= 9) {
- skl_detach_scalers(crtc);
+ skl_detach_scalers(new_crtc_state);
if (new_crtc_state->pch_pfit.enabled)
- skylake_pfit_enable(crtc);
+ skylake_pfit_enable(new_crtc_state);
} else if (HAS_PCH_SPLIT(dev_priv)) {
if (new_crtc_state->pch_pfit.enabled)
- ironlake_pfit_enable(crtc);
+ ironlake_pfit_enable(new_crtc_state);
else if (old_crtc_state->pch_pfit.enabled)
- ironlake_pfit_disable(crtc, true);
+ ironlake_pfit_disable(old_crtc_state);
}
}
@@ -4340,10 +4291,10 @@ train_done:
DRM_DEBUG_KMS("FDI train done.\n");
}
-static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
+static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
int pipe = intel_crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -4352,7 +4303,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
@@ -4501,10 +4452,11 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
}
/* Program iCLKIP clock to the desired frequency */
-static void lpt_program_iclkip(struct intel_crtc *crtc)
+static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int clock = crtc->config->base.adjusted_mode.crtc_clock;
+ int clock = crtc_state->base.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -4615,12 +4567,12 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
desired_divisor << auxdiv);
}
-static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
+static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
enum pipe pch_transcoder)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
I915_READ(HTOTAL(cpu_transcoder)));
@@ -4639,9 +4591,8 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
-static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
+static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
uint32_t temp;
temp = I915_READ(SOUTH_CHICKEN1);
@@ -4660,22 +4611,23 @@ static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
POSTING_READ(SOUTH_CHICKEN1);
}
-static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
+static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- switch (intel_crtc->pipe) {
+ switch (crtc->pipe) {
case PIPE_A:
break;
case PIPE_B:
- if (intel_crtc->config->fdi_lanes > 2)
- cpt_set_fdi_bc_bifurcation(dev, false);
+ if (crtc_state->fdi_lanes > 2)
+ cpt_set_fdi_bc_bifurcation(dev_priv, false);
else
- cpt_set_fdi_bc_bifurcation(dev, true);
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
break;
case PIPE_C:
- cpt_set_fdi_bc_bifurcation(dev, true);
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
break;
default:
@@ -4732,7 +4684,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
assert_pch_transcoder_disabled(dev_priv, pipe);
if (IS_IVYBRIDGE(dev_priv))
- ivybridge_update_fdi_bc_bifurcation(crtc);
+ ivybridge_update_fdi_bc_bifurcation(crtc_state);
/* Write the TU size bits before fdi link training, so that error
* detection works. */
@@ -4765,11 +4717,11 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
* Note that enable_shared_dpll tries to do the right thing, but
* get_shared_dpll unconditionally resets the pll - we need that to have
* the right LVDS enable sequence. */
- intel_enable_shared_dpll(crtc);
+ intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
- ironlake_pch_transcoder_set_timings(crtc, pipe);
+ ironlake_pch_transcoder_set_timings(crtc_state, pipe);
intel_fdi_normal_train(crtc);
@@ -4801,7 +4753,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state,
I915_WRITE(reg, temp);
}
- ironlake_enable_pch_transcoder(dev_priv, pipe);
+ ironlake_enable_pch_transcoder(crtc_state);
}
static void lpt_pch_enable(const struct intel_atomic_state *state,
@@ -4813,10 +4765,10 @@ static void lpt_pch_enable(const struct intel_atomic_state *state,
assert_pch_transcoder_disabled(dev_priv, PIPE_A);
- lpt_program_iclkip(crtc);
+ lpt_program_iclkip(crtc_state);
/* Set transcoder timing. */
- ironlake_pch_transcoder_set_timings(crtc, PIPE_A);
+ ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
@@ -4904,8 +4856,7 @@ static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
int src_w, int src_h, int dst_w, int dst_h,
- bool plane_scaler_check,
- uint32_t pixel_format)
+ const struct drm_format_info *format, bool need_scaler)
{
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
@@ -4914,21 +4865,14 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- int need_scaling;
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
- need_scaling = src_w != dst_w || src_h != dst_h;
-
- if (plane_scaler_check)
- if (pixel_format == DRM_FORMAT_NV12)
- need_scaling = true;
-
- if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX)
- need_scaling = true;
+ if (src_w != dst_w || src_h != dst_h)
+ need_scaler = true;
/*
* Scaling/fitting not supported in IF-ID mode in GEN9+
@@ -4937,7 +4881,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* for NV12.
*/
if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
- need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
}
@@ -4952,7 +4896,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
* update to free the scaler is done in plane/panel-fit programming.
* For this purpose crtc/plane_state->scaler_id isn't reset here.
*/
- if (force_detach || !need_scaling) {
+ if (force_detach || !need_scaler) {
if (*scaler_id >= 0) {
scaler_state->scaler_users &= ~(1 << scaler_user);
scaler_state->scalers[*scaler_id].in_use = 0;
@@ -4966,7 +4910,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (plane_scaler_check && pixel_format == DRM_FORMAT_NV12 &&
+ if (format && format->format == DRM_FORMAT_NV12 &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
DRM_DEBUG_KMS("NV12: src dimensions not met\n");
return -EINVAL;
@@ -5009,12 +4953,16 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
+ bool need_scaler = false;
+
+ if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ need_scaler = true;
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_vdisplay, false, 0);
+ adjusted_mode->crtc_vdisplay, NULL, need_scaler);
}
/**
@@ -5029,13 +4977,17 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
-
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
-
bool force_detach = !fb || !plane_state->base.visible;
+ bool need_scaler = false;
+
+ /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
+ if (!icl_is_hdr_plane(intel_plane) &&
+ fb && fb->format->format == DRM_FORMAT_NV12)
+ need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base),
@@ -5044,7 +4996,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
drm_rect_height(&plane_state->base.src) >> 16,
drm_rect_width(&plane_state->base.dst),
drm_rect_height(&plane_state->base.dst),
- fb ? true : false, fb ? fb->format->format : 0);
+ fb ? fb->format : NULL, need_scaler);
if (ret || plane_state->scaler_id < 0)
return ret;
@@ -5090,27 +5042,27 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
skl_detach_scaler(crtc, i);
}
-static void skylake_pfit_enable(struct intel_crtc *crtc)
+static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
- struct intel_crtc_scaler_state *scaler_state =
- &crtc->config->scaler_state;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
- if (crtc->config->pch_pfit.enabled) {
+ if (crtc_state->pch_pfit.enabled) {
u16 uv_rgb_hphase, uv_rgb_vphase;
int pfit_w, pfit_h, hscale, vscale;
int id;
- if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
+ if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
return;
- pfit_w = (crtc->config->pch_pfit.size >> 16) & 0xFFFF;
- pfit_h = crtc->config->pch_pfit.size & 0xFFFF;
+ pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
+ pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
- hscale = (crtc->config->pipe_src_w << 16) / pfit_w;
- vscale = (crtc->config->pipe_src_h << 16) / pfit_h;
+ hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
+ vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -5122,18 +5074,18 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
- I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
+ I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
+ I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
}
}
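
The pfit scale factors above are 16.16 fixed point: hscale = (src << 16) / dst, so 0x10000 means 1:1. A minimal sketch of that arithmetic (skl_scaler_calc_phase() itself is not reproduced here):

#include <stdint.h>
#include <stdio.h>

/* 16.16 fixed-point scale factor, as in skylake_pfit_enable(). */
static uint32_t scale_16_16(uint32_t src, uint32_t dst)
{
	return ((uint64_t)src << 16) / dst;
}

int main(void)
{
	/* 1920 source pixels fitted into a 2560-wide window: 0.75 in 16.16. */
	uint32_t hscale = scale_16_16(1920, 2560);

	printf("hscale = 0x%05x (%.3f)\n", hscale, hscale / 65536.0);
	return 0;
}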
-static void ironlake_pfit_enable(struct intel_crtc *crtc)
+static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int pipe = crtc->pipe;
- if (crtc->config->pch_pfit.enabled) {
+ if (crtc_state->pch_pfit.enabled) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
@@ -5143,8 +5095,8 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
- I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
+ I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
+ I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
}
}
@@ -5339,11 +5291,8 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
if (!crtc_state->nv12_planes)
return false;
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
-
- if ((INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv)) ||
- IS_CANNONLAKE(dev_priv))
+ /* WA Display #0827: Gen9:all */
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
return true;
return false;
@@ -5386,7 +5335,6 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
if (needs_nv12_wa(dev_priv, old_crtc_state) &&
!needs_nv12_wa(dev_priv, pipe_config)) {
skl_wa_clkgate(dev_priv, crtc->pipe, false);
- skl_wa_528(dev_priv, crtc->pipe, false);
}
}
@@ -5426,7 +5374,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
needs_nv12_wa(dev_priv, pipe_config)) {
skl_wa_clkgate(dev_priv, crtc->pipe, true);
- skl_wa_528(dev_priv, crtc->pipe, true);
}
/*
@@ -5449,7 +5396,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
*
* WaCxSRDisabledForSpriteScaling:ivb
*/
- if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev))
+ if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
+ old_crtc_state->base.active)
intel_wait_for_vblank(dev_priv, crtc->pipe);
/*
@@ -5480,24 +5428,32 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
intel_update_watermarks(crtc);
}
-static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
+static void intel_crtc_disable_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *p;
- int pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int update_mask = new_crtc_state->update_planes;
+ const struct intel_plane_state *old_plane_state;
+ struct intel_plane *plane;
+ unsigned fb_bits = 0;
+ int i;
- intel_crtc_dpms_overlay_disable(intel_crtc);
+ intel_crtc_dpms_overlay_disable(crtc);
- drm_for_each_plane_mask(p, dev, plane_mask)
- to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);
+ for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
- /*
- * FIXME: Once we grow proper nuclear flip support out of this we need
- * to compute the mask of flip planes precisely. For the time being
- * consider this a flip to a NULL plane.
- */
- intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
+ plane->disable_plane(plane, new_crtc_state);
+
+ if (old_plane_state->base.visible)
+ fb_bits |= plane->frontbuffer_bit;
+ }
+
+ intel_frontbuffer_flip(dev_priv, fb_bits);
}
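
The rewritten loop disables only the planes whose bit is set in update_planes and that sit on this CRTC's pipe. A tiny standalone version of that bitmask filtering (hypothetical plane names):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	const char *names[] = { "primary", "sprite0", "sprite1", "cursor" };
	uint32_t update_mask = BIT(0) | BIT(3); /* planes to disable */

	for (unsigned int id = 0; id < 4; id++) {
		if (!(update_mask & BIT(id)))
			continue;
		printf("disabling %s\n", names[id]);
	}
	return 0;
}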
static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
@@ -5555,7 +5511,8 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
if (conn_state->crtc != crtc)
continue;
- encoder->enable(encoder, crtc_state, conn_state);
+ if (encoder->enable)
+ encoder->enable(encoder, crtc_state, conn_state);
intel_opregion_notify_encoder(encoder, true);
}
}
@@ -5576,7 +5533,8 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
continue;
intel_opregion_notify_encoder(encoder, false);
- encoder->disable(encoder, old_crtc_state, old_conn_state);
+ if (encoder->disable)
+ encoder->disable(encoder, old_crtc_state, old_conn_state);
}
}
@@ -5647,37 +5605,37 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- if (intel_crtc->config->has_pch_encoder)
- intel_prepare_shared_dpll(intel_crtc);
+ if (pipe_config->has_pch_encoder)
+ intel_prepare_shared_dpll(pipe_config);
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
- intel_set_pipe_timings(intel_crtc);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_src_size(pipe_config);
- if (intel_crtc->config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config->fdi_m_n, NULL);
+ if (pipe_config->has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(pipe_config,
+ &pipe_config->fdi_m_n, NULL);
}
- ironlake_set_pipeconf(crtc);
+ ironlake_set_pipeconf(pipe_config);
intel_crtc->active = true;
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- if (intel_crtc->config->has_pch_encoder) {
+ if (pipe_config->has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
- ironlake_fdi_pll_enable(intel_crtc);
+ ironlake_fdi_pll_enable(pipe_config);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
}
- ironlake_pfit_enable(intel_crtc);
+ ironlake_pfit_enable(pipe_config);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -5686,10 +5644,10 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
if (dev_priv->display.initial_watermarks != NULL)
- dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config);
+ dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
intel_enable_pipe(pipe_config);
- if (intel_crtc->config->has_pch_encoder)
+ if (pipe_config->has_pch_encoder)
ironlake_pch_enable(old_intel_state, pipe_config);
assert_vblank_disabled(crtc);
@@ -5706,7 +5664,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
* some interlaced HDMI modes. Let's do the double wait always
* in case there are more corner cases we don't know about.
*/
- if (intel_crtc->config->has_pch_encoder) {
+ if (pipe_config->has_pch_encoder) {
intel_wait_for_vblank(dev_priv, pipe);
intel_wait_for_vblank(dev_priv, pipe);
}
@@ -5740,10 +5698,9 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
uint32_t val;
- val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2);
-
- /* Program B credit equally to all pipes */
- val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes);
+ val = MBUS_DBOX_A_CREDIT(2);
+ val |= MBUS_DBOX_BW_CREDIT(1);
+ val |= MBUS_DBOX_B_CREDIT(8);
I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
}
@@ -5755,7 +5712,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_state);
bool psl_clkgate_wa;
@@ -5766,37 +5723,34 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
- if (intel_crtc->config->shared_dpll)
- intel_enable_shared_dpll(intel_crtc);
-
- if (INTEL_GEN(dev_priv) >= 11)
- icl_map_plls_to_ports(crtc, pipe_config, old_state);
+ if (pipe_config->shared_dpll)
+ intel_enable_shared_dpll(pipe_config);
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_set_pipe_timings(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_src_size(pipe_config);
if (cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(cpu_transcoder)) {
I915_WRITE(PIPE_MULT(cpu_transcoder),
- intel_crtc->config->pixel_multiplier - 1);
+ pipe_config->pixel_multiplier - 1);
}
- if (intel_crtc->config->has_pch_encoder) {
- intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config->fdi_m_n, NULL);
+ if (pipe_config->has_pch_encoder) {
+ intel_cpu_transcoder_set_m_n(pipe_config,
+ &pipe_config->fdi_m_n, NULL);
}
if (!transcoder_is_dsi(cpu_transcoder))
- haswell_set_pipeconf(crtc);
+ haswell_set_pipeconf(pipe_config);
- haswell_set_pipemisc(crtc);
+ haswell_set_pipemisc(pipe_config);
intel_color_set_csc(&pipe_config->base);
@@ -5804,14 +5758,14 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
/* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
- intel_crtc->config->pch_pfit.enabled;
+ pipe_config->pch_pfit.enabled;
if (psl_clkgate_wa)
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
if (INTEL_GEN(dev_priv) >= 9)
- skylake_pfit_enable(intel_crtc);
+ skylake_pfit_enable(pipe_config);
else
- ironlake_pfit_enable(intel_crtc);
+ ironlake_pfit_enable(pipe_config);
/*
* On ILK+ LUT must be loaded before the pipe is running but with
@@ -5844,10 +5798,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
if (!transcoder_is_dsi(cpu_transcoder))
intel_enable_pipe(pipe_config);
- if (intel_crtc->config->has_pch_encoder)
+ if (pipe_config->has_pch_encoder)
lpt_pch_enable(old_intel_state, pipe_config);
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
+ if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
intel_ddi_set_vc_payload_alloc(pipe_config, true);
assert_vblank_disabled(crtc);
@@ -5869,15 +5823,15 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
}
}
-static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
+static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/* To avoid upsetting the power well on haswell, only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
- if (force || crtc->config->pch_pfit.enabled) {
+ if (old_crtc_state->pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
@@ -5908,14 +5862,14 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_disable_pipe(old_crtc_state);
- ironlake_pfit_disable(intel_crtc, false);
+ ironlake_pfit_disable(old_crtc_state);
- if (intel_crtc->config->has_pch_encoder)
+ if (old_crtc_state->has_pch_encoder)
ironlake_fdi_disable(crtc);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
- if (intel_crtc->config->has_pch_encoder) {
+ if (old_crtc_state->has_pch_encoder) {
ironlake_disable_pch_transcoder(dev_priv, pipe);
if (HAS_PCH_CPT(dev_priv)) {
@@ -5966,24 +5920,24 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_dsc_disable(old_crtc_state);
+
if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
else
- ironlake_pfit_disable(intel_crtc, false);
+ ironlake_pfit_disable(old_crtc_state);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_unmap_plls_to_ports(crtc, old_crtc_state, old_state);
+ intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
}
-static void i9xx_pfit_enable(struct intel_crtc *crtc)
+static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *pipe_config = crtc->config;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!pipe_config->gmch_pfit.control)
+ if (!crtc_state->gmch_pfit.control)
return;
/*
@@ -5993,8 +5947,8 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
assert_pipe_disabled(dev_priv, crtc->pipe);
- I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
- I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
+ I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
/* Border color in case we don't scale up to the full screen. Black by
* default, change to something else for debugging. */
@@ -6049,6 +6003,28 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
}
}
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port)
+{
+ switch (dig_port->aux_ch) {
+ case AUX_CH_A:
+ return POWER_DOMAIN_AUX_A;
+ case AUX_CH_B:
+ return POWER_DOMAIN_AUX_B;
+ case AUX_CH_C:
+ return POWER_DOMAIN_AUX_C;
+ case AUX_CH_D:
+ return POWER_DOMAIN_AUX_D;
+ case AUX_CH_E:
+ return POWER_DOMAIN_AUX_E;
+ case AUX_CH_F:
+ return POWER_DOMAIN_AUX_F;
+ default:
+ MISSING_CASE(dig_port->aux_ch);
+ return POWER_DOMAIN_AUX_A;
+ }
+}
+
static u64 get_crtc_power_domains(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
@@ -6128,20 +6104,18 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
if (WARN_ON(intel_crtc->active))
return;
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
- intel_set_pipe_timings(intel_crtc);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_src_size(pipe_config);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- struct drm_i915_private *dev_priv = to_i915(dev);
-
I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
I915_WRITE(CHV_CANVAS(pipe), 0);
}
- i9xx_set_pipeconf(intel_crtc);
+ i9xx_set_pipeconf(pipe_config);
intel_color_set_csc(&pipe_config->base);
@@ -6152,16 +6126,16 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
if (IS_CHERRYVIEW(dev_priv)) {
- chv_prepare_pll(intel_crtc, intel_crtc->config);
- chv_enable_pll(intel_crtc, intel_crtc->config);
+ chv_prepare_pll(intel_crtc, pipe_config);
+ chv_enable_pll(intel_crtc, pipe_config);
} else {
- vlv_prepare_pll(intel_crtc, intel_crtc->config);
- vlv_enable_pll(intel_crtc, intel_crtc->config);
+ vlv_prepare_pll(intel_crtc, pipe_config);
+ vlv_enable_pll(intel_crtc, pipe_config);
}
intel_encoders_pre_enable(crtc, pipe_config, old_state);
- i9xx_pfit_enable(intel_crtc);
+ i9xx_pfit_enable(pipe_config);
intel_color_load_luts(&pipe_config->base);
@@ -6175,13 +6149,13 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
}
-static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
+static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
- I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
+ I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
+ I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
}
static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -6198,15 +6172,15 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
if (WARN_ON(intel_crtc->active))
return;
- i9xx_set_pll_dividers(intel_crtc);
+ i9xx_set_pll_dividers(pipe_config);
- if (intel_crtc_has_dp_encoder(intel_crtc->config))
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ if (intel_crtc_has_dp_encoder(pipe_config))
+ intel_dp_set_m_n(pipe_config, M1_N1);
- intel_set_pipe_timings(intel_crtc);
- intel_set_pipe_src_size(intel_crtc);
+ intel_set_pipe_timings(pipe_config);
+ intel_set_pipe_src_size(pipe_config);
- i9xx_set_pipeconf(intel_crtc);
+ i9xx_set_pipeconf(pipe_config);
intel_crtc->active = true;
@@ -6217,13 +6191,13 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
i9xx_enable_pll(intel_crtc, pipe_config);
- i9xx_pfit_enable(intel_crtc);
+ i9xx_pfit_enable(pipe_config);
intel_color_load_luts(&pipe_config->base);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state,
- intel_crtc->config);
+ pipe_config);
else
intel_update_watermarks(intel_crtc);
intel_enable_pipe(pipe_config);
@@ -6234,12 +6208,12 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_enable(crtc, pipe_config, old_state);
}
-static void i9xx_pfit_disable(struct intel_crtc *crtc)
+static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!crtc->config->gmch_pfit.control)
+ if (!old_crtc_state->gmch_pfit.control)
return;
assert_pipe_disabled(dev_priv, crtc->pipe);
@@ -6272,17 +6246,17 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_disable_pipe(old_crtc_state);
- i9xx_pfit_disable(intel_crtc);
+ i9xx_pfit_disable(old_crtc_state);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
- if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
+ if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
if (IS_CHERRYVIEW(dev_priv))
chv_disable_pll(dev_priv, pipe);
else if (IS_VALLEYVIEW(dev_priv))
vlv_disable_pll(dev_priv, pipe);
else
- i9xx_disable_pll(intel_crtc);
+ i9xx_disable_pll(old_crtc_state);
}
intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
@@ -6356,7 +6330,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
intel_fbc_disable(intel_crtc);
intel_update_watermarks(intel_crtc);
- intel_disable_shared_dpll(intel_crtc);
+ intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
domains = intel_crtc->enabled_power_domains;
for_each_power_domain(domain, domains)
@@ -6434,66 +6408,6 @@ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
}
}
-int intel_connector_init(struct intel_connector *connector)
-{
- struct intel_digital_connector_state *conn_state;
-
- /*
- * Allocate enough memory to hold intel_digital_connector_state.
- * This might be a few bytes too many, but for connectors that don't
- * need it we'll free the state and allocate a smaller one on the first
- * successful commit anyway.
- */
- conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL);
- if (!conn_state)
- return -ENOMEM;
-
- __drm_atomic_helper_connector_reset(&connector->base,
- &conn_state->base);
-
- return 0;
-}
-
-struct intel_connector *intel_connector_alloc(void)
-{
- struct intel_connector *connector;
-
- connector = kzalloc(sizeof *connector, GFP_KERNEL);
- if (!connector)
- return NULL;
-
- if (intel_connector_init(connector) < 0) {
- kfree(connector);
- return NULL;
- }
-
- return connector;
-}
-
-/*
- * Free the bits allocated by intel_connector_alloc.
- * This should only be used after intel_connector_alloc has returned
- * successfully, and before drm_connector_init returns successfully.
- * Otherwise the destroy callbacks for the connector and the state should
- * take care of proper cleanup/free
- */
-void intel_connector_free(struct intel_connector *connector)
-{
- kfree(to_intel_digital_connector_state(connector->base.state));
- kfree(connector);
-}
-
-/* Simple connector->get_hw_state implementation for encoders that support only
- * one connector and no cloning and hence the encoder state determines the state
- * of the connector. */
-bool intel_connector_get_hw_state(struct intel_connector *connector)
-{
- enum pipe pipe = 0;
- struct intel_encoder *encoder = connector->encoder;
-
- return encoder->get_hw_state(encoder, &pipe);
-}
-
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
if (crtc_state->base.enable && crtc_state->has_pch_encoder)
@@ -6604,6 +6518,9 @@ retry:
link_bw, &pipe_config->fdi_m_n, false);
ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
+
if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
@@ -6760,7 +6677,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
- if (pipe_config->ycbcr420 && pipe_config->base.ctm) {
+ if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
+ pipe_config->base.ctm) {
/*
* There is only one pipe CSC unit per pipe, and we need that
* for output conversion from RGB->YCBCR. So if CTM is already
@@ -6835,7 +6754,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
}
void
-intel_link_compute_m_n(int bits_per_pixel, int nlanes,
+intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n)
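
For context, the M/N pairs this function produces encode two ratios on the DisplayPort link: data M/N is approximately (pixel_clock * bpp) / (link_clock * nlanes * 8), and link M/N is approximately pixel_clock / link_clock. A simplified sketch that reduces the ratios with a gcd instead of the driver's fixed-precision rounding:

#include <stdint.h>
#include <stdio.h>

static uint64_t gcd64(uint64_t a, uint64_t b)
{
	while (b) {
		uint64_t t = a % b;
		a = b;
		b = t;
	}
	return a;
}

static void reduce(uint64_t m, uint64_t n, uint64_t *rm, uint64_t *rn)
{
	uint64_t g = gcd64(m, n);

	*rm = m / g;
	*rn = n / g;
}

int main(void)
{
	/* 2560x1440@60-ish: 241500 kHz pixel clock, 24 bpp,
	 * 4 lanes at HBR2 (540000 kHz link clock). */
	uint64_t data_m, data_n, link_m, link_n;

	reduce(241500ULL * 24, 540000ULL * 4 * 8, &data_m, &data_n);
	reduce(241500, 540000, &link_m, &link_n);
	printf("data M/N = %llu/%llu, link M/N = %llu/%llu\n",
	       (unsigned long long)data_m, (unsigned long long)data_n,
	       (unsigned long long)link_m, (unsigned long long)link_n);
	return 0;
}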
@@ -6926,12 +6845,12 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
-static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n)
+static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_link_m_n *m_n)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
@@ -6939,25 +6858,39 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
-static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n,
- struct intel_link_m_n *m2_n2)
+static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+ enum transcoder transcoder)
{
+ if (IS_HASWELL(dev_priv))
+ return transcoder == TRANSCODER_EDP;
+
+ /*
+ * Strictly speaking, some registers are available before
+ * gen7, but we only support DRRS on gen7+.
+ */
+ return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
+}
+
+static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_link_m_n *m_n,
+ const struct intel_link_m_n *m2_n2)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int pipe = crtc->pipe;
- enum transcoder transcoder = crtc->config->cpu_transcoder;
+ enum pipe pipe = crtc->pipe;
+ enum transcoder transcoder = crtc_state->cpu_transcoder;
if (INTEL_GEN(dev_priv) >= 5) {
I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
- /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
- * for gen < 8) and if DRRS is supported (to make sure the
- * registers are not unnecessarily accessed).
+ /*
+ * M2_N2 registers are set only if DRRS is supported
+ * (to make sure the registers are not unnecessarily accessed).
*/
- if (m2_n2 && (IS_CHERRYVIEW(dev_priv) ||
- INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) {
+ if (m2_n2 && crtc_state->has_drrs &&
+ transcoder_has_m2_n2(dev_priv, transcoder)) {
I915_WRITE(PIPE_DATA_M2(transcoder),
TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
@@ -6972,29 +6905,29 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
}
}
-void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
- struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+ const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
if (m_n == M1_N1) {
- dp_m_n = &crtc->config->dp_m_n;
- dp_m2_n2 = &crtc->config->dp_m2_n2;
+ dp_m_n = &crtc_state->dp_m_n;
+ dp_m2_n2 = &crtc_state->dp_m2_n2;
} else if (m_n == M2_N2) {
/*
 * M2_N2 registers are not supported. Hence the m2_n2 divider value
* needs to be programmed into M1_N1.
*/
- dp_m_n = &crtc->config->dp_m2_n2;
+ dp_m_n = &crtc_state->dp_m2_n2;
} else {
DRM_ERROR("Unsupported divider value\n");
return;
}
- if (crtc->config->has_pch_encoder)
- intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
+ if (crtc_state->has_pch_encoder)
+ intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
else
- intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
+ intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
}
static void vlv_compute_dpll(struct intel_crtc *crtc,
@@ -7093,8 +7026,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
/* Set HBR and RBR LPF coefficients */
if (pipe_config->port_clock == 162000 ||
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
+ intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
+ intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
@@ -7121,7 +7054,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_crtc_has_dp_encoder(crtc->config))
+ if (intel_crtc_has_dp_encoder(pipe_config))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
@@ -7400,12 +7333,13 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.dpll = dpll;
}
-static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
+static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
uint32_t crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
@@ -7419,7 +7353,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
crtc_vtotal -= 1;
crtc_vblank_end -= 1;
- if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
else
vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7461,18 +7395,18 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
}
-static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
+static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
- ((intel_crtc->config->pipe_src_w - 1) << 16) |
- (intel_crtc->config->pipe_src_h - 1));
+ ((crtc_state->pipe_src_w - 1) << 16) |
+ (crtc_state->pipe_src_h - 1));
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -7548,29 +7482,30 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
drm_mode_set_name(mode);
}
-static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
+static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
uint32_t pipeconf;
pipeconf = 0;
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
+ pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
- if (intel_crtc->config->double_wide)
+ if (crtc_state->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
/* only g4x and later have fancy bpc/dither controls */
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
/* Bspec claims that we can't use dithering for 30bpp pipes. */
- if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
+ if (crtc_state->dither && crtc_state->pipe_bpp != 30)
pipeconf |= PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
pipeconf |= PIPECONF_6BPC;
break;
@@ -7586,9 +7521,9 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
}
}
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
if (INTEL_GEN(dev_priv) < 4 ||
- intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7596,11 +7531,11 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
pipeconf |= PIPECONF_PROGRESSIVE;
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_crtc->config->limited_color_range)
+ crtc_state->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
- I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
- POSTING_READ(PIPECONF(intel_crtc->pipe));
+ I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
+ POSTING_READ(PIPECONF(crtc->pipe));
}
static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@ -7963,6 +7898,49 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
+static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
+
+ pipe_config->lspcon_downsampling = false;
+
+ if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
+ u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
+
+ if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
+ bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
+ bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
+
+ if (ycbcr420_enabled) {
+ /* We support 4:2:0 in full blend mode only */
+ if (!blend)
+ output = INTEL_OUTPUT_FORMAT_INVALID;
+ else if (!(IS_GEMINILAKE(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 10))
+ output = INTEL_OUTPUT_FORMAT_INVALID;
+ else
+ output = INTEL_OUTPUT_FORMAT_YCBCR420;
+ } else {
+ /*
+ * Currently there is no interface defined to
+ * check user preference between RGB/YCBCR444
+ * or YCBCR420. So the only possible case for
+ * YCBCR444 usage is driving YCBCR420 output
+ * with LSPCON, when the pipe is configured for
+ * YCBCR444 output and LSPCON takes care of
+ * downsampling it.
+ */
+ pipe_config->lspcon_downsampling = true;
+ output = INTEL_OUTPUT_FORMAT_YCBCR444;
+ }
+ }
+ }
+
+ pipe_config->output_format = output;
+}
+
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -7975,6 +7953,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
@@ -8506,16 +8485,16 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
lpt_init_pch_refclk(dev_priv);
}
-static void ironlake_set_pipeconf(struct drm_crtc *crtc)
+static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
uint32_t val;
val = 0;
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
val |= PIPECONF_6BPC;
break;
@@ -8533,32 +8512,32 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config->dither)
+ if (crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
- if (intel_crtc->config->limited_color_range)
+ if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
-static void haswell_set_pipeconf(struct drm_crtc *crtc)
+static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val = 0;
- if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
+ if (IS_HASWELL(dev_priv) && crtc_state->dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
@@ -8567,16 +8546,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
POSTING_READ(PIPECONF(cpu_transcoder));
}
-static void haswell_set_pipemisc(struct drm_crtc *crtc)
+static void haswell_set_pipemisc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *config = intel_crtc->config;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
u32 val = 0;
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
val |= PIPEMISC_DITHER_6_BPC;
break;
@@ -8594,14 +8572,16 @@ static void haswell_set_pipemisc(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config->dither)
+ if (crtc_state->dither)
val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
- if (config->ycbcr420) {
- val |= PIPEMISC_OUTPUT_COLORSPACE_YUV |
- PIPEMISC_YUV420_ENABLE |
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ val |= PIPEMISC_YUV420_ENABLE |
PIPEMISC_YUV420_MODE_FULL_BLEND;
- }
I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
}
@@ -8812,12 +8792,8 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
- /* Read M2_N2 registers only for gen < 8 (M2_N2 available for
- * gen < 8) and if DRRS is supported (to make sure the
- * registers are not unnecessarily read).
- */
- if (m2_n2 && INTEL_GEN(dev_priv) < 8 &&
- crtc->config->has_drrs) {
+
+ if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
@@ -8993,7 +8969,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, plane_id));
- stride_mult = intel_fb_stride_alignment(fb, 0);
+ stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
@@ -9049,6 +9025,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = NULL;
@@ -9356,10 +9333,12 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->base.state);
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+ IS_ICELAKE(dev_priv)) {
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
@@ -9397,30 +9376,17 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
u32 temp;
/* TODO: TBT pll not implemented. */
- switch (port) {
- case PORT_A:
- case PORT_B:
+ if (intel_port_is_combophy(dev_priv, port)) {
temp = I915_READ(DPCLKA_CFGCR0_ICL) &
DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
- if (WARN_ON(id != DPLL_ID_ICL_DPLL0 && id != DPLL_ID_ICL_DPLL1))
+ if (WARN_ON(!intel_dpll_is_combophy(id)))
return;
- break;
- case PORT_C:
- id = DPLL_ID_ICL_MGPLL1;
- break;
- case PORT_D:
- id = DPLL_ID_ICL_MGPLL2;
- break;
- case PORT_E:
- id = DPLL_ID_ICL_MGPLL3;
- break;
- case PORT_F:
- id = DPLL_ID_ICL_MGPLL4;
- break;
- default:
- MISSING_CASE(port);
+ } else if (intel_port_is_tc(dev_priv, port)) {
+ id = icl_port_to_mg_pll_id(port);
+ } else {
+ WARN(1, "Invalid port %x\n", port);
return;
}
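The deleted switch spells out the TC-port-to-MG-PLL mapping (C→1, D→2, E→3, F→4); the new icl_port_to_mg_pll_id() helper presumably encodes the same linear relation. A hedged sketch, since the helper body is not part of this hunk:

    /* Illustrative only: TC ports C..F map linearly onto MG PLLs 1..4,
     * matching the switch statement removed above.
     */
    static enum intel_dpll_id icl_port_to_mg_pll_id_sketch(enum port port)
    {
            return DPLL_ID_ICL_MGPLL1 + (port - PORT_C);
    }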
@@ -9510,11 +9476,18 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
+ unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
+ unsigned long enabled_panel_transcoders = 0;
+ enum transcoder panel_transcoder;
u32 tmp;
+ if (IS_ICELAKE(dev_priv))
+ panel_transcoder_mask |=
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
- * transcoder handled below.
+ * and DSI transcoders handled below.
*/
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -9522,29 +9495,49 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always-on power).
*/
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
- if (tmp & TRANS_DDI_FUNC_ENABLE) {
- enum pipe trans_edp_pipe;
+ for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
+ enum pipe trans_pipe;
+
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+ if (!(tmp & TRANS_DDI_FUNC_ENABLE))
+ continue;
+
+ /*
+ * Log all enabled ones, only use the first one.
+ *
+ * FIXME: This won't work for two separate DSI displays.
+ */
+ enabled_panel_transcoders |= BIT(panel_transcoder);
+ if (enabled_panel_transcoders != BIT(panel_transcoder))
+ continue;
+
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- WARN(1, "unknown pipe linked to edp transcoder\n");
+ WARN(1, "unknown pipe linked to transcoder %s\n",
+ transcoder_name(panel_transcoder));
/* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
case TRANS_DDI_EDP_INPUT_A_ON:
- trans_edp_pipe = PIPE_A;
+ trans_pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
- trans_edp_pipe = PIPE_B;
+ trans_pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
- trans_edp_pipe = PIPE_C;
+ trans_pipe = PIPE_C;
break;
}
- if (trans_edp_pipe == crtc->pipe)
- pipe_config->cpu_transcoder = TRANSCODER_EDP;
+ if (trans_pipe == crtc->pipe)
+ pipe_config->cpu_transcoder = panel_transcoder;
}
+ /*
+ * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
+ */
+ WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+ enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
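The loop above replaces the single TRANSCODER_EDP check with a "log every enabled panel transcoder, act only on the first" walk over a bitmask. A minimal sketch of the same idiom in plain C (transcoder_enabled() and use_transcoder() are illustrative stand-ins, BIT() as in the kernel):

    /* Sketch only: the mask-walk idiom used above. */
    static void walk_panel_transcoders(unsigned long mask)
    {
            unsigned long enabled = 0;
            int t;

            for (t = 0; t < 32; t++) {
                    if (!(mask & BIT(t)) || !transcoder_enabled(t))
                            continue;
                    enabled |= BIT(t);
                    if (enabled != BIT(t))  /* already acted on one */
                            continue;
                    use_transcoder(t);      /* act only on the first */
            }
    }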
@@ -9677,33 +9670,18 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
if (!active)
goto out;
- if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+ IS_ICELAKE(dev_priv)) {
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
intel_get_pipe_src_size(crtc, pipe_config);
+ intel_get_crtc_ycbcr_config(crtc, pipe_config);
pipe_config->gamma_mode =
I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
- if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
- u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
- bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV;
-
- if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
- bool blend_mode_420 = tmp &
- PIPEMISC_YUV420_MODE_FULL_BLEND;
-
- pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE;
- if (pipe_config->ycbcr420 != clrspace_yuv ||
- pipe_config->ycbcr420 != blend_mode_420)
- DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp);
- } else if (clrspace_yuv) {
- DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n");
- }
- }
-
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
power_domain_mask |= BIT_ULL(power_domain);
@@ -9749,7 +9727,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
- if (INTEL_INFO(dev_priv)->cursor_needs_physical)
+ if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
base = obj->phys_handle->busaddr;
else
base = intel_plane_ggtt_offset(plane_state);
@@ -9972,15 +9950,13 @@ static void i845_update_cursor(struct intel_plane *plane,
I915_WRITE_FW(CURPOS(PIPE_A), pos);
}
- POSTING_READ_FW(CURCNTR(PIPE_A));
-
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void i845_disable_cursor(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
- i845_update_cursor(plane, NULL, NULL);
+ i845_update_cursor(plane, crtc_state, NULL);
}
static bool i845_cursor_get_hw_state(struct intel_plane *plane,
@@ -10171,8 +10147,8 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* On some platforms writing CURCNTR first will also
* cause CURPOS to be armed by the CURBASE write.
* Without the CURCNTR write the CURPOS write would
- * arm itself. Thus we always start the full update
- * with a CURCNTR write.
+ * arm itself. Thus we always update CURCNTR before
+ * CURPOS.
*
* On other platforms CURPOS always requires the
* CURBASE write to arm the update. Additionally
@@ -10182,15 +10158,20 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* cursor that doesn't appear to move, or even change
* shape. Thus we always write CURBASE.
*
- * CURCNTR and CUR_FBC_CTL are always
- * armed by the CURBASE write only.
+ * The other registers are armed by the CURBASE write
+ * except when the plane is getting enabled at which time
+ * the CURCNTR write arms the update.
*/
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_write_cursor_wm(plane, crtc_state);
+
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
- I915_WRITE_FW(CURCNTR(pipe), cntl);
if (HAS_CUR_FBC(dev_priv))
I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
+ I915_WRITE_FW(CURCNTR(pipe), cntl);
I915_WRITE_FW(CURPOS(pipe), pos);
I915_WRITE_FW(CURBASE(pipe), base);
@@ -10202,15 +10183,13 @@ static void i9xx_update_cursor(struct intel_plane *plane,
I915_WRITE_FW(CURBASE(pipe), base);
}
- POSTING_READ_FW(CURBASE(pipe));
-
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
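Per the comment above, the safe order is: self-arming registers before CURPOS, and CURBASE last. A hedged sketch of the resulting sequence (write_reg() is an illustrative stand-in for the I915_WRITE_FW() calls; the register names are the ones used above):

    /* Sketch only: ordering rules for the cursor register update. */
    static void cursor_write_order(void)
    {
            write_reg(CUR_FBC_CTL);  /* armed by the CURBASE write */
            write_reg(CURCNTR);      /* must precede CURPOS; arms the
                                      * update itself on plane enable */
            write_reg(CURPOS);
            write_reg(CURBASE);      /* always last: arms everything else */
    }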
static void i9xx_disable_cursor(struct intel_plane *plane,
- struct intel_crtc *crtc)
+ const struct intel_crtc_state *crtc_state)
{
- i9xx_update_cursor(plane, NULL, NULL);
+ i9xx_update_cursor(plane, crtc_state, NULL);
}
static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
@@ -10808,14 +10787,40 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
pipe_config->fb_bits |= plane->frontbuffer_bit;
/*
+ * ILK/SNB DVSACNTR/Sprite Enable
+ * IVB SPR_CTL/Sprite Enable
+ * "When in Self Refresh Big FIFO mode, a write to enable the
+ * plane will be internally buffered and delayed while Big FIFO
+ * mode is exiting."
+ *
+ * Which means that enabling the sprite can take an extra frame
+ * when we start in big FIFO mode (LP1+). Thus we need to drop
+ * down to LP0 and wait for vblank in order to make sure the
+ * sprite gets enabled on the next vblank after the register write.
+ * Doing otherwise would risk enabling the sprite one frame after
+ * we've already signalled flip completion. We can resume LP1+
+ * once the sprite has been enabled.
+ *
+ *
* WaCxSRDisabledForSpriteScaling:ivb
+ * IVB SPR_SCALE/Scaling Enable
+ * "Low Power watermarks must be disabled for at least one
+ * frame before enabling sprite scaling, and kept disabled
+ * until sprite scaling is disabled."
*
- * cstate->update_wm was already set above, so this flag will
- * take effect when we commit and program watermarks.
+ * ILK/SNB DVSASCALE/Scaling Enable
+ * "When in Self Refresh Big FIFO mode, scaling enable will be
+ * masked off while Big FIFO mode is exiting."
+ *
+ * Despite the w/a only being listed for IVB we assume that
+ * the ILK/SNB note has similar ramifications, hence we apply
+ * the w/a on all three platforms.
*/
- if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
- needs_scaling(to_intel_plane_state(plane_state)) &&
- !needs_scaling(old_plane_state))
+ if (plane->id == PLANE_SPRITE0 &&
+ (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
+ IS_IVYBRIDGE(dev_priv)) &&
+ (turn_on || (!needs_scaling(old_plane_state) &&
+ needs_scaling(to_intel_plane_state(plane_state)))))
pipe_config->disable_lp_wm = true;
return 0;
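Restating the condition above as a predicate makes the scope of the w/a explicit. A hedged sketch (parameter names are illustrative):

    /* Sketch: LP watermarks must be dropped when sprite 0 on
     * ILK/SNB/IVB is being turned on, or when its scaling is being
     * turned on, per the notes quoted above.
     */
    static bool needs_lp_wm_disable(bool is_sprite0, bool ilk_snb_ivb,
                                    bool turn_on, bool was_scaled,
                                    bool will_scale)
    {
            return is_sprite0 && ilk_snb_ivb &&
                   (turn_on || (!was_scaled && will_scale));
    }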
@@ -10851,6 +10856,101 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
return true;
}
+static int icl_add_linked_planes(struct intel_atomic_state *state)
+{
+ struct intel_plane *plane, *linked;
+ struct intel_plane_state *plane_state, *linked_plane_state;
+ int i;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ linked = plane_state->linked_plane;
+
+ if (!linked)
+ continue;
+
+ linked_plane_state = intel_atomic_get_plane_state(state, linked);
+ if (IS_ERR(linked_plane_state))
+ return PTR_ERR(linked_plane_state);
+
+ WARN_ON(linked_plane_state->linked_plane != plane);
+ WARN_ON(linked_plane_state->slave == plane_state->slave);
+ }
+
+ return 0;
+}
+
+static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
+ struct intel_plane *plane, *linked;
+ struct intel_plane_state *plane_state;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ /*
+ * Destroy all old plane links and make the slave plane invisible
+ * in the crtc_state->active_planes mask.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
+ continue;
+
+ plane_state->linked_plane = NULL;
+ if (plane_state->slave && !plane_state->base.visible) {
+ crtc_state->active_planes &= ~BIT(plane->id);
+ crtc_state->update_planes |= BIT(plane->id);
+ }
+
+ plane_state->slave = false;
+ }
+
+ if (!crtc_state->nv12_planes)
+ return 0;
+
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ struct intel_plane_state *linked_state = NULL;
+
+ if (plane->pipe != crtc->pipe ||
+ !(crtc_state->nv12_planes & BIT(plane->id)))
+ continue;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
+ if (!icl_is_nv12_y_plane(linked->id))
+ continue;
+
+ if (crtc_state->active_planes & BIT(linked->id))
+ continue;
+
+ linked_state = intel_atomic_get_plane_state(state, linked);
+ if (IS_ERR(linked_state))
+ return PTR_ERR(linked_state);
+
+ break;
+ }
+
+ if (!linked_state) {
+ DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
+ hweight8(crtc_state->nv12_planes));
+
+ return -EINVAL;
+ }
+
+ plane_state->linked_plane = linked;
+
+ linked_state->slave = true;
+ linked_state->linked_plane = plane;
+ crtc_state->active_planes |= BIT(linked->id);
+ crtc_state->update_planes |= BIT(linked->id);
+ DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+ }
+
+ return 0;
+}
+
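Together, icl_check_nv12_planes() establishes the master/slave links and icl_add_linked_planes() re-verifies them. A hedged, self-contained sketch of the symmetry invariant the WARN_ONs assert (simplified stand-in type, not the driver structs):

    /* Sketch only: links must be symmetric and exactly one side is
     * the slave.
     */
    struct plane_link { struct plane_link *linked; bool slave; };

    static bool link_consistent(struct plane_link *master,
                                struct plane_link *slave)
    {
            return master->linked == slave && slave->linked == master &&
                   !master->slave && slave->slave;
    }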
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
@@ -10859,7 +10959,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
- struct drm_atomic_state *state = crtc_state->state;
int ret;
bool mode_changed = needs_modeset(crtc_state);
@@ -10896,8 +10995,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
}
- if (dev_priv->display.compute_intermediate_wm &&
- !to_intel_atomic_state(state)->skip_intermediate_wm) {
+ if (dev_priv->display.compute_intermediate_wm) {
if (WARN_ON(!dev_priv->display.compute_pipe_wm))
return 0;
@@ -10913,9 +11011,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
return ret;
}
- } else if (dev_priv->display.compute_intermediate_wm) {
- if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
- pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
}
if (INTEL_GEN(dev_priv) >= 9) {
@@ -10923,6 +11018,8 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
ret = skl_update_scaler_crtc(pipe_config);
if (!ret)
+ ret = icl_check_nv12_planes(pipe_config);
+ if (!ret)
ret = skl_check_pipe_max_pixel_rate(intel_crtc,
pipe_config);
if (!ret)
@@ -10937,8 +11034,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
- .atomic_begin = intel_begin_crtc_commit,
- .atomic_flush = intel_finish_crtc_commit,
.atomic_check = intel_crtc_atomic_check,
};
@@ -10967,30 +11062,42 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
drm_connector_list_iter_end(&conn_iter);
}
-static void
-connected_sink_compute_bpp(struct intel_connector *connector,
- struct intel_crtc_state *pipe_config)
+static int
+compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+ struct intel_crtc_state *pipe_config)
{
- const struct drm_display_info *info = &connector->base.display_info;
- int bpp = pipe_config->pipe_bpp;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
- connector->base.base.id,
- connector->base.name);
+ struct drm_connector *connector = conn_state->connector;
+ const struct drm_display_info *info = &connector->display_info;
+ int bpp;
- /* Don't use an invalid EDID bpc value */
- if (info->bpc != 0 && info->bpc * 3 < bpp) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
- bpp, info->bpc * 3);
- pipe_config->pipe_bpp = info->bpc * 3;
+ switch (conn_state->max_bpc) {
+ case 6 ... 7:
+ bpp = 6 * 3;
+ break;
+ case 8 ... 9:
+ bpp = 8 * 3;
+ break;
+ case 10 ... 11:
+ bpp = 10 * 3;
+ break;
+ case 12:
+ bpp = 12 * 3;
+ break;
+ default:
+ return -EINVAL;
}
- /* Clamp bpp to 8 on screens without EDID 1.4 */
- if (info->bpc == 0 && bpp > 24) {
- DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
- bpp);
- pipe_config->pipe_bpp = 24;
+ if (bpp < pipe_config->pipe_bpp) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+ "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+ connector->base.id, connector->name,
+ bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
+ pipe_config->pipe_bpp);
+
+ pipe_config->pipe_bpp = bpp;
}
+
+ return 0;
}
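The switch above rounds the connector's max_bpc down to the nearest supported bpc (6/8/10/12) and converts to bits per pixel; e.g. max_bpc = 11 yields 10 bpc and pipe_bpp = 30, while max_bpc = 5 is rejected. A hedged equivalent with a hypothetical helper name:

    /* Sketch: same mapping as the switch above. */
    static int max_bpc_to_pipe_bpp(int max_bpc)
    {
            if (max_bpc < 6 || max_bpc > 12)
                    return -EINVAL;
            return (max_bpc & ~1) * 3;  /* round down to even bpc, x3 channels */
    }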
static int
@@ -10998,7 +11105,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_atomic_state *state;
+ struct drm_atomic_state *state = pipe_config->base.state;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int bpp, i;
@@ -11011,21 +11118,21 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
else
bpp = 8*3;
-
pipe_config->pipe_bpp = bpp;
- state = pipe_config->base.state;
-
- /* Clamp display bpp to EDID value */
+ /* Clamp display bpp to connector max bpp */
for_each_new_connector_in_state(state, connector, connector_state, i) {
+ int ret;
+
if (connector_state->crtc != &crtc->base)
continue;
- connected_sink_compute_bpp(to_intel_connector(connector),
- pipe_config);
+ ret = compute_sink_pipe_bpp(connector_state, pipe_config);
+ if (ret)
+ return ret;
}
- return bpp;
+ return 0;
}
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
@@ -11095,6 +11202,20 @@ static void snprintf_output_types(char *buf, size_t len,
WARN_ON_ONCE(output_types != 0);
}
+static const char * const output_format_str[] = {
+ [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
+ [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
+ [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
+ [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
+};
+
+static const char *output_formats(enum intel_output_format format)
+{
+ if (format >= ARRAY_SIZE(output_format_str))
+ format = INTEL_OUTPUT_FORMAT_INVALID;
+ return output_format_str[format];
+}
+
static void intel_dump_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
const char *context)
@@ -11114,6 +11235,9 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("output_types: %s (0x%x)\n",
buf, pipe_config->output_types);
+ DRM_DEBUG_KMS("output format: %s\n",
+ output_formats(pipe_config->output_format));
+
DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
transcoder_name(pipe_config->cpu_transcoder),
pipe_config->pipe_bpp, pipe_config->dither);
@@ -11123,9 +11247,6 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes,
&pipe_config->fdi_m_n);
- if (pipe_config->ycbcr420)
- DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n");
-
if (intel_crtc_has_dp_encoder(pipe_config)) {
intel_dump_m_n_config(pipe_config, "dp m_n",
pipe_config->lane_count, &pipe_config->dp_m_n);
@@ -11314,7 +11435,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int base_bpp, ret = -EINVAL;
+ int base_bpp, ret;
int i;
bool retry = true;
@@ -11336,10 +11457,12 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
(DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
- base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
- pipe_config);
- if (base_bpp < 0)
- goto fail;
+ ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
+ pipe_config);
+ if (ret)
+ return ret;
+
+ base_bpp = pipe_config->pipe_bpp;
/*
* Determine the real pipe dimensions. Note that stereo modes can
@@ -11361,7 +11484,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
- goto fail;
+ return -EINVAL;
}
/*
@@ -11397,7 +11520,7 @@ encoder_retry:
if (!(encoder->compute_config(encoder, pipe_config, connector_state))) {
DRM_DEBUG_KMS("Encoder config failure\n");
- goto fail;
+ return -EINVAL;
}
}
@@ -11408,16 +11531,16 @@ encoder_retry:
* pipe_config->pixel_multiplier;
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
if (ret < 0) {
DRM_DEBUG_KMS("CRTC fixup failed\n");
- goto fail;
+ return ret;
}
if (ret == RETRY) {
- if (WARN(!retry, "loop in pipe configuration computation\n")) {
- ret = -EINVAL;
- goto fail;
- }
+ if (WARN(!retry, "loop in pipe configuration computation\n"))
+ return -EINVAL;
DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
retry = false;
@@ -11433,8 +11556,7 @@ encoder_retry:
DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
-fail:
- return ret;
+ return 0;
}
static bool intel_fuzzy_clock_check(int clock1, int clock2)
@@ -11703,6 +11825,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
PIPE_CONF_CHECK_I(pixel_multiplier);
+ PIPE_CONF_CHECK_I(output_format);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -11711,7 +11834,6 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
- PIPE_CONF_CHECK_BOOL(ycbcr420);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
@@ -11833,6 +11955,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct skl_pipe_wm hw_wm, *sw_wm;
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -11843,6 +11967,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
skl_pipe_wm_get_hw_state(crtc, &hw_wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
+ skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
@@ -11885,8 +12011,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][plane];
- sw_ddb_entry = &sw_ddb->plane[pipe][plane];
+ hw_ddb_entry = &hw_ddb_y[plane];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
@@ -11935,8 +12061,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
- sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
+ hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+ sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
@@ -12220,8 +12346,9 @@ intel_modeset_verify_disabled(struct drm_device *dev,
verify_disabled_dpll_state(dev);
}
-static void update_scanline_offset(struct intel_crtc *crtc)
+static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
/*
@@ -12252,7 +12379,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* answer that's slightly in the future.
*/
if (IS_GEN2(dev_priv)) {
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
int vtotal;
vtotal = adjusted_mode->crtc_vtotal;
@@ -12261,7 +12388,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
crtc->scanline_offset = vtotal - 1;
} else if (HAS_DDI(dev_priv) &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
crtc->scanline_offset = 2;
} else
crtc->scanline_offset = 1;
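A hedged restatement of the selection above (the bools stand in for the IS_GEN2() and HAS_DDI()+HDMI checks; gen2 interlace handling is elided here as it is in the hunk):

    /* Sketch: scanline offset per platform, as chosen above. */
    static int scanline_offset_sketch(bool is_gen2, bool ddi_hdmi, int vtotal)
    {
            if (is_gen2)
                    return vtotal - 1;  /* see the comment above */
            if (ddi_hdmi)
                    return 2;
            return 1;
    }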
@@ -12544,6 +12671,8 @@ static int intel_atomic_check(struct drm_device *dev,
}
ret = intel_modeset_pipe_config(crtc, pipe_config);
+ if (ret == -EDEADLK)
+ return ret;
if (ret) {
intel_dump_pipe_config(to_intel_crtc(crtc),
pipe_config, "[failed]");
@@ -12575,6 +12704,10 @@ static int intel_atomic_check(struct drm_device *dev,
intel_state->cdclk.logical = dev_priv->cdclk.logical;
}
+ ret = icl_add_linked_planes(intel_state);
+ if (ret)
+ return ret;
+
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
@@ -12614,7 +12747,7 @@ static void intel_update_crtc(struct drm_crtc *crtc,
to_intel_plane(crtc->primary));
if (modeset) {
- update_scanline_offset(intel_crtc);
+ update_scanline_offset(pipe_config);
dev_priv->display.crtc_enable(pipe_config, state);
/* vblanks work again, re-enable pipe CRC. */
@@ -12627,7 +12760,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
if (new_plane_state)
intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
- drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
+ intel_begin_crtc_commit(crtc, old_crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+ else
+ i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
+
+ intel_finish_crtc_commit(crtc, old_crtc_state);
}
static void intel_update_crtcs(struct drm_atomic_state *state)
@@ -12659,13 +12799,12 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
int i;
u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
-
- const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
/* ignore allocations for crtc's that have been turned off. */
if (new_crtc_state->active)
- entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
+ entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
/* If 2nd DBuf slice required, enable it here */
if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
@@ -12691,14 +12830,13 @@ static void skl_update_crtcs(struct drm_atomic_state *state)
if (updated & cmask || !cstate->base.active)
continue;
- if (skl_ddb_allocation_overlaps(dev_priv,
+ if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
entries,
- &cstate->wm.skl.ddb,
- i))
+ INTEL_INFO(dev_priv)->num_pipes, i))
continue;
updated |= cmask;
- entries[i] = &cstate->wm.skl.ddb;
+ entries[i] = cstate->wm.skl.ddb;
/*
* If this is an already active pipe, its DDB changed,
@@ -12788,8 +12926,9 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
struct drm_crtc *crtc;
- struct intel_crtc_state *intel_cstate;
+ struct intel_crtc *intel_crtc;
u64 put_domains[I915_MAX_PIPES] = {};
int i;
@@ -12801,24 +12940,25 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
+ new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
+ intel_crtc = to_intel_crtc(crtc);
if (needs_modeset(new_crtc_state) ||
to_intel_crtc_state(new_crtc_state)->update_pipe) {
- put_domains[to_intel_crtc(crtc)->pipe] =
+ put_domains[intel_crtc->pipe] =
modeset_get_crtc_power_domains(crtc,
- to_intel_crtc_state(new_crtc_state));
+ new_intel_crtc_state);
}
if (!needs_modeset(new_crtc_state))
continue;
- intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
- to_intel_crtc_state(new_crtc_state));
+ intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
if (old_crtc_state->active) {
- intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
+ intel_crtc_disable_planes(intel_state, intel_crtc);
/*
* We need to disable pipe CRC before disabling the pipe,
@@ -12826,10 +12966,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
*/
intel_crtc_disable_pipe_crc(intel_crtc);
- dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state);
+ dev_priv->display.crtc_disable(old_intel_crtc_state, state);
intel_crtc->active = false;
intel_fbc_disable(intel_crtc);
- intel_disable_shared_dpll(intel_crtc);
+ intel_disable_shared_dpll(old_intel_crtc_state);
/*
* Underruns don't always raise
@@ -12843,7 +12983,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
!HAS_GMCH_DISPLAY(dev_priv) &&
dev_priv->display.initial_watermarks)
dev_priv->display.initial_watermarks(intel_state,
- to_intel_crtc_state(new_crtc_state));
+ new_intel_crtc_state);
}
}
@@ -12902,11 +13042,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* TODO: Move this (and other cleanup) to an async worker eventually.
*/
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
- intel_cstate = to_intel_crtc_state(new_crtc_state);
+ new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
if (dev_priv->display.optimize_watermarks)
dev_priv->display.optimize_watermarks(intel_state,
- intel_cstate);
+ new_intel_crtc_state);
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -13173,7 +13313,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
struct i915_vma *vma;
if (plane->id == PLANE_CURSOR &&
- INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
const int align = intel_cursor_alignment(dev_priv);
int err;
@@ -13289,13 +13429,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
- fb_obj_bump_render_priority(obj);
-
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
if (ret)
return ret;
+ fb_obj_bump_render_priority(obj);
intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
if (!new_state->fence) { /* implicit fencing */
@@ -13426,7 +13565,7 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
if (intel_cstate->update_pipe)
intel_update_pipe_config(old_intel_cstate, intel_cstate);
else if (INTEL_GEN(dev_priv) >= 9)
- skl_detach_scalers(intel_crtc);
+ skl_detach_scalers(intel_cstate);
out:
if (dev_priv->display.atomic_update_watermarks)
@@ -13528,56 +13667,6 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
}
}
-static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
- u32 format, u64 modifier)
-{
- struct intel_plane *plane = to_intel_plane(_plane);
-
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- break;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (!plane->has_ccs)
- return false;
- break;
- default:
- return false;
- }
-
- switch (format) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- if (is_ccs_modifier(modifier))
- return true;
- /* fall through */
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- case DRM_FORMAT_NV12:
- if (modifier == I915_FORMAT_MOD_Yf_TILED)
- return true;
- /* fall through */
- case DRM_FORMAT_C8:
- if (modifier == DRM_FORMAT_MOD_LINEAR ||
- modifier == I915_FORMAT_MOD_X_TILED ||
- modifier == I915_FORMAT_MOD_Y_TILED)
- return true;
- /* fall through */
- default:
- return false;
- }
-}
-
static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
@@ -13585,18 +13674,7 @@ static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
format == DRM_FORMAT_ARGB8888;
}
-static struct drm_plane_funcs skl_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = intel_plane_destroy,
- .atomic_get_property = intel_plane_atomic_get_property,
- .atomic_set_property = intel_plane_atomic_set_property,
- .atomic_duplicate_state = intel_plane_duplicate_state,
- .atomic_destroy_state = intel_plane_destroy_state,
- .format_mod_supported = skl_plane_format_mod_supported,
-};
-
-static struct drm_plane_funcs i965_plane_funcs = {
+static const struct drm_plane_funcs i965_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@@ -13607,7 +13685,7 @@ static struct drm_plane_funcs i965_plane_funcs = {
.format_mod_supported = i965_plane_format_mod_supported,
};
-static struct drm_plane_funcs i8xx_plane_funcs = {
+static const struct drm_plane_funcs i8xx_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = intel_plane_destroy,
@@ -13633,14 +13711,16 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state, *new_plane_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *old_fb;
- struct drm_crtc_state *crtc_state = crtc->state;
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->state);
+ struct intel_crtc_state *new_crtc_state;
/*
* When crtc is inactive or there is a modeset pending,
* wait for it to complete in the slowpath
*/
- if (!crtc_state->active || needs_modeset(crtc_state) ||
- to_intel_crtc_state(crtc_state)->update_pipe)
+ if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
+ crtc_state->update_pipe)
goto slow;
old_plane_state = plane->state;
@@ -13670,6 +13750,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
if (!new_plane_state)
return -ENOMEM;
+ new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
+ if (!new_crtc_state) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
drm_atomic_set_fb_for_plane(new_plane_state, fb);
new_plane_state->src_x = src_x;
@@ -13681,9 +13767,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
new_plane_state->crtc_w = crtc_w;
new_plane_state->crtc_h = crtc_h;
- ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state),
- to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */
- to_intel_plane_state(plane->state),
+ ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
+ to_intel_plane_state(old_plane_state),
to_intel_plane_state(new_plane_state));
if (ret)
goto out_free;
@@ -13705,14 +13790,25 @@ intel_legacy_cursor_update(struct drm_plane *plane,
/* Swap plane state */
plane->state = new_plane_state;
+ /*
+ * We cannot swap crtc_state as it may be in use by an atomic commit or
+ * page flip that's running simultaneously. If we swap crtc_state and
+ * destroy the old state, we will cause a use-after-free there.
+ *
+ * Only update active_planes, which is needed for our internal
+ * bookkeeping. Either value will do the right thing when updating
+ * planes atomically. If the cursor was part of the atomic update then
+ * we would have taken the slowpath.
+ */
+ crtc_state->active_planes = new_crtc_state->active_planes;
+
if (plane->state->visible) {
trace_intel_update_plane(plane, to_intel_crtc(crtc));
- intel_plane->update_plane(intel_plane,
- to_intel_crtc_state(crtc->state),
+ intel_plane->update_plane(intel_plane, crtc_state,
to_intel_plane_state(plane->state));
} else {
trace_intel_disable_plane(plane, to_intel_crtc(crtc));
- intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
+ intel_plane->disable_plane(intel_plane, crtc_state);
}
intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
@@ -13720,6 +13816,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
out_free:
+ if (new_crtc_state)
+ intel_crtc_destroy_state(crtc, &new_crtc_state->base);
if (ret)
intel_plane_destroy_state(plane, new_plane_state);
else
@@ -13760,176 +13858,90 @@ static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
return i9xx_plane == PLANE_A;
}
-static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- if (!HAS_FBC(dev_priv))
- return false;
-
- return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
-}
-
-bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- /*
- * FIXME: ICL requires two hardware planes for scanning out NV12
- * framebuffers. Do not advertize support until this is implemented.
- */
- if (INTEL_GEN(dev_priv) >= 11)
- return false;
-
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
- return false;
-
- if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
- return false;
-
- if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
- return false;
-
- return true;
-}
-
static struct intel_plane *
intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct intel_plane *primary = NULL;
- struct intel_plane_state *state = NULL;
+ struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
- const uint32_t *intel_primary_formats;
unsigned int supported_rotations;
- unsigned int num_formats;
- const uint64_t *modifiers;
+ unsigned int possible_crtcs;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
int ret;
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (!primary) {
- ret = -ENOMEM;
- goto fail;
- }
-
- state = intel_create_plane_state(&primary->base);
- if (!state) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_universal_plane_create(dev_priv, pipe,
+ PLANE_PRIMARY);
- primary->base.state = &state->base;
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
- if (INTEL_GEN(dev_priv) >= 9)
- state->scaler_id = -1;
- primary->pipe = pipe;
+ plane->pipe = pipe;
/*
* On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
* port are hooked to pipe B. Hence we want plane A feeding pipe B.
*/
if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
- primary->i9xx_plane = (enum i9xx_plane_id) !pipe;
- else
- primary->i9xx_plane = (enum i9xx_plane_id) pipe;
- primary->id = PLANE_PRIMARY;
- primary->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, primary->id);
-
- if (INTEL_GEN(dev_priv) >= 9)
- primary->has_fbc = skl_plane_has_fbc(dev_priv,
- primary->pipe,
- primary->id);
+ plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
else
- primary->has_fbc = i9xx_plane_has_fbc(dev_priv,
- primary->i9xx_plane);
+ plane->i9xx_plane = (enum i9xx_plane_id) pipe;
+ plane->id = PLANE_PRIMARY;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- if (primary->has_fbc) {
+ plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
+ if (plane->has_fbc) {
struct intel_fbc *fbc = &dev_priv->fbc;
- fbc->possible_framebuffer_bits |= primary->frontbuffer_bit;
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
- if (INTEL_GEN(dev_priv) >= 9) {
- primary->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
- PLANE_PRIMARY);
-
- if (skl_plane_has_planar(dev_priv, pipe, PLANE_PRIMARY)) {
- intel_primary_formats = skl_pri_planar_formats;
- num_formats = ARRAY_SIZE(skl_pri_planar_formats);
- } else {
- intel_primary_formats = skl_primary_formats;
- num_formats = ARRAY_SIZE(skl_primary_formats);
- }
-
- if (primary->has_ccs)
- modifiers = skl_format_modifiers_ccs;
- else
- modifiers = skl_format_modifiers_noccs;
-
- primary->max_stride = skl_plane_max_stride;
- primary->update_plane = skl_update_plane;
- primary->disable_plane = skl_disable_plane;
- primary->get_hw_state = skl_plane_get_hw_state;
- primary->check_plane = skl_plane_check;
-
- plane_funcs = &skl_plane_funcs;
- } else if (INTEL_GEN(dev_priv) >= 4) {
- intel_primary_formats = i965_primary_formats;
+ if (INTEL_GEN(dev_priv) >= 4) {
+ formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
modifiers = i9xx_format_modifiers;
- primary->max_stride = i9xx_plane_max_stride;
- primary->update_plane = i9xx_update_plane;
- primary->disable_plane = i9xx_disable_plane;
- primary->get_hw_state = i9xx_plane_get_hw_state;
- primary->check_plane = i9xx_plane_check;
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
plane_funcs = &i965_plane_funcs;
} else {
- intel_primary_formats = i8xx_primary_formats;
+ formats = i8xx_primary_formats;
num_formats = ARRAY_SIZE(i8xx_primary_formats);
modifiers = i9xx_format_modifiers;
- primary->max_stride = i9xx_plane_max_stride;
- primary->update_plane = i9xx_update_plane;
- primary->disable_plane = i9xx_disable_plane;
- primary->get_hw_state = i9xx_plane_get_hw_state;
- primary->check_plane = i9xx_plane_check;
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = i9xx_update_plane;
+ plane->disable_plane = i9xx_disable_plane;
+ plane->get_hw_state = i9xx_plane_get_hw_state;
+ plane->check_plane = i9xx_plane_check;
plane_funcs = &i8xx_plane_funcs;
}
- if (INTEL_GEN(dev_priv) >= 9)
- ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, plane_funcs,
- intel_primary_formats, num_formats,
- modifiers,
- DRM_PLANE_TYPE_PRIMARY,
- "plane 1%c", pipe_name(pipe));
- else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, plane_funcs,
- intel_primary_formats, num_formats,
- modifiers,
+ possible_crtcs = BIT(pipe);
+
+ if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, plane_funcs,
+ formats, num_formats, modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
- ret = drm_universal_plane_init(&dev_priv->drm, &primary->base,
- 0, plane_funcs,
- intel_primary_formats, num_formats,
- modifiers,
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, plane_funcs,
+ formats, num_formats, modifiers,
DRM_PLANE_TYPE_PRIMARY,
"plane %c",
- plane_name(primary->i9xx_plane));
+ plane_name(plane->i9xx_plane));
if (ret)
goto fail;
- if (INTEL_GEN(dev_priv) >= 10) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
- DRM_MODE_REFLECT_X;
- } else if (INTEL_GEN(dev_priv) >= 9) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
- } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X;
@@ -13941,26 +13953,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
}
if (INTEL_GEN(dev_priv) >= 4)
- drm_plane_create_rotation_property(&primary->base,
+ drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
supported_rotations);
- if (INTEL_GEN(dev_priv) >= 9)
- drm_plane_create_color_properties(&primary->base,
- BIT(DRM_COLOR_YCBCR_BT601) |
- BIT(DRM_COLOR_YCBCR_BT709),
- BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
- BIT(DRM_COLOR_YCBCR_FULL_RANGE),
- DRM_COLOR_YCBCR_BT709,
- DRM_COLOR_YCBCR_LIMITED_RANGE);
-
- drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
- return primary;
+ return plane;
fail:
- kfree(state);
- kfree(primary);
+ intel_plane_free(plane);
return ERR_PTR(ret);
}
@@ -13969,23 +13971,13 @@ static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- struct intel_plane *cursor = NULL;
- struct intel_plane_state *state = NULL;
+ unsigned int possible_crtcs;
+ struct intel_plane *cursor;
int ret;
- cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
- if (!cursor) {
- ret = -ENOMEM;
- goto fail;
- }
-
- state = intel_create_plane_state(&cursor->base);
- if (!state) {
- ret = -ENOMEM;
- goto fail;
- }
-
- cursor->base.state = &state->base;
+ cursor = intel_plane_alloc();
+ if (IS_ERR(cursor))
+ return cursor;
cursor->pipe = pipe;
cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
@@ -14012,8 +14004,10 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
cursor->cursor.size = ~0;
+ possible_crtcs = BIT(pipe);
+
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- 0, &intel_cursor_plane_funcs,
+ possible_crtcs, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
cursor_format_modifiers,
@@ -14028,16 +14022,12 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_180);
- if (INTEL_GEN(dev_priv) >= 9)
- state->scaler_id = -1;
-
drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
return cursor;
fail:
- kfree(state);
- kfree(cursor);
+ intel_plane_free(cursor);
return ERR_PTR(ret);
}
@@ -14058,7 +14048,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_scaler *scaler = &scaler_state->scalers[i];
scaler->in_use = 0;
- scaler->mode = PS_SCALER_MODE_DYN;
+ scaler->mode = 0;
}
scaler_state->scaler_id = -1;
@@ -14153,18 +14143,6 @@ fail:
return ret;
}
-enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
-{
- struct drm_device *dev = connector->base.dev;
-
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
-
- if (!connector->base.state->crtc)
- return INVALID_PIPE;
-
- return to_intel_crtc(connector->base.state->crtc)->pipe;
-}
-
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
@@ -14281,7 +14259,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_pps_init(dev_priv);
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return;
/*
@@ -14301,6 +14279,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_D);
intel_ddi_init(dev_priv, PORT_E);
intel_ddi_init(dev_priv, PORT_F);
+ icl_dsi_init(dev_priv);
} else if (IS_GEN9_LP(dev_priv)) {
/*
* FIXME: Broxton doesn't support port detection via the
@@ -14523,7 +14502,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
static
u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
- uint64_t fb_modifier, uint32_t pixel_format)
+ u32 pixel_format, u64 fb_modifier)
{
struct intel_crtc *crtc;
struct intel_plane *plane;
@@ -14545,7 +14524,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_framebuffer *fb = &intel_fb->base;
- struct drm_format_name_buf format_name;
u32 pitch_limit;
unsigned int tiling, stride;
int ret = -EINVAL;
@@ -14576,33 +14554,14 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
}
}
- /* Passed in modifier sanity checking. */
- switch (mode_cmd->modifier[0]) {
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- default:
- DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n");
- goto err;
- }
- /* fall through */
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- if (INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
- mode_cmd->modifier[0]);
- goto err;
- }
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- break;
- default:
- DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
+ if (!drm_any_plane_has_format(&dev_priv->drm,
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ struct drm_format_name_buf format_name;
+
+ DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
mode_cmd->modifier[0]);
goto err;
}
@@ -14617,8 +14576,8 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
- mode_cmd->pixel_format);
+ pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
@@ -14637,69 +14596,6 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err;
}
- /* Reject formats not supported by any plane early. */
- switch (mode_cmd->pixel_format) {
- case DRM_FORMAT_C8:
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- break;
- case DRM_FORMAT_XRGB1555:
- if (INTEL_GEN(dev_priv) > 3) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_ABGR8888:
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- if (INTEL_GEN(dev_priv) < 4) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_ABGR2101010:
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_VYUY:
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
- break;
- case DRM_FORMAT_NV12:
- if (INTEL_GEN(dev_priv) < 9 || IS_SKYLAKE(dev_priv) ||
- IS_BROXTON(dev_priv) || INTEL_GEN(dev_priv) >= 11) {
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name));
- goto err;
- }
- break;
- default:
- DRM_DEBUG_KMS("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- goto err;
- }
-
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
if (mode_cmd->offsets[0] != 0)
goto err;
@@ -14971,174 +14867,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.update_crtcs = intel_update_crtcs;
}
-/*
- * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
- */
-static void quirk_ssc_force_disable(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
- DRM_INFO("applying lvds SSC disable quirk\n");
-}
-
-/*
- * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
- * brightness value
- */
-static void quirk_invert_brightness(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
- DRM_INFO("applying inverted panel brightness quirk\n");
-}
-
-/* Some VBT's incorrectly indicate no backlight is present */
-static void quirk_backlight_present(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
- DRM_INFO("applying backlight present quirk\n");
-}
-
-/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
- * which is 300 ms greater than eDP spec T12 min.
- */
-static void quirk_increase_t12_delay(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY;
- DRM_INFO("Applying T12 delay quirk\n");
-}
-
-/*
- * GeminiLake NUC HDMI outputs require additional off time
- * this allows the onboard retimer to correctly sync to signal
- */
-static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
- DRM_INFO("Applying Increase DDI Disabled quirk\n");
-}
-
-struct intel_quirk {
- int device;
- int subsystem_vendor;
- int subsystem_device;
- void (*hook)(struct drm_device *dev);
-};
-
-/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
-struct intel_dmi_quirk {
- void (*hook)(struct drm_device *dev);
- const struct dmi_system_id (*dmi_id_list)[];
-};
-
-static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
-{
- DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
- return 1;
-}
-
-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
- {
- .dmi_id_list = &(const struct dmi_system_id[]) {
- {
- .callback = intel_dmi_reverse_brightness,
- .ident = "NCR Corporation",
- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
- DMI_MATCH(DMI_PRODUCT_NAME, ""),
- },
- },
- { } /* terminating entry */
- },
- .hook = quirk_invert_brightness,
- },
-};
-
-static struct intel_quirk intel_quirks[] = {
- /* Lenovo U160 cannot use SSC on LVDS */
- { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
-
- /* Sony Vaio Y cannot use SSC on LVDS */
- { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
-
- /* Acer Aspire 5734Z must invert backlight brightness */
- { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
-
- /* Acer/eMachines G725 */
- { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
-
- /* Acer/eMachines e725 */
- { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
-
- /* Acer/Packard Bell NCL20 */
- { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
-
- /* Acer Aspire 4736Z */
- { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
-
- /* Acer Aspire 5336 */
- { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
-
- /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
- { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
-
- /* Acer C720 Chromebook (Core i3 4005U) */
- { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
-
- /* Apple Macbook 2,1 (Core 2 T7400) */
- { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
-
- /* Apple Macbook 4,1 */
- { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
-
- /* Toshiba CB35 Chromebook (Celeron 2955U) */
- { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
-
- /* HP Chromebook 14 (Celeron 2955U) */
- { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
-
- /* Dell Chromebook 11 */
- { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
-
- /* Dell Chromebook 11 (2015 version) */
- { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
-
- /* Toshiba Satellite P50-C-18C */
- { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
-
- /* GeminiLake NUC */
- { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
- { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
- /* ASRock ITX*/
- { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
- { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
-};
-
-static void intel_init_quirks(struct drm_device *dev)
-{
- struct pci_dev *d = dev->pdev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
- struct intel_quirk *q = &intel_quirks[i];
-
- if (d->device == q->device &&
- (d->subsystem_vendor == q->subsystem_vendor ||
- q->subsystem_vendor == PCI_ANY_ID) &&
- (d->subsystem_device == q->subsystem_device ||
- q->subsystem_device == PCI_ANY_ID))
- q->hook(dev);
- }
- for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
- if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
- intel_dmi_quirks[i].hook(dev);
- }
-}
-
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_i915_private *dev_priv)
{
@@ -15352,7 +15080,9 @@ int intel_modeset_init(struct drm_device *dev)
INIT_WORK(&dev_priv->atomic_helper.free_work,
intel_atomic_helper_free_state_worker);
- intel_init_quirks(dev);
+ intel_init_quirks(dev_priv);
+
+ intel_fbc_init(dev_priv);
intel_init_pm(dev_priv);
@@ -15584,8 +15314,8 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
if (pipe == crtc->pipe)
continue;
- DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n",
- plane->base.name);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+ plane->base.base.id, plane->base.name);
plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
intel_plane_disable_noatomic(plane_crtc, plane);
@@ -15626,7 +15356,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+ struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
@@ -15636,7 +15367,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
}
- if (crtc->active) {
+ if (crtc_state->base.active) {
struct intel_plane *plane;
/* Disable everything but the primary plane */
@@ -15652,10 +15383,10 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
/* Adjust the state of the output pipe according to whether we
* have active connectors/encoders. */
- if (crtc->active && !intel_crtc_has_encoders(crtc))
+ if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
intel_crtc_disable_noatomic(&crtc->base, ctx);
- if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
+ if (crtc_state->base.active || HAS_GMCH_DISPLAY(dev_priv)) {
/*
* We start out with underrun reporting disabled to avoid races.
* For correct bookkeeping mark this on active crtcs.
@@ -15686,6 +15417,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_connector *connector;
/* We need to check both for a crtc link (meaning that the
@@ -15709,7 +15441,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
encoder->base.base.id,
encoder->base.name);
- encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
+ if (encoder->disable)
+ encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
if (encoder->post_disable)
encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state);
}
@@ -15726,6 +15459,9 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
/* notify opregion of the sanitized encoder state */
intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_sanitize_encoder_pll_mapping(encoder);
}
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
@@ -15774,6 +15510,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
crtc_state = to_intel_crtc_state(crtc->base.state);
intel_set_plane_visible(crtc_state, plane_state, visible);
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+ plane->base.base.id, plane->base.name,
+ enableddisabled(visible), pipe_name(pipe));
}
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -15926,7 +15666,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
drm_calc_timestamping_constants(&crtc->base,
&crtc_state->base.adjusted_mode);
- update_scanline_offset(crtc);
+ update_scanline_offset(crtc_state);
}
dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
@@ -15981,6 +15721,65 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
}
}
+static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t hdmi_reg)
+{
+ u32 val = I915_READ(hdmi_reg);
+
+ if (val & SDVO_ENABLE ||
+ (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
+ return;
+
+ DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
+ port_name(port));
+
+ val &= ~SDVO_PIPE_SEL_MASK;
+ val |= SDVO_PIPE_SEL(PIPE_A);
+
+ I915_WRITE(hdmi_reg, val);
+}
+
+static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
+ enum port port, i915_reg_t dp_reg)
+{
+ u32 val = I915_READ(dp_reg);
+
+ if (val & DP_PORT_EN ||
+ (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
+ return;
+
+ DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
+ port_name(port));
+
+ val &= ~DP_PIPE_SEL_MASK;
+ val |= DP_PIPE_SEL(PIPE_A);
+
+ I915_WRITE(dp_reg, val);
+}
+
+static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
+{
+ /*
+ * The BIOS may select transcoder B on some of the PCH
+	 * ports even if it doesn't enable the port. This would trip
+ * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
+ * Sanitize the transcoder select bits to prevent that. We
+ * assume that the BIOS never actually enabled the port,
+ * because if it did we'd actually have to toggle the port
+ * on and back off to make the transcoder A select stick
+	 * (see intel_dp_link_down(), intel_disable_hdmi(),
+ * intel_disable_sdvo()).
+ */
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
+ ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
+
+ /* PCH SDVOB multiplex with HDMIB */
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
+ ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
+}
+
/* Scan out the current hw modeset state,
* and sanitizes it to the current state
*/
@@ -15990,6 +15789,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
struct intel_encoder *encoder;
int i;
@@ -16001,6 +15801,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
/* HW state is read out, now we need to sanitize this mess. */
get_encoder_power_domains(dev_priv);
+ if (HAS_PCH_IBX(dev_priv))
+ ibx_sanitize_pch_ports(dev_priv);
+
/*
* intel_sanitize_plane_mapping() may need to do vblank
* waits, so we need vblank interrupts restored beforehand.
@@ -16008,7 +15811,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_intel_crtc(&dev_priv->drm, crtc) {
drm_crtc_vblank_reset(&crtc->base);
- if (crtc->active)
+ if (crtc->base.state->active)
drm_crtc_vblank_on(&crtc->base);
}
@@ -16018,8 +15821,9 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_sanitize_encoder(encoder);
for_each_intel_crtc(&dev_priv->drm, crtc) {
+ crtc_state = to_intel_crtc_state(crtc->base.state);
intel_sanitize_crtc(crtc, ctx);
- intel_dump_pipe_config(crtc, crtc->config,
+ intel_dump_pipe_config(crtc, crtc_state,
"[setup_hw_state]");
}
@@ -16053,7 +15857,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
for_each_intel_crtc(dev, crtc) {
u64 put_domains;
- put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+ put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
if (WARN_ON(put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
@@ -16097,29 +15902,6 @@ void intel_display_resume(struct drm_device *dev)
drm_atomic_state_put(state);
}
-int intel_connector_register(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- int ret;
-
- ret = intel_backlight_device_register(intel_connector);
- if (ret)
- goto err;
-
- return 0;
-
-err:
- return ret;
-}
-
-void intel_connector_unregister(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- intel_backlight_device_unregister(intel_connector);
- intel_panel_destroy_backlight(connector);
-}
-
static void intel_hpd_poll_fini(struct drm_device *dev)
{
struct intel_connector *connector;
@@ -16130,9 +15912,9 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->modeset_retry_work.func)
cancel_work_sync(&connector->modeset_retry_work);
- if (connector->hdcp_shim) {
- cancel_delayed_work_sync(&connector->hdcp_check_work);
- cancel_work_sync(&connector->hdcp_prop_work);
+ if (connector->hdcp.shim) {
+ cancel_delayed_work_sync(&connector->hdcp.check_work);
+ cancel_work_sync(&connector->hdcp.prop_work);
}
}
drm_connector_list_iter_end(&conn_iter);
@@ -16172,18 +15954,13 @@ void intel_modeset_cleanup(struct drm_device *dev)
drm_mode_config_cleanup(dev);
- intel_cleanup_overlay(dev_priv);
+ intel_overlay_cleanup(dev_priv);
intel_teardown_gmbus(dev_priv);
destroy_workqueue(dev_priv->modeset_wq);
-}
-void intel_connector_attach_encoder(struct intel_connector *connector,
- struct intel_encoder *encoder)
-{
- connector->encoder = encoder;
- drm_connector_attach_encoder(&connector->base, &encoder->base);
+ intel_fbc_cleanup_cfb(dev_priv);
}
/*
@@ -16273,7 +16050,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
};
int i;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return NULL;
error = kzalloc(sizeof(*error), GFP_ATOMIC);
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 9fac67e31205..4262452963b3 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -43,6 +43,11 @@ enum i915_gpio {
GPIOM,
};
+/*
+ * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
+ * rest have consecutive values and match the enum values of transcoders
+ * with a 1:1 transcoder -> pipe mapping.
+ */
enum pipe {
INVALID_PIPE = -1,
@@ -57,12 +62,25 @@ enum pipe {
#define pipe_name(p) ((p) + 'A')
enum transcoder {
- TRANSCODER_A = 0,
- TRANSCODER_B,
- TRANSCODER_C,
+ /*
+ * The following transcoders have a 1:1 transcoder -> pipe mapping,
+ * keep their values fixed: the code assumes that TRANSCODER_A=0, the
+ * rest have consecutive values and match the enum values of the pipes
+ * they map to.
+ */
+ TRANSCODER_A = PIPE_A,
+ TRANSCODER_B = PIPE_B,
+ TRANSCODER_C = PIPE_C,
+
+ /*
+ * The following transcoders can map to any pipe, their enum value
+ * doesn't need to stay fixed.
+ */
TRANSCODER_EDP,
- TRANSCODER_DSI_A,
- TRANSCODER_DSI_C,
+ TRANSCODER_DSI_0,
+ TRANSCODER_DSI_1,
+ TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
+ TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
I915_MAX_TRANSCODERS
};
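A standalone C11 sketch (not part of the patch; in the kernel itself a
BUILD_BUG_ON would be the natural tool) of how the fixed pipe <->
transcoder correspondence described in the two comments above can be
pinned down at compile time:

enum pipe { PIPE_A, PIPE_B, PIPE_C };
enum transcoder {
	TRANSCODER_A = PIPE_A,
	TRANSCODER_B = PIPE_B,
	TRANSCODER_C = PIPE_C,
};

/* compilation fails if the 1:1 mapping is ever broken */
_Static_assert(TRANSCODER_A == 0 && TRANSCODER_B == 1 && TRANSCODER_C == 2,
	       "pipe/transcoder enums out of lockstep");

int main(void) { return 0; }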
@@ -120,6 +138,9 @@ enum plane_id {
PLANE_SPRITE0,
PLANE_SPRITE1,
PLANE_SPRITE2,
+ PLANE_SPRITE3,
+ PLANE_SPRITE4,
+ PLANE_SPRITE5,
PLANE_CURSOR,
I915_MAX_PLANES,
@@ -221,6 +242,7 @@ enum intel_display_power_domain {
POWER_DOMAIN_TRANSCODER_B,
POWER_DOMAIN_TRANSCODER_C,
POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_EDP_VDSC,
POWER_DOMAIN_TRANSCODER_DSI_A,
POWER_DOMAIN_TRANSCODER_DSI_C,
POWER_DOMAIN_PORT_DDI_A_LANES,
@@ -363,7 +385,7 @@ struct intel_link_m_n {
(__dev_priv)->power_domains.power_well_count; \
(__power_well)++)
-#define for_each_power_well_rev(__dev_priv, __power_well) \
+#define for_each_power_well_reverse(__dev_priv, __power_well) \
for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
(__dev_priv)->power_domains.power_well_count - 1; \
(__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
@@ -373,10 +395,18 @@ struct intel_link_m_n {
for_each_power_well(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
-#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
- for_each_power_well_rev(__dev_priv, __power_well) \
+#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well_reverse(__dev_priv, __power_well) \
for_each_if((__power_well)->desc->domains & (__domain_mask))
+#define for_each_old_intel_plane_in_state(__state, plane, old_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
@@ -402,10 +432,18 @@ struct intel_link_m_n {
(__i)++) \
for_each_if(plane)
-void intel_link_compute_m_n(int bpp, int nlanes,
+#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
+void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
bool constant_n);
-
bool is_ccs_modifier(u64 modifier);
#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 13f9b56a9ce7..fdd2cbc56fa3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -45,6 +45,19 @@
#define DP_DPRX_ESI_LEN 14
+/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
+#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440
+#define DP_DSC_MIN_SUPPORTED_BPC 8
+#define DP_DSC_MAX_SUPPORTED_BPC 10
+
+/* DP DSC throughput values used for slice count calculations KPixels/s */
+#define DP_DSC_PEAK_PIXEL_RATE 2720000
+#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
+#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
+
+/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
+#define DP_DSC_FEC_OVERHEAD_FACTOR 976
+
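The 976 above is (100 - 2.4)/100 = 0.976 scaled by 1000, so the 2.4% FEC
overhead can be applied with integer math. A standalone sketch of the
idea; the sample rate and the placement of the /1000 are illustrative
assumptions, not taken from this patch:

#include <stdio.h>

int main(void)
{
	long link_kbps = 809000;                  /* invented payload rate */
	long after_fec = link_kbps * 976 / 1000;  /* keep ~97.6% of it */

	printf("%ld kbps usable after FEC\n", after_fec);  /* 789584 */
	return 0;
}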
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
@@ -93,6 +106,14 @@ static const struct dp_link_dpll chv_dpll[] = {
{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
+/* Constants for DP DSC configurations */
+static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
+
+/* With a single pipe configuration, the HW is capable of supporting a
+ * maximum of 4 slices per line.
+ */
+static const u8 valid_dsc_slicecount[] = {1, 2, 4};
+
/**
* intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -222,138 +243,6 @@ intel_dp_link_required(int pixel_clock, int bpp)
return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
-void icl_program_mg_dp_mode(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum port port = intel_dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 ln0, ln1, lane_info;
-
- if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
- return;
-
- ln0 = I915_READ(MG_DP_MODE(port, 0));
- ln1 = I915_READ(MG_DP_MODE(port, 1));
-
- switch (intel_dig_port->tc_type) {
- case TC_PORT_TYPEC:
- ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
- ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
-
- lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
- DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
- DP_LANE_ASSIGNMENT_SHIFT(tc_port);
-
- switch (lane_info) {
- case 0x1:
- case 0x4:
- break;
- case 0x2:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0x3:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0x8:
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
- break;
- case 0xC:
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- case 0xF:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE |
- MG_DP_MODE_CFG_DP_X2_MODE;
- break;
- default:
- MISSING_CASE(lane_info);
- }
- break;
-
- case TC_PORT_LEGACY:
- ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
- ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE;
- break;
-
- default:
- MISSING_CASE(intel_dig_port->tc_type);
- return;
- }
-
- I915_WRITE(MG_DP_MODE(port, 0), ln0);
- I915_WRITE(MG_DP_MODE(port, 1), ln1);
-}
-
-void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
- u32 val;
- int i;
-
- if (tc_port == PORT_TC_NONE)
- return;
-
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
- val |= MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING;
- I915_WRITE(mg_regs[i], val);
- }
-
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING;
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
-void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
- u32 val;
- int i;
-
- if (tc_port == PORT_TC_NONE)
- return;
-
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
- val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
- MG_DP_MODE_CFG_TRPWR_GATING |
- MG_DP_MODE_CFG_CLNPWR_GATING |
- MG_DP_MODE_CFG_DIGPWR_GATING |
- MG_DP_MODE_CFG_GAONPWR_GATING);
- I915_WRITE(mg_regs[i], val);
- }
-
- val = I915_READ(MG_MISC_SUS0(tc_port));
- val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK |
- MG_MISC_SUS0_CFG_TR2PWR_GATING |
- MG_MISC_SUS0_CFG_CL2PWR_GATING |
- MG_MISC_SUS0_CFG_GAONPWR_GATING |
- MG_MISC_SUS0_CFG_TRPWR_GATING |
- MG_MISC_SUS0_CFG_CL1PWR_GATING |
- MG_MISC_SUS0_CFG_DGPWR_GATING);
- I915_WRITE(MG_MISC_SUS0(tc_port), val);
-}
-
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
@@ -455,7 +344,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
- if (INTEL_GEN(dev_priv) == 10)
+ if (IS_GEN10(dev_priv))
max_rate = cnl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
@@ -616,9 +505,12 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk;
+ u16 dsc_max_output_bpp = 0;
+ u8 dsc_slice_count = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -641,7 +533,33 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(target_clock, 18);
- if (mode_rate > max_rate || target_clock > max_dotclk)
+ /*
+	 * Output bpp is stored in 6.4 format, so right shift by 4 to get the
+	 * integer value, since we support only integer values of bpp.
+ */
+ if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
+ if (intel_dp_is_edp(intel_dp)) {
+ dsc_max_output_bpp =
+ drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
+ dsc_slice_count =
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ true);
+ } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(max_link_clock,
+ max_lanes,
+ target_clock,
+ mode->hdisplay) >> 4;
+ dsc_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ target_clock,
+ mode->hdisplay);
+ }
+ }
+
+ if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
+ target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
if (mode->clock < 10000)
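To make the 6.4 fixed-point handling in the comment above concrete, a
standalone sketch with an invented raw value; the low four bits carry
the fraction that the driver discards:

#include <stdio.h>

int main(void)
{
	unsigned int raw = 0x1a4;  /* 26.25 bpp in 6.4 fixed point */

	printf("integer bpp  = %u\n", raw >> 4);   /* 26 */
	printf("frac (16ths) = %u\n", raw & 0xf);  /* 4, i.e. 0.25 */
	return 0;
}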
@@ -690,7 +608,8 @@ static void pps_lock(struct intel_dp *intel_dp)
* See intel_power_sequencer_reset() why we need
* a power domain reference here.
*/
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dp_to_dig_port(intel_dp)));
mutex_lock(&dev_priv->pps_mutex);
}
@@ -701,7 +620,8 @@ static void pps_unlock(struct intel_dp *intel_dp)
mutex_unlock(&dev_priv->pps_mutex);
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(dp_to_dig_port(intel_dp)));
}
static void
@@ -1156,6 +1076,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (index)
return 0;
@@ -1165,7 +1086,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* like to run at 2MHz. So, take the cdclk or PCH rawclk value and
* divide by 2000 and use that
*/
- if (intel_dp->aux_ch == AUX_CH_A)
+ if (dig_port->aux_ch == AUX_CH_A)
return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
else
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
@@ -1174,8 +1095,9 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (intel_dp->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -1503,80 +1425,12 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
-static enum aux_ch intel_aux_ch(struct intel_dp *intel_dp)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
- enum aux_ch aux_ch;
-
- if (!info->alternate_aux_channel) {
- aux_ch = (enum aux_ch) port;
-
- DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
- aux_ch_name(aux_ch), port_name(port));
- return aux_ch;
- }
-
- switch (info->alternate_aux_channel) {
- case DP_AUX_A:
- aux_ch = AUX_CH_A;
- break;
- case DP_AUX_B:
- aux_ch = AUX_CH_B;
- break;
- case DP_AUX_C:
- aux_ch = AUX_CH_C;
- break;
- case DP_AUX_D:
- aux_ch = AUX_CH_D;
- break;
- case DP_AUX_E:
- aux_ch = AUX_CH_E;
- break;
- case DP_AUX_F:
- aux_ch = AUX_CH_F;
- break;
- default:
- MISSING_CASE(info->alternate_aux_channel);
- aux_ch = AUX_CH_A;
- break;
- }
-
- DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
- aux_ch_name(aux_ch), port_name(port));
-
- return aux_ch;
-}
-
-static enum intel_display_power_domain
-intel_aux_power_domain(struct intel_dp *intel_dp)
-{
- switch (intel_dp->aux_ch) {
- case AUX_CH_A:
- return POWER_DOMAIN_AUX_A;
- case AUX_CH_B:
- return POWER_DOMAIN_AUX_B;
- case AUX_CH_C:
- return POWER_DOMAIN_AUX_C;
- case AUX_CH_D:
- return POWER_DOMAIN_AUX_D;
- case AUX_CH_E:
- return POWER_DOMAIN_AUX_E;
- case AUX_CH_F:
- return POWER_DOMAIN_AUX_F;
- default:
- MISSING_CASE(intel_dp->aux_ch);
- return POWER_DOMAIN_AUX_A;
- }
-}
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_B:
@@ -1592,7 +1446,8 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_B:
@@ -1608,7 +1463,8 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1626,7 +1482,8 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1644,7 +1501,8 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1663,7 +1521,8 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- enum aux_ch aux_ch = intel_dp->aux_ch;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
switch (aux_ch) {
case AUX_CH_A:
@@ -1689,10 +1548,8 @@ static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
-
- intel_dp->aux_ch = intel_aux_ch(intel_dp);
- intel_dp->aux_power_domain = intel_aux_power_domain(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
if (INTEL_GEN(dev_priv) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
@@ -1853,6 +1710,41 @@ struct link_config_limits {
int min_bpp, max_bpp;
};
+static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return INTEL_GEN(dev_priv) >= 11 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
+ drm_dp_sink_supports_fec(intel_dp->fec_capable);
+}
+
+static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ return INTEL_GEN(dev_priv) >= 10 &&
+ pipe_config->cpu_transcoder != TRANSCODER_A;
+}
+
+static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
+ return false;
+
+ return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
+}
+
static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
@@ -1951,14 +1843,158 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return false;
}
+/* Optimize link config in order: max bpp, min lanes, min clock */
+static bool
+intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ const struct link_config_limits *limits)
+{
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ int bpp, clock, lane_count;
+ int mode_rate, link_clock, link_avail;
+
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
+ bpp);
+
+ for (lane_count = limits->min_lane_count;
+ lane_count <= limits->max_lane_count;
+ lane_count <<= 1) {
+ for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+ link_clock = intel_dp->common_rates[clock];
+ link_avail = intel_dp_max_data_rate(link_clock,
+ lane_count);
+
+ if (mode_rate <= link_avail) {
+ pipe_config->lane_count = lane_count;
+ pipe_config->pipe_bpp = bpp;
+ pipe_config->port_clock = link_clock;
+
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
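A standalone sketch of the "max bpp, min lanes, min clock" search order
used above, printing only the order in which configurations would be
tried (limits and rates are invented); the first combination whose
bandwidth covers the mode rate wins:

#include <stdio.h>

int main(void)
{
	const int rates[] = { 162000, 270000, 540000 };   /* ascending */
	int bpp, lanes, i;

	for (bpp = 30; bpp >= 18; bpp -= 2 * 3)           /* highest bpp first */
		for (lanes = 1; lanes <= 4; lanes <<= 1)  /* fewest lanes first */
			for (i = 0; i < 3; i++)           /* slowest link first */
				printf("try bpp=%d lanes=%d rate=%d\n",
				       bpp, lanes, rates[i]);
	return 0;
}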
+static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
+{
+ int i, num_bpc;
+ u8 dsc_bpc[3] = {0};
+
+ num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
+ dsc_bpc);
+ for (i = 0; i < num_bpc; i++) {
+ if (dsc_max_bpc >= dsc_bpc[i])
+ return dsc_bpc[i] * 3;
+ }
+
+ return 0;
+}
+
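A standalone sketch of the bpc selection above with invented sink caps;
that dsc_bpc comes back ordered highest-first is an assumption made for
this illustration:

#include <stdio.h>

int main(void)
{
	const unsigned char dsc_bpc[] = { 12, 10, 8 };  /* sink-supported bpc */
	unsigned char dsc_max_bpc = 10;
	int i;

	for (i = 0; i < 3; i++)
		if (dsc_max_bpc >= dsc_bpc[i]) {
			printf("pipe_bpp = %d\n", dsc_bpc[i] * 3);  /* 30 */
			return 0;
		}
	return 1;  /* no supported bpc fits under the cap */
}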
+static bool intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ u8 dsc_max_bpc;
+ int pipe_bpp;
+
+ if (!intel_dp_supports_dsc(intel_dp, pipe_config))
+ return false;
+
+ dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
+ conn_state->max_requested_bpc);
+
+ pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
+ if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
+ DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
+ return false;
+ }
+
+ /*
+ * For now enable DSC for max bpp, max link rate, max lane count.
+ * Optimize this later for the minimum possible link rate/lane count
+ * with DSC enabled for the requested mode.
+ */
+ pipe_config->pipe_bpp = pipe_bpp;
+ pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
+ pipe_config->lane_count = limits->max_lane_count;
+
+ if (intel_dp_is_edp(intel_dp)) {
+ pipe_config->dsc_params.compressed_bpp =
+ min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc_params.slice_count =
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ true);
+ } else {
+ u16 dsc_max_output_bpp;
+ u8 dsc_dp_slice_count;
+
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay);
+ dsc_dp_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay);
+ if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
+ DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
+ return false;
+ }
+ pipe_config->dsc_params.compressed_bpp = min_t(u16,
+ dsc_max_output_bpp >> 4,
+ pipe_config->pipe_bpp);
+ pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
+ }
+ /*
+ * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
+	 * is greater than the maximum Cdclock, and if the slice count is
+	 * greater than 1 (i.e. even, given the valid counts 1, 2 and 4),
+	 * then we need to use 2 VDSC instances.
+ */
+ if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
+ if (pipe_config->dsc_params.slice_count > 1) {
+ pipe_config->dsc_params.dsc_split = true;
+ } else {
+ DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
+ return false;
+ }
+ }
+ if (intel_dp_compute_dsc_params(intel_dp, pipe_config) < 0) {
+ DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
+ "Compressed BPP = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp);
+ return false;
+ }
+ pipe_config->dsc_params.compression_enable = true;
+ DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
+ "Compressed Bpp = %d Slice Count = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp,
+ pipe_config->dsc_params.slice_count);
+
+ return true;
+}
+
static bool
intel_dp_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
{
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct link_config_limits limits;
int common_len;
+ bool ret;
common_len = intel_dp_common_len_rate_limit(intel_dp,
intel_dp->max_link_rate);
@@ -1975,13 +2011,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_bpp = 6 * 3;
limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
- if (intel_dp_is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
/*
* Use the maximum clock and number of lanes the eDP panel
- * advertizes being capable of. The panels are generally
- * designed to support only a single clock and lane
- * configuration, and typically these values correspond to the
- * native resolution of the panel.
+	 * advertises being capable of. The eDP 1.3 and earlier panels
+ * are generally designed to support only a single clock and
+ * lane configuration, and typically these values correspond to
+ * the native resolution of the panel. With eDP 1.4 rate select
+ * and DSC, this is decreasingly the case, and we need to be
+ * able to select less than maximum link config.
*/
limits.min_lane_count = limits.max_lane_count;
limits.min_clock = limits.max_clock;
@@ -1995,23 +2033,52 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->common_rates[limits.max_clock],
limits.max_bpp, adjusted_mode->crtc_clock);
- /*
- * Optimize for slow and wide. This is the place to add alternative
- * optimization policy.
- */
- if (!intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits))
- return false;
-
- DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
- pipe_config->lane_count, pipe_config->port_clock,
- pipe_config->pipe_bpp);
-
- DRM_DEBUG_KMS("DP link rate required %i available %i\n",
- intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->pipe_bpp),
- intel_dp_max_data_rate(pipe_config->port_clock,
- pipe_config->lane_count));
+ if (intel_dp_is_edp(intel_dp))
+ /*
+ * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
+ * section A.1: "It is recommended that the minimum number of
+ * lanes be used, using the minimum link rate allowed for that
+ * lane configuration."
+ *
+ * Note that we use the max clock and lane count for eDP 1.3 and
+ * earlier, and fast vs. wide is irrelevant.
+ */
+ ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
+ &limits);
+ else
+ /* Optimize for slow and wide. */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
+ &limits);
+
+ /* enable compression if the mode doesn't fit available BW */
+ if (!ret) {
+ if (!intel_dp_dsc_compute_config(intel_dp, pipe_config,
+ conn_state, &limits))
+ return false;
+ }
+
+ if (pipe_config->dsc_params.compression_enable) {
+ DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp,
+ pipe_config->dsc_params.compressed_bpp);
+
+ DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->dsc_params.compressed_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ } else {
+ DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
+ pipe_config->lane_count, pipe_config->port_clock,
+ pipe_config->pipe_bpp);
+ DRM_DEBUG_KMS("DP link rate required %i available %i\n",
+ intel_dp_link_required(adjusted_mode->crtc_clock,
+ pipe_config->pipe_bpp),
+ intel_dp_max_data_rate(pipe_config->port_clock,
+ pipe_config->lane_count));
+ }
return true;
}
@@ -2023,6 +2090,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
enum port port = encoder->port;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
struct intel_connector *intel_connector = intel_dp->attached_connector;
@@ -2034,6 +2102,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ if (lspcon->active)
+ lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
+
pipe_config->has_drrs = false;
if (IS_G4X(dev_priv) || port == PORT_A)
pipe_config->has_audio = false;
@@ -2072,7 +2144,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
- if (!intel_dp_compute_link_config(encoder, pipe_config))
+ pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
+ intel_dp_supports_fec(intel_dp, pipe_config);
+
+ if (!intel_dp_compute_link_config(encoder, pipe_config, conn_state))
return false;
if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
@@ -2090,11 +2165,20 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
}
- intel_link_compute_m_n(pipe_config->pipe_bpp, pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ if (!pipe_config->dsc_params.compression_enable)
+ intel_link_compute_m_n(pipe_config->pipe_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
+ else
+ intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2338,7 +2422,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
port_name(intel_dig_port->base.port));
@@ -2424,7 +2509,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
}
static void edp_panel_vdd_work(struct work_struct *__work)
@@ -2537,6 +2623,7 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
static void edp_panel_off(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2546,10 +2633,10 @@ static void edp_panel_off(struct intel_dp *intel_dp)
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ port_name(dig_port->base.port));
WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
- port_name(dp_to_dig_port(intel_dp)->base.port));
+ port_name(dig_port->base.port));
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2568,7 +2655,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
}
void intel_edp_panel_off(struct intel_dp *intel_dp)
@@ -2788,6 +2875,22 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
+{
+ int ret;
+
+ if (!crtc_state->dsc_params.compression_enable)
+ return;
+
+ ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
+ enable ? DP_DECOMPRESSION_EN : 0);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
+ enable ? "enable" : "disable");
+}
+
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
@@ -3900,6 +4003,40 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
return intel_dp->dpcd[DP_DPCD_REV] != 0;
}
+static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
+{
+ /*
+ * Clear the cached register set to avoid using stale values
+ * for the sinks that do not support DSC.
+ */
+ memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
+
+ /* Clear fec_capable to avoid using stale values */
+ intel_dp->fec_capable = 0;
+
+ /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
+ intel_dp->edp_dpcd[0] >= DP_EDP_14) {
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
+ intel_dp->dsc_dpcd,
+ sizeof(intel_dp->dsc_dpcd)) < 0)
+ DRM_ERROR("Failed to read DPCD register 0x%x\n",
+ DP_DSC_SUPPORT);
+
+ DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dsc_dpcd),
+ intel_dp->dsc_dpcd);
+
+ /* FEC is supported only on DP 1.4 */
+ if (!intel_dp_is_edp(intel_dp) &&
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
+ &intel_dp->fec_capable) < 0)
+ DRM_ERROR("Failed to read FEC DPCD register\n");
+
+ DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
+ }
+}
+
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
@@ -3976,6 +4113,10 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp_set_common_rates(intel_dp);
+ /* Read the eDP DSC DPCD registers */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ intel_dp_get_dsc_sink_cap(intel_dp);
+
return true;
}
@@ -3983,8 +4124,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
- u8 sink_count;
-
if (!intel_dp_read_dpcd(intel_dp))
return false;
@@ -3994,25 +4133,35 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
intel_dp_set_common_rates(intel_dp);
}
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &sink_count) <= 0)
- return false;
-
/*
- * Sink count can change between short pulse hpd hence
- * a member variable in intel_dp will track any changes
- * between short pulse interrupts.
+	 * Some eDP panels do not set a valid value for the sink count, which
+	 * is why we don't care about reading it here or in intel_edp_init_dpcd().
*/
- intel_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
+ if (!intel_dp_is_edp(intel_dp)) {
+ u8 count;
+ ssize_t r;
- /*
- * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
- * a dongle is present but no display. Unless we require to know
- * if a dongle is present or not, we don't need to update
- * downstream port information. So, an early return here saves
- * time from performing other operations which are not required.
- */
- if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
- return false;
+ r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
+ if (r < 1)
+ return false;
+
+ /*
+	 * The sink count can change between short pulse HPD interrupts,
+	 * hence a member variable in intel_dp tracks any changes
+	 * between short pulse interrupts.
+ */
+ intel_dp->sink_count = DP_GET_SINK_COUNT(count);
+
+ /*
+ * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
+ * a dongle is present but no display. Unless we require to know
+ * if a dongle is present or not, we don't need to update
+ * downstream port information. So, an early return here saves
+ * time from performing other operations which are not required.
+ */
+ if (!intel_dp->sink_count)
+ return false;
+ }
if (!drm_dp_is_branch(intel_dp->dpcd))
return true; /* native DP sink */
@@ -4029,16 +4178,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
}
static bool
-intel_dp_can_mst(struct intel_dp *intel_dp)
+intel_dp_sink_can_mst(struct intel_dp *intel_dp)
{
u8 mstm_cap;
- if (!i915_modparams.enable_dp_mst)
- return false;
-
- if (!intel_dp->can_mst)
- return false;
-
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
@@ -4048,34 +4191,36 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
return mstm_cap & DP_MST_CAP;
}
+static bool
+intel_dp_can_mst(struct intel_dp *intel_dp)
+{
+ return i915_modparams.enable_dp_mst &&
+ intel_dp->can_mst &&
+ intel_dp_sink_can_mst(intel_dp);
+}
+
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
- if (!i915_modparams.enable_dp_mst)
- return;
+ struct intel_encoder *encoder =
+ &dp_to_dig_port(intel_dp)->base;
+ bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
+
+ DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
+ port_name(encoder->port), yesno(intel_dp->can_mst),
+ yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
if (!intel_dp->can_mst)
return;
- intel_dp->is_mst = intel_dp_can_mst(intel_dp);
-
- if (intel_dp->is_mst)
- DRM_DEBUG_KMS("Sink is MST capable\n");
- else
- DRM_DEBUG_KMS("Sink is not MST capable\n");
+ intel_dp->is_mst = sink_can_mst &&
+ i915_modparams.enable_dp_mst;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
}
static bool
-intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
-{
- return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector) == 1;
-}
-
-static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
@@ -4083,6 +4228,91 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
+u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+ int mode_clock, int mode_hdisplay)
+{
+ u16 bits_per_pixel, max_bpp_small_joiner_ram;
+ int i;
+
+ /*
+	 * Available Link Bandwidth (Kbits/sec) = NumberOfLanes *
+	 * LinkSymbolClock * 8 * ((100 - FECOverhead) / 100) * TimeSlotsPerMTP
+	 * FECOverhead = 2.4%; for SST, TimeSlotsPerMTP is 1;
+	 * for MST, TimeSlotsPerMTP has to be calculated.
+ */
+ bits_per_pixel = (link_clock * lane_count * 8 *
+ DP_DSC_FEC_OVERHEAD_FACTOR) /
+ mode_clock;
+
+ /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+ max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
+ mode_hdisplay;
+
+ /*
+	 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
+ * check, output bpp from small joiner RAM check)
+ */
+ bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+ /* Error out if the max bpp is less than smallest allowed valid bpp */
+ if (bits_per_pixel < valid_dsc_bpp[0]) {
+ DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
+ return 0;
+ }
+
+ /* Find the nearest match in the array of known BPPs from VESA */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+ if (bits_per_pixel < valid_dsc_bpp[i + 1])
+ break;
+ }
+ bits_per_pixel = valid_dsc_bpp[i];
+
+ /*
+ * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
+ * fractional part is 0
+ */
+ return bits_per_pixel << 4;
+}
+
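A standalone sketch of the snap-to-valid-bpp step above: the computed
value (11 here, invented) is rounded down to the largest member of the
VESA set {6, 8, 10, 12, 15} that does not exceed it:

#include <stdio.h>

int main(void)
{
	const unsigned char valid[] = { 6, 8, 10, 12, 15 };
	unsigned int bpp = 11;
	int i;

	for (i = 0; i < 4; i++)  /* ARRAY_SIZE - 1, as in the loop above */
		if (bpp < valid[i + 1])
			break;

	printf("snapped bpp = %u\n", valid[i]);  /* 10 */
	return 0;
}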
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ int mode_clock,
+ int mode_hdisplay)
+{
+ u8 min_slice_count, i;
+ int max_slice_width;
+
+ if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_0);
+ else
+ min_slice_count = DIV_ROUND_UP(mode_clock,
+ DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+ max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+ if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+ DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+ max_slice_width);
+ return 0;
+ }
+ /* Also take into account max slice width */
+ min_slice_count = min_t(uint8_t, min_slice_count,
+ DIV_ROUND_UP(mode_hdisplay,
+ max_slice_width));
+
+ /* Find the closest match to the valid slice count values */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+ if (valid_dsc_slicecount[i] >
+ drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+ false))
+ break;
+ if (min_slice_count <= valid_dsc_slicecount[i])
+ return valid_dsc_slicecount[i];
+ }
+
+ DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+ return 0;
+}
+
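Working the routine above with plausible, invented numbers: a ~533
MPix/s 4k mode is below DP_DSC_PEAK_PIXEL_RATE, so throughput 0 applies
and DIV_ROUND_UP(533250, 340000) = 2 slices; a 2560 pixel max slice
width over 3840 pixels also gives 2; and 2 is in the valid set
{1, 2, 4}. A standalone sketch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned char valid[] = { 1, 2, 4 };
	int mode_clock = 533250, hdisplay = 3840, max_slice_width = 2560;
	unsigned int min_slices, by_width;
	int i;

	min_slices = DIV_ROUND_UP(mode_clock, 340000);       /* = 2 */
	by_width = DIV_ROUND_UP(hdisplay, max_slice_width);  /* = 2 */
	if (by_width < min_slices)                           /* min_t, as above */
		min_slices = by_width;

	for (i = 0; i < 3; i++)
		if (min_slices <= valid[i]) {
			printf("slice count = %d\n", valid[i]);  /* 2 */
			return 0;
		}
	return 1;
}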
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
int status = 0;
@@ -4341,6 +4571,17 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
if (!intel_dp->link_trained)
return false;
+ /*
+	 * While the PSR source HW is enabled it controls sending frames on
+	 * the main link, enabling and disabling it on its own, so trying to
+	 * retrain would fail: the link may not be on, or training patterns
+	 * could get mixed with frame data, either of which makes the retrain
+	 * fail. Also, when exiting PSR the HW retrains the link anyway,
+	 * fixing any link status error.
+ */
+ if (intel_psr_enabled(intel_dp))
+ return false;
+
if (!intel_dp_get_link_status(intel_dp, link_status))
return false;
@@ -4403,7 +4644,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
/* Suppress underruns caused by re-training */
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- if (crtc->config->has_pch_encoder)
+ if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), false);
@@ -4414,7 +4655,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
intel_wait_for_vblank(dev_priv, crtc->pipe);
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
- if (crtc->config->has_pch_encoder)
+ if (crtc_state->has_pch_encoder)
intel_set_pch_fifo_underrun_reporting(dev_priv,
intel_crtc_pch_transcoder(crtc), true);
@@ -4462,6 +4703,29 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
return changed;
}
+static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
+{
+ u8 val;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
+ return;
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
+
+ if (val & DP_AUTOMATED_TEST_REQUEST)
+ intel_dp_handle_test_request(intel_dp);
+
+ if (val & DP_CP_IRQ)
+ intel_hdcp_check_link(intel_dp->attached_connector);
+
+ if (val & DP_SINK_SPECIFIC_IRQ)
+ DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
+}
+
/*
* According to DP spec
* 5.1.2:
@@ -4479,7 +4743,6 @@ static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 sink_irq_vector = 0;
u8 old_sink_count = intel_dp->sink_count;
bool ret;
@@ -4502,20 +4765,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
return false;
}
- /* Try to read the source of the interrupt */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
- sink_irq_vector != 0) {
- /* Clear interrupt source */
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector);
-
- if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
- intel_dp_handle_test_request(intel_dp);
- if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
- DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
- }
+ intel_dp_check_service_irq(intel_dp);
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
@@ -4810,6 +5060,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
type_str);
}
+static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port);
+
/*
* This function implements the first part of the Connect Flow described by our
* specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -4864,9 +5117,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
if (dig_port->tc_type == TC_PORT_TYPEC &&
!(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
- I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
+ icl_tc_phy_disconnect(dev_priv, dig_port);
return false;
}
@@ -4881,21 +5132,24 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port)
{
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
- u32 val;
- if (dig_port->tc_type != TC_PORT_LEGACY &&
- dig_port->tc_type != TC_PORT_TYPEC)
+ if (dig_port->tc_type == TC_PORT_UNKNOWN)
return;
/*
- * This function may be called many times in a row without an HPD event
- * in between, so try to avoid the write when we can.
+	 * The TBT disconnection flow only requires reading the live status,
+	 * which was already done by the caller.
*/
- val = I915_READ(PORT_TX_DFLEXDPCSSS);
- if (val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)) {
+ if (dig_port->tc_type == TC_PORT_TYPEC ||
+ dig_port->tc_type == TC_PORT_LEGACY) {
+ u32 val;
+
+ val = I915_READ(PORT_TX_DFLEXDPCSSS);
val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
}
+
+ dig_port->tc_type = TC_PORT_UNKNOWN;
}
/*
@@ -4945,19 +5199,14 @@ static bool icl_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
- switch (encoder->hpd_pin) {
- case HPD_PORT_A:
- case HPD_PORT_B:
+ if (intel_port_is_combophy(dev_priv, encoder->port))
return icl_combo_port_connected(dev_priv, dig_port);
- case HPD_PORT_C:
- case HPD_PORT_D:
- case HPD_PORT_E:
- case HPD_PORT_F:
+ else if (intel_port_is_tc(dev_priv, encoder->port))
return icl_tc_port_connected(dev_priv, dig_port);
- default:
+ else
MISSING_CASE(encoder->hpd_pin);
- return false;
- }
+
+ return false;
}
/*
@@ -4982,20 +5231,23 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
return g4x_digital_port_connected(encoder);
}
- if (IS_GEN5(dev_priv))
- return ilk_digital_port_connected(encoder);
- else if (IS_GEN6(dev_priv))
- return snb_digital_port_connected(encoder);
- else if (IS_GEN7(dev_priv))
- return ivb_digital_port_connected(encoder);
- else if (IS_GEN8(dev_priv))
- return bdw_digital_port_connected(encoder);
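+ /* Check platforms from newest to oldest */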
+ if (INTEL_GEN(dev_priv) >= 11)
+ return icl_digital_port_connected(encoder);
+ else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
+ return spt_digital_port_connected(encoder);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(encoder);
- else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
- return spt_digital_port_connected(encoder);
- else
- return icl_digital_port_connected(encoder);
+ else if (IS_GEN8(dev_priv))
+ return bdw_digital_port_connected(encoder);
+ else if (IS_GEN7(dev_priv))
+ return ivb_digital_port_connected(encoder);
+ else if (IS_GEN6(dev_priv))
+ return snb_digital_port_connected(encoder);
+ else if (IS_GEN5(dev_priv))
+ return ilk_digital_port_connected(encoder);
+
+ MISSING_CASE(INTEL_GEN(dev_priv));
+ return false;
}
static struct edid *
@@ -5042,28 +5294,35 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
}
static int
-intel_dp_long_pulse(struct intel_connector *connector,
- struct drm_modeset_acquire_ctx *ctx)
+intel_dp_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
- u8 sink_irq_vector = 0;
+ enum intel_display_power_domain aux_domain =
+ intel_aux_power_domain(dig_port);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, aux_domain);
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
- else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))
+ else if (intel_digital_port_connected(encoder))
status = intel_dp_detect_dpcd(intel_dp);
else
status = connector_status_disconnected;
if (status == connector_status_disconnected) {
memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
+ memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
if (intel_dp->is_mst) {
DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
@@ -5089,6 +5348,10 @@ intel_dp_long_pulse(struct intel_connector *connector,
intel_dp_print_rates(intel_dp);
+ /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
+ if (INTEL_GEN(dev_priv) >= 11)
+ intel_dp_get_dsc_sink_cap(intel_dp);
+
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
@@ -5109,9 +5372,13 @@ intel_dp_long_pulse(struct intel_connector *connector,
* with an IRQ_HPD, so force a link status check.
*/
if (!intel_dp_is_edp(intel_dp)) {
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int ret;
- intel_dp_retrain_link(encoder, ctx);
+ ret = intel_dp_retrain_link(encoder, ctx);
+ if (ret) {
+ intel_display_power_put(dev_priv, aux_domain);
+ return ret;
+ }
}
/*
@@ -5123,61 +5390,17 @@ intel_dp_long_pulse(struct intel_connector *connector,
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
+ if (intel_dp_is_edp(intel_dp) ||
+ to_intel_connector(connector)->detect_edid)
status = connector_status_connected;
- intel_dp->detect_done = true;
- /* Try to read the source of the interrupt */
- if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
- intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
- sink_irq_vector != 0) {
- /* Clear interrupt source */
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector);
-
- if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
- intel_dp_handle_test_request(intel_dp);
- if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
- DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
- }
+ intel_dp_check_service_irq(intel_dp);
out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
- return status;
-}
-
-static int
-intel_dp_detect(struct drm_connector *connector,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct intel_dp *intel_dp = intel_attached_dp(connector);
- int status = connector->status;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
-
- /* If full detect is not performed yet, do a full detect */
- if (!intel_dp->detect_done) {
- struct drm_crtc *crtc;
- int ret;
-
- crtc = connector->state->crtc;
- if (crtc) {
- ret = drm_modeset_lock(&crtc->mutex, ctx);
- if (ret)
- return ret;
- }
-
- status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
- }
-
- intel_dp->detect_done = false;
-
+ intel_display_power_put(dev_priv, aux_domain);
return status;
}
@@ -5185,8 +5408,11 @@ static void
intel_dp_force(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *intel_encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ enum intel_display_power_domain aux_domain =
+ intel_aux_power_domain(dig_port);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -5195,11 +5421,11 @@ intel_dp_force(struct drm_connector *connector)
if (connector->status != connector_status_connected)
return;
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, aux_domain);
intel_dp_set_edid(intel_dp);
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv, aux_domain);
}
static int intel_dp_get_modes(struct drm_connector *connector)
@@ -5264,27 +5490,6 @@ intel_dp_connector_unregister(struct drm_connector *connector)
intel_connector_unregister(connector);
}
-static void
-intel_dp_connector_destroy(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- kfree(intel_connector->detect_edid);
-
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- kfree(intel_connector->edid);
-
- /*
- * Can't call intel_dp_is_edp() since the encoder may have been
- * destroyed already.
- */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- intel_panel_fini(&intel_connector->panel);
-
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -5348,7 +5553,8 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
an, DRM_HDCP_AN_LEN);
if (dpcd_ret != DRM_HDCP_AN_LEN) {
- DRM_ERROR("Failed to write An over DP/AUX (%zd)\n", dpcd_ret);
+ DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
+ dpcd_ret);
return dpcd_ret >= 0 ? -EIO : dpcd_ret;
}
@@ -5364,10 +5570,10 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
rxbuf, sizeof(rxbuf),
DP_AUX_CH_CTL_AUX_AKSV_SELECT);
if (ret < 0) {
- DRM_ERROR("Write Aksv over DP/AUX failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
return ret;
} else if (ret == 0) {
- DRM_ERROR("Aksv write over DP/AUX was empty\n");
+ DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
return -EIO;
}
@@ -5382,7 +5588,7 @@ static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret != DRM_HDCP_KSV_LEN) {
- DRM_ERROR("Read Bksv from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5400,7 +5606,7 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret != DRM_HDCP_BSTATUS_LEN) {
- DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5415,7 +5621,7 @@ int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
bcaps, 1);
if (ret != 1) {
- DRM_ERROR("Read bcaps from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
@@ -5445,7 +5651,7 @@ int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret != DRM_HDCP_RI_LEN) {
- DRM_ERROR("Read Ri' from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5460,7 +5666,7 @@ int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
return ret >= 0 ? -EIO : ret;
}
*ksv_ready = bstatus & DP_BSTATUS_READY;
@@ -5482,8 +5688,8 @@ int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
ksv_fifo + i * DRM_HDCP_KSV_LEN,
len);
if (ret != len) {
- DRM_ERROR("Read ksv[%d] from DP/AUX failed (%zd)\n", i,
- ret);
+ DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
+ i, ret);
return ret >= 0 ? -EIO : ret;
}
}
@@ -5503,7 +5709,7 @@ int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
DP_AUX_HDCP_V_PRIME(i), part,
DRM_HDCP_V_PRIME_PART_LEN);
if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
- DRM_ERROR("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
+ DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
return ret >= 0 ? -EIO : ret;
}
return 0;
@@ -5526,7 +5732,7 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
&bstatus, 1);
if (ret != 1) {
- DRM_ERROR("Read bstatus from DP/AUX failed (%zd)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
return false;
}
@@ -5565,6 +5771,7 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -5578,7 +5785,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
* indefinitely.
*/
DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
edp_panel_vdd_schedule_off(intel_dp);
}
@@ -5631,7 +5838,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_dp_connector_register,
.early_unregister = intel_dp_connector_unregister,
- .destroy = intel_dp_connector_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
@@ -5673,11 +5880,11 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
if (long_hpd) {
intel_dp->reset_link_params = true;
- intel_dp->detect_done = false;
return IRQ_NONE;
}
- intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_get(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@@ -5690,7 +5897,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
- intel_dp->detect_done = false;
goto put_power;
}
}
@@ -5700,19 +5906,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
handled = intel_dp_short_pulse(intel_dp);
- /* Short pulse can signify loss of hdcp authentication */
- intel_hdcp_check_link(intel_dp->attached_connector);
-
- if (!handled) {
- intel_dp->detect_done = false;
+ if (!handled)
goto put_power;
- }
}
ret = IRQ_HANDLED;
put_power:
- intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
+ intel_display_power_put(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
return ret;
}
@@ -5743,6 +5945,10 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
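+ /* max_bpc property: GMCH platforms are limited to 10 bpc, ilk+ support 12 bpc */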
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ drm_connector_attach_max_bpc_property(connector, 6, 10);
+ else if (INTEL_GEN(dev_priv) >= 5)
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
if (intel_dp_is_edp(intel_dp)) {
u32 allowed_scalers;
@@ -6099,10 +6305,10 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
switch (index) {
case DRRS_HIGH_RR:
- intel_dp_set_m_n(intel_crtc, M1_N1);
+ intel_dp_set_m_n(crtc_state, M1_N1);
break;
case DRRS_LOW_RR:
- intel_dp_set_m_n(intel_crtc, M2_N2);
+ intel_dp_set_m_n(crtc_state, M2_N2);
break;
case DRRS_MAX_RR:
default:
@@ -6422,6 +6628,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!intel_dp_is_edp(intel_dp))
return true;
+ INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
+
/*
* On IBX/CPT we may get here with LVDS already registered. Since the
* driver uses the only internal power sequencer available for both
@@ -6514,6 +6722,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
+ if (fixed_mode)
+ drm_connector_init_panel_orientation_property(
+ connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
+
return true;
out_vdd_off:
@@ -6624,9 +6836,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_aux_init(intel_dp);
- INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
- edp_panel_vdd_work);
-
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (HAS_DDI(dev_priv))
@@ -6743,6 +6952,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
if (port != PORT_A)
intel_infoframe_init(intel_dig_port);
+ intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index a911691dbd0f..4de247ddf05f 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -51,6 +51,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
bpp = 24;
if (intel_dp->compliance.test_data.bpc) {
@@ -208,12 +209,25 @@ static void intel_mst_pre_pll_enable_dp(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- if (intel_dp->active_mst_links == 0 &&
- intel_dig_port->base.pre_pll_enable)
+ if (intel_dp->active_mst_links == 0)
intel_dig_port->base.pre_pll_enable(&intel_dig_port->base,
pipe_config, NULL);
}
+static void intel_mst_post_pll_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
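+ /* Only the last MST stream going away tears down the PLL */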
+ if (intel_dp->active_mst_links == 0)
+ intel_dig_port->base.post_pll_disable(&intel_dig_port->base,
+ old_crtc_state,
+ old_conn_state);
+}
+
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
@@ -335,24 +349,12 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
intel_connector->port);
}
-static void
-intel_dp_mst_connector_destroy(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- kfree(intel_connector->edid);
-
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_dp_mst_connector_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
@@ -560,6 +562,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
intel_encoder->disable = intel_mst_disable_dp;
intel_encoder->post_disable = intel_mst_post_disable_dp;
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
+ intel_encoder->post_pll_disable = intel_mst_post_pll_disable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 00b3ab656b06..3c7f10d17658 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -748,7 +748,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
- if (crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
@@ -765,7 +765,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
val |= DPIO_PCS_CLK_SOFT_RESET;
vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
- if (crtc->config->lane_count > 2) {
+ if (crtc_state->lane_count > 2) {
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index e6cac9225536..d513ca875c67 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -126,16 +126,16 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
/**
* intel_prepare_shared_dpll - call a dpll's prepare hook
- * @crtc: CRTC which has a shared dpll
+ * @crtc_state: CRTC, and its state, which has a shared dpll
*
* This calls the PLL's prepare hook if it has one and if the PLL is not
* already enabled. The prepare hook is platform specific.
*/
-void intel_prepare_shared_dpll(struct intel_crtc *crtc)
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(pll == NULL))
return;
@@ -154,15 +154,15 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
/**
* intel_enable_shared_dpll - enable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
*
* Enable the shared DPLL used by @crtc.
*/
-void intel_enable_shared_dpll(struct intel_crtc *crtc)
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
unsigned int old_mask;
@@ -199,14 +199,15 @@ out:
/**
* intel_disable_shared_dpll - disable a CRTC's shared DPLL
- * @crtc: CRTC which has a shared DPLL
+ * @crtc_state: CRTC, and its state, which has a shared DPLL
*
* Disable the shared DPLL used by @crtc.
*/
-void intel_disable_shared_dpll(struct intel_crtc *crtc)
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_shared_dpll *pll = crtc->config->shared_dpll;
+ struct intel_shared_dpll *pll = crtc_state->shared_dpll;
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
/* PCH only available on ILK+ */
@@ -409,14 +410,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- /* Make sure no transcoder isn't still depending on us. */
- for_each_intel_crtc(dev, crtc) {
- if (crtc->config->shared_dpll == pll)
- assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
- }
I915_WRITE(PCH_DPLL(id), 0);
POSTING_READ(PCH_DPLL(id));
@@ -2530,7 +2523,8 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
if (intel_port_is_tc(dev_priv, encoder->port))
ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
- else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
else
ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
@@ -2628,11 +2622,16 @@ static enum port icl_mg_pll_id_to_port(enum intel_dpll_id id)
return id - DPLL_ID_ICL_MGPLL1 + PORT_C;
}
-static enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port)
{
return port - PORT_C + DPLL_ID_ICL_MGPLL1;
}
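+/* DPLL0 and DPLL1 are the PLLs serving the ICL combo PHY ports */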
+bool intel_dpll_is_combophy(enum intel_dpll_id id)
+{
+ return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
+}
+
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
uint32_t *target_dco_khz,
struct intel_dpll_hw_state *state)
@@ -2874,8 +2873,8 @@ static struct intel_shared_dpll *
icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *intel_dig_port;
struct intel_shared_dpll *pll;
struct intel_dpll_hw_state pll_state = {};
enum port port = encoder->port;
@@ -2883,18 +2882,21 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
int clock = crtc_state->port_clock;
bool ret;
- switch (port) {
- case PORT_A:
- case PORT_B:
+ if (intel_port_is_combophy(dev_priv, port)) {
min = DPLL_ID_ICL_DPLL0;
max = DPLL_ID_ICL_DPLL1;
ret = icl_calc_dpll_state(crtc_state, encoder, clock,
&pll_state);
- break;
- case PORT_C:
- case PORT_D:
- case PORT_E:
- case PORT_F:
+ } else if (intel_port_is_tc(dev_priv, port)) {
+ if (encoder->type == INTEL_OUTPUT_DP_MST) {
+ struct intel_dp_mst_encoder *mst_encoder;
+
+ mst_encoder = enc_to_mst(&encoder->base);
+ intel_dig_port = mst_encoder->primary;
+ } else {
+ intel_dig_port = enc_to_dig_port(&encoder->base);
+ }
+
if (intel_dig_port->tc_type == TC_PORT_TBT) {
min = DPLL_ID_ICL_TBTPLL;
max = min;
@@ -2906,8 +2908,7 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
&pll_state);
}
- break;
- default:
+ } else {
MISSING_CASE(port);
return NULL;
}
@@ -2932,21 +2933,16 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
{
- switch (id) {
- default:
- MISSING_CASE(id);
- /* fall through */
- case DPLL_ID_ICL_DPLL0:
- case DPLL_ID_ICL_DPLL1:
+ if (intel_dpll_is_combophy(id))
return CNL_DPLL_ENABLE(id);
- case DPLL_ID_ICL_TBTPLL:
+ else if (id == DPLL_ID_ICL_TBTPLL)
return TBT_PLL_ENABLE;
- case DPLL_ID_ICL_MGPLL1:
- case DPLL_ID_ICL_MGPLL2:
- case DPLL_ID_ICL_MGPLL3:
- case DPLL_ID_ICL_MGPLL4:
+ else
+ /*
+ * TODO: Make MG_PLL macros use
+ * tc port id instead of port id
+ */
return MG_PLL_ENABLE(icl_mg_pll_id_to_port(id));
- }
}
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2965,17 +2961,11 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!(val & PLL_ENABLE))
goto out;
- switch (id) {
- case DPLL_ID_ICL_DPLL0:
- case DPLL_ID_ICL_DPLL1:
- case DPLL_ID_ICL_TBTPLL:
+ if (intel_dpll_is_combophy(id) ||
+ id == DPLL_ID_ICL_TBTPLL) {
hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
- break;
- case DPLL_ID_ICL_MGPLL1:
- case DPLL_ID_ICL_MGPLL2:
- case DPLL_ID_ICL_MGPLL3:
- case DPLL_ID_ICL_MGPLL4:
+ } else {
port = icl_mg_pll_id_to_port(id);
hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
@@ -3013,9 +3003,6 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
- break;
- default:
- MISSING_CASE(id);
}
ret = true;
@@ -3104,21 +3091,10 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
PLL_POWER_STATE, 1))
DRM_ERROR("PLL %d Power not enabled\n", id);
- switch (id) {
- case DPLL_ID_ICL_DPLL0:
- case DPLL_ID_ICL_DPLL1:
- case DPLL_ID_ICL_TBTPLL:
+ if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
icl_dpll_write(dev_priv, pll);
- break;
- case DPLL_ID_ICL_MGPLL1:
- case DPLL_ID_ICL_MGPLL2:
- case DPLL_ID_ICL_MGPLL3:
- case DPLL_ID_ICL_MGPLL4:
+ else
icl_mg_pll_write(dev_priv, pll);
- break;
- default:
- MISSING_CASE(id);
- }
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index bf0de8a4dc63..a033d8f06d4a 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -334,9 +334,9 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
struct intel_crtc *crtc,
struct drm_atomic_state *state);
-void intel_prepare_shared_dpll(struct intel_crtc *crtc);
-void intel_enable_shared_dpll(struct intel_crtc *crtc);
-void intel_disable_shared_dpll(struct intel_crtc *crtc);
+void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
+void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct drm_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
@@ -345,5 +345,7 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
uint32_t pll_id);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
+enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
+bool intel_dpll_is_combophy(enum intel_dpll_id id);
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index db6fa1d0cbda..f94a04b4ad87 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -381,6 +381,15 @@ struct intel_hdcp_shim {
bool *hdcp_capable);
};
+struct intel_hdcp {
+ const struct intel_hdcp_shim *shim;
+ /* Mutex for hdcp state of the connector */
+ struct mutex mutex;
+ u64 value;
+ struct delayed_work check_work;
+ struct work_struct prop_work;
+};
+
struct intel_connector {
struct drm_connector base;
/*
@@ -413,11 +422,7 @@ struct intel_connector {
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
- const struct intel_hdcp_shim *hdcp_shim;
- struct mutex hdcp_mutex;
- uint64_t hdcp_value; /* protected by hdcp_mutex */
- struct delayed_work hdcp_check_work;
- struct work_struct hdcp_prop_work;
+ struct intel_hdcp hdcp;
};
struct intel_digital_connector_state {
@@ -539,6 +544,26 @@ struct intel_plane_state {
*/
int scaler_id;
+ /*
+ * linked_plane:
+ *
+ * ICL planar formats require 2 planes that are updated as pairs.
+ * This member is used to make sure the other plane is also updated
+ * when required, and for update_slave() to find the correct
+ * plane_state to pass as argument.
+ */
+ struct intel_plane *linked_plane;
+
+ /*
+ * slave:
+ * If set don't update use the linked plane's state for updating
+ * this plane during atomic commit with the update_slave() callback.
+ *
+ * It's also used by the watermark code to ignore wm calculations on
+ * this plane. They're calculated by the linked plane's wm code.
+ */
+ u32 slave;
+
struct drm_intel_sprite_colorkey ckey;
};
@@ -681,6 +706,8 @@ struct intel_crtc_wm_state {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
+ struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES];
} skl;
struct {
@@ -713,6 +740,13 @@ struct intel_crtc_wm_state {
bool need_postvbl_update;
};
+enum intel_output_format {
+ INTEL_OUTPUT_FORMAT_INVALID,
+ INTEL_OUTPUT_FORMAT_RGB,
+ INTEL_OUTPUT_FORMAT_YCBCR420,
+ INTEL_OUTPUT_FORMAT_YCBCR444,
+};
+
struct intel_crtc_state {
struct drm_crtc_state base;
@@ -894,14 +928,32 @@ struct intel_crtc_state {
u8 active_planes;
u8 nv12_planes;
+ /* bitmask of planes that will be updated during the commit */
+ u8 update_planes;
+
/* HDMI scrambling status */
bool hdmi_scrambling;
/* HDMI High TMDS char rate ratio */
bool hdmi_high_tmds_clock_ratio;
- /* output format is YCBCR 4:2:0 */
- bool ycbcr420;
+ /* Output format RGB/YCBCR etc */
+ enum intel_output_format output_format;
+
+ /* Output downscaling is handled by the LSPCON device */
+ bool lspcon_downsampling;
+
+ /* Display Stream compression state */
+ struct {
+ bool compression_enable;
+ bool dsc_split;
+ u16 compressed_bpp;
+ u8 slice_count;
+ } dsc_params;
+ struct drm_dsc_config dp_dsc_cfg;
+
+ /* Forward Error Correction state */
+ bool fec_enable;
};
struct intel_crtc {
@@ -974,8 +1026,11 @@ struct intel_plane {
void (*update_plane)(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
+ void (*update_slave)(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
void (*disable_plane)(struct intel_plane *plane,
- struct intel_crtc *crtc);
+ const struct intel_crtc_state *crtc_state);
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
@@ -1071,13 +1126,13 @@ struct intel_dp {
bool link_mst;
bool link_trained;
bool has_audio;
- bool detect_done;
bool reset_link_params;
- enum aux_ch aux_ch;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+ u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
+ u8 fec_capable;
/* source rates */
int num_source_rates;
const int *source_rates;
@@ -1095,7 +1150,6 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
- enum intel_display_power_domain aux_power_domain;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@@ -1157,9 +1211,15 @@ struct intel_dp {
struct intel_dp_compliance compliance;
};
+enum lspcon_vendor {
+ LSPCON_VENDOR_MCA,
+ LSPCON_VENDOR_PARADE
+};
+
struct intel_lspcon {
bool active;
enum drm_lspcon_mode mode;
+ enum lspcon_vendor vendor;
};
struct intel_digital_port {
@@ -1171,18 +1231,20 @@ struct intel_digital_port {
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
+ /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
+ enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
enum tc_port_type tc_type;
- void (*write_infoframe)(struct drm_encoder *encoder,
+ void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len);
- void (*set_infoframes)(struct drm_encoder *encoder,
+ void (*set_infoframes)(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
- bool (*infoframe_enabled)(struct drm_encoder *encoder,
+ bool (*infoframe_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
@@ -1282,6 +1344,12 @@ enc_to_dig_port(struct drm_encoder *encoder)
return NULL;
}
+static inline struct intel_digital_port *
+conn_to_dig_port(struct intel_connector *connector)
+{
+ return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+}
+
static inline struct intel_dp_mst_encoder *
enc_to_mst(struct drm_encoder *encoder)
{
@@ -1307,6 +1375,12 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
}
}
+static inline struct intel_lspcon *
+enc_to_intel_lspcon(struct drm_encoder *encoder)
+{
+ return &enc_to_dig_port(encoder)->lspcon;
+}
+
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
@@ -1332,6 +1406,27 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
}
static inline struct intel_plane_state *
+intel_atomic_get_plane_state(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ struct drm_plane_state *ret =
+ drm_atomic_get_plane_state(&state->base, &plane->base);
+
+ if (IS_ERR(ret))
+ return ERR_CAST(ret);
+
+ return to_intel_plane_state(ret);
+}
+
+static inline struct intel_plane_state *
+intel_atomic_get_old_plane_state(struct intel_atomic_state *state,
+ struct intel_plane *plane)
+{
+ return to_intel_plane_state(drm_atomic_get_old_plane_state(&state->base,
+ &plane->base));
+}
+
+static inline struct intel_plane_state *
intel_atomic_get_new_plane_state(struct intel_atomic_state *state,
struct intel_plane *plane)
{
@@ -1439,12 +1534,9 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
u8 voltage_swing);
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
-void icl_map_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state);
-void icl_unmap_plls_to_ports(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct drm_atomic_state *old_state);
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ enum intel_dpll_id pll_id);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
int color_plane, unsigned int height);
@@ -1489,7 +1581,6 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
@@ -1510,20 +1601,12 @@ void intel_mark_idle(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
-int intel_connector_init(struct intel_connector *);
-struct intel_connector *intel_connector_alloc(void);
-void intel_connector_free(struct intel_connector *connector);
-bool intel_connector_get_hw_state(struct intel_connector *connector);
-void intel_connector_attach_encoder(struct intel_connector *connector,
- struct intel_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port);
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
enum port port);
-
-enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
@@ -1629,9 +1712,11 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
unsigned int skl_cdclk_get_vco(unsigned int freq);
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
+void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
+ enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
struct dpll *best_clock);
@@ -1642,6 +1727,8 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
+enum intel_display_power_domain
+intel_aux_power_domain(struct intel_digital_port *dig_port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
@@ -1671,6 +1758,24 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
+/* intel_connector.c */
+int intel_connector_init(struct intel_connector *connector);
+struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
+void intel_connector_destroy(struct drm_connector *connector);
+int intel_connector_register(struct drm_connector *connector);
+void intel_connector_unregister(struct drm_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+enum pipe intel_connector_get_pipe(struct intel_connector *connector);
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
void intel_csr_load_program(struct drm_i915_private *);
@@ -1696,6 +1801,9 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
@@ -1729,9 +1837,6 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
-void icl_program_mg_dp_mode(struct intel_dp *intel_dp);
-void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port);
-void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port);
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1749,6 +1854,16 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
+uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+ int mode_clock, int mode_hdisplay);
+uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+ int mode_hdisplay);
+
+/* intel_vdsc.c */
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config);
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
@@ -1769,6 +1884,9 @@ void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
/* vlv_dsi.c */
void vlv_dsi_init(struct drm_i915_private *dev_priv);
+/* icl_dsi.c */
+void icl_dsi_init(struct drm_i915_private *dev_priv);
+
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1859,7 +1977,6 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
-
/* intel_lvds.c */
bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t lvds_reg, enum pipe *pipe);
@@ -1867,19 +1984,9 @@ void intel_lvds_init(struct drm_i915_private *dev_priv);
struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
bool intel_is_dual_link_lvds(struct drm_device *dev);
-
-/* intel_modes.c */
-int intel_connector_update_modes(struct drm_connector *connector,
- struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-void intel_attach_force_audio_property(struct drm_connector *connector);
-void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-
-
/* intel_overlay.c */
-void intel_setup_overlay(struct drm_i915_private *dev_priv);
-void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
+void intel_overlay_setup(struct drm_i915_private *dev_priv);
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
int intel_overlay_switch_off(struct intel_overlay *overlay);
int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1908,7 +2015,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector,
void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
-void intel_panel_destroy_backlight(struct drm_connector *connector);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_i915_private *dev_priv,
struct drm_display_mode *fixed_mode,
@@ -1937,6 +2043,7 @@ int intel_hdcp_enable(struct intel_connector *connector);
int intel_hdcp_disable(struct intel_connector *connector);
int intel_hdcp_check_link(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool intel_hdcp_capable(struct intel_connector *connector);
/* intel_psr.c */
#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
@@ -1962,12 +2069,18 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
u32 *out_value);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
+
+/* intel_quirks.c */
+void intel_init_quirks(struct drm_i915_private *dev_priv);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv);
+void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
+void icl_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_power_domains_enable(struct drm_i915_private *dev_priv);
void intel_power_domains_disable(struct drm_i915_private *dev_priv);
@@ -2091,6 +2204,9 @@ void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
@@ -2102,10 +2218,13 @@ int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry **entries,
- const struct skl_ddb_entry *ddb,
- int ignore);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry entries[],
+ int num_entries, int ignore_idx);
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_device *dev);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
@@ -2128,23 +2247,29 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
-void skl_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-void skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc);
-bool skl_plane_get_hw_state(struct intel_plane *plane, enum pipe *pipe);
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id);
-bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id);
-unsigned int skl_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
-int skl_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
int intel_plane_check_stride(const struct intel_plane_state *plane_state);
int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id);
+
+static inline bool icl_is_nv12_y_plane(enum plane_id id)
+{
+ /* No need for a gen check; these planes are only available on gen11 */
+ if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
+ return true;
+
+ return false;
+}
+
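+/* On gen11 only the first three planes of each pipe are HDR capable */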
+static inline bool icl_is_hdr_plane(struct intel_plane *plane)
+{
+ if (INTEL_GEN(to_i915(plane->base.dev)) < 11)
+ return false;
+
+ return plane->id < PLANE_SPRITE2;
+}
/* intel_tv.c */
void intel_tv_init(struct drm_i915_private *dev_priv);
@@ -2186,11 +2311,16 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
/* intel_atomic_plane.c */
-struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
+struct intel_plane *intel_plane_alloc(void);
+void intel_plane_free(struct intel_plane *plane);
struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
void intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -2206,6 +2336,18 @@ void intel_color_load_luts(struct drm_crtc_state *crtc_state);
bool lspcon_init(struct intel_digital_port *intel_dig_port);
void lspcon_resume(struct intel_lspcon *lspcon);
void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *buf, ssize_t len);
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state);
/* intel_pipe_crc.c */
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
new file mode 100644
index 000000000000..5fec02aceaed
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include "intel_dsi.h"
+
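+/* Per-lane link bitrate for the configured pixel clock, format and lane count */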
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
+{
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+
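+ /* Fall back to a safe default if the pixel format is unrecognized */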
+ if (WARN_ON(bpp < 0))
+ bpp = 16;
+
+ return intel_dsi->pclk * bpp / intel_dsi->lane_count;
+}
+
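+/* TLPX duration in ns for the programmed escape clock divider */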
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi)
+{
+ switch (intel_dsi->escape_clk_div) {
+ default:
+ case 0:
+ return 50;
+ case 1:
+ return 100;
+ case 2:
+ return 200;
+ }
+}
+
+int intel_dsi_get_modes(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_display_mode *mode;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (!intel_connector->panel.fixed_mode) {
+ DRM_DEBUG_KMS("no fixed mode\n");
+ return 0;
+ }
+
+ mode = drm_mode_duplicate(connector->dev,
+ intel_connector->panel.fixed_mode);
+ if (!mode) {
+ DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
+ return 0;
+ }
+
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
+ int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
+ DRM_DEBUG_KMS("\n");
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (fixed_mode) {
+ if (mode->hdisplay > fixed_mode->hdisplay)
+ return MODE_PANEL;
+ if (mode->vdisplay > fixed_mode->vdisplay)
+ return MODE_PANEL;
+ if (fixed_mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
+
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+ const struct mipi_dsi_host_ops *funcs,
+ enum port port)
+{
+ struct intel_dsi_host *host;
+ struct mipi_dsi_device *device;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return NULL;
+
+ host->base.ops = funcs;
+ host->intel_dsi = intel_dsi;
+ host->port = port;
+
+ /*
+ * We should call mipi_dsi_host_register(&host->base) here, but we don't
+ * have a host->dev, and we don't have OF stuff either. So just use the
+ * dsi framework as a library and hope for the best. Create the dsi
+ * devices by ourselves here too. Need to be careful though, because we
+ * don't initialize any of the driver model devices here.
+ */
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device) {
+ kfree(host);
+ return NULL;
+ }
+
+ device->host = &host->base;
+ host->device = device;
+
+ return host;
+}
+
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum drm_panel_orientation orientation;
+
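+ /* A DSI specific VBT orientation takes precedence over the generic one */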
+ orientation = dev_priv->vbt.dsi.orientation;
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+
+ orientation = dev_priv->vbt.orientation;
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+
+ return DRM_MODE_PANEL_ORIENTATION_NORMAL;
+}
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index ad7c1cb32983..d968f1f13e09 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -81,14 +81,21 @@ struct intel_dsi {
u16 dcs_backlight_ports;
u16 dcs_cabc_ports;
+ /* RGB or BGR */
+ bool bgr_enabled;
+
u8 pixel_overlap;
u32 port_bits;
u32 bw_timer;
u32 dphy_reg;
+
+ /* data lanes dphy timing */
+ u32 dphy_data_lane_reg;
u32 video_frmt_cfg_bits;
u16 lp_byte_clk;
/* timeouts in byte clocks */
+ u16 hs_tx_timeout;
u16 lp_rx_timeout;
u16 turn_arnd_val;
u16 rst_timer_val;
@@ -129,9 +136,36 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct intel_dsi, base.base);
}
+static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
+}
+
+static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
+{
+ return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
+}
+
+static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder)
+{
+ return enc_to_intel_dsi(&encoder->base)->ports;
+}
+
+/* intel_dsi.c */
+int intel_dsi_bitrate(const struct intel_dsi *intel_dsi);
+int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi);
+enum drm_panel_orientation
+intel_dsi_get_panel_orientation(struct intel_connector *connector);
+
/* vlv_dsi.c */
void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+int intel_dsi_get_modes(struct drm_connector *connector);
+enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
+ const struct mipi_dsi_host_ops *funcs,
+ enum port port);
/* vlv_dsi_pll.c */
int vlv_dsi_pll_compute(struct intel_encoder *encoder,
@@ -158,5 +192,6 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index ac83d6b89ae0..a1a8b3790e61 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -103,6 +103,18 @@ static struct gpio_map vlv_gpio_table[] = {
#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
#define CHV_GPIO_CFGLOCK (1 << 31)
+/* ICL DSI Display GPIO Pins */
+#define ICL_GPIO_DDSP_HPD_A 0
+#define ICL_GPIO_L_VDDEN_1 1
+#define ICL_GPIO_L_BKLTEN_1 2
+#define ICL_GPIO_DDPA_CTRLCLK_1 3
+#define ICL_GPIO_DDPA_CTRLDATA_1 4
+#define ICL_GPIO_DDSP_HPD_B 5
+#define ICL_GPIO_L_VDDEN_2 6
+#define ICL_GPIO_L_BKLTEN_2 7
+#define ICL_GPIO_DDPA_CTRLCLK_2 8
+#define ICL_GPIO_DDPA_CTRLDATA_2 9
+
static inline enum port intel_dsi_seq_port_to_port(u8 port)
{
return port ? PORT_C : PORT_A;
@@ -111,6 +123,7 @@ static inline enum port intel_dsi_seq_port_to_port(u8 port)
static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
const u8 *data)
{
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
struct mipi_dsi_device *dsi_device;
u8 type, flags, seq_port;
u16 len;
@@ -181,7 +194,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
- vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
+ if (!IS_ICELAKE(dev_priv))
+ vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
data += len;
@@ -322,6 +336,12 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
gpiod_set_value(gpio_desc, value);
}
+static void icl_exec_gpio(struct drm_i915_private *dev_priv,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
+}
+
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
@@ -345,7 +365,9 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
/* pull up/down */
value = *data++ & 1;
- if (IS_VALLEYVIEW(dev_priv))
+ if (IS_ICELAKE(dev_priv))
+ icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
+ else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
else if (IS_CHERRYVIEW(dev_priv))
chv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
@@ -481,6 +503,17 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
}
}
+void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+
+ /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
+ if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
+ return;
+
+ msleep(msec);
+}
+
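A typical call site then swaps a bare msleep() for this VBT-aware helper, e.g.
(illustrative, patterned on the panel power sequencing in vlv_dsi.c):

	/* The delay is skipped for v3 vid-mode VBTs, whose sequences embed it */
	intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);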
int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
{
struct intel_connector *connector = intel_dsi->attached_connector;
@@ -499,110 +532,125 @@ int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
return 1;
}
-bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+#define ICL_PREPARE_CNT_MAX 0x7
+#define ICL_CLK_ZERO_CNT_MAX 0xf
+#define ICL_TRAIL_CNT_MAX 0x7
+#define ICL_TCLK_PRE_CNT_MAX 0x3
+#define ICL_TCLK_POST_CNT_MAX 0x7
+#define ICL_HS_ZERO_CNT_MAX 0xf
+#define ICL_EXIT_ZERO_CNT_MAX 0x7
+
+static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
- struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
- struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
- u32 bpp;
- u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
- u32 ui_num, ui_den;
+ u32 tlpx_ns;
u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
u32 ths_prepare_ns, tclk_trail_ns;
- u32 tclk_prepare_clkzero, ths_prepare_hszero;
- u32 lp_to_hs_switch, hs_to_lp_switch;
- u32 pclk, computed_ddr;
- u32 mul;
- u16 burst_mode_ratio;
- enum port port;
-
- DRM_DEBUG_KMS("\n");
-
- intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
- intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
- intel_dsi->lane_count = mipi_config->lane_cnt + 1;
- intel_dsi->pixel_format =
- pixel_format_from_register_bits(
- mipi_config->videomode_color_format << 7);
- bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
+ u32 hs_zero_cnt;
+ u32 tclk_pre_cnt, tclk_post_cnt;
- intel_dsi->dual_link = mipi_config->dual_link;
- intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
- intel_dsi->operation_mode = mipi_config->is_cmd_mode;
- intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
- intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
- intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
- intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
- intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
- intel_dsi->init_count = mipi_config->master_init_timer;
- intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
- intel_dsi->video_frmt_cfg_bits =
- mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
- pclk = mode->clock;
+ tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
+ ths_prepare_ns = max(mipi_config->ths_prepare,
+ mipi_config->tclk_prepare);
- /* In dual link mode each port needs half of pixel clock */
- if (intel_dsi->dual_link) {
- pclk = pclk / 2;
+ /*
+ * prepare cnt in escape clocks
+	 * this field represents a fixed-point value with a precision
+	 * of 1.2, i.e. the most significant bit is the integer part
+	 * and the least significant 2 bits are fraction bits,
+	 * so the field can represent a range of 0.25 to 1.75
+ */
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
+ if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
+ DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+ prepare_cnt = ICL_PREPARE_CNT_MAX;
+ }
- /* we can enable pixel_overlap if needed by panel. In this
- * case we need to increase the pixelclock for extra pixels
- */
- if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
- pclk += DIV_ROUND_UP(mode->vtotal *
- intel_dsi->pixel_overlap *
- 60, 1000);
- }
+ /* clk zero count in escape clocks */
+ clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
+ ths_prepare_ns, tlpx_ns);
+ if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+ clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
}
- /* Burst Mode Ratio
- * Target ddr frequency from VBT / non burst ddr freq
- * multiply by 100 to preserve remainder
- */
- if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
- if (mipi_config->target_burst_mode_freq) {
- computed_ddr = (pclk * bpp) / intel_dsi->lane_count;
+	/* trail cnt in escape clocks */
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
+ if (trail_cnt > ICL_TRAIL_CNT_MAX) {
+ DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+ trail_cnt = ICL_TRAIL_CNT_MAX;
+ }
- if (mipi_config->target_burst_mode_freq <
- computed_ddr) {
- DRM_ERROR("Burst mode freq is less than computed\n");
- return false;
- }
+ /* tclk pre count in escape clocks */
+ tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
+ if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
+ DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+ tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
+ }
- burst_mode_ratio = DIV_ROUND_UP(
- mipi_config->target_burst_mode_freq * 100,
- computed_ddr);
+ /* tclk post count in escape clocks */
+ tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
+ if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
+ DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+ tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
+ }
- pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
- } else {
- DRM_ERROR("Burst mode target is not set\n");
- return false;
- }
- } else
- burst_mode_ratio = 100;
+ /* hs zero cnt in escape clocks */
+ hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
+ ths_prepare_ns, tlpx_ns);
+ if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+ hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
+ }
- intel_dsi->burst_mode_ratio = burst_mode_ratio;
- intel_dsi->pclk = pclk;
+ /* hs exit zero cnt in escape clocks */
+ exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
+ if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
+ DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+ exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
+ }
- bitrate = (pclk * bpp) / intel_dsi->lane_count;
+ /* clock lane dphy timings */
+ intel_dsi->dphy_reg = (CLK_PREPARE_OVERRIDE |
+ CLK_PREPARE(prepare_cnt) |
+ CLK_ZERO_OVERRIDE |
+ CLK_ZERO(clk_zero_cnt) |
+ CLK_PRE_OVERRIDE |
+ CLK_PRE(tclk_pre_cnt) |
+ CLK_POST_OVERRIDE |
+ CLK_POST(tclk_post_cnt) |
+ CLK_TRAIL_OVERRIDE |
+ CLK_TRAIL(trail_cnt));
+
+ /* data lanes dphy timings */
+ intel_dsi->dphy_data_lane_reg = (HS_PREPARE_OVERRIDE |
+ HS_PREPARE(prepare_cnt) |
+ HS_ZERO_OVERRIDE |
+ HS_ZERO(hs_zero_cnt) |
+ HS_TRAIL_OVERRIDE |
+ HS_TRAIL(trail_cnt) |
+ HS_EXIT_OVERRIDE |
+ HS_EXIT(exit_zero_cnt));
+}
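
To make the 1.2 fixed-point encoding above concrete (the numbers are invented
for illustration, not taken from any real VBT): with ths_prepare_ns = 60 and
tlpx_ns = 50, prepare_cnt = DIV_ROUND_UP(60 * 4, 50) = 5 = 0b101, i.e. an
integer bit of 1 plus a fraction of 0b01 (0.25), encoding 1.25 escape clocks.
That is the shortest representable duration (62.5 ns) that still covers the
requested 60 ns.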
- switch (intel_dsi->escape_clk_div) {
- case 0:
- tlpx_ns = 50;
- break;
- case 1:
- tlpx_ns = 100;
- break;
+static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ u32 tlpx_ns, extra_byte_count, tlpx_ui;
+ u32 ui_num, ui_den;
+ u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt;
+ u32 ths_prepare_ns, tclk_trail_ns;
+ u32 tclk_prepare_clkzero, ths_prepare_hszero;
+ u32 lp_to_hs_switch, hs_to_lp_switch;
+ u32 mul;
- case 2:
- tlpx_ns = 200;
- break;
- default:
- tlpx_ns = 50;
- break;
- }
+ tlpx_ns = intel_dsi_tlpx_ns(intel_dsi);
switch (intel_dsi->lane_count) {
case 1:
@@ -620,7 +668,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
/* in Kbps */
ui_num = NS_KHZ_RATIO;
- ui_den = bitrate;
+ ui_den = intel_dsi_bitrate(intel_dsi);
tclk_prepare_clkzero = mipi_config->tclk_prepare_clkzero;
ths_prepare_hszero = mipi_config->ths_prepare_hszero;
@@ -746,6 +794,88 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
DIV_ROUND_UP(2 * tlpx_ui + trail_cnt * 2 + 8,
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+}
+
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
+{
+ struct drm_device *dev = intel_dsi->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
+ struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
+ struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
+ u16 burst_mode_ratio;
+ enum port port;
+
+ DRM_DEBUG_KMS("\n");
+
+ intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
+ intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
+ intel_dsi->lane_count = mipi_config->lane_cnt + 1;
+ intel_dsi->pixel_format =
+ pixel_format_from_register_bits(
+ mipi_config->videomode_color_format << 7);
+
+ intel_dsi->dual_link = mipi_config->dual_link;
+ intel_dsi->pixel_overlap = mipi_config->pixel_overlap;
+ intel_dsi->operation_mode = mipi_config->is_cmd_mode;
+ intel_dsi->video_mode_format = mipi_config->video_transfer_mode;
+ intel_dsi->escape_clk_div = mipi_config->byte_clk_sel;
+ intel_dsi->lp_rx_timeout = mipi_config->lp_rx_timeout;
+ intel_dsi->hs_tx_timeout = mipi_config->hs_tx_timeout;
+ intel_dsi->turn_arnd_val = mipi_config->turn_around_timeout;
+ intel_dsi->rst_timer_val = mipi_config->device_reset_timer;
+ intel_dsi->init_count = mipi_config->master_init_timer;
+ intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
+ intel_dsi->video_frmt_cfg_bits =
+ mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ intel_dsi->bgr_enabled = mipi_config->rgb_flip;
+
+ /* Starting point, adjusted depending on dual link and burst mode */
+ intel_dsi->pclk = mode->clock;
+
+ /* In dual link mode each port needs half of pixel clock */
+ if (intel_dsi->dual_link) {
+ intel_dsi->pclk /= 2;
+
+ /* we can enable pixel_overlap if needed by panel. In this
+ * case we need to increase the pixelclock for extra pixels
+ */
+ if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
+ intel_dsi->pclk += DIV_ROUND_UP(mode->vtotal * intel_dsi->pixel_overlap * 60, 1000);
+ }
+ }
+
+ /* Burst Mode Ratio
+ * Target ddr frequency from VBT / non burst ddr freq
+ * multiply by 100 to preserve remainder
+ */
+ if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
+ if (mipi_config->target_burst_mode_freq) {
+ u32 bitrate = intel_dsi_bitrate(intel_dsi);
+
+ if (mipi_config->target_burst_mode_freq < bitrate) {
+ DRM_ERROR("Burst mode freq is less than computed\n");
+ return false;
+ }
+
+ burst_mode_ratio = DIV_ROUND_UP(
+ mipi_config->target_burst_mode_freq * 100,
+ bitrate);
+
+ intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
+ } else {
+ DRM_ERROR("Burst mode target is not set\n");
+ return false;
+ }
+ } else
+ burst_mode_ratio = 100;
+
+ intel_dsi->burst_mode_ratio = burst_mode_ratio;
+
+ if (IS_ICELAKE(dev_priv))
+ icl_dphy_param_init(intel_dsi);
+ else
+ vlv_dphy_param_init(intel_dsi);
DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
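
A worked example of the burst-mode scaling above (values invented for
illustration, and assuming intel_dsi_bitrate() keeps the
(pclk * bpp) / lane_count formula visible in the removed code): with
pclk = 100000 kHz, 24 bpp and 4 lanes, the bitrate is 600000 kbps per lane;
a VBT target_burst_mode_freq of 720000 then yields
burst_mode_ratio = DIV_ROUND_UP(720000 * 100, 600000) = 120, and pclk is
scaled to DIV_ROUND_UP(100000 * 120, 100) = 120000 kHz. The link thus runs
1.2x faster than strictly needed, leaving a sixth of each line for dropping
to low-power mode.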
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 4e142ff49708..0042a7f69387 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -256,6 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
return true;
}
@@ -333,18 +334,11 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
return 0;
}
-static void intel_dvo_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- intel_panel_fini(&to_intel_connector(connector)->panel);
- kfree(connector);
-}
-
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.detect = intel_dvo_detect,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_dvo_destroy,
+ .destroy = intel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 217ed3ee1cab..af2873403009 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -273,13 +273,13 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
- if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
+ if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
return -EINVAL;
- if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
+ if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
return -EINVAL;
- if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
+ if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
return -EINVAL;
GEM_BUG_ON(dev_priv->engine[id]);
@@ -335,7 +335,10 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
WARN_ON(ring_mask == 0);
WARN_ON(ring_mask &
- GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
+ GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
+
+ if (i915_inject_load_failure())
+ return -ENODEV;
for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
if (!HAS_ENGINE(dev_priv, i))
@@ -399,7 +402,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
err = -EINVAL;
err_id = id;
- if (GEM_WARN_ON(!init))
+ if (GEM_DEBUG_WARN_ON(!init))
goto cleanup;
err = init(engine);
@@ -463,7 +466,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
execlists->port_mask = 1;
- BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
+ GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);
execlists->queue_priority = INT_MIN;
@@ -482,7 +485,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
i915_timeline_init(engine->i915, &engine->timeline, engine->name);
- lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE);
+ i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);
intel_engine_init_execlist(engine);
intel_engine_init_hangcheck(engine);
@@ -490,46 +493,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
intel_engine_init_cmd_parser(engine);
}
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- int ret;
-
- WARN_ON(engine->scratch);
-
- obj = i915_gem_object_create_stolen(engine->i915, size);
- if (!obj)
- obj = i915_gem_object_create_internal(engine->i915, size);
- if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate scratch page\n");
- return PTR_ERR(obj);
- }
-
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unref;
- }
-
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
- if (ret)
- goto err_unref;
-
- engine->scratch = vma;
- return 0;
-
-err_unref:
- i915_gem_object_put(obj);
- return ret;
-}
-
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
-{
- i915_vma_unpin_and_release(&engine->scratch, 0);
-}
-
static void cleanup_status_page(struct intel_engine_cs *engine)
{
if (HWS_NEEDS_PHYSICAL(engine->i915)) {
@@ -704,8 +667,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- intel_engine_cleanup_scratch(engine);
-
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -720,6 +681,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
__intel_context_unpin(i915->kernel_context, engine);
i915_timeline_fini(&engine->timeline);
+
+ intel_wa_list_free(&engine->ctx_wa_list);
+ intel_wa_list_free(&engine->wa_list);
+ intel_wa_list_free(&engine->whitelist);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
@@ -809,7 +774,7 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
u32 slice = fls(sseu->slice_mask);
u32 subslice = fls(sseu->subslice_mask[slice]);
- if (INTEL_GEN(dev_priv) == 10)
+ if (IS_GEN10(dev_priv))
mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
GEN8_MCR_SUBSLICE(subslice);
else if (INTEL_GEN(dev_priv) >= 11)
@@ -1534,10 +1499,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
count = 0;
drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p =
- rb_entry(rb, typeof(*p), node);
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ int i;
- list_for_each_entry(rq, &p->requests, sched.link) {
+ priolist_for_each_request(rq, p, i) {
if (count++ < MAX_REQUESTS_TO_SHOW - 1)
print_request(m, rq, "\t\tQ ");
else
@@ -1559,8 +1524,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
- drm_printf(m, "\t%s [%d] waiting for %x\n",
- w->tsk->comm, w->tsk->pid, w->seqno);
+ drm_printf(m, "\t%s [%d:%c] waiting for %x\n",
+ w->tsk->comm, w->tsk->pid,
+ task_state_to_char(w->tsk),
+ w->seqno);
}
spin_unlock(&b->rb_lock);
local_irq_restore(flags);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 74d425c700ef..f23570c44323 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
- if (INTEL_GEN(dev_priv) == 7)
+ if (IS_GEN7(dev_priv))
lines = min(lines, 2048);
else if (INTEL_GEN(dev_priv) >= 8)
lines = min(lines, 2560);
@@ -674,6 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.adjusted_y = plane_state->color_plane[0].y;
cache->plane.y = plane_state->base.src.y1 >> 16;
+ cache->plane.pixel_blend_mode = plane_state->base.pixel_blend_mode;
+
if (!cache->plane.visible)
return;
@@ -748,6 +750,12 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
+ if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
+ cache->fb.format->has_alpha) {
+ fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
+ return false;
+ }
+
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
@@ -1301,7 +1309,7 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
fbc->active = false;
if (need_fbc_vtd_wa(dev_priv))
- mkwrite_device_info(dev_priv)->has_fbc = false;
+ mkwrite_device_info(dev_priv)->display.has_fbc = false;
i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f99332972b7a..fb5bb5b32a60 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -593,7 +593,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
* pipe. Note we need to use the selected fb's pitch and bpp
* rather than the current pipe's, since they differ.
*/
- cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay;
+ cur_size = crtc->state->adjusted_mode.crtc_hdisplay;
cur_size = cur_size * fb->base.format->cpp[0];
if (fb->base.pitches[0] < cur_size) {
DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n",
@@ -603,13 +603,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
break;
}
- cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
+ cur_size = crtc->state->adjusted_mode.crtc_vdisplay;
cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
- intel_crtc->config->base.adjusted_mode.crtc_hdisplay,
- intel_crtc->config->base.adjusted_mode.crtc_vdisplay,
+ crtc->state->adjusted_mode.crtc_hdisplay,
+ crtc->state->adjusted_mode.crtc_vdisplay,
fb->base.format->cpp[0] * 8,
cur_size);
@@ -672,7 +672,7 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(INTEL_INFO(dev_priv)->num_pipes == 0))
+ if (WARN_ON(!HAS_DISPLAY(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 230aea69385d..8660af3fd755 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -50,7 +50,8 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
unsigned int i;
guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
- guc->send_regs.count = SOFT_SCRATCH_COUNT - 1;
+ guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
+ BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
for (i = 0; i < guc->send_regs.count; i++) {
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
@@ -521,6 +522,44 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
+/*
+ * The ENTER/EXIT_S_STATE actions queue the save/restore operation in GuC FW and
+ * then return, so waiting on the H2G is not enough to guarantee GuC is done.
+ * When all the processing is done, GuC writes INTEL_GUC_SLEEP_STATE_SUCCESS to
+ * scratch register 14, so we can poll on that. Note that GuC does not ensure
+ * that the value in the register is different from
+ * INTEL_GUC_SLEEP_STATE_SUCCESS while the action is in progress so we need to
+ * take care of that ourselves as well.
+ */
+static int guc_sleep_state_action(struct intel_guc *guc,
+ const u32 *action, u32 len)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ int ret;
+ u32 status;
+
+ I915_WRITE(SOFT_SCRATCH(14), INTEL_GUC_SLEEP_STATE_INVALID_MASK);
+
+ ret = intel_guc_send(guc, action, len);
+ if (ret)
+ return ret;
+
+ ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
+ INTEL_GUC_SLEEP_STATE_INVALID_MASK,
+ 0, 0, 10, &status);
+ if (ret)
+ return ret;
+
+ if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
+ DRM_ERROR("GuC failed to change sleep state. "
+ "action=0x%x, err=%u\n",
+ action[0], status);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* intel_guc_suspend() - notify GuC entering suspend state
* @guc: the guc
@@ -533,7 +572,7 @@ int intel_guc_suspend(struct intel_guc *guc)
intel_guc_ggtt_offset(guc, guc->shared_data)
};
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
+ return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}
/**
@@ -571,7 +610,7 @@ int intel_guc_resume(struct intel_guc *guc)
intel_guc_ggtt_offset(guc, guc->shared_data)
};
- return intel_guc_send(guc, data, ARRAY_SIZE(data));
+ return guc_sleep_state_action(guc, data, ARRAY_SIZE(data));
}
/**
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index ad42faf48c46..0f1c4f9ebfd8 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -95,6 +95,11 @@ struct intel_guc {
void (*notify)(struct intel_guc *guc);
};
+static inline bool intel_guc_is_alive(struct intel_guc *guc)
+{
+ return intel_uc_fw_is_loaded(&guc->fw);
+}
+
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index a9e6fcce467c..a67144ee5ceb 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -78,7 +78,8 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
guc_fw->major_ver_wanted = KBL_FW_MAJOR;
guc_fw->minor_ver_wanted = KBL_FW_MINOR;
} else {
- DRM_WARN("%s: No firmware known for this platform!\n",
+ dev_info(dev_priv->drm.dev,
+ "%s: No firmware known for this platform!\n",
intel_uc_fw_type_repr(guc_fw->type));
}
}
@@ -125,66 +126,26 @@ static void guc_prepare_xfer(struct intel_guc *guc)
}
/* Copy RSA signature from the fw image to HW for verification */
-static int guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
+static void guc_xfer_rsa(struct intel_guc *guc, struct i915_vma *vma)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *guc_fw = &guc->fw;
- struct sg_table *sg = vma->pages;
u32 rsa[UOS_RSA_SCRATCH_COUNT];
int i;
- if (sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, sizeof(rsa),
- guc_fw->rsa_offset) != sizeof(rsa))
- return -EINVAL;
+ sg_pcopy_to_buffer(vma->pages->sgl, vma->pages->nents,
+ rsa, sizeof(rsa), guc->fw.rsa_offset);
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
-
- return 0;
}
-/*
- * Transfer the firmware image to RAM for execution by the microcontroller.
- *
- * Architecturally, the DMA engine is bidirectional, and can potentially even
- * transfer between GTT locations. This functionality is left out of the API
- * for now as there is no need for it.
- */
-static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+static bool guc_xfer_completed(struct intel_guc *guc, u32 *status)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct intel_uc_fw *guc_fw = &guc->fw;
- unsigned long offset;
- u32 status;
- int ret;
-
- /*
- * The header plus uCode will be copied to WOPCM via DMA, excluding any
- * other components
- */
- I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
-
- /* Set the source address for the new blob */
- offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
- /*
- * Set the DMA destination. Current uCode expects the code to be
- * loaded at 8k; locations below this are used for the stack.
- */
- I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
-
- /* Finally start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
-
- /* Wait for DMA to finish */
- ret = __intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0,
- 2, 100, &status);
- DRM_DEBUG_DRIVER("GuC DMA status %#x\n", status);
-
- return ret;
+ /* Did we complete the xfer? */
+ *status = I915_READ(DMA_CTRL);
+ return !(*status & START_DMA);
}
/*
@@ -217,8 +178,8 @@ static int guc_wait_ucode(struct intel_guc *guc)
* NB: Docs recommend not using the interrupt for completion.
* Measurements indicate this should take no more than 20ms, so a
* timeout here indicates that the GuC has failed and is unusable.
- * (Higher levels of the driver will attempt to fall back to
- * execlist mode if this happens.)
+ * (Higher levels of the driver may decide to reset the GuC and
+ * attempt the ucode load again if this happens.)
*/
ret = wait_for(guc_ready(guc, &status), 100);
DRM_DEBUG_DRIVER("GuC status %#x\n", status);
@@ -228,10 +189,52 @@ static int guc_wait_ucode(struct intel_guc *guc)
ret = -ENOEXEC;
}
+ if (ret == 0 && !guc_xfer_completed(guc, &status)) {
+ DRM_ERROR("GuC is ready, but the xfer %08x is incomplete\n",
+ status);
+ ret = -ENXIO;
+ }
+
return ret;
}
/*
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Architecturally, the DMA engine is bidirectional, and can potentially even
+ * transfer between GTT locations. This functionality is left out of the API
+ * for now as there is no need for it.
+ */
+static int guc_xfer_ucode(struct intel_guc *guc, struct i915_vma *vma)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uc_fw *guc_fw = &guc->fw;
+ unsigned long offset;
+
+ /*
+ * The header plus uCode will be copied to WOPCM via DMA, excluding any
+ * other components
+ */
+ I915_WRITE(DMA_COPY_SIZE, guc_fw->header_size + guc_fw->ucode_size);
+
+ /* Set the source address for the new blob */
+ offset = intel_guc_ggtt_offset(guc, vma) + guc_fw->header_offset;
+ I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+ I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+ /*
+ * Set the DMA destination. Current uCode expects the code to be
+ * loaded at 8k; locations below this are used for the stack.
+ */
+ I915_WRITE(DMA_ADDR_1_LOW, 0x2000);
+ I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+ /* Finally start the DMA */
+ I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(UOS_MOVE | START_DMA));
+
+ return guc_wait_ucode(guc);
+}
+
/*
* Load the GuC firmware blob into the MinuteIA.
*/
static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
@@ -251,17 +254,9 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
* by the DMA engine in one operation, whereas the RSA signature is
* loaded via MMIO.
*/
- ret = guc_xfer_rsa(guc, vma);
- if (ret)
- DRM_WARN("GuC firmware signature xfer error %d\n", ret);
+ guc_xfer_rsa(guc, vma);
ret = guc_xfer_ucode(guc, vma);
- if (ret)
- DRM_WARN("GuC firmware code xfer error %d\n", ret);
-
- ret = guc_wait_ucode(guc);
- if (ret)
- DRM_ERROR("GuC firmware xfer error %d\n", ret);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 8382d591c784..b2f5148f4f17 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -39,6 +39,11 @@
#define GUC_VIDEO_ENGINE2 4
#define GUC_MAX_ENGINES_NUM (GUC_VIDEO_ENGINE2 + 1)
+#define GUC_DOORBELL_INVALID 256
+
+#define GUC_DB_SIZE (PAGE_SIZE)
+#define GUC_WQ_SIZE (PAGE_SIZE * 2)
+
/* Work queue item header definitions */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
@@ -59,9 +64,6 @@
#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
-#define GUC_DOORBELL_ENABLED 1
-#define GUC_DOORBELL_DISABLED 0
-
#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2)
@@ -219,26 +221,6 @@ struct uc_css_header {
u32 header_info;
} __packed;
-struct guc_doorbell_info {
- u32 db_status;
- u32 cookie;
- u32 reserved[14];
-} __packed;
-
-union guc_doorbell_qw {
- struct {
- u32 db_status;
- u32 cookie;
- };
- u64 value_qw;
-} __packed;
-
-#define GUC_NUM_DOORBELLS 256
-#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS)
-
-#define GUC_DB_SIZE (PAGE_SIZE)
-#define GUC_WQ_SIZE (PAGE_SIZE * 2)
-
/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
u32 header;
@@ -601,7 +583,9 @@ struct guc_shared_ctx_data {
* registers, where first register holds data treated as message header,
* and other registers are used to hold message payload.
*
- * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8
+ * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
+ * but no H2G command takes more than 8 parameters and the GuC FW
+ * itself uses an 8-element array to store the H2G message.
*
* +-----------+---------+---------+---------+
* | MMIO[0] | MMIO[1] | ... | MMIO[n] |
@@ -633,6 +617,8 @@ struct guc_shared_ctx_data {
* field.
*/
+#define GUC_MAX_MMIO_MSG_LEN 8
+
#define INTEL_GUC_MSG_TYPE_SHIFT 28
#define INTEL_GUC_MSG_TYPE_MASK (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
#define INTEL_GUC_MSG_DATA_SHIFT 16
@@ -687,6 +673,13 @@ enum intel_guc_report_status {
INTEL_GUC_REPORT_STATUS_COMPLETE = 0x4,
};
+enum intel_guc_sleep_state_status {
+ INTEL_GUC_SLEEP_STATE_SUCCESS = 0x0,
+ INTEL_GUC_SLEEP_STATE_PREEMPT_TO_IDLE_FAILED = 0x1,
+ INTEL_GUC_SLEEP_STATE_ENGINE_RESET_FAILED = 0x2
+#define INTEL_GUC_SLEEP_STATE_INVALID_MASK 0x80000000
+};
+
#define GUC_LOG_CONTROL_LOGGING_ENABLED (1 << 0)
#define GUC_LOG_CONTROL_VERBOSITY_SHIFT 4
#define GUC_LOG_CONTROL_VERBOSITY_MASK (0xF << GUC_LOG_CONTROL_VERBOSITY_SHIFT)
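
For reference, the header layout documented above can be composed from the
shifts and masks in this file. A minimal sketch, assuming the CODE field
occupies the low bits (the helper itself is illustrative, not part of this
patch):

	static inline u32 guc_mmio_msg_header(u32 type, u32 data, u32 code)
	{
		return (type << INTEL_GUC_MSG_TYPE_SHIFT) |
		       (data << INTEL_GUC_MSG_DATA_SHIFT) |
		       code;	/* assumed to occupy bits 15:0 */
	}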
diff --git a/drivers/gpu/drm/i915/intel_guc_reg.h b/drivers/gpu/drm/i915/intel_guc_reg.h
index d86084742a4a..57e7ad522c2f 100644
--- a/drivers/gpu/drm/i915/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/intel_guc_reg.h
@@ -104,6 +104,18 @@
#define GUC_SEND_INTERRUPT _MMIO(0xc4c8)
#define GUC_SEND_TRIGGER (1<<0)
+#define GUC_NUM_DOORBELLS 256
+
+/* format of the HW-monitored doorbell cacheline */
+struct guc_doorbell_info {
+ u32 db_status;
+#define GUC_DOORBELL_DISABLED 0
+#define GUC_DOORBELL_ENABLED 1
+
+ u32 cookie;
+ u32 reserved[14];
+} __packed;
+
#define GEN8_DRBREGL(x) _MMIO(0x1000 + (x) * 8)
#define GEN8_DRB_VALID (1<<0)
#define GEN8_DRBREGU(x) _MMIO(0x1000 + (x) * 8 + 4)
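
As a concrete example of the doorbell register layout: doorbell 5 is
monitored through GEN8_DRBREGL(5) at MMIO offset 0x1000 + 5 * 8 = 0x1028 and
GEN8_DRBREGU(5) at 0x102c, with bit 0 (GEN8_DRB_VALID) of the low register
reporting whether that doorbell is currently valid.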
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index a81f04d46e87..1570dcbe249c 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -192,7 +192,15 @@ static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
return client->vaddr + client->doorbell_offset;
}
-static void __create_doorbell(struct intel_guc_client *client)
+static bool __doorbell_valid(struct intel_guc *guc, u16 db_id)
+{
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+ GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
+ return I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID;
+}
+
+static void __init_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *doorbell;
@@ -201,21 +209,19 @@ static void __create_doorbell(struct intel_guc_client *client)
doorbell->cookie = 0;
}
-static void __destroy_doorbell(struct intel_guc_client *client)
+static void __fini_doorbell(struct intel_guc_client *client)
{
- struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
struct guc_doorbell_info *doorbell;
u16 db_id = client->doorbell_id;
doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_DISABLED;
- doorbell->cookie = 0;
/* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
* to go to zero after updating db_status before we call the GuC to
* release the doorbell
*/
- if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
+ if (wait_for_us(!__doorbell_valid(client->guc, db_id), 10))
WARN_ONCE(true, "Doorbell never became invalid after disable\n");
}
@@ -227,11 +233,11 @@ static int create_doorbell(struct intel_guc_client *client)
return -ENODEV; /* internal setup error, should never happen */
__update_doorbell_desc(client, client->doorbell_id);
- __create_doorbell(client);
+ __init_doorbell(client);
ret = __guc_allocate_doorbell(client->guc, client->stage_id);
if (ret) {
- __destroy_doorbell(client);
+ __fini_doorbell(client);
__update_doorbell_desc(client, GUC_DOORBELL_INVALID);
DRM_DEBUG_DRIVER("Couldn't create client %u doorbell: %d\n",
client->stage_id, ret);
@@ -247,7 +253,7 @@ static int destroy_doorbell(struct intel_guc_client *client)
GEM_BUG_ON(!has_doorbell(client));
- __destroy_doorbell(client);
+ __fini_doorbell(client);
ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
if (ret)
DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
@@ -282,8 +288,7 @@ __get_process_desc(struct intel_guc_client *client)
/*
* Initialise the process descriptor shared with the GuC firmware.
*/
-static void guc_proc_desc_init(struct intel_guc *guc,
- struct intel_guc_client *client)
+static void guc_proc_desc_init(struct intel_guc_client *client)
{
struct guc_process_desc *desc;
@@ -304,6 +309,14 @@ static void guc_proc_desc_init(struct intel_guc *guc,
desc->priority = client->priority;
}
+static void guc_proc_desc_fini(struct intel_guc_client *client)
+{
+ struct guc_process_desc *desc;
+
+ desc = __get_process_desc(client);
+ memset(desc, 0, sizeof(*desc));
+}
+
static int guc_stage_desc_pool_create(struct intel_guc *guc)
{
struct i915_vma *vma;
@@ -341,9 +354,9 @@ static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
* data structures relating to this client (doorbell, process descriptor,
* write queue, etc).
*/
-static void guc_stage_desc_init(struct intel_guc *guc,
- struct intel_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc_client *client)
{
+ struct intel_guc *guc = client->guc;
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
struct i915_gem_context *ctx = client->owner;
@@ -424,8 +437,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
desc->desc_private = ptr_to_u64(client);
}
-static void guc_stage_desc_fini(struct intel_guc *guc,
- struct intel_guc_client *client)
+static void guc_stage_desc_fini(struct intel_guc_client *client)
{
struct guc_stage_desc *desc;
@@ -486,14 +498,6 @@ static void guc_wq_item_append(struct intel_guc_client *client,
WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
}
-static void guc_reset_wq(struct intel_guc_client *client)
-{
- struct guc_process_desc *desc = __get_process_desc(client);
-
- desc->head = 0;
- desc->tail = 0;
-}
-
static void guc_ring_doorbell(struct intel_guc_client *client)
{
struct guc_doorbell_info *db;
@@ -746,30 +750,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+ priolist_for_each_request_consume(rq, rn, p, i) {
if (last && rq->hw_context != last->hw_context) {
- if (port == last_port) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ if (port == last_port)
goto done;
- }
if (submit)
port_assign(port, last);
port++;
}
- INIT_LIST_HEAD(&rq->sched.link);
+ list_del_init(&rq->sched.link);
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
+
last = rq;
submit = true;
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
@@ -791,19 +793,8 @@ done:
static void guc_dequeue(struct intel_engine_cs *engine)
{
- unsigned long flags;
- bool submit;
-
- local_irq_save(flags);
-
- spin_lock(&engine->timeline.lock);
- submit = __guc_dequeue(engine);
- spin_unlock(&engine->timeline.lock);
-
- if (submit)
+ if (__guc_dequeue(engine))
guc_submit(engine);
-
- local_irq_restore(flags);
}
static void guc_submission_tasklet(unsigned long data)
@@ -812,6 +803,9 @@ static void guc_submission_tasklet(unsigned long data)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
struct i915_request *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
rq = port_request(port);
while (rq && i915_request_completed(rq)) {
@@ -835,6 +829,8 @@ static void guc_submission_tasklet(unsigned long data)
if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
guc_dequeue(engine);
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static struct i915_request *
@@ -877,72 +873,31 @@ guc_reset_prepare(struct intel_engine_cs *engine)
/* Check that a doorbell register is in the expected state */
static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- u32 drbregl;
bool valid;
- GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
+ GEM_BUG_ON(db_id >= GUC_NUM_DOORBELLS);
- drbregl = I915_READ(GEN8_DRBREGL(db_id));
- valid = drbregl & GEN8_DRB_VALID;
+ valid = __doorbell_valid(guc, db_id);
if (test_bit(db_id, guc->doorbell_bitmap) == valid)
return true;
- DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
- db_id, drbregl, yesno(valid));
+ DRM_DEBUG_DRIVER("Doorbell %u has unexpected state: valid=%s\n",
+ db_id, yesno(valid));
return false;
}
static bool guc_verify_doorbells(struct intel_guc *guc)
{
+ bool doorbells_ok = true;
u16 db_id;
for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
if (!doorbell_ok(guc, db_id))
- return false;
-
- return true;
-}
-
-static int guc_clients_doorbell_init(struct intel_guc *guc)
-{
- int ret;
-
- ret = create_doorbell(guc->execbuf_client);
- if (ret)
- return ret;
-
- if (guc->preempt_client) {
- ret = create_doorbell(guc->preempt_client);
- if (ret) {
- destroy_doorbell(guc->execbuf_client);
- return ret;
- }
- }
-
- return 0;
-}
-
-static void guc_clients_doorbell_fini(struct intel_guc *guc)
-{
- /*
- * By the time we're here, GuC has already been reset.
- * Instead of trying (in vain) to communicate with it, let's just
- * cleanup the doorbell HW and our internal state.
- */
- if (guc->preempt_client) {
- __destroy_doorbell(guc->preempt_client);
- __update_doorbell_desc(guc->preempt_client,
- GUC_DOORBELL_INVALID);
- }
+ doorbells_ok = false;
- if (guc->execbuf_client) {
- __destroy_doorbell(guc->execbuf_client);
- __update_doorbell_desc(guc->execbuf_client,
- GUC_DOORBELL_INVALID);
- }
+ return doorbells_ok;
}
/**
@@ -1005,6 +960,10 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
}
client->vaddr = vaddr;
+ ret = reserve_doorbell(client);
+ if (ret)
+ goto err_vaddr;
+
client->doorbell_offset = __select_cacheline(guc);
/*
@@ -1017,13 +976,6 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
else
client->proc_desc_offset = (GUC_DB_SIZE / 2);
- guc_proc_desc_init(guc, client);
- guc_stage_desc_init(guc, client);
-
- ret = reserve_doorbell(client);
- if (ret)
- goto err_vaddr;
-
DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
priority, client, client->engines, client->stage_id);
DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
@@ -1045,7 +997,6 @@ err_client:
static void guc_client_free(struct intel_guc_client *client)
{
unreserve_doorbell(client);
- guc_stage_desc_fini(client->guc, client);
i915_vma_unpin_and_release(&client->vma, I915_VMA_RELEASE_MAP);
ida_simple_remove(&client->guc->stage_ids, client->stage_id);
kfree(client);
@@ -1112,6 +1063,69 @@ static void guc_clients_destroy(struct intel_guc *guc)
guc_client_free(client);
}
+static int __guc_client_enable(struct intel_guc_client *client)
+{
+ int ret;
+
+ guc_proc_desc_init(client);
+ guc_stage_desc_init(client);
+
+ ret = create_doorbell(client);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ guc_stage_desc_fini(client);
+ guc_proc_desc_fini(client);
+ return ret;
+}
+
+static void __guc_client_disable(struct intel_guc_client *client)
+{
+ /*
+	 * By the time we're here, GuC may have already been reset. If that is
+	 * the case, instead of trying (in vain) to communicate with it, let's
+	 * just clean up the doorbell HW and our internal state.
+ */
+ if (intel_guc_is_alive(client->guc))
+ destroy_doorbell(client);
+ else
+ __fini_doorbell(client);
+
+ guc_stage_desc_fini(client);
+ guc_proc_desc_fini(client);
+}
+
+static int guc_clients_enable(struct intel_guc *guc)
+{
+ int ret;
+
+ ret = __guc_client_enable(guc->execbuf_client);
+ if (ret)
+ return ret;
+
+ if (guc->preempt_client) {
+ ret = __guc_client_enable(guc->preempt_client);
+ if (ret) {
+ __guc_client_disable(guc->execbuf_client);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void guc_clients_disable(struct intel_guc *guc)
+{
+ if (guc->preempt_client)
+ __guc_client_disable(guc->preempt_client);
+
+ if (guc->execbuf_client)
+ __guc_client_disable(guc->execbuf_client);
+}
+
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
@@ -1295,15 +1309,11 @@ int intel_guc_submission_enable(struct intel_guc *guc)
GEM_BUG_ON(!guc->execbuf_client);
- guc_reset_wq(guc->execbuf_client);
- if (guc->preempt_client)
- guc_reset_wq(guc->preempt_client);
-
err = intel_guc_sample_forcewake(guc);
if (err)
return err;
- err = guc_clients_doorbell_init(guc);
+ err = guc_clients_enable(guc);
if (err)
return err;
@@ -1325,7 +1335,7 @@ void intel_guc_submission_disable(struct intel_guc *guc)
GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
guc_interrupts_release(dev_priv);
- guc_clients_doorbell_fini(guc);
+ guc_clients_disable(guc);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 26e48fc95543..1bf487f94254 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -16,6 +16,62 @@
#define KEY_LOAD_TRIES 5
+static
+bool intel_hdcp_is_ksv_valid(u8 *ksv)
+{
+ int i, ones = 0;
+ /* KSV has 20 1's and 20 0's */
+ for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
+ ones += hweight8(ksv[i]);
+ if (ones != 20)
+ return false;
+
+ return true;
+}
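
For instance, a KSV of five 0x0f bytes has hweight8(0x0f) * 5 = 4 * 5 = 20
bits set and passes this check, while an all-zero (0 ones) or all-ones
(40 ones) KSV is rejected; a spec-conformant KSV always carries exactly
twenty 1s across its 40 bits.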
+
+static
+int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
+ const struct intel_hdcp_shim *shim, u8 *bksv)
+{
+ int ret, i, tries = 2;
+
+ /* HDCP spec states that we must retry the bksv if it is invalid */
+ for (i = 0; i < tries; i++) {
+ ret = shim->read_bksv(intel_dig_port, bksv);
+ if (ret)
+ return ret;
+ if (intel_hdcp_is_ksv_valid(bksv))
+ break;
+ }
+ if (i == tries) {
+ DRM_DEBUG_KMS("Bksv is invalid\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* Is HDCP1.4 capable on Platform and Sink */
+bool intel_hdcp_capable(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ const struct intel_hdcp_shim *shim = connector->hdcp.shim;
+ bool capable = false;
+ u8 bksv[5];
+
+ if (!shim)
+ return capable;
+
+ if (shim->hdcp_capable) {
+ shim->hdcp_capable(intel_dig_port, &capable);
+ } else {
+ if (!intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv))
+ capable = true;
+ }
+
+ return capable;
+}
+
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim)
{
@@ -168,18 +224,6 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
}
static
-bool intel_hdcp_is_ksv_valid(u8 *ksv)
-{
- int i, ones = 0;
- /* KSV has 20 1's and 20 0's */
- for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
- ones += hweight8(ksv[i]);
- if (ones != 20)
- return false;
- return true;
-}
-
-static
int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
@@ -383,7 +427,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE,
HDCP_SHA1_COMPLETE, 1)) {
- DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n");
+ DRM_ERROR("Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
@@ -404,7 +448,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
if (ret) {
- DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
+ DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
return ret;
}
@@ -414,7 +458,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
- DRM_ERROR("Max Topology Limit Exceeded\n");
+ DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
return -EPERM;
}
@@ -450,7 +494,7 @@ int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
}
if (i == tries) {
- DRM_ERROR("V Prime validation failed.(%d)\n", ret);
+ DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
goto err;
}
@@ -499,7 +543,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
if (ret)
return ret;
if (!hdcp_capable) {
- DRM_ERROR("Panel is not HDCP capable\n");
+ DRM_DEBUG_KMS("Panel is not HDCP capable\n");
return -EINVAL;
}
}
@@ -527,18 +571,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
memset(&bksv, 0, sizeof(bksv));
- /* HDCP spec states that we must retry the bksv if it is invalid */
- for (i = 0; i < tries; i++) {
- ret = shim->read_bksv(intel_dig_port, bksv.shim);
- if (ret)
- return ret;
- if (intel_hdcp_is_ksv_valid(bksv.shim))
- break;
- }
- if (i == tries) {
- DRM_ERROR("HDCP failed, Bksv is invalid\n");
- return -ENODEV;
- }
+ ret = intel_hdcp_read_valid_bksv(intel_dig_port, shim, bksv.shim);
+ if (ret < 0)
+ return ret;
I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);
@@ -594,8 +629,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
}
if (i == tries) {
- DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
- I915_READ(PORT_HDCP_STATUS(port)));
+ DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
+ I915_READ(PORT_HDCP_STATUS(port)));
return -ETIMEDOUT;
}
@@ -618,14 +653,9 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
return 0;
}
-static
-struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
-{
- return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
-}
-
static int _intel_hdcp_disable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
@@ -641,7 +671,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
return -ETIMEDOUT;
}
- ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
+ ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
if (ret) {
DRM_ERROR("Failed to disable HDCP signalling\n");
return ret;
@@ -653,6 +683,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
static int _intel_hdcp_enable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
int i, ret, tries = 3;
@@ -677,8 +708,7 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
/* In case of authentication failures, HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
- ret = intel_hdcp_auth(conn_to_dig_port(connector),
- connector->hdcp_shim);
+ ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
if (!ret)
return 0;
@@ -688,42 +718,50 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
_intel_hdcp_disable(connector);
}
- DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+ DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
return ret;
}
+static inline
+struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
+{
+ return container_of(hdcp, struct intel_connector, hdcp);
+}
+
static void intel_hdcp_check_work(struct work_struct *work)
{
- struct intel_connector *connector = container_of(to_delayed_work(work),
- struct intel_connector,
- hdcp_check_work);
+ struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
+ struct intel_hdcp,
+ check_work);
+ struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+
if (!intel_hdcp_check_link(connector))
- schedule_delayed_work(&connector->hdcp_check_work,
+ schedule_delayed_work(&hdcp->check_work,
DRM_HDCP_CHECK_PERIOD_MS);
}
static void intel_hdcp_prop_work(struct work_struct *work)
{
- struct intel_connector *connector = container_of(work,
- struct intel_connector,
- hdcp_prop_work);
+ struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
+ prop_work);
+ struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
struct drm_device *dev = connector->base.dev;
struct drm_connector_state *state;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
/*
* This worker is only used to flip between ENABLED/DESIRED. Either of
- * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
+ * those to UNDESIRED is handled by core. If value == UNDESIRED,
* we're running just after hdcp has been disabled, so just exit
*/
- if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
state = connector->base.state;
- state->content_protection = connector->hdcp_value;
+ state->content_protection = hdcp->value;
}
- mutex_unlock(&connector->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
@@ -735,8 +773,9 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
}
int intel_hdcp_init(struct intel_connector *connector,
- const struct intel_hdcp_shim *hdcp_shim)
+ const struct intel_hdcp_shim *shim)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
ret = drm_connector_attach_content_protection_property(
@@ -744,51 +783,53 @@ int intel_hdcp_init(struct intel_connector *connector,
if (ret)
return ret;
- connector->hdcp_shim = hdcp_shim;
- mutex_init(&connector->hdcp_mutex);
- INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
- INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
+ hdcp->shim = shim;
+ mutex_init(&hdcp->mutex);
+ INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
+ INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
return 0;
}
int intel_hdcp_enable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- if (!connector->hdcp_shim)
+ if (!hdcp->shim)
return -ENOENT;
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
ret = _intel_hdcp_enable(connector);
if (ret)
goto out;
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&connector->hdcp_prop_work);
- schedule_delayed_work(&connector->hdcp_check_work,
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ schedule_delayed_work(&hdcp->check_work,
DRM_HDCP_CHECK_PERIOD_MS);
out:
- mutex_unlock(&connector->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
return ret;
}
int intel_hdcp_disable(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
int ret = 0;
- if (!connector->hdcp_shim)
+ if (!hdcp->shim)
return -ENOENT;
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
- if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
ret = _intel_hdcp_disable(connector);
}
- mutex_unlock(&connector->hdcp_mutex);
- cancel_delayed_work_sync(&connector->hdcp_check_work);
+ mutex_unlock(&hdcp->mutex);
+ cancel_delayed_work_sync(&hdcp->check_work);
return ret;
}
@@ -828,17 +869,18 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
/* Implements Part 3 of the HDCP authorization procedure */
int intel_hdcp_check_link(struct intel_connector *connector)
{
+ struct intel_hdcp *hdcp = &connector->hdcp;
struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
int ret = 0;
- if (!connector->hdcp_shim)
+ if (!hdcp->shim)
return -ENOENT;
- mutex_lock(&connector->hdcp_mutex);
+ mutex_lock(&hdcp->mutex);
- if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
goto out;
if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
@@ -846,17 +888,15 @@ int intel_hdcp_check_link(struct intel_connector *connector)
connector->base.name, connector->base.base.id,
I915_READ(PORT_HDCP_STATUS(port)));
ret = -ENXIO;
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&connector->hdcp_prop_work);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
goto out;
}
- if (connector->hdcp_shim->check_link(intel_dig_port)) {
- if (connector->hdcp_value !=
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- connector->hdcp_value =
- DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&connector->hdcp_prop_work);
+ if (hdcp->shim->check_link(intel_dig_port)) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
}
goto out;
}
@@ -867,20 +907,20 @@ int intel_hdcp_check_link(struct intel_connector *connector)
ret = _intel_hdcp_disable(connector);
if (ret) {
DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&connector->hdcp_prop_work);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
goto out;
}
ret = _intel_hdcp_enable(connector);
if (ret) {
- DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
- connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&connector->hdcp_prop_work);
+ DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
goto out;
}
out:
- mutex_unlock(&connector->hdcp_mutex);
+ mutex_unlock(&hdcp->mutex);
return ret;
}
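
The refactor above moves every per-connector HDCP field (shim, mutex, value, the two workers) into a dedicated struct intel_hdcp embedded in intel_connector, so the work functions recover their connector via container_of() rather than a stored back-pointer. A minimal sketch of the helper the call sites assume (its body is not shown in this hunk), under the assumption that the member is named hdcp as in the &connector->hdcp references above:

static inline struct intel_connector *
intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
        /* Safe only because intel_hdcp is embedded in intel_connector,
         * never allocated on its own. */
        return container_of(hdcp, struct intel_connector, hdcp);
}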
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a2dab0b6bde6..07e803a604bd 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -115,6 +115,8 @@ static u32 hsw_infoframe_enable(unsigned int type)
switch (type) {
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
+ case DP_SDP_PPS:
+ return VDIP_ENABLE_PPS;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -136,6 +138,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
switch (type) {
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
+ case DP_SDP_PPS:
+ return ICL_VIDEO_DIP_PPS_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
@@ -148,14 +152,25 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
}
}
-static void g4x_write_infoframe(struct drm_encoder *encoder,
+static int hsw_dip_data_size(unsigned int type)
+{
+ switch (type) {
+ case DP_SDP_VSC:
+ return VIDEO_DIP_VSC_DATA_SIZE;
+ case DP_SDP_PPS:
+ return VIDEO_DIP_PPS_DATA_SIZE;
+ default:
+ return VIDEO_DIP_DATA_SIZE;
+ }
+}
+
+static void g4x_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
int i;
@@ -186,31 +201,29 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL);
}
-static bool g4x_infoframe_enabled(struct drm_encoder *encoder,
+static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
}
-static void ibx_write_infoframe(struct drm_encoder *encoder,
+static void ibx_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -243,11 +256,10 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
+static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
u32 val = I915_READ(reg);
@@ -255,7 +267,7 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -263,14 +275,13 @@ static bool ibx_infoframe_enabled(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void cpt_write_infoframe(struct drm_encoder *encoder,
+static void cpt_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -306,10 +317,10 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
+static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
@@ -321,14 +332,13 @@ static bool cpt_infoframe_enabled(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void vlv_write_infoframe(struct drm_encoder *encoder,
+static void vlv_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -361,18 +371,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(reg);
}
-static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
+static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->base.crtc)->pipe;
u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return false;
- if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(intel_dig_port->base.port))
+ if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
return false;
return val & (VIDEO_DIP_ENABLE_AVI |
@@ -380,21 +389,21 @@ static bool vlv_infoframe_enabled(struct drm_encoder *encoder,
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
}
-static void hsw_write_infoframe(struct drm_encoder *encoder,
+static void hsw_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len)
{
const u32 *data = frame;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
- int data_size = type == DP_SDP_VSC ?
- VIDEO_DIP_VSC_DATA_SIZE : VIDEO_DIP_DATA_SIZE;
+ int data_size;
int i;
u32 val = I915_READ(ctl_reg);
+ data_size = hsw_dip_data_size(type);
+
val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
@@ -415,10 +424,10 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
POSTING_READ(ctl_reg);
}
-static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
+static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
return val & (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -443,11 +452,11 @@ static bool hsw_infoframe_enabled(struct drm_encoder *encoder,
* trick them by giving an offset into the buffer and moving back the header
* bytes by one.
*/
-static void intel_write_infoframe(struct drm_encoder *encoder,
+static void intel_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
union hdmi_infoframe *frame)
{
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
@@ -457,24 +466,25 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
return;
/* Insert the 'hole' (see big comment above) at position 3 */
- buffer[0] = buffer[1];
- buffer[1] = buffer[2];
- buffer[2] = buffer[3];
+ memmove(&buffer[0], &buffer[1], 3);
buffer[3] = 0;
len++;
- intel_dig_port->write_infoframe(encoder, crtc_state, frame->any.type, buffer, len);
+ intel_dig_port->write_infoframe(encoder,
+ crtc_state,
+ frame->any.type, buffer, len);
}
-static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
struct drm_connector *connector = &intel_hdmi->attached_connector->base;
- bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+ bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported ||
+ connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB420;
union hdmi_infoframe frame;
int ret;
@@ -486,8 +496,10 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
return;
}
- if (crtc_state->ycbcr420)
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+ else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
else
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
@@ -502,10 +514,11 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
conn_state);
/* TODO: handle pixel repetition for YCBCR420 outputs */
- intel_write_infoframe(encoder, crtc_state, &frame);
+ intel_write_infoframe(encoder, crtc_state,
+ &frame);
}
-static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
+static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
union hdmi_infoframe frame;
@@ -519,11 +532,12 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder,
frame.spd.sdi = HDMI_SPD_SDI_PC;
- intel_write_infoframe(encoder, crtc_state, &frame);
+ intel_write_infoframe(encoder, crtc_state,
+ &frame);
}
static void
-intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
+intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -536,20 +550,21 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
if (ret < 0)
return;
- intel_write_infoframe(encoder, crtc_state, &frame);
+ intel_write_infoframe(encoder, crtc_state,
+ &frame);
}
-static void g4x_set_infoframes(struct drm_encoder *encoder,
+static void g4x_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -657,11 +672,11 @@ static bool gcp_default_phase_possible(int pipe_bpp,
mode->crtc_htotal/2 % pixels_per_group == 0);
}
-static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
+static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg;
u32 val = 0;
@@ -689,18 +704,18 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder,
return val != 0;
}
-static void ibx_set_infoframes(struct drm_encoder *encoder,
+static void ibx_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -742,14 +757,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
-static void cpt_set_infoframes(struct drm_encoder *encoder,
+static void cpt_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
@@ -785,18 +800,17 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
-static void vlv_set_infoframes(struct drm_encoder *encoder,
+static void vlv_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
- u32 port = VIDEO_DIP_PORT(intel_dig_port->base.port);
+ u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -838,12 +852,12 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
}
-static void hsw_set_infoframes(struct drm_encoder *encoder,
+static void hsw_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
u32 val = I915_READ(reg);
@@ -965,13 +979,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_write(intel_dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
- DRM_ERROR("Write An over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Write An over DDC failed (%d)\n", ret);
return ret;
}
ret = intel_gmbus_output_aksv(adapter);
if (ret < 0) {
- DRM_ERROR("Failed to output aksv (%d)\n", ret);
+ DRM_DEBUG_KMS("Failed to output aksv (%d)\n", ret);
return ret;
}
return 0;
@@ -984,7 +998,7 @@ static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
- DRM_ERROR("Read Bksv over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read Bksv over DDC failed (%d)\n", ret);
return ret;
}
@@ -996,7 +1010,7 @@ int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
- DRM_ERROR("Read bstatus over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read bstatus over DDC failed (%d)\n", ret);
return ret;
}
@@ -1009,7 +1023,7 @@ int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
return ret;
}
*repeater_present = val & DRM_HDCP_DDC_BCAPS_REPEATER_PRESENT;
@@ -1024,7 +1038,7 @@ int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
- DRM_ERROR("Read Ri' over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read Ri' over DDC failed (%d)\n", ret);
return ret;
}
@@ -1037,7 +1051,7 @@ int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- DRM_ERROR("Read bcaps over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read bcaps over DDC failed (%d)\n", ret);
return ret;
}
*ksv_ready = val & DRM_HDCP_DDC_BCAPS_KSV_FIFO_READY;
@@ -1052,7 +1066,7 @@ int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
- DRM_ERROR("Read ksv fifo over DDC failed (%d)\n", ret);
+ DRM_DEBUG_KMS("Read ksv fifo over DDC failed (%d)\n", ret);
return ret;
}
return 0;
@@ -1070,7 +1084,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
ret = intel_hdmi_hdcp_read(intel_dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
- DRM_ERROR("Read V'[%d] over DDC failed (%d)\n", i, ret);
+ DRM_DEBUG_KMS("Read V'[%d] over DDC failed (%d)\n", i, ret);
return ret;
}
@@ -1217,7 +1231,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
- if (intel_dig_port->infoframe_enabled(&encoder->base, pipe_config))
+ if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE)
@@ -1438,7 +1452,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
- intel_dig_port->set_infoframes(&encoder->base, false,
+ intel_dig_port->set_infoframes(encoder,
+ false,
old_crtc_state, old_conn_state);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
@@ -1597,6 +1612,8 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
struct drm_atomic_state *state = crtc_state->base.state;
struct drm_connector_state *connector_state;
struct drm_connector *connector;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
int i;
if (HAS_GMCH_DISPLAY(dev_priv))
@@ -1624,7 +1641,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
if (connector_state->crtc != crtc_state->base.crtc)
continue;
- if (crtc_state->ycbcr420) {
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
const struct drm_hdmi_info *hdmi = &info->hdmi;
if (bpc == 12 && !(hdmi->y420_dc_modes &
@@ -1645,7 +1662,14 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
/* Display WA #1139: glk */
if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
- crtc_state->base.adjusted_mode.htotal > 5460)
+ adjusted_mode->htotal > 5460)
+ return false;
+
+ /* Display Wa_1405510057:icl */
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ bpc == 10 && IS_ICELAKE(dev_priv) &&
+ (adjusted_mode->crtc_hblank_end -
+ adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
return true;
@@ -1669,7 +1693,7 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector,
*clock_12bpc /= 2;
*clock_10bpc /= 2;
*clock_8bpc /= 2;
- config->ycbcr420 = true;
+ config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420;
/* YCBCR 420 output conversion needs a scaler */
if (skl_update_scaler_crtc(config)) {
@@ -1703,6 +1727,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
if (pipe_config->has_hdmi_sink)
@@ -1973,7 +1998,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder,
intel_hdmi_prepare(encoder, pipe_config);
- intel_dig_port->set_infoframes(&encoder->base,
+ intel_dig_port->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
}
@@ -1991,7 +2016,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder,
vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
0x2b247878);
- dport->set_infoframes(&encoder->base,
+ dport->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
@@ -2062,7 +2087,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
/* Use 800mV-0dB */
chv_set_phy_signal_level(encoder, 128, 102, false);
- dport->set_infoframes(&encoder->base,
+ dport->set_infoframes(encoder,
pipe_config->has_infoframe,
pipe_config, conn_state);
@@ -2074,13 +2099,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
chv_phy_release_cl2_override(encoder);
}
+static int
+intel_hdmi_connector_register(struct drm_connector *connector)
+{
+ int ret;
+
+ ret = intel_connector_register(connector);
+ if (ret)
+ return ret;
+
+ i915_debugfs_connector_add(connector);
+
+ return ret;
+}
+
static void intel_hdmi_destroy(struct drm_connector *connector)
{
if (intel_attached_hdmi(connector)->cec_notifier)
cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
- kfree(to_intel_connector(connector)->detect_edid);
- drm_connector_cleanup(connector);
- kfree(connector);
+
+ intel_connector_destroy(connector);
}
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -2089,7 +2127,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_connector_register,
+ .late_register = intel_hdmi_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_hdmi_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -2109,11 +2147,16 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
+
+ if (!HAS_GMCH_DISPLAY(dev_priv))
+ drm_connector_attach_max_bpc_property(connector, 8, 12);
}
/*
@@ -2324,9 +2367,18 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
intel_dig_port->set_infoframes = g4x_set_infoframes;
intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
} else if (HAS_DDI(dev_priv)) {
- intel_dig_port->write_infoframe = hsw_write_infoframe;
- intel_dig_port->set_infoframes = hsw_set_infoframes;
- intel_dig_port->infoframe_enabled = hsw_infoframe_enabled;
+ if (intel_dig_port->lspcon.active) {
+ intel_dig_port->write_infoframe =
+ lspcon_write_infoframe;
+ intel_dig_port->set_infoframes = lspcon_set_infoframes;
+ intel_dig_port->infoframe_enabled =
+ lspcon_infoframe_enabled;
+ } else {
+ intel_dig_port->set_infoframes = hsw_set_infoframes;
+ intel_dig_port->infoframe_enabled =
+ hsw_infoframe_enabled;
+ intel_dig_port->write_infoframe = hsw_write_infoframe;
+ }
} else if (HAS_PCH_IBX(dev_priv)) {
intel_dig_port->write_infoframe = ibx_write_infoframe;
intel_dig_port->set_infoframes = ibx_set_infoframes;
@@ -2485,5 +2537,6 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
intel_infoframe_init(intel_dig_port);
+ intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
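
The memmove() introduced in intel_write_infoframe() above condenses three explicit byte copies; the shift-and-hole packing it performs can be checked in isolation. A stand-alone sketch with illustrative bytes (not a real packed AVI infoframe):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
        /* Frame packed at offset 1: header bytes HB0..HB2, then payload. */
        uint8_t buffer[8] = { 0x00, 0x82, 0x02, 0x0d, 0x57 };
        size_t len = 5, i;

        memmove(&buffer[0], &buffer[1], 3);     /* pull HB0..HB2 down by one */
        buffer[3] = 0;                          /* insert the 'hole' */
        len++;

        for (i = 0; i < len; i++)
                printf("%02x ", buffer[i]);     /* 82 02 0d 00 57 00 */
        printf("\n");
        return 0;
}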
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 9a8018130237..e24174d08fed 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -114,51 +114,68 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
/**
- * intel_hpd_irq_storm_detect - gather stats and detect HPD irq storm on a pin
+ * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
* @dev_priv: private driver data pointer
* @pin: the pin to gather stats on
+ * @long_hpd: whether the HPD IRQ was long or short
*
- * Gather stats about HPD irqs from the specified @pin, and detect irq
+ * Gather stats about HPD IRQs from the specified @pin, and detect IRQ
* storms. Only the pin specific stats and state are changed, the caller is
* responsible for further action.
*
- * The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is
+ * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is
* stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
- * @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's
- * considered an irq storm and the irq state is set to @HPD_MARK_DISABLED.
+ * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and
+ * short IRQs count as +1. If this threshold is exceeded, it's considered an
+ * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED.
+ *
+ * By default, most systems will only count long IRQs towards
+ * &dev_priv->hotplug.hpd_storm_threshold. However, some older systems also
+ * suffer from short IRQ storms and must also track these. Because short IRQ
+ * storms are naturally caused by sideband interactions with DP MST devices,
+ * short IRQ detection is only enabled for systems without DP MST support.
+ * Systems which are new enough to support DP MST are far less likely to
+ * suffer from IRQ storms at all, so this is fine.
*
* The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
* and should only be adjusted for automated hotplug testing.
*
- * Return true if an irq storm was detected on @pin.
+ * Return true if an IRQ storm was detected on @pin.
*/
static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
- enum hpd_pin pin)
+ enum hpd_pin pin, bool long_hpd)
{
- unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
+ struct i915_hotplug *hpd = &dev_priv->hotplug;
+ unsigned long start = hpd->stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
- const int threshold = dev_priv->hotplug.hpd_storm_threshold;
+ const int increment = long_hpd ? 10 : 1;
+ const int threshold = hpd->hpd_storm_threshold;
bool storm = false;
+ if (!threshold ||
+ (!long_hpd && !dev_priv->hotplug.hpd_short_storm_enabled))
+ return false;
+
if (!time_in_range(jiffies, start, end)) {
- dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
- dev_priv->hotplug.stats[pin].count = 0;
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
- } else if (dev_priv->hotplug.stats[pin].count > threshold &&
- threshold) {
- dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
+ hpd->stats[pin].last_jiffies = jiffies;
+ hpd->stats[pin].count = 0;
+ }
+
+ hpd->stats[pin].count += increment;
+ if (hpd->stats[pin].count > threshold) {
+ hpd->stats[pin].state = HPD_MARK_DISABLED;
DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- dev_priv->hotplug.stats[pin].count++;
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
- dev_priv->hotplug.stats[pin].count);
+ hpd->stats[pin].count);
}
return storm;
}
-static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
+static void
+intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
struct intel_connector *intel_connector;
@@ -348,8 +365,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
hpd_event_bits = dev_priv->hotplug.event_bits;
dev_priv->hotplug.event_bits = 0;
- /* Disable hotplug on connectors that hit an irq storm. */
- intel_hpd_irq_storm_disable(dev_priv);
+ /* Enable polling for connectors which had HPD IRQ storms */
+ intel_hpd_irq_storm_switch_to_polling(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -474,15 +491,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
queue_hp = true;
}
- if (!long_hpd)
- continue;
-
- if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
+ if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) {
dev_priv->hotplug.event_bits &= ~BIT(pin);
storm_detected = true;
+ queue_hp = true;
}
}
+ /*
+ * Disable any IRQs that storms were detected on. Polling enablement
+ * happens later in our hotplug work.
+ */
if (storm_detected && dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
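
The weighted accounting the intel_hpd_irq_storm_detect() kernel-doc describes (long IRQs add 10, short IRQs add 1, storm declared once the per-window count exceeds hpd_storm_threshold) is easy to model outside the driver. A simplified stand-alone sketch; the threshold value and the missing window reset are stand-ins for the jiffies-based logic above:

#include <stdio.h>
#include <stdbool.h>

#define THRESHOLD 50            /* stand-in for hpd_storm_threshold */

static int count;               /* per-pin count within one detect period */

static bool storm_detect(bool long_hpd)
{
        count += long_hpd ? 10 : 1;     /* long HPDs weigh 10x short ones */
        return count > THRESHOLD;
}

int main(void)
{
        int i;

        for (i = 0; i < 6; i++)
                if (storm_detect(true))
                        printf("storm after %d long IRQs\n", i + 1);
        return 0;       /* prints: storm after 6 long IRQs */
}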
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 37ef540dd280..bc27b691d824 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -108,13 +108,14 @@ fail:
* This function reads the status register to verify whether the HuC
* firmware was successfully loaded.
*
- * Returns positive value if HuC firmware is loaded and verified
- * and -ENODEV if HuC is not present.
+ * Returns: 1 if HuC firmware is loaded and verified,
+ * 0 if HuC firmware is not loaded and -ENODEV if HuC
+ * is not present on this platform.
*/
int intel_huc_check_status(struct intel_huc *huc)
{
struct drm_i915_private *dev_priv = huc_to_i915(huc);
- u32 status;
+ bool status;
if (!HAS_HUC(dev_priv))
return -ENODEV;
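
The reworded kernel-doc above pins down a tri-state contract for intel_huc_check_status(): 1 for loaded and verified, 0 for not loaded, -ENODEV when the platform has no HuC. A stand-alone stub showing how a caller would consume that contract (the stub is illustrative, not the driver's implementation):

#include <stdio.h>
#include <errno.h>

/* Stub mirroring the documented return values only. */
static int check_status_stub(int has_huc, int verified)
{
        if (!has_huc)
                return -ENODEV;
        return verified ? 1 : 0;
}

int main(void)
{
        int ret = check_status_stub(1, 0);

        if (ret < 0)
                printf("no HuC on this platform (%d)\n", ret);
        else
                printf(ret ? "HuC loaded and verified\n" : "HuC not loaded\n");
        return 0;
}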
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 33d87ab93fdd..802d0394ccc4 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -817,7 +817,7 @@ int intel_setup_gmbus(struct drm_i915_private *dev_priv)
unsigned int pin;
int ret;
- if (INTEL_INFO(dev_priv)->num_pipes == 0)
+ if (!HAS_DISPLAY(dev_priv))
return 0;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 37c94a54efcb..4be167dcd209 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -259,63 +259,6 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
ce->lrc_desc = desc;
}
-static struct i915_priolist *
-lookup_priolist(struct intel_engine_cs *engine, int prio)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_priolist *p;
- struct rb_node **parent, *rb;
- bool first = true;
-
- if (unlikely(execlists->no_priolist))
- prio = I915_PRIORITY_NORMAL;
-
-find_priolist:
- /* most positive priority is scheduled first, equal priorities fifo */
- rb = NULL;
- parent = &execlists->queue.rb_root.rb_node;
- while (*parent) {
- rb = *parent;
- p = to_priolist(rb);
- if (prio > p->priority) {
- parent = &rb->rb_left;
- } else if (prio < p->priority) {
- parent = &rb->rb_right;
- first = false;
- } else {
- return p;
- }
- }
-
- if (prio == I915_PRIORITY_NORMAL) {
- p = &execlists->default_priolist;
- } else {
- p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
- /* Convert an allocation failure to a priority bump */
- if (unlikely(!p)) {
- prio = I915_PRIORITY_NORMAL; /* recurses just once */
-
- /* To maintain ordering with all rendering, after an
- * allocation failure we have to disable all scheduling.
- * Requests will then be executed in fifo, and schedule
- * will ensure that dependencies are emitted in fifo.
- * There will be still some reordering with existing
- * requests, so if userspace lied about their
- * dependencies that reordering may be visible.
- */
- execlists->no_priolist = true;
- goto find_priolist;
- }
- }
-
- p->priority = prio;
- INIT_LIST_HEAD(&p->requests);
- rb_link_node(&p->node, rb, parent);
- rb_insert_color_cached(&p->node, &execlists->queue, first);
-
- return p;
-}
-
static void unwind_wa_tail(struct i915_request *rq)
{
rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
@@ -324,9 +267,9 @@ static void unwind_wa_tail(struct i915_request *rq)
static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
- struct i915_request *rq, *rn;
- struct i915_priolist *uninitialized_var(p);
- int last_prio = I915_PRIORITY_INVALID;
+ struct i915_request *rq, *rn, *active = NULL;
+ struct list_head *uninitialized_var(pl);
+ int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
lockdep_assert_held(&engine->timeline.lock);
@@ -334,19 +277,34 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
&engine->timeline.requests,
link) {
if (i915_request_completed(rq))
- return;
+ break;
__i915_request_unsubmit(rq);
unwind_wa_tail(rq);
+ GEM_BUG_ON(rq->hw_context->active);
+
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
- if (rq_prio(rq) != last_prio) {
- last_prio = rq_prio(rq);
- p = lookup_priolist(engine, last_prio);
+ if (rq_prio(rq) != prio) {
+ prio = rq_prio(rq);
+ pl = i915_sched_lookup_priolist(engine, prio);
}
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+
+ list_add(&rq->sched.link, pl);
- GEM_BUG_ON(p->priority != rq_prio(rq));
- list_add(&rq->sched.link, &p->requests);
+ active = rq;
+ }
+
+ /*
+ * The active request is now effectively the start of a new client
+ * stream, so give it the equivalent small priority bump to prevent
+ * it being gazumped a second time by another peer.
+ */
+ if (!(prio & I915_PRIORITY_NEWCLIENT)) {
+ prio |= I915_PRIORITY_NEWCLIENT;
+ list_move_tail(&active->sched.link,
+ i915_sched_lookup_priolist(engine, prio));
}
}
@@ -355,13 +313,8 @@ execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
- unsigned long flags;
-
- spin_lock_irqsave(&engine->timeline.lock, flags);
__unwind_incomplete_requests(engine);
-
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static inline void
@@ -394,13 +347,17 @@ execlists_user_end(struct intel_engine_execlists *execlists)
static inline void
execlists_context_schedule_in(struct i915_request *rq)
{
+ GEM_BUG_ON(rq->hw_context->active);
+
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(rq->engine);
+ rq->hw_context->active = rq->engine;
}
static inline void
execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
{
+ rq->hw_context->active = NULL;
intel_engine_context_out(rq->engine);
execlists_context_status_change(rq, status);
trace_i915_request_out(rq);
@@ -417,9 +374,8 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
static u64 execlists_update_context(struct i915_request *rq)
{
+ struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
struct intel_context *ce = rq->hw_context;
- struct i915_hw_ppgtt *ppgtt =
- rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -430,7 +386,7 @@ static u64 execlists_update_context(struct i915_request *rq)
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
*/
- if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
+ if (!i915_vm_is_48bit(&ppgtt->vm))
execlists_update_context_pdps(ppgtt, reg_state);
/*
@@ -442,8 +398,13 @@ static u64 execlists_update_context(struct i915_request *rq)
* may not be visible to the HW prior to the completion of the UC
* register write and that we may begin execution from the context
* before its image is complete leading to invalid PD chasing.
+ *
+ * Furthermore, Braswell, at least, wants a full mb to be sure that
+ * the writes are coherent in memory (visible to the GPU) prior to
+ * execution, and not just visible to other CPUs (as is the result of
+ * wmb).
*/
- wmb();
+ mb();
return ce->lrc_desc;
}
@@ -681,8 +642,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+ priolist_for_each_request_consume(rq, rn, p, i) {
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -701,11 +663,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* combine this request with the last, then we
* are done.
*/
- if (port == last_port) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ if (port == last_port)
goto done;
- }
/*
* If GVT overrides us we only ever submit
@@ -715,11 +674,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* request) to the second port.
*/
if (ctx_single_port_submission(last->hw_context) ||
- ctx_single_port_submission(rq->hw_context)) {
- __list_del_many(&p->requests,
- &rq->sched.link);
+ ctx_single_port_submission(rq->hw_context))
goto done;
- }
GEM_BUG_ON(last->hw_context == rq->hw_context);
@@ -730,15 +686,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(port_isset(port));
}
- INIT_LIST_HEAD(&rq->sched.link);
+ list_del_init(&rq->sched.link);
+
__i915_request_submit(rq);
trace_i915_request_in(rq, port_index(port, execlists));
+
last = rq;
submit = true;
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
@@ -815,6 +772,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
+ const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
+
/*
* After a reset, the HW starts writing into CSB entry [0]. We
* therefore have to set our HEAD pointer back one entry so that
@@ -824,8 +783,8 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
* inline comparison of our cached head position against the last HW
* write works even before the first interrupt.
*/
- execlists->csb_head = execlists->csb_write_reset;
- WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
}
static void nop_submission_tasklet(unsigned long data)
@@ -866,27 +825,34 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->timeline.requests, link) {
GEM_BUG_ON(!rq->global_seqno);
- if (!i915_request_completed(rq))
- dma_fence_set_error(&rq->fence, -EIO);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ continue;
+
+ dma_fence_set_error(&rq->fence, -EIO);
}
/* Flush the queued requests to the timeline list (for retiring). */
while ((rb = rb_first_cached(&execlists->queue))) {
struct i915_priolist *p = to_priolist(rb);
+ int i;
- list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
- INIT_LIST_HEAD(&rq->sched.link);
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ list_del_init(&rq->sched.link);
dma_fence_set_error(&rq->fence, -EIO);
__i915_request_submit(rq);
}
rb_erase_cached(&p->node, &execlists->queue);
- INIT_LIST_HEAD(&p->requests);
if (p->priority != I915_PRIORITY_NORMAL)
kmem_cache_free(engine->i915->priorities, p);
}
+ intel_write_status_page(engine,
+ I915_GEM_HWS_INDEX,
+ intel_engine_last_submit(engine));
+
/* Remaining _unready_ requests will be nop'ed when submitted */
execlists->queue_priority = INT_MIN;
@@ -1088,13 +1054,7 @@ static void queue_request(struct intel_engine_cs *engine,
struct i915_sched_node *node,
int prio)
{
- list_add_tail(&node->link,
- &lookup_priolist(engine, prio)->requests);
-}
-
-static void __update_queue(struct intel_engine_cs *engine, int prio)
-{
- engine->execlists.queue_priority = prio;
+ list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
}
static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -1113,7 +1073,7 @@ static void __submit_queue_imm(struct intel_engine_cs *engine)
static void submit_queue(struct intel_engine_cs *engine, int prio)
{
if (prio > engine->execlists.queue_priority) {
- __update_queue(engine, prio);
+ engine->execlists.queue_priority = prio;
__submit_queue_imm(engine);
}
}
@@ -1136,139 +1096,6 @@ static void execlists_submit_request(struct i915_request *request)
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static struct i915_request *sched_to_request(struct i915_sched_node *node)
-{
- return container_of(node, struct i915_request, sched);
-}
-
-static struct intel_engine_cs *
-sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
-{
- struct intel_engine_cs *engine = sched_to_request(node)->engine;
-
- GEM_BUG_ON(!locked);
-
- if (engine != locked) {
- spin_unlock(&locked->timeline.lock);
- spin_lock(&engine->timeline.lock);
- }
-
- return engine;
-}
-
-static void execlists_schedule(struct i915_request *request,
- const struct i915_sched_attr *attr)
-{
- struct i915_priolist *uninitialized_var(pl);
- struct intel_engine_cs *engine, *last;
- struct i915_dependency *dep, *p;
- struct i915_dependency stack;
- const int prio = attr->priority;
- LIST_HEAD(dfs);
-
- GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
-
- if (i915_request_completed(request))
- return;
-
- if (prio <= READ_ONCE(request->sched.attr.priority))
- return;
-
- /* Need BKL in order to use the temporary link inside i915_dependency */
- lockdep_assert_held(&request->i915->drm.struct_mutex);
-
- stack.signaler = &request->sched;
- list_add(&stack.dfs_link, &dfs);
-
- /*
- * Recursively bump all dependent priorities to match the new request.
- *
- * A naive approach would be to use recursion:
- * static void update_priorities(struct i915_sched_node *node, prio) {
- * list_for_each_entry(dep, &node->signalers_list, signal_link)
- * update_priorities(dep->signal, prio)
- * queue_request(node);
- * }
- * but that may have unlimited recursion depth and so runs a very
- * real risk of overunning the kernel stack. Instead, we build
- * a flat list of all dependencies starting with the current request.
- * As we walk the list of dependencies, we add all of its dependencies
- * to the end of the list (this may include an already visited
- * request) and continue to walk onwards onto the new dependencies. The
- * end result is a topological list of requests in reverse order, the
- * last element in the list is the request we must execute first.
- */
- list_for_each_entry(dep, &dfs, dfs_link) {
- struct i915_sched_node *node = dep->signaler;
-
- /*
- * Within an engine, there can be no cycle, but we may
- * refer to the same dependency chain multiple times
- * (redundant dependencies are not eliminated) and across
- * engines.
- */
- list_for_each_entry(p, &node->signalers_list, signal_link) {
- GEM_BUG_ON(p == dep); /* no cycles! */
-
- if (i915_sched_node_signaled(p->signaler))
- continue;
-
- GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
- if (prio > READ_ONCE(p->signaler->attr.priority))
- list_move_tail(&p->dfs_link, &dfs);
- }
- }
-
- /*
- * If we didn't need to bump any existing priorities, and we haven't
- * yet submitted this request (i.e. there is no potential race with
- * execlists_submit_request()), we can set our own priority and skip
- * acquiring the engine locks.
- */
- if (request->sched.attr.priority == I915_PRIORITY_INVALID) {
- GEM_BUG_ON(!list_empty(&request->sched.link));
- request->sched.attr = *attr;
- if (stack.dfs_link.next == stack.dfs_link.prev)
- return;
- __list_del_entry(&stack.dfs_link);
- }
-
- last = NULL;
- engine = request->engine;
- spin_lock_irq(&engine->timeline.lock);
-
- /* Fifo and depth-first replacement ensure our deps execute before us */
- list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
- struct i915_sched_node *node = dep->signaler;
-
- INIT_LIST_HEAD(&dep->dfs_link);
-
- engine = sched_lock_engine(node, engine);
-
- if (prio <= node->attr.priority)
- continue;
-
- node->attr.priority = prio;
- if (!list_empty(&node->link)) {
- if (last != engine) {
- pl = lookup_priolist(engine, prio);
- last = engine;
- }
- GEM_BUG_ON(pl->priority != prio);
- list_move_tail(&node->link, &pl->requests);
- }
-
- if (prio > engine->execlists.queue_priority &&
- i915_sw_fence_done(&sched_to_request(node)->submit)) {
- /* defer submission until after all of our updates */
- __update_queue(engine, prio);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- }
- }
-
- spin_unlock_irq(&engine->timeline.lock);
-}
-
static void execlists_context_destroy(struct intel_context *ce)
{
GEM_BUG_ON(ce->pin_count);
@@ -1284,6 +1111,28 @@ static void execlists_context_destroy(struct intel_context *ce)
static void execlists_context_unpin(struct intel_context *ce)
{
+ struct intel_engine_cs *engine;
+
+ /*
+ * The tasklet may still be using a pointer to our state, via an
+ * old request. However, since we know we only unpin the context
+ * on retirement of the following request, we know that the last
+ * request referencing us will have had a completion CS interrupt.
+ * If we see that it is still active, it means that the tasklet hasn't
+ * had the chance to run yet; let it run before we tear down the
+ * reference it may use.
+ */
+ engine = READ_ONCE(ce->active);
+ if (unlikely(engine)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ process_csb(engine);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+ GEM_BUG_ON(READ_ONCE(ce->active));
+ }
+
i915_gem_context_unpin_hw_id(ce->gem_context);
intel_ring_unpin(ce->ring);
@@ -1387,6 +1236,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
struct intel_context *ce = to_intel_context(ctx, engine);
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
+ GEM_BUG_ON(!ctx->ppgtt);
if (likely(ce->pin_count++))
return ce;
@@ -1443,9 +1293,10 @@ static int execlists_request_alloc(struct i915_request *request)
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
+ /* NB no one else is allowed to scribble over scratch + 256! */
*batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
*batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1459,7 +1310,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
*batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
*batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
- *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = i915_scratch_offset(engine->i915) + 256;
*batch++ = 0;
return batch;
@@ -1496,7 +1347,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
PIPE_CONTROL_GLOBAL_GTT_IVB |
PIPE_CONTROL_CS_STALL |
PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch) +
+ i915_scratch_offset(engine->i915) +
2 * CACHELINE_BYTES);
*batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -1565,18 +1416,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
- /* WaClearSlmSpaceAtContextSwitch:kbl */
- /* Actual scratch location is at 128 bytes offset */
- if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
- batch = gen8_emit_pipe_control(batch,
- PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE,
- i915_ggtt_offset(engine->scratch)
- + 2 * CACHELINE_BYTES);
- }
-
/* WaMediaPoolStateCmdInWABB:bxt,glk */
if (HAS_POOLED_EU(engine->i915)) {
/*
@@ -1691,7 +1530,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
unsigned int i;
int ret;
- if (GEM_WARN_ON(engine->id != RCS))
+ if (GEM_DEBUG_WARN_ON(engine->id != RCS))
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
@@ -1730,8 +1569,8 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
*/
for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
wa_bb[i]->offset = batch_ptr - batch;
- if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
- CACHELINE_BYTES))) {
+ if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
+ CACHELINE_BYTES))) {
ret = -EINVAL;
break;
}
@@ -1793,6 +1632,8 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
+ intel_engine_apply_workarounds(engine);
+
intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@@ -1817,7 +1658,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
+ intel_engine_apply_whitelist(engine);
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
@@ -1840,7 +1681,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
+ intel_engine_apply_whitelist(engine);
return 0;
}
@@ -1914,7 +1755,7 @@ static void execlists_reset(struct intel_engine_cs *engine,
unsigned long flags;
u32 *regs;
- GEM_TRACE("%s request global=%x, current=%d\n",
+ GEM_TRACE("%s request global=%d, current=%d\n",
engine->name, request ? request->global_seqno : 0,
intel_engine_get_seqno(engine));
@@ -2041,8 +1882,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* it is unsafe in case of lite-restore (because the ctx is
* not idle). PML4 is allocated during ppgtt init so this is
* not needed in 48-bit.*/
- if (rq->gem_context->ppgtt &&
- (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+ if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
!i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
!intel_vgpu_active(rq->i915)) {
ret = intel_logical_ring_emit_pdps(rq);
@@ -2121,7 +1961,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode)
if (mode & EMIT_INVALIDATE) {
cmd |= MI_INVALIDATE_TLB;
- if (request->engine->id == VCS)
+ if (request->engine->class == VIDEO_DECODE_CLASS)
cmd |= MI_INVALIDATE_BSD;
}
@@ -2139,7 +1979,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
{
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
- i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
+ i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
u32 *cs, flags = 0;
int len;
@@ -2253,7 +2093,7 @@ static int gen8_init_rcs_context(struct i915_request *rq)
{
int ret;
- ret = intel_ctx_workarounds_emit(rq);
+ ret = intel_engine_emit_ctx_wa(rq);
if (ret)
return ret;
@@ -2306,7 +2146,7 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->cancel_requests = execlists_cancel_requests;
- engine->schedule = execlists_schedule;
+ engine->schedule = i915_schedule;
engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
@@ -2394,12 +2234,6 @@ logical_ring_setup(struct intel_engine_cs *engine)
logical_ring_default_irqs(engine);
}
-static bool csb_force_mmio(struct drm_i915_private *i915)
-{
- /* Older GVT emulation depends upon intercepting CSB mmio */
- return intel_vgpu_active(i915) && !intel_vgpu_has_hwsp_emulation(i915);
-}
-
static int logical_ring_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -2429,24 +2263,12 @@ static int logical_ring_init(struct intel_engine_cs *engine)
upper_32_bits(ce->lrc_desc);
}
- execlists->csb_read =
- i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
- if (csb_force_mmio(i915)) {
- execlists->csb_status = (u32 __force *)
- (i915->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
+ execlists->csb_status =
+ &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
- execlists->csb_write = (u32 __force *)execlists->csb_read;
- execlists->csb_write_reset =
- _MASKED_FIELD(GEN8_CSB_WRITE_PTR_MASK,
- GEN8_CSB_ENTRIES - 1);
- } else {
- execlists->csb_status =
- &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
+ execlists->csb_write =
+ &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
- execlists->csb_write =
- &engine->status_page.page_addr[intel_hws_csb_write_index(i915)];
- execlists->csb_write_reset = GEN8_CSB_ENTRIES - 1;
- }
reset_csb_pointers(execlists);
return 0;
@@ -2476,10 +2298,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
- ret = intel_engine_create_scratch(engine, PAGE_SIZE);
- if (ret)
- goto err_cleanup_common;
-
ret = intel_init_workaround_bb(engine);
if (ret) {
/*
@@ -2491,11 +2309,10 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
ret);
}
- return 0;
+ intel_engine_init_whitelist(engine);
+ intel_engine_init_workarounds(engine);
-err_cleanup_common:
- intel_engine_cleanup_common(engine);
- return ret;
+ return 0;
}
int logical_xcs_ring_init(struct intel_engine_cs *engine)
@@ -2644,7 +2461,6 @@ static void execlists_init_reg_state(u32 *regs,
struct intel_ring *ring)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
u32 base = engine->mmio_base;
bool rcs = engine->class == RENDER_CLASS;
@@ -2716,12 +2532,12 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
- if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
+ if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
- ASSIGN_CTX_PML4(ppgtt, regs);
+ ASSIGN_CTX_PML4(ctx->ppgtt, regs);
}
if (rcs) {
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 3e085c5f2b81..96a8d9524b0c 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -27,6 +27,22 @@
#include <drm/drm_dp_dual_mode_helper.h>
#include "intel_drv.h"
+/* LSPCON OUI Vendor IDs (signatures) */
+#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
+#define LSPCON_VENDOR_MCA_OUI 0x0060AD
+
+/* AUX addresses to write MCA AVI IF */
+#define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0
+#define LSPCON_MCA_AVI_IF_CTRL 0x5DF
+#define LSPCON_MCA_AVI_IF_KICKOFF (1 << 0)
+#define LSPCON_MCA_AVI_IF_HANDLED (1 << 1)
+
+/* AUX addresses to write Parade AVI IF */
+#define LSPCON_PARADE_AVI_IF_WRITE_OFFSET 0x516
+#define LSPCON_PARADE_AVI_IF_CTRL 0x51E
+#define LSPCON_PARADE_AVI_IF_KICKOFF (1 << 7)
+#define LSPCON_PARADE_AVI_IF_DATA_SIZE 32
+
static struct intel_dp *lspcon_to_intel_dp(struct intel_lspcon *lspcon)
{
struct intel_digital_port *dig_port =
@@ -50,6 +66,40 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode)
}
}
+static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
+{
+ struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ struct drm_dp_dpcd_ident *ident;
+ u32 vendor_oui;
+
+ if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) {
+ DRM_ERROR("Can't read description\n");
+ return false;
+ }
+
+ ident = &dp->desc.ident;
+ vendor_oui = (ident->oui[0] << 16) | (ident->oui[1] << 8) |
+ ident->oui[2];
+
+ switch (vendor_oui) {
+ case LSPCON_VENDOR_MCA_OUI:
+ lspcon->vendor = LSPCON_VENDOR_MCA;
+ DRM_DEBUG_KMS("Vendor: Mega Chips\n");
+ break;
+
+ case LSPCON_VENDOR_PARADE_OUI:
+ lspcon->vendor = LSPCON_VENDOR_PARADE;
+ DRM_DEBUG_KMS("Vendor: Parade Tech\n");
+ break;
+
+ default:
+ DRM_ERROR("Invalid/Unknown vendor OUI\n");
+ return false;
+ }
+
+ return true;
+}
+
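As a standalone sketch of the OUI packing used in lspcon_detect_vendor() above (assuming only the three ident bytes read from the DPCD; the helper name is hypothetical):

#include <stdint.h>

/* oui[0] is the most significant byte, so MCA's 00 60 AD packs to 0x0060AD */
static uint32_t pack_oui(const uint8_t oui[3])
{
	return ((uint32_t)oui[0] << 16) | ((uint32_t)oui[1] << 8) | oui[2];
}

/* e.g. pack_oui((const uint8_t[]){ 0x00, 0x60, 0xAD }) == LSPCON_VENDOR_MCA_OUI */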
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode current_mode;
@@ -130,6 +180,21 @@ static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
return true;
}
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ if (drm_mode_is_420_only(info, adjusted_mode) &&
+ connector->ycbcr_420_allowed) {
+ crtc_state->port_clock /= 2;
+ crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
+ crtc_state->lspcon_downsampling = true;
+ }
+}
+
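An aside on the halved port clock in lspcon_ycbcr420_config() above: YCbCr 4:2:0 carries half the samples of 4:4:4, so the link only needs half the pixel rate. A toy figure (4k60 8bpc; the numbers are illustrative, not taken from the patch):

/* 3840x2160@60 4:4:4 needs a 594 MHz pixel clock; 4:2:0 halves the link rate */
unsigned int pixel_clock_khz = 594000;
unsigned int port_clock_khz = 594000 / 2;	/* 297000 */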
static bool lspcon_probe(struct intel_lspcon *lspcon)
{
int retry;
@@ -159,7 +224,18 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
/* Yay ... got a LSPCON device */
DRM_DEBUG_KMS("LSPCON detected\n");
lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
- lspcon->active = true;
+
+ /*
+ * In the SW state machine, let's put LSPCON in PCON mode only.
+ * That way it works with both HDMI 1.4 and HDMI 2.0 sinks.
+ */
+ if (lspcon->mode != DRM_LSPCON_MODE_PCON) {
+ if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
+ DRM_ERROR("LSPCON mode change to PCON failed\n");
+ return false;
+ }
+ }
return true;
}
@@ -185,6 +261,255 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
}
+static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
+{
+ u8 avi_if_ctrl;
+ u8 retry;
+ ssize_t ret;
+
+ /* Check if LSPCON FW is ready for data */
+ for (retry = 0; retry < 5; retry++) {
+ if (retry)
+ usleep_range(200, 300);
+
+ ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL,
+ &avi_if_ctrl, 1);
+ if (ret < 0) {
+ DRM_ERROR("Failed to read AVI IF control\n");
+ return false;
+ }
+
+ if ((avi_if_ctrl & LSPCON_PARADE_AVI_IF_KICKOFF) == 0)
+ return true;
+ }
+
+ DRM_ERROR("Parade FW not ready to accept AVI IF\n");
+ return false;
+}
+
+static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
+ uint8_t *avi_buf)
+{
+ u8 avi_if_ctrl;
+ u8 block_count = 0;
+ u8 *data;
+ uint16_t reg;
+ ssize_t ret;
+
+ while (block_count < 4) {
+ if (!lspcon_parade_fw_ready(aux)) {
+ DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n",
+ block_count);
+ return false;
+ }
+
+ reg = LSPCON_PARADE_AVI_IF_WRITE_OFFSET;
+ data = avi_buf + block_count * 8;
+ ret = drm_dp_dpcd_write(aux, reg, data, 8);
+ if (ret < 0) {
+ DRM_ERROR("Failed to write AVI IF block %d\n",
+ block_count);
+ return false;
+ }
+
+ /*
+ * Once a block of data is written, we have to inform the FW
+ * by writing to the AVI infoframe control register:
+ * - set the kickoff bit[7] to 1
+ * - write the block number to bits[1:0]
+ */
+ reg = LSPCON_PARADE_AVI_IF_CTRL;
+ avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count;
+ ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1);
+ if (ret < 0) {
+ DRM_ERROR("Failed to update (0x%x), block %d\n",
+ reg, block_count);
+ return false;
+ }
+
+ block_count++;
+ }
+
+ DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n");
+ return true;
+}
+
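The control byte written after each block, per the comment above, reduced to a standalone sketch (helper name hypothetical):

/* bit 7 kicks off the transfer, bits[1:0] carry the block index */
static unsigned char parade_avi_if_ctrl_byte(unsigned int block)
{
	return (unsigned char)((1u << 7) | (block & 0x3));	/* block 2 -> 0x82 */
}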
+static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
+ const uint8_t *frame,
+ ssize_t len)
+{
+ uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
+
+ /*
+ * Parade's frame contains 32 bytes of data, divided
+ * into 4 blocks:
+ * Token byte (first byte of first block, must be non-zero)
+ * HB0 to HB2 from AVI IF (3 bytes header)
+ * PB0 to PB27 from AVI IF (28 bytes data)
+ * So it should look like this:
+ * first block: | <token> <HB0-HB2> <PB0-PB3> |
+ * next 3 blocks: |<PB4-PB11>|<PB12-PB19>|<PB20-PB27>|
+ */
+
+ if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) {
+ DRM_ERROR("Invalid length of infoframes\n");
+ return false;
+ }
+
+ memcpy(&avi_if[1], frame, len);
+
+ if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) {
+ DRM_DEBUG_KMS("Failed to write infoframe blocks\n");
+ return false;
+ }
+
+ return true;
+}
+
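The framing described in the comment above, as a minimal standalone model (helper name hypothetical; only the layout is taken from the patch):

#include <string.h>

/* buf[0] = token, buf[1..3] = HB0-HB2, buf[4..31] = PB0-PB27 */
static void parade_frame_layout(unsigned char buf[32],
				const unsigned char *frame, size_t len)
{
	if (len > 31)
		return;			/* frame must fit after the token byte */

	memset(buf, 0, 32);
	buf[0] = 1;			/* token byte, must be non-zero */
	memcpy(&buf[1], frame, len);	/* 3-byte header then payload */
	/* blocks sent on the wire: buf[0..7], [8..15], [16..23], [24..31] */
}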
+static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
+ const uint8_t *buffer, ssize_t len)
+{
+ int ret;
+ uint32_t val = 0;
+ uint32_t retry;
+ uint16_t reg;
+ const uint8_t *data = buffer;
+
+ reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
+ while (val < len) {
+ /* DPCD write for AVI IF can fail on a slow FW day, so retry */
+ for (retry = 0; retry < 5; retry++) {
+ ret = drm_dp_dpcd_write(aux, reg, (void *)data, 1);
+ if (ret == 1) {
+ break;
+ } else if (retry < 4) {
+ mdelay(50);
+ continue;
+ } else {
+ DRM_ERROR("DPCD write failed at:0x%x\n", reg);
+ return false;
+ }
+ }
+ val++; reg++; data++;
+ }
+
+ val = 0;
+ reg = LSPCON_MCA_AVI_IF_CTRL;
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ /* Tell the LSPCON chip the infoframe is ready: clear bit 1, set bit 0 */
+ val &= ~LSPCON_MCA_AVI_IF_HANDLED;
+ val |= LSPCON_MCA_AVI_IF_KICKOFF;
+
+ ret = drm_dp_dpcd_write(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ val = 0;
+ ret = drm_dp_dpcd_read(aux, reg, &val, 1);
+ if (ret < 0) {
+ DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ return false;
+ }
+
+ if (val == LSPCON_MCA_AVI_IF_HANDLED)
+ DRM_DEBUG_KMS("AVI IF handled by FW\n");
+
+ return true;
+}
+
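The MCA kickoff handshake, reduced to its bit operations (a sketch of the read-modify-write above, not the driver function):

#define MCA_AVI_IF_KICKOFF	(1 << 0)
#define MCA_AVI_IF_HANDLED	(1 << 1)

static unsigned char mca_kickoff(unsigned char ctrl)
{
	ctrl &= ~MCA_AVI_IF_HANDLED;	/* clear bit 1 */
	ctrl |= MCA_AVI_IF_KICKOFF;	/* set bit 0 */
	return ctrl;			/* FW flips the byte to HANDLED when done */
}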
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *frame, ssize_t len)
+{
+ bool ret;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
+
+ /* LSPCON only needs AVI IF */
+ if (type != HDMI_INFOFRAME_TYPE_AVI)
+ return;
+
+ if (lspcon->vendor == LSPCON_VENDOR_MCA)
+ ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux,
+ frame, len);
+ else
+ ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux,
+ frame, len);
+
+ if (!ret) {
+ DRM_ERROR("Failed to write AVI infoframes\n");
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
+}
+
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ ssize_t ret;
+ union hdmi_infoframe frame;
+ uint8_t buf[VIDEO_DIP_DATA_SIZE];
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct intel_lspcon *lspcon = &dig_port->lspcon;
+ struct intel_dp *intel_dp = &dig_port->dp;
+ struct drm_connector *connector = &intel_dp->attached_connector->base;
+ const struct drm_display_mode *mode = &crtc_state->base.adjusted_mode;
+ bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+
+ if (!lspcon->active) {
+ DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
+ return;
+ }
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ mode, is_hdmi2_sink);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return;
+ }
+
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+ if (crtc_state->lspcon_downsampling)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+ else
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+ } else {
+ frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+ }
+
+ drm_hdmi_avi_infoframe_quant_range(&frame.avi, mode,
+ crtc_state->limited_color_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL,
+ false, is_hdmi2_sink);
+
+ ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
+ if (ret < 0) {
+ DRM_ERROR("Failed to pack AVI IF\n");
+ return;
+ }
+
+ dig_port->write_infoframe(encoder, crtc_state, HDMI_INFOFRAME_TYPE_AVI,
+ buf, ret);
+}
+
+bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config)
+{
+ return enc_to_intel_lspcon(&encoder->base)->active;
+}
+
void lspcon_resume(struct intel_lspcon *lspcon)
{
enum drm_lspcon_mode expected_mode;
@@ -216,6 +541,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
struct intel_lspcon *lspcon = &intel_dig_port->lspcon;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_connector *connector = &dp->attached_connector->base;
if (!HAS_LSPCON(dev_priv)) {
DRM_ERROR("LSPCON is not supported on this platform\n");
@@ -230,25 +556,18 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
return false;
}
- /*
- * In the SW state machine, lets Put LSPCON in PCON mode only.
- * In this way, it will work with both HDMI 1.4 sinks as well as HDMI
- * 2.0 sinks.
- */
- if (lspcon->active && lspcon->mode != DRM_LSPCON_MODE_PCON) {
- if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
- DRM_ERROR("LSPCON mode change to PCON failed\n");
- return false;
- }
- }
-
if (!intel_dp_read_dpcd(dp)) {
DRM_ERROR("LSPCON DPCD read failed\n");
return false;
}
- drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd));
+ if (!lspcon_detect_vendor(lspcon)) {
+ DRM_ERROR("LSPCON vendor detection failed\n");
+ return false;
+ }
+ connector->ycbcr_420_allowed = true;
+ lspcon->active = true;
DRM_DEBUG_KMS("Success: LSPCON init\n");
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f9f3b0885ba5..e6c5d985ea0a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -42,10 +42,6 @@
#include <linux/acpi.h>
/* Private structure for the integrated LVDS support */
-struct intel_lvds_connector {
- struct intel_connector base;
-};
-
struct intel_lvds_pps {
/* 100us units */
int t1_t2;
@@ -70,7 +66,7 @@ struct intel_lvds_encoder {
struct intel_lvds_pps init_pps;
u32 init_lvds_val;
- struct intel_lvds_connector *attached_connector;
+ struct intel_connector *attached_connector;
};
static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
@@ -78,11 +74,6 @@ static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct intel_lvds_encoder, base.base);
}
-static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct intel_lvds_connector, base.base);
-}
-
bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t lvds_reg, enum pipe *pipe)
{
@@ -396,7 +387,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
struct intel_lvds_encoder *lvds_encoder =
to_lvds_encoder(&intel_encoder->base);
struct intel_connector *intel_connector =
- &lvds_encoder->attached_connector->base;
+ lvds_encoder->attached_connector;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
unsigned int lvds_bpp;
@@ -418,6 +409,8 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
pipe_config->pipe_bpp = lvds_bpp;
}
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+
/*
* We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -461,15 +454,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
- struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
+ struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
/* use cached edid if we have one */
- if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
- return drm_add_edid_modes(connector, lvds_connector->base.edid);
+ if (!IS_ERR_OR_NULL(intel_connector->edid))
+ return drm_add_edid_modes(connector, intel_connector->edid);
- mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
+ mode = drm_mode_duplicate(dev, intel_connector->panel.fixed_mode);
if (mode == NULL)
return 0;
@@ -477,27 +470,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
return 1;
}
-/**
- * intel_lvds_destroy - unregister and free LVDS structures
- * @connector: connector to free
- *
- * Unregister the DDC bus for this connector then free the driver private
- * structure.
- */
-static void intel_lvds_destroy(struct drm_connector *connector)
-{
- struct intel_lvds_connector *lvds_connector =
- to_lvds_connector(connector);
-
- if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
- kfree(lvds_connector->base.edid);
-
- intel_panel_fini(&lvds_connector->base.panel);
-
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
@@ -511,7 +483,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_lvds_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
@@ -802,8 +774,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
return i915_modparams.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
- if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
- > 112999)
+ if (lvds_encoder->attached_connector->panel.fixed_mode->clock > 112999)
return true;
if (dmi_check_system(intel_dual_link_lvds))
@@ -858,7 +829,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
struct drm_device *dev = &dev_priv->drm;
struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
- struct intel_lvds_connector *lvds_connector;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -911,23 +881,16 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (!lvds_encoder)
return;
- lvds_connector = kzalloc(sizeof(*lvds_connector), GFP_KERNEL);
- if (!lvds_connector) {
- kfree(lvds_encoder);
- return;
- }
-
- if (intel_connector_init(&lvds_connector->base) < 0) {
- kfree(lvds_connector);
+ intel_connector = intel_connector_alloc();
+ if (!intel_connector) {
kfree(lvds_encoder);
return;
}
- lvds_encoder->attached_connector = lvds_connector;
+ lvds_encoder->attached_connector = intel_connector;
intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
- intel_connector = &lvds_connector->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -1008,7 +971,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
} else {
edid = ERR_PTR(-ENOENT);
}
- lvds_connector->base.edid = edid;
+ intel_connector->edid = edid;
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -1072,6 +1035,6 @@ failed:
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(lvds_encoder);
- kfree(lvds_connector);
+ intel_connector_free(intel_connector);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index e034b4166d32..b8f106d9ecf8 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -773,70 +773,6 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
opregion->acpi->cadl[i] = 0;
}
-void intel_opregion_register(struct drm_i915_private *dev_priv)
-{
- struct intel_opregion *opregion = &dev_priv->opregion;
-
- if (!opregion->header)
- return;
-
- if (opregion->acpi) {
- intel_didl_outputs(dev_priv);
- intel_setup_cadls(dev_priv);
-
- /* Notify BIOS we are ready to handle ACPI video ext notifs.
- * Right now, all the events are handled by the ACPI video module.
- * We don't actually need to do anything with them. */
- opregion->acpi->csts = 0;
- opregion->acpi->drdy = 1;
-
- opregion->acpi_notifier.notifier_call = intel_opregion_video_event;
- register_acpi_notifier(&opregion->acpi_notifier);
- }
-
- if (opregion->asle) {
- opregion->asle->tche = ASLE_TCHE_BLC_EN;
- opregion->asle->ardy = ASLE_ARDY_READY;
- }
-}
-
-void intel_opregion_unregister(struct drm_i915_private *dev_priv)
-{
- struct intel_opregion *opregion = &dev_priv->opregion;
-
- if (!opregion->header)
- return;
-
- if (opregion->asle)
- opregion->asle->ardy = ASLE_ARDY_NOT_READY;
-
- cancel_work_sync(&dev_priv->opregion.asle_work);
-
- if (opregion->acpi) {
- opregion->acpi->drdy = 0;
-
- unregister_acpi_notifier(&opregion->acpi_notifier);
- opregion->acpi_notifier.notifier_call = NULL;
- }
-
- /* just clear all opregion memory pointers now */
- memunmap(opregion->header);
- if (opregion->rvda) {
- memunmap(opregion->rvda);
- opregion->rvda = NULL;
- }
- if (opregion->vbt_firmware) {
- kfree(opregion->vbt_firmware);
- opregion->vbt_firmware = NULL;
- }
- opregion->header = NULL;
- opregion->acpi = NULL;
- opregion->swsci = NULL;
- opregion->asle = NULL;
- opregion->vbt = NULL;
- opregion->lid_state = NULL;
-}
-
static void swsci_setup(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
@@ -1115,3 +1051,97 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
return ret - 1;
}
+
+void intel_opregion_register(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ opregion->acpi_notifier.notifier_call =
+ intel_opregion_video_event;
+ register_acpi_notifier(&opregion->acpi_notifier);
+ }
+
+ intel_opregion_resume(i915);
+}
+
+void intel_opregion_resume(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi) {
+ intel_didl_outputs(i915);
+ intel_setup_cadls(i915);
+
+ /*
+ * Notify BIOS we are ready to handle ACPI video ext notifs.
+ * Right now, all the events are handled by the ACPI video
+ * module. We don't actually need to do anything with them.
+ */
+ opregion->acpi->csts = 0;
+ opregion->acpi->drdy = 1;
+ }
+
+ if (opregion->asle) {
+ opregion->asle->tche = ASLE_TCHE_BLC_EN;
+ opregion->asle->ardy = ASLE_ARDY_READY;
+ }
+
+ intel_opregion_notify_adapter(i915, PCI_D0);
+}
+
+void intel_opregion_suspend(struct drm_i915_private *i915, pci_power_t state)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ if (!opregion->header)
+ return;
+
+ intel_opregion_notify_adapter(i915, state);
+
+ if (opregion->asle)
+ opregion->asle->ardy = ASLE_ARDY_NOT_READY;
+
+ cancel_work_sync(&i915->opregion.asle_work);
+
+ if (opregion->acpi)
+ opregion->acpi->drdy = 0;
+}
+
+void intel_opregion_unregister(struct drm_i915_private *i915)
+{
+ struct intel_opregion *opregion = &i915->opregion;
+
+ intel_opregion_suspend(i915, PCI_D1);
+
+ if (!opregion->header)
+ return;
+
+ if (opregion->acpi_notifier.notifier_call) {
+ unregister_acpi_notifier(&opregion->acpi_notifier);
+ opregion->acpi_notifier.notifier_call = NULL;
+ }
+
+ /* just clear all opregion memory pointers now */
+ memunmap(opregion->header);
+ if (opregion->rvda) {
+ memunmap(opregion->rvda);
+ opregion->rvda = NULL;
+ }
+ if (opregion->vbt_firmware) {
+ kfree(opregion->vbt_firmware);
+ opregion->vbt_firmware = NULL;
+ }
+ opregion->header = NULL;
+ opregion->acpi = NULL;
+ opregion->swsci = NULL;
+ opregion->asle = NULL;
+ opregion->vbt = NULL;
+ opregion->lid_state = NULL;
+}
diff --git a/drivers/gpu/drm/i915/intel_opregion.h b/drivers/gpu/drm/i915/intel_opregion.h
index e8498a8cda3d..4aa68ffbd30e 100644
--- a/drivers/gpu/drm/i915/intel_opregion.h
+++ b/drivers/gpu/drm/i915/intel_opregion.h
@@ -57,8 +57,14 @@ struct intel_opregion {
#ifdef CONFIG_ACPI
int intel_opregion_setup(struct drm_i915_private *dev_priv);
+
void intel_opregion_register(struct drm_i915_private *dev_priv);
void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+
+void intel_opregion_resume(struct drm_i915_private *dev_priv);
+void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+ pci_power_t state);
+
void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
@@ -81,6 +87,15 @@ static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
{
}
+static inline void intel_opregion_resume(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_suspend(struct drm_i915_private *dev_priv,
+ pci_power_t state)
+{
+}
+
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 72eb7e48e8bc..20ea7c99d13a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1338,7 +1338,7 @@ err_put_bo:
return err;
}
-void intel_setup_overlay(struct drm_i915_private *dev_priv)
+void intel_overlay_setup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
int ret;
@@ -1387,7 +1387,7 @@ out_free:
kfree(overlay);
}
-void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
+void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4a9f139e7b73..e6cd7b55c018 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -111,7 +111,7 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
/* Native modes don't need fitting */
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h &&
- !pipe_config->ycbcr420)
+ pipe_config->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
goto done;
switch (fitting_mode) {
@@ -505,7 +505,7 @@ static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
static u32 vlv_get_backlight(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum pipe pipe = intel_get_pipe_from_connector(connector);
+ enum pipe pipe = intel_connector_get_pipe(connector);
return _vlv_get_backlight(dev_priv, pipe);
}
@@ -763,7 +763,7 @@ static void pwm_disable_backlight(const struct drm_connector_state *old_conn_sta
struct intel_panel *panel = &connector->panel;
/* Disable the backlight */
- pwm_config(panel->backlight.pwm, 0, CRC_PMIC_PWM_PERIOD_NS);
+ intel_panel_actually_set_backlight(old_conn_state, 0);
usleep_range(2000, 3000);
pwm_disable(panel->backlight.pwm);
}
@@ -1814,11 +1814,8 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
return 0;
}
-void intel_panel_destroy_backlight(struct drm_connector *connector)
+static void intel_panel_destroy_backlight(struct intel_panel *panel)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_panel *panel = &intel_connector->panel;
-
/* dispose of the pwm */
if (panel->backlight.pwm)
pwm_put(panel->backlight.pwm);
@@ -1923,6 +1920,8 @@ void intel_panel_fini(struct intel_panel *panel)
struct intel_connector *intel_connector =
container_of(panel, struct intel_connector, panel);
+ intel_panel_destroy_backlight(panel);
+
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3fe358db1276..a26b4eddda25 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3198,7 +3198,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* and after the vblank.
*/
*a = newstate->wm.ilk.optimal;
- if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base))
+ if (!newstate->base.active || drm_atomic_crtc_needs_modeset(&newstate->base) ||
+ intel_state->skip_intermediate_wm)
return 0;
a->pipe_enabled |= b->pipe_enabled;
@@ -3650,15 +3651,8 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
static bool
intel_has_sagv(struct drm_i915_private *dev_priv)
{
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
- IS_CANNONLAKE(dev_priv))
- return true;
-
- if (IS_SKYLAKE(dev_priv) &&
- dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED)
- return true;
-
- return false;
+ return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) &&
+ dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED;
}
/*
@@ -3822,7 +3816,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *cstate,
- const unsigned int total_data_rate,
+ const u64 total_data_rate,
const int num_active,
struct skl_ddb_allocation *ddb)
{
@@ -3836,12 +3830,12 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
return ddb_size - 4; /* 4 blocks for bypass path allocation */
adjusted_mode = &cstate->base.adjusted_mode;
- total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode);
+ total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
/*
* 12GB/s is maximum BW supported by single DBuf slice.
*/
- if (total_data_bw >= GBps(12) || num_active > 1) {
+ if (num_active > 1 || total_data_bw >= GBps(12)) {
ddb->enabled_slices = 2;
} else {
ddb->enabled_slices = 1;
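The slice decision in intel_get_ddb_size() as standalone arithmetic (a sketch assuming GBps() counts decimal gigabytes per second):

#include <stdint.h>

static int dbuf_slices_needed(uint64_t data_rate, int vrefresh, int num_active)
{
	uint64_t bw = data_rate * vrefresh;	/* bytes per second */

	/* a single DBuf slice tops out at 12 GB/s */
	return (num_active > 1 || bw >= 12000000000ull) ? 2 : 1;
}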
@@ -3852,16 +3846,15 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
}
static void
-skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
+skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *cstate,
- const unsigned int total_data_rate,
+ const u64 total_data_rate,
struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
struct drm_atomic_state *state = cstate->base.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *for_crtc = cstate->base.crtc;
const struct drm_crtc_state *crtc_state;
const struct drm_crtc *crtc;
@@ -3958,73 +3951,68 @@ static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
const enum pipe pipe,
const enum plane_id plane_id,
- struct skl_ddb_allocation *ddb /* out */)
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- u32 val, val2 = 0;
- int fourcc, pixel_format;
+ u32 val, val2;
+ u32 fourcc = 0;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
val = I915_READ(CUR_BUF_CFG(pipe));
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
return;
}
val = I915_READ(PLANE_CTL(pipe, plane_id));
/* No DDB allocated for disabled planes */
- if (!(val & PLANE_CTL_ENABLE))
- return;
-
- pixel_format = val & PLANE_CTL_FORMAT_MASK;
- fourcc = skl_format_to_fourcc(pixel_format,
- val & PLANE_CTL_ORDER_RGBX,
- val & PLANE_CTL_ALPHA_MASK);
+ if (val & PLANE_CTL_ENABLE)
+ fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
+ val & PLANE_CTL_ORDER_RGBX,
+ val & PLANE_CTL_ALPHA_MASK);
- val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
- /*
- * FIXME: add proper NV12 support for ICL. Avoid reading unclaimed
- * registers for now.
- */
- if (INTEL_GEN(dev_priv) < 11)
+ if (INTEL_GEN(dev_priv) >= 11) {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ } else {
+ val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- if (fourcc == DRM_FORMAT_NV12) {
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val2);
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->uv_plane[pipe][plane_id], val);
- } else {
- skl_ddb_entry_init_from_hw(dev_priv,
- &ddb->plane[pipe][plane_id], val);
+ if (fourcc == DRM_FORMAT_NV12)
+ swap(val, val2);
+
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+ skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
}
}
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */)
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv)
{
- struct intel_crtc *crtc;
-
- memset(ddb, 0, sizeof(*ddb));
-
- ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- enum intel_display_power_domain power_domain;
- enum plane_id plane_id;
- enum pipe pipe = crtc->pipe;
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return;
- power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
- continue;
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ skl_ddb_get_hw_plane_state(dev_priv, pipe,
+ plane_id,
+ &ddb_y[plane_id],
+ &ddb_uv[plane_id]);
- for_each_plane_id_on_crtc(crtc, plane_id)
- skl_ddb_get_hw_plane_state(dev_priv, pipe,
- plane_id, ddb);
+ intel_display_power_put(dev_priv, power_domain);
+}
- intel_display_power_put(dev_priv, power_domain);
- }
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */)
+{
+ ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
}
/*
@@ -4177,23 +4165,24 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
return 0;
}
-static unsigned int
+static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
- const struct drm_plane_state *pstate,
+ const struct intel_plane_state *intel_pstate,
const int plane)
{
- struct intel_plane *intel_plane = to_intel_plane(pstate->plane);
- struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
+ struct intel_plane *intel_plane =
+ to_intel_plane(intel_pstate->base.plane);
uint32_t data_rate;
uint32_t width = 0, height = 0;
struct drm_framebuffer *fb;
u32 format;
uint_fixed_16_16_t down_scale_amount;
+ u64 rate;
if (!intel_pstate->base.visible)
return 0;
- fb = pstate->fb;
+ fb = intel_pstate->base.fb;
format = fb->format->format;
if (intel_plane->id == PLANE_CURSOR)
@@ -4215,28 +4204,26 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
height /= 2;
}
- data_rate = width * height * fb->format->cpp[plane];
+ data_rate = width * height;
down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
- return mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+ rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
+
+ rate *= fb->format->cpp[plane];
+ return rate;
}
-/*
- * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
- * a 8192x4096@32bpp framebuffer:
- * 3 * 4096 * 8192 * 4 < 2^32
- */
-static unsigned int
+static u64
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
- unsigned int *plane_data_rate,
- unsigned int *uv_plane_data_rate)
+ u64 *plane_data_rate,
+ u64 *uv_plane_data_rate)
{
struct drm_crtc_state *cstate = &intel_cstate->base;
struct drm_atomic_state *state = cstate->state;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
- unsigned int total_data_rate = 0;
+ u64 total_data_rate = 0;
if (WARN_ON(!state))
return 0;
@@ -4244,26 +4231,81 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
/* Calculate and cache data rate for each plane */
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
enum plane_id plane_id = to_intel_plane(plane)->id;
- unsigned int rate;
+ u64 rate;
+ const struct intel_plane_state *intel_pstate =
+ to_intel_plane_state(pstate);
/* packed/y */
rate = skl_plane_relative_data_rate(intel_cstate,
- pstate, 0);
+ intel_pstate, 0);
plane_data_rate[plane_id] = rate;
-
total_data_rate += rate;
/* uv-plane */
rate = skl_plane_relative_data_rate(intel_cstate,
- pstate, 1);
+ intel_pstate, 1);
uv_plane_data_rate[plane_id] = rate;
-
total_data_rate += rate;
}
return total_data_rate;
}
+static u64
+icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
+ u64 *plane_data_rate)
+{
+ struct drm_crtc_state *cstate = &intel_cstate->base;
+ struct drm_atomic_state *state = cstate->state;
+ struct drm_plane *plane;
+ const struct drm_plane_state *pstate;
+ u64 total_data_rate = 0;
+
+ if (WARN_ON(!state))
+ return 0;
+
+ /* Calculate and cache data rate for each plane */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, cstate) {
+ const struct intel_plane_state *intel_pstate =
+ to_intel_plane_state(pstate);
+ enum plane_id plane_id = to_intel_plane(plane)->id;
+ u64 rate;
+
+ if (!intel_pstate->linked_plane) {
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ intel_pstate, 0);
+ plane_data_rate[plane_id] = rate;
+ total_data_rate += rate;
+ } else {
+ enum plane_id y_plane_id;
+
+ /*
+ * The slave plane might not be iterated by
+ * drm_atomic_crtc_state_for_each_plane_state(),
+ * and it needs the master plane state, which may be
+ * NULL if we try get_new_plane_state(), so we
+ * always calculate from the master.
+ */
+ if (intel_pstate->slave)
+ continue;
+
+ /* Y plane rate is calculated on the slave */
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ intel_pstate, 0);
+ y_plane_id = intel_pstate->linked_plane->id;
+ plane_data_rate[y_plane_id] = rate;
+ total_data_rate += rate;
+
+ rate = skl_plane_relative_data_rate(intel_cstate,
+ intel_pstate, 1);
+ plane_data_rate[plane_id] = rate;
+ total_data_rate += rate;
+ }
+ }
+
+ return total_data_rate;
+}
+
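For reference, the relative data rate both helpers accumulate is just pixels fetched per frame, scaled by the downscale factor, times bytes per pixel; a sketch where double stands in for the driver's 16.16 fixed point:

#include <stdint.h>

static uint64_t relative_data_rate(uint32_t width, uint32_t height,
				   uint32_t cpp, double downscale)
{
	/* summed over many planes this can overflow u32, hence the u64 rate */
	return (uint64_t)((double)width * height * downscale) * cpp;
}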
static uint16_t
skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
{
@@ -4336,15 +4378,25 @@ skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
enum plane_id plane_id = to_intel_plane(plane)->id;
+ struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
if (plane_id == PLANE_CURSOR)
continue;
- if (!pstate->visible)
+ /* a slave plane must be invisible and is calculated from the master */
+ if (!pstate->visible || WARN_ON(plane_state->slave))
continue;
- minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
- uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+ if (!plane_state->linked_plane) {
+ minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
+ uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+ } else {
+ enum plane_id y_plane_id =
+ plane_state->linked_plane->id;
+
+ minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
+ minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
+ }
}
minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
@@ -4356,23 +4408,22 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_crtc *crtc = cstate->base.crtc;
- struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
uint16_t alloc_size, start;
uint16_t minimum[I915_MAX_PLANES] = {};
uint16_t uv_minimum[I915_MAX_PLANES] = {};
- unsigned int total_data_rate;
+ u64 total_data_rate;
enum plane_id plane_id;
int num_active;
- unsigned int plane_data_rate[I915_MAX_PLANES] = {};
- unsigned int uv_plane_data_rate[I915_MAX_PLANES] = {};
+ u64 plane_data_rate[I915_MAX_PLANES] = {};
+ u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
uint16_t total_min_blocks = 0;
/* Clear the partitioning for disabled planes. */
- memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
- memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
+ memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
+ memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
if (WARN_ON(!state))
return 0;
@@ -4382,11 +4433,18 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
}
- total_data_rate = skl_get_total_relative_data_rate(cstate,
- plane_data_rate,
- uv_plane_data_rate);
- skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb,
- alloc, &num_active);
+ if (INTEL_GEN(dev_priv) < 11)
+ total_data_rate =
+ skl_get_total_relative_data_rate(cstate,
+ plane_data_rate,
+ uv_plane_data_rate);
+ else
+ total_data_rate =
+ icl_get_total_relative_data_rate(cstate,
+ plane_data_rate);
+
+ skl_ddb_get_pipe_allocation_limits(dev_priv, cstate, total_data_rate,
+ ddb, alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -4412,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
}
alloc_size -= total_min_blocks;
- ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
- ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
/*
* 2. Distribute the remaining space in proportion to the amount of
@@ -4426,7 +4484,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- unsigned int data_rate, uv_data_rate;
+ u64 data_rate, uv_data_rate;
uint16_t plane_blocks, uv_plane_blocks;
if (plane_id == PLANE_CURSOR)
@@ -4440,13 +4498,12 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* result is < available as data_rate / total_data_rate < 1
*/
plane_blocks = minimum[plane_id];
- plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
- total_data_rate);
+ plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
/* Leave disabled planes at (0,0) */
if (data_rate) {
- ddb->plane[pipe][plane_id].start = start;
- ddb->plane[pipe][plane_id].end = start + plane_blocks;
+ cstate->wm.skl.plane_ddb_y[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
}
start += plane_blocks;
@@ -4455,12 +4512,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uv_data_rate = uv_plane_data_rate[plane_id];
uv_plane_blocks = uv_minimum[plane_id];
- uv_plane_blocks += div_u64((uint64_t)alloc_size * uv_data_rate,
- total_data_rate);
+ uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
+
+ /* Gen11+ uses a separate plane for UV watermarks */
+ WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
if (uv_data_rate) {
- ddb->uv_plane[pipe][plane_id].start = start;
- ddb->uv_plane[pipe][plane_id].end =
+ cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
+ cstate->wm.skl.plane_ddb_uv[plane_id].end =
start + uv_plane_blocks;
}
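The distribution step above in isolation (sketch): each plane gets its minimum plus a share of the remaining space weighted by its data rate, so the shares never exceed what is available.

#include <stdint.h>

static uint16_t plane_share(uint16_t minimum, uint16_t alloc_size,
			    uint64_t rate, uint64_t total_rate)
{
	if (!total_rate)
		return minimum;

	/* rate / total_rate < 1, so the result stays below minimum + alloc_size */
	return minimum + (uint16_t)(alloc_size * rate / total_rate);
}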
@@ -4514,7 +4573,7 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
}
static uint_fixed_16_16_t
-intel_get_linetime_us(struct intel_crtc_state *cstate)
+intel_get_linetime_us(const struct intel_crtc_state *cstate)
{
uint32_t pixel_rate;
uint32_t crtc_htotal;
@@ -4557,12 +4616,12 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
}
static int
-skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
- struct intel_crtc_state *cstate,
+skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
- struct skl_wm_params *wp, int plane_id)
+ struct skl_wm_params *wp, int color_plane)
{
struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
uint32_t interm_pbpl;
@@ -4570,11 +4629,8 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
- if (!intel_wm_plane_visible(cstate, intel_pstate))
- return 0;
-
/* only NV12 format has two planes */
- if (plane_id == 1 && fb->format->format != DRM_FORMAT_NV12) {
+ if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
DRM_DEBUG_KMS("Non NV12 format have single plane\n");
return -EINVAL;
}
@@ -4599,10 +4655,10 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
}
- if (plane_id == 1 && wp->is_planar)
+ if (color_plane == 1 && wp->is_planar)
wp->width /= 2;
- wp->cpp = fb->format->cpp[plane_id];
+ wp->cpp = fb->format->cpp[color_plane];
wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
intel_pstate);
@@ -4664,8 +4720,7 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv,
return 0;
}
-static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
- struct intel_crtc_state *cstate,
+static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
uint16_t ddb_allocation,
int level,
@@ -4673,6 +4728,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_pstate->base.plane->dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
uint32_t latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
@@ -4683,11 +4740,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
uint32_t min_disp_buf_needed;
- if (latency == 0 ||
- !intel_wm_plane_visible(cstate, intel_pstate)) {
- result->plane_en = false;
- return 0;
- }
+ if (latency == 0)
+ return level == 0 ? -EINVAL : 0;
/* Display WA #1141: kbl,cfl */
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4710,15 +4764,24 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
} else {
if ((wp->cpp * cstate->base.adjusted_mode.crtc_htotal /
wp->dbuf_block_size < 1) &&
- (wp->plane_bytes_per_line / wp->dbuf_block_size < 1))
+ (wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
- else if (ddb_allocation >=
- fixed16_to_u32_round_up(wp->plane_blocks_per_line))
- selected_result = min_fixed16(method1, method2);
- else if (latency >= wp->linetime_us)
- selected_result = min_fixed16(method1, method2);
- else
+ } else if (ddb_allocation >=
+ fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
+ if (IS_GEN9(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv))
+ selected_result = min_fixed16(method1, method2);
+ else
+ selected_result = method2;
+ } else if (latency >= wp->linetime_us) {
+ if (IS_GEN9(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv))
+ selected_result = min_fixed16(method1, method2);
+ else
+ selected_result = method2;
+ } else {
selected_result = method1;
+ }
}
res_blocks = fixed16_to_u32_round_up(selected_result) + 1;
@@ -4775,8 +4838,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if ((level > 0 && res_lines > 31) ||
res_blocks >= ddb_allocation ||
min_disp_buf_needed >= ddb_allocation) {
- result->plane_en = false;
-
/*
* If there are no valid level 0 watermarks, then we can't
* support this display configuration.
@@ -4794,17 +4855,6 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
}
- /*
- * Display WA #826 (SKL:ALL, BXT:ALL) & #1059 (CNL:A)
- * disable wm level 1-7 on NV12 planes
- */
- if (wp->is_planar && level >= 1 &&
- (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
- IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))) {
- result->plane_en = false;
- return 0;
- }
-
/* The number of lines are ignored for the level 0 watermark. */
result->plane_res_b = res_blocks;
result->plane_res_l = res_lines;
@@ -4814,43 +4864,22 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
}
static int
-skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb,
- struct intel_crtc_state *cstate,
+skl_compute_wm_levels(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
+ uint16_t ddb_blocks,
const struct skl_wm_params *wm_params,
- struct skl_plane_wm *wm,
- int plane_id)
+ struct skl_wm_level *levels)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
- struct drm_plane *plane = intel_pstate->base.plane;
- struct intel_plane *intel_plane = to_intel_plane(plane);
- uint16_t ddb_blocks;
- enum pipe pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv =
+ to_i915(intel_pstate->base.plane->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum plane_id intel_plane_id = intel_plane->id;
+ struct skl_wm_level *result_prev = &levels[0];
int ret;
- if (WARN_ON(!intel_pstate->base.fb))
- return -EINVAL;
-
- ddb_blocks = plane_id ?
- skl_ddb_entry_size(&ddb->uv_plane[pipe][intel_plane_id]) :
- skl_ddb_entry_size(&ddb->plane[pipe][intel_plane_id]);
-
for (level = 0; level <= max_level; level++) {
- struct skl_wm_level *result = plane_id ? &wm->uv_wm[level] :
- &wm->wm[level];
- struct skl_wm_level *result_prev;
+ struct skl_wm_level *result = &levels[level];
- if (level)
- result_prev = plane_id ? &wm->uv_wm[level - 1] :
- &wm->wm[level - 1];
- else
- result_prev = plane_id ? &wm->uv_wm[0] : &wm->wm[0];
-
- ret = skl_compute_plane_wm(dev_priv,
- cstate,
+ ret = skl_compute_plane_wm(cstate,
intel_pstate,
ddb_blocks,
level,
@@ -4859,16 +4888,15 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv,
result);
if (ret)
return ret;
- }
- if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12)
- wm->is_planar = true;
+ result_prev = result;
+ }
return 0;
}
static uint32_t
-skl_compute_linetime_wm(struct intel_crtc_state *cstate)
+skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -4890,42 +4918,50 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
return linetime_wm;
}
-static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
- struct skl_wm_params *wp,
- struct skl_wm_level *wm_l0,
- uint16_t ddb_allocation,
- struct skl_wm_level *trans_wm /* out */)
+static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
+ const struct skl_wm_params *wp,
+ struct skl_plane_wm *wm,
+ uint16_t ddb_allocation)
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
uint16_t trans_min, trans_y_tile_min;
const uint16_t trans_amount = 10; /* This is configurable amount */
- uint16_t trans_offset_b, res_blocks;
-
- if (!cstate->base.active)
- goto exit;
+ uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
/* Transition WM are not recommended by HW team for GEN9 */
if (INTEL_GEN(dev_priv) <= 9)
- goto exit;
+ return;
/* Transition WM don't make any sense if ipc is disabled */
if (!dev_priv->ipc_enabled)
- goto exit;
+ return;
- trans_min = 0;
- if (INTEL_GEN(dev_priv) >= 10)
+ trans_min = 14;
+ if (INTEL_GEN(dev_priv) >= 11)
trans_min = 4;
trans_offset_b = trans_min + trans_amount;
+ /*
+ * The spec asks for Selected Result Blocks for wm0 (the real value),
+ * not Result Blocks (the integer value). Pay attention to the capital
+ * letters. The value wm->wm[0].plane_res_b is actually Result Blocks,
+ * but since Result Blocks is the ceiling of Selected Result Blocks
+ * plus 1, and since we will later have to take the ceiling of the sum
+ * in the transition watermark calculation, we can just pretend Selected
+ * Result Blocks is Result Blocks minus 1, which should work on the
+ * current platforms.
+ */
+ wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
+
if (wp->y_tiled) {
trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
wp->y_tile_minimum);
- res_blocks = max(wm_l0->plane_res_b, trans_y_tile_min) +
+ res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
trans_offset_b;
} else {
- res_blocks = wm_l0->plane_res_b + trans_offset_b;
+ res_blocks = wm0_sel_res_b + trans_offset_b;
/* WA BUG:1938466 add one block for non y-tile planes */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
@@ -4936,25 +4972,132 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
res_blocks += 1;
if (res_blocks < ddb_allocation) {
- trans_wm->plane_res_b = res_blocks;
- trans_wm->plane_en = true;
- return;
+ wm->trans_wm.plane_res_b = res_blocks;
+ wm->trans_wm.plane_en = true;
+ }
+}
+
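The transition watermark math above, condensed into a standalone sketch (helper name and parameters are illustrative, not the driver API):

#include <stdint.h>

static uint16_t trans_wm_blocks(uint16_t wm0_res_b, uint16_t trans_min,
				uint16_t trans_amount, int y_tiled,
				uint16_t y_tile_min)
{
	uint16_t sel = wm0_res_b - 1;	/* Selected Result Blocks, see comment */
	uint16_t base = (y_tiled && 2 * y_tile_min > sel) ? 2 * y_tile_min : sel;

	return base + trans_min + trans_amount + 1;	/* +1 for the ceiling */
}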
+static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ enum plane_id plane_id, int color_plane)
+{
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
+ struct skl_wm_params wm_params;
+ int ret;
+
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, color_plane);
+ if (ret)
+ return ret;
+
+ ret = skl_compute_wm_levels(crtc_state, plane_state,
+ ddb_blocks, &wm_params, wm->wm);
+ if (ret)
+ return ret;
+
+ skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
+
+ return 0;
+}
+
+static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ enum plane_id plane_id)
+{
+ struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
+ u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
+ struct skl_wm_params wm_params;
+ int ret;
+
+ wm->is_planar = true;
+
+ /* uv plane watermarks must also be validated for NV12/Planar */
+ ret = skl_compute_plane_wm_params(crtc_state, plane_state,
+ &wm_params, 1);
+ if (ret)
+ return ret;
+
+ ret = skl_compute_wm_levels(crtc_state, plane_state,
+ ddb_blocks, &wm_params, wm->uv_wm);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id plane_id = plane->id;
+ int ret;
+
+ if (!intel_wm_plane_visible(crtc_state, plane_state))
+ return 0;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 0);
+ if (ret)
+ return ret;
+
+ if (fb->format->is_yuv && fb->format->num_planes > 1) {
+ ret = skl_build_plane_wm_uv(crtc_state, plane_state,
+ plane_id);
+ if (ret)
+ return ret;
}
-exit:
- trans_wm->plane_en = false;
+ return 0;
+}
+
+static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
+ int ret;
+
+ /* Watermarks calculated in master */
+ if (plane_state->slave)
+ return 0;
+
+ if (plane_state->linked_plane) {
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ enum plane_id y_plane_id = plane_state->linked_plane->id;
+
+ WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state));
+ WARN_ON(!fb->format->is_yuv ||
+ fb->format->num_planes == 1);
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ y_plane_id, 0);
+ if (ret)
+ return ret;
+
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 1);
+ if (ret)
+ return ret;
+ } else if (intel_wm_plane_visible(crtc_state, plane_state)) {
+ ret = skl_build_plane_wm_single(crtc_state, plane_state,
+ plane_id, 0);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_ddb_allocation *ddb,
struct skl_pipe_wm *pipe_wm)
{
- struct drm_device *dev = cstate->base.crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
struct drm_crtc_state *crtc_state = &cstate->base;
- const struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_plane *plane;
const struct drm_plane_state *pstate;
- struct skl_plane_wm *wm;
int ret;
/*
@@ -4966,44 +5109,15 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
const struct intel_plane_state *intel_pstate =
to_intel_plane_state(pstate);
- enum plane_id plane_id = to_intel_plane(plane)->id;
- struct skl_wm_params wm_params;
- enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe;
- uint16_t ddb_blocks;
- wm = &pipe_wm->planes[plane_id];
- ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
-
- ret = skl_compute_plane_wm_params(dev_priv, cstate,
- intel_pstate, &wm_params, 0);
- if (ret)
- return ret;
-
- ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
- intel_pstate, &wm_params, wm, 0);
+ if (INTEL_GEN(dev_priv) >= 11)
+ ret = icl_build_plane_wm(pipe_wm,
+ cstate, intel_pstate);
+ else
+ ret = skl_build_plane_wm(pipe_wm,
+ cstate, intel_pstate);
if (ret)
return ret;
-
- skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0],
- ddb_blocks, &wm->trans_wm);
-
- /* uv plane watermarks must also be validated for NV12/Planar */
- if (wm_params.is_planar) {
- memset(&wm_params, 0, sizeof(struct skl_wm_params));
- wm->is_planar = true;
-
- ret = skl_compute_plane_wm_params(dev_priv, cstate,
- intel_pstate,
- &wm_params, 1);
- if (ret)
- return ret;
-
- ret = skl_compute_wm_levels(dev_priv, ddb, cstate,
- intel_pstate, &wm_params,
- wm, 1);
- if (ret)
- return ret;
- }
}
pipe_wm->linetime = skl_compute_linetime_wm(cstate);
@@ -5016,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
if (entry->end)
- I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
+ I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
else
- I915_WRITE(reg, 0);
+ I915_WRITE_FW(reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -5033,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
}
- I915_WRITE(reg, val);
+ I915_WRITE_FW(reg, val);
}
-static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb,
- enum plane_id plane_id)
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb_y =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ const struct skl_ddb_entry *ddb_uv =
+ &crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
@@ -5054,35 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
&wm->trans_wm);
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- /* FIXME: add proper NV12 support for ICL. */
- if (INTEL_GEN(dev_priv) >= 11)
- return skl_ddb_entry_write(dev_priv,
- PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- if (wm->is_planar) {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->uv_plane[pipe][plane_id]);
+ if (INTEL_GEN(dev_priv) >= 11) {
skl_ddb_entry_write(dev_priv,
- PLANE_NV12_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- } else {
- skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
- &ddb->plane[pipe][plane_id]);
- I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ return;
}
+
+ if (wm->is_planar)
+ swap(ddb_y, ddb_uv);
+
+ skl_ddb_entry_write(dev_priv,
+ PLANE_BUF_CFG(pipe, plane_id), ddb_y);
+ skl_ddb_entry_write(dev_priv,
+ PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
}
-static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
- const struct skl_plane_wm *wm,
- const struct skl_ddb_allocation *ddb)
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
- enum pipe pipe = intel_crtc->pipe;
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct skl_plane_wm *wm =
+ &crtc_state->wm.skl.optimal.planes[plane_id];
+ const struct skl_ddb_entry *ddb =
+ &crtc_state->wm.skl.plane_ddb_y[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
@@ -5090,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
}
skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
- skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
- &ddb->plane[pipe][PLANE_CURSOR]);
+ skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
- if (l1->plane_en != l2->plane_en)
- return false;
+ return l1->plane_en == l2->plane_en &&
+ l1->plane_res_l == l2->plane_res_l &&
+ l1->plane_res_b == l2->plane_res_b;
+}
- /* If both planes aren't enabled, the rest shouldn't matter */
- if (!l1->plane_en)
- return true;
+static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
+ const struct skl_plane_wm *wm1,
+ const struct skl_plane_wm *wm2)
+{
+ int level, max_level = ilk_wm_max_level(dev_priv);
- return (l1->plane_res_l == l2->plane_res_l &&
- l1->plane_res_b == l2->plane_res_b);
+ for (level = 0; level <= max_level; level++) {
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
+ !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+ return false;
+ }
+
+ return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
@@ -5114,16 +5236,15 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
return a->start < b->end && b->start < a->end;
}
-bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
- const struct skl_ddb_entry **entries,
- const struct skl_ddb_entry *ddb,
- int ignore)
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry entries[],
+ int num_entries, int ignore_idx)
{
- enum pipe pipe;
+ int i;
- for_each_pipe(dev_priv, pipe) {
- if (pipe != ignore && entries[pipe] &&
- skl_ddb_entries_overlap(ddb, entries[pipe]))
+ for (i = 0; i < num_entries; i++) {
+ if (i != ignore_idx &&
+ skl_ddb_entries_overlap(ddb, &entries[i]))
return true;
}
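A quick worked example of the half-open overlap test this helper builds on (skl_ddb_entries_overlap above); the entry values here are made up for illustration:

/* Two [start, end) ranges overlap iff each starts before the other ends:
 *   { .start = 0, .end = 512 } vs { .start = 256, .end = 768 } -> overlap
 *   { .start = 0, .end = 256 } vs { .start = 256, .end = 512 } -> no overlap
 * so a caller can probe one pipe's proposed DDB against all the others:
 *
 *   if (skl_ddb_allocation_overlaps(&ddb, entries, num_entries, my_idx))
 *           reallocate(); // hypothetical caller, not from this patch
 */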
@@ -5133,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(struct drm_i915_private *dev_priv,
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
- struct skl_ddb_allocation *ddb, /* out */
bool *changed /* out */)
{
struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
- ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
+ ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
if (ret)
return ret;
@@ -5165,32 +5285,29 @@ pipes_modified(struct drm_atomic_state *state)
}
static int
-skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
+skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *new_crtc_state)
{
- struct drm_atomic_state *state = cstate->base.state;
- struct drm_device *dev = state->dev;
- struct drm_crtc *crtc = cstate->base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
- struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
- struct drm_plane_state *plane_state;
- struct drm_plane *plane;
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane;
- drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
- enum plane_id plane_id = to_intel_plane(plane)->id;
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
- if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
- &new_ddb->plane[pipe][plane_id]) &&
- skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
- &new_ddb->uv_plane[pipe][plane_id]))
+ if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
+ skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
+ &new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
continue;
- plane_state = drm_atomic_get_plane_state(state, plane);
+ plane_state = intel_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
}
return 0;
@@ -5202,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state)
const struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
int ret, i;
memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
- for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
- ret = skl_allocate_pipe_ddb(cstate, ddb);
+ for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
if (ret)
return ret;
- ret = skl_ddb_add_affected_planes(cstate);
+ ret = skl_ddb_add_affected_planes(old_crtc_state,
+ new_crtc_state);
if (ret)
return ret;
}
@@ -5222,38 +5342,31 @@ skl_compute_ddb(struct drm_atomic_state *state)
}
static void
-skl_print_wm_changes(const struct drm_atomic_state *state)
+skl_print_wm_changes(struct intel_atomic_state *state)
{
- const struct drm_device *dev = state->dev;
- const struct drm_i915_private *dev_priv = to_i915(dev);
- const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- const struct drm_crtc *crtc;
- const struct drm_crtc_state *cstate;
- const struct intel_plane *intel_plane;
- const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
- const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
int i;
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
-
- for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
- enum plane_id plane_id = intel_plane->id;
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
- old = &old_ddb->plane[pipe][plane_id];
- new = &new_ddb->plane[pipe][plane_id];
+ old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
+ new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
if (skl_ddb_entry_equal(old, new))
continue;
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
- intel_plane->base.base.id,
- intel_plane->base.name,
- old->start, old->end,
- new->start, new->end);
+ DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end,
+ new->start, new->end);
}
}
}
@@ -5348,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
return 0;
}
+/*
+ * To make sure the cursor watermark registers are always consistent
+ * with our computed state, the following scenario needs special
+ * treatment:
+ *
+ * 1. enable cursor
+ * 2. move cursor entirely offscreen
+ * 3. disable cursor
+ *
+ * Step 2. does call .disable_plane() but does not zero the watermarks
+ * (since we consider an offscreen cursor still active for the purposes
+ * of watermarks). Step 3. would not normally call .disable_plane()
+ * because the actual plane visibility isn't changing, and we don't
+ * deallocate the cursor ddb until the pipe gets disabled. So we must
+ * force step 3. to call .disable_plane() to update the watermark
+ * registers properly.
+ *
+ * Other planes do not suffer from this issue as their watermarks are
+ * calculated based on the actual plane visibility. The only time this
+ * can trigger for the other planes is during the initial readout, as the
+ * default value of the watermark registers is not zero.
+ */
+static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+ enum plane_id plane_id = plane->id;
+
+ /*
+ * Force a full wm update for every plane on modeset.
+ * Required because the reset value of the wm registers
+ * is non-zero, whereas we want all disabled planes to
+ * have zero watermarks. So if we turn off the relevant
+ * power well the hardware state will go out of sync
+ * with the software state.
+ */
+ if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
+ skl_plane_wm_equals(dev_priv,
+ &old_crtc_state->wm.skl.optimal.planes[plane_id],
+ &new_crtc_state->wm.skl.optimal.planes[plane_id]))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane_id);
+ }
+
+ return 0;
+}
+
static int
skl_compute_wm(struct drm_atomic_state *state)
{
@@ -5387,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state)
&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
pipe_wm = &intel_cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
- &results->ddb, &changed);
+ ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+ if (ret)
+ return ret;
+
+ ret = skl_wm_add_affected_planes(intel_state,
+ to_intel_crtc(crtc));
if (ret)
return ret;
@@ -5402,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state)
intel_cstate->update_wm_pre = true;
}
- skl_print_wm_changes(state);
+ skl_print_wm_changes(intel_state);
return 0;
}
@@ -5413,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
- const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id != PLANE_CURSOR)
- skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
- ddb, plane_id);
- else
- skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
- ddb);
- }
}
static void skl_initial_wm(struct intel_atomic_state *state,
@@ -5439,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *results = &state->wm_results;
- struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
- enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
return;
@@ -5450,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
if (cstate->base.active_changed)
skl_atomic_update_crtc_wm(state, cstate);
- memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
- sizeof(hw_vals->ddb.uv_plane[pipe]));
- memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
- sizeof(hw_vals->ddb.plane[pipe]));
-
mutex_unlock(&dev_priv->wm.wm_mutex);
}
@@ -5605,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
if (dev_priv->active_crtcs) {
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
- } else {
- /*
- * Easy/common case; just sanitize DDB now if everything off
- * Keep dbuf slice info intact
- */
- memset(ddb->plane, 0, sizeof(ddb->plane));
- memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
}
}
@@ -6155,14 +6307,8 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
u32 val;
- /* Display WA #0477 WaDisableIPC: skl */
- if (IS_SKYLAKE(dev_priv))
- dev_priv->ipc_enabled = false;
-
- /* Display WA #1141: SKL:all KBL:all CFL */
- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
- !dev_priv->dram_info.symmetric_memory)
- dev_priv->ipc_enabled = false;
+ if (!HAS_IPC(dev_priv))
+ return;
val = I915_READ(DISP_ARB_CTL2);
@@ -6176,11 +6322,15 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv)
void intel_init_ipc(struct drm_i915_private *dev_priv)
{
- dev_priv->ipc_enabled = false;
if (!HAS_IPC(dev_priv))
return;
- dev_priv->ipc_enabled = true;
+ /* Display WA #1141: SKL:all KBL:all CFL */
+ if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ dev_priv->ipc_enabled = dev_priv->dram_info.symmetric_memory;
+ else
+ dev_priv->ipc_enabled = true;
+
intel_enable_ipc(dev_priv);
}
@@ -8774,6 +8924,10 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
/* This is not a Wa. Enable to reduce Sampler power */
I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
+
+ /* WaEnable32PlaneMode:icl */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
+ _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
}
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9351,8 +9505,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
- intel_fbc_init(dev_priv);
-
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
i915_pineview_get_mem_freq(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index b6838b525502..419e56342523 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -71,6 +71,14 @@ static bool psr_global_enabled(u32 debug)
static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
+ /* Disable PSR2 by default for all platforms */
+ if (i915_modparams.enable_psr == -1)
+ return false;
+
+ /* Cannot enable DSC and PSR2 simultaneously */
+ WARN_ON(crtc_state->dsc_params.compression_enable &&
+ crtc_state->has_psr2);
+
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
@@ -79,25 +87,42 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
}
}
+static int edp_psr_shift(enum transcoder cpu_transcoder)
+{
+ switch (cpu_transcoder) {
+ case TRANSCODER_A:
+ return EDP_PSR_TRANSCODER_A_SHIFT;
+ case TRANSCODER_B:
+ return EDP_PSR_TRANSCODER_B_SHIFT;
+ case TRANSCODER_C:
+ return EDP_PSR_TRANSCODER_C_SHIFT;
+ default:
+ MISSING_CASE(cpu_transcoder);
+ /* fallthrough */
+ case TRANSCODER_EDP:
+ return EDP_PSR_TRANSCODER_EDP_SHIFT;
+ }
+}
+
void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug)
{
u32 debug_mask, mask;
+ enum transcoder cpu_transcoder;
+ u32 transcoders = BIT(TRANSCODER_EDP);
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ transcoders |= BIT(TRANSCODER_A) |
+ BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C);
+
+ debug_mask = 0;
+ mask = 0;
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ int shift = edp_psr_shift(cpu_transcoder);
- mask = EDP_PSR_ERROR(TRANSCODER_EDP);
- debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
-
- if (INTEL_GEN(dev_priv) >= 8) {
- mask |= EDP_PSR_ERROR(TRANSCODER_A) |
- EDP_PSR_ERROR(TRANSCODER_B) |
- EDP_PSR_ERROR(TRANSCODER_C);
-
- debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
- EDP_PSR_POST_EXIT(TRANSCODER_B) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
- EDP_PSR_POST_EXIT(TRANSCODER_C) |
- EDP_PSR_PRE_ENTRY(TRANSCODER_C);
+ mask |= EDP_PSR_ERROR(shift);
+ debug_mask |= EDP_PSR_POST_EXIT(shift) |
+ EDP_PSR_PRE_ENTRY(shift);
}
if (debug & I915_PSR_DEBUG_IRQ)
@@ -148,6 +173,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
u32 transcoders = BIT(TRANSCODER_EDP);
enum transcoder cpu_transcoder;
ktime_t time_ns = ktime_get();
+ u32 mask = 0;
if (INTEL_GEN(dev_priv) >= 8)
transcoders |= BIT(TRANSCODER_A) |
@@ -155,18 +181,32 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
BIT(TRANSCODER_C);
for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
- /* FIXME: Exit PSR and link train manually when this happens. */
- if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
- DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
- transcoder_name(cpu_transcoder));
+ int shift = edp_psr_shift(cpu_transcoder);
+
+ if (psr_iir & EDP_PSR_ERROR(shift)) {
+ DRM_WARN("[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
+
+ dev_priv->psr.irq_aux_error = true;
- if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
+ /*
+ * If this interrupt is not masked it will keep
+ * firing so fast that it prevents the scheduled
+ * work from running.
+ * Also, after a PSR error we don't want to arm PSR
+ * again, so we don't care about unmasking the
+ * interrupt or unsetting irq_aux_error.
+ */
+ mask |= EDP_PSR_ERROR(shift);
+ }
+
+ if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) {
dev_priv->psr.last_entry_attempt = time_ns;
DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
- if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
+ if (psr_iir & EDP_PSR_POST_EXIT(shift)) {
dev_priv->psr.last_exit = time_ns;
DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
transcoder_name(cpu_transcoder));
@@ -180,6 +220,13 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
}
}
}
+
+ if (mask) {
+ mask |= I915_READ(EDP_PSR_IMR);
+ I915_WRITE(EDP_PSR_IMR, mask);
+
+ schedule_work(&dev_priv->psr.work);
+ }
}
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
@@ -294,7 +341,8 @@ static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
psr_vsc.sdp_header.HB3 = 0x8;
}
- intel_dig_port->write_infoframe(&intel_dig_port->base.base, crtc_state,
+ intel_dig_port->write_infoframe(&intel_dig_port->base,
+ crtc_state,
DP_SDP_VSC, &psr_vsc, sizeof(psr_vsc));
}
@@ -458,6 +506,16 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!dev_priv->psr.sink_psr2_support)
return false;
+ /*
+ * DSC and PSR2 cannot be enabled simultaneously. If a requested
+ * resolution requires DSC to be enabled, priority is given to DSC
+ * over PSR2.
+ */
+ if (crtc_state->dsc_params.compression_enable) {
+ DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+ return false;
+ }
+
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
@@ -503,10 +561,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (IS_HASWELL(dev_priv) &&
- I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
- S3D_ENABLE) {
- DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ if (dev_priv->psr.sink_not_reliable) {
+ DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
return;
}
@@ -553,11 +609,31 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
dev_priv->psr.active = true;
}
+static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
+{
+ static const i915_reg_t regs[] = {
+ [TRANSCODER_A] = CHICKEN_TRANS_A,
+ [TRANSCODER_B] = CHICKEN_TRANS_B,
+ [TRANSCODER_C] = CHICKEN_TRANS_C,
+ [TRANSCODER_EDP] = CHICKEN_TRANS_EDP,
+ };
+
+ WARN_ON(INTEL_GEN(dev_priv) < 9);
+
+ if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) ||
+ !regs[cpu_transcoder].reg))
+ cpu_transcoder = TRANSCODER_A;
+
+ return regs[cpu_transcoder];
+}
+
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 mask;
/* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
* use hardcoded values for PSR AUX transactions
@@ -566,37 +642,34 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
hsw_psr_setup_aux(intel_dp);
if (dev_priv->psr.psr2_enabled) {
- u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
+ i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
+ cpu_transcoder);
+ u32 chicken = I915_READ(reg);
- if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
| PSR2_ADD_VERTICAL_LINE_COUNT);
else
chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
- I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
-
- I915_WRITE(EDP_PSR_DEBUG,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_MAX_SLEEP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
- } else {
- /*
- * Per Spec: Avoid continuous PSR exit by masking MEMUP
- * and HPD. also mask LPSP to avoid dependency on other
- * drivers that might block runtime_pm besides
- * preventing other hw tracking issues now we can rely
- * on frontbuffer tracking.
- */
- I915_WRITE(EDP_PSR_DEBUG,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE |
- EDP_PSR_DEBUG_MASK_MAX_SLEEP);
+ I915_WRITE(reg, chicken);
}
+
+ /*
+ * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD. Also
+ * mask LPSP to avoid a dependency on other drivers that might block
+ * runtime_pm, besides preventing other hw tracking issues, now that
+ * we can rely on frontbuffer tracking.
+ */
+ mask = EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
+
+ I915_WRITE(EDP_PSR_DEBUG, mask);
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
@@ -646,6 +719,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.prepared = true;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
if (psr_global_enabled(dev_priv->psr.debug))
intel_psr_enable_locked(dev_priv, crtc_state);
@@ -656,49 +730,34 @@ unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void
-intel_psr_disable_source(struct intel_dp *intel_dp)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (dev_priv->psr.active) {
- i915_reg_t psr_status;
- u32 psr_status_mask;
-
- if (dev_priv->psr.psr2_enabled) {
- psr_status = EDP_PSR2_STATUS;
- psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
-
- I915_WRITE(EDP_PSR2_CTL,
- I915_READ(EDP_PSR2_CTL) &
- ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
-
- } else {
- psr_status = EDP_PSR_STATUS;
- psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
-
- I915_WRITE(EDP_PSR_CTL,
- I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
- }
+ u32 val;
- /* Wait till PSR is idle */
- if (intel_wait_for_register(dev_priv,
- psr_status, psr_status_mask, 0,
- 2000))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
+ if (!dev_priv->psr.active) {
+ if (INTEL_GEN(dev_priv) >= 9)
+ WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+ WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ return;
+ }
- dev_priv->psr.active = false;
+ if (dev_priv->psr.psr2_enabled) {
+ val = I915_READ(EDP_PSR2_CTL);
+ WARN_ON(!(val & EDP_PSR2_ENABLE));
+ I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
} else {
- if (dev_priv->psr.psr2_enabled)
- WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- else
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ val = I915_READ(EDP_PSR_CTL);
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+ I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
}
+ dev_priv->psr.active = false;
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ i915_reg_t psr_status;
+ u32 psr_status_mask;
lockdep_assert_held(&dev_priv->psr.lock);
@@ -707,7 +766,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Disabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
- intel_psr_disable_source(intel_dp);
+
+ intel_psr_exit(dev_priv);
+
+ if (dev_priv->psr.psr2_enabled) {
+ psr_status = EDP_PSR2_STATUS;
+ psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+ } else {
+ psr_status = EDP_PSR_STATUS;
+ psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+ }
+
+ /* Wait till PSR is idle */
+ if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
+ 2000))
+ DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -893,6 +966,16 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
return ret;
}
+static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+{
+ struct i915_psr *psr = &dev_priv->psr;
+
+ intel_psr_disable_locked(psr->dp);
+ psr->sink_not_reliable = true;
+ /* let's make sure that the sink is awake */
+ drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+}
+
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
@@ -903,6 +986,9 @@ static void intel_psr_work(struct work_struct *work)
if (!dev_priv->psr.enabled)
goto unlock;
+ if (READ_ONCE(dev_priv->psr.irq_aux_error))
+ intel_psr_handle_irq(dev_priv);
+
/*
* We have to make sure PSR is ready for re-enable
* otherwise it keeps disabled until next full enable/disable cycle.
@@ -925,25 +1011,6 @@ unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- if (!dev_priv->psr.active)
- return;
-
- if (dev_priv->psr.psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL);
- WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
- } else {
- val = I915_READ(EDP_PSR_CTL);
- WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
- }
- dev_priv->psr.active = false;
-}
-
/**
 * intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
@@ -960,9 +1027,6 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_crtc *crtc;
- enum pipe pipe;
-
if (!CAN_PSR(dev_priv))
return;
@@ -975,10 +1039,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
if (frontbuffer_bits)
@@ -1003,9 +1064,6 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
- struct drm_crtc *crtc;
- enum pipe pipe;
-
if (!CAN_PSR(dev_priv))
return;
@@ -1018,28 +1076,21 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
return;
}
- crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
if (frontbuffer_bits) {
- if (dev_priv->psr.psr2_enabled) {
- intel_psr_exit(dev_priv);
- } else {
- /*
- * Display WA #0884: all
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
- * Workaround tells us to write 0 to CUR_SURFLIVE_A,
- * but it makes more sense write to the current active
- * pipe.
- */
- I915_WRITE(CURSURFLIVE(pipe), 0);
- }
+ /*
+ * Display WA #0884: all
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * The workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense to write to the currently
+ * active pipe.
+ */
+ I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
}
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
@@ -1056,6 +1107,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
+ u32 val;
+
if (!HAS_PSR(dev_priv))
return;
@@ -1065,11 +1118,24 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (!dev_priv->psr.sink_support)
return;
- if (i915_modparams.enable_psr == -1) {
- i915_modparams.enable_psr = dev_priv->vbt.psr.enable;
+ if (i915_modparams.enable_psr == -1)
+ if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+ i915_modparams.enable_psr = 0;
- /* Per platform default: all disabled. */
- i915_modparams.enable_psr = 0;
+ /*
+ * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
+ * will still keep the error set even after the reset done in the
+ * irq_preinstall and irq_uninstall hooks.
+ * And enabling PSR in this situation causes the screen to freeze the
+ * first time the PSR HW tries to activate, so let's keep PSR disabled
+ * to avoid any rendering problems.
+ */
+ val = I915_READ(EDP_PSR_IIR);
+ val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP));
+ if (val) {
+ DRM_DEBUG_KMS("PSR interruption error set\n");
+ dev_priv->psr.sink_not_reliable = true;
+ return;
}
/* Set link_standby x link_off defaults */
@@ -1109,6 +1175,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
}
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
@@ -1126,12 +1193,27 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (val & ~errors)
DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
val & ~errors);
- if (val & errors)
+ if (val & errors) {
intel_psr_disable_locked(intel_dp);
+ psr->sink_not_reliable = true;
+ }
/* clear status register */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
-
- /* TODO: handle PSR2 errors */
exit:
mutex_unlock(&psr->lock);
}
+
+bool intel_psr_enabled(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ bool ret;
+
+ if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ return false;
+
+ mutex_lock(&dev_priv->psr.lock);
+ ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
+ mutex_unlock(&dev_priv->psr.lock);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/intel_quirks.c b/drivers/gpu/drm/i915/intel_quirks.c
new file mode 100644
index 000000000000..ec2b0fc92b8b
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_quirks.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/dmi.h>
+
+#include "intel_drv.h"
+
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
+ DRM_INFO("applying lvds SSC disable quirk\n");
+}
+
+/*
+ * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
+ * brightness value
+ */
+static void quirk_invert_brightness(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
+ DRM_INFO("applying inverted panel brightness quirk\n");
+}
+
+/* Some VBT's incorrectly indicate no backlight is present */
+static void quirk_backlight_present(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
+ DRM_INFO("applying backlight present quirk\n");
+}
+
+/* Toshiba Satellite P50-C-18C requires the T12 delay to be min 800 ms,
+ * which is 300 ms greater than the eDP spec T12 min.
+ */
+static void quirk_increase_t12_delay(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_INCREASE_T12_DELAY;
+ DRM_INFO("Applying T12 delay quirk\n");
+}
+
+/*
+ * GeminiLake NUC HDMI outputs require additional off time;
+ * this allows the onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
+{
+ i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+ DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
+struct intel_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ void (*hook)(struct drm_i915_private *i915);
+};
+
+/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
+struct intel_dmi_quirk {
+ void (*hook)(struct drm_i915_private *i915);
+ const struct dmi_system_id (*dmi_id_list)[];
+};
+
+static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
+{
+ DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
+ return 1;
+}
+
+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
+ {
+ .dmi_id_list = &(const struct dmi_system_id[]) {
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "NCR Corporation",
+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
+ },
+ },
+ { } /* terminating entry */
+ },
+ .hook = quirk_invert_brightness,
+ },
+};
+
+static struct intel_quirk intel_quirks[] = {
+ /* Lenovo U160 cannot use SSC on LVDS */
+ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
+
+ /* Sony Vaio Y cannot use SSC on LVDS */
+ { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
+
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+ /* Acer Aspire 5336 */
+ { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+ /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+ { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Acer C720 Chromebook (Core i3 4005U) */
+ { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
+
+ /* Apple Macbook 2,1 (Core 2 T7400) */
+ { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
+
+ /* Apple Macbook 4,1 */
+ { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
+ /* Toshiba CB35 Chromebook (Celeron 2955U) */
+ { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+ /* HP Chromebook 14 (Celeron 2955U) */
+ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
+
+ /* Dell Chromebook 11 */
+ { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
+
+ /* Dell Chromebook 11 (2015 version) */
+ { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
+
+ /* Toshiba Satellite P50-C-18C */
+ { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+ /* GeminiLake NUC */
+ { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ /* ASRock ITX */
+ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+};
+
+void intel_init_quirks(struct drm_i915_private *i915)
+{
+ struct pci_dev *d = i915->drm.pdev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
+ struct intel_quirk *q = &intel_quirks[i];
+
+ if (d->device == q->device &&
+ (d->subsystem_vendor == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (d->subsystem_device == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID))
+ q->hook(i915);
+ }
+ for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
+ if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
+ intel_dmi_quirks[i].hook(i915);
+ }
+}
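For illustration, a hypothetical table entry (not part of this patch) showing how PCI_ANY_ID acts as a wildcard in the subsystem match above:

/* Hypothetical example: apply the brightness quirk to every subsystem
 * variant of device 0x2a42, since intel_init_quirks() treats
 * PCI_ANY_ID in the subsystem fields as "match anything".
 */
static struct intel_quirk example_quirk = {
	.device = 0x2a42,
	.subsystem_vendor = PCI_ANY_ID,
	.subsystem_device = PCI_ANY_ID,
	.hook = quirk_invert_brightness,
};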
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 187bb0ceb4ac..fbeaec3994e7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -69,19 +69,28 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
+ unsigned int num_store_dw;
u32 cmd, *cs;
cmd = MI_FLUSH;
-
+ num_store_dw = 0;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
+ if (mode & EMIT_FLUSH)
+ num_store_dw = 4;
- cs = intel_ring_begin(rq, 2);
+ cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = cmd;
- *cs++ = MI_NOOP;
+ while (num_store_dw--) {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ *cs++ = i915_scratch_offset(rq->i915);
+ *cs++ = 0;
+ }
+ *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
+
intel_ring_advance(rq, cs);
return 0;
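As a sanity check on the dword accounting above (a sketch, not part of the patch), the 2 + 3 * num_store_dw dwords requested from intel_ring_begin() match exactly what is emitted:

/* With EMIT_FLUSH set, num_store_dw == 4, so 2 + 3 * 4 = 14 dwords:
 *   1 dword   - cmd (MI_FLUSH, plus MI_READ_FLUSH on EMIT_INVALIDATE)
 *   12 dwords - 4 x { MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL, scratch, 0 }
 *   1 dword   - trailing MI_FLUSH | MI_NO_WRITE_FLUSH
 * Without EMIT_FLUSH, num_store_dw == 0 and only the 2 fixed dwords remain.
 */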
@@ -150,8 +159,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*/
if (mode & EMIT_INVALIDATE) {
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
@@ -159,8 +167,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
*cs++ = MI_FLUSH;
*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
- *cs++ = i915_ggtt_offset(rq->engine->scratch) |
- PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
*cs++ = 0;
*cs++ = 0;
}
@@ -212,8 +219,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
static int
intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs;
cs = intel_ring_begin(rq, 6);
@@ -246,8 +252,7 @@ intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
int ret;
@@ -316,8 +321,7 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
+ u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs, flags = 0;
/*
@@ -529,6 +533,13 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
+ if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
+ I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
+ I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
+ if (HAS_VEBOX(dev_priv))
+ I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
+ }
+
/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(engine);
@@ -546,10 +557,11 @@ static int init_ring_common(struct intel_engine_cs *engine)
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
-
intel_ring_update_space(ring);
+
+ /* First wake the ring up to an empty/idle ring */
I915_WRITE_HEAD(engine, ring->head);
- I915_WRITE_TAIL(engine, ring->tail);
+ I915_WRITE_TAIL(engine, ring->head);
(void)I915_READ_TAIL(engine);
I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
@@ -574,6 +586,12 @@ static int init_ring_common(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) > 2)
I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+ /* Now awake, let it get started */
+ if (ring->tail != ring->head) {
+ I915_WRITE_TAIL(engine, ring->tail);
+ (void)I915_READ_TAIL(engine);
+ }
+
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_wakeup(engine);
out:
@@ -608,7 +626,9 @@ static void skip_request(struct i915_request *rq)
static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
{
- GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
+ GEM_TRACE("%s request global=%d, current=%d\n",
+ engine->name, rq ? rq->global_seqno : 0,
+ intel_engine_get_seqno(engine));
/*
* Try to restore the logical GPU state to match the continuation
@@ -640,7 +660,7 @@ static int intel_rcs_ctx_init(struct i915_request *rq)
{
int ret;
- ret = intel_ctx_workarounds_emit(rq);
+ ret = intel_engine_emit_ctx_wa(rq);
if (ret != 0)
return ret;
@@ -658,8 +678,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
if (ret)
return ret;
- intel_whitelist_workarounds_apply(engine);
-
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
if (IS_GEN(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
@@ -741,9 +759,18 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, link) {
GEM_BUG_ON(!request->global_seqno);
- if (!i915_request_completed(request))
- dma_fence_set_error(&request->fence, -EIO);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &request->fence.flags))
+ continue;
+
+ dma_fence_set_error(&request->fence, -EIO);
}
+
+ intel_write_status_page(engine,
+ I915_GEM_HWS_INDEX,
+ intel_engine_last_submit(engine));
+
/* Remaining _unready_ requests will be nop'ed when submitted */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -971,7 +998,7 @@ i965_emit_bb_start(struct i915_request *rq,
}
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
-#define I830_BATCH_LIMIT (256*1024)
+#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
static int
@@ -979,7 +1006,9 @@ i830_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
+ u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+
+ GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1055,8 +1084,7 @@ i915_emit_bb_start(struct i915_request *rq,
int intel_ring_pin(struct intel_ring *ring)
{
struct i915_vma *vma = ring->vma;
- enum i915_map_type map =
- HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC;
+ enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
unsigned int flags;
void *addr;
int ret;
@@ -1437,7 +1465,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
struct i915_timeline *timeline;
struct intel_ring *ring;
- unsigned int size;
int err;
intel_engine_setup_common(engine);
@@ -1462,21 +1489,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
- size = PAGE_SIZE;
- if (HAS_BROKEN_CS_TLB(engine->i915))
- size = I830_WA_SIZE;
- err = intel_engine_create_scratch(engine, size);
- if (err)
- goto err_unpin;
-
err = intel_engine_init_common(engine);
if (err)
- goto err_scratch;
+ goto err_unpin;
return 0;
-err_scratch:
- intel_engine_cleanup_scratch(engine);
err_unpin:
intel_ring_unpin(ring);
err_ring:
@@ -1550,7 +1568,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
@@ -1659,7 +1677,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* Insert a delay before the next switch! */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
}
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..72edaa7ff411 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
@@ -15,6 +15,7 @@
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_gpu_commands.h"
+#include "intel_workarounds.h"
struct drm_printer;
struct i915_sched_attr;
@@ -93,11 +94,11 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
#define I915_MAX_SUBSLICES 8
#define instdone_slice_mask(dev_priv__) \
- (INTEL_GEN(dev_priv__) == 7 ? \
+ (IS_GEN7(dev_priv__) ? \
1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
#define instdone_subslice_mask(dev_priv__) \
- (INTEL_GEN(dev_priv__) == 7 ? \
+ (IS_GEN7(dev_priv__) ? \
1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
@@ -190,11 +191,22 @@ enum intel_engine_id {
};
struct i915_priolist {
+ struct list_head requests[I915_PRIORITY_COUNT];
struct rb_node node;
- struct list_head requests;
+ unsigned long used;
int priority;
};
+#define priolist_for_each_request(it, plist, idx) \
+ for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+ list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+ for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+ list_for_each_entry_safe(it, n, \
+ &(plist)->requests[idx - 1], \
+ sched.link)
+
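A hedged sketch of the bitmask bookkeeping the consume variant depends on: each bit of 'used' marks a non-empty requests[] list, ffs() yields the lowest set bit as a 1-based index, and clearing that bit after draining the list advances the scan.

/* Illustration with made-up state: requests[0] and requests[3] non-empty,
 * so used == 0b1001:
 *   ffs(0b1001) == 1 -> drain requests[0], used &= ~BIT(0) -> 0b1000
 *   ffs(0b1000) == 4 -> drain requests[3], used &= ~BIT(3) -> 0b0000
 *   ffs(0)      == 0 -> loop terminates
 */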
struct st_preempt_hang {
struct completion completion;
bool inject_hang;
@@ -302,13 +314,6 @@ struct intel_engine_execlists {
struct rb_root_cached queue;
/**
- * @csb_read: control register for Context Switch buffer
- *
- * Note this register is always in mmio.
- */
- u32 __iomem *csb_read;
-
- /**
* @csb_write: control register for Context Switch buffer
*
* Note this register may be either mmio or HWSP shadow.
@@ -328,15 +333,6 @@ struct intel_engine_execlists {
u32 preempt_complete_status;
/**
- * @csb_write_reset: reset value for CSB write pointer
- *
- * As the CSB write pointer maybe either in HWSP or as a field
- * inside an mmio register, we want to reprogram it slightly
- * differently to avoid later confusion.
- */
- u32 csb_write_reset;
-
- /**
* @csb_head: context status buffer head
*/
u8 csb_head;
@@ -440,7 +436,9 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
- struct i915_vma *scratch;
+ struct i915_wa_list ctx_wa_list;
+ struct i915_wa_list wa_list;
+ struct i915_wa_list whitelist;
u32 irq_keep_mask; /* always keep these interrupts */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
@@ -487,11 +485,10 @@ struct intel_engine_cs {
*/
void (*submit_request)(struct i915_request *rq);
- /* Call when the priority on a request has changed and it and its
+ /*
+ * Call when the priority on a request has changed and it and its
* dependencies may need rescheduling. Note the request itself may
* not be ready to run!
- *
- * Called under the struct_mutex.
*/
void (*schedule)(struct i915_request *request,
const struct i915_sched_attr *attr);
@@ -898,10 +895,6 @@ void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
-int intel_engine_create_scratch(struct intel_engine_cs *engine,
- unsigned int size);
-void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);
-
int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 44e4491a4918..4350a5270423 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -76,6 +76,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "TRANSCODER_C";
case POWER_DOMAIN_TRANSCODER_EDP:
return "TRANSCODER_EDP";
+ case POWER_DOMAIN_TRANSCODER_EDP_VDSC:
+ return "TRANSCODER_EDP_VDSC";
case POWER_DOMAIN_TRANSCODER_DSI_A:
return "TRANSCODER_DSI_A";
case POWER_DOMAIN_TRANSCODER_DSI_C:
@@ -208,7 +210,7 @@ bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
is_enabled = true;
- for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
+ for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain)) {
if (power_well->desc->always_on)
continue;
@@ -436,6 +438,15 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
I915_WRITE(ICL_PORT_CL_DW12(port), val | ICL_LANE_ENABLE_AUX);
hsw_wait_for_power_well_enable(dev_priv, power_well);
+
+ /* Display WA #1178: icl */
+ if (IS_ICELAKE(dev_priv) &&
+ pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
+ !intel_bios_is_port_edp(dev_priv, port)) {
+ val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+ val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
+ I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+ }
}
static void
@@ -456,6 +467,25 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
+#define ICL_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+
+static void
+icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+ u32 val;
+
+ val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+ val &= ~DP_AUX_CH_CTL_TBT_IO;
+ if (power_well->desc->hsw.is_tc_tbt)
+ val |= DP_AUX_CH_CTL_TBT_IO;
+ I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+
+ hsw_power_well_enable(dev_priv, power_well);
+}
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -465,11 +495,25 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
+ enum i915_power_well_id id = power_well->desc->id;
int pw_idx = power_well->desc->hsw.idx;
u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx) |
HSW_PWR_WELL_CTL_STATE(pw_idx);
+ u32 val;
+
+ val = I915_READ(regs->driver);
- return (I915_READ(regs->driver) & mask) == mask;
+ /*
+ * On GEN9 big core, due to a DMC bug, the driver's request bits for PW1
+ * and the MISC_IO PW will not be restored, so check instead for the
+ * BIOS's own request bits, which are forced-on for these power wells
+ * when exiting DC5/6.
+ */
+ if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
+ (id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
+ val |= I915_READ(regs->bios);
+
+ return (val & mask) == mask;
}
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
@@ -551,7 +595,9 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
u32 mask;
mask = DC_STATE_EN_UPTO_DC5;
- if (IS_GEN9_LP(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
+ mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9;
+ else if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_EN_DC9;
else
mask |= DC_STATE_EN_UPTO_DC6;
@@ -624,8 +670,13 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
assert_can_enable_dc9(dev_priv);
DRM_DEBUG_KMS("Enabling DC9\n");
-
- intel_power_sequencer_reset(dev_priv);
+ /*
+ * Power sequencer reset is not needed on
+ * platforms with South Display Engine on PCH,
+ * because PPS registers are always on.
+ */
+ if (!HAS_PCH_SPLIT(dev_priv))
+ intel_power_sequencer_reset(dev_priv);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
@@ -707,7 +758,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
assert_csr_loaded(dev_priv);
}
-static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
@@ -808,6 +859,14 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
if (IS_GEN9_LP(dev_priv))
bxt_verify_ddi_phy_power_wells(dev_priv);
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ /*
+ * DMC retains HW context only for port A; the other combo
+ * PHY's HW context (port B) is lost after DC transitions,
+ * so we need to restore it manually.
+ */
+ icl_combo_phys_init(dev_priv);
}
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1608,7 +1667,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_display_power_domain_str(domain));
power_domains->domain_use_count[domain]--;
- for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
+ for_each_power_domain_well_reverse(dev_priv, power_well, BIT_ULL(domain))
intel_power_well_put(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
@@ -1971,9 +2030,9 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
*/
#define ICL_PW_2_POWER_DOMAINS ( \
ICL_PW_3_POWER_DOMAINS | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_EDP_VDSC) | \
BIT_ULL(POWER_DOMAIN_INIT))
/*
- * - eDP/DSI VDSC
* - KVMR (HW control)
*/
#define ICL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
@@ -2041,7 +2100,7 @@ static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
static const struct i915_power_well_desc i9xx_always_on_power_well[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2058,7 +2117,7 @@ static const struct i915_power_well_ops i830_pipes_power_well_ops = {
static const struct i915_power_well_desc i830_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2102,7 +2161,7 @@ static const struct i915_power_well_regs hsw_power_well_regs = {
static const struct i915_power_well_desc hsw_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2123,7 +2182,7 @@ static const struct i915_power_well_desc hsw_power_wells[] = {
static const struct i915_power_well_desc bdw_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2166,7 +2225,7 @@ static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
static const struct i915_power_well_desc vlv_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2242,7 +2301,7 @@ static const struct i915_power_well_desc vlv_power_wells[] = {
static const struct i915_power_well_desc chv_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2293,7 +2352,7 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
static const struct i915_power_well_desc skl_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2301,6 +2360,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2313,6 +2373,7 @@ static const struct i915_power_well_desc skl_power_wells[] = {
{
.name = "MISC IO power well",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_MISC_IO,
@@ -2385,13 +2446,15 @@ static const struct i915_power_well_desc skl_power_wells[] = {
static const struct i915_power_well_desc bxt_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
},
{
.name = "power well 1",
+ /* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2443,7 +2506,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = {
static const struct i915_power_well_desc glk_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2451,6 +2514,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2571,7 +2635,7 @@ static const struct i915_power_well_desc glk_power_wells[] = {
static const struct i915_power_well_desc cnl_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2579,6 +2643,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2716,6 +2781,13 @@ static const struct i915_power_well_ops icl_combo_phy_aux_power_well_ops = {
.is_enabled = hsw_power_well_enabled,
};
+static const struct i915_power_well_ops icl_tc_phy_aux_power_well_ops = {
+ .sync_hw = hsw_power_well_sync_hw,
+ .enable = icl_tc_phy_aux_power_well_enable,
+ .disable = hsw_power_well_disable,
+ .is_enabled = hsw_power_well_enabled,
+};
+
static const struct i915_power_well_regs icl_aux_power_well_regs = {
.bios = ICL_PWR_WELL_CTL_AUX1,
.driver = ICL_PWR_WELL_CTL_AUX2,
@@ -2731,7 +2803,7 @@ static const struct i915_power_well_regs icl_ddi_power_well_regs = {
static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "always-on",
- .always_on = 1,
+ .always_on = true,
.domains = POWER_DOMAIN_MASK,
.ops = &i9xx_always_on_power_well_ops,
.id = DISP_PW_ID_NONE,
@@ -2739,6 +2811,7 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "power well 1",
/* Handled by the DMC firmware */
+ .always_on = true,
.domains = 0,
.ops = &hsw_power_well_ops,
.id = SKL_DISP_PW_1,
@@ -2861,81 +2934,89 @@ static const struct i915_power_well_desc icl_power_wells[] = {
{
.name = "AUX C",
.domains = ICL_AUX_C_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX D",
.domains = ICL_AUX_D_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX E",
.domains = ICL_AUX_E_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_E,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX F",
.domains = ICL_AUX_F_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_F,
+ .hsw.is_tc_tbt = false,
},
},
{
.name = "AUX TBT1",
.domains = ICL_AUX_TBT1_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT1,
+ .hsw.is_tc_tbt = true,
},
},
{
.name = "AUX TBT2",
.domains = ICL_AUX_TBT2_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT2,
+ .hsw.is_tc_tbt = true,
},
},
{
.name = "AUX TBT3",
.domains = ICL_AUX_TBT3_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT3,
+ .hsw.is_tc_tbt = true,
},
},
{
.name = "AUX TBT4",
.domains = ICL_AUX_TBT4_IO_POWER_DOMAINS,
- .ops = &hsw_power_well_ops,
+ .ops = &icl_tc_phy_aux_power_well_ops,
.id = DISP_PW_ID_NONE,
{
.hsw.regs = &icl_aux_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_AUX_TBT4,
+ .hsw.is_tc_tbt = true,
},
},
{
@@ -2969,17 +3050,20 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (IS_GEN9_BC(dev_priv) || INTEL_INFO(dev_priv)->gen >= 10) {
+ if (INTEL_GEN(dev_priv) >= 11) {
max_dc = 2;
- mask = 0;
- } else if (IS_GEN9_LP(dev_priv)) {
- max_dc = 1;
/*
* DC9 has a separate HW flow from the rest of the DC states,
* not depending on the DMC firmware. It's needed by system
* suspend/resume, so allow it unconditionally.
*/
mask = DC_STATE_EN_DC9;
+ } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
+ max_dc = 2;
+ mask = 0;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ max_dc = 1;
+ mask = DC_STATE_EN_DC9;
} else {
max_dc = 0;
mask = 0;
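The ladder now tests platforms newest-first; a hedged summary of what each class ends up with (the clamp of requested_dc against max_dc happens later in the function):

	/*
	 * gen11+            : max_dc = 2, mask = DC_STATE_EN_DC9
	 * gen10 / gen9 BC   : max_dc = 2, mask = 0
	 * gen9 LP (BXT/GLK) : max_dc = 1, mask = DC_STATE_EN_DC9
	 * everything else   : max_dc = 0, mask = 0
	 */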
@@ -3075,12 +3159,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (IS_ICELAKE(dev_priv)) {
err = set_power_wells(power_domains, icl_power_wells);
- } else if (IS_HASWELL(dev_priv)) {
- err = set_power_wells(power_domains, hsw_power_wells);
- } else if (IS_BROADWELL(dev_priv)) {
- err = set_power_wells(power_domains, bdw_power_wells);
- } else if (IS_GEN9_BC(dev_priv)) {
- err = set_power_wells(power_domains, skl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
err = set_power_wells(power_domains, cnl_power_wells);
@@ -3092,13 +3170,18 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
*/
if (!IS_CNL_WITH_PORT_F(dev_priv))
power_domains->power_well_count -= 2;
-
- } else if (IS_BROXTON(dev_priv)) {
- err = set_power_wells(power_domains, bxt_power_wells);
} else if (IS_GEMINILAKE(dev_priv)) {
err = set_power_wells(power_domains, glk_power_wells);
+ } else if (IS_BROXTON(dev_priv)) {
+ err = set_power_wells(power_domains, bxt_power_wells);
+ } else if (IS_GEN9_BC(dev_priv)) {
+ err = set_power_wells(power_domains, skl_power_wells);
} else if (IS_CHERRYVIEW(dev_priv)) {
err = set_power_wells(power_domains, chv_power_wells);
+ } else if (IS_BROADWELL(dev_priv)) {
+ err = set_power_wells(power_domains, bdw_power_wells);
+ } else if (IS_HASWELL(dev_priv)) {
+ err = set_power_wells(power_domains, hsw_power_wells);
} else if (IS_VALLEYVIEW(dev_priv)) {
err = set_power_wells(power_domains, vlv_power_wells);
} else if (IS_I830(dev_priv)) {
@@ -3238,18 +3321,40 @@ static void icl_mbus_init(struct drm_i915_private *dev_priv)
I915_WRITE(MBUS_ABOX_CTL, val);
}
+static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
+ bool enable)
+{
+ i915_reg_t reg;
+ u32 reset_bits, val;
+
+ if (IS_IVYBRIDGE(dev_priv)) {
+ reg = GEN7_MSG_CTL;
+ reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
+ } else {
+ reg = HSW_NDE_RSTWRN_OPT;
+ reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
+ }
+
+ val = I915_READ(reg);
+
+ if (enable)
+ val |= reset_bits;
+ else
+ val &= ~reset_bits;
+
+ I915_WRITE(reg, val);
+}
+
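Both polarities of the new helper appear later in this patch; a usage sketch (dev_priv assumed valid):

	/* skl/cnl/icl display core init: enable unless the PCH is a no-op */
	intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));

	/* bxt display core init: explicitly disabled rather than left to BIOS */
	intel_pch_reset_handshake(dev_priv, false);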
static void skl_display_core_init(struct drm_i915_private *dev_priv,
bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- uint32_t val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* enable PCH reset handshake */
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
/* enable PG1 and Misc I/O */
mutex_lock(&power_domains->lock);
@@ -3305,7 +3410,6 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- uint32_t val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -3315,9 +3419,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
* Move the handshake programming to initialization sequence.
* Previously was left up to BIOS.
*/
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- val &= ~RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
+ intel_pch_reset_handshake(dev_priv, false);
/* Enable PG1 */
mutex_lock(&power_domains->lock);
@@ -3363,101 +3465,18 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
}
-enum {
- PROCMON_0_85V_DOT_0,
- PROCMON_0_95V_DOT_0,
- PROCMON_0_95V_DOT_1,
- PROCMON_1_05V_DOT_0,
- PROCMON_1_05V_DOT_1,
-};
-
-static const struct cnl_procmon {
- u32 dw1, dw9, dw10;
-} cnl_procmon_values[] = {
- [PROCMON_0_85V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
- [PROCMON_0_95V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x86E172C7, .dw10 = 0x77CA5EAB, },
- [PROCMON_0_95V_DOT_1] =
- { .dw1 = 0x00000000, .dw9 = 0x93F87FE1, .dw10 = 0x8AE871C5, },
- [PROCMON_1_05V_DOT_0] =
- { .dw1 = 0x00000000, .dw9 = 0x98FA82DD, .dw10 = 0x89E46DC1, },
- [PROCMON_1_05V_DOT_1] =
- { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
-};
-
-/*
- * CNL has just one set of registers, while ICL has two sets: one for port A and
- * the other for port B. The CNL registers are equivalent to the ICL port A
- * registers, that's why we call the ICL macros even though the function has CNL
- * on its name.
- */
-static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
- enum port port)
-{
- const struct cnl_procmon *procmon;
- u32 val;
-
- val = I915_READ(ICL_PORT_COMP_DW3(port));
- switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
- default:
- MISSING_CASE(val);
- /* fall through */
- case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
- procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
- break;
- case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
- procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
- break;
- case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
- procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
- break;
- case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
- procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
- break;
- case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
- procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
- break;
- }
-
- val = I915_READ(ICL_PORT_COMP_DW1(port));
- val &= ~((0xff << 16) | 0xff);
- val |= procmon->dw1;
- I915_WRITE(ICL_PORT_COMP_DW1(port), val);
-
- I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9);
- I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10);
-}
-
static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* 1. Enable PCH Reset Handshake */
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- val |= RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
-
- /* 2. Enable Comp */
- val = I915_READ(CHICKEN_MISC_2);
- val &= ~CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
-
- /* Dummy PORT_A to get the correct CNL register from the ICL macro */
- cnl_set_procmon_ref_values(dev_priv, PORT_A);
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
- val = I915_READ(CNL_PORT_COMP_DW0);
- val |= COMP_INIT;
- I915_WRITE(CNL_PORT_COMP_DW0, val);
-
- /* 3. */
- val = I915_READ(CNL_PORT_CL1CM_DW5);
- val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+ /* 2-3. */
+ cnl_combo_phys_init(dev_priv);
/*
* 4. Enable Power Well 1 (PG1).
@@ -3482,7 +3501,6 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -3506,44 +3524,23 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
usleep_range(10, 30); /* 10 us delay per Bspec */
- /* 5. Disable Comp */
- val = I915_READ(CHICKEN_MISC_2);
- val |= CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
+ /* 5. */
+ cnl_combo_phys_uninit(dev_priv);
}
-static void icl_display_core_init(struct drm_i915_private *dev_priv,
- bool resume)
+void icl_display_core_init(struct drm_i915_private *dev_priv,
+ bool resume)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- enum port port;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/* 1. Enable PCH reset handshake. */
- val = I915_READ(HSW_NDE_RSTWRN_OPT);
- val |= RESET_PCH_HANDSHAKE_ENABLE;
- I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
-
- for (port = PORT_A; port <= PORT_B; port++) {
- /* 2. Enable DDI combo PHY comp. */
- val = I915_READ(ICL_PHY_MISC(port));
- val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
-
- cnl_set_procmon_ref_values(dev_priv, port);
-
- val = I915_READ(ICL_PORT_COMP_DW0(port));
- val |= COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(port), val);
-
- /* 3. Set power down enable. */
- val = I915_READ(ICL_PORT_CL_DW5(port));
- val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(ICL_PORT_CL_DW5(port), val);
- }
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+
+ /* 2-3. */
+ icl_combo_phys_init(dev_priv);
/*
* 4. Enable Power Well 1 (PG1).
@@ -3567,12 +3564,10 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
intel_csr_load_program(dev_priv);
}
-static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+void icl_display_core_uninit(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *well;
- enum port port;
- u32 val;
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -3594,12 +3589,8 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
intel_power_well_disable(dev_priv, well);
mutex_unlock(&power_domains->lock);
- /* 5. Disable Comp */
- for (port = PORT_A; port <= PORT_B; port++) {
- val = I915_READ(ICL_PHY_MISC(port));
- val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(port), val);
- }
+ /* 5. */
+ icl_combo_phys_uninit(dev_priv);
}
static void chv_phy_control_init(struct drm_i915_private *dev_priv)
@@ -3757,7 +3748,8 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
- }
+ } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
+ intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
/*
* Keep all power wells enabled for any dependent HW access during
@@ -3951,14 +3943,6 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
int domains_count;
bool enabled;
- /*
- * Power wells not belonging to any domain (like the MISC_IO
- * and PW1 power wells) are under FW control, so ignore them,
- * since their state can change asynchronously.
- */
- if (!power_well->desc->domains)
- continue;
-
enabled = power_well->desc->ops->is_enabled(dev_priv,
power_well);
if ((power_well->count || power_well->desc->always_on) !=
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 701372e512a8..5805ec1aba12 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -105,11 +105,6 @@ struct intel_sdvo {
bool has_hdmi_audio;
bool rgb_quant_range_selectable;
- /**
- * This is sdvo fixed pannel mode pointer
- */
- struct drm_display_mode *sdvo_lvds_fixed_mode;
-
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
@@ -765,10 +760,14 @@ intel_sdvo_create_preferred_input_timing(struct intel_sdvo *intel_sdvo,
args.height = height;
args.interlace = 0;
- if (IS_LVDS(intel_sdvo_connector) &&
- (intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
- intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
- args.scaled = 1;
+ if (IS_LVDS(intel_sdvo_connector)) {
+ const struct drm_display_mode *fixed_mode =
+ intel_sdvo_connector->base.panel.fixed_mode;
+
+ if (fixed_mode->hdisplay != width ||
+ fixed_mode->vdisplay != height)
+ args.scaled = 1;
+ }
return intel_sdvo_set_value(intel_sdvo,
SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
@@ -1123,6 +1122,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
pipe_config->pipe_bpp = 8*3;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (HAS_PCH_SPLIT(to_i915(encoder->base.dev)))
pipe_config->has_pch_encoder = true;
@@ -1144,7 +1144,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->sdvo_tv_clock = true;
} else if (IS_LVDS(intel_sdvo_connector)) {
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
- intel_sdvo->sdvo_lvds_fixed_mode))
+ intel_sdvo_connector->base.panel.fixed_mode))
return false;
(void) intel_sdvo_get_preferred_input_mode(intel_sdvo,
@@ -1301,7 +1301,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
/* lvds has a special fixed output timing. */
if (IS_LVDS(intel_sdvo_connector))
intel_sdvo_get_dtd_from_mode(&output_dtd,
- intel_sdvo->sdvo_lvds_fixed_mode);
+ intel_sdvo_connector->base.panel.fixed_mode);
else
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
@@ -1642,10 +1642,13 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
if (IS_LVDS(intel_sdvo_connector)) {
- if (mode->hdisplay > intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+ const struct drm_display_mode *fixed_mode =
+ intel_sdvo_connector->base.panel.fixed_mode;
+
+ if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
- if (mode->vdisplay > intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
@@ -2058,14 +2061,6 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
return !list_empty(&connector->probed_modes);
}
-static void intel_sdvo_destroy(struct drm_connector *connector)
-{
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-
- drm_connector_cleanup(connector);
- kfree(intel_sdvo_connector);
-}
-
static int
intel_sdvo_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
@@ -2228,7 +2223,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
.atomic_set_property = intel_sdvo_connector_atomic_set_property,
.late_register = intel_sdvo_connector_register,
.early_unregister = intel_sdvo_connector_unregister,
- .destroy = intel_sdvo_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_sdvo_connector_duplicate_state,
};
@@ -2267,10 +2262,6 @@ static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
- if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
- drm_mode_destroy(encoder->dev,
- intel_sdvo->sdvo_lvds_fixed_mode);
-
i2c_del_adapter(&intel_sdvo->ddc);
intel_encoder_destroy(encoder);
}
@@ -2583,7 +2574,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
return true;
err:
- intel_sdvo_destroy(connector);
+ intel_connector_destroy(connector);
return false;
}
@@ -2663,19 +2654,22 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
list_for_each_entry(mode, &connector->probed_modes, head) {
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
- intel_sdvo->sdvo_lvds_fixed_mode =
+ struct drm_display_mode *fixed_mode =
drm_mode_duplicate(connector->dev, mode);
+
+ intel_panel_init(&intel_connector->panel,
+ fixed_mode, NULL);
break;
}
}
- if (!intel_sdvo->sdvo_lvds_fixed_mode)
+ if (!intel_connector->panel.fixed_mode)
goto err;
return true;
err:
- intel_sdvo_destroy(connector);
+ intel_connector_destroy(connector);
return false;
}
@@ -2745,7 +2739,7 @@ static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
&dev->mode_config.connector_list, head) {
if (intel_attached_encoder(connector) == &intel_sdvo->base) {
drm_connector_unregister(connector);
- intel_sdvo_destroy(connector);
+ intel_connector_destroy(connector);
}
}
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d3090a7537bb..d2e003d8f3db 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -40,6 +40,7 @@
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include <drm/drm_color_mgmt.h>
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
@@ -275,17 +276,24 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
src->y2 = (src_y + src_h) << 16;
if (fb->format->is_yuv &&
- fb->format->format != DRM_FORMAT_NV12 &&
(src_x & 1 || src_w & 1)) {
DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
src_x, src_w);
return -EINVAL;
}
+ if (fb->format->is_yuv &&
+ fb->format->num_planes > 1 &&
+ (src_y & 1 || src_h & 1)) {
+ DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
+ src_y, src_h);
+ return -EINVAL;
+ }
+
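The added check reflects the 2x2 chroma subsampling of two-plane formats such as NV12; a hedged illustration:

	/*
	 * NV12 stores chroma at half resolution in both axes, so an odd
	 * source coordinate has no aligned chroma sample:
	 *   src_x/src_w odd -> rejected for any YUV format (existing check)
	 *   src_y/src_h odd -> now also rejected, but only for planar YUV
	 */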
return 0;
}
-unsigned int
+static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation)
@@ -328,7 +336,8 @@ skl_program_scaler(struct intel_plane *plane,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
- if (plane_state->base.fb->format->format == DRM_FORMAT_NV12) {
+ if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 &&
+ !icl_is_hdr_plane(plane)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -346,7 +355,6 @@ skl_program_scaler(struct intel_plane *plane,
I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
- I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
@@ -355,70 +363,239 @@ skl_program_scaler(struct intel_plane *plane,
I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
}
-void
-skl_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+/* Preoffset values for YUV to RGB Conversion */
+#define PREOFF_YUV_TO_RGB_HI 0x1800
+#define PREOFF_YUV_TO_RGB_ME 0x1F00
+#define PREOFF_YUV_TO_RGB_LO 0x1800
+
+#define ROFF(x) (((x) & 0xffff) << 16)
+#define GOFF(x) (((x) & 0xffff) << 0)
+#define BOFF(x) (((x) & 0xffff) << 16)
+
+static void
+icl_program_input_csc(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ enum plane_id plane_id = plane->id;
+
+ static const u16 input_csc_matrix[][9] = {
+ /*
+ * BT.601 full range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.000, 0.000, 1.371,
+ * 1.000, -0.336, -0.698,
+ * 1.000, 1.732, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x7AF8, 0x7800, 0x0,
+ 0x8B28, 0x7800, 0x9AC0,
+ 0x0, 0x7800, 0x7DD8,
+ },
+ /*
+ * BT.709 full range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.000, 0.000, 1.574,
+ * 1.000, -0.187, -0.468,
+ * 1.000, 1.855, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x7C98, 0x7800, 0x0,
+ 0x9EF8, 0x7800, 0xABF8,
+ 0x0, 0x7800, 0x7ED8,
+ },
+ };
+
+ /* Matrix for Limited Range to Full Range Conversion */
+ static const u16 input_csc_matrix_lr[][9] = {
+ /*
+ * BT.601 Limited range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.164384, 0.000, 1.596370,
+ * 1.138393, -0.382500, -0.794598,
+ * 1.138393, 1.971696, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT601] = {
+ 0x7CC8, 0x7950, 0x0,
+ 0x8CB8, 0x7918, 0x9C40,
+ 0x0, 0x7918, 0x7FC8,
+ },
+ /*
+ * BT.709 Limited range YCbCr -> full range RGB
+ * The matrix required is:
+ * [1.164, 0.000, 1.833671,
+ * 1.138393, -0.213249, -0.532909,
+ * 1.138393, 2.112402, 0.0000]
+ */
+ [DRM_COLOR_YCBCR_BT709] = {
+ 0x7EA8, 0x7950, 0x0,
+ 0x8888, 0x7918, 0xADA8,
+ 0x0, 0x7918, 0x6870,
+ },
+ };
+ const u16 *csc;
+
+ if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+ csc = input_csc_matrix[plane_state->base.color_encoding];
+ else
+ csc = input_csc_matrix_lr[plane_state->base.color_encoding];
+
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
+ GOFF(csc[1]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
+ GOFF(csc[4]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
+ GOFF(csc[7]));
+ I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
+
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+ PREOFF_YUV_TO_RGB_HI);
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
+ I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+ PREOFF_YUV_TO_RGB_LO);
+ I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+ I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+ I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+}
+
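As a cross-check of the fixed-point tables above, the BT.601 full-range matrix quoted in the comment applied in floating point (illustration only, not driver code; Y assumed in [0,1], Cb/Cr centered at 0.5):

	static void bt601_full_ycbcr_to_rgb(const double in[3], double out[3])
	{
		double y = in[0], cb = in[1] - 0.5, cr = in[2] - 0.5;

		out[0] = y + 1.371 * cr;		/* R */
		out[1] = y - 0.336 * cb - 0.698 * cr;	/* G */
		out[2] = y + 1.732 * cb;		/* B */
	}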
+static void
+skl_program_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane, bool slave, u32 plane_ctl)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- u32 plane_ctl = plane_state->ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 surf_addr = plane_state->color_plane[0].offset;
- u32 stride = skl_plane_stride(plane_state, 0);
+ u32 surf_addr = plane_state->color_plane[color_plane].offset;
+ u32 stride = skl_plane_stride(plane_state, color_plane);
u32 aux_stride = skl_plane_stride(plane_state, 1);
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t x = plane_state->color_plane[0].x;
- uint32_t y = plane_state->color_plane[0].y;
+ uint32_t x = plane_state->color_plane[color_plane].x;
+ uint32_t y = plane_state->color_plane[color_plane].y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ struct intel_plane *linked = plane_state->linked_plane;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ u8 alpha = plane_state->base.alpha >> 8;
unsigned long irqflags;
+ u32 keymsk, keymax;
/* Sizes are 0 based */
src_w--;
src_h--;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
- plane_state->color_ctl);
+ keymsk = key->channel_mask & 0x3ffffff;
+ if (alpha < 0xff)
+ keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
- if (key->flags) {
- I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value);
- I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
+ /* The scaler will handle the output position */
+ if (plane_state->scaler_id >= 0) {
+ crtc_x = 0;
+ crtc_y = 0;
}
- I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+ I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
(plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->color_plane[1].y << 16) |
- plane_state->color_plane[1].x);
- if (plane_state->scaler_id >= 0) {
- skl_program_scaler(plane, crtc_state, plane_state);
+ if (icl_is_hdr_plane(plane)) {
+ u32 cus_ctl = 0;
+
+ if (linked) {
+ /* Enable and use MPEG-2 chroma siting */
+ cus_ctl = PLANE_CUS_ENABLE |
+ PLANE_CUS_HPHASE_0 |
+ PLANE_CUS_VPHASE_SIGN_NEGATIVE |
+ PLANE_CUS_VPHASE_0_25;
+
+ if (linked->id == PLANE_SPRITE5)
+ cus_ctl |= PLANE_CUS_PLANE_7;
+ else if (linked->id == PLANE_SPRITE4)
+ cus_ctl |= PLANE_CUS_PLANE_6;
+ else
+ MISSING_CASE(linked->id);
+ }
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
- } else {
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), cus_ctl);
}
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
+ plane_state->color_ctl);
+
+ if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+ icl_program_input_csc(plane, crtc_state, plane_state);
+
+ skl_write_plane_wm(plane, crtc_state);
+
+ I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+ I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
+ I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+
+ I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+
+ if (INTEL_GEN(dev_priv) < 11)
+ I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->color_plane[1].y << 16) |
+ plane_state->color_plane[1].x);
+
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + surf_addr);
- POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
+
+ if (!slave && plane_state->scaler_id >= 0)
+ skl_program_scaler(plane, crtc_state, plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
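The same arming discipline recurs in the vlv/ivb/g4x update paths below; a hedged distillation (write_fw() stands in for I915_WRITE_FW):

	/* program all double-buffered state first ... */
	write_fw(PLANE_STRIDE(pipe, plane_id), stride);
	write_fw(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
	/* ... then CTL, which self-arms if the plane was off ... */
	write_fw(PLANE_CTL(pipe, plane_id), plane_ctl);
	/* ... and last SURF, the register that latches the whole update */
	write_fw(PLANE_SURF(pipe, plane_id), base + surf_addr);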
-void
-skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+static void
+skl_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ int color_plane = 0;
+
+ if (plane_state->linked_plane) {
+ /* Program the UV plane */
+ color_plane = 1;
+ }
+
+ skl_program_plane(plane, crtc_state, plane_state,
+ color_plane, false, plane_state->ctl);
+}
+
+static void
+icl_update_slave(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ skl_program_plane(plane, crtc_state, plane_state, 0, true,
+ plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);
+}
+
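Taken together with skl_update_plane() above, the ICL NV12 split reads as this hedged sketch: the master plane carries the UV surface while the linked slave carries Y:

	/* master: UV surface (color plane 1), normal control bits */
	skl_program_plane(plane, crtc_state, plane_state, 1, false,
			  plane_state->ctl);
	/* slave: Y surface (color plane 0), flagged as the Y plane */
	skl_program_plane(plane, crtc_state, plane_state, 0, true,
			  plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE);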
+static void
+skl_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
@@ -427,15 +604,15 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
+ skl_write_plane_wm(plane, crtc_state);
+ I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
- POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-bool
+static bool
skl_plane_get_hw_state(struct intel_plane *plane,
enum pipe *pipe)
{
@@ -628,7 +805,6 @@ vlv_update_plane(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
u32 sprctl = plane_state->ctl;
@@ -651,38 +827,41 @@ vlv_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- vlv_update_clrc(plane_state);
+ I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
+ plane_state->color_plane[0].stride);
+ I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(plane_state);
if (key->flags) {
I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
+ I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
}
- I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
- plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
- else
- I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+ I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+ I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
- I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
-
- I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
I915_WRITE_FW(SPSURF(pipe, plane_id),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- POSTING_READ_FW(SPSURF(pipe, plane_id));
+
+ vlv_update_clrc(plane_state);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+vlv_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -692,9 +871,7 @@ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
-
I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
- POSTING_READ_FW(SPSURF(pipe, plane_id));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -789,7 +966,6 @@ ivb_update_plane(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u32 sprctl = plane_state->ctl, sprscale = 0;
u32 sprsurf_offset = plane_state->color_plane[0].offset;
@@ -818,37 +994,42 @@ ivb_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
+ I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ if (IS_IVYBRIDGE(dev_priv))
+ I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+
if (key->flags) {
I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
-
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
- else if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
- else
+ } else {
I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
+ }
- I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
- if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(SPRCTL(pipe), sprctl);
I915_WRITE_FW(SPRSURF(pipe),
intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- POSTING_READ_FW(SPRSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+ivb_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -857,12 +1038,10 @@ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
I915_WRITE_FW(SPRCTL(pipe), 0);
- /* Can't leave the scaler enabled... */
+ /* Disable the scaler */
if (IS_IVYBRIDGE(dev_priv))
I915_WRITE_FW(SPRSCALE(pipe), 0);
-
I915_WRITE_FW(SPRSURF(pipe), 0);
- POSTING_READ_FW(SPRSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -961,7 +1140,6 @@ g4x_update_plane(struct intel_plane *plane,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_framebuffer *fb = plane_state->base.fb;
enum pipe pipe = plane->pipe;
u32 dvscntr = plane_state->ctl, dvsscale = 0;
u32 dvssurf_offset = plane_state->color_plane[0].offset;
@@ -990,32 +1168,35 @@ g4x_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
+ I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+
if (key->flags) {
I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
-
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
- else
- I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
- I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+ /*
+ * The control register self-arms if the plane was previously
+ * disabled. Try to make the plane enable atomic by writing
+ * the control register just before the surface register.
+ */
I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
I915_WRITE_FW(DVSSURF(pipe),
intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
- POSTING_READ_FW(DVSSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
+g4x_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
@@ -1026,9 +1207,7 @@ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
I915_WRITE_FW(DVSCNTR(pipe), 0);
/* Disable the scaler */
I915_WRITE_FW(DVSSCALE(pipe), 0);
-
I915_WRITE_FW(DVSSURF(pipe), 0);
- POSTING_READ_FW(DVSSURF(pipe));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1054,6 +1233,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
return ret;
}
+static bool intel_fb_scalable(const struct drm_framebuffer *fb)
+{
+ if (!fb)
+ return false;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ return false;
+ default:
+ return true;
+ }
+}
+
static int
g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@@ -1121,18 +1313,18 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int max_scale, min_scale;
+ int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
- if (INTEL_GEN(dev_priv) < 7) {
- min_scale = 1;
- max_scale = 16 << 16;
- } else if (IS_IVYBRIDGE(dev_priv)) {
- min_scale = 1;
- max_scale = 2 << 16;
- } else {
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ if (intel_fb_scalable(plane_state->base.fb)) {
+ if (INTEL_GEN(dev_priv) < 7) {
+ min_scale = 1;
+ max_scale = 16 << 16;
+ } else if (IS_IVYBRIDGE(dev_priv)) {
+ min_scale = 1;
+ max_scale = 2 << 16;
+ }
}
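Scale limits are 16.16 fixed point (DRM_PLANE_HELPER_NO_SCALING is 1 << 16, i.e. a 1.0 factor), so the bounds above read as:

	int no_scaling = 1 << 16;	/* 1.0x: scaling effectively disabled */
	int pre_gen7_max = 16 << 16;	/* up to a 16x downscale factor */
	int ivb_max = 2 << 16;		/* IVB sprite scaler: up to 2x */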
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1219,6 +1411,8 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
struct drm_format_name_buf format_name;
@@ -1247,13 +1441,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
/*
- * 90/270 is not allowed with RGB64 16:16:16:16,
- * RGB 16-bit 5:6:5, and Indexed 8-bit.
- * TBD: Add RGB64 case once its added in supported format list.
+ * 90/270 is not allowed with RGB64 16:16:16:16 and
+ * Indexed 8-bit. RGB 16-bit 5:6:5 is allowed from gen11 onwards.
+ * TBD: Add the RGB64 case once it's added to the supported
+ * format list.
*/
switch (fb->format->format) {
- case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
+ if (INTEL_GEN(dev_priv) >= 11)
+ break;
+ /* fall through */
+ case DRM_FORMAT_C8:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
drm_get_format_name(fb->format->format,
&format_name));
@@ -1307,12 +1505,31 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
return 0;
}
-int skl_plane_check(struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state)
+static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+
+ /* Display WA #1106 */
+ if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 &&
+ (rotation == DRM_MODE_ROTATE_270 ||
+ rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
+ DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
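A hedged example of a configuration Display WA #1106 rejects:

	/* 1366 & 3 == 2, so a 1366-wide NV12 source rotated 270 degrees is refused */
	unsigned int rotation = DRM_MODE_ROTATE_270;
	int src_w = 1366;
	bool rejected = (src_w & 3) &&
			(rotation == DRM_MODE_ROTATE_270 ||
			 rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90));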
+static int skl_plane_check(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int max_scale, min_scale;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int min_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
ret = skl_plane_check_fb(crtc_state, plane_state);
@@ -1320,15 +1537,9 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
return ret;
/* use scaler when colorkey is not required */
- if (!plane_state->ckey.flags) {
- const struct drm_framebuffer *fb = plane_state->base.fb;
-
+ if (!plane_state->ckey.flags && intel_fb_scalable(fb)) {
min_scale = 1;
- max_scale = skl_max_scale(crtc_state,
- fb ? fb->format->format : 0);
- } else {
- min_scale = DRM_PLANE_HELPER_NO_SCALING;
- max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ max_scale = skl_max_scale(crtc_state, fb->format->format);
}
ret = drm_atomic_helper_check_plane_state(&plane_state->base,
@@ -1349,10 +1560,18 @@ int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ ret = skl_plane_check_nv12_rotation(plane_state);
+ if (ret)
+ return ret;
+
ret = skl_check_plane_surface(plane_state);
if (ret)
return ret;
+ /* HW has only 8 bits of pixel precision, so disable the plane if invisible */
+ if (!(plane_state->base.alpha >> 8))
+ plane_state->base.visible = false;
+
plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -1517,24 +1736,30 @@ static const uint32_t vlv_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static uint32_t skl_plane_formats[] = {
+static const uint32_t skl_plane_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
};
-static uint32_t skl_planar_formats[] = {
+static const uint32_t skl_planar_formats[] = {
+ DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
@@ -1739,8 +1964,36 @@ static const struct drm_plane_funcs skl_plane_funcs = {
.format_mod_supported = skl_plane_format_mod_supported,
};
-bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
+static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (!HAS_FBC(dev_priv))
+ return false;
+
+ return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
+}
+
+static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (INTEL_GEN(dev_priv) >= 11)
+ return plane_id <= PLANE_SPRITE3;
+
+ /* Display WA #0870: skl, bxt */
+ if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
+ return false;
+
+ if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+ return false;
+
+ if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
+ return false;
+
+ return true;
+}
+
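A hedged summary of the policy the helper above encodes:

	/*
	 * skl_plane_has_planar(), summarized:
	 *   gen11+                 : planes 1..4 (PRIMARY through SPRITE3)
	 *   SKL and BXT            : none (Display WA #0870)
	 *   other gen9 BC (KBL/CFL): primary + sprite 0, pipes A and B only
	 *   GLK and gen10          : primary + sprite 0, any pipe
	 */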
+static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
{
if (plane_id == PLANE_CURSOR)
return false;
@@ -1757,109 +2010,173 @@ bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
}
struct intel_plane *
-intel_sprite_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, int plane)
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
{
- struct intel_plane *intel_plane = NULL;
- struct intel_plane_state *state = NULL;
- const struct drm_plane_funcs *plane_funcs;
- unsigned long possible_crtcs;
- const uint32_t *plane_formats;
- const uint64_t *modifiers;
+ struct intel_plane *plane;
+ enum drm_plane_type plane_type;
unsigned int supported_rotations;
- int num_plane_formats;
+ unsigned int possible_crtcs;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
int ret;
- intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL);
- if (!intel_plane) {
- ret = -ENOMEM;
- goto fail;
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ plane->pipe = pipe;
+ plane->id = plane_id;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
+
+ plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
+ if (plane->has_fbc) {
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
}
- state = intel_create_plane_state(&intel_plane->base);
- if (!state) {
- ret = -ENOMEM;
- goto fail;
+ plane->max_stride = skl_plane_max_stride;
+ plane->update_plane = skl_update_plane;
+ plane->disable_plane = skl_disable_plane;
+ plane->get_hw_state = skl_plane_get_hw_state;
+ plane->check_plane = skl_plane_check;
+ if (icl_is_nv12_y_plane(plane_id))
+ plane->update_slave = icl_update_slave;
+
+ if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
+ formats = skl_planar_formats;
+ num_formats = ARRAY_SIZE(skl_planar_formats);
+ } else {
+ formats = skl_plane_formats;
+ num_formats = ARRAY_SIZE(skl_plane_formats);
}
- intel_plane->base.state = &state->base;
- if (INTEL_GEN(dev_priv) >= 9) {
- state->scaler_id = -1;
+ plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
+ if (plane->has_ccs)
+ modifiers = skl_plane_format_modifiers_ccs;
+ else
+ modifiers = skl_plane_format_modifiers_noccs;
- intel_plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe,
- PLANE_SPRITE0 + plane);
+ if (plane_id == PLANE_PRIMARY)
+ plane_type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ plane_type = DRM_PLANE_TYPE_OVERLAY;
- intel_plane->max_stride = skl_plane_max_stride;
- intel_plane->update_plane = skl_update_plane;
- intel_plane->disable_plane = skl_disable_plane;
- intel_plane->get_hw_state = skl_plane_get_hw_state;
- intel_plane->check_plane = skl_plane_check;
+ possible_crtcs = BIT(pipe);
- if (skl_plane_has_planar(dev_priv, pipe,
- PLANE_SPRITE0 + plane)) {
- plane_formats = skl_planar_formats;
- num_plane_formats = ARRAY_SIZE(skl_planar_formats);
- } else {
- plane_formats = skl_plane_formats;
- num_plane_formats = ARRAY_SIZE(skl_plane_formats);
- }
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, &skl_plane_funcs,
+ formats, num_formats, modifiers,
+ plane_type,
+ "plane %d%c", plane_id + 1,
+ pipe_name(pipe));
+ if (ret)
+ goto fail;
+
+ supported_rotations =
+ DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
+
+ if (INTEL_GEN(dev_priv) >= 10)
+ supported_rotations |= DRM_MODE_REFLECT_X;
+
+ drm_plane_create_rotation_property(&plane->base,
+ DRM_MODE_ROTATE_0,
+ supported_rotations);
+
+ drm_plane_create_color_properties(&plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+ drm_plane_create_alpha_property(&plane->base);
+ drm_plane_create_blend_mode_property(&plane->base,
+ BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+
+ return plane;
- if (intel_plane->has_ccs)
- modifiers = skl_plane_format_modifiers_ccs;
- else
- modifiers = skl_plane_format_modifiers_noccs;
-
- plane_funcs = &skl_plane_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- intel_plane->max_stride = i9xx_plane_max_stride;
- intel_plane->update_plane = vlv_update_plane;
- intel_plane->disable_plane = vlv_disable_plane;
- intel_plane->get_hw_state = vlv_plane_get_hw_state;
- intel_plane->check_plane = vlv_sprite_check;
-
- plane_formats = vlv_plane_formats;
- num_plane_formats = ARRAY_SIZE(vlv_plane_formats);
+fail:
+ intel_plane_free(plane);
+
+ return ERR_PTR(ret);
+}
+
+struct intel_plane *
+intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int sprite)
+{
+ struct intel_plane *plane;
+ const struct drm_plane_funcs *plane_funcs;
+ unsigned long possible_crtcs;
+ unsigned int supported_rotations;
+ const u64 *modifiers;
+ const u32 *formats;
+ int num_formats;
+ int ret;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ return skl_universal_plane_create(dev_priv, pipe,
+ PLANE_SPRITE0 + sprite);
+
+ plane = intel_plane_alloc();
+ if (IS_ERR(plane))
+ return plane;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ plane->max_stride = i9xx_plane_max_stride;
+ plane->update_plane = vlv_update_plane;
+ plane->disable_plane = vlv_disable_plane;
+ plane->get_hw_state = vlv_plane_get_hw_state;
+ plane->check_plane = vlv_sprite_check;
+
+ formats = vlv_plane_formats;
+ num_formats = ARRAY_SIZE(vlv_plane_formats);
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &vlv_sprite_funcs;
} else if (INTEL_GEN(dev_priv) >= 7) {
- intel_plane->max_stride = g4x_sprite_max_stride;
- intel_plane->update_plane = ivb_update_plane;
- intel_plane->disable_plane = ivb_disable_plane;
- intel_plane->get_hw_state = ivb_plane_get_hw_state;
- intel_plane->check_plane = g4x_sprite_check;
-
- plane_formats = snb_plane_formats;
- num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ plane->max_stride = g4x_sprite_max_stride;
+ plane->update_plane = ivb_update_plane;
+ plane->disable_plane = ivb_disable_plane;
+ plane->get_hw_state = ivb_plane_get_hw_state;
+ plane->check_plane = g4x_sprite_check;
+
+ formats = snb_plane_formats;
+ num_formats = ARRAY_SIZE(snb_plane_formats);
modifiers = i9xx_plane_format_modifiers;
plane_funcs = &snb_sprite_funcs;
} else {
- intel_plane->max_stride = g4x_sprite_max_stride;
- intel_plane->update_plane = g4x_update_plane;
- intel_plane->disable_plane = g4x_disable_plane;
- intel_plane->get_hw_state = g4x_plane_get_hw_state;
- intel_plane->check_plane = g4x_sprite_check;
+ plane->max_stride = g4x_sprite_max_stride;
+ plane->update_plane = g4x_update_plane;
+ plane->disable_plane = g4x_disable_plane;
+ plane->get_hw_state = g4x_plane_get_hw_state;
+ plane->check_plane = g4x_sprite_check;
modifiers = i9xx_plane_format_modifiers;
if (IS_GEN6(dev_priv)) {
- plane_formats = snb_plane_formats;
- num_plane_formats = ARRAY_SIZE(snb_plane_formats);
+ formats = snb_plane_formats;
+ num_formats = ARRAY_SIZE(snb_plane_formats);
plane_funcs = &snb_sprite_funcs;
} else {
- plane_formats = g4x_plane_formats;
- num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
+ formats = g4x_plane_formats;
+ num_formats = ARRAY_SIZE(g4x_plane_formats);
plane_funcs = &g4x_sprite_funcs;
}
}
- if (INTEL_GEN(dev_priv) >= 9) {
- supported_rotations =
- DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
- DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
- } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
supported_rotations =
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_X;
@@ -1868,35 +2185,25 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
}
- intel_plane->pipe = pipe;
- intel_plane->i9xx_plane = plane;
- intel_plane->id = PLANE_SPRITE0 + plane;
- intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, intel_plane->id);
+ plane->pipe = pipe;
+ plane->id = PLANE_SPRITE0 + sprite;
+ plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- possible_crtcs = (1 << pipe);
+ possible_crtcs = BIT(pipe);
- if (INTEL_GEN(dev_priv) >= 9)
- ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, plane_funcs,
- plane_formats, num_plane_formats,
- modifiers,
- DRM_PLANE_TYPE_OVERLAY,
- "plane %d%c", plane + 2, pipe_name(pipe));
- else
- ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base,
- possible_crtcs, plane_funcs,
- plane_formats, num_plane_formats,
- modifiers,
- DRM_PLANE_TYPE_OVERLAY,
- "sprite %c", sprite_name(pipe, plane));
+ ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
+ possible_crtcs, plane_funcs,
+ formats, num_formats, modifiers,
+ DRM_PLANE_TYPE_OVERLAY,
+ "sprite %c", sprite_name(pipe, sprite));
if (ret)
goto fail;
- drm_plane_create_rotation_property(&intel_plane->base,
+ drm_plane_create_rotation_property(&plane->base,
DRM_MODE_ROTATE_0,
supported_rotations);
- drm_plane_create_color_properties(&intel_plane->base,
+ drm_plane_create_color_properties(&plane->base,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
@@ -1904,13 +2211,12 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
- drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
+ drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
- return intel_plane;
+ return plane;
fail:
- kfree(state);
- kfree(intel_plane);
+ intel_plane_free(plane);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index b5b04cb892e9..860f306a23ba 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -885,6 +885,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return false;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
adjusted_mode->crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
@@ -1377,17 +1378,10 @@ intel_tv_get_modes(struct drm_connector *connector)
return count;
}
-static void
-intel_tv_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_tv_destroy,
+ .destroy = intel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index b1b3e81b6e24..b34c318b238d 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -376,7 +376,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
- if (ret == 0 || ret != -EAGAIN)
+ if (ret == 0 || ret != -ETIMEDOUT)
break;
DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.h b/drivers/gpu/drm/i915/intel_uc_fw.h
index 87910aa83267..0e3bd580e267 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/intel_uc_fw.h
@@ -115,9 +115,14 @@ static inline bool intel_uc_fw_is_selected(struct intel_uc_fw *uc_fw)
return uc_fw->path != NULL;
}
+static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
+{
+ return uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS;
+}
+
static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
{
- if (uc_fw->load_status == INTEL_UC_FIRMWARE_SUCCESS)
+ if (intel_uc_fw_is_loaded(uc_fw))
uc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 3ad302c66254..9289515108c3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
FORCEWAKE_MEDIA_VEBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
- } else if (IS_GEN9(dev_priv) || IS_GEN10(dev_priv)) {
+ } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_fallback;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index bba98cf83cbd..bf3662ad5fed 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -326,6 +326,13 @@ enum vbt_gmbus_ddi {
ICL_DDC_BUS_PORT_4,
};
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+#define DP_AUX_E 0x50
+#define DP_AUX_F 0x60
+
#define VBT_DP_MAX_LINK_RATE_HBR3 0
#define VBT_DP_MAX_LINK_RATE_HBR2 1
#define VBT_DP_MAX_LINK_RATE_HBR 2
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
new file mode 100644
index 000000000000..c56ba0e04044
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -0,0 +1,1088 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Author: Gaurav K Singh <gaurav.k.singh@intel.com>
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+enum ROW_INDEX_BPP {
+ ROW_INDEX_6BPP = 0,
+ ROW_INDEX_8BPP,
+ ROW_INDEX_10BPP,
+ ROW_INDEX_12BPP,
+ ROW_INDEX_15BPP,
+ MAX_ROW_INDEX
+};
+
+enum COLUMN_INDEX_BPC {
+ COLUMN_INDEX_8BPC = 0,
+ COLUMN_INDEX_10BPC,
+ COLUMN_INDEX_12BPC,
+ COLUMN_INDEX_14BPC,
+ COLUMN_INDEX_16BPC,
+ MAX_COLUMN_INDEX
+};
+
+#define DSC_SUPPORTED_VERSION_MIN 1
+
+/* From DSC_v1.11 spec, rc_parameter_set syntax element, typically constant */
+static u16 rc_buf_thresh[] = {
+ 896, 1792, 2688, 3584, 4480, 5376, 6272, 6720, 7168, 7616,
+ 7744, 7872, 8000, 8064
+};
+
+struct rc_parameters {
+ u16 initial_xmit_delay;
+ u8 first_line_bpg_offset;
+ u16 initial_offset;
+ u8 flatness_min_qp;
+ u8 flatness_max_qp;
+ u8 rc_quant_incr_limit0;
+ u8 rc_quant_incr_limit1;
+ struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+};
+
+/*
+ * Selected Rate Control Related Parameter Recommended Values
+ * from DSC_v1.11 spec & C Model release: DSC_model_20161212
+ */
+static struct rc_parameters rc_params[][MAX_COLUMN_INDEX] = {
+{
+ /* 6BPP/8BPC */
+ { 768, 15, 6144, 3, 13, 11, 11, {
+ { 0, 4, 0 }, { 1, 6, -2 }, { 3, 8, -2 }, { 4, 8, -4 },
+ { 5, 9, -6 }, { 5, 9, -6 }, { 6, 9, -6 }, { 6, 10, -8 },
+ { 7, 11, -8 }, { 8, 12, -10 }, { 9, 12, -10 }, { 10, 12, -12 },
+ { 10, 12, -12 }, { 11, 12, -12 }, { 13, 14, -12 }
+ }
+ },
+ /* 6BPP/10BPC */
+ { 768, 15, 6144, 7, 17, 15, 15, {
+ { 0, 8, 0 }, { 3, 10, -2 }, { 7, 12, -2 }, { 8, 12, -4 },
+ { 9, 13, -6 }, { 9, 13, -6 }, { 10, 13, -6 }, { 10, 14, -8 },
+ { 11, 15, -8 }, { 12, 16, -10 }, { 13, 16, -10 },
+ { 14, 16, -12 }, { 14, 16, -12 }, { 15, 16, -12 },
+ { 17, 18, -12 }
+ }
+ },
+ /* 6BPP/12BPC */
+ { 768, 15, 6144, 11, 21, 19, 19, {
+ { 0, 12, 0 }, { 5, 14, -2 }, { 11, 16, -2 }, { 12, 16, -4 },
+ { 13, 17, -6 }, { 13, 17, -6 }, { 14, 17, -6 }, { 14, 18, -8 },
+ { 15, 19, -8 }, { 16, 20, -10 }, { 17, 20, -10 },
+ { 18, 20, -12 }, { 18, 20, -12 }, { 19, 20, -12 },
+ { 21, 22, -12 }
+ }
+ },
+ /* 6BPP/14BPC */
+ { 768, 15, 6144, 15, 25, 23, 27, {
+ { 0, 16, 0 }, { 7, 18, -2 }, { 15, 20, -2 }, { 16, 20, -4 },
+ { 17, 21, -6 }, { 17, 21, -6 }, { 18, 21, -6 }, { 18, 22, -8 },
+ { 19, 23, -8 }, { 20, 24, -10 }, { 21, 24, -10 },
+ { 22, 24, -12 }, { 22, 24, -12 }, { 23, 24, -12 },
+ { 25, 26, -12 }
+ }
+ },
+ /* 6BPP/16BPC */
+ { 768, 15, 6144, 19, 29, 27, 27, {
+ { 0, 20, 0 }, { 9, 22, -2 }, { 19, 24, -2 }, { 20, 24, -4 },
+ { 21, 25, -6 }, { 21, 25, -6 }, { 22, 25, -6 }, { 22, 26, -8 },
+ { 23, 27, -8 }, { 24, 28, -10 }, { 25, 28, -10 },
+ { 26, 28, -12 }, { 26, 28, -12 }, { 27, 28, -12 },
+ { 29, 30, -12 }
+ }
+ },
+},
+{
+ /* 8BPP/8BPC */
+ { 512, 12, 6144, 3, 12, 11, 11, {
+ { 0, 4, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 }, { 5, 12, -12 },
+ { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 8BPP/10BPC */
+ { 512, 12, 6144, 7, 16, 15, 15, {
+ { 0, 4, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 5, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 8BPP/12BPC */
+ { 512, 12, 6144, 11, 20, 19, 19, {
+ { 0, 12, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 9, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 8BPP/14BPC */
+ { 512, 12, 6144, 15, 24, 23, 23, {
+ { 0, 12, 0 }, { 5, 13, 0 }, { 11, 15, 0 }, { 12, 17, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 22, -10 }, { 17, 22, -10 },
+ { 17, 23, -12 }, { 17, 23, -12 }, { 21, 24, -12 },
+ { 24, 25, -12 }
+ }
+ },
+ /* 8BPP/16BPC */
+ { 512, 12, 6144, 19, 28, 27, 27, {
+ { 0, 12, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 15, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 26, -10 }, { 21, 26, -10 },
+ { 21, 27, -12 }, { 21, 27, -12 }, { 25, 28, -12 },
+ { 28, 29, -12 }
+ }
+ },
+},
+{
+ /* 10BPP/8BPC */
+ { 410, 15, 5632, 3, 12, 11, 11, {
+ { 0, 3, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 2, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 9, -10 }, { 5, 10, -10 }, { 5, 10, -10 },
+ { 5, 11, -12 }, { 7, 11, -12 }, { 11, 12, -12 }
+ }
+ },
+ /* 10BPP/10BPC */
+ { 410, 15, 5632, 7, 16, 15, 15, {
+ { 0, 7, 2 }, { 4, 8, 0 }, { 5, 9, 0 }, { 6, 10, -2 },
+ { 7, 11, -4 }, { 7, 11, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 13, -10 }, { 9, 14, -10 }, { 9, 14, -10 },
+ { 9, 15, -12 }, { 11, 15, -12 }, { 15, 16, -12 }
+ }
+ },
+ /* 10BPP/12BPC */
+ { 410, 15, 5632, 11, 20, 19, 19, {
+ { 0, 11, 2 }, { 4, 12, 0 }, { 9, 13, 0 }, { 10, 14, -2 },
+ { 11, 15, -4 }, { 11, 15, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 17, -10 }, { 13, 18, -10 },
+ { 13, 18, -10 }, { 13, 19, -12 }, { 15, 19, -12 },
+ { 19, 20, -12 }
+ }
+ },
+ /* 10BPP/14BPC */
+ { 410, 15, 5632, 15, 24, 23, 23, {
+ { 0, 11, 2 }, { 5, 13, 0 }, { 11, 15, 0 }, { 13, 18, -2 },
+ { 15, 19, -4 }, { 15, 19, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 21, -8 }, { 15, 21, -10 }, { 17, 22, -10 },
+ { 17, 22, -10 }, { 17, 23, -12 }, { 19, 23, -12 },
+ { 23, 24, -12 }
+ }
+ },
+ /* 10BPP/16BPC */
+ { 410, 15, 5632, 19, 28, 27, 27, {
+ { 0, 11, 2 }, { 6, 14, 0 }, { 13, 17, 0 }, { 16, 20, -2 },
+ { 19, 23, -4 }, { 19, 23, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 25, -8 }, { 19, 25, -10 }, { 21, 26, -10 },
+ { 21, 26, -10 }, { 21, 27, -12 }, { 23, 27, -12 },
+ { 27, 28, -12 }
+ }
+ },
+},
+{
+ /* 12BPP/8BPC */
+ { 341, 15, 2048, 3, 12, 11, 11, {
+ { 0, 2, 2 }, { 0, 4, 0 }, { 1, 5, 0 }, { 1, 6, -2 },
+ { 3, 7, -4 }, { 3, 7, -6 }, { 3, 7, -8 }, { 3, 8, -8 },
+ { 3, 9, -8 }, { 3, 10, -10 }, { 5, 11, -10 },
+ { 5, 12, -12 }, { 5, 13, -12 }, { 7, 13, -12 }, { 13, 15, -12 }
+ }
+ },
+ /* 12BPP/10BPC */
+ { 341, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 2 }, { 2, 5, 0 }, { 3, 7, 0 }, { 4, 8, -2 },
+ { 6, 9, -4 }, { 7, 10, -6 }, { 7, 11, -8 }, { 7, 12, -8 },
+ { 7, 13, -8 }, { 7, 14, -10 }, { 9, 15, -10 }, { 9, 16, -12 },
+ { 9, 17, -12 }, { 11, 17, -12 }, { 17, 19, -12 }
+ }
+ },
+ /* 12BPP/12BPC */
+ { 341, 15, 2048, 11, 20, 19, 19, {
+ { 0, 6, 2 }, { 4, 9, 0 }, { 7, 11, 0 }, { 8, 12, -2 },
+ { 10, 13, -4 }, { 11, 14, -6 }, { 11, 15, -8 }, { 11, 16, -8 },
+ { 11, 17, -8 }, { 11, 18, -10 }, { 13, 19, -10 },
+ { 13, 20, -12 }, { 13, 21, -12 }, { 15, 21, -12 },
+ { 21, 23, -12 }
+ }
+ },
+ /* 12BPP/14BPC */
+ { 341, 15, 2048, 15, 24, 23, 23, {
+ { 0, 6, 2 }, { 7, 10, 0 }, { 9, 13, 0 }, { 11, 16, -2 },
+ { 14, 17, -4 }, { 15, 18, -6 }, { 15, 19, -8 }, { 15, 20, -8 },
+ { 15, 20, -8 }, { 15, 21, -10 }, { 17, 21, -10 },
+ { 17, 21, -12 }, { 17, 21, -12 }, { 19, 22, -12 },
+ { 22, 23, -12 }
+ }
+ },
+ /* 12BPP/16BPC */
+ { 341, 15, 2048, 19, 28, 27, 27, {
+ { 0, 6, 2 }, { 6, 11, 0 }, { 11, 15, 0 }, { 14, 18, -2 },
+ { 18, 21, -4 }, { 19, 22, -6 }, { 19, 23, -8 }, { 19, 24, -8 },
+ { 19, 24, -8 }, { 19, 25, -10 }, { 21, 25, -10 },
+ { 21, 25, -12 }, { 21, 25, -12 }, { 23, 26, -12 },
+ { 26, 27, -12 }
+ }
+ },
+},
+{
+ /* 15BPP/8BPC */
+ { 273, 15, 2048, 3, 12, 11, 11, {
+ { 0, 0, 10 }, { 0, 1, 8 }, { 0, 1, 6 }, { 0, 2, 4 },
+ { 1, 2, 2 }, { 1, 3, 0 }, { 1, 3, -2 }, { 2, 4, -4 },
+ { 2, 5, -6 }, { 3, 5, -8 }, { 4, 6, -10 }, { 4, 7, -10 },
+ { 5, 7, -12 }, { 7, 8, -12 }, { 8, 9, -12 }
+ }
+ },
+ /* 15BPP/10BPC */
+ { 273, 15, 2048, 7, 16, 15, 15, {
+ { 0, 2, 10 }, { 2, 5, 8 }, { 3, 5, 6 }, { 4, 6, 4 },
+ { 5, 6, 2 }, { 5, 7, 0 }, { 5, 7, -2 }, { 6, 8, -4 },
+ { 6, 9, -6 }, { 7, 9, -8 }, { 8, 10, -10 }, { 8, 11, -10 },
+ { 9, 11, -12 }, { 11, 12, -12 }, { 12, 13, -12 }
+ }
+ },
+ /* 15BPP/12BPC */
+ { 273, 15, 2048, 11, 20, 19, 19, {
+ { 0, 4, 10 }, { 2, 7, 8 }, { 4, 9, 6 }, { 6, 11, 4 },
+ { 9, 11, 2 }, { 9, 11, 0 }, { 9, 12, -2 }, { 10, 12, -4 },
+ { 11, 13, -6 }, { 11, 13, -8 }, { 12, 14, -10 },
+ { 13, 15, -10 }, { 13, 15, -12 }, { 15, 16, -12 },
+ { 16, 17, -12 }
+ }
+ },
+ /* 15BPP/14BPC */
+ { 273, 15, 2048, 15, 24, 23, 23, {
+ { 0, 4, 10 }, { 3, 8, 8 }, { 6, 11, 6 }, { 9, 14, 4 },
+ { 13, 15, 2 }, { 13, 15, 0 }, { 13, 16, -2 }, { 14, 16, -4 },
+ { 15, 17, -6 }, { 15, 17, -8 }, { 16, 18, -10 },
+ { 17, 19, -10 }, { 17, 19, -12 }, { 19, 20, -12 },
+ { 20, 21, -12 }
+ }
+ },
+ /* 15BPP/16BPC */
+ { 273, 15, 2048, 19, 28, 27, 27, {
+ { 0, 4, 10 }, { 4, 9, 8 }, { 8, 13, 6 }, { 12, 17, 4 },
+ { 17, 19, 2 }, { 17, 20, 0 }, { 17, 20, -2 }, { 18, 20, -4 },
+ { 19, 21, -6 }, { 19, 21, -8 }, { 20, 22, -10 },
+ { 21, 23, -10 }, { 21, 23, -12 }, { 23, 24, -12 },
+ { 24, 25, -12 }
+ }
+ }
+}
+
+};
+
+static int get_row_index_for_rc_params(u16 compressed_bpp)
+{
+ switch (compressed_bpp) {
+ case 6:
+ return ROW_INDEX_6BPP;
+ case 8:
+ return ROW_INDEX_8BPP;
+ case 10:
+ return ROW_INDEX_10BPP;
+ case 12:
+ return ROW_INDEX_12BPP;
+ case 15:
+ return ROW_INDEX_15BPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int get_column_index_for_rc_params(u8 bits_per_component)
+{
+ switch (bits_per_component) {
+ case 8:
+ return COLUMN_INDEX_8BPC;
+ case 10:
+ return COLUMN_INDEX_10BPC;
+ case 12:
+ return COLUMN_INDEX_12BPC;
+ case 14:
+ return COLUMN_INDEX_14BPC;
+ case 16:
+ return COLUMN_INDEX_16BPC;
+ default:
+ return -EINVAL;
+ }
+}
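A minimal sketch of how the two lookup helpers combine to index the rc_params table above; the 8BPP/10BPC entry quoted in the comment is the one from the table, the surrounding snippet is illustrative only:

	/*
	 * Illustrative lookup, assuming 8 bpp compressed output from a
	 * 10 bpc source: selects rc_params[ROW_INDEX_8BPP][COLUMN_INDEX_10BPC],
	 * i.e. the { 512, 12, 6144, 7, 16, 15, 15, ... } entry above.
	 */
	int row = get_row_index_for_rc_params(8);
	int col = get_column_index_for_rc_params(10);
	const struct rc_parameters *rc;

	if (row < 0 || col < 0)
		return -EINVAL;
	rc = &rc_params[row][col];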
+
+static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
+{
+ unsigned long groups_per_line = 0;
+ unsigned long groups_total = 0;
+ unsigned long num_extra_mux_bits = 0;
+ unsigned long slice_bits = 0;
+ unsigned long hrd_delay = 0;
+ unsigned long final_scale = 0;
+ unsigned long rbs_min = 0;
+
+ /* Number of groups used to code each line of a slice */
+ groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
+ DSC_RC_PIXELS_PER_GROUP);
+
+ /* Chunk size in bytes */
+ vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
+ vdsc_cfg->bits_per_pixel,
+ (8 * 16));
+
+ if (vdsc_cfg->convert_rgb)
+ num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4)
+ - 2);
+ else
+ num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4) +
+ 2 * (4 * vdsc_cfg->bits_per_component) - 2;
+ /* Number of bits in one Slice */
+ slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
+
+ while ((num_extra_mux_bits > 0) &&
+ ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
+ num_extra_mux_bits--;
+
+ if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
+ vdsc_cfg->initial_scale_value = groups_per_line + 8;
+
+ /* scale_decrement_interval calculation according to DSC spec 1.11 */
+ if (vdsc_cfg->initial_scale_value > 8)
+ vdsc_cfg->scale_decrement_interval = groups_per_line /
+ (vdsc_cfg->initial_scale_value - 8);
+ else
+ vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
+
+ vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
+ (vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
+
+ if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
+ DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
+ return -ERANGE;
+ }
+
+ final_scale = (vdsc_cfg->rc_model_size * 8) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
+ if (vdsc_cfg->slice_height > 1)
+ /*
+ * NflBpgOffset is 16 bit value with 11 fractional bits
+ * hence we multiply by 2^11 for preserving the
+ * fractional part
+ */
+ vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
+ (vdsc_cfg->slice_height - 1));
+ else
+ vdsc_cfg->nfl_bpg_offset = 0;
+
+ /* 2^16 - 1 */
+ if (vdsc_cfg->nfl_bpg_offset > 65535) {
+ DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
+ return -ERANGE;
+ }
+
+ /* Number of groups used to code the entire slice */
+ groups_total = groups_per_line * vdsc_cfg->slice_height;
+
+ /* slice_bpg_offset is 16 bit value with 11 fractional bits */
+ vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
+ vdsc_cfg->initial_offset +
+ num_extra_mux_bits) << 11),
+ groups_total);
+
+ if (final_scale > 9) {
+ /*
+ * ScaleIncrementInterval =
+ * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
+ * as (NflBpgOffset + SliceBpgOffset) has an 11 bit fractional value,
+ * we need to divide the pstDscCfg values by 2^11
+ */
+ vdsc_cfg->scale_increment_interval =
+ (vdsc_cfg->final_offset * (1 << 11)) /
+ ((vdsc_cfg->nfl_bpg_offset +
+ vdsc_cfg->slice_bpg_offset) *
+ (final_scale - 9));
+ } else {
+ /*
+ * If finalScaleValue is less than or equal to 9, a value of 0 should
+ * be used to disable the scale increment at the end of the slice
+ */
+ vdsc_cfg->scale_increment_interval = 0;
+ }
+
+ if (vdsc_cfg->scale_increment_interval > 65535) {
+ DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
+ return -ERANGE;
+ }
+
+ /*
+ * DSC spec mentions that bits_per_pixel specifies the target
+ * bits/pixel (bpp) rate that is used by the encoder,
+ * in steps of 1/16 of a bit per pixel
+ */
+ rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
+ DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel, 16) +
+ groups_per_line * vdsc_cfg->first_line_bpg_offset;
+
+ hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
+ vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
+ vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
+
+ return 0;
+}
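To make the fixed-point arithmetic above concrete, here is a worked example under assumed inputs (DSC_RC_PIXELS_PER_GROUP is 3 pixels per group):

	/*
	 * Worked example, hypothetical inputs: slice_width = 960,
	 * slice_height = 8, bits_per_pixel = 8 * 16 = 128 (1/16 bpp units).
	 *
	 *   groups_per_line  = DIV_ROUND_UP(960, 3)            = 320
	 *   slice_chunk_size = DIV_ROUND_UP(960 * 128, 8 * 16) = 960 bytes
	 *   slice_bits       = 8 * 960 * 8                     = 61440 bits
	 *
	 * so each compressed line of this slice occupies exactly 960 bytes.
	 */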
+
+int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg;
+ u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp;
+ u8 i = 0;
+ int row_index = 0;
+ int column_index = 0;
+ u8 line_buf_depth = 0;
+
+ vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay;
+ vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay;
+ vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
+ pipe_config->dsc_params.slice_count);
+ /*
+ * Slice Height of 8 works for all currently available panels. So start
+ * with that if pic_height is an integral multiple of 8.
+ * Eventually add logic to try multiple slice heights.
+ */
+ if (vdsc_cfg->pic_height % 8 == 0)
+ vdsc_cfg->slice_height = 8;
+ else if (vdsc_cfg->pic_height % 4 == 0)
+ vdsc_cfg->slice_height = 4;
+ else
+ vdsc_cfg->slice_height = 2;
+
+ /* Values filled from DSC Sink DPCD */
+ vdsc_cfg->dsc_version_major =
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
+ vdsc_cfg->dsc_version_minor =
+ min(DSC_SUPPORTED_VERSION_MIN,
+ (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
+ DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);
+
+ vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
+ DP_DSC_RGB;
+
+ line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
+ if (!line_buf_depth) {
+ DRM_DEBUG_KMS("DSC Sink Line Buffer Depth invalid\n");
+ return -EINVAL;
+ }
+ if (vdsc_cfg->dsc_version_minor == 2)
+ vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
+ else
+ vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
+ DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
+
+ /* Gen 11 does not support YCbCr */
+ vdsc_cfg->enable422 = false;
+ /* Gen 11 does not support VBR */
+ vdsc_cfg->vbr_enable = false;
+ vdsc_cfg->block_pred_enable =
+ intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_BLK_PREDICTION_IS_SUPPORTED;
+
+ /* Gen 11 only supports integral values of bpp */
+ vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
+ vdsc_cfg->bits_per_component = pipe_config->pipe_bpp / 3;
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ /*
+ * Six 0s are appended to the LSB of each threshold value
+ * internally in h/w; only 8 bits are allowed for
+ * programming RcBufThreshold.
+ */
+ vdsc_cfg->rc_buf_thresh[i] = rc_buf_thresh[i] >> 6;
+ }
+
+ /*
+ * For 6bpp, RC Buffer threshold 12 and 13 need a different value
+ * as per C Model
+ */
+ if (compressed_bpp == 6) {
+ vdsc_cfg->rc_buf_thresh[12] = 0x7C;
+ vdsc_cfg->rc_buf_thresh[13] = 0x7D;
+ }
+
+ row_index = get_row_index_for_rc_params(compressed_bpp);
+ column_index =
+ get_column_index_for_rc_params(vdsc_cfg->bits_per_component);
+
+ if (row_index < 0 || column_index < 0)
+ return -EINVAL;
+
+ vdsc_cfg->first_line_bpg_offset =
+ rc_params[row_index][column_index].first_line_bpg_offset;
+ vdsc_cfg->initial_xmit_delay =
+ rc_params[row_index][column_index].initial_xmit_delay;
+ vdsc_cfg->initial_offset =
+ rc_params[row_index][column_index].initial_offset;
+ vdsc_cfg->flatness_min_qp =
+ rc_params[row_index][column_index].flatness_min_qp;
+ vdsc_cfg->flatness_max_qp =
+ rc_params[row_index][column_index].flatness_max_qp;
+ vdsc_cfg->rc_quant_incr_limit0 =
+ rc_params[row_index][column_index].rc_quant_incr_limit0;
+ vdsc_cfg->rc_quant_incr_limit1 =
+ rc_params[row_index][column_index].rc_quant_incr_limit1;
+
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ vdsc_cfg->rc_range_params[i].range_min_qp =
+ rc_params[row_index][column_index].rc_range_params[i].range_min_qp;
+ vdsc_cfg->rc_range_params[i].range_max_qp =
+ rc_params[row_index][column_index].rc_range_params[i].range_max_qp;
+ /*
+ * Range BPG Offset is a 6 bit 2's complement value, so mask it
+ * to keep only those 6 bits.
+ */
+ vdsc_cfg->rc_range_params[i].range_bpg_offset =
+ rc_params[row_index][column_index].rc_range_params[i].range_bpg_offset &
+ DSC_RANGE_BPG_OFFSET_MASK;
+ }
+
+ /*
+ * BitsPerComponent value determines mux_word_size:
+ * When BitsPerComponent is 12bpc, muxWordSize will be equal to 64 bits
+ * When BitsPerComponent is 8 or 10bpc, muxWordSize will be equal to
+ * 48 bits
+ */
+ if (vdsc_cfg->bits_per_component == 8 ||
+ vdsc_cfg->bits_per_component == 10)
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_8_10_BPC;
+ else if (vdsc_cfg->bits_per_component == 12)
+ vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC;
+
+ /* RC_MODEL_SIZE is a constant across all configurations */
+ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ /* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */
+ vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
+
+ return intel_compute_rc_parameters(vdsc_cfg);
+}
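The 1/16 bpp encoding and the slice height pick are the two non-obvious steps in the function above; a small worked illustration with assumed numbers:

	/*
	 * Hypothetical 1920x1080 mode, 2 slices, 12 bpp compressed,
	 * 24 bpp uncompressed input:
	 *
	 *   slice_width        = DIV_ROUND_UP(1920, 2) = 960
	 *   slice_height       = 8        (1080 % 8 == 0)
	 *   bits_per_pixel     = 12 << 4  = 192  (12.0 bpp in 1/16 steps)
	 *   bits_per_component = 24 / 3   = 8
	 */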
+
+enum intel_display_power_domain
+intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
+{
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ /*
+ * On ICL, VDSC/joining on the eDP transcoder uses a separate power
+ * well, PW2. This requires the POWER_DOMAIN_TRANSCODER_EDP_VDSC power
+ * domain.
+ * For any other transcoder, VDSC/joining uses the power well associated
+ * with the pipe/transcoder in use. Hence another reference on the
+ * transcoder power domain will suffice.
+ */
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return POWER_DOMAIN_TRANSCODER_EDP_VDSC;
+ else
+ return POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+}
+
+static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 pps_val = 0;
+ u32 rc_buf_thresh_dword[4];
+ u32 rc_range_params_dword[8];
+ u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1;
+ int i = 0;
+
+ /* Populate PICTURE_PARAMETER_SET_0 registers */
+ pps_val = DSC_VER_MAJ | vdsc_cfg->dsc_version_minor <<
+ DSC_VER_MIN_SHIFT |
+ vdsc_cfg->bits_per_component << DSC_BPC_SHIFT |
+ vdsc_cfg->line_buf_depth << DSC_LINE_BUF_DEPTH_SHIFT;
+ if (vdsc_cfg->block_pred_enable)
+ pps_val |= DSC_BLOCK_PREDICTION;
+ if (vdsc_cfg->convert_rgb)
+ pps_val |= DSC_COLOR_SPACE_CONVERSION;
+ if (vdsc_cfg->enable422)
+ pps_val |= DSC_422_ENABLE;
+ if (vdsc_cfg->vbr_enable)
+ pps_val |= DSC_VBR_ENABLE;
+ DRM_INFO("PPS0 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_1 registers */
+ pps_val = 0;
+ pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
+ DRM_INFO("PPS1 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_2 registers */
+ pps_val = 0;
+ pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
+ DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
+ DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_3 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
+ DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
+ DRM_INFO("PPS3 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_4 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
+ DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
+ DRM_INFO("PPS4 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_5 registers */
+ pps_val = 0;
+ pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
+ DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
+ DRM_INFO("PPS5 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_6 registers */
+ pps_val = 0;
+ pps_val |= DSC_INITIAL_SCALE_VALUE(vdsc_cfg->initial_scale_value) |
+ DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
+ DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
+ DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
+ DRM_INFO("PPS6 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_7 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
+ DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
+ DRM_INFO("PPS7 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_8 registers */
+ pps_val = 0;
+ pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
+ DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
+ DRM_INFO("PPS8 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_9 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
+ DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
+ DRM_INFO("PPS9 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_10 registers */
+ pps_val = 0;
+ pps_val |= DSC_RC_QUANT_INC_LIMIT0(vdsc_cfg->rc_quant_incr_limit0) |
+ DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
+ DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
+ DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
+ DRM_INFO("PPS10 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
+ }
+
+ /* Populate PICTURE_PARAMETER_SET_16 registers */
+ pps_val = 0;
+ pps_val |= DSC_SLICE_CHUNK_SIZE(vdsc_cfg->slice_chunk_size) |
+ DSC_SLICE_PER_LINE((vdsc_cfg->pic_width / num_vdsc_instances) /
+ vdsc_cfg->slice_width) |
+ DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
+ vdsc_cfg->slice_height);
+ DRM_INFO("PPS16 = 0x%08x\n", pps_val);
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
+ /*
+ * If 2 VDSC instances are needed, configure PPS for second
+ * VDSC
+ */
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+ } else {
+ I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
+ if (crtc_state->dsc_params.dsc_split)
+ I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
+ }
+
+ /* Populate the RC_BUF_THRESH registers */
+ memset(rc_buf_thresh_dword, 0, sizeof(rc_buf_thresh_dword));
+ for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++) {
+ rc_buf_thresh_dword[i / 4] |=
+ (u32)(vdsc_cfg->rc_buf_thresh[i] <<
+ BITS_PER_BYTE * (i % 4));
+ DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
+ rc_buf_thresh_dword[i / 4]);
+ }
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
+ I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(DSCC_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
+ }
+ } else {
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
+ }
+ }
+
+ /* Populate the RC_RANGE_PARAMETERS registers */
+ memset(rc_range_params_dword, 0, sizeof(rc_range_params_dword));
+ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
+ rc_range_params_dword[i / 2] |=
+ (u32)(((vdsc_cfg->rc_range_params[i].range_bpg_offset <<
+ RC_BPG_OFFSET_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_max_qp <<
+ RC_MAX_QP_SHIFT) |
+ (vdsc_cfg->rc_range_params[i].range_min_qp <<
+ RC_MIN_QP_SHIFT)) << 16 * (i % 2));
+ DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
+ rc_range_params_dword[i / 2]);
+ }
+ if (cpu_transcoder == TRANSCODER_EDP) {
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
+ }
+ } else {
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ if (crtc_state->dsc_params.dsc_split) {
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
+ }
+ }
+}
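The byte and word packing in the two loops above is easiest to see with numbers; a sketch using the first four rc_buf_thresh values from the table at the top of this file:

	/*
	 * rc_buf_thresh[] starts 896, 1792, 2688, 3584; after the >> 6 in
	 * intel_dp_compute_dsc_params() these become 14, 28, 42, 56
	 * (0x0E, 0x1C, 0x2A, 0x38). Packed one byte per threshold,
	 * lowest index in the lowest byte:
	 *
	 *   rc_buf_thresh_dword[0] = 0x0E | 0x1C << 8 | 0x2A << 16 | 0x38 << 24
	 *                          = 0x382A1C0E
	 *
	 * which is the value written to DSCA_RC_BUF_THRESH_0 above.
	 */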
+
+static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg;
+ struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
+
+ /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
+ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp);
+
+ /* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
+ drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg);
+
+ intel_dig_port->write_infoframe(encoder, crtc_state,
+ DP_SDP_PPS, &dp_dsc_pps_sdp,
+ sizeof(dp_dsc_pps_sdp));
+}
+
+void intel_dsc_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ u32 dss_ctl1_val = 0;
+ u32 dss_ctl2_val = 0;
+
+ if (!crtc_state->dsc_params.compression_enable)
+ return;
+
+ /* Enable Power wells for VDSC/joining */
+ intel_display_power_get(dev_priv,
+ intel_dsc_power_domain(crtc_state));
+
+ intel_configure_pps_for_dsc_encoder(encoder, crtc_state);
+
+ intel_dp_write_dsc_pps_sdp(encoder, crtc_state);
+
+ if (crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ } else {
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+ }
+ dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE;
+ if (crtc_state->dsc_params.dsc_split) {
+ dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
+ dss_ctl1_val |= JOINER_ENABLE;
+ }
+ I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+ I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+}
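The two control writes at the end of intel_dsc_enable() reduce to the following register values in the two supported configurations (assuming the eDP transcoder, so DSS_CTL1/DSS_CTL2 are used):

	/*
	 * single VDSC:  DSS_CTL1 = 0
	 *               DSS_CTL2 = LEFT_BRANCH_VDSC_ENABLE
	 * dsc_split:    DSS_CTL1 = JOINER_ENABLE
	 *               DSS_CTL2 = LEFT_BRANCH_VDSC_ENABLE |
	 *                          RIGHT_BRANCH_VDSC_ENABLE
	 */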
+
+void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ i915_reg_t dss_ctl1_reg, dss_ctl2_reg;
+ u32 dss_ctl1_val = 0, dss_ctl2_val = 0;
+
+ if (!old_crtc_state->dsc_params.compression_enable)
+ return;
+
+ if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) {
+ dss_ctl1_reg = DSS_CTL1;
+ dss_ctl2_reg = DSS_CTL2;
+ } else {
+ dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
+ dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
+ }
+ dss_ctl1_val = I915_READ(dss_ctl1_reg);
+ if (dss_ctl1_val & JOINER_ENABLE)
+ dss_ctl1_val &= ~JOINER_ENABLE;
+ I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+
+ dss_ctl2_val = I915_READ(dss_ctl2_reg);
+ if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE ||
+ dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE)
+ dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE |
+ RIGHT_BRANCH_VDSC_ENABLE);
+ I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+
+ /* Disable Power wells for VDSC/joining */
+ intel_display_power_put(dev_priv,
+ intel_dsc_power_domain(old_crtc_state));
+}
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4bcdeaf8d98f..4f41e326f3f3 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -48,58 +48,112 @@
* - Public functions to init or apply the given workaround type.
*/
-static void wa_add(struct drm_i915_private *i915,
- i915_reg_t reg, const u32 mask, const u32 val)
+static void wa_init_start(struct i915_wa_list *wal, const char *name)
{
- struct i915_workarounds *wa = &i915->workarounds;
- unsigned int start = 0, end = wa->count;
- unsigned int addr = i915_mmio_reg_offset(reg);
- struct i915_wa_reg *r;
+ wal->name = name;
+}
+
+#define WA_LIST_CHUNK (1 << 4)
+
+static void wa_init_finish(struct i915_wa_list *wal)
+{
+ /* Trim unused entries. */
+ if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
+ struct i915_wa *list = kmemdup(wal->list,
+ wal->count * sizeof(*list),
+ GFP_KERNEL);
+
+ if (list) {
+ kfree(wal->list);
+ wal->list = list;
+ }
+ }
+
+ if (!wal->count)
+ return;
+
+ DRM_DEBUG_DRIVER("Initialized %u %s workarounds\n",
+ wal->wa_count, wal->name);
+}
+
+static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
+{
+ unsigned int addr = i915_mmio_reg_offset(wa->reg);
+ unsigned int start = 0, end = wal->count;
+ const unsigned int grow = WA_LIST_CHUNK;
+ struct i915_wa *wa_;
+
+ GEM_BUG_ON(!is_power_of_2(grow));
+
+ if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
+ struct i915_wa *list;
+
+ list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
+ GFP_KERNEL);
+ if (!list) {
+ DRM_ERROR("No space for workaround init!\n");
+ return;
+ }
+
+ if (wal->list)
+ memcpy(list, wal->list, sizeof(*wa) * wal->count);
+
+ wal->list = list;
+ }
while (start < end) {
unsigned int mid = start + (end - start) / 2;
- if (wa->reg[mid].addr < addr) {
+ if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
start = mid + 1;
- } else if (wa->reg[mid].addr > addr) {
+ } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
end = mid;
} else {
- r = &wa->reg[mid];
+ wa_ = &wal->list[mid];
- if ((mask & ~r->mask) == 0) {
+ if ((wa->mask & ~wa_->mask) == 0) {
DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
- addr, r->mask, r->value);
+ i915_mmio_reg_offset(wa_->reg),
+ wa_->mask, wa_->val);
- r->value &= ~mask;
+ wa_->val &= ~wa->mask;
}
- r->value |= val;
- r->mask |= mask;
+ wal->wa_count++;
+ wa_->val |= wa->val;
+ wa_->mask |= wa->mask;
return;
}
}
- if (WARN_ON_ONCE(wa->count >= I915_MAX_WA_REGS)) {
- DRM_ERROR("Dropping w/a for reg %04x (mask: %08x, value: %08x)\n",
- addr, mask, val);
- return;
- }
-
- r = &wa->reg[wa->count++];
- r->addr = addr;
- r->value = val;
- r->mask = mask;
+ wal->wa_count++;
+ wa_ = &wal->list[wal->count++];
+ *wa_ = *wa;
- while (r-- > wa->reg) {
- GEM_BUG_ON(r[0].addr == r[1].addr);
- if (r[1].addr > r[0].addr)
+ while (wa_-- > wal->list) {
+ GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
+ i915_mmio_reg_offset(wa_[1].reg));
+ if (i915_mmio_reg_offset(wa_[1].reg) >
+ i915_mmio_reg_offset(wa_[0].reg))
break;
- swap(r[1], r[0]);
+ swap(wa_[1], wa_[0]);
}
}
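A standalone model of the growth-and-insert scheme above may help. This sketch is hypothetical plain userspace C with no kernel dependencies, and uses a linear search where _wa_add() bisects; it keeps the same chunked power-of-two growth and sorted-merge behaviour, minus the trim that wa_init_finish() performs afterwards:

	#include <stdlib.h>
	#include <string.h>

	#define CHUNK 16 /* mirrors WA_LIST_CHUNK */

	struct wa { unsigned int addr, mask, val; };

	static int wa_insert(struct wa **list, unsigned int *count,
			     struct wa new_wa)
	{
		unsigned int i;

		if ((*count % CHUNK) == 0) { /* either uninitialized or full */
			struct wa *grown;

			grown = realloc(*list, (*count + CHUNK) * sizeof(*grown));
			if (!grown)
				return -1;
			*list = grown;
		}

		for (i = 0; i < *count; i++) {
			if ((*list)[i].addr == new_wa.addr) {
				/* duplicate register: merge mask and value */
				(*list)[i].val |= new_wa.val;
				(*list)[i].mask |= new_wa.mask;
				return 0;
			}
			if ((*list)[i].addr > new_wa.addr)
				break;
		}

		/* keep the array sorted by register offset */
		memmove(&(*list)[i + 1], &(*list)[i],
			(*count - i) * sizeof(new_wa));
		(*list)[i] = new_wa;
		(*count)++;
		return 0;
	}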
-#define WA_REG(addr, mask, val) wa_add(dev_priv, (addr), (mask), (val))
+static void
+__wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask, u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ _wa_add(wal, &wa);
+}
+
+#define WA_REG(addr, mask, val) __wa_add(wal, (addr), (mask), (val))
#define WA_SET_BIT_MASKED(addr, mask) \
WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
@@ -110,8 +164,10 @@ static void wa_add(struct drm_i915_private *i915,
#define WA_SET_FIELD_MASKED(addr, mask, value) \
WA_REG(addr, (mask), _MASKED_FIELD(mask, value))
-static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
/* WaDisableAsyncFlipPerfMode:bdw,chv */
@@ -155,17 +211,14 @@ static int gen8_ctx_workarounds_init(struct drm_i915_private *dev_priv)
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
-
- return 0;
}
-static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen8_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen8_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
@@ -185,31 +238,28 @@ static int bdw_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaForceContextSaveRestoreNonCoherent:bdw */
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
- return 0;
+ (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}
-static int chv_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void chv_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen8_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen8_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:chv */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
/* Improve HiZ throughput on CHV. */
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
- return 0;
}
-static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- if (HAS_LLC(dev_priv)) {
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
@@ -228,7 +278,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
- if (!IS_COFFEELAKE(dev_priv))
+ if (!IS_COFFEELAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
@@ -271,9 +321,7 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
HDC_FORCE_NON_COHERENT);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
- if (IS_SKYLAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) ||
- IS_COFFEELAKE(dev_priv))
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915))
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
@@ -300,14 +348,14 @@ static int gen9_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
- if (IS_GEN9_LP(dev_priv))
+ if (IS_GEN9_LP(i915))
WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
-
- return 0;
}
-static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
+static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
u8 vals[3] = { 0, 0, 0 };
unsigned int i;
@@ -318,7 +366,7 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
- if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+ if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
continue;
/*
@@ -327,12 +375,12 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
*
* -> 0 <= ss <= 3;
*/
- ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+ ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
- return 0;
+ return;
/* Tune IZ hashing. See intel_device_info_runtime_init() */
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
@@ -342,28 +390,19 @@ static int skl_tune_iz_hashing(struct drm_i915_private *dev_priv)
GEN9_IZ_HASHING(2, vals[2]) |
GEN9_IZ_HASHING(1, vals[1]) |
GEN9_IZ_HASHING(0, vals[0]));
-
- return 0;
}
-static int skl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void skl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
-
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
-
- return skl_tune_iz_hashing(dev_priv);
+ gen9_ctx_workarounds_init(engine);
+ skl_tune_iz_hashing(engine);
}
-static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaDisableThreadStallDopClockGating:bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
@@ -372,57 +411,41 @@ static int bxt_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaToEnableHwFixForPushConstHWBug:bxt */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- return 0;
}
-static int kbl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
-
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE);
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+ if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableSbeCacheDispatchPortSharing:kbl */
WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- return 0;
}
-static int glk_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void glk_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:glk */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- return 0;
}
-static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
- int ret;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
- ret = gen9_ctx_workarounds_init(dev_priv);
- if (ret)
- return ret;
+ gen9_ctx_workarounds_init(engine);
/* WaToEnableHwFixForPushConstHWBug:cfl */
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -431,18 +454,19 @@ static int cfl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaDisableSbeCacheDispatchPortSharing:cfl */
WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1,
GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- return 0;
}
-static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
/* WaForceContextSaveRestoreNonCoherent:cnl */
WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
@@ -450,7 +474,7 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, 0, CNL_REVID_B0))
+ if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
@@ -470,16 +494,17 @@ static int cnl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* WaDisableEarlyEOT:cnl */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
-
- return 0;
}
-static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
/* Wa_1604370585:icl (pre-prod)
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
PUSH_CONSTANT_DEREF_DISABLE);
@@ -495,7 +520,7 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
/* Wa_2006611047:icl (pre-prod)
* Formerly known as WaDisableImprovedTdlClkGating
*/
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
@@ -504,70 +529,67 @@ static int icl_ctx_workarounds_init(struct drm_i915_private *dev_priv)
GEN11_STATE_CACHE_REDIRECT_TO_CS);
/* Wa_2006665173:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
-
- return 0;
}
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv)
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
- int err = 0;
-
- dev_priv->workarounds.count = 0;
-
- if (INTEL_GEN(dev_priv) < 8)
- err = 0;
- else if (IS_BROADWELL(dev_priv))
- err = bdw_ctx_workarounds_init(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- err = chv_ctx_workarounds_init(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- err = skl_ctx_workarounds_init(dev_priv);
- else if (IS_BROXTON(dev_priv))
- err = bxt_ctx_workarounds_init(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- err = kbl_ctx_workarounds_init(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- err = glk_ctx_workarounds_init(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- err = cfl_ctx_workarounds_init(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- err = cnl_ctx_workarounds_init(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- err = icl_ctx_workarounds_init(dev_priv);
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->ctx_wa_list;
+
+ wa_init_start(wal, "context");
+
+ if (INTEL_GEN(i915) < 8)
+ return;
+ else if (IS_BROADWELL(i915))
+ bdw_ctx_workarounds_init(engine);
+ else if (IS_CHERRYVIEW(i915))
+ chv_ctx_workarounds_init(engine);
+ else if (IS_SKYLAKE(i915))
+ skl_ctx_workarounds_init(engine);
+ else if (IS_BROXTON(i915))
+ bxt_ctx_workarounds_init(engine);
+ else if (IS_KABYLAKE(i915))
+ kbl_ctx_workarounds_init(engine);
+ else if (IS_GEMINILAKE(i915))
+ glk_ctx_workarounds_init(engine);
+ else if (IS_COFFEELAKE(i915))
+ cfl_ctx_workarounds_init(engine);
+ else if (IS_CANNONLAKE(i915))
+ cnl_ctx_workarounds_init(engine);
+ else if (IS_ICELAKE(i915))
+ icl_ctx_workarounds_init(engine);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
- if (err)
- return err;
+ MISSING_CASE(INTEL_GEN(i915));
- DRM_DEBUG_DRIVER("Number of context specific w/a: %d\n",
- dev_priv->workarounds.count);
- return 0;
+ wa_init_finish(wal);
}
-int intel_ctx_workarounds_emit(struct i915_request *rq)
+int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
- struct i915_workarounds *w = &rq->i915->workarounds;
+ struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
+ struct i915_wa *wa;
+ unsigned int i;
u32 *cs;
- int ret, i;
+ int ret;
- if (w->count == 0)
+ if (wal->count == 0)
return 0;
ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
if (ret)
return ret;
- cs = intel_ring_begin(rq, (w->count * 2 + 2));
+ cs = intel_ring_begin(rq, (wal->count * 2 + 2));
if (IS_ERR(cs))
return PTR_ERR(cs);
- *cs++ = MI_LOAD_REGISTER_IMM(w->count);
- for (i = 0; i < w->count; i++) {
- *cs++ = w->reg[i].addr;
- *cs++ = w->reg[i].value;
+ *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ *cs++ = i915_mmio_reg_offset(wa->reg);
+ *cs++ = wa->val;
}
*cs++ = MI_NOOP;
@@ -580,160 +602,149 @@ int intel_ctx_workarounds_emit(struct i915_request *rq)
return 0;
}
-static void bdw_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = val,
+ .val = _MASKED_BIT_ENABLE(val)
+ };
+
+ _wa_add(wal, &wa);
+}
+
+static void
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
+ u32 val)
+{
+ struct i915_wa wa = {
+ .reg = reg,
+ .mask = mask,
+ .val = val
+ };
+
+ _wa_add(wal, &wa);
}
-static void chv_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
+ wa_write_masked_or(wal, reg, ~0, val);
}
-static void gen9_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+ wa_write_masked_or(wal, reg, val, val);
+}
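The three write flavours above differ only in how mask and val are derived: wa_masked_en() packs the enable bits into the value's upper 16 bits (masked registers consume that half as a per-bit write enable), wa_write() passes mask ~0 so the write is absolute, and wa_write_or() sets mask == val so the read-modify-write degenerates to a set. A minimal userspace sketch of that arithmetic, with the register replaced by a plain variable and all names invented for the sketch:

#include <stdint.h>
#include <stdio.h>

/* Masked registers: the upper 16 bits select which low bits are written. */
#define MASKED_BIT_ENABLE(x) (((x) << 16) | (x))

struct wa { uint32_t mask, val; };

/* One read-modify-write step, as wa_list_apply() performs per entry. */
static uint32_t wa_apply(uint32_t cur, const struct wa *wa)
{
	return (cur & ~wa->mask) | wa->val;
}

int main(void)
{
	uint32_t reg = 0x0000f0f0;	/* pretend register contents */
	struct wa en  = { .mask = 0x4,   .val = MASKED_BIT_ENABLE(0x4) };
	struct wa set = { .mask = 0x100, .val = 0x100 };	/* mask == val */

	printf("masked_en: %08x\n", wa_apply(reg, &en));	/* 0004f0f4 */
	printf("write_or:  %08x\n", wa_apply(reg, &set));	/* 0000f1f0 */
	return 0;
}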
- /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
- I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
- GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+{
+ struct i915_wa_list *wal = &i915->gt_wa_list;
/* WaDisableKillLogic:bxt,skl,kbl */
- if (!IS_COFFEELAKE(dev_priv))
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
+ if (!IS_COFFEELAKE(i915))
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ ECOCHK_DIS_TLB);
- if (HAS_LLC(dev_priv)) {
+ if (HAS_LLC(i915)) {
/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
*
* Must match Display Engine. See
* WaCompressedResourceDisplayNewHashMode.
*/
- I915_WRITE(MMCD_MISC_CTRL,
- I915_READ(MMCD_MISC_CTRL) |
- MMCD_PCLA |
- MMCD_HOTSPOT_EN);
+ wa_write_or(wal,
+ MMCD_MISC_CTRL,
+ MMCD_PCLA | MMCD_HOTSPOT_EN);
}
/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
- if (IS_GEN9_LP(dev_priv)) {
- u32 val = I915_READ(GEN8_L3SQCREG1);
-
- val &= ~L3_PRIO_CREDITS_MASK;
- val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
- I915_WRITE(GEN8_L3SQCREG1, val);
- }
-
- /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
- I915_WRITE(GEN8_L3SQCREG4,
- I915_READ(GEN8_L3SQCREG4) | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GAM_ECOCHK,
+ BDW_DISABLE_HDC_INVALIDATION);
}
-static void skl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void skl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:skl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:skl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void bxt_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaDisablePooledEuLoadBalancingFix:bxt */
- I915_WRITE(FF_SLICE_CS_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
+ gen9_gt_workarounds_init(i915);
/* WaInPlaceDecompressionHang:bxt */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void kbl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:kbl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+ if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
/* WaDisableGafsUnitClkGating:kbl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:kbl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaKBLVECSSemaphoreWaitPoll:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_E0)) {
- struct intel_engine_cs *engine;
- unsigned int tmp;
-
- for_each_engine(engine, dev_priv, tmp) {
- if (engine->id == RCS)
- continue;
-
- I915_WRITE(RING_SEMA_WAIT_POLL(engine->mmio_base), 1);
- }
- }
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void glk_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void glk_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ gen9_gt_workarounds_init(i915);
}
-static void cfl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
{
- gen9_gt_workarounds_apply(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* WaEnableGapsTsvCreditFix:cfl */
- I915_WRITE(GEN8_GARBCNTL,
- I915_READ(GEN8_GARBCNTL) | GEN9_GAPS_TSV_CREDIT_DISABLE);
+ gen9_gt_workarounds_init(i915);
/* WaDisableGafsUnitClkGating:cfl */
- I915_WRITE(GEN7_UCGCTL4,
- I915_READ(GEN7_UCGCTL4) | GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+ wa_write_or(wal,
+ GEN7_UCGCTL4,
+ GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:cfl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void wa_init_mcr(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
- u32 mcr;
+ struct i915_wa_list *wal = &dev_priv->gt_wa_list;
u32 mcr_slice_subslice_mask;
/*
@@ -770,8 +781,6 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
}
- mcr = I915_READ(GEN8_MCR_SELECTOR);
-
if (INTEL_GEN(dev_priv) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
@@ -789,173 +798,220 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
* occasions, such as INSTDONE, where this value is dependent
* on s/ss combo, the read should be done with read_subslice_reg.
*/
- mcr &= ~mcr_slice_subslice_mask;
- mcr |= intel_calculate_mcr_s_ss_select(dev_priv);
- I915_WRITE(GEN8_MCR_SELECTOR, mcr);
+ wa_write_masked_or(wal,
+ GEN8_MCR_SELECTOR,
+ mcr_slice_subslice_mask,
+ intel_calculate_mcr_s_ss_select(dev_priv));
}
-static void cnl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_mcr(i915);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
- if (IS_CNL_REVID(dev_priv, CNL_REVID_B0, CNL_REVID_B0))
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
+ if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
/* WaInPlaceDecompressionHang:cnl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
- I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaEnablePreemptionGranularityControlByUMD:cnl */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void icl_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+static void icl_gt_workarounds_init(struct drm_i915_private *i915)
{
- wa_init_mcr(dev_priv);
+ struct i915_wa_list *wal = &i915->gt_wa_list;
- /* This is not an Wa. Enable for better image quality */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
+ wa_init_mcr(i915);
/* WaInPlaceDecompressionHang:icl */
- I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA, I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaPipelineFlushCoherentLines:icl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- /* Wa_1405543622:icl
- * Formerly known as WaGAPZPriorityScheme
- */
- I915_WRITE(GEN8_GARBCNTL, I915_READ(GEN8_GARBCNTL) |
- GEN11_ARBITRATION_PRIO_ORDER_MASK);
-
- /* Wa_1604223664:icl
- * Formerly known as WaL3BankAddressHashing
- */
- I915_WRITE(GEN8_GARBCNTL,
- (I915_READ(GEN8_GARBCNTL) & ~GEN11_HASH_CTRL_EXCL_MASK) |
- GEN11_HASH_CTRL_EXCL_BIT0);
- I915_WRITE(GEN11_GLBLINVL,
- (I915_READ(GEN11_GLBLINVL) & ~GEN11_BANK_HASH_ADDR_EXCL_MASK) |
- GEN11_BANK_HASH_ADDR_EXCL_BIT0);
+ wa_write_or(wal,
+ GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
/* WaModifyGamTlbPartitioning:icl */
- I915_WRITE(GEN11_GACB_PERF_CTRL,
- (I915_READ(GEN11_GACB_PERF_CTRL) & ~GEN11_HASH_CTRL_MASK) |
- GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
-
- /* Wa_1405733216:icl
- * Formerly known as WaDisableCleanEvicts
- */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN11_LQSC_CLEAN_EVICT_DISABLE);
+ wa_write_masked_or(wal,
+ GEN11_GACB_PERF_CTRL,
+ GEN11_HASH_CTRL_MASK,
+ GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
/* Wa_1405766107:icl
* Formerly known as WaCL2SFHalfMaxAlloc
*/
- I915_WRITE(GEN11_LSN_UNSLCVC, I915_READ(GEN11_LSN_UNSLCVC) |
- GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
- GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
+ wa_write_or(wal,
+ GEN11_LSN_UNSLCVC,
+ GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
+ GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
/* Wa_220166154:icl
* Formerly known as WaDisCtxReload
*/
- I915_WRITE(GAMW_ECO_DEV_RW_IA_REG, I915_READ(GAMW_ECO_DEV_RW_IA_REG) |
- GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
+ wa_write_or(wal,
+ GEN8_GAMW_ECO_DEV_RW_IA,
+ GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
/* Wa_1405779004:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_A0))
- I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
- MSCUNIT_CLKGATE_DIS);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
+ wa_write_or(wal,
+ SLICE_UNIT_LEVEL_CLKGATE,
+ MSCUNIT_CLKGATE_DIS);
/* Wa_1406680159:icl */
- I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE,
- I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE) |
- GWUNIT_CLKGATE_DIS);
-
- /* Wa_1604302699:icl */
- I915_WRITE(GEN10_L3_CHICKEN_MODE_REGISTER,
- I915_READ(GEN10_L3_CHICKEN_MODE_REGISTER) |
- GEN11_I2M_WRITE_DISABLE);
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
/* Wa_1406838659:icl (pre-prod) */
- if (IS_ICL_REVID(dev_priv, ICL_REVID_A0, ICL_REVID_B0))
- I915_WRITE(INF_UNIT_LEVEL_CLKGATE,
- I915_READ(INF_UNIT_LEVEL_CLKGATE) |
- CGPSF_CLKGATE_DIS);
-
- /* WaForwardProgressSoftReset:icl */
- I915_WRITE(GEN10_SCRATCH_LNCF2,
- I915_READ(GEN10_SCRATCH_LNCF2) |
- PMFLUSHDONE_LNICRSDROP |
- PMFLUSH_GAPL3UNBLOCK |
- PMFLUSHDONE_LNEBLK);
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ INF_UNIT_LEVEL_CLKGATE,
+ CGPSF_CLKGATE_DIS);
/* Wa_1406463099:icl
* Formerly known as WaGamTlbPendError
*/
- I915_WRITE(GAMT_CHKN_BIT_REG,
- I915_READ(GAMT_CHKN_BIT_REG) |
- GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ wa_write_or(wal,
+ GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv)
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
{
- if (INTEL_GEN(dev_priv) < 8)
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_start(wal, "GT");
+
+ if (INTEL_GEN(i915) < 8)
return;
- else if (IS_BROADWELL(dev_priv))
- bdw_gt_workarounds_apply(dev_priv);
- else if (IS_CHERRYVIEW(dev_priv))
- chv_gt_workarounds_apply(dev_priv);
- else if (IS_SKYLAKE(dev_priv))
- skl_gt_workarounds_apply(dev_priv);
- else if (IS_BROXTON(dev_priv))
- bxt_gt_workarounds_apply(dev_priv);
- else if (IS_KABYLAKE(dev_priv))
- kbl_gt_workarounds_apply(dev_priv);
- else if (IS_GEMINILAKE(dev_priv))
- glk_gt_workarounds_apply(dev_priv);
- else if (IS_COFFEELAKE(dev_priv))
- cfl_gt_workarounds_apply(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_gt_workarounds_apply(dev_priv);
- else if (IS_ICELAKE(dev_priv))
- icl_gt_workarounds_apply(dev_priv);
+ else if (IS_BROADWELL(i915))
+ return;
+ else if (IS_CHERRYVIEW(i915))
+ return;
+ else if (IS_SKYLAKE(i915))
+ skl_gt_workarounds_init(i915);
+ else if (IS_BROXTON(i915))
+ bxt_gt_workarounds_init(i915);
+ else if (IS_KABYLAKE(i915))
+ kbl_gt_workarounds_init(i915);
+ else if (IS_GEMINILAKE(i915))
+ glk_gt_workarounds_init(i915);
+ else if (IS_COFFEELAKE(i915))
+ cfl_gt_workarounds_init(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_gt_workarounds_init(i915);
+ else if (IS_ICELAKE(i915))
+ icl_gt_workarounds_init(i915);
else
- MISSING_CASE(INTEL_GEN(dev_priv));
+ MISSING_CASE(INTEL_GEN(i915));
+
+ wa_init_finish(wal);
}
-struct whitelist {
- i915_reg_t reg[RING_MAX_NONPRIV_SLOTS];
- unsigned int count;
- u32 nopid;
-};
+static enum forcewake_domains
+wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal)
+{
+ enum forcewake_domains fw = 0;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ fw |= intel_uncore_forcewake_for_reg(dev_priv,
+ wa->reg,
+ FW_REG_READ |
+ FW_REG_WRITE);
+
+ return fw;
+}
-static void whitelist_reg(struct whitelist *w, i915_reg_t reg)
+static void
+wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
{
- if (GEM_WARN_ON(w->count >= RING_MAX_NONPRIV_SLOTS))
+ enum forcewake_domains fw;
+ unsigned long flags;
+ struct i915_wa *wa;
+ unsigned int i;
+
+ if (!wal->count)
return;
- w->reg[w->count++] = reg;
+ fw = wal_get_fw_for_rmw(dev_priv, wal);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
+ u32 val = I915_READ_FW(wa->reg);
+
+ val &= ~wa->mask;
+ val |= wa->val;
+
+ I915_WRITE_FW(wa->reg, val);
+ }
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
+}
+
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+{
+ wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
+}
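wa_list_apply() grabs forcewake once for the union of domains the whole list needs (computed by wal_get_fw_for_rmw() above) and performs every read-modify-write under the uncore spinlock. A sketch of just the loop shape, with MMIO modelled as an array and the locking elided (helper names are invented for the sketch):

#include <stdint.h>

struct wa { uint32_t reg, mask, val; };
struct wa_list { const struct wa *list; unsigned int count; };

/* Stand-ins for I915_READ_FW()/I915_WRITE_FW() against fake MMIO space. */
static uint32_t mmio[4096];
static uint32_t read_fw(uint32_t reg) { return mmio[reg / 4]; }
static void write_fw(uint32_t reg, uint32_t v) { mmio[reg / 4] = v; }

static void apply(const struct wa_list *wal)
{
	const struct wa *wa;
	unsigned int i;

	if (!wal->count)
		return;
	/* real code: forcewake_get + uncore.lock around this loop */
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		write_fw(wa->reg, (read_fw(wa->reg) & ~wa->mask) | wa->val);
}

int main(void)
{
	static const struct wa fixes[] = { { 0x20, 0xff, 0x62 } };
	struct wa_list wal = { fixes, 1 };

	apply(&wal);
	return read_fw(0x20) != 0x62;	/* exit 0 when the write stuck */
}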
+
+static bool
+wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
+{
+ if ((cur ^ wa->val) & wa->mask) {
+ DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+ name, from, i915_mmio_reg_offset(wa->reg), cur,
+ cur & wa->mask, wa->val, wa->mask);
+
+ return false;
+ }
+
+ return true;
}
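The XOR-and-mask test is the crux: only the bits covered by wa->mask must still match wa->val, everything else is free to change underneath. Restated standalone:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct wa { uint32_t mask, val; };

/* A workaround still "holds" iff the masked bits equal the target value. */
static bool wa_holds(const struct wa *wa, uint32_t cur)
{
	return ((cur ^ wa->val) & wa->mask) == 0;
}

int main(void)
{
	struct wa wa = { .mask = 0x0f, .val = 0x02 };

	assert(wa_holds(&wa, 0xff02));	/* unrelated high bits drifted: fine */
	assert(!wa_holds(&wa, 0xff00));	/* masked bits reverted: lost */
	return 0;
}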
-static void bdw_whitelist_build(struct whitelist *w)
+static bool wa_list_verify(struct drm_i915_private *dev_priv,
+ const struct i915_wa_list *wal,
+ const char *from)
{
+ struct i915_wa *wa;
+ unsigned int i;
+ bool ok = true;
+
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from);
+
+ return ok;
}
-static void chv_whitelist_build(struct whitelist *w)
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+ const char *from)
{
+ return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from);
}
-static void gen9_whitelist_build(struct whitelist *w)
+static void
+whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
+{
+ struct i915_wa wa = {
+ .reg = reg
+ };
+
+ if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
+ return;
+
+ _wa_add(wal, &wa);
+}
+
+static void gen9_whitelist_build(struct i915_wa_list *w)
{
/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
@@ -967,7 +1023,7 @@ static void gen9_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_HDC_CHICKEN1);
}
-static void skl_whitelist_build(struct whitelist *w)
+static void skl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -975,12 +1031,12 @@ static void skl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void bxt_whitelist_build(struct whitelist *w)
+static void bxt_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
}
-static void kbl_whitelist_build(struct whitelist *w)
+static void kbl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -988,7 +1044,7 @@ static void kbl_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN8_L3SQCREG4);
}
-static void glk_whitelist_build(struct whitelist *w)
+static void glk_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
@@ -996,37 +1052,41 @@ static void glk_whitelist_build(struct whitelist *w)
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
-static void cfl_whitelist_build(struct whitelist *w)
+static void cfl_whitelist_build(struct i915_wa_list *w)
{
gen9_whitelist_build(w);
}
-static void cnl_whitelist_build(struct whitelist *w)
+static void cnl_whitelist_build(struct i915_wa_list *w)
{
/* WaEnablePreemptionGranularityControlByUMD:cnl */
whitelist_reg(w, GEN8_CS_CHICKEN1);
}
-static void icl_whitelist_build(struct whitelist *w)
+static void icl_whitelist_build(struct i915_wa_list *w)
{
+ /* WaAllowUMDToModifyHalfSliceChicken7:icl */
+ whitelist_reg(w, GEN9_HALF_SLICE_CHICKEN7);
+
+ /* WaAllowUMDToModifySamplerMode:icl */
+ whitelist_reg(w, GEN10_SAMPLER_MODE);
}
-static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
- struct whitelist *w)
+void intel_engine_init_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *w = &engine->whitelist;
GEM_BUG_ON(engine->id != RCS);
- w->count = 0;
- w->nopid = i915_mmio_reg_offset(RING_NOPID(engine->mmio_base));
+ wa_init_start(w, "whitelist");
if (INTEL_GEN(i915) < 8)
- return NULL;
+ return;
else if (IS_BROADWELL(i915))
- bdw_whitelist_build(w);
+ return;
else if (IS_CHERRYVIEW(i915))
- chv_whitelist_build(w);
+ return;
else if (IS_SKYLAKE(i915))
skl_whitelist_build(w);
else if (IS_BROXTON(i915))
@@ -1044,39 +1104,180 @@ static struct whitelist *whitelist_build(struct intel_engine_cs *engine,
else
MISSING_CASE(INTEL_GEN(i915));
- return w;
+ wa_init_finish(w);
}
-static void whitelist_apply(struct intel_engine_cs *engine,
- const struct whitelist *w)
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ const struct i915_wa_list *wal = &engine->whitelist;
const u32 base = engine->mmio_base;
+ struct i915_wa *wa;
unsigned int i;
- if (!w)
+ if (!wal->count)
return;
- intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
-
- for (i = 0; i < w->count; i++)
- I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i),
- i915_mmio_reg_offset(w->reg[i]));
+ for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+ I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(wa->reg));
/* And clear the rest just in case of garbage */
for (; i < RING_MAX_NONPRIV_SLOTS; i++)
- I915_WRITE_FW(RING_FORCE_TO_NONPRIV(base, i), w->nopid);
+ I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(RING_NOPID(base)));
- intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+ DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}
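Applying a whitelist is pure slot programming: each whitelisted register offset is written into a RING_FORCE_TO_NONPRIV slot and every remaining slot is parked on RING_NOPID so a stale entry cannot keep a register exposed. A sketch with the slot file modelled as an array (the offsets and NOPID value below are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define MAX_NONPRIV_SLOTS 12	/* mirrors RING_MAX_NONPRIV_SLOTS */

static void apply_whitelist(uint32_t *slots, const uint32_t *regs,
			    unsigned int count, uint32_t nopid)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		slots[i] = regs[i];	/* expose to unprivileged batches */
	for (; i < MAX_NONPRIV_SLOTS; i++)
		slots[i] = nopid;	/* scrub anything left behind */
}

int main(void)
{
	uint32_t slots[MAX_NONPRIV_SLOTS];
	const uint32_t wl[] = { 0x2248, 0x7014 };	/* pretend offsets */
	unsigned int i;

	apply_whitelist(slots, wl, 2, 0x2094 /* pretend NOPID */);
	for (i = 0; i < MAX_NONPRIV_SLOTS; i++)
		printf("slot %u: %04x\n", i, slots[i]);
	return 0;
}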
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine)
+static void rcs_engine_wa_init(struct intel_engine_cs *engine)
{
- struct whitelist w;
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (IS_ICELAKE(i915)) {
+ /* This is not a Wa. Enable for better image quality */
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
+
+ /* WaPipelineFlushCoherentLines:icl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+
+ /*
+ * Wa_1405543622:icl
+ * Formerly known as WaGAPZPriorityScheme
+ */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_ARBITRATION_PRIO_ORDER_MASK);
+
+ /*
+ * Wa_1604223664:icl
+ * Formerly known as WaL3BankAddressHashing
+ */
+ wa_write_masked_or(wal,
+ GEN8_GARBCNTL,
+ GEN11_HASH_CTRL_EXCL_MASK,
+ GEN11_HASH_CTRL_EXCL_BIT0);
+ wa_write_masked_or(wal,
+ GEN11_GLBLINVL,
+ GEN11_BANK_HASH_ADDR_EXCL_MASK,
+ GEN11_BANK_HASH_ADDR_EXCL_BIT0);
- whitelist_apply(engine, whitelist_build(engine, &w));
+ /*
+ * Wa_1405733216:icl
+ * Formerly known as WaDisableCleanEvicts
+ */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN11_LQSC_CLEAN_EVICT_DISABLE);
+
+ /* WaForwardProgressSoftReset:icl */
+ wa_write_or(wal,
+ GEN10_SCRATCH_LNCF2,
+ PMFLUSHDONE_LNICRSDROP |
+ PMFLUSH_GAPL3UNBLOCK |
+ PMFLUSHDONE_LNEBLK);
+
+ /* Wa_1406609255:icl (pre-prod) */
+ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_DEMAND_PREFETCH |
+ GEN7_DISABLE_SAMPLER_PREFETCH);
+ }
+
+ if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+ wa_masked_en(wal,
+ GEN7_FF_SLICE_CS_CHICKEN1,
+ GEN9_FFSC_PERCTX_PREEMPT_CTRL);
+ }
+
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915)) {
+ /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_GARBCNTL,
+ GEN9_GAPS_TSV_CREDIT_DISABLE);
+ }
+
+ if (IS_BROXTON(i915)) {
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ wa_masked_en(wal,
+ FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
+ if (IS_GEN9(i915)) {
+ /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
+ wa_masked_en(wal,
+ GEN9_CSFE_CHICKEN1_RCS,
+ GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
+ wa_write_or(wal,
+ BDW_SCRATCH1,
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
+ if (IS_GEN9_LP(i915))
+ wa_write_masked_or(wal,
+ GEN8_L3SQCREG1,
+ L3_PRIO_CREDITS_MASK,
+ L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
+ wa_write_or(wal,
+ GEN8_L3SQCREG4,
+ GEN8_LQSC_FLUSH_COHERENT_LINES);
+ }
+}
+
+static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ /* WaKBLVECSSemaphoreWaitPoll:kbl */
+ if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+ wa_write(wal,
+ RING_SEMA_WAIT_POLL(engine->mmio_base),
+ 1);
+ }
+}
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct i915_wa_list *wal = &engine->wa_list;
+
+ if (GEM_WARN_ON(INTEL_GEN(engine->i915) < 8))
+ return;
+
+ wa_init_start(wal, engine->name);
+
+ if (engine->id == RCS)
+ rcs_engine_wa_init(engine);
+ else
+ xcs_engine_wa_init(engine);
+
+ wa_init_finish(wal);
+}
+
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
+{
+ wa_list_apply(engine->i915, &engine->wa_list);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
+ const char *from)
+{
+ return wa_list_verify(engine->i915, &engine->wa_list, from);
+}
+
#include "selftests/intel_workarounds.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index b11d0623e626..7c734714b05e 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -7,11 +7,39 @@
#ifndef _I915_WORKAROUNDS_H_
#define _I915_WORKAROUNDS_H_
-int intel_ctx_workarounds_init(struct drm_i915_private *dev_priv);
-int intel_ctx_workarounds_emit(struct i915_request *rq);
+#include <linux/slab.h>
-void intel_gt_workarounds_apply(struct drm_i915_private *dev_priv);
+struct i915_wa {
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+};
-void intel_whitelist_workarounds_apply(struct intel_engine_cs *engine);
+struct i915_wa_list {
+ const char *name;
+ struct i915_wa *list;
+ unsigned int count;
+ unsigned int wa_count;
+};
+
+static inline void intel_wa_list_free(struct i915_wa_list *wal)
+{
+ kfree(wal->list);
+ memset(wal, 0, sizeof(*wal));
+}
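The new structures keep the lifecycle small: a list is opened by wa_init_start(), grown one entry at a time by _wa_add() (which reallocates the kmalloc'd array), consumed by the apply/verify helpers, and finally torn down by intel_wa_list_free() above. A userspace analog of that lifecycle, with realloc standing in for the kernel allocator:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct wa { uint32_t reg, mask, val; };

struct wa_list {
	const char *name;
	struct wa *list;
	unsigned int count;
};

static int wa_add(struct wa_list *wal, const struct wa *wa)
{
	struct wa *grown;

	grown = realloc(wal->list, (wal->count + 1) * sizeof(*wa));
	if (!grown)
		return -1;
	grown[wal->count++] = *wa;
	wal->list = grown;
	return 0;
}

static void wa_list_free(struct wa_list *wal)
{
	free(wal->list);
	memset(wal, 0, sizeof(*wal));
}

int main(void)
{
	struct wa_list wal = { .name = "GT" };
	struct wa wa = { .reg = 0x20, .mask = ~0u, .val = 1 };

	wa_add(&wal, &wa);
	wa_list_free(&wal);
	return 0;
}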
+
+void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
+int intel_engine_emit_ctx_wa(struct i915_request *rq);
+
+void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
+void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
+bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+ const char *from);
+
+void intel_engine_init_whitelist(struct intel_engine_cs *engine);
+void intel_engine_apply_whitelist(struct intel_engine_cs *engine);
+
+void intel_engine_init_workarounds(struct intel_engine_cs *engine);
+void intel_engine_apply_workarounds(struct intel_engine_cs *engine);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 5c22f2c8d4cf..26c065c8d2c0 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1135,7 +1135,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
n = 0;
for_each_engine(engine, i915, id) {
if (!intel_engine_can_store_dword(engine)) {
- pr_info("store-dword-imm not supported on engine=%u\n", id);
+ pr_info("store-dword-imm not supported on engine=%u\n",
+ id);
continue;
}
engines[n++] = engine;
@@ -1167,17 +1168,30 @@ static int igt_write_huge(struct i915_gem_context *ctx,
engine = engines[order[i] % n];
i = (i + 1) % (n * I915_NUM_ENGINES);
- err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1);
+ /*
+ * In order to utilize 64K pages we need to both pad the vma
+ * size and ensure the vma offset is at the start of the pt
+ * boundary; however, to improve coverage we opt for testing both
+ * aligned and unaligned offsets.
+ */
+ if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+ offset_low = round_down(offset_low,
+ I915_GTT_PAGE_SIZE_2M);
+
+ err = __igt_write_huge(ctx, engine, obj, size, offset_low,
+ dword, num + 1);
if (err)
break;
- err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1);
+ err = __igt_write_huge(ctx, engine, obj, size, offset_high,
+ dword, num + 1);
if (err)
break;
if (igt_timeout(end_time,
"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
- __func__, engine->id, offset_low, offset_high, max_page_size))
+ __func__, engine->id, offset_low, offset_high,
+ max_page_size))
break;
}
@@ -1436,7 +1450,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!USES_FULL_48BIT_PPGTT(dev_priv)) {
+ if (!HAS_FULL_48BIT_PPGTT(dev_priv)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1687,10 +1701,9 @@ int i915_gem_huge_page_mock_selftests(void)
SUBTEST(igt_mock_ppgtt_huge_fill),
SUBTEST(igt_mock_ppgtt_64K),
};
- int saved_ppgtt = i915_modparams.enable_ppgtt;
struct drm_i915_private *dev_priv;
- struct pci_dev *pdev;
struct i915_hw_ppgtt *ppgtt;
+ struct pci_dev *pdev;
int err;
dev_priv = mock_gem_device();
@@ -1698,7 +1711,7 @@ int i915_gem_huge_page_mock_selftests(void)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- i915_modparams.enable_ppgtt = 3;
+ mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
pdev = dev_priv->drm.pdev;
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
@@ -1731,9 +1744,6 @@ out_close:
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
-
- i915_modparams.enable_ppgtt = saved_ppgtt;
-
drm_dev_put(&dev_priv->drm);
return err;
@@ -1753,7 +1763,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
struct i915_gem_context *ctx;
int err;
- if (!USES_PPGTT(dev_priv)) {
+ if (!HAS_PPGTT(dev_priv)) {
pr_info("PPGTT not supported, skipping live-selftests\n");
return 0;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 76df25aa90c9..7d82043aff10 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -39,7 +39,8 @@ struct live_test {
const char *func;
const char *name;
- unsigned int reset_count;
+ unsigned int reset_global;
+ unsigned int reset_engine[I915_NUM_ENGINES];
};
static int begin_live_test(struct live_test *t,
@@ -47,6 +48,8 @@ static int begin_live_test(struct live_test *t,
const char *func,
const char *name)
{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err;
t->i915 = i915;
@@ -63,7 +66,11 @@ static int begin_live_test(struct live_test *t,
}
i915->gpu_error.missed_irq_rings = 0;
- t->reset_count = i915_reset_count(&i915->gpu_error);
+ t->reset_global = i915_reset_count(&i915->gpu_error);
+
+ for_each_engine(engine, i915, id)
+ t->reset_engine[id] =
+ i915_reset_engine_count(&i915->gpu_error, engine);
return 0;
}
@@ -71,14 +78,28 @@ static int begin_live_test(struct live_test *t,
static int end_live_test(struct live_test *t)
{
struct drm_i915_private *i915 = t->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (igt_flush_test(i915, I915_WAIT_LOCKED))
return -EIO;
- if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
+ if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
pr_err("%s(%s): GPU was reset %d times!\n",
t->func, t->name,
- i915_reset_count(&i915->gpu_error) - t->reset_count);
+ i915_reset_count(&i915->gpu_error) - t->reset_global);
+ return -EIO;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (t->reset_engine[id] ==
+ i915_reset_engine_count(&i915->gpu_error, engine))
+ continue;
+
+ pr_err("%s(%s): engine '%s' was reset %d times!\n",
+ t->func, t->name, engine->name,
+ i915_reset_engine_count(&i915->gpu_error, engine) -
+ t->reset_engine[id]);
return -EIO;
}
@@ -531,11 +552,11 @@ static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
+ unsigned long ncontexts, ndwords, dw;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
- unsigned long ncontexts, ndwords, dw;
- bool first_shared_gtt = true;
+ struct live_test t;
int err = -ENODEV;
/*
@@ -553,6 +574,10 @@ static int igt_ctx_exec(void *arg)
mutex_lock(&i915->drm.struct_mutex);
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
ncontexts = 0;
ndwords = 0;
dw = 0;
@@ -561,12 +586,7 @@ static int igt_ctx_exec(void *arg)
struct i915_gem_context *ctx;
unsigned int id;
- if (first_shared_gtt) {
- ctx = __create_hw_context(i915, file->driver_priv);
- first_shared_gtt = false;
- } else {
- ctx = i915_gem_create_context(i915, file->driver_priv);
- }
+ ctx = i915_gem_create_context(i915, file->driver_priv);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
@@ -622,7 +642,7 @@ static int igt_ctx_exec(void *arg)
}
out_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (end_live_test(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -634,13 +654,14 @@ static int igt_ctx_readonly(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
+ struct i915_gem_context *ctx;
+ struct i915_hw_ppgtt *ppgtt;
+ unsigned long ndwords, dw;
struct drm_file *file;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
- struct i915_gem_context *ctx;
- struct i915_hw_ppgtt *ppgtt;
- unsigned long ndwords, dw;
+ struct live_test t;
int err = -ENODEV;
/*
@@ -655,6 +676,10 @@ static int igt_ctx_readonly(void *arg)
mutex_lock(&i915->drm.struct_mutex);
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
ctx = i915_gem_create_context(i915, file->driver_priv);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
@@ -727,7 +752,324 @@ static int igt_ctx_readonly(void *arg)
}
out_unlock:
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ if (end_live_test(&t))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ return err;
+}
+
+static int check_scratch(struct i915_gem_context *ctx, u64 offset)
+{
+ struct drm_mm_node *node =
+ __drm_mm_interval_first(&ctx->ppgtt->vm.mm,
+ offset, offset + sizeof(u32) - 1);
+ if (!node || node->start > offset)
+ return 0;
+
+ GEM_BUG_ON(offset >= node->start + node->size);
+
+ pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
+ upper_32_bits(offset), lower_32_bits(offset));
+ return -EINVAL;
+}
+
+static int write_to_scratch(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset, u32 value)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ } else {
+ *cmd++ = 0;
+ *cmd++ = offset;
+ }
+ *cmd++ = value;
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+ if (err)
+ goto err;
+
+ err = check_scratch(ctx, offset);
+ if (err)
+ goto err_unpin;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ if (err)
+ goto err_request;
+
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto skip_request;
+
+ i915_gem_object_set_active_reference(obj);
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_request_add(rq);
+
+ return 0;
+
+skip_request:
+ i915_request_skip(rq, err);
+err_request:
+ i915_request_add(rq);
+err_unpin:
+ i915_vma_unpin(vma);
+err:
+ i915_gem_object_put(obj);
+ return err;
+}
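The batch here is a single MI_STORE_DWORD_IMM; gen8+ parts take a full 64-bit virtual address while older parts take a zero pad followed by a 32-bit address. A sketch of just that emission step, with the command encodings left symbolic (the real values are driver defines):

#include <stdbool.h>
#include <stdint.h>

/* Placeholder encodings; i915 takes these from its command headers. */
enum { STORE_DWORD_IMM = 1, BATCH_END = 2 };

static unsigned int emit_store(uint32_t *cmd, bool gen8,
			       uint64_t offset, uint32_t value)
{
	unsigned int n = 0;

	cmd[n++] = STORE_DWORD_IMM;
	if (gen8) {			/* low dword, then high dword */
		cmd[n++] = (uint32_t)offset;
		cmd[n++] = (uint32_t)(offset >> 32);
	} else {			/* pad, then a 32-bit address */
		cmd[n++] = 0;
		cmd[n++] = (uint32_t)offset;
	}
	cmd[n++] = value;
	cmd[n++] = BATCH_END;
	return n;
}

int main(void)
{
	uint32_t cmd[8];

	return emit_store(cmd, true, 0x10000, 0xdeadbeef) != 5;
}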
+
+static int read_from_scratch(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u64 offset, u32 *value)
+{
+ struct drm_i915_private *i915 = ctx->i915;
+ struct drm_i915_gem_object *obj;
+ const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
+ const u32 result = 0x100;
+ struct i915_request *rq;
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ memset(cmd, POISON_INUSE, PAGE_SIZE);
+ if (INTEL_GEN(i915) >= 8) {
+ *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = result;
+ *cmd++ = 0;
+ } else {
+ *cmd++ = MI_LOAD_REGISTER_MEM;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = offset;
+ *cmd++ = MI_STORE_REGISTER_MEM;
+ *cmd++ = RCS_GPR0;
+ *cmd++ = result;
+ }
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+ if (err)
+ goto err;
+
+ err = check_scratch(ctx, offset);
+ if (err)
+ goto err_unpin;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ if (err)
+ goto err_request;
+
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ if (err)
+ goto skip_request;
+
+ i915_vma_unpin(vma);
+ i915_vma_close(vma);
+
+ i915_request_add(rq);
+
+ err = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (err)
+ goto err;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ *value = cmd[result / sizeof(*cmd)];
+ i915_gem_object_unpin_map(obj);
+ i915_gem_object_put(obj);
+
+ return 0;
+
+skip_request:
+ i915_request_skip(rq, err);
+err_request:
+ i915_request_add(rq);
+err_unpin:
+ i915_vma_unpin(vma);
+err:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int igt_vm_isolation(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_a, *ctx_b;
+ struct intel_engine_cs *engine;
+ struct drm_file *file;
+ I915_RND_STATE(prng);
+ unsigned long count;
+ struct live_test t;
+ unsigned int id;
+ u64 vm_total;
+ int err;
+
+ if (INTEL_GEN(i915) < 7)
+ return 0;
+
+ /*
+ * The simple goal here is that a write into one context is not
+ * observed in a second (separate page tables and scratch).
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
+ ctx_a = i915_gem_create_context(i915, file->driver_priv);
+ if (IS_ERR(ctx_a)) {
+ err = PTR_ERR(ctx_a);
+ goto out_unlock;
+ }
+
+ ctx_b = i915_gem_create_context(i915, file->driver_priv);
+ if (IS_ERR(ctx_b)) {
+ err = PTR_ERR(ctx_b);
+ goto out_unlock;
+ }
+
+ /* We can only test vm isolation if the vms are distinct */
+ if (ctx_a->ppgtt == ctx_b->ppgtt)
+ goto out_unlock;
+
+ vm_total = ctx_a->ppgtt->vm.total;
+ GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
+ vm_total -= I915_GTT_PAGE_SIZE;
+
+ intel_runtime_pm_get(i915);
+
+ count = 0;
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+ unsigned long this = 0;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ while (!__igt_timeout(end_time, NULL)) {
+ u32 value = 0xc5c5c5c5;
+ u64 offset;
+
+ div64_u64_rem(i915_prandom_u64_state(&prng),
+ vm_total, &offset);
+ offset &= -sizeof(u32);
+ offset += I915_GTT_PAGE_SIZE;
+
+ err = write_to_scratch(ctx_a, engine,
+ offset, 0xdeadbeef);
+ if (err == 0)
+ err = read_from_scratch(ctx_b, engine,
+ offset, &value);
+ if (err)
+ goto out_rpm;
+
+ if (value) {
+ pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
+ engine->name, value,
+ upper_32_bits(offset),
+ lower_32_bits(offset),
+ this);
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ this++;
+ }
+ count += this;
+ }
+ pr_info("Checked %lu scratch offsets across %d engines\n",
+ count, INTEL_INFO(i915)->num_rings);
+
+out_rpm:
+ intel_runtime_pm_put(i915);
+out_unlock:
+ if (end_live_test(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -865,33 +1207,6 @@ out_unlock:
return err;
}
-static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
-{
- struct drm_i915_gem_object *obj;
- int err;
-
- err = i915_gem_init_aliasing_ppgtt(i915);
- if (err)
- return err;
-
- list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
- struct i915_vma *vma;
-
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- continue;
-
- vma->flags &= ~I915_VMA_LOCAL_BIND;
- }
-
- return 0;
-}
-
-static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
-{
- i915_gem_fini_aliasing_ppgtt(i915);
-}
-
int i915_gem_context_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -917,32 +1232,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(live_nop_switch),
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
+ SUBTEST(igt_vm_isolation),
};
- bool fake_alias = false;
- int err;
if (i915_terminally_wedged(&dev_priv->gpu_error))
return 0;
- /* Install a fake aliasing gtt for exercise */
- if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
- mutex_lock(&dev_priv->drm.struct_mutex);
- err = fake_aliasing_ppgtt_enable(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (err)
- return err;
-
- GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
- fake_alias = true;
- }
-
- err = i915_subtests(tests, dev_priv);
-
- if (fake_alias) {
- mutex_lock(&dev_priv->drm.struct_mutex);
- fake_aliasing_ppgtt_disable(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- }
-
- return err;
+ return i915_subtests(tests, dev_priv);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 128ad1cf0647..4365979d8222 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -351,7 +351,7 @@ static int igt_evict_contexts(void *arg)
* where the GTT space of the request is separate from the GGTT
* allocation required to build the request.
*/
- if (!USES_FULL_PPGTT(i915))
+ if (!HAS_FULL_PPGTT(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 127d81513671..69fe86b30fbb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -153,7 +153,7 @@ static int igt_ppgtt_alloc(void *arg)
/* Allocate a ppggt and try to fill the entire range */
- if (!USES_PPGTT(dev_priv))
+ if (!HAS_PPGTT(dev_priv))
return 0;
ppgtt = __hw_ppgtt_create(dev_priv);
@@ -1001,7 +1001,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
IGT_TIMEOUT(end_time);
int err;
- if (!USES_FULL_PPGTT(dev_priv))
+ if (!HAS_FULL_PPGTT(dev_priv))
return 0;
file = mock_file(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c
new file mode 100644
index 000000000000..208a966da8ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.c
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_reset.h"
+
+#include "../i915_drv.h"
+#include "../intel_ringbuffer.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ pr_debug("%s: current gpu_error=%08lx\n",
+ __func__, i915->gpu_error.flags);
+
+ while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
+ wait_event(i915->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &i915->gpu_error.flags));
+
+ for_each_engine(engine, i915, id) {
+ while (test_and_set_bit(I915_RESET_ENGINE + id,
+ &i915->gpu_error.flags))
+ wait_on_bit(&i915->gpu_error.flags,
+ I915_RESET_ENGINE + id,
+ TASK_UNINTERRUPTIBLE);
+ }
+}
+
+void igt_global_reset_unlock(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+}
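Both helpers serialise against in-flight resets by looping on test_and_set_bit(): one global I915_RESET_BACKOFF bit plus one bit per engine, all released again on unlock. A userspace analog with C11 atomics, where busy loops stand in for the kernel's wait_event()/wait_on_bit():

#include <stdatomic.h>
#include <stdbool.h>

#define RESET_BACKOFF 0
#define RESET_ENGINE  1	/* + engine id */

static _Atomic unsigned long error_flags;

static bool test_and_set(int bit)
{
	return atomic_fetch_or(&error_flags, 1UL << bit) & (1UL << bit);
}

static void reset_lock(int nengines)
{
	int id;

	while (test_and_set(RESET_BACKOFF))
		;	/* kernel: wait_event(reset_queue, ...) */
	for (id = 0; id < nengines; id++)
		while (test_and_set(RESET_ENGINE + id))
			;	/* kernel: wait_on_bit(...) */
}

static void reset_unlock(int nengines)
{
	int id;

	for (id = 0; id < nengines; id++)
		atomic_fetch_and(&error_flags, ~(1UL << (RESET_ENGINE + id)));
	atomic_fetch_and(&error_flags, ~(1UL << RESET_BACKOFF));
	/* kernel: wake_up_all(&reset_queue) */
}

int main(void)
{
	reset_lock(2);
	reset_unlock(2);
	return 0;
}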
diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.h b/drivers/gpu/drm/i915/selftests/igt_reset.h
new file mode 100644
index 000000000000..5f0234d045d5
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_reset.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_RESET_H__
+#define __I915_SELFTESTS_IGT_RESET_H__
+
+#include "../i915_drv.h"
+
+void igt_global_reset_lock(struct drm_i915_private *i915);
+void igt_global_reset_unlock(struct drm_i915_private *i915);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
new file mode 100644
index 000000000000..8cd34f6e6859
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -0,0 +1,199 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "igt_spinner.h"
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
+{
+ unsigned int mode;
+ void *vaddr;
+ int err;
+
+ GEM_BUG_ON(INTEL_GEN(i915) < 8);
+
+ memset(spin, 0, sizeof(*spin));
+ spin->i915 = i915;
+
+ spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->hws)) {
+ err = PTR_ERR(spin->hws);
+ goto err;
+ }
+
+ spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(spin->obj)) {
+ err = PTR_ERR(spin->obj);
+ goto err_hws;
+ }
+
+ i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+ vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+ spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+ mode = i915_coherent_map_type(i915);
+ vaddr = i915_gem_object_pin_map(spin->obj, mode);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_unpin_hws;
+ }
+ spin->batch = vaddr;
+
+ return 0;
+
+err_unpin_hws:
+ i915_gem_object_unpin_map(spin->hws);
+err_obj:
+ i915_gem_object_put(spin->obj);
+err_hws:
+ i915_gem_object_put(spin->hws);
+err:
+ return err;
+}
+
+static unsigned int seqno_offset(u64 fence)
+{
+ return offset_in_page(sizeof(u32) * fence);
+}
+
+static u64 hws_address(const struct i915_vma *hws,
+ const struct i915_request *rq)
+{
+ return hws->node.start + seqno_offset(rq->fence.context);
+}
+
+static int emit_recurse_batch(struct igt_spinner *spin,
+ struct i915_request *rq,
+ u32 arbitration_command)
+{
+ struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+ struct i915_vma *hws, *vma;
+ u32 *batch;
+ int err;
+
+ vma = i915_vma_instance(spin->obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ hws = i915_vma_instance(spin->hws, vm, NULL);
+ if (IS_ERR(hws))
+ return PTR_ERR(hws);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ err = i915_vma_pin(hws, 0, 0, PIN_USER);
+ if (err)
+ goto unpin_vma;
+
+ err = i915_vma_move_to_active(vma, rq, 0);
+ if (err)
+ goto unpin_hws;
+
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ err = i915_vma_move_to_active(hws, rq, 0);
+ if (err)
+ goto unpin_hws;
+
+ if (!i915_gem_object_has_active_reference(hws->obj)) {
+ i915_gem_object_get(hws->obj);
+ i915_gem_object_set_active_reference(hws->obj);
+ }
+
+ batch = spin->batch;
+
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+
+ *batch++ = arbitration_command;
+
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = upper_32_bits(vma->node.start);
+ *batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+ i915_gem_chipset_flush(spin->i915);
+
+ err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+
+unpin_hws:
+ i915_vma_unpin(hws);
+unpin_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
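The batch built above has a fixed shape: store the request's seqno into its HWS slot (so igt_wait_for_spinner() can tell the batch started), emit the caller's arbitration command, then branch back to the batch's own start; it spins until igt_spinner_end() rewrites the first dword with MI_BATCH_BUFFER_END. A sketch of the emission order, with the encodings left as symbolic placeholders (the real values are driver defines):

#include <stdint.h>

/* Placeholder encodings for the MI_* commands used above. */
enum { STORE_DWORD_IMM = 1, ARB_CMD = 2, BATCH_START = 3, BATCH_END = 4 };

static unsigned int emit_spin(uint32_t *batch, uint64_t hws_addr,
			      uint32_t seqno, uint64_t batch_addr)
{
	unsigned int n = 0;

	batch[n++] = STORE_DWORD_IMM;		/* advertise that we started */
	batch[n++] = (uint32_t)hws_addr;
	batch[n++] = (uint32_t)(hws_addr >> 32);
	batch[n++] = seqno;

	batch[n++] = ARB_CMD;			/* allow or forbid preemption */

	batch[n++] = BATCH_START;		/* jump to our own start... */
	batch[n++] = (uint32_t)batch_addr;
	batch[n++] = (uint32_t)(batch_addr >> 32);
	batch[n++] = BATCH_END;			/* ...so this is never reached */

	return n;
}

int main(void)
{
	uint32_t batch[16];

	return emit_spin(batch, 0x1000, 42, 0x2000) != 9;
}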
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command)
+{
+ struct i915_request *rq;
+ int err;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = emit_recurse_batch(spin, rq, arbitration_command);
+ if (err) {
+ i915_request_add(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static u32
+hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
+{
+ u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
+
+ return READ_ONCE(*seqno);
+}
+
+void igt_spinner_end(struct igt_spinner *spin)
+{
+ *spin->batch = MI_BATCH_BUFFER_END;
+ i915_gem_chipset_flush(spin->i915);
+}
+
+void igt_spinner_fini(struct igt_spinner *spin)
+{
+ igt_spinner_end(spin);
+
+ i915_gem_object_unpin_map(spin->obj);
+ i915_gem_object_put(spin->obj);
+
+ i915_gem_object_unpin_map(spin->hws);
+ i915_gem_object_put(spin->hws);
+}
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
+{
+ if (!wait_event_timeout(rq->execute,
+ READ_ONCE(rq->global_seqno),
+ msecs_to_jiffies(10)))
+ return false;
+
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(spin, rq),
+ rq->fence.seqno),
+ 1000));
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
new file mode 100644
index 000000000000..391777c76dc7
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef __I915_SELFTESTS_IGT_SPINNER_H__
+#define __I915_SELFTESTS_IGT_SPINNER_H__
+
+#include "../i915_selftest.h"
+
+#include "../i915_drv.h"
+#include "../i915_request.h"
+#include "../intel_ringbuffer.h"
+#include "../i915_gem_context.h"
+
+struct igt_spinner {
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_object *hws;
+ struct drm_i915_gem_object *obj;
+ u32 *batch;
+ void *seqno;
+};
+
+int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915);
+void igt_spinner_fini(struct igt_spinner *spin);
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command);
+void igt_spinner_end(struct igt_spinner *spin);
+
+bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq);
+
+#endif
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 0c0ab82b6228..32cba4cae31a 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -159,6 +159,7 @@ static int igt_guc_clients(void *args)
* Get rid of clients created during driver load because the test will
* recreate them.
*/
+ guc_clients_disable(guc);
guc_clients_destroy(guc);
if (guc->execbuf_client || guc->preempt_client) {
pr_err("guc_clients_destroy lied!\n");
@@ -197,8 +198,8 @@ static int igt_guc_clients(void *args)
goto out;
}
- /* Now create the doorbells */
- guc_clients_doorbell_init(guc);
+ /* Now enable the clients */
+ guc_clients_enable(guc);
/* each client should now have received a doorbell */
if (!client_doorbell_in_sync(guc->execbuf_client) ||
@@ -212,63 +213,17 @@ static int igt_guc_clients(void *args)
* Basic test - an attempt to reallocate a valid doorbell to the
* client it is currently assigned should not cause a failure.
*/
- err = guc_clients_doorbell_init(guc);
- if (err)
- goto out;
-
- /*
- * Negative test - a client with no doorbell (invalid db id).
- * After destroying the doorbell, the db id is changed to
- * GUC_DOORBELL_INVALID and the firmware will reject any attempt to
- * allocate a doorbell with an invalid id (db has to be reserved before
- * allocation).
- */
- destroy_doorbell(guc->execbuf_client);
- if (client_doorbell_in_sync(guc->execbuf_client)) {
- pr_err("destroy db did not work\n");
- err = -EINVAL;
- goto out;
- }
-
- unreserve_doorbell(guc->execbuf_client);
-
- __create_doorbell(guc->execbuf_client);
- err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
- if (err != -EIO) {
- pr_err("unexpected (err = %d)", err);
- goto out_db;
- }
-
- if (!available_dbs(guc, guc->execbuf_client->priority)) {
- pr_err("doorbell not available when it should\n");
- err = -EIO;
- goto out_db;
- }
-
-out_db:
- /* clean after test */
- __destroy_doorbell(guc->execbuf_client);
- err = reserve_doorbell(guc->execbuf_client);
- if (err) {
- pr_err("failed to reserve back the doorbell back\n");
- }
err = create_doorbell(guc->execbuf_client);
- if (err) {
- pr_err("recreate doorbell failed\n");
- goto out;
- }
out:
/*
* Leave clean state for other tests, plus the driver always destroys the
* clients during unload.
*/
- destroy_doorbell(guc->execbuf_client);
- if (guc->preempt_client)
- destroy_doorbell(guc->preempt_client);
+ guc_clients_disable(guc);
guc_clients_destroy(guc);
guc_clients_create(guc);
- guc_clients_doorbell_init(guc);
+ guc_clients_enable(guc);
unlock:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -352,7 +307,7 @@ static int igt_guc_doorbells(void *arg)
db_id = clients[i]->doorbell_id;
- err = create_doorbell(clients[i]);
+ err = __guc_client_enable(clients[i]);
if (err) {
pr_err("[%d] Failed to create a doorbell\n", i);
goto out;
@@ -378,7 +333,7 @@ static int igt_guc_doorbells(void *arg)
out:
for (i = 0; i < ATTEMPTS; i++)
if (!IS_ERR_OR_NULL(clients[i])) {
- destroy_doorbell(clients[i]);
+ __guc_client_disable(clients[i]);
guc_client_free(clients[i]);
}
unlock:
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index db378226ac10..40efbed611de 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -27,6 +27,7 @@
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
+#include "igt_reset.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
@@ -76,7 +77,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
vaddr = i915_gem_object_pin_map(h->obj,
- HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC);
+ i915_coherent_map_type(i915));
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_unpin_hws;
@@ -234,7 +235,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
return ERR_CAST(obj);
vaddr = i915_gem_object_pin_map(obj,
- HAS_LLC(h->i915) ? I915_MAP_WB : I915_MAP_WC);
+ i915_coherent_map_type(h->i915));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
return ERR_CAST(vaddr);
@@ -308,6 +309,7 @@ static int igt_hang_sanitycheck(void *arg)
goto unlock;
for_each_engine(engine, i915, id) {
+ struct igt_wedge_me w;
long timeout;
if (!intel_engine_can_store_dword(engine))
@@ -328,9 +330,14 @@ static int igt_hang_sanitycheck(void *arg)
i915_request_add(rq);
- timeout = i915_request_wait(rq,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
+ timeout = 0;
+ igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+ timeout = i915_request_wait(rq,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (i915_terminally_wedged(&i915->gpu_error))
+ timeout = -EIO;
+
i915_request_put(rq);
if (timeout < 0) {
@@ -348,40 +355,6 @@ unlock:
return err;
}
-static void global_reset_lock(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- pr_debug("%s: current gpu_error=%08lx\n",
- __func__, i915->gpu_error.flags);
-
- while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
-
- for_each_engine(engine, i915, id) {
- while (test_and_set_bit(I915_RESET_ENGINE + id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
- I915_RESET_ENGINE + id,
- TASK_UNINTERRUPTIBLE);
- }
-}
-
-static void global_reset_unlock(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id)
- clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
-
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
-}
-
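The two helpers removed above now live in the shared selftests/igt_reset.[ch], so every reset selftest serialises against the error handler the same way. The interface, assumed from the removed bodies and the call sites below:

	/* igt_reset.h (sketch) */
	void igt_global_reset_lock(struct drm_i915_private *i915);
	/* takes I915_RESET_BACKOFF plus every per-engine reset bit,
	 * waiting out any reset already in flight */
	void igt_global_reset_unlock(struct drm_i915_private *i915);
	/* clears those bits and wakes gpu_error.reset_queue */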
static int igt_global_reset(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -390,7 +363,7 @@ static int igt_global_reset(void *arg)
/* Check that we can issue a global GPU reset */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
mutex_lock(&i915->drm.struct_mutex);
@@ -405,7 +378,7 @@ static int igt_global_reset(void *arg)
mutex_unlock(&i915->drm.struct_mutex);
GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
err = -EIO;
@@ -936,7 +909,7 @@ static int igt_reset_wait(void *arg)
/* Check that we detect a stuck waiter and issue a reset */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -988,7 +961,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -1066,7 +1039,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
/* Check that we can recover an unbind stuck on a hanging request */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -1150,6 +1123,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
tsk = NULL;
goto out_reset;
}
+ get_task_struct(tsk);
wait_for_completion(&arg.completion);
@@ -1172,6 +1146,8 @@ out_reset:
/* The reset, even indirectly, should take less than 10ms. */
 igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
err = kthread_stop(tsk);
+
+ put_task_struct(tsk);
}
mutex_lock(&i915->drm.struct_mutex);
@@ -1183,7 +1159,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
@@ -1263,7 +1239,7 @@ static int igt_reset_queue(void *arg)
/* Check that we replay pending requests following a hang */
- global_reset_lock(i915);
+ igt_global_reset_lock(i915);
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
@@ -1394,7 +1370,7 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- global_reset_unlock(i915);
+ igt_global_reset_unlock(i915);
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO;
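The get_task_struct()/put_task_struct() pair added in __igt_reset_evict_vma() closes a race: kthread_stop() must be able to dereference the task_struct even if the thread has already exited on its own. The pattern, with fn and arg as illustrative placeholders:

	tsk = kthread_run(fn, &arg, "igt/worker");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	get_task_struct(tsk);		/* pin across the thread's exit */

	/* ... let the worker run ... */

	err = kthread_stop(tsk);	/* returns the thread's exit code */
	put_task_struct(tsk);		/* drop our pin */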
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1aea7a8f2224..ca461e3a5f27 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -6,215 +6,18 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
+#include "igt_spinner.h"
+#include "i915_random.h"
#include "mock_context.h"
-struct spinner {
- struct drm_i915_private *i915;
- struct drm_i915_gem_object *hws;
- struct drm_i915_gem_object *obj;
- u32 *batch;
- void *seqno;
-};
-
-static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
-{
- unsigned int mode;
- void *vaddr;
- int err;
-
- GEM_BUG_ON(INTEL_GEN(i915) < 8);
-
- memset(spin, 0, sizeof(*spin));
- spin->i915 = i915;
-
- spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(spin->hws)) {
- err = PTR_ERR(spin->hws);
- goto err;
- }
-
- spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(spin->obj)) {
- err = PTR_ERR(spin->obj);
- goto err_hws;
- }
-
- i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
- vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_obj;
- }
- spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
-
- mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
- vaddr = i915_gem_object_pin_map(spin->obj, mode);
- if (IS_ERR(vaddr)) {
- err = PTR_ERR(vaddr);
- goto err_unpin_hws;
- }
- spin->batch = vaddr;
-
- return 0;
-
-err_unpin_hws:
- i915_gem_object_unpin_map(spin->hws);
-err_obj:
- i915_gem_object_put(spin->obj);
-err_hws:
- i915_gem_object_put(spin->hws);
-err:
- return err;
-}
-
-static unsigned int seqno_offset(u64 fence)
-{
- return offset_in_page(sizeof(u32) * fence);
-}
-
-static u64 hws_address(const struct i915_vma *hws,
- const struct i915_request *rq)
-{
- return hws->node.start + seqno_offset(rq->fence.context);
-}
-
-static int emit_recurse_batch(struct spinner *spin,
- struct i915_request *rq,
- u32 arbitration_command)
-{
- struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
- struct i915_vma *hws, *vma;
- u32 *batch;
- int err;
-
- vma = i915_vma_instance(spin->obj, vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- hws = i915_vma_instance(spin->hws, vm, NULL);
- if (IS_ERR(hws))
- return PTR_ERR(hws);
-
- err = i915_vma_pin(vma, 0, 0, PIN_USER);
- if (err)
- return err;
-
- err = i915_vma_pin(hws, 0, 0, PIN_USER);
- if (err)
- goto unpin_vma;
-
- err = i915_vma_move_to_active(vma, rq, 0);
- if (err)
- goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
- }
-
- err = i915_vma_move_to_active(hws, rq, 0);
- if (err)
- goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(hws->obj)) {
- i915_gem_object_get(hws->obj);
- i915_gem_object_set_active_reference(hws->obj);
- }
-
- batch = spin->batch;
-
- *batch++ = MI_STORE_DWORD_IMM_GEN4;
- *batch++ = lower_32_bits(hws_address(hws, rq));
- *batch++ = upper_32_bits(hws_address(hws, rq));
- *batch++ = rq->fence.seqno;
-
- *batch++ = arbitration_command;
-
- *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
- *batch++ = lower_32_bits(vma->node.start);
- *batch++ = upper_32_bits(vma->node.start);
- *batch++ = MI_BATCH_BUFFER_END; /* not reached */
-
- i915_gem_chipset_flush(spin->i915);
-
- err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
-
-unpin_hws:
- i915_vma_unpin(hws);
-unpin_vma:
- i915_vma_unpin(vma);
- return err;
-}
-
-static struct i915_request *
-spinner_create_request(struct spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 arbitration_command)
-{
- struct i915_request *rq;
- int err;
-
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return rq;
-
- err = emit_recurse_batch(spin, rq, arbitration_command);
- if (err) {
- i915_request_add(rq);
- return ERR_PTR(err);
- }
-
- return rq;
-}
-
-static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
-{
- u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);
-
- return READ_ONCE(*seqno);
-}
-
-static void spinner_end(struct spinner *spin)
-{
- *spin->batch = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(spin->i915);
-}
-
-static void spinner_fini(struct spinner *spin)
-{
- spinner_end(spin);
-
- i915_gem_object_unpin_map(spin->obj);
- i915_gem_object_put(spin->obj);
-
- i915_gem_object_unpin_map(spin->hws);
- i915_gem_object_put(spin->hws);
-}
-
-static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
-{
- if (!wait_event_timeout(rq->execute,
- READ_ONCE(rq->global_seqno),
- msecs_to_jiffies(10)))
- return false;
-
- return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
- rq->fence.seqno),
- 10) &&
- wait_for(i915_seqno_passed(hws_seqno(spin, rq),
- rq->fence.seqno),
- 1000));
-}
-
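All of the spinner machinery deleted above moved verbatim into the shared selftests/igt_spinner.[ch]. Its lifecycle, mirrored from the call sites below (the error labels are illustrative):

	struct igt_spinner spin;
	struct i915_request *rq;

	if (igt_spinner_init(&spin, i915))	/* pin batch + HWS page */
		goto err;
	rq = igt_spinner_create_request(&spin, ctx, engine, MI_ARB_CHECK);
	if (IS_ERR(rq))
		goto err_fini;
	i915_request_add(rq);
	if (!igt_wait_for_spinner(&spin, rq))	/* never started: wedge */
		goto err_wedged;
	igt_spinner_end(&spin);		/* rewrite to MI_BATCH_BUFFER_END */
	igt_spinner_fini(&spin);	/* unpin and release the objects */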
static int live_sanitycheck(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
- struct spinner spin;
+ struct igt_spinner spin;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
@@ -223,7 +26,7 @@ static int live_sanitycheck(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin, i915))
+ if (igt_spinner_init(&spin, i915))
goto err_unlock;
ctx = kernel_context(i915);
@@ -233,14 +36,14 @@ static int live_sanitycheck(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin, rq)) {
+ if (!igt_wait_for_spinner(&spin, rq)) {
GEM_TRACE("spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -248,7 +51,7 @@ static int live_sanitycheck(void *arg)
goto err_ctx;
}
- spinner_end(&spin);
+ igt_spinner_end(&spin);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx;
@@ -259,7 +62,7 @@ static int live_sanitycheck(void *arg)
err_ctx:
kernel_context_close(ctx);
err_spin:
- spinner_fini(&spin);
+ igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -271,7 +74,7 @@ static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -282,34 +85,36 @@ static int live_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
if (!ctx_hi)
goto err_spin_lo;
- ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+ ctx_hi->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
ctx_lo = kernel_context(i915);
if (!ctx_lo)
goto err_ctx_hi;
- ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+ ctx_lo->sched.priority =
+ I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -317,16 +122,16 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -334,8 +139,8 @@ static int live_preempt(void *arg)
goto err_ctx_lo;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -348,9 +153,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -362,7 +167,7 @@ static int live_late_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
@@ -374,10 +179,10 @@ static int live_late_preempt(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -391,43 +196,44 @@ static int live_late_preempt(void *arg)
for_each_engine(engine, i915, id) {
struct i915_request *rq;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
pr_err("First context failed to start\n");
goto err_wedged;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_NOOP);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (wait_for_spinner(&spin_hi, rq)) {
+ if (igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("Second context overtook first?\n");
goto err_wedged;
}
- attr.priority = I915_PRIORITY_MAX;
+ attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
engine->schedule(rq, &attr);
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
pr_err("High priority context failed to preempt the low priority context\n");
GEM_TRACE_DUMP();
goto err_wedged;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -440,9 +246,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -450,8 +256,8 @@ err_unlock:
return err;
err_wedged:
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
i915_gem_set_wedged(i915);
err = -EIO;
goto err_ctx_lo;
@@ -461,7 +267,7 @@ static int live_preempt_hang(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_hi, *ctx_lo;
- struct spinner spin_hi, spin_lo;
+ struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int err = -ENOMEM;
@@ -475,10 +281,10 @@ static int live_preempt_hang(void *arg)
mutex_lock(&i915->drm.struct_mutex);
intel_runtime_pm_get(i915);
- if (spinner_init(&spin_hi, i915))
+ if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
- if (spinner_init(&spin_lo, i915))
+ if (igt_spinner_init(&spin_lo, i915))
goto err_spin_hi;
ctx_hi = kernel_context(i915);
@@ -497,15 +303,15 @@ static int live_preempt_hang(void *arg)
if (!intel_engine_has_preemption(engine))
continue;
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_ctx_lo;
}
i915_request_add(rq);
- if (!wait_for_spinner(&spin_lo, rq)) {
+ if (!igt_wait_for_spinner(&spin_lo, rq)) {
GEM_TRACE("lo spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -513,10 +319,10 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo;
}
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
+ rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
+ MI_ARB_CHECK);
if (IS_ERR(rq)) {
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_lo);
err = PTR_ERR(rq);
goto err_ctx_lo;
}
@@ -541,7 +347,7 @@ static int live_preempt_hang(void *arg)
engine->execlists.preempt_hang.inject_hang = false;
- if (!wait_for_spinner(&spin_hi, rq)) {
+ if (!igt_wait_for_spinner(&spin_hi, rq)) {
GEM_TRACE("hi spinner failed to start\n");
GEM_TRACE_DUMP();
i915_gem_set_wedged(i915);
@@ -549,8 +355,8 @@ static int live_preempt_hang(void *arg)
goto err_ctx_lo;
}
- spinner_end(&spin_hi);
- spinner_end(&spin_lo);
+ igt_spinner_end(&spin_hi);
+ igt_spinner_end(&spin_lo);
if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
err = -EIO;
goto err_ctx_lo;
@@ -563,9 +369,9 @@ err_ctx_lo:
err_ctx_hi:
kernel_context_close(ctx_hi);
err_spin_lo:
- spinner_fini(&spin_lo);
+ igt_spinner_fini(&spin_lo);
err_spin_hi:
- spinner_fini(&spin_hi);
+ igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
intel_runtime_pm_put(i915);
@@ -573,6 +379,261 @@ err_unlock:
return err;
}
+static int random_range(struct rnd_state *rnd, int min, int max)
+{
+ return i915_prandom_u32_max_state(max - min, rnd) + min;
+}
+
+static int random_priority(struct rnd_state *rnd)
+{
+ return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
+}
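random_range() maps a bounded PRNG draw onto an arbitrary half-open interval; a worked view, assuming i915_prandom_u32_max_state(n, rnd) returns a uniform value in [0, n):

	/*
	 * random_range(rnd, min, max)
	 *   = i915_prandom_u32_max_state(max - min, rnd) + min
	 *   = uniform over [0, max - min) + min
	 *   = uniform over [min, max)
	 *
	 * hence random_priority() draws uniformly from
	 * [I915_PRIORITY_MIN, I915_PRIORITY_MAX).
	 */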
+
+struct preempt_smoke {
+ struct drm_i915_private *i915;
+ struct i915_gem_context **contexts;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *batch;
+ unsigned int ncontext;
+ struct rnd_state prng;
+ unsigned long count;
+};
+
+static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
+{
+ return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
+ &smoke->prng)];
+}
+
+static int smoke_submit(struct preempt_smoke *smoke,
+ struct i915_gem_context *ctx, int prio,
+ struct drm_i915_gem_object *batch)
+{
+ struct i915_request *rq;
+ struct i915_vma *vma = NULL;
+ int err = 0;
+
+ if (batch) {
+ vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+ }
+
+ ctx->sched.priority = prio;
+
+ rq = i915_request_alloc(smoke->engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto unpin;
+ }
+
+ if (vma) {
+ err = rq->engine->emit_bb_start(rq,
+ vma->node.start,
+ PAGE_SIZE, 0);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, 0);
+ }
+
+ i915_request_add(rq);
+
+unpin:
+ if (vma)
+ i915_vma_unpin(vma);
+
+ return err;
+}
+
+static int smoke_crescendo_thread(void *arg)
+{
+ struct preempt_smoke *smoke = arg;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ mutex_lock(&smoke->i915->drm.struct_mutex);
+ err = smoke_submit(smoke,
+ ctx, count % I915_PRIORITY_MAX,
+ smoke->batch);
+ mutex_unlock(&smoke->i915->drm.struct_mutex);
+ if (err)
+ return err;
+
+ count++;
+ } while (!__igt_timeout(end_time, NULL));
+
+ smoke->count = count;
+ return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+ struct task_struct *tsk[I915_NUM_ENGINES] = {};
+ struct preempt_smoke arg[I915_NUM_ENGINES];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long count;
+ int err = 0;
+
+ mutex_unlock(&smoke->i915->drm.struct_mutex);
+
+ for_each_engine(engine, smoke->i915, id) {
+ arg[id] = *smoke;
+ arg[id].engine = engine;
+ if (!(flags & BATCH))
+ arg[id].batch = NULL;
+ arg[id].count = 0;
+
+ tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+ "igt/smoke:%d", id);
+ if (IS_ERR(tsk[id])) {
+ err = PTR_ERR(tsk[id]);
+ break;
+ }
+ get_task_struct(tsk[id]);
+ }
+
+ count = 0;
+ for_each_engine(engine, smoke->i915, id) {
+ int status;
+
+ if (IS_ERR_OR_NULL(tsk[id]))
+ continue;
+
+ status = kthread_stop(tsk[id]);
+ if (status && !err)
+ err = status;
+
+ count += arg[id].count;
+
+ put_task_struct(tsk[id]);
+ }
+
+ mutex_lock(&smoke->i915->drm.struct_mutex);
+
+ pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+ count, flags,
+ INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ return err;
+}
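smoke_crescendo() fans one worker out per engine and joins them with kthread_stop(), summing the per-engine submission counts. Each worker needs its own arg[id] copy so the engine and count fields do not alias between threads. A condensed view of the fan-out/fan-in:

	for_each_engine(engine, smoke->i915, id) {
		arg[id] = *smoke;		/* private copy per worker */
		arg[id].engine = engine;
		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
				      "igt/smoke:%d", id);
		get_task_struct(tsk[id]);
	}
	for_each_engine(engine, smoke->i915, id) {
		status = kthread_stop(tsk[id]);	/* join + collect status */
		count += arg[id].count;
		put_task_struct(tsk[id]);
	}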
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+ enum intel_engine_id id;
+ IGT_TIMEOUT(end_time);
+ unsigned long count;
+
+ count = 0;
+ do {
+ for_each_engine(smoke->engine, smoke->i915, id) {
+ struct i915_gem_context *ctx = smoke_context(smoke);
+ int err;
+
+ err = smoke_submit(smoke,
+ ctx, random_priority(&smoke->prng),
+ flags & BATCH ? smoke->batch : NULL);
+ if (err)
+ return err;
+
+ count++;
+ }
+ } while (!__igt_timeout(end_time, NULL));
+
+ pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+ count, flags,
+ INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ return 0;
+}
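The two phases stress preemption differently: smoke_crescendo() runs concurrently per engine with a monotonically ramping priority (count % I915_PRIORITY_MAX), while smoke_random() stays single-threaded and scatters uniformly random priorities across every engine. A worked view of the crescendo ramp:

	/*
	 * request #:  0  1  2 ... MAX-1  MAX  MAX+1 ...
	 * priority:   0  1  2 ... MAX-1   0     1   ...
	 *
	 * Each request outranks its predecessor until the band wraps,
	 * so the scheduler must preempt on nearly every submission.
	 */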
+
+static int live_preempt_smoke(void *arg)
+{
+ struct preempt_smoke smoke = {
+ .i915 = arg,
+ .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
+ .ncontext = 1024,
+ };
+ const unsigned int phase[] = { 0, BATCH };
+ int err = -ENOMEM;
+ u32 *cs;
+ int n;
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
+ return 0;
+
+ smoke.contexts = kmalloc_array(smoke.ncontext,
+ sizeof(*smoke.contexts),
+ GFP_KERNEL);
+ if (!smoke.contexts)
+ return -ENOMEM;
+
+ mutex_lock(&smoke.i915->drm.struct_mutex);
+ intel_runtime_pm_get(smoke.i915);
+
+ smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+ if (IS_ERR(smoke.batch)) {
+ err = PTR_ERR(smoke.batch);
+ goto err_unlock;
+ }
+
+ cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_batch;
+ }
+ for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+ cs[n] = MI_ARB_CHECK;
+ cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(smoke.batch);
+
+ err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
+ if (err)
+ goto err_batch;
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ smoke.contexts[n] = kernel_context(smoke.i915);
+ if (!smoke.contexts[n])
+ goto err_ctx;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(phase); n++) {
+ err = smoke_crescendo(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+
+ err = smoke_random(&smoke, phase[n]);
+ if (err)
+ goto err_ctx;
+ }
+
+err_ctx:
+ if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ for (n = 0; n < smoke.ncontext; n++) {
+ if (!smoke.contexts[n])
+ break;
+ kernel_context_close(smoke.contexts[n]);
+ }
+
+err_batch:
+ i915_gem_object_put(smoke.batch);
+err_unlock:
+ intel_runtime_pm_put(smoke.i915);
+ mutex_unlock(&smoke.i915->drm.struct_mutex);
+ kfree(smoke.contexts);
+
+ return err;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
@@ -580,6 +641,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_preempt_hang),
+ SUBTEST(live_preempt_smoke),
};
if (!HAS_EXECLISTS(i915))
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index d1a0923d2f38..67017d5175b8 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -6,6 +6,9 @@
#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+#include "igt_reset.h"
+#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
@@ -91,17 +94,23 @@ err_obj:
return ERR_PTR(err);
}
-static u32 get_whitelist_reg(const struct whitelist *w, unsigned int i)
+static u32
+get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
{
- return i < w->count ? i915_mmio_reg_offset(w->reg[i]) : w->nopid;
+ i915_reg_t reg = i < engine->whitelist.count ?
+ engine->whitelist.list[i].reg :
+ RING_NOPID(engine->mmio_base);
+
+ return i915_mmio_reg_offset(reg);
}
-static void print_results(const struct whitelist *w, const u32 *results)
+static void
+print_results(const struct intel_engine_cs *engine, const u32 *results)
{
unsigned int i;
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
- u32 expected = get_whitelist_reg(w, i);
+ u32 expected = get_whitelist_reg(engine, i);
u32 actual = results[i];
pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
@@ -109,8 +118,7 @@ static void print_results(const struct whitelist *w, const u32 *results)
}
}
-static int check_whitelist(const struct whitelist *w,
- struct i915_gem_context *ctx,
+static int check_whitelist(struct i915_gem_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *results;
@@ -138,11 +146,11 @@ static int check_whitelist(const struct whitelist *w,
}
for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
- u32 expected = get_whitelist_reg(w, i);
+ u32 expected = get_whitelist_reg(engine, i);
u32 actual = vaddr[i];
if (expected != actual) {
- print_results(w, vaddr);
+ print_results(engine, vaddr);
pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
i, expected, actual);
@@ -159,66 +167,107 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, ENGINE_MASK(engine->id), NULL);
+ set_bit(I915_RESET_HANDOFF, &engine->i915->gpu_error.flags);
+ i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
return 0;
}
static int do_engine_reset(struct intel_engine_cs *engine)
{
- return i915_reset_engine(engine, NULL);
+ return i915_reset_engine(engine, "live_workarounds");
}
-static int switch_to_scratch_context(struct intel_engine_cs *engine)
+static int
+switch_to_scratch_context(struct intel_engine_cs *engine,
+ struct igt_spinner *spin)
{
struct i915_gem_context *ctx;
struct i915_request *rq;
+ int err = 0;
ctx = kernel_context(engine->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
intel_runtime_pm_get(engine->i915);
- rq = i915_request_alloc(engine, ctx);
+
+ if (spin)
+ rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
+ else
+ rq = i915_request_alloc(engine, ctx);
+
intel_runtime_pm_put(engine->i915);
kernel_context_close(ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+
+ if (IS_ERR(rq)) {
+ spin = NULL;
+ err = PTR_ERR(rq);
+ goto err;
+ }
i915_request_add(rq);
- return 0;
+ if (spin && !igt_wait_for_spinner(spin, rq)) {
+ pr_err("Spinner failed to start\n");
+ err = -ETIMEDOUT;
+ }
+
+err:
+ if (err && spin)
+ igt_spinner_end(spin);
+
+ return err;
}
static int check_whitelist_across_reset(struct intel_engine_cs *engine,
int (*reset)(struct intel_engine_cs *),
- const struct whitelist *w,
const char *name)
{
+ struct drm_i915_private *i915 = engine->i915;
+ bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
+ struct igt_spinner spin;
int err;
- ctx = kernel_context(engine->i915);
+ pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
+ engine->whitelist.count, name);
+
+ if (want_spin) {
+ err = igt_spinner_init(&spin, i915);
+ if (err)
+ return err;
+ }
+
+ ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *before* %s reset!\n", name);
goto out;
}
- err = switch_to_scratch_context(engine);
+ err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
if (err)
goto out;
+ intel_runtime_pm_get(i915);
err = reset(engine);
+ intel_runtime_pm_put(i915);
+
+ if (want_spin) {
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+ }
+
if (err) {
pr_err("%s reset failed\n", name);
goto out;
}
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Whitelist not preserved in context across %s reset!\n",
name);
@@ -227,11 +276,11 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
kernel_context_close(ctx);
- ctx = kernel_context(engine->i915);
+ ctx = kernel_context(i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- err = check_whitelist(w, ctx, engine);
+ err = check_whitelist(ctx, engine);
if (err) {
pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
name);
@@ -247,26 +296,18 @@ static int live_reset_whitelist(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine = i915->engine[RCS];
- struct i915_gpu_error *error = &i915->gpu_error;
- struct whitelist w;
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
- if (!engine)
- return 0;
-
- if (!whitelist_build(engine, &w))
+ if (!engine || engine->whitelist.count == 0)
return 0;
- pr_info("Checking %d whitelisted registers (RING_NONPRIV)\n", w.count);
-
- set_bit(I915_RESET_BACKOFF, &error->flags);
- set_bit(I915_RESET_ENGINE + engine->id, &error->flags);
+ igt_global_reset_lock(i915);
if (intel_has_reset_engine(i915)) {
err = check_whitelist_across_reset(engine,
- do_engine_reset, &w,
+ do_engine_reset,
"engine");
if (err)
goto out;
@@ -274,22 +315,156 @@ static int live_reset_whitelist(void *arg)
if (intel_has_gpu_reset(i915)) {
err = check_whitelist_across_reset(engine,
- do_device_reset, &w,
+ do_device_reset,
"device");
if (err)
goto out;
}
out:
- clear_bit(I915_RESET_ENGINE + engine->id, &error->flags);
- clear_bit(I915_RESET_BACKOFF, &error->flags);
+ igt_global_reset_unlock(i915);
return err;
}
+static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ bool ok = true;
+
+ ok &= intel_gt_verify_workarounds(i915, str);
+
+ for_each_engine(engine, i915, id)
+ ok &= intel_engine_verify_workarounds(engine, str);
+
+ return ok;
+}
+
+static int
+live_gpu_reset_gt_engine_workarounds(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gpu_error *error = &i915->gpu_error;
+ bool ok;
+
+ if (!intel_has_gpu_reset(i915))
+ return 0;
+
+ pr_info("Verifying after GPU reset...\n");
+
+ igt_global_reset_lock(i915);
+
+ ok = verify_gt_engine_wa(i915, "before reset");
+ if (!ok)
+ goto out;
+
+ intel_runtime_pm_get(i915);
+ set_bit(I915_RESET_HANDOFF, &error->flags);
+ i915_reset(i915, ALL_ENGINES, "live_workarounds");
+ intel_runtime_pm_put(i915);
+
+ ok = verify_gt_engine_wa(i915, "after reset");
+
+out:
+ igt_global_reset_unlock(i915);
+
+ return ok ? 0 : -ESRCH;
+}
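Both reset flavours used by these tests follow the same shape as do_engine_reset()/do_device_reset() above; a full-device reset additionally announces the handoff so i915_reset() knows the caller owns the recovery:

	/* Single-engine reset: no handoff required. */
	err = i915_reset_engine(engine, "live_workarounds");

	/* Full-device reset: claim ownership, then reset everything. */
	set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
	i915_reset(i915, ALL_ENGINES, "live_workarounds");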
+
+static int
+live_engine_reset_gt_engine_workarounds(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ struct igt_spinner spin;
+ enum intel_engine_id id;
+ struct i915_request *rq;
+ int ret = 0;
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ igt_global_reset_lock(i915);
+
+ for_each_engine(engine, i915, id) {
+ bool ok;
+
+ pr_info("Verifying after %s reset...\n", engine->name);
+
+ ok = verify_gt_engine_wa(i915, "before reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+
+ intel_runtime_pm_get(i915);
+ i915_reset_engine(engine, "live_workarounds");
+ intel_runtime_pm_put(i915);
+
+ ok = verify_gt_engine_wa(i915, "after idle reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+
+ ret = igt_spinner_init(&spin, i915);
+ if (ret)
+ goto err;
+
+ intel_runtime_pm_get(i915);
+
+ rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ igt_spinner_fini(&spin);
+ intel_runtime_pm_put(i915);
+ goto err;
+ }
+
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ pr_err("Spinner failed to start\n");
+ igt_spinner_fini(&spin);
+ intel_runtime_pm_put(i915);
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ i915_reset_engine(engine, "live_workarounds");
+
+ intel_runtime_pm_put(i915);
+
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+ ok = verify_gt_engine_wa(i915, "after busy reset");
+ if (!ok) {
+ ret = -ESRCH;
+ goto err;
+ }
+ }
+
+err:
+ igt_global_reset_unlock(i915);
+ kernel_context_close(ctx);
+
+ igt_flush_test(i915, I915_WAIT_LOCKED);
+
+ return ret;
+}
+
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_reset_whitelist),
+ SUBTEST(live_gpu_reset_gt_engine_workarounds),
+ SUBTEST(live_engine_reset_gt_engine_workarounds),
};
int err;
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 22a73da45ad5..d0c44c18db42 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -200,7 +200,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.submit_request = mock_submit_request;
i915_timeline_init(i915, &engine->base.timeline, engine->base.name);
- lockdep_set_subclass(&engine->base.timeline.lock, TIMELINE_ENGINE);
+ i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
intel_engine_init_breadcrumbs(&engine->base);
engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 435a2c35ee8c..361e962a7969 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -206,39 +206,6 @@ static const struct mipi_dsi_host_ops intel_dsi_host_ops = {
.transfer = intel_dsi_host_transfer,
};
-static struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi,
- enum port port)
-{
- struct intel_dsi_host *host;
- struct mipi_dsi_device *device;
-
- host = kzalloc(sizeof(*host), GFP_KERNEL);
- if (!host)
- return NULL;
-
- host->base.ops = &intel_dsi_host_ops;
- host->intel_dsi = intel_dsi;
- host->port = port;
-
- /*
- * We should call mipi_dsi_host_register(&host->base) here, but we don't
- * have a host->dev, and we don't have OF stuff either. So just use the
- * dsi framework as a library and hope for the best. Create the dsi
- * devices by ourselves here too. Need to be careful though, because we
- * don't initialize any of the driver model devices here.
- */
- device = kzalloc(sizeof(*device), GFP_KERNEL);
- if (!device) {
- kfree(host);
- return NULL;
- }
-
- device->host = &host->base;
- host->device = device;
-
- return host;
-}
-
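The helper removed above moved into the common DSI code, now parameterised by the host ops so other DSI front-ends can share it. The new signature, assumed from the vlv_dsi_init() call site below:

	/* sketch, inferred from the call site */
	struct intel_dsi_host *
	intel_dsi_host_init(struct intel_dsi *intel_dsi,
			    const struct mipi_dsi_host_ops *funcs,
			    enum port port);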
/*
* send a video mode command
*
@@ -290,16 +257,6 @@ static void band_gap_reset(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->sb_lock);
}
-static inline bool is_vid_mode(struct intel_dsi *intel_dsi)
-{
- return intel_dsi->operation_mode == INTEL_DSI_VIDEO_MODE;
-}
-
-static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
-{
- return intel_dsi->operation_mode == INTEL_DSI_COMMAND_MODE;
-}
-
static bool intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -314,6 +271,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
int ret;
DRM_DEBUG_KMS("\n");
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (fixed_mode) {
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
@@ -745,17 +703,6 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
const struct intel_crtc_state *pipe_config);
static void intel_dsi_unprepare(struct intel_encoder *encoder);
-static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
-
- /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
- if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
- return;
-
- msleep(msec);
-}
-
/*
* Panel enable/disable sequences from the VBT spec.
*
@@ -793,6 +740,10 @@ static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
* - wait t4 - wait t4
*/
+/*
+ * DSI port enable has to be done before pipe and plane enable, so we do it in
+ * the pre_enable hook instead of the enable hook.
+ */
static void intel_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
@@ -895,17 +846,6 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
}
/*
- * DSI port enable has to be done before pipe and plane enable, so we do it in
- * the pre_enable hook.
- */
-static void intel_dsi_enable_nop(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
-{
- DRM_DEBUG_KMS("\n");
-}
-
-/*
* DSI port disable has to be done after pipe and plane disable, so we do it in
* the post_disable hook.
*/
@@ -1272,31 +1212,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
}
}
-static enum drm_mode_status
-intel_dsi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
-
- DRM_DEBUG_KMS("\n");
-
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
- if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
- if (fixed_mode->clock > max_dotclk)
- return MODE_CLOCK_HIGH;
- }
-
- return MODE_OK;
-}
-
/* return txclkesc cycles in terms of divider and duration in us */
static u16 txclkesc(u32 divider, unsigned int us)
{
@@ -1619,39 +1534,6 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
}
}
-static int intel_dsi_get_modes(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_display_mode *mode;
-
- DRM_DEBUG_KMS("\n");
-
- if (!intel_connector->panel.fixed_mode) {
- DRM_DEBUG_KMS("no fixed mode\n");
- return 0;
- }
-
- mode = drm_mode_duplicate(connector->dev,
- intel_connector->panel.fixed_mode);
- if (!mode) {
- DRM_DEBUG_KMS("drm_mode_duplicate failed\n");
- return 0;
- }
-
- drm_mode_probed_add(connector, mode);
- return 1;
-}
-
-static void intel_dsi_connector_destroy(struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- DRM_DEBUG_KMS("\n");
- intel_panel_fini(&intel_connector->panel);
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
@@ -1676,7 +1558,7 @@ static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs
static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
- .destroy = intel_dsi_connector_destroy,
+ .destroy = intel_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
@@ -1684,27 +1566,57 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static int intel_dsi_get_panel_orientation(struct intel_connector *connector)
+static enum drm_panel_orientation
+vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- int orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
- enum i9xx_plane_id i9xx_plane;
+ struct intel_encoder *encoder = connector->encoder;
+ enum intel_display_power_domain power_domain;
+ enum drm_panel_orientation orientation;
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+ enum pipe pipe;
u32 val;
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- if (connector->encoder->crtc_mask == BIT(PIPE_B))
- i9xx_plane = PLANE_B;
- else
- i9xx_plane = PLANE_A;
+ if (!encoder->get_hw_state(encoder, &pipe))
+ return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
- val = I915_READ(DSPCNTR(i9xx_plane));
- if (val & DISPPLANE_ROTATE_180)
- orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
- }
+ crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ plane = to_intel_plane(crtc->base.primary);
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+
+ val = I915_READ(DSPCNTR(plane->i9xx_plane));
+
+ if (!(val & DISPLAY_PLANE_ENABLE))
+ orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
+ else if (val & DISPPLANE_ROTATE_180)
+ orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
+ else
+ orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
+
+ intel_display_power_put(dev_priv, power_domain);
return orientation;
}
+static enum drm_panel_orientation
+vlv_dsi_get_panel_orientation(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum drm_panel_orientation orientation;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ orientation = vlv_dsi_get_hw_panel_orientation(connector);
+ if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return orientation;
+ }
+
+ return intel_dsi_get_panel_orientation(connector);
+}
+
static void intel_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1722,7 +1634,7 @@ static void intel_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
connector->base.display_info.panel_orientation =
- intel_dsi_get_panel_orientation(connector);
+ vlv_dsi_get_panel_orientation(connector);
drm_connector_init_panel_orientation_property(
&connector->base,
connector->panel.fixed_mode->hdisplay,
@@ -1773,7 +1685,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_enable = intel_dsi_pre_enable;
- intel_encoder->enable = intel_dsi_enable_nop;
intel_encoder->disable = intel_dsi_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1806,7 +1717,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
for_each_dsi_port(port, intel_dsi->ports) {
struct intel_dsi_host *host;
- host = intel_dsi_host_init(intel_dsi, port);
+ host = intel_dsi_host_init(intel_dsi, &intel_dsi_host_ops,
+ port);
if (!host)
goto err;
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index fe6becdcc29e..77a26fd3a44a 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2011-2013 Freescale Semiconductor, Inc.
*
 * derived from imx-hdmi.c (renamed to bridge/dw_hdmi.c now)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 0e6942f21a4e..820c7e3878f0 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale i.MX drm driver
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/component.h>
#include <linux/device.h>
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 3bd0f8a18e74..2c5bbe317353 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - LVDS display bridge
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index cffd3310240e..293dd5752583 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - Television Encoder (TVEv2)
*
* Copyright (C) 2013 Philipp Zabel, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -442,7 +434,7 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static struct clk_ops clk_tve_di_ops = {
+static const struct clk_ops clk_tve_di_ops = {
.round_rate = clk_tve_di_round_rate,
.set_rate = clk_tve_di_set_rate,
.recalc_rate = clk_tve_di_recalc_rate,
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 7d4b710b837a..058b53c0aa7e 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX IPUv3 Graphics driver
*
* Copyright (C) 2011 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/component.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 40605fdf0e33..c390924de93d 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX IPUv3 DP Overlay Planes
*
* Copyright (C) 2013 Philipp Zabel, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <drm/drmP.h>
@@ -236,9 +228,15 @@ static void ipu_plane_enable(struct ipu_plane *ipu_plane)
void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel)
{
+ int ret;
+
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
- ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+ ret = ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
+ if (ret == -ETIMEDOUT) {
+ DRM_ERROR("[PLANE:%d] IDMAC timeout\n",
+ ipu_plane->base.base.id);
+ }
if (ipu_plane->dp && disable_dp_channel)
ipu_dp_disable_channel(ipu_plane->dp, false);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index aefd04e18f93..f3ce51121dd6 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* i.MX drm driver - parallel display implementation
*
* Copyright (C) 2012 Sascha Hauer, Pengutronix
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/component.h>
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 66df1b177959..27b507eb4a99 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -818,10 +818,13 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
dsi->encoder.possible_crtcs = 1;
/* If there's a bridge, attach to it and let it create the connector */
- ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
- if (ret) {
- DRM_ERROR("Failed to attach bridge to drm\n");
-
+ if (dsi->bridge) {
+ ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to attach bridge to drm\n");
+ goto err_encoder_cleanup;
+ }
+ } else {
/* Otherwise create our own connector and attach to a panel */
ret = mtk_dsi_create_connector(drm, dsi);
if (ret)
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 3ce51d8dfe1c..c28b69f48555 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -7,6 +7,7 @@ config DRM_MESON
select DRM_GEM_CMA_HELPER
select VIDEOMODE_HELPERS
select REGMAP_MMIO
+ select MESON_CANVAS
config DRM_MESON_DW_HDMI
tristate "HDMI Synopsys Controller support for Amlogic Meson Display"
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index c5c4cc362f02..7709f2fbb9f7 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,5 +1,5 @@
meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
-meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o
+meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o
obj-$(CONFIG_DRM_MESON) += meson-drm.o
obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
index 08f6073d967e..5de11aa7c775 100644
--- a/drivers/gpu/drm/meson/meson_canvas.c
+++ b/drivers/gpu/drm/meson/meson_canvas.c
@@ -39,6 +39,7 @@
#define CANVAS_WIDTH_HBIT 0
#define CANVAS_HEIGHT_BIT 9
#define CANVAS_BLKMODE_BIT 24
+#define CANVAS_ENDIAN_BIT 26
#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */
#define CANVAS_LUT_WR_EN (0x2 << 8)
#define CANVAS_LUT_RD_EN (0x1 << 8)
@@ -47,7 +48,8 @@ void meson_canvas_setup(struct meson_drm *priv,
uint32_t canvas_index, uint32_t addr,
uint32_t stride, uint32_t height,
unsigned int wrap,
- unsigned int blkmode)
+ unsigned int blkmode,
+ unsigned int endian)
{
unsigned int val;
@@ -60,7 +62,8 @@ void meson_canvas_setup(struct meson_drm *priv,
CANVAS_WIDTH_HBIT) |
(height << CANVAS_HEIGHT_BIT) |
(wrap << 22) |
- (blkmode << CANVAS_BLKMODE_BIT));
+ (blkmode << CANVAS_BLKMODE_BIT) |
+ (endian << CANVAS_ENDIAN_BIT));
regmap_write(priv->dmc, DMC_CAV_LUT_ADDR,
CANVAS_LUT_WR_EN | canvas_index);
diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h
index af1759da4b27..85dbf26e2826 100644
--- a/drivers/gpu/drm/meson/meson_canvas.h
+++ b/drivers/gpu/drm/meson/meson_canvas.h
@@ -23,6 +23,9 @@
#define __MESON_CANVAS_H
#define MESON_CANVAS_ID_OSD1 0x4e
+#define MESON_CANVAS_ID_VD1_0 0x60
+#define MESON_CANVAS_ID_VD1_1 0x61
+#define MESON_CANVAS_ID_VD1_2 0x62
/* Canvas configuration. */
#define MESON_CANVAS_WRAP_NONE 0x00
@@ -33,10 +36,16 @@
#define MESON_CANVAS_BLKMODE_32x32 0x01
#define MESON_CANVAS_BLKMODE_64x64 0x02
+#define MESON_CANVAS_ENDIAN_SWAP16 0x1
+#define MESON_CANVAS_ENDIAN_SWAP32 0x3
+#define MESON_CANVAS_ENDIAN_SWAP64 0x7
+#define MESON_CANVAS_ENDIAN_SWAP128 0xf
+
void meson_canvas_setup(struct meson_drm *priv,
uint32_t canvas_index, uint32_t addr,
uint32_t stride, uint32_t height,
unsigned int wrap,
- unsigned int blkmode);
+ unsigned int blkmode,
+ unsigned int endian);
#endif /* __MESON_CANVAS_H */
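The new ENDIAN constants appear to be cumulative swap masks: 0x1 swaps adjacent 16-bit words, 0x3 also swaps 32-bit pairs, 0x7 also 64-bit halves, 0xf also 128-bit blocks. An example of the extended eight-argument setup, mirroring the VD1 usage in meson_crtc.c below (addr, stride and height are illustrative values):

	meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0,
			   addr, stride, height,
			   MESON_CANVAS_WRAP_NONE,
			   MESON_CANVAS_BLKMODE_LINEAR,
			   MESON_CANVAS_ENDIAN_SWAP64);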
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 05520202c967..75d97f1b2e8f 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/bitfield.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -45,6 +46,7 @@ struct meson_crtc {
struct drm_crtc base;
struct drm_pending_vblank_event *event;
struct meson_drm *priv;
+ bool enabled;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@@ -80,8 +82,7 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
};
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+static void meson_crtc_enable(struct drm_crtc *crtc)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
@@ -98,9 +99,29 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
writel(crtc_state->mode.hdisplay,
priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
+ /* VD1 Preblend vertical start/end */
+ writel(FIELD_PREP(GENMASK(11, 0), 2303),
+ priv->io_base + _REG(VPP_PREBLEND_VD1_V_START_END));
+
writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
priv->io_base + _REG(VPP_MISC));
+ drm_crtc_vblank_on(crtc);
+
+ meson_crtc->enabled = true;
+}
+
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct meson_drm *priv = meson_crtc->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!meson_crtc->enabled)
+ meson_crtc_enable(crtc);
+
priv->viu.osd1_enabled = true;
}
@@ -110,11 +131,19 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_crtc_vblank_off(crtc);
+
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
+ priv->viu.vd1_enabled = false;
+ priv->viu.vd1_commit = false;
+
/* Disable VPP Postblend */
- writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_VD1_POSTBLEND |
+ VPP_VD1_PREBLEND | VPP_POSTBLEND_ENABLE, 0,
priv->io_base + _REG(VPP_MISC));
if (crtc->state->event && !crtc->state->active) {
@@ -124,6 +153,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
crtc->state->event = NULL;
}
+
+ meson_crtc->enabled = false;
}
static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -132,6 +163,9 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
unsigned long flags;
+ if (crtc->state->enable && !meson_crtc->enabled)
+ meson_crtc_enable(crtc);
+
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
@@ -149,6 +183,7 @@ static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
struct meson_drm *priv = meson_crtc->priv;
priv->viu.osd1_commit = true;
+ priv->viu.vd1_commit = true;
}
static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
@@ -177,26 +212,37 @@ void meson_crtc_irq(struct meson_drm *priv)
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W3));
writel_relaxed(priv->viu.osd1_blk0_cfg[4],
priv->io_base + _REG(VIU_OSD1_BLK0_CFG_W4));
-
- /* If output is interlace, make use of the Scaler */
- if (priv->viu.osd1_interlace) {
- struct drm_plane *plane = priv->primary_plane;
- struct drm_plane_state *state = plane->state;
- struct drm_rect dest = {
- .x1 = state->crtc_x,
- .y1 = state->crtc_y,
- .x2 = state->crtc_x + state->crtc_w,
- .y2 = state->crtc_y + state->crtc_h,
- };
-
- meson_vpp_setup_interlace_vscaler_osd1(priv, &dest);
- } else
- meson_vpp_disable_interlace_vscaler_osd1(priv);
-
- meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
- priv->viu.osd1_addr, priv->viu.osd1_stride,
- priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR);
+ writel_relaxed(priv->viu.osd_sc_ctrl0,
+ priv->io_base + _REG(VPP_OSD_SC_CTRL0));
+ writel_relaxed(priv->viu.osd_sc_i_wh_m1,
+ priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
+ writel_relaxed(priv->viu.osd_sc_o_h_start_end,
+ priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
+ writel_relaxed(priv->viu.osd_sc_o_v_start_end,
+ priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
+ writel_relaxed(priv->viu.osd_sc_v_ini_phase,
+ priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
+ writel_relaxed(priv->viu.osd_sc_v_phase_step,
+ priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
+ writel_relaxed(priv->viu.osd_sc_h_ini_phase,
+ priv->io_base + _REG(VPP_OSD_HSC_INI_PHASE));
+ writel_relaxed(priv->viu.osd_sc_h_phase_step,
+ priv->io_base + _REG(VPP_OSD_HSC_PHASE_STEP));
+ writel_relaxed(priv->viu.osd_sc_h_ctrl0,
+ priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+ writel_relaxed(priv->viu.osd_sc_v_ctrl0,
+ priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
+
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
+ priv->viu.osd1_addr, priv->viu.osd1_stride,
+ priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR, 0);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+ priv->viu.osd1_addr, priv->viu.osd1_stride,
+ priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR, 0);
/* Enable OSD1 */
writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
@@ -205,6 +251,206 @@ void meson_crtc_irq(struct meson_drm *priv)
priv->viu.osd1_commit = false;
}
+ /* Update the VD1 registers */
+ if (priv->viu.vd1_enabled && priv->viu.vd1_commit) {
+
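+		/* Program one canvas per color plane; the switch cases fall through */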
+ switch (priv->viu.vd1_planes) {
+ case 3:
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_2,
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2,
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ /* fallthrough */
+ case 2:
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_1,
+ priv->viu.vd1_addr1,
+ priv->viu.vd1_stride1,
+ priv->viu.vd1_height1,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1,
+ priv->viu.vd1_addr1,
+ priv->viu.vd1_stride1,
+ priv->viu.vd1_height1,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ /* fallthrough */
+ case 1:
+ if (priv->canvas)
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_0,
+ priv->viu.vd1_addr0,
+ priv->viu.vd1_stride0,
+ priv->viu.vd1_height0,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ else
+ meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0,
+ priv->viu.vd1_addr0,
+ priv->viu.vd1_stride0,
+ priv->viu.vd1_height0,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
+ }
+
+ writel_relaxed(priv->viu.vd1_if0_gen_reg,
+ priv->io_base + _REG(VD1_IF0_GEN_REG));
+ writel_relaxed(priv->viu.vd1_if0_gen_reg,
+ priv->io_base + _REG(VD2_IF0_GEN_REG));
+ writel_relaxed(priv->viu.vd1_if0_gen_reg2,
+ priv->io_base + _REG(VD1_IF0_GEN_REG2));
+ writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
+ priv->io_base + _REG(VIU_VD1_FMT_CTRL));
+ writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
+ priv->io_base + _REG(VIU_VD2_FMT_CTRL));
+ writel_relaxed(priv->viu.viu_vd1_fmt_w,
+ priv->io_base + _REG(VIU_VD1_FMT_W));
+ writel_relaxed(priv->viu.viu_vd1_fmt_w,
+ priv->io_base + _REG(VIU_VD2_FMT_W));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD1_IF0_CANVAS0));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD1_IF0_CANVAS1));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD2_IF0_CANVAS0));
+ writel_relaxed(priv->viu.vd1_if0_canvas0,
+ priv->io_base + _REG(VD2_IF0_CANVAS1));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD1_IF0_LUMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD1_IF0_LUMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD2_IF0_LUMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_luma_x0,
+ priv->io_base + _REG(VD2_IF0_LUMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD1_IF0_LUMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD1_IF0_LUMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD2_IF0_LUMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_luma_y0,
+ priv->io_base + _REG(VD2_IF0_LUMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_X0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_x0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_X1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD1_IF0_CHROMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_Y0));
+ writel_relaxed(priv->viu.vd1_if0_chroma_y0,
+ priv->io_base + _REG(VD2_IF0_CHROMA_Y1));
+ writel_relaxed(priv->viu.vd1_if0_repeat_loop,
+ priv->io_base + _REG(VD1_IF0_RPT_LOOP));
+ writel_relaxed(priv->viu.vd1_if0_repeat_loop,
+ priv->io_base + _REG(VD2_IF0_RPT_LOOP));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_LUMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_LUMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_LUMA1_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_LUMA1_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_CHROMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_CHROMA0_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD1_IF0_CHROMA1_RPT_PAT));
+ writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
+ priv->io_base + _REG(VD2_IF0_CHROMA1_RPT_PAT));
+ writel_relaxed(0, priv->io_base + _REG(VD1_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + _REG(VD1_IF0_CHROMA_PSEL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_IF0_CHROMA_PSEL));
+ writel_relaxed(priv->viu.vd1_range_map_y,
+ priv->io_base + _REG(VD1_IF0_RANGE_MAP_Y));
+ writel_relaxed(priv->viu.vd1_range_map_cb,
+ priv->io_base + _REG(VD1_IF0_RANGE_MAP_CB));
+ writel_relaxed(priv->viu.vd1_range_map_cr,
+ priv->io_base + _REG(VD1_IF0_RANGE_MAP_CR));
+ writel_relaxed(0x78404,
+ priv->io_base + _REG(VPP_SC_MISC));
+ writel_relaxed(priv->viu.vpp_pic_in_height,
+ priv->io_base + _REG(VPP_PIC_IN_HEIGHT));
+ writel_relaxed(priv->viu.vpp_postblend_vd1_h_start_end,
+ priv->io_base + _REG(VPP_POSTBLEND_VD1_H_START_END));
+ writel_relaxed(priv->viu.vpp_blend_vd2_h_start_end,
+ priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+ writel_relaxed(priv->viu.vpp_postblend_vd1_v_start_end,
+ priv->io_base + _REG(VPP_POSTBLEND_VD1_V_START_END));
+ writel_relaxed(priv->viu.vpp_blend_vd2_v_start_end,
+ priv->io_base + _REG(VPP_BLEND_VD2_V_START_END));
+ writel_relaxed(priv->viu.vpp_hsc_region12_startp,
+ priv->io_base + _REG(VPP_HSC_REGION12_STARTP));
+ writel_relaxed(priv->viu.vpp_hsc_region34_startp,
+ priv->io_base + _REG(VPP_HSC_REGION34_STARTP));
+ writel_relaxed(priv->viu.vpp_hsc_region4_endp,
+ priv->io_base + _REG(VPP_HSC_REGION4_ENDP));
+ writel_relaxed(priv->viu.vpp_hsc_start_phase_step,
+ priv->io_base + _REG(VPP_HSC_START_PHASE_STEP));
+ writel_relaxed(priv->viu.vpp_hsc_region1_phase_slope,
+ priv->io_base + _REG(VPP_HSC_REGION1_PHASE_SLOPE));
+ writel_relaxed(priv->viu.vpp_hsc_region3_phase_slope,
+ priv->io_base + _REG(VPP_HSC_REGION3_PHASE_SLOPE));
+ writel_relaxed(priv->viu.vpp_line_in_length,
+ priv->io_base + _REG(VPP_LINE_IN_LENGTH));
+ writel_relaxed(priv->viu.vpp_preblend_h_size,
+ priv->io_base + _REG(VPP_PREBLEND_H_SIZE));
+ writel_relaxed(priv->viu.vpp_vsc_region12_startp,
+ priv->io_base + _REG(VPP_VSC_REGION12_STARTP));
+ writel_relaxed(priv->viu.vpp_vsc_region34_startp,
+ priv->io_base + _REG(VPP_VSC_REGION34_STARTP));
+ writel_relaxed(priv->viu.vpp_vsc_region4_endp,
+ priv->io_base + _REG(VPP_VSC_REGION4_ENDP));
+ writel_relaxed(priv->viu.vpp_vsc_start_phase_step,
+ priv->io_base + _REG(VPP_VSC_START_PHASE_STEP));
+ writel_relaxed(priv->viu.vpp_vsc_ini_phase,
+ priv->io_base + _REG(VPP_VSC_INI_PHASE));
+ writel_relaxed(priv->viu.vpp_vsc_phase_ctrl,
+ priv->io_base + _REG(VPP_VSC_PHASE_CTRL));
+ writel_relaxed(priv->viu.vpp_hsc_phase_ctrl,
+ priv->io_base + _REG(VPP_HSC_PHASE_CTRL));
+ writel_relaxed(0x42, priv->io_base + _REG(VPP_SCALE_COEF_IDX));
+
+ /* Enable VD1 */
+ writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ priv->io_base + _REG(VPP_MISC));
+
+ priv->viu.vd1_commit = false;
+ }
+
drm_crtc_handle_vblank(priv->crtc);
spin_lock_irqsave(&priv->drm->event_lock, flags);
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index d3443125e661..3ee4d4a4ecba 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -41,6 +41,7 @@
#include "meson_drv.h"
#include "meson_plane.h"
+#include "meson_overlay.h"
#include "meson_crtc.h"
#include "meson_venc_cvbs.h"
@@ -68,15 +69,7 @@
* - Powering Up HDMI controller and PHY
*/
-static void meson_fb_output_poll_changed(struct drm_device *dev)
-{
- struct meson_drm *priv = dev->dev_private;
-
- drm_fbdev_cma_hotplug_event(priv->fbdev);
-}
-
static const struct drm_mode_config_funcs meson_mode_config_funcs = {
- .output_poll_changed = meson_fb_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
@@ -216,24 +209,51 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
goto free_drm;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
- if (!res) {
- ret = -EINVAL;
- goto free_drm;
- }
- /* Simply ioremap since it may be a shared register zone */
- regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!regs) {
- ret = -EADDRNOTAVAIL;
- goto free_drm;
- }
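+ /* Use the canvas provider if available, else fall back to the DMC regmap */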
+ priv->canvas = meson_canvas_get(dev);
+ if (!IS_ERR(priv->canvas)) {
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
+ if (ret)
+ goto free_drm;
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+ goto free_drm;
+ }
+ } else {
+ priv->canvas = NULL;
- priv->dmc = devm_regmap_init_mmio(dev, regs,
- &meson_regmap_config);
- if (IS_ERR(priv->dmc)) {
- dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
- ret = PTR_ERR(priv->dmc);
- goto free_drm;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
+ if (!res) {
+ ret = -EINVAL;
+ goto free_drm;
+ }
+ /* Simply ioremap since it may be a shared register zone */
+ regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!regs) {
+ ret = -EADDRNOTAVAIL;
+ goto free_drm;
+ }
+
+ priv->dmc = devm_regmap_init_mmio(dev, regs,
+ &meson_regmap_config);
+ if (IS_ERR(priv->dmc)) {
+ dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
+ ret = PTR_ERR(priv->dmc);
+ goto free_drm;
+ }
}
priv->vsync_irq = platform_get_irq(pdev, 0);
@@ -272,6 +292,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto free_drm;
+ ret = meson_overlay_create(priv);
+ if (ret)
+ goto free_drm;
+
ret = meson_crtc_create(priv);
if (ret)
goto free_drm;
@@ -282,13 +306,6 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
drm_mode_config_reset(drm);
- priv->fbdev = drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_connector);
- if (IS_ERR(priv->fbdev)) {
- ret = PTR_ERR(priv->fbdev);
- goto free_drm;
- }
-
drm_kms_helper_poll_init(drm);
platform_set_drvdata(pdev, priv);
@@ -297,6 +314,8 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto free_drm;
+ drm_fbdev_generic_setup(drm, 32);
+
return 0;
free_drm:
@@ -315,9 +334,15 @@ static void meson_drv_unbind(struct device *dev)
struct drm_device *drm = dev_get_drvdata(dev);
struct meson_drm *priv = drm->dev_private;
+ if (priv->canvas) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_2);
+ }
+
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
- drm_fbdev_cma_fini(priv->fbdev);
drm_mode_config_cleanup(drm);
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 8450d6ac8c9b..4dccf4cd042a 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -22,6 +22,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/of.h>
+#include <linux/soc/amlogic/meson-canvas.h>
#include <drm/drmP.h>
struct meson_drm {
@@ -31,10 +32,16 @@ struct meson_drm {
struct regmap *dmc;
int vsync_irq;
+ struct meson_canvas *canvas;
+ u8 canvas_id_osd1;
+ u8 canvas_id_vd1_0;
+ u8 canvas_id_vd1_1;
+ u8 canvas_id_vd1_2;
+
struct drm_device *drm;
struct drm_crtc *crtc;
- struct drm_fbdev_cma *fbdev;
struct drm_plane *primary_plane;
+ struct drm_plane *overlay_plane;
/* Components Data */
struct {
@@ -46,6 +53,64 @@ struct meson_drm {
uint32_t osd1_addr;
uint32_t osd1_stride;
uint32_t osd1_height;
+ uint32_t osd_sc_ctrl0;
+ uint32_t osd_sc_i_wh_m1;
+ uint32_t osd_sc_o_h_start_end;
+ uint32_t osd_sc_o_v_start_end;
+ uint32_t osd_sc_v_ini_phase;
+ uint32_t osd_sc_v_phase_step;
+ uint32_t osd_sc_h_ini_phase;
+ uint32_t osd_sc_h_phase_step;
+ uint32_t osd_sc_h_ctrl0;
+ uint32_t osd_sc_v_ctrl0;
+
+ bool vd1_enabled;
+ bool vd1_commit;
+ unsigned int vd1_planes;
+ uint32_t vd1_if0_gen_reg;
+ uint32_t vd1_if0_luma_x0;
+ uint32_t vd1_if0_luma_y0;
+ uint32_t vd1_if0_chroma_x0;
+ uint32_t vd1_if0_chroma_y0;
+ uint32_t vd1_if0_repeat_loop;
+ uint32_t vd1_if0_luma0_rpt_pat;
+ uint32_t vd1_if0_chroma0_rpt_pat;
+ uint32_t vd1_range_map_y;
+ uint32_t vd1_range_map_cb;
+ uint32_t vd1_range_map_cr;
+ uint32_t viu_vd1_fmt_w;
+ uint32_t vd1_if0_canvas0;
+ uint32_t vd1_if0_gen_reg2;
+ uint32_t viu_vd1_fmt_ctrl;
+ uint32_t vd1_addr0;
+ uint32_t vd1_addr1;
+ uint32_t vd1_addr2;
+ uint32_t vd1_stride0;
+ uint32_t vd1_stride1;
+ uint32_t vd1_stride2;
+ uint32_t vd1_height0;
+ uint32_t vd1_height1;
+ uint32_t vd1_height2;
+ uint32_t vpp_pic_in_height;
+ uint32_t vpp_postblend_vd1_h_start_end;
+ uint32_t vpp_postblend_vd1_v_start_end;
+ uint32_t vpp_hsc_region12_startp;
+ uint32_t vpp_hsc_region34_startp;
+ uint32_t vpp_hsc_region4_endp;
+ uint32_t vpp_hsc_start_phase_step;
+ uint32_t vpp_hsc_region1_phase_slope;
+ uint32_t vpp_hsc_region3_phase_slope;
+ uint32_t vpp_line_in_length;
+ uint32_t vpp_preblend_h_size;
+ uint32_t vpp_vsc_region12_startp;
+ uint32_t vpp_vsc_region34_startp;
+ uint32_t vpp_vsc_region4_endp;
+ uint32_t vpp_vsc_start_phase_step;
+ uint32_t vpp_vsc_ini_phase;
+ uint32_t vpp_vsc_phase_ctrl;
+ uint32_t vpp_hsc_phase_ctrl;
+ uint32_t vpp_blend_vd2_h_start_end;
+ uint32_t vpp_blend_vd2_v_start_end;
} viu;
struct {
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index df7247cd93f9..807111ebfdd9 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -594,17 +594,7 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
vclk_freq, venc_freq, hdmi_freq);
- /* Finally filter by configurable vclk frequencies for VIC modes */
- switch (vclk_freq) {
- case 54000:
- case 74250:
- case 148500:
- case 297000:
- case 594000:
- return MODE_OK;
- }
-
- return MODE_CLOCK_RANGE;
+ return meson_vclk_vic_supported_freq(vclk_freq);
}
/* Encoder */
@@ -706,6 +696,7 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
.reg_read = meson_dw_hdmi_reg_read,
.reg_write = meson_dw_hdmi_reg_write,
.max_register = 0x10000,
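+	/* fast_io: regmap locks with a spinlock instead of a mutex */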
+ .fast_io = true,
};
static bool meson_hdmi_connector_is_available(struct device *dev)
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
new file mode 100644
index 000000000000..691a9fd16b36
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -0,0 +1,588 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/bitfield.h>
+#include <linux/platform_device.h>
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_rect.h>
+
+#include "meson_overlay.h"
+#include "meson_vpp.h"
+#include "meson_viu.h"
+#include "meson_canvas.h"
+#include "meson_registers.h"
+
+/* VD1_IF0_GEN_REG */
+#define VD_URGENT_CHROMA BIT(28)
+#define VD_URGENT_LUMA BIT(27)
+#define VD_HOLD_LINES(lines) FIELD_PREP(GENMASK(24, 19), lines)
+#define VD_DEMUX_MODE_RGB BIT(16)
+#define VD_BYTES_PER_PIXEL(val) FIELD_PREP(GENMASK(15, 14), val)
+#define VD_CHRO_RPT_LASTL_CTRL BIT(6)
+#define VD_LITTLE_ENDIAN BIT(4)
+#define VD_SEPARATE_EN BIT(1)
+#define VD_ENABLE BIT(0)
+
+/* VD1_IF0_CANVAS0 */
+#define CANVAS_ADDR2(addr) FIELD_PREP(GENMASK(23, 16), addr)
+#define CANVAS_ADDR1(addr) FIELD_PREP(GENMASK(15, 8), addr)
+#define CANVAS_ADDR0(addr) FIELD_PREP(GENMASK(7, 0), addr)
+
+/* VD1_IF0_LUMA_X0 VD1_IF0_CHROMA_X0 */
+#define VD_X_START(value) FIELD_PREP(GENMASK(14, 0), value)
+#define VD_X_END(value) FIELD_PREP(GENMASK(30, 16), value)
+
+/* VD1_IF0_LUMA_Y0 VD1_IF0_CHROMA_Y0 */
+#define VD_Y_START(value) FIELD_PREP(GENMASK(12, 0), value)
+#define VD_Y_END(value) FIELD_PREP(GENMASK(28, 16), value)
+
+/* VD1_IF0_GEN_REG2 */
+#define VD_COLOR_MAP(value) FIELD_PREP(GENMASK(1, 0), value)
+
+/* VIU_VD1_FMT_CTRL */
+#define VD_HORZ_Y_C_RATIO(value) FIELD_PREP(GENMASK(22, 21), value)
+#define VD_HORZ_FMT_EN BIT(20)
+#define VD_VERT_RPT_LINE0 BIT(16)
+#define VD_VERT_INITIAL_PHASE(value) FIELD_PREP(GENMASK(11, 8), value)
+#define VD_VERT_PHASE_STEP(value) FIELD_PREP(GENMASK(7, 1), value)
+#define VD_VERT_FMT_EN BIT(0)
+
+/* VPP_POSTBLEND_VD1_H_START_END */
+#define VD_H_END(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_H_START(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_POSTBLEND_VD1_V_START_END */
+#define VD_V_END(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_V_START(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_BLEND_VD2_V_START_END */
+#define VD2_V_END(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD2_V_START(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VIU_VD1_FMT_W */
+#define VD_V_WIDTH(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_H_WIDTH(value) FIELD_PREP(GENMASK(27, 16), value)
+
+/* VPP_HSC_REGION12_STARTP VPP_HSC_REGION34_STARTP */
+#define VD_REGION24_START(value) FIELD_PREP(GENMASK(11, 0), value)
+#define VD_REGION13_END(value) FIELD_PREP(GENMASK(27, 16), value)
+
+struct meson_overlay {
+ struct drm_plane base;
+ struct meson_drm *priv;
+};
+#define to_meson_overlay(x) container_of(x, struct meson_overlay, base)
+
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
+static int meson_overlay_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+
+ if (!state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
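+	/*
+	 * Scale factors are src/dst in 16.16 fixed point: allow up to
+	 * 5x upscaling and 5x downscaling in each direction.
+	 */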
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ FRAC_16_16(1, 5),
+ FRAC_16_16(5, 1),
+ true, true);
+}
+
+/* Takes a fixed 16.16 number and converts it to integer. */
+static inline int64_t fixed16_to_int(int64_t value)
+{
+ return value >> 16;
+}
+
+static const uint8_t skip_tab[6] = {
+ 0x24, 0x04, 0x68, 0x48, 0x28, 0x08,
+};
+
+static void meson_overlay_get_vertical_phase(unsigned int ratio_y, int *phase,
+ int *repeat, bool interlace)
+{
+ int offset_in = 0;
+ int offset_out = 0;
+ int repeat_skip = 0;
+
+ if (!interlace && ratio_y > (1 << 18))
+ offset_out = (1 * ratio_y) >> 10;
+
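+	/*
+	 * offset_in/offset_out are in .8 fixed point (1/256th of a line);
+	 * each repeat skip consumes four source lines (4 << 8).
+	 */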
+ while ((offset_in + (4 << 8)) <= offset_out) {
+ repeat_skip++;
+ offset_in += 4 << 8;
+ }
+
+ *phase = (offset_out - offset_in) >> 2;
+
+ if (*phase > 0x100)
+ repeat_skip++;
+
+ *phase = *phase & 0xff;
+
+ if (repeat_skip > 5)
+ repeat_skip = 5;
+
+ *repeat = skip_tab[repeat_skip];
+}
+
+static void meson_overlay_setup_scaler_params(struct meson_drm *priv,
+ struct drm_plane *plane,
+ bool interlace_mode)
+{
+ struct drm_crtc_state *crtc_state = priv->crtc->state;
+ int video_top, video_left, video_width, video_height;
+ struct drm_plane_state *state = plane->state;
+ unsigned int vd_start_lines, vd_end_lines;
+ unsigned int hd_start_lines, hd_end_lines;
+ unsigned int crtc_height, crtc_width;
+ unsigned int vsc_startp, vsc_endp;
+ unsigned int hsc_startp, hsc_endp;
+ unsigned int crop_top, crop_left;
+ int vphase, vphase_repeat_skip;
+ unsigned int ratio_x, ratio_y;
+ int temp_height, temp_width;
+ unsigned int w_in, h_in;
+ int temp, start, end;
+
+ if (!crtc_state) {
+ DRM_ERROR("Invalid crtc_state\n");
+ return;
+ }
+
+ crtc_height = crtc_state->mode.vdisplay;
+ crtc_width = crtc_state->mode.hdisplay;
+
+ w_in = fixed16_to_int(state->src_w);
+ h_in = fixed16_to_int(state->src_h);
+ crop_top = fixed16_to_int(state->src_y);
+ crop_left = fixed16_to_int(state->src_x);
+
+ video_top = state->crtc_y;
+ video_left = state->crtc_x;
+ video_width = state->crtc_w;
+ video_height = state->crtc_h;
+
+ DRM_DEBUG("crtc_width %d crtc_height %d interlace %d\n",
+ crtc_width, crtc_height, interlace_mode);
+ DRM_DEBUG("w_in %d h_in %d crop_top %d crop_left %d\n",
+ w_in, h_in, crop_top, crop_left);
+ DRM_DEBUG("video top %d left %d width %d height %d\n",
+ video_top, video_left, video_width, video_height);
+
+ ratio_x = (w_in << 18) / video_width;
+ ratio_y = (h_in << 18) / video_height;
+
+ if (ratio_x * video_width < (w_in << 18))
+ ratio_x++;
+
+ DRM_DEBUG("ratio x 0x%x y 0x%x\n", ratio_x, ratio_y);
+
+ meson_overlay_get_vertical_phase(ratio_y, &vphase, &vphase_repeat_skip,
+ interlace_mode);
+
+ DRM_DEBUG("vphase 0x%x skip %d\n", vphase, vphase_repeat_skip);
+
+ /* Vertical */
+
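+	/*
+	 * (h_in << 18) / ratio_y is the scaled output height, so this
+	 * centers the scaled window on the requested CRTC rectangle.
+	 */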
+ start = video_top + video_height / 2 - ((h_in << 17) / ratio_y);
+ end = (h_in << 18) / ratio_y + start - 1;
+
+ if (video_top < 0 && start < 0)
+ vd_start_lines = (-(start) * ratio_y) >> 18;
+ else if (start < video_top)
+ vd_start_lines = ((video_top - start) * ratio_y) >> 18;
+ else
+ vd_start_lines = 0;
+
+ if (video_top < 0)
+ temp_height = min_t(unsigned int,
+ video_top + video_height - 1,
+ crtc_height - 1);
+ else
+ temp_height = min_t(unsigned int,
+ video_top + video_height - 1,
+ crtc_height - 1) - video_top + 1;
+
+ temp = vd_start_lines + (temp_height * ratio_y >> 18);
+ vd_end_lines = (temp <= (h_in - 1)) ? temp : (h_in - 1);
+
+ vd_start_lines += crop_top;
+ vd_end_lines += crop_top;
+
+ /*
+ * TOFIX: Input frames are handled and scaled like progressive frames,
+ * proper handling of interlaced field input frames needs to be
+ * figured out using the proper framebuffer flags set by userspace.
+ */
+ if (interlace_mode) {
+ start >>= 1;
+ end >>= 1;
+ }
+
+ vsc_startp = max_t(int, start,
+ max_t(int, 0, video_top));
+ vsc_endp = min_t(int, end,
+ min_t(int, crtc_height - 1,
+ video_top + video_height - 1));
+
+ DRM_DEBUG("vsc startp %d endp %d start_lines %d end_lines %d\n",
+ vsc_startp, vsc_endp, vd_start_lines, vd_end_lines);
+
+ /* Horizontal */
+
+ start = video_left + video_width / 2 - ((w_in << 17) / ratio_x);
+ end = (w_in << 18) / ratio_x + start - 1;
+
+ if (video_left < 0 && start < 0)
+ hd_start_lines = (-(start) * ratio_x) >> 18;
+ else if (start < video_left)
+ hd_start_lines = ((video_left - start) * ratio_x) >> 18;
+ else
+ hd_start_lines = 0;
+
+ if (video_left < 0)
+ temp_width = min_t(unsigned int,
+ video_left + video_width - 1,
+ crtc_width - 1);
+ else
+ temp_width = min_t(unsigned int,
+ video_left + video_width - 1,
+ crtc_width - 1) - video_left + 1;
+
+ temp = hd_start_lines + (temp_width * ratio_x >> 18);
+ hd_end_lines = (temp <= (w_in - 1)) ? temp : (w_in - 1);
+
+ priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
+ hsc_startp = max_t(int, start, max_t(int, 0, video_left));
+ hsc_endp = min_t(int, end, min_t(int, crtc_width - 1,
+ video_left + video_width - 1));
+
+ hd_start_lines += crop_left;
+ hd_end_lines += crop_left;
+
+ DRM_DEBUG("hsc startp %d endp %d start_lines %d end_lines %d\n",
+ hsc_startp, hsc_endp, hd_start_lines, hd_end_lines);
+
+ priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
+
+ priv->viu.vpp_vsc_ini_phase = vphase << 8;
+ priv->viu.vpp_vsc_phase_ctrl = (1 << 13) | (4 << 8) |
+ vphase_repeat_skip;
+
+ priv->viu.vd1_if0_luma_x0 = VD_X_START(hd_start_lines) |
+ VD_X_END(hd_end_lines);
+ priv->viu.vd1_if0_chroma_x0 = VD_X_START(hd_start_lines >> 1) |
+ VD_X_END(hd_end_lines >> 1);
+
+ priv->viu.viu_vd1_fmt_w =
+ VD_H_WIDTH(hd_end_lines - hd_start_lines + 1) |
+ VD_V_WIDTH(hd_end_lines/2 - hd_start_lines/2 + 1);
+
+ priv->viu.vd1_if0_luma_y0 = VD_Y_START(vd_start_lines) |
+ VD_Y_END(vd_end_lines);
+
+ priv->viu.vd1_if0_chroma_y0 = VD_Y_START(vd_start_lines >> 1) |
+ VD_Y_END(vd_end_lines >> 1);
+
+ priv->viu.vpp_pic_in_height = h_in;
+
+ priv->viu.vpp_postblend_vd1_h_start_end = VD_H_START(hsc_startp) |
+ VD_H_END(hsc_endp);
+ priv->viu.vpp_blend_vd2_h_start_end = VD_H_START(hd_start_lines) |
+ VD_H_END(hd_end_lines);
+ priv->viu.vpp_hsc_region12_startp = VD_REGION13_END(0) |
+ VD_REGION24_START(hsc_startp);
+ priv->viu.vpp_hsc_region34_startp =
+ VD_REGION13_END(hsc_startp) |
+ VD_REGION24_START(hsc_endp - hsc_startp);
+ priv->viu.vpp_hsc_region4_endp = hsc_endp - hsc_startp;
+ priv->viu.vpp_hsc_start_phase_step = ratio_x << 6;
+ priv->viu.vpp_hsc_region1_phase_slope = 0;
+ priv->viu.vpp_hsc_region3_phase_slope = 0;
+ priv->viu.vpp_hsc_phase_ctrl = (1 << 21) | (4 << 16);
+
+ priv->viu.vpp_line_in_length = hd_end_lines - hd_start_lines + 1;
+ priv->viu.vpp_preblend_h_size = hd_end_lines - hd_start_lines + 1;
+
+ priv->viu.vpp_postblend_vd1_v_start_end = VD_V_START(vsc_startp) |
+ VD_V_END(vsc_endp);
+ priv->viu.vpp_blend_vd2_v_start_end =
+ VD2_V_START((vd_end_lines + 1) >> 1) |
+ VD2_V_END(vd_end_lines);
+
+ priv->viu.vpp_vsc_region12_startp = 0;
+ priv->viu.vpp_vsc_region34_startp =
+ VD_REGION13_END(vsc_endp - vsc_startp) |
+ VD_REGION24_START(vsc_endp - vsc_startp);
+ priv->viu.vpp_vsc_region4_endp = vsc_endp - vsc_startp;
+ priv->viu.vpp_vsc_start_phase_step = ratio_y << 6;
+}
+
+static void meson_overlay_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct meson_overlay *meson_overlay = to_meson_overlay(plane);
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ struct meson_drm *priv = meson_overlay->priv;
+ struct drm_gem_cma_object *gem;
+ unsigned long flags;
+ bool interlace_mode;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ /* Fallback if the canvas provider is not available */
+ if (!priv->canvas) {
+ priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0;
+ priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1;
+ priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2;
+ }
+
+ interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
+
+ spin_lock_irqsave(&priv->drm->event_lock, flags);
+
+ priv->viu.vd1_if0_gen_reg = VD_URGENT_CHROMA |
+ VD_URGENT_LUMA |
+ VD_HOLD_LINES(9) |
+ VD_CHRO_RPT_LASTL_CTRL |
+ VD_ENABLE;
+
+ /* Setup scaler params */
+ meson_overlay_setup_scaler_params(priv, plane, interlace_mode);
+
+ priv->viu.vd1_if0_repeat_loop = 0;
+ priv->viu.vd1_if0_luma0_rpt_pat = interlace_mode ? 8 : 0;
+ priv->viu.vd1_if0_chroma0_rpt_pat = interlace_mode ? 8 : 0;
+ priv->viu.vd1_range_map_y = 0;
+ priv->viu.vd1_range_map_cb = 0;
+ priv->viu.vd1_range_map_cr = 0;
+
+ /* Default values for RGB888/YUV444 */
+ priv->viu.vd1_if0_gen_reg2 = 0;
+ priv->viu.viu_vd1_fmt_ctrl = 0;
+
+ switch (fb->format->format) {
+ /* TOFIX DRM_FORMAT_RGB888 should be supported */
+ case DRM_FORMAT_YUYV:
+ priv->viu.vd1_if0_gen_reg |= VD_BYTES_PER_PIXEL(1);
+ priv->viu.vd1_if0_canvas0 =
+ CANVAS_ADDR2(priv->canvas_id_vd1_0) |
+ CANVAS_ADDR1(priv->canvas_id_vd1_0) |
+ CANVAS_ADDR0(priv->canvas_id_vd1_0);
+ priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(16) | /* /2 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
+ priv->viu.vd1_if0_canvas0 =
+ CANVAS_ADDR2(priv->canvas_id_vd1_1) |
+ CANVAS_ADDR1(priv->canvas_id_vd1_1) |
+ CANVAS_ADDR0(priv->canvas_id_vd1_0);
+ if (fb->format->format == DRM_FORMAT_NV12)
+ priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(1);
+ else
+ priv->viu.vd1_if0_gen_reg2 = VD_COLOR_MAP(2);
+ priv->viu.viu_vd1_fmt_ctrl = VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(8) | /* /4 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YUV410:
+ priv->viu.vd1_if0_gen_reg |= VD_SEPARATE_EN;
+ priv->viu.vd1_if0_canvas0 =
+ CANVAS_ADDR2(priv->canvas_id_vd1_2) |
+ CANVAS_ADDR1(priv->canvas_id_vd1_1) |
+ CANVAS_ADDR0(priv->canvas_id_vd1_0);
+ switch (fb->format->format) {
+ case DRM_FORMAT_YUV422:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(16) | /* /2 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV420:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(1) | /* /2 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(8) | /* /4 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV411:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(2) | /* /4 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(16) | /* /2 */
+ VD_VERT_FMT_EN;
+ break;
+ case DRM_FORMAT_YUV410:
+ priv->viu.viu_vd1_fmt_ctrl =
+ VD_HORZ_Y_C_RATIO(2) | /* /4 */
+ VD_HORZ_FMT_EN |
+ VD_VERT_RPT_LINE0 |
+ VD_VERT_INITIAL_PHASE(12) |
+ VD_VERT_PHASE_STEP(8) | /* /4 */
+ VD_VERT_FMT_EN;
+ break;
+ }
+ break;
+ }
+
+ /* Update Canvas with buffer address */
+ priv->viu.vd1_planes = drm_format_num_planes(fb->format->format);
+
+ switch (priv->viu.vd1_planes) {
+ case 3:
+ gem = drm_fb_cma_get_gem_obj(fb, 2);
+ priv->viu.vd1_addr2 = gem->paddr + fb->offsets[2];
+ priv->viu.vd1_stride2 = fb->pitches[2];
+ priv->viu.vd1_height2 =
+ drm_format_plane_height(fb->height,
+ fb->format->format, 2);
+ DRM_DEBUG("plane 2 addr 0x%x stride %d height %d\n",
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2);
+ /* fallthrough */
+ case 2:
+ gem = drm_fb_cma_get_gem_obj(fb, 1);
+ priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1];
+ priv->viu.vd1_stride1 = fb->pitches[1];
+ priv->viu.vd1_height1 =
+ drm_format_plane_height(fb->height,
+ fb->format->format, 1);
+ DRM_DEBUG("plane 1 addr 0x%x stride %d height %d\n",
+ priv->viu.vd1_addr1,
+ priv->viu.vd1_stride1,
+ priv->viu.vd1_height1);
+ /* fallthrough */
+ case 1:
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0];
+ priv->viu.vd1_stride0 = fb->pitches[0];
+ priv->viu.vd1_height0 =
+ drm_format_plane_height(fb->height,
+ fb->format->format, 0);
+ DRM_DEBUG("plane 0 addr 0x%x stride %d height %d\n",
+ priv->viu.vd1_addr0,
+ priv->viu.vd1_stride0,
+ priv->viu.vd1_height0);
+ }
+
+ priv->viu.vd1_enabled = true;
+
+ spin_unlock_irqrestore(&priv->drm->event_lock, flags);
+
+ DRM_DEBUG_DRIVER("\n");
+}
+
+static void meson_overlay_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct meson_overlay *meson_overlay = to_meson_overlay(plane);
+ struct meson_drm *priv = meson_overlay->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ priv->viu.vd1_enabled = false;
+
+ /* Disable VD1 */
+ writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
+}
+
+static const struct drm_plane_helper_funcs meson_overlay_helper_funcs = {
+ .atomic_check = meson_overlay_atomic_check,
+ .atomic_disable = meson_overlay_atomic_disable,
+ .atomic_update = meson_overlay_atomic_update,
+ .prepare_fb = drm_gem_fb_prepare_fb,
+};
+
+static const struct drm_plane_funcs meson_overlay_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const uint32_t supported_drm_formats[] = {
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV411,
+ DRM_FORMAT_YUV410,
+};
+
+int meson_overlay_create(struct meson_drm *priv)
+{
+ struct meson_overlay *meson_overlay;
+ struct drm_plane *plane;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ meson_overlay = devm_kzalloc(priv->drm->dev, sizeof(*meson_overlay),
+ GFP_KERNEL);
+ if (!meson_overlay)
+ return -ENOMEM;
+
+ meson_overlay->priv = priv;
+ plane = &meson_overlay->base;
+
+ drm_universal_plane_init(priv->drm, plane, 0xFF,
+ &meson_overlay_funcs,
+ supported_drm_formats,
+ ARRAY_SIZE(supported_drm_formats),
+ NULL,
+ DRM_PLANE_TYPE_OVERLAY, "meson_overlay_plane");
+
+ drm_plane_helper_add(plane, &meson_overlay_helper_funcs);
+
+ priv->overlay_plane = plane;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/meson/meson_overlay.h b/drivers/gpu/drm/meson/meson_overlay.h
new file mode 100644
index 000000000000..dae24f5ac63d
--- /dev/null
+++ b/drivers/gpu/drm/meson/meson_overlay.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#ifndef __MESON_OVERLAY_H
+#define __MESON_OVERLAY_H
+
+#include "meson_drv.h"
+
+int meson_overlay_create(struct meson_drm *priv);
+
+#endif /* __MESON_OVERLAY_H */
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 12c80dfcff59..6119a0224278 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -24,6 +24,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/bitfield.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -31,6 +32,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_rect.h>
#include "meson_plane.h"
@@ -39,12 +41,51 @@
#include "meson_canvas.h"
#include "meson_registers.h"
+/* OSD_SCI_WH_M1 */
+#define SCI_WH_M1_W(w) FIELD_PREP(GENMASK(28, 16), w)
+#define SCI_WH_M1_H(h) FIELD_PREP(GENMASK(12, 0), h)
+
+/* OSD_SCO_H_START_END */
+/* OSD_SCO_V_START_END */
+#define SCO_HV_START(start) FIELD_PREP(GENMASK(27, 16), start)
+#define SCO_HV_END(end) FIELD_PREP(GENMASK(11, 0), end)
+
+/* OSD_SC_CTRL0 */
+#define SC_CTRL0_PATH_EN BIT(3)
+#define SC_CTRL0_SEL_OSD1 BIT(2)
+
+/* OSD_VSC_CTRL0 */
+#define VSC_BANK_LEN(value) FIELD_PREP(GENMASK(2, 0), value)
+#define VSC_TOP_INI_RCV_NUM(value) FIELD_PREP(GENMASK(6, 3), value)
+#define VSC_TOP_RPT_L0_NUM(value) FIELD_PREP(GENMASK(9, 8), value)
+#define VSC_BOT_INI_RCV_NUM(value) FIELD_PREP(GENMASK(14, 11), value)
+#define VSC_BOT_RPT_L0_NUM(value) FIELD_PREP(GENMASK(17, 16), value)
+#define VSC_PROG_INTERLACE BIT(23)
+#define VSC_VERTICAL_SCALER_EN BIT(24)
+
+/* OSD_VSC_INI_PHASE */
+#define VSC_INI_PHASE_BOT(bottom) FIELD_PREP(GENMASK(31, 16), bottom)
+#define VSC_INI_PHASE_TOP(top) FIELD_PREP(GENMASK(15, 0), top)
+
+/* OSD_HSC_CTRL0 */
+#define HSC_BANK_LENGTH(value) FIELD_PREP(GENMASK(2, 0), value)
+#define HSC_INI_RCV_NUM0(value) FIELD_PREP(GENMASK(6, 3), value)
+#define HSC_RPT_P0_NUM0(value) FIELD_PREP(GENMASK(9, 8), value)
+#define HSC_HORIZ_SCALER_EN BIT(22)
+
+/* VPP_OSD_VSC_PHASE_STEP */
+/* VPP_OSD_HSC_PHASE_STEP */
+#define SC_PHASE_STEP(value) FIELD_PREP(GENMASK(27, 0), value)
+
struct meson_plane {
struct drm_plane base;
struct meson_drm *priv;
+ bool enabled;
};
#define to_meson_plane(x) container_of(x, struct meson_plane, base)
+#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
+
static int meson_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -57,10 +98,15 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
+ /*
+ * Only allow:
+ * - Upscaling up to 5x, vertical and horizontal
+ * - Final coordinates must match crtc size
+ */
return drm_atomic_helper_check_plane_state(state, crtc_state,
+ FRAC_16_16(1, 5),
DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
+ false, true);
}
/* Takes a fixed 16.16 number and converts it to integer. */
@@ -74,22 +120,20 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
{
struct meson_plane *meson_plane = to_meson_plane(plane);
struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_rect dest = drm_plane_state_dest(state);
struct meson_drm *priv = meson_plane->priv;
+ struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *gem;
- struct drm_rect src = {
- .x1 = (state->src_x),
- .y1 = (state->src_y),
- .x2 = (state->src_x + state->src_w),
- .y2 = (state->src_y + state->src_h),
- };
- struct drm_rect dest = {
- .x1 = state->crtc_x,
- .y1 = state->crtc_y,
- .x2 = state->crtc_x + state->crtc_w,
- .y2 = state->crtc_y + state->crtc_h,
- };
unsigned long flags;
+ int vsc_ini_rcv_num, vsc_ini_rpt_p0_num;
+ int vsc_bot_rcv_num, vsc_bot_rpt_p0_num;
+ int hsc_ini_rcv_num, hsc_ini_rpt_p0_num;
+ int hf_phase_step, vf_phase_step;
+ int src_w, src_h, dst_w, dst_h;
+ int bot_ini_phase;
+ int hf_bank_len;
+ int vf_bank_len;
+ u8 canvas_id_osd1;
/*
* Update Coordinates
@@ -104,8 +148,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
(0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
+ if (priv->canvas)
+ canvas_id_osd1 = priv->canvas_id_osd1;
+ else
+ canvas_id_osd1 = MESON_CANVAS_ID_OSD1;
+
/* Set up BLK0 to point to the right canvas */
- priv->viu.osd1_blk0_cfg[0] = ((MESON_CANVAS_ID_OSD1 << OSD_CANVAS_SEL) |
+ priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
OSD_ENDIANNESS_LE);
/* On GXBB, Use the old non-HDR RGB2YUV converter */
@@ -137,23 +186,115 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
break;
};
+ /* Default scaler parameters */
+ vsc_bot_rcv_num = 0;
+ vsc_bot_rpt_p0_num = 0;
+ hf_bank_len = 4;
+ vf_bank_len = 4;
+
if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
- priv->viu.osd1_interlace = true;
+ vsc_bot_rcv_num = 6;
+ vsc_bot_rpt_p0_num = 2;
+ }
+
+ hsc_ini_rcv_num = hf_bank_len;
+ vsc_ini_rcv_num = vf_bank_len;
+ hsc_ini_rpt_p0_num = (hf_bank_len / 2) - 1;
+ vsc_ini_rpt_p0_num = (vf_bank_len / 2) - 1;
+
+ src_w = fixed16_to_int(state->src_w);
+ src_h = fixed16_to_int(state->src_h);
+ dst_w = state->crtc_w;
+ dst_h = state->crtc_h;
+ /*
+ * When the output is interlaced, the OSD must switch between
+ * each field using the INTERLACE_SEL_ODD (0) bit of VIU_OSD1_BLK0_CFG_W0
+ * at each vsync.
+ * But the vertical scaler can provide such functionality if it
+ * is configured for 2:1 scaling with interlace options enabled.
+ */
+ if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
dest.y1 /= 2;
dest.y2 /= 2;
- } else
- priv->viu.osd1_interlace = false;
+ dst_h /= 2;
+ }
+
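+	/* The scaler phase steps are the src/dst ratios in .24 fixed point */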
+ hf_phase_step = ((src_w << 18) / dst_w) << 6;
+ vf_phase_step = (src_h << 20) / dst_h;
+
+ if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ bot_ini_phase = ((vf_phase_step / 2) >> 4);
+ else
+ bot_ini_phase = 0;
+
+ vf_phase_step = (vf_phase_step << 4);
+
+ /* In interlaced mode, scaler is always active */
+ if (src_h != dst_h || src_w != dst_w) {
+ priv->viu.osd_sc_i_wh_m1 = SCI_WH_M1_W(src_w - 1) |
+ SCI_WH_M1_H(src_h - 1);
+ priv->viu.osd_sc_o_h_start_end = SCO_HV_START(dest.x1) |
+ SCO_HV_END(dest.x2 - 1);
+ priv->viu.osd_sc_o_v_start_end = SCO_HV_START(dest.y1) |
+ SCO_HV_END(dest.y2 - 1);
+ /* Enable OSD Scaler */
+ priv->viu.osd_sc_ctrl0 = SC_CTRL0_PATH_EN | SC_CTRL0_SEL_OSD1;
+ } else {
+ priv->viu.osd_sc_i_wh_m1 = 0;
+ priv->viu.osd_sc_o_h_start_end = 0;
+ priv->viu.osd_sc_o_v_start_end = 0;
+ priv->viu.osd_sc_ctrl0 = 0;
+ }
+
+ /* In interlaced mode, vertical scaler is always active */
+ if (src_h != dst_h) {
+ priv->viu.osd_sc_v_ctrl0 =
+ VSC_BANK_LEN(vf_bank_len) |
+ VSC_TOP_INI_RCV_NUM(vsc_ini_rcv_num) |
+ VSC_TOP_RPT_L0_NUM(vsc_ini_rpt_p0_num) |
+ VSC_VERTICAL_SCALER_EN;
+
+ if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ priv->viu.osd_sc_v_ctrl0 |=
+ VSC_BOT_INI_RCV_NUM(vsc_bot_rcv_num) |
+ VSC_BOT_RPT_L0_NUM(vsc_bot_rpt_p0_num) |
+ VSC_PROG_INTERLACE;
+
+ priv->viu.osd_sc_v_phase_step = SC_PHASE_STEP(vf_phase_step);
+ priv->viu.osd_sc_v_ini_phase = VSC_INI_PHASE_BOT(bot_ini_phase);
+ } else {
+ priv->viu.osd_sc_v_ctrl0 = 0;
+ priv->viu.osd_sc_v_phase_step = 0;
+ priv->viu.osd_sc_v_ini_phase = 0;
+ }
+
+ /* Horizontal scaler is only used if width does not match */
+ if (src_w != dst_w) {
+ priv->viu.osd_sc_h_ctrl0 =
+ HSC_BANK_LENGTH(hf_bank_len) |
+ HSC_INI_RCV_NUM0(hsc_ini_rcv_num) |
+ HSC_RPT_P0_NUM0(hsc_ini_rpt_p0_num) |
+ HSC_HORIZ_SCALER_EN;
+ priv->viu.osd_sc_h_phase_step = SC_PHASE_STEP(hf_phase_step);
+ priv->viu.osd_sc_h_ini_phase = 0;
+ } else {
+ priv->viu.osd_sc_h_ctrl0 = 0;
+ priv->viu.osd_sc_h_phase_step = 0;
+ priv->viu.osd_sc_h_ini_phase = 0;
+ }
/*
* The format of these registers is (x2 << 16 | x1),
* where x2 is exclusive.
* e.g. +30x1920 would be (1919 << 16) | 30
*/
- priv->viu.osd1_blk0_cfg[1] = ((fixed16_to_int(src.x2) - 1) << 16) |
- fixed16_to_int(src.x1);
- priv->viu.osd1_blk0_cfg[2] = ((fixed16_to_int(src.y2) - 1) << 16) |
- fixed16_to_int(src.y1);
+ priv->viu.osd1_blk0_cfg[1] =
+ ((fixed16_to_int(state->src.x2) - 1) << 16) |
+ fixed16_to_int(state->src.x1);
+ priv->viu.osd1_blk0_cfg[2] =
+ ((fixed16_to_int(state->src.y2) - 1) << 16) |
+ fixed16_to_int(state->src.y1);
priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
@@ -164,6 +305,15 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_stride = fb->pitches[0];
priv->viu.osd1_height = fb->height;
+ if (!meson_plane->enabled) {
+ /* Reset OSD1 before enabling it on GXL+ SoCs */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_viu_osd1_reset(priv);
+
+ meson_plane->enabled = true;
+ }
+
spin_unlock_irqrestore(&priv->drm->event_lock, flags);
}
@@ -177,12 +327,15 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
priv->io_base + _REG(VPP_MISC));
+ meson_plane->enabled = false;
}
static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
.atomic_check = meson_plane_atomic_check,
.atomic_disable = meson_plane_atomic_disable,
.atomic_update = meson_plane_atomic_update,
+ .prepare_fb = drm_gem_fb_prepare_fb,
};
static const struct drm_plane_funcs meson_plane_funcs = {
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index bca87143e548..5c7e02c703bc 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -286,6 +286,7 @@
#define VIU_OSD1_MATRIX_COEF22_30 0x1a9d
#define VIU_OSD1_MATRIX_COEF31_32 0x1a9e
#define VIU_OSD1_MATRIX_COEF40_41 0x1a9f
+#define VD1_IF0_GEN_REG3 0x1aa7
#define VIU_OSD1_EOTF_CTL 0x1ad4
#define VIU_OSD1_EOTF_COEF00_01 0x1ad5
#define VIU_OSD1_EOTF_COEF02_10 0x1ad6
@@ -297,6 +298,7 @@
#define VIU_OSD1_OETF_CTL 0x1adc
#define VIU_OSD1_OETF_LUT_ADDR_PORT 0x1add
#define VIU_OSD1_OETF_LUT_DATA_PORT 0x1ade
+#define AFBC_ENABLE 0x1ae0
/* vpp */
#define VPP_DUMMY_DATA 0x1d00
@@ -349,6 +351,7 @@
#define VPP_VD2_PREBLEND BIT(15)
#define VPP_OSD1_PREBLEND BIT(16)
#define VPP_OSD2_PREBLEND BIT(17)
+#define VPP_COLOR_MNG_ENABLE BIT(28)
#define VPP_OFIFO_SIZE 0x1d27
#define VPP_FIFO_STATUS 0x1d28
#define VPP_SMOKE_CTRL 0x1d29
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index ae5473257f72..f6ba35a405f8 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -117,6 +117,8 @@
#define HDMI_PLL_RESET BIT(28)
#define HDMI_PLL_LOCK BIT(31)
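+/*
+ * CEA modes at 59.94/29.97/23.976 Hz run at 1000/1001 of the nominal
+ * pixel clock, e.g. 148500 kHz becomes 148352 kHz.
+ */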
+#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001)
+
/* VID PLL Dividers */
enum {
VID_PLL_DIV_1 = 0,
@@ -323,7 +325,7 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
enum {
/* PLL O1 O2 O3 VP DV EN TX */
/* 4320 /4 /4 /1 /5 /1 => /2 /2 */
- MESON_VCLK_HDMI_ENCI_54000 = 1,
+ MESON_VCLK_HDMI_ENCI_54000 = 0,
/* 4320 /4 /4 /1 /5 /1 => /1 /2 */
MESON_VCLK_HDMI_DDR_54000,
/* 2970 /4 /1 /1 /5 /1 => /1 /2 */
@@ -339,6 +341,7 @@ enum {
};
struct meson_vclk_params {
+ unsigned int pixel_freq;
unsigned int pll_base_freq;
unsigned int pll_od1;
unsigned int pll_od2;
@@ -347,6 +350,7 @@ struct meson_vclk_params {
unsigned int vclk_div;
} params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
+ .pixel_freq = 54000,
.pll_base_freq = 4320000,
.pll_od1 = 4,
.pll_od2 = 4,
@@ -355,6 +359,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_54000] = {
+ .pixel_freq = 54000,
.pll_base_freq = 4320000,
.pll_od1 = 4,
.pll_od2 = 4,
@@ -363,6 +368,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_148500] = {
+ .pixel_freq = 148500,
.pll_base_freq = 2970000,
.pll_od1 = 4,
.pll_od2 = 1,
@@ -371,6 +377,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_74250] = {
+ .pixel_freq = 74250,
.pll_base_freq = 2970000,
.pll_od1 = 2,
.pll_od2 = 2,
@@ -379,6 +386,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_148500] = {
+ .pixel_freq = 148500,
.pll_base_freq = 2970000,
.pll_od1 = 1,
.pll_od2 = 2,
@@ -387,6 +395,7 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_297000] = {
+ .pixel_freq = 297000,
.pll_base_freq = 2970000,
.pll_od1 = 1,
.pll_od2 = 1,
@@ -395,6 +404,7 @@ struct meson_vclk_params {
.vclk_div = 2,
},
[MESON_VCLK_HDMI_594000] = {
+ .pixel_freq = 594000,
.pll_base_freq = 5940000,
.pll_od1 = 1,
.pll_od2 = 1,
@@ -402,6 +412,7 @@ struct meson_vclk_params {
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
+ { /* sentinel */ },
};
static inline unsigned int pll_od_to_reg(unsigned int od)
@@ -626,12 +637,37 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
pll_freq);
}
+enum drm_mode_status
+meson_vclk_vic_supported_freq(unsigned int freq)
+{
+ int i;
+
+ DRM_DEBUG_DRIVER("freq = %d\n", freq);
+
+ for (i = 0; params[i].pixel_freq; ++i) {
+ DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
+ i, params[i].pixel_freq,
+ FREQ_1000_1001(params[i].pixel_freq));
+ /* Match strict frequency */
+ if (freq == params[i].pixel_freq)
+ return MODE_OK;
+ /* Match 1000/1001 variant */
+ if (freq == FREQ_1000_1001(params[i].pixel_freq))
+ return MODE_OK;
+ }
+
+ return MODE_CLOCK_RANGE;
+}
+EXPORT_SYMBOL_GPL(meson_vclk_vic_supported_freq);
+
static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
unsigned int od1, unsigned int od2, unsigned int od3,
unsigned int vid_pll_div, unsigned int vclk_div,
unsigned int hdmi_tx_div, unsigned int venc_div,
- bool hdmi_use_enci)
+ bool hdmi_use_enci, bool vic_alternate_clock)
{
+ unsigned int m = 0, frac = 0;
+
/* Set HDMI-TX sys clock */
regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL,
CTS_HDMI_SYS_SEL_MASK, 0);
@@ -646,34 +682,38 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
switch (pll_base_freq) {
case 2970000:
- meson_hdmi_pll_set_params(priv, 0x3d, 0xe00,
- od1, od2, od3);
+ m = 0x3d;
+ frac = vic_alternate_clock ? 0xd02 : 0xe00;
break;
case 4320000:
- meson_hdmi_pll_set_params(priv, 0x5a, 0,
- od1, od2, od3);
+ m = vic_alternate_clock ? 0x59 : 0x5a;
+ frac = vic_alternate_clock ? 0xe8f : 0;
break;
case 5940000:
- meson_hdmi_pll_set_params(priv, 0x7b, 0xc00,
- od1, od2, od3);
+ m = 0x7b;
+ frac = vic_alternate_clock ? 0xa05 : 0xc00;
break;
}
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
switch (pll_base_freq) {
case 2970000:
- meson_hdmi_pll_set_params(priv, 0x7b, 0x300,
- od1, od2, od3);
+ m = 0x7b;
+ frac = vic_alternate_clock ? 0x281 : 0x300;
break;
case 4320000:
- meson_hdmi_pll_set_params(priv, 0xb4, 0,
- od1, od2, od3);
+ m = vic_alternate_clock ? 0xb3 : 0xb4;
+ frac = vic_alternate_clock ? 0x347 : 0;
break;
case 5940000:
- meson_hdmi_pll_set_params(priv, 0xf7, 0x200,
- od1, od2, od3);
+ m = 0xf7;
+ frac = vic_alternate_clock ? 0x102 : 0x200;
break;
}
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
}
/* Setup vid_pll divider */
@@ -826,6 +866,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int vclk_freq, unsigned int venc_freq,
unsigned int dac_freq, bool hdmi_use_enci)
{
+ bool vic_alternate_clock = false;
unsigned int freq;
unsigned int hdmi_tx_div;
unsigned int venc_div;
@@ -843,7 +884,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
* - encp encoder
*/
meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
- VID_PLL_DIV_5, 2, 1, 1, false);
+ VID_PLL_DIV_5, 2, 1, 1, false, false);
return;
}
@@ -863,31 +904,35 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
return;
}
- switch (vclk_freq) {
- case 54000:
- if (hdmi_use_enci)
- freq = MESON_VCLK_HDMI_ENCI_54000;
- else
- freq = MESON_VCLK_HDMI_DDR_54000;
- break;
- case 74250:
- freq = MESON_VCLK_HDMI_74250;
- break;
- case 148500:
- if (dac_freq != 148500)
- freq = MESON_VCLK_HDMI_DDR_148500;
- else
- freq = MESON_VCLK_HDMI_148500;
- break;
- case 297000:
- freq = MESON_VCLK_HDMI_297000;
- break;
- case 594000:
- freq = MESON_VCLK_HDMI_594000;
- break;
- default:
- pr_err("Fatal Error, invalid HDMI vclk freq %d\n",
- vclk_freq);
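+	/* Pick the params entry matching the exact or 1000/1001 pixel freq */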
+ for (freq = 0; params[freq].pixel_freq; ++freq) {
+ if (vclk_freq == params[freq].pixel_freq ||
+ vclk_freq == FREQ_1000_1001(params[freq].pixel_freq)) {
+ if (vclk_freq != params[freq].pixel_freq)
+ vic_alternate_clock = true;
+ else
+ vic_alternate_clock = false;
+
+ if (freq == MESON_VCLK_HDMI_ENCI_54000 &&
+ !hdmi_use_enci)
+ continue;
+
+ if (freq == MESON_VCLK_HDMI_DDR_54000 &&
+ hdmi_use_enci)
+ continue;
+
+ if (freq == MESON_VCLK_HDMI_DDR_148500 &&
+ dac_freq == vclk_freq)
+ continue;
+
+ if (freq == MESON_VCLK_HDMI_148500 &&
+ dac_freq != vclk_freq)
+ continue;
+ break;
+ }
+ }
+
+ if (!params[freq].pixel_freq) {
+ pr_err("Fatal Error, invalid HDMI vclk freq %d\n", vclk_freq);
return;
}
@@ -895,6 +940,6 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
params[freq].pll_od1, params[freq].pll_od2,
params[freq].pll_od3, params[freq].vid_pll_div,
params[freq].vclk_div, hdmi_tx_div, venc_div,
- hdmi_use_enci);
+ hdmi_use_enci, vic_alternate_clock);
}
EXPORT_SYMBOL_GPL(meson_vclk_setup);
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index 869fa3a3073e..4bd8752da02a 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -32,6 +32,8 @@ enum {
enum drm_mode_status
meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
+enum drm_mode_status
+meson_vclk_vic_supported_freq(unsigned int freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int vclk_freq, unsigned int venc_freq,
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index acbbad3e322c..0ba04f6813e6 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -71,6 +71,7 @@
*/
/* HHI Registers */
+#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
@@ -697,6 +698,132 @@ union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
},
};
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p24 = {
+ .encp = {
+ .dvi_settings = 0x1,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x8,
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x1000,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 140,
+ .yfp2_htime = 140+3840,
+ .max_pxcnt = 3840+1660-1,
+ .hspuls_begin = 2156+1920,
+ .hspuls_end = 44,
+ .hspuls_switch = 44,
+ .vspuls_begin = 140,
+ .vspuls_end = 2059+1920,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 148,
+ .havon_end = 3987,
+ .vavon_bline = 89,
+ .vavon_eline = 2248,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 44,
+ .hso_end = 2156+1920,
+ .vso_begin = 2100+1920,
+ .vso_end = 2164+1920,
+ .vso_bline = 51,
+ .vso_eline = 53,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 2249,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p25 = {
+ .encp = {
+ .dvi_settings = 0x1,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x8,
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x1000,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 140,
+ .yfp2_htime = 140+3840,
+ .max_pxcnt = 3840+1440-1,
+ .hspuls_begin = 2156+1920,
+ .hspuls_end = 44,
+ .hspuls_switch = 44,
+ .vspuls_begin = 140,
+ .vspuls_end = 2059+1920,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 148,
+ .havon_end = 3987,
+ .vavon_bline = 89,
+ .vavon_eline = 2248,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 44,
+ .hso_end = 2156+1920,
+ .vso_begin = 2100+1920,
+ .vso_end = 2164+1920,
+ .vso_bline = 51,
+ .vso_eline = 53,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 2249,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_2160p30 = {
+ .encp = {
+ .dvi_settings = 0x1,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x8,
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ .video_filt_ctrl = 0x1000,
+ .video_filt_ctrl_present = true,
+ /* video_ofld_voav_ofst */
+ .yfp1_htime = 140,
+ .yfp2_htime = 140+3840,
+ .max_pxcnt = 3840+560-1,
+ .hspuls_begin = 2156+1920,
+ .hspuls_end = 44,
+ .hspuls_switch = 44,
+ .vspuls_begin = 140,
+ .vspuls_end = 2059+1920,
+ .vspuls_bline = 0,
+ .vspuls_eline = 4,
+ .havon_begin = 148,
+ .havon_end = 3987,
+ .vavon_bline = 89,
+ .vavon_eline = 2248,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 44,
+ .hso_end = 2156+1920,
+ .vso_begin = 2100+1920,
+ .vso_end = 2164+1920,
+ .vso_bline = 51,
+ .vso_eline = 53,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 2249,
+ },
+};
+
struct meson_hdmi_venc_vic_mode {
unsigned int vic;
union meson_hdmi_venc_mode *mode;
@@ -714,9 +841,13 @@ struct meson_hdmi_venc_vic_mode {
{ 5, &meson_hdmi_encp_mode_1080i60 },
{ 20, &meson_hdmi_encp_mode_1080i50 },
{ 32, &meson_hdmi_encp_mode_1080p24 },
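+	/* VIC 33 (1080p25) uses the 1080p50 ENCP timings at half the pixel clock */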
+ { 33, &meson_hdmi_encp_mode_1080p50 },
{ 34, &meson_hdmi_encp_mode_1080p30 },
{ 31, &meson_hdmi_encp_mode_1080p50 },
{ 16, &meson_hdmi_encp_mode_1080p60 },
+ { 93, &meson_hdmi_encp_mode_2160p24 },
+ { 94, &meson_hdmi_encp_mode_2160p25 },
+ { 95, &meson_hdmi_encp_mode_2160p30 },
{ 0, NULL}, /* sentinel */
};
@@ -1530,10 +1661,12 @@ unsigned int meson_venci_get_field(struct meson_drm *priv)
void meson_venc_enable_vsync(struct meson_drm *priv)
{
writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL));
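+	/* The vsync interrupt also needs this HHI clock gate (bit 25) enabled */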
+ regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25));
}
void meson_venc_disable_vsync(struct meson_drm *priv)
{
+ regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), 0);
writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL));
}
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 6bcfa527c180..e46e05f50bad 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -184,18 +184,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
if (lut_sel == VIU_LUT_OSD_OETF) {
writel(0, priv->io_base + _REG(addr_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
@@ -211,18 +211,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
} else if (lut_sel == VIU_LUT_OSD_EOTF) {
writel(0, priv->io_base + _REG(addr_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
priv->io_base + _REG(data_port));
- for (i = 0; i < 20; i++)
+ for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
priv->io_base + _REG(data_port));
@@ -296,6 +296,33 @@ static void meson_viu_load_matrix(struct meson_drm *priv)
true);
}
+/* VIU OSD1 Reset as workaround for GXL+ Alpha OSD Bug */
+void meson_viu_osd1_reset(struct meson_drm *priv)
+{
+ uint32_t osd1_fifo_ctrl_stat, osd1_ctrl_stat2;
+
+ /* Save the state of these 2 registers */
+ osd1_fifo_ctrl_stat = readl_relaxed(
+ priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+ osd1_ctrl_stat2 = readl_relaxed(
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+
+ /* Reset OSD1 */
+ writel_bits_relaxed(BIT(0), BIT(0),
+ priv->io_base + _REG(VIU_SW_RESET));
+ writel_bits_relaxed(BIT(0), 0,
+ priv->io_base + _REG(VIU_SW_RESET));
+
+ /* Restore these registers' state, lost in the reset */
+ writel_relaxed(osd1_fifo_ctrl_stat,
+ priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
+ writel_relaxed(osd1_ctrl_stat2,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT2));
+
+ /* Reload the conversion matrix */
+ meson_viu_load_matrix(priv);
+}
+
void meson_viu_init(struct meson_drm *priv)
{
uint32_t reg;
@@ -329,6 +356,21 @@ void meson_viu_init(struct meson_drm *priv)
0xff << OSD_REPLACE_SHIFT,
priv->io_base + _REG(VIU_OSD2_CTRL_STAT2));
+ /* Disable VD1 AFBC */
+ /* di_mif0_en=0 mif0_to_vpp_en=0 di_mad_en=0 */
+ writel_bits_relaxed(0x7 << 16, 0,
+ priv->io_base + _REG(VIU_MISC_CTRL0));
+ /* afbc vd1 set=0 */
+ writel_bits_relaxed(BIT(20), 0,
+ priv->io_base + _REG(VIU_MISC_CTRL0));
+ writel_relaxed(0, priv->io_base + _REG(AFBC_ENABLE));
+
+ writel_relaxed(0x00FF00C0,
+ priv->io_base + _REG(VD1_IF0_LUMA_FIFO_SIZE));
+ writel_relaxed(0x00FF00C0,
+ priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
+
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
priv->viu.osd1_interlace = false;
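Two notes on the meson_viu.c hunks above. The loop-bound change is cosmetic for the OETF branch (assuming the driver's OSD_OETF_LUT_SIZE of 41, 41 / 2 is still 20) but a real fix for the EOTF branch, where the hard-coded 20 pairs overran the 33-entry LUT (33 / 2 = 16). Also, writel_bits_relaxed() is not defined in this diff; in the meson driver it is a plain relaxed read-modify-write helper, roughly (a sketch consistent with its use here; see meson_drv.h for the real definition):

static inline void writel_bits_relaxed(uint32_t mask, uint32_t val,
				       void __iomem *addr)
{
	writel_relaxed((readl_relaxed(addr) & ~mask) | (val & mask), addr);
}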
diff --git a/drivers/gpu/drm/meson/meson_viu.h b/drivers/gpu/drm/meson/meson_viu.h
index 073b1910bd1b..0f84bddd2ff0 100644
--- a/drivers/gpu/drm/meson/meson_viu.h
+++ b/drivers/gpu/drm/meson/meson_viu.h
@@ -59,6 +59,7 @@
#define OSD_REPLACE_EN BIT(14)
#define OSD_REPLACE_SHIFT 6
+void meson_viu_osd1_reset(struct meson_drm *priv);
void meson_viu_init(struct meson_drm *priv);
#endif /* __MESON_VIU_H */
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index 27356f81a0ab..f9efb431e953 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -51,52 +51,6 @@ void meson_vpp_setup_mux(struct meson_drm *priv, unsigned int mux)
writel(mux, priv->io_base + _REG(VPU_VIU_VENC_MUX_CTRL));
}
-/*
- * When the output is interlaced, the OSD must switch between
- * each field using the INTERLACE_SEL_ODD (0) of VIU_OSD1_BLK0_CFG_W0
- * at each vsync.
- * But the vertical scaler can provide such functionality if it
- * is configured for 2:1 scaling with interlace options enabled.
- */
-void meson_vpp_setup_interlace_vscaler_osd1(struct meson_drm *priv,
- struct drm_rect *input)
-{
- writel_relaxed(BIT(3) /* Enable scaler */ |
- BIT(2), /* Select OSD1 */
- priv->io_base + _REG(VPP_OSD_SC_CTRL0));
-
- writel_relaxed(((drm_rect_width(input) - 1) << 16) |
- (drm_rect_height(input) - 1),
- priv->io_base + _REG(VPP_OSD_SCI_WH_M1));
- /* 2:1 scaling */
- writel_relaxed(((input->x1) << 16) | (input->x2),
- priv->io_base + _REG(VPP_OSD_SCO_H_START_END));
- writel_relaxed(((input->y1 >> 1) << 16) | (input->y2 >> 1),
- priv->io_base + _REG(VPP_OSD_SCO_V_START_END));
-
- /* 2:1 scaling values */
- writel_relaxed(BIT(16), priv->io_base + _REG(VPP_OSD_VSC_INI_PHASE));
- writel_relaxed(BIT(25), priv->io_base + _REG(VPP_OSD_VSC_PHASE_STEP));
-
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
-
- writel_relaxed((4 << 0) /* osd_vsc_bank_length */ |
- (4 << 3) /* osd_vsc_top_ini_rcv_num0 */ |
- (1 << 8) /* osd_vsc_top_rpt_p0_num0 */ |
- (6 << 11) /* osd_vsc_bot_ini_rcv_num0 */ |
- (2 << 16) /* osd_vsc_bot_rpt_p0_num0 */ |
- BIT(23) /* osd_prog_interlace */ |
- BIT(24), /* Enable vertical scaler */
- priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
-}
-
-void meson_vpp_disable_interlace_vscaler_osd1(struct meson_drm *priv)
-{
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
- writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
-}
-
static unsigned int vpp_filter_coefs_4point_bspline[] = {
0x15561500, 0x14561600, 0x13561700, 0x12561800,
0x11551a00, 0x11541b00, 0x10541c00, 0x0f541d00,
@@ -122,6 +76,31 @@ static void meson_vpp_write_scaling_filter_coefs(struct meson_drm *priv,
priv->io_base + _REG(VPP_OSD_SCALE_COEF));
}
+static const uint32_t vpp_filter_coefs_bicubic[] = {
+ 0x00800000, 0x007f0100, 0xff7f0200, 0xfe7f0300,
+ 0xfd7e0500, 0xfc7e0600, 0xfb7d0800, 0xfb7c0900,
+ 0xfa7b0b00, 0xfa7a0dff, 0xf9790fff, 0xf97711ff,
+ 0xf87613ff, 0xf87416fe, 0xf87218fe, 0xf8701afe,
+ 0xf76f1dfd, 0xf76d1ffd, 0xf76b21fd, 0xf76824fd,
+ 0xf76627fc, 0xf76429fc, 0xf7612cfc, 0xf75f2ffb,
+ 0xf75d31fb, 0xf75a34fb, 0xf75837fa, 0xf7553afa,
+ 0xf8523cfa, 0xf8503ff9, 0xf84d42f9, 0xf84a45f9,
+ 0xf84848f8
+};
+
+static void meson_vpp_write_vd_scaling_filter_coefs(struct meson_drm *priv,
+ const unsigned int *coefs,
+ bool is_horizontal)
+{
+ int i;
+
+ writel_relaxed(is_horizontal ? BIT(8) : 0,
+ priv->io_base + _REG(VPP_SCALE_COEF_IDX));
+ for (i = 0; i < 33; i++)
+ writel_relaxed(coefs[i],
+ priv->io_base + _REG(VPP_SCALE_COEF));
+}
+
void meson_vpp_init(struct meson_drm *priv)
{
/* set dummy data default YUV black */
@@ -150,17 +129,34 @@ void meson_vpp_init(struct meson_drm *priv)
/* Force all planes off */
writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
- VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND, 0,
+ VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
+ VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
priv->io_base + _REG(VPP_MISC));
+ /* Setup default VD settings */
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+
/* Disable Scalers */
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_HSC_CTRL0));
+ writel_relaxed(4 | (4 << 8) | BIT(15),
+ priv->io_base + _REG(VPP_SC_MISC));
+
+ writel_relaxed(1, priv->io_base + _REG(VPP_VADJ_CTRL));
/* Write in the proper filter coefficients. */
meson_vpp_write_scaling_filter_coefs(priv,
vpp_filter_coefs_4point_bspline, false);
meson_vpp_write_scaling_filter_coefs(priv,
vpp_filter_coefs_4point_bspline, true);
+
+ /* Write the VD proper filter coefficients. */
+ meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
+ false);
+ meson_vpp_write_vd_scaling_filter_coefs(priv, vpp_filter_coefs_bicubic,
+ true);
}
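The new vpp_filter_coefs_bicubic[] table holds 33 entries, presumably one per scaler phase, each word packing four signed 8-bit taps. Every entry's taps sum to 128, i.e. unity gain with 128 representing 1.0; checking the first and last entries:

	0x00800000:  0 + 128 +  0 +  0 = 128
	0xf84848f8: -8 +  72 + 72 + -8 = 128

The VPP_SCALE_COEF_IDX write selects the bank (BIT(8) for horizontal) and the VPP_SCALE_COEF data port evidently auto-increments, hence the plain 33-iteration loop.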
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 04f1dfba12e5..0aaedc554879 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -212,8 +212,6 @@ struct mga_device {
int fb_mtrr;
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 05570f0de4d7..d96a9b32455e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -36,63 +36,6 @@ mgag200_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct mga_device, ttm.bdev);
}
-static int
-mgag200_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void
-mgag200_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int mgag200_ttm_global_init(struct mga_device *ast)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- global_ref = &ast->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &mgag200_ttm_mem_global_init;
- global_ref->release = &mgag200_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- ast->ttm.bo_global_ref.mem_glob =
- ast->ttm.mem_global_ref.object;
- global_ref = &ast->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&ast->ttm.mem_global_ref);
- return r;
- }
- return 0;
-}
-
-static void
-mgag200_ttm_global_release(struct mga_device *ast)
-{
- if (ast->ttm.mem_global_ref.release == NULL)
- return;
-
- drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
- drm_global_item_unref(&ast->ttm.mem_global_ref);
- ast->ttm.mem_global_ref.release = NULL;
-}
-
-
static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct mgag200_bo *bo;
@@ -232,12 +175,7 @@ int mgag200_mm_init(struct mga_device *mdev)
struct drm_device *dev = mdev->dev;
struct ttm_bo_device *bdev = &mdev->ttm.bdev;
- ret = mgag200_ttm_global_init(mdev);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&mdev->ttm.bdev,
- mdev->ttm.bo_global_ref.ref.object,
&mgag200_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -268,8 +206,6 @@ void mgag200_mm_fini(struct mga_device *mdev)
ttm_bo_device_release(&mdev->ttm.bdev);
- mgag200_ttm_global_release(mdev);
-
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
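With the drm_global/ttm_bo_global boilerplate gone (TTM of this era took over those singletons internally), the surviving init path reduces to the device init call. A sketch of the post-patch shape of mgag200_mm_init(), assuming the contemporary ttm_bo_device_init() signature without the global-object argument:

int mgag200_mm_init(struct mga_device *mdev)
{
	struct drm_device *dev = mdev->dev;
	int ret;

	ret = ttm_bo_device_init(&mdev->ttm.bdev,
				 &mgag200_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver: %d\n", ret);
		return ret;
	}

	/* VRAM range manager and write-combine setup continue as before */
	return 0;
}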
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 843a9d40c05e..cf549f1ed403 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -2,7 +2,7 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
depends on OF && COMMON_CLK
depends on MMU
select QCOM_MDT_LOADER if ARCH_QCOM
@@ -11,7 +11,7 @@ config DRM_MSM
select DRM_PANEL
select SHMEM
select TMPFS
- select QCOM_SCM
+ select QCOM_SCM if ARCH_QCOM
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 19ab521d4c3a..56a70c74af4e 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -6,6 +6,7 @@ ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
adreno/adreno_device.o \
adreno/adreno_gpu.o \
+ adreno/a2xx_gpu.o \
adreno/a3xx_gpu.o \
adreno/a4xx_gpu.o \
adreno/a5xx_gpu.o \
@@ -14,6 +15,7 @@ msm-y := \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
+ adreno/a6xx_gpu_state.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -68,11 +70,9 @@ msm-y := \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
disp/dpu1/dpu_io_util.o \
- disp/dpu1/dpu_irq.o \
disp/dpu1/dpu_kms.o \
disp/dpu1/dpu_mdss.o \
disp/dpu1/dpu_plane.o \
- disp/dpu1/dpu_power_handle.o \
disp/dpu1/dpu_rm.o \
disp/dpu1/dpu_vbif.o \
msm_atomic.o \
@@ -90,10 +90,11 @@ msm-y := \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
- msm_submitqueue.o
+ msm_submitqueue.o \
+ msm_gpu_tracepoints.o \
+ msm_gpummu.o
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
- disp/dpu1/dpu_dbg.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 12b0ba270b5e..14eb52f3e605 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -239,7 +239,63 @@ enum sq_tex_swiz {
enum sq_tex_filter {
SQ_TEX_FILTER_POINT = 0,
SQ_TEX_FILTER_BILINEAR = 1,
- SQ_TEX_FILTER_BICUBIC = 2,
+ SQ_TEX_FILTER_BASEMAP = 2,
+ SQ_TEX_FILTER_USE_FETCH_CONST = 3,
+};
+
+enum sq_tex_aniso_filter {
+ SQ_TEX_ANISO_FILTER_DISABLED = 0,
+ SQ_TEX_ANISO_FILTER_MAX_1_1 = 1,
+ SQ_TEX_ANISO_FILTER_MAX_2_1 = 2,
+ SQ_TEX_ANISO_FILTER_MAX_4_1 = 3,
+ SQ_TEX_ANISO_FILTER_MAX_8_1 = 4,
+ SQ_TEX_ANISO_FILTER_MAX_16_1 = 5,
+ SQ_TEX_ANISO_FILTER_USE_FETCH_CONST = 7,
+};
+
+enum sq_tex_dimension {
+ SQ_TEX_DIMENSION_1D = 0,
+ SQ_TEX_DIMENSION_2D = 1,
+ SQ_TEX_DIMENSION_3D = 2,
+ SQ_TEX_DIMENSION_CUBE = 3,
+};
+
+enum sq_tex_border_color {
+ SQ_TEX_BORDER_COLOR_BLACK = 0,
+ SQ_TEX_BORDER_COLOR_WHITE = 1,
+ SQ_TEX_BORDER_COLOR_ACBYCR_BLACK = 2,
+ SQ_TEX_BORDER_COLOR_ACBCRY_BLACK = 3,
+};
+
+enum sq_tex_sign {
+ SQ_TEX_SIGN_UNISIGNED = 0,
+ SQ_TEX_SIGN_SIGNED = 1,
+ SQ_TEX_SIGN_UNISIGNED_BIASED = 2,
+ SQ_TEX_SIGN_GAMMA = 3,
+};
+
+enum sq_tex_endian {
+ SQ_TEX_ENDIAN_NONE = 0,
+ SQ_TEX_ENDIAN_8IN16 = 1,
+ SQ_TEX_ENDIAN_8IN32 = 2,
+ SQ_TEX_ENDIAN_16IN32 = 3,
+};
+
+enum sq_tex_clamp_policy {
+ SQ_TEX_CLAMP_POLICY_D3D = 0,
+ SQ_TEX_CLAMP_POLICY_OGL = 1,
+};
+
+enum sq_tex_num_format {
+ SQ_TEX_NUM_FORMAT_FRAC = 0,
+ SQ_TEX_NUM_FORMAT_INT = 1,
+};
+
+enum sq_tex_type {
+ SQ_TEX_TYPE_0 = 0,
+ SQ_TEX_TYPE_1 = 1,
+ SQ_TEX_TYPE_2 = 2,
+ SQ_TEX_TYPE_3 = 3,
};
#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
@@ -323,6 +379,18 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
}
#define REG_A2XX_MH_MMU_VA_RANGE 0x00000041
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK 0x00000fff
+#define A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT 0
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__SHIFT) & A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS__MASK;
+}
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK 0xfffff000
+#define A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT 12
+static inline uint32_t A2XX_MH_MMU_VA_RANGE_VA_BASE(uint32_t val)
+{
+ return ((val) << A2XX_MH_MMU_VA_RANGE_VA_BASE__SHIFT) & A2XX_MH_MMU_VA_RANGE_VA_BASE__MASK;
+}
#define REG_A2XX_MH_MMU_PT_BASE 0x00000042
@@ -331,6 +399,8 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
#define REG_A2XX_MH_MMU_TRAN_ERROR 0x00000044
#define REG_A2XX_MH_MMU_INVALIDATE 0x00000045
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL 0x00000001
+#define A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC 0x00000002
#define REG_A2XX_MH_MMU_MPU_BASE 0x00000046
@@ -389,12 +459,19 @@ static inline uint32_t A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_cln
#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
+#define A2XX_RBBM_INT_CNTL_RDERR_INT_MASK 0x00000001
+#define A2XX_RBBM_INT_CNTL_DISPLAY_UPDATE_INT_MASK 0x00000002
+#define A2XX_RBBM_INT_CNTL_GUI_IDLE_INT_MASK 0x00080000
#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
#define REG_A2XX_RBBM_INT_ACK 0x000003b6
#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
+#define A2XX_MASTER_INT_SIGNAL_MH_INT_STAT 0x00000020
+#define A2XX_MASTER_INT_SIGNAL_SQ_INT_STAT 0x04000000
+#define A2XX_MASTER_INT_SIGNAL_CP_INT_STAT 0x40000000
+#define A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT 0x80000000
#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
@@ -467,6 +544,19 @@ static inline uint32_t A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(uint32_t val)
#define A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE 0x02000000
#define A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE 0x04000000
+#define REG_A2XX_MH_INTERRUPT_MASK 0x00000a42
+#define A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR 0x00000001
+#define A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR 0x00000002
+#define A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT 0x00000004
+
+#define REG_A2XX_MH_INTERRUPT_STATUS 0x00000a43
+
+#define REG_A2XX_MH_INTERRUPT_CLEAR 0x00000a44
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1 0x00000a54
+
+#define REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG2 0x00000a55
+
#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
@@ -648,6 +738,18 @@ static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val
#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
#define REG_A2XX_RB_SURFACE_INFO 0x00002000
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK 0x00003fff
+#define A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT 0
+static inline uint32_t A2XX_RB_SURFACE_INFO_SURFACE_PITCH(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_SURFACE_PITCH__SHIFT) & A2XX_RB_SURFACE_INFO_SURFACE_PITCH__MASK;
+}
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK 0x0000c000
+#define A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT 14
+static inline uint32_t A2XX_RB_SURFACE_INFO_MSAA_SAMPLES(uint32_t val)
+{
+ return ((val) << A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__SHIFT) & A2XX_RB_SURFACE_INFO_MSAA_SAMPLES__MASK;
+}
#define REG_A2XX_RB_COLOR_INFO 0x00002001
#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
@@ -679,7 +781,7 @@ static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
{
- return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+ return ((val >> 12) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
}
#define REG_A2XX_RB_DEPTH_INFO 0x00002002
@@ -693,7 +795,7 @@ static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_form
#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
{
- return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+ return ((val >> 12) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
}
#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
@@ -1757,6 +1859,36 @@ static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
#define REG_A2XX_SQ_TEX_0 0x00000000
+#define A2XX_SQ_TEX_0_TYPE__MASK 0x00000003
+#define A2XX_SQ_TEX_0_TYPE__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_0_TYPE(enum sq_tex_type val)
+{
+ return ((val) << A2XX_SQ_TEX_0_TYPE__SHIFT) & A2XX_SQ_TEX_0_TYPE__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_X__MASK 0x0000000c
+#define A2XX_SQ_TEX_0_SIGN_X__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_X(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_X__SHIFT) & A2XX_SQ_TEX_0_SIGN_X__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Y__MASK 0x00000030
+#define A2XX_SQ_TEX_0_SIGN_Y__SHIFT 4
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Y(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Y__SHIFT) & A2XX_SQ_TEX_0_SIGN_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_Z__MASK 0x000000c0
+#define A2XX_SQ_TEX_0_SIGN_Z__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_Z(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_Z__SHIFT) & A2XX_SQ_TEX_0_SIGN_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_SIGN_W__MASK 0x00000300
+#define A2XX_SQ_TEX_0_SIGN_W__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_0_SIGN_W(enum sq_tex_sign val)
+{
+ return ((val) << A2XX_SQ_TEX_0_SIGN_W__SHIFT) & A2XX_SQ_TEX_0_SIGN_W__MASK;
+}
#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
@@ -1775,14 +1907,46 @@ static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
{
return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
}
-#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
+#define A2XX_SQ_TEX_0_PITCH__MASK 0x7fc00000
#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
{
return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
}
+#define A2XX_SQ_TEX_0_TILED 0x00000002
#define REG_A2XX_SQ_TEX_1 0x00000001
+#define A2XX_SQ_TEX_1_FORMAT__MASK 0x0000003f
+#define A2XX_SQ_TEX_1_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_1_FORMAT(enum a2xx_sq_surfaceformat val)
+{
+ return ((val) << A2XX_SQ_TEX_1_FORMAT__SHIFT) & A2XX_SQ_TEX_1_FORMAT__MASK;
+}
+#define A2XX_SQ_TEX_1_ENDIANNESS__MASK 0x000000c0
+#define A2XX_SQ_TEX_1_ENDIANNESS__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_1_ENDIANNESS(enum sq_tex_endian val)
+{
+ return ((val) << A2XX_SQ_TEX_1_ENDIANNESS__SHIFT) & A2XX_SQ_TEX_1_ENDIANNESS__MASK;
+}
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__MASK 0x00000300
+#define A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT 8
+static inline uint32_t A2XX_SQ_TEX_1_REQUEST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_1_REQUEST_SIZE__SHIFT) & A2XX_SQ_TEX_1_REQUEST_SIZE__MASK;
+}
+#define A2XX_SQ_TEX_1_STACKED 0x00000400
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__MASK 0x00000800
+#define A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT 11
+static inline uint32_t A2XX_SQ_TEX_1_CLAMP_POLICY(enum sq_tex_clamp_policy val)
+{
+ return ((val) << A2XX_SQ_TEX_1_CLAMP_POLICY__SHIFT) & A2XX_SQ_TEX_1_CLAMP_POLICY__MASK;
+}
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_1_BASE_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_1_BASE_ADDRESS__SHIFT) & A2XX_SQ_TEX_1_BASE_ADDRESS__MASK;
+}
#define REG_A2XX_SQ_TEX_2 0x00000002
#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
@@ -1797,8 +1961,20 @@ static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
{
return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
}
+#define A2XX_SQ_TEX_2_DEPTH__MASK 0xfc000000
+#define A2XX_SQ_TEX_2_DEPTH__SHIFT 26
+static inline uint32_t A2XX_SQ_TEX_2_DEPTH(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_DEPTH__SHIFT) & A2XX_SQ_TEX_2_DEPTH__MASK;
+}
#define REG_A2XX_SQ_TEX_3 0x00000003
+#define A2XX_SQ_TEX_3_NUM_FORMAT__MASK 0x00000001
+#define A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_3_NUM_FORMAT(enum sq_tex_num_format val)
+{
+ return ((val) << A2XX_SQ_TEX_3_NUM_FORMAT__SHIFT) & A2XX_SQ_TEX_3_NUM_FORMAT__MASK;
+}
#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
@@ -1823,6 +1999,12 @@ static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
{
return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
}
+#define A2XX_SQ_TEX_3_EXP_ADJUST__MASK 0x0007e000
+#define A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_3_EXP_ADJUST(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_EXP_ADJUST__SHIFT) & A2XX_SQ_TEX_3_EXP_ADJUST__MASK;
+}
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
@@ -1835,6 +2017,104 @@ static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
{
return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
}
+#define A2XX_SQ_TEX_3_MIP_FILTER__MASK 0x01800000
+#define A2XX_SQ_TEX_3_MIP_FILTER__SHIFT 23
+static inline uint32_t A2XX_SQ_TEX_3_MIP_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_MIP_FILTER__SHIFT) & A2XX_SQ_TEX_3_MIP_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_ANISO_FILTER__MASK 0x0e000000
+#define A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT 25
+static inline uint32_t A2XX_SQ_TEX_3_ANISO_FILTER(enum sq_tex_aniso_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_ANISO_FILTER__SHIFT) & A2XX_SQ_TEX_3_ANISO_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_BORDER_SIZE__MASK 0x80000000
+#define A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT 31
+static inline uint32_t A2XX_SQ_TEX_3_BORDER_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_3_BORDER_SIZE__SHIFT) & A2XX_SQ_TEX_3_BORDER_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_4 0x00000004
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK 0x00000001
+#define A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MAG_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK 0x00000002
+#define A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT 1
+static inline uint32_t A2XX_SQ_TEX_4_VOL_MIN_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_4_VOL_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_4_VOL_MIN_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK 0x0000003c
+#define A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT 2
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MIN_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MIN_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MIN_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK 0x000003c0
+#define A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT 6
+static inline uint32_t A2XX_SQ_TEX_4_MIP_MAX_LEVEL(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_MIP_MAX_LEVEL__SHIFT) & A2XX_SQ_TEX_4_MIP_MAX_LEVEL__MASK;
+}
+#define A2XX_SQ_TEX_4_MAX_ANISO_WALK 0x00000400
+#define A2XX_SQ_TEX_4_MIN_ANISO_WALK 0x00000800
+#define A2XX_SQ_TEX_4_LOD_BIAS__MASK 0x003ff000
+#define A2XX_SQ_TEX_4_LOD_BIAS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_4_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 32.0))) << A2XX_SQ_TEX_4_LOD_BIAS__SHIFT) & A2XX_SQ_TEX_4_LOD_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK 0x07c00000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT 22
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_H__MASK;
+}
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK 0xf8000000
+#define A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT 27
+static inline uint32_t A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__SHIFT) & A2XX_SQ_TEX_4_GRAD_EXP_ADJUST_V__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_5 0x00000005
+#define A2XX_SQ_TEX_5_BORDER_COLOR__MASK 0x00000003
+#define A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_5_BORDER_COLOR(enum sq_tex_border_color val)
+{
+ return ((val) << A2XX_SQ_TEX_5_BORDER_COLOR__SHIFT) & A2XX_SQ_TEX_5_BORDER_COLOR__MASK;
+}
+#define A2XX_SQ_TEX_5_FORCE_BCW_MAX 0x00000004
+#define A2XX_SQ_TEX_5_TRI_CLAMP__MASK 0x00000018
+#define A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT 3
+static inline uint32_t A2XX_SQ_TEX_5_TRI_CLAMP(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_5_TRI_CLAMP__SHIFT) & A2XX_SQ_TEX_5_TRI_CLAMP__MASK;
+}
+#define A2XX_SQ_TEX_5_ANISO_BIAS__MASK 0x000001e0
+#define A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT 5
+static inline uint32_t A2XX_SQ_TEX_5_ANISO_BIAS(float val)
+{
+ return ((((int32_t)(val * 1.0))) << A2XX_SQ_TEX_5_ANISO_BIAS__SHIFT) & A2XX_SQ_TEX_5_ANISO_BIAS__MASK;
+}
+#define A2XX_SQ_TEX_5_DIMENSION__MASK 0x00000600
+#define A2XX_SQ_TEX_5_DIMENSION__SHIFT 9
+static inline uint32_t A2XX_SQ_TEX_5_DIMENSION(enum sq_tex_dimension val)
+{
+ return ((val) << A2XX_SQ_TEX_5_DIMENSION__SHIFT) & A2XX_SQ_TEX_5_DIMENSION__MASK;
+}
+#define A2XX_SQ_TEX_5_PACKED_MIPS 0x00000800
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__MASK 0xfffff000
+#define A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT 12
+static inline uint32_t A2XX_SQ_TEX_5_MIP_ADDRESS(uint32_t val)
+{
+ return ((val >> 12) << A2XX_SQ_TEX_5_MIP_ADDRESS__SHIFT) & A2XX_SQ_TEX_5_MIP_ADDRESS__MASK;
+}
#endif /* A2XX_XML */
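These generated helpers follow the usual rules-ng pattern: each field gets a __MASK/__SHIFT pair plus an inline packer that shifts and masks, and a register word is built by OR-ing packed fields. An illustrative (values made up) use of the new SQ_TEX_0 fields:

	uint32_t tex0 = A2XX_SQ_TEX_0_TYPE(SQ_TEX_TYPE_2) |
			A2XX_SQ_TEX_0_SIGN_X(SQ_TEX_SIGN_UNISIGNED) |
			A2XX_SQ_TEX_0_PITCH(pitch);

Note that A2XX_SQ_TEX_0_PITCH() pre-shifts its argument right by 5: the pitch is passed in bytes but stored in 32-byte units, so it must be 32-byte aligned. The BASE_ADDRESS and MIP_ADDRESS packers play the same trick with a 12-bit shift for 4096-byte-aligned addresses.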
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
new file mode 100644
index 000000000000..1f83bc18d500
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -0,0 +1,492 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "a2xx_gpu.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+extern bool hang_debug;
+
+static void a2xx_dump(struct msm_gpu *gpu);
+static bool a2xx_idle(struct msm_gpu *gpu);
+
+static bool a2xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT3(ring, CP_ME_INIT, 18);
+
+ /* All fields present (bits 9:0) */
+ OUT_RING(ring, 0x000003ff);
+ /* Disable/Enable Real-Time Stream processing (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+ /* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
+ OUT_RING(ring, 0x00000000);
+
+ OUT_RING(ring, REG_A2XX_RB_SURFACE_INFO - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_WINDOW_OFFSET - 0x2000);
+ OUT_RING(ring, REG_A2XX_VGT_MAX_VTX_INDX - 0x2000);
+ OUT_RING(ring, REG_A2XX_SQ_PROGRAM_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_RB_DEPTHCONTROL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POINT_SIZE - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SC_LINE_CNTL - 0x2000);
+ OUT_RING(ring, REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE - 0x2000);
+
+ /* Vertex and Pixel Shader Start Addresses in instructions
+ * (3 DWORDS per instruction) */
+ OUT_RING(ring, 0x80000180);
+ /* Maximum Contexts */
+ OUT_RING(ring, 0x00000001);
+ /* Write Confirm Interval: the CP will wait wait_interval * 16
+ * clocks between polls */
+ OUT_RING(ring, 0x00000000);
+ /* NQ and External Memory Swap */
+ OUT_RING(ring, 0x00000000);
+ /* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
+ OUT_RING(ring, 0x200001f2);
+ /* Disable header dumping and Header dump address */
+ OUT_RING(ring, 0x00000000);
+ /* Header dump size */
+ OUT_RING(ring, 0x00000000);
+
+ /* enable protected mode */
+ OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ gpu->funcs->flush(gpu, ring);
+ return a2xx_idle(gpu);
+}
+
+static int a2xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ dma_addr_t pt_base, tran_error;
+ uint32_t *ptr, len;
+ int i, ret;
+
+ msm_gpummu_params(gpu->aspace->mmu, &pt_base, &tran_error);
+
+ DBG("%s", gpu->name);
+
+ /* halt ME to avoid ucode upload issues on a20x */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, AXXX_CP_ME_CNTL_HALT);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0xfffffffe);
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0xffffffff);
+
+ /* note: kgsl uses 0x00000001 after first reset on a22x */
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0xffffffff);
+ msleep(30);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0x00000000);
+
+ if (adreno_is_a225(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_SQ_FLOW_CONTROL, 0x18000000);
+
+ /* note: kgsl uses 0x0000ffff for a20x */
+ gpu_write(gpu, REG_A2XX_RBBM_CNTL, 0x00004442);
+
+ /* MPU: physical range */
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_BASE, 0x00000000);
+ gpu_write(gpu, REG_A2XX_MH_MMU_MPU_END, 0xfffff000);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_CONFIG, A2XX_MH_MMU_CONFIG_MMU_ENABLE |
+ A2XX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(BEH_TRAN_RNG) |
+ A2XX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(BEH_TRAN_RNG));
+
+ /* same as parameters in adreno_gpu */
+ gpu_write(gpu, REG_A2XX_MH_MMU_VA_RANGE, SZ_16M |
+ A2XX_MH_MMU_VA_RANGE_NUM_64KB_REGIONS(0xfff));
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_PT_BASE, pt_base);
+ gpu_write(gpu, REG_A2XX_MH_MMU_TRAN_ERROR, tran_error);
+
+ gpu_write(gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+
+ gpu_write(gpu, REG_A2XX_MH_ARBITER_CONFIG,
+ A2XX_MH_ARBITER_CONFIG_SAME_PAGE_LIMIT(16) |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_L1_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PAGE_SIZE(1) |
+ A2XX_MH_ARBITER_CONFIG_TC_REORDER_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_ARB_HOLD_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_IN_FLIGHT_LIMIT(8) |
+ A2XX_MH_ARBITER_CONFIG_CP_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_VGT_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_TC_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_RB_CLNT_ENABLE |
+ A2XX_MH_ARBITER_CONFIG_PA_CLNT_ENABLE);
+ if (!adreno_is_a20x(adreno_gpu))
+ gpu_write(gpu, REG_A2XX_MH_CLNT_INTF_CTRL_CONFIG1, 0x00032f07);
+
+ gpu_write(gpu, REG_A2XX_SQ_VS_PROGRAM, 0x00000000);
+ gpu_write(gpu, REG_A2XX_SQ_PS_PROGRAM, 0x00000000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE1, 0); /* 0x200 for msm8960? */
+ gpu_write(gpu, REG_A2XX_RBBM_PM_OVERRIDE2, 0); /* 0x80/0x1a0 for a22x? */
+
+ /* note: gsl doesn't set this */
+ gpu_write(gpu, REG_A2XX_RBBM_DEBUG, 0x00080000);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_CNTL,
+ A2XX_RBBM_INT_CNTL_RDERR_INT_MASK);
+ gpu_write(gpu, REG_AXXX_CP_INT_CNTL,
+ AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK |
+ AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK |
+ AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB_ERROR_MASK |
+ AXXX_CP_INT_CNTL_IB1_INT_MASK |
+ AXXX_CP_INT_CNTL_RB_INT_MASK);
+ gpu_write(gpu, REG_A2XX_SQ_INT_CNTL, 0);
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_MASK,
+ A2XX_MH_INTERRUPT_MASK_AXI_READ_ERROR |
+ A2XX_MH_INTERRUPT_MASK_AXI_WRITE_ERROR |
+ A2XX_MH_INTERRUPT_MASK_MMU_PAGE_FAULT);
+
+ for (i = 3; i <= 5; i++)
+ if ((SZ_16K << i) == adreno_gpu->gmem)
+ break;
+ gpu_write(gpu, REG_A2XX_RB_EDRAM_INFO, i);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /* NOTE: PM4/micro-engine firmware registers look to be the same
+ * for a2xx and a3xx.. we could possibly push that part down to
+ * adreno_gpu base class. Or push both PM4 and PFP but
+ * parameterize the pfp ucode addr/data registers..
+ */
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
+ DBG("loading PM4 ucode version: %x", ptr[1]);
+
+ gpu_write(gpu, REG_AXXX_CP_DEBUG,
+ AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
+ DBG("loading PFP ucode version: %x", ptr[5]);
+
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A2XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 0x000C0804);
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+ return a2xx_me_init(gpu) ? 0 : -EINVAL;
+}
+
+static void a2xx_recover(struct msm_gpu *gpu)
+{
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++) {
+ printk("CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+ }
+
+ /* dump registers before resetting gpu, if enabled: */
+ if (hang_debug)
+ a2xx_dump(gpu);
+
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 1);
+ gpu_read(gpu, REG_A2XX_RBBM_SOFT_RESET);
+ gpu_write(gpu, REG_A2XX_RBBM_SOFT_RESET, 0);
+ adreno_recover(gpu);
+}
+
+static void a2xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+
+ kfree(a2xx_gpu);
+}
+
+static bool a2xx_idle(struct msm_gpu *gpu)
+{
+ /* wait for ringbuffer to drain: */
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
+
+ /* then wait for GPU to finish: */
+ if (spin_until(!(gpu_read(gpu, REG_A2XX_RBBM_STATUS) &
+ A2XX_RBBM_STATUS_GUI_ACTIVE))) {
+ DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
+}
+
+static irqreturn_t a2xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t mstatus, status;
+
+ mstatus = gpu_read(gpu, REG_A2XX_MASTER_INT_SIGNAL);
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_MH_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_MH_INTERRUPT_STATUS);
+
+ dev_warn(gpu->dev->dev, "MH_INT: %08X\n", status);
+ dev_warn(gpu->dev->dev, "MMU_PAGE_FAULT: %08X\n",
+ gpu_read(gpu, REG_A2XX_MH_MMU_PAGE_FAULT));
+
+ gpu_write(gpu, REG_A2XX_MH_INTERRUPT_CLEAR, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_CP_INT_STAT) {
+ status = gpu_read(gpu, REG_AXXX_CP_INT_STATUS);
+
+ /* only RB_INT is expected */
+ if (status & ~AXXX_CP_INT_CNTL_RB_INT_MASK)
+ dev_warn(gpu->dev->dev, "CP_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_AXXX_CP_INT_ACK, status);
+ }
+
+ if (mstatus & A2XX_MASTER_INT_SIGNAL_RBBM_INT_STAT) {
+ status = gpu_read(gpu, REG_A2XX_RBBM_INT_STATUS);
+
+ dev_warn(gpu->dev->dev, "RBBM_INT: %08X\n", status);
+
+ gpu_write(gpu, REG_A2XX_RBBM_INT_ACK, status);
+ }
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const unsigned int a200_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A43, 0x0A45, 0x0A45,
+ 0x0A4E, 0x0A4F, 0x0C2C, 0x0C2C, 0x0C30, 0x0C30, 0x0C38, 0x0C3C,
+ 0x0C40, 0x0C40, 0x0C44, 0x0C44, 0x0C80, 0x0C86, 0x0C88, 0x0C94,
+ 0x0C99, 0x0C9A, 0x0CA4, 0x0CA5, 0x0D00, 0x0D03, 0x0D06, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x0F0C, 0x0F0C, 0x0F0E, 0x0F12,
+ 0x0F26, 0x0F2A, 0x0F2C, 0x0F2C, 0x2000, 0x2002, 0x2006, 0x200F,
+ 0x2080, 0x2082, 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184,
+ 0x21F5, 0x21F7, 0x2200, 0x2208, 0x2280, 0x2283, 0x2293, 0x2294,
+ 0x2300, 0x2308, 0x2312, 0x2312, 0x2316, 0x231D, 0x2324, 0x2326,
+ 0x2380, 0x2383, 0x2400, 0x2402, 0x2406, 0x240F, 0x2480, 0x2482,
+ 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7,
+ 0x2600, 0x2608, 0x2680, 0x2683, 0x2693, 0x2694, 0x2700, 0x2708,
+ 0x2712, 0x2712, 0x2716, 0x271D, 0x2724, 0x2726, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4805, 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a220_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x01C0, 0x01C1, 0x01C3, 0x01C8, 0x01D5, 0x01D9,
+ 0x01DC, 0x01DD, 0x01EA, 0x01EA, 0x01EE, 0x01F3, 0x01F6, 0x01F7,
+ 0x01FC, 0x01FF, 0x0391, 0x0392, 0x039B, 0x039E, 0x03B2, 0x03B5,
+ 0x03B7, 0x03B7, 0x03F8, 0x03FB, 0x0440, 0x0440, 0x0443, 0x0444,
+ 0x044B, 0x044B, 0x044D, 0x044F, 0x0452, 0x0452, 0x0454, 0x045B,
+ 0x047F, 0x047F, 0x0578, 0x0587, 0x05C9, 0x05C9, 0x05D0, 0x05D0,
+ 0x0601, 0x0604, 0x0606, 0x0609, 0x060B, 0x060E, 0x0613, 0x0614,
+ 0x0A29, 0x0A2B, 0x0A2F, 0x0A31, 0x0A40, 0x0A40, 0x0A42, 0x0A43,
+ 0x0A45, 0x0A45, 0x0A4E, 0x0A4F, 0x0C30, 0x0C30, 0x0C38, 0x0C39,
+ 0x0C3C, 0x0C3C, 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03,
+ 0x0D05, 0x0D06, 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1,
+ 0x0DC8, 0x0DD4, 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04,
+ 0x0E17, 0x0E1E, 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0,
+ 0x0ED4, 0x0ED7, 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x2002,
+ 0x2006, 0x200F, 0x2080, 0x2082, 0x2100, 0x2102, 0x2104, 0x2109,
+ 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7, 0x2200, 0x2202,
+ 0x2204, 0x2204, 0x2208, 0x2208, 0x2280, 0x2282, 0x2294, 0x2294,
+ 0x2300, 0x2308, 0x2309, 0x230A, 0x2312, 0x2312, 0x2316, 0x2316,
+ 0x2318, 0x231D, 0x2324, 0x2326, 0x2380, 0x2383, 0x2400, 0x2402,
+ 0x2406, 0x240F, 0x2480, 0x2482, 0x2500, 0x2502, 0x2504, 0x2509,
+ 0x250C, 0x2514, 0x2580, 0x2584, 0x25F5, 0x25F7, 0x2600, 0x2602,
+ 0x2604, 0x2606, 0x2608, 0x2608, 0x2680, 0x2682, 0x2694, 0x2694,
+ 0x2700, 0x2708, 0x2712, 0x2712, 0x2716, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2780, 0x2783, 0x4000, 0x4003, 0x4800, 0x4805,
+ 0x4900, 0x4900, 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+static const unsigned int a225_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x000B, 0x003B, 0x003D, 0x0040, 0x0044,
+ 0x0046, 0x0047, 0x013C, 0x013C, 0x0140, 0x014F, 0x01C0, 0x01C1,
+ 0x01C3, 0x01C8, 0x01D5, 0x01D9, 0x01DC, 0x01DD, 0x01EA, 0x01EA,
+ 0x01EE, 0x01F3, 0x01F6, 0x01F7, 0x01FC, 0x01FF, 0x0391, 0x0392,
+ 0x039B, 0x039E, 0x03B2, 0x03B5, 0x03B7, 0x03B7, 0x03F8, 0x03FB,
+ 0x0440, 0x0440, 0x0443, 0x0444, 0x044B, 0x044B, 0x044D, 0x044F,
+ 0x0452, 0x0452, 0x0454, 0x045B, 0x047F, 0x047F, 0x0578, 0x0587,
+ 0x05C9, 0x05C9, 0x05D0, 0x05D0, 0x0601, 0x0604, 0x0606, 0x0609,
+ 0x060B, 0x060E, 0x0613, 0x0614, 0x0A29, 0x0A2B, 0x0A2F, 0x0A31,
+ 0x0A40, 0x0A40, 0x0A42, 0x0A43, 0x0A45, 0x0A45, 0x0A4E, 0x0A4F,
+ 0x0C01, 0x0C1D, 0x0C30, 0x0C30, 0x0C38, 0x0C39, 0x0C3C, 0x0C3C,
+ 0x0C80, 0x0C81, 0x0C88, 0x0C93, 0x0D00, 0x0D03, 0x0D05, 0x0D06,
+ 0x0D08, 0x0D0B, 0x0D34, 0x0D35, 0x0DAE, 0x0DC1, 0x0DC8, 0x0DD4,
+ 0x0DD8, 0x0DD9, 0x0E00, 0x0E00, 0x0E02, 0x0E04, 0x0E17, 0x0E1E,
+ 0x0EC0, 0x0EC9, 0x0ECB, 0x0ECC, 0x0ED0, 0x0ED0, 0x0ED4, 0x0ED7,
+ 0x0EE0, 0x0EE2, 0x0F01, 0x0F02, 0x2000, 0x200F, 0x2080, 0x2082,
+ 0x2100, 0x2109, 0x210C, 0x2114, 0x2180, 0x2184, 0x21F5, 0x21F7,
+ 0x2200, 0x2202, 0x2204, 0x2206, 0x2208, 0x2210, 0x2220, 0x2222,
+ 0x2280, 0x2282, 0x2294, 0x2294, 0x2297, 0x2297, 0x2300, 0x230A,
+ 0x2312, 0x2312, 0x2315, 0x2316, 0x2318, 0x231D, 0x2324, 0x2326,
+ 0x2340, 0x2357, 0x2360, 0x2360, 0x2380, 0x2383, 0x2400, 0x240F,
+ 0x2480, 0x2482, 0x2500, 0x2509, 0x250C, 0x2514, 0x2580, 0x2584,
+ 0x25F5, 0x25F7, 0x2600, 0x2602, 0x2604, 0x2606, 0x2608, 0x2610,
+ 0x2620, 0x2622, 0x2680, 0x2682, 0x2694, 0x2694, 0x2697, 0x2697,
+ 0x2700, 0x270A, 0x2712, 0x2712, 0x2715, 0x2716, 0x2718, 0x271D,
+ 0x2724, 0x2726, 0x2740, 0x2757, 0x2760, 0x2760, 0x2780, 0x2783,
+ 0x4000, 0x4003, 0x4800, 0x4806, 0x4808, 0x4808, 0x4900, 0x4900,
+ 0x4908, 0x4908,
+ ~0 /* sentinel */
+};
+
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+static void a2xx_dump(struct msm_gpu *gpu)
+{
+ printk("status: %08x\n",
+ gpu_read(gpu, REG_A2XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A2XX_RBBM_STATUS);
+
+ return state;
+}
+
+/* Register offset defines for A2XX - copy of A3XX */
+static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
+};
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a2xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a2xx_recover,
+ .submit = adreno_submit,
+ .flush = adreno_flush,
+ .active_ring = adreno_active_ring,
+ .irq = a2xx_irq,
+ .destroy = a2xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_state_get = a2xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ },
+};
+
+static const struct msm_gpu_perfcntr perfcntrs[] = {
+/* TODO */
+};
+
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+{
+ struct a2xx_gpu *a2xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no a2xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ a2xx_gpu = kzalloc(sizeof(*a2xx_gpu), GFP_KERNEL);
+ if (!a2xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ adreno_gpu = &a2xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ gpu->perfcntrs = perfcntrs;
+ gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
+
+ if (adreno_is_a20x(adreno_gpu))
+ adreno_gpu->registers = a200_registers;
+ else if (adreno_is_a225(adreno_gpu))
+ adreno_gpu->registers = a225_registers;
+ else
+ adreno_gpu->registers = a220_registers;
+
+ adreno_gpu->reg_offsets = a2xx_register_offsets;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret)
+ goto fail;
+
+ if (!gpu->aspace) {
+ dev_err(dev->dev, "No memory protection without MMU\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ return gpu;
+
+fail:
+ if (a2xx_gpu)
+ a2xx_destroy(&a2xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
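The a200/a220/a225_registers[] arrays above are inclusive (start, end) pairs of dword register indices, terminated by a ~0 sentinel; this is the convention the shared adreno dump/show code walks. A minimal sketch of such a walker (the in-tree version lives in adreno_gpu.c):

	for (i = 0; registers[i] != ~0; i += 2) {
		uint32_t start = registers[i];
		uint32_t end = registers[i + 1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++)
			printk("IO:R %08x %08x\n", addr << 2,
			       gpu_read(gpu, addr));
	}

The addr << 2 converts the dword index used throughout these headers into a byte offset.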
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
new file mode 100644
index 000000000000..02fba2cb8932
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef __A2XX_GPU_H__
+#define __A2XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a2xx.xml.h"
+
+struct a2xx_gpu {
+ struct adreno_gpu base;
+ bool pm_enabled;
+};
+#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+
+#endif /* __A2XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index a89f7bb8b5cc..17059f242a98 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 669c2d4b070d..c3b4bc6e4155 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -481,7 +481,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
- dev_err(dev->dev, "no a3xx device\n");
+ DRM_DEV_ERROR(dev->dev, "no a3xx device\n");
ret = -ENXIO;
goto fail;
}
@@ -528,7 +528,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
- dev_err(dev->dev, "No memory protection without IOMMU\n");
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 858690f52854..9b51e25a9583 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 7c4e6dc1ed59..18f9a8e0bf3b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -561,7 +561,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
- dev_err(dev->dev, "no a4xx device\n");
+ DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
ret = -ENXIO;
goto fail;
}
@@ -608,7 +608,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
* to not be possible to restrict access, then we must
* implement a cmdstream validator.
*/
- dev_err(dev->dev, "No memory protection without IOMMU\n");
+ DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index b4944cc0e62f..cf4fe14ddd6e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index d2127b1c4ece..d9af3aff690f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -130,15 +130,13 @@ reset_set(void *data, u64 val)
adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
if (a5xx_gpu->pm4_bo) {
- if (a5xx_gpu->pm4_iova)
- msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
- if (a5xx_gpu->pfp_iova)
- msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
@@ -173,7 +171,7 @@ int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
return ret;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 48b5304f460c..d5f5e56422f5 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -20,7 +20,6 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
-#include <linux/iopoll.h>
#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -511,13 +510,16 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
+
if (IS_ERR(a5xx_gpu->pm4_bo)) {
ret = PTR_ERR(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
- dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PM4: %d\n",
ret);
return ret;
}
+
+ msm_gem_object_set_name(a5xx_gpu->pm4_bo, "pm4fw");
}
if (!a5xx_gpu->pfp_bo) {
@@ -527,10 +529,12 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
if (IS_ERR(a5xx_gpu->pfp_bo)) {
ret = PTR_ERR(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
- dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "could not allocate PFP: %d\n",
ret);
return ret;
}
+
+ msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
}
gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@ -841,20 +845,17 @@ static void a5xx_destroy(struct msm_gpu *gpu)
a5xx_preempt_fini(gpu);
if (a5xx_gpu->pm4_bo) {
- if (a5xx_gpu->pm4_iova)
- msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
- if (a5xx_gpu->pfp_iova)
- msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
- if (a5xx_gpu->gpmu_iova)
- msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
}
@@ -1028,7 +1029,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
- dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ DRM_DEV_ERROR(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->seqno : 0,
gpu_read(gpu, REG_A5XX_RBBM_STATUS),
gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
@@ -1134,7 +1135,7 @@ static const u32 a5xx_registers[] = {
static void a5xx_dump(struct msm_gpu *gpu)
{
- dev_info(gpu->dev->dev, "status: %08x\n",
+ DRM_DEV_INFO(gpu->dev->dev, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
adreno_dump(gpu);
}
@@ -1211,10 +1212,6 @@ struct a5xx_gpu_state {
u32 *hlsqregs;
};
-#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
- readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
- interval, timeout)
-
static int a5xx_crashdumper_init(struct msm_gpu *gpu,
struct a5xx_crashdumper *dumper)
{
@@ -1222,19 +1219,10 @@ static int a5xx_crashdumper_init(struct msm_gpu *gpu,
SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
&dumper->bo, &dumper->iova);
- if (IS_ERR(dumper->ptr))
- return PTR_ERR(dumper->ptr);
-
- return 0;
-}
-
-static void a5xx_crashdumper_free(struct msm_gpu *gpu,
- struct a5xx_crashdumper *dumper)
-{
- msm_gem_put_iova(dumper->bo, gpu->aspace);
- msm_gem_put_vaddr(dumper->bo);
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
- drm_gem_object_put(dumper->bo);
+ return PTR_ERR_OR_ZERO(dumper->ptr);
}
static int a5xx_crashdumper_run(struct msm_gpu *gpu,
@@ -1329,7 +1317,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
if (a5xx_crashdumper_run(gpu, &dumper)) {
kfree(a5xx_state->hlsqregs);
- a5xx_crashdumper_free(gpu, &dumper);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
return;
}
@@ -1337,7 +1325,7 @@ static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
count * sizeof(u32));
- a5xx_crashdumper_free(gpu, &dumper);
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
}
static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
@@ -1508,7 +1496,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
int ret;
if (!pdev) {
- dev_err(dev->dev, "No A5XX device is defined\n");
+ DRM_DEV_ERROR(dev->dev, "No A5XX device is defined\n");
return ERR_PTR(-ENXIO);
}
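
The crashdumper-init rework above leans on PTR_ERR_OR_ZERO(): it returns the ERR_PTR-encoded error when the allocation failed and 0 otherwise, which lets the separate IS_ERR() branch and the dedicated a5xx_crashdumper_free() routine go away. A minimal sketch of the idiom, reusing the msm_gem calls from the hunk but with a hypothetical wrapper name:

    #include <linux/err.h>	/* IS_ERR(), PTR_ERR_OR_ZERO() */

    /* Sketch only: example_crashdumper mirrors struct a5xx_crashdumper */
    struct example_crashdumper {
    	void *ptr;
    	struct drm_gem_object *bo;
    	u64 iova;
    };

    static int example_crashdumper_init(struct msm_gpu *gpu,
    		struct example_crashdumper *dumper)
    {
    	dumper->ptr = msm_gem_kernel_new_locked(gpu->dev, SZ_1M,
    		MSM_BO_UNCACHED, gpu->aspace, &dumper->bo, &dumper->iova);

    	/* Only name the BO if the allocation actually succeeded */
    	if (!IS_ERR(dumper->ptr))
    		msm_gem_object_set_name(dumper->bo, "crashdump");

    	/* The encoded error on failure, 0 on success */
    	return PTR_ERR_OR_ZERO(dumper->ptr);
    }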
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 7a41e1c147e4..70e65c94e525 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -298,7 +298,9 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
if (IS_ERR(ptr))
- goto err;
+ return;
+
+ msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");
while (cmds_size > 0) {
int i;
@@ -317,15 +319,4 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_dwords = dwords;
-
- return;
-err:
- if (a5xx_gpu->gpmu_iova)
- msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
- if (a5xx_gpu->gpmu_bo)
- drm_gem_object_put(a5xx_gpu->gpmu_bo);
-
- a5xx_gpu->gpmu_bo = NULL;
- a5xx_gpu->gpmu_iova = 0;
- a5xx_gpu->gpmu_dwords = 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 4c357ead1be6..3d62310a535f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -92,7 +92,7 @@ static void a5xx_preempt_timer(struct timer_list *t)
if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
return;
- dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+ DRM_DEV_ERROR(dev->dev, "%s: preemption timed out\n", gpu->name);
queue_work(priv->wq, &gpu->recover_work);
}
@@ -188,7 +188,7 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
if (unlikely(status)) {
set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
- dev_err(dev->dev, "%s: Preemption failed to complete\n",
+ DRM_DEV_ERROR(dev->dev, "%s: Preemption failed to complete\n",
gpu->name);
queue_work(priv->wq, &gpu->recover_work);
return;
@@ -245,6 +245,8 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
if (IS_ERR(ptr))
return PTR_ERR(ptr);
+ msm_gem_object_set_name(bo, "preempt");
+
a5xx_gpu->preempt_bo[ring->id] = bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
@@ -267,18 +269,8 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
- for (i = 0; i < gpu->nr_rings; i++) {
- if (!a5xx_gpu->preempt_bo[i])
- continue;
-
- msm_gem_put_vaddr(a5xx_gpu->preempt_bo[i]);
-
- if (a5xx_gpu->preempt_iova[i])
- msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
-
- drm_gem_object_put(a5xx_gpu->preempt_bo[i]);
- a5xx_gpu->preempt_bo[i] = NULL;
- }
+ for (i = 0; i < gpu->nr_rings; i++)
+ msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
}
void a5xx_preempt_init(struct msm_gpu *gpu)
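
Both the a5xx_gpu.c and a5xx_preempt.c hunks replace the open-coded teardown (drop the vaddr, unpin the iova, put the object) with a single msm_gem_kernel_put() call; since the loop above no longer guards against a NULL preempt_bo, the helper is evidently expected to be NULL-safe. A sketch of that assumed contract (the real implementation lives in msm_gem.c, outside this diff):

    /* Sketch of the assumed msm_gem_kernel_put() contract, not the
     * actual msm_gem.c implementation */
    static void example_kernel_put(struct drm_gem_object *bo,
    		struct msm_gem_address_space *aspace, bool locked)
    {
    	if (IS_ERR_OR_NULL(bo))
    		return;

    	msm_gem_put_vaddr(bo);
    	msm_gem_unpin_iova(bo, aspace);

    	if (locked)
    		drm_gem_object_put(bo);
    	else
    		drm_gem_object_put_unlocked(bo);
    }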
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index a6f7c40454a6..f44553ec3193 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -501,7 +501,7 @@ enum a6xx_vfd_perfcounter_select {
PERF_VFDP_VS_STAGE_WAVES = 22,
};
-enum a6xx_hslq_perfcounter_select {
+enum a6xx_hlsq_perfcounter_select {
PERF_HLSQ_BUSY_CYCLES = 0,
PERF_HLSQ_STALL_CYCLES_UCHE = 1,
PERF_HLSQ_STALL_CYCLES_SP_STATE = 2,
@@ -2959,6 +2959,8 @@ static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+#define A6XX_GRAS_LRZ_CNTL_UNK3 0x00000008
+#define A6XX_GRAS_LRZ_CNTL_UNK4 0x00000010
#define REG_A6XX_GRAS_UNKNOWN_8101 0x00008101
@@ -2997,6 +2999,13 @@ static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
#define REG_A6XX_GRAS_UNKNOWN_8110 0x00008110
#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+#define A6XX_GRAS_2D_BLIT_CNTL_SCISSOR 0x00010000
#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
#define A6XX_GRAS_2D_SRC_TL_X_X__MASK 0x00ffff00
@@ -3449,6 +3458,7 @@ static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
}
#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A6XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3642,6 +3652,9 @@ static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+#define REG_A6XX_RB_LRZ_CNTL 0x00008898
+#define A6XX_RB_LRZ_CNTL_ENABLE 0x00000001
+
#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
@@ -3674,6 +3687,14 @@ static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
}
+#define REG_A6XX_RB_MSAA_CNTL 0x000088d5
+#define A6XX_RB_MSAA_CNTL_SAMPLES__MASK 0x00000018
+#define A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_MSAA_CNTL_SAMPLES__MASK;
+}
+
#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
@@ -3684,6 +3705,12 @@ static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
}
#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK 0x00000018
+#define A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT 3
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_SAMPLES__SHIFT) & A6XX_RB_BLIT_DST_INFO_SAMPLES__MASK;
+}
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
@@ -3780,6 +3807,9 @@ static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val
{
return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
}
+#define A6XX_RB_2D_BLIT_CNTL_SCISSOR 0x00010000
+
+#define REG_A6XX_RB_UNKNOWN_8C01 0x00008c01
#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
@@ -4465,6 +4495,7 @@ static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
#define A6XX_SP_BLEND_CNTL_ENABLED 0x00000001
#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A6XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
@@ -4643,6 +4674,8 @@ static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
#define REG_A6XX_SP_UNKNOWN_AB20 0x0000ab20
+#define REG_A6XX_SP_UNKNOWN_ACC0 0x0000acc0
+
#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
#define REG_A6XX_SP_UNKNOWN_AE03 0x0000ae03
@@ -4700,11 +4733,34 @@ static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap va
return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
}
#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+#define A6XX_SP_PS_2D_SRC_INFO_FILTER 0x00010000
+
+#define REG_A6XX_SP_PS_2D_SRC_SIZE 0x0000b4c1
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK 0x00007fff
+#define A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_WIDTH__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_WIDTH__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK 0x3fff8000
+#define A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_SP_PS_2D_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__SHIFT) & A6XX_SP_PS_2D_SRC_SIZE_HEIGHT__MASK;
+}
#define REG_A6XX_SP_PS_2D_SRC_LO 0x0000b4c2
#define REG_A6XX_SP_PS_2D_SRC_HI 0x0000b4c3
+#define REG_A6XX_SP_PS_2D_SRC_PITCH 0x0000b4c4
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK 0x01fffe00
+#define A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT 9
+static inline uint32_t A6XX_SP_PS_2D_SRC_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_SP_PS_2D_SRC_PITCH_PITCH__SHIFT) & A6XX_SP_PS_2D_SRC_PITCH_PITCH__MASK;
+}
+
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO 0x0000b4ca
#define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI 0x0000b4cb
@@ -5033,6 +5089,12 @@ static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
}
+#define A6XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A6XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A6XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SAMPLES__SHIFT) & A6XX_TEX_CONST_0_SAMPLES__MASK;
+}
#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
#define A6XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
@@ -5365,5 +5427,9 @@ static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000030
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_0 0x00000001
+
+#define REG_A6XX_CX_MISC_SYSTEM_CACHE_CNTL_1 0x00000002
+
#endif /* A6XX_XML */
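
Each field added to the generated header comes with __MASK/__SHIFT constants and an inline packing helper, so driver code composes register values by OR-ing helper results rather than hand-shifting magic numbers. An illustrative (not in-tree) use of the new blit and MSAA fields:

    /* Illustrative only: composes the fields added in this header update */
    static void example_program_2d_blit(struct msm_gpu *gpu,
    		enum a6xx_color_fmt fmt)
    {
    	/* Pack the 8-bit format field, then OR in the single-bit flag */
    	gpu_write(gpu, REG_A6XX_GRAS_2D_BLIT_CNTL,
    		A6XX_GRAS_2D_BLIT_CNTL_COLOR_FORMAT(fmt) |
    		A6XX_GRAS_2D_BLIT_CNTL_SCISSOR);

    	/* Sample count reuses the generation-independent MSAA enum */
    	gpu_write(gpu, REG_A6XX_RB_MSAA_CNTL,
    		A6XX_RB_MSAA_CNTL_SAMPLES(MSAA_FOUR));
    }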
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index d4e98e5876bc..c58e953fefa3 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -51,10 +51,31 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
return IRQ_HANDLED;
}
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (IS_ERR_OR_NULL(gmu->mmio))
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF));
+}
+
/* Check to see if the GX rail is still powered */
-static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
- u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+ u32 val;
+
+ /* This can be called from gpu state code so make sure GMU is valid */
+ if (IS_ERR_OR_NULL(gmu->mmio))
+ return false;
+
+ val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
return !(val &
(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
@@ -153,7 +174,7 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
val == 0xbabeface, 100, 10000);
if (ret)
- dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
return ret;
}
@@ -168,7 +189,7 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
val & 1, 100, 10000);
if (ret)
- dev_err(gmu->dev, "Unable to start the HFI queues\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to start the HFI queues\n");
return ret;
}
@@ -209,7 +230,7 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
val & (1 << ack), 100, 10000);
if (ret)
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Timeout waiting for GMU OOB set %s: 0x%x\n",
name,
gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
@@ -251,7 +272,7 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
(val & 0x38) == 0x28, 1, 100);
if (ret) {
- dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
@@ -273,7 +294,7 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
(val & 0x04), 100, 10000);
if (ret)
- dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+ DRM_DEV_ERROR(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
}
@@ -317,7 +338,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
/* Check to see if the GMU really did slumber */
if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
!= 0x0f) {
- dev_err(gmu->dev, "The GMU did not go into slumber\n");
+ DRM_DEV_ERROR(gmu->dev, "The GMU did not go into slumber\n");
ret = -ETIMEDOUT;
}
}
@@ -339,23 +360,27 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
val & (1 << 1), 100, 10000);
if (ret) {
- dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to power on the GPU RSC\n");
return ret;
}
ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
!val, 100, 10000);
- if (!ret) {
- gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
-
- /* Re-enable the power counter */
- gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
- return 0;
+ if (ret) {
+ DRM_DEV_ERROR(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+ return ret;
}
- dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
- return ret;
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ /* Set up CX GMU counter 0 to count busy ticks */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+ gmu_rmw(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
+
+ /* Enable the power counter */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+ return 0;
}
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
@@ -368,7 +393,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
val, val & (1 << 16), 100, 10000);
if (ret)
- dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
}
@@ -520,7 +545,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
/* Sanity check the size of the firmware that was loaded */
if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"GMU firmware is bigger than the available region\n");
return -EINVAL;
}
@@ -764,7 +789,7 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
*/
if (ret)
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Unable to slumber GMU: status = 0%x/0%x\n",
gmu_read(gmu,
REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
@@ -843,7 +868,7 @@ static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
IOMMU_READ | IOMMU_WRITE);
if (ret) {
- dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
for (i = i - 1; i >= 0; i--)
iommu_unmap(gmu->domain,
@@ -969,12 +994,12 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
}
if (j == pri_count) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"Level %u not found in in the RPMh list\n",
level);
- dev_err(dev, "Available levels:\n");
+ DRM_DEV_ERROR(dev, "Available levels:\n");
for (j = 0; j < pri_count; j++)
- dev_err(dev, " %u\n", pri[j]);
+ DRM_DEV_ERROR(dev, " %u\n", pri[j]);
return -EINVAL;
}
@@ -1081,7 +1106,7 @@ static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
*/
ret = dev_pm_opp_of_add_table(gmu->dev);
if (ret) {
- dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+ DRM_DEV_ERROR(gmu->dev, "Unable to set the OPP table for the GMU\n");
return ret;
}
@@ -1122,13 +1147,13 @@ static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
IORESOURCE_MEM, name);
if (!res) {
- dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!ret) {
- dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
return ERR_PTR(-EINVAL);
}
@@ -1145,7 +1170,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
name, gmu);
if (ret) {
- dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s\n", name);
return ret;
}
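
All of the waits converted above funnel through gmu_poll_timeout(), and the rework in a6xx_rpmh_start() now also reprograms the CX busy counter on every wake-up rather than only once at boot (the matching one-time setup is deleted from a6xx_hw_init() further down). The polling macro itself is defined in a6xx_gmu.h, not in this hunk; its shape presumably mirrors the gpu_poll_timeout() macro removed from a5xx_gpu.c above:

    #include <linux/iopoll.h>

    /* Sketch of the assumed wrapper: GMU registers are dword-indexed, so
     * the offset is shifted by 2; returns 0 or -ETIMEDOUT like
     * readl_poll_timeout() */
    #define example_gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
    	readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
    		interval, timeout)

    static int example_wait_hfi_ready(struct a6xx_gmu *gmu)
    {
    	u32 val;

    	/* Same wait as a6xx_gmu_hfi_start() above: poll until bit 0 sets */
    	return example_gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS,
    		val, val & 1, 100, 10000);
    }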
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 35f765afae45..c721d9165d8e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -164,4 +164,7 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
+bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
+
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index db56f263ed77..1cc1c135236b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 631257c297fd..fefe773c989e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -4,6 +4,7 @@
#include "msm_gem.h"
#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
@@ -67,13 +68,36 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
}
+static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
+ u64 iova)
+{
+ OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+ OUT_RING(ring, counter | (1 << 30) | (2 << 18));
+ OUT_RING(ring, lower_32_bits(iova));
+ OUT_RING(ring, upper_32_bits(iova));
+}
+
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
+ unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ rbmemptr_stats(ring, index, cpcycles_start));
+
+ /*
+ * For PM4 the GMU register offsets are calculated from the base of the
+ * GPU registers, so we need to add 0x1a800 to the register value on A630
+ * to get the right value from PM4.
+ */
+ get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ rbmemptr_stats(ring, index, alwayson_start));
+
/* Invalidate CCU depth and color */
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
@@ -98,6 +122,11 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
}
+ get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ rbmemptr_stats(ring, index, cpcycles_end));
+ get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+ rbmemptr_stats(ring, index, alwayson_end));
+
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
@@ -112,6 +141,10 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
+ trace_msm_gpu_submit_flush(submit,
+ gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
+ REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+
a6xx_flush(gpu, ring);
}
@@ -300,6 +333,8 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
return ret;
}
+
+ msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
}
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -387,14 +422,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
- /* FIXME: not sure if this should live here or in a6xx_gmu.c */
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
- 0xff000000);
- gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
- 0xff, 0x20);
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
- 0x01);
-
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
@@ -481,7 +508,7 @@ out:
static void a6xx_dump(struct msm_gpu *gpu)
{
- dev_info(&gpu->pdev->dev, "status: %08x\n",
+ DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n",
gpu_read(gpu, REG_A6XX_RBBM_STATUS));
adreno_dump(gpu);
}
@@ -498,7 +525,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
adreno_dump_info(gpu);
for (i = 0; i < 8; i++)
- dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
if (hang_debug)
@@ -645,33 +672,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
-static const u32 a6xx_registers[] = {
- 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
- 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
- 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
- 0x0100, 0x011d, 0x0200, 0x020d, 0x0210, 0x0213, 0x0218, 0x023d,
- 0x0400, 0x04f9, 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511,
- 0x0533, 0x0533, 0x0540, 0x0555, 0x0800, 0x0808, 0x0810, 0x0813,
- 0x0820, 0x0821, 0x0823, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
- 0x084f, 0x086f, 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4,
- 0x08d0, 0x08dd, 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911,
- 0x0928, 0x093e, 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996,
- 0x0998, 0x099e, 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1,
- 0x09c2, 0x09c8, 0x0a00, 0x0a03, 0x0c00, 0x0c04, 0x0c06, 0x0c06,
- 0x0c10, 0x0cd9, 0x0e00, 0x0e0e, 0x0e10, 0x0e13, 0x0e17, 0x0e19,
- 0x0e1c, 0x0e2b, 0x0e30, 0x0e32, 0x0e38, 0x0e39, 0x8600, 0x8601,
- 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, 0x8630, 0x8637,
- 0x8e01, 0x8e01, 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e0c, 0x8e0c,
- 0x8e10, 0x8e1c, 0x8e20, 0x8e25, 0x8e28, 0x8e28, 0x8e2c, 0x8e2f,
- 0x8e3b, 0x8e3e, 0x8e40, 0x8e43, 0x8e50, 0x8e5e, 0x8e70, 0x8e77,
- 0x9600, 0x9604, 0x9624, 0x9637, 0x9e00, 0x9e01, 0x9e03, 0x9e0e,
- 0x9e11, 0x9e16, 0x9e19, 0x9e19, 0x9e1c, 0x9e1c, 0x9e20, 0x9e23,
- 0x9e30, 0x9e31, 0x9e34, 0x9e34, 0x9e70, 0x9e72, 0x9e78, 0x9e79,
- 0x9e80, 0x9fff, 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a,
- 0xa610, 0xa617, 0xa630, 0xa630,
- ~0
-};
-
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -724,14 +724,6 @@ static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-static void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
- struct drm_printer *p)
-{
- adreno_show(gpu, state, p);
-}
-#endif
-
static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -746,8 +738,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
if (a6xx_gpu->sqe_bo) {
- if (a6xx_gpu->sqe_iova)
- msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
}
@@ -796,6 +787,8 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
.gpu_set_freq = a6xx_gmu_set_freq,
+ .gpu_state_get = a6xx_gpu_state_get,
+ .gpu_state_put = a6xx_gpu_state_put,
},
.get_timestamp = a6xx_get_timestamp,
};
@@ -817,7 +810,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu = &a6xx_gpu->base;
gpu = &adreno_gpu->base;
- adreno_gpu->registers = a6xx_registers;
+ adreno_gpu->registers = NULL;
adreno_gpu->reg_offsets = a6xx_register_offsets;
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
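
The new submit path brackets every command stream with CP_REG_TO_MEM snapshots of the CP cycle counter and the GMU always-on counter, landing them in per-submit stats slots in ring memory. Turning those deltas into wall time is then simple arithmetic; a sketch, under the assumption that the always-on counter ticks at the SoC's 19.2 MHz always-on clock (plausible for A630-era parts, but an assumption here):

    #include <linux/math64.h>

    /* Hypothetical mirror of the rbmemptr_stats() slots written above */
    struct example_submit_stats {
    	u64 cpcycles_start, cpcycles_end;
    	u64 alwayson_start, alwayson_end;
    };

    static u64 example_submit_elapsed_us(const struct example_submit_stats *s)
    {
    	u64 ticks = s->alwayson_end - s->alwayson_start;

    	/* 19.2 ticks per microsecond: scale by 10/192 to avoid floats */
    	return div_u64(ticks * 10, 192);
    }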
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 4127dcebc202..528a4cfe07cd 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -56,6 +56,14 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+
void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq);
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu);
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu);
+int a6xx_gpu_state_put(struct msm_gpu_state *state);
+
#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
new file mode 100644
index 000000000000..e686331fa089
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -0,0 +1,1165 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/ascii85.h>
+#include "msm_gem.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.h"
+#include "a6xx_gpu_state.h"
+#include "a6xx_gmu.xml.h"
+
+struct a6xx_gpu_state_obj {
+ const void *handle;
+ u32 *data;
+};
+
+struct a6xx_gpu_state {
+ struct msm_gpu_state base;
+
+ struct a6xx_gpu_state_obj *gmu_registers;
+ int nr_gmu_registers;
+
+ struct a6xx_gpu_state_obj *registers;
+ int nr_registers;
+
+ struct a6xx_gpu_state_obj *shaders;
+ int nr_shaders;
+
+ struct a6xx_gpu_state_obj *clusters;
+ int nr_clusters;
+
+ struct a6xx_gpu_state_obj *dbgahb_clusters;
+ int nr_dbgahb_clusters;
+
+ struct a6xx_gpu_state_obj *indexed_regs;
+ int nr_indexed_regs;
+
+ struct a6xx_gpu_state_obj *debugbus;
+ int nr_debugbus;
+
+ struct a6xx_gpu_state_obj *vbif_debugbus;
+
+ struct a6xx_gpu_state_obj *cx_debugbus;
+ int nr_cx_debugbus;
+
+ struct list_head objs;
+};
+
+static inline int CRASHDUMP_WRITE(u64 *in, u32 reg, u32 val)
+{
+ in[0] = val;
+ in[1] = (((u64) reg) << 44 | (1 << 21) | 1);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_READ(u64 *in, u32 reg, u32 dwords, u64 target)
+{
+ in[0] = target;
+ in[1] = (((u64) reg) << 44 | dwords);
+
+ return 2;
+}
+
+static inline int CRASHDUMP_FINI(u64 *in)
+{
+ in[0] = 0;
+ in[1] = 0;
+
+ return 2;
+}
+
+struct a6xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
+struct a6xx_state_memobj {
+ struct list_head node;
+ unsigned long long data[];
+};
+
+static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
+{
+ struct a6xx_state_memobj *obj =
+ kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
+
+ if (!obj)
+ return NULL;
+
+ list_add_tail(&obj->node, &a6xx_state->objs);
+ return &obj->data;
+}
+
+static void *state_kmemdup(struct a6xx_gpu_state *a6xx_state, void *src,
+ size_t size)
+{
+ void *dst = state_kcalloc(a6xx_state, 1, size);
+
+ if (dst)
+ memcpy(dst, src, size);
+ return dst;
+}
+
+/*
+ * Allocate 1MB for the crashdumper scratch region - 8k for the script and
+ * the rest for the data
+ */
+#define A6XX_CD_DATA_OFFSET 8192
+#define A6XX_CD_DATA_SIZE (SZ_1M - 8192)
+
+static int a6xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+ &dumper->bo, &dumper->iova);
+
+ if (!IS_ERR(dumper->ptr))
+ msm_gem_object_set_name(dumper->bo, "crashdump");
+
+ return PTR_ERR_OR_ZERO(dumper->ptr);
+}
+
+static int a6xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a6xx_crashdumper *dumper)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u32 val;
+ int ret;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
+ return -EINVAL;
+
+ /* Make sure all pending memory writes are posted */
+ wmb();
+
+ gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE_LO,
+ REG_A6XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
+
+ ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
+ val & 0x02, 100, 10000);
+
+ gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
+
+ return ret;
+}
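+
+/*
+ * Illustrative only (not called anywhere): a crashdumper script is a
+ * flat array of two-qword entries built with the CRASHDUMP_* helpers
+ * above - optional register writes, register-range reads into the
+ * scratch BO, and a zero terminator. The register offsets here are
+ * placeholders, not real A6XX state.
+ */
+static void __maybe_unused example_build_script(struct a6xx_crashdumper *dumper)
+{
+	u64 *in = dumper->ptr;
+	u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+
+	in += CRASHDUMP_WRITE(in, 0x1000, 1);		/* select something */
+	in += CRASHDUMP_READ(in, 0x2000, 4, out);	/* copy 4 dwords out */
+	CRASHDUMP_FINI(in);				/* terminate the script */
+}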
+
+/* Read a value from the GX debug bus */
+static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(offset) |
+ A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(block);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = gpu_read(gpu, REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
+#define cxdbg_write(ptr, offset, val) \
+ msm_writel((val), (ptr) + ((offset) << 2))
+
+#define cxdbg_read(ptr, offset) \
+ msm_readl((ptr) + ((offset) << 2))
+
+/* Read a value from the CX debug bus */
+static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
+ u32 *data)
+{
+ u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
+ A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C, reg);
+ cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D, reg);
+
+ /* Wait 1 us to make sure the data is flowing */
+ udelay(1);
+
+ data[0] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2);
+ data[1] = cxdbg_read(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1);
+
+ return 2;
+}
+
+/* Read a chunk of data from the VBIF debug bus */
+static int vbif_debugbus_read(struct msm_gpu *gpu, u32 ctrl0, u32 ctrl1,
+ u32 reg, int count, u32 *data)
+{
+ int i;
+
+ gpu_write(gpu, ctrl0, reg);
+
+ for (i = 0; i < count; i++) {
+ gpu_write(gpu, ctrl1, i);
+ data[i] = gpu_read(gpu, REG_A6XX_VBIF_TEST_BUS_OUT);
+ }
+
+ return count;
+}
+
+#define AXI_ARB_BLOCKS 2
+#define XIN_AXI_BLOCKS 5
+#define XIN_CORE_BLOCKS 4
+
+#define VBIF_DEBUGBUS_BLOCK_SIZE \
+ ((16 * AXI_ARB_BLOCKS) + \
+ (18 * XIN_AXI_BLOCKS) + \
+ (12 * XIN_CORE_BLOCKS))
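+/* = (16 * 2) + (18 * 5) + (12 * 4) = 170 dwords per VBIF snapshot */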
+
+static void a6xx_get_vbif_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_gpu_state_obj *obj)
+{
+ u32 clk, *ptr;
+ int i;
+
+ obj->data = state_kcalloc(a6xx_state, VBIF_DEBUGBUS_BLOCK_SIZE,
+ sizeof(u32));
+ if (!obj->data)
+ return;
+
+ obj->handle = NULL;
+
+ /* Get the current clock setting */
+ clk = gpu_read(gpu, REG_A6XX_VBIF_CLKON);
+
+ /* Force on the bus so we can read it */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON,
+ clk | A6XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+ /* We will read from BUS2 first, so disable BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS1_CTRL0, 0);
+
+ /* Enable the VBIF bus for reading */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS_OUT_CTRL, 1);
+
+ ptr = obj->data;
+
+ for (i = 0; i < AXI_ARB_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << (i + 16), 16, ptr);
+
+ for (i = 0; i < XIN_AXI_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS2_CTRL1,
+ 1 << i, 18, ptr);
+
+ /* Stop BUS2 so we can turn on BUS1 */
+ gpu_write(gpu, REG_A6XX_VBIF_TEST_BUS2_CTRL0, 0);
+
+ for (i = 0; i < XIN_CORE_BLOCKS; i++)
+ ptr += vbif_debugbus_read(gpu,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL0,
+ REG_A6XX_VBIF_TEST_BUS1_CTRL1,
+ 1 << i, 12, ptr);
+
+ /* Restore the VBIF clock setting */
+ gpu_write(gpu, REG_A6XX_VBIF_CLKON, clk);
+}
+
+static void a6xx_get_debugbus_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += debugbus_read(gpu, block->id, i, ptr);
+}
+
+static void a6xx_get_cx_debugbus_block(void __iomem *cxdbg,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_debugbus_block *block,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+ u32 *ptr;
+
+ obj->data = state_kcalloc(a6xx_state, block->count, sizeof(u64));
+ if (!obj->data)
+ return;
+
+ obj->handle = block;
+
+ for (ptr = obj->data, i = 0; i < block->count; i++)
+ ptr += cx_debugbus_read(cxdbg, block->id, i, ptr);
+}
+
+static void a6xx_get_debugbus(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct resource *res;
+ void __iomem *cxdbg = NULL;
+
+ /* Set up the GX debug bus */
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98);
+
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+
+ /* Set up the CX debug bus - it lives elsewhere in the system so do a
+ * temporary ioremap for the registers
+ */
+ res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM,
+ "cx_dbgc");
+
+ if (res)
+ cxdbg = ioremap(res->start, resource_size(res));
+
+ if (cxdbg) {
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT,
+ A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM,
+ A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf));
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0);
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0,
+ 0x76543210);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1,
+ 0xFEDCBA98);
+
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0);
+ cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0);
+ }
+
+ a6xx_state->debugbus = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_debugbus_blocks),
+ sizeof(*a6xx_state->debugbus));
+
+ if (a6xx_state->debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_debugbus_blocks); i++)
+ a6xx_get_debugbus_block(gpu,
+ a6xx_state,
+ &a6xx_debugbus_blocks[i],
+ &a6xx_state->debugbus[i]);
+
+ a6xx_state->nr_debugbus = ARRAY_SIZE(a6xx_debugbus_blocks);
+ }
+
+ a6xx_state->vbif_debugbus =
+ state_kcalloc(a6xx_state, 1,
+ sizeof(*a6xx_state->vbif_debugbus));
+
+ if (a6xx_state->vbif_debugbus)
+ a6xx_get_vbif_debugbus_block(gpu, a6xx_state,
+ a6xx_state->vbif_debugbus);
+
+ if (cxdbg) {
+ a6xx_state->cx_debugbus =
+ state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks),
+ sizeof(*a6xx_state->cx_debugbus));
+
+ if (a6xx_state->cx_debugbus) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_cx_debugbus_blocks); i++)
+ a6xx_get_cx_debugbus_block(cxdbg,
+ a6xx_state,
+ &a6xx_cx_debugbus_blocks[i],
+ &a6xx_state->cx_debugbus[i]);
+
+ a6xx_state->nr_cx_debugbus =
+ ARRAY_SIZE(a6xx_cx_debugbus_blocks);
+ }
+
+ iounmap(cxdbg);
+ }
+}
+
+#define RANGE(reg, a) ((reg)[(a) + 1] - (reg)[(a)] + 1)
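+/*
+ * Register lists are (start, end) pairs, so for a pair { 0x0100, 0x0103 }
+ * RANGE(reg, 0) = 0x0103 - 0x0100 + 1 = 4 registers to dump.
+ */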
+
+/* Read a data cluster from behind the AHB aperture */
+static void a6xx_get_dbgahb_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_dbgahb_cluster *dbgahb,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (dbgahb->statetype + i * 2) << 8);
+
+ for (j = 0; j < dbgahb->count; j += 2) {
+ int count = RANGE(dbgahb->registers, j);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ dbgahb->registers[j] - (dbgahb->base >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = dbgahb;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_dbgahb_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_dbgahb_clusters),
+ sizeof(*a6xx_state->dbgahb_clusters));
+
+ if (!a6xx_state->dbgahb_clusters)
+ return;
+
+ a6xx_state->nr_dbgahb_clusters = ARRAY_SIZE(a6xx_dbgahb_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_dbgahb_clusters); i++)
+ a6xx_get_dbgahb_cluster(gpu, a6xx_state,
+ &a6xx_dbgahb_clusters[i],
+ &a6xx_state->dbgahb_clusters[i], dumper);
+}
+
+/* Read a data cluster from the CP aperture with the crashdumper */
+static void a6xx_get_cluster(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_cluster *cluster,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ size_t datasize;
+ int i, regcount = 0;
+
+ /* Some clusters need a selector register to be programmed too */
+ if (cluster->sel_reg)
+ in += CRASHDUMP_WRITE(in, cluster->sel_reg, cluster->sel_val);
+
+ for (i = 0; i < A6XX_NUM_CONTEXTS; i++) {
+ int j;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD,
+ (cluster->id << 8) | (i << 4) | i);
+
+ for (j = 0; j < cluster->count; j += 2) {
+ int count = RANGE(cluster->registers, j);
+
+ in += CRASHDUMP_READ(in, cluster->registers[j],
+ count, out);
+
+ out += count * sizeof(u32);
+
+ if (i == 0)
+ regcount += count;
+ }
+ }
+
+ CRASHDUMP_FINI(in);
+
+ datasize = regcount * A6XX_NUM_CONTEXTS * sizeof(u32);
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = cluster;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_clusters(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->clusters = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_clusters), sizeof(*a6xx_state->clusters));
+
+ if (!a6xx_state->clusters)
+ return;
+
+ a6xx_state->nr_clusters = ARRAY_SIZE(a6xx_clusters);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++)
+ a6xx_get_cluster(gpu, a6xx_state, &a6xx_clusters[i],
+ &a6xx_state->clusters[i], dumper);
+}
+
+/* Read a shader / debug block from the HLSQ aperture with the crashdumper */
+static void a6xx_get_shader_block(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_shader_block *block,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ size_t datasize = block->size * A6XX_NUM_SHADER_BANKS * sizeof(u32);
+ int i;
+
+ if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
+ return;
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL,
+ (block->type << 8) | i);
+
+ in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
+ block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = block;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ datasize);
+}
+
+static void a6xx_get_shaders(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i;
+
+ a6xx_state->shaders = state_kcalloc(a6xx_state,
+ ARRAY_SIZE(a6xx_shader_blocks), sizeof(*a6xx_state->shaders));
+
+ if (!a6xx_state->shaders)
+ return;
+
+ a6xx_state->nr_shaders = ARRAY_SIZE(a6xx_shader_blocks);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_shader_blocks); i++)
+ a6xx_get_shader_block(gpu, a6xx_state, &a6xx_shader_blocks[i],
+ &a6xx_state->shaders[i], dumper);
+}
+
+/* Read registers from behind the HLSQ aperture with the crashdumper */
+static void a6xx_get_crashdumper_hlsq_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ in += CRASHDUMP_WRITE(in, REG_A6XX_HLSQ_DBG_READ_SEL, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ u32 offset = REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE +
+ regs->registers[i] - (regs->val0 >> 2);
+
+ in += CRASHDUMP_READ(in, offset, count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers using the crashdumper */
+static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj,
+ struct a6xx_crashdumper *dumper)
+{
+ u64 *in = dumper->ptr;
+ u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
+ int i, regcount = 0;
+
+ /* Some blocks might need to program a selector register first */
+ if (regs->val0)
+ in += CRASHDUMP_WRITE(in, regs->val0, regs->val1);
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+
+ in += CRASHDUMP_READ(in, regs->registers[i], count, out);
+
+ out += count * sizeof(u32);
+ regcount += count;
+ }
+
+ CRASHDUMP_FINI(in);
+
+ if (WARN_ON((regcount * sizeof(u32)) > A6XX_CD_DATA_SIZE))
+ return;
+
+ if (a6xx_crashdumper_run(gpu, dumper))
+ return;
+
+ obj->handle = regs;
+ obj->data = state_kmemdup(a6xx_state, dumper->ptr + A6XX_CD_DATA_OFFSET,
+ regcount * sizeof(u32));
+}
+
+/* Read a block of registers via AHB */
+static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++)
+ obj->data[index++] = gpu_read(gpu,
+ regs->registers[i] + j);
+ }
+}
+
+/* Read a block of GMU registers */
+static void _a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_registers *regs,
+ struct a6xx_gpu_state_obj *obj)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int i, regcount = 0, index = 0;
+
+ for (i = 0; i < regs->count; i += 2)
+ regcount += RANGE(regs->registers, i);
+
+ obj->handle = (const void *) regs;
+ obj->data = state_kcalloc(a6xx_state, regcount, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ for (i = 0; i < regs->count; i += 2) {
+ u32 count = RANGE(regs->registers, i);
+ int j;
+
+ for (j = 0; j < count; j++)
+ obj->data[index++] = gmu_read(gmu,
+ regs->registers[i] + j);
+ }
+}
+
+static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
+ 2, sizeof(*a6xx_state->gmu_registers));
+
+ if (!a6xx_state->gmu_registers)
+ return;
+
+ a6xx_state->nr_gmu_registers = 2;
+
+ /* Get the CX GMU registers from AHB */
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
+ &a6xx_state->gmu_registers[0]);
+
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return;
+
+ /* Set the fence to ALLOW mode so we can access the registers */
+ gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+
+ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[1],
+ &a6xx_state->gmu_registers[1]);
+}
+
+static void a6xx_get_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ struct a6xx_crashdumper *dumper)
+{
+ int i, count = ARRAY_SIZE(a6xx_ahb_reglist) +
+ ARRAY_SIZE(a6xx_reglist) +
+ ARRAY_SIZE(a6xx_hlsq_reglist);
+ int index = 0;
+
+ a6xx_state->registers = state_kcalloc(a6xx_state,
+ count, sizeof(*a6xx_state->registers));
+
+ if (!a6xx_state->registers)
+ return;
+
+ a6xx_state->nr_registers = count;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_ahb_reglist); i++)
+ a6xx_get_ahb_gpu_registers(gpu,
+ a6xx_state, &a6xx_ahb_reglist[i],
+ &a6xx_state->registers[index++]);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_reglist); i++)
+ a6xx_get_crashdumper_registers(gpu,
+ a6xx_state, &a6xx_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_hlsq_reglist); i++)
+ a6xx_get_crashdumper_hlsq_registers(gpu,
+ a6xx_state, &a6xx_hlsq_reglist[i],
+ &a6xx_state->registers[index++],
+ dumper);
+}
+
+/* Read a block of data from an indexed register pair */
+static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state,
+ const struct a6xx_indexed_registers *indexed,
+ struct a6xx_gpu_state_obj *obj)
+{
+ int i;
+
+ obj->handle = (const void *) indexed;
+ obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
+ if (!obj->data)
+ return;
+
+ /* All the indexed banks start at address 0 */
+ gpu_write(gpu, indexed->addr, 0);
+
+ /* Read the data - each read increments the internal address by 1 */
+ for (i = 0; i < indexed->count; i++)
+ obj->data[i] = gpu_read(gpu, indexed->data);
+}
+
+static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ u32 mempool_size;
+ int count = ARRAY_SIZE(a6xx_indexed_reglist) + 1;
+ int i;
+
+ a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
+ sizeof(*a6xx_state->indexed_regs));
+ if (!a6xx_state->indexed_regs)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_indexed_reglist); i++)
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_indexed_reglist[i],
+ &a6xx_state->indexed_regs[i]);
+
+ /* Set the CP mempool size to 0 to stabilize it while dumping */
+ mempool_size = gpu_read(gpu, REG_A6XX_CP_MEM_POOL_SIZE);
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 0);
+
+ /* Get the contents of the CP mempool */
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
+ &a6xx_state->indexed_regs[i]);
+
+ /*
+ * Offset 0x2000 in the mempool is the size - copy the saved size over
+ * so the data is consistent
+ */
+ a6xx_state->indexed_regs[i].data[0x2000] = mempool_size;
+
+ /* Restore the size in the hardware */
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, mempool_size);
+
+ a6xx_state->nr_indexed_regs = count;
+}
+
+struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a6xx_crashdumper dumper = { 0 };
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
+ GFP_KERNEL);
+
+ if (!a6xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&a6xx_state->objs);
+
+ /* Get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &a6xx_state->base);
+
+ a6xx_get_gmu_registers(gpu, a6xx_state);
+
+ /* If GX isn't on the rest of the data isn't going to be accessible */
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ return &a6xx_state->base;
+
+ /* Get the banks of indexed registers */
+ a6xx_get_indexed_registers(gpu, a6xx_state);
+
+ /* Try to initialize the crashdumper */
+ if (!a6xx_crashdumper_init(gpu, &dumper)) {
+ a6xx_get_registers(gpu, a6xx_state, &dumper);
+ a6xx_get_shaders(gpu, a6xx_state, &dumper);
+ a6xx_get_clusters(gpu, a6xx_state, &dumper);
+ a6xx_get_dbgahb_clusters(gpu, a6xx_state, &dumper);
+
+ msm_gem_kernel_put(dumper.bo, gpu->aspace, true);
+ }
+
+ a6xx_get_debugbus(gpu, a6xx_state);
+
+ return &a6xx_state->base;
+}
+
+void a6xx_gpu_state_destroy(struct kref *kref)
+{
+ struct a6xx_state_memobj *obj, *tmp;
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+
+ list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
+ kfree(obj);
+
+ adreno_gpu_state_destroy(state);
+ kfree(a6xx_state);
+}
+
+int a6xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a6xx_gpu_state_destroy);
+}
+
+static void a6xx_show_registers(const u32 *registers, u32 *data, size_t count,
+ struct drm_printer *p)
+{
+ int i, index = 0;
+
+ if (!data)
+ return;
+
+ for (i = 0; i < count; i += 2) {
+ u32 count = RANGE(registers, i);
+ u32 offset = registers[i];
+ int j;
+
+ for (j = 0; j < count; index++, offset++, j++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+}
+
+static void print_ascii85(struct drm_printer *p, size_t len, u32 *data)
+{
+ char out[ASCII85_BUFSZ];
+ long i, l, datalen = 0;
+
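+ /* find the last non-zero dword so trailing zeroes aren't encoded */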
+ for (i = 0; i < len >> 2; i++) {
+ if (data[i])
+ datalen = (i + 1) << 2;
+ }
+
+ if (datalen == 0)
+ return;
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ l = ascii85_encode_len(datalen);
+
+ for (i = 0; i < l; i++)
+ drm_puts(p, ascii85_encode(data[i], out));
+
+ drm_puts(p, "\n");
+}
+
+static void print_name(struct drm_printer *p, const char *fmt, const char *name)
+{
+ drm_puts(p, fmt);
+ drm_puts(p, name);
+ drm_puts(p, "\n");
+}
+
+static void a6xx_show_shader(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_shader_block *block = obj->handle;
+ int i;
+
+ if (!obj->handle)
+ return;
+
+ print_name(p, " - type: ", block->name);
+
+ for (i = 0; i < A6XX_NUM_SHADER_BANKS; i++) {
+ drm_printf(p, " - bank: %d\n", i);
+ drm_printf(p, " size: %d\n", block->size);
+
+ if (!obj->data)
+ continue;
+
+ print_ascii85(p, block->size << 2,
+ obj->data + (block->size * i));
+ }
+}
+
+static void a6xx_show_cluster_data(const u32 *registers, int size, u32 *data,
+ struct drm_printer *p)
+{
+ int ctx, index = 0;
+
+ for (ctx = 0; ctx < A6XX_NUM_CONTEXTS; ctx++) {
+ int j;
+
+ drm_printf(p, " - context: %d\n", ctx);
+
+ for (j = 0; j < size; j += 2) {
+ u32 count = RANGE(registers, j);
+ u32 offset = registers[j];
+ int k;
+
+ for (k = 0; k < count; index++, offset++, k++) {
+ if (data[index] == 0xdeafbead)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%06x, value: 0x%08x }\n",
+ offset << 2, data[index]);
+ }
+ }
+ }
+}
+
+static void a6xx_show_dbgahb_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_dbgahb_cluster *dbgahb = obj->handle;
+
+ if (dbgahb) {
+ print_name(p, " - cluster-name: ", dbgahb->name);
+ a6xx_show_cluster_data(dbgahb->registers, dbgahb->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_cluster(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_cluster *cluster = obj->handle;
+
+ if (cluster) {
+ print_name(p, " - cluster-name: ", cluster->name);
+ a6xx_show_cluster_data(cluster->registers, cluster->count,
+ obj->data, p);
+ }
+}
+
+static void a6xx_show_indexed_regs(struct a6xx_gpu_state_obj *obj,
+ struct drm_printer *p)
+{
+ const struct a6xx_indexed_registers *indexed = obj->handle;
+
+ if (!indexed)
+ return;
+
+ print_name(p, " - regs-name: ", indexed->name);
+ drm_printf(p, " dwords: %d\n", indexed->count);
+
+ print_ascii85(p, indexed->count << 2, obj->data);
+}
+
+static void a6xx_show_debugbus_block(const struct a6xx_debugbus_block *block,
+ u32 *data, struct drm_printer *p)
+{
+ if (block) {
+ print_name(p, " - debugbus-block: ", block->name);
+
+ /*
+ * count for regular debugbus data is in quadwords,
+ * but print the size in dwords for consistency
+ */
+ drm_printf(p, " count: %d\n", block->count << 1);
+
+ print_ascii85(p, block->count << 3, data);
+ }
+}
+
+static void a6xx_show_debugbus(struct a6xx_gpu_state *a6xx_state,
+ struct drm_printer *p)
+{
+ int i;
+
+ for (i = 0; i < a6xx_state->nr_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+
+ if (a6xx_state->vbif_debugbus) {
+ struct a6xx_gpu_state_obj *obj = a6xx_state->vbif_debugbus;
+
+ drm_puts(p, " - debugbus-block: A6XX_DBGBUS_VBIF\n");
+ drm_printf(p, " count: %d\n", VBIF_DEBUGBUS_BLOCK_SIZE);
+
+ /* vbif debugbus data is in dwords. Confusing, huh? */
+ print_ascii85(p, VBIF_DEBUGBUS_BLOCK_SIZE << 2, obj->data);
+ }
+
+ for (i = 0; i < a6xx_state->nr_cx_debugbus; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->cx_debugbus[i];
+
+ a6xx_show_debugbus_block(obj->handle, obj->data, p);
+ }
+}
+
+void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct a6xx_gpu_state *a6xx_state = container_of(state,
+ struct a6xx_gpu_state, base);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ adreno_show(gpu, state, p);
+
+ drm_puts(p, "registers:\n");
+ for (i = 0; i < a6xx_state->nr_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "registers-gmu:\n");
+ for (i = 0; i < a6xx_state->nr_gmu_registers; i++) {
+ struct a6xx_gpu_state_obj *obj = &a6xx_state->gmu_registers[i];
+ const struct a6xx_registers *regs = obj->handle;
+
+ if (!obj->handle)
+ continue;
+
+ a6xx_show_registers(regs->registers, obj->data, regs->count, p);
+ }
+
+ drm_puts(p, "indexed-registers:\n");
+ for (i = 0; i < a6xx_state->nr_indexed_regs; i++)
+ a6xx_show_indexed_regs(&a6xx_state->indexed_regs[i], p);
+
+ drm_puts(p, "shader-blocks:\n");
+ for (i = 0; i < a6xx_state->nr_shaders; i++)
+ a6xx_show_shader(&a6xx_state->shaders[i], p);
+
+ drm_puts(p, "clusters:\n");
+ for (i = 0; i < a6xx_state->nr_clusters; i++)
+ a6xx_show_cluster(&a6xx_state->clusters[i], p);
+
+ for (i = 0; i < a6xx_state->nr_dbgahb_clusters; i++)
+ a6xx_show_dbgahb_cluster(&a6xx_state->dbgahb_clusters[i], p);
+
+ drm_puts(p, "debugbus:\n");
+ a6xx_show_debugbus(a6xx_state, p);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
new file mode 100644
index 000000000000..68cccfa2870a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_CRASH_DUMP_H_
+#define _A6XX_CRASH_DUMP_H_
+
+#include "a6xx.xml.h"
+
+#define A6XX_NUM_CONTEXTS 2
+#define A6XX_NUM_SHADER_BANKS 3
+
+static const u32 a6xx_gras_cluster[] = {
+ 0x8000, 0x8006, 0x8010, 0x8092, 0x8094, 0x809d, 0x80a0, 0x80a6,
+ 0x80af, 0x80f1, 0x8100, 0x8107, 0x8109, 0x8109, 0x8110, 0x8110,
+ 0x8400, 0x840b,
+};
+
+static const u32 a6xx_ps_cluster_rac[] = {
+ 0x8800, 0x8806, 0x8809, 0x8811, 0x8818, 0x881e, 0x8820, 0x8865,
+ 0x8870, 0x8879, 0x8880, 0x8889, 0x8890, 0x8891, 0x8898, 0x8898,
+ 0x88c0, 0x88c1, 0x88d0, 0x88e3, 0x8900, 0x890c, 0x890f, 0x891a,
+ 0x8c00, 0x8c01, 0x8c08, 0x8c10, 0x8c17, 0x8c1f, 0x8c26, 0x8c33,
+};
+
+static const u32 a6xx_ps_cluster_rbp[] = {
+ 0x88f0, 0x88f3, 0x890d, 0x890e, 0x8927, 0x8928, 0x8bf0, 0x8bf1,
+ 0x8c02, 0x8c07, 0x8c11, 0x8c16, 0x8c20, 0x8c25,
+};
+
+static const u32 a6xx_ps_cluster[] = {
+ 0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
+};
+
+static const u32 a6xx_fe_cluster[] = {
+ 0x9300, 0x9306, 0x9800, 0x9806, 0x9b00, 0x9b07, 0xa000, 0xa009,
+ 0xa00e, 0xa0ef, 0xa0f8, 0xa0f8,
+};
+
+static const u32 a6xx_pc_vs_cluster[] = {
+ 0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9b00, 0x9b07,
+};
+
+#define CLUSTER_FE 0
+#define CLUSTER_SP_VS 1
+#define CLUSTER_PC_VS 2
+#define CLUSTER_GRAS 3
+#define CLUSTER_SP_PS 4
+#define CLUSTER_PS 5
+
+#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \
+ { .id = _id, .name = #_id,\
+ .registers = _reg, \
+ .count = ARRAY_SIZE(_reg), \
+ .sel_reg = _sel_reg, .sel_val = _sel_val }
+
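+/*
+ * Clusters with a non-zero sel_reg need that select register programmed
+ * to sel_val before their ranges are read (e.g. to pick the RB RAC vs
+ * RBP sub-blocks below).
+ */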
+static const struct a6xx_cluster {
+ u32 id;
+ const char *name;
+ const u32 *registers;
+ size_t count;
+ u32 sel_reg;
+ u32 sel_val;
+} a6xx_clusters[] = {
+ CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
+ CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
+ CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
+};
+
+static const u32 a6xx_sp_vs_hlsq_cluster[] = {
+ 0xb800, 0xb803, 0xb820, 0xb822,
+};
+
+static const u32 a6xx_sp_vs_sp_cluster[] = {
+ 0xa800, 0xa824, 0xa830, 0xa83c, 0xa840, 0xa864, 0xa870, 0xa895,
+ 0xa8a0, 0xa8af, 0xa8c0, 0xa8c3,
+};
+
+static const u32 a6xx_hlsq_duplicate_cluster[] = {
+ 0xbb10, 0xbb11, 0xbb20, 0xbb29,
+};
+
+static const u32 a6xx_hlsq_2d_duplicate_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_duplicate_cluster[] = {
+ 0xab00, 0xab00, 0xab04, 0xab05, 0xab10, 0xab1b, 0xab20, 0xab20,
+};
+
+static const u32 a6xx_tp_duplicate_cluster[] = {
+ 0xb300, 0xb307, 0xb309, 0xb309, 0xb380, 0xb382,
+};
+
+static const u32 a6xx_sp_ps_hlsq_cluster[] = {
+ 0xb980, 0xb980, 0xb982, 0xb987, 0xb990, 0xb99b, 0xb9a0, 0xb9a2,
+ 0xb9c0, 0xb9c9,
+};
+
+static const u32 a6xx_sp_ps_hlsq_2d_cluster[] = {
+ 0xbd80, 0xbd80,
+};
+
+static const u32 a6xx_sp_ps_sp_cluster[] = {
+ 0xa980, 0xa9a8, 0xa9b0, 0xa9bc, 0xa9d0, 0xa9d3, 0xa9e0, 0xa9f3,
+ 0xaa00, 0xaa00, 0xaa30, 0xaa31,
+};
+
+static const u32 a6xx_sp_ps_sp_2d_cluster[] = {
+ 0xacc0, 0xacc0,
+};
+
+static const u32 a6xx_sp_ps_tp_cluster[] = {
+ 0xb180, 0xb183, 0xb190, 0xb191,
+};
+
+static const u32 a6xx_sp_ps_tp_2d_cluster[] = {
+ 0xb4c0, 0xb4d1,
+};
+
+#define CLUSTER_DBGAHB(_id, _base, _type, _reg) \
+ { .name = #_id, .statetype = _type, .base = _base, \
+ .registers = _reg, .count = ARRAY_SIZE(_reg) }
+
+static const struct a6xx_dbgahb_cluster {
+ const char *name;
+ u32 statetype;
+ u32 base;
+ const u32 *registers;
+ size_t count;
+} a6xx_dbgahb_clusters[] = {
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_sp_vs_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_vs_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002e000, 0x41, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002f000, 0x45, a6xx_hlsq_2d_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002a000, 0x21, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_VS, 0x0002c000, 0x1, a6xx_tp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_sp_ps_hlsq_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002f000, 0x46, a6xx_sp_ps_hlsq_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_ps_sp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002b000, 0x26, a6xx_sp_ps_sp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_sp_ps_tp_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002d000, 0x6, a6xx_sp_ps_tp_2d_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002e000, 0x42, a6xx_hlsq_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002a000, 0x22, a6xx_sp_duplicate_cluster),
+ CLUSTER_DBGAHB(CLUSTER_SP_PS, 0x0002c000, 0x2, a6xx_tp_duplicate_cluster),
+};
+
+static const u32 a6xx_hlsq_registers[] = {
+ 0xbe00, 0xbe01, 0xbe04, 0xbe05, 0xbe08, 0xbe09, 0xbe10, 0xbe15,
+ 0xbe20, 0xbe23,
+};
+
+static const u32 a6xx_sp_registers[] = {
+ 0xae00, 0xae04, 0xae0c, 0xae0c, 0xae0f, 0xae2b, 0xae30, 0xae32,
+ 0xae35, 0xae35, 0xae3a, 0xae3f, 0xae50, 0xae52,
+};
+
+static const u32 a6xx_tp_registers[] = {
+ 0xb600, 0xb601, 0xb604, 0xb605, 0xb610, 0xb61b, 0xb620, 0xb623,
+};
+
+struct a6xx_registers {
+ const u32 *registers;
+ size_t count;
+ u32 val0;
+ u32 val1;
+};
+
+#define HLSQ_DBG_REGS(_base, _type, _array) \
+ { .val0 = _base, .val1 = _type, .registers = _array, \
+ .count = ARRAY_SIZE(_array), }
+
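+/* for the HLSQ lists, val0/val1 carry the AHB base and the statetype */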
+static const struct a6xx_registers a6xx_hlsq_reglist[] = {
+ HLSQ_DBG_REGS(0x0002F800, 0x40, a6xx_hlsq_registers),
+ HLSQ_DBG_REGS(0x0002B800, 0x20, a6xx_sp_registers),
+ HLSQ_DBG_REGS(0x0002D800, 0x0, a6xx_tp_registers),
+};
+
+#define SHADER(_type, _size) \
+ { .type = _type, .name = #_type, .size = _size }
+
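+/* _size is in dwords; each block is captured once per shader bank */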
+static const struct a6xx_shader_block {
+ const char *name;
+ u32 type;
+ u32 size;
+} a6xx_shader_blocks[] = {
+ SHADER(A6XX_TP0_TMO_DATA, 0x200),
+ SHADER(A6XX_TP0_SMO_DATA, 0x80),
+ SHADER(A6XX_TP0_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_TP1_TMO_DATA, 0x200),
+ SHADER(A6XX_TP1_SMO_DATA, 0x80),
+ SHADER(A6XX_TP1_MIPMAP_BASE_DATA, 0x3c0),
+ SHADER(A6XX_SP_INST_DATA, 0x800),
+ SHADER(A6XX_SP_LB_0_DATA, 0x800),
+ SHADER(A6XX_SP_LB_1_DATA, 0x800),
+ SHADER(A6XX_SP_LB_2_DATA, 0x800),
+ SHADER(A6XX_SP_LB_3_DATA, 0x800),
+ SHADER(A6XX_SP_LB_4_DATA, 0x800),
+ SHADER(A6XX_SP_LB_5_DATA, 0x200),
+ SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000),
+ SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280),
+ SHADER(A6XX_SP_UAV_DATA, 0x80),
+ SHADER(A6XX_SP_INST_TAG, 0x80),
+ SHADER(A6XX_SP_CB_BINDLESS_TAG, 0x80),
+ SHADER(A6XX_SP_TMO_UMO_TAG, 0x80),
+ SHADER(A6XX_SP_SMO_TAG, 0x80),
+ SHADER(A6XX_SP_STATE_DATA, 0x3f),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM, 0x280),
+ SHADER(A6XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40),
+ SHADER(A6XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x4),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM, 0x1c0),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM, 0x580),
+ SHADER(A6XX_HLSQ_INST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM, 0x800),
+ SHADER(A6XX_HLSQ_CVS_MISC_RAM_TAG, 0x8),
+ SHADER(A6XX_HLSQ_CPS_MISC_RAM_TAG, 0x4),
+ SHADER(A6XX_HLSQ_INST_RAM_TAG, 0x80),
+ SHADER(A6XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0xc),
+ SHADER(A6XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10),
+ SHADER(A6XX_HLSQ_PWR_REST_RAM, 0x28),
+ SHADER(A6XX_HLSQ_PWR_REST_TAG, 0x14),
+ SHADER(A6XX_HLSQ_DATAPATH_META, 0x40),
+ SHADER(A6XX_HLSQ_FRONTEND_META, 0x40),
+ SHADER(A6XX_HLSQ_INDIRECT_META, 0x40),
+};
+
+static const u32 a6xx_rb_rac_registers[] = {
+ 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e10, 0x8e1c, 0x8e20, 0x8e25,
+ 0x8e28, 0x8e28, 0x8e2c, 0x8e2f, 0x8e50, 0x8e52,
+};
+
+static const u32 a6xx_rb_rbp_registers[] = {
+ 0x8e01, 0x8e01, 0x8e0c, 0x8e0c, 0x8e3b, 0x8e3e, 0x8e40, 0x8e43,
+ 0x8e53, 0x8e5f, 0x8e70, 0x8e77,
+};
+
+static const u32 a6xx_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+ 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+ 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+ 0x0100, 0x011d, 0x0200, 0x020d, 0x0218, 0x023d, 0x0400, 0x04f9,
+ 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511, 0x0533, 0x0533,
+ 0x0540, 0x0555,
+ /* CP */
+ 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
+ 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084f, 0x086f,
+ 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4, 0x08d0, 0x08dd,
+ 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093e,
+ 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996, 0x0998, 0x099e,
+ 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1, 0x09c2, 0x09c8,
+ 0x0a00, 0x0a03,
+ /* VSC */
+ 0x0c00, 0x0c04, 0x0c06, 0x0c06, 0x0c10, 0x0cd9, 0x0e00, 0x0e0e,
+ /* UCHE */
+ 0x0e10, 0x0e13, 0x0e17, 0x0e19, 0x0e1c, 0x0e2b, 0x0e30, 0x0e32,
+ 0x0e38, 0x0e39,
+ /* GRAS */
+ 0x8600, 0x8601, 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b,
+ 0x8630, 0x8637,
+ /* VPC */
+ 0x9600, 0x9604, 0x9624, 0x9637,
+ /* PC */
+ 0x9e00, 0x9e01, 0x9e03, 0x9e0e, 0x9e11, 0x9e16, 0x9e19, 0x9e19,
+ 0x9e1c, 0x9e1c, 0x9e20, 0x9e23, 0x9e30, 0x9e31, 0x9e34, 0x9e34,
+ 0x9e70, 0x9e72, 0x9e78, 0x9e79, 0x9e80, 0x9fff,
+ /* VFD */
+ 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a, 0xa610, 0xa617,
+ 0xa630, 0xa630,
+};
+
+#define REGS(_array, _sel_reg, _sel_val) \
+ { .registers = _array, .count = ARRAY_SIZE(_array), \
+ .val0 = _sel_reg, .val1 = _sel_val }
+
+static const struct a6xx_registers a6xx_reglist[] = {
+ REGS(a6xx_registers, 0, 0),
+ REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
+ REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
+};
+
+static const u32 a6xx_ahb_registers[] = {
+ /* RBBM_STATUS - RBBM_STATUS3 */
+ 0x210, 0x213,
+ /* CP_STATUS_1 */
+ 0x825, 0x825,
+};
+
+static const u32 a6xx_vbif_registers[] = {
+ 0x3000, 0x3007, 0x300c, 0x3014, 0x3018, 0x302d, 0x3030, 0x3031,
+ 0x3034, 0x3036, 0x303c, 0x303d, 0x3040, 0x3040, 0x3042, 0x3042,
+ 0x3049, 0x3049, 0x3058, 0x3058, 0x305a, 0x3061, 0x3064, 0x3068,
+ 0x306c, 0x306d, 0x3080, 0x3088, 0x308b, 0x308c, 0x3090, 0x3094,
+ 0x3098, 0x3098, 0x309c, 0x309c, 0x30c0, 0x30c0, 0x30c8, 0x30c8,
+ 0x30d0, 0x30d0, 0x30d8, 0x30d8, 0x30e0, 0x30e0, 0x3100, 0x3100,
+ 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
+ 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x3154, 0x3154,
+ 0x3156, 0x3156, 0x3158, 0x3158, 0x315a, 0x315a, 0x315c, 0x315c,
+ 0x315e, 0x315e, 0x3160, 0x3160, 0x3162, 0x3162, 0x340c, 0x340c,
+ 0x3410, 0x3410, 0x3800, 0x3801,
+};
+
+static const struct a6xx_registers a6xx_ahb_reglist[] = {
+ REGS(a6xx_ahb_registers, 0, 0),
+ REGS(a6xx_vbif_registers, 0, 0),
+};
+
+static const u32 a6xx_gmu_gx_registers[] = {
+ /* GMU GX */
+ 0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
+ 0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b,
+ 0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b,
+ 0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084,
+ 0x0100, 0x012b, 0x0140, 0x0140,
+};
+
+static const u32 a6xx_gmu_cx_registers[] = {
+ /* GMU CX */
+ 0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a,
+ 0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c,
+ 0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089,
+ 0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0,
+ 0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140,
+ 0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154,
+ 0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
+ 0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
+ 0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
+ /* GPU RSCC */
+ 0x8c8c, 0x8c8c, 0x8d01, 0x8d02, 0x8f40, 0x8f42, 0x8f44, 0x8f47,
+ 0x8f4c, 0x8f87, 0x8fec, 0x8fef, 0x8ff4, 0x902f, 0x9094, 0x9097,
+ 0x909c, 0x90d7, 0x913c, 0x913f, 0x9144, 0x917f,
+ /* GMU AO */
+ 0x9300, 0x9316, 0x9400, 0x9400,
+ /* GPU CC */
+ 0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
+ 0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
+ 0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002,
+ 0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402,
+ 0xb800, 0xb802,
+ /* GPU CC ACD */
+ 0xbc00, 0xbc16, 0xbc20, 0xbc27,
+};
+
+static const struct a6xx_registers a6xx_gmu_reglist[] = {
+ REGS(a6xx_gmu_cx_registers, 0, 0),
+ REGS(a6xx_gmu_gx_registers, 0, 0),
+};
+
+static const struct a6xx_indexed_registers {
+ const char *name;
+ u32 addr;
+ u32 data;
+ u32 count;
+} a6xx_indexed_reglist[] = {
+ { "CP_SEQ_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
+ REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
+ { "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
+ REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
+ { "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
+ REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x6000 },
+ { "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
+ REG_A6XX_CP_ROQ_DBG_DATA, 0x400 },
+};
+
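+/*
+ * The CP mempool is dumped as one extra indexed bank: 0x2060 dwords,
+ * where dword 0x2000 holds the pool size that the capture code zeroes
+ * for a stable dump and then patches back into the saved data.
+ */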
+static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
+ "CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
+};
+
+#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
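+/* .count is in quadwords of debug bus data (two dwords per sample) */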
+
+static const struct a6xx_debugbus_block {
+ const char *name;
+ u32 id;
+ u32 count;
+} a6xx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_CP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBBM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DPM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TESS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_PC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFDP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TSE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RAS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VSC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_COM, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LRZ, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_A2D, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCUFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RBP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DCS, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_DBGC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GMU_GX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPFCHE, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_GPC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_LARC, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_HLSQ_SPTP, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_RB_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_UCHE_WRAPPER, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_3, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_3, 0x100),
+};
+
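+/* these blocks are read through the separate CX debug bus aperture */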
+static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_GMU_CX, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 6ff9baec2658..eda11abc5f01 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -91,7 +91,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
if (ret) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Message %s id %d timed out waiting for response\n",
a6xx_hfi_msg_id[id], seqnum);
return -ETIMEDOUT;
@@ -110,7 +110,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
/* If the queue is empty our response never made it */
if (!ret) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"The HFI response queue is unexpectedly empty\n");
return -ENOENT;
@@ -120,20 +120,20 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
struct a6xx_hfi_msg_error *error =
(struct a6xx_hfi_msg_error *) &resp;
- dev_err(gmu->dev, "GMU firmware error %d\n",
+ DRM_DEV_ERROR(gmu->dev, "GMU firmware error %d\n",
error->code);
continue;
}
if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Unexpected message id %d on the response queue\n",
HFI_HEADER_SEQNUM(resp.ret_header));
continue;
}
if (resp.error) {
- dev_err(gmu->dev,
+ DRM_DEV_ERROR(gmu->dev,
"Message %s id %d returned error %d\n",
a6xx_hfi_msg_id[id], seqnum, resp.error);
return -EINVAL;
@@ -163,7 +163,7 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
if (ret) {
- dev_err(gmu->dev, "Unable to send message %s id %d\n",
+ DRM_DEV_ERROR(gmu->dev, "Unable to send message %s id %d\n",
a6xx_hfi_msg_id[id], seqnum);
return ret;
}
@@ -317,7 +317,7 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu)
continue;
if (queue->header->read_index != queue->header->write_index)
- dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+ DRM_DEV_ERROR(gmu->dev, "HFI queue %d is not empty\n", i);
queue->header->read_index = 0;
queue->header->write_index = 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 1318959d504d..641d3ba477b6 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -339,6 +339,15 @@ static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
#define REG_AXXX_CP_INT_CNTL 0x000001f2
+#define AXXX_CP_INT_CNTL_SW_INT_MASK 0x00080000
+#define AXXX_CP_INT_CNTL_T0_PACKET_IN_IB_MASK 0x00800000
+#define AXXX_CP_INT_CNTL_OPCODE_ERROR_MASK 0x01000000
+#define AXXX_CP_INT_CNTL_PROTECTED_MODE_ERROR_MASK 0x02000000
+#define AXXX_CP_INT_CNTL_RESERVED_BIT_ERROR_MASK 0x04000000
+#define AXXX_CP_INT_CNTL_IB_ERROR_MASK 0x08000000
+#define AXXX_CP_INT_CNTL_IB2_INT_MASK 0x20000000
+#define AXXX_CP_INT_CNTL_IB1_INT_MASK 0x40000000
+#define AXXX_CP_INT_CNTL_RB_INT_MASK 0x80000000
#define REG_AXXX_CP_INT_STATUS 0x000001f3
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 86abdb2b3a9c..714ed6505e47 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -27,6 +27,39 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
static const struct adreno_info gpulist[] = {
{
+ .rev = ADRENO_REV(2, 0, 0, 0),
+ .revn = 200,
+ .name = "A200",
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, { /* a200 on i.mx51 has only 128KiB gmem */
+ .rev = ADRENO_REV(2, 0, 0, 1),
+ .revn = 201,
+ .name = "A200",
+ .fw = {
+ [ADRENO_FW_PM4] = "yamato_pm4.fw",
+ [ADRENO_FW_PFP] = "yamato_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(2, 2, 0, ANY_ID),
+ .revn = 220,
+ .name = "A220",
+ .fw = {
+ [ADRENO_FW_PM4] = "leia_pm4_470.fw",
+ [ADRENO_FW_PFP] = "leia_pfp_470.fw",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a2xx_gpu_init,
+ }, {
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
.revn = 305,
.name = "A305",
@@ -196,7 +229,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
- dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
return NULL;
}
@@ -205,7 +238,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
- dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
}
@@ -238,7 +271,8 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
if (ret == 0) {
unsigned int r, patch;
- if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2) {
+ if (sscanf(compat, "qcom,adreno-%u.%u", &r, &patch) == 2 ||
+ sscanf(compat, "amd,imageon-%u.%u", &r, &patch) == 2) {
rev->core = r / 100;
r %= 100;
rev->major = r / 10;
@@ -253,7 +287,7 @@ static int find_chipid(struct device *dev, struct adreno_rev *rev)
/* and if that fails, fall back to legacy "qcom,chipid" property: */
ret = of_property_read_u32(node, "qcom,chipid", &chipid);
if (ret) {
- dev_err(dev, "could not parse qcom,chipid: %d\n", ret);
+ DRM_DEV_ERROR(dev, "could not parse qcom,chipid: %d\n", ret);
return ret;
}
@@ -274,6 +308,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
static struct adreno_platform_config config = {};
const struct adreno_info *info;
struct drm_device *drm = dev_get_drvdata(master);
+ struct msm_drm_private *priv = drm->dev_private;
struct msm_gpu *gpu;
int ret;
@@ -296,6 +331,8 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
DBG("Found GPU: %u.%u.%u.%u", config.rev.core, config.rev.major,
config.rev.minor, config.rev.patchid);
+ priv->is_a2xx = config.rev.core == 2;
+
gpu = info->init(drm);
if (IS_ERR(gpu)) {
dev_warn(drm->dev, "failed to load adreno gpu\n");
@@ -323,9 +360,37 @@ static const struct component_ops a3xx_ops = {
.unbind = adreno_unbind,
};
+static void adreno_device_register_headless(void)
+{
+ /*
+ * on imx5 there is no top-level mdp/dpu node; create a dummy
+ * device so the driver still has something to bind to
+ */
+ struct platform_device_info dummy_info = {
+ .parent = NULL,
+ .name = "msm",
+ .id = -1,
+ .res = NULL,
+ .num_res = 0,
+ .data = NULL,
+ .size_data = 0,
+ .dma_mask = ~0,
+ };
+ platform_device_register_full(&dummy_info);
+}
+
static int adreno_probe(struct platform_device *pdev)
{
- return component_add(&pdev->dev, &a3xx_ops);
+
+ int ret;
+
+ ret = component_add(&pdev->dev, &a3xx_ops);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(pdev->dev.of_node, "amd,imageon"))
+ adreno_device_register_headless();
+
+ return 0;
}
static int adreno_remove(struct platform_device *pdev)
@@ -337,6 +402,8 @@ static int adreno_remove(struct platform_device *pdev)
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
+ /* for compatibility with imx5 gpu: */
+ { .compatible = "amd,imageon" },
/* for backwards compat w/ downstream kgsl DT files: */
{ .compatible = "qcom,kgsl-3d0" },
{}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 93d70f4a2154..2e4372ef17a3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -89,12 +89,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, newname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s from new location\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
@@ -109,12 +109,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s from legacy location\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
fwname, ret);
fw = ERR_PTR(ret);
goto out;
@@ -130,19 +130,19 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware(&fw, newname, drm->dev);
if (!ret) {
- dev_info(drm->dev, "loaded %s with helper\n",
+ DRM_DEV_INFO(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
- dev_err(drm->dev, "failed to load %s: %d\n",
+ DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n",
newname, ret);
fw = ERR_PTR(ret);
goto out;
}
}
- dev_err(drm->dev, "failed to load %s\n", fwname);
+ DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname);
fw = ERR_PTR(-ENOENT);
out:
kfree(newname);
@@ -209,14 +209,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
if (!ring)
continue;
- ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
- if (ret) {
- ring->iova = 0;
- dev_err(gpu->dev->dev,
- "could not map ringbuffer %d: %d\n", i, ret);
- return ret;
- }
-
ring->cur = ring->start;
ring->next = ring->start;
@@ -277,7 +269,7 @@ void adreno_recover(struct msm_gpu *gpu)
ret = msm_gpu_hw_init(gpu);
if (ret) {
- dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
}
@@ -319,16 +311,27 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
*/
OUT_PKT3(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, HLSQ_FLUSH);
-
- OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
- OUT_RING(ring, 0x00000000);
}
- /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
- OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
- OUT_RING(ring, rbmemptr(ring, fence));
- OUT_RING(ring, submit->seqno);
+ /* wait for idle before cache flush/interrupt */
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ if (!adreno_is_a2xx(adreno_gpu)) {
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+ } else {
+ /* BIT(31) means something else on a2xx */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, rbmemptr(ring, fence));
+ OUT_RING(ring, submit->seqno);
+ OUT_PKT3(ring, CP_INTERRUPT, 1);
+ OUT_RING(ring, 0x80000000);
+ }
#if 0
if (adreno_is_a3xx(adreno_gpu)) {
@@ -406,7 +409,7 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
size = j + 1;
if (size) {
- state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+ state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
if (state->ring[i].data) {
memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
state->ring[i].data_size = size << 2;
@@ -414,6 +417,10 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
}
}
+ /* Some targets prefer to collect their own registers */
+ if (!adreno_gpu->registers)
+ return 0;
+
/* Count the number of registers */
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
count += adreno_gpu->registers[i + 1] -
@@ -445,7 +452,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state)
int i;
for (i = 0; i < ARRAY_SIZE(state->ring); i++)
- kfree(state->ring[i].data);
+ kvfree(state->ring[i].data);
for (i = 0; state->bos && i < state->nr_bos; i++)
kvfree(state->bos[i].data);
@@ -475,34 +482,74 @@ int adreno_gpu_state_put(struct msm_gpu_state *state)
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
-static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
{
+ void *buf;
+ size_t buf_itr = 0, buffer_size;
char out[ASCII85_BUFSZ];
- long l, datalen, i;
+ long l;
+ int i;
- if (!ptr || !len)
- return;
+ if (!src || !len)
+ return NULL;
+
+ l = ascii85_encode_len(len);
/*
- * Only dump the non-zero part of the buffer - rarely will any data
- * completely fill the entire allocated size of the buffer
+ * Ascii85 outputs either a 5-character group or a single 'z' for a
+ * zero dword, so account for the worst case of 5 bytes per dword
+ * plus 1 for the '\0' terminator
+ */
- for (datalen = 0, i = 0; i < len >> 2; i++) {
- if (ptr[i])
- datalen = (i << 2) + 1;
- }
+ buffer_size = (l * 5) + 1;
+
+ buf = kvmalloc(buffer_size, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ for (i = 0; i < l; i++)
+ buf_itr += snprintf(buf + buf_itr, buffer_size - buf_itr, "%s",
+ ascii85_encode(src[i], out));
+
+ return buf;
+}
- /* Skip printing the object if it is empty */
- if (datalen == 0)
+/* len is expected to be in bytes */
+static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+ bool *encoded)
+{
+ if (!*ptr || !len)
return;
- l = ascii85_encode_len(datalen);
+ if (!*encoded) {
+ long datalen, i;
+ u32 *buf = *ptr;
+
+ /*
+ * Only dump the non-zero part of the buffer - rarely will
+ * any data completely fill the entire allocated size of
+ * the buffer.
+ */
+ for (datalen = 0, i = 0; i < len >> 2; i++)
+ if (buf[i])
+ datalen = ((i + 1) << 2);
+
+ /*
+ * If we reach here, then the originally captured binary buffer
+ * will be replaced with the ascii85 encoded string
+ */
+ *ptr = adreno_gpu_ascii85_encode(buf, datalen);
+
+ kvfree(buf);
+
+ *encoded = true;
+ }
+
+ if (!*ptr)
+ return;
drm_puts(p, " data: !!ascii85 |\n");
drm_puts(p, " ");
- for (i = 0; i < l; i++)
- drm_puts(p, ascii85_encode(ptr[i], out));
+ drm_puts(p, *ptr);
drm_puts(p, "\n");
}
@@ -534,8 +581,8 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);
- adreno_show_object(p, state->ring[i].data,
- state->ring[i].data_size);
+ adreno_show_object(p, &state->ring[i].data,
+ state->ring[i].data_size, &state->ring[i].encoded);
}
if (state->bos) {
@@ -546,17 +593,19 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
state->bos[i].iova);
drm_printf(p, " size: %zd\n", state->bos[i].size);
- adreno_show_object(p, state->bos[i].data,
- state->bos[i].size);
+ adreno_show_object(p, &state->bos[i].data,
+ state->bos[i].size, &state->bos[i].encoded);
}
}
- drm_puts(p, "registers:\n");
+ if (state->nr_registers) {
+ drm_puts(p, "registers:\n");
- for (i = 0; i < state->nr_registers; i++) {
- drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
- state->registers[i * 2] << 2,
- state->registers[(i * 2) + 1]);
+ for (i = 0; i < state->nr_registers; i++) {
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ state->registers[i * 2] << 2,
+ state->registers[(i * 2) + 1]);
+ }
}
}
#endif
@@ -595,6 +644,9 @@ void adreno_dump(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i;
+ if (!adreno_gpu->registers)
+ return;
+
/* dump these out in a form that can be parsed by demsm: */
printk("IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -635,7 +687,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
- dev_err(dev, "Could not find the GPU powerlevels\n");
+ DRM_DEV_ERROR(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
}
@@ -674,7 +726,7 @@ static int adreno_get_pwrlevels(struct device *dev,
else {
ret = dev_pm_opp_of_add_table(dev);
if (ret)
- dev_err(dev, "Unable to set the OPP table\n");
+ DRM_DEV_ERROR(dev, "Unable to set the OPP table\n");
}
if (!ret) {
@@ -717,6 +769,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.va_start = SZ_16M;
adreno_gpu_config.va_end = 0xffffffff;
+ /* maximum range of a2xx mmu */
+ if (adreno_is_a2xx(adreno_gpu))
+ adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
adreno_gpu_config.nr_rings = nr_rings;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index de6e6ee42fba..5db459bc28a7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -21,6 +21,7 @@
#define __ADRENO_GPU_H__
#include <linux/firmware.h>
+#include <linux/iopoll.h>
#include "msm_gpu.h"
@@ -154,6 +155,20 @@ struct adreno_platform_config {
__ret; \
})
+static inline bool adreno_is_a2xx(struct adreno_gpu *gpu)
+{
+ return (gpu->revn < 300);
+}
+
+static inline bool adreno_is_a20x(struct adreno_gpu *gpu)
+{
+ return (gpu->revn < 210);
+}
+
+static inline bool adreno_is_a225(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 225;
+}
static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
@@ -334,6 +349,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
+struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
@@ -375,4 +391,9 @@ static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
((1 << 30) | (1 << 29) | \
((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
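+
+/*
+ * Illustrative use (the status register and bit are hypothetical):
+ *
+ * ret = gpu_poll_timeout(gpu, REG_SOME_STATUS, status,
+ * !(status & BIT(31)), 100, 10000);
+ */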
+
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 15eb03bed984..79b907ac0b4b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -10,13 +10,13 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42585 bytes, from 2018-10-04 19:06:37)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 42463 bytes, from 2018-11-19 13:44:03)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 14201 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 43052 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
-- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-10-04 19:06:37)
-- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 139581 bytes, from 2018-10-04 19:06:42)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-12-02 17:29:54)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 140790 bytes, from 2018-12-02 17:29:54)
- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-09-14 13:03:07)
- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
@@ -108,6 +108,13 @@ enum pc_di_src_sel {
DI_SRC_SEL_RESERVED = 3,
};
+enum pc_di_face_cull_sel {
+ DI_FACE_CULL_NONE = 0,
+ DI_FACE_CULL_FETCH = 1,
+ DI_FACE_BACKFACE_CULL = 2,
+ DI_FACE_FRONTFACE_CULL = 3,
+};
+
enum pc_di_index_size {
INDEX_SIZE_IGN = 0,
INDEX_SIZE_16_BIT = 0,
@@ -356,6 +363,7 @@ enum a6xx_render_mode {
RM6_GMEM = 4,
RM6_BLIT2D = 5,
RM6_RESOLVE = 6,
+ RM6_BLIT2DSCALE = 12,
};
enum pseudo_reg {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
index 879c13fe74e0..e45c69044935 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -319,10 +319,8 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
unsigned long irq_flags;
int i, irq_count, enable_count, cb_count;
- if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
- DPU_ERROR("invalid parameters\n");
+ if (WARN_ON(!irq_obj->enable_counts || !irq_obj->irq_cb_tbl))
return 0;
- }
for (i = 0; i < irq_obj->total_irqs; i++) {
spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
@@ -343,31 +341,11 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
- struct dentry *parent)
-{
- dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
- parent, &dpu_kms->irq_obj,
- &dpu_debugfs_core_irq_fops);
-
- return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove(dpu_kms->irq_obj.debugfs_file);
- dpu_kms->irq_obj.debugfs_file = NULL;
-}
-
-#else
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
- return 0;
-}
-
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
-{
+ debugfs_create_file("core_irq", 0600, parent, &dpu_kms->irq_obj,
+ &dpu_debugfs_core_irq_fops);
}
#endif
@@ -376,10 +354,7 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
struct msm_drm_private *priv;
int i;
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- } else if (!dpu_kms->dev) {
+ if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
@@ -410,20 +385,12 @@ void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
}
}
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
-{
- return 0;
-}
-
void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
{
struct msm_drm_private *priv;
int i;
- if (!dpu_kms) {
- DPU_ERROR("invalid dpu_kms\n");
- return;
- } else if (!dpu_kms->dev) {
+ if (!dpu_kms->dev) {
DPU_ERROR("invalid drm device\n");
return;
} else if (!dpu_kms->dev->dev_private) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
index 5e98bba46af5..e9015a2b23fe 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -24,13 +24,6 @@
void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
/**
- * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
- * @dpu_kms: DPU handle
- * @return: 0 if success; error code otherwise
- */
-int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
-
-/**
* dpu_core_irq_uninstall - uninstall core IRQ handler
* @dpu_kms: DPU handle
* @return: none
@@ -139,15 +132,8 @@ int dpu_core_irq_unregister_callback(
* dpu_debugfs_core_irq_init - register core irq debugfs
* @dpu_kms: pointer to kms
* @parent: debugfs directory root
- * @Return: 0 on success
*/
-int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
struct dentry *parent);
-/**
- * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
- * @dpu_kms: pointer to kms
- */
-void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
-
#endif /* __DPU_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 41c5191f9056..9f20f397f77d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -24,8 +24,6 @@
#include "dpu_crtc.h"
#include "dpu_core_perf.h"
-#define DPU_PERF_MODE_STRING_SIZE 128
-
/**
* enum dpu_perf_mode - performance tuning mode
* @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
@@ -57,31 +55,20 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
return to_dpu_kms(priv->kms);
}
-static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
-{
- return dpu_crtc_is_enabled(crtc);
-}
-
static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
{
struct drm_crtc *tmp_crtc;
- bool intf_connected = false;
-
- if (!crtc)
- goto end;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
- _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+ tmp_crtc->enabled) {
DPU_DEBUG("video interface connected crtc:%d\n",
tmp_crtc->base.id);
- intf_connected = true;
- goto end;
+ return true;
}
}
-end:
- return intf_connected;
+ return false;
}
static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
@@ -101,20 +88,20 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
memset(perf, 0, sizeof(struct dpu_core_perf_params));
if (!dpu_cstate->bw_control) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
1000ULL;
perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
}
perf->core_clk_rate = kms->perf.max_core_clk_rate;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = 0;
perf->max_per_pipe_ib[i] = 0;
}
perf->core_clk_rate = 0;
} else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
}
@@ -124,12 +111,12 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
DPU_DEBUG(
"crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
crtc->base.id, perf->core_clk_rate,
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
- perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+ perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+ perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+ perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+ perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+ perf->max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_EBI],
+ perf->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI]);
}
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
@@ -164,13 +151,13 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
/* obtain new values */
_dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
- for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
- i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
+ i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
curr_client_type = dpu_crtc_get_client_type(crtc);
drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ if (tmp_crtc->enabled &&
(dpu_crtc_get_client_type(tmp_crtc) ==
curr_client_type) &&
(tmp_crtc != crtc)) {
@@ -229,7 +216,7 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
int ret = 0;
drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ if (tmp_crtc->enabled &&
curr_client_type ==
dpu_crtc_get_client_type(tmp_crtc)) {
dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
@@ -286,7 +273,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
*/
if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ if (tmp_crtc->enabled &&
dpu_crtc_get_intf_mode(tmp_crtc) ==
INTF_MODE_VIDEO)
return;
@@ -296,7 +283,7 @@ void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
if (kms->perf.enable_bw_release) {
trace_dpu_cmd_release_bw(crtc->base.id);
DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
dpu_crtc->cur_perf.bw_ctl[i] = 0;
_dpu_core_perf_crtc_update_bus(kms, crtc, i);
}
@@ -321,7 +308,7 @@ static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
struct dpu_crtc_state *dpu_cstate;
drm_for_each_crtc(crtc, kms->dev) {
- if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+ if (crtc->enabled) {
dpu_cstate = to_dpu_crtc_state(crtc->state);
clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
clk_rate);
@@ -372,8 +359,8 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
old = &dpu_crtc->cur_perf;
new = &dpu_cstate->new_perf;
- if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ if (crtc->enabled && !stop_req) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
/*
* cases for bus bandwidth update.
* 1. new bandwidth vote - "ab or ib vote" is higher
@@ -415,13 +402,13 @@ int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
update_clk = 1;
}
trace_dpu_perf_crtc_update(crtc->base.id,
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
- new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MNOC],
+ new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_LLCC],
+ new->bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_EBI],
new->core_clk_rate, stop_req,
update_bus, update_clk);
- for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ for (i = 0; i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
if (update_bus & BIT(i)) {
ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
if (ret) {
@@ -462,24 +449,14 @@ static ssize_t _dpu_core_perf_mode_write(struct file *file,
struct dpu_core_perf *perf = file->private_data;
struct dpu_perf_cfg *cfg = &perf->catalog->perf;
u32 perf_mode = 0;
- char buf[10];
-
- if (!perf)
- return -ENODEV;
-
- if (count >= sizeof(buf))
- return -EFAULT;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- buf[count] = 0; /* end of string */
+ int ret;
- if (kstrtouint(buf, 0, &perf_mode))
- return -EFAULT;
+ ret = kstrtouint_from_user(user_buf, count, 0, &perf_mode);
+ if (ret)
+ return ret;
if (perf_mode >= DPU_PERF_MODE_MAX)
- return -EFAULT;
+ return -EINVAL;
if (perf_mode == DPU_PERF_MODE_FIXED) {
DRM_INFO("fix performance mode\n");
@@ -504,29 +481,16 @@ static ssize_t _dpu_core_perf_mode_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_core_perf *perf = file->private_data;
- int len = 0;
- char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
-
- if (!perf)
- return -ENODEV;
+ int len;
+ char buf[128];
- if (*ppos)
- return 0; /* the end */
-
- len = snprintf(buf, sizeof(buf),
+ len = scnprintf(buf, sizeof(buf),
"mode %d min_mdp_clk %llu min_bus_vote %llu\n",
perf->perf_tune.mode,
perf->perf_tune.min_core_clk,
perf->perf_tune.min_bus_vote);
- if (len < 0 || len >= sizeof(buf))
- return 0;
-
- if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
- return -EFAULT;
-
- *ppos += len; /* increase offset */
- return len;
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
}
static const struct file_operations dpu_core_perf_mode_fops = {
@@ -535,70 +499,43 @@ static const struct file_operations dpu_core_perf_mode_fops = {
.write = _dpu_core_perf_mode_write,
};
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
-{
- debugfs_remove_recursive(perf->debugfs_root);
- perf->debugfs_root = NULL;
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent)
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
{
+ struct dpu_core_perf *perf = &dpu_kms->perf;
struct dpu_mdss_cfg *catalog = perf->catalog;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- priv = perf->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return -EINVAL;
- }
+ struct dentry *entry;
- dpu_kms = to_dpu_kms(priv->kms);
-
- perf->debugfs_root = debugfs_create_dir("core_perf", parent);
- if (!perf->debugfs_root) {
- DPU_ERROR("failed to create core perf debugfs\n");
+ entry = debugfs_create_dir("core_perf", parent);
+ if (IS_ERR_OR_NULL(entry))
return -EINVAL;
- }
- debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("max_core_clk_rate", 0600, entry,
&perf->max_core_clk_rate);
- debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("core_clk_rate", 0600, entry,
&perf->core_clk_rate);
- debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+ debugfs_create_u32("enable_bw_release", 0600, entry,
(u32 *)&perf->enable_bw_release);
- debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+ debugfs_create_u32("threshold_low", 0600, entry,
(u32 *)&catalog->perf.max_bw_low);
- debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+ debugfs_create_u32("threshold_high", 0600, entry,
(u32 *)&catalog->perf.max_bw_high);
- debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_core_ib", 0600, entry,
(u32 *)&catalog->perf.min_core_ib);
- debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_llcc_ib", 0600, entry,
(u32 *)&catalog->perf.min_llcc_ib);
- debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+ debugfs_create_u32("min_dram_ib", 0600, entry,
(u32 *)&catalog->perf.min_dram_ib);
- debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+ debugfs_create_file("perf_mode", 0600, entry,
(u32 *)perf, &dpu_core_perf_mode_fops);
- debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_clk_rate", 0600, entry,
&perf->fix_core_clk_rate);
- debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_ib_vote", 0600, entry,
&perf->fix_core_ib_vote);
- debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+ debugfs_create_u64("fix_core_ab_vote", 0600, entry,
&perf->fix_core_ab_vote);
return 0;
}
-#else
-static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
-{
-}
-
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent)
-{
- return 0;
-}
#endif
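Dropping dpu_core_perf_debugfs_destroy() is safe because debugfs nodes share their parent's lifetime: everything is created under the dentry returned by debugfs_create_dir(), and one debugfs_remove_recursive() on that parent (issued by whoever owns the tree) reclaims the whole subtree. A reduced sketch of the lifetime model, with illustrative names:

#include <linux/debugfs.h>

/* No matching destroy helper is needed: removing the parent
 * recursively frees every node created below it. */
static struct dentry *example_perf_debugfs(struct dentry *parent,
					   u64 *clk_rate)
{
	struct dentry *dir = debugfs_create_dir("core_perf", parent);

	if (IS_ERR_OR_NULL(dir))
		return NULL;

	debugfs_create_u64("core_clk_rate", 0600, dir, clk_rate);
	return dir;
}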
void dpu_core_perf_destroy(struct dpu_core_perf *perf)
@@ -608,10 +545,8 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
return;
}
- dpu_core_perf_debugfs_destroy(perf);
perf->max_core_clk_rate = 0;
perf->core_clk = NULL;
- perf->phandle = NULL;
perf->catalog = NULL;
perf->dev = NULL;
}
@@ -619,12 +554,10 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf)
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
- struct dpu_power_handle *phandle,
struct dss_clk *core_clk)
{
perf->dev = dev;
perf->catalog = catalog;
- perf->phandle = phandle;
perf->core_clk = core_clk;
perf->max_core_clk_rate = core_clk->max_rate;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index fbcbe0c7527a..37f518815eb7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -19,19 +19,31 @@
#include <drm/drm_crtc.h>
#include "dpu_hw_catalog.h"
-#include "dpu_power_handle.h"
#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
/**
+ * enum dpu_core_perf_data_bus_id - data bus identifier
+ * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus
+ */
+enum dpu_core_perf_data_bus_id {
+ DPU_CORE_PERF_DATA_BUS_ID_MNOC,
+ DPU_CORE_PERF_DATA_BUS_ID_LLCC,
+ DPU_CORE_PERF_DATA_BUS_ID_EBI,
+ DPU_CORE_PERF_DATA_BUS_ID_MAX,
+};
+
+/**
* struct dpu_core_perf_params - definition of performance parameters
* @max_per_pipe_ib: maximum instantaneous bandwidth request
* @bw_ctl: arbitrated bandwidth request
* @core_clk_rate: core clock rate request
*/
struct dpu_core_perf_params {
- u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
- u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 max_per_pipe_ib[DPU_CORE_PERF_DATA_BUS_ID_MAX];
+ u64 bw_ctl[DPU_CORE_PERF_DATA_BUS_ID_MAX];
u64 core_clk_rate;
};
@@ -52,7 +64,6 @@ struct dpu_core_perf_tune {
* @dev: Pointer to drm device
* @debugfs_root: top level debug folder
* @catalog: Pointer to catalog configuration
- * @phandle: Pointer to power handler
* @core_clk: Pointer to core clock structure
* @core_clk_rate: current core clock rate
* @max_core_clk_rate: maximum allowable core clock rate
@@ -66,7 +77,6 @@ struct dpu_core_perf {
struct drm_device *dev;
struct dentry *debugfs_root;
struct dpu_mdss_cfg *catalog;
- struct dpu_power_handle *phandle;
struct dss_clk *core_clk;
u64 core_clk_rate;
u64 max_core_clk_rate;
@@ -113,21 +123,20 @@ void dpu_core_perf_destroy(struct dpu_core_perf *perf);
* @perf: Pointer to core performance context
* @dev: Pointer to drm device
* @catalog: Pointer to catalog
- * @phandle: Pointer to power handle
* @core_clk: pointer to core clock
*/
int dpu_core_perf_init(struct dpu_core_perf *perf,
struct drm_device *dev,
struct dpu_mdss_cfg *catalog,
- struct dpu_power_handle *phandle,
struct dss_clk *core_clk);
+struct dpu_kms;
+
/**
* dpu_core_perf_debugfs_init - initialize debugfs for core performance context
- * @perf: Pointer to core performance context
+ * @dpu_kms: Pointer to the dpu_kms struct
* @debugfs_parent: Pointer to parent debugfs
*/
-int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
- struct dentry *parent);
+int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent);
#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index d4530d60767b..9be7c355debd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -33,7 +33,6 @@
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
-#include "dpu_power_handle.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"
@@ -47,13 +46,7 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
-static inline int _dpu_crtc_get_mixer_width(struct dpu_crtc_state *cstate,
- struct drm_display_mode *mode)
-{
- return mode->hdisplay / cstate->num_mixers;
-}
-
-static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -69,10 +62,7 @@ static void dpu_crtc_destroy(struct drm_crtc *crtc)
if (!crtc)
return;
- dpu_crtc->phandle = NULL;
-
drm_crtc_cleanup(crtc);
- mutex_destroy(&dpu_crtc->crtc_lock);
kfree(dpu_crtc);
}
@@ -287,16 +277,17 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
return INTF_MODE_NONE;
}
- drm_for_each_encoder(encoder, crtc->dev)
- if (encoder->crtc == crtc)
- return dpu_encoder_get_intf_mode(encoder);
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+
+ /* TODO: Returns the first INTF_MODE, could there be multiple values? */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ return dpu_encoder_get_intf_mode(encoder);
return INTF_MODE_NONE;
}
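drm_for_each_encoder_mask(), introduced here and reused in the hunks below, replaces the open-coded walk of dev->mode_config.encoder_list filtered by encoder->crtc: it visits exactly the encoders recorded in the crtc state's encoder_mask. That mask is only stable while the crtc's modeset lock is held, hence the added WARN_ON. A minimal sketch:

#include <drm/drm_encoder.h>		/* drm_for_each_encoder_mask() */
#include <drm/drm_modeset_lock.h>

static unsigned int example_count_attached_encoders(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	unsigned int n = 0;

	/* encoder_mask lives in atomic state; hold the crtc lock. */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		n++;

	return n;
}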
-static void dpu_crtc_vblank_cb(void *data)
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc = (struct drm_crtc *)data;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
/* keep statistics on vblank callback - with auto reset via debugfs */
@@ -309,6 +300,19 @@ static void dpu_crtc_vblank_cb(void *data)
trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
+static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
+{
+ int ret = 0;
+ struct drm_modeset_acquire_ctx ctx;
+
+ DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
+ dpu_core_perf_crtc_release_bw(crtc);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
+ if (ret)
+ DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
+ ret);
+}
+
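DRM_MODESET_LOCK_ALL_BEGIN/END, used above, is the retry-aware way to take every modeset lock: BEGIN opens a drm_modeset_acquire_ctx and jumps back on -EDEADLK backoff, END drops the locks and finalizes the context, so the body between the markers must tolerate being re-run. A sketch of the bare pattern, matching the two-argument END form used in this patch:

#include <drm/drm_modeset_lock.h>

static void example_with_all_modeset_locks(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret = 0;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
	/* ... work needing all modeset locks; may execute more than
	 * once if a deadlock backoff forces a retry ... */
	DRM_MODESET_LOCK_ALL_END(ctx, ret);
	if (ret)
		DRM_ERROR("modeset locking failed, %d\n", ret);
}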
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
struct dpu_crtc_frame_event *fevent = container_of(work,
@@ -338,7 +342,7 @@ static void dpu_crtc_frame_event_work(struct kthread_work *work)
/* release bandwidth and other resources */
trace_dpu_crtc_frame_event_done(DRMID(crtc),
fevent->event);
- dpu_core_perf_crtc_release_bw(crtc);
+ dpu_crtc_release_bw_unlocked(crtc);
} else {
trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
fevent->event);
@@ -473,28 +477,21 @@ static void _dpu_crtc_setup_mixer_for_encoder(
static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *enc;
- mutex_lock(&dpu_crtc->crtc_lock);
- /* Check for mixers on all encoders attached to this crtc */
- list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
+ WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ /* Check for mixers on all encoders attached to this crtc */
+ drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
_dpu_crtc_setup_mixer_for_encoder(crtc, enc);
- }
-
- mutex_unlock(&dpu_crtc->crtc_lock);
}
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
struct drm_display_mode *adj_mode = &state->adjusted_mode;
- u32 crtc_split_width = _dpu_crtc_get_mixer_width(cstate, adj_mode);
+ u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
int i;
for (i = 0; i < cstate->num_mixers; i++) {
@@ -502,7 +499,7 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
r->x1 = crtc_split_width * i;
r->y1 = 0;
r->x2 = r->x1 + crtc_split_width;
- r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+ r->y2 = adj_mode->vdisplay;
trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
}
@@ -552,13 +549,9 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
-
- /* encoder will trigger pending mask now */
+ /* encoder will trigger pending mask now */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_trigger_kickoff_pending(encoder);
- }
/*
* If no mixers have been allocated in dpu_crtc_atomic_check(),
@@ -702,10 +695,9 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
return rc;
}
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
{
struct drm_encoder *encoder;
- struct drm_device *dev = crtc->dev;
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
@@ -721,127 +713,59 @@ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
DPU_ATRACE_BEGIN("crtc_commit");
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ /*
+ * Encoder will flush/start now, unless it has a tx pending. If so, it
+ * may delay and flush at an irq event (e.g. ppdone)
+ */
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc->state->encoder_mask) {
struct dpu_encoder_kickoff_params params = { 0 };
-
- if (encoder->crtc != crtc)
- continue;
-
- /*
- * Encoder will flush/start now, unless it has a tx pending.
- * If so, it may delay and flush at an irq event (e.g. ppdone)
- */
- dpu_encoder_prepare_for_kickoff(encoder, &params);
+ dpu_encoder_prepare_for_kickoff(encoder, &params, async);
}
- /* wait for frame_event_done completion */
- DPU_ATRACE_BEGIN("wait_for_frame_done_event");
- ret = _dpu_crtc_wait_for_frame_done(crtc);
- DPU_ATRACE_END("wait_for_frame_done_event");
- if (ret) {
- DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
- crtc->base.id,
- atomic_read(&dpu_crtc->frame_pending));
- goto end;
- }
- if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
- /* acquire bandwidth and other resources */
- DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
- } else
- DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+ if (!async) {
+ /* wait for frame_event_done completion */
+ DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+ ret = _dpu_crtc_wait_for_frame_done(crtc);
+ DPU_ATRACE_END("wait_for_frame_done_event");
+ if (ret) {
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+ goto end;
+ }
+
+ if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+ /* acquire bandwidth and other resources */
+ DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+ } else
+ DPU_DEBUG("crtc%d commit\n", crtc->base.id);
- dpu_crtc->play_count++;
+ dpu_crtc->play_count++;
+ }
dpu_vbif_clear_errors(dpu_kms);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc != crtc)
- continue;
-
- dpu_encoder_kickoff(encoder);
- }
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_kickoff(encoder, async);
end:
- reinit_completion(&dpu_crtc->frame_done_comp);
+ if (!async)
+ reinit_completion(&dpu_crtc->frame_done_comp);
DPU_ATRACE_END("crtc_commit");
}
-/**
- * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
- * @dpu_crtc: Pointer to dpu crtc structure
- * @enable: Whether to enable/disable vblanks
- */
-static void _dpu_crtc_vblank_enable_no_lock(
- struct dpu_crtc *dpu_crtc, bool enable)
-{
- struct drm_crtc *crtc = &dpu_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *enc;
-
- if (enable) {
- /* drop lock since power crtc cb may try to re-acquire lock */
- mutex_unlock(&dpu_crtc->crtc_lock);
- pm_runtime_get_sync(dev->dev);
- mutex_lock(&dpu_crtc->crtc_lock);
-
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
-
- trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
- DRMID(enc), enable,
- dpu_crtc);
-
- dpu_encoder_register_vblank_callback(enc,
- dpu_crtc_vblank_cb, (void *)crtc);
- }
- } else {
- list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
- if (enc->crtc != crtc)
- continue;
-
- trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
- DRMID(enc), enable,
- dpu_crtc);
-
- dpu_encoder_register_vblank_callback(enc, NULL, NULL);
- }
-
- /* drop lock since power crtc cb may try to re-acquire lock */
- mutex_unlock(&dpu_crtc->crtc_lock);
- pm_runtime_put_sync(dev->dev);
- mutex_lock(&dpu_crtc->crtc_lock);
- }
-}
-
-/**
- * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
- * @crtc: Pointer to drm crtc object
- * @enable: true to enable suspend, false to indicate resume
- */
-static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+static void dpu_crtc_reset(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-
- DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
-
- mutex_lock(&dpu_crtc->crtc_lock);
+ struct dpu_crtc_state *cstate;
- /*
- * If the vblank is enabled, release a power reference on suspend
- * and take it back during resume (if it is still enabled).
- */
- trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
- if (dpu_crtc->suspend == enable)
- DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
- crtc->base.id, enable);
- else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
- }
+ if (crtc->state)
+ dpu_crtc_destroy_state(crtc, crtc->state);
- dpu_crtc->suspend = enable;
- mutex_unlock(&dpu_crtc->crtc_lock);
+ crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
+ if (crtc->state)
+ crtc->state->crtc = crtc;
}
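The compact reset above works because struct dpu_crtc_state embeds its struct drm_crtc_state as the first member, so the kzalloc'd object can be stored straight into crtc->state. A sketch of the idiom with illustrative type names:

#include <linux/slab.h>
#include <drm/drm_crtc.h>

struct example_crtc_state {
	struct drm_crtc_state base;	/* must remain the first member */
	u32 driver_private;
};

static void example_crtc_reset(struct drm_crtc *crtc)
{
	struct example_crtc_state *cstate;

	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);

	/* Allocating the derived type but publishing the base pointer
	 * is valid only while .base sits at offset zero. */
	cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
	crtc->state = cstate ? &cstate->base : NULL;
	if (crtc->state)
		crtc->state->crtc = crtc;
}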
/**
@@ -873,65 +797,8 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
return &cstate->base;
}
-/**
- * dpu_crtc_reset - reset hook for CRTCs
- * Resets the atomic state for @crtc by freeing the state pointer (which might
- * be NULL, e.g. at driver load time) and allocating a new empty state object.
- * @crtc: Pointer to drm crtc structure
- */
-static void dpu_crtc_reset(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc;
- struct dpu_crtc_state *cstate;
-
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return;
- }
-
- /* revert suspend actions, if necessary */
- if (dpu_kms_is_suspend_state(crtc->dev))
- _dpu_crtc_set_suspend(crtc, false);
-
- /* remove previous state, if present */
- if (crtc->state) {
- dpu_crtc_destroy_state(crtc, crtc->state);
- crtc->state = 0;
- }
-
- dpu_crtc = to_dpu_crtc(crtc);
- cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
- if (!cstate) {
- DPU_ERROR("failed to allocate state\n");
- return;
- }
-
- cstate->base.crtc = crtc;
- crtc->state = &cstate->base;
-}
-
-static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
-{
- struct drm_crtc *crtc = arg;
- struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- struct drm_encoder *encoder;
-
- mutex_lock(&dpu_crtc->crtc_lock);
-
- trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
-
- /* restore encoder; crtc will be programmed during commit */
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
-
- dpu_encoder_virt_restore(encoder);
- }
-
- mutex_unlock(&dpu_crtc->crtc_lock);
-}
-
-static void dpu_crtc_disable(struct drm_crtc *crtc)
+static void dpu_crtc_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
@@ -951,13 +818,12 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
- if (dpu_kms_is_suspend_state(crtc->dev))
- _dpu_crtc_set_suspend(crtc, true);
-
/* Disable/save vblank irq handling */
drm_crtc_vblank_off(crtc);
- mutex_lock(&dpu_crtc->crtc_lock);
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ old_crtc_state->encoder_mask)
+ dpu_encoder_assign_crtc(encoder, NULL);
/* wait for frame_event_done completion */
if (_dpu_crtc_wait_for_frame_done(crtc))
@@ -966,10 +832,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
atomic_read(&dpu_crtc->frame_pending));
trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
- if (dpu_crtc->enabled && !dpu_crtc->suspend &&
- dpu_crtc->vblank_requested) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
- }
dpu_crtc->enabled = false;
if (atomic_read(&dpu_crtc->frame_pending)) {
@@ -981,15 +843,8 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
dpu_core_perf_crtc_update(crtc, 0, true);
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
- }
-
- if (dpu_crtc->power_event)
- dpu_power_handle_unregister_event(dpu_crtc->phandle,
- dpu_crtc->power_event);
memset(cstate->mixers, 0, sizeof(cstate->mixers));
cstate->num_mixers = 0;
@@ -998,14 +853,14 @@ static void dpu_crtc_disable(struct drm_crtc *crtc)
cstate->bw_control = false;
cstate->bw_split_vote = false;
- mutex_unlock(&dpu_crtc->crtc_lock);
-
if (crtc->state->event && !crtc->state->active) {
spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
+
+ pm_runtime_put_sync(crtc->dev->dev);
}
static void dpu_crtc_enable(struct drm_crtc *crtc,
@@ -1021,33 +876,23 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
}
priv = crtc->dev->dev_private;
+ pm_runtime_get_sync(crtc->dev->dev);
+
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
dpu_crtc = to_dpu_crtc(crtc);
- drm_for_each_encoder(encoder, crtc->dev) {
- if (encoder->crtc != crtc)
- continue;
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_register_frame_event_callback(encoder,
dpu_crtc_frame_event_cb, (void *)crtc);
- }
- mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
- if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
- dpu_crtc->vblank_requested) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
- }
dpu_crtc->enabled = true;
- mutex_unlock(&dpu_crtc->crtc_lock);
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+ dpu_encoder_assign_crtc(encoder, crtc);
/* Enable/restore vblank irq handling */
drm_crtc_vblank_on(crtc);
-
- dpu_crtc->power_event = dpu_power_handle_register_event(
- dpu_crtc->phandle, DPU_POWER_EVENT_ENABLE,
- dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
-
}
struct plane_state {
@@ -1101,7 +946,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
memset(pipe_staged, 0, sizeof(pipe_staged));
- mixer_width = _dpu_crtc_get_mixer_width(cstate, mode);
+ mixer_width = mode->hdisplay / cstate->num_mixers;
_dpu_crtc_setup_lm_bounds(crtc, state);
@@ -1289,21 +1134,32 @@ end:
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
- struct dpu_crtc *dpu_crtc;
-
- if (!crtc) {
- DPU_ERROR("invalid crtc\n");
- return -EINVAL;
- }
- dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *enc;
- mutex_lock(&dpu_crtc->crtc_lock);
trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
- if (dpu_crtc->enabled && !dpu_crtc->suspend) {
- _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+
+ /*
+ * Normally we would iterate through encoder_mask in crtc state to find
+ * attached encoders. In this case, we might be disabling vblank _after_
+ * encoder_mask has been cleared.
+ *
+ * Instead, we "assign" a crtc to the encoder in enable and clear it in
+ * disable (which is also after encoder_mask is cleared). So instead of
+ * using encoder mask, we'll ask the encoder to toggle itself iff it's
+ * currently assigned to our crtc.
+ *
+ * Note also that this function cannot be called while crtc is disabled
+ * since we use drm_crtc_vblank_on/off. So we don't need to worry
+ * about the assigned crtcs being inconsistent with the current state
+ * (which means no need to worry about modeset locks).
+ */
+ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+ trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
+ dpu_crtc);
+
+ dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
}
- dpu_crtc->vblank_requested = en;
- mutex_unlock(&dpu_crtc->crtc_lock);
return 0;
}
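The comment in dpu_crtc_vblank() above is the crux of the new vblank wiring: enable/disable "assigns" the crtc to each attached encoder via dpu_encoder_assign_crtc(), and the vblank path then offers the toggle to every encoder, with dpu_encoder_toggle_vblank_for_crtc() acting only where the assigned crtc matches. A stripped-down sketch of the toggle side:

#include <drm/drm_crtc.h>
#include "dpu_encoder.h"	/* dpu_encoder_toggle_vblank_for_crtc() */

/* Walk all encoders, not crtc->state->encoder_mask: on disable the
 * mask may already be cleared, while the assignment made at enable
 * time is not. */
static void example_toggle_vblank(struct drm_crtc *crtc, bool enable)
{
	struct drm_encoder *enc;

	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list,
			    head)
		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, enable);
}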
@@ -1324,18 +1180,14 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
int i, out_width;
- if (!s || !s->private)
- return -EINVAL;
-
dpu_crtc = s->private;
crtc = &dpu_crtc->base;
drm_modeset_lock_all(crtc->dev);
cstate = to_dpu_crtc_state(crtc->state);
- mutex_lock(&dpu_crtc->crtc_lock);
mode = &crtc->state->adjusted_mode;
- out_width = _dpu_crtc_get_mixer_width(cstate, mode);
+ out_width = mode->hdisplay / cstate->num_mixers;
seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
mode->hdisplay, mode->vdisplay);
@@ -1420,9 +1272,6 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
dpu_crtc->vblank_cb_time = ktime_set(0, 0);
}
- seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
-
- mutex_unlock(&dpu_crtc->crtc_lock);
drm_modeset_unlock_all(crtc->dev);
return 0;
@@ -1456,13 +1305,11 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
seq_printf(s, "core_clk_rate: %llu\n",
dpu_crtc->cur_perf.core_clk_rate);
- for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
- i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
- seq_printf(s, "bw_ctl[%s]: %llu\n",
- dpu_power_handle_get_dbus_name(i),
+ for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
+ i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
+ seq_printf(s, "bw_ctl[%d]: %llu\n", i,
dpu_crtc->cur_perf.bw_ctl[i]);
- seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
- dpu_power_handle_get_dbus_name(i),
+ seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
dpu_crtc->cur_perf.max_per_pipe_ib[i]);
}
@@ -1472,8 +1319,7 @@ DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
- struct dpu_kms *dpu_kms;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
static const struct file_operations debugfs_status_fops = {
.open = _dpu_debugfs_status_open,
@@ -1482,12 +1328,6 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
.release = single_release,
};
- if (!crtc)
- return -EINVAL;
- dpu_crtc = to_dpu_crtc(crtc);
-
- dpu_kms = _dpu_crtc_get_kms(crtc);
-
dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
crtc->dev->primary->debugfs_root);
if (!dpu_crtc->debugfs_root)
@@ -1504,25 +1344,11 @@ static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
return 0;
}
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
- struct dpu_crtc *dpu_crtc;
-
- if (!crtc)
- return;
- dpu_crtc = to_dpu_crtc(crtc);
- debugfs_remove_recursive(dpu_crtc->debugfs_root);
-}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
return 0;
}
-
-static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
-{
-}
#endif /* CONFIG_DEBUG_FS */
static int dpu_crtc_late_register(struct drm_crtc *crtc)
@@ -1532,7 +1358,9 @@ static int dpu_crtc_late_register(struct drm_crtc *crtc)
static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
- _dpu_crtc_destroy_debugfs(crtc);
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ debugfs_remove_recursive(dpu_crtc->debugfs_root);
}
static const struct drm_crtc_funcs dpu_crtc_funcs = {
@@ -1547,7 +1375,7 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
- .disable = dpu_crtc_disable,
+ .atomic_disable = dpu_crtc_disable,
.atomic_enable = dpu_crtc_enable,
.atomic_check = dpu_crtc_atomic_check,
.atomic_begin = dpu_crtc_atomic_begin,
@@ -1574,7 +1402,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
crtc = &dpu_crtc->base;
crtc->dev = dev;
- mutex_init(&dpu_crtc->crtc_lock);
spin_lock_init(&dpu_crtc->spin_lock);
atomic_set(&dpu_crtc->frame_pending, 0);
@@ -1594,7 +1421,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
NULL);
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
- plane->crtc = crtc;
/* save user friendly CRTC name for later */
snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
@@ -1602,8 +1428,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
/* initialize event handling */
spin_lock_init(&dpu_crtc->event_lock);
- dpu_crtc->phandle = &kms->phandle;
-
DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
return crtc;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 3723b4830335..dbfb38a1986c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -132,8 +132,6 @@ struct dpu_crtc_frame_event {
* @vblank_cb_count : count of vblank callback since last reset
* @play_count : frame count between crtc enable and disable
* @vblank_cb_time : ktime at vblank count reset
- * @vblank_requested : whether the user has requested vblank events
- * @suspend : whether or not a suspend operation is in progress
* @enabled : whether the DPU CRTC is currently enabled. updated in the
* commit-thread, not state-swap time which is earlier, so
* safe to make decisions on during VBLANK on/off work
@@ -142,7 +140,6 @@ struct dpu_crtc_frame_event {
* @dirty_list : list of color processing features are dirty
* @ad_dirty: list containing ad properties that are dirty
* @ad_active: list containing ad properties that are active
- * @crtc_lock : crtc lock around create, destroy and access.
* @frame_pending : Whether or not an update is pending
* @frame_events : static allocation of in-flight frame events
* @frame_event_list : available frame event list
@@ -152,7 +149,6 @@ struct dpu_crtc_frame_event {
* @event_worker : Event worker queue
* @event_lock : Spinlock around event handling code
* @phandle: Pointer to power handler
- * @power_event : registered power event handle
* @cur_perf : current performance committed to clock/bandwidth driver
*/
struct dpu_crtc {
@@ -168,8 +164,6 @@ struct dpu_crtc {
u32 vblank_cb_count;
u64 play_count;
ktime_t vblank_cb_time;
- bool vblank_requested;
- bool suspend;
bool enabled;
struct list_head feature_list;
@@ -178,8 +172,6 @@ struct dpu_crtc {
struct list_head ad_dirty;
struct list_head ad_active;
- struct mutex crtc_lock;
-
atomic_t frame_pending;
struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
struct list_head frame_event_list;
@@ -189,9 +181,6 @@ struct dpu_crtc {
/* for handling internal event thread */
spinlock_t event_lock;
- struct dpu_power_handle *phandle;
- struct dpu_power_event *power_event;
-
struct dpu_core_perf_params cur_perf;
struct dpu_crtc_smmu_state_data smmu_state;
@@ -238,41 +227,12 @@ struct dpu_crtc_state {
container_of(x, struct dpu_crtc_state, base)
/**
- * dpu_crtc_state_is_stereo - Is crtc virtualized with two mixers?
- * @cstate: Pointer to dpu crtc state
- * @Return: true - has two mixers, false - has one mixer
- */
-static inline bool dpu_crtc_state_is_stereo(struct dpu_crtc_state *cstate)
-{
- return cstate->num_mixers == CRTC_DUAL_MIXERS;
-}
-
-/**
- * dpu_crtc_get_mixer_height - get the mixer height
- * Mixer height will be same as panel height
- */
-static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
- struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
-{
- if (!dpu_crtc || !cstate || !mode)
- return 0;
-
- return mode->vdisplay;
-}
-
-/**
* dpu_crtc_frame_pending - return the number of pending frames
* @crtc: Pointer to drm crtc object
*/
static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
-
- if (!crtc)
- return -EINVAL;
-
- dpu_crtc = to_dpu_crtc(crtc);
- return atomic_read(&dpu_crtc->frame_pending);
+ return crtc ? atomic_read(&to_dpu_crtc(crtc)->frame_pending) : -EINVAL;
}
/**
@@ -283,10 +243,17 @@ static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
/**
+ * dpu_crtc_vblank_callback - called on vblank irq, issues completion events
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_vblank_callback(struct drm_crtc *crtc);
+
+/**
* dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
* @crtc: Pointer to drm crtc object
+ * @async: true if the commit is asynchronous, false otherwise
*/
-void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async);
/**
* dpu_crtc_complete_commit - callback signalling completion of current commit
@@ -329,22 +296,7 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
struct drm_crtc *crtc)
{
- struct dpu_crtc_state *cstate =
- crtc ? to_dpu_crtc_state(crtc->state) : NULL;
-
- if (!cstate)
- return NRT_CLIENT;
-
- return RT_CLIENT;
-}
-
-/**
- * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
- * @crtc: Pointer to crtc
- */
-static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
-{
- return crtc ? crtc->enabled : false;
+ return crtc && crtc->state ? RT_CLIENT : NRT_CLIENT;
}
#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
deleted file mode 100644
index ae2aee7ed9e1..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
+++ /dev/null
@@ -1,2393 +0,0 @@
-/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/ktime.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/dma-buf.h>
-#include <linux/slab.h>
-#include <linux/list_sort.h>
-#include <linux/pm_runtime.h>
-
-#include "dpu_dbg.h"
-#include "disp/dpu1/dpu_hw_catalog.h"
-
-
-#define DEFAULT_DBGBUS_DPU DPU_DBG_DUMP_IN_MEM
-#define DEFAULT_DBGBUS_VBIFRT DPU_DBG_DUMP_IN_MEM
-#define REG_BASE_NAME_LEN 80
-
-#define DBGBUS_FLAGS_DSPP BIT(0)
-#define DBGBUS_DSPP_STATUS 0x34C
-
-#define DBGBUS_NAME_DPU "dpu"
-#define DBGBUS_NAME_VBIF_RT "vbif_rt"
-
-/* offsets from dpu top address for the debug buses */
-#define DBGBUS_SSPP0 0x188
-#define DBGBUS_AXI_INTF 0x194
-#define DBGBUS_SSPP1 0x298
-#define DBGBUS_DSPP 0x348
-#define DBGBUS_PERIPH 0x418
-
-#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
-
-/* following offsets are with respect to MDP VBIF base for DBG BUS access */
-#define MMSS_VBIF_CLKON 0x4
-#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
-#define MMSS_VBIF_TEST_BUS_OUT 0x230
-
-/* Vbif error info */
-#define MMSS_VBIF_PND_ERR 0x190
-#define MMSS_VBIF_SRC_ERR 0x194
-#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
-#define MMSS_VBIF_ERR_INFO 0x1a0
-#define MMSS_VBIF_ERR_INFO_1 0x1a4
-#define MMSS_VBIF_CLIENT_NUM 14
-
-/**
- * struct dpu_dbg_reg_base - register region base.
- * may have sub-ranges: sub-ranges are used for dumping
- * or may not have sub-ranges: dumping is base -> max_offset
- * @reg_base_head: head of this node
- * @name: register base name
- * @base: base pointer
- * @off: cached offset of region for manual register dumping
- * @cnt: cached range of region for manual register dumping
- * @max_offset: length of region
- * @buf: buffer used for manual register dumping
- * @buf_len: buffer length used for manual register dumping
- * @cb: callback for external dump function, null if not defined
- * @cb_ptr: private pointer to callback function
- */
-struct dpu_dbg_reg_base {
- struct list_head reg_base_head;
- char name[REG_BASE_NAME_LEN];
- void __iomem *base;
- size_t off;
- size_t cnt;
- size_t max_offset;
- char *buf;
- size_t buf_len;
- void (*cb)(void *ptr);
- void *cb_ptr;
-};
-
-struct dpu_debug_bus_entry {
- u32 wr_addr;
- u32 block_id;
- u32 test_id;
- void (*analyzer)(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val);
-};
-
-struct vbif_debug_bus_entry {
- u32 disable_bus_addr;
- u32 block_bus_addr;
- u32 bit_offset;
- u32 block_cnt;
- u32 test_pnt_start;
- u32 test_pnt_cnt;
-};
-
-struct dpu_dbg_debug_bus_common {
- char *name;
- u32 enable_mask;
- bool include_in_deferred_work;
- u32 flags;
- u32 entries_size;
- u32 *dumped_content;
-};
-
-struct dpu_dbg_dpu_debug_bus {
- struct dpu_dbg_debug_bus_common cmn;
- struct dpu_debug_bus_entry *entries;
- u32 top_blk_off;
-};
-
-struct dpu_dbg_vbif_debug_bus {
- struct dpu_dbg_debug_bus_common cmn;
- struct vbif_debug_bus_entry *entries;
-};
-
-/**
- * struct dpu_dbg_base - global dpu debug base structure
- * @reg_base_list: list of register dumping regions
- * @dev: device pointer
- * @dump_work: work struct for deferring register dump work to separate thread
- * @dbgbus_dpu: debug bus structure for the dpu
- * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
- */
-static struct dpu_dbg_base {
- struct list_head reg_base_list;
- struct device *dev;
-
- struct work_struct dump_work;
-
- struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
- struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
-} dpu_dbg_base;
-
-static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & 0xFFF000))
- return;
-
- dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & BIT(15)))
- return;
-
- dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
- struct dpu_debug_bus_entry *entry, u32 val)
-{
- if (!(val & BIT(15)))
- return;
-
- dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
- entry->wr_addr, entry->block_id, entry->test_id, val);
-}
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
-
- /* Unpack 0 sspp 0*/
- { DBGBUS_SSPP0, 50, 2 },
- { DBGBUS_SSPP0, 60, 2 },
- { DBGBUS_SSPP0, 70, 2 },
- { DBGBUS_SSPP0, 85, 2 },
-
- /* Unpack 0 sspp 1*/
- { DBGBUS_SSPP1, 50, 2 },
- { DBGBUS_SSPP1, 60, 2 },
- { DBGBUS_SSPP1, 70, 2 },
- { DBGBUS_SSPP1, 85, 2 },
-
- /* scheduler */
- { DBGBUS_DSPP, 130, 0 },
- { DBGBUS_DSPP, 130, 1 },
- { DBGBUS_DSPP, 130, 2 },
- { DBGBUS_DSPP, 130, 3 },
- { DBGBUS_DSPP, 130, 4 },
- { DBGBUS_DSPP, 130, 5 },
-
- /* qseed */
- { DBGBUS_SSPP0, 6, 0},
- { DBGBUS_SSPP0, 6, 1},
- { DBGBUS_SSPP0, 26, 0},
- { DBGBUS_SSPP0, 26, 1},
- { DBGBUS_SSPP1, 6, 0},
- { DBGBUS_SSPP1, 6, 1},
- { DBGBUS_SSPP1, 26, 0},
- { DBGBUS_SSPP1, 26, 1},
-
- /* scale */
- { DBGBUS_SSPP0, 16, 0},
- { DBGBUS_SSPP0, 16, 1},
- { DBGBUS_SSPP0, 36, 0},
- { DBGBUS_SSPP0, 36, 1},
- { DBGBUS_SSPP1, 16, 0},
- { DBGBUS_SSPP1, 16, 1},
- { DBGBUS_SSPP1, 36, 0},
- { DBGBUS_SSPP1, 36, 1},
-
- /* fetch sspp0 */
-
- /* vig 0 */
- { DBGBUS_SSPP0, 0, 0 },
- { DBGBUS_SSPP0, 0, 1 },
- { DBGBUS_SSPP0, 0, 2 },
- { DBGBUS_SSPP0, 0, 3 },
- { DBGBUS_SSPP0, 0, 4 },
- { DBGBUS_SSPP0, 0, 5 },
- { DBGBUS_SSPP0, 0, 6 },
- { DBGBUS_SSPP0, 0, 7 },
-
- { DBGBUS_SSPP0, 1, 0 },
- { DBGBUS_SSPP0, 1, 1 },
- { DBGBUS_SSPP0, 1, 2 },
- { DBGBUS_SSPP0, 1, 3 },
- { DBGBUS_SSPP0, 1, 4 },
- { DBGBUS_SSPP0, 1, 5 },
- { DBGBUS_SSPP0, 1, 6 },
- { DBGBUS_SSPP0, 1, 7 },
-
- { DBGBUS_SSPP0, 2, 0 },
- { DBGBUS_SSPP0, 2, 1 },
- { DBGBUS_SSPP0, 2, 2 },
- { DBGBUS_SSPP0, 2, 3 },
- { DBGBUS_SSPP0, 2, 4 },
- { DBGBUS_SSPP0, 2, 5 },
- { DBGBUS_SSPP0, 2, 6 },
- { DBGBUS_SSPP0, 2, 7 },
-
- { DBGBUS_SSPP0, 4, 0 },
- { DBGBUS_SSPP0, 4, 1 },
- { DBGBUS_SSPP0, 4, 2 },
- { DBGBUS_SSPP0, 4, 3 },
- { DBGBUS_SSPP0, 4, 4 },
- { DBGBUS_SSPP0, 4, 5 },
- { DBGBUS_SSPP0, 4, 6 },
- { DBGBUS_SSPP0, 4, 7 },
-
- { DBGBUS_SSPP0, 5, 0 },
- { DBGBUS_SSPP0, 5, 1 },
- { DBGBUS_SSPP0, 5, 2 },
- { DBGBUS_SSPP0, 5, 3 },
- { DBGBUS_SSPP0, 5, 4 },
- { DBGBUS_SSPP0, 5, 5 },
- { DBGBUS_SSPP0, 5, 6 },
- { DBGBUS_SSPP0, 5, 7 },
-
- /* vig 2 */
- { DBGBUS_SSPP0, 20, 0 },
- { DBGBUS_SSPP0, 20, 1 },
- { DBGBUS_SSPP0, 20, 2 },
- { DBGBUS_SSPP0, 20, 3 },
- { DBGBUS_SSPP0, 20, 4 },
- { DBGBUS_SSPP0, 20, 5 },
- { DBGBUS_SSPP0, 20, 6 },
- { DBGBUS_SSPP0, 20, 7 },
-
- { DBGBUS_SSPP0, 21, 0 },
- { DBGBUS_SSPP0, 21, 1 },
- { DBGBUS_SSPP0, 21, 2 },
- { DBGBUS_SSPP0, 21, 3 },
- { DBGBUS_SSPP0, 21, 4 },
- { DBGBUS_SSPP0, 21, 5 },
- { DBGBUS_SSPP0, 21, 6 },
- { DBGBUS_SSPP0, 21, 7 },
-
- { DBGBUS_SSPP0, 22, 0 },
- { DBGBUS_SSPP0, 22, 1 },
- { DBGBUS_SSPP0, 22, 2 },
- { DBGBUS_SSPP0, 22, 3 },
- { DBGBUS_SSPP0, 22, 4 },
- { DBGBUS_SSPP0, 22, 5 },
- { DBGBUS_SSPP0, 22, 6 },
- { DBGBUS_SSPP0, 22, 7 },
-
- { DBGBUS_SSPP0, 24, 0 },
- { DBGBUS_SSPP0, 24, 1 },
- { DBGBUS_SSPP0, 24, 2 },
- { DBGBUS_SSPP0, 24, 3 },
- { DBGBUS_SSPP0, 24, 4 },
- { DBGBUS_SSPP0, 24, 5 },
- { DBGBUS_SSPP0, 24, 6 },
- { DBGBUS_SSPP0, 24, 7 },
-
- { DBGBUS_SSPP0, 25, 0 },
- { DBGBUS_SSPP0, 25, 1 },
- { DBGBUS_SSPP0, 25, 2 },
- { DBGBUS_SSPP0, 25, 3 },
- { DBGBUS_SSPP0, 25, 4 },
- { DBGBUS_SSPP0, 25, 5 },
- { DBGBUS_SSPP0, 25, 6 },
- { DBGBUS_SSPP0, 25, 7 },
-
- /* dma 2 */
- { DBGBUS_SSPP0, 30, 0 },
- { DBGBUS_SSPP0, 30, 1 },
- { DBGBUS_SSPP0, 30, 2 },
- { DBGBUS_SSPP0, 30, 3 },
- { DBGBUS_SSPP0, 30, 4 },
- { DBGBUS_SSPP0, 30, 5 },
- { DBGBUS_SSPP0, 30, 6 },
- { DBGBUS_SSPP0, 30, 7 },
-
- { DBGBUS_SSPP0, 31, 0 },
- { DBGBUS_SSPP0, 31, 1 },
- { DBGBUS_SSPP0, 31, 2 },
- { DBGBUS_SSPP0, 31, 3 },
- { DBGBUS_SSPP0, 31, 4 },
- { DBGBUS_SSPP0, 31, 5 },
- { DBGBUS_SSPP0, 31, 6 },
- { DBGBUS_SSPP0, 31, 7 },
-
- { DBGBUS_SSPP0, 32, 0 },
- { DBGBUS_SSPP0, 32, 1 },
- { DBGBUS_SSPP0, 32, 2 },
- { DBGBUS_SSPP0, 32, 3 },
- { DBGBUS_SSPP0, 32, 4 },
- { DBGBUS_SSPP0, 32, 5 },
- { DBGBUS_SSPP0, 32, 6 },
- { DBGBUS_SSPP0, 32, 7 },
-
- { DBGBUS_SSPP0, 33, 0 },
- { DBGBUS_SSPP0, 33, 1 },
- { DBGBUS_SSPP0, 33, 2 },
- { DBGBUS_SSPP0, 33, 3 },
- { DBGBUS_SSPP0, 33, 4 },
- { DBGBUS_SSPP0, 33, 5 },
- { DBGBUS_SSPP0, 33, 6 },
- { DBGBUS_SSPP0, 33, 7 },
-
- { DBGBUS_SSPP0, 34, 0 },
- { DBGBUS_SSPP0, 34, 1 },
- { DBGBUS_SSPP0, 34, 2 },
- { DBGBUS_SSPP0, 34, 3 },
- { DBGBUS_SSPP0, 34, 4 },
- { DBGBUS_SSPP0, 34, 5 },
- { DBGBUS_SSPP0, 34, 6 },
- { DBGBUS_SSPP0, 34, 7 },
-
- { DBGBUS_SSPP0, 35, 0 },
- { DBGBUS_SSPP0, 35, 1 },
- { DBGBUS_SSPP0, 35, 2 },
- { DBGBUS_SSPP0, 35, 3 },
-
- /* dma 0 */
- { DBGBUS_SSPP0, 40, 0 },
- { DBGBUS_SSPP0, 40, 1 },
- { DBGBUS_SSPP0, 40, 2 },
- { DBGBUS_SSPP0, 40, 3 },
- { DBGBUS_SSPP0, 40, 4 },
- { DBGBUS_SSPP0, 40, 5 },
- { DBGBUS_SSPP0, 40, 6 },
- { DBGBUS_SSPP0, 40, 7 },
-
- { DBGBUS_SSPP0, 41, 0 },
- { DBGBUS_SSPP0, 41, 1 },
- { DBGBUS_SSPP0, 41, 2 },
- { DBGBUS_SSPP0, 41, 3 },
- { DBGBUS_SSPP0, 41, 4 },
- { DBGBUS_SSPP0, 41, 5 },
- { DBGBUS_SSPP0, 41, 6 },
- { DBGBUS_SSPP0, 41, 7 },
-
- { DBGBUS_SSPP0, 42, 0 },
- { DBGBUS_SSPP0, 42, 1 },
- { DBGBUS_SSPP0, 42, 2 },
- { DBGBUS_SSPP0, 42, 3 },
- { DBGBUS_SSPP0, 42, 4 },
- { DBGBUS_SSPP0, 42, 5 },
- { DBGBUS_SSPP0, 42, 6 },
- { DBGBUS_SSPP0, 42, 7 },
-
- { DBGBUS_SSPP0, 44, 0 },
- { DBGBUS_SSPP0, 44, 1 },
- { DBGBUS_SSPP0, 44, 2 },
- { DBGBUS_SSPP0, 44, 3 },
- { DBGBUS_SSPP0, 44, 4 },
- { DBGBUS_SSPP0, 44, 5 },
- { DBGBUS_SSPP0, 44, 6 },
- { DBGBUS_SSPP0, 44, 7 },
-
- { DBGBUS_SSPP0, 45, 0 },
- { DBGBUS_SSPP0, 45, 1 },
- { DBGBUS_SSPP0, 45, 2 },
- { DBGBUS_SSPP0, 45, 3 },
- { DBGBUS_SSPP0, 45, 4 },
- { DBGBUS_SSPP0, 45, 5 },
- { DBGBUS_SSPP0, 45, 6 },
- { DBGBUS_SSPP0, 45, 7 },
-
- /* fetch sspp1 */
- /* vig 1 */
- { DBGBUS_SSPP1, 0, 0 },
- { DBGBUS_SSPP1, 0, 1 },
- { DBGBUS_SSPP1, 0, 2 },
- { DBGBUS_SSPP1, 0, 3 },
- { DBGBUS_SSPP1, 0, 4 },
- { DBGBUS_SSPP1, 0, 5 },
- { DBGBUS_SSPP1, 0, 6 },
- { DBGBUS_SSPP1, 0, 7 },
-
- { DBGBUS_SSPP1, 1, 0 },
- { DBGBUS_SSPP1, 1, 1 },
- { DBGBUS_SSPP1, 1, 2 },
- { DBGBUS_SSPP1, 1, 3 },
- { DBGBUS_SSPP1, 1, 4 },
- { DBGBUS_SSPP1, 1, 5 },
- { DBGBUS_SSPP1, 1, 6 },
- { DBGBUS_SSPP1, 1, 7 },
-
- { DBGBUS_SSPP1, 2, 0 },
- { DBGBUS_SSPP1, 2, 1 },
- { DBGBUS_SSPP1, 2, 2 },
- { DBGBUS_SSPP1, 2, 3 },
- { DBGBUS_SSPP1, 2, 4 },
- { DBGBUS_SSPP1, 2, 5 },
- { DBGBUS_SSPP1, 2, 6 },
- { DBGBUS_SSPP1, 2, 7 },
-
- { DBGBUS_SSPP1, 4, 0 },
- { DBGBUS_SSPP1, 4, 1 },
- { DBGBUS_SSPP1, 4, 2 },
- { DBGBUS_SSPP1, 4, 3 },
- { DBGBUS_SSPP1, 4, 4 },
- { DBGBUS_SSPP1, 4, 5 },
- { DBGBUS_SSPP1, 4, 6 },
- { DBGBUS_SSPP1, 4, 7 },
-
- { DBGBUS_SSPP1, 5, 0 },
- { DBGBUS_SSPP1, 5, 1 },
- { DBGBUS_SSPP1, 5, 2 },
- { DBGBUS_SSPP1, 5, 3 },
- { DBGBUS_SSPP1, 5, 4 },
- { DBGBUS_SSPP1, 5, 5 },
- { DBGBUS_SSPP1, 5, 6 },
- { DBGBUS_SSPP1, 5, 7 },
-
- /* vig 3 */
- { DBGBUS_SSPP1, 20, 0 },
- { DBGBUS_SSPP1, 20, 1 },
- { DBGBUS_SSPP1, 20, 2 },
- { DBGBUS_SSPP1, 20, 3 },
- { DBGBUS_SSPP1, 20, 4 },
- { DBGBUS_SSPP1, 20, 5 },
- { DBGBUS_SSPP1, 20, 6 },
- { DBGBUS_SSPP1, 20, 7 },
-
- { DBGBUS_SSPP1, 21, 0 },
- { DBGBUS_SSPP1, 21, 1 },
- { DBGBUS_SSPP1, 21, 2 },
- { DBGBUS_SSPP1, 21, 3 },
- { DBGBUS_SSPP1, 21, 4 },
- { DBGBUS_SSPP1, 21, 5 },
- { DBGBUS_SSPP1, 21, 6 },
- { DBGBUS_SSPP1, 21, 7 },
-
- { DBGBUS_SSPP1, 22, 0 },
- { DBGBUS_SSPP1, 22, 1 },
- { DBGBUS_SSPP1, 22, 2 },
- { DBGBUS_SSPP1, 22, 3 },
- { DBGBUS_SSPP1, 22, 4 },
- { DBGBUS_SSPP1, 22, 5 },
- { DBGBUS_SSPP1, 22, 6 },
- { DBGBUS_SSPP1, 22, 7 },
-
- { DBGBUS_SSPP1, 24, 0 },
- { DBGBUS_SSPP1, 24, 1 },
- { DBGBUS_SSPP1, 24, 2 },
- { DBGBUS_SSPP1, 24, 3 },
- { DBGBUS_SSPP1, 24, 4 },
- { DBGBUS_SSPP1, 24, 5 },
- { DBGBUS_SSPP1, 24, 6 },
- { DBGBUS_SSPP1, 24, 7 },
-
- { DBGBUS_SSPP1, 25, 0 },
- { DBGBUS_SSPP1, 25, 1 },
- { DBGBUS_SSPP1, 25, 2 },
- { DBGBUS_SSPP1, 25, 3 },
- { DBGBUS_SSPP1, 25, 4 },
- { DBGBUS_SSPP1, 25, 5 },
- { DBGBUS_SSPP1, 25, 6 },
- { DBGBUS_SSPP1, 25, 7 },
-
- /* dma 3 */
- { DBGBUS_SSPP1, 30, 0 },
- { DBGBUS_SSPP1, 30, 1 },
- { DBGBUS_SSPP1, 30, 2 },
- { DBGBUS_SSPP1, 30, 3 },
- { DBGBUS_SSPP1, 30, 4 },
- { DBGBUS_SSPP1, 30, 5 },
- { DBGBUS_SSPP1, 30, 6 },
- { DBGBUS_SSPP1, 30, 7 },
-
- { DBGBUS_SSPP1, 31, 0 },
- { DBGBUS_SSPP1, 31, 1 },
- { DBGBUS_SSPP1, 31, 2 },
- { DBGBUS_SSPP1, 31, 3 },
- { DBGBUS_SSPP1, 31, 4 },
- { DBGBUS_SSPP1, 31, 5 },
- { DBGBUS_SSPP1, 31, 6 },
- { DBGBUS_SSPP1, 31, 7 },
-
- { DBGBUS_SSPP1, 32, 0 },
- { DBGBUS_SSPP1, 32, 1 },
- { DBGBUS_SSPP1, 32, 2 },
- { DBGBUS_SSPP1, 32, 3 },
- { DBGBUS_SSPP1, 32, 4 },
- { DBGBUS_SSPP1, 32, 5 },
- { DBGBUS_SSPP1, 32, 6 },
- { DBGBUS_SSPP1, 32, 7 },
-
- { DBGBUS_SSPP1, 33, 0 },
- { DBGBUS_SSPP1, 33, 1 },
- { DBGBUS_SSPP1, 33, 2 },
- { DBGBUS_SSPP1, 33, 3 },
- { DBGBUS_SSPP1, 33, 4 },
- { DBGBUS_SSPP1, 33, 5 },
- { DBGBUS_SSPP1, 33, 6 },
- { DBGBUS_SSPP1, 33, 7 },
-
- { DBGBUS_SSPP1, 34, 0 },
- { DBGBUS_SSPP1, 34, 1 },
- { DBGBUS_SSPP1, 34, 2 },
- { DBGBUS_SSPP1, 34, 3 },
- { DBGBUS_SSPP1, 34, 4 },
- { DBGBUS_SSPP1, 34, 5 },
- { DBGBUS_SSPP1, 34, 6 },
- { DBGBUS_SSPP1, 34, 7 },
-
- { DBGBUS_SSPP1, 35, 0 },
- { DBGBUS_SSPP1, 35, 1 },
- { DBGBUS_SSPP1, 35, 2 },
-
- /* dma 1 */
- { DBGBUS_SSPP1, 40, 0 },
- { DBGBUS_SSPP1, 40, 1 },
- { DBGBUS_SSPP1, 40, 2 },
- { DBGBUS_SSPP1, 40, 3 },
- { DBGBUS_SSPP1, 40, 4 },
- { DBGBUS_SSPP1, 40, 5 },
- { DBGBUS_SSPP1, 40, 6 },
- { DBGBUS_SSPP1, 40, 7 },
-
- { DBGBUS_SSPP1, 41, 0 },
- { DBGBUS_SSPP1, 41, 1 },
- { DBGBUS_SSPP1, 41, 2 },
- { DBGBUS_SSPP1, 41, 3 },
- { DBGBUS_SSPP1, 41, 4 },
- { DBGBUS_SSPP1, 41, 5 },
- { DBGBUS_SSPP1, 41, 6 },
- { DBGBUS_SSPP1, 41, 7 },
-
- { DBGBUS_SSPP1, 42, 0 },
- { DBGBUS_SSPP1, 42, 1 },
- { DBGBUS_SSPP1, 42, 2 },
- { DBGBUS_SSPP1, 42, 3 },
- { DBGBUS_SSPP1, 42, 4 },
- { DBGBUS_SSPP1, 42, 5 },
- { DBGBUS_SSPP1, 42, 6 },
- { DBGBUS_SSPP1, 42, 7 },
-
- { DBGBUS_SSPP1, 44, 0 },
- { DBGBUS_SSPP1, 44, 1 },
- { DBGBUS_SSPP1, 44, 2 },
- { DBGBUS_SSPP1, 44, 3 },
- { DBGBUS_SSPP1, 44, 4 },
- { DBGBUS_SSPP1, 44, 5 },
- { DBGBUS_SSPP1, 44, 6 },
- { DBGBUS_SSPP1, 44, 7 },
-
- { DBGBUS_SSPP1, 45, 0 },
- { DBGBUS_SSPP1, 45, 1 },
- { DBGBUS_SSPP1, 45, 2 },
- { DBGBUS_SSPP1, 45, 3 },
- { DBGBUS_SSPP1, 45, 4 },
- { DBGBUS_SSPP1, 45, 5 },
- { DBGBUS_SSPP1, 45, 6 },
- { DBGBUS_SSPP1, 45, 7 },
-
- /* cursor 1 */
- { DBGBUS_SSPP1, 80, 0 },
- { DBGBUS_SSPP1, 80, 1 },
- { DBGBUS_SSPP1, 80, 2 },
- { DBGBUS_SSPP1, 80, 3 },
- { DBGBUS_SSPP1, 80, 4 },
- { DBGBUS_SSPP1, 80, 5 },
- { DBGBUS_SSPP1, 80, 6 },
- { DBGBUS_SSPP1, 80, 7 },
-
- { DBGBUS_SSPP1, 81, 0 },
- { DBGBUS_SSPP1, 81, 1 },
- { DBGBUS_SSPP1, 81, 2 },
- { DBGBUS_SSPP1, 81, 3 },
- { DBGBUS_SSPP1, 81, 4 },
- { DBGBUS_SSPP1, 81, 5 },
- { DBGBUS_SSPP1, 81, 6 },
- { DBGBUS_SSPP1, 81, 7 },
-
- { DBGBUS_SSPP1, 82, 0 },
- { DBGBUS_SSPP1, 82, 1 },
- { DBGBUS_SSPP1, 82, 2 },
- { DBGBUS_SSPP1, 82, 3 },
- { DBGBUS_SSPP1, 82, 4 },
- { DBGBUS_SSPP1, 82, 5 },
- { DBGBUS_SSPP1, 82, 6 },
- { DBGBUS_SSPP1, 82, 7 },
-
- { DBGBUS_SSPP1, 83, 0 },
- { DBGBUS_SSPP1, 83, 1 },
- { DBGBUS_SSPP1, 83, 2 },
- { DBGBUS_SSPP1, 83, 3 },
- { DBGBUS_SSPP1, 83, 4 },
- { DBGBUS_SSPP1, 83, 5 },
- { DBGBUS_SSPP1, 83, 6 },
- { DBGBUS_SSPP1, 83, 7 },
-
- { DBGBUS_SSPP1, 84, 0 },
- { DBGBUS_SSPP1, 84, 1 },
- { DBGBUS_SSPP1, 84, 2 },
- { DBGBUS_SSPP1, 84, 3 },
- { DBGBUS_SSPP1, 84, 4 },
- { DBGBUS_SSPP1, 84, 5 },
- { DBGBUS_SSPP1, 84, 6 },
- { DBGBUS_SSPP1, 84, 7 },
-
- /* dspp */
- { DBGBUS_DSPP, 13, 0 },
- { DBGBUS_DSPP, 19, 0 },
- { DBGBUS_DSPP, 14, 0 },
- { DBGBUS_DSPP, 14, 1 },
- { DBGBUS_DSPP, 14, 3 },
- { DBGBUS_DSPP, 20, 0 },
- { DBGBUS_DSPP, 20, 1 },
- { DBGBUS_DSPP, 20, 3 },
-
- /* ppb_0 */
- { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
- /* ppb_1 */
- { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
- /* lm_lut */
- { DBGBUS_DSPP, 109, 0 },
- { DBGBUS_DSPP, 105, 0 },
- { DBGBUS_DSPP, 103, 0 },
-
- /* tear-check */
- { DBGBUS_PERIPH, 63, 0 },
- { DBGBUS_PERIPH, 64, 0 },
- { DBGBUS_PERIPH, 65, 0 },
- { DBGBUS_PERIPH, 73, 0 },
- { DBGBUS_PERIPH, 74, 0 },
-
- /* crossbar */
- { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
- /* rotator */
- { DBGBUS_DSPP, 9, 0},
-
- /* blend */
- /* LM0 */
- { DBGBUS_DSPP, 63, 0},
- { DBGBUS_DSPP, 63, 1},
- { DBGBUS_DSPP, 63, 2},
- { DBGBUS_DSPP, 63, 3},
- { DBGBUS_DSPP, 63, 4},
- { DBGBUS_DSPP, 63, 5},
- { DBGBUS_DSPP, 63, 6},
- { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 64, 0},
- { DBGBUS_DSPP, 64, 1},
- { DBGBUS_DSPP, 64, 2},
- { DBGBUS_DSPP, 64, 3},
- { DBGBUS_DSPP, 64, 4},
- { DBGBUS_DSPP, 64, 5},
- { DBGBUS_DSPP, 64, 6},
- { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 65, 0},
- { DBGBUS_DSPP, 65, 1},
- { DBGBUS_DSPP, 65, 2},
- { DBGBUS_DSPP, 65, 3},
- { DBGBUS_DSPP, 65, 4},
- { DBGBUS_DSPP, 65, 5},
- { DBGBUS_DSPP, 65, 6},
- { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 66, 0},
- { DBGBUS_DSPP, 66, 1},
- { DBGBUS_DSPP, 66, 2},
- { DBGBUS_DSPP, 66, 3},
- { DBGBUS_DSPP, 66, 4},
- { DBGBUS_DSPP, 66, 5},
- { DBGBUS_DSPP, 66, 6},
- { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 67, 0},
- { DBGBUS_DSPP, 67, 1},
- { DBGBUS_DSPP, 67, 2},
- { DBGBUS_DSPP, 67, 3},
- { DBGBUS_DSPP, 67, 4},
- { DBGBUS_DSPP, 67, 5},
- { DBGBUS_DSPP, 67, 6},
- { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 68, 0},
- { DBGBUS_DSPP, 68, 1},
- { DBGBUS_DSPP, 68, 2},
- { DBGBUS_DSPP, 68, 3},
- { DBGBUS_DSPP, 68, 4},
- { DBGBUS_DSPP, 68, 5},
- { DBGBUS_DSPP, 68, 6},
- { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 69, 0},
- { DBGBUS_DSPP, 69, 1},
- { DBGBUS_DSPP, 69, 2},
- { DBGBUS_DSPP, 69, 3},
- { DBGBUS_DSPP, 69, 4},
- { DBGBUS_DSPP, 69, 5},
- { DBGBUS_DSPP, 69, 6},
- { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
- /* LM1 */
- { DBGBUS_DSPP, 70, 0},
- { DBGBUS_DSPP, 70, 1},
- { DBGBUS_DSPP, 70, 2},
- { DBGBUS_DSPP, 70, 3},
- { DBGBUS_DSPP, 70, 4},
- { DBGBUS_DSPP, 70, 5},
- { DBGBUS_DSPP, 70, 6},
- { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 71, 0},
- { DBGBUS_DSPP, 71, 1},
- { DBGBUS_DSPP, 71, 2},
- { DBGBUS_DSPP, 71, 3},
- { DBGBUS_DSPP, 71, 4},
- { DBGBUS_DSPP, 71, 5},
- { DBGBUS_DSPP, 71, 6},
- { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 72, 0},
- { DBGBUS_DSPP, 72, 1},
- { DBGBUS_DSPP, 72, 2},
- { DBGBUS_DSPP, 72, 3},
- { DBGBUS_DSPP, 72, 4},
- { DBGBUS_DSPP, 72, 5},
- { DBGBUS_DSPP, 72, 6},
- { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 73, 0},
- { DBGBUS_DSPP, 73, 1},
- { DBGBUS_DSPP, 73, 2},
- { DBGBUS_DSPP, 73, 3},
- { DBGBUS_DSPP, 73, 4},
- { DBGBUS_DSPP, 73, 5},
- { DBGBUS_DSPP, 73, 6},
- { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 74, 0},
- { DBGBUS_DSPP, 74, 1},
- { DBGBUS_DSPP, 74, 2},
- { DBGBUS_DSPP, 74, 3},
- { DBGBUS_DSPP, 74, 4},
- { DBGBUS_DSPP, 74, 5},
- { DBGBUS_DSPP, 74, 6},
- { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 75, 0},
- { DBGBUS_DSPP, 75, 1},
- { DBGBUS_DSPP, 75, 2},
- { DBGBUS_DSPP, 75, 3},
- { DBGBUS_DSPP, 75, 4},
- { DBGBUS_DSPP, 75, 5},
- { DBGBUS_DSPP, 75, 6},
- { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 76, 0},
- { DBGBUS_DSPP, 76, 1},
- { DBGBUS_DSPP, 76, 2},
- { DBGBUS_DSPP, 76, 3},
- { DBGBUS_DSPP, 76, 4},
- { DBGBUS_DSPP, 76, 5},
- { DBGBUS_DSPP, 76, 6},
- { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
- /* LM2 */
- { DBGBUS_DSPP, 77, 0},
- { DBGBUS_DSPP, 77, 1},
- { DBGBUS_DSPP, 77, 2},
- { DBGBUS_DSPP, 77, 3},
- { DBGBUS_DSPP, 77, 4},
- { DBGBUS_DSPP, 77, 5},
- { DBGBUS_DSPP, 77, 6},
- { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 78, 0},
- { DBGBUS_DSPP, 78, 1},
- { DBGBUS_DSPP, 78, 2},
- { DBGBUS_DSPP, 78, 3},
- { DBGBUS_DSPP, 78, 4},
- { DBGBUS_DSPP, 78, 5},
- { DBGBUS_DSPP, 78, 6},
- { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 79, 0},
- { DBGBUS_DSPP, 79, 1},
- { DBGBUS_DSPP, 79, 2},
- { DBGBUS_DSPP, 79, 3},
- { DBGBUS_DSPP, 79, 4},
- { DBGBUS_DSPP, 79, 5},
- { DBGBUS_DSPP, 79, 6},
- { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 80, 0},
- { DBGBUS_DSPP, 80, 1},
- { DBGBUS_DSPP, 80, 2},
- { DBGBUS_DSPP, 80, 3},
- { DBGBUS_DSPP, 80, 4},
- { DBGBUS_DSPP, 80, 5},
- { DBGBUS_DSPP, 80, 6},
- { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 81, 0},
- { DBGBUS_DSPP, 81, 1},
- { DBGBUS_DSPP, 81, 2},
- { DBGBUS_DSPP, 81, 3},
- { DBGBUS_DSPP, 81, 4},
- { DBGBUS_DSPP, 81, 5},
- { DBGBUS_DSPP, 81, 6},
- { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 82, 0},
- { DBGBUS_DSPP, 82, 1},
- { DBGBUS_DSPP, 82, 2},
- { DBGBUS_DSPP, 82, 3},
- { DBGBUS_DSPP, 82, 4},
- { DBGBUS_DSPP, 82, 5},
- { DBGBUS_DSPP, 82, 6},
- { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 83, 0},
- { DBGBUS_DSPP, 83, 1},
- { DBGBUS_DSPP, 83, 2},
- { DBGBUS_DSPP, 83, 3},
- { DBGBUS_DSPP, 83, 4},
- { DBGBUS_DSPP, 83, 5},
- { DBGBUS_DSPP, 83, 6},
- { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
- /* csc */
- { DBGBUS_SSPP0, 7, 0},
- { DBGBUS_SSPP0, 7, 1},
- { DBGBUS_SSPP0, 27, 0},
- { DBGBUS_SSPP0, 27, 1},
- { DBGBUS_SSPP1, 7, 0},
- { DBGBUS_SSPP1, 7, 1},
- { DBGBUS_SSPP1, 27, 0},
- { DBGBUS_SSPP1, 27, 1},
-
- /* pcc */
- { DBGBUS_SSPP0, 3, 3},
- { DBGBUS_SSPP0, 23, 3},
- { DBGBUS_SSPP0, 33, 3},
- { DBGBUS_SSPP0, 43, 3},
- { DBGBUS_SSPP1, 3, 3},
- { DBGBUS_SSPP1, 23, 3},
- { DBGBUS_SSPP1, 33, 3},
- { DBGBUS_SSPP1, 43, 3},
-
- /* spa */
- { DBGBUS_SSPP0, 8, 0},
- { DBGBUS_SSPP0, 28, 0},
- { DBGBUS_SSPP1, 8, 0},
- { DBGBUS_SSPP1, 28, 0},
- { DBGBUS_DSPP, 13, 0},
- { DBGBUS_DSPP, 19, 0},
-
- /* igc */
- { DBGBUS_SSPP0, 9, 0},
- { DBGBUS_SSPP0, 9, 1},
- { DBGBUS_SSPP0, 9, 3},
- { DBGBUS_SSPP0, 29, 0},
- { DBGBUS_SSPP0, 29, 1},
- { DBGBUS_SSPP0, 29, 3},
- { DBGBUS_SSPP0, 17, 0},
- { DBGBUS_SSPP0, 17, 1},
- { DBGBUS_SSPP0, 17, 3},
- { DBGBUS_SSPP0, 37, 0},
- { DBGBUS_SSPP0, 37, 1},
- { DBGBUS_SSPP0, 37, 3},
- { DBGBUS_SSPP0, 46, 0},
- { DBGBUS_SSPP0, 46, 1},
- { DBGBUS_SSPP0, 46, 3},
-
- { DBGBUS_SSPP1, 9, 0},
- { DBGBUS_SSPP1, 9, 1},
- { DBGBUS_SSPP1, 9, 3},
- { DBGBUS_SSPP1, 29, 0},
- { DBGBUS_SSPP1, 29, 1},
- { DBGBUS_SSPP1, 29, 3},
- { DBGBUS_SSPP1, 17, 0},
- { DBGBUS_SSPP1, 17, 1},
- { DBGBUS_SSPP1, 17, 3},
- { DBGBUS_SSPP1, 37, 0},
- { DBGBUS_SSPP1, 37, 1},
- { DBGBUS_SSPP1, 37, 3},
- { DBGBUS_SSPP1, 46, 0},
- { DBGBUS_SSPP1, 46, 1},
- { DBGBUS_SSPP1, 46, 3},
-
- { DBGBUS_DSPP, 14, 0},
- { DBGBUS_DSPP, 14, 1},
- { DBGBUS_DSPP, 14, 3},
- { DBGBUS_DSPP, 20, 0},
- { DBGBUS_DSPP, 20, 1},
- { DBGBUS_DSPP, 20, 3},
-
- { DBGBUS_PERIPH, 60, 0},
-};
-
-static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
-
- /* Unpack 0 sspp 0*/
- { DBGBUS_SSPP0, 50, 2 },
- { DBGBUS_SSPP0, 60, 2 },
- { DBGBUS_SSPP0, 70, 2 },
-
- /* Unpack 0 sspp 1*/
- { DBGBUS_SSPP1, 50, 2 },
- { DBGBUS_SSPP1, 60, 2 },
- { DBGBUS_SSPP1, 70, 2 },
-
- /* scheduler */
- { DBGBUS_DSPP, 130, 0 },
- { DBGBUS_DSPP, 130, 1 },
- { DBGBUS_DSPP, 130, 2 },
- { DBGBUS_DSPP, 130, 3 },
- { DBGBUS_DSPP, 130, 4 },
- { DBGBUS_DSPP, 130, 5 },
-
- /* qseed */
- { DBGBUS_SSPP0, 6, 0},
- { DBGBUS_SSPP0, 6, 1},
- { DBGBUS_SSPP0, 26, 0},
- { DBGBUS_SSPP0, 26, 1},
- { DBGBUS_SSPP1, 6, 0},
- { DBGBUS_SSPP1, 6, 1},
- { DBGBUS_SSPP1, 26, 0},
- { DBGBUS_SSPP1, 26, 1},
-
- /* scale */
- { DBGBUS_SSPP0, 16, 0},
- { DBGBUS_SSPP0, 16, 1},
- { DBGBUS_SSPP0, 36, 0},
- { DBGBUS_SSPP0, 36, 1},
- { DBGBUS_SSPP1, 16, 0},
- { DBGBUS_SSPP1, 16, 1},
- { DBGBUS_SSPP1, 36, 0},
- { DBGBUS_SSPP1, 36, 1},
-
- /* fetch sspp0 */
-
- /* vig 0 */
- { DBGBUS_SSPP0, 0, 0 },
- { DBGBUS_SSPP0, 0, 1 },
- { DBGBUS_SSPP0, 0, 2 },
- { DBGBUS_SSPP0, 0, 3 },
- { DBGBUS_SSPP0, 0, 4 },
- { DBGBUS_SSPP0, 0, 5 },
- { DBGBUS_SSPP0, 0, 6 },
- { DBGBUS_SSPP0, 0, 7 },
-
- { DBGBUS_SSPP0, 1, 0 },
- { DBGBUS_SSPP0, 1, 1 },
- { DBGBUS_SSPP0, 1, 2 },
- { DBGBUS_SSPP0, 1, 3 },
- { DBGBUS_SSPP0, 1, 4 },
- { DBGBUS_SSPP0, 1, 5 },
- { DBGBUS_SSPP0, 1, 6 },
- { DBGBUS_SSPP0, 1, 7 },
-
- { DBGBUS_SSPP0, 2, 0 },
- { DBGBUS_SSPP0, 2, 1 },
- { DBGBUS_SSPP0, 2, 2 },
- { DBGBUS_SSPP0, 2, 3 },
- { DBGBUS_SSPP0, 2, 4 },
- { DBGBUS_SSPP0, 2, 5 },
- { DBGBUS_SSPP0, 2, 6 },
- { DBGBUS_SSPP0, 2, 7 },
-
- { DBGBUS_SSPP0, 4, 0 },
- { DBGBUS_SSPP0, 4, 1 },
- { DBGBUS_SSPP0, 4, 2 },
- { DBGBUS_SSPP0, 4, 3 },
- { DBGBUS_SSPP0, 4, 4 },
- { DBGBUS_SSPP0, 4, 5 },
- { DBGBUS_SSPP0, 4, 6 },
- { DBGBUS_SSPP0, 4, 7 },
-
- { DBGBUS_SSPP0, 5, 0 },
- { DBGBUS_SSPP0, 5, 1 },
- { DBGBUS_SSPP0, 5, 2 },
- { DBGBUS_SSPP0, 5, 3 },
- { DBGBUS_SSPP0, 5, 4 },
- { DBGBUS_SSPP0, 5, 5 },
- { DBGBUS_SSPP0, 5, 6 },
- { DBGBUS_SSPP0, 5, 7 },
-
- /* vig 2 */
- { DBGBUS_SSPP0, 20, 0 },
- { DBGBUS_SSPP0, 20, 1 },
- { DBGBUS_SSPP0, 20, 2 },
- { DBGBUS_SSPP0, 20, 3 },
- { DBGBUS_SSPP0, 20, 4 },
- { DBGBUS_SSPP0, 20, 5 },
- { DBGBUS_SSPP0, 20, 6 },
- { DBGBUS_SSPP0, 20, 7 },
-
- { DBGBUS_SSPP0, 21, 0 },
- { DBGBUS_SSPP0, 21, 1 },
- { DBGBUS_SSPP0, 21, 2 },
- { DBGBUS_SSPP0, 21, 3 },
- { DBGBUS_SSPP0, 21, 4 },
- { DBGBUS_SSPP0, 21, 5 },
- { DBGBUS_SSPP0, 21, 6 },
- { DBGBUS_SSPP0, 21, 7 },
-
- { DBGBUS_SSPP0, 22, 0 },
- { DBGBUS_SSPP0, 22, 1 },
- { DBGBUS_SSPP0, 22, 2 },
- { DBGBUS_SSPP0, 22, 3 },
- { DBGBUS_SSPP0, 22, 4 },
- { DBGBUS_SSPP0, 22, 5 },
- { DBGBUS_SSPP0, 22, 6 },
- { DBGBUS_SSPP0, 22, 7 },
-
- { DBGBUS_SSPP0, 24, 0 },
- { DBGBUS_SSPP0, 24, 1 },
- { DBGBUS_SSPP0, 24, 2 },
- { DBGBUS_SSPP0, 24, 3 },
- { DBGBUS_SSPP0, 24, 4 },
- { DBGBUS_SSPP0, 24, 5 },
- { DBGBUS_SSPP0, 24, 6 },
- { DBGBUS_SSPP0, 24, 7 },
-
- { DBGBUS_SSPP0, 25, 0 },
- { DBGBUS_SSPP0, 25, 1 },
- { DBGBUS_SSPP0, 25, 2 },
- { DBGBUS_SSPP0, 25, 3 },
- { DBGBUS_SSPP0, 25, 4 },
- { DBGBUS_SSPP0, 25, 5 },
- { DBGBUS_SSPP0, 25, 6 },
- { DBGBUS_SSPP0, 25, 7 },
-
- /* dma 2 */
- { DBGBUS_SSPP0, 30, 0 },
- { DBGBUS_SSPP0, 30, 1 },
- { DBGBUS_SSPP0, 30, 2 },
- { DBGBUS_SSPP0, 30, 3 },
- { DBGBUS_SSPP0, 30, 4 },
- { DBGBUS_SSPP0, 30, 5 },
- { DBGBUS_SSPP0, 30, 6 },
- { DBGBUS_SSPP0, 30, 7 },
-
- { DBGBUS_SSPP0, 31, 0 },
- { DBGBUS_SSPP0, 31, 1 },
- { DBGBUS_SSPP0, 31, 2 },
- { DBGBUS_SSPP0, 31, 3 },
- { DBGBUS_SSPP0, 31, 4 },
- { DBGBUS_SSPP0, 31, 5 },
- { DBGBUS_SSPP0, 31, 6 },
- { DBGBUS_SSPP0, 31, 7 },
-
- { DBGBUS_SSPP0, 32, 0 },
- { DBGBUS_SSPP0, 32, 1 },
- { DBGBUS_SSPP0, 32, 2 },
- { DBGBUS_SSPP0, 32, 3 },
- { DBGBUS_SSPP0, 32, 4 },
- { DBGBUS_SSPP0, 32, 5 },
- { DBGBUS_SSPP0, 32, 6 },
- { DBGBUS_SSPP0, 32, 7 },
-
- { DBGBUS_SSPP0, 33, 0 },
- { DBGBUS_SSPP0, 33, 1 },
- { DBGBUS_SSPP0, 33, 2 },
- { DBGBUS_SSPP0, 33, 3 },
- { DBGBUS_SSPP0, 33, 4 },
- { DBGBUS_SSPP0, 33, 5 },
- { DBGBUS_SSPP0, 33, 6 },
- { DBGBUS_SSPP0, 33, 7 },
-
- { DBGBUS_SSPP0, 34, 0 },
- { DBGBUS_SSPP0, 34, 1 },
- { DBGBUS_SSPP0, 34, 2 },
- { DBGBUS_SSPP0, 34, 3 },
- { DBGBUS_SSPP0, 34, 4 },
- { DBGBUS_SSPP0, 34, 5 },
- { DBGBUS_SSPP0, 34, 6 },
- { DBGBUS_SSPP0, 34, 7 },
-
- { DBGBUS_SSPP0, 35, 0 },
- { DBGBUS_SSPP0, 35, 1 },
- { DBGBUS_SSPP0, 35, 2 },
- { DBGBUS_SSPP0, 35, 3 },
-
- /* dma 0 */
- { DBGBUS_SSPP0, 40, 0 },
- { DBGBUS_SSPP0, 40, 1 },
- { DBGBUS_SSPP0, 40, 2 },
- { DBGBUS_SSPP0, 40, 3 },
- { DBGBUS_SSPP0, 40, 4 },
- { DBGBUS_SSPP0, 40, 5 },
- { DBGBUS_SSPP0, 40, 6 },
- { DBGBUS_SSPP0, 40, 7 },
-
- { DBGBUS_SSPP0, 41, 0 },
- { DBGBUS_SSPP0, 41, 1 },
- { DBGBUS_SSPP0, 41, 2 },
- { DBGBUS_SSPP0, 41, 3 },
- { DBGBUS_SSPP0, 41, 4 },
- { DBGBUS_SSPP0, 41, 5 },
- { DBGBUS_SSPP0, 41, 6 },
- { DBGBUS_SSPP0, 41, 7 },
-
- { DBGBUS_SSPP0, 42, 0 },
- { DBGBUS_SSPP0, 42, 1 },
- { DBGBUS_SSPP0, 42, 2 },
- { DBGBUS_SSPP0, 42, 3 },
- { DBGBUS_SSPP0, 42, 4 },
- { DBGBUS_SSPP0, 42, 5 },
- { DBGBUS_SSPP0, 42, 6 },
- { DBGBUS_SSPP0, 42, 7 },
-
- { DBGBUS_SSPP0, 44, 0 },
- { DBGBUS_SSPP0, 44, 1 },
- { DBGBUS_SSPP0, 44, 2 },
- { DBGBUS_SSPP0, 44, 3 },
- { DBGBUS_SSPP0, 44, 4 },
- { DBGBUS_SSPP0, 44, 5 },
- { DBGBUS_SSPP0, 44, 6 },
- { DBGBUS_SSPP0, 44, 7 },
-
- { DBGBUS_SSPP0, 45, 0 },
- { DBGBUS_SSPP0, 45, 1 },
- { DBGBUS_SSPP0, 45, 2 },
- { DBGBUS_SSPP0, 45, 3 },
- { DBGBUS_SSPP0, 45, 4 },
- { DBGBUS_SSPP0, 45, 5 },
- { DBGBUS_SSPP0, 45, 6 },
- { DBGBUS_SSPP0, 45, 7 },
-
- /* fetch sspp1 */
- /* vig 1 */
- { DBGBUS_SSPP1, 0, 0 },
- { DBGBUS_SSPP1, 0, 1 },
- { DBGBUS_SSPP1, 0, 2 },
- { DBGBUS_SSPP1, 0, 3 },
- { DBGBUS_SSPP1, 0, 4 },
- { DBGBUS_SSPP1, 0, 5 },
- { DBGBUS_SSPP1, 0, 6 },
- { DBGBUS_SSPP1, 0, 7 },
-
- { DBGBUS_SSPP1, 1, 0 },
- { DBGBUS_SSPP1, 1, 1 },
- { DBGBUS_SSPP1, 1, 2 },
- { DBGBUS_SSPP1, 1, 3 },
- { DBGBUS_SSPP1, 1, 4 },
- { DBGBUS_SSPP1, 1, 5 },
- { DBGBUS_SSPP1, 1, 6 },
- { DBGBUS_SSPP1, 1, 7 },
-
- { DBGBUS_SSPP1, 2, 0 },
- { DBGBUS_SSPP1, 2, 1 },
- { DBGBUS_SSPP1, 2, 2 },
- { DBGBUS_SSPP1, 2, 3 },
- { DBGBUS_SSPP1, 2, 4 },
- { DBGBUS_SSPP1, 2, 5 },
- { DBGBUS_SSPP1, 2, 6 },
- { DBGBUS_SSPP1, 2, 7 },
-
- { DBGBUS_SSPP1, 4, 0 },
- { DBGBUS_SSPP1, 4, 1 },
- { DBGBUS_SSPP1, 4, 2 },
- { DBGBUS_SSPP1, 4, 3 },
- { DBGBUS_SSPP1, 4, 4 },
- { DBGBUS_SSPP1, 4, 5 },
- { DBGBUS_SSPP1, 4, 6 },
- { DBGBUS_SSPP1, 4, 7 },
-
- { DBGBUS_SSPP1, 5, 0 },
- { DBGBUS_SSPP1, 5, 1 },
- { DBGBUS_SSPP1, 5, 2 },
- { DBGBUS_SSPP1, 5, 3 },
- { DBGBUS_SSPP1, 5, 4 },
- { DBGBUS_SSPP1, 5, 5 },
- { DBGBUS_SSPP1, 5, 6 },
- { DBGBUS_SSPP1, 5, 7 },
-
- /* vig 3 */
- { DBGBUS_SSPP1, 20, 0 },
- { DBGBUS_SSPP1, 20, 1 },
- { DBGBUS_SSPP1, 20, 2 },
- { DBGBUS_SSPP1, 20, 3 },
- { DBGBUS_SSPP1, 20, 4 },
- { DBGBUS_SSPP1, 20, 5 },
- { DBGBUS_SSPP1, 20, 6 },
- { DBGBUS_SSPP1, 20, 7 },
-
- { DBGBUS_SSPP1, 21, 0 },
- { DBGBUS_SSPP1, 21, 1 },
- { DBGBUS_SSPP1, 21, 2 },
- { DBGBUS_SSPP1, 21, 3 },
- { DBGBUS_SSPP1, 21, 4 },
- { DBGBUS_SSPP1, 21, 5 },
- { DBGBUS_SSPP1, 21, 6 },
- { DBGBUS_SSPP1, 21, 7 },
-
- { DBGBUS_SSPP1, 22, 0 },
- { DBGBUS_SSPP1, 22, 1 },
- { DBGBUS_SSPP1, 22, 2 },
- { DBGBUS_SSPP1, 22, 3 },
- { DBGBUS_SSPP1, 22, 4 },
- { DBGBUS_SSPP1, 22, 5 },
- { DBGBUS_SSPP1, 22, 6 },
- { DBGBUS_SSPP1, 22, 7 },
-
- { DBGBUS_SSPP1, 24, 0 },
- { DBGBUS_SSPP1, 24, 1 },
- { DBGBUS_SSPP1, 24, 2 },
- { DBGBUS_SSPP1, 24, 3 },
- { DBGBUS_SSPP1, 24, 4 },
- { DBGBUS_SSPP1, 24, 5 },
- { DBGBUS_SSPP1, 24, 6 },
- { DBGBUS_SSPP1, 24, 7 },
-
- { DBGBUS_SSPP1, 25, 0 },
- { DBGBUS_SSPP1, 25, 1 },
- { DBGBUS_SSPP1, 25, 2 },
- { DBGBUS_SSPP1, 25, 3 },
- { DBGBUS_SSPP1, 25, 4 },
- { DBGBUS_SSPP1, 25, 5 },
- { DBGBUS_SSPP1, 25, 6 },
- { DBGBUS_SSPP1, 25, 7 },
-
- /* dma 3 */
- { DBGBUS_SSPP1, 30, 0 },
- { DBGBUS_SSPP1, 30, 1 },
- { DBGBUS_SSPP1, 30, 2 },
- { DBGBUS_SSPP1, 30, 3 },
- { DBGBUS_SSPP1, 30, 4 },
- { DBGBUS_SSPP1, 30, 5 },
- { DBGBUS_SSPP1, 30, 6 },
- { DBGBUS_SSPP1, 30, 7 },
-
- { DBGBUS_SSPP1, 31, 0 },
- { DBGBUS_SSPP1, 31, 1 },
- { DBGBUS_SSPP1, 31, 2 },
- { DBGBUS_SSPP1, 31, 3 },
- { DBGBUS_SSPP1, 31, 4 },
- { DBGBUS_SSPP1, 31, 5 },
- { DBGBUS_SSPP1, 31, 6 },
- { DBGBUS_SSPP1, 31, 7 },
-
- { DBGBUS_SSPP1, 32, 0 },
- { DBGBUS_SSPP1, 32, 1 },
- { DBGBUS_SSPP1, 32, 2 },
- { DBGBUS_SSPP1, 32, 3 },
- { DBGBUS_SSPP1, 32, 4 },
- { DBGBUS_SSPP1, 32, 5 },
- { DBGBUS_SSPP1, 32, 6 },
- { DBGBUS_SSPP1, 32, 7 },
-
- { DBGBUS_SSPP1, 33, 0 },
- { DBGBUS_SSPP1, 33, 1 },
- { DBGBUS_SSPP1, 33, 2 },
- { DBGBUS_SSPP1, 33, 3 },
- { DBGBUS_SSPP1, 33, 4 },
- { DBGBUS_SSPP1, 33, 5 },
- { DBGBUS_SSPP1, 33, 6 },
- { DBGBUS_SSPP1, 33, 7 },
-
- { DBGBUS_SSPP1, 34, 0 },
- { DBGBUS_SSPP1, 34, 1 },
- { DBGBUS_SSPP1, 34, 2 },
- { DBGBUS_SSPP1, 34, 3 },
- { DBGBUS_SSPP1, 34, 4 },
- { DBGBUS_SSPP1, 34, 5 },
- { DBGBUS_SSPP1, 34, 6 },
- { DBGBUS_SSPP1, 34, 7 },
-
- { DBGBUS_SSPP1, 35, 0 },
- { DBGBUS_SSPP1, 35, 1 },
- { DBGBUS_SSPP1, 35, 2 },
-
- /* dma 1 */
- { DBGBUS_SSPP1, 40, 0 },
- { DBGBUS_SSPP1, 40, 1 },
- { DBGBUS_SSPP1, 40, 2 },
- { DBGBUS_SSPP1, 40, 3 },
- { DBGBUS_SSPP1, 40, 4 },
- { DBGBUS_SSPP1, 40, 5 },
- { DBGBUS_SSPP1, 40, 6 },
- { DBGBUS_SSPP1, 40, 7 },
-
- { DBGBUS_SSPP1, 41, 0 },
- { DBGBUS_SSPP1, 41, 1 },
- { DBGBUS_SSPP1, 41, 2 },
- { DBGBUS_SSPP1, 41, 3 },
- { DBGBUS_SSPP1, 41, 4 },
- { DBGBUS_SSPP1, 41, 5 },
- { DBGBUS_SSPP1, 41, 6 },
- { DBGBUS_SSPP1, 41, 7 },
-
- { DBGBUS_SSPP1, 42, 0 },
- { DBGBUS_SSPP1, 42, 1 },
- { DBGBUS_SSPP1, 42, 2 },
- { DBGBUS_SSPP1, 42, 3 },
- { DBGBUS_SSPP1, 42, 4 },
- { DBGBUS_SSPP1, 42, 5 },
- { DBGBUS_SSPP1, 42, 6 },
- { DBGBUS_SSPP1, 42, 7 },
-
- { DBGBUS_SSPP1, 44, 0 },
- { DBGBUS_SSPP1, 44, 1 },
- { DBGBUS_SSPP1, 44, 2 },
- { DBGBUS_SSPP1, 44, 3 },
- { DBGBUS_SSPP1, 44, 4 },
- { DBGBUS_SSPP1, 44, 5 },
- { DBGBUS_SSPP1, 44, 6 },
- { DBGBUS_SSPP1, 44, 7 },
-
- { DBGBUS_SSPP1, 45, 0 },
- { DBGBUS_SSPP1, 45, 1 },
- { DBGBUS_SSPP1, 45, 2 },
- { DBGBUS_SSPP1, 45, 3 },
- { DBGBUS_SSPP1, 45, 4 },
- { DBGBUS_SSPP1, 45, 5 },
- { DBGBUS_SSPP1, 45, 6 },
- { DBGBUS_SSPP1, 45, 7 },
-
- /* dspp */
- { DBGBUS_DSPP, 13, 0 },
- { DBGBUS_DSPP, 19, 0 },
- { DBGBUS_DSPP, 14, 0 },
- { DBGBUS_DSPP, 14, 1 },
- { DBGBUS_DSPP, 14, 3 },
- { DBGBUS_DSPP, 20, 0 },
- { DBGBUS_DSPP, 20, 1 },
- { DBGBUS_DSPP, 20, 3 },
-
- /* ppb_0 */
- { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
- { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
-
- /* ppb_1 */
- { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
- { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
-
- /* lm_lut */
- { DBGBUS_DSPP, 109, 0 },
- { DBGBUS_DSPP, 105, 0 },
- { DBGBUS_DSPP, 103, 0 },
-
- /* crossbar */
- { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
-
- /* rotator */
- { DBGBUS_DSPP, 9, 0},
-
- /* blend */
- /* LM0 */
- { DBGBUS_DSPP, 63, 1},
- { DBGBUS_DSPP, 63, 2},
- { DBGBUS_DSPP, 63, 3},
- { DBGBUS_DSPP, 63, 4},
- { DBGBUS_DSPP, 63, 5},
- { DBGBUS_DSPP, 63, 6},
- { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 64, 1},
- { DBGBUS_DSPP, 64, 2},
- { DBGBUS_DSPP, 64, 3},
- { DBGBUS_DSPP, 64, 4},
- { DBGBUS_DSPP, 64, 5},
- { DBGBUS_DSPP, 64, 6},
- { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 65, 1},
- { DBGBUS_DSPP, 65, 2},
- { DBGBUS_DSPP, 65, 3},
- { DBGBUS_DSPP, 65, 4},
- { DBGBUS_DSPP, 65, 5},
- { DBGBUS_DSPP, 65, 6},
- { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 66, 1},
- { DBGBUS_DSPP, 66, 2},
- { DBGBUS_DSPP, 66, 3},
- { DBGBUS_DSPP, 66, 4},
- { DBGBUS_DSPP, 66, 5},
- { DBGBUS_DSPP, 66, 6},
- { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 67, 1},
- { DBGBUS_DSPP, 67, 2},
- { DBGBUS_DSPP, 67, 3},
- { DBGBUS_DSPP, 67, 4},
- { DBGBUS_DSPP, 67, 5},
- { DBGBUS_DSPP, 67, 6},
- { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 68, 1},
- { DBGBUS_DSPP, 68, 2},
- { DBGBUS_DSPP, 68, 3},
- { DBGBUS_DSPP, 68, 4},
- { DBGBUS_DSPP, 68, 5},
- { DBGBUS_DSPP, 68, 6},
- { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 69, 1},
- { DBGBUS_DSPP, 69, 2},
- { DBGBUS_DSPP, 69, 3},
- { DBGBUS_DSPP, 69, 4},
- { DBGBUS_DSPP, 69, 5},
- { DBGBUS_DSPP, 69, 6},
- { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 84, 1},
- { DBGBUS_DSPP, 84, 2},
- { DBGBUS_DSPP, 84, 3},
- { DBGBUS_DSPP, 84, 4},
- { DBGBUS_DSPP, 84, 5},
- { DBGBUS_DSPP, 84, 6},
- { DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 85, 1},
- { DBGBUS_DSPP, 85, 2},
- { DBGBUS_DSPP, 85, 3},
- { DBGBUS_DSPP, 85, 4},
- { DBGBUS_DSPP, 85, 5},
- { DBGBUS_DSPP, 85, 6},
- { DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 86, 1},
- { DBGBUS_DSPP, 86, 2},
- { DBGBUS_DSPP, 86, 3},
- { DBGBUS_DSPP, 86, 4},
- { DBGBUS_DSPP, 86, 5},
- { DBGBUS_DSPP, 86, 6},
- { DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 87, 1},
- { DBGBUS_DSPP, 87, 2},
- { DBGBUS_DSPP, 87, 3},
- { DBGBUS_DSPP, 87, 4},
- { DBGBUS_DSPP, 87, 5},
- { DBGBUS_DSPP, 87, 6},
- { DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
-
- /* LM1 */
- { DBGBUS_DSPP, 70, 1},
- { DBGBUS_DSPP, 70, 2},
- { DBGBUS_DSPP, 70, 3},
- { DBGBUS_DSPP, 70, 4},
- { DBGBUS_DSPP, 70, 5},
- { DBGBUS_DSPP, 70, 6},
- { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 71, 1},
- { DBGBUS_DSPP, 71, 2},
- { DBGBUS_DSPP, 71, 3},
- { DBGBUS_DSPP, 71, 4},
- { DBGBUS_DSPP, 71, 5},
- { DBGBUS_DSPP, 71, 6},
- { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 72, 1},
- { DBGBUS_DSPP, 72, 2},
- { DBGBUS_DSPP, 72, 3},
- { DBGBUS_DSPP, 72, 4},
- { DBGBUS_DSPP, 72, 5},
- { DBGBUS_DSPP, 72, 6},
- { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 73, 1},
- { DBGBUS_DSPP, 73, 2},
- { DBGBUS_DSPP, 73, 3},
- { DBGBUS_DSPP, 73, 4},
- { DBGBUS_DSPP, 73, 5},
- { DBGBUS_DSPP, 73, 6},
- { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 74, 1},
- { DBGBUS_DSPP, 74, 2},
- { DBGBUS_DSPP, 74, 3},
- { DBGBUS_DSPP, 74, 4},
- { DBGBUS_DSPP, 74, 5},
- { DBGBUS_DSPP, 74, 6},
- { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 75, 1},
- { DBGBUS_DSPP, 75, 2},
- { DBGBUS_DSPP, 75, 3},
- { DBGBUS_DSPP, 75, 4},
- { DBGBUS_DSPP, 75, 5},
- { DBGBUS_DSPP, 75, 6},
- { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 76, 1},
- { DBGBUS_DSPP, 76, 2},
- { DBGBUS_DSPP, 76, 3},
- { DBGBUS_DSPP, 76, 4},
- { DBGBUS_DSPP, 76, 5},
- { DBGBUS_DSPP, 76, 6},
- { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 88, 1},
- { DBGBUS_DSPP, 88, 2},
- { DBGBUS_DSPP, 88, 3},
- { DBGBUS_DSPP, 88, 4},
- { DBGBUS_DSPP, 88, 5},
- { DBGBUS_DSPP, 88, 6},
- { DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 89, 1},
- { DBGBUS_DSPP, 89, 2},
- { DBGBUS_DSPP, 89, 3},
- { DBGBUS_DSPP, 89, 4},
- { DBGBUS_DSPP, 89, 5},
- { DBGBUS_DSPP, 89, 6},
- { DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 90, 1},
- { DBGBUS_DSPP, 90, 2},
- { DBGBUS_DSPP, 90, 3},
- { DBGBUS_DSPP, 90, 4},
- { DBGBUS_DSPP, 90, 5},
- { DBGBUS_DSPP, 90, 6},
- { DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 91, 1},
- { DBGBUS_DSPP, 91, 2},
- { DBGBUS_DSPP, 91, 3},
- { DBGBUS_DSPP, 91, 4},
- { DBGBUS_DSPP, 91, 5},
- { DBGBUS_DSPP, 91, 6},
- { DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
-
- /* LM2 */
- { DBGBUS_DSPP, 77, 0},
- { DBGBUS_DSPP, 77, 1},
- { DBGBUS_DSPP, 77, 2},
- { DBGBUS_DSPP, 77, 3},
- { DBGBUS_DSPP, 77, 4},
- { DBGBUS_DSPP, 77, 5},
- { DBGBUS_DSPP, 77, 6},
- { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 78, 0},
- { DBGBUS_DSPP, 78, 1},
- { DBGBUS_DSPP, 78, 2},
- { DBGBUS_DSPP, 78, 3},
- { DBGBUS_DSPP, 78, 4},
- { DBGBUS_DSPP, 78, 5},
- { DBGBUS_DSPP, 78, 6},
- { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 79, 0},
- { DBGBUS_DSPP, 79, 1},
- { DBGBUS_DSPP, 79, 2},
- { DBGBUS_DSPP, 79, 3},
- { DBGBUS_DSPP, 79, 4},
- { DBGBUS_DSPP, 79, 5},
- { DBGBUS_DSPP, 79, 6},
- { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 80, 0},
- { DBGBUS_DSPP, 80, 1},
- { DBGBUS_DSPP, 80, 2},
- { DBGBUS_DSPP, 80, 3},
- { DBGBUS_DSPP, 80, 4},
- { DBGBUS_DSPP, 80, 5},
- { DBGBUS_DSPP, 80, 6},
- { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 81, 0},
- { DBGBUS_DSPP, 81, 1},
- { DBGBUS_DSPP, 81, 2},
- { DBGBUS_DSPP, 81, 3},
- { DBGBUS_DSPP, 81, 4},
- { DBGBUS_DSPP, 81, 5},
- { DBGBUS_DSPP, 81, 6},
- { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 82, 0},
- { DBGBUS_DSPP, 82, 1},
- { DBGBUS_DSPP, 82, 2},
- { DBGBUS_DSPP, 82, 3},
- { DBGBUS_DSPP, 82, 4},
- { DBGBUS_DSPP, 82, 5},
- { DBGBUS_DSPP, 82, 6},
- { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 83, 0},
- { DBGBUS_DSPP, 83, 1},
- { DBGBUS_DSPP, 83, 2},
- { DBGBUS_DSPP, 83, 3},
- { DBGBUS_DSPP, 83, 4},
- { DBGBUS_DSPP, 83, 5},
- { DBGBUS_DSPP, 83, 6},
- { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 92, 1},
- { DBGBUS_DSPP, 92, 2},
- { DBGBUS_DSPP, 92, 3},
- { DBGBUS_DSPP, 92, 4},
- { DBGBUS_DSPP, 92, 5},
- { DBGBUS_DSPP, 92, 6},
- { DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 93, 1},
- { DBGBUS_DSPP, 93, 2},
- { DBGBUS_DSPP, 93, 3},
- { DBGBUS_DSPP, 93, 4},
- { DBGBUS_DSPP, 93, 5},
- { DBGBUS_DSPP, 93, 6},
- { DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 94, 1},
- { DBGBUS_DSPP, 94, 2},
- { DBGBUS_DSPP, 94, 3},
- { DBGBUS_DSPP, 94, 4},
- { DBGBUS_DSPP, 94, 5},
- { DBGBUS_DSPP, 94, 6},
- { DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 95, 1},
- { DBGBUS_DSPP, 95, 2},
- { DBGBUS_DSPP, 95, 3},
- { DBGBUS_DSPP, 95, 4},
- { DBGBUS_DSPP, 95, 5},
- { DBGBUS_DSPP, 95, 6},
- { DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
-
- /* LM5 */
- { DBGBUS_DSPP, 110, 1},
- { DBGBUS_DSPP, 110, 2},
- { DBGBUS_DSPP, 110, 3},
- { DBGBUS_DSPP, 110, 4},
- { DBGBUS_DSPP, 110, 5},
- { DBGBUS_DSPP, 110, 6},
- { DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 111, 1},
- { DBGBUS_DSPP, 111, 2},
- { DBGBUS_DSPP, 111, 3},
- { DBGBUS_DSPP, 111, 4},
- { DBGBUS_DSPP, 111, 5},
- { DBGBUS_DSPP, 111, 6},
- { DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 112, 1},
- { DBGBUS_DSPP, 112, 2},
- { DBGBUS_DSPP, 112, 3},
- { DBGBUS_DSPP, 112, 4},
- { DBGBUS_DSPP, 112, 5},
- { DBGBUS_DSPP, 112, 6},
- { DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 113, 1},
- { DBGBUS_DSPP, 113, 2},
- { DBGBUS_DSPP, 113, 3},
- { DBGBUS_DSPP, 113, 4},
- { DBGBUS_DSPP, 113, 5},
- { DBGBUS_DSPP, 113, 6},
- { DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 114, 1},
- { DBGBUS_DSPP, 114, 2},
- { DBGBUS_DSPP, 114, 3},
- { DBGBUS_DSPP, 114, 4},
- { DBGBUS_DSPP, 114, 5},
- { DBGBUS_DSPP, 114, 6},
- { DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 115, 1},
- { DBGBUS_DSPP, 115, 2},
- { DBGBUS_DSPP, 115, 3},
- { DBGBUS_DSPP, 115, 4},
- { DBGBUS_DSPP, 115, 5},
- { DBGBUS_DSPP, 115, 6},
- { DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 116, 1},
- { DBGBUS_DSPP, 116, 2},
- { DBGBUS_DSPP, 116, 3},
- { DBGBUS_DSPP, 116, 4},
- { DBGBUS_DSPP, 116, 5},
- { DBGBUS_DSPP, 116, 6},
- { DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 117, 1},
- { DBGBUS_DSPP, 117, 2},
- { DBGBUS_DSPP, 117, 3},
- { DBGBUS_DSPP, 117, 4},
- { DBGBUS_DSPP, 117, 5},
- { DBGBUS_DSPP, 117, 6},
- { DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 118, 1},
- { DBGBUS_DSPP, 118, 2},
- { DBGBUS_DSPP, 118, 3},
- { DBGBUS_DSPP, 118, 4},
- { DBGBUS_DSPP, 118, 5},
- { DBGBUS_DSPP, 118, 6},
- { DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 119, 1},
- { DBGBUS_DSPP, 119, 2},
- { DBGBUS_DSPP, 119, 3},
- { DBGBUS_DSPP, 119, 4},
- { DBGBUS_DSPP, 119, 5},
- { DBGBUS_DSPP, 119, 6},
- { DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
-
- { DBGBUS_DSPP, 120, 1},
- { DBGBUS_DSPP, 120, 2},
- { DBGBUS_DSPP, 120, 3},
- { DBGBUS_DSPP, 120, 4},
- { DBGBUS_DSPP, 120, 5},
- { DBGBUS_DSPP, 120, 6},
- { DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
-
- /* csc */
- { DBGBUS_SSPP0, 7, 0},
- { DBGBUS_SSPP0, 7, 1},
- { DBGBUS_SSPP0, 27, 0},
- { DBGBUS_SSPP0, 27, 1},
- { DBGBUS_SSPP1, 7, 0},
- { DBGBUS_SSPP1, 7, 1},
- { DBGBUS_SSPP1, 27, 0},
- { DBGBUS_SSPP1, 27, 1},
-
- /* pcc */
- { DBGBUS_SSPP0, 3, 3},
- { DBGBUS_SSPP0, 23, 3},
- { DBGBUS_SSPP0, 33, 3},
- { DBGBUS_SSPP0, 43, 3},
- { DBGBUS_SSPP1, 3, 3},
- { DBGBUS_SSPP1, 23, 3},
- { DBGBUS_SSPP1, 33, 3},
- { DBGBUS_SSPP1, 43, 3},
-
- /* spa */
- { DBGBUS_SSPP0, 8, 0},
- { DBGBUS_SSPP0, 28, 0},
- { DBGBUS_SSPP1, 8, 0},
- { DBGBUS_SSPP1, 28, 0},
- { DBGBUS_DSPP, 13, 0},
- { DBGBUS_DSPP, 19, 0},
-
- /* igc */
- { DBGBUS_SSPP0, 17, 0},
- { DBGBUS_SSPP0, 17, 1},
- { DBGBUS_SSPP0, 17, 3},
- { DBGBUS_SSPP0, 37, 0},
- { DBGBUS_SSPP0, 37, 1},
- { DBGBUS_SSPP0, 37, 3},
- { DBGBUS_SSPP0, 46, 0},
- { DBGBUS_SSPP0, 46, 1},
- { DBGBUS_SSPP0, 46, 3},
-
- { DBGBUS_SSPP1, 17, 0},
- { DBGBUS_SSPP1, 17, 1},
- { DBGBUS_SSPP1, 17, 3},
- { DBGBUS_SSPP1, 37, 0},
- { DBGBUS_SSPP1, 37, 1},
- { DBGBUS_SSPP1, 37, 3},
- { DBGBUS_SSPP1, 46, 0},
- { DBGBUS_SSPP1, 46, 1},
- { DBGBUS_SSPP1, 46, 3},
-
- { DBGBUS_DSPP, 14, 0},
- { DBGBUS_DSPP, 14, 1},
- { DBGBUS_DSPP, 14, 3},
- { DBGBUS_DSPP, 20, 0},
- { DBGBUS_DSPP, 20, 1},
- { DBGBUS_DSPP, 20, 3},
-
- /* intf0-3 */
- { DBGBUS_PERIPH, 0, 0},
- { DBGBUS_PERIPH, 1, 0},
- { DBGBUS_PERIPH, 2, 0},
- { DBGBUS_PERIPH, 3, 0},
-
- /* te counter wrapper */
- { DBGBUS_PERIPH, 60, 0},
-
- /* dsc0 */
- { DBGBUS_PERIPH, 47, 0},
- { DBGBUS_PERIPH, 47, 1},
- { DBGBUS_PERIPH, 47, 2},
- { DBGBUS_PERIPH, 47, 3},
- { DBGBUS_PERIPH, 47, 4},
- { DBGBUS_PERIPH, 47, 5},
- { DBGBUS_PERIPH, 47, 6},
- { DBGBUS_PERIPH, 47, 7},
-
- /* dsc1 */
- { DBGBUS_PERIPH, 48, 0},
- { DBGBUS_PERIPH, 48, 1},
- { DBGBUS_PERIPH, 48, 2},
- { DBGBUS_PERIPH, 48, 3},
- { DBGBUS_PERIPH, 48, 4},
- { DBGBUS_PERIPH, 48, 5},
- { DBGBUS_PERIPH, 48, 6},
- { DBGBUS_PERIPH, 48, 7},
-
- /* dsc2 */
- { DBGBUS_PERIPH, 51, 0},
- { DBGBUS_PERIPH, 51, 1},
- { DBGBUS_PERIPH, 51, 2},
- { DBGBUS_PERIPH, 51, 3},
- { DBGBUS_PERIPH, 51, 4},
- { DBGBUS_PERIPH, 51, 5},
- { DBGBUS_PERIPH, 51, 6},
- { DBGBUS_PERIPH, 51, 7},
-
- /* dsc3 */
- { DBGBUS_PERIPH, 52, 0},
- { DBGBUS_PERIPH, 52, 1},
- { DBGBUS_PERIPH, 52, 2},
- { DBGBUS_PERIPH, 52, 3},
- { DBGBUS_PERIPH, 52, 4},
- { DBGBUS_PERIPH, 52, 5},
- { DBGBUS_PERIPH, 52, 6},
- { DBGBUS_PERIPH, 52, 7},
-
- /* tear-check */
- { DBGBUS_PERIPH, 63, 0 },
- { DBGBUS_PERIPH, 64, 0 },
- { DBGBUS_PERIPH, 65, 0 },
- { DBGBUS_PERIPH, 73, 0 },
- { DBGBUS_PERIPH, 74, 0 },
-
- /* cdwn */
- { DBGBUS_PERIPH, 80, 0},
- { DBGBUS_PERIPH, 80, 1},
- { DBGBUS_PERIPH, 80, 2},
-
- { DBGBUS_PERIPH, 81, 0},
- { DBGBUS_PERIPH, 81, 1},
- { DBGBUS_PERIPH, 81, 2},
-
- { DBGBUS_PERIPH, 82, 0},
- { DBGBUS_PERIPH, 82, 1},
- { DBGBUS_PERIPH, 82, 2},
- { DBGBUS_PERIPH, 82, 3},
- { DBGBUS_PERIPH, 82, 4},
- { DBGBUS_PERIPH, 82, 5},
- { DBGBUS_PERIPH, 82, 6},
- { DBGBUS_PERIPH, 82, 7},
-
- /* hdmi */
- { DBGBUS_PERIPH, 68, 0},
- { DBGBUS_PERIPH, 68, 1},
- { DBGBUS_PERIPH, 68, 2},
- { DBGBUS_PERIPH, 68, 3},
- { DBGBUS_PERIPH, 68, 4},
- { DBGBUS_PERIPH, 68, 5},
-
- /* edp */
- { DBGBUS_PERIPH, 69, 0},
- { DBGBUS_PERIPH, 69, 1},
- { DBGBUS_PERIPH, 69, 2},
- { DBGBUS_PERIPH, 69, 3},
- { DBGBUS_PERIPH, 69, 4},
- { DBGBUS_PERIPH, 69, 5},
-
- /* dsi0 */
- { DBGBUS_PERIPH, 70, 0},
- { DBGBUS_PERIPH, 70, 1},
- { DBGBUS_PERIPH, 70, 2},
- { DBGBUS_PERIPH, 70, 3},
- { DBGBUS_PERIPH, 70, 4},
- { DBGBUS_PERIPH, 70, 5},
-
- /* dsi1 */
- { DBGBUS_PERIPH, 71, 0},
- { DBGBUS_PERIPH, 71, 1},
- { DBGBUS_PERIPH, 71, 2},
- { DBGBUS_PERIPH, 71, 3},
- { DBGBUS_PERIPH, 71, 4},
- { DBGBUS_PERIPH, 71, 5},
-};
-
-static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
- {0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */
- {0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */
- {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
- {0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */
- {0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */
- {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
- {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
-};
-
-/**
- * _dpu_dbg_enable_power - turn power on or off for hw register access
- * @enable: whether to turn power on or off
- */
-static inline void _dpu_dbg_enable_power(int enable)
-{
- if (enable)
- pm_runtime_get_sync(dpu_dbg_base.dev);
- else
- pm_runtime_put_sync(dpu_dbg_base.dev);
-}
-
-static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
-{
- bool in_log, in_mem;
- u32 **dump_mem = NULL;
- u32 *dump_addr = NULL;
- u32 status = 0;
- struct dpu_debug_bus_entry *head;
- phys_addr_t phys = 0;
- int list_size;
- int i;
- u32 offset;
- void __iomem *mem_base = NULL;
- struct dpu_dbg_reg_base *reg_base;
-
- if (!bus || !bus->cmn.entries_size)
- return;
-
- list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
- reg_base_head)
- if (strlen(reg_base->name) &&
- !strcmp(reg_base->name, bus->cmn.name))
- mem_base = reg_base->base + bus->top_blk_off;
-
- if (!mem_base) {
- pr_err("unable to find mem_base for %s\n", bus->cmn.name);
- return;
- }
-
- dump_mem = &bus->cmn.dumped_content;
-
-	/* keep 4 values of 4 bytes each in memory for every bus entry */
- list_size = (bus->cmn.entries_size * 4 * 4);
-
- in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
- in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
- if (!in_log && !in_mem)
- return;
-
- dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
- bus->cmn.name);
-
- if (in_mem) {
- if (!(*dump_mem))
- *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
- list_size, &phys, GFP_KERNEL);
-
- if (*dump_mem) {
- dump_addr = *dump_mem;
- dev_info(dpu_dbg_base.dev,
- "%s: start_addr:0x%pK len:0x%x\n",
- __func__, dump_addr, list_size);
- } else {
- in_mem = false;
-			pr_err("dump_mem: allocation failed\n");
- }
- }
-
- _dpu_dbg_enable_power(true);
- for (i = 0; i < bus->cmn.entries_size; i++) {
- head = bus->entries + i;
- writel_relaxed(TEST_MASK(head->block_id, head->test_id),
- mem_base + head->wr_addr);
- wmb(); /* make sure test bits were written */
-
- if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
- offset = DBGBUS_DSPP_STATUS;
- /* keep DSPP test point enabled */
- if (head->wr_addr != DBGBUS_DSPP)
- writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
- } else {
- offset = head->wr_addr + 0x4;
- }
-
- status = readl_relaxed(mem_base + offset);
-
- if (in_log)
- dev_info(dpu_dbg_base.dev,
- "waddr=0x%x blk=%d tst=%d val=0x%x\n",
- head->wr_addr, head->block_id,
- head->test_id, status);
-
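-		/* record 4 words per entry: write address, block id, test id, status */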
- if (dump_addr && in_mem) {
- dump_addr[i*4] = head->wr_addr;
- dump_addr[i*4 + 1] = head->block_id;
- dump_addr[i*4 + 2] = head->test_id;
- dump_addr[i*4 + 3] = status;
- }
-
- if (head->analyzer)
- head->analyzer(mem_base, head, status);
-
- /* Disable debug bus once we are done */
- writel_relaxed(0, mem_base + head->wr_addr);
- if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
- head->wr_addr != DBGBUS_DSPP)
- writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
- }
- _dpu_dbg_enable_power(false);
-
- dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
- bus->cmn.name);
-}
-
-static void _dpu_dbg_dump_vbif_debug_bus_entry(
- struct vbif_debug_bus_entry *head, void __iomem *mem_base,
- u32 *dump_addr, bool in_log)
-{
- int i, j;
- u32 val;
-
- if (!dump_addr && !in_log)
- return;
-
- for (i = 0; i < head->block_cnt; i++) {
- writel_relaxed(1 << (i + head->bit_offset),
- mem_base + head->block_bus_addr);
-		/* make sure that the current bus block is enabled */
- wmb();
- for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
- writel_relaxed(j, mem_base + head->block_bus_addr + 4);
- /* make sure that test point is enabled */
- wmb();
- val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
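-			/* record 4 words per test point: bus address, block index, test point, value */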
- if (dump_addr) {
- *dump_addr++ = head->block_bus_addr;
- *dump_addr++ = i;
- *dump_addr++ = j;
- *dump_addr++ = val;
- }
- if (in_log)
- dev_info(dpu_dbg_base.dev,
- "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
- head->block_bus_addr, i, j, val);
- }
- }
-}
-
-static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
-{
- bool in_log, in_mem;
- u32 **dump_mem = NULL;
- u32 *dump_addr = NULL;
- u32 value, d0, d1;
- unsigned long reg, reg1, reg2;
- struct vbif_debug_bus_entry *head;
- phys_addr_t phys = 0;
- int i, list_size = 0;
- void __iomem *mem_base = NULL;
- struct vbif_debug_bus_entry *dbg_bus;
- u32 bus_size;
- struct dpu_dbg_reg_base *reg_base;
-
- if (!bus || !bus->cmn.entries_size)
- return;
-
- list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
- reg_base_head)
- if (strlen(reg_base->name) &&
- !strcmp(reg_base->name, bus->cmn.name))
- mem_base = reg_base->base;
-
- if (!mem_base) {
- pr_err("unable to find mem_base for %s\n", bus->cmn.name);
- return;
- }
-
- dbg_bus = bus->entries;
- bus_size = bus->cmn.entries_size;
- list_size = bus->cmn.entries_size;
- dump_mem = &bus->cmn.dumped_content;
-
- dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
- bus->cmn.name);
-
- if (!dump_mem || !dbg_bus || !bus_size || !list_size)
- return;
-
-	/* account for every test point when sizing the dump buffer */
- for (i = 0; i < bus_size; i++) {
- head = dbg_bus + i;
- list_size += (head->block_cnt * head->test_pnt_cnt);
- }
-
-	/* 4 bytes * 4 entries for each test point */
- list_size *= 16;
-
- in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
- in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
-
- if (!in_log && !in_mem)
- return;
-
- if (in_mem) {
- if (!(*dump_mem))
- *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
- list_size, &phys, GFP_KERNEL);
-
- if (*dump_mem) {
- dump_addr = *dump_mem;
- dev_info(dpu_dbg_base.dev,
- "%s: start_addr:0x%pK len:0x%x\n",
- __func__, dump_addr, list_size);
- } else {
- in_mem = false;
-			pr_err("dump_mem: allocation failed\n");
- }
- }
-
- _dpu_dbg_enable_power(true);
-
- value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
- writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
-
- /* make sure that vbif core is on */
- wmb();
-
-	/*
- * Extract VBIF error info based on XIN halt and error status.
- * If the XIN client is not in HALT state, or an error is detected,
- * then retrieve the VBIF error info for it.
- */
- reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
- reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
- reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
- dev_err(dpu_dbg_base.dev,
- "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
- reg, reg1, reg2);
- reg >>= 16;
- reg &= ~(reg1 | reg2);
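-	/* a clear bit now means the client is either not halted or has flagged an error; dump error info for those clients */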
- for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
- if (!test_bit(0, &reg)) {
- writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
- /* make sure reg write goes through */
- wmb();
-
- d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
- d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
-
- dev_err(dpu_dbg_base.dev,
- "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
- i, d0, d1);
- }
- reg >>= 1;
- }
-
- for (i = 0; i < bus_size; i++) {
- head = dbg_bus + i;
-
- writel_relaxed(0, mem_base + head->disable_bus_addr);
- writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
- /* make sure that other bus is off */
- wmb();
-
- _dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
- in_log);
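-		/* advance past the 4 words recorded for each test point of this entry */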
- if (dump_addr)
- dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
- }
-
- _dpu_dbg_enable_power(false);
-
- dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
- bus->cmn.name);
-}
-
-/**
- * _dpu_dump_array - dump the requested debug buses
- * @name: string indicating origin of dump
- * @dump_dbgbus_dpu: whether to dump the dpu debug bus
- * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
- */
-static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt)
-{
- if (dump_dbgbus_dpu)
- _dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
-
- if (dump_dbgbus_vbif_rt)
- _dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
-}
-
-/**
- * _dpu_dump_work - deferred dump work function
- * @work: work structure
- */
-static void _dpu_dump_work(struct work_struct *work)
-{
- _dpu_dump_array("dpudump_workitem",
- dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
- dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
-}
-
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt)
-{
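-	/* a deferred dump is already queued; nothing more to do */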
- if (queue_work && work_pending(&dpu_dbg_base.dump_work))
- return;
-
- if (!queue_work) {
- _dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
- return;
- }
-
- /* schedule work to dump later */
- dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
- dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
- dump_dbgbus_vbif_rt;
- schedule_work(&dpu_dbg_base.dump_work);
-}
-
-/**
- * dpu_dbg_debugfs_open - debugfs open handler for debug dump
- * @inode: debugfs inode
- * @file: file handle
- */
-static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
-{
- /* non-seekable */
- file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
- file->private_data = inode->i_private;
- return 0;
-}
-
-/**
- * dpu_dbg_dump_write - debugfs write handler for debug dump
- * @file: file handle
- * @user_buf: user buffer content from debugfs
- * @count: size of user buffer
- * @ppos: position offset of user buffer
- */
-static ssize_t dpu_dbg_dump_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- _dpu_dump_array("dump_debugfs", true, true);
- return count;
-}
-
-static const struct file_operations dpu_dbg_dump_fops = {
- .open = dpu_dbg_debugfs_open,
- .write = dpu_dbg_dump_write,
-};
-
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
- static struct dpu_dbg_base *dbg = &dpu_dbg_base;
- char debug_name[80] = "";
-
- if (!debugfs_root)
- return -EINVAL;
-
- debugfs_create_file("dump", 0600, debugfs_root, NULL,
- &dpu_dbg_dump_fops);
-
- if (dbg->dbgbus_dpu.entries) {
- dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
- snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
- dbg->dbgbus_dpu.cmn.name);
- dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
- debugfs_create_u32(debug_name, 0600, debugfs_root,
- &dbg->dbgbus_dpu.cmn.enable_mask);
- }
-
- if (dbg->dbgbus_vbif_rt.entries) {
- dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
- snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
- dbg->dbgbus_vbif_rt.cmn.name);
- dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
- debugfs_create_u32(debug_name, 0600, debugfs_root,
- &dbg->dbgbus_vbif_rt.cmn.enable_mask);
- }
-
- return 0;
-}
-
-static void _dpu_dbg_debugfs_destroy(void)
-{
-}
-
-void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
- static struct dpu_dbg_base *dbg = &dpu_dbg_base;
-
- memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
- memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
-
- if (IS_MSM8998_TARGET(hwversion)) {
- dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
- dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
- dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
- dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
- dbg->dbgbus_vbif_rt.cmn.entries_size =
- ARRAY_SIZE(vbif_dbg_bus_msm8998);
- } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
- dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
- dbg->dbgbus_dpu.cmn.entries_size =
- ARRAY_SIZE(dbg_bus_dpu_sdm845);
- dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
-
- /* vbif is unchanged vs 8998 */
- dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
- dbg->dbgbus_vbif_rt.cmn.entries_size =
- ARRAY_SIZE(vbif_dbg_bus_msm8998);
- } else {
- pr_err("unsupported chipset id %X\n", hwversion);
- }
-}
-
-int dpu_dbg_init(struct device *dev)
-{
- if (!dev) {
- pr_err("invalid params\n");
- return -EINVAL;
- }
-
- INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
- dpu_dbg_base.dev = dev;
-
- INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
-
- return 0;
-}
-
-/**
- * dpu_dbg_destroy - destroy dpu debug facilities
- */
-void dpu_dbg_destroy(void)
-{
- _dpu_dbg_debugfs_destroy();
-}
-
-void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
- dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
deleted file mode 100644
index 1e6fa945f98b..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef DPU_DBG_H_
-#define DPU_DBG_H_
-
-#include <stdarg.h>
-#include <linux/debugfs.h>
-#include <linux/list.h>
-
-enum dpu_dbg_dump_flag {
- DPU_DBG_DUMP_IN_LOG = BIT(0),
- DPU_DBG_DUMP_IN_MEM = BIT(1),
-};
-
-#if defined(CONFIG_DEBUG_FS)
-
-/**
- * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
- * @hwversion: Chipset revision
- */
-void dpu_dbg_init_dbg_buses(u32 hwversion);
-
-/**
- * dpu_dbg_init - initialize global dpu debug facilities: regdump
- * @dev: device handle
- * Returns: 0 or -ERROR
- */
-int dpu_dbg_init(struct device *dev);
-
-/**
- * dpu_dbg_debugfs_register - register entries at the given debugfs dir
- * @debugfs_root: debugfs root in which to create dpu debug entries
- * Returns: 0 or -ERROR
- */
-int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
-
-/**
- * dpu_dbg_destroy - destroy the global dpu debug facilities
- * Returns: none
- */
-void dpu_dbg_destroy(void);
-
-/**
- * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
- * @queue_work: whether to queue the dumping work to the work_struct
- * @name: string indicating origin of dump
- * @dump_dbgbus_dpu: whether to dump the dpu debug bus
- * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
- * Returns: none
- */
-void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
- bool dump_dbgbus_vbif_rt);
-
-/**
- * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
- * address of the top registers. Used for accessing debug bus controls.
- * @blk_off: offset from mdss base of the top block
- */
-void dpu_dbg_set_dpu_top_offset(u32 blk_off);
-
-#else
-
-static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
-{
-}
-
-static inline int dpu_dbg_init(struct device *dev)
-{
- return 0;
-}
-
-static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
-{
- return 0;
-}
-
-static inline void dpu_dbg_destroy(void)
-{
-}
-
-static inline void dpu_dbg_dump(bool queue_work, const char *name,
- bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
-{
-}
-
-static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
-{
-}
-
-#endif /* defined(CONFIG_DEBUG_FS) */
-
-#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 96cdf06e7da2..36158b7d99cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -130,8 +130,9 @@ enum dpu_enc_rc_states {
* Virtual encoder defers as much as possible to the physical encoders.
* Virtual encoder registers itself with the DRM Framework as the encoder.
* @base: drm_encoder base class for registration with DRM
- * @enc_spin_lock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
* @bus_scaling_client: Client handle to the bus scaling interface
+ * @enabled: True if the encoder is active, protected by enc_lock
* @num_phys_encs: Actual number of physical encoders contained.
* @phys_encs: Container of physical encoders managed.
* @cur_master: Pointer to the current master in this mode. Optimization
@@ -141,15 +142,17 @@ enum dpu_enc_rc_states {
 * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
* for partial update right-only cases, such as pingpong
* split where virtual pingpong does not generate IRQs
- * @crtc_vblank_cb: Callback into the upper layer / CRTC for
- * notification of the VBLANK
- * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
+ * @crtc: Pointer to the currently assigned crtc. Normally you
+ * would use crtc->state->encoder_mask to determine the
+ * link between encoder/crtc. However in this case we need
+ * to track crtc in the disable() hook which is called
+ * _after_ encoder_mask is cleared.
* @crtc_kickoff_cb: Callback into CRTC that will flush & start
* all CTL paths
* @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
* @debugfs_root: Debug file system root file node
- * @enc_lock: Lock around physical encoder create/destroy and
- access.
+ * @enc_lock: Lock around physical encoder
+ * create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_enc we are still
* busy processing current command.
* Bit0 = phys_encs[0] etc.
@@ -175,6 +178,8 @@ struct dpu_encoder_virt {
spinlock_t enc_spinlock;
uint32_t bus_scaling_client;
+ bool enabled;
+
unsigned int num_phys_encs;
struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
struct dpu_encoder_phys *cur_master;
@@ -183,8 +188,7 @@ struct dpu_encoder_virt {
bool intfs_swapped;
- void (*crtc_vblank_cb)(void *);
- void *crtc_vblank_cb_data;
+ struct drm_crtc *crtc;
struct dentry *debugfs_root;
struct mutex enc_lock;
@@ -210,39 +214,6 @@ struct dpu_encoder_virt {
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
-static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
- bool enable)
-{
- struct drm_encoder *drm_enc;
- struct msm_drm_private *priv;
- struct dpu_kms *dpu_kms;
-
- if (!dpu_enc) {
- DPU_ERROR("invalid dpu enc\n");
- return -EINVAL;
- }
-
- drm_enc = &dpu_enc->base;
- if (!drm_enc->dev || !drm_enc->dev->dev_private) {
- DPU_ERROR("drm device invalid\n");
- return -EINVAL;
- }
-
- priv = drm_enc->dev->dev_private;
- if (!priv->kms) {
- DPU_ERROR("invalid kms\n");
- return -EINVAL;
- }
-
- dpu_kms = to_dpu_kms(priv->kms);
-
- if (enable)
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- else
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
- return 0;
-}
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
enum dpu_intr_idx intr_idx)
@@ -488,8 +459,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
drm_encoder_cleanup(drm_enc);
mutex_destroy(&dpu_enc->enc_lock);
-
- kfree(dpu_enc);
}
void dpu_encoder_helper_split_config(
@@ -1119,28 +1088,24 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
_dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
}
-void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
{
- struct dpu_encoder_virt *dpu_enc = NULL;
- int i;
-
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
- dpu_enc = to_dpu_encoder_virt(drm_enc);
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
- for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ mutex_lock(&dpu_enc->enc_lock);
- if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
- phys->ops.restore(phys);
- }
+ if (!dpu_enc->enabled)
+ goto out;
+ if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
+ dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
_dpu_encoder_virt_enable_helper(drm_enc);
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
@@ -1154,6 +1119,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
return;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ mutex_lock(&dpu_enc->enc_lock);
cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
@@ -1170,10 +1137,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
if (ret) {
DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
ret);
- return;
+ goto out;
}
_dpu_encoder_virt_enable_helper(drm_enc);
+
+ dpu_enc->enabled = true;
+
+out:
+ mutex_unlock(&dpu_enc->enc_lock);
}
static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
@@ -1195,11 +1167,14 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
return;
}
- mode = &drm_enc->crtc->state->adjusted_mode;
-
dpu_enc = to_dpu_encoder_virt(drm_enc);
DPU_DEBUG_ENC(dpu_enc, "\n");
+ mutex_lock(&dpu_enc->enc_lock);
+ dpu_enc->enabled = false;
+
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@@ -1233,6 +1208,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
dpu_rm_release(&dpu_kms->rm, drm_enc);
+
+ mutex_unlock(&dpu_enc->enc_lock);
}
static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
@@ -1263,8 +1240,8 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
dpu_enc = to_dpu_encoder_virt(drm_enc);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
- if (dpu_enc->crtc_vblank_cb)
- dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+ if (dpu_enc->crtc)
+ dpu_crtc_vblank_callback(dpu_enc->crtc);
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
atomic_inc(&phy_enc->vsync_cnt);
@@ -1284,25 +1261,32 @@ static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_END("encoder_underrun_callback");
}
-void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
- void (*vbl_cb)(void *), void *vbl_data)
+void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
unsigned long lock_flags;
- bool enable;
- int i;
- enable = vbl_cb ? true : false;
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ /* crtc should always be cleared before re-assigning */
+ WARN_ON(crtc && dpu_enc->crtc);
+ dpu_enc->crtc = crtc;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
+ struct drm_crtc *crtc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ int i;
- if (!drm_enc) {
- DPU_ERROR("invalid encoder\n");
- return;
- }
trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
- dpu_enc->crtc_vblank_cb = vbl_cb;
- dpu_enc->crtc_vblank_cb_data = vbl_data;
+ if (dpu_enc->crtc != crtc) {
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+ return;
+ }
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1407,8 +1391,9 @@ static void dpu_encoder_off_work(struct kthread_work *work)
* phys: Pointer to physical encoder structure
* extra_flush_bits: Additional bit mask to include in flush trigger
*/
-static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
- struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phys, uint32_t extra_flush_bits,
+ bool async)
{
struct dpu_hw_ctl *ctl;
int pending_kickoff_cnt;
@@ -1431,7 +1416,10 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
return;
}
- pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+ if (!async)
+ pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+ else
+ pending_kickoff_cnt = atomic_read(&phys->pending_kickoff_cnt);
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
@@ -1450,7 +1438,7 @@ static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
* _dpu_encoder_trigger_start - trigger start for a physical encoder
* phys: Pointer to physical encoder structure
*/
-static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
{
if (!phys) {
DPU_ERROR("invalid argument(s)\n");
@@ -1507,7 +1495,7 @@ static int dpu_encoder_helper_wait_event_timeout(
return rc;
}
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_hw_ctl *ctl;
@@ -1527,10 +1515,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
ctl->idx);
rc = ctl->ops.reset(ctl);
- if (rc) {
+ if (rc)
DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
- dpu_dbg_dump(false, __func__, true, true);
- }
phys_enc->enable_state = DPU_ENC_ENABLED;
}
@@ -1544,7 +1530,8 @@ void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
* a time.
* dpu_enc: Pointer to virtual encoder structure
*/
-static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
+ bool async)
{
struct dpu_hw_ctl *ctl;
uint32_t i, pending_flush;
@@ -1575,7 +1562,8 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
set_bit(i, dpu_enc->frame_busy_mask);
if (!phys->ops.needs_single_flush ||
!phys->ops.needs_single_flush(phys))
- _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
+ async);
else if (ctl->ops.get_pending_flush)
pending_flush |= ctl->ops.get_pending_flush(ctl);
}
@@ -1585,7 +1573,7 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
_dpu_encoder_trigger_flush(
&dpu_enc->base,
dpu_enc->cur_master,
- pending_flush);
+ pending_flush, async);
}
_dpu_encoder_trigger_start(dpu_enc->cur_master);
@@ -1769,7 +1757,7 @@ static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
}
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
- struct dpu_encoder_kickoff_params *params)
+ struct dpu_encoder_kickoff_params *params, bool async)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
@@ -1803,14 +1791,12 @@ void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
if (needs_hw_reset) {
trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
- phys = dpu_enc->phys_encs[i];
- if (phys && phys->ops.hw_reset)
- phys->ops.hw_reset(phys);
+ dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
}
}
}
-void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
{
struct dpu_encoder_virt *dpu_enc;
struct dpu_encoder_phys *phys;
@@ -1833,7 +1819,7 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
/* All phys encs are ready to go, trigger the kickoff */
- _dpu_encoder_kickoff_phys(dpu_enc);
+ _dpu_encoder_kickoff_phys(dpu_enc, async);
/* allow phys encs to handle any post-kickoff business */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1875,14 +1861,9 @@ void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
- struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_virt *dpu_enc = s->private;
int i;
- if (!s || !s->private)
- return -EINVAL;
-
- dpu_enc = s->private;
-
mutex_lock(&dpu_enc->enc_lock);
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -1920,7 +1901,7 @@ static int _dpu_encoder_debugfs_status_open(struct inode *inode,
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
- struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms;
int i;
@@ -1934,12 +1915,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
char name[DPU_NAME_SIZE];
- if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
DPU_ERROR("invalid encoder or kms\n");
return -EINVAL;
}
- dpu_enc = to_dpu_encoder_virt(drm_enc);
priv = drm_enc->dev->dev_private;
dpu_kms = to_dpu_kms(priv->kms);
@@ -1964,26 +1944,11 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
return 0;
}
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
- struct dpu_encoder_virt *dpu_enc;
-
- if (!drm_enc)
- return;
-
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- debugfs_remove_recursive(dpu_enc->debugfs_root);
-}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
return 0;
}
-
-static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
-{
-}
#endif
static int dpu_encoder_late_register(struct drm_encoder *encoder)
@@ -1993,7 +1958,9 @@ static int dpu_encoder_late_register(struct drm_encoder *encoder)
static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
- _dpu_encoder_destroy_debugfs(encoder);
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
+
+ debugfs_remove_recursive(dpu_enc->debugfs_root);
}
static int dpu_encoder_virt_add_phys_encs(
@@ -2268,6 +2235,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+ dpu_enc->enabled = false;
+
return &dpu_enc->base;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 9dbf38f446d9..3f5dafe00580 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -55,14 +55,22 @@ void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
struct dpu_encoder_hw_resources *hw_res);
/**
- * dpu_encoder_register_vblank_callback - provide callback to encoder that
- * will be called on the next vblank.
+ * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
* @encoder: encoder pointer
- * @cb: callback pointer, provide NULL to deregister and disable IRQs
- * @data: user data provided to callback
+ * @crtc: crtc pointer
+ */
+void dpu_encoder_assign_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc);
+
+/**
+ * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if
+ * the encoder is assigned to the given crtc
+ * @encoder: encoder pointer
+ * @crtc: crtc pointer
+ * @enable: true if vblank should be enabled
*/
-void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
- void (*cb)(void *), void *data);
+void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *encoder,
+ struct drm_crtc *crtc, bool enable);
/**
* dpu_encoder_register_frame_event_callback - provide callback to encoder that
@@ -81,9 +89,10 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
* Delayed: Block until next trigger can be issued.
* @encoder: encoder pointer
* @params: kickoff time parameters
+ * @async: true if this is an asynchronous commit
*/
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
- struct dpu_encoder_kickoff_params *params);
+ struct dpu_encoder_kickoff_params *params, bool async);
/**
* dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
@@ -96,8 +105,9 @@ void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
* dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
* (i.e. ctl flush and start) immediately.
* @encoder: encoder pointer
+ * @async: true if this is an asynchronous commit
*/
-void dpu_encoder_kickoff(struct drm_encoder *encoder);
+void dpu_encoder_kickoff(struct drm_encoder *encoder, bool async);
/**
* dpu_encoder_wait_for_event - Waits for encoder events
@@ -126,10 +136,10 @@ int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
/**
- * dpu_encoder_virt_restore - restore the encoder configs
+ * dpu_encoder_virt_runtime_resume - pm runtime resume the encoder configs
* @encoder: encoder pointer
*/
-void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+void dpu_encoder_virt_runtime_resume(struct drm_encoder *encoder);
/**
* dpu_encoder_init - initialize virtual encoder object
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 964efcc757a4..44e6f8b68e70 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -114,8 +114,6 @@ struct dpu_encoder_virt_ops {
* @handle_post_kickoff: Do any work necessary post-kickoff work
* @trigger_start: Process start event on physical encoder
* @needs_single_flush: Whether encoder slaves need to be flushed
- * @hw_reset: Issue HW recovery such as CTL reset and clear
- * DPU_ENC_ERR_NEEDS_HW_RESET state
* @irq_control: Handler to enable/disable all the encoder IRQs
* @prepare_idle_pc: phys encoder can update the vsync_enable status
* on idle power collapse prepare
@@ -151,7 +149,6 @@ struct dpu_encoder_phys_ops {
void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
- void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
void (*restore)(struct dpu_encoder_phys *phys);
@@ -342,15 +339,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
*/
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
-/**
- * dpu_encoder_helper_hw_reset - issue ctl hw reset
- * This helper function may be optionally specified by physical
- * encoders if they require ctl hw reset. If state is currently
- * DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
- * @phys_enc: Pointer to physical encoder structure
- */
-void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
-
static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
struct dpu_encoder_phys *phys_enc)
{
@@ -362,7 +350,7 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
if (phys_enc->split_role == ENC_ROLE_SOLO &&
- dpu_crtc_state_is_stereo(dpu_cstate))
+ dpu_cstate->num_mixers == CRTC_DUAL_MIXERS)
return BLEND_3D_H_ROW_INT;
return BLEND_3D_NONE;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index b2d7f0ded24c..99ab5ca9bed3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -44,14 +44,7 @@
#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
-static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
- struct dpu_encoder_phys_cmd *cmd_enc)
-{
- return KICKOFF_TIMEOUT_MS;
-}
-
-static inline bool dpu_encoder_phys_cmd_is_master(
- struct dpu_encoder_phys *phys_enc)
+static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
}
@@ -243,7 +236,6 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
atomic_read(&phys_enc->pending_kickoff_cnt));
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
- dpu_dbg_dump(false, __func__, true, true);
}
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -496,14 +488,11 @@ static void dpu_encoder_phys_cmd_enable_helper(
_dpu_encoder_phys_cmd_pingpong_config(phys_enc);
if (!dpu_encoder_phys_cmd_is_master(phys_enc))
- goto skip_flush;
+ return;
ctl = phys_enc->hw_ctl;
ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
-
-skip_flush:
- return;
}
static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
@@ -727,7 +716,7 @@ static int dpu_encoder_phys_cmd_wait_for_vblank(
wait_info.wq = &cmd_enc->pending_vblank_wq;
wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
- wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
atomic_inc(&cmd_enc->pending_vblank_cnt);
@@ -776,7 +765,6 @@ static void dpu_encoder_phys_cmd_init_ops(
ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
- ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->irq_control = dpu_encoder_phys_cmd_irq_control;
ops->restore = dpu_encoder_phys_cmd_enable_helper;
ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
@@ -798,7 +786,7 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
if (!cmd_enc) {
ret = -ENOMEM;
DPU_ERROR("failed to allocate\n");
- goto fail;
+ return ERR_PTR(ret);
}
phys_enc = &cmd_enc->base;
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
@@ -856,6 +844,5 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
return phys_enc;
-fail:
return ERR_PTR(ret);
}
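A note on the timeout cleanup in the hunks above: the removed
_dpu_encoder_phys_cmd_get_idle_timeout() took a cmd_enc argument it
never used and always returned the same constant, so its single call
site can hold the constant directly. Sketch of the before/after:

	/* before: indirection through a helper with an unused parameter */
	wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);

	/* after: the constant the helper always produced */
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;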
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 84de385a9f62..acdab5b0db18 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -110,7 +110,7 @@ static void drm_mode_to_intf_timing_params(
*/
}
-static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+static u32 get_horizontal_total(const struct intf_timing_params *timing)
{
u32 active = timing->xres;
u32 inactive =
@@ -119,7 +119,7 @@ static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
return active + inactive;
}
-static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+static u32 get_vertical_total(const struct intf_timing_params *timing)
{
u32 active = timing->yres;
u32 inactive =
@@ -331,7 +331,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
if (hw_ctl && hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
- if (flush_register == 0)
+ if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -613,7 +613,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
- dpu_dbg_dump(false, __func__, true, true);
}
}
@@ -766,7 +765,6 @@ static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
- ops->hw_reset = dpu_encoder_helper_hw_reset;
ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
}
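The vblank-IRQ change above tightens when a pending kickoff is
retired: instead of requiring the whole CTL_FLUSH register to read
back zero, it only requires that the bits this CTL queued (its
pending_flush_mask) have cleared. A sketch of the decision, using the
names from the hunk:

	u32 flush = hw_ctl->ops.get_flush_register(hw_ctl);   /* bits HW still owes */
	u32 queued = hw_ctl->ops.get_pending_flush(hw_ctl);   /* bits we asked for */

	if (!(flush & queued))	/* all of *our* flushes have landed */
		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
					    -1, 0);

Flush bits set by unrelated paths can no longer hold this encoder's
kickoff accounting hostage across vblanks.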
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index bfcd165e96df..0874f0a53bf9 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -216,7 +216,7 @@ static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
- true, 4, 0,
+ false, 4, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA8888,
@@ -921,7 +921,7 @@ static int _dpu_format_populate_addrs_ubwc(
+ layout->plane_size[2] + layout->plane_size[3];
if (!meta)
- goto done;
+ return 0;
/* configure Y metadata plane */
layout->plane_addr[2] = base_addr;
@@ -952,12 +952,11 @@ static int _dpu_format_populate_addrs_ubwc(
layout->plane_addr[1] = 0;
if (!meta)
- goto done;
+ return 0;
layout->plane_addr[2] = base_addr;
layout->plane_addr[3] = 0;
}
-done:
return 0;
}
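On the XBGR8888 fix above: by the apparent layout of the
INTERLEAVED_RGB_FMT() entries, the flipped flag is the per-pixel alpha
enable, and the 'X' byte in XBGR is padding rather than alpha. A
hedged sketch of what the changed argument presumably expands to:

	/* assumed expansion of the alpha flag in the format table entry */
	.alpha_enable = false,	/* X8 is don't-care padding, not alpha */

Treating the padding byte as alpha would make the pipe blend against
undefined data, so advertising the format as opaque is the safe
reading.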
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
index 58d29e43faef..92f1c4241b9a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -30,16 +30,10 @@ static LIST_HEAD(dpu_hw_blk_list);
* @type: hw block type - enum dpu_hw_blk_type
* @id: instance id of the hw block
* @ops: Pointer to block operations
- * return: 0 if success; error code otherwise
*/
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops)
{
- if (!hw_blk) {
- pr_err("invalid parameters\n");
- return -EINVAL;
- }
-
INIT_LIST_HEAD(&hw_blk->list);
hw_blk->type = type;
hw_blk->id = id;
@@ -51,8 +45,6 @@ int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
mutex_lock(&dpu_hw_blk_lock);
list_add(&hw_blk->list, &dpu_hw_blk_list);
mutex_unlock(&dpu_hw_blk_lock);
-
- return 0;
}
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
index 0f4ca8af1ec5..1934c2f7e8fa 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -44,7 +44,7 @@ struct dpu_hw_blk {
struct dpu_hw_blk_ops ops;
};
-int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+void dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
struct dpu_hw_blk_ops *ops);
void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
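With dpu_hw_blk_init() now void, the only failure it ever reported was
a NULL hw_blk that no caller actually passes, so every caller's unwind
path collapses. A sketch of the calling convention before and after:

	/* before: a can't-happen error still forced an unwind */
	rc = dpu_hw_blk_init(&c->base, type, idx, &ops);
	if (rc) {
		kzfree(c);
		return ERR_PTR(rc);
	}

	/* after: the call cannot fail, so no unwind remains */
	dpu_hw_blk_init(&c->base, type, idx, &ops);
	return c;

The hunks that follow (ctl, intf, lm, pingpong, sspp, top) are this
same transformation applied once per block type.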
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index dc060e7358e4..144358a3d0fb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -736,13 +736,4 @@ struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
*/
void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
-/**
- * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
- * @cfg: pointer to sspp cfg
- */
-static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
-{
- return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
- test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
-}
#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index eec1051f2afc..1068b4b7940f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -13,8 +13,8 @@
#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
+#include "dpu_trace.h"
#define CTL_LAYER(lm) \
(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
@@ -72,24 +72,39 @@ static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
return stages;
}
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, CTL_FLUSH);
+}
+
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
+ trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
+ trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
+ trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
ctx->pending_flush_mask = 0x0;
}
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
u32 flushbits)
{
+ trace_dpu_hw_ctl_update_pending_flush(flushbits,
+ ctx->pending_flush_mask);
ctx->pending_flush_mask |= flushbits;
}
@@ -103,18 +118,12 @@ static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
-
+ trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
+ dpu_hw_ctl_get_flush_register(ctx));
DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}
-static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
-{
- struct dpu_hw_blk_reg_map *c = &ctx->hw;
-
- return DPU_REG_READ(c, CTL_FLUSH);
-}
-
-static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
enum dpu_sspp sspp)
{
uint32_t flushbits = 0;
@@ -169,7 +178,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
return flushbits;
}
-static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
enum dpu_lm lm)
{
uint32_t flushbits = 0;
@@ -202,7 +211,7 @@ static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
return flushbits;
}
-static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
u32 *flushbits, enum dpu_intf intf)
{
switch (intf) {
@@ -474,10 +483,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
void __iomem *addr,
@@ -485,7 +491,6 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
{
struct dpu_hw_ctl *c;
struct dpu_ctl_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -504,18 +509,9 @@ struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
c->mixer_count = m->mixer_count;
c->mixer_hw_caps = m->mixer;
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
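The tracepoints added in this file pair the software-side
pending_flush_mask with a live read of CTL_FLUSH at each trigger
point, which is what makes stuck-flush bugs visible from a trace. A
hedged sketch of the shape such an event would take (see dpu_trace.h
for the real definitions; this is illustrative, not the actual macro
body):

	TRACE_EVENT(dpu_hw_ctl_trigger_start,
		TP_PROTO(u32 pending_mask, u32 ctl_flush),
		TP_ARGS(pending_mask, ctl_flush),
		TP_STRUCT__entry(
			__field(u32, pending_mask)
			__field(u32, ctl_flush)
		),
		TP_fast_assign(
			__entry->pending_mask = pending_mask;
			__entry->ctl_flush = ctl_flush;
		),
		TP_printk("pending_mask=%x ctl_flush=%x",
			  __entry->pending_mask, __entry->ctl_flush)
	);

Note that dpu_hw_ctl_get_flush_register() had to move above the
trigger helpers, rather than gain a forward declaration, because they
now call it.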
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
index 9c6bba0ac7c3..f6a83daa385b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define INTF_TIMING_ENGINE_EN 0x000
@@ -265,10 +264,7 @@ static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
ops->get_line_count = dpu_hw_intf_get_line_count;
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
void __iomem *addr,
@@ -276,7 +272,6 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
{
struct dpu_hw_intf *c;
struct dpu_intf_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -297,18 +292,9 @@ struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
c->mdss = m;
_setup_intf_ops(&c->ops, c->cap->features);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
index 3b77df460dea..a2b0dbc23058 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -92,16 +92,6 @@ struct dpu_hw_intf {
};
/**
- * to_dpu_hw_intf - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_intf, base);
-}
-
-/**
* dpu_hw_intf_init(): Initializes the intf driver for the passed
* interface idx.
* @idx: interface index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index acb8dc8acaa5..018df2c3b7ed 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -15,7 +15,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_mdss.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define LM_OP_MODE 0x00
@@ -64,16 +63,10 @@ static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
{
const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
- int rc;
+ if (stage != DPU_STAGE_BASE && stage <= sblk->maxblendstages)
+ return sblk->blendstage_base[stage - DPU_STAGE_0];
- if (stage == DPU_STAGE_BASE)
- rc = -EINVAL;
- else if (stage <= sblk->maxblendstages)
- rc = sblk->blendstage_base[stage - DPU_STAGE_0];
- else
- rc = -EINVAL;
-
- return rc;
+ return -EINVAL;
}
static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
@@ -163,11 +156,6 @@ static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
}
-static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
- void *cfg)
-{
-}
-
static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
struct dpu_hw_lm_ops *ops,
unsigned long features)
@@ -179,13 +167,9 @@ static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
- ops->setup_gc = dpu_hw_lm_gc;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
void __iomem *addr,
@@ -193,7 +177,6 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
{
struct dpu_hw_mixer *c;
struct dpu_lm_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -210,18 +193,9 @@ struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
c->cap = cfg;
_setup_mixer_ops(m, &c->ops, c->cap->features);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 5b036aca8340..6aee839a6a23 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -61,11 +61,6 @@ struct dpu_hw_lm_ops {
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en);
- /**
- * setup_gc : enable/disable gamma correction feature
- */
- void (*setup_gc)(struct dpu_hw_mixer *mixer,
- void *cfg);
};
struct dpu_hw_mixer {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
index cc3a623903f4..3bdf47ed1845 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -16,7 +16,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_pingpong.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#include "dpu_trace.h"
@@ -177,7 +176,7 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
if (height < init)
- goto line_count_exit;
+ return line;
line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
@@ -186,7 +185,6 @@ static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
else
line -= init;
-line_count_exit:
return line;
}
@@ -201,10 +199,7 @@ static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
ops->get_line_count = dpu_hw_pp_get_line_count;
};
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
void __iomem *addr,
@@ -212,7 +207,6 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
{
struct dpu_hw_pingpong *c;
struct dpu_pingpong_cfg *cfg;
- int rc;
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
@@ -228,18 +222,9 @@ struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
c->caps = cfg;
_setup_pingpong_ops(&c->ops, c->caps);
- rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
return c;
-
-blk_init_error:
- kzfree(c);
-
- return ERR_PTR(rc);
}
void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
index 3caccd7d6a3e..0e02e43cee14 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -105,16 +105,6 @@ struct dpu_hw_pingpong {
};
/**
- * dpu_hw_pingpong - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_pingpong, base);
-}
-
-/**
* dpu_hw_pingpong_init - initializes the pingpong driver for the passed
* pingpong idx.
* @idx: Pingpong index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index c25b52a6b219..e9132bf5166b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -14,7 +14,6 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_sspp.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
@@ -141,7 +140,7 @@
/* traffic shaper clock in Hz */
#define TS_CLK 19200000
-static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
int s_id,
u32 *idx)
{
@@ -662,7 +661,8 @@ static void _setup_layer_ops(struct dpu_hw_pipe *c,
test_bit(DPU_SSPP_CSC_10BIT, &features))
c->ops.setup_csc = dpu_hw_sspp_setup_csc;
- if (dpu_hw_sspp_multirect_enabled(c->cap))
+ if (test_bit(DPU_SSPP_SMART_DMA_V1, &c->cap->features) ||
+ test_bit(DPU_SSPP_SMART_DMA_V2, &c->cap->features))
c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
@@ -697,10 +697,7 @@ static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
return ERR_PTR(-ENOMEM);
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
void __iomem *addr, struct dpu_mdss_cfg *catalog,
@@ -708,7 +705,6 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
{
struct dpu_hw_pipe *hw_pipe;
struct dpu_sspp_cfg *cfg;
- int rc;
if (!addr || !catalog)
return ERR_PTR(-EINVAL);
@@ -730,18 +726,9 @@ struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
hw_pipe->cap = cfg;
_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
- rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
+ dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
return hw_pipe;
-
-blk_init_error:
- kzfree(hw_pipe);
-
- return ERR_PTR(rc);
}
void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index 4d81e5f5ce1b..119b4e1c16be 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -392,16 +392,6 @@ struct dpu_hw_pipe {
};
/**
- * dpu_hw_pipe - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_pipe, base);
-}
-
-/**
* dpu_hw_sspp_init - initializes the sspp hw driver object.
* Should be called once before accessing each pipe.
* @idx: Pipe index for which driver object is required
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index b8781256e21b..a041597bb849 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_top.h"
-#include "dpu_dbg.h"
#include "dpu_kms.h"
#define SSPP_SPARE 0x28
@@ -322,10 +321,7 @@ static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
return ERR_PTR(-EINVAL);
}
-static struct dpu_hw_blk_ops dpu_hw_ops = {
- .start = NULL,
- .stop = NULL,
-};
+static struct dpu_hw_blk_ops dpu_hw_ops;
struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
void __iomem *addr,
@@ -333,7 +329,6 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
{
struct dpu_hw_mdp *mdp;
const struct dpu_mdp_cfg *cfg;
- int rc;
if (!addr || !m)
return ERR_PTR(-EINVAL);
@@ -355,20 +350,9 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
mdp->caps = cfg;
_setup_mdp_ops(&mdp->ops, mdp->caps->features);
- rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
- if (rc) {
- DPU_ERROR("failed to init hw blk %d\n", rc);
- goto blk_init_error;
- }
-
- dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+ dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
return mdp;
-
-blk_init_error:
- kzfree(mdp);
-
- return ERR_PTR(rc);
}
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 192e338f20bb..aa21fd834398 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -161,16 +161,6 @@ struct dpu_hw_mdp {
};
/**
- * to_dpu_hw_mdp - convert base object dpu_hw_base to container
- * @hw: Pointer to base hardware block
- * return: Pointer to hardware block container
- */
-static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
-{
- return container_of(hw, struct dpu_hw_mdp, base);
-}
-
-/**
* dpu_hw_mdptop_init - initializes the top driver for the passed idx
* @idx: Interface index for which driver object is required
* @addr: Mapped register io address of MDP
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
index d43905525f92..38bfd222ed72 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -13,7 +13,6 @@
#include "dpu_hwio.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_vbif.h"
-#include "dpu_dbg.h"
#define VBIF_VERSION 0x0000
#define VBIF_CLK_FORCE_CTRL0 0x0008
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index b557687b1964..78833c2c27f8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -16,6 +16,8 @@
#include <linux/err.h>
#include <linux/delay.h>
+#include <drm/drm_print.h>
+
#include "dpu_io_util.h"
void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
@@ -164,7 +166,7 @@ int msm_dss_parse_clock(struct platform_device *pdev,
"clock-names", i,
&clock_name);
if (rc) {
- dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "Failed to get clock name for %d\n",
i);
break;
}
@@ -176,13 +178,13 @@ int msm_dss_parse_clock(struct platform_device *pdev,
rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
if (rc) {
- dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+ DRM_DEV_ERROR(&pdev->dev, "Failed to get clock refs %d\n", rc);
goto err;
}
rc = of_clk_set_defaults(pdev->dev.of_node, false);
if (rc) {
- dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+ DRM_DEV_ERROR(&pdev->dev, "Failed to set clock defaults %d\n", rc);
goto err;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
deleted file mode 100644
index d5e6ce0140cf..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-
-#include "dpu_irq.h"
-#include "dpu_core_irq.h"
-
-irqreturn_t dpu_irq(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- return dpu_core_irq(dpu_kms);
-}
-
-void dpu_irq_preinstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- if (!dpu_kms->dev || !dpu_kms->dev->dev) {
- pr_err("invalid device handles\n");
- return;
- }
-
- dpu_core_irq_preinstall(dpu_kms);
-}
-
-int dpu_irq_postinstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
- int rc;
-
- if (!kms) {
- DPU_ERROR("invalid parameters\n");
- return -EINVAL;
- }
-
- rc = dpu_core_irq_postinstall(dpu_kms);
-
- return rc;
-}
-
-void dpu_irq_uninstall(struct msm_kms *kms)
-{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
-
- if (!kms) {
- DPU_ERROR("invalid parameters\n");
- return;
- }
-
- dpu_core_irq_uninstall(dpu_kms);
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
deleted file mode 100644
index 3e147f7176e2..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __DPU_IRQ_H__
-#define __DPU_IRQ_H__
-
-#include <linux/kernel.h>
-#include <linux/irqdomain.h>
-
-#include "msm_kms.h"
-
-/**
- * dpu_irq_controller - define MDSS level interrupt controller context
- * @enabled_mask: enable status of MDSS level interrupt
- * @domain: interrupt domain of this controller
- */
-struct dpu_irq_controller {
- unsigned long enabled_mask;
- struct irq_domain *domain;
-};
-
-/**
- * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
- * @kms: pointer to kms context
- * @return: none
- */
-void dpu_irq_preinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
- * @kms: pointer to kms context
- * @return: 0 if success; error code otherwise
- */
-int dpu_irq_postinstall(struct msm_kms *kms);
-
-/**
- * dpu_irq_uninstall - uninstall MDSS IRQ handler
- * @drm_dev: pointer to kms context
- * @return: none
- */
-void dpu_irq_uninstall(struct msm_kms *kms);
-
-/**
- * dpu_irq - MDSS level IRQ handler
- * @kms: pointer to kms context
- * @return: interrupt handling status
- */
-irqreturn_t dpu_irq(struct msm_kms *kms);
-
-#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 0a683e65a9f3..4d67b3c96702 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -81,7 +81,7 @@ static int _dpu_danger_signal_status(struct seq_file *s,
struct dpu_danger_safe_status status;
int i;
- if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+ if (!kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
DPU_ERROR("invalid arg(s)\n");
return 0;
}
@@ -138,46 +138,29 @@ static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
-static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove_recursive(dpu_kms->debugfs_danger);
- dpu_kms->debugfs_danger = NULL;
-}
-
-static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
struct dentry *parent)
{
- dpu_kms->debugfs_danger = debugfs_create_dir("danger",
- parent);
- if (!dpu_kms->debugfs_danger) {
- DPU_ERROR("failed to create danger debugfs\n");
- return -EINVAL;
- }
+ struct dentry *entry = debugfs_create_dir("danger", parent);
+ if (IS_ERR_OR_NULL(entry))
+ return;
- debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+ debugfs_create_file("danger_status", 0600, entry,
dpu_kms, &dpu_debugfs_danger_stats_fops);
- debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+ debugfs_create_file("safe_status", 0600, entry,
dpu_kms, &dpu_debugfs_safe_stats_fops);
-
- return 0;
}
static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
{
- struct dpu_debugfs_regset32 *regset;
- struct dpu_kms *dpu_kms;
+ struct dpu_debugfs_regset32 *regset = s->private;
+ struct dpu_kms *dpu_kms = regset->dpu_kms;
struct drm_device *dev;
struct msm_drm_private *priv;
void __iomem *base;
uint32_t i, addr;
- if (!s || !s->private)
- return 0;
-
- regset = s->private;
-
- dpu_kms = regset->dpu_kms;
- if (!dpu_kms || !dpu_kms->mmio)
+ if (!dpu_kms->mmio)
return 0;
dev = dpu_kms->dev;
@@ -250,57 +233,24 @@ void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
{
- void *p;
- int rc;
-
- p = dpu_hw_util_get_log_mask_ptr();
+ void *p = dpu_hw_util_get_log_mask_ptr();
+ struct dentry *entry;
- if (!dpu_kms || !p)
+ if (!p)
return -EINVAL;
- dpu_kms->debugfs_root = debugfs_create_dir("debug",
- dpu_kms->dev->primary->debugfs_root);
- if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
- DRM_ERROR("debugfs create_dir failed %ld\n",
- PTR_ERR(dpu_kms->debugfs_root));
- return PTR_ERR(dpu_kms->debugfs_root);
- }
-
- rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
- if (rc) {
- DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
- return rc;
- }
+ entry = debugfs_create_dir("debug", dpu_kms->dev->primary->debugfs_root);
+ if (IS_ERR_OR_NULL(entry))
+ return -ENODEV;
/* allow root to be NULL */
- debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
-
- (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
- (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
- (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
-
- rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
- if (rc) {
- DPU_ERROR("failed to init perf %d\n", rc);
- return rc;
- }
+ debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
- return 0;
-}
+ dpu_debugfs_danger_init(dpu_kms, entry);
+ dpu_debugfs_vbif_init(dpu_kms, entry);
+ dpu_debugfs_core_irq_init(dpu_kms, entry);
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
- /* don't need to NULL check debugfs_root */
- if (dpu_kms) {
- dpu_debugfs_vbif_destroy(dpu_kms);
- dpu_debugfs_danger_destroy(dpu_kms);
- dpu_debugfs_core_irq_destroy(dpu_kms);
- debugfs_remove_recursive(dpu_kms->debugfs_root);
- }
-}
-#else
-static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
-{
+ return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
#endif
@@ -320,7 +270,10 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
struct dpu_kms *dpu_kms;
struct msm_drm_private *priv;
struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
struct drm_encoder *encoder;
+ int i;
if (!kms)
return;
@@ -332,9 +285,13 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
priv = dev->dev_private;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
- if (encoder->crtc != NULL)
+ /* Call prepare_commit for all affected encoders */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ drm_for_each_encoder_mask(encoder, crtc->dev,
+ crtc_state->encoder_mask) {
dpu_encoder_prepare_commit(encoder);
+ }
+ }
}
/*
@@ -344,15 +301,20 @@ static void dpu_kms_prepare_commit(struct msm_kms *kms,
void dpu_kms_encoder_enable(struct drm_encoder *encoder)
{
const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
- struct drm_crtc *crtc = encoder->crtc;
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc;
/* Forward this enable call to the commit hook */
if (funcs && funcs->commit)
funcs->commit(encoder);
- if (crtc && crtc->state->active) {
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_for_each_crtc(crtc, dev) {
+ if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
+ continue;
+
trace_dpu_kms_enc_enable(DRMID(crtc));
- dpu_crtc_commit_kickoff(crtc);
+ dpu_crtc_commit_kickoff(crtc, false);
}
}
@@ -369,7 +331,8 @@ static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
if (crtc->state->active) {
trace_dpu_kms_commit(DRMID(crtc));
- dpu_crtc_commit_kickoff(crtc);
+ dpu_crtc_commit_kickoff(crtc,
+ state->legacy_cursor_update);
}
}
}
@@ -613,22 +576,7 @@ fail:
#ifdef CONFIG_DEBUG_FS
static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
- struct dpu_kms *dpu_kms = to_dpu_kms(kms);
- struct drm_device *dev;
- int rc;
-
- if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
- DPU_ERROR("invalid dpu_kms\n");
- return -EINVAL;
- }
-
- dev = dpu_kms->dev;
-
- rc = _dpu_debugfs_init(dpu_kms);
- if (rc)
- DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
-
- return rc;
+ return _dpu_debugfs_init(to_dpu_kms(kms));
}
#endif
@@ -651,12 +599,7 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_hw_intr_destroy(dpu_kms->hw_intr);
dpu_kms->hw_intr = NULL;
- if (dpu_kms->power_event)
- dpu_power_handle_unregister_event(
- &dpu_kms->phandle, dpu_kms->power_event);
-
/* safe to call these more than once during shutdown */
- _dpu_debugfs_destroy(dpu_kms);
_dpu_kms_mmu_destroy(dpu_kms);
if (dpu_kms->catalog) {
@@ -676,11 +619,6 @@ static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
dpu_hw_catalog_deinit(dpu_kms->catalog);
dpu_kms->catalog = NULL;
- if (dpu_kms->core_client)
- dpu_power_client_destroy(&dpu_kms->phandle,
- dpu_kms->core_client);
- dpu_kms->core_client = NULL;
-
if (dpu_kms->vbif[VBIF_NRT])
devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
dpu_kms->vbif[VBIF_NRT] = NULL;
@@ -705,131 +643,9 @@ static void dpu_kms_destroy(struct msm_kms *kms)
dpu_kms = to_dpu_kms(kms);
- dpu_dbg_destroy();
_dpu_kms_hw_destroy(dpu_kms);
}
-static int dpu_kms_pm_suspend(struct device *dev)
-{
- struct drm_device *ddev;
- struct drm_modeset_acquire_ctx ctx;
- struct drm_atomic_state *state;
- struct dpu_kms *dpu_kms;
- int ret = 0, num_crtcs = 0;
-
- if (!dev)
- return -EINVAL;
-
- ddev = dev_get_drvdata(dev);
- if (!ddev || !ddev_to_msm_kms(ddev))
- return -EINVAL;
-
- dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
- /* disable hot-plug polling */
- drm_kms_helper_poll_disable(ddev);
-
- /* acquire modeset lock(s) */
- drm_modeset_acquire_init(&ctx, 0);
-
-retry:
- DPU_ATRACE_BEGIN("kms_pm_suspend");
-
- ret = drm_modeset_lock_all_ctx(ddev, &ctx);
- if (ret)
- goto unlock;
-
- /* save current state for resume */
- if (dpu_kms->suspend_state)
- drm_atomic_state_put(dpu_kms->suspend_state);
- dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
- if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
- DRM_ERROR("failed to back up suspend state\n");
- dpu_kms->suspend_state = NULL;
- goto unlock;
- }
-
- /* create atomic state to disable all CRTCs */
- state = drm_atomic_state_alloc(ddev);
- if (IS_ERR_OR_NULL(state)) {
- DRM_ERROR("failed to allocate crtc disable state\n");
- goto unlock;
- }
-
- state->acquire_ctx = &ctx;
-
- /* check for nothing to do */
- if (num_crtcs == 0) {
- DRM_DEBUG("all crtcs are already in the off state\n");
- drm_atomic_state_put(state);
- goto suspended;
- }
-
- /* commit the "disable all" state */
- ret = drm_atomic_commit(state);
- if (ret < 0) {
- DRM_ERROR("failed to disable crtcs, %d\n", ret);
- drm_atomic_state_put(state);
- goto unlock;
- }
-
-suspended:
- dpu_kms->suspend_block = true;
-
-unlock:
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- }
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- DPU_ATRACE_END("kms_pm_suspend");
- return 0;
-}
-
-static int dpu_kms_pm_resume(struct device *dev)
-{
- struct drm_device *ddev;
- struct dpu_kms *dpu_kms;
- int ret;
-
- if (!dev)
- return -EINVAL;
-
- ddev = dev_get_drvdata(dev);
- if (!ddev || !ddev_to_msm_kms(ddev))
- return -EINVAL;
-
- dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
-
- DPU_ATRACE_BEGIN("kms_pm_resume");
-
- drm_mode_config_reset(ddev);
-
- drm_modeset_lock_all(ddev);
-
- dpu_kms->suspend_block = false;
-
- if (dpu_kms->suspend_state) {
- dpu_kms->suspend_state->acquire_ctx =
- ddev->mode_config.acquire_ctx;
- ret = drm_atomic_commit(dpu_kms->suspend_state);
- if (ret < 0) {
- DRM_ERROR("failed to restore state, %d\n", ret);
- drm_atomic_state_put(dpu_kms->suspend_state);
- }
- dpu_kms->suspend_state = NULL;
- }
- drm_modeset_unlock_all(ddev);
-
- /* enable hot-plug polling */
- drm_kms_helper_poll_enable(ddev);
-
- DPU_ATRACE_END("kms_pm_resume");
- return 0;
-}
-
static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode)
@@ -858,10 +674,30 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
encoder->base.id, rc);
}
+static irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ return dpu_core_irq(dpu_kms);
+}
+
+static void dpu_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ dpu_core_irq_preinstall(dpu_kms);
+}
+
+static void dpu_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ dpu_core_irq_uninstall(dpu_kms);
+}
+
static const struct msm_kms_funcs kms_funcs = {
.hw_init = dpu_kms_hw_init,
.irq_preinstall = dpu_irq_preinstall,
- .irq_postinstall = dpu_irq_postinstall,
.irq_uninstall = dpu_irq_uninstall,
.irq = dpu_irq,
.prepare_commit = dpu_kms_prepare_commit,
@@ -873,8 +709,6 @@ static const struct msm_kms_funcs kms_funcs = {
.check_modified_format = dpu_format_check_modified_format,
.get_format = dpu_get_msm_format,
.round_pixclk = dpu_kms_round_pixclk,
- .pm_suspend = dpu_kms_pm_suspend,
- .pm_resume = dpu_kms_pm_resume,
.destroy = dpu_kms_destroy,
.set_encoder_mode = _dpu_kms_set_encoder_mode,
#ifdef CONFIG_DEBUG_FS
@@ -882,12 +716,6 @@ static const struct msm_kms_funcs kms_funcs = {
#endif
};
-/* the caller api needs to turn on clock before calling it */
-static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
-{
- dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
-}
-
static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
{
struct msm_mmu *mmu;
@@ -911,6 +739,9 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
if (!domain)
return 0;
+ domain->geometry.aperture_start = 0x1000;
+ domain->geometry.aperture_end = 0xffffffff;
+
aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
domain, "dpu1");
if (IS_ERR(aspace)) {
@@ -960,16 +791,6 @@ u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
return clk_get_rate(clk->clk);
}
-static void dpu_kms_handle_power_event(u32 event_type, void *usr)
-{
- struct dpu_kms *dpu_kms = usr;
-
- if (!dpu_kms)
- return;
-
- dpu_vbif_init_memtypes(dpu_kms);
-}
-
static int dpu_kms_hw_init(struct msm_kms *kms)
{
struct dpu_kms *dpu_kms;
@@ -979,26 +800,20 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
if (!kms) {
DPU_ERROR("invalid kms\n");
- goto end;
+ return rc;
}
dpu_kms = to_dpu_kms(kms);
dev = dpu_kms->dev;
if (!dev) {
DPU_ERROR("invalid device\n");
- goto end;
- }
-
- rc = dpu_dbg_init(&dpu_kms->pdev->dev);
- if (rc) {
- DRM_ERROR("failed to init dpu dbg: %d\n", rc);
- goto end;
+ return rc;
}
priv = dev->dev_private;
if (!priv) {
DPU_ERROR("invalid private data\n");
- goto dbg_destroy;
+ return rc;
}
dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
@@ -1036,20 +851,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
}
- dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
- "core");
- if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
- rc = PTR_ERR(dpu_kms->core_client);
- if (!dpu_kms->core_client)
- rc = -EINVAL;
- DPU_ERROR("dpu power client create failed: %d\n", rc);
- dpu_kms->core_client = NULL;
- goto error;
- }
-
pm_runtime_get_sync(&dpu_kms->pdev->dev);
- _dpu_kms_core_hw_rev_init(dpu_kms);
+ dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
@@ -1063,8 +867,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto power_error;
}
- dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
-
/*
* Now we need to read the HW catalog and initialize resources such as
* clocks, regulators, GDSC/MMAGIC, register ranges to ioremap, etc.
@@ -1110,7 +912,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
}
rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
- &dpu_kms->phandle,
_dpu_kms_get_clk(dpu_kms, "core"));
if (rc) {
DPU_ERROR("failed to init perf %d\n", rc);
@@ -1151,13 +952,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
- /*
- * Handle (re)initializations during power enable
- */
- dpu_kms_handle_power_event(DPU_POWER_EVENT_ENABLE, dpu_kms);
- dpu_kms->power_event = dpu_power_handle_register_event(
- &dpu_kms->phandle, DPU_POWER_EVENT_ENABLE,
- dpu_kms_handle_power_event, dpu_kms, "kms");
+ dpu_vbif_init_memtypes(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
@@ -1171,9 +966,7 @@ power_error:
pm_runtime_put_sync(&dpu_kms->pdev->dev);
error:
_dpu_kms_hw_destroy(dpu_kms);
-dbg_destroy:
- dpu_dbg_destroy();
-end:
+
return rc;
}
@@ -1221,8 +1014,6 @@ static int dpu_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- dpu_power_resource_init(pdev, &dpu_kms->phandle);
-
platform_set_drvdata(pdev, dpu_kms);
msm_kms_init(&dpu_kms->base, &kms_funcs);
@@ -1242,7 +1033,6 @@ static void dpu_unbind(struct device *dev, struct device *master, void *data)
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
struct dss_module_power *mp = &dpu_kms->mp;
- dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
mp->num_clk = 0;
@@ -1278,19 +1068,13 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
- goto exit;
+ return rc;
}
- rc = dpu_power_resource_enable(&dpu_kms->phandle,
- dpu_kms->core_client, false);
- if (rc)
- DPU_ERROR("resource disable failed: %d\n", rc);
-
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
if (rc)
DPU_ERROR("clock disable failed rc:%d\n", rc);
-exit:
return rc;
}
@@ -1299,27 +1083,27 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
int rc = -1;
struct platform_device *pdev = to_platform_device(dev);
struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_encoder *encoder;
struct drm_device *ddev;
struct dss_module_power *mp = &dpu_kms->mp;
ddev = dpu_kms->dev;
if (!ddev) {
DPU_ERROR("invalid drm_device\n");
- goto exit;
+ return rc;
}
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {
DPU_ERROR("clock enable failed rc:%d\n", rc);
- goto exit;
+ return rc;
}
- rc = dpu_power_resource_enable(&dpu_kms->phandle,
- dpu_kms->core_client, true);
- if (rc)
- DPU_ERROR("resource enable failed: %d\n", rc);
+ dpu_vbif_init_memtypes(dpu_kms);
+
+ drm_for_each_encoder(encoder, ddev)
+ dpu_encoder_virt_runtime_resume(encoder);
-exit:
return rc;
}
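Two threads run through the dpu_kms.c changes above. First, the legacy
encoder->crtc pointer is not maintained for atomic drivers; the live
crtc/encoder binding is the encoder_mask in the crtc state, which is
what both prepare_commit and encoder_enable now walk. Sketch of the
lookup that replaces the old pointer chase:

	/* find the crtc currently driving this encoder (atomic-safe) */
	drm_for_each_crtc(crtc, dev) {
		if (crtc->state->encoder_mask & drm_encoder_mask(encoder))
			break;	/* this crtc drives the encoder */
	}

Second, the dpu_power_handle plumbing is gone: runtime PM does the
work directly, with dpu_runtime_resume() reprogramming the VBIF memory
types and letting each encoder restore its own state.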
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 66d466628e2b..ac75cfc267f4 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -23,15 +23,13 @@
#include "msm_kms.h"
#include "msm_mmu.h"
#include "msm_gem.h"
-#include "dpu_dbg.h"
#include "dpu_hw_catalog.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_top.h"
+#include "dpu_io_util.h"
#include "dpu_rm.h"
-#include "dpu_power_handle.h"
-#include "dpu_irq.h"
#include "dpu_core_perf.h"
#define DRMID(x) ((x) ? (x)->base.id : -1)
@@ -104,7 +102,6 @@ struct dpu_irq {
atomic_t *enable_counts;
atomic_t *irq_counts;
spinlock_t cb_lock;
- struct dentry *debugfs_file;
};
struct dpu_kms {
@@ -113,15 +110,6 @@ struct dpu_kms {
int core_rev;
struct dpu_mdss_cfg *catalog;
- struct dpu_power_handle phandle;
- struct dpu_power_client *core_client;
- struct dpu_power_event *power_event;
-
- /* directory entry for debugfs */
- struct dentry *debugfs_root;
- struct dentry *debugfs_danger;
- struct dentry *debugfs_vbif;
-
/* io/register spaces: */
void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
@@ -135,10 +123,6 @@ struct dpu_kms {
struct dpu_core_perf perf;
- /* saved atomic state during system suspend */
- struct drm_atomic_state *suspend_state;
- bool suspend_block;
-
struct dpu_rm rm;
bool rm_init;
@@ -164,33 +148,6 @@ struct vsync_info {
((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
/**
- * dpu_kms_is_suspend_state - whether or not the system is pm suspended
- * @dev: Pointer to drm device
- * Return: Suspend status
- */
-static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
-{
- if (!ddev_to_msm_kms(dev))
- return false;
-
- return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
-}
-
-/**
- * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
- * suspend status
- * @dev: Pointer to drm device
- * Return: True if commits should be rejected due to pm suspend
- */
-static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
-{
- if (!dpu_kms_is_suspend_state(dev))
- return false;
-
- return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
-}
-
-/**
* Debugfs functions - extra helper functions for debugfs support
*
* Main debugfs documentation is located at,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 2235ef8129f4..cb307a2abf06 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -9,6 +9,11 @@
#define HW_INTR_STATUS 0x0010
+struct dpu_irq_controller {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+};
+
struct dpu_mdss {
struct msm_mdss base;
void __iomem *mmio;
@@ -115,13 +120,12 @@ static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
return 0;
}
-static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+static void _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
{
if (dpu_mdss->irq_controller.domain) {
irq_domain_remove(dpu_mdss->irq_controller.domain);
dpu_mdss->irq_controller.domain = NULL;
}
- return 0;
}
static int dpu_mdss_enable(struct msm_mdss *mdss)
{
@@ -156,18 +160,16 @@ static void dpu_mdss_destroy(struct drm_device *dev)
struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
+ pm_runtime_suspend(dev->dev);
+ pm_runtime_disable(dev->dev);
_dpu_mdss_irq_domain_fini(dpu_mdss);
-
free_irq(platform_get_irq(pdev, 0), dpu_mdss);
-
msm_dss_put_clk(mp->clk_config, mp->num_clk);
devm_kfree(&pdev->dev, mp->clk_config);
if (dpu_mdss->mmio)
devm_iounmap(&pdev->dev, dpu_mdss->mmio);
dpu_mdss->mmio = NULL;
-
- pm_runtime_disable(dev->dev);
priv->mdss = NULL;
}
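On the destroy-path reordering just above: pm_runtime_disable() by
itself leaves the device in whatever power state it happened to be in,
so the suspend is forced first, while the clocks and IRQ domain still
exist. Sketch of the required order:

	pm_runtime_suspend(dev->dev);	/* run the suspend callback now */
	pm_runtime_disable(dev->dev);	/* then stop runtime PM for good */
	/* only then tear down the IRQ domain, clocks and mappings */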
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index f549daf30fe6..fd75870eb17f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -137,7 +137,7 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
* @src_width: width of source buffer
* Return: fill level corresponding to the source buffer/format or 0 if error
*/
-static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
const struct dpu_format *fmt, u32 src_width)
{
struct dpu_plane *pdpu, *tmp;
@@ -430,24 +430,14 @@ static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
}
-/**
- * _dpu_plane_get_aspace: gets the address space
- */
-static inline struct msm_gem_address_space *_dpu_plane_get_aspace(
- struct dpu_plane *pdpu)
-{
- struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
-
- return kms->base.aspace;
-}
-
-static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+static void _dpu_plane_set_scanout(struct drm_plane *plane,
struct dpu_plane_state *pstate,
struct dpu_hw_pipe_cfg *pipe_cfg,
struct drm_framebuffer *fb)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
- struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
+ struct msm_gem_address_space *aspace = kms->base.aspace;
int ret;
ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
@@ -525,7 +515,7 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->enable = 1;
}
-static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+static void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
{
static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
{
@@ -801,7 +791,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
struct dma_fence *fence;
- struct msm_gem_address_space *aspace = _dpu_plane_get_aspace(pdpu);
+ struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
int ret;
if (!new_state->fb)
@@ -810,7 +800,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
/* cache aspace */
- pstate->aspace = aspace;
+ pstate->aspace = kms->base.aspace;
/*
* TODO: Need to sort out the msm_framebuffer_prepare() call below so
@@ -1179,8 +1169,6 @@ static void dpu_plane_destroy(struct drm_plane *plane)
mutex_destroy(&pdpu->lock);
- drm_plane_helper_disable(plane, NULL);
-
/* this will destroy the states as well */
drm_plane_cleanup(plane);
@@ -1193,19 +1181,8 @@ static void dpu_plane_destroy(struct drm_plane *plane)
static void dpu_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct dpu_plane_state *pstate;
-
- if (!plane || !state) {
- DPU_ERROR("invalid arg(s), plane %d state %d\n",
- plane != 0, state != 0);
- return;
- }
-
- pstate = to_dpu_plane_state(state);
-
__drm_atomic_helper_plane_destroy_state(state);
-
- kfree(pstate);
+ kfree(to_dpu_plane_state(state));
}
static struct drm_plane_state *
@@ -1275,26 +1252,12 @@ static ssize_t _dpu_plane_danger_read(struct file *file,
char __user *buff, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
- struct dpu_mdss_cfg *cfg = kms->catalog;
- int len = 0;
- char buf[40] = {'\0'};
-
- if (!cfg)
- return -ENODEV;
-
- if (*ppos)
- return 0; /* the end */
+ int len;
+ char buf[40];
- len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
- if (len < 0 || len >= sizeof(buf))
- return 0;
-
- if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
- return -EFAULT;
+ len = scnprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
- *ppos += len; /* increase offset */
-
- return len;
+ return simple_read_from_buffer(buff, count, ppos, buf, len);
}
static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
@@ -1324,23 +1287,12 @@ static ssize_t _dpu_plane_danger_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
struct dpu_kms *kms = file->private_data;
- struct dpu_mdss_cfg *cfg = kms->catalog;
int disable_panic;
- char buf[10];
-
- if (!cfg)
- return -EFAULT;
-
- if (count >= sizeof(buf))
- return -EFAULT;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- buf[count] = 0; /* end of string */
+ int ret;
- if (kstrtoint(buf, 0, &disable_panic))
- return -EFAULT;
+ ret = kstrtouint_from_user(user_buf, count, 0, &disable_panic);
+ if (ret)
+ return ret;
if (disable_panic) {
/* Disable panic signal for all active pipes */
@@ -1365,33 +1317,10 @@ static const struct file_operations dpu_plane_danger_enable = {
static int _dpu_plane_init_debugfs(struct drm_plane *plane)
{
- struct dpu_plane *pdpu;
- struct dpu_kms *kms;
- struct msm_drm_private *priv;
- const struct dpu_sspp_sub_blks *sblk = 0;
- const struct dpu_sspp_cfg *cfg = 0;
-
- if (!plane || !plane->dev) {
- DPU_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
- priv = plane->dev->dev_private;
- if (!priv || !priv->kms) {
- DPU_ERROR("invalid KMS reference\n");
- return -EINVAL;
- }
-
- kms = to_dpu_kms(priv->kms);
- pdpu = to_dpu_plane(plane);
-
- if (pdpu && pdpu->pipe_hw)
- cfg = pdpu->pipe_hw->cap;
- if (cfg)
- sblk = cfg->sblk;
-
- if (!sblk)
- return 0;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_kms *kms = _dpu_plane_get_kms(plane);
+ const struct dpu_sspp_cfg *cfg = pdpu->pipe_hw->cap;
+ const struct dpu_sspp_sub_blks *sblk = cfg->sblk;
/* create overall sub-directory for the pipe */
pdpu->debugfs_root =
@@ -1462,25 +1391,11 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
return 0;
}
-
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
- struct dpu_plane *pdpu;
-
- if (!plane)
- return;
- pdpu = to_dpu_plane(plane);
-
- debugfs_remove_recursive(pdpu->debugfs_root);
-}
#else
static int _dpu_plane_init_debugfs(struct drm_plane *plane)
{
return 0;
}
-static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
-{
-}
#endif
static int dpu_plane_late_register(struct drm_plane *plane)
@@ -1490,7 +1405,9 @@ static int dpu_plane_late_register(struct drm_plane *plane)
static void dpu_plane_early_unregister(struct drm_plane *plane)
{
- _dpu_plane_destroy_debugfs(plane);
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+
+ debugfs_remove_recursive(pdpu->debugfs_root);
}
static const struct drm_plane_funcs dpu_plane_funcs = {
@@ -1539,7 +1456,7 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
if (!pdpu) {
DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
ret = -ENOMEM;
- goto exit;
+ return ERR_PTR(ret);
}
/* cache local stuff for later */
@@ -1625,6 +1542,5 @@ clean_sspp:
dpu_hw_sspp_destroy(pdpu->pipe_hw);
clean_plane:
kfree(pdpu);
-exit:
return ERR_PTR(ret);
}
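The danger_read/danger_write rewrites above land on the canonical
kernel idioms for tiny debugfs attributes: format into a stack buffer
and let simple_read_from_buffer() handle *ppos, short reads and the
user copy, and parse input with kstrtouint_from_user() instead of a
hand-rolled copy/terminate/kstrtoint dance. A minimal sketch of the
read side (foo_show_value is a stand-in, not driver code):

	static ssize_t foo_read(struct file *file, char __user *ubuf,
				size_t count, loff_t *ppos)
	{
		char buf[16];
		int len = scnprintf(buf, sizeof(buf), "%d\n", foo_show_value());

		return simple_read_from_buffer(ubuf, count, ppos, buf, len);
	}

Unlike the removed code, this pattern handles partial reads and
repeated read() calls correctly for free.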
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
deleted file mode 100644
index fc14116789f2..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
+++ /dev/null
@@ -1,240 +0,0 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/string.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/of_platform.h>
-
-#include "dpu_power_handle.h"
-#include "dpu_trace.h"
-
-static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
- [DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
- [DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
- [DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
-};
-
-const char *dpu_power_handle_get_dbus_name(u32 bus_id)
-{
- if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
- return data_bus_name[bus_id];
-
- return NULL;
-}
-
-static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
- u32 event_type)
-{
- struct dpu_power_event *event;
-
- list_for_each_entry(event, &phandle->event_list, list) {
- if (event->event_type & event_type)
- event->cb_fnc(event_type, event->usr);
- }
-}
-
-struct dpu_power_client *dpu_power_client_create(
- struct dpu_power_handle *phandle, char *client_name)
-{
- struct dpu_power_client *client;
- static u32 id;
-
- if (!client_name || !phandle) {
- pr_err("client name is null or invalid power data\n");
- return ERR_PTR(-EINVAL);
- }
-
- client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
- if (!client)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&phandle->phandle_lock);
- strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
- client->usecase_ndx = VOTE_INDEX_DISABLE;
- client->id = id;
- client->active = true;
- pr_debug("client %s created:%pK id :%d\n", client_name,
- client, id);
- id++;
- list_add(&client->list, &phandle->power_client_clist);
- mutex_unlock(&phandle->phandle_lock);
-
- return client;
-}
-
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
- struct dpu_power_client *client)
-{
- if (!client || !phandle) {
- pr_err("reg bus vote: invalid client handle\n");
- } else if (!client->active) {
- pr_err("dpu power deinit already done\n");
- kfree(client);
- } else {
- pr_debug("bus vote client %s destroyed:%pK id:%u\n",
- client->name, client, client->id);
- mutex_lock(&phandle->phandle_lock);
- list_del_init(&client->list);
- mutex_unlock(&phandle->phandle_lock);
- kfree(client);
- }
-}
-
-void dpu_power_resource_init(struct platform_device *pdev,
- struct dpu_power_handle *phandle)
-{
- phandle->dev = &pdev->dev;
-
- INIT_LIST_HEAD(&phandle->power_client_clist);
- INIT_LIST_HEAD(&phandle->event_list);
-
- mutex_init(&phandle->phandle_lock);
-}
-
-void dpu_power_resource_deinit(struct platform_device *pdev,
- struct dpu_power_handle *phandle)
-{
- struct dpu_power_client *curr_client, *next_client;
- struct dpu_power_event *curr_event, *next_event;
-
- if (!phandle || !pdev) {
- pr_err("invalid input param\n");
- return;
- }
-
- mutex_lock(&phandle->phandle_lock);
- list_for_each_entry_safe(curr_client, next_client,
- &phandle->power_client_clist, list) {
- pr_err("client:%s-%d still registered with refcount:%d\n",
- curr_client->name, curr_client->id,
- curr_client->refcount);
- curr_client->active = false;
- list_del(&curr_client->list);
- }
-
- list_for_each_entry_safe(curr_event, next_event,
- &phandle->event_list, list) {
- pr_err("event:%d, client:%s still registered\n",
- curr_event->event_type,
- curr_event->client_name);
- curr_event->active = false;
- list_del(&curr_event->list);
- }
- mutex_unlock(&phandle->phandle_lock);
-}
-
-int dpu_power_resource_enable(struct dpu_power_handle *phandle,
- struct dpu_power_client *pclient, bool enable)
-{
- bool changed = false;
- u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
- struct dpu_power_client *client;
- u32 event_type;
-
- if (!phandle || !pclient) {
- pr_err("invalid input argument\n");
- return -EINVAL;
- }
-
- mutex_lock(&phandle->phandle_lock);
- if (enable)
- pclient->refcount++;
- else if (pclient->refcount)
- pclient->refcount--;
-
- if (pclient->refcount)
- pclient->usecase_ndx = VOTE_INDEX_LOW;
- else
- pclient->usecase_ndx = VOTE_INDEX_DISABLE;
-
- list_for_each_entry(client, &phandle->power_client_clist, list) {
- if (client->usecase_ndx < VOTE_INDEX_MAX &&
- client->usecase_ndx > max_usecase_ndx)
- max_usecase_ndx = client->usecase_ndx;
- }
-
- if (phandle->current_usecase_ndx != max_usecase_ndx) {
- changed = true;
- prev_usecase_ndx = phandle->current_usecase_ndx;
- phandle->current_usecase_ndx = max_usecase_ndx;
- }
-
- pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
- __builtin_return_address(0), changed, max_usecase_ndx,
- pclient->name, pclient->id, enable, pclient->refcount);
-
- if (!changed)
- goto end;
-
- event_type = enable ? DPU_POWER_EVENT_ENABLE : DPU_POWER_EVENT_DISABLE;
-
- dpu_power_event_trigger_locked(phandle, event_type);
-end:
- mutex_unlock(&phandle->phandle_lock);
- return 0;
-}
-
-struct dpu_power_event *dpu_power_handle_register_event(
- struct dpu_power_handle *phandle,
- u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
- void *usr, char *client_name)
-{
- struct dpu_power_event *event;
-
- if (!phandle) {
- pr_err("invalid power handle\n");
- return ERR_PTR(-EINVAL);
- } else if (!cb_fnc || !event_type) {
- pr_err("no callback fnc or event type\n");
- return ERR_PTR(-EINVAL);
- }
-
- event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
- if (!event)
- return ERR_PTR(-ENOMEM);
-
- event->event_type = event_type;
- event->cb_fnc = cb_fnc;
- event->usr = usr;
- strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
- event->active = true;
-
- mutex_lock(&phandle->phandle_lock);
- list_add(&event->list, &phandle->event_list);
- mutex_unlock(&phandle->phandle_lock);
-
- return event;
-}
-
-void dpu_power_handle_unregister_event(
- struct dpu_power_handle *phandle,
- struct dpu_power_event *event)
-{
- if (!phandle || !event) {
- pr_err("invalid phandle or event\n");
- } else if (!event->active) {
- pr_err("power handle deinit already done\n");
- kfree(event);
- } else {
- mutex_lock(&phandle->phandle_lock);
- list_del_init(&event->list);
- mutex_unlock(&phandle->phandle_lock);
- kfree(event);
- }
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
deleted file mode 100644
index a65b7a297f21..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _DPU_POWER_HANDLE_H_
-#define _DPU_POWER_HANDLE_H_
-
-#define MAX_CLIENT_NAME_LEN 128
-
-#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
-#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
-#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
-#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
-
-#include "dpu_io_util.h"
-
-/* events will be triggered on power handler enable/disable */
-#define DPU_POWER_EVENT_DISABLE BIT(0)
-#define DPU_POWER_EVENT_ENABLE BIT(1)
-
-/**
- * mdss_bus_vote_type: register bus vote type
- * VOTE_INDEX_DISABLE: removes the client vote
- * VOTE_INDEX_LOW: keeps the lowest vote for register bus
- * VOTE_INDEX_MAX: invalid
- */
-enum mdss_bus_vote_type {
- VOTE_INDEX_DISABLE,
- VOTE_INDEX_LOW,
- VOTE_INDEX_MAX,
-};
-
-/**
- * enum dpu_power_handle_data_bus_client - type of axi bus clients
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
- * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
- */
-enum dpu_power_handle_data_bus_client {
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
- DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
-};
-
-/**
- * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
- * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
- * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
- */
-enum DPU_POWER_HANDLE_DBUS_ID {
- DPU_POWER_HANDLE_DBUS_ID_MNOC,
- DPU_POWER_HANDLE_DBUS_ID_LLCC,
- DPU_POWER_HANDLE_DBUS_ID_EBI,
- DPU_POWER_HANDLE_DBUS_ID_MAX,
-};
-
-/**
- * struct dpu_power_client: stores the power client for dpu driver
- * @name: name of the client
- * @usecase_ndx: current regs bus vote type
- * @refcount: current refcount if multiple modules are using same
- * same client for enable/disable. Power module will
- * aggregate the refcount and vote accordingly for this
- * client.
- * @id: assigned during creation; helps with debugging.
- * @list: list to attach power handle master list
- * @ab: arbitrated bandwidth for each bus client
- * @ib: instantaneous bandwidth for each bus client
- * @active: indicates the state of dpu power handle
- */
-struct dpu_power_client {
- char name[MAX_CLIENT_NAME_LEN];
- short usecase_ndx;
- short refcount;
- u32 id;
- struct list_head list;
- u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
- u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
- bool active;
-};
-
-/*
- * struct dpu_power_event - local event registration structure
- * @client_name: name of the client registering
- * @cb_fnc: pointer to desired callback function
- * @usr: user pointer to pass to callback event trigger
- * @event_type: refer to DPU_POWER_EVENT_*
- * @list: list to attach event master list
- * @active: indicates the state of dpu power handle
- */
-struct dpu_power_event {
- char client_name[MAX_CLIENT_NAME_LEN];
- void (*cb_fnc)(u32 event_type, void *usr);
- void *usr;
- u32 event_type;
- struct list_head list;
- bool active;
-};
-
-/**
- * struct dpu_power_handle: power handle main struct
- * @power_client_clist: master list to store all clients
- * @phandle_lock: lock to synchronize the enable/disable
- * @dev: pointer to device structure
- * @current_usecase_ndx: current usecase index
- * @event_list: current power handle event list
- */
-struct dpu_power_handle {
- struct list_head power_client_clist;
- struct mutex phandle_lock;
- struct device *dev;
- u32 current_usecase_ndx;
- struct list_head event_list;
-};
-
-/**
- * dpu_power_resource_init() - initializes the dpu power handle
- * @pdev: platform device to search the power resources
- * @pdata: power handle to store the power resources
- */
-void dpu_power_resource_init(struct platform_device *pdev,
- struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_resource_deinit() - release the dpu power handle
- * @pdev: platform device for power resources
- * @pdata: power handle containing the resources
- *
- * Return: none
- */
-void dpu_power_resource_deinit(struct platform_device *pdev,
- struct dpu_power_handle *pdata);
-
-/**
- * dpu_power_client_create() - create the client on power handle
- * @pdata: power handle containing the resources
- * @client_name: new client name for registration
- *
- * Return: pointer to the new client, or ERR_PTR on failure.
- */
-struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
- char *client_name);
-
-/**
- * dpu_power_client_destroy() - destroy the client on power handle
- * @phandle: power handle containing the resources
- * @client: client handle to destroy
- *
- * Return: none
- */
-void dpu_power_client_destroy(struct dpu_power_handle *phandle,
- struct dpu_power_client *client);
-
-/**
- * dpu_power_resource_enable() - enable/disable the power resources
- * @pdata: power handle containing the resources
- * @client: client information to enable/disable its vote
- * @enable: boolean request for enable/disable
- *
- * Return: error code.
- */
-int dpu_power_resource_enable(struct dpu_power_handle *pdata,
- struct dpu_power_client *pclient, bool enable);
-
-/**
- * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
- * @phandle: power handle containing the resources
- * @client: client information to bandwidth control
- * @enable: true to enable bandwidth for data bus
- *
- * Return: none
- */
-void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
- struct dpu_power_client *pclient, int enable);
-
-/**
- * dpu_power_handle_register_event - register a callback function for an event.
- * Clients can register for multiple events with a single registration.
- * Any block with access to phandle can register for the event
- * notification.
- * @phandle: power handle containing the resources
- * @event_type: event type to register; refer to DPU_POWER_EVENT_*
- * @cb_fnc: pointer to desired callback function
- * @usr: user pointer to pass to callback on event trigger
- *
- * Return: event pointer if success, or error code otherwise
- */
-struct dpu_power_event *dpu_power_handle_register_event(
- struct dpu_power_handle *phandle,
- u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
- void *usr, char *client_name);
-/**
- * dpu_power_handle_unregister_event - unregister callback for event(s)
- * @phandle: power handle containing the resources
- * @event: event pointer returned after power handle register
- */
-void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
- struct dpu_power_event *event);
-
-/**
- * dpu_power_handle_get_dbus_name - get name of given data bus identifier
- * @bus_id: data bus identifier
- * Return: Pointer to name string if success; NULL otherwise
- */
-const char *dpu_power_handle_get_dbus_name(u32 bus_id);
-
-#endif /* _DPU_POWER_HANDLE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index e12c4cefb742..c78b521ceda1 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -99,27 +99,6 @@ TRACE_EVENT(dpu_perf_set_ot,
__entry->vbif_idx)
)
-TRACE_EVENT(dpu_perf_update_bus,
- TP_PROTO(int client, unsigned long long ab_quota,
- unsigned long long ib_quota),
- TP_ARGS(client, ab_quota, ib_quota),
- TP_STRUCT__entry(
- __field(int, client)
- __field(u64, ab_quota)
- __field(u64, ib_quota)
- ),
- TP_fast_assign(
- __entry->client = client;
- __entry->ab_quota = ab_quota;
- __entry->ib_quota = ib_quota;
- ),
- TP_printk("Request client:%d ab=%llu ib=%llu",
- __entry->client,
- __entry->ab_quota,
- __entry->ib_quota)
-)
-
-
TRACE_EVENT(dpu_cmd_release_bw,
TP_PROTO(u32 crtc_id),
TP_ARGS(crtc_id),
@@ -319,6 +298,10 @@ DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
TP_PROTO(uint32_t drm_id),
TP_ARGS(drm_id)
);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_runtime_resume,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
TRACE_EVENT(dpu_enc_enable,
TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
@@ -539,10 +522,6 @@ DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event)
);
-DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
- TP_PROTO(uint32_t drm_id, u32 event),
- TP_ARGS(drm_id, event)
-);
DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
TP_PROTO(uint32_t drm_id, u32 event),
TP_ARGS(drm_id, event)
@@ -749,24 +728,17 @@ TRACE_EVENT(dpu_crtc_vblank_enable,
__field( uint32_t, enc_id )
__field( bool, enable )
__field( bool, enabled )
- __field( bool, suspend )
- __field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enc_id = enc_id;
__entry->enable = enable;
__entry->enabled = crtc->enabled;
- __entry->suspend = crtc->suspend;
- __entry->vblank_requested = crtc->vblank_requested;
),
- TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
- "vblank_req:%s}",
+ TP_printk("id:%u encoder:%u enable:%s state{enabled:%s}",
__entry->drm_id, __entry->enc_id,
__entry->enable ? "true" : "false",
- __entry->enabled ? "true" : "false",
- __entry->suspend ? "true" : "false",
- __entry->vblank_requested ? "true" : "false")
+ __entry->enabled ? "true" : "false")
);
DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
@@ -776,25 +748,15 @@ DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
__field( uint32_t, drm_id )
__field( bool, enable )
__field( bool, enabled )
- __field( bool, suspend )
- __field( bool, vblank_requested )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->enable = enable;
__entry->enabled = crtc->enabled;
- __entry->suspend = crtc->suspend;
- __entry->vblank_requested = crtc->vblank_requested;
),
- TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+ TP_printk("id:%u enable:%s state{enabled:%s}",
__entry->drm_id, __entry->enable ? "true" : "false",
- __entry->enabled ? "true" : "false",
- __entry->suspend ? "true" : "false",
- __entry->vblank_requested ? "true" : "false")
-);
-DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
- TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
- TP_ARGS(drm_id, enable, crtc)
+ __entry->enabled ? "true" : "false")
);
DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
@@ -1004,6 +966,53 @@ TRACE_EVENT(dpu_core_perf_update_clk,
__entry->stop_req ? "true" : "false", __entry->clk_rate)
);
+TRACE_EVENT(dpu_hw_ctl_update_pending_flush,
+ TP_PROTO(u32 new_bits, u32 pending_mask),
+ TP_ARGS(new_bits, pending_mask),
+ TP_STRUCT__entry(
+ __field( u32, new_bits )
+ __field( u32, pending_mask )
+ ),
+ TP_fast_assign(
+ __entry->new_bits = new_bits;
+ __entry->pending_mask = pending_mask;
+ ),
+ TP_printk("new=%x existing=%x", __entry->new_bits,
+ __entry->pending_mask)
+);
+
+DECLARE_EVENT_CLASS(dpu_hw_ctl_pending_flush_template,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush),
+ TP_STRUCT__entry(
+ __field( u32, pending_mask )
+ __field( u32, ctl_flush )
+ ),
+ TP_fast_assign(
+ __entry->pending_mask = pending_mask;
+ __entry->ctl_flush = ctl_flush;
+ ),
+ TP_printk("pending_mask=%x CTL_FLUSH=%x", __entry->pending_mask,
+ __entry->ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_clear_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template,
+ dpu_hw_ctl_trigger_pending_flush,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_prepare,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+DEFINE_EVENT(dpu_hw_ctl_pending_flush_template, dpu_hw_ctl_trigger_start,
+ TP_PROTO(u32 pending_mask, u32 ctl_flush),
+ TP_ARGS(pending_mask, ctl_flush)
+);
+
#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
index 295528292296..ef753ea9c499 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -191,7 +191,7 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
if (ot_lim == 0)
- goto exit;
+ return;
trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
params->vbif_idx);
@@ -210,8 +210,6 @@ void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
if (forced_on)
mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
-exit:
- return;
}
void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
@@ -312,31 +310,25 @@ void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
}
#ifdef CONFIG_DEBUG_FS
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
- debugfs_remove_recursive(dpu_kms->debugfs_vbif);
- dpu_kms->debugfs_vbif = NULL;
-}
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
{
char vbif_name[32];
- struct dentry *debugfs_vbif;
+ struct dentry *entry, *debugfs_vbif;
int i, j;
- dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
- if (!dpu_kms->debugfs_vbif) {
- DPU_ERROR("failed to create vbif debugfs\n");
- return -EINVAL;
- }
+ entry = debugfs_create_dir("vbif", debugfs_root);
+ if (IS_ERR_OR_NULL(entry))
+ return;
for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
- debugfs_vbif = debugfs_create_dir(vbif_name,
- dpu_kms->debugfs_vbif);
+ debugfs_vbif = debugfs_create_dir(vbif_name, entry);
+ if (IS_ERR_OR_NULL(debugfs_vbif))
+ continue;
debugfs_create_u32("features", 0600, debugfs_vbif,
(u32 *)&vbif->features);
@@ -378,7 +370,5 @@ int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
(u32 *)&cfg->ot_limit);
}
}
-
- return 0;
}
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
index f17af52dbbd5..6356876d7a66 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -78,17 +78,6 @@ void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
*/
void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
-#ifdef CONFIG_DEBUG_FS
-int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
-void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
-#else
-static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
- struct dentry *debugfs_root)
-{
- return 0;
-}
-static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
-{
-}
-#endif
+void dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+
#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
index 4f12e5c534c8..9fc9dbde8a27 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -813,18 +813,6 @@ enum color_fmts {
#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
#define COLOR_FMT_P010 COLOR_FMT_P010
-static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
-{
- (void)height;
- (void)width;
-
- /*
- * In the future, calculate the size based on the w/h but just
- * hardcode it for now since 16K satisfies all current usecases.
- */
- return 16 * 1024;
-}
-
/*
* Function arguments:
* @color_fmt
@@ -832,38 +820,32 @@ static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
{
- unsigned int alignment, stride = 0;
+ unsigned int stride = 0;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width, alignment);
+ stride = MSM_MEDIA_ALIGN(width, 128);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
- alignment = 256;
stride = MSM_MEDIA_ALIGN(width, 192);
- stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
break;
case COLOR_FMT_P010_UBWC:
- alignment = 256;
- stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
break;
case COLOR_FMT_P010:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width*2, alignment);
- break;
- default:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
break;
}
-invalid_input:
+
return stride;
}
@@ -874,38 +856,32 @@ invalid_input:
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
{
- unsigned int alignment, stride = 0;
+ unsigned int stride = 0;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
case COLOR_FMT_NV12:
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width, alignment);
+ stride = MSM_MEDIA_ALIGN(width, 128);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
- alignment = 256;
stride = MSM_MEDIA_ALIGN(width, 192);
- stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ stride = MSM_MEDIA_ALIGN(stride * 4 / 3, 256);
break;
case COLOR_FMT_P010_UBWC:
- alignment = 256;
- stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ stride = MSM_MEDIA_ALIGN(width * 2, 256);
break;
case COLOR_FMT_P010:
- alignment = 128;
- stride = MSM_MEDIA_ALIGN(width*2, alignment);
- break;
- default:
+ stride = MSM_MEDIA_ALIGN(width * 2, 128);
break;
}
-invalid_input:
+
return stride;
}
@@ -916,12 +892,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
{
- unsigned int alignment, sclines = 0;
+ unsigned int sclines = 0;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
@@ -929,17 +905,14 @@ static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
case COLOR_FMT_NV12_MVTB:
case COLOR_FMT_NV12_UBWC:
case COLOR_FMT_P010:
- alignment = 32;
+ sclines = MSM_MEDIA_ALIGN(height, 32);
break;
case COLOR_FMT_NV12_BPP10_UBWC:
case COLOR_FMT_P010_UBWC:
- alignment = 16;
+ sclines = MSM_MEDIA_ALIGN(height, 16);
break;
- default:
- return 0;
}
- sclines = MSM_MEDIA_ALIGN(height, alignment);
-invalid_input:
+
return sclines;
}
@@ -950,12 +923,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
{
- unsigned int alignment, sclines = 0;
+ unsigned int sclines = 0;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV21:
@@ -964,18 +937,13 @@ static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
case COLOR_FMT_NV12_BPP10_UBWC:
case COLOR_FMT_P010_UBWC:
case COLOR_FMT_P010:
- alignment = 16;
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 16);
break;
case COLOR_FMT_NV12_UBWC:
- alignment = 32;
+ sclines = MSM_MEDIA_ALIGN((height + 1) >> 1, 32);
break;
- default:
- goto invalid_input;
}
- sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
-
-invalid_input:
return sclines;
}
@@ -986,12 +954,12 @@ invalid_input:
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
{
- int y_tile_width = 0, y_meta_stride = 0;
+ int y_tile_width = 0, y_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1002,14 +970,11 @@ static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
y_tile_width = 48;
break;
default:
- goto invalid_input;
+ return 0;
}
y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
- y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
-
-invalid_input:
- return y_meta_stride;
+ return MSM_MEDIA_ALIGN(y_meta_stride, 64);
}
/*
@@ -1019,12 +984,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
{
- int y_tile_height = 0, y_meta_scanlines = 0;
+ int y_tile_height = 0, y_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1035,14 +1000,11 @@ static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
y_tile_height = 4;
break;
default:
- goto invalid_input;
+ return 0;
}
y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
- y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
-
-invalid_input:
- return y_meta_scanlines;
+ return MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
}
/*
@@ -1052,12 +1014,12 @@ invalid_input:
* Progressive: width
* Interlaced: width
*/
-static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
{
- int uv_tile_width = 0, uv_meta_stride = 0;
+ int uv_tile_width = 0, uv_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1068,14 +1030,11 @@ static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
uv_tile_width = 24;
break;
default:
- goto invalid_input;
+ return 0;
}
uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
- uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
-
-invalid_input:
- return uv_meta_stride;
+ return MSM_MEDIA_ALIGN(uv_meta_stride, 64);
}
/*
@@ -1085,12 +1044,12 @@ invalid_input:
* Progressive: height
* Interlaced: (height+1)>>1
*/
-static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
{
- int uv_tile_height = 0, uv_meta_scanlines = 0;
+ int uv_tile_height = 0, uv_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_NV12_UBWC:
@@ -1101,22 +1060,19 @@ static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
uv_tile_height = 4;
break;
default:
- goto invalid_input;
+ return 0;
}
uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
- uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
-
-invalid_input:
- return uv_meta_scanlines;
+ return MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
}
-static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
{
- unsigned int alignment = 0, stride = 0, bpp = 4;
+ unsigned int alignment = 0, bpp = 4;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888:
@@ -1131,21 +1087,18 @@ static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
alignment = 256;
break;
default:
- goto invalid_input;
+ return 0;
}
- stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
-
-invalid_input:
- return stride;
+ return MSM_MEDIA_ALIGN(width * bpp, alignment);
}
-static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
{
- unsigned int alignment = 0, scanlines = 0;
+ unsigned int alignment = 0;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888:
@@ -1157,220 +1110,46 @@ static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
alignment = 16;
break;
default:
- goto invalid_input;
+ return 0;
}
- scanlines = MSM_MEDIA_ALIGN(height, alignment);
-
-invalid_input:
- return scanlines;
+ return MSM_MEDIA_ALIGN(height, alignment);
}
-static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+static unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
{
- int rgb_tile_width = 0, rgb_meta_stride = 0;
+ int rgb_meta_stride;
if (!width)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888_UBWC:
case COLOR_FMT_RGBA1010102_UBWC:
case COLOR_FMT_RGB565_UBWC:
- rgb_tile_width = 16;
- break;
- default:
- goto invalid_input;
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, 16);
+ return MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
}
- rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
- rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
-
-invalid_input:
- return rgb_meta_stride;
+ return 0;
}
-static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+static unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
{
- int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+ int rgb_meta_scanlines;
if (!height)
- goto invalid_input;
+ return 0;
switch (color_fmt) {
case COLOR_FMT_RGBA8888_UBWC:
case COLOR_FMT_RGBA1010102_UBWC:
case COLOR_FMT_RGB565_UBWC:
- rgb_tile_height = 4;
- break;
- default:
- goto invalid_input;
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, 4);
+ return MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
}
- rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
- rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
-
-invalid_input:
- return rgb_meta_scanlines;
-}
-
-/*
- * Function arguments:
- * @color_fmt
- * @width
- * Progressive: width
- * Interlaced: width
- * @height
- * Progressive: height
- * Interlaced: height
- */
-static inline unsigned int VENUS_BUFFER_SIZE(
- int color_fmt, int width, int height)
-{
- const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
- unsigned int uv_alignment = 0, size = 0;
- unsigned int y_plane, uv_plane, y_stride,
- uv_stride, y_sclines, uv_sclines;
- unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
- unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
- unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
- unsigned int y_meta_plane = 0, uv_meta_plane = 0;
- unsigned int rgb_stride = 0, rgb_scanlines = 0;
- unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
- unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
-
- if (!width || !height)
- goto invalid_input;
-
- y_stride = VENUS_Y_STRIDE(color_fmt, width);
- uv_stride = VENUS_UV_STRIDE(color_fmt, width);
- y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
- rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
- rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
-
- switch (color_fmt) {
- case COLOR_FMT_NV21:
- case COLOR_FMT_NV12:
- case COLOR_FMT_P010:
- uv_alignment = 4096;
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + uv_alignment;
- size = y_plane + uv_plane +
- MSM_MEDIA_MAX(extra_size, 8 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_MVTB:
- uv_alignment = 4096;
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines + uv_alignment;
- size = y_plane + uv_plane;
- size = 2 * size + extra_size;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_UBWC:
- y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines =
- VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines =
- VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane)*2 +
- MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_NV12_BPP10_UBWC:
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane +
- MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_P010_UBWC:
- y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
- uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
- y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
- y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
- y_meta_plane = MSM_MEDIA_ALIGN(
- y_meta_stride * y_meta_scanlines, 4096);
- uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
- uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
- uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
- uv_meta_scanlines, 4096);
-
- size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
- uv_meta_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_RGBA8888:
- rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
- size = rgb_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- case COLOR_FMT_RGBA8888_UBWC:
- case COLOR_FMT_RGBA1010102_UBWC:
- case COLOR_FMT_RGB565_UBWC:
- rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
- 4096);
- rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
- rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
- height);
- rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
- rgb_meta_scanlines, 4096);
- size = rgb_ubwc_plane + rgb_meta_plane;
- size = MSM_MEDIA_ALIGN(size, 4096);
- break;
- default:
- break;
- }
-invalid_input:
- return size;
-}
-
-static inline unsigned int VENUS_VIEW2_OFFSET(
- int color_fmt, int width, int height)
-{
- unsigned int offset = 0;
- unsigned int y_plane, uv_plane, y_stride,
- uv_stride, y_sclines, uv_sclines;
- if (!width || !height)
- goto invalid_input;
-
- y_stride = VENUS_Y_STRIDE(color_fmt, width);
- uv_stride = VENUS_UV_STRIDE(color_fmt, width);
- y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
- uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
- switch (color_fmt) {
- case COLOR_FMT_NV12_MVTB:
- y_plane = y_stride * y_sclines;
- uv_plane = uv_stride * uv_sclines;
- offset = y_plane + uv_plane;
- break;
- default:
- break;
- }
-invalid_input:
- return offset;
+ return 0;
}
#endif
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 457c29dba4a1..8f2359dc87b4 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -128,7 +128,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
struct msm_kms *kms = &mdp4_kms->base.base;
- msm_gem_put_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
@@ -384,7 +384,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_get(next_bo);
- msm_gem_get_iova(next_bo, kms->aspace, &iova);
+ msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
@@ -429,7 +429,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
- dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
@@ -442,7 +442,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, kms->aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
index ba8e587f734b..a8fd14d4846b 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
@@ -45,7 +45,7 @@ static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
if (!dtv_pdata) {
- dev_err(dev->dev, "could not find dtv pdata\n");
+ DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
return;
}
@@ -209,16 +209,16 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
if (ret)
- dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to set mdp_clk to %lu: %d\n",
pc, ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
if (ret)
- dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
if (ret)
- dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
@@ -258,14 +258,14 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
- dev_err(dev->dev, "failed to get hdmi_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get hdmi_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
goto fail;
}
mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
- dev_err(dev->dev, "failed to get tv_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get tv_clk\n");
ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 44d1cda56974..e437aa806f7b 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -43,7 +43,7 @@ static int mdp4_hw_init(struct msm_kms *kms)
DBG("found MDP4 version v%d.%d", major, minor);
if (major != 4) {
- dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto out;
@@ -165,7 +165,7 @@ static void mdp4_destroy(struct msm_kms *kms)
struct msm_gem_address_space *aspace = kms->aspace;
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
+ msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
@@ -206,7 +206,8 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
clk_disable_unprepare(mdp4_kms->clk);
if (mdp4_kms->pclk)
clk_disable_unprepare(mdp4_kms->pclk);
- clk_disable_unprepare(mdp4_kms->lut_clk);
+ if (mdp4_kms->lut_clk)
+ clk_disable_unprepare(mdp4_kms->lut_clk);
if (mdp4_kms->axi_clk)
clk_disable_unprepare(mdp4_kms->axi_clk);
@@ -220,7 +221,8 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
clk_prepare_enable(mdp4_kms->clk);
if (mdp4_kms->pclk)
clk_prepare_enable(mdp4_kms->pclk);
- clk_prepare_enable(mdp4_kms->lut_clk);
+ if (mdp4_kms->lut_clk)
+ clk_prepare_enable(mdp4_kms->lut_clk);
if (mdp4_kms->axi_clk)
clk_prepare_enable(mdp4_kms->axi_clk);
@@ -251,7 +253,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
encoder = mdp4_lcdc_encoder_init(dev, panel_node);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct LCDC encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n");
return PTR_ERR(encoder);
}
@@ -260,7 +262,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
connector = mdp4_lvds_connector_init(dev, panel_node, encoder);
if (IS_ERR(connector)) {
- dev_err(dev->dev, "failed to initialize LVDS connector\n");
+ DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n");
return PTR_ERR(connector);
}
@@ -271,7 +273,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
case DRM_MODE_ENCODER_TMDS:
encoder = mdp4_dtv_encoder_init(dev);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct DTV encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct DTV encoder\n");
return PTR_ERR(encoder);
}
@@ -282,7 +284,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
/* Construct bridge/connector for HDMI: */
ret = msm_hdmi_modeset_init(priv->hdmi, dev, encoder);
if (ret) {
- dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to initialize HDMI: %d\n", ret);
return ret;
}
}
@@ -300,7 +302,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
encoder = mdp4_dsi_encoder_init(dev);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct DSI encoder: %d\n", ret);
return ret;
}
@@ -311,14 +313,14 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms,
ret = msm_dsi_modeset_init(priv->dsi[dsi_id], dev, encoder);
if (ret) {
- dev_err(dev->dev, "failed to initialize DSI: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to initialize DSI: %d\n",
ret);
return ret;
}
break;
default:
- dev_err(dev->dev, "Invalid or unsupported interface\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid or unsupported interface\n");
return -EINVAL;
}
@@ -354,7 +356,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(vg_planes); i++) {
plane = mdp4_plane_init(dev, vg_planes[i], false);
if (IS_ERR(plane)) {
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct plane for VG%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
@@ -365,7 +367,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(mdp4_crtcs); i++) {
plane = mdp4_plane_init(dev, rgb_planes[i], true);
if (IS_ERR(plane)) {
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to construct plane for RGB%d\n", i + 1);
ret = PTR_ERR(plane);
goto fail;
@@ -374,7 +376,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, i,
mdp4_crtcs[i]);
if (IS_ERR(crtc)) {
- dev_err(dev->dev, "failed to construct crtc for %s\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc for %s\n",
mdp4_crtc_names[i]);
ret = PTR_ERR(crtc);
goto fail;
@@ -396,7 +398,7 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
for (i = 0; i < ARRAY_SIZE(mdp4_intfs); i++) {
ret = mdp4_modeset_init_intf(mdp4_kms, mdp4_intfs[i]);
if (ret) {
- dev_err(dev->dev, "failed to initialize intf: %d, %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to initialize intf: %d, %d\n",
i, ret);
goto fail;
}
@@ -419,7 +421,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms) {
- dev_err(dev->dev, "failed to allocate kms\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate kms\n");
ret = -ENOMEM;
goto fail;
}
@@ -439,7 +441,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -456,14 +458,14 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (mdp4_kms->vdd) {
ret = regulator_enable(mdp4_kms->vdd);
if (ret) {
- dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", ret);
goto fail;
}
}
mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(mdp4_kms->clk)) {
- dev_err(dev->dev, "failed to get core_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get core_clk\n");
ret = PTR_ERR(mdp4_kms->clk);
goto fail;
}
@@ -472,23 +474,25 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (IS_ERR(mdp4_kms->pclk))
mdp4_kms->pclk = NULL;
- // XXX if (rev >= MDP_REV_42) { ???
- mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
- if (IS_ERR(mdp4_kms->lut_clk)) {
- dev_err(dev->dev, "failed to get lut_clk\n");
- ret = PTR_ERR(mdp4_kms->lut_clk);
- goto fail;
+ if (mdp4_kms->rev >= 2) {
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk)) {
+ DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
+ ret = PTR_ERR(mdp4_kms->lut_clk);
+ goto fail;
+ }
}
mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
if (IS_ERR(mdp4_kms->axi_clk)) {
- dev_err(dev->dev, "failed to get axi_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
ret = PTR_ERR(mdp4_kms->axi_clk);
goto fail;
}
clk_set_rate(mdp4_kms->clk, config->max_clk);
- clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+ if (mdp4_kms->lut_clk)
+ clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
pm_runtime_enable(dev->dev);
mdp4_kms->rpm_enabled = true;
@@ -519,29 +523,29 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
if (ret)
goto fail;
} else {
- dev_info(dev->dev, "no iommu, fallback to phys "
+ DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
aspace = NULL;
}
ret = modeset_init(mdp4_kms);
if (ret) {
- dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
- mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+ mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC | MSM_BO_SCANOUT);
if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
- dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
mdp4_kms->blank_cursor_bo = NULL;
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
- dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 2bfb39082f54..c9e34501a89e 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -47,7 +47,7 @@ static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
if (!lcdc_pdata) {
- dev_err(dev->dev, "could not find lvds pdata\n");
+ DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
return;
}
@@ -224,7 +224,7 @@ static void setup_phy(struct drm_encoder *encoder)
break;
default:
- dev_err(dev->dev, "unknown bpp: %d\n", bpp);
+ DRM_DEV_ERROR(dev->dev, "unknown bpp: %d\n", bpp);
return;
}
@@ -241,7 +241,7 @@ static void setup_phy(struct drm_encoder *encoder)
MDP4_LCDC_LVDS_INTF_CTL_CH1_CLK_LANE_EN;
break;
default:
- dev_err(dev->dev, "unknown # of channels: %d\n", nchan);
+ DRM_DEV_ERROR(dev->dev, "unknown # of channels: %d\n", nchan);
return;
}
@@ -361,7 +361,7 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_disable(mdp4_lcdc_encoder->regs[i]);
if (ret)
- dev_err(dev->dev, "failed to disable regulator: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
}
bs_set(mdp4_lcdc_encoder, 0);
@@ -377,20 +377,25 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
unsigned long pc = mdp4_lcdc_encoder->pixclock;
struct mdp4_kms *mdp4_kms = get_kms(encoder);
struct drm_panel *panel;
+ uint32_t config;
int i, ret;
if (WARN_ON(mdp4_lcdc_encoder->enabled))
return;
/* TODO: hard-coded for 18bpp: */
- mdp4_crtc_set_config(encoder->crtc,
- MDP4_DMA_CONFIG_R_BPC(BPC6) |
- MDP4_DMA_CONFIG_G_BPC(BPC6) |
- MDP4_DMA_CONFIG_B_BPC(BPC6) |
- MDP4_DMA_CONFIG_PACK_ALIGN_MSB |
- MDP4_DMA_CONFIG_PACK(0x21) |
- MDP4_DMA_CONFIG_DEFLKR_EN |
- MDP4_DMA_CONFIG_DITHER_EN);
+ config =
+ MDP4_DMA_CONFIG_R_BPC(BPC6) |
+ MDP4_DMA_CONFIG_G_BPC(BPC6) |
+ MDP4_DMA_CONFIG_B_BPC(BPC6) |
+ MDP4_DMA_CONFIG_PACK(0x21) |
+ MDP4_DMA_CONFIG_DEFLKR_EN |
+ MDP4_DMA_CONFIG_DITHER_EN;
+
+ if (!of_property_read_bool(dev->dev->of_node, "qcom,lcdc-align-lsb"))
+ config |= MDP4_DMA_CONFIG_PACK_ALIGN_MSB;
+
+ mdp4_crtc_set_config(encoder->crtc, config);
mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
bs_set(mdp4_lcdc_encoder, 1);
@@ -398,16 +403,16 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
if (ret)
- dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator: %d\n", ret);
}
DBG("setting lcdc_clk=%lu", pc);
ret = clk_set_rate(mdp4_lcdc_encoder->lcdc_clk, pc);
if (ret)
- dev_err(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to configure lcdc_clk: %d\n", ret);
ret = clk_prepare_enable(mdp4_lcdc_encoder->lcdc_clk);
if (ret)
- dev_err(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable lcdc_clk: %d\n", ret);
panel = of_drm_find_panel(mdp4_lcdc_encoder->panel_node);
if (!IS_ERR(panel)) {
@@ -461,7 +466,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
/* TODO: do we need different pll in other cases? */
mdp4_lcdc_encoder->lcdc_clk = mpd4_lvds_pll_init(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
- dev_err(dev->dev, "failed to get lvds_clk\n");
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
ret = PTR_ERR(mdp4_lcdc_encoder->lcdc_clk);
goto fail;
}
@@ -470,7 +475,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-vccs-3p3v");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vccs-3p3v: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[0] = reg;
@@ -478,7 +483,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-pll-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-pll-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[1] = reg;
@@ -486,7 +491,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
reg = devm_regulator_get(dev->dev, "lvds-vdda");
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev->dev, "failed to get lvds-vdda: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get lvds-vdda: %d\n", ret);
goto fail;
}
mdp4_lcdc_encoder->regs[2] = reg;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 79ff653d8081..005066f7154d 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -68,7 +68,6 @@ static void mdp4_plane_destroy(struct drm_plane *plane)
{
struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp4_plane);
@@ -235,22 +234,22 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
format = to_mdp_format(msm_framebuffer_format(fb));
if (src_w > (crtc_w * DOWN_SCALE_MAX)) {
- dev_err(dev->dev, "Width down scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Width down scaling exceeds limits!\n");
return -ERANGE;
}
if (src_h > (crtc_h * DOWN_SCALE_MAX)) {
- dev_err(dev->dev, "Height down scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Height down scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_w > (src_w * UP_SCALE_MAX)) {
- dev_err(dev->dev, "Width up scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Width up scaling exceeds limits!\n");
return -ERANGE;
}
if (crtc_h > (src_h * UP_SCALE_MAX)) {
- dev_err(dev->dev, "Height up scaling exceeds limits!\n");
+ DRM_DEV_ERROR(dev->dev, "Height up scaling exceeds limits!\n");
return -ERANGE;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 824067d2d427..ea8f7d7daf7f 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -553,6 +553,91 @@ const struct mdp5_cfg_hw msm8x96_config = {
.max_clk = 412500000,
};
+const struct mdp5_cfg_hw msm8917_config = {
+ .name = "msm8917",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
+ .nb_stages = 8,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x70000 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ },
+ },
+ .max_clk = 320000000,
+};
+
static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 0, .config = { .hw = &msm8x74v1_config } },
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
@@ -560,6 +645,7 @@ static const struct mdp5_cfg_handler cfg_handlers[] = {
{ .revision = 6, .config = { .hw = &msm8x16_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
+ { .revision = 15, .config = { .hw = &msm8917_config } },
};
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
@@ -600,7 +686,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
}
if (major != 1) {
- dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP major version: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
@@ -615,7 +701,7 @@ struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
break;
}
if (unlikely(!mdp5_cfg)) {
- dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
+ DRM_DEV_ERROR(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
major, minor);
ret = -ENXIO;
goto fail;
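
[The new msm8917 entry slots into the existing revision table; msm8917 is MDP 1.15, hence ".revision = 15" alongside the "major != 1" check in the same file. A rough sketch of the table walk implied by the hunk context above (variable names are illustrative):]

	/* pick the entry whose .revision matches the minor revision
	 * read from the MDP5 hardware version register
	 */
	const struct mdp5_cfg_hw *cfg = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
		if (cfg_handlers[i].revision == minor) {
			cfg = cfg_handlers[i].config.hw;
			break;
		}
	}
	/* !cfg here hits the "unexpected MDP minor revision" error path */
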
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index d6f79dc755b4..c1962f29ec7d 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -55,20 +55,20 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
int pp_id = mixer->pp;
if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
- dev_err(dev, "vsync_clk is not initialized\n");
+ DRM_DEV_ERROR(dev, "vsync_clk is not initialized\n");
return -EINVAL;
}
total_lines_x100 = mode->vtotal * mode->vrefresh;
if (!total_lines_x100) {
- dev_err(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
+ DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
__func__, mode->vtotal, mode->vrefresh);
return -EINVAL;
}
vsync_clk_speed = clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE);
if (vsync_clk_speed <= 0) {
- dev_err(dev, "vsync_clk round rate failed %ld\n",
+ DRM_DEV_ERROR(dev, "vsync_clk round rate failed %ld\n",
vsync_clk_speed);
return -EINVAL;
}
@@ -102,13 +102,13 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
ret = clk_set_rate(mdp5_kms->vsync_clk,
clk_round_rate(mdp5_kms->vsync_clk, VSYNC_CLK_RATE));
if (ret) {
- dev_err(encoder->dev->dev,
+ DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_set_rate failed, %d\n", ret);
return ret;
}
ret = clk_prepare_enable(mdp5_kms->vsync_clk);
if (ret) {
- dev_err(encoder->dev->dev,
+ DRM_DEV_ERROR(encoder->dev->dev,
"vsync_clk clk_prepare_enable failed, %d\n", ret);
return ret;
}
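
[The tearcheck path above uses the usual round-then-set idiom for the vsync clock: ask the clock framework what rate it can actually deliver near VSYNC_CLK_RATE, then request exactly that. A minimal sketch of the idiom, error handling trimmed; vsync_clk and VSYNC_CLK_RATE are as in the hunks:]

	/* clk_round_rate() changes no hardware state; it only reports
	 * the achievable rate, so the following clk_set_rate() cannot
	 * be rejected as unachievable.
	 */
	long rounded = clk_round_rate(vsync_clk, VSYNC_CLK_RATE);

	if (rounded <= 0)
		return -EINVAL;
	ret = clk_set_rate(vsync_clk, rounded);
	if (!ret)
		ret = clk_prepare_enable(vsync_clk);
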
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index b1da9ce54379..c5fde1a4191a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -173,7 +173,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
struct msm_kms *kms = &mdp5_kms->base.base;
- msm_gem_put_iova(val, kms->aspace);
+ msm_gem_unpin_iova(val, kms->aspace);
drm_gem_object_put_unlocked(val);
}
@@ -662,7 +662,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
if (ret) {
- dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
return ret;
}
@@ -679,7 +679,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
* and that we don't have conflicting mixer stages:
*/
if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
- dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+ DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
cnt, start);
return -EINVAL;
}
@@ -879,7 +879,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
}
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
- dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
return -EINVAL;
}
@@ -903,7 +903,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, kms->aspace,
+ ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
&mdp5_crtc->cursor.iova);
if (ret)
return -EINVAL;
@@ -924,7 +924,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
set_cursor:
ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
- dev_err(dev->dev, "failed to %sable cursor: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
cursor_enable ? "en" : "dis", ret);
goto end;
}
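
[Besides the log-macro conversion, the mdp5_crtc.c hunks pick up the renamed GEM iova helpers: msm_gem_get_iova() becomes msm_gem_get_and_pin_iova() and msm_gem_put_iova() becomes msm_gem_unpin_iova(), making the pin/unpin pairing explicit at call sites. A sketch of the lifecycle around a cursor BO (only the two helpers are taken from the hunks):]

	/* pin the buffer for scanout and record its device address */
	uint64_t iova;
	int ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
	if (ret)
		return ret;

	/* ... program 'iova' into the cursor registers; once the BO is
	 * no longer scanned out, drop the pin again:
	 */
	msm_gem_unpin_iova(cursor_bo, kms->aspace);
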
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index f93d5681267c..65a871f9f0d9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -262,13 +262,13 @@ int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
struct mdp5_hw_mixer *mixer = pipeline->mixer;
if (unlikely(WARN_ON(!mixer))) {
- dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
ctl->id);
return -EINVAL;
}
if (pipeline->r_mixer) {
- dev_err(ctl_mgr->dev->dev, "unsupported configuration");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
return -EINVAL;
}
@@ -604,10 +604,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
return 0;
} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
- dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
return -EINVAL;
} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
- dev_err(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
return -EINVAL;
}
@@ -652,7 +652,7 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
if ((ctl_mgr->ctls[c].status & checkm) == match)
goto found;
- dev_err(ctl_mgr->dev->dev, "No more CTL available!");
+ DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
goto unlock;
found:
@@ -698,13 +698,13 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
if (!ctl_mgr) {
- dev_err(dev->dev, "failed to allocate CTL manager\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
ret = -ENOMEM;
goto fail;
}
if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
- dev_err(dev->dev, "Increase static pool size to at least %d\n",
+ DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count);
ret = -ENOSPC;
goto fail;
@@ -723,7 +723,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
if (WARN_ON(!ctl_cfg->base[c])) {
- dev_err(dev->dev, "CTL_%d: base is null!\n", c);
+ DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
goto fail;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index bddd625ab91b..d27e35a217bd 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -264,7 +264,7 @@ static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install mdp5_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
return ret;
}
@@ -337,7 +337,7 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
encoder = mdp5_encoder_init(dev, intf, ctl);
if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct encoder\n");
+ DRM_DEV_ERROR(dev->dev, "failed to construct encoder\n");
return encoder;
}
@@ -418,7 +418,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
- dev_err(dev->dev, "failed to find dsi from intf %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to find dsi from intf %d\n",
intf->num);
ret = -EINVAL;
break;
@@ -443,7 +443,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
break;
}
default:
- dev_err(dev->dev, "unknown intf: %d\n", intf->type);
+ DRM_DEV_ERROR(dev->dev, "unknown intf: %d\n", intf->type);
ret = -EINVAL;
break;
}
@@ -500,7 +500,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
plane = mdp5_plane_init(dev, type);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
- dev_err(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
+ DRM_DEV_ERROR(dev->dev, "failed to construct plane %d (%d)\n", i, ret);
goto fail;
}
priv->planes[priv->num_planes++] = plane;
@@ -517,7 +517,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
crtc = mdp5_crtc_init(dev, primary[i], cursor[i], i);
if (IS_ERR(crtc)) {
ret = PTR_ERR(crtc);
- dev_err(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
+ DRM_DEV_ERROR(dev->dev, "failed to construct crtc %d (%d)\n", i, ret);
goto fail;
}
priv->crtcs[priv->num_crtcs++] = crtc;
@@ -552,7 +552,7 @@ static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
*major = FIELD(version, MDP5_HW_VERSION_MAJOR);
*minor = FIELD(version, MDP5_HW_VERSION_MINOR);
- dev_info(dev, "MDP5 version v%d.%d", *major, *minor);
+ DRM_DEV_INFO(dev, "MDP5 version v%d.%d", *major, *minor);
}
static int get_clk(struct platform_device *pdev, struct clk **clkp,
@@ -561,7 +561,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
struct device *dev = &pdev->dev;
struct clk *clk = msm_clk_get(pdev, name);
if (IS_ERR(clk) && mandatory) {
- dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
+ DRM_DEV_ERROR(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
return PTR_ERR(clk);
}
if (IS_ERR(clk))
@@ -688,7 +688,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (irq < 0) {
ret = irq;
- dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -724,12 +724,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
- dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
ret);
goto fail;
}
} else {
- dev_info(&pdev->dev,
+ DRM_DEV_INFO(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
aspace = NULL;
}
@@ -738,7 +738,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
ret = modeset_init(mdp5_kms);
if (ret) {
- dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "modeset_init failed: %d\n", ret);
goto fail;
}
@@ -795,7 +795,7 @@ static int construct_pipes(struct mdp5_kms *mdp5_kms, int cnt,
hwpipe = mdp5_pipe_init(pipes[i], offsets[i], caps);
if (IS_ERR(hwpipe)) {
ret = PTR_ERR(hwpipe);
- dev_err(dev->dev, "failed to construct pipe for %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct pipe for %s (%d)\n",
pipe2name(pipes[i]), ret);
return ret;
}
@@ -867,7 +867,7 @@ static int hwmixer_init(struct mdp5_kms *mdp5_kms)
mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
if (IS_ERR(mixer)) {
ret = PTR_ERR(mixer);
- dev_err(dev->dev, "failed to construct LM%d (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to construct LM%d (%d)\n",
i, ret);
return ret;
}
@@ -897,7 +897,7 @@ static int interface_init(struct mdp5_kms *mdp5_kms)
intf = kzalloc(sizeof(*intf), GFP_KERNEL);
if (!intf) {
- dev_err(dev->dev, "failed to construct INTF%d\n", i);
+ DRM_DEV_ERROR(dev->dev, "failed to construct INTF%d\n", i);
return -ENOMEM;
}
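
[get_clk() above distinguishes mandatory from optional clocks: a missing mandatory clock fails the probe, a missing optional one is quietly nulled. A condensed sketch of the helper as implied by the hunk (msm_clk_get() is the driver's own wrapper):]

	static int get_clk_sketch(struct platform_device *pdev, struct clk **clkp,
				  const char *name, bool mandatory)
	{
		struct clk *clk = msm_clk_get(pdev, name);

		if (IS_ERR(clk) && mandatory) {
			DRM_DEV_ERROR(&pdev->dev, "failed to get %s (%ld)\n",
				      name, PTR_ERR(clk));
			return PTR_ERR(clk);
		}
		/* optional clock missing: carry on without it */
		*clkp = IS_ERR(clk) ? NULL : clk;
		return 0;
	}
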
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 1cc4e57f0226..889c2940692c 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -132,7 +132,7 @@ static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
mdp5_mdss);
if (!d) {
- dev_err(dev, "mdss irq domain add failed\n");
+ DRM_DEV_ERROR(dev, "mdss irq domain add failed\n");
return -ENXIO;
}
@@ -246,7 +246,7 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to get clocks: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
@@ -259,7 +259,7 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = regulator_enable(mdp5_mdss->vdd);
if (ret) {
- dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
ret);
goto fail;
}
@@ -267,13 +267,13 @@ int mdp5_mdss_init(struct drm_device *dev)
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to init irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
goto fail_irq;
}
ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
- dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
goto fail_irq;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 7f42c3e68a53..be13140967b4 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -46,7 +46,6 @@ static void mdp5_plane_destroy(struct drm_plane *plane)
{
struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
kfree(mdp5_plane);
@@ -126,7 +125,7 @@ static int mdp5_plane_atomic_set_property(struct drm_plane *plane,
SET_PROPERTY(zpos, ZPOS, uint8_t);
- dev_err(dev->dev, "Invalid property\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
return ret;
@@ -154,7 +153,7 @@ static int mdp5_plane_atomic_get_property(struct drm_plane *plane,
GET_PROPERTY(zpos, ZPOS, uint8_t);
- dev_err(dev->dev, "Invalid property\n");
+ DRM_DEV_ERROR(dev->dev, "Invalid property\n");
ret = -EINVAL;
done:
return ret;
@@ -659,7 +658,7 @@ static int calc_scalex_steps(struct drm_plane *plane,
ret = calc_phase_step(src, dest, &phasex_step);
if (ret) {
- dev_err(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
+ DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}
@@ -684,7 +683,7 @@ static int calc_scaley_steps(struct drm_plane *plane,
ret = calc_phase_step(src, dest, &phasey_step);
if (ret) {
- dev_err(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
+ DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 96c2b828dba4..7cebcb2b3a37 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -88,7 +88,7 @@ static int smp_request_block(struct mdp5_smp *smp,
avail = cnt - bitmap_weight(state->state, cnt);
if (nblks > avail) {
- dev_err(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
+ DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
nblks, avail);
return -ENOSPC;
}
@@ -188,7 +188,7 @@ int mdp5_smp_assign(struct mdp5_smp *smp, struct mdp5_smp_state *state,
DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
ret = smp_request_block(smp, state, cid, n);
if (ret) {
- dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
+ DRM_DEV_ERROR(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
n, ret);
return ret;
}
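
[smp_request_block() sizes its allocation against a shared-memory-pool bitmap; the hunk above only touches the error message, but the accounting around it is worth spelling out. A sketch, assuming state->state is the in-use bitmap and cnt the total block count as in the hunk:]

	/* claimed blocks are set bits; whatever remains is available */
	int avail = cnt - bitmap_weight(state->state, cnt);

	if (nblks > avail) {
		DRM_DEV_ERROR(smp->dev->dev, "out of blks (req=%d > avail=%d)\n",
			      nblks, avail);
		return -ENOSPC;
	}
	/* otherwise set nblks more bits and hand the blocks to the pipe */
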
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index a9768f823290..7b2a1e6a8810 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "cannot find phy device\n");
+ DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
}
@@ -40,7 +40,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
of_node_put(phy_node);
if (!phy_pdev || !msm_dsi->phy) {
- dev_err(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
@@ -210,7 +210,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
if (ret) {
- dev_err(dev->dev, "failed to modeset init host: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to modeset init host: %d\n", ret);
goto fail;
}
@@ -222,7 +222,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
if (IS_ERR(msm_dsi->bridge)) {
ret = PTR_ERR(msm_dsi->bridge);
- dev_err(dev->dev, "failed to create dsi bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create dsi bridge: %d\n", ret);
msm_dsi->bridge = NULL;
goto fail;
}
@@ -244,7 +244,7 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
if (IS_ERR(msm_dsi->connector)) {
ret = PTR_ERR(msm_dsi->connector);
- dev_err(dev->dev,
+ DRM_DEV_ERROR(dev->dev,
"failed to create dsi connector: %d\n", ret);
msm_dsi->connector = NULL;
goto fail;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 9c6c523eacdc..38e481d2d606 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1050,7 +1050,7 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
msecs_to_jiffies(70));
if (ret <= 0)
- dev_err(dev, "wait for video done timed out\n");
+ DRM_DEV_ERROR(dev, "wait for video done timed out\n");
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_VIDEO_DONE, 0);
}
@@ -1083,6 +1083,8 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
return PTR_ERR(data);
}
+ msm_gem_object_set_name(msm_host->tx_gem_obj, "tx_gem");
+
msm_host->tx_size = msm_host->tx_gem_obj->size;
return 0;
@@ -1118,7 +1120,7 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
- msm_gem_put_iova(msm_host->tx_gem_obj, priv->kms->aspace);
+ msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
}
@@ -1248,7 +1250,7 @@ int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
if (!dma_base)
return -EINVAL;
- return msm_gem_get_iova(msm_host->tx_gem_obj,
+ return msm_gem_get_and_pin_iova(msm_host->tx_gem_obj,
priv->kms->aspace, dma_base);
}
@@ -1673,7 +1675,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
prop = of_find_property(ep, "data-lanes", &len);
if (!prop) {
- dev_dbg(dev,
+ DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
return 0;
}
@@ -1681,7 +1683,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
num_lanes = len / sizeof(u32);
if (num_lanes < 1 || num_lanes > 4) {
- dev_err(dev, "bad number of data lanes\n");
+ DRM_DEV_ERROR(dev, "bad number of data lanes\n");
return -EINVAL;
}
@@ -1690,7 +1692,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
num_lanes);
if (ret) {
- dev_err(dev, "failed to read lane data\n");
+ DRM_DEV_ERROR(dev, "failed to read lane data\n");
return ret;
}
@@ -1711,7 +1713,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
*/
for (j = 0; j < num_lanes; j++) {
if (lane_map[j] < 0 || lane_map[j] > 3)
- dev_err(dev, "bad physical lane entry %u\n",
+ DRM_DEV_ERROR(dev, "bad physical lane entry %u\n",
lane_map[j]);
if (swap[lane_map[j]] != j)
@@ -1742,13 +1744,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
*/
endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
if (!endpoint) {
- dev_dbg(dev, "%s: no endpoint\n", __func__);
+ DRM_DEV_DEBUG(dev, "%s: no endpoint\n", __func__);
return 0;
}
ret = dsi_host_parse_lane_data(msm_host, endpoint);
if (ret) {
- dev_err(dev, "%s: invalid lane configuration %d\n",
+ DRM_DEV_ERROR(dev, "%s: invalid lane configuration %d\n",
__func__, ret);
ret = -EINVAL;
goto err;
@@ -1757,7 +1759,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
/* Get panel node from the output port's endpoint data */
device_node = of_graph_get_remote_node(np, 1, 0);
if (!device_node) {
- dev_dbg(dev, "%s: no valid device\n", __func__);
+ DRM_DEV_DEBUG(dev, "%s: no valid device\n", __func__);
ret = -ENODEV;
goto err;
}
@@ -1768,7 +1770,7 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
msm_host->sfpb = syscon_regmap_lookup_by_phandle(np,
"syscon-sfpb");
if (IS_ERR(msm_host->sfpb)) {
- dev_err(dev, "%s: failed to get sfpb regmap\n",
+ DRM_DEV_ERROR(dev, "%s: failed to get sfpb regmap\n",
__func__);
ret = PTR_ERR(msm_host->sfpb);
}
@@ -1918,7 +1920,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (msm_host->irq < 0) {
ret = msm_host->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
return ret;
}
@@ -1926,7 +1928,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"dsi_isr", msm_host);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
msm_host->irq, ret);
return ret;
}
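
[dsi_host_parse_lane_data() reads the logical-to-physical lane map from the endpoint's "data-lanes" property; the hunks above convert its diagnostics. A trimmed sketch of the parse itself, reconstructed from the hunk context (lane_map and the 1..4 lane limit are as shown above):]

	u32 lane_map[4];
	int num_lanes, len, ret;
	struct property *prop = of_find_property(ep, "data-lanes", &len);

	if (!prop)
		return 0;	/* no mapping in DT: keep the default */

	num_lanes = len / sizeof(u32);
	if (num_lanes < 1 || num_lanes > 4)
		return -EINVAL;

	ret = of_property_read_u32_array(ep, "data-lanes", lane_map, num_lanes);
	if (ret)
		return ret;
	/* each lane_map[j] must name a physical lane 0..3 */
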
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 9a9fa0c75a13..1760483b247e 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -404,7 +404,7 @@ static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
ret = devm_regulator_bulk_get(dev, num, s);
if (ret < 0) {
- dev_err(dev, "%s: failed to init regulator, ret=%d\n",
+ DRM_DEV_ERROR(dev, "%s: failed to init regulator, ret=%d\n",
__func__, ret);
return ret;
}
@@ -441,7 +441,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
ret = regulator_set_load(s[i].consumer,
regs[i].enable_load);
if (ret < 0) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"regulator %d set op mode failed, %d\n",
i, ret);
goto fail;
@@ -451,7 +451,7 @@ static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
ret = regulator_bulk_enable(num, s);
if (ret < 0) {
- dev_err(dev, "regulator enable failed, %d\n", ret);
+ DRM_DEV_ERROR(dev, "regulator enable failed, %d\n", ret);
goto fail;
}
@@ -472,7 +472,7 @@ static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
ret = clk_prepare_enable(phy->ahb_clk);
if (ret) {
- dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
+ DRM_DEV_ERROR(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
pm_runtime_put_sync(dev);
}
@@ -543,7 +543,7 @@ int msm_dsi_phy_init_common(struct msm_dsi_phy *phy)
phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
"DSI_PHY_REG");
if (IS_ERR(phy->reg_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy regulator base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy regulator base\n",
__func__);
ret = -ENOMEM;
goto fail;
@@ -574,7 +574,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->id = dsi_phy_get_id(phy);
if (phy->id < 0) {
ret = phy->id;
- dev_err(dev, "%s: couldn't identify PHY index, %d\n",
+ DRM_DEV_ERROR(dev, "%s: couldn't identify PHY index, %d\n",
__func__, ret);
goto fail;
}
@@ -584,20 +584,20 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR(phy->base)) {
- dev_err(dev, "%s: failed to map phy base\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
ret = -ENOMEM;
goto fail;
}
ret = dsi_phy_regulator_init(phy);
if (ret) {
- dev_err(dev, "%s: failed to init regulator\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to init regulator\n", __func__);
goto fail;
}
phy->ahb_clk = msm_clk_get(pdev, "iface");
if (IS_ERR(phy->ahb_clk)) {
- dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: Unable to get ahb clk\n", __func__);
ret = PTR_ERR(phy->ahb_clk);
goto fail;
}
@@ -617,7 +617,7 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
if (IS_ERR_OR_NULL(phy->pll))
- dev_info(dev,
+ DRM_DEV_INFO(dev,
"%s: pll init failed: %ld, need separate pll clk driver\n",
__func__, PTR_ERR(phy->pll));
@@ -675,21 +675,21 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = dsi_phy_enable_resource(phy);
if (ret) {
- dev_err(dev, "%s: resource enable failed, %d\n",
+ DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
__func__, ret);
goto res_en_fail;
}
ret = dsi_phy_regulator_enable(phy);
if (ret) {
- dev_err(dev, "%s: regulator enable failed, %d\n",
+ DRM_DEV_ERROR(dev, "%s: regulator enable failed, %d\n",
__func__, ret);
goto reg_en_fail;
}
ret = phy->cfg->ops.enable(phy, src_pll_id, clk_req);
if (ret) {
- dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
+ DRM_DEV_ERROR(dev, "%s: phy enable failed, %d\n", __func__, ret);
goto phy_en_fail;
}
@@ -702,7 +702,7 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
if (phy->usecase != MSM_DSI_PHY_SLAVE) {
ret = msm_dsi_pll_restore_state(phy->pll);
if (ret) {
- dev_err(dev, "%s: failed to restore pll state, %d\n",
+ DRM_DEV_ERROR(dev, "%s: failed to restore pll state, %d\n",
__func__, ret);
goto pll_restor_fail;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index b3fffc8dbb2a..44959e79ce28 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -93,7 +93,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -172,7 +172,7 @@ static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
- dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
@@ -196,7 +196,7 @@ static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 513f4234adc1..a172c667e8bc 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -64,7 +64,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
void __iomem *lane_base = phy->lane_base;
if (msm_dsi_dphy_timing_calc_v2(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
@@ -115,7 +115,7 @@ static int dsi_14nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
if (ret) {
- dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret);
return ret;
}
@@ -142,7 +142,7 @@ static int dsi_14nm_phy_init(struct msm_dsi_phy *phy)
phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
"DSI_PHY_LANE");
if (IS_ERR(phy->lane_base)) {
- dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
__func__);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index 1ca6c69516f5..9ea9478d3707 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -82,7 +82,7 @@ static int dsi_20nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 4972b52cbe44..c79505d97fe8 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -76,7 +76,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 398004463498..98790b44da48 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -132,7 +132,7 @@ static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
DBG("");
if (msm_dsi_dphy_timing_calc(timing, clk_req)) {
- dev_err(&phy->pdev->dev,
+ DRM_DEV_ERROR(&phy->pdev->dev,
"%s: D-PHY timing calculation failed\n", __func__);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 613e206fa4fc..7a1fb4da2ad3 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -175,7 +175,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
}
if (IS_ERR(pll)) {
- dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to init DSI PLL\n", __func__);
return pll;
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 4c03f0b7343e..aabab6311043 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -17,7 +17,7 @@
* | |
* | |
* +---------+ | +----------+ | +----+
- * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
* +---------+ | +----------+ | +----+
* | |
* | | dsi0_pll_by_2_bit_clk
@@ -25,7 +25,7 @@
* | | +----+ | |\ dsi0_pclk_mux
* | |--| /2 |--o--| \ |
* | | +----+ | \ | +---------+
- * | --------------| |--o--| div_7_4 |-- dsi0pll
+ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
* |------------------------------| / +---------+
* | +-----+ | /
* -----------| /4? |--o----------|/
@@ -39,6 +39,8 @@
#define DSI_PIXEL_PLL_CLK 1
#define NUM_PROVIDED_CLKS 2
+#define VCO_REF_CLK_RATE 19200000
+
struct dsi_pll_regs {
u32 pll_prop_gain_rate;
u32 pll_lockdet_rate;
@@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
parent_rate);
pll_10nm->vco_current_rate = rate;
- pll_10nm->vco_ref_clk_rate = parent_rate;
+ pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
dsi_pll_setup_config(pll_10nm);
@@ -688,7 +690,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
hws[num++] = hw;
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
@@ -737,7 +739,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
hws[num++] = hw;
- snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
@@ -760,7 +762,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
pll_10nm->hw_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -788,13 +790,13 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
- dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
return ERR_PTR(-ENOMEM);
}
pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
- dev_err(&pdev->dev, "failed to map PLL base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
return ERR_PTR(-ENOMEM);
}
@@ -813,7 +815,7 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
ret = pll_10nm_register(pll_10nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
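
[The 10nm PLL hunks rename the two provided clocks from dsi%dpllbyte/dsi%dpll to dsi%d_phy_pll_out_byteclk/dsi%d_phy_pll_out_dsiclk — apparently so they line up with the names downstream clock consumers reference — and pin the VCO reference to the fixed 19.2 MHz rate instead of trusting the parent_rate argument. The registration pattern, sketched from the hunks (clk_name/parent buffers are 32 bytes as above):]

	char clk_name[32], parent[32];

	/* provided clock #0: the byte clock fed to the DSI host */
	snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
	snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
	/* ... registered as a fixed /8 divider: byte clk = bit clk / 8 ... */

	/* provided clock #1: the pixel clock */
	snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
	snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
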
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
index 71fe60e5f01f..0e18cddd6f22 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_14nm.c
@@ -783,7 +783,7 @@ static int dsi_pll_14nm_enable_seq(struct msm_dsi_pll *pll)
POLL_TIMEOUT_US);
if (unlikely(!locked))
- dev_err(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(&pll_14nm->pdev->dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -829,7 +829,7 @@ static int dsi_pll_14nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_14nm_vco_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_14nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_14nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -1039,7 +1039,7 @@ static int pll_14nm_register(struct dsi_pll_14nm *pll_14nm)
ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
pll_14nm->hw_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -1067,13 +1067,13 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
pll_14nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
if (IS_ERR_OR_NULL(pll_14nm->phy_cmn_mmio)) {
- dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
return ERR_PTR(-ENOMEM);
}
pll_14nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_14nm->mmio)) {
- dev_err(&pdev->dev, "failed to map PLL base\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
return ERR_PTR(-ENOMEM);
}
@@ -1096,7 +1096,7 @@ struct msm_dsi_pll *msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
ret = pll_14nm_register(pll_14nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 26e3a01a99c2..dcbbaeb1b1fb 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -156,7 +156,7 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (rate <= lpfr_lut[i].vco_rate)
break;
if (i == LPFR_LUT_SIZE) {
- dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
+ DRM_DEV_ERROR(dev, "unable to get loop filter resistance. vco=%lu\n",
rate);
return -EINVAL;
}
@@ -386,7 +386,7 @@ static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
}
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL Lock success");
@@ -429,7 +429,7 @@ static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -468,7 +468,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_28nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_28nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -581,7 +581,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
ret = of_clk_add_provider(dev->of_node,
of_clk_src_onecell_get, &pll_28nm->clk_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -607,7 +607,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
- dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
return ERR_PTR(-ENOMEM);
}
@@ -633,13 +633,13 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
pll->en_seq_cnt = 1;
pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
} else {
- dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
+ DRM_DEV_ERROR(&pdev->dev, "phy type (%d) is not 28nm\n", type);
return ERR_PTR(-EINVAL);
}
ret = pll_28nm_register(pll_28nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
index 49008451085b..d6897464755f 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -327,7 +327,7 @@ static int dsi_pll_28nm_enable_seq(struct msm_dsi_pll *pll)
locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
if (unlikely(!locked))
- dev_err(dev, "DSI PLL lock failed\n");
+ DRM_DEV_ERROR(dev, "DSI PLL lock failed\n");
else
DBG("DSI PLL lock success");
@@ -368,7 +368,7 @@ static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
cached_state->vco_rate, 0);
if (ret) {
- dev_err(&pll_28nm->pdev->dev,
+ DRM_DEV_ERROR(&pll_28nm->pdev->dev,
"restore vco rate failed. ret=%d\n", ret);
return ret;
}
@@ -482,7 +482,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
ret = of_clk_add_provider(dev->of_node,
of_clk_src_onecell_get, &pll_28nm->clk_data);
if (ret) {
- dev_err(dev, "failed to register clk provider: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
return ret;
}
@@ -508,7 +508,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
- dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+ DRM_DEV_ERROR(&pdev->dev, "%s: failed to map pll base\n", __func__);
return ERR_PTR(-ENOMEM);
}
@@ -526,7 +526,7 @@ struct msm_dsi_pll *msm_dsi_pll_28nm_8960_init(struct platform_device *pdev,
ret = pll_28nm_register(pll_28nm);
if (ret) {
- dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0940e84b2821..6a63aba98a30 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -157,7 +157,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->bridge = msm_edp_bridge_init(edp);
if (IS_ERR(edp->bridge)) {
ret = PTR_ERR(edp->bridge);
- dev_err(dev->dev, "failed to create eDP bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret);
edp->bridge = NULL;
goto fail;
}
@@ -165,7 +165,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->connector = msm_edp_connector_init(edp);
if (IS_ERR(edp->connector)) {
ret = PTR_ERR(edp->connector);
- dev_err(dev->dev, "failed to create eDP connector: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret);
edp->connector = NULL;
goto fail;
}
@@ -173,7 +173,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (edp->irq < 0) {
ret = edp->irq;
- dev_err(dev->dev, "failed to get IRQ: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret);
goto fail;
}
@@ -181,7 +181,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"edp_isr", edp);
if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
edp->irq, ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index c79659ca5706..e247d6942a49 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -98,7 +98,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "cannot find phy device\n");
+ DRM_DEV_ERROR(&pdev->dev, "cannot find phy device\n");
return -ENXIO;
}
@@ -109,7 +109,7 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
of_node_put(phy_node);
if (!phy_pdev || !hdmi->phy) {
- dev_err(&pdev->dev, "phy driver is not ready\n");
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
return -EPROBE_DEFER;
}
@@ -153,7 +153,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->qfprom_mmio = msm_ioremap(pdev,
config->qfprom_mmio_name, "HDMI_QFPROM");
if (IS_ERR(hdmi->qfprom_mmio)) {
- dev_info(&pdev->dev, "can't find qfprom resource\n");
+ DRM_DEV_INFO(&pdev->dev, "can't find qfprom resource\n");
hdmi->qfprom_mmio = NULL;
}
@@ -172,7 +172,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
config->hpd_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
@@ -195,7 +195,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
config->pwr_reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
goto fail;
}
@@ -217,7 +217,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
goto fail;
}
@@ -239,7 +239,7 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
goto fail;
}
@@ -254,14 +254,14 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
hdmi->i2c = msm_hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
ret = PTR_ERR(hdmi->i2c);
- dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get i2c: %d\n", ret);
hdmi->i2c = NULL;
goto fail;
}
ret = msm_hdmi_get_phy(hdmi);
if (ret) {
- dev_err(&pdev->dev, "failed to get phy\n");
+ DRM_DEV_ERROR(&pdev->dev, "failed to get phy\n");
goto fail;
}
@@ -303,7 +303,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->bridge = msm_hdmi_bridge_init(hdmi);
if (IS_ERR(hdmi->bridge)) {
ret = PTR_ERR(hdmi->bridge);
- dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create HDMI bridge: %d\n", ret);
hdmi->bridge = NULL;
goto fail;
}
@@ -311,7 +311,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->connector = msm_hdmi_connector_init(hdmi);
if (IS_ERR(hdmi->connector)) {
ret = PTR_ERR(hdmi->connector);
- dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to create HDMI connector: %d\n", ret);
hdmi->connector = NULL;
goto fail;
}
@@ -319,7 +319,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
if (hdmi->irq < 0) {
ret = hdmi->irq;
- dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
goto fail;
}
@@ -327,11 +327,17 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
msm_hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"hdmi_isr", hdmi);
if (ret < 0) {
- dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
hdmi->irq, ret);
goto fail;
}
+ ret = msm_hdmi_hpd_enable(hdmi->connector);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
+ goto fail;
+ }
+
encoder->bridge = hdmi->bridge;
priv->bridges[priv->num_bridges++] = hdmi->bridge;
@@ -476,7 +482,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
unsigned int level_shift = 0; /* 0dB */
bool down_mix = false;
- dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
+ DRM_DEV_DEBUG(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
params->sample_width, params->cea.channels);
switch (params->cea.channels) {
@@ -527,7 +533,7 @@ static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
rate = HDMI_SAMPLE_RATE_192KHZ;
break;
default:
- dev_err(dev, "rate[%d] not supported!\n",
+ DRM_DEV_ERROR(dev, "rate[%d] not supported!\n",
params->sample_rate);
return -EINVAL;
}
@@ -571,7 +577,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
- static struct hdmi_platform_config *hdmi_cfg;
+ struct hdmi_platform_config *hdmi_cfg;
struct hdmi *hdmi;
struct device_node *of_node = dev->of_node;
int i, err;
@@ -579,7 +585,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg = (struct hdmi_platform_config *)
of_device_get_match_data(dev);
if (!hdmi_cfg) {
- dev_err(dev, "unknown hdmi_cfg: %s\n", of_node->name);
+ DRM_DEV_ERROR(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
return -ENXIO;
}
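
[Two small fixups ride along in the hdmi.c hunk above: the hdmi_cfg local loses a needless 'static' qualifier (it is reinitialized on every bind, so there is nothing to share), and the unknown-config message switches from of_node->name to the %pOFn printk specifier, which formats a struct device_node's name without reaching into its fields. Usage, as in the hunk:]

	/* %pOFn prints the node's name; %pOF would print its full path */
	DRM_DEV_ERROR(dev, "unknown hdmi_cfg: %pOFn\n", of_node);
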
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index accc9a61611d..5c5df6ab2a57 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
void msm_hdmi_connector_irq(struct drm_connector *connector);
struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
+int msm_hdmi_hpd_enable(struct drm_connector *connector);
/*
* i2c adapter for ddc:
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 7e357077ed26..98d61c690260 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -40,7 +40,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_enable(hdmi->pwr_regs[i]);
if (ret) {
- dev_err(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
}
}
@@ -49,7 +49,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
DBG("pixclock: %lu", hdmi->pixclock);
ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
if (ret) {
- dev_err(dev->dev, "failed to set pixel clk: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to set pixel clk: %s (%d)\n",
config->pwr_clk_names[0], ret);
}
}
@@ -57,7 +57,7 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_clk_cnt; i++) {
ret = clk_prepare_enable(hdmi->pwr_clks[i]);
if (ret) {
- dev_err(dev->dev, "failed to enable pwr clk: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr clk: %s (%d)\n",
config->pwr_clk_names[i], ret);
}
}
@@ -82,7 +82,7 @@ static void power_off(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_reg_cnt; i++) {
ret = regulator_disable(hdmi->pwr_regs[i]);
if (ret) {
- dev_err(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
config->pwr_reg_names[i], ret);
}
}
@@ -105,7 +105,7 @@ static void msm_hdmi_config_avi_infoframe(struct hdmi *hdmi)
len = hdmi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (len < 0) {
- dev_err(&hdmi->pdev->dev,
+ DRM_DEV_ERROR(&hdmi->pdev->dev,
"failed to configure avi infoframe\n");
return;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index e9c9a0af508e..a6eeab2c4dc3 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -90,7 +90,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
if (gpio.num != -1) {
ret = gpio_request(gpio.num, gpio.label);
if (ret) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"'%s'(%d) gpio_request failed: %d\n",
gpio.label, gpio.num, ret);
goto err;
@@ -156,7 +156,7 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
ret = clk_prepare_enable(hdmi->hpd_clks[i]);
if (ret) {
- dev_err(dev,
+ DRM_DEV_ERROR(dev,
"failed to enable hpd clk: %s (%d)\n",
config->hpd_clk_names[i], ret);
}
@@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
}
}
-static int hpd_enable(struct hdmi_connector *hdmi_connector)
+int msm_hdmi_hpd_enable(struct drm_connector *connector)
{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
@@ -179,7 +180,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
for (i = 0; i < config->hpd_reg_cnt; i++) {
ret = regulator_enable(hdmi->hpd_regs[i]);
if (ret) {
- dev_err(dev, "failed to enable hpd regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n",
config->hpd_reg_names[i], ret);
goto fail;
}
@@ -187,13 +188,13 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
ret = pinctrl_pm_select_default_state(dev);
if (ret) {
- dev_err(dev, "pinctrl state chg failed: %d\n", ret);
+ DRM_DEV_ERROR(dev, "pinctrl state chg failed: %d\n", ret);
goto fail;
}
ret = gpio_config(hdmi, true);
if (ret) {
- dev_err(dev, "failed to configure GPIOs: %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to configure GPIOs: %d\n", ret);
goto fail;
}
@@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
{
struct drm_connector *connector = NULL;
struct hdmi_connector *hdmi_connector;
- int ret;
hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
if (!hdmi_connector)
@@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = hpd_enable(hdmi_connector);
- if (ret) {
- dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
- return ERR_PTR(ret);
- }
-
drm_connector_attach_encoder(connector, hdmi->encoder);
return connector;
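
[The connector-init hunks above drop the hpd_enable() call from msm_hdmi_connector_init() and export it as msm_hdmi_hpd_enable(); as the earlier hdmi.c hunk shows, it is now called from msm_hdmi_modeset_init() only after the HDMI IRQ has been requested, so a hot-plug event cannot fire before a handler exists. The resulting bring-up order, sketched:]

	/* sketch of the new ordering in msm_hdmi_modeset_init() */
	ret = devm_request_irq(dev->dev, hdmi->irq, msm_hdmi_irq,
			       IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
			       "hdmi_isr", hdmi);
	if (ret < 0)
		goto fail;

	/* only now is it safe to power up hot-plug detection */
	ret = msm_hdmi_hpd_enable(hdmi->connector);
	if (ret < 0)
		goto fail;
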
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index 73e20219d431..25d2fe2c60e8 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -66,7 +66,7 @@ static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
} while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
if (!retry) {
- dev_err(dev->dev, "timeout waiting for DDC\n");
+ DRM_DEV_ERROR(dev->dev, "timeout waiting for DDC\n");
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 4157722d6b4d..1f4331ed69bd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -37,7 +37,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
reg = devm_regulator_get(dev, cfg->reg_names[i]);
if (IS_ERR(reg)) {
ret = PTR_ERR(reg);
- dev_err(dev, "failed to get phy regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to get phy regulator: %s (%d)\n",
cfg->reg_names[i], ret);
return ret;
}
@@ -51,7 +51,7 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
clk = msm_clk_get(phy->pdev, cfg->clk_names[i]);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- dev_err(dev, "failed to get phy clock: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to get phy clock: %s (%d)\n",
cfg->clk_names[i], ret);
return ret;
}
@@ -73,14 +73,14 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
for (i = 0; i < cfg->num_regs; i++) {
ret = regulator_enable(phy->regs[i]);
if (ret)
- dev_err(dev, "failed to enable regulator: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n",
cfg->reg_names[i], ret);
}
for (i = 0; i < cfg->num_clks; i++) {
ret = clk_prepare_enable(phy->clks[i]);
if (ret)
- dev_err(dev, "failed to enable clock: %s (%d)\n",
+ DRM_DEV_ERROR(dev, "failed to enable clock: %s (%d)\n",
cfg->clk_names[i], ret);
}
@@ -159,7 +159,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
phy->mmio = msm_ioremap(pdev, "hdmi_phy", "HDMI_PHY");
if (IS_ERR(phy->mmio)) {
- dev_err(dev, "%s: failed to map phy base\n", __func__);
+ DRM_DEV_ERROR(dev, "%s: failed to map phy base\n", __func__);
return -ENOMEM;
}
@@ -177,7 +177,7 @@ static int msm_hdmi_phy_probe(struct platform_device *pdev)
ret = msm_hdmi_phy_pll_init(pdev, phy->cfg->type);
if (ret) {
- dev_err(dev, "couldn't init PLL\n");
+ DRM_DEV_ERROR(dev, "couldn't init PLL\n");
msm_hdmi_phy_resource_disable(phy);
return ret;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 0df504c61833..318708f26731 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -725,7 +725,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
if (IS_ERR(pll->mmio_qserdes_com)) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
@@ -737,7 +737,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name, label);
if (IS_ERR(pll->mmio_qserdes_tx[i])) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
}
@@ -745,7 +745,7 @@ int msm_hdmi_pll_8996_init(struct platform_device *pdev)
clk = devm_clk_register(dev, &pll->clk_hw);
if (IS_ERR(clk)) {
- dev_err(dev, "failed to register pll clock\n");
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 99590758c68b..c6dae6e437f9 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -445,7 +445,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
pll->mmio = msm_ioremap(pdev, "hdmi_pll", "HDMI_PLL");
if (IS_ERR(pll->mmio)) {
- dev_err(dev, "failed to map pll base\n");
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
return -ENOMEM;
}
@@ -454,7 +454,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
clk = devm_clk_register(dev, &pll->clk_hw);
if (IS_ERR(clk)) {
- dev_err(dev, "failed to register pll clock\n");
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 4bcdeca7479d..f5b1256e32b6 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -34,7 +34,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
if (!new_crtc_state->active)
continue;
+ if (drm_crtc_vblank_get(crtc))
+ continue;
+
kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+
+ drm_crtc_vblank_put(crtc);
}
}
@@ -78,7 +83,8 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
kms->funcs->commit(kms, state);
}
- msm_atomic_wait_for_commit_done(dev, state);
+ if (!state->legacy_cursor_update)
+ msm_atomic_wait_for_commit_done(dev, state);
kms->funcs->complete_commit(kms, state);
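
[The msm_atomic.c hunks wrap the commit-done wait in a vblank reference: drm_crtc_vblank_get() keeps the vblank interrupt enabled while waiting and doubles as an "is vblank usable" check (it returns non-zero when vblank cannot be enabled), while legacy cursor updates skip the wait entirely so cursor motion never stalls on vsync. The guard pattern in isolation:]

	/* hold a vblank reference across the wait; skip CRTCs where
	 * vblank cannot be enabled
	 */
	if (drm_crtc_vblank_get(crtc) == 0) {
		kms->funcs->wait_for_crtc_commit_done(kms, crtc);
		drm_crtc_vblank_put(crtc);
	}
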
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index f0da0d3c8a80..fb423d309e91 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -84,7 +84,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev);
show_priv->state = gpu->funcs->gpu_state_get(gpu);
@@ -94,13 +94,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
if (IS_ERR(show_priv->state)) {
ret = PTR_ERR(show_priv->state);
- kfree(show_priv);
- return ret;
+ goto free_priv;
}
show_priv->dev = dev;
- return single_open(file, msm_gpu_show, show_priv);
+ ret = single_open(file, msm_gpu_show, show_priv);
+ if (ret)
+ goto free_priv;
+
+ return 0;
+
+free_priv:
+ kfree(show_priv);
+ return ret;
}
static const struct file_operations msm_gpu_fops = {
@@ -194,13 +201,13 @@ static int late_init_minor(struct drm_minor *minor)
ret = msm_rd_debugfs_init(minor);
if (ret) {
- dev_err(minor->dev->dev, "could not install rd debugfs\n");
+ DRM_DEV_ERROR(minor->dev->dev, "could not install rd debugfs\n");
return ret;
}
ret = msm_perf_debugfs_init(minor);
if (ret) {
- dev_err(minor->dev->dev, "could not install perf debugfs\n");
+ DRM_DEV_ERROR(minor->dev->dev, "could not install perf debugfs\n");
return ret;
}
@@ -228,14 +235,14 @@ int msm_debugfs_init(struct drm_minor *minor)
minor->debugfs_root, minor);
if (ret) {
- dev_err(dev->dev, "could not install msm_debugfs_list\n");
+ DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
return ret;
}
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
- if (priv->kms->funcs->debugfs_init) {
+ if (priv->kms && priv->kms->funcs->debugfs_init) {
ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 4904d0d41409..d2cdc7b553fe 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -23,8 +23,10 @@
#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_fence.h"
+#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_kms.h"
+#include "adreno/adreno_gpu.h"
/*
@@ -35,9 +37,11 @@
* - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
* SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
* MSM_GEM_INFO ioctl.
+ * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
+ * GEM object's debug name
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 3
+#define MSM_VERSION_MINOR 4
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -170,7 +174,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "failed to get memory resource: %s\n", name);
return ERR_PTR(-EINVAL);
}
@@ -178,7 +182,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
if (!ptr) {
- dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
+ DRM_DEV_ERROR(&pdev->dev, "failed to ioremap: %s\n", name);
return ERR_PTR(-ENOMEM);
}
@@ -312,6 +316,7 @@ static int msm_drm_uninit(struct device *dev)
if (fbdev && priv->fbdev)
msm_fbdev_free(ddev);
#endif
+ drm_atomic_helper_shutdown(ddev);
drm_mode_config_cleanup(ddev);
pm_runtime_get_sync(dev);
@@ -357,6 +362,14 @@ static int get_mdp_ver(struct platform_device *pdev)
#include <linux/of_address.h>
+bool msm_use_mmu(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* a2xx comes with its own MMU */
+ return priv->is_a2xx || iommu_present(&platform_bus_type);
+}
+
static int msm_init_vram(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -395,7 +408,7 @@ static int msm_init_vram(struct drm_device *dev)
* Grab the entire CMA chunk carved out in early startup in
* mach-msm:
*/
- } else if (!iommu_present(&platform_bus_type)) {
+ } else if (!msm_use_mmu(dev)) {
DRM_INFO("using %s VRAM carveout\n", vram);
size = memparse(vram, NULL);
}
@@ -418,12 +431,12 @@ static int msm_init_vram(struct drm_device *dev)
p = dma_alloc_attrs(dev->dev, size,
&priv->vram.paddr, GFP_KERNEL, attrs);
if (!p) {
- dev_err(dev->dev, "failed to allocate VRAM\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
return -ENOMEM;
}
- dev_info(dev->dev, "VRAM: %08x->%08x\n",
+ DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
(uint32_t)priv->vram.paddr,
(uint32_t)(priv->vram.paddr + size));
}
@@ -443,7 +456,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
- dev_err(dev, "failed to allocate drm_device\n");
+ DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
return PTR_ERR(ddev);
}
@@ -507,19 +520,16 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->kms = kms;
break;
default:
- kms = ERR_PTR(-ENODEV);
+ /* valid only for the dummy headless case, where of_node=NULL */
+ WARN_ON(dev->of_node);
+ kms = NULL;
break;
}
if (IS_ERR(kms)) {
- /*
- * NOTE: once we have GPU support, having no kms should not
- * be considered fatal.. ideally we would still support gpu
- * and (for example) use dmabuf/prime to share buffers with
- * imx drm driver on iMX5
- */
- dev_err(dev, "failed to load kms\n");
+ DRM_DEV_ERROR(dev, "failed to load kms\n");
ret = PTR_ERR(kms);
+ priv->kms = NULL;
goto err_msm_uninit;
}
@@ -529,7 +539,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (kms) {
ret = kms->funcs->hw_init(kms);
if (ret) {
- dev_err(dev, "kms hw init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev, "kms hw init failed: %d\n", ret);
goto err_msm_uninit;
}
}
@@ -553,17 +563,18 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
kthread_run(kthread_worker_fn,
&priv->disp_thread[i].worker,
"crtc_commit:%d", priv->disp_thread[i].crtc_id);
- ret = sched_setscheduler(priv->disp_thread[i].thread,
- SCHED_FIFO, &param);
- if (ret)
- pr_warn("display thread priority update failed: %d\n",
- ret);
-
if (IS_ERR(priv->disp_thread[i].thread)) {
- dev_err(dev, "failed to create crtc_commit kthread\n");
+ DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n");
priv->disp_thread[i].thread = NULL;
+ goto err_msm_uninit;
}
+ ret = sched_setscheduler(priv->disp_thread[i].thread,
+ SCHED_FIFO, &param);
+ if (ret)
+ dev_warn(dev, "disp_thread set priority failed: %d\n",
+ ret);
+
/* initialize event thread */
priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
kthread_init_worker(&priv->event_thread[i].worker);
@@ -572,6 +583,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
kthread_run(kthread_worker_fn,
&priv->event_thread[i].worker,
"crtc_event:%d", priv->event_thread[i].crtc_id);
+ if (IS_ERR(priv->event_thread[i].thread)) {
+ DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
+ priv->event_thread[i].thread = NULL;
+ goto err_msm_uninit;
+ }
+
/**
* event thread should also run at same priority as disp_thread
* because it is handling frame_done events. A lower priority
@@ -580,39 +597,15 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
* failure at crtc commit level.
*/
ret = sched_setscheduler(priv->event_thread[i].thread,
- SCHED_FIFO, &param);
+ SCHED_FIFO, &param);
if (ret)
- pr_warn("display event thread priority update failed: %d\n",
- ret);
-
- if (IS_ERR(priv->event_thread[i].thread)) {
- dev_err(dev, "failed to create crtc_event kthread\n");
- priv->event_thread[i].thread = NULL;
- }
-
- if ((!priv->disp_thread[i].thread) ||
- !priv->event_thread[i].thread) {
- /* clean up previously created threads if any */
- for ( ; i >= 0; i--) {
- if (priv->disp_thread[i].thread) {
- kthread_stop(
- priv->disp_thread[i].thread);
- priv->disp_thread[i].thread = NULL;
- }
-
- if (priv->event_thread[i].thread) {
- kthread_stop(
- priv->event_thread[i].thread);
- priv->event_thread[i].thread = NULL;
- }
- }
- goto err_msm_uninit;
- }
+ dev_warn(dev, "event_thread set priority failed:%d\n",
+ ret);
}
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
- dev_err(dev, "failed to initialize vblank\n");
+ DRM_DEV_ERROR(dev, "failed to initialize vblank\n");
goto err_msm_uninit;
}
@@ -621,7 +614,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ret = drm_irq_install(ddev, kms->irq);
pm_runtime_put_sync(dev);
if (ret < 0) {
- dev_err(dev, "failed to install IRQ handler\n");
+ DRM_DEV_ERROR(dev, "failed to install IRQ handler\n");
goto err_msm_uninit;
}
}
@@ -633,7 +626,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
drm_mode_config_reset(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (fbdev)
+ if (kms && fbdev)
priv->fbdev = msm_fbdev_init(ddev);
#endif
@@ -741,7 +734,11 @@ static int msm_irq_postinstall(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
BUG_ON(!kms);
- return kms->funcs->irq_postinstall(kms);
+
+ if (kms->funcs->irq_postinstall)
+ return kms->funcs->irq_postinstall(kms);
+
+ return 0;
}
static void msm_irq_uninstall(struct drm_device *dev)
@@ -808,7 +805,7 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
}
return msm_gem_new_handle(dev, file, args->size,
- args->flags, &args->handle);
+ args->flags, &args->handle, NULL);
}
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
@@ -866,6 +863,10 @@ static int msm_ioctl_gem_info_iova(struct drm_device *dev,
if (!priv->gpu)
return -EINVAL;
+ /*
+ * Don't pin the memory here - just get an address so that userspace can
+ * be productive
+ */
return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
}
@@ -874,23 +875,66 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
{
struct drm_msm_gem_info *args = data;
struct drm_gem_object *obj;
- int ret = 0;
+ struct msm_gem_object *msm_obj;
+ int i, ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
- if (args->flags & ~MSM_INFO_FLAGS)
+ switch (args->info) {
+ case MSM_INFO_GET_OFFSET:
+ case MSM_INFO_GET_IOVA:
+ /* value returned as immediate, not pointer, so len==0: */
+ if (args->len)
+ return -EINVAL;
+ break;
+ case MSM_INFO_SET_NAME:
+ case MSM_INFO_GET_NAME:
+ break;
+ default:
return -EINVAL;
+ }
obj = drm_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
- if (args->flags & MSM_INFO_IOVA) {
- uint64_t iova;
+ msm_obj = to_msm_bo(obj);
- ret = msm_ioctl_gem_info_iova(dev, obj, &iova);
- if (!ret)
- args->offset = iova;
- } else {
- args->offset = msm_gem_mmap_offset(obj);
+ switch (args->info) {
+ case MSM_INFO_GET_OFFSET:
+ args->value = msm_gem_mmap_offset(obj);
+ break;
+ case MSM_INFO_GET_IOVA:
+ ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
+ break;
+ case MSM_INFO_SET_NAME:
+ /* length check should leave room for terminating null: */
+ if (args->len >= sizeof(msm_obj->name)) {
+ ret = -EINVAL;
+ break;
+ }
+ if (copy_from_user(msm_obj->name,
+ u64_to_user_ptr(args->value), args->len)) {
+ ret = -EFAULT;
+ break;
+ }
+ msm_obj->name[args->len] = '\0';
+ for (i = 0; i < args->len; i++) {
+ if (!isprint(msm_obj->name[i])) {
+ msm_obj->name[i] = '\0';
+ break;
+ }
+ }
+ break;
+ case MSM_INFO_GET_NAME:
+ if (args->value && (args->len < strlen(msm_obj->name))) {
+ ret = -EINVAL;
+ break;
+ }
+ args->len = strlen(msm_obj->name);
+ if (args->value) {
+ if (copy_to_user(u64_to_user_ptr(args->value),
+ msm_obj->name, args->len))
+ ret = -EFAULT;
+ }
+ break;
}
drm_gem_object_put_unlocked(obj);
@@ -1069,18 +1113,15 @@ static int msm_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
- struct msm_kms *kms = priv->kms;
- /* TODO: Use atomic helper suspend/resume */
- if (kms && kms->funcs && kms->funcs->pm_suspend)
- return kms->funcs->pm_suspend(dev);
-
- drm_kms_helper_poll_disable(ddev);
+ if (WARN_ON(priv->pm_state))
+ drm_atomic_state_put(priv->pm_state);
priv->pm_state = drm_atomic_helper_suspend(ddev);
if (IS_ERR(priv->pm_state)) {
- drm_kms_helper_poll_enable(ddev);
- return PTR_ERR(priv->pm_state);
+ int ret = PTR_ERR(priv->pm_state);
+ DRM_ERROR("Failed to suspend dpu, %d\n", ret);
+ return ret;
}
return 0;
@@ -1090,16 +1131,16 @@ static int msm_pm_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
- struct msm_kms *kms = priv->kms;
+ int ret;
- /* TODO: Use atomic helper suspend/resume */
- if (kms && kms->funcs && kms->funcs->pm_resume)
- return kms->funcs->pm_resume(dev);
+ if (WARN_ON(!priv->pm_state))
+ return -ENOENT;
- drm_atomic_helper_resume(ddev, priv->pm_state);
- drm_kms_helper_poll_enable(ddev);
+ ret = drm_atomic_helper_resume(ddev, priv->pm_state);
+ if (!ret)
+ priv->pm_state = NULL;
- return 0;
+ return ret;
}
#endif
@@ -1184,7 +1225,7 @@ static int add_components_mdp(struct device *mdp_dev,
ret = of_graph_parse_endpoint(ep_node, &ep);
if (ret) {
- dev_err(mdp_dev, "unable to parse port endpoint\n");
+ DRM_DEV_ERROR(mdp_dev, "unable to parse port endpoint\n");
of_node_put(ep_node);
return ret;
}
@@ -1206,8 +1247,10 @@ static int add_components_mdp(struct device *mdp_dev,
if (!intf)
continue;
- drm_of_component_match_add(master_dev, matchptr, compare_of,
- intf);
+ if (of_device_is_available(intf))
+ drm_of_component_match_add(master_dev, matchptr,
+ compare_of, intf);
+
of_node_put(intf);
}
@@ -1235,13 +1278,13 @@ static int add_display_components(struct device *dev,
of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
- dev_err(dev, "failed to populate children devices\n");
+ DRM_DEV_ERROR(dev, "failed to populate children devices\n");
return ret;
}
mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
if (!mdp_dev) {
- dev_err(dev, "failed to find MDSS MDP node\n");
+ DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
of_platform_depopulate(dev);
return -ENODEV;
}
@@ -1271,6 +1314,7 @@ static int add_display_components(struct device *dev,
static const struct of_device_id msm_gpu_match[] = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-3xx" },
+ { .compatible = "amd,imageon" },
{ .compatible = "qcom,kgsl-3d0" },
{ },
};
@@ -1315,9 +1359,11 @@ static int msm_pdev_probe(struct platform_device *pdev)
struct component_match *match = NULL;
int ret;
- ret = add_display_components(&pdev->dev, &match);
- if (ret)
- return ret;
+ if (get_mdp_ver(pdev)) {
+ ret = add_display_components(&pdev->dev, &match);
+ if (ret)
+ return ret;
+ }
ret = add_gpu_components(&pdev->dev, &match);
if (ret)
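
The msm_drv.c changes above rework MSM_GEM_INFO from a flags-based query into a small command interface: args->info selects the operation, GET_OFFSET and GET_IOVA return an immediate in args->value (args->len must be 0), while SET_NAME and GET_NAME treat args->value as a user pointer of args->len bytes. A hedged userspace sketch of the new SET_NAME path, assuming the uapi names this series adds (struct drm_msm_gem_info, MSM_INFO_SET_NAME, DRM_IOCTL_MSM_GEM_INFO), the libdrm drmIoctl() wrapper, and illustrative header paths:

	#include <stdint.h>
	#include <string.h>
	#include <xf86drm.h>
	#include <drm/msm_drm.h>

	/* Illustration only: label a GEM object for the debugfs listing. */
	static int msm_bo_set_name(int fd, uint32_t handle, const char *name)
	{
		struct drm_msm_gem_info req = {
			.handle = handle,
			.info   = MSM_INFO_SET_NAME,
			.value  = (uintptr_t)name,	/* user pointer */
			.len    = strlen(name),		/* NUL not included */
		};

		return drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
	}
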
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 9d11f321f5a9..9cd6a96c6bf2 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -179,6 +179,8 @@ struct msm_drm_private {
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
struct msm_file_private *lastctx;
+ /* gpu is only set on open(), but we need this info earlier */
+ bool is_a2xx;
struct drm_fb_helper *fbdev;
@@ -241,10 +243,16 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int npages);
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt);
+ struct msm_gem_vma *vma);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
@@ -252,9 +260,15 @@ struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+ const char *name, uint64_t va_start, uint64_t va_end);
+
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+bool msm_use_mmu(struct drm_device *dev);
+
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -269,12 +283,14 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace);
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -301,7 +317,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
void msm_gem_free_object(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle);
+ uint32_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
@@ -312,9 +328,13 @@ void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
uint32_t flags, struct msm_gem_address_space *aspace,
struct drm_gem_object **bo, uint64_t *iova);
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+ struct msm_gem_address_space *aspace, bool locked);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
+
int msm_framebuffer_prepare(struct drm_framebuffer *fb,
struct msm_gem_address_space *aspace);
void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 2a7348aeb38d..67dfd8d3dc12 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -66,7 +66,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
+ ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &iova);
DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
@@ -81,7 +81,7 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
int i, n = fb->format->num_planes;
for (i = 0; i < n; i++)
- msm_gem_put_iova(fb->obj[i], aspace);
+ msm_gem_unpin_iova(fb->obj[i], aspace);
}
uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
@@ -154,7 +154,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (!format) {
- dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+ DRM_DEV_ERROR(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
ret = -EINVAL;
goto fail;
@@ -196,7 +196,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
if (ret) {
- dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "framebuffer init failed: %d\n", ret);
goto fail;
}
@@ -233,13 +233,15 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
}
if (IS_ERR(bo)) {
- dev_err(dev->dev, "failed to allocate buffer object\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate buffer object\n");
return ERR_CAST(bo);
}
+ msm_gem_object_set_name(bo, "stolenfb");
+
fb = msm_framebuffer_init(dev, &mode_cmd, &bo);
if (IS_ERR(fb)) {
- dev_err(dev->dev, "failed to allocate fb\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 456622b46335..c03e860ba737 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -91,7 +91,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
sizes->surface_height, pitch, format);
if (IS_ERR(fb)) {
- dev_err(dev->dev, "failed to allocate fb\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n");
return PTR_ERR(fb);
}
@@ -104,15 +104,15 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_iova(bo, priv->kms->aspace, &paddr);
+ ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
if (ret) {
- dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
}
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
- dev_err(dev->dev, "failed to allocate fb info\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
goto fail_unlock;
}
@@ -176,7 +176,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
if (ret) {
- dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
goto fail;
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f59ca27a4a35..51a95da694d8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -88,7 +88,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
p = get_pages_vram(obj, npages);
if (IS_ERR(p)) {
- dev_err(dev->dev, "could not get pages: %ld\n",
+ DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
PTR_ERR(p));
return p;
}
@@ -99,7 +99,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt);
- dev_err(dev->dev, "failed to allocate sgt\n");
+ DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
msm_obj->sgt = NULL;
return ptr;
}
@@ -280,7 +280,7 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
ret = drm_gem_create_mmap_offset(obj);
if (ret) {
- dev_err(dev->dev, "could not allocate mmap offset\n");
+ DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
return 0;
}
@@ -352,63 +352,104 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&msm_obj->lock));
list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
- msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
+ msm_gem_purge_vma(vma->aspace, vma);
+ msm_gem_close_vma(vma->aspace, vma);
del_vma(vma);
}
}
-/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
int ret = 0;
- mutex_lock(&msm_obj->lock);
-
- if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
- mutex_unlock(&msm_obj->lock);
- return -EBUSY;
- }
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
vma = lookup_vma(obj, aspace);
if (!vma) {
- struct page **pages;
-
vma = add_vma(obj, aspace);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto unlock;
- }
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
- pages = get_pages(obj);
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto fail;
+ ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+ if (ret) {
+ del_vma(vma);
+ return ret;
}
-
- ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
- obj->size >> PAGE_SHIFT);
- if (ret)
- goto fail;
}
*iova = vma->iova;
+ return 0;
+}
+
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+ struct page **pages;
+
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+ if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+ return -EBUSY;
+
+ vma = lookup_vma(obj, aspace);
+ if (WARN_ON(!vma))
+ return -EINVAL;
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
+ obj->size >> PAGE_SHIFT);
+}
+
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ u64 local;
+ int ret;
+
+ mutex_lock(&msm_obj->lock);
+
+ ret = msm_gem_get_iova_locked(obj, aspace, &local);
+
+ if (!ret)
+ ret = msm_gem_pin_iova(obj, aspace);
+
+ if (!ret)
+ *iova = local;
mutex_unlock(&msm_obj->lock);
- return 0;
+ return ret;
+}
-fail:
- del_vma(vma);
-unlock:
+/*
+ * Get an iova but don't pin it. Doesn't need a put because iovas are currently
+ * valid for the life of the object
+ */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret;
+
+ mutex_lock(&msm_obj->lock);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&msm_obj->lock);
+
return ret;
}
/* get iova without taking a reference, used in places where you have
- * already done a 'msm_gem_get_iova()'.
+ * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
*/
uint64_t msm_gem_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
@@ -424,15 +465,24 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
return vma ? vma->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj,
+/*
+ * Unpin an iova by updating the reference counts. The memory isn't actually
+ * purged until something else (shrinker, mm_notifier, destroy, etc) decides
+ * to get rid of it
+ */
+void msm_gem_unpin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace)
{
- // XXX TODO ..
- // NOTE: probably don't need a _locked() version.. we wouldn't
- // normally unmap here, but instead just mark that it could be
- // unmapped (if the iova refcnt drops to zero), but then later
- // if another _get_iova_locked() fails we can start unmapping
- // things that are no longer needed..
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *vma;
+
+ mutex_lock(&msm_obj->lock);
+ vma = lookup_vma(obj, aspace);
+
+ if (!WARN_ON(!vma))
+ msm_gem_unmap_vma(aspace, vma);
+
+ mutex_unlock(&msm_obj->lock);
}
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -441,7 +491,7 @@ int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->pitch = align_pitch(args->width, args->bpp);
args->size = PAGE_ALIGN(args->pitch * args->height);
return msm_gem_new_handle(dev, file, args->size,
- MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+ MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -473,7 +523,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
mutex_lock(&msm_obj->lock);
if (WARN_ON(msm_obj->madv > madv)) {
- dev_err(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+ DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
msm_obj->madv, madv);
mutex_unlock(&msm_obj->lock);
return ERR_PTR(-EBUSY);
@@ -739,16 +789,24 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
break;
}
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, kref_read(&obj->refcount),
off, msm_obj->vaddr);
- /* FIXME: we need to print the address space here too */
- list_for_each_entry(vma, &msm_obj->vmas, list)
- seq_printf(m, " %08llx", vma->iova);
+ seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);
+
+ if (!list_empty(&msm_obj->vmas)) {
+
+ seq_puts(m, " vmas:");
- seq_printf(m, " %zu%s\n", obj->size, madv);
+ list_for_each_entry(vma, &msm_obj->vmas, list)
+ seq_printf(m, " [%s: %08llx,%s,inuse=%d]", vma->aspace->name,
+ vma->iova, vma->mapped ? "mapped" : "unmapped",
+ vma->inuse);
+
+ seq_puts(m, "\n");
+ }
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
@@ -775,9 +833,10 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
int count = 0;
size_t size = 0;
+ seq_puts(m, " flags id ref offset kaddr size madv name\n");
list_for_each_entry(msm_obj, list, mm_list) {
struct drm_gem_object *obj = &msm_obj->base;
- seq_printf(m, " ");
+ seq_puts(m, " ");
msm_gem_describe(obj, m);
count++;
size += obj->size;
@@ -831,7 +890,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle)
+ uint32_t size, uint32_t flags, uint32_t *handle,
+ char *name)
{
struct drm_gem_object *obj;
int ret;
@@ -841,6 +901,9 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
if (IS_ERR(obj))
return PTR_ERR(obj);
+ if (name)
+ msm_gem_object_set_name(obj, "%s", name);
+
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
@@ -864,7 +927,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
case MSM_BO_WC:
break;
default:
- dev_err(dev->dev, "invalid cache flag: %x\n",
+ DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}
@@ -912,9 +975,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
size = PAGE_ALIGN(size);
- if (!iommu_present(&platform_bus_type))
+ if (!msm_use_mmu(dev))
use_vram = true;
- else if ((flags & MSM_BO_STOLEN) && priv->vram.size)
+ else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
use_vram = true;
if (WARN_ON(use_vram && !priv->vram.size))
@@ -989,8 +1052,8 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
int ret, npages;
/* if we don't have IOMMU, don't bother pretending we can import: */
- if (!iommu_present(&platform_bus_type)) {
- dev_err(dev->dev, "cannot import without IOMMU\n");
+ if (!msm_use_mmu(dev)) {
+ DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
return ERR_PTR(-EINVAL);
}
@@ -1040,24 +1103,30 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
return ERR_CAST(obj);
if (iova) {
- ret = msm_gem_get_iova(obj, aspace, iova);
- if (ret) {
- drm_gem_object_put(obj);
- return ERR_PTR(ret);
- }
+ ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
+ if (ret)
+ goto err;
}
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
- msm_gem_put_iova(obj, aspace);
- drm_gem_object_put(obj);
- return ERR_CAST(vaddr);
+ msm_gem_unpin_iova(obj, aspace);
+ ret = PTR_ERR(vaddr);
+ goto err;
}
if (bo)
*bo = obj;
return vaddr;
+err:
+ if (locked)
+ drm_gem_object_put(obj);
+ else
+ drm_gem_object_put_unlocked(obj);
+
+ return ERR_PTR(ret);
+
}
void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
@@ -1073,3 +1142,31 @@ void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
{
return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}
+
+void msm_gem_kernel_put(struct drm_gem_object *bo,
+ struct msm_gem_address_space *aspace, bool locked)
+{
+ if (IS_ERR_OR_NULL(bo))
+ return;
+
+ msm_gem_put_vaddr(bo);
+ msm_gem_unpin_iova(bo, aspace);
+
+ if (locked)
+ drm_gem_object_put(bo);
+ else
+ drm_gem_object_put_unlocked(bo);
+}
+
+void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(bo);
+ va_list ap;
+
+ if (!fmt)
+ return;
+
+ va_start(ap, fmt);
+ vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
+ va_end(ap);
+}
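
The msm_gem.c rework above splits iova handling in two: msm_gem_get_iova() only reserves an address, which stays valid for the object's lifetime and needs no put, while msm_gem_get_and_pin_iova() additionally backs the range with pages and maps it, and must be paired with msm_gem_unpin_iova(). A sketch of the intended pairing in a caller, for illustration:

	uint64_t iova;
	int ret;

	/* Pin only around actual GPU access; unpinning leaves the mapping
	 * in place until the shrinker or destroy path purges it. */
	ret = msm_gem_get_and_pin_iova(obj, gpu->aspace, &iova);
	if (ret)
		return ret;

	/* ... program 'iova' into the hardware and submit ... */

	msm_gem_unpin_iova(obj, gpu->aspace);
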
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c5d9bd3e47a8..2064fac871b8 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -41,6 +41,8 @@ struct msm_gem_vma {
uint64_t iova;
struct msm_gem_address_space *aspace;
struct list_head list; /* node in msm_gem_object::vmas */
+ bool mapped;
+ int inuse;
};
struct msm_gem_object {
@@ -91,6 +93,8 @@ struct msm_gem_object {
*/
struct drm_mm_node *vram_node;
struct mutex lock; /* Protects resources associated with bo */
+
+ char name[32]; /* Identifier to print for the debugfs files */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
@@ -150,6 +154,7 @@ struct msm_gem_submit {
struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
+ u32 ident; /* An identifier for the submit, used for logging */
struct {
uint32_t type;
uint32_t size; /* in dwords */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 7a7923e6220d..a28465d90529 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -20,6 +20,7 @@
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
+#include "msm_gpu_trace.h"
/*
* Cmdstream submission:
@@ -48,7 +49,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit->dev = dev;
submit->gpu = gpu;
submit->fence = NULL;
- submit->pid = get_pid(task_pid(current));
submit->cmd = (void *)&submit->bos[nr_bos];
submit->queue = queue;
submit->ring = gpu->rb[queue->prio];
@@ -114,8 +114,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
pagefault_disable();
}
+/* at least one of the READ or WRITE flags must be set: */
+#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+
if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
- !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
+ !(submit_bo.flags & MANDATORY_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
@@ -167,7 +170,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->aspace);
+ msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -241,7 +244,8 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
- ret = reservation_object_reserve_shared(msm_obj->resv);
+ ret = reservation_object_reserve_shared(msm_obj->resv,
+ 1);
if (ret)
return ret;
}
@@ -269,7 +273,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
uint64_t iova;
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova(&msm_obj->base,
+ ret = msm_gem_get_and_pin_iova(&msm_obj->base,
submit->gpu->aspace, &iova);
if (ret)
@@ -317,6 +321,9 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
uint32_t *ptr;
int ret = 0;
+ if (!nr_relocs)
+ return 0;
+
if (offset % 4) {
DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
return -EINVAL;
@@ -405,19 +412,19 @@ static void submit_cleanup(struct msm_gem_submit *submit)
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ static atomic_t ident = ATOMIC_INIT(0);
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
struct msm_gpu *gpu = priv->gpu;
- struct dma_fence *in_fence = NULL;
struct sync_file *sync_file = NULL;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
int out_fence_fd = -1;
+ struct pid *pid = get_pid(task_pid(current));
unsigned i;
- int ret;
-
+ int ret, submitid;
if (!gpu)
return -ENXIO;
@@ -440,9 +447,16 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!queue)
return -ENOENT;
+ /* Get a unique identifier for the submission for logging purposes */
+ submitid = atomic_inc_return(&ident) - 1;
+
ring = gpu->rb[queue->prio];
+ trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
+ args->nr_bos, args->nr_cmds);
if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+ struct dma_fence *in_fence;
+
in_fence = sync_file_get_fence(args->fence_fd);
if (!in_fence)
@@ -452,11 +466,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
- if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
+ ret = 0;
+ if (!dma_fence_match_context(in_fence, ring->fctx->context))
ret = dma_fence_wait(in_fence, true);
- if (ret)
- return ret;
- }
+
+ dma_fence_put(in_fence);
+ if (ret)
+ return ret;
}
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -477,6 +493,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
+ submit->pid = pid;
+ submit->ident = submitid;
+
if (args->flags & MSM_SUBMIT_SUDO)
submit->in_rb = true;
@@ -582,8 +601,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
out:
- if (in_fence)
- dma_fence_put(in_fence);
submit_cleanup(submit);
if (ret)
msm_gem_submit_free(submit);
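
The FENCE_FD_IN hunk above also narrows the in-fence lifetime: the fence reference is dropped right after the (conditional) wait instead of surviving until the out: label, which is why the dma_fence_put() there goes away. The shape of the new logic, restated for illustration:

	struct dma_fence *in_fence = sync_file_get_fence(args->fence_fd);

	if (!in_fence)
		return -EINVAL;

	/* Only block on fences from a foreign fence context; fences from
	 * the same ring are ordered by submission anyway. */
	ret = 0;
	if (!dma_fence_match_context(in_fence, ring->fctx->context))
		ret = dma_fence_wait(in_fence, true);

	dma_fence_put(in_fence);	/* drop the reference on every path */
	if (ret)
		return ret;
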
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index ffbec224551b..557360788084 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -38,20 +38,72 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
kref_put(&aspace->kref, msm_gem_address_space_destroy);
}
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt)
+/* Actually unmap memory for the vma */
+void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
{
- if (!aspace || !vma->iova)
+ unsigned size = vma->node.size << PAGE_SHIFT;
+
+ /* Print a message if we try to purge a vma in use */
+ if (WARN_ON(vma->inuse > 0))
+ return;
+
+ /* Don't do anything if the memory isn't mapped */
+ if (!vma->mapped)
return;
- if (aspace->mmu) {
- unsigned size = vma->node.size << PAGE_SHIFT;
- aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
- }
+ if (aspace->mmu)
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
+
+ vma->mapped = false;
+}
+
+/* Remove reference counts for the mapping */
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ if (!WARN_ON(!vma->iova))
+ vma->inuse--;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+{
+ unsigned size = npages << PAGE_SHIFT;
+ int ret = 0;
+
+ if (WARN_ON(!vma->iova))
+ return -EINVAL;
+
+ /* Increase the usage counter */
+ vma->inuse++;
+
+ if (vma->mapped)
+ return 0;
+
+ vma->mapped = true;
+
+ if (aspace->mmu)
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ size, IOMMU_READ | IOMMU_WRITE);
+
+ if (ret)
+ vma->mapped = false;
+
+ return ret;
+}
+
+/* Close an iova. Warn if it is still in use */
+void msm_gem_close_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ if (WARN_ON(vma->inuse > 0 || vma->mapped))
+ return;
spin_lock(&aspace->lock);
- drm_mm_remove_node(&vma->node);
+ if (vma->iova)
+ drm_mm_remove_node(&vma->node);
spin_unlock(&aspace->lock);
vma->iova = 0;
@@ -59,18 +111,16 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
msm_gem_address_space_put(aspace);
}
-int
-msm_gem_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+/* Initialize a new vma and allocate an iova for it */
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, int npages)
{
int ret;
- spin_lock(&aspace->lock);
- if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
- spin_unlock(&aspace->lock);
- return 0;
- }
+ if (WARN_ON(vma->iova))
+ return -EBUSY;
+ spin_lock(&aspace->lock);
ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
spin_unlock(&aspace->lock);
@@ -78,19 +128,14 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
return ret;
vma->iova = vma->node.start << PAGE_SHIFT;
+ vma->mapped = false;
- if (aspace->mmu) {
- unsigned size = npages << PAGE_SHIFT;
- ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
- size, IOMMU_READ | IOMMU_WRITE);
- }
-
- /* Get a reference to the aspace to keep it around */
kref_get(&aspace->kref);
- return ret;
+ return 0;
}
+
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name)
@@ -114,3 +159,26 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
return aspace;
}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
+ const char *name, uint64_t va_start, uint64_t va_end)
+{
+ struct msm_gem_address_space *aspace;
+ u64 size = va_end - va_start;
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&aspace->lock);
+ aspace->name = name;
+ aspace->mmu = msm_gpummu_new(dev, gpu);
+
+ drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
+ size >> PAGE_SHIFT);
+
+ kref_init(&aspace->kref);
+
+ return aspace;
+}
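
msm_gem_vma.c now separates address management from mapping: init allocates the iova, map/unmap only adjust a use count, and purge/close do the actual teardown. The lifecycle, sketched as the sequence a GEM object goes through (illustrative, mirroring the calls in msm_gem.c above):

	msm_gem_init_vma(aspace, vma, npages);     /* reserve iova, unmapped */
	msm_gem_map_vma(aspace, vma, sgt, npages); /* inuse++, map on first use */
	msm_gem_unmap_vma(aspace, vma);            /* inuse--, mapping retained */
	msm_gem_purge_vma(aspace, vma);            /* unmap once inuse == 0 */
	msm_gem_close_vma(aspace, vma);            /* release iova + aspace ref */
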
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 11aac8337066..5f3eff304355 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -19,6 +19,8 @@
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
+#include "msm_gpu_trace.h"
+#include "adreno/adreno_gpu.h"
#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
@@ -107,7 +109,7 @@ static void msm_devfreq_init(struct msm_gpu *gpu)
&msm_devfreq_profile, "simple_ondemand", NULL);
if (IS_ERR(gpu->devfreq.devfreq)) {
- dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
gpu->devfreq.devfreq = NULL;
}
@@ -122,7 +124,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
if (gpu->gpu_reg) {
ret = regulator_enable(gpu->gpu_reg);
if (ret) {
- dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
return ret;
}
}
@@ -130,7 +132,7 @@ static int enable_pwrrail(struct msm_gpu *gpu)
if (gpu->gpu_cx) {
ret = regulator_enable(gpu->gpu_cx);
if (ret) {
- dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+ DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
return ret;
}
}
@@ -315,28 +317,28 @@ static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
/* Don't record write only objects */
-
state_bo->size = obj->base.size;
state_bo->iova = iova;
- /* Only store the data for buffer objects marked for read */
- if ((flags & MSM_SUBMIT_BO_READ)) {
+ /* Only store data for non-imported buffer objects marked for read */
+ if ((flags & MSM_SUBMIT_BO_READ) && !obj->base.import_attach) {
void *ptr;
state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
if (!state_bo->data)
- return;
+ goto out;
ptr = msm_gem_get_vaddr_active(&obj->base);
if (IS_ERR(ptr)) {
kvfree(state_bo->data);
- return;
+ state_bo->data = NULL;
+ goto out;
}
memcpy(state_bo->data, ptr, obj->base.size);
msm_gem_put_vaddr(&obj->base);
}
-
+out:
state->nr_bos++;
}
@@ -345,6 +347,10 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
{
struct msm_gpu_state *state;
+ /* Check if the target supports capturing crash state */
+ if (!gpu->funcs->gpu_state_get)
+ return;
+
/* Only save one crash state at a time */
if (gpu->crashstate)
return;
@@ -360,12 +366,15 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
if (submit) {
int i;
- state->bos = kcalloc(submit->nr_bos,
+ state->bos = kcalloc(submit->nr_cmds,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
- for (i = 0; state->bos && i < submit->nr_bos; i++)
- msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
- submit->bos[i].iova, submit->bos[i].flags);
+ for (i = 0; state->bos && i < submit->nr_cmds; i++) {
+ int idx = submit->cmd[i].idx;
+
+ msm_gpu_crashstate_get_bo(state, submit->bos[idx].obj,
+ submit->bos[idx].iova, submit->bos[idx].flags);
+ }
}
/* Set the active crash state to be dumped on failure */
@@ -428,16 +437,15 @@ static void recover_worker(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
- dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+ DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit) {
struct task_struct *task;
- rcu_read_lock();
- task = pid_task(submit->pid, PIDTYPE_PID);
+ task = get_pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- comm = kstrdup(task->comm, GFP_ATOMIC);
+ comm = kstrdup(task->comm, GFP_KERNEL);
/*
* So slightly annoying, in other paths like
@@ -450,13 +458,13 @@ static void recover_worker(struct work_struct *work)
* about the submit going away.
*/
mutex_unlock(&dev->struct_mutex);
- cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
+ cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ put_task_struct(task);
mutex_lock(&dev->struct_mutex);
}
- rcu_read_unlock();
if (comm && cmd) {
- dev_err(dev->dev, "%s: offending task: %s (%s)\n",
+ DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
gpu->name, comm, cmd);
msm_rd_dump_submit(priv->hangrd, submit,
@@ -539,11 +547,11 @@ static void hangcheck_handler(struct timer_list *t)
} else if (fence < ring->seqno) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
- dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
gpu->name, ring->id);
- dev_err(dev->dev, "%s: completed fence: %u\n",
+ DRM_DEV_ERROR(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
- dev_err(dev->dev, "%s: submitted fence: %u\n",
+ DRM_DEV_ERROR(dev->dev, "%s: submitted fence: %u\n",
gpu->name, ring->seqno);
queue_work(priv->wq, &gpu->recover_work);
@@ -659,15 +667,33 @@ out:
* Cmdstream submission/retirement:
*/
-static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ struct msm_gem_submit *submit)
{
+ int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
+ volatile struct msm_gpu_submit_stats *stats;
+ u64 elapsed, clock = 0;
int i;
+ stats = &ring->memptrs->stats[index];
+ /* Convert 19.2 MHz always-on ticks to nanoseconds for elapsed time */
+ elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
+ do_div(elapsed, 192);
+
+ /* Calculate the clock frequency from the number of CP cycles */
+ if (elapsed) {
+ clock = (stats->cpcycles_end - stats->cpcycles_start) * 1000;
+ do_div(clock, elapsed);
+ }
+
+ trace_msm_gpu_submit_retired(submit, elapsed, clock,
+ stats->alwayson_start, stats->alwayson_end);
+
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
- msm_gem_put_iova(&msm_obj->base, gpu->aspace);
+ msm_gem_unpin_iova(&msm_obj->base, gpu->aspace);
drm_gem_object_put(&msm_obj->base);
}
@@ -690,7 +716,7 @@ static void retire_submits(struct msm_gpu *gpu)
list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
if (dma_fence_is_signaled(submit->fence))
- retire_submit(gpu, submit);
+ retire_submit(gpu, ring, submit);
}
}
}
@@ -751,7 +777,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* submit takes a reference to the bo and iova until retired: */
drm_gem_object_get(&msm_obj->base);
- msm_gem_get_iova(&msm_obj->base,
+ msm_gem_get_and_pin_iova(&msm_obj->base,
submit->gpu->aspace, &iova);
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
@@ -800,7 +826,6 @@ static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
uint64_t va_start, uint64_t va_end)
{
- struct iommu_domain *iommu;
struct msm_gem_address_space *aspace;
int ret;
@@ -809,20 +834,27 @@ msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
* and have separate page tables per context. For now, to keep things
* simple and to get something working, just use a single address space:
*/
- iommu = iommu_domain_alloc(&platform_bus_type);
- if (!iommu)
- return NULL;
-
- iommu->geometry.aperture_start = va_start;
- iommu->geometry.aperture_end = va_end;
-
- dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+ if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
+ struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu)
+ return NULL;
+
+ iommu->geometry.aperture_start = va_start;
+ iommu->geometry.aperture_end = va_end;
+
+ DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
+
+ aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
+ if (IS_ERR(aspace))
+ iommu_domain_free(iommu);
+ } else {
+ aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
+ va_start, va_end);
+ }
- aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
if (IS_ERR(aspace)) {
- dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
+ DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
PTR_ERR(aspace));
- iommu_domain_free(iommu);
return ERR_CAST(aspace);
}
@@ -871,14 +903,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->irq = platform_get_irq_byname(pdev, config->irqname);
if (gpu->irq < 0) {
ret = gpu->irq;
- dev_err(drm->dev, "failed to get irq: %d\n", ret);
+ DRM_DEV_ERROR(drm->dev, "failed to get irq: %d\n", ret);
goto fail;
}
ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
IRQF_TRIGGER_HIGH, gpu->name, gpu);
if (ret) {
- dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+ DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
goto fail;
}
@@ -911,22 +943,25 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
config->va_start, config->va_end);
if (gpu->aspace == NULL)
- dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+ DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
else if (IS_ERR(gpu->aspace)) {
ret = PTR_ERR(gpu->aspace);
goto fail;
}
- memptrs = msm_gem_kernel_new(drm, sizeof(*gpu->memptrs_bo),
+ memptrs = msm_gem_kernel_new(drm,
+ sizeof(struct msm_rbmemptrs) * nr_rings,
MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
ret = PTR_ERR(memptrs);
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+ DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
goto fail;
}
+ msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");
+
if (nr_rings > ARRAY_SIZE(gpu->rb)) {
DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
ARRAY_SIZE(gpu->rb));
@@ -939,7 +974,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->rb[i])) {
ret = PTR_ERR(gpu->rb[i]);
- dev_err(drm->dev,
+ DRM_DEV_ERROR(drm->dev,
"could not create ringbuffer %d: %d\n", i, ret);
goto fail;
}
@@ -958,11 +993,7 @@ fail:
gpu->rb[i] = NULL;
}
- if (gpu->memptrs_bo) {
- msm_gem_put_vaddr(gpu->memptrs_bo);
- msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_put_unlocked(gpu->memptrs_bo);
- }
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
platform_set_drvdata(pdev, NULL);
return ret;
@@ -981,11 +1012,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
gpu->rb[i] = NULL;
}
- if (gpu->memptrs_bo) {
- msm_gem_put_vaddr(gpu->memptrs_bo);
- msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_put_unlocked(gpu->memptrs_bo);
- }
+ msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace, false);
if (!IS_ERR_OR_NULL(gpu->aspace)) {
gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
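
retire_submit() above converts always-on counter ticks to nanoseconds with elapsed = ticks * 10000 / 192: one 19.2 MHz tick lasts 1e9 / 19,200,000 ns = 10000/192 ns (about 52.08 ns), so, for example, 19,200,000 ticks come out as exactly 1,000,000,000 ns. The same conversion as a stand-alone kernel-side helper, for illustration:

	/* Convert 19.2 MHz always-on counter ticks to nanoseconds. */
	static u64 alwayson_ticks_to_ns(u64 ticks)
	{
		u64 ns = ticks * 10000;

		do_div(ns, 192);	/* 10000/192 ns per tick */
		return ns;
	}
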
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index f82bac086666..efb49bb64191 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -187,6 +187,7 @@ struct msm_gpu_state_bo {
u64 iova;
size_t size;
void *data;
+ bool encoded;
};
struct msm_gpu_state {
@@ -201,6 +202,7 @@ struct msm_gpu_state {
u32 wptr;
void *data;
int data_size;
+ bool encoded;
} ring[MSM_GPU_MAX_RINGS];
int nr_registers;
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
new file mode 100644
index 000000000000..1155118a27a1
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(_MSM_GPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_GPU_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm_msm
+#define TRACE_INCLUDE_FILE msm_gpu_trace
+
+TRACE_EVENT(msm_gpu_submit,
+ TP_PROTO(pid_t pid, u32 ringid, u32 id, u32 nr_bos, u32 nr_cmds),
+ TP_ARGS(pid, ringid, id, nr_bos, nr_cmds),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, nr_cmds)
+ __field(u32, nr_bos)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->id = id;
+ __entry->ringid = ringid;
+ __entry->nr_bos = nr_bos;
+ __entry->nr_cmds = nr_cmds;
+ ),
+ TP_printk("id=%d pid=%d ring=%d bos=%d cmds=%d",
+ __entry->id, __entry->pid, __entry->ringid,
+ __entry->nr_bos, __entry->nr_cmds)
+);
+
+TRACE_EVENT(msm_gpu_submit_flush,
+ TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
+ TP_ARGS(submit, ticks),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, seqno)
+ __field(u64, ticks)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid_nr(submit->pid);
+ __entry->id = submit->ident;
+ __entry->ringid = submit->ring->id;
+ __entry->seqno = submit->seqno;
+ __entry->ticks = ticks;
+ ),
+ TP_printk("id=%d pid=%d ring=%d:%d ticks=%lld",
+ __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+ __entry->ticks)
+);
+
+
+TRACE_EVENT(msm_gpu_submit_retired,
+ TP_PROTO(struct msm_gem_submit *submit, u64 elapsed, u64 clock,
+ u64 start, u64 end),
+ TP_ARGS(submit, elapsed, clock, start, end),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(u32, id)
+ __field(u32, ringid)
+ __field(u32, seqno)
+ __field(u64, elapsed)
+ __field(u64, clock)
+ __field(u64, start_ticks)
+ __field(u64, end_ticks)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid_nr(submit->pid);
+ __entry->id = submit->ident;
+ __entry->ringid = submit->ring->id;
+ __entry->seqno = submit->seqno;
+ __entry->elapsed = elapsed;
+ __entry->clock = clock;
+ __entry->start_ticks = start;
+ __entry->end_ticks = end;
+ ),
+ TP_printk("id=%d pid=%d ring=%d:%d elapsed=%lld ns mhz=%lld start=%lld end=%lld",
+ __entry->id, __entry->pid, __entry->ringid, __entry->seqno,
+ __entry->elapsed, __entry->clock,
+ __entry->start_ticks, __entry->end_ticks)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/msm
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/msm_gpu_tracepoints.c b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
new file mode 100644
index 000000000000..72c074f8c4f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu_tracepoints.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "msm_gem.h"
+#include "msm_ringbuffer.h"
+
+#define CREATE_TRACE_POINTS
+#include "msm_gpu_trace.h"
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
new file mode 100644
index 000000000000..27312b553dd8
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 The Linux Foundation. All rights reserved. */
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "adreno/adreno_gpu.h"
+#include "adreno/a2xx.xml.h"
+
+struct msm_gpummu {
+ struct msm_mmu base;
+ struct msm_gpu *gpu;
+ dma_addr_t pt_base;
+ uint32_t *table;
+};
+#define to_msm_gpummu(x) container_of(x, struct msm_gpummu, base)
+
+#define GPUMMU_VA_START SZ_16M
+#define GPUMMU_VA_RANGE (0xfff * SZ_64K)
+#define GPUMMU_PAGE_SIZE SZ_4K
+#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
+
+static int msm_gpummu_attach(struct msm_mmu *mmu, const char * const *names,
+ int cnt)
+{
+ return 0;
+}
+
+static void msm_gpummu_detach(struct msm_mmu *mmu, const char * const *names,
+ int cnt)
+{
+}
+
+static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, unsigned len, int prot)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+ struct scatterlist *sg;
+ unsigned prot_bits = 0;
+ unsigned i, j;
+
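+ /* Translate the IOMMU prot flags into the two a2xx PTE permission bits. */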
+ if (prot & IOMMU_WRITE)
+ prot_bits |= 1;
+ if (prot & IOMMU_READ)
+ prot_bits |= 2;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ dma_addr_t addr = sg->dma_address;
+ for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
+ gpummu->table[idx] = addr | prot_bits;
+ addr += GPUMMU_PAGE_SIZE;
+ }
+ }
+
+ /* This could be improved by deferring the flush across multiple map() calls. */
+ gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+ return 0;
+}
+
+static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+ unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
+ unsigned i;
+
+ for (i = 0; i < len / GPUMMU_PAGE_SIZE; i++, idx++)
+ gpummu->table[idx] = 0;
+
+ gpu_write(gpummu->gpu, REG_A2XX_MH_MMU_INVALIDATE,
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_ALL |
+ A2XX_MH_MMU_INVALIDATE_INVALIDATE_TC);
+ return 0;
+}
+
+static void msm_gpummu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
+
+ dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
+ DMA_ATTR_FORCE_CONTIGUOUS);
+
+ kfree(gpummu);
+}
+
+static const struct msm_mmu_funcs funcs = {
+ .attach = msm_gpummu_attach,
+ .detach = msm_gpummu_detach,
+ .map = msm_gpummu_map,
+ .unmap = msm_gpummu_unmap,
+ .destroy = msm_gpummu_destroy,
+};
+
+struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
+{
+ struct msm_gpummu *gpummu;
+
+ gpummu = kzalloc(sizeof(*gpummu), GFP_KERNEL);
+ if (!gpummu)
+ return ERR_PTR(-ENOMEM);
+
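+ /* The extra 32 bytes back the 32-byte-aligned TRAN_ERROR area
+ * handed out by msm_gpummu_params().
+ */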
+ gpummu->table = dma_alloc_attrs(dev, TABLE_SIZE + 32, &gpummu->pt_base,
+ GFP_KERNEL | __GFP_ZERO, DMA_ATTR_FORCE_CONTIGUOUS);
+ if (!gpummu->table) {
+ kfree(gpummu);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ gpummu->gpu = gpu;
+ msm_mmu_init(&gpummu->base, dev, &funcs);
+
+ return &gpummu->base;
+}
+
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+ dma_addr_t *tran_error)
+{
+ dma_addr_t base = to_msm_gpummu(mmu)->pt_base;
+
+ *pt_base = base;
+ *tran_error = base + TABLE_SIZE; /* 32-byte aligned */
+}
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index b23d33622f37..4d62790cd425 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -66,13 +66,12 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
// pm_runtime_get_sync(mmu->dev);
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
// pm_runtime_put_sync(mmu->dev);
- WARN_ON(ret < 0);
+ WARN_ON(!ret);
return (ret == len) ? 0 : -EINVAL;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index fd88cebb6adb..2b81b43a4bab 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -67,9 +67,6 @@ struct msm_kms_funcs {
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
- /* pm suspend/resume hooks */
- int (*pm_suspend)(struct device *dev);
- int (*pm_resume)(struct device *dev);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index aa2c5d4580c8..d21b26604d0b 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -25,8 +25,7 @@ struct msm_mmu_funcs {
void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- unsigned len);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
void (*destroy)(struct msm_mmu *mmu);
};
@@ -54,4 +53,7 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
mmu->handler = handler;
}
+void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
+ dma_addr_t *tran_error);
+
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index cca933458439..90e9d0a48dc0 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -316,10 +316,11 @@ static void snapshot_buf(struct msm_rd_state *rd,
uint64_t iova, uint32_t size)
{
struct msm_gem_object *obj = submit->bos[idx].obj;
+ unsigned offset = 0;
const char *buf;
if (iova) {
- buf += iova - submit->bos[idx].iova;
+ offset = iova - submit->bos[idx].iova;
} else {
iova = submit->bos[idx].iova;
size = obj->base.size;
@@ -340,11 +341,19 @@ static void snapshot_buf(struct msm_rd_state *rd,
if (IS_ERR(buf))
return;
+ buf += offset;
+
rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
msm_gem_put_vaddr(&obj->base);
}
+static bool
+should_dump(struct msm_gem_submit *submit, int idx)
+{
+ return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);
+}
+
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
@@ -386,15 +395,16 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
- for (i = 0; rd_full && i < submit->nr_bos; i++)
- snapshot_buf(rd, submit, i, 0, 0);
+ for (i = 0; i < submit->nr_bos; i++)
+ if (should_dump(submit, i))
+ snapshot_buf(rd, submit, i, 0, 0);
for (i = 0; i < submit->nr_cmds; i++) {
uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
- if (!rd_full) {
+ if (!should_dump(submit, i)) {
snapshot_buf(rd, submit, submit->cmd[i].idx,
submit->cmd[i].iova, szd * 4);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 6f5295b3f2f6..20a96fe69dcd 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -36,15 +36,18 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->gpu = gpu;
ring->id = id;
- /* Pass NULL for the iova pointer - we will map it later */
+
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
- MSM_BO_WC, gpu->aspace, &ring->bo, NULL);
+ MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
ring->start = 0;
goto fail;
}
+
+ msm_gem_object_set_name(ring->bo, "ring%d", id);
+
ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
ring->next = ring->start;
ring->cur = ring->start;
@@ -73,10 +76,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
msm_fence_context_free(ring->fctx);
- if (ring->bo) {
- msm_gem_put_iova(ring->bo, ring->gpu->aspace);
- msm_gem_put_vaddr(ring->bo);
- drm_gem_object_put_unlocked(ring->bo);
- }
+ msm_gem_kernel_put(ring->bo, ring->gpu->aspace, false);
+
kfree(ring);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index cffce094aecb..6434ebb13136 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -23,9 +23,25 @@
#define rbmemptr(ring, member) \
((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
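+
+/* GPU address of a single msm_gpu_submit_stats field for submit slot 'index'. */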
+#define rbmemptr_stats(ring, index, member) \
+ (rbmemptr((ring), stats) + \
+ ((index) * sizeof(struct msm_gpu_submit_stats)) + \
+ offsetof(struct msm_gpu_submit_stats, member))
+
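+/* CP-cycle and always-on counter samples taken at the start and end of a submit. */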
+struct msm_gpu_submit_stats {
+ u64 cpcycles_start;
+ u64 cpcycles_end;
+ u64 alwayson_start;
+ u64 alwayson_end;
+};
+
+#define MSM_GPU_SUBMIT_STATS_COUNT 64
+
struct msm_rbmemptrs {
volatile uint32_t rptr;
volatile uint32_t fence;
+
+ volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
};
struct msm_ringbuffer {
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 2393e6d16ffd..88ba003979e6 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -417,7 +417,7 @@ static int mxsfb_probe(struct platform_device *pdev)
err_unload:
mxsfb_unload(drm);
err_free:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -428,7 +428,7 @@ static int mxsfb_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
mxsfb_unload(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 70dce544984e..1727d399833c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -67,7 +67,7 @@ nv04_display_create(struct drm_device *dev)
for (i = 0; i < dcb->entries; i++) {
struct dcb_output *dcbent = &dcb->entry[i];
- connector = nouveau_connector_create(dev, dcbent->connector);
+ connector = nouveau_connector_create(dev, dcbent);
if (IS_ERR(connector))
continue;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/Kbuild b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
index 849b0f45afb8..3d074aa31173 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/Kbuild
+++ b/drivers/gpu/drm/nouveau/dispnv50/Kbuild
@@ -7,6 +7,7 @@ nouveau-y += dispnv50/core827d.o
nouveau-y += dispnv50/core907d.o
nouveau-y += dispnv50/core917d.o
nouveau-y += dispnv50/corec37d.o
+nouveau-y += dispnv50/corec57d.o
nouveau-y += dispnv50/dac507d.o
nouveau-y += dispnv50/dac907d.o
@@ -23,12 +24,14 @@ nouveau-y += dispnv50/head827d.o
nouveau-y += dispnv50/head907d.o
nouveau-y += dispnv50/head917d.o
nouveau-y += dispnv50/headc37d.o
+nouveau-y += dispnv50/headc57d.o
nouveau-y += dispnv50/wimm.o
nouveau-y += dispnv50/wimmc37b.o
nouveau-y += dispnv50/wndw.o
nouveau-y += dispnv50/wndwc37e.o
+nouveau-y += dispnv50/wndwc57e.o
nouveau-y += dispnv50/base.o
nouveau-y += dispnv50/base507c.o
diff --git a/drivers/gpu/drm/nouveau/dispnv50/atom.h b/drivers/gpu/drm/nouveau/dispnv50/atom.h
index 908feb1fc60f..a194990d2b0d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/atom.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/atom.h
@@ -54,9 +54,10 @@ struct nv50_head_atom {
u64 offset:40;
u8 buffer:1;
u8 mode:4;
- u8 size:2;
+ u16 size:11;
u8 range:2;
u8 output_mode:2;
+ void (*load)(struct drm_color_lut *, int size, void __iomem *);
} olut;
struct {
@@ -169,9 +170,11 @@ struct nv50_wndw_atom {
u8 buffer:1;
u8 enable:2;
u8 mode:4;
- u8 size:2;
+ u16 size:11;
u8 range:2;
u8 output_mode:2;
+ void (*load)(struct drm_color_lut *, int size,
+ void __iomem *);
} i;
} xlut;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base907c.c b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
index a562fc94ce59..049ce6da321c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base907c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base907c.c
@@ -80,6 +80,7 @@ base907c_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
asyw->xlut.i.mode = 7;
asyw->xlut.i.enable = 2;
+ asyw->xlut.i.load = head907d_olut_load;
}
const struct nv50_wndw_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.c b/drivers/gpu/drm/nouveau/dispnv50/core.c
index f3c49adb1bdb..c25e0ebe3c92 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.c
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
int version;
int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
} cores[] = {
+ { TU104_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
{ GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
{ GP102_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
{ GP100_DISP_CORE_CHANNEL_DMA, 0, core917d_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index 8470df9dd13d..df8336b593f7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -46,5 +46,9 @@ extern const struct nv50_outp_func sor907d;
int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
+void corec37d_update(struct nv50_core *, u32 *, bool);
extern const struct nv50_outp_func sorc37d;
+
+int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index b5c17c948918..7860774b65bc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -24,7 +24,7 @@
#include <nouveau_bo.h>
-static void
+void
corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
{
u32 *push;
@@ -71,7 +71,7 @@ corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
}
-void
+static void
corec37d_init(struct nv50_core *core)
{
const u32 windows = 8; /*XXX*/
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
new file mode 100644
index 000000000000..b606d68cda10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "core.h"
+#include "head.h"
+
+static void
+corec57d_init(struct nv50_core *core)
+{
+ const u32 windows = 8; /*XXX*/
+ u32 *push, i;
+ if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) {
+ evo_mthd(push, 0x0208, 1);
+ evo_data(push, core->chan.sync.handle);
+ for (i = 0; i < windows; i++) {
+ evo_mthd(push, 0x1000 + (i * 0x080), 3);
+ evo_data(push, i >> 1);
+ evo_data(push, 0x0000000f);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x1010 + (i * 0x080), 1);
+ evo_data(push, 0x00117fff);
+ }
+ evo_mthd(push, 0x0200, 1);
+ evo_data(push, 0x00000001);
+ evo_kick(push, &core->chan);
+ }
+}
+
+static const struct nv50_core_func
+corec57d = {
+ .init = corec57d_init,
+ .ntfy_init = corec37d_ntfy_init,
+ .ntfy_wait_done = corec37d_ntfy_wait_done,
+ .update = corec37d_update,
+ .head = &headc57d,
+ .sor = &sorc37d,
+};
+
+int
+corec57d_new(struct nouveau_drm *drm, s32 oclass, struct nv50_core **pcore)
+{
+ return core507d_new_(&corec57d, drm, oclass, pcore);
+}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs.c b/drivers/gpu/drm/nouveau/dispnv50/curs.c
index f592087338c4..cb6e4d2b1b45 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs.c
@@ -31,6 +31,7 @@ nv50_curs_new(struct nouveau_drm *drm, int head, struct nv50_wndw **pwndw)
int version;
int (*new)(struct nouveau_drm *, int, s32, struct nv50_wndw **);
} curses[] = {
+ { TU104_DISP_CURSOR, 0, cursc37a_new },
{ GV100_DISP_CURSOR, 0, cursc37a_new },
{ GK104_DISP_CURSOR, 0, curs907a_new },
{ GF110_DISP_CURSOR, 0, curs907a_new },
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 6cbbae3f438b..134701a837c8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -198,6 +198,22 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
/******************************************************************************
* EVO channel helpers
*****************************************************************************/
+static void
+evo_flush(struct nv50_dmac *dmac)
+{
+ /* Push buffer fetches are not coherent with BAR1, so we need to ensure
+ * writes have been flushed right through to VRAM before writing PUT.
+ */
+ if (dmac->push.type & NVIF_MEM_VRAM) {
+ struct nvif_device *device = dmac->base.device;
+ nvif_wr32(&device->object, 0x070000, 0x00000001);
+ nvif_msec(device, 2000,
+ if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
+ break;
+ );
+ }
+}
+
u32 *
evo_wait(struct nv50_dmac *evoc, int nr)
{
@@ -208,6 +224,7 @@ evo_wait(struct nv50_dmac *evoc, int nr)
mutex_lock(&dmac->lock);
if (put + nr >= (PAGE_SIZE / 4) - 8) {
dmac->ptr[put] = 0x20000000;
+ evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
if (nvif_msec(device, 2000,
@@ -230,17 +247,7 @@ evo_kick(u32 *push, struct nv50_dmac *evoc)
{
struct nv50_dmac *dmac = evoc;
- /* Push buffer fetches are not coherent with BAR1, we need to ensure
- * writes have been flushed right through to VRAM before writing PUT.
- */
- if (dmac->push.type & NVIF_MEM_VRAM) {
- struct nvif_device *device = dmac->base.device;
- nvif_wr32(&device->object, 0x070000, 0x00000001);
- nvif_msec(device, 2000,
- if (!(nvif_rd32(&device->object, 0x070000) & 0x00000002))
- break;
- );
- }
+ evo_flush(dmac);
nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
mutex_unlock(&dmac->lock);
@@ -1255,8 +1262,16 @@ nv50_mstm_fini(struct nv50_mstm *mstm)
static void
nv50_mstm_init(struct nv50_mstm *mstm)
{
- if (mstm && mstm->mgr.mst_state)
- drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+ int ret;
+
+ if (!mstm || !mstm->mgr.mst_state)
+ return;
+
+ ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr);
+ if (ret == -1) {
+ drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
+ drm_kms_helper_hotplug_event(mstm->mgr.dev);
+ }
}
static void
@@ -1264,6 +1279,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
{
struct nv50_mstm *mstm = *pmstm;
if (mstm) {
+ drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
kfree(*pmstm);
*pmstm = NULL;
}
@@ -2293,7 +2309,7 @@ nv50_display_create(struct drm_device *dev)
/* create encoder/connector objects based on VBIOS DCB table */
for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
- connector = nouveau_connector_create(dev, dcbe->connector);
+ connector = nouveau_connector_create(dev, dcbe);
if (IS_ERR(connector))
continue;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index e48c5eb35b49..2216c58620c2 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -45,6 +45,8 @@ struct nv50_disp_interlock {
void corec37d_ntfy_init(struct nouveau_bo *, u32);
+void head907d_olut_load(struct drm_color_lut *, int size, void __iomem *);
+
struct nv50_chan {
struct nvif_object user;
struct nvif_device *device;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index 4f57e5379796..ac97ebce5b35 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -50,9 +50,9 @@ nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
if (asyh->set.core ) head->func->core_set(head, asyh);
if (asyh->set.olut ) {
asyh->olut.offset = nv50_lut_load(&head->olut,
- asyh->olut.mode <= 1,
asyh->olut.buffer,
- asyh->state.gamma_lut);
+ asyh->state.gamma_lut,
+ asyh->olut.load);
head->func->olut_set(head, asyh);
}
if (asyh->set.curs ) head->func->curs_set(head, asyh);
@@ -210,7 +210,7 @@ nv50_head_atomic_check_lut(struct nv50_head *head,
}
}
- if (!olut) {
+ if (!olut && !head->func->olut_identity) {
asyh->olut.handle = 0;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index 37b3248c6dae..d1c002f534d4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -21,6 +21,7 @@ struct nv50_head_func {
void (*view)(struct nv50_head *, struct nv50_head_atom *);
void (*mode)(struct nv50_head *, struct nv50_head_atom *);
void (*olut)(struct nv50_head *, struct nv50_head_atom *);
+ bool olut_identity;
void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
void (*olut_clr)(struct nv50_head *);
void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
@@ -75,4 +76,14 @@ int head917d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
struct nv50_head_atom *);
extern const struct nv50_head_func headc37d;
+void headc37d_view(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_core_set(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_core_clr(struct nv50_head *);
+int headc37d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+void headc37d_curs_set(struct nv50_head *, struct nv50_head_atom *);
+void headc37d_curs_clr(struct nv50_head *);
+void headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
+
+extern const struct nv50_head_func headc57d;
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head507d.c b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
index 51bc5996fd37..7561be5ca707 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head507d.c
@@ -254,6 +254,23 @@ head507d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
+static void
+head507d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ for (; size--; in++, mem += 8) {
+ writew(drm_color_lut_extract(in-> red, 11) << 3, mem + 0);
+ writew(drm_color_lut_extract(in->green, 11) << 3, mem + 2);
+ writew(drm_color_lut_extract(in-> blue, 11) << 3, mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
void
head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
{
@@ -261,6 +278,8 @@ head507d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
asyh->olut.mode = 0;
else
asyh->olut.mode = 1;
+
+ asyh->olut.load = head507d_olut_load;
}
void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head907d.c b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
index 633907163eb1..c2d09dd97b1f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head907d.c
@@ -214,9 +214,27 @@ head907d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
}
void
+head907d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ for (; size--; in++, mem += 8) {
+ writew(drm_color_lut_extract(in-> red, 14) + 0x6000, mem + 0);
+ writew(drm_color_lut_extract(in->green, 14) + 0x6000, mem + 2);
+ writew(drm_color_lut_extract(in-> blue, 14) + 0x6000, mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+void
head907d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
{
asyh->olut.mode = 7;
+ asyh->olut.load = head907d_olut_load;
}
void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index 989c14083066..ef6a99d95a9c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -65,7 +65,7 @@ headc37d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
-static void
+void
headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -79,7 +79,7 @@ headc37d_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
-static void
+void
headc37d_curs_clr(struct nv50_head *head)
{
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -93,7 +93,7 @@ headc37d_curs_clr(struct nv50_head *head)
}
}
-static void
+void
headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
@@ -112,7 +112,7 @@ headc37d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
-static int
+int
headc37d_curs_format(struct nv50_head *head, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
@@ -155,6 +155,7 @@ headc37d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
asyh->olut.size = 0;
asyh->olut.range = 0;
asyh->olut.output_mode = 1;
+ asyh->olut.load = head907d_olut_load;
}
static void
@@ -181,7 +182,7 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
}
}
-static void
+void
headc37d_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
new file mode 100644
index 000000000000..32a7f9e85fb0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "head.h"
+#include "atom.h"
+#include "core.h"
+
+static void
+headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ /*XXX: This is a dirty hack until OR depth handling is
+ * improved to account for deep colour etc.
+ */
+ switch (asyh->or.depth) {
+ case 6: asyh->or.depth = 5; break;
+ case 5: asyh->or.depth = 4; break;
+ case 2: asyh->or.depth = 1; break;
+ case 0: asyh->or.depth = 4; break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ evo_mthd(push, 0x2004 + (head->base.index * 0x400), 1);
+ evo_data(push, 0xfc000001 |
+ asyh->or.depth << 4 |
+ asyh->or.nvsync << 3 |
+ asyh->or.nhsync << 2);
+ evo_kick(push, core);
+ }
+}
+
+static void
+headc57d_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ evo_mthd(push, 0x2000 + (head->base.index * 0x400), 1);
+#if 0
+ evo_data(push, 0x80000000 |
+ asyh->procamp.sat.sin << 16 |
+ asyh->procamp.sat.cos << 4);
+#else
+ evo_data(push, 0);
+#endif
+ evo_kick(push, core);
+ }
+}
+
+void
+headc57d_olut_clr(struct nv50_head *head)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ u32 *push;
+ if ((push = evo_wait(core, 2))) {
+ evo_mthd(push, 0x2288 + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, core);
+ }
+}
+
+void
+headc57d_olut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ u32 *push;
+ if ((push = evo_wait(core, 4))) {
+ evo_mthd(push, 0x2280 + (head->base.index * 0x400), 4);
+ evo_data(push, asyh->olut.size << 8 |
+ asyh->olut.mode << 2 |
+ asyh->olut.output_mode);
+ evo_data(push, 0xffffffff); /* FP_NORM_SCALE. */
+ evo_data(push, asyh->olut.handle);
+ evo_data(push, asyh->olut.offset >> 8);
+ evo_kick(push, core);
+ }
+}
+
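+/* Expand a 256-entry LUT to the hardware's 1024 entries, interpolating four
+ * steps between each pair of input entries.
+ */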
+static void
+headc57d_olut_load_8(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ memset_io(mem, 0x00, 0x20); /* VSS header. */
+ mem += 0x20;
+
+ while (size--) {
+ u16 r = drm_color_lut_extract(in-> red + 0, 16);
+ u16 g = drm_color_lut_extract(in->green + 0, 16);
+ u16 b = drm_color_lut_extract(in-> blue + 0, 16);
+ u16 ri = 0, gi = 0, bi = 0, i;
+
+ if (in++, size) {
+ ri = (drm_color_lut_extract(in-> red, 16) - r) / 4;
+ gi = (drm_color_lut_extract(in->green, 16) - g) / 4;
+ bi = (drm_color_lut_extract(in-> blue, 16) - b) / 4;
+ }
+
+ for (i = 0; i < 4; i++, mem += 8) {
+ writew(r + ri * i, mem + 0);
+ writew(g + gi * i, mem + 2);
+ writew(b + bi * i, mem + 4);
+ }
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+static void
+headc57d_olut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ memset_io(mem, 0x00, 0x20); /* VSS header. */
+ mem += 0x20;
+
+ for (; size--; in++, mem += 0x08) {
+ writew(drm_color_lut_extract(in-> red, 16), mem + 0);
+ writew(drm_color_lut_extract(in->green, 16), mem + 2);
+ writew(drm_color_lut_extract(in-> blue, 16), mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+void
+headc57d_olut(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ asyh->olut.mode = 2; /* DIRECT10 */
+ asyh->olut.size = 4 /* VSS header. */ + 1024 + 1 /* Entries. */;
+ asyh->olut.output_mode = 1; /* INTERPOLATE_ENABLE. */
+ if (asyh->state.gamma_lut &&
+ asyh->state.gamma_lut->length / sizeof(struct drm_color_lut) == 256)
+ asyh->olut.load = headc57d_olut_load_8;
+ else
+ asyh->olut.load = headc57d_olut_load;
+}
+
+static void
+headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+ struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
+ struct nv50_head_mode *m = &asyh->mode;
+ u32 *push;
+ if ((push = evo_wait(core, 12))) {
+ evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
+ evo_data(push, (m->v.active << 16) | m->h.active );
+ evo_data(push, (m->v.synce << 16) | m->h.synce );
+ evo_data(push, (m->v.blanke << 16) | m->h.blanke );
+ evo_data(push, (m->v.blanks << 16) | m->h.blanks );
+ evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
+ evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+ evo_data(push, m->clock * 1000);
+ evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
+ evo_data(push, m->clock * 1000);
+ /*XXX: HEAD_USAGE_BOUNDS, doesn't belong here. */
+ evo_mthd(push, 0x2030 + (head->base.index * 0x400), 1);
+ evo_data(push, 0x00001014);
+ evo_kick(push, core);
+ }
+}
+
+const struct nv50_head_func
+headc57d = {
+ .view = headc37d_view,
+ .mode = headc57d_mode,
+ .olut = headc57d_olut,
+ .olut_identity = true,
+ .olut_set = headc57d_olut_set,
+ .olut_clr = headc57d_olut_clr,
+ .curs_layout = head917d_curs_layout,
+ .curs_format = headc37d_curs_format,
+ .curs_set = headc37d_curs_set,
+ .curs_clr = headc37d_curs_clr,
+ .dither = headc37d_dither,
+ .procamp = headc57d_procamp,
+ .or = headc57d_or,
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.c b/drivers/gpu/drm/nouveau/dispnv50/lut.c
index a6b96ae2a22f..994def4fd51a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/lut.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.c
@@ -29,45 +29,29 @@
#include <nvif/class.h>
u32
-nv50_lut_load(struct nv50_lut *lut, bool legacy, int buffer,
- struct drm_property_blob *blob)
+nv50_lut_load(struct nv50_lut *lut, int buffer, struct drm_property_blob *blob,
+ void (*load)(struct drm_color_lut *, int, void __iomem *))
{
- struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
+ struct drm_color_lut *in = blob ? blob->data : NULL;
void __iomem *mem = lut->mem[buffer].object.map.ptr;
- const int size = blob->length / sizeof(*in);
- int bits, shift, i;
- u16 zero, r, g, b;
- u32 addr = lut->mem[buffer].addr;
-
- /* This can't happen.. But it shuts the compiler up. */
- if (WARN_ON(size != 256))
- return 0;
+ const u32 addr = lut->mem[buffer].addr;
+ int i;
- if (legacy) {
- bits = 11;
- shift = 3;
- zero = 0x0000;
+ if (!in) {
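+ /* No blob supplied: load a 1024-entry linear identity ramp instead. */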
+ in = kvmalloc_array(1024, sizeof(*in), GFP_KERNEL);
+ if (!WARN_ON(!in)) {
+ for (i = 0; i < 1024; i++) {
+ in[i].red =
+ in[i].green =
+ in[i].blue = (i << 16) >> 10;
+ }
+ load(in, 1024, mem);
+ kvfree(in);
+ }
} else {
- bits = 14;
- shift = 0;
- zero = 0x6000;
- }
-
- for (i = 0; i < size; i++) {
- r = (drm_color_lut_extract(in[i]. red, bits) + zero) << shift;
- g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
- b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
- writew(r, mem + (i * 0x08) + 0);
- writew(g, mem + (i * 0x08) + 2);
- writew(b, mem + (i * 0x08) + 4);
+ load(in, blob->length / sizeof(*in), mem);
}
- /* INTERPOLATE modes require a "next" entry to interpolate with,
- * so we replicate the last entry to deal with this for now.
- */
- writew(r, mem + (i * 0x08) + 0);
- writew(g, mem + (i * 0x08) + 2);
- writew(b, mem + (i * 0x08) + 4);
return addr;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/lut.h b/drivers/gpu/drm/nouveau/dispnv50/lut.h
index 6d7b8352e4cb..b3b9040cfe9a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/lut.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/lut.h
@@ -2,6 +2,7 @@
#define __NV50_KMS_LUT_H__
#include <nvif/mem.h>
struct drm_property_blob;
+struct drm_color_lut;
struct nv50_disp;
struct nv50_lut {
@@ -10,6 +11,6 @@ struct nv50_lut {
int nv50_lut_init(struct nv50_disp *, struct nvif_mmu *, struct nv50_lut *);
void nv50_lut_fini(struct nv50_lut *);
-u32 nv50_lut_load(struct nv50_lut *, bool legacy, int buffer,
- struct drm_property_blob *);
+u32 nv50_lut_load(struct nv50_lut *, int buffer, struct drm_property_blob *,
+ void (*)(struct drm_color_lut *, int size, void __iomem *));
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wimm.c b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
index fc36e0696407..bc9eeaf212ae 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wimm.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wimm.c
@@ -31,6 +31,7 @@ nv50_wimm_init(struct nouveau_drm *drm, struct nv50_wndw *wndw)
int version;
int (*init)(struct nouveau_drm *, s32, struct nv50_wndw *);
} wimms[] = {
+ { TU104_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{ GV100_DISP_WINDOW_IMM_CHANNEL_DMA, 0, wimmc37b_init },
{}
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 2187922e8dc2..ba9eea2ff16b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -139,10 +139,8 @@ nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 *interlock,
if (asyw->set.xlut ) {
if (asyw->ilut) {
asyw->xlut.i.offset =
- nv50_lut_load(&wndw->ilut,
- asyw->xlut.i.mode <= 1,
- asyw->xlut.i.buffer,
- asyw->ilut);
+ nv50_lut_load(&wndw->ilut, asyw->xlut.i.buffer,
+ asyw->ilut, asyw->xlut.i.load);
}
wndw->func->xlut_set(wndw, asyw);
}
@@ -322,6 +320,11 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
asyh->wndw.olut &= ~BIT(wndw->id);
}
+ if (!ilut && wndw->func->ilut_identity) {
+ static struct drm_property_blob dummy = {};
+ ilut = &dummy;
+ }
+
/* Recalculate LUT state. */
memset(&asyw->xlut, 0x00, sizeof(asyw->xlut));
if ((asyw->ilut = wndw->func->ilut ? ilut : NULL)) {
@@ -623,6 +626,7 @@ nv50_wndw_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
int (*new)(struct nouveau_drm *, enum drm_plane_type,
int, s32, struct nv50_wndw **);
} wndws[] = {
+ { TU104_DISP_WINDOW_CHANNEL_DMA, 0, wndwc57e_new },
{ GV100_DISP_WINDOW_CHANNEL_DMA, 0, wndwc37e_new },
{}
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index b0b6428034b0..03f3d8dc235a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -65,6 +65,7 @@ struct nv50_wndw_func {
int (*ntfy_wait_begun)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
void (*ilut)(struct nv50_wndw *, struct nv50_wndw_atom *);
+ bool ilut_identity;
bool olut_core;
void (*xlut_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
void (*xlut_clr)(struct nv50_wndw *);
@@ -90,6 +91,23 @@ extern const struct nv50_wimm_func curs507a;
int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
+int wndwc37e_new_(const struct nv50_wndw_func *, struct nouveau_drm *,
+ enum drm_plane_type type, int index, s32 oclass, u32 heads,
+ struct nv50_wndw **);
+int wndwc37e_acquire(struct nv50_wndw *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+void wndwc37e_release(struct nv50_wndw *, struct nv50_wndw_atom *,
+ struct nv50_head_atom *);
+void wndwc37e_sema_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_sema_clr(struct nv50_wndw *);
+void wndwc37e_ntfy_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_ntfy_clr(struct nv50_wndw *);
+void wndwc37e_image_set(struct nv50_wndw *, struct nv50_wndw_atom *);
+void wndwc37e_image_clr(struct nv50_wndw *);
+void wndwc37e_update(struct nv50_wndw *, u32 *);
+
+int wndwc57e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
+ struct nv50_wndw **);
int nv50_wndw_new(struct nouveau_drm *, enum drm_plane_type, int index,
struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
index 44afb0f069a5..e52a85c83f7a 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c
@@ -61,9 +61,10 @@ wndwc37e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
asyw->xlut.i.size = 0;
asyw->xlut.i.range = 0;
asyw->xlut.i.output_mode = 1;
+ asyw->xlut.i.load = head907d_olut_load;
}
-static void
+void
wndwc37e_image_clr(struct nv50_wndw *wndw)
{
u32 *push;
@@ -76,7 +77,7 @@ wndwc37e_image_clr(struct nv50_wndw *wndw)
}
}
-static void
+void
wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
@@ -117,7 +118,7 @@ wndwc37e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
evo_kick(push, &wndw->wndw);
}
-static void
+void
wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
{
u32 *push;
@@ -128,7 +129,7 @@ wndwc37e_ntfy_clr(struct nv50_wndw *wndw)
}
}
-static void
+void
wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
@@ -140,7 +141,7 @@ wndwc37e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
}
}
-static void
+void
wndwc37e_sema_clr(struct nv50_wndw *wndw)
{
u32 *push;
@@ -151,7 +152,7 @@ wndwc37e_sema_clr(struct nv50_wndw *wndw)
}
}
-static void
+void
wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
u32 *push;
@@ -165,7 +166,7 @@ wndwc37e_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
}
}
-static void
+void
wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
{
u32 *push;
@@ -183,13 +184,13 @@ wndwc37e_update(struct nv50_wndw *wndw, u32 *interlock)
}
}
-static void
+void
wndwc37e_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
}
-static int
+int
wndwc37e_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
@@ -236,7 +237,7 @@ wndwc37e = {
.update = wndwc37e_update,
};
-static int
+int
wndwc37e_new_(const struct nv50_wndw_func *func, struct nouveau_drm *drm,
enum drm_plane_type type, int index, s32 oclass, u32 heads,
struct nv50_wndw **pwndw)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
new file mode 100644
index 000000000000..ba89f1a5fcfa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "wndw.h"
+#include "atom.h"
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <nouveau_bo.h>
+
+#include <nvif/clc37e.h>
+
+static void
+wndwc57e_ilut_clr(struct nv50_wndw *wndw)
+{
+ u32 *push;
+ if ((push = evo_wait(&wndw->wndw, 2))) {
+ evo_mthd(push, 0x0444, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
+static void
+wndwc57e_ilut_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u32 *push;
+ if ((push = evo_wait(&wndw->wndw, 4))) {
+ evo_mthd(push, 0x0440, 3);
+ evo_data(push, asyw->xlut.i.size << 8 |
+ asyw->xlut.i.mode << 2 |
+ asyw->xlut.i.output_mode);
+ evo_data(push, asyw->xlut.handle);
+ evo_data(push, asyw->xlut.i.offset >> 8);
+ evo_kick(push, &wndw->wndw);
+ }
+}
+
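+/* Convert an unsigned 0.16 fixed-point value to an IEEE-754 half-float
+ * (1 sign, 5 exponent, 10 mantissa bits).
+ */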
+static u16
+fixedU0_16_FP16(u16 fixed)
+{
+ int sign = 0, exp = 0, man = 0;
+ if (fixed) {
+ while (--exp && !(fixed & 0x8000))
+ fixed <<= 1;
+ man = ((fixed << 1) & 0xffc0) >> 6;
+ exp += 15;
+ }
+ return (sign << 15) | (exp << 10) | man;
+}
+
+static void
+wndwc57e_ilut_load(struct drm_color_lut *in, int size, void __iomem *mem)
+{
+ memset_io(mem, 0x00, 0x20); /* VSS header. */
+ mem += 0x20;
+
+ for (; size--; in++, mem += 0x08) {
+ u16 r = fixedU0_16_FP16(drm_color_lut_extract(in-> red, 16));
+ u16 g = fixedU0_16_FP16(drm_color_lut_extract(in->green, 16));
+ u16 b = fixedU0_16_FP16(drm_color_lut_extract(in-> blue, 16));
+ writew(r, mem + 0);
+ writew(g, mem + 2);
+ writew(b, mem + 4);
+ }
+
+ /* INTERPOLATE modes require a "next" entry to interpolate with,
+ * so we replicate the last entry to deal with this for now.
+ */
+ writew(readw(mem - 8), mem + 0);
+ writew(readw(mem - 6), mem + 2);
+ writew(readw(mem - 4), mem + 4);
+}
+
+static void
+wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
+{
+ u16 size = asyw->ilut->length / sizeof(struct drm_color_lut);
+ if (size == 256) {
+ asyw->xlut.i.mode = 1; /* DIRECT8. */
+ } else {
+ asyw->xlut.i.mode = 2; /* DIRECT10. */
+ size = 1024;
+ }
+ asyw->xlut.i.size = 4 /* VSS header. */ + size + 1 /* Entries. */;
+ asyw->xlut.i.output_mode = 0; /* INTERPOLATE_DISABLE. */
+ asyw->xlut.i.load = wndwc57e_ilut_load;
+}
+
+static const struct nv50_wndw_func
+wndwc57e = {
+ .acquire = wndwc37e_acquire,
+ .release = wndwc37e_release,
+ .sema_set = wndwc37e_sema_set,
+ .sema_clr = wndwc37e_sema_clr,
+ .ntfy_set = wndwc37e_ntfy_set,
+ .ntfy_clr = wndwc37e_ntfy_clr,
+ .ntfy_reset = corec37d_ntfy_init,
+ .ntfy_wait_begun = base507c_ntfy_wait_begun,
+ .ilut = wndwc57e_ilut,
+ .ilut_identity = true,
+ .xlut_set = wndwc57e_ilut_set,
+ .xlut_clr = wndwc57e_ilut_clr,
+ .image_set = wndwc37e_image_set,
+ .image_clr = wndwc37e_image_clr,
+ .update = wndwc37e_update,
+};
+
+int
+wndwc57e_new(struct nouveau_drm *drm, enum drm_plane_type type, int index,
+ s32 oclass, struct nv50_wndw **pwndw)
+{
+ return wndwc37e_new_(&wndwc57e, drm, type, index, oclass,
+ BIT(index >> 1), pwndw);
+}
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 4f5233107f5f..4cbed0329367 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -32,6 +32,7 @@ struct nv_device_info_v0 {
#define NV_DEVICE_INFO_V0_MAXWELL 0x09
#define NV_DEVICE_INFO_V0_PASCAL 0x0a
#define NV_DEVICE_INFO_V0_VOLTA 0x0b
+#define NV_DEVICE_INFO_V0_TURING 0x0c
__u8 family;
__u8 pad06[2];
__u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
index fbfcffc5feb2..81401eb970ea 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cla06f.h
@@ -4,12 +4,13 @@
struct kepler_channel_gpfifo_a_v0 {
__u8 version;
- __u8 pad01[1];
+ __u8 priv;
__u16 chid;
__u32 ilength;
__u64 ioffset;
__u64 runlist;
__u64 vmm;
+ __u64 inst;
};
#define NVA06F_V0_NTFY_NON_STALL_INTERRUPT 0x00
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 6db56bd7d67e..1d82cbf70cf4 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -68,7 +68,8 @@
#define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f
#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f
-#define VOLTA_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c36f
+#define VOLTA_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c36f
+#define TURING_CHANNEL_GPFIFO_A /* clc36f.h */ 0x0000c46f
#define NV50_DISP /* cl5070.h */ 0x00005070
#define G82_DISP /* cl5070.h */ 0x00008270
@@ -83,6 +84,7 @@
#define GP100_DISP /* cl5070.h */ 0x00009770
#define GP102_DISP /* cl5070.h */ 0x00009870
#define GV100_DISP /* cl5070.h */ 0x0000c370
+#define TU104_DISP /* cl5070.h */ 0x0000c570
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
@@ -95,6 +97,7 @@
#define GF110_DISP_CURSOR /* cl507a.h */ 0x0000907a
#define GK104_DISP_CURSOR /* cl507a.h */ 0x0000917a
#define GV100_DISP_CURSOR /* cl507a.h */ 0x0000c37a
+#define TU104_DISP_CURSOR /* cl507a.h */ 0x0000c57a
#define NV50_DISP_OVERLAY /* cl507b.h */ 0x0000507b
#define G82_DISP_OVERLAY /* cl507b.h */ 0x0000827b
@@ -103,6 +106,7 @@
#define GK104_DISP_OVERLAY /* cl507b.h */ 0x0000917b
#define GV100_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c37b
+#define TU104_DISP_WINDOW_IMM_CHANNEL_DMA /* clc37b.h */ 0x0000c57b
#define NV50_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000507c
#define G82_DISP_BASE_CHANNEL_DMA /* cl507c.h */ 0x0000827c
@@ -125,6 +129,7 @@
#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
#define GP102_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
#define GV100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c37d
+#define TU104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000c57d
#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -134,6 +139,7 @@
#define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e
#define GV100_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c37e
+#define TU104_DISP_WINDOW_CHANNEL_DMA /* clc37e.h */ 0x0000c57e
#define NV50_TESLA 0x00005097
#define G82_TESLA 0x00008297
@@ -183,6 +189,7 @@
#define PASCAL_DMA_COPY_A 0x0000c0b5
#define PASCAL_DMA_COPY_B 0x0000c1b5
#define VOLTA_DMA_COPY_A 0x0000c3b5
+#define TURING_DMA_COPY_A 0x0000c5b5
#define FERMI_DECOMPRESS 0x000090b8
diff --git a/drivers/gpu/drm/nouveau/include/nvif/clc36f.h b/drivers/gpu/drm/nouveau/include/nvif/clc36f.h
new file mode 100644
index 000000000000..6b14d7e3f6bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/clc36f.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NVIF_CLC36F_H__
+#define __NVIF_CLC36F_H__
+
+struct volta_channel_gpfifo_a_v0 {
+ __u8 version;
+ __u8 priv;
+ __u16 chid;
+ __u32 ilength;
+ __u64 ioffset;
+ __u64 runlist;
+ __u64 vmm;
+ __u64 inst;
+ __u32 token;
+};
+
+#define NVC36F_V0_NTFY_NON_STALL_INTERRUPT 0x00
+#define NVC36F_V0_NTFY_KILLED 0x01
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index d83d834b7452..72e4dc1f0236 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -61,7 +61,11 @@ enum nvkm_devidx {
NVKM_ENGINE_NVENC2,
NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
- NVKM_ENGINE_NVDEC,
+ NVKM_ENGINE_NVDEC0,
+ NVKM_ENGINE_NVDEC1,
+ NVKM_ENGINE_NVDEC2,
+ NVKM_ENGINE_NVDEC_LAST = NVKM_ENGINE_NVDEC2,
+
NVKM_ENGINE_PM,
NVKM_ENGINE_SEC,
NVKM_ENGINE_SEC2,
@@ -114,6 +118,7 @@ struct nvkm_device {
GM100 = 0x110,
GP100 = 0x130,
GV100 = 0x140,
+ TU100 = 0x160,
} card_type;
u32 chipset;
u8 chiprev;
@@ -163,7 +168,7 @@ struct nvkm_device {
struct nvkm_engine *msppp;
struct nvkm_engine *msvld;
struct nvkm_engine *nvenc[3];
- struct nvkm_nvdec *nvdec;
+ struct nvkm_nvdec *nvdec[3];
struct nvkm_pm *pm;
struct nvkm_engine *sec;
struct nvkm_sec2 *sec2;
@@ -235,7 +240,7 @@ struct nvkm_device_chip {
int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_nvdec **);
+ int (*nvdec[3])(struct nvkm_device *, int idx, struct nvkm_nvdec **);
int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*sec2 )(struct nvkm_device *, int idx, struct nvkm_sec2 **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index 05f505de0075..f34c80310861 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -29,6 +29,7 @@ struct nvkm_memory_func {
void *(*dtor)(struct nvkm_memory *);
enum nvkm_memory_target (*target)(struct nvkm_memory *);
u8 (*page)(struct nvkm_memory *);
+ u64 (*bar2)(struct nvkm_memory *);
u64 (*addr)(struct nvkm_memory *);
u64 (*size)(struct nvkm_memory *);
void (*boot)(struct nvkm_memory *, struct nvkm_vmm *);
@@ -56,6 +57,7 @@ void nvkm_memory_tags_put(struct nvkm_memory *, struct nvkm_device *,
#define nvkm_memory_target(p) (p)->func->target(p)
#define nvkm_memory_page(p) (p)->func->page(p)
+#define nvkm_memory_bar2(p) (p)->func->bar2(p)
#define nvkm_memory_addr(p) (p)->func->addr(p)
#define nvkm_memory_size(p) (p)->func->size(p)
#define nvkm_memory_boot(p,v) (p)->func->boot((p),(v))
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index fc295e1faa19..86abe76023c2 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -11,4 +11,5 @@ int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gp102_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
int gv100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
+int tu104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index ef7dc0844d26..5ca86e178bb9 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -36,4 +36,5 @@ int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gp102_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
int gv100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
+int tu104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 7e39fbed2519..3b2b685778eb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -74,4 +74,5 @@ int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gv100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int tu104_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
index f6bd94c7e0f7..fd9d713b611c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
@@ -16,8 +16,10 @@ struct nvkm_bar {
};
struct nvkm_vmm *nvkm_bar_bar1_vmm(struct nvkm_device *);
+void nvkm_bar_bar1_reset(struct nvkm_device *);
void nvkm_bar_bar2_init(struct nvkm_device *);
void nvkm_bar_bar2_fini(struct nvkm_device *);
+void nvkm_bar_bar2_reset(struct nvkm_device *);
struct nvkm_vmm *nvkm_bar_bar2_vmm(struct nvkm_device *);
void nvkm_bar_flush(struct nvkm_bar *);
@@ -27,4 +29,5 @@ int gf100_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gk20a_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gm107_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
int gm20b_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
+int tu104_bar_new(struct nvkm_device *, int, struct nvkm_bar **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
index 703a5b524b96..425ccc47e3b7 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/M0203.h
@@ -12,11 +12,14 @@ u32 nvbios_M0203Tp(struct nvkm_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
struct nvbios_M0203T *);
struct nvbios_M0203E {
-#define M0203E_TYPE_DDR2 0x0
-#define M0203E_TYPE_DDR3 0x1
-#define M0203E_TYPE_GDDR3 0x2
-#define M0203E_TYPE_GDDR5 0x3
-#define M0203E_TYPE_SKIP 0xf
+#define M0203E_TYPE_DDR2 0x0
+#define M0203E_TYPE_DDR3 0x1
+#define M0203E_TYPE_GDDR3 0x2
+#define M0203E_TYPE_GDDR5 0x3
+#define M0203E_TYPE_HBM2 0x6
+#define M0203E_TYPE_GDDR5X 0x8
+#define M0203E_TYPE_GDDR6 0x9
+#define M0203E_TYPE_SKIP 0xf
u8 type;
u8 strap;
u8 group;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
index ed9e0a6a0011..8463b421d345 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
@@ -20,6 +20,7 @@ enum dcb_connector_type {
DCB_CONNECTOR_DMS59_DP0 = 0x64,
DCB_CONNECTOR_DMS59_DP1 = 0x65,
DCB_CONNECTOR_WFD = 0x70,
+ DCB_CONNECTOR_USB_C = 0x71,
DCB_CONNECTOR_NONE = 0xff
};
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
index 486e7635c29d..1b71812a790b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/devinit.h
@@ -31,4 +31,5 @@ int gf100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gm107_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gm200_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
int gv100_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
+int tu104_devinit_new(struct nvkm_device *, int, struct nvkm_devinit **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
index 5a77498fe6a0..127f48066026 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fault.h
@@ -30,4 +30,5 @@ struct nvkm_fault_data {
int gp100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
int gv100_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
+int tu104_fault_new(struct nvkm_device *, int, struct nvkm_fault **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 96ccc624ee81..27298f8b7ead 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -105,7 +105,10 @@ enum nvkm_ram_type {
NVKM_RAM_TYPE_GDDR2,
NVKM_RAM_TYPE_GDDR3,
NVKM_RAM_TYPE_GDDR4,
- NVKM_RAM_TYPE_GDDR5
+ NVKM_RAM_TYPE_GDDR5,
+ NVKM_RAM_TYPE_GDDR5X,
+ NVKM_RAM_TYPE_GDDR6,
+ NVKM_RAM_TYPE_HBM2,
};
struct nvkm_ram {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 61c93c86e2e2..b66dedd8abb6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -31,4 +31,5 @@ int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int tu104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 688595545e21..0a0e064f22e5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -130,4 +130,5 @@ int gm20b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
+int tu104_mmu_new(struct nvkm_device *, int, struct nvkm_mmu **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index e9b0746826ca..3693ebf371b6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -28,6 +28,18 @@ struct nvkm_timer {
u64 nvkm_timer_read(struct nvkm_timer *);
void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
+struct nvkm_timer_wait {
+ struct nvkm_timer *tmr;
+ u64 limit;
+ u64 time0;
+ u64 time1;
+ int reads;
+};
+
+void nvkm_timer_wait_init(struct nvkm_device *, u64 nsec,
+ struct nvkm_timer_wait *);
+s64 nvkm_timer_wait_test(struct nvkm_timer_wait *);
+
/* Delay based on GPU time (ie. PTIMER).
*
* Will return -ETIMEDOUT unless the loop was terminated with 'break',
@@ -38,21 +50,17 @@ void nvkm_timer_alarm(struct nvkm_timer *, u32 nsec, struct nvkm_alarm *);
*/
#define NVKM_DELAY _warn = false;
#define nvkm_nsec(d,n,cond...) ({ \
- struct nvkm_device *_device = (d); \
- struct nvkm_timer *_tmr = _device->timer; \
- u64 _nsecs = (n), _time0 = nvkm_timer_read(_tmr); \
- s64 _taken = 0; \
+ struct nvkm_timer_wait _wait; \
bool _warn = true; \
+ s64 _taken = 0; \
\
+ nvkm_timer_wait_init((d), (n), &_wait); \
do { \
cond \
- } while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs); \
+ } while ((_taken = nvkm_timer_wait_test(&_wait)) >= 0); \
\
- if (_taken >= _nsecs) { \
- if (_warn) \
- dev_WARN(_device->dev, "timeout\n"); \
- _taken = -ETIMEDOUT; \
- } \
+ if (_warn && _taken < 0) \
+ dev_WARN(_wait.tmr->subdev.device->dev, "timeout\n"); \
_taken; \
})
#define nvkm_usec(d,u,cond...) nvkm_nsec((d), (u) * 1000, ##cond)
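The refactor above splits the old inline timekeeping into two helpers: nvkm_timer_wait_init() records the start time and nanosecond budget, and nvkm_timer_wait_test() returns a non-negative elapsed time while the budget lasts, going negative once it is exhausted. A minimal open-coded caller sketch, assuming only the declarations above (poll_done() is an illustrative condition, not a real function):

	struct nvkm_timer_wait wait;
	s64 taken;

	nvkm_timer_wait_init(device, 2 * 1000 * 1000, &wait); /* 2ms budget */
	do {
		if (poll_done(device)) /* illustrative success test */
			break;
	} while ((taken = nvkm_timer_wait_test(&wait)) >= 0);
	if (taken < 0)
		return -ETIMEDOUT; /* budget ran out before the break */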
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index e67a471331b5..b06cdac8f3a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -306,7 +306,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
/* create channel object and initialise dma and fence management */
ret = nouveau_channel_new(drm, device, init->fb_ctxdma_handle,
- init->tt_ctxdma_handle, &chan->chan);
+ init->tt_ctxdma_handle, false, &chan->chan);
if (ret)
goto done;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7214022dfb91..73eff52036d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1141,6 +1141,8 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
+ { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
{ "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 92d3115f96b5..668afbc29c3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -29,6 +29,7 @@
#include <nvif/cl506f.h>
#include <nvif/cl906f.h>
#include <nvif/cla06f.h>
+#include <nvif/clc36f.h>
#include <nvif/ioctl.h>
/*XXX*/
@@ -217,10 +218,11 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
static int
nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
- u64 runlist, struct nouveau_channel **pchan)
+ u64 runlist, bool priv, struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)device->object.client;
- static const u16 oclasses[] = { VOLTA_CHANNEL_GPFIFO_A,
+ static const u16 oclasses[] = { TURING_CHANNEL_GPFIFO_A,
+ VOLTA_CHANNEL_GPFIFO_A,
PASCAL_CHANNEL_GPFIFO_A,
MAXWELL_CHANNEL_GPFIFO_A,
KEPLER_CHANNEL_GPFIFO_B,
@@ -234,6 +236,7 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
struct nv50_channel_gpfifo_v0 nv50;
struct fermi_channel_gpfifo_v0 fermi;
struct kepler_channel_gpfifo_a_v0 kepler;
+ struct volta_channel_gpfifo_a_v0 volta;
} args;
struct nouveau_channel *chan;
u32 size;
@@ -247,12 +250,22 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
/* create channel object */
do {
+ if (oclass[0] >= VOLTA_CHANNEL_GPFIFO_A) {
+ args.volta.version = 0;
+ args.volta.ilength = 0x02000;
+ args.volta.ioffset = 0x10000 + chan->push.addr;
+ args.volta.runlist = runlist;
+ args.volta.vmm = nvif_handle(&cli->vmm.vmm.object);
+ args.volta.priv = priv;
+ size = sizeof(args.volta);
+ } else
if (oclass[0] >= KEPLER_CHANNEL_GPFIFO_A) {
args.kepler.version = 0;
args.kepler.ilength = 0x02000;
args.kepler.ioffset = 0x10000 + chan->push.addr;
args.kepler.runlist = runlist;
args.kepler.vmm = nvif_handle(&cli->vmm.vmm.object);
+ args.kepler.priv = priv;
size = sizeof(args.kepler);
} else
if (oclass[0] >= FERMI_CHANNEL_GPFIFO) {
@@ -273,13 +286,20 @@ nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
ret = nvif_object_init(&device->object, 0, *oclass++,
&args, size, &chan->user);
if (ret == 0) {
- if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A)
+ if (chan->user.oclass >= VOLTA_CHANNEL_GPFIFO_A) {
+ chan->chid = args.volta.chid;
+ chan->inst = args.volta.inst;
+ chan->token = args.volta.token;
+ } else
+ if (chan->user.oclass >= KEPLER_CHANNEL_GPFIFO_A) {
chan->chid = args.kepler.chid;
- else
- if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO)
+ chan->inst = args.kepler.inst;
+ } else
+ if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
chan->chid = args.fermi.chid;
- else
+ } else {
chan->chid = args.nv50.chid;
+ }
return ret;
}
} while (*oclass);
@@ -448,7 +468,8 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
int
nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
- u32 arg0, u32 arg1, struct nouveau_channel **pchan)
+ u32 arg0, u32 arg1, bool priv,
+ struct nouveau_channel **pchan)
{
struct nouveau_cli *cli = (void *)device->object.client;
bool super;
@@ -458,7 +479,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
super = cli->base.super;
cli->base.super = true;
- ret = nouveau_channel_ind(drm, device, arg0, pchan);
+ ret = nouveau_channel_ind(drm, device, arg0, priv, pchan);
if (ret) {
NV_PRINTK(dbg, cli, "ib channel create, %d\n", ret);
ret = nouveau_channel_dma(drm, device, pchan);
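nouveau_channel_ind() and nouveau_channel_new() now thread a priv flag through to the gpfifo allocation args (args.volta.priv / args.kepler.priv). A hedged call sketch with the new signature, mirroring the CE-channel use in nouveau_drm.c below:

	struct nvif_device *device = &drm->client.device;
	struct nouveau_channel *chan;
	int ret;

	ret = nouveau_channel_new(drm, device, nvif_fifo_runlist_ce(device),
				  0, true /* priv */, &chan);
	if (ret)
		NV_ERROR(drm, "channel create failed, %d\n", ret);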
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 64454c2ebd90..28418f4e5748 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -10,6 +10,8 @@ struct nouveau_channel {
struct nouveau_drm *drm;
int chid;
+ u64 inst;
+ u32 token;
struct nvif_object vram;
struct nvif_object gart;
@@ -48,7 +50,8 @@ struct nouveau_channel {
int nouveau_channels_init(struct nouveau_drm *);
int nouveau_channel_new(struct nouveau_drm *, struct nvif_device *,
- u32 arg0, u32 arg1, struct nouveau_channel **);
+ u32 arg0, u32 arg1, bool priv,
+ struct nouveau_channel **);
void nouveau_channel_del(struct nouveau_channel **);
int nouveau_channel_idle(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index fd80661dff92..3f463c91314a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -403,6 +403,7 @@ nouveau_connector_destroy(struct drm_connector *connector)
if (nv_connector->aux.transfer) {
drm_dp_cec_unregister_connector(&nv_connector->aux);
drm_dp_aux_unregister(&nv_connector->aux);
+ kfree(nv_connector->aux.name);
}
kfree(connector);
}
@@ -1218,7 +1219,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
case DCB_CONNECTOR_DMS59_DP0:
case DCB_CONNECTOR_DMS59_DP1:
- case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort;
+ case DCB_CONNECTOR_DP :
+ case DCB_CONNECTOR_USB_C : return DRM_MODE_CONNECTOR_DisplayPort;
case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
case DCB_CONNECTOR_HDMI_0 :
case DCB_CONNECTOR_HDMI_1 :
@@ -1232,7 +1234,8 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
}
struct drm_connector *
-nouveau_connector_create(struct drm_device *dev, int index)
+nouveau_connector_create(struct drm_device *dev,
+ const struct dcb_output *dcbe)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -1240,6 +1243,8 @@ nouveau_connector_create(struct drm_device *dev, int index)
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ char aux_name[48] = {0};
+ int index = dcbe->connector;
int type, ret = 0;
bool dummy;
@@ -1342,6 +1347,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
case DRM_MODE_CONNECTOR_eDP:
nv_connector->aux.dev = dev->dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
+ snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
+ dcbe->hasht, dcbe->hashm);
+ nv_connector->aux.name = kstrdup(aux_name, GFP_KERNEL);
ret = drm_dp_aux_register(&nv_connector->aux);
if (ret) {
NV_ERROR(drm, "failed to register aux channel\n");
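Naming the AUX channel "sor-<hasht>-<hashm>" ties it to its DCB output entry, so every DP connector registers under a stable, unique name; the kstrdup()'d copy is what nouveau_connector_destroy() above now kfree()s.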
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index f57ef35b1e5e..f43a8d63aef8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -38,6 +38,7 @@
#include "nouveau_encoder.h"
struct nvkm_i2c_port;
+struct dcb_output;
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
struct nouveau_backlight;
@@ -113,7 +114,7 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
}
struct drm_connector *
-nouveau_connector_create(struct drm_device *, int index);
+nouveau_connector_create(struct drm_device *, const struct dcb_output *);
extern int nouveau_tv_disable;
extern int nouveau_ignorelid;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 9109b69cd052..88a52f6b39fe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -47,6 +47,26 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
}
static int
+nouveau_debugfs_strap_peek(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
+ int ret;
+
+ ret = pm_runtime_get_sync(drm->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return ret;
+
+ seq_printf(m, "0x%08x\n",
+ nvif_rd32(&drm->client.device.object, 0x101000));
+
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_autosuspend(drm->dev->dev);
+
+ return 0;
+}
+
+static int
nouveau_debugfs_pstate_get(struct seq_file *m, void *data)
{
struct drm_device *drm = m->private;
@@ -185,7 +205,8 @@ static const struct file_operations nouveau_pstate_fops = {
};
static struct drm_info_list nouveau_debugfs_list[] = {
- { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+ { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
+ { "strap_peek", nouveau_debugfs_strap_peek, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
@@ -199,8 +220,9 @@ static const struct nouveau_debugfs_files {
int
nouveau_drm_debugfs_init(struct drm_minor *minor)
{
+ struct nouveau_drm *drm = nouveau_drm(minor->dev);
struct dentry *dentry;
- int i;
+ int i, ret;
for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) {
dentry = debugfs_create_file(nouveau_debugfs_files[i].name,
@@ -211,9 +233,23 @@ nouveau_drm_debugfs_init(struct drm_minor *minor)
return -ENOMEM;
}
- return drm_debugfs_create_files(nouveau_debugfs_list,
- NOUVEAU_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
+ ret = drm_debugfs_create_files(nouveau_debugfs_list,
+ NOUVEAU_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
+ if (ret)
+ return ret;
+
+ /* Set the size of the vbios since we know it, and it's confusing to
+ * userspace if it wants to seek() but the file has a length of 0
+ */
+ dentry = debugfs_lookup("vbios.rom", minor->debugfs_root);
+ if (!dentry)
+ return 0;
+
+ d_inode(dentry)->i_size = drm->vbios.length;
+ dput(dentry);
+
+ return 0;
}
int
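strap_peek follows the stock runtime-PM recipe for debugfs reads: resume the device, read the register, then mark it busy and drop the reference. Reduced to its moving parts (read_hw() stands in for the nvif_rd32() above):

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES) /* -EACCES: runtime PM disabled, device is up */
		return ret;
	value = read_hw();
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);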
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 945afd34138e..078f65d849ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -101,7 +101,7 @@ nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
nvif_wr32(&chan->user, 0x8c, chan->dma.ib_put);
if (user->func && user->func->doorbell)
- user->func->doorbell(user, chan->chid);
+ user->func->doorbell(user, chan->token);
chan->dma.ib_free--;
}
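The doorbell is now rung with chan->token, the submission token returned at channel-allocation time (args.volta.token in nouveau_chan.c above), instead of assuming the raw channel id is the correct value on every generation.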
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 2b2baf6e0e0d..f900e94592f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -353,6 +353,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
case MAXWELL_CHANNEL_GPFIFO_A:
case PASCAL_CHANNEL_GPFIFO_A:
case VOLTA_CHANNEL_GPFIFO_A:
+ case TURING_CHANNEL_GPFIFO_A:
ret = nvc0_fence_create(drm);
break;
default:
@@ -370,7 +371,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
ret = nouveau_channel_new(drm, &drm->client.device,
nvif_fifo_runlist_ce(device), 0,
- &drm->cechan);
+ true, &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
@@ -381,7 +382,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
device->info.chipset != 0xaa &&
device->info.chipset != 0xac) {
ret = nouveau_channel_new(drm, &drm->client.device,
- NvDmaFB, NvDmaTT, &drm->cechan);
+ NvDmaFB, NvDmaTT, false,
+ &drm->cechan);
if (ret)
NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
@@ -393,7 +395,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
}
ret = nouveau_channel_new(drm, &drm->client.device,
- arg0, arg1, &drm->channel);
+ arg0, arg1, false, &drm->channel);
if (ret) {
NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
nouveau_accel_fini(drm);
@@ -1171,10 +1173,16 @@ nouveau_platform_device_create(const struct nvkm_device_tegra_func *func,
goto err_free;
}
+ err = nouveau_drm_device_init(drm);
+ if (err)
+ goto err_put;
+
platform_set_drvdata(pdev, drm);
return drm;
+err_put:
+ drm_dev_put(drm);
err_free:
nvkm_device_del(pdevice);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 0b2191fa96f7..d20b9ba4b1c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -146,8 +146,6 @@ struct nouveau_drm {
/* TTM interface support */
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
atomic_t validate_sequence;
int (*move)(struct nouveau_channel *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 99be61ddeb75..d4964f3397a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -341,7 +341,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
int ret = 0, i;
if (!exclusive) {
- ret = reservation_object_reserve_shared(resv);
+ ret = reservation_object_reserve_shared(resv, 1);
if (ret)
return ret;
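reservation_object_reserve_shared() now takes the number of shared-fence slots to reserve; nouveau only ever adds one fence per sync operation, hence the 1.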
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 8edb9f2a4269..1543c2f8d3d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -175,66 +175,6 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
}
static int
-nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void
-nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-int
-nouveau_ttm_global_init(struct nouveau_drm *drm)
-{
- struct drm_global_reference *global_ref;
- int ret;
-
- global_ref = &drm->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &nouveau_ttm_mem_global_init;
- global_ref->release = &nouveau_ttm_mem_global_release;
-
- ret = drm_global_item_ref(global_ref);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed setting up TTM memory accounting\n");
- drm->ttm.mem_global_ref.release = NULL;
- return ret;
- }
-
- drm->ttm.bo_global_ref.mem_glob = global_ref->object;
- global_ref = &drm->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
-
- ret = drm_global_item_ref(global_ref);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed setting up TTM BO subsystem\n");
- drm_global_item_unref(&drm->ttm.mem_global_ref);
- drm->ttm.mem_global_ref.release = NULL;
- return ret;
- }
-
- return 0;
-}
-
-void
-nouveau_ttm_global_release(struct nouveau_drm *drm)
-{
- if (drm->ttm.mem_global_ref.release == NULL)
- return;
-
- drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
- drm_global_item_unref(&drm->ttm.mem_global_ref);
- drm->ttm.mem_global_ref.release = NULL;
-}
-
-static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
struct nvif_mmu *mmu = &drm->client.mmu;
@@ -296,12 +236,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
drm->agp.cma = pci->agp.cma;
}
- ret = nouveau_ttm_global_init(drm);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&drm->ttm.bdev,
- drm->ttm.bo_global_ref.ref.object,
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -356,8 +291,6 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
ttm_bo_device_release(&drm->ttm.bdev);
- nouveau_ttm_global_release(drm);
-
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
diff --git a/drivers/gpu/drm/nouveau/nouveau_vmm.h b/drivers/gpu/drm/nouveau/nouveau_vmm.h
index 7e3b118cf7c4..ede872f6f668 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vmm.h
@@ -25,7 +25,6 @@ void nouveau_vma_unmap(struct nouveau_vma *);
struct nouveau_vmm {
struct nouveau_cli *cli;
struct nvif_vmm vmm;
- struct nvkm_vm *vm;
};
int nouveau_vmm_init(struct nouveau_cli *, s32 oclass, struct nouveau_vmm *);
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 18c7d064f75c..ef97dd223a32 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -34,6 +34,7 @@ int
nvif_disp_ctor(struct nvif_device *device, s32 oclass, struct nvif_disp *disp)
{
static const struct nvif_mclass disps[] = {
+ { TU104_DISP, -1 },
{ GV100_DISP, -1 },
{ GP102_DISP, -1 },
{ GP100_DISP, -1 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 03f676c18aad..c61b467cf45e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -79,7 +79,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_ENGINE_NVENC0 ] = "nvenc0",
[NVKM_ENGINE_NVENC1 ] = "nvenc1",
[NVKM_ENGINE_NVENC2 ] = "nvenc2",
- [NVKM_ENGINE_NVDEC ] = "nvdec",
+ [NVKM_ENGINE_NVDEC0 ] = "nvdec0",
+ [NVKM_ENGINE_NVDEC1 ] = "nvdec1",
+ [NVKM_ENGINE_NVDEC2 ] = "nvdec2",
[NVKM_ENGINE_PM ] = "pm",
[NVKM_ENGINE_SEC ] = "sec",
[NVKM_ENGINE_SEC2 ] = "sec2",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 80d784441904..177a23301d6a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -6,3 +6,4 @@ nvkm-y += nvkm/engine/ce/gm200.o
nvkm-y += nvkm/engine/ce/gp100.o
nvkm-y += nvkm/engine/ce/gp102.o
nvkm-y += nvkm/engine/ce/gv100.o
+nvkm-y += nvkm/engine/ce/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c
new file mode 100644
index 000000000000..3c25043bbb33
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/tu104.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+tu104_ce = {
+ .intr = gp100_ce_intr,
+ .sclass = {
+ { -1, -1, TURING_DMA_COPY_A },
+ {}
+ }
+};
+
+int
+tu104_ce_new(struct nvkm_device *device, int index,
+ struct nvkm_engine **pengine)
+{
+ return nvkm_engine_new_(&tu104_ce, device, index, true, pengine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index e294013426ce..bfbc9341e0c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2221,7 +2221,7 @@ nv132_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2257,7 +2257,7 @@ nv134_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp104_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2293,7 +2293,7 @@ nv136_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp104_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2329,7 +2329,7 @@ nv137_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp107_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2365,7 +2365,7 @@ nv138_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp107_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2430,10 +2430,74 @@ nv140_chipset = {
.dma = gv100_dma_new,
.fifo = gv100_fifo_new,
.gr = gv100_gr_new,
- .nvdec = gp102_nvdec_new,
+ .nvdec[0] = gp102_nvdec_new,
.sec2 = gp102_sec2_new,
};
+static const struct nvkm_device_chip
+nv164_chipset = {
+ .name = "TU104",
+ .bar = tu104_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu104_devinit_new,
+ .fault = tu104_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu104_mc_new,
+ .mmu = tu104_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu104_ce_new,
+ .ce[1] = tu104_ce_new,
+ .ce[2] = tu104_ce_new,
+ .ce[3] = tu104_ce_new,
+ .ce[4] = tu104_ce_new,
+ .disp = tu104_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu104_fifo_new,
+};
+
+static const struct nvkm_device_chip
+nv166_chipset = {
+ .name = "TU106",
+ .bar = tu104_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = tu104_devinit_new,
+ .fault = tu104_fault_new,
+ .fb = gv100_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp102_ltc_new,
+ .mc = tu104_mc_new,
+ .mmu = tu104_mmu_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .therm = gp100_therm_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = tu104_ce_new,
+ .ce[1] = tu104_ce_new,
+ .ce[2] = tu104_ce_new,
+ .ce[3] = tu104_ce_new,
+ .ce[4] = tu104_ce_new,
+ .disp = tu104_disp_new,
+ .dma = gv100_dma_new,
+ .fifo = tu104_fifo_new,
+};
+
static int
nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
struct nvkm_notify *notify)
@@ -2529,7 +2593,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(NVENC0 , device->nvenc[0], device->nvenc[0]);
_(NVENC1 , device->nvenc[1], device->nvenc[1]);
_(NVENC2 , device->nvenc[2], device->nvenc[2]);
- _(NVDEC , device->nvdec , &device->nvdec->engine);
+ _(NVDEC0 , device->nvdec[0], &device->nvdec[0]->engine);
+ _(NVDEC1 , device->nvdec[1], &device->nvdec[1]->engine);
+ _(NVDEC2 , device->nvdec[2], &device->nvdec[2]->engine);
_(PM , device->pm , &device->pm->engine);
_(SEC , device->sec , device->sec);
_(SEC2 , device->sec2 , &device->sec2->engine);
@@ -2791,6 +2857,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x120: device->card_type = GM100; break;
case 0x130: device->card_type = GP100; break;
case 0x140: device->card_type = GV100; break;
+ case 0x160: device->card_type = TU100; break;
default:
break;
}
@@ -2883,6 +2950,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x138: device->chip = &nv138_chipset; break;
case 0x13b: device->chip = &nv13b_chipset; break;
case 0x140: device->chip = &nv140_chipset; break;
+ case 0x164: device->chip = &nv164_chipset; break;
+ case 0x166: device->chip = &nv166_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
@@ -2988,7 +3057,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_ENGINE_NVENC0 , nvenc[0]);
_(NVKM_ENGINE_NVENC1 , nvenc[1]);
_(NVKM_ENGINE_NVENC2 , nvenc[2]);
- _(NVKM_ENGINE_NVDEC , nvdec);
+ _(NVKM_ENGINE_NVDEC0 , nvdec[0]);
+ _(NVKM_ENGINE_NVDEC1 , nvdec[1]);
+ _(NVKM_ENGINE_NVDEC2 , nvdec[2]);
_(NVKM_ENGINE_PM , pm);
_(NVKM_ENGINE_SEC , sec);
_(NVKM_ENGINE_SEC2 , sec2);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index dde6bbafa709..092ddc4ffefa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -91,7 +91,7 @@ nvkm_udevice_info_v1(struct nvkm_device *device,
case ENGINE_A(MSENC ); break;
case ENGINE_A(VIC ); break;
case ENGINE_A(SEC2 ); break;
- case ENGINE_A(NVDEC ); break;
+ case ENGINE_B(NVDEC ); break;
case ENGINE_B(NVENC ); break;
default:
args->mthd = NV_DEVICE_INFO_INVALID;
@@ -175,6 +175,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
+ case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
default:
args->v0.family = 0;
break;
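The ENGINE_A() cases map single-instance engines, while ENGINE_B() matches the whole <engine>0..n index range; with NVDEC now a three-entry array like NVENC, it moves from the A form to the B form.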
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 8089ac9a12e2..c6a257ba4347 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -15,6 +15,7 @@ nvkm-y += nvkm/engine/disp/gm200.o
nvkm-y += nvkm/engine/disp/gp100.o
nvkm-y += nvkm/engine/disp/gp102.o
nvkm-y += nvkm/engine/disp/gv100.o
+nvkm-y += nvkm/engine/disp/tu104.o
nvkm-y += nvkm/engine/disp/vga.o
nvkm-y += nvkm/engine/disp/head.o
@@ -38,6 +39,7 @@ nvkm-y += nvkm/engine/disp/sorgk104.o
nvkm-y += nvkm/engine/disp/sorgm107.o
nvkm-y += nvkm/engine/disp/sorgm200.o
nvkm-y += nvkm/engine/disp/sorgv100.o
+nvkm-y += nvkm/engine/disp/sortu104.o
nvkm-y += nvkm/engine/disp/outp.o
nvkm-y += nvkm/engine/disp/dp.o
@@ -69,6 +71,7 @@ nvkm-y += nvkm/engine/disp/rootgm200.o
nvkm-y += nvkm/engine/disp/rootgp100.o
nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/rootgv100.o
+nvkm-y += nvkm/engine/disp/roottu104.o
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index d0a7e3456da1..47be0ba4aebe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -28,7 +28,7 @@
#include <core/gpuobj.h>
#include <subdev/timer.h>
-static int
+int
gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
struct nvkm_device *device = disp->engine.subdev.device;
@@ -36,7 +36,7 @@ gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
}
-static void
+void
gv100_disp_super(struct work_struct *work)
{
struct nv50_disp *disp =
@@ -257,7 +257,7 @@ gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
}
}
-static void
+void
gv100_disp_intr(struct nv50_disp *disp)
{
struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -297,7 +297,7 @@ gv100_disp_intr(struct nv50_disp *disp)
nvkm_warn(subdev, "intr %08x\n", stat);
}
-static void
+void
gv100_disp_fini(struct nv50_disp *disp)
{
struct nvkm_device *device = disp->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 0f0c86c32ec3..790e42f460fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -144,6 +144,11 @@ void gm200_sor_route_set(struct nvkm_outp *, struct nvkm_ior *);
int gm200_sor_route_get(struct nvkm_outp *, int *);
void gm200_sor_dp_drive(struct nvkm_ior *, int, int, int, int, int);
+void gv100_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
+void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
+void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
+void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
+
void g84_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gt215_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gf119_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
@@ -195,4 +200,6 @@ int gm200_sor_new(struct nvkm_disp *, int);
int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
int gv100_sor_new(struct nvkm_disp *, int);
+
+int tu104_sor_new(struct nvkm_disp *, int);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index 8580382ab248..c36a8a7cafa1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -78,6 +78,11 @@ void gf119_disp_intr(struct nv50_disp *);
void gf119_disp_super(struct work_struct *);
void gf119_disp_intr_error(struct nv50_disp *, int);
+void gv100_disp_fini(struct nv50_disp *);
+void gv100_disp_intr(struct nv50_disp *);
+void gv100_disp_super(struct work_struct *);
+int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
+
void nv50_disp_dptmds_war_2(struct nv50_disp *, struct dcb_output *);
void nv50_disp_dptmds_war_3(struct nv50_disp *, struct dcb_output *);
void nv50_disp_update_sppll1(struct nv50_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index 6ca4f9184b51..97de928cbde1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -37,4 +37,5 @@ extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
extern const struct nvkm_disp_oclass gp102_disp_root_oclass;
extern const struct nvkm_disp_oclass gv100_disp_root_oclass;
+extern const struct nvkm_disp_oclass tu104_disp_root_oclass;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c
new file mode 100644
index 000000000000..ad438c62f66c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu104.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "rootnv50.h"
+#include "channv50.h"
+
+#include <nvif/class.h>
+
+static const struct nv50_disp_root_func
+tu104_disp_root = {
+ .user = {
+ {{0,0,TU104_DISP_CURSOR }, gv100_disp_curs_new },
+ {{0,0,TU104_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
+ {{0,0,TU104_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
+ {{0,0,TU104_DISP_WINDOW_CHANNEL_DMA }, gv100_disp_wndw_new },
+ {}
+ },
+};
+
+static int
+tu104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ return nv50_disp_root_new_(&tu104_disp_root, disp, oclass,
+ data, size, pobject);
+}
+
+const struct nvkm_disp_oclass
+tu104_disp_root_oclass = {
+ .base.oclass = TU104_DISP,
+ .base.minver = -1,
+ .base.maxver = -1,
+ .ctor = tu104_disp_root_new,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
index 8ba881a729ee..b0597ff9a714 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
@@ -23,7 +23,7 @@
#include <subdev/timer.h>
-static void
+void
gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -31,7 +31,7 @@ gv100_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
nvkm_mask(device, 0x616550 + hoff, 0x0c00003f, 0x08000000 | watermark);
}
-static void
+void
gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -40,7 +40,7 @@ gv100_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
nvkm_mask(device, 0x61656c + hoff, 0x00ffffff, v);
}
-static void
+void
gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
@@ -54,7 +54,7 @@ gv100_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
);
}
-static void
+void
gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
struct nvkm_device *device = sor->disp->engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c
new file mode 100644
index 000000000000..df026a525ef1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu104.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+#include <subdev/timer.h>
+
+static void
+tu104_sor_dp_vcpi(struct nvkm_ior *sor, int head,
+ u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 hoff = head * 0x800;
+
+ nvkm_mask(device, 0x61657c + hoff, 0xffffffff, (aligned << 16) | pbn);
+ nvkm_mask(device, 0x616578 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
+}
+
+static int
+tu104_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
+{
+ struct nvkm_device *device = sor->disp->engine.subdev.device;
+ const u32 soff = nv50_ior_base(sor);
+ const u32 loff = nv50_sor_link(sor);
+ u32 dpctrl = 0x00000000;
+ u32 clksor = 0x00000000;
+
+ clksor |= sor->dp.bw << 18;
+ dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
+ if (sor->dp.mst)
+ dpctrl |= 0x40000000;
+ if (sor->dp.ef)
+ dpctrl |= 0x00004000;
+
+ nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
+
+ /*XXX*/
+ nvkm_msec(device, 40, NVKM_DELAY);
+ nvkm_mask(device, 0x612300 + soff, 0x00030000, 0x00010000);
+ nvkm_mask(device, 0x61c10c + loff, 0x00000003, 0x00000001);
+
+ nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
+ return 0;
+}
+
+static const struct nvkm_ior_func
+tu104_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = tu104_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu104_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
+ },
+ .hda = {
+ .hpd = gf119_hda_hpd,
+ .eld = gf119_hda_eld,
+ },
+};
+
+int
+tu104_sor_new(struct nvkm_disp *disp, int id)
+{
+ return nvkm_ior_new_(&tu104_sor, disp, SOR, id);
+}
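The link-training sequence above is built almost entirely on nvkm_mask(), nvkm's read-modify-write register accessor. A hedged equivalent in terms of the plain rd32/wr32 helpers (rmw_sketch() is illustrative, not the in-tree implementation):

static u32
rmw_sketch(struct nvkm_device *device, u32 addr, u32 mask, u32 data)
{
	u32 temp = nvkm_rd32(device, addr);             /* read           */
	nvkm_wr32(device, addr, (temp & ~mask) | data); /* modify + write */
	return temp;                                    /* previous value */
}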
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c
new file mode 100644
index 000000000000..13fa21459d38
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu104.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+#include "head.h"
+#include "ior.h"
+#include "channv50.h"
+#include "rootnv50.h"
+
+#include <core/gpuobj.h>
+#include <subdev/timer.h>
+
+static int
+tu104_disp_init(struct nv50_disp *disp)
+{
+ struct nvkm_device *device = disp->base.engine.subdev.device;
+ struct nvkm_head *head;
+ int i, j;
+ u32 tmp;
+
+ /* Claim ownership of display. */
+ if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
+ nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
+ break;
+ ) < 0)
+ return -EBUSY;
+ }
+
+ /* Lock pin capabilities. */
+ tmp = 0x00000021; /*XXX*/
+ nvkm_wr32(device, 0x640008, tmp);
+
+ /* SOR capabilities. */
+ for (i = 0; i < disp->sor.nr; i++) {
+ tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
+ nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
+ nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
+ }
+
+ /* Head capabilities. */
+ list_for_each_entry(head, &disp->base.head, head) {
+ const int id = head->id;
+
+ /* RG. */
+ tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
+ nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);
+
+ /* POSTCOMP. */
+ for (j = 0; j < 5 * 4; j += 4) {
+ tmp = nvkm_rd32(device, 0x616140 + (id * 0x800) + j);
+ nvkm_wr32(device, 0x640680 + (id * 0x20) + j, tmp);
+ }
+ }
+
+ /* Window capabilities. */
+ for (i = 0; i < disp->wndw.nr; i++) {
+ nvkm_mask(device, 0x640004, 1 << i, 1 << i);
+ for (j = 0; j < 6 * 4; j += 4) {
+ tmp = nvkm_rd32(device, 0x630100 + (i * 0x800) + j);
+ nvkm_mask(device, 0x640780 + (i * 0x20) + j, 0xffffffff, tmp);
+ }
+ nvkm_mask(device, 0x64000c, 0x00000100, 0x00000100);
+ }
+
+ /* IHUB capabilities. */
+ for (i = 0; i < 3; i++) {
+ tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
+ nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
+ }
+
+ nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);
+
+ /* Setup instance memory. */
+ switch (nvkm_memory_target(disp->inst->memory)) {
+ case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
+ case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
+ case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
+ default:
+ break;
+ }
+ nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
+ nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);
+
+ /* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
+ nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
+ nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */
+
+ /* EXC_OTHER: CURSn, CORE. */
+ nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
+ 0x00000001); /* MSK. */
+ nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */
+
+ /* EXC_WINIM. */
+ nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
+ nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */
+
+ /* EXC_WIN. */
+ nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
+ nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */
+
+ /* HEAD_TIMING(n): VBLANK. */
+ list_for_each_entry(head, &disp->base.head, head) {
+ const u32 hoff = head->id * 4;
+ nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
+ nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
+ }
+
+ /* OR. */
+ nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
+ nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
+ return 0;
+}
+
+static const struct nv50_disp_func
+tu104_disp = {
+ .init = tu104_disp_init,
+ .fini = gv100_disp_fini,
+ .intr = gv100_disp_intr,
+ .uevent = &gv100_disp_chan_uevent,
+ .super = gv100_disp_super,
+ .root = &tu104_disp_root_oclass,
+ .wndw = { .cnt = gv100_disp_wndw_cnt },
+ .head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
+ .sor = { .cnt = gv100_sor_cnt, .new = tu104_sor_new },
+ .ramht_size = 0x2000,
+};
+
+int
+tu104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
+{
+ return nv50_disp_new_(&tu104_disp, device, index, pdisp);
+}
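tu104_disp_init() uses the nvkm_msec() family from timer.h in both of its idioms: as a poll that breaks out on success (the ownership handshake at the top) and, in sortu104.c above, as a fixed delay via NVKM_DELAY, which clears _warn so no timeout is reported. Condensed, with ready() as an illustrative condition:

	/* poll: result is negative (-ETIMEDOUT) unless 'break' fired */
	if (nvkm_msec(device, 2000,
		if (ready(device))
			break;
	) < 0)
		return -EBUSY;

	/* fixed delay: warning suppressed, return value ignored */
	nvkm_msec(device, 40, NVKM_DELAY);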
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
index 98911805aabf..5d3b641dbb14 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/wndwgv100.c
@@ -118,7 +118,7 @@ gv100_disp_wndw_mthd_base = {
const struct nv50_disp_chan_mthd
gv100_disp_wndw_mthd = {
- .name = "Base",
+ .name = "Window",
.addr = 0x001000,
.prev = 0x000800,
.data = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index f00408577a6a..87d8e054e40a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -16,6 +16,7 @@ nvkm-y += nvkm/engine/fifo/gm20b.o
nvkm-y += nvkm/engine/fifo/gp100.o
nvkm-y += nvkm/engine/fifo/gp10b.o
nvkm-y += nvkm/engine/fifo/gv100.o
+nvkm-y += nvkm/engine/fifo/tu104.o
nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
@@ -33,5 +34,7 @@ nvkm-y += nvkm/engine/fifo/gpfifog84.o
nvkm-y += nvkm/engine/fifo/gpfifogf100.o
nvkm-y += nvkm/engine/fifo/gpfifogk104.o
nvkm-y += nvkm/engine/fifo/gpfifogv100.o
+nvkm-y += nvkm/engine/fifo/gpfifotu104.o
nvkm-y += nvkm/engine/fifo/usergv100.o
+nvkm-y += nvkm/engine/fifo/usertu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
index 3ffef236189e..2c7c5afc1ea5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
@@ -17,6 +17,7 @@ struct nvkm_fifo_chan_func {
bool suspend);
int (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
void (*object_dtor)(struct nvkm_fifo_chan *, int);
+ u32 (*submit_token)(struct nvkm_fifo_chan *);
};
int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index 8e28ba6b2307..a14545d871d8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -14,6 +14,8 @@ struct gk104_fifo_chan {
struct list_head head;
bool killed;
+ struct nvkm_memory *mthd;
+
struct {
struct nvkm_gpuobj *inst;
struct nvkm_vma *vma;
@@ -36,4 +38,15 @@ int gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *);
int gv100_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
void *data, u32 size, struct nvkm_object **);
+int gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *,
+ struct gk104_fifo *, u64 *, u16 *, u64, u64, u64,
+ u64 *, bool, u32 *, const struct nvkm_oclass *,
+ struct nvkm_object **);
+int gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *,
+ struct nvkm_engine *);
+int gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *,
+ struct nvkm_engine *, bool);
+
+int tu104_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
+ void *data, u32 size, struct nvkm_object **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index f69576868164..10a2e7039a75 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
@@ -346,10 +346,10 @@ gf100_fifo_intr_fault(struct gf100_fifo *fifo, int unit)
if (eu && eu->data2) {
switch (eu->data2) {
case NVKM_SUBDEV_BAR:
- nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+ nvkm_bar_bar1_reset(device);
break;
case NVKM_SUBDEV_INSTMEM:
- nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+ nvkm_bar_bar2_reset(device);
break;
case NVKM_ENGINE_IFB:
nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index afccf9721cf0..1053fe796466 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -149,16 +149,41 @@ gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
}
void
-gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
+gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+ struct nvkm_memory *mem, int nr)
+{
+ struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ int target;
+
+ switch (nvkm_memory_target(mem)) {
+ case NVKM_MEM_TARGET_VRAM: target = 0; break;
+ case NVKM_MEM_TARGET_NCOH: target = 3; break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
+ (target << 28));
+ nvkm_wr32(device, 0x002274, (runl << 20) | nr);
+
+ if (nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
+ break;
+ ) < 0)
+ nvkm_error(subdev, "runlist %d update timeout\n", runl);
+}
+
+void
+gk104_fifo_runlist_update(struct gk104_fifo *fifo, int runl)
{
const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
struct gk104_fifo_chan *chan;
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
struct nvkm_memory *mem;
struct nvkm_fifo_cgrp *cgrp;
int nr = 0;
- int target;
mutex_lock(&subdev->mutex);
mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
@@ -177,24 +202,7 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
}
nvkm_done(mem);
- switch (nvkm_memory_target(mem)) {
- case NVKM_MEM_TARGET_VRAM: target = 0; break;
- case NVKM_MEM_TARGET_NCOH: target = 3; break;
- default:
- WARN_ON(1);
- goto unlock;
- }
-
- nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
- (target << 28));
- nvkm_wr32(device, 0x002274, (runl << 20) | nr);
-
- if (nvkm_msec(device, 2000,
- if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
- break;
- ) < 0)
- nvkm_error(subdev, "runlist %d update timeout\n", runl);
-unlock:
+ func->commit(fifo, runl, mem, nr);
mutex_unlock(&subdev->mutex);
}
@@ -238,6 +246,29 @@ const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
.size = 8,
.chan = gk104_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
+};
+
+void
+gk104_fifo_pbdma_init(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+}
+
+int
+gk104_fifo_pbdma_nr(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ /* Determine number of PBDMAs by checking valid enable bits. */
+ nvkm_wr32(device, 0x000204, 0xffffffff);
+ return hweight32(nvkm_rd32(device, 0x000204));
+}
+
+const struct gk104_fifo_pbdma_func
+gk104_fifo_pbdma = {
+ .nr = gk104_fifo_pbdma_nr,
+ .init = gk104_fifo_pbdma_init,
};
static void
@@ -267,7 +298,7 @@ gk104_fifo_recover_work(struct work_struct *w)
}
for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
- gk104_fifo_runlist_commit(fifo, runl);
+ gk104_fifo_runlist_update(fifo, runl);
nvkm_wr32(device, 0x00262c, runm);
nvkm_mask(device, 0x002630, runm, 0x00000000);
@@ -456,10 +487,10 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
if (ee && ee->data2) {
switch (ee->data2) {
case NVKM_SUBDEV_BAR:
- nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
+ nvkm_bar_bar1_reset(device);
break;
case NVKM_SUBDEV_INSTMEM:
- nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
+ nvkm_bar_bar2_reset(device);
break;
case NVKM_ENGINE_IFB:
nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
@@ -904,9 +935,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
enum nvkm_devidx engidx;
u32 *map;
- /* Determine number of PBDMAs by checking valid enable bits. */
- nvkm_wr32(device, 0x000204, 0xffffffff);
- fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
+ fifo->pbdma_nr = fifo->func->pbdma->nr(fifo);
nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);
/* Read PBDMA->runlist(s) mapping from HW. */
@@ -978,7 +1007,7 @@ gk104_fifo_init(struct nvkm_fifo *base)
int i;
/* Enable PBDMAs. */
- nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);
+ fifo->func->pbdma->init(fifo);
/* PBDMA[n] */
for (i = 0; i < fifo->pbdma_nr; i++) {
@@ -995,8 +1024,8 @@ gk104_fifo_init(struct nvkm_fifo *base)
nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);
- if (fifo->func->init_pbdma_timeout)
- fifo->func->init_pbdma_timeout(fifo);
+ if (fifo->func->pbdma->init_timeout)
+ fifo->func->pbdma->init_timeout(fifo);
nvkm_wr32(device, 0x002100, 0xffffffff);
nvkm_wr32(device, 0x002140, 0x7fffffff);
@@ -1175,6 +1204,7 @@ gk104_fifo_fault_gpcclient[] = {
static const struct gk104_fifo_func
gk104_fifo = {
+ .pbdma = &gk104_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index d295b81e18d6..d4e565658f46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -45,7 +45,11 @@ struct gk104_fifo {
};
struct gk104_fifo_func {
- void (*init_pbdma_timeout)(struct gk104_fifo *);
+ const struct gk104_fifo_pbdma_func {
+ int (*nr)(struct gk104_fifo *);
+ void (*init)(struct gk104_fifo *);
+ void (*init_timeout)(struct gk104_fifo *);
+ } *pbdma;
struct {
const struct nvkm_enum *access;
@@ -61,6 +65,8 @@ struct gk104_fifo_func {
struct nvkm_memory *, u32 offset);
void (*chan)(struct gk104_fifo_chan *,
struct nvkm_memory *, u32 offset);
+ void (*commit)(struct gk104_fifo *, int runl,
+ struct nvkm_memory *, int entries);
} *runlist;
struct gk104_fifo_user_user {
@@ -81,8 +87,11 @@ int gk104_fifo_new_(const struct gk104_fifo_func *, struct nvkm_device *,
int index, int nr, struct nvkm_fifo **);
void gk104_fifo_runlist_insert(struct gk104_fifo *, struct gk104_fifo_chan *);
void gk104_fifo_runlist_remove(struct gk104_fifo *, struct gk104_fifo_chan *);
-void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl);
+void gk104_fifo_runlist_update(struct gk104_fifo *, int runl);
+extern const struct gk104_fifo_pbdma_func gk104_fifo_pbdma;
+int gk104_fifo_pbdma_nr(struct gk104_fifo *);
+void gk104_fifo_pbdma_init(struct gk104_fifo *);
extern const struct nvkm_enum gk104_fifo_fault_access[];
extern const struct nvkm_enum gk104_fifo_fault_engine[];
extern const struct nvkm_enum gk104_fifo_fault_reason[];
@@ -91,15 +100,30 @@ extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
extern const struct gk104_fifo_runlist_func gk104_fifo_runlist;
void gk104_fifo_runlist_chan(struct gk104_fifo_chan *,
struct nvkm_memory *, u32);
+void gk104_fifo_runlist_commit(struct gk104_fifo *, int runl,
+ struct nvkm_memory *, int);
extern const struct gk104_fifo_runlist_func gk110_fifo_runlist;
void gk110_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
struct nvkm_memory *, u32);
-void gk208_fifo_init_pbdma_timeout(struct gk104_fifo *);
+extern const struct gk104_fifo_pbdma_func gk208_fifo_pbdma;
+void gk208_fifo_pbdma_init_timeout(struct gk104_fifo *);
extern const struct nvkm_enum gm107_fifo_fault_engine[];
extern const struct gk104_fifo_runlist_func gm107_fifo_runlist;
+extern const struct gk104_fifo_pbdma_func gm200_fifo_pbdma;
+int gm200_fifo_pbdma_nr(struct gk104_fifo *);
+
extern const struct nvkm_enum gp100_fifo_fault_engine[];
+
+extern const struct nvkm_enum gv100_fifo_fault_access[];
+extern const struct nvkm_enum gv100_fifo_fault_reason[];
+extern const struct nvkm_enum gv100_fifo_fault_hubclient[];
+extern const struct nvkm_enum gv100_fifo_fault_gpcclient[];
+void gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *,
+ struct nvkm_memory *, u32);
+void gv100_fifo_runlist_chan(struct gk104_fifo_chan *,
+ struct nvkm_memory *, u32);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index ac7655a130fb..8adfa6b182cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
@@ -43,10 +43,12 @@ gk110_fifo_runlist = {
.size = 8,
.cgrp = gk110_fifo_runlist_cgrp,
.chan = gk104_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
};
static const struct gk104_fifo_func
gk110_fifo = {
+ .pbdma = &gk104_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
index 5ea7e452cc66..9553fb4af601 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk208.c
@@ -27,7 +27,7 @@
#include <nvif/class.h>
void
-gk208_fifo_init_pbdma_timeout(struct gk104_fifo *fifo)
+gk208_fifo_pbdma_init_timeout(struct gk104_fifo *fifo)
{
struct nvkm_device *device = fifo->base.engine.subdev.device;
int i;
@@ -36,9 +36,16 @@ gk208_fifo_init_pbdma_timeout(struct gk104_fifo *fifo)
nvkm_wr32(device, 0x04012c + (i * 0x2000), 0x0000ffff);
}
+const struct gk104_fifo_pbdma_func
+gk208_fifo_pbdma = {
+ .nr = gk104_fifo_pbdma_nr,
+ .init = gk104_fifo_pbdma_init,
+ .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
static const struct gk104_fifo_func
gk208_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
index 535a0eb67a5f..a4c6ac3cd6c7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk20a.c
@@ -26,7 +26,7 @@
static const struct gk104_fifo_func
gk20a_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gk104_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index 79ae19b1db67..acf230764cb0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
@@ -41,6 +41,7 @@ gm107_fifo_runlist = {
.size = 8,
.cgrp = gk110_fifo_runlist_cgrp,
.chan = gm107_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
};
const struct nvkm_enum
@@ -68,7 +69,7 @@ gm107_fifo_fault_engine[] = {
static const struct gk104_fifo_func
gm107_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gk208_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
index 49565faa854d..b96c1c5d6577 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm200.c
@@ -26,9 +26,23 @@
#include <nvif/class.h>
+int
+gm200_fifo_pbdma_nr(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ return nvkm_rd32(device, 0x002004) & 0x000000ff;
+}
+
+const struct gk104_fifo_pbdma_func
+gm200_fifo_pbdma = {
+ .nr = gm200_fifo_pbdma_nr,
+ .init = gk104_fifo_pbdma_init,
+ .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
static const struct gk104_fifo_func
gm200_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
index 46736513bd11..a49539b9e4ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm20b.c
@@ -26,7 +26,7 @@
static const struct gk104_fifo_func
gm20b_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gm107_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index e2f8f9087d7c..54377e0f6a88 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -52,7 +52,7 @@ gp100_fifo_fault_engine[] = {
static const struct gk104_fifo_func
gp100_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gp100_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
index 7733bf7c6545..778ba7e46fb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -26,7 +26,7 @@
static const struct gk104_fifo_func
gp10b_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gm200_fifo_pbdma,
.fault.access = gk104_fifo_fault_access,
.fault.engine = gp100_fifo_fault_engine,
.fault.reason = gk104_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
index 118b37aea318..728a1edbf98c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogk104.c
@@ -85,7 +85,7 @@ gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
case NVKM_ENGINE_MSVLD : return 0x0270;
case NVKM_ENGINE_VIC : return 0x0280;
case NVKM_ENGINE_MSENC : return 0x0290;
- case NVKM_ENGINE_NVDEC : return 0x02100270;
+ case NVKM_ENGINE_NVDEC0: return 0x02100270;
case NVKM_ENGINE_NVENC0: return 0x02100290;
case NVKM_ENGINE_NVENC1: return 0x0210;
default:
@@ -192,7 +192,7 @@ gk104_fifo_gpfifo_fini(struct nvkm_fifo_chan *base)
gk104_fifo_runlist_remove(fifo, chan);
nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
gk104_fifo_gpfifo_kick(chan);
- gk104_fifo_runlist_commit(fifo, chan->runl);
+ gk104_fifo_runlist_update(fifo, chan->runl);
}
nvkm_wr32(device, 0x800000 + coff, 0x00000000);
@@ -213,7 +213,7 @@ gk104_fifo_gpfifo_init(struct nvkm_fifo_chan *base)
if (list_empty(&chan->head) && !chan->killed) {
gk104_fifo_runlist_insert(fifo, chan);
nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
- gk104_fifo_runlist_commit(fifo, chan->runl);
+ gk104_fifo_runlist_update(fifo, chan->runl);
nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
}
}
@@ -222,6 +222,7 @@ void *
gk104_fifo_gpfifo_dtor(struct nvkm_fifo_chan *base)
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ nvkm_memory_unref(&chan->mthd);
kfree(chan->cgrp);
return chan;
}
@@ -240,7 +241,7 @@ gk104_fifo_gpfifo_func = {
static int
gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
- u64 vmm, u64 ioffset, u64 ilength,
+ u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
@@ -279,6 +280,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
return ret;
*chid = chan->base.chid;
+ *inst = chan->base.inst->addr;
/* Hack to support GPUs where even individual channels should be
* part of a channel group.
@@ -315,6 +317,7 @@ gk104_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
nvkm_wo32(chan->base.inst, 0x94, 0x30000001);
nvkm_wo32(chan->base.inst, 0x9c, 0x00000100);
nvkm_wo32(chan->base.inst, 0xac, 0x0000001f);
+ nvkm_wo32(chan->base.inst, 0xe4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->base.inst, 0xe8, chan->base.chid);
nvkm_wo32(chan->base.inst, 0xb8, 0xf8000000);
nvkm_wo32(chan->base.inst, 0xf8, 0x10003080); /* 0x002310 */
@@ -337,15 +340,19 @@ gk104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"ioffset %016llx ilength %08x "
- "runlist %016llx\n",
+ "runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
- args->v0.ilength, args->v0.runlist);
+ args->v0.ilength, args->v0.runlist, args->v0.priv);
+ if (args->v0.priv && !oclass->client->super)
+ return -EINVAL;
return gk104_fifo_gpfifo_new_(fifo,
&args->v0.runlist,
&args->v0.chid,
args->v0.vmm,
args->v0.ioffset,
args->v0.ilength,
+ &args->v0.inst,
+ args->v0.priv,
oclass, pobject);
}
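A small model of the privilege handling this hunk adds: a priv channel may only be requested by a super client, and the request is folded into the RAMFC word at 0xe4 (0x20 when privileged). The client/args types below are hypothetical stand-ins for the nvkm ones:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct client { bool super; };     /* stand-in for oclass->client */
struct chan_args { bool priv; };   /* stand-in for args->v0 */

static int chan_ramfc_e4(const struct client *cl, const struct chan_args *a,
			 unsigned int *e4)
{
	if (a->priv && !cl->super)  /* unprivileged client asking for priv */
		return -EINVAL;
	*e4 = a->priv ? 0x00000020 : 0x00000000;
	return 0;
}

int main(void)
{
	struct client cl = { .super = false };
	struct chan_args a = { .priv = true };
	unsigned int e4;
	printf("ret=%d\n", chan_ramfc_e4(&cl, &a, &e4)); /* -22: rejected */
	return 0;
}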
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
index 9598853ced56..a7462cf59d65 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
@@ -25,9 +25,15 @@
#include <core/client.h>
#include <core/gpuobj.h>
-#include <nvif/cla06f.h>
+#include <nvif/clc36f.h>
#include <nvif/unpack.h>
+static u32
+gv100_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *chan)
+{
+ return chan->chid;
+}
+
static int
gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
{
@@ -56,7 +62,7 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
return ret;
}
-static int
+int
gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine, bool suspend)
{
@@ -79,7 +85,7 @@ gv100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
return ret;
}
-static int
+int
gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
struct nvkm_engine *engine)
{
@@ -100,8 +106,8 @@ gv100_fifo_gpfifo_engine_init(struct nvkm_fifo_chan *base,
return gv100_fifo_gpfifo_engine_valid(chan, false, true);
}
-const struct nvkm_fifo_chan_func
-gv100_fifo_gpfifo_func = {
+static const struct nvkm_fifo_chan_func
+gv100_fifo_gpfifo = {
.dtor = gk104_fifo_gpfifo_dtor,
.init = gk104_fifo_gpfifo_init,
.fini = gk104_fifo_gpfifo_fini,
@@ -110,19 +116,23 @@ gv100_fifo_gpfifo_func = {
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gv100_fifo_gpfifo_engine_init,
.engine_fini = gv100_fifo_gpfifo_engine_fini,
+ .submit_token = gv100_fifo_gpfifo_submit_token,
};
-static int
-gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
- u64 vmm, u64 ioffset, u64 ilength,
- const struct nvkm_oclass *oclass,
+int
+gv100_fifo_gpfifo_new_(const struct nvkm_fifo_chan_func *func,
+ struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
+ u64 vmm, u64 ioffset, u64 ilength, u64 *inst, bool priv,
+ u32 *token, const struct nvkm_oclass *oclass,
struct nvkm_object **pobject)
{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
struct gk104_fifo_chan *chan;
int runlist = ffs(*runlists) -1, ret, i;
unsigned long engm;
u64 subdevs = 0;
- u64 usermem;
+ u64 usermem, mthd;
+ u32 size;
if (!vmm || runlist < 0 || runlist >= fifo->runlist_nr)
return -EINVAL;
@@ -142,14 +152,15 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
chan->runl = runlist;
INIT_LIST_HEAD(&chan->head);
- ret = nvkm_fifo_chan_ctor(&gv100_fifo_gpfifo_func, &fifo->base,
- 0x1000, 0x1000, true, vmm, 0, subdevs,
- 1, fifo->user.bar->addr, 0x200,
+ ret = nvkm_fifo_chan_ctor(func, &fifo->base, 0x1000, 0x1000, true, vmm,
+ 0, subdevs, 1, fifo->user.bar->addr, 0x200,
oclass, &chan->base);
if (ret)
return ret;
*chid = chan->base.chid;
+ *inst = chan->base.inst->addr;
+ *token = chan->base.func->submit_token(&chan->base);
/* Hack to support GPUs where even individual channels should be
* part of a channel group.
@@ -173,6 +184,20 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
nvkm_done(fifo->user.mem);
usermem = nvkm_memory_addr(fifo->user.mem) + usermem;
+ /* Allocate fault method buffer (magics come from nvgpu). */
+ size = nvkm_rd32(device, 0x104028); /* NV_PCE_PCE_MAP */
+ size = 27 * 5 * (((9 + 1 + 3) * hweight32(size)) + 2);
+ size = roundup(size, PAGE_SIZE);
+
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, 0x1000, true,
+ &chan->mthd);
+ if (ret)
+ return ret;
+
+ mthd = nvkm_memory_bar2(chan->mthd);
+ if (mthd == ~0ULL)
+ return -EFAULT;
+
/* RAMFC */
nvkm_kmap(chan->base.inst);
nvkm_wo32(chan->base.inst, 0x008, lower_32_bits(usermem));
@@ -184,13 +209,13 @@ gv100_fifo_gpfifo_new_(struct gk104_fifo *fifo, u64 *runlists, u16 *chid,
(ilength << 16));
nvkm_wo32(chan->base.inst, 0x084, 0x20400000);
nvkm_wo32(chan->base.inst, 0x094, 0x30000001);
- nvkm_wo32(chan->base.inst, 0x0e4, 0x00000020);
+ nvkm_wo32(chan->base.inst, 0x0e4, priv ? 0x00000020 : 0x00000000);
nvkm_wo32(chan->base.inst, 0x0e8, chan->base.chid);
- nvkm_wo32(chan->base.inst, 0x0f4, 0x00001100);
+ nvkm_wo32(chan->base.inst, 0x0f4, 0x00001000);
nvkm_wo32(chan->base.inst, 0x0f8, 0x10003080);
nvkm_mo32(chan->base.inst, 0x218, 0x00000000, 0x00000000);
- nvkm_wo32(chan->base.inst, 0x220, 0x020a1000);
- nvkm_wo32(chan->base.inst, 0x224, 0x00000000);
+ nvkm_wo32(chan->base.inst, 0x220, lower_32_bits(mthd));
+ nvkm_wo32(chan->base.inst, 0x224, upper_32_bits(mthd));
nvkm_done(chan->base.inst);
return gv100_fifo_gpfifo_engine_valid(chan, true, true);
}
@@ -201,7 +226,7 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
{
struct nvkm_object *parent = oclass->parent;
union {
- struct kepler_channel_gpfifo_a_v0 v0;
+ struct volta_channel_gpfifo_a_v0 v0;
} *args = data;
int ret = -ENOSYS;
@@ -209,15 +234,20 @@ gv100_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
"ioffset %016llx ilength %08x "
- "runlist %016llx\n",
+ "runlist %016llx priv %d\n",
args->v0.version, args->v0.vmm, args->v0.ioffset,
- args->v0.ilength, args->v0.runlist);
- return gv100_fifo_gpfifo_new_(fifo,
+ args->v0.ilength, args->v0.runlist, args->v0.priv);
+ if (args->v0.priv && !oclass->client->super)
+ return -EINVAL;
+ return gv100_fifo_gpfifo_new_(&gv100_fifo_gpfifo, fifo,
&args->v0.runlist,
&args->v0.chid,
args->v0.vmm,
args->v0.ioffset,
args->v0.ilength,
+ &args->v0.inst,
+ args->v0.priv,
+ &args->v0.token,
oclass, pobject);
}
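The fault method buffer sizing above uses constants the patch itself attributes to nvgpu; hweight32() is a population count. A standalone C model of the same arithmetic, assuming a PCE map read of 0x3f (six copy engines) purely for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096u

/* roundup() simplified for a power-of-two alignment. */
static unsigned int roundup_pow2(unsigned int x, unsigned int a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned int pce_map = 0x3f; /* pretend NV_PCE_PCE_MAP (0x104028) */
	unsigned int size;

	/* The "magics from nvgpu" quoted in the patch. */
	size = 27 * 5 * (((9 + 1 + 3) * (unsigned)__builtin_popcount(pce_map)) + 2);
	size = roundup_pow2(size, PAGE_SIZE);
	printf("fault method buffer: %u bytes\n", size); /* 12288 */
	return 0;
}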
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c
new file mode 100644
index 000000000000..ff70484dd01a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifotu104.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "changk104.h"
+#include "cgrp.h"
+
+#include <core/client.h>
+#include <core/gpuobj.h>
+
+#include <nvif/clc36f.h>
+#include <nvif/unpack.h>
+
+static u32
+tu104_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *base)
+{
+ struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
+ return (chan->runl << 16) | chan->base.chid;
+}
+
+static const struct nvkm_fifo_chan_func
+tu104_fifo_gpfifo = {
+ .dtor = gk104_fifo_gpfifo_dtor,
+ .init = gk104_fifo_gpfifo_init,
+ .fini = gk104_fifo_gpfifo_fini,
+ .ntfy = gf100_fifo_chan_ntfy,
+ .engine_ctor = gk104_fifo_gpfifo_engine_ctor,
+ .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
+ .engine_init = gv100_fifo_gpfifo_engine_init,
+ .engine_fini = gv100_fifo_gpfifo_engine_fini,
+ .submit_token = tu104_fifo_gpfifo_submit_token,
+};
+
+int
+tu104_fifo_gpfifo_new(struct gk104_fifo *fifo, const struct nvkm_oclass *oclass,
+ void *data, u32 size, struct nvkm_object **pobject)
+{
+ struct nvkm_object *parent = oclass->parent;
+ union {
+ struct volta_channel_gpfifo_a_v0 v0;
+ } *args = data;
+ int ret = -ENOSYS;
+
+ nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
+ if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
+ nvif_ioctl(parent, "create channel gpfifo vers %d vmm %llx "
+ "ioffset %016llx ilength %08x "
+ "runlist %016llx priv %d\n",
+ args->v0.version, args->v0.vmm, args->v0.ioffset,
+ args->v0.ilength, args->v0.runlist, args->v0.priv);
+ if (args->v0.priv && !oclass->client->super)
+ return -EINVAL;
+ return gv100_fifo_gpfifo_new_(&tu104_fifo_gpfifo, fifo,
+ &args->v0.runlist,
+ &args->v0.chid,
+ args->v0.vmm,
+ args->v0.ioffset,
+ args->v0.ilength,
+ &args->v0.inst,
+ args->v0.priv,
+ &args->v0.token,
+ oclass, pobject);
+ }
+
+ return ret;
+}
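The two submit_token implementations differ only in layout: Volta's doorbell token is the bare channel ID, while Turing's folds the runlist index into the upper half. A sketch of both, with example values chosen arbitrarily:

#include <stdio.h>

/* Volta (gv100): token is just the channel ID. */
static unsigned gv100_token(unsigned runl, unsigned chid)
{
	(void)runl;
	return chid;
}

/* Turing (tu104): runlist index occupies bits 16 and up. */
static unsigned tu104_token(unsigned runl, unsigned chid)
{
	return (runl << 16) | chid;
}

int main(void)
{
	printf("gv100: %#x\n", gv100_token(3, 42)); /* 0x2a */
	printf("tu104: %#x\n", tu104_token(3, 42)); /* 0x3002a */
	return 0;
}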
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 4e1d159c0ae7..6ee1bb32a071 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
@@ -28,7 +28,7 @@
#include <nvif/class.h>
-static void
+void
gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
struct nvkm_memory *memory, u32 offset)
{
@@ -42,7 +42,7 @@ gv100_fifo_runlist_chan(struct gk104_fifo_chan *chan,
nvkm_wo32(memory, offset + 0xc, upper_32_bits(inst));
}
-static void
+void
gv100_fifo_runlist_cgrp(struct nvkm_fifo_cgrp *cgrp,
struct nvkm_memory *memory, u32 offset)
{
@@ -57,9 +57,10 @@ gv100_fifo_runlist = {
.size = 16,
.cgrp = gv100_fifo_runlist_cgrp,
.chan = gv100_fifo_runlist_chan,
+ .commit = gk104_fifo_runlist_commit,
};
-static const struct nvkm_enum
+const struct nvkm_enum
gv100_fifo_fault_gpcclient[] = {
{ 0x00, "T1_0" },
{ 0x01, "T1_1" },
@@ -161,7 +162,7 @@ gv100_fifo_fault_gpcclient[] = {
{}
};
-static const struct nvkm_enum
+const struct nvkm_enum
gv100_fifo_fault_hubclient[] = {
{ 0x00, "VIP" },
{ 0x01, "CE0" },
@@ -223,7 +224,7 @@ gv100_fifo_fault_hubclient[] = {
{}
};
-static const struct nvkm_enum
+const struct nvkm_enum
gv100_fifo_fault_reason[] = {
{ 0x00, "PDE" },
{ 0x01, "PDE_SIZE" },
@@ -271,7 +272,7 @@ gv100_fifo_fault_engine[] = {
{}
};
-static const struct nvkm_enum
+const struct nvkm_enum
gv100_fifo_fault_access[] = {
{ 0x0, "VIRT_READ" },
{ 0x1, "VIRT_WRITE" },
@@ -287,7 +288,7 @@ gv100_fifo_fault_access[] = {
static const struct gk104_fifo_func
gv100_fifo = {
- .init_pbdma_timeout = gk208_fifo_init_pbdma_timeout,
+ .pbdma = &gm200_fifo_pbdma,
.fault.access = gv100_fifo_fault_access,
.fault.engine = gv100_fifo_fault_engine,
.fault.reason = gv100_fifo_fault_reason,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c
new file mode 100644
index 000000000000..98c80705bc61
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu104.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "cgrp.h"
+#include "changk104.h"
+#include "user.h"
+
+#include <core/gpuobj.h>
+
+#include <nvif/class.h>
+
+static void
+tu104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl,
+ struct nvkm_memory *mem, int nr)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ u64 addr = nvkm_memory_addr(mem);
+ /*XXX: target? */
+
+ nvkm_wr32(device, 0x002b00 + (runl * 0x10), lower_32_bits(addr));
+ nvkm_wr32(device, 0x002b04 + (runl * 0x10), upper_32_bits(addr));
+ nvkm_wr32(device, 0x002b08 + (runl * 0x10), nr);
+
+ /*XXX: how to wait? can you even wait? */
+}
+
+const struct gk104_fifo_runlist_func
+tu104_fifo_runlist = {
+ .size = 16,
+ .cgrp = gv100_fifo_runlist_cgrp,
+ .chan = gv100_fifo_runlist_chan,
+ .commit = tu104_fifo_runlist_commit,
+};
+
+static const struct nvkm_enum
+tu104_fifo_fault_engine[] = {
+ { 0x01, "DISPLAY" },
+ { 0x03, "PTP" },
+ { 0x06, "PWR_PMU" },
+ { 0x08, "IFB", NULL, NVKM_ENGINE_IFB },
+ { 0x09, "PERF" },
+ { 0x1f, "PHYSICAL" },
+ { 0x20, "HOST0" },
+ { 0x21, "HOST1" },
+ { 0x22, "HOST2" },
+ { 0x23, "HOST3" },
+ { 0x24, "HOST4" },
+ { 0x25, "HOST5" },
+ { 0x26, "HOST6" },
+ { 0x27, "HOST7" },
+ { 0x28, "HOST8" },
+ { 0x29, "HOST9" },
+ { 0x2a, "HOST10" },
+ { 0x2b, "HOST11" },
+ { 0x2c, "HOST12" },
+ { 0x2d, "HOST13" },
+ { 0x2e, "HOST14" },
+ { 0x80, "BAR1", NULL, NVKM_SUBDEV_BAR },
+ { 0xc0, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
+ {}
+};
+
+static void
+tu104_fifo_pbdma_init(struct gk104_fifo *fifo)
+{
+ struct nvkm_device *device = fifo->base.engine.subdev.device;
+ const u32 mask = (1 << fifo->pbdma_nr) - 1;
+ /*XXX: this is a bit of a guess at this point in time. */
+ nvkm_mask(device, 0xb65000, 0x80000fff, 0x80000000 | mask);
+}
+
+static const struct gk104_fifo_pbdma_func
+tu104_fifo_pbdma = {
+ .nr = gm200_fifo_pbdma_nr,
+ .init = tu104_fifo_pbdma_init,
+ .init_timeout = gk208_fifo_pbdma_init_timeout,
+};
+
+static const struct gk104_fifo_func
+tu104_fifo = {
+ .pbdma = &tu104_fifo_pbdma,
+ .fault.access = gv100_fifo_fault_access,
+ .fault.engine = tu104_fifo_fault_engine,
+ .fault.reason = gv100_fifo_fault_reason,
+ .fault.hubclient = gv100_fifo_fault_hubclient,
+ .fault.gpcclient = gv100_fifo_fault_gpcclient,
+ .runlist = &tu104_fifo_runlist,
+ .user = {{-1,-1,VOLTA_USERMODE_A }, tu104_fifo_user_new },
+ .chan = {{ 0, 0,TURING_CHANNEL_GPFIFO_A}, tu104_fifo_gpfifo_new },
+ .cgrp_force = true,
+};
+
+int
+tu104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&tu104_fifo, device, index, 4096, pfifo);
+}
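The register layout used by tu104_fifo_runlist_commit() above is a per-runlist block of three words at a 0x10 stride: address low, address high, then the entry count whose write kicks the submit. A user-space sketch of the address math (register access replaced by printf):

#include <stdint.h>
#include <stdio.h>

static void runlist_regs(int runl, uint64_t addr, int nr)
{
	unsigned lo  = 0x002b00 + runl * 0x10; /* runlist address, low 32 */
	unsigned hi  = 0x002b04 + runl * 0x10; /* runlist address, high 32 */
	unsigned cnt = 0x002b08 + runl * 0x10; /* entry count, kicks submit */

	printf("wr32(%#06x, %#010x)\n", lo, (unsigned)addr);
	printf("wr32(%#06x, %#010x)\n", hi, (unsigned)(addr >> 32));
	printf("wr32(%#06x, %d)\n", cnt, nr);
}

int main(void)
{
	runlist_regs(2, 0x1f0000000ull, 8);
	return 0;
}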
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
index ed840921ebe8..14b0c6bde8eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/user.h
@@ -3,4 +3,6 @@
#include "priv.h"
int gv100_fifo_user_new(const struct nvkm_oclass *, void *, u32,
struct nvkm_object **);
+int tu104_fifo_user_new(const struct nvkm_oclass *, void *, u32,
+ struct nvkm_object **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c
new file mode 100644
index 000000000000..8f98548a21f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu104.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "user.h"
+
+static int
+tu104_fifo_user_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct nvkm_device *device = object->engine->subdev.device;
+ *addr = 0xbb0000 + device->func->resource_addr(device, 0);
+ *size = 0x010000;
+ *type = NVKM_OBJECT_MAP_IO;
+ return 0;
+}
+
+static const struct nvkm_object_func
+tu104_fifo_user = {
+ .map = tu104_fifo_user_map,
+};
+
+int
+tu104_fifo_user_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nvkm_object **pobject)
+{
+ return nvkm_object_new_(&tu104_fifo_user, oclass, argv, argc, pobject);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 14be41f24155..427340153640 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -197,7 +197,7 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
case NVKM_SUBDEV_PMU:
debug_reg = 0xc08;
break;
- case NVKM_ENGINE_NVDEC:
+ case NVKM_ENGINE_NVDEC0:
debug_reg = 0xd00;
break;
case NVKM_ENGINE_SEC2:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
index e5830453813d..ab0282dc0736 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
@@ -5,3 +5,4 @@ nvkm-y += nvkm/subdev/bar/gf100.o
nvkm-y += nvkm/subdev/bar/gk20a.o
nvkm-y += nvkm/subdev/bar/gm107.o
nvkm-y += nvkm/subdev/bar/gm20b.o
+nvkm-y += nvkm/subdev/bar/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
index 243f0a5c8a62..209a6a40834a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
@@ -36,6 +36,16 @@ nvkm_bar_bar1_vmm(struct nvkm_device *device)
return device->bar->func->bar1.vmm(device->bar);
}
+void
+nvkm_bar_bar1_reset(struct nvkm_device *device)
+{
+ struct nvkm_bar *bar = device->bar;
+ if (bar) {
+ bar->func->bar1.init(bar);
+ bar->func->bar1.wait(bar);
+ }
+}
+
struct nvkm_vmm *
nvkm_bar_bar2_vmm(struct nvkm_device *device)
{
@@ -49,6 +59,16 @@ nvkm_bar_bar2_vmm(struct nvkm_device *device)
}
void
+nvkm_bar_bar2_reset(struct nvkm_device *device)
+{
+ struct nvkm_bar *bar = device->bar;
+ if (bar && bar->bar2) {
+ bar->func->bar2.init(bar);
+ bar->func->bar2.wait(bar);
+ }
+}
+
+void
nvkm_bar_bar2_fini(struct nvkm_device *device)
{
struct nvkm_bar *bar = device->bar;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c
new file mode 100644
index 000000000000..ecaead156e9b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu104.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <core/memory.h>
+#include <subdev/timer.h>
+
+static void
+tu104_bar_bar2_wait(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0xb80f50) & 0x0000000c))
+ break;
+ );
+}
+
+static void
+tu104_bar_bar2_fini(struct nvkm_bar *bar)
+{
+ nvkm_mask(bar->subdev.device, 0xb80f48, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_bar_bar2_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct gf100_bar *bar = gf100_bar(base);
+ u32 addr = nvkm_memory_addr(bar->bar[0].inst) >> 12;
+ if (bar->bar2_halve)
+ addr |= 0x40000000;
+ nvkm_wr32(device, 0xb80f48, 0x80000000 | addr);
+}
+
+static void
+tu104_bar_bar1_wait(struct nvkm_bar *bar)
+{
+ struct nvkm_device *device = bar->subdev.device;
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0xb80f50) & 0x00000003))
+ break;
+ );
+}
+
+static void
+tu104_bar_bar1_fini(struct nvkm_bar *bar)
+{
+ nvkm_mask(bar->subdev.device, 0xb80f40, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_bar_bar1_init(struct nvkm_bar *base)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct gf100_bar *bar = gf100_bar(base);
+ const u32 addr = nvkm_memory_addr(bar->bar[1].inst) >> 12;
+ nvkm_wr32(device, 0xb80f40, 0x80000000 | addr);
+}
+
+static const struct nvkm_bar_func
+tu104_bar = {
+ .dtor = gf100_bar_dtor,
+ .oneinit = gf100_bar_oneinit,
+ .bar1.init = tu104_bar_bar1_init,
+ .bar1.fini = tu104_bar_bar1_fini,
+ .bar1.wait = tu104_bar_bar1_wait,
+ .bar1.vmm = gf100_bar_bar1_vmm,
+ .bar2.init = tu104_bar_bar2_init,
+ .bar2.fini = tu104_bar_bar2_fini,
+ .bar2.wait = tu104_bar_bar2_wait,
+ .bar2.vmm = gf100_bar_bar2_vmm,
+ .flush = g84_bar_flush,
+};
+
+int
+tu104_bar_new(struct nvkm_device *device, int index, struct nvkm_bar **pbar)
+{
+ return gf100_bar_new_(&tu104_bar, device, index, pbar);
+}
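Both BAR init paths above build the same shape of enable word: bit 31 enables the BAR, the low bits carry the instance block address in 4KiB units, and (for BAR2 only) 0x40000000 selects the halved layout when bar2_halve is set. A sketch of the word construction:

#include <stdint.h>
#include <stdio.h>

static unsigned bar_enable_word(uint64_t inst_addr, int halve)
{
	unsigned v = 0x80000000u | (unsigned)(inst_addr >> 12);
	if (halve)                 /* BAR2 halved layout, per bar2_halve */
		v |= 0x40000000u;
	return v;
}

int main(void)
{
	printf("%#010x\n", bar_enable_word(0x1ff000, 0)); /* 0x800001ff */
	return 0;
}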
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
index 50a436926484..3ef505a5c01b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
@@ -13,3 +13,4 @@ nvkm-y += nvkm/subdev/devinit/gf100.o
nvkm-y += nvkm/subdev/devinit/gm107.o
nvkm-y += nvkm/subdev/devinit/gm200.o
nvkm-y += nvkm/subdev/devinit/gv100.o
+nvkm-y += nvkm/subdev/devinit/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index 17235e940ca9..59940dacc2ba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -105,6 +105,15 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
return pmu_exec(init, pmu.init_addr_pmu), 0;
}
+void
+gm200_devinit_preos(struct nv50_devinit *init, bool post)
+{
+ /* Optional: Execute PRE_OS application on PMU, which should at
+ * least take care of fans until a full PMU has been loaded.
+ */
+ pmu_load(init, 0x01, post, NULL, NULL);
+}
+
int
gm200_devinit_post(struct nvkm_devinit *base, bool post)
{
@@ -156,10 +165,7 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
return -ETIMEDOUT;
}
- /* Optional: Execute PRE_OS application on PMU, which should at
- * least take care of fans until a full PMU has been loaded.
- */
- pmu_load(init, 0x01, post, NULL, NULL);
+ gm200_devinit_preos(init, post);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 9b9f0dc1e192..72d130bb7f7c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -26,4 +26,5 @@ void gf100_devinit_preinit(struct nvkm_devinit *);
u64 gm107_devinit_disable(struct nvkm_devinit *);
int gm200_devinit_post(struct nvkm_devinit *, bool);
+void gm200_devinit_preos(struct nv50_devinit *, bool);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c
new file mode 100644
index 000000000000..aae87b3fc429
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu104.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pll.h>
+#include <subdev/clk/pll.h>
+
+static int
+tu104_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
+{
+ struct nvkm_subdev *subdev = &init->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvbios_pll info;
+ int head = type - PLL_VPLL0;
+ int N, fN, M, P;
+ int ret;
+
+ ret = nvbios_pll_parse(device->bios, type, &info);
+ if (ret)
+ return ret;
+
+ ret = gt215_pll_calc(subdev, &info, freq, &N, &fN, &M, &P);
+ if (ret < 0)
+ return ret;
+
+ switch (info.type) {
+ case PLL_VPLL0:
+ case PLL_VPLL1:
+ case PLL_VPLL2:
+ case PLL_VPLL3:
+ nvkm_wr32(device, 0x00ef10 + (head * 0x40), fN << 16);
+ nvkm_wr32(device, 0x00ef04 + (head * 0x40), (P << 16) |
+ (N << 8) |
+ (M << 0));
+ /*XXX*/
+ nvkm_wr32(device, 0x00ef0c + (head * 0x40), 0x00000900);
+ nvkm_wr32(device, 0x00ef00 + (head * 0x40), 0x02000014);
+ break;
+ default:
+ nvkm_warn(subdev, "%08x/%dkHz unimplemented\n", type, freq);

+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+tu104_devinit_post(struct nvkm_devinit *base, bool post)
+{
+ struct nv50_devinit *init = nv50_devinit(base);
+ gm200_devinit_preos(init, post);
+ return 0;
+}
+
+static const struct nvkm_devinit_func
+tu104_devinit = {
+ .init = nv50_devinit_init,
+ .post = tu104_devinit_post,
+ .pll_set = tu104_devinit_pll_set,
+ .disable = gm107_devinit_disable,
+};
+
+int
+tu104_devinit_new(struct nvkm_device *device, int index,
+ struct nvkm_devinit **pinit)
+{
+ return nv50_devinit_new_(&tu104_devinit, device, index, pinit);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
index 45bb46fb0929..794eb1745b2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/Kbuild
@@ -1,3 +1,4 @@
nvkm-y += nvkm/subdev/fault/base.o
nvkm-y += nvkm/subdev/fault/gp100.o
nvkm-y += nvkm/subdev/fault/gv100.o
+nvkm-y += nvkm/subdev/fault/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
index 16ad91c91a7b..4ba1e21e8fda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/base.c
@@ -23,21 +23,19 @@
#include <core/memory.h>
#include <core/notify.h>
-#include <subdev/bar.h>
-#include <subdev/mmu.h>
static void
nvkm_fault_ntfy_fini(struct nvkm_event *event, int type, int index)
{
struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
- fault->func->buffer.fini(fault->buffer[index]);
+ fault->func->buffer.intr(fault->buffer[index], false);
}
static void
nvkm_fault_ntfy_init(struct nvkm_event *event, int type, int index)
{
struct nvkm_fault *fault = container_of(event, typeof(*fault), event);
- fault->func->buffer.init(fault->buffer[index]);
+ fault->func->buffer.intr(fault->buffer[index], true);
}
static int
@@ -91,7 +89,6 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
{
struct nvkm_subdev *subdev = &fault->subdev;
struct nvkm_device *device = subdev->device;
- struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(device);
struct nvkm_fault_buffer *buffer;
int ret;
@@ -99,7 +96,7 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
return -ENOMEM;
buffer->fault = fault;
buffer->id = id;
- buffer->entries = fault->func->buffer.entries(buffer);
+ fault->func->buffer.info(buffer);
fault->buffer[id] = buffer;
nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);
@@ -110,12 +107,12 @@ nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
if (ret)
return ret;
- ret = nvkm_vmm_get(bar2, 12, nvkm_memory_size(buffer->mem),
- &buffer->vma);
- if (ret)
- return ret;
+ /* Pin fault buffer in BAR2. */
+ buffer->addr = nvkm_memory_bar2(buffer->mem);
+ if (buffer->addr == ~0ULL)
+ return -EFAULT;
- return nvkm_memory_map(buffer->mem, 0, bar2, buffer->vma, NULL, 0);
+ return 0;
}
static int
@@ -146,7 +143,6 @@ nvkm_fault_oneinit(struct nvkm_subdev *subdev)
static void *
nvkm_fault_dtor(struct nvkm_subdev *subdev)
{
- struct nvkm_vmm *bar2 = nvkm_bar_bar2_vmm(subdev->device);
struct nvkm_fault *fault = nvkm_fault(subdev);
int i;
@@ -154,7 +150,6 @@ nvkm_fault_dtor(struct nvkm_subdev *subdev)
for (i = 0; i < fault->buffer_nr; i++) {
if (fault->buffer[i]) {
- nvkm_vmm_put(bar2, &fault->buffer[i]->vma);
nvkm_memory_unref(&fault->buffer[i]->mem);
kfree(fault->buffer[i]);
}
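The buffer-setup change above replaces the explicit BAR2 VMM get/map pair with a single nvkm_memory_bar2() lookup that returns the pinned BAR2 address, using ~0ULL as the "no mapping" sentinel. A small model of that calling convention (memory_bar2() here is a hypothetical stub, not the nvkm helper):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t memory_bar2(int mappable)
{
	return mappable ? 0x10000ull : ~0ULL;
}

static int buffer_pin(uint64_t *addr)
{
	*addr = memory_bar2(0);
	if (*addr == ~0ULL)   /* sentinel: no BAR2 mapping available */
		return -EFAULT;
	return 0;
}

int main(void)
{
	uint64_t a;
	printf("ret=%d\n", buffer_pin(&a)); /* -14 */
	return 0;
}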
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
index 5e71db2e8d75..8fb96fe614f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gp100.c
@@ -21,7 +21,14 @@
*/
#include "priv.h"
-#include <subdev/mmu.h>
+#include <subdev/mc.h>
+
+static void
+gp100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
+{
+ struct nvkm_device *device = buffer->fault->subdev.device;
+ nvkm_mc_intr_mask(device, NVKM_SUBDEV_FAULT, enable);
+}
static void
gp100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
@@ -34,15 +41,17 @@ static void
gp100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
- nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->vma->addr));
- nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->vma->addr));
+ nvkm_wr32(device, 0x002a74, upper_32_bits(buffer->addr));
+ nvkm_wr32(device, 0x002a70, lower_32_bits(buffer->addr));
nvkm_mask(device, 0x002a70, 0x00000001, 0x00000001);
}
-static u32
-gp100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
+static void
+gp100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
- return nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
+ buffer->entries = nvkm_rd32(buffer->fault->subdev.device, 0x002a78);
+ buffer->get = 0x002a7c;
+ buffer->put = 0x002a80;
}
static void
@@ -56,9 +65,10 @@ gp100_fault = {
.intr = gp100_fault_intr,
.buffer.nr = 1,
.buffer.entry_size = 32,
- .buffer.entries = gp100_fault_buffer_entries,
+ .buffer.info = gp100_fault_buffer_info,
.buffer.init = gp100_fault_buffer_init,
.buffer.fini = gp100_fault_buffer_fini,
+ .buffer.intr = gp100_fault_buffer_intr,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
index 3cd610d7deb5..6fc54e17c935 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/gv100.c
@@ -30,9 +30,8 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
struct nvkm_memory *mem = buffer->mem;
- const u32 foff = buffer->id * 0x14;
- u32 get = nvkm_rd32(device, 0x100e2c + foff);
- u32 put = nvkm_rd32(device, 0x100e30 + foff);
+ u32 get = nvkm_rd32(device, buffer->get);
+ u32 put = nvkm_rd32(device, buffer->put);
if (put == get)
return;
@@ -51,7 +50,7 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
if (++get == buffer->entries)
get = 0;
- nvkm_wr32(device, 0x100e2c + foff, get);
+ nvkm_wr32(device, buffer->get, get);
info.addr = ((u64)addrhi << 32) | addrlo;
info.inst = ((u64)insthi << 32) | instlo;
@@ -70,13 +69,21 @@ gv100_fault_buffer_process(struct nvkm_fault_buffer *buffer)
}
static void
-gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+gv100_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
{
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
- const u32 foff = buffer->id * 0x14;
+ if (enable)
+ nvkm_mask(device, 0x100a2c, intr, intr);
+ else
+ nvkm_mask(device, 0x100a34, intr, intr);
+}
- nvkm_mask(device, 0x100a34, intr, intr);
+static void
+gv100_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+{
+ struct nvkm_device *device = buffer->fault->subdev.device;
+ const u32 foff = buffer->id * 0x14;
nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x00000000);
}
@@ -84,23 +91,25 @@ static void
gv100_fault_buffer_init(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
- const u32 intr = buffer->id ? 0x08000000 : 0x20000000;
const u32 foff = buffer->id * 0x14;
nvkm_mask(device, 0x100e34 + foff, 0xc0000000, 0x40000000);
- nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->vma->addr));
- nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->vma->addr));
+ nvkm_wr32(device, 0x100e28 + foff, upper_32_bits(buffer->addr));
+ nvkm_wr32(device, 0x100e24 + foff, lower_32_bits(buffer->addr));
nvkm_mask(device, 0x100e34 + foff, 0x80000000, 0x80000000);
- nvkm_mask(device, 0x100a2c, intr, intr);
}
-static u32
-gv100_fault_buffer_entries(struct nvkm_fault_buffer *buffer)
+static void
+gv100_fault_buffer_info(struct nvkm_fault_buffer *buffer)
{
struct nvkm_device *device = buffer->fault->subdev.device;
const u32 foff = buffer->id * 0x14;
+
nvkm_mask(device, 0x100e34 + foff, 0x40000000, 0x40000000);
- return nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
+
+ buffer->entries = nvkm_rd32(device, 0x100e34 + foff) & 0x000fffff;
+ buffer->get = 0x100e2c + foff;
+ buffer->put = 0x100e30 + foff;
}
static int
@@ -166,6 +175,8 @@ static void
gv100_fault_fini(struct nvkm_fault *fault)
{
nvkm_notify_put(&fault->nrpfb);
+ if (fault->buffer[0])
+ fault->func->buffer.fini(fault->buffer[0]);
nvkm_mask(fault->subdev.device, 0x100a34, 0x80000000, 0x80000000);
}
@@ -173,14 +184,15 @@ static void
gv100_fault_init(struct nvkm_fault *fault)
{
nvkm_mask(fault->subdev.device, 0x100a2c, 0x80000000, 0x80000000);
+ fault->func->buffer.init(fault->buffer[0]);
nvkm_notify_get(&fault->nrpfb);
}
-static int
+int
gv100_fault_oneinit(struct nvkm_fault *fault)
{
return nvkm_notify_init(&fault->buffer[0]->object, &fault->event,
- gv100_fault_ntfy_nrpfb, false, NULL, 0, 0,
+ gv100_fault_ntfy_nrpfb, true, NULL, 0, 0,
&fault->nrpfb);
}
@@ -192,9 +204,10 @@ gv100_fault = {
.intr = gv100_fault_intr,
.buffer.nr = 2,
.buffer.entry_size = 32,
- .buffer.entries = gv100_fault_buffer_entries,
+ .buffer.info = gv100_fault_buffer_info,
.buffer.init = gv100_fault_buffer_init,
.buffer.fini = gv100_fault_buffer_fini,
+ .buffer.intr = gv100_fault_buffer_intr,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
index e4d2f5234fd1..8ca8b2876dad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/priv.h
@@ -12,8 +12,10 @@ struct nvkm_fault_buffer {
struct nvkm_fault *fault;
int id;
int entries;
+ u32 get;
+ u32 put;
struct nvkm_memory *mem;
- struct nvkm_vma *vma;
+ u64 addr;
};
int nvkm_fault_new_(const struct nvkm_fault_func *, struct nvkm_device *,
@@ -27,9 +29,12 @@ struct nvkm_fault_func {
struct {
int nr;
u32 entry_size;
- u32 (*entries)(struct nvkm_fault_buffer *);
+ void (*info)(struct nvkm_fault_buffer *);
void (*init)(struct nvkm_fault_buffer *);
void (*fini)(struct nvkm_fault_buffer *);
+ void (*intr)(struct nvkm_fault_buffer *, bool enable);
} buffer;
};
+
+int gv100_fault_oneinit(struct nvkm_fault *);
#endif
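Recording per-chipset GET/PUT register offsets in the buffer (0x002a7c/0x002a80 on gp100, 0x100e2c/0x100e30 + foff on gv100, 0xb83008/0xb8300c + foff on tu104) lets one consume loop serve all three. A simulated ring consumer with software get/put indices, matching the wrap logic in gv100_fault_buffer_process():

#include <stdio.h>

struct buf { unsigned get, put, entries; };

static void process(struct buf *b)
{
	while (b->get != b->put) {
		printf("entry %u\n", b->get);
		if (++b->get == b->entries)  /* wrap at end of ring */
			b->get = 0;
	}
}

int main(void)
{
	struct buf b = { .get = 30, .put = 2, .entries = 32 };
	process(&b);  /* consumes 30, 31, 0, 1 */
	return 0;
}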
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c
new file mode 100644
index 000000000000..9c8a3adf99d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu104.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/memory.h>
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+
+#include <nvif/class.h>
+
+static void
+tu104_fault_buffer_intr(struct nvkm_fault_buffer *buffer, bool enable)
+{
+ /*XXX: Earlier versions of RM touched the old regs on Turing,
+ * which don't appear to actually work anymore, but newer
+ * versions of RM don't appear to touch anything at all...
+ */
+}
+
+static void
+tu104_fault_buffer_fini(struct nvkm_fault_buffer *buffer)
+{
+ struct nvkm_device *device = buffer->fault->subdev.device;
+ const u32 foff = buffer->id * 0x20;
+ nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x00000000);
+}
+
+static void
+tu104_fault_buffer_init(struct nvkm_fault_buffer *buffer)
+{
+ struct nvkm_device *device = buffer->fault->subdev.device;
+ const u32 foff = buffer->id * 0x20;
+
+ nvkm_mask(device, 0xb83010 + foff, 0xc0000000, 0x40000000);
+ nvkm_wr32(device, 0xb83004 + foff, upper_32_bits(buffer->addr));
+ nvkm_wr32(device, 0xb83000 + foff, lower_32_bits(buffer->addr));
+ nvkm_mask(device, 0xb83010 + foff, 0x80000000, 0x80000000);
+}
+
+static void
+tu104_fault_buffer_info(struct nvkm_fault_buffer *buffer)
+{
+ struct nvkm_device *device = buffer->fault->subdev.device;
+ const u32 foff = buffer->id * 0x20;
+
+ nvkm_mask(device, 0xb83010 + foff, 0x40000000, 0x40000000);
+
+ buffer->entries = nvkm_rd32(device, 0xb83010 + foff) & 0x000fffff;
+ buffer->get = 0xb83008 + foff;
+ buffer->put = 0xb8300c + foff;
+}
+
+static void
+tu104_fault_intr_fault(struct nvkm_fault *fault)
+{
+ struct nvkm_subdev *subdev = &fault->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_fault_data info;
+ const u32 addrlo = nvkm_rd32(device, 0xb83080);
+ const u32 addrhi = nvkm_rd32(device, 0xb83084);
+ const u32 info0 = nvkm_rd32(device, 0xb83088);
+ const u32 insthi = nvkm_rd32(device, 0xb8308c);
+ const u32 info1 = nvkm_rd32(device, 0xb83090);
+
+ info.addr = ((u64)addrhi << 32) | addrlo;
+ info.inst = ((u64)insthi << 32) | (info0 & 0xfffff000);
+ info.time = 0;
+ info.engine = (info0 & 0x000000ff);
+ info.valid = (info1 & 0x80000000) >> 31;
+ info.gpc = (info1 & 0x1f000000) >> 24;
+ info.hub = (info1 & 0x00100000) >> 20;
+ info.access = (info1 & 0x000f0000) >> 16;
+ info.client = (info1 & 0x00007f00) >> 8;
+ info.reason = (info1 & 0x0000001f);
+
+ nvkm_fifo_fault(device->fifo, &info);
+}
+
+static void
+tu104_fault_intr(struct nvkm_fault *fault)
+{
+ struct nvkm_subdev *subdev = &fault->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 stat = nvkm_rd32(device, 0xb83094);
+
+ if (stat & 0x80000000) {
+ tu104_fault_intr_fault(fault);
+ nvkm_wr32(device, 0xb83094, 0x80000000);
+ stat &= ~0x80000000;
+ }
+
+ if (stat & 0x00000200) {
+ if (fault->buffer[0]) {
+ nvkm_event_send(&fault->event, 1, 0, NULL, 0);
+ stat &= ~0x00000200;
+ }
+ }
+
+ /*XXX: guess, can't confirm until we get fw... */
+ if (stat & 0x00000100) {
+ if (fault->buffer[1]) {
+ nvkm_event_send(&fault->event, 1, 1, NULL, 0);
+ stat &= ~0x00000100;
+ }
+ }
+
+ if (stat) {
+ nvkm_debug(subdev, "intr %08x\n", stat);
+ }
+}
+
+static void
+tu104_fault_fini(struct nvkm_fault *fault)
+{
+ nvkm_notify_put(&fault->nrpfb);
+ if (fault->buffer[0])
+ fault->func->buffer.fini(fault->buffer[0]);
+ /*XXX: disable priv faults */
+}
+
+static void
+tu104_fault_init(struct nvkm_fault *fault)
+{
+ /*XXX: enable priv faults */
+ fault->func->buffer.init(fault->buffer[0]);
+ nvkm_notify_get(&fault->nrpfb);
+}
+
+static const struct nvkm_fault_func
+tu104_fault = {
+ .oneinit = gv100_fault_oneinit,
+ .init = tu104_fault_init,
+ .fini = tu104_fault_fini,
+ .intr = tu104_fault_intr,
+ .buffer.nr = 2,
+ .buffer.entry_size = 32,
+ .buffer.info = tu104_fault_buffer_info,
+ .buffer.init = tu104_fault_buffer_init,
+ .buffer.fini = tu104_fault_buffer_fini,
+ .buffer.intr = tu104_fault_buffer_intr,
+};
+
+int
+tu104_fault_new(struct nvkm_device *device, int index,
+ struct nvkm_fault **pfault)
+{
+ return nvkm_fault_new_(&tu104_fault, device, index, pfault);
+}
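A standalone decoder for the info1 word, using exactly the masks and shifts from tu104_fault_intr_fault() above (the sample value is arbitrary):

#include <stdint.h>
#include <stdio.h>

struct fault_info {
	unsigned valid, gpc, hub, access, client, reason;
};

static struct fault_info decode_info1(uint32_t info1)
{
	return (struct fault_info){
		.valid  = (info1 & 0x80000000) >> 31,
		.gpc    = (info1 & 0x1f000000) >> 24,
		.hub    = (info1 & 0x00100000) >> 20,
		.access = (info1 & 0x000f0000) >> 16,
		.client = (info1 & 0x00007f00) >> 8,
		.reason = (info1 & 0x0000001f),
	};
}

int main(void)
{
	struct fault_info i = decode_info1(0x81021f03);
	printf("valid=%u gpc=%u hub=%u access=%u client=%u reason=%u\n",
	       i.valid, i.gpc, i.hub, i.access, i.client, i.reason);
	return 0;
}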
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 434d2fc5bb1c..b2bb5a3ccb02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -68,10 +68,13 @@ nvkm_fb_bios_memtype(struct nvkm_bios *bios)
if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
switch (M0203E.type) {
- case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
- case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
- case M0203E_TYPE_GDDR3: return NVKM_RAM_TYPE_GDDR3;
- case M0203E_TYPE_GDDR5: return NVKM_RAM_TYPE_GDDR5;
+ case M0203E_TYPE_DDR2 : return NVKM_RAM_TYPE_DDR2;
+ case M0203E_TYPE_DDR3 : return NVKM_RAM_TYPE_DDR3;
+ case M0203E_TYPE_GDDR3 : return NVKM_RAM_TYPE_GDDR3;
+ case M0203E_TYPE_GDDR5 : return NVKM_RAM_TYPE_GDDR5;
+ case M0203E_TYPE_GDDR5X: return NVKM_RAM_TYPE_GDDR5X;
+ case M0203E_TYPE_GDDR6 : return NVKM_RAM_TYPE_GDDR6;
+ case M0203E_TYPE_HBM2 : return NVKM_RAM_TYPE_HBM2;
default:
nvkm_warn(subdev, "M0203E type %02x\n", M0203E.type);
return NVKM_RAM_TYPE_UNKNOWN;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
index 24c7bd505731..b11867f682cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
@@ -184,6 +184,9 @@ nvkm_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
[NVKM_RAM_TYPE_GDDR3 ] = "GDDR3",
[NVKM_RAM_TYPE_GDDR4 ] = "GDDR4",
[NVKM_RAM_TYPE_GDDR5 ] = "GDDR5",
+ [NVKM_RAM_TYPE_GDDR5X ] = "GDDR5X",
+ [NVKM_RAM_TYPE_GDDR6 ] = "GDDR6",
+ [NVKM_RAM_TYPE_HBM2 ] = "HBM2",
};
struct nvkm_subdev *subdev = &fb->subdev;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
index db48a1daca0c..02c4eb28cef4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
@@ -288,6 +288,19 @@ nv50_instobj_addr(struct nvkm_memory *memory)
return nvkm_memory_addr(nv50_instobj(memory)->ram);
}
+static u64
+nv50_instobj_bar2(struct nvkm_memory *memory)
+{
+ struct nv50_instobj *iobj = nv50_instobj(memory);
+ u64 addr = ~0ULL;
+ if (nv50_instobj_acquire(&iobj->base.memory)) {
+ iobj->lru.next = NULL; /* Exclude from eviction. */
+ addr = iobj->bar->addr;
+ }
+ nv50_instobj_release(&iobj->base.memory);
+ return addr;
+}
+
static enum nvkm_memory_target
nv50_instobj_target(struct nvkm_memory *memory)
{
@@ -325,8 +338,9 @@ static const struct nvkm_memory_func
nv50_instobj_func = {
.dtor = nv50_instobj_dtor,
.target = nv50_instobj_target,
- .size = nv50_instobj_size,
+ .bar2 = nv50_instobj_bar2,
.addr = nv50_instobj_addr,
+ .size = nv50_instobj_size,
.boot = nv50_instobj_boot,
.acquire = nv50_instobj_acquire,
.release = nv50_instobj_release,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 2befbe36dc28..f3b06329c338 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -12,3 +12,4 @@ nvkm-y += nvkm/subdev/mc/gk104.o
nvkm-y += nvkm/subdev/mc/gk20a.o
nvkm-y += nvkm/subdev/mc/gp100.o
nvkm-y += nvkm/subdev/mc/gp10b.o
+nvkm-y += nvkm/subdev/mc/tu104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 09f669ac6630..0e57ab2a709f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -108,6 +108,9 @@ nvkm_mc_intr(struct nvkm_device *device, bool *handled)
if (stat)
nvkm_error(&mc->subdev, "intr %08x\n", stat);
*handled = intr != 0;
+
+ if (mc->func->intr_hack)
+ mc->func->intr_hack(mc, handled);
}
static u32
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index d9e3691d45b7..eb91a4cf452b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -26,6 +26,7 @@ struct nvkm_mc_func {
void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
/* retrieve pending interrupt mask (NV_PMC_INTR) */
u32 (*intr_stat)(struct nvkm_mc *);
+ void (*intr_hack)(struct nvkm_mc *, bool *handled);
const struct nvkm_mc_map *reset;
void (*unk260)(struct nvkm_mc *, u32);
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c
new file mode 100644
index 000000000000..b7165bd18999
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/tu104.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+tu104_mc_intr_hack(struct nvkm_mc *mc, bool *handled)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ u32 stat = nvkm_rd32(device, 0xb81010);
+ if (stat & 0x00000050) {
+ struct nvkm_subdev *subdev =
+ nvkm_device_subdev(device, NVKM_SUBDEV_FAULT);
+ nvkm_wr32(device, 0xb81010, stat & 0x00000050);
+ if (subdev)
+ nvkm_subdev_intr(subdev);
+ *handled = true;
+ }
+}
+
+static const struct nvkm_mc_func
+tu104_mc = {
+ .init = nv50_mc_init,
+ .intr = gp100_mc_intr,
+ .intr_unarm = gp100_mc_intr_unarm,
+ .intr_rearm = gp100_mc_intr_rearm,
+ .intr_mask = gp100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
+ .intr_hack = tu104_mc_intr_hack,
+ .reset = gk104_mc_reset,
+};
+
+int
+tu104_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return gp100_mc_new_(&tu104_mc, device, index, pmc);
+}
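The intr_hack plumbing above follows nvkm's usual optional-hook pattern: the shared interrupt path calls the pointer only when the per-chipset function table provides one, so earlier chips pay nothing for the Turing workaround. A standalone sketch of the pattern (names hypothetical):

#include <stdbool.h>
#include <stddef.h>

struct chip;

struct chip_func {
	void (*intr)(struct chip *, bool *handled);
	void (*intr_hack)(struct chip *, bool *handled); /* optional; may be NULL */
};

struct chip {
	const struct chip_func *func;
};

static void chip_intr(struct chip *chip)
{
	bool handled = false;

	chip->func->intr(chip, &handled);
	if (chip->func->intr_hack)       /* only tables that need the quirk set this */
		chip->func->intr_hack(chip, &handled);
}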
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
index 58a24e3a0598..8966180b36cc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
@@ -13,6 +13,7 @@ nvkm-y += nvkm/subdev/mmu/gm20b.o
nvkm-y += nvkm/subdev/mmu/gp100.o
nvkm-y += nvkm/subdev/mmu/gp10b.o
nvkm-y += nvkm/subdev/mmu/gv100.o
+nvkm-y += nvkm/subdev/mmu/tu104.o
nvkm-y += nvkm/subdev/mmu/mem.o
nvkm-y += nvkm/subdev/mmu/memnv04.o
@@ -33,6 +34,7 @@ nvkm-y += nvkm/subdev/mmu/vmmgm20b.o
nvkm-y += nvkm/subdev/mmu/vmmgp100.o
nvkm-y += nvkm/subdev/mmu/vmmgp10b.o
nvkm-y += nvkm/subdev/mmu/vmmgv100.o
+nvkm-y += nvkm/subdev/mmu/vmmtu104.o
nvkm-y += nvkm/subdev/mmu/umem.o
nvkm-y += nvkm/subdev/mmu/ummu.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c
new file mode 100644
index 000000000000..8e6f4096170d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu104.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "mem.h"
+#include "vmm.h"
+
+#include <core/option.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_mmu_func
+tu104_mmu = {
+ .dma_bits = 47,
+ .mmu = {{ -1, -1, NVIF_CLASS_MMU_GF100}},
+ .mem = {{ -1, 0, NVIF_CLASS_MEM_GF100}, gf100_mem_new, gf100_mem_map },
+ .vmm = {{ -1, 0, NVIF_CLASS_VMM_GP100}, tu104_vmm_new },
+ .kind = gm200_mmu_kind,
+ .kind_sys = true,
+};
+
+int
+tu104_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
+{
+ return nvkm_mmu_new_(&tu104_mmu, device, index, pmmu);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
index 37b201b95f15..6889076097ec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
@@ -134,23 +134,10 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc)
goto fail;
}
- if (vma->addr != addr) {
- const u64 tail = vma->size + vma->addr - addr;
- if (ret = -ENOMEM, !(vma = nvkm_vma_tail(vma, tail)))
- goto fail;
- vma->part = true;
- nvkm_vmm_node_insert(vmm, vma);
- }
-
- if (vma->size != size) {
- const u64 tail = vma->size - size;
- struct nvkm_vma *tmp;
- if (ret = -ENOMEM, !(tmp = nvkm_vma_tail(vma, tail))) {
- nvkm_vmm_unmap_region(vmm, vma);
- goto fail;
- }
- tmp->part = true;
- nvkm_vmm_node_insert(vmm, tmp);
+ vma = nvkm_vmm_node_split(vmm, vma, addr, size);
+ if (!vma) {
+ ret = -ENOMEM;
+ goto fail;
}
}
vma->busy = true;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 7459def78d50..6b87fff014b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -767,6 +767,20 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
return new;
}
+static inline void
+nvkm_vmm_free_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ rb_erase(&vma->tree, &vmm->free);
+}
+
+static inline void
+nvkm_vmm_free_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ nvkm_vmm_free_remove(vmm, vma);
+ list_del(&vma->head);
+ kfree(vma);
+}
+
static void
nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
@@ -795,7 +809,21 @@ nvkm_vmm_free_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
rb_insert_color(&vma->tree, &vmm->free);
}
-void
+static inline void
+nvkm_vmm_node_remove(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ rb_erase(&vma->tree, &vmm->root);
+}
+
+static inline void
+nvkm_vmm_node_delete(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
+{
+ nvkm_vmm_node_remove(vmm, vma);
+ list_del(&vma->head);
+ kfree(vma);
+}
+
+static void
nvkm_vmm_node_insert(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
struct rb_node **ptr = &vmm->root.rb_node;
@@ -834,6 +862,78 @@ nvkm_vmm_node_search(struct nvkm_vmm *vmm, u64 addr)
return NULL;
}
+#define node(root, dir) (((root)->head.dir == &vmm->list) ? NULL : \
+ list_entry((root)->head.dir, struct nvkm_vma, head))
+
+static struct nvkm_vma *
+nvkm_vmm_node_merge(struct nvkm_vmm *vmm, struct nvkm_vma *prev,
+ struct nvkm_vma *vma, struct nvkm_vma *next, u64 size)
+{
+ if (next) {
+ if (vma->size == size) {
+ vma->size += next->size;
+ nvkm_vmm_node_delete(vmm, next);
+ if (prev) {
+ prev->size += vma->size;
+ nvkm_vmm_node_delete(vmm, vma);
+ return prev;
+ }
+ return vma;
+ }
+ BUG_ON(prev);
+
+ nvkm_vmm_node_remove(vmm, next);
+ vma->size -= size;
+ next->addr -= size;
+ next->size += size;
+ nvkm_vmm_node_insert(vmm, next);
+ return next;
+ }
+
+ if (prev) {
+ if (vma->size != size) {
+ nvkm_vmm_node_remove(vmm, vma);
+ prev->size += size;
+ vma->addr += size;
+ vma->size -= size;
+ nvkm_vmm_node_insert(vmm, vma);
+ } else {
+ prev->size += vma->size;
+ nvkm_vmm_node_delete(vmm, vma);
+ }
+ return prev;
+ }
+
+ return vma;
+}
+
+struct nvkm_vma *
+nvkm_vmm_node_split(struct nvkm_vmm *vmm,
+ struct nvkm_vma *vma, u64 addr, u64 size)
+{
+ struct nvkm_vma *prev = NULL;
+
+ if (vma->addr != addr) {
+ prev = vma;
+ if (!(vma = nvkm_vma_tail(vma, vma->size + vma->addr - addr)))
+ return NULL;
+ vma->part = true;
+ nvkm_vmm_node_insert(vmm, vma);
+ }
+
+ if (vma->size != size) {
+ struct nvkm_vma *tmp;
+ if (!(tmp = nvkm_vma_tail(vma, vma->size - size))) {
+ nvkm_vmm_node_merge(vmm, prev, vma, NULL, vma->size);
+ return NULL;
+ }
+ tmp->part = true;
+ nvkm_vmm_node_insert(vmm, tmp);
+ }
+
+ return vma;
+}
+
static void
nvkm_vmm_dtor(struct nvkm_vmm *vmm)
{
@@ -954,37 +1054,20 @@ nvkm_vmm_new_(const struct nvkm_vmm_func *func, struct nvkm_mmu *mmu,
return nvkm_vmm_ctor(func, mmu, hdr, addr, size, key, name, *pvmm);
}
-#define node(root, dir) ((root)->head.dir == &vmm->list) ? NULL : \
- list_entry((root)->head.dir, struct nvkm_vma, head)
-
void
nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
{
- struct nvkm_vma *next;
+ struct nvkm_vma *next = node(vma, next);
+ struct nvkm_vma *prev = NULL;
nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
nvkm_memory_unref(&vma->memory);
- if (vma->part) {
- struct nvkm_vma *prev = node(vma, prev);
- if (!prev->memory) {
- prev->size += vma->size;
- rb_erase(&vma->tree, &vmm->root);
- list_del(&vma->head);
- kfree(vma);
- vma = prev;
- }
- }
-
- next = node(vma, next);
- if (next && next->part) {
- if (!next->memory) {
- vma->size += next->size;
- rb_erase(&next->tree, &vmm->root);
- list_del(&next->head);
- kfree(next);
- }
- }
+ if (!vma->part || ((prev = node(vma, prev)), prev->memory))
+ prev = NULL;
+ if (!next->part || next->memory)
+ next = NULL;
+ nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
}
void
@@ -1163,18 +1246,14 @@ nvkm_vmm_put_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
struct nvkm_vma *prev, *next;
if ((prev = node(vma, prev)) && !prev->used) {
- rb_erase(&prev->tree, &vmm->free);
- list_del(&prev->head);
vma->addr = prev->addr;
vma->size += prev->size;
- kfree(prev);
+ nvkm_vmm_free_delete(vmm, prev);
}
if ((next = node(vma, next)) && !next->used) {
- rb_erase(&next->tree, &vmm->free);
- list_del(&next->head);
vma->size += next->size;
- kfree(next);
+ nvkm_vmm_free_delete(vmm, next);
}
nvkm_vmm_free_insert(vmm, vma);
@@ -1250,7 +1329,7 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
}
/* Remove VMA from the list of allocated nodes. */
- rb_erase(&vma->tree, &vmm->root);
+ nvkm_vmm_node_remove(vmm, vma);
/* Merge VMA back into the free list. */
vma->page = NVKM_VMA_PAGE_NONE;
@@ -1357,7 +1436,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
tail = ALIGN_DOWN(tail, vmm->func->page_block);
if (addr <= tail && tail - addr >= size) {
- rb_erase(&this->tree, &vmm->free);
+ nvkm_vmm_free_remove(vmm, this);
vma = this;
break;
}
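For orientation: nvkm_vmm_node_split() carves [addr, addr + size) out of an existing node by splitting at most twice, once at the front and once at the back, and nvkm_vmm_node_merge() undoes a split when the second allocation fails or when neighbouring regions become mergeable again. The same splitting arithmetic on a plain doubly linked list, as a self-contained sketch (a hypothetical struct region stands in for struct nvkm_vma; the rbtree bookkeeping is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct region {
	uint64_t addr, size;
	bool part;
	struct region *prev, *next;
};

/* Split the last tail bytes off *r into a new node, like nvkm_vma_tail(). */
static struct region *region_tail(struct region *r, uint64_t tail)
{
	struct region *new = calloc(1, sizeof(*new));

	if (!new)
		return NULL;
	new->addr = r->addr + (r->size - tail);
	new->size = tail;
	r->size -= tail;
	new->prev = r;
	new->next = r->next;
	if (r->next)
		r->next->prev = new;
	r->next = new;
	return new;
}

/* Carve [addr, addr + size) out of *r; returns the middle piece.
 * Precondition: the requested range lies entirely within *r. */
static struct region *region_split(struct region *r, uint64_t addr, uint64_t size)
{
	if (r->addr != addr) {           /* split off the leading piece */
		r = region_tail(r, r->addr + r->size - addr);
		if (!r)
			return NULL;
		r->part = true;
	}
	if (r->size != size) {           /* split off the trailing piece */
		struct region *tmp = region_tail(r, r->size - size);

		if (!tmp)
			return NULL;     /* the real code merges the lead back here */
		tmp->part = true;
	}
	return r;
}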
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 1a3b0a3724ca..42ad326521a3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -157,6 +157,8 @@ int nvkm_vmm_ctor(const struct nvkm_vmm_func *, struct nvkm_mmu *,
u32 pd_header, u64 addr, u64 size, struct lock_class_key *,
const char *name, struct nvkm_vmm *);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
+struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
+ u64 addr, u64 size);
int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
bool sparse, u8 page, u8 align, u64 size,
struct nvkm_vma **pvma);
@@ -165,7 +167,6 @@ void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma);
struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);
-void nvkm_vmm_node_insert(struct nvkm_vmm *, struct nvkm_vma *);
int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
u64, u64, void *, u32, struct lock_class_key *,
@@ -200,6 +201,8 @@ int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gp100_vmm_flush(struct nvkm_vmm *, int);
+int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
+
int nv04_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
@@ -239,6 +242,9 @@ int gp10b_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
int gv100_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
struct lock_class_key *, const char *,
struct nvkm_vmm **);
+int tu104_vmm_new(struct nvkm_mmu *, u64, u64, void *, u32,
+ struct lock_class_key *, const char *,
+ struct nvkm_vmm **);
#define VMM_PRINT(l,v,p,f,a...) do { \
struct nvkm_vmm *_vmm = (v); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c
new file mode 100644
index 000000000000..adaadd92110f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu104.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <subdev/timer.h>
+
+static void
+tu104_vmm_flush(struct nvkm_vmm *vmm, int depth)
+{
+ struct nvkm_subdev *subdev = &vmm->mmu->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 type = depth << 24; /*XXX: not confirmed */
+
+ type = 0x00000001; /* PAGE_ALL */
+ if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
+ type |= 0x00000004; /* HUB_ONLY */
+
+ mutex_lock(&subdev->mutex);
+
+ nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+ nvkm_wr32(device, 0xb830a4, 0x00000000);
+ nvkm_wr32(device, 0x100e68, 0x00000000);
+ nvkm_wr32(device, 0xb830b0, 0x80000000 | type);
+
+ nvkm_msec(device, 2000,
+ if (!(nvkm_rd32(device, 0xb830b0) & 0x80000000))
+ break;
+ );
+
+ mutex_unlock(&subdev->mutex);
+}
+
+static const struct nvkm_vmm_func
+tu104_vmm = {
+ .join = gv100_vmm_join,
+ .part = gf100_vmm_part,
+ .aper = gf100_vmm_aper,
+ .valid = gp100_vmm_valid,
+ .flush = tu104_vmm_flush,
+ .page = {
+ { 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
+ { 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
+ { 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
+ {}
+ }
+};
+
+int
+tu104_vmm_new(struct nvkm_mmu *mmu, u64 addr, u64 size,
+ void *argv, u32 argc, struct lock_class_key *key,
+ const char *name, struct nvkm_vmm **pvmm)
+{
+ return nv04_vmm_new_(&tu104_vmm, mmu, 0, addr, size,
+ argv, argc, key, name, pvmm);
+}
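tu104_vmm_flush() is the standard trigger-then-poll MMIO idiom: write the invalidate request with bit 31 set, then spin until the hardware clears that bit. Stripped of the nvkm helpers, the shape is roughly this (accessors and register layout hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for nvkm_rd32()/nvkm_wr32()/nvkm_timer_read(). */
uint32_t mmio_rd32(uint32_t addr);
void mmio_wr32(uint32_t addr, uint32_t data);
uint64_t now_ns(void);

#define TRIGGER 0x80000000u

static bool invalidate(uint32_t reg, uint32_t type, uint64_t timeout_ns)
{
	uint64_t deadline = now_ns() + timeout_ns;

	mmio_wr32(reg, TRIGGER | type);      /* kick the invalidate */
	while (mmio_rd32(reg) & TRIGGER) {   /* hardware clears bit 31 when done */
		if (now_ns() > deadline)
			return false;        /* timed out */
	}
	return true;
}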
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
index 1f7a3c1a7f50..84a2f243ed9b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
@@ -59,10 +59,10 @@ gp102_run_secure_scrub(struct nvkm_secboot *sb)
nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
- engine = nvkm_engine_ref(&device->nvdec->engine);
+ engine = nvkm_engine_ref(&device->nvdec[0]->engine);
if (IS_ERR(engine))
return PTR_ERR(engine);
- falcon = device->nvdec->falcon;
+ falcon = device->nvdec[0]->falcon;
nvkm_falcon_get(falcon, &sb->subdev);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index 36de23d12ae4..dd922033628c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -23,6 +23,42 @@
*/
#include "priv.h"
+s64
+nvkm_timer_wait_test(struct nvkm_timer_wait *wait)
+{
+ struct nvkm_subdev *subdev = &wait->tmr->subdev;
+ u64 time = nvkm_timer_read(wait->tmr);
+
+ if (wait->reads == 0) {
+ wait->time0 = time;
+ wait->time1 = time;
+ }
+
+ if (wait->time1 == time) {
+ if (wait->reads++ == 16) {
+ nvkm_fatal(subdev, "stalled at %016llx\n", time);
+ return -ETIMEDOUT;
+ }
+ } else {
+ wait->time1 = time;
+ wait->reads = 1;
+ }
+
+ if (wait->time1 - wait->time0 > wait->limit)
+ return -ETIMEDOUT;
+
+ return wait->time1 - wait->time0;
+}
+
+void
+nvkm_timer_wait_init(struct nvkm_device *device, u64 nsec,
+ struct nvkm_timer_wait *wait)
+{
+ wait->tmr = device->timer;
+ wait->limit = nsec;
+ wait->reads = 0;
+}
+
u64
nvkm_timer_read(struct nvkm_timer *tmr)
{
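The new nvkm_timer_wait_init()/nvkm_timer_wait_test() pair factors the usual poll loop into two calls: _init records the time budget, _test returns the elapsed nanoseconds or -ETIMEDOUT, and it additionally declares the timer itself stalled if sixteen consecutive reads return the same value. No caller appears in this hunk, so the intended usage below is a sketch (thing_is_ready() is hypothetical):

static int wait_for_thing(struct nvkm_device *device)
{
	struct nvkm_timer_wait wait;

	nvkm_timer_wait_init(device, 2000000000ULL, &wait);  /* 2s budget, in ns */
	do {
		if (thing_is_ready(device))                  /* hypothetical condition */
			return 0;
	} while (nvkm_timer_wait_test(&wait) >= 0);

	return -ETIMEDOUT;   /* budget expired, or the timer itself stalled */
}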
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index 4f1f3e890650..39081eadfd84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -86,7 +86,7 @@ gk104_top_oneinit(struct nvkm_top *top)
case 0x0000000d: A_(SEC2 ); break;
case 0x0000000e: B_(NVENC ); break;
case 0x0000000f: A_(NVENC1); break;
- case 0x00000010: A_(NVDEC ); break;
+ case 0x00000010: B_(NVDEC ); break;
case 0x00000013: B_(CE ); break;
break;
default:
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 1f8161b041be..465120809eb3 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -177,6 +177,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags);
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 0a485c5b982e..00a9c2ab9e6c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5418,9 +5418,15 @@ static int dsi_probe(struct platform_device *pdev)
dsi->num_lanes_supported = 3;
}
+ r = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (r) {
+ DSSERR("Failed to populate DSI child devices: %d\n", r);
+ goto err_pm_disable;
+ }
+
r = dsi_init_output(dsi);
if (r)
- goto err_pm_disable;
+ goto err_of_depopulate;
r = dsi_probe_of(dsi);
if (r) {
@@ -5428,22 +5434,16 @@ static int dsi_probe(struct platform_device *pdev)
goto err_uninit_output;
}
- r = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (r) {
- DSSERR("Failed to populate DSI child devices: %d\n", r);
- goto err_uninit_output;
- }
-
r = component_add(&pdev->dev, &dsi_component_ops);
if (r)
- goto err_of_depopulate;
+ goto err_uninit_output;
return 0;
-err_of_depopulate:
- of_platform_depopulate(dev);
err_uninit_output:
dsi_uninit_output(dsi);
+err_of_depopulate:
+ of_platform_depopulate(dev);
err_pm_disable:
pm_runtime_disable(dev);
return r;
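The dsi_probe() reshuffle above is about keeping the error unwinding a mirror image of the setup order: child devices are now populated first, so they must be depopulated last. The generic shape of the goto-unwind idiom, for reference (step_*/undo_* are hypothetical):

extern int step_a(void), step_b(void), step_c(void);
extern void undo_a(void), undo_b(void);

static int example_probe(void)
{
	int r;

	r = step_a();            /* of_platform_populate() in the hunk */
	if (r)
		return r;

	r = step_b();            /* dsi_init_output() */
	if (r)
		goto err_undo_a;

	r = step_c();            /* component_add() */
	if (r)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();                /* dsi_uninit_output() */
err_undo_a:
	undo_a();                /* of_platform_depopulate() runs last */
	return r;
}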
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 1f698a95a94a..33e15cb77efa 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -432,7 +432,7 @@ struct omap_dss_device {
const struct omap_dss_driver *driver;
const struct omap_dss_device_ops *ops;
unsigned long ops_flags;
- unsigned long bus_flags;
+ u32 bus_flags;
/* helper variable for driver suspend/resume */
bool activate_after_resume;
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 452e625f6ce3..933ebc9f9faa 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -52,17 +52,44 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
.destroy = omap_encoder_destroy,
};
+static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *dssdev = omap_encoder->output;
+ struct drm_connector *connector;
+ bool hdmi_mode;
+
+ hdmi_mode = false;
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ hdmi_mode = omap_connector_get_hdmi_mode(connector);
+ break;
+ }
+ }
+
+ if (dssdev->ops->hdmi.set_hdmi_mode)
+ dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
+
+ if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
+ struct hdmi_avi_infoframe avi;
+ int r;
+
+ r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
+ false);
+ if (r == 0)
+ dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
+ }
+}
+
static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct drm_connector *connector;
struct omap_dss_device *dssdev;
struct videomode vm = { 0 };
- bool hdmi_mode;
- int r;
drm_display_mode_to_videomode(adjusted_mode, &vm);
@@ -112,27 +139,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
}
/* Set the HDMI mode and HDMI infoframe if applicable. */
- hdmi_mode = false;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- hdmi_mode = omap_connector_get_hdmi_mode(connector);
- break;
- }
- }
-
- dssdev = omap_encoder->output;
-
- if (dssdev->ops->hdmi.set_hdmi_mode)
- dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
- if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
- struct hdmi_avi_infoframe avi;
-
- r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
- false);
- if (r == 0)
- dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
- }
+ if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI)
+ omap_encoder_hdmi_mode_set(encoder, adjusted_mode);
}
static void omap_encoder_disable(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 6020c30a33b3..3f3537719beb 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -90,6 +90,18 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_OLIMEX_LCD_OLINUXINO
+ tristate "Olimex LCD-OLinuXino panel"
+ depends on OF
+ depends on I2C
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ This panel is used with LCDs of different sizes, from 480x272
+ up to 1280x800, all at 24 bits per pixel.
+
+ Say Y here if you want to enable support for the Olimex Ltd.
+ LCD-OLinuXino panel.
+
config DRM_PANEL_ORISETECH_OTM8009A
tristate "Orise Technology otm8009a 480x800 dsi 2dl panel"
depends on OF
@@ -126,6 +138,12 @@ config DRM_PANEL_RAYDIUM_RM68200
Say Y here if you want to enable support for Raydium RM68200
720x1280 DSI video mode panel.
+config DRM_PANEL_SAMSUNG_S6D16D0
+ tristate "Samsung S6D16D0 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
+ help
+ Say Y here if you want to enable support for the Samsung
+ S6D16D0 864x480 command mode DSI AMOLED panel.
+
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF
@@ -186,4 +204,11 @@ config DRM_PANEL_SITRONIX_ST7789V
Say Y here if you want to enable support for the Sitronix
ST7789V controller for 240x320 LCD panels
+config DRM_PANEL_TRULY_NT35597_WQXGA
+ tristate "Truly WQXGA"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ help
+ Say Y here if you want to enable support for the Truly NT35597
+ WQXGA dual-DSI video mode panel.
endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 5ccaaa9d13af..4396658a7996 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -7,11 +7,13 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
@@ -19,3 +21,4 @@ obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
+obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 72edb334d997..ca4ae45dd307 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -506,8 +506,7 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
static void innolux_panel_del(struct innolux_panel *innolux)
{
- if (innolux->base.dev)
- drm_panel_remove(&innolux->base);
+ drm_panel_remove(&innolux->base);
}
static int innolux_panel_probe(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
new file mode 100644
index 000000000000..5e8d4523e9ed
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Panel driver for the Olimex LCD-OLinuXino
+ *
+ * Copyright (C) 2018 Olimex Ltd.
+ * Author: Stefan Mavrodiev <stefan@olimex.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/crc32.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+
+#include <video/videomode.h>
+#include <video/display_timing.h>
+
+#define LCD_OLINUXINO_HEADER_MAGIC 0x4F4CB727
+#define LCD_OLINUXINO_DATA_LEN 256
+
+struct lcd_olinuxino_mode {
+ u32 pixelclock;
+ u32 hactive;
+ u32 hfp;
+ u32 hbp;
+ u32 hpw;
+ u32 vactive;
+ u32 vfp;
+ u32 vbp;
+ u32 vpw;
+ u32 refresh;
+ u32 flags;
+};
+
+struct lcd_olinuxino_info {
+ char name[32];
+ u32 width_mm;
+ u32 height_mm;
+ u32 bpc;
+ u32 bus_format;
+ u32 bus_flag;
+} __attribute__((__packed__));
+
+struct lcd_olinuxino_eeprom {
+ u32 header;
+ u32 id;
+ char revision[4];
+ u32 serial;
+ struct lcd_olinuxino_info info;
+ u32 num_modes;
+ u8 reserved[180];
+ u32 checksum;
+} __attribute__((__packed__));
+
+struct lcd_olinuxino {
+ struct drm_panel panel;
+ struct device *dev;
+ struct i2c_client *client;
+ struct mutex mutex;
+
+ bool prepared;
+ bool enabled;
+
+ struct backlight_device *backlight;
+ struct regulator *supply;
+ struct gpio_desc *enable_gpio;
+
+ struct lcd_olinuxino_eeprom eeprom;
+};
+
+static inline struct lcd_olinuxino *to_lcd_olinuxino(struct drm_panel *panel)
+{
+ return container_of(panel, struct lcd_olinuxino, panel);
+}
+
+static int lcd_olinuxino_disable(struct drm_panel *panel)
+{
+ struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+
+ if (!lcd->enabled)
+ return 0;
+
+ backlight_disable(lcd->backlight);
+
+ lcd->enabled = false;
+
+ return 0;
+}
+
+static int lcd_olinuxino_unprepare(struct drm_panel *panel)
+{
+ struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+
+ if (!lcd->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(lcd->enable_gpio, 0);
+ regulator_disable(lcd->supply);
+
+ lcd->prepared = false;
+
+ return 0;
+}
+
+static int lcd_olinuxino_prepare(struct drm_panel *panel)
+{
+ struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+ int ret;
+
+ if (lcd->prepared)
+ return 0;
+
+ ret = regulator_enable(lcd->supply);
+ if (ret < 0)
+ return ret;
+
+ gpiod_set_value_cansleep(lcd->enable_gpio, 1);
+ lcd->prepared = true;
+
+ return 0;
+}
+
+static int lcd_olinuxino_enable(struct drm_panel *panel)
+{
+ struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+
+ if (lcd->enabled)
+ return 0;
+
+ backlight_enable(lcd->backlight);
+
+ lcd->enabled = true;
+
+ return 0;
+}
+
+static int lcd_olinuxino_get_modes(struct drm_panel *panel)
+{
+ struct lcd_olinuxino *lcd = to_lcd_olinuxino(panel);
+ struct drm_connector *connector = lcd->panel.connector;
+ struct lcd_olinuxino_info *lcd_info = &lcd->eeprom.info;
+ struct drm_device *drm = lcd->panel.drm;
+ struct lcd_olinuxino_mode *lcd_mode;
+ struct drm_display_mode *mode;
+ u32 i, num = 0;
+
+ for (i = 0; i < lcd->eeprom.num_modes; i++) {
+ lcd_mode = (struct lcd_olinuxino_mode *)
+ &lcd->eeprom.reserved[i * sizeof(*lcd_mode)];
+
+ mode = drm_mode_create(drm);
+ if (!mode) {
+ dev_err(drm->dev, "failed to add mode %ux%u@%u\n",
+ lcd_mode->hactive,
+ lcd_mode->vactive,
+ lcd_mode->refresh);
+ continue;
+ }
+
+ mode->clock = lcd_mode->pixelclock;
+ mode->hdisplay = lcd_mode->hactive;
+ mode->hsync_start = lcd_mode->hactive + lcd_mode->hfp;
+ mode->hsync_end = lcd_mode->hactive + lcd_mode->hfp +
+ lcd_mode->hpw;
+ mode->htotal = lcd_mode->hactive + lcd_mode->hfp +
+ lcd_mode->hpw + lcd_mode->hbp;
+ mode->vdisplay = lcd_mode->vactive;
+ mode->vsync_start = lcd_mode->vactive + lcd_mode->vfp;
+ mode->vsync_end = lcd_mode->vactive + lcd_mode->vfp +
+ lcd_mode->vpw;
+ mode->vtotal = lcd_mode->vactive + lcd_mode->vfp +
+ lcd_mode->vpw + lcd_mode->vbp;
+ mode->vrefresh = lcd_mode->refresh;
+
+ /* Always make the first mode preferred */
+ if (i == 0)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ num++;
+ }
+
+ memcpy(connector->display_info.name, lcd_info->name, 32);
+ connector->display_info.width_mm = lcd_info->width_mm;
+ connector->display_info.height_mm = lcd_info->height_mm;
+ connector->display_info.bpc = lcd_info->bpc;
+
+ if (lcd_info->bus_format)
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &lcd_info->bus_format, 1);
+ connector->display_info.bus_flags = lcd_info->bus_flag;
+
+ return num;
+}
+
+static const struct drm_panel_funcs lcd_olinuxino_funcs = {
+ .disable = lcd_olinuxino_disable,
+ .unprepare = lcd_olinuxino_unprepare,
+ .prepare = lcd_olinuxino_prepare,
+ .enable = lcd_olinuxino_enable,
+ .get_modes = lcd_olinuxino_get_modes,
+};
+
+static int lcd_olinuxino_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct lcd_olinuxino *lcd;
+ u32 checksum, i;
+ int ret = 0;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+ return -ENODEV;
+
+ lcd = devm_kzalloc(dev, sizeof(*lcd), GFP_KERNEL);
+ if (!lcd)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, lcd);
+ lcd->dev = dev;
+ lcd->client = client;
+
+ mutex_init(&lcd->mutex);
+
+ /* Copy data into buffer */
+ for (i = 0; i < LCD_OLINUXINO_DATA_LEN; i += I2C_SMBUS_BLOCK_MAX) {
+ mutex_lock(&lcd->mutex);
+ ret = i2c_smbus_read_i2c_block_data(client,
+ i,
+ I2C_SMBUS_BLOCK_MAX,
+ (u8 *)&lcd->eeprom + i);
+ mutex_unlock(&lcd->mutex);
+ if (ret < 0) {
+ dev_err(dev, "error reading from device at %02x\n", i);
+ return ret;
+ }
+ }
+
+ /* Check configuration checksum */
+ checksum = ~crc32(~0, (u8 *)&lcd->eeprom, 252);
+ if (checksum != lcd->eeprom.checksum) {
+ dev_err(dev, "configuration checksum does not match!\n");
+ return -EINVAL;
+ }
+
+ /* Check magic header */
+ if (lcd->eeprom.header != LCD_OLINUXINO_HEADER_MAGIC) {
+ dev_err(dev, "magic header does not match\n");
+ return -EINVAL;
+ }
+
+ dev_info(dev, "Detected %s, Rev. %s, Serial: %08x\n",
+ lcd->eeprom.info.name,
+ lcd->eeprom.revision,
+ lcd->eeprom.serial);
+
+ /*
+ * The EEPROM can hold up to 4 modes.
+ * If the stored count is larger, clamp it to 4.
+ */
+ if (lcd->eeprom.num_modes > 4) {
+ dev_warn(dev, "invalid number of modes, falling back to 4\n");
+ lcd->eeprom.num_modes = 4;
+ }
+
+ lcd->enabled = false;
+ lcd->prepared = false;
+
+ lcd->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(lcd->supply))
+ return PTR_ERR(lcd->supply);
+
+ lcd->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->enable_gpio))
+ return PTR_ERR(lcd->enable_gpio);
+
+ lcd->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(lcd->backlight))
+ return PTR_ERR(lcd->backlight);
+
+ drm_panel_init(&lcd->panel);
+ lcd->panel.dev = dev;
+ lcd->panel.funcs = &lcd_olinuxino_funcs;
+
+ return drm_panel_add(&lcd->panel);
+}
+
+static int lcd_olinuxino_remove(struct i2c_client *client)
+{
+ struct lcd_olinuxino *panel = i2c_get_clientdata(client);
+
+ drm_panel_remove(&panel->panel);
+
+ lcd_olinuxino_disable(&panel->panel);
+ lcd_olinuxino_unprepare(&panel->panel);
+
+ return 0;
+}
+
+static const struct of_device_id lcd_olinuxino_of_ids[] = {
+ { .compatible = "olimex,lcd-olinuxino" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lcd_olinuxino_of_ids);
+
+static struct i2c_driver lcd_olinuxino_driver = {
+ .driver = {
+ .name = "lcd_olinuxino",
+ .of_match_table = lcd_olinuxino_of_ids,
+ },
+ .probe = lcd_olinuxino_probe,
+ .remove = lcd_olinuxino_remove,
+};
+
+module_i2c_driver(lcd_olinuxino_driver);
+
+MODULE_AUTHOR("Stefan Mavrodiev <stefan@olimex.com>");
+MODULE_DESCRIPTION("LCD-OLinuXino driver");
+MODULE_LICENSE("GPL");
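The probe above validates the EEPROM with ~crc32(~0, data, 252), i.e. a standard IEEE CRC-32 over everything but the trailing 4-byte checksum field. Assuming the kernel's crc32() is the usual reflected CRC-32 (it matches zlib's, where the pre/post inversion happens internally), the same check can be reproduced in userspace like this:

#include <stdint.h>
#include <string.h>
#include <zlib.h>

/* Verify a 256-byte LCD-OLinuXino EEPROM dump: the last 4 bytes hold a
 * CRC32 of the first 252. Assumes a little-endian host, matching the
 * packed layout the driver reads. */
static int eeprom_checksum_ok(const uint8_t eeprom[256])
{
	uint32_t stored;
	uint32_t computed = crc32(0L, eeprom, 252);

	memcpy(&stored, eeprom + 252, sizeof(stored));
	return computed == stored;
}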
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
new file mode 100644
index 000000000000..33c22ee036f8
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * MIPI-DSI Samsung s6d16d0 panel driver. This is an 864x480
+ * AMOLED panel with a command-only DSI interface.
+ */
+
+#include <drm/drm_modes.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+
+struct s6d16d0 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+};
+
+/*
+ * The timings are not very helpful as the display is used in
+ * command mode.
+ */
+static const struct drm_display_mode samsung_s6d16d0_mode = {
+ /* HS clock, (htotal*vtotal*vrefresh)/1000 */
+ .clock = 420160,
+ .hdisplay = 864,
+ .hsync_start = 864 + 154,
+ .hsync_end = 864 + 154 + 16,
+ .htotal = 864 + 154 + 16 + 32,
+ .vdisplay = 480,
+ .vsync_start = 480 + 1,
+ .vsync_end = 480 + 1 + 1,
+ .vtotal = 480 + 1 + 1 + 1,
+ /*
+ * This depends on the clocking (HS vs LP rate); this value
+ * is calculated as:
+ * vrefresh = (clock * 1000) / (htotal*vtotal)
+ */
+ .vrefresh = 816,
+ .width_mm = 84,
+ .height_mm = 48,
+};
+
+static inline struct s6d16d0 *panel_to_s6d16d0(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6d16d0, panel);
+}
+
+static int s6d16d0_unprepare(struct drm_panel *panel)
+{
+ struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+ int ret;
+
+ /* Enter sleep mode */
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to enter sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Assert RESET */
+ gpiod_set_value_cansleep(s6->reset_gpio, 1);
+ regulator_disable(s6->supply);
+
+ return 0;
+}
+
+static int s6d16d0_prepare(struct drm_panel *panel)
+{
+ struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+ int ret;
+
+ ret = regulator_enable(s6->supply);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to enable supply (%d)\n", ret);
+ return ret;
+ }
+
+ /* Assert RESET */
+ gpiod_set_value_cansleep(s6->reset_gpio, 1);
+ udelay(10);
+ /* De-assert RESET */
+ gpiod_set_value_cansleep(s6->reset_gpio, 0);
+ msleep(120);
+
+ /* Enable tearing mode: send TE (tearing effect) at VBLANK */
+ ret = mipi_dsi_dcs_set_tear_on(dsi,
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to enable vblank TE (%d)\n",
+ ret);
+ return ret;
+ }
+ /* Exit sleep mode and power on */
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to exit sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6d16d0_enable(struct drm_panel *panel)
+{
+ struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to turn display on (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6d16d0_disable(struct drm_panel *panel)
+{
+ struct s6d16d0 *s6 = panel_to_s6d16d0(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(s6->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(s6->dev, "failed to turn display off (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6d16d0_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ strncpy(connector->display_info.name, "Samsung S6D16D0\0",
+ DRM_DISPLAY_INFO_LEN);
+
+ mode = drm_mode_duplicate(panel->drm, &samsung_s6d16d0_mode);
+ if (!mode) {
+ DRM_ERROR("bad mode or failed to add mode\n");
+ return -EINVAL;
+ }
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs s6d16d0_drm_funcs = {
+ .disable = s6d16d0_disable,
+ .unprepare = s6d16d0_unprepare,
+ .prepare = s6d16d0_prepare,
+ .enable = s6d16d0_enable,
+ .get_modes = s6d16d0_get_modes,
+};
+
+static int s6d16d0_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6d16d0 *s6;
+ int ret;
+
+ s6 = devm_kzalloc(dev, sizeof(struct s6d16d0), GFP_KERNEL);
+ if (!s6)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, s6);
+ s6->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->hs_rate = 420160000;
+ dsi->lp_rate = 19200000;
+ /*
+ * This display uses command mode so no MIPI_DSI_MODE_VIDEO
+ * or MIPI_DSI_MODE_VIDEO_SYNC_PULSE
+ *
+ * As we only send commands, we do not need to be continuously
+ * clocked.
+ */
+ dsi->mode_flags =
+ MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+
+ s6->supply = devm_regulator_get(dev, "vdd1");
+ if (IS_ERR(s6->supply))
+ return PTR_ERR(s6->supply);
+
+ /* This asserts RESET by default */
+ s6->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(s6->reset_gpio)) {
+ ret = PTR_ERR(s6->reset_gpio);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to request GPIO (%d)\n",
+ ret);
+ return ret;
+ }
+
+ drm_panel_init(&s6->panel);
+ s6->panel.dev = dev;
+ s6->panel.funcs = &s6d16d0_drm_funcs;
+
+ ret = drm_panel_add(&s6->panel);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0)
+ drm_panel_remove(&s6->panel);
+
+ return ret;
+}
+
+static int s6d16d0_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6d16d0 *s6 = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&s6->panel);
+
+ return 0;
+}
+
+static const struct of_device_id s6d16d0_of_match[] = {
+ { .compatible = "samsung,s6d16d0" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s6d16d0_of_match);
+
+static struct mipi_dsi_driver s6d16d0_driver = {
+ .probe = s6d16d0_probe,
+ .remove = s6d16d0_remove,
+ .driver = {
+ .name = "panel-samsung-s6d16d0",
+ .of_match_table = s6d16d0_of_match,
+ },
+};
+module_mipi_dsi_driver(s6d16d0_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("MIPI-DSI s6d16d0 Panel Driver");
+MODULE_LICENSE("GPL v2");
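To check the vrefresh comment in the mode above: htotal = 864 + 154 + 16 + 32 = 1066 and vtotal = 480 + 1 + 1 + 1 = 483, so (420160 * 1000) / (1066 * 483) = 420160000 / 514878, which is about 816, exactly the .vrefresh the mode declares. The same arithmetic as a tiny helper (a sketch; in-tree code would use drm_mode_vrefresh()):

/* mode->clock is in kHz, so refresh (Hz) = clock * 1000 / (htotal * vtotal). */
static int mode_refresh_hz(long clock_khz, long htotal, long vtotal)
{
	return (int)(clock_khz * 1000 / (htotal * vtotal));
}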
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 75f925390551..2d99e28ff117 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 NXP Semiconductors.
* Author: Marco Franchi <marco.franchi@nxp.com>
*
* Based on Panel Simple driver by Thierry Reding <treding@nvidia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
*/
#include <linux/backlight.h>
@@ -366,6 +363,6 @@ static struct platform_driver seiko_panel_platform_driver = {
};
module_platform_driver(seiko_panel_platform_driver);
-MODULE_AUTHOR("Marco Franchi <marco.franchi@nxp.com");
+MODULE_AUTHOR("Marco Franchi <marco.franchi@nxp.com>");
MODULE_DESCRIPTION("Seiko 43WVF1G panel driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index a04ffb3b2174..9c69e739a524 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -618,6 +618,30 @@ static const struct panel_desc auo_g070vvn01 = {
},
};
+static const struct drm_display_mode auo_g101evn010_mode = {
+ .clock = 68930,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 82,
+ .hsync_end = 1280 + 82 + 2,
+ .htotal = 1280 + 82 + 2 + 84,
+ .vdisplay = 800,
+ .vsync_start = 800 + 8,
+ .vsync_end = 800 + 8 + 2,
+ .vtotal = 800 + 8 + 2 + 6,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g101evn010 = {
+ .modes = &auo_g101evn010_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+};
+
static const struct drm_display_mode auo_g104sn02_mode = {
.clock = 40000,
.hdisplay = 800,
@@ -782,16 +806,38 @@ static const struct panel_desc avic_tm070ddh03 = {
},
};
+static const struct drm_display_mode bananapi_s070wv20_ct16_mode = {
+ .clock = 30000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 48,
+ .htotal = 800 + 40 + 48 + 40,
+ .vdisplay = 480,
+ .vsync_start = 480 + 13,
+ .vsync_end = 480 + 13 + 3,
+ .vtotal = 480 + 13 + 3 + 29,
+};
+
+static const struct panel_desc bananapi_s070wv20_ct16 = {
+ .modes = &bananapi_s070wv20_ct16_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+};
+
static const struct drm_display_mode boe_hv070wsa_mode = {
- .clock = 40800,
+ .clock = 42105,
.hdisplay = 1024,
- .hsync_start = 1024 + 90,
- .hsync_end = 1024 + 90 + 90,
- .htotal = 1024 + 90 + 90 + 90,
+ .hsync_start = 1024 + 30,
+ .hsync_end = 1024 + 30 + 30,
+ .htotal = 1024 + 30 + 30 + 30,
.vdisplay = 600,
- .vsync_start = 600 + 3,
- .vsync_end = 600 + 3 + 4,
- .vtotal = 600 + 3 + 4 + 3,
+ .vsync_start = 600 + 10,
+ .vsync_end = 600 + 10 + 10,
+ .vtotal = 600 + 10 + 10 + 10,
.vrefresh = 60,
};
@@ -846,6 +892,55 @@ static const struct panel_desc boe_nv101wxmn51 = {
},
};
+static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 5,
+ .hsync_end = 480 + 5 + 5,
+ .htotal = 480 + 5 + 5 + 40,
+ .vdisplay = 272,
+ .vsync_start = 272 + 8,
+ .vsync_end = 272 + 8 + 8,
+ .vtotal = 272 + 8 + 8 + 8,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc cdtech_s043wq26h_ct7 = {
+ .modes = &cdtech_s043wq26h_ct7_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 95,
+ .height = 54,
+ },
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+};
+
+static const struct drm_display_mode cdtech_s070wv95_ct16_mode = {
+ .clock = 35000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 40,
+ .htotal = 800 + 40 + 40 + 48,
+ .vdisplay = 480,
+ .vsync_start = 480 + 29,
+ .vsync_end = 480 + 29 + 13,
+ .vtotal = 480 + 29 + 13 + 3,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc cdtech_s070wv95_ct16 = {
+ .modes = &cdtech_s070wv95_ct16_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 154,
+ .height = 85,
+ },
+};
+
static const struct drm_display_mode chunghwa_claa070wp03xg_mode = {
.clock = 66770,
.hdisplay = 800,
@@ -971,6 +1066,36 @@ static const struct panel_desc dlc_dlc0700yzg_1 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
};
+static const struct display_timing dlc_dlc1010gig_timing = {
+ .pixelclock = { 68900000, 71100000, 73400000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 43, 53, 63 },
+ .hback_porch = { 43, 53, 63 },
+ .hsync_len = { 44, 54, 64 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 5, 8, 11 },
+ .vback_porch = { 5, 8, 11 },
+ .vsync_len = { 5, 7, 11 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc dlc_dlc1010gig = {
+ .timings = &dlc_dlc1010gig_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 216,
+ .height = 135,
+ },
+ .delay = {
+ .prepare = 60,
+ .enable = 150,
+ .disable = 100,
+ .unprepare = 60,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+};
+
static const struct drm_display_mode edt_et057090dhu_mode = {
.clock = 25175,
.hdisplay = 640,
@@ -2334,6 +2459,33 @@ static const struct panel_desc winstar_wf35ltiacd = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode arm_rtsm_mode[] = {
+ {
+ .clock = 65000,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 24,
+ .hsync_end = 1024 + 24 + 136,
+ .htotal = 1024 + 24 + 136 + 160,
+ .vdisplay = 768,
+ .vsync_start = 768 + 3,
+ .vsync_end = 768 + 3 + 6,
+ .vtotal = 768 + 3 + 6 + 29,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ },
+};
+
+static const struct panel_desc arm_rtsm = {
+ .modes = arm_rtsm_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 400,
+ .height = 300,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct of_device_id platform_of_match[] = {
{
.compatible = "ampire,am-480272h3tmqw-t01h",
@@ -2342,6 +2494,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "ampire,am800480r3tmqwa1h",
.data = &ampire_am800480r3tmqwa1h,
}, {
+ .compatible = "arm,rtsm-display",
+ .data = &arm_rtsm,
+ }, {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
@@ -2363,6 +2518,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g070vvn01",
.data = &auo_g070vvn01,
}, {
+ .compatible = "auo,g101evn010",
+ .data = &auo_g101evn010,
+ }, {
.compatible = "auo,g104sn02",
.data = &auo_g104sn02,
}, {
@@ -2381,12 +2539,21 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "avic,tm070ddh03",
.data = &avic_tm070ddh03,
}, {
+ .compatible = "bananapi,s070wv20-ct16",
+ .data = &bananapi_s070wv20_ct16,
+ }, {
.compatible = "boe,hv070wsa-100",
.data = &boe_hv070wsa
}, {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
+ .compatible = "cdtech,s043wq26h-ct7",
+ .data = &cdtech_s043wq26h_ct7,
+ }, {
+ .compatible = "cdtech,s070wv95-ct16",
+ .data = &cdtech_s070wv95_ct16,
+ }, {
.compatible = "chunghwa,claa070wp03xg",
.data = &chunghwa_claa070wp03xg,
}, {
@@ -2402,6 +2569,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "dlc,dlc0700yzg-1",
.data = &dlc_dlc0700yzg_1,
}, {
+ .compatible = "dlc,dlc1010gig",
+ .data = &dlc_dlc1010gig,
+ }, {
.compatible = "edt,et057090dhu",
.data = &edt_et057090dhu,
}, {
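Unlike the fixed drm_display_mode entries nearby, dlc_dlc1010gig is described with a display_timing of {min, typ, max} triplets, which panel-simple collapses into a mode at runtime. Sanity-checking the typical values: htotal = 1280 + 53 + 54 + 53 = 1440, vtotal = 800 + 8 + 7 + 8 = 823, and 71100000 / (1440 * 823) is roughly 60 Hz. A sketch of that collapse (struct names simplified from the videomode helpers):

struct range { unsigned int min, typ, max; };

struct timing {                       /* simplified display_timing */
	struct range pixelclock_hz;
	struct range hactive, hfp, hsync, hbp;
	struct range vactive, vfp, vsync, vbp;
};

static unsigned int typ_refresh_hz(const struct timing *t)
{
	unsigned int htotal = t->hactive.typ + t->hfp.typ + t->hsync.typ + t->hbp.typ;
	unsigned int vtotal = t->vactive.typ + t->vfp.typ + t->vsync.typ + t->vbp.typ;

	/* round to the nearest Hz */
	return (t->pixelclock_hz.typ + htotal * vtotal / 2) / (htotal * vtotal);
}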
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
new file mode 100644
index 000000000000..fc2a66c53db4
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+static const char * const regulator_names[] = {
+ "vdda",
+ "vdispp",
+ "vdispn",
+};
+
+static unsigned long const regulator_enable_loads[] = {
+ 62000,
+ 100000,
+ 100000,
+};
+
+static unsigned long const regulator_disable_loads[] = {
+ 80,
+ 100,
+ 100,
+};
+
+struct cmd_set {
+ u8 commands[4];
+ u8 size;
+};
+
+struct nt35597_config {
+ u32 width_mm;
+ u32 height_mm;
+ const char *panel_name;
+ const struct cmd_set *panel_on_cmds;
+ u32 num_on_cmds;
+ const struct drm_display_mode *dm;
+};
+
+struct truly_nt35597 {
+ struct device *dev;
+ struct drm_panel panel;
+
+ struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *mode_gpio;
+
+ struct backlight_device *backlight;
+
+ struct mipi_dsi_device *dsi[2];
+
+ const struct nt35597_config *config;
+ bool prepared;
+ bool enabled;
+};
+
+static inline struct truly_nt35597 *panel_to_ctx(struct drm_panel *panel)
+{
+ return container_of(panel, struct truly_nt35597, panel);
+}
+
+static const struct cmd_set qcom_2k_panel_magic_cmds[] = {
+ /* CMD2_P0 */
+ { { 0xff, 0x20 }, 2 },
+ { { 0xfb, 0x01 }, 2 },
+ { { 0x00, 0x01 }, 2 },
+ { { 0x01, 0x55 }, 2 },
+ { { 0x02, 0x45 }, 2 },
+ { { 0x05, 0x40 }, 2 },
+ { { 0x06, 0x19 }, 2 },
+ { { 0x07, 0x1e }, 2 },
+ { { 0x0b, 0x73 }, 2 },
+ { { 0x0c, 0x73 }, 2 },
+ { { 0x0e, 0xb0 }, 2 },
+ { { 0x0f, 0xae }, 2 },
+ { { 0x11, 0xb8 }, 2 },
+ { { 0x13, 0x00 }, 2 },
+ { { 0x58, 0x80 }, 2 },
+ { { 0x59, 0x01 }, 2 },
+ { { 0x5a, 0x00 }, 2 },
+ { { 0x5b, 0x01 }, 2 },
+ { { 0x5c, 0x80 }, 2 },
+ { { 0x5d, 0x81 }, 2 },
+ { { 0x5e, 0x00 }, 2 },
+ { { 0x5f, 0x01 }, 2 },
+ { { 0x72, 0x11 }, 2 },
+ { { 0x68, 0x03 }, 2 },
+ /* CMD2_P4 */
+ { { 0xFF, 0x24 }, 2 },
+ { { 0xFB, 0x01 }, 2 },
+ { { 0x00, 0x1C }, 2 },
+ { { 0x01, 0x0B }, 2 },
+ { { 0x02, 0x0C }, 2 },
+ { { 0x03, 0x01 }, 2 },
+ { { 0x04, 0x0F }, 2 },
+ { { 0x05, 0x10 }, 2 },
+ { { 0x06, 0x10 }, 2 },
+ { { 0x07, 0x10 }, 2 },
+ { { 0x08, 0x89 }, 2 },
+ { { 0x09, 0x8A }, 2 },
+ { { 0x0A, 0x13 }, 2 },
+ { { 0x0B, 0x13 }, 2 },
+ { { 0x0C, 0x15 }, 2 },
+ { { 0x0D, 0x15 }, 2 },
+ { { 0x0E, 0x17 }, 2 },
+ { { 0x0F, 0x17 }, 2 },
+ { { 0x10, 0x1C }, 2 },
+ { { 0x11, 0x0B }, 2 },
+ { { 0x12, 0x0C }, 2 },
+ { { 0x13, 0x01 }, 2 },
+ { { 0x14, 0x0F }, 2 },
+ { { 0x15, 0x10 }, 2 },
+ { { 0x16, 0x10 }, 2 },
+ { { 0x17, 0x10 }, 2 },
+ { { 0x18, 0x89 }, 2 },
+ { { 0x19, 0x8A }, 2 },
+ { { 0x1A, 0x13 }, 2 },
+ { { 0x1B, 0x13 }, 2 },
+ { { 0x1C, 0x15 }, 2 },
+ { { 0x1D, 0x15 }, 2 },
+ { { 0x1E, 0x17 }, 2 },
+ { { 0x1F, 0x17 }, 2 },
+ /* STV */
+ { { 0x20, 0x40 }, 2 },
+ { { 0x21, 0x01 }, 2 },
+ { { 0x22, 0x00 }, 2 },
+ { { 0x23, 0x40 }, 2 },
+ { { 0x24, 0x40 }, 2 },
+ { { 0x25, 0x6D }, 2 },
+ { { 0x26, 0x40 }, 2 },
+ { { 0x27, 0x40 }, 2 },
+ /* Vend */
+ { { 0xE0, 0x00 }, 2 },
+ { { 0xDC, 0x21 }, 2 },
+ { { 0xDD, 0x22 }, 2 },
+ { { 0xDE, 0x07 }, 2 },
+ { { 0xDF, 0x07 }, 2 },
+ { { 0xE3, 0x6D }, 2 },
+ { { 0xE1, 0x07 }, 2 },
+ { { 0xE2, 0x07 }, 2 },
+ /* UD */
+ { { 0x29, 0xD8 }, 2 },
+ { { 0x2A, 0x2A }, 2 },
+ /* CLK */
+ { { 0x4B, 0x03 }, 2 },
+ { { 0x4C, 0x11 }, 2 },
+ { { 0x4D, 0x10 }, 2 },
+ { { 0x4E, 0x01 }, 2 },
+ { { 0x4F, 0x01 }, 2 },
+ { { 0x50, 0x10 }, 2 },
+ { { 0x51, 0x00 }, 2 },
+ { { 0x52, 0x80 }, 2 },
+ { { 0x53, 0x00 }, 2 },
+ { { 0x56, 0x00 }, 2 },
+ { { 0x54, 0x07 }, 2 },
+ { { 0x58, 0x07 }, 2 },
+ { { 0x55, 0x25 }, 2 },
+ /* Reset XDONB */
+ { { 0x5B, 0x43 }, 2 },
+ { { 0x5C, 0x00 }, 2 },
+ { { 0x5F, 0x73 }, 2 },
+ { { 0x60, 0x73 }, 2 },
+ { { 0x63, 0x22 }, 2 },
+ { { 0x64, 0x00 }, 2 },
+ { { 0x67, 0x08 }, 2 },
+ { { 0x68, 0x04 }, 2 },
+ /* Resolution:1440x2560 */
+ { { 0x72, 0x02 }, 2 },
+ /* mux */
+ { { 0x7A, 0x80 }, 2 },
+ { { 0x7B, 0x91 }, 2 },
+ { { 0x7C, 0xD8 }, 2 },
+ { { 0x7D, 0x60 }, 2 },
+ { { 0x7F, 0x15 }, 2 },
+ { { 0x75, 0x15 }, 2 },
+ /* ABOFF */
+ { { 0xB3, 0xC0 }, 2 },
+ { { 0xB4, 0x00 }, 2 },
+ { { 0xB5, 0x00 }, 2 },
+ /* Source EQ */
+ { { 0x78, 0x00 }, 2 },
+ { { 0x79, 0x00 }, 2 },
+ { { 0x80, 0x00 }, 2 },
+ { { 0x83, 0x00 }, 2 },
+ /* FP BP */
+ { { 0x93, 0x0A }, 2 },
+ { { 0x94, 0x0A }, 2 },
+ /* Inversion Type */
+ { { 0x8A, 0x00 }, 2 },
+ { { 0x9B, 0xFF }, 2 },
+ /* IMGSWAP =1 @PortSwap=1 */
+ { { 0x9D, 0xB0 }, 2 },
+ { { 0x9F, 0x63 }, 2 },
+ { { 0x98, 0x10 }, 2 },
+ /* FRM */
+ { { 0xEC, 0x00 }, 2 },
+ /* CMD1 */
+ { { 0xFF, 0x10 }, 2 },
+ /* VBP+VSA=,VFP = 10H */
+ { { 0x3B, 0x03, 0x0A, 0x0A }, 4 },
+ /* FTE on */
+ { { 0x35, 0x00 }, 2 },
+ /* EN_BK =1(auto black) */
+ { { 0xE5, 0x01 }, 2 },
+ /* CMD mode(10) VDO mode(03) */
+ { { 0xBB, 0x03 }, 2 },
+ /* Non Reload MTP */
+ { { 0xFB, 0x01 }, 2 },
+};
+
+static int truly_dcs_write(struct drm_panel *panel, u32 command)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
+ ret = mipi_dsi_dcs_write(ctx->dsi[i], command, NULL, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "cmd 0x%x failed for dsi = %d\n",
+ command, i);
+ }
+ }
+
+ return ret;
+}
+
+static int truly_dcs_write_buf(struct drm_panel *panel,
+ u32 size, const u8 *buf)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi[i], buf, size);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to tx cmd [%d], err: %d\n", i, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int truly_35597_power_on(struct truly_nt35597 *ctx)
+{
+ int ret, i;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
+ ret = regulator_set_load(ctx->supplies[i].consumer,
+ regulator_enable_loads[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The reset sequence of the Truly panel requires it to be
+ * held out of reset for 10ms, then in reset for 10ms, and
+ * then released again
+ */
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+
+ return 0;
+}
+
+static int truly_nt35597_power_off(struct truly_nt35597 *ctx)
+{
+ int ret = 0;
+ int i;
+
+ gpiod_set_value(ctx->reset_gpio, 1);
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) {
+ ret = regulator_set_load(ctx->supplies[i].consumer,
+ regulator_disable_loads[i]);
+ if (ret) {
+ DRM_DEV_ERROR(ctx->dev,
+ "regulator_set_load failed %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret) {
+ DRM_DEV_ERROR(ctx->dev,
+ "regulator_bulk_disable failed %d\n", ret);
+ }
+ return ret;
+}
+
+static int truly_nt35597_disable(struct drm_panel *panel)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int ret;
+
+ if (!ctx->enabled)
+ return 0;
+
+ if (ctx->backlight) {
+ ret = backlight_disable(ctx->backlight);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "backlight disable failed %d\n",
+ ret);
+ }
+
+ ctx->enabled = false;
+ return 0;
+}
+
+static int truly_nt35597_unprepare(struct drm_panel *panel)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int ret = 0;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ctx->dsi[0]->mode_flags = 0;
+ ctx->dsi[1]->mode_flags = 0;
+
+ ret = truly_dcs_write(panel, MIPI_DCS_SET_DISPLAY_OFF);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "set_display_off cmd failed ret = %d\n",
+ ret);
+ }
+
+ /* 120ms delay required here as per DCS spec */
+ msleep(120);
+
+ ret = truly_dcs_write(panel, MIPI_DCS_ENTER_SLEEP_MODE);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "enter_sleep cmd failed ret = %d\n", ret);
+ }
+
+ ret = truly_nt35597_power_off(ctx);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "power_off failed ret = %d\n", ret);
+
+ ctx->prepared = false;
+ return ret;
+}
+
+static int truly_nt35597_prepare(struct drm_panel *panel)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int ret;
+ int i;
+ const struct cmd_set *panel_on_cmds;
+ const struct nt35597_config *config;
+ u32 num_cmds;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = truly_35597_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->dsi[0]->mode_flags |= MIPI_DSI_MODE_LPM;
+ ctx->dsi[1]->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ config = ctx->config;
+ panel_on_cmds = config->panel_on_cmds;
+ num_cmds = config->num_on_cmds;
+
+ for (i = 0; i < num_cmds; i++) {
+ ret = truly_dcs_write_buf(panel,
+ panel_on_cmds[i].size,
+ panel_on_cmds[i].commands);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "cmd set tx failed i = %d ret = %d\n",
+ i, ret);
+ goto power_off;
+ }
+ }
+
+ ret = truly_dcs_write(panel, MIPI_DCS_EXIT_SLEEP_MODE);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "exit_sleep_mode cmd failed ret = %d\n",
+ ret);
+ goto power_off;
+ }
+
+ /* Per DSI spec wait 120ms after sending exit sleep DCS command */
+ msleep(120);
+
+ ret = truly_dcs_write(panel, MIPI_DCS_SET_DISPLAY_ON);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "set_display_on cmd failed ret = %d\n", ret);
+ goto power_off;
+ }
+
+ /* Per DSI spec wait 120ms after sending set_display_on DCS command */
+ msleep(120);
+
+ ctx->prepared = true;
+
+ return 0;
+
+power_off:
+ if (truly_nt35597_power_off(ctx))
+ DRM_DEV_ERROR(ctx->dev, "power_off failed\n");
+ return ret;
+}
+
+static int truly_nt35597_enable(struct drm_panel *panel)
+{
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ int ret;
+
+ if (ctx->enabled)
+ return 0;
+
+ if (ctx->backlight) {
+ ret = backlight_enable(ctx->backlight);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "backlight enable failed %d\n",
+ ret);
+ }
+
+ ctx->enabled = true;
+
+ return 0;
+}
+
+static int truly_nt35597_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct truly_nt35597 *ctx = panel_to_ctx(panel);
+ struct drm_display_mode *mode;
+ const struct nt35597_config *config;
+
+ config = ctx->config;
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to create a new display mode\n");
+ return 0;
+ }
+
+ connector->display_info.width_mm = config->width_mm;
+ connector->display_info.height_mm = config->height_mm;
+ drm_mode_copy(mode, config->dm);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs truly_nt35597_drm_funcs = {
+ .disable = truly_nt35597_disable,
+ .unprepare = truly_nt35597_unprepare,
+ .prepare = truly_nt35597_prepare,
+ .enable = truly_nt35597_enable,
+ .get_modes = truly_nt35597_get_modes,
+};
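For orientation, the drm_panel core invokes these hooks in a fixed order around modeset enable/disable. A rough sketch (core helper names from drm_panel.h; error handling elided, not part of the patch):

	drm_panel_prepare(panel);	/* -> truly_nt35597_prepare(): power + DCS init */
	drm_panel_enable(panel);	/* -> truly_nt35597_enable(): backlight on */
	/* ... scanout active ... */
	drm_panel_disable(panel);	/* -> truly_nt35597_disable(): backlight off */
	drm_panel_unprepare(panel);	/* -> truly_nt35597_unprepare(): sleep + power off */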
+
+static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
+{
+ struct device *dev = ctx->dev;
+ int ret, i;
+ const struct nt35597_config *config;
+
+ config = ctx->config;
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+ ctx->supplies[i].supply = regulator_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->mode_gpio = devm_gpiod_get(dev, "mode", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->mode_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get mode gpio %ld\n",
+ PTR_ERR(ctx->mode_gpio));
+ return PTR_ERR(ctx->mode_gpio);
+ }
+
+ /* dual port */
+ gpiod_set_value(ctx->mode_gpio, 0);
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &truly_nt35597_drm_funcs;
+ drm_panel_add(&ctx->panel);
+
+ return 0;
+}
+
+static const struct drm_display_mode qcom_sdm845_mtp_2k_mode = {
+ .name = "1440x2560",
+ .clock = 268316,
+ .hdisplay = 1440,
+ .hsync_start = 1440 + 200,
+ .hsync_end = 1440 + 200 + 32,
+ .htotal = 1440 + 200 + 32 + 64,
+ .vdisplay = 2560,
+ .vsync_start = 2560 + 8,
+ .vsync_end = 2560 + 8 + 1,
+ .vtotal = 2560 + 8 + 1 + 7,
+ .vrefresh = 60,
+ .flags = 0,
+};
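The .clock field is in kHz, so the timings above can be cross-checked against htotal * vtotal * vrefresh. A stand-alone sketch of the arithmetic (not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long htotal = 1440 + 200 + 32 + 64;	/* 1736 */
		unsigned long vtotal = 2560 + 8 + 1 + 7;	/* 2576 */
		unsigned long hz = htotal * vtotal * 60;	/* 268316160 */

		printf("%lu kHz\n", hz / 1000);	/* 268316, matching .clock */
		return 0;
	}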
+
+static const struct nt35597_config nt35597_dir = {
+ .width_mm = 74,
+ .height_mm = 131,
+ .panel_name = "qcom_sdm845_mtp_2k_panel",
+ .dm = &qcom_sdm845_mtp_2k_mode,
+ .panel_on_cmds = qcom_2k_panel_magic_cmds,
+ .num_on_cmds = ARRAY_SIZE(qcom_2k_panel_magic_cmds),
+};
+
+static int truly_nt35597_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct truly_nt35597 *ctx;
+ struct mipi_dsi_device *dsi1_device;
+ struct device_node *dsi1;
+ struct mipi_dsi_host *dsi1_host;
+ struct mipi_dsi_device *dsi_dev;
+ int ret = 0;
+ int i;
+
+ const struct mipi_dsi_device_info info = {
+ .type = "trulynt35597",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+
+ if (!ctx)
+ return -ENOMEM;
+
+ /*
+ * This device represents itself as one with two input ports which are
+	 * fed by the output ports of the two DSI controllers. DSI0 is the
+	 * master controller and holds most of the panel-related info in its
+ * child node.
+ */
+
+ ctx->config = of_device_get_match_data(dev);
+
+ if (!ctx->config) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+ }
+
+ dsi1 = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+ if (!dsi1) {
+ DRM_DEV_ERROR(dev,
+ "failed to get remote node for dsi1_device\n");
+ return -ENODEV;
+ }
+
+ dsi1_host = of_find_mipi_dsi_host_by_node(dsi1);
+ of_node_put(dsi1);
+ if (!dsi1_host) {
+ DRM_DEV_ERROR(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* register the second DSI device */
+ dsi1_device = mipi_dsi_device_register_full(dsi1_host, &info);
+ if (IS_ERR(dsi1_device)) {
+ DRM_DEV_ERROR(dev, "failed to create dsi device\n");
+ return PTR_ERR(dsi1_device);
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+ ctx->dsi[0] = dsi;
+ ctx->dsi[1] = dsi1_device;
+
+ ret = truly_nt35597_panel_add(ctx);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to add panel\n");
+ goto err_panel_add;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctx->dsi); i++) {
+ dsi_dev = ctx->dsi[i];
+ dsi_dev->lanes = 4;
+ dsi_dev->format = MIPI_DSI_FMT_RGB888;
+ dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ ret = mipi_dsi_attach(dsi_dev);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev,
+ "dsi attach failed i = %d\n", i);
+ goto err_dsi_attach;
+ }
+ }
+
+ return 0;
+
+err_dsi_attach:
+ drm_panel_remove(&ctx->panel);
+err_panel_add:
+ mipi_dsi_device_unregister(dsi1_device);
+ return ret;
+}
+
+static int truly_nt35597_remove(struct mipi_dsi_device *dsi)
+{
+ struct truly_nt35597 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ if (ctx->dsi[0])
+ mipi_dsi_detach(ctx->dsi[0]);
+ if (ctx->dsi[1]) {
+ mipi_dsi_detach(ctx->dsi[1]);
+ mipi_dsi_device_unregister(ctx->dsi[1]);
+ }
+
+ drm_panel_remove(&ctx->panel);
+ return 0;
+}
+
+static const struct of_device_id truly_nt35597_of_match[] = {
+ {
+ .compatible = "truly,nt35597-2K-display",
+ .data = &nt35597_dir,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, truly_nt35597_of_match);
+
+static struct mipi_dsi_driver truly_nt35597_driver = {
+ .driver = {
+ .name = "panel-truly-nt35597",
+ .of_match_table = truly_nt35597_of_match,
+ },
+ .probe = truly_nt35597_probe,
+ .remove = truly_nt35597_remove,
+};
+module_mipi_dsi_driver(truly_nt35597_driver);
+
+MODULE_DESCRIPTION("Truly NT35597 DSI Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index 5fa0441bb6df..38c938c9adda 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -55,6 +55,8 @@ int pl111_vexpress_clcd_init(struct device *dev,
}
}
+ of_node_put(root);
+
/*
* If there is a coretile HDLCD and it has a driver,
* do not mux the CLCD on the motherboard to the DVI.
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 208af9f37914..dffc5093ff16 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -84,6 +84,7 @@ static int qxl_check_header(struct qxl_ring *ring)
int ret;
struct qxl_ring_header *header = &(ring->ring->header);
unsigned long flags;
+
spin_lock_irqsave(&ring->lock, flags);
ret = header->prod - header->cons < header->num_items;
if (ret == 0)
@@ -97,6 +98,7 @@ int qxl_check_idle(struct qxl_ring *ring)
int ret;
struct qxl_ring_header *header = &(ring->ring->header);
unsigned long flags;
+
spin_lock_irqsave(&ring->lock, flags);
ret = header->prod == header->cons;
spin_unlock_irqrestore(&ring->lock, flags);
@@ -110,6 +112,7 @@ int qxl_ring_push(struct qxl_ring *ring,
uint8_t *elt;
int idx, ret;
unsigned long flags;
+
spin_lock_irqsave(&ring->lock, flags);
if (header->prod - header->cons == header->num_items) {
header->notify_on_cons = header->cons + 1;
@@ -156,6 +159,7 @@ static bool qxl_ring_pop(struct qxl_ring *ring,
volatile uint8_t *ring_elt;
int idx;
unsigned long flags;
+
spin_lock_irqsave(&ring->lock, flags);
if (header->cons == header->prod) {
header->notify_on_prod = header->cons + 1;
@@ -365,7 +369,6 @@ void qxl_io_flush_surfaces(struct qxl_device *qdev)
wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}
-
void qxl_io_destroy_primary(struct qxl_device *qdev)
{
wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
@@ -373,7 +376,7 @@ void qxl_io_destroy_primary(struct qxl_device *qdev)
}
void qxl_io_create_primary(struct qxl_device *qdev,
- unsigned offset, struct qxl_bo *bo)
+ unsigned int offset, struct qxl_bo *bo)
{
struct qxl_surface_create *create;
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 15c84068d3fb..118422549828 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -34,7 +34,6 @@
#include "qxl_drv.h"
#include "qxl_object.h"
-
#if defined(CONFIG_DEBUG_FS)
static int
qxl_debugfs_irq_received(struct seq_file *m, void *data)
@@ -102,9 +101,9 @@ qxl_debugfs_init(struct drm_minor *minor)
int qxl_debugfs_add_files(struct qxl_device *qdev,
struct drm_info_list *files,
- unsigned nfiles)
+ unsigned int nfiles)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < qdev->debugfs_count; i++) {
if (qdev->debugfs[i].files == files) {
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h
index 94c5aec71920..a0ee41632d7e 100644
--- a/drivers/gpu/drm/qxl/qxl_dev.h
+++ b/drivers/gpu/drm/qxl/qxl_dev.h
@@ -28,7 +28,6 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-
#ifndef H_QXL_DEV
#define H_QXL_DEV
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 87d16a0ce01e..ce0b9c40fc21 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -253,12 +253,13 @@ static struct mode_size {
};
static int qxl_add_common_modes(struct drm_connector *connector,
- unsigned pwidth,
- unsigned pheight)
+ unsigned int pwidth,
+ unsigned int pheight)
{
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode = NULL;
int i;
+
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
60, false, false, false);
@@ -315,6 +316,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
oldcount = qdev->monitors_config->count;
if (crtc->state->active) {
struct drm_display_mode *mode = &crtc->mode;
+
head.width = mode->hdisplay;
head.height = mode->vdisplay;
head.x = crtc->x;
@@ -391,9 +393,9 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
- unsigned flags, unsigned color,
+ unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
- unsigned num_clips)
+ unsigned int num_clips)
{
/* TODO: vmwgfx where this was cribbed from had locking. Why? */
struct qxl_device *qdev = fb->dev->dev_private;
@@ -620,10 +622,14 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
if (ret)
goto out_kunmap;
- ret = qxl_release_reserve_list(release, true);
+ ret = qxl_bo_pin(cursor_bo);
if (ret)
goto out_free_bo;
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_unpin;
+
ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
if (ret)
goto out_backoff;
@@ -668,15 +674,17 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
- if (old_cursor_bo)
- qxl_bo_unref(&old_cursor_bo);
-
+ if (old_cursor_bo != NULL)
+ qxl_bo_unpin(old_cursor_bo);
+ qxl_bo_unref(&old_cursor_bo);
qxl_bo_unref(&cursor_bo);
return;
out_backoff:
qxl_release_backoff_reserve_list(release);
+out_unpin:
+ qxl_bo_unpin(cursor_bo);
out_free_bo:
qxl_bo_unref(&cursor_bo);
out_kunmap:
@@ -755,7 +763,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
}
}
- ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+ ret = qxl_bo_pin(user_bo);
if (ret)
return ret;
@@ -917,8 +925,8 @@ free_mem:
static int qxl_conn_get_modes(struct drm_connector *connector)
{
- unsigned pwidth = 1024;
- unsigned pheight = 768;
+ unsigned int pwidth = 1024;
+ unsigned int pheight = 768;
int ret = 0;
ret = qxl_add_monitors_config_modes(connector, &pwidth, &pheight);
@@ -938,8 +946,8 @@ static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
/* TODO: is this called for user defined modes? (xrandr --add-mode)
* TODO: check that the mode fits in the framebuffer */
- if(qdev->monitors_config_width == mode->hdisplay &&
- qdev->monitors_config_height == mode->vdisplay)
+ if (qdev->monitors_config_width == mode->hdisplay &&
+ qdev->monitors_config_height == mode->vdisplay)
return MODE_OK;
for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
@@ -958,7 +966,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
return &qxl_output->enc;
}
-
static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
};
@@ -1103,7 +1110,7 @@ int qxl_create_monitors_object(struct qxl_device *qdev)
}
qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
- ret = qxl_bo_pin(qdev->monitors_config_bo, QXL_GEM_DOMAIN_VRAM, NULL);
+ ret = qxl_bo_pin(qdev->monitors_config_bo);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index cc5b32e749ce..c408bb83c7a9 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -25,7 +25,7 @@
static int alloc_clips(struct qxl_device *qdev,
struct qxl_release *release,
- unsigned num_clips,
+ unsigned int num_clips,
struct qxl_bo **clips_bo)
{
int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
@@ -37,7 +37,7 @@ static int alloc_clips(struct qxl_device *qdev,
* the qxl_clip_rects. This is *not* the same as the memory allocated
 * on the device; it is offset to qxl_clip_rects.chunk.data */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
- unsigned num_clips,
+ unsigned int num_clips,
struct qxl_bo *clips_bo)
{
struct qxl_clip_rects *dev_clips;
@@ -168,6 +168,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
int ret;
struct qxl_drm_image *dimage;
struct qxl_bo *palette_bo = NULL;
+
if (stride == 0)
stride = depth * width / 8;
@@ -214,6 +215,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
if (depth == 1) {
void *ptr;
+
ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
@@ -245,8 +247,7 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
qxl_release_fence_buffer_objects(release);
out_free_palette:
- if (palette_bo)
- qxl_bo_unref(&palette_bo);
+ qxl_bo_unref(&palette_bo);
out_free_image:
qxl_image_free_objects(qdev, dimage);
out_free_drawable:
@@ -264,9 +265,9 @@ out_free_drawable:
void qxl_draw_dirty_fb(struct qxl_device *qdev,
struct drm_framebuffer *fb,
struct qxl_bo *bo,
- unsigned flags, unsigned color,
+ unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
- unsigned num_clips, int inc)
+ unsigned int num_clips, int inc)
{
/*
* TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
@@ -340,7 +341,6 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
if (ret)
goto out_release_backoff;
-
ret = qxl_image_init(qdev, release, dimage, surface_base,
left, top, width, height, depth, stride);
qxl_bo_kunmap(bo);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 8ff70a7281a7..13a0254b59a1 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -23,7 +23,6 @@
* Alon Levy
*/
-
#ifndef QXL_DRV_H
#define QXL_DRV_H
@@ -83,16 +82,16 @@ struct qxl_bo {
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
- unsigned pin_count;
+ unsigned int pin_count;
void *kptr;
int type;
/* Constant after initialization */
struct drm_gem_object gem_base;
- bool is_primary; /* is this now a primary surface */
- bool is_dumb;
+ unsigned int is_primary:1; /* is this now a primary surface */
+ unsigned int is_dumb:1;
struct qxl_bo *shadow;
- bool hw_surf_alloc;
+ unsigned int hw_surf_alloc:1;
struct qxl_surface surf;
uint32_t surface_id;
struct qxl_release *surf_create;
@@ -127,13 +126,9 @@ struct qxl_output {
#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
struct qxl_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
- bool mem_global_referenced;
struct ttm_bo_device bdev;
};
-
struct qxl_memslot {
uint8_t generation;
uint64_t start_phys_addr;
@@ -191,12 +186,12 @@ struct qxl_draw_fill {
*/
struct qxl_debugfs {
struct drm_info_list *files;
- unsigned num_files;
+ unsigned int num_files;
};
int qxl_debugfs_add_files(struct qxl_device *rdev,
struct drm_info_list *files,
- unsigned nfiles);
+ unsigned int nfiles);
int qxl_debugfs_fence_init(struct qxl_device *rdev);
struct qxl_device;
@@ -231,7 +226,7 @@ struct qxl_device {
struct qxl_ram_header *ram_header;
- bool primary_created;
+ unsigned int primary_created:1;
struct qxl_memslot *mem_slots;
uint8_t n_mem_slots;
@@ -254,7 +249,7 @@ struct qxl_device {
atomic_t irq_received_display;
atomic_t irq_received_cursor;
atomic_t irq_received_io_cmd;
- unsigned irq_received_error;
+ unsigned int irq_received_error;
wait_queue_head_t display_event;
wait_queue_head_t cursor_event;
wait_queue_head_t io_cmd_event;
@@ -262,7 +257,7 @@ struct qxl_device {
/* debugfs */
struct qxl_debugfs debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
- unsigned debugfs_count;
+ unsigned int debugfs_count;
struct mutex update_area_mutex;
@@ -372,7 +367,6 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
@@ -398,7 +392,7 @@ void qxl_update_screen(struct qxl_device *qxl);
/* qxl io operations (qxl_cmd.c) */
void qxl_io_create_primary(struct qxl_device *qdev,
- unsigned offset,
+ unsigned int offset,
struct qxl_bo *bo);
void qxl_io_destroy_primary(struct qxl_device *qdev);
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id);
@@ -449,9 +443,9 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
void qxl_draw_dirty_fb(struct qxl_device *qdev,
struct drm_framebuffer *fb,
struct qxl_bo *bo,
- unsigned flags, unsigned color,
+ unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
- unsigned num_clips, int inc);
+ unsigned int num_clips, int inc);
void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec);
@@ -496,7 +490,7 @@ bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj);
int qxl_debugfs_add_files(struct qxl_device *qdev,
struct drm_info_list *files,
- unsigned nfiles);
+ unsigned int nfiles);
int qxl_surface_id_alloc(struct qxl_device *qdev,
struct qxl_bo *surf);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index c666b89eed5d..e3765739c396 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -38,6 +38,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
int r;
struct qxl_surface surf;
uint32_t pitch, format;
+
pitch = args->width * ((args->bpp + 1) / 8);
args->size = pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
@@ -52,7 +53,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
default:
return -EINVAL;
}
-
+
surf.width = args->width;
surf.height = args->height;
surf.stride = pitch;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 2294b7f14fdf..a819d24225d2 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -111,7 +111,7 @@ static int qxlfb_create_pinned_object(struct qxl_device *qdev,
qbo->surf.stride = mode_cmd->pitches[0];
qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB;
- ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL);
+ ret = qxl_bo_pin(qbo);
if (ret) {
goto out_unref;
}
@@ -134,9 +134,9 @@ out_unref:
*/
static int qxlfb_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_file *file_priv,
- unsigned flags, unsigned color,
+ unsigned int flags, unsigned int color,
struct drm_clip_rect *clips,
- unsigned num_clips)
+ unsigned int num_clips)
{
struct qxl_device *qdev = fb->dev->dev_private;
struct fb_info *info = qdev->fb_helper.fbdev;
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index 7fbcc35e8ad3..43688ecdd8a0 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -136,6 +136,7 @@ qxl_image_init_helper(struct qxl_device *qdev,
int remain;
int page;
int size;
+
if (stride == linesize && chunk_stride == stride) {
remain = linesize * height;
page = 0;
@@ -162,7 +163,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
page++;
}
} else {
- unsigned page_base, page_offset, out_offset;
+ unsigned int page_base, page_offset, out_offset;
+
for (i = 0 ; i < height ; ++i) {
i_data = (void *)data + i * stride;
remain = linesize;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 6cc9f3367fa0..6e828158bcb0 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -85,6 +85,7 @@ static void
apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
void *reloc_page;
+
reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
info->src_bo,
@@ -189,6 +190,7 @@ static int qxl_process_single_command(struct qxl_device *qdev,
{
struct qxl_drawable *draw = fb_cmd;
+
draw->mm_time = qdev->rom->mm_clock;
}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index e25c589d5f50..15238a413f9d 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -92,6 +92,7 @@ void qxl_reinit_memslots(struct qxl_device *qdev)
static void qxl_gc_work(struct work_struct *work)
{
struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
+
qxl_garbage_collect(qdev);
}
@@ -284,7 +285,6 @@ int qxl_device_init(struct qxl_device *qdev,
(unsigned long)qdev->surfaceram_base,
(unsigned long)qdev->surfaceram_size);
-
INIT_WORK(&qdev->gc_work, qxl_gc_work);
return 0;
@@ -313,10 +313,8 @@ error:
void qxl_device_fini(struct qxl_device *qdev)
{
- if (qdev->current_release_bo[0])
- qxl_bo_unref(&qdev->current_release_bo[0]);
- if (qdev->current_release_bo[1])
- qxl_bo_unref(&qdev->current_release_bo[1]);
+ qxl_bo_unref(&qdev->current_release_bo[0]);
+ qxl_bo_unref(&qdev->current_release_bo[1]);
flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 6a30196e9d6c..91f3bbc73ecc 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -54,7 +54,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
- unsigned i;
+ unsigned int i;
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
@@ -74,7 +74,6 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
}
}
-
int qxl_bo_create(struct qxl_device *qdev,
unsigned long size, bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
@@ -187,13 +186,9 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
- struct io_mapping *map;
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
- map = qdev->vram_mapping;
- else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
- map = qdev->surface_mapping;
- else
+ if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
+ (bo->tbo.mem.mem_type != TTM_PL_PRIV))
goto fallback;
io_mapping_unmap_atomic(pmap);
@@ -201,7 +196,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
ttm_mem_io_unlock(man);
- return ;
+ return;
fallback:
qxl_bo_kunmap(bo);
}
@@ -221,7 +216,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
return bo;
}
-static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+static int __qxl_bo_pin(struct qxl_bo *bo)
{
struct ttm_operation_ctx ctx = { false, false };
struct drm_device *ddev = bo->gem_base.dev;
@@ -229,16 +224,12 @@ static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
if (bo->pin_count) {
bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = qxl_bo_gpu_offset(bo);
return 0;
}
- qxl_ttm_placement_from_domain(bo, domain, true);
+ qxl_ttm_placement_from_domain(bo, bo->type, true);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
bo->pin_count = 1;
- if (gpu_addr != NULL)
- *gpu_addr = qxl_bo_gpu_offset(bo);
}
if (unlikely(r != 0))
dev_err(ddev->dev, "%p pin failed\n", bo);
@@ -266,13 +257,12 @@ static int __qxl_bo_unpin(struct qxl_bo *bo)
return r;
}
-
/*
* Reserve the BO before pinning the object. If the BO was reserved
* beforehand, use the internal version directly __qxl_bo_pin.
*
*/
-int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
+int qxl_bo_pin(struct qxl_bo *bo)
{
int r;
@@ -280,7 +270,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
if (r)
return r;
- r = __qxl_bo_pin(bo, bo->type, NULL);
+ r = __qxl_bo_pin(bo);
qxl_bo_unreserve(bo);
return r;
}
@@ -335,6 +325,7 @@ void qxl_bo_fini(struct qxl_device *qdev)
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
int ret;
+
if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
/* allocate a surface id for this surface now */
ret = qxl_surface_id_alloc(qdev, bo);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 0374fd93f4d6..255b914e2a7b 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -35,6 +35,7 @@ static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct drm_device *ddev = bo->gem_base.dev;
+
dev_err(ddev->dev, "%p reserve failed\n", bo);
}
return r;
@@ -71,6 +72,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
struct drm_device *ddev = bo->gem_base.dev;
+
dev_err(ddev->dev, "%p reserve failed for wait\n",
bo);
}
@@ -95,7 +97,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int pa
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
-extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
+extern int qxl_bo_pin(struct qxl_bo *bo);
extern int qxl_bo_unpin(struct qxl_bo *bo);
extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 9f029dda1f07..a55dece118b2 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -38,7 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
WARN_ONCE(1, "not implemented");
}
-
struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
WARN_ONCE(1, "not implemented");
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index e37f0097f744..30f85f0130cb 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -217,7 +217,7 @@ int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
qxl_bo_ref(bo);
entry->tv.bo = &bo->tbo;
- entry->tv.shared = false;
+ entry->tv.num_shared = 0;
list_add_tail(&entry->tv.head, &release->bos);
return 0;
}
@@ -234,7 +234,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
return ret;
}
- ret = reservation_object_reserve_shared(bo->tbo.resv);
+ ret = reservation_object_reserve_shared(bo->tbo.resv, 1);
if (ret)
return ret;
@@ -282,7 +282,6 @@ void qxl_release_backoff_reserve_list(struct qxl_release *release)
ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
-
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
struct qxl_release *create_rel,
@@ -428,8 +427,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
- struct ttm_bo_driver *driver;
- struct qxl_bo *qbo;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@@ -450,14 +447,12 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- driver = bdev->driver;
glob = bdev->glob;
spin_lock(&glob->lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
- qbo = to_qxl_bo(bo);
reservation_object_add_shared_fence(bo->resv, &release->base);
ttm_bo_add_to_lru(bo);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 86a1fb32f6db..886f61e94f24 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -46,62 +46,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int qxl_ttm_global_init(struct qxl_device *qdev)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- qdev->mman.mem_global_referenced = false;
- global_ref = &qdev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &qxl_ttm_mem_global_init;
- global_ref->release = &qxl_ttm_mem_global_release;
-
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- qdev->mman.bo_global_ref.mem_glob =
- qdev->mman.mem_global_ref.object;
- global_ref = &qdev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&qdev->mman.mem_global_ref);
- return r;
- }
-
- qdev->mman.mem_global_referenced = true;
- return 0;
-}
-
-static void qxl_ttm_global_fini(struct qxl_device *qdev)
-{
- if (qdev->mman.mem_global_referenced) {
- drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
- drm_global_item_unref(&qdev->mman.mem_global_ref);
- qdev->mman.mem_global_referenced = false;
- }
-}
-
static struct vm_operations_struct qxl_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;
@@ -174,7 +118,7 @@ static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
return -EINVAL;
}
return 0;
@@ -331,7 +275,6 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
if (ret)
return ret;
-
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
qxl_move_null(bo, new_mem);
return 0;
@@ -373,12 +316,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
int r;
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
- r = qxl_ttm_global_init(qdev);
- if (r)
- return r;
	/* No other users of the address space, so set it to 0 */
r = ttm_bo_device_init(&qdev->mman.bdev,
- qdev->mman.bo_global_ref.ref.object,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, 0);
@@ -401,11 +340,11 @@ int qxl_ttm_init(struct qxl_device *qdev)
return r;
}
DRM_INFO("qxl: %uM of VRAM memory size\n",
- (unsigned)qdev->vram_size / (1024 * 1024));
+ (unsigned int)qdev->vram_size / (1024 * 1024));
DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
- ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+ ((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
DRM_INFO("qxl: %uM of Surface memory size\n",
- (unsigned)qdev->surfaceram_size / (1024 * 1024));
+ (unsigned int)qdev->surfaceram_size / (1024 * 1024));
return 0;
}
@@ -414,11 +353,9 @@ void qxl_ttm_fini(struct qxl_device *qdev)
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
ttm_bo_device_release(&qdev->mman.bdev);
- qxl_ttm_global_fini(qdev);
DRM_INFO("qxl: ttm finalized\n");
}
-
#define QXL_DEBUGFS_MEM_TYPES 2
#if defined(CONFIG_DEBUG_FS)
@@ -443,7 +380,7 @@ int qxl_ttm_debugfs_init(struct qxl_device *qdev)
#if defined(CONFIG_DEBUG_FS)
static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
- unsigned i;
+ unsigned int i;
for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
if (i == 0)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 21161aa8acbf..652126fd6dd4 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -814,7 +814,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
((idx_value >> 21) & 0xF));
return -EINVAL;
}
- /* Pass through. */
+ /* Fall through. */
case 6:
track->cb[i].cpp = 4;
break;
@@ -965,7 +965,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return -EINVAL;
}
/* The same rules apply as for DXT3/5. */
- /* Pass through. */
+ /* Fall through. */
case R300_TX_FORMAT_DXT3:
case R300_TX_FORMAT_DXT5:
track->textures[i].cpp = 1;
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 45e1d4e60759..2318d9e3ed96 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -109,6 +109,7 @@ void r420_pipes_init(struct radeon_device *rdev)
default:
/* force to 1 pipe */
num_pipes = 1;
+ /* fall through */
case 1:
tmp = (0 << 1);
break;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1a6f6edb3515..32808e50be12 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -448,10 +448,7 @@ struct radeon_surface_reg {
* TTM.
*/
struct radeon_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
struct ttm_bo_device bdev;
- bool mem_global_referenced;
bool initialized;
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1ae31dbc61c6..f43305329939 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -178,7 +178,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
}
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
- p->relocs[i].tv.shared = !r->write_domain;
+ p->relocs[i].tv.num_shared = !r->write_domain;
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
priority);
@@ -253,7 +253,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
resv = reloc->robj->tbo.resv;
r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
- reloc->tv.shared);
+ reloc->tv.num_shared);
if (r)
return r;
}
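The ttm_validate_buffer field changes here from a bool (shared) to a slot count (num_shared), so `!r->write_domain` now reads as "reserve one shared-fence slot for read-only relocations, none for written ones", and the second hunk relies on the count being truthy. Spelled out as an equivalent sketch (not part of the patch):

	p->relocs[i].tv.num_shared = r->write_domain ? 0 : 1;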
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 27d8e7dd2d06..44617dec8183 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -552,7 +552,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
INIT_LIST_HEAD(&list);
tv.bo = &bo_va->bo->tbo;
- tv.shared = true;
+ tv.num_shared = 1;
list_add(&tv.head, &list);
vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 4278272e3191..3dae2c4dec71 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -421,24 +421,14 @@ static void radeon_legacy_write_tv_restarts(struct radeon_encoder *radeon_encode
static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
- struct radeon_crtc *radeon_crtc;
int restart;
unsigned int h_total, v_total, f_total;
int v_offset, h_offset;
u16 p1, p2, h_inc;
bool h_changed;
const struct radeon_tv_mode_constants *const_ptr;
- struct radeon_pll *pll;
-
- radeon_crtc = to_radeon_crtc(radeon_encoder->base.crtc);
- if (radeon_crtc->crtc_id == 1)
- pll = &rdev->clock.p2pll;
- else
- pll = &rdev->clock.p1pll;
const_ptr = radeon_legacy_tv_get_std_mode(radeon_encoder, NULL);
if (!const_ptr)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 92f6d4002eea..833e909706a9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -314,11 +314,9 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
- struct radeon_device *rdev;
if ((*bo) == NULL)
return;
- rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
ttm_bo_put(tbo);
*bo = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index cbb67e9ffb3a..9920a6fc11bf 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -60,65 +60,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
return rdev;
}
-
-/*
- * Global memory.
- */
-static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int radeon_ttm_global_init(struct radeon_device *rdev)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- rdev->mman.mem_global_referenced = false;
- global_ref = &rdev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &radeon_ttm_mem_global_init;
- global_ref->release = &radeon_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- rdev->mman.bo_global_ref.mem_glob =
- rdev->mman.mem_global_ref.object;
- global_ref = &rdev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&rdev->mman.mem_global_ref);
- return r;
- }
-
- rdev->mman.mem_global_referenced = true;
- return 0;
-}
-
-static void radeon_ttm_global_fini(struct radeon_device *rdev)
-{
- if (rdev->mman.mem_global_referenced) {
- drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
- drm_global_item_unref(&rdev->mman.mem_global_ref);
- rdev->mman.mem_global_referenced = false;
- }
-}
-
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -847,13 +788,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
{
int r;
- r = radeon_ttm_global_init(rdev);
- if (r) {
- return r;
- }
	/* No other users of the address space, so set it to 0 */
r = ttm_bo_device_init(&rdev->mman.bdev,
- rdev->mman.bo_global_ref.ref.object,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@@ -925,7 +861,6 @@ void radeon_ttm_fini(struct radeon_device *rdev)
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
ttm_bo_device_release(&rdev->mman.bdev);
radeon_gart_fini(rdev);
- radeon_ttm_global_fini(rdev);
rdev->mman.initialized = false;
DRM_INFO("radeon: ttm finalized\n");
}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 7f1a9c787bd1..0d374211661c 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -142,7 +142,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
- list[0].tv.shared = true;
+ list[0].tv.num_shared = 1;
list[0].tiling_flags = 0;
list_add(&list[0].tv.head, head);
@@ -154,7 +154,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
- list[idx].tv.shared = true;
+ list[idx].tv.num_shared = 1;
list[idx].tiling_flags = 0;
list_add(&list[idx++].tv.head, head);
}
@@ -831,7 +831,7 @@ static int radeon_vm_update_ptes(struct radeon_device *rdev,
int r;
radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
- r = reservation_object_reserve_shared(pt->tbo.resv);
+ r = reservation_object_reserve_shared(pt->tbo.resv, 1);
if (r)
return r;
@@ -946,7 +946,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
if (mem) {
- addr = mem->start << PAGE_SHIFT;
+ addr = (u64)mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
}
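The (u64) cast guards the page-to-byte conversion against wrapping when mem->start is a 32-bit quantity (as on 32-bit builds) and the BO sits above 4 GiB. A stand-alone illustration with made-up values, assuming PAGE_SHIFT == 12 (not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t start = 0x00140000;	/* page index ~5 GiB up (hypothetical) */
		uint64_t wrong = start << 12;			/* wraps to 0x40000000 */
		uint64_t right = (uint64_t)start << 12;		/* 0x140000000 */

		printf("0x%llx vs 0x%llx\n",
		       (unsigned long long)wrong, (unsigned long long)right);
		return 0;
	}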
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 17741843cf51..90dacab67be5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -226,9 +226,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
* system clock, and have no internal clock divider.
*/
- if (WARN_ON(!rcrtc->extclock))
- return;
-
/*
* The H3 ES1.x exhibits dot clock duty cycle stability issues.
* We can work around them by configuring the DPLL to twice the
@@ -701,7 +698,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
* CRTC will be put later in .atomic_disable().
*
* If a mode set is not in progress the CRTC is enabled, and the
- * following get call will be a no-op. There is thus no need to belance
+ * following get call will be a no-op. There is thus no need to balance
* it in .atomic_flush() either.
*/
rcar_du_crtc_get(rcrtc);
@@ -738,10 +735,22 @@ enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_device *rcdu = rcrtc->group->dev;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+ unsigned int vbp;
if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
return MODE_NO_INTERLACE;
+ /*
+ * The hardware requires a minimum combined horizontal sync and back
+ * porch of 20 pixels and a minimum vertical back porch of 3 lines.
+ */
+ if (mode->htotal - mode->hsync_start < 20)
+ return MODE_HBLANK_NARROW;
+
+ vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
+ if (vbp < 3)
+ return MODE_VBLANK_NARROW;
+
return MODE_OK;
}
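A stand-alone restatement of the two new blanking checks, with hypothetical timings (field names mirror struct drm_display_mode; sketch, not part of the patch):

	#include <stdio.h>

	struct mode { int hsync_start, htotal, vsync_end, vtotal; };

	static const char *check(struct mode m, int interlaced)
	{
		if (m.htotal - m.hsync_start < 20)
			return "MODE_HBLANK_NARROW";
		if ((m.vtotal - m.vsync_end) / (interlaced ? 2 : 1) < 3)
			return "MODE_VBLANK_NARROW";
		return "MODE_OK";
	}

	int main(void)
	{
		/* only 15 pixels of hsync + back porch: rejected */
		struct mode m = { 1920, 1935, 1090, 1100 };

		printf("%s\n", check(m, 0));	/* MODE_HBLANK_NARROW */
		return 0;
	}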
@@ -1002,7 +1011,7 @@ unlock:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- return 0;
+ return ret;
}
static const struct drm_crtc_funcs crtc_funcs_gen2 = {
@@ -1113,9 +1122,16 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
clk = devm_clk_get(rcdu->dev, clk_name);
if (!IS_ERR(clk)) {
rcrtc->extclock = clk;
- } else if (PTR_ERR(rcrtc->clock) == -EPROBE_DEFER) {
- dev_info(rcdu->dev, "can't get external clock %u\n", hwindex);
+ } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
return -EPROBE_DEFER;
+ } else if (rcdu->info->dpll_mask & BIT(hwindex)) {
+ /*
+ * DU channels that have a display PLL can't use the internal
+ * system clock and thus require an external clock.
+ */
+ ret = PTR_ERR(clk);
+ dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
+ return ret;
}
init_waitqueue_head(&rcrtc->flip_wait);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 084f58df4a8c..f50a3b1864bb 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -21,6 +21,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "rcar_du_drv.h"
@@ -41,7 +42,7 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
.channels_mask = BIT(1) | BIT(0),
.routes = {
/*
- * R8A7743 has one RGB output and one LVDS output
+ * R8A774[34] has one RGB output and one LVDS output
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(1) | BIT(0),
@@ -77,6 +78,33 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
},
};
+static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
+ .gen = 2,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ | RCAR_DU_FEATURE_EXT_CTRL_REGS
+ | RCAR_DU_FEATURE_INTERLACED
+ | RCAR_DU_FEATURE_TVM_SYNC,
+ .channels_mask = BIT(1) | BIT(0),
+ .routes = {
+ /*
+ * R8A77470 has two RGB outputs, one LVDS output, and
+ * one (currently unsupported) analog video output
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(0),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_DPAD1] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0) | BIT(1),
+ .port = 2,
+ },
+ },
+};
+
static const struct rcar_du_device_info rcar_du_r8a7779_info = {
.gen = 2,
.features = RCAR_DU_FEATURE_INTERLACED
@@ -341,7 +369,9 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
+ { .compatible = "renesas,du-r8a7744", .data = &rzg1_du_r8a7743_info },
{ .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info },
+ { .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info },
{ .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info },
{ .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7791", .data = &rcar_du_r8a7791_info },
@@ -363,19 +393,11 @@ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
* DRM operations
*/
-static void rcar_du_lastclose(struct drm_device *dev)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
-
- drm_fbdev_cma_restore_mode(rcdu->fbdev);
-}
-
DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
static struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
| DRIVER_ATOMIC,
- .lastclose = rcar_du_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -404,32 +426,15 @@ static struct drm_driver rcar_du_driver = {
static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
- struct drm_atomic_state *state;
- drm_kms_helper_poll_disable(rcdu->ddev);
- drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, true);
-
- state = drm_atomic_helper_suspend(rcdu->ddev);
- if (IS_ERR(state)) {
- drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, false);
- drm_kms_helper_poll_enable(rcdu->ddev);
- return PTR_ERR(state);
- }
-
- rcdu->suspend_state = state;
-
- return 0;
+ return drm_mode_config_helper_suspend(rcdu->ddev);
}
static int rcar_du_pm_resume(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
- drm_atomic_helper_resume(rcdu->ddev, rcdu->suspend_state);
- drm_fbdev_cma_set_suspend_unlocked(rcdu->fbdev, false);
- drm_kms_helper_poll_enable(rcdu->ddev);
-
- return 0;
+ return drm_mode_config_helper_resume(rcdu->ddev);
}
#endif
@@ -448,13 +453,10 @@ static int rcar_du_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
- if (rcdu->fbdev)
- drm_fbdev_cma_fini(rcdu->fbdev);
-
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return 0;
}
@@ -510,6 +512,8 @@ static int rcar_du_probe(struct platform_device *pdev)
DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
+ drm_fbdev_generic_setup(ddev, 32);
+
return 0;
error:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 143c037e2c0f..a68da79b424e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -20,7 +20,6 @@
struct clk;
struct device;
struct drm_device;
-struct drm_fbdev_cma;
struct rcar_du_device;
#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */
@@ -78,8 +77,6 @@ struct rcar_du_device {
void __iomem *mmio;
struct drm_device *ddev;
- struct drm_fbdev_cma *fbdev;
- struct drm_atomic_state *suspend_state;
struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS];
unsigned int num_crtcs;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index d85f0a1c1581..cebf313c6e1f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -202,10 +202,25 @@ void rcar_du_group_put(struct rcar_du_group *rgrp)
static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
{
- struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2];
+ struct rcar_du_device *rcdu = rgrp->dev;
+
+ /*
+ * Group start/stop is controlled by the DRES and DEN bits of DSYSR0
+ * for the first group and DSYSR2 for the second group. On most DU
+ * instances, this maps to the first CRTC of the group, and we can just
+ * use rcar_du_crtc_dsysr_clr_set() to access the correct DSYSR. On
+ * M3-N, however, DU2 doesn't exist, but DSYSR2 does. We thus need to
+ * access the register directly using group read/write.
+ */
+ if (rcdu->info->channels_mask & BIT(rgrp->index * 2)) {
+ struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2];
- rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN,
- start ? DSYSR_DEN : DSYSR_DRES);
+ rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN,
+ start ? DSYSR_DEN : DSYSR_DRES);
+ } else {
+ rcar_du_group_write(rgrp, DSYSR,
+ start ? DSYSR_DEN : DSYSR_DRES);
+ }
}
void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 4ebd61ecbee1..9c7007d45408 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -255,13 +255,6 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
-static void rcar_du_output_poll_changed(struct drm_device *dev)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
-
- drm_fbdev_cma_hotplug_event(rcdu->fbdev);
-}
-
/* -----------------------------------------------------------------------------
* Atomic Check and Update
*/
@@ -308,7 +301,6 @@ static const struct drm_mode_config_helper_funcs rcar_du_mode_config_helper = {
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.fb_create = rcar_du_fb_create,
- .output_poll_changed = rcar_du_output_poll_changed,
.atomic_check = rcar_du_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -543,7 +535,6 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
struct drm_device *dev = rcdu->ddev;
struct drm_encoder *encoder;
- struct drm_fbdev_cma *fbdev;
unsigned int dpad0_sources;
unsigned int num_encoders;
unsigned int num_groups;
@@ -582,7 +573,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
* Initialize vertical blanking interrupts handling. Start with vblank
* disabled for all CRTCs.
*/
- ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
+ ret = drm_vblank_init(dev, rcdu->num_crtcs);
if (ret < 0)
return ret;
@@ -682,17 +673,5 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
drm_kms_helper_poll_init(dev);
- if (dev->mode_config.num_connector) {
- fbdev = drm_fbdev_cma_init(dev, 32,
- dev->mode_config.num_connector);
- if (IS_ERR(fbdev))
- return PTR_ERR(fbdev);
-
- rcdu->fbdev = fbdev;
- } else {
- dev_info(rcdu->dev,
- "no connector found, disabling fbdev emulation\n");
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 9e07758a755c..39d5ae3fdf72 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -783,13 +783,14 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
drm_plane_helper_add(&plane->plane,
&rcar_du_plane_helper_funcs);
+ drm_plane_create_alpha_property(&plane->plane);
+
if (type == DRM_PLANE_TYPE_PRIMARY)
continue;
drm_object_attach_property(&plane->plane.base,
rcdu->props.colorkey,
RCAR_DU_COLORKEY_NONE);
- drm_plane_create_alpha_property(&plane->plane);
drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 173d7ad0b991..534a128a869d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -790,6 +790,7 @@ static const struct of_device_id rcar_lvds_of_table[] = {
{ .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info },
{ .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a7796-lvds", .data = &rcar_lvds_gen3_info },
+ { .compatible = "renesas,r8a77965-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a77970-lvds", .data = &rcar_lvds_r8a77970_info },
{ .compatible = "renesas,r8a77980-lvds", .data = &rcar_lvds_gen3_info },
{ .compatible = "renesas,r8a77990-lvds", .data = &rcar_lvds_r8a77990_info },
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 26438d45732b..1e75196f9659 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -7,7 +7,7 @@ config DRM_ROCKCHIP
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
- select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
select DRM_RGB if ROCKCHIP_RGB
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
help
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 868263ff0302..f6fc9d5dd0ad 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -11,7 +11,7 @@ rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
-rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
+rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 3105965fc260..5a485489a1e2 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -147,7 +147,7 @@ static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
}
static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
- u8 *buff, u8 buff_size)
+ u8 *buff, u16 buff_size)
{
u32 i;
int ret;
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
new file mode 100644
index 000000000000..7ee359bcee62
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -0,0 +1,1076 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:
+ * Chris Zhong <zyw@rock-chips.com>
+ * Nickey Yang <nickey.yang@rock-chips.com>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/bridge/dw_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <video/mipi_display.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define DSI_PHY_RSTZ 0xa0
+#define PHY_DISFORCEPLL 0
+#define PHY_ENFORCEPLL BIT(3)
+#define PHY_DISABLECLK 0
+#define PHY_ENABLECLK BIT(2)
+#define PHY_RSTZ 0
+#define PHY_UNRSTZ BIT(1)
+#define PHY_SHUTDOWNZ 0
+#define PHY_UNSHUTDOWNZ BIT(0)
+
+#define DSI_PHY_IF_CFG 0xa4
+#define N_LANES(n) ((((n) - 1) & 0x3) << 0)
+#define PHY_STOP_WAIT_TIME(cycle) (((cycle) & 0xff) << 8)
+
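For reference, the two field macros above compose into a single DSI_PHY_IF_CFG value. A sketch with made-up parameters, where dsi_write() stands in for the driver's register accessor (not part of the patch):

	u32 val = N_LANES(4) | PHY_STOP_WAIT_TIME(0x20);
	/* ((4 - 1) & 0x3) << 0 = 0x3; (0x20 & 0xff) << 8 = 0x2000 -> 0x2003 */
	dsi_write(dsi, DSI_PHY_IF_CFG, val);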
+#define DSI_PHY_STATUS 0xb0
+#define LOCK BIT(0)
+#define STOP_STATE_CLK_LANE BIT(2)
+
+#define DSI_PHY_TST_CTRL0 0xb4
+#define PHY_TESTCLK BIT(1)
+#define PHY_UNTESTCLK 0
+#define PHY_TESTCLR BIT(0)
+#define PHY_UNTESTCLR 0
+
+#define DSI_PHY_TST_CTRL1 0xb8
+#define PHY_TESTEN BIT(16)
+#define PHY_UNTESTEN 0
+#define PHY_TESTDOUT(n) (((n) & 0xff) << 8)
+#define PHY_TESTDIN(n) (((n) & 0xff) << 0)
+
+#define DSI_INT_ST0 0xbc
+#define DSI_INT_ST1 0xc0
+#define DSI_INT_MSK0 0xc4
+#define DSI_INT_MSK1 0xc8
+
+#define PHY_STATUS_TIMEOUT_US 10000
+#define CMD_PKT_STATUS_TIMEOUT_US 20000
+
+#define BYPASS_VCO_RANGE BIT(7)
+#define VCO_RANGE_CON_SEL(val) (((val) & 0x7) << 3)
+#define VCO_IN_CAP_CON_DEFAULT (0x0 << 1)
+#define VCO_IN_CAP_CON_LOW (0x1 << 1)
+#define VCO_IN_CAP_CON_HIGH (0x2 << 1)
+#define REF_BIAS_CUR_SEL BIT(0)
+
+#define CP_CURRENT_3UA 0x1
+#define CP_CURRENT_4_5UA 0x2
+#define CP_CURRENT_7_5UA 0x6
+#define CP_CURRENT_6UA 0x9
+#define CP_CURRENT_12UA 0xb
+#define CP_CURRENT_SEL(val) ((val) & 0xf)
+#define CP_PROGRAM_EN BIT(7)
+
+#define LPF_RESISTORS_15_5KOHM 0x1
+#define LPF_RESISTORS_13KOHM 0x2
+#define LPF_RESISTORS_11_5KOHM 0x4
+#define LPF_RESISTORS_10_5KOHM 0x8
+#define LPF_RESISTORS_8KOHM 0x10
+#define LPF_PROGRAM_EN BIT(6)
+#define LPF_RESISTORS_SEL(val) ((val) & 0x3f)
+
+#define HSFREQRANGE_SEL(val) (((val) & 0x3f) << 1)
+
+#define INPUT_DIVIDER(val) (((val) - 1) & 0x7f)
+#define LOW_PROGRAM_EN 0
+#define HIGH_PROGRAM_EN BIT(7)
+#define LOOP_DIV_LOW_SEL(val) (((val) - 1) & 0x1f)
+#define LOOP_DIV_HIGH_SEL(val) ((((val) - 1) >> 5) & 0xf)
+#define PLL_LOOP_DIV_EN BIT(5)
+#define PLL_INPUT_DIV_EN BIT(4)
+
+#define POWER_CONTROL BIT(6)
+#define INTERNAL_REG_CURRENT BIT(3)
+#define BIAS_BLOCK_ON BIT(2)
+#define BANDGAP_ON BIT(0)
+
+#define TER_RESISTOR_HIGH BIT(7)
+#define TER_RESISTOR_LOW 0
+#define LEVEL_SHIFTERS_ON BIT(6)
+#define TER_CAL_DONE BIT(5)
+#define SETRD_MAX (0x7 << 2)
+#define POWER_MANAGE BIT(1)
+#define TER_RESISTORS_ON BIT(0)
+
+#define BIASEXTR_SEL(val) ((val) & 0x7)
+#define BANDGAP_SEL(val) ((val) & 0x7)
+#define TLP_PROGRAM_EN BIT(7)
+#define THS_PRE_PROGRAM_EN BIT(7)
+#define THS_ZERO_PROGRAM_EN BIT(6)
+
+#define PLL_BIAS_CUR_SEL_CAP_VCO_CONTROL 0x10
+#define PLL_CP_CONTROL_PLL_LOCK_BYPASS 0x11
+#define PLL_LPF_AND_CP_CONTROL 0x12
+#define PLL_INPUT_DIVIDER_RATIO 0x17
+#define PLL_LOOP_DIVIDER_RATIO 0x18
+#define PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL 0x19
+#define BANDGAP_AND_BIAS_CONTROL 0x20
+#define TERMINATION_RESISTER_CONTROL 0x21
+#define AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY 0x22
+#define HS_RX_CONTROL_OF_LANE_0 0x44
+#define HS_TX_CLOCK_LANE_REQUEST_STATE_TIME_CONTROL 0x60
+#define HS_TX_CLOCK_LANE_PREPARE_STATE_TIME_CONTROL 0x61
+#define HS_TX_CLOCK_LANE_HS_ZERO_STATE_TIME_CONTROL 0x62
+#define HS_TX_CLOCK_LANE_TRAIL_STATE_TIME_CONTROL 0x63
+#define HS_TX_CLOCK_LANE_EXIT_STATE_TIME_CONTROL 0x64
+#define HS_TX_CLOCK_LANE_POST_TIME_CONTROL 0x65
+#define HS_TX_DATA_LANE_REQUEST_STATE_TIME_CONTROL 0x70
+#define HS_TX_DATA_LANE_PREPARE_STATE_TIME_CONTROL 0x71
+#define HS_TX_DATA_LANE_HS_ZERO_STATE_TIME_CONTROL 0x72
+#define HS_TX_DATA_LANE_TRAIL_STATE_TIME_CONTROL 0x73
+#define HS_TX_DATA_LANE_EXIT_STATE_TIME_CONTROL 0x74
+
+#define DW_MIPI_NEEDS_PHY_CFG_CLK BIT(0)
+#define DW_MIPI_NEEDS_GRF_CLK BIT(1)
+
+#define RK3288_GRF_SOC_CON6 0x025c
+#define RK3288_DSI0_LCDC_SEL BIT(6)
+#define RK3288_DSI1_LCDC_SEL BIT(9)
+
+#define RK3399_GRF_SOC_CON20 0x6250
+#define RK3399_DSI0_LCDC_SEL BIT(0)
+#define RK3399_DSI1_LCDC_SEL BIT(4)
+
+#define RK3399_GRF_SOC_CON22 0x6258
+#define RK3399_DSI0_TURNREQUEST (0xf << 12)
+#define RK3399_DSI0_TURNDISABLE (0xf << 8)
+#define RK3399_DSI0_FORCETXSTOPMODE (0xf << 4)
+#define RK3399_DSI0_FORCERXMODE (0xf << 0)
+
+#define RK3399_GRF_SOC_CON23 0x625c
+#define RK3399_DSI1_TURNDISABLE (0xf << 12)
+#define RK3399_DSI1_FORCETXSTOPMODE (0xf << 8)
+#define RK3399_DSI1_FORCERXMODE (0xf << 4)
+#define RK3399_DSI1_ENABLE (0xf << 0)
+
+#define RK3399_GRF_SOC_CON24 0x6260
+#define RK3399_TXRX_MASTERSLAVEZ BIT(7)
+#define RK3399_TXRX_ENABLECLK BIT(6)
+#define RK3399_TXRX_BASEDIR BIT(5)
+
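+/*
+ * Rockchip GRF registers use the upper 16 bits as a write-enable mask
+ * for the lower 16 bits, so a field can be updated without a
+ * read-modify-write cycle.
+ */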
+#define HIWORD_UPDATE(val, mask)	((val) | (mask) << 16)
+
+#define to_dsi(nm) container_of(nm, struct dw_mipi_dsi_rockchip, nm)
+
+enum {
+ BANDGAP_97_07,
+ BANDGAP_98_05,
+ BANDGAP_99_02,
+ BANDGAP_100_00,
+ BANDGAP_93_17,
+ BANDGAP_94_15,
+ BANDGAP_95_12,
+ BANDGAP_96_10,
+};
+
+enum {
+ BIASEXTR_87_1,
+ BIASEXTR_91_5,
+ BIASEXTR_95_9,
+ BIASEXTR_100,
+ BIASEXTR_105_94,
+ BIASEXTR_111_88,
+ BIASEXTR_118_8,
+ BIASEXTR_127_7,
+};
+
+struct rockchip_dw_dsi_chip_data {
+ u32 reg;
+
+ u32 lcdsel_grf_reg;
+ u32 lcdsel_big;
+ u32 lcdsel_lit;
+
+ u32 enable_grf_reg;
+ u32 enable;
+
+ u32 lanecfg1_grf_reg;
+ u32 lanecfg1;
+ u32 lanecfg2_grf_reg;
+ u32 lanecfg2;
+
+ unsigned int flags;
+ unsigned int max_data_lanes;
+};
+
+struct dw_mipi_dsi_rockchip {
+ struct device *dev;
+ struct drm_encoder encoder;
+ void __iomem *base;
+
+ struct regmap *grf_regmap;
+ struct clk *pllref_clk;
+ struct clk *grf_clk;
+ struct clk *phy_cfg_clk;
+
+ /* dual-channel */
+ bool is_slave;
+ struct dw_mipi_dsi_rockchip *slave;
+
+ unsigned int lane_mbps; /* per lane */
+ u16 input_div;
+ u16 feedback_div;
+ u32 format;
+
+ struct dw_mipi_dsi *dmd;
+ const struct rockchip_dw_dsi_chip_data *cdata;
+ struct dw_mipi_dsi_plat_data pdata;
+ int devcnt;
+};
+
+struct dphy_pll_parameter_map {
+ unsigned int max_mbps;
+ u8 hsfreqrange;
+ u8 icpctrl;
+ u8 lpfctrl;
+};
+
+/* The table is based on a 27 MHz D-PHY PLL reference clock. */
+static const struct dphy_pll_parameter_map dppa_map[] = {
+ { 89, 0x00, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
+ { 99, 0x10, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
+ { 109, 0x20, CP_CURRENT_3UA, LPF_RESISTORS_13KOHM },
+ { 129, 0x01, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 139, 0x11, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 149, 0x21, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 169, 0x02, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
+ { 179, 0x12, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
+ { 199, 0x22, CP_CURRENT_6UA, LPF_RESISTORS_13KOHM },
+ { 219, 0x03, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
+ { 239, 0x13, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
+ { 249, 0x23, CP_CURRENT_4_5UA, LPF_RESISTORS_13KOHM },
+ { 269, 0x04, CP_CURRENT_6UA, LPF_RESISTORS_11_5KOHM },
+ { 299, 0x14, CP_CURRENT_6UA, LPF_RESISTORS_11_5KOHM },
+ { 329, 0x05, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 359, 0x15, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 399, 0x25, CP_CURRENT_3UA, LPF_RESISTORS_15_5KOHM },
+ { 449, 0x06, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 499, 0x16, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 549, 0x07, CP_CURRENT_7_5UA, LPF_RESISTORS_10_5KOHM },
+ { 599, 0x17, CP_CURRENT_7_5UA, LPF_RESISTORS_10_5KOHM },
+ { 649, 0x08, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 699, 0x18, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 749, 0x09, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 799, 0x19, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 849, 0x29, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 899, 0x39, CP_CURRENT_7_5UA, LPF_RESISTORS_11_5KOHM },
+ { 949, 0x0a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+ { 999, 0x1a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+ {1049, 0x2a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+ {1099, 0x3a, CP_CURRENT_12UA, LPF_RESISTORS_8KOHM },
+ {1149, 0x0b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1199, 0x1b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1249, 0x2b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1299, 0x3b, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1349, 0x0c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1399, 0x1c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1449, 0x2c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM },
+ {1500, 0x3c, CP_CURRENT_12UA, LPF_RESISTORS_10_5KOHM }
+};
+
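+/*
+ * Return the index of the first dppa_map entry whose max_mbps covers the
+ * requested rate, or -EINVAL if the rate is above the table maximum.
+ */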
+static int max_mbps_to_parameter(unsigned int max_mbps)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dppa_map); i++)
+ if (dppa_map[i].max_mbps >= max_mbps)
+ return i;
+
+ return -EINVAL;
+}
+
+static inline void dsi_write(struct dw_mipi_dsi_rockchip *dsi, u32 reg, u32 val)
+{
+ writel(val, dsi->base + reg);
+}
+
+static inline u32 dsi_read(struct dw_mipi_dsi_rockchip *dsi, u32 reg)
+{
+ return readl(dsi->base + reg);
+}
+
+static inline void dsi_set(struct dw_mipi_dsi_rockchip *dsi, u32 reg, u32 mask)
+{
+ dsi_write(dsi, reg, dsi_read(dsi, reg) | mask);
+}
+
+static inline void dsi_update_bits(struct dw_mipi_dsi_rockchip *dsi, u32 reg,
+ u32 mask, u32 val)
+{
+ dsi_write(dsi, reg, (dsi_read(dsi, reg) & ~mask) | val);
+}
+
+static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi_rockchip *dsi,
+ u8 test_code,
+ u8 test_data)
+{
+ /*
+ * With the falling edge on TESTCLK, the TESTDIN[7:0] signal content
+ * is latched internally as the current test code. Test data is
+ * programmed internally on the rising edge of TESTCLK.
+ */
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
+
+ dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_TESTEN | PHY_TESTDOUT(0) |
+ PHY_TESTDIN(test_code));
+
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLK | PHY_UNTESTCLR);
+
+ dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_UNTESTEN | PHY_TESTDOUT(0) |
+ PHY_TESTDIN(test_data));
+
+ dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
+}
+
+/**
+ * ns2bc - Nanoseconds to byte clock cycles
+ */
+static inline unsigned int ns2bc(struct dw_mipi_dsi_rockchip *dsi, int ns)
+{
+ return DIV_ROUND_UP(ns * dsi->lane_mbps / 8, 1000);
+}
+
+/**
+ * ns2ui - Nanoseconds to UI time periods
+ */
+static inline unsigned int ns2ui(struct dw_mipi_dsi_rockchip *dsi, int ns)
+{
+ return DIV_ROUND_UP(ns * dsi->lane_mbps, 1000);
+}
+
+static int dw_mipi_dsi_phy_init(void *priv_data)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+ int ret, i, vco;
+
+ /*
+ * Get the VCO range from the lane frequency (lane_mbps).
+ * VCO frequency table:
+ * 000 - between 80 and 200 MHz
+ * 001 - between 200 and 300 MHz
+ * 010 - between 300 and 500 MHz
+ * 011 - between 500 and 700 MHz
+ * 100 - between 700 and 900 MHz
+ * 101 - between 900 and 1100 MHz
+ * 110 - between 1100 and 1300 MHz
+ * 111 - between 1300 and 1500 MHz
+ */
+ vco = (dsi->lane_mbps < 200) ? 0 : (dsi->lane_mbps + 100) / 200;
+
+ i = max_mbps_to_parameter(dsi->lane_mbps);
+ if (i < 0) {
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get parameter for %dmbps clock\n",
+ dsi->lane_mbps);
+ return i;
+ }
+
+ ret = clk_prepare_enable(dsi->phy_cfg_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk\n");
+ return ret;
+ }
+
+ dw_mipi_dsi_phy_write(dsi, PLL_BIAS_CUR_SEL_CAP_VCO_CONTROL,
+ BYPASS_VCO_RANGE |
+ VCO_RANGE_CON_SEL(vco) |
+ VCO_IN_CAP_CON_LOW |
+ REF_BIAS_CUR_SEL);
+
+ dw_mipi_dsi_phy_write(dsi, PLL_CP_CONTROL_PLL_LOCK_BYPASS,
+ CP_CURRENT_SEL(dppa_map[i].icpctrl));
+ dw_mipi_dsi_phy_write(dsi, PLL_LPF_AND_CP_CONTROL,
+ CP_PROGRAM_EN | LPF_PROGRAM_EN |
+ LPF_RESISTORS_SEL(dppa_map[i].lpfctrl));
+
+ dw_mipi_dsi_phy_write(dsi, HS_RX_CONTROL_OF_LANE_0,
+ HSFREQRANGE_SEL(dppa_map[i].hsfreqrange));
+
+ dw_mipi_dsi_phy_write(dsi, PLL_INPUT_DIVIDER_RATIO,
+ INPUT_DIVIDER(dsi->input_div));
+ dw_mipi_dsi_phy_write(dsi, PLL_LOOP_DIVIDER_RATIO,
+ LOOP_DIV_LOW_SEL(dsi->feedback_div) |
+ LOW_PROGRAM_EN);
+ /*
+ * We need to set PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL
+ * immediately to make the configured LSB take effect, per IP
+ * simulation and lab test results. Only then is the MIPI PHY
+ * PLL frequency correct.
+ */
+ dw_mipi_dsi_phy_write(dsi, PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL,
+ PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
+ dw_mipi_dsi_phy_write(dsi, PLL_LOOP_DIVIDER_RATIO,
+ LOOP_DIV_HIGH_SEL(dsi->feedback_div) |
+ HIGH_PROGRAM_EN);
+ dw_mipi_dsi_phy_write(dsi, PLL_INPUT_AND_LOOP_DIVIDER_RATIOS_CONTROL,
+ PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
+
+ dw_mipi_dsi_phy_write(dsi, AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY,
+ LOW_PROGRAM_EN | BIASEXTR_SEL(BIASEXTR_127_7));
+ dw_mipi_dsi_phy_write(dsi, AFE_BIAS_BANDGAP_ANALOG_PROGRAMMABILITY,
+ HIGH_PROGRAM_EN | BANDGAP_SEL(BANDGAP_96_10));
+
+ dw_mipi_dsi_phy_write(dsi, BANDGAP_AND_BIAS_CONTROL,
+ POWER_CONTROL | INTERNAL_REG_CURRENT |
+ BIAS_BLOCK_ON | BANDGAP_ON);
+
+ dw_mipi_dsi_phy_write(dsi, TERMINATION_RESISTER_CONTROL,
+ TER_RESISTOR_LOW | TER_CAL_DONE |
+ SETRD_MAX | TER_RESISTORS_ON);
+ dw_mipi_dsi_phy_write(dsi, TERMINATION_RESISTER_CONTROL,
+ TER_RESISTOR_HIGH | LEVEL_SHIFTERS_ON |
+ SETRD_MAX | POWER_MANAGE |
+ TER_RESISTORS_ON);
+
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_REQUEST_STATE_TIME_CONTROL,
+ TLP_PROGRAM_EN | ns2bc(dsi, 500));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_PREPARE_STATE_TIME_CONTROL,
+ THS_PRE_PROGRAM_EN | ns2ui(dsi, 40));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_HS_ZERO_STATE_TIME_CONTROL,
+ THS_ZERO_PROGRAM_EN | ns2bc(dsi, 300));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_TRAIL_STATE_TIME_CONTROL,
+ THS_PRE_PROGRAM_EN | ns2ui(dsi, 100));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_EXIT_STATE_TIME_CONTROL,
+ BIT(5) | ns2bc(dsi, 100));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_CLOCK_LANE_POST_TIME_CONTROL,
+ BIT(5) | (ns2bc(dsi, 60) + 7));
+
+ dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_REQUEST_STATE_TIME_CONTROL,
+ TLP_PROGRAM_EN | ns2bc(dsi, 500));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_PREPARE_STATE_TIME_CONTROL,
+ THS_PRE_PROGRAM_EN | (ns2ui(dsi, 50) + 20));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_HS_ZERO_STATE_TIME_CONTROL,
+ THS_ZERO_PROGRAM_EN | (ns2bc(dsi, 140) + 2));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_TRAIL_STATE_TIME_CONTROL,
+ THS_PRE_PROGRAM_EN | (ns2ui(dsi, 60) + 8));
+ dw_mipi_dsi_phy_write(dsi, HS_TX_DATA_LANE_EXIT_STATE_TIME_CONTROL,
+ BIT(5) | ns2bc(dsi, 100));
+
+ clk_disable_unprepare(dsi->phy_cfg_clk);
+
+ return ret;
+}
+
+static int
+dw_mipi_dsi_get_lane_mbps(void *priv_data, struct drm_display_mode *mode,
+ unsigned long mode_flags, u32 lanes, u32 format,
+ unsigned int *lane_mbps)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+ int bpp;
+ unsigned long mpclk, tmp;
+ unsigned int target_mbps = 1000;
+ unsigned int max_mbps = dppa_map[ARRAY_SIZE(dppa_map) - 1].max_mbps;
+ unsigned long best_freq = 0;
+ unsigned long fvco_min, fvco_max, fin, fout;
+ unsigned int min_prediv, max_prediv;
+ unsigned int _prediv, uninitialized_var(best_prediv);
+ unsigned long _fbdiv, uninitialized_var(best_fbdiv);
+ unsigned long min_delta = ULONG_MAX;
+
+ dsi->format = format;
+ bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+ if (bpp < 0) {
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to get bpp for pixel format %d\n",
+ dsi->format);
+ return bpp;
+ }
+
+ mpclk = DIV_ROUND_UP(mode->clock, MSEC_PER_SEC);
+ if (mpclk) {
+ /* multiply by 1 / 0.8, since the lane rate must exceed the RGB bandwidth */
+ tmp = mpclk * (bpp / lanes) * 10 / 8;
+ if (tmp < max_mbps)
+ target_mbps = tmp;
+ else
+ DRM_DEV_ERROR(dsi->dev,
+ "DPHY clock frequency is out of range\n");
+ }
+
+ fin = clk_get_rate(dsi->pllref_clk);
+ fout = target_mbps * USEC_PER_SEC;
+
+ /* constraint: 5Mhz <= Fref / N <= 40MHz */
+ min_prediv = DIV_ROUND_UP(fin, 40 * USEC_PER_SEC);
+ max_prediv = fin / (5 * USEC_PER_SEC);
+
+ /* constraint: 80MHz <= Fvco <= 1500Mhz */
+ fvco_min = 80 * USEC_PER_SEC;
+ fvco_max = 1500 * USEC_PER_SEC;
+
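+ /*
+ * Iterate over all valid pre-dividers and pick the M/N pair whose
+ * resulting output frequency is closest to the requested lane rate.
+ */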
+ for (_prediv = min_prediv; _prediv <= max_prediv; _prediv++) {
+ u64 tmp;
+ u32 delta;
+ /* Fvco = Fref * M / N */
+ tmp = (u64)fout * _prediv;
+ do_div(tmp, fin);
+ _fbdiv = tmp;
+ /*
+ * Due to the use of a "by 2 pre-scaler", the range of the
+ * feedback multiplication value M is limited to even division
+ * numbers, and M must be greater than 6 and not larger than 512.
+ */
+ if (_fbdiv < 6 || _fbdiv > 512)
+ continue;
+
+ _fbdiv += _fbdiv % 2;
+
+ tmp = (u64)_fbdiv * fin;
+ do_div(tmp, _prediv);
+ if (tmp < fvco_min || tmp > fvco_max)
+ continue;
+
+ delta = abs(fout - tmp);
+ if (delta < min_delta) {
+ best_prediv = _prediv;
+ best_fbdiv = _fbdiv;
+ min_delta = delta;
+ best_freq = tmp;
+ }
+ }
+
+ if (best_freq) {
+ dsi->lane_mbps = DIV_ROUND_UP(best_freq, USEC_PER_SEC);
+ *lane_mbps = dsi->lane_mbps;
+ dsi->input_div = best_prediv;
+ dsi->feedback_div = best_fbdiv;
+ } else {
+ DRM_DEV_ERROR(dsi->dev, "Can not find best_freq for DPHY\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct dw_mipi_dsi_phy_ops dw_mipi_dsi_rockchip_phy_ops = {
+ .init = dw_mipi_dsi_phy_init,
+ .get_lane_mbps = dw_mipi_dsi_get_lane_mbps,
+};
+
+static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi,
+ int mux)
+{
+ if (dsi->cdata->lcdsel_grf_reg)
+ regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg,
+ mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big);
+
+ if (dsi->cdata->lanecfg1_grf_reg)
+ regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg1_grf_reg,
+ dsi->cdata->lanecfg1);
+
+ if (dsi->cdata->lanecfg2_grf_reg)
+ regmap_write(dsi->grf_regmap, dsi->cdata->lanecfg2_grf_reg,
+ dsi->cdata->lanecfg2);
+
+ if (dsi->cdata->enable_grf_reg)
+ regmap_write(dsi->grf_regmap, dsi->cdata->enable_grf_reg,
+ dsi->cdata->enable);
+}
+
+static int
+dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB888:
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ s->output_mode = ROCKCHIP_OUT_MODE_P666;
+ break;
+ case MIPI_DSI_FMT_RGB565:
+ s->output_mode = ROCKCHIP_OUT_MODE_P565;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ s->output_type = DRM_MODE_CONNECTOR_DSI;
+ if (dsi->slave)
+ s->output_flags = ROCKCHIP_OUTPUT_DSI_DUAL;
+
+ return 0;
+}
+
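+/*
+ * The active CRTC endpoint id tells us which VOP ("big" or "little")
+ * feeds this encoder, and hence how to program the GRF mux.
+ */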
+static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
+ int ret, mux;
+
+ mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node,
+ &dsi->encoder);
+ if (mux < 0)
+ return;
+
+ pm_runtime_get_sync(dsi->dev);
+ if (dsi->slave)
+ pm_runtime_get_sync(dsi->slave->dev);
+
+ /*
+ * For RK3399, the grf clk must be enabled before writing grf
+ * registers. On RK3288 and other SoCs, grf_clk is NULL, so
+ * clk_prepare_enable() succeeds immediately.
+ */
+ ret = clk_prepare_enable(dsi->grf_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
+ return;
+ }
+
+ dw_mipi_dsi_rockchip_config(dsi, mux);
+ if (dsi->slave)
+ dw_mipi_dsi_rockchip_config(dsi->slave, mux);
+
+ clk_disable_unprepare(dsi->grf_clk);
+}
+
+static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct dw_mipi_dsi_rockchip *dsi = to_dsi(encoder);
+
+ if (dsi->slave)
+ pm_runtime_put(dsi->slave->dev);
+ pm_runtime_put(dsi->dev);
+}
+
+static const struct drm_encoder_helper_funcs
+dw_mipi_dsi_encoder_helper_funcs = {
+ .atomic_check = dw_mipi_dsi_encoder_atomic_check,
+ .enable = dw_mipi_dsi_encoder_enable,
+ .disable = dw_mipi_dsi_encoder_disable,
+};
+
+static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
+ struct drm_device *drm_dev)
+{
+ struct drm_encoder *encoder = &dsi->encoder;
+ int ret;
+
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+ dsi->dev->of_node);
+
+ ret = drm_encoder_init(drm_dev, encoder, &dw_mipi_dsi_encoder_funcs,
+ DRM_MODE_ENCODER_DSI, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to initialize encoder with drm\n");
+ return ret;
+ }
+
+ drm_encoder_helper_add(encoder, &dw_mipi_dsi_encoder_helper_funcs);
+
+ return 0;
+}
+
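+/*
+ * In a dual-DSI setup both controllers drive the same panel, so each
+ * points its port1 endpoint 0 at the same remote node; walk all
+ * compatible nodes and match on that remote to find our counterpart.
+ */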
+static struct device
+*dw_mipi_dsi_rockchip_find_second(struct dw_mipi_dsi_rockchip *dsi)
+{
+ const struct of_device_id *match;
+ struct device_node *node = NULL, *local;
+
+ match = of_match_device(dsi->dev->driver->of_match_table, dsi->dev);
+
+ local = of_graph_get_remote_node(dsi->dev->of_node, 1, 0);
+ if (!local)
+ return NULL;
+
+ while ((node = of_find_compatible_node(node, NULL,
+ match->compatible))) {
+ struct device_node *remote;
+
+ /* skip ourselves */
+ if (node == dsi->dev->of_node)
+ continue;
+
+ remote = of_graph_get_remote_node(node, 1, 0);
+ if (!remote)
+ continue;
+
+ /* same display device in port1-ep0 for both */
+ if (remote == local) {
+ struct dw_mipi_dsi_rockchip *dsi2;
+ struct platform_device *pdev;
+
+ pdev = of_find_device_by_node(node);
+
+ /*
+ * We have found the second DSI instance, so we will either
+ * return it or return an error. Either way, we no longer need
+ * the nodes, nor do we continue the loop.
+ */
+ of_node_put(remote);
+ of_node_put(node);
+ of_node_put(local);
+
+ if (!pdev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ dsi2 = platform_get_drvdata(pdev);
+ if (!dsi2) {
+ platform_device_put(pdev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return &pdev->dev;
+ }
+
+ of_node_put(remote);
+ }
+
+ of_node_put(local);
+
+ return NULL;
+}
+
+static int dw_mipi_dsi_rockchip_bind(struct device *dev,
+ struct device *master,
+ void *data)
+{
+ struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct device *second;
+ bool master1, master2;
+ int ret;
+
+ second = dw_mipi_dsi_rockchip_find_second(dsi);
+ if (IS_ERR(second))
+ return PTR_ERR(second);
+
+ if (second) {
+ master1 = of_property_read_bool(dsi->dev->of_node,
+ "clock-master");
+ master2 = of_property_read_bool(second->of_node,
+ "clock-master");
+
+ if (master1 && master2) {
+ DRM_DEV_ERROR(dsi->dev, "only one clock-master allowed\n");
+ return -EINVAL;
+ }
+
+ if (!master1 && !master2) {
+ DRM_DEV_ERROR(dsi->dev, "no clock-master defined\n");
+ return -EINVAL;
+ }
+
+ /* we are the slave in dual-DSI */
+ if (!master1) {
+ dsi->is_slave = true;
+ return 0;
+ }
+
+ dsi->slave = dev_get_drvdata(second);
+ if (!dsi->slave) {
+ DRM_DEV_ERROR(dev, "could not get slaves data\n");
+ return -ENODEV;
+ }
+
+ dsi->slave->is_slave = true;
+ dw_mipi_dsi_set_slave(dsi->dmd, dsi->slave->dmd);
+ put_device(second);
+ }
+
+ ret = clk_prepare_enable(dsi->pllref_clk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to enable pllref_clk: %d\n", ret);
+ return ret;
+ }
+
+ ret = rockchip_dsi_drm_create_encoder(dsi, drm_dev);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to create drm encoder\n");
+ return ret;
+ }
+
+ ret = dw_mipi_dsi_bind(dsi->dmd, &dsi->encoder);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to bind: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dw_mipi_dsi_rockchip_unbind(struct device *dev,
+ struct device *master,
+ void *data)
+{
+ struct dw_mipi_dsi_rockchip *dsi = dev_get_drvdata(dev);
+
+ if (dsi->is_slave)
+ return;
+
+ dw_mipi_dsi_unbind(dsi->dmd);
+
+ clk_disable_unprepare(dsi->pllref_clk);
+}
+
+static const struct component_ops dw_mipi_dsi_rockchip_ops = {
+ .bind = dw_mipi_dsi_rockchip_bind,
+ .unbind = dw_mipi_dsi_rockchip_unbind,
+};
+
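+/*
+ * The component is only added once a DSI peripheral has attached, so the
+ * drm device binds with a fully populated pipeline; for dual-DSI the
+ * second controller is registered from here as well.
+ */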
+static int dw_mipi_dsi_rockchip_host_attach(void *priv_data,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+ struct device *second;
+ int ret;
+
+ ret = component_add(dsi->dev, &dw_mipi_dsi_rockchip_ops);
+ if (ret) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to register component: %d\n",
+ ret);
+ return ret;
+ }
+
+ second = dw_mipi_dsi_rockchip_find_second(dsi);
+ if (IS_ERR(second))
+ return PTR_ERR(second);
+ if (second) {
+ ret = component_add(second, &dw_mipi_dsi_rockchip_ops);
+ if (ret) {
+ DRM_DEV_ERROR(second,
+ "Failed to register component: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int dw_mipi_dsi_rockchip_host_detach(void *priv_data,
+ struct mipi_dsi_device *device)
+{
+ struct dw_mipi_dsi_rockchip *dsi = priv_data;
+ struct device *second;
+
+ second = dw_mipi_dsi_rockchip_find_second(dsi);
+ if (second && !IS_ERR(second))
+ component_del(second, &dw_mipi_dsi_rockchip_ops);
+
+ component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
+
+ return 0;
+}
+
+static const struct dw_mipi_dsi_host_ops dw_mipi_dsi_rockchip_host_ops = {
+ .attach = dw_mipi_dsi_rockchip_host_attach,
+ .detach = dw_mipi_dsi_rockchip_host_detach,
+};
+
+static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct dw_mipi_dsi_rockchip *dsi;
+ struct resource *res;
+ const struct rockchip_dw_dsi_chip_data *cdata =
+ of_device_get_match_data(dev);
+ int ret, i;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dsi->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(dsi->base)) {
+ DRM_DEV_ERROR(dev, "Unable to get dsi registers\n");
+ return PTR_ERR(dsi->base);
+ }
+
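+ /* Select the per-instance config by matching the MMIO base address. */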
+ i = 0;
+ while (cdata[i].reg) {
+ if (cdata[i].reg == res->start) {
+ dsi->cdata = &cdata[i];
+ break;
+ }
+
+ i++;
+ }
+
+ if (!dsi->cdata) {
+ dev_err(dev, "no dsi-config for %s node\n", np->name);
+ return -EINVAL;
+ }
+
+ dsi->pllref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(dsi->pllref_clk)) {
+ ret = PTR_ERR(dsi->pllref_clk);
+ DRM_DEV_ERROR(dev,
+ "Unable to get pll reference clock: %d\n", ret);
+ return ret;
+ }
+
+ if (dsi->cdata->flags & DW_MIPI_NEEDS_PHY_CFG_CLK) {
+ dsi->phy_cfg_clk = devm_clk_get(dev, "phy_cfg");
+ if (IS_ERR(dsi->phy_cfg_clk)) {
+ ret = PTR_ERR(dsi->phy_cfg_clk);
+ DRM_DEV_ERROR(dev,
+ "Unable to get phy_cfg_clk: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (dsi->cdata->flags & DW_MIPI_NEEDS_GRF_CLK) {
+ dsi->grf_clk = devm_clk_get(dev, "grf");
+ if (IS_ERR(dsi->grf_clk)) {
+ ret = PTR_ERR(dsi->grf_clk);
+ DRM_DEV_ERROR(dev, "Unable to get grf_clk: %d\n", ret);
+ return ret;
+ }
+ }
+
+ dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+ if (IS_ERR(dsi->grf_regmap)) {
+ DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
+ return PTR_ERR(dsi->grf_regmap);
+ }
+
+ dsi->dev = dev;
+ dsi->pdata.base = dsi->base;
+ dsi->pdata.max_data_lanes = dsi->cdata->max_data_lanes;
+ dsi->pdata.phy_ops = &dw_mipi_dsi_rockchip_phy_ops;
+ dsi->pdata.host_ops = &dw_mipi_dsi_rockchip_host_ops;
+ dsi->pdata.priv_data = dsi;
+ platform_set_drvdata(pdev, dsi);
+
+ dsi->dmd = dw_mipi_dsi_probe(pdev, &dsi->pdata);
+ if (IS_ERR(dsi->dmd)) {
+ ret = PTR_ERR(dsi->dmd);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to probe dw_mipi_dsi: %d\n", ret);
+ goto err_clkdisable;
+ }
+
+ return 0;
+
+err_clkdisable:
+ clk_disable_unprepare(dsi->pllref_clk);
+ return ret;
+}
+
+static int dw_mipi_dsi_rockchip_remove(struct platform_device *pdev)
+{
+ struct dw_mipi_dsi_rockchip *dsi = platform_get_drvdata(pdev);
+
+ if (dsi->devcnt == 0)
+ component_del(dsi->dev, &dw_mipi_dsi_rockchip_ops);
+
+ dw_mipi_dsi_remove(dsi->dmd);
+
+ return 0;
+}
+
+static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
+ {
+ .reg = 0xff960000,
+ .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI0_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3288_DSI0_LCDC_SEL, RK3288_DSI0_LCDC_SEL),
+
+ .max_data_lanes = 4,
+ },
+ {
+ .reg = 0xff964000,
+ .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI1_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3288_DSI1_LCDC_SEL, RK3288_DSI1_LCDC_SEL),
+
+ .max_data_lanes = 4,
+ },
+ { /* sentinel */ }
+};
+
+static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
+ {
+ .reg = 0xff960000,
+ .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI0_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3399_DSI0_LCDC_SEL,
+ RK3399_DSI0_LCDC_SEL),
+
+ .lanecfg1_grf_reg = RK3399_GRF_SOC_CON22,
+ .lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI0_TURNREQUEST |
+ RK3399_DSI0_TURNDISABLE |
+ RK3399_DSI0_FORCETXSTOPMODE |
+ RK3399_DSI0_FORCERXMODE),
+
+ .flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
+ .max_data_lanes = 4,
+ },
+ {
+ .reg = 0xff968000,
+ .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
+ .lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI1_LCDC_SEL),
+ .lcdsel_lit = HIWORD_UPDATE(RK3399_DSI1_LCDC_SEL,
+ RK3399_DSI1_LCDC_SEL),
+
+ .lanecfg1_grf_reg = RK3399_GRF_SOC_CON23,
+ .lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI1_TURNDISABLE |
+ RK3399_DSI1_FORCETXSTOPMODE |
+ RK3399_DSI1_FORCERXMODE |
+ RK3399_DSI1_ENABLE),
+
+ .lanecfg2_grf_reg = RK3399_GRF_SOC_CON24,
+ .lanecfg2 = HIWORD_UPDATE(RK3399_TXRX_MASTERSLAVEZ |
+ RK3399_TXRX_ENABLECLK,
+ RK3399_TXRX_MASTERSLAVEZ |
+ RK3399_TXRX_ENABLECLK |
+ RK3399_TXRX_BASEDIR),
+
+ .enable_grf_reg = RK3399_GRF_SOC_CON23,
+ .enable = HIWORD_UPDATE(RK3399_DSI1_ENABLE, RK3399_DSI1_ENABLE),
+
+ .flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
+ .max_data_lanes = 4,
+ },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
+ {
+ .compatible = "rockchip,rk3288-mipi-dsi",
+ .data = &rk3288_chip_data,
+ }, {
+ .compatible = "rockchip,rk3399-mipi-dsi",
+ .data = &rk3399_chip_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dw_mipi_dsi_rockchip_dt_ids);
+
+struct platform_driver dw_mipi_dsi_rockchip_driver = {
+ .probe = dw_mipi_dsi_rockchip_probe,
+ .remove = dw_mipi_dsi_rockchip_remove,
+ .driver = {
+ .of_match_table = dw_mipi_dsi_rockchip_dt_ids,
+ .name = "dw-mipi-dsi-rockchip",
+ },
+};
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
deleted file mode 100644
index 662b6cb5d3f0..000000000000
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ /dev/null
@@ -1,1349 +0,0 @@
-/*
- * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/iopoll.h>
-#include <linux/math64.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/regmap.h>
-#include <linux/reset.h>
-#include <linux/mfd/syscon.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
-#include <drm/drmP.h>
-#include <video/mipi_display.h>
-
-#include "rockchip_drm_drv.h"
-#include "rockchip_drm_vop.h"
-
-#define DRIVER_NAME "dw-mipi-dsi"
-
-#define RK3288_GRF_SOC_CON6 0x025c
-#define RK3288_DSI0_SEL_VOP_LIT BIT(6)
-#define RK3288_DSI1_SEL_VOP_LIT BIT(9)
-
-#define RK3399_GRF_SOC_CON20 0x6250
-#define RK3399_DSI0_SEL_VOP_LIT BIT(0)
-#define RK3399_DSI1_SEL_VOP_LIT BIT(4)
-
-/* disable turnrequest, turndisable, forcetxstopmode, forcerxmode */
-#define RK3399_GRF_SOC_CON22 0x6258
-#define RK3399_GRF_DSI_MODE 0xffff0000
-
-#define DSI_VERSION 0x00
-#define DSI_PWR_UP 0x04
-#define RESET 0
-#define POWERUP BIT(0)
-
-#define DSI_CLKMGR_CFG 0x08
-#define TO_CLK_DIVIDSION(div) (((div) & 0xff) << 8)
-#define TX_ESC_CLK_DIVIDSION(div) (((div) & 0xff) << 0)
-
-#define DSI_DPI_VCID 0x0c
-#define DPI_VID(vid) (((vid) & 0x3) << 0)
-
-#define DSI_DPI_COLOR_CODING 0x10
-#define EN18_LOOSELY BIT(8)
-#define DPI_COLOR_CODING_16BIT_1 0x0
-#define DPI_COLOR_CODING_16BIT_2 0x1
-#define DPI_COLOR_CODING_16BIT_3 0x2
-#define DPI_COLOR_CODING_18BIT_1 0x3
-#define DPI_COLOR_CODING_18BIT_2 0x4
-#define DPI_COLOR_CODING_24BIT 0x5
-
-#define DSI_DPI_CFG_POL 0x14
-#define COLORM_ACTIVE_LOW BIT(4)
-#define SHUTD_ACTIVE_LOW BIT(3)
-#define HSYNC_ACTIVE_LOW BIT(2)
-#define VSYNC_ACTIVE_LOW BIT(1)
-#define DATAEN_ACTIVE_LOW BIT(0)
-
-#define DSI_DPI_LP_CMD_TIM 0x18
-#define OUTVACT_LPCMD_TIME(p) (((p) & 0xff) << 16)
-#define INVACT_LPCMD_TIME(p) ((p) & 0xff)
-
-#define DSI_DBI_CFG 0x20
-#define DSI_DBI_CMDSIZE 0x28
-
-#define DSI_PCKHDL_CFG 0x2c
-#define EN_CRC_RX BIT(4)
-#define EN_ECC_RX BIT(3)
-#define EN_BTA BIT(2)
-#define EN_EOTP_RX BIT(1)
-#define EN_EOTP_TX BIT(0)
-
-#define DSI_MODE_CFG 0x34
-#define ENABLE_VIDEO_MODE 0
-#define ENABLE_CMD_MODE BIT(0)
-
-#define DSI_VID_MODE_CFG 0x38
-#define FRAME_BTA_ACK BIT(14)
-#define ENABLE_LOW_POWER (0x3f << 8)
-#define ENABLE_LOW_POWER_MASK (0x3f << 8)
-#define VID_MODE_TYPE_NON_BURST_SYNC_PULSES 0x0
-#define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS 0x1
-#define VID_MODE_TYPE_BURST 0x2
-#define VID_MODE_TYPE_MASK 0x3
-
-#define DSI_VID_PKT_SIZE 0x3c
-#define VID_PKT_SIZE(p) (((p) & 0x3fff) << 0)
-#define VID_PKT_MAX_SIZE 0x3fff
-
-#define DSI_VID_HSA_TIME 0x48
-#define DSI_VID_HBP_TIME 0x4c
-#define DSI_VID_HLINE_TIME 0x50
-#define DSI_VID_VSA_LINES 0x54
-#define DSI_VID_VBP_LINES 0x58
-#define DSI_VID_VFP_LINES 0x5c
-#define DSI_VID_VACTIVE_LINES 0x60
-#define DSI_CMD_MODE_CFG 0x68
-#define MAX_RD_PKT_SIZE_LP BIT(24)
-#define DCS_LW_TX_LP BIT(19)
-#define DCS_SR_0P_TX_LP BIT(18)
-#define DCS_SW_1P_TX_LP BIT(17)
-#define DCS_SW_0P_TX_LP BIT(16)
-#define GEN_LW_TX_LP BIT(14)
-#define GEN_SR_2P_TX_LP BIT(13)
-#define GEN_SR_1P_TX_LP BIT(12)
-#define GEN_SR_0P_TX_LP BIT(11)
-#define GEN_SW_2P_TX_LP BIT(10)
-#define GEN_SW_1P_TX_LP BIT(9)
-#define GEN_SW_0P_TX_LP BIT(8)
-#define EN_ACK_RQST BIT(1)
-#define EN_TEAR_FX BIT(0)
-
-#define CMD_MODE_ALL_LP (MAX_RD_PKT_SIZE_LP | \
- DCS_LW_TX_LP | \
- DCS_SR_0P_TX_LP | \
- DCS_SW_1P_TX_LP | \
- DCS_SW_0P_TX_LP | \
- GEN_LW_TX_LP | \
- GEN_SR_2P_TX_LP | \
- GEN_SR_1P_TX_LP | \
- GEN_SR_0P_TX_LP | \
- GEN_SW_2P_TX_LP | \
- GEN_SW_1P_TX_LP | \
- GEN_SW_0P_TX_LP)
-
-#define DSI_GEN_HDR 0x6c
-#define GEN_HDATA(data) (((data) & 0xffff) << 8)
-#define GEN_HDATA_MASK (0xffff << 8)
-#define GEN_HTYPE(type) (((type) & 0xff) << 0)
-#define GEN_HTYPE_MASK 0xff
-
-#define DSI_GEN_PLD_DATA 0x70
-
-#define DSI_CMD_PKT_STATUS 0x74
-#define GEN_CMD_EMPTY BIT(0)
-#define GEN_CMD_FULL BIT(1)
-#define GEN_PLD_W_EMPTY BIT(2)
-#define GEN_PLD_W_FULL BIT(3)
-#define GEN_PLD_R_EMPTY BIT(4)
-#define GEN_PLD_R_FULL BIT(5)
-#define GEN_RD_CMD_BUSY BIT(6)
-
-#define DSI_TO_CNT_CFG 0x78
-#define HSTX_TO_CNT(p) (((p) & 0xffff) << 16)
-#define LPRX_TO_CNT(p) ((p) & 0xffff)
-
-#define DSI_BTA_TO_CNT 0x8c
-#define DSI_LPCLK_CTRL 0x94
-#define AUTO_CLKLANE_CTRL BIT(1)
-#define PHY_TXREQUESTCLKHS BIT(0)
-
-#define DSI_PHY_TMR_LPCLK_CFG 0x98
-#define PHY_CLKHS2LP_TIME(lbcc) (((lbcc) & 0x3ff) << 16)
-#define PHY_CLKLP2HS_TIME(lbcc) ((lbcc) & 0x3ff)
-
-#define DSI_PHY_TMR_CFG 0x9c
-#define PHY_HS2LP_TIME(lbcc) (((lbcc) & 0xff) << 24)
-#define PHY_LP2HS_TIME(lbcc) (((lbcc) & 0xff) << 16)
-#define MAX_RD_TIME(lbcc) ((lbcc) & 0x7fff)
-
-#define DSI_PHY_RSTZ 0xa0
-#define PHY_DISFORCEPLL 0
-#define PHY_ENFORCEPLL BIT(3)
-#define PHY_DISABLECLK 0
-#define PHY_ENABLECLK BIT(2)
-#define PHY_RSTZ 0
-#define PHY_UNRSTZ BIT(1)
-#define PHY_SHUTDOWNZ 0
-#define PHY_UNSHUTDOWNZ BIT(0)
-
-#define DSI_PHY_IF_CFG 0xa4
-#define N_LANES(n) ((((n) - 1) & 0x3) << 0)
-#define PHY_STOP_WAIT_TIME(cycle) (((cycle) & 0xff) << 8)
-
-#define DSI_PHY_STATUS 0xb0
-#define LOCK BIT(0)
-#define STOP_STATE_CLK_LANE BIT(2)
-
-#define DSI_PHY_TST_CTRL0 0xb4
-#define PHY_TESTCLK BIT(1)
-#define PHY_UNTESTCLK 0
-#define PHY_TESTCLR BIT(0)
-#define PHY_UNTESTCLR 0
-
-#define DSI_PHY_TST_CTRL1 0xb8
-#define PHY_TESTEN BIT(16)
-#define PHY_UNTESTEN 0
-#define PHY_TESTDOUT(n) (((n) & 0xff) << 8)
-#define PHY_TESTDIN(n) (((n) & 0xff) << 0)
-
-#define DSI_INT_ST0 0xbc
-#define DSI_INT_ST1 0xc0
-#define DSI_INT_MSK0 0xc4
-#define DSI_INT_MSK1 0xc8
-
-#define PHY_STATUS_TIMEOUT_US 10000
-#define CMD_PKT_STATUS_TIMEOUT_US 20000
-
-#define BYPASS_VCO_RANGE BIT(7)
-#define VCO_RANGE_CON_SEL(val) (((val) & 0x7) << 3)
-#define VCO_IN_CAP_CON_DEFAULT (0x0 << 1)
-#define VCO_IN_CAP_CON_LOW (0x1 << 1)
-#define VCO_IN_CAP_CON_HIGH (0x2 << 1)
-#define REF_BIAS_CUR_SEL BIT(0)
-
-#define CP_CURRENT_3MA BIT(3)
-#define CP_PROGRAM_EN BIT(7)
-#define LPF_PROGRAM_EN BIT(6)
-#define LPF_RESISTORS_20_KOHM 0
-
-#define HSFREQRANGE_SEL(val) (((val) & 0x3f) << 1)
-
-#define INPUT_DIVIDER(val) (((val) - 1) & 0x7f)
-#define LOW_PROGRAM_EN 0
-#define HIGH_PROGRAM_EN BIT(7)
-#define LOOP_DIV_LOW_SEL(val) (((val) - 1) & 0x1f)
-#define LOOP_DIV_HIGH_SEL(val) ((((val) - 1) >> 5) & 0x1f)
-#define PLL_LOOP_DIV_EN BIT(5)
-#define PLL_INPUT_DIV_EN BIT(4)
-
-#define POWER_CONTROL BIT(6)
-#define INTERNAL_REG_CURRENT BIT(3)
-#define BIAS_BLOCK_ON BIT(2)
-#define BANDGAP_ON BIT(0)
-
-#define TER_RESISTOR_HIGH BIT(7)
-#define TER_RESISTOR_LOW 0
-#define LEVEL_SHIFTERS_ON BIT(6)
-#define TER_CAL_DONE BIT(5)
-#define SETRD_MAX (0x7 << 2)
-#define POWER_MANAGE BIT(1)
-#define TER_RESISTORS_ON BIT(0)
-
-#define BIASEXTR_SEL(val) ((val) & 0x7)
-#define BANDGAP_SEL(val) ((val) & 0x7)
-#define TLP_PROGRAM_EN BIT(7)
-#define THS_PRE_PROGRAM_EN BIT(7)
-#define THS_ZERO_PROGRAM_EN BIT(6)
-
-#define DW_MIPI_NEEDS_PHY_CFG_CLK BIT(0)
-#define DW_MIPI_NEEDS_GRF_CLK BIT(1)
-
-enum {
- BANDGAP_97_07,
- BANDGAP_98_05,
- BANDGAP_99_02,
- BANDGAP_100_00,
- BANDGAP_93_17,
- BANDGAP_94_15,
- BANDGAP_95_12,
- BANDGAP_96_10,
-};
-
-enum {
- BIASEXTR_87_1,
- BIASEXTR_91_5,
- BIASEXTR_95_9,
- BIASEXTR_100,
- BIASEXTR_105_94,
- BIASEXTR_111_88,
- BIASEXTR_118_8,
- BIASEXTR_127_7,
-};
-
-struct dw_mipi_dsi_plat_data {
- u32 dsi0_en_bit;
- u32 dsi1_en_bit;
- u32 grf_switch_reg;
- u32 grf_dsi0_mode;
- u32 grf_dsi0_mode_reg;
- unsigned int flags;
- unsigned int max_data_lanes;
-};
-
-struct dw_mipi_dsi {
- struct drm_encoder encoder;
- struct drm_connector connector;
- struct mipi_dsi_host dsi_host;
- struct drm_panel *panel;
- struct device *dev;
- struct regmap *grf_regmap;
- void __iomem *base;
-
- struct clk *grf_clk;
- struct clk *pllref_clk;
- struct clk *pclk;
- struct clk *phy_cfg_clk;
-
- int dpms_mode;
- unsigned int lane_mbps; /* per lane */
- u32 channel;
- u32 lanes;
- u32 format;
- u16 input_div;
- u16 feedback_div;
- unsigned long mode_flags;
-
- const struct dw_mipi_dsi_plat_data *pdata;
-};
-
-enum dw_mipi_dsi_mode {
- DW_MIPI_DSI_CMD_MODE,
- DW_MIPI_DSI_VID_MODE,
-};
-
-struct dphy_pll_testdin_map {
- unsigned int max_mbps;
- u8 testdin;
-};
-
-/* The table is based on 27MHz DPHY pll reference clock. */
-static const struct dphy_pll_testdin_map dptdin_map[] = {
- { 90, 0x00}, { 100, 0x10}, { 110, 0x20}, { 130, 0x01},
- { 140, 0x11}, { 150, 0x21}, { 170, 0x02}, { 180, 0x12},
- { 200, 0x22}, { 220, 0x03}, { 240, 0x13}, { 250, 0x23},
- { 270, 0x04}, { 300, 0x14}, { 330, 0x05}, { 360, 0x15},
- { 400, 0x25}, { 450, 0x06}, { 500, 0x16}, { 550, 0x07},
- { 600, 0x17}, { 650, 0x08}, { 700, 0x18}, { 750, 0x09},
- { 800, 0x19}, { 850, 0x29}, { 900, 0x39}, { 950, 0x0a},
- {1000, 0x1a}, {1050, 0x2a}, {1100, 0x3a}, {1150, 0x0b},
- {1200, 0x1b}, {1250, 0x2b}, {1300, 0x3b}, {1350, 0x0c},
- {1400, 0x1c}, {1450, 0x2c}, {1500, 0x3c}
-};
-
-static int max_mbps_to_testdin(unsigned int max_mbps)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dptdin_map); i++)
- if (dptdin_map[i].max_mbps > max_mbps)
- return dptdin_map[i].testdin;
-
- return -EINVAL;
-}
-
-/*
- * The controller should generate 2 frames before
- * preparing the peripheral.
- */
-static void dw_mipi_dsi_wait_for_two_frames(struct drm_display_mode *mode)
-{
- int refresh, two_frames;
-
- refresh = drm_mode_vrefresh(mode);
- two_frames = DIV_ROUND_UP(MSEC_PER_SEC, refresh) * 2;
- msleep(two_frames);
-}
-
-static inline struct dw_mipi_dsi *host_to_dsi(struct mipi_dsi_host *host)
-{
- return container_of(host, struct dw_mipi_dsi, dsi_host);
-}
-
-static inline struct dw_mipi_dsi *con_to_dsi(struct drm_connector *con)
-{
- return container_of(con, struct dw_mipi_dsi, connector);
-}
-
-static inline struct dw_mipi_dsi *encoder_to_dsi(struct drm_encoder *encoder)
-{
- return container_of(encoder, struct dw_mipi_dsi, encoder);
-}
-
-static inline void dsi_write(struct dw_mipi_dsi *dsi, u32 reg, u32 val)
-{
- writel(val, dsi->base + reg);
-}
-
-static inline u32 dsi_read(struct dw_mipi_dsi *dsi, u32 reg)
-{
- return readl(dsi->base + reg);
-}
-
-static void dw_mipi_dsi_phy_write(struct dw_mipi_dsi *dsi, u8 test_code,
- u8 test_data)
-{
- /*
- * With the falling edge on TESTCLK, the TESTDIN[7:0] signal content
- * is latched internally as the current test code. Test data is
- * programmed internally by rising edge on TESTCLK.
- */
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
-
- dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_TESTEN | PHY_TESTDOUT(0) |
- PHY_TESTDIN(test_code));
-
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLK | PHY_UNTESTCLR);
-
- dsi_write(dsi, DSI_PHY_TST_CTRL1, PHY_UNTESTEN | PHY_TESTDOUT(0) |
- PHY_TESTDIN(test_data));
-
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
-}
-
-/**
- * ns2bc - Nanoseconds to byte clock cycles
- */
-static inline unsigned int ns2bc(struct dw_mipi_dsi *dsi, int ns)
-{
- return DIV_ROUND_UP(ns * dsi->lane_mbps / 8, 1000);
-}
-
-/**
- * ns2ui - Nanoseconds to UI time periods
- */
-static inline unsigned int ns2ui(struct dw_mipi_dsi *dsi, int ns)
-{
- return DIV_ROUND_UP(ns * dsi->lane_mbps, 1000);
-}
-
-static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
-{
- int ret, testdin, vco, val;
-
- vco = (dsi->lane_mbps < 200) ? 0 : (dsi->lane_mbps + 100) / 200;
-
- testdin = max_mbps_to_testdin(dsi->lane_mbps);
- if (testdin < 0) {
- DRM_DEV_ERROR(dsi->dev,
- "failed to get testdin for %dmbps lane clock\n",
- dsi->lane_mbps);
- return testdin;
- }
-
- /* Start by clearing PHY state */
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLR);
- dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_UNTESTCLR);
-
- ret = clk_prepare_enable(dsi->phy_cfg_clk);
- if (ret) {
- DRM_DEV_ERROR(dsi->dev, "Failed to enable phy_cfg_clk\n");
- return ret;
- }
-
- dw_mipi_dsi_phy_write(dsi, 0x10, BYPASS_VCO_RANGE |
- VCO_RANGE_CON_SEL(vco) |
- VCO_IN_CAP_CON_LOW |
- REF_BIAS_CUR_SEL);
-
- dw_mipi_dsi_phy_write(dsi, 0x11, CP_CURRENT_3MA);
- dw_mipi_dsi_phy_write(dsi, 0x12, CP_PROGRAM_EN | LPF_PROGRAM_EN |
- LPF_RESISTORS_20_KOHM);
-
- dw_mipi_dsi_phy_write(dsi, 0x44, HSFREQRANGE_SEL(testdin));
-
- dw_mipi_dsi_phy_write(dsi, 0x17, INPUT_DIVIDER(dsi->input_div));
- dw_mipi_dsi_phy_write(dsi, 0x18, LOOP_DIV_LOW_SEL(dsi->feedback_div) |
- LOW_PROGRAM_EN);
- dw_mipi_dsi_phy_write(dsi, 0x18, LOOP_DIV_HIGH_SEL(dsi->feedback_div) |
- HIGH_PROGRAM_EN);
- dw_mipi_dsi_phy_write(dsi, 0x19, PLL_LOOP_DIV_EN | PLL_INPUT_DIV_EN);
-
- dw_mipi_dsi_phy_write(dsi, 0x22, LOW_PROGRAM_EN |
- BIASEXTR_SEL(BIASEXTR_127_7));
- dw_mipi_dsi_phy_write(dsi, 0x22, HIGH_PROGRAM_EN |
- BANDGAP_SEL(BANDGAP_96_10));
-
- dw_mipi_dsi_phy_write(dsi, 0x20, POWER_CONTROL | INTERNAL_REG_CURRENT |
- BIAS_BLOCK_ON | BANDGAP_ON);
-
- dw_mipi_dsi_phy_write(dsi, 0x21, TER_RESISTOR_LOW | TER_CAL_DONE |
- SETRD_MAX | TER_RESISTORS_ON);
- dw_mipi_dsi_phy_write(dsi, 0x21, TER_RESISTOR_HIGH | LEVEL_SHIFTERS_ON |
- SETRD_MAX | POWER_MANAGE |
- TER_RESISTORS_ON);
-
- dw_mipi_dsi_phy_write(dsi, 0x60, TLP_PROGRAM_EN | ns2bc(dsi, 500));
- dw_mipi_dsi_phy_write(dsi, 0x61, THS_PRE_PROGRAM_EN | ns2ui(dsi, 40));
- dw_mipi_dsi_phy_write(dsi, 0x62, THS_ZERO_PROGRAM_EN | ns2bc(dsi, 300));
- dw_mipi_dsi_phy_write(dsi, 0x63, THS_PRE_PROGRAM_EN | ns2ui(dsi, 100));
- dw_mipi_dsi_phy_write(dsi, 0x64, BIT(5) | ns2bc(dsi, 100));
- dw_mipi_dsi_phy_write(dsi, 0x65, BIT(5) | (ns2bc(dsi, 60) + 7));
-
- dw_mipi_dsi_phy_write(dsi, 0x70, TLP_PROGRAM_EN | ns2bc(dsi, 500));
- dw_mipi_dsi_phy_write(dsi, 0x71,
- THS_PRE_PROGRAM_EN | (ns2ui(dsi, 50) + 5));
- dw_mipi_dsi_phy_write(dsi, 0x72,
- THS_ZERO_PROGRAM_EN | (ns2bc(dsi, 140) + 2));
- dw_mipi_dsi_phy_write(dsi, 0x73,
- THS_PRE_PROGRAM_EN | (ns2ui(dsi, 60) + 8));
- dw_mipi_dsi_phy_write(dsi, 0x74, BIT(5) | ns2bc(dsi, 100));
-
- dsi_write(dsi, DSI_PHY_RSTZ, PHY_ENFORCEPLL | PHY_ENABLECLK |
- PHY_UNRSTZ | PHY_UNSHUTDOWNZ);
-
- ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
- val, val & LOCK, 1000, PHY_STATUS_TIMEOUT_US);
- if (ret < 0) {
- DRM_DEV_ERROR(dsi->dev, "failed to wait for phy lock state\n");
- goto phy_init_end;
- }
-
- ret = readl_poll_timeout(dsi->base + DSI_PHY_STATUS,
- val, val & STOP_STATE_CLK_LANE, 1000,
- PHY_STATUS_TIMEOUT_US);
- if (ret < 0)
- DRM_DEV_ERROR(dsi->dev,
- "failed to wait for phy clk lane stop state\n");
-
-phy_init_end:
- clk_disable_unprepare(dsi->phy_cfg_clk);
-
- return ret;
-}
-
-static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode)
-{
- unsigned int i, pre;
- unsigned long mpclk, pllref, tmp;
- unsigned int m = 1, n = 1, target_mbps = 1000;
- unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps;
- int bpp;
-
- bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- if (bpp < 0) {
- DRM_DEV_ERROR(dsi->dev,
- "failed to get bpp for pixel format %d\n",
- dsi->format);
- return bpp;
- }
-
- mpclk = DIV_ROUND_UP(mode->clock, MSEC_PER_SEC);
- if (mpclk) {
- /* take 1 / 0.8, since mbps must big than bandwidth of RGB */
- tmp = mpclk * (bpp / dsi->lanes) * 10 / 8;
- if (tmp < max_mbps)
- target_mbps = tmp;
- else
- DRM_DEV_ERROR(dsi->dev,
- "DPHY clock frequency is out of range\n");
- }
-
- pllref = DIV_ROUND_UP(clk_get_rate(dsi->pllref_clk), USEC_PER_SEC);
- tmp = pllref;
-
- /*
- * The limits on the PLL divisor are:
- *
- * 5MHz <= (pllref / n) <= 40MHz
- *
- * we walk over these values in descreasing order so that if we hit
- * an exact match for target_mbps it is more likely that "m" will be
- * even.
- *
- * TODO: ensure that "m" is even after this loop.
- */
- for (i = pllref / 5; i > (pllref / 40); i--) {
- pre = pllref / i;
- if ((tmp > (target_mbps % pre)) && (target_mbps / pre < 512)) {
- tmp = target_mbps % pre;
- n = i;
- m = target_mbps / pre;
- }
- if (tmp == 0)
- break;
- }
-
- dsi->lane_mbps = pllref / n * m;
- dsi->input_div = n;
- dsi->feedback_div = m;
-
- return 0;
-}
-
-static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct dw_mipi_dsi *dsi = host_to_dsi(host);
-
- if (device->lanes > dsi->pdata->max_data_lanes) {
- DRM_DEV_ERROR(dsi->dev,
- "the number of data lanes(%u) is too many\n",
- device->lanes);
- return -EINVAL;
- }
-
- dsi->lanes = device->lanes;
- dsi->channel = device->channel;
- dsi->format = device->format;
- dsi->mode_flags = device->mode_flags;
- dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (!IS_ERR(dsi->panel))
- return drm_panel_attach(dsi->panel, &dsi->connector);
-
- return -EINVAL;
-}
-
-static int dw_mipi_dsi_host_detach(struct mipi_dsi_host *host,
- struct mipi_dsi_device *device)
-{
- struct dw_mipi_dsi *dsi = host_to_dsi(host);
-
- drm_panel_detach(dsi->panel);
-
- return 0;
-}
-
-static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
- const struct mipi_dsi_msg *msg)
-{
- bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM;
- u32 val = 0;
-
- if (msg->flags & MIPI_DSI_MSG_REQ_ACK)
- val |= EN_ACK_RQST;
- if (lpm)
- val |= CMD_MODE_ALL_LP;
-
- dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
- dsi_write(dsi, DSI_CMD_MODE_CFG, val);
-}
-
-static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)
-{
- int ret;
- u32 val, mask;
-
- ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
- val, !(val & GEN_CMD_FULL), 1000,
- CMD_PKT_STATUS_TIMEOUT_US);
- if (ret < 0) {
- DRM_DEV_ERROR(dsi->dev,
- "failed to get available command FIFO\n");
- return ret;
- }
-
- dsi_write(dsi, DSI_GEN_HDR, hdr_val);
-
- mask = GEN_CMD_EMPTY | GEN_PLD_W_EMPTY;
- ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
- val, (val & mask) == mask,
- 1000, CMD_PKT_STATUS_TIMEOUT_US);
- if (ret < 0) {
- DRM_DEV_ERROR(dsi->dev, "failed to write command FIFO\n");
- return ret;
- }
-
- return 0;
-}
-
-static int dw_mipi_dsi_dcs_short_write(struct dw_mipi_dsi *dsi,
- const struct mipi_dsi_msg *msg)
-{
- const u8 *tx_buf = msg->tx_buf;
- u16 data = 0;
- u32 val;
-
- if (msg->tx_len > 0)
- data |= tx_buf[0];
- if (msg->tx_len > 1)
- data |= tx_buf[1] << 8;
-
- if (msg->tx_len > 2) {
- DRM_DEV_ERROR(dsi->dev,
- "too long tx buf length %zu for short write\n",
- msg->tx_len);
- return -EINVAL;
- }
-
- val = GEN_HDATA(data) | GEN_HTYPE(msg->type);
- return dw_mipi_dsi_gen_pkt_hdr_write(dsi, val);
-}
-
-static int dw_mipi_dsi_dcs_long_write(struct dw_mipi_dsi *dsi,
- const struct mipi_dsi_msg *msg)
-{
- const u8 *tx_buf = msg->tx_buf;
- int len = msg->tx_len, pld_data_bytes = sizeof(u32), ret;
- u32 hdr_val = GEN_HDATA(msg->tx_len) | GEN_HTYPE(msg->type);
- u32 remainder;
- u32 val;
-
- if (msg->tx_len < 3) {
- DRM_DEV_ERROR(dsi->dev,
- "wrong tx buf length %zu for long write\n",
- msg->tx_len);
- return -EINVAL;
- }
-
- while (DIV_ROUND_UP(len, pld_data_bytes)) {
- if (len < pld_data_bytes) {
- remainder = 0;
- memcpy(&remainder, tx_buf, len);
- dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
- len = 0;
- } else {
- memcpy(&remainder, tx_buf, pld_data_bytes);
- dsi_write(dsi, DSI_GEN_PLD_DATA, remainder);
- tx_buf += pld_data_bytes;
- len -= pld_data_bytes;
- }
-
- ret = readl_poll_timeout(dsi->base + DSI_CMD_PKT_STATUS,
- val, !(val & GEN_PLD_W_FULL), 1000,
- CMD_PKT_STATUS_TIMEOUT_US);
- if (ret < 0) {
- DRM_DEV_ERROR(dsi->dev,
- "failed to get available write payload FIFO\n");
- return ret;
- }
- }
-
- return dw_mipi_dsi_gen_pkt_hdr_write(dsi, hdr_val);
-}
-
-static ssize_t dw_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
- const struct mipi_dsi_msg *msg)
-{
- struct dw_mipi_dsi *dsi = host_to_dsi(host);
- int ret;
-
- dw_mipi_message_config(dsi, msg);
-
- switch (msg->type) {
- case MIPI_DSI_DCS_SHORT_WRITE:
- case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
- ret = dw_mipi_dsi_dcs_short_write(dsi, msg);
- break;
- case MIPI_DSI_DCS_LONG_WRITE:
- ret = dw_mipi_dsi_dcs_long_write(dsi, msg);
- break;
- default:
- DRM_DEV_ERROR(dsi->dev, "unsupported message type 0x%02x\n",
- msg->type);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-static const struct mipi_dsi_host_ops dw_mipi_dsi_host_ops = {
- .attach = dw_mipi_dsi_host_attach,
- .detach = dw_mipi_dsi_host_detach,
- .transfer = dw_mipi_dsi_host_transfer,
-};
-
-static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
-{
- u32 val;
-
- val = ENABLE_LOW_POWER;
-
- if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
- val |= VID_MODE_TYPE_BURST;
- else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- val |= VID_MODE_TYPE_NON_BURST_SYNC_PULSES;
- else
- val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;
-
- dsi_write(dsi, DSI_VID_MODE_CFG, val);
-}
-
-static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
- enum dw_mipi_dsi_mode mode)
-{
- if (mode == DW_MIPI_DSI_CMD_MODE) {
- dsi_write(dsi, DSI_PWR_UP, RESET);
- dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
- dsi_write(dsi, DSI_PWR_UP, POWERUP);
- } else {
- dsi_write(dsi, DSI_PWR_UP, RESET);
- dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
- dw_mipi_dsi_video_mode_config(dsi);
- dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
- dsi_write(dsi, DSI_PWR_UP, POWERUP);
- }
-}
-
-static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi)
-{
- dsi_write(dsi, DSI_PWR_UP, RESET);
- dsi_write(dsi, DSI_PHY_RSTZ, PHY_RSTZ);
-}
-
-static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
-{
- /*
- * The maximum permitted escape clock is 20MHz and it is derived from
- * lanebyteclk, which is running at "lane_mbps / 8". Thus we want:
- *
- * (lane_mbps >> 3) / esc_clk_division < 20
- * which is:
- * (lane_mbps >> 3) / 20 > esc_clk_division
- */
- u32 esc_clk_division = (dsi->lane_mbps >> 3) / 20 + 1;
-
- dsi_write(dsi, DSI_PWR_UP, RESET);
- dsi_write(dsi, DSI_PHY_RSTZ, PHY_DISFORCEPLL | PHY_DISABLECLK
- | PHY_RSTZ | PHY_SHUTDOWNZ);
- dsi_write(dsi, DSI_CLKMGR_CFG, TO_CLK_DIVIDSION(10) |
- TX_ESC_CLK_DIVIDSION(esc_clk_division));
-}
-
-static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode)
-{
- u32 val = 0, color = 0;
-
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB888:
- color = DPI_COLOR_CODING_24BIT;
- break;
- case MIPI_DSI_FMT_RGB666:
- color = DPI_COLOR_CODING_18BIT_2 | EN18_LOOSELY;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- color = DPI_COLOR_CODING_18BIT_1;
- break;
- case MIPI_DSI_FMT_RGB565:
- color = DPI_COLOR_CODING_16BIT_1;
- break;
- }
-
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- val |= VSYNC_ACTIVE_LOW;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- val |= HSYNC_ACTIVE_LOW;
-
- dsi_write(dsi, DSI_DPI_VCID, DPI_VID(dsi->channel));
- dsi_write(dsi, DSI_DPI_COLOR_CODING, color);
- dsi_write(dsi, DSI_DPI_CFG_POL, val);
- dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(4)
- | INVACT_LPCMD_TIME(4));
-}
-
-static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)
-{
- dsi_write(dsi, DSI_PCKHDL_CFG, EN_CRC_RX | EN_ECC_RX | EN_BTA);
-}
-
-static void dw_mipi_dsi_video_packet_config(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode)
-{
- dsi_write(dsi, DSI_VID_PKT_SIZE, VID_PKT_SIZE(mode->hdisplay));
-}
-
-static void dw_mipi_dsi_command_mode_config(struct dw_mipi_dsi *dsi)
-{
- dsi_write(dsi, DSI_TO_CNT_CFG, HSTX_TO_CNT(1000) | LPRX_TO_CNT(1000));
- dsi_write(dsi, DSI_BTA_TO_CNT, 0xd00);
- dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
-}
-
-/* Get lane byte clock cycles. */
-static u32 dw_mipi_dsi_get_hcomponent_lbcc(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode,
- u32 hcomponent)
-{
- u32 frac, lbcc;
-
- lbcc = hcomponent * dsi->lane_mbps * MSEC_PER_SEC / 8;
-
- frac = lbcc % mode->clock;
- lbcc = lbcc / mode->clock;
- if (frac)
- lbcc++;
-
- return lbcc;
-}
-
-static void dw_mipi_dsi_line_timer_config(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode)
-{
- u32 htotal, hsa, hbp, lbcc;
-
- htotal = mode->htotal;
- hsa = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
-
- lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, htotal);
- dsi_write(dsi, DSI_VID_HLINE_TIME, lbcc);
-
- lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hsa);
- dsi_write(dsi, DSI_VID_HSA_TIME, lbcc);
-
- lbcc = dw_mipi_dsi_get_hcomponent_lbcc(dsi, mode, hbp);
- dsi_write(dsi, DSI_VID_HBP_TIME, lbcc);
-}
-
-static void dw_mipi_dsi_vertical_timing_config(struct dw_mipi_dsi *dsi,
- struct drm_display_mode *mode)
-{
- u32 vactive, vsa, vfp, vbp;
-
- vactive = mode->vdisplay;
- vsa = mode->vsync_end - mode->vsync_start;
- vfp = mode->vsync_start - mode->vdisplay;
- vbp = mode->vtotal - mode->vsync_end;
-
- dsi_write(dsi, DSI_VID_VACTIVE_LINES, vactive);
- dsi_write(dsi, DSI_VID_VSA_LINES, vsa);
- dsi_write(dsi, DSI_VID_VFP_LINES, vfp);
- dsi_write(dsi, DSI_VID_VBP_LINES, vbp);
-}
-
-static void dw_mipi_dsi_dphy_timing_config(struct dw_mipi_dsi *dsi)
-{
- dsi_write(dsi, DSI_PHY_TMR_CFG, PHY_HS2LP_TIME(0x40)
- | PHY_LP2HS_TIME(0x40) | MAX_RD_TIME(10000));
-
- dsi_write(dsi, DSI_PHY_TMR_LPCLK_CFG, PHY_CLKHS2LP_TIME(0x40)
- | PHY_CLKLP2HS_TIME(0x40));
-}
-
-static void dw_mipi_dsi_dphy_interface_config(struct dw_mipi_dsi *dsi)
-{
- dsi_write(dsi, DSI_PHY_IF_CFG, PHY_STOP_WAIT_TIME(0x20) |
- N_LANES(dsi->lanes));
-}
-
-static void dw_mipi_dsi_clear_err(struct dw_mipi_dsi *dsi)
-{
- dsi_read(dsi, DSI_INT_ST0);
- dsi_read(dsi, DSI_INT_ST1);
- dsi_write(dsi, DSI_INT_MSK0, 0);
- dsi_write(dsi, DSI_INT_MSK1, 0);
-}
-
-static void dw_mipi_dsi_encoder_disable(struct drm_encoder *encoder)
-{
- struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
-
- if (dsi->dpms_mode != DRM_MODE_DPMS_ON)
- return;
-
- if (clk_prepare_enable(dsi->pclk)) {
- DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk\n");
- return;
- }
-
- drm_panel_disable(dsi->panel);
-
- dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_CMD_MODE);
- drm_panel_unprepare(dsi->panel);
-
- dw_mipi_dsi_disable(dsi);
- pm_runtime_put(dsi->dev);
- clk_disable_unprepare(dsi->pclk);
- dsi->dpms_mode = DRM_MODE_DPMS_OFF;
-}
-
-static void dw_mipi_dsi_encoder_enable(struct drm_encoder *encoder)
-{
- struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- const struct dw_mipi_dsi_plat_data *pdata = dsi->pdata;
- int mux = drm_of_encoder_active_endpoint_id(dsi->dev->of_node, encoder);
- u32 val;
- int ret;
-
- ret = dw_mipi_dsi_get_lane_bps(dsi, mode);
- if (ret < 0)
- return;
-
- if (dsi->dpms_mode == DRM_MODE_DPMS_ON)
- return;
-
- if (clk_prepare_enable(dsi->pclk)) {
- DRM_DEV_ERROR(dsi->dev, "Failed to enable pclk\n");
- return;
- }
-
- pm_runtime_get_sync(dsi->dev);
- dw_mipi_dsi_init(dsi);
- dw_mipi_dsi_dpi_config(dsi, mode);
- dw_mipi_dsi_packet_handler_config(dsi);
- dw_mipi_dsi_video_mode_config(dsi);
- dw_mipi_dsi_video_packet_config(dsi, mode);
- dw_mipi_dsi_command_mode_config(dsi);
- dw_mipi_dsi_line_timer_config(dsi, mode);
- dw_mipi_dsi_vertical_timing_config(dsi, mode);
- dw_mipi_dsi_dphy_timing_config(dsi);
- dw_mipi_dsi_dphy_interface_config(dsi);
- dw_mipi_dsi_clear_err(dsi);
-
- /*
- * For the RK3399, the clk of grf must be enabled before writing grf
- * register. And for RK3288 or other soc, this grf_clk must be NULL,
- * the clk_prepare_enable return true directly.
- */
- ret = clk_prepare_enable(dsi->grf_clk);
- if (ret) {
- DRM_DEV_ERROR(dsi->dev, "Failed to enable grf_clk: %d\n", ret);
- return;
- }
-
- if (pdata->grf_dsi0_mode_reg)
- regmap_write(dsi->grf_regmap, pdata->grf_dsi0_mode_reg,
- pdata->grf_dsi0_mode);
-
- dw_mipi_dsi_phy_init(dsi);
- dw_mipi_dsi_wait_for_two_frames(mode);
-
- dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_CMD_MODE);
- if (drm_panel_prepare(dsi->panel))
- DRM_DEV_ERROR(dsi->dev, "failed to prepare panel\n");
-
- dw_mipi_dsi_set_mode(dsi, DW_MIPI_DSI_VID_MODE);
- drm_panel_enable(dsi->panel);
-
- clk_disable_unprepare(dsi->pclk);
-
- if (mux)
- val = pdata->dsi0_en_bit | (pdata->dsi0_en_bit << 16);
- else
- val = pdata->dsi0_en_bit << 16;
-
- regmap_write(dsi->grf_regmap, pdata->grf_switch_reg, val);
- DRM_DEV_DEBUG(dsi->dev,
- "vop %s output to dsi0\n", (mux) ? "LIT" : "BIG");
- dsi->dpms_mode = DRM_MODE_DPMS_ON;
-
- clk_disable_unprepare(dsi->grf_clk);
-}
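For reference, the NULL-clock behavior the comment above relies on is part of the common clk API: clk_prepare_enable() on a NULL clock is a successful no-op returning 0. A minimal sketch of that optional-clock idiom; example_enable_optional_clk is a hypothetical name, not a function from this driver:

static int example_enable_optional_clk(struct clk *example_clk)
{
	/* a NULL clock is a no-op: returns 0 without touching hardware */
	return clk_prepare_enable(example_clk);
}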
-
-static int
-dw_mipi_dsi_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
- struct dw_mipi_dsi *dsi = encoder_to_dsi(encoder);
-
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB888:
- s->output_mode = ROCKCHIP_OUT_MODE_P888;
- break;
- case MIPI_DSI_FMT_RGB666:
- s->output_mode = ROCKCHIP_OUT_MODE_P666;
- break;
- case MIPI_DSI_FMT_RGB565:
- s->output_mode = ROCKCHIP_OUT_MODE_P565;
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
- s->output_type = DRM_MODE_CONNECTOR_DSI;
-
- return 0;
-}
-
-static const struct drm_encoder_helper_funcs
-dw_mipi_dsi_encoder_helper_funcs = {
- .enable = dw_mipi_dsi_encoder_enable,
- .disable = dw_mipi_dsi_encoder_disable,
- .atomic_check = dw_mipi_dsi_encoder_atomic_check,
-};
-
-static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-static int dw_mipi_dsi_connector_get_modes(struct drm_connector *connector)
-{
- struct dw_mipi_dsi *dsi = con_to_dsi(connector);
-
- return drm_panel_get_modes(dsi->panel);
-}
-
-static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
- .get_modes = dw_mipi_dsi_connector_get_modes,
-};
-
-static void dw_mipi_dsi_drm_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_funcs dw_mipi_dsi_atomic_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = dw_mipi_dsi_drm_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int dw_mipi_dsi_register(struct drm_device *drm,
- struct dw_mipi_dsi *dsi)
-{
- struct drm_encoder *encoder = &dsi->encoder;
- struct drm_connector *connector = &dsi->connector;
- struct device *dev = dsi->dev;
- int ret;
-
- encoder->possible_crtcs = drm_of_find_possible_crtcs(drm,
- dev->of_node);
- /*
- * If we failed to find the CRTC(s) which this encoder is
- * supposed to be connected to, it's because the CRTC has
- * not been registered yet. Defer probing, and hope that
- * the required CRTC is added later.
- */
- if (encoder->possible_crtcs == 0)
- return -EPROBE_DEFER;
-
- drm_encoder_helper_add(&dsi->encoder,
- &dw_mipi_dsi_encoder_helper_funcs);
- ret = drm_encoder_init(drm, &dsi->encoder, &dw_mipi_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
- if (ret) {
- DRM_DEV_ERROR(dev, "Failed to initialize encoder with drm\n");
- return ret;
- }
-
- drm_connector_helper_add(connector,
- &dw_mipi_dsi_connector_helper_funcs);
-
- drm_connector_init(drm, &dsi->connector,
- &dw_mipi_dsi_atomic_connector_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
- drm_connector_attach_encoder(connector, encoder);
-
- return 0;
-}
-
-static int rockchip_mipi_parse_dt(struct dw_mipi_dsi *dsi)
-{
- struct device_node *np = dsi->dev->of_node;
-
- dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
- if (IS_ERR(dsi->grf_regmap)) {
- DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n");
- return PTR_ERR(dsi->grf_regmap);
- }
-
- return 0;
-}
-
-static struct dw_mipi_dsi_plat_data rk3288_mipi_dsi_drv_data = {
- .dsi0_en_bit = RK3288_DSI0_SEL_VOP_LIT,
- .dsi1_en_bit = RK3288_DSI1_SEL_VOP_LIT,
- .grf_switch_reg = RK3288_GRF_SOC_CON6,
- .max_data_lanes = 4,
-};
-
-static struct dw_mipi_dsi_plat_data rk3399_mipi_dsi_drv_data = {
- .dsi0_en_bit = RK3399_DSI0_SEL_VOP_LIT,
- .dsi1_en_bit = RK3399_DSI1_SEL_VOP_LIT,
- .grf_switch_reg = RK3399_GRF_SOC_CON20,
- .grf_dsi0_mode = RK3399_GRF_DSI_MODE,
- .grf_dsi0_mode_reg = RK3399_GRF_SOC_CON22,
- .flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
- .max_data_lanes = 4,
-};
-
-static const struct of_device_id dw_mipi_dsi_dt_ids[] = {
- {
- .compatible = "rockchip,rk3288-mipi-dsi",
- .data = &rk3288_mipi_dsi_drv_data,
- }, {
- .compatible = "rockchip,rk3399-mipi-dsi",
- .data = &rk3399_mipi_dsi_drv_data,
- },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, dw_mipi_dsi_dt_ids);
-
-static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
- void *data)
-{
- const struct of_device_id *of_id =
- of_match_device(dw_mipi_dsi_dt_ids, dev);
- const struct dw_mipi_dsi_plat_data *pdata = of_id->data;
- struct platform_device *pdev = to_platform_device(dev);
- struct reset_control *apb_rst;
- struct drm_device *drm = data;
- struct dw_mipi_dsi *dsi;
- struct resource *res;
- int ret;
-
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
-
- dsi->dev = dev;
- dsi->pdata = pdata;
- dsi->dpms_mode = DRM_MODE_DPMS_OFF;
-
- ret = rockchip_mipi_parse_dt(dsi);
- if (ret)
- return ret;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(dsi->base))
- return PTR_ERR(dsi->base);
-
- dsi->pllref_clk = devm_clk_get(dev, "ref");
- if (IS_ERR(dsi->pllref_clk)) {
- ret = PTR_ERR(dsi->pllref_clk);
- DRM_DEV_ERROR(dev,
- "Unable to get pll reference clock: %d\n", ret);
- return ret;
- }
-
- dsi->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(dsi->pclk)) {
- ret = PTR_ERR(dsi->pclk);
- DRM_DEV_ERROR(dev, "Unable to get pclk: %d\n", ret);
- return ret;
- }
-
- /*
- * Note that the reset was not defined in the initial device tree, so
- * we have to be prepared for it not being found.
- */
- apb_rst = devm_reset_control_get(dev, "apb");
- if (IS_ERR(apb_rst)) {
- ret = PTR_ERR(apb_rst);
- if (ret == -ENOENT) {
- apb_rst = NULL;
- } else {
- DRM_DEV_ERROR(dev,
- "Unable to get reset control: %d\n", ret);
- return ret;
- }
- }
-
- if (apb_rst) {
- ret = clk_prepare_enable(dsi->pclk);
- if (ret) {
- DRM_DEV_ERROR(dev, "Failed to enable pclk\n");
- return ret;
- }
-
- reset_control_assert(apb_rst);
- usleep_range(10, 20);
- reset_control_deassert(apb_rst);
-
- clk_disable_unprepare(dsi->pclk);
- }
-
- if (pdata->flags & DW_MIPI_NEEDS_PHY_CFG_CLK) {
- dsi->phy_cfg_clk = devm_clk_get(dev, "phy_cfg");
- if (IS_ERR(dsi->phy_cfg_clk)) {
- ret = PTR_ERR(dsi->phy_cfg_clk);
- DRM_DEV_ERROR(dev,
- "Unable to get phy_cfg_clk: %d\n", ret);
- return ret;
- }
- }
-
- if (pdata->flags & DW_MIPI_NEEDS_GRF_CLK) {
- dsi->grf_clk = devm_clk_get(dev, "grf");
- if (IS_ERR(dsi->grf_clk)) {
- ret = PTR_ERR(dsi->grf_clk);
- DRM_DEV_ERROR(dev, "Unable to get grf_clk: %d\n", ret);
- return ret;
- }
- }
-
- ret = clk_prepare_enable(dsi->pllref_clk);
- if (ret) {
- DRM_DEV_ERROR(dev, "Failed to enable pllref_clk\n");
- return ret;
- }
-
- ret = dw_mipi_dsi_register(drm, dsi);
- if (ret) {
- DRM_DEV_ERROR(dev, "Failed to register mipi_dsi: %d\n", ret);
- goto err_pllref;
- }
-
- dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
- dsi->dsi_host.dev = dev;
- ret = mipi_dsi_host_register(&dsi->dsi_host);
- if (ret) {
- DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
- goto err_cleanup;
- }
-
- if (!dsi->panel) {
- ret = -EPROBE_DEFER;
- goto err_mipi_dsi_host;
- }
-
- dev_set_drvdata(dev, dsi);
- pm_runtime_enable(dev);
- return 0;
-
-err_mipi_dsi_host:
- mipi_dsi_host_unregister(&dsi->dsi_host);
-err_cleanup:
- dsi->connector.funcs->destroy(&dsi->connector);
- dsi->encoder.funcs->destroy(&dsi->encoder);
-err_pllref:
- clk_disable_unprepare(dsi->pllref_clk);
- return ret;
-}
-
-static void dw_mipi_dsi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct dw_mipi_dsi *dsi = dev_get_drvdata(dev);
-
- mipi_dsi_host_unregister(&dsi->dsi_host);
- pm_runtime_disable(dev);
-
- dsi->connector.funcs->destroy(&dsi->connector);
- dsi->encoder.funcs->destroy(&dsi->encoder);
-
- clk_disable_unprepare(dsi->pllref_clk);
-}
-
-static const struct component_ops dw_mipi_dsi_ops = {
- .bind = dw_mipi_dsi_bind,
- .unbind = dw_mipi_dsi_unbind,
-};
-
-static int dw_mipi_dsi_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &dw_mipi_dsi_ops);
-}
-
-static int dw_mipi_dsi_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &dw_mipi_dsi_ops);
- return 0;
-}
-
-struct platform_driver dw_mipi_dsi_driver = {
- .probe = dw_mipi_dsi_probe,
- .remove = dw_mipi_dsi_remove,
- .driver = {
- .of_match_table = dw_mipi_dsi_dt_ids,
- .name = DRIVER_NAME,
- },
-};
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 11309a2a4e43..89c63cfde5c8 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -11,6 +11,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <drm/drm_of.h>
@@ -24,6 +25,24 @@
#define RK3288_GRF_SOC_CON6 0x025C
#define RK3288_HDMI_LCDC_SEL BIT(4)
+#define RK3328_GRF_SOC_CON2 0x0408
+
+#define RK3328_HDMI_SDAIN_MSK BIT(11)
+#define RK3328_HDMI_SCLIN_MSK BIT(10)
+#define RK3328_HDMI_HPD_IOE BIT(2)
+#define RK3328_GRF_SOC_CON3 0x040c
+/* These need to be unset if hdmi or i2c should control the voltage */
+#define RK3328_HDMI_SDA5V_GRF BIT(15)
+#define RK3328_HDMI_SCL5V_GRF BIT(14)
+#define RK3328_HDMI_HPD5V_GRF BIT(13)
+#define RK3328_HDMI_CEC5V_GRF BIT(12)
+#define RK3328_GRF_SOC_CON4 0x0410
+#define RK3328_HDMI_HPD_SARADC BIT(13)
+#define RK3328_HDMI_CEC_5V BIT(11)
+#define RK3328_HDMI_SDA_5V BIT(10)
+#define RK3328_HDMI_SCL_5V BIT(9)
+#define RK3328_HDMI_HPD_5V BIT(8)
+
#define RK3399_GRF_SOC_CON20 0x6250
#define RK3399_HDMI_LCDC_SEL BIT(6)
@@ -36,7 +55,7 @@
* @lcdsel_lit: reg value of selecting vop little for HDMI
*/
struct rockchip_hdmi_chip_data {
- u32 lcdsel_grf_reg;
+ int lcdsel_grf_reg;
u32 lcdsel_big;
u32 lcdsel_lit;
};
@@ -49,6 +68,7 @@ struct rockchip_hdmi {
struct clk *vpll_clk;
struct clk *grf_clk;
struct dw_hdmi *hdmi;
+ struct phy *phy;
};
#define to_rockchip_hdmi(x) container_of(x, struct rockchip_hdmi, x)
@@ -245,6 +265,9 @@ static void dw_hdmi_rockchip_encoder_enable(struct drm_encoder *encoder)
u32 val;
int ret;
+ if (hdmi->chip_data->lcdsel_grf_reg < 0)
+ return;
+
ret = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
if (ret)
val = hdmi->chip_data->lcdsel_lit;
@@ -287,6 +310,66 @@ static const struct drm_encoder_helper_funcs dw_hdmi_rockchip_encoder_helper_fun
.atomic_check = dw_hdmi_rockchip_encoder_atomic_check,
};
+static int dw_hdmi_rockchip_genphy_init(struct dw_hdmi *dw_hdmi, void *data,
+ struct drm_display_mode *mode)
+{
+ struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+
+ return phy_power_on(hdmi->phy);
+}
+
+static void dw_hdmi_rockchip_genphy_disable(struct dw_hdmi *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+
+ phy_power_off(hdmi->phy);
+}
+
+static enum drm_connector_status
+dw_hdmi_rk3328_read_hpd(struct dw_hdmi *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+ enum drm_connector_status status;
+
+ status = dw_hdmi_phy_read_hpd(dw_hdmi, data);
+
+ if (status == connector_status_connected)
+ regmap_write(hdmi->regmap,
+ RK3328_GRF_SOC_CON4,
+ HIWORD_UPDATE(RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V,
+ RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V));
+ else
+ regmap_write(hdmi->regmap,
+ RK3328_GRF_SOC_CON4,
+ HIWORD_UPDATE(0, RK3328_HDMI_SDA_5V |
+ RK3328_HDMI_SCL_5V));
+ return status;
+}
+
+static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
+{
+ struct rockchip_hdmi *hdmi = (struct rockchip_hdmi *)data;
+
+ dw_hdmi_phy_setup_hpd(dw_hdmi, data);
+
+ /* Enable and map pins to 3V grf-controlled io-voltage */
+ regmap_write(hdmi->regmap,
+ RK3328_GRF_SOC_CON4,
+ HIWORD_UPDATE(0, RK3328_HDMI_HPD_SARADC | RK3328_HDMI_CEC_5V |
+ RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V |
+ RK3328_HDMI_HPD_5V));
+ regmap_write(hdmi->regmap,
+ RK3328_GRF_SOC_CON3,
+ HIWORD_UPDATE(0, RK3328_HDMI_SDA5V_GRF | RK3328_HDMI_SCL5V_GRF |
+ RK3328_HDMI_HPD5V_GRF |
+ RK3328_HDMI_CEC5V_GRF));
+ regmap_write(hdmi->regmap,
+ RK3328_GRF_SOC_CON2,
+ HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK,
+ RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK |
+ RK3328_HDMI_HPD_IOE));
+}
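All of the regmap writes above go through HIWORD_UPDATE(), defined earlier in this file as (val) | (mask) << 16. Rockchip GRF registers treat the upper 16 bits of a write as a per-bit write-enable mask, so only the bits named in the mask are modified. A worked example of the macro arithmetic:

/*
 * HIWORD_UPDATE(0, RK3328_HDMI_HPD_IOE) == BIT(2) << 16: the high
 * half-word enables a write to bit 2 only, and the low half-word
 * clears it, so no read-modify-write of the GRF register is needed.
 */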
+
static struct rockchip_hdmi_chip_data rk3288_chip_data = {
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
.lcdsel_big = HIWORD_UPDATE(0, RK3288_HDMI_LCDC_SEL),
@@ -301,6 +384,29 @@ static const struct dw_hdmi_plat_data rk3288_hdmi_drv_data = {
.phy_data = &rk3288_chip_data,
};
+static const struct dw_hdmi_phy_ops rk3328_hdmi_phy_ops = {
+ .init = dw_hdmi_rockchip_genphy_init,
+ .disable = dw_hdmi_rockchip_genphy_disable,
+ .read_hpd = dw_hdmi_rk3328_read_hpd,
+ .update_hpd = dw_hdmi_phy_update_hpd,
+ .setup_hpd = dw_hdmi_rk3328_setup_hpd,
+};
+
+static struct rockchip_hdmi_chip_data rk3328_chip_data = {
+ .lcdsel_grf_reg = -1,
+};
+
+static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
+ .mode_valid = dw_hdmi_rockchip_mode_valid,
+ .mpll_cfg = rockchip_mpll_cfg,
+ .cur_ctr = rockchip_cur_ctr,
+ .phy_config = rockchip_phy_config,
+ .phy_data = &rk3328_chip_data,
+ .phy_ops = &rk3328_hdmi_phy_ops,
+ .phy_name = "inno_dw_hdmi_phy2",
+ .phy_force_vendor = true,
+};
+
static struct rockchip_hdmi_chip_data rk3399_chip_data = {
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
.lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL),
@@ -319,6 +425,9 @@ static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = {
{ .compatible = "rockchip,rk3288-dw-hdmi",
.data = &rk3288_hdmi_drv_data
},
+ { .compatible = "rockchip,rk3328-dw-hdmi",
+ .data = &rk3328_hdmi_drv_data
+ },
{ .compatible = "rockchip,rk3399-dw-hdmi",
.data = &rk3399_hdmi_drv_data
},
@@ -330,7 +439,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
- const struct dw_hdmi_plat_data *plat_data;
+ struct dw_hdmi_plat_data *plat_data;
const struct of_device_id *match;
struct drm_device *drm = data;
struct drm_encoder *encoder;
@@ -345,9 +454,14 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return -ENOMEM;
match = of_match_node(dw_hdmi_rockchip_dt_ids, pdev->dev.of_node);
- plat_data = match->data;
+ plat_data = devm_kmemdup(&pdev->dev, match->data,
+ sizeof(*plat_data), GFP_KERNEL);
+ if (!plat_data)
+ return -ENOMEM;
+
hdmi->dev = &pdev->dev;
hdmi->chip_data = plat_data->phy_data;
+ plat_data->phy_data = hdmi;
encoder = &hdmi->encoder;
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@@ -373,6 +487,14 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
+ hdmi->phy = devm_phy_optional_get(dev, "hdmi");
+ if (IS_ERR(hdmi->phy)) {
+ ret = PTR_ERR(hdmi->phy);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(hdmi->dev, "failed to get phy\n");
+ return ret;
+ }
+
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
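Because devm_phy_optional_get() returns NULL rather than an error when no "hdmi" phy is described in the device tree, and the generic phy API treats a NULL phy as a successful no-op, the rk3288/rk3399 paths are unaffected by this call. A sketch of that contract; example_power_on is a hypothetical name:

static int example_power_on(struct phy *example_phy)
{
	/* phy_power_on(NULL) returns 0 without touching hardware */
	return phy_power_on(example_phy);
}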
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 941f35233b1f..be6c2573039a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -448,11 +448,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
-{
- rockchip_drm_platform_remove(pdev);
-}
-
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@@ -462,7 +457,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
- .shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
@@ -489,7 +483,7 @@ static int __init rockchip_drm_init(void)
ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver,
CONFIG_ROCKCHIP_DW_HDMI);
- ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_driver,
+ ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver,
CONFIG_ROCKCHIP_DW_MIPI_DSI);
ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 21a023a97bb8..ce48568ec8a0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -37,6 +37,7 @@ struct rockchip_crtc_state {
int output_type;
int output_mode;
int output_bpc;
+ int output_flags;
};
#define to_rockchip_crtc_state(s) \
container_of(s, struct rockchip_crtc_state, base)
@@ -67,7 +68,7 @@ int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout);
int rockchip_drm_endpoint_is_subdriver(struct device_node *ep);
extern struct platform_driver cdn_dp_driver;
extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
-extern struct platform_driver dw_mipi_dsi_driver;
+extern struct platform_driver dw_mipi_dsi_rockchip_driver;
extern struct platform_driver inno_hdmi_driver;
extern struct platform_driver rockchip_dp_driver;
extern struct platform_driver rockchip_lvds_driver;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
index 79d00d861a31..01ff3c858875 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_psr.c
@@ -189,12 +189,14 @@ EXPORT_SYMBOL(rockchip_drm_psr_flush_all);
int rockchip_drm_psr_register(struct drm_encoder *encoder,
int (*psr_set)(struct drm_encoder *, bool enable))
{
- struct rockchip_drm_private *drm_drv = encoder->dev->dev_private;
+ struct rockchip_drm_private *drm_drv;
struct psr_drv *psr;
if (!encoder || !psr_set)
return -EINVAL;
+ drm_drv = encoder->dev->dev_private;
+
psr = kzalloc(sizeof(struct psr_drv), GFP_KERNEL);
if (!psr)
return -ENOMEM;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 0c35a88e33dd..fb70fb486fbf 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -916,6 +916,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ?
BIT(VSYNC_POSITIVE) : 0;
VOP_REG_SET(vop, output, pin_pol, pin_pol);
+ VOP_REG_SET(vop, output, mipi_dual_channel_en, 0);
switch (s->output_type) {
case DRM_MODE_CONNECTOR_LVDS:
@@ -933,6 +934,8 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
case DRM_MODE_CONNECTOR_DSI:
VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol);
VOP_REG_SET(vop, output, mipi_en, 1);
+ VOP_REG_SET(vop, output, mipi_dual_channel_en,
+ !!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL));
break;
case DRM_MODE_CONNECTOR_DisplayPort:
pin_pol &= ~BIT(DCLK_INVERT);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index fd5765dfd637..0fe40e1983d9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -60,6 +60,7 @@ struct vop_output {
struct vop_reg edp_en;
struct vop_reg hdmi_en;
struct vop_reg mipi_en;
+ struct vop_reg mipi_dual_channel_en;
struct vop_reg rgb_en;
};
@@ -214,6 +215,9 @@ struct vop_data {
/* for use with special output interfaces (outface) */
#define ROCKCHIP_OUT_MODE_AAAA 15
+/* output flags */
+#define ROCKCHIP_OUTPUT_DSI_DUAL BIT(0)
+
enum alpha_mode {
ALPHA_STRAIGHT,
ALPHA_INVERSE,
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index a6db3cd5544b..08fc40af52c8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -361,7 +361,11 @@ static const struct vop_win_data rk3188_vop_win_data[] = {
};
static const int rk3188_vop_intrs[] = {
- 0,
+ /*
+	 * The hs_start interrupt fires at frame start, so it serves
+	 * the same purpose as dsp_hold in the driver.
+ */
+ DSP_HOLD_VALID_INTR,
FS_INTR,
LINE_FLAG_INTR,
BUS_ERROR_INTR,
@@ -630,6 +634,7 @@ static const struct vop_output rk3399_output = {
.hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13),
.edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14),
.mipi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 15),
+ .mipi_dual_channel_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 3),
};
static const struct vop_data rk3399_vop_big = {
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3e22a54a99c2..4463d3826ecb 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -130,7 +130,14 @@ drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
int i;
for (i = 0; i < entity->num_rq_list; ++i) {
- num_jobs = atomic_read(&entity->rq_list[i]->sched->num_jobs);
+ struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;
+
+ if (!entity->rq_list[i]->sched->ready) {
+ DRM_WARN("sched%s is not ready, skipping", sched->name);
+ continue;
+ }
+
+ num_jobs = atomic_read(&sched->num_jobs);
if (num_jobs < min_jobs) {
min_jobs = num_jobs;
rq = entity->rq_list[i];
@@ -204,7 +211,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
drm_sched_fence_finished(job->s_fence);
WARN_ON(job->s_fence->parent);
- dma_fence_put(&job->s_fence->finished);
job->sched->ops->free_job(job);
}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 44fe587aaef9..dbb69063b3d5 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -60,6 +60,8 @@
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job);
+
/**
* drm_sched_rq_init - initialize a given run queue struct
*
@@ -196,6 +198,75 @@ static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
schedule_delayed_work(&sched->work_tdr, sched->timeout);
}
+/**
+ * drm_sched_fault - immediately start timeout handler
+ *
+ * @sched: scheduler where the timeout handling should be started.
+ *
+ * Start timeout handling immediately when the driver detects a hardware fault.
+ */
+void drm_sched_fault(struct drm_gpu_scheduler *sched)
+{
+ mod_delayed_work(system_wq, &sched->work_tdr, 0);
+}
+EXPORT_SYMBOL(drm_sched_fault);
+
+/**
+ * drm_sched_suspend_timeout - Suspend scheduler job timeout
+ *
+ * @sched: scheduler instance for which to suspend the timeout
+ *
+ * Suspend the delayed work timeout for the scheduler. This is done by
+ * modifying the delayed work timeout to an arbitrarily large value,
+ * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
+ * called from an IRQ context.
+ *
+ * Returns the remaining timeout.
+ *
+ */
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
+{
+ unsigned long sched_timeout, now = jiffies;
+
+ sched_timeout = sched->work_tdr.timer.expires;
+
+ /*
+ * Modify the timeout to an arbitrarily large value. This also prevents
+ * the timeout from being restarted when new submissions arrive.
+ */
+ if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
+ && time_after(sched_timeout, now))
+ return sched_timeout - now;
+ else
+ return sched->timeout;
+}
+EXPORT_SYMBOL(drm_sched_suspend_timeout);
+
+/**
+ * drm_sched_resume_timeout - Resume scheduler job timeout
+ *
+ * @sched: scheduler instance for which to resume the timeout
+ * @remaining: remaining timeout
+ *
+ * Resume the delayed work timeout for the scheduler. Note that
+ * this function can be called from an IRQ context.
+ */
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+
+ if (list_empty(&sched->ring_mirror_list))
+ cancel_delayed_work(&sched->work_tdr);
+ else
+ mod_delayed_work(system_wq, &sched->work_tdr, remaining);
+
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
+}
+EXPORT_SYMBOL(drm_sched_resume_timeout);
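A hypothetical caller of the new pair, sketched here only to show the intended pairing; a driver would wrap a hardware operation that must not be mistaken for a hang, such as a queue preemption or reset:

static void example_quiesce(struct drm_gpu_scheduler *sched)
{
	unsigned long remaining;

	/* park the TDR; safe from IRQ context per the kerneldoc above */
	remaining = drm_sched_suspend_timeout(sched);

	/* ... hardware operation that must not trigger timeout handling ... */

	/* re-arm the TDR with whatever time was left */
	drm_sched_resume_timeout(sched, remaining);
}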
+
/* job_finish is called after the hw fence has signaled
*/
static void drm_sched_job_finish(struct work_struct *work)
@@ -203,6 +274,7 @@ static void drm_sched_job_finish(struct work_struct *work)
struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
finish_work);
struct drm_gpu_scheduler *sched = s_job->sched;
+ unsigned long flags;
/*
* Canceling the timeout without removing our job from the ring mirror
@@ -213,14 +285,13 @@ static void drm_sched_job_finish(struct work_struct *work)
*/
cancel_delayed_work_sync(&sched->work_tdr);
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
/* remove job from ring_mirror_list */
- list_del(&s_job->node);
+ list_del_init(&s_job->node);
/* queue TDR for next job */
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
- dma_fence_put(&s_job->s_fence->finished);
sched->ops->free_job(s_job);
}
@@ -235,55 +306,33 @@ static void drm_sched_job_finish_cb(struct dma_fence *f,
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
struct drm_gpu_scheduler *sched = s_job->sched;
+ unsigned long flags;
dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
drm_sched_job_finish_cb);
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
- int r;
+ unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
-
- spin_lock(&sched->job_list_lock);
- list_for_each_entry_reverse(job, &sched->ring_mirror_list, node) {
- struct drm_sched_fence *fence = job->s_fence;
-
- if (!dma_fence_remove_callback(fence->parent, &fence->cb))
- goto already_signaled;
- }
-
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
- spin_unlock(&sched->job_list_lock);
if (job)
- sched->ops->timedout_job(job);
-
- spin_lock(&sched->job_list_lock);
- list_for_each_entry(job, &sched->ring_mirror_list, node) {
- struct drm_sched_fence *fence = job->s_fence;
+ job->sched->ops->timedout_job(job);
- if (!fence->parent || !list_empty(&fence->cb.node))
- continue;
-
- r = dma_fence_add_callback(fence->parent, &fence->cb,
- drm_sched_process_job);
- if (r)
- drm_sched_process_job(fence->parent, &fence->cb);
-
-already_signaled:
- ;
- }
- spin_unlock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
+ drm_sched_start_timeout(sched);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
/**
@@ -297,9 +346,10 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
{
struct drm_sched_job *s_job;
struct drm_sched_entity *entity, *tmp;
+ unsigned long flags;
int i;
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
if (s_job->s_fence->parent &&
dma_fence_remove_callback(s_job->s_fence->parent,
@@ -309,7 +359,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
atomic_dec(&sched->hw_rq_count);
}
}
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
atomic_inc(&bad->karma);
@@ -347,9 +397,10 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *s_job, *tmp;
bool found_guilty = false;
+ unsigned long flags;
int r;
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *fence;
@@ -363,7 +414,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
dma_fence_set_error(&s_fence->finished, -ECANCELED);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
fence = sched->ops->run_job(s_job);
atomic_inc(&sched->hw_rq_count);
@@ -378,12 +429,14 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
r);
dma_fence_put(fence);
} else {
+ if (s_fence->finished.error < 0)
+ drm_sched_expel_job_unlocked(s_job);
drm_sched_process_job(NULL, &s_fence->cb);
}
- spin_lock(&sched->job_list_lock);
+ spin_lock_irqsave(&sched->job_list_lock, flags);
}
drm_sched_start_timeout(sched);
- spin_unlock(&sched->job_list_lock);
+ spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
EXPORT_SYMBOL(drm_sched_job_recovery);
@@ -406,6 +459,9 @@ int drm_sched_job_init(struct drm_sched_job *job,
struct drm_gpu_scheduler *sched;
drm_sched_entity_select_rq(entity);
+ if (!entity->rq)
+ return -ENOENT;
+
sched = entity->rq->sched;
job->sched = sched;
@@ -424,6 +480,18 @@ int drm_sched_job_init(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_init);
/**
+ * drm_sched_job_cleanup - clean up scheduler job resources
+ *
+ * @job: scheduler job to clean up
+ */
+void drm_sched_job_cleanup(struct drm_sched_job *job)
+{
+ dma_fence_put(&job->s_fence->finished);
+ job->s_fence = NULL;
+}
+EXPORT_SYMBOL(drm_sched_job_cleanup);
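With the finished-fence put removed from drm_sched_entity_kill_jobs_cb() and drm_sched_job_finish() above, a driver's free_job callback is now expected to drop that reference itself via the new helper. A hypothetical sketch; struct example_job and example_free_job are illustrative names, not from this patch:

static void example_free_job(struct drm_sched_job *s_job)
{
	struct example_job *job = container_of(s_job, struct example_job, base);

	drm_sched_job_cleanup(s_job);	/* puts the finished fence */
	kfree(job);
}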
+
+/**
* drm_sched_ready - is the scheduler ready
*
* @sched: scheduler instance
@@ -567,6 +635,8 @@ static int drm_sched_main(void *param)
r);
dma_fence_put(fence);
} else {
+ if (s_fence->finished.error < 0)
+ drm_sched_expel_job_unlocked(sched_job);
drm_sched_process_job(NULL, &s_fence->cb);
}
@@ -575,6 +645,15 @@ static int drm_sched_main(void *param)
return 0;
}
+static void drm_sched_expel_job_unlocked(struct drm_sched_job *s_job)
+{
+ struct drm_gpu_scheduler *sched = s_job->sched;
+
+ spin_lock(&sched->job_list_lock);
+ list_del_init(&s_job->node);
+ spin_unlock(&sched->job_list_lock);
+}
+
/**
* drm_sched_init - Init a gpu scheduler instance
*
@@ -594,7 +673,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
long timeout,
const char *name)
{
- int i;
+ int i, ret;
sched->ops = ops;
sched->hw_submission_limit = hw_submission;
sched->name = name;
@@ -615,10 +694,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(drm_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
+ ret = PTR_ERR(sched->thread);
+ sched->thread = NULL;
DRM_ERROR("Failed to create scheduler for %s.\n", name);
- return PTR_ERR(sched->thread);
+ return ret;
}
+ sched->ready = true;
return 0;
}
EXPORT_SYMBOL(drm_sched_init);
@@ -634,5 +716,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
+
+ sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);
diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile
index 9fc349fa18e9..1bb73dc4c88c 100644
--- a/drivers/gpu/drm/selftests/Makefile
+++ b/drivers/gpu/drm/selftests/Makefile
@@ -1 +1,5 @@
-obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm-helper.o
+test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \
+ test-drm_format.o test-drm_framebuffer.o \
+ test-drm_damage_helper.o
+
+obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o
diff --git a/drivers/gpu/drm/selftests/drm_helper_selftests.h b/drivers/gpu/drm/selftests/drm_helper_selftests.h
deleted file mode 100644
index 9771290ed228..000000000000
--- a/drivers/gpu/drm/selftests/drm_helper_selftests.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* List each unit test as selftest(name, function)
- *
- * The name is used as both an enum and expanded as igt__name to create
- * a module parameter. It must be unique and legal for a C identifier.
- *
- * Tests are executed in order by igt/drm_selftests_helper
- */
-selftest(check_plane_state, igt_check_plane_state)
diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
new file mode 100644
index 000000000000..464753746013
--- /dev/null
+++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as igt__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * Tests are executed in order by igt/drm_selftests_helper
+ */
+selftest(check_plane_state, igt_check_plane_state)
+selftest(check_drm_format_block_width, igt_check_drm_format_block_width)
+selftest(check_drm_format_block_height, igt_check_drm_format_block_height)
+selftest(check_drm_format_min_pitch, igt_check_drm_format_min_pitch)
+selftest(check_drm_framebuffer_create, igt_check_drm_framebuffer_create)
+selftest(damage_iter_no_damage, igt_damage_iter_no_damage)
+selftest(damage_iter_no_damage_fractional_src, igt_damage_iter_no_damage_fractional_src)
+selftest(damage_iter_no_damage_src_moved, igt_damage_iter_no_damage_src_moved)
+selftest(damage_iter_no_damage_fractional_src_moved, igt_damage_iter_no_damage_fractional_src_moved)
+selftest(damage_iter_no_damage_not_visible, igt_damage_iter_no_damage_not_visible)
+selftest(damage_iter_no_damage_no_crtc, igt_damage_iter_no_damage_no_crtc)
+selftest(damage_iter_no_damage_no_fb, igt_damage_iter_no_damage_no_fb)
+selftest(damage_iter_simple_damage, igt_damage_iter_simple_damage)
+selftest(damage_iter_single_damage, igt_damage_iter_single_damage)
+selftest(damage_iter_single_damage_intersect_src, igt_damage_iter_single_damage_intersect_src)
+selftest(damage_iter_single_damage_outside_src, igt_damage_iter_single_damage_outside_src)
+selftest(damage_iter_single_damage_fractional_src, igt_damage_iter_single_damage_fractional_src)
+selftest(damage_iter_single_damage_intersect_fractional_src, igt_damage_iter_single_damage_intersect_fractional_src)
+selftest(damage_iter_single_damage_outside_fractional_src, igt_damage_iter_single_damage_outside_fractional_src)
+selftest(damage_iter_single_damage_src_moved, igt_damage_iter_single_damage_src_moved)
+selftest(damage_iter_single_damage_fractional_src_moved, igt_damage_iter_single_damage_fractional_src_moved)
+selftest(damage_iter_damage, igt_damage_iter_damage)
+selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect)
+selftest(damage_iter_damage_one_outside, igt_damage_iter_damage_one_outside)
+selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved)
+selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible)
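For reference, the harness consumes this list with the usual X-macro trick: the header is included twice with different definitions of selftest(). A sketch, assuming the same wrapper scheme as the existing drm_mm selftests:

#define selftest(name, func) __idx_##name,
enum {
#include "drm_modeset_selftests.h"
};
#undef selftest

#define selftest(n, f) [__idx_##n] = { .name = #n, .func = f },
static struct drm_selftest {
	bool enabled;
	const char *name;
	int (*func)(void *ignored);
} selftests[] = {
#include "drm_modeset_selftests.h"
};
#undef selftest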
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
new file mode 100644
index 000000000000..9d2bcdf8bc29
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test case for drm_damage_helper functions
+ */
+
+#define pr_fmt(fmt) "drm_damage_helper: " fmt
+
+#include <drm/drm_damage_helper.h>
+
+#include "test-drm_modeset_common.h"
+
+static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2,
+ int y2)
+{
+ state->src.x1 = x1;
+ state->src.y1 = y1;
+ state->src.x2 = x2;
+ state->src.y2 = y2;
+}
+
+static void set_damage_clip(struct drm_mode_rect *r, int x1, int y1, int x2,
+ int y2)
+{
+ r->x1 = x1;
+ r->y1 = y1;
+ r->x2 = x2;
+ r->y2 = y2;
+}
+
+static void set_damage_blob(struct drm_property_blob *damage_blob,
+ struct drm_mode_rect *r, uint32_t size)
+{
+ damage_blob->length = size;
+ damage_blob->data = r;
+}
+
+static void set_plane_damage(struct drm_plane_state *state,
+ struct drm_property_blob *damage_blob)
+{
+ state->fb_damage_clips = damage_blob;
+}
+
+static bool check_damage_clip(struct drm_plane_state *state, struct drm_rect *r,
+ int x1, int y1, int x2, int y2)
+{
+ /*
+	 * Round down x1/y1 and round up x2/y2. Damage is not in 16.16 fixed
+	 * point, so this rounding ensures every affected pixel is covered.
+ */
+ int src_x1 = state->src.x1 >> 16;
+ int src_y1 = state->src.y1 >> 16;
+ int src_x2 = (state->src.x2 >> 16) + !!(state->src.x2 & 0xFFFF);
+ int src_y2 = (state->src.y2 >> 16) + !!(state->src.y2 & 0xFFFF);
+
+ if (x1 >= x2 || y1 >= y2) {
+ pr_err("Cannot have damage clip with no dimension.\n");
+ return false;
+ }
+
+ if (x1 < src_x1 || y1 < src_y1 || x2 > src_x2 || y2 > src_y2) {
+ pr_err("Damage cannot be outside rounded plane src.\n");
+ return false;
+ }
+
+ if (r->x1 != x1 || r->y1 != y1 || r->x2 != x2 || r->y2 != y2) {
+ pr_err("Damage = %d %d %d %d\n", r->x1, r->y1, r->x2, r->y2);
+ return false;
+ }
+
+ return true;
+}
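A worked example of the rounding above, in 16.16 fixed point:

/*
 * src.x1 = 0x3fffe is 3.99997, so it rounds down to src_x1 = 3;
 * src.x2 = 0x3fffe + (1024 << 16) has a nonzero fractional part, so
 * src_x2 = (src.x2 >> 16) + 1 = 1028. This is why the fractional-src
 * tests below expect clips such as (3, 3, 1028, 772).
 */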
+
+int igt_damage_iter_no_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src same as fb size. */
+ set_plane_src(&old_state, 0, 0, fb.width << 16, fb.height << 16);
+ set_plane_src(&state, 0, 0, fb.width << 16, fb.height << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 2048, 2048));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_fractional_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part and it moved since old plane state. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_not_visible(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = false,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_no_crtc(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = 0,
+ .fb = &fb,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_no_damage_no_fb(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = 0,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_simple_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage set to plane src */
+ set_damage_clip(&damage, 0, 0, 1024, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 0, 0, 1024, 768));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ set_damage_clip(&damage, 256, 192, 768, 576);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 768, 576));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_intersect_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage intersect with plane src. */
+ set_damage_clip(&damage, 256, 192, 1360, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage clipped to src.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 256, 192, 1024, 768));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_outside_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_damage_clip(&damage, 10, 10, 256, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 256, 330));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersect with plane src. */
+ set_damage_clip(&damage, 10, 1, 1360, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage clipped to rounded off src.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 4, 1029, 330));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_outside_fractional_src(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src has fractional part. */
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage clip outside plane src */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should have no damage.");
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ set_damage_clip(&damage, 20, 30, 256, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 10, 10, 1034, 778));
+
+ return 0;
+}
+
+int igt_damage_iter_single_damage_fractional_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ /* Plane src with fractional part moved since old plane state. */
+ set_plane_src(&old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* Damage intersect with plane src. */
+ set_damage_clip(&damage, 20, 30, 1360, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return rounded off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+
+ return 0;
+}
+
+int igt_damage_iter_damage(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
+ if (num_hits == 1)
+ FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
+ num_hits++;
+ }
+
+ FAIL(num_hits != 2, "Should return damage when set.");
+
+ return 0;
+}
+
+int igt_damage_iter_damage_one_intersect(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ /* 2 damage clips, one intersect plane src. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 2, 2, 1360, 1360);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ FAIL_ON(!check_damage_clip(&state, &clip, 20, 30, 200, 180));
+ if (num_hits == 1)
+ FAIL_ON(!check_damage_clip(&state, &clip, 4, 4, 1029, 773));
+ num_hits++;
+ }
+
+ FAIL(num_hits != 2, "Should return damage when set.");
+
+ return 0;
+}
+
+int igt_damage_iter_damage_one_outside(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return damage when set.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 240, 200, 280, 250));
+
+ return 0;
+}
+
+int igt_damage_iter_damage_src_moved(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = true,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 1, "Should return round off plane src as damage.");
+ FAIL_ON(!check_damage_clip(&state, &clip, 3, 3, 1028, 772));
+
+ return 0;
+}
+
+int igt_damage_iter_damage_not_visible(void *ignored)
+{
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_plane_state old_state;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ uint32_t num_hits = 0;
+
+ struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+
+ struct drm_plane_state state = {
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .visible = false,
+ };
+
+ set_plane_src(&old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &old_state, &state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ FAIL(num_hits != 0, "Should not return any damage.");
+
+ return 0;
+}
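+
+/*
+ * For reference, a minimal sketch of how a driver consumes the iterator
+ * exercised above from its plane update hook; damage_copy_rect() is a
+ * hypothetical driver helper, not a DRM API:
+ *
+ * struct drm_atomic_helper_damage_iter iter;
+ * struct drm_rect clip;
+ *
+ * drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
+ * drm_atomic_for_each_plane_damage(&iter, &clip)
+ * damage_copy_rect(new_state->fb, &clip);
+ */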
diff --git a/drivers/gpu/drm/selftests/test-drm_format.c b/drivers/gpu/drm/selftests/test-drm_format.c
new file mode 100644
index 000000000000..c5e212afa27a
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_format.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_format functions
+ */
+
+#define pr_fmt(fmt) "drm_format: " fmt
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "test-drm_modeset_common.h"
+
+int igt_check_drm_format_block_width(void *ignored)
+{
+ const struct drm_format_info *info = NULL;
+
+ /* Test invalid arguments */
+ FAIL_ON(drm_format_info_block_width(info, 0) != 0);
+ FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+ FAIL_ON(drm_format_info_block_width(info, 1) != 0);
+
+ /* Test 1 plane format */
+ info = drm_format_info(DRM_FORMAT_XRGB4444);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_width(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 1) != 0);
+ FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+ /* Test 2 planes format */
+ info = drm_format_info(DRM_FORMAT_NV12);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_width(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 1) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 2) != 0);
+ FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+ /* Test 3 planes format */
+ info = drm_format_info(DRM_FORMAT_YUV422);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_width(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 1) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 2) != 1);
+ FAIL_ON(drm_format_info_block_width(info, 3) != 0);
+ FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+ /* Test a tiled format */
+ info = drm_format_info(DRM_FORMAT_X0L0);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_width(info, 0) != 2);
+ FAIL_ON(drm_format_info_block_width(info, 1) != 0);
+ FAIL_ON(drm_format_info_block_width(info, -1) != 0);
+
+ return 0;
+}
+
+int igt_check_drm_format_block_height(void *ignored)
+{
+ const struct drm_format_info *info = NULL;
+
+ /* Test invalid arguments */
+ FAIL_ON(drm_format_info_block_height(info, 0) != 0);
+ FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+ FAIL_ON(drm_format_info_block_height(info, 1) != 0);
+
+ /* Test 1 plane format */
+ info = drm_format_info(DRM_FORMAT_XRGB4444);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_height(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 1) != 0);
+ FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+ /* Test 2 planes format */
+ info = drm_format_info(DRM_FORMAT_NV12);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_height(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 1) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 2) != 0);
+ FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+ /* Test 3 planes format */
+ info = drm_format_info(DRM_FORMAT_YUV422);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_height(info, 0) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 1) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 2) != 1);
+ FAIL_ON(drm_format_info_block_height(info, 3) != 0);
+ FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+ /* Test a tiled format */
+ info = drm_format_info(DRM_FORMAT_X0L0);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_block_height(info, 0) != 2);
+ FAIL_ON(drm_format_info_block_height(info, 1) != 0);
+ FAIL_ON(drm_format_info_block_height(info, -1) != 0);
+
+ return 0;
+}
+
+int igt_check_drm_format_min_pitch(void *ignored)
+{
+ const struct drm_format_info *info = NULL;
+
+ /* Test invalid arguments */
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ /* Test 1 plane 8 bits per pixel format */
+ info = drm_format_info(DRM_FORMAT_RGB332);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
+ (uint64_t)(UINT_MAX - 1));
+
+ /* Test 1 plane 16 bits per pixel format */
+ info = drm_format_info(DRM_FORMAT_XRGB4444);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 4);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1280);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 2048);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 3840);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 8192);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 1342);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX * 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
+ (uint64_t)(UINT_MAX - 1) * 2);
+
+ /* Test 1 plane 24 bits per pixel format */
+ info = drm_format_info(DRM_FORMAT_RGB888);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 3);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 6);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1920);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 3072);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 5760);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 12288);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 2013);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX * 3);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
+ (uint64_t)(UINT_MAX - 1) * 3);
+
+ /* Test 1 plane 32 bits per pixel format */
+ info = drm_format_info(DRM_FORMAT_ABGR8888);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 4);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 8);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 2560);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 4096);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 7680);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 16384);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 2684);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX * 4);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
+ (uint64_t)(UINT_MAX - 1) * 4);
+
+ /* Test 2 planes format */
+ info = drm_format_info(DRM_FORMAT_NV12);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 320) != 640);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 512) != 1024);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 960) != 1920);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 2048) != 4096);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 336) != 672);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1) !=
+ (uint64_t)UINT_MAX + 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)) !=
+ (uint64_t)(UINT_MAX - 1));
+ FAIL_ON(drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2) !=
+ (uint64_t)(UINT_MAX - 1));
+
+ /* Test 3 planes 8 bits per pixel format */
+ info = drm_format_info(DRM_FORMAT_YUV422);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 3, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 1) != 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 1) != 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 2) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 2) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 640);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 320) != 320);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 320) != 320);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 1024);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 512) != 512);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 512) != 512);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 1920);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 960) != 960);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 960) != 960);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 4096);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 2048) != 2048);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 2048) != 2048);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 671);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 336) != 336);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, 336) != 336);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1) !=
+ (uint64_t)UINT_MAX / 2 + 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, UINT_MAX / 2 + 1) !=
+ (uint64_t)UINT_MAX / 2 + 1);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, (UINT_MAX - 1) / 2) !=
+ (uint64_t)(UINT_MAX - 1) / 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2) !=
+ (uint64_t)(UINT_MAX - 1) / 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 2, (UINT_MAX - 1) / 2) !=
+ (uint64_t)(UINT_MAX - 1) / 2);
+
+ /* Test tiled format */
+ info = drm_format_info(DRM_FORMAT_X0L2);
+ FAIL_ON(!info);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, -1, 0) != 0);
+ FAIL_ON(drm_format_info_min_pitch(info, 1, 0) != 0);
+
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1) != 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 2) != 4);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 640) != 1280);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1024) != 2048);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 1920) != 3840);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 4096) != 8192);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, 671) != 1342);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX) !=
+ (uint64_t)UINT_MAX * 2);
+ FAIL_ON(drm_format_info_min_pitch(info, 0, UINT_MAX - 1) !=
+ (uint64_t)(UINT_MAX - 1) * 2);
+
+ return 0;
+}
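+
+/*
+ * All of the expectations above follow from the helper's contract, which
+ * (as a sketch, for the linear formats tested here) reduces to:
+ *
+ * min_pitch = DIV_ROUND_UP(width * char_per_block,
+ * block_width * block_height)
+ *
+ * e.g. X0L2 packs a 2x2 pixel block into 8 bytes, so 640 pixels need
+ * 640 * 8 / (2 * 2) = 1280 bytes, and NV12's interleaved CbCr plane
+ * uses 2 bytes per sample, so a width of 336 needs 672 bytes.
+ */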
diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
new file mode 100644
index 000000000000..a04d02dacce2
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_framebuffer functions
+ */
+
+#include <drm/drmP.h>
+#include "../drm_crtc_internal.h"
+
+#include "test-drm_modeset_common.h"
+
+#define MIN_WIDTH 4
+#define MAX_WIDTH 4096
+#define MIN_HEIGHT 4
+#define MAX_HEIGHT 4096
+
+struct drm_framebuffer_test {
+ int buffer_created;
+ struct drm_mode_fb_cmd2 cmd;
+ const char *name;
+};
+
+static struct drm_framebuffer_test createbuffer_tests[] = {
+{ .buffer_created = 1, .name = "ABGR8888 normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * 600, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 pitch greater than min required",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH + 1, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 pitch less than min required",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH - 1, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Invalid width",
+ .cmd = { .width = MAX_WIDTH + 1, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * (MAX_WIDTH + 1), 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Invalid buffer handle",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 0, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "No pixel format",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = 0,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Width 0",
+ .cmd = { .width = 0, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Height 0",
+ .cmd = { .width = MAX_WIDTH, .height = 0, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Out of bound height * pitch combination",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX - 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Large buffer offset",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Valid buffer modifier",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ .flags = DRM_MODE_FB_MODIFIERS, .modifier = { AFBC_FORMAT_MOD_YTR, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Extra pitches without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 4 * MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Extra pitches with DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { 4 * MAX_WIDTH, 4 * MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 Normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .pitches = { 600, 600, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 Max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Invalid pitch",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH - 1, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Invalid modifier/misssing DRM_MODE_FB_MODIFIERS flag",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 different modifier per-plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 with DRM_FORMAT_MOD_SAMSUNG_64_32_TILE",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Valid modifiers without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Modifier for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Handle for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS, .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 Handle for inexistent plane without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 1 }, .pitches = { 600, 600, 600 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { 600, 300, 300 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 DRM_MODE_FB_MODIFIERS set without modifier",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { 600, 300, 300 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2),
+ DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Invalid pitch",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) - 1,
+ DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Different pitches",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1,
+ DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Different buffer offsets/pitches",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .offsets = { MAX_WIDTH, MAX_WIDTH + MAX_WIDTH * MAX_HEIGHT,
+ MAX_WIDTH + 2 * MAX_WIDTH * MAX_HEIGHT },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1, DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Valid modifier",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Different modifiers per plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_YTR,
+ AFBC_FORMAT_MOD_SPARSE },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
+ AFBC_FORMAT_MOD_SPARSE },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 1200, 0, 0 }
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH, 0, 0 }
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Invalid pitch",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH - 1, 0, 0 }
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Pitch greater than minimum required",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Handle for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .offsets = { 0, 0, 3 },
+ .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Modifier without DRM_MODE_FB_MODIFIERS set",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Valid modifier",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Modifier for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT,
+ .pixel_format = DRM_FORMAT_X0L2, .handles = { 1, 0, 0 },
+ .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+ .flags = DRM_MODE_FB_MODIFIERS,
+ }
+},
+};
+
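+/*
+ * The mock .fb_create never allocates a framebuffer: it only records
+ * that the core's validation accepted the request and called into the
+ * driver, then returns an error so there is nothing to clean up.
+ */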
+static struct drm_framebuffer *fb_create_mock(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ int *buffer_created = dev->dev_private;
+ *buffer_created = 1;
+ return ERR_PTR(-EINVAL);
+}
+
+static struct drm_mode_config_funcs mock_config_funcs = {
+ .fb_create = fb_create_mock,
+};
+
+static struct drm_device mock_drm_device = {
+ .mode_config = {
+ .min_width = MIN_WIDTH,
+ .max_width = MAX_WIDTH,
+ .min_height = MIN_HEIGHT,
+ .max_height = MAX_HEIGHT,
+ .allow_fb_modifiers = true,
+ .funcs = &mock_config_funcs,
+ },
+};
+
+static int execute_drm_mode_fb_cmd2(struct drm_mode_fb_cmd2 *r)
+{
+ int buffer_created = 0;
+ struct drm_framebuffer *fb;
+
+ mock_drm_device.dev_private = &buffer_created;
+ fb = drm_internal_framebuffer_create(&mock_drm_device, r, NULL);
+ return buffer_created;
+}
+
+int igt_check_drm_framebuffer_create(void *ignored)
+{
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(createbuffer_tests); i++) {
+ FAIL(createbuffer_tests[i].buffer_created !=
+ execute_drm_mode_fb_cmd2(&createbuffer_tests[i].cmd),
+ "Test %d: \"%s\" failed\n", i, createbuffer_tests[i].name);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.c b/drivers/gpu/drm/selftests/test-drm_modeset_common.c
new file mode 100644
index 000000000000..2a7f93774006
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common file for modeset selftests.
+ */
+
+#include <linux/module.h>
+
+#include "test-drm_modeset_common.h"
+
+#define TESTS "drm_modeset_selftests.h"
+#include "drm_selftest.h"
+
+#include "drm_selftest.c"
+
+static int __init test_drm_modeset_init(void)
+{
+ int err;
+
+ err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
+
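+ /* A negative value from run_selftests() means a test failed. */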
+ return err > 0 ? 0 : err;
+}
+
+static void __exit test_drm_modeset_exit(void)
+{
+}
+
+module_init(test_drm_modeset_init);
+module_exit(test_drm_modeset_exit);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
new file mode 100644
index 000000000000..8c76f09c12d1
--- /dev/null
+++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __TEST_DRM_MODESET_COMMON_H__
+#define __TEST_DRM_MODESET_COMMON_H__
+
+#define FAIL(test, msg, ...) \
+ do { \
+ if (test) { \
+ pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
+ return -EINVAL; \
+ } \
+ } while (0)
+
+#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
+
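+/*
+ * Usage sketch (illustrative, not one of the tests declared below):
+ * a test returns 0 on success, while the macros log a message and
+ * return -EINVAL on the first failed expectation:
+ *
+ * FAIL_ON(info == NULL);
+ * FAIL(num_hits != 2, "expected 2 clips, got %u\n", num_hits);
+ */
+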
+int igt_check_plane_state(void *ignored);
+int igt_check_drm_format_block_width(void *ignored);
+int igt_check_drm_format_block_height(void *ignored);
+int igt_check_drm_format_min_pitch(void *ignored);
+int igt_check_drm_framebuffer_create(void *ignored);
+int igt_damage_iter_no_damage(void *ignored);
+int igt_damage_iter_no_damage_fractional_src(void *ignored);
+int igt_damage_iter_no_damage_src_moved(void *ignored);
+int igt_damage_iter_no_damage_fractional_src_moved(void *ignored);
+int igt_damage_iter_no_damage_not_visible(void *ignored);
+int igt_damage_iter_no_damage_no_crtc(void *ignored);
+int igt_damage_iter_no_damage_no_fb(void *ignored);
+int igt_damage_iter_simple_damage(void *ignored);
+int igt_damage_iter_single_damage(void *ignored);
+int igt_damage_iter_single_damage_intersect_src(void *ignored);
+int igt_damage_iter_single_damage_outside_src(void *ignored);
+int igt_damage_iter_single_damage_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_intersect_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_outside_fractional_src(void *ignored);
+int igt_damage_iter_single_damage_src_moved(void *ignored);
+int igt_damage_iter_single_damage_fractional_src_moved(void *ignored);
+int igt_damage_iter_damage(void *ignored);
+int igt_damage_iter_damage_one_intersect(void *ignored);
+int igt_damage_iter_damage_one_outside(void *ignored);
+int igt_damage_iter_damage_src_moved(void *ignored);
+int igt_damage_iter_damage_not_visible(void *ignored);
+
+#endif
diff --git a/drivers/gpu/drm/selftests/test-drm-helper.c b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
index a015712b43e8..0a9553f51796 100644
--- a/drivers/gpu/drm/selftests/test-drm-helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_plane_helper.c
@@ -1,27 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * Test cases for the drm_kms_helper functions
+ * Test cases for the drm_plane_helper functions
*/
-#define pr_fmt(fmt) "drm_kms_helper: " fmt
-
-#include <linux/module.h>
+#define pr_fmt(fmt) "drm_plane_helper: " fmt
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_modes.h>
-#define TESTS "drm_helper_selftests.h"
-#include "drm_selftest.h"
-
-#define FAIL(test, msg, ...) \
- do { \
- if (test) { \
- pr_err("%s/%u: " msg, __FUNCTION__, __LINE__, ##__VA_ARGS__); \
- return -EINVAL; \
- } \
- } while (0)
-
-#define FAIL_ON(x) FAIL((x), "%s", "FAIL_ON(" __stringify(x) ")\n")
+#include "test-drm_modeset_common.h"
static void set_src(struct drm_plane_state *plane_state,
unsigned src_x, unsigned src_y,
@@ -85,7 +73,7 @@ static bool check_crtc_eq(struct drm_plane_state *plane_state,
return true;
}
-static int igt_check_plane_state(void *ignored)
+int igt_check_plane_state(void *ignored)
{
int ret;
@@ -229,19 +217,3 @@ static int igt_check_plane_state(void *ignored)
return 0;
}
-
-#include "drm_selftest.c"
-
-static int __init test_drm_helper_init(void)
-{
- int err;
-
- err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
-
- return err > 0 ? 0 : err;
-}
-
-module_init(test_drm_helper_init);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 6ececad6f845..8554102a6ead 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -194,7 +194,7 @@ static int shmob_drm_remove(struct platform_device *pdev)
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
drm_irq_uninstall(ddev);
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return 0;
}
@@ -290,7 +290,7 @@ err_modeset_cleanup:
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
err_free_drm_dev:
- drm_dev_unref(ddev);
+ drm_dev_put(ddev);
return ret;
}
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 5824e6aca8f4..ed76e52eb213 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -40,6 +40,8 @@ static void sti_crtc_atomic_disable(struct drm_crtc *crtc,
DRM_DEBUG_DRIVER("\n");
mixer->status = STI_MIXER_DISABLING;
+
+ drm_crtc_wait_one_vblank(crtc);
}
static int
@@ -250,10 +252,8 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
struct sti_compositor *compo;
struct drm_crtc *crtc = data;
struct sti_mixer *mixer;
- struct sti_private *priv;
unsigned int pipe;
- priv = crtc->dev->dev_private;
pipe = drm_crtc_index(crtc);
compo = container_of(nb, struct sti_compositor, vtg_vblank_nb[pipe]);
mixer = compo->mixer[pipe];
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 57b870e1e696..bc908453ffb3 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -332,7 +332,6 @@ static void sti_cursor_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 6dced8abcf16..ac54e0f9caea 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -206,6 +206,8 @@ static void sti_cleanup(struct drm_device *ddev)
struct sti_private *private = ddev->dev_private;
drm_kms_helper_poll_fini(ddev);
+ drm_atomic_helper_shutdown(ddev);
+ drm_mode_config_cleanup(ddev);
component_unbind_all(ddev->dev, ddev);
kfree(private);
ddev->dev_private = NULL;
@@ -230,7 +232,7 @@ static int sti_bind(struct device *dev)
ret = drm_dev_register(ddev, 0);
if (ret)
- goto err_register;
+ goto err_cleanup;
drm_mode_config_reset(ddev);
@@ -238,8 +240,6 @@ static int sti_bind(struct device *dev)
return 0;
-err_register:
- drm_mode_config_cleanup(ddev);
err_cleanup:
sti_cleanup(ddev);
err_drm_dev_put:
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index c32de6cbf061..cff7b2b5ee9e 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -517,7 +517,7 @@ static void sti_gdp_init(struct sti_gdp *gdp)
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
- base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL | GFP_DMA);
+ base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL);
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
@@ -883,7 +883,6 @@ static void sti_gdp_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 03ac3b4a4469..23565f52dd71 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1260,7 +1260,6 @@ static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
{
DRM_DEBUG_DRIVER("\n");
- drm_plane_helper_disable(drm_plane, NULL);
drm_plane_cleanup(drm_plane);
}
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index f2021b23554d..8dec001b9d37 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -26,7 +26,6 @@
static const struct drm_mode_config_funcs drv_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -52,7 +51,6 @@ DEFINE_DRM_GEM_CMA_FOPS(drv_driver_fops);
static struct drm_driver drv_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = drm_fb_helper_lastclose,
.name = "stm",
.desc = "STMicroelectronics SoC DRM",
.date = "20170330",
@@ -72,6 +70,8 @@ static struct drm_driver drv_driver = {
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
+ .get_scanout_position = ltdc_crtc_scanoutpos,
+ .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
};
static int drv_load(struct drm_device *ddev)
@@ -108,12 +108,6 @@ static int drv_load(struct drm_device *ddev)
drm_mode_config_reset(ddev);
drm_kms_helper_poll_init(ddev);
- if (ddev->mode_config.num_connector) {
- ret = drm_fb_cma_fbdev_init(ddev, 16, 0);
- if (ret)
- DRM_DEBUG("Warning: fails to create fbdev\n");
- }
-
platform_set_drvdata(pdev, ddev);
return 0;
@@ -126,7 +120,6 @@ static void drv_unload(struct drm_device *ddev)
{
DRM_DEBUG("%s\n", __func__);
- drm_fb_cma_fbdev_fini(ddev);
drm_kms_helper_poll_fini(ddev);
ltdc_unload(ddev);
drm_mode_config_cleanup(ddev);
@@ -154,6 +147,8 @@ static int stm_drm_platform_probe(struct platform_device *pdev)
if (ret)
goto err_put;
+ drm_fbdev_generic_setup(ddev, 16);
+
return 0;
err_put:
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 808d9fb627e9..61dd661aa0ac 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -148,6 +148,8 @@
#define IER_TERRIE BIT(2) /* Transfer ERRor Interrupt Enable */
#define IER_RRIE BIT(3) /* Register Reload Interrupt enable */
+#define CPSR_CYPOS GENMASK(15, 0) /* Current Y position */
+
#define ISR_LIF BIT(0) /* Line Interrupt Flag */
#define ISR_FUIF BIT(1) /* Fifo Underrun Interrupt Flag */
#define ISR_TERRIF BIT(2) /* Transfer ERRor Interrupt Flag */
@@ -626,6 +628,49 @@ static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
reg_clear(ldev->regs, LTDC_IER, IER_LIE);
}
+bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct ltdc_device *ldev = ddev->dev_private;
+ int line, vactive_start, vactive_end, vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ /* The active area starts after vsync + front porch and ends
+ * at vsync + front porch + display size.
+ * The total height also includes the back porch.
+ * We have 3 possible cases to handle:
+ * - line < vactive_start: vpos = line - vactive_start and will be
+ * negative
+ * - vactive_start < line < vactive_end: vpos = line - vactive_start
+ * and will be positive
+ * - line > vactive_end: vpos = line - vtotal - vactive_start
+ * and will be negative
+ *
+ * The computation for the first two cases is identical, so we can
+ * simplify the code and only test whether line > vactive_end.
+ */
+ line = reg_read(ldev->regs, LTDC_CPSR) & CPSR_CYPOS;
+ vactive_start = reg_read(ldev->regs, LTDC_BPCR) & BPCR_AVBP;
+ vactive_end = reg_read(ldev->regs, LTDC_AWCR) & AWCR_AAH;
+ vtotal = reg_read(ldev->regs, LTDC_TWCR) & TWCR_TOTALH;
+
+ if (line > vactive_end)
+ *vpos = line - vtotal - vactive_start;
+ else
+ *vpos = line - vactive_start;
+
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
+}
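+
+/*
+ * Worked example with illustrative numbers: for vactive_start = 36,
+ * vactive_end = 516 and vtotal = 525, line 100 gives vpos = 64 (inside
+ * the active area), while line 520 gives vpos = 520 - 525 - 36 = -41,
+ * i.e. 41 lines before the next active region starts.
+ */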
+
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index d5afb8960867..e46f477a8494 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -38,6 +38,11 @@ struct ltdc_device {
struct fps_info plane_fpsi[LTDC_MAX_LAYER];
};
+bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
int ltdc_load(struct drm_device *ddev);
void ltdc_unload(struct drm_device *ddev);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index bf49c55b0f2c..9e9255ee59cd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -48,8 +48,12 @@ static const u32 sunxi_rgb2yuv_coef[12] = {
/*
* These coefficients are taken from the A33 BSP from Allwinner.
*
- * The formula is for each component, each coefficient being multiplied by
- * 1024 and each constant being multiplied by 16:
+ * The first three values of each row are coded as 13-bit signed fixed-point
+ * numbers, with 10 bits for the fractional part. The fourth value is a
+ * constant coded as a 14-bit signed fixed-point number with 4 bits for the
+ * fractional part.
+ *
+ * The values in table order give the following colorspace translation:
* G = 1.164 * Y - 0.391 * U - 0.813 * V + 135
* R = 1.164 * Y + 1.596 * V - 222
* B = 1.164 * Y + 2.018 * U + 276
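*
* As an illustrative check of that encoding, the Y coefficient 1.164
* corresponds to 1.164 * 1024 ~= 1192, and the G constant 135 to
* 135 * 16 = 2160.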
@@ -155,6 +159,36 @@ static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
return 0;
}
+static const uint32_t sun4i_backend_formats[] = {
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+};
+
+bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
+{
+ unsigned int i;
+
+ if (modifier != DRM_FORMAT_MOD_LINEAR)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
+ if (sun4i_backend_formats[i] == fmt)
+ return true;
+
+ return false;
+}
+
int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
@@ -395,6 +429,15 @@ int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
return 0;
}
+void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
+ int layer)
+{
+ regmap_update_bits(backend->engine.regs,
+ SUN4I_BACKEND_ATTCTL_REG0(layer),
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
+}
+
static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
{
u16 src_h = state->src_h >> 16;
@@ -413,11 +456,50 @@ static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
struct sun4i_backend *backend = layer->backend;
+ uint32_t format = state->fb->format->format;
+ uint64_t modifier = state->fb->modifier;
if (IS_ERR(backend->frontend))
return false;
- return sun4i_backend_plane_uses_scaler(state);
+ if (!sun4i_frontend_format_is_supported(format, modifier))
+ return false;
+
+ if (!sun4i_backend_format_is_supported(format, modifier))
+ return true;
+
+ /*
+ * TODO: The backend alone allows 2x and 4x integer scaling, including
+ * support for an alpha component (which the frontend doesn't support).
+ * Use the backend directly instead of the frontend in this case, with
+ * another test to return false.
+ */
+
+ if (sun4i_backend_plane_uses_scaler(state))
+ return true;
+
+ /*
+ * Here the format is supported by both the frontend and the backend
+ * and no frontend scaling is required, so use the backend directly.
+ */
+ return false;
+}
+
+static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
+ bool *uses_frontend)
+{
+ if (sun4i_backend_plane_uses_frontend(state)) {
+ *uses_frontend = true;
+ return true;
+ }
+
+ *uses_frontend = false;
+
+ /* Scaling is not supported without the frontend. */
+ if (sun4i_backend_plane_uses_scaler(state))
+ return false;
+
+ return true;
}
static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
@@ -460,14 +542,19 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
struct drm_framebuffer *fb = plane_state->fb;
struct drm_format_name_buf format_name;
- if (sun4i_backend_plane_uses_frontend(plane_state)) {
+ if (!sun4i_backend_plane_is_supported(plane_state,
+ &layer_state->uses_frontend))
+ return -EINVAL;
+
+ if (layer_state->uses_frontend) {
DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
plane->index);
-
- layer_state->uses_frontend = true;
num_frontend_planes++;
} else {
- layer_state->uses_frontend = false;
+ if (fb->format->is_yuv) {
+ DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
+ num_yuv_planes++;
+ }
}
DRM_DEBUG_DRIVER("Plane FB format is %s\n",
@@ -476,11 +563,6 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
num_alpha_planes++;
- if (fb->format->is_yuv) {
- DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
- num_yuv_planes++;
- }
-
DRM_DEBUG_DRIVER("Plane zpos is %d\n",
plane_state->normalized_zpos);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index e3d4c6035eb2..01f66463271b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -198,6 +198,7 @@ engine_to_sun4i_backend(struct sunxi_engine *engine)
void sun4i_backend_layer_enable(struct sun4i_backend *backend,
int layer, bool enable);
+bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier);
int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
int layer, struct drm_plane *plane);
int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
@@ -208,5 +209,7 @@ int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
int layer, uint32_t in_fmt);
int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend,
int layer, struct drm_plane *plane);
+void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
+ int layer);
#endif /* _SUN4I_BACKEND_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 1e41c3f5fd6d..9e4c375ccc96 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -28,13 +28,22 @@
#include "sun4i_tcon.h"
#include "sun8i_tcon_top.h"
+static int drm_sun4i_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ /* The hardware only allows even pitches for YUV buffers. */
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), 2);
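+ /* e.g. a 601-pixel-wide 8bpp buffer is given a 602-byte pitch. */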
+
+ return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
+
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
static struct drm_driver sun4i_drv_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
/* Generic Operations */
- .lastclose = drm_fb_helper_lastclose,
.fops = &sun4i_drv_fops,
.name = "sun4i-drm",
.desc = "Allwinner sun4i Display Engine",
@@ -43,7 +52,7 @@ static struct drm_driver sun4i_drv_driver = {
.minor = 0,
/* GEM Operations */
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = drm_sun4i_gem_dumb_create,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -105,12 +114,7 @@ static int sun4i_drv_bind(struct device *dev)
/* Remove early framebuffers (ie. simplefb) */
drm_fb_helper_remove_conflicting_framebuffers(NULL, "sun4i-drm-fb", false);
- /* Create our framebuffer */
- ret = sun4i_framebuffer_init(drm);
- if (ret) {
- dev_err(drm->dev, "Couldn't create our framebuffer\n");
- goto cleanup_mode_config;
- }
+ sun4i_framebuffer_init(drm);
/* Enable connectors polling */
drm_kms_helper_poll_init(drm);
@@ -119,11 +123,12 @@ static int sun4i_drv_bind(struct device *dev)
if (ret)
goto finish_poll;
+ drm_fbdev_generic_setup(drm, 32);
+
return 0;
finish_poll:
drm_kms_helper_poll_fini(drm);
- sun4i_framebuffer_free(drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
@@ -138,7 +143,6 @@ static void sun4i_drv_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
- sun4i_framebuffer_free(drm);
drm_mode_config_cleanup(drm);
of_reserved_mem_device_release(dev);
drm_dev_put(drm);
@@ -406,6 +410,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
{ .compatible = "allwinner,sun50i-a64-display-engine" },
+ { .compatible = "allwinner,sun50i-h6-display-engine" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 5f29850ef8ac..cb828028ae06 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -12,8 +12,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drmP.h>
@@ -37,7 +35,6 @@ static int sun4i_de_atomic_check(struct drm_device *dev,
}
static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = sun4i_de_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
@@ -47,7 +44,7 @@ static struct drm_mode_config_helper_funcs sun4i_de_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
-int sun4i_framebuffer_init(struct drm_device *drm)
+void sun4i_framebuffer_init(struct drm_device *drm)
{
drm_mode_config_reset(drm);
@@ -56,11 +53,4 @@ int sun4i_framebuffer_init(struct drm_device *drm)
drm->mode_config.funcs = &sun4i_de_mode_config_funcs;
drm->mode_config.helper_private = &sun4i_de_mode_config_helpers;
-
- return drm_fb_cma_fbdev_init(drm, 32, 0);
-}
-
-void sun4i_framebuffer_free(struct drm_device *drm)
-{
- drm_fb_cma_fbdev_fini(drm);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
index 7ef0aed8384c..6fe5bd8c4026 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.h
@@ -13,7 +13,6 @@
#ifndef _SUN4I_FRAMEBUFFER_H_
#define _SUN4I_FRAMEBUFFER_H_
-int sun4i_framebuffer_init(struct drm_device *drm);
-void sun4i_framebuffer_free(struct drm_device *drm);
+void sun4i_framebuffer_init(struct drm_device *drm);
#endif /* _SUN4I_FRAMEBUFFER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index ddf6cfa6dd23..1a7ebc45747e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -107,8 +107,34 @@ EXPORT_SYMBOL(sun4i_frontend_update_buffer);
static int sun4i_frontend_drm_format_to_input_fmt(uint32_t fmt, u32 *val)
{
switch (fmt) {
- case DRM_FORMAT_ARGB8888:
- *val = 5;
+ case DRM_FORMAT_XRGB8888:
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sun4i_frontend_drm_format_to_input_mode(uint32_t fmt, u32 *val)
+{
+ if (drm_format_num_planes(fmt) == 1)
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sun4i_frontend_drm_format_to_input_sequence(uint32_t fmt, u32 *val)
+{
+ switch (fmt) {
+ case DRM_FORMAT_BGRX8888:
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_BGRX;
+ return 0;
+
+ case DRM_FORMAT_XRGB8888:
+ *val = SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB;
return 0;
default:
@@ -119,9 +145,12 @@ static int sun4i_frontend_drm_format_to_input_fmt(uint32_t fmt, u32 *val)
static int sun4i_frontend_drm_format_to_output_fmt(uint32_t fmt, u32 *val)
{
switch (fmt) {
+ case DRM_FORMAT_BGRX8888:
+ *val = SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_BGRX8888;
+ return 0;
+
case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- *val = 2;
+ *val = SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_XRGB8888;
return 0;
default:
@@ -129,22 +158,54 @@ static int sun4i_frontend_drm_format_to_output_fmt(uint32_t fmt, u32 *val)
}
}
+static const uint32_t sun4i_frontend_formats[] = {
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XRGB8888,
+};
+
+bool sun4i_frontend_format_is_supported(uint32_t fmt, uint64_t modifier)
+{
+ unsigned int i;
+
+ if (modifier != DRM_FORMAT_MOD_LINEAR)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(sun4i_frontend_formats); i++)
+ if (sun4i_frontend_formats[i] == fmt)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(sun4i_frontend_format_is_supported);
+
int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
struct drm_plane *plane, uint32_t out_fmt)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
+ uint32_t format = fb->format->format;
u32 out_fmt_val;
- u32 in_fmt_val;
+ u32 in_fmt_val, in_mod_val, in_ps_val;
int ret;
- ret = sun4i_frontend_drm_format_to_input_fmt(fb->format->format,
- &in_fmt_val);
+ ret = sun4i_frontend_drm_format_to_input_fmt(format, &in_fmt_val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid input format\n");
return ret;
}
+ ret = sun4i_frontend_drm_format_to_input_mode(format, &in_mod_val);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Invalid input mode\n");
+ return ret;
+ }
+
+ ret = sun4i_frontend_drm_format_to_input_sequence(format, &in_ps_val);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Invalid pixel sequence\n");
+ return ret;
+ }
+
ret = sun4i_frontend_drm_format_to_output_fmt(out_fmt, &out_fmt_val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid output format\n");
@@ -162,10 +223,12 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTPHASE1_REG, 0x400);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTPHASE1_REG, 0x400);
+ regmap_update_bits(frontend->regs, SUN4I_FRONTEND_BYPASS_REG,
+ SUN4I_FRONTEND_BYPASS_CSC_EN,
+ SUN4I_FRONTEND_BYPASS_CSC_EN);
+
regmap_write(frontend->regs, SUN4I_FRONTEND_INPUT_FMT_REG,
- SUN4I_FRONTEND_INPUT_FMT_DATA_MOD(1) |
- SUN4I_FRONTEND_INPUT_FMT_DATA_FMT(in_fmt_val) |
- SUN4I_FRONTEND_INPUT_FMT_PS(1));
+ in_mod_val | in_fmt_val | in_ps_val);
/*
* TODO: It looks like the A31 and A80 at least will need the
@@ -173,7 +236,7 @@ int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
* ARGB8888).
*/
regmap_write(frontend->regs, SUN4I_FRONTEND_OUTPUT_FMT_REG,
- SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT(out_fmt_val));
+ out_fmt_val);
return 0;
}
@@ -183,16 +246,24 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ uint32_t luma_width, luma_height;
+ uint32_t chroma_width, chroma_height;
/* Set height and width */
DRM_DEBUG_DRIVER("Frontend size W: %u H: %u\n",
state->crtc_w, state->crtc_h);
+
+ luma_width = state->src_w >> 16;
+ luma_height = state->src_h >> 16;
+
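+ /*
+ * The chroma plane is subsampled by hsub x vsub: e.g. for NV12
+ * (hsub = vsub = 2) a 1280x720 source yields a 640x360 chroma
+ * plane (illustrative numbers, not specific to this driver).
+ */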
+ chroma_width = DIV_ROUND_UP(luma_width, fb->format->hsub);
+ chroma_height = DIV_ROUND_UP(luma_height, fb->format->vsub);
+
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_INSIZE_REG,
- SUN4I_FRONTEND_INSIZE(state->src_h >> 16,
- state->src_w >> 16));
+ SUN4I_FRONTEND_INSIZE(luma_height, luma_width));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_INSIZE_REG,
- SUN4I_FRONTEND_INSIZE(state->src_h >> 16,
- state->src_w >> 16));
+ SUN4I_FRONTEND_INSIZE(chroma_height, chroma_width));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_OUTSIZE_REG,
SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
@@ -200,14 +271,14 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
SUN4I_FRONTEND_OUTSIZE(state->crtc_h, state->crtc_w));
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_HORZFACT_REG,
- state->src_w / state->crtc_w);
+ (luma_width << 16) / state->crtc_w);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_HORZFACT_REG,
- state->src_w / state->crtc_w);
+ (chroma_width << 16) / state->crtc_w);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH0_VERTFACT_REG,
- state->src_h / state->crtc_h);
+ (luma_height << 16) / state->crtc_h);
regmap_write(frontend->regs, SUN4I_FRONTEND_CH1_VERTFACT_REG,
- state->src_h / state->crtc_h);
+ (chroma_height << 16) / state->crtc_h);
regmap_write_bits(frontend->regs, SUN4I_FRONTEND_FRM_CTRL_REG,
SUN4I_FRONTEND_FRM_CTRL_REG_RDY,
@@ -339,10 +410,6 @@ static int sun4i_frontend_runtime_resume(struct device *dev)
SUN4I_FRONTEND_EN_EN,
SUN4I_FRONTEND_EN_EN);
- regmap_update_bits(frontend->regs, SUN4I_FRONTEND_BYPASS_REG,
- SUN4I_FRONTEND_BYPASS_CSC_EN,
- SUN4I_FRONTEND_BYPASS_CSC_EN);
-
sun4i_frontend_scaler_init(frontend);
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.h b/drivers/gpu/drm/sun4i/sun4i_frontend.h
index 02661ce81f3e..ad146e8d8d70 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.h
@@ -26,12 +26,14 @@
#define SUN4I_FRONTEND_LINESTRD0_REG 0x040
#define SUN4I_FRONTEND_INPUT_FMT_REG 0x04c
-#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD(mod) ((mod) << 8)
-#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT(fmt) ((fmt) << 4)
-#define SUN4I_FRONTEND_INPUT_FMT_PS(ps) (ps)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_MOD_PACKED (1 << 8)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_FMT_RGB (5 << 4)
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_BGRX 0
+#define SUN4I_FRONTEND_INPUT_FMT_DATA_PS_XRGB 1
#define SUN4I_FRONTEND_OUTPUT_FMT_REG 0x05c
-#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT(fmt) (fmt)
+#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_BGRX8888 1
+#define SUN4I_FRONTEND_OUTPUT_FMT_DATA_FMT_XRGB8888 2
#define SUN4I_FRONTEND_CH0_INSIZE_REG 0x100
#define SUN4I_FRONTEND_INSIZE(h, w) ((((h) - 1) << 16) | (((w) - 1)))
@@ -95,5 +97,6 @@ void sun4i_frontend_update_coord(struct sun4i_frontend *frontend,
struct drm_plane *plane);
int sun4i_frontend_update_formats(struct sun4i_frontend *frontend,
struct drm_plane *plane, uint32_t out_fmt);
+bool sun4i_frontend_format_is_supported(uint32_t fmt, uint64_t modifier);
#endif /* _SUN4I_FRONTEND_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
index 3ecffa52c814..fb985ba1a176 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
@@ -35,7 +35,7 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
{
unsigned long best_rate = 0;
u8 best_m = 0, m;
- bool is_double;
+ bool is_double = false;
for (m = div_offset ?: 1; m < (16 + div_offset); m++) {
u8 d;
@@ -52,7 +52,7 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
(rate - tmp_rate) < (rate - best_rate)) {
best_rate = tmp_rate;
best_m = m;
- is_double = d;
+ is_double = (d == 2);
}
}
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 78f77af8805a..29631e0efde3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drmP.h>
#include "sun4i_backend.h"
@@ -92,14 +93,16 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
struct sun4i_backend *backend = layer->backend;
struct sun4i_frontend *frontend = backend->frontend;
+ sun4i_backend_cleanup_layer(backend, layer->id);
+
if (layer_state->uses_frontend) {
sun4i_frontend_init(frontend);
sun4i_frontend_update_coord(frontend, plane);
sun4i_frontend_update_buffer(frontend, plane);
sun4i_frontend_update_formats(frontend, plane,
- DRM_FORMAT_ARGB8888);
+ DRM_FORMAT_XRGB8888);
sun4i_backend_update_layer_frontend(backend, layer->id,
- DRM_FORMAT_ARGB8888);
+ DRM_FORMAT_XRGB8888);
sun4i_frontend_enable(frontend);
} else {
sun4i_backend_update_layer_formats(backend, layer->id, plane);
@@ -112,6 +115,7 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_disable = sun4i_backend_layer_atomic_disable,
.atomic_update = sun4i_backend_layer_atomic_update,
};
@@ -125,10 +129,11 @@ static const struct drm_plane_funcs sun4i_backend_layer_funcs = {
.update_plane = drm_atomic_helper_update_plane,
};
-static const uint32_t sun4i_backend_layer_formats[] = {
+static const uint32_t sun4i_layer_formats[] = {
DRM_FORMAT_ARGB8888,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGBA5551,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGB888,
@@ -154,8 +159,8 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun4i_backend_layer_funcs,
- sun4i_backend_layer_formats,
- ARRAY_SIZE(sun4i_backend_layer_formats),
+ sun4i_layer_formats,
+ ARRAY_SIZE(sun4i_layer_formats),
NULL, type, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index f949287d926c..0420f5c978b9 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -478,8 +478,11 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
}
static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder,
const struct drm_display_mode *mode)
{
+ struct drm_connector *connector = sun4i_tcon_get_connector(encoder);
+ struct drm_display_info display_info = connector->display_info;
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
@@ -491,8 +494,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
sun4i_tcon0_mode_set_common(tcon, mode);
/* Set dithering if needed */
- if (tcon->panel)
- sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
+ sun4i_tcon0_mode_set_dithering(tcon, connector);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
@@ -541,6 +543,9 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+ if (display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+ val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
+
/*
* On A20 and similar SoCs, the only way to achieve Positive Edge
 * (Rising Edge) is to set the dclk clock phase to 2/3 (240°).
@@ -556,20 +561,16 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 * The following code is a way to avoid quirks all around the TCON
* and DOTCLOCK drivers.
*/
- if (tcon->panel) {
- struct drm_panel *panel = tcon->panel;
- struct drm_connector *connector = panel->connector;
- struct drm_display_info display_info = connector->display_info;
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ clk_set_phase(tcon->dclk, 240);
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
- clk_set_phase(tcon->dclk, 240);
-
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
- clk_set_phase(tcon->dclk, 0);
- }
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ clk_set_phase(tcon->dclk, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
- SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
+ SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_DE_NEGATIVE,
val);
/* Map output pins to channel 0 */
@@ -684,7 +685,7 @@ void sun4i_tcon_mode_set(struct sun4i_tcon *tcon,
sun4i_tcon0_mode_set_lvds(tcon, encoder, mode);
break;
case DRM_MODE_ENCODER_NONE:
- sun4i_tcon0_mode_set_rgb(tcon, mode);
+ sun4i_tcon0_mode_set_rgb(tcon, encoder, mode);
sun4i_tcon_set_mux(tcon, 0, encoder);
break;
case DRM_MODE_ENCODER_TVDAC:
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 3d492c8be1fc..b5214d71610f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -116,6 +116,7 @@
#define SUN4I_TCON0_IO_POL_REG 0x88
#define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
+#define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27)
#define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
#define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index b14925b40ccf..e7608a72f26f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -34,6 +34,41 @@ static const u32 yvu2rgb[] = {
0x000004A8, 0x00000000, 0x00000813, 0xFFFBAC4A,
};
+/*
+ * DE3 has slightly different CSC units. Factors are in two's complement
+ * format. The first three factors in a row are multiplication factors with
+ * 17 bits of fractional part. The fourth value in a row packs two factors:
+ * the upper 16 bits hold the difference, which is subtracted from the input
+ * value before multiplication, and the lower 16 bits hold the constant,
+ * which is added at the end.
+ *
+ * x' = c00 * (x + d0) + c01 * (y + d1) + c02 * (z + d2) + const0
+ * y' = c10 * (x + d0) + c11 * (y + d1) + c12 * (z + d2) + const1
+ * z' = c20 * (x + d0) + c21 * (y + d1) + c22 * (z + d2) + const2
+ *
+ * Please note that the above formula holds only for the Blender CSC. Other
+ * DE3 CSC units take only a positive value for the difference. From what can
+ * be deduced from the BSP driver code, those units probably assume
+ * automatically that the difference has to be subtracted.
+ *
+ * Layout of factors in table:
+ * c00 c01 c02 [d0 const0]
+ * c10 c11 c12 [d1 const1]
+ * c20 c21 c22 [d2 const2]
+ */
+
+static const u32 yuv2rgb_de3[] = {
+ 0x0002542a, 0x00000000, 0x0003312a, 0xffc00000,
+ 0x0002542a, 0xffff376b, 0xfffe5fc3, 0xfe000000,
+ 0x0002542a, 0x000408d3, 0x00000000, 0xfe000000,
+};
+
+static const u32 yvu2rgb_de3[] = {
+ 0x0002542a, 0x0003312a, 0x00000000, 0xffc00000,
+ 0x0002542a, 0xfffe5fc3, 0xffff376b, 0xfe000000,
+ 0x0002542a, 0x00000000, 0x000408d3, 0xfe000000,
+};
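
[Editor's note] To make the packed DE3 layout above concrete, here is a minimal standalone C sketch (not driver code) of how one of the 12-entry tables would be applied to a pixel. The helper name de3_csc_apply, the arithmetic right shift used for sign extension, and the s.17 result shift are assumptions drawn from the comment; the hardware does this internally.

	#include <stdint.h>

	/*
	 * Apply a DE3 CSC table (e.g. yuv2rgb_de3) to one pixel:
	 * out[i] = ((sum_j c[i][j] * (in[j] + d[j])) >> 17) + const[i]
	 */
	static void de3_csc_apply(const uint32_t t[12], const int32_t in[3],
				  int32_t out[3])
	{
		int32_t d[3], cnst[3];
		int i, j;

		for (i = 0; i < 3; i++) {
			/* Fourth word of row i: difference (upper 16 bits),
			 * constant (lower 16 bits); assumes arithmetic shift. */
			d[i] = (int32_t)t[4 * i + 3] >> 16;
			cnst[i] = (int16_t)(t[4 * i + 3] & 0xffff);
		}

		for (i = 0; i < 3; i++) {
			int64_t acc = 0;

			for (j = 0; j < 3; j++)
				acc += (int64_t)(int32_t)t[4 * i + j] *
				       (in[j] + d[j]);

			/* Multiplication factors carry 17 fractional bits. */
			out[i] = (int32_t)(acc >> 17) + cnst[i];
		}
	}

With yuv2rgb_de3 the packed differences decode to -64 (0xffc0) and -512 (0xfe00), i.e. the usual 10-bit limited-range luma and chroma offsets, which is consistent with the layout described in the comment.
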
+
static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
enum sun8i_csc_mode mode)
{
@@ -61,6 +96,28 @@ static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
}
}
+static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
+ enum sun8i_csc_mode mode)
+{
+ const u32 *table;
+ u32 base_reg;
+
+ switch (mode) {
+ case SUN8I_CSC_MODE_YUV2RGB:
+ table = yuv2rgb_de3;
+ break;
+ case SUN8I_CSC_MODE_YVU2RGB:
+ table = yvu2rgb_de3;
+ break;
+ default:
+ DRM_WARN("Wrong CSC mode specified.\n");
+ return;
+ }
+
+ base_reg = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0, 0);
+ regmap_bulk_write(map, base_reg, table, 12);
+}
+
static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
{
u32 val;
@@ -73,11 +130,32 @@ static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
regmap_update_bits(map, SUN8I_CSC_CTRL(base), SUN8I_CSC_CTRL_EN, val);
}
+static void sun8i_de3_ccsc_enable(struct regmap *map, int layer, bool enable)
+{
+ u32 val, mask;
+
+ mask = SUN50I_MIXER_BLEND_CSC_CTL_EN(layer);
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ regmap_update_bits(map, SUN50I_MIXER_BLEND_CSC_CTL(DE3_BLD_BASE),
+ mask, val);
+}
+
void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
enum sun8i_csc_mode mode)
{
u32 base;
+ if (mixer->cfg->is_de3) {
+ sun8i_de3_ccsc_set_coefficients(mixer->engine.regs,
+ layer, mode);
+ return;
+ }
+
base = ccsc_base[mixer->cfg->ccsc][layer];
sun8i_csc_set_coefficients(mixer->engine.regs, base, mode);
@@ -87,6 +165,11 @@ void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
{
u32 base;
+ if (mixer->cfg->is_de3) {
+ sun8i_de3_ccsc_enable(mixer->engine.regs, layer, enable);
+ return;
+ }
+
base = ccsc_base[mixer->cfg->ccsc][layer];
sun8i_csc_enable(mixer->engine.regs, base, enable);
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index ed2983770e9c..dc47720c99ba 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -5,6 +5,7 @@
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <drm/drm_of.h>
@@ -20,7 +21,8 @@ static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
{
struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder);
- clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
+ if (hdmi->quirks->set_rate)
+ clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
}
static const struct drm_encoder_helper_funcs
@@ -33,8 +35,8 @@ static const struct drm_encoder_funcs sun8i_dw_hdmi_encoder_funcs = {
};
static enum drm_mode_status
-sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
+sun8i_dw_hdmi_mode_valid_a83t(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
{
if (mode->clock > 297000)
return MODE_CLOCK_HIGH;
@@ -42,6 +44,17 @@ sun8i_dw_hdmi_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static enum drm_mode_status
+sun8i_dw_hdmi_mode_valid_h6(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ /* This is the maximum for HDMI 2.0b (4K@60Hz) */
+ if (mode->clock > 594000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
static bool sun8i_dw_hdmi_node_is_tcon_top(struct device_node *node)
{
return IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
@@ -102,6 +115,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = &pdev->dev;
encoder = &hdmi->encoder;
+ hdmi->quirks = of_device_get_match_data(dev);
+
encoder->possible_crtcs =
sun8i_dw_hdmi_find_possible_crtcs(drm, dev->of_node);
/*
@@ -168,10 +183,8 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
sun8i_hdmi_phy_init(hdmi->phy);
- plat_data->mode_valid = &sun8i_dw_hdmi_mode_valid;
- plat_data->phy_ops = sun8i_hdmi_phy_get_ops();
- plat_data->phy_name = "sun8i_dw_hdmi_phy";
- plat_data->phy_data = hdmi->phy;
+ plat_data->mode_valid = hdmi->quirks->mode_valid;
+ sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data);
platform_set_drvdata(pdev, hdmi);
@@ -230,8 +243,24 @@ static int sun8i_dw_hdmi_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
+ .mode_valid = sun8i_dw_hdmi_mode_valid_a83t,
+ .set_rate = true,
+};
+
+static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
+ .mode_valid = sun8i_dw_hdmi_mode_valid_h6,
+};
+
static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = {
- { .compatible = "allwinner,sun8i-a83t-dw-hdmi" },
+ {
+ .compatible = "allwinner,sun8i-a83t-dw-hdmi",
+ .data = &sun8i_a83t_quirks,
+ },
+ {
+ .compatible = "allwinner,sun50i-h6-dw-hdmi",
+ .data = &sun50i_h6_quirks,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun8i_dw_hdmi_dt_ids);
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index 7fdc1ecd2892..720c5aa8adc1 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -150,6 +150,10 @@ struct sun8i_hdmi_phy;
struct sun8i_hdmi_phy_variant {
bool has_phy_clk;
bool has_second_pll;
+ unsigned int is_custom_phy : 1;
+ const struct dw_hdmi_curr_ctrl *cur_ctr;
+ const struct dw_hdmi_mpll_config *mpll_cfg;
+ const struct dw_hdmi_phy_config *phy_cfg;
void (*phy_init)(struct sun8i_hdmi_phy *phy);
void (*phy_disable)(struct dw_hdmi *hdmi,
struct sun8i_hdmi_phy *phy);
@@ -170,6 +174,12 @@ struct sun8i_hdmi_phy {
struct sun8i_hdmi_phy_variant *variant;
};
+struct sun8i_dw_hdmi_quirks {
+ enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+ const struct drm_display_mode *mode);
+ unsigned int set_rate : 1;
+};
+
struct sun8i_dw_hdmi {
struct clk *clk_tmds;
struct device *dev;
@@ -178,6 +188,7 @@ struct sun8i_dw_hdmi {
struct sun8i_hdmi_phy *phy;
struct dw_hdmi_plat_data plat_data;
struct regulator *regulator;
+ const struct sun8i_dw_hdmi_quirks *quirks;
struct reset_control *rst_ctrl;
};
@@ -191,7 +202,8 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
-const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void);
+void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
+ struct dw_hdmi_plat_data *plat_data);
int sun8i_phy_clk_create(struct sun8i_hdmi_phy *phy, struct device *dev,
bool second_parent);
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 471993097ced..66ea3a902e36 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -14,6 +14,122 @@
*/
#define I2C_ADDR 0x69
+static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {
+ {
+ 30666000, {
+ { 0x00b3, 0x0000 },
+ { 0x2153, 0x0000 },
+ { 0x40f3, 0x0000 },
+ },
+ }, {
+ 36800000, {
+ { 0x00b3, 0x0000 },
+ { 0x2153, 0x0000 },
+ { 0x40a2, 0x0001 },
+ },
+ }, {
+ 46000000, {
+ { 0x00b3, 0x0000 },
+ { 0x2142, 0x0001 },
+ { 0x40a2, 0x0001 },
+ },
+ }, {
+ 61333000, {
+ { 0x0072, 0x0001 },
+ { 0x2142, 0x0001 },
+ { 0x40a2, 0x0001 },
+ },
+ }, {
+ 73600000, {
+ { 0x0072, 0x0001 },
+ { 0x2142, 0x0001 },
+ { 0x4061, 0x0002 },
+ },
+ }, {
+ 92000000, {
+ { 0x0072, 0x0001 },
+ { 0x2145, 0x0002 },
+ { 0x4061, 0x0002 },
+ },
+ }, {
+ 122666000, {
+ { 0x0051, 0x0002 },
+ { 0x2145, 0x0002 },
+ { 0x4061, 0x0002 },
+ },
+ }, {
+ 147200000, {
+ { 0x0051, 0x0002 },
+ { 0x2145, 0x0002 },
+ { 0x4064, 0x0003 },
+ },
+ }, {
+ 184000000, {
+ { 0x0051, 0x0002 },
+ { 0x214c, 0x0003 },
+ { 0x4064, 0x0003 },
+ },
+ }, {
+ 226666000, {
+ { 0x0040, 0x0003 },
+ { 0x214c, 0x0003 },
+ { 0x4064, 0x0003 },
+ },
+ }, {
+ 272000000, {
+ { 0x0040, 0x0003 },
+ { 0x214c, 0x0003 },
+ { 0x5a64, 0x0003 },
+ },
+ }, {
+ 340000000, {
+ { 0x0040, 0x0003 },
+ { 0x3b4c, 0x0003 },
+ { 0x5a64, 0x0003 },
+ },
+ }, {
+ 594000000, {
+ { 0x1a40, 0x0003 },
+ { 0x3b4c, 0x0003 },
+ { 0x5a64, 0x0003 },
+ },
+ }, {
+ ~0UL, {
+ { 0x0000, 0x0000 },
+ { 0x0000, 0x0000 },
+ { 0x0000, 0x0000 },
+ },
+ }
+};
+
+static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
+ /* pixelclk bpp8 bpp10 bpp12 */
+ { 25175000, { 0x0000, 0x0000, 0x0000 }, },
+ { 27000000, { 0x0012, 0x0000, 0x0000 }, },
+ { 59400000, { 0x0008, 0x0008, 0x0008 }, },
+ { 72000000, { 0x0008, 0x0008, 0x001b }, },
+ { 74250000, { 0x0013, 0x0013, 0x0013 }, },
+ { 90000000, { 0x0008, 0x001a, 0x001b }, },
+ { 118800000, { 0x001b, 0x001a, 0x001b }, },
+ { 144000000, { 0x001b, 0x001a, 0x0034 }, },
+ { 180000000, { 0x001b, 0x0033, 0x0034 }, },
+ { 216000000, { 0x0036, 0x0033, 0x0034 }, },
+ { 237600000, { 0x0036, 0x0033, 0x001b }, },
+ { 288000000, { 0x0036, 0x001b, 0x001b }, },
+ { 297000000, { 0x0019, 0x001b, 0x0019 }, },
+ { 330000000, { 0x0036, 0x001b, 0x001b }, },
+ { 594000000, { 0x003f, 0x001b, 0x001b }, },
+ { ~0UL, { 0x0000, 0x0000, 0x0000 }, }
+};
+
+static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
+ /*pixelclk symbol term vlev*/
+ { 74250000, 0x8009, 0x0004, 0x0232},
+ { 148500000, 0x8029, 0x0004, 0x0273},
+ { 594000000, 0x8039, 0x0004, 0x014a},
+ { ~0UL, 0x0000, 0x0000, 0x0000}
+};
+
static int sun8i_hdmi_phy_config_a83t(struct dw_hdmi *hdmi,
struct sun8i_hdmi_phy *phy,
unsigned int clk_rate)
@@ -279,8 +395,31 @@ static const struct dw_hdmi_phy_ops sun8i_hdmi_phy_ops = {
.setup_hpd = &dw_hdmi_phy_setup_hpd,
};
+static void sun8i_hdmi_phy_unlock(struct sun8i_hdmi_phy *phy)
+{
+ /* enable read access to HDMI controller */
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_READ_EN_REG,
+ SUN8I_HDMI_PHY_READ_EN_MAGIC);
+
+ /* unscramble register offsets */
+ regmap_write(phy->regs, SUN8I_HDMI_PHY_UNSCRAMBLE_REG,
+ SUN8I_HDMI_PHY_UNSCRAMBLE_MAGIC);
+}
+
+static void sun50i_hdmi_phy_init_h6(struct sun8i_hdmi_phy *phy)
+{
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
+ SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN,
+ SUN8I_HDMI_PHY_REXT_CTRL_REXT_EN);
+
+ regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_REXT_CTRL_REG,
+ 0xffff0000, 0x80c00000);
+}
+
static void sun8i_hdmi_phy_init_a83t(struct sun8i_hdmi_phy *phy)
{
+ sun8i_hdmi_phy_unlock(phy);
+
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_DBG_CTRL_REG,
SUN8I_HDMI_PHY_DBG_CTRL_PX_LOCK,
SUN8I_HDMI_PHY_DBG_CTRL_PX_LOCK);
@@ -298,6 +437,8 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
{
unsigned int val;
+ sun8i_hdmi_phy_unlock(phy);
+
regmap_write(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG, 0);
regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
SUN8I_HDMI_PHY_ANA_CFG1_ENBI,
@@ -372,20 +513,23 @@ static void sun8i_hdmi_phy_init_h3(struct sun8i_hdmi_phy *phy)
void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy)
{
- /* enable read access to HDMI controller */
- regmap_write(phy->regs, SUN8I_HDMI_PHY_READ_EN_REG,
- SUN8I_HDMI_PHY_READ_EN_MAGIC);
-
- /* unscramble register offsets */
- regmap_write(phy->regs, SUN8I_HDMI_PHY_UNSCRAMBLE_REG,
- SUN8I_HDMI_PHY_UNSCRAMBLE_MAGIC);
-
phy->variant->phy_init(phy);
}
-const struct dw_hdmi_phy_ops *sun8i_hdmi_phy_get_ops(void)
+void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
+ struct dw_hdmi_plat_data *plat_data)
{
- return &sun8i_hdmi_phy_ops;
+ struct sun8i_hdmi_phy_variant *variant = phy->variant;
+
+ if (variant->is_custom_phy) {
+ plat_data->phy_ops = &sun8i_hdmi_phy_ops;
+ plat_data->phy_name = "sun8i_dw_hdmi_phy";
+ plat_data->phy_data = phy;
+ } else {
+ plat_data->mpll_cfg = variant->mpll_cfg;
+ plat_data->cur_ctr = variant->cur_ctr;
+ plat_data->phy_config = variant->phy_cfg;
+ }
}
static struct regmap_config sun8i_hdmi_phy_regmap_config = {
@@ -396,14 +540,8 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
.name = "phy"
};
-static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
- .has_phy_clk = true,
- .phy_init = &sun8i_hdmi_phy_init_h3,
- .phy_disable = &sun8i_hdmi_phy_disable_h3,
- .phy_config = &sun8i_hdmi_phy_config_h3,
-};
-
static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
+ .is_custom_phy = true,
.phy_init = &sun8i_hdmi_phy_init_a83t,
.phy_disable = &sun8i_hdmi_phy_disable_a83t,
.phy_config = &sun8i_hdmi_phy_config_a83t,
@@ -411,6 +549,7 @@ static const struct sun8i_hdmi_phy_variant sun8i_a83t_hdmi_phy = {
static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
.has_phy_clk = true,
+ .is_custom_phy = true,
.phy_init = &sun8i_hdmi_phy_init_h3,
.phy_disable = &sun8i_hdmi_phy_disable_h3,
.phy_config = &sun8i_hdmi_phy_config_h3,
@@ -419,17 +558,29 @@ static const struct sun8i_hdmi_phy_variant sun8i_h3_hdmi_phy = {
static const struct sun8i_hdmi_phy_variant sun8i_r40_hdmi_phy = {
.has_phy_clk = true,
.has_second_pll = true,
+ .is_custom_phy = true,
+ .phy_init = &sun8i_hdmi_phy_init_h3,
+ .phy_disable = &sun8i_hdmi_phy_disable_h3,
+ .phy_config = &sun8i_hdmi_phy_config_h3,
+};
+
+static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
+ .has_phy_clk = true,
+ .is_custom_phy = true,
.phy_init = &sun8i_hdmi_phy_init_h3,
.phy_disable = &sun8i_hdmi_phy_disable_h3,
.phy_config = &sun8i_hdmi_phy_config_h3,
};
+static const struct sun8i_hdmi_phy_variant sun50i_h6_hdmi_phy = {
+ .cur_ctr = sun50i_h6_cur_ctr,
+ .mpll_cfg = sun50i_h6_mpll_cfg,
+ .phy_cfg = sun50i_h6_phy_config,
+ .phy_init = &sun50i_hdmi_phy_init_h6,
+};
+
static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
{
- .compatible = "allwinner,sun50i-a64-hdmi-phy",
- .data = &sun50i_a64_hdmi_phy,
- },
- {
.compatible = "allwinner,sun8i-a83t-hdmi-phy",
.data = &sun8i_a83t_hdmi_phy,
},
@@ -441,6 +592,14 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
.compatible = "allwinner,sun8i-r40-hdmi-phy",
.data = &sun8i_r40_hdmi_phy,
},
+ {
+ .compatible = "allwinner,sun50i-a64-hdmi-phy",
+ .data = &sun50i_a64_hdmi_phy,
+ },
+ {
+ .compatible = "allwinner,sun50i-h6-hdmi-phy",
+ .data = &sun50i_h6_hdmi_phy,
+ },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 8b3d02b146b7..44a9ba7d8433 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -368,6 +368,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
struct sun8i_mixer *mixer;
struct resource *res;
void __iomem *regs;
+ unsigned int base;
int plane_cnt;
int i, ret;
@@ -456,33 +457,60 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
list_add_tail(&mixer->engine.list, &drv->engine_list);
- /* Reset the registers */
- for (i = 0x0; i < 0x20000; i += 4)
- regmap_write(mixer->engine.regs, i, 0);
+ base = sun8i_blender_base(mixer);
+
+ /* Reset registers and disable unused sub-engines */
+ if (mixer->cfg->is_de3) {
+ for (i = 0; i < DE3_MIXER_UNIT_SIZE; i += 4)
+ regmap_write(mixer->engine.regs, i, 0);
+
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_FCE_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_PEAK_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_LCTI_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_BLS_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_FCC_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_DNS_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_DRC_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_FMT_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC0_EN, 0);
+ regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC1_EN, 0);
+ } else {
+ for (i = 0; i < DE2_MIXER_UNIT_SIZE; i += 4)
+ regmap_write(mixer->engine.regs, i, 0);
+
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_FCE_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BWS_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_LTI_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_PEAK_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_ASE_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_FCC_EN, 0);
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_DCSC_EN, 0);
+ }
/* Enable the mixer */
regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_CTL,
SUN8I_MIXER_GLOBAL_CTL_RT_EN);
/* Set background color to black */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR,
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
SUN8I_MIXER_BLEND_COLOR_BLACK);
/*
* Set fill color of bottom plane to black. Generally not needed
* except when VI plane is at bottom (zpos = 0) and enabled.
*/
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(0),
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
SUN8I_MIXER_BLEND_COLOR_BLACK);
plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
for (i = 0; i < plane_cnt; i++)
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_MODE(i),
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_MODE(base, i),
SUN8I_MIXER_BLEND_MODE_DEF);
- regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL,
+ regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
return 0;
@@ -585,6 +613,15 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
.vi_num = 1,
};
+static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
+ .ccsc = 0,
+ .is_de3 = true,
+ .mod_rate = 600000000,
+ .scaler_mask = 0xf,
+ .ui_num = 3,
+ .vi_num = 1,
+};
+
static const struct of_device_id sun8i_mixer_of_table[] = {
{
.compatible = "allwinner,sun8i-a83t-de2-mixer-0",
@@ -618,6 +655,10 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.compatible = "allwinner,sun50i-a64-de2-mixer-1",
.data = &sun50i_a64_mixer1_cfg,
},
+ {
+ .compatible = "allwinner,sun50i-h6-de3-mixer-0",
+ .data = &sun50i_h6_mixer0_cfg,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 406c42e752d7..913d14ce68b0 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -29,24 +29,41 @@
#define SUN8I_MIXER_GLOBAL_DBUFF_ENABLE BIT(0)
-#define SUN8I_MIXER_BLEND_PIPE_CTL 0x1000
-#define SUN8I_MIXER_BLEND_ATTR_FCOLOR(x) (0x1004 + 0x10 * (x) + 0x0)
-#define SUN8I_MIXER_BLEND_ATTR_INSIZE(x) (0x1004 + 0x10 * (x) + 0x4)
-#define SUN8I_MIXER_BLEND_ATTR_COORD(x) (0x1004 + 0x10 * (x) + 0x8)
-#define SUN8I_MIXER_BLEND_ROUTE 0x1080
-#define SUN8I_MIXER_BLEND_PREMULTIPLY 0x1084
-#define SUN8I_MIXER_BLEND_BKCOLOR 0x1088
-#define SUN8I_MIXER_BLEND_OUTSIZE 0x108c
-#define SUN8I_MIXER_BLEND_MODE(x) (0x1090 + 0x04 * (x))
-#define SUN8I_MIXER_BLEND_CK_CTL 0x10b0
-#define SUN8I_MIXER_BLEND_CK_CFG 0x10b4
-#define SUN8I_MIXER_BLEND_CK_MAX(x) (0x10c0 + 0x04 * (x))
-#define SUN8I_MIXER_BLEND_CK_MIN(x) (0x10e0 + 0x04 * (x))
-#define SUN8I_MIXER_BLEND_OUTCTL 0x10fc
+#define DE2_MIXER_UNIT_SIZE 0x6000
+#define DE3_MIXER_UNIT_SIZE 0x3000
+
+#define DE2_BLD_BASE 0x1000
+#define DE2_CH_BASE 0x2000
+#define DE2_CH_SIZE 0x1000
+
+#define DE3_BLD_BASE 0x0800
+#define DE3_CH_BASE 0x1000
+#define DE3_CH_SIZE 0x0800
+
+#define SUN8I_MIXER_BLEND_PIPE_CTL(base) ((base) + 0)
+#define SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, x) ((base) + 0x4 + 0x10 * (x))
+#define SUN8I_MIXER_BLEND_ATTR_INSIZE(base, x) ((base) + 0x8 + 0x10 * (x))
+#define SUN8I_MIXER_BLEND_ATTR_COORD(base, x) ((base) + 0xc + 0x10 * (x))
+#define SUN8I_MIXER_BLEND_ROUTE(base) ((base) + 0x80)
+#define SUN8I_MIXER_BLEND_PREMULTIPLY(base) ((base) + 0x84)
+#define SUN8I_MIXER_BLEND_BKCOLOR(base) ((base) + 0x88)
+#define SUN8I_MIXER_BLEND_OUTSIZE(base) ((base) + 0x8c)
+#define SUN8I_MIXER_BLEND_MODE(base, x) ((base) + 0x90 + 0x04 * (x))
+#define SUN8I_MIXER_BLEND_CK_CTL(base) ((base) + 0xb0)
+#define SUN8I_MIXER_BLEND_CK_CFG(base) ((base) + 0xb4)
+#define SUN8I_MIXER_BLEND_CK_MAX(base, x) ((base) + 0xc0 + 0x04 * (x))
+#define SUN8I_MIXER_BLEND_CK_MIN(base, x) ((base) + 0xe0 + 0x04 * (x))
+#define SUN8I_MIXER_BLEND_OUTCTL(base) ((base) + 0xfc)
+#define SUN50I_MIXER_BLEND_CSC_CTL(base) ((base) + 0x100)
+#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x, y) \
+ ((base) + 0x110 + (layer) * 0x30 + (x) * 0x10 + 4 * (y))
+#define SUN50I_MIXER_BLEND_CSC_CONST(base, layer, i) \
+ ((base) + 0x110 + (layer) * 0x30 + (i) * 0x10 + 0x0c)
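
[Editor's note] A quick sanity check of the rebased macros, using the bases defined above: SUN8I_MIXER_BLEND_BKCOLOR(DE2_BLD_BASE) = 0x1000 + 0x88 = 0x1088, exactly the absolute offset this hunk removes, so DE2 behaviour is unchanged. On DE3 the same macro yields 0x0800 + 0x88 = 0x0888, with the base chosen at runtime by sun8i_blender_base().
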
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK GENMASK(12, 8)
#define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe) BIT(8 + pipe)
#define SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(pipe) BIT(pipe)
+
/* colors are always in AARRGGBB format */
#define SUN8I_MIXER_BLEND_COLOR_BLACK 0xff000000
/* The following numbers are some still unknown magic numbers */
@@ -57,6 +74,9 @@
#define SUN8I_MIXER_BLEND_OUTCTL_INTERLACED BIT(1)
+#define SUN50I_MIXER_BLEND_CSC_CTL_EN(ch) BIT(ch)
+#define SUN50I_MIXER_BLEND_CSC_CONST_VAL(d, c) (((d) << 16) | ((c) & 0xffff))
+
#define SUN8I_MIXER_FBFMT_ARGB8888 0
#define SUN8I_MIXER_FBFMT_ABGR8888 1
#define SUN8I_MIXER_FBFMT_RGBA8888 2
@@ -95,8 +115,8 @@
#define SUN8I_MIXER_FBFMT_YUV411 14
/*
- * These sub-engines are still unknown now, the EN registers are here only to
- * be used to disable these sub-engines.
+ * Sub-engines listed below are unused for now. The EN registers are here only
+ * to be used to disable these sub-engines.
*/
#define SUN8I_MIXER_FCE_EN 0xa0000
#define SUN8I_MIXER_BWS_EN 0xa2000
@@ -106,6 +126,17 @@
#define SUN8I_MIXER_FCC_EN 0xaa000
#define SUN8I_MIXER_DCSC_EN 0xb0000
+#define SUN50I_MIXER_FCE_EN 0x70000
+#define SUN50I_MIXER_PEAK_EN 0x70800
+#define SUN50I_MIXER_LCTI_EN 0x71000
+#define SUN50I_MIXER_BLS_EN 0x71800
+#define SUN50I_MIXER_FCC_EN 0x72000
+#define SUN50I_MIXER_DNS_EN 0x80000
+#define SUN50I_MIXER_DRC_EN 0xa0000
+#define SUN50I_MIXER_FMT_EN 0xa8000
+#define SUN50I_MIXER_CDC0_EN 0xd0000
+#define SUN50I_MIXER_CDC1_EN 0xd8000
+
struct de2_fmt_info {
u32 drm_fmt;
u32 de2_fmt;
@@ -127,6 +158,7 @@ struct de2_fmt_info {
* are invalid.
* @mod_rate: module clock rate that needs to be set in order to have
* a functional block.
+ * @is_de3: true if this is the next-gen Display Engine 3.0, false otherwise.
*/
struct sun8i_mixer_cfg {
int vi_num;
@@ -134,6 +166,7 @@ struct sun8i_mixer_cfg {
int scaler_mask;
int ccsc;
unsigned long mod_rate;
+ unsigned int is_de3 : 1;
};
struct sun8i_mixer {
@@ -153,5 +186,20 @@ engine_to_sun8i_mixer(struct sunxi_engine *engine)
return container_of(engine, struct sun8i_mixer, engine);
}
+static inline u32
+sun8i_blender_base(struct sun8i_mixer *mixer)
+{
+ return mixer->cfg->is_de3 ? DE3_BLD_BASE : DE2_BLD_BASE;
+}
+
+static inline u32
+sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
+{
+ if (mixer->cfg->is_de3)
+ return DE3_CH_BASE + channel * DE3_CH_SIZE;
+ else
+ return DE2_CH_BASE + channel * DE2_CH_SIZE;
+}
+
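
[Editor's note] The same sanity check works for the channel base: on DE2, channel 1 gives 0x2000 + 1 * 0x1000 = 0x3000, so SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, 0) lands on the 0x2000 + 0x1000 * (ch) address that the absolute macros removed further below (in sun8i_ui_layer.h) encoded directly.
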
const struct de2_fmt_info *sun8i_mixer_format_info(u32 format);
#endif /* _SUN8I_MIXER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 3040a79f298f..fc36e0c10a37 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -9,11 +9,17 @@
#include <linux/component.h>
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include "sun8i_tcon_top.h"
+struct sun8i_tcon_top_quirks {
+ bool has_tcon_tv1;
+ bool has_dsi;
+};
+
static bool sun8i_tcon_top_node_is_tcon_top(struct device_node *node)
{
return !!of_match_node(sun8i_tcon_top_of_table, node);
@@ -121,10 +127,13 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct clk_hw_onecell_data *clk_data;
struct sun8i_tcon_top *tcon_top;
+ const struct sun8i_tcon_top_quirks *quirks;
struct resource *res;
void __iomem *regs;
int ret, i;
+ quirks = of_device_get_match_data(&pdev->dev);
+
tcon_top = devm_kzalloc(dev, sizeof(*tcon_top), GFP_KERNEL);
if (!tcon_top)
return -ENOMEM;
@@ -168,6 +177,13 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
}
/*
+ * At least on H6, some registers have bits set by default, which may
+ * cause issues. Clear them here.
+ */
+ writel(0, regs + TCON_TOP_PORT_SEL_REG);
+ writel(0, regs + TCON_TOP_GATE_SRC_REG);
+
+ /*
* TCON TOP has two muxes, which select parent clock for each TCON TV
* channel clock. Parent could be either TCON TV or TVE clock. For now
* we leave this fixed to TCON TV, since TVE driver for R40 is not yet
@@ -180,15 +196,17 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
&tcon_top->reg_lock,
TCON_TOP_TCON_TV0_GATE, 0);
- clk_data->hws[CLK_TCON_TOP_TV1] =
- sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
- &tcon_top->reg_lock,
- TCON_TOP_TCON_TV1_GATE, 1);
+ if (quirks->has_tcon_tv1)
+ clk_data->hws[CLK_TCON_TOP_TV1] =
+ sun8i_tcon_top_register_gate(dev, "tcon-tv1", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_TV1_GATE, 1);
- clk_data->hws[CLK_TCON_TOP_DSI] =
- sun8i_tcon_top_register_gate(dev, "dsi", regs,
- &tcon_top->reg_lock,
- TCON_TOP_TCON_DSI_GATE, 2);
+ if (quirks->has_dsi)
+ clk_data->hws[CLK_TCON_TOP_DSI] =
+ sun8i_tcon_top_register_gate(dev, "dsi", regs,
+ &tcon_top->reg_lock,
+ TCON_TOP_TCON_DSI_GATE, 2);
for (i = 0; i < CLK_NUM; i++)
if (IS_ERR(clk_data->hws[i])) {
@@ -250,9 +268,25 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
return 0;
}
+static const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
+ .has_tcon_tv1 = true,
+ .has_dsi = true,
+};
+
+static const struct sun8i_tcon_top_quirks sun50i_h6_tcon_top_quirks = {
+ /* Nothing special */
+};
+
/* sun4i_drv uses this list to check if a device node is a TCON TOP */
const struct of_device_id sun8i_tcon_top_of_table[] = {
- { .compatible = "allwinner,sun8i-r40-tcon-top" },
+ {
+ .compatible = "allwinner,sun8i-r40-tcon-top",
+ .data = &sun8i_r40_tcon_top_quirks
+ },
+ {
+ .compatible = "allwinner,sun50i-h6-tcon-top",
+ .data = &sun50i_h6_tcon_top_quirks
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 28c15c6ef1ef..18534263a05d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drmP.h>
@@ -30,7 +31,10 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
int overlay, bool enable, unsigned int zpos,
unsigned int old_zpos)
{
- u32 val;
+ u32 val, bld_base, ch_base;
+
+ bld_base = sun8i_blender_base(mixer);
+ ch_base = sun8i_channel_base(mixer, channel);
DRM_DEBUG_DRIVER("%sabling channel %d overlay %d\n",
enable ? "En" : "Dis", channel, overlay);
@@ -41,17 +45,17 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
val = 0;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN, val);
if (!enable || zpos != old_zpos) {
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
0);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
0);
}
@@ -60,12 +64,13 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+ SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
+ val, val);
val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
val);
}
@@ -77,12 +82,16 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
u32 src_w, src_h, dst_w, dst_h;
+ u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
DRM_DEBUG_DRIVER("Updating UI channel %d overlay %d\n",
channel, overlay);
+ bld_base = sun8i_blender_base(mixer);
+ ch_base = sun8i_channel_base(mixer, channel);
+
src_w = drm_rect_width(&state->src) >> 16;
src_h = drm_rect_height(&state->src) >> 16;
dst_w = drm_rect_width(&state->dst);
@@ -103,8 +112,8 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
regmap_write(mixer->engine.regs,
SUN8I_MIXER_GLOBAL_SIZE,
outsize);
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_OUTSIZE,
- outsize);
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_BLEND_OUTSIZE(bld_base), outsize);
if (state->crtc)
interlaced = state->crtc->state->adjusted_mode.flags
@@ -116,7 +125,7 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
val = 0;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_OUTCTL,
+ SUN8I_MIXER_BLEND_OUTCTL(bld_base),
SUN8I_MIXER_BLEND_OUTCTL_INTERLACED,
val);
@@ -129,10 +138,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->src.x1 >> 16, state->src.y1 >> 16);
DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_SIZE(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch_base, overlay),
insize);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_OVL_SIZE(channel),
+ SUN8I_MIXER_CHAN_UI_OVL_SIZE(ch_base),
insize);
if (insize != outsize || hphase || vphase) {
@@ -156,10 +165,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
+ SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
return 0;
@@ -170,7 +179,9 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
const struct de2_fmt_info *fmt_info;
- u32 val;
+ u32 val, ch_base;
+
+ ch_base = sun8i_channel_base(mixer, channel);
fmt_info = sun8i_mixer_format_info(state->fb->format->format);
if (!fmt_info || !fmt_info->rgb) {
@@ -180,7 +191,7 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
return 0;
@@ -193,8 +204,11 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *gem;
dma_addr_t paddr;
+ u32 ch_base;
int bpp;
+ ch_base = sun8i_channel_base(mixer, channel);
+
/* Get the physical address of the buffer in memory */
gem = drm_fb_cma_get_gem_obj(fb, 0);
@@ -211,13 +225,13 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
/* Set the line width */
DRM_DEBUG_DRIVER("Layer line width: %d bytes\n", fb->pitches[0]);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_PITCH(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch_base, overlay),
fb->pitches[0]);
DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(channel, overlay),
+ SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch_base, overlay),
lower_32_bits(paddr));
return 0;
@@ -287,6 +301,7 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
}
static struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = sun8i_ui_layer_atomic_check,
.atomic_disable = sun8i_ui_layer_atomic_disable,
.atomic_update = sun8i_ui_layer_atomic_update,
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
index 123b15ea9918..f4389cf0ba20 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
@@ -18,23 +18,26 @@
#include <drm/drm_plane.h>
-#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x0)
-#define SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x4)
-#define SUN8I_MIXER_CHAN_UI_LAYER_COORD(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x8)
-#define SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0xc)
-#define SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x10)
-#define SUN8I_MIXER_CHAN_UI_LAYER_BOT_LADDR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x14)
-#define SUN8I_MIXER_CHAN_UI_LAYER_FCOLOR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x20 * (layer) + 0x18)
-#define SUN8I_MIXER_CHAN_UI_TOP_HADDR(ch) (0x2000 + 0x1000 * (ch) + 0x80)
-#define SUN8I_MIXER_CHAN_UI_BOT_HADDR(ch) (0x2000 + 0x1000 * (ch) + 0x84)
-#define SUN8I_MIXER_CHAN_UI_OVL_SIZE(ch) (0x2000 + 0x1000 * (ch) + 0x88)
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR(base, layer) \
+ ((base) + 0x20 * (layer) + 0x0)
+#define SUN8I_MIXER_CHAN_UI_LAYER_SIZE(base, layer) \
+ ((base) + 0x20 * (layer) + 0x4)
+#define SUN8I_MIXER_CHAN_UI_LAYER_COORD(base, layer) \
+ ((base) + 0x20 * (layer) + 0x8)
+#define SUN8I_MIXER_CHAN_UI_LAYER_PITCH(base, layer) \
+ ((base) + 0x20 * (layer) + 0xc)
+#define SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(base, layer) \
+ ((base) + 0x20 * (layer) + 0x10)
+#define SUN8I_MIXER_CHAN_UI_LAYER_BOT_LADDR(base, layer) \
+ ((base) + 0x20 * (layer) + 0x14)
+#define SUN8I_MIXER_CHAN_UI_LAYER_FCOLOR(base, layer) \
+ ((base) + 0x20 * (layer) + 0x18)
+#define SUN8I_MIXER_CHAN_UI_TOP_HADDR(base) \
+ ((base) + 0x80)
+#define SUN8I_MIXER_CHAN_UI_BOT_HADDR(base) \
+ ((base) + 0x84)
+#define SUN8I_MIXER_CHAN_UI_OVL_SIZE(base) \
+ ((base) + 0x88)
#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN BIT(0)
#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK GENMASK(2, 1)
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
index 6bb2aa164c8e..ae0806bccac7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
@@ -10,6 +10,7 @@
*/
#include "sun8i_ui_scaler.h"
+#include "sun8i_vi_scaler.h"
static const u32 lan2coefftab16[240] = {
0x00004000, 0x00033ffe, 0x00063efc, 0x000a3bfb,
@@ -88,6 +89,20 @@ static const u32 lan2coefftab16[240] = {
0x0b1c1603, 0x0d1c1502, 0x0e1d1401, 0x0f1d1301,
};
+static u32 sun8i_ui_scaler_base(struct sun8i_mixer *mixer, int channel)
+{
+ int vi_num = mixer->cfg->vi_num;
+
+ if (mixer->cfg->is_de3)
+ return DE3_VI_SCALER_UNIT_BASE +
+ DE3_VI_SCALER_UNIT_SIZE * vi_num +
+ DE3_UI_SCALER_UNIT_SIZE * (channel - vi_num);
+ else
+ return DE2_VI_SCALER_UNIT_BASE +
+ DE2_VI_SCALER_UNIT_SIZE * vi_num +
+ DE2_UI_SCALER_UNIT_SIZE * (channel - vi_num);
+}
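
[Editor's note] Worked example, assuming the DE2 constants implied by the absolute macros removed below (VI scaler units starting at 0x20000 and 0x20000 apart, UI units DE2_UI_SCALER_UNIT_SIZE = 0x10000 apart): on a mixer with vi_num = 1, UI channel 1 resolves to 0x20000 + 0x20000 * 1 + 0x10000 * (1 - 1) = 0x40000, matching what the old SUN8I_SCALER_GSU_CTRL(1, 0) computed.
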
+
static int sun8i_ui_scaler_coef_index(unsigned int step)
{
unsigned int scale, int_part, float_part;
@@ -114,33 +129,35 @@ static int sun8i_ui_scaler_coef_index(unsigned int step)
void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
{
- int vi_cnt = mixer->cfg->vi_num;
- u32 val;
+ u32 val, base;
- if (WARN_ON(layer < vi_cnt))
+ if (WARN_ON(layer < mixer->cfg->vi_num))
return;
+ base = sun8i_ui_scaler_base(mixer, layer);
+
if (enable)
val = SUN8I_SCALER_GSU_CTRL_EN |
SUN8I_SCALER_GSU_CTRL_COEFF_RDY;
else
val = 0;
- regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_CTRL(vi_cnt, layer - vi_cnt), val);
+ regmap_write(mixer->engine.regs, SUN8I_SCALER_GSU_CTRL(base), val);
}
void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
u32 hscale, u32 vscale, u32 hphase, u32 vphase)
{
- int vi_cnt = mixer->cfg->vi_num;
u32 insize, outsize;
int i, offset;
+ u32 base;
- if (WARN_ON(layer < vi_cnt))
+ if (WARN_ON(layer < mixer->cfg->vi_num))
return;
+ base = sun8i_ui_scaler_base(mixer, layer);
+
hphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
vphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
hscale <<= SUN8I_UI_SCALER_SCALE_FRAC - 16;
@@ -149,24 +166,22 @@ void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
insize = SUN8I_UI_SCALER_SIZE(src_w, src_h);
outsize = SUN8I_UI_SCALER_SIZE(dst_w, dst_h);
- layer -= vi_cnt;
-
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_OUTSIZE(vi_cnt, layer), outsize);
+ SUN8I_SCALER_GSU_OUTSIZE(base), outsize);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_INSIZE(vi_cnt, layer), insize);
+ SUN8I_SCALER_GSU_INSIZE(base), insize);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_HSTEP(vi_cnt, layer), hscale);
+ SUN8I_SCALER_GSU_HSTEP(base), hscale);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_VSTEP(vi_cnt, layer), vscale);
+ SUN8I_SCALER_GSU_VSTEP(base), vscale);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_HPHASE(vi_cnt, layer), hphase);
+ SUN8I_SCALER_GSU_HPHASE(base), hphase);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_VPHASE(vi_cnt, layer), vphase);
+ SUN8I_SCALER_GSU_VPHASE(base), vphase);
offset = sun8i_ui_scaler_coef_index(hscale) *
SUN8I_UI_SCALER_COEFF_COUNT;
for (i = 0; i < SUN8I_UI_SCALER_COEFF_COUNT; i++)
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_GSU_HCOEFF(vi_cnt, layer, i),
+ SUN8I_SCALER_GSU_HCOEFF(base, i),
lan2coefftab16[offset + i]);
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
index 86295be8be78..1ef4bd6f2718 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
@@ -11,6 +11,9 @@
#include "sun8i_mixer.h"
+#define DE2_UI_SCALER_UNIT_SIZE 0x10000
+#define DE3_UI_SCALER_UNIT_SIZE 0x08000
+
/* these two macros assume 16 fractional bits, which is standard in DRM */
#define SUN8I_UI_SCALER_SCALE_MIN 1
#define SUN8I_UI_SCALER_SCALE_MAX ((1UL << 20) - 1)
@@ -20,23 +23,14 @@
#define SUN8I_UI_SCALER_COEFF_COUNT 16
#define SUN8I_UI_SCALER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
-#define SUN8I_SCALER_GSU_CTRL(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x0)
-#define SUN8I_SCALER_GSU_OUTSIZE(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x40)
-#define SUN8I_SCALER_GSU_INSIZE(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x80)
-#define SUN8I_SCALER_GSU_HSTEP(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x88)
-#define SUN8I_SCALER_GSU_VSTEP(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x8c)
-#define SUN8I_SCALER_GSU_HPHASE(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x90)
-#define SUN8I_SCALER_GSU_VPHASE(vi_cnt, ui_idx) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x98)
-#define SUN8I_SCALER_GSU_HCOEFF(vi_cnt, ui_idx, index) \
- (0x20000 + 0x20000 * (vi_cnt) + 0x10000 * (ui_idx) + 0x200 + \
- 0x4 * (index))
+#define SUN8I_SCALER_GSU_CTRL(base) ((base) + 0x0)
+#define SUN8I_SCALER_GSU_OUTSIZE(base) ((base) + 0x40)
+#define SUN8I_SCALER_GSU_INSIZE(base) ((base) + 0x80)
+#define SUN8I_SCALER_GSU_HSTEP(base) ((base) + 0x88)
+#define SUN8I_SCALER_GSU_VSTEP(base) ((base) + 0x8c)
+#define SUN8I_SCALER_GSU_HPHASE(base) ((base) + 0x90)
+#define SUN8I_SCALER_GSU_VPHASE(base) ((base) + 0x98)
+#define SUN8I_SCALER_GSU_HCOEFF(base, index) ((base) + 0x200 + 0x4 * (index))
#define SUN8I_SCALER_GSU_CTRL_EN BIT(0)
#define SUN8I_SCALER_GSU_CTRL_COEFF_RDY BIT(4)
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index f4fe97813f94..87be898f9b7a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drmP.h>
@@ -24,7 +25,10 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
int overlay, bool enable, unsigned int zpos,
unsigned int old_zpos)
{
- u32 val;
+ u32 val, bld_base, ch_base;
+
+ bld_base = sun8i_blender_base(mixer);
+ ch_base = sun8i_channel_base(mixer, channel);
DRM_DEBUG_DRIVER("%sabling VI channel %d overlay %d\n",
enable ? "En" : "Dis", channel, overlay);
@@ -35,17 +39,17 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
val = 0;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN, val);
if (!enable || zpos != old_zpos) {
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL,
+ SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
SUN8I_MIXER_BLEND_PIPE_CTL_EN(old_zpos),
0);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(old_zpos),
0);
}
@@ -54,12 +58,13 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
val = SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_PIPE_CTL, val, val);
+ SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
+ val, val);
val = channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ROUTE,
+ SUN8I_MIXER_BLEND_ROUTE(bld_base),
SUN8I_MIXER_BLEND_ROUTE_PIPE_MSK(zpos),
val);
}
@@ -72,6 +77,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
struct drm_plane_state *state = plane->state;
const struct drm_format_info *format = state->fb->format;
u32 src_w, src_h, dst_w, dst_h;
+ u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
bool subsampled;
@@ -79,6 +85,9 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n",
channel, overlay);
+ bld_base = sun8i_blender_base(mixer);
+ ch_base = sun8i_channel_base(mixer, channel);
+
src_w = drm_rect_width(&state->src) >> 16;
src_h = drm_rect_height(&state->src) >> 16;
dst_w = drm_rect_width(&state->dst);
@@ -115,10 +124,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
(state->src.y1 >> 16) & ~(format->vsub - 1));
DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_SIZE(channel, overlay),
+ SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch_base, overlay),
insize);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_OVL_SIZE(channel),
+ SUN8I_MIXER_CHAN_VI_OVL_SIZE(ch_base),
insize);
/*
@@ -149,10 +158,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(zpos),
+ SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(zpos),
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
return 0;
@@ -163,7 +172,9 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
const struct de2_fmt_info *fmt_info;
- u32 val;
+ u32 val, ch_base;
+
+ ch_base = sun8i_channel_base(mixer, channel);
fmt_info = sun8i_mixer_format_info(state->fb->format->format);
if (!fmt_info) {
@@ -173,7 +184,7 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
if (fmt_info->csc != SUN8I_CSC_MODE_OFF) {
@@ -189,9 +200,17 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
val = 0;
regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR(channel, overlay),
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE, val);
+ /* It seems that YUV formats use the global alpha setting. */
+ if (mixer->cfg->is_de3)
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base,
+ overlay),
+ SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK,
+ SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(0xff));
+
return 0;
}
@@ -204,8 +223,11 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
struct drm_gem_cma_object *gem;
u32 dx, dy, src_x, src_y;
dma_addr_t paddr;
+ u32 ch_base;
int i;
+ ch_base = sun8i_channel_base(mixer, channel);
+
/* Adjust x and y to be divisible by the subsampling factor */
src_x = (state->src.x1 >> 16) & ~(format->hsub - 1);
src_y = (state->src.y1 >> 16) & ~(format->vsub - 1);
@@ -235,17 +257,17 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
DRM_DEBUG_DRIVER("Layer %d. line width: %d bytes\n",
i + 1, fb->pitches[i]);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_PITCH(channel,
+ SUN8I_MIXER_CHAN_VI_LAYER_PITCH(ch_base,
overlay, i),
- fb->pitches[i]);
+ fb->pitches[i]);
DRM_DEBUG_DRIVER("Setting %d. buffer address to %pad\n",
i + 1, &paddr);
regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(channel,
+ SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch_base,
overlay, i),
- lower_32_bits(paddr));
+ lower_32_bits(paddr));
}
return 0;
@@ -315,6 +337,7 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
}
static struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = sun8i_vi_layer_atomic_check,
.atomic_disable = sun8i_vi_layer_atomic_disable,
.atomic_update = sun8i_vi_layer_atomic_update,
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
index 6996627a0a76..8a5e6d01c85d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
@@ -12,23 +12,26 @@
#include <drm/drm_plane.h>
-#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x0)
-#define SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x4)
-#define SUN8I_MIXER_CHAN_VI_LAYER_COORD(ch, layer) \
- (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x8)
-#define SUN8I_MIXER_CHAN_VI_LAYER_PITCH(ch, layer, plane) \
- (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0xc + 4 * (plane))
-#define SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch, layer, plane) \
- (0x2000 + 0x1000 * (ch) + 0x30 * (layer) + 0x18 + 4 * (plane))
-#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(ch) (0x2000 + 0x1000 * (ch) + 0xe8)
+#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR(base, layer) \
+ ((base) + 0x30 * (layer) + 0x0)
+#define SUN8I_MIXER_CHAN_VI_LAYER_SIZE(base, layer) \
+ ((base) + 0x30 * (layer) + 0x4)
+#define SUN8I_MIXER_CHAN_VI_LAYER_COORD(base, layer) \
+ ((base) + 0x30 * (layer) + 0x8)
+#define SUN8I_MIXER_CHAN_VI_LAYER_PITCH(base, layer, plane) \
+ ((base) + 0x30 * (layer) + 0xc + 4 * (plane))
+#define SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(base, layer, plane) \
+ ((base) + 0x30 * (layer) + 0x18 + 4 * (plane))
+#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(base) \
+ ((base) + 0xe8)
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0)
/* RGB mode should be set for RGB formats and cleared for YCbCr */
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE BIT(15)
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET 8
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK GENMASK(12, 8)
+#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24)
+#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x) ((x) << 24)
struct sun8i_mixer;
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
index d3f1acb234b7..7ba75011adf9 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
@@ -833,6 +833,16 @@ static const u32 bicubic4coefftab32[480] = {
0x1012110d, 0x1012110d, 0x1013110c, 0x1013110c,
};
+static u32 sun8i_vi_scaler_base(struct sun8i_mixer *mixer, int channel)
+{
+ if (mixer->cfg->is_de3)
+ return DE3_VI_SCALER_UNIT_BASE +
+ DE3_VI_SCALER_UNIT_SIZE * channel;
+ else
+ return DE2_VI_SCALER_UNIT_BASE +
+ DE2_VI_SCALER_UNIT_SIZE * channel;
+}
+
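The helper above captures the only DE2/DE3 difference in the scaler block: the base is the same, but the per-channel stride shrinks from 0x20000 to 0x8000 on DE3. A minimal userspace sketch of the arithmetic, using the DE2_/DE3_ constants this patch adds to sun8i_vi_scaler.h:

    #include <stdio.h>

    #define DE2_VI_SCALER_UNIT_BASE 0x20000
    #define DE2_VI_SCALER_UNIT_SIZE 0x20000
    #define DE3_VI_SCALER_UNIT_BASE 0x20000
    #define DE3_VI_SCALER_UNIT_SIZE 0x08000

    int main(void)
    {
            int ch;

            for (ch = 0; ch < 2; ch++)
                    printf("ch%d: DE2 base 0x%05x, DE3 base 0x%05x\n", ch,
                           DE2_VI_SCALER_UNIT_BASE + DE2_VI_SCALER_UNIT_SIZE * ch,
                           DE3_VI_SCALER_UNIT_BASE + DE3_VI_SCALER_UNIT_SIZE * ch);
            return 0;
    }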
static int sun8i_vi_scaler_coef_index(unsigned int step)
{
unsigned int scale, int_part, float_part;
@@ -857,7 +867,7 @@ static int sun8i_vi_scaler_coef_index(unsigned int step)
}
}
-static void sun8i_vi_scaler_set_coeff(struct regmap *map, int layer,
+static void sun8i_vi_scaler_set_coeff(struct regmap *map, u32 base,
u32 hstep, u32 vstep,
const struct drm_format_info *format)
{
@@ -877,29 +887,31 @@ static void sun8i_vi_scaler_set_coeff(struct regmap *map, int layer,
offset = sun8i_vi_scaler_coef_index(hstep) *
SUN8I_VI_SCALER_COEFF_COUNT;
for (i = 0; i < SUN8I_VI_SCALER_COEFF_COUNT; i++) {
- regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF0(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF0(base, i),
lan3coefftab32_left[offset + i]);
- regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF1(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF1(base, i),
lan3coefftab32_right[offset + i]);
- regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF0(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF0(base, i),
ch_left[offset + i]);
- regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF1(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF1(base, i),
ch_right[offset + i]);
}
 offset = sun8i_vi_scaler_coef_index(vstep) *
SUN8I_VI_SCALER_COEFF_COUNT;
for (i = 0; i < SUN8I_VI_SCALER_COEFF_COUNT; i++) {
- regmap_write(map, SUN8I_SCALER_VSU_YVCOEFF(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_YVCOEFF(base, i),
lan2coefftab32[offset + i]);
- regmap_write(map, SUN8I_SCALER_VSU_CVCOEFF(layer, i),
+ regmap_write(map, SUN8I_SCALER_VSU_CVCOEFF(base, i),
cy[offset + i]);
}
}
void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
{
- u32 val;
+ u32 val, base;
+
+ base = sun8i_vi_scaler_base(mixer, layer);
if (enable)
val = SUN8I_SCALER_VSU_CTRL_EN |
@@ -907,7 +919,8 @@ void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
else
val = 0;
- regmap_write(mixer->engine.regs, SUN8I_SCALER_VSU_CTRL(layer), val);
+ regmap_write(mixer->engine.regs,
+ SUN8I_SCALER_VSU_CTRL(base), val);
}
void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
@@ -917,6 +930,9 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
{
u32 chphase, cvphase;
u32 insize, outsize;
+ u32 base;
+
+ base = sun8i_vi_scaler_base(mixer, layer);
hphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
vphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
@@ -940,32 +956,44 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
cvphase = vphase;
}
+ if (mixer->cfg->is_de3) {
+ u32 val;
+
+ if (format->hsub == 1 && format->vsub == 1)
+ val = SUN50I_SCALER_VSU_SCALE_MODE_UI;
+ else
+ val = SUN50I_SCALER_VSU_SCALE_MODE_NORMAL;
+
+ regmap_write(mixer->engine.regs,
+ SUN50I_SCALER_VSU_SCALE_MODE(base), val);
+ }
+
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_OUTSIZE(layer), outsize);
+ SUN8I_SCALER_VSU_OUTSIZE(base), outsize);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_YINSIZE(layer), insize);
+ SUN8I_SCALER_VSU_YINSIZE(base), insize);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_YHSTEP(layer), hscale);
+ SUN8I_SCALER_VSU_YHSTEP(base), hscale);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_YVSTEP(layer), vscale);
+ SUN8I_SCALER_VSU_YVSTEP(base), vscale);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_YHPHASE(layer), hphase);
+ SUN8I_SCALER_VSU_YHPHASE(base), hphase);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_YVPHASE(layer), vphase);
+ SUN8I_SCALER_VSU_YVPHASE(base), vphase);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_CINSIZE(layer),
+ SUN8I_SCALER_VSU_CINSIZE(base),
SUN8I_VI_SCALER_SIZE(src_w / format->hsub,
src_h / format->vsub));
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_CHSTEP(layer),
+ SUN8I_SCALER_VSU_CHSTEP(base),
hscale / format->hsub);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_CVSTEP(layer),
+ SUN8I_SCALER_VSU_CVSTEP(base),
vscale / format->vsub);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_CHPHASE(layer), chphase);
+ SUN8I_SCALER_VSU_CHPHASE(base), chphase);
regmap_write(mixer->engine.regs,
- SUN8I_SCALER_VSU_CVPHASE(layer), cvphase);
- sun8i_vi_scaler_set_coeff(mixer->engine.regs, layer,
+ SUN8I_SCALER_VSU_CVPHASE(base), cvphase);
+ sun8i_vi_scaler_set_coeff(mixer->engine.regs, base,
hscale, vscale, format);
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
index a595ab643a5a..68f6593b369a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
@@ -12,6 +12,12 @@
#include <drm/drm_fourcc.h>
#include "sun8i_mixer.h"
+#define DE2_VI_SCALER_UNIT_BASE 0x20000
+#define DE2_VI_SCALER_UNIT_SIZE 0x20000
+
+#define DE3_VI_SCALER_UNIT_BASE 0x20000
+#define DE3_VI_SCALER_UNIT_SIZE 0x08000
+
 /* these two macros assume 16 fractional bits, which is standard in DRM */
#define SUN8I_VI_SCALER_SCALE_MIN 1
#define SUN8I_VI_SCALER_SCALE_MAX ((1UL << 20) - 1)
@@ -21,34 +27,48 @@
#define SUN8I_VI_SCALER_COEFF_COUNT 32
#define SUN8I_VI_SCALER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
-#define SUN8I_SCALER_VSU_CTRL(ch) (0x20000 + 0x20000 * (ch) + 0x0)
-#define SUN8I_SCALER_VSU_OUTSIZE(ch) (0x20000 + 0x20000 * (ch) + 0x40)
-#define SUN8I_SCALER_VSU_YINSIZE(ch) (0x20000 + 0x20000 * (ch) + 0x80)
-#define SUN8I_SCALER_VSU_YHSTEP(ch) (0x20000 + 0x20000 * (ch) + 0x88)
-#define SUN8I_SCALER_VSU_YVSTEP(ch) (0x20000 + 0x20000 * (ch) + 0x8c)
-#define SUN8I_SCALER_VSU_YHPHASE(ch) (0x20000 + 0x20000 * (ch) + 0x90)
-#define SUN8I_SCALER_VSU_YVPHASE(ch) (0x20000 + 0x20000 * (ch) + 0x98)
-#define SUN8I_SCALER_VSU_CINSIZE(ch) (0x20000 + 0x20000 * (ch) + 0xc0)
-#define SUN8I_SCALER_VSU_CHSTEP(ch) (0x20000 + 0x20000 * (ch) + 0xc8)
-#define SUN8I_SCALER_VSU_CVSTEP(ch) (0x20000 + 0x20000 * (ch) + 0xcc)
-#define SUN8I_SCALER_VSU_CHPHASE(ch) (0x20000 + 0x20000 * (ch) + 0xd0)
-#define SUN8I_SCALER_VSU_CVPHASE(ch) (0x20000 + 0x20000 * (ch) + 0xd8)
-#define SUN8I_SCALER_VSU_YHCOEFF0(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x200 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_YHCOEFF1(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x300 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_YVCOEFF(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x400 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_CHCOEFF0(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x600 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_CHCOEFF1(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x700 + 0x4 * (i))
-#define SUN8I_SCALER_VSU_CVCOEFF(ch, i) \
- (0x20000 + 0x20000 * (ch) + 0x800 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CTRL(base) ((base) + 0x0)
+#define SUN50I_SCALER_VSU_SCALE_MODE(base) ((base) + 0x10)
+#define SUN50I_SCALER_VSU_DIR_THR(base) ((base) + 0x20)
+#define SUN50I_SCALER_VSU_EDGE_THR(base) ((base) + 0x24)
+#define SUN50I_SCALER_VSU_EDSCL_CTRL(base) ((base) + 0x28)
+#define SUN50I_SCALER_VSU_ANGLE_THR(base) ((base) + 0x2c)
+#define SUN8I_SCALER_VSU_OUTSIZE(base) ((base) + 0x40)
+#define SUN8I_SCALER_VSU_YINSIZE(base) ((base) + 0x80)
+#define SUN8I_SCALER_VSU_YHSTEP(base) ((base) + 0x88)
+#define SUN8I_SCALER_VSU_YVSTEP(base) ((base) + 0x8c)
+#define SUN8I_SCALER_VSU_YHPHASE(base) ((base) + 0x90)
+#define SUN8I_SCALER_VSU_YVPHASE(base) ((base) + 0x98)
+#define SUN8I_SCALER_VSU_CINSIZE(base) ((base) + 0xc0)
+#define SUN8I_SCALER_VSU_CHSTEP(base) ((base) + 0xc8)
+#define SUN8I_SCALER_VSU_CVSTEP(base) ((base) + 0xcc)
+#define SUN8I_SCALER_VSU_CHPHASE(base) ((base) + 0xd0)
+#define SUN8I_SCALER_VSU_CVPHASE(base) ((base) + 0xd8)
+#define SUN8I_SCALER_VSU_YHCOEFF0(base, i) ((base) + 0x200 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_YHCOEFF1(base, i) ((base) + 0x300 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_YVCOEFF(base, i) ((base) + 0x400 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CHCOEFF0(base, i) ((base) + 0x600 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CHCOEFF1(base, i) ((base) + 0x700 + 0x4 * (i))
+#define SUN8I_SCALER_VSU_CVCOEFF(base, i) ((base) + 0x800 + 0x4 * (i))
#define SUN8I_SCALER_VSU_CTRL_EN BIT(0)
#define SUN8I_SCALER_VSU_CTRL_COEFF_RDY BIT(4)
+#define SUN50I_SCALER_VSU_SUB_ZERO_DIR_THR(x) (((x) & 0xFF) << 24)
+#define SUN50I_SCALER_VSU_ZERO_DIR_THR(x) (((x) & 0xFF) << 16)
+#define SUN50I_SCALER_VSU_HORZ_DIR_THR(x) (((x) & 0xFF) << 8)
+#define SUN50I_SCALER_VSU_VERT_DIR_THR(x) ((x) & 0xFF)
+
+#define SUN50I_SCALER_VSU_SCALE_MODE_UI 0
+#define SUN50I_SCALER_VSU_SCALE_MODE_NORMAL 1
+#define SUN50I_SCALER_VSU_SCALE_MODE_ED_SCALE 2
+
+#define SUN50I_SCALER_VSU_EDGE_SHIFT(x) (((x) & 0xF) << 16)
+#define SUN50I_SCALER_VSU_EDGE_OFFSET(x) ((x) & 0xFF)
+
+#define SUN50I_SCALER_VSU_ANGLE_SHIFT(x) (((x) & 0xF) << 16)
+#define SUN50I_SCALER_VSU_ANGLE_OFFSET(x) ((x) & 0xFF)
+
void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable);
void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
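The macro rewrite above only re-parameterizes the address computation; for DE2 the results are offset-for-offset identical to the old fixed-base forms once base = 0x20000 + 0x20000 * ch is substituted, and only DE3 supplies a different base. A quick equivalence check for one representative macro (channel and coefficient index are arbitrary example values):

    #include <assert.h>

    /* old fixed-base form vs. new base-relative form of YHCOEFF0 */
    #define OLD_YHCOEFF0(ch, i)   (0x20000 + 0x20000 * (ch) + 0x200 + 0x4 * (i))
    #define NEW_YHCOEFF0(base, i) ((base) + 0x200 + 0x4 * (i))

    int main(void)
    {
            unsigned int base = 0x20000 + 0x20000 * 1;   /* DE2, channel 1 */

            assert(OLD_YHCOEFF0(1, 3) == NEW_YHCOEFF0(base, 3));
            return 0;
    }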
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index f80e82e16475..607a6ea17ecc 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1978,6 +1978,23 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static bool tegra_dc_has_window_groups(struct tegra_dc *dc)
+{
+ unsigned int i;
+
+ if (!dc->soc->wgrps)
+ return true;
+
+ for (i = 0; i < dc->soc->num_wgrps; i++) {
+ const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
+
+ if (wgrp->dc == dc->pipe && wgrp->num_windows > 0)
+ return true;
+ }
+
+ return false;
+}
+
static int tegra_dc_init(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
@@ -1993,22 +2010,8 @@ static int tegra_dc_init(struct host1x_client *client)
* assign a primary plane to them, which in turn will cause KMS to
* crash.
*/
- if (dc->soc->wgrps) {
- bool has_wgrps = false;
- unsigned int i;
-
- for (i = 0; i < dc->soc->num_wgrps; i++) {
- const struct tegra_windowgroup_soc *wgrp = &dc->soc->wgrps[i];
-
- if (wgrp->dc == dc->pipe && wgrp->num_windows > 0) {
- has_wgrps = true;
- break;
- }
- }
-
- if (!has_wgrps)
- return 0;
- }
+ if (!tegra_dc_has_window_groups(dc))
+ return 0;
dc->syncpt = host1x_syncpt_request(client, flags);
if (!dc->syncpt)
@@ -2094,6 +2097,9 @@ static int tegra_dc_exit(struct host1x_client *client)
struct tegra_dc *dc = host1x_client_to_dc(client);
int err;
+ if (!tegra_dc_has_window_groups(dc))
+ return 0;
+
devm_free_irq(dc->dev, dc->irq, dc);
err = tegra_dc_rgb_exit(dc);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 65ea4988b332..4b70ce664c41 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1274,6 +1274,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra194-display", },
{ .compatible = "nvidia,tegra194-dc", },
{ .compatible = "nvidia,tegra194-sor", },
+ { .compatible = "nvidia,tegra194-vic", },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c
index f685e72949d1..352d05feabb0 100644
--- a/drivers/gpu/drm/tegra/falcon.c
+++ b/drivers/gpu/drm/tegra/falcon.c
@@ -141,9 +141,9 @@ int falcon_load_firmware(struct falcon *falcon)
/* allocate iova space for the firmware */
falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
&falcon->firmware.paddr);
- if (!falcon->firmware.vaddr) {
- dev_err(falcon->dev, "dma memory mapping failed\n");
- return -ENOMEM;
+ if (IS_ERR(falcon->firmware.vaddr)) {
+ dev_err(falcon->dev, "DMA memory mapping failed\n");
+ return PTR_ERR(falcon->firmware.vaddr);
}
/* copy firmware image into local area. this also ensures endianness */
@@ -197,11 +197,19 @@ void falcon_exit(struct falcon *falcon)
int falcon_boot(struct falcon *falcon)
{
unsigned long offset;
+ u32 value;
int err;
if (!falcon->firmware.vaddr)
return -EINVAL;
+ err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
+ (value & (FALCON_DMACTL_IMEM_SCRUBBING |
+ FALCON_DMACTL_DMEM_SCRUBBING)) == 0,
+ 10, 10000);
+ if (err < 0)
+ return err;
+
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
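The new poll waits for the firmware memory scrubbing bits to clear before DMACTL is programmed, checking every 10 microseconds for at most 10 ms. readl_poll_timeout() expands to roughly the loop below; this is a userspace sketch with a faked register read, and the 0x3 scrub-bit mask is a hypothetical value, not the real FALCON_DMACTL layout:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* fake MMIO read: pretend scrubbing finishes after five polls */
    static uint32_t fake_dmactl(void)
    {
            static int polls;

            return polls++ < 5 ? 0x3 : 0x0;
    }

    /* the shape readl_poll_timeout() expands to: 10 us poll, 10 ms cap */
    static int wait_scrub_done(uint32_t scrub_mask)
    {
            unsigned int waited;

            for (waited = 0; waited < 10000; waited += 10) {
                    if ((fake_dmactl() & scrub_mask) == 0)
                            return 0;
                    /* usleep(10) here in real code */
            }
            return -ETIMEDOUT;
    }

    int main(void)
    {
            printf("%d\n", wait_scrub_done(0x3));
            return 0;
    }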
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 6112d9042979..922a48d5a483 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -742,7 +742,9 @@ static const struct host1x_client_ops tegra_display_hub_ops = {
static int tegra_display_hub_probe(struct platform_device *pdev)
{
+ struct device_node *child = NULL;
struct tegra_display_hub *hub;
+ struct clk *clk;
unsigned int i;
int err;
@@ -801,6 +803,34 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
return err;
}
+ hub->num_heads = of_get_child_count(pdev->dev.of_node);
+
+ hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
+ GFP_KERNEL);
+ if (!hub->clk_heads)
+ return -ENOMEM;
+
+ for (i = 0; i < hub->num_heads; i++) {
+ child = of_get_next_child(pdev->dev.of_node, child);
+ if (!child) {
+ dev_err(&pdev->dev, "failed to find node for head %u\n",
+ i);
+ return -ENODEV;
+ }
+
+ clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "failed to get clock for head %u\n",
+ i);
+ of_node_put(child);
+ return PTR_ERR(clk);
+ }
+
+ hub->clk_heads[i] = clk;
+ }
+
+ of_node_put(child);
+
/* XXX: enable clock across reset? */
err = reset_control_assert(hub->rst);
if (err < 0)
@@ -840,12 +870,16 @@ static int tegra_display_hub_remove(struct platform_device *pdev)
static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
{
struct tegra_display_hub *hub = dev_get_drvdata(dev);
+ unsigned int i = hub->num_heads;
int err;
err = reset_control_assert(hub->rst);
if (err < 0)
return err;
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
clk_disable_unprepare(hub->clk_hub);
clk_disable_unprepare(hub->clk_dsc);
clk_disable_unprepare(hub->clk_disp);
@@ -856,6 +890,7 @@ static int __maybe_unused tegra_display_hub_suspend(struct device *dev)
static int __maybe_unused tegra_display_hub_resume(struct device *dev)
{
struct tegra_display_hub *hub = dev_get_drvdata(dev);
+ unsigned int i;
int err;
err = clk_prepare_enable(hub->clk_disp);
@@ -870,13 +905,22 @@ static int __maybe_unused tegra_display_hub_resume(struct device *dev)
if (err < 0)
goto disable_dsc;
+ for (i = 0; i < hub->num_heads; i++) {
+ err = clk_prepare_enable(hub->clk_heads[i]);
+ if (err < 0)
+ goto disable_heads;
+ }
+
err = reset_control_deassert(hub->rst);
if (err < 0)
- goto disable_hub;
+ goto disable_heads;
return 0;
-disable_hub:
+disable_heads:
+ while (i--)
+ clk_disable_unprepare(hub->clk_heads[i]);
+
clk_disable_unprepare(hub->clk_hub);
disable_dsc:
clk_disable_unprepare(hub->clk_dsc);
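Both the resume error path and suspend walk the head clocks backwards with while (i--), so exactly the clocks that were successfully enabled get disabled again. The idiom in isolation:

    #include <stdio.h>

    static int enable(unsigned int i)
    {
            return i == 2 ? -1 : 0;   /* pretend head 2 fails */
    }

    int main(void)
    {
            unsigned int i, n = 4;

            for (i = 0; i < n; i++) {
                    if (enable(i) < 0)
                            goto unwind;
                    printf("enabled %u\n", i);
            }
            return 0;

    unwind:
            while (i--)               /* disables 1, then 0 */
                    printf("disabled %u\n", i);
            return 1;
    }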
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 6696a85fc1f2..479087c0705a 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -49,6 +49,9 @@ struct tegra_display_hub {
struct clk *clk_hub;
struct reset_control *rst;
+ unsigned int num_heads;
+ struct clk **clk_heads;
+
const struct tegra_display_hub_soc *soc;
struct tegra_windowgroup *wgrps;
};
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index b129da2e5afd..ef8692b7075a 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -19,6 +19,8 @@
#include <soc/tegra/pmc.h>
+#include <sound/hda_verbs.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_panel.h>
@@ -29,14 +31,6 @@
#include "sor.h"
#include "trace.h"
-/*
- * XXX Remove this after the commit adding it to soc/tegra/pmc.h has been
- * merged. Having this around after the commit is merged should be safe since
- * the preprocessor will effectively replace all occurrences and therefore no
- * duplicate will be defined.
- */
-#define TEGRA_IO_PAD_HDMI_DP0 26
-
#define SOR_REKEY 0x38
struct tegra_sor_hdmi_settings {
@@ -407,6 +401,7 @@ struct tegra_sor {
const struct tegra_sor_soc *soc;
void __iomem *regs;
unsigned int index;
+ unsigned int irq;
struct reset_control *rst;
struct clk *clk_parent;
@@ -433,6 +428,11 @@ struct tegra_sor {
struct delayed_work scdc;
bool scdc_enabled;
+
+ struct {
+ unsigned int sample_rate;
+ unsigned int channels;
+ } audio;
};
struct tegra_sor_state {
@@ -2139,6 +2139,144 @@ tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
return 0;
}
+static void tegra_sor_write_eld(struct tegra_sor *sor)
+{
+ size_t length = drm_eld_size(sor->output.connector.eld), i;
+
+ for (i = 0; i < length; i++)
+ tegra_sor_writel(sor, i << 8 | sor->output.connector.eld[i],
+ SOR_AUDIO_HDA_ELD_BUFWR);
+
+ /*
+ * The HDA codec will always report an ELD buffer size of 96 bytes and
+ * the HDA codec driver will check that each byte read from the buffer
+ * is valid. Therefore every byte must be written, even if fewer than
+ * 96 bytes were parsed from the EDID.
+ */
+ for (i = length; i < 96; i++)
+ tegra_sor_writel(sor, i << 8 | 0, SOR_AUDIO_HDA_ELD_BUFWR);
+}
+
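Each buffer write packs the byte index into bits 15:8 and the payload into bits 7:0, which is exactly what the SOR_AUDIO_HDA_ELD_BUFWR_INDEX/DATA macros added to sor.h later in this patch describe. A small check of the packing (the ELD byte value is arbitrary):

    #include <assert.h>

    #define INDEX(x) (((x) & 0xff) << 8)
    #define DATA(x)  (((x) & 0xff) << 0)

    int main(void)
    {
            unsigned int i = 5, eld_byte = 0x10;

            /* i << 8 | eld[i] in the driver equals INDEX(i) | DATA(eld[i]) */
            assert((i << 8 | eld_byte) == (INDEX(i) | DATA(eld_byte)));
            return 0;
    }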
+static void tegra_sor_audio_prepare(struct tegra_sor *sor)
+{
+ u32 value;
+
+ tegra_sor_write_eld(sor);
+
+ value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
+ tegra_sor_writel(sor, value, SOR_AUDIO_HDA_PRESENSE);
+}
+
+static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
+{
+ tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
+}
+
+static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
+{
+ u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
+ struct hdmi_audio_infoframe frame;
+ u32 value;
+ int err;
+
+ err = hdmi_audio_infoframe_init(&frame);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to setup audio infoframe: %d\n", err);
+ return err;
+ }
+
+ frame.channels = sor->audio.channels;
+
+ err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ dev_err(sor->dev, "failed to pack audio infoframe: %d\n", err);
+ return err;
+ }
+
+ tegra_sor_hdmi_write_infopack(sor, buffer, err);
+
+ value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+ value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
+ value |= INFOFRAME_CTRL_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
+
+ return 0;
+}
+
+static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
+{
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
+
+ /* select HDA audio input */
+ value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
+ value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
+
+ /* inject null samples for stereo streams only */
+ if (sor->audio.channels != 2)
+ value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+ else
+ value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
+
+ value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
+
+ tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
+
+ /* enable advertising HBR capability */
+ tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
+
+ tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
+
+ value = SOR_HDMI_SPARE_ACR_PRIORITY_HIGH |
+ SOR_HDMI_SPARE_CTS_RESET(1) |
+ SOR_HDMI_SPARE_HW_CTS_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_SPARE);
+
+ /* enable HW CTS */
+ value = SOR_HDMI_ACR_SUBPACK_LOW_SB1(0);
+ tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_LOW);
+
+ /* allow packet to be sent */
+ value = SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE;
+ tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_HIGH);
+
+ /* reset N counter and enable lookup */
+ value = SOR_HDMI_AUDIO_N_RESET | SOR_HDMI_AUDIO_N_LOOKUP;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+ value = (24000 * 4096) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0320);
+ tegra_sor_writel(sor, 4096, SOR_AUDIO_NVAL_0320);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0441);
+ tegra_sor_writel(sor, 4704, SOR_AUDIO_NVAL_0441);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0882);
+ tegra_sor_writel(sor, 9408, SOR_AUDIO_NVAL_0882);
+
+ tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_1764);
+ tegra_sor_writel(sor, 18816, SOR_AUDIO_NVAL_1764);
+
+ value = (24000 * 6144) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0480);
+ tegra_sor_writel(sor, 6144, SOR_AUDIO_NVAL_0480);
+
+ value = (24000 * 12288) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0960);
+ tegra_sor_writel(sor, 12288, SOR_AUDIO_NVAL_0960);
+
+ value = (24000 * 24576) / (128 * sor->audio.sample_rate / 1000);
+ tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_1920);
+ tegra_sor_writel(sor, 24576, SOR_AUDIO_NVAL_1920);
+
+ value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_N);
+ value &= ~SOR_HDMI_AUDIO_N_RESET;
+ tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
+
+ tegra_sor_hdmi_enable_audio_infoframe(sor);
+}
+
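The computed audio values all follow aval = 24000 * N / (128 * fs / 1000); reading the 24000 constant as a 24 MHz reference clock in kHz is an assumption, but the N values are the standard HDMI ones for each rate. Worked out in the driver's integer arithmetic:

    #include <stdio.h>

    /* aval = 24000 * N / (128 * fs / 1000), as computed above */
    static unsigned int aval(unsigned int n, unsigned int fs)
    {
            return (24000 * n) / (128 * fs / 1000);
    }

    int main(void)
    {
            printf("32 kHz:   %u\n", aval(4096, 32000));   /* 24000 */
            printf("44.1 kHz: %u\n", aval(4704, 44100));   /* 20002; the driver hard-codes 20000 */
            printf("48 kHz:   %u\n", aval(6144, 48000));   /* 24000 */
            printf("96 kHz:   %u\n", aval(12288, 96000));  /* 24000 */
            return 0;
    }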
static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
{
u32 value;
@@ -2148,6 +2286,11 @@ static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
}
+static void tegra_sor_hdmi_audio_disable(struct tegra_sor *sor)
+{
+ tegra_sor_hdmi_disable_audio_infoframe(sor);
+}
+
static struct tegra_sor_hdmi_settings *
tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
{
@@ -2243,6 +2386,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
u32 value;
int err;
+ tegra_sor_audio_unprepare(sor);
tegra_sor_hdmi_scdc_stop(sor);
err = tegra_sor_detach(sor);
@@ -2651,6 +2795,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
tegra_sor_hdmi_scdc_start(sor);
+ tegra_sor_audio_prepare(sor);
}
static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
@@ -2666,6 +2811,7 @@ static int tegra_sor_init(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
int encoder = DRM_MODE_ENCODER_NONE;
+ u32 value;
int err;
if (!sor->aux) {
@@ -2759,6 +2905,15 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0)
return err;
+ /*
+ * Enable and unmask the HDA codec SCRATCH0 and SCRATCH1 register
+ * interrupts. These are used for interoperability between the HDA
+ * codec driver and the HDMI/DP driver.
+ */
+ value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
+ tegra_sor_writel(sor, value, SOR_INT_ENABLE);
+ tegra_sor_writel(sor, value, SOR_INT_MASK);
+
return 0;
}
@@ -2767,6 +2922,9 @@ static int tegra_sor_exit(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
+ tegra_sor_writel(sor, 0, SOR_INT_MASK);
+ tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
+
tegra_output_exit(&sor->output);
if (sor->aux) {
@@ -3037,6 +3195,54 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
return 0;
}
+static void tegra_hda_parse_format(unsigned int format, unsigned int *rate,
+ unsigned int *channels)
+{
+ unsigned int mul, div;
+
+ if (format & AC_FMT_BASE_44K)
+ *rate = 44100;
+ else
+ *rate = 48000;
+
+ mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
+ div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
+
+ *rate = *rate * (mul + 1) / (div + 1);
+
+ *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
+}
+
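A worked decode of the HDA stream-format word, using the AC_FMT_* field layout from sound/hda_verbs.h (base-rate bit 14, multiplier bits 13:11, divisor bits 10:8, channel field bits 3:0). Note that the raw HDA channel field encodes the channel count minus one:

    #include <stdio.h>

    /* field layout from sound/hda_verbs.h */
    #define AC_FMT_CHAN_SHIFT 0
    #define AC_FMT_CHAN_MASK  (0x0f << 0)
    #define AC_FMT_DIV_SHIFT  8
    #define AC_FMT_DIV_MASK   (7 << 8)
    #define AC_FMT_MULT_SHIFT 11
    #define AC_FMT_MULT_MASK  (7 << 11)
    #define AC_FMT_BASE_44K   (1 << 14)

    int main(void)
    {
            /* hypothetical format word: 44.1 kHz base, x2 multiplier, 2 channels */
            unsigned int format = AC_FMT_BASE_44K | (1 << AC_FMT_MULT_SHIFT) | 1;
            unsigned int rate = (format & AC_FMT_BASE_44K) ? 44100 : 48000;
            unsigned int mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
            unsigned int div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
            unsigned int chan = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;

            rate = rate * (mul + 1) / (div + 1);   /* 44100 * 2 / 1 = 88200 */
            printf("rate %u, channel field %u (%u channels)\n", rate, chan, chan + 1);
            return 0;
    }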
+static irqreturn_t tegra_sor_irq(int irq, void *data)
+{
+ struct tegra_sor *sor = data;
+ u32 value;
+
+ value = tegra_sor_readl(sor, SOR_INT_STATUS);
+ tegra_sor_writel(sor, value, SOR_INT_STATUS);
+
+ if (value & SOR_INT_CODEC_SCRATCH0) {
+ value = tegra_sor_readl(sor, SOR_AUDIO_HDA_CODEC_SCRATCH0);
+
+ if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
+ unsigned int format, sample_rate, channels;
+
+ format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
+
+ tegra_hda_parse_format(format, &sample_rate, &channels);
+
+ sor->audio.sample_rate = sample_rate;
+ sor->audio.channels = channels;
+
+ tegra_sor_hdmi_audio_enable(sor);
+ } else {
+ tegra_sor_hdmi_audio_disable(sor);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static int tegra_sor_probe(struct platform_device *pdev)
{
struct device_node *np;
@@ -3119,14 +3325,38 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- if (!pdev->dev.pm_domain) {
- sor->rst = devm_reset_control_get(&pdev->dev, "sor");
- if (IS_ERR(sor->rst)) {
- err = PTR_ERR(sor->rst);
+ err = platform_get_irq(pdev, 0);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ goto remove;
+ }
+
+ sor->irq = err;
+
+ err = devm_request_irq(sor->dev, sor->irq, tegra_sor_irq, 0,
+ dev_name(sor->dev), sor);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ goto remove;
+ }
+
+ sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+ if (IS_ERR(sor->rst)) {
+ err = PTR_ERR(sor->rst);
+
+ if (err != -EBUSY || WARN_ON(!pdev->dev.pm_domain)) {
dev_err(&pdev->dev, "failed to get reset control: %d\n",
err);
goto remove;
}
+
+ /*
+ * At this point, the reset control is most likely being used
+ * by the generic power domain implementation. With any luck
+ * the power domain will have taken care of resetting the SOR
+ * and we don't have to do anything.
+ */
+ sor->rst = NULL;
}
sor->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index fb0854d92a27..13f7e68bec42 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -364,12 +364,28 @@
#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
+#define SOR_HDMI_ACR_CTRL 0xb1
+
+#define SOR_HDMI_ACR_0320_SUBPACK_LOW 0xb2
+#define SOR_HDMI_ACR_SUBPACK_LOW_SB1(x) (((x) & 0xff) << 24)
+
+#define SOR_HDMI_ACR_0320_SUBPACK_HIGH 0xb3
+#define SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE (1 << 31)
+
+#define SOR_HDMI_ACR_0441_SUBPACK_LOW 0xb4
+#define SOR_HDMI_ACR_0441_SUBPACK_HIGH 0xb5
+
#define SOR_HDMI_CTRL 0xc0
#define SOR_HDMI_CTRL_ENABLE (1 << 30)
#define SOR_HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
#define SOR_HDMI_CTRL_AUDIO_LAYOUT (1 << 10)
#define SOR_HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
+#define SOR_HDMI_SPARE 0xcb
+#define SOR_HDMI_SPARE_ACR_PRIORITY_HIGH (1 << 31)
+#define SOR_HDMI_SPARE_CTS_RESET(x) (((x) & 0x7) << 16)
+#define SOR_HDMI_SPARE_HW_CTS_ENABLE (1 << 0)
+
#define SOR_REFCLK 0xe6
#define SOR_REFCLK_DIV_INT(x) ((((x) >> 2) & 0xff) << 8)
#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x3) << 6)
@@ -378,10 +394,62 @@
#define SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED (1 << 1)
#define SOR_INPUT_CONTROL_HDMI_SRC_SELECT(x) (((x) & 0x1) << 0)
+#define SOR_AUDIO_CNTRL 0xfc
+#define SOR_AUDIO_CNTRL_INJECT_NULLSMPL (1 << 29)
+#define SOR_AUDIO_CNTRL_SOURCE_SELECT(x) (((x) & 0x3) << 20)
+#define SOURCE_SELECT_MASK 0x3
+#define SOURCE_SELECT_HDA 0x2
+#define SOURCE_SELECT_SPDIF 0x1
+#define SOURCE_SELECT_AUTO 0x0
+#define SOR_AUDIO_CNTRL_AFIFO_FLUSH (1 << 12)
+
+#define SOR_AUDIO_SPARE 0xfe
+#define SOR_AUDIO_SPARE_HBR_ENABLE (1 << 27)
+
+#define SOR_AUDIO_NVAL_0320 0xff
+#define SOR_AUDIO_NVAL_0441 0x100
+#define SOR_AUDIO_NVAL_0882 0x101
+#define SOR_AUDIO_NVAL_1764 0x102
+#define SOR_AUDIO_NVAL_0480 0x103
+#define SOR_AUDIO_NVAL_0960 0x104
+#define SOR_AUDIO_NVAL_1920 0x105
+
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0 0x10a
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30)
+#define SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
+
+#define SOR_AUDIO_HDA_ELD_BUFWR 0x10c
+#define SOR_AUDIO_HDA_ELD_BUFWR_INDEX(x) (((x) & 0xff) << 8)
+#define SOR_AUDIO_HDA_ELD_BUFWR_DATA(x) (((x) & 0xff) << 0)
+
+#define SOR_AUDIO_HDA_PRESENSE 0x10d
+#define SOR_AUDIO_HDA_PRESENSE_ELDV (1 << 1)
+#define SOR_AUDIO_HDA_PRESENSE_PD (1 << 0)
+
+#define SOR_AUDIO_AVAL_0320 0x10f
+#define SOR_AUDIO_AVAL_0441 0x110
+#define SOR_AUDIO_AVAL_0882 0x111
+#define SOR_AUDIO_AVAL_1764 0x112
+#define SOR_AUDIO_AVAL_0480 0x113
+#define SOR_AUDIO_AVAL_0960 0x114
+#define SOR_AUDIO_AVAL_1920 0x115
+
+#define SOR_INT_STATUS 0x11c
+#define SOR_INT_CODEC_CP_REQUEST (1 << 2)
+#define SOR_INT_CODEC_SCRATCH1 (1 << 1)
+#define SOR_INT_CODEC_SCRATCH0 (1 << 0)
+
+#define SOR_INT_MASK 0x11d
+#define SOR_INT_ENABLE 0x11e
+
#define SOR_HDMI_VSI_INFOFRAME_CTRL 0x123
#define SOR_HDMI_VSI_INFOFRAME_STATUS 0x124
#define SOR_HDMI_VSI_INFOFRAME_HEADER 0x125
+#define SOR_HDMI_AUDIO_N 0x13c
+#define SOR_HDMI_AUDIO_N_LOOKUP (1 << 28)
+#define SOR_HDMI_AUDIO_N_RESET (1 << 20)
+
#define SOR_HDMI2_CTRL 0x13e
#define SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4 (1 << 1)
#define SOR_HDMI2_CTRL_SCRAMBLE (1 << 0)
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 9f657a63b0bb..d47983deb1cf 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -38,6 +38,7 @@ struct vic {
struct iommu_domain *domain;
struct device *dev;
struct clk *clk;
+ struct reset_control *rst;
/* Platform configuration */
const struct vic_config *config;
@@ -56,13 +57,37 @@ static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
static int vic_runtime_resume(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(vic->clk);
+ if (err < 0)
+ return err;
+
+ usleep_range(10, 20);
+
+ err = reset_control_deassert(vic->rst);
+ if (err < 0)
+ goto disable;
+
+ usleep_range(10, 20);
+
+ return 0;
- return clk_prepare_enable(vic->clk);
+disable:
+ clk_disable_unprepare(vic->clk);
+ return err;
}
static int vic_runtime_suspend(struct device *dev)
{
struct vic *vic = dev_get_drvdata(dev);
+ int err;
+
+ err = reset_control_assert(vic->rst);
+ if (err < 0)
+ return err;
+
+ usleep_range(2000, 4000);
clk_disable_unprepare(vic->clk);
@@ -282,10 +307,18 @@ static const struct vic_config vic_t186_config = {
.version = 0x18,
};
+#define NVIDIA_TEGRA_194_VIC_FIRMWARE "nvidia/tegra194/vic.bin"
+
+static const struct vic_config vic_t194_config = {
+ .firmware = NVIDIA_TEGRA_194_VIC_FIRMWARE,
+ .version = 0x19,
+};
+
static const struct of_device_id vic_match[] = {
{ .compatible = "nvidia,tegra124-vic", .data = &vic_t124_config },
{ .compatible = "nvidia,tegra210-vic", .data = &vic_t210_config },
{ .compatible = "nvidia,tegra186-vic", .data = &vic_t186_config },
+ { .compatible = "nvidia,tegra194-vic", .data = &vic_t194_config },
{ },
};
@@ -323,6 +356,14 @@ static int vic_probe(struct platform_device *pdev)
return PTR_ERR(vic->clk);
}
+ if (!dev->pm_domain) {
+ vic->rst = devm_reset_control_get(dev, "vic");
+ if (IS_ERR(vic->rst)) {
+ dev_err(&pdev->dev, "failed to get reset\n");
+ return PTR_ERR(vic->rst);
+ }
+ }
+
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
vic->falcon.ops = &vic_falcon_ops;
@@ -418,3 +459,6 @@ MODULE_FIRMWARE(NVIDIA_TEGRA_210_VIC_FIRMWARE);
#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE(NVIDIA_TEGRA_186_VIC_FIRMWARE);
#endif
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_194_VIC_FIRMWARE);
+#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 33e533268488..3dac08b24140 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -140,7 +140,6 @@ static int tilcdc_commit(struct drm_device *dev,
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = tilcdc_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = tilcdc_atomic_check,
.atomic_commit = tilcdc_commit,
};
@@ -191,9 +190,6 @@ static void tilcdc_fini(struct drm_device *dev)
drm_dev_unregister(dev);
drm_kms_helper_poll_fini(dev);
-
- drm_fb_cma_fbdev_fini(dev);
-
drm_irq_uninstall(dev);
drm_mode_config_cleanup(dev);
tilcdc_remove_external_device(dev);
@@ -396,16 +392,14 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
drm_mode_config_reset(ddev);
- ret = drm_fb_cma_fbdev_init(ddev, bpp, 0);
- if (ret)
- goto init_failed;
-
drm_kms_helper_poll_init(ddev);
ret = drm_dev_register(ddev, 0);
if (ret)
goto init_failed;
+ drm_fbdev_generic_setup(ddev, bpp);
+
priv->is_registered = true;
return 0;
@@ -519,7 +513,6 @@ DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver tilcdc_driver = {
.driver_features = (DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET |
DRIVER_PRIME | DRIVER_ATOMIC),
- .lastclose = drm_fb_helper_lastclose,
.irq_handler = tilcdc_irq,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_print_info = drm_gem_cma_print_info,
diff --git a/drivers/gpu/drm/tinydrm/Kconfig b/drivers/gpu/drm/tinydrm/Kconfig
index 16f4b5c91f1b..2c408ac1a900 100644
--- a/drivers/gpu/drm/tinydrm/Kconfig
+++ b/drivers/gpu/drm/tinydrm/Kconfig
@@ -10,6 +10,17 @@ menuconfig DRM_TINYDRM
config TINYDRM_MIPI_DBI
tristate
+config TINYDRM_HX8357D
+ tristate "DRM support for HX8357D display panels"
+ depends on DRM_TINYDRM && SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select TINYDRM_MIPI_DBI
+ help
+ DRM driver for the following HX8357D panels:
+ * YX350HV15-T 3.5" 320x480 TFT (Adafruit 3.5")
+
+ If M is selected the module will be called hx8357d.
+
config TINYDRM_ILI9225
tristate "DRM support for ILI9225 display panels"
depends on DRM_TINYDRM && SPI
diff --git a/drivers/gpu/drm/tinydrm/Makefile b/drivers/gpu/drm/tinydrm/Makefile
index 14d99080665a..f823066f7743 100644
--- a/drivers/gpu/drm/tinydrm/Makefile
+++ b/drivers/gpu/drm/tinydrm/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_TINYDRM) += core/
obj-$(CONFIG_TINYDRM_MIPI_DBI) += mipi-dbi.o
# Displays
+obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 255341ee4eb9..01a6f2d42440 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -36,77 +36,6 @@
* and registers the DRM device using devm_tinydrm_register().
*/
-/**
- * tinydrm_gem_cma_prime_import_sg_table - Produce a CMA GEM object from
- * another driver's scatter/gather table of pinned pages
- * @drm: DRM device to import into
- * @attach: DMA-BUF attachment
- * @sgt: Scatter/gather table of pinned pages
- *
- * This function imports a scatter/gather table exported via DMA-BUF by
- * another driver using drm_gem_cma_prime_import_sg_table(). It sets the
- * kernel virtual address on the CMA object. Drivers should use this as their
- * &drm_driver->gem_prime_import_sg_table callback if they need the virtual
- * address. tinydrm_gem_cma_free_object() should be used in combination with
- * this function.
- *
- * Returns:
- * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
- * error code on failure.
- */
-struct drm_gem_object *
-tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt)
-{
- struct drm_gem_cma_object *cma_obj;
- struct drm_gem_object *obj;
- void *vaddr;
-
- vaddr = dma_buf_vmap(attach->dmabuf);
- if (!vaddr) {
- DRM_ERROR("Failed to vmap PRIME buffer\n");
- return ERR_PTR(-ENOMEM);
- }
-
- obj = drm_gem_cma_prime_import_sg_table(drm, attach, sgt);
- if (IS_ERR(obj)) {
- dma_buf_vunmap(attach->dmabuf, vaddr);
- return obj;
- }
-
- cma_obj = to_drm_gem_cma_obj(obj);
- cma_obj->vaddr = vaddr;
-
- return obj;
-}
-EXPORT_SYMBOL(tinydrm_gem_cma_prime_import_sg_table);
-
-/**
- * tinydrm_gem_cma_free_object - Free resources associated with a CMA GEM
- * object
- * @gem_obj: GEM object to free
- *
- * This function frees the backing memory of the CMA GEM object, cleans up the
- * GEM object state and frees the memory used to store the object itself using
- * drm_gem_cma_free_object(). It also handles PRIME buffers which has the kernel
- * virtual address set by tinydrm_gem_cma_prime_import_sg_table(). Drivers
- * can use this as their &drm_driver->gem_free_object_unlocked callback.
- */
-void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj)
-{
- if (gem_obj->import_attach) {
- struct drm_gem_cma_object *cma_obj;
-
- cma_obj = to_drm_gem_cma_obj(gem_obj);
- dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
- cma_obj->vaddr = NULL;
- }
-
- drm_gem_cma_free_object(gem_obj);
-}
-EXPORT_SYMBOL_GPL(tinydrm_gem_cma_free_object);
-
static struct drm_framebuffer *
tinydrm_fb_create(struct drm_device *drm, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -146,6 +75,7 @@ static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
drm->dev_private = tdev;
drm_mode_config_init(drm);
drm->mode_config.funcs = &tinydrm_mode_config_funcs;
+ drm->mode_config.allow_fb_modifiers = true;
return 0;
}
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index dcd390163a4a..bf6bfbc5d412 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -9,12 +9,18 @@
#include <linux/backlight.h>
#include <linux/dma-buf.h>
+#include <linux/module.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/swab.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/tinydrm/tinydrm.h>
#include <drm/tinydrm/tinydrm-helpers.h>
+#include <uapi/drm/drm.h>
static unsigned int spi_max;
module_param(spi_max, uint, 0400);
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index 7e8e24d0b7a7..eacfc0ec8ff1 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -184,6 +184,10 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
struct drm_display_mode mode_copy;
struct drm_connector *connector;
int ret;
+ static const uint64_t modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+ };
drm_mode_copy(&mode_copy, mode);
ret = tinydrm_rotate_mode(&mode_copy, rotation);
@@ -202,6 +206,6 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
return PTR_ERR(connector);
return drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats,
- format_count, NULL, connector);
+ format_count, modifiers, connector);
}
EXPORT_SYMBOL(tinydrm_display_pipe_init);
diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tinydrm/hx8357d.c
new file mode 100644
index 000000000000..81a2bbeb25d4
--- /dev/null
+++ b/drivers/gpu/drm/tinydrm/hx8357d.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for the HX8357D LCD controller
+ *
+ * Copyright 2018 Broadcom
+ * Copyright 2018 David Lechner <david@lechnology.com>
+ * Copyright 2016 Noralf Trønnes
+ * Copyright (C) 2015 Adafruit Industries
+ * Copyright (C) 2013 Christian Vogelgsang
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <video/mipi_display.h>
+
+#define HX8357D_SETOSC 0xb0
+#define HX8357D_SETPOWER 0xb1
+#define HX8357D_SETRGB 0xb3
+#define HX8357D_SETCYC 0xb4
+#define HX8357D_SETCOM 0xb6
+#define HX8357D_SETEXTC 0xb9
+#define HX8357D_SETSTBA 0xc0
+#define HX8357D_SETPANEL 0xcc
+#define HX8357D_SETGAMMA 0xe0
+
+#define HX8357D_MADCTL_MY 0x80
+#define HX8357D_MADCTL_MX 0x40
+#define HX8357D_MADCTL_MV 0x20
+#define HX8357D_MADCTL_ML 0x10
+#define HX8357D_MADCTL_RGB 0x00
+#define HX8357D_MADCTL_BGR 0x08
+#define HX8357D_MADCTL_MH 0x04
+
+static void yx350hv15_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
+ struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ u8 addr_mode;
+ int ret;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = mipi_dbi_poweron_conditional_reset(mipi);
+ if (ret < 0)
+ return;
+ if (ret == 1)
+ goto out_enable;
+
+ /* setextc */
+ mipi_dbi_command(mipi, HX8357D_SETEXTC, 0xFF, 0x83, 0x57);
+ msleep(150);
+
+ /* setRGB which also enables SDO */
+ mipi_dbi_command(mipi, HX8357D_SETRGB, 0x00, 0x00, 0x06, 0x06);
+
+ /* -1.52V */
+ mipi_dbi_command(mipi, HX8357D_SETCOM, 0x25);
+
+ /* Normal mode 70 Hz, Idle mode 55 Hz */
+ mipi_dbi_command(mipi, HX8357D_SETOSC, 0x68);
+
+ /* Set Panel - BGR, Gate direction swapped */
+ mipi_dbi_command(mipi, HX8357D_SETPANEL, 0x05);
+
+ mipi_dbi_command(mipi, HX8357D_SETPOWER,
+ 0x00, /* Not deep standby */
+ 0x15, /* BT */
+ 0x1C, /* VSPR */
+ 0x1C, /* VSNR */
+ 0x83, /* AP */
+ 0xAA); /* FS */
+
+ mipi_dbi_command(mipi, HX8357D_SETSTBA,
+ 0x50, /* OPON normal */
+ 0x50, /* OPON idle */
+ 0x01, /* STBA */
+ 0x3C, /* STBA */
+ 0x1E, /* STBA */
+ 0x08); /* GEN */
+
+ mipi_dbi_command(mipi, HX8357D_SETCYC,
+ 0x02, /* NW 0x02 */
+ 0x40, /* RTN */
+ 0x00, /* DIV */
+ 0x2A, /* DUM */
+ 0x2A, /* DUM */
+ 0x0D, /* GDON */
+ 0x78); /* GDOFF */
+
+ mipi_dbi_command(mipi, HX8357D_SETGAMMA,
+ 0x02,
+ 0x0A,
+ 0x11,
+ 0x1d,
+ 0x23,
+ 0x35,
+ 0x41,
+ 0x4b,
+ 0x4b,
+ 0x42,
+ 0x3A,
+ 0x27,
+ 0x1B,
+ 0x08,
+ 0x09,
+ 0x03,
+ 0x02,
+ 0x0A,
+ 0x11,
+ 0x1d,
+ 0x23,
+ 0x35,
+ 0x41,
+ 0x4b,
+ 0x4b,
+ 0x42,
+ 0x3A,
+ 0x27,
+ 0x1B,
+ 0x08,
+ 0x09,
+ 0x03,
+ 0x00,
+ 0x01);
+
+ /* 16 bit */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_PIXEL_FORMAT,
+ MIPI_DCS_PIXEL_FMT_16BIT);
+
+ /* TE on (V-blank-only mode) */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_TEAR_ON, 0x00);
+
+ /* tear line */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_TEAR_SCANLINE, 0x00, 0x02);
+
+ /* Exit Sleep */
+ mipi_dbi_command(mipi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(150);
+
+ /* display on */
+ mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+ usleep_range(5000, 7000);
+
+out_enable:
+ switch (mipi->rotation) {
+ default:
+ addr_mode = HX8357D_MADCTL_MX | HX8357D_MADCTL_MY;
+ break;
+ case 90:
+ addr_mode = HX8357D_MADCTL_MV | HX8357D_MADCTL_MY;
+ break;
+ case 180:
+ addr_mode = 0;
+ break;
+ case 270:
+ addr_mode = HX8357D_MADCTL_MV | HX8357D_MADCTL_MX;
+ break;
+ }
+ mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+}
+
+static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
+ .enable = yx350hv15_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = tinydrm_display_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode yx350hv15_mode = {
+ TINYDRM_MODE(320, 480, 60, 75),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
+
+static struct drm_driver hx8357d_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+ .fops = &hx8357d_fops,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "hx8357d",
+ .desc = "HX8357D",
+ .date = "20181023",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id hx8357d_of_match[] = {
+ { .compatible = "adafruit,yx350hv15" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hx8357d_of_match);
+
+static const struct spi_device_id hx8357d_id[] = {
+ { "yx350hv15", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, hx8357d_id);
+
+static int hx8357d_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi *mipi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ if (!mipi)
+ return -ENOMEM;
+
+ dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ mipi->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(mipi->backlight))
+ return PTR_ERR(mipi->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, mipi, dc);
+ if (ret)
+ return ret;
+
+ ret = mipi_dbi_init(&spi->dev, mipi, &hx8357d_pipe_funcs,
+ &hx8357d_driver, &yx350hv15_mode, rotation);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, mipi);
+
+ return devm_tinydrm_register(&mipi->tinydrm);
+}
+
+static void hx8357d_shutdown(struct spi_device *spi)
+{
+ struct mipi_dbi *mipi = spi_get_drvdata(spi);
+
+ tinydrm_shutdown(&mipi->tinydrm);
+}
+
+static struct spi_driver hx8357d_spi_driver = {
+ .driver = {
+ .name = "hx8357d",
+ .of_match_table = hx8357d_of_match,
+ },
+ .id_table = hx8357d_id,
+ .probe = hx8357d_probe,
+ .shutdown = hx8357d_shutdown,
+};
+module_spi_driver(hx8357d_spi_driver);
+
+MODULE_DESCRIPTION("HX8357D DRM driver");
+MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 455fefe012f5..78f7c2d1b449 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -20,7 +20,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -367,7 +368,7 @@ static struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &ili9225_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
index 6701037749a7..51395bdc6ca2 100644
--- a/drivers/gpu/drm/tinydrm/ili9341.c
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -15,7 +15,7 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
@@ -144,7 +144,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
static struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
.fops = &ili9341_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index d7bb4c5e6657..3fa62e77c30b 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -17,9 +17,9 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_modeset_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
#include <video/mipi_display.h>
@@ -153,7 +153,7 @@ static struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index cb3441e51d5f..3a05e56f9b0d 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -9,15 +9,19 @@
* (at your option) any later version.
*/
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/tinydrm/mipi-dbi.h>
-#include <drm/tinydrm/tinydrm-helpers.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/tinydrm/mipi-dbi.h>
+#include <drm/tinydrm/tinydrm-helpers.h>
+#include <uapi/drm/drm.h>
#include <video/mipi_display.h>
#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
@@ -240,10 +244,10 @@ static int mipi_dbi_fb_dirty(struct drm_framebuffer *fb,
mipi_dbi_command(mipi, MIPI_DCS_SET_COLUMN_ADDRESS,
(clip.x1 >> 8) & 0xFF, clip.x1 & 0xFF,
- (clip.x2 >> 8) & 0xFF, (clip.x2 - 1) & 0xFF);
+ ((clip.x2 - 1) >> 8) & 0xFF, (clip.x2 - 1) & 0xFF);
mipi_dbi_command(mipi, MIPI_DCS_SET_PAGE_ADDRESS,
(clip.y1 >> 8) & 0xFF, clip.y1 & 0xFF,
- (clip.y2 >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
+ ((clip.y2 - 1) >> 8) & 0xFF, (clip.y2 - 1) & 0xFF);
ret = mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START, tr,
(clip.x2 - clip.x1) * (clip.y2 - clip.y1) * 2);
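The off-by-one fix matters whenever the exclusive clip end crosses a 256-pixel boundary: the DCS end column/page address is inclusive, so both bytes must come from clip.x2 - 1. For clip.x2 = 256 the old code sent 0x01ff where 0x00ff was meant:

    #include <stdio.h>

    int main(void)
    {
            unsigned int x2 = 256;   /* exclusive clip end on a 256-pixel boundary */

            /* old: high byte from x2, low byte from x2 - 1 */
            printf("old: 0x%02x%02x\n", (x2 >> 8) & 0xFF, (x2 - 1) & 0xFF);
            /* new: both bytes from x2 - 1, the inclusive end */
            printf("new: 0x%02x%02x\n", ((x2 - 1) >> 8) & 0xFF, (x2 - 1) & 0xFF);
            return 0;
    }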
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index 50a1d4216ce7..54d6fe0f37ce 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -26,6 +26,8 @@
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/tinydrm.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -106,12 +108,11 @@ static int repaper_spi_transfer(struct spi_device *spi, u8 header,
/* Stack allocated tx? */
if (tx && len <= 32) {
- txbuf = kmalloc(len, GFP_KERNEL);
+ txbuf = kmemdup(tx, len, GFP_KERNEL);
if (!txbuf) {
ret = -ENOMEM;
goto out_free;
}
- memcpy(txbuf, tx, len);
}
if (rx) {
@@ -882,7 +883,7 @@ static struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &repaper_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
.date = "20170405",
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 2fcbc3067d71..a6a8a1081b73 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,7 +17,8 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -303,7 +304,7 @@ static struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7586_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 3081bc57c116..b39779e0dcd8 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -14,7 +14,7 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
#include <drm/tinydrm/tinydrm-helpers.h>
@@ -119,7 +119,7 @@ static struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7735r_fops,
- TINYDRM_GEM_DRIVER_OPS,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 26b889f86670..d87935bf8e30 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,6 +45,14 @@
static void ttm_bo_global_kobj_release(struct kobject *kobj);
+/**
+ * ttm_global_mutex - protecting the global BO state
+ */
+DEFINE_MUTEX(ttm_global_mutex);
+struct ttm_bo_global ttm_bo_glob = {
+ .use_count = 0
+};
+
static struct attribute ttm_bo_count = {
.name = "bo_count",
.mode = S_IRUGO
@@ -872,7 +880,7 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
if (fence) {
reservation_object_add_shared_fence(bo->resv, fence);
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv, 1);
if (unlikely(ret))
return ret;
@@ -977,7 +985,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
bool has_erestartsys = false;
int i, ret;
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv, 1);
if (unlikely(ret))
return ret;
@@ -1519,35 +1527,45 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
container_of(kobj, struct ttm_bo_global, kobj);
__free_page(glob->dummy_read_page);
- kfree(glob);
}
-void ttm_bo_global_release(struct drm_global_reference *ref)
+static void ttm_bo_global_release(void)
{
- struct ttm_bo_global *glob = ref->object;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
+
+ mutex_lock(&ttm_global_mutex);
+ if (--glob->use_count > 0)
+ goto out;
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
+ ttm_mem_global_release(&ttm_mem_glob);
+out:
+ mutex_unlock(&ttm_global_mutex);
}
-EXPORT_SYMBOL(ttm_bo_global_release);
-int ttm_bo_global_init(struct drm_global_reference *ref)
+static int ttm_bo_global_init(void)
{
- struct ttm_bo_global_ref *bo_ref =
- container_of(ref, struct ttm_bo_global_ref, ref);
- struct ttm_bo_global *glob = ref->object;
- int ret;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
+ int ret = 0;
unsigned i;
- mutex_init(&glob->device_list_mutex);
+ mutex_lock(&ttm_global_mutex);
+ if (++glob->use_count > 1)
+ goto out;
+
+ ret = ttm_mem_global_init(&ttm_mem_glob);
+ if (ret)
+ goto out;
+
spin_lock_init(&glob->lru_lock);
- glob->mem_glob = bo_ref->mem_glob;
+ glob->mem_glob = &ttm_mem_glob;
glob->mem_glob->bo_glob = glob;
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
ret = -ENOMEM;
- goto out_no_drp;
+ goto out;
}
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
@@ -1559,13 +1577,10 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
if (unlikely(ret != 0))
kobject_put(&glob->kobj);
- return ret;
-out_no_drp:
- kfree(glob);
+out:
+ mutex_unlock(&ttm_global_mutex);
return ret;
}
-EXPORT_SYMBOL(ttm_bo_global_init);
-
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
@@ -1587,9 +1602,9 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
}
}
- mutex_lock(&glob->device_list_mutex);
+ mutex_lock(&ttm_global_mutex);
list_del(&bdev->device_list);
- mutex_unlock(&glob->device_list_mutex);
+ mutex_unlock(&ttm_global_mutex);
cancel_delayed_work_sync(&bdev->wq);
@@ -1604,18 +1619,25 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
drm_vma_offset_manager_destroy(&bdev->vma_manager);
+ if (!ret)
+ ttm_bo_global_release();
+
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset,
bool need_dma32)
{
- int ret = -EINVAL;
+ struct ttm_bo_global *glob = &ttm_bo_glob;
+ int ret;
+
+ ret = ttm_bo_global_init();
+ if (ret)
+ return ret;
bdev->driver = driver;
@@ -1636,12 +1658,13 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
bdev->dev_mapping = mapping;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
- mutex_lock(&glob->device_list_mutex);
+ mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
- mutex_unlock(&glob->device_list_mutex);
+ mutex_unlock(&ttm_global_mutex);
return 0;
out_no_sys:
+ ttm_bo_global_release();
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
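The drm_global indirection is gone: ttm_bo_glob and ttm_mem_glob are single static instances, and a mutex-guarded use count makes the first ttm_bo_device_init() caller set up the shared state and the last ttm_bo_device_release() caller tear it down. The bare shape of the pattern, sketched with pthreads:

    #include <pthread.h>
    #include <stdio.h>

    /* mutex-guarded use count, mirroring ttm_global_mutex + use_count */
    static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int use_count;

    static void global_get(void)
    {
            pthread_mutex_lock(&global_lock);
            if (++use_count == 1)
                    printf("first user: init shared state\n");
            pthread_mutex_unlock(&global_lock);
    }

    static void global_put(void)
    {
            pthread_mutex_lock(&global_lock);
            if (--use_count == 0)
                    printf("last user: tear down shared state\n");
            pthread_mutex_unlock(&global_lock);
    }

    int main(void)
    {
            global_get();
            global_get();   /* second device: no re-init */
            global_put();
            global_put();   /* last device: teardown */
            return 0;
    }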
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ba80150d1052..895d77d799e4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (!fbo)
return -ENOMEM;
- ttm_bo_get(bo);
fbo->base = *bo;
+ fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
+
+ ttm_bo_get(bo);
fbo->bo = bo;
/**
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index e73ae0d22897..93860346c426 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -126,10 +126,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
}
if (!ret) {
- if (!entry->shared)
+ if (!entry->num_shared)
continue;
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv,
+ entry->num_shared);
if (!ret)
continue;
}
@@ -150,8 +151,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
}
}
- if (!ret && entry->shared)
- ret = reservation_object_reserve_shared(bo->resv);
+ if (!ret && entry->num_shared)
+ ret = reservation_object_reserve_shared(bo->resv,
+ entry->num_shared);
if (unlikely(ret != 0)) {
if (ret == -EINTR)
@@ -187,21 +189,19 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
struct ttm_bo_device *bdev;
- struct ttm_bo_driver *driver;
if (list_empty(list))
return;
bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
bdev = bo->bdev;
- driver = bdev->driver;
glob = bo->bdev->glob;
spin_lock(&glob->lru_lock);
list_for_each_entry(entry, list, head) {
bo = entry->bo;
- if (entry->shared)
+ if (entry->num_shared)
reservation_object_add_shared_fence(bo->resv, fence);
else
reservation_object_add_excl_fence(bo->resv, fence);
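
Since reservation_object_reserve_shared() now takes a slot count, the callers above pass entry->num_shared so a single call reserves room for several shared fences. A hedged sketch of a caller, assuming bo->resv is already held under the ww acquire context:

    /* Reserve two shared-fence slots up front so the later adds
     * cannot fail with -ENOMEM at signaling time.
     */
    ret = reservation_object_reserve_shared(bo->resv, 2);
    if (ret)
            return ret;

    reservation_object_add_shared_fence(bo->resv, fence_a);
    reservation_object_add_shared_fence(bo->resv, fence_b);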
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 450387c92b63..f1567c353b54 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -41,6 +41,9 @@
#define TTM_MEMORY_ALLOC_RETRIES 4
+struct ttm_mem_global ttm_mem_glob;
+EXPORT_SYMBOL(ttm_mem_glob);
+
struct ttm_mem_zone {
struct kobject kobj;
struct ttm_mem_global *glob;
@@ -216,14 +219,6 @@ static ssize_t ttm_mem_global_store(struct kobject *kobj,
return size;
}
-static void ttm_mem_global_kobj_release(struct kobject *kobj)
-{
- struct ttm_mem_global *glob =
- container_of(kobj, struct ttm_mem_global, kobj);
-
- kfree(glob);
-}
-
static struct attribute *ttm_mem_global_attrs[] = {
&ttm_mem_global_lower_mem_limit,
NULL
@@ -235,7 +230,6 @@ static const struct sysfs_ops ttm_mem_global_ops = {
};
static struct kobj_type ttm_mem_glob_kobj_type = {
- .release = &ttm_mem_global_kobj_release,
.sysfs_ops = &ttm_mem_global_ops,
.default_attrs = ttm_mem_global_attrs,
};
@@ -464,7 +458,6 @@ out_no_zone:
ttm_mem_global_release(glob);
return ret;
}
-EXPORT_SYMBOL(ttm_mem_global_init);
void ttm_mem_global_release(struct ttm_mem_global *glob)
{
@@ -486,7 +479,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
kobject_del(&glob->kobj);
kobject_put(&glob->kobj);
}
-EXPORT_SYMBOL(ttm_mem_global_release);
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 72efcecb44f7..28e2d03c0ccf 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -249,7 +249,7 @@ static int tve200_probe(struct platform_device *pdev)
clk_disable:
clk_disable_unprepare(priv->pclk);
dev_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -263,7 +263,7 @@ static int tve200_remove(struct platform_device *pdev)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
clk_disable_unprepare(priv->pclk);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return 0;
}
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index f455f095a146..1b014d92855b 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -350,15 +350,10 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto err;
- ret = drm_vblank_init(dev, 1);
- if (ret)
- goto err_fb;
-
drm_kms_helper_poll_init(dev);
return 0;
-err_fb:
- udl_fbdev_cleanup(dev);
+
err:
if (udl->urbs.count)
udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 54d96518a131..a08766d39eab 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -293,6 +293,7 @@ v3d_prime_import_sg_table(struct drm_device *dev,
bo->resv = attach->dmabuf->resv;
bo->sgt = sgt;
+ obj->import_attach = attach;
v3d_bo_get_pages(bo);
v3d_mmu_insert_ptes(bo);
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 4db62c545748..eb2b2d2f8553 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -71,10 +71,13 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
V3D_READ(v3d_hub_reg_defs[i].reg));
}
- for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
- V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+ if (v3d->ver < 41) {
+ for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ v3d_gca_reg_defs[i].name,
+ v3d_gca_reg_defs[i].reg,
+ V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+ }
}
for (core = 0; core < v3d->cores; core++) {
@@ -176,9 +179,44 @@ static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused)
return 0;
}
+static int v3d_measure_clock(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ uint32_t cycles;
+ int core = 0;
+ int measure_ms = 1000;
+
+ if (v3d->ver >= 40) {
+ V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
+ V3D_SET_FIELD(V3D_PCTR_CYCLE_COUNT,
+ V3D_PCTR_S0));
+ V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1);
+ V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1);
+ } else {
+ V3D_CORE_WRITE(core, V3D_V3_PCTR_0_PCTRS0,
+ V3D_PCTR_CYCLE_COUNT);
+ V3D_CORE_WRITE(core, V3D_V3_PCTR_0_CLR, 1);
+ V3D_CORE_WRITE(core, V3D_V3_PCTR_0_EN,
+ V3D_V3_PCTR_0_EN_ENABLE |
+ 1);
+ }
+ msleep(measure_ms);
+ cycles = V3D_CORE_READ(core, V3D_PCTR_0_PCTR0);
+
+ seq_printf(m, "cycles: %d (%d.%d MHz)\n",
+ cycles,
+ cycles / (measure_ms * 1000),
+ (cycles / (measure_ms * 100)) % 10);
+
+ return 0;
+}
+
static const struct drm_info_list v3d_debugfs_list[] = {
{"v3d_ident", v3d_v3d_debugfs_ident, 0},
{"v3d_regs", v3d_v3d_debugfs_regs, 0},
+ {"measure_clock", v3d_measure_clock, 0},
{"bo_stats", v3d_debugfs_bo_stats, 0},
};
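
The measure_clock node samples the cycle counter for measure_ms = 1000 ms, so cycles / 1,000,000 gives the clock in MHz and the second expression recovers one decimal digit. A worked check of the integer arithmetic (plain userspace C, hypothetical sample value):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t cycles = 499500000;    /* pretend 1 s sample */
            int measure_ms = 1000;

            /* 499500000 / 1000000 = 499; (499500000 / 100000) % 10 = 5,
             * so this prints "cycles: 499500000 (499.5 MHz)".
             */
            printf("cycles: %u (%u.%u MHz)\n", cycles,
                   cycles / (measure_ms * 1000),
                   (cycles / (measure_ms * 100)) % 10);
            return 0;
    }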
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 2a85fa68ffea..f0afcec72c34 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -112,10 +112,15 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
return 0;
}
- /* Any params that aren't just register reads would go here. */
- DRM_DEBUG("Unknown parameter %d\n", args->param);
- return -EINVAL;
+ switch (args->param) {
+ case DRM_V3D_PARAM_SUPPORTS_TFU:
+ args->value = 1;
+ return 0;
+ default:
+ DRM_DEBUG("Unknown parameter %d\n", args->param);
+ return -EINVAL;
+ }
}
static int
@@ -170,7 +175,8 @@ static const struct file_operations v3d_drm_fops = {
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
* able to submit CLs that could access BOs from clients authenticated
- * with the master node.
+ * with the master node. The TFU doesn't use the GMP, so it would
+ * need to stay DRM_AUTH until we do buffer size/offset validation.
*/
static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CL, v3d_submit_cl_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
@@ -179,6 +185,7 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_MMAP_BO, v3d_mmap_bo_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_GET_PARAM, v3d_get_param_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(V3D_GET_BO_OFFSET, v3d_get_bo_offset_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
};
static const struct vm_operations_struct v3d_vm_ops = {
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index e6fed696ad86..dcb772a19191 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -7,19 +7,18 @@
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
+#include "uapi/drm/v3d_drm.h"
#define GMP_GRANULARITY (128 * 1024)
-/* Enum for each of the V3D queues. We maintain various queue
- * tracking as an array because at some point we'll want to support
- * the TFU (texture formatting unit) as another queue.
- */
+/* Enum for each of the V3D queues. */
enum v3d_queue {
V3D_BIN,
V3D_RENDER,
+ V3D_TFU,
};
-#define V3D_MAX_QUEUES (V3D_RENDER + 1)
+#define V3D_MAX_QUEUES (V3D_TFU + 1)
struct v3d_queue_state {
struct drm_gpu_scheduler sched;
@@ -68,6 +67,7 @@ struct v3d_dev {
struct v3d_exec_info *bin_job;
struct v3d_exec_info *render_job;
+ struct v3d_tfu_job *tfu_job;
struct v3d_queue_state queue[V3D_MAX_QUEUES];
@@ -198,6 +198,11 @@ struct v3d_exec_info {
*/
struct dma_fence *bin_done_fence;
+ /* Fence for when the scheduler considers the render to be
+ * done, at which point the BOs' reservations should be
+ * complete.
+ */
+ struct dma_fence *render_done_fence;
+
struct kref refcount;
/* This is the array of BOs that were looked up at the start of exec. */
@@ -213,6 +218,25 @@ struct v3d_exec_info {
u32 qma, qms, qts;
};
+struct v3d_tfu_job {
+ struct drm_sched_job base;
+
+ struct drm_v3d_submit_tfu args;
+
+ /* An optional fence userspace can pass in for the job to depend on. */
+ struct dma_fence *in_fence;
+
+ /* v3d fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct v3d_dev *v3d;
+
+ struct kref refcount;
+
+ /* This is the array of BOs that were looked up at the start of exec. */
+ struct v3d_bo *bo[4];
+};
+
/**
* _wait_for - magic (register) wait macro
*
@@ -276,9 +300,12 @@ int v3d_gem_init(struct drm_device *dev);
void v3d_gem_destroy(struct drm_device *dev);
int v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void v3d_exec_put(struct v3d_exec_info *exec);
+void v3d_tfu_job_put(struct v3d_tfu_job *exec);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
void v3d_flush_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 50bfcf9a8a1a..b0a2a1ae2eb1 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -29,10 +29,16 @@ static const char *v3d_fence_get_timeline_name(struct dma_fence *fence)
{
struct v3d_fence *f = to_v3d_fence(fence);
- if (f->queue == V3D_BIN)
+ switch (f->queue) {
+ case V3D_BIN:
return "v3d-bin";
- else
+ case V3D_RENDER:
return "v3d-render";
+ case V3D_TFU:
+ return "v3d-tfu";
+ default:
+ return NULL;
+ }
}
const struct dma_fence_ops v3d_fence_ops = {
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 70c54774400b..05ca6319065e 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -207,32 +207,26 @@ v3d_flush_caches(struct v3d_dev *v3d)
}
static void
-v3d_attach_object_fences(struct v3d_exec_info *exec)
+v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
+ struct dma_fence *fence)
{
- struct dma_fence *out_fence = &exec->render.base.s_fence->finished;
- struct v3d_bo *bo;
int i;
- for (i = 0; i < exec->bo_count; i++) {
- bo = to_v3d_bo(&exec->bo[i]->base);
-
+ for (i = 0; i < bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- reservation_object_add_excl_fence(bo->resv, out_fence);
+ reservation_object_add_excl_fence(bos[i]->resv, fence);
}
}
static void
-v3d_unlock_bo_reservations(struct drm_device *dev,
- struct v3d_exec_info *exec,
+v3d_unlock_bo_reservations(struct v3d_bo **bos,
+ int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
int i;
- for (i = 0; i < exec->bo_count; i++) {
- struct v3d_bo *bo = to_v3d_bo(&exec->bo[i]->base);
-
- ww_mutex_unlock(&bo->resv->lock);
- }
+ for (i = 0; i < bo_count; i++)
+ ww_mutex_unlock(&bos[i]->resv->lock);
ww_acquire_fini(acquire_ctx);
}
@@ -245,19 +239,19 @@ v3d_unlock_bo_reservations(struct drm_device *dev,
* to v3d, so we don't attach dma-buf fences to them.
*/
static int
-v3d_lock_bo_reservations(struct drm_device *dev,
- struct v3d_exec_info *exec,
+v3d_lock_bo_reservations(struct v3d_bo **bos,
+ int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
int contended_lock = -1;
int i, ret;
- struct v3d_bo *bo;
ww_acquire_init(acquire_ctx, &reservation_ww_class);
retry:
if (contended_lock != -1) {
- bo = to_v3d_bo(&exec->bo[contended_lock]->base);
+ struct v3d_bo *bo = bos[contended_lock];
+
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
acquire_ctx);
if (ret) {
@@ -266,23 +260,20 @@ retry:
}
}
- for (i = 0; i < exec->bo_count; i++) {
+ for (i = 0; i < bo_count; i++) {
if (i == contended_lock)
continue;
- bo = to_v3d_bo(&exec->bo[i]->base);
-
- ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
+ ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock,
+ acquire_ctx);
if (ret) {
int j;
- for (j = 0; j < i; j++) {
- bo = to_v3d_bo(&exec->bo[j]->base);
- ww_mutex_unlock(&bo->resv->lock);
- }
+ for (j = 0; j < i; j++)
+ ww_mutex_unlock(&bos[j]->resv->lock);
if (contended_lock != -1 && contended_lock >= i) {
- bo = to_v3d_bo(&exec->bo[contended_lock]->base);
+ struct v3d_bo *bo = bos[contended_lock];
ww_mutex_unlock(&bo->resv->lock);
}
@@ -302,12 +293,11 @@ retry:
/* Reserve space for our shared (read-only) fence references,
* before we commit the CL to the hardware.
*/
- for (i = 0; i < exec->bo_count; i++) {
- bo = to_v3d_bo(&exec->bo[i]->base);
-
- ret = reservation_object_reserve_shared(bo->resv);
+ for (i = 0; i < bo_count; i++) {
+ ret = reservation_object_reserve_shared(bos[i]->resv, 1);
if (ret) {
- v3d_unlock_bo_reservations(dev, exec, acquire_ctx);
+ v3d_unlock_bo_reservations(bos, bo_count,
+ acquire_ctx);
return ret;
}
}
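
The retry loop above is the standard wound/wait mutex pattern: take each reservation with ww_mutex_lock_interruptible(); on -EDEADLK drop everything held, sleep on the contended lock with the _slow variant, and start over. A condensed sketch of the idiom, not the driver code verbatim:

    static int lock_all(struct ww_mutex **locks, int n,
                        struct ww_acquire_ctx *ctx)
    {
            int contended = -1, i, ret;

            ww_acquire_init(ctx, &reservation_ww_class);
    retry:
            if (contended != -1)
                    ww_mutex_lock_slow(locks[contended], ctx);

            for (i = 0; i < n; i++) {
                    if (i == contended)     /* already taken via _slow */
                            continue;

                    ret = ww_mutex_lock_interruptible(locks[i], ctx);
                    if (ret) {
                            int j;

                            for (j = 0; j < i; j++)
                                    ww_mutex_unlock(locks[j]);
                            if (contended > i)
                                    ww_mutex_unlock(locks[contended]);
                            if (ret != -EDEADLK) {
                                    ww_acquire_fini(ctx);
                                    return ret;
                            }
                            contended = i;  /* wait on the winner, retry */
                            goto retry;
                    }
            }
            ww_acquire_done(ctx);
            return 0;
    }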
@@ -409,6 +399,7 @@ v3d_exec_cleanup(struct kref *ref)
dma_fence_put(exec->render.done_fence);
dma_fence_put(exec->bin_done_fence);
+ dma_fence_put(exec->render_done_fence);
for (i = 0; i < exec->bo_count; i++)
drm_gem_object_put_unlocked(&exec->bo[i]->base);
@@ -429,6 +420,33 @@ void v3d_exec_put(struct v3d_exec_info *exec)
kref_put(&exec->refcount, v3d_exec_cleanup);
}
+static void
+v3d_tfu_job_cleanup(struct kref *ref)
+{
+ struct v3d_tfu_job *job = container_of(ref, struct v3d_tfu_job,
+ refcount);
+ struct v3d_dev *v3d = job->v3d;
+ unsigned int i;
+
+ dma_fence_put(job->in_fence);
+ dma_fence_put(job->done_fence);
+
+ for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
+ if (job->bo[i])
+ drm_gem_object_put_unlocked(&job->bo[i]->base);
+ }
+
+ pm_runtime_mark_last_busy(v3d->dev);
+ pm_runtime_put_autosuspend(v3d->dev);
+
+ kfree(job);
+}
+
+void v3d_tfu_job_put(struct v3d_tfu_job *job)
+{
+ kref_put(&job->refcount, v3d_tfu_job_cleanup);
+}
+
int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -503,6 +521,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct drm_syncobj *sync_out;
int ret = 0;
+ trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
+
if (args->pad != 0) {
DRM_INFO("pad must be zero: %d\n", args->pad);
return -EINVAL;
@@ -521,12 +541,12 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
kref_init(&exec->refcount);
ret = drm_syncobj_find_fence(file_priv, args->in_sync_bcl,
- 0, &exec->bin.in_fence);
+ 0, 0, &exec->bin.in_fence);
if (ret == -EINVAL)
goto fail;
ret = drm_syncobj_find_fence(file_priv, args->in_sync_rcl,
- 0, &exec->render.in_fence);
+ 0, 0, &exec->render.in_fence);
if (ret == -EINVAL)
goto fail;
@@ -546,7 +566,8 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (ret)
goto fail;
- ret = v3d_lock_bo_reservations(dev, exec, &acquire_ctx);
+ ret = v3d_lock_bo_reservations(exec->bo, exec->bo_count,
+ &acquire_ctx);
if (ret)
goto fail;
@@ -572,20 +593,23 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (ret)
goto fail_unreserve;
+ exec->render_done_fence =
+ dma_fence_get(&exec->render.base.s_fence->finished);
+
kref_get(&exec->refcount); /* put by scheduler job completion */
drm_sched_entity_push_job(&exec->render.base,
&v3d_priv->sched_entity[V3D_RENDER]);
mutex_unlock(&v3d->sched_lock);
- v3d_attach_object_fences(exec);
+ v3d_attach_object_fences(exec->bo, exec->bo_count,
+ exec->render_done_fence);
- v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+ v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
/* Update the return sync object for the job. */
sync_out = drm_syncobj_find(file_priv, args->out_sync);
if (sync_out) {
- drm_syncobj_replace_fence(sync_out, 0,
- &exec->render.base.s_fence->finished);
+ drm_syncobj_replace_fence(sync_out, exec->render_done_fence);
drm_syncobj_put(sync_out);
}
@@ -595,13 +619,121 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
fail_unreserve:
mutex_unlock(&v3d->sched_lock);
- v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
+ v3d_unlock_bo_reservations(exec->bo, exec->bo_count, &acquire_ctx);
fail:
v3d_exec_put(exec);
return ret;
}
+/**
+ * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
+ * @dev: DRM device
+ * @data: ioctl argument
+ * @file_priv: DRM file for this fd
+ *
+ * Userspace provides the register setup for the TFU, which we don't
+ * need to validate since the TFU is behind the MMU.
+ */
+int
+v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct drm_v3d_submit_tfu *args = data;
+ struct v3d_tfu_job *job;
+ struct ww_acquire_ctx acquire_ctx;
+ struct drm_syncobj *sync_out;
+ struct dma_fence *sched_done_fence;
+ int ret = 0;
+ int bo_count;
+
+ trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
+
+ job = kcalloc(1, sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return -ENOMEM;
+
+ ret = pm_runtime_get_sync(v3d->dev);
+ if (ret < 0) {
+ kfree(job);
+ return ret;
+ }
+
+ kref_init(&job->refcount);
+
+ ret = drm_syncobj_find_fence(file_priv, args->in_sync,
+ 0, 0, &job->in_fence);
+ if (ret == -EINVAL)
+ goto fail;
+
+ job->args = *args;
+ job->v3d = v3d;
+
+ spin_lock(&file_priv->table_lock);
+ for (bo_count = 0; bo_count < ARRAY_SIZE(job->bo); bo_count++) {
+ struct drm_gem_object *bo;
+
+ if (!args->bo_handles[bo_count])
+ break;
+
+ bo = idr_find(&file_priv->object_idr,
+ args->bo_handles[bo_count]);
+ if (!bo) {
+ DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
+ bo_count, args->bo_handles[bo_count]);
+ ret = -ENOENT;
+ spin_unlock(&file_priv->table_lock);
+ goto fail;
+ }
+ drm_gem_object_get(bo);
+ job->bo[bo_count] = to_v3d_bo(bo);
+ }
+ spin_unlock(&file_priv->table_lock);
+
+ ret = v3d_lock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&v3d->sched_lock);
+ ret = drm_sched_job_init(&job->base,
+ &v3d_priv->sched_entity[V3D_TFU],
+ v3d_priv);
+ if (ret)
+ goto fail_unreserve;
+
+ sched_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+ drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[V3D_TFU]);
+ mutex_unlock(&v3d->sched_lock);
+
+ v3d_attach_object_fences(job->bo, bo_count, sched_done_fence);
+
+ v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+
+ /* Update the return sync object */
+ sync_out = drm_syncobj_find(file_priv, args->out_sync);
+ if (sync_out) {
+ drm_syncobj_replace_fence(sync_out, sched_done_fence);
+ drm_syncobj_put(sync_out);
+ }
+ dma_fence_put(sched_done_fence);
+
+ v3d_tfu_job_put(job);
+
+ return 0;
+
+fail_unreserve:
+ mutex_unlock(&v3d->sched_lock);
+ v3d_unlock_bo_reservations(job->bo, bo_count, &acquire_ctx);
+fail:
+ v3d_tfu_job_put(job);
+
+ return ret;
+}
+
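+
From userspace, the new ioctl takes a filled-in struct drm_v3d_submit_tfu; the kernel validates nothing beyond the BO lookups, since the TFU works through the MMU. A hedged usage sketch, with every field value standing in for a real image layout:

    #include <sys/ioctl.h>
    #include "drm/v3d_drm.h"

    static int submit_tfu(int drm_fd, __u32 src_bo, __u32 dst_bo,
                          __u32 icfg, __u32 out_syncobj)
    {
            struct drm_v3d_submit_tfu args = {
                    .icfg = icfg,                   /* conversion config */
                    .bo_handles = { src_bo, dst_bo },
                    .out_sync = out_syncobj,        /* signaled on completion */
            };

            /* iia/iis/ica/iua/ioa/ios and coef[] would be filled in
             * from the source and destination image layouts.
             */
            return ioctl(drm_fd, DRM_IOCTL_V3D_SUBMIT_TFU, &args);
    }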
int
v3d_gem_init(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index e07514eb11b5..69338da70ddc 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -4,8 +4,8 @@
/**
* DOC: Interrupt management for the V3D engine
*
- * When we take a binning or rendering flush done interrupt, we need
- * to signal the fence for that job so that the scheduler can queue up
+ * When we take a bin, render, or TFU done interrupt, we need to
+ * signal the fence for that job so that the scheduler can queue up
* the next one and unblock any waiters.
*
* When we take the binner out of memory interrupt, we need to
@@ -15,6 +15,7 @@
#include "v3d_drv.h"
#include "v3d_regs.h"
+#include "v3d_trace.h"
#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM | \
V3D_INT_FLDONE | \
@@ -23,7 +24,8 @@
#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV | \
V3D_HUB_INT_MMU_PTI | \
- V3D_HUB_INT_MMU_CAP))
+ V3D_HUB_INT_MMU_CAP | \
+ V3D_HUB_INT_TFUC))
static void
v3d_overflow_mem_work(struct work_struct *work)
@@ -87,12 +89,20 @@ v3d_irq(int irq, void *arg)
}
if (intsts & V3D_INT_FLDONE) {
- dma_fence_signal(v3d->bin_job->bin.done_fence);
+ struct v3d_fence *fence =
+ to_v3d_fence(v3d->bin_job->bin.done_fence);
+
+ trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_FRDONE) {
- dma_fence_signal(v3d->render_job->render.done_fence);
+ struct v3d_fence *fence =
+ to_v3d_fence(v3d->render_job->render.done_fence);
+
+ trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
status = IRQ_HANDLED;
}
@@ -117,6 +127,15 @@ v3d_hub_irq(int irq, void *arg)
/* Acknowledge the interrupts we're handling here. */
V3D_WRITE(V3D_HUB_INT_CLR, intsts);
+ if (intsts & V3D_HUB_INT_TFUC) {
+ struct v3d_fence *fence =
+ to_v3d_fence(v3d->tfu_job->done_fence);
+
+ trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
+ status = IRQ_HANDLED;
+ }
+
if (intsts & (V3D_HUB_INT_MMU_WRV |
V3D_HUB_INT_MMU_PTI |
V3D_HUB_INT_MMU_CAP)) {
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 854046565989..6ccdee9d47bd 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -86,6 +86,55 @@
# define V3D_TOP_GR_BRIDGE_SW_INIT_1 0x0000c
# define V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT BIT(0)
+#define V3D_TFU_CS 0x00400
+/* Stops current job, empties input fifo. */
+# define V3D_TFU_CS_TFURST BIT(31)
+# define V3D_TFU_CS_CVTCT_MASK V3D_MASK(23, 16)
+# define V3D_TFU_CS_CVTCT_SHIFT 16
+# define V3D_TFU_CS_NFREE_MASK V3D_MASK(13, 8)
+# define V3D_TFU_CS_NFREE_SHIFT 8
+# define V3D_TFU_CS_BUSY BIT(0)
+
+#define V3D_TFU_SU 0x00404
+/* Interrupt when FINTTHR input slots are free (0 = disabled) */
+# define V3D_TFU_SU_FINTTHR_MASK V3D_MASK(13, 8)
+# define V3D_TFU_SU_FINTTHR_SHIFT 8
+/* Skips resetting the CRC at the start of CRC generation. */
+# define V3D_TFU_SU_CRCCHAIN BIT(4)
+/* Skips writes, computes CRC of the image. miplevels must be 0. */
+# define V3D_TFU_SU_CRC BIT(3)
+# define V3D_TFU_SU_THROTTLE_MASK V3D_MASK(1, 0)
+# define V3D_TFU_SU_THROTTLE_SHIFT 0
+
+#define V3D_TFU_ICFG 0x00408
+/* Interrupt when the conversion is complete. */
+# define V3D_TFU_ICFG_IOC BIT(0)
+
+/* Input Image Address */
+#define V3D_TFU_IIA 0x0040c
+/* Input Chroma Address */
+#define V3D_TFU_ICA 0x00410
+/* Input Image Stride */
+#define V3D_TFU_IIS 0x00414
+/* Input Image U-Plane Address */
+#define V3D_TFU_IUA 0x00418
+/* Output Image Address */
+#define V3D_TFU_IOA 0x0041c
+/* Image Output Size */
+#define V3D_TFU_IOS 0x00420
+/* TFU YUV Coefficient 0 */
+#define V3D_TFU_COEF0 0x00424
+/* Use these regs instead of the defaults. */
+# define V3D_TFU_COEF0_USECOEF BIT(31)
+/* TFU YUV Coefficient 1 */
+#define V3D_TFU_COEF1 0x00428
+/* TFU YUV Coefficient 2 */
+#define V3D_TFU_COEF2 0x0042c
+/* TFU YUV Coefficient 3 */
+#define V3D_TFU_COEF3 0x00430
+
+#define V3D_TFU_CRC 0x00434
+
/* Per-MMU registers. */
#define V3D_MMUC_CONTROL 0x01000
@@ -267,6 +316,36 @@
# define V3D_PTB_BXCF_RWORDERDISA BIT(1)
# define V3D_PTB_BXCF_CLIPDISA BIT(0)
+#define V3D_V3_PCTR_0_EN 0x00674
+#define V3D_V3_PCTR_0_EN_ENABLE BIT(31)
+#define V3D_V4_PCTR_0_EN 0x00650
+/* When a bit is set, resets the counter to 0. */
+#define V3D_V3_PCTR_0_CLR 0x00670
+#define V3D_V4_PCTR_0_CLR 0x00654
+#define V3D_PCTR_0_OVERFLOW 0x00658
+
+#define V3D_V3_PCTR_0_PCTRS0 0x00684
+#define V3D_V3_PCTR_0_PCTRS15 0x00660
+#define V3D_V3_PCTR_0_PCTRSX(x) (V3D_V3_PCTR_0_PCTRS0 + \
+ 4 * (x))
+/* Each src reg muxes four counters. */
+#define V3D_V4_PCTR_0_SRC_0_3 0x00660
+#define V3D_V4_PCTR_0_SRC_28_31 0x0067c
+# define V3D_PCTR_S0_MASK V3D_MASK(6, 0)
+# define V3D_PCTR_S0_SHIFT 0
+# define V3D_PCTR_S1_MASK V3D_MASK(14, 8)
+# define V3D_PCTR_S1_SHIFT 8
+# define V3D_PCTR_S2_MASK V3D_MASK(22, 16)
+# define V3D_PCTR_S2_SHIFT 16
+# define V3D_PCTR_S3_MASK V3D_MASK(30, 24)
+# define V3D_PCTR_S3_SHIFT 24
+# define V3D_PCTR_CYCLE_COUNT 32
+
+/* Output values of the counters. */
+#define V3D_PCTR_0_PCTR0 0x00680
+#define V3D_PCTR_0_PCTR31 0x006fc
+#define V3D_PCTR_0_PCTRX(x) (V3D_PCTR_0_PCTR0 + \
+ 4 * (x))
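
These source registers pack four 7-bit event selectors per 32-bit word, one per counter slot; V3D_SET_FIELD() shifts a value into its slot's mask, which is how v3d_measure_clock() above routes the cycle-count event into counter 0. For instance, a sketch using the definitions above:

    /* Cycle count (event 32) on counters 0 and 1 of SRC_0_3:
     * equivalent to (32 << V3D_PCTR_S0_SHIFT) | (32 << V3D_PCTR_S1_SHIFT).
     */
    u32 src = V3D_SET_FIELD(V3D_PCTR_CYCLE_COUNT, V3D_PCTR_S0) |
              V3D_SET_FIELD(V3D_PCTR_CYCLE_COUNT, V3D_PCTR_S1);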
#define V3D_GMP_STATUS 0x00800
# define V3D_GMP_STATUS_GMPRST BIT(31)
# define V3D_GMP_STATUS_WR_COUNT_MASK V3D_MASK(30, 24)
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 9243dea6e6ad..f7508e907536 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -30,16 +30,34 @@ to_v3d_job(struct drm_sched_job *sched_job)
return container_of(sched_job, struct v3d_job, base);
}
+static struct v3d_tfu_job *
+to_tfu_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct v3d_tfu_job, base);
+}
+
static void
v3d_job_free(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
+ drm_sched_job_cleanup(sched_job);
+
v3d_exec_put(job->exec);
}
+static void
+v3d_tfu_job_free(struct drm_sched_job *sched_job)
+{
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ v3d_tfu_job_put(job);
+}
+
/**
- * Returns the fences that the bin job depends on, one by one.
+ * Returns the fences that the bin or render job depends on, one by one.
* v3d_job_run() won't be called until all of them have been signaled.
*/
static struct dma_fence *
@@ -76,6 +94,27 @@ v3d_job_dependency(struct drm_sched_job *sched_job,
return fence;
}
+/**
+ * Returns the fences that the TFU job depends on, one by one.
+ * v3d_tfu_job_run() won't be called until all of them have been
+ * signaled.
+ */
+static struct dma_fence *
+v3d_tfu_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+{
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+ struct dma_fence *fence;
+
+ fence = job->in_fence;
+ if (fence) {
+ job->in_fence = NULL;
+ return fence;
+ }
+
+ return NULL;
+}
+
static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
@@ -147,31 +186,47 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
return fence;
}
-static void
-v3d_job_timedout(struct drm_sched_job *sched_job)
+static struct dma_fence *
+v3d_tfu_job_run(struct drm_sched_job *sched_job)
{
- struct v3d_job *job = to_v3d_job(sched_job);
- struct v3d_exec_info *exec = job->exec;
- struct v3d_dev *v3d = exec->v3d;
- enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
- enum v3d_queue q;
- u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
- u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+ struct v3d_dev *v3d = job->v3d;
+ struct drm_device *dev = &v3d->drm;
+ struct dma_fence *fence;
- /* If the current address or return address have changed, then
- * the GPU has probably made progress and we should delay the
- * reset. This could fail if the GPU got in an infinite loop
- * in the CL, but that is pretty unlikely outside of an i-g-t
- * testcase.
- */
- if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
- job->timedout_ctca = ctca;
- job->timedout_ctra = ctra;
+ fence = v3d_fence_create(v3d, V3D_TFU);
+ if (IS_ERR(fence))
+ return NULL;
- schedule_delayed_work(&job->base.sched->work_tdr,
- job->base.sched->timeout);
- return;
+ v3d->tfu_job = job;
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
+
+ V3D_WRITE(V3D_TFU_IIA, job->args.iia);
+ V3D_WRITE(V3D_TFU_IIS, job->args.iis);
+ V3D_WRITE(V3D_TFU_ICA, job->args.ica);
+ V3D_WRITE(V3D_TFU_IUA, job->args.iua);
+ V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
+ V3D_WRITE(V3D_TFU_IOS, job->args.ios);
+ V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
+ if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
+ V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
+ V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
+ V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
}
+ /* ICFG kicks off the job. */
+ V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
+
+ return fence;
+}
+
+static void
+v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
+{
+ enum v3d_queue q;
mutex_lock(&v3d->reset_lock);
@@ -196,6 +251,39 @@ v3d_job_timedout(struct drm_sched_job *sched_job)
mutex_unlock(&v3d->reset_lock);
}
+static void
+v3d_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct v3d_job *job = to_v3d_job(sched_job);
+ struct v3d_exec_info *exec = job->exec;
+ struct v3d_dev *v3d = exec->v3d;
+ enum v3d_queue job_q = job == &exec->bin ? V3D_BIN : V3D_RENDER;
+ u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(job_q));
+ u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(job_q));
+
+ /* If the current address or return address have changed, then
+ * the GPU has probably made progress and we should delay the
+ * reset. This could fail if the GPU got in an infinite loop
+ * in the CL, but that is pretty unlikely outside of an i-g-t
+ * testcase.
+ */
+ if (job->timedout_ctca != ctca || job->timedout_ctra != ctra) {
+ job->timedout_ctca = ctca;
+ job->timedout_ctra = ctra;
+ return;
+ }
+
+ v3d_gpu_reset_for_timeout(v3d, sched_job);
+}
+
+static void
+v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct v3d_tfu_job *job = to_tfu_job(sched_job);
+
+ v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+}
+
static const struct drm_sched_backend_ops v3d_sched_ops = {
.dependency = v3d_job_dependency,
.run_job = v3d_job_run,
@@ -203,6 +291,13 @@ static const struct drm_sched_backend_ops v3d_sched_ops = {
.free_job = v3d_job_free
};
+static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
+ .dependency = v3d_tfu_job_dependency,
+ .run_job = v3d_tfu_job_run,
+ .timedout_job = v3d_tfu_job_timedout,
+ .free_job = v3d_tfu_job_free
+};
+
int
v3d_sched_init(struct v3d_dev *v3d)
{
@@ -233,6 +328,19 @@ v3d_sched_init(struct v3d_dev *v3d)
return ret;
}
+ ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
+ &v3d_tfu_sched_ops,
+ hw_jobs_limit, job_hang_limit,
+ msecs_to_jiffies(hang_limit_ms),
+ "v3d_tfu");
+ if (ret) {
+ dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
+ ret);
+ drm_sched_fini(&v3d->queue[V3D_RENDER].sched);
+ drm_sched_fini(&v3d->queue[V3D_BIN].sched);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_trace.h b/drivers/gpu/drm/v3d/v3d_trace.h
index 85dd351e1e09..edd984afa33f 100644
--- a/drivers/gpu/drm/v3d/v3d_trace.h
+++ b/drivers/gpu/drm/v3d/v3d_trace.h
@@ -12,6 +12,28 @@
#define TRACE_SYSTEM v3d
#define TRACE_INCLUDE_FILE v3d_trace
+TRACE_EVENT(v3d_submit_cl_ioctl,
+ TP_PROTO(struct drm_device *dev, u32 ct1qba, u32 ct1qea),
+ TP_ARGS(dev, ct1qba, ct1qea),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, ct1qba)
+ __field(u32, ct1qea)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->ct1qba = ct1qba;
+ __entry->ct1qea = ct1qea;
+ ),
+
+ TP_printk("dev=%u, RCL 0x%08x..0x%08x",
+ __entry->dev,
+ __entry->ct1qba,
+ __entry->ct1qea)
+);
+
TRACE_EVENT(v3d_submit_cl,
TP_PROTO(struct drm_device *dev, bool is_render,
uint64_t seqno,
@@ -42,6 +64,105 @@ TRACE_EVENT(v3d_submit_cl,
__entry->ctnqea)
);
+TRACE_EVENT(v3d_bcl_irq,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
+TRACE_EVENT(v3d_rcl_irq,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
+TRACE_EVENT(v3d_tfu_irq,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
+TRACE_EVENT(v3d_submit_tfu_ioctl,
+ TP_PROTO(struct drm_device *dev, u32 iia),
+ TP_ARGS(dev, iia),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, iia)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->iia = iia;
+ ),
+
+ TP_printk("dev=%u, IIA 0x%08x",
+ __entry->dev,
+ __entry->iia)
+);
+
+TRACE_EVENT(v3d_submit_tfu,
+ TP_PROTO(struct drm_device *dev,
+ uint64_t seqno),
+ TP_ARGS(dev, seqno),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = dev->primary->index;
+ __entry->seqno = seqno;
+ ),
+
+ TP_printk("dev=%u, seqno=%llu",
+ __entry->dev,
+ __entry->seqno)
+);
+
TRACE_EVENT(v3d_reset_begin,
TP_PROTO(struct drm_device *dev),
TP_ARGS(dev),
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 1f1780ccdbdf..f6f5cd80c04d 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -33,6 +33,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_atomic_helper.h>
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
@@ -308,6 +309,8 @@ static void vc4_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
+
drm_mode_config_cleanup(drm);
drm_atomic_private_obj_fini(&vc4->ctm_manager);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index bd6ef1f31822..4f87b03f837d 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -338,6 +338,7 @@ struct vc4_plane_state {
u32 pos0_offset;
u32 pos2_offset;
u32 ptr0_offset;
+ u32 lbm_offset;
/* Offset where the plane's dlist was last stored in the
* hardware at vc4_crtc_atomic_flush() time.
@@ -369,6 +370,11 @@ struct vc4_plane_state {
* to enable background color fill.
*/
bool needs_bg_fill;
+
+ /* Mark the dlist as initialized. Useful to avoid initializing it twice
+ * when async update is not possible.
+ */
+ bool dlist_initialized;
};
static inline struct vc4_plane_state *
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 5b22e996af6c..aea2b8dfec17 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -635,7 +635,7 @@ retry:
for (i = 0; i < exec->bo_count; i++) {
bo = to_vc4_bo(&exec->bo[i]->base);
- ret = reservation_object_reserve_shared(bo->resv);
+ ret = reservation_object_reserve_shared(bo->resv, 1);
if (ret) {
vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
return ret;
@@ -681,7 +681,7 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec,
exec->fence = &fence->base;
if (out_sync)
- drm_syncobj_replace_fence(out_sync, 0, exec->fence);
+ drm_syncobj_replace_fence(out_sync, exec->fence);
vc4_update_bo_seqnos(exec, seqno);
@@ -1173,7 +1173,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
if (args->in_sync) {
ret = drm_syncobj_find_fence(file_priv, args->in_sync,
- 0, &in_fence);
+ 0, 0, &in_fence);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index c6635f23918a..75db62cbe468 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -129,12 +129,12 @@ static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
{
- if (dst > src)
+ if (dst == src)
+ return VC4_SCALING_NONE;
+ if (3 * dst >= 2 * src)
return VC4_SCALING_PPF;
- else if (dst < src)
- return VC4_SCALING_TPZ;
else
- return VC4_SCALING_NONE;
+ return VC4_SCALING_TPZ;
}
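
vc4_get_scaling_mode() now keys on the scale ratio rather than just up- versus down-scaling: PPF (polyphase filtering) is used down to a 2/3 ratio and TPZ (trapezoid) below that, with the comparison kept in integers. A worked check, assuming the helper as rewritten above:

    assert(vc4_get_scaling_mode(100, 100) == VC4_SCALING_NONE);
    assert(vc4_get_scaling_mode(100, 150) == VC4_SCALING_PPF);  /* upscale */
    assert(vc4_get_scaling_mode(100,  67) == VC4_SCALING_PPF);  /* 201 >= 200 */
    assert(vc4_get_scaling_mode(100,  66) == VC4_SCALING_TPZ);  /* 198 <  200 */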
static bool plane_enabled(struct drm_plane_state *state)
@@ -154,6 +154,7 @@ static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane
return NULL;
memset(&vc4_state->lbm, 0, sizeof(vc4_state->lbm));
+ vc4_state->dlist_initialized = 0;
__drm_atomic_helper_plane_duplicate_state(plane, &vc4_state->base);
@@ -259,37 +260,51 @@ static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
- struct drm_plane *plane = state->plane;
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0);
u32 subpixel_src_mask = (1 << 16) - 1;
u32 format = fb->format->format;
int num_planes = fb->format->num_planes;
- u32 h_subsample = 1;
- u32 v_subsample = 1;
- int i;
+ struct drm_crtc_state *crtc_state;
+ u32 h_subsample, v_subsample;
+ int i, ret;
+
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ if (!crtc_state) {
+ DRM_DEBUG_KMS("Invalid crtc state\n");
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state, 1,
+ INT_MAX, true, true);
+ if (ret)
+ return ret;
+
+ h_subsample = drm_format_horz_chroma_subsampling(format);
+ v_subsample = drm_format_vert_chroma_subsampling(format);
for (i = 0; i < num_planes; i++)
vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
/* We don't support subpixel source positioning for scaling. */
- if ((state->src_x & subpixel_src_mask) ||
- (state->src_y & subpixel_src_mask) ||
- (state->src_w & subpixel_src_mask) ||
- (state->src_h & subpixel_src_mask)) {
+ if ((state->src.x1 & subpixel_src_mask) ||
+ (state->src.x2 & subpixel_src_mask) ||
+ (state->src.y1 & subpixel_src_mask) ||
+ (state->src.y2 & subpixel_src_mask)) {
return -EINVAL;
}
- vc4_state->src_x = state->src_x >> 16;
- vc4_state->src_y = state->src_y >> 16;
- vc4_state->src_w[0] = state->src_w >> 16;
- vc4_state->src_h[0] = state->src_h >> 16;
+ vc4_state->src_x = state->src.x1 >> 16;
+ vc4_state->src_y = state->src.y1 >> 16;
+ vc4_state->src_w[0] = (state->src.x2 - state->src.x1) >> 16;
+ vc4_state->src_h[0] = (state->src.y2 - state->src.y1) >> 16;
- vc4_state->crtc_x = state->crtc_x;
- vc4_state->crtc_y = state->crtc_y;
- vc4_state->crtc_w = state->crtc_w;
- vc4_state->crtc_h = state->crtc_h;
+ vc4_state->crtc_x = state->dst.x1;
+ vc4_state->crtc_y = state->dst.y1;
+ vc4_state->crtc_w = state->dst.x2 - state->dst.x1;
+ vc4_state->crtc_h = state->dst.y2 - state->dst.y1;
vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
vc4_state->crtc_w);
@@ -302,8 +317,6 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
if (num_planes > 1) {
vc4_state->is_yuv = true;
- h_subsample = drm_format_horz_chroma_subsampling(format);
- v_subsample = drm_format_vert_chroma_subsampling(format);
vc4_state->src_w[1] = vc4_state->src_w[0] / h_subsample;
vc4_state->src_h[1] = vc4_state->src_h[0] / v_subsample;
@@ -314,52 +327,20 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_get_scaling_mode(vc4_state->src_h[1],
vc4_state->crtc_h);
- /* YUV conversion requires that horizontal scaling be enabled,
- * even on a plane that's otherwise 1:1. Looks like only PPF
- * works in that case, so let's pick that one.
+ /* YUV conversion requires that horizontal scaling be enabled
+ * on the UV plane even if vc4_get_scaling_mode() returned
+ * VC4_SCALING_NONE (which can happen when the down-scaling
+ * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
+ * case.
*/
- if (vc4_state->is_unity)
- vc4_state->x_scaling[0] = VC4_SCALING_PPF;
+ if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
+ vc4_state->x_scaling[1] = VC4_SCALING_PPF;
} else {
+ vc4_state->is_yuv = false;
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
vc4_state->y_scaling[1] = VC4_SCALING_NONE;
}
- /* No configuring scaling on the cursor plane, since it gets
- non-vblank-synced updates, and scaling requires requires
- LBM changes which have to be vblank-synced.
- */
- if (plane->type == DRM_PLANE_TYPE_CURSOR && !vc4_state->is_unity)
- return -EINVAL;
-
- /* Clamp the on-screen start x/y to 0. The hardware doesn't
- * support negative y, and negative x wastes bandwidth.
- */
- if (vc4_state->crtc_x < 0) {
- for (i = 0; i < num_planes; i++) {
- u32 cpp = fb->format->cpp[i];
- u32 subs = ((i == 0) ? 1 : h_subsample);
-
- vc4_state->offsets[i] += (cpp *
- (-vc4_state->crtc_x) / subs);
- }
- vc4_state->src_w[0] += vc4_state->crtc_x;
- vc4_state->src_w[1] += vc4_state->crtc_x / h_subsample;
- vc4_state->crtc_x = 0;
- }
-
- if (vc4_state->crtc_y < 0) {
- for (i = 0; i < num_planes; i++) {
- u32 subs = ((i == 0) ? 1 : v_subsample);
-
- vc4_state->offsets[i] += (fb->pitches[i] *
- (-vc4_state->crtc_y) / subs);
- }
- vc4_state->src_h[0] += vc4_state->crtc_y;
- vc4_state->src_h[1] += vc4_state->crtc_y / v_subsample;
- vc4_state->crtc_y = 0;
- }
-
return 0;
}
@@ -400,10 +381,13 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
u32 pix_per_line = max(vc4_state->src_w[0], (u32)vc4_state->crtc_w);
u32 lbm;
+ /* LBM is not needed when there's no vertical scaling. */
+ if (vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[1] == VC4_SCALING_NONE)
+ return 0;
+
if (!vc4_state->is_yuv) {
- if (vc4_state->is_unity)
- return 0;
- else if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
+ if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ)
lbm = pix_per_line * 8;
else {
/* In special cases, this multiplier might be 12. */
@@ -454,6 +438,43 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state,
}
}
+static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ unsigned long irqflags;
+ u32 lbm_size;
+
+ lbm_size = vc4_lbm_size(state);
+ if (!lbm_size)
+ return 0;
+
+ if (WARN_ON(!vc4_state->lbm_offset))
+ return -EINVAL;
+
+ /* Allocate the LBM memory that the HVS will use for temporary
+ * storage due to our scaling/format conversion.
+ */
+ if (!vc4_state->lbm.allocated) {
+ int ret;
+
+ spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
+ ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
+ &vc4_state->lbm,
+ lbm_size, 32, 0, 0);
+ spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
+
+ if (ret)
+ return ret;
+ } else {
+ WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
+ }
+
+ vc4_state->dlist[vc4_state->lbm_offset] = vc4_state->lbm.start;
+
+ return 0;
+}
+
/* Writes out a full display list for an active plane to the plane's
* private dlist state.
*/
@@ -467,34 +488,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
u64 base_format_mod = fourcc_mod_broadcom_mod(fb->modifier);
int num_planes = drm_format_num_planes(format->drm);
+ u32 h_subsample, v_subsample;
bool mix_plane_alpha;
bool covers_screen;
u32 scl0, scl1, pitch0;
- u32 lbm_size, tiling;
- unsigned long irqflags;
+ u32 tiling;
u32 hvs_format = format->hvs;
int ret, i;
- ret = vc4_plane_setup_clipping_and_scaling(state);
- if (ret)
- return ret;
-
- /* Allocate the LBM memory that the HVS will use for temporary
- * storage due to our scaling/format conversion.
- */
- lbm_size = vc4_lbm_size(state);
- if (lbm_size) {
- if (!vc4_state->lbm.allocated) {
- spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags);
- ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
- &vc4_state->lbm,
- lbm_size, 32, 0, 0);
- spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
- } else {
- WARN_ON_ONCE(lbm_size != vc4_state->lbm.size);
- }
- }
+ if (vc4_state->dlist_initialized)
+ return 0;
+ ret = vc4_plane_setup_clipping_and_scaling(state);
if (ret)
return ret;
@@ -512,26 +517,77 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
scl1 = vc4_get_scl_field(state, 0);
}
+ h_subsample = drm_format_horz_chroma_subsampling(format->drm);
+ v_subsample = drm_format_vert_chroma_subsampling(format->drm);
+
switch (base_format_mod) {
case DRM_FORMAT_MOD_LINEAR:
tiling = SCALER_CTL0_TILING_LINEAR;
pitch0 = VC4_SET_FIELD(fb->pitches[0], SCALER_SRC_PITCH);
+
+ /* Adjust the base pointer to the first pixel to be scanned
+ * out.
+ */
+ for (i = 0; i < num_planes; i++) {
+ vc4_state->offsets[i] += vc4_state->src_y /
+ (i ? v_subsample : 1) *
+ fb->pitches[i];
+ vc4_state->offsets[i] += vc4_state->src_x /
+ (i ? h_subsample : 1) *
+ fb->format->cpp[i];
+ }
+
break;
case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED: {
- /* For T-tiled, the FB pitch is "how many bytes from
- * one row to the next, such that pitch * tile_h ==
- * tile_size * tiles_per_row."
- */
u32 tile_size_shift = 12; /* T tiles are 4kb */
+ /* Whole-tile offsets, mostly for setting the pitch. */
+ u32 tile_w_shift = fb->format->cpp[0] == 2 ? 6 : 5;
u32 tile_h_shift = 5; /* 16 and 32bpp are 32 pixels high */
+ u32 tile_w_mask = (1 << tile_w_shift) - 1;
+ /* The height mask on 32-bit-per-pixel tiles is 63, i.e. twice
+ * the height (in pixels) of a 4k tile.
+ */
+ u32 tile_h_mask = (2 << tile_h_shift) - 1;
+ /* For T-tiled, the FB pitch is "how many bytes from one row to
+ * the next, such that
+ *
+ * pitch * tile_h == tile_size * tiles_per_row
+ */
u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
+ u32 tiles_l = vc4_state->src_x >> tile_w_shift;
+ u32 tiles_r = tiles_w - tiles_l;
+ u32 tiles_t = vc4_state->src_y >> tile_h_shift;
+ /* Intra-tile offsets, which modify the base address (the
+ * SCALER_PITCH0_TILE_Y_OFFSET tells HVS how to walk from that
+ * base address).
+ */
+ u32 tile_y = (vc4_state->src_y >> 4) & 1;
+ u32 subtile_y = (vc4_state->src_y >> 2) & 3;
+ u32 utile_y = vc4_state->src_y & 3;
+ u32 x_off = vc4_state->src_x & tile_w_mask;
+ u32 y_off = vc4_state->src_y & tile_h_mask;
tiling = SCALER_CTL0_TILING_256B_OR_T;
+ pitch0 = (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) |
+ VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) |
+ VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) |
+ VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R));
+ vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift);
+ vc4_state->offsets[0] += subtile_y << 8;
+ vc4_state->offsets[0] += utile_y << 4;
+
+ /* Rows of tiles alternate left-to-right and right-to-left. */
+ if (tiles_t & 1) {
+ pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR;
+ vc4_state->offsets[0] += (tiles_w - tiles_l) <<
+ tile_size_shift;
+ vc4_state->offsets[0] -= (1 + !tile_y) << 10;
+ } else {
+ vc4_state->offsets[0] += tiles_l << tile_size_shift;
+ vc4_state->offsets[0] += tile_y << 10;
+ }
- pitch0 = (VC4_SET_FIELD(0, SCALER_PITCH0_TILE_Y_OFFSET) |
- VC4_SET_FIELD(0, SCALER_PITCH0_TILE_WIDTH_L) |
- VC4_SET_FIELD(tiles_w, SCALER_PITCH0_TILE_WIDTH_R));
break;
}
@@ -667,15 +723,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
}
+ vc4_state->lbm_offset = 0;
+
if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
- /* LBM Base Address. */
+ /* Reserve a slot for the LBM Base Address. The real value will
+ * be set when calling vc4_plane_allocate_lbm().
+ */
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
- vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
- vc4_dlist_write(vc4_state, vc4_state->lbm.start);
- }
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE)
+ vc4_state->lbm_offset = vc4_state->dlist_count++;
if (num_planes > 1) {
/* Emit Cb/Cr as channel 0 and Y as channel
@@ -721,6 +780,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen ||
state->alpha != DRM_BLEND_ALPHA_OPAQUE;
+ /* Flag the dlist as initialized to avoid checking it twice in case
+ * the async update check already called vc4_plane_mode_set() and
+ * decided to fall back to a sync update because async update was not
+ * possible.
+ */
+ vc4_state->dlist_initialized = 1;
+
return 0;
}
@@ -735,13 +801,18 @@ static int vc4_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ int ret;
vc4_state->dlist_count = 0;
- if (plane_enabled(state))
- return vc4_plane_mode_set(plane, state);
- else
+ if (!plane_enabled(state))
return 0;
+
+ ret = vc4_plane_mode_set(plane, state);
+ if (ret)
+ return ret;
+
+ return vc4_plane_allocate_lbm(state);
}
static void vc4_plane_atomic_update(struct drm_plane *plane,
@@ -809,30 +880,50 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
{
struct vc4_plane_state *vc4_state, *new_vc4_state;
- if (plane->state->fb != state->fb) {
- vc4_plane_async_set_fb(plane, state->fb);
- drm_atomic_set_fb_for_plane(plane->state, state->fb);
- }
-
- /* Set the cursor's position on the screen. This is the
- * expected change from the drm_mode_cursor_universal()
- * helper.
- */
+ drm_atomic_set_fb_for_plane(plane->state, state->fb);
plane->state->crtc_x = state->crtc_x;
plane->state->crtc_y = state->crtc_y;
-
- /* Allow changing the start position within the cursor BO, if
- * that matters.
- */
+ plane->state->crtc_w = state->crtc_w;
+ plane->state->crtc_h = state->crtc_h;
plane->state->src_x = state->src_x;
plane->state->src_y = state->src_y;
-
- /* Update the display list based on the new crtc_x/y. */
- vc4_plane_atomic_check(plane, state);
+ plane->state->src_w = state->src_w;
+ plane->state->src_h = state->src_h;
+ plane->state->alpha = state->alpha;
+ plane->state->pixel_blend_mode = state->pixel_blend_mode;
+ plane->state->rotation = state->rotation;
+ plane->state->zpos = state->zpos;
+ plane->state->normalized_zpos = state->normalized_zpos;
+ plane->state->color_encoding = state->color_encoding;
+ plane->state->color_range = state->color_range;
+ plane->state->src = state->src;
+ plane->state->dst = state->dst;
+ plane->state->visible = state->visible;
new_vc4_state = to_vc4_plane_state(state);
vc4_state = to_vc4_plane_state(plane->state);
+ vc4_state->crtc_x = new_vc4_state->crtc_x;
+ vc4_state->crtc_y = new_vc4_state->crtc_y;
+ vc4_state->crtc_h = new_vc4_state->crtc_h;
+ vc4_state->crtc_w = new_vc4_state->crtc_w;
+ vc4_state->src_x = new_vc4_state->src_x;
+ vc4_state->src_y = new_vc4_state->src_y;
+ memcpy(vc4_state->src_w, new_vc4_state->src_w,
+ sizeof(vc4_state->src_w));
+ memcpy(vc4_state->src_h, new_vc4_state->src_h,
+ sizeof(vc4_state->src_h));
+ memcpy(vc4_state->x_scaling, new_vc4_state->x_scaling,
+ sizeof(vc4_state->x_scaling));
+ memcpy(vc4_state->y_scaling, new_vc4_state->y_scaling,
+ sizeof(vc4_state->y_scaling));
+ vc4_state->is_unity = new_vc4_state->is_unity;
+ vc4_state->is_yuv = new_vc4_state->is_yuv;
+ memcpy(vc4_state->offsets, new_vc4_state->offsets,
+ sizeof(vc4_state->offsets));
+ vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill;
+
/* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
vc4_state->dlist[vc4_state->pos0_offset] =
new_vc4_state->dlist[vc4_state->pos0_offset];
@@ -856,13 +947,38 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- /* No configuring new scaling in the fast path. */
- if (plane->state->crtc_w != state->crtc_w ||
- plane->state->crtc_h != state->crtc_h ||
- plane->state->src_w != state->src_w ||
- plane->state->src_h != state->src_h)
+ struct vc4_plane_state *old_vc4_state, *new_vc4_state;
+ int ret;
+ u32 i;
+
+ ret = vc4_plane_mode_set(plane, state);
+ if (ret)
+ return ret;
+
+ old_vc4_state = to_vc4_plane_state(plane->state);
+ new_vc4_state = to_vc4_plane_state(state);
+ if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
+ old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
+ old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
+ old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
+ vc4_lbm_size(plane->state) != vc4_lbm_size(state))
return -EINVAL;
+ /* Only the pos0, pos2 and ptr0 dwords can be updated in an async
+ * update; if anything else has changed, fall back to a sync update.
+ */
+ for (i = 0; i < new_vc4_state->dlist_count; i++) {
+ if (i == new_vc4_state->pos0_offset ||
+ i == new_vc4_state->pos2_offset ||
+ i == new_vc4_state->ptr0_offset ||
+ (new_vc4_state->lbm_offset &&
+ i == new_vc4_state->lbm_offset))
+ continue;
+
+ if (new_vc4_state->dlist[i] != old_vc4_state->dlist[i])
+ return -EINVAL;
+ }
+
return 0;
}
@@ -914,7 +1030,6 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = {
static void vc4_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
@@ -980,7 +1095,6 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
struct drm_plane *plane = NULL;
struct vc4_plane *vc4_plane;
u32 formats[ARRAY_SIZE(hvs_formats)];
- u32 num_formats = 0;
int ret = 0;
unsigned i;
static const uint64_t modifiers[] = {
@@ -997,20 +1111,13 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
if (!vc4_plane)
return ERR_PTR(-ENOMEM);
- for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
- /* Don't allow YUV in cursor planes, since that means
- * tuning on the scaler, which we don't allow for the
- * cursor.
- */
- if (type != DRM_PLANE_TYPE_CURSOR ||
- hvs_formats[i].hvs < HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE) {
- formats[num_formats++] = hvs_formats[i].drm;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(hvs_formats); i++)
+ formats[i] = hvs_formats[i].drm;
+
plane = &vc4_plane->base;
ret = drm_universal_plane_init(dev, plane, 0,
&vc4_plane_funcs,
- formats, num_formats,
+ formats, ARRAY_SIZE(formats),
modifiers, type, NULL);
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index d6864fa4bd14..931088014272 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -1037,14 +1037,18 @@ enum hvs_pixel_format {
#define SCALER_TILE_HEIGHT_MASK VC4_MASK(15, 0)
#define SCALER_TILE_HEIGHT_SHIFT 0
+/* Common PITCH0 fields */
+#define SCALER_PITCH0_SINK_PIX_MASK VC4_MASK(31, 26)
+#define SCALER_PITCH0_SINK_PIX_SHIFT 26
+
/* PITCH0 fields for T-tiled. */
#define SCALER_PITCH0_TILE_WIDTH_L_MASK VC4_MASK(22, 16)
#define SCALER_PITCH0_TILE_WIDTH_L_SHIFT 16
#define SCALER_PITCH0_TILE_LINE_DIR BIT(15)
#define SCALER_PITCH0_TILE_INITIAL_LINE_DIR BIT(14)
/* Y offset within a tile. */
-#define SCALER_PITCH0_TILE_Y_OFFSET_MASK VC4_MASK(13, 7)
-#define SCALER_PITCH0_TILE_Y_OFFSET_SHIFT 7
+#define SCALER_PITCH0_TILE_Y_OFFSET_MASK VC4_MASK(13, 8)
+#define SCALER_PITCH0_TILE_Y_OFFSET_SHIFT 8
#define SCALER_PITCH0_TILE_WIDTH_R_MASK VC4_MASK(6, 0)
#define SCALER_PITCH0_TILE_WIDTH_R_SHIFT 0
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index ec6af8b920da..5930facd6d2d 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -431,7 +431,8 @@ static void vgem_release(struct drm_device *dev)
}
static struct drm_driver vgem_driver = {
- .driver_features = DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_RENDER,
.release = vgem_release,
.open = vgem_open,
.postclose = vgem_postclose,
@@ -471,31 +472,31 @@ static int __init vgem_init(void)
if (!vgem_device)
return -ENOMEM;
- ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
- if (ret)
- goto out_free;
-
vgem_device->platform =
platform_device_register_simple("vgem", -1, NULL, 0);
if (IS_ERR(vgem_device->platform)) {
ret = PTR_ERR(vgem_device->platform);
- goto out_fini;
+ goto out_free;
}
dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
DMA_BIT_MASK(64));
+ ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
+ &vgem_device->platform->dev);
+ if (ret)
+ goto out_unregister;
/* Final step: expose the device/driver to userspace */
ret = drm_dev_register(&vgem_device->drm, 0);
if (ret)
- goto out_unregister;
+ goto out_fini;
return 0;
-out_unregister:
- platform_device_unregister(vgem_device->platform);
out_fini:
drm_dev_fini(&vgem_device->drm);
+out_unregister:
+ platform_device_unregister(vgem_device->platform);
out_free:
kfree(vgem_device);
return ret;
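
The vgem reordering above follows the usual goto-based unwind discipline: register the platform device first so drm_dev_init() can take it as the parent, then unwind with labels in exactly the reverse order of whatever succeeded. A generic sketch of the idiom, with hypothetical step names:

    static int my_driver_init(void)
    {
            int ret;

            ret = step_a();         /* e.g. register the platform device */
            if (ret)
                    return ret;

            ret = step_b();         /* e.g. drm_dev_init() with A as parent */
            if (ret)
                    goto undo_a;

            ret = step_c();         /* e.g. drm_dev_register() */
            if (ret)
                    goto undo_b;

            return 0;

    undo_b:
            undo_step_b();          /* e.g. drm_dev_fini() */
    undo_a:
            undo_step_a();          /* e.g. platform_device_unregister() */
            return ret;
    }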
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index e6ee71323a66..c1c420afe2dd 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -180,7 +180,7 @@ int vgem_fence_attach_ioctl(struct drm_device *dev,
reservation_object_lock(resv, NULL);
if (arg->flags & VGEM_FENCE_WRITE)
reservation_object_add_excl_fence(resv, fence);
- else if ((ret = reservation_object_reserve_shared(resv)) == 0)
+ else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0)
reservation_object_add_shared_fence(resv, fence);
reservation_object_unlock(resv);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 8f8fed471e34..b5580b11a063 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -169,6 +169,12 @@ static int virtio_gpu_conn_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode = NULL;
int count, width, height;
+ if (output->edid) {
+ count = drm_add_edid_modes(connector, output->edid);
+ if (count)
+ return count;
+ }
+
width = le32_to_cpu(output->info.r.width);
height = le32_to_cpu(output->info.r.height);
count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
@@ -287,6 +293,8 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs);
+ if (vgdev->has_edid)
+ drm_connector_attach_edid_property(connector);
drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -378,6 +386,10 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
{
+ int i;
+
+ for (i = 0; i < vgdev->num_scanouts; ++i)
+ kfree(vgdev->outputs[i].edid);
virtio_gpu_fbdev_fini(vgdev);
drm_mode_config_cleanup(vgdev->ddev);
}
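
With VIRTIO_GPU_F_EDID negotiated, .get_modes now prefers the host-supplied EDID and only falls back to synthesized modes when none is usable. The same policy in isolation, as a hedged sketch (example_output/to_example_output are hypothetical stand-ins for the virtio output lookup; XRES_MAX/YRES_MAX are the driver's existing mode limits):

	static int example_conn_get_modes(struct drm_connector *connector)
	{
		struct example_output *output = to_example_output(connector);

		if (output->edid) {
			int count = drm_add_edid_modes(connector, output->edid);

			if (count)
				return count;
		}

		/* No usable EDID: fall back to driver-generated modes. */
		return drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
	}
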
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 757ca28ab93e..0887e0b64b9c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -53,6 +53,37 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
0,
"virtiodrmfb");
+ /*
+ * Normally the drm_dev_set_unique() call is done by core DRM.
+ * The following comment covers why virtio cannot rely on it.
+ *
+ * Unlike the other virtual GPU drivers, virtio abstracts the
+ * underlying bus type by using struct virtio_device.
+ *
+ * Hence the dev_is_pci() check, used in core DRM, will fail
+ * and the unique name returned will be the virtio_device "virtio0",
+ * while a "pci:..." one is required.
+ *
+ * A few other ideas were considered:
+ * - Extend the dev_is_pci() check [in drm_set_busid] to
+ * consider virtio.
+ * Seems like a bigger hack than what we have already.
+ *
+ * - Point drm_device::dev to the parent of the virtio_device
+ * Semantic changes:
+ * * Using the wrong device for i2c, framebuffer_alloc and
+ * prime import.
+ * Visual changes:
+ * * Helpers such as DRM_DEV_ERROR, dev_info, drm_printer,
+ * will print the wrong information.
+ *
+ * We could address the latter issues by introducing
+ * drm_device::bus_dev, ... which would be used solely for this.
+ *
+ * So for the moment keep things as-is, with a bulky comment
+ * for the next person who feels like removing this
+ * drm_dev_set_unique() quirk.
+ */
snprintf(unique, sizeof(unique), "pci:%s", pname);
ret = drm_dev_set_unique(dev, unique);
if (ret)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index d9287c144fe5..f7f32a885af7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -80,6 +80,7 @@ static unsigned int features[] = {
*/
VIRTIO_GPU_F_VIRGL,
#endif
+ VIRTIO_GPU_F_EDID,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d29f0c7c768c..1deb41d42ea4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -47,8 +47,8 @@
#define DRIVER_DATE "0"
#define DRIVER_MAJOR 0
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 1
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
/* virtgpu_drm_bus.c */
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
@@ -65,6 +65,7 @@ struct virtio_gpu_object {
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
+ bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, gem_base)
@@ -114,6 +115,7 @@ struct virtio_gpu_output {
struct drm_encoder enc;
struct virtio_gpu_display_one info;
struct virtio_gpu_update_cursor cursor;
+ struct edid *edid;
int cur_x;
int cur_y;
bool enabled;
@@ -130,6 +132,7 @@ struct virtio_gpu_framebuffer {
int x1, y1, x2, y2; /* dirty rect */
spinlock_t dirty_lock;
uint32_t hw_res_handle;
+ struct virtio_gpu_fence *fence;
};
#define to_virtio_gpu_framebuffer(x) \
container_of(x, struct virtio_gpu_framebuffer, base)
@@ -142,9 +145,6 @@ struct virtio_gpu_fbdev {
};
struct virtio_gpu_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
- bool mem_global_referenced;
struct ttm_bo_device bdev;
};
@@ -190,8 +190,7 @@ struct virtio_gpu_device {
struct kmem_cache *vbufs;
bool vqs_ready;
- struct idr resource_idr;
- spinlock_t resource_idr_lock;
+ struct ida resource_ida;
wait_queue_head_t resp_wq;
/* current display info */
@@ -200,10 +199,10 @@ struct virtio_gpu_device {
struct virtio_gpu_fence_driver fence_drv;
- struct idr ctx_id_idr;
- spinlock_t ctx_id_idr_lock;
+ struct ida ctx_id_ida;
bool has_virgl_3d;
+ bool has_edid;
struct work_struct config_changed_work;
@@ -259,11 +258,8 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
-void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
- uint32_t *resid);
-void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
+ struct virtio_gpu_object *bo,
uint32_t format,
uint32_t width,
uint32_t height);
@@ -274,7 +270,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
__le32 width, __le32 height,
__le32 x, __le32 y,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
@@ -285,8 +281,7 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t x, uint32_t y);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- uint32_t resource_id,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
@@ -298,6 +293,7 @@ int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
int idx, int version,
struct virtio_gpu_drv_cap_cache **cache_p);
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
uint32_t nlen, const char *name);
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
@@ -310,22 +306,22 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence **fence);
+ uint32_t ctx_id, struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t resource_id, uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_resource_create_3d *rc_3d,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_resource_create_3d *rc_3d);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
@@ -353,9 +349,12 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
/* virtio_gpu_fence.c */
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(
+ struct virtio_gpu_device *vgdev);
+void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence);
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
- struct virtio_gpu_fence **fence);
+ struct virtio_gpu_fence *fence);
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index cea749f4ec39..fb1cc8b2f119 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -214,7 +214,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd = {};
struct virtio_gpu_object *obj;
- uint32_t resid, format, size;
+ uint32_t format, size;
int ret;
mode_cmd.width = sizes->surface_width;
@@ -231,8 +231,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
if (IS_ERR(obj))
return PTR_ERR(obj);
- virtio_gpu_resource_id_get(vgdev, &resid);
- virtio_gpu_cmd_create_resource(vgdev, resid, format,
+ virtio_gpu_cmd_create_resource(vgdev, obj, format,
mode_cmd.width, mode_cmd.height);
ret = virtio_gpu_object_kmap(obj);
@@ -242,7 +241,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
}
/* attach the object to the resource */
- ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
+ ret = virtio_gpu_object_attach(vgdev, obj, NULL);
if (ret)
goto err_obj_attach;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 00c742a441bf..4d6826b27814 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -67,28 +67,43 @@ static const struct dma_fence_ops virtio_fence_ops = {
.timeline_value_str = virtio_timeline_value_str,
};
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+ struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
+ GFP_ATOMIC);
+ if (!fence)
+ return fence;
+
+ fence->drv = drv;
+ dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0);
+
+ return fence;
+}
+
+void virtio_gpu_fence_cleanup(struct virtio_gpu_fence *fence)
+{
+ if (!fence)
+ return;
+
+ dma_fence_put(&fence->f);
+}
+
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
unsigned long irq_flags;
- *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
- if ((*fence) == NULL)
- return -ENOMEM;
-
spin_lock_irqsave(&drv->lock, irq_flags);
- (*fence)->drv = drv;
- (*fence)->seq = ++drv->sync_seq;
- dma_fence_init(&(*fence)->f, &virtio_fence_ops, &drv->lock,
- drv->context, (*fence)->seq);
- dma_fence_get(&(*fence)->f);
- list_add_tail(&(*fence)->node, &drv->fences);
+ fence->seq = ++drv->sync_seq;
+ dma_fence_get(&fence->f);
+ list_add_tail(&fence->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
- cmd_hdr->fence_id = cpu_to_le64((*fence)->seq);
+ cmd_hdr->fence_id = cpu_to_le64(fence->seq);
return 0;
}
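
The split between virtio_gpu_fence_alloc() and virtio_gpu_fence_emit() moves allocation out of the emit path, so every command helper now takes a caller-owned fence instead of a fence out-parameter. A hedged sketch of the resulting call pattern (hypothetical call site, not a verbatim one from this series):

	struct virtio_gpu_fence *fence;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;

	/* Queue a fenced command; emit() fills in seq/fence_id. */
	virtio_gpu_cmd_transfer_to_host_2d(vgdev, bo, 0,
					   cpu_to_le32(w), cpu_to_le32(h),
					   cpu_to_le32(x), cpu_to_le32(y),
					   fence);

	/* Wait if needed, then drop the caller's reference. */
	dma_fence_wait(&fence->f, true);
	dma_fence_put(&fence->f);
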
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 82c817f37cf7..f06586393974 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -87,7 +87,6 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct virtio_gpu_object *obj;
int ret;
uint32_t pitch;
- uint32_t resid;
uint32_t format;
if (args->bpp != 32)
@@ -103,13 +102,12 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
goto fail;
format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
- virtio_gpu_resource_id_get(vgdev, &resid);
- virtio_gpu_cmd_create_resource(vgdev, resid, format,
+ obj = gem_to_virtio_gpu_obj(gobj);
+ virtio_gpu_cmd_create_resource(vgdev, obj, format,
args->width, args->height);
/* attach the object to the resource */
- obj = gem_to_virtio_gpu_obj(gobj);
- ret = virtio_gpu_object_attach(vgdev, obj, resid, NULL);
+ ret = virtio_gpu_object_attach(vgdev, obj, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index f16b875d6a46..161b80fee492 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include <drm/virtgpu_drm.h>
#include <drm/ttm/ttm_execbuf_util.h>
+#include <linux/sync_file.h>
#include "virtgpu_drv.h"
@@ -105,7 +106,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
struct drm_gem_object *gobj;
- struct virtio_gpu_fence *fence;
+ struct virtio_gpu_fence *out_fence;
struct virtio_gpu_object *qobj;
int ret;
uint32_t *bo_handles = NULL;
@@ -114,11 +115,46 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
struct ttm_validate_buffer *buflist = NULL;
int i;
struct ww_acquire_ctx ticket;
+ struct sync_file *sync_file;
+ int in_fence_fd = exbuf->fence_fd;
+ int out_fence_fd = -1;
void *buf;
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
+ if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
+ return -EINVAL;
+
+ exbuf->fence_fd = -1;
+
+ if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
+ struct dma_fence *in_fence;
+
+ in_fence = sync_file_get_fence(in_fence_fd);
+
+ if (!in_fence)
+ return -EINVAL;
+
+ /*
+ * Wait if the fence is from a foreign context, or if the fence
+ * array contains any fence from a foreign context.
+ */
+ ret = 0;
+ if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
+ ret = dma_fence_wait(in_fence, true);
+
+ dma_fence_put(in_fence);
+ if (ret)
+ return ret;
+ }
+
+ if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0)
+ return out_fence_fd;
+ }
+
INIT_LIST_HEAD(&validate_list);
if (exbuf->num_bo_handles) {
@@ -128,26 +164,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
sizeof(struct ttm_validate_buffer),
GFP_KERNEL | __GFP_ZERO);
if (!bo_handles || !buflist) {
- kvfree(bo_handles);
- kvfree(buflist);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_unused_fd;
}
user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
if (copy_from_user(bo_handles, user_bo_handles,
exbuf->num_bo_handles * sizeof(uint32_t))) {
ret = -EFAULT;
- kvfree(bo_handles);
- kvfree(buflist);
- return ret;
+ goto out_unused_fd;
}
for (i = 0; i < exbuf->num_bo_handles; i++) {
gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
if (!gobj) {
- kvfree(bo_handles);
- kvfree(buflist);
- return -ENOENT;
+ ret = -ENOENT;
+ goto out_unused_fd;
}
qobj = gem_to_virtio_gpu_obj(gobj);
@@ -156,6 +188,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
list_add(&buflist[i].head, &validate_list);
}
kvfree(bo_handles);
+ bo_handles = NULL;
}
ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
@@ -168,22 +201,48 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
ret = PTR_ERR(buf);
goto out_unresv;
}
+
+ out_fence = virtio_gpu_fence_alloc(vgdev);
+ if (!out_fence) {
+ ret = -ENOMEM;
+ goto out_memdup;
+ }
+
+ if (out_fence_fd >= 0) {
+ sync_file = sync_file_create(&out_fence->f);
+ if (!sync_file) {
+ dma_fence_put(&out_fence->f);
+ ret = -ENOMEM;
+ goto out_memdup;
+ }
+
+ exbuf->fence_fd = out_fence_fd;
+ fd_install(out_fence_fd, sync_file->file);
+ }
+
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
- vfpriv->ctx_id, &fence);
+ vfpriv->ctx_id, out_fence);
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
+ ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);
/* fence the command bo */
virtio_gpu_unref_list(&validate_list);
kvfree(buflist);
- dma_fence_put(&fence->f);
return 0;
+out_memdup:
+ kfree(buf);
out_unresv:
ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
virtio_gpu_unref_list(&validate_list);
+out_unused_fd:
+ kvfree(bo_handles);
kvfree(buflist);
+
+ if (out_fence_fd >= 0)
+ put_unused_fd(out_fence_fd);
+
return ret;
}
@@ -217,7 +276,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_resource_create *rc = data;
int ret;
- uint32_t res_id;
struct virtio_gpu_object *qobj;
struct drm_gem_object *obj;
uint32_t handle = 0;
@@ -244,8 +302,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
INIT_LIST_HEAD(&validate_list);
memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
- virtio_gpu_resource_id_get(vgdev, &res_id);
-
size = rc->size;
/* allocate a single page size object */
@@ -253,17 +309,15 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
size = PAGE_SIZE;
qobj = virtio_gpu_alloc_object(dev, size, false, false);
- if (IS_ERR(qobj)) {
- ret = PTR_ERR(qobj);
- goto fail_id;
- }
+ if (IS_ERR(qobj))
+ return PTR_ERR(qobj);
obj = &qobj->gem_base;
if (!vgdev->has_virgl_3d) {
- virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
+ virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
rc->width, rc->height);
- ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
+ ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
} else {
/* use a gem reference since unref list undoes them */
drm_gem_object_get(&qobj->gem_base);
@@ -276,7 +330,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
goto fail_unref;
}
- rc_3d.resource_id = cpu_to_le32(res_id);
+ rc_3d.resource_id = cpu_to_le32(qobj->hw_res_handle);
rc_3d.target = cpu_to_le32(rc->target);
rc_3d.format = cpu_to_le32(rc->format);
rc_3d.bind = cpu_to_le32(rc->bind);
@@ -288,17 +342,21 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
rc_3d.flags = cpu_to_le32(rc->flags);
- virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
- ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto fail_backoff;
+ }
+
+ virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
+ ret = virtio_gpu_object_attach(vgdev, qobj, fence);
if (ret) {
- ttm_eu_backoff_reservation(&ticket, &validate_list);
- goto fail_unref;
+ virtio_gpu_fence_cleanup(fence);
+ goto fail_backoff;
}
ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
}
- qobj->hw_res_handle = res_id;
-
ret = drm_gem_handle_create(file_priv, obj, &handle);
if (ret) {
@@ -311,7 +369,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
}
drm_gem_object_put_unlocked(obj);
- rc->res_handle = res_id; /* similiar to a VM address */
+ rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
rc->bo_handle = handle;
if (vgdev->has_virgl_3d) {
@@ -319,6 +377,8 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
dma_fence_put(&fence->f);
}
return 0;
+fail_backoff:
+ ttm_eu_backoff_reservation(&ticket, &validate_list);
fail_unref:
if (vgdev->has_virgl_3d) {
virtio_gpu_unref_list(&validate_list);
@@ -326,8 +386,6 @@ fail_unref:
}
//fail_obj:
// drm_gem_object_handle_unreference_unlocked(obj);
-fail_id:
- virtio_gpu_resource_id_put(vgdev, res_id);
return ret;
}
@@ -383,10 +441,16 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
goto out_unres;
convert_to_hw_box(&box, &args->box);
+
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto out_unres;
+ }
virtio_gpu_cmd_transfer_from_host_3d
(vgdev, qobj->hw_res_handle,
vfpriv->ctx_id, offset, args->level,
- &box, &fence);
+ &box, fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
@@ -432,10 +496,15 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
(vgdev, qobj, offset,
box.w, box.h, box.x, box.y, NULL);
} else {
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto out_unres;
+ }
virtio_gpu_cmd_transfer_to_host_3d
(vgdev, qobj,
vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &box, &fence);
+ args->level, &box, fence);
reservation_object_add_excl_fence(qobj->tbo.resv,
&fence->f);
dma_fence_put(&fence->f);
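
From userspace, the two new EXECBUF flags wire explicit fencing through sync_file file descriptors: FENCE_FD_IN hands the kernel a fence to wait on before the submission runs, and FENCE_FD_OUT returns a fence fd that signals on completion. A hedged caller-side sketch, assuming the uapi names from include/uapi/drm/virtgpu_drm.h:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/virtgpu_drm.h>

	/* Returns the out-fence fd on success, -1 on error. */
	static int submit_with_fences(int drm_fd, void *cmds, uint32_t size,
				      int in_fence_fd)
	{
		struct drm_virtgpu_execbuffer eb = {
			.flags = VIRTGPU_EXECBUF_FENCE_FD_IN |
				 VIRTGPU_EXECBUF_FENCE_FD_OUT,
			.size = size,
			.command = (uintptr_t)cmds,
			.fence_fd = in_fence_fd,
		};

		if (ioctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb) < 0)
			return -1;

		return eb.fence_fd;	/* installed by the kernel */
	}
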
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 65060c08522d..3af6181c05a8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -44,6 +44,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
virtio_cread(vgdev->vdev, struct virtio_gpu_config,
events_read, &events_read);
if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
drm_helper_hpd_irq_event(vgdev->ddev);
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
@@ -52,39 +54,23 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
events_clear, &events_clear);
}
-static void virtio_gpu_ctx_id_get(struct virtio_gpu_device *vgdev,
- uint32_t *resid)
+static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
+ uint32_t nlen, const char *name)
{
- int handle;
-
- idr_preload(GFP_KERNEL);
- spin_lock(&vgdev->ctx_id_idr_lock);
- handle = idr_alloc(&vgdev->ctx_id_idr, NULL, 1, 0, 0);
- spin_unlock(&vgdev->ctx_id_idr_lock);
- idr_preload_end();
- *resid = handle;
-}
+ int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
-static void virtio_gpu_ctx_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
-{
- spin_lock(&vgdev->ctx_id_idr_lock);
- idr_remove(&vgdev->ctx_id_idr, id);
- spin_unlock(&vgdev->ctx_id_idr_lock);
-}
-
-static void virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
- uint32_t nlen, const char *name,
- uint32_t *ctx_id)
-{
- virtio_gpu_ctx_id_get(vgdev, ctx_id);
- virtio_gpu_cmd_context_create(vgdev, *ctx_id, nlen, name);
+ if (handle < 0)
+ return handle;
+ handle += 1;
+ virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
+ return handle;
}
static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t ctx_id)
{
virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
- virtio_gpu_ctx_id_put(vgdev, ctx_id);
+ ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
}
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
@@ -151,10 +137,8 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
vgdev->dev = dev->dev;
spin_lock_init(&vgdev->display_info_lock);
- spin_lock_init(&vgdev->ctx_id_idr_lock);
- idr_init(&vgdev->ctx_id_idr);
- spin_lock_init(&vgdev->resource_idr_lock);
- idr_init(&vgdev->resource_idr);
+ ida_init(&vgdev->ctx_id_ida);
+ ida_init(&vgdev->resource_ida);
init_waitqueue_head(&vgdev->resp_wq);
virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func);
virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func);
@@ -174,6 +158,10 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
#else
DRM_INFO("virgl 3d acceleration not supported by guest\n");
#endif
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+ vgdev->has_edid = true;
+ DRM_INFO("EDID support available.\n");
+ }
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
@@ -219,6 +207,8 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
5 * HZ);
@@ -271,7 +261,7 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv;
- uint32_t id;
+ int id;
char dbgname[TASK_COMM_LEN];
/* can't create contexts without 3d renderer */
@@ -284,7 +274,11 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
return -ENOMEM;
get_task_comm(dbgname, current);
- virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname, &id);
+ id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
+ if (id < 0) {
+ kfree(vfpriv);
+ return id;
+ }
vfpriv->ctx_id = id;
file->driver_priv = vfpriv;
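
The idr-plus-spinlock pair is replaced by an ida, which handles its own locking. Because both the context and resource IDs treat 0 as reserved, the code biases the 0-based ida value by one. The idiom in isolation, as a sketch:

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	/* Hand out 1-based handles, keeping 0 for "no handle". */
	static int example_handle_get(void)
	{
		int id = ida_alloc(&example_ida, GFP_KERNEL);

		return id < 0 ? id : id + 1;
	}

	static void example_handle_put(int handle)
	{
		ida_free(&example_ida, handle - 1);
	}
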
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index eca765537470..f39a183d59c2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -25,6 +25,23 @@
#include "virtgpu_drv.h"
+static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+ uint32_t *resid)
+{
+ int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+
+ if (handle < 0)
+ return handle;
+
+ *resid = handle + 1;
+ return 0;
+}
+
+static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
+{
+ ida_free(&vgdev->resource_ida, id - 1);
+}
+
static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct virtio_gpu_object *bo;
@@ -33,13 +50,14 @@ static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
bo = container_of(tbo, struct virtio_gpu_object, tbo);
vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
- if (bo->hw_res_handle)
+ if (bo->created)
virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
if (bo->pages)
virtio_gpu_object_free_sg_table(bo);
if (bo->vmap)
virtio_gpu_object_kunmap(bo);
drm_gem_object_release(&bo->gem_base);
+ virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
kfree(bo);
}
@@ -81,9 +99,15 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
+ ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
+ if (ret < 0) {
+ kfree(bo);
+ return ret;
+ }
size = roundup(size, PAGE_SIZE);
ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
if (ret != 0) {
+ virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
kfree(bo);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a9f4ae7d4483..ead5c53d4e21 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -137,6 +137,41 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_h >> 16);
}
+static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_framebuffer *vgfb;
+ struct virtio_gpu_object *bo;
+
+ if (!new_state->fb)
+ return 0;
+
+ vgfb = to_virtio_gpu_framebuffer(new_state->fb);
+ bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
+ if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
+ vgfb->fence = virtio_gpu_fence_alloc(vgdev);
+ if (!vgfb->fence)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct virtio_gpu_framebuffer *vgfb;
+
+ if (!plane->state->fb)
+ return;
+
+ vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
+ if (vgfb->fence)
+ virtio_gpu_fence_cleanup(vgfb->fence);
+}
+
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -144,7 +179,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
struct virtio_gpu_framebuffer *vgfb;
- struct virtio_gpu_fence *fence = NULL;
struct virtio_gpu_object *bo = NULL;
uint32_t handle;
int ret = 0;
@@ -170,13 +204,13 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
(vgdev, bo, 0,
cpu_to_le32(plane->state->crtc_w),
cpu_to_le32(plane->state->crtc_h),
- 0, 0, &fence);
+ 0, 0, vgfb->fence);
ret = virtio_gpu_object_reserve(bo, false);
if (!ret) {
reservation_object_add_excl_fence(bo->tbo.resv,
- &fence->f);
- dma_fence_put(&fence->f);
- fence = NULL;
+ &vgfb->fence->f);
+ dma_fence_put(&vgfb->fence->f);
+ vgfb->fence = NULL;
virtio_gpu_object_unreserve(bo);
virtio_gpu_object_wait(bo, false);
}
@@ -218,6 +252,8 @@ static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
};
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
+ .prepare_fb = virtio_gpu_cursor_prepare_fb,
+ .cleanup_fb = virtio_gpu_cursor_cleanup_fb,
.atomic_check = virtio_gpu_plane_atomic_check,
.atomic_update = virtio_gpu_cursor_plane_update,
};
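
The cursor plane now threads its fence through the atomic helper callbacks instead of allocating it mid-update. The hand-off, summarized (same shape as the hunks above):

	/*
	 * .prepare_fb     vgfb->fence = virtio_gpu_fence_alloc(vgdev)
	 *                 (only for dumb BOs when the fb actually changes)
	 * .atomic_update  emit the transfer with vgfb->fence, attach it to
	 *                 the BO's reservation object, then put and clear it
	 * .cleanup_fb     virtio_gpu_fence_cleanup() drops a fence the
	 *                 update never consumed (e.g. a failed commit)
	 */
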
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index e3152d45c5f1..4bfbf25fabff 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -50,62 +50,6 @@ virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
return vgdev;
}
-static int virtio_gpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void virtio_gpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
-{
- struct drm_global_reference *global_ref;
- int r;
-
- vgdev->mman.mem_global_referenced = false;
- global_ref = &vgdev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &virtio_gpu_ttm_mem_global_init;
- global_ref->release = &virtio_gpu_ttm_mem_global_release;
-
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- return r;
- }
-
- vgdev->mman.bo_global_ref.mem_glob =
- vgdev->mman.mem_global_ref.object;
- global_ref = &vgdev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r != 0) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&vgdev->mman.mem_global_ref);
- return r;
- }
-
- vgdev->mman.mem_global_referenced = true;
- return 0;
-}
-
-static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
-{
- if (vgdev->mman.mem_global_referenced) {
- drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
- drm_global_item_unref(&vgdev->mman.mem_global_ref);
- vgdev->mman.mem_global_referenced = false;
- }
-}
-
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
@@ -347,8 +291,7 @@ static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
} else if (new_mem->placement & TTM_PL_FLAG_TT) {
if (bo->hw_res_handle) {
- virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
- NULL);
+ virtio_gpu_object_attach(vgdev, bo, NULL);
}
}
}
@@ -383,12 +326,8 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
{
int r;
- r = virtio_gpu_ttm_global_init(vgdev);
- if (r)
- return r;
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&vgdev->mman.bdev,
- vgdev->mman.bo_global_ref.ref.object,
&virtio_gpu_bo_driver,
vgdev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, 0);
@@ -407,13 +346,11 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
err_mm_init:
ttm_bo_device_release(&vgdev->mman.bdev);
err_dev_init:
- virtio_gpu_ttm_global_fini(vgdev);
return r;
}
void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
{
ttm_bo_device_release(&vgdev->mman.bdev);
- virtio_gpu_ttm_global_fini(vgdev);
DRM_INFO("virtio_gpu: ttm finalized\n");
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 4e2e037aed34..e27c4aedb809 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -38,26 +38,6 @@
+ MAX_INLINE_CMD_SIZE \
+ MAX_INLINE_RESP_SIZE)
-void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
- uint32_t *resid)
-{
- int handle;
-
- idr_preload(GFP_KERNEL);
- spin_lock(&vgdev->resource_idr_lock);
- handle = idr_alloc(&vgdev->resource_idr, NULL, 1, 0, GFP_NOWAIT);
- spin_unlock(&vgdev->resource_idr_lock);
- idr_preload_end();
- *resid = handle;
-}
-
-void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
-{
- spin_lock(&vgdev->resource_idr_lock);
- idr_remove(&vgdev->resource_idr, id);
- spin_unlock(&vgdev->resource_idr_lock);
-}
-
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
struct drm_device *dev = vq->vdev->priv;
@@ -98,10 +78,9 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_vbuffer *vbuf;
- vbuf = kmem_cache_alloc(vgdev->vbufs, GFP_KERNEL);
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
if (!vbuf)
return ERR_PTR(-ENOMEM);
- memset(vbuf, 0, VBUFFER_SIZE);
BUG_ON(size > MAX_INLINE_CMD_SIZE);
vbuf->buf = (void *)vbuf + sizeof(*vbuf);
@@ -319,7 +298,7 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
struct virtio_gpu_ctrl_hdr *hdr,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
int rc;
@@ -388,7 +367,7 @@ retry:
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
+ struct virtio_gpu_object *bo,
uint32_t format,
uint32_t width,
uint32_t height)
@@ -400,12 +379,13 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
memset(cmd_p, 0, sizeof(*cmd_p));
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
cmd_p->format = cpu_to_le32(format);
cmd_p->width = cpu_to_le32(width);
cmd_p->height = cpu_to_le32(height);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ bo->created = true;
}
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
@@ -425,7 +405,7 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_detach_backing *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -487,7 +467,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
__le32 width, __le32 height,
__le32 x, __le32 y,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -517,7 +497,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
struct virtio_gpu_mem_entry *ents,
uint32_t nents,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_attach_backing *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -604,6 +584,45 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
wake_up(&vgdev->resp_wq);
}
+static int virtio_get_edid_block(void *data, u8 *buf,
+ unsigned int block, size_t len)
+{
+ struct virtio_gpu_resp_edid *resp = data;
+ size_t start = block * EDID_LENGTH;
+
+ if (start + len > le32_to_cpu(resp->size))
+ return -1;
+ memcpy(buf, resp->edid + start, len);
+ return 0;
+}
+
+static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_cmd_get_edid *cmd =
+ (struct virtio_gpu_cmd_get_edid *)vbuf->buf;
+ struct virtio_gpu_resp_edid *resp =
+ (struct virtio_gpu_resp_edid *)vbuf->resp_buf;
+ uint32_t scanout = le32_to_cpu(cmd->scanout);
+ struct virtio_gpu_output *output;
+ struct edid *new_edid, *old_edid;
+
+ if (scanout >= vgdev->num_scanouts)
+ return;
+ output = vgdev->outputs + scanout;
+
+ new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
+
+ spin_lock(&vgdev->display_info_lock);
+ old_edid = output->edid;
+ output->edid = new_edid;
+ drm_connector_update_edid_property(&output->conn, output->edid);
+ spin_unlock(&vgdev->display_info_lock);
+
+ kfree(old_edid);
+ wake_up(&vgdev->resp_wq);
+}
+
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
struct virtio_gpu_ctrl_hdr *cmd_p;
@@ -706,6 +725,34 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
return 0;
}
+int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
+{
+ struct virtio_gpu_cmd_get_edid *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ void *resp_buf;
+ int scanout;
+
+ if (WARN_ON(!vgdev->has_edid))
+ return -EINVAL;
+
+ for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
+ resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
+ GFP_KERNEL);
+ if (!resp_buf)
+ return -ENOMEM;
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
+ sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
+ resp_buf);
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
+ cmd_p->scanout = cpu_to_le32(scanout);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ }
+
+ return 0;
+}
+
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
uint32_t nlen, const char *name)
{
@@ -772,8 +819,8 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_resource_create_3d *rc_3d,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_resource_create_3d *rc_3d)
{
struct virtio_gpu_resource_create_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -785,7 +832,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
cmd_p->hdr.flags = 0;
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ bo->created = true;
}
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
@@ -793,7 +841,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -821,7 +869,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t resource_id, uint32_t ctx_id,
uint64_t offset, uint32_t level,
struct virtio_gpu_box *box,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -841,7 +889,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void *data, uint32_t data_size,
- uint32_t ctx_id, struct virtio_gpu_fence **fence)
+ uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
struct virtio_gpu_cmd_submit *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -861,14 +909,16 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- uint32_t resource_id,
- struct virtio_gpu_fence **fence)
+ struct virtio_gpu_fence *fence)
{
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_mem_entry *ents;
struct scatterlist *sg;
int si, nents;
+ if (!obj->created)
+ return 0;
+
if (!obj->pages) {
int ret;
@@ -902,10 +952,9 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
ents[si].padding = 0;
}
- virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
+ virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
ents, nents,
fence);
- obj->hw_res_handle = resource_id;
return 0;
}
@@ -913,11 +962,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj)
{
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
- struct virtio_gpu_fence *fence;
if (use_dma_api && obj->mapped) {
+ struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
/* detach backing and wait for the host process it ... */
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+ virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
dma_fence_wait(&fence->f, true);
dma_fence_put(&fence->f);
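
The EDID fetch above leans on drm_do_get_edid(), which pulls the blob one 128-byte block at a time through a driver callback and validates checksums itself; returning non-zero from the callback aborts the fetch. The contract in isolation, with a hypothetical host-response type standing in for virtio_gpu_resp_edid:

	#include <drm/drm_edid.h>	/* EDID_LENGTH, drm_do_get_edid() */

	struct host_edid_resp {
		size_t size;		/* valid bytes in data[] */
		u8 data[1024];
	};

	static int host_get_edid_block(void *opaque, u8 *buf,
				       unsigned int block, size_t len)
	{
		struct host_edid_resp *resp = opaque;
		size_t start = block * EDID_LENGTH;

		if (start + len > resp->size)
			return -1;	/* makes drm_do_get_edid() bail out */
		memcpy(buf, resp->data + start, len);
		return 0;
	}
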
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 07cfde1b4132..83087877565c 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -68,7 +68,6 @@ static struct drm_driver vkms_driver = {
.release = vkms_release,
.fops = &vkms_driver_fops,
.dumb_create = vkms_dumb_create,
- .dumb_map_offset = vkms_dumb_map,
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
.get_vblank_timestamp = vkms_get_vblank_timestamp,
@@ -108,17 +107,18 @@ static int __init vkms_init(void)
if (!vkms_device)
return -ENOMEM;
- ret = drm_dev_init(&vkms_device->drm, &vkms_driver, NULL);
- if (ret)
- goto out_free;
-
vkms_device->platform =
platform_device_register_simple(DRIVER_NAME, -1, NULL, 0);
if (IS_ERR(vkms_device->platform)) {
ret = PTR_ERR(vkms_device->platform);
- goto out_fini;
+ goto out_free;
}
+ ret = drm_dev_init(&vkms_device->drm, &vkms_driver,
+ &vkms_device->platform->dev);
+ if (ret)
+ goto out_unregister;
+
vkms_device->drm.irq_enabled = true;
ret = drm_vblank_init(&vkms_device->drm, 1);
@@ -129,20 +129,20 @@ static int __init vkms_init(void)
ret = vkms_modeset_init(vkms_device);
if (ret)
- goto out_unregister;
+ goto out_fini;
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
- goto out_unregister;
+ goto out_fini;
return 0;
-out_unregister:
- platform_device_unregister(vkms_device->platform);
-
out_fini:
drm_dev_fini(&vkms_device->drm);
+out_unregister:
+ platform_device_unregister(vkms_device->platform);
+
out_free:
kfree(vkms_device);
return ret;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 1c93990693e3..e4469cd3d254 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -127,9 +127,6 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
- u32 handle, u64 *offset);
-
void vkms_gem_free_object(struct drm_gem_object *obj);
int vkms_gem_vmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index d04e988b4cbe..80311daed47a 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -153,32 +153,6 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
return 0;
}
-int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
- u32 handle, u64 *offset)
-{
- struct drm_gem_object *obj;
- int ret;
-
- obj = drm_gem_object_lookup(file, handle);
- if (!obj)
- return -ENOENT;
-
- if (!obj->filp) {
- ret = -EINVAL;
- goto unref;
- }
-
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto unref;
-
- *offset = drm_vma_node_offset_addr(&obj->vma_node);
-unref:
- drm_gem_object_put_unlocked(obj);
-
- return ret;
-}
-
static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
{
struct drm_gem_object *gem_obj = &vkms_obj->gem;
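
Deleting vkms_dumb_map works because the DRM core falls back to drm_gem_dumb_map_offset() when a driver leaves .dumb_map_offset NULL; the removed helper duplicated that logic. A simplified sketch of what the core fallback does (illustration only, not the verbatim helper from drm_gem.c):

	static int dumb_map_offset_sketch(struct drm_file *file,
					  struct drm_device *dev,
					  u32 handle, u64 *offset)
	{
		struct drm_gem_object *obj;
		int ret;

		obj = drm_gem_object_lookup(file, handle);
		if (!obj)
			return -ENOENT;

		ret = drm_gem_create_mmap_offset(obj);
		if (!ret)
			*offset = drm_vma_node_offset_addr(&obj->vma_node);

		drm_gem_object_put_unlocked(obj);
		return ret;
	}
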
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 7041007396ae..418817600ad1 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -23,8 +23,11 @@ vkms_plane_duplicate_state(struct drm_plane *plane)
return NULL;
crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL);
- if (WARN_ON(!crc_data))
- DRM_INFO("Couldn't allocate crc_data");
+ if (!crc_data) {
+ DRM_DEBUG_KMS("Couldn't allocate crc_data\n");
+ kfree(vkms_state);
+ return NULL;
+ }
vkms_state->crc_data = crc_data;
@@ -138,14 +141,12 @@ static int vkms_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_gem_object *gem_obj;
- struct vkms_gem_object *vkms_obj;
int ret;
if (!state->fb)
return 0;
gem_obj = drm_gem_fb_get_obj(state->fb, 0);
- vkms_obj = drm_gem_to_vkms_gem(gem_obj);
ret = vkms_gem_vmap(gem_obj);
if (ret)
DRM_ERROR("vmap failed: %d\n", ret);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 50637f372e9f..9fd5fbe8bebf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -49,6 +49,8 @@
#define VMWGFX_REPO "In Tree"
+#define VMWGFX_VALIDATION_MEM_GRAN (16 * PAGE_SIZE)
+
/**
* Fully encoded drm commands. Might move to vmw_drm.h
@@ -665,7 +667,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
- mutex_init(&dev_priv->requested_layout_mutex);
mutex_init(&dev_priv->global_kms_state_mutex);
ttm_lock_init(&dev_priv->reservation_sem);
spin_lock_init(&dev_priv->resource_lock);
@@ -801,11 +802,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
dev_priv->mmio_start, dev_priv->mmio_size / 1024);
- ret = vmw_ttm_global_init(dev_priv);
- if (unlikely(ret != 0))
- goto out_err0;
-
-
vmw_master_init(&dev_priv->fbdev_master);
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -816,7 +812,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(dev_priv->mmio_virt == NULL)) {
ret = -ENOMEM;
DRM_ERROR("Failed mapping MMIO.\n");
- goto out_err3;
+ goto out_err0;
}
/* Need mmio memory to check for fifo pitchlock cap. */
@@ -828,8 +824,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
goto out_err4;
}
- dev_priv->tdev = ttm_object_device_init
- (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
+ dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12,
+ &vmw_prime_dmabuf_ops);
if (unlikely(dev_priv->tdev == NULL)) {
DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -870,7 +866,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
ret = ttm_bo_device_init(&dev_priv->bdev,
- dev_priv->bo_global_ref.ref.object,
&vmw_bo_driver,
dev->anon_inode->i_mapping,
VMWGFX_FILE_PAGE_OFFSET,
@@ -918,7 +913,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
spin_unlock(&dev_priv->cap_lock);
}
-
+ vmw_validation_mem_init_ttm(dev_priv, VMWGFX_VALIDATION_MEM_GRAN);
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
@@ -992,8 +987,6 @@ out_no_device:
ttm_object_device_release(&dev_priv->tdev);
out_err4:
memunmap(dev_priv->mmio_virt);
-out_err3:
- vmw_ttm_global_release(dev_priv);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
@@ -1045,7 +1038,6 @@ static void vmw_driver_unload(struct drm_device *dev)
memunmap(dev_priv->mmio_virt);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
- vmw_ttm_global_release(dev_priv);
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 59f614225bcd..cd607ba9c2fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -417,8 +417,6 @@ enum {
struct vmw_private {
struct ttm_bo_device bdev;
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
struct vmw_fifo_state fifo;
@@ -468,15 +466,6 @@ struct vmw_private {
uint32_t num_displays;
/*
- * Currently requested_layout_mutex is used to protect the gui
- * positionig state in display unit. With that use case currently this
- * mutex is only taken during layout ioctl and atomic check_modeset.
- * Other display unit state can be protected with this mutex but that
- * needs careful consideration.
- */
- struct mutex requested_layout_mutex;
-
- /*
* Framebuffer info.
*/
@@ -486,8 +475,6 @@ struct vmw_private {
struct vmw_overlay *overlay_priv;
struct drm_property *hotplug_mode_update_property;
struct drm_property *implicit_placement_property;
- unsigned num_implicit;
- struct vmw_framebuffer *implicit_fb;
struct mutex global_kms_state_mutex;
spinlock_t cursor_lock;
struct drm_atomic_state *suspend_state;
@@ -606,6 +593,9 @@ struct vmw_private {
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
+
+ /* Validation memory reservation */
+ struct vmw_validation_mem vvm;
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -842,10 +832,10 @@ extern int vmw_fifo_flush(struct vmw_private *dev_priv,
* TTM glue - vmwgfx_ttm_glue.c
*/
-extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
-extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
+extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
+ size_t gran);
/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
@@ -1363,7 +1353,7 @@ vmw_bo_reference(struct vmw_buffer_object *buf)
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
- return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
+ return &ttm_mem_glob;
}
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 5a6b70ba137a..f2d13a72c05d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1738,7 +1738,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
void *buf)
{
struct vmw_buffer_object *vmw_bo;
- int ret;
struct {
uint32_t header;
@@ -1748,7 +1747,6 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
return vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->body.ptr,
&vmw_bo);
- return ret;
}
@@ -3837,6 +3835,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
struct sync_file *sync_file = NULL;
DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
+ vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
+
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index f87261545f2c..301260e23e52 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -906,13 +906,10 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
container_of(action, struct vmw_event_fence_action, action);
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
- struct drm_file *file_priv;
-
if (unlikely(event == NULL))
return;
- file_priv = event->file_priv;
spin_lock_irq(&dev->event_lock);
if (likely(eaction->tv_sec != NULL)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index dca04d4246ea..b351fb5214d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_rect.h>
+#include <drm/drm_damage_helper.h>
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
@@ -456,21 +457,8 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc *crtc = state->crtc;
struct vmw_connector_state *vcs;
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
vcs = vmw_connector_state_to_vcs(du->connector.state);
-
- /* Only one active implicit framebuffer at a time. */
- mutex_lock(&dev_priv->global_kms_state_mutex);
- if (vcs->is_implicit && dev_priv->implicit_fb &&
- !(dev_priv->num_implicit == 1 && du->active_implicit)
- && dev_priv->implicit_fb != vfb) {
- DRM_ERROR("Multiple implicit framebuffers "
- "not supported.\n");
- ret = -EINVAL;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
}
@@ -493,24 +481,24 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
int ret = 0;
+ struct drm_crtc_state *crtc_state = NULL;
struct vmw_surface *surface = NULL;
struct drm_framebuffer *fb = new_state->fb;
- struct drm_rect src = drm_plane_state_src(new_state);
- struct drm_rect dest = drm_plane_state_dest(new_state);
+ if (new_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
- /* Turning off */
- if (!fb)
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ if (ret)
return ret;
- ret = drm_plane_helper_check_update(plane, new_state->crtc, fb,
- &src, &dest,
- DRM_MODE_ROTATE_0,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true, &new_state->visible);
- if (!ret)
- return ret;
+ /* Turning off */
+ if (!fb)
+ return 0;
/* A lot of the code assumes this */
if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
@@ -846,58 +834,6 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
kfree(vfbs);
}
-static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
- struct drm_file *file_priv,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
-{
- struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- struct vmw_framebuffer_surface *vfbs =
- vmw_framebuffer_to_vfbs(framebuffer);
- struct drm_clip_rect norect;
- int ret, inc = 1;
-
- /* Legacy Display Unit does not support 3D */
- if (dev_priv->active_display_unit == vmw_du_legacy)
- return -EINVAL;
-
- drm_modeset_lock_all(dev_priv->dev);
-
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0)) {
- drm_modeset_unlock_all(dev_priv->dev);
- return ret;
- }
-
- if (!num_clips) {
- num_clips = 1;
- clips = &norect;
- norect.x1 = norect.y1 = 0;
- norect.x2 = framebuffer->width;
- norect.y2 = framebuffer->height;
- } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
- num_clips /= 2;
- inc = 2; /* skip source rects */
- }
-
- if (dev_priv->active_display_unit == vmw_du_screen_object)
- ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
- clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL, NULL);
- else
- ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
- clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL, NULL);
-
- vmw_fifo_flush(dev_priv, false);
- ttm_read_unlock(&dev_priv->reservation_sem);
-
- drm_modeset_unlock_all(dev_priv->dev);
-
- return 0;
-}
-
/**
* vmw_kms_readback - Perform a readback from the screen system to
* a buffer-object backed framebuffer.
@@ -941,7 +877,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
- .dirty = vmw_framebuffer_surface_dirty,
+ .dirty = drm_atomic_helper_dirtyfb,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
@@ -1084,16 +1020,6 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
}
switch (dev_priv->active_display_unit) {
- case vmw_du_screen_target:
- ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
- clips, NULL, num_clips, increment,
- true, true, NULL);
- break;
- case vmw_du_screen_object:
- ret = vmw_kms_sou_do_bo_dirty(dev_priv, &vfbd->base,
- clips, NULL, num_clips,
- increment, true, NULL, NULL);
- break;
case vmw_du_legacy:
ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
clips, num_clips, increment);
@@ -1112,9 +1038,25 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
return ret;
}
+static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+
+ if (dev_priv->active_display_unit == vmw_du_legacy)
+ return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
+ color, clips, num_clips);
+
+ return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
+ clips, num_clips);
+}
+
static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.destroy = vmw_framebuffer_bo_destroy,
- .dirty = vmw_framebuffer_bo_dirty,
+ .dirty = vmw_framebuffer_bo_dirty_ext,
};
/**
@@ -1565,6 +1507,88 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
}
/**
+ * vmw_crtc_state_and_lock - Return new or current crtc state with locked
+ * crtc mutex
+ * @state: The atomic state pointer containing the new atomic state
+ * @crtc: The crtc
+ *
+ * This function returns the new crtc state if it's part of the state update;
+ * otherwise it returns the current crtc state. It also makes sure that the
+ * crtc mutex is locked.
+ *
+ * Returns: A valid crtc state pointer or NULL. It may also return an
+ * error pointer, in particular ERR_PTR(-EDEADLK) if locking needs to be rerun.
+ */
+static struct drm_crtc_state *
+vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (crtc_state) {
+ lockdep_assert_held(&crtc->mutex.mutex.base);
+ } else {
+ int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
+
+ if (ret != 0 && ret != -EALREADY)
+ return ERR_PTR(ret);
+
+ crtc_state = crtc->state;
+ }
+
+ return crtc_state;
+}
+
+/**
+ * vmw_kms_check_implicit - Verify that all implicit display units scan out
+ * from the same fb after the new state is committed.
+ * @dev: The drm_device.
+ * @state: The new state to be checked.
+ *
+ * Returns:
+ * Zero on success,
+ * -EINVAL on invalid state,
+ * -EDEADLK if modeset locking needs to be rerun.
+ */
+static int vmw_kms_check_implicit(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_framebuffer *implicit_fb = NULL;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ drm_for_each_crtc(crtc, dev) {
+ struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
+
+ if (!du->is_implicit)
+ continue;
+
+ crtc_state = vmw_crtc_state_and_lock(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state || !crtc_state->enable)
+ continue;
+
+ /*
+ * Can't move primary planes across crtcs, so this is OK.
+ * It also means we don't need to take the plane mutex.
+ */
+ plane_state = du->primary.state;
+ if (plane_state->crtc != crtc)
+ continue;
+
+ if (!implicit_fb)
+ implicit_fb = plane_state->fb;
+ else if (implicit_fb != plane_state->fb)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* vmw_kms_check_topology - Validates topology in drm_atomic_state
* @dev: DRM device
* @state: the driver state object
@@ -1575,7 +1599,6 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_atomic_state *state)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_rect *rects;
struct drm_crtc *crtc;
@@ -1587,19 +1610,31 @@ static int vmw_kms_check_topology(struct drm_device *dev,
if (!rects)
return -ENOMEM;
- mutex_lock(&dev_priv->requested_layout_mutex);
-
drm_for_each_crtc(crtc, dev) {
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct drm_crtc_state *crtc_state = crtc->state;
+ struct drm_crtc_state *crtc_state;
i = drm_crtc_index(crtc);
- if (crtc_state && crtc_state->enable) {
+ crtc_state = vmw_crtc_state_and_lock(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto clean;
+ }
+
+ if (!crtc_state)
+ continue;
+
+ if (crtc_state->enable) {
rects[i].x1 = du->gui_x;
rects[i].y1 = du->gui_y;
rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
+ } else {
+ rects[i].x1 = 0;
+ rects[i].y1 = 0;
+ rects[i].x2 = 0;
+ rects[i].y2 = 0;
}
}
@@ -1611,14 +1646,6 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
- if (!new_crtc_state->enable) {
- rects[i].x1 = 0;
- rects[i].y1 = 0;
- rects[i].x2 = 0;
- rects[i].y2 = 0;
- continue;
- }
-
if (!du->pref_active) {
ret = -EINVAL;
goto clean;
@@ -1639,18 +1666,12 @@ static int vmw_kms_check_topology(struct drm_device *dev,
vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
vmw_conn_state->gui_x = du->gui_x;
vmw_conn_state->gui_y = du->gui_y;
-
- rects[i].x1 = du->gui_x;
- rects[i].y1 = du->gui_y;
- rects[i].x2 = du->gui_x + new_crtc_state->mode.hdisplay;
- rects[i].y2 = du->gui_y + new_crtc_state->mode.vdisplay;
}
ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
rects);
clean:
- mutex_unlock(&dev_priv->requested_layout_mutex);
kfree(rects);
return ret;
}
@@ -1681,6 +1702,10 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
if (ret)
return ret;
+ ret = vmw_kms_check_implicit(dev, state);
+ if (ret)
+ return ret;
+
if (!state->allow_modeset)
return ret;
@@ -2003,11 +2028,25 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
struct vmw_display_unit *du;
struct drm_connector *con;
struct drm_connector_list_iter conn_iter;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc *crtc;
+ int ret;
+
+ /* Currently gui_x/y is protected with the crtc mutex */
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_acquire_init(&ctx, 0);
+retry:
+ drm_for_each_crtc(crtc, dev) {
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret < 0) {
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ goto out_fini;
+ }
+ }
- /*
- * Currently only gui_x/y is protected with requested_layout_mutex.
- */
- mutex_lock(&dev_priv->requested_layout_mutex);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(con, &conn_iter) {
du = vmw_connector_to_du(con);
@@ -2026,9 +2065,7 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
}
}
drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev_priv->requested_layout_mutex);
- mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(con, &dev->mode_config.connector_list, head) {
du = vmw_connector_to_du(con);
if (num_rects > du->unit) {
@@ -2048,10 +2085,13 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv,
}
con->status = vmw_du_connector_detect(con, true);
}
- mutex_unlock(&dev->mode_config.mutex);
drm_sysfs_hotplug_event(dev);
-
+out_fini:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ mutex_unlock(&dev->mode_config.mutex);
+
return 0;
}
@@ -2275,84 +2315,6 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
return 1;
}
-int vmw_du_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
-
- if (property == dev_priv->implicit_placement_property)
- du->is_implicit = val;
-
- return 0;
-}
-
-
-
-/**
- * vmw_du_connector_atomic_set_property - Atomic version of get property
- *
- * @crtc - crtc the property is associated with
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int
-vmw_du_connector_atomic_set_property(struct drm_connector *connector,
- struct drm_connector_state *state,
- struct drm_property *property,
- uint64_t val)
-{
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
- struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
- struct vmw_display_unit *du = vmw_connector_to_du(connector);
-
-
- if (property == dev_priv->implicit_placement_property) {
- vcs->is_implicit = val;
-
- /*
- * We should really be doing a drm_atomic_commit() to
- * commit the new state, but since this doesn't cause
- * an immedate state change, this is probably ok
- */
- du->is_implicit = vcs->is_implicit;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-/**
- * vmw_du_connector_atomic_get_property - Atomic version of get property
- *
- * @connector - connector the property is associated with
- *
- * Returns:
- * Zero on success, negative errno on failure.
- */
-int
-vmw_du_connector_atomic_get_property(struct drm_connector *connector,
- const struct drm_connector_state *state,
- struct drm_property *property,
- uint64_t *val)
-{
- struct vmw_private *dev_priv = vmw_priv(connector->dev);
- struct vmw_connector_state *vcs = vmw_connector_state_to_vcs(state);
-
- if (property == dev_priv->implicit_placement_property)
- *val = vcs->is_implicit;
- else {
- DRM_ERROR("Invalid Property %s\n", property->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
/**
* vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
* @dev: drm device for the ioctl
@@ -2742,143 +2704,25 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
}
/**
- * vmw_kms_del_active - unregister a crtc binding to the implicit framebuffer
- *
- * @dev_priv: Pointer to a device private struct.
- * @du: The display unit of the crtc.
- */
-void vmw_kms_del_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du)
-{
- mutex_lock(&dev_priv->global_kms_state_mutex);
- if (du->active_implicit) {
- if (--(dev_priv->num_implicit) == 0)
- dev_priv->implicit_fb = NULL;
- du->active_implicit = false;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
- * vmw_kms_add_active - register a crtc binding to an implicit framebuffer
- *
- * @vmw_priv: Pointer to a device private struct.
- * @du: The display unit of the crtc.
- * @vfb: The implicit framebuffer
- *
- * Registers a binding to an implicit framebuffer.
- */
-void vmw_kms_add_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du,
- struct vmw_framebuffer *vfb)
-{
- mutex_lock(&dev_priv->global_kms_state_mutex);
- WARN_ON_ONCE(!dev_priv->num_implicit && dev_priv->implicit_fb);
-
- if (!du->active_implicit && du->is_implicit) {
- dev_priv->implicit_fb = vfb;
- du->active_implicit = true;
- dev_priv->num_implicit++;
- }
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
- * vmw_kms_screen_object_flippable - Check whether we can page-flip a crtc.
- *
- * @dev_priv: Pointer to device-private struct.
- * @crtc: The crtc we want to flip.
- *
- * Returns true or false depending whether it's OK to flip this crtc
- * based on the criterion that we must not have more than one implicit
- * frame-buffer at any one time.
- */
-bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- bool ret;
-
- mutex_lock(&dev_priv->global_kms_state_mutex);
- ret = !du->is_implicit || dev_priv->num_implicit == 1;
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-
- return ret;
-}
-
-/**
- * vmw_kms_update_implicit_fb - Update the implicit fb.
- *
- * @dev_priv: Pointer to device-private struct.
- * @crtc: The crtc the new implicit frame-buffer is bound to.
- */
-void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc)
-{
- struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct drm_plane *plane = crtc->primary;
- struct vmw_framebuffer *vfb;
-
- mutex_lock(&dev_priv->global_kms_state_mutex);
-
- if (!du->is_implicit)
- goto out_unlock;
-
- vfb = vmw_framebuffer_to_vfb(plane->state->fb);
- WARN_ON_ONCE(dev_priv->num_implicit != 1 &&
- dev_priv->implicit_fb != vfb);
-
- dev_priv->implicit_fb = vfb;
-out_unlock:
- mutex_unlock(&dev_priv->global_kms_state_mutex);
-}
-
-/**
* vmw_kms_create_implicit_placement_property - Set up the implicit placement
* property.
*
* @dev_priv: Pointer to a device private struct.
- * @immutable: Whether the property is immutable.
*
* Sets up the implicit placement property unless it's already set up.
*/
void
-vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
- bool immutable)
+vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
{
if (dev_priv->implicit_placement_property)
return;
dev_priv->implicit_placement_property =
drm_property_create_range(dev_priv->dev,
- immutable ?
- DRM_MODE_PROP_IMMUTABLE : 0,
+ DRM_MODE_PROP_IMMUTABLE,
"implicit_placement", 0, 1);
-
}
-
-/**
- * vmw_kms_set_config - Wrapper around drm_atomic_helper_set_config
- *
- * @set: The configuration to set.
- *
- * The vmwgfx Xorg driver doesn't assign the mode::type member, which
- * when drm_mode_set_crtcinfo is called as part of the configuration setting
- * causes it to return incorrect crtc dimensions causing severe problems in
- * the vmwgfx modesetting. So explicitly clear that member before calling
- * into drm_atomic_helper_set_config.
- */
-int vmw_kms_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx)
-{
- if (set && set->mode)
- set->mode->type = 0;
-
- return drm_atomic_helper_set_config(set, ctx);
-}
-
-
/**
* vmw_kms_suspend - Save modesetting state and turn modesetting off.
*
@@ -2935,3 +2779,124 @@ void vmw_kms_lost_device(struct drm_device *dev)
{
drm_atomic_helper_shutdown(dev);
}
+
+/**
+ * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
+ * @update: The closure structure.
+ *
+ * Call this helper after setting the callbacks in &vmw_du_update_plane to do
+ * a plane update on a display unit.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
+{
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_plane_state *old_state = update->old_state;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ struct drm_rect bb;
+ DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
+ uint32_t reserved_size = 0;
+ uint32_t submit_size = 0;
+ uint32_t curr_size = 0;
+ uint32_t num_hits = 0;
+ void *cmd_start;
+ char *cmd_next;
+ int ret;
+
+ /*
+ * Iterate in advance to check if a plane update is really needed, and to
+ * find the number of clips that actually fall within the plane src, for
+ * fifo allocation.
+ */
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ if (num_hits == 0)
+ return 0;
+
+ if (update->vfb->bo) {
+ struct vmw_framebuffer_bo *vfbbo =
+ container_of(update->vfb, typeof(*vfbbo), base);
+
+ ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
+ update->cpu_blit);
+ } else {
+ struct vmw_framebuffer_surface *vfbs =
+ container_of(update->vfb, typeof(*vfbs), base);
+
+ ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
+ 0, NULL, NULL);
+ }
+
+ if (ret)
+ return ret;
+
+ ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
+ if (ret)
+ goto out_unref;
+
+ reserved_size = update->calc_fifo_size(update, num_hits);
+ cmd_start = vmw_fifo_reserve(update->dev_priv, reserved_size);
+ if (!cmd_start) {
+ ret = -ENOMEM;
+ goto out_revert;
+ }
+
+ cmd_next = cmd_start;
+
+ if (update->post_prepare) {
+ curr_size = update->post_prepare(update, cmd_next);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+
+ if (update->pre_clip) {
+ curr_size = update->pre_clip(update, cmd_next, num_hits);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+
+ bb.x1 = INT_MAX;
+ bb.y1 = INT_MAX;
+ bb.x2 = INT_MIN;
+ bb.y2 = INT_MIN;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ uint32_t fb_x = clip.x1;
+ uint32_t fb_y = clip.y1;
+
+ vmw_du_translate_to_crtc(state, &clip);
+ if (update->clip) {
+ curr_size = update->clip(update, cmd_next, &clip, fb_x,
+ fb_y);
+ cmd_next += curr_size;
+ submit_size += curr_size;
+ }
+ bb.x1 = min_t(int, bb.x1, clip.x1);
+ bb.y1 = min_t(int, bb.y1, clip.y1);
+ bb.x2 = max_t(int, bb.x2, clip.x2);
+ bb.y2 = max_t(int, bb.y2, clip.y2);
+ }
+
+ curr_size = update->post_clip(update, cmd_next, &bb);
+ submit_size += curr_size;
+
+ if (reserved_size < submit_size)
+ submit_size = 0;
+
+ vmw_fifo_commit(update->dev_priv, submit_size);
+
+ vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
+ update->out_fence, NULL);
+ return ret;
+
+out_revert:
+ vmw_validation_revert(&val_ctx);
+
+out_unref:
+ vmw_validation_unref_lists(&val_ctx);
+ return ret;
+}
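The helper above leans on a two-pass damage-iterator pattern: a first pass
only counts clips so that calc_fifo_size() can reserve exactly enough FIFO
space, and a second pass emits one command per clip. A minimal sketch of that
pattern against the drm_damage_helper API (emit_blit() is a hypothetical
per-clip emitter, not part of this patch):

#include <drm/drm_damage_helper.h>

static void example_two_pass(struct drm_plane_state *old_state,
			     struct drm_plane_state *state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	uint32_t num_hits = 0;

	/* Pass 1: count damage clips so the FIFO reservation is exact. */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		num_hits++;

	if (num_hits == 0)
		return;	/* no damage, skip the update entirely */

	/* Pass 2: re-init the iterator and emit one command per clip. */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		emit_blit(&clip);	/* hypothetical per-clip emitter */
}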
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 76ec570c0684..655abbcd4058 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -33,7 +33,123 @@
#include <drm/drm_encoder.h>
#include "vmwgfx_drv.h"
+/**
+ * struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update
+ * @plane: Plane which is being updated.
+ * @old_state: Old state of plane.
+ * @dev_priv: Device private.
+ * @du: Display unit on which to update the plane.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: Out fence for resource finish.
+ * @mutex: The mutex used to protect resource reservation.
+ * @cpu_blit: True if a cpu blit is needed.
+ * @intr: Whether to perform waits interruptible if possible.
+ *
+ * This structure loosely represents the set of operations needed to perform a
+ * plane update on a display unit. The implementer defines that functionality
+ * through the function callbacks of this structure. In brief it involves
+ * surface/buffer object validation, populating FIFO commands and command
+ * submission to the device.
+ */
+struct vmw_du_update_plane {
+ /**
+ * @calc_fifo_size: Calculate fifo size.
+ *
+ * Determine the fifo size needed for the update commands. The number of
+ * damage clips on the display unit, @num_hits, is passed in so that
+ * sufficient fifo space can be allocated.
+ *
+ * Return: Fifo size needed
+ */
+ uint32_t (*calc_fifo_size)(struct vmw_du_update_plane *update,
+ uint32_t num_hits);
+
+ /**
+ * @post_prepare: Populate fifo for resource preparation.
+ *
+ * Some surface resources or buffer objects need some extra command
+ * submission, like an update GB image for a proxy surface or defining a
+ * GMRFB for a screen object. That should be done here, as this callback
+ * is called after FIFO allocation with the address of the command buffer.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*post_prepare)(struct vmw_du_update_plane *update, void *cmd);
+
+ /**
+ * @pre_clip: Populate fifo before clip.
+ *
+ * This is where pre-clip commands, like a surface copy/DMA, should be
+ * populated.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*pre_clip)(struct vmw_du_update_plane *update, void *cmd,
+ uint32_t num_hits);
+
+ /**
+ * @clip: Populate fifo for clip.
+ *
+ * This is where to populate clips for surface copy/dma or blit commands
+ * if needed. It is called once for each damage clip on the display unit,
+ * which means exactly once for a full update. @clip is the damage in
+ * destination (crtc/DU) coordinates and @src_x, @src_y is the damage clip
+ * source in framebuffer coordinates.
+ *
+ * This callback is optional.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*clip)(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *clip, uint32_t src_x, uint32_t src_y);
+
+ /**
+ * @post_clip: Populate fifo after clip.
+ *
+ * This is where to populate display unit update commands or blit
+ * commands.
+ *
+ * Return: Size of commands populated to command buffer.
+ */
+ uint32_t (*post_clip)(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb);
+
+ struct drm_plane *plane;
+ struct drm_plane_state *old_state;
+ struct vmw_private *dev_priv;
+ struct vmw_display_unit *du;
+ struct vmw_framebuffer *vfb;
+ struct vmw_fence_obj **out_fence;
+ struct mutex *mutex;
+ bool cpu_blit;
+ bool intr;
+};
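One contract implicit in these callbacks is worth a sketch: everything the
later callbacks write must fit inside the calc_fifo_size() reservation,
because vmw_du_helper_plane_update() zeroes the submission when submit_size
ends up larger than reserved_size. A minimal conforming pair, assuming
hypothetical command layouts struct example_blit and struct example_update:

/* One fixed-size blit per damage clip plus one trailing update command. */
static uint32_t example_calc_fifo_size(struct vmw_du_update_plane *update,
				       uint32_t num_hits)
{
	return sizeof(struct example_blit) * num_hits +
	       sizeof(struct example_update);
}

static uint32_t example_clip(struct vmw_du_update_plane *update, void *cmd,
			     struct drm_rect *clip, uint32_t src_x,
			     uint32_t src_y)
{
	struct example_blit *blit = cmd;

	/* ... fill *blit from *clip and the src_x/src_y fb offsets ... */
	return sizeof(*blit);	/* sizes must add up to the reservation */
}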
+
+/**
+ * struct vmw_du_update_plane_surface - closure structure for surface
+ * @base: base closure structure.
+ * @cmd_start: FIFO command start address (used by SOU only).
+ */
+struct vmw_du_update_plane_surface {
+ struct vmw_du_update_plane base;
+ /* This member handles the special-case SOU surface update */
+ void *cmd_start;
+};
+/**
+ * struct vmw_du_update_plane_buffer - Closure structure for buffer object
+ * @base: Base closure structure.
+ * @fb_left: x1 for fb damage bounding box.
+ * @fb_top: y1 for fb damage bounding box.
+ */
+struct vmw_du_update_plane_buffer {
+ struct vmw_du_update_plane base;
+ int fb_left, fb_top;
+};
/**
* struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
@@ -191,8 +307,6 @@ struct vmw_plane_state {
struct vmw_connector_state {
struct drm_connector_state base;
- bool is_implicit;
-
/**
* @gui_x:
*
@@ -254,7 +368,6 @@ struct vmw_display_unit {
int gui_x;
int gui_y;
bool is_implicit;
- bool active_implicit;
int set_gui_x;
int set_gui_y;
};
@@ -334,17 +447,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
struct drm_crtc **p_crtc,
struct drm_display_mode **p_mode);
void vmw_guess_mode_timing(struct drm_display_mode *mode);
-void vmw_kms_del_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du);
-void vmw_kms_add_active(struct vmw_private *dev_priv,
- struct vmw_display_unit *du,
- struct vmw_framebuffer *vfb);
-bool vmw_kms_crtc_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
-void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv,
- bool immutable);
+void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv);
/* Universal Plane Helpers */
void vmw_du_primary_plane_destroy(struct drm_plane *plane);
@@ -456,6 +560,20 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
bool interruptible,
struct drm_crtc *crtc);
-int vmw_kms_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx);
+int vmw_du_helper_plane_update(struct vmw_du_update_plane *update);
+
+/**
+ * vmw_du_translate_to_crtc - Translate a rect from framebuffer to crtc
+ * @state: Plane state.
+ * @r: Rectangle to translate.
+ */
+static inline void vmw_du_translate_to_crtc(struct drm_plane_state *state,
+ struct drm_rect *r)
+{
+ int translate_crtc_x = -((state->src_x >> 16) - state->crtc_x);
+ int translate_crtc_y = -((state->src_y >> 16) - state->crtc_y);
+
+ drm_rect_translate(r, translate_crtc_x, translate_crtc_y);
+}
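As a concrete check of the arithmetic (illustrative values, assuming a plane
state with these offsets in scope): with src_x and src_y of 0 in 16.16 fixed
point and the plane placed at crtc_x = 100, crtc_y = 50, a damage rect
spanning (0, 0)-(64, 64) in framebuffer coordinates lands at
(100, 50)-(164, 114) on the crtc:

struct drm_rect r = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };

/* translate_crtc_x = -((0 >> 16) - 100) = 100
 * translate_crtc_y = -((0 >> 16) - 50)  = 50 */
vmw_du_translate_to_crtc(plane->state, &r);	/* r: (100, 50)-(164, 114) */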
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 723578117191..16be515c4c0f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -233,7 +233,7 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
+ .set_config = drm_atomic_helper_set_config,
};
@@ -263,18 +263,14 @@ static const struct drm_connector_funcs vmw_legacy_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_ldu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
static const struct
drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
- .best_encoder = drm_atomic_helper_best_encoder,
};
/*
@@ -417,7 +413,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
-
vmw_du_connector_reset(connector);
ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -428,8 +423,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_ldu_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, true);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = true;
-
ret = drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -448,7 +441,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_encoder;
}
-
vmw_du_crtc_reset(crtc);
ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
&ldu->base.cursor,
@@ -514,7 +506,7 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv)
if (ret != 0)
goto err_free;
- vmw_kms_create_implicit_placement_property(dev_priv, true);
+ vmw_kms_create_implicit_placement_property(dev_priv);
if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8a029bade32a..3025bfc001a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -85,7 +85,7 @@ static void vmw_resource_release(struct kref *kref)
struct ttm_validate_buffer val_buf;
val_buf.bo = bo;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
res->func->unbind(res, false, &val_buf);
}
res->backup_dirty = false;
@@ -462,7 +462,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
INIT_LIST_HEAD(&val_list);
val_buf->bo = ttm_bo_reference(&res->backup->base);
- val_buf->shared = false;
+ val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
@@ -565,7 +565,7 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
BUG_ON(!func->may_evict);
val_buf.bo = NULL;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
if (unlikely(ret != 0))
return ret;
@@ -614,7 +614,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr)
return 0;
val_buf.bo = NULL;
- val_buf.shared = false;
+ val_buf.num_shared = 0;
if (res->backup)
val_buf.bo = &res->backup->base;
do {
@@ -685,7 +685,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
struct vmw_resource *res, *next;
struct ttm_validate_buffer val_buf = {
.bo = &vbo->base,
- .shared = false
+ .num_shared = 0
};
lockdep_assert_held(&vbo->base.resv->lock.base);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 53316b1bda3d..cd586c52af7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -29,6 +29,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
#define vmw_crtc_to_sou(x) \
@@ -76,6 +77,11 @@ struct vmw_kms_sou_dirty_cmd {
SVGA3dCmdBlitSurfaceToScreen body;
};
+struct vmw_kms_sou_define_gmrfb {
+ uint32_t header;
+ SVGAFifoCmdDefineGMRFB body;
+};
+
/**
* Display unit using screen objects.
*/
@@ -241,28 +247,20 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
sou->buffer = vps->bo;
sou->buffer_size = vps->bo_size;
- if (sou->base.is_implicit) {
- x = crtc->x;
- y = crtc->y;
- } else {
- conn_state = sou->base.connector.state;
- vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
+ conn_state = sou->base.connector.state;
+ vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
- x = vmw_conn_state->gui_x;
- y = vmw_conn_state->gui_y;
- }
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
if (ret)
DRM_ERROR("Failed to define Screen Object %dx%d\n",
crtc->x, crtc->y);
- vmw_kms_add_active(dev_priv, &sou->base, vfb);
} else {
sou->buffer = NULL;
sou->buffer_size = 0;
-
- vmw_kms_del_active(dev_priv, &sou->base);
}
}
@@ -317,38 +315,14 @@ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
}
}
-static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- int ret;
-
- if (!vmw_kms_crtc_flippable(dev_priv, crtc))
- return -EINVAL;
-
- ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
- if (ret) {
- DRM_ERROR("Page flip error %d.\n", ret);
- return ret;
- }
-
- if (vmw_crtc_to_du(crtc)->is_implicit)
- vmw_kms_update_implicit_fb(dev_priv, crtc);
-
- return ret;
-}
-
static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.gamma_set = vmw_du_crtc_gamma_set,
.destroy = vmw_sou_crtc_destroy,
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
- .page_flip = vmw_sou_crtc_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
};
/*
@@ -377,19 +351,15 @@ static const struct drm_connector_funcs vmw_sou_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_sou_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
static const struct
drm_connector_helper_funcs vmw_sou_connector_helper_funcs = {
- .best_encoder = drm_atomic_helper_best_encoder,
};
@@ -499,6 +469,263 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
return vmw_bo_pin_in_vram(dev_priv, vps->bo, true);
}
+static uint32_t vmw_sou_bo_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_kms_sou_define_gmrfb) +
+ sizeof(struct vmw_kms_sou_bo_blit) * num_hits;
+}
+
+static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update,
+ void *cmd)
+{
+ struct vmw_framebuffer_bo *vfbbo =
+ container_of(update->vfb, typeof(*vfbbo), base);
+ struct vmw_kms_sou_define_gmrfb *gmr = cmd;
+ int depth = update->vfb->base.format->depth;
+
+ /* Emulate RGBA support: contrary to svga_reg.h, this is not
+ * supported by hosts. This is only a problem if we are reading
+ * this value later and expecting what we uploaded back.
+ */
+ if (depth == 32)
+ depth = 24;
+
+ gmr->header = SVGA_CMD_DEFINE_GMRFB;
+
+ gmr->body.format.bitsPerPixel = update->vfb->base.format->cpp[0] * 8;
+ gmr->body.format.colorDepth = depth;
+ gmr->body.format.reserved = 0;
+ gmr->body.bytesPerLine = update->vfb->base.pitches[0];
+ vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &gmr->body.ptr);
+
+ return sizeof(*gmr);
+}
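To make the depth clamp concrete: for a DRM_FORMAT_ARGB8888 framebuffer,
cpp[0] is 4 so bitsPerPixel comes out as 32, while the format's nominal depth
of 32 is clamped to 24 as the comment above explains; DRM_FORMAT_XRGB8888
already reports depth 24 and passes through unchanged. Sketched with
illustrative values:

/* DRM_FORMAT_ARGB8888: cpp[0] == 4, format->depth == 32 */
int bpp   = 4 * 8;	/* gmr->body.format.bitsPerPixel = 32 */
int depth = 24;		/* gmr->body.format.colorDepth, clamped from 32 */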
+
+static uint32_t vmw_sou_bo_populate_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct vmw_kms_sou_bo_blit *blit = cmd;
+
+ blit->header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+ blit->body.destScreenId = update->du->unit;
+ blit->body.srcOrigin.x = fb_x;
+ blit->body.srcOrigin.y = fb_y;
+ blit->body.destRect.left = clip->x1;
+ blit->body.destRect.top = clip->y1;
+ blit->body.destRect.right = clip->x2;
+ blit->body.destRect.bottom = clip->y2;
+
+ return sizeof(*blit);
+}
+
+static uint32_t vmw_stud_bo_post_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ return 0;
+}
+
+/**
+ * vmw_sou_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane state.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_bo(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_buffer bo_update;
+
+ memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+ bo_update.base.plane = plane;
+ bo_update.base.old_state = old_state;
+ bo_update.base.dev_priv = dev_priv;
+ bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ bo_update.base.vfb = vfb;
+ bo_update.base.out_fence = out_fence;
+ bo_update.base.mutex = NULL;
+ bo_update.base.cpu_blit = false;
+ bo_update.base.intr = true;
+
+ bo_update.base.calc_fifo_size = vmw_sou_bo_fifo_size;
+ bo_update.base.post_prepare = vmw_sou_bo_define_gmrfb;
+ bo_update.base.clip = vmw_sou_bo_populate_clip;
+ bo_update.base.post_clip = vmw_stud_bo_post_clip;
+
+ return vmw_du_helper_plane_update(&bo_update.base);
+}
+
+static uint32_t vmw_sou_surface_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_kms_sou_dirty_cmd) + sizeof(SVGASignedRect) *
+ num_hits;
+}
+
+static uint32_t vmw_sou_surface_post_prepare(struct vmw_du_update_plane *update,
+ void *cmd)
+{
+ struct vmw_du_update_plane_surface *srf_update;
+
+ srf_update = container_of(update, typeof(*srf_update), base);
+
+ /*
+ * SOU SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN is special in the sense that
+ * its bounding box is filled before iterating over all the clips. So
+ * store the FIFO start address and revisit to fill the details.
+ */
+ srf_update->cmd_start = cmd;
+
+ return 0;
+}
+
+static uint32_t vmw_sou_surface_pre_clip(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_kms_sou_dirty_cmd *blit = cmd;
+ struct vmw_framebuffer_surface *vfbs;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ blit->header.id = SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN;
+ blit->header.size = sizeof(blit->body) + sizeof(SVGASignedRect) *
+ num_hits;
+
+ blit->body.srcImage.sid = vfbs->surface->res.id;
+ blit->body.destScreenId = update->du->unit;
+
+ /* Update the source and destination bounding box later in post_clip */
+ blit->body.srcRect.left = 0;
+ blit->body.srcRect.top = 0;
+ blit->body.srcRect.right = 0;
+ blit->body.srcRect.bottom = 0;
+
+ blit->body.destRect.left = 0;
+ blit->body.destRect.top = 0;
+ blit->body.destRect.right = 0;
+ blit->body.destRect.bottom = 0;
+
+ return sizeof(*blit);
+}
+
+static uint32_t vmw_sou_surface_clip_rect(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t src_x, uint32_t src_y)
+{
+ SVGASignedRect *rect = cmd;
+
+ /*
+ * rects are relative to the dest bounding box rect on the screen object,
+ * so they are translated relative to it later in post_clip
+ */
+ rect->left = clip->x1;
+ rect->top = clip->y1;
+ rect->right = clip->x2;
+ rect->bottom = clip->y2;
+
+ return sizeof(*rect);
+}
+
+static uint32_t vmw_sou_surface_post_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ struct vmw_du_update_plane_surface *srf_update;
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_rect src_bb;
+ struct vmw_kms_sou_dirty_cmd *blit;
+ SVGASignedRect *rect;
+ uint32_t num_hits;
+ int translate_src_x;
+ int translate_src_y;
+ int i;
+
+ srf_update = container_of(update, typeof(*srf_update), base);
+
+ blit = srf_update->cmd_start;
+ rect = (SVGASignedRect *)&blit[1];
+
+ num_hits = (blit->header.size - sizeof(blit->body))/
+ sizeof(SVGASignedRect);
+
+ src_bb = *bb;
+
+ /* To translate bb back to fb src coord */
+ translate_src_x = (state->src_x >> 16) - state->crtc_x;
+ translate_src_y = (state->src_y >> 16) - state->crtc_y;
+
+ drm_rect_translate(&src_bb, translate_src_x, translate_src_y);
+
+ blit->body.srcRect.left = src_bb.x1;
+ blit->body.srcRect.top = src_bb.y1;
+ blit->body.srcRect.right = src_bb.x2;
+ blit->body.srcRect.bottom = src_bb.y2;
+
+ blit->body.destRect.left = bb->x1;
+ blit->body.destRect.top = bb->y1;
+ blit->body.destRect.right = bb->x2;
+ blit->body.destRect.bottom = bb->y2;
+
+ /* rects are relative to dest bb rect */
+ for (i = 0; i < num_hits; i++) {
+ rect->left -= bb->x1;
+ rect->top -= bb->y1;
+ rect->right -= bb->x1;
+ rect->bottom -= bb->y1;
+ rect++;
+ }
+
+ return 0;
+}
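A small worked example of that final fixup (numbers are illustrative): with a
damage bounding box bb = (100, 100)-(200, 200) and a single clip
(110, 120)-(150, 160), the blit's destRect becomes the bounding box itself
and the clip rect is rewritten relative to its origin:

/* destRect = bb = (100, 100)-(200, 200) */
rect->left   = 110 - 100;	/* 10 */
rect->top    = 120 - 100;	/* 20 */
rect->right  = 150 - 100;	/* 50 */
rect->bottom = 160 - 100;	/* 60 */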
+
+/**
+ * vmw_sou_plane_update_surface - Update display unit for surface backed fb.
+ * @dev_priv: Device private.
+ * @plane: Plane state.
+ * @old_state: Old plane state.
+ * @vfb: Framebuffer which is blitted to display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_surface srf_update;
+
+ memset(&srf_update, 0, sizeof(struct vmw_du_update_plane_surface));
+ srf_update.base.plane = plane;
+ srf_update.base.old_state = old_state;
+ srf_update.base.dev_priv = dev_priv;
+ srf_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ srf_update.base.vfb = vfb;
+ srf_update.base.out_fence = out_fence;
+ srf_update.base.mutex = &dev_priv->cmdbuf_mutex;
+ srf_update.base.cpu_blit = false;
+ srf_update.base.intr = true;
+
+ srf_update.base.calc_fifo_size = vmw_sou_surface_fifo_size;
+ srf_update.base.post_prepare = vmw_sou_surface_post_prepare;
+ srf_update.base.pre_clip = vmw_sou_surface_pre_clip;
+ srf_update.base.clip = vmw_sou_surface_clip_rect;
+ srf_update.base.post_clip = vmw_sou_surface_post_clip;
+
+ return vmw_du_helper_plane_update(&srf_update.base);
+}
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
@@ -509,47 +736,28 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
struct vmw_fence_obj *fence = NULL;
int ret;
+ /* In case of device error, maintain consistent atomic state */
if (crtc && plane->state->fb) {
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_framebuffer *vfb =
vmw_framebuffer_to_vfb(plane->state->fb);
- struct drm_vmw_rect vclips;
-
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
if (vfb->bo)
- ret = vmw_kms_sou_do_bo_dirty(dev_priv, vfb, NULL,
- &vclips, 1, 1, true,
- &fence, crtc);
+ ret = vmw_sou_plane_update_bo(dev_priv, plane,
+ old_state, vfb, &fence);
else
- ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
- &vclips, NULL, 0, 0,
- 1, 1, &fence, crtc);
-
- /*
- * We cannot really fail this function, so if we do, then output
- * an error and maintain consistent atomic state.
- */
+ ret = vmw_sou_plane_update_surface(dev_priv, plane,
+ old_state, vfb,
+ &fence);
if (ret != 0)
DRM_ERROR("Failed to update screen.\n");
} else {
- /*
- * When disabling a plane, CRTC and FB should always be NULL
- * together, otherwise it's an error.
- * Here primary plane is being disable so should really blank
- * the screen object display unit, if not already done.
- */
+ /* Do nothing when fb and crtc are NULL (blank crtc) */
return;
}
+ /* In the error case, the vblank event is sent from vmw_du_crtc_atomic_flush */
event = crtc->state->event;
- /*
- * In case of failure and other cases, vblank event will be sent in
- * vmw_du_crtc_atomic_flush.
- */
if (event && fence) {
struct drm_file *file_priv = event->base.file_priv;
@@ -640,7 +848,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
primary = &sou->base.primary;
cursor = &sou->base.cursor;
- sou->base.active_implicit = false;
sou->base.pref_active = (unit == 0);
sou->base.pref_width = dev_priv->initial_width;
sou->base.pref_height = dev_priv->initial_height;
@@ -666,6 +873,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary);
/* Initialize cursor plane */
vmw_du_plane_reset(cursor);
@@ -693,8 +901,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_sou_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, true);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = false;
-
ret = drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -733,12 +939,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
- if (dev_priv->implicit_placement_property)
- drm_object_attach_property
- (&connector->base,
- dev_priv->implicit_placement_property,
- sou->base.is_implicit);
-
return 0;
err_free_unregister:
@@ -764,15 +964,11 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
}
ret = -ENOMEM;
- dev_priv->num_implicit = 0;
- dev_priv->implicit_fb = NULL;
ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
if (unlikely(ret != 0))
return ret;
- vmw_kms_create_implicit_placement_property(dev_priv, false);
-
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
vmw_sou_init(dev_priv, i);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index e086565c1da6..096c2941a8e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -30,7 +30,7 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-
+#include <drm/drm_damage_helper.h>
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
@@ -92,6 +92,10 @@ struct vmw_stdu_surface_copy {
SVGA3dCmdSurfaceCopy body;
};
+struct vmw_stdu_update_gb_image {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+};
/**
* struct vmw_screen_target_display_unit
@@ -396,13 +400,8 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (!crtc->state->enable)
return;
- if (stdu->base.is_implicit) {
- x = crtc->x;
- y = crtc->y;
- } else {
- x = vmw_conn_state->gui_x;
- y = vmw_conn_state->gui_y;
- }
+ x = vmw_conn_state->gui_x;
+ y = vmw_conn_state->gui_y;
vmw_svga_enable(dev_priv);
ret = vmw_stdu_define_st(dev_priv, stdu, &crtc->mode, x, y);
@@ -417,27 +416,9 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
{
}
-
static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct drm_plane_state *plane_state = crtc->primary->state;
- struct vmw_private *dev_priv;
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer *vfb;
- struct drm_framebuffer *fb;
-
-
- stdu = vmw_crtc_to_stdu(crtc);
- dev_priv = vmw_priv(crtc->dev);
- fb = plane_state->fb;
-
- vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
-
- if (vfb)
- vmw_kms_add_active(dev_priv, &stdu->base, vfb);
- else
- vmw_kms_del_active(dev_priv, &stdu->base);
}
static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -472,49 +453,6 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
}
/**
- * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
- *
- * @crtc: CRTC to attach FB to
- * @fb: FB to attach
- * @event: Event to be posted. This event should've been alloced
- * using k[mz]alloc, and should've been completely initialized.
- * @page_flip_flags: Input flags.
- *
- * If the STDU uses the same display and content buffers, i.e. a true flip,
- * this function will replace the existing display buffer with the new content
- * buffer.
- *
- * If the STDU uses different display and content buffers, i.e. a blit, then
- * only the content buffer will be updated.
- *
- * RETURNS:
- * 0 on success, error code on failure
- */
-static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *new_fb,
- struct drm_pending_vblank_event *event,
- uint32_t flags,
- struct drm_modeset_acquire_ctx *ctx)
-
-{
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
- int ret;
-
- if (!stdu->defined || !vmw_kms_crtc_flippable(dev_priv, crtc))
- return -EINVAL;
-
- ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
- if (ret) {
- DRM_ERROR("Page flip error %d.\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-
-/**
* vmw_stdu_bo_clip - Callback to encode a surface DMA command cliprect
*
* @dirty: The closure structure.
@@ -986,8 +924,8 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.reset = vmw_du_crtc_reset,
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
- .set_config = vmw_kms_set_config,
- .page_flip = vmw_stdu_crtc_page_flip,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
};
@@ -1042,19 +980,15 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
.dpms = vmw_du_connector_dpms,
.detect = vmw_du_connector_detect,
.fill_modes = vmw_du_connector_fill_modes,
- .set_property = vmw_du_connector_set_property,
.destroy = vmw_stdu_connector_destroy,
.reset = vmw_du_connector_reset,
.atomic_duplicate_state = vmw_du_connector_duplicate_state,
.atomic_destroy_state = vmw_du_connector_destroy_state,
- .atomic_set_property = vmw_du_connector_atomic_set_property,
- .atomic_get_property = vmw_du_connector_atomic_get_property,
};
static const struct
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
- .best_encoder = drm_atomic_helper_best_encoder,
};
@@ -1257,11 +1191,402 @@ out_srf_unref:
return ret;
}
+static uint32_t vmw_stdu_bo_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_stdu_dma) + sizeof(SVGA3dCopyBox) * num_hits +
+ sizeof(SVGA3dCmdSurfaceDMASuffix) +
+ sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ return sizeof(struct vmw_stdu_update_gb_image) +
+ sizeof(struct vmw_stdu_update);
+}
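The contrast between these two reservations is the point: with 3D support,
FIFO space scales with num_hits (one SVGA3dCopyBox per damage clip), while
the cpu-blit path reserves a constant two commands, because the damage is
collapsed into one cpu blit of the bounding box followed by a single update.
For, say, num_hits == 4 (sizes left symbolic):

/* 3D path: DMA header + 4 copy boxes + DMA suffix + screen-target update */
size_3d  = sizeof(struct vmw_stdu_dma) + 4 * sizeof(SVGA3dCopyBox) +
	   sizeof(SVGA3dCmdSurfaceDMASuffix) + sizeof(struct vmw_stdu_update);

/* cpu path: constant, independent of the number of damage clips */
size_cpu = sizeof(struct vmw_stdu_update_gb_image) +
	   sizeof(struct vmw_stdu_update);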
+
+static uint32_t vmw_stdu_bo_populate_dma(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ struct vmw_stdu_dma *cmd_dma = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ cmd_dma->header.id = SVGA_3D_CMD_SURFACE_DMA;
+ cmd_dma->header.size = sizeof(cmd_dma->body) +
+ sizeof(struct SVGA3dCopyBox) * num_hits +
+ sizeof(SVGA3dCmdSurfaceDMASuffix);
+ vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &cmd_dma->body.guest.ptr);
+ cmd_dma->body.guest.pitch = update->vfb->base.pitches[0];
+ cmd_dma->body.host.sid = stdu->display_srf->res.id;
+ cmd_dma->body.host.face = 0;
+ cmd_dma->body.host.mipmap = 0;
+ cmd_dma->body.transfer = SVGA3D_WRITE_HOST_VRAM;
+
+ return sizeof(*cmd_dma);
+}
+
+static uint32_t vmw_stdu_bo_populate_clip(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct SVGA3dCopyBox *box = cmd;
+
+ box->srcx = fb_x;
+ box->srcy = fb_y;
+ box->srcz = 0;
+ box->x = clip->x1;
+ box->y = clip->y1;
+ box->z = 0;
+ box->w = drm_rect_width(clip);
+ box->h = drm_rect_height(clip);
+ box->d = 1;
+
+ return sizeof(*box);
+}
+
+static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *bb)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ SVGA3dCmdSurfaceDMASuffix *suffix = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ suffix->suffixSize = sizeof(*suffix);
+ suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE;
+
+ vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
+ bb->y1, bb->y2);
+
+ return sizeof(*suffix) + sizeof(struct vmw_stdu_update);
+}
+
+static uint32_t vmw_stdu_bo_pre_clip_cpu(struct vmw_du_update_plane *update,
+ void *cmd, uint32_t num_hits)
+{
+ struct vmw_du_update_plane_buffer *bo_update =
+ container_of(update, typeof(*bo_update), base);
+
+ bo_update->fb_left = INT_MAX;
+ bo_update->fb_top = INT_MAX;
+
+ return 0;
+}
+
+static uint32_t vmw_stdu_bo_clip_cpu(struct vmw_du_update_plane *update,
+ void *cmd, struct drm_rect *clip,
+ uint32_t fb_x, uint32_t fb_y)
+{
+ struct vmw_du_update_plane_buffer *bo_update =
+ container_of(update, typeof(*bo_update), base);
+
+ bo_update->fb_left = min_t(int, bo_update->fb_left, fb_x);
+ bo_update->fb_top = min_t(int, bo_update->fb_top, fb_y);
+
+ return 0;
+}
+
+static uint32_t
+vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb)
+{
+ struct vmw_du_update_plane_buffer *bo_update;
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_bo *vfbbo;
+ struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
+ struct vmw_stdu_update_gb_image *cmd_img = cmd;
+ struct vmw_stdu_update *cmd_update;
+ struct ttm_buffer_object *src_bo, *dst_bo;
+ u32 src_offset, dst_offset;
+ s32 src_pitch, dst_pitch;
+ s32 width, height;
+
+ bo_update = container_of(update, typeof(*bo_update), base);
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
+
+ width = bb->x2 - bb->x1;
+ height = bb->y2 - bb->y1;
+
+ diff.cpp = stdu->cpp;
+
+ dst_bo = &stdu->display_srf->res.backup->base;
+ dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+ dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
+
+ src_bo = &vfbbo->buffer->base;
+ src_pitch = update->vfb->base.pitches[0];
+ src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
+ stdu->cpp;
+
+ (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch, src_bo,
+ src_offset, src_pitch, width * stdu->cpp, height,
+ &diff);
+
+ if (drm_rect_visible(&diff.rect)) {
+ SVGA3dBox *box = &cmd_img->body.box;
+
+ cmd_img->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd_img->header.size = sizeof(cmd_img->body);
+ cmd_img->body.image.sid = stdu->display_srf->res.id;
+ cmd_img->body.image.face = 0;
+ cmd_img->body.image.mipmap = 0;
+
+ box->x = diff.rect.x1;
+ box->y = diff.rect.y1;
+ box->z = 0;
+ box->w = drm_rect_width(&diff.rect);
+ box->h = drm_rect_height(&diff.rect);
+ box->d = 1;
+
+ cmd_update = (struct vmw_stdu_update *)&cmd_img[1];
+ vmw_stdu_populate_update(cmd_update, stdu->base.unit,
+ diff.rect.x1, diff.rect.x2,
+ diff.rect.y1, diff.rect.y2);
+
+ return sizeof(*cmd_img) + sizeof(*cmd_update);
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_stdu_plane_update_bo - Update display unit for bo backed fb.
+ * @dev_priv: device private.
+ * @plane: plane state.
+ * @old_state: old plane state.
+ * @vfb: framebuffer which is blitted to display unit.
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL in which case the device
+ * has already synchronized.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_bo(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane_buffer bo_update;
+
+ memset(&bo_update, 0, sizeof(struct vmw_du_update_plane_buffer));
+ bo_update.base.plane = plane;
+ bo_update.base.old_state = old_state;
+ bo_update.base.dev_priv = dev_priv;
+ bo_update.base.du = vmw_crtc_to_du(plane->state->crtc);
+ bo_update.base.vfb = vfb;
+ bo_update.base.out_fence = out_fence;
+ bo_update.base.mutex = NULL;
+ bo_update.base.cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+ bo_update.base.intr = false;
+
+ /*
+ * VMs without 3D support don't have the surface DMA command, and the
+ * framebuffer should be moved out of VRAM.
+ */
+ if (bo_update.base.cpu_blit) {
+ bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
+ bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
+ bo_update.base.clip = vmw_stdu_bo_clip_cpu;
+ bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
+ } else {
+ bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size;
+ bo_update.base.pre_clip = vmw_stdu_bo_populate_dma;
+ bo_update.base.clip = vmw_stdu_bo_populate_clip;
+ bo_update.base.post_clip = vmw_stdu_bo_populate_update;
+ }
+
+ return vmw_du_helper_plane_update(&bo_update.base);
+}
+
+static uint32_t
+vmw_stdu_surface_fifo_size_same_display(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ uint32_t size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ if (vfbs->is_bo_proxy)
+ size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+ size += sizeof(struct vmw_stdu_update);
+
+ return size;
+}
+
+static uint32_t vmw_stdu_surface_fifo_size(struct vmw_du_update_plane *update,
+ uint32_t num_hits)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ uint32_t size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ if (vfbs->is_bo_proxy)
+ size += sizeof(struct vmw_stdu_update_gb_image) * num_hits;
+
+ size += sizeof(struct vmw_stdu_surface_copy) + sizeof(SVGA3dCopyBox) *
+ num_hits + sizeof(struct vmw_stdu_update);
+
+ return size;
+}
+
+static uint32_t
+vmw_stdu_surface_update_proxy(struct vmw_du_update_plane *update, void *cmd)
+{
+ struct vmw_framebuffer_surface *vfbs;
+ struct drm_plane_state *state = update->plane->state;
+ struct drm_plane_state *old_state = update->old_state;
+ struct vmw_stdu_update_gb_image *cmd_update = cmd;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ uint32_t copy_size = 0;
+
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ /*
+ * A proxy surface is special: a buffer-object backed fb is wrapped in a
+ * surface and needs an update GB image command to sync with the device.
+ */
+ drm_atomic_helper_damage_iter_init(&iter, old_state, state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ SVGA3dBox *box = &cmd_update->body.box;
+
+ cmd_update->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd_update->header.size = sizeof(cmd_update->body);
+ cmd_update->body.image.sid = vfbs->surface->res.id;
+ cmd_update->body.image.face = 0;
+ cmd_update->body.image.mipmap = 0;
+
+ box->x = clip.x1;
+ box->y = clip.y1;
+ box->z = 0;
+ box->w = drm_rect_width(&clip);
+ box->h = drm_rect_height(&clip);
+ box->d = 1;
+
+ copy_size += sizeof(*cmd_update);
+ cmd_update++;
+ }
+
+ return copy_size;
+}
+
+static uint32_t
+vmw_stdu_surface_populate_copy(struct vmw_du_update_plane *update, void *cmd,
+ uint32_t num_hits)
+{
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_surface *vfbs;
+ struct vmw_stdu_surface_copy *cmd_copy = cmd;
+
+ stdu = container_of(update->du, typeof(*stdu), base);
+ vfbs = container_of(update->vfb, typeof(*vfbs), base);
+
+ cmd_copy->header.id = SVGA_3D_CMD_SURFACE_COPY;
+ cmd_copy->header.size = sizeof(cmd_copy->body) + sizeof(SVGA3dCopyBox) *
+ num_hits;
+ cmd_copy->body.src.sid = vfbs->surface->res.id;
+ cmd_copy->body.dest.sid = stdu->display_srf->res.id;
+
+ return sizeof(*cmd_copy);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_clip(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *clip, uint32_t fb_x,
+ uint32_t fb_y)
+{
+ struct SVGA3dCopyBox *box = cmd;
+
+ box->srcx = fb_x;
+ box->srcy = fb_y;
+ box->srcz = 0;
+ box->x = clip->x1;
+ box->y = clip->y1;
+ box->z = 0;
+ box->w = drm_rect_width(clip);
+ box->h = drm_rect_height(clip);
+ box->d = 1;
+
+ return sizeof(*box);
+}
+
+static uint32_t
+vmw_stdu_surface_populate_update(struct vmw_du_update_plane *update, void *cmd,
+ struct drm_rect *bb)
+{
+ vmw_stdu_populate_update(cmd, update->du->unit, bb->x1, bb->x2, bb->y1,
+ bb->y2);
+ return sizeof(struct vmw_stdu_update);
+}
/**
- * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
+ * vmw_stdu_plane_update_surface - Update display unit for surface backed fb
+ * @dev_priv: Device private
+ * @plane: Plane state
+ * @old_state: Old plane state
+ * @vfb: Framebuffer which is blitted to display unit
+ * @out_fence: If non-NULL, will return a ref-counted pointer to vmw_fence_obj.
+ * The returned fence pointer may be NULL, in which case the device
+ * has already synchronized.
*
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_state,
+ struct vmw_framebuffer *vfb,
+ struct vmw_fence_obj **out_fence)
+{
+ struct vmw_du_update_plane srf_update;
+ struct vmw_screen_target_display_unit *stdu;
+ struct vmw_framebuffer_surface *vfbs;
+
+ stdu = vmw_crtc_to_stdu(plane->state->crtc);
+ vfbs = container_of(vfb, typeof(*vfbs), base);
+
+ memset(&srf_update, 0, sizeof(struct vmw_du_update_plane));
+ srf_update.plane = plane;
+ srf_update.old_state = old_state;
+ srf_update.dev_priv = dev_priv;
+ srf_update.du = vmw_crtc_to_du(plane->state->crtc);
+ srf_update.vfb = vfb;
+ srf_update.out_fence = out_fence;
+ srf_update.mutex = &dev_priv->cmdbuf_mutex;
+ srf_update.cpu_blit = false;
+ srf_update.intr = true;
+
+ if (vfbs->is_bo_proxy)
+ srf_update.post_prepare = vmw_stdu_surface_update_proxy;
+
+ if (vfbs->surface->res.id != stdu->display_srf->res.id) {
+ srf_update.calc_fifo_size = vmw_stdu_surface_fifo_size;
+ srf_update.pre_clip = vmw_stdu_surface_populate_copy;
+ srf_update.clip = vmw_stdu_surface_populate_clip;
+ } else {
+ srf_update.calc_fifo_size =
+ vmw_stdu_surface_fifo_size_same_display;
+ }
+
+ srf_update.post_clip = vmw_stdu_surface_populate_update;
+
+ return vmw_du_helper_plane_update(&srf_update);
+}
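The hooks wired above are consumed in a fixed order by vmw_du_helper_plane_update(). A paraphrased sketch of that flow, with the damage iteration simplified and locking/fencing omitted (the hook names and signatures are the ones used in this patch; the loop below is not the literal helper implementation):

	u8 *cmd = vmw_fifo_reserve(dev_priv,
				   update->calc_fifo_size(update, num_hits));

	if (update->post_prepare)	/* e.g. proxy UPDATE_GB_IMAGE commands */
		cmd += update->post_prepare(update, cmd);
	if (update->pre_clip)		/* e.g. the SURFACE_COPY header */
		cmd += update->pre_clip(update, cmd, num_hits);
	drm_atomic_for_each_plane_damage(&iter, &clip)	/* one SVGA3dCopyBox each */
		cmd += update->clip(update, cmd, &clip, fb_x, fb_y);
	cmd += update->post_clip(update, cmd, &bb);	/* trailing vmw_stdu_update */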
+
+/**
+ * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
* @plane: display plane
* @old_state: Only used to get crtc info
*
@@ -1278,17 +1603,14 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_crtc *crtc = plane->state->crtc;
struct vmw_screen_target_display_unit *stdu;
struct drm_pending_vblank_event *event;
+ struct vmw_fence_obj *fence = NULL;
struct vmw_private *dev_priv;
int ret;
- /*
- * We cannot really fail this function, so if we do, then output an
- * error and maintain consistent atomic state.
- */
+ /* In case of device error, maintain consistent atomic state */
if (crtc && plane->state->fb) {
struct vmw_framebuffer *vfb =
vmw_framebuffer_to_vfb(plane->state->fb);
- struct drm_vmw_rect vclips;
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
@@ -1296,23 +1618,17 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu->content_fb_type = vps->content_fb_type;
stdu->cpp = vps->cpp;
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
-
ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
if (ret)
DRM_ERROR("Failed to bind surface to STDU.\n");
if (vfb->bo)
- ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
- &vclips, 1, 1, true, false,
- crtc);
+ ret = vmw_stdu_plane_update_bo(dev_priv, plane,
+ old_state, vfb, &fence);
else
- ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL,
- &vclips, NULL, 0, 0,
- 1, 1, NULL, crtc);
+ ret = vmw_stdu_plane_update_surface(dev_priv, plane,
+ old_state, vfb,
+ &fence);
if (ret)
DRM_ERROR("Failed to update STDU.\n");
} else {
@@ -1320,12 +1636,7 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
- /*
- * When disabling a plane, CRTC and FB should always be NULL
- * together, otherwise it's an error.
- * Here primary plane is being disable so blank the screen
- * target display unit, if not already done.
- */
+ /* Blank STDU when fb and crtc are NULL */
if (!stdu->defined)
return;
@@ -1340,36 +1651,25 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
return;
}
+ /* In case of error, the vblank event is sent in vmw_du_crtc_atomic_flush */
event = crtc->state->event;
- /*
- * In case of failure and other cases, vblank event will be sent in
- * vmw_du_crtc_atomic_flush.
- */
- if (event && (ret == 0)) {
- struct vmw_fence_obj *fence = NULL;
+ if (event && fence) {
struct drm_file *file_priv = event->base.file_priv;
- vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-
- /*
- * If fence is NULL, then already sync.
- */
- if (fence) {
- ret = vmw_event_fence_action_queue(
- file_priv, fence, &event->base,
- &event->event.vbl.tv_sec,
- &event->event.vbl.tv_usec,
- true);
- if (ret)
- DRM_ERROR("Failed to queue event on fence.\n");
- else
- crtc->state->event = NULL;
-
- vmw_fence_obj_unreference(&fence);
- }
- } else {
- (void) vmw_fifo_flush(dev_priv, false);
+ ret = vmw_event_fence_action_queue(file_priv,
+ fence,
+ &event->base,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
+ true);
+ if (ret)
+ DRM_ERROR("Failed to queue event on fence.\n");
+ else
+ crtc->state->event = NULL;
}
+
+ if (fence)
+ vmw_fence_obj_unreference(&fence);
}
@@ -1457,11 +1757,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
stdu->base.pref_active = (unit == 0);
stdu->base.pref_width = dev_priv->initial_width;
stdu->base.pref_height = dev_priv->initial_height;
-
- /*
- * Remove this after enabling atomic because property values can
- * only exist in a state object
- */
stdu->base.is_implicit = false;
/* Initialize primary plane */
@@ -1478,6 +1773,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
}
drm_plane_helper_add(primary, &vmw_stdu_primary_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(primary);
/* Initialize cursor plane */
vmw_du_plane_reset(cursor);
@@ -1506,7 +1802,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
drm_connector_helper_add(connector, &vmw_stdu_connector_helper_funcs);
connector->status = vmw_du_connector_detect(connector, false);
- vmw_connector_state_to_vcs(connector->state)->is_implicit = false;
ret = drm_encoder_init(dev, encoder, &vmw_stdu_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
@@ -1544,11 +1839,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit)
dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
- if (dev_priv->implicit_placement_property)
- drm_object_attach_property
- (&connector->base,
- dev_priv->implicit_placement_property,
- stdu->base.is_implicit);
return 0;
err_free_unregister:
@@ -1617,8 +1907,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
dev_priv->active_display_unit = vmw_du_screen_target;
- vmw_kms_create_implicit_placement_property(dev_priv, false);
-
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
ret = vmw_stdu_init(dev_priv, i);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 7b1e5a5cbd2c..e6d75e377dd8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -43,56 +43,38 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
-static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
+/* struct vmw_validation_mem callback */
+static int vmw_vmt_reserve(struct vmw_validation_mem *m, size_t size)
{
- DRM_INFO("global init.\n");
- return ttm_mem_global_init(ref->object);
-}
+ static struct ttm_operation_ctx ctx = {.interruptible = false,
+ .no_wait_gpu = false};
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
-static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
+ return ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ctx);
}
-int vmw_ttm_global_init(struct vmw_private *dev_priv)
+/* struct vmw_validation_mem callback */
+static void vmw_vmt_unreserve(struct vmw_validation_mem *m, size_t size)
{
- struct drm_global_reference *global_ref;
- int ret;
-
- global_ref = &dev_priv->mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &vmw_ttm_mem_global_init;
- global_ref->release = &vmw_ttm_mem_global_release;
-
- ret = drm_global_item_ref(global_ref);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed setting up TTM memory accounting.\n");
- return ret;
- }
+ struct vmw_private *dev_priv = container_of(m, struct vmw_private, vvm);
- dev_priv->bo_global_ref.mem_glob =
- dev_priv->mem_global_ref.object;
- global_ref = &dev_priv->bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- ret = drm_global_item_ref(global_ref);
-
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed setting up TTM buffer objects.\n");
- goto out_no_bo;
- }
-
- return 0;
-out_no_bo:
- drm_global_item_unref(&dev_priv->mem_global_ref);
- return ret;
+ return ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
-void vmw_ttm_global_release(struct vmw_private *dev_priv)
+/**
+ * vmw_validation_mem_init_ttm - Interface the validation memory tracker
+ * to ttm.
+ * @dev_priv: Pointer to struct vmw_private. The reason we choose a vmw private
+ * rather than a struct vmw_validation_mem is to make sure the assumption in
+ * the callbacks, that a struct vmw_validation_mem is embedded in a struct
+ * vmw_private, holds true.
+ * @gran: The recommended allocation granularity
+ */
+void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran)
{
- drm_global_item_unref(&dev_priv->bo_global_ref.ref);
- drm_global_item_unref(&dev_priv->mem_global_ref);
+ struct vmw_validation_mem *vvm = &dev_priv->vvm;
+
+ vvm->reserve_mem = vmw_vmt_reserve;
+ vvm->unreserve_mem = vmw_vmt_unreserve;
+ vvm->gran = gran;
}
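A hedged usage sketch; the granularity value below is illustrative, not one mandated by this patch. The TTM interface is wired once at device init, and each command submission then attaches the tracker to its validation context using vmw_validation_set_val_mem(), added to vmwgfx_validation.h further down in this diff:

	/* Once, at device initialization: */
	vmw_validation_mem_init_ttm(dev_priv, 16 * PAGE_SIZE);

	/* Per command submission, before the first validation allocation: */
	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);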
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 184025fa938e..b3f547fc5d3d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -104,11 +104,25 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
return NULL;
if (ctx->mem_size_left < size) {
- struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ struct page *page;
+ if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) {
+ int ret = ctx->vm->reserve_mem(ctx->vm, ctx->vm->gran);
+
+ if (ret)
+ return NULL;
+
+ ctx->vm_size_left += ctx->vm->gran;
+ ctx->total_mem += ctx->vm->gran;
+ }
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return NULL;
+ if (ctx->vm)
+ ctx->vm_size_left -= PAGE_SIZE;
+
list_add_tail(&page->lru, &ctx->page_list);
ctx->page_address = page_address(page);
ctx->mem_size_left = PAGE_SIZE;
@@ -138,6 +152,11 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
}
ctx->mem_size_left = 0;
+ if (ctx->vm && ctx->total_mem) {
+ ctx->vm->unreserve_mem(ctx->vm, ctx->total_mem);
+ ctx->total_mem = 0;
+ ctx->vm_size_left = 0;
+ }
}
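To make the accounting concrete: with @gran equal to four pages, the first vmw_validation_mem_alloc() call that exhausts the current page triggers one reserve_mem() for 4 * PAGE_SIZE, setting vm_size_left and total_mem to four pages, and then allocates a single page, leaving vm_size_left at three pages. The next three page allocations draw down the reservation without further callbacks, and vmw_validation_mem_free() hands the accumulated total_mem back in a single unreserve_mem() call.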
/**
@@ -266,7 +285,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
if (!val_buf->bo)
return -ESRCH;
- val_buf->shared = false;
+ val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
bo_node->as_mob = as_mob;
bo_node->cpu_blit = cpu_blit;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index b57e3292c386..3b396fea40d7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -34,6 +34,21 @@
#include <drm/ttm/ttm_execbuf_util.h>
/**
+ * struct vmw_validation_mem - Custom interface to provide memory reservations
+ * for the validation code.
+ * @reserve_mem: Callback to reserve memory
+ * @unreserve_mem: Callback to unreserve memory
+ * @gran: Reservation granularity. Contains a hint of how much memory should
+ * be reserved in each call to @reserve_mem(). A slow implementation may want
+ * reservation to be done in large batches.
+ */
+struct vmw_validation_mem {
+ int (*reserve_mem)(struct vmw_validation_mem *m, size_t size);
+ void (*unreserve_mem)(struct vmw_validation_mem *m, size_t size);
+ size_t gran;
+};
+
+/**
* struct vmw_validation_context - Per command submission validation context
* @ht: Hash table used to find resource- or buffer object duplicates
* @resource_list: List head for resource validation metadata
@@ -47,6 +62,10 @@
* buffer objects
* @mem_size_left: Free memory left in the last page in @page_list
* @page_address: Kernel virtual address of the last page in @page_list
+ * @vm: A pointer to the memory reservation interface or NULL if no
+ * memory reservation is needed.
+ * @vm_size_left: Amount of reserved memory that has not yet been allocated.
+ * @total_mem: Total amount of memory currently reserved.
*/
struct vmw_validation_context {
struct drm_open_hash *ht;
@@ -59,6 +78,9 @@ struct vmw_validation_context {
unsigned int merge_dups;
unsigned int mem_size_left;
u8 *page_address;
+ struct vmw_validation_mem *vm;
+ size_t vm_size_left;
+ size_t total_mem;
};
struct vmw_buffer_object;
@@ -102,6 +124,21 @@ vmw_validation_has_bos(struct vmw_validation_context *ctx)
}
/**
+ * vmw_validation_set_val_mem - Register a validation mem object for
+ * validation memory reservation
+ * @ctx: The validation context
+ * @vm: Pointer to a struct vmw_validation_mem
+ *
+ * Must be set before the first attempt to allocate validation memory.
+ */
+static inline void
+vmw_validation_set_val_mem(struct vmw_validation_context *ctx,
+ struct vmw_validation_mem *vm)
+{
+ ctx->vm = vm;
+}
+
+/**
* vmw_validation_set_ht - Register a hash table for duplicate finding
* @ctx: The validation context
* @ht: Pointer to a hash table to use for duplicate finding
diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig
index 4cca160782ab..f969d486855d 100644
--- a/drivers/gpu/drm/xen/Kconfig
+++ b/drivers/gpu/drm/xen/Kconfig
@@ -12,6 +12,7 @@ config DRM_XEN_FRONTEND
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select XEN_XENBUS_FRONTEND
+ select XEN_FRONT_PGDIR_SHBUF
help
Choose this option if you want to enable a para-virtualized
frontend DRM/KMS driver for Xen guest OSes.
diff --git a/drivers/gpu/drm/xen/Makefile b/drivers/gpu/drm/xen/Makefile
index 712afff5ffc3..825905f67faa 100644
--- a/drivers/gpu/drm/xen/Makefile
+++ b/drivers/gpu/drm/xen/Makefile
@@ -4,7 +4,6 @@ drm_xen_front-objs := xen_drm_front.o \
xen_drm_front_kms.o \
xen_drm_front_conn.o \
xen_drm_front_evtchnl.o \
- xen_drm_front_shbuf.o \
xen_drm_front_cfg.o \
xen_drm_front_gem.o
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 6b6d5ab82ec3..4d3d36fc3a5d 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -19,6 +19,7 @@
#include <xen/xen.h>
#include <xen/xenbus.h>
+#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/displif.h>
#include "xen_drm_front.h"
@@ -26,28 +27,20 @@
#include "xen_drm_front_evtchnl.h"
#include "xen_drm_front_gem.h"
#include "xen_drm_front_kms.h"
-#include "xen_drm_front_shbuf.h"
struct xen_drm_front_dbuf {
struct list_head list;
u64 dbuf_cookie;
u64 fb_cookie;
- struct xen_drm_front_shbuf *shbuf;
+
+ struct xen_front_pgdir_shbuf shbuf;
};
-static int dbuf_add_to_list(struct xen_drm_front_info *front_info,
- struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie)
+static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
+ struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
{
- struct xen_drm_front_dbuf *dbuf;
-
- dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
- if (!dbuf)
- return -ENOMEM;
-
dbuf->dbuf_cookie = dbuf_cookie;
- dbuf->shbuf = shbuf;
list_add(&dbuf->list, &front_info->dbuf_list);
- return 0;
}
static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
@@ -62,15 +55,6 @@ static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
return NULL;
}
-static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie)
-{
- struct xen_drm_front_dbuf *buf, *q;
-
- list_for_each_entry_safe(buf, q, dbuf_list, list)
- if (buf->fb_cookie == fb_cookie)
- xen_drm_front_shbuf_flush(buf->shbuf);
-}
-
static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
struct xen_drm_front_dbuf *buf, *q;
@@ -78,8 +62,8 @@ static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
list_for_each_entry_safe(buf, q, dbuf_list, list)
if (buf->dbuf_cookie == dbuf_cookie) {
list_del(&buf->list);
- xen_drm_front_shbuf_unmap(buf->shbuf);
- xen_drm_front_shbuf_free(buf->shbuf);
+ xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+ xen_front_pgdir_shbuf_free(&buf->shbuf);
kfree(buf);
break;
}
@@ -91,8 +75,8 @@ static void dbuf_free_all(struct list_head *dbuf_list)
list_for_each_entry_safe(buf, q, dbuf_list, list) {
list_del(&buf->list);
- xen_drm_front_shbuf_unmap(buf->shbuf);
- xen_drm_front_shbuf_free(buf->shbuf);
+ xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+ xen_front_pgdir_shbuf_free(&buf->shbuf);
kfree(buf);
}
}
@@ -171,9 +155,9 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
u32 bpp, u64 size, struct page **pages)
{
struct xen_drm_front_evtchnl *evtchnl;
- struct xen_drm_front_shbuf *shbuf;
+ struct xen_drm_front_dbuf *dbuf;
struct xendispl_req *req;
- struct xen_drm_front_shbuf_cfg buf_cfg;
+ struct xen_front_pgdir_shbuf_cfg buf_cfg;
unsigned long flags;
int ret;
@@ -181,28 +165,29 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
if (unlikely(!evtchnl))
return -EIO;
+ dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
+ if (!dbuf)
+ return -ENOMEM;
+
+ dbuf_add_to_list(front_info, dbuf, dbuf_cookie);
+
memset(&buf_cfg, 0, sizeof(buf_cfg));
buf_cfg.xb_dev = front_info->xb_dev;
+ buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
buf_cfg.pages = pages;
- buf_cfg.size = size;
+ buf_cfg.pgdir = &dbuf->shbuf;
buf_cfg.be_alloc = front_info->cfg.be_alloc;
- shbuf = xen_drm_front_shbuf_alloc(&buf_cfg);
- if (IS_ERR(shbuf))
- return PTR_ERR(shbuf);
-
- ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie);
- if (ret < 0) {
- xen_drm_front_shbuf_free(shbuf);
- return ret;
- }
+ ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
+ if (ret < 0)
+ goto fail_shbuf_alloc;
mutex_lock(&evtchnl->u.req.req_io_lock);
spin_lock_irqsave(&front_info->io_lock, flags);
req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
req->op.dbuf_create.gref_directory =
- xen_drm_front_shbuf_get_dir_start(shbuf);
+ xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
req->op.dbuf_create.buffer_sz = size;
req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
req->op.dbuf_create.width = width;
@@ -221,7 +206,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
if (ret < 0)
goto fail;
- ret = xen_drm_front_shbuf_map(shbuf);
+ ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
if (ret < 0)
goto fail;
@@ -230,6 +215,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
fail:
mutex_unlock(&evtchnl->u.req.req_io_lock);
+fail_shbuf_alloc:
dbuf_free(&front_info->dbuf_list, dbuf_cookie);
return ret;
}
@@ -358,7 +344,6 @@ int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
if (unlikely(conn_idx >= front_info->num_evt_pairs))
return -EINVAL;
- dbuf_flush_fb(&front_info->dbuf_list, fb_cookie);
evtchnl = &front_info->evt_pairs[conn_idx].req;
mutex_lock(&evtchnl->u.req.req_io_lock);
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 47ff019d3aef..28bc501af450 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -22,7 +22,6 @@
#include <xen/balloon.h>
#include "xen_drm_front.h"
-#include "xen_drm_front_shbuf.h"
struct xen_gem_object {
struct drm_gem_object base;
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
deleted file mode 100644
index d333b67cc1a0..000000000000
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c
+++ /dev/null
@@ -1,414 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-
-/*
- * Xen para-virtual DRM device
- *
- * Copyright (C) 2016-2018 EPAM Systems Inc.
- *
- * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
- */
-
-#include <drm/drmP.h>
-
-#if defined(CONFIG_X86)
-#include <drm/drm_cache.h>
-#endif
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <asm/xen/hypervisor.h>
-#include <xen/balloon.h>
-#include <xen/xen.h>
-#include <xen/xenbus.h>
-#include <xen/interface/io/ring.h>
-#include <xen/interface/io/displif.h>
-
-#include "xen_drm_front.h"
-#include "xen_drm_front_shbuf.h"
-
-struct xen_drm_front_shbuf_ops {
- /*
- * Calculate number of grefs required to handle this buffer,
- * e.g. if grefs are required for page directory only or the buffer
- * pages as well.
- */
- void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf);
- /* Fill page directory according to para-virtual display protocol. */
- void (*fill_page_dir)(struct xen_drm_front_shbuf *buf);
- /* Claim grant references for the pages of the buffer. */
- int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf,
- grant_ref_t *priv_gref_head, int gref_idx);
- /* Map grant references of the buffer. */
- int (*map)(struct xen_drm_front_shbuf *buf);
- /* Unmap grant references of the buffer. */
- int (*unmap)(struct xen_drm_front_shbuf *buf);
-};
-
-grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf)
-{
- if (!buf->grefs)
- return GRANT_INVALID_REF;
-
- return buf->grefs[0];
-}
-
-int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf)
-{
- if (buf->ops->map)
- return buf->ops->map(buf);
-
- /* no need to map own grant references */
- return 0;
-}
-
-int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf)
-{
- if (buf->ops->unmap)
- return buf->ops->unmap(buf);
-
- /* no need to unmap own grant references */
- return 0;
-}
-
-void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf)
-{
-#if defined(CONFIG_X86)
- drm_clflush_pages(buf->pages, buf->num_pages);
-#endif
-}
-
-void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf)
-{
- if (buf->grefs) {
- int i;
-
- for (i = 0; i < buf->num_grefs; i++)
- if (buf->grefs[i] != GRANT_INVALID_REF)
- gnttab_end_foreign_access(buf->grefs[i],
- 0, 0UL);
- }
- kfree(buf->grefs);
- kfree(buf->directory);
- kfree(buf);
-}
-
-/*
- * number of grefs a page can hold with respect to the
- * struct xendispl_page_directory header
- */
-#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
- offsetof(struct xendispl_page_directory, gref)) / \
- sizeof(grant_ref_t))
-
-static int get_num_pages_dir(struct xen_drm_front_shbuf *buf)
-{
- /* number of pages the page directory consumes itself */
- return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE);
-}
-
-static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf)
-{
- /* only for pages the page directory consumes itself */
- buf->num_grefs = get_num_pages_dir(buf);
-}
-
-static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf)
-{
- /*
- * number of pages the page directory consumes itself
- * plus grefs for the buffer pages
- */
- buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
-}
-
-#define xen_page_to_vaddr(page) \
- ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
-
-static int backend_unmap(struct xen_drm_front_shbuf *buf)
-{
- struct gnttab_unmap_grant_ref *unmap_ops;
- int i, ret;
-
- if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
- return 0;
-
- unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
- GFP_KERNEL);
- if (!unmap_ops) {
- DRM_ERROR("Failed to get memory while unmapping\n");
- return -ENOMEM;
- }
-
- for (i = 0; i < buf->num_pages; i++) {
- phys_addr_t addr;
-
- addr = xen_page_to_vaddr(buf->pages[i]);
- gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
- buf->backend_map_handles[i]);
- }
-
- ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
- buf->num_pages);
-
- for (i = 0; i < buf->num_pages; i++) {
- if (unlikely(unmap_ops[i].status != GNTST_okay))
- DRM_ERROR("Failed to unmap page %d: %d\n",
- i, unmap_ops[i].status);
- }
-
- if (ret)
- DRM_ERROR("Failed to unmap grant references, ret %d", ret);
-
- kfree(unmap_ops);
- kfree(buf->backend_map_handles);
- buf->backend_map_handles = NULL;
- return ret;
-}
-
-static int backend_map(struct xen_drm_front_shbuf *buf)
-{
- struct gnttab_map_grant_ref *map_ops = NULL;
- unsigned char *ptr;
- int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
-
- map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
- if (!map_ops)
- return -ENOMEM;
-
- buf->backend_map_handles = kcalloc(buf->num_pages,
- sizeof(*buf->backend_map_handles),
- GFP_KERNEL);
- if (!buf->backend_map_handles) {
- kfree(map_ops);
- return -ENOMEM;
- }
-
- /*
- * read page directory to get grefs from the backend: for external
- * buffer we only allocate buf->grefs for the page directory,
- * so buf->num_grefs has number of pages in the page directory itself
- */
- ptr = buf->directory;
- grefs_left = buf->num_pages;
- cur_page = 0;
- for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
- struct xendispl_page_directory *page_dir =
- (struct xendispl_page_directory *)ptr;
- int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
-
- if (to_copy > grefs_left)
- to_copy = grefs_left;
-
- for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
- phys_addr_t addr;
-
- addr = xen_page_to_vaddr(buf->pages[cur_page]);
- gnttab_set_map_op(&map_ops[cur_page], addr,
- GNTMAP_host_map,
- page_dir->gref[cur_gref],
- buf->xb_dev->otherend_id);
- cur_page++;
- }
-
- grefs_left -= to_copy;
- ptr += PAGE_SIZE;
- }
- ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
-
- /* save handles even if error, so we can unmap */
- for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
- buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
- if (unlikely(map_ops[cur_page].status != GNTST_okay))
- DRM_ERROR("Failed to map page %d: %d\n",
- cur_page, map_ops[cur_page].status);
- }
-
- if (ret) {
- DRM_ERROR("Failed to map grant references, ret %d", ret);
- backend_unmap(buf);
- }
-
- kfree(map_ops);
- return ret;
-}
-
-static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf)
-{
- struct xendispl_page_directory *page_dir;
- unsigned char *ptr;
- int i, num_pages_dir;
-
- ptr = buf->directory;
- num_pages_dir = get_num_pages_dir(buf);
-
- /* fill only grefs for the page directory itself */
- for (i = 0; i < num_pages_dir - 1; i++) {
- page_dir = (struct xendispl_page_directory *)ptr;
-
- page_dir->gref_dir_next_page = buf->grefs[i + 1];
- ptr += PAGE_SIZE;
- }
- /* last page must say there is no more pages */
- page_dir = (struct xendispl_page_directory *)ptr;
- page_dir->gref_dir_next_page = GRANT_INVALID_REF;
-}
-
-static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf)
-{
- unsigned char *ptr;
- int cur_gref, grefs_left, to_copy, i, num_pages_dir;
-
- ptr = buf->directory;
- num_pages_dir = get_num_pages_dir(buf);
-
- /*
- * while copying, skip grefs at start, they are for pages
- * granted for the page directory itself
- */
- cur_gref = num_pages_dir;
- grefs_left = buf->num_pages;
- for (i = 0; i < num_pages_dir; i++) {
- struct xendispl_page_directory *page_dir =
- (struct xendispl_page_directory *)ptr;
-
- if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) {
- to_copy = grefs_left;
- page_dir->gref_dir_next_page = GRANT_INVALID_REF;
- } else {
- to_copy = XEN_DRM_NUM_GREFS_PER_PAGE;
- page_dir->gref_dir_next_page = buf->grefs[i + 1];
- }
- memcpy(&page_dir->gref, &buf->grefs[cur_gref],
- to_copy * sizeof(grant_ref_t));
- ptr += PAGE_SIZE;
- grefs_left -= to_copy;
- cur_gref += to_copy;
- }
-}
-
-static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf,
- grant_ref_t *priv_gref_head,
- int gref_idx)
-{
- int i, cur_ref, otherend_id;
-
- otherend_id = buf->xb_dev->otherend_id;
- for (i = 0; i < buf->num_pages; i++) {
- cur_ref = gnttab_claim_grant_reference(priv_gref_head);
- if (cur_ref < 0)
- return cur_ref;
-
- gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
- xen_page_to_gfn(buf->pages[i]),
- 0);
- buf->grefs[gref_idx++] = cur_ref;
- }
- return 0;
-}
-
-static int grant_references(struct xen_drm_front_shbuf *buf)
-{
- grant_ref_t priv_gref_head;
- int ret, i, j, cur_ref;
- int otherend_id, num_pages_dir;
-
- ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
- if (ret < 0) {
- DRM_ERROR("Cannot allocate grant references\n");
- return ret;
- }
-
- otherend_id = buf->xb_dev->otherend_id;
- j = 0;
- num_pages_dir = get_num_pages_dir(buf);
- for (i = 0; i < num_pages_dir; i++) {
- unsigned long frame;
-
- cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
- if (cur_ref < 0)
- return cur_ref;
-
- frame = xen_page_to_gfn(virt_to_page(buf->directory +
- PAGE_SIZE * i));
- gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
- buf->grefs[j++] = cur_ref;
- }
-
- if (buf->ops->grant_refs_for_buffer) {
- ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
- if (ret)
- return ret;
- }
-
- gnttab_free_grant_references(priv_gref_head);
- return 0;
-}
-
-static int alloc_storage(struct xen_drm_front_shbuf *buf)
-{
- buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
- if (!buf->grefs)
- return -ENOMEM;
-
- buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
- if (!buf->directory)
- return -ENOMEM;
-
- return 0;
-}
-
-/*
- * For be allocated buffers we don't need grant_refs_for_buffer as those
- * grant references are allocated at backend side
- */
-static const struct xen_drm_front_shbuf_ops backend_ops = {
- .calc_num_grefs = backend_calc_num_grefs,
- .fill_page_dir = backend_fill_page_dir,
- .map = backend_map,
- .unmap = backend_unmap
-};
-
-/* For locally granted references we do not need to map/unmap the references */
-static const struct xen_drm_front_shbuf_ops local_ops = {
- .calc_num_grefs = guest_calc_num_grefs,
- .fill_page_dir = guest_fill_page_dir,
- .grant_refs_for_buffer = guest_grant_refs_for_buffer,
-};
-
-struct xen_drm_front_shbuf *
-xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg)
-{
- struct xen_drm_front_shbuf *buf;
- int ret;
-
- buf = kzalloc(sizeof(*buf), GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- if (cfg->be_alloc)
- buf->ops = &backend_ops;
- else
- buf->ops = &local_ops;
-
- buf->xb_dev = cfg->xb_dev;
- buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE);
- buf->pages = cfg->pages;
-
- buf->ops->calc_num_grefs(buf);
-
- ret = alloc_storage(buf);
- if (ret)
- goto fail;
-
- ret = grant_references(buf);
- if (ret)
- goto fail;
-
- buf->ops->fill_page_dir(buf);
-
- return buf;
-
-fail:
- xen_drm_front_shbuf_free(buf);
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
deleted file mode 100644
index 7545c692539e..000000000000
--- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-
-/*
- * Xen para-virtual DRM device
- *
- * Copyright (C) 2016-2018 EPAM Systems Inc.
- *
- * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
- */
-
-#ifndef __XEN_DRM_FRONT_SHBUF_H_
-#define __XEN_DRM_FRONT_SHBUF_H_
-
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-
-#include <xen/grant_table.h>
-
-struct xen_drm_front_shbuf {
- /*
- * number of references granted for the backend use:
- * - for allocated/imported dma-buf's this holds number of grant
- * references for the page directory and pages of the buffer
- * - for the buffer provided by the backend this holds number of
- * grant references for the page directory as grant references for
- * the buffer will be provided by the backend
- */
- int num_grefs;
- grant_ref_t *grefs;
- unsigned char *directory;
-
- int num_pages;
- struct page **pages;
-
- struct xenbus_device *xb_dev;
-
- /* these are the ops used internally depending on be_alloc mode */
- const struct xen_drm_front_shbuf_ops *ops;
-
- /* Xen map handles for the buffer allocated by the backend */
- grant_handle_t *backend_map_handles;
-};
-
-struct xen_drm_front_shbuf_cfg {
- struct xenbus_device *xb_dev;
- size_t size;
- struct page **pages;
- bool be_alloc;
-};
-
-struct xen_drm_front_shbuf *
-xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg);
-
-grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf);
-
-int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf);
-
-int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf);
-
-void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf);
-
-void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf);
-
-#endif /* __XEN_DRM_FRONT_SHBUF_H_ */
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 11ef17c2d1c1..f5ea32ae8600 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -114,7 +114,7 @@ out_unbind:
component_unbind_all(dev, drm);
out_unregister:
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -124,10 +124,11 @@ static void zx_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
component_unbind_all(dev, drm);
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops zx_drm_master_ops = {
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index ae8c53b4b261..83d236fd893c 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -446,7 +446,6 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
static void zx_plane_destroy(struct drm_plane *plane)
{
- drm_plane_helper_disable(plane, NULL);
drm_plane_cleanup(plane);
}
diff --git a/drivers/gpu/host1x/Makefile b/drivers/gpu/host1x/Makefile
index b92016ce09b7..096017b8789d 100644
--- a/drivers/gpu/host1x/Makefile
+++ b/drivers/gpu/host1x/Makefile
@@ -13,6 +13,7 @@ host1x-y = \
hw/host1x02.o \
hw/host1x04.o \
hw/host1x05.o \
- hw/host1x06.o
+ hw/host1x06.o \
+ hw/host1x07.o
obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index de6bc4e7fa23..419d8929a98f 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -44,6 +44,7 @@
#include "hw/host1x04.h"
#include "hw/host1x05.h"
#include "hw/host1x06.h"
+#include "hw/host1x07.h"
void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r)
{
@@ -130,7 +131,19 @@ static const struct host1x_info host1x06_info = {
.has_hypervisor = true,
};
+static const struct host1x_info host1x07_info = {
+ .nb_channels = 63,
+ .nb_pts = 704,
+ .nb_mlocks = 32,
+ .nb_bases = 0,
+ .init = host1x07_init,
+ .sync_offset = 0x0,
+ .dma_mask = DMA_BIT_MASK(40),
+ .has_hypervisor = true,
+};
+
static const struct of_device_id host1x_of_match[] = {
+ { .compatible = "nvidia,tegra194-host1x", .data = &host1x07_info, },
{ .compatible = "nvidia,tegra186-host1x", .data = &host1x06_info, },
{ .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
{ .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index d188f9068b91..95ea81172a83 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -26,7 +26,6 @@
#include "../intr.h"
#include "../job.h"
-#define HOST1X_CHANNEL_SIZE 16384
#define TRACE_MAX_LENGTH 128U
static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
@@ -203,7 +202,11 @@ static void enable_gather_filter(struct host1x *host,
static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
unsigned int index)
{
- ch->regs = dev->regs + index * HOST1X_CHANNEL_SIZE;
+#if HOST1X_HW < 6
+ ch->regs = dev->regs + index * 0x4000;
+#else
+ ch->regs = dev->regs + index * 0x100;
+#endif
enable_gather_filter(dev, ch);
return 0;
}
diff --git a/drivers/gpu/host1x/hw/debug_hw_1x06.c b/drivers/gpu/host1x/hw/debug_hw_1x06.c
index b503c740c022..8b749516c051 100644
--- a/drivers/gpu/host1x/hw/debug_hw_1x06.c
+++ b/drivers/gpu/host1x/hw/debug_hw_1x06.c
@@ -62,9 +62,12 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
struct host1x_channel *ch,
struct output *o)
{
- u32 val, rd_ptr, wr_ptr, start, end;
+#if HOST1X_HW <= 6
+ u32 rd_ptr, wr_ptr, start, end;
u32 payload = INVALID_PAYLOAD;
unsigned int data_count = 0;
+#endif
+ u32 val;
host1x_debug_output(o, "%u: fifo:\n", ch->id);
@@ -78,6 +81,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
val = host1x_ch_readl(ch, HOST1X_CHANNEL_CMDFIFO_RDATA);
host1x_debug_output(o, "CMDFIFO_RDATA %08x\n", val);
+#if HOST1X_HW <= 6
/* Peek pointer values are invalid during SLCG, so disable it */
host1x_hypervisor_writel(host, 0x1, HOST1X_HV_ICG_EN_OVERRIDE);
@@ -127,6 +131,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
host1x_hypervisor_writel(host, 0x0, HOST1X_HV_CMDFIFO_PEEK_CTRL);
host1x_hypervisor_writel(host, 0x0, HOST1X_HV_ICG_EN_OVERRIDE);
+#endif
}
static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
diff --git a/drivers/gpu/host1x/hw/host1x07.c b/drivers/gpu/host1x/hw/host1x07.c
new file mode 100644
index 000000000000..04b779a53f08
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x07.c
@@ -0,0 +1,44 @@
+/*
+ * Host1x init for Tegra194 SoCs
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* include hw specification */
+#include "host1x07.h"
+#include "host1x07_hardware.h"
+
+/* include code */
+#define HOST1X_HW 7
+
+#include "cdma_hw.c"
+#include "channel_hw.c"
+#include "debug_hw.c"
+#include "intr_hw.c"
+#include "syncpt_hw.c"
+
+#include "../dev.h"
+
+int host1x07_init(struct host1x *host)
+{
+ host->channel_op = &host1x_channel_ops;
+ host->cdma_op = &host1x_cdma_ops;
+ host->cdma_pb_op = &host1x_pushbuffer_ops;
+ host->syncpt_op = &host1x_syncpt_ops;
+ host->intr_op = &host1x_intr_ops;
+ host->debug_op = &host1x_debug_ops;
+
+ return 0;
+}
diff --git a/drivers/gpu/host1x/hw/host1x07.h b/drivers/gpu/host1x/hw/host1x07.h
new file mode 100644
index 000000000000..57b19f354274
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x07.h
@@ -0,0 +1,26 @@
+/*
+ * Host1x init for Tegra194 SoCs
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef HOST1X_HOST1X07_H
+#define HOST1X_HOST1X07_H
+
+struct host1x;
+
+int host1x07_init(struct host1x *host);
+
+#endif
diff --git a/drivers/gpu/host1x/hw/host1x07_hardware.h b/drivers/gpu/host1x/hw/host1x07_hardware.h
new file mode 100644
index 000000000000..1353e7ab71dd
--- /dev/null
+++ b/drivers/gpu/host1x/hw/host1x07_hardware.h
@@ -0,0 +1,142 @@
+/*
+ * Tegra host1x Register Offsets for Tegra194
+ *
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HOST1X_HOST1X07_HARDWARE_H
+#define __HOST1X_HOST1X07_HARDWARE_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include "hw_host1x07_uclass.h"
+#include "hw_host1x07_vm.h"
+#include "hw_host1x07_hypervisor.h"
+
+static inline u32 host1x_class_host_wait_syncpt(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_wait_syncpt_indx_f(indx)
+ | host1x_uclass_wait_syncpt_thresh_f(threshold);
+}
+
+static inline u32 host1x_class_host_load_syncpt_base(
+ unsigned indx, unsigned threshold)
+{
+ return host1x_uclass_load_syncpt_base_base_indx_f(indx)
+ | host1x_uclass_load_syncpt_base_value_f(threshold);
+}
+
+static inline u32 host1x_class_host_wait_syncpt_base(
+ unsigned indx, unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_wait_syncpt_base_indx_f(indx)
+ | host1x_uclass_wait_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_wait_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt_base(
+ unsigned base_indx, unsigned offset)
+{
+ return host1x_uclass_incr_syncpt_base_base_indx_f(base_indx)
+ | host1x_uclass_incr_syncpt_base_offset_f(offset);
+}
+
+static inline u32 host1x_class_host_incr_syncpt(
+ unsigned cond, unsigned indx)
+{
+ return host1x_uclass_incr_syncpt_cond_f(cond)
+ | host1x_uclass_incr_syncpt_indx_f(indx);
+}
+
+static inline u32 host1x_class_host_indoff_reg_write(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indbe_f(0xf)
+ | host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset);
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+static inline u32 host1x_class_host_indoff_reg_read(
+ unsigned mod_id, unsigned offset, bool auto_inc)
+{
+ u32 v = host1x_uclass_indoff_indmodid_f(mod_id)
+ | host1x_uclass_indoff_indroffset_f(offset)
+ | host1x_uclass_indoff_rwn_read_v();
+ if (auto_inc)
+ v |= host1x_uclass_indoff_autoinc_f(1);
+ return v;
+}
+
+/* cdma opcodes */
+static inline u32 host1x_opcode_setclass(
+ unsigned class_id, unsigned offset, unsigned mask)
+{
+ return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
+}
+
+static inline u32 host1x_opcode_incr(unsigned offset, unsigned count)
+{
+ return (1 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_nonincr(unsigned offset, unsigned count)
+{
+ return (2 << 28) | (offset << 16) | count;
+}
+
+static inline u32 host1x_opcode_mask(unsigned offset, unsigned mask)
+{
+ return (3 << 28) | (offset << 16) | mask;
+}
+
+static inline u32 host1x_opcode_imm(unsigned offset, unsigned value)
+{
+ return (4 << 28) | (offset << 16) | value;
+}
+
+static inline u32 host1x_opcode_imm_incr_syncpt(unsigned cond, unsigned indx)
+{
+ return host1x_opcode_imm(host1x_uclass_incr_syncpt_r(),
+ host1x_class_host_incr_syncpt(cond, indx));
+}
+
+static inline u32 host1x_opcode_restart(unsigned address)
+{
+ return (5 << 28) | (address >> 4);
+}
+
+static inline u32 host1x_opcode_gather(unsigned count)
+{
+ return (6 << 28) | count;
+}
+
+static inline u32 host1x_opcode_gather_nonincr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | count;
+}
+
+static inline u32 host1x_opcode_gather_incr(unsigned offset, unsigned count)
+{
+ return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
+}
+
+#define HOST1X_OPCODE_NOP host1x_opcode_nonincr(0, 0)
+
+#endif
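As a quick illustration of the opcode encodings above, a three-word command sequence built with these helpers (class, offsets and syncpoint index are made-up values; condition 1 is the usual OP_DONE increment condition):

	u32 stream[3];

	/* Select class 0x5a; no register write bundled into the setclass. */
	stream[0] = host1x_opcode_setclass(0x5a, 0, 0);
	/* Immediate write of value 0x1 to register offset 0x10. */
	stream[1] = host1x_opcode_imm(0x10, 0x1);
	/* Increment syncpoint 4 once the preceding operation completes. */
	stream[2] = host1x_opcode_imm_incr_syncpt(1, 4);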
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
index 4457486c72b0..e599e15bf999 100644
--- a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
@@ -59,7 +59,7 @@ static inline u32 host1x_uclass_incr_syncpt_r(void)
host1x_uclass_incr_syncpt_r()
static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
{
- return (v & 0xff) << 8;
+ return (v & 0xff) << 10;
}
#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
host1x_uclass_incr_syncpt_cond_f(v)
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h b/drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h
new file mode 100644
index 000000000000..2b99d68d3040
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x07_hypervisor.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_HV_SYNCPT_PROT_EN 0x1ac4
+#define HOST1X_HV_SYNCPT_PROT_EN_CH_EN BIT(1)
+#define HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(x) (0x2020 + (x * 4))
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL 0x233c
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ADDR(x) (x)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_CHANNEL(x) ((x) << 16)
+#define HOST1X_HV_CMDFIFO_PEEK_CTRL_ENABLE BIT(31)
+#define HOST1X_HV_CMDFIFO_PEEK_READ 0x2340
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS 0x2344
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_WR_PTR_V(x) (((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_PEEK_PTRS_RD_PTR_V(x) ((x) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP(x) (0x2588 + (x * 4))
+#define HOST1X_HV_CMDFIFO_SETUP_LIMIT_V(x) (((x) >> 16) & 0xfff)
+#define HOST1X_HV_CMDFIFO_SETUP_BASE_V(x) ((x) & 0xfff)
+#define HOST1X_HV_ICG_EN_OVERRIDE 0x2aa8
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
new file mode 100644
index 000000000000..7e4e3b377f91
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+ /*
+ * Function naming determines intended use:
+ *
+ * <x>_r(void) : Returns the offset for register <x>.
+ *
+ * <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ * and masked to place it at field <y> of register <x>. This value
+ * can be |'d with others to produce a full register value for
+ * register <x>.
+ *
+ * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This
+ * value can be ~'d and then &'d to clear the value of field <y> for
+ * register <x>.
+ *
+ * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ * to place it at field <y> of register <x>. This value can be |'d
+ * with others to produce a full register value for <x>.
+ *
+ * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ * <x> value 'r' after being shifted to place its LSB at bit 0.
+ * This value is suitable for direct comparison with other unshifted
+ * values appropriate for use in field <y> of register <x>.
+ *
+ * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ * field <y> of register <x>. This value is suitable for direct
+ * comparison with unshifted values appropriate for use in field <y>
+ * of register <x>.
+ */
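For example, composing a full WAIT_SYNCPT method value under these conventions, using the field helpers defined below (syncpoint index and threshold are arbitrary):

	/* Wait for syncpoint 3 to reach threshold 100. */
	u32 v = host1x_uclass_wait_syncpt_indx_f(3) |
		host1x_uclass_wait_syncpt_thresh_f(100);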
+
+#ifndef HOST1X_HW_HOST1X07_UCLASS_H
+#define HOST1X_HW_HOST1X07_UCLASS_H
+
+static inline u32 host1x_uclass_incr_syncpt_r(void)
+{
+ return 0x0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT \
+ host1x_uclass_incr_syncpt_r()
+static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
+{
+ return (v & 0xff) << 10;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_COND_F(v) \
+ host1x_uclass_incr_syncpt_cond_f(v)
+static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
+ host1x_uclass_incr_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_r(void)
+{
+ return 0x8;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT \
+ host1x_uclass_wait_syncpt_r()
+static inline u32 host1x_uclass_wait_syncpt_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_INDX_F(v) \
+ host1x_uclass_wait_syncpt_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_thresh_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_THRESH_F(v) \
+ host1x_uclass_wait_syncpt_thresh_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_r(void)
+{
+ return 0x9;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE \
+ host1x_uclass_wait_syncpt_base_r()
+static inline u32 host1x_uclass_wait_syncpt_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 16;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_wait_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_wait_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffff) << 0;
+}
+#define HOST1X_UCLASS_WAIT_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_wait_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_r(void)
+{
+ return 0xb;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE \
+ host1x_uclass_load_syncpt_base_r()
+static inline u32 host1x_uclass_load_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_load_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_load_syncpt_base_value_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(v) \
+ host1x_uclass_load_syncpt_base_value_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_base_indx_f(u32 v)
+{
+ return (v & 0xff) << 24;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_BASE_INDX_F(v) \
+ host1x_uclass_incr_syncpt_base_base_indx_f(v)
+static inline u32 host1x_uclass_incr_syncpt_base_offset_f(u32 v)
+{
+ return (v & 0xffffff) << 0;
+}
+#define HOST1X_UCLASS_INCR_SYNCPT_BASE_OFFSET_F(v) \
+ host1x_uclass_incr_syncpt_base_offset_f(v)
+static inline u32 host1x_uclass_indoff_r(void)
+{
+ return 0x2d;
+}
+#define HOST1X_UCLASS_INDOFF \
+ host1x_uclass_indoff_r()
+static inline u32 host1x_uclass_indoff_indbe_f(u32 v)
+{
+ return (v & 0xf) << 28;
+}
+#define HOST1X_UCLASS_INDOFF_INDBE_F(v) \
+ host1x_uclass_indoff_indbe_f(v)
+static inline u32 host1x_uclass_indoff_autoinc_f(u32 v)
+{
+ return (v & 0x1) << 27;
+}
+#define HOST1X_UCLASS_INDOFF_AUTOINC_F(v) \
+ host1x_uclass_indoff_autoinc_f(v)
+static inline u32 host1x_uclass_indoff_indmodid_f(u32 v)
+{
+ return (v & 0xff) << 18;
+}
+#define HOST1X_UCLASS_INDOFF_INDMODID_F(v) \
+ host1x_uclass_indoff_indmodid_f(v)
+static inline u32 host1x_uclass_indoff_indroffset_f(u32 v)
+{
+ return (v & 0xffff) << 2;
+}
+#define HOST1X_UCLASS_INDOFF_INDROFFSET_F(v) \
+ host1x_uclass_indoff_indroffset_f(v)
+static inline u32 host1x_uclass_indoff_rwn_read_v(void)
+{
+ return 1;
+}
+#define HOST1X_UCLASS_INDOFF_RWN_READ_V \
+	host1x_uclass_indoff_rwn_read_v()
+
+#endif
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_vm.h b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
new file mode 100644
index 000000000000..7e4629e77a2a
--- /dev/null
+++ b/drivers/gpu/host1x/hw/hw_host1x07_vm.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2018 NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#define HOST1X_CHANNEL_DMASTART 0x0000
+#define HOST1X_CHANNEL_DMASTART_HI 0x0004
+#define HOST1X_CHANNEL_DMAPUT 0x0008
+#define HOST1X_CHANNEL_DMAPUT_HI 0x000c
+#define HOST1X_CHANNEL_DMAGET 0x0010
+#define HOST1X_CHANNEL_DMAGET_HI 0x0014
+#define HOST1X_CHANNEL_DMAEND 0x0018
+#define HOST1X_CHANNEL_DMAEND_HI 0x001c
+#define HOST1X_CHANNEL_DMACTRL 0x0020
+#define HOST1X_CHANNEL_DMACTRL_DMASTOP BIT(0)
+#define HOST1X_CHANNEL_DMACTRL_DMAGETRST BIT(1)
+#define HOST1X_CHANNEL_DMACTRL_DMAINITGET BIT(2)
+#define HOST1X_CHANNEL_CMDFIFO_STAT 0x0024
+#define HOST1X_CHANNEL_CMDFIFO_STAT_EMPTY BIT(13)
+#define HOST1X_CHANNEL_CMDFIFO_RDATA 0x0028
+#define HOST1X_CHANNEL_CMDP_OFFSET 0x0030
+#define HOST1X_CHANNEL_CMDP_CLASS 0x0034
+#define HOST1X_CHANNEL_CHANNELSTAT 0x0038
+#define HOST1X_CHANNEL_CMDPROC_STOP 0x0048
+#define HOST1X_CHANNEL_TEARDOWN 0x004c
+
+#define HOST1X_SYNC_SYNCPT_CPU_INCR(x) (0x6400 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(x) (0x6464 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(x) (0x652c + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(x) (0x6590 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT(x) (0x8080 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_INT_THRESH(x) (0x8d00 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_CH_APP(x) (0xa604 + 4 * (x))
+#define HOST1X_SYNC_SYNCPT_CH_APP_CH(v) (((v) & 0x3f) << 8)
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index a23bb3352d02..d946660d47f8 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -37,10 +37,12 @@ static void syncpt_restore(struct host1x_syncpt *sp)
*/
static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
{
+#if HOST1X_HW < 7
struct host1x *host = sp->host;
host1x_sync_writel(host, sp->base_val,
HOST1X_SYNC_SYNCPT_BASE(sp->id));
+#endif
}
/*
@@ -48,10 +50,12 @@ static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
*/
static void syncpt_read_wait_base(struct host1x_syncpt *sp)
{
+#if HOST1X_HW < 7
struct host1x *host = sp->host;
sp->base_val =
host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
+#endif
}
/*
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index a9d2501500a1..163fadb8a33a 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -259,6 +259,8 @@ EXPORT_SYMBOL_GPL(ipu_cpmem_set_high_priority);
void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf)
{
+ WARN_ON_ONCE(buf & 0x7);
+
if (bufnum)
ipu_ch_param_write_field(ch, IPU_FIELD_EBA1, buf >> 3);
else
@@ -268,6 +270,8 @@ EXPORT_SYMBOL_GPL(ipu_cpmem_set_buffer);
void ipu_cpmem_set_uv_offset(struct ipuv3_channel *ch, u32 u_off, u32 v_off)
{
+ WARN_ON_ONCE((u_off & 0x7) || (v_off & 0x7));
+
ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_off / 8);
ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_off / 8);
}
@@ -435,6 +439,8 @@ void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch,
unsigned int uv_stride,
unsigned int u_offset, unsigned int v_offset)
{
+ WARN_ON_ONCE((u_offset & 0x7) || (v_offset & 0x7));
+
ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1);
ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8);
ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8);
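The WARN_ON_ONCE checks added above follow directly from the CPMEM encoding: the EBA, UBO and VBO fields store addresses and offsets in 8-byte units (hence the buf >> 3 and / 8 conversions), so a value with nonzero low three bits would be silently truncated. A caller-side sketch with illustrative numbers:

	/* 8-byte aligned offsets encode exactly ... */
	ipu_cpmem_set_uv_offset(ch, 0x1000, 0x1400);
	/* ... whereas an offset such as 0x1003 would trip the new
	 * WARN_ON_ONCE and lose its low bits in the UBO field. */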
@@ -739,48 +745,56 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
switch (pix->pixelformat) {
case V4L2_PIX_FMT_YUV420:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
- u_offset = U_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
- v_offset = V_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
+ u_offset = image->u_offset ?
+ image->u_offset : U_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = image->v_offset ?
+ image->v_offset : V_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_YVU420:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
- u_offset = U_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
- v_offset = V_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
+ u_offset = image->u_offset ?
+ image->u_offset : V_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = image->v_offset ?
+ image->v_offset : U_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
- v_offset, u_offset);
+ u_offset, v_offset);
break;
case V4L2_PIX_FMT_YUV422P:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
- u_offset = U2_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
- v_offset = V2_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
+ u_offset = image->u_offset ?
+ image->u_offset : U2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = image->v_offset ?
+ image->v_offset : V2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV12:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
- u_offset = UV_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
- v_offset = 0;
+ u_offset = image->u_offset ?
+ image->u_offset : UV_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = image->v_offset ? image->v_offset : 0;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
break;
case V4L2_PIX_FMT_NV16:
offset = Y_OFFSET(pix, image->rect.left, image->rect.top);
- u_offset = UV2_OFFSET(pix, image->rect.left,
- image->rect.top) - offset;
- v_offset = 0;
+ u_offset = image->u_offset ?
+ image->u_offset : UV2_OFFSET(pix, image->rect.left,
+ image->rect.top) - offset;
+ v_offset = image->v_offset ? image->v_offset : 0;
ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline,
u_offset, v_offset);
diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
index 67cc820253a9..594c3cbc8291 100644
--- a/drivers/gpu/ipu-v3/ipu-ic.c
+++ b/drivers/gpu/ipu-v3/ipu-ic.c
@@ -442,36 +442,40 @@ unlock:
}
EXPORT_SYMBOL_GPL(ipu_ic_task_graphics_init);
-int ipu_ic_task_init(struct ipu_ic *ic,
- int in_width, int in_height,
- int out_width, int out_height,
- enum ipu_color_space in_cs,
- enum ipu_color_space out_cs)
+int ipu_ic_task_init_rsc(struct ipu_ic *ic,
+ int in_width, int in_height,
+ int out_width, int out_height,
+ enum ipu_color_space in_cs,
+ enum ipu_color_space out_cs,
+ u32 rsc)
{
struct ipu_ic_priv *priv = ic->priv;
- u32 reg, downsize_coeff, resize_coeff;
+ u32 downsize_coeff, resize_coeff;
unsigned long flags;
int ret = 0;
- /* Setup vertical resizing */
- ret = calc_resize_coeffs(ic, in_height, out_height,
- &resize_coeff, &downsize_coeff);
- if (ret)
- return ret;
+ if (!rsc) {
+ /* Setup vertical resizing */
- reg = (downsize_coeff << 30) | (resize_coeff << 16);
+ ret = calc_resize_coeffs(ic, in_height, out_height,
+ &resize_coeff, &downsize_coeff);
+ if (ret)
+ return ret;
+
+ rsc = (downsize_coeff << 30) | (resize_coeff << 16);
- /* Setup horizontal resizing */
- ret = calc_resize_coeffs(ic, in_width, out_width,
- &resize_coeff, &downsize_coeff);
- if (ret)
- return ret;
+ /* Setup horizontal resizing */
+ ret = calc_resize_coeffs(ic, in_width, out_width,
+ &resize_coeff, &downsize_coeff);
+ if (ret)
+ return ret;
- reg |= (downsize_coeff << 14) | resize_coeff;
+ rsc |= (downsize_coeff << 14) | resize_coeff;
+ }
spin_lock_irqsave(&priv->lock, flags);
- ipu_ic_write(ic, reg, ic->reg->rsc);
+ ipu_ic_write(ic, rsc, ic->reg->rsc);
/* Setup color space conversion */
ic->in_cs = in_cs;
@@ -487,6 +491,16 @@ unlock:
spin_unlock_irqrestore(&priv->lock, flags);
return ret;
}
+
+int ipu_ic_task_init(struct ipu_ic *ic,
+ int in_width, int in_height,
+ int out_width, int out_height,
+ enum ipu_color_space in_cs,
+ enum ipu_color_space out_cs)
+{
+ return ipu_ic_task_init_rsc(ic, in_width, in_height, out_width,
+ out_height, in_cs, out_cs, 0);
+}
EXPORT_SYMBOL_GPL(ipu_ic_task_init);
int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
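
ipu_ic_task_init() is now a thin wrapper around ipu_ic_task_init_rsc(),
whose rsc argument carries both scaling stages in one register word. The
field layout can be read off the code above: vertical downsize in bits
31:30, vertical resize in 29:16, horizontal downsize in 15:14, and
horizontal resize in 13:0. A small sketch of that packing, with the
sample coefficients chosen arbitrarily:

	#include <stdio.h>

	static unsigned int pack_rsc(unsigned int down_v, unsigned int resize_v,
				     unsigned int down_h, unsigned int resize_h)
	{
		return (down_v << 30) | ((resize_v & 0x3fff) << 16) |
		       (down_h << 14) | (resize_h & 0x3fff);
	}

	int main(void)
	{
		/* /2 vertical downsize, 1:1 bilinear in both directions */
		printf("rsc = 0x%08x\n", pack_rsc(1, 8192, 0, 8192));
		return 0;
	}
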
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index f4081962784c..13103ab86050 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -37,17 +37,36 @@
* when double_buffering boolean is set).
*
* Note that the input frame must be split up into the same number
- * of tiles as the output frame.
+ * of tiles as the output frame:
*
- * FIXME: at this point there is no attempt to deal with visible seams
- * at the tile boundaries when upscaling. The seams are caused by a reset
- * of the bilinear upscale interpolation when starting a new tile. The
- * seams are barely visible for small upscale factors, but become
- * increasingly visible as the upscale factor gets larger, since more
- * interpolated pixels get thrown out at the tile boundaries. A possilble
- * fix might be to overlap tiles of different sizes, but this must be done
- * while also maintaining the IDMAC dma buffer address alignment and 8x8 IRT
- * alignment restrictions of each tile.
+ * +---------+-----+
+ * +-----+---+ | A | B |
+ * | A | B | | | |
+ * +-----+---+ --> +---------+-----+
+ * | C | D | | C | D |
+ * +-----+---+ | | |
+ * +---------+-----+
+ *
+ * Clockwise 90° rotations are handled by first rescaling into a
+ * reusable temporary tile buffer and then rotating with the 8x8
+ * block rotator, writing to the correct destination:
+ *
+ * +-----+-----+
+ * | | |
+ * +-----+---+ +---------+ | C | A |
+ * | A | B | | A,B, | | | | |
+ * +-----+---+ --> | C,D | | --> | | |
+ * | C | D | +---------+ +-----+-----+
+ * +-----+---+ | D | B |
+ * | | |
+ * +-----+-----+
+ *
+ * If the 8x8 block rotator is used, horizontal or vertical flipping
+ * is done during the rotation step, otherwise flipping is done
+ * during the scaling step.
+ * With rotation or flipping, tile order changes between the input and
+ * output images. Tiles are numbered row major from top left to bottom
+ * right for both the input and output images.
*/
#define MAX_STRIPES_W 4
@@ -84,6 +103,8 @@ struct ipu_image_convert_dma_chan {
struct ipu_image_tile {
u32 width;
u32 height;
+ u32 left;
+ u32 top;
/* size and strides are in bytes */
u32 size;
u32 stride;
@@ -135,6 +156,12 @@ struct ipu_image_convert_ctx {
struct ipu_image_convert_image in;
struct ipu_image_convert_image out;
enum ipu_rotate_mode rot_mode;
+ u32 downsize_coeff_h;
+ u32 downsize_coeff_v;
+ u32 image_resize_coeff_h;
+ u32 image_resize_coeff_v;
+ u32 resize_coeffs_h[MAX_STRIPES_W];
+ u32 resize_coeffs_v[MAX_STRIPES_H];
/* intermediate buffer for rotation */
struct ipu_image_convert_dma_buf rot_intermediate[2];
@@ -300,12 +327,11 @@ static void dump_format(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_priv *priv = chan->priv;
dev_dbg(priv->ipu->dev,
- "task %u: ctx %p: %s format: %dx%d (%dx%d tiles of size %dx%d), %c%c%c%c\n",
+ "task %u: ctx %p: %s format: %dx%d (%dx%d tiles), %c%c%c%c\n",
chan->ic_task, ctx,
ic_image->type == IMAGE_CONVERT_OUT ? "Output" : "Input",
ic_image->base.pix.width, ic_image->base.pix.height,
ic_image->num_cols, ic_image->num_rows,
- ic_image->tile[0].width, ic_image->tile[0].height,
ic_image->fmt->fourcc & 0xff,
(ic_image->fmt->fourcc >> 8) & 0xff,
(ic_image->fmt->fourcc >> 16) & 0xff,
@@ -353,24 +379,459 @@ static int alloc_dma_buf(struct ipu_image_convert_priv *priv,
static inline int num_stripes(int dim)
{
- if (dim <= 1024)
- return 1;
- else if (dim <= 2048)
+ return (dim - 1) / 1024 + 1;
+}
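
The rewritten helper is a plain ceiling division by the IC's 1024-pixel
tile limit. Unlike the old ladder, it also yields 3 stripes for sizes
between 2049 and 3072 instead of jumping straight to 4. A quick check
against a few sizes:

	#include <stdio.h>

	static int num_stripes(int dim)
	{
		return (dim - 1) / 1024 + 1;
	}

	int main(void)
	{
		int dims[] = { 1024, 1025, 2048, 2500, 4096 };
		int i;

		for (i = 0; i < 5; i++)
			printf("%4d -> %d\n", dims[i], num_stripes(dims[i]));
		return 0;	/* 1, 2, 2, 3, 4 */
	}
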
+
+/*
+ * Calculate downsizing coefficients, which are the same for all tiles,
+ * and bilinear resizing coefficients, which are used to find the best
+ * seam positions.
+ */
+static int calc_image_resize_coefficients(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image *in,
+ struct ipu_image *out)
+{
+ u32 downsized_width = in->rect.width;
+ u32 downsized_height = in->rect.height;
+ u32 downsize_coeff_v = 0;
+ u32 downsize_coeff_h = 0;
+ u32 resized_width = out->rect.width;
+ u32 resized_height = out->rect.height;
+ u32 resize_coeff_h;
+ u32 resize_coeff_v;
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ resized_width = out->rect.height;
+ resized_height = out->rect.width;
+ }
+
+ /* Do not let invalid input lead to an endless loop below */
+ if (WARN_ON(resized_width == 0 || resized_height == 0))
+ return -EINVAL;
+
+ while (downsized_width >= resized_width * 2) {
+ downsized_width >>= 1;
+ downsize_coeff_h++;
+ }
+
+ while (downsized_height >= resized_height * 2) {
+ downsized_height >>= 1;
+ downsize_coeff_v++;
+ }
+
+ /*
+ * Calculate the bilinear resizing coefficients that could be used if
+ * we were converting with a single tile. The bottom right output pixel
+ * should sample as close as possible to the bottom right input pixel
+ * out of the decimator, but not overshoot it:
+ */
+ resize_coeff_h = 8192 * (downsized_width - 1) / (resized_width - 1);
+ resize_coeff_v = 8192 * (downsized_height - 1) / (resized_height - 1);
+
+ dev_dbg(ctx->chan->priv->ipu->dev,
+ "%s: hscale: >>%u, *8192/%u vscale: >>%u, *8192/%u, %ux%u tiles\n",
+ __func__, downsize_coeff_h, resize_coeff_h, downsize_coeff_v,
+ resize_coeff_v, ctx->in.num_cols, ctx->in.num_rows);
+
+ if (downsize_coeff_h > 2 || downsize_coeff_v > 2 ||
+ resize_coeff_h > 0x3fff || resize_coeff_v > 0x3fff)
+ return -EINVAL;
+
+ ctx->downsize_coeff_h = downsize_coeff_h;
+ ctx->downsize_coeff_v = downsize_coeff_v;
+ ctx->image_resize_coeff_h = resize_coeff_h;
+ ctx->image_resize_coeff_v = resize_coeff_v;
+
+ return 0;
+}
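
The split above is the core of the scaling setup: halve with the
downsizer until the image is within 2x of the target, then express the
remainder as a 13-bit fractional bilinear coefficient where 8192 stands
for 1.0. A standalone sketch of the same arithmetic; the guard values
mirror the checks above, and the example size is arbitrary:

	#include <stdio.h>

	static int calc_coeffs(unsigned int in, unsigned int out,
			       unsigned int *downsize, unsigned int *resize)
	{
		unsigned int down = 0;

		if (out < 2)
			return -1;
		while (in >= out * 2) {
			in >>= 1;
			down++;
		}
		if (down > 2)
			return -1;	/* the downsizer supports /1, /2, /4 only */
		*downsize = down;
		*resize = 8192 * (in - 1) / (out - 1);
		return *resize <= 0x3fff ? 0 : -1;
	}

	int main(void)
	{
		unsigned int down, resize;

		if (!calc_coeffs(1920, 640, &down, &resize))
			printf("1920 -> 640: >>%u, then *8192/%u\n",
			       down, resize);
		return 0;
	}
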
+
+#define round_closest(x, y) round_down((x) + (y)/2, (y))
+
+/*
+ * Find the best aligned seam position in the interval [out_start, out_end].
+ * Rotation and image offsets are out of scope.
+ *
+ * @out_start: start of interval, must be within 1024 pixels / lines
+ * of out_end
+ * @out_end: end of interval, smaller than or equal to out_edge
+ * @in_edge: input right / bottom edge
+ * @out_edge: output right / bottom edge
+ * @in_align: input alignment, either horizontal 8-byte line start address
+ * alignment, or pixel alignment due to image format
+ * @out_align: output alignment, either horizontal 8-byte line start address
+ * alignment, or pixel alignment due to image format or rotator
+ * block size
+ * @in_burst: horizontal input burst size in case of horizontal flip
+ * @out_burst: horizontal output burst size or rotator block size
+ * @downsize_coeff: downsizing section coefficient
+ * @resize_coeff: main processing section resizing coefficient
+ * @_in_seam: aligned input seam position return value
+ * @_out_seam: aligned output seam position return value
+ */
+static void find_best_seam(struct ipu_image_convert_ctx *ctx,
+ unsigned int out_start,
+ unsigned int out_end,
+ unsigned int in_edge,
+ unsigned int out_edge,
+ unsigned int in_align,
+ unsigned int out_align,
+ unsigned int in_burst,
+ unsigned int out_burst,
+ unsigned int downsize_coeff,
+ unsigned int resize_coeff,
+ u32 *_in_seam,
+ u32 *_out_seam)
+{
+ struct device *dev = ctx->chan->priv->ipu->dev;
+ unsigned int out_pos;
+ /* Input / output seam position candidates */
+ unsigned int out_seam = 0;
+ unsigned int in_seam = 0;
+ unsigned int min_diff = UINT_MAX;
+
+ /*
+ * Output tiles must start at a multiple of 8 bytes horizontally and
+ * possibly at an even line horizontally depending on the pixel format.
+ * Only consider output aligned positions for the seam.
+ */
+ out_start = round_up(out_start, out_align);
+ for (out_pos = out_start; out_pos < out_end; out_pos += out_align) {
+ unsigned int in_pos;
+ unsigned int in_pos_aligned;
+ unsigned int abs_diff;
+
+ /*
+ * Tiles in the right column / bottom row may not be allowed to
+ * overshoot horizontally / vertically. out_burst may be the
+ * actual DMA burst size, or the rotator block size.
+ */
+ if ((out_burst > 1) && (out_edge - out_pos) % out_burst)
+ continue;
+
+ /*
+ * Input sample position, corresponding to out_pos, 19.13 fixed
+ * point.
+ */
+ in_pos = (out_pos * resize_coeff) << downsize_coeff;
+ /*
+ * The closest input sample position that we could actually
+ * start the input tile at, 19.13 fixed point.
+ */
+ in_pos_aligned = round_closest(in_pos, 8192U * in_align);
+
+ if ((in_burst > 1) &&
+ (in_edge - in_pos_aligned / 8192U) % in_burst)
+ continue;
+
+ if (in_pos < in_pos_aligned)
+ abs_diff = in_pos_aligned - in_pos;
+ else
+ abs_diff = in_pos - in_pos_aligned;
+
+ if (abs_diff < min_diff) {
+ in_seam = in_pos_aligned;
+ out_seam = out_pos;
+ min_diff = abs_diff;
+ }
+ }
+
+ *_out_seam = out_seam;
+ /* Convert 19.13 fixed point to integer seam position */
+ *_in_seam = DIV_ROUND_CLOSEST(in_seam, 8192U);
+
+ dev_dbg(dev, "%s: out_seam %u(%u) in [%u, %u], in_seam %u(%u) diff %u.%03u\n",
+ __func__, out_seam, out_align, out_start, out_end,
+ *_in_seam, in_align, min_diff / 8192,
+ DIV_ROUND_CLOSEST(min_diff % 8192 * 1000, 8192));
+}
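
The seam search works in 19.13 fixed point: an output candidate maps
back to an input sample position, which is snapped to the nearest
input-alignment boundary, and the candidate with the smallest fractional
error wins. A sketch of that mapping for one candidate, with arbitrary
example values:

	#include <stdio.h>

	#define FP_ONE 8192u	/* 1.0 in 19.13 fixed point */

	static unsigned int round_closest(unsigned int x, unsigned int y)
	{
		return (x + y / 2) / y * y;
	}

	int main(void)
	{
		unsigned int resize_coeff = 12294;	/* *8192/12294 */
		unsigned int downsize_coeff = 1;	/* plus /2 downsizer */
		unsigned int in_align = 8;		/* input left alignment */
		unsigned int out_pos = 320;

		unsigned int in_pos = (out_pos * resize_coeff) << downsize_coeff;
		unsigned int snapped = round_closest(in_pos, FP_ONE * in_align);

		printf("out %u -> in %u.%04u, snapped to %u\n",
		       out_pos, in_pos / FP_ONE,
		       (in_pos % FP_ONE) * 10000 / FP_ONE,
		       snapped / FP_ONE);
		return 0;
	}
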
+
+/*
+ * Tile left edges are required to be aligned to multiples of 8 bytes
+ * by the IDMAC.
+ */
+static inline u32 tile_left_align(const struct ipu_image_pixfmt *fmt)
+{
+ if (fmt->planar)
+ return fmt->uv_packed ? 8 : 8 * fmt->uv_width_dec;
+ else
+ return fmt->bpp == 32 ? 2 : fmt->bpp == 16 ? 4 : 8;
+}
+
+/*
+ * Tile top edge alignment is only limited by chroma subsampling.
+ */
+static inline u32 tile_top_align(const struct ipu_image_pixfmt *fmt)
+{
+ return fmt->uv_height_dec > 1 ? 2 : 1;
+}
+
+static inline u32 tile_width_align(enum ipu_image_convert_type type,
+ const struct ipu_image_pixfmt *fmt,
+ enum ipu_rotate_mode rot_mode)
+{
+ if (type == IMAGE_CONVERT_IN) {
+ /*
+ * The IC burst reads 8 pixels at a time. Reading beyond the
+ * end of the line is usually acceptable. Those pixels are
+ * ignored, unless the IC has to write the scaled line in
+ * reverse.
+ */
+ return (!ipu_rot_mode_is_irt(rot_mode) &&
+ (rot_mode & IPU_ROT_BIT_HFLIP)) ? 8 : 2;
+ }
+
+ /*
+ * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
+ * formats to guarantee 8-byte aligned line start addresses in the
+ * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
+ * for all other formats.
+ */
+ return (ipu_rot_mode_is_irt(rot_mode) &&
+ fmt->planar && !fmt->uv_packed) ?
+ 8 * fmt->uv_width_dec : 8;
+}
+
+static inline u32 tile_height_align(enum ipu_image_convert_type type,
+ const struct ipu_image_pixfmt *fmt,
+ enum ipu_rotate_mode rot_mode)
+{
+ if (type == IMAGE_CONVERT_IN || !ipu_rot_mode_is_irt(rot_mode))
return 2;
+
+ /*
+ * Align to 16x16 pixel blocks for planar 4:2:0 chroma subsampled
+ * formats to guarantee 8-byte aligned line start addresses in the
+ * chroma planes when IRT is used. Align to 8x8 pixel IRT block size
+ * for all other formats.
+ */
+ return (fmt->planar && !fmt->uv_packed) ? 8 * fmt->uv_width_dec : 8;
+}
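
A quick illustration of the left-edge rule above for a few common
formats; each result works out to an 8-byte line start address in every
plane. The struct is a local stand-in for the driver's format table:

	#include <stdbool.h>
	#include <stdio.h>

	struct fmt {
		const char *name;
		bool planar;
		bool uv_packed;
		int bpp;
		int uv_width_dec;
	};

	static int tile_left_align(const struct fmt *f)
	{
		if (f->planar)
			return f->uv_packed ? 8 : 8 * f->uv_width_dec;
		return f->bpp == 32 ? 2 : f->bpp == 16 ? 4 : 8;
	}

	int main(void)
	{
		const struct fmt fmts[] = {
			{ "YUV420P", true,  false, 12, 2 },
			{ "NV12",    true,  true,  12, 2 },
			{ "RGB565",  false, false, 16, 1 },
			{ "XRGB32",  false, false, 32, 1 },
		};
		int i;

		for (i = 0; i < 4; i++)
			printf("%-8s left align %d px\n",
			       fmts[i].name, tile_left_align(&fmts[i]));
		return 0;	/* 16, 8, 4 and 2 pixels respectively */
	}
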
+
+/*
+ * Fill in left position and width for all tiles in an input column, and
+ * for all corresponding output tiles. If the 90° rotator is used, the output
+ * tiles are in a row, and output tile top position and height are set.
+ */
+static void fill_tile_column(struct ipu_image_convert_ctx *ctx,
+ unsigned int col,
+ struct ipu_image_convert_image *in,
+ unsigned int in_left, unsigned int in_width,
+ struct ipu_image_convert_image *out,
+ unsigned int out_left, unsigned int out_width)
+{
+ unsigned int row, tile_idx;
+ struct ipu_image_tile *in_tile, *out_tile;
+
+ for (row = 0; row < in->num_rows; row++) {
+ tile_idx = in->num_cols * row + col;
+ in_tile = &in->tile[tile_idx];
+ out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
+
+ in_tile->left = in_left;
+ in_tile->width = in_width;
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ out_tile->top = out_left;
+ out_tile->height = out_width;
+ } else {
+ out_tile->left = out_left;
+ out_tile->width = out_width;
+ }
+ }
+}
+
+/*
+ * Fill in top position and height for all tiles in an input row, and
+ * for all corresponding output tiles. If the 90° rotator is used, the output
+ * tiles are in a column, and output tile left position and width are set.
+ */
+static void fill_tile_row(struct ipu_image_convert_ctx *ctx, unsigned int row,
+ struct ipu_image_convert_image *in,
+ unsigned int in_top, unsigned int in_height,
+ struct ipu_image_convert_image *out,
+ unsigned int out_top, unsigned int out_height)
+{
+ unsigned int col, tile_idx;
+ struct ipu_image_tile *in_tile, *out_tile;
+
+ for (col = 0; col < in->num_cols; col++) {
+ tile_idx = in->num_cols * row + col;
+ in_tile = &in->tile[tile_idx];
+ out_tile = &out->tile[ctx->out_tile_map[tile_idx]];
+
+ in_tile->top = in_top;
+ in_tile->height = in_height;
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ out_tile->left = out_top;
+ out_tile->width = out_height;
+ } else {
+ out_tile->top = out_top;
+ out_tile->height = out_height;
+ }
+ }
+}
+
+/*
+ * Find the best horizontal and vertical seam positions to split into tiles.
+ * Minimize the fractional part of the input sampling position for the
+ * top / left pixels of each tile.
+ */
+static void find_seams(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *in,
+ struct ipu_image_convert_image *out)
+{
+ struct device *dev = ctx->chan->priv->ipu->dev;
+ unsigned int resized_width = out->base.rect.width;
+ unsigned int resized_height = out->base.rect.height;
+ unsigned int col;
+ unsigned int row;
+ unsigned int in_left_align = tile_left_align(in->fmt);
+ unsigned int in_top_align = tile_top_align(in->fmt);
+ unsigned int out_left_align = tile_left_align(out->fmt);
+ unsigned int out_top_align = tile_top_align(out->fmt);
+ unsigned int out_width_align = tile_width_align(out->type, out->fmt,
+ ctx->rot_mode);
+ unsigned int out_height_align = tile_height_align(out->type, out->fmt,
+ ctx->rot_mode);
+ unsigned int in_right = in->base.rect.width;
+ unsigned int in_bottom = in->base.rect.height;
+ unsigned int out_right = out->base.rect.width;
+ unsigned int out_bottom = out->base.rect.height;
+ unsigned int flipped_out_left;
+ unsigned int flipped_out_top;
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ /* Switch width/height and align top left to IRT block size */
+ resized_width = out->base.rect.height;
+ resized_height = out->base.rect.width;
+ out_left_align = out_height_align;
+ out_top_align = out_width_align;
+ out_width_align = out_left_align;
+ out_height_align = out_top_align;
+ out_right = out->base.rect.height;
+ out_bottom = out->base.rect.width;
+ }
+
+ for (col = in->num_cols - 1; col > 0; col--) {
+ bool allow_in_overshoot = ipu_rot_mode_is_irt(ctx->rot_mode) ||
+ !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+ bool allow_out_overshoot = (col < in->num_cols - 1) &&
+ !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+ unsigned int out_start;
+ unsigned int out_end;
+ unsigned int in_left;
+ unsigned int out_left;
+
+ /*
+ * Align input width to burst length if the scaling step flips
+ * horizontally.
+ */
+
+ /* Start within 1024 pixels of the right edge */
+ out_start = max_t(int, 0, out_right - 1024);
+ /* End before having to add more columns to the left */
+ out_end = min_t(unsigned int, out_right, col * 1024);
+
+ find_best_seam(ctx, out_start, out_end,
+ in_right, out_right,
+ in_left_align, out_left_align,
+ allow_in_overshoot ? 1 : 8 /* burst length */,
+ allow_out_overshoot ? 1 : out_width_align,
+ ctx->downsize_coeff_h, ctx->image_resize_coeff_h,
+ &in_left, &out_left);
+
+ if (ctx->rot_mode & IPU_ROT_BIT_HFLIP)
+ flipped_out_left = resized_width - out_right;
+ else
+ flipped_out_left = out_left;
+
+ fill_tile_column(ctx, col, in, in_left, in_right - in_left,
+ out, flipped_out_left, out_right - out_left);
+
+ dev_dbg(dev, "%s: col %u: %u, %u -> %u, %u\n", __func__, col,
+ in_left, in_right - in_left,
+ flipped_out_left, out_right - out_left);
+
+ in_right = in_left;
+ out_right = out_left;
+ }
+
+ flipped_out_left = (ctx->rot_mode & IPU_ROT_BIT_HFLIP) ?
+ resized_width - out_right : 0;
+
+ fill_tile_column(ctx, 0, in, 0, in_right,
+ out, flipped_out_left, out_right);
+
+ dev_dbg(dev, "%s: col 0: 0, %u -> %u, %u\n", __func__,
+ in_right, flipped_out_left, out_right);
+
+ for (row = in->num_rows - 1; row > 0; row--) {
+ bool allow_overshoot = row < in->num_rows - 1;
+ unsigned int out_start;
+ unsigned int out_end;
+ unsigned int in_top;
+ unsigned int out_top;
+
+ /* Start within 1024 lines of the bottom edge */
+ out_start = max_t(int, 0, out_bottom - 1024);
+ /* End before having to add more rows above */
+ out_end = min_t(unsigned int, out_bottom, row * 1024);
+
+ find_best_seam(ctx, out_start, out_end,
+ in_bottom, out_bottom,
+ in_top_align, out_top_align,
+ 1, allow_overshoot ? 1 : out_height_align,
+ ctx->downsize_coeff_v, ctx->image_resize_coeff_v,
+ &in_top, &out_top);
+
+ if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
+ ipu_rot_mode_is_irt(ctx->rot_mode))
+ flipped_out_top = resized_height - out_bottom;
+ else
+ flipped_out_top = out_top;
+
+ fill_tile_row(ctx, row, in, in_top, in_bottom - in_top,
+ out, flipped_out_top, out_bottom - out_top);
+
+ dev_dbg(dev, "%s: row %u: %u, %u -> %u, %u\n", __func__, row,
+ in_top, in_bottom - in_top,
+ flipped_out_top, out_bottom - out_top);
+
+ in_bottom = in_top;
+ out_bottom = out_top;
+ }
+
+ if ((ctx->rot_mode & IPU_ROT_BIT_VFLIP) ^
+ ipu_rot_mode_is_irt(ctx->rot_mode))
+ flipped_out_top = resized_height - out_bottom;
else
- return 4;
+ flipped_out_top = 0;
+
+ fill_tile_row(ctx, 0, in, 0, in_bottom,
+ out, flipped_out_top, out_bottom);
+
+ dev_dbg(dev, "%s: row 0: 0, %u -> %u, %u\n", __func__,
+ in_bottom, flipped_out_top, out_bottom);
}
static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_image *image)
{
- int i;
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ unsigned int i;
for (i = 0; i < ctx->num_tiles; i++) {
- struct ipu_image_tile *tile = &image->tile[i];
+ struct ipu_image_tile *tile;
+ const unsigned int row = i / image->num_cols;
+ const unsigned int col = i % image->num_cols;
+
+ if (image->type == IMAGE_CONVERT_OUT)
+ tile = &image->tile[ctx->out_tile_map[i]];
+ else
+ tile = &image->tile[i];
- tile->height = image->base.pix.height / image->num_rows;
- tile->width = image->base.pix.width / image->num_cols;
tile->size = ((tile->height * image->fmt->bpp) >> 3) *
tile->width;
@@ -383,6 +844,13 @@ static void calc_tile_dimensions(struct ipu_image_convert_ctx *ctx,
tile->rot_stride =
(image->fmt->bpp * tile->height) >> 3;
}
+
+ dev_dbg(priv->ipu->dev,
+ "task %u: ctx %p: %s@[%u,%u]: %ux%u@%u,%u\n",
+ chan->ic_task, ctx,
+ image->type == IMAGE_CONVERT_IN ? "Input" : "Output",
+ row, col,
+ tile->width, tile->height, tile->left, tile->top);
}
}
@@ -459,14 +927,14 @@ static void calc_out_tile_map(struct ipu_image_convert_ctx *ctx)
}
}
-static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
- struct ipu_image_convert_image *image)
+static int calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *image)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
const struct ipu_image_pixfmt *fmt = image->fmt;
unsigned int row, col, tile = 0;
- u32 H, w, h, y_stride, uv_stride;
+ u32 H, top, y_stride, uv_stride;
u32 uv_row_off, uv_col_off, uv_off, u_off, v_off, tmp;
u32 y_row_off, y_col_off, y_off;
u32 y_size, uv_size;
@@ -483,13 +951,12 @@ static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
uv_size = y_size / (fmt->uv_width_dec * fmt->uv_height_dec);
for (row = 0; row < image->num_rows; row++) {
- w = image->tile[tile].width;
- h = image->tile[tile].height;
- y_row_off = row * h * y_stride;
- uv_row_off = (row * h * uv_stride) / fmt->uv_height_dec;
+ top = image->tile[tile].top;
+ y_row_off = top * y_stride;
+ uv_row_off = (top * uv_stride) / fmt->uv_height_dec;
for (col = 0; col < image->num_cols; col++) {
- y_col_off = col * w;
+ y_col_off = image->tile[tile].left;
uv_col_off = y_col_off / fmt->uv_width_dec;
if (fmt->uv_packed)
uv_col_off *= 2;
@@ -509,24 +976,30 @@ static void calc_tile_offsets_planar(struct ipu_image_convert_ctx *ctx,
image->tile[tile].u_off = u_off;
image->tile[tile++].v_off = v_off;
- dev_dbg(priv->ipu->dev,
- "task %u: ctx %p: %s@[%d,%d]: y_off %08x, u_off %08x, v_off %08x\n",
- chan->ic_task, ctx,
- image->type == IMAGE_CONVERT_IN ?
- "Input" : "Output", row, col,
- y_off, u_off, v_off);
+ if ((y_off & 0x7) || (u_off & 0x7) || (v_off & 0x7)) {
+ dev_err(priv->ipu->dev,
+ "task %u: ctx %p: %s@[%d,%d]: "
+ "y_off %08x, u_off %08x, v_off %08x\n",
+ chan->ic_task, ctx,
+ image->type == IMAGE_CONVERT_IN ?
+ "Input" : "Output", row, col,
+ y_off, u_off, v_off);
+ return -EINVAL;
+ }
}
}
+
+ return 0;
}
-static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
- struct ipu_image_convert_image *image)
+static int calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
+ struct ipu_image_convert_image *image)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
const struct ipu_image_pixfmt *fmt = image->fmt;
unsigned int row, col, tile = 0;
- u32 w, h, bpp, stride;
+ u32 bpp, stride, offset;
u32 row_off, col_off;
/* setup some convenience vars */
@@ -534,34 +1007,183 @@ static void calc_tile_offsets_packed(struct ipu_image_convert_ctx *ctx,
bpp = fmt->bpp;
for (row = 0; row < image->num_rows; row++) {
- w = image->tile[tile].width;
- h = image->tile[tile].height;
- row_off = row * h * stride;
+ row_off = image->tile[tile].top * stride;
for (col = 0; col < image->num_cols; col++) {
- col_off = (col * w * bpp) >> 3;
+ col_off = (image->tile[tile].left * bpp) >> 3;
+
+ offset = row_off + col_off;
- image->tile[tile].offset = row_off + col_off;
+ image->tile[tile].offset = offset;
image->tile[tile].u_off = 0;
image->tile[tile++].v_off = 0;
- dev_dbg(priv->ipu->dev,
- "task %u: ctx %p: %s@[%d,%d]: phys %08x\n",
- chan->ic_task, ctx,
- image->type == IMAGE_CONVERT_IN ?
- "Input" : "Output", row, col,
- row_off + col_off);
+ if (offset & 0x7) {
+ dev_err(priv->ipu->dev,
+ "task %u: ctx %p: %s@[%d,%d]: "
+ "phys %08x\n",
+ chan->ic_task, ctx,
+ image->type == IMAGE_CONVERT_IN ?
+ "Input" : "Output", row, col,
+ row_off + col_off);
+ return -EINVAL;
+ }
}
}
+
+ return 0;
}
-static void calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
+static int calc_tile_offsets(struct ipu_image_convert_ctx *ctx,
struct ipu_image_convert_image *image)
{
if (image->fmt->planar)
- calc_tile_offsets_planar(ctx, image);
+ return calc_tile_offsets_planar(ctx, image);
+
+ return calc_tile_offsets_packed(ctx, image);
+}
+
+/*
+ * Calculate the resizing ratio for the IC main processing section given input
+ * size, fixed downsizing coefficient, and output size.
+ * Either round to closest for the next tile's first pixel to minimize seams
+ * and distortion (for all but right column / bottom row), or round down to
+ * avoid sampling beyond the edges of the input image for this tile's last
+ * pixel.
+ * Returns the resizing coefficient, resizing ratio is 8192.0 / resize_coeff.
+ */
+static u32 calc_resize_coeff(u32 input_size, u32 downsize_coeff,
+ u32 output_size, bool allow_overshoot)
+{
+ u32 downsized = input_size >> downsize_coeff;
+
+ if (allow_overshoot)
+ return DIV_ROUND_CLOSEST(8192 * downsized, output_size);
else
- calc_tile_offsets_packed(ctx, image);
+ return 8192 * (downsized - 1) / (output_size - 1);
+}
+
+/*
+ * Slightly modify resize coefficients per tile to hide the bilinear
+ * interpolator reset at tile borders, shifting the right / bottom edge
+ * by up to a half input pixel. This removes noticeable seams between
+ * tiles at higher upscaling factors.
+ */
+static void calc_tile_resize_coefficients(struct ipu_image_convert_ctx *ctx)
+{
+ struct ipu_image_convert_chan *chan = ctx->chan;
+ struct ipu_image_convert_priv *priv = chan->priv;
+ struct ipu_image_tile *in_tile, *out_tile;
+ unsigned int col, row, tile_idx;
+ unsigned int last_output;
+
+ for (col = 0; col < ctx->in.num_cols; col++) {
+ bool closest = (col < ctx->in.num_cols - 1) &&
+ !(ctx->rot_mode & IPU_ROT_BIT_HFLIP);
+ u32 resized_width;
+ u32 resize_coeff_h;
+
+ tile_idx = col;
+ in_tile = &ctx->in.tile[tile_idx];
+ out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode))
+ resized_width = out_tile->height;
+ else
+ resized_width = out_tile->width;
+
+ resize_coeff_h = calc_resize_coeff(in_tile->width,
+ ctx->downsize_coeff_h,
+ resized_width, closest);
+
+ dev_dbg(priv->ipu->dev, "%s: column %u hscale: *8192/%u\n",
+ __func__, col, resize_coeff_h);
+
+ for (row = 0; row < ctx->in.num_rows; row++) {
+ tile_idx = row * ctx->in.num_cols + col;
+ in_tile = &ctx->in.tile[tile_idx];
+ out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+ /*
+ * With the horizontal scaling factor known, round up
+ * resized width (output width or height) to burst size.
+ */
+ if (ipu_rot_mode_is_irt(ctx->rot_mode))
+ out_tile->height = round_up(resized_width, 8);
+ else
+ out_tile->width = round_up(resized_width, 8);
+
+ /*
+ * Calculate input width from the last accessed input
+ * pixel given resized width and scaling coefficients.
+ * Round up to burst size.
+ */
+ last_output = round_up(resized_width, 8) - 1;
+ if (closest)
+ last_output++;
+ in_tile->width = round_up(
+ (DIV_ROUND_UP(last_output * resize_coeff_h,
+ 8192) + 1)
+ << ctx->downsize_coeff_h, 8);
+ }
+
+ ctx->resize_coeffs_h[col] = resize_coeff_h;
+ }
+
+ for (row = 0; row < ctx->in.num_rows; row++) {
+ bool closest = (row < ctx->in.num_rows - 1) &&
+ !(ctx->rot_mode & IPU_ROT_BIT_VFLIP);
+ u32 resized_height;
+ u32 resize_coeff_v;
+
+ tile_idx = row * ctx->in.num_cols;
+ in_tile = &ctx->in.tile[tile_idx];
+ out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+ if (ipu_rot_mode_is_irt(ctx->rot_mode))
+ resized_height = out_tile->width;
+ else
+ resized_height = out_tile->height;
+
+ resize_coeff_v = calc_resize_coeff(in_tile->height,
+ ctx->downsize_coeff_v,
+ resized_height, closest);
+
+ dev_dbg(priv->ipu->dev, "%s: row %u vscale: *8192/%u\n",
+ __func__, row, resize_coeff_v);
+
+ for (col = 0; col < ctx->in.num_cols; col++) {
+ tile_idx = row * ctx->in.num_cols + col;
+ in_tile = &ctx->in.tile[tile_idx];
+ out_tile = &ctx->out.tile[ctx->out_tile_map[tile_idx]];
+
+ /*
+ * With the vertical scaling factor known, round up
+ * resized height (output width or height) to IDMAC
+ * limitations.
+ */
+ if (ipu_rot_mode_is_irt(ctx->rot_mode))
+ out_tile->width = round_up(resized_height, 2);
+ else
+ out_tile->height = round_up(resized_height, 2);
+
+ /*
+ * Calculate input height from the last accessed input
+ * pixel given resized height and scaling coefficients.
+ * Align to IDMAC restrictions.
+ */
+ last_output = round_up(resized_height, 2) - 1;
+ if (closest)
+ last_output++;
+ in_tile->height = round_up(
+ (DIV_ROUND_UP(last_output * resize_coeff_v,
+ 8192) + 1)
+ << ctx->downsize_coeff_v, 2);
+ }
+
+ ctx->resize_coeffs_v[row] = resize_coeff_v;
+ }
}
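
The inner loops above back-calculate how much input each tile really
needs once its resized size is rounded up to DMA restrictions: map the
last output pixel through the bilinear and downsize stages, then round
up to the burst. A sketch of the horizontal case with arbitrary values;
DIV_ROUND_UP and ROUND_UP are local stand-ins for the kernel macros:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define ROUND_UP(x, y)		(DIV_ROUND_UP(x, y) * (y))

	int main(void)
	{
		unsigned int resized_width = 250;   /* tile output width */
		unsigned int resize_coeff = 12294;  /* *8192/12294 bilinear */
		unsigned int downsize_coeff = 1;    /* plus /2 downsizer */

		/* round-down variant; 'closest' adds one more output pixel */
		unsigned int last_output = ROUND_UP(resized_width, 8) - 1;
		unsigned int in_width = ROUND_UP(
			(DIV_ROUND_UP(last_output * resize_coeff, 8192) + 1)
				<< downsize_coeff, 8);

		printf("%u output pixels need %u input pixels\n",
		       ROUND_UP(resized_width, 8), in_width);
		return 0;
	}
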
/*
@@ -611,7 +1233,8 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
struct ipuv3_channel *channel,
struct ipu_image_convert_image *image,
enum ipu_rotate_mode rot_mode,
- bool rot_swap_width_height)
+ bool rot_swap_width_height,
+ unsigned int tile)
{
struct ipu_image_convert_chan *chan = ctx->chan;
unsigned int burst_size;
@@ -621,23 +1244,23 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
unsigned int tile_idx[2];
if (image->type == IMAGE_CONVERT_OUT) {
- tile_idx[0] = ctx->out_tile_map[0];
+ tile_idx[0] = ctx->out_tile_map[tile];
tile_idx[1] = ctx->out_tile_map[1];
} else {
- tile_idx[0] = 0;
+ tile_idx[0] = tile;
tile_idx[1] = 1;
}
if (rot_swap_width_height) {
- width = image->tile[0].height;
- height = image->tile[0].width;
- stride = image->tile[0].rot_stride;
+ width = image->tile[tile_idx[0]].height;
+ height = image->tile[tile_idx[0]].width;
+ stride = image->tile[tile_idx[0]].rot_stride;
addr0 = ctx->rot_intermediate[0].phys;
if (ctx->double_buffering)
addr1 = ctx->rot_intermediate[1].phys;
} else {
- width = image->tile[0].width;
- height = image->tile[0].height;
+ width = image->tile[tile_idx[0]].width;
+ height = image->tile[tile_idx[0]].height;
stride = image->stride;
addr0 = image->base.phys0 +
image->tile[tile_idx[0]].offset;
@@ -655,12 +1278,12 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
tile_image.pix.pixelformat = image->fmt->fourcc;
tile_image.phys0 = addr0;
tile_image.phys1 = addr1;
- ipu_cpmem_set_image(channel, &tile_image);
+ if (image->fmt->planar && !rot_swap_width_height) {
+ tile_image.u_offset = image->tile[tile_idx[0]].u_off;
+ tile_image.v_offset = image->tile[tile_idx[0]].v_off;
+ }
- if (image->fmt->planar && !rot_swap_width_height)
- ipu_cpmem_set_uv_offset(channel,
- image->tile[tile_idx[0]].u_off,
- image->tile[tile_idx[0]].v_off);
+ ipu_cpmem_set_image(channel, &tile_image);
if (rot_mode)
ipu_cpmem_set_rotation(channel, rot_mode);
@@ -687,7 +1310,7 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}
-static int convert_start(struct ipu_image_convert_run *run)
+static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
{
struct ipu_image_convert_ctx *ctx = run->ctx;
struct ipu_image_convert_chan *chan = ctx->chan;
@@ -695,31 +1318,47 @@ static int convert_start(struct ipu_image_convert_run *run)
struct ipu_image_convert_image *s_image = &ctx->in;
struct ipu_image_convert_image *d_image = &ctx->out;
enum ipu_color_space src_cs, dest_cs;
+ unsigned int dst_tile = ctx->out_tile_map[tile];
unsigned int dest_width, dest_height;
+ unsigned int col, row;
+ u32 rsc;
int ret;
- dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p\n",
- __func__, chan->ic_task, ctx, run);
+ dev_dbg(priv->ipu->dev, "%s: task %u: starting ctx %p run %p tile %u -> %u\n",
+ __func__, chan->ic_task, ctx, run, tile, dst_tile);
src_cs = ipu_pixelformat_to_colorspace(s_image->fmt->fourcc);
dest_cs = ipu_pixelformat_to_colorspace(d_image->fmt->fourcc);
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
/* swap width/height for resizer */
- dest_width = d_image->tile[0].height;
- dest_height = d_image->tile[0].width;
+ dest_width = d_image->tile[dst_tile].height;
+ dest_height = d_image->tile[dst_tile].width;
} else {
- dest_width = d_image->tile[0].width;
- dest_height = d_image->tile[0].height;
+ dest_width = d_image->tile[dst_tile].width;
+ dest_height = d_image->tile[dst_tile].height;
}
+ row = tile / s_image->num_cols;
+ col = tile % s_image->num_cols;
+
+ rsc = (ctx->downsize_coeff_v << 30) |
+ (ctx->resize_coeffs_v[row] << 16) |
+ (ctx->downsize_coeff_h << 14) |
+ (ctx->resize_coeffs_h[col]);
+
+ dev_dbg(priv->ipu->dev, "%s: %ux%u -> %ux%u (rsc = 0x%x)\n",
+ __func__, s_image->tile[tile].width,
+ s_image->tile[tile].height, dest_width, dest_height, rsc);
+
/* setup the IC resizer and CSC */
- ret = ipu_ic_task_init(chan->ic,
- s_image->tile[0].width,
- s_image->tile[0].height,
+ ret = ipu_ic_task_init_rsc(chan->ic,
+ s_image->tile[tile].width,
+ s_image->tile[tile].height,
dest_width,
dest_height,
- src_cs, dest_cs);
+ src_cs, dest_cs,
+ rsc);
if (ret) {
dev_err(priv->ipu->dev, "ipu_ic_task_init failed, %d\n", ret);
return ret;
@@ -727,27 +1366,27 @@ static int convert_start(struct ipu_image_convert_run *run)
/* init the source MEM-->IC PP IDMAC channel */
init_idmac_channel(ctx, chan->in_chan, s_image,
- IPU_ROTATE_NONE, false);
+ IPU_ROTATE_NONE, false, tile);
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
/* init the IC PP-->MEM IDMAC channel */
init_idmac_channel(ctx, chan->out_chan, d_image,
- IPU_ROTATE_NONE, true);
+ IPU_ROTATE_NONE, true, tile);
/* init the MEM-->IC PP ROT IDMAC channel */
init_idmac_channel(ctx, chan->rotation_in_chan, d_image,
- ctx->rot_mode, true);
+ ctx->rot_mode, true, tile);
/* init the destination IC PP ROT-->MEM IDMAC channel */
init_idmac_channel(ctx, chan->rotation_out_chan, d_image,
- IPU_ROTATE_NONE, false);
+ IPU_ROTATE_NONE, false, tile);
/* now link IC PP-->MEM to MEM-->IC PP ROT */
ipu_idmac_link(chan->out_chan, chan->rotation_in_chan);
} else {
/* init the destination IC PP-->MEM IDMAC channel */
init_idmac_channel(ctx, chan->out_chan, d_image,
- ctx->rot_mode, false);
+ ctx->rot_mode, false, tile);
}
/* enable the IC */
@@ -805,7 +1444,7 @@ static int do_run(struct ipu_image_convert_run *run)
list_del(&run->list);
chan->current_run = run;
- return convert_start(run);
+ return convert_start(run, 0);
}
/* hold irqlock when calling */
@@ -896,7 +1535,7 @@ static irqreturn_t do_bh(int irq, void *dev_id)
dev_dbg(priv->ipu->dev,
"%s: task %u: signaling abort for ctx %p\n",
__func__, chan->ic_task, ctx);
- complete(&ctx->aborted);
+ complete_all(&ctx->aborted);
}
}
@@ -908,6 +1547,24 @@ static irqreturn_t do_bh(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
+{
+ unsigned int cur_tile = ctx->next_tile - 1;
+ unsigned int next_tile = ctx->next_tile;
+
+ if (ctx->resize_coeffs_h[cur_tile % ctx->in.num_cols] !=
+ ctx->resize_coeffs_h[next_tile % ctx->in.num_cols] ||
+ ctx->resize_coeffs_v[cur_tile / ctx->in.num_cols] !=
+ ctx->resize_coeffs_v[next_tile / ctx->in.num_cols] ||
+ ctx->in.tile[cur_tile].width != ctx->in.tile[next_tile].width ||
+ ctx->in.tile[cur_tile].height != ctx->in.tile[next_tile].height ||
+ ctx->out.tile[cur_tile].width != ctx->out.tile[next_tile].width ||
+ ctx->out.tile[cur_tile].height != ctx->out.tile[next_tile].height)
+ return true;
+
+ return false;
+}
+
/* hold irqlock when calling */
static irqreturn_t do_irq(struct ipu_image_convert_run *run)
{
@@ -951,27 +1608,32 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run)
* not done, place the next tile buffers.
*/
if (!ctx->double_buffering) {
-
- src_tile = &s_image->tile[ctx->next_tile];
- dst_idx = ctx->out_tile_map[ctx->next_tile];
- dst_tile = &d_image->tile[dst_idx];
-
- ipu_cpmem_set_buffer(chan->in_chan, 0,
- s_image->base.phys0 + src_tile->offset);
- ipu_cpmem_set_buffer(outch, 0,
- d_image->base.phys0 + dst_tile->offset);
- if (s_image->fmt->planar)
- ipu_cpmem_set_uv_offset(chan->in_chan,
- src_tile->u_off,
- src_tile->v_off);
- if (d_image->fmt->planar)
- ipu_cpmem_set_uv_offset(outch,
- dst_tile->u_off,
- dst_tile->v_off);
-
- ipu_idmac_select_buffer(chan->in_chan, 0);
- ipu_idmac_select_buffer(outch, 0);
-
+ if (ic_settings_changed(ctx)) {
+ convert_stop(run);
+ convert_start(run, ctx->next_tile);
+ } else {
+ src_tile = &s_image->tile[ctx->next_tile];
+ dst_idx = ctx->out_tile_map[ctx->next_tile];
+ dst_tile = &d_image->tile[dst_idx];
+
+ ipu_cpmem_set_buffer(chan->in_chan, 0,
+ s_image->base.phys0 +
+ src_tile->offset);
+ ipu_cpmem_set_buffer(outch, 0,
+ d_image->base.phys0 +
+ dst_tile->offset);
+ if (s_image->fmt->planar)
+ ipu_cpmem_set_uv_offset(chan->in_chan,
+ src_tile->u_off,
+ src_tile->v_off);
+ if (d_image->fmt->planar)
+ ipu_cpmem_set_uv_offset(outch,
+ dst_tile->u_off,
+ dst_tile->v_off);
+
+ ipu_idmac_select_buffer(chan->in_chan, 0);
+ ipu_idmac_select_buffer(outch, 0);
+ }
} else if (ctx->next_tile < ctx->num_tiles - 1) {
src_tile = &s_image->tile[ctx->next_tile + 1];
@@ -1198,9 +1860,6 @@ static int fill_image(struct ipu_image_convert_ctx *ctx,
else
ic_image->stride = ic_image->base.pix.bytesperline;
- calc_tile_dimensions(ctx, ic_image);
- calc_tile_offsets(ctx, ic_image);
-
return 0;
}
@@ -1221,40 +1880,11 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
return x;
}
-/*
- * We have to adjust the tile width such that the tile physaddrs and
- * U and V plane offsets are multiples of 8 bytes as required by
- * the IPU DMA Controller. For the planar formats, this corresponds
- * to a pixel alignment of 16 (but use a more formal equation since
- * the variables are available). For all the packed formats, 8 is
- * good enough.
- */
-static inline u32 tile_width_align(const struct ipu_image_pixfmt *fmt)
-{
- return fmt->planar ? 8 * fmt->uv_width_dec : 8;
-}
-
-/*
- * For tile height alignment, we have to ensure that the output tile
- * heights are multiples of 8 lines if the IRT is required by the
- * given rotation mode (the IRT performs rotations on 8x8 blocks
- * at a time). If the IRT is not used, or for input image tiles,
- * 2 lines are good enough.
- */
-static inline u32 tile_height_align(enum ipu_image_convert_type type,
- enum ipu_rotate_mode rot_mode)
-{
- return (type == IMAGE_CONVERT_OUT &&
- ipu_rot_mode_is_irt(rot_mode)) ? 8 : 2;
-}
-
/* Adjusts input/output images to IPU restrictions */
void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
enum ipu_rotate_mode rot_mode)
{
const struct ipu_image_pixfmt *infmt, *outfmt;
- unsigned int num_in_rows, num_in_cols;
- unsigned int num_out_rows, num_out_cols;
u32 w_align, h_align;
infmt = get_format(in->pix.pixelformat);
@@ -1286,36 +1916,31 @@ void ipu_image_convert_adjust(struct ipu_image *in, struct ipu_image *out,
in->pix.height / 4);
}
- /* get tiling rows/cols from output format */
- num_out_rows = num_stripes(out->pix.height);
- num_out_cols = num_stripes(out->pix.width);
- if (ipu_rot_mode_is_irt(rot_mode)) {
- num_in_rows = num_out_cols;
- num_in_cols = num_out_rows;
- } else {
- num_in_rows = num_out_rows;
- num_in_cols = num_out_cols;
- }
-
/* align input width/height */
- w_align = ilog2(tile_width_align(infmt) * num_in_cols);
- h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, rot_mode) *
- num_in_rows);
+ w_align = ilog2(tile_width_align(IMAGE_CONVERT_IN, infmt, rot_mode));
+ h_align = ilog2(tile_height_align(IMAGE_CONVERT_IN, infmt, rot_mode));
in->pix.width = clamp_align(in->pix.width, MIN_W, MAX_W, w_align);
in->pix.height = clamp_align(in->pix.height, MIN_H, MAX_H, h_align);
/* align output width/height */
- w_align = ilog2(tile_width_align(outfmt) * num_out_cols);
- h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, rot_mode) *
- num_out_rows);
+ w_align = ilog2(tile_width_align(IMAGE_CONVERT_OUT, outfmt, rot_mode));
+ h_align = ilog2(tile_height_align(IMAGE_CONVERT_OUT, outfmt, rot_mode));
out->pix.width = clamp_align(out->pix.width, MIN_W, MAX_W, w_align);
out->pix.height = clamp_align(out->pix.height, MIN_H, MAX_H, h_align);
/* set input/output strides and image sizes */
- in->pix.bytesperline = (in->pix.width * infmt->bpp) >> 3;
- in->pix.sizeimage = in->pix.height * in->pix.bytesperline;
- out->pix.bytesperline = (out->pix.width * outfmt->bpp) >> 3;
- out->pix.sizeimage = out->pix.height * out->pix.bytesperline;
+ in->pix.bytesperline = infmt->planar ?
+ clamp_align(in->pix.width, 2 << w_align, MAX_W, w_align) :
+ clamp_align((in->pix.width * infmt->bpp) >> 3,
+ 2 << w_align, MAX_W, w_align);
+ in->pix.sizeimage = infmt->planar ?
+ (in->pix.height * in->pix.bytesperline * infmt->bpp) >> 3 :
+ in->pix.height * in->pix.bytesperline;
+ out->pix.bytesperline = outfmt->planar ? out->pix.width :
+ (out->pix.width * outfmt->bpp) >> 3;
+ out->pix.sizeimage = outfmt->planar ?
+ (out->pix.height * out->pix.bytesperline * outfmt->bpp) >> 3 :
+ out->pix.height * out->pix.bytesperline;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_adjust);
@@ -1360,6 +1985,7 @@ ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
struct ipu_image_convert_chan *chan;
struct ipu_image_convert_ctx *ctx;
unsigned long flags;
+ unsigned int i;
bool get_res;
int ret;
@@ -1412,8 +2038,26 @@ ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
if (ret)
goto out_free;
+ ret = calc_image_resize_coefficients(ctx, in, out);
+ if (ret)
+ goto out_free;
+
calc_out_tile_map(ctx);
+ find_seams(ctx, s_image, d_image);
+
+ calc_tile_dimensions(ctx, s_image);
+ ret = calc_tile_offsets(ctx, s_image);
+ if (ret)
+ goto out_free;
+
+ calc_tile_dimensions(ctx, d_image);
+ ret = calc_tile_offsets(ctx, d_image);
+ if (ret)
+ goto out_free;
+
+ calc_tile_resize_coefficients(ctx);
+
dump_format(ctx, s_image);
dump_format(ctx, d_image);
@@ -1429,21 +2073,51 @@ ipu_image_convert_prepare(struct ipu_soc *ipu, enum ipu_ic_task ic_task,
* for every tile, and therefore would have to be updated for
* each buffer which is not possible. So double-buffering is
* impossible when either the source or destination images are
- * a planar format (YUV420, YUV422P, etc.).
+ * a planar format (YUV420, YUV422P, etc.). Further, differently
+ * sized tiles or different resizing coefficients per tile
+ * prevent double-buffering as well.
*/
ctx->double_buffering = (ctx->num_tiles > 1 &&
!s_image->fmt->planar &&
!d_image->fmt->planar);
+ for (i = 1; i < ctx->num_tiles; i++) {
+ if (ctx->in.tile[i].width != ctx->in.tile[0].width ||
+ ctx->in.tile[i].height != ctx->in.tile[0].height ||
+ ctx->out.tile[i].width != ctx->out.tile[0].width ||
+ ctx->out.tile[i].height != ctx->out.tile[0].height) {
+ ctx->double_buffering = false;
+ break;
+ }
+ }
+ for (i = 1; i < ctx->in.num_cols; i++) {
+ if (ctx->resize_coeffs_h[i] != ctx->resize_coeffs_h[0]) {
+ ctx->double_buffering = false;
+ break;
+ }
+ }
+ for (i = 1; i < ctx->in.num_rows; i++) {
+ if (ctx->resize_coeffs_v[i] != ctx->resize_coeffs_v[0]) {
+ ctx->double_buffering = false;
+ break;
+ }
+ }
if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
+ unsigned long intermediate_size = d_image->tile[0].size;
+
+ for (i = 1; i < ctx->num_tiles; i++) {
+ if (d_image->tile[i].size > intermediate_size)
+ intermediate_size = d_image->tile[i].size;
+ }
+
ret = alloc_dma_buf(priv, &ctx->rot_intermediate[0],
- d_image->tile[0].size);
+ intermediate_size);
if (ret)
goto out_free;
if (ctx->double_buffering) {
ret = alloc_dma_buf(priv,
&ctx->rot_intermediate[1],
- d_image->tile[0].size);
+ intermediate_size);
if (ret)
goto out_free_dmabuf0;
}
@@ -1524,16 +2198,13 @@ unlock:
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
/* Abort any active or pending conversions for this context */
-void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
struct ipu_image_convert_run *run, *active_run, *tmp;
unsigned long flags;
int run_count, ret;
- bool need_abort;
-
- reinit_completion(&ctx->aborted);
spin_lock_irqsave(&chan->irqlock, flags);
@@ -1549,22 +2220,28 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
active_run = (chan->current_run && chan->current_run->ctx == ctx) ?
chan->current_run : NULL;
- need_abort = (run_count || active_run);
+ if (active_run)
+ reinit_completion(&ctx->aborted);
- ctx->aborting = need_abort;
+ ctx->aborting = true;
spin_unlock_irqrestore(&chan->irqlock, flags);
- if (!need_abort) {
+ if (!run_count && !active_run) {
dev_dbg(priv->ipu->dev,
"%s: task %u: no abort needed for ctx %p\n",
__func__, chan->ic_task, ctx);
return;
}
+ if (!active_run) {
+ empty_done_q(chan);
+ return;
+ }
+
dev_dbg(priv->ipu->dev,
- "%s: task %u: wait for completion: %d runs, active run %p\n",
- __func__, chan->ic_task, run_count, active_run);
+ "%s: task %u: wait for completion: %d runs\n",
+ __func__, chan->ic_task, run_count);
ret = wait_for_completion_timeout(&ctx->aborted,
msecs_to_jiffies(10000));
@@ -1572,7 +2249,11 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
force_abort(ctx);
}
+}
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+{
+ __ipu_image_convert_abort(ctx);
ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
@@ -1586,7 +2267,7 @@ void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
bool put_res;
/* make sure no runs are hanging around */
- ipu_image_convert_abort(ctx);
+ __ipu_image_convert_abort(ctx);
dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
chan->ic_task, ctx);
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c61b04555779..dc8e039bfab5 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -676,7 +676,7 @@ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
vga_arbiter_check_bridge_sharing(vgadev);
/* Add to the list */
- list_add(&vgadev->list, &vga_list);
+ list_add_tail(&vgadev->list, &vga_list);
vga_count++;
vgaarb_info(&pdev->dev, "VGA device added: decodes=%s,owns=%s,locks=%s\n",
vga_iostate_to_str(vgadev->decodes),
@@ -1408,6 +1408,18 @@ static void __init vga_arb_select_default_device(void)
struct vga_device *vgadev;
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ u64 base = screen_info.lfb_base;
+ u64 size = screen_info.lfb_size;
+ u64 limit;
+ resource_size_t start, end;
+ unsigned long flags;
+ int i;
+
+ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ base |= (u64)screen_info.ext_lfb_base << 32;
+
+ limit = base + size;
+
list_for_each_entry(vgadev, &vga_list, list) {
struct device *dev = &vgadev->pdev->dev;
/*
@@ -1418,11 +1430,6 @@ static void __init vga_arb_select_default_device(void)
* Select the device owning the boot framebuffer if there is
* one.
*/
- resource_size_t start, end, limit;
- unsigned long flags;
- int i;
-
- limit = screen_info.lfb_base + screen_info.lfb_size;
/* Does firmware framebuffer belong to us? */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -1437,7 +1444,7 @@ static void __init vga_arb_select_default_device(void)
if (!start || !end)
continue;
- if (screen_info.lfb_base < start || limit >= end)
+ if (base < start || limit >= end)
continue;
if (!vga_default_device())
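
The point of the change above is that firmware can place the boot
framebuffer above 4 GiB by splitting its base across lfb_base and
ext_lfb_base, so the range comparison must use the assembled 64-bit
value. A hedged sketch of the assembly; the struct is a local stand-in
for struct screen_info and the capability bit value is an assumption
mirroring the kernel header:

	#include <stdint.h>
	#include <stdio.h>

	#define VIDEO_CAPABILITY_64BIT_BASE (1 << 1)	/* assumed value */

	struct si {
		uint32_t lfb_base, lfb_size, ext_lfb_base, capabilities;
	};

	int main(void)
	{
		struct si s = {
			.lfb_base = 0x80000000, .lfb_size = 0x01000000,
			.ext_lfb_base = 0x4,
			.capabilities = VIDEO_CAPABILITY_64BIT_BASE,
		};
		uint64_t base = s.lfb_base;

		if (s.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
			base |= (uint64_t)s.ext_lfb_base << 32;

		printf("fb spans [0x%llx, 0x%llx)\n",
		       (unsigned long long)base,
		       (unsigned long long)(base + s.lfb_size));
		return 0;
	}
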
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index b372854cf38d..704049e62d58 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -309,7 +309,7 @@ static void mousevsc_on_receive(struct hv_device *device,
hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
input_dev->input_buf, len, 1);
- pm_wakeup_event(&input_dev->device->device, 0);
+ pm_wakeup_hard_event(&input_dev->device->device);
break;
default:
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index ed35c9a9a110..27519eb8ee63 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -17,6 +17,9 @@
#ifndef HID_IDS_H_FILE
#define HID_IDS_H_FILE
+#define USB_VENDOR_ID_258A 0x258a
+#define USB_DEVICE_ID_258A_6A88 0x6a88
+
#define USB_VENDOR_ID_3M 0x0596
#define USB_DEVICE_ID_3M1968 0x0500
#define USB_DEVICE_ID_3M2256 0x0502
@@ -941,6 +944,10 @@
#define USB_VENDOR_ID_REALTEK 0x0bda
#define USB_DEVICE_ID_REALTEK_READER 0x0152
+#define USB_VENDOR_ID_RETROUSB 0xf000
+#define USB_DEVICE_ID_RETROUSB_SNES_RETROPAD 0x0003
+#define USB_DEVICE_ID_RETROUSB_SNES_RETROPORT 0x00f1
+
#define USB_VENDOR_ID_ROCCAT 0x1e7d
#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index 1882a4ab0f29..98b059d79bc8 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -42,6 +42,7 @@ static int ite_event(struct hid_device *hdev, struct hid_field *field,
static const struct hid_device_id ite_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index c85a79986b6a..94088c0ed68a 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -137,6 +137,8 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index e8a114157f87..bb012bc032e0 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -358,7 +358,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
sensor_inst->hsdev,
sensor_inst->hsdev->usage,
usage, report_id,
- SENSOR_HUB_SYNC);
+ SENSOR_HUB_SYNC, false);
} else if (!strncmp(name, "units", strlen("units")))
value = sensor_inst->fields[field_index].attribute.units;
else if (!strncmp(name, "unit-expo", strlen("unit-expo")))
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 2b63487057c2..4256fdc5cd6d 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(sensor_hub_get_feature);
int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
u32 attr_usage_id, u32 report_id,
- enum sensor_hub_read_flags flag)
+ enum sensor_hub_read_flags flag,
+ bool is_signed)
{
struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
unsigned long flags;
@@ -331,10 +332,16 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
&hsdev->pending.ready, HZ*5);
switch (hsdev->pending.raw_size) {
case 1:
- ret_val = *(u8 *)hsdev->pending.raw_data;
+ if (is_signed)
+ ret_val = *(s8 *)hsdev->pending.raw_data;
+ else
+ ret_val = *(u8 *)hsdev->pending.raw_data;
break;
case 2:
- ret_val = *(u16 *)hsdev->pending.raw_data;
+ if (is_signed)
+ ret_val = *(s16 *)hsdev->pending.raw_data;
+ else
+ ret_val = *(u16 *)hsdev->pending.raw_data;
break;
case 4:
ret_val = *(u32 *)hsdev->pending.raw_data;
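
The new is_signed path matters because reading a raw byte through a u8
pointer zero-extends while an s8 pointer sign-extends: 0xff is 255 one
way and -1 the other. A minimal demonstration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t raw[1] = { 0xff };
		int as_unsigned = *(uint8_t *)raw;
		int as_signed = *(int8_t *)raw;

		printf("unsigned: %d, signed: %d\n", as_unsigned, as_signed);
		return 0;	/* prints "unsigned: 255, signed: -1" */
	}
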
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 97954f575c3f..1c1a2514d6f3 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -4,7 +4,7 @@ menu "Microsoft Hyper-V guest support"
config HYPERV
tristate "Microsoft Hyper-V client drivers"
- depends on X86 && ACPI && PCI && X86_LOCAL_APIC && HYPERVISOR_GUEST
+ depends on X86 && ACPI && X86_LOCAL_APIC && HYPERVISOR_GUEST
select PARAVIRT
help
Select this option to run Linux as a Hyper-V client operating
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index de8193f3b838..fe00b12e4417 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -516,6 +516,14 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
}
wait_for_completion(&msginfo->waitevent);
+ if (msginfo->response.gpadl_created.creation_status != 0) {
+ pr_err("Failed to establish GPADL: err = 0x%x\n",
+ msginfo->response.gpadl_created.creation_status);
+
+ ret = -EDQUOT;
+ goto cleanup;
+ }
+
if (channel->rescind) {
ret = -ENODEV;
goto cleanup;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 6277597d3d58..edd34c167a9b 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -435,61 +435,16 @@ void vmbus_free_channels(void)
}
}
-/*
- * vmbus_process_offer - Process the offer by creating a channel/device
- * associated with this offer
- */
-static void vmbus_process_offer(struct vmbus_channel *newchannel)
+/* Note: the function can run concurrently for primary/sub channels. */
+static void vmbus_add_channel_work(struct work_struct *work)
{
- struct vmbus_channel *channel;
- bool fnew = true;
+ struct vmbus_channel *newchannel =
+ container_of(work, struct vmbus_channel, add_channel_work);
+ struct vmbus_channel *primary_channel = newchannel->primary_channel;
unsigned long flags;
u16 dev_type;
int ret;
- /* Make sure this is a new offer */
- mutex_lock(&vmbus_connection.channel_mutex);
-
- /*
- * Now that we have acquired the channel_mutex,
- * we can release the potentially racing rescind thread.
- */
- atomic_dec(&vmbus_connection.offer_in_progress);
-
- list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
- if (!uuid_le_cmp(channel->offermsg.offer.if_type,
- newchannel->offermsg.offer.if_type) &&
- !uuid_le_cmp(channel->offermsg.offer.if_instance,
- newchannel->offermsg.offer.if_instance)) {
- fnew = false;
- break;
- }
- }
-
- if (fnew)
- list_add_tail(&newchannel->listentry,
- &vmbus_connection.chn_list);
-
- mutex_unlock(&vmbus_connection.channel_mutex);
-
- if (!fnew) {
- /*
- * Check to see if this is a sub-channel.
- */
- if (newchannel->offermsg.offer.sub_channel_index != 0) {
- /*
- * Process the sub-channel.
- */
- newchannel->primary_channel = channel;
- spin_lock_irqsave(&channel->lock, flags);
- list_add_tail(&newchannel->sc_list, &channel->sc_list);
- channel->num_sc++;
- spin_unlock_irqrestore(&channel->lock, flags);
- } else {
- goto err_free_chan;
- }
- }
-
dev_type = hv_get_dev_type(newchannel);
init_vp_index(newchannel, dev_type);
@@ -507,27 +462,26 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
/*
* This state is used to indicate a successful open
* so that when we do close the channel normally, we
- * can cleanup properly
+ * can cleanup properly.
*/
newchannel->state = CHANNEL_OPEN_STATE;
- if (!fnew) {
- struct hv_device *dev
- = newchannel->primary_channel->device_obj;
+ if (primary_channel != NULL) {
+ /* newchannel is a sub-channel. */
+ struct hv_device *dev = primary_channel->device_obj;
if (vmbus_add_channel_kobj(dev, newchannel))
- goto err_free_chan;
+ goto err_deq_chan;
+
+ if (primary_channel->sc_creation_callback != NULL)
+ primary_channel->sc_creation_callback(newchannel);
- if (channel->sc_creation_callback != NULL)
- channel->sc_creation_callback(newchannel);
newchannel->probe_done = true;
return;
}
/*
- * Start the process of binding this offer to the driver
- * We need to set the DeviceObject field before calling
- * vmbus_child_dev_add()
+ * Start the process of binding the primary channel to the driver
*/
newchannel->device_obj = vmbus_device_create(
&newchannel->offermsg.offer.if_type,
@@ -556,13 +510,28 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
err_deq_chan:
mutex_lock(&vmbus_connection.channel_mutex);
- list_del(&newchannel->listentry);
+
+ /*
+ * We need to set the flag, otherwise
+ * vmbus_onoffer_rescind() can be blocked.
+ */
+ newchannel->probe_done = true;
+
+ if (primary_channel == NULL) {
+ list_del(&newchannel->listentry);
+ } else {
+ spin_lock_irqsave(&primary_channel->lock, flags);
+ list_del(&newchannel->sc_list);
+ spin_unlock_irqrestore(&primary_channel->lock, flags);
+ }
+
mutex_unlock(&vmbus_connection.channel_mutex);
if (newchannel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(newchannel->target_cpu,
- percpu_channel_deq, newchannel, true);
+ percpu_channel_deq,
+ newchannel, true);
} else {
percpu_channel_deq(newchannel);
put_cpu();
@@ -570,14 +539,104 @@ err_deq_chan:
vmbus_release_relid(newchannel->offermsg.child_relid);
-err_free_chan:
free_channel(newchannel);
}
/*
+ * vmbus_process_offer - Process the offer by creating a channel/device
+ * associated with this offer
+ */
+static void vmbus_process_offer(struct vmbus_channel *newchannel)
+{
+ struct vmbus_channel *channel;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+ bool fnew = true;
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+
+ /*
+ * Now that we have acquired the channel_mutex,
+ * we can release the potentially racing rescind thread.
+ */
+ atomic_dec(&vmbus_connection.offer_in_progress);
+
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ if (!uuid_le_cmp(channel->offermsg.offer.if_type,
+ newchannel->offermsg.offer.if_type) &&
+ !uuid_le_cmp(channel->offermsg.offer.if_instance,
+ newchannel->offermsg.offer.if_instance)) {
+ fnew = false;
+ break;
+ }
+ }
+
+ if (fnew)
+ list_add_tail(&newchannel->listentry,
+ &vmbus_connection.chn_list);
+ else {
+ /*
+ * Check to see if this is a valid sub-channel.
+ */
+ if (newchannel->offermsg.offer.sub_channel_index == 0) {
+ mutex_unlock(&vmbus_connection.channel_mutex);
+ /*
+ * Don't call free_channel(), because newchannel->kobj
+ * is not initialized yet.
+ */
+ kfree(newchannel);
+ WARN_ON_ONCE(1);
+ return;
+ }
+ /*
+ * Process the sub-channel.
+ */
+ newchannel->primary_channel = channel;
+ spin_lock_irqsave(&channel->lock, flags);
+ list_add_tail(&newchannel->sc_list, &channel->sc_list);
+ spin_unlock_irqrestore(&channel->lock, flags);
+ }
+
+ mutex_unlock(&vmbus_connection.channel_mutex);
+
+ /*
+ * vmbus_process_offer() mustn't call channel->sc_creation_callback()
+ * directly for sub-channels, because sc_creation_callback() ->
+ * vmbus_open() may never get the host's response to the
+ * OPEN_CHANNEL message (the host may rescind a channel at any time,
+ * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
+ * may not wake up vmbus_open(), as it's blocked due to a non-zero
+ * vmbus_connection.offer_in_progress, and finally we have a deadlock.
+ *
+ * The above is also true for primary channels, if the related device
+ * drivers use sync probing mode by default.
+ *
+ * And, usually the handling of primary channels and sub-channels can
+ * depend on each other, so we should offload them to different
+ * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
+ * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
+ * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
+ * and waits for all the sub-channels to appear, but the latter
+ * can't get the rtnl_lock and this blocks the handling of
+ * sub-channels.
+ */
+ INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
+ wq = fnew ? vmbus_connection.handle_primary_chan_wq :
+ vmbus_connection.handle_sub_chan_wq;
+ queue_work(wq, &newchannel->add_channel_work);
+}
+
+/*
* We use this state to statically distribute the channel interrupt load.
*/
static int next_numa_node_id;
+/*
+ * init_vp_index() accesses global variables like next_numa_node_id, and
+ * it can run concurrently for primary channels and sub-channels (see
+ * vmbus_process_offer()), so we need the lock to protect the global
+ * variables.
+ */
+static DEFINE_SPINLOCK(bind_channel_to_cpu_lock);
/*
* Starting with Win8, we can statically distribute the incoming
@@ -613,6 +672,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
return;
}
+ spin_lock(&bind_channel_to_cpu_lock);
+
/*
* Based on the channel affinity policy, we will assign the NUMA
* nodes.
@@ -695,6 +756,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
channel->target_cpu = cur_cpu;
channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
+ spin_unlock(&bind_channel_to_cpu_lock);
+
free_cpumask_var(available_mask);
}
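The comment block in vmbus_process_offer() above carries the core of this change: offers are no longer handled inline but offloaded to per-type workqueues, so primary-channel and sub-channel handling can never block each other. A condensed sketch of that pattern, with hypothetical short names and all error handling omitted:

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

/* Condensed sketch of the offload pattern above (hypothetical names). */
struct chan {
        struct work_struct add_channel_work;
        bool is_primary;
};

static void add_channel_work_fn(struct work_struct *work)
{
        struct chan *ch = container_of(work, struct chan, add_channel_work);

        /*
         * Slow path: may wait for host responses without blocking the
         * workqueue that handles the other channel type.
         */
        pr_debug("adding channel %p\n", ch);
}

static void process_offer(struct chan *ch,
                          struct workqueue_struct *pri_wq,
                          struct workqueue_struct *sub_wq)
{
        INIT_WORK(&ch->add_channel_work, add_channel_work_fn);
        queue_work(ch->is_primary ? pri_wq : sub_wq, &ch->add_channel_work);
}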
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index f4d08c8ac7f8..4fe117b761ce 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -190,6 +190,20 @@ int vmbus_connect(void)
goto cleanup;
}
+ vmbus_connection.handle_primary_chan_wq =
+ create_workqueue("hv_pri_chan");
+ if (!vmbus_connection.handle_primary_chan_wq) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ vmbus_connection.handle_sub_chan_wq =
+ create_workqueue("hv_sub_chan");
+ if (!vmbus_connection.handle_sub_chan_wq) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
INIT_LIST_HEAD(&vmbus_connection.chn_msg_list);
spin_lock_init(&vmbus_connection.channelmsg_lock);
@@ -280,10 +294,14 @@ void vmbus_disconnect(void)
*/
vmbus_initiate_unload(false);
- if (vmbus_connection.work_queue) {
- drain_workqueue(vmbus_connection.work_queue);
+ if (vmbus_connection.handle_sub_chan_wq)
+ destroy_workqueue(vmbus_connection.handle_sub_chan_wq);
+
+ if (vmbus_connection.handle_primary_chan_wq)
+ destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
+
+ if (vmbus_connection.work_queue)
destroy_workqueue(vmbus_connection.work_queue);
- }
if (vmbus_connection.int_page) {
free_pages((unsigned long)vmbus_connection.int_page, 0);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 332d7c34be5c..11273cd384d6 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -143,7 +143,7 @@ static int hv_ce_shutdown(struct clock_event_device *evt)
static int hv_ce_set_oneshot(struct clock_event_device *evt)
{
- union hv_timer_config timer_cfg;
+ union hv_stimer_config timer_cfg;
timer_cfg.as_uint64 = 0;
timer_cfg.enable = 1;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 72eaba3d50fc..ea201034b248 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -44,74 +44,6 @@
*/
#define HV_UTIL_NEGO_TIMEOUT 55
-/* Define synthetic interrupt controller flag constants. */
-#define HV_EVENT_FLAGS_COUNT (256 * 8)
-#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))
-
-/*
- * Timer configuration register.
- */
-union hv_timer_config {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 periodic:1;
- u64 lazy:1;
- u64 auto_enable:1;
- u64 apic_vector:8;
- u64 direct_mode:1;
- u64 reserved_z0:3;
- u64 sintx:4;
- u64 reserved_z1:44;
- };
-};
-
-
-/* Define the synthetic interrupt controller event flags format. */
-union hv_synic_event_flags {
- unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
-};
-
-/* Define SynIC control register. */
-union hv_synic_scontrol {
- u64 as_uint64;
- struct {
- u64 enable:1;
- u64 reserved:63;
- };
-};
-
-/* Define synthetic interrupt source. */
-union hv_synic_sint {
- u64 as_uint64;
- struct {
- u64 vector:8;
- u64 reserved1:8;
- u64 masked:1;
- u64 auto_eoi:1;
- u64 reserved2:46;
- };
-};
-
-/* Define the format of the SIMP register */
-union hv_synic_simp {
- u64 as_uint64;
- struct {
- u64 simp_enabled:1;
- u64 preserved:11;
- u64 base_simp_gpa:52;
- };
-};
-
-/* Define the format of the SIEFP register */
-union hv_synic_siefp {
- u64 as_uint64;
- struct {
- u64 siefp_enabled:1;
- u64 preserved:11;
- u64 base_siefp_gpa:52;
- };
-};
/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
@@ -335,7 +267,14 @@ struct vmbus_connection {
struct list_head chn_list;
struct mutex channel_mutex;
+ /*
+ * An offer message is handled first on the work_queue, and then
+ * is further handled on handle_primary_chan_wq or
+ * handle_sub_chan_wq.
+ */
struct workqueue_struct *work_queue;
+ struct workqueue_struct *handle_primary_chan_wq;
+ struct workqueue_struct *handle_sub_chan_wq;
};
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 283d184280af..d0ff65675292 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -316,6 +316,8 @@ static ssize_t out_intr_mask_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
@@ -329,6 +331,8 @@ static ssize_t out_read_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.current_read_index);
}
@@ -343,6 +347,8 @@ static ssize_t out_write_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.current_write_index);
}
@@ -357,6 +363,8 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
@@ -371,6 +379,8 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
@@ -384,6 +394,8 @@ static ssize_t in_intr_mask_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
@@ -397,6 +409,8 @@ static ssize_t in_read_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.current_read_index);
}
@@ -410,6 +424,8 @@ static ssize_t in_write_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.current_write_index);
}
@@ -424,6 +440,8 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
@@ -438,6 +456,8 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
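Each ring-buffer attribute above now repeats the same two guards before touching the ring buffer. A hypothetical consolidation of that guard (the patch itself deliberately open-codes it per attribute; this sketch assumes the hyperv headers for struct hv_device and CHANNEL_OPENED_STATE):

#include <linux/errno.h>
#include <linux/hyperv.h>

static int hv_channel_debuginfo_ready(const struct hv_device *hv_dev)
{
        if (!hv_dev->channel)
                return -ENODEV;         /* no channel bound yet */
        if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
                return -EINVAL;         /* ring buffers not initialized */
        return 0;
}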
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 2cef0c37ff6f..9790f1f5eb98 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/pci_ids.h>
#include <asm/amd_nb.h>
#include <asm/processor.h>
@@ -41,14 +42,6 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3 0x15b3
#endif
-#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
-#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
-#endif
-
-#ifndef PCI_DEVICE_ID_AMD_17H_M10H_DF_F3
-#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
-#endif
-
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000
@@ -367,6 +360,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 8e60048a33f8..51d34959709b 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -74,8 +74,7 @@
MST_STATUS_ND)
#define MST_STATUS_ERR (MST_STATUS_NAK | \
MST_STATUS_AL | \
- MST_STATUS_IP | \
- MST_STATUS_TSS)
+ MST_STATUS_IP)
#define MST_TX_BYTES_XFRD 0x50
#define MST_RX_BYTES_XFRD 0x54
#define SCL_HIGH_PERIOD 0x80
@@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
*/
if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) {
idev->msg_err = -EPROTO;
- i2c_int_disable(idev, ~0);
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
complete(&idev->msg_complete);
break;
}
@@ -299,14 +298,19 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
if (status & MST_STATUS_SCC) {
/* Stop completed */
- i2c_int_disable(idev, ~0);
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
complete(&idev->msg_complete);
} else if (status & MST_STATUS_SNS) {
/* Transfer done */
- i2c_int_disable(idev, ~0);
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
axxia_i2c_empty_rx_fifo(idev);
complete(&idev->msg_complete);
+ } else if (status & MST_STATUS_TSS) {
+ /* Transfer timeout */
+ idev->msg_err = -ETIMEDOUT;
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
+ complete(&idev->msg_complete);
} else if (unlikely(status & MST_STATUS_ERR)) {
/* Transfer error */
i2c_int_disable(idev, ~0);
@@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
u32 rx_xfer, tx_xfer;
u32 addr_1, addr_2;
unsigned long time_left;
+ unsigned int wt_value;
idev->msg = msg;
idev->msg_xfrd = 0;
- idev->msg_err = 0;
reinit_completion(&idev->msg_complete);
if (i2c_m_ten(msg)) {
@@ -383,9 +387,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
else if (axxia_i2c_fill_tx_fifo(idev) != 0)
int_mask |= MST_STATUS_TFL;
+ wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL));
+ /* Disable the wait timer temporarily */
+ writel(wt_value, idev->base + WAIT_TIMER_CONTROL);
+ /* Check if timeout error happened */
+ if (idev->msg_err)
+ goto out;
+
/* Start manual mode */
writel(CMD_MANUAL, idev->base + MST_COMMAND);
+ writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
+
i2c_int_enable(idev, int_mask);
time_left = wait_for_completion_timeout(&idev->msg_complete,
@@ -396,13 +409,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
dev_warn(idev->dev, "busy after xfer\n");
- if (time_left == 0)
+ if (time_left == 0) {
idev->msg_err = -ETIMEDOUT;
-
- if (idev->msg_err == -ETIMEDOUT)
i2c_recover_bus(&idev->adapter);
+ axxia_i2c_init(idev);
+ }
- if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
+out:
+ if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO &&
+ idev->msg_err != -ETIMEDOUT)
axxia_i2c_init(idev);
return idev->msg_err;
@@ -410,7 +425,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
{
- u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC;
+ u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
unsigned long time_left;
reinit_completion(&idev->msg_complete);
@@ -437,6 +452,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
int i;
int ret = 0;
+ idev->msg_err = 0;
+ i2c_int_enable(idev, MST_STATUS_TSS);
+
for (i = 0; ret == 0 && i < num; ++i)
ret = axxia_i2c_xfer_msg(idev, &msgs[i]);
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 8822357bca0c..4e67d5ed480e 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -89,7 +89,7 @@ static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
if (time_is_before_jiffies(target)) {
dev_err(i2cd->dev, "i2c timeout error %x\n", val);
- return -ETIME;
+ return -ETIMEDOUT;
}
val = readl(i2cd->regs + I2C_MST_CNTL);
@@ -97,9 +97,9 @@ static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
case I2C_MST_CNTL_STATUS_OKAY:
return 0;
case I2C_MST_CNTL_STATUS_NO_ACK:
- return -EIO;
+ return -ENXIO;
case I2C_MST_CNTL_STATUS_TIMEOUT:
- return -ETIME;
+ return -ETIMEDOUT;
default:
return 0;
}
@@ -218,6 +218,7 @@ stop:
static const struct i2c_adapter_quirks gpu_i2c_quirks = {
.max_read_len = 4,
+ .max_comb_2nd_msg_len = 4,
.flags = I2C_AQ_COMB_WRITE_THEN_READ,
};
@@ -341,7 +342,7 @@ static void gpu_i2c_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
}
-static int gpu_i2c_resume(struct device *dev)
+static __maybe_unused int gpu_i2c_resume(struct device *dev)
{
struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 4aa7dde876f3..254e6219e538 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -779,6 +779,11 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
pm_runtime_get_sync(dev);
+ /* Check bus state before init otherwise bus busy info will be lost */
+ ret = rcar_i2c_bus_barrier(priv);
+ if (ret < 0)
+ goto out;
+
/* Gen3 needs a reset before allowing RXDMA once */
if (priv->devtype == I2C_RCAR_GEN3) {
priv->flags |= ID_P_NO_RXDMA;
@@ -791,10 +796,6 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
rcar_i2c_init(priv);
- ret = rcar_i2c_bus_barrier(priv);
- if (ret < 0)
- goto out;
-
for (i = 0; i < num; i++)
rcar_i2c_request_dma(priv, msgs + i);
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 7e9a2bbf5ddc..ff3f4553648f 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -367,6 +367,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
{
struct acpi_smbus_cmi *smbus_cmi;
const struct acpi_device_id *id;
+ int ret;
smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
if (!smbus_cmi)
@@ -388,8 +389,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
- if (smbus_cmi->cap_info == 0)
+ if (smbus_cmi->cap_info == 0) {
+ ret = -ENODEV;
goto err;
+ }
snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
"SMBus CMI adapter %s",
@@ -400,7 +403,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
smbus_cmi->adapter.dev.parent = &device->dev;
- if (i2c_add_adapter(&smbus_cmi->adapter)) {
+ ret = i2c_add_adapter(&smbus_cmi->adapter);
+ if (ret) {
dev_err(&device->dev, "Couldn't register adapter!\n");
goto err;
}
@@ -410,7 +414,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
err:
kfree(smbus_cmi);
device->driver_data = NULL;
- return -EIO;
+ return ret;
}
static int acpi_smbus_cmi_remove(struct acpi_device *device)
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index dd384743dbbd..03da4a539a2f 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -173,8 +173,6 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
"interrupt: enabled_irqs=%04x, irq_status=%04x\n",
priv->enabled_irqs, irq_status);
- uniphier_fi2c_clear_irqs(priv, irq_status);
-
if (irq_status & UNIPHIER_FI2C_INT_STOP)
goto complete;
@@ -214,7 +212,13 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
if (irq_status & (UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB)) {
uniphier_fi2c_drain_rxfifo(priv);
- if (!priv->len)
+ /*
+ * If the number of bytes to read is multiple of the FIFO size
+ * (msg->len == 8, 16, 24, ...), the INT_RF bit is set a little
+ * earlier than INT_RB. We wait for INT_RB to confirm the
+ * completion of the current message.
+ */
+ if (!priv->len && (irq_status & UNIPHIER_FI2C_INT_RB))
goto data_done;
if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) {
@@ -253,12 +257,20 @@ complete:
}
handled:
+ /*
+ * This controller pauses while any bit of the IRQ status is
+ * asserted. Clear the asserted bits to kick the controller just before
+ * exiting the handler.
+ */
+ uniphier_fi2c_clear_irqs(priv, irq_status);
+
spin_unlock(&priv->lock);
return IRQ_HANDLED;
}
-static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr)
+static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr,
+ bool repeat)
{
priv->enabled_irqs |= UNIPHIER_FI2C_INT_TE;
uniphier_fi2c_set_irqs(priv);
@@ -268,8 +280,12 @@ static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr)
/* set slave address */
writel(UNIPHIER_FI2C_DTTX_CMD | addr << 1,
priv->membase + UNIPHIER_FI2C_DTTX);
- /* first chunk of data */
- uniphier_fi2c_fill_txfifo(priv, true);
+ /*
+ * First chunk of data. For a repeated START condition, do not write
+ * data to the TX FIFO here to avoid a timing issue.
+ */
+ if (!repeat)
+ uniphier_fi2c_fill_txfifo(priv, true);
}
static void uniphier_fi2c_rx_init(struct uniphier_fi2c_priv *priv, u16 addr)
@@ -350,7 +366,7 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
if (is_read)
uniphier_fi2c_rx_init(priv, msg->addr);
else
- uniphier_fi2c_tx_init(priv, msg->addr);
+ uniphier_fi2c_tx_init(priv, msg->addr, repeat);
dev_dbg(&adap->dev, "start condition\n");
/*
@@ -502,9 +518,26 @@ static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv)
uniphier_fi2c_reset(priv);
+ /*
+ * Standard-mode: tLOW + tHIGH = 10 us
+ * Fast-mode: tLOW + tHIGH = 2.5 us
+ */
writel(cyc, priv->membase + UNIPHIER_FI2C_CYC);
- writel(cyc / 2, priv->membase + UNIPHIER_FI2C_LCTL);
+ /*
+ * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us, tBUF = 4.7 us
+ * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us, tBUF = 1.3 us
+ * "tLow/tHIGH = 5/4" meets both.
+ */
+ writel(cyc * 5 / 9, priv->membase + UNIPHIER_FI2C_LCTL);
+ /*
+ * Standard-mode: tHD;STA = 4.0 us, tSU;STA = 4.7 us, tSU;STO = 4.0 us
+ * Fast-mode: tHD;STA = 0.6 us, tSU;STA = 0.6 us, tSU;STO = 0.6 us
+ */
writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT);
+ /*
+ * Standard-mode: tSU;DAT = 250 ns
+ * Fast-mode: tSU;DAT = 100 ns
+ */
writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT);
uniphier_fi2c_prepare_operation(priv);
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 454f914ae66d..c488e558aef7 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -320,7 +320,13 @@ static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv)
uniphier_i2c_reset(priv, true);
- writel((cyc / 2 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
+ /*
+ * Bit30-16: clock cycles of tLOW.
+ * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us
+ * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us
+ * "tLow/tHIGH = 5/4" meets both.
+ */
+ writel((cyc * 5 / 9 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
uniphier_i2c_reset(priv, false);
}
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 32affd3fa8bd..272800692088 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -45,6 +45,33 @@ struct i2c_acpi_lookup {
u32 min_speed;
};
+/**
+ * i2c_acpi_get_i2c_resource - Gets I2cSerialBus resource if type matches
+ * @ares: ACPI resource
+ * @i2c: Pointer to I2cSerialBus resource will be returned here
+ *
+ * Checks if the given ACPI resource is of type I2cSerialBus and, if so,
+ * returns a pointer to it via @i2c.
+ *
+ * Returns true if the resource type is I2cSerialBus, otherwise false.
+ */
+bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c)
+{
+ struct acpi_resource_i2c_serialbus *sb;
+
+ if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
+ return false;
+
+ sb = &ares->data.i2c_serial_bus;
+ if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C)
+ return false;
+
+ *i2c = sb;
+ return true;
+}
+EXPORT_SYMBOL_GPL(i2c_acpi_get_i2c_resource);
+
static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data)
{
struct i2c_acpi_lookup *lookup = data;
@@ -52,11 +79,7 @@ static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data)
struct acpi_resource_i2c_serialbus *sb;
acpi_status status;
- if (info->addr || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
- return 1;
-
- sb = &ares->data.i2c_serial_bus;
- if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C)
+ if (info->addr || !i2c_acpi_get_i2c_resource(ares, &sb))
return 1;
if (lookup->index != -1 && lookup->n++ != lookup->index)
@@ -65,7 +88,7 @@ static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data)
status = acpi_get_handle(lookup->device_handle,
sb->resource_source.string_ptr,
&lookup->adapter_handle);
- if (!ACPI_SUCCESS(status))
+ if (ACPI_FAILURE(status))
return 1;
info->addr = sb->slave_address;
@@ -386,20 +409,22 @@ struct notifier_block i2c_acpi_notifier = {
*
* Also see i2c_new_device, which this function calls to create the i2c-client.
*
- * Returns a pointer to the new i2c-client, or NULL if the adapter is not found.
+ * Returns a pointer to the new i2c-client, or error pointer in case of failure.
+ * Specifically, -EPROBE_DEFER is returned if the adapter is not found.
*/
struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
struct i2c_board_info *info)
{
struct i2c_acpi_lookup lookup;
struct i2c_adapter *adapter;
+ struct i2c_client *client;
struct acpi_device *adev;
LIST_HEAD(resource_list);
int ret;
adev = ACPI_COMPANION(dev);
if (!adev)
- return NULL;
+ return ERR_PTR(-EINVAL);
memset(&lookup, 0, sizeof(lookup));
lookup.info = info;
@@ -408,16 +433,23 @@ struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
ret = acpi_dev_get_resources(adev, &resource_list,
i2c_acpi_fill_info, &lookup);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
acpi_dev_free_resource_list(&resource_list);
- if (ret < 0 || !info->addr)
- return NULL;
+ if (!info->addr)
+ return ERR_PTR(-EADDRNOTAVAIL);
adapter = i2c_acpi_find_adapter_by_handle(lookup.adapter_handle);
if (!adapter)
- return NULL;
+ return ERR_PTR(-EPROBE_DEFER);
+
+ client = i2c_new_device(adapter, info);
+ if (!client)
+ return ERR_PTR(-ENODEV);
- return i2c_new_device(adapter, info);
+ return client;
}
EXPORT_SYMBOL_GPL(i2c_acpi_new_device);
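With this change a caller can no longer test for NULL; it must use the error-pointer helpers instead. A minimal caller sketch under that assumption (the consumer function and board_info are hypothetical):

#include <linux/err.h>
#include <linux/i2c.h>

static int my_consumer_probe(struct device *dev,
                             struct i2c_board_info *board_info)
{
        struct i2c_client *client;

        client = i2c_acpi_new_device(dev, 0, board_info);
        if (IS_ERR(client))
                return PTR_ERR(client); /* may be -EPROBE_DEFER */

        return 0;
}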
@@ -525,13 +557,7 @@ i2c_acpi_space_handler(u32 function, acpi_physical_address command,
goto err;
}
- if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) {
- ret = AE_BAD_PARAMETER;
- goto err;
- }
-
- sb = &ares->data.i2c_serial_bus;
- if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+ if (!value64 || !i2c_acpi_get_i2c_resource(ares, &sb)) {
ret = AE_BAD_PARAMETER;
goto err;
}
diff --git a/drivers/i3c/Kconfig b/drivers/i3c/Kconfig
new file mode 100644
index 000000000000..30a441506f61
--- /dev/null
+++ b/drivers/i3c/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig I3C
+ tristate "I3C support"
+ select I2C
+ help
+ I3C is a serial protocol standardized by the MIPI Alliance.
+
+ It's supposed to be backward compatible with I2C while providing
+ support for high speed transfers and native interrupt support
+ without the need for extra pins.
+
+ The I3C protocol also standardizes the slave device types and is
+ mainly designed to communicate with sensors.
+
+ If you want I3C support, you should say Y here and also to the
+ specific driver for your bus adapter(s) below.
+
+ This I3C support can also be built as a module. If so, the module
+ will be called i3c.
+
+if I3C
+source "drivers/i3c/master/Kconfig"
+endif # I3C
diff --git a/drivers/i3c/Makefile b/drivers/i3c/Makefile
new file mode 100644
index 000000000000..11982efbc6d9
--- /dev/null
+++ b/drivers/i3c/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+i3c-y := device.o master.o
+obj-$(CONFIG_I3C) += i3c.o
+obj-$(CONFIG_I3C) += master/
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
new file mode 100644
index 000000000000..69cc040c3a1c
--- /dev/null
+++ b/drivers/i3c/device.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "internals.h"
+
+/**
+ * i3c_device_do_priv_xfers() - do I3C SDR private transfers directed to a
+ * specific device
+ *
+ * @dev: device with which the transfers should be done
+ * @xfers: array of transfers
+ * @nxfers: number of transfers
+ *
+ * Initiate one or several private SDR transfers with @dev.
+ *
+ * This function can sleep and thus cannot be called in atomic context.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ int ret, i;
+
+ if (nxfers < 1)
+ return 0;
+
+ for (i = 0; i < nxfers; i++) {
+ if (!xfers[i].len || !xfers[i].data.in)
+ return -EINVAL;
+ }
+
+ i3c_bus_normaluse_lock(dev->bus);
+ ret = i3c_dev_do_priv_xfers_locked(dev->desc, xfers, nxfers);
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers);
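As a usage illustration of the API documented above, a hypothetical client driver could implement a register read as a write-then-read pair of private transfers:

#include <linux/i3c/device.h>

static int my_i3c_read_reg(struct i3c_device *dev, u8 reg, u8 *val)
{
        struct i3c_priv_xfer xfers[2] = {
                { .rnw = false, .len = 1, .data.out = &reg },
                { .rnw = true, .len = 1, .data.in = val },
        };

        return i3c_device_do_priv_xfers(dev, xfers, 2);
}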
+
+/**
+ * i3c_device_get_info() - get I3C device information
+ *
+ * @dev: device we want information on
+ * @info: the information object to fill in
+ *
+ * Retrieve I3C dev info.
+ */
+void i3c_device_get_info(struct i3c_device *dev,
+ struct i3c_device_info *info)
+{
+ if (!info)
+ return;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc)
+ *info = dev->desc->info;
+ i3c_bus_normaluse_unlock(dev->bus);
+}
+EXPORT_SYMBOL_GPL(i3c_device_get_info);
+
+/**
+ * i3c_device_disable_ibi() - Disable IBIs coming from a specific device
+ * @dev: device on which IBIs should be disabled
+ *
+ * This function disables IBIs coming from a specific device and waits
+ * for all pending IBIs to be processed.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_disable_ibi(struct i3c_device *dev)
+{
+ int ret = -ENOENT;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ ret = i3c_dev_disable_ibi_locked(dev->desc);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_disable_ibi);
+
+/**
+ * i3c_device_enable_ibi() - Enable IBIs coming from a specific device
+ * @dev: device on which IBIs should be enabled
+ *
+ * This function enables IBIs coming from a specific device and waits
+ * for all pending IBIs to be processed. It should be called on a device
+ * where i3c_device_request_ibi() has succeeded.
+ *
+ * Note that IBIs from this device might be received before this function
+ * returns to its caller.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_enable_ibi(struct i3c_device *dev)
+{
+ int ret = -ENOENT;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ ret = i3c_dev_enable_ibi_locked(dev->desc);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_enable_ibi);
+
+/**
+ * i3c_device_request_ibi() - Request an IBI
+ * @dev: device for which we should enable IBIs
+ * @req: setup requested for this IBI
+ *
+ * This function is responsible for pre-allocating all resources needed to
+ * process IBIs coming from @dev. When this function returns, the IBI is
+ * not enabled yet; call i3c_device_enable_ibi() to enable it.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_request_ibi(struct i3c_device *dev,
+ const struct i3c_ibi_setup *req)
+{
+ int ret = -ENOENT;
+
+ if (!req->handler || !req->num_slots)
+ return -EINVAL;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ ret = i3c_dev_request_ibi_locked(dev->desc, req);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_request_ibi);
+
+/**
+ * i3c_device_free_ibi() - Free all resources needed for IBI handling
+ * @dev: device on which you want to release IBI resources
+ *
+ * This function is responsible for de-allocating resources previously
+ * allocated by i3c_device_request_ibi(). It should be called after disabling
+ * IBIs with i3c_device_disable_ibi().
+ */
+void i3c_device_free_ibi(struct i3c_device *dev)
+{
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ i3c_dev_free_ibi_locked(dev->desc);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+}
+EXPORT_SYMBOL_GPL(i3c_device_free_ibi);
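Taken together, the four helpers above define the IBI lifecycle: request, enable, disable, free. A hedged sketch of a client walking through it (handler name, payload length, and slot count are illustrative):

#include <linux/i3c/device.h>

static void my_ibi_handler(struct i3c_device *dev,
                           const struct i3c_ibi_payload *payload)
{
        /* Called for each IBI, using the pre-allocated slots. */
}

static int my_setup_ibi(struct i3c_device *dev)
{
        struct i3c_ibi_setup req = {
                .max_payload_len = 2,   /* illustrative values */
                .num_slots = 10,
                .handler = my_ibi_handler,
        };
        int ret;

        ret = i3c_device_request_ibi(dev, &req);
        if (ret)
                return ret;

        ret = i3c_device_enable_ibi(dev);
        if (ret)
                i3c_device_free_ibi(dev);

        return ret;
}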
+
+/**
+ * i3cdev_to_dev() - Returns the device embedded in @i3cdev
+ * @i3cdev: I3C device
+ *
+ * Return: a pointer to a device object.
+ */
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev)
+{
+ return &i3cdev->dev;
+}
+EXPORT_SYMBOL_GPL(i3cdev_to_dev);
+
+/**
+ * dev_to_i3cdev() - Returns the I3C device containing @dev
+ * @dev: device object
+ *
+ * Return: a pointer to an I3C device object.
+ */
+struct i3c_device *dev_to_i3cdev(struct device *dev)
+{
+ return container_of(dev, struct i3c_device, dev);
+}
+EXPORT_SYMBOL_GPL(dev_to_i3cdev);
+
+/**
+ * i3c_driver_register_with_owner() - register an I3C device driver
+ *
+ * @drv: driver to register
+ * @owner: module that owns this driver
+ *
+ * Register @drv to the core.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_driver_register_with_owner(struct i3c_driver *drv, struct module *owner)
+{
+ drv->driver.owner = owner;
+ drv->driver.bus = &i3c_bus_type;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(i3c_driver_register_with_owner);
+
+/**
+ * i3c_driver_unregister() - unregister an I3C device driver
+ *
+ * @drv: driver to unregister
+ *
+ * Unregister @drv.
+ */
+void i3c_driver_unregister(struct i3c_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(i3c_driver_unregister);
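For completeness, a skeleton of a driver plugging into the registration helpers above; the I3C_DEVICE() match macro is assumed from the rest of this series, and all IDs and names are placeholders:

#include <linux/i3c/device.h>
#include <linux/module.h>

static int my_probe(struct i3c_device *i3cdev)
{
        return 0;
}

static int my_remove(struct i3c_device *i3cdev)
{
        return 0;
}

static const struct i3c_device_id my_ids[] = {
        I3C_DEVICE(0x123, 0x456, NULL), /* placeholder manuf/part IDs */
        { /* sentinel */ },
};

static struct i3c_driver my_driver = {
        .driver = {
                .name = "my-i3c-driver",
        },
        .probe = my_probe,
        .remove = my_remove,
        .id_table = my_ids,
};

static int __init my_driver_init(void)
{
        return i3c_driver_register_with_owner(&my_driver, THIS_MODULE);
}
module_init(my_driver_init);

static void __exit my_driver_exit(void)
{
        i3c_driver_unregister(&my_driver);
}
module_exit(my_driver_exit);

MODULE_LICENSE("GPL");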
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
new file mode 100644
index 000000000000..86b7b44cfca2
--- /dev/null
+++ b/drivers/i3c/internals.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_INTERNALS_H
+#define I3C_INTERNALS_H
+
+#include <linux/i3c/master.h>
+
+extern struct bus_type i3c_bus_type;
+
+void i3c_bus_normaluse_lock(struct i3c_bus *bus);
+void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
+
+int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev);
+int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev);
+int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev);
+#endif /* I3C_INTERNALS_H */
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
new file mode 100644
index 000000000000..c39f89d2deba
--- /dev/null
+++ b/drivers/i3c/master.c
@@ -0,0 +1,2659 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "internals.h"
+
+static DEFINE_IDR(i3c_bus_idr);
+static DEFINE_MUTEX(i3c_core_lock);
+
+/**
+ * i3c_bus_maintenance_lock - Lock the bus for a maintenance operation
+ * @bus: I3C bus to take the lock on
+ *
+ * This function takes the bus lock so that no other operations can occur on
+ * the bus. This is needed for all kind of bus maintenance operation, like
+ * - enabling/disabling slave events
+ * - re-triggering DAA
+ * - changing the dynamic address of a device
+ * - relinquishing mastership
+ * - ...
+ *
+ * The reason for this kind of locking is that we don't want drivers and core
+ * logic to rely on I3C device information that could be changed behind their
+ * back.
+ */
+static void i3c_bus_maintenance_lock(struct i3c_bus *bus)
+{
+ down_write(&bus->lock);
+}
+
+/**
+ * i3c_bus_maintenance_unlock - Release the bus lock after a maintenance
+ * operation
+ * @bus: I3C bus to release the lock on
+ *
+ * Should be called when the bus maintenance operation is done. See
+ * i3c_bus_maintenance_lock() for more details on what these maintenance
+ * operations are.
+ */
+static void i3c_bus_maintenance_unlock(struct i3c_bus *bus)
+{
+ up_write(&bus->lock);
+}
+
+/**
+ * i3c_bus_normaluse_lock - Lock the bus for a normal operation
+ * @bus: I3C bus to take the lock on
+ *
+ * This function takes the bus lock for any operation that is not a maintenance
+ * operation (see i3c_bus_maintenance_lock() for a non-exhaustive list of
+ * maintenance operations). Basically all communications with I3C devices are
+ * normal operations (HDR, SDR transfers or CCC commands that do not change bus
+ * state or I3C dynamic address).
+ *
+ * Note that this lock does not guarantee serialization of normal operations.
+ * In other words, transfer requests passed to the I3C master can be submitted
+ * in parallel and I3C master drivers have to use their own locking to make
+ * sure two different communications are not inter-mixed, or access to the
+ * output/input queue is not done while the engine is busy.
+ */
+void i3c_bus_normaluse_lock(struct i3c_bus *bus)
+{
+ down_read(&bus->lock);
+}
+
+/**
+ * i3c_bus_normaluse_unlock - Release the bus lock after a normal operation
+ * @bus: I3C bus to release the lock on
+ *
+ * Should be called when a normal operation is done. See
+ * i3c_bus_normaluse_lock() for more details on what these normal operations
+ * are.
+ */
+void i3c_bus_normaluse_unlock(struct i3c_bus *bus)
+{
+ up_read(&bus->lock);
+}
+
+static struct i3c_master_controller *dev_to_i3cmaster(struct device *dev)
+{
+ return container_of(dev, struct i3c_master_controller, dev);
+}
+
+static const struct device_type i3c_device_type;
+
+static struct i3c_bus *dev_to_i3cbus(struct device *dev)
+{
+ struct i3c_master_controller *master;
+
+ if (dev->type == &i3c_device_type)
+ return dev_to_i3cdev(dev)->bus;
+
+ master = dev_to_i3cmaster(dev);
+
+ return &master->bus;
+}
+
+static struct i3c_dev_desc *dev_to_i3cdesc(struct device *dev)
+{
+ struct i3c_master_controller *master;
+
+ if (dev->type == &i3c_device_type)
+ return dev_to_i3cdev(dev)->desc;
+
+ master = container_of(dev, struct i3c_master_controller, dev);
+
+ return master->this;
+}
+
+static ssize_t bcr_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%x\n", desc->info.bcr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(bcr);
+
+static ssize_t dcr_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%x\n", desc->info.dcr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(dcr);
+
+static ssize_t pid_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%llx\n", desc->info.pid);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(pid);
+
+static ssize_t dynamic_address_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%02x\n", desc->info.dyn_addr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(dynamic_address);
+
+static const char * const hdrcap_strings[] = {
+ "hdr-ddr", "hdr-tsp", "hdr-tsl",
+};
+
+static ssize_t hdrcap_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t offset = 0, ret;
+ unsigned long caps;
+ int mode;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ caps = desc->info.hdr_cap;
+ for_each_set_bit(mode, &caps, 8) {
+ if (mode >= ARRAY_SIZE(hdrcap_strings))
+ break;
+
+ if (!hdrcap_strings[mode])
+ continue;
+
+ ret = sprintf(buf + offset, offset ? " %s" : "%s",
+ hdrcap_strings[mode]);
+ if (ret < 0)
+ goto out;
+
+ offset += ret;
+ }
+
+ ret = sprintf(buf + offset, "\n");
+ if (ret < 0)
+ goto out;
+
+ ret = offset + ret;
+
+out:
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(hdrcap);
+
+static struct attribute *i3c_device_attrs[] = {
+ &dev_attr_bcr.attr,
+ &dev_attr_dcr.attr,
+ &dev_attr_pid.attr,
+ &dev_attr_dynamic_address.attr,
+ &dev_attr_hdrcap.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(i3c_device);
+
+static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_device_info devinfo;
+ u16 manuf, part, ext;
+
+ i3c_device_get_info(i3cdev, &devinfo);
+ manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ part = I3C_PID_PART_ID(devinfo.pid);
+ ext = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+ if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
+ return add_uevent_var(env, "MODALIAS=i3c:dcr%02Xmanuf%04X",
+ devinfo.dcr, manuf);
+
+ return add_uevent_var(env,
+ "MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04xext%04x",
+ devinfo.dcr, manuf, part, ext);
+}
+
+static const struct device_type i3c_device_type = {
+ .groups = i3c_device_groups,
+ .uevent = i3c_device_uevent,
+};
+
+static const struct i3c_device_id *
+i3c_device_match_id(struct i3c_device *i3cdev,
+ const struct i3c_device_id *id_table)
+{
+ struct i3c_device_info devinfo;
+ const struct i3c_device_id *id;
+
+ i3c_device_get_info(i3cdev, &devinfo);
+
+ /*
+ * The lower 32 bits of the provisional ID are just filled with a random
+ * value, so try to match using DCR info.
+ */
+ if (!I3C_PID_RND_LOWER_32BITS(devinfo.pid)) {
+ u16 manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ u16 part = I3C_PID_PART_ID(devinfo.pid);
+ u16 ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+ /* First try to match by manufacturer/part ID. */
+ for (id = id_table; id->match_flags != 0; id++) {
+ if ((id->match_flags & I3C_MATCH_MANUF_AND_PART) !=
+ I3C_MATCH_MANUF_AND_PART)
+ continue;
+
+ if (manuf != id->manuf_id || part != id->part_id)
+ continue;
+
+ if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
+ ext_info != id->extra_info)
+ continue;
+
+ return id;
+ }
+ }
+
+ /* Fallback to DCR match. */
+ for (id = id_table; id->match_flags != 0; id++) {
+ if ((id->match_flags & I3C_MATCH_DCR) &&
+ id->dcr == devinfo.dcr)
+ return id;
+ }
+
+ return NULL;
+}
+
+static int i3c_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct i3c_device *i3cdev;
+ struct i3c_driver *i3cdrv;
+
+ if (dev->type != &i3c_device_type)
+ return 0;
+
+ i3cdev = dev_to_i3cdev(dev);
+ i3cdrv = drv_to_i3cdrv(drv);
+ if (i3c_device_match_id(i3cdev, i3cdrv->id_table))
+ return 1;
+
+ return 0;
+}
+
+static int i3c_device_probe(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
+
+ return driver->probe(i3cdev);
+}
+
+static int i3c_device_remove(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
+ int ret;
+
+ ret = driver->remove(i3cdev);
+ if (ret)
+ return ret;
+
+ i3c_device_free_ibi(i3cdev);
+
+ return ret;
+}
+
+struct bus_type i3c_bus_type = {
+ .name = "i3c",
+ .match = i3c_device_match,
+ .probe = i3c_device_probe,
+ .remove = i3c_device_remove,
+};
+
+static enum i3c_addr_slot_status
+i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
+{
+ int status, bitpos = addr * 2;
+
+ if (addr > I2C_MAX_ADDR)
+ return I3C_ADDR_SLOT_RSVD;
+
+ status = bus->addrslots[bitpos / BITS_PER_LONG];
+ status >>= bitpos % BITS_PER_LONG;
+
+ return status & I3C_ADDR_SLOT_STATUS_MASK;
+}
+
+static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
+ enum i3c_addr_slot_status status)
+{
+ int bitpos = addr * 2;
+ unsigned long *ptr;
+
+ if (addr > I2C_MAX_ADDR)
+ return;
+
+ ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
+ *ptr &= ~(I3C_ADDR_SLOT_STATUS_MASK << (bitpos % BITS_PER_LONG));
+ *ptr |= status << (bitpos % BITS_PER_LONG);
+}
+
+static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
+{
+ enum i3c_addr_slot_status status;
+
+ status = i3c_bus_get_addr_slot_status(bus, addr);
+
+ return status == I3C_ADDR_SLOT_FREE;
+}
+
+static int i3c_bus_get_free_addr(struct i3c_bus *bus, u8 start_addr)
+{
+ enum i3c_addr_slot_status status;
+ u8 addr;
+
+ for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
+ status = i3c_bus_get_addr_slot_status(bus, addr);
+ if (status == I3C_ADDR_SLOT_FREE)
+ return addr;
+ }
+
+ return -ENOMEM;
+}
+
+static void i3c_bus_init_addrslots(struct i3c_bus *bus)
+{
+ int i;
+
+ /* Addresses 0 to 7 are reserved. */
+ for (i = 0; i < 8; i++)
+ i3c_bus_set_addr_slot_status(bus, i, I3C_ADDR_SLOT_RSVD);
+
+ /*
+ * Reserve the broadcast address and all addresses that might collide
+ * with it when facing a single-bit error.
+ */
+ i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR,
+ I3C_ADDR_SLOT_RSVD);
+ for (i = 0; i < 7; i++)
+ i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR ^ BIT(i),
+ I3C_ADDR_SLOT_RSVD);
+}
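Two details above are easier to see with concrete numbers. Each address takes two status bits, so on a 64-bit machine address 0x50 gives bitpos = 160, which lands in addrslots[2], bits 32-33. And the single-bit-error loop reserves seven extra addresses around the 0x7e broadcast address; a standalone sketch (plain C, outside the kernel) printing that set:

#include <stdio.h>

int main(void)
{
        int i;

        printf("broadcast: 0x7e\n");
        for (i = 0; i < 7; i++) /* 0x7f 0x7c 0x7a 0x76 0x6e 0x5e 0x3e */
                printf("reserved:  0x%02x\n", 0x7e ^ (1 << i));

        return 0;
}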
+
+static void i3c_bus_cleanup(struct i3c_bus *i3cbus)
+{
+ mutex_lock(&i3c_core_lock);
+ idr_remove(&i3c_bus_idr, i3cbus->id);
+ mutex_unlock(&i3c_core_lock);
+}
+
+static int i3c_bus_init(struct i3c_bus *i3cbus)
+{
+ int ret;
+
+ init_rwsem(&i3cbus->lock);
+ INIT_LIST_HEAD(&i3cbus->devs.i2c);
+ INIT_LIST_HEAD(&i3cbus->devs.i3c);
+ i3c_bus_init_addrslots(i3cbus);
+ i3cbus->mode = I3C_BUS_MODE_PURE;
+
+ mutex_lock(&i3c_core_lock);
+ ret = idr_alloc(&i3c_bus_idr, i3cbus, 0, 0, GFP_KERNEL);
+ mutex_unlock(&i3c_core_lock);
+
+ if (ret < 0)
+ return ret;
+
+ i3cbus->id = ret;
+
+ return 0;
+}
+
+static const char * const i3c_bus_mode_strings[] = {
+ [I3C_BUS_MODE_PURE] = "pure",
+ [I3C_BUS_MODE_MIXED_FAST] = "mixed-fast",
+ [I3C_BUS_MODE_MIXED_SLOW] = "mixed-slow",
+};
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ if (i3cbus->mode < 0 ||
+ i3cbus->mode >= ARRAY_SIZE(i3c_bus_mode_strings) ||
+ !i3c_bus_mode_strings[i3cbus->mode])
+ ret = sprintf(buf, "unknown\n");
+ else
+ ret = sprintf(buf, "%s\n", i3c_bus_mode_strings[i3cbus->mode]);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(mode);
+
+static ssize_t current_master_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%d-%llx\n", i3cbus->id,
+ i3cbus->cur_master->info.pid);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(current_master);
+
+static ssize_t i3c_scl_frequency_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i3c);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(i3c_scl_frequency);
+
+static ssize_t i2c_scl_frequency_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i2c);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(i2c_scl_frequency);
+
+static struct attribute *i3c_masterdev_attrs[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_current_master.attr,
+ &dev_attr_i3c_scl_frequency.attr,
+ &dev_attr_i2c_scl_frequency.attr,
+ &dev_attr_bcr.attr,
+ &dev_attr_dcr.attr,
+ &dev_attr_pid.attr,
+ &dev_attr_dynamic_address.attr,
+ &dev_attr_hdrcap.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(i3c_masterdev);
+
+static void i3c_masterdev_release(struct device *dev)
+{
+ struct i3c_master_controller *master = dev_to_i3cmaster(dev);
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+
+ if (master->wq)
+ destroy_workqueue(master->wq);
+
+ WARN_ON(!list_empty(&bus->devs.i2c) || !list_empty(&bus->devs.i3c));
+ i3c_bus_cleanup(bus);
+
+ of_node_put(dev->of_node);
+}
+
+static const struct device_type i3c_masterdev_type = {
+ .groups = i3c_masterdev_groups,
+};
+
+int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode)
+{
+ i3cbus->mode = mode;
+
+ if (!i3cbus->scl_rate.i3c)
+ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+
+ if (!i3cbus->scl_rate.i2c) {
+ if (i3cbus->mode == I3C_BUS_MODE_MIXED_SLOW)
+ i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
+ else
+ i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
+ }
+
+ /*
+ * I3C/I2C frequency may have been overridden, check that user-provided
+ * values are not exceeding max possible frequency.
+ */
+ if (i3cbus->scl_rate.i3c > I3C_BUS_MAX_I3C_SCL_RATE ||
+ i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_RATE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct i3c_master_controller *
+i2c_adapter_to_i3c_master(struct i2c_adapter *adap)
+{
+ return container_of(adap, struct i3c_master_controller, i2c);
+}
+
+static struct i2c_adapter *
+i3c_master_to_i2c_adapter(struct i3c_master_controller *master)
+{
+ return &master->i2c;
+}
+
+static void i3c_master_free_i2c_dev(struct i2c_dev_desc *dev)
+{
+ kfree(dev);
+}
+
+static struct i2c_dev_desc *
+i3c_master_alloc_i2c_dev(struct i3c_master_controller *master,
+ const struct i2c_dev_boardinfo *boardinfo)
+{
+ struct i2c_dev_desc *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->common.master = master;
+ dev->boardinfo = boardinfo;
+
+ return dev;
+}
+
+static void *i3c_ccc_cmd_dest_init(struct i3c_ccc_cmd_dest *dest, u8 addr,
+ u16 payloadlen)
+{
+ dest->addr = addr;
+ dest->payload.len = payloadlen;
+ if (payloadlen)
+ dest->payload.data = kzalloc(payloadlen, GFP_KERNEL);
+ else
+ dest->payload.data = NULL;
+
+ return dest->payload.data;
+}
+
+static void i3c_ccc_cmd_dest_cleanup(struct i3c_ccc_cmd_dest *dest)
+{
+ kfree(dest->payload.data);
+}
+
+static void i3c_ccc_cmd_init(struct i3c_ccc_cmd *cmd, bool rnw, u8 id,
+ struct i3c_ccc_cmd_dest *dests,
+ unsigned int ndests)
+{
+ cmd->rnw = rnw ? 1 : 0;
+ cmd->id = id;
+ cmd->dests = dests;
+ cmd->ndests = ndests;
+ cmd->err = I3C_ERROR_UNKNOWN;
+}
+
+static int i3c_master_send_ccc_cmd_locked(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd)
+{
+ int ret;
+
+ if (!cmd || !master)
+ return -EINVAL;
+
+ if (WARN_ON(master->init_done &&
+ !rwsem_is_locked(&master->bus.lock)))
+ return -EINVAL;
+
+ if (!master->ops->send_ccc_cmd)
+ return -ENOTSUPP;
+
+ if ((cmd->id & I3C_CCC_DIRECT) && (!cmd->dests || !cmd->ndests))
+ return -EINVAL;
+
+ if (master->ops->supports_ccc_cmd &&
+ !master->ops->supports_ccc_cmd(master, cmd))
+ return -ENOTSUPP;
+
+ ret = master->ops->send_ccc_cmd(master, cmd);
+ if (ret) {
+ if (cmd->err != I3C_ERROR_UNKNOWN)
+ return cmd->err;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct i2c_dev_desc *
+i3c_master_find_i2c_dev_by_addr(const struct i3c_master_controller *master,
+ u16 addr)
+{
+ struct i2c_dev_desc *dev;
+
+ i3c_bus_for_each_i2cdev(&master->bus, dev) {
+ if (dev->boardinfo->base.addr == addr)
+ return dev;
+ }
+
+ return NULL;
+}
+
+/**
+ * i3c_master_get_free_addr() - get a free address on the bus
+ * @master: I3C master object
+ * @start_addr: where to start searching
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: the first free address starting at @start_addr (included) or -ENOMEM
+ * if no more addresses are available.
+ */
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+ u8 start_addr)
+{
+ return i3c_bus_get_free_addr(&master->bus, start_addr);
+}
+EXPORT_SYMBOL_GPL(i3c_master_get_free_addr);
+
+static void i3c_device_release(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+
+ WARN_ON(i3cdev->desc);
+
+ of_node_put(i3cdev->dev.of_node);
+ kfree(i3cdev);
+}
+
+static void i3c_master_free_i3c_dev(struct i3c_dev_desc *dev)
+{
+ kfree(dev);
+}
+
+static struct i3c_dev_desc *
+i3c_master_alloc_i3c_dev(struct i3c_master_controller *master,
+ const struct i3c_device_info *info)
+{
+ struct i3c_dev_desc *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->common.master = master;
+ dev->info = *info;
+ mutex_init(&dev->ibi_lock);
+
+ return dev;
+}
+
+static int i3c_master_rstdaa_locked(struct i3c_master_controller *master,
+ u8 addr)
+{
+ enum i3c_addr_slot_status addrstat;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ if (!master)
+ return -EINVAL;
+
+ addrstat = i3c_bus_get_addr_slot_status(&master->bus, addr);
+ if (addr != I3C_BROADCAST_ADDR && addrstat != I3C_ADDR_SLOT_I3C_DEV)
+ return -EINVAL;
+
+ i3c_ccc_cmd_dest_init(&dest, addr, 0);
+ i3c_ccc_cmd_init(&cmd, false,
+ I3C_CCC_RSTDAA(addr == I3C_BROADCAST_ADDR),
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+/**
+ * i3c_master_entdaa_locked() - start a DAA (Dynamic Address Assignment)
+ * procedure
+ * @master: master used to send frames on the bus
+ *
+ * Send a ENTDAA CCC command to start a DAA procedure.
+ *
+ * Note that this function only sends the ENTDAA CCC command; all the logic
+ * behind dynamic address assignment has to be handled in the I3C master
+ * driver.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_entdaa_locked(struct i3c_master_controller *master)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR, 0);
+ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_ENTDAA, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_entdaa_locked);
+
+static int i3c_master_enec_disec_locked(struct i3c_master_controller *master,
+ u8 addr, bool enable, u8 evts)
+{
+ struct i3c_ccc_events *events;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ events = i3c_ccc_cmd_dest_init(&dest, addr, sizeof(*events));
+ if (!events)
+ return -ENOMEM;
+
+ events->events = evts;
+ i3c_ccc_cmd_init(&cmd, false,
+ enable ?
+ I3C_CCC_ENEC(addr == I3C_BROADCAST_ADDR) :
+ I3C_CCC_DISEC(addr == I3C_BROADCAST_ADDR),
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+/**
+ * i3c_master_disec_locked() - send a DISEC CCC command
+ * @master: master used to send frames on the bus
+ * @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
+ * @evts: events to disable
+ *
+ * Send a DISEC CCC command to disable some or all events coming from a
+ * specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts)
+{
+ return i3c_master_enec_disec_locked(master, addr, false, evts);
+}
+EXPORT_SYMBOL_GPL(i3c_master_disec_locked);
+
+/**
+ * i3c_master_enec_locked() - send an ENEC CCC command
+ * @master: master used to send frames on the bus
+ * @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
+ * @evts: events to enable
+ *
+ * Sends an ENEC CCC command to enable some or all events coming from a
+ * specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts)
+{
+ return i3c_master_enec_disec_locked(master, addr, true, evts);
+}
+EXPORT_SYMBOL_GPL(i3c_master_enec_locked);
+
+/**
+ * i3c_master_defslvs_locked() - send a DEFSLVS CCC command
+ * @master: master used to send frames on the bus
+ *
+ * Send a DEFSLVS CCC command containing all the devices known to the @master.
+ * This is useful to propagate device information to secondary masters on
+ * the bus.
+ *
+ * This should be called after all I3C devices have been discovered (in other
+ * words, after the DAA procedure has finished) and instantiated in
+ * &i3c_master_controller_ops->bus_init().
+ * It should also be called if a master ACKed a Hot-Join request and assigned
+ * a dynamic address to the device joining the bus.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_defslvs_locked(struct i3c_master_controller *master)
+{
+ struct i3c_ccc_defslvs *defslvs;
+ struct i3c_ccc_dev_desc *desc;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_dev_desc *i3cdev;
+ struct i2c_dev_desc *i2cdev;
+ struct i3c_ccc_cmd cmd;
+ struct i3c_bus *bus;
+ bool send = false;
+ int ndevs = 0, ret;
+
+ if (!master)
+ return -EINVAL;
+
+ bus = i3c_master_get_bus(master);
+ i3c_bus_for_each_i3cdev(bus, i3cdev) {
+ ndevs++;
+
+ if (i3cdev == master->this)
+ continue;
+
+ if (I3C_BCR_DEVICE_ROLE(i3cdev->info.bcr) ==
+ I3C_BCR_I3C_MASTER)
+ send = true;
+ }
+
+ /* No other master on the bus, skip DEFSLVS. */
+ if (!send)
+ return 0;
+
+ i3c_bus_for_each_i2cdev(bus, i2cdev)
+ ndevs++;
+
+ defslvs = i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR,
+ sizeof(*defslvs) +
+ ((ndevs - 1) *
+ sizeof(struct i3c_ccc_dev_desc)));
+ if (!defslvs)
+ return -ENOMEM;
+
+ defslvs->count = ndevs;
+ defslvs->master.bcr = master->this->info.bcr;
+ defslvs->master.dcr = master->this->info.dcr;
+ defslvs->master.dyn_addr = master->this->info.dyn_addr << 1;
+ defslvs->master.static_addr = I3C_BROADCAST_ADDR << 1;
+
+ desc = defslvs->slaves;
+ i3c_bus_for_each_i2cdev(bus, i2cdev) {
+ desc->lvr = i2cdev->boardinfo->lvr;
+ desc->static_addr = i2cdev->boardinfo->base.addr << 1;
+ desc++;
+ }
+
+ i3c_bus_for_each_i3cdev(bus, i3cdev) {
+ /* Skip the I3C dev representing this master. */
+ if (i3cdev == master->this)
+ continue;
+
+ desc->bcr = i3cdev->info.bcr;
+ desc->dcr = i3cdev->info.dcr;
+ desc->dyn_addr = i3cdev->info.dyn_addr << 1;
+ desc->static_addr = i3cdev->info.static_addr << 1;
+ desc++;
+ }
+
+ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_DEFSLVS, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_defslvs_locked);
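+
+/*
+ * Illustrative call site (a sketch; foo_assign_dyn_addrs() is a
+ * hypothetical helper): the core invokes
+ * &i3c_master_controller_ops->do_daa() with the bus lock held in
+ * maintenance mode, so a driver can propagate the device table to
+ * secondary masters right after address assignment:
+ *
+ *	static int foo_do_daa(struct i3c_master_controller *m)
+ *	{
+ *		int ret;
+ *
+ *		ret = foo_assign_dyn_addrs(m);
+ *		if (ret)
+ *			return ret;
+ *
+ *		return i3c_master_defslvs_locked(m);
+ *	}
+ */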
+
+static int i3c_master_setda_locked(struct i3c_master_controller *master,
+ u8 oldaddr, u8 newaddr, bool setdasa)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_setda *setda;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ if (!oldaddr || !newaddr)
+ return -EINVAL;
+
+ setda = i3c_ccc_cmd_dest_init(&dest, oldaddr, sizeof(*setda));
+ if (!setda)
+ return -ENOMEM;
+
+ setda->addr = newaddr << 1;
+ i3c_ccc_cmd_init(&cmd, false,
+ setdasa ? I3C_CCC_SETDASA : I3C_CCC_SETNEWDA,
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_setdasa_locked(struct i3c_master_controller *master,
+ u8 static_addr, u8 dyn_addr)
+{
+ return i3c_master_setda_locked(master, static_addr, dyn_addr, true);
+}
+
+static int i3c_master_setnewda_locked(struct i3c_master_controller *master,
+ u8 oldaddr, u8 newaddr)
+{
+ return i3c_master_setda_locked(master, oldaddr, newaddr, false);
+}
+
+static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_cmd_dest dest;
+ unsigned int expected_len;
+ struct i3c_ccc_mrl *mrl;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ mrl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mrl));
+ if (!mrl)
+ return -ENOMEM;
+
+	/*
+	 * When the device does not have an IBI payload, GETMRL only returns
+	 * 2 bytes of data.
+	 */
+ if (!(info->bcr & I3C_BCR_IBI_PAYLOAD))
+ dest.payload.len -= 1;
+
+ expected_len = dest.payload.len;
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMRL, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != expected_len) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->max_read_len = be16_to_cpu(mrl->read_len);
+
+ if (info->bcr & I3C_BCR_IBI_PAYLOAD)
+ info->max_ibi_len = mrl->ibi_len;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getmwl_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_mwl *mwl;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ mwl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mwl));
+ if (!mwl)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMWL, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+	if (dest.payload.len != sizeof(*mwl)) {
+		ret = -EIO;
+		goto out;
+	}
+
+ info->max_write_len = be16_to_cpu(mwl->len);
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getmxds_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getmxds *getmaxds;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getmaxds = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
+ sizeof(*getmaxds));
+ if (!getmaxds)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMXDS, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != 2 && dest.payload.len != 5) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->max_read_ds = getmaxds->maxrd;
+ info->max_write_ds = getmaxds->maxwr;
+ if (dest.payload.len == 5)
+ info->max_read_turnaround = getmaxds->maxrdturn[0] |
+ ((u32)getmaxds->maxrdturn[1] << 8) |
+ ((u32)getmaxds->maxrdturn[2] << 16);
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_gethdrcap_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_gethdrcap *gethdrcap;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ gethdrcap = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
+ sizeof(*gethdrcap));
+ if (!gethdrcap)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETHDRCAP, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != 1) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->hdr_cap = gethdrcap->modes;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getpid_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getpid *getpid;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret, i;
+
+ getpid = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getpid));
+ if (!getpid)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETPID, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->pid = 0;
+ for (i = 0; i < sizeof(getpid->pid); i++) {
+ int sft = (sizeof(getpid->pid) - i - 1) * 8;
+
+ info->pid |= (u64)getpid->pid[i] << sft;
+ }
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getbcr_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getbcr *getbcr;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getbcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getbcr));
+ if (!getbcr)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETBCR, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->bcr = getbcr->bcr;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getdcr_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getdcr *getdcr;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getdcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getdcr));
+ if (!getdcr)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETDCR, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->dcr = getdcr->dcr;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status slot_status;
+ int ret;
+
+ if (!dev->info.dyn_addr)
+ return -EINVAL;
+
+ slot_status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (slot_status == I3C_ADDR_SLOT_RSVD ||
+ slot_status == I3C_ADDR_SLOT_I2C_DEV)
+ return -EINVAL;
+
+ ret = i3c_master_getpid_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ ret = i3c_master_getbcr_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ ret = i3c_master_getdcr_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM) {
+ ret = i3c_master_getmxds_locked(master, &dev->info);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ dev->info.max_ibi_len = 1;
+
+ i3c_master_getmrl_locked(master, &dev->info);
+ i3c_master_getmwl_locked(master, &dev->info);
+
+ if (dev->info.bcr & I3C_BCR_HDR_CAP) {
+ ret = i3c_master_gethdrcap_locked(master, &dev->info);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void i3c_master_put_i3c_addrs(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ if (dev->info.static_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ if (dev->info.dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+ I3C_ADDR_SLOT_FREE);
+
+	if (dev->boardinfo && dev->boardinfo->init_dyn_addr)
+		i3c_bus_set_addr_slot_status(&master->bus,
+					     dev->boardinfo->init_dyn_addr,
+					     I3C_ADDR_SLOT_FREE);
+}
+
+static int i3c_master_get_i3c_addrs(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status status;
+
+ if (!dev->info.static_addr && !dev->info.dyn_addr)
+ return 0;
+
+ if (dev->info.static_addr) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.static_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ return -EBUSY;
+
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ }
+
+	/*
+	 * The ->init_dyn_addr slot should have been reserved before this
+	 * point, so if we're applying a pre-reserved dynamic address we
+	 * should not try to reserve the address slot a second time.
+	 */
+ if (dev->info.dyn_addr &&
+ (!dev->boardinfo ||
+ dev->boardinfo->init_dyn_addr != dev->info.dyn_addr)) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ goto err_release_static_addr;
+
+ i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ }
+
+ return 0;
+
+err_release_static_addr:
+ if (dev->info.static_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ return -EBUSY;
+}
+
+static int i3c_master_attach_i3c_dev(struct i3c_master_controller *master,
+ struct i3c_dev_desc *dev)
+{
+ int ret;
+
+ /*
+ * We don't attach devices to the controller until they are
+ * addressable on the bus.
+ */
+ if (!dev->info.static_addr && !dev->info.dyn_addr)
+ return 0;
+
+ ret = i3c_master_get_i3c_addrs(dev);
+ if (ret)
+ return ret;
+
+ /* Do not attach the master device itself. */
+ if (master->this != dev && master->ops->attach_i3c_dev) {
+ ret = master->ops->attach_i3c_dev(dev);
+ if (ret) {
+ i3c_master_put_i3c_addrs(dev);
+ return ret;
+ }
+ }
+
+ list_add_tail(&dev->common.node, &master->bus.devs.i3c);
+
+ return 0;
+}
+
+static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status status;
+ int ret;
+
+ if (dev->info.dyn_addr != old_dyn_addr) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ return -EBUSY;
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ }
+
+ if (master->ops->reattach_i3c_dev) {
+ ret = master->ops->reattach_i3c_dev(dev, old_dyn_addr);
+ if (ret) {
+ i3c_master_put_i3c_addrs(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ /* Do not detach the master device itself. */
+ if (master->this != dev && master->ops->detach_i3c_dev)
+ master->ops->detach_i3c_dev(dev);
+
+ i3c_master_put_i3c_addrs(dev);
+ list_del(&dev->common.node);
+}
+
+static int i3c_master_attach_i2c_dev(struct i3c_master_controller *master,
+ struct i2c_dev_desc *dev)
+{
+ int ret;
+
+ if (master->ops->attach_i2c_dev) {
+ ret = master->ops->attach_i2c_dev(dev);
+ if (ret)
+ return ret;
+ }
+
+ list_add_tail(&dev->common.node, &master->bus.devs.i2c);
+
+ return 0;
+}
+
+static void i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i2c_dev_get_master(dev);
+
+ list_del(&dev->common.node);
+
+ if (master->ops->detach_i2c_dev)
+ master->ops->detach_i2c_dev(dev);
+}
+
+static void i3c_master_pre_assign_dyn_addr(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ int ret;
+
+ if (!dev->boardinfo || !dev->boardinfo->init_dyn_addr ||
+ !dev->boardinfo->static_addr)
+ return;
+
+ ret = i3c_master_setdasa_locked(master, dev->info.static_addr,
+ dev->boardinfo->init_dyn_addr);
+ if (ret)
+ return;
+
+ dev->info.dyn_addr = dev->boardinfo->init_dyn_addr;
+ ret = i3c_master_reattach_i3c_dev(dev, 0);
+ if (ret)
+ goto err_rstdaa;
+
+ ret = i3c_master_retrieve_dev_info(dev);
+ if (ret)
+ goto err_rstdaa;
+
+ return;
+
+err_rstdaa:
+ i3c_master_rstdaa_locked(master, dev->boardinfo->init_dyn_addr);
+}
+
+static void
+i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *desc;
+ int ret;
+
+ if (!master->init_done)
+ return;
+
+ i3c_bus_for_each_i3cdev(&master->bus, desc) {
+ if (desc->dev || !desc->info.dyn_addr || desc == master->this)
+ continue;
+
+ desc->dev = kzalloc(sizeof(*desc->dev), GFP_KERNEL);
+ if (!desc->dev)
+ continue;
+
+ desc->dev->bus = &master->bus;
+ desc->dev->desc = desc;
+ desc->dev->dev.parent = &master->dev;
+ desc->dev->dev.type = &i3c_device_type;
+ desc->dev->dev.bus = &i3c_bus_type;
+ desc->dev->dev.release = i3c_device_release;
+ dev_set_name(&desc->dev->dev, "%d-%llx", master->bus.id,
+ desc->info.pid);
+
+ if (desc->boardinfo)
+ desc->dev->dev.of_node = desc->boardinfo->of_node;
+
+ ret = device_register(&desc->dev->dev);
+ if (ret)
+ dev_err(&master->dev,
+ "Failed to add I3C device (err = %d)\n", ret);
+ }
+}
+
+/**
+ * i3c_master_do_daa() - do a DAA (Dynamic Address Assignment)
+ * @master: master doing the DAA
+ *
+ * This function calls the &i3c_master_controller_ops->do_daa() hook with
+ * the bus lock held in maintenance mode so that the controller can assign
+ * dynamic addresses, and then registers all I3C devices discovered during
+ * this procedure.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_do_daa(struct i3c_master_controller *master)
+{
+ int ret;
+
+ i3c_bus_maintenance_lock(&master->bus);
+ ret = master->ops->do_daa(master);
+ i3c_bus_maintenance_unlock(&master->bus);
+
+ if (ret)
+ return ret;
+
+ i3c_bus_normaluse_lock(&master->bus);
+ i3c_master_register_new_i3c_devs(master);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_master_do_daa);
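+
+/*
+ * Sketch: because i3c_master_do_daa() takes the bus lock itself, a driver
+ * that just ACKed a Hot-Join request can simply schedule a DAA from a
+ * workqueue (struct foo_master and its hj_work member are illustrative):
+ *
+ *	static void foo_hj_work(struct work_struct *work)
+ *	{
+ *		struct foo_master *m = container_of(work, struct foo_master,
+ *						    hj_work);
+ *
+ *		i3c_master_do_daa(&m->base);
+ *	}
+ */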
+
+/**
+ * i3c_master_set_info() - set master device information
+ * @master: master used to send frames on the bus
+ * @info: I3C device information
+ *
+ * Set master device info. This should be called from
+ * &i3c_master_controller_ops->bus_init().
+ *
+ * Not all &i3c_device_info fields are meaningful for a master device.
+ * Here is a list of fields that should be properly filled:
+ *
+ * - &i3c_device_info->dyn_addr
+ * - &i3c_device_info->bcr
+ * - &i3c_device_info->dcr
+ * - &i3c_device_info->pid
+ * - &i3c_device_info->hdr_cap if %I3C_BCR_HDR_CAP bit is set in
+ * &i3c_device_info->bcr
+ *
+ * This function must be called with the bus lock held in maintenance mode.
+ *
+ * Return: 0 if @info contains valid information (not every piece of
+ * information can be checked, but we can at least make sure @info->dyn_addr
+ * and @info->bcr are correct), -EINVAL otherwise.
+ */
+int i3c_master_set_info(struct i3c_master_controller *master,
+ const struct i3c_device_info *info)
+{
+ struct i3c_dev_desc *i3cdev;
+ int ret;
+
+ if (!i3c_bus_dev_addr_is_avail(&master->bus, info->dyn_addr))
+ return -EINVAL;
+
+ if (I3C_BCR_DEVICE_ROLE(info->bcr) == I3C_BCR_I3C_MASTER &&
+ master->secondary)
+ return -EINVAL;
+
+ if (master->this)
+ return -EINVAL;
+
+ i3cdev = i3c_master_alloc_i3c_dev(master, info);
+ if (IS_ERR(i3cdev))
+ return PTR_ERR(i3cdev);
+
+ master->this = i3cdev;
+ master->bus.cur_master = master->this;
+
+ ret = i3c_master_attach_i3c_dev(master, i3cdev);
+ if (ret)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ i3c_master_free_i3c_dev(i3cdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_set_info);
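+
+/*
+ * A minimal ->bus_init() sketch (hypothetical foo_* driver; foo_read_ids()
+ * stands for whatever retrieves the controller PID/BCR/DCR): pick a free
+ * dynamic address for the master itself and declare it to the core:
+ *
+ *	static int foo_bus_init(struct i3c_master_controller *m)
+ *	{
+ *		struct i3c_device_info info = { };
+ *		int ret;
+ *
+ *		ret = i3c_master_get_free_addr(m, 0);
+ *		if (ret < 0)
+ *			return ret;
+ *
+ *		info.dyn_addr = ret;
+ *		foo_read_ids(m, &info.pid, &info.bcr, &info.dcr);
+ *
+ *		return i3c_master_set_info(m, &info);
+ *	}
+ */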
+
+static void i3c_master_detach_free_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *i3cdev, *i3ctmp;
+ struct i2c_dev_desc *i2cdev, *i2ctmp;
+
+ list_for_each_entry_safe(i3cdev, i3ctmp, &master->bus.devs.i3c,
+ common.node) {
+ i3c_master_detach_i3c_dev(i3cdev);
+
+ if (i3cdev->boardinfo && i3cdev->boardinfo->init_dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i3cdev->boardinfo->init_dyn_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ i3c_master_free_i3c_dev(i3cdev);
+ }
+
+ list_for_each_entry_safe(i2cdev, i2ctmp, &master->bus.devs.i2c,
+ common.node) {
+ i3c_master_detach_i2c_dev(i2cdev);
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i2cdev->boardinfo->base.addr,
+ I3C_ADDR_SLOT_FREE);
+ i3c_master_free_i2c_dev(i2cdev);
+ }
+}
+
+/**
+ * i3c_master_bus_init() - initialize an I3C bus
+ * @master: main master initializing the bus
+ *
+ * This function follows all the initialization steps described in the I3C
+ * specification:
+ *
+ * 1. Attach I2C and statically defined I3C devs to the master so that the
+ * master can fill its internal device table appropriately
+ *
+ * 2. Call &i3c_master_controller_ops->bus_init() method to initialize
+ * the master controller. That's usually where the bus mode is selected
+ * (pure bus or mixed fast/slow bus)
+ *
+ * 3. Instruct all devices on the bus to drop their dynamic address. This is
+ * particularly important when the bus was previously configured by someone
+ * else (for example the bootloader)
+ *
+ * 4. Disable all slave events.
+ *
+ * 5. Pre-assign dynamic addresses requested by the FW with SETDASA for I3C
+ * devices that have a static address
+ *
+ * 6. Do a DAA (Dynamic Address Assignment) to assign dynamic addresses to all
+ * remaining I3C devices
+ *
+ * Once this is done, all I3C and I2C devices should be usable.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static int i3c_master_bus_init(struct i3c_master_controller *master)
+{
+ enum i3c_addr_slot_status status;
+ struct i2c_dev_boardinfo *i2cboardinfo;
+ struct i3c_dev_boardinfo *i3cboardinfo;
+ struct i3c_dev_desc *i3cdev;
+ struct i2c_dev_desc *i2cdev;
+ int ret;
+
+ /*
+ * First attach all devices with static definitions provided by the
+ * FW.
+ */
+ list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ i2cboardinfo->base.addr);
+ if (status != I3C_ADDR_SLOT_FREE) {
+ ret = -EBUSY;
+ goto err_detach_devs;
+ }
+
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i2cboardinfo->base.addr,
+ I3C_ADDR_SLOT_I2C_DEV);
+
+ i2cdev = i3c_master_alloc_i2c_dev(master, i2cboardinfo);
+ if (IS_ERR(i2cdev)) {
+ ret = PTR_ERR(i2cdev);
+ goto err_detach_devs;
+ }
+
+ ret = i3c_master_attach_i2c_dev(master, i2cdev);
+ if (ret) {
+ i3c_master_free_i2c_dev(i2cdev);
+ goto err_detach_devs;
+ }
+ }
+ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
+ struct i3c_device_info info = {
+ .static_addr = i3cboardinfo->static_addr,
+ };
+
+ if (i3cboardinfo->init_dyn_addr) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ i3cboardinfo->init_dyn_addr);
+ if (status != I3C_ADDR_SLOT_FREE) {
+ ret = -EBUSY;
+ goto err_detach_devs;
+ }
+ }
+
+ i3cdev = i3c_master_alloc_i3c_dev(master, &info);
+ if (IS_ERR(i3cdev)) {
+ ret = PTR_ERR(i3cdev);
+ goto err_detach_devs;
+ }
+
+ i3cdev->boardinfo = i3cboardinfo;
+
+ ret = i3c_master_attach_i3c_dev(master, i3cdev);
+ if (ret) {
+ i3c_master_free_i3c_dev(i3cdev);
+ goto err_detach_devs;
+ }
+ }
+
+ /*
+ * Now execute the controller specific ->bus_init() routine, which
+ * might configure its internal logic to match the bus limitations.
+ */
+ ret = master->ops->bus_init(master);
+ if (ret)
+ goto err_detach_devs;
+
+ /*
+ * The master device should have been instantiated in ->bus_init(),
+ * complain if this was not the case.
+ */
+ if (!master->this) {
+ dev_err(&master->dev,
+ "master_set_info() was not called in ->bus_init()\n");
+ ret = -EINVAL;
+ goto err_bus_cleanup;
+ }
+
+	/*
+	 * Reset all dynamic addresses that may have been assigned before
+	 * (by the bootloader, for example).
+	 */
+ ret = i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
+ if (ret && ret != I3C_ERROR_M2)
+ goto err_bus_cleanup;
+
+ /* Disable all slave events before starting DAA. */
+ ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
+ I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
+ I3C_CCC_EVENT_HJ);
+ if (ret && ret != I3C_ERROR_M2)
+ goto err_bus_cleanup;
+
+ /*
+ * Pre-assign dynamic address and retrieve device information if
+ * needed.
+ */
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev)
+ i3c_master_pre_assign_dyn_addr(i3cdev);
+
+ ret = i3c_master_do_daa(master);
+ if (ret)
+ goto err_rstdaa;
+
+ return 0;
+
+err_rstdaa:
+ i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
+
+err_bus_cleanup:
+ if (master->ops->bus_cleanup)
+ master->ops->bus_cleanup(master);
+
+err_detach_devs:
+ i3c_master_detach_free_devs(master);
+
+ return ret;
+}
+
+static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
+{
+ if (master->ops->bus_cleanup)
+ master->ops->bus_cleanup(master);
+
+ i3c_master_detach_free_devs(master);
+}
+
+static struct i3c_dev_desc *
+i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
+{
+ struct i3c_master_controller *master = refdev->common.master;
+ struct i3c_dev_desc *i3cdev;
+
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+ if (i3cdev != refdev && i3cdev->info.pid == refdev->info.pid)
+ return i3cdev;
+ }
+
+ return NULL;
+}
+
+/**
+ * i3c_master_add_i3c_dev_locked() - add an I3C slave to the bus
+ * @master: master used to send frames on the bus
+ * @addr: I3C slave dynamic address assigned to the device
+ *
+ * This function instantiates an I3C device object and adds it to the
+ * I3C device list. All device information is automatically retrieved
+ * using standard CCC commands.
+ *
+ * The master can then retrieve the device descriptor and attach private
+ * data to it using i3c_dev_set_master_data().
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ u8 addr)
+{
+ struct i3c_device_info info = { .dyn_addr = addr };
+ struct i3c_dev_desc *newdev, *olddev;
+ u8 old_dyn_addr = addr, expected_dyn_addr;
+ struct i3c_ibi_setup ibireq = { };
+ bool enable_ibi = false;
+ int ret;
+
+ if (!master)
+ return -EINVAL;
+
+ newdev = i3c_master_alloc_i3c_dev(master, &info);
+ if (IS_ERR(newdev))
+ return PTR_ERR(newdev);
+
+ ret = i3c_master_attach_i3c_dev(master, newdev);
+ if (ret)
+ goto err_free_dev;
+
+ ret = i3c_master_retrieve_dev_info(newdev);
+ if (ret)
+ goto err_free_dev;
+
+ olddev = i3c_master_search_i3c_dev_duplicate(newdev);
+ if (olddev) {
+ newdev->boardinfo = olddev->boardinfo;
+ newdev->info.static_addr = olddev->info.static_addr;
+ newdev->dev = olddev->dev;
+ if (newdev->dev)
+ newdev->dev->desc = newdev;
+
+ /*
+ * We need to restore the IBI state too, so let's save the
+ * IBI information and try to restore them after olddev has
+ * been detached+released and its IBI has been stopped and
+ * the associated resources have been freed.
+ */
+ mutex_lock(&olddev->ibi_lock);
+ if (olddev->ibi) {
+ ibireq.handler = olddev->ibi->handler;
+ ibireq.max_payload_len = olddev->ibi->max_payload_len;
+ ibireq.num_slots = olddev->ibi->num_slots;
+
+ if (olddev->ibi->enabled) {
+ enable_ibi = true;
+ i3c_dev_disable_ibi_locked(olddev);
+ }
+
+ i3c_dev_free_ibi_locked(olddev);
+ }
+ mutex_unlock(&olddev->ibi_lock);
+
+ old_dyn_addr = olddev->info.dyn_addr;
+
+ i3c_master_detach_i3c_dev(olddev);
+ i3c_master_free_i3c_dev(olddev);
+ }
+
+ ret = i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
+ if (ret)
+ goto err_detach_dev;
+
+ /*
+ * Depending on our previous state, the expected dynamic address might
+ * differ:
+ * - if the device already had a dynamic address assigned, let's try to
+ * re-apply this one
+ * - if the device did not have a dynamic address and the firmware
+ * requested a specific address, pick this one
+ * - in any other case, keep the address automatically assigned by the
+ * master
+ */
+ if (old_dyn_addr && old_dyn_addr != newdev->info.dyn_addr)
+ expected_dyn_addr = old_dyn_addr;
+ else if (newdev->boardinfo && newdev->boardinfo->init_dyn_addr)
+ expected_dyn_addr = newdev->boardinfo->init_dyn_addr;
+ else
+ expected_dyn_addr = newdev->info.dyn_addr;
+
+ if (newdev->info.dyn_addr != expected_dyn_addr) {
+ /*
+ * Try to apply the expected dynamic address. If it fails, keep
+ * the address assigned by the master.
+ */
+ ret = i3c_master_setnewda_locked(master,
+ newdev->info.dyn_addr,
+ expected_dyn_addr);
+ if (!ret) {
+ old_dyn_addr = newdev->info.dyn_addr;
+ newdev->info.dyn_addr = expected_dyn_addr;
+ i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
+ } else {
+ dev_err(&master->dev,
+ "Failed to assign reserved/old address to device %d%llx",
+ master->bus.id, newdev->info.pid);
+ }
+ }
+
+	/*
+	 * Now it's time to try to restore the IBI setup. If we're lucky,
+	 * everything works as before; otherwise, all we can do is complain.
+	 * FIXME: maybe we should add a callback to inform the driver that it
+	 * should request the IBI again instead of trying to hide that from
+	 * it.
+	 */
+ if (ibireq.handler) {
+ mutex_lock(&newdev->ibi_lock);
+ ret = i3c_dev_request_ibi_locked(newdev, &ibireq);
+ if (ret) {
+ dev_err(&master->dev,
+ "Failed to request IBI on device %d-%llx",
+ master->bus.id, newdev->info.pid);
+ } else if (enable_ibi) {
+ ret = i3c_dev_enable_ibi_locked(newdev);
+ if (ret)
+ dev_err(&master->dev,
+ "Failed to re-enable IBI on device %d-%llx",
+ master->bus.id, newdev->info.pid);
+ }
+ mutex_unlock(&newdev->ibi_lock);
+ }
+
+ return 0;
+
+err_detach_dev:
+ if (newdev->dev && newdev->dev->desc)
+ newdev->dev->desc = NULL;
+
+ i3c_master_detach_i3c_dev(newdev);
+
+err_free_dev:
+ i3c_master_free_i3c_dev(newdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_add_i3c_dev_locked);
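+
+/*
+ * Sketch (hypothetical driver code; FOO_MAX_DEVS, foo_addr_assigned() and
+ * foo_assigned_addr() are illustrative): a ->do_daa() hook typically ends
+ * by reporting every address it assigned, so the core can query each new
+ * device over CCC and register it:
+ *
+ *	for (pos = 0; pos < FOO_MAX_DEVS; pos++) {
+ *		if (foo_addr_assigned(m, pos))
+ *			i3c_master_add_i3c_dev_locked(m,
+ *						      foo_assigned_addr(m, pos));
+ *	}
+ */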
+
+#define OF_I3C_REG1_IS_I2C_DEV BIT(31)
+
+static int
+of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
+ struct device_node *node, u32 *reg)
+{
+ struct i2c_dev_boardinfo *boardinfo;
+ struct device *dev = &master->dev;
+ int ret;
+
+ boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
+ if (!boardinfo)
+ return -ENOMEM;
+
+ ret = of_i2c_get_board_info(dev, node, &boardinfo->base);
+ if (ret)
+ return ret;
+
+ /* LVR is encoded in reg[2]. */
+ boardinfo->lvr = reg[2];
+
+ if (boardinfo->lvr & I3C_LVR_I2C_FM_MODE)
+ master->bus.scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
+
+ list_add_tail(&boardinfo->node, &master->boardinfo.i2c);
+ of_node_get(node);
+
+ return 0;
+}
+
+static int
+of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
+ struct device_node *node, u32 *reg)
+{
+ struct i3c_dev_boardinfo *boardinfo;
+ struct device *dev = &master->dev;
+ enum i3c_addr_slot_status addrstatus;
+ u32 init_dyn_addr = 0;
+
+ boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
+ if (!boardinfo)
+ return -ENOMEM;
+
+ if (reg[0]) {
+ if (reg[0] > I3C_MAX_ADDR)
+ return -EINVAL;
+
+ addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
+ reg[0]);
+ if (addrstatus != I3C_ADDR_SLOT_FREE)
+ return -EINVAL;
+ }
+
+ boardinfo->static_addr = reg[0];
+
+ if (!of_property_read_u32(node, "assigned-address", &init_dyn_addr)) {
+ if (init_dyn_addr > I3C_MAX_ADDR)
+ return -EINVAL;
+
+ addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
+ init_dyn_addr);
+ if (addrstatus != I3C_ADDR_SLOT_FREE)
+ return -EINVAL;
+ }
+
+ boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
+
+	if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
+	    I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
+		return -EINVAL;
+
+ boardinfo->init_dyn_addr = init_dyn_addr;
+ boardinfo->of_node = of_node_get(node);
+ list_add_tail(&boardinfo->node, &master->boardinfo.i3c);
+
+ return 0;
+}
+
+static int of_i3c_master_add_dev(struct i3c_master_controller *master,
+ struct device_node *node)
+{
+ u32 reg[3];
+ int ret;
+
+ if (!master || !node)
+ return -EINVAL;
+
+ ret = of_property_read_u32_array(node, "reg", reg, ARRAY_SIZE(reg));
+ if (ret)
+ return ret;
+
+ /*
+ * The manufacturer ID can't be 0. If reg[1] == 0 that means we're
+ * dealing with an I2C device.
+ */
+ if (!reg[1])
+ ret = of_i3c_master_add_i2c_boardinfo(master, node, reg);
+ else
+ ret = of_i3c_master_add_i3c_boardinfo(master, node, reg);
+
+ return ret;
+}
+
+static int of_populate_i3c_bus(struct i3c_master_controller *master)
+{
+ struct device *dev = &master->dev;
+ struct device_node *i3cbus_np = dev->of_node;
+ struct device_node *node;
+ int ret;
+ u32 val;
+
+ if (!i3cbus_np)
+ return 0;
+
+ for_each_available_child_of_node(i3cbus_np, node) {
+		ret = of_i3c_master_add_dev(master, node);
+		if (ret) {
+			of_node_put(node);
+			return ret;
+		}
+ }
+
+	/*
+	 * The user might want to limit I2C and I3C speed in case some
+	 * devices on the bus do not support typical rates, or if the bus
+	 * topology prevents it from using the max possible rate.
+	 */
+ if (!of_property_read_u32(i3cbus_np, "i2c-scl-hz", &val))
+ master->bus.scl_rate.i2c = val;
+
+ if (!of_property_read_u32(i3cbus_np, "i3c-scl-hz", &val))
+ master->bus.scl_rate.i3c = val;
+
+ return 0;
+}
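+
+/*
+ * Device tree sketch of the encoding handled above (all values are
+ * illustrative; the I3C bus binding document is authoritative):
+ *
+ *	i3c-master@2000 {
+ *		#address-cells = <3>;
+ *		#size-cells = <0>;
+ *		i2c-scl-hz = <100000>;
+ *
+ *		// I2C device: reg = <static-addr 0 lvr>
+ *		sensor@50 {
+ *			reg = <0x50 0x0 0x10>;
+ *		};
+ *
+ *		// I3C device: reg = <static-addr pid-msbs pid-lsbs>
+ *		eeprom@52 {
+ *			reg = <0x52 0x208 0x10da0000>;
+ *			assigned-address = <0x31>;
+ *		};
+ *	};
+ */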
+
+static int i3c_master_i2c_adapter_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *xfers, int nxfers)
+{
+ struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+ struct i2c_dev_desc *dev;
+ int i, ret;
+ u16 addr;
+
+ if (!xfers || !master || nxfers <= 0)
+ return -EINVAL;
+
+ if (!master->ops->i2c_xfers)
+ return -ENOTSUPP;
+
+ /* Doing transfers to different devices is not supported. */
+ addr = xfers[0].addr;
+ for (i = 1; i < nxfers; i++) {
+ if (addr != xfers[i].addr)
+ return -ENOTSUPP;
+ }
+
+ i3c_bus_normaluse_lock(&master->bus);
+ dev = i3c_master_find_i2c_dev_by_addr(master, addr);
+ if (!dev)
+ ret = -ENOENT;
+ else
+ ret = master->ops->i2c_xfers(dev, xfers, nxfers);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return ret ? ret : nxfers;
+}
+
+static u32 i3c_master_i2c_functionalities(struct i2c_adapter *adap)
+{
+ struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+
+ return master->ops->i2c_funcs(master);
+}
+
+static const struct i2c_algorithm i3c_master_i2c_algo = {
+ .master_xfer = i3c_master_i2c_adapter_xfer,
+ .functionality = i3c_master_i2c_functionalities,
+};
+
+static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
+{
+ struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
+ struct i2c_dev_desc *i2cdev;
+ int ret;
+
+ adap->dev.parent = master->dev.parent;
+ adap->owner = master->dev.parent->driver->owner;
+ adap->algo = &i3c_master_i2c_algo;
+ strncpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
+
+ /* FIXME: Should we allow i3c masters to override these values? */
+ adap->timeout = 1000;
+ adap->retries = 3;
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ return ret;
+
+ /*
+ * We silently ignore failures here. The bus should keep working
+ * correctly even if one or more i2c devices are not registered.
+ */
+ i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
+ i2cdev->dev = i2c_new_device(adap, &i2cdev->boardinfo->base);
+
+ return 0;
+}
+
+static void i3c_master_i2c_adapter_cleanup(struct i3c_master_controller *master)
+{
+ struct i2c_dev_desc *i2cdev;
+
+ i2c_del_adapter(&master->i2c);
+
+ i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
+ i2cdev->dev = NULL;
+}
+
+static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *i3cdev;
+
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+ if (!i3cdev->dev)
+ continue;
+
+ i3cdev->dev->desc = NULL;
+ if (device_is_registered(&i3cdev->dev->dev))
+ device_unregister(&i3cdev->dev->dev);
+ else
+ put_device(&i3cdev->dev->dev);
+ i3cdev->dev = NULL;
+ }
+}
+
+/**
+ * i3c_master_queue_ibi() - Queue an IBI
+ * @dev: the device this IBI is coming from
+ * @slot: the IBI slot used to store the payload
+ *
+ * Queue an IBI to the controller workqueue. The IBI handler attached to
+ * the dev will be called from a workqueue context.
+ */
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
+{
+ atomic_inc(&dev->ibi->pending_ibis);
+ queue_work(dev->common.master->wq, &slot->work);
+}
+EXPORT_SYMBOL_GPL(i3c_master_queue_ibi);
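+
+/*
+ * Typical producer side (a sketch; data->ibi_pool and
+ * foo_read_ibi_payload() are illustrative): the driver's IRQ handler grabs
+ * a free slot, copies the payload into it and hands it over to the core:
+ *
+ *	slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ *	if (!slot)
+ *		return;	// no free slot: the IBI is dropped
+ *
+ *	slot->len = foo_read_ibi_payload(master, slot->data,
+ *					 dev->ibi->max_payload_len);
+ *	i3c_master_queue_ibi(dev, slot);
+ */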
+
+static void i3c_master_handle_ibi(struct work_struct *work)
+{
+ struct i3c_ibi_slot *slot = container_of(work, struct i3c_ibi_slot,
+ work);
+ struct i3c_dev_desc *dev = slot->dev;
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ struct i3c_ibi_payload payload;
+
+ payload.data = slot->data;
+ payload.len = slot->len;
+
+ if (dev->dev)
+ dev->ibi->handler(dev->dev, &payload);
+
+ master->ops->recycle_ibi_slot(dev, slot);
+ if (atomic_dec_and_test(&dev->ibi->pending_ibis))
+ complete(&dev->ibi->all_ibis_handled);
+}
+
+static void i3c_master_init_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ slot->dev = dev;
+ INIT_WORK(&slot->work, i3c_master_handle_ibi);
+}
+
+struct i3c_generic_ibi_slot {
+ struct list_head node;
+ struct i3c_ibi_slot base;
+};
+
+struct i3c_generic_ibi_pool {
+ spinlock_t lock;
+ unsigned int num_slots;
+ struct i3c_generic_ibi_slot *slots;
+ void *payload_buf;
+ struct list_head free_slots;
+ struct list_head pending;
+};
+
+/**
+ * i3c_generic_ibi_free_pool() - Free a generic IBI pool
+ * @pool: the IBI pool to free
+ *
+ * Free all IBI slots allocated by a generic IBI pool.
+ */
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned int nslots = 0;
+
+ while (!list_empty(&pool->free_slots)) {
+ slot = list_first_entry(&pool->free_slots,
+ struct i3c_generic_ibi_slot, node);
+ list_del(&slot->node);
+ nslots++;
+ }
+
+ /*
+ * If the number of freed slots is not equal to the number of allocated
+ * slots we have a leak somewhere.
+ */
+ WARN_ON(nslots != pool->num_slots);
+
+ kfree(pool->payload_buf);
+ kfree(pool->slots);
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_free_pool);
+
+/**
+ * i3c_generic_ibi_alloc_pool() - Create a generic IBI pool
+ * @dev: the device this pool will be used for
+ * @req: IBI setup request describing what the device driver expects
+ *
+ * Create a generic IBI pool based on the information provided in @req.
+ *
+ * Return: a valid IBI pool in case of success, an ERR_PTR() otherwise.
+ */
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_generic_ibi_pool *pool;
+ struct i3c_generic_ibi_slot *slot;
+ unsigned int i;
+ int ret;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free_slots);
+ INIT_LIST_HEAD(&pool->pending);
+
+ pool->slots = kcalloc(req->num_slots, sizeof(*slot), GFP_KERNEL);
+ if (!pool->slots) {
+ ret = -ENOMEM;
+ goto err_free_pool;
+ }
+
+ if (req->max_payload_len) {
+ pool->payload_buf = kcalloc(req->num_slots,
+ req->max_payload_len, GFP_KERNEL);
+ if (!pool->payload_buf) {
+ ret = -ENOMEM;
+ goto err_free_pool;
+ }
+ }
+
+ for (i = 0; i < req->num_slots; i++) {
+ slot = &pool->slots[i];
+ i3c_master_init_ibi_slot(dev, &slot->base);
+
+ if (req->max_payload_len)
+ slot->base.data = pool->payload_buf +
+ (i * req->max_payload_len);
+
+ list_add_tail(&slot->node, &pool->free_slots);
+ pool->num_slots++;
+ }
+
+ return pool;
+
+err_free_pool:
+ i3c_generic_ibi_free_pool(pool);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_alloc_pool);
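+
+/*
+ * Sketch of a ->request_ibi() hook built on the generic pool (assuming a
+ * driver-private struct foo_i3c_dev_data previously stored with
+ * i3c_dev_set_master_data()):
+ *
+ *	static int foo_request_ibi(struct i3c_dev_desc *dev,
+ *				   const struct i3c_ibi_setup *req)
+ *	{
+ *		struct foo_i3c_dev_data *data = i3c_dev_get_master_data(dev);
+ *
+ *		data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ *		if (IS_ERR(data->ibi_pool))
+ *			return PTR_ERR(data->ibi_pool);
+ *
+ *		// ...enable IBI address matching in the controller...
+ *		return 0;
+ *	}
+ */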
+
+/**
+ * i3c_generic_ibi_get_free_slot() - Get a free slot from a generic IBI pool
+ * @pool: the pool to query an IBI slot on
+ *
+ * Search for a free slot in a generic IBI pool.
+ * The slot should be returned to the pool using i3c_generic_ibi_recycle_slot()
+ * when it's no longer needed.
+ *
+ * Return: a pointer to a free slot, or NULL if there's no free slot available.
+ */
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ slot = list_first_entry_or_null(&pool->free_slots,
+ struct i3c_generic_ibi_slot, node);
+ if (slot)
+ list_del(&slot->node);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return slot ? &slot->base : NULL;
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_get_free_slot);
+
+/**
+ * i3c_generic_ibi_recycle_slot() - Return a slot to a generic IBI pool
+ * @pool: the pool to return the IBI slot to
+ * @s: IBI slot to recycle
+ *
+ * Add an IBI slot back to its generic IBI pool. Should be called from the
+ * master driver's &i3c_master_controller_ops->recycle_ibi_slot() method.
+ */
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+ struct i3c_ibi_slot *s)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned long flags;
+
+ if (!s)
+ return;
+
+ slot = container_of(s, struct i3c_generic_ibi_slot, base);
+ spin_lock_irqsave(&pool->lock, flags);
+ list_add_tail(&slot->node, &pool->free_slots);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_recycle_slot);
+
+static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
+{
+ if (!ops || !ops->bus_init || !ops->priv_xfers ||
+ !ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers ||
+ !ops->i2c_funcs)
+ return -EINVAL;
+
+ if (ops->request_ibi &&
+ (!ops->enable_ibi || !ops->disable_ibi || !ops->free_ibi ||
+ !ops->recycle_ibi_slot))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * i3c_master_register() - register an I3C master
+ * @master: master used to send frames on the bus
+ * @parent: the parent device (the one that provides this I3C master
+ * controller)
+ * @ops: the master controller operations
+ * @secondary: true if you are registering a secondary master. Will return
+ * -ENOTSUPP if set to true since secondary masters are not yet
+ * supported
+ *
+ * This function takes care of everything for you:
+ *
+ * - creates and initializes the I3C bus
+ * - populates the bus with the I2C and I3C devices described in
+ *   @parent->of_node, if it is not NULL
+ * - registers all I3C devices added by the controller during bus
+ * initialization
+ * - registers the I2C adapter and all I2C devices
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary)
+{
+ struct i3c_bus *i3cbus = i3c_master_get_bus(master);
+ enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
+ struct i2c_dev_boardinfo *i2cbi;
+ int ret;
+
+ /* We do not support secondary masters yet. */
+ if (secondary)
+ return -ENOTSUPP;
+
+ ret = i3c_master_check_ops(ops);
+ if (ret)
+ return ret;
+
+ master->dev.parent = parent;
+ master->dev.of_node = of_node_get(parent->of_node);
+ master->dev.bus = &i3c_bus_type;
+ master->dev.type = &i3c_masterdev_type;
+ master->dev.release = i3c_masterdev_release;
+ master->ops = ops;
+ master->secondary = secondary;
+ INIT_LIST_HEAD(&master->boardinfo.i2c);
+ INIT_LIST_HEAD(&master->boardinfo.i3c);
+
+ ret = i3c_bus_init(i3cbus);
+ if (ret)
+ return ret;
+
+ device_initialize(&master->dev);
+ dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
+
+ ret = of_populate_i3c_bus(master);
+ if (ret)
+ goto err_put_dev;
+
+ list_for_each_entry(i2cbi, &master->boardinfo.i2c, node) {
+ switch (i2cbi->lvr & I3C_LVR_I2C_INDEX_MASK) {
+ case I3C_LVR_I2C_INDEX(0):
+ if (mode < I3C_BUS_MODE_MIXED_FAST)
+ mode = I3C_BUS_MODE_MIXED_FAST;
+ break;
+ case I3C_LVR_I2C_INDEX(1):
+ case I3C_LVR_I2C_INDEX(2):
+ if (mode < I3C_BUS_MODE_MIXED_SLOW)
+ mode = I3C_BUS_MODE_MIXED_SLOW;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_put_dev;
+ }
+ }
+
+ ret = i3c_bus_set_mode(i3cbus, mode);
+ if (ret)
+ goto err_put_dev;
+
+ master->wq = alloc_workqueue("%s", 0, 0, dev_name(parent));
+ if (!master->wq) {
+ ret = -ENOMEM;
+ goto err_put_dev;
+ }
+
+ ret = i3c_master_bus_init(master);
+ if (ret)
+ goto err_put_dev;
+
+ ret = device_add(&master->dev);
+ if (ret)
+ goto err_cleanup_bus;
+
+ /*
+ * Expose our I3C bus as an I2C adapter so that I2C devices are exposed
+ * through the I2C subsystem.
+ */
+ ret = i3c_master_i2c_adapter_init(master);
+ if (ret)
+ goto err_del_dev;
+
+ /*
+ * We're done initializing the bus and the controller, we can now
+	 * register I3C devices discovered during the initial DAA.
+ */
+ master->init_done = true;
+ i3c_bus_normaluse_lock(&master->bus);
+ i3c_master_register_new_i3c_devs(master);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return 0;
+
+err_del_dev:
+ device_del(&master->dev);
+
+err_cleanup_bus:
+ i3c_master_bus_cleanup(master);
+
+err_put_dev:
+ put_device(&master->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_register);
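+
+/*
+ * Registration sketch (hypothetical foo_* platform driver): allocate the
+ * driver state, set up resources and hand everything over to the core:
+ *
+ *	static int foo_i3c_probe(struct platform_device *pdev)
+ *	{
+ *		struct foo_master *m;
+ *
+ *		m = devm_kzalloc(&pdev->dev, sizeof(*m), GFP_KERNEL);
+ *		if (!m)
+ *			return -ENOMEM;
+ *
+ *		// ...map registers, get clocks, request the IRQ...
+ *
+ *		return i3c_master_register(&m->base, &pdev->dev,
+ *					   &foo_i3c_ops, false);
+ *	}
+ */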
+
+/**
+ * i3c_master_unregister() - unregister an I3C master
+ * @master: master used to send frames on the bus
+ *
+ * Basically undo everything done in i3c_master_register().
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_unregister(struct i3c_master_controller *master)
+{
+ i3c_master_i2c_adapter_cleanup(master);
+ i3c_master_unregister_i3c_devs(master);
+ i3c_master_bus_cleanup(master);
+ device_unregister(&master->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_master_unregister);
+
+int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *master;
+
+ if (!dev)
+ return -ENOENT;
+
+ master = i3c_dev_get_master(dev);
+ if (!master || !xfers)
+ return -EINVAL;
+
+ if (!master->ops->priv_xfers)
+ return -ENOTSUPP;
+
+ return master->ops->priv_xfers(dev, xfers, nxfers);
+}
+
+int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master;
+ int ret;
+
+ if (!dev->ibi)
+ return -EINVAL;
+
+ master = i3c_dev_get_master(dev);
+ ret = master->ops->disable_ibi(dev);
+ if (ret)
+ return ret;
+
+ reinit_completion(&dev->ibi->all_ibis_handled);
+ if (atomic_read(&dev->ibi->pending_ibis))
+ wait_for_completion(&dev->ibi->all_ibis_handled);
+
+ dev->ibi->enabled = false;
+
+ return 0;
+}
+
+int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ int ret;
+
+ if (!dev->ibi)
+ return -EINVAL;
+
+ ret = master->ops->enable_ibi(dev);
+ if (!ret)
+ dev->ibi->enabled = true;
+
+ return ret;
+}
+
+int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ struct i3c_device_ibi_info *ibi;
+ int ret;
+
+ if (!master->ops->request_ibi)
+ return -ENOTSUPP;
+
+ if (dev->ibi)
+ return -EBUSY;
+
+ ibi = kzalloc(sizeof(*ibi), GFP_KERNEL);
+ if (!ibi)
+ return -ENOMEM;
+
+ atomic_set(&ibi->pending_ibis, 0);
+ init_completion(&ibi->all_ibis_handled);
+ ibi->handler = req->handler;
+ ibi->max_payload_len = req->max_payload_len;
+ ibi->num_slots = req->num_slots;
+
+ dev->ibi = ibi;
+ ret = master->ops->request_ibi(dev, req);
+ if (ret) {
+ kfree(ibi);
+ dev->ibi = NULL;
+ }
+
+ return ret;
+}
+
+void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ if (!dev->ibi)
+ return;
+
+ if (WARN_ON(dev->ibi->enabled))
+ WARN_ON(i3c_dev_disable_ibi_locked(dev));
+
+ master->ops->free_ibi(dev);
+ kfree(dev->ibi);
+ dev->ibi = NULL;
+}
+
+static int __init i3c_init(void)
+{
+ return bus_register(&i3c_bus_type);
+}
+subsys_initcall(i3c_init);
+
+static void __exit i3c_exit(void)
+{
+ idr_destroy(&i3c_bus_idr);
+ bus_unregister(&i3c_bus_type);
+}
+module_exit(i3c_exit);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_DESCRIPTION("I3C core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
new file mode 100644
index 000000000000..26c6b585894e
--- /dev/null
+++ b/drivers/i3c/master/Kconfig
@@ -0,0 +1,22 @@
+config CDNS_I3C_MASTER
+ tristate "Cadence I3C master driver"
+ depends on I3C
+ depends on HAS_IOMEM
+ depends on !(ALPHA || PARISC)
+ help
+	  Enable this driver if you want to support the Cadence I3C master
+	  block.
+
+config DW_I3C_MASTER
+ tristate "Synospsys DesignWare I3C master driver"
+ depends on I3C
+ depends on HAS_IOMEM
+ depends on !(ALPHA || PARISC)
+	# ALPHA and PARISC need {read,write}sl()
+ help
+ Support for Synopsys DesignWare MIPI I3C Controller.
+
+ For details please see
+ https://www.synopsys.com/dw/ipdir.php?ds=mipi_i3c
+
+ This driver can also be built as a module. If so, the module
+ will be called dw-i3c-master.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
new file mode 100644
index 000000000000..fc53939a0bb1
--- /dev/null
+++ b/drivers/i3c/master/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CDNS_I3C_MASTER) += i3c-master-cdns.o
+obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
new file mode 100644
index 000000000000..b532e2c9cf5c
--- /dev/null
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -0,0 +1,1216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ *
+ * Author: Vitor Soares <vitor.soares@synopsys.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define DEVICE_CTRL 0x0
+#define DEV_CTRL_ENABLE BIT(31)
+#define DEV_CTRL_RESUME BIT(30)
+#define DEV_CTRL_HOT_JOIN_NACK BIT(8)
+#define DEV_CTRL_I2C_SLAVE_PRESENT BIT(7)
+
+#define DEVICE_ADDR 0x4
+#define DEV_ADDR_DYNAMIC_ADDR_VALID BIT(31)
+#define DEV_ADDR_DYNAMIC(x) (((x) << 16) & GENMASK(22, 16))
+
+#define HW_CAPABILITY 0x8
+#define COMMAND_QUEUE_PORT 0xc
+#define COMMAND_PORT_TOC BIT(30)
+#define COMMAND_PORT_READ_TRANSFER BIT(28)
+#define COMMAND_PORT_SDAP BIT(27)
+#define COMMAND_PORT_ROC BIT(26)
+#define COMMAND_PORT_SPEED(x) (((x) << 21) & GENMASK(23, 21))
+#define COMMAND_PORT_DEV_INDEX(x) (((x) << 16) & GENMASK(20, 16))
+#define COMMAND_PORT_CP BIT(15)
+#define COMMAND_PORT_CMD(x) (((x) << 7) & GENMASK(14, 7))
+#define COMMAND_PORT_TID(x) (((x) << 3) & GENMASK(6, 3))
+
+#define COMMAND_PORT_ARG_DATA_LEN(x) (((x) << 16) & GENMASK(31, 16))
+#define COMMAND_PORT_ARG_DATA_LEN_MAX 65536
+#define COMMAND_PORT_TRANSFER_ARG 0x01
+
+#define COMMAND_PORT_SDA_DATA_BYTE_3(x) (((x) << 24) & GENMASK(31, 24))
+#define COMMAND_PORT_SDA_DATA_BYTE_2(x) (((x) << 16) & GENMASK(23, 16))
+#define COMMAND_PORT_SDA_DATA_BYTE_1(x) (((x) << 8) & GENMASK(15, 8))
+#define COMMAND_PORT_SDA_BYTE_STRB_3 BIT(5)
+#define COMMAND_PORT_SDA_BYTE_STRB_2 BIT(4)
+#define COMMAND_PORT_SDA_BYTE_STRB_1 BIT(3)
+#define COMMAND_PORT_SHORT_DATA_ARG 0x02
+
+#define COMMAND_PORT_DEV_COUNT(x) (((x) << 21) & GENMASK(25, 21))
+#define COMMAND_PORT_ADDR_ASSGN_CMD 0x03
+
+#define RESPONSE_QUEUE_PORT 0x10
+#define RESPONSE_PORT_ERR_STATUS(x) (((x) & GENMASK(31, 28)) >> 28)
+#define RESPONSE_NO_ERROR 0
+#define RESPONSE_ERROR_CRC 1
+#define RESPONSE_ERROR_PARITY 2
+#define RESPONSE_ERROR_FRAME 3
+#define RESPONSE_ERROR_IBA_NACK 4
+#define RESPONSE_ERROR_ADDRESS_NACK 5
+#define RESPONSE_ERROR_OVER_UNDER_FLOW 6
+#define RESPONSE_ERROR_TRANSF_ABORT 8
+#define RESPONSE_ERROR_I2C_W_NACK_ERR 9
+#define RESPONSE_PORT_TID(x) (((x) & GENMASK(27, 24)) >> 24)
+#define RESPONSE_PORT_DATA_LEN(x) ((x) & GENMASK(15, 0))
+
+#define RX_TX_DATA_PORT 0x14
+#define IBI_QUEUE_STATUS 0x18
+#define QUEUE_THLD_CTRL 0x1c
+#define QUEUE_THLD_CTRL_RESP_BUF_MASK GENMASK(15, 8)
+#define QUEUE_THLD_CTRL_RESP_BUF(x) (((x) - 1) << 8)
+
+#define DATA_BUFFER_THLD_CTRL 0x20
+#define DATA_BUFFER_THLD_CTRL_RX_BUF GENMASK(11, 8)
+
+#define IBI_QUEUE_CTRL 0x24
+#define IBI_MR_REQ_REJECT 0x2C
+#define IBI_SIR_REQ_REJECT 0x30
+#define IBI_REQ_REJECT_ALL GENMASK(31, 0)
+
+#define RESET_CTRL 0x34
+#define RESET_CTRL_IBI_QUEUE BIT(5)
+#define RESET_CTRL_RX_FIFO BIT(4)
+#define RESET_CTRL_TX_FIFO BIT(3)
+#define RESET_CTRL_RESP_QUEUE BIT(2)
+#define RESET_CTRL_CMD_QUEUE BIT(1)
+#define RESET_CTRL_SOFT BIT(0)
+
+#define SLV_EVENT_CTRL 0x38
+#define INTR_STATUS 0x3c
+#define INTR_STATUS_EN 0x40
+#define INTR_SIGNAL_EN 0x44
+#define INTR_FORCE 0x48
+#define INTR_BUSOWNER_UPDATE_STAT BIT(13)
+#define INTR_IBI_UPDATED_STAT BIT(12)
+#define INTR_READ_REQ_RECV_STAT BIT(11)
+#define INTR_DEFSLV_STAT BIT(10)
+#define INTR_TRANSFER_ERR_STAT BIT(9)
+#define INTR_DYN_ADDR_ASSGN_STAT BIT(8)
+#define INTR_CCC_UPDATED_STAT BIT(6)
+#define INTR_TRANSFER_ABORT_STAT BIT(5)
+#define INTR_RESP_READY_STAT BIT(4)
+#define INTR_CMD_QUEUE_READY_STAT BIT(3)
+#define INTR_IBI_THLD_STAT BIT(2)
+#define INTR_RX_THLD_STAT BIT(1)
+#define INTR_TX_THLD_STAT BIT(0)
+#define INTR_ALL (INTR_BUSOWNER_UPDATE_STAT | \
+ INTR_IBI_UPDATED_STAT | \
+ INTR_READ_REQ_RECV_STAT | \
+ INTR_DEFSLV_STAT | \
+ INTR_TRANSFER_ERR_STAT | \
+ INTR_DYN_ADDR_ASSGN_STAT | \
+ INTR_CCC_UPDATED_STAT | \
+ INTR_TRANSFER_ABORT_STAT | \
+ INTR_RESP_READY_STAT | \
+ INTR_CMD_QUEUE_READY_STAT | \
+ INTR_IBI_THLD_STAT | \
+ INTR_TX_THLD_STAT | \
+ INTR_RX_THLD_STAT)
+
+#define INTR_MASTER_MASK (INTR_TRANSFER_ERR_STAT | \
+ INTR_RESP_READY_STAT)
+
+#define QUEUE_STATUS_LEVEL 0x4c
+#define QUEUE_STATUS_IBI_STATUS_CNT(x) (((x) & GENMASK(28, 24)) >> 24)
+#define QUEUE_STATUS_IBI_BUF_BLR(x) (((x) & GENMASK(23, 16)) >> 16)
+#define QUEUE_STATUS_LEVEL_RESP(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QUEUE_STATUS_LEVEL_CMD(x) ((x) & GENMASK(7, 0))
+
+#define DATA_BUFFER_STATUS_LEVEL 0x50
+#define DATA_BUFFER_STATUS_LEVEL_TX(x) ((x) & GENMASK(7, 0))
+
+#define PRESENT_STATE 0x54
+#define CCC_DEVICE_STATUS 0x58
+#define DEVICE_ADDR_TABLE_POINTER 0x5c
+#define DEVICE_ADDR_TABLE_DEPTH(x) (((x) & GENMASK(31, 16)) >> 16)
+#define DEVICE_ADDR_TABLE_ADDR(x) ((x) & GENMASK(7, 0))
+
+#define DEV_CHAR_TABLE_POINTER 0x60
+#define VENDOR_SPECIFIC_REG_POINTER 0x6c
+#define SLV_PID_VALUE 0x74
+#define SLV_CHAR_CTRL 0x78
+#define SLV_MAX_LEN 0x7c
+#define MAX_READ_TURNAROUND 0x80
+#define MAX_DATA_SPEED 0x84
+#define SLV_DEBUG_STATUS 0x88
+#define SLV_INTR_REQ 0x8c
+#define DEVICE_CTRL_EXTENDED 0xb0
+#define SCL_I3C_OD_TIMING 0xb4
+#define SCL_I3C_PP_TIMING 0xb8
+#define SCL_I3C_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
+#define SCL_I3C_TIMING_LCNT(x) ((x) & GENMASK(7, 0))
+#define SCL_I3C_TIMING_CNT_MIN 5
+
+#define SCL_I2C_FM_TIMING 0xbc
+#define SCL_I2C_FM_TIMING_HCNT(x) (((x) << 16) & GENMASK(31, 16))
+#define SCL_I2C_FM_TIMING_LCNT(x) ((x) & GENMASK(15, 0))
+
+#define SCL_I2C_FMP_TIMING 0xc0
+#define SCL_I2C_FMP_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
+#define SCL_I2C_FMP_TIMING_LCNT(x) ((x) & GENMASK(15, 0))
+
+#define SCL_EXT_LCNT_TIMING 0xc8
+#define SCL_EXT_LCNT_4(x) (((x) << 24) & GENMASK(31, 24))
+#define SCL_EXT_LCNT_3(x) (((x) << 16) & GENMASK(23, 16))
+#define SCL_EXT_LCNT_2(x) (((x) << 8) & GENMASK(15, 8))
+#define SCL_EXT_LCNT_1(x) ((x) & GENMASK(7, 0))
+
+#define SCL_EXT_TERMN_LCNT_TIMING 0xcc
+#define BUS_FREE_TIMING 0xd4
+#define BUS_I3C_MST_FREE(x) ((x) & GENMASK(15, 0))
+
+#define BUS_IDLE_TIMING 0xd8
+#define I3C_VER_ID 0xe0
+#define I3C_VER_TYPE 0xe4
+#define EXTENDED_CAPABILITY 0xe8
+#define SLAVE_CONFIG 0xec
+
+#define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31)
+#define DEV_ADDR_TABLE_DYNAMIC_ADDR(x) (((x) << 16) & GENMASK(23, 16))
+#define DEV_ADDR_TABLE_STATIC_ADDR(x) ((x) & GENMASK(6, 0))
+#define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2))
+
+#define MAX_DEVS 32
+
+#define I3C_BUS_SDR1_SCL_RATE 8000000
+#define I3C_BUS_SDR2_SCL_RATE 6000000
+#define I3C_BUS_SDR3_SCL_RATE 4000000
+#define I3C_BUS_SDR4_SCL_RATE 2000000
+#define I3C_BUS_I2C_FM_TLOW_MIN_NS 1300
+#define I3C_BUS_I2C_FMP_TLOW_MIN_NS 500
+#define I3C_BUS_THIGH_MAX_NS 41
+
+#define XFER_TIMEOUT (msecs_to_jiffies(1000))
+
+struct dw_i3c_master_caps {
+ u8 cmdfifodepth;
+ u8 datafifodepth;
+};
+
+struct dw_i3c_cmd {
+ u32 cmd_lo;
+ u32 cmd_hi;
+ u16 tx_len;
+ const void *tx_buf;
+ u16 rx_len;
+ void *rx_buf;
+ u8 error;
+};
+
+struct dw_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int ncmds;
+ struct dw_i3c_cmd cmds[0];
+};
+
+struct dw_i3c_master {
+ struct i3c_master_controller base;
+ u16 maxdevs;
+ u16 datstartaddr;
+ u32 free_pos;
+ struct {
+ struct list_head list;
+ struct dw_i3c_xfer *cur;
+ spinlock_t lock;
+ } xferqueue;
+ struct dw_i3c_master_caps caps;
+ void __iomem *regs;
+ struct reset_control *core_rst;
+ struct clk *core_clk;
+ char version[5];
+ char type[5];
+ u8 addrs[MAX_DEVS];
+};
+
+struct dw_i3c_i2c_dev_data {
+ u8 index;
+};
+
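+/*
+ * Compute the I3C dynamic address parity bit: fold the 7-bit address
+ * into a nibble and use 0x9669 as a 16-entry parity lookup table. The
+ * result is 1 when the address has an even number of bits set, so the
+ * address plus parity bit together have odd parity (e.g. 0x08 -> 0).
+ */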
+static u8 even_parity(u8 p)
+{
+ p ^= p >> 4;
+ p &= 0xf;
+
+ return (0x9669 >> p) & 1;
+}
+
+static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_ENTAS(0, true):
+ case I3C_CCC_ENTAS(0, false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_SETMWL(true):
+ case I3C_CCC_SETMWL(false):
+ case I3C_CCC_SETMRL(true):
+ case I3C_CCC_SETMRL(false):
+ case I3C_CCC_ENTHDR(0):
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETMXDS:
+ case I3C_CCC_GETHDRCAP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline struct dw_i3c_master *
+to_dw_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct dw_i3c_master, base);
+}
+
+static void dw_i3c_master_disable(struct dw_i3c_master *master)
+{
+ writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
+ master->regs + DEVICE_CTRL);
+}
+
+static void dw_i3c_master_enable(struct dw_i3c_master *master)
+{
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_ENABLE,
+ master->regs + DEVICE_CTRL);
+}
+
+static int dw_i3c_master_get_addr_pos(struct dw_i3c_master *master, u8 addr)
+{
+ int pos;
+
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (addr == master->addrs[pos])
+ return pos;
+ }
+
+ return -EINVAL;
+}
+
+static int dw_i3c_master_get_free_pos(struct dw_i3c_master *master)
+{
+ if (!(master->free_pos & GENMASK(master->maxdevs - 1, 0)))
+ return -ENOSPC;
+
+ return ffs(master->free_pos) - 1;
+}
+
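+/*
+ * The RX_TX_DATA_PORT FIFO is accessed one 32-bit word at a time, so a
+ * trailing 1-3 bytes are staged through a scratch word: zero-padded on
+ * writes, and copied out of the last word read on reads.
+ */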
+static void dw_i3c_master_wr_tx_fifo(struct dw_i3c_master *master,
+ const u8 *bytes, int nbytes)
+{
+ writesl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp = 0;
+
+ memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
+ writesl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
+ }
+}
+
+static void dw_i3c_master_read_rx_fifo(struct dw_i3c_master *master,
+ u8 *bytes, int nbytes)
+{
+ readsl(master->regs + RX_TX_DATA_PORT, bytes, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp;
+
+ readsl(master->regs + RX_TX_DATA_PORT, &tmp, 1);
+ memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+}
+
+static struct dw_i3c_xfer *
+dw_i3c_master_alloc_xfer(struct dw_i3c_master *master, unsigned int ncmds)
+{
+ struct dw_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void dw_i3c_master_free_xfer(struct dw_i3c_xfer *xfer)
+{
+ kfree(xfer);
+}
+
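+/*
+ * Transfers are strictly serialized: only xferqueue.cur is in flight and
+ * everything else waits on xferqueue.list. The *_locked helpers below
+ * expect the caller to hold xferqueue.lock.
+ */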
+static void dw_i3c_master_start_xfer_locked(struct dw_i3c_master *master)
+{
+ struct dw_i3c_xfer *xfer = master->xferqueue.cur;
+ unsigned int i;
+ u32 thld_ctrl;
+
+ if (!xfer)
+ return;
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
+ }
+
+ thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
+ thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
+ thld_ctrl |= QUEUE_THLD_CTRL_RESP_BUF(xfer->ncmds);
+ writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ writel(cmd->cmd_hi, master->regs + COMMAND_QUEUE_PORT);
+ writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
+ }
+}
+
+static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master,
+ struct dw_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ init_completion(&xfer->comp);
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ dw_i3c_master_start_xfer_locked(master);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master,
+ struct dw_i3c_xfer *xfer)
+{
+ if (master->xferqueue.cur == xfer) {
+ u32 status;
+
+ master->xferqueue.cur = NULL;
+
+ writel(RESET_CTRL_RX_FIFO | RESET_CTRL_TX_FIFO |
+ RESET_CTRL_RESP_QUEUE | RESET_CTRL_CMD_QUEUE,
+ master->regs + RESET_CTRL);
+
+ readl_poll_timeout_atomic(master->regs + RESET_CTRL, status,
+ !status, 10, 1000000);
+ } else {
+ list_del_init(&xfer->node);
+ }
+}
+
+static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master,
+ struct dw_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ dw_i3c_master_dequeue_xfer_locked(master, xfer);
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
+{
+ struct dw_i3c_xfer *xfer = master->xferqueue.cur;
+ int i, ret = 0;
+ u32 nresp;
+
+ if (!xfer)
+ return;
+
+ nresp = readl(master->regs + QUEUE_STATUS_LEVEL);
+ nresp = QUEUE_STATUS_LEVEL_RESP(nresp);
+
+ for (i = 0; i < nresp; i++) {
+ struct dw_i3c_cmd *cmd;
+ u32 resp;
+
+ resp = readl(master->regs + RESPONSE_QUEUE_PORT);
+
+ cmd = &xfer->cmds[RESPONSE_PORT_TID(resp)];
+ cmd->rx_len = RESPONSE_PORT_DATA_LEN(resp);
+ cmd->error = RESPONSE_PORT_ERR_STATUS(resp);
+ if (cmd->rx_len && !cmd->error)
+ dw_i3c_master_read_rx_fifo(master, cmd->rx_buf,
+ cmd->rx_len);
+ }
+
+ for (i = 0; i < nresp; i++) {
+ switch (xfer->cmds[i].error) {
+ case RESPONSE_NO_ERROR:
+ break;
+ case RESPONSE_ERROR_PARITY:
+ case RESPONSE_ERROR_IBA_NACK:
+ case RESPONSE_ERROR_TRANSF_ABORT:
+ case RESPONSE_ERROR_CRC:
+ case RESPONSE_ERROR_FRAME:
+ ret = -EIO;
+ break;
+ case RESPONSE_ERROR_OVER_UNDER_FLOW:
+ ret = -ENOSPC;
+ break;
+ case RESPONSE_ERROR_I2C_W_NACK_ERR:
+ case RESPONSE_ERROR_ADDRESS_NACK:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ if (ret < 0) {
+ /* xferqueue.lock is already held here, use the locked helper */
+ dw_i3c_master_dequeue_xfer_locked(master, xfer);
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
+ master->regs + DEVICE_CTRL);
+ }
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct dw_i3c_xfer,
+ node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ dw_i3c_master_start_xfer_locked(master);
+}
+
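+/*
+ * Derive the push-pull SCL timing from the core clock: the high period
+ * is capped at I3C_BUS_THIGH_MAX_NS (41 ns) and the low period makes up
+ * the rest of the target SCL cycle. For example, a 250 MHz core clock
+ * (4 ns period) targeting the typical 12.5 MHz I3C SCL gives hcnt = 10
+ * and lcnt = 20 - 10 = 10.
+ */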
+static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
+{
+ unsigned long core_rate, core_period;
+ u32 scl_timing;
+ u8 hcnt, lcnt;
+
+ core_rate = clk_get_rate(master->core_clk);
+ if (!core_rate)
+ return -EINVAL;
+
+ core_period = DIV_ROUND_UP(1000000000, core_rate);
+
+ hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
+ if (hcnt < SCL_I3C_TIMING_CNT_MIN)
+ hcnt = SCL_I3C_TIMING_CNT_MIN;
+
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_TYP_I3C_SCL_RATE) - hcnt;
+ if (lcnt < SCL_I3C_TIMING_CNT_MIN)
+ lcnt = SCL_I3C_TIMING_CNT_MIN;
+
+ scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
+
+ if (!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_I2C_SLAVE_PRESENT))
+ writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+
+ lcnt = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period);
+ scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
+
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
+ scl_timing = SCL_EXT_LCNT_1(lcnt);
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR2_SCL_RATE) - hcnt;
+ scl_timing |= SCL_EXT_LCNT_2(lcnt);
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR3_SCL_RATE) - hcnt;
+ scl_timing |= SCL_EXT_LCNT_3(lcnt);
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR4_SCL_RATE) - hcnt;
+ scl_timing |= SCL_EXT_LCNT_4(lcnt);
+ writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);
+
+ return 0;
+}
+
+static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
+{
+ unsigned long core_rate, core_period;
+ u16 hcnt, lcnt;
+ u32 scl_timing;
+
+ core_rate = clk_get_rate(master->core_clk);
+ if (!core_rate)
+ return -EINVAL;
+
+ core_period = DIV_ROUND_UP(1000000000, core_rate);
+
+ lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
+ scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
+ SCL_I2C_FMP_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
+
+ lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
+ hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
+ scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
+ SCL_I2C_FM_TIMING_LCNT(lcnt);
+ writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
+
+ writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
+ master->regs + DEVICE_CTRL);
+
+ return 0;
+}
+
+static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ struct i3c_device_info info = { };
+ u32 thld_ctrl;
+ int ret;
+
+ switch (bus->mode) {
+ case I3C_BUS_MODE_MIXED_FAST:
+ ret = dw_i2c_clk_cfg(master);
+ if (ret)
+ return ret;
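+ /* fall through */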
+ case I3C_BUS_MODE_PURE:
+ ret = dw_i3c_clk_cfg(master);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
+ thld_ctrl &= ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
+ writel(thld_ctrl, master->regs + QUEUE_THLD_CTRL);
+
+ thld_ctrl = readl(master->regs + DATA_BUFFER_THLD_CTRL);
+ thld_ctrl &= ~DATA_BUFFER_THLD_CTRL_RX_BUF;
+ writel(thld_ctrl, master->regs + DATA_BUFFER_THLD_CTRL);
+
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ writel(INTR_MASTER_MASK, master->regs + INTR_STATUS_EN);
+ writel(INTR_MASTER_MASK, master->regs + INTR_SIGNAL_EN);
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
+ master->regs + DEVICE_ADDR);
+
+ info.dyn_addr = ret;
+
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ writel(IBI_REQ_REJECT_ALL, master->regs + IBI_SIR_REQ_REJECT);
+ writel(IBI_REQ_REJECT_ALL, master->regs + IBI_MR_REQ_REJECT);
+
+ /* For now don't support Hot-Join */
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
+ master->regs + DEVICE_CTRL);
+
+ dw_i3c_master_enable(master);
+
+ return 0;
+}
+
+static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ dw_i3c_master_disable(master);
+}
+
+static int dw_i3c_ccc_set(struct dw_i3c_master *master,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
+ int ret, pos = 0;
+
+ if (ccc->id & I3C_CCC_DIRECT) {
+ pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
+ if (pos < 0)
+ return pos;
+ }
+
+ xfer = dw_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ cmd = xfer->cmds;
+ cmd->tx_buf = ccc->dests[0].payload.data;
+ cmd->tx_len = ccc->dests[0].payload.len;
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ cmd->cmd_lo = COMMAND_PORT_CP |
+ COMMAND_PORT_DEV_INDEX(pos) |
+ COMMAND_PORT_CMD(ccc->id) |
+ COMMAND_PORT_TOC |
+ COMMAND_PORT_ROC;
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
+ ccc->err = I3C_ERROR_M2;
+
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
+{
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
+ int ret, pos;
+
+ pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
+ if (pos < 0)
+ return pos;
+
+ xfer = dw_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ cmd = xfer->cmds;
+ cmd->rx_buf = ccc->dests[0].payload.data;
+ cmd->rx_len = ccc->dests[0].payload.len;
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
+ COMMAND_PORT_CP |
+ COMMAND_PORT_DEV_INDEX(pos) |
+ COMMAND_PORT_CMD(ccc->id) |
+ COMMAND_PORT_TOC |
+ COMMAND_PORT_ROC;
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
+ ccc->err = I3C_ERROR_M2;
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int ret = 0;
+
+ if (ccc->id == I3C_CCC_ENTDAA)
+ return -EINVAL;
+
+ if (ccc->rnw)
+ ret = dw_i3c_ccc_get(master, ccc);
+ else
+ ret = dw_i3c_ccc_set(master, ccc);
+
+ return ret;
+}
+
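+/*
+ * DAA outline: program a pre-assigned dynamic address (with its parity
+ * bit) into every free DAT entry, then issue one ENTDAA address
+ * assignment command covering those entries. On completion the response
+ * length reports how many entries were left unused, which is how the
+ * newly discovered devices are derived below.
+ */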
+static int dw_i3c_master_daa(struct i3c_master_controller *m)
+{
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
+ u32 olddevs, newdevs;
+ u8 p, last_addr = 0;
+ int ret, pos;
+
+ olddevs = ~(master->free_pos);
+
+ /* Prepare DAT before launching DAA. */
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (olddevs & BIT(pos))
+ continue;
+
+ ret = i3c_master_get_free_addr(m, last_addr + 1);
+ if (ret < 0)
+ return -ENOSPC;
+
+ master->addrs[pos] = ret;
+ p = even_parity(ret);
+ last_addr = ret;
+ ret |= (p << 7);
+
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
+ }
+
+ xfer = dw_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ pos = dw_i3c_master_get_free_pos(master);
+ cmd = &xfer->cmds[0];
+ cmd->cmd_hi = 0x1;
+ cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) |
+ COMMAND_PORT_DEV_INDEX(pos) |
+ COMMAND_PORT_CMD(I3C_CCC_ENTDAA) |
+ COMMAND_PORT_ADDR_ASSGN_CMD |
+ COMMAND_PORT_TOC |
+ COMMAND_PORT_ROC;
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ newdevs = GENMASK(master->maxdevs - cmd->rx_len - 1, 0);
+ newdevs &= ~olddevs;
+
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (newdevs & BIT(pos))
+ i3c_master_add_i3c_dev_locked(m, master->addrs[pos]);
+ }
+
+ dw_i3c_master_free_xfer(xfer);
+
+ i3c_master_disec_locked(m, I3C_BROADCAST_ADDR,
+ I3C_CCC_EVENT_HJ |
+ I3C_CCC_EVENT_MR |
+ I3C_CCC_EVENT_SIR);
+
+ return 0;
+}
+
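+/*
+ * Transfers are only accepted when the whole batch fits in the hardware
+ * queues: one command slot per message plus enough TX/RX FIFO words for
+ * every payload, since all TX data is pushed before the commands start.
+ */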
+static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *i3c_xfers,
+ int i3c_nxfers)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ unsigned int nrxwords = 0, ntxwords = 0;
+ struct dw_i3c_xfer *xfer;
+ int i, ret = 0;
+
+ if (!i3c_nxfers)
+ return 0;
+
+ if (i3c_nxfers > master->caps.cmdfifodepth)
+ return -ENOTSUPP;
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ if (i3c_xfers[i].len > COMMAND_PORT_ARG_DATA_LEN_MAX)
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ if (i3c_xfers[i].rnw)
+ nrxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
+ else
+ ntxwords += DIV_ROUND_UP(i3c_xfers[i].len, 4);
+ }
+
+ if (ntxwords > master->caps.datafifodepth ||
+ nrxwords > master->caps.datafifodepth)
+ return -ENOTSUPP;
+
+ xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i3c_xfers[i].len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ if (i3c_xfers[i].rnw) {
+ cmd->rx_buf = i3c_xfers[i].data.in;
+ cmd->rx_len = i3c_xfers[i].len;
+ cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
+ COMMAND_PORT_SPEED(dev->info.max_read_ds);
+
+ } else {
+ cmd->tx_buf = i3c_xfers[i].data.out;
+ cmd->tx_len = i3c_xfers[i].len;
+ cmd->cmd_lo =
+ COMMAND_PORT_SPEED(dev->info.max_write_ds);
+ }
+
+ cmd->cmd_lo |= COMMAND_PORT_TID(i) |
+ COMMAND_PORT_DEV_INDEX(data->index) |
+ COMMAND_PORT_ROC;
+
+ if (i == (i3c_nxfers - 1))
+ cmd->cmd_lo |= COMMAND_PORT_TOC;
+ }
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ if (!old_dyn_addr)
+ return 0;
+
+ master->addrs[data->index] = dev->info.dyn_addr;
+
+ return 0;
+}
+
+static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ master->addrs[pos] = dev->info.dyn_addr;
+ master->free_pos &= ~BIT(pos);
+ i3c_dev_set_master_data(dev, data);
+
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ return 0;
+}
+
+static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ i3c_dev_set_master_data(dev, NULL);
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *i2c_xfers,
+ int i2c_nxfers)
+{
+ struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ unsigned int nrxwords = 0, ntxwords = 0;
+ struct dw_i3c_xfer *xfer;
+ int i, ret = 0;
+
+ if (!i2c_nxfers)
+ return 0;
+
+ if (i2c_nxfers > master->caps.cmdfifodepth)
+ return -ENOTSUPP;
+
+ for (i = 0; i < i2c_nxfers; i++) {
+ if (i2c_xfers[i].len > COMMAND_PORT_ARG_DATA_LEN_MAX)
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < i2c_nxfers; i++) {
+ if (i2c_xfers[i].flags & I2C_M_RD)
+ nrxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
+ else
+ ntxwords += DIV_ROUND_UP(i2c_xfers[i].len, 4);
+ }
+
+ if (ntxwords > master->caps.datafifodepth ||
+ nrxwords > master->caps.datafifodepth)
+ return -ENOTSUPP;
+
+ xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < i2c_nxfers; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(i2c_xfers[i].len) |
+ COMMAND_PORT_TRANSFER_ARG;
+
+ cmd->cmd_lo = COMMAND_PORT_TID(i) |
+ COMMAND_PORT_DEV_INDEX(data->index) |
+ COMMAND_PORT_ROC;
+
+ if (i2c_xfers[i].flags & I2C_M_RD) {
+ cmd->cmd_lo |= COMMAND_PORT_READ_TRANSFER;
+ cmd->rx_buf = i2c_xfers[i].buf;
+ cmd->rx_len = i2c_xfers[i].len;
+ } else {
+ cmd->tx_buf = i2c_xfers[i].buf;
+ cmd->tx_len = i2c_xfers[i].len;
+ }
+
+ if (i == (i2c_nxfers - 1))
+ cmd->cmd_lo |= COMMAND_PORT_TOC;
+ }
+
+ dw_i3c_master_enqueue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
+ dw_i3c_master_dequeue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ dw_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_i2c_dev_data *data;
+ int pos;
+
+ pos = dw_i3c_master_get_free_pos(master);
+ if (pos < 0)
+ return pos;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->index = pos;
+ master->addrs[pos] = dev->boardinfo->base.addr;
+ master->free_pos &= ~BIT(pos);
+ i2c_dev_set_master_data(dev, data);
+
+ writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV |
+ DEV_ADDR_TABLE_STATIC_ADDR(dev->boardinfo->base.addr),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ return 0;
+}
+
+static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+
+ writel(0,
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+ i2c_dev_set_master_data(dev, NULL);
+ master->addrs[data->index] = 0;
+ master->free_pos |= BIT(data->index);
+ kfree(data);
+}
+
+static u32 dw_i3c_master_i2c_funcs(struct i3c_master_controller *m)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
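+/*
+ * One completion interrupt covers the whole response queue: interrupts
+ * that are not enabled are acked and reported as IRQ_NONE, anything else
+ * drains the responses for the current transfer under the queue lock.
+ */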
+static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
+{
+ struct dw_i3c_master *master = dev_id;
+ u32 status;
+
+ status = readl(master->regs + INTR_STATUS);
+
+ if (!(status & readl(master->regs + INTR_STATUS_EN))) {
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ return IRQ_NONE;
+ }
+
+ spin_lock(&master->xferqueue.lock);
+ dw_i3c_master_end_xfer_locked(master, status);
+ if (status & INTR_TRANSFER_ERR_STAT)
+ writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
+ spin_unlock(&master->xferqueue.lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
+ .bus_init = dw_i3c_master_bus_init,
+ .bus_cleanup = dw_i3c_master_bus_cleanup,
+ .attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
+ .do_daa = dw_i3c_master_daa,
+ .supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = dw_i3c_master_send_ccc_cmd,
+ .priv_xfers = dw_i3c_master_priv_xfers,
+ .attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
+ .i2c_xfers = dw_i3c_master_i2c_xfers,
+ .i2c_funcs = dw_i3c_master_i2c_funcs,
+};
+
+static int dw_i3c_probe(struct platform_device *pdev)
+{
+ struct dw_i3c_master *master;
+ struct resource *res;
+ int ret, irq;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ master->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ master->core_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(master->core_clk))
+ return PTR_ERR(master->core_clk);
+
+ master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "core_rst");
+ if (IS_ERR(master->core_rst))
+ return PTR_ERR(master->core_rst);
+
+ ret = clk_prepare_enable(master->core_clk);
+ if (ret)
+ return ret;
+
+ reset_control_deassert(master->core_rst);
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ writel(INTR_ALL, master->regs + INTR_STATUS);
+ irq = platform_get_irq(pdev, 0);
+ ret = devm_request_irq(&pdev->dev, irq,
+ dw_i3c_master_irq_handler, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ goto err_assert_rst;
+
+ platform_set_drvdata(pdev, master);
+
+ /* Read the FIFO and queue depths advertised by the IP. */
+ ret = readl(master->regs + QUEUE_STATUS_LEVEL);
+ master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret);
+
+ ret = readl(master->regs + DATA_BUFFER_STATUS_LEVEL);
+ master->caps.datafifodepth = DATA_BUFFER_STATUS_LEVEL_TX(ret);
+
+ ret = readl(master->regs + DEVICE_ADDR_TABLE_POINTER);
+ master->datstartaddr = ret;
+ master->maxdevs = ret >> 16;
+ master->free_pos = GENMASK(master->maxdevs - 1, 0);
+
+ ret = i3c_master_register(&master->base, &pdev->dev,
+ &dw_mipi_i3c_ops, false);
+ if (ret)
+ goto err_assert_rst;
+
+ return 0;
+
+err_assert_rst:
+ reset_control_assert(master->core_rst);
+ clk_disable_unprepare(master->core_clk);
+
+ return ret;
+}
+
+static int dw_i3c_remove(struct platform_device *pdev)
+{
+ struct dw_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i3c_master_unregister(&master->base);
+ if (ret)
+ return ret;
+
+ reset_control_assert(master->core_rst);
+
+ clk_disable_unprepare(master->core_clk);
+
+ return 0;
+}
+
+static const struct of_device_id dw_i3c_master_of_match[] = {
+ { .compatible = "snps,dw-i3c-master-1.00a", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);
+
+static struct platform_driver dw_i3c_driver = {
+ .probe = dw_i3c_probe,
+ .remove = dw_i3c_remove,
+ .driver = {
+ .name = "dw-i3c-master",
+ .of_match_table = of_match_ptr(dw_i3c_master_of_match),
+ },
+};
+module_platform_driver(dw_i3c_driver);
+
+MODULE_AUTHOR("Vitor Soares <vitor.soares@synopsys.com>");
+MODULE_DESCRIPTION("DesignWare MIPI I3C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
new file mode 100644
index 000000000000..bbd79b8b1a80
--- /dev/null
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -0,0 +1,1666 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define DEV_ID 0x0
+#define DEV_ID_I3C_MASTER 0x5034
+
+#define CONF_STATUS0 0x4
+#define CONF_STATUS0_CMDR_DEPTH(x) (4 << (((x) & GENMASK(31, 29)) >> 29))
+#define CONF_STATUS0_ECC_CHK BIT(28)
+#define CONF_STATUS0_INTEG_CHK BIT(27)
+#define CONF_STATUS0_CSR_DAP_CHK BIT(26)
+#define CONF_STATUS0_TRANS_TOUT_CHK BIT(25)
+#define CONF_STATUS0_PROT_FAULTS_CHK BIT(24)
+#define CONF_STATUS0_GPO_NUM(x) (((x) & GENMASK(23, 16)) >> 16)
+#define CONF_STATUS0_GPI_NUM(x) (((x) & GENMASK(15, 8)) >> 8)
+#define CONF_STATUS0_IBIR_DEPTH(x) (4 << (((x) & GENMASK(7, 6)) >> 6))
+#define CONF_STATUS0_SUPPORTS_DDR BIT(5)
+#define CONF_STATUS0_SEC_MASTER BIT(4)
+#define CONF_STATUS0_DEVS_NUM(x) ((x) & GENMASK(3, 0))
+
+#define CONF_STATUS1 0x8
+#define CONF_STATUS1_IBI_HW_RES(x) ((((x) & GENMASK(31, 28)) >> 28) + 1)
+#define CONF_STATUS1_CMD_DEPTH(x) (4 << (((x) & GENMASK(27, 26)) >> 26))
+#define CONF_STATUS1_SLVDDR_RX_DEPTH(x) (8 << (((x) & GENMASK(25, 21)) >> 21))
+#define CONF_STATUS1_SLVDDR_TX_DEPTH(x) (8 << (((x) & GENMASK(20, 16)) >> 16))
+#define CONF_STATUS1_IBI_DEPTH(x) (2 << (((x) & GENMASK(12, 10)) >> 10))
+#define CONF_STATUS1_RX_DEPTH(x) (8 << (((x) & GENMASK(9, 5)) >> 5))
+#define CONF_STATUS1_TX_DEPTH(x) (8 << ((x) & GENMASK(4, 0)))
+
+#define REV_ID 0xc
+#define REV_ID_VID(id) (((id) & GENMASK(31, 20)) >> 20)
+#define REV_ID_PID(id) (((id) & GENMASK(19, 8)) >> 8)
+#define REV_ID_REV_MAJOR(id) (((id) & GENMASK(7, 4)) >> 4)
+#define REV_ID_REV_MINOR(id) ((id) & GENMASK(3, 0))
+
+#define CTRL 0x10
+#define CTRL_DEV_EN BIT(31)
+#define CTRL_HALT_EN BIT(30)
+#define CTRL_MCS BIT(29)
+#define CTRL_MCS_EN BIT(28)
+#define CTRL_HJ_DISEC BIT(8)
+#define CTRL_MST_ACK BIT(7)
+#define CTRL_HJ_ACK BIT(6)
+#define CTRL_HJ_INIT BIT(5)
+#define CTRL_MST_INIT BIT(4)
+#define CTRL_AHDR_OPT BIT(3)
+#define CTRL_PURE_BUS_MODE 0
+#define CTRL_MIXED_FAST_BUS_MODE 2
+#define CTRL_MIXED_SLOW_BUS_MODE 3
+#define CTRL_BUS_MODE_MASK GENMASK(1, 0)
+
+#define PRESCL_CTRL0 0x14
+#define PRESCL_CTRL0_I2C(x) ((x) << 16)
+#define PRESCL_CTRL0_I3C(x) (x)
+#define PRESCL_CTRL0_MAX GENMASK(9, 0)
+
+#define PRESCL_CTRL1 0x18
+#define PRESCL_CTRL1_PP_LOW_MASK GENMASK(15, 8)
+#define PRESCL_CTRL1_PP_LOW(x) ((x) << 8)
+#define PRESCL_CTRL1_OD_LOW_MASK GENMASK(7, 0)
+#define PRESCL_CTRL1_OD_LOW(x) (x)
+
+#define MST_IER 0x20
+#define MST_IDR 0x24
+#define MST_IMR 0x28
+#define MST_ICR 0x2c
+#define MST_ISR 0x30
+#define MST_INT_HALTED BIT(18)
+#define MST_INT_MR_DONE BIT(17)
+#define MST_INT_IMM_COMP BIT(16)
+#define MST_INT_TX_THR BIT(15)
+#define MST_INT_TX_OVF BIT(14)
+#define MST_INT_IBID_THR BIT(12)
+#define MST_INT_IBID_UNF BIT(11)
+#define MST_INT_IBIR_THR BIT(10)
+#define MST_INT_IBIR_UNF BIT(9)
+#define MST_INT_IBIR_OVF BIT(8)
+#define MST_INT_RX_THR BIT(7)
+#define MST_INT_RX_UNF BIT(6)
+#define MST_INT_CMDD_EMP BIT(5)
+#define MST_INT_CMDD_THR BIT(4)
+#define MST_INT_CMDD_OVF BIT(3)
+#define MST_INT_CMDR_THR BIT(2)
+#define MST_INT_CMDR_UNF BIT(1)
+#define MST_INT_CMDR_OVF BIT(0)
+
+#define MST_STATUS0 0x34
+#define MST_STATUS0_IDLE BIT(18)
+#define MST_STATUS0_HALTED BIT(17)
+#define MST_STATUS0_MASTER_MODE BIT(16)
+#define MST_STATUS0_TX_FULL BIT(13)
+#define MST_STATUS0_IBID_FULL BIT(12)
+#define MST_STATUS0_IBIR_FULL BIT(11)
+#define MST_STATUS0_RX_FULL BIT(10)
+#define MST_STATUS0_CMDD_FULL BIT(9)
+#define MST_STATUS0_CMDR_FULL BIT(8)
+#define MST_STATUS0_TX_EMP BIT(5)
+#define MST_STATUS0_IBID_EMP BIT(4)
+#define MST_STATUS0_IBIR_EMP BIT(3)
+#define MST_STATUS0_RX_EMP BIT(2)
+#define MST_STATUS0_CMDD_EMP BIT(1)
+#define MST_STATUS0_CMDR_EMP BIT(0)
+
+#define CMDR 0x38
+#define CMDR_NO_ERROR 0
+#define CMDR_DDR_PREAMBLE_ERROR 1
+#define CMDR_DDR_PARITY_ERROR 2
+#define CMDR_DDR_RX_FIFO_OVF 3
+#define CMDR_DDR_TX_FIFO_UNF 4
+#define CMDR_M0_ERROR 5
+#define CMDR_M1_ERROR 6
+#define CMDR_M2_ERROR 7
+#define CMDR_MST_ABORT 8
+#define CMDR_NACK_RESP 9
+#define CMDR_INVALID_DA 10
+#define CMDR_DDR_DROPPED 11
+#define CMDR_ERROR(x) (((x) & GENMASK(27, 24)) >> 24)
+#define CMDR_XFER_BYTES(x) (((x) & GENMASK(19, 8)) >> 8)
+#define CMDR_CMDID_HJACK_DISEC 0xfe
+#define CMDR_CMDID_HJACK_ENTDAA 0xff
+#define CMDR_CMDID(x) ((x) & GENMASK(7, 0))
+
+#define IBIR 0x3c
+#define IBIR_ACKED BIT(12)
+#define IBIR_SLVID(x) (((x) & GENMASK(11, 8)) >> 8)
+#define IBIR_ERROR BIT(7)
+#define IBIR_XFER_BYTES(x) (((x) & GENMASK(6, 2)) >> 2)
+#define IBIR_TYPE_IBI 0
+#define IBIR_TYPE_HJ 1
+#define IBIR_TYPE_MR 2
+#define IBIR_TYPE(x) ((x) & GENMASK(1, 0))
+
+#define SLV_IER 0x40
+#define SLV_IDR 0x44
+#define SLV_IMR 0x48
+#define SLV_ICR 0x4c
+#define SLV_ISR 0x50
+#define SLV_INT_TM BIT(20)
+#define SLV_INT_ERROR BIT(19)
+#define SLV_INT_EVENT_UP BIT(18)
+#define SLV_INT_HJ_DONE BIT(17)
+#define SLV_INT_MR_DONE BIT(16)
+#define SLV_INT_DA_UPD BIT(15)
+#define SLV_INT_SDR_FAIL BIT(14)
+#define SLV_INT_DDR_FAIL BIT(13)
+#define SLV_INT_M_RD_ABORT BIT(12)
+#define SLV_INT_DDR_RX_THR BIT(11)
+#define SLV_INT_DDR_TX_THR BIT(10)
+#define SLV_INT_SDR_RX_THR BIT(9)
+#define SLV_INT_SDR_TX_THR BIT(8)
+#define SLV_INT_DDR_RX_UNF BIT(7)
+#define SLV_INT_DDR_TX_OVF BIT(6)
+#define SLV_INT_SDR_RX_UNF BIT(5)
+#define SLV_INT_SDR_TX_OVF BIT(4)
+#define SLV_INT_DDR_RD_COMP BIT(3)
+#define SLV_INT_DDR_WR_COMP BIT(2)
+#define SLV_INT_SDR_RD_COMP BIT(1)
+#define SLV_INT_SDR_WR_COMP BIT(0)
+
+#define SLV_STATUS0 0x54
+#define SLV_STATUS0_REG_ADDR(s) (((s) & GENMASK(23, 16)) >> 16)
+#define SLV_STATUS0_XFRD_BYTES(s) ((s) & GENMASK(15, 0))
+
+#define SLV_STATUS1 0x58
+#define SLV_STATUS1_AS(s) (((s) & GENMASK(21, 20)) >> 20)
+#define SLV_STATUS1_VEN_TM BIT(19)
+#define SLV_STATUS1_HJ_DIS BIT(18)
+#define SLV_STATUS1_MR_DIS BIT(17)
+#define SLV_STATUS1_PROT_ERR BIT(16)
+#define SLV_STATUS1_DA(x) (((x) & GENMASK(15, 9)) >> 9)
+#define SLV_STATUS1_HAS_DA BIT(8)
+#define SLV_STATUS1_DDR_RX_FULL BIT(7)
+#define SLV_STATUS1_DDR_TX_FULL BIT(6)
+#define SLV_STATUS1_DDR_RX_EMPTY BIT(5)
+#define SLV_STATUS1_DDR_TX_EMPTY BIT(4)
+#define SLV_STATUS1_SDR_RX_FULL BIT(3)
+#define SLV_STATUS1_SDR_TX_FULL BIT(2)
+#define SLV_STATUS1_SDR_RX_EMPTY BIT(1)
+#define SLV_STATUS1_SDR_TX_EMPTY BIT(0)
+
+#define CMD0_FIFO 0x60
+#define CMD0_FIFO_IS_DDR BIT(31)
+#define CMD0_FIFO_IS_CCC BIT(30)
+#define CMD0_FIFO_BCH BIT(29)
+#define XMIT_BURST_STATIC_SUBADDR 0
+#define XMIT_SINGLE_INC_SUBADDR 1
+#define XMIT_SINGLE_STATIC_SUBADDR 2
+#define XMIT_BURST_WITHOUT_SUBADDR 3
+#define CMD0_FIFO_PRIV_XMIT_MODE(m) ((m) << 27)
+#define CMD0_FIFO_SBCA BIT(26)
+#define CMD0_FIFO_RSBC BIT(25)
+#define CMD0_FIFO_IS_10B BIT(24)
+#define CMD0_FIFO_PL_LEN(l) ((l) << 12)
+#define CMD0_FIFO_PL_LEN_MAX 4095
+#define CMD0_FIFO_DEV_ADDR(a) ((a) << 1)
+#define CMD0_FIFO_RNW BIT(0)
+
+#define CMD1_FIFO 0x64
+#define CMD1_FIFO_CMDID(id) ((id) << 24)
+#define CMD1_FIFO_CSRADDR(a) (a)
+#define CMD1_FIFO_CCC(id) (id)
+
+#define TX_FIFO 0x68
+
+#define IMD_CMD0 0x70
+#define IMD_CMD0_PL_LEN(l) ((l) << 12)
+#define IMD_CMD0_DEV_ADDR(a) ((a) << 1)
+#define IMD_CMD0_RNW BIT(0)
+
+#define IMD_CMD1 0x74
+#define IMD_CMD1_CCC(id) (id)
+
+#define IMD_DATA 0x78
+#define RX_FIFO 0x80
+#define IBI_DATA_FIFO 0x84
+#define SLV_DDR_TX_FIFO 0x88
+#define SLV_DDR_RX_FIFO 0x8c
+
+#define CMD_IBI_THR_CTRL 0x90
+#define IBIR_THR(t) ((t) << 24)
+#define CMDR_THR(t) ((t) << 16)
+#define IBI_THR(t) ((t) << 8)
+#define CMD_THR(t) (t)
+
+#define TX_RX_THR_CTRL 0x94
+#define RX_THR(t) ((t) << 16)
+#define TX_THR(t) (t)
+
+#define SLV_DDR_TX_RX_THR_CTRL 0x98
+#define SLV_DDR_RX_THR(t) ((t) << 16)
+#define SLV_DDR_TX_THR(t) (t)
+
+#define FLUSH_CTRL 0x9c
+#define FLUSH_IBI_RESP BIT(23)
+#define FLUSH_CMD_RESP BIT(22)
+#define FLUSH_SLV_DDR_RX_FIFO BIT(22)
+#define FLUSH_SLV_DDR_TX_FIFO BIT(21)
+#define FLUSH_IMM_FIFO BIT(20)
+#define FLUSH_IBI_FIFO BIT(19)
+#define FLUSH_RX_FIFO BIT(18)
+#define FLUSH_TX_FIFO BIT(17)
+#define FLUSH_CMD_FIFO BIT(16)
+
+#define TTO_PRESCL_CTRL0 0xb0
+#define TTO_PRESCL_CTRL0_DIVB(x) ((x) << 16)
+#define TTO_PRESCL_CTRL0_DIVA(x) (x)
+
+#define TTO_PRESCL_CTRL1 0xb4
+#define TTO_PRESCL_CTRL1_DIVB(x) ((x) << 16)
+#define TTO_PRESCL_CTRL1_DIVA(x) (x)
+
+#define DEVS_CTRL 0xb8
+#define DEVS_CTRL_DEV_CLR_SHIFT 16
+#define DEVS_CTRL_DEV_CLR_ALL GENMASK(31, 16)
+#define DEVS_CTRL_DEV_CLR(dev) BIT(16 + (dev))
+#define DEVS_CTRL_DEV_ACTIVE(dev) BIT(dev)
+#define DEVS_CTRL_DEVS_ACTIVE_MASK GENMASK(15, 0)
+#define MAX_DEVS 16
+
+#define DEV_ID_RR0(d) (0xc0 + ((d) * 0x10))
+#define DEV_ID_RR0_LVR_EXT_ADDR BIT(11)
+#define DEV_ID_RR0_HDR_CAP BIT(10)
+#define DEV_ID_RR0_IS_I3C BIT(9)
+#define DEV_ID_RR0_DEV_ADDR_MASK (GENMASK(6, 0) | GENMASK(15, 13))
+#define DEV_ID_RR0_SET_DEV_ADDR(a) (((a) & GENMASK(6, 0)) | \
+ (((a) & GENMASK(9, 7)) << 6))
+#define DEV_ID_RR0_GET_DEV_ADDR(x) ((((x) >> 1) & GENMASK(6, 0)) | \
+ (((x) >> 6) & GENMASK(9, 7)))
+
+#define DEV_ID_RR1(d) (0xc4 + ((d) * 0x10))
+#define DEV_ID_RR1_PID_MSB(pid) (pid)
+
+#define DEV_ID_RR2(d) (0xc8 + ((d) * 0x10))
+#define DEV_ID_RR2_PID_LSB(pid) ((pid) << 16)
+#define DEV_ID_RR2_BCR(bcr) ((bcr) << 8)
+#define DEV_ID_RR2_DCR(dcr) (dcr)
+#define DEV_ID_RR2_LVR(lvr) (lvr)
+
+#define SIR_MAP(x) (0x180 + ((x) * 4))
+#define SIR_MAP_DEV_REG(d) SIR_MAP((d) / 2)
+#define SIR_MAP_DEV_SHIFT(d, fs) ((fs) + (((d) % 2) ? 16 : 0))
+#define SIR_MAP_DEV_CONF_MASK(d) (GENMASK(15, 0) << (((d) % 2) ? 16 : 0))
+#define SIR_MAP_DEV_CONF(d, c) ((c) << (((d) % 2) ? 16 : 0))
+#define DEV_ROLE_SLAVE 0
+#define DEV_ROLE_MASTER 1
+#define SIR_MAP_DEV_ROLE(role) ((role) << 14)
+#define SIR_MAP_DEV_SLOW BIT(13)
+#define SIR_MAP_DEV_PL(l) ((l) << 8)
+#define SIR_MAP_PL_MAX GENMASK(4, 0)
+#define SIR_MAP_DEV_DA(a) ((a) << 1)
+#define SIR_MAP_DEV_ACK BIT(0)
+
+#define GPIR_WORD(x) (0x200 + ((x) * 4))
+#define GPI_REG(val, id) \
+ (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
+
+#define GPOR_WORD(x) (0x220 + ((x) * 4))
+#define GPO_REG(val, id) \
+ (((val) >> (((id) % 4) * 8)) & GENMASK(7, 0))
+
+#define ASF_INT_STATUS 0x300
+#define ASF_INT_RAW_STATUS 0x304
+#define ASF_INT_MASK 0x308
+#define ASF_INT_TEST 0x30c
+#define ASF_INT_FATAL_SELECT 0x310
+#define ASF_INTEGRITY_ERR BIT(6)
+#define ASF_PROTOCOL_ERR BIT(5)
+#define ASF_TRANS_TIMEOUT_ERR BIT(4)
+#define ASF_CSR_ERR BIT(3)
+#define ASF_DAP_ERR BIT(2)
+#define ASF_SRAM_UNCORR_ERR BIT(1)
+#define ASF_SRAM_CORR_ERR BIT(0)
+
+#define ASF_SRAM_CORR_FAULT_STATUS 0x320
+#define ASF_SRAM_UNCORR_FAULT_STATUS 0x324
+#define ASF_SRAM_CORR_FAULT_INSTANCE(x) ((x) >> 24)
+#define ASF_SRAM_CORR_FAULT_ADDR(x) ((x) & GENMASK(23, 0))
+
+#define ASF_SRAM_FAULT_STATS 0x328
+#define ASF_SRAM_FAULT_UNCORR_STATS(x) ((x) >> 16)
+#define ASF_SRAM_FAULT_CORR_STATS(x) ((x) & GENMASK(15, 0))
+
+#define ASF_TRANS_TOUT_CTRL 0x330
+#define ASF_TRANS_TOUT_EN BIT(31)
+#define ASF_TRANS_TOUT_VAL(x) (x)
+
+#define ASF_TRANS_TOUT_FAULT_MASK 0x334
+#define ASF_TRANS_TOUT_FAULT_STATUS 0x338
+#define ASF_TRANS_TOUT_FAULT_APB BIT(3)
+#define ASF_TRANS_TOUT_FAULT_SCL_LOW BIT(2)
+#define ASF_TRANS_TOUT_FAULT_SCL_HIGH BIT(1)
+#define ASF_TRANS_TOUT_FAULT_FSCL_HIGH BIT(0)
+
+#define ASF_PROTO_FAULT_MASK 0x340
+#define ASF_PROTO_FAULT_STATUS 0x344
+#define ASF_PROTO_FAULT_SLVSDR_RD_ABORT BIT(31)
+#define ASF_PROTO_FAULT_SLVDDR_FAIL BIT(30)
+#define ASF_PROTO_FAULT_S(x) BIT(16 + (x))
+#define ASF_PROTO_FAULT_MSTSDR_RD_ABORT BIT(15)
+#define ASF_PROTO_FAULT_MSTDDR_FAIL BIT(14)
+#define ASF_PROTO_FAULT_M(x) BIT(x)
+
+struct cdns_i3c_master_caps {
+ u32 cmdfifodepth;
+ u32 cmdrfifodepth;
+ u32 txfifodepth;
+ u32 rxfifodepth;
+ u32 ibirfifodepth;
+};
+
+struct cdns_i3c_cmd {
+ u32 cmd0;
+ u32 cmd1;
+ u32 tx_len;
+ const void *tx_buf;
+ u32 rx_len;
+ void *rx_buf;
+ u32 error;
+};
+
+struct cdns_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int ncmds;
+ struct cdns_i3c_cmd cmds[0];
+};
+
+struct cdns_i3c_master {
+ struct work_struct hj_work;
+ struct i3c_master_controller base;
+ u32 free_rr_slots;
+ unsigned int maxdevs;
+ struct {
+ unsigned int num_slots;
+ struct i3c_dev_desc **slots;
+ spinlock_t lock;
+ } ibi;
+ struct {
+ struct list_head list;
+ struct cdns_i3c_xfer *cur;
+ spinlock_t lock;
+ } xferqueue;
+ void __iomem *regs;
+ struct clk *sysclk;
+ struct clk *pclk;
+ struct cdns_i3c_master_caps caps;
+ unsigned long i3c_scl_lim;
+};
+
+static inline struct cdns_i3c_master *
+to_cdns_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct cdns_i3c_master, base);
+}
+
+static void cdns_i3c_master_wr_to_tx_fifo(struct cdns_i3c_master *master,
+ const u8 *bytes, int nbytes)
+{
+ writesl(master->regs + TX_FIFO, bytes, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp = 0;
+
+ memcpy(&tmp, bytes + (nbytes & ~3), nbytes & 3);
+ writesl(master->regs + TX_FIFO, &tmp, 1);
+ }
+}
+
+static void cdns_i3c_master_rd_from_rx_fifo(struct cdns_i3c_master *master,
+ u8 *bytes, int nbytes)
+{
+ readsl(master->regs + RX_FIFO, bytes, nbytes / 4);
+ if (nbytes & 3) {
+ u32 tmp;
+
+ readsl(master->regs + RX_FIFO, &tmp, 1);
+ memcpy(bytes + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+}
+
+static bool cdns_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_ENTAS(0, true):
+ case I3C_CCC_ENTAS(0, false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_SETMWL(true):
+ case I3C_CCC_SETMWL(false):
+ case I3C_CCC_SETMRL(true):
+ case I3C_CCC_SETMRL(false):
+ case I3C_CCC_DEFSLVS:
+ case I3C_CCC_ENTHDR(0):
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETACCMST:
+ case I3C_CCC_GETMXDS:
+ case I3C_CCC_GETHDRCAP:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int cdns_i3c_master_disable(struct cdns_i3c_master *master)
+{
+ u32 status;
+
+ writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN, master->regs + CTRL);
+
+ return readl_poll_timeout(master->regs + MST_STATUS0, status,
+ status & MST_STATUS0_IDLE, 10, 1000000);
+}
+
+static void cdns_i3c_master_enable(struct cdns_i3c_master *master)
+{
+ writel(readl(master->regs + CTRL) | CTRL_DEV_EN, master->regs + CTRL);
+}
+
+static struct cdns_i3c_xfer *
+cdns_i3c_master_alloc_xfer(struct cdns_i3c_master *master, unsigned int ncmds)
+{
+ struct cdns_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void cdns_i3c_master_free_xfer(struct cdns_i3c_xfer *xfer)
+{
+ kfree(xfer);
+}
+
+static void cdns_i3c_master_start_xfer_locked(struct cdns_i3c_master *master)
+{
+ struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
+ unsigned int i;
+
+ if (!xfer)
+ return;
+
+ writel(MST_INT_CMDD_EMP, master->regs + MST_ICR);
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
+
+ cdns_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf,
+ cmd->tx_len);
+ }
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct cdns_i3c_cmd *cmd = &xfer->cmds[i];
+
+ writel(cmd->cmd1 | CMD1_FIFO_CMDID(i),
+ master->regs + CMD1_FIFO);
+ writel(cmd->cmd0, master->regs + CMD0_FIFO);
+ }
+
+ writel(readl(master->regs + CTRL) | CTRL_MCS,
+ master->regs + CTRL);
+ writel(MST_INT_CMDD_EMP, master->regs + MST_IER);
+}
+
+static void cdns_i3c_master_end_xfer_locked(struct cdns_i3c_master *master,
+ u32 isr)
+{
+ struct cdns_i3c_xfer *xfer = master->xferqueue.cur;
+ int i, ret = 0;
+ u32 status0;
+
+ if (!xfer)
+ return;
+
+ if (!(isr & MST_INT_CMDD_EMP))
+ return;
+
+ writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
+
+ for (status0 = readl(master->regs + MST_STATUS0);
+ !(status0 & MST_STATUS0_CMDR_EMP);
+ status0 = readl(master->regs + MST_STATUS0)) {
+ struct cdns_i3c_cmd *cmd;
+ u32 cmdr, rx_len, id;
+
+ cmdr = readl(master->regs + CMDR);
+ id = CMDR_CMDID(cmdr);
+ if (id == CMDR_CMDID_HJACK_DISEC ||
+ id == CMDR_CMDID_HJACK_ENTDAA ||
+ WARN_ON(id >= xfer->ncmds))
+ continue;
+
+ cmd = &xfer->cmds[CMDR_CMDID(cmdr)];
+ rx_len = min_t(u32, CMDR_XFER_BYTES(cmdr), cmd->rx_len);
+ cdns_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
+ cmd->error = CMDR_ERROR(cmdr);
+ }
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ switch (xfer->cmds[i].error) {
+ case CMDR_NO_ERROR:
+ break;
+
+ case CMDR_DDR_PREAMBLE_ERROR:
+ case CMDR_DDR_PARITY_ERROR:
+ case CMDR_M0_ERROR:
+ case CMDR_M1_ERROR:
+ case CMDR_M2_ERROR:
+ case CMDR_MST_ABORT:
+ case CMDR_NACK_RESP:
+ case CMDR_DDR_DROPPED:
+ ret = -EIO;
+ break;
+
+ case CMDR_DDR_RX_FIFO_OVF:
+ case CMDR_DDR_TX_FIFO_UNF:
+ ret = -ENOSPC;
+ break;
+
+ case CMDR_INVALID_DA:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ xfer->ret = ret;
+ complete(&xfer->comp);
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct cdns_i3c_xfer, node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ cdns_i3c_master_start_xfer_locked(master);
+}
+
+static void cdns_i3c_master_queue_xfer(struct cdns_i3c_master *master,
+ struct cdns_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ init_completion(&xfer->comp);
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ cdns_i3c_master_start_xfer_locked(master);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static void cdns_i3c_master_unqueue_xfer(struct cdns_i3c_master *master,
+ struct cdns_i3c_xfer *xfer)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->xferqueue.lock, flags);
+ if (master->xferqueue.cur == xfer) {
+ u32 status;
+
+ writel(readl(master->regs + CTRL) & ~CTRL_DEV_EN,
+ master->regs + CTRL);
+ readl_poll_timeout_atomic(master->regs + MST_STATUS0, status,
+ status & MST_STATUS0_IDLE, 10,
+ 1000000);
+ master->xferqueue.cur = NULL;
+ writel(FLUSH_RX_FIFO | FLUSH_TX_FIFO | FLUSH_CMD_FIFO |
+ FLUSH_CMD_RESP,
+ master->regs + FLUSH_CTRL);
+ writel(MST_INT_CMDD_EMP, master->regs + MST_IDR);
+ writel(readl(master->regs + CTRL) | CTRL_DEV_EN,
+ master->regs + CTRL);
+ } else {
+ list_del_init(&xfer->node);
+ }
+ spin_unlock_irqrestore(&master->xferqueue.lock, flags);
+}
+
+static enum i3c_error_code cdns_i3c_cmd_get_err(struct cdns_i3c_cmd *cmd)
+{
+ switch (cmd->error) {
+ case CMDR_M0_ERROR:
+ return I3C_ERROR_M0;
+
+ case CMDR_M1_ERROR:
+ return I3C_ERROR_M1;
+
+ case CMDR_M2_ERROR:
+ case CMDR_NACK_RESP:
+ return I3C_ERROR_M2;
+
+ default:
+ break;
+ }
+
+ return I3C_ERROR_UNKNOWN;
+}
+
+static int cdns_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *cmd)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_xfer *xfer;
+ struct cdns_i3c_cmd *ccmd;
+ int ret;
+
+ xfer = cdns_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ ccmd = xfer->cmds;
+ ccmd->cmd1 = CMD1_FIFO_CCC(cmd->id);
+ ccmd->cmd0 = CMD0_FIFO_IS_CCC |
+ CMD0_FIFO_PL_LEN(cmd->dests[0].payload.len);
+
+ if (cmd->id & I3C_CCC_DIRECT)
+ ccmd->cmd0 |= CMD0_FIFO_DEV_ADDR(cmd->dests[0].addr);
+
+ if (cmd->rnw) {
+ ccmd->cmd0 |= CMD0_FIFO_RNW;
+ ccmd->rx_buf = cmd->dests[0].payload.data;
+ ccmd->rx_len = cmd->dests[0].payload.len;
+ } else {
+ ccmd->tx_buf = cmd->dests[0].payload.data;
+ ccmd->tx_len = cmd->dests[0].payload.len;
+ }
+
+ cdns_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ cdns_i3c_master_unqueue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ cmd->err = cdns_i3c_cmd_get_err(&xfer->cmds[0]);
+ cdns_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static int cdns_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ int txslots = 0, rxslots = 0, i, ret;
+ struct cdns_i3c_xfer *cdns_xfer;
+
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
+ return -ENOTSUPP;
+ }
+
+ if (!nxfers)
+ return 0;
+
+ if (nxfers > master->caps.cmdfifodepth ||
+ nxfers > master->caps.cmdrfifodepth)
+ return -ENOTSUPP;
+
+ /*
+ * First make sure that all transactions (blocks of transfers
+ * separated by a STOP marker) fit in the FIFOs.
+ */
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].rnw)
+ rxslots += DIV_ROUND_UP(xfers[i].len, 4);
+ else
+ txslots += DIV_ROUND_UP(xfers[i].len, 4);
+ }
+
+ if (rxslots > master->caps.rxfifodepth ||
+ txslots > master->caps.txfifodepth)
+ return -ENOTSUPP;
+
+ cdns_xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
+ if (!cdns_xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct cdns_i3c_cmd *ccmd = &cdns_xfer->cmds[i];
+ u32 pl_len = xfers[i].len;
+
+ ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(dev->info.dyn_addr) |
+ CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
+
+ if (xfers[i].rnw) {
+ ccmd->cmd0 |= CMD0_FIFO_RNW;
+ ccmd->rx_buf = xfers[i].data.in;
+ ccmd->rx_len = xfers[i].len;
+ pl_len++;
+ } else {
+ ccmd->tx_buf = xfers[i].data.out;
+ ccmd->tx_len = xfers[i].len;
+ }
+
+ ccmd->cmd0 |= CMD0_FIFO_PL_LEN(pl_len);
+
+ if (i < nxfers - 1)
+ ccmd->cmd0 |= CMD0_FIFO_RSBC;
+
+ if (!i)
+ ccmd->cmd0 |= CMD0_FIFO_BCH;
+ }
+
+ cdns_i3c_master_queue_xfer(master, cdns_xfer);
+ if (!wait_for_completion_timeout(&cdns_xfer->comp,
+ msecs_to_jiffies(1000)))
+ cdns_i3c_master_unqueue_xfer(master, cdns_xfer);
+
+ ret = cdns_xfer->ret;
+
+ for (i = 0; i < nxfers; i++)
+ xfers[i].err = cdns_i3c_cmd_get_err(&cdns_xfer->cmds[i]);
+
+ cdns_i3c_master_free_xfer(cdns_xfer);
+
+ return ret;
+}
+
+static int cdns_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers, int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ unsigned int nrxwords = 0, ntxwords = 0;
+ struct cdns_i3c_xfer *xfer;
+ int i, ret = 0;
+
+ if (nxfers > master->caps.cmdfifodepth)
+ return -ENOTSUPP;
+
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].len > CMD0_FIFO_PL_LEN_MAX)
+ return -ENOTSUPP;
+
+ if (xfers[i].flags & I2C_M_RD)
+ nrxwords += DIV_ROUND_UP(xfers[i].len, 4);
+ else
+ ntxwords += DIV_ROUND_UP(xfers[i].len, 4);
+ }
+
+ if (ntxwords > master->caps.txfifodepth ||
+ nrxwords > master->caps.rxfifodepth)
+ return -ENOTSUPP;
+
+ xfer = cdns_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct cdns_i3c_cmd *ccmd = &xfer->cmds[i];
+
+ ccmd->cmd0 = CMD0_FIFO_DEV_ADDR(xfers[i].addr) |
+ CMD0_FIFO_PL_LEN(xfers[i].len) |
+ CMD0_FIFO_PRIV_XMIT_MODE(XMIT_BURST_WITHOUT_SUBADDR);
+
+ if (xfers[i].flags & I2C_M_TEN)
+ ccmd->cmd0 |= CMD0_FIFO_IS_10B;
+
+ if (xfers[i].flags & I2C_M_RD) {
+ ccmd->cmd0 |= CMD0_FIFO_RNW;
+ ccmd->rx_buf = xfers[i].buf;
+ ccmd->rx_len = xfers[i].len;
+ } else {
+ ccmd->tx_buf = xfers[i].buf;
+ ccmd->tx_len = xfers[i].len;
+ }
+ }
+
+ cdns_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ cdns_i3c_master_unqueue_xfer(master, xfer);
+
+ ret = xfer->ret;
+ cdns_i3c_master_free_xfer(xfer);
+
+ return ret;
+}
+
+static u32 cdns_i3c_master_i2c_funcs(struct i3c_master_controller *m)
+{
+ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
+}
+
+struct cdns_i3c_i2c_dev_data {
+ u16 id;
+ s16 ibi;
+ struct i3c_generic_ibi_pool *ibi_pool;
+};
+
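+/*
+ * Retaining Register 0 stores the device address in two bit ranges
+ * ([7:1], plus [15:13] for the extra 10-bit address bits) and an
+ * odd-parity bit in bit 0; e.g. address 0x51 (three bits set) encodes
+ * as 0xa2 with the parity bit left clear.
+ */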
+static u32 prepare_rr0_dev_address(u32 addr)
+{
+ u32 ret = (addr << 1) & 0xff;
+
+ /* RR0[7:1] = addr[6:0] */
+ ret |= (addr & GENMASK(6, 0)) << 1;
+
+ /* RR0[15:13] = addr[9:7] */
+ ret |= (addr & GENMASK(9, 7)) << 6;
+
+ /* RR0[0] = ~XOR(addr[6:0]) */
+ if (!(hweight8(addr & 0x7f) & 1))
+ ret |= 1;
+
+ return ret;
+}
+
+static void cdns_i3c_master_upd_i3c_addr(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ u32 rr;
+
+ rr = prepare_rr0_dev_address(dev->info.dyn_addr ?
+ dev->info.dyn_addr :
+ dev->info.static_addr);
+ writel(DEV_ID_RR0_IS_I3C | rr, master->regs + DEV_ID_RR0(data->id));
+}
+
+static int cdns_i3c_master_get_rr_slot(struct cdns_i3c_master *master,
+ u8 dyn_addr)
+{
+ u32 activedevs, rr;
+ int i;
+
+ if (!dyn_addr) {
+ if (!master->free_rr_slots)
+ return -ENOSPC;
+
+ return ffs(master->free_rr_slots) - 1;
+ }
+
+ activedevs = readl(master->regs + DEVS_CTRL) &
+ DEVS_CTRL_DEVS_ACTIVE_MASK;
+
+ for (i = 1; i <= master->maxdevs; i++) {
+ if (!(BIT(i) & activedevs))
+ continue;
+
+ rr = readl(master->regs + DEV_ID_RR0(i));
+ if (!(rr & DEV_ID_RR0_IS_I3C) ||
+ DEV_ID_RR0_GET_DEV_ADDR(rr) != dyn_addr)
+ continue;
+
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int cdns_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ cdns_i3c_master_upd_i3c_addr(dev);
+
+ return 0;
+}
+
+static int cdns_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data;
+ int slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ slot = cdns_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
+ if (slot < 0) {
+ kfree(data);
+ return slot;
+ }
+
+ data->ibi = -1;
+ data->id = slot;
+ i3c_dev_set_master_data(dev, data);
+ master->free_rr_slots &= ~BIT(slot);
+
+ if (!dev->info.dyn_addr) {
+ cdns_i3c_master_upd_i3c_addr(dev);
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_ACTIVE(data->id),
+ master->regs + DEVS_CTRL);
+ }
+
+ return 0;
+}
+
+static void cdns_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_CLR(data->id),
+ master->regs + DEVS_CTRL);
+
+ i3c_dev_set_master_data(dev, NULL);
+ master->free_rr_slots |= BIT(data->id);
+ kfree(data);
+}
+
+static int cdns_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = cdns_i3c_master_get_rr_slot(master, 0);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = slot;
+ master->free_rr_slots &= ~BIT(slot);
+ i2c_dev_set_master_data(dev, data);
+
+ writel(prepare_rr0_dev_address(dev->boardinfo->base.addr) |
+ (dev->boardinfo->base.flags & I2C_CLIENT_TEN ?
+ DEV_ID_RR0_LVR_EXT_ADDR : 0),
+ master->regs + DEV_ID_RR0(data->id));
+ writel(dev->boardinfo->lvr, master->regs + DEV_ID_RR2(data->id));
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_ACTIVE(data->id),
+ master->regs + DEVS_CTRL);
+
+ return 0;
+}
+
+static void cdns_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+
+ writel(readl(master->regs + DEVS_CTRL) |
+ DEVS_CTRL_DEV_CLR(data->id),
+ master->regs + DEVS_CTRL);
+ master->free_rr_slots |= BIT(data->id);
+
+ i2c_dev_set_master_data(dev, NULL);
+ kfree(data);
+}
+
+static void cdns_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+
+ cdns_i3c_master_disable(master);
+}
+
+static void cdns_i3c_master_dev_rr_to_info(struct cdns_i3c_master *master,
+ unsigned int slot,
+ struct i3c_device_info *info)
+{
+ u32 rr;
+
+ memset(info, 0, sizeof(*info));
+ rr = readl(master->regs + DEV_ID_RR0(slot));
+ info->dyn_addr = DEV_ID_RR0_GET_DEV_ADDR(rr);
+ rr = readl(master->regs + DEV_ID_RR2(slot));
+ info->dcr = rr;
+ info->bcr = rr >> 8;
+ info->pid = rr >> 16;
+ info->pid |= (u64)readl(master->regs + DEV_ID_RR1(slot)) << 16;
+}
+
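+/*
+ * Every I3C device advertises the maximum SDR rate it can sustain in its
+ * max read/write data speed fields; scan the bus for the most
+ * restrictive limit and stretch the push-pull low period so no device is
+ * driven faster than it can handle.
+ */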
+static void cdns_i3c_master_upd_i3c_scl_lim(struct cdns_i3c_master *master)
+{
+ struct i3c_master_controller *m = &master->base;
+ unsigned long i3c_lim_period, pres_step, ncycles;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ unsigned long new_i3c_scl_lim = 0;
+ struct i3c_dev_desc *dev;
+ u32 prescl1, ctrl;
+
+ i3c_bus_for_each_i3cdev(bus, dev) {
+ unsigned long max_fscl;
+
+ max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
+ I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
+ switch (max_fscl) {
+ case I3C_SDR1_FSCL_8MHZ:
+ max_fscl = 8000000;
+ break;
+ case I3C_SDR2_FSCL_6MHZ:
+ max_fscl = 6000000;
+ break;
+ case I3C_SDR3_FSCL_4MHZ:
+ max_fscl = 4000000;
+ break;
+ case I3C_SDR4_FSCL_2MHZ:
+ max_fscl = 2000000;
+ break;
+ case I3C_SDR0_FSCL_MAX:
+ default:
+ max_fscl = 0;
+ break;
+ }
+
+ if (max_fscl &&
+ (new_i3c_scl_lim > max_fscl || !new_i3c_scl_lim))
+ new_i3c_scl_lim = max_fscl;
+ }
+
+ /* Only update PRESCL_CTRL1 if the I3C SCL limitation has changed. */
+ if (new_i3c_scl_lim == master->i3c_scl_lim)
+ return;
+ master->i3c_scl_lim = new_i3c_scl_lim;
+ if (!new_i3c_scl_lim)
+ return;
+ pres_step = 1000000000UL / (bus->scl_rate.i3c * 4);
+
+ /* Configure PP_LOW to meet I3C slave limitations. */
+ prescl1 = readl(master->regs + PRESCL_CTRL1) &
+ ~PRESCL_CTRL1_PP_LOW_MASK;
+ ctrl = readl(master->regs + CTRL);
+
+ i3c_lim_period = DIV_ROUND_UP(1000000000, master->i3c_scl_lim);
+ ncycles = DIV_ROUND_UP(i3c_lim_period, pres_step);
+ if (ncycles < 4)
+ ncycles = 0;
+ else
+ ncycles -= 4;
+
+ prescl1 |= PRESCL_CTRL1_PP_LOW(ncycles);
+
+ /* Disable I3C master before updating PRESCL_CTRL1. */
+ if (ctrl & CTRL_DEV_EN)
+ cdns_i3c_master_disable(master);
+
+ writel(prescl1, master->regs + PRESCL_CTRL1);
+
+ if (ctrl & CTRL_DEV_EN)
+ cdns_i3c_master_enable(master);
+}
+
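+/*
+ * DAA outline: load a pre-assigned address into every inactive retaining
+ * register slot, let the core run ENTDAA, then register the slots that
+ * became active and clear the ones that were never used.
+ */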
+static int cdns_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ u32 olddevs, newdevs;
+ int ret, slot;
+ u8 addrs[MAX_DEVS] = { };
+ u8 last_addr = 0;
+
+ olddevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+
+ /* Prepare RR slots before launching DAA. */
+ for (slot = 1; slot <= master->maxdevs; slot++) {
+ if (olddevs & BIT(slot))
+ continue;
+
+ ret = i3c_master_get_free_addr(m, last_addr + 1);
+ if (ret < 0)
+ return -ENOSPC;
+
+ last_addr = ret;
+ addrs[slot] = last_addr;
+ writel(prepare_rr0_dev_address(last_addr) | DEV_ID_RR0_IS_I3C,
+ master->regs + DEV_ID_RR0(slot));
+ writel(0, master->regs + DEV_ID_RR1(slot));
+ writel(0, master->regs + DEV_ID_RR2(slot));
+ }
+
+ ret = i3c_master_entdaa_locked(&master->base);
+ if (ret && ret != I3C_ERROR_M2)
+ return ret;
+
+ newdevs = readl(master->regs + DEVS_CTRL) & DEVS_CTRL_DEVS_ACTIVE_MASK;
+ newdevs &= ~olddevs;
+
+ /*
+ * Register all devices that were assigned a dynamic address during
+ * DAA. We already have the addresses assigned to them in the addrs
+ * array.
+ */
+ for (slot = 1; slot <= master->maxdevs; slot++) {
+ if (newdevs & BIT(slot))
+ i3c_master_add_i3c_dev_locked(m, addrs[slot]);
+ }
+
+ /*
+ * Clear slots that ended up not being used. Can be caused by I3C
+ * device creation failure or when the I3C device was already known
+ * by the system but with a different address (in this case the device
+ * already has a slot and does not need a new one).
+ */
+ writel(readl(master->regs + DEVS_CTRL) |
+ master->free_rr_slots << DEVS_CTRL_DEV_CLR_SHIFT,
+ master->regs + DEVS_CTRL);
+
+ i3c_master_defslvs_locked(&master->base);
+
+ cdns_i3c_master_upd_i3c_scl_lim(master);
+
+ /* Unmask Hot-Join and Mastership request interrupts. */
+ i3c_master_enec_locked(m, I3C_BROADCAST_ADDR,
+ I3C_CCC_EVENT_HJ | I3C_CCC_EVENT_MR);
+
+ return 0;
+}
+
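+/*
+ * Prescaler math used below: the effective I3C SCL rate is
+ * sysclk / ((pres + 1) * 4) and the I2C rate sysclk / ((pres + 1) * 5),
+ * so the requested rates are rounded to what the divider can produce.
+ */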
+static int cdns_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ unsigned long pres_step, sysclk_rate, max_i2cfreq;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ u32 ctrl, prescl0, prescl1, pres;
+ struct i3c_device_info info = { };
+ int ret, ncycles;
+
+ switch (bus->mode) {
+ case I3C_BUS_MODE_PURE:
+ ctrl = CTRL_PURE_BUS_MODE;
+ break;
+
+ case I3C_BUS_MODE_MIXED_FAST:
+ ctrl = CTRL_MIXED_FAST_BUS_MODE;
+ break;
+
+ case I3C_BUS_MODE_MIXED_SLOW:
+ ctrl = CTRL_MIXED_SLOW_BUS_MODE;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ sysclk_rate = clk_get_rate(master->sysclk);
+ if (!sysclk_rate)
+ return -EINVAL;
+
+ pres = DIV_ROUND_UP(sysclk_rate, (bus->scl_rate.i3c * 4)) - 1;
+ if (pres > PRESCL_CTRL0_MAX)
+ return -ERANGE;
+
+ bus->scl_rate.i3c = sysclk_rate / ((pres + 1) * 4);
+
+ prescl0 = PRESCL_CTRL0_I3C(pres);
+
+ max_i2cfreq = bus->scl_rate.i2c;
+
+ pres = (sysclk_rate / (max_i2cfreq * 5)) - 1;
+ if (pres > PRESCL_CTRL0_MAX)
+ return -ERANGE;
+
+ bus->scl_rate.i2c = sysclk_rate / ((pres + 1) * 5);
+
+ prescl0 |= PRESCL_CTRL0_I2C(pres);
+ writel(prescl0, master->regs + PRESCL_CTRL0);
+
+ /* Calculate the open-drain low period. */
+ pres_step = 1000000000 / (bus->scl_rate.i3c * 4);
+ ncycles = DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, pres_step) - 2;
+ if (ncycles < 0)
+ ncycles = 0;
+ prescl1 = PRESCL_CTRL1_OD_LOW(ncycles);
+ writel(prescl1, master->regs + PRESCL_CTRL1);
+
+ /* Get an address for the master. */
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ writel(prepare_rr0_dev_address(ret) | DEV_ID_RR0_IS_I3C,
+ master->regs + DEV_ID_RR0(0));
+
+ cdns_i3c_master_dev_rr_to_info(master, 0, &info);
+ if (info.bcr & I3C_BCR_HDR_CAP)
+ info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
+
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ /*
+ * Enable Hot-Join and, when a Hot-Join request happens, disable all
+ * events coming from this device.
+ *
+ * We will issue an ENTDAA afterwards from the Hot-Join work handler.
+ */
+ ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN;
+ writel(ctrl, master->regs + CTRL);
+
+ cdns_i3c_master_enable(master);
+
+ return 0;
+}
+
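+/*
+ * Drain one IBI payload from the IBI data FIFO into a free slot taken
+ * from the device's IBI pool. The FIFO is word based: full words are
+ * read with readsl() and a trailing partial word is copied byte-wise.
+ */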
+static void cdns_i3c_master_handle_ibi(struct cdns_i3c_master *master,
+ u32 ibir)
+{
+ struct cdns_i3c_i2c_dev_data *data;
+ bool data_consumed = false;
+ struct i3c_ibi_slot *slot;
+ u32 id = IBIR_SLVID(ibir);
+ struct i3c_dev_desc *dev;
+ size_t nbytes;
+ u8 *buf;
+
+ /*
+ * FIXME: maybe we should report the FIFO OVF errors to the upper
+ * layer.
+ */
+ if (id >= master->ibi.num_slots || (ibir & IBIR_ERROR))
+ goto out;
+
+ dev = master->ibi.slots[id];
+ spin_lock(&master->ibi.lock);
+
+ data = i3c_dev_get_master_data(dev);
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot)
+ goto out_unlock;
+
+ buf = slot->data;
+
+ nbytes = IBIR_XFER_BYTES(ibir);
+ readsl(master->regs + IBI_DATA_FIFO, buf, nbytes / 4);
+ if (nbytes % 4) {
+ u32 tmp = __raw_readl(master->regs + IBI_DATA_FIFO);
+
+ memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
+ }
+
+ slot->len = min_t(unsigned int, IBIR_XFER_BYTES(ibir),
+ dev->ibi->max_payload_len);
+ i3c_master_queue_ibi(dev, slot);
+ data_consumed = true;
+
+out_unlock:
+ spin_unlock(&master->ibi.lock);
+
+out:
+ /* Consume data from the FIFO if it hasn't been done already. */
+ if (!data_consumed) {
+ int i;
+
+ for (i = 0; i < IBIR_XFER_BYTES(ibir); i += 4)
+ readl(master->regs + IBI_DATA_FIFO);
+ }
+}
+
+static void cdns_i3c_master_demux_ibis(struct cdns_i3c_master *master)
+{
+ u32 status0;
+
+ writel(MST_INT_IBIR_THR, master->regs + MST_ICR);
+
+ for (status0 = readl(master->regs + MST_STATUS0);
+ !(status0 & MST_STATUS0_IBIR_EMP);
+ status0 = readl(master->regs + MST_STATUS0)) {
+ u32 ibir = readl(master->regs + IBIR);
+
+ switch (IBIR_TYPE(ibir)) {
+ case IBIR_TYPE_IBI:
+ cdns_i3c_master_handle_ibi(master, ibir);
+ break;
+
+ case IBIR_TYPE_HJ:
+ WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
+ queue_work(master->base.wq, &master->hj_work);
+ break;
+
+ case IBIR_TYPE_MR:
+ WARN_ON(IBIR_XFER_BYTES(ibir) || (ibir & IBIR_ERROR));
+ /* fall through */
+ default:
+ break;
+ }
+ }
+}
+
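+/*
+ * Interrupt flow: command completions are processed first, under the
+ * transfer-queue lock, then pending IBI descriptors are demuxed.
+ */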
+static irqreturn_t cdns_i3c_master_interrupt(int irq, void *data)
+{
+ struct cdns_i3c_master *master = data;
+ u32 status;
+
+ status = readl(master->regs + MST_ISR);
+ if (!(status & readl(master->regs + MST_IMR)))
+ return IRQ_NONE;
+
+ spin_lock(&master->xferqueue.lock);
+ cdns_i3c_master_end_xfer_locked(master, status);
+ spin_unlock(&master->xferqueue.lock);
+
+ if (status & MST_INT_IBIR_THR)
+ cdns_i3c_master_demux_ibis(master);
+
+ return IRQ_HANDLED;
+}
+
+static int cdns_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ u32 sirmap;
+ int ret;
+
+ ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
+ sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
+ sirmap |= SIR_MAP_DEV_CONF(data->ibi,
+ SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
+ writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ return ret;
+}
+
+static int cdns_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ u32 sircfg, sirmap;
+ int ret;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
+ sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
+ sircfg = SIR_MAP_DEV_ROLE(dev->info.bcr >> 6) |
+ SIR_MAP_DEV_DA(dev->info.dyn_addr) |
+ SIR_MAP_DEV_PL(dev->info.max_ibi_len) |
+ SIR_MAP_DEV_ACK;
+
+ if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM)
+ sircfg |= SIR_MAP_DEV_SLOW;
+
+ sirmap |= SIR_MAP_DEV_CONF(data->ibi, sircfg);
+ writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ ret = i3c_master_enec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+ if (ret) {
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ sirmap = readl(master->regs + SIR_MAP_DEV_REG(data->ibi));
+ sirmap &= ~SIR_MAP_DEV_CONF_MASK(data->ibi);
+ sirmap |= SIR_MAP_DEV_CONF(data->ibi,
+ SIR_MAP_DEV_DA(I3C_BROADCAST_ADDR));
+ writel(sirmap, master->regs + SIR_MAP_DEV_REG(data->ibi));
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+ }
+
+ return ret;
+}
+
+static int cdns_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+ unsigned int i;
+
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ for (i = 0; i < master->ibi.num_slots; i++) {
+ if (!master->ibi.slots[i]) {
+ data->ibi = i;
+ master->ibi.slots[i] = dev;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ if (i < master->ibi.num_slots)
+ return 0;
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+
+ return -ENOSPC;
+}
+
+static void cdns_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct cdns_i3c_master *master = to_cdns_i3c_master(m);
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->ibi.lock, flags);
+ master->ibi.slots[data->ibi] = NULL;
+ data->ibi = -1;
+ spin_unlock_irqrestore(&master->ibi.lock, flags);
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+}
+
+static void cdns_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct cdns_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
+static const struct i3c_master_controller_ops cdns_i3c_master_ops = {
+ .bus_init = cdns_i3c_master_bus_init,
+ .bus_cleanup = cdns_i3c_master_bus_cleanup,
+ .do_daa = cdns_i3c_master_do_daa,
+ .attach_i3c_dev = cdns_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = cdns_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = cdns_i3c_master_detach_i3c_dev,
+ .attach_i2c_dev = cdns_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = cdns_i3c_master_detach_i2c_dev,
+ .supports_ccc_cmd = cdns_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = cdns_i3c_master_send_ccc_cmd,
+ .priv_xfers = cdns_i3c_master_priv_xfers,
+ .i2c_xfers = cdns_i3c_master_i2c_xfers,
+ .i2c_funcs = cdns_i3c_master_i2c_funcs,
+ .enable_ibi = cdns_i3c_master_enable_ibi,
+ .disable_ibi = cdns_i3c_master_disable_ibi,
+ .request_ibi = cdns_i3c_master_request_ibi,
+ .free_ibi = cdns_i3c_master_free_ibi,
+ .recycle_ibi_slot = cdns_i3c_master_recycle_ibi_slot,
+};
+
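+/*
+ * Hot-Join handler, run from a workqueue because i3c_master_do_daa()
+ * may sleep. The controller auto-ACKs the HJ request (CTRL_HJ_ACK) and
+ * disables further events from the device (CTRL_HJ_DISEC) until this
+ * DAA completes.
+ */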
+static void cdns_i3c_master_hj(struct work_struct *work)
+{
+ struct cdns_i3c_master *master = container_of(work,
+ struct cdns_i3c_master,
+ hj_work);
+
+ i3c_master_do_daa(&master->base);
+}
+
+static int cdns_i3c_master_probe(struct platform_device *pdev)
+{
+ struct cdns_i3c_master *master;
+ struct resource *res;
+ int ret, irq;
+ u32 val;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ master->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ master->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(master->pclk))
+ return PTR_ERR(master->pclk);
+
+ master->sysclk = devm_clk_get(&pdev->dev, "sysclk");
+ if (IS_ERR(master->sysclk))
+ return PTR_ERR(master->sysclk);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = clk_prepare_enable(master->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(master->sysclk);
+ if (ret)
+ goto err_disable_pclk;
+
+ if (readl(master->regs + DEV_ID) != DEV_ID_I3C_MASTER) {
+ ret = -EINVAL;
+ goto err_disable_sysclk;
+ }
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ INIT_WORK(&master->hj_work, cdns_i3c_master_hj);
+ writel(0xffffffff, master->regs + MST_IDR);
+ writel(0xffffffff, master->regs + SLV_IDR);
+ ret = devm_request_irq(&pdev->dev, irq, cdns_i3c_master_interrupt, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ goto err_disable_sysclk;
+
+ platform_set_drvdata(pdev, master);
+
+ val = readl(master->regs + CONF_STATUS0);
+
+ /* Device ID0 is reserved to describe this master. */
+ master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
+ master->free_rr_slots = GENMASK(master->maxdevs, 1);
+
+ val = readl(master->regs + CONF_STATUS1);
+ master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
+ master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
+ master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
+ master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+ master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
+ master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ sizeof(*master->ibi.slots),
+ GFP_KERNEL);
+ if (!master->ibi.slots) {
+ ret = -ENOMEM;
+ goto err_disable_sysclk;
+ }
+
+ writel(IBIR_THR(1), master->regs + CMD_IBI_THR_CTRL);
+ writel(MST_INT_IBIR_THR, master->regs + MST_IER);
+ writel(DEVS_CTRL_DEV_CLR_ALL, master->regs + DEVS_CTRL);
+
+ ret = i3c_master_register(&master->base, &pdev->dev,
+ &cdns_i3c_master_ops, false);
+ if (ret)
+ goto err_disable_sysclk;
+
+ return 0;
+
+err_disable_sysclk:
+ clk_disable_unprepare(master->sysclk);
+
+err_disable_pclk:
+ clk_disable_unprepare(master->pclk);
+
+ return ret;
+}
+
+static int cdns_i3c_master_remove(struct platform_device *pdev)
+{
+ struct cdns_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i3c_master_unregister(&master->base);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(master->sysclk);
+ clk_disable_unprepare(master->pclk);
+
+ return 0;
+}
+
+static const struct of_device_id cdns_i3c_master_of_ids[] = {
+ { .compatible = "cdns,i3c-master" },
+ { /* sentinel */ },
+};
+
+static struct platform_driver cdns_i3c_master = {
+ .probe = cdns_i3c_master_probe,
+ .remove = cdns_i3c_master_remove,
+ .driver = {
+ .name = "cdns-i3c-master",
+ .of_match_table = cdns_i3c_master_of_ids,
+ },
+};
+module_platform_driver(cdns_i3c_master);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_DESCRIPTION("Cadence I3C master driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cdns-i3c-master");
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 8b2b72b93885..da58020a144e 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -94,7 +94,7 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_MISC;
- rq->special = (char *)pc;
+ ide_req(rq)->special = pc;
if (buf && bufflen) {
error = blk_rq_map_kern(drive->queue, rq, buf, bufflen,
@@ -172,8 +172,8 @@ EXPORT_SYMBOL_GPL(ide_create_request_sense_cmd);
void ide_prep_sense(ide_drive_t *drive, struct request *rq)
{
struct request_sense *sense = &drive->sense_data;
- struct request *sense_rq = drive->sense_rq;
- struct scsi_request *req = scsi_req(sense_rq);
+ struct request *sense_rq;
+ struct scsi_request *req;
unsigned int cmd_len, sense_len;
int err;
@@ -196,9 +196,16 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
if (ata_sense_request(rq) || drive->sense_rq_armed)
return;
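+ /*
+ * Allocate the sense request lazily from the queue's reserved tag
+ * (the tag set reserves one tag for exactly this purpose), so the
+ * allocation cannot be starved by regular I/O.
+ */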
+ sense_rq = drive->sense_rq;
+ if (!sense_rq) {
+ sense_rq = blk_mq_alloc_request(drive->queue, REQ_OP_DRV_IN,
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ drive->sense_rq = sense_rq;
+ }
+ req = scsi_req(sense_rq);
+
memset(sense, 0, sizeof(*sense));
- blk_rq_init(rq->q, sense_rq);
scsi_req_init(req);
err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
@@ -207,6 +214,8 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
if (printk_ratelimit())
printk(KERN_WARNING PFX "%s: failed to map sense "
"buffer\n", drive->name);
+ blk_mq_free_request(sense_rq);
+ drive->sense_rq = NULL;
return;
}
@@ -226,6 +235,8 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
int ide_queue_sense_rq(ide_drive_t *drive, void *special)
{
+ struct request *sense_rq = drive->sense_rq;
+
/* deferred failure from ide_prep_sense() */
if (!drive->sense_rq_armed) {
printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
@@ -233,12 +244,12 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
return -ENOMEM;
}
- drive->sense_rq->special = special;
+ ide_req(sense_rq)->special = special;
drive->sense_rq_armed = false;
drive->hwif->rq = NULL;
- elv_add_request(drive->queue, drive->sense_rq, ELEVATOR_INSERT_FRONT);
+ ide_insert_request_head(drive, sense_rq);
return 0;
}
EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
@@ -270,10 +281,8 @@ void ide_retry_pc(ide_drive_t *drive)
*/
drive->hwif->rq = NULL;
ide_requeue_and_plug(drive, failed_rq);
- if (ide_queue_sense_rq(drive, pc)) {
- blk_start_request(failed_rq);
+ if (ide_queue_sense_rq(drive, pc))
ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(failed_rq));
- }
}
EXPORT_SYMBOL_GPL(ide_retry_pc);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index f9b59d41813f..1f03884a6808 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -211,12 +211,12 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive,
static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq)
{
/*
- * For ATA_PRIV_SENSE, "rq->special" points to the original
+ * For ATA_PRIV_SENSE, "ide_req(rq)->special" points to the original
* failed request. Also, the sense data should be read
* directly from rq which might be different from the original
* sense buffer if it got copied during mapping.
*/
- struct request *failed = (struct request *)rq->special;
+ struct request *failed = ide_req(rq)->special;
void *sense = bio_data(rq->bio);
if (failed) {
@@ -258,11 +258,22 @@ static int ide_cd_breathe(ide_drive_t *drive, struct request *rq)
/*
* take a breather
*/
- blk_delay_queue(drive->queue, 1);
+ blk_mq_requeue_request(rq, false);
+ blk_mq_delay_kick_requeue_list(drive->queue, 1);
return 1;
}
}
+static void ide_cd_free_sense(ide_drive_t *drive)
+{
+ if (!drive->sense_rq)
+ return;
+
+ blk_mq_free_request(drive->sense_rq);
+ drive->sense_rq = NULL;
+ drive->sense_rq_armed = false;
+}
+
/**
* Returns:
* 0: if the request should be continued.
@@ -516,6 +527,82 @@ static bool ide_cd_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
return false;
}
+/* standard prep_rq that builds 10 byte cmds */
+static bool ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
+{
+ int hard_sect = queue_logical_block_size(q);
+ long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
+ unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
+ struct scsi_request *req = scsi_req(rq);
+
+ if (rq_data_dir(rq) == READ)
+ req->cmd[0] = GPCMD_READ_10;
+ else
+ req->cmd[0] = GPCMD_WRITE_10;
+
+ /*
+ * fill in lba
+ */
+ req->cmd[2] = (block >> 24) & 0xff;
+ req->cmd[3] = (block >> 16) & 0xff;
+ req->cmd[4] = (block >> 8) & 0xff;
+ req->cmd[5] = block & 0xff;
+
+ /*
+ * and transfer length
+ */
+ req->cmd[7] = (blocks >> 8) & 0xff;
+ req->cmd[8] = blocks & 0xff;
+ req->cmd_len = 10;
+ return true;
+}
+
+/*
+ * Most of the SCSI commands are supported directly by ATAPI devices.
+ * This transform handles the few exceptions.
+ */
+static bool ide_cdrom_prep_pc(struct request *rq)
+{
+ u8 *c = scsi_req(rq)->cmd;
+
+ /* transform 6-byte read/write commands to the 10-byte version */
+ if (c[0] == READ_6 || c[0] == WRITE_6) {
+ c[8] = c[4];
+ c[5] = c[3];
+ c[4] = c[2];
+ c[3] = c[1] & 0x1f;
+ c[2] = 0;
+ c[1] &= 0xe0;
+ c[0] += (READ_10 - READ_6);
+ scsi_req(rq)->cmd_len = 10;
+ return true;
+ }
+
+ /*
+ * it's silly to pretend we understand 6-byte sense commands, just
+ * reject with ILLEGAL_REQUEST and the caller should take the
+ * appropriate action
+ */
+ if (c[0] == MODE_SENSE || c[0] == MODE_SELECT) {
+ scsi_req(rq)->result = ILLEGAL_REQUEST;
+ return false;
+ }
+
+ return true;
+}
+
+static bool ide_cdrom_prep_rq(ide_drive_t *drive, struct request *rq)
+{
+ if (!blk_rq_is_passthrough(rq)) {
+ scsi_req_init(scsi_req(rq));
+
+ return ide_cdrom_prep_fs(drive->queue, rq);
+ } else if (blk_rq_is_scsi(rq)) {
+ return ide_cdrom_prep_pc(rq);
+ }
+
+ return true;
+}
+
static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
@@ -675,7 +762,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
out_end:
if (blk_rq_is_scsi(rq) && rc == 0) {
scsi_req(rq)->resid_len = 0;
- blk_end_request_all(rq, BLK_STS_OK);
+ blk_mq_end_request(rq, BLK_STS_OK);
hwif->rq = NULL;
} else {
if (sense && uptodate)
@@ -705,6 +792,8 @@ out_end:
if (sense && rc == 2)
ide_error(drive, "request sense failure", stat);
}
+
+ ide_cd_free_sense(drive);
return ide_stopped;
}
@@ -729,7 +818,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
* We may be retrying this request after an error. Fix up any
* weirdness which might be present in the request packet.
*/
- q->prep_rq_fn(q, rq);
+ ide_cdrom_prep_rq(drive, rq);
}
/* fs requests *must* be hardware frame aligned */
@@ -1323,82 +1412,6 @@ static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
return nslots;
}
-/* standard prep_rq_fn that builds 10 byte cmds */
-static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
-{
- int hard_sect = queue_logical_block_size(q);
- long block = (long)blk_rq_pos(rq) / (hard_sect >> 9);
- unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9);
- struct scsi_request *req = scsi_req(rq);
-
- q->initialize_rq_fn(rq);
-
- if (rq_data_dir(rq) == READ)
- req->cmd[0] = GPCMD_READ_10;
- else
- req->cmd[0] = GPCMD_WRITE_10;
-
- /*
- * fill in lba
- */
- req->cmd[2] = (block >> 24) & 0xff;
- req->cmd[3] = (block >> 16) & 0xff;
- req->cmd[4] = (block >> 8) & 0xff;
- req->cmd[5] = block & 0xff;
-
- /*
- * and transfer length
- */
- req->cmd[7] = (blocks >> 8) & 0xff;
- req->cmd[8] = blocks & 0xff;
- req->cmd_len = 10;
- return BLKPREP_OK;
-}
-
-/*
- * Most of the SCSI commands are supported directly by ATAPI devices.
- * This transform handles the few exceptions.
- */
-static int ide_cdrom_prep_pc(struct request *rq)
-{
- u8 *c = scsi_req(rq)->cmd;
-
- /* transform 6-byte read/write commands to the 10-byte version */
- if (c[0] == READ_6 || c[0] == WRITE_6) {
- c[8] = c[4];
- c[5] = c[3];
- c[4] = c[2];
- c[3] = c[1] & 0x1f;
- c[2] = 0;
- c[1] &= 0xe0;
- c[0] += (READ_10 - READ_6);
- scsi_req(rq)->cmd_len = 10;
- return BLKPREP_OK;
- }
-
- /*
- * it's silly to pretend we understand 6-byte sense commands, just
- * reject with ILLEGAL_REQUEST and the caller should take the
- * appropriate action
- */
- if (c[0] == MODE_SENSE || c[0] == MODE_SELECT) {
- scsi_req(rq)->result = ILLEGAL_REQUEST;
- return BLKPREP_KILL;
- }
-
- return BLKPREP_OK;
-}
-
-static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq)
-{
- if (!blk_rq_is_passthrough(rq))
- return ide_cdrom_prep_fs(q, rq);
- else if (blk_rq_is_scsi(rq))
- return ide_cdrom_prep_pc(rq);
-
- return 0;
-}
-
struct cd_list_entry {
const char *id_model;
const char *id_firmware;
@@ -1508,7 +1521,7 @@ static int ide_cdrom_setup(ide_drive_t *drive)
ide_debug_log(IDE_DBG_PROBE, "enter");
- blk_queue_prep_rq(q, ide_cdrom_prep_fn);
+ drive->prep_rq = ide_cdrom_prep_rq;
blk_queue_dma_alignment(q, 31);
blk_queue_update_dma_pad(q, 15);
@@ -1569,7 +1582,7 @@ static void ide_cd_release(struct device *dev)
if (devinfo->handle == drive)
unregister_cdrom(devinfo);
drive->driver_data = NULL;
- blk_queue_prep_rq(drive->queue, NULL);
+ drive->prep_rq = NULL;
g->private_data = NULL;
put_disk(g);
kfree(info);
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index f4f8afdf8bbe..f2f93ed40356 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -171,7 +171,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
scsi_req(rq)->cmd_len = 5;
scsi_req(rq)->cmd[0] = REQ_DEVSET_EXEC;
*(int *)&scsi_req(rq)->cmd[1] = arg;
- rq->special = setting->set;
+ ide_req(rq)->special = setting->set;
blk_execute_rq(q, NULL, rq, 0);
ret = scsi_req(rq)->result;
@@ -182,7 +182,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq)
{
- int err, (*setfunc)(ide_drive_t *, int) = rq->special;
+ int err, (*setfunc)(ide_drive_t *, int) = ide_req(rq)->special;
err = setfunc(drive, *(int *)&scsi_req(rq)->cmd[1]);
if (err)
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index e3b4e659082d..197912af5c2f 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -427,16 +427,15 @@ static void ide_disk_unlock_native_capacity(ide_drive_t *drive)
drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */
}
-static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
+static bool idedisk_prep_rq(ide_drive_t *drive, struct request *rq)
{
- ide_drive_t *drive = q->queuedata;
struct ide_cmd *cmd;
if (req_op(rq) != REQ_OP_FLUSH)
- return BLKPREP_OK;
+ return true;
- if (rq->special) {
- cmd = rq->special;
+ if (ide_req(rq)->special) {
+ cmd = ide_req(rq)->special;
memset(cmd, 0, sizeof(*cmd));
} else {
cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
@@ -456,10 +455,10 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq)
rq->cmd_flags &= ~REQ_OP_MASK;
rq->cmd_flags |= REQ_OP_DRV_OUT;
ide_req(rq)->type = ATA_PRIV_TASKFILE;
- rq->special = cmd;
+ ide_req(rq)->special = cmd;
cmd->rq = rq;
- return BLKPREP_OK;
+ return true;
}
ide_devset_get(multcount, mult_count);
@@ -548,7 +547,7 @@ static void update_flush(ide_drive_t *drive)
if (barrier) {
wc = true;
- blk_queue_prep_rq(drive->queue, idedisk_prep_fn);
+ drive->prep_rq = idedisk_prep_rq;
}
}
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c
index 47d5f3379748..e1323e058454 100644
--- a/drivers/ide/ide-eh.c
+++ b/drivers/ide/ide-eh.c
@@ -125,7 +125,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat)
/* retry only "normal" I/O: */
if (blk_rq_is_passthrough(rq)) {
if (ata_taskfile_request(rq)) {
- struct ide_cmd *cmd = rq->special;
+ struct ide_cmd *cmd = ide_req(rq)->special;
if (cmd)
ide_complete_cmd(drive, cmd, stat, err);
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index a8df300f949c..780d33ccc5d8 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -276,7 +276,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive,
switch (ide_req(rq)->type) {
case ATA_PRIV_MISC:
case ATA_PRIV_SENSE:
- pc = (struct ide_atapi_pc *)rq->special;
+ pc = (struct ide_atapi_pc *)ide_req(rq)->special;
break;
default:
BUG();
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 0d93e0cfbeaf..8445b484ae69 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -67,7 +67,15 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
ide_dma_on(drive);
}
- return blk_end_request(rq, error, nr_bytes);
+ if (!blk_update_request(rq, error, nr_bytes)) {
+ if (rq == drive->sense_rq)
+ drive->sense_rq = NULL;
+
+ __blk_mq_end_request(rq, error);
+ return 0;
+ }
+
+ return 1;
}
EXPORT_SYMBOL_GPL(ide_end_rq);
@@ -103,7 +111,7 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
}
if (rq && ata_taskfile_request(rq)) {
- struct ide_cmd *orig_cmd = rq->special;
+ struct ide_cmd *orig_cmd = ide_req(rq)->special;
if (cmd->tf_flags & IDE_TFLAG_DYN)
kfree(orig_cmd);
@@ -253,7 +261,7 @@ EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
struct request *rq)
{
- struct ide_cmd *cmd = rq->special;
+ struct ide_cmd *cmd = ide_req(rq)->special;
if (cmd) {
if (cmd->protocol == ATA_PROT_PIO) {
@@ -307,8 +315,6 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
ide_startstop_t startstop;
- BUG_ON(!(rq->rq_flags & RQF_STARTED));
-
#ifdef DEBUG
printk("%s: start_request: current=0x%08lx\n",
drive->hwif->name, (unsigned long) rq);
@@ -320,6 +326,9 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
goto kill_rq;
}
+ if (drive->prep_rq && !drive->prep_rq(drive, rq))
+ return ide_stopped;
+
if (ata_pm_request(rq))
ide_check_pm_state(drive, rq);
@@ -343,7 +352,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
if (ata_taskfile_request(rq))
return execute_drive_cmd(drive, rq);
else if (ata_pm_request(rq)) {
- struct ide_pm_state *pm = rq->special;
+ struct ide_pm_state *pm = ide_req(rq)->special;
#ifdef DEBUG_PM
printk("%s: start_power_step(step: %d)\n",
drive->name, pm->pm_step);
@@ -430,44 +439,42 @@ static inline void ide_unlock_host(struct ide_host *host)
}
}
-static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
-{
- if (rq)
- blk_requeue_request(q, rq);
- if (rq || blk_peek_request(q)) {
- /* Use 3ms as that was the old plug delay */
- blk_delay_queue(q, 3);
- }
-}
-
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
struct request_queue *q = drive->queue;
- unsigned long flags;
- spin_lock_irqsave(q->queue_lock, flags);
- __ide_requeue_and_plug(q, rq);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ /* Use 3ms as that was the old plug delay */
+ if (rq) {
+ blk_mq_requeue_request(rq, false);
+ blk_mq_delay_kick_requeue_list(q, 3);
+ } else
+ blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
}
/*
* Issue a new request to a device.
*/
-void do_ide_request(struct request_queue *q)
+blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- ide_drive_t *drive = q->queuedata;
+ ide_drive_t *drive = hctx->queue->queuedata;
ide_hwif_t *hwif = drive->hwif;
struct ide_host *host = hwif->host;
- struct request *rq = NULL;
+ struct request *rq = bd->rq;
ide_startstop_t startstop;
- spin_unlock_irq(q->queue_lock);
+ if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
+ rq->rq_flags |= RQF_DONTPREP;
+ ide_req(rq)->special = NULL;
+ }
/* HLD do_request() callback might sleep, make sure it's okay */
might_sleep();
if (ide_lock_host(host, hwif))
- goto plug_device_2;
+ return BLK_STS_DEV_RESOURCE;
+
+ blk_mq_start_request(rq);
spin_lock_irq(&hwif->lock);
@@ -503,21 +510,16 @@ repeat:
hwif->cur_dev = drive;
drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
- spin_unlock_irq(&hwif->lock);
- spin_lock_irq(q->queue_lock);
/*
* we know that the queue isn't empty, but this can happen
- * if the q->prep_rq_fn() decides to kill a request
+ * if ->prep_rq() decides to kill a request
*/
- if (!rq)
- rq = blk_fetch_request(drive->queue);
-
- spin_unlock_irq(q->queue_lock);
- spin_lock_irq(&hwif->lock);
-
if (!rq) {
- ide_unlock_port(hwif);
- goto out;
+ rq = bd->rq;
+ if (!rq) {
+ ide_unlock_port(hwif);
+ goto out;
+ }
}
/*
@@ -551,23 +553,24 @@ repeat:
if (startstop == ide_stopped) {
rq = hwif->rq;
hwif->rq = NULL;
- goto repeat;
+ if (rq)
+ goto repeat;
+ ide_unlock_port(hwif);
+ goto out;
}
- } else
- goto plug_device;
+ } else {
+plug_device:
+ spin_unlock_irq(&hwif->lock);
+ ide_unlock_host(host);
+ ide_requeue_and_plug(drive, rq);
+ return BLK_STS_OK;
+ }
+
out:
spin_unlock_irq(&hwif->lock);
if (rq == NULL)
ide_unlock_host(host);
- spin_lock_irq(q->queue_lock);
- return;
-
-plug_device:
- spin_unlock_irq(&hwif->lock);
- ide_unlock_host(host);
-plug_device_2:
- spin_lock_irq(q->queue_lock);
- __ide_requeue_and_plug(q, rq);
+ return BLK_STS_OK;
}
static int drive_is_ready(ide_drive_t *drive)
@@ -887,3 +890,16 @@ void ide_pad_transfer(ide_drive_t *drive, int write, int len)
}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);
+
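+/*
+ * blk-mq has no equivalent of ELEVATOR_INSERT_FRONT, so requests that
+ * must run next (sense fetch, un-park) are put on a per-drive list
+ * that a kblockd work item drains via blk_execute_rq_nowait() with
+ * at_head set.
+ */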
+void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwif->lock, flags);
+ list_add_tail(&rq->queuelist, &drive->rq_list);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+
+ kblockd_schedule_work(&drive->rq_work);
+}
+EXPORT_SYMBOL_GPL(ide_insert_request_head);
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 622f0edb3945..102aa3bc3e7f 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -27,7 +27,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
spin_unlock_irq(&hwif->lock);
if (start_queue)
- blk_run_queue(q);
+ blk_mq_run_hw_queues(q, true);
return;
}
spin_unlock_irq(&hwif->lock);
@@ -36,7 +36,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
scsi_req(rq)->cmd[0] = REQ_PARK_HEADS;
scsi_req(rq)->cmd_len = 1;
ide_req(rq)->type = ATA_PRIV_MISC;
- rq->special = &timeout;
+ ide_req(rq)->special = &timeout;
blk_execute_rq(q, NULL, rq, 1);
rc = scsi_req(rq)->result ? -EIO : 0;
blk_put_request(rq);
@@ -54,7 +54,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
scsi_req(rq)->cmd_len = 1;
ide_req(rq)->type = ATA_PRIV_MISC;
- elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
+ ide_insert_request_head(drive, rq);
out:
return;
@@ -67,7 +67,7 @@ ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq)
memset(&cmd, 0, sizeof(cmd));
if (scsi_req(rq)->cmd[0] == REQ_PARK_HEADS) {
- drive->sleep = *(unsigned long *)rq->special;
+ drive->sleep = *(unsigned long *)ide_req(rq)->special;
drive->dev_flags |= IDE_DFLAG_SLEEPING;
tf->command = ATA_CMD_IDLEIMMEDIATE;
tf->feature = 0x44;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 59217aa1d1fb..192e6c65d34e 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -21,7 +21,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, 0);
ide_req(rq)->type = ATA_PRIV_PM_SUSPEND;
- rq->special = &rqpm;
+ ide_req(rq)->special = &rqpm;
rqpm.pm_step = IDE_PM_START_SUSPEND;
if (mesg.event == PM_EVENT_PRETHAW)
mesg.event = PM_EVENT_FREEZE;
@@ -40,32 +40,17 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
return ret;
}
-static void ide_end_sync_rq(struct request *rq, blk_status_t error)
-{
- complete(rq->end_io_data);
-}
-
static int ide_pm_execute_rq(struct request *rq)
{
struct request_queue *q = rq->q;
- DECLARE_COMPLETION_ONSTACK(wait);
- rq->end_io_data = &wait;
- rq->end_io = ide_end_sync_rq;
-
- spin_lock_irq(q->queue_lock);
if (unlikely(blk_queue_dying(q))) {
rq->rq_flags |= RQF_QUIET;
scsi_req(rq)->result = -ENXIO;
- __blk_end_request_all(rq, BLK_STS_OK);
- spin_unlock_irq(q->queue_lock);
+ blk_mq_end_request(rq, BLK_STS_OK);
return -ENXIO;
}
- __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
- __blk_run_queue_uncond(q);
- spin_unlock_irq(q->queue_lock);
-
- wait_for_completion_io(&wait);
+ blk_execute_rq(q, NULL, rq, true);
return scsi_req(rq)->result ? -EIO : 0;
}
@@ -79,6 +64,8 @@ int generic_ide_resume(struct device *dev)
struct ide_pm_state rqpm;
int err;
+ blk_mq_start_stopped_hw_queues(drive->queue, true);
+
if (ide_port_acpi(hwif)) {
/* call ACPI _PS0 / _STM only once */
if ((drive->dn & 1) == 0 || pair == NULL) {
@@ -92,7 +79,7 @@ int generic_ide_resume(struct device *dev)
memset(&rqpm, 0, sizeof(rqpm));
rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
- rq->special = &rqpm;
+ ide_req(rq)->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
rqpm.pm_state = PM_EVENT_ON;
@@ -111,7 +98,7 @@ int generic_ide_resume(struct device *dev)
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
- struct ide_pm_state *pm = rq->special;
+ struct ide_pm_state *pm = ide_req(rq)->special;
#ifdef DEBUG_PM
printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
@@ -141,7 +128,7 @@ void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
- struct ide_pm_state *pm = rq->special;
+ struct ide_pm_state *pm = ide_req(rq)->special;
struct ide_cmd cmd = { };
switch (pm->pm_step) {
@@ -213,8 +200,7 @@ out_do_tf:
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
struct request_queue *q = drive->queue;
- struct ide_pm_state *pm = rq->special;
- unsigned long flags;
+ struct ide_pm_state *pm = ide_req(rq)->special;
ide_complete_power_step(drive, rq);
if (pm->pm_step != IDE_PM_COMPLETED)
@@ -224,22 +210,19 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
printk("%s: completing PM request, %s\n", drive->name,
(ide_req(rq)->type == ATA_PRIV_PM_SUSPEND) ? "suspend" : "resume");
#endif
- spin_lock_irqsave(q->queue_lock, flags);
if (ide_req(rq)->type == ATA_PRIV_PM_SUSPEND)
- blk_stop_queue(q);
+ blk_mq_stop_hw_queues(q);
else
drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
- spin_unlock_irqrestore(q->queue_lock, flags);
drive->hwif->rq = NULL;
- if (blk_end_request(rq, BLK_STS_OK, 0))
- BUG();
+ blk_mq_end_request(rq, BLK_STS_OK);
}
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
- struct ide_pm_state *pm = rq->special;
+ struct ide_pm_state *pm = ide_req(rq)->special;
if (blk_rq_is_private(rq) &&
ide_req(rq)->type == ATA_PRIV_PM_SUSPEND &&
@@ -260,7 +243,6 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
ide_hwif_t *hwif = drive->hwif;
const struct ide_tp_ops *tp_ops = hwif->tp_ops;
struct request_queue *q = drive->queue;
- unsigned long flags;
int rc;
#ifdef DEBUG_PM
printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
@@ -274,8 +256,6 @@ void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
if (rc)
printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ blk_mq_start_hw_queues(q);
}
}
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 3b75a7b7a284..63627be0811a 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -746,10 +746,16 @@ static void ide_initialize_rq(struct request *rq)
{
struct ide_request *req = blk_mq_rq_to_pdu(rq);
+ req->special = NULL;
scsi_req_init(&req->sreq);
req->sreq.sense = req->sense;
}
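+
+/*
+ * Single hardware queue: ->queue_rq() dispatches one request at a time
+ * and may sleep, hence BLK_MQ_F_BLOCKING on the tag set set up in
+ * ide_init_queue().
+ */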
+static const struct blk_mq_ops ide_mq_ops = {
+ .queue_rq = ide_queue_rq,
+ .initialize_rq_fn = ide_initialize_rq,
+};
+
/*
* init request queue
*/
@@ -759,6 +765,7 @@ static int ide_init_queue(ide_drive_t *drive)
ide_hwif_t *hwif = drive->hwif;
int max_sectors = 256;
int max_sg_entries = PRD_ENTRIES;
+ struct blk_mq_tag_set *set;
/*
* Our default set up assumes the normal IDE case,
@@ -767,19 +774,26 @@ static int ide_init_queue(ide_drive_t *drive)
* limits and LBA48 we could raise it but as yet
* do not.
*/
- q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif), NULL);
- if (!q)
+
+ set = &drive->tag_set;
+ set->ops = &ide_mq_ops;
+ set->nr_hw_queues = 1;
+ set->queue_depth = 32;
+ set->reserved_tags = 1;
+ set->cmd_size = sizeof(struct ide_request);
+ set->numa_node = hwif_to_node(hwif);
+ set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
+ if (blk_mq_alloc_tag_set(set))
return 1;
- q->request_fn = do_ide_request;
- q->initialize_rq_fn = ide_initialize_rq;
- q->cmd_size = sizeof(struct ide_request);
- blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
- if (blk_init_allocated_queue(q) < 0) {
- blk_cleanup_queue(q);
+ q = blk_mq_init_queue(set);
+ if (IS_ERR(q)) {
+ blk_mq_free_tag_set(set);
return 1;
}
+ blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+
q->queuedata = drive;
blk_queue_segment_boundary(q, 0xffff);
@@ -965,8 +979,12 @@ static void drive_release_dev (struct device *dev)
ide_proc_unregister_device(drive);
+ if (drive->sense_rq)
+ blk_mq_free_request(drive->sense_rq);
+
blk_cleanup_queue(drive->queue);
drive->queue = NULL;
+ blk_mq_free_tag_set(&drive->tag_set);
drive->dev_flags &= ~IDE_DFLAG_PRESENT;
@@ -1133,6 +1151,28 @@ static void ide_port_cable_detect(ide_hwif_t *hwif)
}
}
+/*
+ * Deferred request list insertion handler
+ */
+static void drive_rq_insert_work(struct work_struct *work)
+{
+ ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&hwif->lock);
+ if (!list_empty(&drive->rq_list))
+ list_splice_init(&drive->rq_list, &list);
+ spin_unlock_irq(&hwif->lock);
+
+ while (!list_empty(&list)) {
+ rq = list_first_entry(&list, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL);
+ }
+}
+
static const u8 ide_hwif_to_major[] =
{ IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR, IDE4_MAJOR,
IDE5_MAJOR, IDE6_MAJOR, IDE7_MAJOR, IDE8_MAJOR, IDE9_MAJOR };
@@ -1145,12 +1185,10 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
ide_port_for_each_dev(i, drive, hwif) {
u8 j = (hwif->index * MAX_DRIVES) + i;
u16 *saved_id = drive->id;
- struct request *saved_sense_rq = drive->sense_rq;
memset(drive, 0, sizeof(*drive));
memset(saved_id, 0, SECTOR_SIZE);
drive->id = saved_id;
- drive->sense_rq = saved_sense_rq;
drive->media = ide_disk;
drive->select = (i << 4) | ATA_DEVICE_OBS;
@@ -1166,6 +1204,9 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
INIT_LIST_HEAD(&drive->list);
init_completion(&drive->gendev_rel_comp);
+
+ INIT_WORK(&drive->rq_work, drive_rq_insert_work);
+ INIT_LIST_HEAD(&drive->rq_list);
}
}
@@ -1255,7 +1296,6 @@ static void ide_port_free_devices(ide_hwif_t *hwif)
int i;
ide_port_for_each_dev(i, drive, hwif) {
- kfree(drive->sense_rq);
kfree(drive->id);
kfree(drive);
}
@@ -1283,17 +1323,10 @@ static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
if (drive->id == NULL)
goto out_free_drive;
- drive->sense_rq = kmalloc(sizeof(struct request) +
- sizeof(struct ide_request), GFP_KERNEL);
- if (!drive->sense_rq)
- goto out_free_id;
-
hwif->devices[i] = drive;
}
return 0;
-out_free_id:
- kfree(drive->id);
out_free_drive:
kfree(drive);
out_nomem:
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index 45c997430332..4c8c7a620d08 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -614,18 +614,7 @@ static int ide_drivers_show(struct seq_file *s, void *p)
return 0;
}
-static int ide_drivers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, &ide_drivers_show, NULL);
-}
-
-static const struct file_operations ide_drivers_operations = {
- .owner = THIS_MODULE,
- .open = ide_drivers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ide_drivers);
void proc_ide_create(void)
{
@@ -634,7 +623,7 @@ void proc_ide_create(void)
if (!proc_ide_root)
return;
- proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations);
+ proc_create("drivers", 0, proc_ide_root, &ide_drivers_fops);
}
void proc_ide_destroy(void)
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 34c1165226a4..db1a65f4b490 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -639,7 +639,7 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
goto out;
}
if (req->cmd[13] & REQ_IDETAPE_PC1) {
- pc = (struct ide_atapi_pc *)rq->special;
+ pc = (struct ide_atapi_pc *)ide_req(rq)->special;
req->cmd[13] &= ~(REQ_IDETAPE_PC1);
req->cmd[13] |= REQ_IDETAPE_PC2;
goto out;
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index c21d5c50ae3a..17b2e379e872 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -440,7 +440,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
goto put_req;
}
- rq->special = cmd;
+ ide_req(rq)->special = cmd;
cmd->rq = rq;
blk_execute_rq(drive->queue, NULL, rq, 0);
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index c5b902b86b44..92f840365718 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
struct device_node *root = of_find_node_by_path("/");
const char *model = of_get_property(root, "model", NULL);
+ of_node_put(root);
/* Get cable type from device-tree. */
if (cable && !strncmp(cable, "80-", 3)) {
/* Some drives fail to detect 80c cable in PowerBook */
@@ -1045,7 +1046,7 @@ static int pmac_ide_setup_device(pmac_ide_hwif_t *pmif, struct ide_hw *hw)
d.port_ops = &pmac_ide_ata4_port_ops;
d.udma_mask = ATA_UDMA5;
} else if (of_device_is_compatible(np, "keylargo-ata")) {
- if (strcmp(np->name, "ata-4") == 0) {
+ if (of_node_name_eq(np, "ata-4")) {
pmif->kind = controller_kl_ata4;
d.port_ops = &pmac_ide_ata4_port_ops;
d.udma_mask = ATA_UDMA4;
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 41d97faf5013..38ff374a3ca4 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -149,6 +149,7 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
+ s32 min;
struct hid_sensor_hub_device *hsdev =
accel_state->common_attributes.hsdev;
@@ -158,12 +159,14 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
hid_sensor_power_state(&accel_state->common_attributes, true);
report_id = accel_state->accel[chan->scan_index].report_id;
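+ /*
+ * A negative logical minimum means the report field is signed; the
+ * flag passed below tells the sensor hub to sign-extend the value.
+ */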
+ min = accel_state->accel[chan->scan_index].logical_minimum;
address = accel_3d_addresses[chan->scan_index];
if (report_id >= 0)
*val = sensor_hub_input_attr_get_raw_value(
accel_state->common_attributes.hsdev,
hsdev->usage, address, report_id,
- SENSOR_HUB_SYNC);
+ SENSOR_HUB_SYNC,
+ min < 0);
else {
*val = 0;
hid_sensor_power_state(&accel_state->common_attributes,
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 36941e69f959..88e857c4baf4 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -111,6 +111,7 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
+ s32 min;
*val = 0;
*val2 = 0;
@@ -118,13 +119,15 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
hid_sensor_power_state(&gyro_state->common_attributes, true);
report_id = gyro_state->gyro[chan->scan_index].report_id;
+ min = gyro_state->gyro[chan->scan_index].logical_minimum;
address = gyro_3d_addresses[chan->scan_index];
if (report_id >= 0)
*val = sensor_hub_input_attr_get_raw_value(
gyro_state->common_attributes.hsdev,
HID_USAGE_SENSOR_GYRO_3D, address,
report_id,
- SENSOR_HUB_SYNC);
+ SENSOR_HUB_SYNC,
+ min < 0);
else {
*val = 0;
hid_sensor_power_state(&gyro_state->common_attributes,
diff --git a/drivers/iio/humidity/hid-sensor-humidity.c b/drivers/iio/humidity/hid-sensor-humidity.c
index beab6d6fd6e1..4bc95f31c730 100644
--- a/drivers/iio/humidity/hid-sensor-humidity.c
+++ b/drivers/iio/humidity/hid-sensor-humidity.c
@@ -75,7 +75,8 @@ static int humidity_read_raw(struct iio_dev *indio_dev,
HID_USAGE_SENSOR_HUMIDITY,
HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY,
humid_st->humidity_attr.report_id,
- SENSOR_HUB_SYNC);
+ SENSOR_HUB_SYNC,
+ humid_st->humidity_attr.logical_minimum < 0);
hid_sensor_power_state(&humid_st->common_attributes, false);
return IIO_VAL_INT;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
index d78a10403bac..a961b5a06fe6 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_acpi.c
@@ -91,18 +91,14 @@ static int asus_acpi_get_sensor_info(struct acpi_device *adev,
static int acpi_i2c_check_resource(struct acpi_resource *ares, void *data)
{
+ struct acpi_resource_i2c_serialbus *sb;
u32 *addr = data;
- if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
- struct acpi_resource_i2c_serialbus *sb;
-
- sb = &ares->data.i2c_serial_bus;
- if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
- if (*addr)
- *addr |= (sb->slave_address << 16);
- else
- *addr = sb->slave_address;
- }
+ if (i2c_acpi_get_i2c_resource(ares, &sb)) {
+ if (*addr)
+ *addr |= (sb->slave_address << 16);
+ else
+ *addr = sb->slave_address;
}
/* Tell the ACPI core that we already copied this address */
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 406caaee9a3c..94f33250ba5a 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -93,6 +93,7 @@ static int als_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
+ s32 min;
*val = 0;
*val2 = 0;
@@ -102,8 +103,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
case CHANNEL_SCAN_INDEX_INTENSITY:
case CHANNEL_SCAN_INDEX_ILLUM:
report_id = als_state->als_illum.report_id;
- address =
- HID_USAGE_SENSOR_LIGHT_ILLUM;
+ min = als_state->als_illum.logical_minimum;
+ address = HID_USAGE_SENSOR_LIGHT_ILLUM;
break;
default:
report_id = -1;
@@ -116,7 +117,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
als_state->common_attributes.hsdev,
HID_USAGE_SENSOR_ALS, address,
report_id,
- SENSOR_HUB_SYNC);
+ SENSOR_HUB_SYNC,
+ min < 0);
hid_sensor_power_state(&als_state->common_attributes,
false);
} else {
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 45107f7537b5..cf5a0c242609 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -73,6 +73,7 @@ static int prox_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
+ s32 min;
*val = 0;
*val2 = 0;
@@ -81,8 +82,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
switch (chan->scan_index) {
case CHANNEL_SCAN_INDEX_PRESENCE:
report_id = prox_state->prox_attr.report_id;
- address =
- HID_USAGE_SENSOR_HUMAN_PRESENCE;
+ min = prox_state->prox_attr.logical_minimum;
+ address = HID_USAGE_SENSOR_HUMAN_PRESENCE;
break;
default:
report_id = -1;
@@ -95,7 +96,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
prox_state->common_attributes.hsdev,
HID_USAGE_SENSOR_PROX, address,
report_id,
- SENSOR_HUB_SYNC);
+ SENSOR_HUB_SYNC,
+ min < 0);
hid_sensor_power_state(&prox_state->common_attributes,
false);
} else {
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index d55c4885211a..f3c0d41e5a8c 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -163,21 +163,23 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
+ s32 min;
*val = 0;
*val2 = 0;
switch (mask) {
case IIO_CHAN_INFO_RAW:
hid_sensor_power_state(&magn_state->magn_flux_attributes, true);
- report_id =
- magn_state->magn[chan->address].report_id;
+ report_id = magn_state->magn[chan->address].report_id;
+ min = magn_state->magn[chan->address].logical_minimum;
address = magn_3d_addresses[chan->address];
if (report_id >= 0)
*val = sensor_hub_input_attr_get_raw_value(
magn_state->magn_flux_attributes.hsdev,
HID_USAGE_SENSOR_COMPASS_3D, address,
report_id,
- SENSOR_HUB_SYNC);
+ SENSOR_HUB_SYNC,
+ min < 0);
else {
*val = 0;
hid_sensor_power_state(
diff --git a/drivers/iio/magnetometer/st_magn_buffer.c b/drivers/iio/magnetometer/st_magn_buffer.c
index 0a9e8fadfa9d..37ab30566464 100644
--- a/drivers/iio/magnetometer/st_magn_buffer.c
+++ b/drivers/iio/magnetometer/st_magn_buffer.c
@@ -30,11 +30,6 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state)
return st_sensors_set_dataready_irq(indio_dev, state);
}
-static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
-{
- return st_sensors_set_enable(indio_dev, true);
-}
-
static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
{
int err;
@@ -50,7 +45,7 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
if (err < 0)
goto st_magn_buffer_postenable_error;
- return err;
+ return st_sensors_set_enable(indio_dev, true);
st_magn_buffer_postenable_error:
kfree(mdata->buffer_data);
@@ -63,11 +58,11 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
int err;
struct st_sensor_data *mdata = iio_priv(indio_dev);
- err = iio_triggered_buffer_predisable(indio_dev);
+ err = st_sensors_set_enable(indio_dev, false);
if (err < 0)
goto st_magn_buffer_predisable_error;
- err = st_sensors_set_enable(indio_dev, false);
+ err = iio_triggered_buffer_predisable(indio_dev);
st_magn_buffer_predisable_error:
kfree(mdata->buffer_data);
@@ -75,7 +70,6 @@ st_magn_buffer_predisable_error:
}
static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
- .preenable = &st_magn_buffer_preenable,
.postenable = &st_magn_buffer_postenable,
.predisable = &st_magn_buffer_predisable,
};
diff --git a/drivers/iio/orientation/hid-sensor-incl-3d.c b/drivers/iio/orientation/hid-sensor-incl-3d.c
index 1e5451d1ff88..bdc5e4554ee4 100644
--- a/drivers/iio/orientation/hid-sensor-incl-3d.c
+++ b/drivers/iio/orientation/hid-sensor-incl-3d.c
@@ -111,21 +111,23 @@ static int incl_3d_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
+ s32 min;
*val = 0;
*val2 = 0;
switch (mask) {
case IIO_CHAN_INFO_RAW:
hid_sensor_power_state(&incl_state->common_attributes, true);
- report_id =
- incl_state->incl[chan->scan_index].report_id;
+ report_id = incl_state->incl[chan->scan_index].report_id;
+ min = incl_state->incl[chan->scan_index].logical_minimum;
address = incl_3d_addresses[chan->scan_index];
if (report_id >= 0)
*val = sensor_hub_input_attr_get_raw_value(
incl_state->common_attributes.hsdev,
HID_USAGE_SENSOR_INCLINOMETER_3D, address,
report_id,
- SENSOR_HUB_SYNC);
+ SENSOR_HUB_SYNC,
+ min < 0);
else {
hid_sensor_power_state(&incl_state->common_attributes,
false);
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 4c437918f1d2..d7b1c00ceb4d 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -77,6 +77,7 @@ static int press_read_raw(struct iio_dev *indio_dev,
int report_id = -1;
u32 address;
int ret_type;
+ s32 min;
*val = 0;
*val2 = 0;
@@ -85,8 +86,8 @@ static int press_read_raw(struct iio_dev *indio_dev,
switch (chan->scan_index) {
case CHANNEL_SCAN_INDEX_PRESSURE:
report_id = press_state->press_attr.report_id;
- address =
- HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE;
+ min = press_state->press_attr.logical_minimum;
+ address = HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE;
break;
default:
report_id = -1;
@@ -99,7 +100,8 @@ static int press_read_raw(struct iio_dev *indio_dev,
press_state->common_attributes.hsdev,
HID_USAGE_SENSOR_PRESSURE, address,
report_id,
- SENSOR_HUB_SYNC);
+ SENSOR_HUB_SYNC,
+ min < 0);
hid_sensor_power_state(&press_state->common_attributes,
false);
} else {
diff --git a/drivers/iio/temperature/hid-sensor-temperature.c b/drivers/iio/temperature/hid-sensor-temperature.c
index beaf6fd3e337..b592fc4f007e 100644
--- a/drivers/iio/temperature/hid-sensor-temperature.c
+++ b/drivers/iio/temperature/hid-sensor-temperature.c
@@ -76,7 +76,8 @@ static int temperature_read_raw(struct iio_dev *indio_dev,
HID_USAGE_SENSOR_TEMPERATURE,
HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE,
temp_st->temperature_attr.report_id,
- SENSOR_HUB_SYNC);
+ SENSOR_HUB_SYNC,
+ temp_st->temperature_attr.logical_minimum < 0);
hid_sensor_power_state(
&temp_st->common_attributes,
false);
diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c
index ee366199b169..558de0b9895c 100644
--- a/drivers/infiniband/core/roce_gid_mgmt.c
+++ b/drivers/infiniband/core/roce_gid_mgmt.c
@@ -267,6 +267,9 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
struct net_device *cookie_ndev = cookie;
bool match = false;
+ if (!rdma_ndev)
+ return false;
+
rcu_read_lock();
if (netif_is_bond_master(cookie_ndev) &&
rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
@@ -767,8 +770,10 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
case NETDEV_CHANGEADDR:
cmds[0] = netdev_del_cmd;
- cmds[1] = add_default_gid_cmd;
- cmds[2] = add_cmd;
+ if (ndev->reg_state == NETREG_REGISTERED) {
+ cmds[1] = add_default_gid_cmd;
+ cmds[2] = add_cmd;
+ }
break;
case NETDEV_CHANGEUPPER:
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 2b4c5e7dd5a1..9608681224e6 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -137,15 +137,6 @@ static void ib_umem_notifier_release(struct mmu_notifier *mn,
up_read(&per_mm->umem_rwsem);
}
-static int invalidate_page_trampoline(struct ib_umem_odp *item, u64 start,
- u64 end, void *cookie)
-{
- ib_umem_notifier_start_account(item);
- item->umem.context->invalidate_range(item, start, start + PAGE_SIZE);
- ib_umem_notifier_end_account(item);
- return 0;
-}
-
static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
u64 start, u64 end, void *cookie)
{
@@ -553,12 +544,13 @@ out:
put_page(page);
if (remove_existing_mapping && umem->context->invalidate_range) {
- invalidate_page_trampoline(
+ ib_umem_notifier_start_account(umem_odp);
+ umem->context->invalidate_range(
umem_odp,
- ib_umem_start(umem) + (page_index >> umem->page_shift),
- ib_umem_start(umem) + ((page_index + 1) >>
- umem->page_shift),
- NULL);
+ ib_umem_start(umem) + (page_index << umem->page_shift),
+ ib_umem_start(umem) +
+ ((page_index + 1) << umem->page_shift));
+ ib_umem_notifier_end_account(umem_odp);
ret = -EAGAIN;
}
@@ -655,8 +647,13 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
flags, local_page_list, NULL, NULL);
up_read(&owning_mm->mmap_sem);
- if (npages < 0)
+ if (npages < 0) {
+ if (npages != -EAGAIN)
+ pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
+ else
+ pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
break;
+ }
bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
mutex_lock(&umem_odp->umem_mutex);
@@ -674,8 +671,13 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
ret = ib_umem_odp_map_dma_single_page(
umem_odp, k, local_page_list[j],
access_mask, current_seq);
- if (ret < 0)
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
+ else
+ pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
break;
+ }
p = page_to_phys(local_page_list[j]);
k++;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index cf2282654210..77f095e5fbe3 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1268,6 +1268,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
/* Register a new RoCE device instance with netdev */
rc = bnxt_re_register_netdev(rdev);
if (rc) {
+ rtnl_unlock();
pr_err("Failed to register with netedev: %#x\n", rc);
return -EINVAL;
}
@@ -1466,6 +1467,7 @@ static void bnxt_re_task(struct work_struct *work)
"Failed to register with IB: %#x", rc);
bnxt_re_remove_one(rdev);
bnxt_re_dev_unreg(rdev);
+ goto exit;
}
break;
case NETDEV_UP:
@@ -1489,6 +1491,7 @@ static void bnxt_re_task(struct work_struct *work)
}
smp_mb__before_atomic();
atomic_dec(&rdev->sched_count);
+exit:
kfree(re_work);
}
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 615413bd3e8d..97ecc8c684f5 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2058,8 +2058,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
}
ep->mtu = pdev->mtu;
ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
- cxgb4_port_viid(pdev));
+ ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
step = cdev->rdev.lldi.ntxq /
cdev->rdev.lldi.nchan;
ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -2078,8 +2077,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
goto out;
ep->mtu = dst_mtu(dst);
ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
- cxgb4_port_viid(pdev));
+ ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
step = cdev->rdev.lldi.ntxq /
cdev->rdev.lldi.nchan;
ep->txq_idx = cxgb4_port_idx(pdev) * step;
@@ -3944,7 +3942,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
} else {
vlan_eh = (struct vlan_ethhdr *)(req + 1);
iph = (struct iphdr *)(vlan_eh + 1);
- skb->vlan_tci = ntohs(cpl->vlan);
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
}
if (iph->version != 0x4)
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 9b20479dc710..7e6d70936c63 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12500,7 +12500,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
}
/* allocate space for the counter values */
- dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
+ dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
+ GFP_KERNEL);
if (!dd->cntrs)
goto bail;
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 1401b6ea4a28..2b882347d0c2 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -155,6 +155,8 @@ struct hfi1_ib_stats {
extern struct hfi1_ib_stats hfi1_stats;
extern const struct pci_error_handlers hfi1_pci_err_handler;
+extern int num_driver_cntrs;
+
/*
* First-cut criterion for "device is active" is
* two thousand dwords combined Tx, Rx traffic per
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 6f3bc4dab858..1a016248039f 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -340,6 +340,13 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
default:
break;
}
+
+ /*
+ * System latency between send and schedule is large enough that
+ * forcing call_send to true for piothreshold packets is necessary.
+ */
+ if (wqe->length <= piothreshold)
+ *call_send = true;
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 48e11e510358..a365089a9305 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1479,7 +1479,7 @@ static const char * const driver_cntr_names[] = {
static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names buffers */
static const char **dev_cntr_names;
static const char **port_cntr_names;
-static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
static int num_dev_cntrs;
static int num_port_cntrs;
static int cntr_names_initialized;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index a4c62ae23a9a..3beb1523e17c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1756,10 +1756,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
- unsigned long mtpt_idx)
+static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+ struct hns_roce_mr *mr)
{
- struct hns_roce_v2_mpt_entry *mpt_entry;
struct scatterlist *sg;
u64 page_addr;
u64 *pages;
@@ -1767,6 +1766,53 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
int len;
int entry;
+ mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+ roce_set_field(mpt_entry->byte_48_mode_ba,
+ V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
+ upper_32_bits(mr->pbl_ba >> 3));
+
+ pages = (u64 *)__get_free_page(GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ i = 0;
+ for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (j = 0; j < len; ++j) {
+ page_addr = sg_dma_address(sg) +
+ (j << mr->umem->page_shift);
+ pages[i] = page_addr >> 6;
+ /* Record the first 2 entries directly in the MTPT table */
+ if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+ goto found;
+ i++;
+ }
+ }
+found:
+ mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
+ roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
+ V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
+
+ mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
+ roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
+ V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
+ roce_set_field(mpt_entry->byte_64_buf_pa1,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+ mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+
+ free_page((unsigned long)pages);
+
+ return 0;
+}
+
+static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+ unsigned long mtpt_idx)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+ int ret;
+
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
@@ -1781,7 +1827,6 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
V2_MPT_BYTE_4_PD_S, mr->pd);
- mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
@@ -1796,13 +1841,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
(mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
(mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
- mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
mr->type == MR_TYPE_MR ? 0 : 1);
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
1);
- mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
@@ -1813,53 +1856,9 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
if (mr->type == MR_TYPE_DMA)
return 0;
- mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
-
- mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
- roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
- V2_MPT_BYTE_48_PBL_BA_H_S,
- upper_32_bits(mr->pbl_ba >> 3));
- mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
-
- pages = (u64 *)__get_free_page(GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- i = 0;
- for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
- len = sg_dma_len(sg) >> PAGE_SHIFT;
- for (j = 0; j < len; ++j) {
- page_addr = sg_dma_address(sg) +
- (j << mr->umem->page_shift);
- pages[i] = page_addr >> 6;
-
- /* Record the first 2 entry directly to MTPT table */
- if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
- goto found;
- i++;
- }
- }
-
-found:
- mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
- roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
- V2_MPT_BYTE_56_PA0_H_S,
- upper_32_bits(pages[0]));
- mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
-
- mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
- roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
- V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
+ ret = set_mtpt_pbl(mpt_entry, mr);
- free_page((unsigned long)pages);
-
- roce_set_field(mpt_entry->byte_64_buf_pa1,
- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
- V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
- mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
- mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
-
- return 0;
+ return ret;
}
static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
@@ -1868,6 +1867,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
u64 size, void *mb_buf)
{
struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+ int ret = 0;
if (flags & IB_MR_REREG_PD) {
roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
@@ -1880,14 +1880,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
V2_MPT_BYTE_8_BIND_EN_S,
(mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
- V2_MPT_BYTE_8_ATOMIC_EN_S,
- (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
+ V2_MPT_BYTE_8_ATOMIC_EN_S,
+ mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
- (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
+ mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
- (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
+ mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
- (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
+ mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
}
if (flags & IB_MR_REREG_TRANS) {
@@ -1896,21 +1896,13 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
- mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
- mpt_entry->pbl_ba_l =
- cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
- roce_set_field(mpt_entry->byte_48_mode_ba,
- V2_MPT_BYTE_48_PBL_BA_H_M,
- V2_MPT_BYTE_48_PBL_BA_H_S,
- upper_32_bits(mr->pbl_ba >> 3));
- mpt_entry->byte_48_mode_ba =
- cpu_to_le32(mpt_entry->byte_48_mode_ba);
-
mr->iova = iova;
mr->size = size;
+
+ ret = set_mtpt_pbl(mpt_entry, mr);
}
- return 0;
+ return ret;
}
static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 771eb6bd0785..4b3999d88c9e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -404,7 +404,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
if (pdata)
pd_len = pdata->size;
- if (cm_node->vlan_id < VLAN_TAG_PRESENT)
+ if (cm_node->vlan_id <= VLAN_VID_MASK)
eth_hlen += 4;
if (cm_node->ipv4)
@@ -433,7 +433,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
ether_addr_copy(ethh->h_source, cm_node->loc_mac);
- if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ if (cm_node->vlan_id <= VLAN_VID_MASK) {
((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
@@ -463,7 +463,7 @@ static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
ether_addr_copy(ethh->h_source, cm_node->loc_mac);
- if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ if (cm_node->vlan_id <= VLAN_VID_MASK) {
((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
@@ -3323,7 +3323,7 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
tcp_info->flow_label = 0;
tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
- if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+ if (cm_node->vlan_id <= VLAN_VID_MASK) {
tcp_info->insert_vlan_tag = true;
tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
cm_node->vlan_id);
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 82adc0d1d30e..43512347b4f0 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -181,6 +181,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_cq *cq;
struct mlx4_uar *uar;
+ void *buf_addr;
int err;
if (entries < 1 || entries > dev->dev->caps.max_cqes)
@@ -211,6 +212,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
goto err_cq;
}
+ buf_addr = (void *)(unsigned long)ucmd.buf_addr;
+
err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
ucmd.buf_addr, entries);
if (err)
@@ -237,6 +240,8 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
if (err)
goto err_db;
+ buf_addr = &cq->buf.buf;
+
uar = &dev->priv_uar;
cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
}
@@ -246,7 +251,9 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
cq->db.dma, &cq->mcq, vector, 0,
- !!(cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
+ !!(cq->create_flags &
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION),
+ buf_addr, !!context);
if (err)
goto err_dbmap;
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index b8e4b15e2674..33f5adb14e4e 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,6 +1,8 @@
obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
-mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o cong.o
+mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq_cmd.o \
+ srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o \
+ cong.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 7d769b5538b4..26ab9041f94a 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -35,6 +35,7 @@
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
+#include "srq.h"
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
@@ -81,7 +82,7 @@ static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
- if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
+ if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
!((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
return cqe;
} else {
@@ -177,8 +178,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
struct mlx5_core_srq *msrq = NULL;
if (qp->ibqp.xrcd) {
- msrq = mlx5_core_get_srq(dev->mdev,
- be32_to_cpu(cqe->srqn));
+ msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
srq = to_mibsrq(msrq);
} else {
srq = to_msrq(qp->ibqp.srq);
@@ -197,7 +197,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
}
wc->byte_len = be32_to_cpu(cqe->byte_cnt);
- switch (cqe->op_own >> 4) {
+ switch (get_cqe_opcode(cqe)) {
case MLX5_CQE_RESP_WR_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
@@ -537,7 +537,7 @@ repoll:
*/
rmb();
- opcode = cqe64->op_own >> 4;
+ opcode = get_cqe_opcode(cqe64);
if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
if (likely(cq->resize_buf)) {
free_cq_buf(dev, &cq->buf);
@@ -1295,7 +1295,7 @@ static int copy_resize_cqes(struct mlx5_ib_cq *cq)
return -EINVAL;
}
- while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
+ while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
(i + 1) & cq->resize_buf->nent);
dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 61aab7c0c513..45c421c87100 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1066,7 +1066,9 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
err = uverbs_get_flags32(&access, attrs,
MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
- IB_ACCESS_SUPPORTED);
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ);
if (err)
return err;
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 584ff2ea7810..46a9ddc8ca56 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -4,6 +4,7 @@
*/
#include "ib_rep.h"
+#include "srq.h"
static const struct mlx5_ib_profile rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_INIT,
@@ -21,6 +22,9 @@ static const struct mlx5_ib_profile rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
mlx5_ib_stage_rep_roce_init,
mlx5_ib_stage_rep_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_SRQ,
+ mlx5_init_srq_table,
+ mlx5_cleanup_srq_table),
STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
mlx5_ib_stage_dev_res_init,
mlx5_ib_stage_dev_res_cleanup),
@@ -44,13 +48,21 @@ static const struct mlx5_ib_profile rep_profile = {
static int
mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
+ struct mlx5_ib_dev *ibdev;
+
+ ibdev = mlx5_ib_rep_to_dev(rep);
+ if (!__mlx5_ib_add(ibdev, ibdev->profile))
+ return -EINVAL;
return 0;
}
static void
mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
- rep->rep_if[REP_IB].priv = NULL;
+ struct mlx5_ib_dev *ibdev;
+
+ ibdev = mlx5_ib_rep_to_dev(rep);
+ __mlx5_ib_remove(ibdev, ibdev->profile, MLX5_IB_STAGE_MAX);
}
static int
@@ -85,6 +97,7 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
dev = mlx5_ib_rep_to_dev(rep);
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
rep->rep_if[REP_IB].priv = NULL;
+ ib_dealloc_device(&dev->ib_dev);
}
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e9c428071df3..e85974ab06c0 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -60,6 +60,7 @@
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
+#include "srq.h"
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <rdma/uverbs_std_types.h>
@@ -82,10 +83,13 @@ static char mlx5_version[] =
struct mlx5_ib_event_work {
struct work_struct work;
- struct mlx5_core_dev *dev;
- void *context;
- enum mlx5_dev_event event;
- unsigned long param;
+ union {
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_multiport_info *mpi;
+ };
+ bool is_slave;
+ unsigned int event;
+ void *param;
};
enum {
@@ -441,7 +445,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
if (!ndev)
goto out;
- if (mlx5_lag_is_active(dev->mdev)) {
+ if (dev->lag_active) {
rcu_read_lock();
upper = netdev_master_upper_dev_get_rcu(ndev);
if (upper) {
@@ -1094,31 +1098,26 @@ enum mlx5_ib_width {
MLX5_IB_WIDTH_12X = 1 << 4
};
-static int translate_active_width(struct ib_device *ibdev, u8 active_width,
+static void translate_active_width(struct ib_device *ibdev, u8 active_width,
u8 *ib_width)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- int err = 0;
- if (active_width & MLX5_IB_WIDTH_1X) {
+ if (active_width & MLX5_IB_WIDTH_1X)
*ib_width = IB_WIDTH_1X;
- } else if (active_width & MLX5_IB_WIDTH_2X) {
- mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
- (int)active_width);
- err = -EINVAL;
- } else if (active_width & MLX5_IB_WIDTH_4X) {
+ else if (active_width & MLX5_IB_WIDTH_4X)
*ib_width = IB_WIDTH_4X;
- } else if (active_width & MLX5_IB_WIDTH_8X) {
+ else if (active_width & MLX5_IB_WIDTH_8X)
*ib_width = IB_WIDTH_8X;
- } else if (active_width & MLX5_IB_WIDTH_12X) {
+ else if (active_width & MLX5_IB_WIDTH_12X)
*ib_width = IB_WIDTH_12X;
- } else {
- mlx5_ib_dbg(dev, "Invalid active_width %d\n",
+ else {
+ mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
(int)active_width);
- err = -EINVAL;
+ *ib_width = IB_WIDTH_4X;
}
- return err;
+ return;
}
static int mlx5_mtu_to_ib_mtu(int mtu)
@@ -1225,10 +1224,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
if (err)
goto out;
- err = translate_active_width(ibdev, ib_link_width_oper,
- &props->active_width);
- if (err)
- goto out;
+ translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
+
err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
if (err)
goto out;
@@ -1851,7 +1848,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
context->lib_caps = req.lib_caps;
print_lib_caps(dev, context->lib_caps);
- if (mlx5_lag_is_active(dev->mdev)) {
+ if (dev->lag_active) {
u8 port = mlx5_core_native_port_num(dev->mdev);
atomic_set(&context->tx_port_affinity,
@@ -2676,11 +2673,11 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
ntohs(ib_spec->gre.val.protocol));
memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
- gre_key_h),
+ gre_key.nvgre.hi),
&ib_spec->gre.mask.key,
sizeof(ib_spec->gre.mask.key));
memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
- gre_key_h),
+ gre_key.nvgre.hi),
&ib_spec->gre.val.key,
sizeof(ib_spec->gre.val.key));
break;
@@ -4233,6 +4230,63 @@ static void delay_drop_handler(struct work_struct *work)
mutex_unlock(&delay_drop->lock);
}
+static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
+ struct ib_event *ibev)
+{
+ switch (eqe->sub_type) {
+ case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
+ schedule_work(&ibdev->delay_drop.delay_drop_work);
+ break;
+ default: /* do nothing */
+ return;
+ }
+}
+
+static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
+ struct ib_event *ibev)
+{
+ u8 port = (eqe->data.port.port >> 4) & 0xf;
+
+ ibev->element.port_num = port;
+
+ switch (eqe->sub_type) {
+ case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
+ /* In RoCE, port up/down events are handled in
+ * mlx5_netdev_event().
+ */
+ if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET)
+ return -EINVAL;
+
+ ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ break;
+
+ case MLX5_PORT_CHANGE_SUBTYPE_LID:
+ ibev->event = IB_EVENT_LID_CHANGE;
+ break;
+
+ case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
+ ibev->event = IB_EVENT_PKEY_CHANGE;
+ schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
+ break;
+
+ case MLX5_PORT_CHANGE_SUBTYPE_GUID:
+ ibev->event = IB_EVENT_GID_CHANGE;
+ break;
+
+ case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
+ ibev->event = IB_EVENT_CLIENT_REREGISTER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void mlx5_ib_handle_event(struct work_struct *_work)
{
struct mlx5_ib_event_work *work =
@@ -4240,65 +4294,37 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
struct mlx5_ib_dev *ibdev;
struct ib_event ibev;
bool fatal = false;
- u8 port = (u8)work->param;
- if (mlx5_core_is_mp_slave(work->dev)) {
- ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
+ if (work->is_slave) {
+ ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
if (!ibdev)
goto out;
} else {
- ibdev = work->context;
+ ibdev = work->dev;
}
switch (work->event) {
case MLX5_DEV_EVENT_SYS_ERROR:
ibev.event = IB_EVENT_DEVICE_FATAL;
mlx5_ib_handle_internal_error(ibdev);
+ ibev.element.port_num = (u8)(unsigned long)work->param;
fatal = true;
break;
-
- case MLX5_DEV_EVENT_PORT_UP:
- case MLX5_DEV_EVENT_PORT_DOWN:
- case MLX5_DEV_EVENT_PORT_INITIALIZED:
- /* In RoCE, port up/down events are handled in
- * mlx5_netdev_event().
- */
- if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
- IB_LINK_LAYER_ETHERNET)
+ case MLX5_EVENT_TYPE_PORT_CHANGE:
+ if (handle_port_change(ibdev, work->param, &ibev))
goto out;
-
- ibev.event = (work->event == MLX5_DEV_EVENT_PORT_UP) ?
- IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
- break;
-
- case MLX5_DEV_EVENT_LID_CHANGE:
- ibev.event = IB_EVENT_LID_CHANGE;
- break;
-
- case MLX5_DEV_EVENT_PKEY_CHANGE:
- ibev.event = IB_EVENT_PKEY_CHANGE;
- schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
- break;
-
- case MLX5_DEV_EVENT_GUID_CHANGE:
- ibev.event = IB_EVENT_GID_CHANGE;
- break;
-
- case MLX5_DEV_EVENT_CLIENT_REREG:
- ibev.event = IB_EVENT_CLIENT_REREGISTER;
break;
- case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
- schedule_work(&ibdev->delay_drop.delay_drop_work);
- goto out;
+ case MLX5_EVENT_TYPE_GENERAL_EVENT:
+ handle_general_event(ibdev, work->param, &ibev);
+ /* fall through */
default:
goto out;
}
- ibev.device = &ibdev->ib_dev;
- ibev.element.port_num = port;
+ ibev.device = &ibdev->ib_dev;
- if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
- mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+ if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
+ mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
goto out;
}
@@ -4311,22 +4337,43 @@ out:
kfree(work);
}
-static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
- enum mlx5_dev_event event, unsigned long param)
+static int mlx5_ib_event(struct notifier_block *nb,
+ unsigned long event, void *param)
{
struct mlx5_ib_event_work *work;
work = kmalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
- return;
+ return NOTIFY_DONE;
INIT_WORK(&work->work, mlx5_ib_handle_event);
- work->dev = dev;
+ work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
+ work->is_slave = false;
work->param = param;
- work->context = context;
work->event = event;
queue_work(mlx5_ib_event_wq, &work->work);
+
+ return NOTIFY_OK;
+}
+
+static int mlx5_ib_event_slave_port(struct notifier_block *nb,
+ unsigned long event, void *param)
+{
+ struct mlx5_ib_event_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return NOTIFY_DONE;
+
+ INIT_WORK(&work->work, mlx5_ib_handle_event);
+ work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
+ work->is_slave = true;
+ work->param = param;
+ work->event = event;
+ queue_work(mlx5_ib_event_wq, &work->work);
+
+ return NOTIFY_OK;
}
static int set_has_smi_cap(struct mlx5_ib_dev *dev)
@@ -4794,7 +4841,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
struct mlx5_flow_table *ft;
int err;
- if (!ns || !mlx5_lag_is_active(mdev))
+ if (!ns || !mlx5_lag_is_roce(mdev))
return 0;
err = mlx5_cmd_create_vport_lag(mdev);
@@ -4808,6 +4855,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
}
dev->flow_db->lag_demux_ft = ft;
+ dev->lag_active = true;
return 0;
err_destroy_vport_lag:
@@ -4819,7 +4867,9 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
- if (dev->flow_db->lag_demux_ft) {
+ if (dev->lag_active) {
+ dev->lag_active = false;
+
mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
dev->flow_db->lag_demux_ft = NULL;
@@ -5337,7 +5387,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
+ return mlx5_comp_irq_get_affinity_mask(dev->mdev, comp_vector);
}
/* The mlx5_ib_multiport_mutex should be held when calling this function */
@@ -5357,6 +5407,11 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
spin_unlock(&port->mp.mpi_lock);
return;
}
+
+ if (mpi->mdev_events.notifier_call)
+ mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
+ mpi->mdev_events.notifier_call = NULL;
+
mpi->ibdev = NULL;
spin_unlock(&port->mp.mpi_lock);
@@ -5412,6 +5467,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
ibdev->port[port_num].mp.mpi = mpi;
mpi->ibdev = ibdev;
+ mpi->mdev_events.notifier_call = NULL;
spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
@@ -5429,6 +5485,9 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
goto unbind;
}
+ mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
+ mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
+
err = mlx5_ib_init_cong_debugfs(ibdev, port_num);
if (err)
goto unbind;
@@ -5701,8 +5760,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
dev->ib_dev.phys_port_cnt = dev->num_ports;
- dev->ib_dev.num_comp_vectors =
- dev->mdev->priv.eq_table.num_comp_vectors;
+ dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
dev->ib_dev.dev.parent = &mdev->pdev->dev;
mutex_init(&dev->cap_mask_mutex);
@@ -6041,6 +6099,11 @@ static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
return mlx5_ib_odp_init_one(dev);
}
+void mlx5_ib_stage_odp_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_odp_cleanup_one(dev);
+}
+
int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
@@ -6113,7 +6176,7 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
const char *name;
rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
- if (!mlx5_lag_is_active(dev->mdev))
+ if (!mlx5_lag_is_roce(dev->mdev))
name = "mlx5_%d";
else
name = "mlx5_bond_%d";
@@ -6147,16 +6210,32 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
cancel_delay_drop(dev);
}
-static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
{
- mlx5_ib_register_vport_reps(dev);
-
+ dev->mdev_events.notifier_call = mlx5_ib_event;
+ mlx5_notifier_register(dev->mdev, &dev->mdev_events);
return 0;
}
-static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
+}
+
+static int mlx5_ib_stage_devx_init(struct mlx5_ib_dev *dev)
{
- mlx5_ib_unregister_vport_reps(dev);
+ int uid;
+
+ uid = mlx5_ib_devx_create(dev);
+ if (uid > 0)
+ dev->devx_whitelist_uid = uid;
+
+ return 0;
+}
+static void mlx5_ib_stage_devx_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (dev->devx_whitelist_uid)
+ mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
}
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
@@ -6169,10 +6248,6 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
if (profile->stage[stage].cleanup)
profile->stage[stage].cleanup(dev);
}
-
- if (dev->devx_whitelist_uid)
- mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
- ib_dealloc_device((struct ib_device *)dev);
}
void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
@@ -6180,7 +6255,6 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
{
int err;
int i;
- int uid;
for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
if (profile->stage[i].init) {
@@ -6190,10 +6264,6 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
}
}
- uid = mlx5_ib_devx_create(dev);
- if (uid > 0)
- dev->devx_whitelist_uid = uid;
-
dev->profile = profile;
dev->ib_active = true;
@@ -6221,12 +6291,18 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
mlx5_ib_stage_roce_init,
mlx5_ib_stage_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_SRQ,
+ mlx5_init_srq_table,
+ mlx5_cleanup_srq_table),
STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
mlx5_ib_stage_dev_res_init,
mlx5_ib_stage_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
+ mlx5_ib_stage_dev_notifier_init,
+ mlx5_ib_stage_dev_notifier_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_ODP,
mlx5_ib_stage_odp_init,
- NULL),
+ mlx5_ib_stage_odp_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
mlx5_ib_stage_counters_init,
mlx5_ib_stage_counters_cleanup),
@@ -6245,6 +6321,9 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_SPECS,
mlx5_ib_stage_populate_specs,
NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
+ mlx5_ib_stage_devx_init,
+ mlx5_ib_stage_devx_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
mlx5_ib_stage_ib_reg_init,
mlx5_ib_stage_ib_reg_cleanup),
@@ -6272,9 +6351,15 @@ static const struct mlx5_ib_profile nic_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
mlx5_ib_stage_rep_roce_init,
mlx5_ib_stage_rep_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_SRQ,
+ mlx5_init_srq_table,
+ mlx5_cleanup_srq_table),
STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
mlx5_ib_stage_dev_res_init,
mlx5_ib_stage_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
+ mlx5_ib_stage_dev_notifier_init,
+ mlx5_ib_stage_dev_notifier_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
mlx5_ib_stage_counters_init,
mlx5_ib_stage_counters_cleanup),
@@ -6296,9 +6381,6 @@ static const struct mlx5_ib_profile nic_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
mlx5_ib_stage_post_ib_reg_umr_init,
NULL),
- STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
- mlx5_ib_stage_rep_reg_init,
- mlx5_ib_stage_rep_reg_cleanup),
};
static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
@@ -6366,8 +6448,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
-
- return __mlx5_ib_add(dev, &nic_rep_profile);
+ dev->profile = &nic_rep_profile;
+ mlx5_ib_register_vport_reps(dev);
+ return dev;
}
return __mlx5_ib_add(dev, &pf_profile);
@@ -6389,16 +6472,17 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
}
dev = context;
- __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+ if (dev->profile == &nic_rep_profile)
+ mlx5_ib_unregister_vport_reps(dev);
+ else
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+
+ ib_dealloc_device((struct ib_device *)dev);
}
static struct mlx5_interface mlx5_ib_interface = {
.add = mlx5_ib_add,
.remove = mlx5_ib_remove,
- .event = mlx5_ib_event,
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- .pfault = mlx5_ib_pfault,
-#endif
.protocol = MLX5_INTERFACE_PROTOCOL_IB,
};
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index b651a7a6fde9..e507b6eb7c09 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -41,7 +41,6 @@
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
#include <linux/mlx5/fs.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
@@ -50,6 +49,8 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include "srq.h"
+
#define mlx5_ib_dbg(_dev, format, arg...) \
dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
__LINE__, current->pid, ##arg)
@@ -774,7 +775,9 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_CAPS,
MLX5_IB_STAGE_NON_DEFAULT_CB,
MLX5_IB_STAGE_ROCE,
+ MLX5_IB_STAGE_SRQ,
MLX5_IB_STAGE_DEVICE_RESOURCES,
+ MLX5_IB_STAGE_DEVICE_NOTIFIER,
MLX5_IB_STAGE_ODP,
MLX5_IB_STAGE_COUNTERS,
MLX5_IB_STAGE_CONG_DEBUGFS,
@@ -782,11 +785,11 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_BFREG,
MLX5_IB_STAGE_PRE_IB_REG_UMR,
MLX5_IB_STAGE_SPECS,
+ MLX5_IB_STAGE_WHITELIST_UID,
MLX5_IB_STAGE_IB_REG,
MLX5_IB_STAGE_POST_IB_REG_UMR,
MLX5_IB_STAGE_DELAY_DROP,
MLX5_IB_STAGE_CLASS_ATTR,
- MLX5_IB_STAGE_REP_REG,
MLX5_IB_STAGE_MAX,
};
@@ -806,6 +809,7 @@ struct mlx5_ib_multiport_info {
struct list_head list;
struct mlx5_ib_dev *ibdev;
struct mlx5_core_dev *mdev;
+ struct notifier_block mdev_events;
struct completion unref_comp;
u64 sys_image_guid;
u32 mdev_refcnt;
@@ -880,10 +884,20 @@ struct mlx5_ib_lb_state {
bool enabled;
};
+struct mlx5_ib_pf_eq {
+ struct mlx5_ib_dev *dev;
+ struct mlx5_eq *core;
+ struct work_struct work;
+ spinlock_t lock; /* Pagefaults spinlock */
+ struct workqueue_struct *wq;
+ mempool_t *pool;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
const struct uverbs_object_tree_def *driver_trees[7];
struct mlx5_core_dev *mdev;
+ struct notifier_block mdev_events;
struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
/* serialize update of capability mask
@@ -902,6 +916,8 @@ struct mlx5_ib_dev {
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct ib_odp_caps odp_caps;
u64 odp_max_size;
+ struct mlx5_ib_pf_eq odp_pf_eq;
+
/*
* Sleepable RCU that prevents destruction of MRs while they are still
* being used by a page fault handler.
@@ -920,6 +936,7 @@ struct mlx5_ib_dev {
struct mlx5_ib_delay_drop delay_drop;
const struct mlx5_ib_profile *profile;
struct mlx5_eswitch_rep *rep;
+ int lag_active;
struct mlx5_ib_lb_state lb;
u8 umr_fence;
@@ -927,6 +944,7 @@ struct mlx5_ib_dev {
u64 sys_image_guid;
struct mlx5_memic memic;
u16 devx_whitelist_uid;
+ struct mlx5_srq_table srq_table;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1158,9 +1176,8 @@ struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
-void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
- struct mlx5_pagefault *pfault);
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
+void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
@@ -1175,6 +1192,7 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
}
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
+static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index b04eb6775326..7309fb6bf0d2 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -37,6 +37,46 @@
#include "mlx5_ib.h"
#include "cmd.h"
+#include <linux/mlx5/eq.h>
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+ u32 bytes_committed;
+ u32 token;
+ u8 event_subtype;
+ u8 type;
+ union {
+ /* Initiator or send message responder pagefault details. */
+ struct {
+ /* Received packet size, only valid for responders. */
+ u32 packet_size;
+ /*
+ * Number of the resource (WQ) holding the faulting WQE; depends on type.
+ */
+ u32 wq_num;
+ /*
+ * WQE index. Refers to either the send queue or
+ * receive queue, according to event_subtype.
+ */
+ u16 wqe_index;
+ } wqe;
+ /* RDMA responder pagefault details */
+ struct {
+ u32 r_key;
+ /*
+ * Received packet size, minimal size page fault
+ * resolution required for forward progress.
+ */
+ u32 packet_size;
+ u32 rdma_op_len;
+ u64 rdma_va;
+ } rdma;
+ };
+
+ struct mlx5_ib_pf_eq *eq;
+ struct work_struct work;
+};
+
#define MAX_PREFETCH_LEN (4*1024*1024U)
/* Timeout in ms to wait for an active mmu notifier to complete when handling
@@ -304,14 +344,20 @@ static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
{
int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
pfault->wqe.wq_num : pfault->token;
- int ret = mlx5_core_page_fault_resume(dev->mdev,
- pfault->token,
- wq_num,
- pfault->type,
- error);
- if (ret)
- mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n",
- wq_num);
+ u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = { };
+ u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = { };
+ int err;
+
+ MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
+ MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
+ MLX5_SET(page_fault_resume_in, in, token, pfault->token);
+ MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
+ MLX5_SET(page_fault_resume_in, in, error, !!error);
+
+ err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
+ wq_num, err);
}
static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
@@ -506,14 +552,13 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
u64 io_virt, size_t bcnt, u32 *bytes_mapped)
{
+ int npages = 0, current_seq, page_shift, ret, np;
+ bool implicit = false;
struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
u64 access_mask = ODP_READ_ALLOWED_BIT;
- int npages = 0, page_shift, np;
u64 start_idx, page_mask;
struct ib_umem_odp *odp;
- int current_seq;
size_t size;
- int ret;
if (!odp_mr->page_list) {
odp = implicit_mr_get_data(mr, io_virt, bcnt);
@@ -521,7 +566,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
if (IS_ERR(odp))
return PTR_ERR(odp);
mr = odp->private;
-
+ implicit = true;
} else {
odp = odp_mr;
}
@@ -600,15 +645,15 @@ next_mr:
out:
if (ret == -EAGAIN) {
- if (mr->parent || !odp->dying) {
+ if (implicit || !odp->dying) {
unsigned long timeout =
msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
if (!wait_for_completion_timeout(
&odp->notifier_completion,
timeout)) {
- mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d\n",
- current_seq, odp->notifiers_seq);
+ mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
+ current_seq, odp->notifiers_seq, odp->notifiers_count);
}
} else {
/* The MR is being killed, kill the QP as well. */
@@ -674,6 +719,15 @@ next_mr:
goto srcu_unlock;
}
+ if (!mr->umem->is_odp) {
+ mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+ ret = 0;
+ goto srcu_unlock;
+ }
+
ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped);
if (ret < 0)
goto srcu_unlock;
@@ -735,6 +789,7 @@ next_mr:
head = frame;
bcnt -= frame->bcnt;
+ offset = 0;
}
break;
@@ -1016,16 +1071,31 @@ invalid_transport_or_opcode:
return 0;
}
-static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev,
- u32 wq_num)
+static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
+ u32 wq_num, int pf_type)
{
- struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num);
+ enum mlx5_res_type res_type;
- if (!mqp) {
- mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num);
+ switch (pf_type) {
+ case MLX5_WQE_PF_TYPE_RMP:
+ res_type = MLX5_RES_SRQ;
+ break;
+ case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
+ case MLX5_WQE_PF_TYPE_RESP:
+ case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
+ res_type = MLX5_RES_QP;
+ break;
+ default:
return NULL;
}
+ return mlx5_core_res_hold(dev->mdev, wq_num, res_type);
+}
+
+static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
+{
+ struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
+
return to_mibqp(mqp);
}
@@ -1039,18 +1109,30 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
int resume_with_error = 1;
u16 wqe_index = pfault->wqe.wqe_index;
int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
+ struct mlx5_core_rsc_common *res;
struct mlx5_ib_qp *qp;
+ res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
+ if (!res) {
+ mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
+ return;
+ }
+
+ switch (res->res) {
+ case MLX5_RES_QP:
+ qp = res_to_qp(res);
+ break;
+ default:
+ mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n", pfault->type);
+ goto resolve_page_fault;
+ }
+
buffer = (char *)__get_free_page(GFP_KERNEL);
if (!buffer) {
mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
goto resolve_page_fault;
}
- qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num);
- if (!qp)
- goto resolve_page_fault;
-
ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
PAGE_SIZE, &qp->trans_qp.base);
if (ret < 0) {
@@ -1090,6 +1172,7 @@ resolve_page_fault:
mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
pfault->wqe.wq_num, resume_with_error,
pfault->type);
+ mlx5_core_res_put(res);
free_page((unsigned long)buffer);
}
@@ -1168,10 +1251,8 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
}
}
-void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
- struct mlx5_pagefault *pfault)
+static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
{
- struct mlx5_ib_dev *dev = context;
u8 event_subtype = pfault->event_subtype;
switch (event_subtype) {
@@ -1188,6 +1269,203 @@ void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
}
}
+static void mlx5_ib_eqe_pf_action(struct work_struct *work)
+{
+ struct mlx5_pagefault *pfault = container_of(work,
+ struct mlx5_pagefault,
+ work);
+ struct mlx5_ib_pf_eq *eq = pfault->eq;
+
+ mlx5_ib_pfault(eq->dev, pfault);
+ mempool_free(pfault, eq->pool);
+}
+
+static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
+{
+ struct mlx5_eqe_page_fault *pf_eqe;
+ struct mlx5_pagefault *pfault;
+ struct mlx5_eqe *eqe;
+ int cc = 0;
+
+ while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
+ pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
+ if (!pfault) {
+ schedule_work(&eq->work);
+ break;
+ }
+
+ pf_eqe = &eqe->data.page_fault;
+ pfault->event_subtype = eqe->sub_type;
+ pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
+
+ mlx5_ib_dbg(eq->dev,
+ "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
+ eqe->sub_type, pfault->bytes_committed);
+
+ switch (eqe->sub_type) {
+ case MLX5_PFAULT_SUBTYPE_RDMA:
+ /* RDMA based event */
+ pfault->type =
+ be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
+ pfault->token =
+ be32_to_cpu(pf_eqe->rdma.pftype_token) &
+ MLX5_24BIT_MASK;
+ pfault->rdma.r_key =
+ be32_to_cpu(pf_eqe->rdma.r_key);
+ pfault->rdma.packet_size =
+ be16_to_cpu(pf_eqe->rdma.packet_length);
+ pfault->rdma.rdma_op_len =
+ be32_to_cpu(pf_eqe->rdma.rdma_op_len);
+ pfault->rdma.rdma_va =
+ be64_to_cpu(pf_eqe->rdma.rdma_va);
+ mlx5_ib_dbg(eq->dev,
+ "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
+ pfault->type, pfault->token,
+ pfault->rdma.r_key);
+ mlx5_ib_dbg(eq->dev,
+ "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
+ pfault->rdma.rdma_op_len,
+ pfault->rdma.rdma_va);
+ break;
+
+ case MLX5_PFAULT_SUBTYPE_WQE:
+ /* WQE based event */
+ pfault->type =
+ (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
+ pfault->token =
+ be32_to_cpu(pf_eqe->wqe.token);
+ pfault->wqe.wq_num =
+ be32_to_cpu(pf_eqe->wqe.pftype_wq) &
+ MLX5_24BIT_MASK;
+ pfault->wqe.wqe_index =
+ be16_to_cpu(pf_eqe->wqe.wqe_index);
+ pfault->wqe.packet_size =
+ be16_to_cpu(pf_eqe->wqe.packet_length);
+ mlx5_ib_dbg(eq->dev,
+ "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
+ pfault->type, pfault->token,
+ pfault->wqe.wq_num,
+ pfault->wqe.wqe_index);
+ break;
+
+ default:
+ mlx5_ib_warn(eq->dev,
+ "Unsupported page fault event sub-type: 0x%02hhx\n",
+ eqe->sub_type);
+ /* Unsupported page faults should still be
+ * resolved by the page fault handler
+ */
+ }
+
+ pfault->eq = eq;
+ INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
+ queue_work(eq->wq, &pfault->work);
+
+ cc = mlx5_eq_update_cc(eq->core, ++cc);
+ }
+
+ mlx5_eq_update_ci(eq->core, cc, 1);
+}
+
+static irqreturn_t mlx5_ib_eq_pf_int(int irq, void *eq_ptr)
+{
+ struct mlx5_ib_pf_eq *eq = eq_ptr;
+ unsigned long flags;
+
+ if (spin_trylock_irqsave(&eq->lock, flags)) {
+ mlx5_ib_eq_pf_process(eq);
+ spin_unlock_irqrestore(&eq->lock, flags);
+ } else {
+ schedule_work(&eq->work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* mempool_refill() was proposed but unfortunately wasn't accepted
+ * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
+ * Cheap workaround.
+ */
+static void mempool_refill(mempool_t *pool)
+{
+ while (pool->curr_nr < pool->min_nr)
+ mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
+}
+
+static void mlx5_ib_eq_pf_action(struct work_struct *work)
+{
+ struct mlx5_ib_pf_eq *eq =
+ container_of(work, struct mlx5_ib_pf_eq, work);
+
+ mempool_refill(eq->pool);
+
+ spin_lock_irq(&eq->lock);
+ mlx5_ib_eq_pf_process(eq);
+ spin_unlock_irq(&eq->lock);
+}
+
+enum {
+ MLX5_IB_NUM_PF_EQE = 0x1000,
+ MLX5_IB_NUM_PF_DRAIN = 64,
+};
+
+static int
+mlx5_ib_create_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
+{
+ struct mlx5_eq_param param = {};
+ int err;
+
+ INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
+ spin_lock_init(&eq->lock);
+ eq->dev = dev;
+
+ eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
+ sizeof(struct mlx5_pagefault));
+ if (!eq->pool)
+ return -ENOMEM;
+
+ eq->wq = alloc_workqueue("mlx5_ib_page_fault",
+ WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
+ MLX5_NUM_CMD_EQE);
+ if (!eq->wq) {
+ err = -ENOMEM;
+ goto err_mempool;
+ }
+
+ param = (struct mlx5_eq_param) {
+ .index = MLX5_EQ_PFAULT_IDX,
+ .mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
+ .nent = MLX5_IB_NUM_PF_EQE,
+ .context = eq,
+ .handler = mlx5_ib_eq_pf_int
+ };
+ eq->core = mlx5_eq_create_generic(dev->mdev, "mlx5_ib_page_fault_eq", &param);
+ if (IS_ERR(eq->core)) {
+ err = PTR_ERR(eq->core);
+ goto err_wq;
+ }
+
+ return 0;
+err_wq:
+ destroy_workqueue(eq->wq);
+err_mempool:
+ mempool_destroy(eq->pool);
+ return err;
+}
+
+static int
+mlx5_ib_destroy_pf_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
+{
+ int err;
+
+ err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
+ cancel_work_sync(&eq->work);
+ destroy_workqueue(eq->wq);
+ mempool_destroy(eq->pool);
+
+ return err;
+}
+
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
{
if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
@@ -1216,7 +1494,7 @@ void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent)
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
- int ret;
+ int ret = 0;
if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
@@ -1226,7 +1504,20 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
}
}
- return 0;
+ if (!MLX5_CAP_GEN(dev->mdev, pg))
+ return ret;
+
+ ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);
+
+ return ret;
+}
+
+void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, pg))
+ return;
+
+ mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
}
int mlx5_ib_odp_init(void)
@@ -1236,4 +1527,3 @@ int mlx5_ib_odp_init(void)
return 0;
}
-
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 6841c0f9237f..a0e9ff763d42 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2633,8 +2633,7 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
if (access_flags & IB_ACCESS_REMOTE_READ)
*hw_access_flags |= MLX5_QP_BIT_RRE;
- if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
- qp->ibqp.qp_type == IB_QPT_RC) {
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
int atomic_mode;
atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
@@ -3259,7 +3258,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
(ibqp->qp_type == IB_QPT_RAW_PACKET) ||
(ibqp->qp_type == IB_QPT_XRC_INI) ||
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
- if (mlx5_lag_is_active(dev->mdev)) {
+ if (dev->lag_active) {
u8 p = mlx5_core_native_port_num(dev->mdev);
tx_affinity = get_tx_affinity(dev, pd, base, p);
context->flags |= cpu_to_be32(tx_affinity << 24);
@@ -4678,17 +4677,18 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
goto out;
}
- if (wr->opcode == IB_WR_LOCAL_INV ||
- wr->opcode == IB_WR_REG_MR) {
+ if (wr->opcode == IB_WR_REG_MR) {
fence = dev->umr_fence;
next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
- } else if (wr->send_flags & IB_SEND_FENCE) {
- if (qp->next_fence)
- fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
- else
- fence = MLX5_FENCE_MODE_FENCE;
- } else {
- fence = qp->next_fence;
+ } else {
+ if (wr->send_flags & IB_SEND_FENCE) {
+ if (qp->next_fence)
+ fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+ else
+ fence = MLX5_FENCE_MODE_FENCE;
+ } else {
+ fence = qp->next_fence;
+ }
}
switch (ibqp->qp_type) {
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index d012e7dbcc38..91dcd3918d96 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -1,46 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
-
#include "mlx5_ib.h"
-
-/* not supported currently */
-static int srq_signature;
+#include "srq.h"
static void *get_wqe(struct mlx5_ib_srq *srq, int n)
{
@@ -202,7 +171,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
err = -ENOMEM;
goto err_in;
}
- srq->wq_sig = !!srq_signature;
+ srq->wq_sig = 0;
in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
@@ -327,7 +296,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
in.pd = to_mpd(pd)->pdn;
in.db_record = srq->db.dma;
- err = mlx5_core_create_srq(dev->mdev, &srq->msrq, &in);
+ err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
kvfree(in.pas);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
@@ -351,7 +320,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
return &srq->ibsrq;
err_core:
- mlx5_core_destroy_srq(dev->mdev, &srq->msrq);
+ mlx5_cmd_destroy_srq(dev, &srq->msrq);
err_usr_kern_srq:
if (pd->uobject)
@@ -381,7 +350,7 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
return -EINVAL;
mutex_lock(&srq->mutex);
- ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
+ ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
mutex_unlock(&srq->mutex);
if (ret)
@@ -402,7 +371,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
if (!out)
return -ENOMEM;
- ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
+ ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
if (ret)
goto out_box;
@@ -420,7 +389,7 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq)
struct mlx5_ib_dev *dev = to_mdev(srq->device);
struct mlx5_ib_srq *msrq = to_msrq(srq);
- mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);
+ mlx5_cmd_destroy_srq(dev, &msrq->msrq);
if (srq->uobject) {
mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
diff --git a/drivers/infiniband/hw/mlx5/srq.h b/drivers/infiniband/hw/mlx5/srq.h
new file mode 100644
index 000000000000..75eb5839ae95
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/srq.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2018, Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef MLX5_IB_SRQ_H
+#define MLX5_IB_SRQ_H
+
+enum {
+ MLX5_SRQ_FLAG_ERR = (1 << 0),
+ MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
+ MLX5_SRQ_FLAG_RNDV = (1 << 2),
+};
+
+struct mlx5_srq_attr {
+ u32 type;
+ u32 flags;
+ u32 log_size;
+ u32 wqe_shift;
+ u32 log_page_size;
+ u32 wqe_cnt;
+ u32 srqn;
+ u32 xrcd;
+ u32 page_offset;
+ u32 cqn;
+ u32 pd;
+ u32 lwm;
+ u32 user_index;
+ u64 db_record;
+ __be64 *pas;
+ u32 tm_log_list_size;
+ u32 tm_next_tag;
+ u32 tm_hw_phase_cnt;
+ u32 tm_sw_phase_cnt;
+ u16 uid;
+};
+
+struct mlx5_ib_dev;
+
+struct mlx5_core_srq {
+ struct mlx5_core_rsc_common common; /* must be first */
+ u32 srqn;
+ int max;
+ size_t max_gs;
+ size_t max_avail_gather;
+ int wqe_shift;
+ void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
+
+ atomic_t refcount;
+ struct completion free;
+ u16 uid;
+};
+
+struct mlx5_srq_table {
+ struct notifier_block nb;
+	/* protect radix tree */
+ spinlock_t lock;
+ struct radix_tree_root tree;
+};
+
+int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in);
+int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
+int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out);
+int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq);
+struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn);
+
+int mlx5_init_srq_table(struct mlx5_ib_dev *dev);
+void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev);
+#endif /* MLX5_IB_SRQ_H */
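
The new header keeps the SRQ command interface private to the mlx5 IB driver: the former mlx5_core_*_srq entry points exported from the core driver become mlx5_cmd_* functions that take the IB device. A minimal call sequence, sketched for illustration only (error handling elided; dev, srq, lwm and the attribute contents come from the caller):

	struct mlx5_srq_attr in = {};	/* caller fills log_size, wqe_shift, pas, ... */

	err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);	/* also inserts srqn into dev->srq_table */
	if (!err)
		err = mlx5_cmd_arm_srq(dev, &srq->msrq, lwm, 1);	/* request a limit event */
	...
	mlx5_cmd_destroy_srq(dev, &srq->msrq);	/* removes srqn from the table again */
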
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 6a6fc9be01e6..7aaaffbd4afa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -1,67 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
-#include <linux/mlx5/srq.h>
-#include <rdma/ib_verbs.h>
-#include "mlx5_core.h"
-#include <linux/mlx5/transobj.h>
-
-void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
-{
- struct mlx5_srq_table *table = &dev->priv.srq_table;
- struct mlx5_core_srq *srq;
-
- spin_lock(&table->lock);
-
- srq = radix_tree_lookup(&table->tree, srqn);
- if (srq)
- atomic_inc(&srq->refcount);
-
- spin_unlock(&table->lock);
-
- if (!srq) {
- mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
- return;
- }
-
- srq->event(srq, event_type);
-
- if (atomic_dec_and_test(&srq->refcount))
- complete(&srq->free);
-}
+#include "mlx5_ib.h"
+#include "srq.h"
static int get_pas_size(struct mlx5_srq_attr *in)
{
@@ -132,9 +78,9 @@ static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
}
-struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
+struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
{
- struct mlx5_srq_table *table = &dev->priv.srq_table;
+ struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *srq;
spin_lock(&table->lock);
@@ -147,9 +93,8 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
return srq;
}
-EXPORT_SYMBOL(mlx5_core_get_srq);
-static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *in)
{
u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
@@ -176,7 +121,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(create_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_SRQ);
- err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
sizeof(create_out));
kvfree(create_in);
if (!err) {
@@ -187,8 +132,7 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
return err;
}
-static int destroy_srq_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq)
+static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
@@ -198,11 +142,11 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid);
- return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
- srq_out, sizeof(srq_out));
+ return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
+ sizeof(srq_out));
}
-static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
@@ -214,11 +158,11 @@ static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(arm_rq_in, srq_in, lwm, lwm);
MLX5_SET(arm_rq_in, srq_in, uid, srq->uid);
- return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
- srq_out, sizeof(srq_out));
+ return mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
+ sizeof(srq_out));
}
-static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
@@ -233,8 +177,8 @@ static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_srq_in, srq_in, opcode,
MLX5_CMD_OP_QUERY_SRQ);
MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
- err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
- srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
+ err = mlx5_cmd_exec(dev->mdev, srq_in, sizeof(srq_in), srq_out,
+ MLX5_ST_SZ_BYTES(query_srq_out));
if (err)
goto out;
@@ -247,7 +191,7 @@ out:
return err;
}
-static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
+static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_srq_attr *in)
{
@@ -277,7 +221,7 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_CMD_OP_CREATE_XRC_SRQ);
memset(create_out, 0, sizeof(create_out));
- err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
sizeof(create_out));
if (err)
goto out;
@@ -289,7 +233,7 @@ out:
return err;
}
-static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
+static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq)
{
u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
@@ -300,12 +244,12 @@ static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid);
- return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
}
-static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq, u16 lwm)
+static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm)
{
u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
@@ -316,11 +260,11 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);
MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid);
- return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+ return mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
xrcsrq_out, sizeof(xrcsrq_out));
}
-static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
+static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
@@ -338,8 +282,8 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
MLX5_CMD_OP_QUERY_XRC_SRQ);
MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
- err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
- MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+ err = mlx5_cmd_exec(dev->mdev, xrcsrq_in, sizeof(xrcsrq_in),
+ xrcsrq_out, MLX5_ST_SZ_BYTES(query_xrc_srq_out));
if (err)
goto out;
@@ -354,21 +298,27 @@ out:
return err;
}
-static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *in)
{
- void *create_in;
+ void *create_out = NULL;
+ void *create_in = NULL;
void *rmpc;
void *wq;
int pas_size;
+ int outlen;
int inlen;
int err;
pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+ outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
create_in = kvzalloc(inlen, GFP_KERNEL);
- if (!create_in)
- return -ENOMEM;
+ create_out = kvzalloc(outlen, GFP_KERNEL);
+ if (!create_in || !create_out) {
+ err = -ENOMEM;
+ goto out;
+ }
rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
@@ -378,16 +328,20 @@ static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
set_wq(wq, in);
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
- err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
- if (!err)
+ MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
+ if (!err) {
+ srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
srq->uid = in->uid;
+ }
+out:
kvfree(create_in);
+ kvfree(create_out);
return err;
}
-static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq)
+static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {};
@@ -395,22 +349,30 @@ static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
-static int arm_rmp_cmd(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq,
+static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
u16 lwm)
{
- void *in;
+ void *out = NULL;
+ void *in = NULL;
void *rmpc;
void *wq;
void *bitmask;
+ int outlen;
+ int inlen;
int err;
- in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
+ inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
+ outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
@@ -422,25 +384,39 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev,
MLX5_SET(wq, wq, lwm, lwm);
MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
- err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
+out:
kvfree(in);
+ kvfree(out);
return err;
}
-static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
- u32 *rmp_out;
+ u32 *rmp_out = NULL;
+ u32 *rmp_in = NULL;
void *rmpc;
+ int outlen;
+ int inlen;
int err;
- rmp_out = kvzalloc(MLX5_ST_SZ_BYTES(query_rmp_out), GFP_KERNEL);
- if (!rmp_out)
- return -ENOMEM;
+ outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+ inlen = MLX5_ST_SZ_BYTES(query_rmp_in);
- err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
+ rmp_out = kvzalloc(outlen, GFP_KERNEL);
+ rmp_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!rmp_out || !rmp_in) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
+ MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
+ err = mlx5_cmd_exec(dev->mdev, rmp_in, inlen, rmp_out, outlen);
if (err)
goto out;
@@ -451,10 +427,11 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
out:
kvfree(rmp_out);
+ kvfree(rmp_in);
return err;
}
-static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *in)
{
u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
@@ -489,7 +466,7 @@ static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(xrqc, xrqc, cqn, in->cqn);
MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
MLX5_SET(create_xrq_in, create_in, uid, in->uid);
- err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
sizeof(create_out));
kvfree(create_in);
if (!err) {
@@ -500,7 +477,7 @@ static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
return err;
}
-static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
+static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};
@@ -509,10 +486,10 @@ static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
-static int arm_xrq_cmd(struct mlx5_core_dev *dev,
+static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
struct mlx5_core_srq *srq,
u16 lwm)
{
@@ -525,10 +502,10 @@ static int arm_xrq_cmd(struct mlx5_core_dev *dev,
MLX5_SET(arm_rq_in, in, lwm, lwm);
MLX5_SET(arm_rq_in, in, uid, srq->uid);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
-static int query_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *out)
{
u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
@@ -544,7 +521,7 @@ static int query_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);
- err = mlx5_cmd_exec(dev, in, sizeof(in), xrq_out, outlen);
+ err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), xrq_out, outlen);
if (err)
goto out;
@@ -567,11 +544,10 @@ out:
return err;
}
-static int create_srq_split(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq,
+static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
struct mlx5_srq_attr *in)
{
- if (!dev->issi)
+ if (!dev->mdev->issi)
return create_srq_cmd(dev, srq, in);
switch (srq->common.res) {
case MLX5_RES_XSRQ:
@@ -583,10 +559,9 @@ static int create_srq_split(struct mlx5_core_dev *dev,
}
}
-static int destroy_srq_split(struct mlx5_core_dev *dev,
- struct mlx5_core_srq *srq)
+static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
- if (!dev->issi)
+ if (!dev->mdev->issi)
return destroy_srq_cmd(dev, srq);
switch (srq->common.res) {
case MLX5_RES_XSRQ:
@@ -598,11 +573,11 @@ static int destroy_srq_split(struct mlx5_core_dev *dev,
}
}
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in)
+int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
{
+ struct mlx5_srq_table *table = &dev->srq_table;
int err;
- struct mlx5_srq_table *table = &dev->priv.srq_table;
switch (in->type) {
case IB_SRQT_XRC:
@@ -625,10 +600,8 @@ int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
spin_lock_irq(&table->lock);
err = radix_tree_insert(&table->tree, srq->srqn, srq);
spin_unlock_irq(&table->lock);
- if (err) {
- mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
+ if (err)
goto err_destroy_srq_split;
- }
return 0;
@@ -637,25 +610,18 @@ err_destroy_srq_split:
return err;
}
-EXPORT_SYMBOL(mlx5_core_create_srq);
-int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
+int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
- struct mlx5_srq_table *table = &dev->priv.srq_table;
+ struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *tmp;
int err;
spin_lock_irq(&table->lock);
tmp = radix_tree_delete(&table->tree, srq->srqn);
spin_unlock_irq(&table->lock);
- if (!tmp) {
- mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
- return -EINVAL;
- }
- if (tmp != srq) {
- mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
+ if (!tmp || tmp != srq)
return -EINVAL;
- }
err = destroy_srq_split(dev, srq);
if (err)
@@ -667,12 +633,11 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
return 0;
}
-EXPORT_SYMBOL(mlx5_core_destroy_srq);
-int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *out)
+int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out)
{
- if (!dev->issi)
+ if (!dev->mdev->issi)
return query_srq_cmd(dev, srq, out);
switch (srq->common.res) {
case MLX5_RES_XSRQ:
@@ -683,12 +648,11 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
return query_rmp_cmd(dev, srq, out);
}
}
-EXPORT_SYMBOL(mlx5_core_query_srq);
-int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- u16 lwm, int is_srq)
+int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
{
- if (!dev->issi)
+ if (!dev->mdev->issi)
return arm_srq_cmd(dev, srq, lwm, is_srq);
switch (srq->common.res) {
case MLX5_RES_XSRQ:
@@ -699,18 +663,60 @@ int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
return arm_rmp_cmd(dev, srq, lwm);
}
}
-EXPORT_SYMBOL(mlx5_core_arm_srq);
-void mlx5_init_srq_table(struct mlx5_core_dev *dev)
+static int srq_event_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_srq_table *table;
+ struct mlx5_core_srq *srq;
+ struct mlx5_eqe *eqe;
+ u32 srqn;
+
+ if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
+ type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
+ return NOTIFY_DONE;
+
+ table = container_of(nb, struct mlx5_srq_table, nb);
+
+ eqe = data;
+ srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+
+ spin_lock(&table->lock);
+
+ srq = radix_tree_lookup(&table->tree, srqn);
+ if (srq)
+ atomic_inc(&srq->refcount);
+
+ spin_unlock(&table->lock);
+
+ if (!srq)
+ return NOTIFY_OK;
+
+ srq->event(srq, eqe->type);
+
+ if (atomic_dec_and_test(&srq->refcount))
+ complete(&srq->free);
+
+ return NOTIFY_OK;
+}
+
+int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
{
- struct mlx5_srq_table *table = &dev->priv.srq_table;
+ struct mlx5_srq_table *table = &dev->srq_table;
memset(table, 0, sizeof(*table));
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+
+ table->nb.notifier_call = srq_event_notifier;
+ mlx5_notifier_register(dev->mdev, &table->nb);
+
+ return 0;
}
-void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
+void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
{
- /* nothing */
+ struct mlx5_srq_table *table = &dev->srq_table;
+
+ mlx5_notifier_unregister(dev->mdev, &table->nb);
}
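
srq_event_notifier() takes a reference under table->lock before calling srq->event(), so a concurrent destroy cannot free the SRQ while the callback runs. The destroy path pairs with this by dropping its reference and waiting on the completion; roughly (the waiting side lives outside this hunk, so this is a sketch of the pattern rather than a quote):

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);	/* returns once the last event reference is dropped */
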
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index e96ffff61c3a..cc4dce5c3e5f 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -223,11 +223,11 @@ static struct sk_buff *nes_get_next_skb(struct nes_device *nesdev, struct nes_qp
}
old_skb = skb;
- skb = skb->next;
+ skb = skb_peek_next(skb, &nesqp->pau_list);
skb_unlink(old_skb, &nesqp->pau_list);
nes_mgt_free_skb(nesdev, old_skb, PCI_DMA_TODEVICE);
nes_rem_ref_cm_node(nesqp->cm_node);
- if (skb == (struct sk_buff *)&nesqp->pau_list)
+ if (!skb)
goto out;
}
return skb;
@@ -551,14 +551,14 @@ static void queue_fpdus(struct sk_buff *skb, struct nes_vnic *nesvnic, struct ne
/* Queue skb by sequence number */
if (skb_queue_len(&nesqp->pau_list) == 0) {
- skb_queue_head(&nesqp->pau_list, skb);
+ __skb_queue_head(&nesqp->pau_list, skb);
} else {
skb_queue_walk(&nesqp->pau_list, tmpskb) {
cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
if (before(seqnum, cb->seqnum))
break;
}
- skb_insert(tmpskb, skb, &nesqp->pau_list);
+ __skb_insert(skb, tmpskb->prev, tmpskb, &nesqp->pau_list);
}
if (nesqp->pau_state == PAU_READY)
process_it = true;
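
Two changes interact above: skb_peek_next() returns NULL at the end of the list, which removes the old comparison against the queue-head sentinel, and the queue is now built with the unlocked __skb_queue_head()/__skb_insert() helpers, which assume the caller already serializes access to pau_list. The resulting ordered insert by sequence number is:

	skb_queue_walk(&nesqp->pau_list, tmpskb) {
		cb = (struct nes_rskb_cb *)&tmpskb->cb[0];
		if (before(seqnum, cb->seqnum))
			break;	/* insert before the first larger sequence number */
	}
	__skb_insert(skb, tmpskb->prev, tmpskb, &nesqp->pau_list);
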
diff --git a/drivers/infiniband/sw/rdmavt/ah.c b/drivers/infiniband/sw/rdmavt/ah.c
index 89ec0f64abfc..084bb4baebb5 100644
--- a/drivers/infiniband/sw/rdmavt/ah.c
+++ b/drivers/infiniband/sw/rdmavt/ah.c
@@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah);
* rvt_create_ah - create an address handle
* @pd: the protection domain
* @ah_attr: the attributes of the AH
+ * @udata: pointer to user's input/output buffer information.
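+ *	(in/out direction is determined by the verbs call)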
*
* This may be called from interrupt context.
*
* Return: newly allocated ah
*/
struct ib_ah *rvt_create_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr)
+ struct rdma_ah_attr *ah_attr,
+ struct ib_udata *udata)
{
struct rvt_ah *ah;
struct rvt_dev_info *dev = ib_to_rvt(pd->device);
diff --git a/drivers/infiniband/sw/rdmavt/ah.h b/drivers/infiniband/sw/rdmavt/ah.h
index 16105af99189..25271b48a683 100644
--- a/drivers/infiniband/sw/rdmavt/ah.h
+++ b/drivers/infiniband/sw/rdmavt/ah.h
@@ -51,7 +51,8 @@
#include <rdma/rdma_vt.h>
struct ib_ah *rvt_create_ah(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr);
+ struct rdma_ah_attr *ah_attr,
+ struct ib_udata *udata);
int rvt_destroy_ah(struct ib_ah *ibah);
int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8710214594d8..6214d8c0d546 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -167,7 +167,7 @@ int ipoib_open(struct net_device *dev)
if (flags & IFF_UP)
continue;
- dev_change_flags(cpriv->dev, flags | IFF_UP);
+ dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
}
up_read(&priv->vlan_rwsem);
}
@@ -207,7 +207,7 @@ static int ipoib_stop(struct net_device *dev)
if (!(flags & IFF_UP))
continue;
- dev_change_flags(cpriv->dev, flags & ~IFF_UP);
+ dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
}
up_read(&priv->vlan_rwsem);
}
@@ -1823,7 +1823,7 @@ static void ipoib_parent_unregister_pre(struct net_device *ndev)
 * running ensures it will not add more work.
*/
rtnl_lock();
- dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
+ dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP, NULL);
rtnl_unlock();
/* ipoib_event() cannot be running once this returns */
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 946b623ba5eb..4ff3d98fa6a4 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1124,7 +1124,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
pr_err("ib_check_mr_status failed, ret %d\n", ret);
- goto err;
+ /* Not a lot we can do, return ambiguous guard error */
+ *sector = 0;
+ return 0x1;
}
if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
@@ -1152,9 +1154,6 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
}
return 0;
-err:
- /* Not alot we can do here, return ambiguous guard error */
- return 0x1;
}
void iser_err_comp(struct ib_wc *wc, const char *type)
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d4b9db487b16..cfc8b94527b9 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -480,18 +480,18 @@ static const u8 xboxone_hori_init[] = {
};
/*
- * This packet is required for some of the PDP pads to start
+ * This packet is required for most (all?) of the PDP pads to start
* sending input reports. These pads include: (0x0e6f:0x02ab),
- * (0x0e6f:0x02a4).
+ * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
static const u8 xboxone_pdp_init1[] = {
0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
};
/*
- * This packet is required for some of the PDP pads to start
+ * This packet is required for most (all?) of the PDP pads to start
* sending input reports. These pads include: (0x0e6f:0x02ab),
- * (0x0e6f:0x02a4).
+ * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
*/
static const u8 xboxone_pdp_init2[] = {
0x06, 0x20, 0x00, 0x02, 0x01, 0x00
@@ -527,12 +527,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
- XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
- XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
- XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
- XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
- XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
- XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
+ XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
+ XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 7e75835e220f..850bb259c20e 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -841,7 +841,7 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
if (param[0] != 3) {
param[0] = 2;
if (ps2_command(ps2dev, param, ATKBD_CMD_SSCANSET))
- return 2;
+ return 2;
}
ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MBR);
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 81be6f781f0b..d56001181598 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -493,7 +493,8 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev)
for (i = 0; i < ARRAY_SIZE(cros_ec_keyb_bs); i++) {
const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i];
- if (buttons & BIT(map->bit))
+ if ((map->ev_type == EV_KEY && (buttons & BIT(map->bit))) ||
+ (map->ev_type == EV_SW && (switches & BIT(map->bit))))
input_set_capability(idev, map->ev_type, map->code);
}
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index f51ae09596ef..403452ef00e6 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -407,7 +407,7 @@ matrix_keypad_parse_dt(struct device *dev)
struct matrix_keypad_platform_data *pdata;
struct device_node *np = dev->of_node;
unsigned int *gpios;
- int i, nrow, ncol;
+ int ret, i, nrow, ncol;
if (!np) {
dev_err(dev, "device lacks DT data\n");
@@ -452,12 +452,19 @@ matrix_keypad_parse_dt(struct device *dev)
return ERR_PTR(-ENOMEM);
}
- for (i = 0; i < pdata->num_row_gpios; i++)
- gpios[i] = of_get_named_gpio(np, "row-gpios", i);
+ for (i = 0; i < nrow; i++) {
+ ret = of_get_named_gpio(np, "row-gpios", i);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ gpios[i] = ret;
+ }
- for (i = 0; i < pdata->num_col_gpios; i++)
- gpios[pdata->num_row_gpios + i] =
- of_get_named_gpio(np, "col-gpios", i);
+ for (i = 0; i < ncol; i++) {
+ ret = of_get_named_gpio(np, "col-gpios", i);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ gpios[nrow + i] = ret;
+ }
pdata->row_gpios = gpios;
pdata->col_gpios = &gpios[pdata->num_row_gpios];
@@ -484,10 +491,8 @@ static int matrix_keypad_probe(struct platform_device *pdev)
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
pdata = matrix_keypad_parse_dt(&pdev->dev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "no platform data defined\n");
+ if (IS_ERR(pdata))
return PTR_ERR(pdata);
- }
} else if (!pdata->keymap_data) {
dev_err(&pdev->dev, "no keymap data defined\n");
return -EINVAL;
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 46406345742b..840e53732753 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -60,8 +60,18 @@
/* OMAP4 values */
#define OMAP4_VAL_IRQDISABLE 0x0
-#define OMAP4_VAL_DEBOUNCINGTIME 0x7
-#define OMAP4_VAL_PVT 0x7
+
+/*
+ * Errata i689: If a key is released for a time shorter than debounce time,
+ * the keyboard will idle and never detect the key release. The workaround
+ * is to use at least a 12ms debounce time. See omap5432 TRM chapter
+ * "26.4.6.2 Keyboard Controller Timer" for more information.
+ */
+#define OMAP4_KEYPAD_PTV_DIV_128 0x6
+#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv) \
+ ((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
+#define OMAP4_VAL_DEBOUNCINGTIME_16MS \
+ OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
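+
+/*
+ * Worked example, for illustration: with PTV = 6 the debounce clock is
+ * 32768 / 2^7 = 256 Hz, i.e. ~3.9 ms per tick. In C integer arithmetic
+ * the macro evaluates to (16 * 1000) / (128 * (1000000 / 32768)) - 1 =
+ * 16000 / 3840 - 1 = 3, giving a hardware debounce of (3 + 1) * 128 /
+ * 32768 s ~= 15.6 ms, safely above the 12 ms minimum from errata i689.
+ */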
enum {
KBD_REVISION_OMAP4 = 0,
@@ -116,12 +126,8 @@ static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id)
{
struct omap4_keypad *keypad_data = dev_id;
- if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) {
- /* Disable interrupts */
- kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
- OMAP4_VAL_IRQDISABLE);
+ if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS))
return IRQ_WAKE_THREAD;
- }
return IRQ_NONE;
}
@@ -163,11 +169,6 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
- /* enable interrupts */
- kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
- OMAP4_DEF_IRQENABLE_EVENTEN |
- OMAP4_DEF_IRQENABLE_LONGKEY);
-
return IRQ_HANDLED;
}
@@ -181,9 +182,9 @@ static int omap4_keypad_open(struct input_dev *input)
kbd_writel(keypad_data, OMAP4_KBD_CTRL,
OMAP4_DEF_CTRL_NOSOFTMODE |
- (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT));
+ (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT));
kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
- OMAP4_VAL_DEBOUNCINGTIME);
+ OMAP4_VAL_DEBOUNCINGTIME_16MS);
/* clear pending interrupts */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
@@ -204,9 +205,10 @@ static void omap4_keypad_close(struct input_dev *input)
disable_irq(keypad_data->irq);
- /* Disable interrupts */
+ /* Disable interrupts and wake-up events */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
OMAP4_VAL_IRQDISABLE);
+ kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE, 0);
/* clear pending interrupts */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
@@ -355,7 +357,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
}
error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
- omap4_keypad_irq_thread_fn, 0,
+ omap4_keypad_irq_thread_fn, IRQF_ONESHOT,
"omap4-keypad", keypad_data);
if (error) {
dev_err(&pdev->dev, "failed to register interrupt\n");
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index b0f9d19b3410..a94b6494e71a 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1348,6 +1348,9 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0618", 0 },
{ "ELAN061C", 0 },
{ "ELAN061D", 0 },
+ { "ELAN061E", 0 },
+ { "ELAN0620", 0 },
+ { "ELAN0621", 0 },
{ "ELAN0622", 0 },
{ "ELAN1000", 0 },
{ }
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 2d95e8d93cc7..9fe075c137dc 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1767,6 +1767,18 @@ static int elantech_smbus = IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_SMBUS) ?
module_param_named(elantech_smbus, elantech_smbus, int, 0644);
MODULE_PARM_DESC(elantech_smbus, "Use a secondary bus for the Elantech device.");
+static const char * const i2c_blacklist_pnp_ids[] = {
+ /*
+ * These are known to not be working properly as bits are missing
+ * in elan_i2c.
+ */
+ "LEN2131", /* ThinkPad P52 w/ NFC */
+ "LEN2132", /* ThinkPad P52 */
+ "LEN2133", /* ThinkPad P72 w/ NFC */
+ "LEN2134", /* ThinkPad P72 */
+ NULL
+};
+
static int elantech_create_smbus(struct psmouse *psmouse,
struct elantech_device_info *info,
bool leave_breadcrumbs)
@@ -1802,10 +1814,12 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
if (elantech_smbus == ELANTECH_SMBUS_NOT_SET) {
/*
- * New ICs are enabled by default.
+ * New ICs are enabled by default, unless mentioned in
+ * i2c_blacklist_pnp_ids.
* Old ICs are up to the user to decide.
*/
- if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+ if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
+ psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
return -ENXIO;
}
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 5e85f3cca867..b6da0c1267e3 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -170,6 +170,8 @@ static const char * const smbus_pnp_ids[] = {
"LEN0048", /* X1 Carbon 3 */
"LEN0046", /* X250 */
"LEN004a", /* W541 */
+ "LEN005b", /* P50 */
+ "LEN005e", /* T560 */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -177,6 +179,8 @@ static const char * const smbus_pnp_ids[] = {
"LEN0096", /* X280 */
"LEN0097", /* X280 -> ALPS trackpoint */
"LEN200f", /* T450s */
+ "SYN3052", /* HP EliteBook 840 G4 */
+ "SYN3221", /* HP 15-ay000 */
NULL
};
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 49d8d53e50b7..96f9b5397367 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -381,9 +381,9 @@ static int __init gscps2_probe(struct parisc_device *dev)
goto fail;
#endif
- printk(KERN_INFO "serio: %s port at 0x%p irq %d @ %s\n",
+ pr_info("serio: %s port at 0x%08lx irq %d @ %s\n",
ps2port->port->name,
- ps2port->addr,
+ hpa,
ps2port->padev->irq,
ps2port->port->phys);
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 0b8a25c58d02..654252361653 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -884,8 +884,8 @@ static int __init hp_sdc_init(void)
"HP SDC NMI", &hp_sdc))
goto err2;
- printk(KERN_INFO PREFIX "HP SDC at 0x%p, IRQ %d (NMI IRQ %d)\n",
- (void *)hp_sdc.base_io, hp_sdc.irq, hp_sdc.nmi);
+ pr_info(PREFIX "HP SDC at 0x%08lx, IRQ %d (NMI IRQ %d)\n",
+ hp_sdc.base_io, hp_sdc.irq, hp_sdc.nmi);
hp_sdc_status_in8();
hp_sdc_data_in8();
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index 47a0e81a2989..a8b9be3e28db 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -177,7 +177,7 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev,
* state because the Enter-UP can trigger a wakeup at once.
*/
if (!(info & IS_BREAK))
- pm_wakeup_event(&hv_dev->device, 0);
+ pm_wakeup_hard_event(&hv_dev->device);
break;
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index 02fb11985819..42d3fd7e04d7 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Touch Screen driver for Renesas MIGO-R Platform
*
* Copyright (c) 2008 Magnus Damm
* Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>,
* Kenati Technologies Pvt Ltd.
- *
- * This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index b71673911aac..11ff32c68025 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ST1232 Touchscreen Controller Driver
*
@@ -7,15 +8,6 @@
* Using code from:
* - android.git.kernel.org: projects/kernel/common.git: synaptics_i2c_rmi.c
* Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/delay.h>
@@ -295,4 +287,4 @@ module_i2c_driver(st1232_ts_driver);
MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>");
MODULE_DESCRIPTION("SITRONIX ST1232 Touchscreen Controller Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 51a5ef0e96ed..3d1e60779078 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -150,6 +150,9 @@ config IMGPDC_IRQ
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
+config MADERA_IRQ
+ tristate
+
config IRQ_MIPS_CPU
bool
select GENERIC_IRQ_CHIP
@@ -195,6 +198,10 @@ config JCORE_AIC
help
Support for the J-Core integrated AIC.
+config RDA_INTC
+ bool
+ select IRQ_DOMAIN
+
config RENESAS_INTC_IRQPIN
bool
select IRQ_DOMAIN
@@ -391,6 +398,14 @@ config CSKY_APB_INTC
	  by C-SKY single core SOC systems. It uses an mmio-mapped apb-bus to
	  access the controller's registers.
+config IMX_IRQSTEER
+ bool "i.MX IRQSTEER support"
+ depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC
+ select IRQ_DOMAIN
+ help
+ Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
+
endmenu
config SIFIVE_PLIC
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 794c13d3ac3d..c93713d24b86 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o
obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_JCORE_AIC) += irq-jcore-aic.o
+obj-$(CONFIG_RDA_INTC) += irq-rda-intc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
@@ -91,3 +92,5 @@ obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o
obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
+obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o
+obj-$(CONFIG_MADERA_IRQ) += irq-madera.o
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index d2da8a1e6b1b..418245d31921 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2010 Broadcom
* Copyright 2012 Simon Arlott, Chris Boot, Stephen Warren
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Quirk 1: Shortcut interrupts don't set the bank 1/2 register pending bits
*
* If an interrupt fires on bank 1 that isn't in the shortcuts list, bit 8
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index dfe4a460340b..2038693f074c 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Root interrupt controller for the BCM2836 (Raspberry Pi 2).
*
* Copyright 2015 Broadcom
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/cpu.h>
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index 0a19618ce2c8..e4550e9c810b 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -105,7 +105,7 @@ static int __init dw_apb_ictl_init(struct device_node *np,
* DW IP can be configured to allow 2-64 irqs. We can determine
* the number of irqs supported by writing into enable register
* and look for bits not set, as corresponding flip-flops will
- * have been removed by sythesis tool.
+ * have been removed by synthesis tool.
*/
/* mask and enable all interrupts */
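
The comment documents a capacity probe rather than a hard-coded count: enable bits with no flip-flop behind them cannot be set and read back as zero. The idea, as a sketch (assuming the driver's APB_INT_ENABLE_L register offset):

	writel(~0U, iobase + APB_INT_ENABLE_L);		/* try to set every enable bit */
	nrirqs = fls(readl(iobase + APB_INT_ENABLE_L));	/* highest bit that stuck */
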
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 01e673c680cd..3c93c6f4d1f1 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -36,6 +36,18 @@ void gic_set_kvm_info(const struct gic_kvm_info *info)
gic_kvm_info = info;
}
+void gic_enable_of_quirks(const struct device_node *np,
+ const struct gic_quirk *quirks, void *data)
+{
+ for (; quirks->desc; quirks++) {
+ if (!of_device_is_compatible(np, quirks->compatible))
+ continue;
+ if (quirks->init(data))
+ pr_info("GIC: enabling workaround for %s\n",
+ quirks->desc);
+ }
+}
+
void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void *data)
{
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index 3919cd7c5285..97e58fb6c232 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -23,6 +23,7 @@
struct gic_quirk {
const char *desc;
+ const char *compatible;
bool (*init)(void *data);
u32 iidr;
u32 mask;
@@ -35,6 +36,8 @@ void gic_dist_config(void __iomem *base, int gic_irqs,
void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
void *data);
+void gic_enable_of_quirks(const struct device_node *np,
+ const struct gic_quirk *quirks, void *data);
void gic_set_kvm_info(const struct gic_kvm_info *info);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 8f87f40c9460..d3d4f65b377b 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -41,6 +41,8 @@
#include "irq-gic-common.h"
+#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
+
struct redist_region {
void __iomem *redist_base;
phys_addr_t phys_base;
@@ -55,6 +57,7 @@ struct gic_chip_data {
struct irq_domain *domain;
u64 redist_stride;
u32 nr_redist_regions;
+ u64 flags;
bool has_rss;
unsigned int irq_nr;
struct partition_desc *ppi_descs[16];
@@ -139,6 +142,9 @@ static void gic_enable_redist(bool enable)
u32 count = 1000000; /* 1s! */
u32 val;
+ if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
+ return;
+
rbase = gic_data_rdist_rd_base();
val = readl_relaxed(rbase + GICR_WAKER);
@@ -1067,6 +1073,15 @@ static const struct irq_domain_ops partition_domain_ops = {
.select = gic_irq_domain_select,
};
+static bool gic_enable_quirk_msm8996(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
+
+ return true;
+}
+
static int __init gic_init_bases(void __iomem *dist_base,
struct redist_region *rdist_regs,
u32 nr_redist_regions,
@@ -1271,6 +1286,16 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
gic_set_kvm_info(&gic_v3_kvm_info);
}
+static const struct gic_quirk gic_quirks[] = {
+ {
+ .desc = "GICv3: Qualcomm MSM8996 broken firmware",
+ .compatible = "qcom,msm8996-gic-v3",
+ .init = gic_enable_quirk_msm8996,
+ },
+ {
+ }
+};
+
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *dist_base;
@@ -1318,6 +1343,8 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
redist_stride = 0;
+ gic_enable_of_quirks(node, gic_quirks, &gic_data);
+
err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
redist_stride, &node->fwnode);
if (err)
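
gic_enable_of_quirks() (added in irq-gic-common.c above) walks the quirk table and runs .init for every entry whose .compatible matches the GIC node, logging the workaround when .init returns true. Extending the mechanism is just one more table entry; a hypothetical example (names invented for illustration):

	{
		.desc		= "GICv3: example vendor erratum",
		.compatible	= "vendor,example-gic-v3",
		.init		= gic_enable_quirk_example,
	},
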
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ced10c44b68a..ba2a37a27a54 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -604,8 +604,8 @@ void gic_dist_save(struct gic_chip_data *gic)
/*
* Restores the GIC distributor registers during resume or when coming out of
* idle. Must be called before enabling interrupts. If a level interrupt
- * that occured while the GIC was suspended is still present, it will be
- * handled normally, but any edge interrupts that occured will not be seen by
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
* the GIC and need to be handled by the platform-specific wakeup source.
*/
void gic_dist_restore(struct gic_chip_data *gic)
@@ -899,7 +899,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
gic_cpu_map[cpu] = 1 << new_cpu_id;
/*
- * Find all the peripheral interrupts targetting the current
+ * Find all the peripheral interrupts targeting the current
* CPU interface and migrate them to the new CPU interface.
* We skip DIST_TARGET 0 to 7 as they are read-only.
*/
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 4760307ab43f..66501ea4fd75 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -17,6 +17,9 @@
#define GPC_IMR1_CORE0 0x30
#define GPC_IMR1_CORE1 0x40
+#define GPC_IMR1_CORE2 0x1c0
+#define GPC_IMR1_CORE3 0x1d0
+
struct gpcv2_irqchip_data {
struct raw_spinlock rlock;
@@ -28,6 +31,11 @@ struct gpcv2_irqchip_data {
static struct gpcv2_irqchip_data *imx_gpcv2_instance;
+static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i)
+{
+ return cd->gpc_base + cd->cpu2wakeup + i * 4;
+}
+
static int gpcv2_wakeup_source_save(void)
{
struct gpcv2_irqchip_data *cd;
@@ -39,7 +47,7 @@ static int gpcv2_wakeup_source_save(void)
return 0;
for (i = 0; i < IMR_NUM; i++) {
- reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
+ reg = gpcv2_idx_to_reg(cd, i);
cd->saved_irq_mask[i] = readl_relaxed(reg);
writel_relaxed(cd->wakeup_sources[i], reg);
}
@@ -50,17 +58,14 @@ static int gpcv2_wakeup_source_save(void)
static void gpcv2_wakeup_source_restore(void)
{
struct gpcv2_irqchip_data *cd;
- void __iomem *reg;
int i;
cd = imx_gpcv2_instance;
if (!cd)
return;
- for (i = 0; i < IMR_NUM; i++) {
- reg = cd->gpc_base + cd->cpu2wakeup + i * 4;
- writel_relaxed(cd->saved_irq_mask[i], reg);
- }
+ for (i = 0; i < IMR_NUM; i++)
+ writel_relaxed(cd->saved_irq_mask[i], gpcv2_idx_to_reg(cd, i));
}
static struct syscore_ops imx_gpcv2_syscore_ops = {
@@ -73,12 +78,10 @@ static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
struct gpcv2_irqchip_data *cd = d->chip_data;
unsigned int idx = d->hwirq / 32;
unsigned long flags;
- void __iomem *reg;
u32 mask, val;
raw_spin_lock_irqsave(&cd->rlock, flags);
- reg = cd->gpc_base + cd->cpu2wakeup + idx * 4;
- mask = 1 << d->hwirq % 32;
+ mask = BIT(d->hwirq % 32);
val = cd->wakeup_sources[idx];
cd->wakeup_sources[idx] = on ? (val & ~mask) : (val | mask);
@@ -99,9 +102,9 @@ static void imx_gpcv2_irq_unmask(struct irq_data *d)
u32 val;
raw_spin_lock(&cd->rlock);
- reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
+ reg = gpcv2_idx_to_reg(cd, d->hwirq / 32);
val = readl_relaxed(reg);
- val &= ~(1 << d->hwirq % 32);
+ val &= ~BIT(d->hwirq % 32);
writel_relaxed(val, reg);
raw_spin_unlock(&cd->rlock);
@@ -115,9 +118,9 @@ static void imx_gpcv2_irq_mask(struct irq_data *d)
u32 val;
raw_spin_lock(&cd->rlock);
- reg = cd->gpc_base + cd->cpu2wakeup + d->hwirq / 32 * 4;
+ reg = gpcv2_idx_to_reg(cd, d->hwirq / 32);
val = readl_relaxed(reg);
- val |= 1 << (d->hwirq % 32);
+ val |= BIT(d->hwirq % 32);
writel_relaxed(val, reg);
raw_spin_unlock(&cd->rlock);
@@ -192,11 +195,19 @@ static const struct irq_domain_ops gpcv2_irqchip_data_domain_ops = {
.free = irq_domain_free_irqs_common,
};
+static const struct of_device_id gpcv2_of_match[] = {
+ { .compatible = "fsl,imx7d-gpc", .data = (const void *) 2 },
+ { .compatible = "fsl,imx8mq-gpc", .data = (const void *) 4 },
+ { /* END */ }
+};
+
static int __init imx_gpcv2_irqchip_init(struct device_node *node,
struct device_node *parent)
{
struct irq_domain *parent_domain, *domain;
struct gpcv2_irqchip_data *cd;
+ const struct of_device_id *id;
+ unsigned long core_num;
int i;
if (!parent) {
@@ -204,6 +215,14 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
return -ENODEV;
}
+ id = of_match_node(gpcv2_of_match, node);
+ if (!id) {
+ pr_err("%pOF: unknown compatibility string\n", node);
+ return -ENODEV;
+ }
+
+ core_num = (unsigned long)id->data;
+
parent_domain = irq_find_host(parent);
if (!parent_domain) {
pr_err("%pOF: unable to get parent domain\n", node);
@@ -212,7 +231,7 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
cd = kzalloc(sizeof(struct gpcv2_irqchip_data), GFP_KERNEL);
if (!cd) {
- pr_err("kzalloc failed!\n");
+ pr_err("%pOF: kzalloc failed!\n", node);
return -ENOMEM;
}
@@ -220,7 +239,7 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
cd->gpc_base = of_iomap(node, 0);
if (!cd->gpc_base) {
- pr_err("fsl-gpcv2: unable to map gpc registers\n");
+ pr_err("%pOF: unable to map gpc registers\n", node);
kfree(cd);
return -ENOMEM;
}
@@ -236,8 +255,17 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
/* Initially mask all interrupts */
for (i = 0; i < IMR_NUM; i++) {
- writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE0 + i * 4);
- writel_relaxed(~0, cd->gpc_base + GPC_IMR1_CORE1 + i * 4);
+ void __iomem *reg = cd->gpc_base + i * 4;
+
+ switch (core_num) {
+ case 4:
+ writel_relaxed(~0, reg + GPC_IMR1_CORE2);
+ writel_relaxed(~0, reg + GPC_IMR1_CORE3);
+ /* fall through */
+ case 2:
+ writel_relaxed(~0, reg + GPC_IMR1_CORE0);
+ writel_relaxed(~0, reg + GPC_IMR1_CORE1);
+ }
cd->wakeup_sources[i] = ~0;
}
@@ -262,4 +290,5 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
return 0;
}
-IRQCHIP_DECLARE(imx_gpcv2, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);
+IRQCHIP_DECLARE(imx_gpcv2_imx7d, "fsl,imx7d-gpc", imx_gpcv2_irqchip_init);
+IRQCHIP_DECLARE(imx_gpcv2_imx8mq, "fsl,imx8mq-gpc", imx_gpcv2_irqchip_init);
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
new file mode 100644
index 000000000000..5b3f1d735685
--- /dev/null
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2017 NXP
+ * Copyright (C) 2018 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/spinlock.h>
+
+#define CTRL_STRIDE_OFF(_t, _r) (_t * 8 * _r)
+#define CHANCTRL 0x0
+#define CHANMASK(n, t) (CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
+#define CHANSET(n, t) (CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
+#define CHANSTATUS(n, t) (CTRL_STRIDE_OFF(t, 2) + 0x4 * (n) + 0x4)
+#define CHAN_MINTDIS(t) (CTRL_STRIDE_OFF(t, 3) + 0x4)
+#define CHAN_MASTRSTAT(t) (CTRL_STRIDE_OFF(t, 3) + 0x8)
+
+struct irqsteer_data {
+ void __iomem *regs;
+ struct clk *ipg_clk;
+ int irq;
+ raw_spinlock_t lock;
+ int irq_groups;
+ int channel;
+ struct irq_domain *domain;
+ u32 *saved_reg;
+};
+
+static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
+ unsigned long irqnum)
+{
+ return (data->irq_groups * 2 - irqnum / 32 - 1);
+}
+
+static void imx_irqsteer_irq_unmask(struct irq_data *d)
+{
+ struct irqsteer_data *data = d->chip_data;
+ int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+ val |= BIT(d->hwirq % 32);
+ writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void imx_irqsteer_irq_mask(struct irq_data *d)
+{
+ struct irqsteer_data *data = d->chip_data;
+ int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&data->lock, flags);
+ val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+ val &= ~BIT(d->hwirq % 32);
+ writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+ raw_spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static struct irq_chip imx_irqsteer_irq_chip = {
+ .name = "irqsteer",
+ .irq_mask = imx_irqsteer_irq_mask,
+ .irq_unmask = imx_irqsteer_irq_unmask,
+};
+
+static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_status_flags(irq, IRQ_LEVEL);
+ irq_set_chip_data(irq, h->host_data);
+ irq_set_chip_and_handler(irq, &imx_irqsteer_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops imx_irqsteer_domain_ops = {
+ .map = imx_irqsteer_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static void imx_irqsteer_irq_handler(struct irq_desc *desc)
+{
+ struct irqsteer_data *data = irq_desc_get_handler_data(desc);
+ int i;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
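+	/* scan all irq_groups * 64 inputs, one 32-bit status word at a time */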
+ for (i = 0; i < data->irq_groups * 64; i += 32) {
+ int idx = imx_irqsteer_get_reg_index(data, i);
+ unsigned long irqmap;
+ int pos, virq;
+
+ irqmap = readl_relaxed(data->regs +
+ CHANSTATUS(idx, data->irq_groups));
+
+ for_each_set_bit(pos, &irqmap, 32) {
+ virq = irq_find_mapping(data->domain, pos + i);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
+static int imx_irqsteer_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct irqsteer_data *data;
+ struct resource *res;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->regs)) {
+ dev_err(&pdev->dev, "failed to initialize reg\n");
+ return PTR_ERR(data->regs);
+ }
+
+ data->irq = platform_get_irq(pdev, 0);
+ if (data->irq <= 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+		return data->irq < 0 ? data->irq : -ENODEV;
+ }
+
+ data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(data->ipg_clk)) {
+ ret = PTR_ERR(data->ipg_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ raw_spin_lock_init(&data->lock);
+
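+	/* the fields keep their zero defaults if these properties are absent */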
+ of_property_read_u32(np, "fsl,irq-groups", &data->irq_groups);
+ of_property_read_u32(np, "fsl,channel", &data->channel);
+
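+	/* one save slot per CHANMASK word, i.e. irq_groups * 2 in total */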
+ if (IS_ENABLED(CONFIG_PM_SLEEP)) {
+ data->saved_reg = devm_kzalloc(&pdev->dev,
+ sizeof(u32) * data->irq_groups * 2,
+ GFP_KERNEL);
+ if (!data->saved_reg)
+ return -ENOMEM;
+ }
+
+ ret = clk_prepare_enable(data->ipg_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+
+ /* steer all IRQs into configured channel */
+ writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
+
+ data->domain = irq_domain_add_linear(np, data->irq_groups * 64,
+ &imx_irqsteer_domain_ops, data);
+ if (!data->domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ clk_disable_unprepare(data->ipg_clk);
+ return -ENOMEM;
+ }
+
+ irq_set_chained_handler_and_data(data->irq, imx_irqsteer_irq_handler,
+ data);
+
+ platform_set_drvdata(pdev, data);
+
+ return 0;
+}
+
+static int imx_irqsteer_remove(struct platform_device *pdev)
+{
+ struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
+
+ irq_set_chained_handler_and_data(irqsteer_data->irq, NULL, NULL);
+ irq_domain_remove(irqsteer_data->domain);
+
+ clk_disable_unprepare(irqsteer_data->ipg_clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
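+/*
+ * Only the channel mask words need to survive suspend; CHANCTRL is
+ * reprogrammed from scratch in imx_irqsteer_restore_regs().
+ */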
+static void imx_irqsteer_save_regs(struct irqsteer_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->irq_groups * 2; i++)
+ data->saved_reg[i] = readl_relaxed(data->regs +
+ CHANMASK(i, data->irq_groups));
+}
+
+static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
+{
+ int i;
+
+ writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
+ for (i = 0; i < data->irq_groups * 2; i++)
+ writel_relaxed(data->saved_reg[i],
+ data->regs + CHANMASK(i, data->irq_groups));
+}
+
+static int imx_irqsteer_suspend(struct device *dev)
+{
+ struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);
+
+ imx_irqsteer_save_regs(irqsteer_data);
+ clk_disable_unprepare(irqsteer_data->ipg_clk);
+
+ return 0;
+}
+
+static int imx_irqsteer_resume(struct device *dev)
+{
+ struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(irqsteer_data->ipg_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable ipg clk: %d\n", ret);
+ return ret;
+ }
+ imx_irqsteer_restore_regs(irqsteer_data);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx_irqsteer_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_irqsteer_suspend, imx_irqsteer_resume)
+};
+
+static const struct of_device_id imx_irqsteer_dt_ids[] = {
+ { .compatible = "fsl,imx-irqsteer", },
+ {},
+};
+
+static struct platform_driver imx_irqsteer_driver = {
+ .driver = {
+ .name = "imx-irqsteer",
+ .of_match_table = imx_irqsteer_dt_ids,
+ .pm = &imx_irqsteer_pm_ops,
+ },
+ .probe = imx_irqsteer_probe,
+ .remove = imx_irqsteer_remove,
+};
+builtin_platform_driver(imx_irqsteer_driver);
diff --git a/drivers/irqchip/irq-madera.c b/drivers/irqchip/irq-madera.c
new file mode 100644
index 000000000000..e9256dee1a45
--- /dev/null
+++ b/drivers/irqchip/irq-madera.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interrupt support for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/irqchip/irq-madera.h>
+#include <linux/mfd/madera/core.h>
+#include <linux/mfd/madera/pdata.h>
+#include <linux/mfd/madera/registers.h>
+
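+/*
+ * The reg_offset values are relative to the regmap_irq_chip status_base
+ * (MADERA_IRQ1_STATUS_2), hence the subtraction below.
+ */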
+#define MADERA_IRQ(_irq, _reg) \
+ [MADERA_IRQ_ ## _irq] = { \
+ .reg_offset = (_reg) - MADERA_IRQ1_STATUS_2, \
+ .mask = MADERA_ ## _irq ## _EINT1 \
+ }
+
+/* Mappings are the same for all Madera codecs */
+static const struct regmap_irq madera_irqs[MADERA_NUM_IRQ] = {
+ MADERA_IRQ(FLL1_LOCK, MADERA_IRQ1_STATUS_2),
+ MADERA_IRQ(FLL2_LOCK, MADERA_IRQ1_STATUS_2),
+ MADERA_IRQ(FLL3_LOCK, MADERA_IRQ1_STATUS_2),
+ MADERA_IRQ(FLLAO_LOCK, MADERA_IRQ1_STATUS_2),
+
+ MADERA_IRQ(MICDET1, MADERA_IRQ1_STATUS_6),
+ MADERA_IRQ(MICDET2, MADERA_IRQ1_STATUS_6),
+ MADERA_IRQ(HPDET, MADERA_IRQ1_STATUS_6),
+
+ MADERA_IRQ(MICD_CLAMP_RISE, MADERA_IRQ1_STATUS_7),
+ MADERA_IRQ(MICD_CLAMP_FALL, MADERA_IRQ1_STATUS_7),
+ MADERA_IRQ(JD1_RISE, MADERA_IRQ1_STATUS_7),
+ MADERA_IRQ(JD1_FALL, MADERA_IRQ1_STATUS_7),
+
+ MADERA_IRQ(ASRC2_IN1_LOCK, MADERA_IRQ1_STATUS_9),
+ MADERA_IRQ(ASRC2_IN2_LOCK, MADERA_IRQ1_STATUS_9),
+ MADERA_IRQ(ASRC1_IN1_LOCK, MADERA_IRQ1_STATUS_9),
+ MADERA_IRQ(ASRC1_IN2_LOCK, MADERA_IRQ1_STATUS_9),
+ MADERA_IRQ(DRC2_SIG_DET, MADERA_IRQ1_STATUS_9),
+ MADERA_IRQ(DRC1_SIG_DET, MADERA_IRQ1_STATUS_9),
+
+ MADERA_IRQ(DSP_IRQ1, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ2, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ3, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ4, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ5, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ6, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ7, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ8, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ9, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ10, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ11, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ12, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ13, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ14, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ15, MADERA_IRQ1_STATUS_11),
+ MADERA_IRQ(DSP_IRQ16, MADERA_IRQ1_STATUS_11),
+
+ MADERA_IRQ(HP3R_SC, MADERA_IRQ1_STATUS_12),
+ MADERA_IRQ(HP3L_SC, MADERA_IRQ1_STATUS_12),
+ MADERA_IRQ(HP2R_SC, MADERA_IRQ1_STATUS_12),
+ MADERA_IRQ(HP2L_SC, MADERA_IRQ1_STATUS_12),
+ MADERA_IRQ(HP1R_SC, MADERA_IRQ1_STATUS_12),
+ MADERA_IRQ(HP1L_SC, MADERA_IRQ1_STATUS_12),
+
+ MADERA_IRQ(SPK_OVERHEAT_WARN, MADERA_IRQ1_STATUS_15),
+ MADERA_IRQ(SPK_OVERHEAT, MADERA_IRQ1_STATUS_15),
+
+ MADERA_IRQ(DSP1_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP2_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP3_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP4_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP5_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP6_BUS_ERR, MADERA_IRQ1_STATUS_33),
+ MADERA_IRQ(DSP7_BUS_ERR, MADERA_IRQ1_STATUS_33),
+};
+
+static const struct regmap_irq_chip madera_irq_chip = {
+ .name = "madera IRQ",
+ .status_base = MADERA_IRQ1_STATUS_2,
+ .mask_base = MADERA_IRQ1_MASK_2,
+ .ack_base = MADERA_IRQ1_STATUS_2,
+ .runtime_pm = true,
+ .num_regs = 32,
+ .irqs = madera_irqs,
+ .num_irqs = ARRAY_SIZE(madera_irqs),
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int madera_suspend(struct device *dev)
+{
+ struct madera *madera = dev_get_drvdata(dev->parent);
+
+ dev_dbg(madera->irq_dev, "Suspend, disabling IRQ\n");
+
+ /*
+ * A runtime resume would be needed to access the chip interrupt
+ * controller but runtime pm doesn't function during suspend.
+ * Temporarily disable interrupts until we reach suspend_noirq state.
+ */
+ disable_irq(madera->irq);
+
+ return 0;
+}
+
+static int madera_suspend_noirq(struct device *dev)
+{
+ struct madera *madera = dev_get_drvdata(dev->parent);
+
+ dev_dbg(madera->irq_dev, "No IRQ suspend, reenabling IRQ\n");
+
+ /* Re-enable interrupts to service wakeup interrupts from the chip */
+ enable_irq(madera->irq);
+
+ return 0;
+}
+
+static int madera_resume_noirq(struct device *dev)
+{
+ struct madera *madera = dev_get_drvdata(dev->parent);
+
+ dev_dbg(madera->irq_dev, "No IRQ resume, disabling IRQ\n");
+
+ /*
+ * We can't handle interrupts until runtime pm is available again.
+ * Disable them temporarily.
+ */
+ disable_irq(madera->irq);
+
+ return 0;
+}
+
+static int madera_resume(struct device *dev)
+{
+ struct madera *madera = dev_get_drvdata(dev->parent);
+
+ dev_dbg(madera->irq_dev, "Resume, reenabling IRQ\n");
+
+ /* Interrupts can now be handled */
+ enable_irq(madera->irq);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops madera_irq_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(madera_suspend, madera_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(madera_suspend_noirq,
+ madera_resume_noirq)
+};
+
+static int madera_irq_probe(struct platform_device *pdev)
+{
+ struct madera *madera = dev_get_drvdata(pdev->dev.parent);
+ struct irq_data *irq_data;
+ unsigned int irq_flags = 0;
+ int ret;
+
+ dev_dbg(&pdev->dev, "probe\n");
+
+ /*
+ * Read the flags from the interrupt controller if not specified
+ * by pdata
+ */
+ irq_flags = madera->pdata.irq_flags;
+ if (!irq_flags) {
+ irq_data = irq_get_irq_data(madera->irq);
+ if (!irq_data) {
+ dev_err(&pdev->dev, "Invalid IRQ: %d\n", madera->irq);
+ return -EINVAL;
+ }
+
+ irq_flags = irqd_get_trigger_type(irq_data);
+
+ /* Codec defaults to trigger low, use this if no flags given */
+ if (irq_flags == IRQ_TYPE_NONE)
+ irq_flags = IRQF_TRIGGER_LOW;
+ }
+
+ if (irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ dev_err(&pdev->dev, "Host interrupt not level-triggered\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The silicon always starts at active-low, check if we need to
+ * switch to active-high.
+ */
+ if (irq_flags & IRQF_TRIGGER_HIGH) {
+ ret = regmap_update_bits(madera->regmap, MADERA_IRQ1_CTRL,
+ MADERA_IRQ_POL_MASK, 0);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set IRQ polarity: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /*
+ * NOTE: regmap registers this against the OF node of the parent of
+ * the regmap - that is, against the mfd driver
+ */
+ ret = regmap_add_irq_chip(madera->regmap, madera->irq, IRQF_ONESHOT, 0,
+ &madera_irq_chip, &madera->irq_data);
+ if (ret) {
+ dev_err(&pdev->dev, "add_irq_chip failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Save dev in parent MFD struct so it is accessible to siblings */
+ madera->irq_dev = &pdev->dev;
+
+ return 0;
+}
+
+static int madera_irq_remove(struct platform_device *pdev)
+{
+ struct madera *madera = dev_get_drvdata(pdev->dev.parent);
+
+ /*
+ * The IRQ is disabled by the parent MFD driver before
+ * it starts cleaning up all child drivers
+ */
+ madera->irq_dev = NULL;
+ regmap_del_irq_chip(madera->irq, madera->irq_data);
+
+ return 0;
+}
+
+static struct platform_driver madera_irq_driver = {
+ .probe = &madera_irq_probe,
+ .remove = &madera_irq_remove,
+ .driver = {
+ .name = "madera-irq",
+ .pm = &madera_irq_pm_ops,
+ }
+};
+module_platform_driver(madera_irq_driver);
+
+MODULE_SOFTDEP("pre: madera");
+MODULE_DESCRIPTION("Madera IRQ driver");
+MODULE_AUTHOR("Richard Fitzgerald <rf@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-mscc-ocelot.c b/drivers/irqchip/irq-mscc-ocelot.c
index b63e40c00a02..88143c0b700c 100644
--- a/drivers/irqchip/irq-mscc-ocelot.c
+++ b/drivers/irqchip/irq-mscc-ocelot.c
@@ -72,7 +72,7 @@ static int __init ocelot_irq_init(struct device_node *node,
domain = irq_domain_add_linear(node, OCELOT_NR_IRQ,
&irq_generic_chip_ops, NULL);
if (!domain) {
- pr_err("%s: unable to add irq domain\n", node->name);
+ pr_err("%pOFn: unable to add irq domain\n", node);
return -ENOMEM;
}
@@ -80,14 +80,14 @@ static int __init ocelot_irq_init(struct device_node *node,
"icpu", handle_level_irq,
0, 0, 0);
if (ret) {
- pr_err("%s: unable to alloc irq domain gc\n", node->name);
+ pr_err("%pOFn: unable to alloc irq domain gc\n", node);
goto err_domain_remove;
}
gc = irq_get_domain_generic_chip(domain, 0);
gc->reg_base = of_iomap(node, 0);
if (!gc->reg_base) {
- pr_err("%s: unable to map resource\n", node->name);
+ pr_err("%pOFn: unable to map resource\n", node);
ret = -ENOMEM;
goto err_gc_free;
}
diff --git a/drivers/irqchip/irq-rda-intc.c b/drivers/irqchip/irq-rda-intc.c
new file mode 100644
index 000000000000..960168303b73
--- /dev/null
+++ b/drivers/irqchip/irq-rda-intc.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * RDA8810PL SoC irqchip driver
+ *
+ * Copyright RDA Microelectronics Company Limited
+ * Copyright (c) 2017 Andreas Färber
+ * Copyright (c) 2018 Manivannan Sadhasivam
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+
+#include <asm/exception.h>
+
+#define RDA_INTC_FINALSTATUS 0x00
+#define RDA_INTC_MASK_SET 0x08
+#define RDA_INTC_MASK_CLR 0x0c
+
+#define RDA_IRQ_MASK_ALL 0xFFFFFFFF
+
+#define RDA_NR_IRQS 32
+
+static void __iomem *rda_intc_base;
+static struct irq_domain *rda_irq_domain;
+
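+/*
+ * The mask register has write-one set/clear views: a bit written to
+ * MASK_SET unmasks that line, a bit written to MASK_CLR masks it.
+ */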
+static void rda_intc_mask_irq(struct irq_data *d)
+{
+ writel_relaxed(BIT(d->hwirq), rda_intc_base + RDA_INTC_MASK_CLR);
+}
+
+static void rda_intc_unmask_irq(struct irq_data *d)
+{
+ writel_relaxed(BIT(d->hwirq), rda_intc_base + RDA_INTC_MASK_SET);
+}
+
+static int rda_intc_set_type(struct irq_data *data, unsigned int flow_type)
+{
+ /* Hardware supports only level triggered interrupts */
+ if ((flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) == flow_type)
+ return 0;
+
+ return -EINVAL;
+}
+
+static void __exception_irq_entry rda_handle_irq(struct pt_regs *regs)
+{
+ u32 stat = readl_relaxed(rda_intc_base + RDA_INTC_FINALSTATUS);
+ u32 hwirq;
+
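+	/* dispatch pending sources from the highest set bit downwards */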
+ while (stat) {
+ hwirq = __fls(stat);
+ handle_domain_irq(rda_irq_domain, hwirq, regs);
+ stat &= ~BIT(hwirq);
+ }
+}
+
+static struct irq_chip rda_irq_chip = {
+ .name = "rda-intc",
+ .irq_mask = rda_intc_mask_irq,
+ .irq_unmask = rda_intc_unmask_irq,
+ .irq_set_type = rda_intc_set_type,
+};
+
+static int rda_irq_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &rda_irq_chip, handle_level_irq);
+ irq_set_chip_data(virq, d->host_data);
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops rda_irq_domain_ops = {
+ .map = rda_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init rda8810_intc_init(struct device_node *node,
+ struct device_node *parent)
+{
+ rda_intc_base = of_io_request_and_map(node, 0, "rda-intc");
+ if (IS_ERR(rda_intc_base))
+ return PTR_ERR(rda_intc_base);
+
+ /* Mask all interrupt sources */
+ writel_relaxed(RDA_IRQ_MASK_ALL, rda_intc_base + RDA_INTC_MASK_CLR);
+
+ rda_irq_domain = irq_domain_create_linear(&node->fwnode, RDA_NR_IRQS,
+ &rda_irq_domain_ops,
+ rda_intc_base);
+ if (!rda_irq_domain) {
+ iounmap(rda_intc_base);
+ return -ENOMEM;
+ }
+
+ set_handle_irq(rda_handle_irq);
+
+ return 0;
+}
+
+IRQCHIP_DECLARE(rda_intc, "rda,8810pl-intc", rda8810_intc_init);
diff --git a/drivers/irqchip/irq-renesas-h8s.c b/drivers/irqchip/irq-renesas-h8s.c
index 85234d456638..4e2461bae944 100644
--- a/drivers/irqchip/irq-renesas-h8s.c
+++ b/drivers/irqchip/irq-renesas-h8s.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * H8S interrupt contoller driver
+ * H8S interrupt controller driver
*
* Copyright 2015 Yoshinori Sato <ysato@users.sourceforge.jp>
*/
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index c6e6c9e9137a..8c039525703f 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas INTC External IRQ Pin Driver
*
* Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index a4f11124024d..a449a7c839b3 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -1,20 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas IRQC Driver
*
* Copyright (C) 2013 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index c19766fe8a1a..b623f300f1b1 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -58,7 +58,7 @@ struct s3c_irq_data {
};
/*
- * Sructure holding the controller data
+ * Structure holding the controller data
* @reg_pending register holding pending irqs
* @reg_intpnd special register intpnd in main intc
* @reg_mask mask register
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 0a2088e12d96..6edfd4bfa169 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -6,6 +6,8 @@
*/
#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/hwspinlock.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -20,6 +22,9 @@
#define IRQS_PER_BANK 32
+#define HWSPNLCK_TIMEOUT 1000 /* usec */
+#define HWSPNLCK_RETRY_DELAY 100 /* usec */
+
struct stm32_exti_bank {
u32 imr_ofst;
u32 emr_ofst;
@@ -32,6 +37,12 @@ struct stm32_exti_bank {
#define UNDEF_REG ~0
+enum stm32_exti_hwspinlock {
+ HWSPINLOCK_UNKNOWN,
+ HWSPINLOCK_NONE,
+ HWSPINLOCK_READY,
+};
+
struct stm32_desc_irq {
u32 exti;
u32 irq_parent;
@@ -58,6 +69,9 @@ struct stm32_exti_host_data {
void __iomem *base;
struct stm32_exti_chip_data *chips_data;
const struct stm32_exti_drv_data *drv_data;
+ struct device_node *node;
+ enum stm32_exti_hwspinlock hwlock_state;
+ struct hwspinlock *hwlock;
};
static struct stm32_exti_host_data *stm32_host_data;
@@ -269,6 +283,64 @@ static int stm32_exti_set_type(struct irq_data *d,
return 0;
}
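+/*
+ * The hwspinlock is resolved lazily on first use: a missing DT reference
+ * is remembered as HWSPINLOCK_NONE, while -EPROBE_DEFER from the provider
+ * is propagated to the caller until the lock becomes available.
+ */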
+static int stm32_exti_hwspin_lock(struct stm32_exti_chip_data *chip_data)
+{
+ struct stm32_exti_host_data *host_data = chip_data->host_data;
+ struct hwspinlock *hwlock;
+ int id, ret = 0, timeout = 0;
+
+ /* first time, check for hwspinlock availability */
+ if (unlikely(host_data->hwlock_state == HWSPINLOCK_UNKNOWN)) {
+ id = of_hwspin_lock_get_id(host_data->node, 0);
+ if (id >= 0) {
+ hwlock = hwspin_lock_request_specific(id);
+ if (hwlock) {
+ /* found valid hwspinlock */
+ host_data->hwlock_state = HWSPINLOCK_READY;
+ host_data->hwlock = hwlock;
+ pr_debug("%s hwspinlock = %d\n", __func__, id);
+ } else {
+ host_data->hwlock_state = HWSPINLOCK_NONE;
+ }
+ } else if (id != -EPROBE_DEFER) {
+ host_data->hwlock_state = HWSPINLOCK_NONE;
+ } else {
+			/* the hwspinlock provider is not ready yet, so defer */
+ ret = -EPROBE_DEFER;
+ }
+ }
+
+ if (likely(host_data->hwlock_state == HWSPINLOCK_READY)) {
+		/*
+		 * Use the *_raw API since we already hold a spinlock.
+		 * The *_timeout API cannot be used here because interrupts
+		 * are disabled at this point (see __setup_irq()).
+		 */
+ do {
+ ret = hwspin_trylock_raw(host_data->hwlock);
+ if (!ret)
+ return 0;
+
+ udelay(HWSPNLCK_RETRY_DELAY);
+ timeout += HWSPNLCK_RETRY_DELAY;
+ } while (timeout < HWSPNLCK_TIMEOUT);
+
+ if (ret == -EBUSY)
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret)
+ pr_err("%s can't get hwspinlock (%d)\n", __func__, ret);
+
+ return ret;
+}
+
+static void stm32_exti_hwspin_unlock(struct stm32_exti_chip_data *chip_data)
+{
+ if (likely(chip_data->host_data->hwlock_state == HWSPINLOCK_READY))
+ hwspin_unlock_raw(chip_data->host_data->hwlock);
+}
+
static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
@@ -279,21 +351,26 @@ static int stm32_irq_set_type(struct irq_data *d, unsigned int type)
irq_gc_lock(gc);
+ err = stm32_exti_hwspin_lock(chip_data);
+ if (err)
+ goto unlock;
+
rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
- if (err) {
- irq_gc_unlock(gc);
- return err;
- }
+ if (err)
+ goto unspinlock;
irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
+unspinlock:
+ stm32_exti_hwspin_unlock(chip_data);
+unlock:
irq_gc_unlock(gc);
- return 0;
+ return err;
}
static void stm32_chip_suspend(struct stm32_exti_chip_data *chip_data,
@@ -460,20 +537,27 @@ static int stm32_exti_h_set_type(struct irq_data *d, unsigned int type)
int err;
raw_spin_lock(&chip_data->rlock);
+
+ err = stm32_exti_hwspin_lock(chip_data);
+ if (err)
+ goto unlock;
+
rtsr = readl_relaxed(base + stm32_bank->rtsr_ofst);
ftsr = readl_relaxed(base + stm32_bank->ftsr_ofst);
err = stm32_exti_set_type(d, type, &rtsr, &ftsr);
- if (err) {
- raw_spin_unlock(&chip_data->rlock);
- return err;
- }
+ if (err)
+ goto unspinlock;
writel_relaxed(rtsr, base + stm32_bank->rtsr_ofst);
writel_relaxed(ftsr, base + stm32_bank->ftsr_ofst);
+
+unspinlock:
+ stm32_exti_hwspin_unlock(chip_data);
+unlock:
raw_spin_unlock(&chip_data->rlock);
- return 0;
+ return err;
}
static int stm32_exti_h_set_wake(struct irq_data *d, unsigned int on)
@@ -599,6 +683,8 @@ stm32_exti_host_data *stm32_exti_host_init(const struct stm32_exti_drv_data *dd,
return NULL;
host_data->drv_data = dd;
+ host_data->node = node;
+ host_data->hwlock_state = HWSPINLOCK_UNKNOWN;
host_data->chips_data = kcalloc(dd->bank_nr,
sizeof(struct stm32_exti_chip_data),
GFP_KERNEL);
@@ -625,8 +711,7 @@ free_host_data:
static struct
stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
- u32 bank_idx,
- struct device_node *node)
+ u32 bank_idx)
{
const struct stm32_exti_bank *stm32_bank;
struct stm32_exti_chip_data *chip_data;
@@ -656,8 +741,7 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
if (stm32_bank->fpr_ofst != UNDEF_REG)
writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
- pr_info("%s: bank%d, External IRQs available:%#x\n",
- node->full_name, bank_idx, irqs_mask);
+ pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
return chip_data;
}
@@ -678,8 +762,8 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
domain = irq_domain_add_linear(node, drv_data->bank_nr * IRQS_PER_BANK,
&irq_exti_domain_ops, NULL);
if (!domain) {
- pr_err("%s: Could not register interrupt domain.\n",
- node->name);
+ pr_err("%pOFn: Could not register interrupt domain.\n",
+ node);
ret = -ENOMEM;
goto out_unmap;
}
@@ -697,7 +781,7 @@ static int __init stm32_exti_init(const struct stm32_exti_drv_data *drv_data,
struct stm32_exti_chip_data *chip_data;
stm32_bank = drv_data->exti_banks[i];
- chip_data = stm32_exti_chip_init(host_data, i, node);
+ chip_data = stm32_exti_chip_init(host_data, i);
gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);
@@ -760,7 +844,7 @@ __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
return -ENOMEM;
for (i = 0; i < drv_data->bank_nr; i++)
- stm32_exti_chip_init(host_data, i, node);
+ stm32_exti_chip_init(host_data, i);
domain = irq_domain_add_hierarchy(parent_domain, 0,
drv_data->bank_nr * IRQS_PER_BANK,
@@ -768,7 +852,7 @@ __init stm32_exti_hierarchy_init(const struct stm32_exti_drv_data *drv_data,
host_data);
if (!domain) {
- pr_err("%s: Could not register exti domain.\n", node->name);
+ pr_err("%pOFn: Could not register exti domain.\n", node);
ret = -ENOMEM;
goto out_unmap;
}
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index e3e5b9132b75..fb78d6623556 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -28,11 +28,21 @@
#define SUN4I_IRQ_NMI_CTRL_REG 0x0c
#define SUN4I_IRQ_PENDING_REG(x) (0x10 + 0x4 * x)
#define SUN4I_IRQ_FIQ_PENDING_REG(x) (0x20 + 0x4 * x)
-#define SUN4I_IRQ_ENABLE_REG(x) (0x40 + 0x4 * x)
-#define SUN4I_IRQ_MASK_REG(x) (0x50 + 0x4 * x)
+#define SUN4I_IRQ_ENABLE_REG(data, x)	((data)->enable_reg_offset + 0x4 * (x))
+#define SUN4I_IRQ_MASK_REG(data, x)	((data)->mask_reg_offset + 0x4 * (x))
+#define SUN4I_IRQ_ENABLE_REG_OFFSET 0x40
+#define SUN4I_IRQ_MASK_REG_OFFSET 0x50
+#define SUNIV_IRQ_ENABLE_REG_OFFSET 0x20
+#define SUNIV_IRQ_MASK_REG_OFFSET 0x30
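+/*
+ * The suniv variant places the enable/mask banks at 0x20/0x30 instead
+ * of 0x40/0x50; everything else matches the sun4i layout.
+ */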
+
+struct sun4i_irq_chip_data {
+ void __iomem *irq_base;
+ struct irq_domain *irq_domain;
+ u32 enable_reg_offset;
+ u32 mask_reg_offset;
+};
-static void __iomem *sun4i_irq_base;
-static struct irq_domain *sun4i_irq_domain;
+static struct sun4i_irq_chip_data *irq_ic_data;
static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs);
@@ -43,7 +53,7 @@ static void sun4i_irq_ack(struct irq_data *irqd)
if (irq != 0)
return; /* Only IRQ 0 / the ENMI needs to be acked */
- writel(BIT(0), sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0));
+ writel(BIT(0), irq_ic_data->irq_base + SUN4I_IRQ_PENDING_REG(0));
}
static void sun4i_irq_mask(struct irq_data *irqd)
@@ -53,9 +63,10 @@ static void sun4i_irq_mask(struct irq_data *irqd)
int reg = irq / 32;
u32 val;
- val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+ val = readl(irq_ic_data->irq_base +
+ SUN4I_IRQ_ENABLE_REG(irq_ic_data, reg));
writel(val & ~(1 << irq_off),
- sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+ irq_ic_data->irq_base + SUN4I_IRQ_ENABLE_REG(irq_ic_data, reg));
}
static void sun4i_irq_unmask(struct irq_data *irqd)
@@ -65,9 +76,10 @@ static void sun4i_irq_unmask(struct irq_data *irqd)
int reg = irq / 32;
u32 val;
- val = readl(sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+ val = readl(irq_ic_data->irq_base +
+ SUN4I_IRQ_ENABLE_REG(irq_ic_data, reg));
writel(val | (1 << irq_off),
- sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(reg));
+ irq_ic_data->irq_base + SUN4I_IRQ_ENABLE_REG(irq_ic_data, reg));
}
static struct irq_chip sun4i_irq_chip = {
@@ -95,42 +107,76 @@ static const struct irq_domain_ops sun4i_irq_ops = {
static int __init sun4i_of_init(struct device_node *node,
struct device_node *parent)
{
- sun4i_irq_base = of_iomap(node, 0);
- if (!sun4i_irq_base)
+ irq_ic_data->irq_base = of_iomap(node, 0);
+ if (!irq_ic_data->irq_base)
panic("%pOF: unable to map IC registers\n",
node);
/* Disable all interrupts */
- writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(0));
- writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(1));
- writel(0, sun4i_irq_base + SUN4I_IRQ_ENABLE_REG(2));
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_ENABLE_REG(irq_ic_data, 0));
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_ENABLE_REG(irq_ic_data, 1));
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_ENABLE_REG(irq_ic_data, 2));
/* Unmask all the interrupts, ENABLE_REG(x) is used for masking */
- writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(0));
- writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(1));
- writel(0, sun4i_irq_base + SUN4I_IRQ_MASK_REG(2));
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_MASK_REG(irq_ic_data, 0));
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_MASK_REG(irq_ic_data, 1));
+ writel(0, irq_ic_data->irq_base + SUN4I_IRQ_MASK_REG(irq_ic_data, 2));
/* Clear all the pending interrupts */
- writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0));
- writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(1));
- writel(0xffffffff, sun4i_irq_base + SUN4I_IRQ_PENDING_REG(2));
+ writel(0xffffffff, irq_ic_data->irq_base + SUN4I_IRQ_PENDING_REG(0));
+ writel(0xffffffff, irq_ic_data->irq_base + SUN4I_IRQ_PENDING_REG(1));
+ writel(0xffffffff, irq_ic_data->irq_base + SUN4I_IRQ_PENDING_REG(2));
/* Enable protection mode */
- writel(0x01, sun4i_irq_base + SUN4I_IRQ_PROTECTION_REG);
+ writel(0x01, irq_ic_data->irq_base + SUN4I_IRQ_PROTECTION_REG);
/* Configure the external interrupt source type */
- writel(0x00, sun4i_irq_base + SUN4I_IRQ_NMI_CTRL_REG);
+ writel(0x00, irq_ic_data->irq_base + SUN4I_IRQ_NMI_CTRL_REG);
- sun4i_irq_domain = irq_domain_add_linear(node, 3 * 32,
+ irq_ic_data->irq_domain = irq_domain_add_linear(node, 3 * 32,
&sun4i_irq_ops, NULL);
- if (!sun4i_irq_domain)
+ if (!irq_ic_data->irq_domain)
panic("%pOF: unable to create IRQ domain\n", node);
set_handle_irq(sun4i_handle_irq);
return 0;
}
-IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-a10-ic", sun4i_of_init);
+
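+/* the per-variant init wrappers below differ only in the bank offsets */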
+static int __init sun4i_ic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ irq_ic_data = kzalloc(sizeof(struct sun4i_irq_chip_data), GFP_KERNEL);
+ if (!irq_ic_data) {
+ pr_err("kzalloc failed!\n");
+ return -ENOMEM;
+ }
+
+ irq_ic_data->enable_reg_offset = SUN4I_IRQ_ENABLE_REG_OFFSET;
+ irq_ic_data->mask_reg_offset = SUN4I_IRQ_MASK_REG_OFFSET;
+
+ return sun4i_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(allwinner_sun4i_ic, "allwinner,sun4i-a10-ic", sun4i_ic_of_init);
+
+static int __init suniv_ic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ irq_ic_data = kzalloc(sizeof(struct sun4i_irq_chip_data), GFP_KERNEL);
+ if (!irq_ic_data) {
+ pr_err("kzalloc failed!\n");
+ return -ENOMEM;
+ }
+
+ irq_ic_data->enable_reg_offset = SUNIV_IRQ_ENABLE_REG_OFFSET;
+ irq_ic_data->mask_reg_offset = SUNIV_IRQ_MASK_REG_OFFSET;
+
+ return sun4i_of_init(node, parent);
+}
+
+IRQCHIP_DECLARE(allwinner_suniv_ic, "allwinner,suniv-f1c100s-ic",
+ suniv_ic_of_init);
static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
{
@@ -146,13 +192,15 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
 * the extra check in the common case of 1 happening after having
* read the vector-reg once.
*/
- hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
+ hwirq = readl(irq_ic_data->irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
if (hwirq == 0 &&
- !(readl(sun4i_irq_base + SUN4I_IRQ_PENDING_REG(0)) & BIT(0)))
+ !(readl(irq_ic_data->irq_base + SUN4I_IRQ_PENDING_REG(0)) &
+ BIT(0)))
return;
do {
- handle_domain_irq(sun4i_irq_domain, hwirq, regs);
- hwirq = readl(sun4i_irq_base + SUN4I_IRQ_VECTOR_REG) >> 2;
+ handle_domain_irq(irq_ic_data->irq_domain, hwirq, regs);
+ hwirq = readl(irq_ic_data->irq_base +
+ SUN4I_IRQ_VECTOR_REG) >> 2;
} while (hwirq != 0);
}
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
index 580e2d72b9ba..ae28d8648679 100644
--- a/drivers/irqchip/irq-tango.c
+++ b/drivers/irqchip/irq-tango.c
@@ -184,11 +184,11 @@ static int __init tangox_irq_init(void __iomem *base, struct resource *baseres,
irq = irq_of_parse_and_map(node, 0);
if (!irq)
- panic("%s: failed to get IRQ", node->name);
+ panic("%pOFn: failed to get IRQ", node);
err = of_address_to_resource(node, 0, &res);
if (err)
- panic("%s: failed to get address", node->name);
+ panic("%pOFn: failed to get address", node);
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
chip->ctl = res.start - baseres->start;
@@ -196,12 +196,12 @@ static int __init tangox_irq_init(void __iomem *base, struct resource *baseres,
dom = irq_domain_add_linear(node, 64, &irq_generic_chip_ops, chip);
if (!dom)
- panic("%s: failed to create irqdomain", node->name);
+ panic("%pOFn: failed to create irqdomain", node);
err = irq_alloc_domain_generic_chips(dom, 32, 2, node->name,
handle_level_irq, 0, 0, 0);
if (err)
- panic("%s: failed to allocate irqchip", node->name);
+ panic("%pOFn: failed to allocate irqchip", node);
tangox_irq_domain_init(dom);
@@ -219,7 +219,7 @@ static int __init tangox_of_irq_init(struct device_node *node,
base = of_iomap(node, 0);
if (!base)
- panic("%s: of_iomap failed", node->name);
+ panic("%pOFn: of_iomap failed", node);
of_address_to_resource(node, 0, &res);
diff --git a/drivers/isdn/hardware/Kconfig b/drivers/isdn/hardware/Kconfig
index 30d028d24955..95c403088cce 100644
--- a/drivers/isdn/hardware/Kconfig
+++ b/drivers/isdn/hardware/Kconfig
@@ -5,5 +5,3 @@ comment "CAPI hardware drivers"
source "drivers/isdn/hardware/avm/Kconfig"
-source "drivers/isdn/hardware/eicon/Kconfig"
-
diff --git a/drivers/isdn/hardware/Makefile b/drivers/isdn/hardware/Makefile
index a5d8fce4c4c4..e503032b05a0 100644
--- a/drivers/isdn/hardware/Makefile
+++ b/drivers/isdn/hardware/Makefile
@@ -3,5 +3,4 @@
# Object files in subdirectories
obj-$(CONFIG_CAPI_AVM) += avm/
-obj-$(CONFIG_CAPI_EICON) += eicon/
obj-$(CONFIG_MISDN) += mISDN/
diff --git a/drivers/isdn/hardware/eicon/Kconfig b/drivers/isdn/hardware/eicon/Kconfig
deleted file mode 100644
index 6082b6a5ced3..000000000000
--- a/drivers/isdn/hardware/eicon/Kconfig
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-# ISDN DIVAS Eicon driver
-#
-
-menuconfig CAPI_EICON
- bool "Active Eicon DIVA Server cards"
- help
- Enable support for Eicon Networks active ISDN cards.
-
-if CAPI_EICON
-
-config ISDN_DIVAS
- tristate "Support Eicon DIVA Server cards"
- depends on PROC_FS && PCI
- help
- Say Y here if you have an Eicon Networks DIVA Server PCI ISDN card.
- In order to use this card, additional firmware is necessary, which
- has to be downloaded into the card using the divactrl utility.
-
-if ISDN_DIVAS
-
-config ISDN_DIVAS_BRIPCI
- bool "DIVA Server BRI/PCI support"
- help
- Enable support for DIVA Server BRI-PCI.
-
-config ISDN_DIVAS_PRIPCI
- bool "DIVA Server PRI/PCI support"
- help
- Enable support for DIVA Server PRI-PCI.
-
-config ISDN_DIVAS_DIVACAPI
- tristate "DIVA CAPI2.0 interface support"
- help
- You need this to provide the CAPI interface
- for DIVA Server cards.
-
-config ISDN_DIVAS_USERIDI
- tristate "DIVA User-IDI interface support"
- help
- Enable support for user-mode IDI interface.
-
-config ISDN_DIVAS_MAINT
- tristate "DIVA Maint driver support"
- depends on m
- help
- Enable Divas Maintenance driver.
-
-endif # ISDN_DIVAS
-
-endif # CAPI_EICON
diff --git a/drivers/isdn/hardware/eicon/Makefile b/drivers/isdn/hardware/eicon/Makefile
deleted file mode 100644
index a0ab2e2d7df0..000000000000
--- a/drivers/isdn/hardware/eicon/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# Makefile for the Eicon DIVA ISDN drivers.
-
-# Each configuration option enables a list of files.
-
-obj-$(CONFIG_ISDN_DIVAS) += divadidd.o divas.o
-obj-$(CONFIG_ISDN_DIVAS_MAINT) += diva_mnt.o
-obj-$(CONFIG_ISDN_DIVAS_USERIDI) += diva_idi.o
-obj-$(CONFIG_ISDN_DIVAS_DIVACAPI) += divacapi.o
-
-# Multipart objects.
-
-divas-y := divasmain.o divasfunc.o di.o io.o istream.o \
- diva.o divasproc.o diva_dma.o
-divas-$(CONFIG_ISDN_DIVAS_BRIPCI) += os_bri.o s_bri.o os_4bri.o s_4bri.o
-divas-$(CONFIG_ISDN_DIVAS_PRIPCI) += os_pri.o s_pri.o
-
-divacapi-y := capimain.o capifunc.o message.o capidtmf.o
-
-divadidd-y := diva_didd.o diddfunc.o dadapter.o
-
-diva_mnt-y := divamnt.o mntfunc.o debug.o maintidi.o
-
-diva_idi-y := divasi.o idifunc.o um_idi.o dqueue.o
diff --git a/drivers/isdn/hardware/eicon/adapter.h b/drivers/isdn/hardware/eicon/adapter.h
deleted file mode 100644
index f9b24eb8781d..000000000000
--- a/drivers/isdn/hardware/eicon/adapter.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: adapter.h,v 1.4 2004/03/21 17:26:01 armin Exp $ */
-
-#ifndef __DIVA_USER_MODE_IDI_ADAPTER_H__
-#define __DIVA_USER_MODE_IDI_ADAPTER_H__
-
-#define DIVA_UM_IDI_ADAPTER_REMOVED 0x00000001
-
-typedef struct _diva_um_idi_adapter {
- struct list_head link;
- DESCRIPTOR d;
- int adapter_nr;
- struct list_head entity_q; /* entities linked to this adapter */
- dword status;
-} diva_um_idi_adapter_t;
-
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/capi20.h b/drivers/isdn/hardware/eicon/capi20.h
deleted file mode 100644
index 391e4175b0b5..000000000000
--- a/drivers/isdn/hardware/eicon/capi20.h
+++ /dev/null
@@ -1,699 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef _INC_CAPI20
-#define _INC_CAPI20
-/* operations on message queues */
-/* the common device type for CAPI20 drivers */
-#define FILE_DEVICE_CAPI20 0x8001
-/* DEVICE_CONTROL codes for user and kernel mode applications */
-#define CAPI20_CTL_REGISTER 0x0801
-#define CAPI20_CTL_RELEASE 0x0802
-#define CAPI20_CTL_GET_MANUFACTURER 0x0805
-#define CAPI20_CTL_GET_VERSION 0x0806
-#define CAPI20_CTL_GET_SERIAL 0x0807
-#define CAPI20_CTL_GET_PROFILE 0x0808
-/* INTERNAL_DEVICE_CONTROL codes for kernel mode applicatios only */
-#define CAPI20_CTL_PUT_MESSAGE 0x0803
-#define CAPI20_CTL_GET_MESSAGE 0x0804
-/* the wrapped codes as required by the system */
-#define CAPI_CTL_CODE(f, m) CTL_CODE(FILE_DEVICE_CAPI20, f, m, FILE_ANY_ACCESS)
-#define IOCTL_CAPI_REGISTER CAPI_CTL_CODE(CAPI20_CTL_REGISTER, METHOD_BUFFERED)
-#define IOCTL_CAPI_RELEASE CAPI_CTL_CODE(CAPI20_CTL_RELEASE, METHOD_BUFFERED)
-#define IOCTL_CAPI_GET_MANUFACTURER CAPI_CTL_CODE(CAPI20_CTL_GET_MANUFACTURER, METHOD_BUFFERED)
-#define IOCTL_CAPI_GET_VERSION CAPI_CTL_CODE(CAPI20_CTL_GET_VERSION, METHOD_BUFFERED)
-#define IOCTL_CAPI_GET_SERIAL CAPI_CTL_CODE(CAPI20_CTL_GET_SERIAL, METHOD_BUFFERED)
-#define IOCTL_CAPI_GET_PROFILE CAPI_CTL_CODE(CAPI20_CTL_GET_PROFILE, METHOD_BUFFERED)
-#define IOCTL_CAPI_PUT_MESSAGE CAPI_CTL_CODE(CAPI20_CTL_PUT_MESSAGE, METHOD_BUFFERED)
-#define IOCTL_CAPI_GET_MESSAGE CAPI_CTL_CODE(CAPI20_CTL_GET_MESSAGE, METHOD_BUFFERED)
-struct divas_capi_register_params {
- word MessageBufferSize;
- word maxLogicalConnection;
- word maxBDataBlocks;
- word maxBDataLen;
-};
-struct divas_capi_version {
- word CapiMajor;
- word CapiMinor;
- word ManuMajor;
- word ManuMinor;
-};
-typedef struct api_profile_s {
- word Number;
- word Channels;
- dword Global_Options;
- dword B1_Protocols;
- dword B2_Protocols;
- dword B3_Protocols;
-} API_PROFILE;
-/* ISDN Common API message types */
-#define _ALERT_R 0x8001
-#define _CONNECT_R 0x8002
-#define _CONNECT_I 0x8202
-#define _CONNECT_ACTIVE_I 0x8203
-#define _DISCONNECT_R 0x8004
-#define _DISCONNECT_I 0x8204
-#define _LISTEN_R 0x8005
-#define _INFO_R 0x8008
-#define _INFO_I 0x8208
-#define _SELECT_B_REQ 0x8041
-#define _FACILITY_R 0x8080
-#define _FACILITY_I 0x8280
-#define _CONNECT_B3_R 0x8082
-#define _CONNECT_B3_I 0x8282
-#define _CONNECT_B3_ACTIVE_I 0x8283
-#define _DISCONNECT_B3_R 0x8084
-#define _DISCONNECT_B3_I 0x8284
-#define _DATA_B3_R 0x8086
-#define _DATA_B3_I 0x8286
-#define _RESET_B3_R 0x8087
-#define _RESET_B3_I 0x8287
-#define _CONNECT_B3_T90_ACTIVE_I 0x8288
-#define _MANUFACTURER_R 0x80ff
-#define _MANUFACTURER_I 0x82ff
-/* OR this to convert a REQUEST to a CONFIRM */
-#define CONFIRM 0x0100
-/* OR this to convert a INDICATION to a RESPONSE */
-#define RESPONSE 0x0100
-/*------------------------------------------------------------------*/
-/* diehl isdn private MANUFACTURER codes */
-/*------------------------------------------------------------------*/
-#define _DI_MANU_ID 0x44444944
-#define _DI_ASSIGN_PLCI 0x0001
-#define _DI_ADV_CODEC 0x0002
-#define _DI_DSP_CTRL 0x0003
-#define _DI_SIG_CTRL 0x0004
-#define _DI_RXT_CTRL 0x0005
-#define _DI_IDI_CTRL 0x0006
-#define _DI_CFG_CTRL 0x0007
-#define _DI_REMOVE_CODEC 0x0008
-#define _DI_OPTIONS_REQUEST 0x0009
-#define _DI_SSEXT_CTRL 0x000a
-#define _DI_NEGOTIATE_B3 0x000b
-/*------------------------------------------------------------------*/
-/* parameter structures */
-/*------------------------------------------------------------------*/
-/* ALERT-REQUEST */
-typedef struct {
- byte structs[0]; /* Additional Info */
-} _ALT_REQP;
-/* ALERT-CONFIRM */
-typedef struct {
- word Info;
-} _ALT_CONP;
-/* CONNECT-REQUEST */
-typedef struct {
- word CIP_Value;
- byte structs[0]; /* Called party number,
- Called party subaddress,
- Calling party number,
- Calling party subaddress,
- B_protocol,
- BC,
- LLC,
- HLC,
- Additional Info */
-} _CON_REQP;
-/* CONNECT-CONFIRM */
-typedef struct {
- word Info;
-} _CON_CONP;
-/* CONNECT-INDICATION */
-typedef struct {
- word CIP_Value;
- byte structs[0]; /* Called party number,
- Called party subaddress,
- Calling party number,
- Calling party subaddress,
- BC,
- LLC,
- HLC,
- Additional Info */
-} _CON_INDP;
-/* CONNECT-RESPONSE */
-typedef struct {
- word Accept;
- byte structs[0]; /* B_protocol,
- Connected party number,
- Connected party subaddress,
- LLC */
-} _CON_RESP;
-/* CONNECT-ACTIVE-INDICATION */
-typedef struct {
- byte structs[0]; /* Connected party number,
- Connected party subaddress,
- LLC */
-} _CON_A_INDP;
-/* CONNECT-ACTIVE-RESPONSE */
-typedef struct {
- byte structs[0]; /* empty */
-} _CON_A_RESP;
-/* DISCONNECT-REQUEST */
-typedef struct {
- byte structs[0]; /* Additional Info */
-} _DIS_REQP;
-/* DISCONNECT-CONFIRM */
-typedef struct {
- word Info;
-} _DIS_CONP;
-/* DISCONNECT-INDICATION */
-typedef struct {
- word Info;
-} _DIS_INDP;
-/* DISCONNECT-RESPONSE */
-typedef struct {
- byte structs[0]; /* empty */
-} _DIS_RESP;
-/* LISTEN-REQUEST */
-typedef struct {
- dword Info_Mask;
- dword CIP_Mask;
- byte structs[0]; /* Calling party number,
- Calling party subaddress */
-} _LIS_REQP;
-/* LISTEN-CONFIRM */
-typedef struct {
- word Info;
-} _LIS_CONP;
-/* INFO-REQUEST */
-typedef struct {
- byte structs[0]; /* Called party number,
- Additional Info */
-} _INF_REQP;
-/* INFO-CONFIRM */
-typedef struct {
- word Info;
-} _INF_CONP;
-/* INFO-INDICATION */
-typedef struct {
- word Number;
- byte structs[0]; /* Info element */
-} _INF_INDP;
-/* INFO-RESPONSE */
-typedef struct {
- byte structs[0]; /* empty */
-} _INF_RESP;
-/* SELECT-B-REQUEST */
-typedef struct {
- byte structs[0]; /* B-protocol */
-} _SEL_B_REQP;
-/* SELECT-B-CONFIRM */
-typedef struct {
- word Info;
-} _SEL_B_CONP;
-/* FACILITY-REQUEST */
-typedef struct {
- word Selector;
- byte structs[0]; /* Facility parameters */
-} _FAC_REQP;
-/* FACILITY-CONFIRM STRUCT FOR SUPPLEMENT. SERVICES */
-typedef struct {
- byte struct_length;
- word function;
- byte length;
- word SupplementaryServiceInfo;
- dword SupportedServices;
-} _FAC_CON_STRUCTS;
-/* FACILITY-CONFIRM */
-typedef struct {
- word Info;
- word Selector;
- byte structs[0]; /* Facility parameters */
-} _FAC_CONP;
-/* FACILITY-INDICATION */
-typedef struct {
- word Selector;
- byte structs[0]; /* Facility parameters */
-} _FAC_INDP;
-/* FACILITY-RESPONSE */
-typedef struct {
- word Selector;
- byte structs[0]; /* Facility parameters */
-} _FAC_RESP;
-/* CONNECT-B3-REQUEST */
-typedef struct {
- byte structs[0]; /* NCPI */
-} _CON_B3_REQP;
-/* CONNECT-B3-CONFIRM */
-typedef struct {
- word Info;
-} _CON_B3_CONP;
-/* CONNECT-B3-INDICATION */
-typedef struct {
- byte structs[0]; /* NCPI */
-} _CON_B3_INDP;
-/* CONNECT-B3-RESPONSE */
-typedef struct {
- word Accept;
- byte structs[0]; /* NCPI */
-} _CON_B3_RESP;
-/* CONNECT-B3-ACTIVE-INDICATION */
-typedef struct {
- byte structs[0]; /* NCPI */
-} _CON_B3_A_INDP;
-/* CONNECT-B3-ACTIVE-RESPONSE */
-typedef struct {
- byte structs[0]; /* empty */
-} _CON_B3_A_RESP;
-/* DISCONNECT-B3-REQUEST */
-typedef struct {
- byte structs[0]; /* NCPI */
-} _DIS_B3_REQP;
-/* DISCONNECT-B3-CONFIRM */
-typedef struct {
- word Info;
-} _DIS_B3_CONP;
-/* DISCONNECT-B3-INDICATION */
-typedef struct {
- word Info;
- byte structs[0]; /* NCPI */
-} _DIS_B3_INDP;
-/* DISCONNECT-B3-RESPONSE */
-typedef struct {
- byte structs[0]; /* empty */
-} _DIS_B3_RESP;
-/* DATA-B3-REQUEST */
-typedef struct {
- dword Data;
- word Data_Length;
- word Number;
- word Flags;
-} _DAT_B3_REQP;
-/* DATA-B3-REQUEST 64 BIT Systems */
-typedef struct {
- dword Data;
- word Data_Length;
- word Number;
- word Flags;
- void *pData;
-} _DAT_B3_REQ64P;
-/* DATA-B3-CONFIRM */
-typedef struct {
- word Number;
- word Info;
-} _DAT_B3_CONP;
-/* DATA-B3-INDICATION */
-typedef struct {
- dword Data;
- word Data_Length;
- word Number;
- word Flags;
-} _DAT_B3_INDP;
-/* DATA-B3-INDICATION 64 BIT Systems */
-typedef struct {
- dword Data;
- word Data_Length;
- word Number;
- word Flags;
- void *pData;
-} _DAT_B3_IND64P;
-/* DATA-B3-RESPONSE */
-typedef struct {
- word Number;
-} _DAT_B3_RESP;
-/* RESET-B3-REQUEST */
-typedef struct {
- byte structs[0]; /* NCPI */
-} _RES_B3_REQP;
-/* RESET-B3-CONFIRM */
-typedef struct {
- word Info;
-} _RES_B3_CONP;
-/* RESET-B3-INDICATION */
-typedef struct {
- byte structs[0]; /* NCPI */
-} _RES_B3_INDP;
-/* RESET-B3-RESPONSE */
-typedef struct {
- byte structs[0]; /* empty */
-} _RES_B3_RESP;
-/* CONNECT-B3-T90-ACTIVE-INDICATION */
-typedef struct {
- byte structs[0]; /* NCPI */
-} _CON_B3_T90_A_INDP;
-/* CONNECT-B3-T90-ACTIVE-RESPONSE */
-typedef struct {
- word Reject;
- byte structs[0]; /* NCPI */
-} _CON_B3_T90_A_RESP;
-/*------------------------------------------------------------------*/
-/* message structure */
-/*------------------------------------------------------------------*/
-typedef struct _API_MSG CAPI_MSG;
-typedef struct _MSG_HEADER CAPI_MSG_HEADER;
-struct _API_MSG {
- struct _MSG_HEADER {
- word length;
- word appl_id;
- word command;
- word number;
- byte controller;
- byte plci;
- word ncci;
- } header;
- union {
- _ALT_REQP alert_req;
- _ALT_CONP alert_con;
- _CON_REQP connect_req;
- _CON_CONP connect_con;
- _CON_INDP connect_ind;
- _CON_RESP connect_res;
- _CON_A_INDP connect_a_ind;
- _CON_A_RESP connect_a_res;
- _DIS_REQP disconnect_req;
- _DIS_CONP disconnect_con;
- _DIS_INDP disconnect_ind;
- _DIS_RESP disconnect_res;
- _LIS_REQP listen_req;
- _LIS_CONP listen_con;
- _INF_REQP info_req;
- _INF_CONP info_con;
- _INF_INDP info_ind;
- _INF_RESP info_res;
- _SEL_B_REQP select_b_req;
- _SEL_B_CONP select_b_con;
- _FAC_REQP facility_req;
- _FAC_CONP facility_con;
- _FAC_INDP facility_ind;
- _FAC_RESP facility_res;
- _CON_B3_REQP connect_b3_req;
- _CON_B3_CONP connect_b3_con;
- _CON_B3_INDP connect_b3_ind;
- _CON_B3_RESP connect_b3_res;
- _CON_B3_A_INDP connect_b3_a_ind;
- _CON_B3_A_RESP connect_b3_a_res;
- _DIS_B3_REQP disconnect_b3_req;
- _DIS_B3_CONP disconnect_b3_con;
- _DIS_B3_INDP disconnect_b3_ind;
- _DIS_B3_RESP disconnect_b3_res;
- _DAT_B3_REQP data_b3_req;
- _DAT_B3_REQ64P data_b3_req64;
- _DAT_B3_CONP data_b3_con;
- _DAT_B3_INDP data_b3_ind;
- _DAT_B3_IND64P data_b3_ind64;
- _DAT_B3_RESP data_b3_res;
- _RES_B3_REQP reset_b3_req;
- _RES_B3_CONP reset_b3_con;
- _RES_B3_INDP reset_b3_ind;
- _RES_B3_RESP reset_b3_res;
- _CON_B3_T90_A_INDP connect_b3_t90_a_ind;
- _CON_B3_T90_A_RESP connect_b3_t90_a_res;
- byte b[200];
- } info;
-};
-/*------------------------------------------------------------------*/
-/* non-fatal errors */
-/*------------------------------------------------------------------*/
-#define _NCPI_IGNORED 0x0001
-#define _FLAGS_IGNORED 0x0002
-#define _ALERT_IGNORED 0x0003
-/*------------------------------------------------------------------*/
-/* API function error codes */
-/*------------------------------------------------------------------*/
-#define GOOD 0x0000
-#define _TOO_MANY_APPLICATIONS 0x1001
-#define _BLOCK_TOO_SMALL 0x1002
-#define _BUFFER_TOO_BIG 0x1003
-#define _MSG_BUFFER_TOO_SMALL 0x1004
-#define _TOO_MANY_CONNECTIONS 0x1005
-#define _REG_CAPI_BUSY 0x1007
-#define _REG_RESOURCE_ERROR 0x1008
-#define _REG_CAPI_NOT_INSTALLED 0x1009
-#define _WRONG_APPL_ID 0x1101
-#define _BAD_MSG 0x1102
-#define _QUEUE_FULL 0x1103
-#define _GET_NO_MSG 0x1104
-#define _MSG_LOST 0x1105
-#define _WRONG_NOTIFY 0x1106
-#define _CAPI_BUSY 0x1107
-#define _RESOURCE_ERROR 0x1108
-#define _CAPI_NOT_INSTALLED 0x1109
-#define _NO_EXTERNAL_EQUIPMENT 0x110a
-#define _ONLY_EXTERNAL_EQUIPMENT 0x110b
-/*------------------------------------------------------------------*/
-/* addressing/coding error codes */
-/*------------------------------------------------------------------*/
-#define _WRONG_STATE 0x2001
-#define _WRONG_IDENTIFIER 0x2002
-#define _OUT_OF_PLCI 0x2003
-#define _OUT_OF_NCCI 0x2004
-#define _OUT_OF_LISTEN 0x2005
-#define _OUT_OF_FAX 0x2006
-#define _WRONG_MESSAGE_FORMAT 0x2007
-#define _OUT_OF_INTERCONNECT_RESOURCES 0x2008
-/*------------------------------------------------------------------*/
-/* configuration error codes */
-/*------------------------------------------------------------------*/
-#define _B1_NOT_SUPPORTED 0x3001
-#define _B2_NOT_SUPPORTED 0x3002
-#define _B3_NOT_SUPPORTED 0x3003
-#define _B1_PARM_NOT_SUPPORTED 0x3004
-#define _B2_PARM_NOT_SUPPORTED 0x3005
-#define _B3_PARM_NOT_SUPPORTED 0x3006
-#define _B_STACK_NOT_SUPPORTED 0x3007
-#define _NCPI_NOT_SUPPORTED 0x3008
-#define _CIP_NOT_SUPPORTED 0x3009
-#define _FLAGS_NOT_SUPPORTED 0x300a
-#define _FACILITY_NOT_SUPPORTED 0x300b
-#define _DATA_LEN_NOT_SUPPORTED 0x300c
-#define _RESET_NOT_SUPPORTED 0x300d
-#define _SUPPLEMENTARY_SERVICE_NOT_SUPPORTED 0x300e
-#define _REQUEST_NOT_ALLOWED_IN_THIS_STATE 0x3010
-#define _FACILITY_SPECIFIC_FUNCTION_NOT_SUPP 0x3011
-/*------------------------------------------------------------------*/
-/* reason codes */
-/*------------------------------------------------------------------*/
-#define _L1_ERROR 0x3301
-#define _L2_ERROR 0x3302
-#define _L3_ERROR 0x3303
-#define _OTHER_APPL_CONNECTED 0x3304
-#define _CAPI_GUARD_ERROR 0x3305
-#define _L3_CAUSE 0x3400
-/*------------------------------------------------------------------*/
-/* b3 reason codes */
-/*------------------------------------------------------------------*/
-#define _B_CHANNEL_LOST 0x3301
-#define _B2_ERROR 0x3302
-#define _B3_ERROR 0x3303
-/*------------------------------------------------------------------*/
-/* fax error codes */
-/*------------------------------------------------------------------*/
-#define _FAX_NO_CONNECTION 0x3311
-#define _FAX_TRAINING_ERROR 0x3312
-#define _FAX_REMOTE_REJECT 0x3313
-#define _FAX_REMOTE_ABORT 0x3314
-#define _FAX_PROTOCOL_ERROR 0x3315
-#define _FAX_TX_UNDERRUN 0x3316
-#define _FAX_RX_OVERFLOW 0x3317
-#define _FAX_LOCAL_ABORT 0x3318
-#define _FAX_PARAMETER_ERROR 0x3319
-/*------------------------------------------------------------------*/
-/* line interconnect error codes */
-/*------------------------------------------------------------------*/
-#define _LI_USER_INITIATED 0x0000
-#define _LI_LINE_NO_LONGER_AVAILABLE 0x3805
-#define _LI_INTERCONNECT_NOT_ESTABLISHED 0x3806
-#define _LI_LINES_NOT_COMPATIBLE 0x3807
-#define _LI2_USER_INITIATED 0x0000
-#define _LI2_PLCI_HAS_NO_BCHANNEL 0x3800
-#define _LI2_LINES_NOT_COMPATIBLE 0x3801
-#define _LI2_NOT_IN_SAME_INTERCONNECTION 0x3802
-/*------------------------------------------------------------------*/
-/* global options */
-/*------------------------------------------------------------------*/
-#define GL_INTERNAL_CONTROLLER_SUPPORTED 0x00000001L
-#define GL_EXTERNAL_EQUIPMENT_SUPPORTED 0x00000002L
-#define GL_HANDSET_SUPPORTED 0x00000004L
-#define GL_DTMF_SUPPORTED 0x00000008L
-#define GL_SUPPLEMENTARY_SERVICES_SUPPORTED 0x00000010L
-#define GL_CHANNEL_ALLOCATION_SUPPORTED 0x00000020L
-#define GL_BCHANNEL_OPERATION_SUPPORTED 0x00000040L
-#define GL_LINE_INTERCONNECT_SUPPORTED 0x00000080L
-#define GL_ECHO_CANCELLER_SUPPORTED 0x00000100L
-/*------------------------------------------------------------------*/
-/* protocol selection */
-/*------------------------------------------------------------------*/
-#define B1_HDLC 0
-#define B1_TRANSPARENT 1
-#define B1_V110_ASYNC 2
-#define B1_V110_SYNC 3
-#define B1_T30 4
-#define B1_HDLC_INVERTED 5
-#define B1_TRANSPARENT_R 6
-#define B1_MODEM_ALL_NEGOTIATE 7
-#define B1_MODEM_ASYNC 8
-#define B1_MODEM_SYNC_HDLC 9
-#define B2_X75 0
-#define B2_TRANSPARENT 1
-#define B2_SDLC 2
-#define B2_LAPD 3
-#define B2_T30 4
-#define B2_PPP 5
-#define B2_TRANSPARENT_NO_CRC 6
-#define B2_MODEM_EC_COMPRESSION 7
-#define B2_X75_V42BIS 8
-#define B2_V120_ASYNC 9
-#define B2_V120_ASYNC_V42BIS 10
-#define B2_V120_BIT_TRANSPARENT 11
-#define B2_LAPD_FREE_SAPI_SEL 12
-#define B3_TRANSPARENT 0
-#define B3_T90NL 1
-#define B3_ISO8208 2
-#define B3_X25_DCE 3
-#define B3_T30 4
-#define B3_T30_WITH_EXTENSIONS 5
-#define B3_RESERVED 6
-#define B3_MODEM 7
-/*------------------------------------------------------------------*/
-/* facility definitions */
-/*------------------------------------------------------------------*/
-#define SELECTOR_HANDSET 0
-#define SELECTOR_DTMF 1
-#define SELECTOR_V42BIS 2
-#define SELECTOR_SU_SERV 3
-#define SELECTOR_POWER_MANAGEMENT 4
-#define SELECTOR_LINE_INTERCONNECT 5
-#define SELECTOR_ECHO_CANCELLER 6
-/*------------------------------------------------------------------*/
-/* supplementary services definitions */
-/*------------------------------------------------------------------*/
-#define S_GET_SUPPORTED_SERVICES 0x0000
-#define S_LISTEN 0x0001
-#define S_HOLD 0x0002
-#define S_RETRIEVE 0x0003
-#define S_SUSPEND 0x0004
-#define S_RESUME 0x0005
-#define S_ECT 0x0006
-#define S_3PTY_BEGIN 0x0007
-#define S_3PTY_END 0x0008
-#define S_CALL_DEFLECTION 0x000d
-#define S_CALL_FORWARDING_START 0x0009
-#define S_CALL_FORWARDING_STOP 0x000a
-#define S_INTERROGATE_DIVERSION 0x000b /* or interrogate parameters */
-#define S_INTERROGATE_NUMBERS 0x000c
-#define S_CCBS_REQUEST 0x000f
-#define S_CCBS_DEACTIVATE 0x0010
-#define S_CCBS_INTERROGATE 0x0011
-#define S_CCBS_CALL 0x0012
-#define S_MWI_ACTIVATE 0x0013
-#define S_MWI_DEACTIVATE 0x0014
-#define S_CONF_BEGIN 0x0017
-#define S_CONF_ADD 0x0018
-#define S_CONF_SPLIT 0x0019
-#define S_CONF_DROP 0x001a
-#define S_CONF_ISOLATE 0x001b
-#define S_CONF_REATTACH 0x001c
-#define S_CCBS_ERASECALLLINKAGEID 0x800d
-#define S_CCBS_STOP_ALERTING 0x8012
-#define S_CCBS_INFO_RETAIN 0x8013
-#define S_MWI_INDICATE 0x8014
-#define S_CONF_PARTYDISC 0x8016
-#define S_CONF_NOTIFICATION 0x8017
-/* Service Masks */
-#define MASK_HOLD_RETRIEVE 0x00000001
-#define MASK_TERMINAL_PORTABILITY 0x00000002
-#define MASK_ECT 0x00000004
-#define MASK_3PTY 0x00000008
-#define MASK_CALL_FORWARDING 0x00000010
-#define MASK_CALL_DEFLECTION 0x00000020
-#define MASK_MWI 0x00000100
-#define MASK_CCNR 0x00000200
-#define MASK_CONF 0x00000400
-/*------------------------------------------------------------------*/
-/* dtmf definitions */
-/*------------------------------------------------------------------*/
-#define DTMF_LISTEN_START 1
-#define DTMF_LISTEN_STOP 2
-#define DTMF_DIGITS_SEND 3
-#define DTMF_SUCCESS 0
-#define DTMF_INCORRECT_DIGIT 1
-#define DTMF_UNKNOWN_REQUEST 2
-/*------------------------------------------------------------------*/
-/* line interconnect definitions */
-/*------------------------------------------------------------------*/
-#define LI_GET_SUPPORTED_SERVICES 0
-#define LI_REQ_CONNECT 1
-#define LI_REQ_DISCONNECT 2
-#define LI_IND_CONNECT_ACTIVE 1
-#define LI_IND_DISCONNECT 2
-#define LI_FLAG_CONFERENCE_A_B ((dword) 0x00000001L)
-#define LI_FLAG_CONFERENCE_B_A ((dword) 0x00000002L)
-#define LI_FLAG_MONITOR_A ((dword) 0x00000004L)
-#define LI_FLAG_MONITOR_B ((dword) 0x00000008L)
-#define LI_FLAG_ANNOUNCEMENT_A ((dword) 0x00000010L)
-#define LI_FLAG_ANNOUNCEMENT_B ((dword) 0x00000020L)
-#define LI_FLAG_MIX_A ((dword) 0x00000040L)
-#define LI_FLAG_MIX_B ((dword) 0x00000080L)
-#define LI_CONFERENCING_SUPPORTED ((dword) 0x00000001L)
-#define LI_MONITORING_SUPPORTED ((dword) 0x00000002L)
-#define LI_ANNOUNCEMENTS_SUPPORTED ((dword) 0x00000004L)
-#define LI_MIXING_SUPPORTED ((dword) 0x00000008L)
-#define LI_CROSS_CONTROLLER_SUPPORTED ((dword) 0x00000010L)
-#define LI2_GET_SUPPORTED_SERVICES 0
-#define LI2_REQ_CONNECT 1
-#define LI2_REQ_DISCONNECT 2
-#define LI2_IND_CONNECT_ACTIVE 1
-#define LI2_IND_DISCONNECT 2
-#define LI2_FLAG_INTERCONNECT_A_B ((dword) 0x00000001L)
-#define LI2_FLAG_INTERCONNECT_B_A ((dword) 0x00000002L)
-#define LI2_FLAG_MONITOR_B ((dword) 0x00000004L)
-#define LI2_FLAG_MIX_B ((dword) 0x00000008L)
-#define LI2_FLAG_MONITOR_X ((dword) 0x00000010L)
-#define LI2_FLAG_MIX_X ((dword) 0x00000020L)
-#define LI2_FLAG_LOOP_B ((dword) 0x00000040L)
-#define LI2_FLAG_LOOP_PC ((dword) 0x00000080L)
-#define LI2_FLAG_LOOP_X ((dword) 0x00000100L)
-#define LI2_CROSS_CONTROLLER_SUPPORTED ((dword) 0x00000001L)
-#define LI2_ASYMMETRIC_SUPPORTED ((dword) 0x00000002L)
-#define LI2_MONITORING_SUPPORTED ((dword) 0x00000004L)
-#define LI2_MIXING_SUPPORTED ((dword) 0x00000008L)
-#define LI2_REMOTE_MONITORING_SUPPORTED ((dword) 0x00000010L)
-#define LI2_REMOTE_MIXING_SUPPORTED ((dword) 0x00000020L)
-#define LI2_B_LOOPING_SUPPORTED ((dword) 0x00000040L)
-#define LI2_PC_LOOPING_SUPPORTED ((dword) 0x00000080L)
-#define LI2_X_LOOPING_SUPPORTED ((dword) 0x00000100L)
-/*------------------------------------------------------------------*/
-/* echo canceller definitions */
-/*------------------------------------------------------------------*/
-#define EC_GET_SUPPORTED_SERVICES 0
-#define EC_ENABLE_OPERATION 1
-#define EC_DISABLE_OPERATION 2
-#define EC_ENABLE_NON_LINEAR_PROCESSING 0x0001
-#define EC_DO_NOT_REQUIRE_REVERSALS 0x0002
-#define EC_DETECT_DISABLE_TONE 0x0004
-#define EC_ENABLE_ADAPTIVE_PREDELAY 0x0008
-#define EC_NON_LINEAR_PROCESSING_SUPPORTED 0x0001
-#define EC_BYPASS_ON_ANY_2100HZ_SUPPORTED 0x0002
-#define EC_BYPASS_ON_REV_2100HZ_SUPPORTED 0x0004
-#define EC_ADAPTIVE_PREDELAY_SUPPORTED 0x0008
-#define EC_BYPASS_INDICATION 1
-#define EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ 1
-#define EC_BYPASS_DUE_TO_REVERSED_2100HZ 2
-#define EC_BYPASS_RELEASED 3
-/*------------------------------------------------------------------*/
-/* function prototypes */
-/*------------------------------------------------------------------*/
-/*------------------------------------------------------------------*/
-#endif /* _INC_CAPI20 */
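
The B1/B2/B3 protocol codes above double as bit positions in the CAPI profile support masks: bit n of a mask is set when protocol code n is supported (compare the B1_Protocols/B2_Protocols/B3_Protocols values assigned in capifunc.c further down). A minimal sketch of that test, assuming the caller already has the profile dword at hand:

	/* Sketch only: does a profile mask advertise a B-protocol code?
	 * (bit n of the mask corresponds to protocol code n) */
	static int b_protocol_supported(dword mask, byte code)
	{
		return (mask & (((dword) 1) << code)) != 0;
	}

	/* The FAX-capable profile in capifunc.c sets B1_Protocols to 0xdf,
	 * so B1_TRANSPARENT_R (bit 6) tests as supported while
	 * B1_MODEM_ASYNC (bit 8) does not. */
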
diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
deleted file mode 100644
index e3f778415199..000000000000
--- a/drivers/isdn/hardware/eicon/capidtmf.c
+++ /dev/null
@@ -1,685 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include "platform.h"
-
-
-
-
-
-
-
-
-
-#include "capidtmf.h"
-
-/* #define TRACE_ */
-
-#define FILE_ "CAPIDTMF.C"
-
-/*---------------------------------------------------------------------------*/
-
-
-#define trace(a)
-
-
-
-/*---------------------------------------------------------------------------*/
-
-static short capidtmf_expand_table_alaw[0x0100] =
-{
- -5504, 5504, -344, 344, -22016, 22016, -1376, 1376,
- -2752, 2752, -88, 88, -11008, 11008, -688, 688,
- -7552, 7552, -472, 472, -30208, 30208, -1888, 1888,
- -3776, 3776, -216, 216, -15104, 15104, -944, 944,
- -4480, 4480, -280, 280, -17920, 17920, -1120, 1120,
- -2240, 2240, -24, 24, -8960, 8960, -560, 560,
- -6528, 6528, -408, 408, -26112, 26112, -1632, 1632,
- -3264, 3264, -152, 152, -13056, 13056, -816, 816,
- -6016, 6016, -376, 376, -24064, 24064, -1504, 1504,
- -3008, 3008, -120, 120, -12032, 12032, -752, 752,
- -8064, 8064, -504, 504, -32256, 32256, -2016, 2016,
- -4032, 4032, -248, 248, -16128, 16128, -1008, 1008,
- -4992, 4992, -312, 312, -19968, 19968, -1248, 1248,
- -2496, 2496, -56, 56, -9984, 9984, -624, 624,
- -7040, 7040, -440, 440, -28160, 28160, -1760, 1760,
- -3520, 3520, -184, 184, -14080, 14080, -880, 880,
- -5248, 5248, -328, 328, -20992, 20992, -1312, 1312,
- -2624, 2624, -72, 72, -10496, 10496, -656, 656,
- -7296, 7296, -456, 456, -29184, 29184, -1824, 1824,
- -3648, 3648, -200, 200, -14592, 14592, -912, 912,
- -4224, 4224, -264, 264, -16896, 16896, -1056, 1056,
- -2112, 2112, -8, 8, -8448, 8448, -528, 528,
- -6272, 6272, -392, 392, -25088, 25088, -1568, 1568,
- -3136, 3136, -136, 136, -12544, 12544, -784, 784,
- -5760, 5760, -360, 360, -23040, 23040, -1440, 1440,
- -2880, 2880, -104, 104, -11520, 11520, -720, 720,
- -7808, 7808, -488, 488, -31232, 31232, -1952, 1952,
- -3904, 3904, -232, 232, -15616, 15616, -976, 976,
- -4736, 4736, -296, 296, -18944, 18944, -1184, 1184,
- -2368, 2368, -40, 40, -9472, 9472, -592, 592,
- -6784, 6784, -424, 424, -27136, 27136, -1696, 1696,
- -3392, 3392, -168, 168, -13568, 13568, -848, 848
-};
-
-static short capidtmf_expand_table_ulaw[0x0100] =
-{
- -32124, 32124, -1884, 1884, -7932, 7932, -372, 372,
- -15996, 15996, -876, 876, -3900, 3900, -120, 120,
- -23932, 23932, -1372, 1372, -5884, 5884, -244, 244,
- -11900, 11900, -620, 620, -2876, 2876, -56, 56,
- -28028, 28028, -1628, 1628, -6908, 6908, -308, 308,
- -13948, 13948, -748, 748, -3388, 3388, -88, 88,
- -19836, 19836, -1116, 1116, -4860, 4860, -180, 180,
- -9852, 9852, -492, 492, -2364, 2364, -24, 24,
- -30076, 30076, -1756, 1756, -7420, 7420, -340, 340,
- -14972, 14972, -812, 812, -3644, 3644, -104, 104,
- -21884, 21884, -1244, 1244, -5372, 5372, -212, 212,
- -10876, 10876, -556, 556, -2620, 2620, -40, 40,
- -25980, 25980, -1500, 1500, -6396, 6396, -276, 276,
- -12924, 12924, -684, 684, -3132, 3132, -72, 72,
- -17788, 17788, -988, 988, -4348, 4348, -148, 148,
- -8828, 8828, -428, 428, -2108, 2108, -8, 8,
- -31100, 31100, -1820, 1820, -7676, 7676, -356, 356,
- -15484, 15484, -844, 844, -3772, 3772, -112, 112,
- -22908, 22908, -1308, 1308, -5628, 5628, -228, 228,
- -11388, 11388, -588, 588, -2748, 2748, -48, 48,
- -27004, 27004, -1564, 1564, -6652, 6652, -292, 292,
- -13436, 13436, -716, 716, -3260, 3260, -80, 80,
- -18812, 18812, -1052, 1052, -4604, 4604, -164, 164,
- -9340, 9340, -460, 460, -2236, 2236, -16, 16,
- -29052, 29052, -1692, 1692, -7164, 7164, -324, 324,
- -14460, 14460, -780, 780, -3516, 3516, -96, 96,
- -20860, 20860, -1180, 1180, -5116, 5116, -196, 196,
- -10364, 10364, -524, 524, -2492, 2492, -32, 32,
- -24956, 24956, -1436, 1436, -6140, 6140, -260, 260,
- -12412, 12412, -652, 652, -3004, 3004, -64, 64,
- -16764, 16764, -924, 924, -4092, 4092, -132, 132,
- -8316, 8316, -396, 396, -1980, 1980, 0, 0
-};
-
-
-/*---------------------------------------------------------------------------*/
-
-static short capidtmf_recv_window_function[CAPIDTMF_RECV_ACCUMULATE_CYCLES] =
-{
- -500L, -999L, -1499L, -1998L, -2496L, -2994L, -3491L, -3988L,
- -4483L, -4978L, -5471L, -5963L, -6454L, -6943L, -7431L, -7917L,
- -8401L, -8883L, -9363L, -9840L, -10316L, -10789L, -11259L, -11727L,
- -12193L, -12655L, -13115L, -13571L, -14024L, -14474L, -14921L, -15364L,
- -15804L, -16240L, -16672L, -17100L, -17524L, -17944L, -18360L, -18772L,
- -19180L, -19583L, -19981L, -20375L, -20764L, -21148L, -21527L, -21901L,
- -22270L, -22634L, -22993L, -23346L, -23694L, -24037L, -24374L, -24705L,
- -25030L, -25350L, -25664L, -25971L, -26273L, -26568L, -26858L, -27141L,
- -27418L, -27688L, -27952L, -28210L, -28461L, -28705L, -28943L, -29174L,
- -29398L, -29615L, -29826L, -30029L, -30226L, -30415L, -30598L, -30773L,
- -30941L, -31102L, -31256L, -31402L, -31541L, -31673L, -31797L, -31914L,
- -32024L, -32126L, -32221L, -32308L, -32388L, -32460L, -32524L, -32581L,
- -32631L, -32673L, -32707L, -32734L, -32753L, -32764L, -32768L, -32764L,
- -32753L, -32734L, -32707L, -32673L, -32631L, -32581L, -32524L, -32460L,
- -32388L, -32308L, -32221L, -32126L, -32024L, -31914L, -31797L, -31673L,
- -31541L, -31402L, -31256L, -31102L, -30941L, -30773L, -30598L, -30415L,
- -30226L, -30029L, -29826L, -29615L, -29398L, -29174L, -28943L, -28705L,
- -28461L, -28210L, -27952L, -27688L, -27418L, -27141L, -26858L, -26568L,
- -26273L, -25971L, -25664L, -25350L, -25030L, -24705L, -24374L, -24037L,
- -23694L, -23346L, -22993L, -22634L, -22270L, -21901L, -21527L, -21148L,
- -20764L, -20375L, -19981L, -19583L, -19180L, -18772L, -18360L, -17944L,
- -17524L, -17100L, -16672L, -16240L, -15804L, -15364L, -14921L, -14474L,
- -14024L, -13571L, -13115L, -12655L, -12193L, -11727L, -11259L, -10789L,
- -10316L, -9840L, -9363L, -8883L, -8401L, -7917L, -7431L, -6943L,
- -6454L, -5963L, -5471L, -4978L, -4483L, -3988L, -3491L, -2994L,
- -2496L, -1998L, -1499L, -999L, -500L,
-};
-
-static byte capidtmf_leading_zeroes_table[0x100] =
-{
- 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-#define capidtmf_byte_leading_zeroes(b) (capidtmf_leading_zeroes_table[(BYTE)(b)])
-#define capidtmf_word_leading_zeroes(w) (((w) & 0xff00) ? capidtmf_leading_zeroes_table[(w) >> 8] : 8 + capidtmf_leading_zeroes_table[(w)])
-#define capidtmf_dword_leading_zeroes(d) (((d) & 0xffff0000L) ? (((d) & 0xff000000L) ? capidtmf_leading_zeroes_table[(d) >> 24] : 8 + capidtmf_leading_zeroes_table[(d) >> 16]) : (((d) & 0xff00) ? 16 + capidtmf_leading_zeroes_table[(d) >> 8] : 24 + capidtmf_leading_zeroes_table[(d)]))
-
-
-/*---------------------------------------------------------------------------*/
-
-
-static void capidtmf_goertzel_loop(long *buffer, long *coeffs, short *sample, long count)
-{
- int i, j;
- long c, d, q0, q1, q2;
-
- for (i = 0; i < CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT - 1; i++)
- {
- q1 = buffer[i];
- q2 = buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
- d = coeffs[i] >> 1;
- c = d << 1;
- if (c >= 0)
- {
- for (j = 0; j < count; j++)
- {
- q0 = sample[j] - q2 + (c * (q1 >> 16)) + (((dword)(((dword) d) * ((dword)(q1 & 0xffff)))) >> 15);
- q2 = q1;
- q1 = q0;
- }
- }
- else
- {
- c = -c;
- d = -d;
- for (j = 0; j < count; j++)
- {
- q0 = sample[j] - q2 - ((c * (q1 >> 16)) + (((dword)(((dword) d) * ((dword)(q1 & 0xffff)))) >> 15));
- q2 = q1;
- q1 = q0;
- }
- }
- buffer[i] = q1;
- buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] = q2;
- }
- q1 = buffer[i];
- q2 = buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
- c = (coeffs[i] >> 1) << 1;
- if (c >= 0)
- {
- for (j = 0; j < count; j++)
- {
- q0 = sample[j] - q2 + (c * (q1 >> 16)) + (((dword)(((dword)(c >> 1)) * ((dword)(q1 & 0xffff)))) >> 15);
- q2 = q1;
- q1 = q0;
- c -= CAPIDTMF_RECV_FUNDAMENTAL_DECREMENT;
- }
- }
- else
- {
- c = -c;
- for (j = 0; j < count; j++)
- {
- q0 = sample[j] - q2 - ((c * (q1 >> 16)) + (((dword)(((dword)(c >> 1)) * ((dword)(q1 & 0xffff)))) >> 15));
- q2 = q1;
- q1 = q0;
- c += CAPIDTMF_RECV_FUNDAMENTAL_DECREMENT;
- }
- }
- coeffs[i] = c;
- buffer[i] = q1;
- buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] = q2;
-}
-
-
-static void capidtmf_goertzel_result(long *buffer, long *coeffs)
-{
- int i;
- long d, e, q1, q2, lo, mid, hi;
- dword k;
-
- for (i = 0; i < CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT; i++)
- {
- q1 = buffer[i];
- q2 = buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
- d = coeffs[i] >> 1;
- if (d >= 0)
- d = ((d << 1) * (-q1 >> 16)) + (((dword)(((dword) d) * ((dword)(-q1 & 0xffff)))) >> 15);
- else
- d = ((-d << 1) * (-q1 >> 16)) + (((dword)(((dword) -d) * ((dword)(-q1 & 0xffff)))) >> 15);
- e = (q2 >= 0) ? q2 : -q2;
- if (d >= 0)
- {
- k = ((dword)(d & 0xffff)) * ((dword)(e & 0xffff));
- lo = k & 0xffff;
- mid = k >> 16;
- k = ((dword)(d >> 16)) * ((dword)(e & 0xffff));
- mid += k & 0xffff;
- hi = k >> 16;
- k = ((dword)(d & 0xffff)) * ((dword)(e >> 16));
- mid += k & 0xffff;
- hi += k >> 16;
- hi += ((dword)(d >> 16)) * ((dword)(e >> 16));
- }
- else
- {
- d = -d;
- k = ((dword)(d & 0xffff)) * ((dword)(e & 0xffff));
- lo = -((long)(k & 0xffff));
- mid = -((long)(k >> 16));
- k = ((dword)(d >> 16)) * ((dword)(e & 0xffff));
- mid -= k & 0xffff;
- hi = -((long)(k >> 16));
- k = ((dword)(d & 0xffff)) * ((dword)(e >> 16));
- mid -= k & 0xffff;
- hi -= k >> 16;
- hi -= ((dword)(d >> 16)) * ((dword)(e >> 16));
- }
- if (q2 < 0)
- {
- lo = -lo;
- mid = -mid;
- hi = -hi;
- }
- d = (q1 >= 0) ? q1 : -q1;
- k = ((dword)(d & 0xffff)) * ((dword)(d & 0xffff));
- lo += k & 0xffff;
- mid += k >> 16;
- k = ((dword)(d >> 16)) * ((dword)(d & 0xffff));
- mid += (k & 0xffff) << 1;
- hi += (k >> 16) << 1;
- hi += ((dword)(d >> 16)) * ((dword)(d >> 16));
- d = (q2 >= 0) ? q2 : -q2;
- k = ((dword)(d & 0xffff)) * ((dword)(d & 0xffff));
- lo += k & 0xffff;
- mid += k >> 16;
- k = ((dword)(d >> 16)) * ((dword)(d & 0xffff));
- mid += (k & 0xffff) << 1;
- hi += (k >> 16) << 1;
- hi += ((dword)(d >> 16)) * ((dword)(d >> 16));
- mid += lo >> 16;
- hi += mid >> 16;
- buffer[i] = (lo & 0xffff) | (mid << 16);
- buffer[i + CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] = hi;
- }
-}
-
-
-/*---------------------------------------------------------------------------*/
-
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_697 0
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_770 1
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_852 2
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_941 3
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1209 4
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1336 5
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1477 6
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1633 7
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_635 8
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1010 9
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1140 10
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1272 11
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1405 12
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1555 13
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1715 14
-#define CAPIDTMF_RECV_GUARD_SNR_INDEX_1875 15
-
-#define CAPIDTMF_RECV_GUARD_SNR_DONTCARE 0xc000
-#define CAPIDTMF_RECV_NO_DIGIT 0xff
-#define CAPIDTMF_RECV_TIME_GRANULARITY (CAPIDTMF_RECV_ACCUMULATE_CYCLES + 1)
-
-#define CAPIDTMF_RECV_INDICATION_DIGIT 0x0001
-
-static long capidtmf_recv_goertzel_coef_table[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] =
-{
- 0xda97L * 2, /* 697 Hz (Low group 697 Hz) */
- 0xd299L * 2, /* 770 Hz (Low group 770 Hz) */
- 0xc8cbL * 2, /* 852 Hz (Low group 852 Hz) */
- 0xbd36L * 2, /* 941 Hz (Low group 941 Hz) */
- 0x9501L * 2, /* 1209 Hz (High group 1209 Hz) */
- 0x7f89L * 2, /* 1336 Hz (High group 1336 Hz) */
- 0x6639L * 2, /* 1477 Hz (High group 1477 Hz) */
- 0x48c6L * 2, /* 1633 Hz (High group 1633 Hz) */
- 0xe14cL * 2, /* 630 Hz (Lower guard of low group 631 Hz) */
- 0xb2e0L * 2, /* 1015 Hz (Upper guard of low group 1039 Hz) */
- 0xa1a0L * 2, /* 1130 Hz (Lower guard of high group 1140 Hz) */
- 0x8a87L * 2, /* 1272 Hz (Guard between 1209 Hz and 1336 Hz: 1271 Hz) */
- 0x7353L * 2, /* 1405 Hz (2nd harmonics of 697 Hz and guard between 1336 Hz and 1477 Hz: 1405 Hz) */
- 0x583bL * 2, /* 1552 Hz (2nd harmonics of 770 Hz and guard between 1477 Hz and 1633 Hz: 1553 Hz) */
- 0x37d8L * 2, /* 1720 Hz (2nd harmonics of 852 Hz and upper guard of high group: 1715 Hz) */
- 0x0000L * 2 /* 100-630 Hz (fundamentals) */
-};
-
-
-static word capidtmf_recv_guard_snr_low_table[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] =
-{
- 14, /* Low group peak versus 697 Hz */
- 14, /* Low group peak versus 770 Hz */
- 16, /* Low group peak versus 852 Hz */
- 16, /* Low group peak versus 941 Hz */
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1209 Hz */
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1336 Hz */
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1477 Hz */
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1633 Hz */
- 14, /* Low group peak versus 635 Hz */
- 16, /* Low group peak versus 1010 Hz */
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1140 Hz */
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* Low group peak versus 1272 Hz */
- DSPDTMF_RX_HARMONICS_SEL_DEFAULT - 8, /* Low group peak versus 1405 Hz */
- DSPDTMF_RX_HARMONICS_SEL_DEFAULT - 4, /* Low group peak versus 1555 Hz */
- DSPDTMF_RX_HARMONICS_SEL_DEFAULT - 4, /* Low group peak versus 1715 Hz */
- 12 /* Low group peak versus 100-630 Hz */
-};
-
-
-static word capidtmf_recv_guard_snr_high_table[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT] =
-{
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 697 Hz */
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 770 Hz */
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 852 Hz */
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 941 Hz */
- 20, /* High group peak versus 1209 Hz */
- 20, /* High group peak versus 1336 Hz */
- 20, /* High group peak versus 1477 Hz */
- 20, /* High group peak versus 1633 Hz */
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 635 Hz */
- CAPIDTMF_RECV_GUARD_SNR_DONTCARE, /* High group peak versus 1010 Hz */
- 16, /* High group peak versus 1140 Hz */
- 4, /* High group peak versus 1272 Hz */
- 6, /* High group peak versus 1405 Hz */
- 8, /* High group peak versus 1555 Hz */
- 16, /* High group peak versus 1715 Hz */
- 12 /* High group peak versus 100-630 Hz */
-};
-
-
-/*---------------------------------------------------------------------------*/
-
-static void capidtmf_recv_init(t_capidtmf_state *p_state)
-{
- p_state->recv.min_gap_duration = 1;
- p_state->recv.min_digit_duration = 1;
-
- p_state->recv.cycle_counter = 0;
- p_state->recv.current_digit_on_time = 0;
- p_state->recv.current_digit_off_time = 0;
- p_state->recv.current_digit_value = CAPIDTMF_RECV_NO_DIGIT;
-
- p_state->recv.digit_write_pos = 0;
- p_state->recv.digit_read_pos = 0;
- p_state->recv.indication_state = 0;
- p_state->recv.indication_state_ack = 0;
- p_state->recv.state = CAPIDTMF_RECV_STATE_IDLE;
-}
-
-
-void capidtmf_recv_enable(t_capidtmf_state *p_state, word min_digit_duration, word min_gap_duration)
-{
- p_state->recv.indication_state_ack &= CAPIDTMF_RECV_INDICATION_DIGIT;
- p_state->recv.min_digit_duration = (word)(((((dword) min_digit_duration) * 8) +
- ((dword)(CAPIDTMF_RECV_TIME_GRANULARITY / 2))) / ((dword) CAPIDTMF_RECV_TIME_GRANULARITY));
- if (p_state->recv.min_digit_duration <= 1)
- p_state->recv.min_digit_duration = 1;
- else
- (p_state->recv.min_digit_duration)--;
- p_state->recv.min_gap_duration =
- (word)((((dword) min_gap_duration) * 8) / ((dword) CAPIDTMF_RECV_TIME_GRANULARITY));
- if (p_state->recv.min_gap_duration <= 1)
- p_state->recv.min_gap_duration = 1;
- else
- (p_state->recv.min_gap_duration)--;
- p_state->recv.state |= CAPIDTMF_RECV_STATE_DTMF_ACTIVE;
-}
-
-
-void capidtmf_recv_disable(t_capidtmf_state *p_state)
-{
- p_state->recv.state &= ~CAPIDTMF_RECV_STATE_DTMF_ACTIVE;
- if (p_state->recv.state == CAPIDTMF_RECV_STATE_IDLE)
- capidtmf_recv_init(p_state);
- else
- {
- p_state->recv.cycle_counter = 0;
- p_state->recv.current_digit_on_time = 0;
- p_state->recv.current_digit_off_time = 0;
- p_state->recv.current_digit_value = CAPIDTMF_RECV_NO_DIGIT;
- }
-}
-
-
-word capidtmf_recv_indication(t_capidtmf_state *p_state, byte *buffer)
-{
- word i, j, k, flags;
-
- flags = p_state->recv.indication_state ^ p_state->recv.indication_state_ack;
- p_state->recv.indication_state_ack ^= flags & CAPIDTMF_RECV_INDICATION_DIGIT;
- if (p_state->recv.digit_write_pos != p_state->recv.digit_read_pos)
- {
- i = 0;
- k = p_state->recv.digit_write_pos;
- j = p_state->recv.digit_read_pos;
- do
- {
- buffer[i++] = p_state->recv.digit_buffer[j];
- j = (j == CAPIDTMF_RECV_DIGIT_BUFFER_SIZE - 1) ? 0 : j + 1;
- } while (j != k);
- p_state->recv.digit_read_pos = k;
- return (i);
- }
- p_state->recv.indication_state_ack ^= flags;
- return (0);
-}
-
-
-#define CAPIDTMF_RECV_WINDOWED_SAMPLES 32
-
-void capidtmf_recv_block(t_capidtmf_state *p_state, byte *buffer, word length)
-{
- byte result_digit;
- word sample_number, cycle_counter, n, i;
- word low_peak, high_peak;
- dword lo, hi;
- byte *p;
- short *q;
- byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
- short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
-
-
- if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
- {
- cycle_counter = p_state->recv.cycle_counter;
- sample_number = 0;
- while (sample_number < length)
- {
- if (cycle_counter < CAPIDTMF_RECV_ACCUMULATE_CYCLES)
- {
- if (cycle_counter == 0)
- {
- for (i = 0; i < CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT; i++)
- {
- p_state->recv.goertzel_buffer[0][i] = 0;
- p_state->recv.goertzel_buffer[1][i] = 0;
- }
- }
- n = CAPIDTMF_RECV_ACCUMULATE_CYCLES - cycle_counter;
- if (n > length - sample_number)
- n = length - sample_number;
- if (n > CAPIDTMF_RECV_WINDOWED_SAMPLES)
- n = CAPIDTMF_RECV_WINDOWED_SAMPLES;
- p = buffer + sample_number;
- q = capidtmf_recv_window_function + cycle_counter;
- if (p_state->ulaw)
- {
- for (i = 0; i < n; i++)
- {
- windowed_sample_buffer[i] =
- (short)((capidtmf_expand_table_ulaw[p[i]] * ((long)(q[i]))) >> 15);
- }
- }
- else
- {
- for (i = 0; i < n; i++)
- {
- windowed_sample_buffer[i] =
- (short)((capidtmf_expand_table_alaw[p[i]] * ((long)(q[i]))) >> 15);
- }
- }
- capidtmf_recv_goertzel_coef_table[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT - 1] = CAPIDTMF_RECV_FUNDAMENTAL_OFFSET;
- capidtmf_goertzel_loop(p_state->recv.goertzel_buffer[0],
- capidtmf_recv_goertzel_coef_table, windowed_sample_buffer, n);
- cycle_counter += n;
- sample_number += n;
- }
- else
- {
- capidtmf_goertzel_result(p_state->recv.goertzel_buffer[0],
- capidtmf_recv_goertzel_coef_table);
- for (i = 0; i < CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT; i++)
- {
- lo = (dword)(p_state->recv.goertzel_buffer[0][i]);
- hi = (dword)(p_state->recv.goertzel_buffer[1][i]);
- if (hi != 0)
- {
- n = capidtmf_dword_leading_zeroes(hi);
- hi = (hi << n) | (lo >> (32 - n));
- }
- else
- {
- n = capidtmf_dword_leading_zeroes(lo);
- hi = lo << n;
- n += 32;
- }
- n = 195 - 3 * n;
- if (hi >= 0xcb300000L)
- n += 2;
- else if (hi >= 0xa1450000L)
- n++;
- goertzel_result_buffer[i] = (byte) n;
- }
- low_peak = DSPDTMF_RX_SENSITIVITY_LOW_DEFAULT;
- result_digit = CAPIDTMF_RECV_NO_DIGIT;
- for (i = 0; i < CAPIDTMF_LOW_GROUP_FREQUENCIES; i++)
- {
- if (goertzel_result_buffer[i] > low_peak)
- {
- low_peak = goertzel_result_buffer[i];
- result_digit = (byte) i;
- }
- }
- high_peak = DSPDTMF_RX_SENSITIVITY_HIGH_DEFAULT;
- n = CAPIDTMF_RECV_NO_DIGIT;
- for (i = CAPIDTMF_LOW_GROUP_FREQUENCIES; i < CAPIDTMF_RECV_BASE_FREQUENCY_COUNT; i++)
- {
- if (goertzel_result_buffer[i] > high_peak)
- {
- high_peak = goertzel_result_buffer[i];
- n = (i - CAPIDTMF_LOW_GROUP_FREQUENCIES) << 2;
- }
- }
- result_digit |= (byte) n;
- if (low_peak + DSPDTMF_RX_HIGH_EXCEEDING_LOW_DEFAULT < high_peak)
- result_digit = CAPIDTMF_RECV_NO_DIGIT;
- if (high_peak + DSPDTMF_RX_LOW_EXCEEDING_HIGH_DEFAULT < low_peak)
- result_digit = CAPIDTMF_RECV_NO_DIGIT;
- n = 0;
- for (i = 0; i < CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT; i++)
- {
- if ((((short)(low_peak - goertzel_result_buffer[i] - capidtmf_recv_guard_snr_low_table[i])) < 0)
- || (((short)(high_peak - goertzel_result_buffer[i] - capidtmf_recv_guard_snr_high_table[i])) < 0))
- {
- n++;
- }
- }
- if (n != 2)
- result_digit = CAPIDTMF_RECV_NO_DIGIT;
-
- if (result_digit == CAPIDTMF_RECV_NO_DIGIT)
- {
- if (p_state->recv.current_digit_on_time != 0)
- {
- if (++(p_state->recv.current_digit_off_time) >= p_state->recv.min_gap_duration)
- {
- p_state->recv.current_digit_on_time = 0;
- p_state->recv.current_digit_off_time = 0;
- }
- }
- else
- {
- if (p_state->recv.current_digit_off_time != 0)
- (p_state->recv.current_digit_off_time)--;
- }
- }
- else
- {
- if ((p_state->recv.current_digit_on_time == 0)
- && (p_state->recv.current_digit_off_time != 0))
- {
- (p_state->recv.current_digit_off_time)--;
- }
- else
- {
- n = p_state->recv.current_digit_off_time;
- if ((p_state->recv.current_digit_on_time != 0)
- && (result_digit != p_state->recv.current_digit_value))
- {
- p_state->recv.current_digit_on_time = 0;
- n = 0;
- }
- p_state->recv.current_digit_value = result_digit;
- p_state->recv.current_digit_off_time = 0;
- if (p_state->recv.current_digit_on_time != 0xffff)
- {
- p_state->recv.current_digit_on_time += n + 1;
- if (p_state->recv.current_digit_on_time >= p_state->recv.min_digit_duration)
- {
- p_state->recv.current_digit_on_time = 0xffff;
- i = (p_state->recv.digit_write_pos == CAPIDTMF_RECV_DIGIT_BUFFER_SIZE - 1) ?
- 0 : p_state->recv.digit_write_pos + 1;
- if (i == p_state->recv.digit_read_pos)
- {
- trace(dprintf("%s,%d: Receive digit overrun",
- (char *)(FILE_), __LINE__));
- }
- else
- {
- p_state->recv.digit_buffer[p_state->recv.digit_write_pos] = result_digit;
- p_state->recv.digit_write_pos = i;
- p_state->recv.indication_state =
- (p_state->recv.indication_state & ~CAPIDTMF_RECV_INDICATION_DIGIT) |
- (~p_state->recv.indication_state_ack & CAPIDTMF_RECV_INDICATION_DIGIT);
- }
- }
- }
- }
- }
- cycle_counter = 0;
- sample_number++;
- }
- }
- p_state->recv.cycle_counter = cycle_counter;
- }
-}
-
-
-void capidtmf_init(t_capidtmf_state *p_state, byte ulaw)
-{
- p_state->ulaw = ulaw;
- capidtmf_recv_init(p_state);
-}
-
-
-/*---------------------------------------------------------------------------*/
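
capidtmf_goertzel_loop() above runs a fixed-point Goertzel filter bank: each of the 16 frequency bins iterates q0 = x[i] + c*q1 - q2, with the multiplication split into 16-bit halves so the 32-bit products cannot overflow, and capidtmf_goertzel_result() then accumulates the squared magnitude q1*q1 + q2*q2 - c*q1*q2 in a lo/mid/hi triple. The table coefficients are 2*cos(2*pi*f/8000) in Q16 format (e.g. 0xda97 * 2 / 65536 = 1.708 for 697 Hz). A floating-point sketch of the same recurrence, for illustration only and not part of this driver:

	#include <math.h>

	/* Floating-point equivalent of one bin of capidtmf_goertzel_loop()
	 * plus capidtmf_goertzel_result(): squared magnitude of freq_hz in
	 * an n-sample block taken at rate_hz. */
	static double goertzel_power(const short *sample, int n,
				     double freq_hz, double rate_hz)
	{
		double c = 2.0 * cos(2.0 * M_PI * freq_hz / rate_hz);
		double q0, q1 = 0.0, q2 = 0.0;
		int i;

		for (i = 0; i < n; i++) {
			q0 = sample[i] + c * q1 - q2;
			q2 = q1;
			q1 = q0;
		}
		return q1 * q1 + q2 * q2 - c * q1 * q2;
	}
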
diff --git a/drivers/isdn/hardware/eicon/capidtmf.h b/drivers/isdn/hardware/eicon/capidtmf.h
deleted file mode 100644
index 0a9cf59bb224..000000000000
--- a/drivers/isdn/hardware/eicon/capidtmf.h
+++ /dev/null
@@ -1,79 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef CAPIDTMF_H_
-#define CAPIDTMF_H_
-/*---------------------------------------------------------------------------*/
-/*---------------------------------------------------------------------------*/
-#define CAPIDTMF_TONE_GROUP_COUNT 2
-#define CAPIDTMF_LOW_GROUP_FREQUENCIES 4
-#define CAPIDTMF_HIGH_GROUP_FREQUENCIES 4
-#define DSPDTMF_RX_SENSITIVITY_LOW_DEFAULT 50 /* -52 dBm */
-#define DSPDTMF_RX_SENSITIVITY_HIGH_DEFAULT 50 /* -52 dBm */
-#define DSPDTMF_RX_HIGH_EXCEEDING_LOW_DEFAULT 10 /* dB */
-#define DSPDTMF_RX_LOW_EXCEEDING_HIGH_DEFAULT 10 /* dB */
-#define DSPDTMF_RX_HARMONICS_SEL_DEFAULT 12 /* dB */
-#define CAPIDTMF_RECV_BASE_FREQUENCY_COUNT (CAPIDTMF_LOW_GROUP_FREQUENCIES + CAPIDTMF_HIGH_GROUP_FREQUENCIES)
-#define CAPIDTMF_RECV_GUARD_FREQUENCY_COUNT 8
-#define CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT (CAPIDTMF_RECV_BASE_FREQUENCY_COUNT + CAPIDTMF_RECV_GUARD_FREQUENCY_COUNT)
-#define CAPIDTMF_RECV_POSITIVE_COEFF_COUNT 16
-#define CAPIDTMF_RECV_NEGATIVE_COEFF_COUNT (CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT - CAPIDTMF_RECV_POSITIVE_COEFF_COUNT)
-#define CAPIDTMF_RECV_ACCUMULATE_CYCLES 205
-#define CAPIDTMF_RECV_FUNDAMENTAL_OFFSET (0xff35L * 2)
-#define CAPIDTMF_RECV_FUNDAMENTAL_DECREMENT (0x0028L * 2)
-#define CAPIDTMF_RECV_DIGIT_BUFFER_SIZE 32
-#define CAPIDTMF_RECV_STATE_IDLE 0x00
-#define CAPIDTMF_RECV_STATE_DTMF_ACTIVE 0x01
-typedef struct tag_capidtmf_recv_state
-{
- byte digit_buffer[CAPIDTMF_RECV_DIGIT_BUFFER_SIZE];
- word digit_write_pos;
- word digit_read_pos;
- word indication_state;
- word indication_state_ack;
- long goertzel_buffer[2][CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
- word min_gap_duration;
- word min_digit_duration;
- word cycle_counter;
- word current_digit_on_time;
- word current_digit_off_time;
- byte current_digit_value;
- byte state;
-} t_capidtmf_recv_state;
-typedef struct tag_capidtmf_state
-{
- byte ulaw;
- t_capidtmf_recv_state recv;
-} t_capidtmf_state;
-word capidtmf_recv_indication(t_capidtmf_state *p_state, byte *buffer);
-void capidtmf_recv_block(t_capidtmf_state *p_state, byte *buffer, word length);
-void capidtmf_init(t_capidtmf_state *p_state, byte ulaw);
-void capidtmf_recv_enable(t_capidtmf_state *p_state, word min_digit_duration, word min_gap_duration);
-void capidtmf_recv_disable(t_capidtmf_state *p_state);
-#define capidtmf_indication(p_state, buffer) (((p_state)->recv.indication_state != (p_state)->recv.indication_state_ack) ? capidtmf_recv_indication(p_state, buffer) : 0)
-#define capidtmf_recv_process_block(p_state, buffer, length) { if ((p_state)->recv.state != CAPIDTMF_RECV_STATE_IDLE) capidtmf_recv_block(p_state, buffer, length); }
-/*---------------------------------------------------------------------------*/
-/*---------------------------------------------------------------------------*/
-#endif
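
The prototypes above fix the call pattern: capidtmf_init() once per channel, capidtmf_recv_enable() with minimum digit and gap durations in milliseconds (the driver converts them to 8 kHz sample counts), then capidtmf_recv_process_block() for every received voice block and capidtmf_indication() to drain decoded digit codes. A usage sketch; rx_buf and rx_len are hypothetical caller names:

	t_capidtmf_state dtmf;
	byte digits[CAPIDTMF_RECV_DIGIT_BUFFER_SIZE];
	word i, count;

	capidtmf_init(&dtmf, 1);              /* 1 = u-law samples      */
	capidtmf_recv_enable(&dtmf, 40, 40);  /* 40 ms digit, 40 ms gap */

	/* for every block of received A-law/u-law bytes: */
	capidtmf_recv_process_block(&dtmf, rx_buf, rx_len);
	count = capidtmf_indication(&dtmf, digits);
	for (i = 0; i < count; i++) {
		/* digits[i] = low-group index | (high-group index << 2),
		 * i.e. a 0..15 code for the 4x4 DTMF keypad */
	}
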
diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
deleted file mode 100644
index 7a0bdbdd87ea..000000000000
--- a/drivers/isdn/hardware/eicon/capifunc.c
+++ /dev/null
@@ -1,1219 +0,0 @@
-/* $Id: capifunc.c,v 1.61.4.7 2005/02/11 19:40:25 armin Exp $
- *
- * ISDN interface module for Eicon active cards DIVA.
- * CAPI Interface common functions
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#include "platform.h"
-#include "os_capi.h"
-#include "di_defs.h"
-#include "capi20.h"
-#include "divacapi.h"
-#include "divasync.h"
-#include "capifunc.h"
-
-#define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-DIVA_CAPI_ADAPTER *adapter = (DIVA_CAPI_ADAPTER *) NULL;
-APPL *application = (APPL *) NULL;
-byte max_appl = MAX_APPL;
-byte max_adapter = 0;
-static CAPI_MSG *mapped_msg = (CAPI_MSG *) NULL;
-
-byte UnMapController(byte);
-char DRIVERRELEASE_CAPI[32];
-
-extern void AutomaticLaw(DIVA_CAPI_ADAPTER *);
-extern void callback(ENTITY *);
-extern word api_remove_start(void);
-extern word CapiRelease(word);
-extern word CapiRegister(word);
-extern word api_put(APPL *, CAPI_MSG *);
-
-static diva_os_spin_lock_t api_lock;
-
-static LIST_HEAD(cards);
-
-static dword notify_handle;
-static void DIRequest(ENTITY *e);
-static DESCRIPTOR MAdapter;
-static DESCRIPTOR DAdapter;
-static byte ControllerMap[MAX_DESCRIPTORS + 1];
-
-
-static void diva_register_appl(struct capi_ctr *, __u16,
- capi_register_params *);
-static void diva_release_appl(struct capi_ctr *, __u16);
-static char *diva_procinfo(struct capi_ctr *);
-static u16 diva_send_message(struct capi_ctr *,
- diva_os_message_buffer_s *);
-extern void diva_os_set_controller_struct(struct capi_ctr *);
-
-extern void DIVA_DIDD_Read(DESCRIPTOR *, int);
-
-/*
- * debug
- */
-static void no_printf(unsigned char *, ...);
-#include "debuglib.c"
-static void xlog(char *x, ...)
-{
-#ifndef DIVA_NO_DEBUGLIB
- va_list ap;
- if (myDriverDebugHandle.dbgMask & DL_XLOG) {
- va_start(ap, x);
- if (myDriverDebugHandle.dbg_irq) {
- myDriverDebugHandle.dbg_irq(myDriverDebugHandle.id,
- DLI_XLOG, x, ap);
- } else if (myDriverDebugHandle.dbg_old) {
- myDriverDebugHandle.dbg_old(myDriverDebugHandle.id,
- x, ap);
- }
- va_end(ap);
- }
-#endif
-}
-
-/*
- * info for proc
- */
-static char *diva_procinfo(struct capi_ctr *ctrl)
-{
- return (ctrl->serial);
-}
-
-/*
- * stop debugging
- */
-static void stop_dbg(void)
-{
- DbgDeregister();
- memset(&MAdapter, 0, sizeof(MAdapter));
- dprintf = no_printf;
-}
-
-/*
- * dummy debug function
- */
-static void no_printf(unsigned char *x, ...)
-{
-}
-
-/*
- * Controller mapping
- */
-byte MapController(byte Controller)
-{
- byte i;
- byte MappedController = 0;
- byte ctrl = Controller & 0x7f; /* mask external controller bit off */
-
- for (i = 1; i < max_adapter + 1; i++) {
- if (ctrl == ControllerMap[i]) {
- MappedController = (byte) i;
- break;
- }
- }
- if (i > max_adapter) {
- ControllerMap[0] = ctrl;
- MappedController = 0;
- }
- return (MappedController | (Controller & 0x80)); /* put back external controller bit */
-}
-
-/*
- * Controller unmapping
- */
-byte UnMapController(byte MappedController)
-{
- byte Controller;
- byte ctrl = MappedController & 0x7f; /* mask external controller bit off */
-
- if (ctrl <= max_adapter) {
- Controller = ControllerMap[ctrl];
- } else {
- Controller = 0;
- }
-
- return (Controller | (MappedController & 0x80)); /* put back external controller bit */
-}
-
-/*
- * find a new free id
- */
-static int find_free_id(void)
-{
- int num = 0;
- DIVA_CAPI_ADAPTER *a;
-
- while (num < MAX_DESCRIPTORS) {
- a = &adapter[num];
- if (!a->Id)
- break;
- num++;
- }
- return (num + 1);
-}
-
-/*
- * find a card structure by controller number
- */
-static diva_card *find_card_by_ctrl(word controller)
-{
- struct list_head *tmp;
- diva_card *card;
-
- list_for_each(tmp, &cards) {
- card = list_entry(tmp, diva_card, list);
- if (ControllerMap[card->Id] == controller) {
- if (card->remove_in_progress)
- card = NULL;
- return (card);
- }
- }
- return (diva_card *) 0;
-}
-
-/*
- * Buffer RX/TX
- */
-void *TransmitBufferSet(APPL *appl, dword ref)
-{
- appl->xbuffer_used[ref] = true;
- DBG_PRV1(("%d:xbuf_used(%d)", appl->Id, ref + 1))
- return (void *)(long)ref;
-}
-
-void *TransmitBufferGet(APPL *appl, void *p)
-{
- if (appl->xbuffer_internal[(dword)(long)p])
- return appl->xbuffer_internal[(dword)(long)p];
-
- return appl->xbuffer_ptr[(dword)(long)p];
-}
-
-void TransmitBufferFree(APPL *appl, void *p)
-{
- appl->xbuffer_used[(dword)(long)p] = false;
- DBG_PRV1(("%d:xbuf_free(%d)", appl->Id, ((dword)(long)p) + 1))
- }
-
-void *ReceiveBufferGet(APPL *appl, int Num)
-{
- return &appl->ReceiveBuffer[Num * appl->MaxDataLength];
-}
-
-/*
- * api_remove_start/complete for cleanup
- */
-void api_remove_complete(void)
-{
- DBG_PRV1(("api_remove_complete"))
- }
-
-/*
- * main function called by message.c
- */
-void sendf(APPL *appl, word command, dword Id, word Number, byte *format, ...)
-{
- word i, j;
- word length = 12, dlength = 0;
- byte *write;
- CAPI_MSG msg;
- byte *string = NULL;
- va_list ap;
- diva_os_message_buffer_s *dmb;
- diva_card *card = NULL;
- dword tmp;
-
- if (!appl)
- return;
-
- DBG_PRV1(("sendf(a=%d,cmd=%x,format=%s)",
- appl->Id, command, (byte *) format))
-
- PUT_WORD(&msg.header.appl_id, appl->Id);
- PUT_WORD(&msg.header.command, command);
- if ((byte) (command >> 8) == 0x82)
- Number = appl->Number++;
- PUT_WORD(&msg.header.number, Number);
-
- PUT_DWORD(&msg.header.controller, Id);
- write = (byte *)&msg;
- write += 12;
-
- va_start(ap, format);
- for (i = 0; format[i]; i++) {
- switch (format[i]) {
- case 'b':
- tmp = va_arg(ap, dword);
- *(byte *) write = (byte) (tmp & 0xff);
- write += 1;
- length += 1;
- break;
- case 'w':
- tmp = va_arg(ap, dword);
- PUT_WORD(write, (tmp & 0xffff));
- write += 2;
- length += 2;
- break;
- case 'd':
- tmp = va_arg(ap, dword);
- PUT_DWORD(write, tmp);
- write += 4;
- length += 4;
- break;
- case 's':
- case 'S':
- string = va_arg(ap, byte *);
- length += string[0] + 1;
- for (j = 0; j <= string[0]; j++)
- *write++ = string[j];
- break;
- }
- }
- va_end(ap);
-
- PUT_WORD(&msg.header.length, length);
- msg.header.controller = UnMapController(msg.header.controller);
-
- if (command == _DATA_B3_I)
- dlength = GET_WORD(
- ((byte *)&msg.info.data_b3_ind.Data_Length));
-
- if (!(dmb = diva_os_alloc_message_buffer(length + dlength,
- (void **) &write))) {
- DBG_ERR(("sendf: alloc_message_buffer failed, incoming msg dropped."))
- return;
- }
-
- /* copy msg header to sk_buff */
- memcpy(write, (byte *)&msg, length);
-
- /* if DATA_B3_IND, copy data too */
- if (command == _DATA_B3_I) {
- dword data = GET_DWORD(&msg.info.data_b3_ind.Data);
- memcpy(write + length, (void *)(long)data, dlength);
- }
-
-#ifndef DIVA_NO_DEBUGLIB
- if (myDriverDebugHandle.dbgMask & DL_XLOG) {
- switch (command) {
- default:
- xlog("\x00\x02", &msg, 0x81, length);
- break;
- case _DATA_B3_R | CONFIRM:
- if (myDriverDebugHandle.dbgMask & DL_BLK)
- xlog("\x00\x02", &msg, 0x81, length);
- break;
- case _DATA_B3_I:
- if (myDriverDebugHandle.dbgMask & DL_BLK) {
- xlog("\x00\x02", &msg, 0x81, length);
- for (i = 0; i < dlength; i += 256) {
- DBG_BLK((((char *)(long)GET_DWORD(&msg.info.data_b3_ind.Data)) + i,
- ((dlength - i) < 256) ? (dlength - i) : 256))
- if (!(myDriverDebugHandle.dbgMask & DL_PRV0))
-						break; /* no more unless explicitly requested */
- }
- }
- break;
- }
- }
-#endif
-
- /* find the card structure for this controller */
- if (!(card = find_card_by_ctrl(write[8] & 0x7f))) {
- DBG_ERR(("sendf - controller %d not found, incoming msg dropped",
- write[8] & 0x7f))
- diva_os_free_message_buffer(dmb);
- return;
- }
- /* send capi msg to capi layer */
- capi_ctr_handle_message(&card->capi_ctrl, appl->Id, dmb);
-}
-
-/*
- * cleanup adapter
- */
-static void clean_adapter(int id, struct list_head *free_mem_q)
-{
- DIVA_CAPI_ADAPTER *a;
- int i, k;
-
- a = &adapter[id];
- k = li_total_channels - a->li_channels;
- if (k == 0) {
- if (li_config_table) {
- list_add((struct list_head *)li_config_table, free_mem_q);
- li_config_table = NULL;
- }
- } else {
- if (a->li_base < k) {
- memmove(&li_config_table[a->li_base],
- &li_config_table[a->li_base + a->li_channels],
- (k - a->li_base) * sizeof(LI_CONFIG));
- for (i = 0; i < k; i++) {
- memmove(&li_config_table[i].flag_table[a->li_base],
- &li_config_table[i].flag_table[a->li_base + a->li_channels],
- k - a->li_base);
- memmove(&li_config_table[i].
- coef_table[a->li_base],
- &li_config_table[i].coef_table[a->li_base + a->li_channels],
- k - a->li_base);
- }
- }
- }
- li_total_channels = k;
- for (i = id; i < max_adapter; i++) {
- if (adapter[i].request)
- adapter[i].li_base -= a->li_channels;
- }
- if (a->plci)
- list_add((struct list_head *)a->plci, free_mem_q);
-
- memset(a, 0x00, sizeof(DIVA_CAPI_ADAPTER));
- while ((max_adapter != 0) && !adapter[max_adapter - 1].request)
- max_adapter--;
-}
-
-/*
- * remove a card, ensuring a consistent state of the LI tables
- * while the adapter is being removed
- */
-static void divacapi_remove_card(DESCRIPTOR *d)
-{
- diva_card *card = NULL;
- diva_os_spin_lock_magic_t old_irql;
- LIST_HEAD(free_mem_q);
- struct list_head *link;
- struct list_head *tmp;
-
-	/*
-	 * Set the "remove in progress" flag.
-	 * This ensures that sendf makes no call into CAPI while
-	 * the CAPI controller is being removed.
-	 */
- diva_os_enter_spin_lock(&api_lock, &old_irql, "remove card");
- list_for_each(tmp, &cards) {
- card = list_entry(tmp, diva_card, list);
- if (card->d.request == d->request) {
- card->remove_in_progress = 1;
- list_del(tmp);
- break;
- }
- }
- diva_os_leave_spin_lock(&api_lock, &old_irql, "remove card");
-
- if (card) {
-		/*
-		 * Detach CAPI. sendf can no longer call into CAPI, and
-		 * after the detach no calls to send_message() are made
-		 * either.
-		 */
- detach_capi_ctr(&card->capi_ctrl);
-
- /*
- * Now get API lock (to ensure stable state of LI tables)
- * and update the adapter map/LI table.
- */
- diva_os_enter_spin_lock(&api_lock, &old_irql, "remove card");
-
- clean_adapter(card->Id - 1, &free_mem_q);
- DBG_TRC(("DelAdapterMap (%d) -> (%d)",
- ControllerMap[card->Id], card->Id))
- ControllerMap[card->Id] = 0;
- DBG_TRC(("adapter remove, max_adapter=%d",
- max_adapter));
- diva_os_leave_spin_lock(&api_lock, &old_irql, "remove card");
-
- /* After releasing the lock, we can free the memory */
- diva_os_free(0, card);
- }
-
- /* free queued memory areas */
- list_for_each_safe(link, tmp, &free_mem_q) {
- list_del(link);
- diva_os_free(0, link);
- }
-}
-
-/*
- * remove cards
- */
-static void divacapi_remove_cards(void)
-{
- DESCRIPTOR d;
- struct list_head *tmp;
- diva_card *card;
- diva_os_spin_lock_magic_t old_irql;
-
-rescan:
- diva_os_enter_spin_lock(&api_lock, &old_irql, "remove cards");
- list_for_each(tmp, &cards) {
- card = list_entry(tmp, diva_card, list);
- diva_os_leave_spin_lock(&api_lock, &old_irql, "remove cards");
- d.request = card->d.request;
- divacapi_remove_card(&d);
- goto rescan;
- }
- diva_os_leave_spin_lock(&api_lock, &old_irql, "remove cards");
-}
-
-/*
- * sync_callback
- */
-static void sync_callback(ENTITY *e)
-{
- diva_os_spin_lock_magic_t old_irql;
-
- DBG_TRC(("cb:Id=%x,Rc=%x,Ind=%x", e->Id, e->Rc, e->Ind))
-
- diva_os_enter_spin_lock(&api_lock, &old_irql, "sync_callback");
- callback(e);
- diva_os_leave_spin_lock(&api_lock, &old_irql, "sync_callback");
-}
-
-/*
- * add a new card
- */
-static int diva_add_card(DESCRIPTOR *d)
-{
- int k = 0, i = 0;
- diva_os_spin_lock_magic_t old_irql;
- diva_card *card = NULL;
- struct capi_ctr *ctrl = NULL;
- DIVA_CAPI_ADAPTER *a = NULL;
- IDI_SYNC_REQ sync_req;
- char serial[16];
- void *mem_to_free;
- LI_CONFIG *new_li_config_table;
- int j;
-
- if (!(card = (diva_card *) diva_os_malloc(0, sizeof(diva_card)))) {
- DBG_ERR(("diva_add_card: failed to allocate card struct."))
- return (0);
- }
- memset((char *) card, 0x00, sizeof(diva_card));
- memcpy(&card->d, d, sizeof(DESCRIPTOR));
- sync_req.GetName.Req = 0;
- sync_req.GetName.Rc = IDI_SYNC_REQ_GET_NAME;
- card->d.request((ENTITY *)&sync_req);
- strlcpy(card->name, sync_req.GetName.name, sizeof(card->name));
- ctrl = &card->capi_ctrl;
- strcpy(ctrl->name, card->name);
- ctrl->register_appl = diva_register_appl;
- ctrl->release_appl = diva_release_appl;
- ctrl->send_message = diva_send_message;
- ctrl->procinfo = diva_procinfo;
- ctrl->driverdata = card;
- diva_os_set_controller_struct(ctrl);
-
- if (attach_capi_ctr(ctrl)) {
- DBG_ERR(("diva_add_card: failed to attach controller."))
- diva_os_free(0, card);
- return (0);
- }
-
- diva_os_enter_spin_lock(&api_lock, &old_irql, "find id");
- card->Id = find_free_id();
- diva_os_leave_spin_lock(&api_lock, &old_irql, "find id");
-
- strlcpy(ctrl->manu, M_COMPANY, sizeof(ctrl->manu));
- ctrl->version.majorversion = 2;
- ctrl->version.minorversion = 0;
- ctrl->version.majormanuversion = DRRELMAJOR;
- ctrl->version.minormanuversion = DRRELMINOR;
- sync_req.GetSerial.Req = 0;
- sync_req.GetSerial.Rc = IDI_SYNC_REQ_GET_SERIAL;
- sync_req.GetSerial.serial = 0;
- card->d.request((ENTITY *)&sync_req);
- if ((i = ((sync_req.GetSerial.serial & 0xff000000) >> 24))) {
- sprintf(serial, "%ld-%d",
- sync_req.GetSerial.serial & 0x00ffffff, i + 1);
- } else {
- sprintf(serial, "%ld", sync_req.GetSerial.serial);
- }
- serial[CAPI_SERIAL_LEN - 1] = 0;
- strlcpy(ctrl->serial, serial, sizeof(ctrl->serial));
-
- a = &adapter[card->Id - 1];
- card->adapter = a;
- a->os_card = card;
- ControllerMap[card->Id] = (byte) (ctrl->cnr);
-
- DBG_TRC(("AddAdapterMap (%d) -> (%d)", ctrl->cnr, card->Id))
-
- sync_req.xdi_capi_prms.Req = 0;
- sync_req.xdi_capi_prms.Rc = IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS;
- sync_req.xdi_capi_prms.info.structure_length =
- sizeof(diva_xdi_get_capi_parameters_t);
- card->d.request((ENTITY *)&sync_req);
- a->flag_dynamic_l1_down =
- sync_req.xdi_capi_prms.info.flag_dynamic_l1_down;
- a->group_optimization_enabled =
- sync_req.xdi_capi_prms.info.group_optimization_enabled;
- a->request = DIRequest; /* card->d.request; */
- a->max_plci = card->d.channels + 30;
- a->max_listen = (card->d.channels > 2) ? 8 : 2;
- if (!
- (a->plci =
- (PLCI *) diva_os_malloc(0, sizeof(PLCI) * a->max_plci))) {
- DBG_ERR(("diva_add_card: failed alloc plci struct."))
- memset(a, 0, sizeof(DIVA_CAPI_ADAPTER));
- return (0);
- }
- memset(a->plci, 0, sizeof(PLCI) * a->max_plci);
-
- for (k = 0; k < a->max_plci; k++) {
- a->Id = (byte) card->Id;
- a->plci[k].Sig.callback = sync_callback;
- a->plci[k].Sig.XNum = 1;
- a->plci[k].Sig.X = a->plci[k].XData;
- a->plci[k].Sig.user[0] = (word) (card->Id - 1);
- a->plci[k].Sig.user[1] = (word) k;
- a->plci[k].NL.callback = sync_callback;
- a->plci[k].NL.XNum = 1;
- a->plci[k].NL.X = a->plci[k].XData;
- a->plci[k].NL.user[0] = (word) ((card->Id - 1) | 0x8000);
- a->plci[k].NL.user[1] = (word) k;
- a->plci[k].adapter = a;
- }
-
- a->profile.Number = card->Id;
- a->profile.Channels = card->d.channels;
- if (card->d.features & DI_FAX3) {
- a->profile.Global_Options = 0x71;
- if (card->d.features & DI_CODEC)
- a->profile.Global_Options |= 0x6;
-#if IMPLEMENT_DTMF
- a->profile.Global_Options |= 0x8;
-#endif /* IMPLEMENT_DTMF */
- a->profile.Global_Options |= 0x80; /* Line Interconnect */
-#if IMPLEMENT_ECHO_CANCELLER
- a->profile.Global_Options |= 0x100;
-#endif /* IMPLEMENT_ECHO_CANCELLER */
- a->profile.B1_Protocols = 0xdf;
- a->profile.B2_Protocols = 0x1fdb;
- a->profile.B3_Protocols = 0xb7;
- a->manufacturer_features = MANUFACTURER_FEATURE_HARDDTMF;
- } else {
- a->profile.Global_Options = 0x71;
- if (card->d.features & DI_CODEC)
- a->profile.Global_Options |= 0x2;
- a->profile.B1_Protocols = 0x43;
- a->profile.B2_Protocols = 0x1f0f;
- a->profile.B3_Protocols = 0x07;
- a->manufacturer_features = 0;
- }
-
- a->li_pri = (a->profile.Channels > 2);
- a->li_channels = a->li_pri ? MIXER_CHANNELS_PRI : MIXER_CHANNELS_BRI;
- a->li_base = 0;
- for (i = 0; &adapter[i] != a; i++) {
- if (adapter[i].request)
- a->li_base = adapter[i].li_base + adapter[i].li_channels;
- }
- k = li_total_channels + a->li_channels;
- new_li_config_table =
- (LI_CONFIG *) diva_os_malloc(0, ((k * sizeof(LI_CONFIG) + 3) & ~3) + (2 * k) * ((k + 3) & ~3));
- if (new_li_config_table == NULL) {
- DBG_ERR(("diva_add_card: failed alloc li_config table."))
- memset(a, 0, sizeof(DIVA_CAPI_ADAPTER));
- return (0);
- }
-
- /* Prevent access to line interconnect table in process update */
- diva_os_enter_spin_lock(&api_lock, &old_irql, "add card");
-
- j = 0;
- for (i = 0; i < k; i++) {
- if ((i >= a->li_base) && (i < a->li_base + a->li_channels))
- memset(&new_li_config_table[i], 0, sizeof(LI_CONFIG));
- else
- memcpy(&new_li_config_table[i], &li_config_table[j], sizeof(LI_CONFIG));
- new_li_config_table[i].flag_table =
- ((byte *) new_li_config_table) + (((k * sizeof(LI_CONFIG) + 3) & ~3) + (2 * i) * ((k + 3) & ~3));
- new_li_config_table[i].coef_table =
- ((byte *) new_li_config_table) + (((k * sizeof(LI_CONFIG) + 3) & ~3) + (2 * i + 1) * ((k + 3) & ~3));
- if ((i >= a->li_base) && (i < a->li_base + a->li_channels)) {
- new_li_config_table[i].adapter = a;
- memset(&new_li_config_table[i].flag_table[0], 0, k);
- memset(&new_li_config_table[i].coef_table[0], 0, k);
- } else {
- if (a->li_base != 0) {
- memcpy(&new_li_config_table[i].flag_table[0],
- &li_config_table[j].flag_table[0],
- a->li_base);
- memcpy(&new_li_config_table[i].coef_table[0],
- &li_config_table[j].coef_table[0],
- a->li_base);
- }
- memset(&new_li_config_table[i].flag_table[a->li_base], 0, a->li_channels);
- memset(&new_li_config_table[i].coef_table[a->li_base], 0, a->li_channels);
- if (a->li_base + a->li_channels < k) {
- memcpy(&new_li_config_table[i].flag_table[a->li_base +
- a->li_channels],
- &li_config_table[j].flag_table[a->li_base],
- k - (a->li_base + a->li_channels));
- memcpy(&new_li_config_table[i].coef_table[a->li_base +
- a->li_channels],
- &li_config_table[j].coef_table[a->li_base],
- k - (a->li_base + a->li_channels));
- }
- j++;
- }
- }
- li_total_channels = k;
-
- mem_to_free = li_config_table;
-
- li_config_table = new_li_config_table;
- for (i = card->Id; i < max_adapter; i++) {
- if (adapter[i].request)
- adapter[i].li_base += a->li_channels;
- }
-
- if (a == &adapter[max_adapter])
- max_adapter++;
-
- list_add(&(card->list), &cards);
- AutomaticLaw(a);
-
- diva_os_leave_spin_lock(&api_lock, &old_irql, "add card");
-
- if (mem_to_free) {
- diva_os_free(0, mem_to_free);
- }
-
- i = 0;
- while (i++ < 30) {
- if (a->automatic_law > 3)
- break;
- diva_os_sleep(10);
- }
-
- /* profile information */
- PUT_WORD(&ctrl->profile.nbchannel, card->d.channels);
- ctrl->profile.goptions = a->profile.Global_Options;
- ctrl->profile.support1 = a->profile.B1_Protocols;
- ctrl->profile.support2 = a->profile.B2_Protocols;
- ctrl->profile.support3 = a->profile.B3_Protocols;
- /* manufacturer profile information */
- ctrl->profile.manu[0] = a->man_profile.private_options;
- ctrl->profile.manu[1] = a->man_profile.rtp_primary_payloads;
- ctrl->profile.manu[2] = a->man_profile.rtp_additional_payloads;
- ctrl->profile.manu[3] = 0;
- ctrl->profile.manu[4] = 0;
-
- capi_ctr_ready(ctrl);
-
- DBG_TRC(("adapter added, max_adapter=%d", max_adapter));
- return (1);
-}
-
-/*
- * register appl
- */
-static void diva_register_appl(struct capi_ctr *ctrl, __u16 appl,
- capi_register_params *rp)
-{
- APPL *this;
- word bnum, xnum;
- int i = 0;
- unsigned char *p;
- void *DataNCCI, *DataFlags, *ReceiveBuffer, *xbuffer_used;
- void **xbuffer_ptr, **xbuffer_internal;
- diva_os_spin_lock_magic_t old_irql;
- unsigned int mem_len;
- int nconn = rp->level3cnt;
-
-
- if (diva_os_in_irq()) {
- DBG_ERR(("CAPI_REGISTER - in irq context !"))
- return;
- }
-
- DBG_TRC(("application register Id=%d", appl))
-
- if (appl > MAX_APPL) {
- DBG_ERR(("CAPI_REGISTER - appl.Id exceeds MAX_APPL"))
- return;
- }
-
- if (nconn <= 0)
- nconn = ctrl->profile.nbchannel * -nconn;
-
- if (nconn == 0)
- nconn = ctrl->profile.nbchannel;
-
- DBG_LOG(("CAPI_REGISTER - Id = %d", appl))
- DBG_LOG((" MaxLogicalConnections = %d(%d)", nconn, rp->level3cnt))
- DBG_LOG((" MaxBDataBuffers = %d", rp->datablkcnt))
- DBG_LOG((" MaxBDataLength = %d", rp->datablklen))
-
- if (nconn < 1 ||
- nconn > 255 ||
- rp->datablklen < 80 ||
- rp->datablklen > 2150 || rp->datablkcnt > 255) {
- DBG_ERR(("CAPI_REGISTER - invalid parameters"))
- return;
- }
-
- if (application[appl - 1].Id == appl) {
- DBG_LOG(("CAPI_REGISTER - appl already registered"))
- return; /* appl already registered */
- }
-
- /* alloc memory */
-
- bnum = nconn * rp->datablkcnt;
- xnum = nconn * MAX_DATA_B3;
-
- mem_len = bnum * sizeof(word); /* DataNCCI */
- mem_len += bnum * sizeof(word); /* DataFlags */
- mem_len += bnum * rp->datablklen; /* ReceiveBuffer */
- mem_len += xnum; /* xbuffer_used */
- mem_len += xnum * sizeof(void *); /* xbuffer_ptr */
- mem_len += xnum * sizeof(void *); /* xbuffer_internal */
- mem_len += xnum * rp->datablklen; /* xbuffer_ptr[xnum] */
-
- DBG_LOG((" Allocated Memory = %d", mem_len))
- if (!(p = diva_os_malloc(0, mem_len))) {
- DBG_ERR(("CAPI_REGISTER - memory allocation failed"))
- return;
- }
- memset(p, 0, mem_len);
-
- DataNCCI = (void *)p;
- p += bnum * sizeof(word);
- DataFlags = (void *)p;
- p += bnum * sizeof(word);
- ReceiveBuffer = (void *)p;
- p += bnum * rp->datablklen;
- xbuffer_used = (void *)p;
- p += xnum;
- xbuffer_ptr = (void **)p;
- p += xnum * sizeof(void *);
- xbuffer_internal = (void **)p;
- p += xnum * sizeof(void *);
- for (i = 0; i < xnum; i++) {
- xbuffer_ptr[i] = (void *)p;
- p += rp->datablklen;
- }
-
- /* initialize application data */
- diva_os_enter_spin_lock(&api_lock, &old_irql, "register_appl");
-
- this = &application[appl - 1];
- memset(this, 0, sizeof(APPL));
-
- this->Id = appl;
-
- for (i = 0; i < max_adapter; i++) {
- adapter[i].CIP_Mask[appl - 1] = 0;
- }
-
- this->queue_size = 1000;
-
- this->MaxNCCI = (byte) nconn;
- this->MaxNCCIData = (byte) rp->datablkcnt;
- this->MaxBuffer = bnum;
- this->MaxDataLength = rp->datablklen;
-
- this->DataNCCI = DataNCCI;
- this->DataFlags = DataFlags;
- this->ReceiveBuffer = ReceiveBuffer;
- this->xbuffer_used = xbuffer_used;
- this->xbuffer_ptr = xbuffer_ptr;
- this->xbuffer_internal = xbuffer_internal;
- for (i = 0; i < xnum; i++) {
- this->xbuffer_ptr[i] = xbuffer_ptr[i];
- }
-
- CapiRegister(this->Id);
- diva_os_leave_spin_lock(&api_lock, &old_irql, "register_appl");
-
-}
-
-/*
- * release appl
- */
-static void diva_release_appl(struct capi_ctr *ctrl, __u16 appl)
-{
- diva_os_spin_lock_magic_t old_irql;
- APPL *this = &application[appl - 1];
- void *mem_to_free = NULL;
-
- DBG_TRC(("application %d(%d) cleanup", this->Id, appl))
-
- if (diva_os_in_irq()) {
- DBG_ERR(("CAPI_RELEASE - in irq context !"))
- return;
- }
-
- diva_os_enter_spin_lock(&api_lock, &old_irql, "release_appl");
- if (this->Id) {
- CapiRelease(this->Id);
- mem_to_free = this->DataNCCI;
- this->DataNCCI = NULL;
- this->Id = 0;
- }
- diva_os_leave_spin_lock(&api_lock, &old_irql, "release_appl");
-
- if (mem_to_free)
- diva_os_free(0, mem_to_free);
-
-}
-
-/*
- * send message
- */
-static u16 diva_send_message(struct capi_ctr *ctrl,
- diva_os_message_buffer_s *dmb)
-{
- int i = 0;
- word ret = 0;
- diva_os_spin_lock_magic_t old_irql;
- CAPI_MSG *msg = (CAPI_MSG *) DIVA_MESSAGE_BUFFER_DATA(dmb);
- APPL *this = &application[GET_WORD(&msg->header.appl_id) - 1];
- diva_card *card = ctrl->driverdata;
- __u32 length = DIVA_MESSAGE_BUFFER_LEN(dmb);
- word clength = GET_WORD(&msg->header.length);
- word command = GET_WORD(&msg->header.command);
- u16 retval = CAPI_NOERROR;
-
- if (diva_os_in_irq()) {
- DBG_ERR(("CAPI_SEND_MSG - in irq context !"))
- return CAPI_REGOSRESOURCEERR;
- }
- DBG_PRV1(("Write - appl = %d, cmd = 0x%x", this->Id, command))
-
- if (card->remove_in_progress) {
- DBG_ERR(("CAPI_SEND_MSG - remove in progress!"))
- return CAPI_REGOSRESOURCEERR;
- }
-
- diva_os_enter_spin_lock(&api_lock, &old_irql, "send message");
-
- if (!this->Id) {
- diva_os_leave_spin_lock(&api_lock, &old_irql, "send message");
- return CAPI_ILLAPPNR;
- }
-
- /* patch controller number */
- msg->header.controller = ControllerMap[card->Id]
- | (msg->header.controller & 0x80); /* preserve external controller bit */
-
- switch (command) {
- default:
- xlog("\x00\x02", msg, 0x80, clength);
- break;
-
- case _DATA_B3_I | RESPONSE:
-#ifndef DIVA_NO_DEBUGLIB
- if (myDriverDebugHandle.dbgMask & DL_BLK)
- xlog("\x00\x02", msg, 0x80, clength);
-#endif
- break;
-
- case _DATA_B3_R:
-#ifndef DIVA_NO_DEBUGLIB
- if (myDriverDebugHandle.dbgMask & DL_BLK)
- xlog("\x00\x02", msg, 0x80, clength);
-#endif
-
- if (clength == 24)
- clength = 22; /* workaround for PPcom bug */
- /* header is always 22 */
- if (GET_WORD(&msg->info.data_b3_req.Data_Length) >
- this->MaxDataLength
- || GET_WORD(&msg->info.data_b3_req.Data_Length) >
- (length - clength)) {
- DBG_ERR(("Write - invalid message size"))
- retval = CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
- goto write_end;
- }
-
- for (i = 0; i < (MAX_DATA_B3 * this->MaxNCCI)
- && this->xbuffer_used[i]; i++);
- if (i == (MAX_DATA_B3 * this->MaxNCCI)) {
- DBG_ERR(("Write - too many data pending"))
- retval = CAPI_SENDQUEUEFULL;
- goto write_end;
- }
- msg->info.data_b3_req.Data = i;
-
- this->xbuffer_internal[i] = NULL;
- memcpy(this->xbuffer_ptr[i], &((__u8 *) msg)[clength],
- GET_WORD(&msg->info.data_b3_req.Data_Length));
-
-#ifndef DIVA_NO_DEBUGLIB
- if ((myDriverDebugHandle.dbgMask & DL_BLK)
- && (myDriverDebugHandle.dbgMask & DL_XLOG)) {
- int j;
- for (j = 0; j <
- GET_WORD(&msg->info.data_b3_req.Data_Length);
- j += 256) {
- DBG_BLK((((char *) this->xbuffer_ptr[i]) + j,
- ((GET_WORD(&msg->info.data_b3_req.Data_Length) - j) <
- 256) ? (GET_WORD(&msg->info.data_b3_req.Data_Length) - j) : 256))
- if (!(myDriverDebugHandle.dbgMask & DL_PRV0))
-					break; /* no more unless explicitly requested */
- }
- }
-#endif
- break;
- }
-
- memcpy(mapped_msg, msg, (__u32) clength);
- mapped_msg->header.controller = MapController(mapped_msg->header.controller);
- mapped_msg->header.length = clength;
- mapped_msg->header.command = command;
- mapped_msg->header.number = GET_WORD(&msg->header.number);
-
- ret = api_put(this, mapped_msg);
- switch (ret) {
- case 0:
- break;
- case _BAD_MSG:
- DBG_ERR(("Write - bad message"))
- retval = CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
- break;
- case _QUEUE_FULL:
- DBG_ERR(("Write - queue full"))
- retval = CAPI_SENDQUEUEFULL;
- break;
- default:
- DBG_ERR(("Write - api_put returned unknown error"))
- retval = CAPI_UNKNOWNNOTPAR;
- break;
- }
-
-write_end:
- diva_os_leave_spin_lock(&api_lock, &old_irql, "send message");
- if (retval == CAPI_NOERROR)
- diva_os_free_message_buffer(dmb);
- return retval;
-}
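
The send path above claims a transmit buffer by scanning xbuffer_used[] for the first clear slot and fails with CAPI_SENDQUEUEFULL when none is free. A standalone, runnable sketch of that slot-search idiom (MAX_SLOTS and the used[] array are illustrative stand-ins for MAX_DATA_B3 * MaxNCCI and xbuffer_used[], not driver symbols):

#include <stdio.h>

#define MAX_SLOTS 8

static int find_free_slot(const unsigned char *used, int nslots)
{
	int i;

	/* first index whose 'used' flag is clear, or -1 if none */
	for (i = 0; i < nslots && used[i]; i++)
		;
	return (i == nslots) ? -1 : i;
}

int main(void)
{
	unsigned char used[MAX_SLOTS] = { 1, 1, 0, 1 };
	int slot = find_free_slot(used, MAX_SLOTS);

	if (slot < 0)
		printf("queue full\n");		/* maps to CAPI_SENDQUEUEFULL */
	else
		printf("using slot %d\n", slot);	/* prints "using slot 2" */
	return 0;
}
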
-
-
-/*
- * cards request function
- */
-static void DIRequest(ENTITY *e)
-{
- DIVA_CAPI_ADAPTER *a = &(adapter[(byte) e->user[0]]);
- diva_card *os_card = (diva_card *) a->os_card;
-
- if (e->Req && (a->FlowControlIdTable[e->ReqCh] == e->Id)) {
- a->FlowControlSkipTable[e->ReqCh] = 1;
- }
-
- (*(os_card->d.request)) (e);
-}
-
-/*
- * callback function from didd
- */
-static void didd_callback(void *context, DESCRIPTOR *adapter, int removal)
-{
- if (adapter->type == IDI_DADAPTER) {
- DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."));
- return;
- } else if (adapter->type == IDI_DIMAINT) {
- if (removal) {
- stop_dbg();
- } else {
- memcpy(&MAdapter, adapter, sizeof(MAdapter));
- dprintf = (DIVA_DI_PRINTF) MAdapter.request;
- DbgRegister("CAPI20", DRIVERRELEASE_CAPI, DBG_DEFAULT);
- }
- } else if ((adapter->type > 0) && (adapter->type < 16)) { /* IDI Adapter */
- if (removal) {
- divacapi_remove_card(adapter);
- } else {
- diva_add_card(adapter);
- }
- }
- return;
-}
-
-/*
- * connect to didd
- */
-static int divacapi_connect_didd(void)
-{
- int x = 0;
- int dadapter = 0;
- IDI_SYNC_REQ req;
- DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
-
- DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
-
- for (x = 0; x < MAX_DESCRIPTORS; x++) {
- if (DIDD_Table[x].type == IDI_DIMAINT) { /* MAINT found */
- memcpy(&MAdapter, &DIDD_Table[x], sizeof(MAdapter));
- dprintf = (DIVA_DI_PRINTF) MAdapter.request;
- DbgRegister("CAPI20", DRIVERRELEASE_CAPI, DBG_DEFAULT);
- break;
- }
- }
- for (x = 0; x < MAX_DESCRIPTORS; x++) {
- if (DIDD_Table[x].type == IDI_DADAPTER) { /* DADAPTER found */
- dadapter = 1;
- memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter));
- req.didd_notify.e.Req = 0;
- req.didd_notify.e.Rc =
- IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
- req.didd_notify.info.callback = (void *)didd_callback;
- req.didd_notify.info.context = NULL;
- DAdapter.request((ENTITY *)&req);
- if (req.didd_notify.e.Rc != 0xff) {
- stop_dbg();
- return (0);
- }
- notify_handle = req.didd_notify.info.handle;
- }
- else if ((DIDD_Table[x].type > 0) && (DIDD_Table[x].type < 16)) { /* IDI Adapter found */
- diva_add_card(&DIDD_Table[x]);
- }
- }
-
- if (!dadapter) {
- stop_dbg();
- }
-
- return (dadapter);
-}
-
-/*
- * disconnect from didd
- */
-static void divacapi_disconnect_didd(void)
-{
- IDI_SYNC_REQ req;
-
- stop_dbg();
-
- req.didd_notify.e.Req = 0;
- req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
- req.didd_notify.info.handle = notify_handle;
- DAdapter.request((ENTITY *)&req);
-}
-
-/*
- * we do not provide date/time here,
- * the application should do this.
- */
-int fax_head_line_time(char *buffer)
-{
- return (0);
-}
-
-/*
- * init (alloc) main structures
- */
-static int __init init_main_structs(void)
-{
- if (!(mapped_msg = (CAPI_MSG *) diva_os_malloc(0, MAX_MSG_SIZE))) {
- DBG_ERR(("init: failed alloc mapped_msg."))
- return 0;
- }
-
- if (!(adapter = diva_os_malloc(0, sizeof(DIVA_CAPI_ADAPTER) * MAX_DESCRIPTORS))) {
- DBG_ERR(("init: failed alloc adapter struct."))
- diva_os_free(0, mapped_msg);
- return 0;
- }
- memset(adapter, 0, sizeof(DIVA_CAPI_ADAPTER) * MAX_DESCRIPTORS);
-
- if (!(application = diva_os_malloc(0, sizeof(APPL) * MAX_APPL))) {
- DBG_ERR(("init: failed alloc application struct."))
- diva_os_free(0, mapped_msg);
- diva_os_free(0, adapter);
- return 0;
- }
- memset(application, 0, sizeof(APPL) * MAX_APPL);
-
- return (1);
-}
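
A hedged alternative sketch of the same three allocations in the kernel's usual goto-unwind style; the intent is to match init_main_structs() above, and all names used are the driver's own globals:

static int __init init_main_structs_sketch(void)
{
	if (!(mapped_msg = (CAPI_MSG *) diva_os_malloc(0, MAX_MSG_SIZE)))
		goto err;
	if (!(adapter = diva_os_malloc(0, sizeof(DIVA_CAPI_ADAPTER) * MAX_DESCRIPTORS)))
		goto err_msg;
	memset(adapter, 0, sizeof(DIVA_CAPI_ADAPTER) * MAX_DESCRIPTORS);
	if (!(application = diva_os_malloc(0, sizeof(APPL) * MAX_APPL)))
		goto err_adapter;
	memset(application, 0, sizeof(APPL) * MAX_APPL);
	return 1;

err_adapter:
	diva_os_free(0, adapter);	/* unwind in reverse order */
err_msg:
	diva_os_free(0, mapped_msg);
err:
	return 0;
}
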
-
-/*
- * remove (free) main structures
- */
-static void remove_main_structs(void)
-{
- if (application)
- diva_os_free(0, application);
- if (adapter)
- diva_os_free(0, adapter);
- if (mapped_msg)
- diva_os_free(0, mapped_msg);
-}
-
-/*
- * api_remove_start
- */
-static void do_api_remove_start(void)
-{
- diva_os_spin_lock_magic_t old_irql;
- int ret = 1, count = 100;
-
- do {
- diva_os_enter_spin_lock(&api_lock, &old_irql, "api remove start");
- ret = api_remove_start();
- diva_os_leave_spin_lock(&api_lock, &old_irql, "api remove start");
-
- diva_os_sleep(10);
- } while (ret && count--);
-
- if (ret)
- DBG_ERR(("could not remove signaling ID's"))
-}
-
-/*
- * init
- */
-int __init init_capifunc(void)
-{
- diva_os_initialize_spin_lock(&api_lock, "capifunc");
- memset(ControllerMap, 0, MAX_DESCRIPTORS + 1);
- max_adapter = 0;
-
- if (!init_main_structs()) {
- DBG_ERR(("init: failed to init main structs."))
- diva_os_destroy_spin_lock(&api_lock, "capifunc");
- return (0);
- }
-
- if (!divacapi_connect_didd()) {
- DBG_ERR(("init: failed to connect to DIDD."))
- do_api_remove_start();
- divacapi_remove_cards();
- remove_main_structs();
- diva_os_destroy_spin_lock(&api_lock, "capifunc");
- return (0);
- }
-
- return (1);
-}
-
-/*
- * finit
- */
-void __exit finit_capifunc(void)
-{
- do_api_remove_start();
- divacapi_disconnect_didd();
- divacapi_remove_cards();
- remove_main_structs();
- diva_os_destroy_spin_lock(&api_lock, "capifunc");
-}
diff --git a/drivers/isdn/hardware/eicon/capifunc.h b/drivers/isdn/hardware/eicon/capifunc.h
deleted file mode 100644
index e96c45bb5638..000000000000
--- a/drivers/isdn/hardware/eicon/capifunc.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* $Id: capifunc.h,v 1.11.4.1 2004/08/28 20:03:53 armin Exp $
- *
- * ISDN interface module for Eicon active cards DIVA.
- * CAPI Interface common functions
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#ifndef __CAPIFUNC_H__
-#define __CAPIFUNC_H__
-
-#define DRRELMAJOR 2
-#define DRRELMINOR 0
-#define DRRELEXTRA ""
-
-#define M_COMPANY "Eicon Networks"
-
-extern char DRIVERRELEASE_CAPI[];
-
-typedef struct _diva_card {
- struct list_head list;
- int remove_in_progress;
- int Id;
- struct capi_ctr capi_ctrl;
- DIVA_CAPI_ADAPTER *adapter;
- DESCRIPTOR d;
- char name[32];
-} diva_card;
-
-/*
- * prototypes
- */
-int init_capifunc(void);
-void finit_capifunc(void);
-
-#endif /* __CAPIFUNC_H__ */
diff --git a/drivers/isdn/hardware/eicon/capimain.c b/drivers/isdn/hardware/eicon/capimain.c
deleted file mode 100644
index f9244dc1c3c9..000000000000
--- a/drivers/isdn/hardware/eicon/capimain.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/* $Id: capimain.c,v 1.24 2003/09/09 06:51:05 schindler Exp $
- *
- * ISDN interface module for Eicon active cards DIVA.
- * CAPI Interface
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/uaccess.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-
-#include "os_capi.h"
-
-#include "platform.h"
-#include "di_defs.h"
-#include "capi20.h"
-#include "divacapi.h"
-#include "cp_vers.h"
-#include "capifunc.h"
-
-static char *main_revision = "$Revision: 1.24 $";
-static char *DRIVERNAME =
- "Eicon DIVA - CAPI Interface driver (http://www.melware.net)";
-static char *DRIVERLNAME = "divacapi";
-
-MODULE_DESCRIPTION("CAPI driver for Eicon DIVA cards");
-MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
-MODULE_SUPPORTED_DEVICE("CAPI and DIVA card drivers");
-MODULE_LICENSE("GPL");
-
-/*
- * get revision number from revision string
- */
-static char *getrev(const char *revision)
-{
- char *rev;
- char *p;
- if ((p = strchr(revision, ':'))) {
- rev = p + 2;
- p = strchr(rev, '$');
- *--p = 0;
- } else
- rev = "1.0";
- return rev;
-}
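
getrev() modifies its argument in place (it NUL-terminates just before the trailing '$'), so it must be handed a writable copy of the CVS keyword string; that is why divacapi_init() below copies main_revision into a local buffer first. A small hypothetical usage sketch:

static void print_revision_example(void)
{
	char tmprev[32];

	strcpy(tmprev, "$Revision: 1.24 $");	/* writable copy */
	printk(KERN_INFO "rev %s\n", getrev(tmprev));	/* "rev 1.24" */
}
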
-
-/*
- * alloc a message buffer
- */
-diva_os_message_buffer_s *diva_os_alloc_message_buffer(unsigned long size,
- void **data_buf)
-{
- diva_os_message_buffer_s *dmb = alloc_skb(size, GFP_ATOMIC);
- if (dmb) {
- *data_buf = skb_put(dmb, size);
- }
- return (dmb);
-}
-
-/*
- * free a message buffer
- */
-void diva_os_free_message_buffer(diva_os_message_buffer_s *dmb)
-{
- kfree_skb(dmb);
-}
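
The two helpers above are thin wrappers around alloc_skb()/kfree_skb(), so every successful allocation must eventually be paired with a free. A hedged usage sketch (copy_into_message_buffer, payload, and len are illustrative names, not driver symbols):

static int copy_into_message_buffer(const void *payload, unsigned long len)
{
	void *data;
	diva_os_message_buffer_s *dmb =
		diva_os_alloc_message_buffer(len, &data);

	if (!dmb)
		return -ENOMEM;		/* GFP_ATOMIC allocation can fail */

	memcpy(data, payload, len);	/* data points into the skb */
	/* ... hand dmb to the send path, or release it on error: */
	diva_os_free_message_buffer(dmb);
	return 0;
}
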
-
-/*
- * proc function for controller info
- */
-static int diva_ctl_proc_show(struct seq_file *m, void *v)
-{
- struct capi_ctr *ctrl = m->private;
- diva_card *card = (diva_card *) ctrl->driverdata;
-
- seq_printf(m, "%s\n", ctrl->name);
- seq_printf(m, "Serial No. : %s\n", ctrl->serial);
- seq_printf(m, "Id : %d\n", card->Id);
- seq_printf(m, "Channels : %d\n", card->d.channels);
-
- return 0;
-}
-
-/*
- * set additional os settings in capi_ctr struct
- */
-void diva_os_set_controller_struct(struct capi_ctr *ctrl)
-{
- ctrl->driver_name = DRIVERLNAME;
- ctrl->load_firmware = NULL;
- ctrl->reset_ctr = NULL;
- ctrl->proc_show = diva_ctl_proc_show;
- ctrl->owner = THIS_MODULE;
-}
-
-/*
- * module init
- */
-static int __init divacapi_init(void)
-{
- char tmprev[32];
- int ret = 0;
-
- sprintf(DRIVERRELEASE_CAPI, "%d.%d%s", DRRELMAJOR, DRRELMINOR,
- DRRELEXTRA);
-
- printk(KERN_INFO "%s\n", DRIVERNAME);
- printk(KERN_INFO "%s: Rel:%s Rev:", DRIVERLNAME, DRIVERRELEASE_CAPI);
- strcpy(tmprev, main_revision);
- printk("%s Build: %s(%s)\n", getrev(tmprev),
- diva_capi_common_code_build, DIVA_BUILD);
-
- if (!(init_capifunc())) {
- printk(KERN_ERR "%s: failed init capi_driver.\n",
- DRIVERLNAME);
- ret = -EIO;
- }
-
- return ret;
-}
-
-/*
- * module exit
- */
-static void __exit divacapi_exit(void)
-{
- finit_capifunc();
- printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
-}
-
-module_init(divacapi_init);
-module_exit(divacapi_exit);
diff --git a/drivers/isdn/hardware/eicon/cardtype.h b/drivers/isdn/hardware/eicon/cardtype.h
deleted file mode 100644
index 8b20e22cae1e..000000000000
--- a/drivers/isdn/hardware/eicon/cardtype.h
+++ /dev/null
@@ -1,1098 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef _CARDTYPE_H_
-#define _CARDTYPE_H_
-#ifndef CARDTYPE_H_WANT_DATA
-#define CARDTYPE_H_WANT_DATA 0
-#endif
-#ifndef CARDTYPE_H_WANT_IDI_DATA
-#define CARDTYPE_H_WANT_IDI_DATA 0
-#endif
-#ifndef CARDTYPE_H_WANT_RESOURCE_DATA
-#define CARDTYPE_H_WANT_RESOURCE_DATA 1
-#endif
-#ifndef CARDTYPE_H_WANT_FILE_DATA
-#define CARDTYPE_H_WANT_FILE_DATA 1
-#endif
-/*
- * D-channel protocol identifiers
- *
- * Attention: Unfortunately the identifiers defined here differ from
- * the identifiers used in Protocol/1/Common/prot/q931.h .
- * The only reason for this is that q931.h does not have global
- * scope and we did not know about the definitions there.
- * But the definitions here cannot be changed easily because
- * they are used in setup scripts and programs.
- * Thus the definitions here have to be mapped if they are
- * used in the protocol code context !
- *
- * Now the identifiers are defined in the q931lib/constant.h file.
- * Unfortunately this file does not have global scope either.
- * But beginning with PROTTYPE_US any new identifier will get the same
- * value as the corresponding PROT_* definition in q931lib/constant.h !
- */
-#define PROTTYPE_MINVAL 0
-#define PROTTYPE_ETSI 0
-#define PROTTYPE_1TR6 1
-#define PROTTYPE_BELG 2
-#define PROTTYPE_FRANC 3
-#define PROTTYPE_ATEL 4
-#define PROTTYPE_NI 5 /* DMS 100, Nortel, National ISDN */
-#define PROTTYPE_5ESS 6 /* 5ESS , AT&T, 5ESS Custom */
-#define PROTTYPE_JAPAN 7
-#define PROTTYPE_SWED 8
-#define PROTTYPE_US 9 /* US autodetect */
-#define PROTTYPE_ITALY 10
-#define PROTTYPE_TWAN 11
-#define PROTTYPE_AUSTRAL 12
-#define PROTTYPE_4ESDN 13
-#define PROTTYPE_4ESDS 14
-#define PROTTYPE_4ELDS 15
-#define PROTTYPE_4EMGC 16
-#define PROTTYPE_4EMGI 17
-#define PROTTYPE_HONGKONG 18
-#define PROTTYPE_RBSCAS 19
-#define PROTTYPE_CORNETN 20
-#define PROTTYPE_QSIG 21
-#define PROTTYPE_NI_EWSD 22 /* EWSD, Siemens, National ISDN */
-#define PROTTYPE_5ESS_NI 23 /* 5ESS, Lucent, National ISDN */
-#define PROTTYPE_T1CORNETN 24
-#define PROTTYPE_CORNETNQ 25
-#define PROTTYPE_T1CORNETNQ 26
-#define PROTTYPE_T1QSIG 27
-#define PROTTYPE_E1UNCH 28
-#define PROTTYPE_T1UNCH 29
-#define PROTTYPE_E1CHAN 30
-#define PROTTYPE_T1CHAN 31
-#define PROTTYPE_R2CAS 32
-#define PROTTYPE_MAXVAL 32
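
As the comment above warns, these PROTTYPE_* values must be translated before use in the protocol code. A purely hypothetical sketch of such a mapping; the internal numbers returned here are placeholders, not the real q931lib/constant.h values:

static int prottype_to_internal(int prottype)
{
	switch (prottype) {
	case PROTTYPE_ETSI:	return 0;	/* placeholder for PROT_ETSI */
	case PROTTYPE_1TR6:	return 1;	/* placeholder for PROT_1TR6 */
	case PROTTYPE_NI:	return 5;	/* placeholder for PROT_NI */
	default:
		return -1;			/* unknown / unmapped */
	}
}
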
-/*
- * Card type identifiers
- */
-#define CARD_UNKNOWN 0
-#define CARD_NONE 0
-/* DIVA cards */
-#define CARDTYPE_DIVA_MCA 0
-#define CARDTYPE_DIVA_ISA 1
-#define CARDTYPE_DIVA_PCM 2
-#define CARDTYPE_DIVAPRO_ISA 3
-#define CARDTYPE_DIVAPRO_PCM 4
-#define CARDTYPE_DIVAPICO_ISA 5
-#define CARDTYPE_DIVAPICO_PCM 6
-/* DIVA 2.0 cards */
-#define CARDTYPE_DIVAPRO20_PCI 7
-#define CARDTYPE_DIVA20_PCI 8
-/* S cards */
-#define CARDTYPE_QUADRO_ISA 9
-#define CARDTYPE_S_ISA 10
-#define CARDTYPE_S_MCA 11
-#define CARDTYPE_SX_ISA 12
-#define CARDTYPE_SX_MCA 13
-#define CARDTYPE_SXN_ISA 14
-#define CARDTYPE_SXN_MCA 15
-#define CARDTYPE_SCOM_ISA 16
-#define CARDTYPE_SCOM_MCA 17
-#define CARDTYPE_PR_ISA 18
-#define CARDTYPE_PR_MCA 19
-/* Diva Server cards (formerly called Maestra, later Amadeo) */
-#define CARDTYPE_MAESTRA_ISA 20
-#define CARDTYPE_MAESTRA_PCI 21
-/* Diva Server cards to be developed (Quadro, Primary rate) */
-#define CARDTYPE_DIVASRV_Q_8M_PCI 22
-#define CARDTYPE_DIVASRV_P_30M_PCI 23
-#define CARDTYPE_DIVASRV_P_2M_PCI 24
-#define CARDTYPE_DIVASRV_P_9M_PCI 25
-/* DIVA 2.0 cards */
-#define CARDTYPE_DIVA20_ISA 26
-#define CARDTYPE_DIVA20U_ISA 27
-#define CARDTYPE_DIVA20U_PCI 28
-#define CARDTYPE_DIVAPRO20_ISA 29
-#define CARDTYPE_DIVAPRO20U_ISA 30
-#define CARDTYPE_DIVAPRO20U_PCI 31
-/* DIVA combi cards (piccola ISDN + rockwell V.34 modem) */
-#define CARDTYPE_DIVAMOBILE_PCM 32
-#define CARDTYPE_TDKGLOBALPRO_PCM 33
-/* DIVA Pro PC OEM card for 'New Media Corporation' */
-#define CARDTYPE_NMC_DIVAPRO_PCM 34
-/* DIVA Pro 2.0 OEM cards for 'British Telecom' */
-#define CARDTYPE_BT_EXLANE_PCI 35
-#define CARDTYPE_BT_EXLANE_ISA 36
-/* DIVA low cost cards, 1st name DIVA 3.0, 2nd DIVA 2.01, 3rd ??? */
-#define CARDTYPE_DIVALOW_ISA 37
-#define CARDTYPE_DIVALOWU_ISA 38
-#define CARDTYPE_DIVALOW_PCI 39
-#define CARDTYPE_DIVALOWU_PCI 40
-/* DIVA combi cards (piccola ISDN + rockwell V.90 modem) */
-#define CARDTYPE_DIVAMOBILE_V90_PCM 41
-#define CARDTYPE_TDKGLOBPRO_V90_PCM 42
-#define CARDTYPE_DIVASRV_P_23M_PCI 43
-#define CARDTYPE_DIVALOW_USB 44
-/* DIVA Audio (CT) family */
-#define CARDTYPE_DIVA_CT_ST 45
-#define CARDTYPE_DIVA_CT_U 46
-#define CARDTYPE_DIVA_CTLITE_ST 47
-#define CARDTYPE_DIVA_CTLITE_U 48
-/* DIVA ISDN plus V.90 series */
-#define CARDTYPE_DIVAISDN_V90_PCM 49
-#define CARDTYPE_DIVAISDN_V90_PCI 50
-#define CARDTYPE_DIVAISDN_TA 51
-/* DIVA Server Voice cards */
-#define CARDTYPE_DIVASRV_VOICE_Q_8M_PCI 52
-/* DIVA Server V2 cards */
-#define CARDTYPE_DIVASRV_Q_8M_V2_PCI 53
-#define CARDTYPE_DIVASRV_P_30M_V2_PCI 54
-/* DIVA Server Voice V2 cards */
-#define CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI 55
-#define CARDTYPE_DIVASRV_VOICE_P_30M_V2_PCI 56
-/* Diva LAN */
-#define CARDTYPE_DIVAISDN_LAN 57
-#define CARDTYPE_DIVA_202_PCI_ST 58
-#define CARDTYPE_DIVA_202_PCI_U 59
-#define CARDTYPE_DIVASRV_B_2M_V2_PCI 60
-#define CARDTYPE_DIVASRV_B_2F_PCI 61
-#define CARDTYPE_DIVALOW_USBV2 62
-#define CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI 63
-#define CARDTYPE_DIVA_PRO_30_PCI_ST 64
-#define CARDTYPE_DIVA_CT_ST_V20 65
-/* Diva Mobile V.90 PC Card and Diva ISDN PC Card */
-#define CARDTYPE_DIVAMOBILE_V2_PCM 66
-#define CARDTYPE_DIVA_V2_PCM 67
-/* Re-badged Diva Pro PC Card */
-#define CARDTYPE_DIVA_PC_CARD 68
-/* next free card type identifier */
-#define CARDTYPE_MAX 69
-/*
- * The card families
- */
-#define FAMILY_DIVA 1
-#define FAMILY_S 2
-#define FAMILY_MAESTRA 3
-#define FAMILY_MAX 4
-/*
- * The basic card types
- */
-#define CARD_DIVA 1 /* DSP based, old DSP */
-#define CARD_PRO 2 /* DSP based, new DSP */
-#define CARD_PICO 3 /* HSCX based */
-#define CARD_S 4 /* IDI on board based */
-#define CARD_SX 5 /* IDI on board based */
-#define CARD_SXN 6 /* IDI on board based */
-#define CARD_SCOM 7 /* IDI on board based */
-#define CARD_QUAD 8 /* IDI on board based */
-#define CARD_PR 9 /* IDI on board based */
-#define CARD_MAE 10 /* IDI on board based */
-#define CARD_MAEQ 11 /* IDI on board based */
-#define CARD_MAEP 12 /* IDI on board based */
-#define CARD_DIVALOW 13 /* IPAC based */
-#define CARD_CT 14 /* SCOUT based */
-#define CARD_DIVATA 15 /* DIVA TA */
-#define CARD_DIVALAN 16 /* DIVA LAN */
-#define CARD_MAE2 17 /* IDI on board based */
-#define CARD_MAX 18
-/*
- * The internal card types of the S family
- */
-#define CARD_I_NONE 0
-#define CARD_I_S 0
-#define CARD_I_SX 1
-#define CARD_I_SCOM 2
-#define CARD_I_QUAD 3
-#define CARD_I_PR 4
-/*
- * The bus types we support
- */
-#define BUS_ISA 1
-#define BUS_PCM 2
-#define BUS_PCI 3
-#define BUS_MCA 4
-#define BUS_USB 5
-#define BUS_COM 6
-#define BUS_LAN 7
-/*
- * The chips we use for B-channel traffic
- */
-#define CHIP_NONE 0
-#define CHIP_DSP 1
-#define CHIP_HSCX 2
-#define CHIP_IPAC 3
-#define CHIP_SCOUT 4
-#define CHIP_EXTERN 5
-#define CHIP_IPACX 6
-/*
- * The structures where the card properties are aggregated by id
- */
-typedef struct CARD_PROPERTIES
-{ char *Name; /* official marketing name */
- unsigned short PnPId; /* plug and play ID (for non-PCMCIA cards) */
- unsigned short Version; /* major and minor version no of the card */
- unsigned char DescType; /* card type to set in the IDI descriptor */
- unsigned char Family; /* basic family of the card */
- unsigned short Features; /* features bits to set in the IDI desc. */
- unsigned char Card; /* basic card type */
- unsigned char IType; /* internal type of S cards (read from ram) */
- unsigned char Bus; /* bus type this card is designed for */
- unsigned char Chip; /* chipset used on card */
- unsigned char Adapters; /* number of adapters on card */
- unsigned char Channels; /* # of channels per adapter */
- unsigned short E_info; /* # of ram entity info structs per adapter */
- unsigned short SizeIo; /* size of IO window per adapter */
- unsigned short SizeMem; /* size of memory window per adapter */
-} CARD_PROPERTIES;
-typedef struct CARD_RESOURCE
-{ unsigned char Int[10];
- unsigned short IoFirst;
- unsigned short IoStep;
- unsigned short IoCnt;
- unsigned long MemFirst;
- unsigned long MemStep;
- unsigned short MemCnt;
-} CARD_RESOURCE;
-/* test if the card of type 't' is a plug & play card */
-#define IS_PNP(t) \
- ( \
- ( \
- CardProperties[t].Bus != BUS_ISA \
- && \
- CardProperties[t].Bus != BUS_MCA \
- ) \
- || \
- ( \
- CardProperties[t].Family != FAMILY_S \
- && \
- CardProperties[t].Card != CARD_DIVA \
- ) \
- )
-/* extract IDI Descriptor info for card type 't' (p == DescType/Features) */
-#define IDI_PROP(t, p) (CardProperties[t].p)
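
Both macros assume CardProperties[] is visible, i.e. the including file set CARDTYPE_H_WANT_DATA. An illustrative (not original) usage sketch:

static unsigned char idi_desc_type_of(int t)
{
	return IDI_PROP(t, DescType);	/* macro expands to a member access */
}

static int card_needs_manual_setup(int t)
{
	/* !IS_PNP(t): ISA/MCA bus and S-family or first-generation Diva */
	return !IS_PNP(t);
}
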
-#if CARDTYPE_H_WANT_DATA
-#if CARDTYPE_H_WANT_IDI_DATA
-/* include "di_defs.h" for IDI adapter type and feature flag definitions */
-#include "di_defs.h"
-#else /*!CARDTYPE_H_WANT_IDI_DATA*/
-/* define IDI adapter types and feature flags here to prevent inclusion */
-#ifndef IDI_ADAPTER_S
-#define IDI_ADAPTER_S 1
-#define IDI_ADAPTER_PR 2
-#define IDI_ADAPTER_DIVA 3
-#define IDI_ADAPTER_MAESTRA 4
-#endif
-#ifndef DI_VOICE
-#define DI_VOICE 0x0 /* obsolete define */
-#define DI_FAX3 0x1
-#define DI_MODEM 0x2
-#define DI_POST 0x4
-#define DI_V110 0x8
-#define DI_V120 0x10
-#define DI_POTS 0x20
-#define DI_CODEC 0x40
-#define DI_MANAGE 0x80
-#define DI_V_42 0x0100
-#define DI_EXTD_FAX 0x0200 /* Extended FAX (ECM, 2D, T.6, Polling) */
-#define DI_AT_PARSER 0x0400 /* Build-in AT Parser in the L2 */
-#define DI_VOICE_OVER_IP 0x0800 /* Voice over IP support */
-#endif
-#endif /*CARDTYPE_H_WANT_IDI_DATA*/
-#define DI_V1x0 (DI_V110 | DI_V120)
-#define DI_NULL 0x0000
-#if defined(SOFT_DSP_SUPPORT)
-#define SOFT_DSP_ADD_FEATURES (DI_MODEM | DI_FAX3 | DI_AT_PARSER)
-#else
-#define SOFT_DSP_ADD_FEATURES 0
-#endif
-#if defined(SOFT_V110_SUPPORT)
-#define DI_SOFT_V110 DI_V110
-#else
-#define DI_SOFT_V110 0
-#endif
-/*--- CardProperties [Index=CARDTYPE_....] ---------------------------------*/
-CARD_PROPERTIES CardProperties[] =
-{
- { /* 0 */
- "Diva MCA", 0x6336, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3,
- CARD_DIVA, CARD_I_NONE, BUS_MCA, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 1 */
- "Diva ISA", 0x0000, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3,
- CARD_DIVA, CARD_I_NONE, BUS_ISA, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 2 */
- "Diva/PCM", 0x0000, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3,
- CARD_DIVA, CARD_I_NONE, BUS_PCM, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 3 */
- "Diva PRO ISA", 0x0031, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
- CARD_PRO, CARD_I_NONE, BUS_ISA, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 4 */
- "Diva PRO PC-Card", 0x0000, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_PRO, CARD_I_NONE, BUS_PCM, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 5 */
- "Diva PICCOLA ISA", 0x0051, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_ISA, CHIP_HSCX,
- 1, 2, 0, 8, 0
- },
- { /* 6 */
- "Diva PICCOLA PCM", 0x0000, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_PCM, CHIP_HSCX,
- 1, 2, 0, 8, 0
- },
- { /* 7 */
- "Diva PRO 2.0 S/T PCI", 0xe001, 0x0200,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
- CARD_PRO, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 8 */
- "Diva 2.0 S/T PCI", 0xe002, 0x0200,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | DI_POTS | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_PCI, CHIP_HSCX,
- 1, 2, 0, 8, 0
- },
- { /* 9 */
- "QUADRO ISA", 0x0000, 0x0100,
- IDI_ADAPTER_S, FAMILY_S, DI_NULL,
- CARD_QUAD, CARD_I_QUAD, BUS_ISA, CHIP_NONE,
- 4, 2, 16, 0, 0x800
- },
- { /* 10 */
- "S ISA", 0x0000, 0x0100,
- IDI_ADAPTER_S, FAMILY_S, DI_CODEC,
- CARD_S, CARD_I_S, BUS_ISA, CHIP_NONE,
- 1, 1, 16, 0, 0x800
- },
- { /* 11 */
- "S MCA", 0x6a93, 0x0100,
- IDI_ADAPTER_S, FAMILY_S, DI_CODEC,
- CARD_S, CARD_I_S, BUS_MCA, CHIP_NONE,
- 1, 1, 16, 16, 0x400
- },
- { /* 12 */
- "SX ISA", 0x0000, 0x0100,
- IDI_ADAPTER_S, FAMILY_S, DI_NULL,
- CARD_SX, CARD_I_SX, BUS_ISA, CHIP_NONE,
- 1, 2, 16, 0, 0x800
- },
- { /* 13 */
- "SX MCA", 0x6a93, 0x0100,
- IDI_ADAPTER_S, FAMILY_S, DI_NULL,
- CARD_SX, CARD_I_SX, BUS_MCA, CHIP_NONE,
- 1, 2, 16, 16, 0x400
- },
- { /* 14 */
- "SXN ISA", 0x0000, 0x0100,
- IDI_ADAPTER_S, FAMILY_S, DI_NULL,
- CARD_SXN, CARD_I_SCOM, BUS_ISA, CHIP_NONE,
- 1, 2, 16, 0, 0x800
- },
- { /* 15 */
- "SXN MCA", 0x6a93, 0x0100,
- IDI_ADAPTER_S, FAMILY_S, DI_NULL,
- CARD_SXN, CARD_I_SCOM, BUS_MCA, CHIP_NONE,
- 1, 2, 16, 16, 0x400
- },
- { /* 16 */
- "SCOM ISA", 0x0000, 0x0100,
- IDI_ADAPTER_S, FAMILY_S, DI_CODEC,
- CARD_SCOM, CARD_I_SCOM, BUS_ISA, CHIP_NONE,
- 1, 2, 16, 0, 0x800
- },
- { /* 17 */
- "SCOM MCA", 0x6a93, 0x0100,
- IDI_ADAPTER_S, FAMILY_S, DI_CODEC,
- CARD_SCOM, CARD_I_SCOM, BUS_MCA, CHIP_NONE,
- 1, 2, 16, 16, 0x400
- },
- { /* 18 */
- "S2M ISA", 0x0000, 0x0100,
- IDI_ADAPTER_PR, FAMILY_S, DI_NULL,
- CARD_PR, CARD_I_PR, BUS_ISA, CHIP_NONE,
- 1, 30, 256, 0, 0x4000
- },
- { /* 19 */
- "S2M MCA", 0x6abb, 0x0100,
- IDI_ADAPTER_PR, FAMILY_S, DI_NULL,
- CARD_PR, CARD_I_PR, BUS_MCA, CHIP_NONE,
- 1, 30, 256, 16, 0x4000
- },
- { /* 20 */
- "Diva Server BRI-2M ISA", 0x0041, 0x0100,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_MAE, CARD_I_NONE, BUS_ISA, CHIP_DSP,
- 1, 2, 16, 8, 0
- },
- { /* 21 */
- "Diva Server BRI-2M PCI", 0xE010, 0x0100,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_MAE, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 16, 8, 0
- },
- { /* 22 */
- "Diva Server 4BRI-8M PCI", 0xE012, 0x0100,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_MAEQ, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 4, 2, 16, 8, 0
- },
- { /* 23 */
- "Diva Server PRI-30M PCI", 0xE014, 0x0100,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_MAEP, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 30, 256, 8, 0
- },
- { /* 24 */
- "Diva Server PRI-2M PCI", 0xe014, 0x0100,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_MAEP, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 30, 256, 8, 0
- },
- { /* 25 */
- "Diva Server PRI-9M PCI", 0x0000, 0x0100,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_MAEP, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 30, 256, 8, 0
- },
- { /* 26 */
- "Diva 2.0 S/T ISA", 0x0071, 0x0200,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | DI_POTS | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_ISA, CHIP_HSCX,
- 1, 2, 0, 8, 0
- },
- { /* 27 */
- "Diva 2.0 U ISA", 0x0091, 0x0200,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | DI_POTS | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_ISA, CHIP_HSCX,
- 1, 2, 0, 8, 0
- },
- { /* 28 */
- "Diva 2.0 U PCI", 0xe004, 0x0200,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | DI_POTS | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_PCI, CHIP_HSCX,
- 1, 2, 0, 8, 0
- },
- { /* 29 */
- "Diva PRO 2.0 S/T ISA", 0x0061, 0x0200,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
- CARD_PRO, CARD_I_NONE, BUS_ISA, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 30 */
- "Diva PRO 2.0 U ISA", 0x0081, 0x0200,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
- CARD_PRO, CARD_I_NONE, BUS_ISA, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 31 */
- "Diva PRO 2.0 U PCI", 0xe003, 0x0200,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
- CARD_PRO, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 32 */
- "Diva MOBILE", 0x0000, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_PCM, CHIP_HSCX,
- 1, 2, 0, 8, 0
- },
- { /* 33 */
- "TDK DFI3600", 0x0000, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_PCM, CHIP_HSCX,
- 1, 2, 0, 8, 0
- },
- { /* 34 (OEM version of 4 - "Diva PRO PC-Card") */
- "New Media ISDN", 0x0000, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_PRO, CARD_I_NONE, BUS_PCM, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 35 (OEM version of 7 - "Diva PRO 2.0 S/T PCI") */
- "BT ExLane PCI", 0xe101, 0x0200,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
- CARD_PRO, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 36 (OEM version of 29 - "Diva PRO 2.0 S/T ISA") */
- "BT ExLane ISA", 0x1061, 0x0200,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_POTS,
- CARD_PRO, CARD_I_NONE, BUS_ISA, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
- { /* 37 */
- "Diva 2.01 S/T ISA", 0x00A1, 0x0300,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_DIVALOW, CARD_I_NONE, BUS_ISA, CHIP_IPAC,
- 1, 2, 0, 8, 0
- },
- { /* 38 */
- "Diva 2.01 U ISA", 0x00B1, 0x0300,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_DIVALOW, CARD_I_NONE, BUS_ISA, CHIP_IPAC,
- 1, 2, 0, 8, 0
- },
- { /* 39 */
- "Diva 2.01 S/T PCI", 0xe005, 0x0300,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_DIVALOW, CARD_I_NONE, BUS_PCI, CHIP_IPAC,
- 1, 2, 0, 8, 0
- },
- { /* 40 no ID yet */
- "Diva 2.01 U PCI", 0x0000, 0x0300,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_DIVALOW, CARD_I_NONE, BUS_PCI, CHIP_IPAC,
- 1, 2, 0, 8, 0
- },
- { /* 41 */
- "Diva MOBILE V.90", 0x0000, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_PCM, CHIP_HSCX,
- 1, 2, 0, 8, 0
- },
- { /* 42 */
- "TDK DFI3600 V.90", 0x0000, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_PCM, CHIP_HSCX,
- 1, 2, 0, 8, 0
- },
- { /* 43 */
- "Diva Server PRI-23M PCI", 0xe014, 0x0100,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_MAEP, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 30, 256, 8, 0
- },
- { /* 44 */
- "Diva 2.01 S/T USB", 0x1000, 0x0300,
- IDI_ADAPTER_DIVA , FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_DIVALOW, CARD_I_NONE, BUS_USB, CHIP_IPAC,
- 1, 2, 0, 8, 0
- },
- { /* 45 */
- "Diva CT S/T PCI", 0xe006, 0x0300,
- IDI_ADAPTER_DIVA , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
- CARD_CT, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 0, 0, 0
- },
- { /* 46 */
- "Diva CT U PCI", 0xe007, 0x0300,
- IDI_ADAPTER_DIVA , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
- CARD_CT, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 0, 0, 0
- },
- { /* 47 */
- "Diva CT Lite S/T PCI", 0xe008, 0x0300,
- IDI_ADAPTER_DIVA , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
- CARD_CT, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 0, 0, 0
- },
- { /* 48 */
- "Diva CT Lite U PCI", 0xe009, 0x0300,
- IDI_ADAPTER_DIVA , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
- CARD_CT, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 0, 0, 0
- },
- { /* 49 */
- "Diva ISDN+V.90 PC Card", 0x8D8C, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
- CARD_DIVALOW, CARD_I_NONE, BUS_PCM, CHIP_IPAC,
- 1, 2, 0, 8, 0
- },
- { /* 50 */
- "Diva ISDN+V.90 PCI", 0xe00A, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_DIVALOW, CARD_I_NONE, BUS_PCI, CHIP_IPAC,
- 1, 2, 0, 8, 0
- },
- { /* 51 (DivaTA) no ID */
- "Diva TA", 0x0000, 0x0300,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V110 | DI_FAX3 | SOFT_DSP_ADD_FEATURES,
- CARD_DIVATA, CARD_I_NONE, BUS_COM, CHIP_EXTERN,
- 1, 1, 0, 8, 0
- },
- { /* 52 (Diva Server 4BRI-8M PCI adapter enabled for Voice) */
- "Diva Server Voice 4BRI-8M PCI", 0xE016, 0x0100,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_VOICE_OVER_IP,
- CARD_MAEQ, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 4, 2, 16, 8, 0
- },
- { /* 53 (Diva Server 4BRI 2.0 adapter) */
- "Diva Server 4BRI-8M 2.0 PCI", 0xE013, 0x0200,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_MAEQ, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 4, 2, 16, 8, 0
- },
- { /* 54 (Diva Server PRI 2.0 adapter) */
- "Diva Server PRI 2.0 PCI", 0xE015, 0x0200,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_MAEP, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 30, 256, 8, 0
- },
- { /* 55 (Diva Server 4BRI-8M 2.0 PCI adapter enabled for Voice) */
- "Diva Server Voice 4BRI-8M 2.0 PCI", 0xE017, 0x0200,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_VOICE_OVER_IP,
- CARD_MAEQ, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 4, 2, 16, 8, 0
- },
- { /* 56 (Diva Server PRI 2.0 PCI adapter enabled for Voice) */
- "Diva Server Voice PRI 2.0 PCI", 0xE019, 0x0200,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_VOICE_OVER_IP,
- CARD_MAEP, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 30, 256, 8, 0
- },
- {
- /* 57 (DivaLan) no ID */
- "Diva LAN", 0x0000, 0x0300,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V110 | DI_FAX3 | SOFT_DSP_ADD_FEATURES,
- CARD_DIVALAN, CARD_I_NONE, BUS_LAN, CHIP_EXTERN,
- 1, 1, 0, 8, 0
- },
- { /* 58 */
- "Diva 2.02 PCI S/T", 0xE00B, 0x0300,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES | DI_SOFT_V110,
- CARD_DIVALOW, CARD_I_NONE, BUS_PCI, CHIP_IPACX,
- 1, 2, 0, 8, 0
- },
- { /* 59 */
- "Diva 2.02 PCI U", 0xE00C, 0x0300,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_DIVALOW, CARD_I_NONE, BUS_PCI, CHIP_IPACX,
- 1, 2, 0, 8, 0
- },
- { /* 60 */
- "Diva Server BRI-2M 2.0 PCI", 0xE018, 0x0200,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_MAE2, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 16, 8, 0
- },
- { /* 61 (the previous name was Diva Server BRI-2F 2.0 PCI) */
- "Diva Server 2FX", 0xE01A, 0x0200,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_SOFT_V110,
- CARD_MAE2, CARD_I_NONE, BUS_PCI, CHIP_IPACX,
- 1, 2, 16, 8, 0
- },
- { /* 62 */
- " Diva ISDN USB 2.0", 0x1003, 0x0300,
- IDI_ADAPTER_DIVA , FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_DIVALOW, CARD_I_NONE, BUS_USB, CHIP_IPACX,
- 1, 2, 0, 8, 0
- },
- { /* 63 (Diva Server BRI-2M 2.0 PCI adapter enabled for Voice) */
- "Diva Server Voice BRI-2M 2.0 PCI", 0xE01B, 0x0200,
- IDI_ADAPTER_MAESTRA, FAMILY_MAESTRA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_VOICE_OVER_IP,
- CARD_MAE2, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 16, 8, 0
- },
- { /* 64 */
- "Diva Pro 3.0 PCI", 0xe00d, 0x0300,
- IDI_ADAPTER_DIVA , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM,
- CARD_PRO, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 0, 0, 0
- },
- { /* 65 */
- "Diva ISDN + CT 2.0", 0xE00E, 0x0300,
- IDI_ADAPTER_DIVA , FAMILY_DIVA, DI_V1x0 | DI_FAX3 | DI_MODEM | DI_CODEC,
- CARD_CT, CARD_I_NONE, BUS_PCI, CHIP_DSP,
- 1, 2, 0, 0, 0
- },
- { /* 66 */
- "Diva Mobile V.90 PC Card", 0x8331, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_PCM, CHIP_IPACX,
- 1, 2, 0, 8, 0
- },
- { /* 67 */
- "Diva ISDN PC Card", 0x8311, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_PICO, CARD_I_NONE, BUS_PCM, CHIP_IPACX,
- 1, 2, 0, 8, 0
- },
- { /* 68 */
- "Diva ISDN PC Card", 0x0000, 0x0100,
- IDI_ADAPTER_DIVA, FAMILY_DIVA, DI_V120 | SOFT_DSP_ADD_FEATURES,
- CARD_PRO, CARD_I_NONE, BUS_PCM, CHIP_DSP,
- 1, 2, 0, 8, 0
- },
-};
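
An illustrative helper (not part of the original header) that searches CardProperties[] by plug-and-play ID. Note the IDs are not unique: 0x0000 marks cards without one, and 0xe014, for example, is shared by entries 23, 24 and 43, so this returns the first match:

static int cardtype_by_pnp_id(unsigned short pnp_id)
{
	int t;

	if (!pnp_id)
		return CARD_UNKNOWN;	/* 0 means "no PnP ID assigned" */
	for (t = 0; t < CARDTYPE_MAX; t++)
		if (CardProperties[t].PnPId == pnp_id)
			return t;	/* first entry with this ID */
	return CARD_UNKNOWN;
}
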
-#if CARDTYPE_H_WANT_RESOURCE_DATA
-/*--- CardResource [Index=CARDTYPE_....] ---------------------------(GEI)-*/
-CARD_RESOURCE CardResource[] = {
-/* Interrupts IO-Address Mem-Address */
- /* 0*/ { 3,4,9,0,0,0,0,0,0,0, 0x200,0x20,16, 0x0,0x0,0 }, // DIVA MCA
- /* 1*/ { 3,4,9,10,11,12,0,0,0,0, 0x200,0x20,16, 0x0,0x0,0 }, // DIVA ISA
- /* 2*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA PCMCIA
- /* 3*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // DIVA PRO ISA
- /* 4*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA PRO PCMCIA
- /* 5*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // DIVA PICCOLA ISA
- /* 6*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA PICCOLA PCMCIA
- /* 7*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA PRO 2.0 PCI
- /* 8*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA 2.0 PCI
- /* 9*/ { 3,4,5,7,9,10,11,12,0,0, 0x0,0x0,0, 0x80000,0x2000,64 }, // QUADRO ISA
- /*10*/ { 3,4,9,10,11,12,0,0,0,0, 0x0,0x0,0, 0xc0000,0x2000,16 }, // S ISA
- /*11*/ { 3,4,9,0,0,0,0,0,0,0, 0xc00,0x10,16, 0xc0000,0x2000,16 }, // S MCA
- /*12*/ { 3,4,9,10,11,12,0,0,0,0, 0x0,0x0,0, 0xc0000,0x2000,16 }, // SX ISA
- /*13*/ { 3,4,9,0,0,0,0,0,0,0, 0xc00,0x10,16, 0xc0000,0x2000,16 }, // SX MCA
- /*14*/ { 3,4,5,7,9,10,11,12,0,0, 0x0,0x0,0, 0x80000,0x0800,256 }, // SXN ISA
- /*15*/ { 3,4,9,0,0,0,0,0,0,0, 0xc00,0x10,16, 0xc0000,0x2000,16 }, // SXN MCA
- /*16*/ { 3,4,5,7,9,10,11,12,0,0, 0x0,0x0,0, 0x80000,0x0800,256 }, // SCOM ISA
- /*17*/ { 3,4,9,0,0,0,0,0,0,0, 0xc00,0x10,16, 0xc0000,0x2000,16 }, // SCOM MCA
- /*18*/ { 3,4,5,7,9,10,11,12,0,0, 0x0,0x0,0, 0xc0000,0x4000,16 }, // S2M ISA
- /*19*/ { 3,4,9,0,0,0,0,0,0,0, 0xc00,0x10,16, 0xc0000,0x4000,16 }, // S2M MCA
- /*20*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // MAESTRA ISA
- /*21*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // MAESTRA PCI
- /*22*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // MAESTRA QUADRO ISA
- /*23*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x20,2048, 0x0,0x0,0 }, // MAESTRA QUADRO PCI
- /*24*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // MAESTRA PRIMARY ISA
- /*25*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // MAESTRA PRIMARY PCI
- /*26*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // DIVA 2.0 ISA
- /*27*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // DIVA 2.0 /U ISA
- /*28*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA 2.0 /U PCI
- /*29*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // DIVA PRO 2.0 ISA
- /*30*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // DIVA PRO 2.0 /U ISA
- /*31*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA PRO 2.0 /U PCI
- /*32*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA MOBILE
- /*33*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // TDK DFI3600 (same as DIVA MOBILE [32])
- /*34*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // New Media ISDN (same as DIVA PRO PCMCIA [4])
- /*35*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // BT ExLane PCI (same as DIVA PRO 2.0 PCI [7])
- /*36*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // BT ExLane ISA (same as DIVA PRO 2.0 ISA [29])
- /*37*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // DIVA 2.01 S/T ISA
- /*38*/ { 3,5,7,9,10,11,12,14,15,0, 0x200,0x20,16, 0x0,0x0,0 }, // DIVA 2.01 U ISA
- /*39*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA 2.01 S/T PCI
- /*40*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA 2.01 U PCI
- /*41*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA MOBILE V.90
- /*42*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // TDK DFI3600 V.90 (same as DIVA MOBILE V.90 [41])
- /*43*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x20,2048, 0x0,0x0,0 }, // DIVA Server PRI-23M PCI
- /*44*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA 2.01 S/T USB
- /*45*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA CT S/T PCI
- /*46*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA CT U PCI
- /*47*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA CT Lite S/T PCI
- /*48*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA CT Lite U PCI
- /*49*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA ISDN+V.90 PC Card
- /*50*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA ISDN+V.90 PCI
- /*51*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA TA
- /*52*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x20,2048, 0x0,0x0,0 }, // MAESTRA VOICE QUADRO PCI
- /*53*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x20,2048, 0x0,0x0,0 }, // MAESTRA VOICE QUADRO PCI
- /*54*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // MAESTRA VOICE PRIMARY PCI
- /*55*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x20,2048, 0x0,0x0,0 }, // MAESTRA VOICE QUADRO PCI
- /*56*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // MAESTRA VOICE PRIMARY PCI
- /*57*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA LAN
- /*58*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA 2.02 S/T PCI
- /*59*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA 2.02 U PCI
- /*60*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // Diva Server BRI-2M 2.0 PCI
- /*61*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // Diva Server BRI-2F PCI
- /*62*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA 2.01 S/T USB
- /*63*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // Diva Server Voice BRI-2M 2.0 PCI
- /*64*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA 3.0 PCI
- /*65*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA CT S/T PCI V2.0
- /*66*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA Mobile V.90 PC Card
- /*67*/ { 0,0,0,0,0,0,0,0,0,0, 0x0,0x0,0, 0x0,0x0,0 }, // DIVA ISDN PC Card
- /*68*/ { 3,4,5,7,9,10,11,12,14,15, 0x0,0x8,8192, 0x0,0x0,0 }, // DIVA ISDN PC Card
-};
-#endif /*CARDTYPE_H_WANT_RESOURCE_DATA*/
-#else /*!CARDTYPE_H_WANT_DATA*/
-extern CARD_PROPERTIES CardProperties[];
-extern CARD_RESOURCE CardResource[];
-#endif /*CARDTYPE_H_WANT_DATA*/
-/*
- * all existing download files
- */
-#define CARD_DSP_CNT 5
-#define CARD_PROT_CNT 9
-#define CARD_FT_UNKNOWN 0
-#define CARD_FT_B 1
-#define CARD_FT_D 2
-#define CARD_FT_S 3
-#define CARD_FT_M 4
-#define CARD_FT_NEW_DSP_COMBIFILE 5 /* File format of new DSP code (the DSP code powered by Telindus) */
-#define CARD_FILE_NONE 0
-#define CARD_B_S 1
-#define CARD_B_P 2
-#define CARD_D_K1 3
-#define CARD_D_K2 4
-#define CARD_D_H 5
-#define CARD_D_V 6
-#define CARD_D_M 7
-#define CARD_D_F 8
-#define CARD_P_S_E 9
-#define CARD_P_S_1 10
-#define CARD_P_S_B 11
-#define CARD_P_S_F 12
-#define CARD_P_S_A 13
-#define CARD_P_S_N 14
-#define CARD_P_S_5 15
-#define CARD_P_S_J 16
-#define CARD_P_SX_E 17
-#define CARD_P_SX_1 18
-#define CARD_P_SX_B 19
-#define CARD_P_SX_F 20
-#define CARD_P_SX_A 21
-#define CARD_P_SX_N 22
-#define CARD_P_SX_5 23
-#define CARD_P_SX_J 24
-#define CARD_P_SY_E 25
-#define CARD_P_SY_1 26
-#define CARD_P_SY_B 27
-#define CARD_P_SY_F 28
-#define CARD_P_SY_A 29
-#define CARD_P_SY_N 30
-#define CARD_P_SY_5 31
-#define CARD_P_SY_J 32
-#define CARD_P_SQ_E 33
-#define CARD_P_SQ_1 34
-#define CARD_P_SQ_B 35
-#define CARD_P_SQ_F 36
-#define CARD_P_SQ_A 37
-#define CARD_P_SQ_N 38
-#define CARD_P_SQ_5 39
-#define CARD_P_SQ_J 40
-#define CARD_P_P_E 41
-#define CARD_P_P_1 42
-#define CARD_P_P_B 43
-#define CARD_P_P_F 44
-#define CARD_P_P_A 45
-#define CARD_P_P_N 46
-#define CARD_P_P_5 47
-#define CARD_P_P_J 48
-#define CARD_P_M_E 49
-#define CARD_P_M_1 50
-#define CARD_P_M_B 51
-#define CARD_P_M_F 52
-#define CARD_P_M_A 53
-#define CARD_P_M_N 54
-#define CARD_P_M_5 55
-#define CARD_P_M_J 56
-#define CARD_P_S_S 57
-#define CARD_P_SX_S 58
-#define CARD_P_SY_S 59
-#define CARD_P_SQ_S 60
-#define CARD_P_P_S 61
-#define CARD_P_M_S 62
-#define CARD_D_NEW_DSP_COMBIFILE 63
-typedef struct CARD_FILES_DATA
-{
- char *Name;
- unsigned char Type;
-}
- CARD_FILES_DATA;
-typedef struct CARD_FILES
-{
- unsigned char Boot;
- unsigned char Dsp[CARD_DSP_CNT];
- unsigned char DspTelindus;
- unsigned char Prot[CARD_PROT_CNT];
-}
- CARD_FILES;
-#if CARDTYPE_H_WANT_DATA
-#if CARDTYPE_H_WANT_FILE_DATA
-CARD_FILES_DATA CardFData[] = {
-// Filename Filetype
- 0, CARD_FT_UNKNOWN,
- "didnload.bin", CARD_FT_B,
- "diprload.bin", CARD_FT_B,
- "didiva.bin", CARD_FT_D,
- "didivapp.bin", CARD_FT_D,
- "dihscx.bin", CARD_FT_D,
- "div110.bin", CARD_FT_D,
- "dimodem.bin", CARD_FT_D,
- "difax.bin", CARD_FT_D,
- "di_etsi.bin", CARD_FT_S,
- "di_1tr6.bin", CARD_FT_S,
- "di_belg.bin", CARD_FT_S,
- "di_franc.bin", CARD_FT_S,
- "di_atel.bin", CARD_FT_S,
- "di_ni.bin", CARD_FT_S,
- "di_5ess.bin", CARD_FT_S,
- "di_japan.bin", CARD_FT_S,
- "di_etsi.sx", CARD_FT_S,
- "di_1tr6.sx", CARD_FT_S,
- "di_belg.sx", CARD_FT_S,
- "di_franc.sx", CARD_FT_S,
- "di_atel.sx", CARD_FT_S,
- "di_ni.sx", CARD_FT_S,
- "di_5ess.sx", CARD_FT_S,
- "di_japan.sx", CARD_FT_S,
- "di_etsi.sy", CARD_FT_S,
- "di_1tr6.sy", CARD_FT_S,
- "di_belg.sy", CARD_FT_S,
- "di_franc.sy", CARD_FT_S,
- "di_atel.sy", CARD_FT_S,
- "di_ni.sy", CARD_FT_S,
- "di_5ess.sy", CARD_FT_S,
- "di_japan.sy", CARD_FT_S,
- "di_etsi.sq", CARD_FT_S,
- "di_1tr6.sq", CARD_FT_S,
- "di_belg.sq", CARD_FT_S,
- "di_franc.sq", CARD_FT_S,
- "di_atel.sq", CARD_FT_S,
- "di_ni.sq", CARD_FT_S,
- "di_5ess.sq", CARD_FT_S,
- "di_japan.sq", CARD_FT_S,
- "di_etsi.p", CARD_FT_S,
- "di_1tr6.p", CARD_FT_S,
- "di_belg.p", CARD_FT_S,
- "di_franc.p", CARD_FT_S,
- "di_atel.p", CARD_FT_S,
- "di_ni.p", CARD_FT_S,
- "di_5ess.p", CARD_FT_S,
- "di_japan.p", CARD_FT_S,
- "di_etsi.sm", CARD_FT_M,
- "di_1tr6.sm", CARD_FT_M,
- "di_belg.sm", CARD_FT_M,
- "di_franc.sm", CARD_FT_M,
- "di_atel.sm", CARD_FT_M,
- "di_ni.sm", CARD_FT_M,
- "di_5ess.sm", CARD_FT_M,
- "di_japan.sm", CARD_FT_M,
- "di_swed.bin", CARD_FT_S,
- "di_swed.sx", CARD_FT_S,
- "di_swed.sy", CARD_FT_S,
- "di_swed.sq", CARD_FT_S,
- "di_swed.p", CARD_FT_S,
- "di_swed.sm", CARD_FT_M,
- "didspdld.bin", CARD_FT_NEW_DSP_COMBIFILE
-};
-CARD_FILES CardFiles[] =
-{
- { /* CARD_UNKNOWN */
- CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE
- },
- { /* CARD_DIVA */
- CARD_FILE_NONE,
- CARD_D_K1, CARD_D_H, CARD_D_V, CARD_FILE_NONE, CARD_D_F,
- CARD_D_NEW_DSP_COMBIFILE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE
- },
- { /* CARD_PRO */
- CARD_FILE_NONE,
- CARD_D_K2, CARD_D_H, CARD_D_V, CARD_D_M, CARD_D_F,
- CARD_D_NEW_DSP_COMBIFILE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE
- },
- { /* CARD_PICO */
- CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE
- },
- { /* CARD_S */
- CARD_B_S,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE,
- CARD_P_S_E, CARD_P_S_1, CARD_P_S_B, CARD_P_S_F,
- CARD_P_S_A, CARD_P_S_N, CARD_P_S_5, CARD_P_S_J,
- CARD_P_S_S
- },
- { /* CARD_SX */
- CARD_B_S,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE,
- CARD_P_SX_E, CARD_P_SX_1, CARD_P_SX_B, CARD_P_SX_F,
- CARD_P_SX_A, CARD_P_SX_N, CARD_P_SX_5, CARD_P_SX_J,
- CARD_P_SX_S
- },
- { /* CARD_SXN */
- CARD_B_S,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE,
- CARD_P_SY_E, CARD_P_SY_1, CARD_P_SY_B, CARD_P_SY_F,
- CARD_P_SY_A, CARD_P_SY_N, CARD_P_SY_5, CARD_P_SY_J,
- CARD_P_SY_S
- },
- { /* CARD_SCOM */
- CARD_B_S,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE,
- CARD_P_SY_E, CARD_P_SY_1, CARD_P_SY_B, CARD_P_SY_F,
- CARD_P_SY_A, CARD_P_SY_N, CARD_P_SY_5, CARD_P_SY_J,
- CARD_P_SY_S
- },
- { /* CARD_QUAD */
- CARD_B_S,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE,
- CARD_P_SQ_E, CARD_P_SQ_1, CARD_P_SQ_B, CARD_P_SQ_F,
- CARD_P_SQ_A, CARD_P_SQ_N, CARD_P_SQ_5, CARD_P_SQ_J,
- CARD_P_SQ_S
- },
- { /* CARD_PR */
- CARD_B_P,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE,
- CARD_P_P_E, CARD_P_P_1, CARD_P_P_B, CARD_P_P_F,
- CARD_P_P_A, CARD_P_P_N, CARD_P_P_5, CARD_P_P_J,
- CARD_P_P_S
- },
- { /* CARD_MAE */
- CARD_FILE_NONE,
- CARD_D_K2, CARD_D_H, CARD_D_V, CARD_D_M, CARD_D_F,
- CARD_D_NEW_DSP_COMBIFILE,
- CARD_P_M_E, CARD_P_M_1, CARD_P_M_B, CARD_P_M_F,
- CARD_P_M_A, CARD_P_M_N, CARD_P_M_5, CARD_P_M_J,
- CARD_P_M_S
- },
- { /* CARD_MAEQ */ /* currently not supported */
- CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE
- },
- { /* CARD_MAEP */ /* currently not supported */
- CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE, CARD_FILE_NONE,
- CARD_FILE_NONE
- }
-};
-#endif /*CARDTYPE_H_WANT_FILE_DATA*/
-#else /*!CARDTYPE_H_WANT_DATA*/
-extern CARD_FILES_DATA CardFData[];
-extern CARD_FILES CardFiles[];
-#endif /*CARDTYPE_H_WANT_DATA*/
-#endif /* _CARDTYPE_H_ */
diff --git a/drivers/isdn/hardware/eicon/cp_vers.h b/drivers/isdn/hardware/eicon/cp_vers.h
deleted file mode 100644
index c97230c60e71..000000000000
--- a/drivers/isdn/hardware/eicon/cp_vers.h
+++ /dev/null
@@ -1,26 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-static char diva_capi_common_code_build[] = "102-28";
diff --git a/drivers/isdn/hardware/eicon/dadapter.c b/drivers/isdn/hardware/eicon/dadapter.c
deleted file mode 100644
index 51420999418d..000000000000
--- a/drivers/isdn/hardware/eicon/dadapter.c
+++ /dev/null
@@ -1,364 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "pc.h"
-#include "debuglib.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "dadapter.h"
-/* --------------------------------------------------------------------------
- Adapter array change notification framework
- -------------------------------------------------------------------------- */
-typedef struct _didd_adapter_change_notification {
- didd_adapter_change_callback_t callback;
- void IDI_CALL_ENTITY_T *context;
-} didd_adapter_change_notification_t, \
- * IDI_CALL_ENTITY_T pdidd_adapter_change_notification_t;
-#define DIVA_DIDD_MAX_NOTIFICATIONS 256
-static didd_adapter_change_notification_t \
-NotificationTable[DIVA_DIDD_MAX_NOTIFICATIONS];
-/* --------------------------------------------------------------------------
- Array to hold adapter information
- -------------------------------------------------------------------------- */
-static DESCRIPTOR HandleTable[NEW_MAX_DESCRIPTORS];
-static dword Adapters = 0; /* Number of adapters */
-/* --------------------------------------------------------------------------
- Shadow IDI_DIMAINT
- and 'shadow' debug stuff
- -------------------------------------------------------------------------- */
-static void no_printf(unsigned char *format, ...)
-{
-#ifdef EBUG
- va_list ap;
- va_start(ap, format);
- debug((format, ap));
- va_end(ap);
-#endif
-}
-
-/* -------------------------------------------------------------------------
- Portable debug Library
- ------------------------------------------------------------------------- */
-#include "debuglib.c"
-
-static DESCRIPTOR MAdapter = {IDI_DIMAINT, /* Adapter Type */
- 0x00, /* Channels */
- 0x0000, /* Features */
- (IDI_CALL)no_printf};
-/* --------------------------------------------------------------------------
- DAdapter. Only IDI clients whose buffer is large enough to hold
- all descriptors will receive information about the DAdapter
- { byte type, byte channels, word features, IDI_CALL request }
- -------------------------------------------------------------------------- */
-static void IDI_CALL_LINK_T diva_dadapter_request(ENTITY IDI_CALL_ENTITY_T *);
-static DESCRIPTOR DAdapter = {IDI_DADAPTER, /* Adapter Type */
- 0x00, /* Channels */
- 0x0000, /* Features */
- diva_dadapter_request };
-/* --------------------------------------------------------------------------
- LOCALS
- -------------------------------------------------------------------------- */
-static dword diva_register_adapter_callback(\
- didd_adapter_change_callback_t callback,
- void IDI_CALL_ENTITY_T *context);
-static void diva_remove_adapter_callback(dword handle);
-static void diva_notify_adapter_change(DESCRIPTOR *d, int removal);
-static diva_os_spin_lock_t didd_spin;
-/* --------------------------------------------------------------------------
- Should be called as first step, after driver init
- -------------------------------------------------------------------------- */
-void diva_didd_load_time_init(void) {
- memset(&HandleTable[0], 0x00, sizeof(HandleTable));
- memset(&NotificationTable[0], 0x00, sizeof(NotificationTable));
- diva_os_initialize_spin_lock(&didd_spin, "didd");
-}
-/* --------------------------------------------------------------------------
- Should be called as last step, when the driver unloads
- -------------------------------------------------------------------------- */
-void diva_didd_load_time_finit(void) {
- diva_os_destroy_spin_lock(&didd_spin, "didd");
-}
-/* --------------------------------------------------------------------------
- Called to register a new adapter in the adapter array
- return adapter handle (> 0) on success
- return -1 on adapter array overflow
- -------------------------------------------------------------------------- */
-static int diva_didd_add_descriptor(DESCRIPTOR *d) {
- diva_os_spin_lock_magic_t irql;
- int i;
- if (d->type == IDI_DIMAINT) {
- if (d->request) {
- MAdapter.request = d->request;
- dprintf = (DIVA_DI_PRINTF)d->request;
- diva_notify_adapter_change(&MAdapter, 0); /* Inserted */
- DBG_TRC(("DIMAINT registered, dprintf=%08x", d->request))
- } else {
- DBG_TRC(("DIMAINT removed"))
- diva_notify_adapter_change(&MAdapter, 1); /* About to remove */
- MAdapter.request = (IDI_CALL)no_printf;
- dprintf = no_printf;
- }
- return (NEW_MAX_DESCRIPTORS);
- }
- for (i = 0; i < NEW_MAX_DESCRIPTORS; i++) {
- diva_os_enter_spin_lock(&didd_spin, &irql, "didd_add");
- if (HandleTable[i].type == 0) {
- memcpy(&HandleTable[i], d, sizeof(*d));
- Adapters++;
- diva_os_leave_spin_lock(&didd_spin, &irql, "didd_add");
- diva_notify_adapter_change(d, 0); /* we have new adapter */
- DBG_TRC(("Add adapter[%d], request=%08x", (i + 1), d->request))
- return (i + 1);
- }
- diva_os_leave_spin_lock(&didd_spin, &irql, "didd_add");
- }
- DBG_ERR(("Can't add adapter, out of resources"))
- return (-1);
-}
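
diva_didd_add_descriptor() takes and drops didd_spin once per loop iteration, which is correct (each slot is tested and claimed atomically) but unusual. An alternative sketch, using the same file-scope names, that holds the lock across the whole scan and still issues the notification outside the lock:

static int add_descriptor_sketch(DESCRIPTOR *d)
{
	diva_os_spin_lock_magic_t irql;
	int i, slot = -1;

	diva_os_enter_spin_lock(&didd_spin, &irql, "didd_add");
	for (i = 0; i < NEW_MAX_DESCRIPTORS; i++) {
		if (HandleTable[i].type == 0) {
			memcpy(&HandleTable[i], d, sizeof(*d));
			Adapters++;
			slot = i;
			break;
		}
	}
	diva_os_leave_spin_lock(&didd_spin, &irql, "didd_add");

	if (slot < 0)
		return -1;			/* table full */
	diva_notify_adapter_change(d, 0);	/* outside the lock, as above */
	return slot + 1;			/* 1-based handle, as above */
}
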
-/* --------------------------------------------------------------------------
- Called to remove one registered adapter from the array
- return 0 on success
- return -1 if no adapter with this request function is registered
- -------------------------------------------------------------------------- */
-static int diva_didd_remove_descriptor(IDI_CALL request) {
- diva_os_spin_lock_magic_t irql;
- int i;
- if (request == MAdapter.request) {
- DBG_TRC(("DIMAINT removed"))
- dprintf = no_printf;
- diva_notify_adapter_change(&MAdapter, 1); /* About to remove */
- MAdapter.request = (IDI_CALL)no_printf;
- return (0);
- }
- for (i = 0; (Adapters && (i < NEW_MAX_DESCRIPTORS)); i++) {
- if (HandleTable[i].request == request) {
- diva_notify_adapter_change(&HandleTable[i], 1); /* About to remove */
- diva_os_enter_spin_lock(&didd_spin, &irql, "didd_rm");
- memset(&HandleTable[i], 0x00, sizeof(HandleTable[0]));
- Adapters--;
- diva_os_leave_spin_lock(&didd_spin, &irql, "didd_rm");
- DBG_TRC(("Remove adapter[%d], request=%08x", (i + 1), request))
- return (0);
- }
- }
- DBG_ERR(("Invalid request=%08x, can't remove adapter", request))
- return (-1);
-}
-/* --------------------------------------------------------------------------
- Read adapter array
- return 1 if not enough space to save all available adapters
- -------------------------------------------------------------------------- */
-static int diva_didd_read_adapter_array(DESCRIPTOR *buffer, int length) {
- diva_os_spin_lock_magic_t irql;
- int src, dst;
- memset(buffer, 0x00, length);
- length /= sizeof(DESCRIPTOR);
- DBG_TRC(("DIDD_Read, space = %d, Adapters = %d", length, Adapters + 2))
-
- diva_os_enter_spin_lock(&didd_spin, &irql, "didd_read");
- for (src = 0, dst = 0;
- (Adapters && (src < NEW_MAX_DESCRIPTORS) && (dst < length));
- src++) {
- if (HandleTable[src].type) {
- memcpy(&buffer[dst], &HandleTable[src], sizeof(DESCRIPTOR));
- dst++;
- }
- }
- diva_os_leave_spin_lock(&didd_spin, &irql, "didd_read");
- if (dst < length) {
- memcpy(&buffer[dst], &MAdapter, sizeof(DESCRIPTOR));
- dst++;
- } else {
- DBG_ERR(("Can't write DIMAINT. Array too small"))
- }
- if (dst < length) {
- memcpy(&buffer[dst], &DAdapter, sizeof(DESCRIPTOR));
- dst++;
- } else {
- DBG_ERR(("Can't write DADAPTER. Array too small"))
- }
- DBG_TRC(("Read %d adapters", dst))
- return (dst == length);
-}
-/* --------------------------------------------------------------------------
- DAdapter request function.
- This function processes only synchronous requests, and is used
- for the reception/registration of new interfaces
- -------------------------------------------------------------------------- */
-static void IDI_CALL_LINK_T diva_dadapter_request( \
- ENTITY IDI_CALL_ENTITY_T *e) {
- IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e;
- if (e->Req) { /* async requests are not processed, so return an error */
- e->Rc = OUT_OF_RESOURCES;
- DBG_ERR(("Can't process async request, Req=%02x", e->Req))
- return;
- }
- /*
- So, we process sync request
- */
- switch (e->Rc) {
- case IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY: {
- diva_didd_adapter_notify_t *pinfo = &syncReq->didd_notify.info;
- pinfo->handle = diva_register_adapter_callback( \
- (didd_adapter_change_callback_t)pinfo->callback,
- (void IDI_CALL_ENTITY_T *)pinfo->context);
- e->Rc = 0xff;
- } break;
- case IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY: {
- diva_didd_adapter_notify_t *pinfo = &syncReq->didd_notify.info;
- diva_remove_adapter_callback(pinfo->handle);
- e->Rc = 0xff;
- } break;
- case IDI_SYNC_REQ_DIDD_ADD_ADAPTER: {
- diva_didd_add_adapter_t *pinfo = &syncReq->didd_add_adapter.info;
- if (diva_didd_add_descriptor((DESCRIPTOR *)pinfo->descriptor) < 0) {
- e->Rc = OUT_OF_RESOURCES;
- } else {
- e->Rc = 0xff;
- }
- } break;
- case IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER: {
- diva_didd_remove_adapter_t *pinfo = &syncReq->didd_remove_adapter.info;
- if (diva_didd_remove_descriptor((IDI_CALL)pinfo->p_request) < 0) {
- e->Rc = OUT_OF_RESOURCES;
- } else {
- e->Rc = 0xff;
- }
- } break;
- case IDI_SYNC_REQ_DIDD_READ_ADAPTER_ARRAY: {
- diva_didd_read_adapter_array_t *pinfo =\
- &syncReq->didd_read_adapter_array.info;
- if (diva_didd_read_adapter_array((DESCRIPTOR *)pinfo->buffer,
- (int)pinfo->length)) {
- e->Rc = OUT_OF_RESOURCES;
- } else {
- e->Rc = 0xff;
- }
- } break;
- default:
- DBG_ERR(("Can't process sync request, Req=%02x", e->Rc))
- e->Rc = OUT_OF_RESOURCES;
- }
-}
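A caller-side sketch of the sync-request protocol handled above: Req must be zero (a non-zero Req is rejected as asynchronous), the request code travels in Rc, and Rc comes back as 0xff on success. The descriptor contents and the dadapter_request pointer are assumptions for illustration:

	IDI_SYNC_REQ req;
	DESCRIPTOR my_adapter; /* caller fills in type, channels, request, ... */

	memset(&req, 0x00, sizeof(req));
	req.didd_add_adapter.Req = 0;  /* zero: synchronous request */
	req.didd_add_adapter.Rc = IDI_SYNC_REQ_DIDD_ADD_ADAPTER;
	req.didd_add_adapter.info.descriptor = (void *)&my_adapter;
	(*dadapter_request)((ENTITY *)&req); /* request fn of the IDI_DADAPTER entry */
	if (req.didd_add_adapter.Rc != 0xff) {
		/* OUT_OF_RESOURCES: adapter array is full */
	}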
-/* --------------------------------------------------------------------------
- IDI client registers its notification function
- -------------------------------------------------------------------------- */
-static dword diva_register_adapter_callback( \
- didd_adapter_change_callback_t callback,
- void IDI_CALL_ENTITY_T *context) {
- diva_os_spin_lock_magic_t irql;
- dword i;
-
- for (i = 0; i < DIVA_DIDD_MAX_NOTIFICATIONS; i++) {
- diva_os_enter_spin_lock(&didd_spin, &irql, "didd_nfy_add");
- if (!NotificationTable[i].callback) {
- NotificationTable[i].callback = callback;
- NotificationTable[i].context = context;
- diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy_add");
- DBG_TRC(("Register adapter notification[%d]=%08x", i + 1, callback))
- return (i + 1);
- }
- diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy_add");
- }
- DBG_ERR(("Can't register adapter notification, overflow"))
- return (0);
-}
-/* --------------------------------------------------------------------------
- IDI client removes its registered notification function
- -------------------------------------------------------------------------- */
-static void diva_remove_adapter_callback(dword handle) {
- diva_os_spin_lock_magic_t irql;
- if (handle && ((--handle) < DIVA_DIDD_MAX_NOTIFICATIONS)) {
- diva_os_enter_spin_lock(&didd_spin, &irql, "didd_nfy_rm");
- NotificationTable[handle].callback = NULL;
- NotificationTable[handle].context = NULL;
- diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy_rm");
- DBG_TRC(("Remove adapter notification[%d]", (int)(handle + 1)))
- return;
- }
- DBG_ERR(("Can't remove adapter notification, handle=%d", handle))
-}
-/* --------------------------------------------------------------------------
- Notify all clients about an adapter array change
- Assumes the following behavior on the client side:
- Step 1: Register Notification
- Step 2: Read Adapter Array
- -------------------------------------------------------------------------- */
-static void diva_notify_adapter_change(DESCRIPTOR *d, int removal) {
- int i, do_notify;
- didd_adapter_change_notification_t nfy;
- diva_os_spin_lock_magic_t irql;
- for (i = 0; i < DIVA_DIDD_MAX_NOTIFICATIONS; i++) {
- do_notify = 0;
- diva_os_enter_spin_lock(&didd_spin, &irql, "didd_nfy");
- if (NotificationTable[i].callback) {
- memcpy(&nfy, &NotificationTable[i], sizeof(nfy));
- do_notify = 1;
- }
- diva_os_leave_spin_lock(&didd_spin, &irql, "didd_nfy");
- if (do_notify) {
- (*(nfy.callback))(nfy.context, d, removal);
- }
- }
-}
-/* --------------------------------------------------------------------------
- For all systems that are linked by the Kernel Mode Linker this is the ONLY
- function that should be exported by this device driver
- IDI clients should look for IDI_DADAPTER, and use the request function
- of this adapter (sync request) in order to receive the appropriate services:
- - add new adapter
- - remove existing adapter
- - add adapter array notification
- - remove adapter array notification
- (read adapter is redundant in this case)
- INPUT:
- buffer - pointer to buffer that will receive adapter array
- length - length (in bytes) of space in buffer
- OUTPUT:
- Adapter array will be written to memory described by 'buffer'
- If the last adapter seen in the returned adapter array is
- IDI_DADAPTER, or if the last adapter in the array has type '0', then
- there was enough space in the buffer to accommodate all available
- adapter descriptors
- *NOTE 1 (debug interface):
- The IDI adapter of type 'IDI_DIMAINT' registers as its 'request' the
- well-known 'dprintf' function (of type DI_PRINTF; see
- include/debuglib.c and include/debuglib.h for details).
- So dprintf is not exported from the debug module directly;
- instead, IDI_DIMAINT is registered.
- The module load order in this case is:
- 1. DIDD (this file)
- 2. DIMAINT loads and registers 'IDI_DIMAINT'; at this step
- DIDD should be able to get 'dprintf', save it, and
- register with DIDD by means of the 'dprintf' function.
- 3. any other driver is loaded and is able to access the adapter array
- and the debug interface
- This approach allows loading/unloading the debug interface on demand
- and saves memory, if it is necessary.
- -------------------------------------------------------------------------- */
-void IDI_CALL_LINK_T DIVA_DIDD_Read(void IDI_CALL_ENTITY_T *buffer,
- int length) {
- diva_didd_read_adapter_array(buffer, length);
-}
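A client-side sketch of the read described above; the scan loop is illustrative, while DIVA_DIDD_Read, DESCRIPTOR, NEW_MAX_DESCRIPTORS and IDI_DADAPTER come from the driver headers:

	DESCRIPTOR tbl[NEW_MAX_DESCRIPTORS + 2]; /* +2 slots: DIMAINT and DADAPTER */
	IDI_CALL dadapter_request = NULL;
	int i;

	DIVA_DIDD_Read(tbl, sizeof(tbl));
	for (i = 0; i < NEW_MAX_DESCRIPTORS + 2; i++) {
		if (tbl[i].type == 0)
			break;                             /* end of array: buffer was large enough */
		if (tbl[i].type == IDI_DADAPTER) {
			dadapter_request = tbl[i].request; /* use for the sync requests above */
			break;
		}
	}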
diff --git a/drivers/isdn/hardware/eicon/dadapter.h b/drivers/isdn/hardware/eicon/dadapter.h
deleted file mode 100644
index 5540f46a5be3..000000000000
--- a/drivers/isdn/hardware/eicon/dadapter.h
+++ /dev/null
@@ -1,34 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_DIDD_DADAPTER_INC__
-#define __DIVA_DIDD_DADAPTER_INC__
-
-void diva_didd_load_time_init(void);
-void diva_didd_load_time_finit(void);
-
-#define NEW_MAX_DESCRIPTORS 64
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c
deleted file mode 100644
index 301788115c4f..000000000000
--- a/drivers/isdn/hardware/eicon/debug.c
+++ /dev/null
@@ -1,2128 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include "platform.h"
-#include "pc.h"
-#include "di_defs.h"
-#include "debug_if.h"
-#include "divasync.h"
-#include "kst_ifc.h"
-#include "maintidi.h"
-#include "man_defs.h"
-
-/*
- LOCALS
-*/
-#define DBG_MAGIC (0x47114711L)
-
-static void DI_register(void *arg);
-static void DI_deregister(pDbgHandle hDbg);
-static void DI_format(int do_lock, word id, int type, char *format, va_list argument_list);
-static void DI_format_locked(word id, int type, char *format, va_list argument_list);
-static void DI_format_old(word id, char *format, va_list ap) { }
-static void DiProcessEventLog(unsigned short id, unsigned long msgID, va_list ap) { }
-static void single_p(byte *P, word *PLength, byte Id);
-static void diva_maint_xdi_cb(ENTITY *e);
-static word SuperTraceCreateReadReq(byte *P, const char *path);
-static int diva_mnt_cmp_nmbr(const char *nmbr);
-static void diva_free_dma_descriptor(IDI_CALL request, int nr);
-static int diva_get_dma_descriptor(IDI_CALL request, dword *dma_magic);
-void diva_mnt_internal_dprintf(dword drv_id, dword type, char *p, ...);
-
-static dword MaxDumpSize = 256;
-static dword MaxXlogSize = 2 + 128;
-static char TraceFilter[DIVA_MAX_SELECTIVE_FILTER_LENGTH + 1];
-static int TraceFilterIdent = -1;
-static int TraceFilterChannel = -1;
-
-typedef struct _diva_maint_client {
- dword sec;
- dword usec;
- pDbgHandle hDbg;
- char drvName[128];
- dword dbgMask;
- dword last_dbgMask;
- IDI_CALL request;
- _DbgHandle_ Dbg;
- int logical;
- int channels;
- diva_strace_library_interface_t *pIdiLib;
- BUFFERS XData;
- char xbuffer[2048 + 512];
- byte *pmem;
- int request_pending;
- int dma_handle;
-} diva_maint_client_t;
-static diva_maint_client_t clients[MAX_DESCRIPTORS];
-
-static void diva_change_management_debug_mask(diva_maint_client_t *pC, dword old_mask);
-
-static void diva_maint_error(void *user_context,
- diva_strace_library_interface_t *hLib,
- int Adapter,
- int error,
- const char *file,
- int line);
-static void diva_maint_state_change_notify(void *user_context,
- diva_strace_library_interface_t *hLib,
- int Adapter,
- diva_trace_line_state_t *channel,
- int notify_subject);
-static void diva_maint_trace_notify(void *user_context,
- diva_strace_library_interface_t *hLib,
- int Adapter,
- void *xlog_buffer,
- int length);
-
-
-
-typedef struct MSG_QUEUE {
- dword Size; /* total size of queue (constant) */
- byte *Base; /* lowest address (constant) */
- byte *High; /* Base + Size (constant) */
- byte *Head; /* first message in queue (if any) */
- byte *Tail; /* first free position */
- byte *Wrap; /* current wraparound position */
- dword Count; /* current no of bytes in queue */
-} MSG_QUEUE;
-
-typedef struct MSG_HEAD {
- volatile dword Size; /* size of data following MSG_HEAD */
-#define MSG_INCOMPLETE 0x8000 /* ORed to Size until queueCompleteMsg */
-} MSG_HEAD;
-
-#define queueCompleteMsg(p) do { ((MSG_HEAD *)p - 1)->Size &= ~MSG_INCOMPLETE; } while (0)
-#define queueCount(q) ((q)->Count)
-#define MSG_NEED(size) \
- ((sizeof(MSG_HEAD) + size + sizeof(dword) - 1) & ~(sizeof(dword) - 1))
-
-static void queueInit(MSG_QUEUE *Q, byte *Buffer, dword sizeBuffer) {
- Q->Size = sizeBuffer;
- Q->Base = Q->Head = Q->Tail = Buffer;
- Q->High = Buffer + sizeBuffer;
- Q->Wrap = NULL;
- Q->Count = 0;
-}
-
-static byte *queueAllocMsg(MSG_QUEUE *Q, word size) {
- /* Allocate 'size' bytes at the tail of the queue which will be filled later
- * directly with the caller's own message header info and/or message.
- * An 'alloced' message is marked incomplete by ORing the 'Size' field
- * with MSG_INCOMPLETE.
- * This must be reset via queueCompleteMsg() after the message is filled.
- * As long as a message is marked incomplete queuePeekMsg() will return
- * a 'queue empty' condition when it reaches such a message. */
-
- MSG_HEAD *Msg;
- word need = MSG_NEED(size);
-
- if (Q->Tail == Q->Head) {
- if (Q->Wrap || need > Q->Size) {
- return NULL; /* full */
- }
- goto alloc; /* empty */
- }
-
- if (Q->Tail > Q->Head) {
- if (Q->Tail + need <= Q->High) goto alloc; /* append */
- if (Q->Base + need > Q->Head) {
- return NULL; /* too much */
- }
- /* wraparound the queue (but not the message) */
- Q->Wrap = Q->Tail;
- Q->Tail = Q->Base;
- goto alloc;
- }
-
- if (Q->Tail + need > Q->Head) {
- return NULL; /* too much */
- }
-
-alloc:
- Msg = (MSG_HEAD *)Q->Tail;
-
- Msg->Size = size | MSG_INCOMPLETE;
-
- Q->Tail += need;
- Q->Count += size;
-
-
-
- return ((byte *)(Msg + 1));
-}
-
-static void queueFreeMsg(MSG_QUEUE *Q) {
-/* Free the message at head of queue */
-
- word size = ((MSG_HEAD *)Q->Head)->Size & ~MSG_INCOMPLETE;
-
- Q->Head += MSG_NEED(size);
- Q->Count -= size;
-
- if (Q->Wrap) {
- if (Q->Head >= Q->Wrap) {
- Q->Head = Q->Base;
- Q->Wrap = NULL;
- }
- } else if (Q->Head >= Q->Tail) {
- Q->Head = Q->Tail = Q->Base;
- }
-}
-
-static byte *queuePeekMsg(MSG_QUEUE *Q, word *size) {
- /* Show the first valid message in the queue BUT DON'T free the message.
- * After looking at the message contents it can be freed via queueFreeMsg()
- * or simply remain in the message queue. */
-
- MSG_HEAD *Msg = (MSG_HEAD *)Q->Head;
-
- if (((byte *)Msg == Q->Tail && !Q->Wrap) ||
- (Msg->Size & MSG_INCOMPLETE)) {
- return NULL;
- } else {
- *size = Msg->Size;
- return ((byte *)(Msg + 1));
- }
-}
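A usage sketch of the alloc/complete/peek/free protocol implemented above, assuming the driver's platform types (byte, word) and an illustrative buffer and payload:

	static byte ring[4096];
	static MSG_QUEUE q;

	void queue_example(void)
	{
		byte *msg;
		word size;

		queueInit(&q, ring, sizeof(ring));

		if ((msg = queueAllocMsg(&q, 5))) {    /* producer: reserve 5 bytes at the tail */
			memcpy(msg, "hello", 5);       /* fill the payload ...                  */
			queueCompleteMsg(msg);         /* ... then clear MSG_INCOMPLETE         */
		}

		if ((msg = queuePeekMsg(&q, &size))) { /* consumer: look without removing       */
			/* process 'size' bytes at 'msg', then: */
			queueFreeMsg(&q);              /* release the message at the head       */
		}
	}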
-
-/*
- Message queue header
-*/
-static MSG_QUEUE *dbg_queue;
-static byte *dbg_base;
-static int external_dbg_queue;
-static diva_os_spin_lock_t dbg_q_lock;
-static diva_os_spin_lock_t dbg_adapter_lock;
-static int dbg_q_busy;
-static volatile dword dbg_sequence;
-
-/*
- INTERFACE:
- Initialize run time queue structures.
- base: base of the message queue
- length: length of the message queue
- do_init: perform queue reset
-
- return: zero on success, -1 on error
-*/
-int diva_maint_init(byte *base, unsigned long length, int do_init) {
- if (dbg_queue || (!base) || (length < (4096 * 4))) {
- return (-1);
- }
-
- TraceFilter[0] = 0;
- TraceFilterIdent = -1;
- TraceFilterChannel = -1;
-
- dbg_base = base;
-
- *(dword *)base = (dword)DBG_MAGIC; /* Store Magic */
- base += sizeof(dword);
- length -= sizeof(dword);
-
- *(dword *)base = 2048; /* Extension Field Length */
- base += sizeof(dword);
- length -= sizeof(dword);
-
- strcpy(base, "KERNEL MODE BUFFER\n");
- base += 2048;
- length -= 2048;
-
- *(dword *)base = 0; /* Terminate extension */
- base += sizeof(dword);
- length -= sizeof(dword);
-
- *(void **)base = (void *)(base + sizeof(void *)); /* Store Base */
- base += sizeof(void *);
- length -= sizeof(void *);
-
- dbg_queue = (MSG_QUEUE *)base;
- queueInit(dbg_queue, base + sizeof(MSG_QUEUE), length - sizeof(MSG_QUEUE) - 512);
- external_dbg_queue = 0;
-
- if (!do_init) {
- external_dbg_queue = 1; /* memory was located on the external device */
- }
-
-
- if (diva_os_initialize_spin_lock(&dbg_q_lock, "dbg_init")) {
- dbg_queue = NULL;
- dbg_base = NULL;
- external_dbg_queue = 0;
- return (-1);
- }
-
- if (diva_os_initialize_spin_lock(&dbg_adapter_lock, "dbg_init")) {
- diva_os_destroy_spin_lock(&dbg_q_lock, "dbg_init");
- dbg_queue = NULL;
- dbg_base = NULL;
- external_dbg_queue = 0;
- return (-1);
- }
-
- return (0);
-}
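The resulting layout of the caller's buffer, reconstructed from the writes above (byte offsets, assuming a 4-byte dword):

	/*
	 * +0                   dword      DBG_MAGIC (0x47114711)
	 * +4                   dword      extension field length (2048)
	 * +8                   char[2048] extension text ("KERNEL MODE BUFFER\n", ...)
	 * +2056                dword      0 (extension terminator)
	 * +2060                void *     pointer to the storage that follows
	 * +2060+sizeof(void *) MSG_QUEUE  header, then the ring storage
	 *                      (queueInit receives the remaining length minus a
	 *                       512-byte reserve)
	 */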
-
-/*
- INTERFACE:
- Finit at unload time
- return the address of the internal queue, or zero if the queue
- was external
-*/
-void *diva_maint_finit(void) {
- void *ret = (void *)dbg_base;
- int i;
-
- dbg_queue = NULL;
- dbg_base = NULL;
-
- if (ret) {
- diva_os_destroy_spin_lock(&dbg_q_lock, "dbg_finit");
- diva_os_destroy_spin_lock(&dbg_adapter_lock, "dbg_finit");
- }
-
- if (external_dbg_queue) {
- ret = NULL;
- }
- external_dbg_queue = 0;
-
- for (i = 1; i < ARRAY_SIZE(clients); i++) {
- if (clients[i].pmem) {
- diva_os_free(0, clients[i].pmem);
- }
- }
-
- return (ret);
-}
-
-/*
- INTERFACE:
- Return amount of messages in debug queue
-*/
-dword diva_dbg_q_length(void) {
- return (dbg_queue ? queueCount(dbg_queue) : 0);
-}
-
-/*
- INTERFACE:
- Lock message queue and return the pointer to the first
- entry.
-*/
-diva_dbg_entry_head_t *diva_maint_get_message(word *size,
- diva_os_spin_lock_magic_t *old_irql) {
- diva_dbg_entry_head_t *pmsg = NULL;
-
- diva_os_enter_spin_lock(&dbg_q_lock, old_irql, "read");
- if (dbg_q_busy) {
- diva_os_leave_spin_lock(&dbg_q_lock, old_irql, "read_busy");
- return NULL;
- }
- dbg_q_busy = 1;
-
- if (!(pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, size))) {
- dbg_q_busy = 0;
- diva_os_leave_spin_lock(&dbg_q_lock, old_irql, "read_empty");
- }
-
- return (pmsg);
-}
-
-/*
- INTERFACE:
- acknowledge last message and unlock queue
-*/
-void diva_maint_ack_message(int do_release,
- diva_os_spin_lock_magic_t *old_irql) {
- if (!dbg_q_busy) {
- return;
- }
- if (do_release) {
- queueFreeMsg(dbg_queue);
- }
- dbg_q_busy = 0;
- diva_os_leave_spin_lock(&dbg_q_lock, old_irql, "read_ack");
-}
-
-
-/*
- INTERFACE:
- PRT COMP function used to register
- with MAINT adapter or log in compatibility
- mode in case older driver version is connected too
-*/
-void diva_maint_prtComp(char *format, ...) {
- void *hDbg;
- va_list ap;
-
- if (!format)
- return;
-
- va_start(ap, format);
-
- /*
- register to new log driver functions
- */
- if ((format[0] == 0) && ((unsigned char)format[1] == 255)) {
- hDbg = va_arg(ap, void *); /* ptr to DbgHandle */
- DI_register(hDbg);
- }
-
- va_end(ap);
-}
-
-static void DI_register(void *arg) {
- diva_os_spin_lock_magic_t old_irql;
- dword sec, usec;
- pDbgHandle hDbg;
- int id, free_id = -1, best_id = 0;
-
- diva_os_get_time(&sec, &usec);
-
- hDbg = (pDbgHandle)arg;
- /*
- Check for bad args, especially for the old obsolete debug handle
- */
- if ((hDbg == NULL) ||
- ((hDbg->id == 0) && (((_OldDbgHandle_ *)hDbg)->id == -1)) ||
- (hDbg->Registered != 0)) {
- return;
- }
-
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "register");
-
- for (id = 1; id < ARRAY_SIZE(clients); id++) {
- if (clients[id].hDbg == hDbg) {
- /*
- driver already registered
- */
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
- return;
- }
- if (clients[id].hDbg) { /* slot is busy */
- continue;
- }
- free_id = id;
- if (!strcmp(clients[id].drvName, hDbg->drvName)) {
- /*
- This driver was already registered with this name
- and slot is still free - reuse it
- */
- best_id = 1;
- break;
- }
- if (!clients[id].hDbg) { /* slot is free */
- break;
- }
- }
-
- if (free_id != -1) {
- diva_dbg_entry_head_t *pmsg = NULL;
- int len;
- char tmp[256];
- word size;
-
- /*
- Register new driver with id == free_id
- */
- clients[free_id].hDbg = hDbg;
- clients[free_id].sec = sec;
- clients[free_id].usec = usec;
- strcpy(clients[free_id].drvName, hDbg->drvName);
-
- clients[free_id].dbgMask = hDbg->dbgMask;
- if (best_id) {
- hDbg->dbgMask |= clients[free_id].last_dbgMask;
- } else {
- clients[free_id].last_dbgMask = 0;
- }
-
- hDbg->Registered = DBG_HANDLE_REG_NEW;
- hDbg->id = (byte)free_id;
- hDbg->dbg_end = DI_deregister;
- hDbg->dbg_prt = DI_format_locked;
- hDbg->dbg_ev = DiProcessEventLog;
- hDbg->dbg_irq = DI_format_locked;
- if (hDbg->Version > 0) {
- hDbg->dbg_old = DI_format_old;
- }
- hDbg->next = (pDbgHandle)DBG_MAGIC;
-
- /*
- Log driver register, MAINT driver ID is '0'
- */
- len = sprintf(tmp, "DIMAINT - drv # %d = '%s' registered",
- free_id, hDbg->drvName);
-
- while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
- (word)(len + 1 + sizeof(*pmsg))))) {
- if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
- queueFreeMsg(dbg_queue);
- } else {
- break;
- }
- }
-
- if (pmsg) {
- pmsg->sequence = dbg_sequence++;
- pmsg->time_sec = sec;
- pmsg->time_usec = usec;
- pmsg->facility = MSG_TYPE_STRING;
- pmsg->dli = DLI_REG;
- pmsg->drv_id = 0; /* id 0 - DIMAINT */
- pmsg->di_cpu = 0;
- pmsg->data_length = len + 1;
-
- memcpy(&pmsg[1], tmp, len + 1);
- queueCompleteMsg(pmsg);
- diva_maint_wakeup_read();
- }
- }
-
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
-}
-
-static void DI_deregister(pDbgHandle hDbg) {
- diva_os_spin_lock_magic_t old_irql, old_irql1;
- dword sec, usec;
- int i;
- word size;
- byte *pmem = NULL;
-
- diva_os_get_time(&sec, &usec);
-
- diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "read");
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "read");
-
- for (i = 1; i < ARRAY_SIZE(clients); i++) {
- if (clients[i].hDbg == hDbg) {
- diva_dbg_entry_head_t *pmsg;
- char tmp[256];
- int len;
-
- clients[i].hDbg = NULL;
-
- hDbg->id = -1;
- hDbg->dbgMask = 0;
- hDbg->dbg_end = NULL;
- hDbg->dbg_prt = NULL;
- hDbg->dbg_irq = NULL;
- if (hDbg->Version > 0)
- hDbg->dbg_old = NULL;
- hDbg->Registered = 0;
- hDbg->next = NULL;
-
- if (clients[i].pIdiLib) {
- (*(clients[i].pIdiLib->DivaSTraceLibraryFinit))(clients[i].pIdiLib->hLib);
- clients[i].pIdiLib = NULL;
-
- pmem = clients[i].pmem;
- clients[i].pmem = NULL;
- }
-
- /*
- Log driver de-register, MAINT driver ID is '0'
- */
- len = sprintf(tmp, "DIMAINT - drv # %d = '%s' de-registered",
- i, hDbg->drvName);
-
- while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
- (word)(len + 1 + sizeof(*pmsg))))) {
- if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
- queueFreeMsg(dbg_queue);
- } else {
- break;
- }
- }
-
- if (pmsg) {
- pmsg->sequence = dbg_sequence++;
- pmsg->time_sec = sec;
- pmsg->time_usec = usec;
- pmsg->facility = MSG_TYPE_STRING;
- pmsg->dli = DLI_REG;
- pmsg->drv_id = 0; /* id 0 - DIMAINT */
- pmsg->di_cpu = 0;
- pmsg->data_length = len + 1;
-
- memcpy(&pmsg[1], tmp, len + 1);
- queueCompleteMsg(pmsg);
- diva_maint_wakeup_read();
- }
-
- break;
- }
- }
-
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "read_ack");
- diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "read_ack");
-
- if (pmem) {
- diva_os_free(0, pmem);
- }
-}
-
-static void DI_format_locked(unsigned short id,
- int type,
- char *format,
- va_list argument_list) {
- DI_format(1, id, type, format, argument_list);
-}
-
-static void DI_format(int do_lock,
- unsigned short id,
- int type,
- char *format,
- va_list ap) {
- diva_os_spin_lock_magic_t old_irql;
- dword sec, usec;
- diva_dbg_entry_head_t *pmsg = NULL;
- dword length;
- word size;
- static char fmtBuf[MSG_FRAME_MAX_SIZE + sizeof(*pmsg) + 1];
- char *data;
- unsigned short code;
-
- if (diva_os_in_irq()) {
- dbg_sequence++;
- return;
- }
-
- if ((!format) ||
- ((TraceFilter[0] != 0) && ((TraceFilterIdent < 0) || (TraceFilterChannel < 0)))) {
- return;
- }
-
-
-
- diva_os_get_time(&sec, &usec);
-
- if (do_lock) {
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "format");
- }
-
- switch (type) {
- case DLI_MXLOG:
- case DLI_BLK:
- case DLI_SEND:
- case DLI_RECV:
- if (!(length = va_arg(ap, unsigned long))) {
- break;
- }
- if (length > MaxDumpSize) {
- length = MaxDumpSize;
- }
- while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
- (word)length + sizeof(*pmsg)))) {
- if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
- queueFreeMsg(dbg_queue);
- } else {
- break;
- }
- }
- if (pmsg) {
- memcpy(&pmsg[1], format, length);
- pmsg->sequence = dbg_sequence++;
- pmsg->time_sec = sec;
- pmsg->time_usec = usec;
- pmsg->facility = MSG_TYPE_BINARY;
- pmsg->dli = type; /* DLI_XXX */
- pmsg->drv_id = id; /* driver MAINT id */
- pmsg->di_cpu = 0;
- pmsg->data_length = length;
- queueCompleteMsg(pmsg);
- }
- break;
-
- case DLI_XLOG: {
- byte *p;
- data = va_arg(ap, char *);
- code = (unsigned short)va_arg(ap, unsigned int);
- length = (unsigned long)va_arg(ap, unsigned int);
-
- if (length > MaxXlogSize)
- length = MaxXlogSize;
-
- while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
- (word)length + sizeof(*pmsg) + 2))) {
- if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
- queueFreeMsg(dbg_queue);
- } else {
- break;
- }
- }
- if (pmsg) {
- p = (byte *)&pmsg[1];
- p[0] = (char)(code);
- p[1] = (char)(code >> 8);
- if (data && length) {
- memcpy(&p[2], &data[0], length);
- }
- length += 2;
-
- pmsg->sequence = dbg_sequence++;
- pmsg->time_sec = sec;
- pmsg->time_usec = usec;
- pmsg->facility = MSG_TYPE_BINARY;
- pmsg->dli = type; /* DLI_XXX */
- pmsg->drv_id = id; /* driver MAINT id */
- pmsg->di_cpu = 0;
- pmsg->data_length = length;
- queueCompleteMsg(pmsg);
- }
- } break;
-
- case DLI_LOG:
- case DLI_FTL:
- case DLI_ERR:
- case DLI_TRC:
- case DLI_REG:
- case DLI_MEM:
- case DLI_SPL:
- case DLI_IRP:
- case DLI_TIM:
- case DLI_TAPI:
- case DLI_NDIS:
- case DLI_CONN:
- case DLI_STAT:
- case DLI_PRV0:
- case DLI_PRV1:
- case DLI_PRV2:
- case DLI_PRV3:
- if ((length = (unsigned long)vsprintf(&fmtBuf[0], format, ap)) > 0) {
- length += (sizeof(*pmsg) + 1);
-
- while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
- (word)length))) {
- if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
- queueFreeMsg(dbg_queue);
- } else {
- break;
- }
- }
-
- if (pmsg) { /* the queue may have been drained without a successful alloc */
- pmsg->sequence = dbg_sequence++;
- pmsg->time_sec = sec;
- pmsg->time_usec = usec;
- pmsg->facility = MSG_TYPE_STRING;
- pmsg->dli = type; /* DLI_XXX */
- pmsg->drv_id = id; /* driver MAINT id */
- pmsg->di_cpu = 0;
- pmsg->data_length = length - sizeof(*pmsg);
-
- memcpy(&pmsg[1], fmtBuf, pmsg->data_length);
- queueCompleteMsg(pmsg);
- }
- }
- break;
-
- } /* switch type */
-
-
- if (queueCount(dbg_queue)) {
- diva_maint_wakeup_read();
- }
-
- if (do_lock) {
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "format");
- }
-}
-
-/*
- Write driver ID and driver revision to callers buffer
-*/
-int diva_get_driver_info(dword id, byte *data, int data_length) {
- diva_os_spin_lock_magic_t old_irql;
- byte *p = data;
- int to_copy;
-
- if (!data || !id || (data_length < 17) ||
- (id >= ARRAY_SIZE(clients))) {
- return (-1);
- }
-
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "driver info");
-
- if (clients[id].hDbg) {
- *p++ = 1;
- *p++ = (byte)clients[id].sec; /* save seconds */
- *p++ = (byte)(clients[id].sec >> 8);
- *p++ = (byte)(clients[id].sec >> 16);
- *p++ = (byte)(clients[id].sec >> 24);
-
- *p++ = (byte)(clients[id].usec / 1000); /* save mseconds */
- *p++ = (byte)((clients[id].usec / 1000) >> 8);
- *p++ = (byte)((clients[id].usec / 1000) >> 16);
- *p++ = (byte)((clients[id].usec / 1000) >> 24);
-
- data_length -= 9;
-
- if ((to_copy = min(strlen(clients[id].drvName), (size_t)(data_length - 1)))) {
- memcpy(p, clients[id].drvName, to_copy);
- p += to_copy;
- data_length -= to_copy;
- if ((data_length >= 4) && clients[id].hDbg->drvTag[0]) {
- *p++ = '(';
- data_length -= 1;
- if ((to_copy = min(strlen(clients[id].hDbg->drvTag), (size_t)(data_length - 2)))) {
- memcpy(p, clients[id].hDbg->drvTag, to_copy);
- p += to_copy;
- data_length -= to_copy;
- if (data_length >= 2) {
- *p++ = ')';
- data_length--;
- }
- }
- }
- }
- }
- *p++ = 0;
-
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "driver info");
-
- return (p - data);
-}
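A decoding sketch for the buffer filled above; the two timestamps are stored little-endian, byte by byte (the variable names are illustrative):

	byte info[64];
	int len = diva_get_driver_info(id, info, sizeof(info));

	if ((len > 9) && (info[0] == 1)) {
		dword sec  = info[1] | (info[2] << 8) | (info[3] << 16) | (info[4] << 24);
		dword msec = info[5] | (info[6] << 8) | (info[7] << 16) | (info[8] << 24);
		/* info[9..len-2]: driver name, optionally "(tag)", NUL-terminated */
	}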
-
-int diva_get_driver_dbg_mask(dword id, byte *data) {
- diva_os_spin_lock_magic_t old_irql;
- int ret = -1;
-
- if (!data || !id || (id >= ARRAY_SIZE(clients))) {
- return (-1);
- }
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "driver info");
-
- if (clients[id].hDbg) {
- ret = 4;
- *data++ = (byte)(clients[id].hDbg->dbgMask);
- *data++ = (byte)(clients[id].hDbg->dbgMask >> 8);
- *data++ = (byte)(clients[id].hDbg->dbgMask >> 16);
- *data++ = (byte)(clients[id].hDbg->dbgMask >> 24);
- }
-
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "driver info");
-
- return (ret);
-}
-
-int diva_set_driver_dbg_mask(dword id, dword mask) {
- diva_os_spin_lock_magic_t old_irql, old_irql1;
- int ret = -1;
-
-
- if (!id || (id >= ARRAY_SIZE(clients))) {
- return (-1);
- }
-
- diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask");
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "dbg mask");
-
- if (clients[id].hDbg) {
- dword old_mask = clients[id].hDbg->dbgMask;
- mask &= 0x7fffffff;
- clients[id].hDbg->dbgMask = mask;
- clients[id].last_dbgMask = (clients[id].hDbg->dbgMask | clients[id].dbgMask);
- ret = 4;
- diva_change_management_debug_mask(&clients[id], old_mask);
- }
-
-
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "dbg mask");
-
- if (clients[id].request_pending) {
- clients[id].request_pending = 0;
- (*(clients[id].request))((ENTITY *)(*(clients[id].pIdiLib->DivaSTraceGetHandle))(clients[id].pIdiLib->hLib));
- }
-
- diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask");
-
- return (ret);
-}
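A read-modify-write sketch combining the two accessors above; DIVA_MGT_DBG_LINE_EVENTS is one of the mask bits used further below, the flow itself is illustrative:

	byte raw[4];

	if (diva_get_driver_dbg_mask(id, raw) == 4) {
		dword mask = raw[0] | (raw[1] << 8) | (raw[2] << 16) | (raw[3] << 24);
		diva_set_driver_dbg_mask(id, mask | DIVA_MGT_DBG_LINE_EVENTS);
	}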
-
-static int diva_get_idi_adapter_info(IDI_CALL request, dword *serial, dword *logical) {
- IDI_SYNC_REQ sync_req;
-
- sync_req.xdi_logical_adapter_number.Req = 0;
- sync_req.xdi_logical_adapter_number.Rc = IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER;
- (*request)((ENTITY *)&sync_req);
- *logical = sync_req.xdi_logical_adapter_number.info.logical_adapter_number;
-
- sync_req.GetSerial.Req = 0;
- sync_req.GetSerial.Rc = IDI_SYNC_REQ_GET_SERIAL;
- sync_req.GetSerial.serial = 0;
- (*request)((ENTITY *)&sync_req);
- *serial = sync_req.GetSerial.serial;
-
- return (0);
-}
-
-/*
- Register XDI adapter as MAINT compatible driver
-*/
-void diva_mnt_add_xdi_adapter(const DESCRIPTOR *d) {
- diva_os_spin_lock_magic_t old_irql, old_irql1;
- dword sec, usec, logical, serial, org_mask;
- int id, free_id = -1;
- char tmp[128];
- diva_dbg_entry_head_t *pmsg = NULL;
- int len;
- word size;
- byte *pmem;
-
- diva_os_get_time(&sec, &usec);
- diva_get_idi_adapter_info(d->request, &serial, &logical);
- if (serial & 0xff000000) {
- sprintf(tmp, "ADAPTER:%d SN:%u-%d",
- (int)logical,
- serial & 0x00ffffff,
- (byte)(((serial & 0xff000000) >> 24) + 1));
- } else {
- sprintf(tmp, "ADAPTER:%d SN:%u", (int)logical, serial);
- }
-
- if (!(pmem = diva_os_malloc(0, DivaSTraceGetMemotyRequirement(d->channels)))) {
- return;
- }
- memset(pmem, 0x00, DivaSTraceGetMemotyRequirement(d->channels));
-
- diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "register");
-
- for (id = 1; id < ARRAY_SIZE(clients); id++) {
- if (clients[id].hDbg && (clients[id].request == d->request)) {
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
- diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
- diva_os_free(0, pmem);
- return;
- }
- if (clients[id].hDbg) { /* slot is busy */
- continue;
- }
- if (free_id < 0) {
- free_id = id;
- }
- if (!strcmp(clients[id].drvName, tmp)) {
- /*
- This driver was already registered with this name
- and slot is still free - reuse it
- */
- free_id = id;
- break;
- }
- }
-
- if (free_id < 0) {
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
- diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
- diva_os_free(0, pmem);
- return;
- }
-
- id = free_id;
- clients[id].request = d->request;
- clients[id].request_pending = 0;
- clients[id].hDbg = &clients[id].Dbg;
- clients[id].sec = sec;
- clients[id].usec = usec;
- strcpy(clients[id].drvName, tmp);
- strcpy(clients[id].Dbg.drvName, tmp);
- clients[id].Dbg.drvTag[0] = 0;
- clients[id].logical = (int)logical;
- clients[id].channels = (int)d->channels;
- clients[id].dma_handle = -1;
-
- clients[id].Dbg.dbgMask = 0;
- clients[id].dbgMask = clients[id].Dbg.dbgMask;
- if (id) {
- clients[id].Dbg.dbgMask |= clients[free_id].last_dbgMask;
- } else {
- clients[id].last_dbgMask = 0;
- }
- clients[id].Dbg.Registered = DBG_HANDLE_REG_NEW;
- clients[id].Dbg.id = (byte)id;
- clients[id].Dbg.dbg_end = DI_deregister;
- clients[id].Dbg.dbg_prt = DI_format_locked;
- clients[id].Dbg.dbg_ev = DiProcessEventLog;
- clients[id].Dbg.dbg_irq = DI_format_locked;
- clients[id].Dbg.next = (pDbgHandle)DBG_MAGIC;
-
- {
- diva_trace_library_user_interface_t diva_maint_user_ifc = { &clients[id],
- diva_maint_state_change_notify,
- diva_maint_trace_notify,
- diva_maint_error };
-
- /*
- Attach to adapter management interface
- */
- if ((clients[id].pIdiLib =
- DivaSTraceLibraryCreateInstance((int)logical, &diva_maint_user_ifc, pmem))) {
- if (((*(clients[id].pIdiLib->DivaSTraceLibraryStart))(clients[id].pIdiLib->hLib))) {
- diva_mnt_internal_dprintf(0, DLI_ERR, "Adapter(%d) Start failed", (int)logical);
- (*(clients[id].pIdiLib->DivaSTraceLibraryFinit))(clients[id].pIdiLib->hLib);
- clients[id].pIdiLib = NULL;
- }
- } else {
- diva_mnt_internal_dprintf(0, DLI_ERR, "A(%d) management init failed", (int)logical);
- }
- }
-
- if (!clients[id].pIdiLib) {
- clients[id].request = NULL;
- clients[id].request_pending = 0;
- clients[id].hDbg = NULL;
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
- diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
- diva_os_free(0, pmem);
- return;
- }
-
- /*
- Log driver register, MAINT driver ID is '0'
- */
- len = sprintf(tmp, "DIMAINT - drv # %d = '%s' registered",
- id, clients[id].Dbg.drvName);
-
- while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
- (word)(len + 1 + sizeof(*pmsg))))) {
- if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
- queueFreeMsg(dbg_queue);
- } else {
- break;
- }
- }
-
- if (pmsg) {
- pmsg->sequence = dbg_sequence++;
- pmsg->time_sec = sec;
- pmsg->time_usec = usec;
- pmsg->facility = MSG_TYPE_STRING;
- pmsg->dli = DLI_REG;
- pmsg->drv_id = 0; /* id 0 - DIMAINT */
- pmsg->di_cpu = 0;
- pmsg->data_length = len + 1;
-
- memcpy(&pmsg[1], tmp, len + 1);
- queueCompleteMsg(pmsg);
- diva_maint_wakeup_read();
- }
-
- org_mask = clients[id].Dbg.dbgMask;
- clients[id].Dbg.dbgMask = 0;
-
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "register");
-
- if (clients[id].request_pending) {
- clients[id].request_pending = 0;
- (*(clients[id].request))((ENTITY *)(*(clients[id].pIdiLib->DivaSTraceGetHandle))(clients[id].pIdiLib->hLib));
- }
-
- diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "register");
-
- diva_set_driver_dbg_mask(id, org_mask);
-}
-
-/*
- De-Register XDI adapter
-*/
-void diva_mnt_remove_xdi_adapter(const DESCRIPTOR *d) {
- diva_os_spin_lock_magic_t old_irql, old_irql1;
- dword sec, usec;
- int i;
- word size;
- byte *pmem = NULL;
-
- diva_os_get_time(&sec, &usec);
-
- diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "read");
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "read");
-
- for (i = 1; i < ARRAY_SIZE(clients); i++) {
- if (clients[i].hDbg && (clients[i].request == d->request)) {
- diva_dbg_entry_head_t *pmsg;
- char tmp[256];
- int len;
-
- if (clients[i].pIdiLib) {
- (*(clients[i].pIdiLib->DivaSTraceLibraryFinit))(clients[i].pIdiLib->hLib);
- clients[i].pIdiLib = NULL;
-
- pmem = clients[i].pmem;
- clients[i].pmem = NULL;
- }
-
- clients[i].hDbg = NULL;
- clients[i].request_pending = 0;
- if (clients[i].dma_handle >= 0) {
- /*
- Free DMA handle
- */
- diva_free_dma_descriptor(clients[i].request, clients[i].dma_handle);
- clients[i].dma_handle = -1;
- }
- clients[i].request = NULL;
-
- /*
- Log driver de-register, MAINT driver ID is '0'
- */
- len = sprintf(tmp, "DIMAINT - drv # %d = '%s' de-registered",
- i, clients[i].Dbg.drvName);
-
- memset(&clients[i].Dbg, 0x00, sizeof(clients[i].Dbg));
-
- while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
- (word)(len + 1 + sizeof(*pmsg))))) {
- if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
- queueFreeMsg(dbg_queue);
- } else {
- break;
- }
- }
-
- if (pmsg) {
- pmsg->sequence = dbg_sequence++;
- pmsg->time_sec = sec;
- pmsg->time_usec = usec;
- pmsg->facility = MSG_TYPE_STRING;
- pmsg->dli = DLI_REG;
- pmsg->drv_id = 0; /* id 0 - DIMAINT */
- pmsg->di_cpu = 0;
- pmsg->data_length = len + 1;
-
- memcpy(&pmsg[1], tmp, len + 1);
- queueCompleteMsg(pmsg);
- diva_maint_wakeup_read();
- }
-
- break;
- }
- }
-
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "read_ack");
- diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "read_ack");
-
- if (pmem) {
- diva_os_free(0, pmem);
- }
-}
-
-/* ----------------------------------------------------------------
- Low level interface for management interface client
- ---------------------------------------------------------------- */
-/*
- Return handle to client structure
-*/
-void *SuperTraceOpenAdapter(int AdapterNumber) {
- int i;
-
- for (i = 1; i < ARRAY_SIZE(clients); i++) {
- if (clients[i].hDbg && clients[i].request && (clients[i].logical == AdapterNumber)) {
- return (&clients[i]);
- }
- }
-
- return NULL;
-}
-
-int SuperTraceCloseAdapter(void *AdapterHandle) {
- return (0);
-}
-
-int SuperTraceReadRequest(void *AdapterHandle, const char *name, byte *data) {
- diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
- if (pC && pC->pIdiLib && pC->request) {
- ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
- byte *xdata = (byte *)&pC->xbuffer[0];
- char tmp = 0;
- word length;
-
- if (!strcmp(name, "\\")) { /* Read ROOT */
- name = &tmp;
- }
- length = SuperTraceCreateReadReq(xdata, name);
- single_p(xdata, &length, 0); /* End Of Message */
-
- e->Req = MAN_READ;
- e->ReqCh = 0;
- e->X->PLength = length;
- e->X->P = (byte *)xdata;
-
- pC->request_pending = 1;
-
- return (0);
- }
-
- return (-1);
-}
-
-int SuperTraceGetNumberOfChannels(void *AdapterHandle) {
- if (AdapterHandle) {
- diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
- return (pC->channels);
- }
-
- return (0);
-}
-
-int SuperTraceASSIGN(void *AdapterHandle, byte *data) {
- diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
- if (pC && pC->pIdiLib && pC->request) {
- ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
- IDI_SYNC_REQ *preq;
- char buffer[((sizeof(preq->xdi_extended_features) + 4) > sizeof(ENTITY)) ? (sizeof(preq->xdi_extended_features) + 4) : sizeof(ENTITY)];
- char features[4];
- word assign_data_length = 1;
-
- features[0] = 0;
- pC->xbuffer[0] = 0;
- preq = (IDI_SYNC_REQ *)&buffer[0];
- preq->xdi_extended_features.Req = 0;
- preq->xdi_extended_features.Rc = IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES;
- preq->xdi_extended_features.info.buffer_length_in_bytes = sizeof(features);
- preq->xdi_extended_features.info.features = &features[0];
-
- (*(pC->request))((ENTITY *)preq);
-
- if ((features[0] & DIVA_XDI_EXTENDED_FEATURES_VALID) &&
- (features[0] & DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA)) {
- dword uninitialized_var(rx_dma_magic);
- if ((pC->dma_handle = diva_get_dma_descriptor(pC->request, &rx_dma_magic)) >= 0) {
- pC->xbuffer[0] = LLI;
- pC->xbuffer[1] = 8;
- pC->xbuffer[2] = 0x40;
- pC->xbuffer[3] = (byte)pC->dma_handle;
- pC->xbuffer[4] = (byte)rx_dma_magic;
- pC->xbuffer[5] = (byte)(rx_dma_magic >> 8);
- pC->xbuffer[6] = (byte)(rx_dma_magic >> 16);
- pC->xbuffer[7] = (byte)(rx_dma_magic >> 24);
- pC->xbuffer[8] = (byte)(DIVA_MAX_MANAGEMENT_TRANSFER_SIZE & 0xFF);
- pC->xbuffer[9] = (byte)(DIVA_MAX_MANAGEMENT_TRANSFER_SIZE >> 8);
- pC->xbuffer[10] = 0;
-
- assign_data_length = 11;
- }
- } else {
- pC->dma_handle = -1;
- }
-
- e->Id = MAN_ID;
- e->callback = diva_maint_xdi_cb;
- e->XNum = 1;
- e->X = &pC->XData;
- e->Req = ASSIGN;
- e->ReqCh = 0;
- e->X->PLength = assign_data_length;
- e->X->P = (byte *)&pC->xbuffer[0];
-
- pC->request_pending = 1;
-
- return (0);
- }
-
- return (-1);
-}
-
-int SuperTraceREMOVE(void *AdapterHandle) {
- diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
- if (pC && pC->pIdiLib && pC->request) {
- ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
-
- e->XNum = 1;
- e->X = &pC->XData;
- e->Req = REMOVE;
- e->ReqCh = 0;
- e->X->PLength = 1;
- e->X->P = (byte *)&pC->xbuffer[0];
- pC->xbuffer[0] = 0;
-
- pC->request_pending = 1;
-
- return (0);
- }
-
- return (-1);
-}
-
-int SuperTraceTraceOnRequest(void *hAdapter, const char *name, byte *data) {
- diva_maint_client_t *pC = (diva_maint_client_t *)hAdapter;
-
- if (pC && pC->pIdiLib && pC->request) {
- ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
- byte *xdata = (byte *)&pC->xbuffer[0];
- char tmp = 0;
- word length;
-
- if (!strcmp(name, "\\")) { /* Read ROOT */
- name = &tmp;
- }
- length = SuperTraceCreateReadReq(xdata, name);
- single_p(xdata, &length, 0); /* End Of Message */
- e->Req = MAN_EVENT_ON;
- e->ReqCh = 0;
- e->X->PLength = length;
- e->X->P = (byte *)xdata;
-
- pC->request_pending = 1;
-
- return (0);
- }
-
- return (-1);
-}
-
-int SuperTraceWriteVar(void *AdapterHandle,
- byte *data,
- const char *name,
- void *var,
- byte type,
- byte var_length) {
- diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
- if (pC && pC->pIdiLib && pC->request) {
- ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
- diva_man_var_header_t *pVar = (diva_man_var_header_t *)&pC->xbuffer[0];
- word length = SuperTraceCreateReadReq((byte *)pVar, name);
-
- memcpy(&pC->xbuffer[length], var, var_length);
- length += var_length;
- pVar->length += var_length;
- pVar->value_length = var_length;
- pVar->type = type;
- single_p((byte *)pVar, &length, 0); /* End Of Message */
-
- e->Req = MAN_WRITE;
- e->ReqCh = 0;
- e->X->PLength = length;
- e->X->P = (byte *)pVar;
-
- pC->request_pending = 1;
-
- return (0);
- }
-
- return (-1);
-}
-
-int SuperTraceExecuteRequest(void *AdapterHandle,
- const char *name,
- byte *data) {
- diva_maint_client_t *pC = (diva_maint_client_t *)AdapterHandle;
-
- if (pC && pC->pIdiLib && pC->request) {
- ENTITY *e = (ENTITY *)(*(pC->pIdiLib->DivaSTraceGetHandle))(pC->pIdiLib->hLib);
- byte *xdata = (byte *)&pC->xbuffer[0];
- word length;
-
- length = SuperTraceCreateReadReq(xdata, name);
- single_p(xdata, &length, 0); /* End Of Message */
-
- e->Req = MAN_EXECUTE;
- e->ReqCh = 0;
- e->X->PLength = length;
- e->X->P = (byte *)xdata;
-
- pC->request_pending = 1;
-
- return (0);
- }
-
- return (-1);
-}
-
-static word SuperTraceCreateReadReq(byte *P, const char *path) {
- byte var_length;
- byte *plen;
-
- var_length = (byte)strlen(path);
-
- *P++ = ESC;
- plen = P++;
- *P++ = 0x80; /* MAN_IE */
- *P++ = 0x00; /* Type */
- *P++ = 0x00; /* Attribute */
- *P++ = 0x00; /* Status */
- *P++ = 0x00; /* Variable Length */
- *P++ = var_length;
- memcpy(P, path, var_length);
- P += var_length;
- *plen = var_length + 0x06;
-
- return ((word)(var_length + 0x08));
-}
-
-static void single_p(byte *P, word *PLength, byte Id) {
- P[(*PLength)++] = Id;
-}
-
-static void diva_maint_xdi_cb(ENTITY *e) {
- diva_strace_context_t *pLib = DIVAS_CONTAINING_RECORD(e, diva_strace_context_t, e);
- diva_maint_client_t *pC;
- diva_os_spin_lock_magic_t old_irql, old_irql1;
-
-
- diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "xdi_cb");
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "xdi_cb");
-
- pC = (diva_maint_client_t *)pLib->hAdapter;
-
- if ((e->complete == 255) || (pC->dma_handle < 0)) {
- if ((*(pLib->instance.DivaSTraceMessageInput))(&pLib->instance)) {
- diva_mnt_internal_dprintf(0, DLI_ERR, "Trace internal library error");
- }
- } else {
- /*
- Process combined management interface indication
- */
- if ((*(pLib->instance.DivaSTraceMessageInput))(&pLib->instance)) {
- diva_mnt_internal_dprintf(0, DLI_ERR, "Trace internal library error (DMA mode)");
- }
- }
-
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "xdi_cb");
-
-
- if (pC->request_pending) {
- pC->request_pending = 0;
- (*(pC->request))(e);
- }
-
- diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "xdi_cb");
-}
-
-
-static void diva_maint_error(void *user_context,
- diva_strace_library_interface_t *hLib,
- int Adapter,
- int error,
- const char *file,
- int line) {
- diva_mnt_internal_dprintf(0, DLI_ERR,
- "Trace library error(%d) A(%d) %s %d", error, Adapter, file, line);
-}
-
-static void print_ie(diva_trace_ie_t *ie, char *buffer, int length) {
- int i;
-
- buffer[0] = 0;
-
- if (length > 32) {
- for (i = 0; ((i < ie->length) && (length > 3)); i++) {
- sprintf(buffer, "%02x", ie->data[i]);
- buffer += 2;
- length -= 2;
- if (i < (ie->length - 1)) {
- strcpy(buffer, " ");
- buffer++;
- length--;
- }
- }
- }
-}
-
-static void diva_maint_state_change_notify(void *user_context,
- diva_strace_library_interface_t *hLib,
- int Adapter,
- diva_trace_line_state_t *channel,
- int notify_subject) {
- diva_maint_client_t *pC = (diva_maint_client_t *)user_context;
- diva_trace_fax_state_t *fax = &channel->fax;
- diva_trace_modem_state_t *modem = &channel->modem;
- char tmp[256];
-
- if (!pC->hDbg) {
- return;
- }
-
- switch (notify_subject) {
- case DIVA_SUPER_TRACE_NOTIFY_LINE_CHANGE: {
- int view = (TraceFilter[0] == 0);
- /*
- Process selective Trace
- */
- if (channel->Line[0] == 'I' && channel->Line[1] == 'd' &&
- channel->Line[2] == 'l' && channel->Line[3] == 'e') {
- if ((TraceFilterIdent == pC->hDbg->id) && (TraceFilterChannel == (int)channel->ChannelNumber)) {
- (*(hLib->DivaSTraceSetBChannel))(hLib, (int)channel->ChannelNumber, 0);
- (*(hLib->DivaSTraceSetAudioTap))(hLib, (int)channel->ChannelNumber, 0);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Selective Trace OFF for Ch=%d",
- (int)channel->ChannelNumber);
- TraceFilterIdent = -1;
- TraceFilterChannel = -1;
- view = 1;
- }
- } else if (TraceFilter[0] && (TraceFilterIdent < 0) && !(diva_mnt_cmp_nmbr(&channel->RemoteAddress[0]) &&
- diva_mnt_cmp_nmbr(&channel->LocalAddress[0]))) {
-
- if ((pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_BCHANNEL) != 0) { /* Activate B-channel trace */
- (*(hLib->DivaSTraceSetBChannel))(hLib, (int)channel->ChannelNumber, 1);
- }
- if ((pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_AUDIO) != 0) { /* Activate AudioTap Trace */
- (*(hLib->DivaSTraceSetAudioTap))(hLib, (int)channel->ChannelNumber, 1);
- }
-
- TraceFilterIdent = pC->hDbg->id;
- TraceFilterChannel = (int)channel->ChannelNumber;
-
- if (TraceFilterIdent >= 0) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG, "Selective Trace ON for Ch=%d",
- (int)channel->ChannelNumber);
- view = 1;
- }
- }
- if (view && (pC->hDbg->dbgMask & DIVA_MGT_DBG_LINE_EVENTS)) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Ch = %d",
- (int)channel->ChannelNumber);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Status = <%s>", &channel->Line[0]);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Layer1 = <%s>", &channel->Framing[0]);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Layer2 = <%s>", &channel->Layer2[0]);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Layer3 = <%s>", &channel->Layer3[0]);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L RAddr = <%s>",
- &channel->RemoteAddress[0]);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L RSAddr = <%s>",
- &channel->RemoteSubAddress[0]);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L LAddr = <%s>",
- &channel->LocalAddress[0]);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L LSAddr = <%s>",
- &channel->LocalSubAddress[0]);
- print_ie(&channel->call_BC, tmp, sizeof(tmp));
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L BC = <%s>", tmp);
- print_ie(&channel->call_HLC, tmp, sizeof(tmp));
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L HLC = <%s>", tmp);
- print_ie(&channel->call_LLC, tmp, sizeof(tmp));
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L LLC = <%s>", tmp);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L CR = 0x%x", channel->CallReference);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Disc = 0x%x",
- channel->LastDisconnecCause);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "L Owner = <%s>", &channel->UserID[0]);
- }
-
- } break;
-
- case DIVA_SUPER_TRACE_NOTIFY_MODEM_CHANGE:
- if (pC->hDbg->dbgMask & DIVA_MGT_DBG_MDM_PROGRESS) {
- {
- int ch = TraceFilterChannel;
- int id = TraceFilterIdent;
-
- if ((id >= 0) && (ch >= 0) && (id < ARRAY_SIZE(clients)) &&
- (clients[id].Dbg.id == (byte)id) && (clients[id].pIdiLib == hLib)) {
- if (ch != (int)modem->ChannelNumber) {
- break;
- }
- } else if (TraceFilter[0] != 0) {
- break;
- }
- }
-
-
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Ch = %lu",
- (int)modem->ChannelNumber);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Event = %lu", modem->Event);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Norm = %lu", modem->Norm);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Opts. = 0x%08x", modem->Options);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Tx = %lu Bps", modem->TxSpeed);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Rx = %lu Bps", modem->RxSpeed);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM RT = %lu mSec",
- modem->RoundtripMsec);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Sr = %lu", modem->SymbolRate);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Rxl = %d dBm", modem->RxLeveldBm);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM El = %d dBm", modem->EchoLeveldBm);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM SNR = %lu dB", modem->SNRdb);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM MAE = %lu", modem->MAE);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM LRet = %lu",
- modem->LocalRetrains);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM RRet = %lu",
- modem->RemoteRetrains);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM LRes = %lu", modem->LocalResyncs);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM RRes = %lu",
- modem->RemoteResyncs);
- if (modem->Event == 3) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "MDM Disc = %lu", modem->DiscReason);
- }
- }
- if ((modem->Event == 3) && (pC->hDbg->dbgMask & DIVA_MGT_DBG_MDM_STATISTICS)) {
- (*(pC->pIdiLib->DivaSTraceGetModemStatistics))(pC->pIdiLib);
- }
- break;
-
- case DIVA_SUPER_TRACE_NOTIFY_FAX_CHANGE:
- if (pC->hDbg->dbgMask & DIVA_MGT_DBG_FAX_PROGRESS) {
- {
- int ch = TraceFilterChannel;
- int id = TraceFilterIdent;
-
- if ((id >= 0) && (ch >= 0) && (id < ARRAY_SIZE(clients)) &&
- (clients[id].Dbg.id == (byte)id) && (clients[id].pIdiLib == hLib)) {
- if (ch != (int)fax->ChannelNumber) {
- break;
- }
- } else if (TraceFilter[0] != 0) {
- break;
- }
- }
-
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Ch = %lu", (int)fax->ChannelNumber);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Event = %lu", fax->Event);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Pages = %lu", fax->Page_Counter);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Feat. = 0x%08x", fax->Features);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX ID = <%s>", &fax->Station_ID[0]);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Saddr = <%s>", &fax->Subaddress[0]);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Pwd = <%s>", &fax->Password[0]);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Speed = %lu", fax->Speed);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Res. = 0x%08x", fax->Resolution);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Width = %lu", fax->Paper_Width);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Length= %lu", fax->Paper_Length);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX SLT = %lu", fax->Scanline_Time);
- if (fax->Event == 3) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT, "FAX Disc = %lu", fax->Disc_Reason);
- }
- }
- if ((fax->Event == 3) && (pC->hDbg->dbgMask & DIVA_MGT_DBG_FAX_STATISTICS)) {
- (*(pC->pIdiLib->DivaSTraceGetFaxStatistics))(pC->pIdiLib);
- }
- break;
-
- case DIVA_SUPER_TRACE_INTERFACE_CHANGE:
- if (pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_EVENTS) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT,
- "Layer 1 -> [%s]", channel->pInterface->Layer1);
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_STAT,
- "Layer 2 -> [%s]", channel->pInterface->Layer2);
- }
- break;
-
- case DIVA_SUPER_TRACE_NOTIFY_STAT_CHANGE:
- if (pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_STATISTICS) {
- /*
- Incoming Statistics
- */
- if (channel->pInterfaceStat->inc.Calls) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Inc Calls =%lu", channel->pInterfaceStat->inc.Calls);
- }
- if (channel->pInterfaceStat->inc.Connected) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Inc Connected =%lu", channel->pInterfaceStat->inc.Connected);
- }
- if (channel->pInterfaceStat->inc.User_Busy) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Inc Busy =%lu", channel->pInterfaceStat->inc.User_Busy);
- }
- if (channel->pInterfaceStat->inc.Call_Rejected) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Inc Rejected =%lu", channel->pInterfaceStat->inc.Call_Rejected);
- }
- if (channel->pInterfaceStat->inc.Wrong_Number) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Inc Wrong Nr =%lu", channel->pInterfaceStat->inc.Wrong_Number);
- }
- if (channel->pInterfaceStat->inc.Incompatible_Dst) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Inc Incomp. Dest =%lu", channel->pInterfaceStat->inc.Incompatible_Dst);
- }
- if (channel->pInterfaceStat->inc.Out_of_Order) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Inc Out of Order =%lu", channel->pInterfaceStat->inc.Out_of_Order);
- }
- if (channel->pInterfaceStat->inc.Ignored) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Inc Ignored =%lu", channel->pInterfaceStat->inc.Ignored);
- }
-
- /*
- Outgoing Statistics
- */
- if (channel->pInterfaceStat->outg.Calls) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Outg Calls =%lu", channel->pInterfaceStat->outg.Calls);
- }
- if (channel->pInterfaceStat->outg.Connected) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Outg Connected =%lu", channel->pInterfaceStat->outg.Connected);
- }
- if (channel->pInterfaceStat->outg.User_Busy) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Outg Busy =%lu", channel->pInterfaceStat->outg.User_Busy);
- }
- if (channel->pInterfaceStat->outg.No_Answer) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Outg No Answer =%lu", channel->pInterfaceStat->outg.No_Answer);
- }
- if (channel->pInterfaceStat->outg.Wrong_Number) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Outg Wrong Nr =%lu", channel->pInterfaceStat->outg.Wrong_Number);
- }
- if (channel->pInterfaceStat->outg.Call_Rejected) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Outg Rejected =%lu", channel->pInterfaceStat->outg.Call_Rejected);
- }
- if (channel->pInterfaceStat->outg.Other_Failures) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "Outg Other Failures =%lu", channel->pInterfaceStat->outg.Other_Failures);
- }
- }
- break;
-
- case DIVA_SUPER_TRACE_NOTIFY_MDM_STAT_CHANGE:
- if (channel->pInterfaceStat->mdm.Disc_Normal) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "MDM Disc Normal = %lu", channel->pInterfaceStat->mdm.Disc_Normal);
- }
- if (channel->pInterfaceStat->mdm.Disc_Unspecified) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "MDM Disc Unsp. = %lu", channel->pInterfaceStat->mdm.Disc_Unspecified);
- }
- if (channel->pInterfaceStat->mdm.Disc_Busy_Tone) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "MDM Disc Busy Tone = %lu", channel->pInterfaceStat->mdm.Disc_Busy_Tone);
- }
- if (channel->pInterfaceStat->mdm.Disc_Congestion) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "MDM Disc Congestion = %lu", channel->pInterfaceStat->mdm.Disc_Congestion);
- }
- if (channel->pInterfaceStat->mdm.Disc_Carr_Wait) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "MDM Disc Carrier Wait = %lu", channel->pInterfaceStat->mdm.Disc_Carr_Wait);
- }
- if (channel->pInterfaceStat->mdm.Disc_Trn_Timeout) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "MDM Disc Trn. T.o. = %lu", channel->pInterfaceStat->mdm.Disc_Trn_Timeout);
- }
- if (channel->pInterfaceStat->mdm.Disc_Incompat) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "MDM Disc Incompatible = %lu", channel->pInterfaceStat->mdm.Disc_Incompat);
- }
- if (channel->pInterfaceStat->mdm.Disc_Frame_Rej) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "MDM Disc Frame Reject = %lu", channel->pInterfaceStat->mdm.Disc_Frame_Rej);
- }
- if (channel->pInterfaceStat->mdm.Disc_V42bis) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "MDM Disc V.42bis = %lu", channel->pInterfaceStat->mdm.Disc_V42bis);
- }
- break;
-
- case DIVA_SUPER_TRACE_NOTIFY_FAX_STAT_CHANGE:
- if (channel->pInterfaceStat->fax.Disc_Normal) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Normal = %lu", channel->pInterfaceStat->fax.Disc_Normal);
- }
- if (channel->pInterfaceStat->fax.Disc_Not_Ident) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Not Ident. = %lu", channel->pInterfaceStat->fax.Disc_Not_Ident);
- }
- if (channel->pInterfaceStat->fax.Disc_No_Response) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc No Response = %lu", channel->pInterfaceStat->fax.Disc_No_Response);
- }
- if (channel->pInterfaceStat->fax.Disc_Retries) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Max Retries = %lu", channel->pInterfaceStat->fax.Disc_Retries);
- }
- if (channel->pInterfaceStat->fax.Disc_Unexp_Msg) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Unexp. Msg. = %lu", channel->pInterfaceStat->fax.Disc_Unexp_Msg);
- }
- if (channel->pInterfaceStat->fax.Disc_No_Polling) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc No Polling = %lu", channel->pInterfaceStat->fax.Disc_No_Polling);
- }
- if (channel->pInterfaceStat->fax.Disc_Training) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Training = %lu", channel->pInterfaceStat->fax.Disc_Training);
- }
- if (channel->pInterfaceStat->fax.Disc_Unexpected) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Unexpected = %lu", channel->pInterfaceStat->fax.Disc_Unexpected);
- }
- if (channel->pInterfaceStat->fax.Disc_Application) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Application = %lu", channel->pInterfaceStat->fax.Disc_Application);
- }
- if (channel->pInterfaceStat->fax.Disc_Incompat) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Incompatible = %lu", channel->pInterfaceStat->fax.Disc_Incompat);
- }
- if (channel->pInterfaceStat->fax.Disc_No_Command) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc No Command = %lu", channel->pInterfaceStat->fax.Disc_No_Command);
- }
- if (channel->pInterfaceStat->fax.Disc_Long_Msg) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Long Msg. = %lu", channel->pInterfaceStat->fax.Disc_Long_Msg);
- }
- if (channel->pInterfaceStat->fax.Disc_Supervisor) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Supervisor = %lu", channel->pInterfaceStat->fax.Disc_Supervisor);
- }
- if (channel->pInterfaceStat->fax.Disc_SUB_SEP_PWD) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc SUP SEP PWD = %lu", channel->pInterfaceStat->fax.Disc_SUB_SEP_PWD);
- }
- if (channel->pInterfaceStat->fax.Disc_Invalid_Msg) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Invalid Msg. = %lu", channel->pInterfaceStat->fax.Disc_Invalid_Msg);
- }
- if (channel->pInterfaceStat->fax.Disc_Page_Coding) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Page Coding = %lu", channel->pInterfaceStat->fax.Disc_Page_Coding);
- }
- if (channel->pInterfaceStat->fax.Disc_App_Timeout) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Appl. T.o. = %lu", channel->pInterfaceStat->fax.Disc_App_Timeout);
- }
- if (channel->pInterfaceStat->fax.Disc_Unspecified) {
- diva_mnt_internal_dprintf(pC->hDbg->id, DLI_LOG,
- "FAX Disc Unspec. = %lu", channel->pInterfaceStat->fax.Disc_Unspecified);
- }
- break;
- }
-}
-
-/*
- Receive trace information from the Management Interface and store it in the
- internal trace buffer with MSG_TYPE_MLOG as is, without any filtering.
- Event filtering and formatting are done in the Management Interface itself.
-*/
-static void diva_maint_trace_notify(void *user_context,
- diva_strace_library_interface_t *hLib,
- int Adapter,
- void *xlog_buffer,
- int length) {
- diva_maint_client_t *pC = (diva_maint_client_t *)user_context;
- diva_dbg_entry_head_t *pmsg;
- word size;
- dword sec, usec;
- int ch = TraceFilterChannel;
- int id = TraceFilterIdent;
-
- /*
- Selective trace
- */
- if ((id >= 0) && (ch >= 0) && (id < ARRAY_SIZE(clients)) &&
- (clients[id].Dbg.id == (byte)id) && (clients[id].pIdiLib == hLib)) {
- const char *p = NULL;
- int ch_value = -1;
- MI_XLOG_HDR *TrcData = (MI_XLOG_HDR *)xlog_buffer;
-
- if (Adapter != clients[id].logical) {
- return; /* Ignore all trace messages from other adapters */
- }
-
- if (TrcData->code == 24) {
- p = (char *)&TrcData->code;
- p += 2;
- }
-
- /*
- All L1 messages start with [dsp,ch], so we can parse this information
- and filter out all messages that use a different channel
- */
- if (p && p[0] == '[') {
- if (p[2] == ',') {
- p += 3;
- ch_value = *p - '0';
- } else if (p[3] == ',') {
- p += 4;
- ch_value = *p - '0';
- }
- if (ch_value >= 0) {
- if (p[2] == ']') {
- ch_value = ch_value * 10 + p[1] - '0';
- }
- if (ch_value != ch) {
- return; /* Ignore other channels */
- }
- }
- }
-
- } else if (TraceFilter[0] != 0) {
- return; /* Ignore trace if trace filter is activated, but idle */
- }
-
- diva_os_get_time(&sec, &usec);
-
- while (!(pmsg = (diva_dbg_entry_head_t *)queueAllocMsg(dbg_queue,
- (word)length + sizeof(*pmsg)))) {
- if ((pmsg = (diva_dbg_entry_head_t *)queuePeekMsg(dbg_queue, &size))) {
- queueFreeMsg(dbg_queue);
- } else {
- break;
- }
- }
- if (pmsg) {
- memcpy(&pmsg[1], xlog_buffer, length);
- pmsg->sequence = dbg_sequence++;
- pmsg->time_sec = sec;
- pmsg->time_usec = usec;
- pmsg->facility = MSG_TYPE_MLOG;
- pmsg->dli = pC->logical;
- pmsg->drv_id = pC->hDbg->id;
- pmsg->di_cpu = 0;
- pmsg->data_length = length;
- queueCompleteMsg(pmsg);
- if (queueCount(dbg_queue)) {
- diva_maint_wakeup_read();
- }
- }
-}
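The allocation loop above implements a drop-oldest policy: when the ring queue is full, the oldest message is freed until the new entry fits. A minimal self-contained sketch of the same policy in C, using a hypothetical fixed-size slot queue in place of the driver's variable-length queueAllocMsg/queuePeekMsg/queueFreeMsg API:

    /* Drop-oldest insertion into a fixed-size slot queue (hypothetical
       illustration; slot granularity stands in for the variable-length
       messages handled by queueAllocMsg/queueFreeMsg above). */
    #include <string.h>

    #define SLOTS   8
    #define SLOT_SZ 64

    static unsigned char q[SLOTS][SLOT_SZ];
    static int q_head, q_count; /* index of oldest entry, number of entries */

    static void *q_alloc_drop_oldest(void)
    {
        if (q_count == SLOTS) {            /* full: free the oldest entry */
            q_head = (q_head + 1) % SLOTS;
            q_count--;
        }
        q_count++;
        return q[(q_head + q_count - 1) % SLOTS];
    }

    /* Usage: always succeeds, at the price of losing the oldest trace
       entry under overload -- the same trade-off the driver makes. */
    static void q_push(const void *data, int len)
    {
        memcpy(q_alloc_drop_oldest(), data, len < SLOT_SZ ? len : SLOT_SZ);
    }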
-
-
-/*
- Convert the MAINT trace mask to a management interface trace mask/word/facility and
- issue command to management interface
-*/
-static void diva_change_management_debug_mask(diva_maint_client_t *pC, dword old_mask) {
- if (pC->request && pC->hDbg && pC->pIdiLib) {
- dword changed = pC->hDbg->dbgMask ^ old_mask;
-
- if (changed & DIVA_MGT_DBG_TRACE) {
- (*(pC->pIdiLib->DivaSTraceSetInfo))(pC->pIdiLib,
- (pC->hDbg->dbgMask & DIVA_MGT_DBG_TRACE) != 0);
- }
- if (changed & DIVA_MGT_DBG_DCHAN) {
- (*(pC->pIdiLib->DivaSTraceSetDChannel))(pC->pIdiLib,
- (pC->hDbg->dbgMask & DIVA_MGT_DBG_DCHAN) != 0);
- }
- if (!TraceFilter[0]) {
- if (changed & DIVA_MGT_DBG_IFC_BCHANNEL) {
- int i, state = ((pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_BCHANNEL) != 0);
-
- for (i = 0; i < pC->channels; i++) {
- (*(pC->pIdiLib->DivaSTraceSetBChannel))(pC->pIdiLib, i + 1, state);
- }
- }
- if (changed & DIVA_MGT_DBG_IFC_AUDIO) {
- int i, state = ((pC->hDbg->dbgMask & DIVA_MGT_DBG_IFC_AUDIO) != 0);
-
- for (i = 0; i < pC->channels; i++) {
- (*(pC->pIdiLib->DivaSTraceSetAudioTap))(pC->pIdiLib, i + 1, state);
- }
- }
- }
- }
-}
-
-
-void diva_mnt_internal_dprintf(dword drv_id, dword type, char *fmt, ...) {
- va_list ap;
-
- va_start(ap, fmt);
- DI_format(0, (word)drv_id, (int)type, fmt, ap);
- va_end(ap);
-}
-
-/*
- Shutdown all adapters before driver removal
-*/
-int diva_mnt_shutdown_xdi_adapters(void) {
- diva_os_spin_lock_magic_t old_irql, old_irql1;
- int i, fret = 0;
- byte *pmem;
-
-
- for (i = 1; i < ARRAY_SIZE(clients); i++) {
- pmem = NULL;
-
- diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "unload");
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "unload");
-
- if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request) {
- if ((*(clients[i].pIdiLib->DivaSTraceLibraryStop))(clients[i].pIdiLib) == 1) {
- /*
- Adapter removal complete
- */
- if (clients[i].pIdiLib) {
- (*(clients[i].pIdiLib->DivaSTraceLibraryFinit))(clients[i].pIdiLib->hLib);
- clients[i].pIdiLib = NULL;
-
- pmem = clients[i].pmem;
- clients[i].pmem = NULL;
- }
- clients[i].hDbg = NULL;
- clients[i].request_pending = 0;
-
- if (clients[i].dma_handle >= 0) {
- /*
- Free DMA handle
- */
- diva_free_dma_descriptor(clients[i].request, clients[i].dma_handle);
- clients[i].dma_handle = -1;
- }
- clients[i].request = NULL;
- } else {
- fret = -1;
- }
- }
-
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "unload");
- if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request && clients[i].request_pending) {
- clients[i].request_pending = 0;
- (*(clients[i].request))((ENTITY *)(*(clients[i].pIdiLib->DivaSTraceGetHandle))(clients[i].pIdiLib->hLib));
- if (clients[i].dma_handle >= 0) {
- diva_free_dma_descriptor(clients[i].request, clients[i].dma_handle);
- clients[i].dma_handle = -1;
- }
- }
- diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "unload");
-
- if (pmem) {
- diva_os_free(0, pmem);
- }
- }
-
- return (fret);
-}
-
-/*
- Set/Read the trace filter used for selective tracing.
- Affects the B-channel and Audio Tap trace masks at run time
-*/
-int diva_set_trace_filter(int filter_length, const char *filter) {
- diva_os_spin_lock_magic_t old_irql, old_irql1;
- int i, ch, on, client_b_on, client_atap_on;
-
- diva_os_enter_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask");
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "write_filter");
-
- if (filter_length <= DIVA_MAX_SELECTIVE_FILTER_LENGTH) {
- memcpy(&TraceFilter[0], filter, filter_length);
- if (TraceFilter[filter_length]) {
- TraceFilter[filter_length] = 0;
- }
- if (TraceFilter[0] == '*') {
- TraceFilter[0] = 0;
- }
- } else {
- filter_length = -1;
- }
-
- TraceFilterIdent = -1;
- TraceFilterChannel = -1;
-
- on = (TraceFilter[0] == 0);
-
- for (i = 1; i < ARRAY_SIZE(clients); i++) {
- if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request) {
- client_b_on = on && ((clients[i].hDbg->dbgMask & DIVA_MGT_DBG_IFC_BCHANNEL) != 0);
- client_atap_on = on && ((clients[i].hDbg->dbgMask & DIVA_MGT_DBG_IFC_AUDIO) != 0);
- for (ch = 0; ch < clients[i].channels; ch++) {
- (*(clients[i].pIdiLib->DivaSTraceSetBChannel))(clients[i].pIdiLib->hLib, ch + 1, client_b_on);
- (*(clients[i].pIdiLib->DivaSTraceSetAudioTap))(clients[i].pIdiLib->hLib, ch + 1, client_atap_on);
- }
- }
- }
-
- for (i = 1; i < ARRAY_SIZE(clients); i++) {
- if (clients[i].hDbg && clients[i].pIdiLib && clients[i].request && clients[i].request_pending) {
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "write_filter");
- clients[i].request_pending = 0;
- (*(clients[i].request))((ENTITY *)(*(clients[i].pIdiLib->DivaSTraceGetHandle))(clients[i].pIdiLib->hLib));
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "write_filter");
- }
- }
-
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "write_filter");
- diva_os_leave_spin_lock(&dbg_adapter_lock, &old_irql1, "dbg mask");
-
- return (filter_length);
-}
-
-int diva_get_trace_filter(int max_length, char *filter) {
- diva_os_spin_lock_magic_t old_irql;
- int len;
-
- diva_os_enter_spin_lock(&dbg_q_lock, &old_irql, "read_filter");
- len = strlen(&TraceFilter[0]) + 1;
- if (max_length >= len) {
- memcpy(filter, &TraceFilter[0], len);
- }
- diva_os_leave_spin_lock(&dbg_q_lock, &old_irql, "read_filter");
-
- return (len);
-}
-
-static int diva_dbg_cmp_key(const char *ref, const char *key) {
- while (*key && (*ref++ == *key++));
- return (!*key && !*ref);
-}
-
-/*
- If the trace filter starts with the character "C", then
- all following characters are interpreted as a command.
- The following commands are available:
- - single: trace a single call at a time, independent of CPN/CiPN
-*/
-static int diva_mnt_cmp_nmbr(const char *nmbr) {
- const char *ref = &TraceFilter[0];
- int ref_len = strlen(&TraceFilter[0]), nmbr_len = strlen(nmbr);
-
- if (ref[0] == 'C') {
- if (diva_dbg_cmp_key(&ref[1], "single")) {
- return (0);
- }
- return (-1);
- }
-
- if (!ref_len || (ref_len > nmbr_len)) {
- return (-1);
- }
-
- nmbr = nmbr + nmbr_len - 1;
- ref = ref + ref_len - 1;
-
- while (ref_len--) {
- if (*nmbr-- != *ref--) {
- return (-1);
- }
- }
-
- return (0);
-}
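diva_mnt_cmp_nmbr above matches the filter as a suffix of the called number: the comparison walks both strings backwards from their last characters. A standalone sketch of the same semantics (function names hypothetical):

    #include <stdio.h>
    #include <string.h>

    /* Returns 0 if 'filter' is a non-empty suffix of 'nmbr', -1 otherwise
       (the same contract as diva_mnt_cmp_nmbr above, minus the "C"
       command handling). */
    static int suffix_match(const char *filter, const char *nmbr)
    {
        size_t flen = strlen(filter), nlen = strlen(nmbr);

        if (!flen || flen > nlen)
            return -1;
        return memcmp(nmbr + nlen - flen, filter, flen) ? -1 : 0;
    }

    int main(void)
    {
        printf("%d\n", suffix_match("1234", "089881234")); /* 0: match    */
        printf("%d\n", suffix_match("5678", "089881234")); /* -1: no match */
        return 0;
    }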
-
-static int diva_get_dma_descriptor(IDI_CALL request, dword *dma_magic) {
- ENTITY e;
- IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e;
-
- if (!request) {
- return (-1);
- }
-
- pReq->xdi_dma_descriptor_operation.Req = 0;
- pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
-
- pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC;
- pReq->xdi_dma_descriptor_operation.info.descriptor_number = -1;
- pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
- pReq->xdi_dma_descriptor_operation.info.descriptor_magic = 0;
-
- (*request)((ENTITY *)pReq);
-
- if (!pReq->xdi_dma_descriptor_operation.info.operation &&
- (pReq->xdi_dma_descriptor_operation.info.descriptor_number >= 0) &&
- pReq->xdi_dma_descriptor_operation.info.descriptor_magic) {
- *dma_magic = pReq->xdi_dma_descriptor_operation.info.descriptor_magic;
- return (pReq->xdi_dma_descriptor_operation.info.descriptor_number);
- } else {
- return (-1);
- }
-}
-
-static void diva_free_dma_descriptor(IDI_CALL request, int nr) {
- ENTITY e;
- IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e;
-
- if (!request || (nr < 0)) {
- return;
- }
-
- pReq->xdi_dma_descriptor_operation.Req = 0;
- pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
-
- pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE;
- pReq->xdi_dma_descriptor_operation.info.descriptor_number = nr;
- pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
- pReq->xdi_dma_descriptor_operation.info.descriptor_magic = 0;
-
- (*request)((ENTITY *)pReq);
-}
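A hedged sketch of how the alloc/free pair above is intended to be used, following the pattern visible in diva_mnt_shutdown_xdi_adapters: the descriptor number lives in the client state and is freed exactly once.

    /* Hypothetical caller; 'request' is the adapter's IDI entry point. */
    dword dma_magic;
    int dma_handle = diva_get_dma_descriptor(request, &dma_magic);

    if (dma_handle >= 0) {
        /* ... use dma_handle/dma_magic for streaming ... */
        diva_free_dma_descriptor(request, dma_handle);
        dma_handle = -1; /* mark as freed, as the shutdown path does */
    }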
diff --git a/drivers/isdn/hardware/eicon/debug_if.h b/drivers/isdn/hardware/eicon/debug_if.h
deleted file mode 100644
index fc5953a35ff6..000000000000
--- a/drivers/isdn/hardware/eicon/debug_if.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- *
- Copyright (c) Eicon Technology Corporation, 2000.
- *
- This source file is supplied for the use with Eicon
- Technology Corporation's range of DIVA Server Adapters.
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_DEBUG_IF_H__
-#define __DIVA_DEBUG_IF_H__
-#define MSG_TYPE_DRV_ID 0x0001
-#define MSG_TYPE_FLAGS 0x0002
-#define MSG_TYPE_STRING 0x0003
-#define MSG_TYPE_BINARY 0x0004
-#define MSG_TYPE_MLOG 0x0005
-
-#define MSG_FRAME_MAX_SIZE 2150
-
-typedef struct _diva_dbg_entry_head {
- dword sequence;
- dword time_sec;
- dword time_usec;
- dword facility;
- dword dli;
- dword drv_id;
- dword di_cpu;
- dword data_length;
-} diva_dbg_entry_head_t;
-
-int diva_maint_init(byte *base, unsigned long length, int do_init);
-void *diva_maint_finit(void);
-dword diva_dbg_q_length(void);
-diva_dbg_entry_head_t *diva_maint_get_message(word *size,
- diva_os_spin_lock_magic_t *old_irql);
-void diva_maint_ack_message(int do_release,
- diva_os_spin_lock_magic_t *old_irql);
-void diva_maint_prtComp(char *format, ...);
-void diva_maint_wakeup_read(void);
-int diva_get_driver_info(dword id, byte *data, int data_length);
-int diva_get_driver_dbg_mask(dword id, byte *data);
-int diva_set_driver_dbg_mask(dword id, dword mask);
-void diva_mnt_remove_xdi_adapter(const DESCRIPTOR *d);
-void diva_mnt_add_xdi_adapter(const DESCRIPTOR *d);
-int diva_mnt_shutdown_xdi_adapters(void);
-
-#define DIVA_MAX_SELECTIVE_FILTER_LENGTH 127
-int diva_set_trace_filter(int filter_length, const char *filter);
-int diva_get_trace_filter(int max_length, char *filter);
-
-
-#define DITRACE_CMD_GET_DRIVER_INFO 1
-#define DITRACE_READ_DRIVER_DBG_MASK 2
-#define DITRACE_WRITE_DRIVER_DBG_MASK 3
-#define DITRACE_READ_TRACE_ENTRY 4
-#define DITRACE_READ_TRACE_ENTRYS 5
-#define DITRACE_WRITE_SELECTIVE_TRACE_FILTER 6
-#define DITRACE_READ_SELECTIVE_TRACE_FILTER 7
-
-/*
- Trace levels for debugging via the management interface
-*/
-#define DIVA_MGT_DBG_TRACE 0x00000001 /* All trace messages from the card */
-#define DIVA_MGT_DBG_DCHAN 0x00000002 /* All D-channel related trace messages */
-#define DIVA_MGT_DBG_MDM_PROGRESS 0x00000004 /* Modem progress events */
-#define DIVA_MGT_DBG_FAX_PROGRESS 0x00000008 /* Fax progress events */
-#define DIVA_MGT_DBG_IFC_STATISTICS 0x00000010 /* Interface call statistics */
-#define DIVA_MGT_DBG_MDM_STATISTICS 0x00000020 /* Global modem statistics */
-#define DIVA_MGT_DBG_FAX_STATISTICS 0x00000040 /* Global fax statistics */
-#define DIVA_MGT_DBG_LINE_EVENTS 0x00000080 /* Line state events */
-#define DIVA_MGT_DBG_IFC_EVENTS 0x00000100 /* Interface/L1/L2 state events */
-#define DIVA_MGT_DBG_IFC_BCHANNEL 0x00000200 /* B-Channel trace for all channels */
-#define DIVA_MGT_DBG_IFC_AUDIO 0x00000400 /* Audio Tap trace for all channels */
-
-#endif /* __DIVA_DEBUG_IF_H__ */
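Each queued trace record consists of a diva_dbg_entry_head_t immediately followed by data_length payload bytes. A hedged consumer sketch built only from the prototypes above (locking semantics assumed from the get/ack pairing; 'consume' is a hypothetical sink):

    /* Drain the trace queue; payload starts right after the header. */
    word size;
    diva_os_spin_lock_magic_t irql;
    diva_dbg_entry_head_t *e;

    while ((e = diva_maint_get_message(&size, &irql)) != NULL) {
        const byte *payload = (const byte *)&e[1];

        consume(e->sequence, e->facility, payload, e->data_length);
        diva_maint_ack_message(1, &irql); /* release the entry */
    }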
diff --git a/drivers/isdn/hardware/eicon/debuglib.c b/drivers/isdn/hardware/eicon/debuglib.c
deleted file mode 100644
index d5b1092a54f0..000000000000
--- a/drivers/isdn/hardware/eicon/debuglib.c
+++ /dev/null
@@ -1,156 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include "debuglib.h"
-
-#ifdef DIVA_NO_DEBUGLIB
-static DIVA_DI_PRINTF dprintf;
-#else /* DIVA_NO_DEBUGLIB */
-
-_DbgHandle_ myDriverDebugHandle = { 0 /*!Registered*/, DBG_HANDLE_VERSION };
-DIVA_DI_PRINTF dprintf = no_printf;
-/*****************************************************************************/
-#define DBG_FUNC(name) \
- void \
- myDbgPrint_##name(char *format, ...) \
- { va_list ap; \
- if (myDriverDebugHandle.dbg_prt) \
- { va_start(ap, format); \
- (myDriverDebugHandle.dbg_prt) \
- (myDriverDebugHandle.id, DLI_##name, format, ap); \
- va_end(ap); \
- } }
-DBG_FUNC(LOG)
-DBG_FUNC(FTL)
-DBG_FUNC(ERR)
-DBG_FUNC(TRC)
-DBG_FUNC(MXLOG)
-DBG_FUNC(FTL_MXLOG)
-void
-myDbgPrint_EVL(long msgID, ...)
-{ va_list ap;
- if (myDriverDebugHandle.dbg_ev)
- { va_start(ap, msgID);
- (myDriverDebugHandle.dbg_ev)
- (myDriverDebugHandle.id, (unsigned long)msgID, ap);
- va_end(ap);
- } }
-DBG_FUNC(REG)
-DBG_FUNC(MEM)
-DBG_FUNC(SPL)
-DBG_FUNC(IRP)
-DBG_FUNC(TIM)
-DBG_FUNC(BLK)
-DBG_FUNC(TAPI)
-DBG_FUNC(NDIS)
-DBG_FUNC(CONN)
-DBG_FUNC(STAT)
-DBG_FUNC(SEND)
-DBG_FUNC(RECV)
-DBG_FUNC(PRV0)
-DBG_FUNC(PRV1)
-DBG_FUNC(PRV2)
-DBG_FUNC(PRV3)
-/*****************************************************************************/
-int
-DbgRegister(char *drvName, char *drvTag, unsigned long dbgMask)
-{
- int len;
-/*
- * deregister (if already registered) and zero out myDriverDebugHandle
- */
- DbgDeregister();
-/*
- * initialize the debug handle
- */
- myDriverDebugHandle.Version = DBG_HANDLE_VERSION;
- myDriverDebugHandle.id = -1;
- myDriverDebugHandle.dbgMask = dbgMask | (DL_EVL | DL_FTL | DL_LOG);
- len = strlen(drvName);
- memcpy(myDriverDebugHandle.drvName, drvName,
- (len < sizeof(myDriverDebugHandle.drvName)) ?
- len : sizeof(myDriverDebugHandle.drvName) - 1);
- len = strlen(drvTag);
- memcpy(myDriverDebugHandle.drvTag, drvTag,
- (len < sizeof(myDriverDebugHandle.drvTag)) ?
- len : sizeof(myDriverDebugHandle.drvTag) - 1);
-/*
- * Try to register debugging via old (and only) interface
- */
- dprintf("\000\377", &myDriverDebugHandle);
- if (myDriverDebugHandle.dbg_prt)
- {
- return (1);
- }
-/*
- * Check if we registered with an old maint driver (see debuglib.h)
- */
- if (myDriverDebugHandle.dbg_end != NULL
- /* location of 'dbg_prt' in _OldDbgHandle_ struct */
- && (myDriverDebugHandle.regTime.LowPart ||
- myDriverDebugHandle.regTime.HighPart))
- /* same location as in _OldDbgHandle_ struct */
- {
- dprintf("%s: Cannot log to old maint driver !", drvName);
- myDriverDebugHandle.dbg_end =
- ((_OldDbgHandle_ *)&myDriverDebugHandle)->dbg_end;
- DbgDeregister();
- }
- return (0);
-}
-/*****************************************************************************/
-void
-DbgSetLevel(unsigned long dbgMask)
-{
- myDriverDebugHandle.dbgMask = dbgMask | (DL_EVL | DL_FTL | DL_LOG);
-}
-/*****************************************************************************/
-void
-DbgDeregister(void)
-{
- if (myDriverDebugHandle.dbg_end)
- {
- (myDriverDebugHandle.dbg_end)(&myDriverDebugHandle);
- }
- memset(&myDriverDebugHandle, 0, sizeof(myDriverDebugHandle));
-}
-void xdi_dbg_xlog(char *x, ...) {
- va_list ap;
- va_start(ap, x);
- if (myDriverDebugHandle.dbg_end &&
- (myDriverDebugHandle.dbg_irq || myDriverDebugHandle.dbg_old) &&
- (myDriverDebugHandle.dbgMask & DL_STAT)) {
- if (myDriverDebugHandle.dbg_irq) {
- (*(myDriverDebugHandle.dbg_irq))(myDriverDebugHandle.id,
- (x[0] != 0) ? DLI_TRC : DLI_XLOG, x, ap);
- } else {
- (*(myDriverDebugHandle.dbg_old))(myDriverDebugHandle.id, x, ap);
- }
- }
- va_end(ap);
-}
-/*****************************************************************************/
-#endif /* DIVA_NO_DEBUGLIB */
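A minimal usage sketch for the registration interface above, assuming the DBG_LOG/DBG_ERR wrappers from debuglib.h (shown below); note the double parentheses, which pass a whole printf-style argument list through a single macro parameter:

    /* Hypothetical driver init/exit paths. */
    if (DbgRegister("mydrv", "Rev 1.0", DL_LOG | DL_ERR | DL_TRC)) {
        DBG_LOG(("registered, mask=0x%08lx", myDriverDebugHandle.dbgMask))
        DBG_ERR(("example error, code=%d", -1))
    }
    /* ... on unload: */
    DbgDeregister();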
diff --git a/drivers/isdn/hardware/eicon/debuglib.h b/drivers/isdn/hardware/eicon/debuglib.h
deleted file mode 100644
index 6dcbf6afb8f9..000000000000
--- a/drivers/isdn/hardware/eicon/debuglib.h
+++ /dev/null
@@ -1,322 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#if !defined(__DEBUGLIB_H__)
-#define __DEBUGLIB_H__
-#include <stdarg.h>
-/*
- * define global debug priorities
- */
-#define DL_LOG 0x00000001 /* always worth mentioning */
-#define DL_FTL 0x00000002 /* always sampled error */
-#define DL_ERR 0x00000004 /* any kind of error */
-#define DL_TRC 0x00000008 /* verbose information */
-#define DL_XLOG 0x00000010 /* old xlog info */
-#define DL_MXLOG 0x00000020 /* maestra xlog info */
-#define DL_FTL_MXLOG 0x00000021 /* fatal maestra xlog info */
-#define DL_EVL 0x00000080 /* special NT eventlog msg */
-#define DL_COMPAT (DL_MXLOG | DL_XLOG)
-#define DL_PRIOR_MASK (DL_EVL | DL_COMPAT | DL_TRC | DL_ERR | DL_FTL | DL_LOG)
-#define DLI_LOG 0x0100
-#define DLI_FTL 0x0200
-#define DLI_ERR 0x0300
-#define DLI_TRC 0x0400
-#define DLI_XLOG 0x0500
-#define DLI_MXLOG 0x0600
-#define DLI_FTL_MXLOG 0x0600
-#define DLI_EVL 0x0800
-/*
- * define OS (operating system interface) debuglevel
- */
-#define DL_REG 0x00000100 /* init/query registry */
-#define DL_MEM 0x00000200 /* memory management */
-#define DL_SPL 0x00000400 /* event/spinlock handling */
-#define DL_IRP 0x00000800 /* I/O request handling */
-#define DL_TIM 0x00001000 /* timer/watchdog handling */
-#define DL_BLK 0x00002000 /* raw data block contents */
-#define DL_OS_MASK (DL_BLK | DL_TIM | DL_IRP | DL_SPL | DL_MEM | DL_REG)
-#define DLI_REG 0x0900
-#define DLI_MEM 0x0A00
-#define DLI_SPL 0x0B00
-#define DLI_IRP 0x0C00
-#define DLI_TIM 0x0D00
-#define DLI_BLK 0x0E00
-/*
- * define ISDN (connection interface) debuglevel
- */
-#define DL_TAPI 0x00010000 /* debug TAPI interface */
-#define DL_NDIS 0x00020000 /* debug NDIS interface */
-#define DL_CONN 0x00040000 /* connection handling */
-#define DL_STAT 0x00080000 /* trace state machines */
-#define DL_SEND 0x00100000 /* trace raw xmitted data */
-#define DL_RECV 0x00200000 /* trace raw received data */
-#define DL_DATA (DL_SEND | DL_RECV)
-#define DL_ISDN_MASK (DL_DATA | DL_STAT | DL_CONN | DL_NDIS | DL_TAPI)
-#define DLI_TAPI 0x1100
-#define DLI_NDIS 0x1200
-#define DLI_CONN 0x1300
-#define DLI_STAT 0x1400
-#define DLI_SEND 0x1500
-#define DLI_RECV 0x1600
-/*
- * define some private (unspecified) debuglevel
- */
-#define DL_PRV0 0x01000000
-#define DL_PRV1 0x02000000
-#define DL_PRV2 0x04000000
-#define DL_PRV3 0x08000000
-#define DL_PRIV_MASK (DL_PRV0 | DL_PRV1 | DL_PRV2 | DL_PRV3)
-#define DLI_PRV0 0x1900
-#define DLI_PRV1 0x1A00
-#define DLI_PRV2 0x1B00
-#define DLI_PRV3 0x1C00
-#define DT_INDEX(x) ((x) & 0x000F)
-#define DL_INDEX(x) ((((x) >> 8) & 0x00FF) - 1)
-#define DLI_NAME(x) ((x) & 0xFF00)
-/*
- * Debug mask for kernel mode tracing; if set, the output is also sent to
- * the system debug function. Requires that the project is compiled
- * with _KERNEL_DBG_PRINT_
- */
-#define DL_TO_KERNEL 0x40000000
-
-#ifdef DIVA_NO_DEBUGLIB
-#define myDbgPrint_LOG(x...) do { } while (0);
-#define myDbgPrint_FTL(x...) do { } while (0);
-#define myDbgPrint_ERR(x...) do { } while (0);
-#define myDbgPrint_TRC(x...) do { } while (0);
-#define myDbgPrint_MXLOG(x...) do { } while (0);
-#define myDbgPrint_EVL(x...) do { } while (0);
-#define myDbgPrint_REG(x...) do { } while (0);
-#define myDbgPrint_MEM(x...) do { } while (0);
-#define myDbgPrint_SPL(x...) do { } while (0);
-#define myDbgPrint_IRP(x...) do { } while (0);
-#define myDbgPrint_TIM(x...) do { } while (0);
-#define myDbgPrint_BLK(x...) do { } while (0);
-#define myDbgPrint_TAPI(x...) do { } while (0);
-#define myDbgPrint_NDIS(x...) do { } while (0);
-#define myDbgPrint_CONN(x...) do { } while (0);
-#define myDbgPrint_STAT(x...) do { } while (0);
-#define myDbgPrint_SEND(x...) do { } while (0);
-#define myDbgPrint_RECV(x...) do { } while (0);
-#define myDbgPrint_PRV0(x...) do { } while (0);
-#define myDbgPrint_PRV1(x...) do { } while (0);
-#define myDbgPrint_PRV2(x...) do { } while (0);
-#define myDbgPrint_PRV3(x...) do { } while (0);
-#define DBG_TEST(func, args) do { } while (0);
-#define DBG_EVL_ID(args) do { } while (0);
-
-#else /* DIVA_NO_DEBUGLIB */
-/*
- * define low level macros for formatted & raw debugging
- */
-#define DBG_DECL(func) extern void myDbgPrint_##func(char *, ...);
-DBG_DECL(LOG)
-DBG_DECL(FTL)
-DBG_DECL(ERR)
-DBG_DECL(TRC)
-DBG_DECL(MXLOG)
-DBG_DECL(FTL_MXLOG)
-extern void myDbgPrint_EVL(long, ...);
-DBG_DECL(REG)
-DBG_DECL(MEM)
-DBG_DECL(SPL)
-DBG_DECL(IRP)
-DBG_DECL(TIM)
-DBG_DECL(BLK)
-DBG_DECL(TAPI)
-DBG_DECL(NDIS)
-DBG_DECL(CONN)
-DBG_DECL(STAT)
-DBG_DECL(SEND)
-DBG_DECL(RECV)
-DBG_DECL(PRV0)
-DBG_DECL(PRV1)
-DBG_DECL(PRV2)
-DBG_DECL(PRV3)
-#ifdef _KERNEL_DBG_PRINT_
-/*
- * tracing to maint and kernel if selected in the trace mask.
- */
-#define DBG_TEST(func, args) \
- { if ((myDriverDebugHandle.dbgMask) & (unsigned long)DL_##func) \
- { \
- if ((myDriverDebugHandle.dbgMask) & DL_TO_KERNEL) \
- { DbgPrint args; DbgPrint("\r\n"); } \
- myDbgPrint_##func args; \
- } }
-#else
-/*
- * Standard tracing to maint driver.
- */
-#define DBG_TEST(func, args) \
- { if ((myDriverDebugHandle.dbgMask) & (unsigned long)DL_##func) \
- { myDbgPrint_##func args; \
- } }
-#endif
-/*
- * For event level debug use a separate define; the parameters are
- * different and cause compiler errors on some systems.
- */
-#define DBG_EVL_ID(args) \
- { if ((myDriverDebugHandle.dbgMask) & (unsigned long)DL_EVL) \
- { myDbgPrint_EVL args; \
- } }
-
-#endif /* DIVA_NO_DEBUGLIB */
-
-#define DBG_LOG(args) DBG_TEST(LOG, args)
-#define DBG_FTL(args) DBG_TEST(FTL, args)
-#define DBG_ERR(args) DBG_TEST(ERR, args)
-#define DBG_TRC(args) DBG_TEST(TRC, args)
-#define DBG_MXLOG(args) DBG_TEST(MXLOG, args)
-#define DBG_FTL_MXLOG(args) DBG_TEST(FTL_MXLOG, args)
-#define DBG_EVL(args) DBG_EVL_ID(args)
-#define DBG_REG(args) DBG_TEST(REG, args)
-#define DBG_MEM(args) DBG_TEST(MEM, args)
-#define DBG_SPL(args) DBG_TEST(SPL, args)
-#define DBG_IRP(args) DBG_TEST(IRP, args)
-#define DBG_TIM(args) DBG_TEST(TIM, args)
-#define DBG_BLK(args) DBG_TEST(BLK, args)
-#define DBG_TAPI(args) DBG_TEST(TAPI, args)
-#define DBG_NDIS(args) DBG_TEST(NDIS, args)
-#define DBG_CONN(args) DBG_TEST(CONN, args)
-#define DBG_STAT(args) DBG_TEST(STAT, args)
-#define DBG_SEND(args) DBG_TEST(SEND, args)
-#define DBG_RECV(args) DBG_TEST(RECV, args)
-#define DBG_PRV0(args) DBG_TEST(PRV0, args)
-#define DBG_PRV1(args) DBG_TEST(PRV1, args)
-#define DBG_PRV2(args) DBG_TEST(PRV2, args)
-#define DBG_PRV3(args) DBG_TEST(PRV3, args)
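The double-parentheses convention deserves a note: the inner parentheses turn the whole printf-style argument list into one macro argument, which DBG_TEST then pastes directly after myDbgPrint_<level> as its call operator. For example, in the non-kernel build:

    /* DBG_TRC(("Id=0x%x Rc=0x%x", Id, Rc)) expands to: */
    { if ((myDriverDebugHandle.dbgMask) & (unsigned long)DL_TRC)
      { myDbgPrint_TRC ("Id=0x%x Rc=0x%x", Id, Rc); } }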
-/*
- * prototypes for debug register/deregister functions in "debuglib.c"
- */
-#ifdef DIVA_NO_DEBUGLIB
-#define DbgRegister(name, tag, mask) do { } while (0)
-#define DbgDeregister() do { } while (0)
-#define DbgSetLevel(mask) do { } while (0)
-#else
-extern DIVA_DI_PRINTF dprintf;
-extern int DbgRegister(char *drvName, char *drvTag, unsigned long dbgMask);
-extern void DbgDeregister(void);
-extern void DbgSetLevel(unsigned long dbgMask);
-#endif
-/*
- * driver internal structure for debug handling;
- * in client drivers this structure is maintained in "debuglib.c",
- * in the debug driver "debug.c" maintains a chain of such structs.
- */
-typedef struct _DbgHandle_ *pDbgHandle;
-typedef void (*DbgEnd)(pDbgHandle);
-typedef void (*DbgLog)(unsigned short, int, char *, va_list);
-typedef void (*DbgOld)(unsigned short, char *, va_list);
-typedef void (*DbgEv)(unsigned short, unsigned long, va_list);
-typedef void (*DbgIrq)(unsigned short, int, char *, va_list);
-typedef struct _DbgHandle_
-{ char Registered; /* driver successfully registered */
-#define DBG_HANDLE_REG_NEW 0x01 /* this (new) structure */
-#define DBG_HANDLE_REG_OLD 0x7f /* old structure (see below) */
- char Version; /* version of this structure */
-#define DBG_HANDLE_VERSION 1 /* contains dbg_old function now */
-#define DBG_HANDLE_VER_EXT 2 /* pReserved points to extended info*/
- short id; /* internal id of registered driver */
- struct _DbgHandle_ *next; /* ptr to next registered driver */
- struct /*LARGE_INTEGER*/ {
- unsigned long LowPart;
- long HighPart;
- } regTime; /* timestamp for registration */
- void *pIrp; /* ptr to pending i/o request */
- unsigned long dbgMask; /* current debug mask */
- char drvName[128]; /* ASCII name of registered driver */
- char drvTag[64]; /* revision string */
- DbgEnd dbg_end; /* function for debug closing */
- DbgLog dbg_prt; /* function for debug appending */
- DbgOld dbg_old; /* function for old debug appending */
- DbgEv dbg_ev; /* function for Windows NT Eventlog */
- DbgIrq dbg_irq; /* function for irql checked debug */
- void *pReserved3;
-} _DbgHandle_;
-extern _DbgHandle_ myDriverDebugHandle;
-typedef struct _OldDbgHandle_
-{ struct _OldDbgHandle_ *next;
- void *pIrp;
- long regTime[2];
- unsigned long dbgMask;
- short id;
- char drvName[78];
- DbgEnd dbg_end;
- DbgLog dbg_prt;
-} _OldDbgHandle_;
-/* the differences in DbgHandles
- old: tmp: new:
- 0 long next char Registered char Registered
- char filler char Version
- short id short id
- 4 long pIrp long regTime.lo long next
- 8 long regTime.lo long regTime.hi long regTime.lo
- 12 long regTime.hi long next long regTime.hi
- 16 long dbgMask long pIrp long pIrp
- 20 short id long dbgMask long dbgMask
- 22 char drvName[78] ..
- 24 .. char drvName[16] char drvName[16]
- 40 .. char drvTag[64] char drvTag[64]
- 100 void *dbg_end .. ..
- 104 void *dbg_prt void *dbg_end void *dbg_end
- 108 .. void *dbg_prt void *dbg_prt
- 112 .. .. void *dbg_old
- 116 .. .. void *dbg_ev
- 120 .. .. void *dbg_irq
- 124 .. .. void *pReserved3
- ( new->id == 0 && *((short *)&new->dbgMask) == -1 ) identifies "old",
- new->Registered and new->Version overlay old->next,
- new->next overlays old->pIrp, new->regTime matches old->regTime and
- thus these fields can be maintained in the new struct without trouble;
- id, dbgMask, drvName, dbg_end and dbg_prt need special handling !
-*/
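A sketch of the detection predicate described in the layout comment above (hypothetical helper; it relies on the field overlay documented there):

    /* Returns nonzero if 'h' was filled in by an old-style maint driver. */
    static int is_old_dbg_handle(const _DbgHandle_ *h)
    {
        return (h->id == 0) && (*((const short *)&h->dbgMask) == -1);
    }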
-#define DBG_EXT_TYPE_CARD_TRACE 0x00000001
-typedef struct
-{
- unsigned long ExtendedType;
- union
- {
- /* DBG_EXT_TYPE_CARD_TRACE */
- struct
- {
- void (*MaskChangedNotify)(void *pContext);
- unsigned long ModuleTxtMask;
- unsigned long DebugLevel;
- unsigned long B_ChannelMask;
- unsigned long LogBufferSize;
- } CardTrace;
- } Data;
-} _DbgExtendedInfo_;
-#ifndef DIVA_NO_DEBUGLIB
-/* -------------------------------------------------------------
- Function used for xlog-style debug
- ------------------------------------------------------------- */
-#define XDI_USE_XLOG 1
-void xdi_dbg_xlog(char *x, ...);
-#endif /* DIVA_NO_DEBUGLIB */
-#endif /* __DEBUGLIB_H__ */
diff --git a/drivers/isdn/hardware/eicon/dfifo.h b/drivers/isdn/hardware/eicon/dfifo.h
deleted file mode 100644
index 6a1d3337f99e..000000000000
--- a/drivers/isdn/hardware/eicon/dfifo.h
+++ /dev/null
@@ -1,54 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_IDI_DFIFO_INC__
-#define __DIVA_IDI_DFIFO_INC__
-#define DIVA_DFIFO_CACHE_SZ 64 /* Used to isolate the pipe from
- the rest of the world;
- should be divisible by 4
- */
-#define DIVA_DFIFO_RAW_SZ (2512 * 8)
-#define DIVA_DFIFO_DATA_SZ 68
-#define DIVA_DFIFO_HDR_SZ 4
-#define DIVA_DFIFO_SEGMENT_SZ (DIVA_DFIFO_DATA_SZ + DIVA_DFIFO_HDR_SZ)
-#define DIVA_DFIFO_SEGMENTS ((DIVA_DFIFO_RAW_SZ) / (DIVA_DFIFO_SEGMENT_SZ) + 1)
-#define DIVA_DFIFO_MEM_SZ ( \
- (DIVA_DFIFO_SEGMENT_SZ) * (DIVA_DFIFO_SEGMENTS) + \
- (DIVA_DFIFO_CACHE_SZ) * 2 \
- )
-#define DIVA_DFIFO_STEP DIVA_DFIFO_SEGMENT_SZ
-/* -------------------------------------------------------------------------
- Block header layout is:
- byte[0] -> flags
- byte[1] -> length of data in block
- byte[2] -> reserved
- byte[3] -> reserved
- ------------------------------------------------------------------------- */
-#define DIVA_DFIFO_WRAP 0x80 /* This is the last block in fifo */
-#define DIVA_DFIFO_READY 0x40 /* This block is ready for processing */
-#define DIVA_DFIFO_LAST 0x20 /* This block is last in message */
-#define DIVA_DFIFO_AUTO 0x10 /* Don't look for 'ready', don't ack */
-int diva_dfifo_create(void *start, int length);
-#endif
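The constants above fix the fifo geometry; a worked evaluation (values follow directly from the macros), plus a hedged header-read sketch using the flag bits:

    /* DIVA_DFIFO_RAW_SZ     = 2512 * 8          = 20096 bytes
       DIVA_DFIFO_SEGMENT_SZ = 68 + 4            = 72 bytes
       DIVA_DFIFO_SEGMENTS   = 20096 / 72 + 1    = 280 segments
       DIVA_DFIFO_MEM_SZ     = 72 * 280 + 64 * 2 = 20288 bytes */

    /* Reading one block header (hypothetical cursor 'fifo_cursor'): */
    unsigned char *hdr = fifo_cursor;               /* assumed pointer  */
    if (hdr[0] & DIVA_DFIFO_READY) {
        int len  = hdr[1];                          /* payload length   */
        int last = (hdr[0] & DIVA_DFIFO_LAST) != 0; /* end of message   */
        /* ... consume len bytes following the 4-byte header ... */
    }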
diff --git a/drivers/isdn/hardware/eicon/di.c b/drivers/isdn/hardware/eicon/di.c
deleted file mode 100644
index cd3fba1add12..000000000000
--- a/drivers/isdn/hardware/eicon/di.c
+++ /dev/null
@@ -1,835 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di_defs.h"
-#include "di.h"
-#if !defined USE_EXTENDED_DEBUGS
-#include "dimaint.h"
-#else
-#define dprintf
-#endif
-#include "io.h"
-#include "dfifo.h"
-#define PR_RAM ((struct pr_ram *)0)
-#define RAM ((struct dual *)0)
-/*------------------------------------------------------------------*/
-/* local function prototypes */
-/*------------------------------------------------------------------*/
-void pr_out(ADAPTER *a);
-byte pr_dpc(ADAPTER *a);
-static byte pr_ready(ADAPTER *a);
-static byte isdn_rc(ADAPTER *, byte, byte, byte, word, dword, dword);
-static byte isdn_ind(ADAPTER *, byte, byte, byte, PBUFFER *, byte, word);
-/* -----------------------------------------------------------------
- Functions used for the extended XDI Debug
- macros
- global convergence counter (used by all adapters)
- See the implementation of these functions for details
- about the parameters.
- If you change the debugging parameters, you should update
- aididbg.doc in the IDI docs.
- ----------------------------------------------------------------- */
-#if defined(XDI_USE_XLOG)
-#define XDI_A_NR(_x_) ((byte)(((ISDN_ADAPTER *)(_x_->io))->ANum))
-static void xdi_xlog(byte *msg, word code, int length);
-static byte xdi_xlog_sec = 0;
-#else
-#define XDI_A_NR(_x_) ((byte)0)
-#endif
-static void xdi_xlog_rc_event(byte Adapter,
- byte Id, byte Ch, byte Rc, byte cb, byte type);
-static void xdi_xlog_request(byte Adapter, byte Id,
- byte Ch, byte Req, byte type);
-static void xdi_xlog_ind(byte Adapter,
- byte Id,
- byte Ch,
- byte Ind,
- byte rnr_valid,
- byte rnr,
- byte type);
-/*------------------------------------------------------------------*/
-/* output function */
-/*------------------------------------------------------------------*/
-void pr_out(ADAPTER *a)
-{
- byte e_no;
- ENTITY *this = NULL;
- BUFFERS *X;
- word length;
- word i;
- word clength;
- REQ *ReqOut;
- byte more;
- byte ReadyCount;
- byte ReqCount;
- byte Id;
- dtrc(dprintf("pr_out"));
- /* while a request is pending ... */
- e_no = look_req(a);
- if (!e_no)
- {
- dtrc(dprintf("no_req"));
- return;
- }
- ReadyCount = pr_ready(a);
- if (!ReadyCount)
- {
- dtrc(dprintf("not_ready"));
- return;
- }
- ReqCount = 0;
- while (e_no && ReadyCount) {
- next_req(a);
- this = entity_ptr(a, e_no);
-#ifdef USE_EXTENDED_DEBUGS
- if (!this)
- {
- DBG_FTL(("XDI: [%02x] !A%d ==> NULL entity ptr - try to ignore",
- xdi_xlog_sec++, (int)((ISDN_ADAPTER *)a->io)->ANum))
- e_no = look_req(a);
- ReadyCount--;
- continue;
- }
- {
- DBG_TRC((">A%d Id=0x%x Req=0x%x", ((ISDN_ADAPTER *)a->io)->ANum, this->Id, this->Req))
- }
-#else
- dbug(dprintf("out:Req=%x,Id=%x,Ch=%x", this->Req, this->Id, this->ReqCh));
-#endif
- /* get address of next available request buffer */
- ReqOut = (REQ *)&PR_RAM->B[a->ram_inw(a, &PR_RAM->NextReq)];
-#if defined(DIVA_ISTREAM)
- if (!(a->tx_stream[this->Id] &&
- this->Req == N_DATA)) {
-#endif
- /* now copy the data from the current data buffer into the */
- /* adapters request buffer */
- length = 0;
- i = this->XCurrent;
- X = PTR_X(a, this);
- while (i < this->XNum && length < 270) {
- clength = min((word)(270 - length), (word)(X[i].PLength-this->XOffset));
- a->ram_out_buffer(a,
- &ReqOut->XBuffer.P[length],
- PTR_P(a, this, &X[i].P[this->XOffset]),
- clength);
- length += clength;
- this->XOffset += clength;
- if (this->XOffset == X[i].PLength) {
- this->XCurrent = (byte)++i;
- this->XOffset = 0;
- }
- }
-#if defined(DIVA_ISTREAM)
- } else { /* Use CMA extension in order to transfer data to the card */
- i = this->XCurrent;
- X = PTR_X(a, this);
- while (i < this->XNum) {
- diva_istream_write(a,
- this->Id,
- PTR_P(a, this, &X[i].P[0]),
- X[i].PLength,
- ((i + 1) == this->XNum),
- 0, 0);
- this->XCurrent = (byte)++i;
- }
- length = 0;
- }
-#endif
- a->ram_outw(a, &ReqOut->XBuffer.length, length);
- a->ram_out(a, &ReqOut->ReqId, this->Id);
- a->ram_out(a, &ReqOut->ReqCh, this->ReqCh);
- /* if it's a specific request (no ASSIGN) ... */
- if (this->Id & 0x1f) {
- /* if buffers are left in the list of data buffers do */
- /* do chaining (LL_MDATA, N_MDATA) */
- this->More++;
- if (i < this->XNum && this->MInd) {
- xdi_xlog_request(XDI_A_NR(a), this->Id, this->ReqCh, this->MInd,
- a->IdTypeTable[this->No]);
- a->ram_out(a, &ReqOut->Req, this->MInd);
- more = true;
- }
- else {
- xdi_xlog_request(XDI_A_NR(a), this->Id, this->ReqCh, this->Req,
- a->IdTypeTable[this->No]);
- this->More |= XMOREF;
- a->ram_out(a, &ReqOut->Req, this->Req);
- more = false;
- if (a->FlowControlIdTable[this->ReqCh] == this->Id)
- a->FlowControlSkipTable[this->ReqCh] = true;
- /*
- Note that a remove request was sent to the card
- */
- if (this->Req == REMOVE) {
- a->misc_flags_table[e_no] |= DIVA_MISC_FLAGS_REMOVE_PENDING;
- }
- }
- /* if we did chaining, this entity is put back into the */
- /* request queue */
- if (more) {
- req_queue(a, this->No);
- }
- }
- /* else it's an ASSIGN */
- else {
- /* save the request code used for buffer chaining */
- this->MInd = 0;
- if (this->Id == BLLC_ID) this->MInd = LL_MDATA;
- if (this->Id == NL_ID ||
- this->Id == TASK_ID ||
- this->Id == MAN_ID
- ) this->MInd = N_MDATA;
- /* send the ASSIGN */
- a->IdTypeTable[this->No] = this->Id;
- xdi_xlog_request(XDI_A_NR(a), this->Id, this->ReqCh, this->Req, this->Id);
- this->More |= XMOREF;
- a->ram_out(a, &ReqOut->Req, this->Req);
- /* save the reference of the ASSIGN */
- assign_queue(a, this->No, a->ram_inw(a, &ReqOut->Reference));
- }
- a->ram_outw(a, &PR_RAM->NextReq, a->ram_inw(a, &ReqOut->next));
- ReadyCount--;
- ReqCount++;
- e_no = look_req(a);
- }
- /* send the filled request buffers to the ISDN adapter */
- a->ram_out(a, &PR_RAM->ReqInput,
- (byte)(a->ram_in(a, &PR_RAM->ReqInput) + ReqCount));
- /* if it is an 'unreturncoded' UREMOVE request, remove the */
- /* Id from our table after sending the request */
- if (this && (this->Req == UREMOVE) && this->Id) {
- Id = this->Id;
- e_no = a->IdTable[Id];
- free_entity(a, e_no);
- for (i = 0; i < 256; i++)
- {
- if (a->FlowControlIdTable[i] == Id)
- a->FlowControlIdTable[i] = 0;
- }
- a->IdTable[Id] = 0;
- this->Id = 0;
- }
-}
-static byte pr_ready(ADAPTER *a)
-{
- byte ReadyCount;
- ReadyCount = (byte)(a->ram_in(a, &PR_RAM->ReqOutput) -
- a->ram_in(a, &PR_RAM->ReqInput));
- if (!ReadyCount) {
- if (!a->ReadyInt) {
- a->ram_inc(a, &PR_RAM->ReadyInt);
- a->ReadyInt++;
- }
- }
- return ReadyCount;
-}
-/*------------------------------------------------------------------*/
-/* isdn interrupt handler */
-/*------------------------------------------------------------------*/
-byte pr_dpc(ADAPTER *a)
-{
- byte Count;
- RC *RcIn;
- IND *IndIn;
- byte c;
- byte RNRId;
- byte Rc;
- byte Ind;
- /* if return codes are available ... */
- if ((Count = a->ram_in(a, &PR_RAM->RcOutput)) != 0) {
- dtrc(dprintf("#Rc=%x", Count));
- /* get the buffer address of the first return code */
- RcIn = (RC *)&PR_RAM->B[a->ram_inw(a, &PR_RAM->NextRc)];
- /* for all return codes do ... */
- while (Count--) {
- if ((Rc = a->ram_in(a, &RcIn->Rc)) != 0) {
- dword tmp[2];
- /*
- Get extended information, associated with return code
- */
- a->ram_in_buffer(a,
- &RcIn->Reserved2[0],
- (byte *)&tmp[0],
- 8);
- /* call return code handler, if it is not our return code */
- /* the handler returns 2 */
- /* for all return codes we process, we clear the Rc field */
- isdn_rc(a,
- Rc,
- a->ram_in(a, &RcIn->RcId),
- a->ram_in(a, &RcIn->RcCh),
- a->ram_inw(a, &RcIn->Reference),
- tmp[0], /* type of extended information */
- tmp[1]); /* extended information */
- a->ram_out(a, &RcIn->Rc, 0);
- }
- /* get buffer address of next return code */
- RcIn = (RC *)&PR_RAM->B[a->ram_inw(a, &RcIn->next)];
- }
- /* clear all return codes (no chaining!) */
- a->ram_out(a, &PR_RAM->RcOutput, 0);
- /* call output function */
- pr_out(a);
- }
- /* clear RNR flag */
- RNRId = 0;
- /* if indications are available ... */
- if ((Count = a->ram_in(a, &PR_RAM->IndOutput)) != 0) {
- dtrc(dprintf("#Ind=%x", Count));
- /* get the buffer address of the first indication */
- IndIn = (IND *)&PR_RAM->B[a->ram_inw(a, &PR_RAM->NextInd)];
- /* for all indications do ... */
- while (Count--) {
- /* if the application marks an indication as RNR, all */
- /* indications from the same Id delivered in this interrupt */
- /* are marked RNR */
- if (RNRId && RNRId == a->ram_in(a, &IndIn->IndId)) {
- a->ram_out(a, &IndIn->Ind, 0);
- a->ram_out(a, &IndIn->RNR, true);
- }
- else {
- Ind = a->ram_in(a, &IndIn->Ind);
- if (Ind) {
- RNRId = 0;
- /* call indication handler, a return value of 2 means chain */
- /* a return value of 1 means RNR */
- /* for all indications we process, we clear the Ind field */
- c = isdn_ind(a,
- Ind,
- a->ram_in(a, &IndIn->IndId),
- a->ram_in(a, &IndIn->IndCh),
- &IndIn->RBuffer,
- a->ram_in(a, &IndIn->MInd),
- a->ram_inw(a, &IndIn->MLength));
- if (c == 1) {
- dtrc(dprintf("RNR"));
- a->ram_out(a, &IndIn->Ind, 0);
- RNRId = a->ram_in(a, &IndIn->IndId);
- a->ram_out(a, &IndIn->RNR, true);
- }
- }
- }
- /* get buffer address of next indication */
- IndIn = (IND *)&PR_RAM->B[a->ram_inw(a, &IndIn->next)];
- }
- a->ram_out(a, &PR_RAM->IndOutput, 0);
- }
- return false;
-}
-byte scom_test_int(ADAPTER *a)
-{
- return a->ram_in(a, (void *)0x3fe);
-}
-void scom_clear_int(ADAPTER *a)
-{
- a->ram_out(a, (void *)0x3fe, 0);
-}
-/*------------------------------------------------------------------*/
-/* return code handler */
-/*------------------------------------------------------------------*/
-static byte isdn_rc(ADAPTER *a,
- byte Rc,
- byte Id,
- byte Ch,
- word Ref,
- dword extended_info_type,
- dword extended_info)
-{
- ENTITY *this;
- byte e_no;
- word i;
- int cancel_rc;
-#ifdef USE_EXTENDED_DEBUGS
- {
- DBG_TRC(("<A%d Id=0x%x Rc=0x%x", ((ISDN_ADAPTER *)a->io)->ANum, Id, Rc))
- }
-#else
- dbug(dprintf("isdn_rc(Rc=%x,Id=%x,Ch=%x)", Rc, Id, Ch));
-#endif
- /* check for ready interrupt */
- if (Rc == READY_INT) {
- xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 0, 0);
- if (a->ReadyInt) {
- a->ReadyInt--;
- return 0;
- }
- return 2;
- }
- /* if we know this Id ... */
- e_no = a->IdTable[Id];
- if (e_no) {
- this = entity_ptr(a, e_no);
- xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 0, a->IdTypeTable[this->No]);
- this->RcCh = Ch;
- /* if it is a return code to a REMOVE request, remove the */
- /* Id from our table */
- if ((a->misc_flags_table[e_no] & DIVA_MISC_FLAGS_REMOVE_PENDING) &&
- (Rc == OK)) {
- if (a->IdTypeTable[e_no] == NL_ID) {
- if (a->RcExtensionSupported &&
- (extended_info_type != DIVA_RC_TYPE_REMOVE_COMPLETE)) {
- dtrc(dprintf("XDI: N-REMOVE, A(%02x) Id:%02x, ignore RC=OK",
- XDI_A_NR(a), Id));
- return (0);
- }
- if (extended_info_type == DIVA_RC_TYPE_REMOVE_COMPLETE)
- a->RcExtensionSupported = true;
- }
- a->misc_flags_table[e_no] &= ~DIVA_MISC_FLAGS_REMOVE_PENDING;
- a->misc_flags_table[e_no] &= ~DIVA_MISC_FLAGS_NO_RC_CANCELLING;
- free_entity(a, e_no);
- for (i = 0; i < 256; i++)
- {
- if (a->FlowControlIdTable[i] == Id)
- a->FlowControlIdTable[i] = 0;
- }
- a->IdTable[Id] = 0;
- this->Id = 0;
- /* ---------------------------------------------------------------
- If we send N_DISC or N_DISC_ACK after we have received OK_FC,
- then the card will respond with OK_FC and later with RC==OK.
- If we send N_REMOVE in this state, we will receive only RC==OK.
- This creates a state in which the XDI is waiting for the
- additional RC and does not deliver the RC to the client. This
- code corrects the counter of outstanding RC's in this case.
- --------------------------------------------------------------- */
- if ((this->More & XMOREC) > 1) {
- this->More &= ~XMOREC;
- this->More |= 1;
- dtrc(dprintf("XDI: correct MORE on REMOVE A(%02x) Id:%02x",
- XDI_A_NR(a), Id));
- }
- }
- if (Rc == OK_FC) {
- a->FlowControlIdTable[Ch] = Id;
- a->FlowControlSkipTable[Ch] = false;
- this->Rc = Rc;
- this->More &= ~(XBUSY | XMOREC);
- this->complete = 0xff;
- xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 1, a->IdTypeTable[this->No]);
- CALLBACK(a, this);
- return 0;
- }
- /*
- New protocol code sends return codes that come from the release
- of a flow control condition, marked with the DIVA_RC_TYPE_OK_FC
- extended information element type.
- If such a return code arrives, the application is able to process
- all return codes itself and XDI should not cancel return codes.
- This return code does not decrement the XMOREC partial return code
- counter because there was no request for this return code,
- so XMOREC was not incremented.
- */
- if (extended_info_type == DIVA_RC_TYPE_OK_FC) {
- a->misc_flags_table[e_no] |= DIVA_MISC_FLAGS_NO_RC_CANCELLING;
- this->Rc = Rc;
- this->complete = 0xff;
- xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 1, a->IdTypeTable[this->No]);
- DBG_TRC(("XDI OK_FC A(%02x) Id:%02x Ch:%02x Rc:%02x",
- XDI_A_NR(a), Id, Ch, Rc))
- CALLBACK(a, this);
- return 0;
- }
- cancel_rc = !(a->misc_flags_table[e_no] & DIVA_MISC_FLAGS_NO_RC_CANCELLING);
- if (cancel_rc && (a->FlowControlIdTable[Ch] == Id))
- {
- a->FlowControlIdTable[Ch] = 0;
- if ((Rc != OK) || !a->FlowControlSkipTable[Ch])
- {
- this->Rc = Rc;
- if (Ch == this->ReqCh)
- {
- this->More &= ~(XBUSY | XMOREC);
- this->complete = 0xff;
- }
- xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 1, a->IdTypeTable[this->No]);
- CALLBACK(a, this);
- }
- return 0;
- }
- if (this->More & XMOREC)
- this->More--;
- /* call the application callback function */
- if (((!cancel_rc) || (this->More & XMOREF)) && !(this->More & XMOREC)) {
- this->Rc = Rc;
- this->More &= ~XBUSY;
- this->complete = 0xff;
- xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 1, a->IdTypeTable[this->No]);
- CALLBACK(a, this);
- }
- return 0;
- }
- /* if it's an ASSIGN return code check if it's a return */
- /* code to an ASSIGN request from us */
- if ((Rc & 0xf0) == ASSIGN_RC) {
- e_no = get_assign(a, Ref);
- if (e_no) {
- this = entity_ptr(a, e_no);
- this->Id = Id;
- xdi_xlog_rc_event(XDI_A_NR(a), Id, Ch, Rc, 2, a->IdTypeTable[this->No]);
- /* call the application callback function */
- this->Rc = Rc;
- this->More &= ~XBUSY;
- this->complete = 0xff;
-#if defined(DIVA_ISTREAM) /* { */
- if ((Rc == ASSIGN_OK) && a->ram_offset &&
- (a->IdTypeTable[this->No] == NL_ID) &&
- ((extended_info_type == DIVA_RC_TYPE_RX_DMA) ||
- (extended_info_type == DIVA_RC_TYPE_CMA_PTR)) &&
- extended_info) {
- dword offset = (*(a->ram_offset)) (a);
- dword tmp[2];
- extended_info -= offset;
-#ifdef PLATFORM_GT_32BIT
- a->ram_in_dw(a, (void *)ULongToPtr(extended_info), (dword *)&tmp[0], 2);
-#else
- a->ram_in_dw(a, (void *)extended_info, (dword *)&tmp[0], 2);
-#endif
- a->tx_stream[Id] = tmp[0];
- a->rx_stream[Id] = tmp[1];
- if (extended_info_type == DIVA_RC_TYPE_RX_DMA) {
- DBG_TRC(("Id=0x%x RxDMA=%08x:%08x",
- Id, a->tx_stream[Id], a->rx_stream[Id]))
- a->misc_flags_table[this->No] |= DIVA_MISC_FLAGS_RX_DMA;
- } else {
- DBG_TRC(("Id=0x%x CMA=%08x:%08x",
- Id, a->tx_stream[Id], a->rx_stream[Id]))
- a->misc_flags_table[this->No] &= ~DIVA_MISC_FLAGS_RX_DMA;
- a->rx_pos[Id] = 0;
- a->rx_stream[Id] -= offset;
- }
- a->tx_pos[Id] = 0;
- a->tx_stream[Id] -= offset;
- } else {
- a->tx_stream[Id] = 0;
- a->rx_stream[Id] = 0;
- a->misc_flags_table[this->No] &= ~DIVA_MISC_FLAGS_RX_DMA;
- }
-#endif /* } */
- CALLBACK(a, this);
- if (Rc == ASSIGN_OK) {
- a->IdTable[Id] = e_no;
- }
- else
- {
- free_entity(a, e_no);
- for (i = 0; i < 256; i++)
- {
- if (a->FlowControlIdTable[i] == Id)
- a->FlowControlIdTable[i] = 0;
- }
- a->IdTable[Id] = 0;
- this->Id = 0;
- }
- return 1;
- }
- }
- return 2;
-}
-/*------------------------------------------------------------------*/
-/* indication handler */
-/*------------------------------------------------------------------*/
-static byte isdn_ind(ADAPTER *a,
- byte Ind,
- byte Id,
- byte Ch,
- PBUFFER *RBuffer,
- byte MInd,
- word MLength)
-{
- ENTITY *this;
- word clength;
- word offset;
- BUFFERS *R;
- byte *cma = NULL;
-#ifdef USE_EXTENDED_DEBUGS
- {
- DBG_TRC(("<A%d Id=0x%x Ind=0x%x", ((ISDN_ADAPTER *)a->io)->ANum, Id, Ind))
- }
-#else
- dbug(dprintf("isdn_ind(Ind=%x,Id=%x,Ch=%x)", Ind, Id, Ch));
-#endif
- if (a->IdTable[Id]) {
- this = entity_ptr(a, a->IdTable[Id]);
- this->IndCh = Ch;
- xdi_xlog_ind(XDI_A_NR(a), Id, Ch, Ind,
- 0/* rnr_valid */, 0 /* rnr */, a->IdTypeTable[this->No]);
- /* if the Receive More flag is not yet set, this is the */
- /* first buffer of the packet */
- if (this->RCurrent == 0xff) {
- /* check for receive buffer chaining */
- if (Ind == this->MInd) {
- this->complete = 0;
- this->Ind = MInd;
- }
- else {
- this->complete = 1;
- this->Ind = Ind;
- }
- /* call the application callback function for the receive */
- /* look ahead */
- this->RLength = MLength;
-#if defined(DIVA_ISTREAM)
- if ((a->rx_stream[this->Id] ||
- (a->misc_flags_table[this->No] & DIVA_MISC_FLAGS_RX_DMA)) &&
- ((Ind == N_DATA) ||
- (a->protocol_capabilities & PROTCAP_CMA_ALLPR))) {
- PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
- if (a->misc_flags_table[this->No] & DIVA_MISC_FLAGS_RX_DMA) {
-#if defined(DIVA_IDI_RX_DMA)
- dword d;
- diva_get_dma_map_entry(\
- (struct _diva_dma_map_entry *)IoAdapter->dma_map,
- (int)a->rx_stream[this->Id], (void **)&cma, &d);
-#else
- cma = &a->stream_buffer[0];
- cma[0] = cma[1] = cma[2] = cma[3] = 0;
-#endif
- this->RLength = MLength = (word)*(dword *)cma;
- cma += 4;
- } else {
- int final = 0;
- cma = &a->stream_buffer[0];
- this->RLength = MLength = (word)diva_istream_read(a,
- Id,
- cma,
- sizeof(a->stream_buffer),
- &final, NULL, NULL);
- }
- IoAdapter->RBuffer.length = min(MLength, (word)270);
- if (IoAdapter->RBuffer.length != MLength) {
- this->complete = 0;
- } else {
- this->complete = 1;
- }
- memcpy(IoAdapter->RBuffer.P, cma, IoAdapter->RBuffer.length);
- this->RBuffer = (DBUFFER *)&IoAdapter->RBuffer;
- }
-#endif
- if (!cma) {
- a->ram_look_ahead(a, RBuffer, this);
- }
- this->RNum = 0;
- CALLBACK(a, this);
- /* map entity ptr, selector could be re-mapped by call to */
- /* IDI from within callback */
- this = entity_ptr(a, a->IdTable[Id]);
- xdi_xlog_ind(XDI_A_NR(a), Id, Ch, Ind,
- 1/* rnr_valid */, this->RNR/* rnr */, a->IdTypeTable[this->No]);
- /* check for RNR */
- if (this->RNR == 1) {
- this->RNR = 0;
- return 1;
- }
- /* if no buffers are provided by the application, the */
- /* application wants to copy the data itself including */
- /* N_MDATA/LL_MDATA chaining */
- if (!this->RNR && !this->RNum) {
- xdi_xlog_ind(XDI_A_NR(a), Id, Ch, Ind,
- 2/* rnr_valid */, 0/* rnr */, a->IdTypeTable[this->No]);
- return 0;
- }
- /* if there is no RNR, set the More flag */
- this->RCurrent = 0;
- this->ROffset = 0;
- }
- if (this->RNR == 2) {
- if (Ind != this->MInd) {
- this->RCurrent = 0xff;
- this->RNR = 0;
- }
- return 0;
- }
- /* if we have received buffers from the application, copy */
- /* the data into these buffers */
- offset = 0;
- R = PTR_R(a, this);
- do {
- if (this->ROffset == R[this->RCurrent].PLength) {
- this->ROffset = 0;
- this->RCurrent++;
- }
- if (cma) {
- clength = min(MLength, (word)(R[this->RCurrent].PLength-this->ROffset));
- } else {
- clength = min(a->ram_inw(a, &RBuffer->length)-offset,
- R[this->RCurrent].PLength-this->ROffset);
- }
- if (R[this->RCurrent].P) {
- if (cma) {
- memcpy(PTR_P(a, this, &R[this->RCurrent].P[this->ROffset]),
- &cma[offset],
- clength);
- } else {
- a->ram_in_buffer(a,
- &RBuffer->P[offset],
- PTR_P(a, this, &R[this->RCurrent].P[this->ROffset]),
- clength);
- }
- }
- offset += clength;
- this->ROffset += clength;
- if (cma) {
- if (offset >= MLength) {
- break;
- }
- continue;
- }
- } while (offset < (a->ram_inw(a, &RBuffer->length)));
- /* if it's the last buffer of the packet, call the */
- /* application callback function for the receive complete */
- /* call */
- if (Ind != this->MInd) {
- R[this->RCurrent].PLength = this->ROffset;
- if (this->ROffset) this->RCurrent++;
- this->RNum = this->RCurrent;
- this->RCurrent = 0xff;
- this->Ind = Ind;
- this->complete = 2;
- xdi_xlog_ind(XDI_A_NR(a), Id, Ch, Ind,
- 3/* rnr_valid */, 0/* rnr */, a->IdTypeTable[this->No]);
- CALLBACK(a, this);
- }
- return 0;
- }
- return 2;
-}
-#if defined(XDI_USE_XLOG)
-/* -----------------------------------------------------------
- This function works in the same way as xlog on the
- active board
- ----------------------------------------------------------- */
-static void xdi_xlog(byte *msg, word code, int length) {
- xdi_dbg_xlog("\x00\x02", msg, code, length);
-}
-#endif
-/* -----------------------------------------------------------
- This function writes the information about the Return Code
- processing in the trace buffer. Trace ID is 221.
- INPUT:
- Adapter - system unique adapter number (0 ... 255)
- Id - Id of the entity that had sent this return code
- Ch - Channel of the entity that had sent this return code
- Rc - return code value
- cb: (0...2)
- switch (cb) {
- case 0: printf ("DELIVERY"); break;
- case 1: printf ("CALLBACK"); break;
- case 2: printf ("ASSIGN"); break;
- }
- DELIVERY - isdn_rc was entered with this RC
- CALLBACK - about to make the callback to the application
- for this RC
- ASSIGN - about to make the callback for an RC that is the
- result of an ASSIGN request. There is no DELIVERY
- message before this message
- type - the Id that was sent by the ASSIGN of this entity.
- This should be a global Id like NL_ID, DSIG_ID, MAN_ID.
- An unknown Id will cause "?-" to be prefixed to the request.
- In this case log.c has to be extended.
- ----------------------------------------------------------- */
-static void xdi_xlog_rc_event(byte Adapter,
- byte Id, byte Ch, byte Rc, byte cb, byte type) {
-#if defined(XDI_USE_XLOG)
- word LogInfo[4];
- PUT_WORD(&LogInfo[0], ((word)Adapter | (word)(xdi_xlog_sec++ << 8)));
- PUT_WORD(&LogInfo[1], ((word)Id | (word)(Ch << 8)));
- PUT_WORD(&LogInfo[2], ((word)Rc | (word)(type << 8)));
- PUT_WORD(&LogInfo[3], cb);
- xdi_xlog((byte *)&LogInfo[0], 221, sizeof(LogInfo));
-#endif
-}
-/* ------------------------------------------------------------------------
- This function writes the information about the request processing
- in the trace buffer. Trace ID is 220.
- INPUT:
- Adapter - system unique adapter number (0 ... 255)
- Id - Id of the entity that had sent this request
- Ch - Channel of the entity that had sent this request
- Req - Code of the request
- type - the Id that was sent by the ASSIGN of this entity.
- This should be a global Id like NL_ID, DSIG_ID, MAN_ID.
- An unknown Id will cause "?-" to be prefixed to the request.
- In this case log.c has to be extended.
- ------------------------------------------------------------------------ */
-static void xdi_xlog_request(byte Adapter, byte Id,
- byte Ch, byte Req, byte type) {
-#if defined(XDI_USE_XLOG)
- word LogInfo[3];
- PUT_WORD(&LogInfo[0], ((word)Adapter | (word)(xdi_xlog_sec++ << 8)));
- PUT_WORD(&LogInfo[1], ((word)Id | (word)(Ch << 8)));
- PUT_WORD(&LogInfo[2], ((word)Req | (word)(type << 8)));
- xdi_xlog((byte *)&LogInfo[0], 220, sizeof(LogInfo));
-#endif
-}
-/* ------------------------------------------------------------------------
- This function writes the information about the indication processing
- in the trace buffer. Trace ID is 222.
- INPUT:
- Adapter - system unique adapter number (0 ... 255)
- Id - Id of the entity that had sent this indication
- Ch - Channel of the entity that had sent this indication
- Ind - Code of the indication
- rnr_valid: (0 ... 3)
- switch (rnr_valid) {
- case 0: printf ("DELIVERY"); break;
- case 1: printf ("RNR=%d", rnr); break;
- case 2: printf ("RNum=0"); break;
- case 3: printf ("COMPLETE"); break;
- }
- DELIVERY - indication entered isdn_rc function
- RNR=... - application had returned RNR=... after the
- look ahead callback
- RNum=0 - application had not returned any buffer to copy
- this indication and will copy it itself
- COMPLETE - XDI had copied the data to the buffers provided
- by the application and is about to issue the
- final callback
- rnr: see case 1 of rnr_valid
- type: the Id that was sent by the ASSIGN of this entity. This should
- be a global Id like NL_ID, DSIG_ID, MAN_ID. An unknown Id will
- cause "?-" to be prefixed to the request. In this case
- log.c has to be extended.
- ------------------------------------------------------------------------ */
-static void xdi_xlog_ind(byte Adapter,
- byte Id,
- byte Ch,
- byte Ind,
- byte rnr_valid,
- byte rnr,
- byte type) {
-#if defined(XDI_USE_XLOG)
- word LogInfo[4];
- PUT_WORD(&LogInfo[0], ((word)Adapter | (word)(xdi_xlog_sec++ << 8)));
- PUT_WORD(&LogInfo[1], ((word)Id | (word)(Ch << 8)));
- PUT_WORD(&LogInfo[2], ((word)Ind | (word)(type << 8)));
- PUT_WORD(&LogInfo[3], ((word)rnr | (word)(rnr_valid << 8)));
- xdi_xlog((byte *)&LogInfo[0], 222, sizeof(LogInfo));
-#endif
-}
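The three xdi_xlog_* writers above pack their fields into little-endian 16-bit words via PUT_WORD, so a trace reader only has to reverse that packing. A minimal decoder sketch for a Trace ID 222 record as laid out by xdi_xlog_ind(); get_word() is an assumed little-endian helper, not part of the driver:

#include <stdio.h>

/* Hypothetical reader for an indication record (Trace ID 222). */
static unsigned get_word(const unsigned char *p)
{
	return (unsigned)p[0] | ((unsigned)p[1] << 8);
}

static void decode_xlog_ind(const unsigned char *buf)
{
	unsigned adapter = get_word(buf + 0) & 0xff;
	unsigned seq = get_word(buf + 0) >> 8;
	unsigned id = get_word(buf + 2) & 0xff;
	unsigned ch = get_word(buf + 2) >> 8;
	unsigned ind = get_word(buf + 4) & 0xff;
	unsigned type = get_word(buf + 4) >> 8;
	unsigned rnr = get_word(buf + 6) & 0xff;
	unsigned rnr_valid = get_word(buf + 6) >> 8;

	printf("A%u #%u Id=%02x Ch=%u Ind=%02x type=%02x ",
	       adapter, seq, id, ch, ind, type);
	switch (rnr_valid) {
	case 0: printf("DELIVERY\n"); break;
	case 1: printf("RNR=%u\n", rnr); break;
	case 2: printf("RNum=0\n"); break;
	case 3: printf("COMPLETE\n"); break;
	}
}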
diff --git a/drivers/isdn/hardware/eicon/di.h b/drivers/isdn/hardware/eicon/di.h
deleted file mode 100644
index ff26c65631d6..000000000000
--- a/drivers/isdn/hardware/eicon/di.h
+++ /dev/null
@@ -1,118 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-/*
- * some macros for detailed trace management
- */
-#include "di_dbg.h"
-/*****************************************************************************/
-#define XMOREC 0x1f
-#define XMOREF 0x20
-#define XBUSY 0x40
-#define RMORE 0x80
-#define DIVA_MISC_FLAGS_REMOVE_PENDING 0x01
-#define DIVA_MISC_FLAGS_NO_RC_CANCELLING 0x02
-#define DIVA_MISC_FLAGS_RX_DMA 0x04
-/* structure for all information we have to keep on a per */
-/* adapter basis */
-typedef struct adapter_s ADAPTER;
-struct adapter_s {
- void *io;
- byte IdTable[256];
- byte IdTypeTable[256];
- byte FlowControlIdTable[256];
- byte FlowControlSkipTable[256];
- byte ReadyInt;
- byte RcExtensionSupported;
- byte misc_flags_table[256];
- dword protocol_capabilities;
- byte (*ram_in)(ADAPTER *a, void *adr);
- word (*ram_inw)(ADAPTER *a, void *adr);
- void (*ram_in_buffer)(ADAPTER *a, void *adr, void *P, word length);
- void (*ram_look_ahead)(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e);
- void (*ram_out)(ADAPTER *a, void *adr, byte data);
- void (*ram_outw)(ADAPTER *a, void *adr, word data);
- void (*ram_out_buffer)(ADAPTER *a, void *adr, void *P, word length);
- void (*ram_inc)(ADAPTER *a, void *adr);
-#if defined(DIVA_ISTREAM)
- dword rx_stream[256];
- dword tx_stream[256];
- word tx_pos[256];
- word rx_pos[256];
- byte stream_buffer[2512];
- dword (*ram_offset)(ADAPTER *a);
- void (*ram_out_dw)(ADAPTER *a,
- void *addr,
- const dword *data,
- int dwords);
- void (*ram_in_dw)(ADAPTER *a,
- void *addr,
- dword *data,
- int dwords);
- void (*istream_wakeup)(ADAPTER *a);
-#else
- byte stream_buffer[4];
-#endif
-};
-/*------------------------------------------------------------------*/
-/* public functions of IDI common code */
-/*------------------------------------------------------------------*/
-void pr_out(ADAPTER *a);
-byte pr_dpc(ADAPTER *a);
-byte scom_test_int(ADAPTER *a);
-void scom_clear_int(ADAPTER *a);
-/*------------------------------------------------------------------*/
-/* OS specific functions used by IDI common code */
-/*------------------------------------------------------------------*/
-void free_entity(ADAPTER *a, byte e_no);
-void assign_queue(ADAPTER *a, byte e_no, word ref);
-byte get_assign(ADAPTER *a, word ref);
-void req_queue(ADAPTER *a, byte e_no);
-byte look_req(ADAPTER *a);
-void next_req(ADAPTER *a);
-ENTITY *entity_ptr(ADAPTER *a, byte e_no);
-#if defined(DIVA_ISTREAM)
-struct _diva_xdi_stream_interface;
-void diva_xdi_provide_istream_info(ADAPTER *a,
- struct _diva_xdi_stream_interface *pI);
-void pr_stream(ADAPTER *a);
-int diva_istream_write(void *context,
- int Id,
- void *data,
- int length,
- int final,
- byte usr1,
- byte usr2);
-int diva_istream_read(void *context,
- int Id,
- void *data,
- int max_length,
- int *final,
- byte *usr1,
- byte *usr2);
-#if defined(DIVA_IDI_RX_DMA)
-#include "diva_dma.h"
-#endif
-#endif
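The ADAPTER structure above keeps the IDI common code bus-agnostic: every access to adapter memory goes through the ram_* function pointers. A hedged sketch of how a bus layer might wire two of those hooks for a directly memory-mapped card (mem_in/mem_inw and the direct casts are illustrative, not the driver's real accessors):

/* Illustration only: direct memory-mapped implementations of two of
   the ram_* hooks declared in the ADAPTER structure above. */
static byte mem_in(ADAPTER *a, void *adr)
{
	return *(volatile byte *)adr;
}

static word mem_inw(ADAPTER *a, void *adr)
{
	return *(volatile word *)adr;
}

static void wire_adapter_hooks(ADAPTER *a)
{
	a->ram_in = mem_in;
	a->ram_inw = mem_inw;
	/* ram_out, ram_in_buffer, ... would be wired the same way */
}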
diff --git a/drivers/isdn/hardware/eicon/di_dbg.h b/drivers/isdn/hardware/eicon/di_dbg.h
deleted file mode 100644
index 1380b60e526e..000000000000
--- a/drivers/isdn/hardware/eicon/di_dbg.h
+++ /dev/null
@@ -1,37 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_DI_DBG_INC__
-#define __DIVA_DI_DBG_INC__
-#if !defined(dtrc)
-#define dtrc(a)
-#endif
-#if !defined(dbug)
-#define dbug(a)
-#endif
-#if !defined USE_EXTENDED_DEBUGS
-extern void (*dprintf)(char*, ...);
-#endif
-#endif
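Because di_dbg.h only defines dtrc() and dbug() when they are still undefined, a build can enable them by defining the macros before the header is included; the default expansion simply discards the wrapped call. A sketch, assuming the usual dtrc(dprintf(...)) call style:

/* Hypothetical debug build: make the wrapped trace call evaluate
   instead of vanishing (the default #define expands to nothing). */
#define dtrc(a) (a)
#define dbug(a) (a)
#include "di_dbg.h"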
diff --git a/drivers/isdn/hardware/eicon/di_defs.h b/drivers/isdn/hardware/eicon/di_defs.h
deleted file mode 100644
index a5094d221086..000000000000
--- a/drivers/isdn/hardware/eicon/di_defs.h
+++ /dev/null
@@ -1,181 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef _DI_DEFS_
-#define _DI_DEFS_
-/* typedefs for our data structures */
-typedef struct get_name_s GET_NAME;
-/* The entity_s structure is used to pass all
- parameters between application and IDI */
-typedef struct entity_s ENTITY;
-typedef struct buffers_s BUFFERS;
-typedef struct postcall_s POSTCALL;
-typedef struct get_para_s GET_PARA;
-#define BOARD_NAME_LENGTH 9
-#define IDI_CALL_LINK_T
-#define IDI_CALL_ENTITY_T
-/* typedef void ( * IDI_CALL)(ENTITY *); */
-/* --------------------------------------------------------
- IDI_CALL
- -------------------------------------------------------- */
-typedef void (IDI_CALL_LINK_T *IDI_CALL)(ENTITY IDI_CALL_ENTITY_T *);
-typedef struct {
- word length; /* length of data/parameter field */
- byte P[270]; /* data/parameter field */
-} DBUFFER;
-struct get_name_s {
- word command; /* command = 0x0100 */
- byte name[BOARD_NAME_LENGTH];
-};
-struct postcall_s {
- word command; /* command = 0x0300 */
- word dummy; /* not used */
- void (*callback)(void *); /* call back */
- void *context; /* context pointer */
-};
-#define REQ_PARA 0x0600 /* request command line parameters */
-#define REQ_PARA_LEN 1 /* number of data bytes */
-#define L1_STARTUP_DOWN_POS 0 /* '-y' command line parameter in......*/
-#define L1_STARTUP_DOWN_MSK 0x01 /* first byte position (index 0) with value 0x01 */
-struct get_para_s {
- word command; /* command = 0x0600 */
- byte len; /* max length of para field in bytes */
- byte para[REQ_PARA_LEN]; /* parameter field */
-};
-struct buffers_s {
- word PLength;
- byte *P;
-};
-struct entity_s {
- byte Req; /* pending request */
- byte Rc; /* return code received */
- byte Ind; /* indication received */
- byte ReqCh; /* channel of current Req */
- byte RcCh; /* channel of current Rc */
- byte IndCh; /* channel of current Ind */
- byte Id; /* ID used by this entity */
- byte GlobalId; /* reserved field */
- byte XNum; /* number of X-buffers */
- byte RNum; /* number of R-buffers */
- BUFFERS *X; /* pointer to X-buffer list */
- BUFFERS *R; /* pointer to R-buffer list */
- word RLength; /* length of current R-data */
- DBUFFER *RBuffer; /* buffer of current R-data */
- byte RNR; /* receive not ready flag */
- byte complete; /* receive complete status */
- IDI_CALL callback;
- word user[2];
- /* fields used by the driver internally */
- byte No; /* entity number */
- byte reserved2; /* reserved field */
- byte More; /* R/X More flags */
- byte MInd; /* MDATA coding for this ID */
- byte XCurrent; /* current transmit buffer */
- byte RCurrent; /* current receive buffer */
- word XOffset; /* offset in x-buffer */
- word ROffset; /* offset in r-buffer */
-};
-typedef struct {
- byte type;
- byte channels;
- word features;
- IDI_CALL request;
-} DESCRIPTOR;
-/* descriptor type field coding */
-#define IDI_ADAPTER_S 1
-#define IDI_ADAPTER_PR 2
-#define IDI_ADAPTER_DIVA 3
-#define IDI_ADAPTER_MAESTRA 4
-#define IDI_VADAPTER 0x40
-#define IDI_DRIVER 0x80
-#define IDI_DADAPTER 0xfd
-#define IDI_DIDDPNP 0xfe
-#define IDI_DIMAINT 0xff
-/* Hardware IDs ISA PNP */
-#define HW_ID_DIVA_PRO 3 /* same as IDI_ADAPTER_DIVA */
-#define HW_ID_MAESTRA 4 /* same as IDI_ADAPTER_MAESTRA */
-#define HW_ID_PICCOLA 5
-#define HW_ID_DIVA_PRO20 6
-#define HW_ID_DIVA20 7
-#define HW_ID_DIVA_PRO20_U 8
-#define HW_ID_DIVA20_U 9
-#define HW_ID_DIVA30 10
-#define HW_ID_DIVA30_U 11
-/* Hardware IDs PCI */
-#define HW_ID_EICON_PCI 0x1133
-#define HW_ID_SIEMENS_PCI 0x8001 /* unused SubVendor ID for Siemens Cornet-N cards */
-#define HW_ID_PROTTYPE_CORNETN 0x0014 /* SubDevice ID for Siemens Cornet-N cards */
-#define HW_ID_FUJITSU_SIEMENS_PCI 0x110A /* SubVendor ID for Fujitsu Siemens */
-#define HW_ID_GS03_PCI 0x0021 /* SubDevice ID for Fujitsu Siemens ISDN S0 card */
-#define HW_ID_DIVA_PRO20_PCI 0xe001
-#define HW_ID_DIVA20_PCI 0xe002
-#define HW_ID_DIVA_PRO20_PCI_U 0xe003
-#define HW_ID_DIVA20_PCI_U 0xe004
-#define HW_ID_DIVA201_PCI 0xe005
-#define HW_ID_DIVA_CT_ST 0xe006
-#define HW_ID_DIVA_CT_U 0xe007
-#define HW_ID_DIVA_CTL_ST 0xe008
-#define HW_ID_DIVA_CTL_U 0xe009
-#define HW_ID_DIVA_ISDN_V90_PCI 0xe00a
-#define HW_ID_DIVA202_PCI_ST 0xe00b
-#define HW_ID_DIVA202_PCI_U 0xe00c
-#define HW_ID_DIVA_PRO30_PCI 0xe00d
-#define HW_ID_MAESTRA_PCI 0xe010
-#define HW_ID_MAESTRAQ_PCI 0xe012
-#define HW_ID_DSRV_Q8M_V2_PCI 0xe013
-#define HW_ID_MAESTRAP_PCI 0xe014
-#define HW_ID_DSRV_P30M_V2_PCI 0xe015
-#define HW_ID_DSRV_VOICE_Q8M_PCI 0xe016
-#define HW_ID_DSRV_VOICE_Q8M_V2_PCI 0xe017
-#define HW_ID_DSRV_B2M_V2_PCI 0xe018
-#define HW_ID_DSRV_VOICE_P30M_V2_PCI 0xe019
-#define HW_ID_DSRV_B2F_PCI 0xe01a
-#define HW_ID_DSRV_VOICE_B2M_V2_PCI 0xe01b
-/* Hardware IDs USB */
-#define EICON_USB_VENDOR_ID 0x071D
-#define HW_ID_DIVA_USB_REV1 0x1000
-#define HW_ID_DIVA_USB_REV2 0x1003
-#define HW_ID_TELEDAT_SURF_USB_REV2 0x1004
-#define HW_ID_TELEDAT_SURF_USB_REV1 0x2000
-/* --------------------------------------------------------------------------
- Adapter array change notification framework
- -------------------------------------------------------------------------- */
-typedef void (IDI_CALL_LINK_T *didd_adapter_change_callback_t)(void IDI_CALL_ENTITY_T *context, DESCRIPTOR *adapter, int removal);
-/* -------------------------------------------------------------------------- */
-#define DI_VOICE 0x0 /* obsolete define */
-#define DI_FAX3 0x1
-#define DI_MODEM 0x2
-#define DI_POST 0x4
-#define DI_V110 0x8
-#define DI_V120 0x10
-#define DI_POTS 0x20
-#define DI_CODEC 0x40
-#define DI_MANAGE 0x80
-#define DI_V_42 0x0100
-#define DI_EXTD_FAX 0x0200 /* Extended FAX (ECM, 2D, T.6, Polling) */
-#define DI_AT_PARSER 0x0400 /* Built-in AT Parser in the L2 */
-#define DI_VOICE_OVER_IP 0x0800 /* Voice over IP support */
-typedef void (IDI_CALL_LINK_T *_IDI_CALL)(void *, ENTITY *);
-#endif
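The ENTITY structure above is the single parameter block exchanged with an adapter's request() entry point; results come back asynchronously through the stored callback. A hedged sketch of that round trip, where MY_GLOBAL_ID and MY_REQ stand in for real protocol codes that live in headers not shown in this diff:

#include <string.h>

/* Sketch only: MY_GLOBAL_ID / MY_REQ are placeholders for real codes
   (e.g. a global Id and an ASSIGN-style request) defined elsewhere. */
static void my_idi_callback(ENTITY *e)
{
	if (e->Rc) {		/* return code for the last request */
		/* inspect e->Rc / e->RcCh, then acknowledge */
		e->Rc = 0;
	}
	if (e->Ind) {		/* unsolicited indication */
		/* e->RBuffer / e->RLength carry received data */
		e->Ind = 0;
	}
}

static void issue_request(DESCRIPTOR *d, ENTITY *e)
{
	memset(e, 0, sizeof(*e));
	e->Id = MY_GLOBAL_ID;	/* placeholder global Id */
	e->Req = MY_REQ;	/* placeholder request code */
	e->callback = my_idi_callback;
	d->request(e);		/* adapter answers via the callback */
}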
diff --git a/drivers/isdn/hardware/eicon/did_vers.h b/drivers/isdn/hardware/eicon/did_vers.h
deleted file mode 100644
index fa8db8249235..000000000000
--- a/drivers/isdn/hardware/eicon/did_vers.h
+++ /dev/null
@@ -1,26 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-static char diva_didd_common_code_build[] = "102-51";
diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
deleted file mode 100644
index b0b23ed8b374..000000000000
--- a/drivers/isdn/hardware/eicon/diddfunc.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/* $Id: diddfunc.c,v 1.14.6.2 2004/08/28 20:03:53 armin Exp $
- *
- * DIDD Interface module for Eicon active cards.
- *
- * Functions are in dadapter.c
- *
- * Copyright 2002-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2002-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include "platform.h"
-#include "di_defs.h"
-#include "dadapter.h"
-#include "divasync.h"
-
-#define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-
-extern void DIVA_DIDD_Read(void *, int);
-extern char *DRIVERRELEASE_DIDD;
-static dword notify_handle;
-static DESCRIPTOR _DAdapter;
-
-/*
- * didd callback function
- */
-static void *didd_callback(void *context, DESCRIPTOR *adapter,
- int removal)
-{
- if (adapter->type == IDI_DADAPTER) {
- DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."))
- return (NULL);
- } else if (adapter->type == IDI_DIMAINT) {
- if (removal) {
- DbgDeregister();
- } else {
- DbgRegister("DIDD", DRIVERRELEASE_DIDD, DBG_DEFAULT);
- }
- }
- return (NULL);
-}
-
-/*
- * connect to didd
- */
-static int __init connect_didd(void)
-{
- int x = 0;
- int dadapter = 0;
- IDI_SYNC_REQ req;
- DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
-
- DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
-
- for (x = 0; x < MAX_DESCRIPTORS; x++) {
- if (DIDD_Table[x].type == IDI_DADAPTER) { /* DADAPTER found */
- dadapter = 1;
- memcpy(&_DAdapter, &DIDD_Table[x], sizeof(_DAdapter));
- req.didd_notify.e.Req = 0;
- req.didd_notify.e.Rc =
- IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
- req.didd_notify.info.callback = (void *)didd_callback;
- req.didd_notify.info.context = NULL;
- _DAdapter.request((ENTITY *)&req);
- if (req.didd_notify.e.Rc != 0xff)
- return (0);
- notify_handle = req.didd_notify.info.handle;
- } else if (DIDD_Table[x].type == IDI_DIMAINT) { /* MAINT found */
- DbgRegister("DIDD", DRIVERRELEASE_DIDD, DBG_DEFAULT);
- }
- }
- return (dadapter);
-}
-
-/*
- * disconnect from didd
- */
-static void __exit disconnect_didd(void)
-{
- IDI_SYNC_REQ req;
-
- req.didd_notify.e.Req = 0;
- req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
- req.didd_notify.info.handle = notify_handle;
- _DAdapter.request((ENTITY *)&req);
-}
-
-/*
- * init
- */
-int __init diddfunc_init(void)
-{
- diva_didd_load_time_init();
-
- if (!connect_didd()) {
- DBG_ERR(("init: failed to connect to DIDD."))
- diva_didd_load_time_finit();
- return (0);
- }
- return (1);
-}
-
-/*
- * finit
- */
-void __exit diddfunc_finit(void)
-{
- DbgDeregister();
- disconnect_didd();
- diva_didd_load_time_finit();
-}
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
deleted file mode 100644
index 1b25d8bc153a..000000000000
--- a/drivers/isdn/hardware/eicon/diva.c
+++ /dev/null
@@ -1,666 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* $Id: diva.c,v 1.21.4.1 2004/05/08 14:33:43 armin Exp $ */
-
-#define CARDTYPE_H_WANT_DATA 1
-#define CARDTYPE_H_WANT_IDI_DATA 0
-#define CARDTYPE_H_WANT_RESOURCE_DATA 0
-#define CARDTYPE_H_WANT_FILE_DATA 0
-
-#include "platform.h"
-#include "debuglib.h"
-#include "cardtype.h"
-#include "pc.h"
-#include "di_defs.h"
-#include "di.h"
-#include "io.h"
-#include "pc_maint.h"
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "diva_pci.h"
-#include "diva.h"
-
-#ifdef CONFIG_ISDN_DIVAS_PRIPCI
-#include "os_pri.h"
-#endif
-#ifdef CONFIG_ISDN_DIVAS_BRIPCI
-#include "os_bri.h"
-#include "os_4bri.h"
-#endif
-
-PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
-extern IDI_CALL Requests[MAX_ADAPTER];
-extern int create_adapter_proc(diva_os_xdi_adapter_t *a);
-extern void remove_adapter_proc(diva_os_xdi_adapter_t *a);
-
-#define DivaIdiReqFunc(N) \
- static void DivaIdiRequest##N(ENTITY *e) \
- { if (IoAdapters[N]) (*IoAdapters[N]->DIRequest)(IoAdapters[N], e); }
-
-/*
-** Create our own 32 adapter request functions
-*/
-DivaIdiReqFunc(0)
-DivaIdiReqFunc(1)
-DivaIdiReqFunc(2)
-DivaIdiReqFunc(3)
-DivaIdiReqFunc(4)
-DivaIdiReqFunc(5)
-DivaIdiReqFunc(6)
-DivaIdiReqFunc(7)
-DivaIdiReqFunc(8)
-DivaIdiReqFunc(9)
-DivaIdiReqFunc(10)
-DivaIdiReqFunc(11)
-DivaIdiReqFunc(12)
-DivaIdiReqFunc(13)
-DivaIdiReqFunc(14)
-DivaIdiReqFunc(15)
-DivaIdiReqFunc(16)
-DivaIdiReqFunc(17)
-DivaIdiReqFunc(18)
-DivaIdiReqFunc(19)
-DivaIdiReqFunc(20)
-DivaIdiReqFunc(21)
-DivaIdiReqFunc(22)
-DivaIdiReqFunc(23)
-DivaIdiReqFunc(24)
-DivaIdiReqFunc(25)
-DivaIdiReqFunc(26)
-DivaIdiReqFunc(27)
-DivaIdiReqFunc(28)
-DivaIdiReqFunc(29)
-DivaIdiReqFunc(30)
-DivaIdiReqFunc(31)
-
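For reference, each invocation of the DivaIdiReqFunc() macro above stamps out one request trampoline; DivaIdiReqFunc(5), for example, expands (modulo whitespace) to:

static void DivaIdiRequest5(ENTITY *e)
{
	if (IoAdapters[5])
		(*IoAdapters[5]->DIRequest)(IoAdapters[5], e);
}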
-/*
-** LOCALS
-*/
-static LIST_HEAD(adapter_queue);
-
-typedef struct _diva_get_xlog {
- word command;
- byte req;
- byte rc;
- byte data[sizeof(struct mi_pc_maint)];
-} diva_get_xlog_t;
-
-typedef struct _diva_supported_cards_info {
- int CardOrdinal;
- diva_init_card_proc_t init_card;
-} diva_supported_cards_info_t;
-
-static diva_supported_cards_info_t divas_supported_cards[] = {
-#ifdef CONFIG_ISDN_DIVAS_PRIPCI
- /*
- PRI Cards
- */
- {CARDTYPE_DIVASRV_P_30M_PCI, diva_pri_init_card},
- /*
- PRI Rev.2 Cards
- */
- {CARDTYPE_DIVASRV_P_30M_V2_PCI, diva_pri_init_card},
- /*
- PRI Rev.2 VoIP Cards
- */
- {CARDTYPE_DIVASRV_VOICE_P_30M_V2_PCI, diva_pri_init_card},
-#endif
-#ifdef CONFIG_ISDN_DIVAS_BRIPCI
- /*
- 4BRI Rev 1 Cards
- */
- {CARDTYPE_DIVASRV_Q_8M_PCI, diva_4bri_init_card},
- {CARDTYPE_DIVASRV_VOICE_Q_8M_PCI, diva_4bri_init_card},
- /*
- 4BRI Rev 2 Cards
- */
- {CARDTYPE_DIVASRV_Q_8M_V2_PCI, diva_4bri_init_card},
- {CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI, diva_4bri_init_card},
- /*
- 4BRI Based BRI Rev 2 Cards
- */
- {CARDTYPE_DIVASRV_B_2M_V2_PCI, diva_4bri_init_card},
- {CARDTYPE_DIVASRV_B_2F_PCI, diva_4bri_init_card},
- {CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI, diva_4bri_init_card},
- /*
- BRI
- */
- {CARDTYPE_MAESTRA_PCI, diva_bri_init_card},
-#endif
-
- /*
- EOL
- */
- {-1}
-};
-
-static void diva_init_request_array(void);
-static void *divas_create_pci_card(int handle, void *pci_dev_handle);
-
-static diva_os_spin_lock_t adapter_lock;
-
-static int diva_find_free_adapters(int base, int nr)
-{
- int i;
-
- for (i = 0; i < nr; i++) {
- if (IoAdapters[base + i]) {
- return (-1);
- }
- }
-
- return (0);
-}
-
-static diva_os_xdi_adapter_t *diva_q_get_next(struct list_head *what)
-{
- diva_os_xdi_adapter_t *a = NULL;
-
- if (what && (what->next != &adapter_queue))
- a = list_entry(what->next, diva_os_xdi_adapter_t, link);
-
- return (a);
-}
-
-/* --------------------------------------------------------------------------
- Add card to the card list
- -------------------------------------------------------------------------- */
-void *diva_driver_add_card(void *pdev, unsigned long CardOrdinal)
-{
- diva_os_spin_lock_magic_t old_irql;
- diva_os_xdi_adapter_t *pdiva, *pa;
- int i, j, max, nr;
-
- for (i = 0; divas_supported_cards[i].CardOrdinal != -1; i++) {
- if (divas_supported_cards[i].CardOrdinal == CardOrdinal) {
- if (!(pdiva = divas_create_pci_card(i, pdev))) {
- return NULL;
- }
- switch (CardOrdinal) {
- case CARDTYPE_DIVASRV_Q_8M_PCI:
- case CARDTYPE_DIVASRV_VOICE_Q_8M_PCI:
- case CARDTYPE_DIVASRV_Q_8M_V2_PCI:
- case CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI:
- max = MAX_ADAPTER - 4;
- nr = 4;
- break;
-
- default:
- max = MAX_ADAPTER;
- nr = 1;
- }
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "add card");
-
- for (i = 0; i < max; i++) {
- if (!diva_find_free_adapters(i, nr)) {
- pdiva->controller = i + 1;
- pdiva->xdi_adapter.ANum = pdiva->controller;
- IoAdapters[i] = &pdiva->xdi_adapter;
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
- create_adapter_proc(pdiva); /* add adapter to proc file system */
-
- DBG_LOG(("add %s:%d",
- CardProperties
- [CardOrdinal].Name,
- pdiva->controller))
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "add card");
- pa = pdiva;
- for (j = 1; j < nr; j++) { /* slave adapters, if any */
- pa = diva_q_get_next(&pa->link);
- if (pa && !pa->interface.cleanup_adapter_proc) {
- pa->controller = i + 1 + j;
- pa->xdi_adapter.ANum = pa->controller;
- IoAdapters[i + j] = &pa->xdi_adapter;
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
- DBG_LOG(("add slave adapter (%d)",
- pa->controller))
- create_adapter_proc(pa); /* add adapter to proc file system */
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "add card");
- } else {
- DBG_ERR(("slave adapter problem"))
- break;
- }
- }
-
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
- return (pdiva);
- }
- }
-
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
-
- /*
- Not able to add adapter - remove it and return error
- */
- DBG_ERR(("can not alloc request array"))
- diva_driver_remove_card(pdiva);
-
- return NULL;
- }
- }
-
- return NULL;
-}
-
-/* --------------------------------------------------------------------------
- Called on driver load, MAIN, main, DriverEntry
- -------------------------------------------------------------------------- */
-int divasa_xdi_driver_entry(void)
-{
- diva_os_initialize_spin_lock(&adapter_lock, "adapter");
- memset(&IoAdapters[0], 0x00, sizeof(IoAdapters));
- diva_init_request_array();
-
- return (0);
-}
-
-/* --------------------------------------------------------------------------
- Remove adapter from list
- -------------------------------------------------------------------------- */
-static diva_os_xdi_adapter_t *get_and_remove_from_queue(void)
-{
- diva_os_spin_lock_magic_t old_irql;
- diva_os_xdi_adapter_t *a = NULL;
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "driver_unload");
-
- if (!list_empty(&adapter_queue)) {
- a = list_entry(adapter_queue.next, diva_os_xdi_adapter_t, link);
- list_del(adapter_queue.next);
- }
-
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "driver_unload");
- return (a);
-}
-
-/* --------------------------------------------------------------------------
- Remove card from the card list
- -------------------------------------------------------------------------- */
-void diva_driver_remove_card(void *pdiva)
-{
- diva_os_spin_lock_magic_t old_irql;
- diva_os_xdi_adapter_t *a[4];
- diva_os_xdi_adapter_t *pa;
- int i;
-
- pa = a[0] = (diva_os_xdi_adapter_t *) pdiva;
- a[1] = a[2] = a[3] = NULL;
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "remode adapter");
-
- for (i = 1; i < 4; i++) {
- if ((pa = diva_q_get_next(&pa->link))
- && !pa->interface.cleanup_adapter_proc) {
- a[i] = pa;
- } else {
- break;
- }
- }
-
- for (i = 0; ((i < 4) && a[i]); i++) {
- list_del(&a[i]->link);
- }
-
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "driver_unload");
-
- (*(a[0]->interface.cleanup_adapter_proc)) (a[0]);
-
- for (i = 0; i < 4; i++) {
- if (a[i]) {
- if (a[i]->controller) {
- DBG_LOG(("remove adapter (%d)",
- a[i]->controller)) IoAdapters[a[i]->controller - 1] = NULL;
- remove_adapter_proc(a[i]);
- }
- diva_os_free(0, a[i]);
- }
- }
-}
-
-/* --------------------------------------------------------------------------
- Create diva PCI adapter and init internal adapter structures
- -------------------------------------------------------------------------- */
-static void *divas_create_pci_card(int handle, void *pci_dev_handle)
-{
- diva_supported_cards_info_t *pI = &divas_supported_cards[handle];
- diva_os_spin_lock_magic_t old_irql;
- diva_os_xdi_adapter_t *a;
-
- DBG_LOG(("found %d-%s", pI->CardOrdinal, CardProperties[pI->CardOrdinal].Name))
-
- if (!(a = (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a)))) {
- DBG_ERR(("A: can't alloc adapter"));
- return NULL;
- }
-
- memset(a, 0x00, sizeof(*a));
-
- a->CardIndex = handle;
- a->CardOrdinal = pI->CardOrdinal;
- a->Bus = DIVAS_XDI_ADAPTER_BUS_PCI;
- a->xdi_adapter.cardType = a->CardOrdinal;
- a->resources.pci.bus = diva_os_get_pci_bus(pci_dev_handle);
- a->resources.pci.func = diva_os_get_pci_func(pci_dev_handle);
- a->resources.pci.hdev = pci_dev_handle;
-
- /*
- Add the master adapter first, so slave adapters will receive
- higher numbers than the master adapter
- */
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
- list_add_tail(&a->link, &adapter_queue);
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
-
- if ((*(pI->init_card)) (a)) {
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
- list_del(&a->link);
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
- diva_os_free(0, a);
- DBG_ERR(("A: can't get adapter resources"));
- return NULL;
- }
-
- return (a);
-}
-
-/* --------------------------------------------------------------------------
- Called on driver unload FINIT, finit, Unload
- -------------------------------------------------------------------------- */
-void divasa_xdi_driver_unload(void)
-{
- diva_os_xdi_adapter_t *a;
-
- while ((a = get_and_remove_from_queue())) {
- if (a->interface.cleanup_adapter_proc) {
- (*(a->interface.cleanup_adapter_proc)) (a);
- }
- if (a->controller) {
- IoAdapters[a->controller - 1] = NULL;
- remove_adapter_proc(a);
- }
- diva_os_free(0, a);
- }
- diva_os_destroy_spin_lock(&adapter_lock, "adapter");
-}
-
-/*
-** Receive and process command from user mode utility
-*/
-void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
- int length, void *mptr,
- divas_xdi_copy_from_user_fn_t cp_fn)
-{
- diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
- diva_os_xdi_adapter_t *a = NULL;
- diva_os_spin_lock_magic_t old_irql;
- struct list_head *tmp;
-
- if (length < sizeof(diva_xdi_um_cfg_cmd_t)) {
- DBG_ERR(("A: A(?) open, msg too small (%d < %d)",
- length, sizeof(diva_xdi_um_cfg_cmd_t)))
- return NULL;
- }
- if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
- DBG_ERR(("A: A(?) open, write error"))
- return NULL;
- }
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
- list_for_each(tmp, &adapter_queue) {
- a = list_entry(tmp, diva_os_xdi_adapter_t, link);
- if (a->controller == (int)msg->adapter)
- break;
- a = NULL;
- }
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
-
- if (!a) {
- DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
- }
-
- return (a);
-}
-
-/*
-** Easy cleanup mailbox status
-*/
-void diva_xdi_close_adapter(void *adapter, void *os_handle)
-{
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
-
- a->xdi_mbox.status &= ~DIVA_XDI_MBOX_BUSY;
- if (a->xdi_mbox.data) {
- diva_os_free(0, a->xdi_mbox.data);
- a->xdi_mbox.data = NULL;
- }
-}
-
-int
-diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
- int length, void *mptr,
- divas_xdi_copy_from_user_fn_t cp_fn)
-{
- diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
- void *data;
-
- if (a->xdi_mbox.status & DIVA_XDI_MBOX_BUSY) {
- DBG_ERR(("A: A(%d) write, mbox busy", a->controller))
- return (-1);
- }
-
- if (length < sizeof(diva_xdi_um_cfg_cmd_t)) {
- DBG_ERR(("A: A(%d) write, message too small (%d < %d)",
- a->controller, length,
- sizeof(diva_xdi_um_cfg_cmd_t)))
- return (-3);
- }
-
- if (!(data = diva_os_malloc(0, length))) {
- DBG_ERR(("A: A(%d) write, ENOMEM", a->controller))
- return (-2);
- }
-
- if (msg) {
- *(diva_xdi_um_cfg_cmd_t *)data = *msg;
- length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
- src + sizeof(*msg), length - sizeof(*msg));
- } else {
- length = (*cp_fn) (os_handle, data, src, length);
- }
- if (length > 0) {
- if ((*(a->interface.cmd_proc))
- (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
- length = -3;
- }
- } else {
- DBG_ERR(("A: A(%d) write error (%d)", a->controller,
- length))
- }
-
- diva_os_free(0, data);
-
- return (length);
-}
-
-/*
-** Write answers to user mode utility, if any
-*/
-int
-diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
- int max_length, divas_xdi_copy_to_user_fn_t cp_fn)
-{
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
- int ret;
-
- if (!(a->xdi_mbox.status & DIVA_XDI_MBOX_BUSY)) {
- DBG_ERR(("A: A(%d) rx mbox empty", a->controller))
- return (-1);
- }
- if (!a->xdi_mbox.data) {
- a->xdi_mbox.status &= ~DIVA_XDI_MBOX_BUSY;
- DBG_ERR(("A: A(%d) rx ENOMEM", a->controller))
- return (-2);
- }
-
- if (max_length < a->xdi_mbox.data_length) {
- DBG_ERR(("A: A(%d) rx buffer too short(%d < %d)",
- a->controller, max_length,
- a->xdi_mbox.data_length))
- return (-3);
- }
-
- ret = (*cp_fn) (os_handle, dst, a->xdi_mbox.data,
- a->xdi_mbox.data_length);
- if (ret > 0) {
- diva_os_free(0, a->xdi_mbox.data);
- a->xdi_mbox.data = NULL;
- a->xdi_mbox.status &= ~DIVA_XDI_MBOX_BUSY;
- }
-
- return (ret);
-}
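diva_xdi_write() and diva_xdi_read() above take copy callbacks so this core stays independent of how user memory is reached. A plausible sketch of the glue a character-device layer might supply, following the usual copy_from_user()/copy_to_user() contract (the function names are illustrative):

/* Illustrative glue matching the divas_xdi_copy_*_fn_t typedefs:
   return the number of bytes copied, or a negative error. */
static int xdi_cp_from_user(void *os_handle, void *dst,
			    const void __user *src, int length)
{
	if (copy_from_user(dst, src, length))
		return -EFAULT;
	return length;
}

static int xdi_cp_to_user(void *os_handle, void __user *dst,
			  const void *src, int length)
{
	if (copy_to_user(dst, src, length))
		return -EFAULT;
	return length;
}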
-
-
-irqreturn_t diva_os_irq_wrapper(int irq, void *context)
-{
- diva_os_xdi_adapter_t *a = context;
- diva_xdi_clear_interrupts_proc_t clear_int_proc;
-
- if (!a || !a->xdi_adapter.diva_isr_handler)
- return IRQ_NONE;
-
- if ((clear_int_proc = a->clear_interrupts_proc)) {
- (*clear_int_proc) (a);
- a->clear_interrupts_proc = NULL;
- return IRQ_HANDLED;
- }
-
- (*(a->xdi_adapter.diva_isr_handler)) (&a->xdi_adapter);
- return IRQ_HANDLED;
-}
-
-static void diva_init_request_array(void)
-{
- Requests[0] = DivaIdiRequest0;
- Requests[1] = DivaIdiRequest1;
- Requests[2] = DivaIdiRequest2;
- Requests[3] = DivaIdiRequest3;
- Requests[4] = DivaIdiRequest4;
- Requests[5] = DivaIdiRequest5;
- Requests[6] = DivaIdiRequest6;
- Requests[7] = DivaIdiRequest7;
- Requests[8] = DivaIdiRequest8;
- Requests[9] = DivaIdiRequest9;
- Requests[10] = DivaIdiRequest10;
- Requests[11] = DivaIdiRequest11;
- Requests[12] = DivaIdiRequest12;
- Requests[13] = DivaIdiRequest13;
- Requests[14] = DivaIdiRequest14;
- Requests[15] = DivaIdiRequest15;
- Requests[16] = DivaIdiRequest16;
- Requests[17] = DivaIdiRequest17;
- Requests[18] = DivaIdiRequest18;
- Requests[19] = DivaIdiRequest19;
- Requests[20] = DivaIdiRequest20;
- Requests[21] = DivaIdiRequest21;
- Requests[22] = DivaIdiRequest22;
- Requests[23] = DivaIdiRequest23;
- Requests[24] = DivaIdiRequest24;
- Requests[25] = DivaIdiRequest25;
- Requests[26] = DivaIdiRequest26;
- Requests[27] = DivaIdiRequest27;
- Requests[28] = DivaIdiRequest28;
- Requests[29] = DivaIdiRequest29;
- Requests[30] = DivaIdiRequest30;
- Requests[31] = DivaIdiRequest31;
-}
-
-void diva_xdi_display_adapter_features(int card)
-{
- dword features;
- if (!card || ((card - 1) >= MAX_ADAPTER) || !IoAdapters[card - 1]) {
- return;
- }
- card--;
- features = IoAdapters[card]->Properties.Features;
-
- DBG_LOG(("FEATURES FOR ADAPTER: %d", card + 1))
- DBG_LOG((" DI_FAX3 : %s",
- (features & DI_FAX3) ? "Y" : "N"))
- DBG_LOG((" DI_MODEM : %s",
- (features & DI_MODEM) ? "Y" : "N"))
- DBG_LOG((" DI_POST : %s",
- (features & DI_POST) ? "Y" : "N"))
- DBG_LOG((" DI_V110 : %s",
- (features & DI_V110) ? "Y" : "N"))
- DBG_LOG((" DI_V120 : %s",
- (features & DI_V120) ? "Y" : "N"))
- DBG_LOG((" DI_POTS : %s",
- (features & DI_POTS) ? "Y" : "N"))
- DBG_LOG((" DI_CODEC : %s",
- (features & DI_CODEC) ? "Y" : "N"))
- DBG_LOG((" DI_MANAGE : %s",
- (features & DI_MANAGE) ? "Y" : "N"))
- DBG_LOG((" DI_V_42 : %s",
- (features & DI_V_42) ? "Y" : "N"))
- DBG_LOG((" DI_EXTD_FAX : %s",
- (features & DI_EXTD_FAX) ? "Y" : "N"))
- DBG_LOG((" DI_AT_PARSER : %s",
- (features & DI_AT_PARSER) ? "Y" : "N"))
- DBG_LOG((" DI_VOICE_OVER_IP : %s",
- (features & DI_VOICE_OVER_IP) ? "Y" : "N"))
-}
-
-void diva_add_slave_adapter(diva_os_xdi_adapter_t *a)
-{
- diva_os_spin_lock_magic_t old_irql;
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "add_slave");
- list_add_tail(&a->link, &adapter_queue);
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add_slave");
-}
-
-int diva_card_read_xlog(diva_os_xdi_adapter_t *a)
-{
- diva_get_xlog_t *req;
- byte *data;
-
- if (!a->xdi_adapter.Initialized || !a->xdi_adapter.DIRequest) {
- return (-1);
- }
- if (!(data = diva_os_malloc(0, sizeof(struct mi_pc_maint)))) {
- return (-1);
- }
- memset(data, 0x00, sizeof(struct mi_pc_maint));
-
- if (!(req = diva_os_malloc(0, sizeof(*req)))) {
- diva_os_free(0, data);
- return (-1);
- }
- req->command = 0x0400;
- req->req = LOG;
- req->rc = 0x00;
-
- (*(a->xdi_adapter.DIRequest)) (&a->xdi_adapter, (ENTITY *) req);
-
- if (!req->rc || req->req) {
- diva_os_free(0, data);
- diva_os_free(0, req);
- return (-1);
- }
-
- memcpy(data, &req->req, sizeof(struct mi_pc_maint));
-
- diva_os_free(0, req);
-
- a->xdi_mbox.data_length = sizeof(struct mi_pc_maint);
- a->xdi_mbox.data = data;
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
-
- return (0);
-}
-
-void xdiFreeFile(void *handle)
-{
-}
diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
deleted file mode 100644
index 1ad76650fbf9..000000000000
--- a/drivers/isdn/hardware/eicon/diva.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: diva.h,v 1.1.2.2 2001/02/08 12:25:43 armin Exp $ */
-
-#ifndef __DIVA_XDI_OS_PART_H__
-#define __DIVA_XDI_OS_PART_H__
-
-
-int divasa_xdi_driver_entry(void);
-void divasa_xdi_driver_unload(void);
-void *diva_driver_add_card(void *pdev, unsigned long CardOrdinal);
-void diva_driver_remove_card(void *pdiva);
-
-typedef int (*divas_xdi_copy_to_user_fn_t) (void *os_handle, void __user *dst,
- const void *src, int length);
-
-typedef int (*divas_xdi_copy_from_user_fn_t) (void *os_handle, void *dst,
- const void __user *src, int length);
-
-int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
- int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
-
-int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
- int length, void *msg,
- divas_xdi_copy_from_user_fn_t cp_fn);
-
-void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
- int length, void *msg,
- divas_xdi_copy_from_user_fn_t cp_fn);
-
-void diva_xdi_close_adapter(void *adapter, void *os_handle);
-
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
deleted file mode 100644
index 60e79257dd5f..000000000000
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/* $Id: diva_didd.c,v 1.13.6.4 2005/02/11 19:40:25 armin Exp $
- *
- * DIDD Interface module for Eicon active cards.
- *
- * Functions are in dadapter.c
- *
- * Copyright 2002-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2002-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <net/net_namespace.h>
-
-#include "platform.h"
-#include "di_defs.h"
-#include "dadapter.h"
-#include "divasync.h"
-#include "did_vers.h"
-
-static char *main_revision = "$Revision: 1.13.6.4 $";
-
-static char *DRIVERNAME =
- "Eicon DIVA - DIDD table (http://www.melware.net)";
-static char *DRIVERLNAME = "divadidd";
-char *DRIVERRELEASE_DIDD = "2.0";
-
-MODULE_DESCRIPTION("DIDD table driver for diva drivers");
-MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
-MODULE_SUPPORTED_DEVICE("Eicon diva drivers");
-MODULE_LICENSE("GPL");
-
-#define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-extern int diddfunc_init(void);
-extern void diddfunc_finit(void);
-
-extern void DIVA_DIDD_Read(void *, int);
-
-static struct proc_dir_entry *proc_didd;
-struct proc_dir_entry *proc_net_eicon = NULL;
-
-EXPORT_SYMBOL(DIVA_DIDD_Read);
-EXPORT_SYMBOL(proc_net_eicon);
-
-static char *getrev(const char *revision)
-{
- char *rev;
- char *p;
- if ((p = strchr(revision, ':'))) {
- rev = p + 2;
- p = strchr(rev, '$');
- *--p = 0;
- } else
- rev = "1.0";
- return rev;
-}
-
-static int divadidd_proc_show(struct seq_file *m, void *v)
-{
- char tmprev[32];
-
- strcpy(tmprev, main_revision);
- seq_printf(m, "%s\n", DRIVERNAME);
- seq_printf(m, "name : %s\n", DRIVERLNAME);
- seq_printf(m, "release : %s\n", DRIVERRELEASE_DIDD);
- seq_printf(m, "build : %s(%s)\n",
- diva_didd_common_code_build, DIVA_BUILD);
- seq_printf(m, "revision : %s\n", getrev(tmprev));
-
- return 0;
-}
-
-static int __init create_proc(void)
-{
- proc_net_eicon = proc_mkdir("eicon", init_net.proc_net);
-
- if (proc_net_eicon) {
- proc_didd = proc_create_single(DRIVERLNAME, S_IRUGO,
- proc_net_eicon, divadidd_proc_show);
- return (1);
- }
- return (0);
-}
-
-static void remove_proc(void)
-{
- remove_proc_entry(DRIVERLNAME, proc_net_eicon);
- remove_proc_entry("eicon", init_net.proc_net);
-}
-
-static int __init divadidd_init(void)
-{
- char tmprev[32];
- int ret = 0;
-
- printk(KERN_INFO "%s\n", DRIVERNAME);
- printk(KERN_INFO "%s: Rel:%s Rev:", DRIVERLNAME, DRIVERRELEASE_DIDD);
- strcpy(tmprev, main_revision);
- printk("%s Build:%s(%s)\n", getrev(tmprev),
- diva_didd_common_code_build, DIVA_BUILD);
-
- if (!create_proc()) {
- printk(KERN_ERR "%s: could not create proc entry\n",
- DRIVERLNAME);
- ret = -EIO;
- goto out;
- }
-
- if (!diddfunc_init()) {
- printk(KERN_ERR "%s: failed to connect to DIDD.\n",
- DRIVERLNAME);
-#ifdef MODULE
- remove_proc();
-#endif
- ret = -EIO;
- goto out;
- }
-
-out:
- return (ret);
-}
-
-static void __exit divadidd_exit(void)
-{
- diddfunc_finit();
- remove_proc();
- printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
-}
-
-module_init(divadidd_init);
-module_exit(divadidd_exit);
diff --git a/drivers/isdn/hardware/eicon/diva_dma.c b/drivers/isdn/hardware/eicon/diva_dma.c
deleted file mode 100644
index 217b6aa9f612..000000000000
--- a/drivers/isdn/hardware/eicon/diva_dma.c
+++ /dev/null
@@ -1,94 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "diva_dma.h"
-/*
- Every entry has a length of PAGE_SIZE
- and represents one single physical page
-*/
-struct _diva_dma_map_entry {
- int busy;
- dword phys_bus_addr; /* 32bit address as seen by the card */
- void *local_ram_addr; /* local address as seen by the host */
- void *addr_handle; /* handle used to free allocated memory */
-};
-/*
- Create local mapping structure and init it to default state
-*/
-struct _diva_dma_map_entry *diva_alloc_dma_map(void *os_context, int nentries) {
- diva_dma_map_entry_t *pmap = diva_os_malloc(0, sizeof(*pmap) * (nentries + 1));
- if (pmap)
- memset(pmap, 0, sizeof(*pmap) * (nentries + 1));
- return pmap;
-}
-/*
- Free local map (context should be freed before) if any
-*/
-void diva_free_dma_mapping(struct _diva_dma_map_entry *pmap) {
- if (pmap) {
- diva_os_free(0, pmap);
- }
-}
-/*
- Set information saved on the map entry
-*/
-void diva_init_dma_map_entry(struct _diva_dma_map_entry *pmap,
- int nr, void *virt, dword phys,
- void *addr_handle) {
- pmap[nr].phys_bus_addr = phys;
- pmap[nr].local_ram_addr = virt;
- pmap[nr].addr_handle = addr_handle;
-}
-/*
- Allocate one single entry in the map
-*/
-int diva_alloc_dma_map_entry(struct _diva_dma_map_entry *pmap) {
- int i;
- for (i = 0; (pmap && pmap[i].local_ram_addr); i++) {
- if (!pmap[i].busy) {
- pmap[i].busy = 1;
- return (i);
- }
- }
- return (-1);
-}
-/*
- Free one single entry in the map
-*/
-void diva_free_dma_map_entry(struct _diva_dma_map_entry *pmap, int nr) {
- pmap[nr].busy = 0;
-}
-/*
- Get information saved on the map entry
-*/
-void diva_get_dma_map_entry(struct _diva_dma_map_entry *pmap, int nr,
- void **pvirt, dword *pphys) {
- *pphys = pmap[nr].phys_bus_addr;
- *pvirt = pmap[nr].local_ram_addr;
-}
-void *diva_get_entry_handle(struct _diva_dma_map_entry *pmap, int nr) {
- return (pmap[nr].addr_handle);
-}
diff --git a/drivers/isdn/hardware/eicon/diva_dma.h b/drivers/isdn/hardware/eicon/diva_dma.h
deleted file mode 100644
index d32c91be562b..000000000000
--- a/drivers/isdn/hardware/eicon/diva_dma.h
+++ /dev/null
@@ -1,48 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_DMA_MAPPING_IFC_H__
-#define __DIVA_DMA_MAPPING_IFC_H__
-typedef struct _diva_dma_map_entry diva_dma_map_entry_t;
-struct _diva_dma_map_entry *diva_alloc_dma_map(void *os_context, int nentries);
-void diva_init_dma_map_entry(struct _diva_dma_map_entry *pmap,
- int nr, void *virt, dword phys,
- void *addr_handle);
-int diva_alloc_dma_map_entry(struct _diva_dma_map_entry *pmap);
-void diva_free_dma_map_entry(struct _diva_dma_map_entry *pmap, int entry);
-void diva_get_dma_map_entry(struct _diva_dma_map_entry *pmap, int nr,
- void **pvirt, dword *pphys);
-void diva_free_dma_mapping(struct _diva_dma_map_entry *pmap);
-/*
- Functionality to be implemented by OS wrapper
- and running in process context
-*/
-void diva_init_dma_map(void *hdev,
- struct _diva_dma_map_entry **ppmap,
- int nentries);
-void diva_free_dma_map(void *hdev,
- struct _diva_dma_map_entry *pmap);
-void *diva_get_entry_handle(struct _diva_dma_map_entry *pmap, int nr);
-#endif
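Taken together, diva_dma.c above and this header describe a simple busy-flag allocator over pre-allocated single pages. A hedged lifecycle sketch, assuming the OS wrapper can produce one PAGE_SIZE buffer per entry (alloc_page_pair() below is a placeholder, not part of the driver):

/* Sketch of the map lifecycle; alloc_page_pair() is an assumed
   OS-wrapper helper returning virt/phys/handle for one page. */
static struct _diva_dma_map_entry *setup_map(void *os_context, int n)
{
	struct _diva_dma_map_entry *map = diva_alloc_dma_map(os_context, n);
	int i;

	if (!map)
		return NULL;
	for (i = 0; i < n; i++) {
		void *virt, *handle;
		dword phys;

		if (alloc_page_pair(os_context, &virt, &phys, &handle))
			break;	/* map stays usable with fewer entries */
		diva_init_dma_map_entry(map, i, virt, phys, handle);
	}
	return map;
}

static void use_one_page(struct _diva_dma_map_entry *map)
{
	int nr = diva_alloc_dma_map_entry(map);	/* find a free entry */

	if (nr >= 0) {
		void *virt;
		dword phys;

		diva_get_dma_map_entry(map, nr, &virt, &phys);
		/* ... hand 'phys' to the card, fill/read 'virt' ... */
		diva_free_dma_map_entry(map, nr);
	}
}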
diff --git a/drivers/isdn/hardware/eicon/diva_pci.h b/drivers/isdn/hardware/eicon/diva_pci.h
deleted file mode 100644
index 7ef5db98ad3c..000000000000
--- a/drivers/isdn/hardware/eicon/diva_pci.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: diva_pci.h,v 1.6 2003/01/04 15:29:45 schindler Exp $ */
-
-#ifndef __DIVA_PCI_INTERFACE_H__
-#define __DIVA_PCI_INTERFACE_H__
-
-void __iomem *divasa_remap_pci_bar(diva_os_xdi_adapter_t *a,
- int id,
- unsigned long bar,
- unsigned long area_length);
-void divasa_unmap_pci_bar(void __iomem *bar);
-unsigned long divasa_get_pci_irq(unsigned char bus,
- unsigned char func, void *pci_dev_handle);
-unsigned long divasa_get_pci_bar(unsigned char bus,
- unsigned char func,
- int bar, void *pci_dev_handle);
-byte diva_os_get_pci_bus(void *pci_dev_handle);
-byte diva_os_get_pci_func(void *pci_dev_handle);
-
-#endif
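A hedged sketch of the remap/unmap pair declared above, using the standard ioremap()/iounmap() interface (the real implementation also performs validation and bookkeeping that is omitted here):

#include <linux/io.h>

/* Plausible implementation sketch only; 'id' is unused here but kept
   to match the declaration above. */
void __iomem *divasa_remap_pci_bar(diva_os_xdi_adapter_t *a, int id,
				   unsigned long bar,
				   unsigned long area_length)
{
	return ioremap(bar, area_length);
}

void divasa_unmap_pci_bar(void __iomem *bar)
{
	if (bar)
		iounmap(bar);
}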
diff --git a/drivers/isdn/hardware/eicon/divacapi.h b/drivers/isdn/hardware/eicon/divacapi.h
deleted file mode 100644
index c4868a0d82f4..000000000000
--- a/drivers/isdn/hardware/eicon/divacapi.h
+++ /dev/null
@@ -1,1350 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-/*#define DEBUG */
-
-#include <linux/types.h>
-
-#define IMPLEMENT_DTMF 1
-#define IMPLEMENT_LINE_INTERCONNECT2 1
-#define IMPLEMENT_ECHO_CANCELLER 1
-#define IMPLEMENT_RTP 1
-#define IMPLEMENT_T38 1
-#define IMPLEMENT_FAX_SUB_SEP_PWD 1
-#define IMPLEMENT_V18 1
-#define IMPLEMENT_DTMF_TONE 1
-#define IMPLEMENT_PIAFS 1
-#define IMPLEMENT_FAX_PAPER_FORMATS 1
-#define IMPLEMENT_VOWN 1
-#define IMPLEMENT_CAPIDTMF 1
-#define IMPLEMENT_FAX_NONSTANDARD 1
-#define VSWITCH_SUPPORT 1
-
-
-#define IMPLEMENT_LINE_INTERCONNECT 0
-#define IMPLEMENT_MARKED_OK_AFTER_FC 1
-
-#include "capidtmf.h"
-
-/*------------------------------------------------------------------*/
-/* Common API internal definitions */
-/*------------------------------------------------------------------*/
-
-#define MAX_APPL 240
-#define MAX_NCCI 127
-
-#define MSG_IN_QUEUE_SIZE ((4096 + 3) & 0xfffc) /* must be multiple of 4 */
-
-
-#define MSG_IN_OVERHEAD sizeof(APPL *)
-
-#define MAX_NL_CHANNEL 255
-#define MAX_DATA_B3 8
-#define MAX_DATA_ACK MAX_DATA_B3
-#define MAX_MULTI_IE 6
-#define MAX_MSG_SIZE 256
-#define MAX_MSG_PARMS 10
-#define MAX_CPN_MASK_SIZE 16
-#define MAX_MSN_CONFIG 10
-#define EXT_CONTROLLER 0x80
-#define CODEC 0x01
-#define CODEC_PERMANENT 0x02
-#define ADV_VOICE 0x03
-#define MAX_CIP_TYPES 5 /* kinds of CIP types for group optimization */
-
-#define FAX_CONNECT_INFO_BUFFER_SIZE 256
-#define NCPI_BUFFER_SIZE 256
-
-#define MAX_CHANNELS_PER_PLCI 8
-#define MAX_INTERNAL_COMMAND_LEVELS 4
-#define INTERNAL_REQ_BUFFER_SIZE 272
-
-#define INTERNAL_IND_BUFFER_SIZE 768
-
-#define DTMF_PARAMETER_BUFFER_SIZE 12
-#define ADV_VOICE_COEF_BUFFER_SIZE 50
-
-#define LI_PLCI_B_QUEUE_ENTRIES 256
-
-
-
-typedef struct _APPL APPL;
-typedef struct _PLCI PLCI;
-typedef struct _NCCI NCCI;
-typedef struct _DIVA_CAPI_ADAPTER DIVA_CAPI_ADAPTER;
-typedef struct _DATA_B3_DESC DATA_B3_DESC;
-typedef struct _DATA_ACK_DESC DATA_ACK_DESC;
-typedef struct manufacturer_profile_s MANUFACTURER_PROFILE;
-typedef struct fax_ncpi_s FAX_NCPI;
-typedef struct api_parse_s API_PARSE;
-typedef struct api_save_s API_SAVE;
-typedef struct msn_config_s MSN_CONFIG;
-typedef struct msn_config_max_s MSN_CONFIG_MAX;
-typedef struct msn_ld_s MSN_LD;
-
-struct manufacturer_profile_s {
- dword private_options;
- dword rtp_primary_payloads;
- dword rtp_additional_payloads;
-};
-
-struct fax_ncpi_s {
- word options;
- word format;
-};
-
-struct msn_config_s {
- byte msn[MAX_CPN_MASK_SIZE];
-};
-
-struct msn_config_max_s {
- MSN_CONFIG msn_conf[MAX_MSN_CONFIG];
-};
-
-struct msn_ld_s {
- dword low;
- dword high;
-};
-
-struct api_parse_s {
- word length;
- byte *info;
-};
-
-struct api_save_s {
- API_PARSE parms[MAX_MSG_PARMS + 1];
- byte info[MAX_MSG_SIZE];
-};
-
-struct _DATA_B3_DESC {
- word Handle;
- word Number;
- word Flags;
- word Length;
- void *P;
-};
-
-struct _DATA_ACK_DESC {
- word Handle;
- word Number;
-};
-
-typedef void (*t_std_internal_command)(dword Id, PLCI *plci, byte Rc);
-
-/************************************************************************/
-/* Don't forget to adapt dos.asm after changing the _APPL structure!!!! */
-struct _APPL {
- word Id;
- word NullCREnable;
- word CDEnable;
- dword S_Handle;
-
-
-
-
-
-
- LIST_ENTRY s_function;
- dword s_context;
- word s_count;
- APPL *s_next;
- byte *xbuffer_used;
- void **xbuffer_internal;
- void **xbuffer_ptr;
-
-
-
-
-
-
- byte *queue;
- word queue_size;
- word queue_free;
- word queue_read;
- word queue_write;
- word queue_signal;
- byte msg_lost;
- byte appl_flags;
- word Number;
-
- word MaxBuffer;
- byte MaxNCCI;
- byte MaxNCCIData;
- word MaxDataLength;
- word NCCIDataFlowCtrlTimer;
- byte *ReceiveBuffer;
- word *DataNCCI;
- word *DataFlags;
-};
-
-
-struct _PLCI {
- ENTITY Sig;
- ENTITY NL;
- word RNum;
- word RFlags;
- BUFFERS RData[2];
- BUFFERS XData[1];
- BUFFERS NData[2];
-
- DIVA_CAPI_ADAPTER *adapter;
- APPL *appl;
- PLCI *relatedPTYPLCI;
- byte Id;
- byte State;
- byte sig_req;
- byte nl_req;
- byte SuppState;
- byte channels;
- byte tel;
- byte B1_resource;
- byte B2_prot;
- byte B3_prot;
-
- word command;
- word m_command;
- word internal_command;
- word number;
- word req_in_start;
- word req_in;
- word req_out;
- word msg_in_write_pos;
- word msg_in_read_pos;
- word msg_in_wrap_pos;
-
- void *data_sent_ptr;
- byte data_sent;
- byte send_disc;
- byte sig_global_req;
- byte sig_remove_id;
- byte nl_global_req;
- byte nl_remove_id;
- byte b_channel;
- byte adv_nl;
- byte manufacturer;
- byte call_dir;
- byte hook_state;
- byte spoofed_msg;
- byte ptyState;
- byte cr_enquiry;
- word hangup_flow_ctrl_timer;
-
- word ncci_ring_list;
- byte inc_dis_ncci_table[MAX_CHANNELS_PER_PLCI];
- t_std_internal_command internal_command_queue[MAX_INTERNAL_COMMAND_LEVELS];
- DECLARE_BITMAP(c_ind_mask_table, MAX_APPL);
- DECLARE_BITMAP(group_optimization_mask_table, MAX_APPL);
- byte RBuffer[200];
- dword msg_in_queue[MSG_IN_QUEUE_SIZE/sizeof(dword)];
- API_SAVE saved_msg;
- API_SAVE B_protocol;
- byte fax_connect_info_length;
- byte fax_connect_info_buffer[FAX_CONNECT_INFO_BUFFER_SIZE];
- byte fax_edata_ack_length;
- word nsf_control_bits;
- byte ncpi_state;
- byte ncpi_buffer[NCPI_BUFFER_SIZE];
-
- byte internal_req_buffer[INTERNAL_REQ_BUFFER_SIZE];
- byte internal_ind_buffer[INTERNAL_IND_BUFFER_SIZE + 3];
- dword requested_options_conn;
- dword requested_options;
- word B1_facilities;
- API_SAVE *adjust_b_parms_msg;
- word adjust_b_facilities;
- word adjust_b_command;
- word adjust_b_ncci;
- word adjust_b_mode;
- word adjust_b_state;
- byte adjust_b_restore;
-
- byte dtmf_rec_active;
- word dtmf_rec_pulse_ms;
- word dtmf_rec_pause_ms;
- byte dtmf_send_requests;
- word dtmf_send_pulse_ms;
- word dtmf_send_pause_ms;
- word dtmf_cmd;
- word dtmf_msg_number_queue[8];
- byte dtmf_parameter_length;
- byte dtmf_parameter_buffer[DTMF_PARAMETER_BUFFER_SIZE];
-
-
- t_capidtmf_state capidtmf_state;
-
-
- byte li_bchannel_id; /* BRI: 1..2, PRI: 1..32 */
- byte li_channel_bits;
- byte li_notify_update;
- word li_cmd;
- word li_write_command;
- word li_write_channel;
- word li_plci_b_write_pos;
- word li_plci_b_read_pos;
- word li_plci_b_req_pos;
- dword li_plci_b_queue[LI_PLCI_B_QUEUE_ENTRIES];
-
-
- word ec_cmd;
- word ec_idi_options;
- word ec_tail_length;
-
-
- byte tone_last_indication_code;
-
- byte vswitchstate;
- byte vsprot;
- byte vsprotdialect;
- byte notifiedcall; /* Flag if it is a spoofed call */
-
- int rx_dma_descriptor;
- dword rx_dma_magic;
-};
-
-
-struct _NCCI {
- byte data_out;
- byte data_pending;
- byte data_ack_out;
- byte data_ack_pending;
- DATA_B3_DESC DBuffer[MAX_DATA_B3];
- DATA_ACK_DESC DataAck[MAX_DATA_ACK];
-};
-
-
-struct _DIVA_CAPI_ADAPTER {
- IDI_CALL request;
- byte Id;
- byte max_plci;
- byte max_listen;
- byte listen_active;
- PLCI *plci;
- byte ch_ncci[MAX_NL_CHANNEL + 1];
- byte ncci_ch[MAX_NCCI + 1];
- byte ncci_plci[MAX_NCCI + 1];
- byte ncci_state[MAX_NCCI + 1];
- byte ncci_next[MAX_NCCI + 1];
- NCCI ncci[MAX_NCCI + 1];
-
- byte ch_flow_control[MAX_NL_CHANNEL + 1]; /* Used by XON protocol */
- byte ch_flow_control_pending;
- byte ch_flow_plci[MAX_NL_CHANNEL + 1];
- int last_flow_control_ch;
-
- dword Info_Mask[MAX_APPL];
- dword CIP_Mask[MAX_APPL];
-
- dword Notification_Mask[MAX_APPL];
- PLCI *codec_listen[MAX_APPL];
- dword requested_options_table[MAX_APPL];
- API_PROFILE profile;
- MANUFACTURER_PROFILE man_profile;
- dword manufacturer_features;
-
- byte AdvCodecFLAG;
- PLCI *AdvCodecPLCI;
- PLCI *AdvSignalPLCI;
- APPL *AdvSignalAppl;
- byte TelOAD[23];
- byte TelOSA[23];
- byte scom_appl_disable;
- PLCI *automatic_lawPLCI;
- byte automatic_law;
- byte u_law;
-
- byte adv_voice_coef_length;
- byte adv_voice_coef_buffer[ADV_VOICE_COEF_BUFFER_SIZE];
-
- byte li_pri;
- byte li_channels;
- word li_base;
-
- byte adapter_disabled;
- byte group_optimization_enabled; /* use application groups if enabled */
- dword sdram_bar;
- byte flag_dynamic_l1_down; /* for hunt groups: take layer 1 down if no application is present */
- byte FlowControlIdTable[256];
- byte FlowControlSkipTable[256];
- void *os_card; /* pointer to associated OS-dependent adapter structure */
-};
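
The parallel ncci_* arrays above act as per-adapter lookup tables indexed by NCCI (ncci_ch maps an NCCI back to its network-layer channel the same way). A minimal sketch of resolving an NCCI to its owning PLCI; the helper itself is hypothetical, and the 1-based PLCI id is an assumption inferred from the Id/max_plci members:

	static PLCI *ncci_to_plci(DIVA_CAPI_ADAPTER *adapter, word ncci)
	{
		byte id = adapter->ncci_plci[ncci];        /* NCCI -> owning PLCI id */

		return id ? &adapter->plci[id - 1] : NULL; /* 1-based id: assumption */
	}
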
-
-
-/*------------------------------------------------------------------*/
-/* Application flags */
-/*------------------------------------------------------------------*/
-
-#define APPL_FLAG_OLD_LI_SPEC 0x01
-#define APPL_FLAG_PRIV_EC_SPEC 0x02
-
-
-/*------------------------------------------------------------------*/
-/* API parameter definitions */
-/*------------------------------------------------------------------*/
-
-#define X75_TTX 1 /* x.75 for ttx */
-#define TRF 2 /* transparent with hdlc framing */
-#define TRF_IN 3 /* transparent with hdlc framing, incoming */
-#define SDLC 4 /* sdlc, sna layer-2 */
-#define X75_BTX 5 /* x.75 for btx */
-#define LAPD 6 /* lapd (Q.921) */
-#define X25_L2 7 /* x.25 layer-2 */
-#define V120_L2 8 /* V.120 layer-2 protocol */
-#define V42_IN 9 /* V.42 layer-2 protocol, incoming */
-#define V42 10 /* V.42 layer-2 protocol */
-#define MDM_ATP 11 /* AT Parser built in the L2 */
-#define X75_V42BIS 12 /* ISO7776 (X.75 SLP) modified to support V.42 bis compression */
-#define RTPL2_IN 13 /* RTP layer-2 protocol, incoming */
-#define RTPL2 14 /* RTP layer-2 protocol */
-#define V120_V42BIS 15 /* V.120 layer-2 protocol supporting V.42 bis compression */
-
-#define T70NL 1
-#define X25PLP 2
-#define T70NLX 3
-#define TRANSPARENT_NL 4
-#define ISO8208 5
-#define T30 6
-
-
-/*------------------------------------------------------------------*/
-/* FAX interface to IDI */
-/*------------------------------------------------------------------*/
-
-#define CAPI_MAX_HEAD_LINE_SPACE 89
-#define CAPI_MAX_DATE_TIME_LENGTH 18
-
-#define T30_MAX_STATION_ID_LENGTH 20
-#define T30_MAX_SUBADDRESS_LENGTH 20
-#define T30_MAX_PASSWORD_LENGTH 20
-
-typedef struct t30_info_s T30_INFO;
-struct t30_info_s {
- byte code;
- byte rate_div_2400;
- byte resolution;
- byte data_format;
- byte pages_low;
- byte pages_high;
- byte operating_mode;
- byte control_bits_low;
- byte control_bits_high;
- byte feature_bits_low;
- byte feature_bits_high;
- byte recording_properties;
- byte universal_6;
- byte universal_7;
- byte station_id_len;
- byte head_line_len;
- byte station_id[T30_MAX_STATION_ID_LENGTH];
-/* byte head_line[]; */
-/* byte sub_sep_length; */
-/* byte sub_sep_field[]; */
-/* byte pwd_length; */
-/* byte pwd_field[]; */
-/* byte nsf_info_length; */
-/* byte nsf_info_field[]; */
-};
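
The commented-out members above mark variable-length, length-prefixed fields that follow the fixed structure on the wire. A minimal sketch of walking past the head line to the SUB/SEP field, assuming the layout implied by those comments (the helper name is hypothetical):

	static const byte *t30_info_sub_sep(const T30_INFO *info, byte *sub_sep_len)
	{
		/* variable-length data begins immediately after the fixed part */
		const byte *p = (const byte *)(info + 1);

		p += info->head_line_len;   /* skip the variable-length head line */
		*sub_sep_len = *p++;        /* length prefix of the SUB/SEP field */
		return p;                   /* start of sub_sep_field[] */
	}
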
-
-
-#define T30_RESOLUTION_R8_0385 0x00
-#define T30_RESOLUTION_R8_0770_OR_200 0x01
-#define T30_RESOLUTION_R8_1540 0x02
-#define T30_RESOLUTION_R16_1540_OR_400 0x04
-#define T30_RESOLUTION_R4_0385_OR_100 0x08
-#define T30_RESOLUTION_300_300 0x10
-#define T30_RESOLUTION_INCH_BASED 0x40
-#define T30_RESOLUTION_METRIC_BASED 0x80
-
-#define T30_RECORDING_WIDTH_ISO_A4 0
-#define T30_RECORDING_WIDTH_ISO_B4 1
-#define T30_RECORDING_WIDTH_ISO_A3 2
-#define T30_RECORDING_WIDTH_COUNT 3
-
-#define T30_RECORDING_LENGTH_ISO_A4 0
-#define T30_RECORDING_LENGTH_ISO_B4 1
-#define T30_RECORDING_LENGTH_UNLIMITED 2
-#define T30_RECORDING_LENGTH_COUNT 3
-
-#define T30_MIN_SCANLINE_TIME_00_00_00 0
-#define T30_MIN_SCANLINE_TIME_05_05_05 1
-#define T30_MIN_SCANLINE_TIME_10_05_05 2
-#define T30_MIN_SCANLINE_TIME_10_10_10 3
-#define T30_MIN_SCANLINE_TIME_20_10_10 4
-#define T30_MIN_SCANLINE_TIME_20_20_20 5
-#define T30_MIN_SCANLINE_TIME_40_20_20 6
-#define T30_MIN_SCANLINE_TIME_40_40_40 7
-#define T30_MIN_SCANLINE_TIME_RES_8 8
-#define T30_MIN_SCANLINE_TIME_RES_9 9
-#define T30_MIN_SCANLINE_TIME_RES_10 10
-#define T30_MIN_SCANLINE_TIME_10_10_05 11
-#define T30_MIN_SCANLINE_TIME_20_10_05 12
-#define T30_MIN_SCANLINE_TIME_20_20_10 13
-#define T30_MIN_SCANLINE_TIME_40_20_10 14
-#define T30_MIN_SCANLINE_TIME_40_40_20 15
-#define T30_MIN_SCANLINE_TIME_COUNT 16
-
-#define T30_DATA_FORMAT_SFF 0
-#define T30_DATA_FORMAT_ASCII 1
-#define T30_DATA_FORMAT_NATIVE 2
-#define T30_DATA_FORMAT_COUNT 3
-
-
-#define T30_OPERATING_MODE_STANDARD 0
-#define T30_OPERATING_MODE_CLASS2 1
-#define T30_OPERATING_MODE_CLASS1 2
-#define T30_OPERATING_MODE_CAPI 3
-#define T30_OPERATING_MODE_CAPI_NEG 4
-#define T30_OPERATING_MODE_COUNT 5
-
-/* EDATA transmit messages */
-#define EDATA_T30_DIS 0x01
-#define EDATA_T30_FTT 0x02
-#define EDATA_T30_MCF 0x03
-#define EDATA_T30_PARAMETERS 0x04
-
-/* EDATA receive messages */
-#define EDATA_T30_DCS 0x81
-#define EDATA_T30_TRAIN_OK 0x82
-#define EDATA_T30_EOP 0x83
-#define EDATA_T30_MPS 0x84
-#define EDATA_T30_EOM 0x85
-#define EDATA_T30_DTC 0x86
-#define EDATA_T30_PAGE_END 0x87 /* Indicates end of page data. Reserved, but not implemented! */
-#define EDATA_T30_EOP_CAPI 0x88
-
-
-#define T30_SUCCESS 0
-#define T30_ERR_NO_DIS_RECEIVED 1
-#define T30_ERR_TIMEOUT_NO_RESPONSE 2
-#define T30_ERR_RETRY_NO_RESPONSE 3
-#define T30_ERR_TOO_MANY_REPEATS 4
-#define T30_ERR_UNEXPECTED_MESSAGE 5
-#define T30_ERR_UNEXPECTED_DCN 6
-#define T30_ERR_DTC_UNSUPPORTED 7
-#define T30_ERR_ALL_RATES_FAILED 8
-#define T30_ERR_TOO_MANY_TRAINS 9
-#define T30_ERR_RECEIVE_CORRUPTED 10
-#define T30_ERR_UNEXPECTED_DISC 11
-#define T30_ERR_APPLICATION_DISC 12
-#define T30_ERR_INCOMPATIBLE_DIS 13
-#define T30_ERR_INCOMPATIBLE_DCS 14
-#define T30_ERR_TIMEOUT_NO_COMMAND 15
-#define T30_ERR_RETRY_NO_COMMAND 16
-#define T30_ERR_TIMEOUT_COMMAND_TOO_LONG 17
-#define T30_ERR_TIMEOUT_RESPONSE_TOO_LONG 18
-#define T30_ERR_NOT_IDENTIFIED 19
-#define T30_ERR_SUPERVISORY_TIMEOUT 20
-#define T30_ERR_TOO_LONG_SCAN_LINE 21
-/* #define T30_ERR_RETRY_NO_PAGE_AFTER_MPS 22 */
-#define T30_ERR_RETRY_NO_PAGE_RECEIVED 23
-#define T30_ERR_RETRY_NO_DCS_AFTER_FTT 24
-#define T30_ERR_RETRY_NO_DCS_AFTER_EOM 25
-#define T30_ERR_RETRY_NO_DCS_AFTER_MPS 26
-#define T30_ERR_RETRY_NO_DCN_AFTER_MCF 27
-#define T30_ERR_RETRY_NO_DCN_AFTER_RTN 28
-#define T30_ERR_RETRY_NO_CFR 29
-#define T30_ERR_RETRY_NO_MCF_AFTER_EOP 30
-#define T30_ERR_RETRY_NO_MCF_AFTER_EOM 31
-#define T30_ERR_RETRY_NO_MCF_AFTER_MPS 32
-#define T30_ERR_SUB_SEP_UNSUPPORTED 33
-#define T30_ERR_PWD_UNSUPPORTED 34
-#define T30_ERR_SUB_SEP_PWD_UNSUPPORTED 35
-#define T30_ERR_INVALID_COMMAND_FRAME 36
-#define T30_ERR_UNSUPPORTED_PAGE_CODING 37
-#define T30_ERR_INVALID_PAGE_CODING 38
-#define T30_ERR_INCOMPATIBLE_PAGE_CONFIG 39
-#define T30_ERR_TIMEOUT_FROM_APPLICATION 40
-#define T30_ERR_V34FAX_NO_REACTION_ON_MARK 41
-#define T30_ERR_V34FAX_TRAINING_TIMEOUT 42
-#define T30_ERR_V34FAX_UNEXPECTED_V21 43
-#define T30_ERR_V34FAX_PRIMARY_CTS_ON 44
-#define T30_ERR_V34FAX_TURNAROUND_POLLING 45
-#define T30_ERR_V34FAX_V8_INCOMPATIBILITY 46
-
-
-#define T30_CONTROL_BIT_DISABLE_FINE 0x0001
-#define T30_CONTROL_BIT_ENABLE_ECM 0x0002
-#define T30_CONTROL_BIT_ECM_64_BYTES 0x0004
-#define T30_CONTROL_BIT_ENABLE_2D_CODING 0x0008
-#define T30_CONTROL_BIT_ENABLE_T6_CODING 0x0010
-#define T30_CONTROL_BIT_ENABLE_UNCOMPR 0x0020
-#define T30_CONTROL_BIT_ACCEPT_POLLING 0x0040
-#define T30_CONTROL_BIT_REQUEST_POLLING 0x0080
-#define T30_CONTROL_BIT_MORE_DOCUMENTS 0x0100
-#define T30_CONTROL_BIT_ACCEPT_SUBADDRESS 0x0200
-#define T30_CONTROL_BIT_ACCEPT_SEL_POLLING 0x0400
-#define T30_CONTROL_BIT_ACCEPT_PASSWORD 0x0800
-#define T30_CONTROL_BIT_ENABLE_V34FAX 0x1000
-#define T30_CONTROL_BIT_EARLY_CONNECT 0x2000
-
-#define T30_CONTROL_BIT_ALL_FEATURES (T30_CONTROL_BIT_ENABLE_ECM | T30_CONTROL_BIT_ENABLE_2D_CODING | T30_CONTROL_BIT_ENABLE_T6_CODING | T30_CONTROL_BIT_ENABLE_UNCOMPR | T30_CONTROL_BIT_ENABLE_V34FAX)
-
-#define T30_FEATURE_BIT_FINE 0x0001
-#define T30_FEATURE_BIT_ECM 0x0002
-#define T30_FEATURE_BIT_ECM_64_BYTES 0x0004
-#define T30_FEATURE_BIT_2D_CODING 0x0008
-#define T30_FEATURE_BIT_T6_CODING 0x0010
-#define T30_FEATURE_BIT_UNCOMPR_ENABLED 0x0020
-#define T30_FEATURE_BIT_POLLING 0x0040
-#define T30_FEATURE_BIT_MORE_DOCUMENTS 0x0100
-#define T30_FEATURE_BIT_V34FAX 0x1000
-
-
-#define T30_NSF_CONTROL_BIT_ENABLE_NSF 0x0001
-#define T30_NSF_CONTROL_BIT_RAW_INFO 0x0002
-#define T30_NSF_CONTROL_BIT_NEGOTIATE_IND 0x0004
-#define T30_NSF_CONTROL_BIT_NEGOTIATE_RESP 0x0008
-
-#define T30_NSF_ELEMENT_NSF_FIF 0x00
-#define T30_NSF_ELEMENT_NSC_FIF 0x01
-#define T30_NSF_ELEMENT_NSS_FIF 0x02
-#define T30_NSF_ELEMENT_COMPANY_NAME 0x03
-
-
-/*------------------------------------------------------------------*/
-/* Analog modem definitions */
-/*------------------------------------------------------------------*/
-
-typedef struct async_s ASYNC_FORMAT;
-struct async_s {
- unsigned pe:1;
- unsigned parity:2;
- unsigned spare:2;
- unsigned stp:1;
- unsigned ch_len:2; /* 3rd octet in CAI */
-};
-
-
-/*------------------------------------------------------------------*/
-/* PLCI/NCCI states */
-/*------------------------------------------------------------------*/
-
-#define IDLE 0
-#define OUTG_CON_PENDING 1
-#define INC_CON_PENDING 2
-#define INC_CON_ALERT 3
-#define INC_CON_ACCEPT 4
-#define INC_ACT_PENDING 5
-#define LISTENING 6
-#define CONNECTED 7
-#define OUTG_DIS_PENDING 8
-#define INC_DIS_PENDING 9
-#define LOCAL_CONNECT 10
-#define INC_RES_PENDING 11
-#define OUTG_RES_PENDING 12
-#define SUSPENDING 13
-#define ADVANCED_VOICE_SIG 14
-#define ADVANCED_VOICE_NOSIG 15
-#define RESUMING 16
-#define INC_CON_CONNECTED_ALERT 17
-#define OUTG_REJ_PENDING 18
-
-
-/*------------------------------------------------------------------*/
-/* auxiliary states for supplementary services */
-/*------------------------------------------------------------------*/
-
-#define IDLE 0
-#define HOLD_REQUEST 1
-#define HOLD_INDICATE 2
-#define CALL_HELD 3
-#define RETRIEVE_REQUEST 4
-#define RETRIEVE_INDICATION 5
-
-/*------------------------------------------------------------------*/
-/* Capi IE + Msg types */
-/*------------------------------------------------------------------*/
-#define ESC_CAUSE (0x800 | CAU)       /* Escape cause element */
-#define ESC_MSGTYPE (0x800 | MSGTYPEIE) /* Escape message type */
-#define ESC_CHI (0x800 | CHI)         /* Escape channel id */
-#define ESC_LAW (0x800 | BC)          /* Escape law info */
-#define ESC_CR (0x800 | CRIE)         /* Escape CallReference */
-#define ESC_PROFILE (0x800 | PROFILEIE) /* Escape profile */
-#define ESC_SSEXT (0x800 | SSEXTIE)   /* Escape Supplem. Serv. */
-#define ESC_VSWITCH (0x800 | VSWITCHIE) /* Escape VSwitch */
-#define CST 0x14 /* Call State information element */
-#define PI 0x1E /* Progress Indicator */
-#define NI 0x27 /* Notification Ind */
-#define CONN_NR 0x4C /* Connected Number */
-#define CONG_RNR 0xBF /* Congestion RNR */
-#define CONG_RR 0xB0 /* Congestion RR */
-#define RESERVED 0xFF /* Res. for future use */
-#define ON_BOARD_CODEC 0x02 /* external controller */
-#define HANDSET 0x04 /* Codec+Handset(Pro11) */
-#define HOOK_SUPPORT 0x01 /* activate Hook signal */
-#define SCR 0x7a /* unscreened number */
-
-#define HOOK_OFF_REQ 0x9001 /* internal conn req */
-#define HOOK_ON_REQ 0x9002 /* internal disc req */
-#define SUSPEND_REQ 0x9003 /* internal susp req */
-#define RESUME_REQ 0x9004 /* internal resume req */
-#define USELAW_REQ 0x9005 /* internal law req */
-#define LISTEN_SIG_ASSIGN_PEND 0x9006
-#define PERM_LIST_REQ 0x900a /* permanent conn DCE */
-#define C_HOLD_REQ 0x9011
-#define C_RETRIEVE_REQ 0x9012
-#define C_NCR_FAC_REQ 0x9013
-#define PERM_COD_ASSIGN 0x9014
-#define PERM_COD_CALL 0x9015
-#define PERM_COD_HOOK 0x9016
-#define PERM_COD_CONN_PEND 0x9017 /* wait for connect_con */
-#define PTY_REQ_PEND 0x9018
-#define CD_REQ_PEND 0x9019
-#define CF_START_PEND 0x901a
-#define CF_STOP_PEND 0x901b
-#define ECT_REQ_PEND 0x901c
-#define GETSERV_REQ_PEND 0x901d
-#define BLOCK_PLCI 0x901e
-#define INTERR_NUMBERS_REQ_PEND 0x901f
-#define INTERR_DIVERSION_REQ_PEND 0x9020
-#define MWI_ACTIVATE_REQ_PEND 0x9021
-#define MWI_DEACTIVATE_REQ_PEND 0x9022
-#define SSEXT_REQ_COMMAND 0x9023
-#define SSEXT_NC_REQ_COMMAND 0x9024
-#define START_L1_SIG_ASSIGN_PEND 0x9025
-#define REM_L1_SIG_ASSIGN_PEND 0x9026
-#define CONF_BEGIN_REQ_PEND 0x9027
-#define CONF_ADD_REQ_PEND 0x9028
-#define CONF_SPLIT_REQ_PEND 0x9029
-#define CONF_DROP_REQ_PEND 0x902a
-#define CONF_ISOLATE_REQ_PEND 0x902b
-#define CONF_REATTACH_REQ_PEND 0x902c
-#define VSWITCH_REQ_PEND 0x902d
-#define GET_MWI_STATE 0x902e
-#define CCBS_REQUEST_REQ_PEND 0x902f
-#define CCBS_DEACTIVATE_REQ_PEND 0x9030
-#define CCBS_INTERROGATE_REQ_PEND 0x9031
-
-#define NO_INTERNAL_COMMAND 0
-#define DTMF_COMMAND_1 1
-#define DTMF_COMMAND_2 2
-#define DTMF_COMMAND_3 3
-#define MIXER_COMMAND_1 4
-#define MIXER_COMMAND_2 5
-#define MIXER_COMMAND_3 6
-#define ADV_VOICE_COMMAND_CONNECT_1 7
-#define ADV_VOICE_COMMAND_CONNECT_2 8
-#define ADV_VOICE_COMMAND_CONNECT_3 9
-#define ADV_VOICE_COMMAND_DISCONNECT_1 10
-#define ADV_VOICE_COMMAND_DISCONNECT_2 11
-#define ADV_VOICE_COMMAND_DISCONNECT_3 12
-#define ADJUST_B_RESTORE_1 13
-#define ADJUST_B_RESTORE_2 14
-#define RESET_B3_COMMAND_1 15
-#define SELECT_B_COMMAND_1 16
-#define FAX_CONNECT_INFO_COMMAND_1 17
-#define FAX_CONNECT_INFO_COMMAND_2 18
-#define FAX_ADJUST_B23_COMMAND_1 19
-#define FAX_ADJUST_B23_COMMAND_2 20
-#define EC_COMMAND_1 21
-#define EC_COMMAND_2 22
-#define EC_COMMAND_3 23
-#define RTP_CONNECT_B3_REQ_COMMAND_1 24
-#define RTP_CONNECT_B3_REQ_COMMAND_2 25
-#define RTP_CONNECT_B3_REQ_COMMAND_3 26
-#define RTP_CONNECT_B3_RES_COMMAND_1 27
-#define RTP_CONNECT_B3_RES_COMMAND_2 28
-#define RTP_CONNECT_B3_RES_COMMAND_3 29
-#define HOLD_SAVE_COMMAND_1 30
-#define RETRIEVE_RESTORE_COMMAND_1 31
-#define FAX_DISCONNECT_COMMAND_1 32
-#define FAX_DISCONNECT_COMMAND_2 33
-#define FAX_DISCONNECT_COMMAND_3 34
-#define FAX_EDATA_ACK_COMMAND_1 35
-#define FAX_EDATA_ACK_COMMAND_2 36
-#define FAX_CONNECT_ACK_COMMAND_1 37
-#define FAX_CONNECT_ACK_COMMAND_2 38
-#define STD_INTERNAL_COMMAND_COUNT 39
-
-#define UID 0x2d /* User Id for Mgmt */
-
-#define CALL_DIR_OUT 0x01 /* call direction of initial call */
-#define CALL_DIR_IN 0x02
-#define CALL_DIR_ORIGINATE 0x04 /* DTE/DCE direction according to */
-#define CALL_DIR_ANSWER 0x08 /* state of B-Channel Operation */
-#define CALL_DIR_FORCE_OUTG_NL 0x10 /* for RESET_B3 reconnect, after DISC_B3... */
-
-#define AWAITING_MANUF_CON 0x80 /* command spoofing flags */
-#define SPOOFING_REQUIRED 0xff
-#define AWAITING_SELECT_B 0xef
-
-/*------------------------------------------------------------------*/
-/* B_CTRL / DSP_CTRL */
-/*------------------------------------------------------------------*/
-
-#define DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS 0x01
-#define DSP_CTRL_SET_BCHANNEL_PASSIVATION_BRI 0x02
-#define DSP_CTRL_SET_DTMF_PARAMETERS 0x03
-
-#define MANUFACTURER_FEATURE_SLAVE_CODEC 0x00000001L
-#define MANUFACTURER_FEATURE_FAX_MORE_DOCUMENTS 0x00000002L
-#define MANUFACTURER_FEATURE_HARDDTMF 0x00000004L
-#define MANUFACTURER_FEATURE_SOFTDTMF_SEND 0x00000008L
-#define MANUFACTURER_FEATURE_DTMF_PARAMETERS 0x00000010L
-#define MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE 0x00000020L
-#define MANUFACTURER_FEATURE_FAX_SUB_SEP_PWD 0x00000040L
-#define MANUFACTURER_FEATURE_V18 0x00000080L
-#define MANUFACTURER_FEATURE_MIXER_CH_CH 0x00000100L
-#define MANUFACTURER_FEATURE_MIXER_CH_PC 0x00000200L
-#define MANUFACTURER_FEATURE_MIXER_PC_CH 0x00000400L
-#define MANUFACTURER_FEATURE_MIXER_PC_PC 0x00000800L
-#define MANUFACTURER_FEATURE_ECHO_CANCELLER 0x00001000L
-#define MANUFACTURER_FEATURE_RTP 0x00002000L
-#define MANUFACTURER_FEATURE_T38 0x00004000L
-#define MANUFACTURER_FEATURE_TRANSP_DELIVERY_CONF 0x00008000L
-#define MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL 0x00010000L
-#define MANUFACTURER_FEATURE_OOB_CHANNEL 0x00020000L
-#define MANUFACTURER_FEATURE_IN_BAND_CHANNEL 0x00040000L
-#define MANUFACTURER_FEATURE_IN_BAND_FEATURE 0x00080000L
-#define MANUFACTURER_FEATURE_PIAFS 0x00100000L
-#define MANUFACTURER_FEATURE_DTMF_TONE 0x00200000L
-#define MANUFACTURER_FEATURE_FAX_PAPER_FORMATS 0x00400000L
-#define MANUFACTURER_FEATURE_OK_FC_LABEL 0x00800000L
-#define MANUFACTURER_FEATURE_VOWN 0x01000000L
-#define MANUFACTURER_FEATURE_XCONNECT 0x02000000L
-#define MANUFACTURER_FEATURE_DMACONNECT 0x04000000L
-#define MANUFACTURER_FEATURE_AUDIO_TAP 0x08000000L
-#define MANUFACTURER_FEATURE_FAX_NONSTANDARD 0x10000000L
-
-/*------------------------------------------------------------------*/
-/* DTMF interface to IDI */
-/*------------------------------------------------------------------*/
-
-
-#define DTMF_DIGIT_TONE_LOW_GROUP_697_HZ 0x00
-#define DTMF_DIGIT_TONE_LOW_GROUP_770_HZ 0x01
-#define DTMF_DIGIT_TONE_LOW_GROUP_852_HZ 0x02
-#define DTMF_DIGIT_TONE_LOW_GROUP_941_HZ 0x03
-#define DTMF_DIGIT_TONE_LOW_GROUP_MASK 0x03
-#define DTMF_DIGIT_TONE_HIGH_GROUP_1209_HZ 0x00
-#define DTMF_DIGIT_TONE_HIGH_GROUP_1336_HZ 0x04
-#define DTMF_DIGIT_TONE_HIGH_GROUP_1477_HZ 0x08
-#define DTMF_DIGIT_TONE_HIGH_GROUP_1633_HZ 0x0c
-#define DTMF_DIGIT_TONE_HIGH_GROUP_MASK 0x0c
-#define DTMF_DIGIT_TONE_CODE_0 0x07
-#define DTMF_DIGIT_TONE_CODE_1 0x00
-#define DTMF_DIGIT_TONE_CODE_2 0x04
-#define DTMF_DIGIT_TONE_CODE_3 0x08
-#define DTMF_DIGIT_TONE_CODE_4 0x01
-#define DTMF_DIGIT_TONE_CODE_5 0x05
-#define DTMF_DIGIT_TONE_CODE_6 0x09
-#define DTMF_DIGIT_TONE_CODE_7 0x02
-#define DTMF_DIGIT_TONE_CODE_8 0x06
-#define DTMF_DIGIT_TONE_CODE_9 0x0a
-#define DTMF_DIGIT_TONE_CODE_STAR 0x03
-#define DTMF_DIGIT_TONE_CODE_HASHMARK 0x0b
-#define DTMF_DIGIT_TONE_CODE_A 0x0c
-#define DTMF_DIGIT_TONE_CODE_B 0x0d
-#define DTMF_DIGIT_TONE_CODE_C 0x0e
-#define DTMF_DIGIT_TONE_CODE_D 0x0f
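
Each digit code above packs the low-group frequency into bits 0..1 and the high-group frequency into bits 2..3, so a code can be split with the two group masks. A small sketch (the helper and its tables are illustrative, not part of the driver):

	static const word dtmf_low_hz[4]  = {  697,  770,  852,  941 };
	static const word dtmf_high_hz[4] = { 1209, 1336, 1477, 1633 };

	static void dtmf_code_to_frequencies(byte code, word *low_hz, word *high_hz)
	{
		*low_hz  = dtmf_low_hz[code & DTMF_DIGIT_TONE_LOW_GROUP_MASK];
		*high_hz = dtmf_high_hz[(code & DTMF_DIGIT_TONE_HIGH_GROUP_MASK) >> 2];
	}

For example, DTMF_DIGIT_TONE_CODE_9 (0x0a) splits into 852 Hz and 1477 Hz, the standard frequency pair for digit 9.
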
-
-#define DTMF_UDATA_REQUEST_SEND_DIGITS 16
-#define DTMF_UDATA_REQUEST_ENABLE_RECEIVER 17
-#define DTMF_UDATA_REQUEST_DISABLE_RECEIVER 18
-#define DTMF_UDATA_INDICATION_DIGITS_SENT 16
-#define DTMF_UDATA_INDICATION_DIGITS_RECEIVED 17
-#define DTMF_UDATA_INDICATION_MODEM_CALLING_TONE 18
-#define DTMF_UDATA_INDICATION_FAX_CALLING_TONE 19
-#define DTMF_UDATA_INDICATION_ANSWER_TONE 20
-
-#define UDATA_REQUEST_MIXER_TAP_DATA 27
-#define UDATA_INDICATION_MIXER_TAP_DATA 27
-
-#define DTMF_LISTEN_ACTIVE_FLAG 0x01
-#define DTMF_SEND_DIGIT_FLAG 0x01
-
-
-/*------------------------------------------------------------------*/
-/* Mixer interface to IDI */
-/*------------------------------------------------------------------*/
-
-
-#define LI2_FLAG_PCCONNECT_A_B 0x40000000
-#define LI2_FLAG_PCCONNECT_B_A 0x80000000
-
-#define MIXER_BCHANNELS_BRI 2
-#define MIXER_IC_CHANNELS_BRI MIXER_BCHANNELS_BRI
-#define MIXER_IC_CHANNEL_BASE MIXER_BCHANNELS_BRI
-#define MIXER_CHANNELS_BRI (MIXER_BCHANNELS_BRI + MIXER_IC_CHANNELS_BRI)
-#define MIXER_CHANNELS_PRI 32
-
-typedef struct li_config_s LI_CONFIG;
-
-struct xconnect_card_address_s {
- dword low;
- dword high;
-};
-
-struct xconnect_transfer_address_s {
- struct xconnect_card_address_s card_address;
- dword offset;
-};
-
-struct li_config_s {
- DIVA_CAPI_ADAPTER *adapter;
- PLCI *plci;
- struct xconnect_transfer_address_s send_b;
- struct xconnect_transfer_address_s send_pc;
- byte *flag_table; /* dword aligned and sized */
- byte *coef_table; /* dword aligned and sized */
- byte channel;
- byte curchnl;
- byte chflags;
-};
-
-extern LI_CONFIG *li_config_table;
-extern word li_total_channels;
-
-#define LI_CHANNEL_INVOLVED 0x01
-#define LI_CHANNEL_ACTIVE 0x02
-#define LI_CHANNEL_TX_DATA 0x04
-#define LI_CHANNEL_RX_DATA 0x08
-#define LI_CHANNEL_CONFERENCE 0x10
-#define LI_CHANNEL_ADDRESSES_SET 0x80
-
-#define LI_CHFLAG_MONITOR 0x01
-#define LI_CHFLAG_MIX 0x02
-#define LI_CHFLAG_LOOP 0x04
-
-#define LI_FLAG_INTERCONNECT 0x01
-#define LI_FLAG_MONITOR 0x02
-#define LI_FLAG_MIX 0x04
-#define LI_FLAG_PCCONNECT 0x08
-#define LI_FLAG_CONFERENCE 0x10
-#define LI_FLAG_ANNOUNCEMENT 0x20
-
-#define LI_COEF_CH_CH 0x01
-#define LI_COEF_CH_PC 0x02
-#define LI_COEF_PC_CH 0x04
-#define LI_COEF_PC_PC 0x08
-#define LI_COEF_CH_CH_SET 0x10
-#define LI_COEF_CH_PC_SET 0x20
-#define LI_COEF_PC_CH_SET 0x40
-#define LI_COEF_PC_PC_SET 0x80
-
-#define LI_REQ_SILENT_UPDATE 0xffff
-
-#define LI_PLCI_B_LAST_FLAG ((dword) 0x80000000L)
-#define LI_PLCI_B_DISC_FLAG ((dword) 0x40000000L)
-#define LI_PLCI_B_SKIP_FLAG ((dword) 0x20000000L)
-#define LI_PLCI_B_FLAG_MASK ((dword) 0xe0000000L)
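
The three LI_PLCI_B_* flags occupy the top bits of each li_plci_b_queue entry, so LI_PLCI_B_FLAG_MASK splits an entry into flags and payload. A hedged sketch; the meaning of the low bits is inferred from the mask alone:

	dword entry = plci->li_plci_b_queue[plci->li_plci_b_read_pos];
	dword flags = entry & LI_PLCI_B_FLAG_MASK;    /* LAST / DISC / SKIP */
	dword payload = entry & ~LI_PLCI_B_FLAG_MASK; /* remaining 29 bits */
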
-
-#define UDATA_REQUEST_SET_MIXER_COEFS_BRI 24
-#define UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC 25
-#define UDATA_REQUEST_SET_MIXER_COEFS_PRI_ASYN 26
-#define UDATA_INDICATION_MIXER_COEFS_SET 24
-
-#define MIXER_FEATURE_ENABLE_TX_DATA 0x0001
-#define MIXER_FEATURE_ENABLE_RX_DATA 0x0002
-
-#define MIXER_COEF_LINE_CHANNEL_MASK 0x1f
-#define MIXER_COEF_LINE_FROM_PC_FLAG 0x20
-#define MIXER_COEF_LINE_TO_PC_FLAG 0x40
-#define MIXER_COEF_LINE_ROW_FLAG 0x80
-
-#define UDATA_REQUEST_XCONNECT_FROM 28
-#define UDATA_INDICATION_XCONNECT_FROM 28
-#define UDATA_REQUEST_XCONNECT_TO 29
-#define UDATA_INDICATION_XCONNECT_TO 29
-
-#define XCONNECT_CHANNEL_PORT_B 0x0000
-#define XCONNECT_CHANNEL_PORT_PC 0x8000
-#define XCONNECT_CHANNEL_PORT_MASK 0x8000
-#define XCONNECT_CHANNEL_NUMBER_MASK 0x7fff
-#define XCONNECT_CHANNEL_PORT_COUNT 2
-
-#define XCONNECT_SUCCESS 0x0000
-#define XCONNECT_ERROR 0x0001
-
-
-/*------------------------------------------------------------------*/
-/* Echo canceller interface to IDI */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_ECHO_CANCELLER 0
-
-#define PRIV_SELECTOR_ECHO_CANCELLER 255
-
-#define EC_ENABLE_OPERATION 1
-#define EC_DISABLE_OPERATION 2
-#define EC_FREEZE_COEFFICIENTS 3
-#define EC_RESUME_COEFFICIENT_UPDATE 4
-#define EC_RESET_COEFFICIENTS 5
-
-#define EC_DISABLE_NON_LINEAR_PROCESSING 0x0001
-#define EC_DO_NOT_REQUIRE_REVERSALS 0x0002
-#define EC_DETECT_DISABLE_TONE 0x0004
-
-#define EC_SUCCESS 0
-#define EC_UNSUPPORTED_OPERATION 1
-
-#define EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ 1
-#define EC_BYPASS_DUE_TO_REVERSED_2100HZ 2
-#define EC_BYPASS_RELEASED 3
-
-#define DSP_CTRL_SET_LEC_PARAMETERS 0x05
-
-#define LEC_ENABLE_ECHO_CANCELLER 0x0001
-#define LEC_ENABLE_2100HZ_DETECTOR 0x0002
-#define LEC_REQUIRE_2100HZ_REVERSALS 0x0004
-#define LEC_MANUAL_DISABLE 0x0008
-#define LEC_ENABLE_NONLINEAR_PROCESSING 0x0010
-#define LEC_FREEZE_COEFFICIENTS 0x0020
-#define LEC_RESET_COEFFICIENTS 0x8000
-
-#define LEC_MAX_SUPPORTED_TAIL_LENGTH 32
-
-#define LEC_UDATA_INDICATION_DISABLE_DETECT 9
-
-#define LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ 0x00
-#define LEC_DISABLE_TYPE_REVERSED_2100HZ 0x01
-#define LEC_DISABLE_RELEASED 0x02
-
-
-/*------------------------------------------------------------------*/
-/* RTP interface to IDI */
-/*------------------------------------------------------------------*/
-
-
-#define B1_RTP 31
-#define B2_RTP 31
-#define B3_RTP 31
-
-#define PRIVATE_RTP 1
-
-#define RTP_PRIM_PAYLOAD_PCMU_8000 0
-#define RTP_PRIM_PAYLOAD_1016_8000 1
-#define RTP_PRIM_PAYLOAD_G726_32_8000 2
-#define RTP_PRIM_PAYLOAD_GSM_8000 3
-#define RTP_PRIM_PAYLOAD_G723_8000 4
-#define RTP_PRIM_PAYLOAD_DVI4_8000 5
-#define RTP_PRIM_PAYLOAD_DVI4_16000 6
-#define RTP_PRIM_PAYLOAD_LPC_8000 7
-#define RTP_PRIM_PAYLOAD_PCMA_8000 8
-#define RTP_PRIM_PAYLOAD_G722_16000 9
-#define RTP_PRIM_PAYLOAD_QCELP_8000 12
-#define RTP_PRIM_PAYLOAD_G728_8000 14
-#define RTP_PRIM_PAYLOAD_G729_8000 18
-#define RTP_PRIM_PAYLOAD_GSM_HR_8000 30
-#define RTP_PRIM_PAYLOAD_GSM_EFR_8000 31
-
-#define RTP_ADD_PAYLOAD_BASE 32
-#define RTP_ADD_PAYLOAD_RED 32
-#define RTP_ADD_PAYLOAD_CN_8000 33
-#define RTP_ADD_PAYLOAD_DTMF 34
-
-#define RTP_SUCCESS 0
-#define RTP_ERR_SSRC_OR_PAYLOAD_CHANGE 1
-
-#define UDATA_REQUEST_RTP_RECONFIGURE 64
-#define UDATA_INDICATION_RTP_CHANGE 65
-#define BUDATA_REQUEST_QUERY_RTCP_REPORT 1
-#define BUDATA_INDICATION_RTCP_REPORT 1
-
-#define RTP_CONNECT_OPTION_DISC_ON_SSRC_CHANGE 0x00000001L
-#define RTP_CONNECT_OPTION_DISC_ON_PT_CHANGE 0x00000002L
-#define RTP_CONNECT_OPTION_DISC_ON_UNKNOWN_PT 0x00000004L
-#define RTP_CONNECT_OPTION_NO_SILENCE_TRANSMIT 0x00010000L
-
-#define RTP_PAYLOAD_OPTION_VOICE_ACTIVITY_DETECT 0x0001
-#define RTP_PAYLOAD_OPTION_DISABLE_POST_FILTER 0x0002
-#define RTP_PAYLOAD_OPTION_G723_LOW_CODING_RATE 0x0100
-
-#define RTP_PACKET_FILTER_IGNORE_UNKNOWN_SSRC 0x00000001L
-
-#define RTP_CHANGE_FLAG_SSRC_CHANGE 0x00000001L
-#define RTP_CHANGE_FLAG_PAYLOAD_TYPE_CHANGE 0x00000002L
-#define RTP_CHANGE_FLAG_UNKNOWN_PAYLOAD_TYPE 0x00000004L
-
-
-/*------------------------------------------------------------------*/
-/* T.38 interface to IDI */
-/*------------------------------------------------------------------*/
-
-
-#define B1_T38 30
-#define B2_T38 30
-#define B3_T38 30
-
-#define PRIVATE_T38 2
-
-
-/*------------------------------------------------------------------*/
-/* PIAFS interface to IDI */
-/*------------------------------------------------------------------*/
-
-
-#define B1_PIAFS 29
-#define B2_PIAFS 29
-
-#define PRIVATE_PIAFS 29
-
-/*
-   B2 configuration for PIAFS:
-   +---------------------+------+-----------------------------------------+
-   | PIAFS Protocol      | byte | Bit 1 - Protocol Speed                  |
-   | Speed configuration |      |         0 - 32K                         |
-   |                     |      |         1 - 64K (default)               |
-   |                     |      | Bit 2 - Variable Protocol Speed         |
-   |                     |      |         0 - Speed is fixed              |
-   |                     |      |         1 - Speed is variable (default) |
-   +---------------------+------+-----------------------------------------+
-   | Direction           | word | Enable compression/decompression for    |
-   |                     |      | 0: all directions                       |
-   |                     |      | 1: disable outgoing data                |
-   |                     |      | 2: disable incoming data                |
-   |                     |      | 3: disable both directions (default)    |
-   +---------------------+------+-----------------------------------------+
-   | Number of code      | word | Parameter P1 of V.42bis in accordance   |
-   | words               |      | with V.42bis                            |
-   +---------------------+------+-----------------------------------------+
-   | Maximum String      | word | Parameter P2 of V.42bis in accordance   |
-   | Length              |      | with V.42bis                            |
-   +---------------------+------+-----------------------------------------+
-   | control (UDATA)     | byte | enable PIAFS control communication      |
-   | abilities           |      |                                         |
-   +---------------------+------+-----------------------------------------+
-*/
-#define PIAFS_UDATA_ABILITIES 0x80
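
As a worked example of the table above, a sketch that packs one such B2 configuration into a flat buffer. CAPI parameter words are assumed little-endian here, and the function itself is illustrative only:

	static int piafs_pack_b2_config(byte *buf, byte speed_bits, word direction,
					word p1, word p2, byte udata_abilities)
	{
		int i = 0;

		buf[i++] = speed_bits;               /* bit 1: 64K, bit 2: variable */
		buf[i++] = (byte)(direction & 0xff); /* compression directions */
		buf[i++] = (byte)(direction >> 8);
		buf[i++] = (byte)(p1 & 0xff);        /* V.42bis P1: number of code words */
		buf[i++] = (byte)(p1 >> 8);
		buf[i++] = (byte)(p2 & 0xff);        /* V.42bis P2: maximum string length */
		buf[i++] = (byte)(p2 >> 8);
		buf[i++] = udata_abilities;          /* e.g. PIAFS_UDATA_ABILITIES */
		return i;                            /* bytes written */
	}
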
-
-/*------------------------------------------------------------------*/
-/* FAX SUB/SEP/PWD extension */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_FAX_SUB_SEP_PWD 3
-
-
-
-/*------------------------------------------------------------------*/
-/* V.18 extension */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_V18 4
-
-
-
-/*------------------------------------------------------------------*/
-/* DTMF TONE extension */
-/*------------------------------------------------------------------*/
-
-
-#define DTMF_GET_SUPPORTED_DETECT_CODES 0xf8
-#define DTMF_GET_SUPPORTED_SEND_CODES 0xf9
-#define DTMF_LISTEN_TONE_START 0xfa
-#define DTMF_LISTEN_TONE_STOP 0xfb
-#define DTMF_SEND_TONE 0xfc
-#define DTMF_LISTEN_MF_START 0xfd
-#define DTMF_LISTEN_MF_STOP 0xfe
-#define DTMF_SEND_MF 0xff
-
-#define DTMF_MF_DIGIT_TONE_CODE_1 0x10
-#define DTMF_MF_DIGIT_TONE_CODE_2 0x11
-#define DTMF_MF_DIGIT_TONE_CODE_3 0x12
-#define DTMF_MF_DIGIT_TONE_CODE_4 0x13
-#define DTMF_MF_DIGIT_TONE_CODE_5 0x14
-#define DTMF_MF_DIGIT_TONE_CODE_6 0x15
-#define DTMF_MF_DIGIT_TONE_CODE_7 0x16
-#define DTMF_MF_DIGIT_TONE_CODE_8 0x17
-#define DTMF_MF_DIGIT_TONE_CODE_9 0x18
-#define DTMF_MF_DIGIT_TONE_CODE_0 0x19
-#define DTMF_MF_DIGIT_TONE_CODE_K1 0x1a
-#define DTMF_MF_DIGIT_TONE_CODE_K2 0x1b
-#define DTMF_MF_DIGIT_TONE_CODE_KP 0x1c
-#define DTMF_MF_DIGIT_TONE_CODE_S1 0x1d
-#define DTMF_MF_DIGIT_TONE_CODE_ST 0x1e
-
-#define DTMF_DIGIT_CODE_COUNT 16
-#define DTMF_MF_DIGIT_CODE_BASE DSP_DTMF_DIGIT_CODE_COUNT
-#define DTMF_MF_DIGIT_CODE_COUNT 15
-#define DTMF_TOTAL_DIGIT_CODE_COUNT (DSP_MF_DIGIT_CODE_BASE + DSP_MF_DIGIT_CODE_COUNT)
-
-#define DTMF_TONE_DIGIT_BASE 0x80
-
-#define DTMF_SIGNAL_NO_TONE (DTMF_TONE_DIGIT_BASE + 0)
-#define DTMF_SIGNAL_UNIDENTIFIED_TONE (DTMF_TONE_DIGIT_BASE + 1)
-
-#define DTMF_SIGNAL_DIAL_TONE (DTMF_TONE_DIGIT_BASE + 2)
-#define DTMF_SIGNAL_PABX_INTERNAL_DIAL_TONE (DTMF_TONE_DIGIT_BASE + 3)
-#define DTMF_SIGNAL_SPECIAL_DIAL_TONE (DTMF_TONE_DIGIT_BASE + 4) /* stutter dial tone */
-#define DTMF_SIGNAL_SECOND_DIAL_TONE (DTMF_TONE_DIGIT_BASE + 5)
-#define DTMF_SIGNAL_RINGING_TONE (DTMF_TONE_DIGIT_BASE + 6)
-#define DTMF_SIGNAL_SPECIAL_RINGING_TONE (DTMF_TONE_DIGIT_BASE + 7)
-#define DTMF_SIGNAL_BUSY_TONE (DTMF_TONE_DIGIT_BASE + 8)
-#define DTMF_SIGNAL_CONGESTION_TONE (DTMF_TONE_DIGIT_BASE + 9) /* reorder tone */
-#define DTMF_SIGNAL_SPECIAL_INFORMATION_TONE (DTMF_TONE_DIGIT_BASE + 10)
-#define DTMF_SIGNAL_COMFORT_TONE (DTMF_TONE_DIGIT_BASE + 11)
-#define DTMF_SIGNAL_HOLD_TONE (DTMF_TONE_DIGIT_BASE + 12)
-#define DTMF_SIGNAL_RECORD_TONE (DTMF_TONE_DIGIT_BASE + 13)
-#define DTMF_SIGNAL_CALLER_WAITING_TONE (DTMF_TONE_DIGIT_BASE + 14)
-#define DTMF_SIGNAL_CALL_WAITING_TONE (DTMF_TONE_DIGIT_BASE + 15)
-#define DTMF_SIGNAL_PAY_TONE (DTMF_TONE_DIGIT_BASE + 16)
-#define DTMF_SIGNAL_POSITIVE_INDICATION_TONE (DTMF_TONE_DIGIT_BASE + 17)
-#define DTMF_SIGNAL_NEGATIVE_INDICATION_TONE (DTMF_TONE_DIGIT_BASE + 18)
-#define DTMF_SIGNAL_WARNING_TONE (DTMF_TONE_DIGIT_BASE + 19)
-#define DTMF_SIGNAL_INTRUSION_TONE (DTMF_TONE_DIGIT_BASE + 20)
-#define DTMF_SIGNAL_CALLING_CARD_SERVICE_TONE (DTMF_TONE_DIGIT_BASE + 21)
-#define DTMF_SIGNAL_PAYPHONE_RECOGNITION_TONE (DTMF_TONE_DIGIT_BASE + 22)
-#define DTMF_SIGNAL_CPE_ALERTING_SIGNAL (DTMF_TONE_DIGIT_BASE + 23)
-#define DTMF_SIGNAL_OFF_HOOK_WARNING_TONE (DTMF_TONE_DIGIT_BASE + 24)
-
-#define DTMF_SIGNAL_INTERCEPT_TONE (DTMF_TONE_DIGIT_BASE + 63)
-
-#define DTMF_SIGNAL_MODEM_CALLING_TONE (DTMF_TONE_DIGIT_BASE + 64)
-#define DTMF_SIGNAL_FAX_CALLING_TONE (DTMF_TONE_DIGIT_BASE + 65)
-#define DTMF_SIGNAL_ANSWER_TONE (DTMF_TONE_DIGIT_BASE + 66)
-#define DTMF_SIGNAL_REVERSED_ANSWER_TONE (DTMF_TONE_DIGIT_BASE + 67)
-#define DTMF_SIGNAL_ANSAM_TONE (DTMF_TONE_DIGIT_BASE + 68)
-#define DTMF_SIGNAL_REVERSED_ANSAM_TONE (DTMF_TONE_DIGIT_BASE + 69)
-#define DTMF_SIGNAL_BELL103_ANSWER_TONE (DTMF_TONE_DIGIT_BASE + 70)
-#define DTMF_SIGNAL_FAX_FLAGS (DTMF_TONE_DIGIT_BASE + 71)
-#define DTMF_SIGNAL_G2_FAX_GROUP_ID (DTMF_TONE_DIGIT_BASE + 72)
-#define DTMF_SIGNAL_HUMAN_SPEECH (DTMF_TONE_DIGIT_BASE + 73)
-#define DTMF_SIGNAL_ANSWERING_MACHINE_390 (DTMF_TONE_DIGIT_BASE + 74)
-
-#define DTMF_MF_LISTEN_ACTIVE_FLAG 0x02
-#define DTMF_SEND_MF_FLAG 0x02
-#define DTMF_TONE_LISTEN_ACTIVE_FLAG 0x04
-#define DTMF_SEND_TONE_FLAG 0x04
-
-#define PRIVATE_DTMF_TONE 5
-
-
-/*------------------------------------------------------------------*/
-/* FAX paper format extension */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_FAX_PAPER_FORMATS 6
-
-
-
-/*------------------------------------------------------------------*/
-/* V.OWN extension */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_VOWN 7
-
-
-
-/*------------------------------------------------------------------*/
-/* FAX non-standard facilities extension */
-/*------------------------------------------------------------------*/
-
-
-#define PRIVATE_FAX_NONSTANDARD 8
-
-
-
-/*------------------------------------------------------------------*/
-/* Advanced voice */
-/*------------------------------------------------------------------*/
-
-#define ADV_VOICE_WRITE_ACTIVATION 0
-#define ADV_VOICE_WRITE_DEACTIVATION 1
-#define ADV_VOICE_WRITE_UPDATE 2
-
-#define ADV_VOICE_OLD_COEF_COUNT 6
-#define ADV_VOICE_NEW_COEF_BASE (ADV_VOICE_OLD_COEF_COUNT * sizeof(word))
-
-/*------------------------------------------------------------------*/
-/* B1 resource switching */
-/*------------------------------------------------------------------*/
-
-#define B1_FACILITY_LOCAL 0x01
-#define B1_FACILITY_MIXER 0x02
-#define B1_FACILITY_DTMFX 0x04
-#define B1_FACILITY_DTMFR 0x08
-#define B1_FACILITY_VOICE 0x10
-#define B1_FACILITY_EC 0x20
-
-#define ADJUST_B_MODE_SAVE 0x0001
-#define ADJUST_B_MODE_REMOVE_L23 0x0002
-#define ADJUST_B_MODE_SWITCH_L1 0x0004
-#define ADJUST_B_MODE_NO_RESOURCE 0x0008
-#define ADJUST_B_MODE_ASSIGN_L23 0x0010
-#define ADJUST_B_MODE_USER_CONNECT 0x0020
-#define ADJUST_B_MODE_CONNECT 0x0040
-#define ADJUST_B_MODE_RESTORE 0x0080
-
-#define ADJUST_B_START 0
-#define ADJUST_B_SAVE_MIXER_1 1
-#define ADJUST_B_SAVE_DTMF_1 2
-#define ADJUST_B_REMOVE_L23_1 3
-#define ADJUST_B_REMOVE_L23_2 4
-#define ADJUST_B_SAVE_EC_1 5
-#define ADJUST_B_SAVE_DTMF_PARAMETER_1 6
-#define ADJUST_B_SAVE_VOICE_1 7
-#define ADJUST_B_SWITCH_L1_1 8
-#define ADJUST_B_SWITCH_L1_2 9
-#define ADJUST_B_RESTORE_VOICE_1 10
-#define ADJUST_B_RESTORE_VOICE_2 11
-#define ADJUST_B_RESTORE_DTMF_PARAMETER_1 12
-#define ADJUST_B_RESTORE_DTMF_PARAMETER_2 13
-#define ADJUST_B_RESTORE_EC_1 14
-#define ADJUST_B_RESTORE_EC_2 15
-#define ADJUST_B_ASSIGN_L23_1 16
-#define ADJUST_B_ASSIGN_L23_2 17
-#define ADJUST_B_CONNECT_1 18
-#define ADJUST_B_CONNECT_2 19
-#define ADJUST_B_CONNECT_3 20
-#define ADJUST_B_CONNECT_4 21
-#define ADJUST_B_RESTORE_DTMF_1 22
-#define ADJUST_B_RESTORE_DTMF_2 23
-#define ADJUST_B_RESTORE_MIXER_1 24
-#define ADJUST_B_RESTORE_MIXER_2 25
-#define ADJUST_B_RESTORE_MIXER_3 26
-#define ADJUST_B_RESTORE_MIXER_4 27
-#define ADJUST_B_RESTORE_MIXER_5 28
-#define ADJUST_B_RESTORE_MIXER_6 29
-#define ADJUST_B_RESTORE_MIXER_7 30
-#define ADJUST_B_END 31
-
-/*------------------------------------------------------------------*/
-/* XON Protocol def's */
-/*------------------------------------------------------------------*/
-#define N_CH_XOFF 0x01
-#define N_XON_SENT 0x02
-#define N_XON_REQ 0x04
-#define N_XON_CONNECT_IND 0x08
-#define N_RX_FLOW_CONTROL_MASK 0x3f
-#define N_OK_FC_PENDING 0x80
-#define N_TX_FLOW_CONTROL_MASK 0xc0
-
-/*------------------------------------------------------------------*/
-/* NCPI state */
-/*------------------------------------------------------------------*/
-#define NCPI_VALID_CONNECT_B3_IND 0x01
-#define NCPI_VALID_CONNECT_B3_ACT 0x02
-#define NCPI_VALID_DISC_B3_IND 0x04
-#define NCPI_CONNECT_B3_ACT_SENT 0x08
-#define NCPI_NEGOTIATE_B3_SENT 0x10
-#define NCPI_MDM_CTS_ON_RECEIVED 0x40
-#define NCPI_MDM_DCD_ON_RECEIVED 0x80
-
-/*------------------------------------------------------------------*/
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
deleted file mode 100644
index 5a95587b3117..000000000000
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/* $Id: divamnt.c,v 1.32.6.10 2005/02/11 19:40:25 armin Exp $
- *
- * Driver for Eicon DIVA Server ISDN cards.
- * Maint module
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-
-#include "platform.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "debug_if.h"
-
-static DEFINE_MUTEX(maint_mutex);
-static char *main_revision = "$Revision: 1.32.6.10 $";
-
-static int major;
-
-MODULE_DESCRIPTION("Maint driver for Eicon DIVA Server cards");
-MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
-MODULE_SUPPORTED_DEVICE("DIVA card driver");
-MODULE_LICENSE("GPL");
-
-static int buffer_length = 128;
-module_param(buffer_length, int, 0);
-static unsigned long diva_dbg_mem = 0;
-module_param(diva_dbg_mem, ulong, 0);
-
-static char *DRIVERNAME =
- "Eicon DIVA - MAINT module (http://www.melware.net)";
-static char *DRIVERLNAME = "diva_mnt";
-static char *DEVNAME = "DivasMAINT";
-char *DRIVERRELEASE_MNT = "2.0";
-
-static wait_queue_head_t msgwaitq;
-static unsigned long opened;
-
-extern int mntfunc_init(int *, void **, unsigned long);
-extern void mntfunc_finit(void);
-extern int maint_read_write(void __user *buf, int count);
-
-/*
- * helper functions
- */
-static char *getrev(const char *revision)
-{
- char *rev;
- char *p;
-
- if ((p = strchr(revision, ':'))) {
- rev = p + 2;
- p = strchr(rev, '$');
- *--p = 0;
- } else
- rev = "1.0";
-
- return rev;
-}
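
Note that getrev() truncates the string in place, which is why its callers first copy the CVS keyword into a scratch buffer. An illustrative call:

	char tmp[] = "$Revision: 1.32.6.10 $";      /* writable scratch copy */
	printk(KERN_INFO "Rev: %s\n", getrev(tmp)); /* prints "Rev: 1.32.6.10" */
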
-
-/*
- * kernel/user space copy functions
- */
-int diva_os_copy_to_user(void *os_handle, void __user *dst, const void *src,
- int length)
-{
- return (copy_to_user(dst, src, length));
-}
-int diva_os_copy_from_user(void *os_handle, void *dst, const void __user *src,
- int length)
-{
- return (copy_from_user(dst, src, length));
-}
-
-/*
- * get time
- */
-void diva_os_get_time(dword *sec, dword *usec)
-{
- struct timespec64 time;
-
- ktime_get_ts64(&time);
-
- *sec = (dword) time.tv_sec;
- *usec = (dword) (time.tv_nsec / NSEC_PER_USEC);
-}
-
-/*
- * device node operations
- */
-static __poll_t maint_poll(struct file *file, poll_table *wait)
-{
- __poll_t mask = 0;
-
- poll_wait(file, &msgwaitq, wait);
- mask = EPOLLOUT | EPOLLWRNORM;
- if (file->private_data || diva_dbg_q_length()) {
- mask |= EPOLLIN | EPOLLRDNORM;
- }
- return (mask);
-}
-
-static int maint_open(struct inode *ino, struct file *filep)
-{
- int ret;
-
- mutex_lock(&maint_mutex);
- /* only one open is allowed, so we test
- it atomically */
- if (test_and_set_bit(0, &opened))
- ret = -EBUSY;
- else {
- filep->private_data = NULL;
- ret = nonseekable_open(ino, filep);
- }
- mutex_unlock(&maint_mutex);
- return ret;
-}
-
-static int maint_close(struct inode *ino, struct file *filep)
-{
- if (filep->private_data) {
- diva_os_free(0, filep->private_data);
- filep->private_data = NULL;
- }
-
- /* clear 'used' flag */
- clear_bit(0, &opened);
-
- return (0);
-}
-
-static ssize_t divas_maint_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return (maint_read_write((char __user *) buf, (int) count));
-}
-
-static ssize_t divas_maint_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return (maint_read_write(buf, (int) count));
-}
-
-static const struct file_operations divas_maint_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = divas_maint_read,
- .write = divas_maint_write,
- .poll = maint_poll,
- .open = maint_open,
- .release = maint_close
-};
-
-static void divas_maint_unregister_chrdev(void)
-{
- unregister_chrdev(major, DEVNAME);
-}
-
-static int __init divas_maint_register_chrdev(void)
-{
- if ((major = register_chrdev(0, DEVNAME, &divas_maint_fops)) < 0)
- {
- printk(KERN_ERR "%s: failed to create /dev entry.\n",
- DRIVERLNAME);
- return (0);
- }
-
- return (1);
-}
-
-/*
- * wake up reader
- */
-void diva_maint_wakeup_read(void)
-{
- wake_up_interruptible(&msgwaitq);
-}
-
-/*
- * Driver Load
- */
-static int __init maint_init(void)
-{
- char tmprev[50];
- int ret = 0;
- void *buffer = NULL;
-
- init_waitqueue_head(&msgwaitq);
-
- printk(KERN_INFO "%s\n", DRIVERNAME);
- printk(KERN_INFO "%s: Rel:%s Rev:", DRIVERLNAME, DRIVERRELEASE_MNT);
- strcpy(tmprev, main_revision);
- printk("%s Build: %s \n", getrev(tmprev), DIVA_BUILD);
-
- if (!divas_maint_register_chrdev()) {
- ret = -EIO;
- goto out;
- }
-
- if (!(mntfunc_init(&buffer_length, &buffer, diva_dbg_mem))) {
- printk(KERN_ERR "%s: failed to connect to DIDD.\n",
- DRIVERLNAME);
- divas_maint_unregister_chrdev();
- ret = -EIO;
- goto out;
- }
-
- printk(KERN_INFO "%s: trace buffer = %p - %d kBytes, %s (Major: %d)\n",
- DRIVERLNAME, buffer, (buffer_length / 1024),
- (diva_dbg_mem == 0) ? "internal" : "external", major);
-
-out:
- return (ret);
-}
-
-/*
-** Driver Unload
-*/
-static void __exit maint_exit(void)
-{
- divas_maint_unregister_chrdev();
- mntfunc_finit();
-
- printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
-}
-
-module_init(maint_init);
-module_exit(maint_exit);
diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
deleted file mode 100644
index 4be5f8814777..000000000000
--- a/drivers/isdn/hardware/eicon/divasfunc.c
+++ /dev/null
@@ -1,237 +0,0 @@
-/* $Id: divasfunc.c,v 1.23.4.2 2004/08/28 20:03:53 armin Exp $
- *
- * Low level driver for Eicon DIVA Server ISDN cards.
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "di.h"
-#include "io.h"
-#include "divasync.h"
-#include "diva.h"
-#include "xdi_vers.h"
-
-#define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-static int debugmask;
-
-extern void DIVA_DIDD_Read(void *, int);
-
-extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
-
-extern char *DRIVERRELEASE_DIVAS;
-
-static dword notify_handle;
-static DESCRIPTOR DAdapter;
-static DESCRIPTOR MAdapter;
-
-/* --------------------------------------------------------------------------
- MAINT driver connector section
- -------------------------------------------------------------------------- */
-static void no_printf(unsigned char *x, ...)
-{
- /* dummy debug function */
-}
-
-#include "debuglib.c"
-
-/*
- * get the adapters serial number
- */
-void diva_get_vserial_number(PISDN_ADAPTER IoAdapter, char *buf)
-{
- int contr = 0;
-
- if ((contr = ((IoAdapter->serialNo & 0xff000000) >> 24))) {
- sprintf(buf, "%d-%d",
- IoAdapter->serialNo & 0x00ffffff, contr + 1);
- } else {
- sprintf(buf, "%d", IoAdapter->serialNo);
- }
-}
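
A worked example with an illustrative value: a serialNo of 0x01000457 carries controller index 1 in its top byte, so the function prints the low 24 bits plus the controller number:

	char buf[16];
	/* illustrative value only; real serial numbers come from the adapter */
	IoAdapter->serialNo = 0x01000457;
	diva_get_vserial_number(IoAdapter, buf);   /* buf = "1111-2" (0x457 = 1111) */
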
-
-/*
- * register a new adapter
- */
-void diva_xdi_didd_register_adapter(int card)
-{
- DESCRIPTOR d;
- IDI_SYNC_REQ req;
-
- if (card && ((card - 1) < MAX_ADAPTER) &&
- IoAdapters[card - 1] && Requests[card - 1]) {
- d.type = IoAdapters[card - 1]->Properties.DescType;
- d.request = Requests[card - 1];
- d.channels = IoAdapters[card - 1]->Properties.Channels;
- d.features = IoAdapters[card - 1]->Properties.Features;
- DBG_TRC(("DIDD register A(%d) channels=%d", card,
- d.channels))
- /* workaround for different Name in structure */
- strlcpy(IoAdapters[card - 1]->Name,
- IoAdapters[card - 1]->Properties.Name,
- sizeof(IoAdapters[card - 1]->Name));
- req.didd_remove_adapter.e.Req = 0;
- req.didd_add_adapter.e.Rc = IDI_SYNC_REQ_DIDD_ADD_ADAPTER;
- req.didd_add_adapter.info.descriptor = (void *) &d;
- DAdapter.request((ENTITY *)&req);
- if (req.didd_add_adapter.e.Rc != 0xff) {
- DBG_ERR(("DIDD register A(%d) failed !", card))
- }
- IoAdapters[card - 1]->os_trap_nfy_Fnc = NULL;
- }
-}
-
-/*
- * remove an adapter
- */
-void diva_xdi_didd_remove_adapter(int card)
-{
- IDI_SYNC_REQ req;
- ADAPTER *a = &IoAdapters[card - 1]->a;
-
- IoAdapters[card - 1]->os_trap_nfy_Fnc = NULL;
- DBG_TRC(("DIDD de-register A(%d)", card))
- req.didd_remove_adapter.e.Req = 0;
- req.didd_remove_adapter.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER;
- req.didd_remove_adapter.info.p_request =
- (IDI_CALL) Requests[card - 1];
- DAdapter.request((ENTITY *)&req);
- memset(&(a->IdTable), 0x00, 256);
-}
-
-/*
- * start debug
- */
-static void start_dbg(void)
-{
- DbgRegister("DIVAS", DRIVERRELEASE_DIVAS, (debugmask) ? debugmask : DBG_DEFAULT);
- DBG_LOG(("DIVA ISDNXDI BUILD (%s[%s])",
- DIVA_BUILD, diva_xdi_common_code_build))
- }
-
-/*
- * stop debug
- */
-static void stop_dbg(void)
-{
- DbgDeregister();
- memset(&MAdapter, 0, sizeof(MAdapter));
- dprintf = no_printf;
-}
-
-/*
- * didd callback function
- */
-static void *didd_callback(void *context, DESCRIPTOR *adapter,
- int removal)
-{
- if (adapter->type == IDI_DADAPTER) {
- DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."));
- return (NULL);
- }
-
- if (adapter->type == IDI_DIMAINT) {
- if (removal) {
- stop_dbg();
- } else {
- memcpy(&MAdapter, adapter, sizeof(MAdapter));
- dprintf = (DIVA_DI_PRINTF) MAdapter.request;
- start_dbg();
- }
- }
- return (NULL);
-}
-
-/*
- * connect to didd
- */
-static int __init connect_didd(void)
-{
- int x = 0;
- int dadapter = 0;
- IDI_SYNC_REQ req;
- DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
-
- DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
-
- for (x = 0; x < MAX_DESCRIPTORS; x++) {
- if (DIDD_Table[x].type == IDI_DADAPTER) { /* DADAPTER found */
- dadapter = 1;
- memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter));
- req.didd_notify.e.Req = 0;
- req.didd_notify.e.Rc =
- IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
- req.didd_notify.info.callback = (void *)didd_callback;
- req.didd_notify.info.context = NULL;
- DAdapter.request((ENTITY *)&req);
- if (req.didd_notify.e.Rc != 0xff) {
- stop_dbg();
- return (0);
- }
- notify_handle = req.didd_notify.info.handle;
- } else if (DIDD_Table[x].type == IDI_DIMAINT) { /* MAINT found */
- memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter));
- dprintf = (DIVA_DI_PRINTF) MAdapter.request;
- start_dbg();
- }
- }
-
- if (!dadapter) {
- stop_dbg();
- }
-
- return (dadapter);
-}
-
-/*
- * disconnect from didd
- */
-static void disconnect_didd(void)
-{
- IDI_SYNC_REQ req;
-
- stop_dbg();
-
- req.didd_notify.e.Req = 0;
- req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
- req.didd_notify.info.handle = notify_handle;
- DAdapter.request((ENTITY *)&req);
-}
-
-/*
- * init
- */
-int __init divasfunc_init(int dbgmask)
-{
- char *version;
-
- debugmask = dbgmask;
-
- if (!connect_didd()) {
- DBG_ERR(("divasfunc: failed to connect to DIDD."))
- return (0);
- }
-
- version = diva_xdi_common_code_build;
-
- divasa_xdi_driver_entry();
-
- return (1);
-}
-
-/*
- * exit
- */
-void divasfunc_exit(void)
-{
- divasa_xdi_driver_unload();
- disconnect_didd();
-}
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
deleted file mode 100644
index e7081e0c0e35..000000000000
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ /dev/null
@@ -1,562 +0,0 @@
-/* $Id: divasi.c,v 1.25.6.2 2005/01/31 12:22:20 armin Exp $
- *
- * Driver for Eicon DIVA Server ISDN cards.
- * User Mode IDI Interface
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/skbuff.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-
-#include "platform.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "um_xdi.h"
-#include "um_idi.h"
-
-static char *main_revision = "$Revision: 1.25.6.2 $";
-
-static int major;
-
-MODULE_DESCRIPTION("User IDI Interface for Eicon ISDN cards");
-MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
-MODULE_SUPPORTED_DEVICE("DIVA card driver");
-MODULE_LICENSE("GPL");
-
-typedef struct _diva_um_idi_os_context {
- wait_queue_head_t read_wait;
- wait_queue_head_t close_wait;
- struct timer_list diva_timer_id;
- int aborted;
- int adapter_nr;
-} diva_um_idi_os_context_t;
-
-static char *DRIVERNAME = "Eicon DIVA - User IDI (http://www.melware.net)";
-static char *DRIVERLNAME = "diva_idi";
-static char *DEVNAME = "DivasIDI";
-char *DRIVERRELEASE_IDI = "2.0";
-
-extern int idifunc_init(void);
-extern void idifunc_finit(void);
-
-/*
- * helper functions
- */
-static char *getrev(const char *revision)
-{
- char *rev;
- char *p;
- if ((p = strchr(revision, ':'))) {
- rev = p + 2;
- p = strchr(rev, '$');
- *--p = 0;
- } else
- rev = "1.0";
- return rev;
-}
-
-/*
- * LOCALS
- */
-static ssize_t um_idi_read(struct file *file, char __user *buf, size_t count,
- loff_t *offset);
-static ssize_t um_idi_write(struct file *file, const char __user *buf,
- size_t count, loff_t *offset);
-static __poll_t um_idi_poll(struct file *file, poll_table *wait);
-static int um_idi_open(struct inode *inode, struct file *file);
-static int um_idi_release(struct inode *inode, struct file *file);
-static int remove_entity(void *entity);
-static void diva_um_timer_function(struct timer_list *t);
-
-/*
- * proc entry
- */
-extern struct proc_dir_entry *proc_net_eicon;
-static struct proc_dir_entry *um_idi_proc_entry = NULL;
-
-static int um_idi_proc_show(struct seq_file *m, void *v)
-{
- char tmprev[32];
-
- seq_printf(m, "%s\n", DRIVERNAME);
- seq_printf(m, "name : %s\n", DRIVERLNAME);
- seq_printf(m, "release : %s\n", DRIVERRELEASE_IDI);
- strcpy(tmprev, main_revision);
- seq_printf(m, "revision : %s\n", getrev(tmprev));
- seq_printf(m, "build : %s\n", DIVA_BUILD);
- seq_printf(m, "major : %d\n", major);
-
- return 0;
-}
-
-static int __init create_um_idi_proc(void)
-{
- um_idi_proc_entry = proc_create_single(DRIVERLNAME, S_IRUGO,
- proc_net_eicon, um_idi_proc_show);
- if (!um_idi_proc_entry)
- return (0);
- return (1);
-}
-
-static void remove_um_idi_proc(void)
-{
- if (um_idi_proc_entry) {
- remove_proc_entry(DRIVERLNAME, proc_net_eicon);
- um_idi_proc_entry = NULL;
- }
-}
-
-static const struct file_operations divas_idi_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = um_idi_read,
- .write = um_idi_write,
- .poll = um_idi_poll,
- .open = um_idi_open,
- .release = um_idi_release
-};
-
-static void divas_idi_unregister_chrdev(void)
-{
- unregister_chrdev(major, DEVNAME);
-}
-
-static int __init divas_idi_register_chrdev(void)
-{
- if ((major = register_chrdev(0, DEVNAME, &divas_idi_fops)) < 0)
- {
- printk(KERN_ERR "%s: failed to create /dev entry.\n",
- DRIVERLNAME);
- return (0);
- }
-
- return (1);
-}
-
-/*
-** Driver Load
-*/
-static int __init divasi_init(void)
-{
- char tmprev[50];
- int ret = 0;
-
- printk(KERN_INFO "%s\n", DRIVERNAME);
- printk(KERN_INFO "%s: Rel:%s Rev:", DRIVERLNAME, DRIVERRELEASE_IDI);
- strcpy(tmprev, main_revision);
- printk("%s Build: %s\n", getrev(tmprev), DIVA_BUILD);
-
- if (!divas_idi_register_chrdev()) {
- ret = -EIO;
- goto out;
- }
-
- if (!create_um_idi_proc()) {
- divas_idi_unregister_chrdev();
- printk(KERN_ERR "%s: failed to create proc entry.\n",
- DRIVERLNAME);
- ret = -EIO;
- goto out;
- }
-
- if (!(idifunc_init())) {
- remove_um_idi_proc();
- divas_idi_unregister_chrdev();
- printk(KERN_ERR "%s: failed to connect to DIDD.\n",
- DRIVERLNAME);
- ret = -EIO;
- goto out;
- }
- printk(KERN_INFO "%s: started with major %d\n", DRIVERLNAME, major);
-
-out:
- return (ret);
-}
-
-
-/*
-** Driver Unload
-*/
-static void __exit divasi_exit(void)
-{
- idifunc_finit();
- remove_um_idi_proc();
- divas_idi_unregister_chrdev();
-
- printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
-}
-
-module_init(divasi_init);
-module_exit(divasi_exit);
-
-
-/*
- * FILE OPERATIONS
- */
-
-static int
-divas_um_idi_copy_to_user(void *os_handle, void *dst, const void *src,
- int length)
-{
- memcpy(dst, src, length);
- return (length);
-}
-
-static ssize_t
-um_idi_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
-{
- diva_um_idi_os_context_t *p_os;
- int ret = -EINVAL;
- void *data;
-
- if (!file->private_data) {
- return (-ENODEV);
- }
-
- p_os = (diva_um_idi_os_context_t *)
- diva_um_id_get_os_context(file->private_data);
- if (!p_os) {
- return (-ENODEV);
- }
- if (p_os->aborted) {
- return (-ENODEV);
- }
-
- if (!(data = diva_os_malloc(0, count))) {
- return (-ENOMEM);
- }
-
- ret = diva_um_idi_read(file->private_data,
- file, data, count,
- divas_um_idi_copy_to_user);
- switch (ret) {
- case 0: /* no message available */
- ret = (-EAGAIN);
- break;
- case (-1): /* adapter was removed */
- ret = (-ENODEV);
- break;
- case (-2): /* message_length > length of user buffer */
- ret = (-EFAULT);
- break;
- }
-
- if (ret > 0) {
- if (copy_to_user(buf, data, ret)) {
- ret = (-EFAULT);
- }
- }
-
- diva_os_free(0, data);
- DBG_TRC(("read: ret %d", ret));
- return (ret);
-}
-
-
-static int
-divas_um_idi_copy_from_user(void *os_handle, void *dst, const void *src,
- int length)
-{
- memcpy(dst, src, length);
- return (length);
-}
-
-static int um_idi_open_adapter(struct file *file, int adapter_nr)
-{
- diva_um_idi_os_context_t *p_os;
- void *e =
- divas_um_idi_create_entity((dword) adapter_nr, (void *) file);
-
- if (!(file->private_data = e)) {
- return (0);
- }
- p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(e);
- init_waitqueue_head(&p_os->read_wait);
- init_waitqueue_head(&p_os->close_wait);
- timer_setup(&p_os->diva_timer_id, diva_um_timer_function, 0);
- p_os->aborted = 0;
- p_os->adapter_nr = adapter_nr;
- return (1);
-}
-
-static ssize_t
-um_idi_write(struct file *file, const char __user *buf, size_t count,
- loff_t *offset)
-{
- diva_um_idi_os_context_t *p_os;
- int ret = -EINVAL;
- void *data;
- int adapter_nr = 0;
-
- if (!file->private_data) {
- /* the first write() selects the adapter_nr */
- if (count == sizeof(int)) {
- if (copy_from_user
- ((void *) &adapter_nr, buf,
- count)) return (-EFAULT);
- if (!(um_idi_open_adapter(file, adapter_nr)))
- return (-ENODEV);
- return (count);
- } else
- return (-ENODEV);
- }
-
- p_os = (diva_um_idi_os_context_t *)
- diva_um_id_get_os_context(file->private_data);
- if (!p_os) {
- return (-ENODEV);
- }
- if (p_os->aborted) {
- return (-ENODEV);
- }
-
- if (!(data = diva_os_malloc(0, count))) {
- return (-ENOMEM);
- }
-
- if (copy_from_user(data, buf, count)) {
- ret = -EFAULT;
- } else {
- ret = diva_um_idi_write(file->private_data,
- file, data, count,
- divas_um_idi_copy_from_user);
- switch (ret) {
- case 0: /* no space available */
- ret = (-EAGAIN);
- break;
- case (-1): /* adapter was removed */
- ret = (-ENODEV);
- break;
- case (-2): /* length of user buffer > max message_length */
- ret = (-EFAULT);
- break;
- }
- }
- diva_os_free(0, data);
- DBG_TRC(("write: ret %d", ret));
- return (ret);
-}
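
The adapter-selection handshake above implies a simple user-space protocol: open the device node, write a single int carrying the adapter number, then exchange IDI messages with further write()/read() calls. A hedged user-space sketch; the /dev path is an assumption, since node creation is left to udev or setup scripts:

	#include <fcntl.h>
	#include <unistd.h>

	int open_idi_adapter(int adapter_nr)
	{
		int fd = open("/dev/DivasIDI", O_RDWR);  /* device path: assumption */

		if (fd < 0)
			return -1;
		/* the very first write() must be exactly sizeof(int) and
		   selects the adapter; afterwards read()/write() carry
		   IDI messages */
		if (write(fd, &adapter_nr, sizeof(adapter_nr)) != sizeof(adapter_nr)) {
			close(fd);
			return -1;
		}
		return fd;
	}
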
-
-static __poll_t um_idi_poll(struct file *file, poll_table *wait)
-{
- diva_um_idi_os_context_t *p_os;
-
- if (!file->private_data) {
- return (EPOLLERR);
- }
-
- if ((!(p_os =
- (diva_um_idi_os_context_t *)
- diva_um_id_get_os_context(file->private_data)))
- || p_os->aborted) {
- return (EPOLLERR);
- }
-
- poll_wait(file, &p_os->read_wait, wait);
-
- if (p_os->aborted) {
- return (EPOLLERR);
- }
-
- switch (diva_user_mode_idi_ind_ready(file->private_data, file)) {
- case (-1):
- return (EPOLLERR);
-
- case 0:
- return (0);
- }
-
- return (EPOLLIN | EPOLLRDNORM);
-}
-
-static int um_idi_open(struct inode *inode, struct file *file)
-{
- return (0);
-}
-
-
-static int um_idi_release(struct inode *inode, struct file *file)
-{
- diva_um_idi_os_context_t *p_os;
- unsigned int adapter_nr;
- int ret = 0;
-
- if (!(file->private_data)) {
- ret = -ENODEV;
- goto out;
- }
-
- if (!(p_os =
- (diva_um_idi_os_context_t *) diva_um_id_get_os_context(file->private_data))) {
- ret = -ENODEV;
- goto out;
- }
-
- adapter_nr = p_os->adapter_nr;
-
- if ((ret = remove_entity(file->private_data))) {
- goto out;
- }
-
- if (divas_um_idi_delete_entity
- ((int) adapter_nr, file->private_data)) {
- ret = -ENODEV;
- goto out;
- }
-
-out:
- return (ret);
-}
-
-int diva_os_get_context_size(void)
-{
- return (sizeof(diva_um_idi_os_context_t));
-}
-
-void diva_os_wakeup_read(void *os_context)
-{
- diva_um_idi_os_context_t *p_os =
- (diva_um_idi_os_context_t *) os_context;
- wake_up_interruptible(&p_os->read_wait);
-}
-
-void diva_os_wakeup_close(void *os_context)
-{
- diva_um_idi_os_context_t *p_os =
- (diva_um_idi_os_context_t *) os_context;
- wake_up_interruptible(&p_os->close_wait);
-}
-
-static
-void diva_um_timer_function(struct timer_list *t)
-{
- diva_um_idi_os_context_t *p_os = from_timer(p_os, t, diva_timer_id);
-
- p_os->aborted = 1;
- wake_up_interruptible(&p_os->read_wait);
- wake_up_interruptible(&p_os->close_wait);
- DBG_ERR(("entity removal watchdog"))
- }
-
-/*
-** If the application exits without removing the entity, this function
-** removes the entity and blocks until removal is complete
-*/
-static int remove_entity(void *entity)
-{
- struct task_struct *curtask = current;
- diva_um_idi_os_context_t *p_os;
-
- diva_um_idi_stop_wdog(entity);
-
- if (!entity) {
- DBG_FTL(("Zero entity on remove"))
- return (0);
- }
-
- if (!(p_os =
- (diva_um_idi_os_context_t *)
- diva_um_id_get_os_context(entity))) {
- DBG_FTL(("Zero entity os context on remove"))
- return (0);
- }
-
- if (!divas_um_idi_entity_assigned(entity) || p_os->aborted) {
- /*
-		  Entity is not assigned, so it can be removed right away
- */
- return (0);
- }
-
- DBG_TRC(("E(%08x) check remove", entity))
-
- /*
-	  If the adapter does not answer the remove request within
-	  10 seconds, the adapter is considered dead
- */
- diva_um_idi_start_wdog(entity);
-
- {
- DECLARE_WAITQUEUE(wait, curtask);
-
- add_wait_queue(&p_os->close_wait, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (!divas_um_idi_entity_start_remove(entity)
- || p_os->aborted) {
- break;
- }
- schedule();
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&p_os->close_wait, &wait);
- }
-
- DBG_TRC(("E(%08x) start remove", entity))
- {
- DECLARE_WAITQUEUE(wait, curtask);
-
- add_wait_queue(&p_os->close_wait, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (!divas_um_idi_entity_assigned(entity)
- || p_os->aborted) {
- break;
- }
- schedule();
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&p_os->close_wait, &wait);
- }
-
- DBG_TRC(("E(%08x) remove complete, aborted:%d", entity,
- p_os->aborted))
-
- diva_um_idi_stop_wdog(entity);
-
- p_os->aborted = 0;
-
- return (0);
-}
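
The two wait loops above implement a two-phase removal: the first loop
schedules until divas_um_idi_entity_start_remove() returns zero or the
entity is marked aborted; the second until divas_um_idi_entity_assigned()
returns zero or the entity is aborted. Both phases are bounded by the
watchdog armed just before the wait: if the adapter never answers,
diva_um_timer_function() fires after 10 seconds, sets 'aborted' and wakes
both wait queues, so neither loop can block forever.
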
-
-/*
- * timer watchdog
- */
-void diva_um_idi_start_wdog(void *entity)
-{
- diva_um_idi_os_context_t *p_os;
-
- if (entity &&
- ((p_os =
- (diva_um_idi_os_context_t *)
- diva_um_id_get_os_context(entity)))) {
- mod_timer(&p_os->diva_timer_id, jiffies + 10 * HZ);
- }
-}
-
-void diva_um_idi_stop_wdog(void *entity)
-{
- diva_um_idi_os_context_t *p_os;
-
- if (entity &&
- ((p_os =
- (diva_um_idi_os_context_t *)
- diva_um_id_get_os_context(entity)))) {
- del_timer(&p_os->diva_timer_id);
- }
-}
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
deleted file mode 100644
index b6a3950b2564..000000000000
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ /dev/null
@@ -1,848 +0,0 @@
-/* $Id: divasmain.c,v 1.55.4.6 2005/02/09 19:28:20 armin Exp $
- *
- * Low level driver for Eicon DIVA Server ISDN cards.
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
-#include <asm/io.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/poll.h>
-#include <linux/kmod.h>
-
-#include "platform.h"
-#undef ID_MASK
-#undef N_DATA
-#include "pc.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "diva.h"
-#include "di.h"
-#include "io.h"
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "xdi_vers.h"
-#include "diva_dma.h"
-#include "diva_pci.h"
-
-static char *main_revision = "$Revision: 1.55.4.6 $";
-
-static int major;
-
-static int dbgmask;
-
-MODULE_DESCRIPTION("Kernel driver for Eicon DIVA Server cards");
-MODULE_AUTHOR("Cytronics & Melware, Eicon Networks");
-MODULE_LICENSE("GPL");
-
-module_param(dbgmask, int, 0);
-MODULE_PARM_DESC(dbgmask, "initial debug mask");
-
-static char *DRIVERNAME =
- "Eicon DIVA Server driver (http://www.melware.net)";
-static char *DRIVERLNAME = "divas";
-static char *DEVNAME = "Divas";
-char *DRIVERRELEASE_DIVAS = "2.0";
-
-extern irqreturn_t diva_os_irq_wrapper(int irq, void *context);
-extern int create_divas_proc(void);
-extern void remove_divas_proc(void);
-extern void diva_get_vserial_number(PISDN_ADAPTER IoAdapter, char *buf);
-extern int divasfunc_init(int dbgmask);
-extern void divasfunc_exit(void);
-
-typedef struct _diva_os_thread_dpc {
- struct tasklet_struct divas_task;
- diva_os_soft_isr_t *psoft_isr;
-} diva_os_thread_dpc_t;
-
-/* --------------------------------------------------------------------------
- PCI driver interface section
- -------------------------------------------------------------------------- */
-/*
- vendor, device Vendor and device ID to match (or PCI_ANY_ID)
- subvendor, Subsystem vendor and device ID to match (or PCI_ANY_ID)
- subdevice
- class, Device class to match. The class_mask tells which bits
- class_mask of the class are honored during the comparison.
- driver_data Data private to the driver.
-*/
-
-#if !defined(PCI_DEVICE_ID_EICON_MAESTRAP_2)
-#define PCI_DEVICE_ID_EICON_MAESTRAP_2 0xE015
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_4BRI_VOIP)
-#define PCI_DEVICE_ID_EICON_4BRI_VOIP 0xE016
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_4BRI_2_VOIP)
-#define PCI_DEVICE_ID_EICON_4BRI_2_VOIP 0xE017
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_BRI2M_2)
-#define PCI_DEVICE_ID_EICON_BRI2M_2 0xE018
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_MAESTRAP_2_VOIP)
-#define PCI_DEVICE_ID_EICON_MAESTRAP_2_VOIP 0xE019
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_2F)
-#define PCI_DEVICE_ID_EICON_2F 0xE01A
-#endif
-
-#if !defined(PCI_DEVICE_ID_EICON_BRI2M_2_VOIP)
-#define PCI_DEVICE_ID_EICON_BRI2M_2_VOIP 0xE01B
-#endif
-
-/*
- This table should be sorted by PCI device ID
-*/
-static const struct pci_device_id divas_pci_tbl[] = {
- /* Diva Server BRI-2M PCI 0xE010 */
- { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA),
- CARDTYPE_MAESTRA_PCI },
- /* Diva Server 4BRI-8M PCI 0xE012 */
- { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAQ),
- CARDTYPE_DIVASRV_Q_8M_PCI },
- /* Diva Server 4BRI-8M 2.0 PCI 0xE013 */
- { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAQ_U),
- CARDTYPE_DIVASRV_Q_8M_V2_PCI },
- /* Diva Server PRI-30M PCI 0xE014 */
- { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAP),
- CARDTYPE_DIVASRV_P_30M_PCI },
- /* Diva Server PRI 2.0 adapter 0xE015 */
- { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAP_2),
- CARDTYPE_DIVASRV_P_30M_V2_PCI },
- /* Diva Server Voice 4BRI-8M PCI 0xE016 */
- { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_4BRI_VOIP),
- CARDTYPE_DIVASRV_VOICE_Q_8M_PCI },
- /* Diva Server Voice 4BRI-8M 2.0 PCI 0xE017 */
- { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_4BRI_2_VOIP),
- CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI },
- /* Diva Server BRI-2M 2.0 PCI 0xE018 */
- { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_BRI2M_2),
- CARDTYPE_DIVASRV_B_2M_V2_PCI },
- /* Diva Server Voice PRI 2.0 PCI 0xE019 */
- { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRAP_2_VOIP),
- CARDTYPE_DIVASRV_VOICE_P_30M_V2_PCI },
- /* Diva Server 2FX 0xE01A */
- { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_2F),
- CARDTYPE_DIVASRV_B_2F_PCI },
- /* Diva Server Voice BRI-2M 2.0 PCI 0xE01B */
- { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_BRI2M_2_VOIP),
- CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI },
- { 0, } /* 0 terminated list. */
-};
-MODULE_DEVICE_TABLE(pci, divas_pci_tbl);
-
-static int divas_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void divas_remove_one(struct pci_dev *pdev);
-
-static struct pci_driver diva_pci_driver = {
- .name = "divas",
- .probe = divas_init_one,
- .remove = divas_remove_one,
- .id_table = divas_pci_tbl,
-};
-
-/*********************************************************
- ** little helper functions
- *********************************************************/
-static char *getrev(const char *revision)
-{
- char *rev;
- char *p;
- if ((p = strchr(revision, ':'))) {
- rev = p + 2;
- p = strchr(rev, '$');
- *--p = 0;
- } else
- rev = "1.0";
- return rev;
-}
-
-void diva_log_info(unsigned char *format, ...)
-{
- va_list args;
- unsigned char line[160];
-
- va_start(args, format);
- vsnprintf(line, sizeof(line), format, args);
- va_end(args);
-
- printk(KERN_INFO "%s: %s\n", DRIVERLNAME, line);
-}
-
-void divas_get_version(char *p)
-{
- char tmprev[32];
-
- strcpy(tmprev, main_revision);
- sprintf(p, "%s: %s(%s) %s(%s) major=%d\n", DRIVERLNAME, DRIVERRELEASE_DIVAS,
- getrev(tmprev), diva_xdi_common_code_build, DIVA_BUILD, major);
-}
-
-/* --------------------------------------------------------------------------
- PCI Bus services
- -------------------------------------------------------------------------- */
-byte diva_os_get_pci_bus(void *pci_dev_handle)
-{
- struct pci_dev *pdev = (struct pci_dev *) pci_dev_handle;
- return ((byte) pdev->bus->number);
-}
-
-byte diva_os_get_pci_func(void *pci_dev_handle)
-{
- struct pci_dev *pdev = (struct pci_dev *) pci_dev_handle;
- return ((byte) pdev->devfn);
-}
-
-unsigned long divasa_get_pci_irq(unsigned char bus, unsigned char func,
- void *pci_dev_handle)
-{
- unsigned char irq = 0;
- struct pci_dev *dev = (struct pci_dev *) pci_dev_handle;
-
- irq = dev->irq;
-
- return ((unsigned long) irq);
-}
-
-unsigned long divasa_get_pci_bar(unsigned char bus, unsigned char func,
- int bar, void *pci_dev_handle)
-{
- unsigned long ret = 0;
- struct pci_dev *dev = (struct pci_dev *) pci_dev_handle;
-
- if (bar < 6) {
- ret = dev->resource[bar].start;
- }
-
- DBG_TRC(("GOT BAR[%d]=%08x", bar, ret));
-
- {
- unsigned long type = (ret & 0x00000001);
- if (type & PCI_BASE_ADDRESS_SPACE_IO) {
- DBG_TRC((" I/O"));
- ret &= PCI_BASE_ADDRESS_IO_MASK;
- } else {
- DBG_TRC((" memory"));
- ret &= PCI_BASE_ADDRESS_MEM_MASK;
- }
- DBG_TRC((" final=%08x", ret));
- }
-
- return (ret);
-}
-
-void PCIwrite(byte bus, byte func, int offset, void *data, int length,
- void *pci_dev_handle)
-{
- struct pci_dev *dev = (struct pci_dev *) pci_dev_handle;
-
- switch (length) {
- case 1: /* byte */
- pci_write_config_byte(dev, offset,
- *(unsigned char *) data);
- break;
- case 2: /* word */
- pci_write_config_word(dev, offset,
- *(unsigned short *) data);
- break;
- case 4: /* dword */
- pci_write_config_dword(dev, offset,
- *(unsigned int *) data);
- break;
-
- default: /* buffer */
- if (!(length % 4) && !(length & 0x03)) { /* Copy as dword */
- dword *p = (dword *) data;
- length /= 4;
-
- while (length--) {
- pci_write_config_dword(dev, offset,
- *(unsigned int *)
- p++);
- }
- } else { /* copy as byte stream */
- byte *p = (byte *) data;
-
- while (length--) {
- pci_write_config_byte(dev, offset,
- *(unsigned char *)
- p++);
- }
- }
- }
-}
-
-void PCIread(byte bus, byte func, int offset, void *data, int length,
- void *pci_dev_handle)
-{
- struct pci_dev *dev = (struct pci_dev *) pci_dev_handle;
-
- switch (length) {
- case 1: /* byte */
- pci_read_config_byte(dev, offset, (unsigned char *) data);
- break;
- case 2: /* word */
- pci_read_config_word(dev, offset, (unsigned short *) data);
- break;
- case 4: /* dword */
- pci_read_config_dword(dev, offset, (unsigned int *) data);
- break;
-
- default: /* buffer */
- if (!(length % 4) && !(length & 0x03)) { /* Copy as dword */
- dword *p = (dword *) data;
- length /= 4;
-
- while (length--) {
- pci_read_config_dword(dev, offset,
- (unsigned int *)
- p++);
- }
- } else { /* copy as byte stream */
- byte *p = (byte *) data;
-
- while (length--) {
- pci_read_config_byte(dev, offset,
- (unsigned char *)
- p++);
- }
- }
- }
-}
-
-/*
-  Initialize the map with DMA pages. It is not a problem if some
-  allocations fail - the channels that do not get a DMA page will fall
-  back to the standard PIO interface
-*/
-static void *diva_pci_alloc_consistent(struct pci_dev *hwdev,
- size_t size,
- dma_addr_t *dma_handle,
- void **addr_handle)
-{
- void *addr = pci_alloc_consistent(hwdev, size, dma_handle);
-
- *addr_handle = addr;
-
- return (addr);
-}
-
-void diva_init_dma_map(void *hdev,
- struct _diva_dma_map_entry **ppmap, int nentries)
-{
- struct pci_dev *pdev = (struct pci_dev *) hdev;
- struct _diva_dma_map_entry *pmap =
- diva_alloc_dma_map(hdev, nentries);
-
- if (pmap) {
- int i;
- dma_addr_t dma_handle;
- void *cpu_addr;
- void *addr_handle;
-
- for (i = 0; i < nentries; i++) {
- if (!(cpu_addr = diva_pci_alloc_consistent(pdev,
- PAGE_SIZE,
- &dma_handle,
- &addr_handle)))
- {
- break;
- }
- diva_init_dma_map_entry(pmap, i, cpu_addr,
- (dword) dma_handle,
- addr_handle);
- DBG_TRC(("dma map alloc [%d]=(%08lx:%08x:%08lx)",
- i, (unsigned long) cpu_addr,
- (dword) dma_handle,
-				 (unsigned long) addr_handle))
-		}
- }
-
- *ppmap = pmap;
-}
-
-/*
-  Free everything contained in the map entries and the memory used by
-  the map itself. Should always be called after the adapter has been
-  removed from the DIDD array
-*/
-void diva_free_dma_map(void *hdev, struct _diva_dma_map_entry *pmap)
-{
- struct pci_dev *pdev = (struct pci_dev *) hdev;
- int i;
- dword phys_addr;
- void *cpu_addr;
- dma_addr_t dma_handle;
- void *addr_handle;
-
- for (i = 0; (pmap != NULL); i++) {
- diva_get_dma_map_entry(pmap, i, &cpu_addr, &phys_addr);
- if (!cpu_addr) {
- break;
- }
- addr_handle = diva_get_entry_handle(pmap, i);
- dma_handle = (dma_addr_t) phys_addr;
- pci_free_consistent(pdev, PAGE_SIZE, addr_handle,
- dma_handle);
- DBG_TRC(("dma map free [%d]=(%08lx:%08x:%08lx)", i,
- (unsigned long) cpu_addr, (dword) dma_handle,
- (unsigned long) addr_handle))
- }
-
- diva_free_dma_mapping(pmap);
-}
-
-
-/*********************************************************
- ** I/O port utilities
- *********************************************************/
-
-int
-diva_os_register_io_port(void *adapter, int on, unsigned long port,
- unsigned long length, const char *name, int id)
-{
- if (on) {
- if (!request_region(port, length, name)) {
- DBG_ERR(("A: I/O: can't register port=%08x", port))
- return (-1);
- }
- } else {
- release_region(port, length);
- }
- return (0);
-}
-
-void __iomem *divasa_remap_pci_bar(diva_os_xdi_adapter_t *a, int id, unsigned long bar, unsigned long area_length)
-{
- void __iomem *ret = ioremap(bar, area_length);
- DBG_TRC(("remap(%08x)->%p", bar, ret));
- return (ret);
-}
-
-void divasa_unmap_pci_bar(void __iomem *bar)
-{
- if (bar) {
- iounmap(bar);
- }
-}
-
-/*********************************************************
- ** I/O port access
- *********************************************************/
-inline byte inpp(void __iomem *addr)
-{
- return (inb((unsigned long) addr));
-}
-
-inline word inppw(void __iomem *addr)
-{
- return (inw((unsigned long) addr));
-}
-
-inline void inppw_buffer(void __iomem *addr, void *P, int length)
-{
- insw((unsigned long) addr, (word *) P, length >> 1);
-}
-
-inline void outppw_buffer(void __iomem *addr, void *P, int length)
-{
- outsw((unsigned long) addr, (word *) P, length >> 1);
-}
-
-inline void outppw(void __iomem *addr, word w)
-{
- outw(w, (unsigned long) addr);
-}
-
-inline void outpp(void __iomem *addr, word p)
-{
- outb(p, (unsigned long) addr);
-}
-
-/* --------------------------------------------------------------------------
- IRQ request / remove
- -------------------------------------------------------------------------- */
-int diva_os_register_irq(void *context, byte irq, const char *name)
-{
- int result = request_irq(irq, diva_os_irq_wrapper,
- IRQF_SHARED, name, context);
- return (result);
-}
-
-void diva_os_remove_irq(void *context, byte irq)
-{
- free_irq(irq, context);
-}
-
-/* --------------------------------------------------------------------------
- DPC framework implementation
- -------------------------------------------------------------------------- */
-static void diva_os_dpc_proc(unsigned long context)
-{
- diva_os_thread_dpc_t *psoft_isr = (diva_os_thread_dpc_t *) context;
- diva_os_soft_isr_t *pisr = psoft_isr->psoft_isr;
-
- (*(pisr->callback)) (pisr, pisr->callback_context);
-}
-
-int diva_os_initialize_soft_isr(diva_os_soft_isr_t *psoft_isr,
- diva_os_soft_isr_callback_t callback,
- void *callback_context)
-{
- diva_os_thread_dpc_t *pdpc;
-
- pdpc = (diva_os_thread_dpc_t *) diva_os_malloc(0, sizeof(*pdpc));
- if (!(psoft_isr->object = pdpc)) {
- return (-1);
- }
- memset(pdpc, 0x00, sizeof(*pdpc));
- psoft_isr->callback = callback;
- psoft_isr->callback_context = callback_context;
- pdpc->psoft_isr = psoft_isr;
- tasklet_init(&pdpc->divas_task, diva_os_dpc_proc, (unsigned long)pdpc);
-
- return (0);
-}
-
-int diva_os_schedule_soft_isr(diva_os_soft_isr_t *psoft_isr)
-{
- if (psoft_isr && psoft_isr->object) {
- diva_os_thread_dpc_t *pdpc =
- (diva_os_thread_dpc_t *) psoft_isr->object;
-
- tasklet_schedule(&pdpc->divas_task);
- }
-
- return (1);
-}
-
-int diva_os_cancel_soft_isr(diva_os_soft_isr_t *psoft_isr)
-{
- return (0);
-}
-
-void diva_os_remove_soft_isr(diva_os_soft_isr_t *psoft_isr)
-{
- if (psoft_isr && psoft_isr->object) {
- diva_os_thread_dpc_t *pdpc =
- (diva_os_thread_dpc_t *) psoft_isr->object;
- void *mem;
-
- tasklet_kill(&pdpc->divas_task);
- mem = psoft_isr->object;
- psoft_isr->object = NULL;
- diva_os_free(0, mem);
- }
-}
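
A lifecycle sketch of the soft-ISR (DPC) framework above; 'my_dpc' and
'my_context' are illustrative names, and the callback signature is
inferred from the dispatch in diva_os_dpc_proc():

	static void my_dpc(diva_os_soft_isr_t *pisr, void *context)
	{
		/* deferred work; runs in tasklet (softirq) context */
	}

	static diva_os_soft_isr_t soft_isr;

	static void soft_isr_example(void *my_context)
	{
		if (diva_os_initialize_soft_isr(&soft_isr, my_dpc, my_context))
			return;			/* allocation failed */

		/* typically called from the hard interrupt handler */
		diva_os_schedule_soft_isr(&soft_isr);

		/* on shutdown: kills the tasklet and frees the DPC context */
		diva_os_remove_soft_isr(&soft_isr);
	}
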
-
-/*
- * kernel/user space copy functions
- */
-static int
-xdi_copy_to_user(void *os_handle, void __user *dst, const void *src, int length)
-{
- if (copy_to_user(dst, src, length)) {
- return (-EFAULT);
- }
- return (length);
-}
-
-static int
-xdi_copy_from_user(void *os_handle, void *dst, const void __user *src, int length)
-{
- if (copy_from_user(dst, src, length)) {
- return (-EFAULT);
- }
- return (length);
-}
-
-/*
- * device node operations
- */
-static int divas_open(struct inode *inode, struct file *file)
-{
- return (0);
-}
-
-static int divas_release(struct inode *inode, struct file *file)
-{
- if (file->private_data) {
- diva_xdi_close_adapter(file->private_data, file);
- }
- return (0);
-}
-
-static ssize_t divas_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- diva_xdi_um_cfg_cmd_t msg;
- int ret = -EINVAL;
-
- if (!file->private_data) {
- file->private_data = diva_xdi_open_adapter(file, buf,
- count, &msg,
- xdi_copy_from_user);
- if (!file->private_data)
- return (-ENODEV);
- ret = diva_xdi_write(file->private_data, file,
- buf, count, &msg, xdi_copy_from_user);
- } else {
- ret = diva_xdi_write(file->private_data, file,
- buf, count, NULL, xdi_copy_from_user);
- }
-
- switch (ret) {
- case -1: /* Message should be removed from rx mailbox first */
- ret = -EBUSY;
- break;
- case -2: /* invalid adapter was specified in this call */
- ret = -ENOMEM;
- break;
- case -3:
- ret = -ENXIO;
- break;
- }
- DBG_TRC(("write: ret %d", ret));
- return (ret);
-}
-
-static ssize_t divas_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- diva_xdi_um_cfg_cmd_t msg;
- int ret = -EINVAL;
-
- if (!file->private_data) {
- file->private_data = diva_xdi_open_adapter(file, buf,
- count, &msg,
- xdi_copy_from_user);
- }
- if (!file->private_data) {
- return (-ENODEV);
- }
-
- ret = diva_xdi_read(file->private_data, file,
- buf, count, xdi_copy_to_user);
- switch (ret) {
- case -1: /* RX mailbox is empty */
- ret = -EAGAIN;
- break;
- case -2: /* no memory, mailbox was cleared, last command is failed */
- ret = -ENOMEM;
- break;
- case -3: /* can't copy to user, retry */
- ret = -EFAULT;
- break;
- }
- DBG_TRC(("read: ret %d", ret));
- return (ret);
-}
-
-static __poll_t divas_poll(struct file *file, poll_table *wait)
-{
- if (!file->private_data) {
- return (EPOLLERR);
- }
- return (EPOLLIN | EPOLLRDNORM);
-}
-
-static const struct file_operations divas_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = divas_read,
- .write = divas_write,
- .poll = divas_poll,
- .open = divas_open,
- .release = divas_release
-};
-
-static void divas_unregister_chrdev(void)
-{
- unregister_chrdev(major, DEVNAME);
-}
-
-static int __init divas_register_chrdev(void)
-{
- if ((major = register_chrdev(0, DEVNAME, &divas_fops)) < 0)
- {
- printk(KERN_ERR "%s: failed to create /dev entry.\n",
- DRIVERLNAME);
- return (0);
- }
-
- return (1);
-}
-
-/* --------------------------------------------------------------------------
- PCI driver section
- -------------------------------------------------------------------------- */
-static int divas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- void *pdiva = NULL;
- u8 pci_latency;
- u8 new_latency = 32;
-
- DBG_TRC(("%s bus: %08x fn: %08x insertion.\n",
- CardProperties[ent->driver_data].Name,
- pdev->bus->number, pdev->devfn))
- printk(KERN_INFO "%s: %s bus: %08x fn: %08x insertion.\n",
- DRIVERLNAME, CardProperties[ent->driver_data].Name,
- pdev->bus->number, pdev->devfn);
-
- if (pci_enable_device(pdev)) {
- DBG_TRC(("%s: %s bus: %08x fn: %08x device init failed.\n",
- DRIVERLNAME,
- CardProperties[ent->driver_data].Name,
- pdev->bus->number,
- pdev->devfn))
- printk(KERN_ERR
- "%s: %s bus: %08x fn: %08x device init failed.\n",
- DRIVERLNAME,
-		       CardProperties[ent->driver_data].Name,
-		       pdev->bus->number, pdev->devfn);
- return (-EIO);
- }
-
- pci_set_master(pdev);
-
- pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
- if (!pci_latency) {
- DBG_TRC(("%s: bus: %08x fn: %08x fix latency.\n",
- DRIVERLNAME, pdev->bus->number, pdev->devfn))
- printk(KERN_INFO
- "%s: bus: %08x fn: %08x fix latency.\n",
- DRIVERLNAME, pdev->bus->number, pdev->devfn);
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
- }
-
- if (!(pdiva = diva_driver_add_card(pdev, ent->driver_data))) {
- DBG_TRC(("%s: %s bus: %08x fn: %08x card init failed.\n",
- DRIVERLNAME,
- CardProperties[ent->driver_data].Name,
- pdev->bus->number,
- pdev->devfn))
- printk(KERN_ERR
- "%s: %s bus: %08x fn: %08x card init failed.\n",
- DRIVERLNAME,
-		       CardProperties[ent->driver_data].Name,
-		       pdev->bus->number, pdev->devfn);
- return (-EIO);
- }
-
- pci_set_drvdata(pdev, pdiva);
-
- return (0);
-}
-
-static void divas_remove_one(struct pci_dev *pdev)
-{
- void *pdiva = pci_get_drvdata(pdev);
-
- DBG_TRC(("bus: %08x fn: %08x removal.\n",
- pdev->bus->number, pdev->devfn))
- printk(KERN_INFO "%s: bus: %08x fn: %08x removal.\n",
- DRIVERLNAME, pdev->bus->number, pdev->devfn);
-
- if (pdiva) {
- diva_driver_remove_card(pdiva);
- }
-
-}
-
-/* --------------------------------------------------------------------------
- Driver Load / Startup
- -------------------------------------------------------------------------- */
-static int __init divas_init(void)
-{
- char tmprev[50];
- int ret = 0;
-
- printk(KERN_INFO "%s\n", DRIVERNAME);
- printk(KERN_INFO "%s: Rel:%s Rev:", DRIVERLNAME, DRIVERRELEASE_DIVAS);
- strcpy(tmprev, main_revision);
- printk("%s Build: %s(%s)\n", getrev(tmprev),
- diva_xdi_common_code_build, DIVA_BUILD);
- printk(KERN_INFO "%s: support for: ", DRIVERLNAME);
-#ifdef CONFIG_ISDN_DIVAS_BRIPCI
- printk("BRI/PCI ");
-#endif
-#ifdef CONFIG_ISDN_DIVAS_PRIPCI
- printk("PRI/PCI ");
-#endif
- printk("adapters\n");
-
- if (!divasfunc_init(dbgmask)) {
- printk(KERN_ERR "%s: failed to connect to DIDD.\n",
- DRIVERLNAME);
- ret = -EIO;
- goto out;
- }
-
- if (!divas_register_chrdev()) {
-#ifdef MODULE
- divasfunc_exit();
-#endif
- ret = -EIO;
- goto out;
- }
-
- if (!create_divas_proc()) {
-#ifdef MODULE
- divas_unregister_chrdev();
- divasfunc_exit();
-#endif
- printk(KERN_ERR "%s: failed to create proc entry.\n",
- DRIVERLNAME);
- ret = -EIO;
- goto out;
- }
-
- if ((ret = pci_register_driver(&diva_pci_driver))) {
-#ifdef MODULE
- remove_divas_proc();
- divas_unregister_chrdev();
- divasfunc_exit();
-#endif
- printk(KERN_ERR "%s: failed to init pci driver.\n",
- DRIVERLNAME);
- goto out;
- }
- printk(KERN_INFO "%s: started with major %d\n", DRIVERLNAME, major);
-
-out:
- return (ret);
-}
-
-/* --------------------------------------------------------------------------
- Driver Unload
- -------------------------------------------------------------------------- */
-static void __exit divas_exit(void)
-{
- pci_unregister_driver(&diva_pci_driver);
- remove_divas_proc();
- divas_unregister_chrdev();
- divasfunc_exit();
-
- printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
-}
-
-module_init(divas_init);
-module_exit(divas_exit);
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
deleted file mode 100644
index f52f4622b10b..000000000000
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ /dev/null
@@ -1,412 +0,0 @@
-/* $Id: divasproc.c,v 1.19.4.3 2005/01/31 12:22:20 armin Exp $
- *
- * Low level driver for Eicon DIVA Server ISDN cards.
- * /proc functions
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/list.h>
-#include <linux/uaccess.h>
-
-#include "platform.h"
-#include "debuglib.h"
-#undef ID_MASK
-#undef N_DATA
-#include "pc.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "di.h"
-#include "io.h"
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "diva.h"
-#include "diva_pci.h"
-
-
-extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
-extern void divas_get_version(char *);
-extern void diva_get_vserial_number(PISDN_ADAPTER IoAdapter, char *buf);
-
-/*********************************************************
- ** Functions for /proc interface / File operations
- *********************************************************/
-
-static char *divas_proc_name = "divas";
-static char *adapter_dir_name = "adapter";
-static char *info_proc_name = "info";
-static char *grp_opt_proc_name = "group_optimization";
-static char *d_l1_down_proc_name = "dynamic_l1_down";
-
-/*
-** "divas" entry
-*/
-
-extern struct proc_dir_entry *proc_net_eicon;
-static struct proc_dir_entry *divas_proc_entry = NULL;
-
-static ssize_t
-divas_read(struct file *file, char __user *buf, size_t count, loff_t *off)
-{
- int len = 0;
- int cadapter;
- char tmpbuf[80];
- char tmpser[16];
-
- if (*off)
- return 0;
-
- divas_get_version(tmpbuf);
- if (copy_to_user(buf + len, &tmpbuf, strlen(tmpbuf)))
- return -EFAULT;
- len += strlen(tmpbuf);
-
- for (cadapter = 0; cadapter < MAX_ADAPTER; cadapter++) {
- if (IoAdapters[cadapter]) {
- diva_get_vserial_number(IoAdapters[cadapter],
- tmpser);
- sprintf(tmpbuf,
- "%2d: %-30s Serial:%-10s IRQ:%2d\n",
- cadapter + 1,
- IoAdapters[cadapter]->Properties.Name,
- tmpser,
- IoAdapters[cadapter]->irq_info.irq_nr);
- if ((strlen(tmpbuf) + len) > count)
- break;
-			if (copy_to_user(buf + len, &tmpbuf,
-					 strlen(tmpbuf)))
-				return -EFAULT;
- len += strlen(tmpbuf);
- }
- }
-
- *off += len;
- return (len);
-}
-
-static ssize_t
-divas_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
-{
- return (-ENODEV);
-}
-
-static __poll_t divas_poll(struct file *file, poll_table *wait)
-{
- return (EPOLLERR);
-}
-
-static int divas_open(struct inode *inode, struct file *file)
-{
- return nonseekable_open(inode, file);
-}
-
-static int divas_close(struct inode *inode, struct file *file)
-{
- return (0);
-}
-
-static const struct file_operations divas_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .read = divas_read,
- .write = divas_write,
- .poll = divas_poll,
- .open = divas_open,
- .release = divas_close
-};
-
-int create_divas_proc(void)
-{
- divas_proc_entry = proc_create(divas_proc_name, S_IFREG | S_IRUGO,
- proc_net_eicon, &divas_fops);
- if (!divas_proc_entry)
- return (0);
-
- return (1);
-}
-
-void remove_divas_proc(void)
-{
- if (divas_proc_entry) {
- remove_proc_entry(divas_proc_name, proc_net_eicon);
- divas_proc_entry = NULL;
- }
-}
-
-static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- diva_os_xdi_adapter_t *a = PDE_DATA(file_inode(file));
- PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
-
- if ((count == 1) || (count == 2)) {
- char c;
- if (get_user(c, buffer))
- return -EFAULT;
- switch (c) {
- case '0':
- IoAdapter->capi_cfg.cfg_1 &=
- ~DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON;
- break;
- case '1':
- IoAdapter->capi_cfg.cfg_1 |=
- DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON;
- break;
- default:
- return (-EINVAL);
- }
- return (count);
- }
- return (-EINVAL);
-}
-
-static ssize_t d_l1_down_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- diva_os_xdi_adapter_t *a = PDE_DATA(file_inode(file));
- PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
-
- if ((count == 1) || (count == 2)) {
- char c;
- if (get_user(c, buffer))
- return -EFAULT;
- switch (c) {
- case '0':
- IoAdapter->capi_cfg.cfg_1 &=
- ~DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON;
- break;
- case '1':
- IoAdapter->capi_cfg.cfg_1 |=
- DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON;
- break;
- default:
- return (-EINVAL);
- }
- return (count);
- }
- return (-EINVAL);
-}
-
-static int d_l1_down_proc_show(struct seq_file *m, void *v)
-{
- diva_os_xdi_adapter_t *a = m->private;
- PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
-
- seq_printf(m, "%s\n",
-		   (IoAdapter->capi_cfg.cfg_1 &
-		    DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? "1" : "0");
- return 0;
-}
-
-static int d_l1_down_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, d_l1_down_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations d_l1_down_proc_fops = {
- .owner = THIS_MODULE,
- .open = d_l1_down_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = d_l1_down_proc_write,
-};
-
-static int grp_opt_proc_show(struct seq_file *m, void *v)
-{
- diva_os_xdi_adapter_t *a = m->private;
- PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
-
- seq_printf(m, "%s\n",
-		   (IoAdapter->capi_cfg.cfg_1 &
-		    DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON) ? "1" : "0");
- return 0;
-}
-
-static int grp_opt_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, grp_opt_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations grp_opt_proc_fops = {
- .owner = THIS_MODULE,
- .open = grp_opt_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = grp_opt_proc_write,
-};
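
Both toggle files above accept a single-character payload: writing '0' or
'1' (count 1, or 2 to allow a trailing newline) clears or sets the
corresponding capi_cfg bit. A hypothetical user-space sketch; the full
path assumes that proc_net_eicon (created elsewhere) resolves to
/proc/net/eicon and that adapter 1 exists:

	#include <stdio.h>

	static int set_dynamic_l1_down(int on)
	{
		/* path assumed, see note above */
		FILE *f = fopen("/proc/net/eicon/adapter1/dynamic_l1_down", "w");

		if (!f)
			return -1;
		fputs(on ? "1" : "0", f);	/* handlers accept only '0' or '1' */
		return fclose(f);
	}
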
-
-static ssize_t info_proc_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *pos)
-{
- diva_os_xdi_adapter_t *a = PDE_DATA(file_inode(file));
- PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
- char c[4];
-
- if (count <= 4)
- return -EINVAL;
-
- if (copy_from_user(c, buffer, 4))
- return -EFAULT;
-
- /* this is for test purposes only */
- if (!memcmp(c, "trap", 4)) {
- (*(IoAdapter->os_trap_nfy_Fnc)) (IoAdapter, IoAdapter->ANum);
- return (count);
- }
- return (-EINVAL);
-}
-
-static int info_proc_show(struct seq_file *m, void *v)
-{
- int i = 0;
- char *p;
- char tmpser[16];
- diva_os_xdi_adapter_t *a = m->private;
- PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
-
- seq_printf(m, "Name : %s\n", IoAdapter->Properties.Name);
- seq_printf(m, "DSP state : %08x\n", a->dsp_mask);
- seq_printf(m, "Channels : %02d\n", IoAdapter->Properties.Channels);
- seq_printf(m, "E. max/used : %03d/%03d\n",
- IoAdapter->e_max, IoAdapter->e_count);
- diva_get_vserial_number(IoAdapter, tmpser);
- seq_printf(m, "Serial : %s\n", tmpser);
- seq_printf(m, "IRQ : %d\n", IoAdapter->irq_info.irq_nr);
- seq_printf(m, "CardIndex : %d\n", a->CardIndex);
- seq_printf(m, "CardOrdinal : %d\n", a->CardOrdinal);
- seq_printf(m, "Controller : %d\n", a->controller);
- seq_printf(m, "Bus-Type : %s\n",
- (a->Bus ==
- DIVAS_XDI_ADAPTER_BUS_ISA) ? "ISA" : "PCI");
- seq_printf(m, "Port-Name : %s\n", a->port_name);
- if (a->Bus == DIVAS_XDI_ADAPTER_BUS_PCI) {
- seq_printf(m, "PCI-bus : %d\n", a->resources.pci.bus);
- seq_printf(m, "PCI-func : %d\n", a->resources.pci.func);
- for (i = 0; i < 8; i++) {
- if (a->resources.pci.bar[i]) {
- seq_printf(m,
- "Mem / I/O %d : 0x%x / mapped : 0x%lx",
- i, a->resources.pci.bar[i],
-					   (unsigned long) a->resources.pci.addr[i]);
- if (a->resources.pci.length[i]) {
- seq_printf(m,
- " / length : %d",
-						   a->resources.pci.length[i]);
- }
- seq_putc(m, '\n');
- }
- }
- }
- if ((!a->xdi_adapter.port) &&
- ((!a->xdi_adapter.ram) ||
- (!a->xdi_adapter.reset)
- || (!a->xdi_adapter.cfg))) {
- if (!IoAdapter->irq_info.irq_nr) {
- p = "slave";
- } else {
- p = "out of service";
- }
- } else if (a->xdi_adapter.trapped) {
- p = "trapped";
- } else if (a->xdi_adapter.Initialized) {
- p = "active";
- } else {
- p = "ready";
- }
- seq_printf(m, "State : %s\n", p);
-
- return 0;
-}
-
-static int info_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, info_proc_show, PDE_DATA(inode));
-}
-
-static const struct file_operations info_proc_fops = {
- .owner = THIS_MODULE,
- .open = info_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = info_proc_write,
-};
-
-/*
-** adapter proc init/de-init
-*/
-
-/* --------------------------------------------------------------------------
- Create adapter directory and files in proc file system
- -------------------------------------------------------------------------- */
-int create_adapter_proc(diva_os_xdi_adapter_t *a)
-{
- struct proc_dir_entry *de, *pe;
- char tmp[16];
-
- sprintf(tmp, "%s%d", adapter_dir_name, a->controller);
- if (!(de = proc_mkdir(tmp, proc_net_eicon)))
- return (0);
- a->proc_adapter_dir = (void *) de;
-
- pe = proc_create_data(info_proc_name, S_IRUGO | S_IWUSR, de,
- &info_proc_fops, a);
- if (!pe)
- return (0);
- a->proc_info = (void *) pe;
-
- pe = proc_create_data(grp_opt_proc_name, S_IRUGO | S_IWUSR, de,
- &grp_opt_proc_fops, a);
- if (pe)
- a->proc_grp_opt = (void *) pe;
- pe = proc_create_data(d_l1_down_proc_name, S_IRUGO | S_IWUSR, de,
- &d_l1_down_proc_fops, a);
- if (pe)
- a->proc_d_l1_down = (void *) pe;
-
- DBG_TRC(("proc entry %s created", tmp));
-
- return (1);
-}
-
-/* --------------------------------------------------------------------------
- Remove adapter directory and files in proc file system
- -------------------------------------------------------------------------- */
-void remove_adapter_proc(diva_os_xdi_adapter_t *a)
-{
- char tmp[16];
-
- if (a->proc_adapter_dir) {
- if (a->proc_d_l1_down) {
- remove_proc_entry(d_l1_down_proc_name,
- (struct proc_dir_entry *) a->proc_adapter_dir);
- }
- if (a->proc_grp_opt) {
- remove_proc_entry(grp_opt_proc_name,
- (struct proc_dir_entry *) a->proc_adapter_dir);
- }
- if (a->proc_info) {
- remove_proc_entry(info_proc_name,
- (struct proc_dir_entry *) a->proc_adapter_dir);
- }
- sprintf(tmp, "%s%d", adapter_dir_name, a->controller);
- remove_proc_entry(tmp, proc_net_eicon);
- DBG_TRC(("proc entry %s%d removed", adapter_dir_name,
- a->controller));
- }
-}
diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
deleted file mode 100644
index dd6b53a2c2c8..000000000000
--- a/drivers/isdn/hardware/eicon/divasync.h
+++ /dev/null
@@ -1,489 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_SYNC__H
-#define __DIVA_SYNC__H
-#define IDI_SYNC_REQ_REMOVE 0x00
-#define IDI_SYNC_REQ_GET_NAME 0x01
-#define IDI_SYNC_REQ_GET_SERIAL 0x02
-#define IDI_SYNC_REQ_SET_POSTCALL 0x03
-#define IDI_SYNC_REQ_GET_XLOG 0x04
-#define IDI_SYNC_REQ_GET_FEATURES 0x05
-#define IDI_SYNC_REQ_USB_REGISTER 0x06
-#define IDI_SYNC_REQ_USB_RELEASE 0x07
-#define IDI_SYNC_REQ_USB_ADD_DEVICE 0x08
-#define IDI_SYNC_REQ_USB_START_DEVICE 0x09
-#define IDI_SYNC_REQ_USB_STOP_DEVICE 0x0A
-#define IDI_SYNC_REQ_USB_REMOVE_DEVICE 0x0B
-#define IDI_SYNC_REQ_GET_CARDTYPE 0x0C
-#define IDI_SYNC_REQ_GET_DBG_XLOG 0x0D
-#define DIVA_USB
-#define DIVA_USB_REQ 0xAC
-#define DIVA_USB_TEST 0xAB
-#define DIVA_USB_ADD_ADAPTER 0xAC
-#define DIVA_USB_REMOVE_ADAPTER 0xAD
-#define IDI_SYNC_REQ_SERIAL_HOOK 0x80
-#define IDI_SYNC_REQ_XCHANGE_STATUS 0x81
-#define IDI_SYNC_REQ_USB_HOOK 0x82
-#define IDI_SYNC_REQ_PORTDRV_HOOK 0x83
-#define IDI_SYNC_REQ_SLI 0x84 /* SLI request from 3signal modem drivers */
-#define IDI_SYNC_REQ_RECONFIGURE 0x85
-#define IDI_SYNC_REQ_RESET 0x86
-#define IDI_SYNC_REQ_GET_85X_DEVICE_DATA 0x87
-#define IDI_SYNC_REQ_LOCK_85X 0x88
-#define IDI_SYNC_REQ_DIVA_85X_USB_DATA_EXCHANGE 0x99
-#define IDI_SYNC_REQ_DIPORT_EXCHANGE_REQ 0x98
-#define IDI_SYNC_REQ_GET_85X_EXT_PORT_TYPE 0xA0
-/******************************************************************************/
-#define IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES 0x92
-/*
- To receive XDI features:
-  1. set 'buffer_length_in_bytes' to the length of your buffer
-  2. set 'features' to point to your buffer
-  3. issue the synchronous request to XDI
-  4. check that the feature 'DIVA_XDI_EXTENDED_FEATURES_VALID' is
-  present after the call. This feature indicates that your request
-  was processed and that XDI supports this synchronous request
-  5. if on return bit 31 (0x80000000) of 'buffer_length_in_bytes' is
-  set, the provided buffer was too small and bits 30-0 contain the
-  required buffer length.
-  In this case only the features that fit into the buffer
-  are indicated to the caller
-*/
-typedef struct _diva_xdi_get_extended_xdi_features {
- dword buffer_length_in_bytes;
- byte *features;
-} diva_xdi_get_extended_xdi_features_t;
-/*
- features[0]
-*/
-#define DIVA_XDI_EXTENDED_FEATURES_VALID 0x01
-#define DIVA_XDI_EXTENDED_FEATURE_CMA 0x02
-#define DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR 0x04
-#define DIVA_XDI_EXTENDED_FEATURE_CAPI_PRMS 0x08
-#define DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC 0x10
-#define DIVA_XDI_EXTENDED_FEATURE_RX_DMA 0x20
-#define DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA 0x40
-#define DIVA_XDI_EXTENDED_FEATURE_WIDE_ID 0x80
-#define DIVA_XDI_EXTENDED_FEATURES_MAX_SZ 1
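
The five steps above map onto the IDI_SYNC_REQ union defined later in
this header. A minimal sketch; 'd.request' (the adapter's synchronous
request entry point, obtained from DIDD) is an assumption and is not
declared in this file:

	IDI_SYNC_REQ sync_req;
	byte feature_buf[DIVA_XDI_EXTENDED_FEATURES_MAX_SZ];

	sync_req.xdi_extended_features.Req = 0;
	sync_req.xdi_extended_features.Rc =
		IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES;
	sync_req.xdi_extended_features.info.buffer_length_in_bytes =
		sizeof(feature_buf);
	sync_req.xdi_extended_features.info.features = feature_buf;
	(*d.request)((ENTITY *)&sync_req);	/* 'd' from DIDD (assumed) */

	if (feature_buf[0] & DIVA_XDI_EXTENDED_FEATURES_VALID) {
		/* request processed; the remaining feature bits are valid */
		if (feature_buf[0] & DIVA_XDI_EXTENDED_FEATURE_RX_DMA) {
			/* adapter supports RX DMA */
		}
	}
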
-/******************************************************************************/
-#define IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR 0x93
-typedef struct _diva_xdi_get_adapter_sdram_bar {
- dword bar;
-} diva_xdi_get_adapter_sdram_bar_t;
-/******************************************************************************/
-#define IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS 0x94
-/*
-  CAPI parameters will be written into the caller's buffer
-*/
-typedef struct _diva_xdi_get_capi_parameters {
- dword structure_length;
- byte flag_dynamic_l1_down;
- byte group_optimization_enabled;
-} diva_xdi_get_capi_parameters_t;
-/******************************************************************************/
-#define IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER 0x95
-/*
-  Get the logical adapter number, as assigned by XDI.
-  'controller' is the zero-based 'sub' controller number in case of
-  an adapter that supports multiple interfaces;
-  'controller' is zero for the master adapter (and for adapters that
-  support only one interface)
-*/
-typedef struct _diva_xdi_get_logical_adapter_number {
- dword logical_adapter_number;
- dword controller;
- dword total_controllers;
-} diva_xdi_get_logical_adapter_number_s_t;
-/******************************************************************************/
-#define IDI_SYNC_REQ_UP1DM_OPERATION 0x96
-/******************************************************************************/
-#define IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION 0x97
-#define IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC 0x01
-#define IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE 0x02
-typedef struct _diva_xdi_dma_descriptor_operation {
- int operation;
- int descriptor_number;
- void *descriptor_address;
- dword descriptor_magic;
-} diva_xdi_dma_descriptor_operation_t;
-/******************************************************************************/
-#define IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY 0x01
-#define IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY 0x02
-#define IDI_SYNC_REQ_DIDD_ADD_ADAPTER 0x03
-#define IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER 0x04
-#define IDI_SYNC_REQ_DIDD_READ_ADAPTER_ARRAY 0x05
-#define IDI_SYNC_REQ_DIDD_GET_CFG_LIB_IFC 0x10
-typedef struct _diva_didd_adapter_notify {
- dword handle; /* Notification handle */
- void *callback;
- void *context;
-} diva_didd_adapter_notify_t;
-typedef struct _diva_didd_add_adapter {
- void *descriptor;
-} diva_didd_add_adapter_t;
-typedef struct _diva_didd_remove_adapter {
- IDI_CALL p_request;
-} diva_didd_remove_adapter_t;
-typedef struct _diva_didd_read_adapter_array {
- void *buffer;
- dword length;
-} diva_didd_read_adapter_array_t;
-typedef struct _diva_didd_get_cfg_lib_ifc {
- void *ifc;
-} diva_didd_get_cfg_lib_ifc_t;
-/******************************************************************************/
-#define IDI_SYNC_REQ_XDI_GET_STREAM 0x91
-#define DIVA_XDI_SYNCHRONOUS_SERVICE 0x01
-#define DIVA_XDI_DMA_SERVICE 0x02
-#define DIVA_XDI_AUTO_SERVICE 0x03
-#define DIVA_ISTREAM_COMPLETE_NOTIFY 0
-#define DIVA_ISTREAM_COMPLETE_READ 1
-#define DIVA_ISTREAM_COMPLETE_WRITE 2
-typedef struct _diva_xdi_stream_interface {
- unsigned char Id; /* filled by XDI client */
- unsigned char provided_service; /* filled by XDI */
- unsigned char requested_service; /* filled by XDI Client */
- void *xdi_context; /* filled by XDI */
- void *client_context; /* filled by XDI client */
- int (*write)(void *context,
- int Id,
- void *data,
- int length,
- int final,
- byte usr1,
- byte usr2);
- int (*read)(void *context,
- int Id,
- void *data,
- int max_length,
- int *final,
- byte *usr1,
- byte *usr2);
- int (*complete)(void *client_context,
- int Id,
- int what,
- void *data,
- int length,
- int *final);
-} diva_xdi_stream_interface_t;
-/******************************************************************************/
-/*
- * IDI_SYNC_REQ_SERIAL_HOOK - special interface for the DIVA Mobile card
- */
-typedef struct
-{ unsigned char LineState; /* Modem line state (STATUS_R) */
-#define SERIAL_GSM_CELL 0x01 /* GSM or CELL cable attached */
- unsigned char CardState; /* PCMCIA card state (0 = down) */
- unsigned char IsdnState; /* ISDN layer 1 state (0 = down)*/
- unsigned char HookState; /* current logical hook state */
-#define SERIAL_ON_HOOK 0x02 /* set in DIVA CTRL_R register */
-} SERIAL_STATE;
-typedef int (*SERIAL_INT_CB)(void *Context);
-typedef int (*SERIAL_DPC_CB)(void *Context);
-typedef unsigned char (*SERIAL_I_SYNC)(void *Context);
-typedef struct
-{ /* 'Req' and 'Rc' must be at the same place as in the ENTITY struct */
- unsigned char Req; /* request (must be always 0) */
- unsigned char Rc; /* return code (is the request) */
- unsigned char Function; /* private function code */
-#define SERIAL_HOOK_ATTACH 0x81
-#define SERIAL_HOOK_STATUS 0x82
-#define SERIAL_HOOK_I_SYNC 0x83
-#define SERIAL_HOOK_NOECHO 0x84
-#define SERIAL_HOOK_RING 0x85
-#define SERIAL_HOOK_DETACH 0x8f
- unsigned char Flags; /* function refinements */
- /* parameters passed by the ATTACH request */
- SERIAL_INT_CB InterruptHandler; /* called on each interrupt */
- SERIAL_DPC_CB DeferredHandler; /* called on hook state changes */
- void *HandlerContext; /* context for both handlers */
- /* return values for both the ATTACH and the STATUS request */
- unsigned long IoBase; /* IO port assigned to UART */
- SERIAL_STATE State;
- /* parameters and return values for the I_SYNC function */
- SERIAL_I_SYNC SyncFunction; /* to be called synchronized */
- void *SyncContext; /* context for this function */
- unsigned char SyncResult; /* return value of function */
-} SERIAL_HOOK;
-/*
- * IDI_SYNC_REQ_XCHANGE_STATUS - exchange the status between IDI and WMP
- * IDI_SYNC_REQ_RECONFIGURE - reconfiguration of IDI from WMP
- */
-typedef struct
-{ /* 'Req' and 'Rc' must be at the same place as in the ENTITY struct */
- unsigned char Req; /* request (must be always 0) */
- unsigned char Rc; /* return code (is the request) */
-#define DRIVER_STATUS_BOOT 0xA1
-#define DRIVER_STATUS_INIT_DEV 0xA2
-#define DRIVER_STATUS_RUNNING 0xA3
-#define DRIVER_STATUS_SHUTDOWN 0xAF
-#define DRIVER_STATUS_TRAPPED 0xAE
- unsigned char wmpStatus; /* exported by WMP */
- unsigned char idiStatus; /* exported by IDI */
- unsigned long wizProto; /* from WMP registry to IDI */
- /* the cardtype value is defined by cardtype.h */
- unsigned long cardType; /* from IDI registry to WMP */
- unsigned long nt2; /* from IDI registry to WMP */
- unsigned long permanent; /* from IDI registry to WMP */
- unsigned long stableL2; /* from IDI registry to WMP */
- unsigned long tei; /* from IDI registry to WMP */
-#define CRC4_MASK 0x00000003
-#define L1_TRISTATE_MASK 0x00000004
-#define WATCHDOG_MASK 0x00000008
-#define NO_ORDER_CHECK_MASK 0x00000010
-#define LOW_CHANNEL_MASK 0x00000020
-#define NO_HSCX30_MASK 0x00000040
-#define SET_BOARD 0x00001000
-#define SET_CRC4 0x00030000
-#define SET_L1_TRISTATE 0x00040000
-#define SET_WATCHDOG 0x00080000
-#define SET_NO_ORDER_CHECK 0x00100000
-#define SET_LOW_CHANNEL 0x00200000
-#define SET_NO_HSCX30 0x00400000
-#define SET_MODE 0x00800000
-#define SET_PROTO 0x02000000
-#define SET_CARDTYPE 0x04000000
-#define SET_NT2 0x08000000
-#define SET_PERMANENT 0x10000000
-#define SET_STABLEL2 0x20000000
-#define SET_TEI 0x40000000
-#define SET_NUMBERLEN 0x80000000
- unsigned long Flag; /* |31-Type-16|15-Mask-0| */
- unsigned long NumberLen; /* reconfiguration: union is empty */
- union {
- struct { /* possible reconfiguration, but ... ; SET_BOARD */
- unsigned long SerialNumber;
- char *pCardname; /* di_defs.h: BOARD_NAME_LENGTH */
- } board;
- struct { /* reset: need resources */
- void *pRawResources;
- void *pXlatResources;
- } res;
- struct { /* reconfiguration: wizProto == PROTTYPE_RBSCAS */
-#define GLARE_RESOLVE_MASK 0x00000001
-#define DID_MASK 0x00000002
-#define BEARER_CAP_MASK 0x0000000c
-#define SET_GLARE_RESOLVE 0x00010000
-#define SET_DID 0x00020000
-#define SET_BEARER_CAP 0x000c0000
- unsigned long Flag; /* |31-Type-16|15-VALUE-0| */
- unsigned short DigitTimeout;
- unsigned short AnswerDelay;
- } rbs;
- struct { /* reconfiguration: wizProto == PROTTYPE_QSIG */
-#define CALL_REF_LENGTH1_MASK 0x00000001
-#define BRI_CHANNEL_ID_MASK 0x00000002
-#define SET_CALL_REF_LENGTH 0x00010000
-#define SET_BRI_CHANNEL_ID 0x00020000
- unsigned long Flag; /* |31-Type-16|15-VALUE-0| */
- } qsig;
- struct { /* reconfiguration: NumberLen != 0 */
-#define SET_SPID1 0x00010000
-#define SET_NUMBER1 0x00020000
-#define SET_SUBADDRESS1 0x00040000
-#define SET_SPID2 0x00100000
-#define SET_NUMBER2 0x00200000
-#define SET_SUBADDRESS2 0x00400000
-#define MASK_SET 0xffff0000
- unsigned long Flag; /* |31-Type-16|15-Channel-0| */
- unsigned char *pBuffer; /* number value */
- } isdnNo;
-	} parms;
-} isdnProps;
-/*
- * IDI_SYNC_REQ_PORTDRV_HOOK - signal plug/unplug (Award Cardware only)
- */
-typedef void (*PORTDRV_HOOK_CB)(void *Context, int Plug);
-typedef struct
-{ /* 'Req' and 'Rc' must be at the same place as in the ENTITY struct */
- unsigned char Req; /* request (must be always 0) */
- unsigned char Rc; /* return code (is the request) */
- unsigned char Function; /* private function code */
- unsigned char Flags; /* function refinements */
- PORTDRV_HOOK_CB Callback; /* to be called on plug/unplug */
- void *Context; /* context for callback */
- unsigned long Info; /* more info if needed */
-} PORTDRV_HOOK;
-/* Codes for the 'Rc' element in structure below. */
-#define SLI_INSTALL (0xA1)
-#define SLI_UNINSTALL (0xA2)
-typedef int (*SLIENTRYPOINT)(void *p3SignalAPI, void *pContext);
-typedef struct
-{ /* 'Req' and 'Rc' must be at the same place as in the ENTITY struct */
- unsigned char Req; /* request (must be always 0) */
- unsigned char Rc; /* return code (is the request) */
- unsigned char Function; /* private function code */
- unsigned char Flags; /* function refinements */
- SLIENTRYPOINT Callback; /* to be called on plug/unplug */
- void *Context; /* context for callback */
- unsigned long Info; /* more info if needed */
-} SLIENTRYPOINT_REQ;
-/******************************************************************************/
-/*
- * Definitions for DIVA USB
- */
-typedef int (*USB_SEND_REQ)(unsigned char PipeIndex, unsigned char Type, void *Data, int sizeData);
-typedef int (*USB_START_DEV)(void *Adapter, void *Ipac);
-/* called from WDM */
-typedef void (*USB_RECV_NOTIFY)(void *Ipac, void *msg);
-typedef void (*USB_XMIT_NOTIFY)(void *Ipac, unsigned char PipeIndex);
-/******************************************************************************/
-/*
- * Parameter description for synchronous requests.
- *
- * Sorry, must repeat some parts of di_defs.h here because
- * they are not defined for all operating environments
- */
-typedef union
-{ ENTITY Entity;
- struct
- { /* 'Req' and 'Rc' are at the same place as in the ENTITY struct */
- unsigned char Req; /* request (must be always 0) */
- unsigned char Rc; /* return code (is the request) */
- } Request;
- struct
- { unsigned char Req; /* request (must be always 0) */
- unsigned char Rc; /* return code (0x01) */
- unsigned char name[BOARD_NAME_LENGTH];
- } GetName;
- struct
- { unsigned char Req; /* request (must be always 0) */
- unsigned char Rc; /* return code (0x02) */
- unsigned long serial; /* serial number */
- } GetSerial;
- struct
- { unsigned char Req; /* request (must be always 0) */
- unsigned char Rc; /* return code (0x02) */
- unsigned long lineIdx;/* line, 0 if card has only one */
- } GetLineIdx;
- struct
- { unsigned char Req; /* request (must be always 0) */
- unsigned char Rc; /* return code (0x02) */
- unsigned long cardtype;/* card type */
- } GetCardType;
- struct
- { unsigned short command;/* command = 0x0300 */
- unsigned short dummy; /* not used */
- IDI_CALL callback;/* routine to call back */
- ENTITY *contxt; /* ptr to entity to use */
- } PostCall;
- struct
- { unsigned char Req; /* request (must be always 0) */
- unsigned char Rc; /* return code (0x04) */
- unsigned char pcm[1]; /* buffer (a pc_maint struct) */
- } GetXlog;
- struct
- { unsigned char Req; /* request (must be always 0) */
- unsigned char Rc; /* return code (0x05) */
- unsigned short features;/* feature defines see below */
- } GetFeatures;
- SERIAL_HOOK SerialHook;
-/* Added for DIVA USB */
- struct
- { unsigned char Req;
- unsigned char Rc;
- USB_SEND_REQ UsbSendRequest; /* function in Diva Usb WDM driver in usb_os.c, */
- /* called from usb_drv.c to send a message to our device */
- /* eg UsbSendRequest (USB_PIPE_SIGNAL, USB_IPAC_START, 0, 0); */
- USB_RECV_NOTIFY usb_recv; /* called from usb_os.c to pass a received message and ptr to IPAC */
- /* on to usb_drv.c by a call to usb_recv(). */
- USB_XMIT_NOTIFY usb_xmit; /* called from usb_os.c in DivaUSB.sys WDM to indicate a completed transmit */
- /* to usb_drv.c by a call to usb_xmit(). */
- USB_START_DEV UsbStartDevice; /* Start the USB Device, in usb_os.c */
- IDI_CALL callback; /* routine to call back */
- ENTITY *contxt; /* ptr to entity to use */
- void **ipac_ptr; /* pointer to struct IPAC in VxD */
- } Usb_Msg_old;
-/* message used by WDM and VXD to pass pointers of function and IPAC* */
- struct
- { unsigned char Req;
- unsigned char Rc;
- USB_SEND_REQ pUsbSendRequest;/* function in Diva Usb WDM driver in usb_os.c, */
- /* called from usb_drv.c to send a message to our device */
- /* eg UsbSendRequest (USB_PIPE_SIGNAL, USB_IPAC_START, 0, 0); */
- USB_RECV_NOTIFY p_usb_recv; /* called from usb_os.c to pass a received message and ptr to IPAC */
- /* on to usb_drv.c by a call to usb_recv(). */
- USB_XMIT_NOTIFY p_usb_xmit; /* called from usb_os.c in DivaUSB.sys WDM to indicate a completed transmit */
- /* to usb_drv.c by a call to usb_xmit().*/
- void *ipac_ptr; /* &Diva.ipac pointer to struct IPAC in VxD */
- } Usb_Msg;
- PORTDRV_HOOK PortdrvHook;
- SLIENTRYPOINT_REQ sliEntryPointReq;
- struct {
- unsigned char Req;
- unsigned char Rc;
- diva_xdi_stream_interface_t info;
- } xdi_stream_info;
- struct {
- unsigned char Req;
- unsigned char Rc;
- diva_xdi_get_extended_xdi_features_t info;
- } xdi_extended_features;
- struct {
- unsigned char Req;
- unsigned char Rc;
- diva_xdi_get_adapter_sdram_bar_t info;
- } xdi_sdram_bar;
- struct {
- unsigned char Req;
- unsigned char Rc;
- diva_xdi_get_capi_parameters_t info;
- } xdi_capi_prms;
- struct {
- ENTITY e;
- diva_didd_adapter_notify_t info;
- } didd_notify;
- struct {
- ENTITY e;
- diva_didd_add_adapter_t info;
- } didd_add_adapter;
- struct {
- ENTITY e;
- diva_didd_remove_adapter_t info;
- } didd_remove_adapter;
- struct {
- ENTITY e;
- diva_didd_read_adapter_array_t info;
- } didd_read_adapter_array;
- struct {
- ENTITY e;
- diva_didd_get_cfg_lib_ifc_t info;
- } didd_get_cfg_lib_ifc;
- struct {
- unsigned char Req;
- unsigned char Rc;
- diva_xdi_get_logical_adapter_number_s_t info;
- } xdi_logical_adapter_number;
- struct {
- unsigned char Req;
- unsigned char Rc;
- diva_xdi_dma_descriptor_operation_t info;
- } xdi_dma_descriptor_operation;
-} IDI_SYNC_REQ;
-/******************************************************************************/
-#endif /* __DIVA_SYNC__H */
diff --git a/drivers/isdn/hardware/eicon/dqueue.c b/drivers/isdn/hardware/eicon/dqueue.c
deleted file mode 100644
index 7958a2536a10..000000000000
--- a/drivers/isdn/hardware/eicon/dqueue.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/* $Id: dqueue.c,v 1.5 2003/04/12 21:40:49 schindler Exp $
- *
- * Driver for Eicon DIVA Server ISDN cards.
- * User Mode IDI Interface
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include "platform.h"
-#include "dqueue.h"
-
-int
-diva_data_q_init(diva_um_idi_data_queue_t *q,
- int max_length, int max_segments)
-{
- int i;
-
- q->max_length = max_length;
- q->segments = max_segments;
-
- for (i = 0; i < q->segments; i++) {
- q->data[i] = NULL;
- q->length[i] = 0;
- }
- q->read = q->write = q->count = q->segment_pending = 0;
-
- for (i = 0; i < q->segments; i++) {
- if (!(q->data[i] = diva_os_malloc(0, q->max_length))) {
- diva_data_q_finit(q);
- return (-1);
- }
- }
-
- return (0);
-}
-
-int diva_data_q_finit(diva_um_idi_data_queue_t *q)
-{
- int i;
-
- for (i = 0; i < q->segments; i++) {
- if (q->data[i]) {
- diva_os_free(0, q->data[i]);
- }
- q->data[i] = NULL;
- q->length[i] = 0;
- }
- q->read = q->write = q->count = q->segment_pending = 0;
-
- return (0);
-}
-
-int diva_data_q_get_max_length(const diva_um_idi_data_queue_t *q)
-{
- return (q->max_length);
-}
-
-void *diva_data_q_get_segment4write(diva_um_idi_data_queue_t *q)
-{
- if ((!q->segment_pending) && (q->count < q->segments)) {
- q->segment_pending = 1;
- return (q->data[q->write]);
- }
-
- return NULL;
-}
-
-void
-diva_data_q_ack_segment4write(diva_um_idi_data_queue_t *q, int length)
-{
- if (q->segment_pending) {
- q->length[q->write] = length;
- q->count++;
- q->write++;
- if (q->write >= q->segments) {
- q->write = 0;
- }
- q->segment_pending = 0;
- }
-}
-
-const void *diva_data_q_get_segment4read(const diva_um_idi_data_queue_t *
- q)
-{
- if (q->count) {
- return (q->data[q->read]);
- }
- return NULL;
-}
-
-int diva_data_q_get_segment_length(const diva_um_idi_data_queue_t *q)
-{
- return (q->length[q->read]);
-}
-
-void diva_data_q_ack_segment4read(diva_um_idi_data_queue_t *q)
-{
- if (q->count) {
- q->length[q->read] = 0;
- q->count--;
- q->read++;
- if (q->read >= q->segments) {
- q->read = 0;
- }
- }
-}
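
The queue above uses a two-phase protocol on both sides: a producer
reserves the current write segment, fills it, then commits it with an
ack; a consumer peeks at the oldest segment, then releases it with an
ack. Only one write reservation can be pending at a time
(segment_pending). A minimal sketch; 'msg' and 'msg_len' are
illustrative names:

	diva_um_idi_data_queue_t q;
	void *wr;
	const void *rd;

	if (!diva_data_q_init(&q, 512, 8)) {
		/* producer: reserve, fill, commit */
		if ((wr = diva_data_q_get_segment4write(&q))) {
			memcpy(wr, msg, msg_len);	/* msg_len <= 512 */
			diva_data_q_ack_segment4write(&q, msg_len);
		}
		/* consumer: peek at the oldest segment, then release it */
		if ((rd = diva_data_q_get_segment4read(&q))) {
			int len = diva_data_q_get_segment_length(&q);
			/* ... process 'len' bytes at 'rd' ... */
			diva_data_q_ack_segment4read(&q);
		}
		diva_data_q_finit(&q);
	}
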
diff --git a/drivers/isdn/hardware/eicon/dqueue.h b/drivers/isdn/hardware/eicon/dqueue.h
deleted file mode 100644
index 2da9799686ab..000000000000
--- a/drivers/isdn/hardware/eicon/dqueue.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: dqueue.h,v 1.1.2.2 2001/02/08 12:25:43 armin Exp $ */
-
-#ifndef _DIVA_USER_MODE_IDI_DATA_QUEUE_H__
-#define _DIVA_USER_MODE_IDI_DATA_QUEUE_H__
-
-#define DIVA_UM_IDI_MAX_MSGS 64
-
-typedef struct _diva_um_idi_data_queue {
- int segments;
- int max_length;
- int read;
- int write;
- int count;
- int segment_pending;
- void *data[DIVA_UM_IDI_MAX_MSGS];
- int length[DIVA_UM_IDI_MAX_MSGS];
-} diva_um_idi_data_queue_t;
-
-int diva_data_q_init(diva_um_idi_data_queue_t *q,
- int max_length, int max_segments);
-int diva_data_q_finit(diva_um_idi_data_queue_t *q);
-int diva_data_q_get_max_length(const diva_um_idi_data_queue_t *q);
-void *diva_data_q_get_segment4write(diva_um_idi_data_queue_t *q);
-void diva_data_q_ack_segment4write(diva_um_idi_data_queue_t *q,
- int length);
-const void *diva_data_q_get_segment4read(const diva_um_idi_data_queue_t *
- q);
-int diva_data_q_get_segment_length(const diva_um_idi_data_queue_t *q);
-void diva_data_q_ack_segment4read(diva_um_idi_data_queue_t *q);
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/dsp_defs.h b/drivers/isdn/hardware/eicon/dsp_defs.h
deleted file mode 100644
index 94828c87e2a4..000000000000
--- a/drivers/isdn/hardware/eicon/dsp_defs.h
+++ /dev/null
@@ -1,301 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef DSP_DEFS_H_
-#define DSP_DEFS_H_
-#include "dspdids.h"
-/*---------------------------------------------------------------------------*/
-#define dsp_download_reserve_space(fp, length)
-/*****************************************************************************/
-/*
- * OS file access abstraction layer
- *
- * I/O functions return -1 on error, 0 on EOF
- */
-struct _OsFileHandle_;
-typedef long (*OsFileIo)(struct _OsFileHandle_ *handle,
- void *buffer,
- long size);
-typedef long (*OsFileSeek)(struct _OsFileHandle_ *handle,
- long position,
- int mode);
-typedef long (*OsCardLoad)(struct _OsFileHandle_ *handle,
- long length,
- void **addr);
-typedef struct _OsFileHandle_
-{ void *sysFileDesc;
- unsigned long sysFileSize;
- OsFileIo sysFileRead;
- OsFileSeek sysFileSeek;
- void *sysLoadDesc;
- OsCardLoad sysCardLoad;
-} OsFileHandle;
-extern OsFileHandle *OsOpenFile(char *path_name);
-extern void OsCloseFile(OsFileHandle *fp);
-/*****************************************************************************/
-#define DSP_TELINDUS_FILE "dspdload.bin"
-/* special DSP file for Qsig and CornetN on BRI cards, needed because of their limited memory */
-#define DSP_QSIG_TELINDUS_FILE "dspdqsig.bin"
-#define DSP_MDM_TELINDUS_FILE "dspdvmdm.bin"
-#define DSP_FAX_TELINDUS_FILE "dspdvfax.bin"
-#define DSP_DIRECTORY_ENTRIES 64
-#define DSP_MEMORY_TYPE_EXTERNAL_DM 0
-#define DSP_MEMORY_TYPE_EXTERNAL_PM 1
-#define DSP_MEMORY_TYPE_INTERNAL_DM 2
-#define DSP_MEMORY_TYPE_INTERNAL_PM 3
-#define DSP_DOWNLOAD_FLAG_BOOTABLE 0x0001
-#define DSP_DOWNLOAD_FLAG_2181 0x0002
-#define DSP_DOWNLOAD_FLAG_TIMECRITICAL 0x0004
-#define DSP_DOWNLOAD_FLAG_COMPAND 0x0008
-#define DSP_MEMORY_BLOCK_COUNT 16
-#define DSP_SEGMENT_PM_FLAG 0x0001
-#define DSP_SEGMENT_SHARED_FLAG 0x0002
-#define DSP_SEGMENT_EXTERNAL_DM DSP_MEMORY_TYPE_EXTERNAL_DM
-#define DSP_SEGMENT_EXTERNAL_PM DSP_MEMORY_TYPE_EXTERNAL_PM
-#define DSP_SEGMENT_INTERNAL_DM DSP_MEMORY_TYPE_INTERNAL_DM
-#define DSP_SEGMENT_INTERNAL_PM DSP_MEMORY_TYPE_INTERNAL_PM
-#define DSP_SEGMENT_FIRST_RELOCATABLE 4
-#define DSP_DATA_BLOCK_PM_FLAG 0x0001
-#define DSP_DATA_BLOCK_DWORD_FLAG 0x0002
-#define DSP_DATA_BLOCK_RESOLVE_FLAG 0x0004
-#define DSP_RELOC_NONE 0x00
-#define DSP_RELOC_SEGMENT_MASK 0x3f
-#define DSP_RELOC_TYPE_MASK 0xc0
-#define DSP_RELOC_TYPE_0 0x00 /* relocation of address in DM word / high part of PM word */
-#define DSP_RELOC_TYPE_1 0x40 /* relocation of address in low part of PM data word */
-#define DSP_RELOC_TYPE_2 0x80 /* relocation of address in standard command */
-#define DSP_RELOC_TYPE_3 0xc0 /* relocation of address in call/jump on flag in */
-#define DSP_COMBIFILE_FORMAT_IDENTIFICATION_SIZE 48
-#define DSP_COMBIFILE_FORMAT_VERSION_BCD 0x0100
-#define DSP_FILE_FORMAT_IDENTIFICATION_SIZE 48
-#define DSP_FILE_FORMAT_VERSION_BCD 0x0100
-typedef struct tag_dsp_combifile_header
-{
- char format_identification[DSP_COMBIFILE_FORMAT_IDENTIFICATION_SIZE];
- word format_version_bcd;
- word header_size;
- word combifile_description_size;
- word directory_entries;
- word directory_size;
- word download_count;
- word usage_mask_size;
-} t_dsp_combifile_header;
-typedef struct tag_dsp_combifile_directory_entry
-{
- word card_type_number;
- word file_set_number;
-} t_dsp_combifile_directory_entry;
-typedef struct tag_dsp_file_header
-{
- char format_identification[DSP_FILE_FORMAT_IDENTIFICATION_SIZE];
- word format_version_bcd;
- word download_id;
- word download_flags;
- word required_processing_power;
- word interface_channel_count;
- word header_size;
- word download_description_size;
- word memory_block_table_size;
- word memory_block_count;
- word segment_table_size;
- word segment_count;
- word symbol_table_size;
- word symbol_count;
- word total_data_size_dm;
- word data_block_count_dm;
- word total_data_size_pm;
- word data_block_count_pm;
-} t_dsp_file_header;
-typedef struct tag_dsp_memory_block_desc
-{
- word alias_memory_block;
- word memory_type;
- word address;
- word size; /* DSP words */
-} t_dsp_memory_block_desc;
-typedef struct tag_dsp_segment_desc
-{
- word memory_block;
- word attributes;
- word base;
- word size;
- word alignment; /* ==0 -> no other legal start address than base */
-} t_dsp_segment_desc;
-typedef struct tag_dsp_symbol_desc
-{
- word symbol_id;
- word segment;
- word offset;
- word size; /* DSP words */
-} t_dsp_symbol_desc;
-typedef struct tag_dsp_data_block_header
-{
- word attributes;
- word segment;
- word offset;
- word size; /* DSP words */
-} t_dsp_data_block_header;
-typedef struct tag_dsp_download_desc
-{
- word download_id;
- word download_flags;
- word required_processing_power;
- word interface_channel_count;
- word excess_header_size;
- word memory_block_count;
- word segment_count;
- word symbol_count;
- word data_block_count_dm;
- word data_block_count_pm;
- byte *p_excess_header_data;
- char *p_download_description;
- t_dsp_memory_block_desc *p_memory_block_table;
- t_dsp_segment_desc *p_segment_table;
- t_dsp_symbol_desc *p_symbol_table;
- word *p_data_blocks_dm;
- word *p_data_blocks_pm;
-} t_dsp_desc;
-typedef struct tag_dsp_portable_download_desc /* be sure to keep native alignment for MAESTRA cards */
-{
- word download_id;
- word download_flags;
- word required_processing_power;
- word interface_channel_count;
- word excess_header_size;
- word memory_block_count;
- word segment_count;
- word symbol_count;
- word data_block_count_dm;
- word data_block_count_pm;
- dword p_excess_header_data;
- dword p_download_description;
- dword p_memory_block_table;
- dword p_segment_table;
- dword p_symbol_table;
- dword p_data_blocks_dm;
- dword p_data_blocks_pm;
-} t_dsp_portable_desc;
-#define DSP_DOWNLOAD_INDEX_KERNEL 0
-#define DSP30TX_DOWNLOAD_INDEX_KERNEL 1
-#define DSP30RX_DOWNLOAD_INDEX_KERNEL 2
-#define DSP_MAX_DOWNLOAD_COUNT 64
-#define DSP_DOWNLOAD_MAX_SEGMENTS 16
-#define DSP_UDATA_REQUEST_RECONFIGURE 0
-/*
- parameters:
- <word> reconfigure delay (in 8kHz samples)
- <word> reconfigure code
- <byte> reconfigure hdlc preamble flags
-*/
-#define DSP_RECONFIGURE_TX_FLAG 0x8000
-#define DSP_RECONFIGURE_SHORT_TRAIN_FLAG 0x4000
-#define DSP_RECONFIGURE_ECHO_PROTECT_FLAG 0x2000
-#define DSP_RECONFIGURE_HDLC_FLAG 0x1000
-#define DSP_RECONFIGURE_SYNC_FLAG 0x0800
-#define DSP_RECONFIGURE_PROTOCOL_MASK 0x00ff
-#define DSP_RECONFIGURE_IDLE 0
-#define DSP_RECONFIGURE_V25 1
-#define DSP_RECONFIGURE_V21_CH2 2
-#define DSP_RECONFIGURE_V27_2400 3
-#define DSP_RECONFIGURE_V27_4800 4
-#define DSP_RECONFIGURE_V29_7200 5
-#define DSP_RECONFIGURE_V29_9600 6
-#define DSP_RECONFIGURE_V33_12000 7
-#define DSP_RECONFIGURE_V33_14400 8
-#define DSP_RECONFIGURE_V17_7200 9
-#define DSP_RECONFIGURE_V17_9600 10
-#define DSP_RECONFIGURE_V17_12000 11
-#define DSP_RECONFIGURE_V17_14400 12
-/*
- data indications if transparent framer
- <byte> data 0
- <byte> data 1
- ...
- data indications if HDLC framer
- <byte> data 0
- <byte> data 1
- ...
- <byte> CRC 0
- <byte> CRC 1
- <byte> preamble flags
-*/
-#define DSP_UDATA_INDICATION_SYNC 0
-/*
- returns:
- <word> time of sync (sampled from counter at 8kHz)
-*/
-#define DSP_UDATA_INDICATION_DCD_OFF 1
-/*
- returns:
- <word> time of DCD off (sampled from counter at 8kHz)
-*/
-#define DSP_UDATA_INDICATION_DCD_ON 2
-/*
- returns:
- <word> time of DCD on (sampled from counter at 8kHz)
- <byte> connected norm
- <word> connected options
- <dword> connected speed (bit/s)
-*/
-#define DSP_UDATA_INDICATION_CTS_OFF 3
-/*
- returns:
- <word> time of CTS off (sampled from counter at 8kHz)
-*/
-#define DSP_UDATA_INDICATION_CTS_ON 4
-/*
- returns:
- <word> time of CTS on (sampled from counter at 8kHz)
- <byte> connected norm
- <word> connected options
- <dword> connected speed (bit/s)
-*/
-#define DSP_CONNECTED_NORM_UNSPECIFIED 0
-#define DSP_CONNECTED_NORM_V21 1
-#define DSP_CONNECTED_NORM_V23 2
-#define DSP_CONNECTED_NORM_V22 3
-#define DSP_CONNECTED_NORM_V22_BIS 4
-#define DSP_CONNECTED_NORM_V32_BIS 5
-#define DSP_CONNECTED_NORM_V34 6
-#define DSP_CONNECTED_NORM_V8 7
-#define DSP_CONNECTED_NORM_BELL_212A 8
-#define DSP_CONNECTED_NORM_BELL_103 9
-#define DSP_CONNECTED_NORM_V29_LEASED_LINE 10
-#define DSP_CONNECTED_NORM_V33_LEASED_LINE 11
-#define DSP_CONNECTED_NORM_TFAST 12
-#define DSP_CONNECTED_NORM_V21_CH2 13
-#define DSP_CONNECTED_NORM_V27_TER 14
-#define DSP_CONNECTED_NORM_V29 15
-#define DSP_CONNECTED_NORM_V33 16
-#define DSP_CONNECTED_NORM_V17 17
-#define DSP_CONNECTED_OPTION_TRELLIS 0x0001
-/*---------------------------------------------------------------------------*/
-extern char *dsp_read_file(OsFileHandle *fp,
- word card_type_number,
- word *p_dsp_download_count,
- t_dsp_desc *p_dsp_download_table,
- t_dsp_portable_desc *p_dsp_portable_download_table);
-/*---------------------------------------------------------------------------*/
-#endif /* DSP_DEFS_H_ */
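
Among the constants deleted above, a relocation byte packs a segment index and a relocation type into one byte via DSP_RELOC_SEGMENT_MASK and DSP_RELOC_TYPE_MASK. A self-contained sketch of the decode; the decode() helper is illustrative, only the mask values come from the header:

#include <stdio.h>

#define DSP_RELOC_SEGMENT_MASK 0x3f
#define DSP_RELOC_TYPE_MASK    0xc0

static void decode(unsigned char reloc)
{
    unsigned seg  = reloc & DSP_RELOC_SEGMENT_MASK;       /* low 6 bits: segment */
    unsigned type = (reloc & DSP_RELOC_TYPE_MASK) >> 6;   /* high 2 bits: type 0..3 */
    printf("reloc 0x%02x -> segment %u, type %u\n", reloc, seg, type);
}

int main(void)
{
    decode(0x00);  /* DSP_RELOC_TYPE_0, segment 0 */
    decode(0x44);  /* DSP_RELOC_TYPE_1, segment 4 */
    decode(0xc5);  /* DSP_RELOC_TYPE_3, segment 5 */
    return 0;
}
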
diff --git a/drivers/isdn/hardware/eicon/dsp_tst.h b/drivers/isdn/hardware/eicon/dsp_tst.h
deleted file mode 100644
index 85edd3ea50f7..000000000000
--- a/drivers/isdn/hardware/eicon/dsp_tst.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: dsp_tst.h,v 1.1.2.2 2001/02/08 12:25:43 armin Exp $ */
-
-#ifndef __DIVA_PRI_HOST_TEST_DSPS_H__
-#define __DIVA_PRI_HOST_TEST_DSPS_H__
-
-/*
- DSP registers on maestra pri
-*/
-#define DSP1_PORT (0x00)
-#define DSP2_PORT (0x8)
-#define DSP3_PORT (0x800)
-#define DSP4_PORT (0x808)
-#define DSP5_PORT (0x810)
-#define DSP6_PORT (0x818)
-#define DSP7_PORT (0x820)
-#define DSP8_PORT (0x828)
-#define DSP9_PORT (0x830)
-#define DSP10_PORT (0x840)
-#define DSP11_PORT (0x848)
-#define DSP12_PORT (0x850)
-#define DSP13_PORT (0x858)
-#define DSP14_PORT (0x860)
-#define DSP15_PORT (0x868)
-#define DSP16_PORT (0x870)
-#define DSP17_PORT (0x1000)
-#define DSP18_PORT (0x1008)
-#define DSP19_PORT (0x1010)
-#define DSP20_PORT (0x1018)
-#define DSP21_PORT (0x1020)
-#define DSP22_PORT (0x1028)
-#define DSP23_PORT (0x1030)
-#define DSP24_PORT (0x1040)
-#define DSP25_PORT (0x1048)
-#define DSP26_PORT (0x1050)
-#define DSP27_PORT (0x1058)
-#define DSP28_PORT (0x1060)
-#define DSP29_PORT (0x1068)
-#define DSP30_PORT (0x1070)
-#define DSP_ADR_OFFS 0x80
-
-/*------------------------------------------------------------------
- Dsp related definitions
- ------------------------------------------------------------------ */
-#define DSP_SIGNATURE_PROBE_WORD 0x5a5a
-#define dsp_make_address_ex(pm, address) ((word)((pm) ? (address) : (address) + 0x4000))
-
-#endif
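
The dsp_make_address_ex() macro deleted above leaves program-memory addresses unchanged and offsets data-memory addresses by 0x4000. A standalone illustration, with unsigned short standing in for the driver's 'word' typedef:

#include <stdio.h>

#define dsp_make_address_ex(pm, address) \
    ((unsigned short)((pm) ? (address) : (address) + 0x4000))

int main(void)
{
    printf("PM 0x0123 -> 0x%04x\n", dsp_make_address_ex(1, 0x0123)); /* unchanged */
    printf("DM 0x0123 -> 0x%04x\n", dsp_make_address_ex(0, 0x0123)); /* +0x4000   */
    return 0;
}
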
diff --git a/drivers/isdn/hardware/eicon/dspdids.h b/drivers/isdn/hardware/eicon/dspdids.h
deleted file mode 100644
index 957b33cc0022..000000000000
--- a/drivers/isdn/hardware/eicon/dspdids.h
+++ /dev/null
@@ -1,75 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef DSPDIDS_H_
-#define DSPDIDS_H_
-/*---------------------------------------------------------------------------*/
-#define DSP_DID_INVALID 0
-#define DSP_DID_DIVA 1
-#define DSP_DID_DIVA_PRO 2
-#define DSP_DID_DIVA_PRO_20 3
-#define DSP_DID_DIVA_PRO_PCCARD 4
-#define DSP_DID_DIVA_SERVER_BRI_1M 5
-#define DSP_DID_DIVA_SERVER_BRI_2M 6
-#define DSP_DID_DIVA_SERVER_PRI_2M_TX 7
-#define DSP_DID_DIVA_SERVER_PRI_2M_RX 8
-#define DSP_DID_DIVA_SERVER_PRI_30M 9
-#define DSP_DID_TASK_HSCX 100
-#define DSP_DID_TASK_HSCX_PRI_2M_TX 101
-#define DSP_DID_TASK_HSCX_PRI_2M_RX 102
-#define DSP_DID_TASK_V110KRNL 200
-#define DSP_DID_OVERLAY_V1100 201
-#define DSP_DID_OVERLAY_V1101 202
-#define DSP_DID_OVERLAY_V1102 203
-#define DSP_DID_OVERLAY_V1103 204
-#define DSP_DID_OVERLAY_V1104 205
-#define DSP_DID_OVERLAY_V1105 206
-#define DSP_DID_OVERLAY_V1106 207
-#define DSP_DID_OVERLAY_V1107 208
-#define DSP_DID_OVERLAY_V1108 209
-#define DSP_DID_OVERLAY_V1109 210
-#define DSP_DID_TASK_V110_PRI_2M_TX 220
-#define DSP_DID_TASK_V110_PRI_2M_RX 221
-#define DSP_DID_TASK_MODEM 300
-#define DSP_DID_TASK_FAX05 400
-#define DSP_DID_TASK_VOICE 500
-#define DSP_DID_TASK_TIKRNL81 600
-#define DSP_DID_OVERLAY_DIAL 601
-#define DSP_DID_OVERLAY_V22 602
-#define DSP_DID_OVERLAY_V32 603
-#define DSP_DID_OVERLAY_FSK 604
-#define DSP_DID_OVERLAY_FAX 605
-#define DSP_DID_OVERLAY_VXX 606
-#define DSP_DID_OVERLAY_V8 607
-#define DSP_DID_OVERLAY_INFO 608
-#define DSP_DID_OVERLAY_V34 609
-#define DSP_DID_OVERLAY_DFX 610
-#define DSP_DID_PARTIAL_OVERLAY_DIAL 611
-#define DSP_DID_PARTIAL_OVERLAY_FSK 612
-#define DSP_DID_PARTIAL_OVERLAY_FAX 613
-#define DSP_DID_TASK_TIKRNL05 700
-/*---------------------------------------------------------------------------*/
-#endif
-/*---------------------------------------------------------------------------*/
diff --git a/drivers/isdn/hardware/eicon/dsrv4bri.h b/drivers/isdn/hardware/eicon/dsrv4bri.h
deleted file mode 100644
index f353fb6b8933..000000000000
--- a/drivers/isdn/hardware/eicon/dsrv4bri.h
+++ /dev/null
@@ -1,40 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_DSRV_4_BRI_INC__
-#define __DIVA_XDI_DSRV_4_BRI_INC__
-/*
- * Some special registers in the PLX 9054
- */
-#define PLX9054_P2LDBELL 0x60
-#define PLX9054_L2PDBELL 0x64
-#define PLX9054_INTCSR 0x69
-#define PLX9054_INT_ENABLE 0x09
-#define PLX9054_SOFT_RESET 0x4000
-#define PLX9054_RELOAD_EEPROM 0x2000
-#define DIVA_4BRI_REVISION(__x__) (((__x__)->cardType == CARDTYPE_DIVASRV_Q_8M_V2_PCI) || ((__x__)->cardType == CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI) || ((__x__)->cardType == CARDTYPE_DIVASRV_B_2M_V2_PCI) || ((__x__)->cardType == CARDTYPE_DIVASRV_B_2F_PCI) || ((__x__)->cardType == CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI))
-void diva_os_set_qBri_functions(PISDN_ADAPTER IoAdapter);
-void diva_os_set_qBri2_functions(PISDN_ADAPTER IoAdapter);
-#endif
diff --git a/drivers/isdn/hardware/eicon/dsrv_bri.h b/drivers/isdn/hardware/eicon/dsrv_bri.h
deleted file mode 100644
index 8a67dbc65be4..000000000000
--- a/drivers/isdn/hardware/eicon/dsrv_bri.h
+++ /dev/null
@@ -1,37 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_DSRV_BRI_INC__
-#define __DIVA_XDI_DSRV_BRI_INC__
-/*
- Functions exported from the OS-dependent part of
- the BRI card configuration and used in the
- OS-independent part
-*/
-/*
- Prepare OS dependent part of BRI functions
-*/
-void diva_os_prepare_maestra_functions(PISDN_ADAPTER IoAdapter);
-#endif
diff --git a/drivers/isdn/hardware/eicon/dsrv_pri.h b/drivers/isdn/hardware/eicon/dsrv_pri.h
deleted file mode 100644
index fd1a9ff9f195..000000000000
--- a/drivers/isdn/hardware/eicon/dsrv_pri.h
+++ /dev/null
@@ -1,38 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_DSRV_PRI_INC__
-#define __DIVA_XDI_DSRV_PRI_INC__
-/*
- Functions exported from the OS-dependent part of
- the PRI card configuration and used in the
- OS-independent part
-*/
-/*
- Prepare OS dependent part of PRI/PRI Rev.2 functions
-*/
-void diva_os_prepare_pri_functions(PISDN_ADAPTER IoAdapter);
-void diva_os_prepare_pri2_functions(PISDN_ADAPTER IoAdapter);
-#endif
diff --git a/drivers/isdn/hardware/eicon/entity.h b/drivers/isdn/hardware/eicon/entity.h
deleted file mode 100644
index f9767d321db9..000000000000
--- a/drivers/isdn/hardware/eicon/entity.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: entity.h,v 1.4 2004/03/21 17:26:01 armin Exp $ */
-
-#ifndef __DIVAS_USER_MODE_IDI_ENTITY__
-#define __DIVAS_USER_MODE_IDI_ENTITY__
-
-#define DIVA_UM_IDI_RC_PENDING 0x00000001
-#define DIVA_UM_IDI_REMOVE_PENDING 0x00000002
-#define DIVA_UM_IDI_TX_FLOW_CONTROL 0x00000004
-#define DIVA_UM_IDI_REMOVED 0x00000008
-#define DIVA_UM_IDI_ASSIGN_PENDING 0x00000010
-
-typedef struct _divas_um_idi_entity {
- struct list_head link;
- diva_um_idi_adapter_t *adapter; /* Back to adapter */
- ENTITY e;
- void *os_ref;
- dword status;
- void *os_context;
- int rc_count;
- diva_um_idi_data_queue_t data; /* defined by user, 1 ... MAX */
- diva_um_idi_data_queue_t rc; /* two entries */
- BUFFERS XData;
- BUFFERS RData;
- byte buffer[2048 + 512];
-} divas_um_idi_entity_t;
-
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/helpers.h b/drivers/isdn/hardware/eicon/helpers.h
deleted file mode 100644
index c9156b0acaba..000000000000
--- a/drivers/isdn/hardware/eicon/helpers.h
+++ /dev/null
@@ -1,51 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_CARD_CONFIG_HELPERS_INC__
-#define __DIVA_XDI_CARD_CONFIG_HELPERS_INC__
-dword diva_get_protocol_file_features(byte *File,
- int offset,
- char *IdStringBuffer,
- dword IdBufferSize);
-void diva_configure_protocol(PISDN_ADAPTER IoAdapter);
-/*
- Low level file access system abstraction
-*/
-/* -------------------------------------------------------------------------
- Access to a single file.
- Returns a pointer to the image of the requested file and
- writes the image length to 'FileLength'
- ------------------------------------------------------------------------- */
-void *xdiLoadFile(char *FileName, dword *FileLength, unsigned long MaxLoadSize);
-/* -------------------------------------------------------------------------
- Depending on the protocol settings, returns a pointer
- to the image of the appropriate protocol file
- ------------------------------------------------------------------------- */
-void *xdiLoadArchive(PISDN_ADAPTER IoAdapter, dword *FileLength, unsigned long MaxLoadSize);
-/* --------------------------------------------------------------------------
- Free all system resources accessed by xdiLoadFile and xdiLoadArchive
- -------------------------------------------------------------------------- */
-void xdiFreeFile(void *handle);
-#endif
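
The header above documents xdiLoadFile() as returning a pointer to the file image, writing the image length to 'FileLength', and honouring a maximum load size. A user-space analogue of that contract, as a hypothetical stand-in rather than the driver's implementation (the "dspdload.bin" name is taken from dsp_defs.h above):

#include <stdio.h>
#include <stdlib.h>

static void *load_file(const char *name, unsigned long *len, unsigned long max)
{
    FILE *f = fopen(name, "rb");
    void *buf = NULL;
    long sz;

    if (!f)
        return NULL;
    if (fseek(f, 0, SEEK_END) == 0 && (sz = ftell(f)) > 0 &&
        (!max || (unsigned long)sz <= max) && (buf = malloc(sz))) {
        rewind(f);
        if (fread(buf, 1, sz, f) == (size_t)sz) {
            *len = (unsigned long)sz;      /* report image length */
        } else {
            free(buf);
            buf = NULL;
        }
    }
    fclose(f);
    return buf;
}

int main(void)
{
    unsigned long len = 0;
    void *image = load_file("dspdload.bin", &len, 0 /* 0 = no size limit */);
    if (image) {
        printf("loaded %lu bytes\n", len);
        free(image);                       /* counterpart of xdiFreeFile() */
    }
    return 0;
}
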
diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
deleted file mode 100644
index fef6586fe5ac..000000000000
--- a/drivers/isdn/hardware/eicon/idifunc.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/* $Id: idifunc.c,v 1.14.4.4 2004/08/28 20:03:53 armin Exp $
- *
- * Driver for Eicon DIVA Server ISDN cards.
- * User Mode IDI Interface
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#include "platform.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "um_xdi.h"
-#include "um_idi.h"
-
-#define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-extern char *DRIVERRELEASE_IDI;
-
-extern void DIVA_DIDD_Read(void *, int);
-extern int diva_user_mode_idi_create_adapter(const DESCRIPTOR *, int);
-extern void diva_user_mode_idi_remove_adapter(int);
-
-static dword notify_handle;
-static DESCRIPTOR DAdapter;
-static DESCRIPTOR MAdapter;
-
-static void no_printf(unsigned char *x, ...)
-{
- /* dummy debug function */
-}
-
-#include "debuglib.c"
-
-/*
- * stop debug
- */
-static void stop_dbg(void)
-{
- DbgDeregister();
- memset(&MAdapter, 0, sizeof(MAdapter));
- dprintf = no_printf;
-}
-
-typedef struct _udiva_card {
- struct list_head list;
- int Id;
- DESCRIPTOR d;
-} udiva_card;
-
-static LIST_HEAD(cards);
-static diva_os_spin_lock_t ll_lock;
-
-/*
- * find card in list
- */
-static udiva_card *find_card_in_list(DESCRIPTOR *d)
-{
- udiva_card *card;
- struct list_head *tmp;
- diva_os_spin_lock_magic_t old_irql;
-
- diva_os_enter_spin_lock(&ll_lock, &old_irql, "find card");
- list_for_each(tmp, &cards) {
- card = list_entry(tmp, udiva_card, list);
- if (card->d.request == d->request) {
- diva_os_leave_spin_lock(&ll_lock, &old_irql,
- "find card");
- return (card);
- }
- }
- diva_os_leave_spin_lock(&ll_lock, &old_irql, "find card");
- return ((udiva_card *) NULL);
-}
-
-/*
- * new card
- */
-static void um_new_card(DESCRIPTOR *d)
-{
- int adapter_nr = 0;
- udiva_card *card = NULL;
- IDI_SYNC_REQ sync_req;
- diva_os_spin_lock_magic_t old_irql;
-
- if (!(card = diva_os_malloc(0, sizeof(udiva_card)))) {
- DBG_ERR(("cannot get buffer for card"));
- return;
- }
- memcpy(&card->d, d, sizeof(DESCRIPTOR));
- sync_req.xdi_logical_adapter_number.Req = 0;
- sync_req.xdi_logical_adapter_number.Rc =
- IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER;
- card->d.request((ENTITY *)&sync_req);
- adapter_nr =
- sync_req.xdi_logical_adapter_number.info.logical_adapter_number;
- card->Id = adapter_nr;
- if (!(diva_user_mode_idi_create_adapter(d, adapter_nr))) {
- diva_os_enter_spin_lock(&ll_lock, &old_irql, "add card");
- list_add_tail(&card->list, &cards);
- diva_os_leave_spin_lock(&ll_lock, &old_irql, "add card");
- } else {
- DBG_ERR(("could not create user mode idi card %d",
- adapter_nr));
- diva_os_free(0, card);
- }
-}
-
-/*
- * remove card
- */
-static void um_remove_card(DESCRIPTOR *d)
-{
- diva_os_spin_lock_magic_t old_irql;
- udiva_card *card = NULL;
-
- if (!(card = find_card_in_list(d))) {
- DBG_ERR(("cannot find card to remove"));
- return;
- }
- diva_user_mode_idi_remove_adapter(card->Id);
- diva_os_enter_spin_lock(&ll_lock, &old_irql, "remove card");
- list_del(&card->list);
- diva_os_leave_spin_lock(&ll_lock, &old_irql, "remove card");
- DBG_LOG(("idi proc entry removed for card %d", card->Id));
- diva_os_free(0, card);
-}
-
-/*
- * remove all adapters
- */
-static void __exit remove_all_idi_proc(void)
-{
- udiva_card *card;
- diva_os_spin_lock_magic_t old_irql;
-
-rescan:
- diva_os_enter_spin_lock(&ll_lock, &old_irql, "remove all");
- if (!list_empty(&cards)) {
- card = list_entry(cards.next, udiva_card, list);
- list_del(&card->list);
- diva_os_leave_spin_lock(&ll_lock, &old_irql, "remove all");
- diva_user_mode_idi_remove_adapter(card->Id);
- diva_os_free(0, card);
- goto rescan;
- }
- diva_os_leave_spin_lock(&ll_lock, &old_irql, "remove all");
-}
-
-/*
- * DIDD notify callback
- */
-static void *didd_callback(void *context, DESCRIPTOR *adapter,
- int removal)
-{
- if (adapter->type == IDI_DADAPTER) {
- DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."));
- return (NULL);
- } else if (adapter->type == IDI_DIMAINT) {
- if (removal) {
- stop_dbg();
- } else {
- memcpy(&MAdapter, adapter, sizeof(MAdapter));
- dprintf = (DIVA_DI_PRINTF) MAdapter.request;
- DbgRegister("User IDI", DRIVERRELEASE_IDI, DBG_DEFAULT);
- }
- } else if ((adapter->type > 0) && (adapter->type < 16)) { /* IDI Adapter */
- if (removal) {
- um_remove_card(adapter);
- } else {
- um_new_card(adapter);
- }
- }
- return (NULL);
-}
-
-/*
- * connect DIDD
- */
-static int __init connect_didd(void)
-{
- int x = 0;
- int dadapter = 0;
- IDI_SYNC_REQ req;
- DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
-
- DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
-
- for (x = 0; x < MAX_DESCRIPTORS; x++) {
- if (DIDD_Table[x].type == IDI_DADAPTER) { /* DADAPTER found */
- dadapter = 1;
- memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter));
- req.didd_notify.e.Req = 0;
- req.didd_notify.e.Rc =
- IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
- req.didd_notify.info.callback = (void *)didd_callback;
- req.didd_notify.info.context = NULL;
- DAdapter.request((ENTITY *)&req);
- if (req.didd_notify.e.Rc != 0xff) {
- stop_dbg();
- return (0);
- }
- notify_handle = req.didd_notify.info.handle;
- } else if (DIDD_Table[x].type == IDI_DIMAINT) { /* MAINT found */
- memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter));
- dprintf = (DIVA_DI_PRINTF) MAdapter.request;
- DbgRegister("User IDI", DRIVERRELEASE_IDI, DBG_DEFAULT);
- } else if ((DIDD_Table[x].type > 0)
- && (DIDD_Table[x].type < 16)) { /* IDI Adapter found */
- um_new_card(&DIDD_Table[x]);
- }
- }
-
- if (!dadapter) {
- stop_dbg();
- }
-
- return (dadapter);
-}
-
-/*
- * Disconnect from DIDD
- */
-static void __exit disconnect_didd(void)
-{
- IDI_SYNC_REQ req;
-
- stop_dbg();
-
- req.didd_notify.e.Req = 0;
- req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
- req.didd_notify.info.handle = notify_handle;
- DAdapter.request((ENTITY *)&req);
-}
-
-/*
- * init
- */
-int __init idifunc_init(void)
-{
- diva_os_initialize_spin_lock(&ll_lock, "idifunc");
-
- if (diva_user_mode_idi_init()) {
- DBG_ERR(("init: init failed."));
- return (0);
- }
-
- if (!connect_didd()) {
- diva_user_mode_idi_finit();
- DBG_ERR(("init: failed to connect to DIDD."));
- return (0);
- }
- return (1);
-}
-
-/*
- * finit
- */
-void __exit idifunc_finit(void)
-{
- diva_user_mode_idi_finit();
- disconnect_didd();
- remove_all_idi_proc();
-}
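
remove_all_idi_proc() in the file removed above uses a common teardown idiom: detach one node while holding the lock, drop the lock before calling cleanup code that may block, then rescan from the top. A standalone sketch with a pthread mutex standing in for the driver's spinlock; names here are hypothetical:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int id; };
static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void slow_cleanup(int id) { printf("cleaning card %d\n", id); }

static void remove_all(void)
{
rescan:
    pthread_mutex_lock(&lock);
    if (head) {
        struct node *n = head;
        head = n->next;                 /* detach while holding the lock */
        pthread_mutex_unlock(&lock);
        slow_cleanup(n->id);            /* may block: lock is not held */
        free(n);
        goto rescan;                    /* list may have changed: rescan */
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct node *n = malloc(sizeof(*n));
        n->id = i;
        n->next = head;
        head = n;
    }
    remove_all();
    return 0;
}
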
diff --git a/drivers/isdn/hardware/eicon/io.c b/drivers/isdn/hardware/eicon/io.c
deleted file mode 100644
index 8851ce580c23..000000000000
--- a/drivers/isdn/hardware/eicon/io.c
+++ /dev/null
@@ -1,852 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "divasync.h"
-#define MIPS_SCOM
-#include "pkmaint.h" /* pc_main.h, packed in os-dependent fashion */
-#include "di.h"
-#include "mi_pc.h"
-#include "io.h"
-extern ADAPTER *adapter[MAX_ADAPTER];
-extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
-void request(PISDN_ADAPTER, ENTITY *);
-static void pcm_req(PISDN_ADAPTER, ENTITY *);
-/* --------------------------------------------------------------------------
- local functions
- -------------------------------------------------------------------------- */
-#define ReqFunc(N) \
- static void Request##N(ENTITY *e) \
- { if (IoAdapters[N]) (*IoAdapters[N]->DIRequest)(IoAdapters[N], e); }
-ReqFunc(0)
-ReqFunc(1)
-ReqFunc(2)
-ReqFunc(3)
-ReqFunc(4)
-ReqFunc(5)
-ReqFunc(6)
-ReqFunc(7)
-ReqFunc(8)
-ReqFunc(9)
-ReqFunc(10)
-ReqFunc(11)
-ReqFunc(12)
-ReqFunc(13)
-ReqFunc(14)
-ReqFunc(15)
-IDI_CALL Requests[MAX_ADAPTER] =
-{ &Request0, &Request1, &Request2, &Request3,
- &Request4, &Request5, &Request6, &Request7,
- &Request8, &Request9, &Request10, &Request11,
- &Request12, &Request13, &Request14, &Request15
-};
-/*****************************************************************************/
-/*
- This array should indicate all new services, that this version of XDI
- is able to provide to his clients
-*/
-static byte extended_xdi_features[DIVA_XDI_EXTENDED_FEATURES_MAX_SZ + 1] = {
- (DIVA_XDI_EXTENDED_FEATURES_VALID |
- DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR |
- DIVA_XDI_EXTENDED_FEATURE_CAPI_PRMS |
-#if defined(DIVA_IDI_RX_DMA)
- DIVA_XDI_EXTENDED_FEATURE_CMA |
- DIVA_XDI_EXTENDED_FEATURE_RX_DMA |
- DIVA_XDI_EXTENDED_FEATURE_MANAGEMENT_DMA |
-#endif
- DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC),
- 0
-};
-/*****************************************************************************/
-void
-dump_xlog_buffer(PISDN_ADAPTER IoAdapter, Xdesc *xlogDesc)
-{
- dword logLen;
- word *Xlog = xlogDesc->buf;
- word logCnt = xlogDesc->cnt;
- word logOut = xlogDesc->out / sizeof(*Xlog);
- DBG_FTL(("%s: ************* XLOG recovery (%d) *************",
- &IoAdapter->Name[0], (int)logCnt))
- DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
- for (; logCnt > 0; --logCnt)
- {
- if (!GET_WORD(&Xlog[logOut]))
- {
- if (--logCnt == 0)
- break;
- logOut = 0;
- }
- if (GET_WORD(&Xlog[logOut]) <= (logOut * sizeof(*Xlog)))
- {
- if (logCnt > 2)
- {
- DBG_FTL(("Possibly corrupted XLOG: %d entries left",
- (int)logCnt))
- }
- break;
- }
- logLen = (dword)(GET_WORD(&Xlog[logOut]) - (logOut * sizeof(*Xlog)));
- DBG_FTL_MXLOG(((char *)&Xlog[logOut + 1], (dword)(logLen - 2)))
- logOut = (GET_WORD(&Xlog[logOut]) + 1) / sizeof(*Xlog);
- }
- DBG_FTL(("%s: ***************** end of XLOG *****************",
- &IoAdapter->Name[0]))
- }
-/*****************************************************************************/
-#if defined(XDI_USE_XLOG)
-static char *(ExceptionCauseTable[]) =
-{
- "Interrupt",
- "TLB mod /IBOUND",
- "TLB load /DBOUND",
- "TLB store",
- "Address error load",
- "Address error store",
- "Instruction load bus error",
- "Data load/store bus error",
- "Syscall",
- "Breakpoint",
- "Reverd instruction",
- "Coprocessor unusable",
- "Overflow",
- "TRAP",
- "VCEI",
- "Floating Point Exception",
- "CP2",
- "Reserved 17",
- "Reserved 18",
- "Reserved 19",
- "Reserved 20",
- "Reserved 21",
- "Reserved 22",
- "WATCH",
- "Reserved 24",
- "Reserved 25",
- "Reserved 26",
- "Reserved 27",
- "Reserved 28",
- "Reserved 29",
- "Reserved 30",
- "VCED"
-};
-#endif
-void
-dump_trap_frame(PISDN_ADAPTER IoAdapter, byte __iomem *exceptionFrame)
-{
- MP_XCPTC __iomem *xcept = (MP_XCPTC __iomem *)exceptionFrame;
- dword __iomem *regs;
- regs = &xcept->regs[0];
- DBG_FTL(("%s: ***************** CPU TRAPPED *****************",
- &IoAdapter->Name[0]))
- DBG_FTL(("Microcode: %s", &IoAdapter->ProtocolIdString[0]))
- DBG_FTL(("Cause: %s",
- ExceptionCauseTable[(READ_DWORD(&xcept->cr) & 0x0000007c) >> 2]))
- DBG_FTL(("sr 0x%08x cr 0x%08x epc 0x%08x vaddr 0x%08x",
- READ_DWORD(&xcept->sr), READ_DWORD(&xcept->cr),
- READ_DWORD(&xcept->epc), READ_DWORD(&xcept->vaddr)))
- DBG_FTL(("zero 0x%08x at 0x%08x v0 0x%08x v1 0x%08x",
- READ_DWORD(&regs[0]), READ_DWORD(&regs[1]),
- READ_DWORD(&regs[2]), READ_DWORD(&regs[3])))
- DBG_FTL(("a0 0x%08x a1 0x%08x a2 0x%08x a3 0x%08x",
- READ_DWORD(&regs[4]), READ_DWORD(&regs[5]),
- READ_DWORD(&regs[6]), READ_DWORD(&regs[7])))
- DBG_FTL(("t0 0x%08x t1 0x%08x t2 0x%08x t3 0x%08x",
- READ_DWORD(&regs[8]), READ_DWORD(&regs[9]),
- READ_DWORD(&regs[10]), READ_DWORD(&regs[11])))
- DBG_FTL(("t4 0x%08x t5 0x%08x t6 0x%08x t7 0x%08x",
- READ_DWORD(&regs[12]), READ_DWORD(&regs[13]),
- READ_DWORD(&regs[14]), READ_DWORD(&regs[15])))
- DBG_FTL(("s0 0x%08x s1 0x%08x s2 0x%08x s3 0x%08x",
- READ_DWORD(&regs[16]), READ_DWORD(&regs[17]),
- READ_DWORD(&regs[18]), READ_DWORD(&regs[19])))
- DBG_FTL(("s4 0x%08x s5 0x%08x s6 0x%08x s7 0x%08x",
- READ_DWORD(&regs[20]), READ_DWORD(&regs[21]),
- READ_DWORD(&regs[22]), READ_DWORD(&regs[23])))
- DBG_FTL(("t8 0x%08x t9 0x%08x k0 0x%08x k1 0x%08x",
- READ_DWORD(&regs[24]), READ_DWORD(&regs[25]),
- READ_DWORD(&regs[26]), READ_DWORD(&regs[27])))
- DBG_FTL(("gp 0x%08x sp 0x%08x s8 0x%08x ra 0x%08x",
- READ_DWORD(&regs[28]), READ_DWORD(&regs[29]),
- READ_DWORD(&regs[30]), READ_DWORD(&regs[31])))
- DBG_FTL(("md 0x%08x|%08x resvd 0x%08x class 0x%08x",
- READ_DWORD(&xcept->mdhi), READ_DWORD(&xcept->mdlo),
- READ_DWORD(&xcept->reseverd), READ_DWORD(&xcept->xclass)))
- }
-/* --------------------------------------------------------------------------
- Real XDI Request function
- -------------------------------------------------------------------------- */
-void request(PISDN_ADAPTER IoAdapter, ENTITY *e)
-{
- byte i;
- diva_os_spin_lock_magic_t irql;
-/*
- * if the Req field in the entity structure is 0,
- * we treat this request as a special function call
- */
- if (!e->Req)
- {
- IDI_SYNC_REQ *syncReq = (IDI_SYNC_REQ *)e;
- switch (e->Rc)
- {
-#if defined(DIVA_IDI_RX_DMA)
- case IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION: {
- diva_xdi_dma_descriptor_operation_t *pI = \
- &syncReq->xdi_dma_descriptor_operation.info;
- if (!IoAdapter->dma_map) {
- pI->operation = -1;
- pI->descriptor_number = -1;
- return;
- }
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "dma_op");
- if (pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC) {
- pI->descriptor_number = diva_alloc_dma_map_entry(\
- (struct _diva_dma_map_entry *)IoAdapter->dma_map);
- if (pI->descriptor_number >= 0) {
- dword dma_magic;
- void *local_addr;
- diva_get_dma_map_entry(\
- (struct _diva_dma_map_entry *)IoAdapter->dma_map,
- pI->descriptor_number,
- &local_addr, &dma_magic);
- pI->descriptor_address = local_addr;
- pI->descriptor_magic = dma_magic;
- pI->operation = 0;
- } else {
- pI->operation = -1;
- }
- } else if ((pI->operation == IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE) &&
- (pI->descriptor_number >= 0)) {
- diva_free_dma_map_entry((struct _diva_dma_map_entry *)IoAdapter->dma_map,
- pI->descriptor_number);
- pI->descriptor_number = -1;
- pI->operation = 0;
- } else {
- pI->descriptor_number = -1;
- pI->operation = -1;
- }
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "dma_op");
- } return;
-#endif
- case IDI_SYNC_REQ_XDI_GET_LOGICAL_ADAPTER_NUMBER: {
- diva_xdi_get_logical_adapter_number_s_t *pI = \
- &syncReq->xdi_logical_adapter_number.info;
- pI->logical_adapter_number = IoAdapter->ANum;
- pI->controller = IoAdapter->ControllerNumber;
- pI->total_controllers = IoAdapter->Properties.Adapters;
- } return;
- case IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS: {
- diva_xdi_get_capi_parameters_t prms, *pI = &syncReq->xdi_capi_prms.info;
- memset(&prms, 0x00, sizeof(prms));
- prms.structure_length = min_t(size_t, sizeof(prms), pI->structure_length);
- memset(pI, 0x00, pI->structure_length);
- prms.flag_dynamic_l1_down = (IoAdapter->capi_cfg.cfg_1 & \
- DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? 1 : 0;
- prms.group_optimization_enabled = (IoAdapter->capi_cfg.cfg_1 & \
- DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON) ? 1 : 0;
- memcpy(pI, &prms, prms.structure_length);
- } return;
- case IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR:
- syncReq->xdi_sdram_bar.info.bar = IoAdapter->sdram_bar;
- return;
- case IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES: {
- dword i;
- diva_xdi_get_extended_xdi_features_t *pI =\
- &syncReq->xdi_extended_features.info;
- pI->buffer_length_in_bytes &= ~0x80000000;
- if (pI->buffer_length_in_bytes && pI->features) {
- memset(pI->features, 0x00, pI->buffer_length_in_bytes);
- }
- for (i = 0; ((pI->features) && (i < pI->buffer_length_in_bytes) &&
- (i < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ)); i++) {
- pI->features[i] = extended_xdi_features[i];
- }
- if ((pI->buffer_length_in_bytes < DIVA_XDI_EXTENDED_FEATURES_MAX_SZ) ||
- (!pI->features)) {
- pI->buffer_length_in_bytes =\
- (0x80000000 | DIVA_XDI_EXTENDED_FEATURES_MAX_SZ);
- }
- } return;
- case IDI_SYNC_REQ_XDI_GET_STREAM:
- if (IoAdapter) {
- diva_xdi_provide_istream_info(&IoAdapter->a,
- &syncReq->xdi_stream_info.info);
- } else {
- syncReq->xdi_stream_info.info.provided_service = 0;
- }
- return;
- case IDI_SYNC_REQ_GET_NAME:
- if (IoAdapter)
- {
- strcpy(&syncReq->GetName.name[0], IoAdapter->Name);
- DBG_TRC(("xdi: Adapter %d / Name '%s'",
- IoAdapter->ANum, IoAdapter->Name))
- return;
- }
- syncReq->GetName.name[0] = '\0';
- break;
- case IDI_SYNC_REQ_GET_SERIAL:
- if (IoAdapter)
- {
- syncReq->GetSerial.serial = IoAdapter->serialNo;
- DBG_TRC(("xdi: Adapter %d / SerialNo %ld",
- IoAdapter->ANum, IoAdapter->serialNo))
- return;
- }
- syncReq->GetSerial.serial = 0;
- break;
- case IDI_SYNC_REQ_GET_CARDTYPE:
- if (IoAdapter)
- {
- syncReq->GetCardType.cardtype = IoAdapter->cardType;
- DBG_TRC(("xdi: Adapter %d / CardType %ld",
- IoAdapter->ANum, IoAdapter->cardType))
- return;
- }
- syncReq->GetCardType.cardtype = 0;
- break;
- case IDI_SYNC_REQ_GET_XLOG:
- if (IoAdapter)
- {
- pcm_req(IoAdapter, e);
- return;
- }
- e->Ind = 0;
- break;
- case IDI_SYNC_REQ_GET_DBG_XLOG:
- if (IoAdapter)
- {
- pcm_req(IoAdapter, e);
- return;
- }
- e->Ind = 0;
- break;
- case IDI_SYNC_REQ_GET_FEATURES:
- if (IoAdapter)
- {
- syncReq->GetFeatures.features =
- (unsigned short)IoAdapter->features;
- return;
- }
- syncReq->GetFeatures.features = 0;
- break;
- case IDI_SYNC_REQ_PORTDRV_HOOK:
- if (IoAdapter)
- {
- DBG_TRC(("Xdi:IDI_SYNC_REQ_PORTDRV_HOOK - ignored"))
- return;
- }
- break;
- }
- if (IoAdapter)
- {
- return;
- }
- }
- DBG_TRC(("xdi: Id 0x%x / Req 0x%x / Rc 0x%x", e->Id, e->Req, e->Rc))
- if (!IoAdapter)
- {
- DBG_FTL(("xdi: uninitialized Adapter used - ignore request"))
- return;
- }
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
-/*
- * assign an entity
- */
- if (!(e->Id & 0x1f))
- {
- if (IoAdapter->e_count >= IoAdapter->e_max)
- {
- DBG_FTL(("xdi: all Ids in use (max=%d) --> Req ignored",
- IoAdapter->e_max))
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
- return;
- }
-/*
- * find a new free id
- */
- for (i = 1; IoAdapter->e_tbl[i].e; ++i);
- IoAdapter->e_tbl[i].e = e;
- IoAdapter->e_count++;
- e->No = (byte)i;
- e->More = 0;
- e->RCurrent = 0xff;
- }
- else
- {
- i = e->No;
- }
-/*
- * if the entity is still busy, ignore the request call
- */
- if (e->More & XBUSY)
- {
- DBG_FTL(("xdi: Id 0x%x busy --> Req 0x%x ignored", e->Id, e->Req))
- if (!IoAdapter->trapped && IoAdapter->trapFnc)
- {
- IoAdapter->trapFnc(IoAdapter);
- /*
- First trap, also notify the user if supported
- */
- if (IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
- (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
- }
- }
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
- return;
- }
-/*
- * initialize transmit status variables
- */
- e->More |= XBUSY;
- e->More &= ~XMOREF;
- e->XCurrent = 0;
- e->XOffset = 0;
-/*
- * queue this entity in the adapter request queue
- */
- IoAdapter->e_tbl[i].next = 0;
- if (IoAdapter->head)
- {
- IoAdapter->e_tbl[IoAdapter->tail].next = i;
- IoAdapter->tail = i;
- }
- else
- {
- IoAdapter->head = i;
- IoAdapter->tail = i;
- }
-/*
- * queue the DPC to process the request
- */
- diva_os_schedule_soft_isr(&IoAdapter->req_soft_isr);
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req");
-}
-/* ---------------------------------------------------------------------
- Main DPC routine
- --------------------------------------------------------------------- */
-void DIDpcRoutine(struct _diva_os_soft_isr *psoft_isr, void *Context) {
- PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)Context;
- ADAPTER *a = &IoAdapter->a;
- diva_os_atomic_t *pin_dpc = &IoAdapter->in_dpc;
- if (diva_os_atomic_increment(pin_dpc) == 1) {
- do {
- if (IoAdapter->tst_irq(a))
- {
- if (!IoAdapter->Unavailable)
- IoAdapter->dpc(a);
- IoAdapter->clr_irq(a);
- }
- IoAdapter->out(a);
- } while (diva_os_atomic_decrement(pin_dpc) > 0);
- /* ----------------------------------------------------------------
- Look for XLOG request (cards with indirect addressing)
- ---------------------------------------------------------------- */
- if (IoAdapter->pcm_pending) {
- struct pc_maint *pcm;
- diva_os_spin_lock_magic_t OldIrql;
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
- &OldIrql,
- "data_dpc");
- pcm = (struct pc_maint *)IoAdapter->pcm_data;
- switch (IoAdapter->pcm_pending) {
- case 1: /* ask card for XLOG */
- a->ram_out(a, &IoAdapter->pcm->rc, 0);
- a->ram_out(a, &IoAdapter->pcm->req, pcm->req);
- IoAdapter->pcm_pending = 2;
- break;
- case 2: /* Try to get XLOG from the card */
- if ((int)(a->ram_in(a, &IoAdapter->pcm->rc))) {
- a->ram_in_buffer(a, IoAdapter->pcm, pcm, sizeof(*pcm));
- IoAdapter->pcm_pending = 3;
- }
- break;
- case 3: /* let XDI recover the XLOG */
- break;
- }
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
- &OldIrql,
- "data_dpc");
- }
- /* ---------------------------------------------------------------- */
- }
-}
-/* --------------------------------------------------------------------------
- XLOG interface
- -------------------------------------------------------------------------- */
-static void
-pcm_req(PISDN_ADAPTER IoAdapter, ENTITY *e)
-{
- diva_os_spin_lock_magic_t OldIrql;
- int i, rc;
- ADAPTER *a = &IoAdapter->a;
- struct pc_maint *pcm = (struct pc_maint *)&e->Ind;
-/*
- * special handling of the I/O based card interface:
- * the memory access isn't an atomic operation!
- */
- if (IoAdapter->Properties.Card == CARD_MAE)
- {
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
- &OldIrql,
- "data_pcm_1");
- IoAdapter->pcm_data = (void *)pcm;
- IoAdapter->pcm_pending = 1;
- diva_os_schedule_soft_isr(&IoAdapter->req_soft_isr);
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
- &OldIrql,
- "data_pcm_1");
- for (rc = 0, i = (IoAdapter->trapped ? 3000 : 250); !rc && (i > 0); --i)
- {
- diva_os_sleep(1);
- if (IoAdapter->pcm_pending == 3) {
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
- &OldIrql,
- "data_pcm_3");
- IoAdapter->pcm_pending = 0;
- IoAdapter->pcm_data = NULL;
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
- &OldIrql,
- "data_pcm_3");
- return;
- }
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
- &OldIrql,
- "data_pcm_2");
- diva_os_schedule_soft_isr(&IoAdapter->req_soft_isr);
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
- &OldIrql,
- "data_pcm_2");
- }
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
- &OldIrql,
- "data_pcm_4");
- IoAdapter->pcm_pending = 0;
- IoAdapter->pcm_data = NULL;
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
- &OldIrql,
- "data_pcm_4");
- goto Trapped;
- }
-/*
- * memory based shared ram is accessible from different
- * processors without disturbing concurrent processes.
- */
- a->ram_out(a, &IoAdapter->pcm->rc, 0);
- a->ram_out(a, &IoAdapter->pcm->req, pcm->req);
- for (i = (IoAdapter->trapped ? 3000 : 250); --i > 0;)
- {
- diva_os_sleep(1);
- rc = (int)(a->ram_in(a, &IoAdapter->pcm->rc));
- if (rc)
- {
- a->ram_in_buffer(a, IoAdapter->pcm, pcm, sizeof(*pcm));
- return;
- }
- }
-Trapped:
- if (IoAdapter->trapFnc)
- {
- int trapped = IoAdapter->trapped;
- IoAdapter->trapFnc(IoAdapter);
- /*
- First trap, also notify the user if supported
- */
- if (!trapped && IoAdapter->trapped && IoAdapter->os_trap_nfy_Fnc) {
- (*(IoAdapter->os_trap_nfy_Fnc))(IoAdapter, IoAdapter->ANum);
- }
- }
-}
-/*------------------------------------------------------------------*/
-/* ram access functions for memory mapped cards */
-/*------------------------------------------------------------------*/
-byte mem_in(ADAPTER *a, void *addr)
-{
- byte val;
- volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
- val = READ_BYTE(Base + (unsigned long)addr);
- DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
- return (val);
-}
-word mem_inw(ADAPTER *a, void *addr)
-{
- word val;
- volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
- val = READ_WORD((Base + (unsigned long)addr));
- DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
- return (val);
-}
-void mem_in_dw(ADAPTER *a, void *addr, dword *data, int dwords)
-{
- volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
- while (dwords--) {
- *data++ = READ_DWORD((Base + (unsigned long)addr));
- addr += 4;
- }
- DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_in_buffer(ADAPTER *a, void *addr, void *buffer, word length)
-{
- volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
- memcpy_fromio(buffer, (Base + (unsigned long)addr), length);
- DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_look_ahead(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e)
-{
- PISDN_ADAPTER IoAdapter = (PISDN_ADAPTER)a->io;
- IoAdapter->RBuffer.length = mem_inw(a, &RBuffer->length);
- mem_in_buffer(a, RBuffer->P, IoAdapter->RBuffer.P,
- IoAdapter->RBuffer.length);
- e->RBuffer = (DBUFFER *)&IoAdapter->RBuffer;
-}
-void mem_out(ADAPTER *a, void *addr, byte data)
-{
- volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
- WRITE_BYTE(Base + (unsigned long)addr, data);
- DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_outw(ADAPTER *a, void *addr, word data)
-{
- volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
- WRITE_WORD((Base + (unsigned long)addr), data);
- DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_out_dw(ADAPTER *a, void *addr, const dword *data, int dwords)
-{
- volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
- while (dwords--) {
- WRITE_DWORD((Base + (unsigned long)addr), *data);
- addr += 4;
- data++;
- }
- DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_out_buffer(ADAPTER *a, void *addr, void *buffer, word length)
-{
- volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
- memcpy_toio((Base + (unsigned long)addr), buffer, length);
- DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-void mem_inc(ADAPTER *a, void *addr)
-{
- volatile byte __iomem *Base = DIVA_OS_MEM_ATTACH_RAM((PISDN_ADAPTER)a->io);
- byte x = READ_BYTE(Base + (unsigned long)addr);
- WRITE_BYTE(Base + (unsigned long)addr, x + 1);
- DIVA_OS_MEM_DETACH_RAM((PISDN_ADAPTER)a->io, Base);
-}
-/*------------------------------------------------------------------*/
-/* ram access functions for io-mapped cards */
-/*------------------------------------------------------------------*/
-byte io_in(ADAPTER *a, void *adr)
-{
- byte val;
- byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
- outppw(Port + 4, (word)(unsigned long)adr);
- val = inpp(Port);
- DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
- return (val);
-}
-word io_inw(ADAPTER *a, void *adr)
-{
- word val;
- byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
- outppw(Port + 4, (word)(unsigned long)adr);
- val = inppw(Port);
- DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
- return (val);
-}
-void io_in_buffer(ADAPTER *a, void *adr, void *buffer, word len)
-{
- byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
- byte *P = (byte *)buffer;
- if ((long)adr & 1) {
- outppw(Port + 4, (word)(unsigned long)adr);
- *P = inpp(Port);
- P++;
- adr = ((byte *) adr) + 1;
- len--;
- if (!len) {
- DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
- return;
- }
- }
- outppw(Port + 4, (word)(unsigned long)adr);
- inppw_buffer(Port, P, len + 1);
- DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-void io_look_ahead(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e)
-{
- byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
- outppw(Port + 4, (word)(unsigned long)RBuffer);
- ((PISDN_ADAPTER)a->io)->RBuffer.length = inppw(Port);
- inppw_buffer(Port, ((PISDN_ADAPTER)a->io)->RBuffer.P, ((PISDN_ADAPTER)a->io)->RBuffer.length + 1);
- e->RBuffer = (DBUFFER *) &(((PISDN_ADAPTER)a->io)->RBuffer);
- DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-void io_out(ADAPTER *a, void *adr, byte data)
-{
- byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
- outppw(Port + 4, (word)(unsigned long)adr);
- outpp(Port, data);
- DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-void io_outw(ADAPTER *a, void *adr, word data)
-{
- byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
- outppw(Port + 4, (word)(unsigned long)adr);
- outppw(Port, data);
- DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-void io_out_buffer(ADAPTER *a, void *adr, void *buffer, word len)
-{
- byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
- byte *P = (byte *)buffer;
- if ((long)adr & 1) {
- outppw(Port + 4, (word)(unsigned long)adr);
- outpp(Port, *P);
- P++;
- adr = ((byte *) adr) + 1;
- len--;
- if (!len) {
- DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
- return;
- }
- }
- outppw(Port + 4, (word)(unsigned long)adr);
- outppw_buffer(Port, P, len + 1);
- DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-void io_inc(ADAPTER *a, void *adr)
-{
- byte x;
- byte __iomem *Port = DIVA_OS_MEM_ATTACH_PORT((PISDN_ADAPTER)a->io);
- outppw(Port + 4, (word)(unsigned long)adr);
- x = inpp(Port);
- outppw(Port + 4, (word)(unsigned long)adr);
- outpp(Port, x + 1);
- DIVA_OS_MEM_DETACH_PORT((PISDN_ADAPTER)a->io, Port);
-}
-/*------------------------------------------------------------------*/
-/* OS specific functions related to queuing of entities */
-/*------------------------------------------------------------------*/
-void free_entity(ADAPTER *a, byte e_no)
-{
- PISDN_ADAPTER IoAdapter;
- diva_os_spin_lock_magic_t irql;
- IoAdapter = (PISDN_ADAPTER) a->io;
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_free");
- IoAdapter->e_tbl[e_no].e = NULL;
- IoAdapter->e_count--;
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_free");
-}
-void assign_queue(ADAPTER *a, byte e_no, word ref)
-{
- PISDN_ADAPTER IoAdapter;
- diva_os_spin_lock_magic_t irql;
- IoAdapter = (PISDN_ADAPTER) a->io;
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_assign");
- IoAdapter->e_tbl[e_no].assign_ref = ref;
- IoAdapter->e_tbl[e_no].next = (byte)IoAdapter->assign;
- IoAdapter->assign = e_no;
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_assign");
-}
-byte get_assign(ADAPTER *a, word ref)
-{
- PISDN_ADAPTER IoAdapter;
- diva_os_spin_lock_magic_t irql;
- byte e_no;
- IoAdapter = (PISDN_ADAPTER) a->io;
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock,
- &irql,
- "data_assign_get");
- for (e_no = (byte)IoAdapter->assign;
- e_no && IoAdapter->e_tbl[e_no].assign_ref != ref;
- e_no = IoAdapter->e_tbl[e_no].next);
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock,
- &irql,
- "data_assign_get");
- return e_no;
-}
-void req_queue(ADAPTER *a, byte e_no)
-{
- PISDN_ADAPTER IoAdapter;
- diva_os_spin_lock_magic_t irql;
- IoAdapter = (PISDN_ADAPTER) a->io;
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_q");
- IoAdapter->e_tbl[e_no].next = 0;
- if (IoAdapter->head) {
- IoAdapter->e_tbl[IoAdapter->tail].next = e_no;
- IoAdapter->tail = e_no;
- }
- else {
- IoAdapter->head = e_no;
- IoAdapter->tail = e_no;
- }
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_q");
-}
-byte look_req(ADAPTER *a)
-{
- PISDN_ADAPTER IoAdapter;
- IoAdapter = (PISDN_ADAPTER) a->io;
- return ((byte)IoAdapter->head);
-}
-void next_req(ADAPTER *a)
-{
- PISDN_ADAPTER IoAdapter;
- diva_os_spin_lock_magic_t irql;
- IoAdapter = (PISDN_ADAPTER) a->io;
- diva_os_enter_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_next");
- IoAdapter->head = IoAdapter->e_tbl[IoAdapter->head].next;
- if (!IoAdapter->head) IoAdapter->tail = 0;
- diva_os_leave_spin_lock(&IoAdapter->data_spin_lock, &irql, "data_req_next");
-}
-/*------------------------------------------------------------------*/
-/* memory map functions */
-/*------------------------------------------------------------------*/
-ENTITY *entity_ptr(ADAPTER *a, byte e_no)
-{
- PISDN_ADAPTER IoAdapter;
- IoAdapter = (PISDN_ADAPTER)a->io;
- return (IoAdapter->e_tbl[e_no].e);
-}
-void *PTR_X(ADAPTER *a, ENTITY *e)
-{
- return ((void *) e->X);
-}
-void *PTR_R(ADAPTER *a, ENTITY *e)
-{
- return ((void *) e->R);
-}
-void *PTR_P(ADAPTER *a, ENTITY *e, void *P)
-{
- return P;
-}
-void CALLBACK(ADAPTER *a, ENTITY *e)
-{
- if (e && e->callback)
- e->callback(e);
-}
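
The request queue in the io.c removed above chains entities by table index (e_tbl[i].next) rather than by pointer, with index 0 doubling as the "empty" marker, which is why request() above starts its free-slot search at index 1. A standalone sketch of the same head/tail discipline; array size and names are hypothetical:

#include <stdio.h>

#define MAX_E 8
static unsigned char next_tbl[MAX_E];
static unsigned char head, tail;        /* 0 = queue empty, as in the driver */

static void req_queue(unsigned char e_no)
{
    next_tbl[e_no] = 0;
    if (head) {
        next_tbl[tail] = e_no;          /* append behind current tail */
        tail = e_no;
    } else {
        head = tail = e_no;             /* first entry in an empty queue */
    }
}

static unsigned char next_req(void)
{
    unsigned char e_no = head;
    if (head) {
        head = next_tbl[head];
        if (!head)
            tail = 0;                   /* queue drained */
    }
    return e_no;                        /* 0 if nothing was queued */
}

int main(void)
{
    req_queue(3); req_queue(1); req_queue(5);
    for (unsigned char e; (e = next_req()) != 0;)
        printf("servicing entity %u\n", e);
    return 0;
}
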
diff --git a/drivers/isdn/hardware/eicon/io.h b/drivers/isdn/hardware/eicon/io.h
deleted file mode 100644
index 01deced18ab8..000000000000
--- a/drivers/isdn/hardware/eicon/io.h
+++ /dev/null
@@ -1,308 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_COMMON_IO_H_INC__ /* { */
-#define __DIVA_XDI_COMMON_IO_H_INC__
-/*
- maximum = 16 adapters
-*/
-#define DI_MAX_LINKS MAX_ADAPTER
-#define ISDN_MAX_NUM_LEN 60
-/* --------------------------------------------------------------------------
- structure for quadro card management (obsolete on
- systems that provide a per-card load event)
- -------------------------------------------------------------------------- */
-typedef struct {
- dword Num;
- DEVICE_NAME DeviceName[4];
- PISDN_ADAPTER QuadroAdapter[4];
-} ADAPTER_LIST_ENTRY, *PADAPTER_LIST_ENTRY;
-/* --------------------------------------------------------------------------
- Special OS memory support structures
- -------------------------------------------------------------------------- */
-#define MAX_MAPPED_ENTRIES 8
-typedef struct {
- void *Address;
- dword Length;
-} ADAPTER_MEMORY;
-/* --------------------------------------------------------------------------
- Configuration of XDI clients carried by XDI
- -------------------------------------------------------------------------- */
-#define DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON 0x01
-#define DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON 0x02
-typedef struct _diva_xdi_capi_cfg {
- byte cfg_1;
-} diva_xdi_capi_cfg_t;
-/* --------------------------------------------------------------------------
- Main data structure kept per adapter
- -------------------------------------------------------------------------- */
-struct _ISDN_ADAPTER {
- void (*DIRequest)(PISDN_ADAPTER, ENTITY *);
- int State; /* from NT4 1.srv, a good idea, but a poor achievement */
- int Initialized;
- int RegisteredWithDidd;
- int Unavailable; /* callback function possible? */
- int ResourcesClaimed;
- int PnpBiosConfigUsed;
- dword Logging;
- dword features;
- char ProtocolIdString[80];
- /*
- remember mapped memory areas
- */
- ADAPTER_MEMORY MappedMemory[MAX_MAPPED_ENTRIES];
- CARD_PROPERTIES Properties;
- dword cardType;
- dword protocol_id; /* configured protocol identifier */
- char protocol_name[8]; /* readable name of protocol */
- dword BusType;
- dword BusNumber;
- dword slotNumber;
- dword slotId;
- dword ControllerNumber; /* for QUADRO cards only */
- PISDN_ADAPTER MultiMaster; /* for 4-BRI card only - use MultiMaster or QuadroList */
- PADAPTER_LIST_ENTRY QuadroList; /* for QUADRO card only */
- PDEVICE_OBJECT DeviceObject;
- dword DeviceId;
- diva_os_adapter_irq_info_t irq_info;
- dword volatile IrqCount;
- int trapped;
- dword DspCodeBaseAddr;
- dword MaxDspCodeSize;
- dword downloadAddr;
- dword DspCodeBaseAddrTable[4]; /* add. for MultiMaster */
- dword MaxDspCodeSizeTable[4]; /* add. for MultiMaster */
- dword downloadAddrTable[4]; /* add. for MultiMaster */
- dword MemoryBase;
- dword MemorySize;
- byte __iomem *Address;
- byte __iomem *Config;
- byte __iomem *Control;
- byte __iomem *reset;
- byte __iomem *port;
- byte __iomem *ram;
- byte __iomem *cfg;
- byte __iomem *prom;
- byte __iomem *ctlReg;
- struct pc_maint *pcm;
- diva_os_dependent_devica_name_t os_name;
- byte Name[32];
- dword serialNo;
- dword ANum;
- dword ArchiveType; /* ARCHIVE_TYPE_NONE ..._SINGLE ..._USGEN ..._MULTI */
- char *ProtocolSuffix; /* internal protocolfile table */
- char Archive[32];
- char Protocol[32];
- char AddDownload[32]; /* Dsp- or other additional download files */
- char Oad1[ISDN_MAX_NUM_LEN];
- char Osa1[ISDN_MAX_NUM_LEN];
- char Oad2[ISDN_MAX_NUM_LEN];
- char Osa2[ISDN_MAX_NUM_LEN];
- char Spid1[ISDN_MAX_NUM_LEN];
- char Spid2[ISDN_MAX_NUM_LEN];
- byte nosig;
- byte BriLayer2LinkCount; /* number of TEIs the adapter will support in P2MP mode */
- dword Channels;
- dword tei;
- dword nt2;
- dword TerminalCount;
- dword WatchDog;
- dword Permanent;
- dword BChMask; /* B channel mask for unchannelized modes */
- dword StableL2;
- dword DidLen;
- dword NoOrderCheck;
- dword ForceLaw; /* VoiceCoding - default: 0, a-law: 1, mu-law: 2 */
- dword SigFlags;
- dword LowChannel;
- dword NoHscx30;
- dword ProtVersion;
- dword crc4;
- dword L1TristateOrQsig; /* enable Layer 1 Tristate (bit 2)Or Qsig params (bit 0,1)*/
- dword InitialDspInfo;
- dword ModemGuardTone;
- dword ModemMinSpeed;
- dword ModemMaxSpeed;
- dword ModemOptions;
- dword ModemOptions2;
- dword ModemNegotiationMode;
- dword ModemModulationsMask;
- dword ModemTransmitLevel;
- dword FaxOptions;
- dword FaxMaxSpeed;
- dword Part68LevelLimiter;
- dword UsEktsNumCallApp;
- byte UsEktsFeatAddConf;
- byte UsEktsFeatRemoveConf;
- byte UsEktsFeatCallTransfer;
- byte UsEktsFeatMsgWaiting;
- byte QsigDialect;
- byte ForceVoiceMailAlert;
- byte DisableAutoSpid;
- byte ModemCarrierWaitTimeSec;
- byte ModemCarrierLossWaitTimeTenthSec;
- byte PiafsLinkTurnaroundInFrames;
- byte DiscAfterProgress;
- byte AniDniLimiter[3];
- byte TxAttenuation; /* PRI/E1 only: attenuate TX signal */
- word QsigFeatures;
- dword GenerateRingtone;
- dword SupplementaryServicesFeatures;
- dword R2Dialect;
- dword R2CasOptions;
- dword FaxV34Options;
- dword DisabledDspMask;
- dword AdapterTestMask;
- dword DspImageLength;
- word AlertToIn20mSecTicks;
- word ModemEyeSetup;
- byte R2CtryLength;
- byte CCBSRelTimer;
- byte *PcCfgBufferFile;/* flexible parameter via file */
- byte *PcCfgBuffer; /* flexible parameter via multistring */
- diva_os_dump_file_t dump_file; /* dump memory to file at lowest irq level */
- diva_os_board_trace_t board_trace; /* traces from the board */
- diva_os_spin_lock_t isr_spin_lock;
- diva_os_spin_lock_t data_spin_lock;
- diva_os_soft_isr_t req_soft_isr;
- diva_os_soft_isr_t isr_soft_isr;
- diva_os_atomic_t in_dpc;
- PBUFFER RBuffer; /* Copy of receive lookahead buffer */
- word e_max;
- word e_count;
- E_INFO *e_tbl;
- word assign; /* list of pending ASSIGNs */
- word head; /* head of request queue */
- word tail; /* tail of request queue */
- ADAPTER a; /* not a separate structure */
- void (*out)(ADAPTER *a);
- byte (*dpc)(ADAPTER *a);
- byte (*tst_irq)(ADAPTER *a);
- void (*clr_irq)(ADAPTER *a);
- int (*load)(PISDN_ADAPTER);
- int (*mapmem)(PISDN_ADAPTER);
- int (*chkIrq)(PISDN_ADAPTER);
- void (*disIrq)(PISDN_ADAPTER);
- void (*start)(PISDN_ADAPTER);
- void (*stop)(PISDN_ADAPTER);
- void (*rstFnc)(PISDN_ADAPTER);
- void (*trapFnc)(PISDN_ADAPTER);
- dword (*DetectDsps)(PISDN_ADAPTER);
- void (*os_trap_nfy_Fnc)(PISDN_ADAPTER, dword);
- diva_os_isr_callback_t diva_isr_handler;
- dword sdram_bar; /* must be 32 bit */
- dword fpga_features;
- volatile int pcm_pending;
- volatile void *pcm_data;
- diva_xdi_capi_cfg_t capi_cfg;
- dword tasks;
- void *dma_map;
- int (*DivaAdapterTestProc)(PISDN_ADAPTER);
- void *AdapterTestMemoryStart;
- dword AdapterTestMemoryLength;
- const byte *cfg_lib_memory_init;
- dword cfg_lib_memory_init_length;
-};
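The tail of struct _ISDN_ADAPTER (out, dpc, tst_irq, clr_irq, load, start, stop, ...) is a per-card dispatch table: init code fills the pointers according to the card family so common code never branches on hardware type. A compilable sketch of that pattern, with made-up names and empty stand-in implementations:

#include <stdio.h>

struct card_ops {
	int  (*load)(void *card);
	void (*start)(void *card);
	int  (*test_irq)(void *card);
	void (*clear_irq)(void *card);
};

/* One concrete set; a real driver would provide one per card family */
static int  bri_load(void *card)      { (void)card; return 0; }
static void bri_start(void *card)     { (void)card; }
static int  bri_test_irq(void *card)  { (void)card; return 0; }
static void bri_clear_irq(void *card) { (void)card; }

static const struct card_ops bri_ops = {
	.load      = bri_load,
	.start     = bri_start,
	.test_irq  = bri_test_irq,
	.clear_irq = bri_clear_irq,
};

/* Generic code only ever calls through the table */
static int bring_up(const struct card_ops *ops, void *card)
{
	if (ops->load(card))
		return -1;
	ops->start(card);
	return 0;
}

int main(void)
{
	printf("bring_up: %d\n", bring_up(&bri_ops, NULL));
	return 0;
}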
-/* ---------------------------------------------------------------------
- Entity table
- --------------------------------------------------------------------- */
-struct e_info_s {
- ENTITY *e;
- byte next; /* chaining index */
- word assign_ref; /* assign reference */
-};
-/* ---------------------------------------------------------------------
- S-cards shared ram structure for loading
- --------------------------------------------------------------------- */
-struct s_load {
- byte ctrl;
- byte card;
- byte msize;
- byte fill0;
- word ebit;
- word elocl;
- word eloch;
- byte reserved[20];
- word signature;
- byte fill[224];
- byte b[256];
-};
-#define PR_RAM ((struct pr_ram *)0)
-#define RAM ((struct dual *)0)
-/* ---------------------------------------------------------------------
- platform specific conversions
- --------------------------------------------------------------------- */
-extern void *PTR_P(ADAPTER *a, ENTITY *e, void *P);
-extern void *PTR_X(ADAPTER *a, ENTITY *e);
-extern void *PTR_R(ADAPTER *a, ENTITY *e);
-extern void CALLBACK(ADAPTER *a, ENTITY *e);
-extern void set_ram(void **adr_ptr);
-/* ---------------------------------------------------------------------
- ram access functions for io mapped cards
- --------------------------------------------------------------------- */
-byte io_in(ADAPTER *a, void *adr);
-word io_inw(ADAPTER *a, void *adr);
-void io_in_buffer(ADAPTER *a, void *adr, void *P, word length);
-void io_look_ahead(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e);
-void io_out(ADAPTER *a, void *adr, byte data);
-void io_outw(ADAPTER *a, void *adr, word data);
-void io_out_buffer(ADAPTER *a, void *adr, void *P, word length);
-void io_inc(ADAPTER *a, void *adr);
-void bri_in_buffer(PISDN_ADAPTER IoAdapter, dword Pos,
- void *Buf, dword Len);
-int bri_out_buffer(PISDN_ADAPTER IoAdapter, dword Pos,
- void *Buf, dword Len, int Verify);
-/* ---------------------------------------------------------------------
- ram access functions for memory mapped cards
- --------------------------------------------------------------------- */
-byte mem_in(ADAPTER *a, void *adr);
-word mem_inw(ADAPTER *a, void *adr);
-void mem_in_buffer(ADAPTER *a, void *adr, void *P, word length);
-void mem_look_ahead(ADAPTER *a, PBUFFER *RBuffer, ENTITY *e);
-void mem_out(ADAPTER *a, void *adr, byte data);
-void mem_outw(ADAPTER *a, void *adr, word data);
-void mem_out_buffer(ADAPTER *a, void *adr, void *P, word length);
-void mem_inc(ADAPTER *a, void *adr);
-void mem_in_dw(ADAPTER *a, void *addr, dword *data, int dwords);
-void mem_out_dw(ADAPTER *a, void *addr, const dword *data, int dwords);
-/* ---------------------------------------------------------------------
- functions exported by io.c
- --------------------------------------------------------------------- */
-extern IDI_CALL Requests[MAX_ADAPTER];
-extern void DIDpcRoutine(struct _diva_os_soft_isr *psoft_isr,
- void *context);
-extern void request(PISDN_ADAPTER, ENTITY *);
-/* ---------------------------------------------------------------------
- trapFn helpers, used to recover debug trace from dead card
- --------------------------------------------------------------------- */
-typedef struct {
- word *buf;
- word cnt;
- word out;
-} Xdesc;
-extern void dump_trap_frame(PISDN_ADAPTER IoAdapter, byte __iomem *exception);
-extern void dump_xlog_buffer(PISDN_ADAPTER IoAdapter, Xdesc *xlogDesc);
-/* --------------------------------------------------------------------- */
-#endif /* } __DIVA_XDI_COMMON_IO_H_INC__ */
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c
deleted file mode 100644
index 045bda5c839f..000000000000
--- a/drivers/isdn/hardware/eicon/istream.c
+++ /dev/null
@@ -1,226 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#if defined(DIVA_ISTREAM) /* { */
-#include "pc.h"
-#include "pr_pc.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "di.h"
-#if !defined USE_EXTENDED_DEBUGS
-#include "dimaint.h"
-#else
-#define dprintf
-#endif
-#include "dfifo.h"
-int diva_istream_write(void *context,
- int Id,
- void *data,
- int length,
- int final,
- byte usr1,
- byte usr2);
-int diva_istream_read(void *context,
- int Id,
- void *data,
- int max_length,
- int *final,
- byte *usr1,
- byte *usr2);
-/* -------------------------------------------------------------------
- Provides the iStream interface to the client
- ------------------------------------------------------------------- */
-void diva_xdi_provide_istream_info(ADAPTER *a,
- diva_xdi_stream_interface_t *pi) {
- pi->provided_service = 0;
-}
-/* ------------------------------------------------------------------
- Writes data from the caller's buffer to the card's
- stream interface.
- If synchronous service was requested, the function
- returns the amount of data written to the stream.
- 'final' indicates that the piece of data to be written is the
- final part of a frame (needed only for structured data transfer)
- return 0 if a zero-length packet was written
- return -1 if the stream is full
- ------------------------------------------------------------------ */
-int diva_istream_write(void *context,
- int Id,
- void *data,
- int length,
- int final,
- byte usr1,
- byte usr2) {
- ADAPTER *a = (ADAPTER *)context;
- int written = 0, to_write = -1;
- char tmp[4];
- byte *data_ptr = (byte *)data;
- for (;;) {
- a->ram_in_dw(a,
-#ifdef PLATFORM_GT_32BIT
- ULongToPtr(a->tx_stream[Id] + a->tx_pos[Id]),
-#else
- (void *)(a->tx_stream[Id] + a->tx_pos[Id]),
-#endif
- (dword *)&tmp[0],
- 1);
- if (tmp[0] & DIVA_DFIFO_READY) { /* No more free blocks */
- if (to_write < 0)
- return (-1); /* was not able to write */
- break; /* only part of message was written */
- }
- to_write = min(length, DIVA_DFIFO_DATA_SZ);
- if (to_write) {
- a->ram_out_buffer(a,
-#ifdef PLATFORM_GT_32BIT
- ULongToPtr(a->tx_stream[Id] + a->tx_pos[Id] + 4),
-#else
- (void *)(a->tx_stream[Id] + a->tx_pos[Id] + 4),
-#endif
- data_ptr,
- (word)to_write);
- length -= to_write;
- written += to_write;
- data_ptr += to_write;
- }
- tmp[1] = (char)to_write;
- tmp[0] = (tmp[0] & DIVA_DFIFO_WRAP) |
- DIVA_DFIFO_READY |
- ((!length && final) ? DIVA_DFIFO_LAST : 0);
- if (tmp[0] & DIVA_DFIFO_LAST) {
- tmp[2] = usr1;
- tmp[3] = usr2;
- }
- a->ram_out_dw(a,
-#ifdef PLATFORM_GT_32BIT
- ULongToPtr(a->tx_stream[Id] + a->tx_pos[Id]),
-#else
- (void *)(a->tx_stream[Id] + a->tx_pos[Id]),
-#endif
- (dword *)&tmp[0],
- 1);
- if (tmp[0] & DIVA_DFIFO_WRAP) {
- a->tx_pos[Id] = 0;
- } else {
- a->tx_pos[Id] += DIVA_DFIFO_STEP;
- }
- if (!length) {
- break;
- }
- }
- return (written);
-}
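A hypothetical caller of the function above, compiled against the driver's own headers, following the contract documented in the comment: -1 means the stream is full (back off and retry later), a short count means only part of the frame fit, and 'final' marks the frame boundary for structured transfer. send_frame and its retry policy are illustrative, not part of the driver.

static int send_frame(ADAPTER *a, int id, byte *frame, int len)
{
	int done = 0;

	while (done < len) {
		int n = diva_istream_write(a, id, frame + done, len - done,
					   1 /* final */, 0, 0);
		if (n < 0)
			return -1;	/* stream full: caller retries later */
		done += n;		/* partial write: send the rest */
	}
	return done;
}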
-/* -------------------------------------------------------------------
- In case of SYNCHRONOUS service:
- Copies data from the stream into the caller's buffer.
- Returns the amount of data written to the buffer.
- The final flag is set on return if the last part of a structured
- frame was received
- return 0 if a zero-length packet was received
- return -1 if the stream is empty
- return -2 if the read buffer does not provide sufficient space
- to accommodate the entire segment
- max_length should be at least 68 bytes
- ------------------------------------------------------------------- */
-int diva_istream_read(void *context,
- int Id,
- void *data,
- int max_length,
- int *final,
- byte *usr1,
- byte *usr2) {
- ADAPTER *a = (ADAPTER *)context;
- int read = 0, to_read = -1;
- char tmp[4];
- byte *data_ptr = (byte *)data;
- *final = 0;
- for (;;) {
- a->ram_in_dw(a,
-#ifdef PLATFORM_GT_32BIT
- ULongToPtr(a->rx_stream[Id] + a->rx_pos[Id]),
-#else
- (void *)(a->rx_stream[Id] + a->rx_pos[Id]),
-#endif
- (dword *)&tmp[0],
- 1);
- if (tmp[1] > max_length) {
- if (to_read < 0)
- return (-2); /* was not able to read */
- break;
- }
- if (!(tmp[0] & DIVA_DFIFO_READY)) {
- if (to_read < 0)
- return (-1); /* was not able to read */
- break;
- }
- to_read = min(max_length, (int)tmp[1]);
- if (to_read) {
- a->ram_in_buffer(a,
-#ifdef PLATFORM_GT_32BIT
- ULongToPtr(a->rx_stream[Id] + a->rx_pos[Id] + 4),
-#else
- (void *)(a->rx_stream[Id] + a->rx_pos[Id] + 4),
-#endif
- data_ptr,
- (word)to_read);
- max_length -= to_read;
- read += to_read;
- data_ptr += to_read;
- }
- if (tmp[0] & DIVA_DFIFO_LAST) {
- *final = 1;
- }
- tmp[0] &= DIVA_DFIFO_WRAP;
- a->ram_out_dw(a,
-#ifdef PLATFORM_GT_32BIT
- ULongToPtr(a->rx_stream[Id] + a->rx_pos[Id]),
-#else
- (void *)(a->rx_stream[Id] + a->rx_pos[Id]),
-#endif
- (dword *)&tmp[0],
- 1);
- if (tmp[0] & DIVA_DFIFO_WRAP) {
- a->rx_pos[Id] = 0;
- } else {
- a->rx_pos[Id] += DIVA_DFIFO_STEP;
- }
- if (*final) {
- if (usr1)
- *usr1 = tmp[2];
- if (usr2)
- *usr2 = tmp[3];
- break;
- }
- }
- return (read);
-}
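The matching read side, again as an illustrative fragment against the driver's headers: segments are pulled into the caller's buffer until the card flags the final fragment, with -1 ("stream empty") and -2 ("buffer too small for the next segment") passed through to the caller.

static int recv_frame(ADAPTER *a, int id, byte *buf, int size)
{
	int got = 0, final = 0;
	byte usr1, usr2;

	while (!final) {
		int n = diva_istream_read(a, id, buf + got, size - got,
					  &final, &usr1, &usr2);
		if (n < 0)
			return n;	/* -1: empty, -2: buffer too small */
		got += n;
	}
	return got;		/* complete frame assembled */
}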
-/* ---------------------------------------------------------------------
- Checks whether one of the streams caused the interrupt and
- wakes up the corresponding application
- --------------------------------------------------------------------- */
-void pr_stream(ADAPTER *a) {
-}
-#endif /* } */
diff --git a/drivers/isdn/hardware/eicon/kst_ifc.h b/drivers/isdn/hardware/eicon/kst_ifc.h
deleted file mode 100644
index 894fdfda1090..000000000000
--- a/drivers/isdn/hardware/eicon/kst_ifc.h
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2000.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 1.9
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_EICON_TRACE_API__
-#define __DIVA_EICON_TRACE_API__
-
-#define DIVA_TRACE_LINE_TYPE_LEN 64
-#define DIVA_TRACE_IE_LEN 64
-#define DIVA_TRACE_FAX_PRMS_LEN 128
-
-typedef struct _diva_trace_ie {
- byte length;
- byte data[DIVA_TRACE_IE_LEN];
-} diva_trace_ie_t;
-
-/*
- Structure used to represent "State\\BX\\Modem" directory
- to user.
-*/
-typedef struct _diva_trace_modem_state {
- dword ChannelNumber;
-
- dword Event;
-
- dword Norm;
-
- dword Options; /* Options received from Application */
-
- dword TxSpeed;
- dword RxSpeed;
-
- dword RoundtripMsec;
-
- dword SymbolRate;
-
- int RxLeveldBm;
- int EchoLeveldBm;
-
- dword SNRdb;
- dword MAE;
-
- dword LocalRetrains;
- dword RemoteRetrains;
- dword LocalResyncs;
- dword RemoteResyncs;
-
- dword DiscReason;
-
-} diva_trace_modem_state_t;
-
-/*
- Representation of "State\\BX\\FAX" directory
-*/
-typedef struct _diva_trace_fax_state {
- dword ChannelNumber;
- dword Event;
- dword Page_Counter;
- dword Features;
- char Station_ID[DIVA_TRACE_FAX_PRMS_LEN];
- char Subaddress[DIVA_TRACE_FAX_PRMS_LEN];
- char Password[DIVA_TRACE_FAX_PRMS_LEN];
- dword Speed;
- dword Resolution;
- dword Paper_Width;
- dword Paper_Length;
- dword Scanline_Time;
- dword Disc_Reason;
- dword dummy;
-} diva_trace_fax_state_t;
-
-/*
- Structure used to represent Interface State in the abstract
- and interface/D-channel protocol independent form.
-*/
-typedef struct _diva_trace_interface_state {
- char Layer1[DIVA_TRACE_LINE_TYPE_LEN];
- char Layer2[DIVA_TRACE_LINE_TYPE_LEN];
-} diva_trace_interface_state_t;
-
-typedef struct _diva_incoming_call_statistics {
- dword Calls;
- dword Connected;
- dword User_Busy;
- dword Call_Rejected;
- dword Wrong_Number;
- dword Incompatible_Dst;
- dword Out_of_Order;
- dword Ignored;
-} diva_incoming_call_statistics_t;
-
-typedef struct _diva_outgoing_call_statistics {
- dword Calls;
- dword Connected;
- dword User_Busy;
- dword No_Answer;
- dword Wrong_Number;
- dword Call_Rejected;
- dword Other_Failures;
-} diva_outgoing_call_statistics_t;
-
-typedef struct _diva_modem_call_statistics {
- dword Disc_Normal;
- dword Disc_Unspecified;
- dword Disc_Busy_Tone;
- dword Disc_Congestion;
- dword Disc_Carr_Wait;
- dword Disc_Trn_Timeout;
- dword Disc_Incompat;
- dword Disc_Frame_Rej;
- dword Disc_V42bis;
-} diva_modem_call_statistics_t;
-
-typedef struct _diva_fax_call_statistics {
- dword Disc_Normal;
- dword Disc_Not_Ident;
- dword Disc_No_Response;
- dword Disc_Retries;
- dword Disc_Unexp_Msg;
- dword Disc_No_Polling;
- dword Disc_Training;
- dword Disc_Unexpected;
- dword Disc_Application;
- dword Disc_Incompat;
- dword Disc_No_Command;
- dword Disc_Long_Msg;
- dword Disc_Supervisor;
- dword Disc_SUB_SEP_PWD;
- dword Disc_Invalid_Msg;
- dword Disc_Page_Coding;
- dword Disc_App_Timeout;
- dword Disc_Unspecified;
-} diva_fax_call_statistics_t;
-
-typedef struct _diva_prot_statistics {
- dword X_Frames;
- dword X_Bytes;
- dword X_Errors;
- dword R_Frames;
- dword R_Bytes;
- dword R_Errors;
-} diva_prot_statistics_t;
-
-typedef struct _diva_ifc_statistics {
- diva_incoming_call_statistics_t inc;
- diva_outgoing_call_statistics_t outg;
- diva_modem_call_statistics_t mdm;
- diva_fax_call_statistics_t fax;
- diva_prot_statistics_t b1;
- diva_prot_statistics_t b2;
- diva_prot_statistics_t d1;
- diva_prot_statistics_t d2;
-} diva_ifc_statistics_t;
-
-/*
- Structure used to represent "State\\BX" directory
- to user.
-*/
-typedef struct _diva_trace_line_state {
- dword ChannelNumber;
-
- char Line[DIVA_TRACE_LINE_TYPE_LEN];
-
- char Framing[DIVA_TRACE_LINE_TYPE_LEN];
-
- char Layer2[DIVA_TRACE_LINE_TYPE_LEN];
- char Layer3[DIVA_TRACE_LINE_TYPE_LEN];
-
- char RemoteAddress[DIVA_TRACE_LINE_TYPE_LEN];
- char RemoteSubAddress[DIVA_TRACE_LINE_TYPE_LEN];
-
- char LocalAddress[DIVA_TRACE_LINE_TYPE_LEN];
- char LocalSubAddress[DIVA_TRACE_LINE_TYPE_LEN];
-
- diva_trace_ie_t call_BC;
- diva_trace_ie_t call_HLC;
- diva_trace_ie_t call_LLC;
-
- dword Charges;
-
- dword CallReference;
-
- dword LastDisconnecCause;
-
- char UserID[DIVA_TRACE_LINE_TYPE_LEN];
-
- diva_trace_modem_state_t modem;
- diva_trace_fax_state_t fax;
-
- diva_trace_interface_state_t *pInterface;
-
- diva_ifc_statistics_t *pInterfaceStat;
-
-} diva_trace_line_state_t;
-
-#define DIVA_SUPER_TRACE_NOTIFY_LINE_CHANGE ('l')
-#define DIVA_SUPER_TRACE_NOTIFY_MODEM_CHANGE ('m')
-#define DIVA_SUPER_TRACE_NOTIFY_FAX_CHANGE ('f')
-#define DIVA_SUPER_TRACE_INTERFACE_CHANGE ('i')
-#define DIVA_SUPER_TRACE_NOTIFY_STAT_CHANGE ('s')
-#define DIVA_SUPER_TRACE_NOTIFY_MDM_STAT_CHANGE ('M')
-#define DIVA_SUPER_TRACE_NOTIFY_FAX_STAT_CHANGE ('F')
-
-struct _diva_strace_library_interface;
-typedef void (*diva_trace_channel_state_change_proc_t)(void *user_context,
- struct _diva_strace_library_interface *hLib,
- int Adapter,
- diva_trace_line_state_t *channel, int notify_subject);
-typedef void (*diva_trace_channel_trace_proc_t)(void *user_context,
- struct _diva_strace_library_interface *hLib,
- int Adapter, void *xlog_buffer, int length);
-typedef void (*diva_trace_error_proc_t)(void *user_context,
- struct _diva_strace_library_interface *hLib,
- int Adapter,
- int error, const char *file, int line);
-
-/*
- This structure creates interface from user to library
-*/
-typedef struct _diva_trace_library_user_interface {
- void *user_context;
- diva_trace_channel_state_change_proc_t notify_proc;
- diva_trace_channel_trace_proc_t trace_proc;
- diva_trace_error_proc_t error_notify_proc;
-} diva_trace_library_user_interface_t;
-
-/*
- Interface from Library to User
-*/
-typedef int (*DivaSTraceLibraryStart_proc_t)(void *hLib);
-typedef int (*DivaSTraceLibraryFinit_proc_t)(void *hLib);
-typedef int (*DivaSTraceMessageInput_proc_t)(void *hLib);
-typedef void* (*DivaSTraceGetHandle_proc_t)(void *hLib);
-
-/*
- Turn Audio Tap trace on/off
- Channel should be in the range 1 ... Number of Channels
-*/
-typedef int (*DivaSTraceSetAudioTap_proc_t)(void *hLib, int Channel, int on);
-
-/*
- Turn B-channel trace on/off
- Channel should be in the range 1 ... Number of Channels
-*/
-typedef int (*DivaSTraceSetBChannel_proc_t)(void *hLib, int Channel, int on);
-
-/*
- Turn D-channel (Layer1/Layer2/Layer3) trace on/off
- Layer1 - all D-channel frames received/sent over the interface,
- including Layer 2 headers, Layer 2 frames and TEI management frames
- Layer2 - events from the LAPD protocol instance with the SAPI of the signalling protocol
- Layer3 - all D-channel frames addressed to the TEI assigned to the card and
- to the SAPI of the signalling protocol, plus signalling protocol events.
-*/
-typedef int (*DivaSTraceSetDChannel_proc_t)(void *hLib, int on);
-
-/*
- Get overall card statistics
-*/
-typedef int (*DivaSTraceGetOutgoingCallStatistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetIncomingCallStatistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetModemStatistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetFaxStatistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetBLayer1Statistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetBLayer2Statistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetDLayer1Statistics_proc_t)(void *hLib);
-typedef int (*DivaSTraceGetDLayer2Statistics_proc_t)(void *hLib);
-
-/*
- Call control
-*/
-typedef int (*DivaSTraceClearCall_proc_t)(void *hLib, int Channel);
-
-typedef struct _diva_strace_library_interface {
- void *hLib;
- DivaSTraceLibraryStart_proc_t DivaSTraceLibraryStart;
- DivaSTraceLibraryStart_proc_t DivaSTraceLibraryStop;
- DivaSTraceLibraryFinit_proc_t DivaSTraceLibraryFinit;
- DivaSTraceMessageInput_proc_t DivaSTraceMessageInput;
- DivaSTraceGetHandle_proc_t DivaSTraceGetHandle;
- DivaSTraceSetAudioTap_proc_t DivaSTraceSetAudioTap;
- DivaSTraceSetBChannel_proc_t DivaSTraceSetBChannel;
- DivaSTraceSetDChannel_proc_t DivaSTraceSetDChannel;
- DivaSTraceSetDChannel_proc_t DivaSTraceSetInfo;
- DivaSTraceGetOutgoingCallStatistics_proc_t \
- DivaSTraceGetOutgoingCallStatistics;
- DivaSTraceGetIncomingCallStatistics_proc_t \
- DivaSTraceGetIncomingCallStatistics;
- DivaSTraceGetModemStatistics_proc_t \
- DivaSTraceGetModemStatistics;
- DivaSTraceGetFaxStatistics_proc_t \
- DivaSTraceGetFaxStatistics;
- DivaSTraceGetBLayer1Statistics_proc_t \
- DivaSTraceGetBLayer1Statistics;
- DivaSTraceGetBLayer2Statistics_proc_t \
- DivaSTraceGetBLayer2Statistics;
- DivaSTraceGetDLayer1Statistics_proc_t \
- DivaSTraceGetDLayer1Statistics;
- DivaSTraceGetDLayer2Statistics_proc_t \
- DivaSTraceGetDLayer2Statistics;
- DivaSTraceClearCall_proc_t DivaSTraceClearCall;
-} diva_strace_library_interface_t;
-
-/*
- Create and return Library interface
-*/
-diva_strace_library_interface_t *DivaSTraceLibraryCreateInstance(int Adapter,
- const diva_trace_library_user_interface_t *user_proc,
- byte *pmem);
-dword DivaSTraceGetMemotyRequirement(int channels);
-
-#define DIVA_MAX_ADAPTERS 64
-#define DIVA_MAX_LINES 32
-
-#endif
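A sketch of how a client of this header would be wired up, assuming the memory block is allocated elsewhere and that a nonzero return from DivaSTraceLibraryStart indicates failure; the callback body and the helper names below are illustrative only.

static void my_notify(void *ctx, struct _diva_strace_library_interface *hLib,
		      int Adapter, diva_trace_line_state_t *channel,
		      int notify_subject)
{
	/* dispatch on notify_subject ('l', 'm', 'f', ..., see defines above) */
}

/* pmem must hold at least DivaSTraceGetMemotyRequirement(channels) bytes */
static diva_strace_library_interface_t *start_trace(int adapter, byte *pmem)
{
	diva_trace_library_user_interface_t user = {
		.user_context      = NULL,
		.notify_proc       = my_notify,
		.trace_proc        = NULL,	/* no raw XLOG delivery */
		.error_notify_proc = NULL,
	};
	diva_strace_library_interface_t *lib;

	lib = DivaSTraceLibraryCreateInstance(adapter, &user, pmem);
	if (lib && lib->DivaSTraceLibraryStart(lib->hLib))
		lib = NULL;			/* assumed: nonzero == error */
	return lib;
}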
diff --git a/drivers/isdn/hardware/eicon/maintidi.c b/drivers/isdn/hardware/eicon/maintidi.c
deleted file mode 100644
index 2ee789f95867..000000000000
--- a/drivers/isdn/hardware/eicon/maintidi.c
+++ /dev/null
@@ -1,2194 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2000.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 1.9
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "kst_ifc.h"
-#include "di_defs.h"
-#include "maintidi.h"
-#include "pc.h"
-#include "man_defs.h"
-
-
-extern void diva_mnt_internal_dprintf(dword drv_id, dword type, char *p, ...);
-
-#define MODEM_PARSE_ENTRIES 16 /* number of variables of interest */
-#define FAX_PARSE_ENTRIES 12 /* number of variables of interest */
-#define LINE_PARSE_ENTRIES 15 /* number of variables of interest */
-#define STAT_PARSE_ENTRIES 70 /* number of variables of interest */
-
-/*
- LOCAL FUNCTIONS
-*/
-static int DivaSTraceLibraryStart(void *hLib);
-static int DivaSTraceLibraryStop(void *hLib);
-static int SuperTraceLibraryFinit(void *hLib);
-static void *SuperTraceGetHandle(void *hLib);
-static int SuperTraceMessageInput(void *hLib);
-static int SuperTraceSetAudioTap(void *hLib, int Channel, int on);
-static int SuperTraceSetBChannel(void *hLib, int Channel, int on);
-static int SuperTraceSetDChannel(void *hLib, int on);
-static int SuperTraceSetInfo(void *hLib, int on);
-static int SuperTraceClearCall(void *hLib, int Channel);
-static int SuperTraceGetOutgoingCallStatistics(void *hLib);
-static int SuperTraceGetIncomingCallStatistics(void *hLib);
-static int SuperTraceGetModemStatistics(void *hLib);
-static int SuperTraceGetFaxStatistics(void *hLib);
-static int SuperTraceGetBLayer1Statistics(void *hLib);
-static int SuperTraceGetBLayer2Statistics(void *hLib);
-static int SuperTraceGetDLayer1Statistics(void *hLib);
-static int SuperTraceGetDLayer2Statistics(void *hLib);
-
-/*
- LOCAL FUNCTIONS
-*/
-static int ScheduleNextTraceRequest(diva_strace_context_t *pLib);
-static int process_idi_event(diva_strace_context_t *pLib,
- diva_man_var_header_t *pVar);
-static int process_idi_info(diva_strace_context_t *pLib,
- diva_man_var_header_t *pVar);
-static int diva_modem_event(diva_strace_context_t *pLib, int Channel);
-static int diva_fax_event(diva_strace_context_t *pLib, int Channel);
-static int diva_line_event(diva_strace_context_t *pLib, int Channel);
-static int diva_modem_info(diva_strace_context_t *pLib,
- int Channel,
- diva_man_var_header_t *pVar);
-static int diva_fax_info(diva_strace_context_t *pLib,
- int Channel,
- diva_man_var_header_t *pVar);
-static int diva_line_info(diva_strace_context_t *pLib,
- int Channel,
- diva_man_var_header_t *pVar);
-static int diva_ifc_statistics(diva_strace_context_t *pLib,
- diva_man_var_header_t *pVar);
-static diva_man_var_header_t *get_next_var(diva_man_var_header_t *pVar);
-static diva_man_var_header_t *find_var(diva_man_var_header_t *pVar,
- const char *name);
-static int diva_strace_read_int(diva_man_var_header_t *pVar, int *var);
-static int diva_strace_read_uint(diva_man_var_header_t *pVar, dword *var);
-static int diva_strace_read_asz(diva_man_var_header_t *pVar, char *var);
-static int diva_strace_read_asc(diva_man_var_header_t *pVar, char *var);
-static int diva_strace_read_ie(diva_man_var_header_t *pVar,
- diva_trace_ie_t *var);
-static void diva_create_parse_table(diva_strace_context_t *pLib);
-static void diva_trace_error(diva_strace_context_t *pLib,
- int error, const char *file, int line);
-static void diva_trace_notify_user(diva_strace_context_t *pLib,
- int Channel,
- int notify_subject);
-static int diva_trace_read_variable(diva_man_var_header_t *pVar,
- void *variable);
-
-/*
- Initialize the library and return context
- of the created trace object that will represent
- the IDI adapter.
- Return 0 on error.
-*/
-diva_strace_library_interface_t *DivaSTraceLibraryCreateInstance(int Adapter,
- const diva_trace_library_user_interface_t *user_proc,
- byte *pmem) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)pmem;
- int i;
-
- if (!pLib) {
- return NULL;
- }
-
- pmem += sizeof(*pLib);
- memset(pLib, 0x00, sizeof(*pLib));
-
- pLib->Adapter = Adapter;
-
- /*
- Set up Library Interface
- */
- pLib->instance.hLib = pLib;
- pLib->instance.DivaSTraceLibraryStart = DivaSTraceLibraryStart;
- pLib->instance.DivaSTraceLibraryStop = DivaSTraceLibraryStop;
- pLib->instance.DivaSTraceLibraryFinit = SuperTraceLibraryFinit;
- pLib->instance.DivaSTraceMessageInput = SuperTraceMessageInput;
- pLib->instance.DivaSTraceGetHandle = SuperTraceGetHandle;
- pLib->instance.DivaSTraceSetAudioTap = SuperTraceSetAudioTap;
- pLib->instance.DivaSTraceSetBChannel = SuperTraceSetBChannel;
- pLib->instance.DivaSTraceSetDChannel = SuperTraceSetDChannel;
- pLib->instance.DivaSTraceSetInfo = SuperTraceSetInfo;
- pLib->instance.DivaSTraceGetOutgoingCallStatistics = \
- SuperTraceGetOutgoingCallStatistics;
- pLib->instance.DivaSTraceGetIncomingCallStatistics = \
- SuperTraceGetIncomingCallStatistics;
- pLib->instance.DivaSTraceGetModemStatistics = \
- SuperTraceGetModemStatistics;
- pLib->instance.DivaSTraceGetFaxStatistics = \
- SuperTraceGetFaxStatistics;
- pLib->instance.DivaSTraceGetBLayer1Statistics = \
- SuperTraceGetBLayer1Statistics;
- pLib->instance.DivaSTraceGetBLayer2Statistics = \
- SuperTraceGetBLayer2Statistics;
- pLib->instance.DivaSTraceGetDLayer1Statistics = \
- SuperTraceGetDLayer1Statistics;
- pLib->instance.DivaSTraceGetDLayer2Statistics = \
- SuperTraceGetDLayer2Statistics;
- pLib->instance.DivaSTraceClearCall = SuperTraceClearCall;
-
-
- if (user_proc) {
- pLib->user_proc_table.user_context = user_proc->user_context;
- pLib->user_proc_table.notify_proc = user_proc->notify_proc;
- pLib->user_proc_table.trace_proc = user_proc->trace_proc;
- pLib->user_proc_table.error_notify_proc = user_proc->error_notify_proc;
- }
-
- if (!(pLib->hAdapter = SuperTraceOpenAdapter(Adapter))) {
- diva_mnt_internal_dprintf(0, DLI_ERR, "Can not open XDI adapter");
- return NULL;
- }
- pLib->Channels = SuperTraceGetNumberOfChannels(pLib->hAdapter);
-
- /*
- Calculate the number of parse table entries necessary to translate
- information from all events of interest
- */
- pLib->parse_entries = (MODEM_PARSE_ENTRIES + FAX_PARSE_ENTRIES + \
- STAT_PARSE_ENTRIES + \
- LINE_PARSE_ENTRIES + 1) * pLib->Channels;
- pLib->parse_table = (diva_strace_path2action_t *)pmem;
-
- for (i = 0; i < 30; i++) {
- pLib->lines[i].pInterface = &pLib->Interface;
- pLib->lines[i].pInterfaceStat = &pLib->InterfaceStat;
- }
-
- pLib->e.R = &pLib->RData;
-
- pLib->req_busy = 1;
- pLib->rc_ok = ASSIGN_OK;
-
- diva_create_parse_table(pLib);
-
- return ((diva_strace_library_interface_t *)pLib);
-}
-
-static int DivaSTraceLibraryStart(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
- return (SuperTraceASSIGN(pLib->hAdapter, pLib->buffer));
-}
-
-/*
- Return (-1) on error
- Return (0) if was initiated or pending
- Return (1) if removal is complete
-*/
-static int DivaSTraceLibraryStop(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
- if (!pLib->e.Id) { /* Was never started/assigned */
- return (1);
- }
-
- switch (pLib->removal_state) {
- case 0:
- pLib->removal_state = 1;
- ScheduleNextTraceRequest(pLib);
- break;
-
- case 3:
- return (1);
- }
-
- return (0);
-}
-
-static int SuperTraceLibraryFinit(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
- if (pLib) {
- if (pLib->hAdapter) {
- SuperTraceCloseAdapter(pLib->hAdapter);
- }
- return (0);
- }
- return (-1);
-}
-
-static void *SuperTraceGetHandle(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
- return (&pLib->e);
-}
-
-/*
- After library handle object is gone in signaled state
- this function should be called and will pick up incoming
- IDI messages (return codes and indications).
-*/
-static int SuperTraceMessageInput(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
- int ret = 0;
- byte Rc, Ind;
-
- if (pLib->e.complete == 255) {
- /*
- Process return code
- */
- pLib->req_busy = 0;
- Rc = pLib->e.Rc;
- pLib->e.Rc = 0;
-
- if (pLib->removal_state == 2) {
- pLib->removal_state = 3;
- return (0);
- }
-
- if (Rc != pLib->rc_ok) {
- int ignore = 0;
- /*
- Auto-detect the number of events/channels and features
- */
- if (pLib->general_b_ch_event == 1) {
- pLib->general_b_ch_event = 2;
- ignore = 1;
- } else if (pLib->general_fax_event == 1) {
- pLib->general_fax_event = 2;
- ignore = 1;
- } else if (pLib->general_mdm_event == 1) {
- pLib->general_mdm_event = 2;
- ignore = 1;
- } else if ((pLib->ChannelsTraceActive < pLib->Channels) && pLib->ChannelsTraceActive) {
- pLib->ChannelsTraceActive = pLib->Channels;
- ignore = 1;
- } else if (pLib->ModemTraceActive < pLib->Channels) {
- pLib->ModemTraceActive = pLib->Channels;
- ignore = 1;
- } else if (pLib->FaxTraceActive < pLib->Channels) {
- pLib->FaxTraceActive = pLib->Channels;
- ignore = 1;
- } else if (pLib->audio_trace_init == 2) {
- ignore = 1;
- pLib->audio_trace_init = 1;
- } else if (pLib->eye_pattern_pending) {
- pLib->eye_pattern_pending = 0;
- ignore = 1;
- } else if (pLib->audio_tap_pending) {
- pLib->audio_tap_pending = 0;
- ignore = 1;
- }
-
- if (!ignore) {
- return (-1); /* request failed */
- }
- } else {
- if (pLib->general_b_ch_event == 1) {
- pLib->ChannelsTraceActive = pLib->Channels;
- pLib->general_b_ch_event = 2;
- } else if (pLib->general_fax_event == 1) {
- pLib->general_fax_event = 2;
- pLib->FaxTraceActive = pLib->Channels;
- } else if (pLib->general_mdm_event == 1) {
- pLib->general_mdm_event = 2;
- pLib->ModemTraceActive = pLib->Channels;
- }
- }
- if (pLib->audio_trace_init == 2) {
- pLib->audio_trace_init = 1;
- }
- pLib->rc_ok = 0xff; /* default OK after assign was done */
- if ((ret = ScheduleNextTraceRequest(pLib))) {
- return (-1);
- }
- } else {
- /*
- Process indication
- Always answer with an 'RNR' indication while a return code is pending
- */
- Ind = pLib->e.Ind;
- pLib->e.Ind = 0;
- if (pLib->removal_state) {
- pLib->e.RNum = 0;
- pLib->e.RNR = 2;
- } else if (pLib->req_busy) {
- pLib->e.RNum = 0;
- pLib->e.RNR = 1;
- } else {
- if (pLib->e.complete != 0x02) {
- /*
- Look-ahead call, set up buffers
- */
- pLib->e.RNum = 1;
- pLib->e.R->P = (byte *)&pLib->buffer[0];
- pLib->e.R->PLength = (word)(sizeof(pLib->buffer) - 1);
-
- } else {
- /*
- Indication reception complete, process it now
- */
- byte *p = (byte *)&pLib->buffer[0];
- pLib->buffer[pLib->e.R->PLength] = 0; /* terminate I.E. with zero */
-
- switch (Ind) {
- case MAN_COMBI_IND: {
- int total_length = pLib->e.R->PLength;
- word this_ind_length;
-
- while (total_length > 3 && *p) {
- Ind = *p++;
- this_ind_length = (word)p[0] | ((word)p[1] << 8);
- p += 2;
-
- switch (Ind) {
- case MAN_INFO_IND:
- if (process_idi_info(pLib, (diva_man_var_header_t *)p)) {
- return (-1);
- }
- break;
- case MAN_EVENT_IND:
- if (process_idi_event(pLib, (diva_man_var_header_t *)p)) {
- return (-1);
- }
- break;
- case MAN_TRACE_IND:
- if (pLib->trace_on == 1) {
- /*
- Ignore the first trace event, which is the result of
- the EVENT_ON operation
- */
- pLib->trace_on++;
- } else {
- /*
- Deliver the XLOG buffer to the application
- */
- if (pLib->user_proc_table.trace_proc) {
- (*(pLib->user_proc_table.trace_proc))(pLib->user_proc_table.user_context,
- &pLib->instance, pLib->Adapter,
- p, this_ind_length);
- }
- }
- break;
- default:
- diva_mnt_internal_dprintf(0, DLI_ERR, "Unknown IDI Ind (DMA mode): %02x", Ind);
- }
- p += (this_ind_length + 1);
- total_length -= (4 + this_ind_length);
- }
- } break;
- case MAN_INFO_IND:
- if (process_idi_info(pLib, (diva_man_var_header_t *)p)) {
- return (-1);
- }
- break;
- case MAN_EVENT_IND:
- if (process_idi_event(pLib, (diva_man_var_header_t *)p)) {
- return (-1);
- }
- break;
- case MAN_TRACE_IND:
- if (pLib->trace_on == 1) {
- /*
- Ignore the first trace event, which is the result of
- the EVENT_ON operation
- */
- pLib->trace_on++;
- } else {
- /*
- Deliver the XLOG buffer to the application
- */
- if (pLib->user_proc_table.trace_proc) {
- (*(pLib->user_proc_table.trace_proc))(pLib->user_proc_table.user_context,
- &pLib->instance, pLib->Adapter,
- p, pLib->e.R->PLength);
- }
- }
- break;
- default:
- diva_mnt_internal_dprintf(0, DLI_ERR, "Unknown IDI Ind: %02x", Ind);
- }
- }
- }
- }
-
- if ((ret = ScheduleNextTraceRequest(pLib))) {
- return (-1);
- }
-
- return (ret);
-}
-
-/*
- Internal state machine responsible for scheduling of requests
-*/
-static int ScheduleNextTraceRequest(diva_strace_context_t *pLib) {
- char name[64];
- int ret = 0;
- int i;
-
- if (pLib->req_busy) {
- return (0);
- }
-
- if (pLib->removal_state == 1) {
- if (SuperTraceREMOVE(pLib->hAdapter)) {
- pLib->removal_state = 3;
- } else {
- pLib->req_busy = 1;
- pLib->removal_state = 2;
- }
- return (0);
- }
-
- if (pLib->removal_state) {
- return (0);
- }
-
- if (!pLib->general_b_ch_event) {
- if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, "State\\B Event", pLib->buffer))) {
- return (-1);
- }
- pLib->general_b_ch_event = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (!pLib->general_fax_event) {
- if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, "State\\FAX Event", pLib->buffer))) {
- return (-1);
- }
- pLib->general_fax_event = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (!pLib->general_mdm_event) {
- if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, "State\\Modem Event", pLib->buffer))) {
- return (-1);
- }
- pLib->general_mdm_event = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->ChannelsTraceActive < pLib->Channels) {
- pLib->ChannelsTraceActive++;
- sprintf(name, "State\\B%d\\Line", pLib->ChannelsTraceActive);
- if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
- pLib->ChannelsTraceActive--;
- return (-1);
- }
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->ModemTraceActive < pLib->Channels) {
- pLib->ModemTraceActive++;
- sprintf(name, "State\\B%d\\Modem\\Event", pLib->ModemTraceActive);
- if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
- pLib->ModemTraceActive--;
- return (-1);
- }
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->FaxTraceActive < pLib->Channels) {
- pLib->FaxTraceActive++;
- sprintf(name, "State\\B%d\\FAX\\Event", pLib->FaxTraceActive);
- if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
- pLib->FaxTraceActive--;
- return (-1);
- }
- pLib->req_busy = 1;
- return (0);
- }
-
- if (!pLib->trace_mask_init) {
- word tmp = 0x0000;
- if (SuperTraceWriteVar(pLib->hAdapter,
- pLib->buffer,
- "Trace\\Event Enable",
- &tmp,
- 0x87, /* MI_BITFLD */
- sizeof(tmp))) {
- return (-1);
- }
- pLib->trace_mask_init = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (!pLib->audio_trace_init) {
- dword tmp = 0x00000000;
- if (SuperTraceWriteVar(pLib->hAdapter,
- pLib->buffer,
- "Trace\\AudioCh# Enable",
- &tmp,
- 0x87, /* MI_BITFLD */
- sizeof(tmp))) {
- return (-1);
- }
- pLib->audio_trace_init = 2;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (!pLib->bchannel_init) {
- dword tmp = 0x00000000;
- if (SuperTraceWriteVar(pLib->hAdapter,
- pLib->buffer,
- "Trace\\B-Ch# Enable",
- &tmp,
- 0x87, /* MI_BITFLD */
- sizeof(tmp))) {
- return (-1);
- }
- pLib->bchannel_init = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (!pLib->trace_length_init) {
- word tmp = 30;
- if (SuperTraceWriteVar(pLib->hAdapter,
- pLib->buffer,
- "Trace\\Max Log Length",
- &tmp,
- 0x82, /* MI_UINT */
- sizeof(tmp))) {
- return (-1);
- }
- pLib->trace_length_init = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (!pLib->trace_on) {
- if (SuperTraceTraceOnRequest(pLib->hAdapter,
- "Trace\\Log Buffer",
- pLib->buffer)) {
- return (-1);
- }
- pLib->trace_on = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->trace_event_mask != pLib->current_trace_event_mask) {
- if (SuperTraceWriteVar(pLib->hAdapter,
- pLib->buffer,
- "Trace\\Event Enable",
- &pLib->trace_event_mask,
- 0x87, /* MI_BITFLD */
- sizeof(pLib->trace_event_mask))) {
- return (-1);
- }
- pLib->current_trace_event_mask = pLib->trace_event_mask;
- pLib->req_busy = 1;
- return (0);
- }
-
- if ((pLib->audio_tap_pending >= 0) && (pLib->audio_tap_mask != pLib->current_audio_tap_mask)) {
- if (SuperTraceWriteVar(pLib->hAdapter,
- pLib->buffer,
- "Trace\\AudioCh# Enable",
- &pLib->audio_tap_mask,
- 0x87, /* MI_BITFLD */
- sizeof(pLib->audio_tap_mask))) {
- return (-1);
- }
- pLib->current_audio_tap_mask = pLib->audio_tap_mask;
- pLib->audio_tap_pending = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- if ((pLib->eye_pattern_pending >= 0) && (pLib->audio_tap_mask != pLib->current_eye_pattern_mask)) {
- if (SuperTraceWriteVar(pLib->hAdapter,
- pLib->buffer,
- "Trace\\EyeCh# Enable",
- &pLib->audio_tap_mask,
- 0x87, /* MI_BITFLD */
- sizeof(pLib->audio_tap_mask))) {
- return (-1);
- }
- pLib->current_eye_pattern_mask = pLib->audio_tap_mask;
- pLib->eye_pattern_pending = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->bchannel_trace_mask != pLib->current_bchannel_trace_mask) {
- if (SuperTraceWriteVar(pLib->hAdapter,
- pLib->buffer,
- "Trace\\B-Ch# Enable",
- &pLib->bchannel_trace_mask,
- 0x87, /* MI_BITFLD */
- sizeof(pLib->bchannel_trace_mask))) {
- return (-1);
- }
- pLib->current_bchannel_trace_mask = pLib->bchannel_trace_mask;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (!pLib->trace_events_down) {
- if (SuperTraceTraceOnRequest(pLib->hAdapter,
- "Events Down",
- pLib->buffer)) {
- return (-1);
- }
- pLib->trace_events_down = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (!pLib->l1_trace) {
- if (SuperTraceTraceOnRequest(pLib->hAdapter,
- "State\\Layer1",
- pLib->buffer)) {
- return (-1);
- }
- pLib->l1_trace = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (!pLib->l2_trace) {
- if (SuperTraceTraceOnRequest(pLib->hAdapter,
- "State\\Layer2 No1",
- pLib->buffer)) {
- return (-1);
- }
- pLib->l2_trace = 1;
- pLib->req_busy = 1;
- return (0);
- }
-
- for (i = 0; i < 30; i++) {
- if (pLib->pending_line_status & (1L << i)) {
- sprintf(name, "State\\B%d", i + 1);
- if (SuperTraceReadRequest(pLib->hAdapter, name, pLib->buffer)) {
- return (-1);
- }
- pLib->pending_line_status &= ~(1L << i);
- pLib->req_busy = 1;
- return (0);
- }
- if (pLib->pending_modem_status & (1L << i)) {
- sprintf(name, "State\\B%d\\Modem", i + 1);
- if (SuperTraceReadRequest(pLib->hAdapter, name, pLib->buffer)) {
- return (-1);
- }
- pLib->pending_modem_status &= ~(1L << i);
- pLib->req_busy = 1;
- return (0);
- }
- if (pLib->pending_fax_status & (1L << i)) {
- sprintf(name, "State\\B%d\\FAX", i + 1);
- if (SuperTraceReadRequest(pLib->hAdapter, name, pLib->buffer)) {
- return (-1);
- }
- pLib->pending_fax_status &= ~(1L << i);
- pLib->req_busy = 1;
- return (0);
- }
- if (pLib->clear_call_command & (1L << i)) {
- sprintf(name, "State\\B%d\\Clear Call", i + 1);
- if (SuperTraceExecuteRequest(pLib->hAdapter, name, pLib->buffer)) {
- return (-1);
- }
- pLib->clear_call_command &= ~(1L << i);
- pLib->req_busy = 1;
- return (0);
- }
- }
-
- if (pLib->outgoing_ifc_stats) {
- if (SuperTraceReadRequest(pLib->hAdapter,
- "Statistics\\Outgoing Calls",
- pLib->buffer)) {
- return (-1);
- }
- pLib->outgoing_ifc_stats = 0;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->incoming_ifc_stats) {
- if (SuperTraceReadRequest(pLib->hAdapter,
- "Statistics\\Incoming Calls",
- pLib->buffer)) {
- return (-1);
- }
- pLib->incoming_ifc_stats = 0;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->modem_ifc_stats) {
- if (SuperTraceReadRequest(pLib->hAdapter,
- "Statistics\\Modem",
- pLib->buffer)) {
- return (-1);
- }
- pLib->modem_ifc_stats = 0;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->fax_ifc_stats) {
- if (SuperTraceReadRequest(pLib->hAdapter,
- "Statistics\\FAX",
- pLib->buffer)) {
- return (-1);
- }
- pLib->fax_ifc_stats = 0;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->b1_ifc_stats) {
- if (SuperTraceReadRequest(pLib->hAdapter,
- "Statistics\\B-Layer1",
- pLib->buffer)) {
- return (-1);
- }
- pLib->b1_ifc_stats = 0;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->b2_ifc_stats) {
- if (SuperTraceReadRequest(pLib->hAdapter,
- "Statistics\\B-Layer2",
- pLib->buffer)) {
- return (-1);
- }
- pLib->b2_ifc_stats = 0;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->d1_ifc_stats) {
- if (SuperTraceReadRequest(pLib->hAdapter,
- "Statistics\\D-Layer1",
- pLib->buffer)) {
- return (-1);
- }
- pLib->d1_ifc_stats = 0;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (pLib->d2_ifc_stats) {
- if (SuperTraceReadRequest(pLib->hAdapter,
- "Statistics\\D-Layer2",
- pLib->buffer)) {
- return (-1);
- }
- pLib->d2_ifc_stats = 0;
- pLib->req_busy = 1;
- return (0);
- }
-
- if (!pLib->IncomingCallsCallsActive) {
- pLib->IncomingCallsCallsActive = 1;
- sprintf(name, "%s", "Statistics\\Incoming Calls\\Calls");
- if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
- pLib->IncomingCallsCallsActive = 0;
- return (-1);
- }
- pLib->req_busy = 1;
- return (0);
- }
- if (!pLib->IncomingCallsConnectedActive) {
- pLib->IncomingCallsConnectedActive = 1;
- sprintf(name, "%s", "Statistics\\Incoming Calls\\Connected");
- if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
- pLib->IncomingCallsConnectedActive = 0;
- return (-1);
- }
- pLib->req_busy = 1;
- return (0);
- }
- if (!pLib->OutgoingCallsCallsActive) {
- pLib->OutgoingCallsCallsActive = 1;
- sprintf(name, "%s", "Statistics\\Outgoing Calls\\Calls");
- if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
- pLib->OutgoingCallsCallsActive = 0;
- return (-1);
- }
- pLib->req_busy = 1;
- return (0);
- }
- if (!pLib->OutgoingCallsConnectedActive) {
- pLib->OutgoingCallsConnectedActive = 1;
- sprintf(name, "%s", "Statistics\\Outgoing Calls\\Connected");
- if ((ret = SuperTraceTraceOnRequest(pLib->hAdapter, name, pLib->buffer))) {
- pLib->OutgoingCallsConnectedActive = 0;
- return (-1);
- }
- pLib->req_busy = 1;
- return (0);
- }
-
- return (0);
-}
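The function above is a flat, priority-ordered state machine: at most one management request is in flight at a time (req_busy), and the routine is re-entered after every completion to issue the next pending item, turning a long sequence of card requests into restartable steps. A self-contained sketch of the same pattern with invented names:

#include <stdio.h>

struct sched_ctx {
	int busy;		/* one request in flight at a time */
	unsigned long pending;	/* one bit per work item; bit 0 = highest prio */
};

static int issue_request(struct sched_ctx *c, int item)
{
	(void)c;
	printf("issuing item %d\n", item);	/* stands in for a card request */
	return 0;
}

static int schedule_next(struct sched_ctx *c)
{
	int i;

	if (c->busy)
		return 0;	/* completion handler will re-enter us */

	for (i = 0; i < 32; i++) {	/* fixed priority order */
		if (c->pending & (1UL << i)) {
			if (issue_request(c, i))
				return -1;	/* hard failure */
			c->pending &= ~(1UL << i);
			c->busy = 1;
			return 0;
		}
	}
	return 0;		/* nothing left to do */
}

int main(void)
{
	struct sched_ctx c = { 0, 0x05 };	/* items 0 and 2 pending */

	schedule_next(&c);	/* issues item 0, sets busy */
	c.busy = 0;		/* pretend the completion arrived */
	schedule_next(&c);	/* issues item 2 */
	return 0;
}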
-
-static int process_idi_event(diva_strace_context_t *pLib,
- diva_man_var_header_t *pVar) {
- const char *path = (char *)&pVar->path_length + 1;
- char name[64];
- int i;
-
- if (!strncmp("State\\B Event", path, pVar->path_length)) {
- dword ch_id;
- if (!diva_trace_read_variable(pVar, &ch_id)) {
- if (!pLib->line_init_event && !pLib->pending_line_status) {
- for (i = 1; i <= pLib->Channels; i++) {
- diva_line_event(pLib, i);
- }
- return (0);
- } else if (ch_id && ch_id <= pLib->Channels) {
- return (diva_line_event(pLib, (int)ch_id));
- }
- return (0);
- }
- return (-1);
- }
-
- if (!strncmp("State\\FAX Event", path, pVar->path_length)) {
- dword ch_id;
- if (!diva_trace_read_variable(pVar, &ch_id)) {
- if (!pLib->pending_fax_status && !pLib->fax_init_event) {
- for (i = 1; i <= pLib->Channels; i++) {
- diva_fax_event(pLib, i);
- }
- return (0);
- } else if (ch_id && ch_id <= pLib->Channels) {
- return (diva_fax_event(pLib, (int)ch_id));
- }
- return (0);
- }
- return (-1);
- }
-
- if (!strncmp("State\\Modem Event", path, pVar->path_length)) {
- dword ch_id;
- if (!diva_trace_read_variable(pVar, &ch_id)) {
- if (!pLib->pending_modem_status && !pLib->modem_init_event) {
- for (i = 1; i <= pLib->Channels; i++) {
- diva_modem_event(pLib, i);
- }
- return (0);
- } else if (ch_id && ch_id <= pLib->Channels) {
- return (diva_modem_event(pLib, (int)ch_id));
- }
- return (0);
- }
- return (-1);
- }
-
- /*
- First look for Line Event
- */
- for (i = 1; i <= pLib->Channels; i++) {
- sprintf(name, "State\\B%d\\Line", i);
- if (find_var(pVar, name)) {
- return (diva_line_event(pLib, i));
- }
- }
-
- /*
- Look for Modem Progress Event
- */
- for (i = 1; i <= pLib->Channels; i++) {
- sprintf(name, "State\\B%d\\Modem\\Event", i);
- if (find_var(pVar, name)) {
- return (diva_modem_event(pLib, i));
- }
- }
-
- /*
- Look for Fax Event
- */
- for (i = 1; i <= pLib->Channels; i++) {
- sprintf(name, "State\\B%d\\FAX\\Event", i);
- if (find_var(pVar, name)) {
- return (diva_fax_event(pLib, i));
- }
- }
-
- /*
- Notification about loss of events
- */
- if (!strncmp("Events Down", path, pVar->path_length)) {
- if (pLib->trace_events_down == 1) {
- pLib->trace_events_down = 2;
- } else {
- diva_trace_error(pLib, 1, "Events Down", 0);
- }
- return (0);
- }
-
- if (!strncmp("State\\Layer1", path, pVar->path_length)) {
- diva_strace_read_asz(pVar, &pLib->lines[0].pInterface->Layer1[0]);
- if (pLib->l1_trace == 1) {
- pLib->l1_trace = 2;
- } else {
- diva_trace_notify_user(pLib, 0, DIVA_SUPER_TRACE_INTERFACE_CHANGE);
- }
- return (0);
- }
- if (!strncmp("State\\Layer2 No1", path, pVar->path_length)) {
- char *tmp = &pLib->lines[0].pInterface->Layer2[0];
- dword l2_state;
- if (diva_strace_read_uint(pVar, &l2_state))
- return -1;
-
- switch (l2_state) {
- case 0:
- strcpy(tmp, "Idle");
- break;
- case 1:
- strcpy(tmp, "Layer2 UP");
- break;
- case 2:
- strcpy(tmp, "Layer2 Disconnecting");
- break;
- case 3:
- strcpy(tmp, "Layer2 Connecting");
- break;
- case 4:
- strcpy(tmp, "SPID Initializing");
- break;
- case 5:
- strcpy(tmp, "SPID Initialised");
- break;
- case 6:
- strcpy(tmp, "Layer2 Connecting");
- break;
-
- case 7:
- strcpy(tmp, "Auto SPID Stopped");
- break;
-
- case 8:
- strcpy(tmp, "Auto SPID Idle");
- break;
-
- case 9:
- strcpy(tmp, "Auto SPID Requested");
- break;
-
- case 10:
- strcpy(tmp, "Auto SPID Delivery");
- break;
-
- case 11:
- strcpy(tmp, "Auto SPID Complete");
- break;
-
- default:
- sprintf(tmp, "U:%d", (int)l2_state);
- }
- if (pLib->l2_trace == 1) {
- pLib->l2_trace = 2;
- } else {
- diva_trace_notify_user(pLib, 0, DIVA_SUPER_TRACE_INTERFACE_CHANGE);
- }
- return (0);
- }
-
- if (!strncmp("Statistics\\Incoming Calls\\Calls", path, pVar->path_length) ||
- !strncmp("Statistics\\Incoming Calls\\Connected", path, pVar->path_length)) {
- return (SuperTraceGetIncomingCallStatistics(pLib));
- }
-
- if (!strncmp("Statistics\\Outgoing Calls\\Calls", path, pVar->path_length) ||
- !strncmp("Statistics\\Outgoing Calls\\Connected", path, pVar->path_length)) {
- return (SuperTraceGetOutgoingCallStatistics(pLib));
- }
-
- return (-1);
-}
-
-static int diva_line_event(diva_strace_context_t *pLib, int Channel) {
- pLib->pending_line_status |= (1L << (Channel - 1));
- return (0);
-}
-
-static int diva_modem_event(diva_strace_context_t *pLib, int Channel) {
- pLib->pending_modem_status |= (1L << (Channel - 1));
- return (0);
-}
-
-static int diva_fax_event(diva_strace_context_t *pLib, int Channel) {
- pLib->pending_fax_status |= (1L << (Channel - 1));
- return (0);
-}
-
-/*
- Process INFO indications that arrive from the card
- Uses the path of the first I.E. to detect the source of the
- indication
-*/
-static int process_idi_info(diva_strace_context_t *pLib,
- diva_man_var_header_t *pVar) {
- const char *path = (char *)&pVar->path_length + 1;
- char name[64];
- int i, len;
-
- /*
- First look for Modem Status Info
- */
- for (i = pLib->Channels; i > 0; i--) {
- len = sprintf(name, "State\\B%d\\Modem", i);
- if (!strncmp(name, path, len)) {
- return (diva_modem_info(pLib, i, pVar));
- }
- }
-
- /*
- Look for Fax Status Info
- */
- for (i = pLib->Channels; i > 0; i--) {
- len = sprintf(name, "State\\B%d\\FAX", i);
- if (!strncmp(name, path, len)) {
- return (diva_fax_info(pLib, i, pVar));
- }
- }
-
- /*
- Look for Line Status Info
- */
- for (i = pLib->Channels; i > 0; i--) {
- len = sprintf(name, "State\\B%d", i);
- if (!strncmp(name, path, len)) {
- return (diva_line_info(pLib, i, pVar));
- }
- }
-
- if (!diva_ifc_statistics(pLib, pVar)) {
- return (0);
- }
-
- return (-1);
-}
-
-/*
- MODEM INSTANCE STATE UPDATE
-
- Update Modem Status Information and issue a notification to the user
- that informs about a change in the state of the modem instance
- associated with this channel
-*/
-static int diva_modem_info(diva_strace_context_t *pLib,
- int Channel,
- diva_man_var_header_t *pVar) {
- diva_man_var_header_t *cur;
- int i, nr = Channel - 1;
-
- for (i = pLib->modem_parse_entry_first[nr];
- i <= pLib->modem_parse_entry_last[nr]; i++) {
- if ((cur = find_var(pVar, pLib->parse_table[i].path))) {
- if (diva_trace_read_variable(cur, pLib->parse_table[i].variable)) {
- diva_trace_error(pLib, -3, __FILE__, __LINE__);
- return (-1);
- }
- } else {
- diva_trace_error(pLib, -2, __FILE__, __LINE__);
- return (-1);
- }
- }
-
- /*
- We do not use the first event to notify the user - this is the event
- generated as a result of the EVENT ON operation and is used only to
- initialize internal variables of the application
- */
- if (pLib->modem_init_event & (1L << nr)) {
- diva_trace_notify_user(pLib, nr, DIVA_SUPER_TRACE_NOTIFY_MODEM_CHANGE);
- } else {
- pLib->modem_init_event |= (1L << nr);
- }
-
- return (0);
-}
-
-static int diva_fax_info(diva_strace_context_t *pLib,
- int Channel,
- diva_man_var_header_t *pVar) {
- diva_man_var_header_t *cur;
- int i, nr = Channel - 1;
-
- for (i = pLib->fax_parse_entry_first[nr];
- i <= pLib->fax_parse_entry_last[nr]; i++) {
- if ((cur = find_var(pVar, pLib->parse_table[i].path))) {
- if (diva_trace_read_variable(cur, pLib->parse_table[i].variable)) {
- diva_trace_error(pLib, -3, __FILE__, __LINE__);
- return (-1);
- }
- } else {
- diva_trace_error(pLib, -2, __FILE__, __LINE__);
- return (-1);
- }
- }
-
- /*
- We do not use the first event to notify the user - this is the event
- generated as a result of the EVENT ON operation and is used only to
- initialize internal variables of the application
- */
- if (pLib->fax_init_event & (1L << nr)) {
- diva_trace_notify_user(pLib, nr, DIVA_SUPER_TRACE_NOTIFY_FAX_CHANGE);
- } else {
- pLib->fax_init_event |= (1L << nr);
- }
-
- return (0);
-}
-
-/*
- LINE STATE UPDATE
- Update Line Status Information and issue a notification to the user
- that informs about a change in the line state.
-*/
-static int diva_line_info(diva_strace_context_t *pLib,
- int Channel,
- diva_man_var_header_t *pVar) {
- diva_man_var_header_t *cur;
- int i, nr = Channel - 1;
-
- for (i = pLib->line_parse_entry_first[nr];
- i <= pLib->line_parse_entry_last[nr]; i++) {
- if ((cur = find_var(pVar, pLib->parse_table[i].path))) {
- if (diva_trace_read_variable(cur, pLib->parse_table[i].variable)) {
- diva_trace_error(pLib, -3, __FILE__, __LINE__);
- return (-1);
- }
- } else {
-  diva_trace_error(pLib, -2, __FILE__, __LINE__);
- return (-1);
- }
- }
-
- /*
-  We do not use the first event to notify the user - this event is
-  generated as a result of the EVENT ON operation and is used only to
-  initialize internal variables of the application.
-
-  The exception is if the line is "online". In this case we have to
-  notify the user about this condition.
- */
- if (pLib->line_init_event & (1L << nr)) {
- diva_trace_notify_user(pLib, nr, DIVA_SUPER_TRACE_NOTIFY_LINE_CHANGE);
- } else {
- pLib->line_init_event |= (1L << nr);
- if (strcmp(&pLib->lines[nr].Line[0], "Idle")) {
- diva_trace_notify_user(pLib, nr, DIVA_SUPER_TRACE_NOTIFY_LINE_CHANGE);
- }
- }
-
- return (0);
-}
-
-/*
-  Move position to the next variable in the chain
-*/
-static diva_man_var_header_t *get_next_var(diva_man_var_header_t *pVar) {
- byte *msg = (byte *)pVar;
- byte *start;
- int msg_length;
-
- if (*msg != ESC) return NULL;
-
- start = msg + 2;
- msg_length = *(msg + 1);
- msg = (start + msg_length);
-
- if (*msg != ESC) return NULL;
-
- return ((diva_man_var_header_t *)msg);
-}
-
-/*
- Move position to variable with given name
-*/
-static diva_man_var_header_t *find_var(diva_man_var_header_t *pVar,
- const char *name) {
- const char *path;
-
- do {
- path = (char *)&pVar->path_length + 1;
-
- if (!strncmp(name, path, pVar->path_length)) {
- break;
- }
- } while ((pVar = get_next_var(pVar)));
-
- return (pVar);
-}
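-
-/*
-  Editor's note (illustrative sketch): get_next_var()/find_var() walk a
-  chain of elements, each framed as <ESC> <length> <body of that many
-  bytes>.  A bounds-checked walker over the same framing - ESC_BYTE is a
-  placeholder here, the driver takes the real ESC value from its own
-  headers:
-
-  typedef unsigned char byte;
-  #define ESC_BYTE 0xff   // hypothetical value, for illustration only
-
-  static const byte *next_element(const byte *msg, const byte *end)
-  {
-      if (msg >= end || *msg != ESC_BYTE)
-          return 0;                    // not at a framed element
-      msg += 2 + msg[1];               // skip ESC, length byte and body
-      if (msg >= end || *msg != ESC_BYTE)
-          return 0;                    // end of chain
-      return msg;
-  }
-*/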
-
-static void diva_create_line_parse_table(diva_strace_context_t *pLib,
- int Channel) {
- diva_trace_line_state_t *pLine = &pLib->lines[Channel];
- int nr = Channel + 1;
-
- if ((pLib->cur_parse_entry + LINE_PARSE_ENTRIES) >= pLib->parse_entries) {
- diva_trace_error(pLib, -1, __FILE__, __LINE__);
- return;
- }
-
- pLine->ChannelNumber = nr;
-
- pLib->line_parse_entry_first[Channel] = pLib->cur_parse_entry;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Framing", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Framing[0];
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Line", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Line[0];
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Layer2", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Layer2[0];
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Layer3", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Layer3[0];
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Remote Address", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLine->RemoteAddress[0];
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Remote SubAddr", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLine->RemoteSubAddress[0];
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Local Address", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLine->LocalAddress[0];
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Local SubAddr", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLine->LocalSubAddress[0];
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\BC", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->call_BC;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\HLC", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->call_HLC;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\LLC", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->call_LLC;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Charges", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->Charges;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Call Reference", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->CallReference;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Last Disc Cause", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLine->LastDisconnecCause;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\User ID", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pLine->UserID[0];
-
- pLib->line_parse_entry_last[Channel] = pLib->cur_parse_entry - 1;
-}
-
-static void diva_create_fax_parse_table(diva_strace_context_t *pLib,
- int Channel) {
- diva_trace_fax_state_t *pFax = &pLib->lines[Channel].fax;
- int nr = Channel + 1;
-
- if ((pLib->cur_parse_entry + FAX_PARSE_ENTRIES) >= pLib->parse_entries) {
- diva_trace_error(pLib, -1, __FILE__, __LINE__);
- return;
- }
- pFax->ChannelNumber = nr;
-
- pLib->fax_parse_entry_first[Channel] = pLib->cur_parse_entry;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Event", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Event;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Page Counter", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Page_Counter;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Features", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Features;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Station ID", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Station_ID[0];
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Subaddress", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Subaddress[0];
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Password", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Password[0];
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Speed", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Speed;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Resolution", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Resolution;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Paper Width", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Paper_Width;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Paper Length", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Paper_Length;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Scanline Time", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Scanline_Time;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\FAX\\Disc Reason", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pFax->Disc_Reason;
-
- pLib->fax_parse_entry_last[Channel] = pLib->cur_parse_entry - 1;
-}
-
-static void diva_create_modem_parse_table(diva_strace_context_t *pLib,
- int Channel) {
- diva_trace_modem_state_t *pModem = &pLib->lines[Channel].modem;
- int nr = Channel + 1;
-
- if ((pLib->cur_parse_entry + MODEM_PARSE_ENTRIES) >= pLib->parse_entries) {
- diva_trace_error(pLib, -1, __FILE__, __LINE__);
- return;
- }
- pModem->ChannelNumber = nr;
-
- pLib->modem_parse_entry_first[Channel] = pLib->cur_parse_entry;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\Event", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->Event;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\Norm", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->Norm;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\Options", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->Options;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\TX Speed", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->TxSpeed;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\RX Speed", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RxSpeed;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\Roundtrip ms", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RoundtripMsec;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\Symbol Rate", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->SymbolRate;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\RX Level dBm", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RxLeveldBm;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\Echo Level dBm", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->EchoLeveldBm;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\SNR dB", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->SNRdb;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\MAE", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->MAE;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\Local Retrains", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->LocalRetrains;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\Remote Retrains", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RemoteRetrains;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\Local Resyncs", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->LocalResyncs;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\Remote Resyncs", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->RemoteResyncs;
-
- sprintf(pLib->parse_table[pLib->cur_parse_entry].path,
- "State\\B%d\\Modem\\Disc Reason", nr);
- pLib->parse_table[pLib->cur_parse_entry++].variable = &pModem->DiscReason;
-
- pLib->modem_parse_entry_last[Channel] = pLib->cur_parse_entry - 1;
-}
-
-static void diva_create_parse_table(diva_strace_context_t *pLib) {
- int i;
-
- for (i = 0; i < pLib->Channels; i++) {
- diva_create_line_parse_table(pLib, i);
- diva_create_modem_parse_table(pLib, i);
- diva_create_fax_parse_table(pLib, i);
- }
-
- pLib->statistic_parse_first = pLib->cur_parse_entry;
-
- /*
- Outgoing Calls
- */
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Outgoing Calls\\Calls");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.outg.Calls;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Outgoing Calls\\Connected");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.outg.Connected;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Outgoing Calls\\User Busy");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.outg.User_Busy;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Outgoing Calls\\No Answer");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.outg.No_Answer;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Outgoing Calls\\Wrong Number");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.outg.Wrong_Number;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Outgoing Calls\\Call Rejected");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.outg.Call_Rejected;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Outgoing Calls\\Other Failures");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.outg.Other_Failures;
-
- /*
- Incoming Calls
- */
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Incoming Calls\\Calls");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.inc.Calls;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Incoming Calls\\Connected");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.inc.Connected;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Incoming Calls\\User Busy");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.inc.User_Busy;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Incoming Calls\\Call Rejected");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.inc.Call_Rejected;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Incoming Calls\\Wrong Number");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.inc.Wrong_Number;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Incoming Calls\\Incompatible Dst");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.inc.Incompatible_Dst;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Incoming Calls\\Out of Order");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.inc.Out_of_Order;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Incoming Calls\\Ignored");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.inc.Ignored;
-
- /*
- Modem Statistics
- */
- pLib->mdm_statistic_parse_first = pLib->cur_parse_entry;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Modem\\Disc Normal");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.mdm.Disc_Normal;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Modem\\Disc Unspecified");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.mdm.Disc_Unspecified;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Modem\\Disc Busy Tone");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.mdm.Disc_Busy_Tone;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Modem\\Disc Congestion");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.mdm.Disc_Congestion;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Modem\\Disc Carr. Wait");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.mdm.Disc_Carr_Wait;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Modem\\Disc Trn Timeout");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.mdm.Disc_Trn_Timeout;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Modem\\Disc Incompat.");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.mdm.Disc_Incompat;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Modem\\Disc Frame Rej.");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.mdm.Disc_Frame_Rej;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\Modem\\Disc V42bis");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.mdm.Disc_V42bis;
-
- pLib->mdm_statistic_parse_last = pLib->cur_parse_entry - 1;
-
- /*
- Fax Statistics
- */
- pLib->fax_statistic_parse_first = pLib->cur_parse_entry;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Normal");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Normal;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Not Ident.");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Not_Ident;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc No Response");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_No_Response;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Retries");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Retries;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Unexp. Msg.");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Unexp_Msg;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc No Polling.");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_No_Polling;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Training");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Training;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Unexpected");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Unexpected;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Application");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Application;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Incompat.");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Incompat;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc No Command");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_No_Command;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Long Msg");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Long_Msg;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Supervisor");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Supervisor;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc SUB SEP PWD");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_SUB_SEP_PWD;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Invalid Msg");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Invalid_Msg;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Page Coding");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Page_Coding;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc App Timeout");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_App_Timeout;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\FAX\\Disc Unspecified");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.fax.Disc_Unspecified;
-
- pLib->fax_statistic_parse_last = pLib->cur_parse_entry - 1;
-
- /*
-  B-Layer1
- */
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer1\\X-Frames");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b1.X_Frames;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer1\\X-Bytes");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b1.X_Bytes;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer1\\X-Errors");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b1.X_Errors;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer1\\R-Frames");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b1.R_Frames;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer1\\R-Bytes");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b1.R_Bytes;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer1\\R-Errors");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b1.R_Errors;
-
- /*
- B-Layer2
- */
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer2\\X-Frames");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b2.X_Frames;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer2\\X-Bytes");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b2.X_Bytes;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer2\\X-Errors");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b2.X_Errors;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer2\\R-Frames");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b2.R_Frames;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer2\\R-Bytes");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b2.R_Bytes;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\B-Layer2\\R-Errors");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.b2.R_Errors;
-
- /*
- D-Layer1
- */
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer1\\X-Frames");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d1.X_Frames;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer1\\X-Bytes");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d1.X_Bytes;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer1\\X-Errors");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d1.X_Errors;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer1\\R-Frames");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d1.R_Frames;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer1\\R-Bytes");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d1.R_Bytes;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer1\\R-Errors");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d1.R_Errors;
-
- /*
- D-Layer2
- */
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer2\\X-Frames");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d2.X_Frames;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer2\\X-Bytes");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d2.X_Bytes;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer2\\X-Errors");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d2.X_Errors;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer2\\R-Frames");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d2.R_Frames;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer2\\R-Bytes");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d2.R_Bytes;
-
- strcpy(pLib->parse_table[pLib->cur_parse_entry].path,
- "Statistics\\D-Layer2\\R-Errors");
- pLib->parse_table[pLib->cur_parse_entry++].variable = \
- &pLib->InterfaceStat.d2.R_Errors;
-
-
- pLib->statistic_parse_last = pLib->cur_parse_entry - 1;
-}
-
-static void diva_trace_error(diva_strace_context_t *pLib,
- int error, const char *file, int line) {
- if (pLib->user_proc_table.error_notify_proc) {
- (*(pLib->user_proc_table.error_notify_proc))(\
- pLib->user_proc_table.user_context,
- &pLib->instance, pLib->Adapter,
- error, file, line);
- }
-}
-
-/*
-  Deliver notification to the user
-*/
-static void diva_trace_notify_user(diva_strace_context_t *pLib,
- int Channel,
- int notify_subject) {
- if (pLib->user_proc_table.notify_proc) {
- (*(pLib->user_proc_table.notify_proc))(pLib->user_proc_table.user_context,
- &pLib->instance,
- pLib->Adapter,
- &pLib->lines[Channel],
- notify_subject);
- }
-}
-
-/*
-  Read a variable value to its destination based on the variable type
-*/
-static int diva_trace_read_variable(diva_man_var_header_t *pVar,
- void *variable) {
- switch (pVar->type) {
-	case 0x03: /* MI_ASCIIZ - string */
- return (diva_strace_read_asz(pVar, (char *)variable));
- case 0x04: /* MI_ASCII - string */
- return (diva_strace_read_asc(pVar, (char *)variable));
- case 0x05: /* MI_NUMBER - counted sequence of bytes */
- return (diva_strace_read_ie(pVar, (diva_trace_ie_t *)variable));
- case 0x81: /* MI_INT - signed integer */
- return (diva_strace_read_int(pVar, (int *)variable));
- case 0x82: /* MI_UINT - unsigned integer */
- return (diva_strace_read_uint(pVar, (dword *)variable));
-	case 0x83: /* MI_HINT - unsigned integer, hex representation */
- return (diva_strace_read_uint(pVar, (dword *)variable));
- case 0x87: /* MI_BITFLD - unsigned integer, bit representation */
- return (diva_strace_read_uint(pVar, (dword *)variable));
- }
-
- /*
-  This variable type is not handled - indicate an error.
-  Either there is a problem in the management interface or in the
-  application's recoding table, or this application should handle the type.
- */
- return (-1);
-}
-
-/*
- Read signed integer to destination
-*/
-static int diva_strace_read_int(diva_man_var_header_t *pVar, int *var) {
-	byte *ptr = (byte *)&pVar->path_length;
- int value;
-
- ptr += (pVar->path_length + 1);
-
- switch (pVar->value_length) {
- case 1:
- value = *(char *)ptr;
- break;
-
- case 2:
- value = (short)GET_WORD(ptr);
- break;
-
- case 4:
- value = (int)GET_DWORD(ptr);
- break;
-
- default:
- return (-1);
- }
-
- *var = value;
-
- return (0);
-}
-
-static int diva_strace_read_uint(diva_man_var_header_t *pVar, dword *var) {
-	byte *ptr = (byte *)&pVar->path_length;
- dword value;
-
- ptr += (pVar->path_length + 1);
-
- switch (pVar->value_length) {
- case 1:
- value = (byte)(*ptr);
- break;
-
- case 2:
- value = (word)GET_WORD(ptr);
- break;
-
- case 3:
- value = (dword)GET_DWORD(ptr);
- value &= 0x00ffffff;
- break;
-
- case 4:
- value = (dword)GET_DWORD(ptr);
- break;
-
- default:
- return (-1);
- }
-
- *var = value;
-
- return (0);
-}
-
-/*
- Read zero terminated ASCII string
-*/
-static int diva_strace_read_asz(diva_man_var_header_t *pVar, char *var) {
- char *ptr = (char *)&pVar->path_length;
- int length;
-
- ptr += (pVar->path_length + 1);
-
- if (!(length = pVar->value_length)) {
- length = strlen(ptr);
- }
- memcpy(var, ptr, length);
- var[length] = 0;
-
- return (0);
-}
-
-/*
- Read counted (with leading length byte) ASCII string
-*/
-static int diva_strace_read_asc(diva_man_var_header_t *pVar, char *var) {
- char *ptr = (char *)&pVar->path_length;
-
- ptr += (pVar->path_length + 1);
- memcpy(var, ptr + 1, *ptr);
- var[(int)*ptr] = 0;
-
- return (0);
-}
-
-/*
- Read one information element - i.e. one string of byte values with
- one length byte in front
-*/
-static int diva_strace_read_ie(diva_man_var_header_t *pVar,
- diva_trace_ie_t *var) {
- char *ptr = (char *)&pVar->path_length;
-
- ptr += (pVar->path_length + 1);
-
- var->length = *ptr;
- memcpy(&var->data[0], ptr + 1, *ptr);
-
- return (0);
-}
-
-static int SuperTraceSetAudioTap(void *hLib, int Channel, int on) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
- if ((Channel < 1) || (Channel > pLib->Channels)) {
- return (-1);
- }
- Channel--;
-
- if (on) {
- pLib->audio_tap_mask |= (1L << Channel);
- } else {
- pLib->audio_tap_mask &= ~(1L << Channel);
- }
-
- /*
-  EYE patterns require TM_M_DATA to be set as an
-  additional condition
- */
- if (pLib->audio_tap_mask) {
- pLib->trace_event_mask |= TM_M_DATA;
- } else {
- pLib->trace_event_mask &= ~TM_M_DATA;
- }
-
- return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceSetBChannel(void *hLib, int Channel, int on) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
- if ((Channel < 1) || (Channel > pLib->Channels)) {
- return (-1);
- }
- Channel--;
-
- if (on) {
- pLib->bchannel_trace_mask |= (1L << Channel);
- } else {
- pLib->bchannel_trace_mask &= ~(1L << Channel);
- }
-
- return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceSetDChannel(void *hLib, int on) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
- if (on) {
- pLib->trace_event_mask |= (TM_D_CHAN | TM_C_COMM | TM_DL_ERR | TM_LAYER1);
- } else {
- pLib->trace_event_mask &= ~(TM_D_CHAN | TM_C_COMM | TM_DL_ERR | TM_LAYER1);
- }
-
- return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceSetInfo(void *hLib, int on) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
- if (on) {
- pLib->trace_event_mask |= TM_STRING;
- } else {
- pLib->trace_event_mask &= ~TM_STRING;
- }
-
- return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceClearCall(void *hLib, int Channel) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
-
- if ((Channel < 1) || (Channel > pLib->Channels)) {
- return (-1);
- }
- Channel--;
-
- pLib->clear_call_command |= (1L << Channel);
-
- return (ScheduleNextTraceRequest(pLib));
-}
-
-/*
-  Parse and update cumulative statistics
-*/
-static int diva_ifc_statistics(diva_strace_context_t *pLib,
- diva_man_var_header_t *pVar) {
- diva_man_var_header_t *cur;
- int i, one_updated = 0, mdm_updated = 0, fax_updated = 0;
-
- for (i = pLib->statistic_parse_first; i <= pLib->statistic_parse_last; i++) {
- if ((cur = find_var(pVar, pLib->parse_table[i].path))) {
- if (diva_trace_read_variable(cur, pLib->parse_table[i].variable)) {
-	diva_trace_error(pLib, -3, __FILE__, __LINE__);
- return (-1);
- }
- one_updated = 1;
- if ((i >= pLib->mdm_statistic_parse_first) && (i <= pLib->mdm_statistic_parse_last)) {
- mdm_updated = 1;
- }
- if ((i >= pLib->fax_statistic_parse_first) && (i <= pLib->fax_statistic_parse_last)) {
- fax_updated = 1;
- }
- }
- }
-
- /*
-  We do not use the first event to notify the user - this event is
-  generated as a result of the EVENT ON operation and is used only to
-  initialize internal variables of the application
- */
- if (mdm_updated) {
- diva_trace_notify_user(pLib, 0, DIVA_SUPER_TRACE_NOTIFY_MDM_STAT_CHANGE);
- } else if (fax_updated) {
- diva_trace_notify_user(pLib, 0, DIVA_SUPER_TRACE_NOTIFY_FAX_STAT_CHANGE);
- } else if (one_updated) {
- diva_trace_notify_user(pLib, 0, DIVA_SUPER_TRACE_NOTIFY_STAT_CHANGE);
- }
-
- return (one_updated ? 0 : -1);
-}
-
-static int SuperTraceGetOutgoingCallStatistics(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
- pLib->outgoing_ifc_stats = 1;
- return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetIncomingCallStatistics(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
- pLib->incoming_ifc_stats = 1;
- return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetModemStatistics(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
- pLib->modem_ifc_stats = 1;
- return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetFaxStatistics(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
- pLib->fax_ifc_stats = 1;
- return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetBLayer1Statistics(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
- pLib->b1_ifc_stats = 1;
- return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetBLayer2Statistics(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
- pLib->b2_ifc_stats = 1;
- return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetDLayer1Statistics(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
- pLib->d1_ifc_stats = 1;
- return (ScheduleNextTraceRequest(pLib));
-}
-
-static int SuperTraceGetDLayer2Statistics(void *hLib) {
- diva_strace_context_t *pLib = (diva_strace_context_t *)hLib;
- pLib->d2_ifc_stats = 1;
- return (ScheduleNextTraceRequest(pLib));
-}
-
-dword DivaSTraceGetMemotyRequirement(int channels) {
- dword parse_entries = (MODEM_PARSE_ENTRIES + FAX_PARSE_ENTRIES + \
- STAT_PARSE_ENTRIES + \
- LINE_PARSE_ENTRIES + 1) * channels;
- return (sizeof(diva_strace_context_t) + \
- (parse_entries * sizeof(diva_strace_path2action_t)));
-}
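-
-/*
-  Editor's note (hypothetical usage sketch, not from the sources): the
-  value returned above sizes one contiguous block holding the context
-  followed by its parse table, so a caller would typically do:
-
-  dword bytes = DivaSTraceGetMemotyRequirement(channels);
-  diva_strace_context_t *pLib = malloc(bytes);   // illustrative; the
-  if (pLib) {                                    // driver's real
-      memset(pLib, 0, bytes);                    // allocator may differ
-      pLib->parse_table =
-          (diva_strace_path2action_t *)(pLib + 1);
-  }
-*/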
diff --git a/drivers/isdn/hardware/eicon/maintidi.h b/drivers/isdn/hardware/eicon/maintidi.h
deleted file mode 100644
index 2b46147c5532..000000000000
--- a/drivers/isdn/hardware/eicon/maintidi.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2000.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 1.9
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_EICON_TRACE_IDI_IFC_H__
-#define __DIVA_EICON_TRACE_IDI_IFC_H__
-
-void *SuperTraceOpenAdapter(int AdapterNumber);
-int SuperTraceCloseAdapter(void *AdapterHandle);
-int SuperTraceWrite(void *AdapterHandle,
- const void *data, int length);
-int SuperTraceReadRequest(void *AdapterHandle, const char *name, byte *data);
-int SuperTraceGetNumberOfChannels(void *AdapterHandle);
-int SuperTraceASSIGN(void *AdapterHandle, byte *data);
-int SuperTraceREMOVE(void *AdapterHandle);
-int SuperTraceTraceOnRequest(void *hAdapter, const char *name, byte *data);
-int SuperTraceWriteVar(void *AdapterHandle,
- byte *data,
- const char *name,
- void *var,
- byte type,
- byte var_length);
-int SuperTraceExecuteRequest(void *AdapterHandle,
- const char *name,
- byte *data);
-
-typedef struct _diva_strace_path2action {
- char path[64]; /* Full path to variable */
- void *variable; /* Variable that will receive value */
-} diva_strace_path2action_t;
-
-#define DIVA_MAX_MANAGEMENT_TRANSFER_SIZE 4096
-
-typedef struct _diva_strace_context {
- diva_strace_library_interface_t instance;
-
- int Adapter;
- void *hAdapter;
-
- int Channels;
- int req_busy;
-
- ENTITY e;
- IDI_CALL request;
- BUFFERS XData;
- BUFFERS RData;
- byte buffer[DIVA_MAX_MANAGEMENT_TRANSFER_SIZE + 1];
- int removal_state;
- int general_b_ch_event;
- int general_fax_event;
- int general_mdm_event;
-
- byte rc_ok;
-
- /*
- Initialization request state machine
- */
- int ChannelsTraceActive;
- int ModemTraceActive;
- int FaxTraceActive;
- int IncomingCallsCallsActive;
- int IncomingCallsConnectedActive;
- int OutgoingCallsCallsActive;
- int OutgoingCallsConnectedActive;
-
- int trace_mask_init;
- int audio_trace_init;
- int bchannel_init;
- int trace_length_init;
- int trace_on;
- int trace_events_down;
- int l1_trace;
- int l2_trace;
-
- /*
- Trace\Event Enable
- */
- word trace_event_mask;
- word current_trace_event_mask;
-
- dword audio_tap_mask;
- dword current_audio_tap_mask;
- dword current_eye_pattern_mask;
- int audio_tap_pending;
- int eye_pattern_pending;
-
- dword bchannel_trace_mask;
- dword current_bchannel_trace_mask;
-
-
- diva_trace_line_state_t lines[30];
-
- int parse_entries;
- int cur_parse_entry;
- diva_strace_path2action_t *parse_table;
-
- diva_trace_library_user_interface_t user_proc_table;
-
- int line_parse_entry_first[30];
- int line_parse_entry_last[30];
-
- int modem_parse_entry_first[30];
- int modem_parse_entry_last[30];
-
- int fax_parse_entry_first[30];
- int fax_parse_entry_last[30];
-
- int statistic_parse_first;
- int statistic_parse_last;
-
- int mdm_statistic_parse_first;
- int mdm_statistic_parse_last;
-
- int fax_statistic_parse_first;
- int fax_statistic_parse_last;
-
- dword line_init_event;
- dword modem_init_event;
- dword fax_init_event;
-
- dword pending_line_status;
- dword pending_modem_status;
- dword pending_fax_status;
-
- dword clear_call_command;
-
- int outgoing_ifc_stats;
- int incoming_ifc_stats;
- int modem_ifc_stats;
- int fax_ifc_stats;
- int b1_ifc_stats;
- int b2_ifc_stats;
- int d1_ifc_stats;
- int d2_ifc_stats;
-
- diva_trace_interface_state_t Interface;
- diva_ifc_statistics_t InterfaceStat;
-} diva_strace_context_t;
-
-typedef struct _diva_man_var_header {
- byte escape;
- byte length;
- byte management_id;
- byte type;
- byte attribute;
- byte status;
- byte value_length;
- byte path_length;
-} diva_man_var_header_t;
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/man_defs.h b/drivers/isdn/hardware/eicon/man_defs.h
deleted file mode 100644
index 249c471700e7..000000000000
--- a/drivers/isdn/hardware/eicon/man_defs.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 1.9
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-/* Definitions for use with the Management Information Element */
-
-/*------------------------------------------------------------------*/
-/* Management information element */
-/* ---------------------------------------------------------- */
-/* Byte Coding Comment */
-/* ---------------------------------------------------------- */
-/* 0 | 0 1 1 1 1 1 1 1 | ESC */
-/* 1 | 0 x x x x x x x | Length of information element (m-1) */
-/* 2 | 1 0 0 0 0 0 0 0 | Management Information Id */
-/* 3 | x x x x x x x x | Type */
-/* 4 | x x x x x x x x | Attribute */
-/* 5 | x x x x x x x x | Status */
-/* 6 | x x x x x x x x | Variable Value Length (m-n) */
-/* 7 | x x x x x x x x | Path / Variable Name String Length (n-8)*/
-/* 8..n | x x x x x x x x | Path/Node Name String separated by '\' */
-/* n..m | x x x x x x x x | Variable content */
-/*------------------------------------------------------------------*/
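-
-/*
-  Editor's note (derived from the table above; illustrative only): the
-  eight fixed header bytes map onto diva_man_var_header_t (maintidi.h),
-  after which the path string and the variable content follow directly;
-  `hdr` names a hypothetical pointer to such a header:
-
-  const char *path  = (const char *)&hdr->path_length + 1;
-  const byte *value = (const byte *)path + hdr->path_length;
-  // hdr->value_length bytes of variable content start at `value`
-*/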
-
-/*------------------------------------------------------------------*/
-/* Type Field */
-/* */
-/* MAN_READ: not used */
-/* MAN_WRITE: not used */
-/* MAN_EVENT_ON: not used */
-/* MAN_EVENT_OFF: not used */
-/* MAN_INFO_IND: type of variable */
-/* MAN_EVENT_IND: type of variable */
-/* MAN_TRACE_IND not used */
-/*------------------------------------------------------------------*/
-#define MI_DIR 0x01 /* Directory string (zero terminated) */
-#define MI_EXECUTE 0x02 /* Executable function (has no value) */
-#define MI_ASCIIZ 0x03 /* Zero terminated string */
-#define MI_ASCII 0x04 /* String, first byte is length */
-#define MI_NUMBER 0x05 /* Number string, first byte is length*/
-#define MI_TRACE 0x06 /* Trace information, format see below*/
-
-#define MI_FIXED_LENGTH 0x80 /* get length from MAN_INFO max_len */
-#define MI_INT 0x81 /* number to display as signed int */
-#define MI_UINT 0x82 /* number to display as unsigned int */
-#define MI_HINT 0x83 /* number to display in hex format */
-#define MI_HSTR 0x84 /* number to display as a hex string */
-#define MI_BOOLEAN 0x85 /* number to display as boolean */
-#define MI_IP_ADDRESS 0x86 /* number to display as IP address */
-#define MI_BITFLD 0x87 /* number to display as bit field */
-#define MI_SPID_STATE 0x88 /* state# of SPID initialisation */
-
-/*------------------------------------------------------------------*/
-/* Attribute Field */
-/* */
-/* MAN_READ: not used */
-/* MAN_WRITE: not used */
-/* MAN_EVENT_ON: not used */
-/* MAN_EVENT_OFF: not used */
-/* MAN_INFO_IND: set according to capabilities of that variable */
-/* MAN_EVENT_IND: not used */
-/* MAN_TRACE_IND not used */
-/*------------------------------------------------------------------*/
-#define MI_WRITE 0x01 /* Variable is writeable */
-#define MI_EVENT 0x02 /* Variable can indicate changes */
-
-/*------------------------------------------------------------------*/
-/* Status Field */
-/* */
-/* MAN_READ: not used */
-/* MAN_WRITE: not used */
-/* MAN_EVENT_ON: not used */
-/* MAN_EVENT_OFF: not used */
-/* MAN_INFO_IND: set according to the actual status */
-/* MAN_EVENT_IND: set according to the actual status */
-/* MAN_TRACE_IND not used */
-/*------------------------------------------------------------------*/
-#define MI_LOCKED 0x01 /* write protected by another instance*/
-#define MI_EVENT_ON 0x02 /* Event logging switched on */
-#define MI_PROTECTED 0x04 /* write protected by this instance */
-
-/*------------------------------------------------------------------*/
-/* Data Format used for MAN_TRACE_IND (no MI-element used) */
-/*------------------------------------------------------------------*/
-typedef struct mi_xlog_hdr_s MI_XLOG_HDR;
-struct mi_xlog_hdr_s
-{
- unsigned long time; /* Timestamp in msec units */
- unsigned short size; /* Size of data that follows */
- unsigned short code; /* code of trace event */
-}; /* unspecified data follows this header */
-
-/*------------------------------------------------------------------*/
-/* Trace mask definitions for trace events except B channel and */
-/* debug trace events */
-/*------------------------------------------------------------------*/
-#define TM_D_CHAN 0x0001 /* D-Channel (D-.) Code 3,4 */
-#define TM_L_LAYER 0x0002 /* Low Layer (LL) Code 6,7 */
-#define TM_N_LAYER 0x0004 /* Network Layer (N) Code 14,15 */
-#define TM_DL_ERR 0x0008 /* Data Link Error (MDL) Code 9 */
-#define TM_LAYER1 0x0010 /* Layer 1 Code 20 */
-#define TM_C_COMM 0x0020 /* Call Comment (SIG) Code 5,21,22 */
-#define TM_M_DATA 0x0040 /* Modulation Data (EYE) Code 23 */
-#define TM_STRING 0x0080 /* String data Code 24 */
-#define TM_N_USED2 0x0100 /* not used */
-#define TM_N_USED3 0x0200 /* not used */
-#define TM_N_USED4 0x0400 /* not used */
-#define TM_N_USED5 0x0800 /* not used */
-#define TM_N_USED6 0x1000 /* not used */
-#define TM_N_USED7 0x2000 /* not used */
-#define TM_N_USED8 0x4000 /* not used */
-#define TM_REST 0x8000 /* Codes 10,11,12,13,16,18,19,128,129 */
-
-/*------ End of file -----------------------------------------------*/
diff --git a/drivers/isdn/hardware/eicon/mdm_msg.h b/drivers/isdn/hardware/eicon/mdm_msg.h
deleted file mode 100644
index 0e6b2e009a74..000000000000
--- a/drivers/isdn/hardware/eicon/mdm_msg.h
+++ /dev/null
@@ -1,346 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __EICON_MDM_MSG_H__
-#define __EICON_MDM_MSG_H__
-#define DSP_UDATA_INDICATION_DCD_OFF 0x01
-#define DSP_UDATA_INDICATION_DCD_ON 0x02
-#define DSP_UDATA_INDICATION_CTS_OFF 0x03
-#define DSP_UDATA_INDICATION_CTS_ON 0x04
-/* =====================================================================
- DCD_OFF Message:
- <word> time of DCD off (sampled from counter at 8kHz)
- DCD_ON Message:
- <word> time of DCD on (sampled from counter at 8kHz)
- <byte> connected norm
- <word> connected options
- <dword> connected speed (bit/s, max of tx and rx speed)
- <word> roundtrip delay (ms)
- <dword> connected speed tx (bit/s)
- <dword> connected speed rx (bit/s)
- Size of this message == 19 bytes, but we will receive only 11
- ===================================================================== */
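-/*
-  Editor's note (hypothetical sketch): the 11 bytes actually delivered
-  for DCD_ON decode as below; get_word()/get_dword() stand in for the
-  driver's little-endian accessors (GET_WORD/GET_DWORD):
-
-  struct dcd_on {
-      word  time;        // sampled from counter at 8kHz
-      byte  norm;        // DSP_CONNECTED_NORM_*
-      word  options;     // DSP_CONNECTED_OPTION_*
-      dword speed;       // bit/s, max of tx and rx
-      word  roundtrip;   // roundtrip delay in ms
-  };                     // 2 + 1 + 2 + 4 + 2 = 11 bytes on the wire
-
-  static void parse_dcd_on(const byte *p, struct dcd_on *m)
-  {
-      m->time      = get_word(p);  p += 2;
-      m->norm      = *p++;
-      m->options   = get_word(p);  p += 2;
-      m->speed     = get_dword(p); p += 4;
-      m->roundtrip = get_word(p);
-  }
-*/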
-#define DSP_CONNECTED_NORM_UNSPECIFIED 0
-#define DSP_CONNECTED_NORM_V21 1
-#define DSP_CONNECTED_NORM_V23 2
-#define DSP_CONNECTED_NORM_V22 3
-#define DSP_CONNECTED_NORM_V22_BIS 4
-#define DSP_CONNECTED_NORM_V32_BIS 5
-#define DSP_CONNECTED_NORM_V34 6
-#define DSP_CONNECTED_NORM_V8 7
-#define DSP_CONNECTED_NORM_BELL_212A 8
-#define DSP_CONNECTED_NORM_BELL_103 9
-#define DSP_CONNECTED_NORM_V29_LEASED_LINE 10
-#define DSP_CONNECTED_NORM_V33_LEASED_LINE 11
-#define DSP_CONNECTED_NORM_V90 12
-#define DSP_CONNECTED_NORM_V21_CH2 13
-#define DSP_CONNECTED_NORM_V27_TER 14
-#define DSP_CONNECTED_NORM_V29 15
-#define DSP_CONNECTED_NORM_V33 16
-#define DSP_CONNECTED_NORM_V17 17
-#define DSP_CONNECTED_NORM_V32 18
-#define DSP_CONNECTED_NORM_K56_FLEX 19
-#define DSP_CONNECTED_NORM_X2 20
-#define DSP_CONNECTED_NORM_V18 21
-#define DSP_CONNECTED_NORM_V18_LOW_HIGH 22
-#define DSP_CONNECTED_NORM_V18_HIGH_LOW 23
-#define DSP_CONNECTED_NORM_V21_LOW_HIGH 24
-#define DSP_CONNECTED_NORM_V21_HIGH_LOW 25
-#define DSP_CONNECTED_NORM_BELL103_LOW_HIGH 26
-#define DSP_CONNECTED_NORM_BELL103_HIGH_LOW 27
-#define DSP_CONNECTED_NORM_V23_75_1200 28
-#define DSP_CONNECTED_NORM_V23_1200_75 29
-#define DSP_CONNECTED_NORM_EDT_110 30
-#define DSP_CONNECTED_NORM_BAUDOT_45 31
-#define DSP_CONNECTED_NORM_BAUDOT_47 32
-#define DSP_CONNECTED_NORM_BAUDOT_50 33
-#define DSP_CONNECTED_NORM_DTMF 34
-#define DSP_CONNECTED_NORM_V18_RESERVED_13 35
-#define DSP_CONNECTED_NORM_V18_RESERVED_14 36
-#define DSP_CONNECTED_NORM_V18_RESERVED_15 37
-#define DSP_CONNECTED_NORM_VOWN 38
-#define DSP_CONNECTED_NORM_V23_OFF_HOOK 39
-#define DSP_CONNECTED_NORM_V23_ON_HOOK 40
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_3 41
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_4 42
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_5 43
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_6 44
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_7 45
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_8 46
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_9 47
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_10 48
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_11 49
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_12 50
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_13 51
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_14 52
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_15 53
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_16 54
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_17 55
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_18 56
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_19 57
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_20 58
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_21 59
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_22 60
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_23 61
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_24 62
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_25 63
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_26 64
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_27 65
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_28 66
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_29 67
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_30 68
-#define DSP_CONNECTED_NORM_VOWN_RESERVED_31 69
-#define DSP_CONNECTED_OPTION_TRELLIS 0x0001
-#define DSP_CONNECTED_OPTION_V42_TRANS 0x0002
-#define DSP_CONNECTED_OPTION_V42_LAPM 0x0004
-#define DSP_CONNECTED_OPTION_SHORT_TRAIN 0x0008
-#define DSP_CONNECTED_OPTION_TALKER_ECHO_PROTECT 0x0010
-#define DSP_CONNECTED_OPTION_V42BIS 0x0020
-#define DSP_CONNECTED_OPTION_MNP2 0x0040
-#define DSP_CONNECTED_OPTION_MNP3 0x0080
-#define DSP_CONNECTED_OPTION_MNP4 0x00c0
-#define DSP_CONNECTED_OPTION_MNP5 0x0100
-#define DSP_CONNECTED_OPTION_MNP10 0x0200
-#define DSP_CONNECTED_OPTION_MASK_V42 0x0024
-#define DSP_CONNECTED_OPTION_MASK_MNP 0x03c0
-#define DSP_CONNECTED_OPTION_MASK_ERROR_CORRECT 0x03e4
-#define DSP_CONNECTED_OPTION_MASK_COMPRESSION 0x0320
-#define DSP_UDATA_INDICATION_DISCONNECT 5
-/*
- returns:
- <byte> cause
-*/
-/* ==========================================================
- DLC: B2 modem configuration
- ========================================================== */
-/*
- Fields in assign DLC information element for modem protocol V.42/MNP:
- <byte> length of information element
- <word> information field length
- <byte> address A (not used, default 3)
- <byte> address B (not used, default 1)
- <byte> modulo mode (not used, default 7)
- <byte> window size (not used, default 7)
- <word> XID length (not used, default 0)
- ... XID information (not used, default empty)
- <byte> modem protocol negotiation options
- <byte> modem protocol options
- <byte> modem protocol break configuration
- <byte> modem protocol application options
-*/
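-/*
-  Editor's note (illustrative only; the struct name is hypothetical):
-  with XID length == 0 the field list above flattens to the wire-order
-  layout below; with a non-empty XID the XID bytes sit between
-  xid_length and the four protocol option bytes:
-
-  typedef struct {
-      byte length;         // length of information element
-      word info_length;    // information field length
-      byte address_a;      // not used, default 3
-      byte address_b;      // not used, default 1
-      byte modulo_mode;    // not used, default 7
-      byte window_size;    // not used, default 7
-      word xid_length;     // not used, default 0
-      byte negotiation;    // DLC_MODEMPROT_* negotiation options
-      byte options;        // DLC_MODEMPROT_* protocol options
-      byte break_config;   // DLC_MODEMPROT_BREAK_*
-      byte appl_options;   // DLC_MODEMPROT_APPL_*
-  } dlc_modem_config_t;    // packing/endianness per the IDI interface
-*/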
-#define DLC_MODEMPROT_DISABLE_V42_V42BIS 0x01
-#define DLC_MODEMPROT_DISABLE_MNP_MNP5 0x02
-#define DLC_MODEMPROT_REQUIRE_PROTOCOL 0x04
-#define DLC_MODEMPROT_DISABLE_V42_DETECT 0x08
-#define DLC_MODEMPROT_DISABLE_COMPRESSION 0x10
-#define DLC_MODEMPROT_REQUIRE_PROTOCOL_V34UP 0x20
-#define DLC_MODEMPROT_NO_PROTOCOL_IF_1200 0x01
-#define DLC_MODEMPROT_BUFFER_IN_V42_DETECT 0x02
-#define DLC_MODEMPROT_DISABLE_V42_SREJ 0x04
-#define DLC_MODEMPROT_DISABLE_MNP3 0x08
-#define DLC_MODEMPROT_DISABLE_MNP4 0x10
-#define DLC_MODEMPROT_DISABLE_MNP10 0x20
-#define DLC_MODEMPROT_NO_PROTOCOL_IF_V22BIS 0x40
-#define DLC_MODEMPROT_NO_PROTOCOL_IF_V32BIS 0x80
-#define DLC_MODEMPROT_BREAK_DISABLED 0x00
-#define DLC_MODEMPROT_BREAK_NORMAL 0x01
-#define DLC_MODEMPROT_BREAK_EXPEDITED 0x02
-#define DLC_MODEMPROT_BREAK_DESTRUCTIVE 0x03
-#define DLC_MODEMPROT_BREAK_CONFIG_MASK 0x03
-#define DLC_MODEMPROT_APPL_EARLY_CONNECT 0x01
-#define DLC_MODEMPROT_APPL_PASS_INDICATIONS 0x02
-/* ==========================================================
- CAI parameters used for the modem L1 configuration
- ========================================================== */
-/*
- Fields in assign CAI information element:
- <byte> length of information element
- <byte> info field and B-channel hardware
- <byte> rate adaptation bit rate
- <byte> async framing parameters
- <byte> reserved
- <word> packet length
- <byte> modem line taking options
- <byte> modem modulation negotiation parameters
- <byte> modem modulation options
- <byte> modem disabled modulations mask low
- <byte> modem disabled modulations mask high
- <byte> modem enabled modulations mask
- <word> modem min TX speed
- <word> modem max TX speed
- <word> modem min RX speed
- <word> modem max RX speed
- <byte> modem disabled symbol rates mask
- <byte> modem info options mask
- <byte> modem transmit level adjust
- <byte> modem speaker parameters
- <word> modem private debug config
- <struct> modem reserved
- <struct> v18 config parameters
- <struct> v18 probing sequence
- <struct> v18 probing message
-*/
-#define DSP_CAI_HARDWARE_HDLC_64K 0x05
-#define DSP_CAI_HARDWARE_HDLC_56K 0x08
-#define DSP_CAI_HARDWARE_TRANSP 0x09
-#define DSP_CAI_HARDWARE_V110_SYNC 0x0c
-#define DSP_CAI_HARDWARE_V110_ASYNC 0x0d
-#define DSP_CAI_HARDWARE_HDLC_128K 0x0f
-#define DSP_CAI_HARDWARE_FAX 0x10
-#define DSP_CAI_HARDWARE_MODEM_ASYNC 0x11
-#define DSP_CAI_HARDWARE_MODEM_SYNC 0x12
-#define DSP_CAI_HARDWARE_V110_HDLCA 0x13
-#define DSP_CAI_HARDWARE_ADVANCED_VOICE 0x14
-#define DSP_CAI_HARDWARE_TRANSP_DTMF 0x16
-#define DSP_CAI_HARDWARE_DTMF_VOICE_ISDN 0x17
-#define DSP_CAI_HARDWARE_DTMF_VOICE_LOCAL 0x18
-#define DSP_CAI_HARDWARE_MASK 0x3f
-#define DSP_CAI_ENABLE_INFO_INDICATIONS 0x80
-#define DSP_CAI_RATE_ADAPTATION_300 0x00
-#define DSP_CAI_RATE_ADAPTATION_600 0x01
-#define DSP_CAI_RATE_ADAPTATION_1200 0x02
-#define DSP_CAI_RATE_ADAPTATION_2400 0x03
-#define DSP_CAI_RATE_ADAPTATION_4800 0x04
-#define DSP_CAI_RATE_ADAPTATION_9600 0x05
-#define DSP_CAI_RATE_ADAPTATION_19200 0x06
-#define DSP_CAI_RATE_ADAPTATION_38400 0x07
-#define DSP_CAI_RATE_ADAPTATION_48000 0x08
-#define DSP_CAI_RATE_ADAPTATION_56000 0x09
-#define DSP_CAI_RATE_ADAPTATION_7200 0x0a
-#define DSP_CAI_RATE_ADAPTATION_14400 0x0b
-#define DSP_CAI_RATE_ADAPTATION_28800 0x0c
-#define DSP_CAI_RATE_ADAPTATION_12000 0x0d
-#define DSP_CAI_RATE_ADAPTATION_1200_75 0x0e
-#define DSP_CAI_RATE_ADAPTATION_75_1200 0x0f
-#define DSP_CAI_RATE_ADAPTATION_MASK 0x0f
-#define DSP_CAI_ASYNC_PARITY_ENABLE 0x01
-#define DSP_CAI_ASYNC_PARITY_SPACE 0x00
-#define DSP_CAI_ASYNC_PARITY_ODD 0x02
-#define DSP_CAI_ASYNC_PARITY_EVEN 0x04
-#define DSP_CAI_ASYNC_PARITY_MARK 0x06
-#define DSP_CAI_ASYNC_PARITY_MASK 0x06
-#define DSP_CAI_ASYNC_ONE_STOP_BIT 0x00
-#define DSP_CAI_ASYNC_TWO_STOP_BITS 0x20
-#define DSP_CAI_ASYNC_CHAR_LENGTH_8 0x00
-#define DSP_CAI_ASYNC_CHAR_LENGTH_7 0x40
-#define DSP_CAI_ASYNC_CHAR_LENGTH_6 0x80
-#define DSP_CAI_ASYNC_CHAR_LENGTH_5 0xc0
-#define DSP_CAI_ASYNC_CHAR_LENGTH_MASK 0xc0
-#define DSP_CAI_MODEM_LEASED_LINE_MODE 0x01
-#define DSP_CAI_MODEM_4_WIRE_OPERATION 0x02
-#define DSP_CAI_MODEM_DISABLE_BUSY_DETECT 0x04
-#define DSP_CAI_MODEM_DISABLE_CALLING_TONE 0x08
-#define DSP_CAI_MODEM_DISABLE_ANSWER_TONE 0x10
-#define DSP_CAI_MODEM_ENABLE_DIAL_TONE_DET 0x20
-#define DSP_CAI_MODEM_USE_POTS_INTERFACE 0x40
-#define DSP_CAI_MODEM_FORCE_RAY_TAYLOR_FAX 0x80
-#define DSP_CAI_MODEM_NEGOTIATE_HIGHEST 0x00
-#define DSP_CAI_MODEM_NEGOTIATE_DISABLED 0x01
-#define DSP_CAI_MODEM_NEGOTIATE_IN_CLASS 0x02
-#define DSP_CAI_MODEM_NEGOTIATE_V100 0x03
-#define DSP_CAI_MODEM_NEGOTIATE_V8 0x04
-#define DSP_CAI_MODEM_NEGOTIATE_V8BIS 0x05
-#define DSP_CAI_MODEM_NEGOTIATE_MASK 0x07
-#define DSP_CAI_MODEM_GUARD_TONE_NONE 0x00
-#define DSP_CAI_MODEM_GUARD_TONE_550HZ 0x40
-#define DSP_CAI_MODEM_GUARD_TONE_1800HZ 0x80
-#define DSP_CAI_MODEM_GUARD_TONE_MASK 0xc0
-#define DSP_CAI_MODEM_DISABLE_RETRAIN 0x01
-#define DSP_CAI_MODEM_DISABLE_STEPUPDOWN 0x02
-#define DSP_CAI_MODEM_DISABLE_SPLIT_SPEED 0x04
-#define DSP_CAI_MODEM_DISABLE_TRELLIS 0x08
-#define DSP_CAI_MODEM_ALLOW_RDL_TEST_LOOP 0x10
-#define DSP_CAI_MODEM_DISABLE_FLUSH_TIMER 0x40
-#define DSP_CAI_MODEM_REVERSE_DIRECTION 0x80
-#define DSP_CAI_MODEM_DISABLE_V21 0x01
-#define DSP_CAI_MODEM_DISABLE_V23 0x02
-#define DSP_CAI_MODEM_DISABLE_V22 0x04
-#define DSP_CAI_MODEM_DISABLE_V22BIS 0x08
-#define DSP_CAI_MODEM_DISABLE_V32 0x10
-#define DSP_CAI_MODEM_DISABLE_V32BIS 0x20
-#define DSP_CAI_MODEM_DISABLE_V34 0x40
-#define DSP_CAI_MODEM_DISABLE_V90 0x80
-#define DSP_CAI_MODEM_DISABLE_BELL103 0x01
-#define DSP_CAI_MODEM_DISABLE_BELL212A 0x02
-#define DSP_CAI_MODEM_DISABLE_VFC 0x04
-#define DSP_CAI_MODEM_DISABLE_K56FLEX 0x08
-#define DSP_CAI_MODEM_DISABLE_X2 0x10
-#define DSP_CAI_MODEM_ENABLE_V29FDX 0x01
-#define DSP_CAI_MODEM_ENABLE_V33 0x02
-#define DSP_CAI_MODEM_DISABLE_2400_SYMBOLS 0x01
-#define DSP_CAI_MODEM_DISABLE_2743_SYMBOLS 0x02
-#define DSP_CAI_MODEM_DISABLE_2800_SYMBOLS 0x04
-#define DSP_CAI_MODEM_DISABLE_3000_SYMBOLS 0x08
-#define DSP_CAI_MODEM_DISABLE_3200_SYMBOLS 0x10
-#define DSP_CAI_MODEM_DISABLE_3429_SYMBOLS 0x20
-#define DSP_CAI_MODEM_DISABLE_TX_REDUCTION 0x01
-#define DSP_CAI_MODEM_DISABLE_PRECODING 0x02
-#define DSP_CAI_MODEM_DISABLE_PREEMPHASIS 0x04
-#define DSP_CAI_MODEM_DISABLE_SHAPING 0x08
-#define DSP_CAI_MODEM_DISABLE_NONLINEAR_EN 0x10
-#define DSP_CAI_MODEM_SPEAKER_OFF 0x00
-#define DSP_CAI_MODEM_SPEAKER_DURING_TRAIN 0x01
-#define DSP_CAI_MODEM_SPEAKER_TIL_CONNECT 0x02
-#define DSP_CAI_MODEM_SPEAKER_ALWAYS_ON 0x03
-#define DSP_CAI_MODEM_SPEAKER_CONTROL_MASK 0x03
-#define DSP_CAI_MODEM_SPEAKER_VOLUME_MIN 0x00
-#define DSP_CAI_MODEM_SPEAKER_VOLUME_LOW 0x04
-#define DSP_CAI_MODEM_SPEAKER_VOLUME_HIGH 0x08
-#define DSP_CAI_MODEM_SPEAKER_VOLUME_MAX 0x0c
-#define DSP_CAI_MODEM_SPEAKER_VOLUME_MASK 0x0c
-/* ==========================================================
- DCD/CTS State
- ========================================================== */
-#define MDM_WANT_CONNECT_B3_ACTIVE_I 0x01
-#define MDM_NCPI_VALID 0x02
-#define MDM_NCPI_CTS_ON_RECEIVED 0x04
-#define MDM_NCPI_DCD_ON_RECEIVED 0x08
-/* ==========================================================
- CAPI NCPI Constants
- ========================================================== */
-#define MDM_NCPI_ECM_V42 0x0001
-#define MDM_NCPI_ECM_MNP 0x0002
-#define MDM_NCPI_TRANSPARENT 0x0004
-#define MDM_NCPI_COMPRESSED 0x0010
-/* ==========================================================
- CAPI B2 Config Constants
- ========================================================== */
-#define MDM_B2_DISABLE_V42bis 0x0001
-#define MDM_B2_DISABLE_MNP 0x0002
-#define MDM_B2_DISABLE_TRANS 0x0004
-#define MDM_B2_DISABLE_V42 0x0008
-#define MDM_B2_DISABLE_COMP 0x0010
-/* ==========================================================
- CAPI B1 Config Constants
- ========================================================== */
-#define MDM_CAPI_DISABLE_RETRAIN 0x0001
-#define MDM_CAPI_DISABLE_RING_TONE 0x0002
-#define MDM_CAPI_GUARD_1800 0x0004
-#define MDM_CAPI_GUARD_550 0x0008
-#define MDM_CAPI_NEG_V8 0x0003
-#define MDM_CAPI_NEG_V100 0x0002
-#define MDM_CAPI_NEG_MOD_CLASS 0x0001
-#define MDM_CAPI_NEG_DISABLED 0x0000
-#endif
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
deleted file mode 100644
index def7992a38e6..000000000000
--- a/drivers/isdn/hardware/eicon/message.c
+++ /dev/null
@@ -1,14954 +0,0 @@
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/bitmap.h>
-
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "capi20.h"
-#include "divacapi.h"
-#include "mdm_msg.h"
-#include "divasync.h"
-
-#define FILE_ "MESSAGE.C"
-#define dprintf
-
-/*------------------------------------------------------------------*/
-/* These are options supported for all adapters that are served by */
-/* the XDI driver. Also it is not necessary to ask it from every */
-/* adapter, and it is not necessary to save it separately for */
-/* every adapter. Macros defined here have only local meaning. */
-/*------------------------------------------------------------------*/
-static dword diva_xdi_extended_features = 0;
-
-#define DIVA_CAPI_USE_CMA 0x00000001
-#define DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR 0x00000002
-#define DIVA_CAPI_XDI_PROVIDES_NO_CANCEL 0x00000004
-#define DIVA_CAPI_XDI_PROVIDES_RX_DMA 0x00000008
-
-/*
- CAPI can request to process all return codes itself only if:
- protocol code supports this && xdi supports this
-*/
-#define DIVA_CAPI_SUPPORTS_NO_CANCEL(__a__) (((__a__)->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL) && ((__a__)->manufacturer_features & MANUFACTURER_FEATURE_OK_FC_LABEL) && (diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_NO_CANCEL))
-
-/*------------------------------------------------------------------*/
-/* local function prototypes */
-/*------------------------------------------------------------------*/
-
-static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci);
-void AutomaticLaw(DIVA_CAPI_ADAPTER *);
-word CapiRelease(word);
-word CapiRegister(word);
-word api_put(APPL *, CAPI_MSG *);
-static word api_parse(byte *, word, byte *, API_PARSE *);
-static void api_save_msg(API_PARSE *in, byte *format, API_SAVE *out);
-static void api_load_msg(API_SAVE *in, API_PARSE *out);
-
-word api_remove_start(void);
-void api_remove_complete(void);
-
-static void plci_remove(PLCI *);
-static void diva_get_extended_adapter_features(DIVA_CAPI_ADAPTER *a);
-static void diva_ask_for_xdi_sdram_bar(DIVA_CAPI_ADAPTER *, IDI_SYNC_REQ *);
-
-void callback(ENTITY *);
-
-static void control_rc(PLCI *, byte, byte, byte, byte, byte);
-static void data_rc(PLCI *, byte);
-static void data_ack(PLCI *, byte);
-static void sig_ind(PLCI *);
-static void SendInfo(PLCI *, dword, byte **, byte);
-static void SendSetupInfo(APPL *, PLCI *, dword, byte **, byte);
-static void SendSSExtInd(APPL *, PLCI *plci, dword Id, byte **parms);
-
-static void VSwitchReqInd(PLCI *plci, dword Id, byte **parms);
-
-static void nl_ind(PLCI *);
-
-static byte connect_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte disconnect_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte disconnect_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte listen_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte info_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte info_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte alert_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte facility_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte facility_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_b3_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte disconnect_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte disconnect_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte data_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte data_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte reset_b3_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte reset_b3_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte connect_b3_t90_a_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte select_b_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte manufacturer_req(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-static byte manufacturer_res(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-
-static word get_plci(DIVA_CAPI_ADAPTER *);
-static void add_p(PLCI *, byte, byte *);
-static void add_s(PLCI *plci, byte code, API_PARSE *p);
-static void add_ss(PLCI *plci, byte code, API_PARSE *p);
-static void add_ie(PLCI *plci, byte code, byte *p, word p_length);
-static void add_d(PLCI *, word, byte *);
-static void add_ai(PLCI *, API_PARSE *);
-static word add_b1(PLCI *, API_PARSE *, word, word);
-static word add_b23(PLCI *, API_PARSE *);
-static word add_modem_b23(PLCI *plci, API_PARSE *bp_parms);
-static void sig_req(PLCI *, byte, byte);
-static void nl_req_ncci(PLCI *, byte, byte);
-static void send_req(PLCI *);
-static void send_data(PLCI *);
-static word plci_remove_check(PLCI *);
-static void listen_check(DIVA_CAPI_ADAPTER *);
-static byte AddInfo(byte **, byte **, byte *, byte *);
-static byte getChannel(API_PARSE *);
-static void IndParse(PLCI *, const word *, byte **, byte);
-static byte ie_compare(byte *, byte *);
-static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *);
-static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word);
-
-/*
- XON protocol helpers
-*/
-static void channel_flow_control_remove(PLCI *plci);
-static void channel_x_off(PLCI *plci, byte ch, byte flag);
-static void channel_x_on(PLCI *plci, byte ch);
-static void channel_request_xon(PLCI *plci, byte ch);
-static void channel_xmit_xon(PLCI *plci);
-static int channel_can_xon(PLCI *plci, byte ch);
-static void channel_xmit_extended_xon(PLCI *plci);
-
-static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type, dword info_mask, byte setupParse);
-static word AdvCodecSupport(DIVA_CAPI_ADAPTER *, PLCI *, APPL *, byte);
-static void CodecIdCheck(DIVA_CAPI_ADAPTER *, PLCI *);
-static void SetVoiceChannel(PLCI *, byte *, DIVA_CAPI_ADAPTER *);
-static void VoiceChannelOff(PLCI *plci);
-static void adv_voice_write_coefs(PLCI *plci, word write_command);
-static void adv_voice_clear_config(PLCI *plci);
-
-static word get_b1_facilities(PLCI *plci, byte b1_resource);
-static byte add_b1_facilities(PLCI *plci, byte b1_resource, word b1_facilities);
-static void adjust_b1_facilities(PLCI *plci, byte new_b1_resource, word new_b1_facilities);
-static word adjust_b_process(dword Id, PLCI *plci, byte Rc);
-static void adjust_b1_resource(dword Id, PLCI *plci, API_SAVE *bp_msg, word b1_facilities, word internal_command);
-static void adjust_b_restore(dword Id, PLCI *plci, byte Rc);
-static void reset_b3_command(dword Id, PLCI *plci, byte Rc);
-static void select_b_command(dword Id, PLCI *plci, byte Rc);
-static void fax_connect_ack_command(dword Id, PLCI *plci, byte Rc);
-static void fax_edata_ack_command(dword Id, PLCI *plci, byte Rc);
-static void fax_connect_info_command(dword Id, PLCI *plci, byte Rc);
-static void fax_adjust_b23_command(dword Id, PLCI *plci, byte Rc);
-static void fax_disconnect_command(dword Id, PLCI *plci, byte Rc);
-static void hold_save_command(dword Id, PLCI *plci, byte Rc);
-static void retrieve_restore_command(dword Id, PLCI *plci, byte Rc);
-static void init_b1_config(PLCI *plci);
-static void clear_b1_config(PLCI *plci);
-
-static void dtmf_command(dword Id, PLCI *plci, byte Rc);
-static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
-static void dtmf_confirmation(dword Id, PLCI *plci);
-static void dtmf_indication(dword Id, PLCI *plci, byte *msg, word length);
-static void dtmf_parameter_write(PLCI *plci);
-
-
-static void mixer_set_bchannel_id_esc(PLCI *plci, byte bchannel_id);
-static void mixer_set_bchannel_id(PLCI *plci, byte *chi);
-static void mixer_clear_config(PLCI *plci);
-static void mixer_notify_update(PLCI *plci, byte others);
-static void mixer_command(dword Id, PLCI *plci, byte Rc);
-static byte mixer_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
-static void mixer_indication_coefs_set(dword Id, PLCI *plci);
-static void mixer_indication_xconnect_from(dword Id, PLCI *plci, byte *msg, word length);
-static void mixer_indication_xconnect_to(dword Id, PLCI *plci, byte *msg, word length);
-static void mixer_remove(PLCI *plci);
-
-
-static void ec_command(dword Id, PLCI *plci, byte Rc);
-static byte ec_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg);
-static void ec_indication(dword Id, PLCI *plci, byte *msg, word length);
-
-
-static void rtp_connect_b3_req_command(dword Id, PLCI *plci, byte Rc);
-static void rtp_connect_b3_res_command(dword Id, PLCI *plci, byte Rc);
-
-
-static int diva_get_dma_descriptor(PLCI *plci, dword *dma_magic);
-static void diva_free_dma_descriptor(PLCI *plci, int nr);
-
-/*------------------------------------------------------------------*/
-/* external function prototypes */
-/*------------------------------------------------------------------*/
-
-extern byte MapController(byte);
-extern byte UnMapController(byte);
-#define MapId(Id)(((Id) & 0xffffff00L) | MapController((byte)(Id)))
-#define UnMapId(Id)(((Id) & 0xffffff00L) | UnMapController((byte)(Id)))
-
-void sendf(APPL *, word, dword, word, byte *, ...);
-void *TransmitBufferSet(APPL *appl, dword ref);
-void *TransmitBufferGet(APPL *appl, void *p);
-void TransmitBufferFree(APPL *appl, void *p);
-void *ReceiveBufferGet(APPL *appl, int Num);
-
-int fax_head_line_time(char *buffer);
-
-
-/*------------------------------------------------------------------*/
-/* Global data definitions */
-/*------------------------------------------------------------------*/
-extern byte max_adapter;
-extern byte max_appl;
-extern DIVA_CAPI_ADAPTER *adapter;
-extern APPL *application;
-
-
-
-
-
-
-
-static byte remove_started = false;
-static PLCI dummy_plci;
-
-
-static struct _ftable {
- word command;
- byte *format;
- byte (*function)(dword, word, DIVA_CAPI_ADAPTER *, PLCI *, APPL *, API_PARSE *);
-} ftable[] = {
- {_DATA_B3_R, "dwww", data_b3_req},
- {_DATA_B3_I | RESPONSE, "w", data_b3_res},
- {_INFO_R, "ss", info_req},
- {_INFO_I | RESPONSE, "", info_res},
- {_CONNECT_R, "wsssssssss", connect_req},
- {_CONNECT_I | RESPONSE, "wsssss", connect_res},
- {_CONNECT_ACTIVE_I | RESPONSE, "", connect_a_res},
- {_DISCONNECT_R, "s", disconnect_req},
- {_DISCONNECT_I | RESPONSE, "", disconnect_res},
- {_LISTEN_R, "dddss", listen_req},
- {_ALERT_R, "s", alert_req},
- {_FACILITY_R, "ws", facility_req},
- {_FACILITY_I | RESPONSE, "ws", facility_res},
- {_CONNECT_B3_R, "s", connect_b3_req},
- {_CONNECT_B3_I | RESPONSE, "ws", connect_b3_res},
- {_CONNECT_B3_ACTIVE_I | RESPONSE, "", connect_b3_a_res},
- {_DISCONNECT_B3_R, "s", disconnect_b3_req},
- {_DISCONNECT_B3_I | RESPONSE, "", disconnect_b3_res},
- {_RESET_B3_R, "s", reset_b3_req},
- {_RESET_B3_I | RESPONSE, "", reset_b3_res},
- {_CONNECT_B3_T90_ACTIVE_I | RESPONSE, "ws", connect_b3_t90_a_res},
- {_CONNECT_B3_T90_ACTIVE_I | RESPONSE, "", connect_b3_t90_a_res},
- {_SELECT_B_REQ, "s", select_b_req},
- {_MANUFACTURER_R, "dws", manufacturer_req},
- {_MANUFACTURER_I | RESPONSE, "dws", manufacturer_res},
- {_MANUFACTURER_I | RESPONSE, "", manufacturer_res}
-};
-
-static byte *cip_bc[29][2] = {
- { "", "" }, /* 0 */
- { "\x03\x80\x90\xa3", "\x03\x80\x90\xa2" }, /* 1 */
- { "\x02\x88\x90", "\x02\x88\x90" }, /* 2 */
- { "\x02\x89\x90", "\x02\x89\x90" }, /* 3 */
- { "\x03\x90\x90\xa3", "\x03\x90\x90\xa2" }, /* 4 */
- { "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 5 */
- { "\x02\x98\x90", "\x02\x98\x90" }, /* 6 */
- { "\x04\x88\xc0\xc6\xe6", "\x04\x88\xc0\xc6\xe6" }, /* 7 */
- { "\x04\x88\x90\x21\x8f", "\x04\x88\x90\x21\x8f" }, /* 8 */
- { "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 9 */
- { "", "" }, /* 10 */
- { "", "" }, /* 11 */
- { "", "" }, /* 12 */
- { "", "" }, /* 13 */
- { "", "" }, /* 14 */
- { "", "" }, /* 15 */
-
- { "\x03\x80\x90\xa3", "\x03\x80\x90\xa2" }, /* 16 */
- { "\x03\x90\x90\xa3", "\x03\x90\x90\xa2" }, /* 17 */
- { "\x02\x88\x90", "\x02\x88\x90" }, /* 18 */
- { "\x02\x88\x90", "\x02\x88\x90" }, /* 19 */
- { "\x02\x88\x90", "\x02\x88\x90" }, /* 20 */
- { "\x02\x88\x90", "\x02\x88\x90" }, /* 21 */
- { "\x02\x88\x90", "\x02\x88\x90" }, /* 22 */
- { "\x02\x88\x90", "\x02\x88\x90" }, /* 23 */
- { "\x02\x88\x90", "\x02\x88\x90" }, /* 24 */
- { "\x02\x88\x90", "\x02\x88\x90" }, /* 25 */
- { "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 26 */
- { "\x03\x91\x90\xa5", "\x03\x91\x90\xa5" }, /* 27 */
- { "\x02\x88\x90", "\x02\x88\x90" } /* 28 */
-};
-
-static byte *cip_hlc[29] = {
- "", /* 0 */
- "", /* 1 */
- "", /* 2 */
- "", /* 3 */
- "", /* 4 */
- "", /* 5 */
- "", /* 6 */
- "", /* 7 */
- "", /* 8 */
- "", /* 9 */
- "", /* 10 */
- "", /* 11 */
- "", /* 12 */
- "", /* 13 */
- "", /* 14 */
- "", /* 15 */
-
- "\x02\x91\x81", /* 16 */
- "\x02\x91\x84", /* 17 */
- "\x02\x91\xa1", /* 18 */
- "\x02\x91\xa4", /* 19 */
- "\x02\x91\xa8", /* 20 */
- "\x02\x91\xb1", /* 21 */
- "\x02\x91\xb2", /* 22 */
- "\x02\x91\xb5", /* 23 */
- "\x02\x91\xb8", /* 24 */
- "\x02\x91\xc1", /* 25 */
- "\x02\x91\x81", /* 26 */
- "\x03\x91\xe0\x01", /* 27 */
- "\x03\x91\xe0\x02" /* 28 */
-};
-
-/*------------------------------------------------------------------*/
-
-#define V120_HEADER_LENGTH 1
-#define V120_HEADER_EXTEND_BIT 0x80
-#define V120_HEADER_BREAK_BIT 0x40
-#define V120_HEADER_C1_BIT 0x04
-#define V120_HEADER_C2_BIT 0x08
-#define V120_HEADER_FLUSH_COND (V120_HEADER_BREAK_BIT | V120_HEADER_C1_BIT | V120_HEADER_C2_BIT)
-
-static byte v120_default_header[] =
-{
-
- 0x83 /* Ext, BR , res, res, C2 , C1 , B , F */
-
-};
-
-static byte v120_break_header[] =
-{
-
- 0xc3 | V120_HEADER_BREAK_BIT /* Ext, BR , res, res, C2 , C1 , B , F */
-
-};
-
-
-/*------------------------------------------------------------------*/
-/* API_PUT function */
-/*------------------------------------------------------------------*/
-
-word api_put(APPL *appl, CAPI_MSG *msg)
-{
- word i, j, k, l, n;
- word ret;
- byte c;
- byte controller;
- DIVA_CAPI_ADAPTER *a;
- PLCI *plci;
- NCCI *ncci_ptr;
- word ncci;
- CAPI_MSG *m;
- API_PARSE msg_parms[MAX_MSG_PARMS + 1];
-
- if (msg->header.length < sizeof(msg->header) ||
- msg->header.length > MAX_MSG_SIZE) {
- dbug(1, dprintf("bad len"));
- return _BAD_MSG;
- }
-
- controller = (byte)((msg->header.controller & 0x7f) - 1);
-
- /* controller numbering starts at 0 and goes up to (max_adapter - 1) */
- if (controller >= max_adapter)
- {
- dbug(1, dprintf("invalid ctrl"));
- return _BAD_MSG;
- }
-
- a = &adapter[controller];
- plci = NULL;
- if ((msg->header.plci != 0) && (msg->header.plci <= a->max_plci) && !a->adapter_disabled)
- {
- dbug(1, dprintf("plci=%x", msg->header.plci));
- plci = &a->plci[msg->header.plci - 1];
- ncci = GET_WORD(&msg->header.ncci);
- if (plci->Id
- && (plci->appl
- || (plci->State == INC_CON_PENDING)
- || (plci->State == INC_CON_ALERT)
- || (msg->header.command == (_DISCONNECT_I | RESPONSE)))
- && ((ncci == 0)
- || (msg->header.command == (_DISCONNECT_B3_I | RESPONSE))
- || ((ncci < MAX_NCCI + 1) && (a->ncci_plci[ncci] == plci->Id))))
- {
- i = plci->msg_in_read_pos;
- j = plci->msg_in_write_pos;
- if (j >= i)
- {
- if (j + msg->header.length + MSG_IN_OVERHEAD <= MSG_IN_QUEUE_SIZE)
- i += MSG_IN_QUEUE_SIZE - j;
- else
- j = 0;
- }
- else
- {
-
- n = (((CAPI_MSG *)(plci->msg_in_queue))->header.length + MSG_IN_OVERHEAD + 3) & 0xfffc;
-
- if (i > MSG_IN_QUEUE_SIZE - n)
- i = MSG_IN_QUEUE_SIZE - n + 1;
- i -= j;
- }
-
- if (i <= ((msg->header.length + MSG_IN_OVERHEAD + 3) & 0xfffc))
-
- {
- dbug(0, dprintf("Q-FULL1(msg) - len=%d write=%d read=%d wrap=%d free=%d",
- msg->header.length, plci->msg_in_write_pos,
- plci->msg_in_read_pos, plci->msg_in_wrap_pos, i));
-
- return _QUEUE_FULL;
- }
- c = false;
- if ((((byte *) msg) < ((byte *)(plci->msg_in_queue)))
- || (((byte *) msg) >= ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
- {
- if (plci->msg_in_write_pos != plci->msg_in_read_pos)
- c = true;
- }
- if (msg->header.command == _DATA_B3_R)
- {
- if (msg->header.length < 20)
- {
- dbug(1, dprintf("DATA_B3 REQ wrong length %d", msg->header.length));
- return _BAD_MSG;
- }
- ncci_ptr = &(a->ncci[ncci]);
- n = ncci_ptr->data_pending;
- l = ncci_ptr->data_ack_pending;
- k = plci->msg_in_read_pos;
- while (k != plci->msg_in_write_pos)
- {
- if (k == plci->msg_in_wrap_pos)
- k = 0;
- if ((((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.command == _DATA_B3_R)
- && (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.ncci == ncci))
- {
- n++;
- if (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->info.data_b3_req.Flags & 0x0004)
- l++;
- }
-
- k += (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[k]))->header.length +
- MSG_IN_OVERHEAD + 3) & 0xfffc;
-
- }
- if ((n >= MAX_DATA_B3) || (l >= MAX_DATA_ACK))
- {
- dbug(0, dprintf("Q-FULL2(data) - pending=%d/%d ack_pending=%d/%d",
- ncci_ptr->data_pending, n, ncci_ptr->data_ack_pending, l));
-
- return _QUEUE_FULL;
- }
- if (plci->req_in || plci->internal_command)
- {
- if ((((byte *) msg) >= ((byte *)(plci->msg_in_queue)))
- && (((byte *) msg) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
- {
- dbug(0, dprintf("Q-FULL3(requeue)"));
-
- return _QUEUE_FULL;
- }
- c = true;
- }
- }
- else
- {
- if (plci->req_in || plci->internal_command)
- c = true;
- else
- {
- plci->command = msg->header.command;
- plci->number = msg->header.number;
- }
- }
- if (c)
- {
- dbug(1, dprintf("enqueue msg(0x%04x,0x%x,0x%x) - len=%d write=%d read=%d wrap=%d free=%d",
- msg->header.command, plci->req_in, plci->internal_command,
- msg->header.length, plci->msg_in_write_pos,
- plci->msg_in_read_pos, plci->msg_in_wrap_pos, i));
- if (j == 0)
- plci->msg_in_wrap_pos = plci->msg_in_write_pos;
- m = (CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]);
- for (i = 0; i < msg->header.length; i++)
- ((byte *)(plci->msg_in_queue))[j++] = ((byte *) msg)[i];
- if (m->header.command == _DATA_B3_R)
- {
-
- m->info.data_b3_req.Data = (dword)(long)(TransmitBufferSet(appl, m->info.data_b3_req.Data));
-
- }
-
- j = (j + 3) & 0xfffc;
-
- *((APPL **)(&((byte *)(plci->msg_in_queue))[j])) = appl;
- plci->msg_in_write_pos = j + MSG_IN_OVERHEAD;
- return 0;
- }
- }
- else
- {
- plci = NULL;
- }
- }
- dbug(1, dprintf("com=%x", msg->header.command));
-
- for (j = 0; j < MAX_MSG_PARMS + 1; j++) msg_parms[j].length = 0;
- for (i = 0, ret = _BAD_MSG; i < ARRAY_SIZE(ftable); i++) {
-
- if (ftable[i].command == msg->header.command) {
- /* break loop if the message is correct, otherwise continue scan */
- /* (for example: CONNECT_B3_T90_ACT_RES has two specifications) */
- if (!api_parse(msg->info.b, (word)(msg->header.length - 12), ftable[i].format, msg_parms)) {
- ret = 0;
- break;
- }
- for (j = 0; j < MAX_MSG_PARMS + 1; j++) msg_parms[j].length = 0;
- }
- }
- if (ret) {
- dbug(1, dprintf("BAD_MSG"));
- if (plci) plci->command = 0;
- return ret;
- }
-
-
- c = ftable[i].function(GET_DWORD(&msg->header.controller),
- msg->header.number,
- a,
- plci,
- appl,
- msg_parms);
-
- channel_xmit_extended_xon(plci);
-
- if (c == 1) send_req(plci);
- if (c == 2 && plci) plci->req_in = plci->req_in_start = plci->req_out = 0;
- if (plci && !plci->req_in) plci->command = 0;
- return 0;
-}
-
-
-/*------------------------------------------------------------------*/
-/* api_parse function, check the format of api messages */
-/*------------------------------------------------------------------*/
-
-static word api_parse(byte *msg, word length, byte *format, API_PARSE *parms)
-{
- word i;
- word p;
-
- for (i = 0, p = 0; format[i]; i++) {
- if (parms)
- {
- parms[i].info = &msg[p];
- }
- switch (format[i]) {
- case 'b':
- p += 1;
- break;
- case 'w':
- p += 2;
- break;
- case 'd':
- p += 4;
- break;
- case 's':
- if (msg[p] == 0xff) {
- parms[i].info += 2;
- parms[i].length = msg[p + 1] + (msg[p + 2] << 8);
- p += (parms[i].length + 3);
- }
- else {
- parms[i].length = msg[p];
- p += (parms[i].length + 1);
- }
- break;
- }
-
- if (p > length) return true;
- }
- if (parms) parms[i].info = NULL;
- return false;
-}
-
-static void api_save_msg(API_PARSE *in, byte *format, API_SAVE *out)
-{
- word i, j, n = 0;
- byte *p;
-
- p = out->info;
- for (i = 0; format[i] != '\0'; i++)
- {
- out->parms[i].info = p;
- out->parms[i].length = in[i].length;
- switch (format[i])
- {
- case 'b':
- n = 1;
- break;
- case 'w':
- n = 2;
- break;
- case 'd':
- n = 4;
- break;
- case 's':
- n = in[i].length + 1;
- break;
- }
- for (j = 0; j < n; j++)
- *(p++) = in[i].info[j];
- }
- out->parms[i].info = NULL;
- out->parms[i].length = 0;
-}
-
-static void api_load_msg(API_SAVE *in, API_PARSE *out)
-{
- word i;
-
- i = 0;
- do
- {
- out[i].info = in->parms[i].info;
- out[i].length = in->parms[i].length;
- } while (in->parms[i++].info);
-}
-
-
-/*------------------------------------------------------------------*/
-/* CAPI remove function */
-/*------------------------------------------------------------------*/
-
-word api_remove_start(void)
-{
- word i;
- word j;
-
- if (!remove_started) {
- remove_started = true;
- for (i = 0; i < max_adapter; i++) {
- if (adapter[i].request) {
- for (j = 0; j < adapter[i].max_plci; j++) {
- if (adapter[i].plci[j].Sig.Id) plci_remove(&adapter[i].plci[j]);
- }
- }
- }
- return 1;
- }
- else {
- for (i = 0; i < max_adapter; i++) {
- if (adapter[i].request) {
- for (j = 0; j < adapter[i].max_plci; j++) {
- if (adapter[i].plci[j].Sig.Id) return 1;
- }
- }
- }
- }
- api_remove_complete();
- return 0;
-}
-
-
-/*------------------------------------------------------------------*/
-/* internal command queue */
-/*------------------------------------------------------------------*/
-
-static void init_internal_command_queue(PLCI *plci)
-{
- word i;
-
- dbug(1, dprintf("%s,%d: init_internal_command_queue",
- (char *)(FILE_), __LINE__));
-
- plci->internal_command = 0;
- for (i = 0; i < MAX_INTERNAL_COMMAND_LEVELS; i++)
- plci->internal_command_queue[i] = NULL;
-}
-
-
-static void start_internal_command(dword Id, PLCI *plci, t_std_internal_command command_function)
-{
- word i;
-
- dbug(1, dprintf("[%06lx] %s,%d: start_internal_command",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
- if (plci->internal_command == 0)
- {
- plci->internal_command_queue[0] = command_function;
- (*command_function)(Id, plci, OK);
- }
- else
- {
- i = 1;
- while (plci->internal_command_queue[i] != NULL)
- i++;
- plci->internal_command_queue[i] = command_function;
- }
-}
-
-
-static void next_internal_command(dword Id, PLCI *plci)
-{
- word i;
-
- dbug(1, dprintf("[%06lx] %s,%d: next_internal_command",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
- plci->internal_command = 0;
- plci->internal_command_queue[0] = NULL;
- while (plci->internal_command_queue[1] != NULL)
- {
- for (i = 0; i < MAX_INTERNAL_COMMAND_LEVELS - 1; i++)
- plci->internal_command_queue[i] = plci->internal_command_queue[i + 1];
- plci->internal_command_queue[MAX_INTERNAL_COMMAND_LEVELS - 1] = NULL;
- (*(plci->internal_command_queue[0]))(Id, plci, OK);
- if (plci->internal_command != 0)
- return;
- plci->internal_command_queue[0] = NULL;
- }
-}
-
-
-/*------------------------------------------------------------------*/
-/* NCCI allocate/remove function */
-/*------------------------------------------------------------------*/
-
-static dword ncci_mapping_bug = 0;
-
-static word get_ncci(PLCI *plci, byte ch, word force_ncci)
-{
- DIVA_CAPI_ADAPTER *a;
- word ncci, i, j, k;
-
- a = plci->adapter;
- if (!ch || a->ch_ncci[ch])
- {
- ncci_mapping_bug++;
- dbug(1, dprintf("NCCI mapping exists %ld %02x %02x %02x-%02x",
- ncci_mapping_bug, ch, force_ncci, a->ncci_ch[a->ch_ncci[ch]], a->ch_ncci[ch]));
- ncci = ch;
- }
- else
- {
- if (force_ncci)
- ncci = force_ncci;
- else
- {
- if ((ch < MAX_NCCI + 1) && !a->ncci_ch[ch])
- ncci = ch;
- else
- {
- ncci = 1;
- while ((ncci < MAX_NCCI + 1) && a->ncci_ch[ncci])
- ncci++;
- if (ncci == MAX_NCCI + 1)
- {
- ncci_mapping_bug++;
- i = 1;
- do
- {
- j = 1;
- while ((j < MAX_NCCI + 1) && (a->ncci_ch[j] != i))
- j++;
- k = j;
- if (j < MAX_NCCI + 1)
- {
- do
- {
- j++;
- } while ((j < MAX_NCCI + 1) && (a->ncci_ch[j] != i));
- }
- } while ((i < MAX_NL_CHANNEL + 1) && (j < MAX_NCCI + 1));
- if (i < MAX_NL_CHANNEL + 1)
- {
- dbug(1, dprintf("NCCI mapping overflow %ld %02x %02x %02x-%02x-%02x",
- ncci_mapping_bug, ch, force_ncci, i, k, j));
- }
- else
- {
- dbug(1, dprintf("NCCI mapping overflow %ld %02x %02x",
- ncci_mapping_bug, ch, force_ncci));
- }
- ncci = ch;
- }
- }
- a->ncci_plci[ncci] = plci->Id;
- a->ncci_state[ncci] = IDLE;
- if (!plci->ncci_ring_list)
- plci->ncci_ring_list = ncci;
- else
- a->ncci_next[ncci] = a->ncci_next[plci->ncci_ring_list];
- a->ncci_next[plci->ncci_ring_list] = (byte) ncci;
- }
- a->ncci_ch[ncci] = ch;
- a->ch_ncci[ch] = (byte) ncci;
- dbug(1, dprintf("NCCI mapping established %ld %02x %02x %02x-%02x",
- ncci_mapping_bug, ch, force_ncci, ch, ncci));
- }
- return (ncci);
-}
-
-
-static void ncci_free_receive_buffers(PLCI *plci, word ncci)
-{
- DIVA_CAPI_ADAPTER *a;
- APPL *appl;
- word i, ncci_code;
- dword Id;
-
- a = plci->adapter;
- Id = (((dword) ncci) << 16) | (((word)(plci->Id)) << 8) | a->Id;
- if (ncci)
- {
- if (a->ncci_plci[ncci] == plci->Id)
- {
- if (!plci->appl)
- {
- ncci_mapping_bug++;
- dbug(1, dprintf("NCCI mapping appl expected %ld %08lx",
- ncci_mapping_bug, Id));
- }
- else
- {
- appl = plci->appl;
- ncci_code = ncci | (((word) a->Id) << 8);
- for (i = 0; i < appl->MaxBuffer; i++)
- {
- if ((appl->DataNCCI[i] == ncci_code)
- && (((byte)(appl->DataFlags[i] >> 8)) == plci->Id))
- {
- appl->DataNCCI[i] = 0;
- }
- }
- }
- }
- }
- else
- {
- for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
- {
- if (a->ncci_plci[ncci] == plci->Id)
- {
- if (!plci->appl)
- {
- ncci_mapping_bug++;
- dbug(1, dprintf("NCCI mapping no appl %ld %08lx",
- ncci_mapping_bug, Id));
- }
- else
- {
- appl = plci->appl;
- ncci_code = ncci | (((word) a->Id) << 8);
- for (i = 0; i < appl->MaxBuffer; i++)
- {
- if ((appl->DataNCCI[i] == ncci_code)
- && (((byte)(appl->DataFlags[i] >> 8)) == plci->Id))
- {
- appl->DataNCCI[i] = 0;
- }
- }
- }
- }
- }
- }
-}
-
-
-static void cleanup_ncci_data(PLCI *plci, word ncci)
-{
- NCCI *ncci_ptr;
-
- if (ncci && (plci->adapter->ncci_plci[ncci] == plci->Id))
- {
- ncci_ptr = &(plci->adapter->ncci[ncci]);
- if (plci->appl)
- {
- while (ncci_ptr->data_pending != 0)
- {
- if (!plci->data_sent || (ncci_ptr->DBuffer[ncci_ptr->data_out].P != plci->data_sent_ptr))
- TransmitBufferFree(plci->appl, ncci_ptr->DBuffer[ncci_ptr->data_out].P);
- (ncci_ptr->data_out)++;
- if (ncci_ptr->data_out == MAX_DATA_B3)
- ncci_ptr->data_out = 0;
- (ncci_ptr->data_pending)--;
- }
- }
- ncci_ptr->data_out = 0;
- ncci_ptr->data_pending = 0;
- ncci_ptr->data_ack_out = 0;
- ncci_ptr->data_ack_pending = 0;
- }
-}
-
-
-static void ncci_remove(PLCI *plci, word ncci, byte preserve_ncci)
-{
- DIVA_CAPI_ADAPTER *a;
- dword Id;
- word i;
-
- a = plci->adapter;
- Id = (((dword) ncci) << 16) | (((word)(plci->Id)) << 8) | a->Id;
- if (!preserve_ncci)
- ncci_free_receive_buffers(plci, ncci);
- if (ncci)
- {
- if (a->ncci_plci[ncci] != plci->Id)
- {
- ncci_mapping_bug++;
- dbug(1, dprintf("NCCI mapping doesn't exist %ld %08lx %02x",
- ncci_mapping_bug, Id, preserve_ncci));
- }
- else
- {
- cleanup_ncci_data(plci, ncci);
- dbug(1, dprintf("NCCI mapping released %ld %08lx %02x %02x-%02x",
- ncci_mapping_bug, Id, preserve_ncci, a->ncci_ch[ncci], ncci));
- a->ch_ncci[a->ncci_ch[ncci]] = 0;
- if (!preserve_ncci)
- {
- a->ncci_ch[ncci] = 0;
- a->ncci_plci[ncci] = 0;
- a->ncci_state[ncci] = IDLE;
- i = plci->ncci_ring_list;
- while ((i != 0) && (a->ncci_next[i] != plci->ncci_ring_list) && (a->ncci_next[i] != ncci))
- i = a->ncci_next[i];
- if ((i != 0) && (a->ncci_next[i] == ncci))
- {
- if (i == ncci)
- plci->ncci_ring_list = 0;
- else if (plci->ncci_ring_list == ncci)
- plci->ncci_ring_list = i;
- a->ncci_next[i] = a->ncci_next[ncci];
- }
- a->ncci_next[ncci] = 0;
- }
- }
- }
- else
- {
- for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
- {
- if (a->ncci_plci[ncci] == plci->Id)
- {
- cleanup_ncci_data(plci, ncci);
- dbug(1, dprintf("NCCI mapping released %ld %08lx %02x %02x-%02x",
- ncci_mapping_bug, Id, preserve_ncci, a->ncci_ch[ncci], ncci));
- a->ch_ncci[a->ncci_ch[ncci]] = 0;
- if (!preserve_ncci)
- {
- a->ncci_ch[ncci] = 0;
- a->ncci_plci[ncci] = 0;
- a->ncci_state[ncci] = IDLE;
- a->ncci_next[ncci] = 0;
- }
- }
- }
- if (!preserve_ncci)
- plci->ncci_ring_list = 0;
- }
-}
-
-
-/*------------------------------------------------------------------*/
-/* PLCI remove function */
-/*------------------------------------------------------------------*/
-
-static void plci_free_msg_in_queue(PLCI *plci)
-{
- word i;
-
- if (plci->appl)
- {
- i = plci->msg_in_read_pos;
- while (i != plci->msg_in_write_pos)
- {
- if (i == plci->msg_in_wrap_pos)
- i = 0;
- if (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->header.command == _DATA_B3_R)
- {
-
- TransmitBufferFree(plci->appl,
- (byte *)(long)(((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->info.data_b3_req.Data));
-
- }
-
- i += (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[i]))->header.length +
- MSG_IN_OVERHEAD + 3) & 0xfffc;
-
- }
- }
- plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
- plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
- plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
-}
-
-
-static void plci_remove(PLCI *plci)
-{
-
- if (!plci) {
- dbug(1, dprintf("plci_remove(no plci)"));
- return;
- }
- init_internal_command_queue(plci);
- dbug(1, dprintf("plci_remove(%x,tel=%x)", plci->Id, plci->tel));
- if (plci_remove_check(plci))
- {
- return;
- }
- if (plci->Sig.Id == 0xff)
- {
- dbug(1, dprintf("D-channel X.25 plci->NL.Id:%0x", plci->NL.Id));
- if (plci->NL.Id && !plci->nl_remove_id)
- {
- nl_req_ncci(plci, REMOVE, 0);
- send_req(plci);
- }
- }
- else
- {
- if (!plci->sig_remove_id
- && (plci->Sig.Id
- || (plci->req_in != plci->req_out)
- || (plci->nl_req || plci->sig_req)))
- {
- sig_req(plci, HANGUP, 0);
- send_req(plci);
- }
- }
- ncci_remove(plci, 0, false);
- plci_free_msg_in_queue(plci);
-
- plci->channels = 0;
- plci->appl = NULL;
- if ((plci->State == INC_CON_PENDING) || (plci->State == INC_CON_ALERT))
- plci->State = OUTG_DIS_PENDING;
-}
-
-/*------------------------------------------------------------------*/
-/* translation function for each message */
-/*------------------------------------------------------------------*/
-
-static byte connect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word ch;
- word i;
- word Info;
- byte LinkLayer;
- API_PARSE *ai;
- API_PARSE *bp;
- API_PARSE ai_parms[5];
- word channel = 0;
- dword ch_mask;
- byte m;
- static byte esc_chi[35] = {0x02, 0x18, 0x01};
- static byte lli[2] = {0x01, 0x00};
- byte noCh = 0;
- word dir = 0;
- byte *p_chi = "";
-
- for (i = 0; i < 5; i++) ai_parms[i].length = 0;
-
- dbug(1, dprintf("connect_req(%d)", parms->length));
- Info = _WRONG_IDENTIFIER;
- if (a)
- {
- if (a->adapter_disabled)
- {
- dbug(1, dprintf("adapter disabled"));
- Id = ((word)1 << 8) | a->Id;
- sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
- sendf(appl, _DISCONNECT_I, Id, 0, "w", _L1_ERROR);
- return false;
- }
- Info = _OUT_OF_PLCI;
- if ((i = get_plci(a)))
- {
- Info = 0;
- plci = &a->plci[i - 1];
- plci->appl = appl;
- plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
- /* check 'external controller' bit for codec support */
- if (Id & EXT_CONTROLLER)
- {
- if (AdvCodecSupport(a, plci, appl, 0))
- {
- plci->Id = 0;
- sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", _WRONG_IDENTIFIER);
- return 2;
- }
- }
- ai = &parms[9];
- bp = &parms[5];
- ch = 0;
- if (bp->length)LinkLayer = bp->info[3];
- else LinkLayer = 0;
- if (ai->length)
- {
- ch = 0xffff;
- if (!api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
- {
- ch = 0;
- if (ai_parms[0].length)
- {
- ch = GET_WORD(ai_parms[0].info + 1);
- if (ch > 4) ch = 0; /* safety -> ignore ChannelID */
- if (ch == 4) /* explicit CHI in message */
- {
- /* check length of B-CH struct */
- if ((ai_parms[0].info)[3] >= 1)
- {
- if ((ai_parms[0].info)[4] == CHI)
- {
- p_chi = &((ai_parms[0].info)[5]);
- }
- else
- {
- p_chi = &((ai_parms[0].info)[3]);
- }
- if (p_chi[0] > 35) /* check length of channel ID */
- {
- Info = _WRONG_MESSAGE_FORMAT;
- }
- }
- else Info = _WRONG_MESSAGE_FORMAT;
- }
-
- if (ch == 3 && ai_parms[0].length >= 7 && ai_parms[0].length <= 36)
- {
- dir = GET_WORD(ai_parms[0].info + 3);
- ch_mask = 0;
- m = 0x3f;
- for (i = 0; i + 5 <= ai_parms[0].length; i++)
- {
- if (ai_parms[0].info[i + 5] != 0)
- {
- if ((ai_parms[0].info[i + 5] | m) != 0xff)
- Info = _WRONG_MESSAGE_FORMAT;
- else
- {
- if (ch_mask == 0)
- channel = i;
- ch_mask |= 1L << i;
- }
- }
- m = 0;
- }
- if (ch_mask == 0)
- Info = _WRONG_MESSAGE_FORMAT;
- if (!Info)
- {
- if ((ai_parms[0].length == 36) || (ch_mask != ((dword)(1L << channel))))
- {
- esc_chi[0] = (byte)(ai_parms[0].length - 2);
- for (i = 0; i + 5 <= ai_parms[0].length; i++)
- esc_chi[i + 3] = ai_parms[0].info[i + 5];
- }
- else
- esc_chi[0] = 2;
- esc_chi[2] = (byte)channel;
- plci->b_channel = (byte)channel; /* not correct for ETSI ch 17..31 */
- add_p(plci, LLI, lli);
- add_p(plci, ESC, esc_chi);
- plci->State = LOCAL_CONNECT;
- if (!dir) plci->call_dir |= CALL_DIR_FORCE_OUTG_NL; /* dir 0=DTE, 1=DCE */
- }
- }
- }
- }
- else Info = _WRONG_MESSAGE_FORMAT;
- }
-
- dbug(1, dprintf("ch=%x,dir=%x,p_ch=%d", ch, dir, channel));
- plci->command = _CONNECT_R;
- plci->number = Number;
- /* X.31 or D-ch free SAPI in LinkLayer? */
- if (ch == 1 && LinkLayer != 3 && LinkLayer != 12) noCh = true;
- if ((ch == 0 || ch == 2 || noCh || ch == 3 || ch == 4) && !Info)
- {
- /* a B-channel is used for B3 connections (ch==0), or no B-channel */
- /* is used (ch==2), or a permanent connection (ch==3) is used: do a CALL */
- if (noCh) Info = add_b1(plci, &parms[5], 2, 0); /* no resource */
- else Info = add_b1(plci, &parms[5], ch, 0);
- add_s(plci, OAD, &parms[2]);
- add_s(plci, OSA, &parms[4]);
- add_s(plci, BC, &parms[6]);
- add_s(plci, LLC, &parms[7]);
- add_s(plci, HLC, &parms[8]);
- if (a->Info_Mask[appl->Id - 1] & 0x200)
- {
- /* early B3 connect (CIP mask bit 9) no release after a disc */
- add_p(plci, LLI, "\x01\x01");
- }
- if (GET_WORD(parms[0].info) < 29) {
- add_p(plci, BC, cip_bc[GET_WORD(parms[0].info)][a->u_law]);
- add_p(plci, HLC, cip_hlc[GET_WORD(parms[0].info)]);
- }
- add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(plci, ASSIGN, DSIG_ID);
- }
- else if (ch == 1) {
-
- /* D-Channel used for B3 connections */
- plci->Sig.Id = 0xff;
- Info = 0;
- }
-
- if (!Info && ch != 2 && !noCh) {
- Info = add_b23(plci, &parms[5]);
- if (!Info) {
- if (!(plci->tel && !plci->adv_nl))nl_req_ncci(plci, ASSIGN, 0);
- }
- }
-
- if (!Info)
- {
- if (ch == 0 || ch == 2 || ch == 3 || noCh || ch == 4)
- {
- if (plci->spoofed_msg == SPOOFING_REQUIRED)
- {
- api_save_msg(parms, "wsssssssss", &plci->saved_msg);
- plci->spoofed_msg = CALL_REQ;
- plci->internal_command = BLOCK_PLCI;
- plci->command = 0;
- dbug(1, dprintf("Spoof"));
- send_req(plci);
- return false;
- }
- if (ch == 4)add_p(plci, CHI, p_chi);
- add_s(plci, CPN, &parms[1]);
- add_s(plci, DSA, &parms[3]);
- if (noCh) add_p(plci, ESC, "\x02\x18\xfd"); /* D-channel, no B-L3 */
- add_ai(plci, &parms[9]);
- if (!dir)sig_req(plci, CALL_REQ, 0);
- else
- {
- plci->command = PERM_LIST_REQ;
- plci->appl = appl;
- sig_req(plci, LISTEN_REQ, 0);
- send_req(plci);
- return false;
- }
- }
- send_req(plci);
- return false;
- }
- plci->Id = 0;
- }
- }
- sendf(appl,
- _CONNECT_R | CONFIRM,
- Id,
- Number,
- "w", Info);
- return 2;
-}
-
-static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word i, Info;
- word Reject;
- static byte cau_t[] = {0, 0, 0x90, 0x91, 0xac, 0x9d, 0x86, 0xd8, 0x9b};
- static byte esc_t[] = {0x03, 0x08, 0x00, 0x00};
- API_PARSE *ai;
- API_PARSE ai_parms[5];
- word ch = 0;
-
- if (!plci) {
- dbug(1, dprintf("connect_res(no plci)"));
- return 0; /* no plci, no send */
- }
-
- dbug(1, dprintf("connect_res(State=0x%x)", plci->State));
- for (i = 0; i < 5; i++) ai_parms[i].length = 0;
- ai = &parms[5];
- dbug(1, dprintf("ai->length=%d", ai->length));
-
- if (ai->length)
- {
- if (!api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
- {
- dbug(1, dprintf("ai_parms[0].length=%d/0x%x", ai_parms[0].length, GET_WORD(ai_parms[0].info + 1)));
- ch = 0;
- if (ai_parms[0].length)
- {
- ch = GET_WORD(ai_parms[0].info + 1);
- dbug(1, dprintf("BCH-I=0x%x", ch));
- }
- }
- }
-
- if (plci->State == INC_CON_CONNECTED_ALERT)
- {
- dbug(1, dprintf("Connected Alert Call_Res"));
- if (a->Info_Mask[appl->Id - 1] & 0x200)
- {
- /* early B3 connect (CIP mask bit 9) no release after a disc */
- add_p(plci, LLI, "\x01\x01");
- }
- add_s(plci, CONN_NR, &parms[2]);
- add_s(plci, LLC, &parms[4]);
- add_ai(plci, &parms[5]);
- plci->State = INC_CON_ACCEPT;
- sig_req(plci, CALL_RES, 0);
- return 1;
- }
- else if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) {
- __clear_bit(appl->Id - 1, plci->c_ind_mask_table);
- dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
- Reject = GET_WORD(parms[0].info);
- dbug(1, dprintf("Reject=0x%x", Reject));
- if (Reject)
- {
- if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
- {
- if ((Reject & 0xff00) == 0x3400)
- {
- esc_t[2] = ((byte)(Reject & 0x00ff)) | 0x80;
- add_p(plci, ESC, esc_t);
- add_ai(plci, &parms[5]);
- sig_req(plci, REJECT, 0);
- }
- else if (Reject == 1 || Reject >= 9)
- {
- add_ai(plci, &parms[5]);
- sig_req(plci, HANGUP, 0);
- }
- else
- {
- esc_t[2] = cau_t[(Reject&0x000f)];
- add_p(plci, ESC, esc_t);
- add_ai(plci, &parms[5]);
- sig_req(plci, REJECT, 0);
- }
- plci->appl = appl;
- }
- else
- {
- sendf(appl, _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
- }
- }
- else {
- plci->appl = appl;
- if (Id & EXT_CONTROLLER) {
- if (AdvCodecSupport(a, plci, appl, 0)) {
- dbug(1, dprintf("connect_res(error from AdvCodecSupport)"));
- sig_req(plci, HANGUP, 0);
- return 1;
- }
- if (plci->tel == ADV_VOICE && a->AdvCodecPLCI)
- {
- Info = add_b23(plci, &parms[1]);
- if (Info)
- {
- dbug(1, dprintf("connect_res(error from add_b23)"));
- sig_req(plci, HANGUP, 0);
- return 1;
- }
- if (plci->adv_nl)
- {
- nl_req_ncci(plci, ASSIGN, 0);
- }
- }
- }
- else
- {
- plci->tel = 0;
- if (ch != 2)
- {
- Info = add_b23(plci, &parms[1]);
- if (Info)
- {
- dbug(1, dprintf("connect_res(error from add_b23 2)"));
- sig_req(plci, HANGUP, 0);
- return 1;
- }
- }
- nl_req_ncci(plci, ASSIGN, 0);
- }
-
- if (plci->spoofed_msg == SPOOFING_REQUIRED)
- {
- api_save_msg(parms, "wsssss", &plci->saved_msg);
- plci->spoofed_msg = CALL_RES;
- plci->internal_command = BLOCK_PLCI;
- plci->command = 0;
- dbug(1, dprintf("Spoof"));
- }
- else
- {
- add_b1(plci, &parms[1], ch, plci->B1_facilities);
- if (a->Info_Mask[appl->Id - 1] & 0x200)
- {
- /* early B3 connect (CIP mask bit 9) no release after a disc */
- add_p(plci, LLI, "\x01\x01");
- }
- add_s(plci, CONN_NR, &parms[2]);
- add_s(plci, LLC, &parms[4]);
- add_ai(plci, &parms[5]);
- plci->State = INC_CON_ACCEPT;
- sig_req(plci, CALL_RES, 0);
- }
-
- for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
- sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
- }
- }
- return 1;
-}
-
-static byte connect_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- dbug(1, dprintf("connect_a_res"));
- return false;
-}
-
-static byte disconnect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- word Info;
- word i;
-
- dbug(1, dprintf("disconnect_req"));
-
- Info = _WRONG_IDENTIFIER;
-
- if (plci)
- {
- if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT)
- {
- __clear_bit(appl->Id - 1, plci->c_ind_mask_table);
- plci->appl = appl;
- for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
- sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
- plci->State = OUTG_DIS_PENDING;
- }
- if (plci->Sig.Id && plci->appl)
- {
- Info = 0;
- if (plci->Sig.Id != 0xff)
- {
- if (plci->State != INC_DIS_PENDING)
- {
- add_ai(plci, &msg[0]);
- sig_req(plci, HANGUP, 0);
- plci->State = OUTG_DIS_PENDING;
- return 1;
- }
- }
- else
- {
- if (plci->NL.Id && !plci->nl_remove_id)
- {
- mixer_remove(plci);
- nl_req_ncci(plci, REMOVE, 0);
- sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", 0);
- sendf(appl, _DISCONNECT_I, Id, 0, "w", 0);
- plci->State = INC_DIS_PENDING;
- }
- return 1;
- }
- }
- }
-
- if (!appl) return false;
- sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", Info);
- return false;
-}
-
-static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- dbug(1, dprintf("disconnect_res"));
- if (plci)
- {
- /* clear ind mask bit, just in case of collision of */
- /* DISCONNECT_IND and CONNECT_RES */
- __clear_bit(appl->Id - 1, plci->c_ind_mask_table);
- ncci_free_receive_buffers(plci, 0);
- if (plci_remove_check(plci))
- {
- return 0;
- }
- if (plci->State == INC_DIS_PENDING
- || plci->State == SUSPENDING) {
- if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) {
- if (plci->State != SUSPENDING) plci->State = IDLE;
- dbug(1, dprintf("chs=%d", plci->channels));
- if (!plci->channels) {
- plci_remove(plci);
- }
- }
- }
- }
- return 0;
-}
-
-static byte listen_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word Info;
- byte i;
-
- dbug(1, dprintf("listen_req(Appl=0x%x)", appl->Id));
-
- Info = _WRONG_IDENTIFIER;
- if (a) {
- Info = 0;
- a->Info_Mask[appl->Id - 1] = GET_DWORD(parms[0].info);
- a->CIP_Mask[appl->Id - 1] = GET_DWORD(parms[1].info);
- dbug(1, dprintf("CIP_MASK=0x%lx", GET_DWORD(parms[1].info)));
- if (a->Info_Mask[appl->Id - 1] & 0x200) { /* early B3 connect provides */
- a->Info_Mask[appl->Id - 1] |= 0x10; /* call progression infos */
- }
-
- /* check if the external controller listens and switch listen on or off */
- if (Id&EXT_CONTROLLER && GET_DWORD(parms[1].info)) {
- if (a->profile.Global_Options & ON_BOARD_CODEC) {
- dummy_plci.State = IDLE;
- a->codec_listen[appl->Id - 1] = &dummy_plci;
- a->TelOAD[0] = (byte)(parms[3].length);
- for (i = 1; parms[3].length >= i && i < 22; i++) {
- a->TelOAD[i] = parms[3].info[i];
- }
- a->TelOAD[i] = 0;
- a->TelOSA[0] = (byte)(parms[4].length);
- for (i = 1; parms[4].length >= i && i < 22; i++) {
- a->TelOSA[i] = parms[4].info[i];
- }
- a->TelOSA[i] = 0;
- }
- else Info = 0x2002; /* wrong controller, codec not supported */
- }
- else{ /* clear listen */
- a->codec_listen[appl->Id - 1] = (PLCI *)0;
- }
- }
- sendf(appl,
- _LISTEN_R | CONFIRM,
- Id,
- Number,
- "w", Info);
-
- if (a) listen_check(a);
- return false;
-}
-
-static byte info_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- word i;
- API_PARSE *ai;
- PLCI *rc_plci = NULL;
- API_PARSE ai_parms[5];
- word Info = 0;
-
- dbug(1, dprintf("info_req"));
- for (i = 0; i < 5; i++) ai_parms[i].length = 0;
-
- ai = &msg[1];
-
- if (ai->length)
- {
- if (api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
- {
- dbug(1, dprintf("AddInfo wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- }
- }
- if (!a) Info = _WRONG_STATE;
-
- if (!Info && plci)
- { /* no fac, with CPN, or KEY */
- rc_plci = plci;
- if (!ai_parms[3].length && plci->State && (msg[0].length || ai_parms[1].length))
- {
- /* overlap sending option */
- dbug(1, dprintf("OvlSnd"));
- add_s(plci, CPN, &msg[0]);
- add_s(plci, KEY, &ai_parms[1]);
- sig_req(plci, INFO_REQ, 0);
- send_req(plci);
- return false;
- }
-
- if (plci->State && ai_parms[2].length)
- {
- /* User_Info option */
- dbug(1, dprintf("UUI"));
- add_s(plci, UUI, &ai_parms[2]);
- sig_req(plci, USER_DATA, 0);
- }
- else if (plci->State && ai_parms[3].length)
- {
- /* Facility option */
- dbug(1, dprintf("FAC"));
- add_s(plci, CPN, &msg[0]);
- add_ai(plci, &msg[1]);
- sig_req(plci, FACILITY_REQ, 0);
- }
- else
- {
- Info = _WRONG_STATE;
- }
- }
- else if ((ai_parms[1].length || ai_parms[2].length || ai_parms[3].length) && !Info)
- {
- /* NCR_Facility option -> send UUI and Keypad too */
- dbug(1, dprintf("NCR_FAC"));
- if ((i = get_plci(a)))
- {
- rc_plci = &a->plci[i - 1];
- appl->NullCREnable = true;
- rc_plci->internal_command = C_NCR_FAC_REQ;
- rc_plci->appl = appl;
- add_p(rc_plci, CAI, "\x01\x80");
- add_p(rc_plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(rc_plci, ASSIGN, DSIG_ID);
- send_req(rc_plci);
- }
- else
- {
- Info = _OUT_OF_PLCI;
- }
-
- if (!Info)
- {
- add_s(rc_plci, CPN, &msg[0]);
- add_ai(rc_plci, &msg[1]);
- sig_req(rc_plci, NCR_FACILITY, 0);
- send_req(rc_plci);
- return false;
- /* for application controlled supplementary services */
- }
- }
-
- if (!rc_plci)
- {
- Info = _WRONG_MESSAGE_FORMAT;
- }
-
- if (!Info)
- {
- send_req(rc_plci);
- }
- else
- { /* appl is not assigned to a PLCI or error condition */
- dbug(1, dprintf("localInfoCon"));
- sendf(appl,
- _INFO_R | CONFIRM,
- Id,
- Number,
- "w", Info);
- }
- return false;
-}
-
-static byte info_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- dbug(1, dprintf("info_res"));
- return false;
-}
-
-static byte alert_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- word Info;
- byte ret;
-
- dbug(1, dprintf("alert_req"));
-
- Info = _WRONG_IDENTIFIER;
- ret = false;
- if (plci) {
- Info = _ALERT_IGNORED;
- if (plci->State != INC_CON_ALERT) {
- Info = _WRONG_STATE;
- if (plci->State == INC_CON_PENDING) {
- Info = 0;
- plci->State = INC_CON_ALERT;
- add_ai(plci, &msg[0]);
- sig_req(plci, CALL_ALERT, 0);
- ret = 1;
- }
- }
- }
- sendf(appl,
- _ALERT_R | CONFIRM,
- Id,
- Number,
- "w", Info);
- return ret;
-}
-
-static byte facility_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- word Info = 0;
- word i = 0;
-
- word selector;
- word SSreq;
- long relatedPLCIvalue;
- DIVA_CAPI_ADAPTER *relatedadapter;
- byte *SSparms = "";
- byte RCparms[] = "\x05\x00\x00\x02\x00\x00";
- byte SSstruct[] = "\x09\x00\x00\x06\x00\x00\x00\x00\x00\x00";
- API_PARSE *parms;
- API_PARSE ss_parms[11];
- PLCI *rplci;
- byte cai[15];
- dword d;
- API_PARSE dummy;
-
- dbug(1, dprintf("facility_req"));
- for (i = 0; i < 9; i++) ss_parms[i].length = 0;
-
- parms = &msg[1];
-
- if (!a)
- {
- dbug(1, dprintf("wrong Ctrl"));
- Info = _WRONG_IDENTIFIER;
- }
-
- selector = GET_WORD(msg[0].info);
-
- if (!Info)
- {
- switch (selector)
- {
- case SELECTOR_HANDSET:
- Info = AdvCodecSupport(a, plci, appl, HOOK_SUPPORT);
- break;
-
- case SELECTOR_SU_SERV:
- if (!msg[1].length)
- {
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- SSreq = GET_WORD(&(msg[1].info[1]));
- PUT_WORD(&RCparms[1], SSreq);
- SSparms = RCparms;
- switch (SSreq)
- {
- case S_GET_SUPPORTED_SERVICES:
- if ((i = get_plci(a)))
- {
- rplci = &a->plci[i - 1];
- rplci->appl = appl;
- add_p(rplci, CAI, "\x01\x80");
- add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(rplci, ASSIGN, DSIG_ID);
- send_req(rplci);
- }
- else
- {
- PUT_DWORD(&SSstruct[6], MASK_TERMINAL_PORTABILITY);
- SSparms = (byte *)SSstruct;
- break;
- }
- rplci->internal_command = GETSERV_REQ_PEND;
- rplci->number = Number;
- rplci->appl = appl;
- sig_req(rplci, S_SUPPORTED, 0);
- send_req(rplci);
- return false;
- break;
-
- case S_LISTEN:
- if (parms->length == 7)
- {
- if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
- {
- dbug(1, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- }
- else
- {
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- a->Notification_Mask[appl->Id - 1] = GET_DWORD(ss_parms[2].info);
- if (a->Notification_Mask[appl->Id - 1] & SMASK_MWI) /* MWI active? */
- {
- if ((i = get_plci(a)))
- {
- rplci = &a->plci[i - 1];
- rplci->appl = appl;
- add_p(rplci, CAI, "\x01\x80");
- add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(rplci, ASSIGN, DSIG_ID);
- send_req(rplci);
- }
- else
- {
- break;
- }
- rplci->internal_command = GET_MWI_STATE;
- rplci->number = Number;
- sig_req(rplci, MWI_POLL, 0);
- send_req(rplci);
- }
- break;
-
- case S_HOLD:
- api_parse(&parms->info[1], (word)parms->length, "ws", ss_parms);
- if (plci && plci->State && plci->SuppState == IDLE)
- {
- plci->SuppState = HOLD_REQUEST;
- plci->command = C_HOLD_REQ;
- add_s(plci, CAI, &ss_parms[1]);
- sig_req(plci, CALL_HOLD, 0);
- send_req(plci);
- return false;
- }
- else Info = 0x3010; /* wrong state */
- break;
- case S_RETRIEVE:
- if (plci && plci->State && plci->SuppState == CALL_HELD)
- {
- if (Id & EXT_CONTROLLER)
- {
- if (AdvCodecSupport(a, plci, appl, 0))
- {
- Info = 0x3010; /* wrong state */
- break;
- }
- }
- else plci->tel = 0;
-
- plci->SuppState = RETRIEVE_REQUEST;
- plci->command = C_RETRIEVE_REQ;
- if (plci->spoofed_msg == SPOOFING_REQUIRED)
- {
- plci->spoofed_msg = CALL_RETRIEVE;
- plci->internal_command = BLOCK_PLCI;
- plci->command = 0;
- dbug(1, dprintf("Spoof"));
- return false;
- }
- else
- {
- sig_req(plci, CALL_RETRIEVE, 0);
- send_req(plci);
- return false;
- }
- }
- else Info = 0x3010; /* wrong state */
- break;
- case S_SUSPEND:
- if (parms->length)
- {
- if (api_parse(&parms->info[1], (word)parms->length, "wbs", ss_parms))
- {
- dbug(1, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- }
- if (plci && plci->State)
- {
- add_s(plci, CAI, &ss_parms[2]);
- plci->command = SUSPEND_REQ;
- sig_req(plci, SUSPEND, 0);
- plci->State = SUSPENDING;
- send_req(plci);
- }
- else Info = 0x3010; /* wrong state */
- break;
-
- case S_RESUME:
- if (!(i = get_plci(a)))
- {
- Info = _OUT_OF_PLCI;
- break;
- }
- rplci = &a->plci[i - 1];
- rplci->appl = appl;
- rplci->number = Number;
- rplci->tel = 0;
- rplci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
- /* check 'external controller' bit for codec support */
- if (Id & EXT_CONTROLLER)
- {
- if (AdvCodecSupport(a, rplci, appl, 0))
- {
- rplci->Id = 0;
- Info = 0x300A;
- break;
- }
- }
- if (parms->length)
- {
- if (api_parse(&parms->info[1], (word)parms->length, "wbs", ss_parms))
- {
- dbug(1, dprintf("format wrong"));
- rplci->Id = 0;
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- }
- dummy.length = 0;
- dummy.info = "\x00";
- add_b1(rplci, &dummy, 0, 0);
- if (a->Info_Mask[appl->Id - 1] & 0x200)
- {
- /* early B3 connect (CIP mask bit 9) no release after a disc */
- add_p(rplci, LLI, "\x01\x01");
- }
- add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(rplci, ASSIGN, DSIG_ID);
- send_req(rplci);
- add_s(rplci, CAI, &ss_parms[2]);
- rplci->command = RESUME_REQ;
- sig_req(rplci, RESUME, 0);
- rplci->State = RESUMING;
- send_req(rplci);
- break;
-
- case S_CONF_BEGIN: /* Request */
- case S_CONF_DROP:
- case S_CONF_ISOLATE:
- case S_CONF_REATTACH:
- if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
- {
- dbug(1, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- if (plci && plci->State && ((plci->SuppState == IDLE) || (plci->SuppState == CALL_HELD)))
- {
- d = GET_DWORD(ss_parms[2].info);
- if (d >= 0x80)
- {
- dbug(1, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- plci->ptyState = (byte)SSreq;
- plci->command = 0;
- cai[0] = 2;
- switch (SSreq)
- {
- case S_CONF_BEGIN:
- cai[1] = CONF_BEGIN;
- plci->internal_command = CONF_BEGIN_REQ_PEND;
- break;
- case S_CONF_DROP:
- cai[1] = CONF_DROP;
- plci->internal_command = CONF_DROP_REQ_PEND;
- break;
- case S_CONF_ISOLATE:
- cai[1] = CONF_ISOLATE;
- plci->internal_command = CONF_ISOLATE_REQ_PEND;
- break;
- case S_CONF_REATTACH:
- cai[1] = CONF_REATTACH;
- plci->internal_command = CONF_REATTACH_REQ_PEND;
- break;
- }
- cai[2] = (byte)d; /* Conference Size or PartyId, respectively */
- add_p(plci, CAI, cai);
- sig_req(plci, S_SERVICE, 0);
- send_req(plci);
- return false;
- }
- else Info = 0x3010; /* wrong state */
- break;
-
- case S_ECT:
- case S_3PTY_BEGIN:
- case S_3PTY_END:
- case S_CONF_ADD:
- if (parms->length == 7)
- {
- if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
- {
- dbug(1, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- }
- else if (parms->length == 8) /* workaround for the T-View-S */
- {
- if (api_parse(&parms->info[1], (word)parms->length, "wbdb", ss_parms))
- {
- dbug(1, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- }
- else
- {
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- if (!msg[1].length)
- {
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- if (!plci)
- {
- Info = _WRONG_IDENTIFIER;
- break;
- }
- relatedPLCIvalue = GET_DWORD(ss_parms[2].info);
- relatedPLCIvalue &= 0x0000FFFF;
- dbug(1, dprintf("PTY/ECT/addCONF,relPLCI=%lx", relatedPLCIvalue));
- /* controller numbering starts at 0 and goes up to (max_adapter - 1) */
- if (((relatedPLCIvalue & 0x7f) == 0)
- || (MapController((byte)(relatedPLCIvalue & 0x7f)) == 0)
- || (MapController((byte)(relatedPLCIvalue & 0x7f)) > max_adapter))
- {
- if (SSreq == S_3PTY_END)
- {
- dbug(1, dprintf("wrong Controller use 2nd PLCI=PLCI"));
- rplci = plci;
- }
- else
- {
- Info = 0x3010; /* wrong state */
- break;
- }
- }
- else
- {
- relatedadapter = &adapter[MapController((byte)(relatedPLCIvalue & 0x7f)) - 1];
- relatedPLCIvalue >>= 8;
- /* find PLCI PTR*/
- for (i = 0, rplci = NULL; i < relatedadapter->max_plci; i++)
- {
- if (relatedadapter->plci[i].Id == (byte)relatedPLCIvalue)
- {
- rplci = &relatedadapter->plci[i];
- }
- }
- if (!rplci || !relatedPLCIvalue)
- {
- if (SSreq == S_3PTY_END)
- {
- dbug(1, dprintf("use 2nd PLCI=PLCI"));
- rplci = plci;
- }
- else
- {
- Info = 0x3010; /* wrong state */
- break;
- }
- }
- }
-/*
- dbug(1, dprintf("rplci:%x", rplci));
- dbug(1, dprintf("plci:%x", plci));
- dbug(1, dprintf("rplci->ptyState:%x", rplci->ptyState));
- dbug(1, dprintf("plci->ptyState:%x", plci->ptyState));
- dbug(1, dprintf("SSreq:%x", SSreq));
- dbug(1, dprintf("rplci->internal_command:%x", rplci->internal_command));
- dbug(1, dprintf("rplci->appl:%x", rplci->appl));
- dbug(1, dprintf("rplci->Id:%x", rplci->Id));
-*/
- /* send PTY/ECT req, cannot check all states because of US stuff */
- if (!rplci->internal_command && rplci->appl)
- {
- plci->command = 0;
- rplci->relatedPTYPLCI = plci;
- plci->relatedPTYPLCI = rplci;
- rplci->ptyState = (byte)SSreq;
- if (SSreq == S_ECT)
- {
- rplci->internal_command = ECT_REQ_PEND;
- cai[1] = ECT_EXECUTE;
-
- rplci->vswitchstate = 0;
- rplci->vsprot = 0;
- rplci->vsprotdialect = 0;
- plci->vswitchstate = 0;
- plci->vsprot = 0;
- plci->vsprotdialect = 0;
-
- }
- else if (SSreq == S_CONF_ADD)
- {
- rplci->internal_command = CONF_ADD_REQ_PEND;
- cai[1] = CONF_ADD;
- }
- else
- {
- rplci->internal_command = PTY_REQ_PEND;
- cai[1] = (byte)(SSreq - 3);
- }
- rplci->number = Number;
- if (plci != rplci) /* explicit invocation */
- {
- cai[0] = 2;
- cai[2] = plci->Sig.Id;
- dbug(1, dprintf("explicit invocation"));
- }
- else
- {
- dbug(1, dprintf("implicit invocation"));
- cai[0] = 1;
- }
- add_p(rplci, CAI, cai);
- sig_req(rplci, S_SERVICE, 0);
- send_req(rplci);
- return false;
- }
- else
- {
- dbug(0, dprintf("Wrong line"));
- Info = 0x3010; /* wrong state */
- break;
- }
- break;
-
- case S_CALL_DEFLECTION:
- if (api_parse(&parms->info[1], (word)parms->length, "wbwss", ss_parms))
- {
- dbug(1, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- if (!plci)
- {
- Info = _WRONG_IDENTIFIER;
- break;
- }
- /* reuse unused screening indicator */
- ss_parms[3].info[3] = (byte)GET_WORD(&(ss_parms[2].info[0]));
- plci->command = 0;
- plci->internal_command = CD_REQ_PEND;
- appl->CDEnable = true;
- cai[0] = 1;
- cai[1] = CALL_DEFLECTION;
- add_p(plci, CAI, cai);
- add_p(plci, CPN, ss_parms[3].info);
- sig_req(plci, S_SERVICE, 0);
- send_req(plci);
- return false;
- break;
-
- case S_CALL_FORWARDING_START:
- if (api_parse(&parms->info[1], (word)parms->length, "wbdwwsss", ss_parms))
- {
- dbug(1, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
-
- if ((i = get_plci(a)))
- {
- rplci = &a->plci[i - 1];
- rplci->appl = appl;
- add_p(rplci, CAI, "\x01\x80");
- add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(rplci, ASSIGN, DSIG_ID);
- send_req(rplci);
- }
- else
- {
- Info = _OUT_OF_PLCI;
- break;
- }
-
- /* reuse unused screening indicator */
- rplci->internal_command = CF_START_PEND;
- rplci->appl = appl;
- rplci->number = Number;
- appl->S_Handle = GET_DWORD(&(ss_parms[2].info[0]));
- cai[0] = 2;
- cai[1] = 0x70 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
- cai[2] = (byte)GET_WORD(&(ss_parms[4].info[0])); /* Basic Service */
- add_p(rplci, CAI, cai);
- add_p(rplci, OAD, ss_parms[5].info);
- add_p(rplci, CPN, ss_parms[6].info);
- sig_req(rplci, S_SERVICE, 0);
- send_req(rplci);
- return false;
- break;
-
- case S_INTERROGATE_DIVERSION:
- case S_INTERROGATE_NUMBERS:
- case S_CALL_FORWARDING_STOP:
- case S_CCBS_REQUEST:
- case S_CCBS_DEACTIVATE:
- case S_CCBS_INTERROGATE:
- switch (SSreq)
- {
- case S_INTERROGATE_NUMBERS:
- if (api_parse(&parms->info[1], (word)parms->length, "wbd", ss_parms))
- {
- dbug(0, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- }
- break;
- case S_CCBS_REQUEST:
- case S_CCBS_DEACTIVATE:
- if (api_parse(&parms->info[1], (word)parms->length, "wbdw", ss_parms))
- {
- dbug(0, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- }
- break;
- case S_CCBS_INTERROGATE:
- if (api_parse(&parms->info[1], (word)parms->length, "wbdws", ss_parms))
- {
- dbug(0, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- }
- break;
- default:
- if (api_parse(&parms->info[1], (word)parms->length, "wbdwws", ss_parms))
- {
- dbug(0, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- break;
- }
-
- if (Info) break;
- if ((i = get_plci(a)))
- {
- rplci = &a->plci[i - 1];
- switch (SSreq)
- {
- case S_INTERROGATE_DIVERSION: /* use cai with S_SERVICE below */
- cai[1] = 0x60 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
- rplci->internal_command = INTERR_DIVERSION_REQ_PEND; /* move to rplci if assigned */
- break;
- case S_INTERROGATE_NUMBERS: /* use cai with S_SERVICE below */
- cai[1] = DIVERSION_INTERROGATE_NUM; /* Function */
- rplci->internal_command = INTERR_NUMBERS_REQ_PEND; /* move to rplci if assigned */
- break;
- case S_CALL_FORWARDING_STOP:
- rplci->internal_command = CF_STOP_PEND;
- cai[1] = 0x80 | (byte)GET_WORD(&(ss_parms[3].info[0])); /* Function */
- break;
- case S_CCBS_REQUEST:
- cai[1] = CCBS_REQUEST;
- rplci->internal_command = CCBS_REQUEST_REQ_PEND;
- break;
- case S_CCBS_DEACTIVATE:
- cai[1] = CCBS_DEACTIVATE;
- rplci->internal_command = CCBS_DEACTIVATE_REQ_PEND;
- break;
- case S_CCBS_INTERROGATE:
- cai[1] = CCBS_INTERROGATE;
- rplci->internal_command = CCBS_INTERROGATE_REQ_PEND;
- break;
- default:
- cai[1] = 0;
- break;
- }
- rplci->appl = appl;
- rplci->number = Number;
- add_p(rplci, CAI, "\x01\x80");
- add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(rplci, ASSIGN, DSIG_ID);
- send_req(rplci);
- }
- else
- {
- Info = _OUT_OF_PLCI;
- break;
- }
-
- appl->S_Handle = GET_DWORD(&(ss_parms[2].info[0]));
- switch (SSreq)
- {
- case S_INTERROGATE_NUMBERS:
- cai[0] = 1;
- add_p(rplci, CAI, cai);
- break;
- case S_CCBS_REQUEST:
- case S_CCBS_DEACTIVATE:
- cai[0] = 3;
- PUT_WORD(&cai[2], GET_WORD(&(ss_parms[3].info[0])));
- add_p(rplci, CAI, cai);
- break;
- case S_CCBS_INTERROGATE:
- cai[0] = 3;
- PUT_WORD(&cai[2], GET_WORD(&(ss_parms[3].info[0])));
- add_p(rplci, CAI, cai);
- add_p(rplci, OAD, ss_parms[4].info);
- break;
- default:
- cai[0] = 2;
- cai[2] = (byte)GET_WORD(&(ss_parms[4].info[0])); /* Basic Service */
- add_p(rplci, CAI, cai);
- add_p(rplci, OAD, ss_parms[5].info);
- break;
- }
-
- sig_req(rplci, S_SERVICE, 0);
- send_req(rplci);
- return false;
- break;
-
- case S_MWI_ACTIVATE:
- if (api_parse(&parms->info[1], (word)parms->length, "wbwdwwwssss", ss_parms))
- {
- dbug(1, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- if (!plci)
- {
- if ((i = get_plci(a)))
- {
- rplci = &a->plci[i - 1];
- rplci->appl = appl;
- rplci->cr_enquiry = true;
- add_p(rplci, CAI, "\x01\x80");
- add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(rplci, ASSIGN, DSIG_ID);
- send_req(rplci);
- }
- else
- {
- Info = _OUT_OF_PLCI;
- break;
- }
- }
- else
- {
- rplci = plci;
- rplci->cr_enquiry = false;
- }
-
- rplci->command = 0;
- rplci->internal_command = MWI_ACTIVATE_REQ_PEND;
- rplci->appl = appl;
- rplci->number = Number;
-
- cai[0] = 13;
- cai[1] = ACTIVATION_MWI; /* Function */
- PUT_WORD(&cai[2], GET_WORD(&(ss_parms[2].info[0]))); /* Basic Service */
- PUT_DWORD(&cai[4], GET_DWORD(&(ss_parms[3].info[0]))); /* Number of Messages */
- PUT_WORD(&cai[8], GET_WORD(&(ss_parms[4].info[0]))); /* Message Status */
- PUT_WORD(&cai[10], GET_WORD(&(ss_parms[5].info[0]))); /* Message Reference */
- PUT_WORD(&cai[12], GET_WORD(&(ss_parms[6].info[0]))); /* Invocation Mode */
- add_p(rplci, CAI, cai);
- add_p(rplci, CPN, ss_parms[7].info); /* Receiving User Number */
- add_p(rplci, OAD, ss_parms[8].info); /* Controlling User Number */
- add_p(rplci, OSA, ss_parms[9].info); /* Controlling User Provided Number */
- add_p(rplci, UID, ss_parms[10].info); /* Time */
- sig_req(rplci, S_SERVICE, 0);
- send_req(rplci);
- return false;
-
- case S_MWI_DEACTIVATE:
- if (api_parse(&parms->info[1], (word)parms->length, "wbwwss", ss_parms))
- {
- dbug(1, dprintf("format wrong"));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- if (!plci)
- {
- if ((i = get_plci(a)))
- {
- rplci = &a->plci[i - 1];
- rplci->appl = appl;
- rplci->cr_enquiry = true;
- add_p(rplci, CAI, "\x01\x80");
- add_p(rplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(rplci, ASSIGN, DSIG_ID);
- send_req(rplci);
- }
- else
- {
- Info = _OUT_OF_PLCI;
- break;
- }
- }
- else
- {
- rplci = plci;
- rplci->cr_enquiry = false;
- }
-
- rplci->command = 0;
- rplci->internal_command = MWI_DEACTIVATE_REQ_PEND;
- rplci->appl = appl;
- rplci->number = Number;
-
- cai[0] = 5;
- cai[1] = DEACTIVATION_MWI; /* Function */
- PUT_WORD(&cai[2], GET_WORD(&(ss_parms[2].info[0]))); /* Basic Service */
- PUT_WORD(&cai[4], GET_WORD(&(ss_parms[3].info[0]))); /* Invocation Mode */
- add_p(rplci, CAI, cai);
- add_p(rplci, CPN, ss_parms[4].info); /* Receiving User Number */
- add_p(rplci, OAD, ss_parms[5].info); /* Controlling User Number */
- sig_req(rplci, S_SERVICE, 0);
- send_req(rplci);
- return false;
-
- default:
- Info = 0x300E; /* not supported */
- break;
- }
- break; /* case SELECTOR_SU_SERV: end */
-
-
- case SELECTOR_DTMF:
- return (dtmf_request(Id, Number, a, plci, appl, msg));
-
-
-
- case SELECTOR_LINE_INTERCONNECT:
- return (mixer_request(Id, Number, a, plci, appl, msg));
-
-
-
- case PRIV_SELECTOR_ECHO_CANCELLER:
- appl->appl_flags |= APPL_FLAG_PRIV_EC_SPEC;
- return (ec_request(Id, Number, a, plci, appl, msg));
-
- case SELECTOR_ECHO_CANCELLER:
- appl->appl_flags &= ~APPL_FLAG_PRIV_EC_SPEC;
- return (ec_request(Id, Number, a, plci, appl, msg));
-
-
- case SELECTOR_V42BIS:
- default:
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- } /* end of switch (selector) */
- }
-
- dbug(1, dprintf("SendFacRc"));
- sendf(appl,
- _FACILITY_R | CONFIRM,
- Id,
- Number,
- "wws", Info, selector, SSparms);
- return false;
-}
-
-static byte facility_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- dbug(1, dprintf("facility_res"));
- return false;
-}
-
-static byte connect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word Info = 0;
- byte req;
- byte len;
- word w;
- word fax_control_bits, fax_feature_bits, fax_info_change;
- API_PARSE *ncpi;
- byte pvc[2];
-
- API_PARSE fax_parms[9];
- word i;
-
-
- dbug(1, dprintf("connect_b3_req"));
- if (plci)
- {
- if ((plci->State == IDLE) || (plci->State == OUTG_DIS_PENDING)
- || (plci->State == INC_DIS_PENDING) || (plci->SuppState != IDLE))
- {
- Info = _WRONG_STATE;
- }
- else
- {
- /* send a local reply if the assign was unsuccessful,
-    or the B3 protocol allows only one layer 3 connection
-    and one is already connected,
-    or the B2 protocol is not any LAPD variant
-    and connect_b3_req contradicts the originate/answer direction */
- if (!plci->NL.Id
- || (((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE))
- && ((plci->channels != 0)
- || (((plci->B2_prot != B2_SDLC) && (plci->B2_prot != B2_LAPD) && (plci->B2_prot != B2_LAPD_FREE_SAPI_SEL))
- && ((plci->call_dir & CALL_DIR_ANSWER) && !(plci->call_dir & CALL_DIR_FORCE_OUTG_NL))))))
- {
- dbug(1, dprintf("B3 already connected=%d or no NL.Id=0x%x, dir=%d sstate=0x%x",
- plci->channels, plci->NL.Id, plci->call_dir, plci->SuppState));
- Info = _WRONG_STATE;
- sendf(appl,
- _CONNECT_B3_R | CONFIRM,
- Id,
- Number,
- "w", Info);
- return false;
- }
- plci->requested_options_conn = 0;
-
- req = N_CONNECT;
- ncpi = &parms[0];
- if (plci->B3_prot == 2 || plci->B3_prot == 3)
- {
- if (ncpi->length > 2)
- {
- /* check for PVC */
- if (ncpi->info[2] || ncpi->info[3])
- {
- pvc[0] = ncpi->info[3];
- pvc[1] = ncpi->info[2];
- add_d(plci, 2, pvc);
- req = N_RESET;
- }
- else
- {
- if (ncpi->info[1] & 1) req = N_CONNECT | N_D_BIT;
- add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
- }
- }
- }
- else if (plci->B3_prot == 5)
- {
- if (plci->NL.Id && !plci->nl_remove_id)
- {
- fax_control_bits = GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low);
- fax_feature_bits = GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->feature_bits_low);
- if (!(fax_control_bits & T30_CONTROL_BIT_MORE_DOCUMENTS)
- || (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS))
- {
- len = offsetof(T30_INFO, universal_6);
- fax_info_change = false;
- if (ncpi->length >= 4)
- {
- w = GET_WORD(&ncpi->info[3]);
- if ((w & 0x0001) != ((word)(((T30_INFO *)(plci->fax_connect_info_buffer))->resolution & 0x0001)))
- {
- ((T30_INFO *)(plci->fax_connect_info_buffer))->resolution =
- (byte)((((T30_INFO *)(plci->fax_connect_info_buffer))->resolution & ~T30_RESOLUTION_R8_0770_OR_200) |
- ((w & 0x0001) ? T30_RESOLUTION_R8_0770_OR_200 : 0));
- fax_info_change = true;
- }
- fax_control_bits &= ~(T30_CONTROL_BIT_REQUEST_POLLING | T30_CONTROL_BIT_MORE_DOCUMENTS);
- if (w & 0x0002) /* Fax-polling request */
- fax_control_bits |= T30_CONTROL_BIT_REQUEST_POLLING;
- if ((w & 0x0004) /* Request to send / poll another document */
- && (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_MORE_DOCUMENTS))
- {
- fax_control_bits |= T30_CONTROL_BIT_MORE_DOCUMENTS;
- }
- if (ncpi->length >= 6)
- {
- w = GET_WORD(&ncpi->info[5]);
- if (((byte) w) != ((T30_INFO *)(plci->fax_connect_info_buffer))->data_format)
- {
- ((T30_INFO *)(plci->fax_connect_info_buffer))->data_format = (byte) w;
- fax_info_change = true;
- }
-
- if ((a->man_profile.private_options & (1L << PRIVATE_FAX_SUB_SEP_PWD))
- && (GET_WORD(&ncpi->info[5]) & 0x8000)) /* Private SEP/SUB/PWD enable */
- {
- plci->requested_options_conn |= (1L << PRIVATE_FAX_SUB_SEP_PWD);
- }
- if ((a->man_profile.private_options & (1L << PRIVATE_FAX_NONSTANDARD))
- && (GET_WORD(&ncpi->info[5]) & 0x4000)) /* Private non-standard facilities enable */
- {
- plci->requested_options_conn |= (1L << PRIVATE_FAX_NONSTANDARD);
- }
- fax_control_bits &= ~(T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_SEL_POLLING |
- T30_CONTROL_BIT_ACCEPT_PASSWORD);
- if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
- & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
- {
- if (api_parse(&ncpi->info[1], ncpi->length, "wwwwsss", fax_parms))
- Info = _WRONG_MESSAGE_FORMAT;
- else
- {
- if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
- & (1L << PRIVATE_FAX_SUB_SEP_PWD))
- {
- fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_PASSWORD;
- if (fax_control_bits & T30_CONTROL_BIT_ACCEPT_POLLING)
- fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING;
- }
- w = fax_parms[4].length;
- if (w > 20)
- w = 20;
- ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = (byte) w;
- for (i = 0; i < w; i++)
- ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id[i] = fax_parms[4].info[1 + i];
- ((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
- len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
- w = fax_parms[5].length;
- if (w > 20)
- w = 20;
- plci->fax_connect_info_buffer[len++] = (byte) w;
- for (i = 0; i < w; i++)
- plci->fax_connect_info_buffer[len++] = fax_parms[5].info[1 + i];
- w = fax_parms[6].length;
- if (w > 20)
- w = 20;
- plci->fax_connect_info_buffer[len++] = (byte) w;
- for (i = 0; i < w; i++)
- plci->fax_connect_info_buffer[len++] = fax_parms[6].info[1 + i];
- if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[appl->Id - 1])
- & (1L << PRIVATE_FAX_NONSTANDARD))
- {
- if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
- {
- dbug(1, dprintf("non-standard facilities info missing or wrong format"));
- plci->fax_connect_info_buffer[len++] = 0;
- }
- else
- {
- if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
- plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
- plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
- for (i = 0; i < fax_parms[7].length; i++)
- plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
- }
- }
- }
- }
- else
- {
- len = offsetof(T30_INFO, universal_6);
- }
- fax_info_change = true;
-
- }
- if (fax_control_bits != GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low))
- {
- PUT_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low, fax_control_bits);
- fax_info_change = true;
- }
- }
- if (Info == GOOD)
- {
- plci->fax_connect_info_length = len;
- if (fax_info_change)
- {
- if (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS)
- {
- start_internal_command(Id, plci, fax_connect_info_command);
- return false;
- }
- else
- {
- start_internal_command(Id, plci, fax_adjust_b23_command);
- return false;
- }
- }
- }
- }
- else Info = _WRONG_STATE;
- }
- else Info = _WRONG_STATE;
- }
-
- else if (plci->B3_prot == B3_RTP)
- {
- plci->internal_req_buffer[0] = ncpi->length + 1;
- plci->internal_req_buffer[1] = UDATA_REQUEST_RTP_RECONFIGURE;
- for (w = 0; w < ncpi->length; w++)
- plci->internal_req_buffer[2 + w] = ncpi->info[1 + w];
- start_internal_command(Id, plci, rtp_connect_b3_req_command);
- return false;
- }
-
- if (!Info)
- {
- nl_req_ncci(plci, req, 0);
- return 1;
- }
- }
- }
- else Info = _WRONG_IDENTIFIER;
-
- sendf(appl,
- _CONNECT_B3_R | CONFIRM,
- Id,
- Number,
- "w", Info);
- return false;
-}
-
-static byte connect_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word ncci;
- API_PARSE *ncpi;
- byte req;
-
- word w;
-
-
- API_PARSE fax_parms[9];
- word i;
- byte len;
-
-
- dbug(1, dprintf("connect_b3_res"));
-
- ncci = (word)(Id >> 16);
- if (plci && ncci) {
- if (a->ncci_state[ncci] == INC_CON_PENDING) {
- if (GET_WORD(&parms[0].info[0]) != 0)
- {
- a->ncci_state[ncci] = OUTG_REJ_PENDING;
- channel_request_xon(plci, a->ncci_ch[ncci]);
- channel_xmit_xon(plci);
- cleanup_ncci_data(plci, ncci);
- nl_req_ncci(plci, N_DISC, (byte)ncci);
- return 1;
- }
- a->ncci_state[ncci] = INC_ACT_PENDING;
-
- req = N_CONNECT_ACK;
- ncpi = &parms[1];
- if ((plci->B3_prot == 4) || (plci->B3_prot == 5) || (plci->B3_prot == 7))
- {
-
- if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
- & (1L << PRIVATE_FAX_NONSTANDARD))
- {
- if (((plci->B3_prot == 4) || (plci->B3_prot == 5))
- && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
- && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
- {
- len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
- if (plci->fax_connect_info_length < len)
- {
- ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
- ((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
- }
- if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
- {
- dbug(1, dprintf("non-standard facilities info missing or wrong format"));
- }
- else
- {
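- /* step over the two length-prefixed strings already stored behind
-    the station id (presumably the SEP/SUB address and password, cf.
-    PRIVATE_FAX_SUB_SEP_PWD) to reach the non-standard facilities slot */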
- if (plci->fax_connect_info_length <= len)
- plci->fax_connect_info_buffer[len] = 0;
- len += 1 + plci->fax_connect_info_buffer[len];
- if (plci->fax_connect_info_length <= len)
- plci->fax_connect_info_buffer[len] = 0;
- len += 1 + plci->fax_connect_info_buffer[len];
- if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
- plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
- plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
- for (i = 0; i < fax_parms[7].length; i++)
- plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
- }
- plci->fax_connect_info_length = len;
- ((T30_INFO *)(plci->fax_connect_info_buffer))->code = 0;
- start_internal_command(Id, plci, fax_connect_ack_command);
- return false;
- }
- }
-
- nl_req_ncci(plci, req, (byte)ncci);
- if ((plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
- && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
- {
- if (plci->B3_prot == 4)
- sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
- else
- sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
- plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
- }
- }
-
- else if (plci->B3_prot == B3_RTP)
- {
- plci->internal_req_buffer[0] = ncpi->length + 1;
- plci->internal_req_buffer[1] = UDATA_REQUEST_RTP_RECONFIGURE;
- for (w = 0; w < ncpi->length; w++)
- plci->internal_req_buffer[2 + w] = ncpi->info[1+w];
- start_internal_command(Id, plci, rtp_connect_b3_res_command);
- return false;
- }
-
- else
- {
- if (ncpi->length > 2) {
- if (ncpi->info[1] & 1) req = N_CONNECT_ACK | N_D_BIT;
- add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
- }
- nl_req_ncci(plci, req, (byte)ncci);
- sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
- if (plci->adjust_b_restore)
- {
- plci->adjust_b_restore = false;
- start_internal_command(Id, plci, adjust_b_restore);
- }
- }
- return 1;
- }
- }
- return false;
-}
-
-static byte connect_b3_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word ncci;
-
- ncci = (word)(Id >> 16);
- dbug(1, dprintf("connect_b3_a_res(ncci=0x%x)", ncci));
-
- if (plci && ncci && (plci->State != IDLE) && (plci->State != INC_DIS_PENDING)
- && (plci->State != OUTG_DIS_PENDING))
- {
- if (a->ncci_state[ncci] == INC_ACT_PENDING) {
- a->ncci_state[ncci] = CONNECTED;
- if (plci->State != INC_CON_CONNECTED_ALERT) plci->State = CONNECTED;
- channel_request_xon(plci, a->ncci_ch[ncci]);
- channel_xmit_xon(plci);
- }
- }
- return false;
-}
-
-static byte disconnect_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word Info;
- word ncci;
- API_PARSE *ncpi;
-
- dbug(1, dprintf("disconnect_b3_req"));
-
- Info = _WRONG_IDENTIFIER;
- ncci = (word)(Id >> 16);
- if (plci && ncci)
- {
- Info = _WRONG_STATE;
- if ((a->ncci_state[ncci] == CONNECTED)
- || (a->ncci_state[ncci] == OUTG_CON_PENDING)
- || (a->ncci_state[ncci] == INC_CON_PENDING)
- || (a->ncci_state[ncci] == INC_ACT_PENDING))
- {
- a->ncci_state[ncci] = OUTG_DIS_PENDING;
- channel_request_xon(plci, a->ncci_ch[ncci]);
- channel_xmit_xon(plci);
-
- if (a->ncci[ncci].data_pending
- && ((plci->B3_prot == B3_TRANSPARENT)
- || (plci->B3_prot == B3_T30)
- || (plci->B3_prot == B3_T30_WITH_EXTENSIONS)))
- {
- plci->send_disc = (byte)ncci;
- plci->command = 0;
- return false;
- }
- else
- {
- cleanup_ncci_data(plci, ncci);
-
- if (plci->B3_prot == 2 || plci->B3_prot == 3)
- {
- ncpi = &parms[0];
- if (ncpi->length > 3)
- {
- add_d(plci, (word)(ncpi->length - 3), (byte *)&(ncpi->info[4]));
- }
- }
- nl_req_ncci(plci, N_DISC, (byte)ncci);
- }
- return 1;
- }
- }
- sendf(appl,
- _DISCONNECT_B3_R | CONFIRM,
- Id,
- Number,
- "w", Info);
- return false;
-}
-
-static byte disconnect_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word ncci;
- word i;
-
- ncci = (word)(Id >> 16);
- dbug(1, dprintf("disconnect_b3_res(ncci=0x%x", ncci));
- if (plci && ncci) {
- plci->requested_options_conn = 0;
- plci->fax_connect_info_length = 0;
- plci->ncpi_state = 0x00;
- if (((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE))
- && ((plci->B2_prot != B2_LAPD) && (plci->B2_prot != B2_LAPD_FREE_SAPI_SEL)))
- {
- plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
- }
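- /* search the table of NCCIs that received an incoming disconnect;
-    if found, drop the entry and release the channel */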
- for (i = 0; i < MAX_CHANNELS_PER_PLCI && plci->inc_dis_ncci_table[i] != (byte)ncci; i++);
- if (i < MAX_CHANNELS_PER_PLCI) {
- if (plci->channels)plci->channels--;
- for (; i < MAX_CHANNELS_PER_PLCI - 1; i++) plci->inc_dis_ncci_table[i] = plci->inc_dis_ncci_table[i + 1];
- plci->inc_dis_ncci_table[MAX_CHANNELS_PER_PLCI - 1] = 0;
-
- ncci_free_receive_buffers(plci, ncci);
-
- if ((plci->State == IDLE || plci->State == SUSPENDING) && !plci->channels) {
- if (plci->State == SUSPENDING) {
- sendf(plci->appl,
- _FACILITY_I,
- Id & 0xffffL,
- 0,
- "ws", (word)3, "\x03\x04\x00\x00");
- sendf(plci->appl, _DISCONNECT_I, Id & 0xffffL, 0, "w", 0);
- }
- plci_remove(plci);
- plci->State = IDLE;
- }
- }
- else
- {
- if ((a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
- && ((plci->B3_prot == 4) || (plci->B3_prot == 5))
- && (a->ncci_state[ncci] == INC_DIS_PENDING))
- {
- ncci_free_receive_buffers(plci, ncci);
-
- nl_req_ncci(plci, N_EDATA, (byte)ncci);
-
- plci->adapter->ncci_state[ncci] = IDLE;
- start_internal_command(Id, plci, fax_disconnect_command);
- return 1;
- }
- }
- }
- return false;
-}
-
-static byte data_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- NCCI *ncci_ptr;
- DATA_B3_DESC *data;
- word Info;
- word ncci;
- word i;
-
- dbug(1, dprintf("data_b3_req"));
-
- Info = _WRONG_IDENTIFIER;
- ncci = (word)(Id >> 16);
- dbug(1, dprintf("ncci=0x%x, plci=0x%x", ncci, plci));
-
- if (plci && ncci)
- {
- Info = _WRONG_STATE;
- if ((a->ncci_state[ncci] == CONNECTED)
- || (a->ncci_state[ncci] == INC_ACT_PENDING))
- {
- /* queue data */
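- /* DBuffer is used as a circular queue: the next free slot is
-    data_out + data_pending, wrapped at MAX_DATA_B3 */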
- ncci_ptr = &(a->ncci[ncci]);
- i = ncci_ptr->data_out + ncci_ptr->data_pending;
- if (i >= MAX_DATA_B3)
- i -= MAX_DATA_B3;
- data = &(ncci_ptr->DBuffer[i]);
- data->Number = Number;
- if ((((byte *)(parms[0].info)) >= ((byte *)(plci->msg_in_queue)))
- && (((byte *)(parms[0].info)) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
- {
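- /* the request was replayed from msg_in_queue, so the buffer pointer
-    was already stored as a dword when the message was queued; reuse
-    it instead of allocating a new transmit buffer */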
-
- data->P = (byte *)(long)(*((dword *)(parms[0].info)));
-
- }
- else
- data->P = TransmitBufferSet(appl, *(dword *)parms[0].info);
- data->Length = GET_WORD(parms[1].info);
- data->Handle = GET_WORD(parms[2].info);
- data->Flags = GET_WORD(parms[3].info);
- (ncci_ptr->data_pending)++;
-
- /* check for delivery confirmation */
- if (data->Flags & 0x0004)
- {
- i = ncci_ptr->data_ack_out + ncci_ptr->data_ack_pending;
- if (i >= MAX_DATA_ACK)
- i -= MAX_DATA_ACK;
- ncci_ptr->DataAck[i].Number = data->Number;
- ncci_ptr->DataAck[i].Handle = data->Handle;
- (ncci_ptr->data_ack_pending)++;
- }
-
- send_data(plci);
- return false;
- }
- }
- if (appl)
- {
- if (plci)
- {
- if ((((byte *)(parms[0].info)) >= ((byte *)(plci->msg_in_queue)))
- && (((byte *)(parms[0].info)) < ((byte *)(plci->msg_in_queue)) + sizeof(plci->msg_in_queue)))
- {
-
- TransmitBufferFree(appl, (byte *)(long)(*((dword *)(parms[0].info))));
-
- }
- }
- sendf(appl,
- _DATA_B3_R | CONFIRM,
- Id,
- Number,
- "ww", GET_WORD(parms[2].info), Info);
- }
- return false;
-}
-
-static byte data_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word n;
- word ncci;
- word NCCIcode;
-
- dbug(1, dprintf("data_b3_res"));
-
- ncci = (word)(Id >> 16);
- if (plci && ncci) {
- n = GET_WORD(parms[0].info);
- dbug(1, dprintf("free(%d)", n));
- NCCIcode = ncci | (((word) a->Id) << 8);
- if (n < appl->MaxBuffer &&
- appl->DataNCCI[n] == NCCIcode &&
- (byte)(appl->DataFlags[n] >> 8) == plci->Id) {
- dbug(1, dprintf("found"));
- appl->DataNCCI[n] = 0;
-
- if (channel_can_xon(plci, a->ncci_ch[ncci])) {
- channel_request_xon(plci, a->ncci_ch[ncci]);
- }
- channel_xmit_xon(plci);
-
- if (appl->DataFlags[n] & 4) {
- nl_req_ncci(plci, N_DATA_ACK, (byte)ncci);
- return 1;
- }
- }
- }
- return false;
-}
-
-static byte reset_b3_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word Info;
- word ncci;
-
- dbug(1, dprintf("reset_b3_req"));
-
- Info = _WRONG_IDENTIFIER;
- ncci = (word)(Id >> 16);
- if (plci && ncci)
- {
- Info = _WRONG_STATE;
- switch (plci->B3_prot)
- {
- case B3_ISO8208:
- case B3_X25_DCE:
- if (a->ncci_state[ncci] == CONNECTED)
- {
- nl_req_ncci(plci, N_RESET, (byte)ncci);
- send_req(plci);
- Info = GOOD;
- }
- break;
- case B3_TRANSPARENT:
- if (a->ncci_state[ncci] == CONNECTED)
- {
- start_internal_command(Id, plci, reset_b3_command);
- Info = GOOD;
- }
- break;
- }
- }
- /* reset_b3 must result in a reset_b3_con & reset_b3_Ind */
- sendf(appl,
- _RESET_B3_R | CONFIRM,
- Id,
- Number,
- "w", Info);
- return false;
-}
-
-static byte reset_b3_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word ncci;
-
- dbug(1, dprintf("reset_b3_res"));
-
- ncci = (word)(Id >> 16);
- if (plci && ncci) {
- switch (plci->B3_prot)
- {
- case B3_ISO8208:
- case B3_X25_DCE:
- if (a->ncci_state[ncci] == INC_RES_PENDING)
- {
- a->ncci_state[ncci] = CONNECTED;
- nl_req_ncci(plci, N_RESET_ACK, (byte)ncci);
- return true;
- }
- break;
- }
- }
- return false;
-}
-
-static byte connect_b3_t90_a_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word ncci;
- API_PARSE *ncpi;
- byte req;
-
- dbug(1, dprintf("connect_b3_t90_a_res"));
-
- ncci = (word)(Id >> 16);
- if (plci && ncci) {
- if (a->ncci_state[ncci] == INC_ACT_PENDING) {
- a->ncci_state[ncci] = CONNECTED;
- }
- else if (a->ncci_state[ncci] == INC_CON_PENDING) {
- a->ncci_state[ncci] = CONNECTED;
-
- req = N_CONNECT_ACK;
-
- /* parms[0].info == 0 for the original CAPI message definition! */
- if (parms[0].info) {
- ncpi = &parms[1];
- if (ncpi->length > 2) {
- if (ncpi->info[1] & 1) req = N_CONNECT_ACK | N_D_BIT;
- add_d(plci, (word)(ncpi->length - 3), &ncpi->info[4]);
- }
- }
- nl_req_ncci(plci, req, (byte)ncci);
- return 1;
- }
- }
- return false;
-}
-
-
-static byte select_b_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- word Info = 0;
- word i;
- byte tel;
- API_PARSE bp_parms[7];
-
- if (!plci || !msg)
- {
- Info = _WRONG_IDENTIFIER;
- }
- else
- {
- dbug(1, dprintf("select_b_req[%d],PLCI=0x%x,Tel=0x%x,NL=0x%x,appl=0x%x,sstate=0x%x",
- msg->length, plci->Id, plci->tel, plci->NL.Id, plci->appl, plci->SuppState));
- dbug(1, dprintf("PlciState=0x%x", plci->State));
- for (i = 0; i < 7; i++) bp_parms[i].length = 0;
-
- /* only allowed if no channel is open and no B3 is connected */
- if ((plci->State == IDLE) || (plci->State == OUTG_DIS_PENDING) || (plci->State == INC_DIS_PENDING)
- || (plci->SuppState != IDLE) || plci->channels || plci->nl_remove_id)
- {
- Info = _WRONG_STATE;
- }
- /* check message format and fill bp_parms pointer */
- else if (msg->length && api_parse(&msg->info[1], (word)msg->length, "wwwsss", bp_parms))
- {
- Info = _WRONG_MESSAGE_FORMAT;
- }
- else
- {
- if ((plci->State == INC_CON_PENDING) || (plci->State == INC_CON_ALERT)) /* send alert tone inband to the network, */
- { /* e.g. Qsig or RBS or Cornet-N or xess PRI */
- if (Id & EXT_CONTROLLER)
- {
- sendf(appl, _SELECT_B_REQ | CONFIRM, Id, Number, "w", 0x2002); /* wrong controller */
- return 0;
- }
- plci->State = INC_CON_CONNECTED_ALERT;
- plci->appl = appl;
- __clear_bit(appl->Id - 1, plci->c_ind_mask_table);
- dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
- /* disconnect the other appls; it's quasi a connect */
- for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
- sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED);
- }
-
- api_save_msg(msg, "s", &plci->saved_msg);
- tel = plci->tel;
- if (Id & EXT_CONTROLLER)
- {
- if (tel) /* external controller in use by this PLCI */
- {
- if (a->AdvSignalAppl && a->AdvSignalAppl != appl)
- {
- dbug(1, dprintf("Ext_Ctrl in use 1"));
- Info = _WRONG_STATE;
- }
- }
- else /* external controller NOT in use by this PLCI ? */
- {
- if (a->AdvSignalPLCI)
- {
- dbug(1, dprintf("Ext_Ctrl in use 2"));
- Info = _WRONG_STATE;
- }
- else /* activate the codec */
- {
- dbug(1, dprintf("Ext_Ctrl start"));
- if (AdvCodecSupport(a, plci, appl, 0))
- {
- dbug(1, dprintf("Error in codec procedures"));
- Info = _WRONG_STATE;
- }
- else if (plci->spoofed_msg == SPOOFING_REQUIRED) /* wait until codec is active */
- {
- plci->spoofed_msg = AWAITING_SELECT_B;
- plci->internal_command = BLOCK_PLCI; /* lock other commands */
- plci->command = 0;
- dbug(1, dprintf("continue if codec loaded"));
- return false;
- }
- }
- }
- }
- else /* external controller bit is OFF */
- {
- if (tel) /* external controller in use, need to switch off */
- {
- if (a->AdvSignalAppl == appl)
- {
- CodecIdCheck(a, plci);
- plci->tel = 0;
- plci->adv_nl = 0;
- dbug(1, dprintf("Ext_Ctrl disable"));
- }
- else
- {
- dbug(1, dprintf("Ext_Ctrl not requested"));
- }
- }
- }
- if (!Info)
- {
- if (plci->call_dir & CALL_DIR_OUT)
- plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
- else if (plci->call_dir & CALL_DIR_IN)
- plci->call_dir = CALL_DIR_IN | CALL_DIR_ANSWER;
- start_internal_command(Id, plci, select_b_command);
- return false;
- }
- }
- }
- sendf(appl, _SELECT_B_REQ | CONFIRM, Id, Number, "w", Info);
- return false;
-}
-
-static byte manufacturer_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *parms)
-{
- word command;
- word i;
- word ncci;
- API_PARSE *m;
- API_PARSE m_parms[5];
- word codec;
- byte req;
- byte ch;
- byte dir;
- static byte chi[2] = {0x01, 0x00};
- static byte lli[2] = {0x01, 0x00};
- static byte codec_cai[2] = {0x01, 0x01};
- static byte null_msg = {0};
- static API_PARSE null_parms = { 0, &null_msg };
- PLCI *v_plci;
- word Info = 0;
-
- dbug(1, dprintf("manufacturer_req"));
- for (i = 0; i < 5; i++) m_parms[i].length = 0;
-
- if (GET_DWORD(parms[0].info) != _DI_MANU_ID) {
- Info = _WRONG_MESSAGE_FORMAT;
- }
- command = GET_WORD(parms[1].info);
- m = &parms[2];
- if (!Info)
- {
- switch (command) {
- case _DI_ASSIGN_PLCI:
- if (api_parse(&m->info[1], (word)m->length, "wbbs", m_parms)) {
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- codec = GET_WORD(m_parms[0].info);
- ch = m_parms[1].info[0];
- dir = m_parms[2].info[0];
- if ((i = get_plci(a))) {
- plci = &a->plci[i - 1];
- plci->appl = appl;
- plci->command = _MANUFACTURER_R;
- plci->m_command = command;
- plci->number = Number;
- plci->State = LOCAL_CONNECT;
- Id = (((word)plci->Id << 8) | plci->adapter->Id | 0x80);
- dbug(1, dprintf("ManCMD,plci=0x%x", Id));
-
- if ((ch == 1 || ch == 2) && (dir <= 2)) {
- chi[1] = (byte)(0x80 | ch);
- lli[1] = 0;
- plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
- switch (codec)
- {
- case 0:
- Info = add_b1(plci, &m_parms[3], 0, 0);
- break;
- case 1:
- add_p(plci, CAI, codec_cai);
- break;
- /* manual 'switch on' to the codec support without signalling */
- /* first 'assign plci' with this function, then use */
- case 2:
- if (AdvCodecSupport(a, plci, appl, 0)) {
- Info = _RESOURCE_ERROR;
- }
- else {
- Info = add_b1(plci, &null_parms, 0, B1_FACILITY_LOCAL);
- lli[1] = 0x10; /* local call codec stream */
- }
- break;
- }
-
- plci->State = LOCAL_CONNECT;
- plci->manufacturer = true;
- plci->command = _MANUFACTURER_R;
- plci->m_command = command;
- plci->number = Number;
-
- if (!Info)
- {
- add_p(plci, LLI, lli);
- add_p(plci, CHI, chi);
- add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(plci, ASSIGN, DSIG_ID);
-
- if (!codec)
- {
- Info = add_b23(plci, &m_parms[3]);
- if (!Info)
- {
- nl_req_ncci(plci, ASSIGN, 0);
- send_req(plci);
- }
- }
- if (!Info)
- {
- dbug(1, dprintf("dir=0x%x,spoof=0x%x", dir, plci->spoofed_msg));
- if (plci->spoofed_msg == SPOOFING_REQUIRED)
- {
- api_save_msg(m_parms, "wbbs", &plci->saved_msg);
- plci->spoofed_msg = AWAITING_MANUF_CON;
- plci->internal_command = BLOCK_PLCI; /* reject other req meanwhile */
- plci->command = 0;
- send_req(plci);
- return false;
- }
- if (dir == 1) {
- sig_req(plci, CALL_REQ, 0);
- }
- else if (!dir) {
- sig_req(plci, LISTEN_REQ, 0);
- }
- send_req(plci);
- }
- else
- {
- sendf(appl,
- _MANUFACTURER_R | CONFIRM,
- Id,
- Number,
- "dww", _DI_MANU_ID, command, Info);
- return 2;
- }
- }
- }
- }
- else Info = _OUT_OF_PLCI;
- break;
-
- case _DI_IDI_CTRL:
- if (!plci)
- {
- Info = _WRONG_IDENTIFIER;
- break;
- }
- if (api_parse(&m->info[1], (word)m->length, "bs", m_parms)) {
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- req = m_parms[0].info[0];
- plci->command = _MANUFACTURER_R;
- plci->m_command = command;
- plci->number = Number;
- if (req == CALL_REQ)
- {
- plci->b_channel = getChannel(&m_parms[1]);
- mixer_set_bchannel_id_esc(plci, plci->b_channel);
- if (plci->spoofed_msg == SPOOFING_REQUIRED)
- {
- plci->spoofed_msg = CALL_REQ | AWAITING_MANUF_CON;
- plci->internal_command = BLOCK_PLCI; /* reject other req meanwhile */
- plci->command = 0;
- break;
- }
- }
- else if (req == LAW_REQ)
- {
- plci->cr_enquiry = true;
- }
- add_ss(plci, FTY, &m_parms[1]);
- sig_req(plci, req, 0);
- send_req(plci);
- if (req == HANGUP)
- {
- if (plci->NL.Id && !plci->nl_remove_id)
- {
- if (plci->channels)
- {
- for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
- {
- if ((a->ncci_plci[ncci] == plci->Id) && (a->ncci_state[ncci] == CONNECTED))
- {
- a->ncci_state[ncci] = OUTG_DIS_PENDING;
- cleanup_ncci_data(plci, ncci);
- nl_req_ncci(plci, N_DISC, (byte)ncci);
- }
- }
- }
- mixer_remove(plci);
- nl_req_ncci(plci, REMOVE, 0);
- send_req(plci);
- }
- }
- break;
-
- case _DI_SIG_CTRL:
- /* signalling control for loop activation B-channel */
- if (!plci)
- {
- Info = _WRONG_IDENTIFIER;
- break;
- }
- if (m->length) {
- plci->command = _MANUFACTURER_R;
- plci->number = Number;
- add_ss(plci, FTY, m);
- sig_req(plci, SIG_CTRL, 0);
- send_req(plci);
- }
- else Info = _WRONG_MESSAGE_FORMAT;
- break;
-
- case _DI_RXT_CTRL:
- /* activation control for receiver/transmitter B-channel */
- if (!plci)
- {
- Info = _WRONG_IDENTIFIER;
- break;
- }
- if (m->length) {
- plci->command = _MANUFACTURER_R;
- plci->number = Number;
- add_ss(plci, FTY, m);
- sig_req(plci, DSP_CTRL, 0);
- send_req(plci);
- }
- else Info = _WRONG_MESSAGE_FORMAT;
- break;
-
- case _DI_ADV_CODEC:
- case _DI_DSP_CTRL:
- /* TEL_CTRL commands to support non-standard adjustments: */
- /* ring on/off, handset micro volume, external micro volume, */
- /* handset+external speaker volume, receiver+transmitter gain, */
- /* handsfree on (hookinfo off), set mixer command */
-
- if (command == _DI_ADV_CODEC)
- {
- if (!a->AdvCodecPLCI) {
- Info = _WRONG_STATE;
- break;
- }
- v_plci = a->AdvCodecPLCI;
- }
- else
- {
- if (plci
- && (m->length >= 3)
- && (m->info[1] == 0x1c)
- && (m->info[2] >= 1))
- {
- if (m->info[3] == DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS)
- {
- if ((plci->tel != ADV_VOICE) || (plci != a->AdvSignalPLCI))
- {
- Info = _WRONG_STATE;
- break;
- }
- a->adv_voice_coef_length = m->info[2] - 1;
- if (a->adv_voice_coef_length > m->length - 3)
- a->adv_voice_coef_length = (byte)(m->length - 3);
- if (a->adv_voice_coef_length > ADV_VOICE_COEF_BUFFER_SIZE)
- a->adv_voice_coef_length = ADV_VOICE_COEF_BUFFER_SIZE;
- for (i = 0; i < a->adv_voice_coef_length; i++)
- a->adv_voice_coef_buffer[i] = m->info[4 + i];
- if (plci->B1_facilities & B1_FACILITY_VOICE)
- adv_voice_write_coefs(plci, ADV_VOICE_WRITE_UPDATE);
- break;
- }
- else if (m->info[3] == DSP_CTRL_SET_DTMF_PARAMETERS)
- {
- if (!(a->manufacturer_features & MANUFACTURER_FEATURE_DTMF_PARAMETERS))
- {
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
-
- plci->dtmf_parameter_length = m->info[2] - 1;
- if (plci->dtmf_parameter_length > m->length - 3)
- plci->dtmf_parameter_length = (byte)(m->length - 3);
- if (plci->dtmf_parameter_length > DTMF_PARAMETER_BUFFER_SIZE)
- plci->dtmf_parameter_length = DTMF_PARAMETER_BUFFER_SIZE;
- for (i = 0; i < plci->dtmf_parameter_length; i++)
- plci->dtmf_parameter_buffer[i] = m->info[4 + i];
- if (plci->B1_facilities & B1_FACILITY_DTMFR)
- dtmf_parameter_write(plci);
- break;
-
- }
- }
- v_plci = plci;
- }
-
- if (!v_plci)
- {
- Info = _WRONG_IDENTIFIER;
- break;
- }
- if (m->length) {
- add_ss(v_plci, FTY, m);
- sig_req(v_plci, TEL_CTRL, 0);
- send_req(v_plci);
- }
- else Info = _WRONG_MESSAGE_FORMAT;
-
- break;
-
- case _DI_OPTIONS_REQUEST:
- if (api_parse(&m->info[1], (word)m->length, "d", m_parms)) {
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- if (GET_DWORD(m_parms[0].info) & ~a->man_profile.private_options)
- {
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
- a->requested_options_table[appl->Id - 1] = GET_DWORD(m_parms[0].info);
- break;
-
-
-
- default:
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- }
-
- sendf(appl,
- _MANUFACTURER_R | CONFIRM,
- Id,
- Number,
- "dww", _DI_MANU_ID, command, Info);
- return false;
-}
-
-
-static byte manufacturer_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a,
- PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- word indication;
-
- API_PARSE m_parms[3];
- API_PARSE *ncpi;
- API_PARSE fax_parms[9];
- word i;
- byte len;
-
-
- dbug(1, dprintf("manufacturer_res"));
-
- if ((msg[0].length == 0)
- || (msg[1].length == 0)
- || (GET_DWORD(msg[0].info) != _DI_MANU_ID))
- {
- return false;
- }
- indication = GET_WORD(msg[1].info);
- switch (indication)
- {
-
- case _DI_NEGOTIATE_B3:
- if (!plci)
- break;
- if (((plci->B3_prot != 4) && (plci->B3_prot != 5))
- || !(plci->ncpi_state & NCPI_NEGOTIATE_B3_SENT))
- {
- dbug(1, dprintf("wrong state for NEGOTIATE_B3 parameters"));
- break;
- }
- if (api_parse(&msg[2].info[1], msg[2].length, "ws", m_parms))
- {
- dbug(1, dprintf("wrong format in NEGOTIATE_B3 parameters"));
- break;
- }
- ncpi = &m_parms[1];
- len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
- if (plci->fax_connect_info_length < len)
- {
- ((T30_INFO *)(plci->fax_connect_info_buffer))->station_id_len = 0;
- ((T30_INFO *)(plci->fax_connect_info_buffer))->head_line_len = 0;
- }
- if (api_parse(&ncpi->info[1], ncpi->length, "wwwwssss", fax_parms))
- {
- dbug(1, dprintf("non-standard facilities info missing or wrong format"));
- }
- else
- {
- if (plci->fax_connect_info_length <= len)
- plci->fax_connect_info_buffer[len] = 0;
- len += 1 + plci->fax_connect_info_buffer[len];
- if (plci->fax_connect_info_length <= len)
- plci->fax_connect_info_buffer[len] = 0;
- len += 1 + plci->fax_connect_info_buffer[len];
- if ((fax_parms[7].length >= 3) && (fax_parms[7].info[1] >= 2))
- plci->nsf_control_bits = GET_WORD(&fax_parms[7].info[2]);
- plci->fax_connect_info_buffer[len++] = (byte)(fax_parms[7].length);
- for (i = 0; i < fax_parms[7].length; i++)
- plci->fax_connect_info_buffer[len++] = fax_parms[7].info[1 + i];
- }
- plci->fax_connect_info_length = len;
- plci->fax_edata_ack_length = plci->fax_connect_info_length;
- start_internal_command(Id, plci, fax_edata_ack_command);
- break;
-
- }
- return false;
-}
-
-/*------------------------------------------------------------------*/
-/* IDI callback function */
-/*------------------------------------------------------------------*/
-
-void callback(ENTITY *e)
-{
- DIVA_CAPI_ADAPTER *a;
- APPL *appl;
- PLCI *plci;
- CAPI_MSG *m;
- word i, j;
- byte rc;
- byte ch;
- byte req;
- byte global_req;
- int no_cancel_rc;
-
- dbug(1, dprintf("%x:CB(%x:Req=%x,Rc=%x,Ind=%x)",
- (e->user[0] + 1) & 0x7fff, e->Id, e->Req, e->Rc, e->Ind));
-
- a = &(adapter[(byte)e->user[0]]);
- plci = &(a->plci[e->user[1]]);
- no_cancel_rc = DIVA_CAPI_SUPPORTS_NO_CANCEL(a);
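- /* e->user[0] carries the adapter index (bit 15 is set when the
-    entity belongs to the network layer rather than signalling),
-    e->user[1] the index of the PLCI on that adapter */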
-
- /*
-   If the new protocol code and the new XDI are used, then CAPI should
-   work fully in accordance with the IDI spec and look at the callback
-   field instead of the Rc field for return codes.
- */
- if (((e->complete == 0xff) && no_cancel_rc) ||
- (e->Rc && !no_cancel_rc)) {
- rc = e->Rc;
- ch = e->RcCh;
- req = e->Req;
- e->Rc = 0;
-
- if (e->user[0] & 0x8000)
- {
- /*
- If REMOVE request was sent then we have to wait until
- return code with Id set to zero arrives.
- All other return codes should be ignored.
- */
- if (req == REMOVE)
- {
- if (e->Id)
- {
- dbug(1, dprintf("cancel RC in REMOVE state"));
- return;
- }
- channel_flow_control_remove(plci);
- for (i = 0; i < 256; i++)
- {
- if (a->FlowControlIdTable[i] == plci->nl_remove_id)
- a->FlowControlIdTable[i] = 0;
- }
- plci->nl_remove_id = 0;
- if (plci->rx_dma_descriptor > 0) {
- diva_free_dma_descriptor(plci, plci->rx_dma_descriptor - 1);
- plci->rx_dma_descriptor = 0;
- }
- }
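- /* OK_FC: the request was accepted but the channel is flow controlled;
-    remember the entity id per channel so that matching return codes
-    can be cancelled later (see below) */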
- if (rc == OK_FC)
- {
- a->FlowControlIdTable[ch] = e->Id;
- a->FlowControlSkipTable[ch] = 0;
-
- a->ch_flow_control[ch] |= N_OK_FC_PENDING;
- a->ch_flow_plci[ch] = plci->Id;
- plci->nl_req = 0;
- }
- else
- {
- /*
- Cancel return codes self, if feature was requested
- */
- if (no_cancel_rc && (a->FlowControlIdTable[ch] == e->Id) && e->Id) {
- a->FlowControlIdTable[ch] = 0;
- if ((rc == OK) && a->FlowControlSkipTable[ch]) {
- dbug(3, dprintf("XDI CAPI: RC cancelled Id:0x02, Ch:%02x", e->Id, ch));
- return;
- }
- }
-
- if (a->ch_flow_control[ch] & N_OK_FC_PENDING)
- {
- a->ch_flow_control[ch] &= ~N_OK_FC_PENDING;
- if (ch == e->ReqCh)
- plci->nl_req = 0;
- }
- else
- plci->nl_req = 0;
- }
- if (plci->nl_req)
- control_rc(plci, 0, rc, ch, 0, true);
- else
- {
- if (req == N_XON)
- {
- channel_x_on(plci, ch);
- if (plci->internal_command)
- control_rc(plci, req, rc, ch, 0, true);
- }
- else
- {
- if (plci->nl_global_req)
- {
- global_req = plci->nl_global_req;
- plci->nl_global_req = 0;
- if (rc != ASSIGN_OK) {
- e->Id = 0;
- if (plci->rx_dma_descriptor > 0) {
- diva_free_dma_descriptor(plci, plci->rx_dma_descriptor - 1);
- plci->rx_dma_descriptor = 0;
- }
- }
- channel_xmit_xon(plci);
- control_rc(plci, 0, rc, ch, global_req, true);
- }
- else if (plci->data_sent)
- {
- channel_xmit_xon(plci);
- plci->data_sent = false;
- plci->NL.XNum = 1;
- data_rc(plci, ch);
- if (plci->internal_command)
- control_rc(plci, req, rc, ch, 0, true);
- }
- else
- {
- channel_xmit_xon(plci);
- control_rc(plci, req, rc, ch, 0, true);
- }
- }
- }
- }
- else
- {
- /*
- If REMOVE request was sent then we have to wait until
- return code with Id set to zero arrives.
- All other return codes should be ignored.
- */
- if (req == REMOVE)
- {
- if (e->Id)
- {
- dbug(1, dprintf("cancel RC in REMOVE state"));
- return;
- }
- plci->sig_remove_id = 0;
- }
- plci->sig_req = 0;
- if (plci->sig_global_req)
- {
- global_req = plci->sig_global_req;
- plci->sig_global_req = 0;
- if (rc != ASSIGN_OK)
- e->Id = 0;
- channel_xmit_xon(plci);
- control_rc(plci, 0, rc, ch, global_req, false);
- }
- else
- {
- channel_xmit_xon(plci);
- control_rc(plci, req, rc, ch, 0, false);
- }
- }
- /*
-   Again: in accordance with the IDI spec, Rc and Ind can't be delivered
-   in the same callback. Also, if the new XDI and protocol code are used,
-   jump directly to the finish.
- */
- if (no_cancel_rc) {
- channel_xmit_xon(plci);
- goto capi_callback_suffix;
- }
- }
-
- channel_xmit_xon(plci);
-
- if (e->Ind) {
- if (e->user[0] & 0x8000) {
- byte Ind = e->Ind & 0x0f;
- byte Ch = e->IndCh;
- if (((Ind == N_DISC) || (Ind == N_DISC_ACK)) &&
- (a->ch_flow_plci[Ch] == plci->Id)) {
- if (a->ch_flow_control[Ch] & N_RX_FLOW_CONTROL_MASK) {
- dbug(3, dprintf("XDI CAPI: I: pending N-XON Ch:%02x", Ch));
- }
- a->ch_flow_control[Ch] &= ~N_RX_FLOW_CONTROL_MASK;
- }
- nl_ind(plci);
- if ((e->RNR != 1) &&
- (a->ch_flow_plci[Ch] == plci->Id) &&
- (a->ch_flow_control[Ch] & N_RX_FLOW_CONTROL_MASK)) {
- a->ch_flow_control[Ch] &= ~N_RX_FLOW_CONTROL_MASK;
- dbug(3, dprintf("XDI CAPI: I: remove faked N-XON Ch:%02x", Ch));
- }
- } else {
- sig_ind(plci);
- }
- e->Ind = 0;
- }
-
-capi_callback_suffix:
-
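- /* replay CAPI messages that were queued while a request or internal
-    command was outstanding: msg_in_queue is a byte ring buffer where
-    each entry holds the message, rounded up to a dword boundary,
-    followed by the APPL pointer (MSG_IN_OVERHEAD) */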
- while (!plci->req_in
- && !plci->internal_command
- && (plci->msg_in_write_pos != plci->msg_in_read_pos))
- {
- j = (plci->msg_in_read_pos == plci->msg_in_wrap_pos) ? 0 : plci->msg_in_read_pos;
-
- i = (((CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]))->header.length + 3) & 0xfffc;
-
- m = (CAPI_MSG *)(&((byte *)(plci->msg_in_queue))[j]);
- appl = *((APPL **)(&((byte *)(plci->msg_in_queue))[j + i]));
- dbug(1, dprintf("dequeue msg(0x%04x) - write=%d read=%d wrap=%d",
- m->header.command, plci->msg_in_write_pos, plci->msg_in_read_pos, plci->msg_in_wrap_pos));
- if (plci->msg_in_read_pos == plci->msg_in_wrap_pos)
- {
- plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
- plci->msg_in_read_pos = i + MSG_IN_OVERHEAD;
- }
- else
- {
- plci->msg_in_read_pos = j + i + MSG_IN_OVERHEAD;
- }
- if (plci->msg_in_read_pos == plci->msg_in_write_pos)
- {
- plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
- plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
- }
- else if (plci->msg_in_read_pos == plci->msg_in_wrap_pos)
- {
- plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
- plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
- }
- i = api_put(appl, m);
- if (i != 0)
- {
- if (m->header.command == _DATA_B3_R)
-
- TransmitBufferFree(appl, (byte *)(long)(m->info.data_b3_req.Data));
-
- dbug(1, dprintf("Error 0x%04x from msg(0x%04x)", i, m->header.command));
- break;
- }
-
- if (plci->li_notify_update)
- {
- plci->li_notify_update = false;
- mixer_notify_update(plci, false);
- }
-
- }
- send_data(plci);
- send_req(plci);
-}
-
-
-static void control_rc(PLCI *plci, byte req, byte rc, byte ch, byte global_req,
- byte nl_rc)
-{
- dword Id;
- dword rId;
- word Number;
- word Info = 0;
- word i;
- word ncci;
- DIVA_CAPI_ADAPTER *a;
- APPL *appl;
- PLCI *rplci;
- byte SSparms[] = "\x05\x00\x00\x02\x00\x00";
- byte SSstruct[] = "\x09\x00\x00\x06\x00\x00\x00\x00\x00\x00";
-
- if (!plci) {
- dbug(0, dprintf("A: control_rc, no plci %02x:%02x:%02x:%02x:%02x", req, rc, ch, global_req, nl_rc));
- return;
- }
- dbug(1, dprintf("req0_in/out=%d/%d", plci->req_in, plci->req_out));
- if (plci->req_in != plci->req_out)
- {
- if (nl_rc || (global_req != ASSIGN) || (rc == ASSIGN_OK))
- {
- dbug(1, dprintf("req_1return"));
- return;
- }
- /* cancel outstanding request on the PLCI after SIG ASSIGN failure */
- }
- plci->req_in = plci->req_in_start = plci->req_out = 0;
- dbug(1, dprintf("control_rc"));
-
- appl = plci->appl;
- a = plci->adapter;
- ncci = a->ch_ncci[ch];
- if (appl)
- {
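- /* rebuild the CAPI dword Id: controller in bits 0..7, PLCI in bits
-    8..15, and the NCCI (or the raw channel while no NCCI is mapped
-    yet) in bits 16..31 */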
- Id = (((dword)(ncci ? ncci : ch)) << 16) | ((word)plci->Id << 8) | a->Id;
- if (plci->tel && plci->SuppState != CALL_HELD) Id |= EXT_CONTROLLER;
- Number = plci->number;
- dbug(1, dprintf("Contr_RC-Id=%08lx,plci=%x,tel=%x, entity=0x%x, command=0x%x, int_command=0x%x", Id, plci->Id, plci->tel, plci->Sig.Id, plci->command, plci->internal_command));
- dbug(1, dprintf("channels=0x%x", plci->channels));
- if (plci_remove_check(plci))
- return;
- if (req == REMOVE && rc == ASSIGN_OK)
- {
- sig_req(plci, HANGUP, 0);
- sig_req(plci, REMOVE, 0);
- send_req(plci);
- }
- if (plci->command)
- {
- switch (plci->command)
- {
- case C_HOLD_REQ:
- dbug(1, dprintf("HoldRC=0x%x", rc));
- SSparms[1] = (byte)S_HOLD;
- if (rc != OK)
- {
- plci->SuppState = IDLE;
- Info = 0x2001;
- }
- sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", Info, 3, SSparms);
- break;
-
- case C_RETRIEVE_REQ:
- dbug(1, dprintf("RetrieveRC=0x%x", rc));
- SSparms[1] = (byte)S_RETRIEVE;
- if (rc != OK)
- {
- plci->SuppState = CALL_HELD;
- Info = 0x2001;
- }
- sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", Info, 3, SSparms);
- break;
-
- case _INFO_R:
- dbug(1, dprintf("InfoRC=0x%x", rc));
- if (rc != OK) Info = _WRONG_STATE;
- sendf(appl, _INFO_R | CONFIRM, Id, Number, "w", Info);
- break;
-
- case _CONNECT_R:
- dbug(1, dprintf("Connect_R=0x%x/0x%x/0x%x/0x%x", req, rc, global_req, nl_rc));
- if (plci->State == INC_DIS_PENDING)
- break;
- if (plci->Sig.Id != 0xff)
- {
- if (((global_req == ASSIGN) && (rc != ASSIGN_OK))
- || (!nl_rc && (req == CALL_REQ) && (rc != OK)))
- {
- dbug(1, dprintf("No more IDs/Call_Req failed"));
- sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
- plci_remove(plci);
- plci->State = IDLE;
- break;
- }
- if (plci->State != LOCAL_CONNECT) plci->State = OUTG_CON_PENDING;
- sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
- }
- else /* D-ch activation */
- {
- if (rc != ASSIGN_OK)
- {
- dbug(1, dprintf("No more IDs/X.25 Call_Req failed"));
- sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
- plci_remove(plci);
- plci->State = IDLE;
- break;
- }
- sendf(appl, _CONNECT_R | CONFIRM, Id, Number, "w", 0);
- sendf(plci->appl, _CONNECT_ACTIVE_I, Id, 0, "sss", "", "", "");
- plci->State = INC_ACT_PENDING;
- }
- break;
-
- case _CONNECT_I | RESPONSE:
- if (plci->State != INC_DIS_PENDING)
- plci->State = INC_CON_ACCEPT;
- break;
-
- case _DISCONNECT_R:
- if (plci->State == INC_DIS_PENDING)
- break;
- if (plci->Sig.Id != 0xff)
- {
- plci->State = OUTG_DIS_PENDING;
- sendf(appl, _DISCONNECT_R | CONFIRM, Id, Number, "w", 0);
- }
- break;
-
- case SUSPEND_REQ:
- break;
-
- case RESUME_REQ:
- break;
-
- case _CONNECT_B3_R:
- if (rc != OK)
- {
- sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", _WRONG_IDENTIFIER);
- break;
- }
- ncci = get_ncci(plci, ch, 0);
- Id = (Id & 0xffff) | (((dword) ncci) << 16);
- plci->channels++;
- if (req == N_RESET)
- {
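- /* N_RESET is used here for a PVC (see connect_b3_req): the permanent
-    circuit is already established, so report the connection active
-    immediately */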
- a->ncci_state[ncci] = INC_ACT_PENDING;
- sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
- sendf(appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
- }
- else
- {
- a->ncci_state[ncci] = OUTG_CON_PENDING;
- sendf(appl, _CONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
- }
- break;
-
- case _CONNECT_B3_I | RESPONSE:
- break;
-
- case _RESET_B3_R:
-/* sendf(appl, _RESET_B3_R | CONFIRM, Id, Number, "w", 0);*/
- break;
-
- case _DISCONNECT_B3_R:
- sendf(appl, _DISCONNECT_B3_R | CONFIRM, Id, Number, "w", 0);
- break;
-
- case _MANUFACTURER_R:
- break;
-
- case PERM_LIST_REQ:
- if (rc != OK)
- {
- Info = _WRONG_IDENTIFIER;
- sendf(plci->appl, _CONNECT_R | CONFIRM, Id, Number, "w", Info);
- plci_remove(plci);
- }
- else
- sendf(plci->appl, _CONNECT_R | CONFIRM, Id, Number, "w", Info);
- break;
-
- default:
- break;
- }
- plci->command = 0;
- }
- else if (plci->internal_command)
- {
- switch (plci->internal_command)
- {
- case BLOCK_PLCI:
- return;
-
- case GET_MWI_STATE:
- if (rc == OK) /* command supported, wait for indication */
- {
- return;
- }
- plci_remove(plci);
- break;
-
- /* Get Supported Services */
- case GETSERV_REQ_PEND:
- if (rc == OK) /* command supported, wait for indication */
- {
- break;
- }
- PUT_DWORD(&SSstruct[6], MASK_TERMINAL_PORTABILITY);
- sendf(appl, _FACILITY_R | CONFIRM, Id, Number, "wws", 0, 3, SSstruct);
- plci_remove(plci);
- break;
-
- case INTERR_DIVERSION_REQ_PEND: /* Interrogate Parameters */
- case INTERR_NUMBERS_REQ_PEND:
- case CF_START_PEND: /* Call Forwarding Start pending */
- case CF_STOP_PEND: /* Call Forwarding Stop pending */
- case CCBS_REQUEST_REQ_PEND:
- case CCBS_DEACTIVATE_REQ_PEND:
- case CCBS_INTERROGATE_REQ_PEND:
- switch (plci->internal_command)
- {
- case INTERR_DIVERSION_REQ_PEND:
- SSparms[1] = S_INTERROGATE_DIVERSION;
- break;
- case INTERR_NUMBERS_REQ_PEND:
- SSparms[1] = S_INTERROGATE_NUMBERS;
- break;
- case CF_START_PEND:
- SSparms[1] = S_CALL_FORWARDING_START;
- break;
- case CF_STOP_PEND:
- SSparms[1] = S_CALL_FORWARDING_STOP;
- break;
- case CCBS_REQUEST_REQ_PEND:
- SSparms[1] = S_CCBS_REQUEST;
- break;
- case CCBS_DEACTIVATE_REQ_PEND:
- SSparms[1] = S_CCBS_DEACTIVATE;
- break;
- case CCBS_INTERROGATE_REQ_PEND:
- SSparms[1] = S_CCBS_INTERROGATE;
- break;
- }
- if (global_req == ASSIGN)
- {
- dbug(1, dprintf("AssignDiversion_RC=0x%x/0x%x", req, rc));
- return;
- }
- if (!plci->appl) break;
- if (rc == ISDN_GUARD_REJ)
- {
- Info = _CAPI_GUARD_ERROR;
- }
- else if (rc != OK)
- {
- Info = _SUPPLEMENTARY_SERVICE_NOT_SUPPORTED;
- }
- sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0x7,
- plci->number, "wws", Info, (word)3, SSparms);
- if (Info) plci_remove(plci);
- break;
-
- /* 3pty conference pending */
- case PTY_REQ_PEND:
- if (!plci->relatedPTYPLCI) break;
- rplci = plci->relatedPTYPLCI;
- SSparms[1] = plci->ptyState;
- rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
- if (rplci->tel) rId |= EXT_CONTROLLER;
- if (rc != OK)
- {
- Info = 0x300E; /* not supported */
- plci->relatedPTYPLCI = NULL;
- plci->ptyState = 0;
- }
- sendf(rplci->appl,
- _FACILITY_R | CONFIRM,
- rId,
- plci->number,
- "wws", Info, (word)3, SSparms);
- break;
-
- /* Explicit Call Transfer pending */
- case ECT_REQ_PEND:
- dbug(1, dprintf("ECT_RC=0x%x/0x%x", req, rc));
- if (!plci->relatedPTYPLCI) break;
- rplci = plci->relatedPTYPLCI;
- SSparms[1] = S_ECT;
- rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
- if (rplci->tel) rId |= EXT_CONTROLLER;
- if (rc != OK)
- {
- Info = 0x300E; /* not supported */
- plci->relatedPTYPLCI = NULL;
- plci->ptyState = 0;
- }
- sendf(rplci->appl,
- _FACILITY_R | CONFIRM,
- rId,
- plci->number,
- "wws", Info, (word)3, SSparms);
- break;
-
- case _MANUFACTURER_R:
- dbug(1, dprintf("_Manufacturer_R=0x%x/0x%x", req, rc));
- if ((global_req == ASSIGN) && (rc != ASSIGN_OK))
- {
- dbug(1, dprintf("No more IDs"));
- sendf(appl, _MANUFACTURER_R | CONFIRM, Id, Number, "dww", _DI_MANU_ID, _MANUFACTURER_R, _OUT_OF_PLCI);
- plci_remove(plci); /* after codec init, internal codec commands pending */
- }
- break;
-
- case _CONNECT_R:
- dbug(1, dprintf("_Connect_R=0x%x/0x%x", req, rc));
- if ((global_req == ASSIGN) && (rc != ASSIGN_OK))
- {
- dbug(1, dprintf("No more IDs"));
- sendf(appl, _CONNECT_R | CONFIRM, Id & 0xffL, Number, "w", _OUT_OF_PLCI);
- plci_remove(plci); /* after codec init, internal codec commands pending */
- }
- break;
-
- case PERM_COD_HOOK: /* finished with Hook_Ind */
- return;
-
- case PERM_COD_CALL:
- dbug(1, dprintf("***Codec Connect_Pending A, Rc = 0x%x", rc));
- plci->internal_command = PERM_COD_CONN_PEND;
- return;
-
- case PERM_COD_ASSIGN:
- dbug(1, dprintf("***Codec Assign A, Rc = 0x%x", rc));
- if (rc != ASSIGN_OK) break;
- sig_req(plci, CALL_REQ, 0);
- send_req(plci);
- plci->internal_command = PERM_COD_CALL;
- return;
-
- /* Null Call Reference Request pending */
- case C_NCR_FAC_REQ:
- dbug(1, dprintf("NCR_FAC=0x%x/0x%x", req, rc));
- if (global_req == ASSIGN)
- {
- if (rc == ASSIGN_OK)
- {
- return;
- }
- else
- {
- sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", _WRONG_STATE);
- appl->NullCREnable = false;
- plci_remove(plci);
- }
- }
- else if (req == NCR_FACILITY)
- {
- if (rc == OK)
- {
- sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", 0);
- }
- else
- {
- sendf(appl, _INFO_R | CONFIRM, Id & 0xf, Number, "w", _WRONG_STATE);
- appl->NullCREnable = false;
- }
- plci_remove(plci);
- }
- break;
-
- case HOOK_ON_REQ:
- if (plci->channels)
- {
- if (a->ncci_state[ncci] == CONNECTED)
- {
- a->ncci_state[ncci] = OUTG_DIS_PENDING;
- cleanup_ncci_data(plci, ncci);
- nl_req_ncci(plci, N_DISC, (byte)ncci);
- }
- break;
- }
- break;
-
- case HOOK_OFF_REQ:
- if (plci->State == INC_DIS_PENDING)
- break;
- sig_req(plci, CALL_REQ, 0);
- send_req(plci);
- plci->State = OUTG_CON_PENDING;
- break;
-
-
- case MWI_ACTIVATE_REQ_PEND:
- case MWI_DEACTIVATE_REQ_PEND:
- if (global_req == ASSIGN && rc == ASSIGN_OK)
- {
- dbug(1, dprintf("MWI_REQ assigned"));
- return;
- }
- else if (rc != OK)
- {
- if (rc == WRONG_IE)
- {
- Info = 0x2007; /* Illegal message parameter coding */
- dbug(1, dprintf("MWI_REQ invalid parameter"));
- }
- else
- {
- Info = 0x300B; /* not supported */
- dbug(1, dprintf("MWI_REQ not supported"));
- }
- /* (0x3010 would mean: request not allowed in this state) */
- PUT_WORD(&SSparms[4], 0x300E); /* SS not supported */
-
- }
- if (plci->internal_command == MWI_ACTIVATE_REQ_PEND)
- {
- PUT_WORD(&SSparms[1], S_MWI_ACTIVATE);
- }
- else PUT_WORD(&SSparms[1], S_MWI_DEACTIVATE);
-
- if (plci->cr_enquiry)
- {
- sendf(plci->appl,
- _FACILITY_R | CONFIRM,
- Id & 0xf,
- plci->number,
- "wws", Info, (word)3, SSparms);
- if (rc != OK) plci_remove(plci);
- }
- else
- {
- sendf(plci->appl,
- _FACILITY_R | CONFIRM,
- Id,
- plci->number,
- "wws", Info, (word)3, SSparms);
- }
- break;
-
- case CONF_BEGIN_REQ_PEND:
- case CONF_ADD_REQ_PEND:
- case CONF_SPLIT_REQ_PEND:
- case CONF_DROP_REQ_PEND:
- case CONF_ISOLATE_REQ_PEND:
- case CONF_REATTACH_REQ_PEND:
- dbug(1, dprintf("CONF_RC=0x%x/0x%x", req, rc));
- if ((plci->internal_command == CONF_ADD_REQ_PEND) && (!plci->relatedPTYPLCI)) break;
- rplci = plci;
- rId = Id;
- switch (plci->internal_command)
- {
- case CONF_BEGIN_REQ_PEND:
- SSparms[1] = S_CONF_BEGIN;
- break;
- case CONF_ADD_REQ_PEND:
- SSparms[1] = S_CONF_ADD;
- rplci = plci->relatedPTYPLCI;
- rId = ((word)rplci->Id << 8) | rplci->adapter->Id;
- break;
- case CONF_SPLIT_REQ_PEND:
- SSparms[1] = S_CONF_SPLIT;
- break;
- case CONF_DROP_REQ_PEND:
- SSparms[1] = S_CONF_DROP;
- break;
- case CONF_ISOLATE_REQ_PEND:
- SSparms[1] = S_CONF_ISOLATE;
- break;
- case CONF_REATTACH_REQ_PEND:
- SSparms[1] = S_CONF_REATTACH;
- break;
- }
-
- if (rc != OK)
- {
- Info = 0x300E; /* not supported */
- plci->relatedPTYPLCI = NULL;
- plci->ptyState = 0;
- }
- sendf(rplci->appl,
- _FACILITY_R | CONFIRM,
- rId,
- plci->number,
- "wws", Info, (word)3, SSparms);
- break;
-
- case VSWITCH_REQ_PEND:
- if (rc != OK)
- {
- if (plci->relatedPTYPLCI)
- {
- plci->relatedPTYPLCI->vswitchstate = 0;
- plci->relatedPTYPLCI->vsprot = 0;
- plci->relatedPTYPLCI->vsprotdialect = 0;
- }
- plci->vswitchstate = 0;
- plci->vsprot = 0;
- plci->vsprotdialect = 0;
- }
- else
- {
- if (plci->relatedPTYPLCI &&
- plci->vswitchstate == 1 &&
- plci->relatedPTYPLCI->vswitchstate == 3) /* join complete */
- plci->vswitchstate = 3;
- }
- break;
-
- /* Call Deflection Request pending (SSCT) */
- case CD_REQ_PEND:
- SSparms[1] = S_CALL_DEFLECTION;
- if (rc != OK)
- {
- Info = 0x300E; /* not supported */
- plci->appl->CDEnable = 0;
- }
- sendf(plci->appl, _FACILITY_R | CONFIRM, Id,
- plci->number, "wws", Info, (word)3, SSparms);
- break;
-
- case RTP_CONNECT_B3_REQ_COMMAND_2:
- if (rc == OK)
- {
- ncci = get_ncci(plci, ch, 0);
- Id = (Id & 0xffff) | (((dword) ncci) << 16);
- plci->channels++;
- a->ncci_state[ncci] = OUTG_CON_PENDING;
- }
- /* fall through */
-
- default:
- if (plci->internal_command_queue[0])
- {
- (*(plci->internal_command_queue[0]))(Id, plci, rc);
- if (plci->internal_command)
- return;
- }
- break;
- }
- next_internal_command(Id, plci);
- }
- }
- else /* appl==0 */
- {
- Id = ((word)plci->Id << 8) | plci->adapter->Id;
- if (plci->tel) Id |= EXT_CONTROLLER;
-
- switch (plci->internal_command)
- {
- case BLOCK_PLCI:
- return;
-
- case START_L1_SIG_ASSIGN_PEND:
- case REM_L1_SIG_ASSIGN_PEND:
- if (global_req == ASSIGN)
- {
- break;
- }
- else
- {
- dbug(1, dprintf("***L1 Req rem PLCI"));
- plci->internal_command = 0;
- sig_req(plci, REMOVE, 0);
- send_req(plci);
- }
- break;
-
- /* Call Deflection Request pending, just no appl ptr assigned */
- case CD_REQ_PEND:
- SSparms[1] = S_CALL_DEFLECTION;
- if (rc != OK)
- {
- Info = 0x300E; /* not supported */
- }
- for (i = 0; i < max_appl; i++)
- {
- if (application[i].CDEnable)
- {
- if (!application[i].Id) application[i].CDEnable = 0;
- else
- {
- sendf(&application[i], _FACILITY_R | CONFIRM, Id,
- plci->number, "wws", Info, (word)3, SSparms);
- if (Info) application[i].CDEnable = 0;
- }
- }
- }
- plci->internal_command = 0;
- break;
-
- case PERM_COD_HOOK: /* finished with Hook_Ind */
- return;
-
- case PERM_COD_CALL:
- plci->internal_command = PERM_COD_CONN_PEND;
- dbug(1, dprintf("***Codec Connect_Pending, Rc = 0x%x", rc));
- return;
-
- case PERM_COD_ASSIGN:
- dbug(1, dprintf("***Codec Assign, Rc = 0x%x", rc));
- plci->internal_command = 0;
- if (rc != ASSIGN_OK) break;
- plci->internal_command = PERM_COD_CALL;
- sig_req(plci, CALL_REQ, 0);
- send_req(plci);
- return;
-
- case LISTEN_SIG_ASSIGN_PEND:
- if (rc == ASSIGN_OK)
- {
- plci->internal_command = 0;
- dbug(1, dprintf("ListenCheck, new SIG_ID = 0x%x", plci->Sig.Id));
- add_p(plci, ESC, "\x02\x18\x00"); /* support call waiting */
- sig_req(plci, INDICATE_REQ, 0);
- send_req(plci);
- }
- else
- {
- dbug(1, dprintf("ListenCheck failed (assignRc=0x%x)", rc));
- a->listen_active--;
- plci_remove(plci);
- plci->State = IDLE;
- }
- break;
-
- case USELAW_REQ:
- if (global_req == ASSIGN)
- {
- if (rc == ASSIGN_OK)
- {
- sig_req(plci, LAW_REQ, 0);
- send_req(plci);
- dbug(1, dprintf("Auto-Law assigned"));
- }
- else
- {
- dbug(1, dprintf("Auto-Law assign failed"));
- a->automatic_law = 3;
- plci->internal_command = 0;
- a->automatic_lawPLCI = NULL;
- }
- break;
- }
- else if (req == LAW_REQ && rc == OK)
- {
- dbug(1, dprintf("Auto-Law initiated"));
- a->automatic_law = 2;
- plci->internal_command = 0;
- }
- else
- {
- dbug(1, dprintf("Auto-Law not supported"));
- a->automatic_law = 3;
- plci->internal_command = 0;
- sig_req(plci, REMOVE, 0);
- send_req(plci);
- a->automatic_lawPLCI = NULL;
- }
- break;
- }
- plci_remove_check(plci);
- }
-}
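The default arm above hands a completed request code to the head of plci->internal_command_queue and only advances via next_internal_command() once the handler has finished; a minimal sketch of that dispatch, assuming the queue entries have the (dword, PLCI *, byte) signature used in the call above (the helper name is hypothetical):

    typedef void (*cmd_fn)(dword Id, PLCI *plci, byte rc);

    static void dispatch_internal_rc(dword Id, PLCI *plci, byte rc)
    {
        cmd_fn head = plci->internal_command_queue[0];

        if (head) {
            head(Id, plci, rc);              /* run the queued handler */
            if (plci->internal_command)
                return;                      /* handler re-armed, wait for next rc */
        }
        next_internal_command(Id, plci);     /* otherwise advance the queue */
    }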
-
-static void data_rc(PLCI *plci, byte ch)
-{
- dword Id;
- DIVA_CAPI_ADAPTER *a;
- NCCI *ncci_ptr;
- DATA_B3_DESC *data;
- word ncci;
-
- if (plci->appl)
- {
- TransmitBufferFree(plci->appl, plci->data_sent_ptr);
- a = plci->adapter;
- ncci = a->ch_ncci[ch];
- if (ncci && (a->ncci_plci[ncci] == plci->Id))
- {
- ncci_ptr = &(a->ncci[ncci]);
- dbug(1, dprintf("data_out=%d, data_pending=%d", ncci_ptr->data_out, ncci_ptr->data_pending));
- if (ncci_ptr->data_pending)
- {
- data = &(ncci_ptr->DBuffer[ncci_ptr->data_out]);
- if (!(data->Flags & 4) && a->ncci_state[ncci])
- {
- Id = (((dword)ncci) << 16) | ((word)plci->Id << 8) | a->Id;
- if (plci->tel) Id |= EXT_CONTROLLER;
- sendf(plci->appl, _DATA_B3_R | CONFIRM, Id, data->Number,
- "ww", data->Handle, 0);
- }
- (ncci_ptr->data_out)++;
- if (ncci_ptr->data_out == MAX_DATA_B3)
- ncci_ptr->data_out = 0;
- (ncci_ptr->data_pending)--;
- }
- }
- }
-}
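data_rc() above drains one slot of the per-NCCI DATA_B3 ring before confirming the buffer; the advance step in isolation, reusing the field names from the function (the helper itself is hypothetical):

    /* consume the oldest pending descriptor and wrap the ring index */
    static void dbuffer_advance(NCCI *ncci_ptr)
    {
        ncci_ptr->data_out++;
        if (ncci_ptr->data_out == MAX_DATA_B3)
            ncci_ptr->data_out = 0;      /* fixed-size circular buffer */
        ncci_ptr->data_pending--;        /* one fewer unconfirmed buffer */
    }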
-
-static void data_ack(PLCI *plci, byte ch)
-{
- dword Id;
- DIVA_CAPI_ADAPTER *a;
- NCCI *ncci_ptr;
- word ncci;
-
- a = plci->adapter;
- ncci = a->ch_ncci[ch];
- ncci_ptr = &(a->ncci[ncci]);
- if (ncci_ptr->data_ack_pending)
- {
- if (a->ncci_state[ncci] && (a->ncci_plci[ncci] == plci->Id))
- {
- Id = (((dword)ncci) << 16) | ((word)plci->Id << 8) | a->Id;
- if (plci->tel) Id |= EXT_CONTROLLER;
- sendf(plci->appl, _DATA_B3_R | CONFIRM, Id, ncci_ptr->DataAck[ncci_ptr->data_ack_out].Number,
- "ww", ncci_ptr->DataAck[ncci_ptr->data_ack_out].Handle, 0);
- }
- (ncci_ptr->data_ack_out)++;
- if (ncci_ptr->data_ack_out == MAX_DATA_ACK)
- ncci_ptr->data_ack_out = 0;
- (ncci_ptr->data_ack_pending)--;
- }
-}
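Both confirmation paths rebuild the CAPI dword Id with the same shifts; a sketch of that packing, with the field layout inferred from the expressions above (the helper name is hypothetical):

    /* bits 0..7: controller, bits 8..15: PLCI, bits 16..31: NCCI */
    static dword pack_capi_id(DIVA_CAPI_ADAPTER *a, PLCI *plci, word ncci)
    {
        dword Id = (((dword)ncci) << 16) | ((word)plci->Id << 8) | a->Id;

        if (plci->tel)
            Id |= EXT_CONTROLLER;    /* call runs over the external codec */
        return Id;
    }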
-
-static void sig_ind(PLCI *plci)
-{
- dword x_Id;
- dword Id;
- dword rId;
- word i;
- word cip;
- dword cip_mask;
- byte *ie;
- DIVA_CAPI_ADAPTER *a;
- API_PARSE saved_parms[MAX_MSG_PARMS + 1];
-#define MAXPARMSIDS 31
- byte *parms[MAXPARMSIDS];
- byte *add_i[4];
- byte *multi_fac_parms[MAX_MULTI_IE];
- byte *multi_pi_parms[MAX_MULTI_IE];
- byte *multi_ssext_parms[MAX_MULTI_IE];
- byte *multi_CiPN_parms[MAX_MULTI_IE];
-
- byte *multi_vswitch_parms[MAX_MULTI_IE];
-
- byte ai_len;
- byte *esc_chi = "";
- byte *esc_law = "";
- byte *pty_cai = "";
- byte *esc_cr = "";
- byte *esc_profile = "";
-
- byte facility[256];
- PLCI *tplci = NULL;
- byte chi[] = "\x02\x18\x01";
- byte voice_cai[] = "\x06\x14\x00\x00\x00\x00\x08";
- byte resume_cau[] = "\x05\x05\x00\x02\x00\x00";
- /* ESC_MSGTYPE must be the last but one entry; a new IE has to be */
- /* inserted before ESC_MSGTYPE, and MAXPARMSIDS incremented accordingly. */
- /* SMSG sits at the end because it is 0, for compatibility reasons */
- /* (see Info_Mask bit 4: first the IE, then the message type). */
- static const word parms_id[] =
- {MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA,
- UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW,
- RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR,
- CST, ESC_PROFILE, 0xff, ESC_MSGTYPE, SMSG};
- /* index 14: FTY replaced by ESC_CHI */
- /* index 18: PI replaced by ESC_LAW */
- /* OAD removed (changed to 0xff for future use); OAD is a multi-IE now */
- static const word multi_fac_id[] = {1, FTY};
- static const word multi_pi_id[] = {1, PI};
- static const word multi_CiPN_id[] = {1, OAD};
- static const word multi_ssext_id[] = {1, ESC_SSEXT};
-
- static const word multi_vswitch_id[] = {1, ESC_VSWITCH};
-
- byte *cau;
- word ncci;
- byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct */
- byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
- byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
- byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
- byte force_mt_info = false;
- byte dir;
- dword d;
- word w;
-
- a = plci->adapter;
- Id = ((word)plci->Id << 8) | a->Id;
- PUT_WORD(&SS_Ind[4], 0x0000);
-
- if (plci->sig_remove_id)
- {
- plci->Sig.RNR = 2; /* discard */
- dbug(1, dprintf("SIG discard while remove pending"));
- return;
- }
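- /* Sig.RNR apparently acts as a receive handshake for SIG indications: */
- /* 1 = busy, the indication is repeated later (flow control); */
- /* 2 = discard it outright, as in the remove-pending case above */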
- if (plci->tel && plci->SuppState != CALL_HELD) Id |= EXT_CONTROLLER;
- dbug(1, dprintf("SigInd-Id=%08lx,plci=%x,tel=%x,state=0x%x,channels=%d,Discflowcl=%d",
- Id, plci->Id, plci->tel, plci->State, plci->channels, plci->hangup_flow_ctrl_timer));
- if (plci->Sig.Ind == CALL_HOLD_ACK && plci->channels)
- {
- plci->Sig.RNR = 1;
- return;
- }
- if (plci->Sig.Ind == HANGUP && plci->channels)
- {
- plci->Sig.RNR = 1;
- plci->hangup_flow_ctrl_timer++;
- /* recover the network layer after timeout */
- if (plci->hangup_flow_ctrl_timer == 100)
- {
- dbug(1, dprintf("Exceptional disc"));
- plci->Sig.RNR = 0;
- plci->hangup_flow_ctrl_timer = 0;
- for (ncci = 1; ncci < MAX_NCCI + 1; ncci++)
- {
- if (a->ncci_plci[ncci] == plci->Id)
- {
- cleanup_ncci_data(plci, ncci);
- if (plci->channels) plci->channels--;
- if (plci->appl)
- sendf(plci->appl, _DISCONNECT_B3_I, (((dword) ncci) << 16) | Id, 0, "ws", 0, "");
- }
- }
- if (plci->appl)
- sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", 0);
- plci_remove(plci);
- plci->State = IDLE;
- }
- return;
- }
-
- /* first parse the info without the OAD, because OAD will be converted: */
- /* first the multiple facility IEs, then the multiple progress indicators, */
- /* then the parameters for the info_ind + conn_ind */
- IndParse(plci, multi_fac_id, multi_fac_parms, MAX_MULTI_IE);
- IndParse(plci, multi_pi_id, multi_pi_parms, MAX_MULTI_IE);
- IndParse(plci, multi_ssext_id, multi_ssext_parms, MAX_MULTI_IE);
-
- IndParse(plci, multi_vswitch_id, multi_vswitch_parms, MAX_MULTI_IE);
-
- IndParse(plci, parms_id, parms, 0);
- IndParse(plci, multi_CiPN_id, multi_CiPN_parms, MAX_MULTI_IE);
- esc_chi = parms[14];
- esc_law = parms[18];
- pty_cai = parms[24];
- esc_cr = parms[25];
- esc_profile = parms[27];
- if (esc_cr[0] && plci)
- {
- if (plci->cr_enquiry && plci->appl)
- {
- plci->cr_enquiry = false;
- /* d = MANU_ID */
- /* w = m_command */
- /* b = total length */
- /* b = indication type */
- /* b = length of all IEs */
- /* b = IE1 */
- /* S = IE1 length + cont. */
- /* b = IE2 */
- /* S = IE2 length + cont. */
- sendf(plci->appl,
- _MANUFACTURER_I,
- Id,
- 0,
- "dwbbbbSbS", _DI_MANU_ID, plci->m_command,
- 2 + 1 + 1 + esc_cr[0] + 1 + 1 + esc_law[0], plci->Sig.Ind, 1 + 1 + esc_cr[0] + 1 + 1 + esc_law[0], ESC, esc_cr, ESC, esc_law);
- }
- }
- /* create the additional info structure */
- add_i[1] = parms[15]; /* KEY of additional info */
- add_i[2] = parms[11]; /* UUI of additional info */
- ai_len = AddInfo(add_i, multi_fac_parms, esc_chi, facility);
-
- /* ESC_LAW indicates whether u-Law or a-Law is actually used by the card; */
- /* the indication is returned by the card when requested via the function */
- /* AutomaticLaw() after driver init */
- if (a->automatic_law < 4)
- {
- if (esc_law[0]) {
- if (esc_law[2]) {
- dbug(0, dprintf("u-Law selected"));
- a->u_law = 1;
- }
- else {
- dbug(0, dprintf("a-Law selected"));
- a->u_law = 0;
- }
- a->automatic_law = 4;
- if (plci == a->automatic_lawPLCI) {
- plci->internal_command = 0;
- sig_req(plci, REMOVE, 0);
- send_req(plci);
- a->automatic_lawPLCI = NULL;
- }
- }
- if (esc_profile[0])
- {
- dbug(1, dprintf("[%06x] CardProfile: %lx %lx %lx %lx %lx",
- UnMapController(a->Id), GET_DWORD(&esc_profile[6]),
- GET_DWORD(&esc_profile[10]), GET_DWORD(&esc_profile[14]),
- GET_DWORD(&esc_profile[18]), GET_DWORD(&esc_profile[46])));
-
- a->profile.Global_Options &= 0x000000ffL;
- a->profile.B1_Protocols &= 0x000003ffL;
- a->profile.B2_Protocols &= 0x00001fdfL;
- a->profile.B3_Protocols &= 0x000000b7L;
-
- a->profile.Global_Options &= GET_DWORD(&esc_profile[6]) |
- GL_BCHANNEL_OPERATION_SUPPORTED;
- a->profile.B1_Protocols &= GET_DWORD(&esc_profile[10]);
- a->profile.B2_Protocols &= GET_DWORD(&esc_profile[14]);
- a->profile.B3_Protocols &= GET_DWORD(&esc_profile[18]);
- a->manufacturer_features = GET_DWORD(&esc_profile[46]);
- a->man_profile.private_options = 0;
-
- if (a->manufacturer_features & MANUFACTURER_FEATURE_ECHO_CANCELLER)
- {
- a->man_profile.private_options |= 1L << PRIVATE_ECHO_CANCELLER;
- a->profile.Global_Options |= GL_ECHO_CANCELLER_SUPPORTED;
- }
-
-
- if (a->manufacturer_features & MANUFACTURER_FEATURE_RTP)
- a->man_profile.private_options |= 1L << PRIVATE_RTP;
- a->man_profile.rtp_primary_payloads = GET_DWORD(&esc_profile[50]);
- a->man_profile.rtp_additional_payloads = GET_DWORD(&esc_profile[54]);
-
-
- if (a->manufacturer_features & MANUFACTURER_FEATURE_T38)
- a->man_profile.private_options |= 1L << PRIVATE_T38;
-
-
- if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_SUB_SEP_PWD)
- a->man_profile.private_options |= 1L << PRIVATE_FAX_SUB_SEP_PWD;
-
-
- if (a->manufacturer_features & MANUFACTURER_FEATURE_V18)
- a->man_profile.private_options |= 1L << PRIVATE_V18;
-
-
- if (a->manufacturer_features & MANUFACTURER_FEATURE_DTMF_TONE)
- a->man_profile.private_options |= 1L << PRIVATE_DTMF_TONE;
-
-
- if (a->manufacturer_features & MANUFACTURER_FEATURE_PIAFS)
- a->man_profile.private_options |= 1L << PRIVATE_PIAFS;
-
-
- if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
- a->man_profile.private_options |= 1L << PRIVATE_FAX_PAPER_FORMATS;
-
-
- if (a->manufacturer_features & MANUFACTURER_FEATURE_VOWN)
- a->man_profile.private_options |= 1L << PRIVATE_VOWN;
-
-
- if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_NONSTANDARD)
- a->man_profile.private_options |= 1L << PRIVATE_FAX_NONSTANDARD;
-
- }
- else
- {
- a->profile.Global_Options &= 0x0000007fL;
- a->profile.B1_Protocols &= 0x000003dfL;
- a->profile.B2_Protocols &= 0x00001adfL;
- a->profile.B3_Protocols &= 0x000000b7L;
- a->manufacturer_features &= MANUFACTURER_FEATURE_HARDDTMF;
- }
- if (a->manufacturer_features & (MANUFACTURER_FEATURE_HARDDTMF |
- MANUFACTURER_FEATURE_SOFTDTMF_SEND | MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
- {
- a->profile.Global_Options |= GL_DTMF_SUPPORTED;
- }
- a->manufacturer_features &= ~MANUFACTURER_FEATURE_OOB_CHANNEL;
- dbug(1, dprintf("[%06x] Profile: %lx %lx %lx %lx %lx",
- UnMapController(a->Id), a->profile.Global_Options,
- a->profile.B1_Protocols, a->profile.B2_Protocols,
- a->profile.B3_Protocols, a->manufacturer_features));
- }
- /* the codec PLCI for handset/hook state support is just an internal id */
- if (plci != a->AdvCodecPLCI)
- {
- force_mt_info = SendMultiIE(plci, Id, multi_fac_parms, FTY, 0x20, 0);
- force_mt_info |= SendMultiIE(plci, Id, multi_pi_parms, PI, 0x210, 0);
- SendSSExtInd(NULL, plci, Id, multi_ssext_parms);
- SendInfo(plci, Id, parms, force_mt_info);
-
- VSwitchReqInd(plci, Id, multi_vswitch_parms);
-
- }
-
- /* switch the codec to the b-channel */
- if (esc_chi[0] && plci && !plci->SuppState) {
- plci->b_channel = esc_chi[esc_chi[0]] & 0x1f;
- mixer_set_bchannel_id_esc(plci, plci->b_channel);
- dbug(1, dprintf("storeChannel=0x%x", plci->b_channel));
- if (plci->tel == ADV_VOICE && plci->appl) {
- SetVoiceChannel(a->AdvCodecPLCI, esc_chi, a);
- }
- }
-
- if (plci->appl) plci->appl->Number++;
-
- switch (plci->Sig.Ind) {
- /* Response to Get_Supported_Services request */
- case S_SUPPORTED:
- dbug(1, dprintf("S_Supported"));
- if (!plci->appl) break;
- if (pty_cai[0] == 4)
- {
- PUT_DWORD(&CF_Ind[6], GET_DWORD(&pty_cai[1]));
- }
- else
- {
- PUT_DWORD(&CF_Ind[6], MASK_TERMINAL_PORTABILITY | MASK_HOLD_RETRIEVE);
- }
- PUT_WORD(&CF_Ind[1], 0);
- PUT_WORD(&CF_Ind[4], 0);
- sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0x7, plci->number, "wws", 0, 3, CF_Ind);
- plci_remove(plci);
- break;
-
- /* Supplementary Service rejected */
- case S_SERVICE_REJ:
- dbug(1, dprintf("S_Reject=0x%x", pty_cai[5]));
- if (!pty_cai[0]) break;
- switch (pty_cai[5])
- {
- case ECT_EXECUTE:
- case THREE_PTY_END:
- case THREE_PTY_BEGIN:
- if (!plci->relatedPTYPLCI) break;
- tplci = plci->relatedPTYPLCI;
- rId = ((word)tplci->Id << 8) | tplci->adapter->Id;
- if (tplci->tel) rId |= EXT_CONTROLLER;
- if (pty_cai[5] == ECT_EXECUTE)
- {
- PUT_WORD(&SS_Ind[1], S_ECT);
-
- plci->vswitchstate = 0;
- plci->relatedPTYPLCI->vswitchstate = 0;
-
- }
- else
- {
- PUT_WORD(&SS_Ind[1], pty_cai[5] + 3);
- }
- if (pty_cai[2] != 0xff)
- {
- PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
- }
- else
- {
- PUT_WORD(&SS_Ind[4], 0x300E);
- }
- plci->relatedPTYPLCI = NULL;
- plci->ptyState = 0;
- sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
- break;
-
- case CALL_DEFLECTION:
- if (pty_cai[2] != 0xff)
- {
- PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
- }
- else
- {
- PUT_WORD(&SS_Ind[4], 0x300E);
- }
- PUT_WORD(&SS_Ind[1], pty_cai[5]);
- for (i = 0; i < max_appl; i++)
- {
- if (application[i].CDEnable)
- {
- if (application[i].Id) sendf(&application[i], _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
- application[i].CDEnable = false;
- }
- }
- break;
-
- case DEACTIVATION_DIVERSION:
- case ACTIVATION_DIVERSION:
- case DIVERSION_INTERROGATE_CFU:
- case DIVERSION_INTERROGATE_CFB:
- case DIVERSION_INTERROGATE_CFNR:
- case DIVERSION_INTERROGATE_NUM:
- case CCBS_REQUEST:
- case CCBS_DEACTIVATE:
- case CCBS_INTERROGATE:
- if (!plci->appl) break;
- if (pty_cai[2] != 0xff)
- {
- PUT_WORD(&Interr_Err_Ind[4], 0x3600 | (word)pty_cai[2]);
- }
- else
- {
- PUT_WORD(&Interr_Err_Ind[4], 0x300E);
- }
- switch (pty_cai[5])
- {
- case DEACTIVATION_DIVERSION:
- dbug(1, dprintf("Deact_Div"));
- Interr_Err_Ind[0] = 0x9;
- Interr_Err_Ind[3] = 0x6;
- PUT_WORD(&Interr_Err_Ind[1], S_CALL_FORWARDING_STOP);
- break;
- case ACTIVATION_DIVERSION:
- dbug(1, dprintf("Act_Div"));
- Interr_Err_Ind[0] = 0x9;
- Interr_Err_Ind[3] = 0x6;
- PUT_WORD(&Interr_Err_Ind[1], S_CALL_FORWARDING_START);
- break;
- case DIVERSION_INTERROGATE_CFU:
- case DIVERSION_INTERROGATE_CFB:
- case DIVERSION_INTERROGATE_CFNR:
- dbug(1, dprintf("Interr_Div"));
- Interr_Err_Ind[0] = 0xa;
- Interr_Err_Ind[3] = 0x7;
- PUT_WORD(&Interr_Err_Ind[1], S_INTERROGATE_DIVERSION);
- break;
- case DIVERSION_INTERROGATE_NUM:
- dbug(1, dprintf("Interr_Num"));
- Interr_Err_Ind[0] = 0xa;
- Interr_Err_Ind[3] = 0x7;
- PUT_WORD(&Interr_Err_Ind[1], S_INTERROGATE_NUMBERS);
- break;
- case CCBS_REQUEST:
- dbug(1, dprintf("CCBS Request"));
- Interr_Err_Ind[0] = 0xd;
- Interr_Err_Ind[3] = 0xa;
- PUT_WORD(&Interr_Err_Ind[1], S_CCBS_REQUEST);
- break;
- case CCBS_DEACTIVATE:
- dbug(1, dprintf("CCBS Deactivate"));
- Interr_Err_Ind[0] = 0x9;
- Interr_Err_Ind[3] = 0x6;
- PUT_WORD(&Interr_Err_Ind[1], S_CCBS_DEACTIVATE);
- break;
- case CCBS_INTERROGATE:
- dbug(1, dprintf("CCBS Interrogate"));
- Interr_Err_Ind[0] = 0xb;
- Interr_Err_Ind[3] = 0x8;
- PUT_WORD(&Interr_Err_Ind[1], S_CCBS_INTERROGATE);
- break;
- }
- PUT_DWORD(&Interr_Err_Ind[6], plci->appl->S_Handle);
- sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "ws", 3, Interr_Err_Ind);
- plci_remove(plci);
- break;
- case ACTIVATION_MWI:
- case DEACTIVATION_MWI:
- if (pty_cai[5] == ACTIVATION_MWI)
- {
- PUT_WORD(&SS_Ind[1], S_MWI_ACTIVATE);
- }
- else PUT_WORD(&SS_Ind[1], S_MWI_DEACTIVATE);
-
- if (pty_cai[2] != 0xff)
- {
- PUT_WORD(&SS_Ind[4], 0x3600 | (word)pty_cai[2]);
- }
- else
- {
- PUT_WORD(&SS_Ind[4], 0x300E);
- }
-
- if (plci->cr_enquiry)
- {
- sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "ws", 3, SS_Ind);
- plci_remove(plci);
- }
- else
- {
- sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
- }
- break;
- case CONF_ADD: /* ERROR */
- case CONF_BEGIN:
- case CONF_DROP:
- case CONF_ISOLATE:
- case CONF_REATTACH:
- CONF_Ind[0] = 9;
- CONF_Ind[3] = 6;
- switch (pty_cai[5])
- {
- case CONF_BEGIN:
- PUT_WORD(&CONF_Ind[1], S_CONF_BEGIN);
- plci->ptyState = 0;
- break;
- case CONF_DROP:
- CONF_Ind[0] = 5;
- CONF_Ind[3] = 2;
- PUT_WORD(&CONF_Ind[1], S_CONF_DROP);
- plci->ptyState = CONNECTED;
- break;
- case CONF_ISOLATE:
- CONF_Ind[0] = 5;
- CONF_Ind[3] = 2;
- PUT_WORD(&CONF_Ind[1], S_CONF_ISOLATE);
- plci->ptyState = CONNECTED;
- break;
- case CONF_REATTACH:
- CONF_Ind[0] = 5;
- CONF_Ind[3] = 2;
- PUT_WORD(&CONF_Ind[1], S_CONF_REATTACH);
- plci->ptyState = CONNECTED;
- break;
- case CONF_ADD:
- PUT_WORD(&CONF_Ind[1], S_CONF_ADD);
- tplci = plci->relatedPTYPLCI; /* save before clearing, else tplci is always NULL */
- plci->relatedPTYPLCI = NULL;
- if (tplci) tplci->ptyState = CONNECTED;
- plci->ptyState = CONNECTED;
- break;
- }
-
- if (pty_cai[2] != 0xff)
- {
- PUT_WORD(&CONF_Ind[4], 0x3600 | (word)pty_cai[2]);
- }
- else
- {
- PUT_WORD(&CONF_Ind[4], 0x3303); /* Time-out: network did not respond
- within the required time */
- }
-
- PUT_DWORD(&CONF_Ind[6], 0x0);
- sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
- break;
- }
- break;
-
- /* Supplementary Service indicates success */
- case S_SERVICE:
- dbug(1, dprintf("Service_Ind"));
- PUT_WORD(&CF_Ind[4], 0);
- switch (pty_cai[5])
- {
- case THREE_PTY_END:
- case THREE_PTY_BEGIN:
- case ECT_EXECUTE:
- if (!plci->relatedPTYPLCI) break;
- tplci = plci->relatedPTYPLCI;
- rId = ((word)tplci->Id << 8) | tplci->adapter->Id;
- if (tplci->tel) rId |= EXT_CONTROLLER;
- if (pty_cai[5] == ECT_EXECUTE)
- {
- PUT_WORD(&SS_Ind[1], S_ECT);
-
- if (plci->vswitchstate != 3)
- {
-
- plci->ptyState = IDLE;
- plci->relatedPTYPLCI = NULL;
- plci->ptyState = 0;
-
- }
-
- dbug(1, dprintf("ECT OK"));
- sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
-
-
-
- }
- else
- {
- switch (plci->ptyState)
- {
- case S_3PTY_BEGIN:
- plci->ptyState = CONNECTED;
- dbug(1, dprintf("3PTY ON"));
- break;
-
- case S_3PTY_END:
- plci->ptyState = IDLE;
- plci->relatedPTYPLCI = NULL;
- plci->ptyState = 0;
- dbug(1, dprintf("3PTY OFF"));
- break;
- }
- PUT_WORD(&SS_Ind[1], pty_cai[5] + 3);
- sendf(tplci->appl, _FACILITY_I, rId, 0, "ws", 3, SS_Ind);
- }
- break;
-
- case CALL_DEFLECTION:
- PUT_WORD(&SS_Ind[1], pty_cai[5]);
- for (i = 0; i < max_appl; i++)
- {
- if (application[i].CDEnable)
- {
- if (application[i].Id) sendf(&application[i], _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
- application[i].CDEnable = false;
- }
- }
- break;
-
- case DEACTIVATION_DIVERSION:
- case ACTIVATION_DIVERSION:
- if (!plci->appl) break;
- PUT_WORD(&CF_Ind[1], pty_cai[5] + 2);
- PUT_DWORD(&CF_Ind[6], plci->appl->S_Handle);
- sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "ws", 3, CF_Ind);
- plci_remove(plci);
- break;
-
- case DIVERSION_INTERROGATE_CFU:
- case DIVERSION_INTERROGATE_CFB:
- case DIVERSION_INTERROGATE_CFNR:
- case DIVERSION_INTERROGATE_NUM:
- case CCBS_REQUEST:
- case CCBS_DEACTIVATE:
- case CCBS_INTERROGATE:
- if (!plci->appl) break;
- switch (pty_cai[5])
- {
- case DIVERSION_INTERROGATE_CFU:
- case DIVERSION_INTERROGATE_CFB:
- case DIVERSION_INTERROGATE_CFNR:
- dbug(1, dprintf("Interr_Div"));
- PUT_WORD(&pty_cai[1], S_INTERROGATE_DIVERSION);
- pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
- break;
- case DIVERSION_INTERROGATE_NUM:
- dbug(1, dprintf("Interr_Num"));
- PUT_WORD(&pty_cai[1], S_INTERROGATE_NUMBERS);
- pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
- break;
- case CCBS_REQUEST:
- dbug(1, dprintf("CCBS Request"));
- PUT_WORD(&pty_cai[1], S_CCBS_REQUEST);
- pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
- break;
- case CCBS_DEACTIVATE:
- dbug(1, dprintf("CCBS Deactivate"));
- PUT_WORD(&pty_cai[1], S_CCBS_DEACTIVATE);
- pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
- break;
- case CCBS_INTERROGATE:
- dbug(1, dprintf("CCBS Interrogate"));
- PUT_WORD(&pty_cai[1], S_CCBS_INTERROGATE);
- pty_cai[3] = pty_cai[0] - 3; /* Supplementary Service-specific parameter len */
- break;
- }
- PUT_WORD(&pty_cai[4], 0); /* Supplementary Service Reason */
- PUT_DWORD(&pty_cai[6], plci->appl->S_Handle);
- sendf(plci->appl, _FACILITY_I, Id & 0x7, 0, "wS", 3, pty_cai);
- plci_remove(plci);
- break;
-
- case ACTIVATION_MWI:
- case DEACTIVATION_MWI:
- if (pty_cai[5] == ACTIVATION_MWI)
- {
- PUT_WORD(&SS_Ind[1], S_MWI_ACTIVATE);
- }
- else PUT_WORD(&SS_Ind[1], S_MWI_DEACTIVATE);
- if (plci->cr_enquiry)
- {
- sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "ws", 3, SS_Ind);
- plci_remove(plci);
- }
- else
- {
- sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
- }
- break;
- case MWI_INDICATION:
- if (pty_cai[0] >= 0x12)
- {
- PUT_WORD(&pty_cai[3], S_MWI_INDICATE);
- pty_cai[2] = pty_cai[0] - 2; /* len Parameter */
- pty_cai[5] = pty_cai[0] - 5; /* Supplementary Service-specific parameter len */
- if (plci->appl && (a->Notification_Mask[plci->appl->Id - 1] & SMASK_MWI))
- {
- if (plci->internal_command == GET_MWI_STATE) /* result on Message Waiting Listen */
- {
- sendf(plci->appl, _FACILITY_I, Id & 0xf, 0, "wS", 3, &pty_cai[2]);
- plci_remove(plci);
- return;
- }
- else sendf(plci->appl, _FACILITY_I, Id, 0, "wS", 3, &pty_cai[2]);
- pty_cai[0] = 0;
- }
- else
- {
- for (i = 0; i < max_appl; i++)
- {
- if (a->Notification_Mask[i] & SMASK_MWI)
- {
- sendf(&application[i], _FACILITY_I, Id & 0x7, 0, "wS", 3, &pty_cai[2]);
- pty_cai[0] = 0;
- }
- }
- }
-
- if (!pty_cai[0])
- { /* acknowledge */
- facility[2] = 0; /* returncode */
- }
- else facility[2] = 0xff;
- }
- else
- {
- /* reject */
- facility[2] = 0xff; /* returncode */
- }
- facility[0] = 2;
- facility[1] = MWI_RESPONSE; /* Function */
- add_p(plci, CAI, facility);
- add_p(plci, ESC, multi_ssext_parms[0]); /* remembered parameter -> only one possible */
- sig_req(plci, S_SERVICE, 0);
- send_req(plci);
- plci->command = 0;
- next_internal_command(Id, plci);
- break;
- case CONF_ADD: /* OK */
- case CONF_BEGIN:
- case CONF_DROP:
- case CONF_ISOLATE:
- case CONF_REATTACH:
- case CONF_PARTYDISC:
- CONF_Ind[0] = 9;
- CONF_Ind[3] = 6;
- switch (pty_cai[5])
- {
- case CONF_BEGIN:
- PUT_WORD(&CONF_Ind[1], S_CONF_BEGIN);
- if (pty_cai[0] == 6)
- {
- d = pty_cai[6];
- PUT_DWORD(&CONF_Ind[6], d); /* PartyID */
- }
- else
- {
- PUT_DWORD(&CONF_Ind[6], 0x0);
- }
- break;
- case CONF_ISOLATE:
- PUT_WORD(&CONF_Ind[1], S_CONF_ISOLATE);
- CONF_Ind[0] = 5;
- CONF_Ind[3] = 2;
- break;
- case CONF_REATTACH:
- PUT_WORD(&CONF_Ind[1], S_CONF_REATTACH);
- CONF_Ind[0] = 5;
- CONF_Ind[3] = 2;
- break;
- case CONF_DROP:
- PUT_WORD(&CONF_Ind[1], S_CONF_DROP);
- CONF_Ind[0] = 5;
- CONF_Ind[3] = 2;
- break;
- case CONF_ADD:
- PUT_WORD(&CONF_Ind[1], S_CONF_ADD);
- d = pty_cai[6];
- PUT_DWORD(&CONF_Ind[6], d); /* PartyID */
- tplci = plci->relatedPTYPLCI;
- if (tplci) tplci->ptyState = CONNECTED;
- break;
- case CONF_PARTYDISC:
- CONF_Ind[0] = 7;
- CONF_Ind[3] = 4;
- PUT_WORD(&CONF_Ind[1], S_CONF_PARTYDISC);
- d = pty_cai[6];
- PUT_DWORD(&CONF_Ind[4], d); /* PartyID */
- break;
- }
- plci->ptyState = CONNECTED;
- sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
- break;
- case CCBS_INFO_RETAIN:
- case CCBS_ERASECALLLINKAGEID:
- case CCBS_STOP_ALERTING:
- CONF_Ind[0] = 5;
- CONF_Ind[3] = 2;
- switch (pty_cai[5])
- {
- case CCBS_INFO_RETAIN:
- PUT_WORD(&CONF_Ind[1], S_CCBS_INFO_RETAIN);
- break;
- case CCBS_STOP_ALERTING:
- PUT_WORD(&CONF_Ind[1], S_CCBS_STOP_ALERTING);
- break;
- case CCBS_ERASECALLLINKAGEID:
- PUT_WORD(&CONF_Ind[1], S_CCBS_ERASECALLLINKAGEID);
- CONF_Ind[0] = 7;
- CONF_Ind[3] = 4;
- CONF_Ind[6] = 0;
- CONF_Ind[7] = 0;
- break;
- }
- w = pty_cai[6];
- PUT_WORD(&CONF_Ind[4], w); /* PartyID */
-
- if (plci->appl && (a->Notification_Mask[plci->appl->Id - 1] & SMASK_CCBS))
- {
- sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, CONF_Ind);
- }
- else
- {
- for (i = 0; i < max_appl; i++)
- if (a->Notification_Mask[i] & SMASK_CCBS)
- sendf(&application[i], _FACILITY_I, Id & 0x7, 0, "ws", 3, CONF_Ind);
- }
- break;
- }
- break;
- case CALL_HOLD_REJ:
- cau = parms[7];
- if (cau)
- {
- i = _L3_CAUSE | cau[2];
- if (cau[2] == 0) i = 0x3603;
- }
- else
- {
- i = 0x3603;
- }
- PUT_WORD(&SS_Ind[1], S_HOLD);
- PUT_WORD(&SS_Ind[4], i);
- if (plci->SuppState == HOLD_REQUEST)
- {
- plci->SuppState = IDLE;
- sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
- }
- break;
-
- case CALL_HOLD_ACK:
- if (plci->SuppState == HOLD_REQUEST)
- {
- plci->SuppState = CALL_HELD;
- CodecIdCheck(a, plci);
- start_internal_command(Id, plci, hold_save_command);
- }
- break;
-
- case CALL_RETRIEVE_REJ:
- cau = parms[7];
- if (cau)
- {
- i = _L3_CAUSE | cau[2];
- if (cau[2] == 0) i = 0x3603;
- }
- else
- {
- i = 0x3603;
- }
- PUT_WORD(&SS_Ind[1], S_RETRIEVE);
- PUT_WORD(&SS_Ind[4], i);
- if (plci->SuppState == RETRIEVE_REQUEST)
- {
- plci->SuppState = CALL_HELD;
- CodecIdCheck(a, plci);
- sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
- }
- break;
-
- case CALL_RETRIEVE_ACK:
- PUT_WORD(&SS_Ind[1], S_RETRIEVE);
- if (plci->SuppState == RETRIEVE_REQUEST)
- {
- plci->SuppState = IDLE;
- plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
- plci->b_channel = esc_chi[esc_chi[0]] & 0x1f;
- if (plci->tel)
- {
- mixer_set_bchannel_id_esc(plci, plci->b_channel);
- dbug(1, dprintf("RetrChannel=0x%x", plci->b_channel));
- SetVoiceChannel(a->AdvCodecPLCI, esc_chi, a);
- if (plci->B2_prot == B2_TRANSPARENT && plci->B3_prot == B3_TRANSPARENT)
- {
- dbug(1, dprintf("Get B-ch"));
- start_internal_command(Id, plci, retrieve_restore_command);
- }
- else
- sendf(plci->appl, _FACILITY_I, Id, 0, "ws", 3, SS_Ind);
- }
- else
- start_internal_command(Id, plci, retrieve_restore_command);
- }
- break;
-
- case INDICATE_IND:
- if (plci->State != LISTENING) {
- sig_req(plci, HANGUP, 0);
- send_req(plci);
- break;
- }
- cip = find_cip(a, parms[4], parms[6]);
- cip_mask = 1L << cip;
- dbug(1, dprintf("cip=%d,cip_mask=%lx", cip, cip_mask));
- bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
- if (!remove_started && !a->adapter_disabled)
- {
- group_optimization(a, plci);
- for_each_set_bit(i, plci->group_optimization_mask_table, max_appl) {
- if (application[i].Id
- && (a->CIP_Mask[i] & 1 || a->CIP_Mask[i] & cip_mask)
- && CPN_filter_ok(parms[0], a, i)) {
- dbug(1, dprintf("storedcip_mask[%d]=0x%lx", i, a->CIP_Mask[i]));
- __set_bit(i, plci->c_ind_mask_table);
- dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
- plci->State = INC_CON_PENDING;
- plci->call_dir = (plci->call_dir & ~(CALL_DIR_OUT | CALL_DIR_ORIGINATE)) |
- CALL_DIR_IN | CALL_DIR_ANSWER;
- if (esc_chi[0]) {
- plci->b_channel = esc_chi[esc_chi[0]] & 0x1f;
- mixer_set_bchannel_id_esc(plci, plci->b_channel);
- }
- /* if a listen on the ext controller is done, check whether hook states */
- /* are supported or if just an on-board codec must be activated */
- if (a->codec_listen[i] && !a->AdvSignalPLCI) {
- if (a->profile.Global_Options & HANDSET)
- plci->tel = ADV_VOICE;
- else if (a->profile.Global_Options & ON_BOARD_CODEC)
- plci->tel = CODEC;
- if (plci->tel) Id |= EXT_CONTROLLER;
- a->codec_listen[i] = plci;
- }
-
- sendf(&application[i], _CONNECT_I, Id, 0,
- "wSSSSSSSbSSSSS", cip, /* CIP */
- parms[0], /* CalledPartyNumber */
- multi_CiPN_parms[0], /* CallingPartyNumber */
- parms[2], /* CalledPartySubad */
- parms[3], /* CallingPartySubad */
- parms[4], /* BearerCapability */
- parms[5], /* LowLC */
- parms[6], /* HighLC */
- ai_len, /* nested struct add_i */
- add_i[0], /* B channel info */
- add_i[1], /* keypad facility */
- add_i[2], /* user user data */
- add_i[3], /* nested facility */
- multi_CiPN_parms[1] /* second CiPN(SCR) */
- );
- SendSSExtInd(&application[i],
- plci,
- Id,
- multi_ssext_parms);
- SendSetupInfo(&application[i],
- plci,
- Id,
- parms,
- SendMultiIE(plci, Id, multi_pi_parms, PI, 0x210, true));
- }
- }
- dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table));
- }
- if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) {
- sig_req(plci, HANGUP, 0);
- send_req(plci);
- plci->State = IDLE;
- }
- plci->notifiedcall = 0;
- a->listen_active--;
- listen_check(a);
- break;
-
- case CALL_PEND_NOTIFY:
- plci->notifiedcall = 1;
- listen_check(a);
- break;
-
- case CALL_IND:
- case CALL_CON:
- if (plci->State == ADVANCED_VOICE_SIG || plci->State == ADVANCED_VOICE_NOSIG)
- {
- if (plci->internal_command == PERM_COD_CONN_PEND)
- {
- if (plci->State == ADVANCED_VOICE_NOSIG)
- {
- dbug(1, dprintf("***Codec OK"));
- if (a->AdvSignalPLCI)
- {
- tplci = a->AdvSignalPLCI;
- if (tplci->spoofed_msg)
- {
- dbug(1, dprintf("***Spoofed Msg(0x%x)", tplci->spoofed_msg));
- tplci->command = 0;
- tplci->internal_command = 0;
- x_Id = ((word)tplci->Id << 8) | tplci->adapter->Id | 0x80;
- switch (tplci->spoofed_msg)
- {
- case CALL_RES:
- tplci->command = _CONNECT_I | RESPONSE;
- api_load_msg(&tplci->saved_msg, saved_parms);
- add_b1(tplci, &saved_parms[1], 0, tplci->B1_facilities);
- if (tplci->adapter->Info_Mask[tplci->appl->Id - 1] & 0x200)
- {
- /* early B3 connect (CIP mask bit 9): no release after a disc */
- add_p(tplci, LLI, "\x01\x01");
- }
- add_s(tplci, CONN_NR, &saved_parms[2]);
- add_s(tplci, LLC, &saved_parms[4]);
- add_ai(tplci, &saved_parms[5]);
- tplci->State = INC_CON_ACCEPT;
- sig_req(tplci, CALL_RES, 0);
- send_req(tplci);
- break;
-
- case AWAITING_SELECT_B:
- dbug(1, dprintf("Select_B continue"));
- start_internal_command(x_Id, tplci, select_b_command);
- break;
-
- case AWAITING_MANUF_CON: /* Get_Plci per Manufacturer_Req to ext controller */
- if (!tplci->Sig.Id)
- {
- dbug(1, dprintf("No SigID!"));
- sendf(tplci->appl, _MANUFACTURER_R | CONFIRM, x_Id, tplci->number, "dww", _DI_MANU_ID, _MANUFACTURER_R, _OUT_OF_PLCI);
- plci_remove(tplci);
- break;
- }
- tplci->command = _MANUFACTURER_R;
- api_load_msg(&tplci->saved_msg, saved_parms);
- dir = saved_parms[2].info[0];
- if (dir == 1) {
- sig_req(tplci, CALL_REQ, 0);
- }
- else if (!dir) {
- sig_req(tplci, LISTEN_REQ, 0);
- }
- send_req(tplci);
- sendf(tplci->appl, _MANUFACTURER_R | CONFIRM, x_Id, tplci->number, "dww", _DI_MANU_ID, _MANUFACTURER_R, 0);
- break;
-
- case (CALL_REQ | AWAITING_MANUF_CON):
- sig_req(tplci, CALL_REQ, 0);
- send_req(tplci);
- break;
-
- case CALL_REQ:
- if (!tplci->Sig.Id)
- {
- dbug(1, dprintf("No SigID!"));
- sendf(tplci->appl, _CONNECT_R | CONFIRM, tplci->adapter->Id, 0, "w", _OUT_OF_PLCI);
- plci_remove(tplci);
- break;
- }
- tplci->command = _CONNECT_R;
- api_load_msg(&tplci->saved_msg, saved_parms);
- add_s(tplci, CPN, &saved_parms[1]);
- add_s(tplci, DSA, &saved_parms[3]);
- add_ai(tplci, &saved_parms[9]);
- sig_req(tplci, CALL_REQ, 0);
- send_req(tplci);
- break;
-
- case CALL_RETRIEVE:
- tplci->command = C_RETRIEVE_REQ;
- sig_req(tplci, CALL_RETRIEVE, 0);
- send_req(tplci);
- break;
- }
- tplci->spoofed_msg = 0;
- if (tplci->internal_command == 0)
- next_internal_command(x_Id, tplci);
- }
- }
- next_internal_command(Id, plci);
- break;
- }
- dbug(1, dprintf("***Codec Hook Init Req"));
- plci->internal_command = PERM_COD_HOOK;
- add_p(plci, FTY, "\x01\x09"); /* Get Hook State*/
- sig_req(plci, TEL_CTRL, 0);
- send_req(plci);
- }
- }
- else if (plci->command != _MANUFACTURER_R /* old style permanent connect */
- && plci->State != INC_ACT_PENDING)
- {
- mixer_set_bchannel_id_esc(plci, plci->b_channel);
- if (plci->tel == ADV_VOICE && plci->SuppState == IDLE) /* with a permanent codec, switch on immediately */
- {
- chi[2] = plci->b_channel;
- SetVoiceChannel(a->AdvCodecPLCI, chi, a);
- }
- sendf(plci->appl, _CONNECT_ACTIVE_I, Id, 0, "Sss", parms[21], "", "");
- plci->State = INC_ACT_PENDING;
- }
- break;
-
- case TEL_CTRL:
- ie = multi_fac_parms[0]; /* inspect the facility hook indications */
- if (plci->State == ADVANCED_VOICE_SIG && ie[0]) {
- switch (ie[1] & 0x91) {
- case 0x80: /* hook off */
- case 0x81:
- if (plci->internal_command == PERM_COD_HOOK)
- {
- dbug(1, dprintf("init:hook_off"));
- plci->hook_state = ie[1];
- next_internal_command(Id, plci);
- break;
- }
- else /* ignore duplicate hook indications */
- {
- if (((plci->hook_state) & 0xf0) == 0x80)
- {
- dbug(1, dprintf("ignore hook"));
- break;
- }
- plci->hook_state = ie[1]&0x91;
- }
- /* check for a pending incoming call */
- /* and signal '+'; the appl must decide */
- /* with connect_res whether the call is */
- /* accepted or not */
- for (i = 0, tplci = NULL; i < max_appl; i++) {
- if (a->codec_listen[i]
- && (a->codec_listen[i]->State == INC_CON_PENDING
- || a->codec_listen[i]->State == INC_CON_ALERT)) {
- tplci = a->codec_listen[i];
- tplci->appl = &application[i];
- }
- }
- /* no incoming call: set up an outgoing call */
- /* and signal '+' on outgoing setup */
- if (!a->AdvSignalPLCI && !tplci) {
- if ((i = get_plci(a))) {
- a->AdvSignalPLCI = &a->plci[i - 1];
- tplci = a->AdvSignalPLCI;
- tplci->tel = ADV_VOICE;
- PUT_WORD(&voice_cai[5], a->AdvSignalAppl->MaxDataLength);
- if (a->Info_Mask[a->AdvSignalAppl->Id - 1] & 0x200) {
- /* early B3 connect (CIP mask bit 9): no release after a disc */
- add_p(tplci, LLI, "\x01\x01");
- }
- add_p(tplci, CAI, voice_cai);
- add_p(tplci, OAD, a->TelOAD);
- add_p(tplci, OSA, a->TelOSA);
- add_p(tplci, SHIFT | 6, NULL);
- add_p(tplci, SIN, "\x02\x01\x00");
- add_p(tplci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(tplci, ASSIGN, DSIG_ID);
- a->AdvSignalPLCI->internal_command = HOOK_OFF_REQ;
- a->AdvSignalPLCI->command = 0;
- tplci->appl = a->AdvSignalAppl;
- tplci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
- send_req(tplci);
- }
-
- }
-
- if (!tplci) break;
- Id = ((word)tplci->Id << 8) | a->Id;
- Id |= EXT_CONTROLLER;
- sendf(tplci->appl,
- _FACILITY_I,
- Id,
- 0,
- "ws", (word)0, "\x01+");
- break;
-
- case 0x90: /* hook on */
- case 0x91:
- if (plci->internal_command == PERM_COD_HOOK)
- {
- dbug(1, dprintf("init:hook_on"));
- plci->hook_state = ie[1] & 0x91;
- next_internal_command(Id, plci);
- break;
- }
- else /* ignore duplicate hook indications */
- {
- if (((plci->hook_state) & 0xf0) == 0x90) break;
- plci->hook_state = ie[1] & 0x91;
- }
- /* hang up the adv. voice call and signal '-' to the appl */
- if (a->AdvSignalPLCI) {
- Id = ((word)a->AdvSignalPLCI->Id << 8) | a->Id;
- if (plci->tel) Id |= EXT_CONTROLLER;
- sendf(a->AdvSignalAppl,
- _FACILITY_I,
- Id,
- 0,
- "ws", (word)0, "\x01-");
- a->AdvSignalPLCI->internal_command = HOOK_ON_REQ;
- a->AdvSignalPLCI->command = 0;
- sig_req(a->AdvSignalPLCI, HANGUP, 0);
- send_req(a->AdvSignalPLCI);
- }
- break;
- }
- }
- break;
-
- case RESUME:
- __clear_bit(plci->appl->Id - 1, plci->c_ind_mask_table);
- PUT_WORD(&resume_cau[4], GOOD);
- sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau);
- break;
-
- case SUSPEND:
- bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
-
- if (plci->NL.Id && !plci->nl_remove_id) {
- mixer_remove(plci);
- nl_req_ncci(plci, REMOVE, 0);
- }
- if (!plci->sig_remove_id) {
- plci->internal_command = 0;
- sig_req(plci, REMOVE, 0);
- }
- send_req(plci);
- if (!plci->channels) {
- sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, "\x05\x04\x00\x02\x00\x00");
- sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", 0);
- }
- break;
-
- case SUSPEND_REJ:
- break;
-
- case HANGUP:
- plci->hangup_flow_ctrl_timer = 0;
- if (plci->manufacturer && plci->State == LOCAL_CONNECT) break;
- cau = parms[7];
- if (cau) {
- i = _L3_CAUSE | cau[2];
- if (cau[2] == 0) i = 0;
- else if (cau[2] == 8) i = _L1_ERROR;
- else if (cau[2] == 9 || cau[2] == 10) i = _L2_ERROR;
- else if (cau[2] == 5) i = _CAPI_GUARD_ERROR;
- }
- else {
- i = _L3_ERROR;
- }
-
- if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT)
- {
- for_each_set_bit(i, plci->c_ind_mask_table, max_appl)
- sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0);
- }
- else
- {
- bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
- }
- if (!plci->appl)
- {
- if (plci->State == LISTENING)
- {
- plci->notifiedcall = 0;
- a->listen_active--;
- }
- plci->State = INC_DIS_PENDING;
- if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
- {
- plci->State = IDLE;
- if (plci->NL.Id && !plci->nl_remove_id)
- {
- mixer_remove(plci);
- nl_req_ncci(plci, REMOVE, 0);
- }
- if (!plci->sig_remove_id)
- {
- plci->internal_command = 0;
- sig_req(plci, REMOVE, 0);
- }
- send_req(plci);
- }
- }
- else
- {
- /* collision of DISCONNECT or CONNECT_RES with HANGUP can */
- /* result in a second HANGUP! Don't generate another */
- /* DISCONNECT */
- if (plci->State != IDLE && plci->State != INC_DIS_PENDING)
- {
- if (plci->State == RESUMING)
- {
- PUT_WORD(&resume_cau[4], i);
- sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau);
- }
- plci->State = INC_DIS_PENDING;
- sendf(plci->appl, _DISCONNECT_I, Id, 0, "w", i);
- }
- }
- break;
-
- case SSEXT_IND:
- SendSSExtInd(NULL, plci, Id, multi_ssext_parms);
- break;
-
- case VSWITCH_REQ:
- VSwitchReqInd(plci, Id, multi_vswitch_parms);
- break;
- case VSWITCH_IND:
- if (plci->relatedPTYPLCI &&
- plci->vswitchstate == 3 &&
- plci->relatedPTYPLCI->vswitchstate == 3 &&
- parms[MAXPARMSIDS - 1][0])
- {
- add_p(plci->relatedPTYPLCI, SMSG, parms[MAXPARMSIDS - 1]);
- sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
- send_req(plci->relatedPTYPLCI);
- }
- else VSwitchReqInd(plci, Id, multi_vswitch_parms);
- break;
-
- }
-}
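Throughout sig_ind() the per-application fan-out is driven by the c_ind_mask_table bitmap (bitmap_zero/__set_bit/for_each_set_bit above); a minimal user-space model of that broadcast pattern, with hypothetical names and a 64-application cap:

    #include <stdint.h>

    #define MAX_APPL 64

    static uint64_t c_ind_mask;          /* one bit per registered application */

    static void broadcast_ind(void (*deliver)(int appl))
    {
        int i;

        for (i = 0; i < MAX_APPL; i++)
            if (c_ind_mask & ((uint64_t)1 << i))
                deliver(i);              /* only applications that matched the CIP */
    }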
-
-
-static void SendSetupInfo(APPL *appl, PLCI *plci, dword Id, byte **parms, byte Info_Sent_Flag)
-{
- word i;
- byte *ie;
- word Info_Number;
- byte *Info_Element;
- word Info_Mask = 0;
-
- dbug(1, dprintf("SetupInfo"));
-
- for (i = 0; i < MAXPARMSIDS; i++) {
- ie = parms[i];
- Info_Number = 0;
- Info_Element = ie;
- if (ie[0]) {
- switch (i) {
- case 0:
- dbug(1, dprintf("CPN "));
- Info_Number = 0x0070;
- Info_Mask = 0x80;
- Info_Sent_Flag = true;
- break;
- case 8: /* display */
- dbug(1, dprintf("display(%d)", i));
- Info_Number = 0x0028;
- Info_Mask = 0x04;
- Info_Sent_Flag = true;
- break;
- case 16: /* Channel Id */
- dbug(1, dprintf("CHI"));
- Info_Number = 0x0018;
- Info_Mask = 0x100;
- Info_Sent_Flag = true;
- mixer_set_bchannel_id(plci, Info_Element);
- break;
- case 19: /* Redirected Number */
- dbug(1, dprintf("RDN"));
- Info_Number = 0x0074;
- Info_Mask = 0x400;
- Info_Sent_Flag = true;
- break;
- case 20: /* Redirected Number extended */
- dbug(1, dprintf("RDX"));
- Info_Number = 0x0073;
- Info_Mask = 0x400;
- Info_Sent_Flag = true;
- break;
- case 22: /* Redirecting Number */
- dbug(1, dprintf("RIN"));
- Info_Number = 0x0076;
- Info_Mask = 0x400;
- Info_Sent_Flag = true;
- break;
- default:
- Info_Number = 0;
- break;
- }
- }
-
- if (i == MAXPARMSIDS - 2) { /* to indicate the message type "Setup" */
- Info_Number = 0x8000 | 5;
- Info_Mask = 0x10;
- Info_Element = "";
- }
-
- if (Info_Sent_Flag && Info_Number) {
- if (plci->adapter->Info_Mask[appl->Id - 1] & Info_Mask) {
- sendf(appl, _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
- }
- }
- }
-}
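Each _INFO_I above is gated by a per-application subscription word; the test SendSetupInfo() applies for every IE, factored out as a sketch (the helper is hypothetical):

    /* Info_Mask[] is indexed by the 1-based application Id */
    static int info_subscribed(DIVA_CAPI_ADAPTER *a, APPL *appl, word mask)
    {
        return (a->Info_Mask[appl->Id - 1] & mask) != 0;
    }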
-
-
-static void SendInfo(PLCI *plci, dword Id, byte **parms, byte iesent)
-{
- word i;
- word j;
- word k;
- byte *ie;
- word Info_Number;
- byte *Info_Element;
- word Info_Mask = 0;
- static byte charges[5] = {4, 0, 0, 0, 0};
- static byte cause[] = {0x02, 0x80, 0x00};
- APPL *appl;
-
- dbug(1, dprintf("InfoParse "));
-
- if (
- !plci->appl
- && !plci->State
- && plci->Sig.Ind != NCR_FACILITY
- )
- {
- dbug(1, dprintf("NoParse "));
- return;
- }
- cause[2] = 0;
- for (i = 0; i < MAXPARMSIDS; i++) {
- ie = parms[i];
- Info_Number = 0;
- Info_Element = ie;
- if (ie[0]) {
- switch (i) {
- case 0:
- dbug(1, dprintf("CPN "));
- Info_Number = 0x0070;
- Info_Mask = 0x80;
- break;
- case 7: /* ESC_CAU */
- dbug(1, dprintf("cau(0x%x)", ie[2]));
- Info_Number = 0x0008;
- Info_Mask = 0x00;
- cause[2] = ie[2];
- Info_Element = NULL;
- break;
- case 8: /* display */
- dbug(1, dprintf("display(%d)", i));
- Info_Number = 0x0028;
- Info_Mask = 0x04;
- break;
- case 9: /* Date display */
- dbug(1, dprintf("date(%d)", i));
- Info_Number = 0x0029;
- Info_Mask = 0x02;
- break;
- case 10: /* charges */
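- /* scan past octets until the one with bit 7 set (last-octet marker), */
- /* then copy up to four of the following octets into charges[1..4] */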
- for (j = 0; j < 4; j++) charges[1 + j] = 0;
- for (j = 0; j < ie[0] && !(ie[1 + j] & 0x80); j++);
- for (k = 1, j++; j < ie[0] && k <= 4; j++, k++) charges[k] = ie[1 + j];
- Info_Number = 0x4000;
- Info_Mask = 0x40;
- Info_Element = charges;
- break;
- case 11: /* user user info */
- dbug(1, dprintf("uui"));
- Info_Number = 0x007E;
- Info_Mask = 0x08;
- break;
- case 12: /* congestion receiver ready */
- dbug(1, dprintf("clRDY"));
- Info_Number = 0x00B0;
- Info_Mask = 0x08;
- Info_Element = "";
- break;
- case 13: /* congestion receiver not ready */
- dbug(1, dprintf("clNRDY"));
- Info_Number = 0x00BF;
- Info_Mask = 0x08;
- Info_Element = "";
- break;
- case 15: /* Keypad Facility */
- dbug(1, dprintf("KEY"));
- Info_Number = 0x002C;
- Info_Mask = 0x20;
- break;
- case 16: /* Channel Id */
- dbug(1, dprintf("CHI"));
- Info_Number = 0x0018;
- Info_Mask = 0x100;
- mixer_set_bchannel_id(plci, Info_Element);
- break;
- case 17: /* if no 1tr6 cause, send full cause, else esc_cause */
- dbug(1, dprintf("q9cau(0x%x)", ie[2]));
- if (!cause[2] || cause[2] < 0x80) break; /* e.g. layer 1 error */
- Info_Number = 0x0008;
- Info_Mask = 0x01;
- if (cause[2] != ie[2]) Info_Element = cause;
- break;
- case 19: /* Redirected Number */
- dbug(1, dprintf("RDN"));
- Info_Number = 0x0074;
- Info_Mask = 0x400;
- break;
- case 22: /* Redirecting Number */
- dbug(1, dprintf("RIN"));
- Info_Number = 0x0076;
- Info_Mask = 0x400;
- break;
- case 23: /* Notification Indicator */
- dbug(1, dprintf("NI"));
- Info_Number = (word)NI;
- Info_Mask = 0x210;
- break;
- case 26: /* Call State */
- dbug(1, dprintf("CST"));
- Info_Number = (word)CST;
- Info_Mask = 0x01; /* share the cause Info_Mask bit, i.e. for now */
- break;
- case MAXPARMSIDS - 2: /* Escape Message Type, must be the last indication */
- dbug(1, dprintf("ESC/MT[0x%x]", ie[3]));
- Info_Number = 0x8000 | ie[3];
- if (iesent) Info_Mask = 0xffff;
- else Info_Mask = 0x10;
- Info_Element = "";
- break;
- default:
- Info_Number = 0;
- Info_Mask = 0;
- Info_Element = "";
- break;
- }
- }
-
- if (plci->Sig.Ind == NCR_FACILITY) /* check controller broadcast */
- {
- for (j = 0; j < max_appl; j++)
- {
- appl = &application[j];
- if (Info_Number
- && appl->Id
- && plci->adapter->Info_Mask[appl->Id - 1] & Info_Mask)
- {
- dbug(1, dprintf("NCR_Ind"));
- iesent = true;
- sendf(&application[j], _INFO_I, Id & 0x0f, 0, "wS", Info_Number, Info_Element);
- }
- }
- }
- else if (!plci->appl)
- { /* overlap receiving broadcast */
- if (Info_Number == CPN
- || Info_Number == KEY
- || Info_Number == NI
- || Info_Number == DSP
- || Info_Number == UUI)
- {
- for_each_set_bit(j, plci->c_ind_mask_table, max_appl) {
- dbug(1, dprintf("Ovl_Ind"));
- iesent = true;
- sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
- }
- }
- } /* all other signalling states */
- else if (Info_Number
- && plci->adapter->Info_Mask[plci->appl->Id - 1] & Info_Mask)
- {
- dbug(1, dprintf("Std_Ind"));
- iesent = true;
- sendf(plci->appl, _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
- }
- }
-}
-
-
-static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type,
- dword info_mask, byte setupParse)
-{
- word i;
- word j;
- byte *ie;
- word Info_Number;
- byte *Info_Element;
- APPL *appl;
- word Info_Mask = 0;
- byte iesent = 0;
-
- if (
- !plci->appl
- && !plci->State
- && plci->Sig.Ind != NCR_FACILITY
- && !setupParse
- )
- {
- dbug(1, dprintf("NoM-IEParse "));
- return 0;
- }
- dbug(1, dprintf("M-IEParse "));
-
- for (i = 0; i < MAX_MULTI_IE; i++)
- {
- ie = parms[i];
- Info_Number = 0;
- Info_Element = ie;
- if (ie[0])
- {
- dbug(1, dprintf("[Ind0x%x]:IE=0x%x", plci->Sig.Ind, ie_type));
- Info_Number = (word)ie_type;
- Info_Mask = (word)info_mask;
- }
-
- if (plci->Sig.Ind == NCR_FACILITY) /* check controller broadcast */
- {
- for (j = 0; j < max_appl; j++)
- {
- appl = &application[j];
- if (Info_Number
- && appl->Id
- && plci->adapter->Info_Mask[appl->Id - 1] & Info_Mask)
- {
- iesent = true;
- dbug(1, dprintf("Mlt_NCR_Ind"));
- sendf(&application[j], _INFO_I, Id & 0x0f, 0, "wS", Info_Number, Info_Element);
- }
- }
- }
- else if (!plci->appl && Info_Number)
- { /* overlap receiving broadcast */
- for_each_set_bit(j, plci->c_ind_mask_table, max_appl) {
- iesent = true;
- dbug(1, dprintf("Mlt_Ovl_Ind"));
- sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
- }
- } /* all other signalling states */
- else if (Info_Number
- && plci->adapter->Info_Mask[plci->appl->Id - 1] & Info_Mask)
- {
- iesent = true;
- dbug(1, dprintf("Mlt_Std_Ind"));
- sendf(plci->appl, _INFO_I, Id, 0, "wS", Info_Number, Info_Element);
- }
- }
- return iesent;
-}
-
-static void SendSSExtInd(APPL *appl, PLCI *plci, dword Id, byte **parms)
-{
- word i;
- /* Format of multi_ssext_parms[i][]:
- 0 byte length
- 1 byte SSEXTIE
- 2 byte SSEXT_REQ/SSEXT_IND
- 3 byte length
- 4 word SSExtCommand
- 6... Params
- */
- if (
- plci
- && plci->State
- && plci->Sig.Ind != NCR_FACILITY
- )
- for (i = 0; i < MAX_MULTI_IE; i++)
- {
- if (parms[i][0] < 6) continue;
- if (parms[i][2] == SSEXT_REQ) continue;
-
- if (appl)
- {
- parms[i][0] = 0; /* kill it */
- sendf(appl, _MANUFACTURER_I,
- Id,
- 0,
- "dwS",
- _DI_MANU_ID,
- _DI_SSEXT_CTRL,
- &parms[i][3]);
- }
- else if (plci->appl)
- {
- parms[i][0] = 0; /* kill it */
- sendf(plci->appl, _MANUFACTURER_I,
- Id,
- 0,
- "dwS",
- _DI_MANU_ID,
- _DI_SSEXT_CTRL,
- &parms[i][3]);
- }
- }
-};
-
-static void nl_ind(PLCI *plci)
-{
- byte ch;
- word ncci;
- dword Id;
- DIVA_CAPI_ADAPTER *a;
- word NCCIcode;
- APPL *APPLptr;
- word count;
- word Num;
- word i, ncpi_state;
- byte len, ncci_state;
- word msg;
- word info = 0;
- word fax_feature_bits;
- byte fax_send_edata_ack;
- static byte v120_header_buffer[2 + 3];
- static word fax_info[] = {
- 0, /* T30_SUCCESS */
- _FAX_NO_CONNECTION, /* T30_ERR_NO_DIS_RECEIVED */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_NO_RESPONSE */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_RESPONSE */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_TOO_MANY_REPEATS */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_UNEXPECTED_MESSAGE */
- _FAX_REMOTE_ABORT, /* T30_ERR_UNEXPECTED_DCN */
- _FAX_LOCAL_ABORT, /* T30_ERR_DTC_UNSUPPORTED */
- _FAX_TRAINING_ERROR, /* T30_ERR_ALL_RATES_FAILED */
- _FAX_TRAINING_ERROR, /* T30_ERR_TOO_MANY_TRAINS */
- _FAX_PARAMETER_ERROR, /* T30_ERR_RECEIVE_CORRUPTED */
- _FAX_REMOTE_ABORT, /* T30_ERR_UNEXPECTED_DISC */
- _FAX_LOCAL_ABORT, /* T30_ERR_APPLICATION_DISC */
- _FAX_REMOTE_REJECT, /* T30_ERR_INCOMPATIBLE_DIS */
- _FAX_LOCAL_ABORT, /* T30_ERR_INCOMPATIBLE_DCS */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_NO_COMMAND */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_COMMAND */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_COMMAND_TOO_LONG */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_TIMEOUT_RESPONSE_TOO_LONG */
- _FAX_NO_CONNECTION, /* T30_ERR_NOT_IDENTIFIED */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_SUPERVISORY_TIMEOUT */
- _FAX_PARAMETER_ERROR, /* T30_ERR_TOO_LONG_SCAN_LINE */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_PAGE_AFTER_MPS */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_PAGE_AFTER_CFR */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_FTT */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_EOM */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCS_AFTER_MPS */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCN_AFTER_MCF */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_DCN_AFTER_RTN */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_CFR */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_EOP */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_EOM */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_RETRY_NO_MCF_AFTER_MPS */
- 0x331d, /* T30_ERR_SUB_SEP_UNSUPPORTED */
- 0x331e, /* T30_ERR_PWD_UNSUPPORTED */
- 0x331f, /* T30_ERR_SUB_SEP_PWD_UNSUPPORTED */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_INVALID_COMMAND_FRAME */
- _FAX_PARAMETER_ERROR, /* T30_ERR_UNSUPPORTED_PAGE_CODING */
- _FAX_PARAMETER_ERROR, /* T30_ERR_INVALID_PAGE_CODING */
- _FAX_REMOTE_REJECT, /* T30_ERR_INCOMPATIBLE_PAGE_CONFIG */
- _FAX_LOCAL_ABORT, /* T30_ERR_TIMEOUT_FROM_APPLICATION */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_NO_REACTION_ON_MARK */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_TRAINING_TIMEOUT */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_UNEXPECTED_V21 */
- _FAX_PROTOCOL_ERROR, /* T30_ERR_V34FAX_PRIMARY_CTS_ON */
- _FAX_LOCAL_ABORT, /* T30_ERR_V34FAX_TURNAROUND_POLLING */
- _FAX_LOCAL_ABORT /* T30_ERR_V34FAX_V8_INCOMPATIBILITY */
- };
-
- byte dtmf_code_buffer[CAPIDTMF_RECV_DIGIT_BUFFER_SIZE + 1];
-
-
- static word rtp_info[] = {
- GOOD, /* RTP_SUCCESS */
- 0x3600 /* RTP_ERR_SSRC_OR_PAYLOAD_CHANGE */
- };
-
- static dword udata_forwarding_table[0x100 / sizeof(dword)] =
- {
- 0x0020301e, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000
- };
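- /* one bit per possible UDATA indication code (only the first 8 dwords, */
- /* i.e. 256 bits, are ever indexed); a set bit means the code is handed */
- /* to the application as plain data below instead of being dispatched */
- /* here (tested as table[code >> 5] & (1L << (code & 0x1f))) */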
-
- ch = plci->NL.IndCh;
- a = plci->adapter;
- ncci = a->ch_ncci[ch];
- Id = (((dword)(ncci ? ncci : ch)) << 16) | (((word) plci->Id) << 8) | a->Id;
- if (plci->tel) Id |= EXT_CONTROLLER;
- APPLptr = plci->appl;
- dbug(1, dprintf("NL_IND-Id(NL:0x%x)=0x%08lx,plci=%x,tel=%x,state=0x%x,ch=0x%x,chs=%d,Ind=%x",
- plci->NL.Id, Id, plci->Id, plci->tel, plci->State, ch, plci->channels, plci->NL.Ind & 0x0f));
-
- /* in the case that no connect_active_Ind was sent to the appl, we wait */
-
- if (plci->nl_remove_id)
- {
- plci->NL.RNR = 2; /* discard */
- dbug(1, dprintf("NL discard while remove pending"));
- return;
- }
- if ((plci->NL.Ind & 0x0f) == N_CONNECT)
- {
- if (plci->State == INC_DIS_PENDING
- || plci->State == OUTG_DIS_PENDING
- || plci->State == IDLE)
- {
- plci->NL.RNR = 2; /* discard */
- dbug(1, dprintf("discard n_connect"));
- return;
- }
- if (plci->State < INC_ACT_PENDING)
- {
- plci->NL.RNR = 1; /* flow control */
- channel_x_off(plci, ch, N_XON_CONNECT_IND);
- return;
- }
- }
-
- if (!APPLptr) /* no application or invalid data */
- { /* while reloading the DSP */
- dbug(1, dprintf("discard1"));
- plci->NL.RNR = 2;
- return;
- }
-
- if (((plci->NL.Ind & 0x0f) == N_UDATA)
- && (((plci->B2_prot != B2_SDLC) && ((plci->B1_resource == 17) || (plci->B1_resource == 18)))
- || (plci->B2_prot == 7)
- || (plci->B3_prot == 7)))
- {
- plci->ncpi_buffer[0] = 0;
-
- ncpi_state = plci->ncpi_state;
- if (plci->NL.complete == 1)
- {
- byte *data = &plci->NL.RBuffer->P[0];
-
- if ((plci->NL.RBuffer->length >= 12)
- && ((*data == DSP_UDATA_INDICATION_DCD_ON)
- || (*data == DSP_UDATA_INDICATION_CTS_ON)))
- {
- word conn_opt, ncpi_opt = 0x00;
-/* HexDump ("MDM N_UDATA:", plci->NL.RBuffer->length, data); */
-
- if (*data == DSP_UDATA_INDICATION_DCD_ON)
- plci->ncpi_state |= NCPI_MDM_DCD_ON_RECEIVED;
- if (*data == DSP_UDATA_INDICATION_CTS_ON)
- plci->ncpi_state |= NCPI_MDM_CTS_ON_RECEIVED;
-
- data++; /* indication code */
- data += 2; /* timestamp */
- if ((*data == DSP_CONNECTED_NORM_V18) || (*data == DSP_CONNECTED_NORM_VOWN))
- ncpi_state &= ~(NCPI_MDM_DCD_ON_RECEIVED | NCPI_MDM_CTS_ON_RECEIVED);
- data++; /* connected norm */
- conn_opt = GET_WORD(data);
- data += 2; /* connected options */
-
- PUT_WORD(&(plci->ncpi_buffer[1]), (word)(GET_DWORD(data) & 0x0000FFFF));
-
- if (conn_opt & DSP_CONNECTED_OPTION_MASK_V42)
- {
- ncpi_opt |= MDM_NCPI_ECM_V42;
- }
- else if (conn_opt & DSP_CONNECTED_OPTION_MASK_MNP)
- {
- ncpi_opt |= MDM_NCPI_ECM_MNP;
- }
- else
- {
- ncpi_opt |= MDM_NCPI_TRANSPARENT;
- }
- if (conn_opt & DSP_CONNECTED_OPTION_MASK_COMPRESSION)
- {
- ncpi_opt |= MDM_NCPI_COMPRESSED;
- }
- PUT_WORD(&(plci->ncpi_buffer[3]), ncpi_opt);
- plci->ncpi_buffer[0] = 4;
-
- plci->ncpi_state |= NCPI_VALID_CONNECT_B3_IND | NCPI_VALID_CONNECT_B3_ACT | NCPI_VALID_DISC_B3_IND;
- }
- }
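-		/* if a DCD/CTS indication was parsed above, ncpi_buffer now   */
-		/* holds: [0] length (4), [1..2] connect rate in bit/s,        */
-		/* [3..4] option bits (ECM/MNP/transparent, compression).      */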
- if (plci->B3_prot == 7)
- {
- if (((a->ncci_state[ncci] == INC_ACT_PENDING) || (a->ncci_state[ncci] == OUTG_CON_PENDING))
- && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
- && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
- {
- a->ncci_state[ncci] = INC_ACT_PENDING;
- sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
- plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
- }
- }
-
- if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
- & ((1L << PRIVATE_V18) | (1L << PRIVATE_VOWN)))
- || !(ncpi_state & NCPI_MDM_DCD_ON_RECEIVED)
- || !(ncpi_state & NCPI_MDM_CTS_ON_RECEIVED))
-
- {
- plci->NL.RNR = 2;
- return;
- }
- }
-
- if (plci->NL.complete == 2)
- {
- if (((plci->NL.Ind & 0x0f) == N_UDATA)
- && !(udata_forwarding_table[plci->RData[0].P[0] >> 5] & (1L << (plci->RData[0].P[0] & 0x1f))))
- {
- switch (plci->RData[0].P[0])
- {
-
- case DTMF_UDATA_INDICATION_FAX_CALLING_TONE:
- if (plci->dtmf_rec_active & DTMF_LISTEN_ACTIVE_FLAG)
- sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", SELECTOR_DTMF, "\x01X");
- break;
- case DTMF_UDATA_INDICATION_ANSWER_TONE:
- if (plci->dtmf_rec_active & DTMF_LISTEN_ACTIVE_FLAG)
- sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", SELECTOR_DTMF, "\x01Y");
- break;
- case DTMF_UDATA_INDICATION_DIGITS_RECEIVED:
- dtmf_indication(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
- break;
- case DTMF_UDATA_INDICATION_DIGITS_SENT:
- dtmf_confirmation(Id, plci);
- break;
-
-
- case UDATA_INDICATION_MIXER_TAP_DATA:
- capidtmf_recv_process_block(&(plci->capidtmf_state), plci->RData[0].P + 1, (word)(plci->RData[0].PLength - 1));
- i = capidtmf_indication(&(plci->capidtmf_state), dtmf_code_buffer + 1);
- if (i != 0)
- {
- dtmf_code_buffer[0] = DTMF_UDATA_INDICATION_DIGITS_RECEIVED;
- dtmf_indication(Id, plci, dtmf_code_buffer, (word)(i + 1));
- }
- break;
-
-
- case UDATA_INDICATION_MIXER_COEFS_SET:
- mixer_indication_coefs_set(Id, plci);
- break;
- case UDATA_INDICATION_XCONNECT_FROM:
- mixer_indication_xconnect_from(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
- break;
- case UDATA_INDICATION_XCONNECT_TO:
- mixer_indication_xconnect_to(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
- break;
-
-
- case LEC_UDATA_INDICATION_DISABLE_DETECT:
- ec_indication(Id, plci, plci->RData[0].P, plci->RData[0].PLength);
- break;
-
-
-
- default:
- break;
- }
- }
- else
- {
- if ((plci->RData[0].PLength != 0)
- && ((plci->B2_prot == B2_V120_ASYNC)
- || (plci->B2_prot == B2_V120_ASYNC_V42BIS)
- || (plci->B2_prot == B2_V120_BIT_TRANSPARENT)))
- {
-
- sendf(plci->appl, _DATA_B3_I, Id, 0,
- "dwww",
- plci->RData[1].P,
- (plci->NL.RNum < 2) ? 0 : plci->RData[1].PLength,
- plci->RNum,
- plci->RFlags);
-
- }
- else
- {
-
- sendf(plci->appl, _DATA_B3_I, Id, 0,
- "dwww",
- plci->RData[0].P,
- plci->RData[0].PLength,
- plci->RNum,
- plci->RFlags);
-
- }
- }
- return;
- }
-
- fax_feature_bits = 0;
- if ((plci->NL.Ind & 0x0f) == N_CONNECT ||
- (plci->NL.Ind & 0x0f) == N_CONNECT_ACK ||
- (plci->NL.Ind & 0x0f) == N_DISC ||
- (plci->NL.Ind & 0x0f) == N_EDATA ||
- (plci->NL.Ind & 0x0f) == N_DISC_ACK)
- {
- info = 0;
- plci->ncpi_buffer[0] = 0;
- switch (plci->B3_prot) {
- case 0: /*XPARENT*/
- case 1: /*T.90 NL*/
- break; /* no network control protocol info - jfr */
- case 2: /*ISO8202*/
- case 3: /*X25 DCE*/
- for (i = 0; i < plci->NL.RLength; i++) plci->ncpi_buffer[4 + i] = plci->NL.RBuffer->P[i];
- plci->ncpi_buffer[0] = (byte)(i + 3);
- plci->ncpi_buffer[1] = (byte)(plci->NL.Ind & N_D_BIT ? 1 : 0);
- plci->ncpi_buffer[2] = 0;
- plci->ncpi_buffer[3] = 0;
- break;
- case 4: /*T.30 - FAX*/
- case 5: /*T.30 - FAX*/
- if (plci->NL.RLength >= sizeof(T30_INFO))
- {
- dbug(1, dprintf("FaxStatus %04x", ((T30_INFO *)plci->NL.RBuffer->P)->code));
- len = 9;
- PUT_WORD(&(plci->ncpi_buffer[1]), ((T30_INFO *)plci->NL.RBuffer->P)->rate_div_2400 * 2400);
- fax_feature_bits = GET_WORD(&((T30_INFO *)plci->NL.RBuffer->P)->feature_bits_low);
- i = (((T30_INFO *)plci->NL.RBuffer->P)->resolution & T30_RESOLUTION_R8_0770_OR_200) ? 0x0001 : 0x0000;
- if (plci->B3_prot == 5)
- {
- if (!(fax_feature_bits & T30_FEATURE_BIT_ECM))
- i |= 0x8000; /* This is not an ECM connection */
- if (fax_feature_bits & T30_FEATURE_BIT_T6_CODING)
- i |= 0x4000; /* This is a connection with MMR compression */
- if (fax_feature_bits & T30_FEATURE_BIT_2D_CODING)
- i |= 0x2000; /* This is a connection with MR compression */
- if (fax_feature_bits & T30_FEATURE_BIT_MORE_DOCUMENTS)
- i |= 0x0004; /* More documents */
- if (fax_feature_bits & T30_FEATURE_BIT_POLLING)
- i |= 0x0002; /* Fax-polling indication */
- }
- dbug(1, dprintf("FAX Options %04x %04x", fax_feature_bits, i));
- PUT_WORD(&(plci->ncpi_buffer[3]), i);
- PUT_WORD(&(plci->ncpi_buffer[5]), ((T30_INFO *)plci->NL.RBuffer->P)->data_format);
- plci->ncpi_buffer[7] = ((T30_INFO *)plci->NL.RBuffer->P)->pages_low;
- plci->ncpi_buffer[8] = ((T30_INFO *)plci->NL.RBuffer->P)->pages_high;
- plci->ncpi_buffer[len] = 0;
- if (((T30_INFO *)plci->NL.RBuffer->P)->station_id_len)
- {
- plci->ncpi_buffer[len] = 20;
- for (i = 0; i < T30_MAX_STATION_ID_LENGTH; i++)
- plci->ncpi_buffer[++len] = ((T30_INFO *)plci->NL.RBuffer->P)->station_id[i];
- }
- if (((plci->NL.Ind & 0x0f) == N_DISC) || ((plci->NL.Ind & 0x0f) == N_DISC_ACK))
- {
- if (((T30_INFO *)plci->NL.RBuffer->P)->code < ARRAY_SIZE(fax_info))
- info = fax_info[((T30_INFO *)plci->NL.RBuffer->P)->code];
- else
- info = _FAX_PROTOCOL_ERROR;
- }
-
- if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
- & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
- {
- i = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + ((T30_INFO *)plci->NL.RBuffer->P)->head_line_len;
- while (i < plci->NL.RBuffer->length)
- plci->ncpi_buffer[++len] = plci->NL.RBuffer->P[i++];
- }
-
- plci->ncpi_buffer[0] = len;
- fax_feature_bits = GET_WORD(&((T30_INFO *)plci->NL.RBuffer->P)->feature_bits_low);
- PUT_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->feature_bits_low, fax_feature_bits);
-
- plci->ncpi_state |= NCPI_VALID_CONNECT_B3_IND;
- if (((plci->NL.Ind & 0x0f) == N_CONNECT_ACK)
- || (((plci->NL.Ind & 0x0f) == N_CONNECT)
- && (fax_feature_bits & T30_FEATURE_BIT_POLLING))
- || (((plci->NL.Ind & 0x0f) == N_EDATA)
- && ((((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_TRAIN_OK)
- || (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DIS)
- || (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DTC))))
- {
- plci->ncpi_state |= NCPI_VALID_CONNECT_B3_ACT;
- }
- if (((plci->NL.Ind & 0x0f) == N_DISC)
- || ((plci->NL.Ind & 0x0f) == N_DISC_ACK)
- || (((plci->NL.Ind & 0x0f) == N_EDATA)
- && (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_EOP_CAPI)))
- {
- plci->ncpi_state |= NCPI_VALID_CONNECT_B3_ACT | NCPI_VALID_DISC_B3_IND;
- }
- }
- break;
-
- case B3_RTP:
- if (((plci->NL.Ind & 0x0f) == N_DISC) || ((plci->NL.Ind & 0x0f) == N_DISC_ACK))
- {
- if (plci->NL.RLength != 0)
- {
- info = rtp_info[plci->NL.RBuffer->P[0]];
- plci->ncpi_buffer[0] = plci->NL.RLength - 1;
- for (i = 1; i < plci->NL.RLength; i++)
- plci->ncpi_buffer[i] = plci->NL.RBuffer->P[i];
- }
- }
- break;
-
- }
- plci->NL.RNR = 2;
- }
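-	/* for B3 protocols 4 and 5 the NCPI assembled above is laid out   */
-	/* as: [0] length, [1..2] rate, [3..4] option bits, [5..6] data    */
-	/* format, [7]/[8] page count low/high, then the station id and,   */
-	/* if the private options ask for it, the SUB/SEP/PWD and          */
-	/* non-standard facility strings.                                  */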
- switch (plci->NL.Ind & 0x0f) {
- case N_EDATA:
- if ((plci->B3_prot == 4) || (plci->B3_prot == 5))
- {
- dbug(1, dprintf("EDATA ncci=0x%x state=%d code=%02x", ncci, a->ncci_state[ncci],
- ((T30_INFO *)plci->NL.RBuffer->P)->code));
- fax_send_edata_ack = (((T30_INFO *)(plci->fax_connect_info_buffer))->operating_mode == T30_OPERATING_MODE_CAPI_NEG);
-
- if ((plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
- && (plci->nsf_control_bits & (T30_NSF_CONTROL_BIT_NEGOTIATE_IND | T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
- && (((T30_INFO *)plci->NL.RBuffer->P)->code == EDATA_T30_DIS)
- && (a->ncci_state[ncci] == OUTG_CON_PENDING)
- && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
- && !(plci->ncpi_state & NCPI_NEGOTIATE_B3_SENT))
- {
- ((T30_INFO *)(plci->fax_connect_info_buffer))->code = ((T30_INFO *)plci->NL.RBuffer->P)->code;
- sendf(plci->appl, _MANUFACTURER_I, Id, 0, "dwbS", _DI_MANU_ID, _DI_NEGOTIATE_B3,
- (byte)(plci->ncpi_buffer[0] + 1), plci->ncpi_buffer);
- plci->ncpi_state |= NCPI_NEGOTIATE_B3_SENT;
- if (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP)
- fax_send_edata_ack = false;
- }
-
- if (a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
- {
- switch (((T30_INFO *)plci->NL.RBuffer->P)->code)
- {
- case EDATA_T30_DIS:
- if ((a->ncci_state[ncci] == OUTG_CON_PENDING)
- && !(GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low) & T30_CONTROL_BIT_REQUEST_POLLING)
- && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
- && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
- {
- a->ncci_state[ncci] = INC_ACT_PENDING;
- if (plci->B3_prot == 4)
- sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
- else
- sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
- plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
- }
- break;
-
- case EDATA_T30_TRAIN_OK:
- if ((a->ncci_state[ncci] == INC_ACT_PENDING)
- && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
- && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
- {
- if (plci->B3_prot == 4)
- sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
- else
- sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
- plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
- }
- break;
-
- case EDATA_T30_EOP_CAPI:
- if (a->ncci_state[ncci] == CONNECTED)
- {
- sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", GOOD, plci->ncpi_buffer);
- a->ncci_state[ncci] = INC_DIS_PENDING;
- plci->ncpi_state = 0;
- fax_send_edata_ack = false;
- }
- break;
- }
- }
- else
- {
- switch (((T30_INFO *)plci->NL.RBuffer->P)->code)
- {
- case EDATA_T30_TRAIN_OK:
- if ((a->ncci_state[ncci] == INC_ACT_PENDING)
- && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
- && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
- {
- if (plci->B3_prot == 4)
- sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
- else
- sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
- plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
- }
- break;
- }
- }
- if (fax_send_edata_ack)
- {
- ((T30_INFO *)(plci->fax_connect_info_buffer))->code = ((T30_INFO *)plci->NL.RBuffer->P)->code;
- plci->fax_edata_ack_length = 1;
- start_internal_command(Id, plci, fax_edata_ack_command);
- }
- }
- else
- {
- dbug(1, dprintf("EDATA ncci=0x%x state=%d", ncci, a->ncci_state[ncci]));
- }
- break;
- case N_CONNECT:
- if (!a->ch_ncci[ch])
- {
- ncci = get_ncci(plci, ch, 0);
- Id = (Id & 0xffff) | (((dword) ncci) << 16);
- }
- dbug(1, dprintf("N_CONNECT: ch=%d state=%d plci=%lx plci_Id=%lx plci_State=%d",
- ch, a->ncci_state[ncci], a->ncci_plci[ncci], plci->Id, plci->State));
-
- msg = _CONNECT_B3_I;
- if (a->ncci_state[ncci] == IDLE)
- plci->channels++;
- else if (plci->B3_prot == 1)
- msg = _CONNECT_B3_T90_ACTIVE_I;
-
- a->ncci_state[ncci] = INC_CON_PENDING;
- if (plci->B3_prot == 4)
- sendf(plci->appl, msg, Id, 0, "s", "");
- else
- sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
- break;
- case N_CONNECT_ACK:
- dbug(1, dprintf("N_connect_Ack"));
- if (plci->internal_command_queue[0]
- && ((plci->adjust_b_state == ADJUST_B_CONNECT_2)
- || (plci->adjust_b_state == ADJUST_B_CONNECT_3)
- || (plci->adjust_b_state == ADJUST_B_CONNECT_4)))
- {
- (*(plci->internal_command_queue[0]))(Id, plci, 0);
- if (!plci->internal_command)
- next_internal_command(Id, plci);
- break;
- }
- msg = _CONNECT_B3_ACTIVE_I;
- if (plci->B3_prot == 1)
- {
- if (a->ncci_state[ncci] != OUTG_CON_PENDING)
- msg = _CONNECT_B3_T90_ACTIVE_I;
- a->ncci_state[ncci] = INC_ACT_PENDING;
- sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
- }
- else if ((plci->B3_prot == 4) || (plci->B3_prot == 5) || (plci->B3_prot == 7))
- {
- if ((a->ncci_state[ncci] == OUTG_CON_PENDING)
- && (plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
- && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
- {
- a->ncci_state[ncci] = INC_ACT_PENDING;
- if (plci->B3_prot == 4)
- sendf(plci->appl, msg, Id, 0, "s", "");
- else
- sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
- plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
- }
- }
- else
- {
- a->ncci_state[ncci] = INC_ACT_PENDING;
- sendf(plci->appl, msg, Id, 0, "S", plci->ncpi_buffer);
- }
- if (plci->adjust_b_restore)
- {
- plci->adjust_b_restore = false;
- start_internal_command(Id, plci, adjust_b_restore);
- }
- break;
- case N_DISC:
- case N_DISC_ACK:
- if (plci->internal_command_queue[0]
- && ((plci->internal_command == FAX_DISCONNECT_COMMAND_1)
- || (plci->internal_command == FAX_DISCONNECT_COMMAND_2)
- || (plci->internal_command == FAX_DISCONNECT_COMMAND_3)))
- {
- (*(plci->internal_command_queue[0]))(Id, plci, 0);
- if (!plci->internal_command)
- next_internal_command(Id, plci);
- }
- ncci_state = a->ncci_state[ncci];
- ncci_remove(plci, ncci, false);
-
-	/* with N_DISC or N_DISC_ACK the IDI frees the respective  */
-	/* channel, so we cannot store the state in ncci_state!    */
-	/* The information on which channel we received the N_DISC */
-	/* is therefore stored in the inc_dis_ncci_table buffer.   */
- for (i = 0; plci->inc_dis_ncci_table[i]; i++);
- plci->inc_dis_ncci_table[i] = (byte) ncci;
-
- /* need a connect_b3_ind before a disconnect_b3_ind with FAX */
- if (!plci->channels
- && (plci->B1_resource == 16)
- && (plci->State <= CONNECTED))
- {
- len = 9;
- i = ((T30_INFO *)plci->fax_connect_info_buffer)->rate_div_2400 * 2400;
- PUT_WORD(&plci->ncpi_buffer[1], i);
- PUT_WORD(&plci->ncpi_buffer[3], 0);
- i = ((T30_INFO *)plci->fax_connect_info_buffer)->data_format;
- PUT_WORD(&plci->ncpi_buffer[5], i);
- PUT_WORD(&plci->ncpi_buffer[7], 0);
- plci->ncpi_buffer[len] = 0;
- plci->ncpi_buffer[0] = len;
- if (plci->B3_prot == 4)
- sendf(plci->appl, _CONNECT_B3_I, Id, 0, "s", "");
- else
- {
-
- if ((plci->requested_options_conn | plci->requested_options | a->requested_options_table[plci->appl->Id - 1])
- & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
- {
- plci->ncpi_buffer[++len] = 0;
- plci->ncpi_buffer[++len] = 0;
- plci->ncpi_buffer[++len] = 0;
- plci->ncpi_buffer[0] = len;
- }
-
- sendf(plci->appl, _CONNECT_B3_I, Id, 0, "S", plci->ncpi_buffer);
- }
- sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", info, plci->ncpi_buffer);
- plci->ncpi_state = 0;
- sig_req(plci, HANGUP, 0);
- send_req(plci);
- plci->State = OUTG_DIS_PENDING;
- /* disc here */
- }
- else if ((a->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
- && ((plci->B3_prot == 4) || (plci->B3_prot == 5))
- && ((ncci_state == INC_DIS_PENDING) || (ncci_state == IDLE)))
- {
- if (ncci_state == IDLE)
- {
- if (plci->channels)
- plci->channels--;
- if ((plci->State == IDLE || plci->State == SUSPENDING) && !plci->channels) {
- if (plci->State == SUSPENDING) {
- sendf(plci->appl,
- _FACILITY_I,
- Id & 0xffffL,
- 0,
- "ws", (word)3, "\x03\x04\x00\x00");
- sendf(plci->appl, _DISCONNECT_I, Id & 0xffffL, 0, "w", 0);
- }
- plci_remove(plci);
- plci->State = IDLE;
- }
- }
- }
- else if (plci->channels)
- {
- sendf(plci->appl, _DISCONNECT_B3_I, Id, 0, "wS", info, plci->ncpi_buffer);
- plci->ncpi_state = 0;
- if ((ncci_state == OUTG_REJ_PENDING)
- && ((plci->B3_prot != B3_T90NL) && (plci->B3_prot != B3_ISO8208) && (plci->B3_prot != B3_X25_DCE)))
- {
- sig_req(plci, HANGUP, 0);
- send_req(plci);
- plci->State = OUTG_DIS_PENDING;
- }
- }
- break;
- case N_RESET:
- a->ncci_state[ncci] = INC_RES_PENDING;
- sendf(plci->appl, _RESET_B3_I, Id, 0, "S", plci->ncpi_buffer);
- break;
- case N_RESET_ACK:
- a->ncci_state[ncci] = CONNECTED;
- sendf(plci->appl, _RESET_B3_I, Id, 0, "S", plci->ncpi_buffer);
- break;
-
- case N_UDATA:
- if (!(udata_forwarding_table[plci->NL.RBuffer->P[0] >> 5] & (1L << (plci->NL.RBuffer->P[0] & 0x1f))))
- {
- plci->RData[0].P = plci->internal_ind_buffer + (-((int)(long)(plci->internal_ind_buffer)) & 3);
- plci->RData[0].PLength = INTERNAL_IND_BUFFER_SIZE;
- plci->NL.R = plci->RData;
- plci->NL.RNum = 1;
- return;
- }
- /* fall through */
- case N_BDATA:
- case N_DATA:
- if (((a->ncci_state[ncci] != CONNECTED) && (plci->B2_prot == 1)) /* transparent */
- || (a->ncci_state[ncci] == IDLE)
- || (a->ncci_state[ncci] == INC_DIS_PENDING))
- {
- plci->NL.RNR = 2;
- break;
- }
- if ((a->ncci_state[ncci] != CONNECTED)
- && (a->ncci_state[ncci] != OUTG_DIS_PENDING)
- && (a->ncci_state[ncci] != OUTG_REJ_PENDING))
- {
- dbug(1, dprintf("flow control"));
- plci->NL.RNR = 1; /* flow control */
- channel_x_off(plci, ch, 0);
- break;
- }
-
- NCCIcode = ncci | (((word)a->Id) << 8);
-
- /* count all buffers within the Application pool */
- /* belonging to the same NCCI. If this is below the */
- /* number of buffers available per NCCI we accept */
- /* this packet, otherwise we reject it */
- count = 0;
- Num = 0xffff;
- for (i = 0; i < APPLptr->MaxBuffer; i++) {
- if (NCCIcode == APPLptr->DataNCCI[i]) count++;
- if (!APPLptr->DataNCCI[i] && Num == 0xffff) Num = i;
- }
-
- if (count >= APPLptr->MaxNCCIData || Num == 0xffff)
- {
- dbug(3, dprintf("Flow-Control"));
- plci->NL.RNR = 1;
- if (++(APPLptr->NCCIDataFlowCtrlTimer) >=
- (word)((a->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL) ? 40 : 2000))
- {
- plci->NL.RNR = 2;
- dbug(3, dprintf("DiscardData"));
- } else {
- channel_x_off(plci, ch, 0);
- }
- break;
- }
- else
- {
- APPLptr->NCCIDataFlowCtrlTimer = 0;
- }
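-		/* NCCIDataFlowCtrlTimer escalates from flow control (RNR = 1) */
-		/* to discard (RNR = 2) after 40 congested indications on      */
-		/* adapters with an OOB channel, 2000 otherwise.               */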
-
- plci->RData[0].P = ReceiveBufferGet(APPLptr, Num);
- if (!plci->RData[0].P) {
- plci->NL.RNR = 1;
- channel_x_off(plci, ch, 0);
- break;
- }
-
- APPLptr->DataNCCI[Num] = NCCIcode;
- APPLptr->DataFlags[Num] = (plci->Id << 8) | (plci->NL.Ind >> 4);
- dbug(3, dprintf("Buffer(%d), Max = %d", Num, APPLptr->MaxBuffer));
-
- plci->RNum = Num;
- plci->RFlags = plci->NL.Ind >> 4;
- plci->RData[0].PLength = APPLptr->MaxDataLength;
- plci->NL.R = plci->RData;
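-		/* V.120: the frame is split across two descriptors - the 1-   */
-		/* or 2-byte V.120 header is received into a scratch buffer    */
-		/* (RData[0]) while the payload goes to the application buffer */
-		/* (RData[1]); break and C1/C2 bits are mapped into RFlags.    */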
- if ((plci->NL.RLength != 0)
- && ((plci->B2_prot == B2_V120_ASYNC)
- || (plci->B2_prot == B2_V120_ASYNC_V42BIS)
- || (plci->B2_prot == B2_V120_BIT_TRANSPARENT)))
- {
- plci->RData[1].P = plci->RData[0].P;
- plci->RData[1].PLength = plci->RData[0].PLength;
- plci->RData[0].P = v120_header_buffer + (-((unsigned long)v120_header_buffer) & 3);
- if ((plci->NL.RBuffer->P[0] & V120_HEADER_EXTEND_BIT) || (plci->NL.RLength == 1))
- plci->RData[0].PLength = 1;
- else
- plci->RData[0].PLength = 2;
- if (plci->NL.RBuffer->P[0] & V120_HEADER_BREAK_BIT)
- plci->RFlags |= 0x0010;
- if (plci->NL.RBuffer->P[0] & (V120_HEADER_C1_BIT | V120_HEADER_C2_BIT))
- plci->RFlags |= 0x8000;
- plci->NL.RNum = 2;
- }
- else
- {
- if ((plci->NL.Ind & 0x0f) == N_UDATA)
- plci->RFlags |= 0x0010;
-
- else if ((plci->B3_prot == B3_RTP) && ((plci->NL.Ind & 0x0f) == N_BDATA))
- plci->RFlags |= 0x0001;
-
- plci->NL.RNum = 1;
- }
- break;
- case N_DATA_ACK:
- data_ack(plci, ch);
- break;
- default:
- plci->NL.RNR = 2;
- break;
- }
-}
-
-/*------------------------------------------------------------------*/
-/* find a free PLCI */
-/*------------------------------------------------------------------*/
-
-static word get_plci(DIVA_CAPI_ADAPTER *a)
-{
- word i, j;
- PLCI *plci;
-
- for (i = 0; i < a->max_plci && a->plci[i].Id; i++);
- if (i == a->max_plci) {
- dbug(1, dprintf("get_plci: out of PLCIs"));
- return 0;
- }
- plci = &a->plci[i];
- plci->Id = (byte)(i + 1);
-
- plci->Sig.Id = 0;
- plci->NL.Id = 0;
- plci->sig_req = 0;
- plci->nl_req = 0;
-
- plci->appl = NULL;
- plci->relatedPTYPLCI = NULL;
- plci->State = IDLE;
- plci->SuppState = IDLE;
- plci->channels = 0;
- plci->tel = 0;
- plci->B1_resource = 0;
- plci->B2_prot = 0;
- plci->B3_prot = 0;
-
- plci->command = 0;
- plci->m_command = 0;
- init_internal_command_queue(plci);
- plci->number = 0;
- plci->req_in_start = 0;
- plci->req_in = 0;
- plci->req_out = 0;
- plci->msg_in_write_pos = MSG_IN_QUEUE_SIZE;
- plci->msg_in_read_pos = MSG_IN_QUEUE_SIZE;
- plci->msg_in_wrap_pos = MSG_IN_QUEUE_SIZE;
-
- plci->data_sent = false;
- plci->send_disc = 0;
- plci->sig_global_req = 0;
- plci->sig_remove_id = 0;
- plci->nl_global_req = 0;
- plci->nl_remove_id = 0;
- plci->adv_nl = 0;
- plci->manufacturer = false;
- plci->call_dir = CALL_DIR_OUT | CALL_DIR_ORIGINATE;
- plci->spoofed_msg = 0;
- plci->ptyState = 0;
- plci->cr_enquiry = false;
- plci->hangup_flow_ctrl_timer = 0;
-
- plci->ncci_ring_list = 0;
- for (j = 0; j < MAX_CHANNELS_PER_PLCI; j++) plci->inc_dis_ncci_table[j] = 0;
- bitmap_zero(plci->c_ind_mask_table, MAX_APPL);
- bitmap_fill(plci->group_optimization_mask_table, MAX_APPL);
- plci->fax_connect_info_length = 0;
- plci->nsf_control_bits = 0;
- plci->ncpi_state = 0x00;
- plci->ncpi_buffer[0] = 0;
-
- plci->requested_options_conn = 0;
- plci->requested_options = 0;
- plci->notifiedcall = 0;
- plci->vswitchstate = 0;
- plci->vsprot = 0;
- plci->vsprotdialect = 0;
- init_b1_config(plci);
- dbug(1, dprintf("get_plci(%x)", plci->Id));
- return i + 1;
-}
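-/* get_plci() returns the 1-based index of the allocated PLCI, or 0 if  */
-/* the adapter is out of PLCIs. A sketched call site (error handling    */
-/* only illustrative):                                                  */
-/*                                                                      */
-/*	if (!(i = get_plci(a)))                                          */
-/*		return;                     surface an out-of-PLCI error */
-/*	plci = &a->plci[i - 1];                                          */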
-
-/*------------------------------------------------------------------*/
-/* put a parameter in the parameter buffer */
-/*------------------------------------------------------------------*/
-
-static void add_p(PLCI *plci, byte code, byte *p)
-{
- word p_length;
-
- p_length = 0;
- if (p) p_length = p[0];
- add_ie(plci, code, p, p_length);
-}
-
-/*------------------------------------------------------------------*/
-/* put a structure in the parameter buffer */
-/*------------------------------------------------------------------*/
-static void add_s(PLCI *plci, byte code, API_PARSE *p)
-{
- if (p) add_ie(plci, code, p->info, (word)p->length);
-}
-
-/*------------------------------------------------------------------*/
-/* put multiple structures in the parameter buffer */
-/*------------------------------------------------------------------*/
-static void add_ss(PLCI *plci, byte code, API_PARSE *p)
-{
- byte i;
-
- if (p) {
- dbug(1, dprintf("add_ss(%x,len=%d)", code, p->length));
- for (i = 2; i < (byte)p->length; i += p->info[i] + 2) {
- dbug(1, dprintf("add_ss_ie(%x,len=%d)", p->info[i - 1], p->info[i]));
- add_ie(plci, p->info[i - 1], (byte *)&(p->info[i]), (word)p->info[i]);
- }
- }
-}
-
-/*------------------------------------------------------------------*/
-/* return the channel number sent by the application in an esc_chi */
-/*------------------------------------------------------------------*/
-static byte getChannel(API_PARSE *p)
-{
- byte i;
-
- if (p) {
- for (i = 2; i < (byte)p->length; i += p->info[i] + 2) {
- if (p->info[i] == 2) {
- if (p->info[i - 1] == ESC && p->info[i + 1] == CHI) return (p->info[i + 2]);
- }
- }
- }
- return 0;
-}
-
-
-/*------------------------------------------------------------------*/
-/* put an information element in the parameter buffer */
-/*------------------------------------------------------------------*/
-
-static void add_ie(PLCI *plci, byte code, byte *p, word p_length)
-{
- word i;
-
- if (!(code & 0x80) && !p_length) return;
-
- if (plci->req_in == plci->req_in_start) {
- plci->req_in += 2;
- }
- else {
- plci->req_in--;
- }
- plci->RBuffer[plci->req_in++] = code;
-
- if (p) {
- plci->RBuffer[plci->req_in++] = (byte)p_length;
- for (i = 0; i < p_length; i++) plci->RBuffer[plci->req_in++] = p[1 + i];
- }
-
- plci->RBuffer[plci->req_in++] = 0;
-}
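-/* add_ie() appends "code, length, payload bytes, 0" to plci->RBuffer.  */
-/* The first IE of a request leaves two bytes of room for the request   */
-/* header (req_in += 2); each further IE first steps back over the      */
-/* trailing 0 of its predecessor (req_in--), so the buffer stays        */
-/* densely packed and always ends in a 0 terminator.                    */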
-
-/*------------------------------------------------------------------*/
-/* put unstructured data into the buffer */
-/*------------------------------------------------------------------*/
-
-static void add_d(PLCI *plci, word length, byte *p)
-{
- word i;
-
- if (plci->req_in == plci->req_in_start) {
- plci->req_in += 2;
- }
- else {
- plci->req_in--;
- }
- for (i = 0; i < length; i++) plci->RBuffer[plci->req_in++] = p[i];
-}
-
-/*------------------------------------------------------------------*/
-/* put parameters from the Additional Info parameter in the */
-/* parameter buffer */
-/*------------------------------------------------------------------*/
-
-static void add_ai(PLCI *plci, API_PARSE *ai)
-{
- word i;
- API_PARSE ai_parms[5];
-
- for (i = 0; i < 5; i++) ai_parms[i].length = 0;
-
- if (!ai->length)
- return;
- if (api_parse(&ai->info[1], (word)ai->length, "ssss", ai_parms))
- return;
-
- add_s(plci, KEY, &ai_parms[1]);
- add_s(plci, UUI, &ai_parms[2]);
- add_ss(plci, FTY, &ai_parms[3]);
-}
-
-/*------------------------------------------------------------------*/
-/* put the parameters for the B1 protocol in the parameter buffer */
-/*------------------------------------------------------------------*/
-
-static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
- word b1_facilities)
-{
- API_PARSE bp_parms[8];
- API_PARSE mdm_cfg[9];
- API_PARSE global_config[2];
- byte cai[256];
- byte resource[] = {5, 9, 13, 12, 16, 39, 9, 17, 17, 18};
- byte voice_cai[] = "\x06\x14\x00\x00\x00\x00\x08";
- word i;
-
- API_PARSE mdm_cfg_v18[4];
- word j, n, w;
- dword d;
-
-
- for (i = 0; i < 8; i++) bp_parms[i].length = 0;
- for (i = 0; i < 2; i++) global_config[i].length = 0;
-
- dbug(1, dprintf("add_b1"));
- api_save_msg(bp, "s", &plci->B_protocol);
-
- if (b_channel_info == 2) {
- plci->B1_resource = 0;
- adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
- add_p(plci, CAI, "\x01\x00");
- dbug(1, dprintf("Cai=1,0 (no resource)"));
- return 0;
- }
-
- if (plci->tel == CODEC_PERMANENT) return 0;
- else if (plci->tel == CODEC) {
- plci->B1_resource = 1;
- adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
- add_p(plci, CAI, "\x01\x01");
- dbug(1, dprintf("Cai=1,1 (Codec)"));
- return 0;
- }
- else if (plci->tel == ADV_VOICE) {
- plci->B1_resource = add_b1_facilities(plci, 9, (word)(b1_facilities | B1_FACILITY_VOICE));
- adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities | B1_FACILITY_VOICE));
- voice_cai[1] = plci->B1_resource;
- PUT_WORD(&voice_cai[5], plci->appl->MaxDataLength);
- add_p(plci, CAI, voice_cai);
- dbug(1, dprintf("Cai=1,0x%x (AdvVoice)", voice_cai[1]));
- return 0;
- }
- plci->call_dir &= ~(CALL_DIR_ORIGINATE | CALL_DIR_ANSWER);
- if (plci->call_dir & CALL_DIR_OUT)
- plci->call_dir |= CALL_DIR_ORIGINATE;
- else if (plci->call_dir & CALL_DIR_IN)
- plci->call_dir |= CALL_DIR_ANSWER;
-
- if (!bp->length) {
- plci->B1_resource = 0x5;
- adjust_b1_facilities(plci, plci->B1_resource, b1_facilities);
- add_p(plci, CAI, "\x01\x05");
- return 0;
- }
-
- dbug(1, dprintf("b_prot_len=%d", (word)bp->length));
- if (bp->length > 256) return _WRONG_MESSAGE_FORMAT;
- if (api_parse(&bp->info[1], (word)bp->length, "wwwsssb", bp_parms))
- {
- bp_parms[6].length = 0;
- if (api_parse(&bp->info[1], (word)bp->length, "wwwsss", bp_parms))
- {
- dbug(1, dprintf("b-form.!"));
- return _WRONG_MESSAGE_FORMAT;
- }
- }
- else if (api_parse(&bp->info[1], (word)bp->length, "wwwssss", bp_parms))
- {
- dbug(1, dprintf("b-form.!"));
- return _WRONG_MESSAGE_FORMAT;
- }
-
- if (bp_parms[6].length)
- {
- if (api_parse(&bp_parms[6].info[1], (word)bp_parms[6].length, "w", global_config))
- {
- return _WRONG_MESSAGE_FORMAT;
- }
- switch (GET_WORD(global_config[0].info))
- {
- case 1:
- plci->call_dir = (plci->call_dir & ~CALL_DIR_ANSWER) | CALL_DIR_ORIGINATE;
- break;
- case 2:
- plci->call_dir = (plci->call_dir & ~CALL_DIR_ORIGINATE) | CALL_DIR_ANSWER;
- break;
- }
- }
- dbug(1, dprintf("call_dir=%04x", plci->call_dir));
-
-
- if ((GET_WORD(bp_parms[0].info) == B1_RTP)
- && (plci->adapter->man_profile.private_options & (1L << PRIVATE_RTP)))
- {
- plci->B1_resource = add_b1_facilities(plci, 31, (word)(b1_facilities & ~B1_FACILITY_VOICE));
- adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
- cai[1] = plci->B1_resource;
- cai[2] = 0;
- cai[3] = 0;
- cai[4] = 0;
- PUT_WORD(&cai[5], plci->appl->MaxDataLength);
- for (i = 0; i < bp_parms[3].length; i++)
- cai[7 + i] = bp_parms[3].info[1 + i];
- cai[0] = 6 + bp_parms[3].length;
- add_p(plci, CAI, cai);
- return 0;
- }
-
-
- if ((GET_WORD(bp_parms[0].info) == B1_PIAFS)
- && (plci->adapter->man_profile.private_options & (1L << PRIVATE_PIAFS)))
- {
- plci->B1_resource = add_b1_facilities(plci, 35/* PIAFS HARDWARE FACILITY */, (word)(b1_facilities & ~B1_FACILITY_VOICE));
- adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
- cai[1] = plci->B1_resource;
- cai[2] = 0;
- cai[3] = 0;
- cai[4] = 0;
- PUT_WORD(&cai[5], plci->appl->MaxDataLength);
- cai[0] = 6;
- add_p(plci, CAI, cai);
- return 0;
- }
-
-
- if ((GET_WORD(bp_parms[0].info) >= 32)
- || (!((1L << GET_WORD(bp_parms[0].info)) & plci->adapter->profile.B1_Protocols)
- && ((GET_WORD(bp_parms[0].info) != 3)
- || !((1L << B1_HDLC) & plci->adapter->profile.B1_Protocols)
- || ((bp_parms[3].length != 0) && (GET_WORD(&bp_parms[3].info[1]) != 0) && (GET_WORD(&bp_parms[3].info[1]) != 56000)))))
- {
- return _B1_NOT_SUPPORTED;
- }
- plci->B1_resource = add_b1_facilities(plci, resource[GET_WORD(bp_parms[0].info)],
- (word)(b1_facilities & ~B1_FACILITY_VOICE));
- adjust_b1_facilities(plci, plci->B1_resource, (word)(b1_facilities & ~B1_FACILITY_VOICE));
- cai[0] = 6;
- cai[1] = plci->B1_resource;
- for (i = 2; i < sizeof(cai); i++) cai[i] = 0;
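-	/* cai[] layout used below: [0] length, [1] B1 resource, [2] rate  */
-	/* code, [3] async framing bits, [5..6] max data length; for the   */
-	/* modem resources also [7] line-taking, [8] negotiation and [9]   */
-	/* modulation options plus [13..20] min/max Tx/Rx speeds.          */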
-
- if ((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
- || (GET_WORD(bp_parms[0].info) == B1_MODEM_ASYNC)
- || (GET_WORD(bp_parms[0].info) == B1_MODEM_SYNC_HDLC))
- { /* B1 - modem */
- for (i = 0; i < 7; i++) mdm_cfg[i].length = 0;
-
- if (bp_parms[3].length)
- {
- if (api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwww", mdm_cfg))
- {
- return (_WRONG_MESSAGE_FORMAT);
- }
-
- cai[2] = 0; /* Bit rate for adaptation */
-
- dbug(1, dprintf("MDM Max Bit Rate:<%d>", GET_WORD(mdm_cfg[0].info)));
-
- PUT_WORD(&cai[13], 0); /* Min Tx speed */
- PUT_WORD(&cai[15], GET_WORD(mdm_cfg[0].info)); /* Max Tx speed */
- PUT_WORD(&cai[17], 0); /* Min Rx speed */
- PUT_WORD(&cai[19], GET_WORD(mdm_cfg[0].info)); /* Max Rx speed */
-
- cai[3] = 0; /* Async framing parameters */
- switch (GET_WORD(mdm_cfg[2].info))
- { /* Parity */
- case 1: /* odd parity */
- cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_ODD);
- dbug(1, dprintf("MDM: odd parity"));
- break;
-
- case 2: /* even parity */
- cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_EVEN);
- dbug(1, dprintf("MDM: even parity"));
- break;
-
- default:
- dbug(1, dprintf("MDM: no parity"));
- break;
- }
-
- switch (GET_WORD(mdm_cfg[3].info))
- { /* stop bits */
- case 1: /* 2 stop bits */
- cai[3] |= DSP_CAI_ASYNC_TWO_STOP_BITS;
- dbug(1, dprintf("MDM: 2 stop bits"));
- break;
-
- default:
- dbug(1, dprintf("MDM: 1 stop bit"));
- break;
- }
-
- switch (GET_WORD(mdm_cfg[1].info))
- { /* char length */
- case 5:
- cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_5;
- dbug(1, dprintf("MDM: 5 bits"));
- break;
-
- case 6:
- cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_6;
- dbug(1, dprintf("MDM: 6 bits"));
- break;
-
- case 7:
- cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_7;
- dbug(1, dprintf("MDM: 7 bits"));
- break;
-
- default:
- dbug(1, dprintf("MDM: 8 bits"));
- break;
- }
-
- cai[7] = 0; /* Line taking options */
- cai[8] = 0; /* Modulation negotiation options */
- cai[9] = 0; /* Modulation options */
-
- if (((plci->call_dir & CALL_DIR_ORIGINATE) != 0) ^ ((plci->call_dir & CALL_DIR_OUT) != 0))
- {
- cai[9] |= DSP_CAI_MODEM_REVERSE_DIRECTION;
- dbug(1, dprintf("MDM: Reverse direction"));
- }
-
- if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_DISABLE_RETRAIN)
- {
- cai[9] |= DSP_CAI_MODEM_DISABLE_RETRAIN;
- dbug(1, dprintf("MDM: Disable retrain"));
- }
-
- if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_DISABLE_RING_TONE)
- {
- cai[7] |= DSP_CAI_MODEM_DISABLE_CALLING_TONE | DSP_CAI_MODEM_DISABLE_ANSWER_TONE;
- dbug(1, dprintf("MDM: Disable ring tone"));
- }
-
- if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_GUARD_1800)
- {
- cai[8] |= DSP_CAI_MODEM_GUARD_TONE_1800HZ;
- dbug(1, dprintf("MDM: 1800 guard tone"));
- }
- else if (GET_WORD(mdm_cfg[4].info) & MDM_CAPI_GUARD_550)
- {
- cai[8] |= DSP_CAI_MODEM_GUARD_TONE_550HZ;
- dbug(1, dprintf("MDM: 550 guard tone"));
- }
-
- if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_V100)
- {
- cai[8] |= DSP_CAI_MODEM_NEGOTIATE_V100;
- dbug(1, dprintf("MDM: V100"));
- }
- else if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_MOD_CLASS)
- {
- cai[8] |= DSP_CAI_MODEM_NEGOTIATE_IN_CLASS;
- dbug(1, dprintf("MDM: IN CLASS"));
- }
- else if ((GET_WORD(mdm_cfg[5].info) & 0x00ff) == MDM_CAPI_NEG_DISABLED)
- {
- cai[8] |= DSP_CAI_MODEM_NEGOTIATE_DISABLED;
- dbug(1, dprintf("MDM: DISABLED"));
- }
- cai[0] = 20;
-
- if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_V18))
- && (GET_WORD(mdm_cfg[5].info) & 0x8000)) /* Private V.18 enable */
- {
- plci->requested_options |= 1L << PRIVATE_V18;
- }
- if (GET_WORD(mdm_cfg[5].info) & 0x4000) /* Private VOWN enable */
- plci->requested_options |= 1L << PRIVATE_VOWN;
-
- if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
- & ((1L << PRIVATE_V18) | (1L << PRIVATE_VOWN)))
- {
- if (!api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwwws", mdm_cfg))
- {
- i = 27;
- if (mdm_cfg[6].length >= 4)
- {
- d = GET_DWORD(&mdm_cfg[6].info[1]);
- cai[7] |= (byte) d; /* line taking options */
- cai[9] |= (byte)(d >> 8); /* modulation options */
- cai[++i] = (byte)(d >> 16); /* vown modulation options */
- cai[++i] = (byte)(d >> 24);
- if (mdm_cfg[6].length >= 8)
- {
- d = GET_DWORD(&mdm_cfg[6].info[5]);
- cai[10] |= (byte) d; /* disabled modulations mask */
- cai[11] |= (byte)(d >> 8);
- if (mdm_cfg[6].length >= 12)
- {
- d = GET_DWORD(&mdm_cfg[6].info[9]);
- cai[12] = (byte) d; /* enabled modulations mask */
- cai[++i] = (byte)(d >> 8); /* vown enabled modulations */
- cai[++i] = (byte)(d >> 16);
- cai[++i] = (byte)(d >> 24);
- cai[++i] = 0;
- if (mdm_cfg[6].length >= 14)
- {
- w = GET_WORD(&mdm_cfg[6].info[13]);
- if (w != 0)
- PUT_WORD(&cai[13], w); /* min tx speed */
- if (mdm_cfg[6].length >= 16)
- {
- w = GET_WORD(&mdm_cfg[6].info[15]);
- if (w != 0)
- PUT_WORD(&cai[15], w); /* max tx speed */
- if (mdm_cfg[6].length >= 18)
- {
- w = GET_WORD(&mdm_cfg[6].info[17]);
- if (w != 0)
- PUT_WORD(&cai[17], w); /* min rx speed */
- if (mdm_cfg[6].length >= 20)
- {
- w = GET_WORD(&mdm_cfg[6].info[19]);
- if (w != 0)
- PUT_WORD(&cai[19], w); /* max rx speed */
- if (mdm_cfg[6].length >= 22)
- {
- w = GET_WORD(&mdm_cfg[6].info[21]);
- cai[23] = (byte)(-((short) w)); /* transmit level */
- if (mdm_cfg[6].length >= 24)
- {
- w = GET_WORD(&mdm_cfg[6].info[23]);
- cai[22] |= (byte) w; /* info options mask */
- cai[21] |= (byte)(w >> 8); /* disabled symbol rates */
- }
- }
- }
- }
- }
- }
- }
- }
- }
- cai[27] = i - 27;
- i++;
- if (!api_parse(&bp_parms[3].info[1], (word)bp_parms[3].length, "wwwwwwss", mdm_cfg))
- {
- if (!api_parse(&mdm_cfg[7].info[1], (word)mdm_cfg[7].length, "sss", mdm_cfg_v18))
- {
- for (n = 0; n < 3; n++)
- {
- cai[i] = (byte)(mdm_cfg_v18[n].length);
- for (j = 1; j < ((word)(cai[i] + 1)); j++)
- cai[i + j] = mdm_cfg_v18[n].info[j];
- i += cai[i] + 1;
- }
- }
- }
- cai[0] = (byte)(i - 1);
- }
- }
-
- }
- }
- if (GET_WORD(bp_parms[0].info) == 2 || /* V.110 async */
- GET_WORD(bp_parms[0].info) == 3) /* V.110 sync */
- {
- if (bp_parms[3].length) {
- dbug(1, dprintf("V.110,%d", GET_WORD(&bp_parms[3].info[1])));
- switch (GET_WORD(&bp_parms[3].info[1])) { /* Rate */
- case 0:
- case 56000:
- if (GET_WORD(bp_parms[0].info) == 3) { /* V.110 sync 56k */
- dbug(1, dprintf("56k sync HSCX"));
- cai[1] = 8;
- cai[2] = 0;
- cai[3] = 0;
- }
- else if (GET_WORD(bp_parms[0].info) == 2) {
- dbug(1, dprintf("56k async DSP"));
- cai[2] = 9;
- }
- break;
- case 50: cai[2] = 1; break;
- case 75: cai[2] = 1; break;
- case 110: cai[2] = 1; break;
- case 150: cai[2] = 1; break;
- case 200: cai[2] = 1; break;
- case 300: cai[2] = 1; break;
- case 600: cai[2] = 1; break;
- case 1200: cai[2] = 2; break;
- case 2400: cai[2] = 3; break;
- case 4800: cai[2] = 4; break;
- case 7200: cai[2] = 10; break;
- case 9600: cai[2] = 5; break;
- case 12000: cai[2] = 13; break;
- case 24000: cai[2] = 0; break;
- case 14400: cai[2] = 11; break;
- case 19200: cai[2] = 6; break;
- case 28800: cai[2] = 12; break;
- case 38400: cai[2] = 7; break;
- case 48000: cai[2] = 8; break;
- case 76: cai[2] = 15; break; /* 75/1200 */
- case 1201: cai[2] = 14; break; /* 1200/75 */
- case 56001: cai[2] = 9; break; /* V.110 56000 */
-
- default:
- return _B1_PARM_NOT_SUPPORTED;
- }
- cai[3] = 0;
- if (cai[1] == 13) /* v.110 async */
- {
- if (bp_parms[3].length >= 8)
- {
- switch (GET_WORD(&bp_parms[3].info[3]))
- { /* char length */
- case 5:
- cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_5;
- break;
- case 6:
- cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_6;
- break;
- case 7:
- cai[3] |= DSP_CAI_ASYNC_CHAR_LENGTH_7;
- break;
- }
- switch (GET_WORD(&bp_parms[3].info[5]))
- { /* Parity */
- case 1: /* odd parity */
- cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_ODD);
- break;
- case 2: /* even parity */
- cai[3] |= (DSP_CAI_ASYNC_PARITY_ENABLE | DSP_CAI_ASYNC_PARITY_EVEN);
- break;
- }
- switch (GET_WORD(&bp_parms[3].info[7]))
- { /* stop bits */
- case 1: /* 2 stop bits */
- cai[3] |= DSP_CAI_ASYNC_TWO_STOP_BITS;
- break;
- }
- }
- }
- }
- else if (cai[1] == 8 || GET_WORD(bp_parms[0].info) == 3) {
- dbug(1, dprintf("V.110 default 56k sync"));
- cai[1] = 8;
- cai[2] = 0;
- cai[3] = 0;
- }
- else {
- dbug(1, dprintf("V.110 default 9600 async"));
- cai[2] = 5;
- }
- }
- PUT_WORD(&cai[5], plci->appl->MaxDataLength);
- dbug(1, dprintf("CAI[%d]=%x,%x,%x,%x,%x,%x", cai[0], cai[1], cai[2], cai[3], cai[4], cai[5], cai[6]));
-/* HexDump ("CAI", sizeof(cai), &cai[0]); */
-
- add_p(plci, CAI, cai);
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/* put the parameters for the B2 and B3 protocols in the parameter buffer */
-/*------------------------------------------------------------------*/
-
-static word add_b23(PLCI *plci, API_PARSE *bp)
-{
- word i, fax_control_bits;
- byte pos, len;
- byte SAPI = 0x40; /* default SAPI 16 for x.31 */
- API_PARSE bp_parms[8];
- API_PARSE *b1_config;
- API_PARSE *b2_config;
- API_PARSE b2_config_parms[8];
- API_PARSE *b3_config;
- API_PARSE b3_config_parms[6];
- API_PARSE global_config[2];
-
- static byte llc[3] = {2,0,0};
- static byte dlc[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- static byte nlc[256];
- static byte lli[12] = {1,1};
-
- const byte llc2_out[] = {1,2,4,6,2,0,0,0, X75_V42BIS,V120_L2,V120_V42BIS,V120_L2,6};
- const byte llc2_in[] = {1,3,4,6,3,0,0,0, X75_V42BIS,V120_L2,V120_V42BIS,V120_L2,6};
-
- const byte llc3[] = {4,3,2,2,6,6,0};
- const byte header[] = {0,2,3,3,0,0,0};
-
- for (i = 0; i < 8; i++) bp_parms[i].length = 0;
- for (i = 0; i < 6; i++) b2_config_parms[i].length = 0;
- for (i = 0; i < 5; i++) b3_config_parms[i].length = 0;
-
- lli[0] = 1;
- lli[1] = 1;
- if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)
- lli[1] |= 2;
- if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL)
- lli[1] |= 4;
-
- if ((lli[1] & 0x02) && (diva_xdi_extended_features & DIVA_CAPI_USE_CMA)) {
- lli[1] |= 0x10;
- if (plci->rx_dma_descriptor <= 0) {
- plci->rx_dma_descriptor = diva_get_dma_descriptor(plci, &plci->rx_dma_magic);
- if (plci->rx_dma_descriptor >= 0)
- plci->rx_dma_descriptor++;
- }
- if (plci->rx_dma_descriptor > 0) {
- lli[0] = 6;
- lli[1] |= 0x40;
- lli[2] = (byte)(plci->rx_dma_descriptor - 1);
- lli[3] = (byte)plci->rx_dma_magic;
- lli[4] = (byte)(plci->rx_dma_magic >> 8);
- lli[5] = (byte)(plci->rx_dma_magic >> 16);
- lli[6] = (byte)(plci->rx_dma_magic >> 24);
- }
- }
-
- if (DIVA_CAPI_SUPPORTS_NO_CANCEL(plci->adapter)) {
- lli[1] |= 0x20;
- }
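-	/* lli[1] option bits collected above: 0x02 XON/XOFF flow control, */
-	/* 0x04 OOB channel, 0x10 CMA, 0x20 no-cancel capability, 0x40 an  */
-	/* RX DMA descriptor follows in lli[2..6].                         */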
-
- dbug(1, dprintf("add_b23"));
- api_save_msg(bp, "s", &plci->B_protocol);
-
- if (!bp->length && plci->tel)
- {
- plci->adv_nl = true;
- dbug(1, dprintf("Default adv.Nl"));
- add_p(plci, LLI, lli);
- plci->B2_prot = 1 /*XPARENT*/;
- plci->B3_prot = 0 /*XPARENT*/;
- llc[1] = 2;
- llc[2] = 4;
- add_p(plci, LLC, llc);
- dlc[0] = 2;
- PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
- add_p(plci, DLC, dlc);
- return 0;
- }
-
- if (!bp->length) /*default*/
- {
- dbug(1, dprintf("ret default"));
- add_p(plci, LLI, lli);
- plci->B2_prot = 0 /*X.75 */;
- plci->B3_prot = 0 /*XPARENT*/;
- llc[1] = 1;
- llc[2] = 4;
- add_p(plci, LLC, llc);
- dlc[0] = 2;
- PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
- add_p(plci, DLC, dlc);
- return 0;
- }
- dbug(1, dprintf("b_prot_len=%d", (word)bp->length));
- if ((word)bp->length > 256) return _WRONG_MESSAGE_FORMAT;
-
- if (api_parse(&bp->info[1], (word)bp->length, "wwwsssb", bp_parms))
- {
- bp_parms[6].length = 0;
- if (api_parse(&bp->info[1], (word)bp->length, "wwwsss", bp_parms))
- {
- dbug(1, dprintf("b-form.!"));
- return _WRONG_MESSAGE_FORMAT;
- }
- }
- else if (api_parse(&bp->info[1], (word)bp->length, "wwwssss", bp_parms))
- {
- dbug(1, dprintf("b-form.!"));
- return _WRONG_MESSAGE_FORMAT;
- }
-
- if (plci->tel == ADV_VOICE) /* transparent B on advanced voice */
- {
- if (GET_WORD(bp_parms[1].info) != 1
- || GET_WORD(bp_parms[2].info) != 0) return _B2_NOT_SUPPORTED;
- plci->adv_nl = true;
- }
- else if (plci->tel) return _B2_NOT_SUPPORTED;
-
-
- if ((GET_WORD(bp_parms[1].info) == B2_RTP)
- && (GET_WORD(bp_parms[2].info) == B3_RTP)
- && (plci->adapter->man_profile.private_options & (1L << PRIVATE_RTP)))
- {
- add_p(plci, LLI, lli);
- plci->B2_prot = (byte) GET_WORD(bp_parms[1].info);
- plci->B3_prot = (byte) GET_WORD(bp_parms[2].info);
- llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ? 14 : 13;
- llc[2] = 4;
- add_p(plci, LLC, llc);
- dlc[0] = 2;
- PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
- dlc[3] = 3; /* Addr A */
- dlc[4] = 1; /* Addr B */
- dlc[5] = 7; /* modulo mode */
- dlc[6] = 7; /* window size */
- dlc[7] = 0; /* XID len Lo */
- dlc[8] = 0; /* XID len Hi */
- for (i = 0; i < bp_parms[4].length; i++)
- dlc[9 + i] = bp_parms[4].info[1 + i];
- dlc[0] = (byte)(8 + bp_parms[4].length);
- add_p(plci, DLC, dlc);
- for (i = 0; i < bp_parms[5].length; i++)
- nlc[1 + i] = bp_parms[5].info[1 + i];
- nlc[0] = (byte)(bp_parms[5].length);
- add_p(plci, NLC, nlc);
- return 0;
- }
-
-
-
- if ((GET_WORD(bp_parms[1].info) >= 32)
- || (!((1L << GET_WORD(bp_parms[1].info)) & plci->adapter->profile.B2_Protocols)
- && ((GET_WORD(bp_parms[1].info) != B2_PIAFS)
- || !(plci->adapter->man_profile.private_options & (1L << PRIVATE_PIAFS)))))
-
- {
- return _B2_NOT_SUPPORTED;
- }
- if ((GET_WORD(bp_parms[2].info) >= 32)
- || !((1L << GET_WORD(bp_parms[2].info)) & plci->adapter->profile.B3_Protocols))
- {
- return _B3_NOT_SUPPORTED;
- }
- if ((GET_WORD(bp_parms[1].info) != B2_SDLC)
- && ((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
- || (GET_WORD(bp_parms[0].info) == B1_MODEM_ASYNC)
- || (GET_WORD(bp_parms[0].info) == B1_MODEM_SYNC_HDLC)))
- {
- return (add_modem_b23(plci, bp_parms));
- }
-
- add_p(plci, LLI, lli);
-
- plci->B2_prot = (byte)GET_WORD(bp_parms[1].info);
- plci->B3_prot = (byte)GET_WORD(bp_parms[2].info);
- if (plci->B2_prot == 12) SAPI = 0; /* default SAPI D-channel */
-
- if (bp_parms[6].length)
- {
- if (api_parse(&bp_parms[6].info[1], (word)bp_parms[6].length, "w", global_config))
- {
- return _WRONG_MESSAGE_FORMAT;
- }
- switch (GET_WORD(global_config[0].info))
- {
- case 1:
- plci->call_dir = (plci->call_dir & ~CALL_DIR_ANSWER) | CALL_DIR_ORIGINATE;
- break;
- case 2:
- plci->call_dir = (plci->call_dir & ~CALL_DIR_ORIGINATE) | CALL_DIR_ANSWER;
- break;
- }
- }
- dbug(1, dprintf("call_dir=%04x", plci->call_dir));
-
-
- if (plci->B2_prot == B2_PIAFS)
- llc[1] = PIAFS_CRC;
- else
-/* IMPLEMENT_PIAFS */
- {
- llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ?
- llc2_out[GET_WORD(bp_parms[1].info)] : llc2_in[GET_WORD(bp_parms[1].info)];
- }
- llc[2] = llc3[GET_WORD(bp_parms[2].info)];
-
- add_p(plci, LLC, llc);
-
- dlc[0] = 2;
- PUT_WORD(&dlc[1], plci->appl->MaxDataLength +
- header[GET_WORD(bp_parms[2].info)]);
-
- b1_config = &bp_parms[3];
- nlc[0] = 0;
- if (plci->B3_prot == 4
- || plci->B3_prot == 5)
- {
- for (i = 0; i < sizeof(T30_INFO); i++) nlc[i] = 0;
- nlc[0] = sizeof(T30_INFO);
- if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
- ((T30_INFO *)&nlc[1])->operating_mode = T30_OPERATING_MODE_CAPI;
- ((T30_INFO *)&nlc[1])->rate_div_2400 = 0xff;
- if (b1_config->length >= 2)
- {
- ((T30_INFO *)&nlc[1])->rate_div_2400 = (byte)(GET_WORD(&b1_config->info[1]) / 2400);
- }
- }
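-	/* for B3 protocols 4 and 5 the NLC carries a T30_INFO structure;  */
-	/* rate_div_2400 stays at the default 0xff unless a B1 rate was    */
-	/* configured above, in which case it is that rate divided by 2400 */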
- b2_config = &bp_parms[4];
-
-
- if (llc[1] == PIAFS_CRC)
- {
- if (plci->B3_prot != B3_TRANSPARENT)
- {
- return _B_STACK_NOT_SUPPORTED;
- }
- if (b2_config->length && api_parse(&b2_config->info[1], (word)b2_config->length, "bwww", b2_config_parms)) {
- return _WRONG_MESSAGE_FORMAT;
- }
- PUT_WORD(&dlc[1], plci->appl->MaxDataLength);
- dlc[3] = 0; /* Addr A */
- dlc[4] = 0; /* Addr B */
- dlc[5] = 0; /* modulo mode */
- dlc[6] = 0; /* window size */
- if (b2_config->length >= 7) {
- dlc[7] = 7;
- dlc[8] = 0;
- dlc[9] = b2_config_parms[0].info[0]; /* PIAFS protocol Speed configuration */
- dlc[10] = b2_config_parms[1].info[0]; /* V.42bis P0 */
- dlc[11] = b2_config_parms[1].info[1]; /* V.42bis P0 */
- dlc[12] = b2_config_parms[2].info[0]; /* V.42bis P1 */
- dlc[13] = b2_config_parms[2].info[1]; /* V.42bis P1 */
- dlc[14] = b2_config_parms[3].info[0]; /* V.42bis P2 */
- dlc[15] = b2_config_parms[3].info[1]; /* V.42bis P2 */
- dlc[0] = 15;
- if (b2_config->length >= 8) { /* PIAFS control abilities */
- dlc[7] = 10;
- dlc[16] = 2; /* Length of PIAFS extension */
- dlc[17] = PIAFS_UDATA_ABILITIES; /* control (UDATA) ability */
- dlc[18] = b2_config_parms[4].info[0]; /* value */
- dlc[0] = 18;
- }
- }
- else /* default values, 64K, variable, no compression */
- {
- dlc[7] = 7;
- dlc[8] = 0;
- dlc[9] = 0x03; /* PIAFS protocol Speed configuration */
- dlc[10] = 0x03; /* V.42bis P0 */
- dlc[11] = 0; /* V.42bis P0 */
- dlc[12] = 0; /* V.42bis P1 */
- dlc[13] = 0; /* V.42bis P1 */
- dlc[14] = 0; /* V.42bis P2 */
- dlc[15] = 0; /* V.42bis P2 */
- dlc[0] = 15;
- }
- add_p(plci, DLC, dlc);
- }
- else
-
- if ((llc[1] == V120_L2) || (llc[1] == V120_V42BIS))
- {
- if (plci->B3_prot != B3_TRANSPARENT)
- return _B_STACK_NOT_SUPPORTED;
-
- dlc[0] = 6;
- PUT_WORD(&dlc[1], GET_WORD(&dlc[1]) + 2);
- dlc[3] = 0x08;
- dlc[4] = 0x01;
- dlc[5] = 127;
- dlc[6] = 7;
- if (b2_config->length != 0)
- {
- if ((llc[1] == V120_V42BIS) && api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbwww", b2_config_parms)) {
- return _WRONG_MESSAGE_FORMAT;
- }
- dlc[3] = (byte)((b2_config->info[2] << 3) | ((b2_config->info[1] >> 5) & 0x04));
- dlc[4] = (byte)((b2_config->info[1] << 1) | 0x01);
- if (b2_config->info[3] != 128)
- {
- dbug(1, dprintf("1D-dlc= %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
- return _B2_PARM_NOT_SUPPORTED;
- }
- dlc[5] = (byte)(b2_config->info[3] - 1);
- dlc[6] = b2_config->info[4];
- if (llc[1] == V120_V42BIS) {
- if (b2_config->length >= 10) {
- dlc[7] = 6;
- dlc[8] = 0;
- dlc[9] = b2_config_parms[4].info[0];
- dlc[10] = b2_config_parms[4].info[1];
- dlc[11] = b2_config_parms[5].info[0];
- dlc[12] = b2_config_parms[5].info[1];
- dlc[13] = b2_config_parms[6].info[0];
- dlc[14] = b2_config_parms[6].info[1];
- dlc[0] = 14;
- dbug(1, dprintf("b2_config_parms[4].info[0] [1]: %x %x", b2_config_parms[4].info[0], b2_config_parms[4].info[1]));
- dbug(1, dprintf("b2_config_parms[5].info[0] [1]: %x %x", b2_config_parms[5].info[0], b2_config_parms[5].info[1]));
- dbug(1, dprintf("b2_config_parms[6].info[0] [1]: %x %x", b2_config_parms[6].info[0], b2_config_parms[6].info[1]));
- }
- else {
- dlc[6] = 14;
- }
- }
- }
- }
- else
- {
- if (b2_config->length)
- {
- dbug(1, dprintf("B2-Config"));
- if (llc[1] == X75_V42BIS) {
- if (api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbwww", b2_config_parms))
- {
- return _WRONG_MESSAGE_FORMAT;
- }
- }
- else {
- if (api_parse(&b2_config->info[1], (word)b2_config->length, "bbbbs", b2_config_parms))
- {
- return _WRONG_MESSAGE_FORMAT;
- }
- }
- /* if B2 Protocol is LAPD, b2_config structure is different */
- if (llc[1] == 6)
- {
- dlc[0] = 4;
- if (b2_config->length >= 1) dlc[2] = b2_config->info[1]; /* TEI */
- else dlc[2] = 0x01;
- if ((b2_config->length >= 2) && (plci->B2_prot == 12))
- {
- SAPI = b2_config->info[2]; /* SAPI */
- }
- dlc[1] = SAPI;
- if ((b2_config->length >= 3) && (b2_config->info[3] == 128))
- {
- dlc[3] = 127; /* Mode */
- }
- else
- {
- dlc[3] = 7; /* Mode */
- }
-
- if (b2_config->length >= 4) dlc[4] = b2_config->info[4]; /* Window */
- else dlc[4] = 1;
- dbug(1, dprintf("D-dlc[%d]=%x,%x,%x,%x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
- if (b2_config->length > 5) return _B2_PARM_NOT_SUPPORTED;
- }
- else
- {
- dlc[0] = (byte)(b2_config_parms[4].length + 6);
- dlc[3] = b2_config->info[1];
- dlc[4] = b2_config->info[2];
- if (b2_config->info[3] != 8 && b2_config->info[3] != 128) {
- dbug(1, dprintf("1D-dlc= %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4]));
- return _B2_PARM_NOT_SUPPORTED;
- }
-
- dlc[5] = (byte)(b2_config->info[3] - 1);
- dlc[6] = b2_config->info[4];
- if (dlc[6] > dlc[5]) {
- dbug(1, dprintf("2D-dlc= %x %x %x %x %x %x %x", dlc[0], dlc[1], dlc[2], dlc[3], dlc[4], dlc[5], dlc[6]));
- return _B2_PARM_NOT_SUPPORTED;
- }
-
- if (llc[1] == X75_V42BIS) {
- if (b2_config->length >= 10) {
- dlc[7] = 6;
- dlc[8] = 0;
- dlc[9] = b2_config_parms[4].info[0];
- dlc[10] = b2_config_parms[4].info[1];
- dlc[11] = b2_config_parms[5].info[0];
- dlc[12] = b2_config_parms[5].info[1];
- dlc[13] = b2_config_parms[6].info[0];
- dlc[14] = b2_config_parms[6].info[1];
- dlc[0] = 14;
- dbug(1, dprintf("b2_config_parms[4].info[0] [1]: %x %x", b2_config_parms[4].info[0], b2_config_parms[4].info[1]));
- dbug(1, dprintf("b2_config_parms[5].info[0] [1]: %x %x", b2_config_parms[5].info[0], b2_config_parms[5].info[1]));
- dbug(1, dprintf("b2_config_parms[6].info[0] [1]: %x %x", b2_config_parms[6].info[0], b2_config_parms[6].info[1]));
- }
- else {
- dlc[6] = 14;
- }
-
- }
- else {
- PUT_WORD(&dlc[7], (word)b2_config_parms[4].length);
- for (i = 0; i < b2_config_parms[4].length; i++)
- dlc[11 + i] = b2_config_parms[4].info[1 + i];
- }
- }
- }
- }
- add_p(plci, DLC, dlc);
-
- b3_config = &bp_parms[5];
- if (b3_config->length)
- {
- if (plci->B3_prot == 4
- || plci->B3_prot == 5)
- {
- if (api_parse(&b3_config->info[1], (word)b3_config->length, "wwss", b3_config_parms))
- {
- return _WRONG_MESSAGE_FORMAT;
- }
- i = GET_WORD((byte *)(b3_config_parms[0].info));
- ((T30_INFO *)&nlc[1])->resolution = (byte)(((i & 0x0001) ||
- ((plci->B3_prot == 4) && (((byte)(GET_WORD((byte *)b3_config_parms[1].info))) != 5))) ? T30_RESOLUTION_R8_0770_OR_200 : 0);
- ((T30_INFO *)&nlc[1])->data_format = (byte)(GET_WORD((byte *)b3_config_parms[1].info));
- fax_control_bits = T30_CONTROL_BIT_ALL_FEATURES;
- if ((((T30_INFO *)&nlc[1])->rate_div_2400 != 0) && (((T30_INFO *)&nlc[1])->rate_div_2400 <= 6))
- fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_V34FAX;
- if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_FAX_PAPER_FORMATS)
- {
-
- if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
- & (1L << PRIVATE_FAX_PAPER_FORMATS))
- {
- ((T30_INFO *)&nlc[1])->resolution |= T30_RESOLUTION_R8_1540 |
- T30_RESOLUTION_R16_1540_OR_400 | T30_RESOLUTION_300_300 |
- T30_RESOLUTION_INCH_BASED | T30_RESOLUTION_METRIC_BASED;
- }
-
- ((T30_INFO *)&nlc[1])->recording_properties =
- T30_RECORDING_WIDTH_ISO_A3 |
- (T30_RECORDING_LENGTH_UNLIMITED << 2) |
- (T30_MIN_SCANLINE_TIME_00_00_00 << 4);
- }
- if (plci->B3_prot == 5)
- {
- if (i & 0x0002) /* Accept incoming fax-polling requests */
- fax_control_bits |= T30_CONTROL_BIT_ACCEPT_POLLING;
- if (i & 0x2000) /* Do not use MR compression */
- fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_2D_CODING;
- if (i & 0x4000) /* Do not use MMR compression */
- fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_T6_CODING;
- if (i & 0x8000) /* Do not use ECM */
- fax_control_bits &= ~T30_CONTROL_BIT_ENABLE_ECM;
- if (plci->fax_connect_info_length != 0)
- {
- ((T30_INFO *)&nlc[1])->resolution = ((T30_INFO *)plci->fax_connect_info_buffer)->resolution;
- ((T30_INFO *)&nlc[1])->data_format = ((T30_INFO *)plci->fax_connect_info_buffer)->data_format;
- ((T30_INFO *)&nlc[1])->recording_properties = ((T30_INFO *)plci->fax_connect_info_buffer)->recording_properties;
- fax_control_bits |= GET_WORD(&((T30_INFO *)plci->fax_connect_info_buffer)->control_bits_low) &
- (T30_CONTROL_BIT_REQUEST_POLLING | T30_CONTROL_BIT_MORE_DOCUMENTS);
- }
- }
- /* copy station id to NLC */
- for (i = 0; i < T30_MAX_STATION_ID_LENGTH; i++)
- {
- if (i < b3_config_parms[2].length)
- {
- ((T30_INFO *)&nlc[1])->station_id[i] = ((byte *)b3_config_parms[2].info)[1 + i];
- }
- else
- {
- ((T30_INFO *)&nlc[1])->station_id[i] = ' ';
- }
- }
- ((T30_INFO *)&nlc[1])->station_id_len = T30_MAX_STATION_ID_LENGTH;
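-		/* the station id is a fixed T30_MAX_STATION_ID_LENGTH-byte    */
-		/* field, padded with blanks as needed (see the loop above)    */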
- /* copy head line to NLC */
- if (b3_config_parms[3].length)
- {
-
- pos = (byte)(fax_head_line_time(&(((T30_INFO *)&nlc[1])->station_id[T30_MAX_STATION_ID_LENGTH])));
- if (pos != 0)
- {
- if (CAPI_MAX_DATE_TIME_LENGTH + 2 + b3_config_parms[3].length > CAPI_MAX_HEAD_LINE_SPACE)
- pos = 0;
- else
- {
- nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
- nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
- len = (byte)b3_config_parms[2].length;
- if (len > 20)
- len = 20;
- if (CAPI_MAX_DATE_TIME_LENGTH + 2 + len + 2 + b3_config_parms[3].length <= CAPI_MAX_HEAD_LINE_SPACE)
- {
- for (i = 0; i < len; i++)
- nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ((byte *)b3_config_parms[2].info)[1 + i];
- nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
- nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ' ';
- }
- }
- }
-
- len = (byte)b3_config_parms[3].length;
- if (len > CAPI_MAX_HEAD_LINE_SPACE - pos)
- len = (byte)(CAPI_MAX_HEAD_LINE_SPACE - pos);
- ((T30_INFO *)&nlc[1])->head_line_len = (byte)(pos + len);
- nlc[0] += (byte)(pos + len);
- for (i = 0; i < len; i++)
- nlc[1 + offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH + pos++] = ((byte *)b3_config_parms[3].info)[1 + i];
- } else
- ((T30_INFO *)&nlc[1])->head_line_len = 0;
-
- plci->nsf_control_bits = 0;
- if (plci->B3_prot == 5)
- {
- if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_FAX_SUB_SEP_PWD))
- && (GET_WORD((byte *)b3_config_parms[1].info) & 0x8000)) /* Private SUB/SEP/PWD enable */
- {
- plci->requested_options |= 1L << PRIVATE_FAX_SUB_SEP_PWD;
- }
- if ((plci->adapter->man_profile.private_options & (1L << PRIVATE_FAX_NONSTANDARD))
- && (GET_WORD((byte *)b3_config_parms[1].info) & 0x4000)) /* Private non-standard facilities enable */
- {
- plci->requested_options |= 1L << PRIVATE_FAX_NONSTANDARD;
- }
- if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
- & ((1L << PRIVATE_FAX_SUB_SEP_PWD) | (1L << PRIVATE_FAX_NONSTANDARD)))
- {
- if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
- & (1L << PRIVATE_FAX_SUB_SEP_PWD))
- {
- fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SUBADDRESS | T30_CONTROL_BIT_ACCEPT_PASSWORD;
- if (fax_control_bits & T30_CONTROL_BIT_ACCEPT_POLLING)
- fax_control_bits |= T30_CONTROL_BIT_ACCEPT_SEL_POLLING;
- }
- len = nlc[0];
- pos = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
- if (pos < plci->fax_connect_info_length)
- {
- for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
- nlc[++len] = plci->fax_connect_info_buffer[pos++];
- }
- else
- nlc[++len] = 0;
- if (pos < plci->fax_connect_info_length)
- {
- for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
- nlc[++len] = plci->fax_connect_info_buffer[pos++];
- }
- else
- nlc[++len] = 0;
- if ((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[plci->appl->Id - 1])
- & (1L << PRIVATE_FAX_NONSTANDARD))
- {
- if ((pos < plci->fax_connect_info_length) && (plci->fax_connect_info_buffer[pos] != 0))
- {
- if ((plci->fax_connect_info_buffer[pos] >= 3) && (plci->fax_connect_info_buffer[pos + 1] >= 2))
- plci->nsf_control_bits = GET_WORD(&plci->fax_connect_info_buffer[pos + 2]);
- for (i = 1 + plci->fax_connect_info_buffer[pos]; i != 0; i--)
- nlc[++len] = plci->fax_connect_info_buffer[pos++];
- }
- else
- {
- if (api_parse(&b3_config->info[1], (word)b3_config->length, "wwsss", b3_config_parms))
- {
- dbug(1, dprintf("non-standard facilities info missing or wrong format"));
- nlc[++len] = 0;
- }
- else
- {
- if ((b3_config_parms[4].length >= 3) && (b3_config_parms[4].info[1] >= 2))
- plci->nsf_control_bits = GET_WORD(&b3_config_parms[4].info[2]);
- nlc[++len] = (byte)(b3_config_parms[4].length);
- for (i = 0; i < b3_config_parms[4].length; i++)
- nlc[++len] = b3_config_parms[4].info[1 + i];
- }
- }
- }
- nlc[0] = len;
- if ((plci->nsf_control_bits & T30_NSF_CONTROL_BIT_ENABLE_NSF)
- && (plci->nsf_control_bits & T30_NSF_CONTROL_BIT_NEGOTIATE_RESP))
- {
- ((T30_INFO *)&nlc[1])->operating_mode = T30_OPERATING_MODE_CAPI_NEG;
- }
- }
- }
-
- PUT_WORD(&(((T30_INFO *)&nlc[1])->control_bits_low), fax_control_bits);
- len = offsetof(T30_INFO, station_id) + T30_MAX_STATION_ID_LENGTH;
- for (i = 0; i < len; i++)
- plci->fax_connect_info_buffer[i] = nlc[1 + i];
- ((T30_INFO *) plci->fax_connect_info_buffer)->head_line_len = 0;
- i += ((T30_INFO *)&nlc[1])->head_line_len;
- while (i < nlc[0])
- plci->fax_connect_info_buffer[len++] = nlc[++i];
- plci->fax_connect_info_length = len;
- }
- else
- {
- nlc[0] = 14;
- if (b3_config->length != 16)
- return _B3_PARM_NOT_SUPPORTED;
- for (i = 0; i < 12; i++) nlc[1 + i] = b3_config->info[1 + i];
- if (GET_WORD(&b3_config->info[13]) != 8 && GET_WORD(&b3_config->info[13]) != 128)
- return _B3_PARM_NOT_SUPPORTED;
- nlc[13] = b3_config->info[13];
- if (GET_WORD(&b3_config->info[15]) >= nlc[13])
- return _B3_PARM_NOT_SUPPORTED;
- nlc[14] = b3_config->info[15];
- }
- }
- else
- {
- if (plci->B3_prot == 4
- || plci->B3_prot == 5 /*T.30 - FAX*/) return _B3_PARM_NOT_SUPPORTED;
- }
- add_p(plci, NLC, nlc);
- return 0;
-}
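-
-/* A sketch of the nlc layout assembled above for the fax protocols   */
-/* (B3 4/5), as implied by the code: nlc[1] holds a T30_INFO          */
-/* structure; the head line is stored directly behind                 */
-/* station_id[T30_MAX_STATION_ID_LENGTH]; the optional SUB/SEP/PWD    */
-/* and non-standard-facility strings are appended after that, and     */
-/* nlc[0] counts the total length.                                    */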
-
-/*----------------------------------------------------------------*/
-/* does the same as add_b23, but only for the modem-related      */
-/* L2 and L3 B-channel protocols.                                */
-/* */
-/* Enabled L2 and L3 Configurations: */
-/* If L1 == Modem all negotiation */
-/* only L2 == Modem with full negotiation is allowed */
-/* If L1 == Modem async or sync */
-/* only L2 == Transparent is allowed */
-/* L3 == Modem or L3 == Transparent are allowed */
-/* B2 Configuration for modem: */
-/* word : enable/disable compression, bitoptions */
-/* B3 Configuration for modem: */
-/* empty */
-/*----------------------------------------------------------------*/
-static word add_modem_b23(PLCI *plci, API_PARSE *bp_parms)
-{
- static byte lli[12] = {1,1};
- static byte llc[3] = {2,0,0};
- static byte dlc[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
- API_PARSE mdm_config[2];
- word i;
- word b2_config = 0;
-
- for (i = 0; i < 2; i++) mdm_config[i].length = 0;
- for (i = 0; i < sizeof(dlc); i++) dlc[i] = 0;
-
- if (((GET_WORD(bp_parms[0].info) == B1_MODEM_ALL_NEGOTIATE)
- && (GET_WORD(bp_parms[1].info) != B2_MODEM_EC_COMPRESSION))
- || ((GET_WORD(bp_parms[0].info) != B1_MODEM_ALL_NEGOTIATE)
- && (GET_WORD(bp_parms[1].info) != B2_TRANSPARENT)))
- {
- return (_B_STACK_NOT_SUPPORTED);
- }
- if ((GET_WORD(bp_parms[2].info) != B3_MODEM)
- && (GET_WORD(bp_parms[2].info) != B3_TRANSPARENT))
- {
- return (_B_STACK_NOT_SUPPORTED);
- }
-
- plci->B2_prot = (byte) GET_WORD(bp_parms[1].info);
- plci->B3_prot = (byte) GET_WORD(bp_parms[2].info);
-
- if ((GET_WORD(bp_parms[1].info) == B2_MODEM_EC_COMPRESSION) && bp_parms[4].length)
- {
- if (api_parse(&bp_parms[4].info[1],
- (word)bp_parms[4].length, "w",
- mdm_config))
- {
- return (_WRONG_MESSAGE_FORMAT);
- }
- b2_config = GET_WORD(mdm_config[0].info);
- }
-
- /* OK, L2 is modem */
-
- lli[0] = 1;
- lli[1] = 1;
- if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)
- lli[1] |= 2;
- if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_OOB_CHANNEL)
- lli[1] |= 4;
-
- if ((lli[1] & 0x02) && (diva_xdi_extended_features & DIVA_CAPI_USE_CMA)) {
- lli[1] |= 0x10;
- if (plci->rx_dma_descriptor <= 0) {
- plci->rx_dma_descriptor = diva_get_dma_descriptor(plci, &plci->rx_dma_magic);
- if (plci->rx_dma_descriptor >= 0)
- plci->rx_dma_descriptor++;
- }
- if (plci->rx_dma_descriptor > 0) {
- lli[1] |= 0x40;
- lli[0] = 6;
- lli[2] = (byte)(plci->rx_dma_descriptor - 1);
- lli[3] = (byte)plci->rx_dma_magic;
- lli[4] = (byte)(plci->rx_dma_magic >> 8);
- lli[5] = (byte)(plci->rx_dma_magic >> 16);
- lli[6] = (byte)(plci->rx_dma_magic >> 24);
- }
- }
-
- if (DIVA_CAPI_SUPPORTS_NO_CANCEL(plci->adapter)) {
- lli[1] |= 0x20;
- }
-
- llc[1] = (plci->call_dir & (CALL_DIR_ORIGINATE | CALL_DIR_FORCE_OUTG_NL)) ?
- /*V42*/ 10 : /*V42_IN*/ 9;
- llc[2] = 4; /* pass L3 always transparent */
- add_p(plci, LLI, lli);
- add_p(plci, LLC, llc);
- i = 1;
- PUT_WORD(&dlc[i], plci->appl->MaxDataLength);
- i += 2;
- if (GET_WORD(bp_parms[1].info) == B2_MODEM_EC_COMPRESSION)
- {
- if (bp_parms[4].length)
- {
- dbug(1, dprintf("MDM b2_config=%02x", b2_config));
- dlc[i++] = 3; /* Addr A */
- dlc[i++] = 1; /* Addr B */
- dlc[i++] = 7; /* modulo mode */
- dlc[i++] = 7; /* window size */
- dlc[i++] = 0; /* XID len Lo */
- dlc[i++] = 0; /* XID len Hi */
-
- if (b2_config & MDM_B2_DISABLE_V42bis)
- {
- dlc[i] |= DLC_MODEMPROT_DISABLE_V42_V42BIS;
- }
- if (b2_config & MDM_B2_DISABLE_MNP)
- {
- dlc[i] |= DLC_MODEMPROT_DISABLE_MNP_MNP5;
- }
- if (b2_config & MDM_B2_DISABLE_TRANS)
- {
- dlc[i] |= DLC_MODEMPROT_REQUIRE_PROTOCOL;
- }
- if (b2_config & MDM_B2_DISABLE_V42)
- {
- dlc[i] |= DLC_MODEMPROT_DISABLE_V42_DETECT;
- }
- if (b2_config & MDM_B2_DISABLE_COMP)
- {
- dlc[i] |= DLC_MODEMPROT_DISABLE_COMPRESSION;
- }
- i++;
- }
- }
- else
- {
- dlc[i++] = 3; /* Addr A */
- dlc[i++] = 1; /* Addr B */
- dlc[i++] = 7; /* modulo mode */
- dlc[i++] = 7; /* window size */
- dlc[i++] = 0; /* XID len Lo */
- dlc[i++] = 0; /* XID len Hi */
- dlc[i++] = DLC_MODEMPROT_DISABLE_V42_V42BIS |
- DLC_MODEMPROT_DISABLE_MNP_MNP5 |
- DLC_MODEMPROT_DISABLE_V42_DETECT |
- DLC_MODEMPROT_DISABLE_COMPRESSION;
- }
- dlc[0] = (byte)(i - 1);
-/* HexDump ("DLC", sizeof(dlc), &dlc[0]); */
- add_p(plci, DLC, dlc);
- return (0);
-}
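-
-/* A sketch of the dlc structure assembled above (EC/compression      */
-/* case), recoverable directly from the code:                         */
-/*   byte 0     length of the following data (9)                      */
-/*   bytes 1-2  the application's MaxDataLength (word)                */
-/*   byte 3     address A,   byte 4  address B                        */
-/*   byte 5     modulo mode, byte 6  window size                      */
-/*   bytes 7-8  XID length lo/hi (zero here)                          */
-/*   byte 9     modem protocol option bits (DLC_MODEMPROT_...)        */
-/* In the B2_TRANSPARENT case byte 9 disables V.42/V.42bis, MNP/MNP5, */
-/* V.42 detection and compression.                                    */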
-
-
-/*------------------------------------------------------------------*/
-/* send a request for the signaling entity */
-/*------------------------------------------------------------------*/
-
-static void sig_req(PLCI *plci, byte req, byte Id)
-{
- if (!plci) return;
- if (plci->adapter->adapter_disabled) return;
- dbug(1, dprintf("sig_req(%x)", req));
- if (req == REMOVE)
- plci->sig_remove_id = plci->Sig.Id;
- if (plci->req_in == plci->req_in_start) {
- plci->req_in += 2;
- plci->RBuffer[plci->req_in++] = 0;
- }
- PUT_WORD(&plci->RBuffer[plci->req_in_start], plci->req_in-plci->req_in_start - 2);
- plci->RBuffer[plci->req_in++] = Id; /* sig/nl flag */
- plci->RBuffer[plci->req_in++] = req; /* request */
- plci->RBuffer[plci->req_in++] = 0; /* channel */
- plci->req_in_start = plci->req_in;
-}
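-
-/* A sketch of the RBuffer framing produced above and consumed by     */
-/* send_req below (implied by the code, not a documented format):     */
-/*   2 bytes  length of the parameter area (PUT_WORD)                 */
-/*   n bytes  parameters previously appended with add_p()             */
-/*   1 byte   entity selector: 1 = network layer, otherwise the       */
-/*            signaling entity id (0 keeps the current one)           */
-/*   1 byte   request code                                            */
-/*   1 byte   channel                                                 */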
-
-/*------------------------------------------------------------------*/
-/* send a request for the network layer entity */
-/*------------------------------------------------------------------*/
-
-static void nl_req_ncci(PLCI *plci, byte req, byte ncci)
-{
- if (!plci) return;
- if (plci->adapter->adapter_disabled) return;
- dbug(1, dprintf("nl_req %02x %02x %02x", plci->Id, req, ncci));
- if (req == REMOVE)
- {
- plci->nl_remove_id = plci->NL.Id;
- ncci_remove(plci, 0, (byte)(ncci != 0));
- ncci = 0;
- }
- if (plci->req_in == plci->req_in_start) {
- plci->req_in += 2;
- plci->RBuffer[plci->req_in++] = 0;
- }
- PUT_WORD(&plci->RBuffer[plci->req_in_start], plci->req_in-plci->req_in_start - 2);
- plci->RBuffer[plci->req_in++] = 1; /* sig/nl flag */
- plci->RBuffer[plci->req_in++] = req; /* request */
- plci->RBuffer[plci->req_in++] = plci->adapter->ncci_ch[ncci]; /* channel */
- plci->req_in_start = plci->req_in;
-}
-
-static void send_req(PLCI *plci)
-{
- ENTITY *e;
- word l;
-/* word i; */
-
- if (!plci) return;
- if (plci->adapter->adapter_disabled) return;
- channel_xmit_xon(plci);
-
- /* if nothing to do, return */
- if (plci->req_in == plci->req_out) return;
- dbug(1, dprintf("send_req(in=%d,out=%d)", plci->req_in, plci->req_out));
-
- if (plci->nl_req || plci->sig_req) return;
-
- l = GET_WORD(&plci->RBuffer[plci->req_out]);
- plci->req_out += 2;
- plci->XData[0].P = &plci->RBuffer[plci->req_out];
- plci->req_out += l;
- if (plci->RBuffer[plci->req_out] == 1)
- {
- e = &plci->NL;
- plci->req_out++;
- e->Req = plci->nl_req = plci->RBuffer[plci->req_out++];
- e->ReqCh = plci->RBuffer[plci->req_out++];
- if (!(e->Id & 0x1f))
- {
- e->Id = NL_ID;
- plci->RBuffer[plci->req_out - 4] = CAI;
- plci->RBuffer[plci->req_out - 3] = 1;
- plci->RBuffer[plci->req_out - 2] = (plci->Sig.Id == 0xff) ? 0 : plci->Sig.Id;
- plci->RBuffer[plci->req_out - 1] = 0;
- l += 3;
- plci->nl_global_req = plci->nl_req;
- }
- dbug(1, dprintf("%x:NLREQ(%x:%x:%x)", plci->adapter->Id, e->Id, e->Req, e->ReqCh));
- }
- else
- {
- e = &plci->Sig;
- if (plci->RBuffer[plci->req_out])
- e->Id = plci->RBuffer[plci->req_out];
- plci->req_out++;
- e->Req = plci->sig_req = plci->RBuffer[plci->req_out++];
- e->ReqCh = plci->RBuffer[plci->req_out++];
- if (!(e->Id & 0x1f))
- plci->sig_global_req = plci->sig_req;
- dbug(1, dprintf("%x:SIGREQ(%x:%x:%x)", plci->adapter->Id, e->Id, e->Req, e->ReqCh));
- }
- plci->XData[0].PLength = l;
- e->X = plci->XData;
- plci->adapter->request(e);
- dbug(1, dprintf("send_ok"));
-}
-
-static void send_data(PLCI *plci)
-{
- DIVA_CAPI_ADAPTER *a;
- DATA_B3_DESC *data;
- NCCI *ncci_ptr;
- word ncci;
-
- if (!plci->nl_req && plci->ncci_ring_list)
- {
- a = plci->adapter;
- ncci = plci->ncci_ring_list;
- do
- {
- ncci = a->ncci_next[ncci];
- ncci_ptr = &(a->ncci[ncci]);
- if (!(a->ncci_ch[ncci]
- && (a->ch_flow_control[a->ncci_ch[ncci]] & N_OK_FC_PENDING)))
- {
- if (ncci_ptr->data_pending)
- {
- if ((a->ncci_state[ncci] == CONNECTED)
- || (a->ncci_state[ncci] == INC_ACT_PENDING)
- || (plci->send_disc == ncci))
- {
- data = &(ncci_ptr->DBuffer[ncci_ptr->data_out]);
- if ((plci->B2_prot == B2_V120_ASYNC)
- || (plci->B2_prot == B2_V120_ASYNC_V42BIS)
- || (plci->B2_prot == B2_V120_BIT_TRANSPARENT))
- {
- plci->NData[1].P = TransmitBufferGet(plci->appl, data->P);
- plci->NData[1].PLength = data->Length;
- if (data->Flags & 0x10)
- plci->NData[0].P = v120_break_header;
- else
- plci->NData[0].P = v120_default_header;
- plci->NData[0].PLength = 1;
- plci->NL.XNum = 2;
- plci->NL.Req = plci->nl_req = (byte)((data->Flags & 0x07) << 4 | N_DATA);
- }
- else
- {
- plci->NData[0].P = TransmitBufferGet(plci->appl, data->P);
- plci->NData[0].PLength = data->Length;
- if (data->Flags & 0x10)
- plci->NL.Req = plci->nl_req = (byte)N_UDATA;
-
- else if ((plci->B3_prot == B3_RTP) && (data->Flags & 0x01))
- plci->NL.Req = plci->nl_req = (byte)N_BDATA;
-
- else
- plci->NL.Req = plci->nl_req = (byte)((data->Flags & 0x07) << 4 | N_DATA);
- }
- plci->NL.X = plci->NData;
- plci->NL.ReqCh = a->ncci_ch[ncci];
- dbug(1, dprintf("%x:DREQ(%x:%x)", a->Id, plci->NL.Id, plci->NL.Req));
- plci->data_sent = true;
- plci->data_sent_ptr = data->P;
- a->request(&plci->NL);
- }
- else {
- cleanup_ncci_data(plci, ncci);
- }
- }
- else if (plci->send_disc == ncci)
- {
- /* dprintf("N_DISC"); */
- plci->NData[0].PLength = 0;
- plci->NL.ReqCh = a->ncci_ch[ncci];
- plci->NL.Req = plci->nl_req = N_DISC;
- a->request(&plci->NL);
- plci->command = _DISCONNECT_B3_R;
- plci->send_disc = 0;
- }
- }
- } while (!plci->nl_req && (ncci != plci->ncci_ring_list));
- plci->ncci_ring_list = ncci;
- }
-}
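-
-/* A sketch of the request coding above: bits 0..2 of the DATA_B3     */
-/* flags travel in the upper nibble of the NL request combined with   */
-/* N_DATA; flag 0x10 selects N_UDATA (for V.120: the break header);   */
-/* the V.120 protocols prepend a one-byte header as an additional     */
-/* descriptor in NData[0].                                            */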
-
-static void listen_check(DIVA_CAPI_ADAPTER *a)
-{
- word i, j;
- PLCI *plci;
- byte activnotifiedcalls = 0;
-
- dbug(1, dprintf("listen_check(%d,%d)", a->listen_active, a->max_listen));
- if (!remove_started && !a->adapter_disabled)
- {
- for (i = 0; i < a->max_plci; i++)
- {
- plci = &(a->plci[i]);
- if (plci->notifiedcall) activnotifiedcalls++;
- }
- dbug(1, dprintf("listen_check(%d)", activnotifiedcalls));
-
- for (i = a->listen_active; i < ((word)(a->max_listen + activnotifiedcalls)); i++) {
- if ((j = get_plci(a))) {
- a->listen_active++;
- plci = &a->plci[j - 1];
- plci->State = LISTENING;
-
- add_p(plci, OAD, "\x01\xfd");
-
- add_p(plci, KEY, "\x04\x43\x41\x32\x30");
-
- add_p(plci, CAI, "\x01\xc0");
- add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- add_p(plci, LLI, "\x01\xc4"); /* support Dummy CR FAC + MWI + SpoofNotify */
- add_p(plci, SHIFT | 6, NULL);
- add_p(plci, SIN, "\x02\x00\x00");
- plci->internal_command = LISTEN_SIG_ASSIGN_PEND; /* do indicate_req if OK */
- sig_req(plci, ASSIGN, DSIG_ID);
- send_req(plci);
- }
- }
- }
-}
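-
-/* A sketch of the bookkeeping above: every plci with a notified call */
-/* occupies one listen slot, so new listening signaling entities are  */
-/* assigned until listen_active reaches max_listen plus the number of */
-/* notified calls.                                                    */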
-
-/*------------------------------------------------------------------*/
-/* functions for all parameters sent in INDs */
-/*------------------------------------------------------------------*/
-
-static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize)
-{
- word ploc; /* points to current location within packet */
- byte w;
- byte wlen;
- byte codeset, lock;
- byte *in;
- word i;
- word code;
- word mIEindex = 0;
- ploc = 0;
- codeset = 0;
- lock = 0;
-
- in = plci->Sig.RBuffer->P;
-	for (i = 0; i < parms_id[0]; i++) /* for multi-IEs parms_id contains just the */
-	{ /* first element, but the parms array is larger */
- parms[i] = (byte *)"";
- }
- for (i = 0; i < multiIEsize; i++)
- {
- parms[i] = (byte *)"";
- }
-
- while (ploc < plci->Sig.RBuffer->length - 1) {
-
- /* read information element id and length */
- w = in[ploc];
-
- if (w & 0x80) {
-/*    "w &= 0xf0;" removed, it prevented detecting congestion levels */
-/*    the upper 4 bits are compared against SHIFT below instead      */
- wlen = 0;
- }
- else {
- wlen = (byte)(in[ploc + 1] + 1);
- }
- /* check if length valid (not exceeding end of packet) */
- if ((ploc + wlen) > 270) return;
- if (lock & 0x80) lock &= 0x7f;
- else codeset = lock;
-
- if ((w & 0xf0) == SHIFT) {
- codeset = in[ploc];
- if (!(codeset & 0x08)) lock = (byte)(codeset & 7);
- codeset &= 7;
- lock |= 0x80;
- }
- else {
- if (w == ESC && wlen >= 3) code = in[ploc + 2] | 0x800;
- else code = w;
- code |= (codeset << 8);
-
- for (i = 1; i < parms_id[0] + 1 && parms_id[i] != code; i++);
-
- if (i < parms_id[0] + 1) {
-				if (!multiIEsize) { /* with multi-IEs use the next field index, */
-					mIEindex = i - 1; /* with normal IEs use the same index as parms_id */
- }
-
- parms[mIEindex] = &in[ploc + 1];
- dbug(1, dprintf("mIE[%d]=0x%x", *parms[mIEindex], in[ploc]));
- if (parms_id[i] == OAD
- || parms_id[i] == CONN_NR
- || parms_id[i] == CAD) {
- if (in[ploc + 2] & 0x80) {
- in[ploc + 0] = (byte)(in[ploc + 1] + 1);
- in[ploc + 1] = (byte)(in[ploc + 2] & 0x7f);
- in[ploc + 2] = 0x80;
- parms[mIEindex] = &in[ploc];
- }
- }
-				mIEindex++; /* affects multi-IEs only */
- }
- }
-
- ploc += (wlen + 1);
- }
- return;
-}
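-
-/* The loop above follows the Q.931 codeset rules: a SHIFT IE selects */
-/* a new codeset, locking unless bit 0x08 is set, and a non-locking   */
-/* shift covers exactly one following IE. The stand-alone user-space  */
-/* sketch below (not part of this driver; names and the demo buffer   */
-/* are illustrative only) shows just that state machine in isolation: */
-
-#include <stdio.h>
-
-static void scan_ies(const unsigned char *in, int length)
-{
-	int ploc = 0, wlen;
-	unsigned char codeset = 0, lock = 0, w;
-
-	while (ploc < length - 1) {
-		w = in[ploc];
-		wlen = (w & 0x80) ? 0 : in[ploc + 1] + 1;	/* single-octet IEs carry no length byte */
-		if (lock & 0x80)
-			lock &= 0x7f;		/* non-locking shift covered one IE */
-		else
-			codeset = lock;		/* fall back to the locked codeset */
-		if ((w & 0xf0) == 0x90) {	/* SHIFT */
-			codeset = w & 7;
-			if (!(w & 0x08))
-				lock = codeset;	/* locking shift */
-			lock |= 0x80;
-		} else {
-			printf("IE %02x in codeset %d, %d following octets\n", w, codeset, wlen);
-		}
-		ploc += wlen + 1;
-	}
-}
-
-int main(void)
-{
-	static const unsigned char demo[] = {
-		0x08, 0x02, 0x80, 0x90,		/* cause */
-		0x9e,				/* non-locking shift to codeset 6 */
-		0x01, 0x01, 0xff,		/* one codeset-6 IE */
-		0x04, 0x02, 0x88, 0x90		/* bearer capability, codeset 0 again */
-	};
-	scan_ies(demo, (int)sizeof(demo));
-	return 0;
-}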
-
-/*------------------------------------------------------------------*/
-/* try to match a CIP from the received BC and HLC */
-/*------------------------------------------------------------------*/
-
-static byte ie_compare(byte *ie1, byte *ie2)
-{
- word i;
- if (!ie1 || !ie2) return false;
- if (!ie1[0]) return false;
- for (i = 0; i < (word)(ie1[0] + 1); i++) if (ie1[i] != ie2[i]) return false;
- return true;
-}
-
-static word find_cip(DIVA_CAPI_ADAPTER *a, byte *bc, byte *hlc)
-{
- word i;
- word j;
-
- for (i = 9; i && !ie_compare(bc, cip_bc[i][a->u_law]); i--);
-
- for (j = 16; j < 29 &&
- (!ie_compare(bc, cip_bc[j][a->u_law]) || !ie_compare(hlc, cip_hlc[j])); j++);
- if (j == 29) return i;
- return j;
-}
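-
-/* Search order above: i scans the basic CIP values 9..1 matching on  */
-/* the bearer capability alone, j scans the extended values 16..28    */
-/* which must match both BC and HLC; an extended match wins over a    */
-/* basic one.                                                         */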
-
-
-static byte AddInfo(byte **add_i,
- byte **fty_i,
- byte *esc_chi,
- byte *facility)
-{
- byte i;
- byte j;
- byte k;
- byte flen;
- byte len = 0;
-	/* facility is a nested structure */
-	/* FTY can occur more than once   */
-
- if (esc_chi[0] && !(esc_chi[esc_chi[0]] & 0x7f))
- {
- add_i[0] = (byte *)"\x02\x02\x00"; /* use neither b nor d channel */
- }
-
- else
- {
- add_i[0] = (byte *)"";
- }
- if (!fty_i[0][0])
- {
- add_i[3] = (byte *)"";
- }
- else
- { /* facility array found */
- for (i = 0, j = 1; i < MAX_MULTI_IE && fty_i[i][0]; i++)
- {
- dbug(1, dprintf("AddIFac[%d]", fty_i[i][0]));
- len += fty_i[i][0];
- len += 2;
- flen = fty_i[i][0];
- facility[j++] = 0x1c; /* copy fac IE */
- for (k = 0; k <= flen; k++, j++)
- {
- facility[j] = fty_i[i][k];
-/* dbug(1, dprintf("%x ",facility[j])); */
- }
- }
- facility[0] = len;
- add_i[3] = facility;
- }
-/* dbug(1, dprintf("FacArrLen=%d ",len)); */
- len = add_i[0][0] + add_i[1][0] + add_i[2][0] + add_i[3][0];
- len += 4; /* calculate length of all */
- return (len);
-}
-
-/*------------------------------------------------------------------*/
-/* voice and codec features */
-/*------------------------------------------------------------------*/
-
-static void SetVoiceChannel(PLCI *plci, byte *chi, DIVA_CAPI_ADAPTER *a)
-{
- byte voice_chi[] = "\x02\x18\x01";
- byte channel;
-
- channel = chi[chi[0]] & 0x3;
- dbug(1, dprintf("ExtDevON(Ch=0x%x)", channel));
- voice_chi[2] = (channel) ? channel : 1;
- add_p(plci, FTY, "\x02\x01\x07"); /* B On, default on 1 */
- add_p(plci, ESC, voice_chi); /* Channel */
- sig_req(plci, TEL_CTRL, 0);
- send_req(plci);
- if (a->AdvSignalPLCI)
- {
- adv_voice_write_coefs(a->AdvSignalPLCI, ADV_VOICE_WRITE_ACTIVATION);
- }
-}
-
-static void VoiceChannelOff(PLCI *plci)
-{
- dbug(1, dprintf("ExtDevOFF"));
- add_p(plci, FTY, "\x02\x01\x08"); /* B Off */
- sig_req(plci, TEL_CTRL, 0);
- send_req(plci);
- if (plci->adapter->AdvSignalPLCI)
- {
- adv_voice_clear_config(plci->adapter->AdvSignalPLCI);
- }
-}
-
-
-static word AdvCodecSupport(DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl,
- byte hook_listen)
-{
- word j;
- PLCI *splci;
-
-	/* check if the hardware supports a handset with hook states (adv. codec) */
-	/* or if just an on-board codec is supported                               */
-	/* the advanced codec plci is for internal use only                        */
-
- /* diva Pro with on-board codec: */
- if (a->profile.Global_Options & HANDSET)
- {
- /* new call, but hook states are already signalled */
- if (a->AdvCodecFLAG)
- {
- if (a->AdvSignalAppl != appl || a->AdvSignalPLCI)
- {
- dbug(1, dprintf("AdvSigPlci=0x%x", a->AdvSignalPLCI));
- return 0x2001; /* codec in use by another application */
- }
- if (plci != NULL)
- {
- a->AdvSignalPLCI = plci;
- plci->tel = ADV_VOICE;
- }
- return 0; /* adv codec still used */
- }
- if ((j = get_plci(a)))
- {
- splci = &a->plci[j - 1];
- splci->tel = CODEC_PERMANENT;
-		/* hook_listen indicates whether a facility_req with handset/hook    */
-		/* support was sent. Otherwise, if just a call on an external device */
-		/* was made, the codec will be used but the hook info will be        */
-		/* discarded (just the external controller is in use)                */
- if (hook_listen) splci->State = ADVANCED_VOICE_SIG;
- else
- {
- splci->State = ADVANCED_VOICE_NOSIG;
- if (plci)
- {
- plci->spoofed_msg = SPOOFING_REQUIRED;
- }
- /* indicate D-ch connect if */
- } /* codec is connected OK */
- if (plci != NULL)
- {
- a->AdvSignalPLCI = plci;
- plci->tel = ADV_VOICE;
- }
- a->AdvSignalAppl = appl;
- a->AdvCodecFLAG = true;
- a->AdvCodecPLCI = splci;
- add_p(splci, CAI, "\x01\x15");
- add_p(splci, LLI, "\x01\x00");
- add_p(splci, ESC, "\x02\x18\x00");
- add_p(splci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- splci->internal_command = PERM_COD_ASSIGN;
- dbug(1, dprintf("Codec Assign"));
- sig_req(splci, ASSIGN, DSIG_ID);
- send_req(splci);
- }
- else
- {
- return 0x2001; /* wrong state, no more plcis */
- }
- }
- else if (a->profile.Global_Options & ON_BOARD_CODEC)
- {
- if (hook_listen) return 0x300B; /* Facility not supported */
- /* no hook with SCOM */
- if (plci != NULL) plci->tel = CODEC;
- dbug(1, dprintf("S/SCOM codec"));
-		/* The first time we use the SCOM-S codec we must shut down the        */
-		/* internal handset application of the card. This is done by an assign */
-		/* with a CAI that has the 0x80 bit set; the assign return code is     */
-		/* 'out of resource'.                                                   */
- if (!a->scom_appl_disable) {
- if ((j = get_plci(a))) {
- splci = &a->plci[j - 1];
- add_p(splci, CAI, "\x01\x80");
- add_p(splci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- sig_req(splci, ASSIGN, 0xC0); /* 0xc0 is the TEL_ID */
- send_req(splci);
- a->scom_appl_disable = true;
- }
-		else {
- return 0x2001; /* wrong state, no more plcis */
- }
- }
- }
- else return 0x300B; /* Facility not supported */
-
- return 0;
-}
-
-
-static void CodecIdCheck(DIVA_CAPI_ADAPTER *a, PLCI *plci)
-{
-
- dbug(1, dprintf("CodecIdCheck"));
-
- if (a->AdvSignalPLCI == plci)
- {
- dbug(1, dprintf("PLCI owns codec"));
- VoiceChannelOff(a->AdvCodecPLCI);
- if (a->AdvCodecPLCI->State == ADVANCED_VOICE_NOSIG)
- {
- dbug(1, dprintf("remove temp codec PLCI"));
- plci_remove(a->AdvCodecPLCI);
- a->AdvCodecFLAG = 0;
- a->AdvCodecPLCI = NULL;
- a->AdvSignalAppl = NULL;
- }
- a->AdvSignalPLCI = NULL;
- }
-}
-
-/* -------------------------------------------------------------------
- Ask for physical address of card on PCI bus
- ------------------------------------------------------------------- */
-static void diva_ask_for_xdi_sdram_bar(DIVA_CAPI_ADAPTER *a,
- IDI_SYNC_REQ *preq) {
- a->sdram_bar = 0;
- if (diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR) {
- ENTITY *e = (ENTITY *)preq;
-
- e->user[0] = a->Id - 1;
- preq->xdi_sdram_bar.info.bar = 0;
- preq->xdi_sdram_bar.Req = 0;
- preq->xdi_sdram_bar.Rc = IDI_SYNC_REQ_XDI_GET_ADAPTER_SDRAM_BAR;
-
- (*(a->request))(e);
-
- a->sdram_bar = preq->xdi_sdram_bar.info.bar;
- dbug(3, dprintf("A(%d) SDRAM BAR = %08x", a->Id, a->sdram_bar));
- }
-}
-
-/* -------------------------------------------------------------------
- Ask XDI about extended features
- ------------------------------------------------------------------- */
-static void diva_get_extended_adapter_features(DIVA_CAPI_ADAPTER *a) {
- IDI_SYNC_REQ *preq;
- char buffer[((sizeof(preq->xdi_extended_features) + 4) > sizeof(ENTITY)) ? (sizeof(preq->xdi_extended_features) + 4) : sizeof(ENTITY)];
-
- char features[4];
- preq = (IDI_SYNC_REQ *)&buffer[0];
-
- if (!diva_xdi_extended_features) {
- ENTITY *e = (ENTITY *)preq;
- diva_xdi_extended_features |= 0x80000000;
-
- e->user[0] = a->Id - 1;
- preq->xdi_extended_features.Req = 0;
- preq->xdi_extended_features.Rc = IDI_SYNC_REQ_XDI_GET_EXTENDED_FEATURES;
- preq->xdi_extended_features.info.buffer_length_in_bytes = sizeof(features);
- preq->xdi_extended_features.info.features = &features[0];
-
- (*(a->request))(e);
-
- if (features[0] & DIVA_XDI_EXTENDED_FEATURES_VALID) {
- /*
- Check features located in the byte '0'
- */
- if (features[0] & DIVA_XDI_EXTENDED_FEATURE_CMA) {
- diva_xdi_extended_features |= DIVA_CAPI_USE_CMA;
- }
- if (features[0] & DIVA_XDI_EXTENDED_FEATURE_RX_DMA) {
- diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_RX_DMA;
- dbug(1, dprintf("XDI provides RxDMA"));
- }
- if (features[0] & DIVA_XDI_EXTENDED_FEATURE_SDRAM_BAR) {
- diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_SDRAM_BAR;
- }
- if (features[0] & DIVA_XDI_EXTENDED_FEATURE_NO_CANCEL_RC) {
- diva_xdi_extended_features |= DIVA_CAPI_XDI_PROVIDES_NO_CANCEL;
- dbug(3, dprintf("XDI provides NO_CANCEL_RC feature"));
- }
-
- }
- }
-
- diva_ask_for_xdi_sdram_bar(a, preq);
-}
-
-/*------------------------------------------------------------------*/
-/* automatic law */
-/*------------------------------------------------------------------*/
-/* called from the OS specific part after init time to get the law          */
-/* A-law (Europe) and u-law (US, Japan) use different BCs in the SETUP message */
-void AutomaticLaw(DIVA_CAPI_ADAPTER *a)
-{
- word j;
- PLCI *splci;
-
- if (a->automatic_law) {
- return;
- }
- if ((j = get_plci(a))) {
- diva_get_extended_adapter_features(a);
- splci = &a->plci[j - 1];
- a->automatic_lawPLCI = splci;
- a->automatic_law = 1;
- add_p(splci, CAI, "\x01\x80");
- add_p(splci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- splci->internal_command = USELAW_REQ;
- splci->command = 0;
- splci->number = 0;
- sig_req(splci, ASSIGN, DSIG_ID);
- send_req(splci);
- }
-}
-
-/* called from the OS specific part if an application sends a Capi20Release */
-word CapiRelease(word Id)
-{
- word i, j, appls_found;
- PLCI *plci;
- APPL *this;
- DIVA_CAPI_ADAPTER *a;
-
- if (!Id)
- {
- dbug(0, dprintf("A: CapiRelease(Id==0)"));
- return (_WRONG_APPL_ID);
- }
-
- this = &application[Id - 1]; /* get application pointer */
-
- for (i = 0, appls_found = 0; i < max_appl; i++)
- {
- if (application[i].Id) /* an application has been found */
- {
- appls_found++;
- }
- }
-
- for (i = 0; i < max_adapter; i++) /* scan all adapters... */
- {
- a = &adapter[i];
- if (a->request)
- {
- a->Info_Mask[Id - 1] = 0;
- a->CIP_Mask[Id - 1] = 0;
- a->Notification_Mask[Id - 1] = 0;
- a->codec_listen[Id - 1] = NULL;
- a->requested_options_table[Id - 1] = 0;
- for (j = 0; j < a->max_plci; j++) /* and all PLCIs connected */
- { /* with this application */
- plci = &a->plci[j];
-				if (plci->Id) /* if the plci owns no application */
-				{ /* it may not yet be connected */
- if (plci->State == INC_CON_PENDING
- || plci->State == INC_CON_ALERT)
- {
- if (test_bit(Id - 1, plci->c_ind_mask_table))
- {
- __clear_bit(Id - 1, plci->c_ind_mask_table);
- if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
- {
- sig_req(plci, HANGUP, 0);
- send_req(plci);
- plci->State = OUTG_DIS_PENDING;
- }
- }
- }
- if (test_bit(Id - 1, plci->c_ind_mask_table))
- {
- __clear_bit(Id - 1, plci->c_ind_mask_table);
- if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
- {
- if (!plci->appl)
- {
- plci_remove(plci);
- plci->State = IDLE;
- }
- }
- }
- if (plci->appl == this)
- {
- plci->appl = NULL;
- plci_remove(plci);
- plci->State = IDLE;
- }
- }
- }
- listen_check(a);
-
- if (a->flag_dynamic_l1_down)
- {
- if (appls_found == 1) /* last application does a capi release */
- {
- if ((j = get_plci(a)))
- {
- plci = &a->plci[j - 1];
- plci->command = 0;
- add_p(plci, OAD, "\x01\xfd");
- add_p(plci, CAI, "\x01\x80");
- add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- add_p(plci, SHIFT | 6, NULL);
- add_p(plci, SIN, "\x02\x00\x00");
- plci->internal_command = REM_L1_SIG_ASSIGN_PEND;
- sig_req(plci, ASSIGN, DSIG_ID);
- add_p(plci, FTY, "\x02\xff\x06"); /* l1 down */
- sig_req(plci, SIG_CTRL, 0);
- send_req(plci);
- }
- }
- }
- if (a->AdvSignalAppl == this)
- {
- this->NullCREnable = false;
- if (a->AdvCodecPLCI)
- {
- plci_remove(a->AdvCodecPLCI);
- a->AdvCodecPLCI->tel = 0;
- a->AdvCodecPLCI->adv_nl = 0;
- }
- a->AdvSignalAppl = NULL;
- a->AdvSignalPLCI = NULL;
- a->AdvCodecFLAG = 0;
- a->AdvCodecPLCI = NULL;
- }
- }
- }
-
- this->Id = 0;
-
- return GOOD;
-}
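-
-/* A sketch of the teardown above, per adapter: the application's     */
-/* masks and tables are cleared; incoming calls it was offered are    */
-/* dropped (HANGUP once no other application is left in the           */
-/* c_ind_mask); its plcis are removed; listen slots are refilled; if  */
-/* it was the last application and dynamic L1 down is configured, the */
-/* "l1 down" FTY is sent; and any advanced-voice resources it held    */
-/* are released.                                                      */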
-
-static word plci_remove_check(PLCI *plci)
-{
- if (!plci) return true;
- if (!plci->NL.Id && bitmap_empty(plci->c_ind_mask_table, MAX_APPL))
- {
- if (plci->Sig.Id == 0xff)
- plci->Sig.Id = 0;
- if (!plci->Sig.Id)
- {
- dbug(1, dprintf("plci_remove_complete(%x)", plci->Id));
- dbug(1, dprintf("tel=0x%x,Sig=0x%x", plci->tel, plci->Sig.Id));
- if (plci->Id)
- {
- CodecIdCheck(plci->adapter, plci);
- clear_b1_config(plci);
- ncci_remove(plci, 0, false);
- plci_free_msg_in_queue(plci);
- channel_flow_control_remove(plci);
- plci->Id = 0;
- plci->State = IDLE;
- plci->channels = 0;
- plci->appl = NULL;
- plci->notifiedcall = 0;
- }
- listen_check(plci->adapter);
- return true;
- }
- }
- return false;
-}
-
-
-/*------------------------------------------------------------------*/
-
-static byte plci_nl_busy(PLCI *plci)
-{
- /* only applicable for non-multiplexed protocols */
- return (plci->nl_req
- || (plci->ncci_ring_list
- && plci->adapter->ncci_ch[plci->ncci_ring_list]
- && (plci->adapter->ch_flow_control[plci->adapter->ncci_ch[plci->ncci_ring_list]] & N_OK_FC_PENDING)));
-}
-
-
-/*------------------------------------------------------------------*/
-/* DTMF facilities */
-/*------------------------------------------------------------------*/
-
-
-static struct
-{
- byte send_mask;
- byte listen_mask;
- byte character;
- byte code;
-} dtmf_digit_map[] =
-{
- { 0x01, 0x01, 0x23, DTMF_DIGIT_TONE_CODE_HASHMARK },
- { 0x01, 0x01, 0x2a, DTMF_DIGIT_TONE_CODE_STAR },
- { 0x01, 0x01, 0x30, DTMF_DIGIT_TONE_CODE_0 },
- { 0x01, 0x01, 0x31, DTMF_DIGIT_TONE_CODE_1 },
- { 0x01, 0x01, 0x32, DTMF_DIGIT_TONE_CODE_2 },
- { 0x01, 0x01, 0x33, DTMF_DIGIT_TONE_CODE_3 },
- { 0x01, 0x01, 0x34, DTMF_DIGIT_TONE_CODE_4 },
- { 0x01, 0x01, 0x35, DTMF_DIGIT_TONE_CODE_5 },
- { 0x01, 0x01, 0x36, DTMF_DIGIT_TONE_CODE_6 },
- { 0x01, 0x01, 0x37, DTMF_DIGIT_TONE_CODE_7 },
- { 0x01, 0x01, 0x38, DTMF_DIGIT_TONE_CODE_8 },
- { 0x01, 0x01, 0x39, DTMF_DIGIT_TONE_CODE_9 },
- { 0x01, 0x01, 0x41, DTMF_DIGIT_TONE_CODE_A },
- { 0x01, 0x01, 0x42, DTMF_DIGIT_TONE_CODE_B },
- { 0x01, 0x01, 0x43, DTMF_DIGIT_TONE_CODE_C },
- { 0x01, 0x01, 0x44, DTMF_DIGIT_TONE_CODE_D },
- { 0x01, 0x00, 0x61, DTMF_DIGIT_TONE_CODE_A },
- { 0x01, 0x00, 0x62, DTMF_DIGIT_TONE_CODE_B },
- { 0x01, 0x00, 0x63, DTMF_DIGIT_TONE_CODE_C },
- { 0x01, 0x00, 0x64, DTMF_DIGIT_TONE_CODE_D },
-
- { 0x04, 0x04, 0x80, DTMF_SIGNAL_NO_TONE },
- { 0x00, 0x04, 0x81, DTMF_SIGNAL_UNIDENTIFIED_TONE },
- { 0x04, 0x04, 0x82, DTMF_SIGNAL_DIAL_TONE },
- { 0x04, 0x04, 0x83, DTMF_SIGNAL_PABX_INTERNAL_DIAL_TONE },
- { 0x04, 0x04, 0x84, DTMF_SIGNAL_SPECIAL_DIAL_TONE },
- { 0x04, 0x04, 0x85, DTMF_SIGNAL_SECOND_DIAL_TONE },
- { 0x04, 0x04, 0x86, DTMF_SIGNAL_RINGING_TONE },
- { 0x04, 0x04, 0x87, DTMF_SIGNAL_SPECIAL_RINGING_TONE },
- { 0x04, 0x04, 0x88, DTMF_SIGNAL_BUSY_TONE },
- { 0x04, 0x04, 0x89, DTMF_SIGNAL_CONGESTION_TONE },
- { 0x04, 0x04, 0x8a, DTMF_SIGNAL_SPECIAL_INFORMATION_TONE },
- { 0x04, 0x04, 0x8b, DTMF_SIGNAL_COMFORT_TONE },
- { 0x04, 0x04, 0x8c, DTMF_SIGNAL_HOLD_TONE },
- { 0x04, 0x04, 0x8d, DTMF_SIGNAL_RECORD_TONE },
- { 0x04, 0x04, 0x8e, DTMF_SIGNAL_CALLER_WAITING_TONE },
- { 0x04, 0x04, 0x8f, DTMF_SIGNAL_CALL_WAITING_TONE },
- { 0x04, 0x04, 0x90, DTMF_SIGNAL_PAY_TONE },
- { 0x04, 0x04, 0x91, DTMF_SIGNAL_POSITIVE_INDICATION_TONE },
- { 0x04, 0x04, 0x92, DTMF_SIGNAL_NEGATIVE_INDICATION_TONE },
- { 0x04, 0x04, 0x93, DTMF_SIGNAL_WARNING_TONE },
- { 0x04, 0x04, 0x94, DTMF_SIGNAL_INTRUSION_TONE },
- { 0x04, 0x04, 0x95, DTMF_SIGNAL_CALLING_CARD_SERVICE_TONE },
- { 0x04, 0x04, 0x96, DTMF_SIGNAL_PAYPHONE_RECOGNITION_TONE },
- { 0x04, 0x04, 0x97, DTMF_SIGNAL_CPE_ALERTING_SIGNAL },
- { 0x04, 0x04, 0x98, DTMF_SIGNAL_OFF_HOOK_WARNING_TONE },
- { 0x04, 0x04, 0xbf, DTMF_SIGNAL_INTERCEPT_TONE },
- { 0x04, 0x04, 0xc0, DTMF_SIGNAL_MODEM_CALLING_TONE },
- { 0x04, 0x04, 0xc1, DTMF_SIGNAL_FAX_CALLING_TONE },
- { 0x04, 0x04, 0xc2, DTMF_SIGNAL_ANSWER_TONE },
- { 0x04, 0x04, 0xc3, DTMF_SIGNAL_REVERSED_ANSWER_TONE },
- { 0x04, 0x04, 0xc4, DTMF_SIGNAL_ANSAM_TONE },
- { 0x04, 0x04, 0xc5, DTMF_SIGNAL_REVERSED_ANSAM_TONE },
- { 0x04, 0x04, 0xc6, DTMF_SIGNAL_BELL103_ANSWER_TONE },
- { 0x04, 0x04, 0xc7, DTMF_SIGNAL_FAX_FLAGS },
- { 0x04, 0x04, 0xc8, DTMF_SIGNAL_G2_FAX_GROUP_ID },
- { 0x00, 0x04, 0xc9, DTMF_SIGNAL_HUMAN_SPEECH },
- { 0x04, 0x04, 0xca, DTMF_SIGNAL_ANSWERING_MACHINE_390 },
- { 0x02, 0x02, 0xf1, DTMF_MF_DIGIT_TONE_CODE_1 },
- { 0x02, 0x02, 0xf2, DTMF_MF_DIGIT_TONE_CODE_2 },
- { 0x02, 0x02, 0xf3, DTMF_MF_DIGIT_TONE_CODE_3 },
- { 0x02, 0x02, 0xf4, DTMF_MF_DIGIT_TONE_CODE_4 },
- { 0x02, 0x02, 0xf5, DTMF_MF_DIGIT_TONE_CODE_5 },
- { 0x02, 0x02, 0xf6, DTMF_MF_DIGIT_TONE_CODE_6 },
- { 0x02, 0x02, 0xf7, DTMF_MF_DIGIT_TONE_CODE_7 },
- { 0x02, 0x02, 0xf8, DTMF_MF_DIGIT_TONE_CODE_8 },
- { 0x02, 0x02, 0xf9, DTMF_MF_DIGIT_TONE_CODE_9 },
- { 0x02, 0x02, 0xfa, DTMF_MF_DIGIT_TONE_CODE_0 },
- { 0x02, 0x02, 0xfb, DTMF_MF_DIGIT_TONE_CODE_K1 },
- { 0x02, 0x02, 0xfc, DTMF_MF_DIGIT_TONE_CODE_K2 },
- { 0x02, 0x02, 0xfd, DTMF_MF_DIGIT_TONE_CODE_KP },
- { 0x02, 0x02, 0xfe, DTMF_MF_DIGIT_TONE_CODE_S1 },
- { 0x02, 0x02, 0xff, DTMF_MF_DIGIT_TONE_CODE_ST },
-
-};
-
-#define DTMF_DIGIT_MAP_ENTRIES ARRAY_SIZE(dtmf_digit_map)
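-
-/* How this table is read: send_mask/listen_mask are matched against  */
-/* the mode bit selected in dtmf_request/dtmf_command (0x01 plain     */
-/* DTMF digits, 0x02 MF digits, 0x04 tone signals, as implied by the  */
-/* mask <<= 1 chains there); character is the CAPI coding and code    */
-/* the coding sent to or received from the card.                      */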
-
-
-static void dtmf_enable_receiver(PLCI *plci, byte enable_mask)
-{
- word min_digit_duration, min_gap_duration;
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_enable_receiver %02x",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__, enable_mask));
-
- if (enable_mask != 0)
- {
- min_digit_duration = (plci->dtmf_rec_pulse_ms == 0) ? 40 : plci->dtmf_rec_pulse_ms;
- min_gap_duration = (plci->dtmf_rec_pause_ms == 0) ? 40 : plci->dtmf_rec_pause_ms;
- plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_ENABLE_RECEIVER;
- PUT_WORD(&plci->internal_req_buffer[1], min_digit_duration);
- PUT_WORD(&plci->internal_req_buffer[3], min_gap_duration);
- plci->NData[0].PLength = 5;
-
- PUT_WORD(&plci->internal_req_buffer[5], INTERNAL_IND_BUFFER_SIZE);
- plci->NData[0].PLength += 2;
- capidtmf_recv_enable(&(plci->capidtmf_state), min_digit_duration, min_gap_duration);
-
- }
- else
- {
- plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_DISABLE_RECEIVER;
- plci->NData[0].PLength = 1;
-
- capidtmf_recv_disable(&(plci->capidtmf_state));
-
- }
- plci->NData[0].P = plci->internal_req_buffer;
- plci->NL.X = plci->NData;
- plci->NL.ReqCh = 0;
- plci->NL.Req = plci->nl_req = (byte) N_UDATA;
- plci->adapter->request(&plci->NL);
-}
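-
-/* Wire format of the enable request built above:                     */
-/*   byte 0     DTMF_UDATA_REQUEST_ENABLE_RECEIVER                    */
-/*   bytes 1-2  minimum digit duration in ms (word, default 40)       */
-/*   bytes 3-4  minimum gap duration in ms (word, default 40)         */
-/*   bytes 5-6  indication buffer size (word)                         */
-/* The disable request is the single opcode byte.                     */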
-
-
-static void dtmf_send_digits(PLCI *plci, byte *digit_buffer, word digit_count)
-{
- word w, i;
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_send_digits %d",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__, digit_count));
-
- plci->internal_req_buffer[0] = DTMF_UDATA_REQUEST_SEND_DIGITS;
- w = (plci->dtmf_send_pulse_ms == 0) ? 40 : plci->dtmf_send_pulse_ms;
- PUT_WORD(&plci->internal_req_buffer[1], w);
- w = (plci->dtmf_send_pause_ms == 0) ? 40 : plci->dtmf_send_pause_ms;
- PUT_WORD(&plci->internal_req_buffer[3], w);
- for (i = 0; i < digit_count; i++)
- {
- w = 0;
- while ((w < DTMF_DIGIT_MAP_ENTRIES)
- && (digit_buffer[i] != dtmf_digit_map[w].character))
- {
- w++;
- }
- plci->internal_req_buffer[5 + i] = (w < DTMF_DIGIT_MAP_ENTRIES) ?
- dtmf_digit_map[w].code : DTMF_DIGIT_TONE_CODE_STAR;
- }
- plci->NData[0].PLength = 5 + digit_count;
- plci->NData[0].P = plci->internal_req_buffer;
- plci->NL.X = plci->NData;
- plci->NL.ReqCh = 0;
- plci->NL.Req = plci->nl_req = (byte) N_UDATA;
- plci->adapter->request(&plci->NL);
-}
-
-
-static void dtmf_rec_clear_config(PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_rec_clear_config",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- plci->dtmf_rec_active = 0;
- plci->dtmf_rec_pulse_ms = 0;
- plci->dtmf_rec_pause_ms = 0;
-
- capidtmf_init(&(plci->capidtmf_state), plci->adapter->u_law);
-
-}
-
-
-static void dtmf_send_clear_config(PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_send_clear_config",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- plci->dtmf_send_requests = 0;
- plci->dtmf_send_pulse_ms = 0;
- plci->dtmf_send_pause_ms = 0;
-}
-
-
-static void dtmf_prepare_switch(dword Id, PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_prepare_switch",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
- while (plci->dtmf_send_requests != 0)
- dtmf_confirmation(Id, plci);
-}
-
-
-static word dtmf_save_config(dword Id, PLCI *plci, byte Rc)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_save_config %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
- return (GOOD);
-}
-
-
-static word dtmf_restore_config(dword Id, PLCI *plci, byte Rc)
-{
- word Info;
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_restore_config %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
- Info = GOOD;
- if (plci->B1_facilities & B1_FACILITY_DTMFR)
- {
- switch (plci->adjust_b_state)
- {
- case ADJUST_B_RESTORE_DTMF_1:
- plci->internal_command = plci->adjust_b_command;
- if (plci_nl_busy(plci))
- {
- plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
- break;
- }
- dtmf_enable_receiver(plci, plci->dtmf_rec_active);
- plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_2;
- break;
- case ADJUST_B_RESTORE_DTMF_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Reenable DTMF receiver failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _WRONG_STATE;
- break;
- }
- break;
- }
- }
- return (Info);
-}
-
-
-static void dtmf_command(dword Id, PLCI *plci, byte Rc)
-{
- word internal_command, Info;
- byte mask;
- byte result[4];
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_command %02x %04x %04x %d %d %d %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
- plci->dtmf_cmd, plci->dtmf_rec_pulse_ms, plci->dtmf_rec_pause_ms,
- plci->dtmf_send_pulse_ms, plci->dtmf_send_pause_ms));
-
- Info = GOOD;
- result[0] = 2;
- PUT_WORD(&result[1], DTMF_SUCCESS);
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- mask = 0x01;
- switch (plci->dtmf_cmd)
- {
-
- case DTMF_LISTEN_TONE_START:
- mask <<= 1; /* fall through */
- case DTMF_LISTEN_MF_START:
- mask <<= 1; /* fall through */
-
- case DTMF_LISTEN_START:
- switch (internal_command)
- {
- default:
- adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
- B1_FACILITY_DTMFR), DTMF_COMMAND_1);
- /* fall through */
- case DTMF_COMMAND_1:
- if (adjust_b_process(Id, plci, Rc) != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Load DTMF failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
- if (plci->internal_command)
- return;
- /* fall through */
- case DTMF_COMMAND_2:
- if (plci_nl_busy(plci))
- {
- plci->internal_command = DTMF_COMMAND_2;
- return;
- }
- plci->internal_command = DTMF_COMMAND_3;
- dtmf_enable_receiver(plci, (byte)(plci->dtmf_rec_active | mask));
- return;
- case DTMF_COMMAND_3:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Enable DTMF receiver failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
-
- plci->tone_last_indication_code = DTMF_SIGNAL_NO_TONE;
-
- plci->dtmf_rec_active |= mask;
- break;
- }
- break;
-
-
- case DTMF_LISTEN_TONE_STOP:
- mask <<= 1; /* fall through */
- case DTMF_LISTEN_MF_STOP:
- mask <<= 1; /* fall through */
-
- case DTMF_LISTEN_STOP:
- switch (internal_command)
- {
- default:
- plci->dtmf_rec_active &= ~mask;
- if (plci->dtmf_rec_active)
- break;
-/*
- case DTMF_COMMAND_1:
- if (plci->dtmf_rec_active)
- {
- if (plci_nl_busy (plci))
- {
- plci->internal_command = DTMF_COMMAND_1;
- return;
- }
- plci->dtmf_rec_active &= ~mask;
- plci->internal_command = DTMF_COMMAND_2;
- dtmf_enable_receiver (plci, false);
- return;
- }
- Rc = OK;
- case DTMF_COMMAND_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug (1, dprintf("[%06lx] %s,%d: Disable DTMF receiver failed %02x",
- UnMapId (Id), (char far *)(FILE_), __LINE__, Rc));
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
-*/
- adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
- ~(B1_FACILITY_DTMFX | B1_FACILITY_DTMFR)), DTMF_COMMAND_3);
- /* fall through */
- case DTMF_COMMAND_3:
- if (adjust_b_process(Id, plci, Rc) != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Unload DTMF failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
- if (plci->internal_command)
- return;
- break;
- }
- break;
-
-
- case DTMF_SEND_TONE:
- mask <<= 1; /* fall through */
- case DTMF_SEND_MF:
- mask <<= 1; /* fall through */
-
- case DTMF_DIGITS_SEND:
- switch (internal_command)
- {
- default:
- adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
- ((plci->dtmf_parameter_length != 0) ? B1_FACILITY_DTMFX | B1_FACILITY_DTMFR : B1_FACILITY_DTMFX)),
- DTMF_COMMAND_1);
- /* fall through */
- case DTMF_COMMAND_1:
- if (adjust_b_process(Id, plci, Rc) != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Load DTMF failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
- if (plci->internal_command)
- return;
- /* fall through */
- case DTMF_COMMAND_2:
- if (plci_nl_busy(plci))
- {
- plci->internal_command = DTMF_COMMAND_2;
- return;
- }
- plci->dtmf_msg_number_queue[(plci->dtmf_send_requests)++] = plci->number;
- plci->internal_command = DTMF_COMMAND_3;
- dtmf_send_digits(plci, &plci->saved_msg.parms[3].info[1], plci->saved_msg.parms[3].length);
- return;
- case DTMF_COMMAND_3:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Send DTMF digits failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- if (plci->dtmf_send_requests != 0)
- (plci->dtmf_send_requests)--;
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
- return;
- }
- break;
- }
- sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->number,
- "wws", Info, SELECTOR_DTMF, result);
-}
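-
-/* The DTMF_COMMAND_1/2/3 stages above follow this driver's usual     */
-/* re-entrant internal-command pattern: a stage that must wait stores */
-/* itself in plci->internal_command and returns; the function is      */
-/* re-entered later with the return code (Rc) of the pending          */
-/* operation and resumes at that stage. Falling out of the switch     */
-/* sends the facility confirmation.                                   */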
-
-
-static byte dtmf_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- word Info;
- word i, j;
- byte mask;
- API_PARSE dtmf_parms[5];
- byte result[40];
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_request",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
- Info = GOOD;
- result[0] = 2;
- PUT_WORD(&result[1], DTMF_SUCCESS);
- if (!(a->profile.Global_Options & GL_DTMF_SUPPORTED))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _FACILITY_NOT_SUPPORTED;
- }
- else if (api_parse(&msg[1].info[1], msg[1].length, "w", dtmf_parms))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_MESSAGE_FORMAT;
- }
-
- else if ((GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_DETECT_CODES)
- || (GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_SEND_CODES))
- {
- if (!((a->requested_options_table[appl->Id - 1])
- & (1L << PRIVATE_DTMF_TONE)))
- {
- dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
- PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
- }
- else
- {
- for (i = 0; i < 32; i++)
- result[4 + i] = 0;
- if (GET_WORD(dtmf_parms[0].info) == DTMF_GET_SUPPORTED_DETECT_CODES)
- {
- for (i = 0; i < DTMF_DIGIT_MAP_ENTRIES; i++)
- {
- if (dtmf_digit_map[i].listen_mask != 0)
- result[4 + (dtmf_digit_map[i].character >> 3)] |= (1 << (dtmf_digit_map[i].character & 0x7));
- }
- }
- else
- {
- for (i = 0; i < DTMF_DIGIT_MAP_ENTRIES; i++)
- {
- if (dtmf_digit_map[i].send_mask != 0)
- result[4 + (dtmf_digit_map[i].character >> 3)] |= (1 << (dtmf_digit_map[i].character & 0x7));
- }
- }
- result[0] = 3 + 32;
- result[3] = 32;
- }
- }
-
- else if (plci == NULL)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_IDENTIFIER;
- }
- else
- {
- if (!plci->State
- || !plci->NL.Id || plci->nl_remove_id)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_STATE;
- }
- else
- {
- plci->command = 0;
- plci->dtmf_cmd = GET_WORD(dtmf_parms[0].info);
- mask = 0x01;
- switch (plci->dtmf_cmd)
- {
-
- case DTMF_LISTEN_TONE_START:
- case DTMF_LISTEN_TONE_STOP:
- mask <<= 1; /* fall through */
- case DTMF_LISTEN_MF_START:
- case DTMF_LISTEN_MF_STOP:
- mask <<= 1;
- if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[appl->Id - 1])
- & (1L << PRIVATE_DTMF_TONE)))
- {
- dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
- PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
- break;
- }
- /* fall through */
-
- case DTMF_LISTEN_START:
- case DTMF_LISTEN_STOP:
- if (!(a->manufacturer_features & MANUFACTURER_FEATURE_HARDDTMF)
- && !(a->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
- if (mask & DTMF_LISTEN_ACTIVE_FLAG)
- {
- if (api_parse(&msg[1].info[1], msg[1].length, "wwws", dtmf_parms))
- {
- plci->dtmf_rec_pulse_ms = 0;
- plci->dtmf_rec_pause_ms = 0;
- }
- else
- {
- plci->dtmf_rec_pulse_ms = GET_WORD(dtmf_parms[1].info);
- plci->dtmf_rec_pause_ms = GET_WORD(dtmf_parms[2].info);
- }
- }
- start_internal_command(Id, plci, dtmf_command);
- return (false);
-
-
- case DTMF_SEND_TONE:
- mask <<= 1; /* fall through */
- case DTMF_SEND_MF:
- mask <<= 1;
- if (!((plci->requested_options_conn | plci->requested_options | plci->adapter->requested_options_table[appl->Id - 1])
- & (1L << PRIVATE_DTMF_TONE)))
- {
- dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(dtmf_parms[0].info)));
- PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
- break;
- }
- /* fall through */
-
- case DTMF_DIGITS_SEND:
- if (api_parse(&msg[1].info[1], msg[1].length, "wwws", dtmf_parms))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- if (mask & DTMF_LISTEN_ACTIVE_FLAG)
- {
- plci->dtmf_send_pulse_ms = GET_WORD(dtmf_parms[1].info);
- plci->dtmf_send_pause_ms = GET_WORD(dtmf_parms[2].info);
- }
- i = 0;
- j = 0;
- while ((i < dtmf_parms[3].length) && (j < DTMF_DIGIT_MAP_ENTRIES))
- {
- j = 0;
- while ((j < DTMF_DIGIT_MAP_ENTRIES)
- && ((dtmf_parms[3].info[i + 1] != dtmf_digit_map[j].character)
- || ((dtmf_digit_map[j].send_mask & mask) == 0)))
- {
- j++;
- }
- i++;
- }
- if (j == DTMF_DIGIT_MAP_ENTRIES)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Incorrect DTMF digit %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, dtmf_parms[3].info[i]));
- PUT_WORD(&result[1], DTMF_INCORRECT_DIGIT);
- break;
- }
- if (plci->dtmf_send_requests >= ARRAY_SIZE(plci->dtmf_msg_number_queue))
- {
- dbug(1, dprintf("[%06lx] %s,%d: DTMF request overrun",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_STATE;
- break;
- }
- api_save_msg(dtmf_parms, "wwws", &plci->saved_msg);
- start_internal_command(Id, plci, dtmf_command);
- return (false);
-
- default:
- dbug(1, dprintf("[%06lx] %s,%d: DTMF unknown request %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, plci->dtmf_cmd));
- PUT_WORD(&result[1], DTMF_UNKNOWN_REQUEST);
- }
- }
- }
- sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
- "wws", Info, SELECTOR_DTMF, result);
- return (false);
-}
-
-
-static void dtmf_confirmation(dword Id, PLCI *plci)
-{
- word i;
- byte result[4];
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_confirmation",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
- result[0] = 2;
- PUT_WORD(&result[1], DTMF_SUCCESS);
- if (plci->dtmf_send_requests != 0)
- {
- sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->dtmf_msg_number_queue[0],
- "wws", GOOD, SELECTOR_DTMF, result);
- (plci->dtmf_send_requests)--;
- for (i = 0; i < plci->dtmf_send_requests; i++)
- plci->dtmf_msg_number_queue[i] = plci->dtmf_msg_number_queue[i + 1];
- }
-}
-
-
-static void dtmf_indication(dword Id, PLCI *plci, byte *msg, word length)
-{
- word i, j, n;
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_indication",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
- n = 0;
- for (i = 1; i < length; i++)
- {
- j = 0;
- while ((j < DTMF_DIGIT_MAP_ENTRIES)
- && ((msg[i] != dtmf_digit_map[j].code)
- || ((dtmf_digit_map[j].listen_mask & plci->dtmf_rec_active) == 0)))
- {
- j++;
- }
- if (j < DTMF_DIGIT_MAP_ENTRIES)
- {
-
- if ((dtmf_digit_map[j].listen_mask & DTMF_TONE_LISTEN_ACTIVE_FLAG)
- && (plci->tone_last_indication_code == DTMF_SIGNAL_NO_TONE)
- && (dtmf_digit_map[j].character != DTMF_SIGNAL_UNIDENTIFIED_TONE))
- {
- if (n + 1 == i)
- {
- for (i = length; i > n + 1; i--)
- msg[i] = msg[i - 1];
- length++;
- i++;
- }
- msg[++n] = DTMF_SIGNAL_UNIDENTIFIED_TONE;
- }
- plci->tone_last_indication_code = dtmf_digit_map[j].character;
-
- msg[++n] = dtmf_digit_map[j].character;
- }
- }
- if (n != 0)
- {
- msg[0] = (byte) n;
- sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "wS", SELECTOR_DTMF, msg);
- }
-}
-
-
-/*------------------------------------------------------------------*/
-/* DTMF parameters */
-/*------------------------------------------------------------------*/
-
-static void dtmf_parameter_write(PLCI *plci)
-{
- word i;
- byte parameter_buffer[DTMF_PARAMETER_BUFFER_SIZE + 2];
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_write",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- parameter_buffer[0] = plci->dtmf_parameter_length + 1;
- parameter_buffer[1] = DSP_CTRL_SET_DTMF_PARAMETERS;
- for (i = 0; i < plci->dtmf_parameter_length; i++)
- parameter_buffer[2 + i] = plci->dtmf_parameter_buffer[i];
- add_p(plci, FTY, parameter_buffer);
- sig_req(plci, TEL_CTRL, 0);
- send_req(plci);
-}
-
-
-static void dtmf_parameter_clear_config(PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_clear_config",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- plci->dtmf_parameter_length = 0;
-}
-
-
-static void dtmf_parameter_prepare_switch(dword Id, PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_prepare_switch",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
-}
-
-
-static word dtmf_parameter_save_config(dword Id, PLCI *plci, byte Rc)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_save_config %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
- return (GOOD);
-}
-
-
-static word dtmf_parameter_restore_config(dword Id, PLCI *plci, byte Rc)
-{
- word Info;
-
- dbug(1, dprintf("[%06lx] %s,%d: dtmf_parameter_restore_config %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
- Info = GOOD;
- if ((plci->B1_facilities & B1_FACILITY_DTMFR)
- && (plci->dtmf_parameter_length != 0))
- {
- switch (plci->adjust_b_state)
- {
- case ADJUST_B_RESTORE_DTMF_PARAMETER_1:
- plci->internal_command = plci->adjust_b_command;
- if (plci->sig_req)
- {
- plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_1;
- break;
- }
- dtmf_parameter_write(plci);
- plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_2;
- break;
- case ADJUST_B_RESTORE_DTMF_PARAMETER_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Restore DTMF parameters failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _WRONG_STATE;
- break;
- }
- break;
- }
- }
- return (Info);
-}
-
-
-/*------------------------------------------------------------------*/
-/* Line interconnect facilities */
-/*------------------------------------------------------------------*/
-
-
-LI_CONFIG *li_config_table;
-word li_total_channels;
-
-
-/*------------------------------------------------------------------*/
-/* translate a CHI information element to a channel number */
-/* returns 0xff - any channel */
-/* 0xfe - chi wrong coding */
-/* 0xfd - D-channel */
-/* 0x00 - no channel */
-/* else channel number / PRI: timeslot */
-/* if pchannelmap is provided we accept more than one channel. */
-/*------------------------------------------------------------------*/
-
-static byte chi_to_channel(byte *chi, dword *pchannelmap)
-{
- int p;
- int i;
- dword map;
- byte excl;
- byte ofs;
- byte ch;
-
- if (pchannelmap) *pchannelmap = 0;
- if (!chi[0]) return 0xff;
- excl = 0;
-
- if (chi[1] & 0x20) {
- if (chi[0] == 1 && chi[1] == 0xac) return 0xfd; /* exclusive d-channel */
- for (i = 1; i < chi[0] && !(chi[i] & 0x80); i++);
- if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
- if ((chi[1] | 0xc8) != 0xe9) return 0xfe;
- if (chi[1] & 0x08) excl = 0x40;
-
- /* int. id present */
- if (chi[1] & 0x40) {
- p = i + 1;
- for (i = p; i < chi[0] && !(chi[i] & 0x80); i++);
- if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
- }
-
- /* coding standard, Number/Map, Channel Type */
- p = i + 1;
- for (i = p; i < chi[0] && !(chi[i] & 0x80); i++);
- if (i == chi[0] || !(chi[i] & 0x80)) return 0xfe;
- if ((chi[p] | 0xd0) != 0xd3) return 0xfe;
-
- /* Number/Map */
- if (chi[p] & 0x10) {
-
- /* map */
- if ((chi[0] - p) == 4) ofs = 0;
- else if ((chi[0] - p) == 3) ofs = 1;
- else return 0xfe;
- ch = 0;
- map = 0;
- for (i = 0; i < 4 && p < chi[0]; i++) {
- p++;
- ch += 8;
- map <<= 8;
- if (chi[p]) {
- for (ch = 0; !(chi[p] & (1 << ch)); ch++);
- map |= chi[p];
- }
- }
- ch += ofs;
- map <<= ofs;
- }
- else {
-
- /* number */
- p = i + 1;
- ch = chi[p] & 0x3f;
- if (pchannelmap) {
- if ((byte)(chi[0] - p) > 30) return 0xfe;
- map = 0;
- for (i = p; i <= chi[0]; i++) {
- if ((chi[i] & 0x7f) > 31) return 0xfe;
- map |= (1L << (chi[i] & 0x7f));
- }
- }
- else {
- if (p != chi[0]) return 0xfe;
- if (ch > 31) return 0xfe;
- map = (1L << ch);
- }
- if (chi[p] & 0x40) return 0xfe;
- }
- if (pchannelmap) *pchannelmap = map;
- else if (map != ((dword)(1L << ch))) return 0xfe;
- return (byte)(excl | ch);
- }
- else { /* not PRI */
- for (i = 1; i < chi[0] && !(chi[i] & 0x80); i++);
- if (i != chi[0] || !(chi[i] & 0x80)) return 0xfe;
- if (chi[1] & 0x08) excl = 0x40;
-
- switch (chi[1] | 0x98) {
- case 0x98: return 0;
- case 0x99:
- if (pchannelmap) *pchannelmap = 2;
- return excl | 1;
- case 0x9a:
- if (pchannelmap) *pchannelmap = 4;
- return excl | 2;
- case 0x9b: return 0xff;
- case 0x9c: return 0xfd; /* d-ch */
- default: return 0xfe;
- }
- }
-}
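-
-/* A minimal usage sketch, assuming it were compiled alongside the    */
-/* function above (byte/dword are this driver's typedefs); the CHI    */
-/* octets are illustrative:                                           */
-
-static void chi_to_channel_demo(void)
-{
-	static byte chi_b1[] = { 0x01, 0x81 };		/* BRI: B1 */
-	static byte chi_b1_excl[] = { 0x01, 0x89 };	/* BRI: B1, exclusive */
-	dword map;
-	byte ch;
-
-	ch = chi_to_channel(chi_b1, &map);
-	dbug(1, dprintf("ch=%02x map=%lx", ch, map));	/* ch=01 map=2 */
-	ch = chi_to_channel(chi_b1_excl, &map);
-	dbug(1, dprintf("ch=%02x map=%lx", ch, map));	/* ch=41 (0x40 = exclusive) map=2 */
-}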
-
-
-static void mixer_set_bchannel_id_esc(PLCI *plci, byte bchannel_id)
-{
- DIVA_CAPI_ADAPTER *a;
- PLCI *splci;
- byte old_id;
-
- a = plci->adapter;
- old_id = plci->li_bchannel_id;
- if (a->li_pri)
- {
- if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
- li_config_table[a->li_base + (old_id - 1)].plci = NULL;
- plci->li_bchannel_id = (bchannel_id & 0x1f) + 1;
- if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
- li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
- }
- else
- {
- if (((bchannel_id & 0x03) == 1) || ((bchannel_id & 0x03) == 2))
- {
- if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
- li_config_table[a->li_base + (old_id - 1)].plci = NULL;
- plci->li_bchannel_id = bchannel_id & 0x03;
- if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI != plci) && (a->AdvSignalPLCI->tel == ADV_VOICE))
- {
- splci = a->AdvSignalPLCI;
- if (li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci == NULL)
- {
- if ((splci->li_bchannel_id != 0)
- && (li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci == splci))
- {
- li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci = NULL;
- }
- splci->li_bchannel_id = 3 - plci->li_bchannel_id;
- li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci = splci;
- dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id_esc %d",
- (dword)((splci->Id << 8) | UnMapController(splci->adapter->Id)),
- (char *)(FILE_), __LINE__, splci->li_bchannel_id));
- }
- }
- if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
- li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
- }
- }
- if ((old_id == 0) && (plci->li_bchannel_id != 0)
- && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
- {
- mixer_clear_config(plci);
- }
- dbug(1, dprintf("[%06lx] %s,%d: mixer_set_bchannel_id_esc %d %d",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__, bchannel_id, plci->li_bchannel_id));
-}
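-
-/* A sketch of the BRI bookkeeping above: li_config_table slots       */
-/* a->li_base + 0/1 stand for B1/B2. When the advanced-voice          */
-/* signaling plci exists and the other channel's slot is still free,  */
-/* it is moved there (id 3 - id), so both plcis keep distinct slots.  */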
-
-
-static void mixer_set_bchannel_id(PLCI *plci, byte *chi)
-{
- DIVA_CAPI_ADAPTER *a;
- PLCI *splci;
- byte ch, old_id;
-
- a = plci->adapter;
- old_id = plci->li_bchannel_id;
- ch = chi_to_channel(chi, NULL);
- if (!(ch & 0x80))
- {
- if (a->li_pri)
- {
- if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
- li_config_table[a->li_base + (old_id - 1)].plci = NULL;
- plci->li_bchannel_id = (ch & 0x1f) + 1;
- if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
- li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
- }
- else
- {
- if (((ch & 0x1f) == 1) || ((ch & 0x1f) == 2))
- {
- if ((old_id != 0) && (li_config_table[a->li_base + (old_id - 1)].plci == plci))
- li_config_table[a->li_base + (old_id - 1)].plci = NULL;
- plci->li_bchannel_id = ch & 0x1f;
- if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI != plci) && (a->AdvSignalPLCI->tel == ADV_VOICE))
- {
- splci = a->AdvSignalPLCI;
- if (li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci == NULL)
- {
- if ((splci->li_bchannel_id != 0)
- && (li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci == splci))
- {
- li_config_table[a->li_base + (splci->li_bchannel_id - 1)].plci = NULL;
- }
- splci->li_bchannel_id = 3 - plci->li_bchannel_id;
- li_config_table[a->li_base + (2 - plci->li_bchannel_id)].plci = splci;
- dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
- (dword)((splci->Id << 8) | UnMapController(splci->adapter->Id)),
- (char *)(FILE_), __LINE__, splci->li_bchannel_id));
- }
- }
- if (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == NULL)
- li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci = plci;
- }
- }
- }
- if ((old_id == 0) && (plci->li_bchannel_id != 0)
- && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
- {
- mixer_clear_config(plci);
- }
- dbug(1, dprintf("[%06lx] %s,%d: mixer_set_bchannel_id %02x %d",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__, ch, plci->li_bchannel_id));
-}
-
-
-#define MIXER_MAX_DUMP_CHANNELS 34
-
-static void mixer_calculate_coefs(DIVA_CAPI_ADAPTER *a)
-{
- word n, i, j;
- char *p;
- char hex_line[2 * MIXER_MAX_DUMP_CHANNELS + MIXER_MAX_DUMP_CHANNELS / 8 + 4];
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_calculate_coefs",
- (dword)(UnMapController(a->Id)), (char *)(FILE_), __LINE__));
-
- for (i = 0; i < li_total_channels; i++)
- {
- li_config_table[i].channel &= LI_CHANNEL_ADDRESSES_SET;
- if (li_config_table[i].chflags != 0)
- li_config_table[i].channel |= LI_CHANNEL_INVOLVED;
- else
- {
- for (j = 0; j < li_total_channels; j++)
- {
- if (((li_config_table[i].flag_table[j]) != 0)
- || ((li_config_table[j].flag_table[i]) != 0))
- {
- li_config_table[i].channel |= LI_CHANNEL_INVOLVED;
- }
- if (((li_config_table[i].flag_table[j] & LI_FLAG_CONFERENCE) != 0)
- || ((li_config_table[j].flag_table[i] & LI_FLAG_CONFERENCE) != 0))
- {
- li_config_table[i].channel |= LI_CHANNEL_CONFERENCE;
- }
- }
- }
- }
- for (i = 0; i < li_total_channels; i++)
- {
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[i].coef_table[j] &= ~(LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC);
- if (li_config_table[i].flag_table[j] & LI_FLAG_CONFERENCE)
- li_config_table[i].coef_table[j] |= LI_COEF_CH_CH;
- }
- }
- for (n = 0; n < li_total_channels; n++)
- {
- if (li_config_table[n].channel & LI_CHANNEL_CONFERENCE)
- {
- for (i = 0; i < li_total_channels; i++)
- {
- if (li_config_table[i].channel & LI_CHANNEL_CONFERENCE)
- {
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[i].coef_table[j] |=
- li_config_table[i].coef_table[n] & li_config_table[n].coef_table[j];
- }
- }
- }
- }
- }
- for (i = 0; i < li_total_channels; i++)
- {
- if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
- {
- li_config_table[i].coef_table[i] &= ~LI_COEF_CH_CH;
- for (j = 0; j < li_total_channels; j++)
- {
- if (li_config_table[i].coef_table[j] & LI_COEF_CH_CH)
- li_config_table[i].flag_table[j] |= LI_FLAG_CONFERENCE;
- }
- if (li_config_table[i].flag_table[i] & LI_FLAG_CONFERENCE)
- li_config_table[i].coef_table[i] |= LI_COEF_CH_CH;
- }
- }
- for (i = 0; i < li_total_channels; i++)
- {
- if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
- {
- for (j = 0; j < li_total_channels; j++)
- {
- if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
- li_config_table[i].coef_table[j] |= LI_COEF_CH_CH;
- if (li_config_table[i].flag_table[j] & LI_FLAG_MONITOR)
- li_config_table[i].coef_table[j] |= LI_COEF_CH_PC;
- if (li_config_table[i].flag_table[j] & LI_FLAG_MIX)
- li_config_table[i].coef_table[j] |= LI_COEF_PC_CH;
- if (li_config_table[i].flag_table[j] & LI_FLAG_PCCONNECT)
- li_config_table[i].coef_table[j] |= LI_COEF_PC_PC;
- }
- if (li_config_table[i].chflags & LI_CHFLAG_MONITOR)
- {
- for (j = 0; j < li_total_channels; j++)
- {
- if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
- {
- li_config_table[i].coef_table[j] |= LI_COEF_CH_PC;
- if (li_config_table[j].chflags & LI_CHFLAG_MIX)
- li_config_table[i].coef_table[j] |= LI_COEF_PC_CH | LI_COEF_PC_PC;
- }
- }
- }
- if (li_config_table[i].chflags & LI_CHFLAG_MIX)
- {
- for (j = 0; j < li_total_channels; j++)
- {
- if (li_config_table[j].flag_table[i] & LI_FLAG_INTERCONNECT)
- li_config_table[j].coef_table[i] |= LI_COEF_PC_CH;
- }
- }
- if (li_config_table[i].chflags & LI_CHFLAG_LOOP)
- {
- for (j = 0; j < li_total_channels; j++)
- {
- if (li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
- {
- for (n = 0; n < li_total_channels; n++)
- {
- if (li_config_table[n].flag_table[i] & LI_FLAG_INTERCONNECT)
- {
- li_config_table[n].coef_table[j] |= LI_COEF_CH_CH;
- if (li_config_table[j].chflags & LI_CHFLAG_MIX)
- {
- li_config_table[n].coef_table[j] |= LI_COEF_PC_CH;
- if (li_config_table[n].chflags & LI_CHFLAG_MONITOR)
- li_config_table[n].coef_table[j] |= LI_COEF_CH_PC | LI_COEF_PC_PC;
- }
- else if (li_config_table[n].chflags & LI_CHFLAG_MONITOR)
- li_config_table[n].coef_table[j] |= LI_COEF_CH_PC;
- }
- }
- }
- }
- }
- }
- }
- for (i = 0; i < li_total_channels; i++)
- {
- if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
- {
- if (li_config_table[i].chflags & (LI_CHFLAG_MONITOR | LI_CHFLAG_MIX | LI_CHFLAG_LOOP))
- li_config_table[i].channel |= LI_CHANNEL_ACTIVE;
- if (li_config_table[i].chflags & LI_CHFLAG_MONITOR)
- li_config_table[i].channel |= LI_CHANNEL_RX_DATA;
- if (li_config_table[i].chflags & LI_CHFLAG_MIX)
- li_config_table[i].channel |= LI_CHANNEL_TX_DATA;
- for (j = 0; j < li_total_channels; j++)
- {
- if ((li_config_table[i].flag_table[j] &
- (LI_FLAG_INTERCONNECT | LI_FLAG_PCCONNECT | LI_FLAG_CONFERENCE | LI_FLAG_MONITOR))
- || (li_config_table[j].flag_table[i] &
- (LI_FLAG_INTERCONNECT | LI_FLAG_PCCONNECT | LI_FLAG_CONFERENCE | LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX)))
- {
- li_config_table[i].channel |= LI_CHANNEL_ACTIVE;
- }
- if (li_config_table[i].flag_table[j] & (LI_FLAG_PCCONNECT | LI_FLAG_MONITOR))
- li_config_table[i].channel |= LI_CHANNEL_RX_DATA;
- if (li_config_table[j].flag_table[i] & (LI_FLAG_PCCONNECT | LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX))
- li_config_table[i].channel |= LI_CHANNEL_TX_DATA;
- }
- if (!(li_config_table[i].channel & LI_CHANNEL_ACTIVE))
- {
- li_config_table[i].coef_table[i] |= LI_COEF_PC_CH | LI_COEF_CH_PC;
- li_config_table[i].channel |= LI_CHANNEL_TX_DATA | LI_CHANNEL_RX_DATA;
- }
- }
- }
- for (i = 0; i < li_total_channels; i++)
- {
- if (li_config_table[i].channel & LI_CHANNEL_INVOLVED)
- {
- j = 0;
- while ((j < li_total_channels) && !(li_config_table[i].flag_table[j] & LI_FLAG_ANNOUNCEMENT))
- j++;
- if (j < li_total_channels)
- {
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[i].coef_table[j] &= ~(LI_COEF_CH_CH | LI_COEF_PC_CH);
- if (li_config_table[i].flag_table[j] & LI_FLAG_ANNOUNCEMENT)
- li_config_table[i].coef_table[j] |= LI_COEF_PC_CH;
- }
- }
- }
- }
- n = li_total_channels;
- if (n > MIXER_MAX_DUMP_CHANNELS)
- n = MIXER_MAX_DUMP_CHANNELS;
-
- p = hex_line;
- for (j = 0; j < n; j++)
- {
- if ((j & 0x7) == 0)
- *(p++) = ' ';
- p = hex_byte_pack(p, li_config_table[j].curchnl);
- }
- *p = '\0';
- dbug(1, dprintf("[%06lx] CURRENT %s",
- (dword)(UnMapController(a->Id)), (char *)hex_line));
- p = hex_line;
- for (j = 0; j < n; j++)
- {
- if ((j & 0x7) == 0)
- *(p++) = ' ';
- p = hex_byte_pack(p, li_config_table[j].channel);
- }
- *p = '\0';
- dbug(1, dprintf("[%06lx] CHANNEL %s",
- (dword)(UnMapController(a->Id)), (char *)hex_line));
- p = hex_line;
- for (j = 0; j < n; j++)
- {
- if ((j & 0x7) == 0)
- *(p++) = ' ';
- p = hex_byte_pack(p, li_config_table[j].chflags);
- }
- *p = '\0';
- dbug(1, dprintf("[%06lx] CHFLAG %s",
- (dword)(UnMapController(a->Id)), (char *)hex_line));
- for (i = 0; i < n; i++)
- {
- p = hex_line;
- for (j = 0; j < n; j++)
- {
- if ((j & 0x7) == 0)
- *(p++) = ' ';
- p = hex_byte_pack(p, li_config_table[i].flag_table[j]);
- }
- *p = '\0';
- dbug(1, dprintf("[%06lx] FLAG[%02x]%s",
- (dword)(UnMapController(a->Id)), i, (char *)hex_line));
- }
- for (i = 0; i < n; i++)
- {
- p = hex_line;
- for (j = 0; j < n; j++)
- {
- if ((j & 0x7) == 0)
- *(p++) = ' ';
- p = hex_byte_pack(p, li_config_table[i].coef_table[j]);
- }
- *p = '\0';
- dbug(1, dprintf("[%06lx] COEF[%02x]%s",
- (dword)(UnMapController(a->Id)), i, (char *)hex_line));
- }
-}
-
-
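-/*
- * Write programs that translate LI_COEF_* mask bits into the
- * coefficient layout the card firmware expects.  The PRI program
- * addresses whole coefficient rows and columns via line flags; the
- * BRI program lists every B / PC / IC source-destination pair
- * explicitly, and mixer_swapped_index_bri gives the pair order to use
- * when the two B channels are swapped.  xconnect_write_prog selects
- * the PC or B-channel transfer address for cross-controller writes.
- */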
-static struct
-{
- byte mask;
- byte line_flags;
-} mixer_write_prog_pri[] =
-{
- { LI_COEF_CH_CH, 0 },
- { LI_COEF_CH_PC, MIXER_COEF_LINE_TO_PC_FLAG },
- { LI_COEF_PC_CH, MIXER_COEF_LINE_FROM_PC_FLAG },
- { LI_COEF_PC_PC, MIXER_COEF_LINE_TO_PC_FLAG | MIXER_COEF_LINE_FROM_PC_FLAG }
-};
-
-static struct
-{
- byte from_ch;
- byte to_ch;
- byte mask;
- byte xconnect_override;
-} mixer_write_prog_bri[] =
-{
- { 0, 0, LI_COEF_CH_CH, 0x01 }, /* B to B */
- { 1, 0, LI_COEF_CH_CH, 0x01 }, /* Alt B to B */
- { 0, 0, LI_COEF_PC_CH, 0x80 }, /* PC to B */
- { 1, 0, LI_COEF_PC_CH, 0x01 }, /* Alt PC to B */
- { 2, 0, LI_COEF_CH_CH, 0x00 }, /* IC to B */
- { 3, 0, LI_COEF_CH_CH, 0x00 }, /* Alt IC to B */
- { 0, 0, LI_COEF_CH_PC, 0x80 }, /* B to PC */
- { 1, 0, LI_COEF_CH_PC, 0x01 }, /* Alt B to PC */
- { 0, 0, LI_COEF_PC_PC, 0x01 }, /* PC to PC */
- { 1, 0, LI_COEF_PC_PC, 0x01 }, /* Alt PC to PC */
- { 2, 0, LI_COEF_CH_PC, 0x00 }, /* IC to PC */
- { 3, 0, LI_COEF_CH_PC, 0x00 }, /* Alt IC to PC */
- { 0, 2, LI_COEF_CH_CH, 0x00 }, /* B to IC */
- { 1, 2, LI_COEF_CH_CH, 0x00 }, /* Alt B to IC */
- { 0, 2, LI_COEF_PC_CH, 0x00 }, /* PC to IC */
- { 1, 2, LI_COEF_PC_CH, 0x00 }, /* Alt PC to IC */
- { 2, 2, LI_COEF_CH_CH, 0x00 }, /* IC to IC */
- { 3, 2, LI_COEF_CH_CH, 0x00 }, /* Alt IC to IC */
- { 1, 1, LI_COEF_CH_CH, 0x01 }, /* Alt B to Alt B */
- { 0, 1, LI_COEF_CH_CH, 0x01 }, /* B to Alt B */
- { 1, 1, LI_COEF_PC_CH, 0x80 }, /* Alt PC to Alt B */
- { 0, 1, LI_COEF_PC_CH, 0x01 }, /* PC to Alt B */
- { 3, 1, LI_COEF_CH_CH, 0x00 }, /* Alt IC to Alt B */
- { 2, 1, LI_COEF_CH_CH, 0x00 }, /* IC to Alt B */
- { 1, 1, LI_COEF_CH_PC, 0x80 }, /* Alt B to Alt PC */
- { 0, 1, LI_COEF_CH_PC, 0x01 }, /* B to Alt PC */
- { 1, 1, LI_COEF_PC_PC, 0x01 }, /* Alt PC to Alt PC */
- { 0, 1, LI_COEF_PC_PC, 0x01 }, /* PC to Alt PC */
- { 3, 1, LI_COEF_CH_PC, 0x00 }, /* Alt IC to Alt PC */
- { 2, 1, LI_COEF_CH_PC, 0x00 }, /* IC to Alt PC */
- { 1, 3, LI_COEF_CH_CH, 0x00 }, /* Alt B to Alt IC */
- { 0, 3, LI_COEF_CH_CH, 0x00 }, /* B to Alt IC */
- { 1, 3, LI_COEF_PC_CH, 0x00 }, /* Alt PC to Alt IC */
- { 0, 3, LI_COEF_PC_CH, 0x00 }, /* PC to Alt IC */
- { 3, 3, LI_COEF_CH_CH, 0x00 }, /* Alt IC to Alt IC */
- { 2, 3, LI_COEF_CH_CH, 0x00 } /* IC to Alt IC */
-};
-
-static byte mixer_swapped_index_bri[] =
-{
- 18, /* B to B */
- 19, /* Alt B to B */
- 20, /* PC to B */
- 21, /* Alt PC to B */
- 22, /* IC to B */
- 23, /* Alt IC to B */
- 24, /* B to PC */
- 25, /* Alt B to PC */
- 26, /* PC to PC */
- 27, /* Alt PC to PC */
- 28, /* IC to PC */
- 29, /* Alt IC to PC */
- 30, /* B to IC */
- 31, /* Alt B to IC */
- 32, /* PC to IC */
- 33, /* Alt PC to IC */
- 34, /* IC to IC */
- 35, /* Alt IC to IC */
- 0, /* Alt B to Alt B */
- 1, /* B to Alt B */
- 2, /* Alt PC to Alt B */
- 3, /* PC to Alt B */
- 4, /* Alt IC to Alt B */
- 5, /* IC to Alt B */
- 6, /* Alt B to Alt PC */
- 7, /* B to Alt PC */
- 8, /* Alt PC to Alt PC */
- 9, /* PC to Alt PC */
- 10, /* Alt IC to Alt PC */
- 11, /* IC to Alt PC */
- 12, /* Alt B to Alt IC */
- 13, /* B to Alt IC */
- 14, /* Alt PC to Alt IC */
- 15, /* PC to Alt IC */
- 16, /* Alt IC to Alt IC */
- 17 /* IC to Alt IC */
-};
-
-static struct
-{
- byte mask;
- byte from_pc;
- byte to_pc;
-} xconnect_write_prog[] =
-{
- { LI_COEF_CH_CH, false, false },
- { LI_COEF_CH_PC, false, true },
- { LI_COEF_PC_CH, true, false },
- { LI_COEF_PC_PC, true, true }
-};
-
-
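-/*
- * Ask the card for the transfer addresses of this PLCI's channel
- * (querying both the channel port and the PC port in one N_UDATA
- * request) so that cross-controller coefficients can be written
- * later.  On PRI the request is skipped if the channel id has been
- * wiped out meanwhile.
- */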
-static void xconnect_query_addresses(PLCI *plci)
-{
- DIVA_CAPI_ADAPTER *a;
- word w, ch;
- byte *p;
-
- dbug(1, dprintf("[%06lx] %s,%d: xconnect_query_addresses",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- a = plci->adapter;
- if (a->li_pri && ((plci->li_bchannel_id == 0)
- || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci)))
- {
- dbug(1, dprintf("[%06x] %s,%d: Channel id wiped out",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
- return;
- }
- p = plci->internal_req_buffer;
- ch = (a->li_pri) ? plci->li_bchannel_id - 1 : 0;
- *(p++) = UDATA_REQUEST_XCONNECT_FROM;
- w = ch;
- *(p++) = (byte) w;
- *(p++) = (byte)(w >> 8);
- w = ch | XCONNECT_CHANNEL_PORT_PC;
- *(p++) = (byte) w;
- *(p++) = (byte)(w >> 8);
- plci->NData[0].P = plci->internal_req_buffer;
- plci->NData[0].PLength = p - plci->internal_req_buffer;
- plci->NL.X = plci->NData;
- plci->NL.ReqCh = 0;
- plci->NL.Req = plci->nl_req = (byte) N_UDATA;
- plci->adapter->request(&plci->NL);
-}
-
-
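-/*
- * Arm the coefficient write state machine: remember which internal
- * command to resume and restart xconnect_write_coefs_process at
- * channel 0.
- */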
-static void xconnect_write_coefs(PLCI *plci, word internal_command)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: xconnect_write_coefs %04x",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__, internal_command));
-
- plci->li_write_command = internal_command;
- plci->li_write_channel = 0;
-}
-
-
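-/*
- * Incrementally program the mixer coefficients, resuming at
- * plci->li_write_channel.  On XCONNECT capable adapters every channel
- * whose requested (low) and written (high) coefficient nibbles differ
- * is programmed with UDATA_REQUEST_XCONNECT_TO records carrying the
- * peer transfer address, the destination port and a gain/law
- * conversion byte; peers that are unreachable (addresses not set,
- * incompatible DMA features, foreign controller without a usable
- * address pair) are skipped.  Afterwards, or on plain hardware, the
- * complete matrix is rewritten with
- * UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC / _BRI.  Returns false only
- * if a previous write was rejected by the card.
- */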
-static byte xconnect_write_coefs_process(dword Id, PLCI *plci, byte Rc)
-{
- DIVA_CAPI_ADAPTER *a;
- word w, n, i, j, r, s, to_ch;
- dword d;
- byte *p;
- struct xconnect_transfer_address_s *transfer_address;
- byte ch_map[MIXER_CHANNELS_BRI];
-
- dbug(1, dprintf("[%06x] %s,%d: xconnect_write_coefs_process %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->li_write_channel));
-
- a = plci->adapter;
- if ((plci->li_bchannel_id == 0)
- || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci))
- {
- dbug(1, dprintf("[%06x] %s,%d: Channel id wiped out",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- return (true);
- }
- i = a->li_base + (plci->li_bchannel_id - 1);
- j = plci->li_write_channel;
- p = plci->internal_req_buffer;
- if (j != 0)
- {
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI write coefs failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- return (false);
- }
- }
- if (li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
- {
- r = 0;
- s = 0;
- if (j < li_total_channels)
- {
- if (li_config_table[i].channel & LI_CHANNEL_ADDRESSES_SET)
- {
- s = ((li_config_table[i].send_b.card_address.low | li_config_table[i].send_b.card_address.high) ?
- (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_PC | LI_COEF_PC_PC)) &
- ((li_config_table[i].send_pc.card_address.low | li_config_table[i].send_pc.card_address.high) ?
- (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_PC_CH));
- }
- r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
- while ((j < li_total_channels)
- && ((r == 0)
- || (!(li_config_table[j].channel & LI_CHANNEL_ADDRESSES_SET))
- || (!li_config_table[j].adapter->li_pri
- && (j >= li_config_table[j].adapter->li_base + MIXER_BCHANNELS_BRI))
- || (((li_config_table[j].send_b.card_address.low != li_config_table[i].send_b.card_address.low)
- || (li_config_table[j].send_b.card_address.high != li_config_table[i].send_b.card_address.high))
- && (!(a->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)
- || !(li_config_table[j].adapter->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)))
- || ((li_config_table[j].adapter->li_base != a->li_base)
- && !(r & s &
- ((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
- (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
- ((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
- (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC))))))
- {
- j++;
- if (j < li_total_channels)
- r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
- }
- }
- if (j < li_total_channels)
- {
- plci->internal_command = plci->li_write_command;
- if (plci_nl_busy(plci))
- return (true);
- to_ch = (a->li_pri) ? plci->li_bchannel_id - 1 : 0;
- *(p++) = UDATA_REQUEST_XCONNECT_TO;
- do
- {
- if (li_config_table[j].adapter->li_base != a->li_base)
- {
- r &= s &
- ((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
- (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
- ((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
- (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC));
- }
- n = 0;
- do
- {
- if (r & xconnect_write_prog[n].mask)
- {
- if (xconnect_write_prog[n].from_pc)
- transfer_address = &(li_config_table[j].send_pc);
- else
- transfer_address = &(li_config_table[j].send_b);
- d = transfer_address->card_address.low;
- *(p++) = (byte) d;
- *(p++) = (byte)(d >> 8);
- *(p++) = (byte)(d >> 16);
- *(p++) = (byte)(d >> 24);
- d = transfer_address->card_address.high;
- *(p++) = (byte) d;
- *(p++) = (byte)(d >> 8);
- *(p++) = (byte)(d >> 16);
- *(p++) = (byte)(d >> 24);
- d = transfer_address->offset;
- *(p++) = (byte) d;
- *(p++) = (byte)(d >> 8);
- *(p++) = (byte)(d >> 16);
- *(p++) = (byte)(d >> 24);
- w = xconnect_write_prog[n].to_pc ? to_ch | XCONNECT_CHANNEL_PORT_PC : to_ch;
- *(p++) = (byte) w;
- *(p++) = (byte)(w >> 8);
- w = ((li_config_table[i].coef_table[j] & xconnect_write_prog[n].mask) == 0) ? 0x01 :
- (li_config_table[i].adapter->u_law ?
- (li_config_table[j].adapter->u_law ? 0x80 : 0x86) :
- (li_config_table[j].adapter->u_law ? 0x7a : 0x80));
- *(p++) = (byte) w;
- *(p++) = (byte) 0;
- li_config_table[i].coef_table[j] ^= xconnect_write_prog[n].mask << 4;
- }
- n++;
- } while ((n < ARRAY_SIZE(xconnect_write_prog))
- && ((p - plci->internal_req_buffer) + 16 < INTERNAL_REQ_BUFFER_SIZE));
- if (n == ARRAY_SIZE(xconnect_write_prog))
- {
- do
- {
- j++;
- if (j < li_total_channels)
- r = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
- } while ((j < li_total_channels)
- && ((r == 0)
- || (!(li_config_table[j].channel & LI_CHANNEL_ADDRESSES_SET))
- || (!li_config_table[j].adapter->li_pri
- && (j >= li_config_table[j].adapter->li_base + MIXER_BCHANNELS_BRI))
- || (((li_config_table[j].send_b.card_address.low != li_config_table[i].send_b.card_address.low)
- || (li_config_table[j].send_b.card_address.high != li_config_table[i].send_b.card_address.high))
- && (!(a->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)
- || !(li_config_table[j].adapter->manufacturer_features & MANUFACTURER_FEATURE_DMACONNECT)))
- || ((li_config_table[j].adapter->li_base != a->li_base)
- && !(r & s &
- ((li_config_table[j].send_b.card_address.low | li_config_table[j].send_b.card_address.high) ?
- (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_PC_CH | LI_COEF_PC_PC)) &
- ((li_config_table[j].send_pc.card_address.low | li_config_table[j].send_pc.card_address.high) ?
- (LI_COEF_CH_CH | LI_COEF_CH_PC | LI_COEF_PC_CH | LI_COEF_PC_PC) : (LI_COEF_CH_CH | LI_COEF_CH_PC))))));
- }
- } while ((j < li_total_channels)
- && ((p - plci->internal_req_buffer) + 16 < INTERNAL_REQ_BUFFER_SIZE));
- }
- else if (j == li_total_channels)
- {
- plci->internal_command = plci->li_write_command;
- if (plci_nl_busy(plci))
- return (true);
- if (a->li_pri)
- {
- *(p++) = UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC;
- w = 0;
- if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
- w |= MIXER_FEATURE_ENABLE_TX_DATA;
- if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
- w |= MIXER_FEATURE_ENABLE_RX_DATA;
- *(p++) = (byte) w;
- *(p++) = (byte)(w >> 8);
- }
- else
- {
- *(p++) = UDATA_REQUEST_SET_MIXER_COEFS_BRI;
- w = 0;
- if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)
- && (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length))
- {
- w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
- }
- if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
- w |= MIXER_FEATURE_ENABLE_TX_DATA;
- if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
- w |= MIXER_FEATURE_ENABLE_RX_DATA;
- *(p++) = (byte) w;
- *(p++) = (byte)(w >> 8);
- for (j = 0; j < sizeof(ch_map); j += 2)
- {
- if (plci->li_bchannel_id == 2)
- {
- ch_map[j] = (byte)(j + 1);
- ch_map[j + 1] = (byte) j;
- }
- else
- {
- ch_map[j] = (byte) j;
- ch_map[j + 1] = (byte)(j + 1);
- }
- }
- for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
- {
- i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
- j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
- if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
- {
- *p = (mixer_write_prog_bri[n].xconnect_override != 0) ?
- mixer_write_prog_bri[n].xconnect_override :
- ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
- if ((i >= a->li_base + MIXER_BCHANNELS_BRI) || (j >= a->li_base + MIXER_BCHANNELS_BRI))
- {
- w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
- li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
- }
- }
- else
- {
- *p = 0x00;
- if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI->tel == ADV_VOICE))
- {
- w = (plci == a->AdvSignalPLCI) ? n : mixer_swapped_index_bri[n];
- if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w < a->adv_voice_coef_length)
- *p = a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w];
- }
- }
- p++;
- }
- }
- j = li_total_channels + 1;
- }
- }
- else
- {
- if (j <= li_total_channels)
- {
- plci->internal_command = plci->li_write_command;
- if (plci_nl_busy(plci))
- return (true);
- if (j < a->li_base)
- j = a->li_base;
- if (a->li_pri)
- {
- *(p++) = UDATA_REQUEST_SET_MIXER_COEFS_PRI_SYNC;
- w = 0;
- if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
- w |= MIXER_FEATURE_ENABLE_TX_DATA;
- if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
- w |= MIXER_FEATURE_ENABLE_RX_DATA;
- *(p++) = (byte) w;
- *(p++) = (byte)(w >> 8);
- for (n = 0; n < ARRAY_SIZE(mixer_write_prog_pri); n++)
- {
- *(p++) = (byte)((plci->li_bchannel_id - 1) | mixer_write_prog_pri[n].line_flags);
- for (j = a->li_base; j < a->li_base + MIXER_CHANNELS_PRI; j++)
- {
- w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
- if (w & mixer_write_prog_pri[n].mask)
- {
- *(p++) = (li_config_table[i].coef_table[j] & mixer_write_prog_pri[n].mask) ? 0x80 : 0x01;
- li_config_table[i].coef_table[j] ^= mixer_write_prog_pri[n].mask << 4;
- }
- else
- *(p++) = 0x00;
- }
- *(p++) = (byte)((plci->li_bchannel_id - 1) | MIXER_COEF_LINE_ROW_FLAG | mixer_write_prog_pri[n].line_flags);
- for (j = a->li_base; j < a->li_base + MIXER_CHANNELS_PRI; j++)
- {
- w = ((li_config_table[j].coef_table[i] & 0xf) ^ (li_config_table[j].coef_table[i] >> 4));
- if (w & mixer_write_prog_pri[n].mask)
- {
- *(p++) = (li_config_table[j].coef_table[i] & mixer_write_prog_pri[n].mask) ? 0x80 : 0x01;
- li_config_table[j].coef_table[i] ^= mixer_write_prog_pri[n].mask << 4;
- }
- else
- *(p++) = 0x00;
- }
- }
- }
- else
- {
- *(p++) = UDATA_REQUEST_SET_MIXER_COEFS_BRI;
- w = 0;
- if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI)
- && (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length))
- {
- w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
- }
- if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
- w |= MIXER_FEATURE_ENABLE_TX_DATA;
- if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
- w |= MIXER_FEATURE_ENABLE_RX_DATA;
- *(p++) = (byte) w;
- *(p++) = (byte)(w >> 8);
- for (j = 0; j < sizeof(ch_map); j += 2)
- {
- if (plci->li_bchannel_id == 2)
- {
- ch_map[j] = (byte)(j + 1);
- ch_map[j + 1] = (byte) j;
- }
- else
- {
- ch_map[j] = (byte) j;
- ch_map[j + 1] = (byte)(j + 1);
- }
- }
- for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
- {
- i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
- j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
- if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
- {
- *p = ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
- w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
- li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
- }
- else
- {
- *p = 0x00;
- if ((a->AdvSignalPLCI != NULL) && (a->AdvSignalPLCI->tel == ADV_VOICE))
- {
- w = (plci == a->AdvSignalPLCI) ? n : mixer_swapped_index_bri[n];
- if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w < a->adv_voice_coef_length)
- *p = a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + w];
- }
- }
- p++;
- }
- }
- j = li_total_channels + 1;
- }
- }
- plci->li_write_channel = j;
- if (p != plci->internal_req_buffer)
- {
- plci->NData[0].P = plci->internal_req_buffer;
- plci->NData[0].PLength = p - plci->internal_req_buffer;
- plci->NL.X = plci->NData;
- plci->NL.ReqCh = 0;
- plci->NL.Req = plci->nl_req = (byte) N_UDATA;
- plci->adapter->request(&plci->NL);
- }
- return (true);
-}
-
-
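-/*
- * Queue a silent LINE_INTERCONNECT update (FACILITY_REQ with
- * LI_REQ_SILENT_UPDATE) to the application owning this PLCI, or to
- * every PLCI registered in li_config_table when 'others' is set, so
- * the interconnect state is re-evaluated after the coefficient tables
- * changed.  li_notify_update marks an outstanding notification and
- * suppresses duplicates; it is left set if the message queue is full.
- */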
-static void mixer_notify_update(PLCI *plci, byte others)
-{
- DIVA_CAPI_ADAPTER *a;
- word i, w;
- PLCI *notify_plci;
- byte msg[sizeof(CAPI_MSG_HEADER) + 6];
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_notify_update %d",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__, others));
-
- a = plci->adapter;
- if (a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED)
- {
- if (others)
- plci->li_notify_update = true;
- i = 0;
- do
- {
- notify_plci = NULL;
- if (others)
- {
- while ((i < li_total_channels) && (li_config_table[i].plci == NULL))
- i++;
- if (i < li_total_channels)
- notify_plci = li_config_table[i++].plci;
- }
- else
- {
- if ((plci->li_bchannel_id != 0)
- && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
- {
- notify_plci = plci;
- }
- }
- if ((notify_plci != NULL)
- && !notify_plci->li_notify_update
- && (notify_plci->appl != NULL)
- && (notify_plci->State)
- && notify_plci->NL.Id && !notify_plci->nl_remove_id)
- {
- notify_plci->li_notify_update = true;
- ((CAPI_MSG *) msg)->header.length = 18;
- ((CAPI_MSG *) msg)->header.appl_id = notify_plci->appl->Id;
- ((CAPI_MSG *) msg)->header.command = _FACILITY_R;
- ((CAPI_MSG *) msg)->header.number = 0;
- ((CAPI_MSG *) msg)->header.controller = notify_plci->adapter->Id;
- ((CAPI_MSG *) msg)->header.plci = notify_plci->Id;
- ((CAPI_MSG *) msg)->header.ncci = 0;
- ((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
- ((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
- ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
- ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
- ((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
- w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
- if (w != _QUEUE_FULL)
- {
- if (w != 0)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Interconnect notify failed %06x %d",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__,
- (dword)((notify_plci->Id << 8) | UnMapController(notify_plci->adapter->Id)), w));
- }
- notify_plci->li_notify_update = false;
- }
- }
- } while (others && (notify_plci != NULL));
- if (others)
- plci->li_notify_update = false;
- }
-}
-
-
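-/*
- * Forget all interconnect state of this PLCI: reset the request queue
- * positions and clear the row and column of its channel in the flag
- * and coefficient tables.  On BRI the default CH<->PC coefficients
- * are restored, and for the advanced voice PLCI the associated IC
- * channel entries (both of them if a slave codec is present) are
- * cleared as well.
- */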
-static void mixer_clear_config(PLCI *plci)
-{
- DIVA_CAPI_ADAPTER *a;
- word i, j;
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_clear_config",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- plci->li_notify_update = false;
- plci->li_plci_b_write_pos = 0;
- plci->li_plci_b_read_pos = 0;
- plci->li_plci_b_req_pos = 0;
- a = plci->adapter;
- if ((plci->li_bchannel_id != 0)
- && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
- {
- i = a->li_base + (plci->li_bchannel_id - 1);
- li_config_table[i].curchnl = 0;
- li_config_table[i].channel = 0;
- li_config_table[i].chflags = 0;
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[j].flag_table[i] = 0;
- li_config_table[i].flag_table[j] = 0;
- li_config_table[i].coef_table[j] = 0;
- li_config_table[j].coef_table[i] = 0;
- }
- if (!a->li_pri)
- {
- li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
- if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
- {
- i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
- li_config_table[i].curchnl = 0;
- li_config_table[i].channel = 0;
- li_config_table[i].chflags = 0;
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[i].flag_table[j] = 0;
- li_config_table[j].flag_table[i] = 0;
- li_config_table[i].coef_table[j] = 0;
- li_config_table[j].coef_table[i] = 0;
- }
- if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
- {
- i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
- li_config_table[i].curchnl = 0;
- li_config_table[i].channel = 0;
- li_config_table[i].chflags = 0;
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[i].flag_table[j] = 0;
- li_config_table[j].flag_table[i] = 0;
- li_config_table[i].coef_table[j] = 0;
- li_config_table[j].coef_table[i] = 0;
- }
- }
- }
- }
- }
-}
-
-
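-/*
- * Drain all pending coefficient-set indications before the B channel
- * is switched.
- */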
-static void mixer_prepare_switch(dword Id, PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_prepare_switch",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
- do
- {
- mixer_indication_coefs_set(Id, plci);
- } while (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos);
-}
-
-
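-/*
- * Keep only the requested (low) nibble of every coefficient involving
- * this channel, so the whole configuration is written again once the
- * B1 resource has been re-established.
- */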
-static word mixer_save_config(dword Id, PLCI *plci, byte Rc)
-{
- DIVA_CAPI_ADAPTER *a;
- word i, j;
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_save_config %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
- a = plci->adapter;
- if ((plci->li_bchannel_id != 0)
- && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
- {
- i = a->li_base + (plci->li_bchannel_id - 1);
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[i].coef_table[j] &= 0xf;
- li_config_table[j].coef_table[i] &= 0xf;
- }
- if (!a->li_pri)
- li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
- }
- return (GOOD);
-}
-
-
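-/*
- * Adjust-B state machine for restoring the mixer: on XCONNECT capable
- * adapters the transfer addresses are queried first (states 1 to 4
- * cover the request/answer handshake), then the coefficients are
- * rewritten through xconnect_write_coefs_process (states 5 to 7).
- */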
-static word mixer_restore_config(dword Id, PLCI *plci, byte Rc)
-{
- DIVA_CAPI_ADAPTER *a;
- word Info;
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_restore_config %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
- Info = GOOD;
- a = plci->adapter;
- if ((plci->B1_facilities & B1_FACILITY_MIXER)
- && (plci->li_bchannel_id != 0)
- && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
- {
- switch (plci->adjust_b_state)
- {
- case ADJUST_B_RESTORE_MIXER_1:
- if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
- {
- plci->internal_command = plci->adjust_b_command;
- if (plci_nl_busy(plci))
- {
- plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_1;
- break;
- }
- xconnect_query_addresses(plci);
- plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_2;
- break;
- }
- plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
- Rc = OK;
- /* fall through */
- case ADJUST_B_RESTORE_MIXER_2:
- case ADJUST_B_RESTORE_MIXER_3:
- case ADJUST_B_RESTORE_MIXER_4:
- if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B query addresses failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _WRONG_STATE;
- break;
- }
- if (Rc == OK)
- {
- if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
- plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_3;
- else if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_4)
- plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
- }
- else if (Rc == 0)
- {
- if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
- plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_4;
- else if (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_3)
- plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_5;
- }
- if (plci->adjust_b_state != ADJUST_B_RESTORE_MIXER_5)
- {
- plci->internal_command = plci->adjust_b_command;
- break;
- }
- /* fall through */
- case ADJUST_B_RESTORE_MIXER_5:
- xconnect_write_coefs(plci, plci->adjust_b_command);
- plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_6;
- Rc = OK;
- /* fall through */
- case ADJUST_B_RESTORE_MIXER_6:
- if (!xconnect_write_coefs_process(Id, plci, Rc))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Write mixer coefs failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
- if (plci->internal_command)
- break;
- plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_7;
- /* fall through */
- case ADJUST_B_RESTORE_MIXER_7:
- break;
- }
- }
- return (Info);
-}
-
-
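-/*
- * Internal command handler behind LI_REQ_CONNECT, LI_REQ_DISCONNECT
- * and LI_REQ_SILENT_UPDATE: load the mixer B1 facility when the
- * channel becomes involved, write the queued coefficient changes
- * (dropping queue entries up to the last complete request on
- * failure), unload the facility when the channel is no longer
- * involved, and finally latch li_channel_bits into curchnl, including
- * the IC / slave codec channels for advanced voice on BRI.
- */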
-static void mixer_command(dword Id, PLCI *plci, byte Rc)
-{
- DIVA_CAPI_ADAPTER *a;
- word i, internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_command %02x %04x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
- plci->li_cmd));
-
- a = plci->adapter;
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (plci->li_cmd)
- {
- case LI_REQ_CONNECT:
- case LI_REQ_DISCONNECT:
- case LI_REQ_SILENT_UPDATE:
- switch (internal_command)
- {
- default:
- if (plci->li_channel_bits & LI_CHANNEL_INVOLVED)
- {
- adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
- B1_FACILITY_MIXER), MIXER_COMMAND_1);
- }
- /* fall through */
- case MIXER_COMMAND_1:
- if (plci->li_channel_bits & LI_CHANNEL_INVOLVED)
- {
- if (adjust_b_process(Id, plci, Rc) != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Load mixer failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- break;
- }
- if (plci->internal_command)
- return;
- }
- plci->li_plci_b_req_pos = plci->li_plci_b_write_pos;
- if ((plci->li_channel_bits & LI_CHANNEL_INVOLVED)
- || ((get_b1_facilities(plci, plci->B1_resource) & B1_FACILITY_MIXER)
- && (add_b1_facilities(plci, plci->B1_resource, (word)(plci->B1_facilities &
- ~B1_FACILITY_MIXER)) == plci->B1_resource)))
- {
- xconnect_write_coefs(plci, MIXER_COMMAND_2);
- }
- else
- {
- do
- {
- mixer_indication_coefs_set(Id, plci);
- } while (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos);
- }
- /* fall through */
- case MIXER_COMMAND_2:
- if ((plci->li_channel_bits & LI_CHANNEL_INVOLVED)
- || ((get_b1_facilities(plci, plci->B1_resource) & B1_FACILITY_MIXER)
- && (add_b1_facilities(plci, plci->B1_resource, (word)(plci->B1_facilities &
- ~B1_FACILITY_MIXER)) == plci->B1_resource)))
- {
- if (!xconnect_write_coefs_process(Id, plci, Rc))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Write mixer coefs failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- if (plci->li_plci_b_write_pos != plci->li_plci_b_req_pos)
- {
- do
- {
- plci->li_plci_b_write_pos = (plci->li_plci_b_write_pos == 0) ?
- LI_PLCI_B_QUEUE_ENTRIES - 1 : plci->li_plci_b_write_pos - 1;
- i = (plci->li_plci_b_write_pos == 0) ?
- LI_PLCI_B_QUEUE_ENTRIES - 1 : plci->li_plci_b_write_pos - 1;
- } while ((plci->li_plci_b_write_pos != plci->li_plci_b_req_pos)
- && !(plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG));
- }
- break;
- }
- if (plci->internal_command)
- return;
- }
- if (!(plci->li_channel_bits & LI_CHANNEL_INVOLVED))
- {
- adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
- ~B1_FACILITY_MIXER), MIXER_COMMAND_3);
- }
- /* fall through */
- case MIXER_COMMAND_3:
- if (!(plci->li_channel_bits & LI_CHANNEL_INVOLVED))
- {
- if (adjust_b_process(Id, plci, Rc) != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Unload mixer failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- break;
- }
- if (plci->internal_command)
- return;
- }
- break;
- }
- break;
- }
- if ((plci->li_bchannel_id == 0)
- || (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci != plci))
- {
- dbug(1, dprintf("[%06x] %s,%d: Channel id wiped out %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, (int)(plci->li_bchannel_id)));
- }
- else
- {
- i = a->li_base + (plci->li_bchannel_id - 1);
- li_config_table[i].curchnl = plci->li_channel_bits;
- if (!a->li_pri && (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
- {
- i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
- li_config_table[i].curchnl = plci->li_channel_bits;
- if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
- {
- i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
- li_config_table[i].curchnl = plci->li_channel_bits;
- }
- }
- }
-}
-
-
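-/*
- * Apply an old-spec (LI_FLAG_*) connect or disconnect to the flag
- * tables for the pair (plci, plci_b).  ch_a / ch_b are the base
- * channels; the _v and _s variants address the advanced voice IC
- * channel and its slave codec twin on BRI.  A connect first clears
- * stale monitor, announcement and mix bits, leaves at most one
- * conference membership per channel, then sets the bits requested in
- * li_flags and ties distinct v/s channels together as a conference.
- */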
-static void li_update_connect(dword Id, DIVA_CAPI_ADAPTER *a, PLCI *plci,
- dword plci_b_id, byte connect, dword li_flags)
-{
- word i, ch_a, ch_a_v, ch_a_s, ch_b, ch_b_v, ch_b_s;
- PLCI *plci_b;
- DIVA_CAPI_ADAPTER *a_b;
-
- a_b = &(adapter[MapController((byte)(plci_b_id & 0x7f)) - 1]);
- plci_b = &(a_b->plci[((plci_b_id >> 8) & 0xff) - 1]);
- ch_a = a->li_base + (plci->li_bchannel_id - 1);
- if (!a->li_pri && (plci->tel == ADV_VOICE)
- && (plci == a->AdvSignalPLCI) && (Id & EXT_CONTROLLER))
- {
- ch_a_v = ch_a + MIXER_IC_CHANNEL_BASE;
- ch_a_s = (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
- a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id) : ch_a_v;
- }
- else
- {
- ch_a_v = ch_a;
- ch_a_s = ch_a;
- }
- ch_b = a_b->li_base + (plci_b->li_bchannel_id - 1);
- if (!a_b->li_pri && (plci_b->tel == ADV_VOICE)
- && (plci_b == a_b->AdvSignalPLCI) && (plci_b_id & EXT_CONTROLLER))
- {
- ch_b_v = ch_b + MIXER_IC_CHANNEL_BASE;
- ch_b_s = (a_b->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
- a_b->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci_b->li_bchannel_id) : ch_b_v;
- }
- else
- {
- ch_b_v = ch_b;
- ch_b_s = ch_b;
- }
- if (connect)
- {
- li_config_table[ch_a].flag_table[ch_a_v] &= ~LI_FLAG_MONITOR;
- li_config_table[ch_a].flag_table[ch_a_s] &= ~LI_FLAG_MONITOR;
- li_config_table[ch_a_v].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
- li_config_table[ch_a_s].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
- }
- li_config_table[ch_a].flag_table[ch_b_v] &= ~LI_FLAG_MONITOR;
- li_config_table[ch_a].flag_table[ch_b_s] &= ~LI_FLAG_MONITOR;
- li_config_table[ch_b_v].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
- li_config_table[ch_b_s].flag_table[ch_a] &= ~(LI_FLAG_ANNOUNCEMENT | LI_FLAG_MIX);
- if (ch_a_v == ch_b_v)
- {
- li_config_table[ch_a_v].flag_table[ch_b_v] &= ~LI_FLAG_CONFERENCE;
- li_config_table[ch_a_s].flag_table[ch_b_s] &= ~LI_FLAG_CONFERENCE;
- }
- else
- {
- if (li_config_table[ch_a_v].flag_table[ch_b_v] & LI_FLAG_CONFERENCE)
- {
- for (i = 0; i < li_total_channels; i++)
- {
- if (i != ch_a_v)
- li_config_table[ch_a_v].flag_table[i] &= ~LI_FLAG_CONFERENCE;
- }
- }
- if (li_config_table[ch_a_s].flag_table[ch_b_v] & LI_FLAG_CONFERENCE)
- {
- for (i = 0; i < li_total_channels; i++)
- {
- if (i != ch_a_s)
- li_config_table[ch_a_s].flag_table[i] &= ~LI_FLAG_CONFERENCE;
- }
- }
- if (li_config_table[ch_b_v].flag_table[ch_a_v] & LI_FLAG_CONFERENCE)
- {
- for (i = 0; i < li_total_channels; i++)
- {
- if (i != ch_a_v)
- li_config_table[i].flag_table[ch_a_v] &= ~LI_FLAG_CONFERENCE;
- }
- }
- if (li_config_table[ch_b_v].flag_table[ch_a_s] & LI_FLAG_CONFERENCE)
- {
- for (i = 0; i < li_total_channels; i++)
- {
- if (i != ch_a_s)
- li_config_table[i].flag_table[ch_a_s] &= ~LI_FLAG_CONFERENCE;
- }
- }
- }
- if (li_flags & LI_FLAG_CONFERENCE_A_B)
- {
- li_config_table[ch_b_v].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
- li_config_table[ch_b_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
- li_config_table[ch_b_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
- li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
- }
- if (li_flags & LI_FLAG_CONFERENCE_B_A)
- {
- li_config_table[ch_a_v].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
- li_config_table[ch_a_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
- li_config_table[ch_a_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
- li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
- }
- if (li_flags & LI_FLAG_MONITOR_A)
- {
- li_config_table[ch_a].flag_table[ch_a_v] |= LI_FLAG_MONITOR;
- li_config_table[ch_a].flag_table[ch_a_s] |= LI_FLAG_MONITOR;
- }
- if (li_flags & LI_FLAG_MONITOR_B)
- {
- li_config_table[ch_a].flag_table[ch_b_v] |= LI_FLAG_MONITOR;
- li_config_table[ch_a].flag_table[ch_b_s] |= LI_FLAG_MONITOR;
- }
- if (li_flags & LI_FLAG_ANNOUNCEMENT_A)
- {
- li_config_table[ch_a_v].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
- li_config_table[ch_a_s].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
- }
- if (li_flags & LI_FLAG_ANNOUNCEMENT_B)
- {
- li_config_table[ch_b_v].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
- li_config_table[ch_b_s].flag_table[ch_a] |= LI_FLAG_ANNOUNCEMENT;
- }
- if (li_flags & LI_FLAG_MIX_A)
- {
- li_config_table[ch_a_v].flag_table[ch_a] |= LI_FLAG_MIX;
- li_config_table[ch_a_s].flag_table[ch_a] |= LI_FLAG_MIX;
- }
- if (li_flags & LI_FLAG_MIX_B)
- {
- li_config_table[ch_b_v].flag_table[ch_a] |= LI_FLAG_MIX;
- li_config_table[ch_b_s].flag_table[ch_a] |= LI_FLAG_MIX;
- }
- if (ch_a_v != ch_a_s)
- {
- li_config_table[ch_a_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
- li_config_table[ch_a_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
- }
- if (ch_b_v != ch_b_s)
- {
- li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
- li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
- }
-}
-
-
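-/*
- * Same as li_update_connect for the new-spec LI2_FLAG_* bits: rewrite
- * the interconnect and conference bits for the pair and translate the
- * monitor, mix and loop requests into per-pair flags, PCCONNECT bits
- * and per-channel chflags.
- */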
-static void li2_update_connect(dword Id, DIVA_CAPI_ADAPTER *a, PLCI *plci,
- dword plci_b_id, byte connect, dword li_flags)
-{
- word ch_a, ch_a_v, ch_a_s, ch_b, ch_b_v, ch_b_s;
- PLCI *plci_b;
- DIVA_CAPI_ADAPTER *a_b;
-
- a_b = &(adapter[MapController((byte)(plci_b_id & 0x7f)) - 1]);
- plci_b = &(a_b->plci[((plci_b_id >> 8) & 0xff) - 1]);
- ch_a = a->li_base + (plci->li_bchannel_id - 1);
- if (!a->li_pri && (plci->tel == ADV_VOICE)
- && (plci == a->AdvSignalPLCI) && (Id & EXT_CONTROLLER))
- {
- ch_a_v = ch_a + MIXER_IC_CHANNEL_BASE;
- ch_a_s = (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
- a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id) : ch_a_v;
- }
- else
- {
- ch_a_v = ch_a;
- ch_a_s = ch_a;
- }
- ch_b = a_b->li_base + (plci_b->li_bchannel_id - 1);
- if (!a_b->li_pri && (plci_b->tel == ADV_VOICE)
- && (plci_b == a_b->AdvSignalPLCI) && (plci_b_id & EXT_CONTROLLER))
- {
- ch_b_v = ch_b + MIXER_IC_CHANNEL_BASE;
- ch_b_s = (a_b->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC) ?
- a_b->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci_b->li_bchannel_id) : ch_b_v;
- }
- else
- {
- ch_b_v = ch_b;
- ch_b_s = ch_b;
- }
- if (connect)
- {
- li_config_table[ch_b].flag_table[ch_b_v] &= ~LI_FLAG_MONITOR;
- li_config_table[ch_b].flag_table[ch_b_s] &= ~LI_FLAG_MONITOR;
- li_config_table[ch_b_v].flag_table[ch_b] &= ~LI_FLAG_MIX;
- li_config_table[ch_b_s].flag_table[ch_b] &= ~LI_FLAG_MIX;
- li_config_table[ch_b].flag_table[ch_b] &= ~LI_FLAG_PCCONNECT;
- li_config_table[ch_b].chflags &= ~(LI_CHFLAG_MONITOR | LI_CHFLAG_MIX | LI_CHFLAG_LOOP);
- }
- li_config_table[ch_b_v].flag_table[ch_a_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
- li_config_table[ch_b_s].flag_table[ch_a_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
- li_config_table[ch_b_v].flag_table[ch_a_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
- li_config_table[ch_b_s].flag_table[ch_a_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
- li_config_table[ch_a_v].flag_table[ch_b_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
- li_config_table[ch_a_v].flag_table[ch_b_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
- li_config_table[ch_a_s].flag_table[ch_b_v] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
- li_config_table[ch_a_s].flag_table[ch_b_s] &= ~(LI_FLAG_INTERCONNECT | LI_FLAG_CONFERENCE);
- if (li_flags & LI2_FLAG_INTERCONNECT_A_B)
- {
- li_config_table[ch_b_v].flag_table[ch_a_v] |= LI_FLAG_INTERCONNECT;
- li_config_table[ch_b_s].flag_table[ch_a_v] |= LI_FLAG_INTERCONNECT;
- li_config_table[ch_b_v].flag_table[ch_a_s] |= LI_FLAG_INTERCONNECT;
- li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_INTERCONNECT;
- }
- if (li_flags & LI2_FLAG_INTERCONNECT_B_A)
- {
- li_config_table[ch_a_v].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
- li_config_table[ch_a_v].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
- li_config_table[ch_a_s].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
- li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
- }
- if (li_flags & LI2_FLAG_MONITOR_B)
- {
- li_config_table[ch_b].flag_table[ch_b_v] |= LI_FLAG_MONITOR;
- li_config_table[ch_b].flag_table[ch_b_s] |= LI_FLAG_MONITOR;
- }
- if (li_flags & LI2_FLAG_MIX_B)
- {
- li_config_table[ch_b_v].flag_table[ch_b] |= LI_FLAG_MIX;
- li_config_table[ch_b_s].flag_table[ch_b] |= LI_FLAG_MIX;
- }
- if (li_flags & LI2_FLAG_MONITOR_X)
- li_config_table[ch_b].chflags |= LI_CHFLAG_MONITOR;
- if (li_flags & LI2_FLAG_MIX_X)
- li_config_table[ch_b].chflags |= LI_CHFLAG_MIX;
- if (li_flags & LI2_FLAG_LOOP_B)
- {
- li_config_table[ch_b_v].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
- li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_INTERCONNECT;
- li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
- li_config_table[ch_b_s].flag_table[ch_b_s] |= LI_FLAG_INTERCONNECT;
- }
- if (li_flags & LI2_FLAG_LOOP_PC)
- li_config_table[ch_b].flag_table[ch_b] |= LI_FLAG_PCCONNECT;
- if (li_flags & LI2_FLAG_LOOP_X)
- li_config_table[ch_b].chflags |= LI_CHFLAG_LOOP;
- if (li_flags & LI2_FLAG_PCCONNECT_A_B)
- li_config_table[ch_b_s].flag_table[ch_a_s] |= LI_FLAG_PCCONNECT;
- if (li_flags & LI2_FLAG_PCCONNECT_B_A)
- li_config_table[ch_a_s].flag_table[ch_b_s] |= LI_FLAG_PCCONNECT;
- if (ch_a_v != ch_a_s)
- {
- li_config_table[ch_a_v].flag_table[ch_a_s] |= LI_FLAG_CONFERENCE;
- li_config_table[ch_a_s].flag_table[ch_a_v] |= LI_FLAG_CONFERENCE;
- }
- if (ch_b_v != ch_b_s)
- {
- li_config_table[ch_b_v].flag_table[ch_b_s] |= LI_FLAG_CONFERENCE;
- li_config_table[ch_b_s].flag_table[ch_b_v] |= LI_FLAG_CONFERENCE;
- }
-}
-
-
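-/*
- * Validate the main PLCI of an interconnect request (present, in a
- * valid state, network layer connected, B channel assigned) and claim
- * its li_config_table slot.
- */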
-static word li_check_main_plci(dword Id, PLCI *plci)
-{
- if (plci == NULL)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- return (_WRONG_IDENTIFIER);
- }
- if (!plci->State
- || !plci->NL.Id || plci->nl_remove_id
- || (plci->li_bchannel_id == 0))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- return (_WRONG_STATE);
- }
- li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci = plci;
- return (GOOD);
-}
-
-
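-/*
- * Validate the interconnect peer addressed by plci_b_id for an
- * old-spec request: there must be room in the request queue, the
- * controller and PLCI index must be in range, the peer must be in a
- * usable state and on the same controller unless both adapters
- * support XCONNECT, and its B1 resource must be able to carry the
- * mixer facility.  On failure an error code is stored through
- * p_result and NULL is returned.
- */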
-static PLCI *li_check_plci_b(dword Id, PLCI *plci,
- dword plci_b_id, word plci_b_write_pos, byte *p_result)
-{
- byte ctlr_b;
- PLCI *plci_b;
-
- if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
- LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2)
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
- return (NULL);
- }
- ctlr_b = 0;
- if ((plci_b_id & 0x7f) != 0)
- {
- ctlr_b = MapController((byte)(plci_b_id & 0x7f));
- if ((ctlr_b > max_adapter) || ((ctlr_b != 0) && (adapter[ctlr_b - 1].request == NULL)))
- ctlr_b = 0;
- }
- if ((ctlr_b == 0)
- || (((plci_b_id >> 8) & 0xff) == 0)
- || (((plci_b_id >> 8) & 0xff) > adapter[ctlr_b - 1].max_plci))
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI invalid second PLCI %08lx",
- UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
- PUT_WORD(p_result, _WRONG_IDENTIFIER);
- return (NULL);
- }
- plci_b = &(adapter[ctlr_b - 1].plci[((plci_b_id >> 8) & 0xff) - 1]);
- if (!plci_b->State
- || !plci_b->NL.Id || plci_b->nl_remove_id
- || (plci_b->li_bchannel_id == 0))
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI peer in wrong state %08lx",
- UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
- PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
- return (NULL);
- }
- li_config_table[plci_b->adapter->li_base + (plci_b->li_bchannel_id - 1)].plci = plci_b;
- if (((byte)(plci_b_id & ~EXT_CONTROLLER)) !=
- ((byte)(UnMapController(plci->adapter->Id) & ~EXT_CONTROLLER))
- && (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
- || !(plci_b->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)))
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI not on same ctrl %08lx",
- UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
- PUT_WORD(p_result, _WRONG_IDENTIFIER);
- return (NULL);
- }
- if (!(get_b1_facilities(plci_b, add_b1_facilities(plci_b, plci_b->B1_resource,
- (word)(plci_b->B1_facilities | B1_FACILITY_MIXER))) & B1_FACILITY_MIXER))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Interconnect peer cannot mix %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, plci_b->B1_resource));
- PUT_WORD(p_result, _REQUEST_NOT_ALLOWED_IN_THIS_STATE);
- return (NULL);
- }
- return (plci_b);
-}
-
-
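-/*
- * New-spec variant of li_check_plci_b: additionally requires that the
- * peer still owns its li_config_table slot and reports _WRONG_STATE
- * instead of _REQUEST_NOT_ALLOWED_IN_THIS_STATE.
- */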
-static PLCI *li2_check_plci_b(dword Id, PLCI *plci,
- dword plci_b_id, word plci_b_write_pos, byte *p_result)
-{
- byte ctlr_b;
- PLCI *plci_b;
-
- if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
- LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2)
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- PUT_WORD(p_result, _WRONG_STATE);
- return (NULL);
- }
- ctlr_b = 0;
- if ((plci_b_id & 0x7f) != 0)
- {
- ctlr_b = MapController((byte)(plci_b_id & 0x7f));
- if ((ctlr_b > max_adapter) || ((ctlr_b != 0) && (adapter[ctlr_b - 1].request == NULL)))
- ctlr_b = 0;
- }
- if ((ctlr_b == 0)
- || (((plci_b_id >> 8) & 0xff) == 0)
- || (((plci_b_id >> 8) & 0xff) > adapter[ctlr_b - 1].max_plci))
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI invalid second PLCI %08lx",
- UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
- PUT_WORD(p_result, _WRONG_IDENTIFIER);
- return (NULL);
- }
- plci_b = &(adapter[ctlr_b - 1].plci[((plci_b_id >> 8) & 0xff) - 1]);
- if (!plci_b->State
- || !plci_b->NL.Id || plci_b->nl_remove_id
- || (plci_b->li_bchannel_id == 0)
- || (li_config_table[plci_b->adapter->li_base + (plci_b->li_bchannel_id - 1)].plci != plci_b))
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI peer in wrong state %08lx",
- UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
- PUT_WORD(p_result, _WRONG_STATE);
- return (NULL);
- }
- if (((byte)(plci_b_id & ~EXT_CONTROLLER)) !=
- ((byte)(UnMapController(plci->adapter->Id) & ~EXT_CONTROLLER))
- && (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
- || !(plci_b->adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)))
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI not on same ctrl %08lx",
- UnMapId(Id), (char *)(FILE_), __LINE__, plci_b_id));
- PUT_WORD(p_result, _WRONG_IDENTIFIER);
- return (NULL);
- }
- if (!(get_b1_facilities(plci_b, add_b1_facilities(plci_b, plci_b->B1_resource,
- (word)(plci_b->B1_facilities | B1_FACILITY_MIXER))) & B1_FACILITY_MIXER))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Interconnect peer cannot mix %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, plci_b->B1_resource));
- PUT_WORD(p_result, _WRONG_STATE);
- return (NULL);
- }
- return (plci_b);
-}
-
-
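-/*
- * FACILITY_REQ handler for the line interconnect selector: answer
- * LI_GET_SUPPORTED_SERVICES with the old- or new-spec capability
- * record, and for LI_REQ_CONNECT / LI_REQ_DISCONNECT parse the
- * (possibly multi-participant) request, update the flag tables, queue
- * the peer PLCIs, recalculate the coefficients, notify the affected
- * parties and start mixer_command as internal command.
- */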
-static byte mixer_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- word Info;
- word i;
- dword d, li_flags, plci_b_id;
- PLCI *plci_b;
- API_PARSE li_parms[3];
- API_PARSE li_req_parms[3];
- API_PARSE li_participant_struct[2];
- API_PARSE li_participant_parms[3];
- word participant_parms_pos;
- byte result_buffer[32];
- byte *result;
- word result_pos;
- word plci_b_write_pos;
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_request",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
- Info = GOOD;
- result = result_buffer;
- result_buffer[0] = 0;
- if (!(a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _FACILITY_NOT_SUPPORTED;
- }
- else if (api_parse(&msg[1].info[1], msg[1].length, "ws", li_parms))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_MESSAGE_FORMAT;
- }
- else
- {
- result_buffer[0] = 3;
- PUT_WORD(&result_buffer[1], GET_WORD(li_parms[0].info));
- result_buffer[3] = 0;
- switch (GET_WORD(li_parms[0].info))
- {
- case LI_GET_SUPPORTED_SERVICES:
- if (appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
- {
- result_buffer[0] = 17;
- result_buffer[3] = 14;
- PUT_WORD(&result_buffer[4], GOOD);
- d = 0;
- if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_CH)
- d |= LI_CONFERENCING_SUPPORTED;
- if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_PC)
- d |= LI_MONITORING_SUPPORTED;
- if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_CH)
- d |= LI_ANNOUNCEMENTS_SUPPORTED | LI_MIXING_SUPPORTED;
- if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
- d |= LI_CROSS_CONTROLLER_SUPPORTED;
- PUT_DWORD(&result_buffer[6], d);
- if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
- {
- d = 0;
- for (i = 0; i < li_total_channels; i++)
- {
- if ((li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
- && (li_config_table[i].adapter->li_pri
- || (i < li_config_table[i].adapter->li_base + MIXER_BCHANNELS_BRI)))
- {
- d++;
- }
- }
- }
- else
- {
- d = a->li_pri ? a->li_channels : MIXER_BCHANNELS_BRI;
- }
- PUT_DWORD(&result_buffer[10], d / 2);
- PUT_DWORD(&result_buffer[14], d);
- }
- else
- {
- result_buffer[0] = 25;
- result_buffer[3] = 22;
- PUT_WORD(&result_buffer[4], GOOD);
- d = LI2_ASYMMETRIC_SUPPORTED | LI2_B_LOOPING_SUPPORTED | LI2_X_LOOPING_SUPPORTED;
- if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_CH_PC)
- d |= LI2_MONITORING_SUPPORTED | LI2_REMOTE_MONITORING_SUPPORTED;
- if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_CH)
- d |= LI2_MIXING_SUPPORTED | LI2_REMOTE_MIXING_SUPPORTED;
- if (a->manufacturer_features & MANUFACTURER_FEATURE_MIXER_PC_PC)
- d |= LI2_PC_LOOPING_SUPPORTED;
- if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
- d |= LI2_CROSS_CONTROLLER_SUPPORTED;
- PUT_DWORD(&result_buffer[6], d);
- d = a->li_pri ? a->li_channels : MIXER_BCHANNELS_BRI;
- PUT_DWORD(&result_buffer[10], d / 2);
- PUT_DWORD(&result_buffer[14], d - 1);
- if (a->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
- {
- d = 0;
- for (i = 0; i < li_total_channels; i++)
- {
- if ((li_config_table[i].adapter->manufacturer_features & MANUFACTURER_FEATURE_XCONNECT)
- && (li_config_table[i].adapter->li_pri
- || (i < li_config_table[i].adapter->li_base + MIXER_BCHANNELS_BRI)))
- {
- d++;
- }
- }
- }
- PUT_DWORD(&result_buffer[18], d / 2);
- PUT_DWORD(&result_buffer[22], d - 1);
- }
- break;
-
- case LI_REQ_CONNECT:
- if (li_parms[1].length == 8)
- {
- appl->appl_flags |= APPL_FLAG_OLD_LI_SPEC;
- if (api_parse(&li_parms[1].info[1], li_parms[1].length, "dd", li_req_parms))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- plci_b_id = GET_DWORD(li_req_parms[0].info) & 0xffff;
- li_flags = GET_DWORD(li_req_parms[1].info);
- Info = li_check_main_plci(Id, plci);
- result_buffer[0] = 9;
- result_buffer[3] = 6;
- PUT_DWORD(&result_buffer[4], plci_b_id);
- PUT_WORD(&result_buffer[8], GOOD);
- if (Info != GOOD)
- break;
- result = plci->saved_msg.info;
- for (i = 0; i <= result_buffer[0]; i++)
- result[i] = result_buffer[i];
- plci_b_write_pos = plci->li_plci_b_write_pos;
- plci_b = li_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[8]);
- if (plci_b == NULL)
- break;
- li_update_connect(Id, a, plci, plci_b_id, true, li_flags);
- plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_LAST_FLAG;
- plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
- plci->li_plci_b_write_pos = plci_b_write_pos;
- }
- else
- {
- appl->appl_flags &= ~APPL_FLAG_OLD_LI_SPEC;
- if (api_parse(&li_parms[1].info[1], li_parms[1].length, "ds", li_req_parms))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- li_flags = GET_DWORD(li_req_parms[0].info) & ~(LI2_FLAG_INTERCONNECT_A_B | LI2_FLAG_INTERCONNECT_B_A);
- Info = li_check_main_plci(Id, plci);
- result_buffer[0] = 7;
- result_buffer[3] = 4;
- PUT_WORD(&result_buffer[4], Info);
- result_buffer[6] = 0;
- if (Info != GOOD)
- break;
- result = plci->saved_msg.info;
- for (i = 0; i <= result_buffer[0]; i++)
- result[i] = result_buffer[i];
- plci_b_write_pos = plci->li_plci_b_write_pos;
- participant_parms_pos = 0;
- result_pos = 7;
- li2_update_connect(Id, a, plci, UnMapId(Id), true, li_flags);
- while (participant_parms_pos < li_req_parms[1].length)
- {
- result[result_pos] = 6;
- result_pos += 7;
- PUT_DWORD(&result[result_pos - 6], 0);
- PUT_WORD(&result[result_pos - 2], GOOD);
- if (api_parse(&li_req_parms[1].info[1 + participant_parms_pos],
- (word)(li_parms[1].length - participant_parms_pos), "s", li_participant_struct))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
- break;
- }
- if (api_parse(&li_participant_struct[0].info[1],
- li_participant_struct[0].length, "dd", li_participant_parms))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
- break;
- }
- plci_b_id = GET_DWORD(li_participant_parms[0].info) & 0xffff;
- li_flags = GET_DWORD(li_participant_parms[1].info);
- PUT_DWORD(&result[result_pos - 6], plci_b_id);
- /* result points into plci->saved_msg.info; do not use sizeof on the pointer */
- if (sizeof(plci->saved_msg.info) - result_pos < 7)
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI result overrun",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- PUT_WORD(&result[result_pos - 2], _WRONG_STATE);
- break;
- }
- plci_b = li2_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[result_pos - 2]);
- if (plci_b != NULL)
- {
- li2_update_connect(Id, a, plci, plci_b_id, true, li_flags);
- plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id |
- ((li_flags & (LI2_FLAG_INTERCONNECT_A_B | LI2_FLAG_INTERCONNECT_B_A |
- LI2_FLAG_PCCONNECT_A_B | LI2_FLAG_PCCONNECT_B_A)) ? 0 : LI_PLCI_B_DISC_FLAG);
- plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
- }
- participant_parms_pos = (word)((&li_participant_struct[0].info[1 + li_participant_struct[0].length]) -
- (&li_req_parms[1].info[1]));
- }
- result[0] = (byte)(result_pos - 1);
- result[3] = (byte)(result_pos - 4);
- result[6] = (byte)(result_pos - 7);
- i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
- if ((plci_b_write_pos == plci->li_plci_b_read_pos)
- || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
- {
- plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
- plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
- }
- else
- plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
- plci->li_plci_b_write_pos = plci_b_write_pos;
- }
- mixer_calculate_coefs(a);
- plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
- mixer_notify_update(plci, true);
- sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
- "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
- plci->command = 0;
- plci->li_cmd = GET_WORD(li_parms[0].info);
- start_internal_command(Id, plci, mixer_command);
- return (false);
-
- case LI_REQ_DISCONNECT:
- if (li_parms[1].length == 4)
- {
- appl->appl_flags |= APPL_FLAG_OLD_LI_SPEC;
- if (api_parse(&li_parms[1].info[1], li_parms[1].length, "d", li_req_parms))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- plci_b_id = GET_DWORD(li_req_parms[0].info) & 0xffff;
- Info = li_check_main_plci(Id, plci);
- result_buffer[0] = 9;
- result_buffer[3] = 6;
- PUT_DWORD(&result_buffer[4], GET_DWORD(li_req_parms[0].info));
- PUT_WORD(&result_buffer[8], GOOD);
- if (Info != GOOD)
- break;
- result = plci->saved_msg.info;
- for (i = 0; i <= result_buffer[0]; i++)
- result[i] = result_buffer[i];
- plci_b_write_pos = plci->li_plci_b_write_pos;
- plci_b = li_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[8]);
- if (plci_b == NULL)
- break;
- li_update_connect(Id, a, plci, plci_b_id, false, 0);
- plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG | LI_PLCI_B_LAST_FLAG;
- plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
- plci->li_plci_b_write_pos = plci_b_write_pos;
- }
- else
- {
- appl->appl_flags &= ~APPL_FLAG_OLD_LI_SPEC;
- if (api_parse(&li_parms[1].info[1], li_parms[1].length, "s", li_req_parms))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_MESSAGE_FORMAT;
- break;
- }
- Info = li_check_main_plci(Id, plci);
- result_buffer[0] = 7;
- result_buffer[3] = 4;
- PUT_WORD(&result_buffer[4], Info);
- result_buffer[6] = 0;
- if (Info != GOOD)
- break;
- result = plci->saved_msg.info;
- for (i = 0; i <= result_buffer[0]; i++)
- result[i] = result_buffer[i];
- plci_b_write_pos = plci->li_plci_b_write_pos;
- participant_parms_pos = 0;
- result_pos = 7;
- while (participant_parms_pos < li_req_parms[0].length)
- {
- result[result_pos] = 6;
- result_pos += 7;
- PUT_DWORD(&result[result_pos - 6], 0);
- PUT_WORD(&result[result_pos - 2], GOOD);
- if (api_parse(&li_req_parms[0].info[1 + participant_parms_pos],
- (word)(li_parms[1].length - participant_parms_pos), "s", li_participant_struct))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
- break;
- }
- if (api_parse(&li_participant_struct[0].info[1],
- li_participant_struct[0].length, "d", li_participant_parms))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- PUT_WORD(&result[result_pos - 2], _WRONG_MESSAGE_FORMAT);
- break;
- }
- plci_b_id = GET_DWORD(li_participant_parms[0].info) & 0xffff;
- PUT_DWORD(&result[result_pos - 6], plci_b_id);
- if (sizeof(plci->saved_msg.info) - result_pos < 7) /* result points into plci->saved_msg.info; sizeof of the result pointer was a bug */
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI result overrun",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- PUT_WORD(&result[result_pos - 2], _WRONG_STATE);
- break;
- }
- plci_b = li2_check_plci_b(Id, plci, plci_b_id, plci_b_write_pos, &result[result_pos - 2]);
- if (plci_b != NULL)
- {
- li2_update_connect(Id, a, plci, plci_b_id, false, 0);
- plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG;
- plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
- }
- participant_parms_pos = (word)((&li_participant_struct[0].info[1 + li_participant_struct[0].length]) -
- (&li_req_parms[0].info[1]));
- }
- result[0] = (byte)(result_pos - 1);
- result[3] = (byte)(result_pos - 4);
- result[6] = (byte)(result_pos - 7);
- i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
- if ((plci_b_write_pos == plci->li_plci_b_read_pos)
- || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
- {
- plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
- plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
- }
- else
- plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
- plci->li_plci_b_write_pos = plci_b_write_pos;
- }
- mixer_calculate_coefs(a);
- plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
- mixer_notify_update(plci, true);
- sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
- "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
- plci->command = 0;
- plci->li_cmd = GET_WORD(li_parms[0].info);
- start_internal_command(Id, plci, mixer_command);
- return (false);
-
- case LI_REQ_SILENT_UPDATE:
- if (!plci || !plci->State
- || !plci->NL.Id || plci->nl_remove_id
- || (plci->li_bchannel_id == 0)
- || (li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci != plci))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- return (false);
- }
- plci_b_write_pos = plci->li_plci_b_write_pos;
- if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
- LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 2)
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- return (false);
- }
- i = (plci_b_write_pos == 0) ? LI_PLCI_B_QUEUE_ENTRIES - 1 : plci_b_write_pos - 1;
- if ((plci_b_write_pos == plci->li_plci_b_read_pos)
- || (plci->li_plci_b_queue[i] & LI_PLCI_B_LAST_FLAG))
- {
- plci->li_plci_b_queue[plci_b_write_pos] = LI_PLCI_B_SKIP_FLAG | LI_PLCI_B_LAST_FLAG;
- plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
- }
- else
- plci->li_plci_b_queue[i] |= LI_PLCI_B_LAST_FLAG;
- plci->li_plci_b_write_pos = plci_b_write_pos;
- plci->li_channel_bits = li_config_table[a->li_base + (plci->li_bchannel_id - 1)].channel;
- plci->command = 0;
- plci->li_cmd = GET_WORD(li_parms[0].info);
- start_internal_command(Id, plci, mixer_command);
- return (false);
-
- default:
- dbug(1, dprintf("[%06lx] %s,%d: LI unknown request %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, GET_WORD(li_parms[0].info)));
- Info = _FACILITY_NOT_SUPPORTED;
- }
- }
- sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
- "wwS", Info, SELECTOR_LINE_INTERCONNECT, result);
- return (false);
-}
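Every LI handler above manipulates plci->li_plci_b_queue with the same open-coded ring-buffer arithmetic. Below is a minimal sketch of that arithmetic as a reading aid; the helper names are invented and the queue size is assumed (the real LI_PLCI_B_QUEUE_ENTRIES comes from the driver headers):

/* Sketch only: one slot is always kept empty, so read == write means empty. */
#define LI_PLCI_B_QUEUE_ENTRIES 16 /* assumed size; any value >= 2 works */

static unsigned int li_queue_free(unsigned int read_pos, unsigned int write_pos)
{
	return ((read_pos > write_pos) ? read_pos :
		LI_PLCI_B_QUEUE_ENTRIES + read_pos) - write_pos - 1;
}

static unsigned int li_queue_advance(unsigned int pos)
{
	return (pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : pos + 1;
}

The overrun guards above compute exactly li_queue_free(read, write) < 2 (LI_REQ_SILENT_UPDATE needs room for an entry plus a terminator) and < 1 (mixer_notify_source_removed below).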
-
-
-static void mixer_indication_coefs_set(dword Id, PLCI *plci)
-{
- dword d;
- byte result[12];
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_coefs_set",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
- if (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos)
- {
- do
- {
- d = plci->li_plci_b_queue[plci->li_plci_b_read_pos];
- if (!(d & LI_PLCI_B_SKIP_FLAG))
- {
- if (plci->appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
- {
- if (d & LI_PLCI_B_DISC_FLAG)
- {
- result[0] = 5;
- PUT_WORD(&result[1], LI_IND_DISCONNECT);
- result[3] = 2;
- PUT_WORD(&result[4], _LI_USER_INITIATED);
- }
- else
- {
- result[0] = 7;
- PUT_WORD(&result[1], LI_IND_CONNECT_ACTIVE);
- result[3] = 4;
- PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
- }
- }
- else
- {
- if (d & LI_PLCI_B_DISC_FLAG)
- {
- result[0] = 9;
- PUT_WORD(&result[1], LI_IND_DISCONNECT);
- result[3] = 6;
- PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
- PUT_WORD(&result[8], _LI_USER_INITIATED);
- }
- else
- {
- result[0] = 7;
- PUT_WORD(&result[1], LI_IND_CONNECT_ACTIVE);
- result[3] = 4;
- PUT_DWORD(&result[4], d & ~LI_PLCI_B_FLAG_MASK);
- }
- }
- sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0,
- "ws", SELECTOR_LINE_INTERCONNECT, result);
- }
- plci->li_plci_b_read_pos = (plci->li_plci_b_read_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ?
- 0 : plci->li_plci_b_read_pos + 1;
- } while (!(d & LI_PLCI_B_LAST_FLAG) && (plci->li_plci_b_read_pos != plci->li_plci_b_req_pos));
- }
-}
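Each dword drained from the queue here packs a 16-bit PLCI id in its low word with control bits above it. A sketch of the decoding, with flag values assumed for illustration (the real definitions live in the driver headers):

/* Assumed flag values -- illustration only. */
#define LI_PLCI_B_LAST_FLAG 0x80000000UL /* last entry of one request */
#define LI_PLCI_B_DISC_FLAG 0x40000000UL /* emit LI_IND_DISCONNECT */
#define LI_PLCI_B_SKIP_FLAG 0x20000000UL /* placeholder, no indication sent */
#define LI_PLCI_B_FLAG_MASK \
	(LI_PLCI_B_LAST_FLAG | LI_PLCI_B_DISC_FLAG | LI_PLCI_B_SKIP_FLAG)

/* d is one queue entry; d & ~LI_PLCI_B_FLAG_MASK is what PUT_DWORD
 * writes into the indication as the interconnected PLCI id. */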
-
-
-static void mixer_indication_xconnect_from(dword Id, PLCI *plci, byte *msg, word length)
-{
- word i, j, ch;
- struct xconnect_transfer_address_s s, *p;
- DIVA_CAPI_ADAPTER *a;
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_xconnect_from %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, (int)length));
-
- a = plci->adapter;
- for (i = 1; i < length; i += 16)
- {
- s.card_address.low = msg[i] | (msg[i + 1] << 8) | (((dword)(msg[i + 2])) << 16) | (((dword)(msg[i + 3])) << 24);
- s.card_address.high = msg[i + 4] | (msg[i + 5] << 8) | (((dword)(msg[i + 6])) << 16) | (((dword)(msg[i + 7])) << 24);
- s.offset = msg[i + 8] | (msg[i + 9] << 8) | (((dword)(msg[i + 10])) << 16) | (((dword)(msg[i + 11])) << 24);
- ch = msg[i + 12] | (msg[i + 13] << 8);
- j = ch & XCONNECT_CHANNEL_NUMBER_MASK;
- if (!a->li_pri && (plci->li_bchannel_id == 2))
- j = 1 - j;
- j += a->li_base;
- if (ch & XCONNECT_CHANNEL_PORT_PC)
- p = &(li_config_table[j].send_pc);
- else
- p = &(li_config_table[j].send_b);
- p->card_address.low = s.card_address.low;
- p->card_address.high = s.card_address.high;
- p->offset = s.offset;
- li_config_table[j].channel |= LI_CHANNEL_ADDRESSES_SET;
- }
- if (plci->internal_command_queue[0]
- && ((plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_2)
- || (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_3)
- || (plci->adjust_b_state == ADJUST_B_RESTORE_MIXER_4)))
- {
- (*(plci->internal_command_queue[0]))(Id, plci, 0);
- if (!plci->internal_command)
- next_internal_command(Id, plci);
- }
- mixer_notify_update(plci, true);
-}
-
-
-static void mixer_indication_xconnect_to(dword Id, PLCI *plci, byte *msg, word length)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_indication_xconnect_to %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, (int) length));
-
-}
-
-
-static byte mixer_notify_source_removed(PLCI *plci, dword plci_b_id)
-{
- word plci_b_write_pos;
-
- plci_b_write_pos = plci->li_plci_b_write_pos;
- if (((plci->li_plci_b_read_pos > plci_b_write_pos) ? plci->li_plci_b_read_pos :
- LI_PLCI_B_QUEUE_ENTRIES + plci->li_plci_b_read_pos) - plci_b_write_pos - 1 < 1)
- {
- dbug(1, dprintf("[%06lx] %s,%d: LI request overrun",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
- return (false);
- }
- plci->li_plci_b_queue[plci_b_write_pos] = plci_b_id | LI_PLCI_B_DISC_FLAG;
- plci_b_write_pos = (plci_b_write_pos == LI_PLCI_B_QUEUE_ENTRIES - 1) ? 0 : plci_b_write_pos + 1;
- plci->li_plci_b_write_pos = plci_b_write_pos;
- return (true);
-}
-
-
-static void mixer_remove(PLCI *plci)
-{
- DIVA_CAPI_ADAPTER *a;
- PLCI *notify_plci;
- dword plci_b_id;
- word i, j;
-
- dbug(1, dprintf("[%06lx] %s,%d: mixer_remove",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- a = plci->adapter;
- plci_b_id = (plci->Id << 8) | UnMapController(plci->adapter->Id);
- if (a->profile.Global_Options & GL_LINE_INTERCONNECT_SUPPORTED)
- {
- if ((plci->li_bchannel_id != 0)
- && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
- {
- i = a->li_base + (plci->li_bchannel_id - 1);
- if ((li_config_table[i].curchnl | li_config_table[i].channel) & LI_CHANNEL_INVOLVED)
- {
- for (j = 0; j < li_total_channels; j++)
- {
- if ((li_config_table[i].flag_table[j] & LI_FLAG_INTERCONNECT)
- || (li_config_table[j].flag_table[i] & LI_FLAG_INTERCONNECT))
- {
- notify_plci = li_config_table[j].plci;
- if ((notify_plci != NULL)
- && (notify_plci != plci)
- && (notify_plci->appl != NULL)
- && !(notify_plci->appl->appl_flags & APPL_FLAG_OLD_LI_SPEC)
- && (notify_plci->State)
- && notify_plci->NL.Id && !notify_plci->nl_remove_id)
- {
- mixer_notify_source_removed(notify_plci, plci_b_id);
- }
- }
- }
- mixer_clear_config(plci);
- mixer_calculate_coefs(a);
- mixer_notify_update(plci, true);
- }
- li_config_table[i].plci = NULL;
- plci->li_bchannel_id = 0;
- }
- }
-}
-
-
-/*------------------------------------------------------------------*/
-/* Echo canceller facilities */
-/*------------------------------------------------------------------*/
-
-
-static void ec_write_parameters(PLCI *plci)
-{
- word w;
- byte parameter_buffer[6];
-
- dbug(1, dprintf("[%06lx] %s,%d: ec_write_parameters",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- parameter_buffer[0] = 5;
- parameter_buffer[1] = DSP_CTRL_SET_LEC_PARAMETERS;
- PUT_WORD(&parameter_buffer[2], plci->ec_idi_options);
- plci->ec_idi_options &= ~LEC_RESET_COEFFICIENTS;
- w = (plci->ec_tail_length == 0) ? 128 : plci->ec_tail_length;
- PUT_WORD(&parameter_buffer[4], w);
- add_p(plci, FTY, parameter_buffer);
- sig_req(plci, TEL_CTRL, 0);
- send_req(plci);
-}
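For reference, the facility IE assembled above lays out as follows (read off the code; PUT_WORD stores little endian):

	byte 0     length (5)
	byte 1     DSP_CTRL_SET_LEC_PARAMETERS
	bytes 2-3  ec_idi_options (LEC_* bit mask); note that
	           LEC_RESET_COEFFICIENTS is cleared again as soon as it
	           has been latched into the message
	bytes 4-5  echo tail length, with 128 substituted when the
	           application left it at 0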
-
-
-static void ec_clear_config(PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: ec_clear_config",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER |
- LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING;
- plci->ec_tail_length = 0;
-}
-
-
-static void ec_prepare_switch(dword Id, PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: ec_prepare_switch",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
-}
-
-
-static word ec_save_config(dword Id, PLCI *plci, byte Rc)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: ec_save_config %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
- return (GOOD);
-}
-
-
-static word ec_restore_config(dword Id, PLCI *plci, byte Rc)
-{
- word Info;
-
- dbug(1, dprintf("[%06lx] %s,%d: ec_restore_config %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
- Info = GOOD;
- if (plci->B1_facilities & B1_FACILITY_EC)
- {
- switch (plci->adjust_b_state)
- {
- case ADJUST_B_RESTORE_EC_1:
- plci->internal_command = plci->adjust_b_command;
- if (plci->sig_req)
- {
- plci->adjust_b_state = ADJUST_B_RESTORE_EC_1;
- break;
- }
- ec_write_parameters(plci);
- plci->adjust_b_state = ADJUST_B_RESTORE_EC_2;
- break;
- case ADJUST_B_RESTORE_EC_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Restore EC failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _WRONG_STATE;
- break;
- }
- break;
- }
- }
- return (Info);
-}
-
-
-static void ec_command(dword Id, PLCI *plci, byte Rc)
-{
- word internal_command, Info;
- byte result[8];
-
- dbug(1, dprintf("[%06lx] %s,%d: ec_command %02x %04x %04x %04x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command,
- plci->ec_cmd, plci->ec_idi_options, plci->ec_tail_length));
-
- Info = GOOD;
- if (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
- {
- result[0] = 2;
- PUT_WORD(&result[1], EC_SUCCESS);
- }
- else
- {
- result[0] = 5;
- PUT_WORD(&result[1], plci->ec_cmd);
- result[3] = 2;
- PUT_WORD(&result[4], GOOD);
- }
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (plci->ec_cmd)
- {
- case EC_ENABLE_OPERATION:
- case EC_FREEZE_COEFFICIENTS:
- case EC_RESUME_COEFFICIENT_UPDATE:
- case EC_RESET_COEFFICIENTS:
- switch (internal_command)
- {
- default:
- adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities |
- B1_FACILITY_EC), EC_COMMAND_1);
- /* fall through */
- case EC_COMMAND_1:
- if (adjust_b_process(Id, plci, Rc) != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Load EC failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
- if (plci->internal_command)
- return;
- /* fall through */
- case EC_COMMAND_2:
- if (plci->sig_req)
- {
- plci->internal_command = EC_COMMAND_2;
- return;
- }
- plci->internal_command = EC_COMMAND_3;
- ec_write_parameters(plci);
- return;
- case EC_COMMAND_3:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Enable EC failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
- break;
- }
- break;
-
- case EC_DISABLE_OPERATION:
- switch (internal_command)
- {
- default:
- case EC_COMMAND_1:
- if (plci->B1_facilities & B1_FACILITY_EC)
- {
- if (plci->sig_req)
- {
- plci->internal_command = EC_COMMAND_1;
- return;
- }
- plci->internal_command = EC_COMMAND_2;
- ec_write_parameters(plci);
- return;
- }
- Rc = OK;
- /* fall through */
- case EC_COMMAND_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Disable EC failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
- adjust_b1_resource(Id, plci, NULL, (word)(plci->B1_facilities &
- ~B1_FACILITY_EC), EC_COMMAND_3);
- /* fall through */
- case EC_COMMAND_3:
- if (adjust_b_process(Id, plci, Rc) != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Unload EC failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _FACILITY_NOT_SUPPORTED;
- break;
- }
- if (plci->internal_command)
- return;
- break;
- }
- break;
- }
- sendf(plci->appl, _FACILITY_R | CONFIRM, Id & 0xffffL, plci->number,
- "wws", Info, (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
- PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
-}
-
-
-static byte ec_request(dword Id, word Number, DIVA_CAPI_ADAPTER *a, PLCI *plci, APPL *appl, API_PARSE *msg)
-{
- word Info;
- word opt;
- API_PARSE ec_parms[3];
- byte result[16];
-
- dbug(1, dprintf("[%06lx] %s,%d: ec_request",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
- Info = GOOD;
- result[0] = 0;
- if (!(a->man_profile.private_options & (1L << PRIVATE_ECHO_CANCELLER)))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Facility not supported",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _FACILITY_NOT_SUPPORTED;
- }
- else
- {
- if (appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
- {
- if (api_parse(&msg[1].info[1], msg[1].length, "w", ec_parms))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_MESSAGE_FORMAT;
- }
- else
- {
- if (plci == NULL)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_IDENTIFIER;
- }
- else if (!plci->State || !plci->NL.Id || plci->nl_remove_id)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_STATE;
- }
- else
- {
- plci->command = 0;
- plci->ec_cmd = GET_WORD(ec_parms[0].info);
- plci->ec_idi_options &= ~(LEC_MANUAL_DISABLE | LEC_RESET_COEFFICIENTS);
- result[0] = 2;
- PUT_WORD(&result[1], EC_SUCCESS);
- if (msg[1].length >= 4)
- {
- opt = GET_WORD(&ec_parms[0].info[2]);
- plci->ec_idi_options &= ~(LEC_ENABLE_NONLINEAR_PROCESSING |
- LEC_ENABLE_2100HZ_DETECTOR | LEC_REQUIRE_2100HZ_REVERSALS);
- if (!(opt & EC_DISABLE_NON_LINEAR_PROCESSING))
- plci->ec_idi_options |= LEC_ENABLE_NONLINEAR_PROCESSING;
- if (opt & EC_DETECT_DISABLE_TONE)
- plci->ec_idi_options |= LEC_ENABLE_2100HZ_DETECTOR;
- if (!(opt & EC_DO_NOT_REQUIRE_REVERSALS))
- plci->ec_idi_options |= LEC_REQUIRE_2100HZ_REVERSALS;
- if (msg[1].length >= 6)
- {
- plci->ec_tail_length = GET_WORD(&ec_parms[0].info[4]);
- }
- }
- switch (plci->ec_cmd)
- {
- case EC_ENABLE_OPERATION:
- plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
- start_internal_command(Id, plci, ec_command);
- return (false);
-
- case EC_DISABLE_OPERATION:
- plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER |
- LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING |
- LEC_RESET_COEFFICIENTS;
- start_internal_command(Id, plci, ec_command);
- return (false);
-
- case EC_FREEZE_COEFFICIENTS:
- plci->ec_idi_options |= LEC_FREEZE_COEFFICIENTS;
- start_internal_command(Id, plci, ec_command);
- return (false);
-
- case EC_RESUME_COEFFICIENT_UPDATE:
- plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
- start_internal_command(Id, plci, ec_command);
- return (false);
-
- case EC_RESET_COEFFICIENTS:
- plci->ec_idi_options |= LEC_RESET_COEFFICIENTS;
- start_internal_command(Id, plci, ec_command);
- return (false);
-
- default:
- dbug(1, dprintf("[%06lx] %s,%d: EC unknown request %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, plci->ec_cmd));
- PUT_WORD(&result[1], EC_UNSUPPORTED_OPERATION);
- }
- }
- }
- }
- else
- {
- if (api_parse(&msg[1].info[1], msg[1].length, "ws", ec_parms))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong message format",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_MESSAGE_FORMAT;
- }
- else
- {
- if (GET_WORD(ec_parms[0].info) == EC_GET_SUPPORTED_SERVICES)
- {
- result[0] = 11;
- PUT_WORD(&result[1], EC_GET_SUPPORTED_SERVICES);
- result[3] = 8;
- PUT_WORD(&result[4], GOOD);
- PUT_WORD(&result[6], 0x0007);
- PUT_WORD(&result[8], LEC_MAX_SUPPORTED_TAIL_LENGTH);
- PUT_WORD(&result[10], 0);
- }
- else if (plci == NULL)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong PLCI",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_IDENTIFIER;
- }
- else if (!plci->State || !plci->NL.Id || plci->nl_remove_id)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Wrong state",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- Info = _WRONG_STATE;
- }
- else
- {
- plci->command = 0;
- plci->ec_cmd = GET_WORD(ec_parms[0].info);
- plci->ec_idi_options &= ~(LEC_MANUAL_DISABLE | LEC_RESET_COEFFICIENTS);
- result[0] = 5;
- PUT_WORD(&result[1], plci->ec_cmd);
- result[3] = 2;
- PUT_WORD(&result[4], GOOD);
- plci->ec_idi_options &= ~(LEC_ENABLE_NONLINEAR_PROCESSING |
- LEC_ENABLE_2100HZ_DETECTOR | LEC_REQUIRE_2100HZ_REVERSALS);
- plci->ec_tail_length = 0;
- if (ec_parms[1].length >= 2)
- {
- opt = GET_WORD(&ec_parms[1].info[1]);
- if (opt & EC_ENABLE_NON_LINEAR_PROCESSING)
- plci->ec_idi_options |= LEC_ENABLE_NONLINEAR_PROCESSING;
- if (opt & EC_DETECT_DISABLE_TONE)
- plci->ec_idi_options |= LEC_ENABLE_2100HZ_DETECTOR;
- if (!(opt & EC_DO_NOT_REQUIRE_REVERSALS))
- plci->ec_idi_options |= LEC_REQUIRE_2100HZ_REVERSALS;
- if (ec_parms[1].length >= 4)
- {
- plci->ec_tail_length = GET_WORD(&ec_parms[1].info[3]);
- }
- }
- switch (plci->ec_cmd)
- {
- case EC_ENABLE_OPERATION:
- plci->ec_idi_options &= ~LEC_FREEZE_COEFFICIENTS;
- start_internal_command(Id, plci, ec_command);
- return (false);
-
- case EC_DISABLE_OPERATION:
- plci->ec_idi_options = LEC_ENABLE_ECHO_CANCELLER |
- LEC_MANUAL_DISABLE | LEC_ENABLE_NONLINEAR_PROCESSING |
- LEC_RESET_COEFFICIENTS;
- start_internal_command(Id, plci, ec_command);
- return (false);
-
- default:
- dbug(1, dprintf("[%06lx] %s,%d: EC unknown request %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, plci->ec_cmd));
- PUT_WORD(&result[4], _FACILITY_SPECIFIC_FUNCTION_NOT_SUPP);
- }
- }
- }
- }
- }
- sendf(appl, _FACILITY_R | CONFIRM, Id & 0xffffL, Number,
- "wws", Info, (appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
- PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
- return (false);
-}
-
-
-static void ec_indication(dword Id, PLCI *plci, byte *msg, word length)
-{
- byte result[8];
-
- dbug(1, dprintf("[%06lx] %s,%d: ec_indication",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
- if (!(plci->ec_idi_options & LEC_MANUAL_DISABLE))
- {
- if (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC)
- {
- result[0] = 2;
- PUT_WORD(&result[1], 0);
- switch (msg[1])
- {
- case LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ:
- PUT_WORD(&result[1], EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ);
- break;
- case LEC_DISABLE_TYPE_REVERSED_2100HZ:
- PUT_WORD(&result[1], EC_BYPASS_DUE_TO_REVERSED_2100HZ);
- break;
- case LEC_DISABLE_RELEASED:
- PUT_WORD(&result[1], EC_BYPASS_RELEASED);
- break;
- }
- }
- else
- {
- result[0] = 5;
- PUT_WORD(&result[1], EC_BYPASS_INDICATION);
- result[3] = 2;
- PUT_WORD(&result[4], 0);
- switch (msg[1])
- {
- case LEC_DISABLE_TYPE_CONTIGNUOUS_2100HZ:
- PUT_WORD(&result[4], EC_BYPASS_DUE_TO_CONTINUOUS_2100HZ);
- break;
- case LEC_DISABLE_TYPE_REVERSED_2100HZ:
- PUT_WORD(&result[4], EC_BYPASS_DUE_TO_REVERSED_2100HZ);
- break;
- case LEC_DISABLE_RELEASED:
- PUT_WORD(&result[4], EC_BYPASS_RELEASED);
- break;
- }
- }
- sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", (plci->appl->appl_flags & APPL_FLAG_PRIV_EC_SPEC) ?
- PRIV_SELECTOR_ECHO_CANCELLER : SELECTOR_ECHO_CANCELLER, result);
- }
-}
-
-
-
-/*------------------------------------------------------------------*/
-/* Advanced voice */
-/*------------------------------------------------------------------*/
-
-static void adv_voice_write_coefs(PLCI *plci, word write_command)
-{
- DIVA_CAPI_ADAPTER *a;
- word i;
- byte *p;
-
- word w, n, j, k;
- byte ch_map[MIXER_CHANNELS_BRI];
-
- byte coef_buffer[ADV_VOICE_COEF_BUFFER_SIZE + 2];
-
- dbug(1, dprintf("[%06lx] %s,%d: adv_voice_write_coefs %d",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__, write_command));
-
- a = plci->adapter;
- p = coef_buffer + 1;
- *(p++) = DSP_CTRL_OLD_SET_MIXER_COEFFICIENTS;
- i = 0;
- while (i + sizeof(word) <= a->adv_voice_coef_length)
- {
- PUT_WORD(p, GET_WORD(a->adv_voice_coef_buffer + i));
- p += 2;
- i += 2;
- }
- while (i < ADV_VOICE_OLD_COEF_COUNT * sizeof(word))
- {
- PUT_WORD(p, 0x8000);
- p += 2;
- i += 2;
- }
-
- if (!a->li_pri && (plci->li_bchannel_id == 0))
- {
- if ((li_config_table[a->li_base].plci == NULL) && (li_config_table[a->li_base + 1].plci != NULL))
- {
- plci->li_bchannel_id = 1;
- li_config_table[a->li_base].plci = plci;
- dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__, plci->li_bchannel_id));
- }
- else if ((li_config_table[a->li_base].plci != NULL) && (li_config_table[a->li_base + 1].plci == NULL))
- {
- plci->li_bchannel_id = 2;
- li_config_table[a->li_base + 1].plci = plci;
- dbug(1, dprintf("[%06lx] %s,%d: adv_voice_set_bchannel_id %d",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__, plci->li_bchannel_id));
- }
- }
- if (!a->li_pri && (plci->li_bchannel_id != 0)
- && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
- {
- i = a->li_base + (plci->li_bchannel_id - 1);
- switch (write_command)
- {
- case ADV_VOICE_WRITE_ACTIVATION:
- j = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
- k = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
- if (!(plci->B1_facilities & B1_FACILITY_MIXER))
- {
- li_config_table[j].flag_table[i] |= LI_FLAG_CONFERENCE | LI_FLAG_MIX;
- li_config_table[i].flag_table[j] |= LI_FLAG_CONFERENCE | LI_FLAG_MONITOR;
- }
- if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
- {
- li_config_table[k].flag_table[i] |= LI_FLAG_CONFERENCE | LI_FLAG_MIX;
- li_config_table[i].flag_table[k] |= LI_FLAG_CONFERENCE | LI_FLAG_MONITOR;
- li_config_table[k].flag_table[j] |= LI_FLAG_CONFERENCE;
- li_config_table[j].flag_table[k] |= LI_FLAG_CONFERENCE;
- }
- mixer_calculate_coefs(a);
- li_config_table[i].curchnl = li_config_table[i].channel;
- li_config_table[j].curchnl = li_config_table[j].channel;
- if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
- li_config_table[k].curchnl = li_config_table[k].channel;
- break;
-
- case ADV_VOICE_WRITE_DEACTIVATION:
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[i].flag_table[j] = 0;
- li_config_table[j].flag_table[i] = 0;
- }
- k = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[k].flag_table[j] = 0;
- li_config_table[j].flag_table[k] = 0;
- }
- if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
- {
- k = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[k].flag_table[j] = 0;
- li_config_table[j].flag_table[k] = 0;
- }
- }
- mixer_calculate_coefs(a);
- break;
- }
- if (plci->B1_facilities & B1_FACILITY_MIXER)
- {
- w = 0;
- if (ADV_VOICE_NEW_COEF_BASE + sizeof(word) <= a->adv_voice_coef_length)
- w = GET_WORD(a->adv_voice_coef_buffer + ADV_VOICE_NEW_COEF_BASE);
- if (li_config_table[i].channel & LI_CHANNEL_TX_DATA)
- w |= MIXER_FEATURE_ENABLE_TX_DATA;
- if (li_config_table[i].channel & LI_CHANNEL_RX_DATA)
- w |= MIXER_FEATURE_ENABLE_RX_DATA;
- *(p++) = (byte) w;
- *(p++) = (byte)(w >> 8);
- for (j = 0; j < sizeof(ch_map); j += 2)
- {
- ch_map[j] = (byte)(j + (plci->li_bchannel_id - 1));
- ch_map[j + 1] = (byte)(j + (2 - plci->li_bchannel_id));
- }
- for (n = 0; n < ARRAY_SIZE(mixer_write_prog_bri); n++)
- {
- i = a->li_base + ch_map[mixer_write_prog_bri[n].to_ch];
- j = a->li_base + ch_map[mixer_write_prog_bri[n].from_ch];
- if (li_config_table[i].channel & li_config_table[j].channel & LI_CHANNEL_INVOLVED)
- {
- *(p++) = ((li_config_table[i].coef_table[j] & mixer_write_prog_bri[n].mask) ? 0x80 : 0x01);
- w = ((li_config_table[i].coef_table[j] & 0xf) ^ (li_config_table[i].coef_table[j] >> 4));
- li_config_table[i].coef_table[j] ^= (w & mixer_write_prog_bri[n].mask) << 4;
- }
- else
- {
- *(p++) = (ADV_VOICE_NEW_COEF_BASE + sizeof(word) + n < a->adv_voice_coef_length) ?
- a->adv_voice_coef_buffer[ADV_VOICE_NEW_COEF_BASE + sizeof(word) + n] : 0x00;
- }
- }
- }
- else
- {
- for (i = ADV_VOICE_NEW_COEF_BASE; i < a->adv_voice_coef_length; i++)
- *(p++) = a->adv_voice_coef_buffer[i];
- }
- }
- else
- {
- for (i = ADV_VOICE_NEW_COEF_BASE; i < a->adv_voice_coef_length; i++)
- *(p++) = a->adv_voice_coef_buffer[i];
- }
- coef_buffer[0] = (p - coef_buffer) - 1;
- add_p(plci, FTY, coef_buffer);
- sig_req(plci, TEL_CTRL, 0);
- send_req(plci);
-}
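Note the ch_map initialization above: for a PLCI on BRI channel 1 it is the identity map {0, 1, 2, 3, ...}, while for channel 2 each pair is swapped to {1, 0, 3, 2, ...}, so the mixer program entries from mixer_write_prog_bri are always resolved relative to the PLCI's own B-channel.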
-
-
-static void adv_voice_clear_config(PLCI *plci)
-{
- DIVA_CAPI_ADAPTER *a;
-
- word i, j;
-
-
- dbug(1, dprintf("[%06lx] %s,%d: adv_voice_clear_config",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- a = plci->adapter;
- if ((plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
- {
- a->adv_voice_coef_length = 0;
-
- if (!a->li_pri && (plci->li_bchannel_id != 0)
- && (li_config_table[a->li_base + (plci->li_bchannel_id - 1)].plci == plci))
- {
- i = a->li_base + (plci->li_bchannel_id - 1);
- li_config_table[i].curchnl = 0;
- li_config_table[i].channel = 0;
- li_config_table[i].chflags = 0;
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[i].flag_table[j] = 0;
- li_config_table[j].flag_table[i] = 0;
- li_config_table[i].coef_table[j] = 0;
- li_config_table[j].coef_table[i] = 0;
- }
- li_config_table[i].coef_table[i] |= LI_COEF_CH_PC_SET | LI_COEF_PC_CH_SET;
- i = a->li_base + MIXER_IC_CHANNEL_BASE + (plci->li_bchannel_id - 1);
- li_config_table[i].curchnl = 0;
- li_config_table[i].channel = 0;
- li_config_table[i].chflags = 0;
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[i].flag_table[j] = 0;
- li_config_table[j].flag_table[i] = 0;
- li_config_table[i].coef_table[j] = 0;
- li_config_table[j].coef_table[i] = 0;
- }
- if (a->manufacturer_features & MANUFACTURER_FEATURE_SLAVE_CODEC)
- {
- i = a->li_base + MIXER_IC_CHANNEL_BASE + (2 - plci->li_bchannel_id);
- li_config_table[i].curchnl = 0;
- li_config_table[i].channel = 0;
- li_config_table[i].chflags = 0;
- for (j = 0; j < li_total_channels; j++)
- {
- li_config_table[i].flag_table[j] = 0;
- li_config_table[j].flag_table[i] = 0;
- li_config_table[i].coef_table[j] = 0;
- li_config_table[j].coef_table[i] = 0;
- }
- }
- }
-
- }
-}
-
-
-static void adv_voice_prepare_switch(dword Id, PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: adv_voice_prepare_switch",
- UnMapId(Id), (char *)(FILE_), __LINE__));
-
-}
-
-
-static word adv_voice_save_config(dword Id, PLCI *plci, byte Rc)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: adv_voice_save_config %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
- return (GOOD);
-}
-
-
-static word adv_voice_restore_config(dword Id, PLCI *plci, byte Rc)
-{
- DIVA_CAPI_ADAPTER *a;
- word Info;
-
- dbug(1, dprintf("[%06lx] %s,%d: adv_voice_restore_config %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
- Info = GOOD;
- a = plci->adapter;
- if ((plci->B1_facilities & B1_FACILITY_VOICE)
- && (plci->tel == ADV_VOICE) && (plci == a->AdvSignalPLCI))
- {
- switch (plci->adjust_b_state)
- {
- case ADJUST_B_RESTORE_VOICE_1:
- plci->internal_command = plci->adjust_b_command;
- if (plci->sig_req)
- {
- plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_1;
- break;
- }
- adv_voice_write_coefs(plci, ADV_VOICE_WRITE_UPDATE);
- plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_2;
- break;
- case ADJUST_B_RESTORE_VOICE_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Restore voice config failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _WRONG_STATE;
- break;
- }
- break;
- }
- }
- return (Info);
-}
-
-
-
-
-/*------------------------------------------------------------------*/
-/* B1 resource switching */
-/*------------------------------------------------------------------*/
-
-static byte b1_facilities_table[] =
-{
- 0x00, /* 0 No bchannel resources */
- 0x00, /* 1 Codec (automatic law) */
- 0x00, /* 2 Codec (A-law) */
- 0x00, /* 3 Codec (µ-law) */
- 0x00, /* 4 HDLC for X.21 */
- 0x00, /* 5 HDLC */
- 0x00, /* 6 External Device 0 */
- 0x00, /* 7 External Device 1 */
- 0x00, /* 8 HDLC 56k */
- 0x00, /* 9 Transparent */
- 0x00, /* 10 Loopback to network */
- 0x00, /* 11 Test pattern to net */
- 0x00, /* 12 Rate adaptation sync */
- 0x00, /* 13 Rate adaptation async */
- 0x00, /* 14 R-Interface */
- 0x00, /* 15 HDLC 128k leased line */
- 0x00, /* 16 FAX */
- 0x00, /* 17 Modem async */
- 0x00, /* 18 Modem sync HDLC */
- 0x00, /* 19 V.110 async HDLC */
- 0x12, /* 20 Adv voice (Trans,mixer) */
- 0x00, /* 21 Codec connected to IC */
- 0x0c, /* 22 Trans,DTMF */
- 0x1e, /* 23 Trans,DTMF+mixer */
- 0x1f, /* 24 Trans,DTMF+mixer+local */
- 0x13, /* 25 Trans,mixer+local */
- 0x12, /* 26 HDLC,mixer */
- 0x12, /* 27 HDLC 56k,mixer */
- 0x2c, /* 28 Trans,LEC+DTMF */
- 0x3e, /* 29 Trans,LEC+DTMF+mixer */
- 0x3f, /* 30 Trans,LEC+DTMF+mixer+local */
- 0x2c, /* 31 RTP,LEC+DTMF */
- 0x3e, /* 32 RTP,LEC+DTMF+mixer */
- 0x3f, /* 33 RTP,LEC+DTMF+mixer+local */
- 0x00, /* 34 Signaling task */
- 0x00, /* 35 PIAFS */
- 0x0c, /* 36 Trans,DTMF+TONE */
- 0x1e, /* 37 Trans,DTMF+TONE+mixer */
- 0x1f /* 38 Trans,DTMF+TONE+mixer+local*/
-};
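The table values are OR-combinations of the B1_FACILITY_* bits. The bit assignments below are inferred from the entries themselves (20 "Adv voice (Trans,mixer)" = 0x12, 22 "Trans,DTMF" = 0x0c, 28 "Trans,LEC+DTMF" = 0x2c) and are reproduced only as a reading aid, not taken from the driver header:

/* Inferred bit values -- illustration only. */
#define B1_FACILITY_LOCAL 0x01
#define B1_FACILITY_MIXER 0x02
#define B1_FACILITY_DTMFX 0x04
#define B1_FACILITY_DTMFR 0x08
#define B1_FACILITY_VOICE 0x10
#define B1_FACILITY_EC    0x20
/* Check: entry 30 "Trans,LEC+DTMF+mixer+local" = 0x3f
 *      = EC | VOICE | DTMFR | DTMFX | MIXER | LOCAL */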
-
-
-static word get_b1_facilities(PLCI *plci, byte b1_resource)
-{
- word b1_facilities;
-
- b1_facilities = b1_facilities_table[b1_resource];
- if ((b1_resource == 9) || (b1_resource == 20) || (b1_resource == 25))
- {
-
- if (!(((plci->requested_options_conn | plci->requested_options) & (1L << PRIVATE_DTMF_TONE))
- || (plci->appl && (plci->adapter->requested_options_table[plci->appl->Id - 1] & (1L << PRIVATE_DTMF_TONE)))))
-
- {
- if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_SEND)
- b1_facilities |= B1_FACILITY_DTMFX;
- if (plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)
- b1_facilities |= B1_FACILITY_DTMFR;
- }
- }
- if ((b1_resource == 17) || (b1_resource == 18))
- {
- if (plci->adapter->manufacturer_features & (MANUFACTURER_FEATURE_V18 | MANUFACTURER_FEATURE_VOWN))
- b1_facilities |= B1_FACILITY_DTMFX | B1_FACILITY_DTMFR;
- }
-/*
- dbug (1, dprintf("[%06lx] %s,%d: get_b1_facilities %d %04x",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__, b1_resource, b1_facilities));
-*/
- return (b1_facilities);
-}
-
-
-static byte add_b1_facilities(PLCI *plci, byte b1_resource, word b1_facilities)
-{
- byte b;
-
- switch (b1_resource)
- {
- case 5:
- case 26:
- if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
- b = 26;
- else
- b = 5;
- break;
-
- case 8:
- case 27:
- if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
- b = 27;
- else
- b = 8;
- break;
-
- case 9:
- case 20:
- case 22:
- case 23:
- case 24:
- case 25:
- case 28:
- case 29:
- case 30:
- case 36:
- case 37:
- case 38:
- if (b1_facilities & B1_FACILITY_EC)
- {
- if (b1_facilities & B1_FACILITY_LOCAL)
- b = 30;
- else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
- b = 29;
- else
- b = 28;
- }
-
- else if ((b1_facilities & (B1_FACILITY_DTMFX | B1_FACILITY_DTMFR | B1_FACILITY_MIXER))
- && (((plci->requested_options_conn | plci->requested_options) & (1L << PRIVATE_DTMF_TONE))
- || (plci->appl && (plci->adapter->requested_options_table[plci->appl->Id - 1] & (1L << PRIVATE_DTMF_TONE)))))
- {
- if (b1_facilities & B1_FACILITY_LOCAL)
- b = 38;
- else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
- b = 37;
- else
- b = 36;
- }
-
- else if (((plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_HARDDTMF)
- && !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE))
- || ((b1_facilities & B1_FACILITY_DTMFR)
- && ((b1_facilities & B1_FACILITY_MIXER)
- || !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE)))
- || ((b1_facilities & B1_FACILITY_DTMFX)
- && ((b1_facilities & B1_FACILITY_MIXER)
- || !(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_SOFTDTMF_SEND))))
- {
- if (b1_facilities & B1_FACILITY_LOCAL)
- b = 24;
- else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
- b = 23;
- else
- b = 22;
- }
- else
- {
- if (b1_facilities & B1_FACILITY_LOCAL)
- b = 25;
- else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
- b = 20;
- else
- b = 9;
- }
- break;
-
- case 31:
- case 32:
- case 33:
- if (b1_facilities & B1_FACILITY_LOCAL)
- b = 33;
- else if (b1_facilities & (B1_FACILITY_MIXER | B1_FACILITY_VOICE))
- b = 32;
- else
- b = 31;
- break;
-
- default:
- b = b1_resource;
- }
- dbug(1, dprintf("[%06lx] %s,%d: add_b1_facilities %d %04x %d %04x",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__,
- b1_resource, b1_facilities, b, get_b1_facilities(plci, b)));
- return (b);
-}
-
-
-static void adjust_b1_facilities(PLCI *plci, byte new_b1_resource, word new_b1_facilities)
-{
- word removed_facilities;
-
- dbug(1, dprintf("[%06lx] %s,%d: adjust_b1_facilities %d %04x %04x",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__, new_b1_resource, new_b1_facilities,
- new_b1_facilities & get_b1_facilities(plci, new_b1_resource)));
-
- new_b1_facilities &= get_b1_facilities(plci, new_b1_resource);
- removed_facilities = plci->B1_facilities & ~new_b1_facilities;
-
- if (removed_facilities & B1_FACILITY_EC)
- ec_clear_config(plci);
-
-
- if (removed_facilities & B1_FACILITY_DTMFR)
- {
- dtmf_rec_clear_config(plci);
- dtmf_parameter_clear_config(plci);
- }
- if (removed_facilities & B1_FACILITY_DTMFX)
- dtmf_send_clear_config(plci);
-
-
- if (removed_facilities & B1_FACILITY_MIXER)
- mixer_clear_config(plci);
-
- if (removed_facilities & B1_FACILITY_VOICE)
- adv_voice_clear_config(plci);
- plci->B1_facilities = new_b1_facilities;
-}
-
-
-static void adjust_b_clear(PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: adjust_b_clear",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- plci->adjust_b_restore = false;
-}
-
-
-static word adjust_b_process(dword Id, PLCI *plci, byte Rc)
-{
- word Info;
- byte b1_resource;
- NCCI *ncci_ptr;
- API_PARSE bp[2];
-
- dbug(1, dprintf("[%06lx] %s,%d: adjust_b_process %02x %d",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->adjust_b_state));
-
- Info = GOOD;
- switch (plci->adjust_b_state)
- {
- case ADJUST_B_START:
- if ((plci->adjust_b_parms_msg == NULL)
- && (plci->adjust_b_mode & ADJUST_B_MODE_SWITCH_L1)
- && ((plci->adjust_b_mode & ~(ADJUST_B_MODE_SAVE | ADJUST_B_MODE_SWITCH_L1 |
- ADJUST_B_MODE_NO_RESOURCE | ADJUST_B_MODE_RESTORE)) == 0))
- {
- b1_resource = (plci->adjust_b_mode == ADJUST_B_MODE_NO_RESOURCE) ?
- 0 : add_b1_facilities(plci, plci->B1_resource, plci->adjust_b_facilities);
- if (b1_resource == plci->B1_resource)
- {
- adjust_b1_facilities(plci, b1_resource, plci->adjust_b_facilities);
- break;
- }
- if (plci->adjust_b_facilities & ~get_b1_facilities(plci, b1_resource))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B nonsupported facilities %d %d %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__,
- plci->B1_resource, b1_resource, plci->adjust_b_facilities));
- Info = _WRONG_STATE;
- break;
- }
- }
- if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
- {
-
- mixer_prepare_switch(Id, plci);
-
-
- dtmf_prepare_switch(Id, plci);
- dtmf_parameter_prepare_switch(Id, plci);
-
-
- ec_prepare_switch(Id, plci);
-
- adv_voice_prepare_switch(Id, plci);
- }
- plci->adjust_b_state = ADJUST_B_SAVE_MIXER_1;
- Rc = OK;
- /* fall through */
- case ADJUST_B_SAVE_MIXER_1:
- if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
- {
-
- Info = mixer_save_config(Id, plci, Rc);
- if ((Info != GOOD) || plci->internal_command)
- break;
-
- }
- plci->adjust_b_state = ADJUST_B_SAVE_DTMF_1;
- Rc = OK;
- /* fall through */
- case ADJUST_B_SAVE_DTMF_1:
- if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
- {
-
- Info = dtmf_save_config(Id, plci, Rc);
- if ((Info != GOOD) || plci->internal_command)
- break;
-
- }
- plci->adjust_b_state = ADJUST_B_REMOVE_L23_1;
- /* fall through */
- case ADJUST_B_REMOVE_L23_1:
- if ((plci->adjust_b_mode & ADJUST_B_MODE_REMOVE_L23)
- && plci->NL.Id && !plci->nl_remove_id)
- {
- plci->internal_command = plci->adjust_b_command;
- if (plci->adjust_b_ncci != 0)
- {
- ncci_ptr = &(plci->adapter->ncci[plci->adjust_b_ncci]);
- while (ncci_ptr->data_pending)
- {
- plci->data_sent_ptr = ncci_ptr->DBuffer[ncci_ptr->data_out].P;
- data_rc(plci, plci->adapter->ncci_ch[plci->adjust_b_ncci]);
- }
- while (ncci_ptr->data_ack_pending)
- data_ack(plci, plci->adapter->ncci_ch[plci->adjust_b_ncci]);
- }
- nl_req_ncci(plci, REMOVE,
- (byte)((plci->adjust_b_mode & ADJUST_B_MODE_CONNECT) ? plci->adjust_b_ncci : 0));
- send_req(plci);
- plci->adjust_b_state = ADJUST_B_REMOVE_L23_2;
- break;
- }
- plci->adjust_b_state = ADJUST_B_REMOVE_L23_2;
- Rc = OK;
- /* fall through */
- case ADJUST_B_REMOVE_L23_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B remove failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _WRONG_STATE;
- break;
- }
- if (plci->adjust_b_mode & ADJUST_B_MODE_REMOVE_L23)
- {
- if (plci_nl_busy(plci))
- {
- plci->internal_command = plci->adjust_b_command;
- break;
- }
- }
- plci->adjust_b_state = ADJUST_B_SAVE_EC_1;
- Rc = OK;
- /* fall through */
- case ADJUST_B_SAVE_EC_1:
- if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
- {
-
- Info = ec_save_config(Id, plci, Rc);
- if ((Info != GOOD) || plci->internal_command)
- break;
-
- }
- plci->adjust_b_state = ADJUST_B_SAVE_DTMF_PARAMETER_1;
- Rc = OK;
- /* fall through */
- case ADJUST_B_SAVE_DTMF_PARAMETER_1:
- if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
- {
-
- Info = dtmf_parameter_save_config(Id, plci, Rc);
- if ((Info != GOOD) || plci->internal_command)
- break;
-
- }
- plci->adjust_b_state = ADJUST_B_SAVE_VOICE_1;
- Rc = OK;
- /* fall through */
- case ADJUST_B_SAVE_VOICE_1:
- if (plci->adjust_b_mode & ADJUST_B_MODE_SAVE)
- {
- Info = adv_voice_save_config(Id, plci, Rc);
- if ((Info != GOOD) || plci->internal_command)
- break;
- }
- plci->adjust_b_state = ADJUST_B_SWITCH_L1_1;
- /* fall through */
- case ADJUST_B_SWITCH_L1_1:
- if (plci->adjust_b_mode & ADJUST_B_MODE_SWITCH_L1)
- {
- if (plci->sig_req)
- {
- plci->internal_command = plci->adjust_b_command;
- break;
- }
- if (plci->adjust_b_parms_msg != NULL)
- api_load_msg(plci->adjust_b_parms_msg, bp);
- else
- api_load_msg(&plci->B_protocol, bp);
- Info = add_b1(plci, bp,
- (word)((plci->adjust_b_mode & ADJUST_B_MODE_NO_RESOURCE) ? 2 : 0),
- plci->adjust_b_facilities);
- if (Info != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B invalid L1 parameters %d %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__,
- plci->B1_resource, plci->adjust_b_facilities));
- break;
- }
- plci->internal_command = plci->adjust_b_command;
- sig_req(plci, RESOURCES, 0);
- send_req(plci);
- plci->adjust_b_state = ADJUST_B_SWITCH_L1_2;
- break;
- }
- plci->adjust_b_state = ADJUST_B_SWITCH_L1_2;
- Rc = OK;
- /* fall through */
- case ADJUST_B_SWITCH_L1_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B switch failed %02x %d %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__,
- Rc, plci->B1_resource, plci->adjust_b_facilities));
- Info = _WRONG_STATE;
- break;
- }
- plci->adjust_b_state = ADJUST_B_RESTORE_VOICE_1;
- Rc = OK;
- /* fall through */
- case ADJUST_B_RESTORE_VOICE_1:
- case ADJUST_B_RESTORE_VOICE_2:
- if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
- {
- Info = adv_voice_restore_config(Id, plci, Rc);
- if ((Info != GOOD) || plci->internal_command)
- break;
- }
- plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_PARAMETER_1;
- Rc = OK;
- /* fall through */
- case ADJUST_B_RESTORE_DTMF_PARAMETER_1:
- case ADJUST_B_RESTORE_DTMF_PARAMETER_2:
- if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
- {
-
- Info = dtmf_parameter_restore_config(Id, plci, Rc);
- if ((Info != GOOD) || plci->internal_command)
- break;
-
- }
- plci->adjust_b_state = ADJUST_B_RESTORE_EC_1;
- Rc = OK;
- /* fall through */
- case ADJUST_B_RESTORE_EC_1:
- case ADJUST_B_RESTORE_EC_2:
- if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
- {
-
- Info = ec_restore_config(Id, plci, Rc);
- if ((Info != GOOD) || plci->internal_command)
- break;
-
- }
- plci->adjust_b_state = ADJUST_B_ASSIGN_L23_1;
- /* fall through */
- case ADJUST_B_ASSIGN_L23_1:
- if (plci->adjust_b_mode & ADJUST_B_MODE_ASSIGN_L23)
- {
- if (plci_nl_busy(plci))
- {
- plci->internal_command = plci->adjust_b_command;
- break;
- }
- if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
- plci->call_dir |= CALL_DIR_FORCE_OUTG_NL;
- if (plci->adjust_b_parms_msg != NULL)
- api_load_msg(plci->adjust_b_parms_msg, bp);
- else
- api_load_msg(&plci->B_protocol, bp);
- Info = add_b23(plci, bp);
- if (Info != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B invalid L23 parameters %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Info));
- break;
- }
- plci->internal_command = plci->adjust_b_command;
- nl_req_ncci(plci, ASSIGN, 0);
- send_req(plci);
- plci->adjust_b_state = ADJUST_B_ASSIGN_L23_2;
- break;
- }
- plci->adjust_b_state = ADJUST_B_ASSIGN_L23_2;
- Rc = ASSIGN_OK;
- /* fall through */
- case ADJUST_B_ASSIGN_L23_2:
- if ((Rc != OK) && (Rc != OK_FC) && (Rc != ASSIGN_OK))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B assign failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _WRONG_STATE;
- break;
- }
- if (plci->adjust_b_mode & ADJUST_B_MODE_ASSIGN_L23)
- {
- if (Rc != ASSIGN_OK)
- {
- plci->internal_command = plci->adjust_b_command;
- break;
- }
- }
- if (plci->adjust_b_mode & ADJUST_B_MODE_USER_CONNECT)
- {
- plci->adjust_b_restore = true;
- break;
- }
- plci->adjust_b_state = ADJUST_B_CONNECT_1;
- /* fall through */
- case ADJUST_B_CONNECT_1:
- if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
- {
- plci->internal_command = plci->adjust_b_command;
- if (plci_nl_busy(plci))
- break;
- nl_req_ncci(plci, N_CONNECT, 0);
- send_req(plci);
- plci->adjust_b_state = ADJUST_B_CONNECT_2;
- break;
- }
- plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
- Rc = OK;
- /* fall through */
- case ADJUST_B_CONNECT_2:
- case ADJUST_B_CONNECT_3:
- case ADJUST_B_CONNECT_4:
- if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B connect failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _WRONG_STATE;
- break;
- }
- if (Rc == OK)
- {
- if (plci->adjust_b_mode & ADJUST_B_MODE_CONNECT)
- {
- get_ncci(plci, (byte)(Id >> 16), plci->adjust_b_ncci);
- Id = (Id & 0xffff) | (((dword)(plci->adjust_b_ncci)) << 16);
- }
- if (plci->adjust_b_state == ADJUST_B_CONNECT_2)
- plci->adjust_b_state = ADJUST_B_CONNECT_3;
- else if (plci->adjust_b_state == ADJUST_B_CONNECT_4)
- plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
- }
- else if (Rc == 0)
- {
- if (plci->adjust_b_state == ADJUST_B_CONNECT_2)
- plci->adjust_b_state = ADJUST_B_CONNECT_4;
- else if (plci->adjust_b_state == ADJUST_B_CONNECT_3)
- plci->adjust_b_state = ADJUST_B_RESTORE_DTMF_1;
- }
- if (plci->adjust_b_state != ADJUST_B_RESTORE_DTMF_1)
- {
- plci->internal_command = plci->adjust_b_command;
- break;
- }
- Rc = OK;
- /* fall through */
- case ADJUST_B_RESTORE_DTMF_1:
- case ADJUST_B_RESTORE_DTMF_2:
- if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
- {
-
- Info = dtmf_restore_config(Id, plci, Rc);
- if ((Info != GOOD) || plci->internal_command)
- break;
-
- }
- plci->adjust_b_state = ADJUST_B_RESTORE_MIXER_1;
- Rc = OK;
- /* fall through */
- case ADJUST_B_RESTORE_MIXER_1:
- case ADJUST_B_RESTORE_MIXER_2:
- case ADJUST_B_RESTORE_MIXER_3:
- case ADJUST_B_RESTORE_MIXER_4:
- case ADJUST_B_RESTORE_MIXER_5:
- case ADJUST_B_RESTORE_MIXER_6:
- case ADJUST_B_RESTORE_MIXER_7:
- if (plci->adjust_b_mode & ADJUST_B_MODE_RESTORE)
- {
-
- Info = mixer_restore_config(Id, plci, Rc);
- if ((Info != GOOD) || plci->internal_command)
- break;
-
- }
- plci->adjust_b_state = ADJUST_B_END;
- /* fall through */
- case ADJUST_B_END:
- break;
- }
- return (Info);
-}
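adjust_b_process() is the driver's resumable state machine: each invocation re-enters the switch at the saved state, a step that must wait parks its continuation in plci->internal_command and returns, and a completed step falls through to the next case with Rc forced to OK. A stripped-down sketch of the idiom, with all names hypothetical:

enum step { STEP_START, STEP_WAIT, STEP_END };

struct ctx {
	enum step state;
	int internal_command; /* nonzero: dispatcher re-invokes us on completion */
};

static int resource_busy(void) { return 0; } /* stand-in for plci_nl_busy() */
static void start_async_request(void) { }    /* stand-in for send_req() */

static int process(struct ctx *c, int rc)
{
	switch (c->state)
	{
	case STEP_START:
		if (resource_busy())
		{
			c->internal_command = 1; /* park; resume at this state */
			break;
		}
		start_async_request();
		c->state = STEP_WAIT;
		break;
	case STEP_WAIT:
		if (rc != 0)
			return -1; /* asynchronous completion failed */
		c->state = STEP_END;
		/* fall through */
	case STEP_END:
		break;
	}
	return 0;
}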
-
-
-static void adjust_b1_resource(dword Id, PLCI *plci, API_SAVE *bp_msg, word b1_facilities, word internal_command)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: adjust_b1_resource %d %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__,
- plci->B1_resource, b1_facilities));
-
- plci->adjust_b_parms_msg = bp_msg;
- plci->adjust_b_facilities = b1_facilities;
- plci->adjust_b_command = internal_command;
- plci->adjust_b_ncci = (word)(Id >> 16);
- if ((bp_msg == NULL) && (plci->B1_resource == 0))
- plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_NO_RESOURCE | ADJUST_B_MODE_SWITCH_L1;
- else
- plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_SWITCH_L1 | ADJUST_B_MODE_RESTORE;
- plci->adjust_b_state = ADJUST_B_START;
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B1 resource %d %04x...",
- UnMapId(Id), (char *)(FILE_), __LINE__,
- plci->B1_resource, b1_facilities));
-}
-
-
-static void adjust_b_restore(dword Id, PLCI *plci, byte Rc)
-{
- word internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: adjust_b_restore %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- plci->command = 0;
- if (plci->req_in != 0)
- {
- plci->internal_command = ADJUST_B_RESTORE_1;
- break;
- }
- Rc = OK;
- /* fall through */
- case ADJUST_B_RESTORE_1:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B enqueued failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- }
- plci->adjust_b_parms_msg = NULL;
- plci->adjust_b_facilities = plci->B1_facilities;
- plci->adjust_b_command = ADJUST_B_RESTORE_2;
- plci->adjust_b_ncci = (word)(Id >> 16);
- plci->adjust_b_mode = ADJUST_B_MODE_RESTORE;
- plci->adjust_b_state = ADJUST_B_START;
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B restore...",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- /* fall through */
- case ADJUST_B_RESTORE_2:
- if (adjust_b_process(Id, plci, Rc) != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Adjust B restore failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- }
- break;
- }
-}
-
-
-static void reset_b3_command(dword Id, PLCI *plci, byte Rc)
-{
- word Info;
- word internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: reset_b3_command %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- Info = GOOD;
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- plci->command = 0;
- plci->adjust_b_parms_msg = NULL;
- plci->adjust_b_facilities = plci->B1_facilities;
- plci->adjust_b_command = RESET_B3_COMMAND_1;
- plci->adjust_b_ncci = (word)(Id >> 16);
- plci->adjust_b_mode = ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_CONNECT;
- plci->adjust_b_state = ADJUST_B_START;
- dbug(1, dprintf("[%06lx] %s,%d: Reset B3...",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- /* fall through */
- case RESET_B3_COMMAND_1:
- Info = adjust_b_process(Id, plci, Rc);
- if (Info != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Reset failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- break;
- }
- if (plci->internal_command)
- return;
- break;
- }
-/* sendf (plci->appl, _RESET_B3_R | CONFIRM, Id, plci->number, "w", Info);*/
- sendf(plci->appl, _RESET_B3_I, Id, 0, "s", "");
-}
-
-
-static void select_b_command(dword Id, PLCI *plci, byte Rc)
-{
- word Info;
- word internal_command;
- byte esc_chi[3];
-
- dbug(1, dprintf("[%06lx] %s,%d: select_b_command %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- Info = GOOD;
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- plci->command = 0;
- plci->adjust_b_parms_msg = &plci->saved_msg;
- if ((plci->tel == ADV_VOICE) && (plci == plci->adapter->AdvSignalPLCI))
- plci->adjust_b_facilities = plci->B1_facilities | B1_FACILITY_VOICE;
- else
- plci->adjust_b_facilities = plci->B1_facilities & ~B1_FACILITY_VOICE;
- plci->adjust_b_command = SELECT_B_COMMAND_1;
- plci->adjust_b_ncci = (word)(Id >> 16);
- if (plci->saved_msg.parms[0].length == 0)
- {
- plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_SWITCH_L1 |
- ADJUST_B_MODE_NO_RESOURCE;
- }
- else
- {
- plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_SWITCH_L1 |
- ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_USER_CONNECT | ADJUST_B_MODE_RESTORE;
- }
- plci->adjust_b_state = ADJUST_B_START;
- dbug(1, dprintf("[%06lx] %s,%d: Select B protocol...",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- /* fall through */
- case SELECT_B_COMMAND_1:
- Info = adjust_b_process(Id, plci, Rc);
- if (Info != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: Select B protocol failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- break;
- }
- if (plci->internal_command)
- return;
- if (plci->tel == ADV_VOICE)
- {
- esc_chi[0] = 0x02;
- esc_chi[1] = 0x18;
- esc_chi[2] = plci->b_channel;
- SetVoiceChannel(plci->adapter->AdvCodecPLCI, esc_chi, plci->adapter);
- }
- break;
- }
- sendf(plci->appl, _SELECT_B_REQ | CONFIRM, Id, plci->number, "w", Info);
-}
-
-
-static void fax_connect_ack_command(dword Id, PLCI *plci, byte Rc)
-{
- word internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: fax_connect_ack_command %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- plci->command = 0; /* fall through */
- case FAX_CONNECT_ACK_COMMAND_1:
- if (plci_nl_busy(plci))
- {
- plci->internal_command = FAX_CONNECT_ACK_COMMAND_1;
- return;
- }
- plci->internal_command = FAX_CONNECT_ACK_COMMAND_2;
- plci->NData[0].P = plci->fax_connect_info_buffer;
- plci->NData[0].PLength = plci->fax_connect_info_length;
- plci->NL.X = plci->NData;
- plci->NL.ReqCh = 0;
- plci->NL.Req = plci->nl_req = (byte) N_CONNECT_ACK;
- plci->adapter->request(&plci->NL);
- return;
- case FAX_CONNECT_ACK_COMMAND_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: FAX issue CONNECT ACK failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- break;
- }
- }
- if ((plci->ncpi_state & NCPI_VALID_CONNECT_B3_ACT)
- && !(plci->ncpi_state & NCPI_CONNECT_B3_ACT_SENT))
- {
- if (plci->B3_prot == 4)
- sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
- else
- sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "S", plci->ncpi_buffer);
- plci->ncpi_state |= NCPI_CONNECT_B3_ACT_SENT;
- }
-}
-
-
-static void fax_edata_ack_command(dword Id, PLCI *plci, byte Rc)
-{
- word internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: fax_edata_ack_command %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- plci->command = 0;
- /* fall through */
- case FAX_EDATA_ACK_COMMAND_1:
- if (plci_nl_busy(plci))
- {
- plci->internal_command = FAX_EDATA_ACK_COMMAND_1;
- return;
- }
- plci->internal_command = FAX_EDATA_ACK_COMMAND_2;
- plci->NData[0].P = plci->fax_connect_info_buffer;
- plci->NData[0].PLength = plci->fax_edata_ack_length;
- plci->NL.X = plci->NData;
- plci->NL.ReqCh = 0;
- plci->NL.Req = plci->nl_req = (byte) N_EDATA;
- plci->adapter->request(&plci->NL);
- return;
- case FAX_EDATA_ACK_COMMAND_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: FAX issue EDATA ACK failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- break;
- }
- }
-}
-
-
-static void fax_connect_info_command(dword Id, PLCI *plci, byte Rc)
-{
- word Info;
- word internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: fax_connect_info_command %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- Info = GOOD;
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- plci->command = 0; /* fall through */
- case FAX_CONNECT_INFO_COMMAND_1:
- if (plci_nl_busy(plci))
- {
- plci->internal_command = FAX_CONNECT_INFO_COMMAND_1;
- return;
- }
- plci->internal_command = FAX_CONNECT_INFO_COMMAND_2;
- plci->NData[0].P = plci->fax_connect_info_buffer;
- plci->NData[0].PLength = plci->fax_connect_info_length;
- plci->NL.X = plci->NData;
- plci->NL.ReqCh = 0;
- plci->NL.Req = plci->nl_req = (byte) N_EDATA;
- plci->adapter->request(&plci->NL);
- return;
- case FAX_CONNECT_INFO_COMMAND_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: FAX setting connect info failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _WRONG_STATE;
- break;
- }
- if (plci_nl_busy(plci))
- {
- plci->internal_command = FAX_CONNECT_INFO_COMMAND_2;
- return;
- }
- plci->command = _CONNECT_B3_R;
- nl_req_ncci(plci, N_CONNECT, 0);
- send_req(plci);
- return;
- }
- sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
-}
-
-
-static void fax_adjust_b23_command(dword Id, PLCI *plci, byte Rc)
-{
- word Info;
- word internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: fax_adjust_b23_command %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- Info = GOOD;
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- plci->command = 0;
- plci->adjust_b_parms_msg = NULL;
- plci->adjust_b_facilities = plci->B1_facilities;
- plci->adjust_b_command = FAX_ADJUST_B23_COMMAND_1;
- plci->adjust_b_ncci = (word)(Id >> 16);
- plci->adjust_b_mode = ADJUST_B_MODE_REMOVE_L23 | ADJUST_B_MODE_ASSIGN_L23;
- plci->adjust_b_state = ADJUST_B_START;
- dbug(1, dprintf("[%06lx] %s,%d: FAX adjust B23...",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- /* fall through */
- case FAX_ADJUST_B23_COMMAND_1:
- Info = adjust_b_process(Id, plci, Rc);
- if (Info != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: FAX adjust failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- break;
- }
- if (plci->internal_command)
- return;
- /* fall through */
- case FAX_ADJUST_B23_COMMAND_2:
- if (plci_nl_busy(plci))
- {
- plci->internal_command = FAX_ADJUST_B23_COMMAND_2;
- return;
- }
- plci->command = _CONNECT_B3_R;
- nl_req_ncci(plci, N_CONNECT, 0);
- send_req(plci);
- return;
- }
- sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
-}
-
-
-static void fax_disconnect_command(dword Id, PLCI *plci, byte Rc)
-{
- word internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: fax_disconnect_command %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- plci->command = 0;
- plci->internal_command = FAX_DISCONNECT_COMMAND_1;
- return;
- case FAX_DISCONNECT_COMMAND_1:
- case FAX_DISCONNECT_COMMAND_2:
- case FAX_DISCONNECT_COMMAND_3:
- if ((Rc != OK) && (Rc != OK_FC) && (Rc != 0))
- {
- dbug(1, dprintf("[%06lx] %s,%d: FAX disconnect EDATA failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- break;
- }
- if (Rc == OK)
- {
- if ((internal_command == FAX_DISCONNECT_COMMAND_1)
- || (internal_command == FAX_DISCONNECT_COMMAND_2))
- {
- plci->internal_command = FAX_DISCONNECT_COMMAND_2;
- }
- }
- else if (Rc == 0)
- {
- if (internal_command == FAX_DISCONNECT_COMMAND_1)
- plci->internal_command = FAX_DISCONNECT_COMMAND_3;
- }
- return;
- }
-}
-
-
-
-static void rtp_connect_b3_req_command(dword Id, PLCI *plci, byte Rc)
-{
- word Info;
- word internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: rtp_connect_b3_req_command %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- Info = GOOD;
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- plci->command = 0; /* fall through */
- case RTP_CONNECT_B3_REQ_COMMAND_1:
- if (plci_nl_busy(plci))
- {
- plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_1;
- return;
- }
- plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_2;
- nl_req_ncci(plci, N_CONNECT, 0);
- send_req(plci);
- return;
- case RTP_CONNECT_B3_REQ_COMMAND_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: RTP setting connect info failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- Info = _WRONG_STATE;
- break;
- }
- if (plci_nl_busy(plci))
- {
- plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_2;
- return;
- }
- plci->internal_command = RTP_CONNECT_B3_REQ_COMMAND_3;
- plci->NData[0].PLength = plci->internal_req_buffer[0];
- plci->NData[0].P = plci->internal_req_buffer + 1;
- plci->NL.X = plci->NData;
- plci->NL.ReqCh = 0;
- plci->NL.Req = plci->nl_req = (byte) N_UDATA;
- plci->adapter->request(&plci->NL);
- break;
- case RTP_CONNECT_B3_REQ_COMMAND_3:
- return;
- }
- sendf(plci->appl, _CONNECT_B3_R | CONFIRM, Id, plci->number, "w", Info);
-}
-
-
-static void rtp_connect_b3_res_command(dword Id, PLCI *plci, byte Rc)
-{
- word internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: rtp_connect_b3_res_command %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- plci->command = 0; /* fall through */
- case RTP_CONNECT_B3_RES_COMMAND_1:
- if (plci_nl_busy(plci))
- {
- plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_1;
- return;
- }
- plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_2;
- nl_req_ncci(plci, N_CONNECT_ACK, (byte)(Id >> 16));
- send_req(plci);
- return;
- case RTP_CONNECT_B3_RES_COMMAND_2:
- if ((Rc != OK) && (Rc != OK_FC))
- {
- dbug(1, dprintf("[%06lx] %s,%d: RTP setting connect resp info failed %02x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc));
- break;
- }
- if (plci_nl_busy(plci))
- {
- plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_2;
- return;
- }
- sendf(plci->appl, _CONNECT_B3_ACTIVE_I, Id, 0, "s", "");
- plci->internal_command = RTP_CONNECT_B3_RES_COMMAND_3;
- plci->NData[0].PLength = plci->internal_req_buffer[0];
- plci->NData[0].P = plci->internal_req_buffer + 1;
- plci->NL.X = plci->NData;
- plci->NL.ReqCh = 0;
- plci->NL.Req = plci->nl_req = (byte) N_UDATA;
- plci->adapter->request(&plci->NL);
- return;
- case RTP_CONNECT_B3_RES_COMMAND_3:
- return;
- }
-}
-
-
-
-static void hold_save_command(dword Id, PLCI *plci, byte Rc)
-{
-	byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct */
- word Info;
- word internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: hold_save_command %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- Info = GOOD;
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- if (!plci->NL.Id)
- break;
- plci->command = 0;
- plci->adjust_b_parms_msg = NULL;
- plci->adjust_b_facilities = plci->B1_facilities;
- plci->adjust_b_command = HOLD_SAVE_COMMAND_1;
- plci->adjust_b_ncci = (word)(Id >> 16);
- plci->adjust_b_mode = ADJUST_B_MODE_SAVE | ADJUST_B_MODE_REMOVE_L23;
- plci->adjust_b_state = ADJUST_B_START;
- dbug(1, dprintf("[%06lx] %s,%d: HOLD save...",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- /* fall through */
- case HOLD_SAVE_COMMAND_1:
- Info = adjust_b_process(Id, plci, Rc);
- if (Info != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: HOLD save failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- break;
- }
- if (plci->internal_command)
- return;
- }
- sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", 3, SS_Ind);
-}
-
-
-static void retrieve_restore_command(dword Id, PLCI *plci, byte Rc)
-{
-	byte SS_Ind[] = "\x05\x03\x00\x02\x00\x00"; /* Retrieve_Ind struct */
- word Info;
- word internal_command;
-
- dbug(1, dprintf("[%06lx] %s,%d: retrieve_restore_command %02x %04x",
- UnMapId(Id), (char *)(FILE_), __LINE__, Rc, plci->internal_command));
-
- Info = GOOD;
- internal_command = plci->internal_command;
- plci->internal_command = 0;
- switch (internal_command)
- {
- default:
- plci->command = 0;
- plci->adjust_b_parms_msg = NULL;
- plci->adjust_b_facilities = plci->B1_facilities;
- plci->adjust_b_command = RETRIEVE_RESTORE_COMMAND_1;
- plci->adjust_b_ncci = (word)(Id >> 16);
- plci->adjust_b_mode = ADJUST_B_MODE_ASSIGN_L23 | ADJUST_B_MODE_USER_CONNECT | ADJUST_B_MODE_RESTORE;
- plci->adjust_b_state = ADJUST_B_START;
- dbug(1, dprintf("[%06lx] %s,%d: RETRIEVE restore...",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- /* fall through */
- case RETRIEVE_RESTORE_COMMAND_1:
- Info = adjust_b_process(Id, plci, Rc);
- if (Info != GOOD)
- {
- dbug(1, dprintf("[%06lx] %s,%d: RETRIEVE restore failed",
- UnMapId(Id), (char *)(FILE_), __LINE__));
- break;
- }
- if (plci->internal_command)
- return;
- }
- sendf(plci->appl, _FACILITY_I, Id & 0xffffL, 0, "ws", 3, SS_Ind);
-}
-
-
-static void init_b1_config(PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: init_b1_config",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- plci->B1_resource = 0;
- plci->B1_facilities = 0;
-
- plci->li_bchannel_id = 0;
- mixer_clear_config(plci);
-
-
- ec_clear_config(plci);
-
-
- dtmf_rec_clear_config(plci);
- dtmf_send_clear_config(plci);
- dtmf_parameter_clear_config(plci);
-
- adv_voice_clear_config(plci);
- adjust_b_clear(plci);
-}
-
-
-static void clear_b1_config(PLCI *plci)
-{
-
- dbug(1, dprintf("[%06lx] %s,%d: clear_b1_config",
- (dword)((plci->Id << 8) | UnMapController(plci->adapter->Id)),
- (char *)(FILE_), __LINE__));
-
- adv_voice_clear_config(plci);
- adjust_b_clear(plci);
-
- ec_clear_config(plci);
-
-
- dtmf_rec_clear_config(plci);
- dtmf_send_clear_config(plci);
- dtmf_parameter_clear_config(plci);
-
-
- if ((plci->li_bchannel_id != 0)
- && (li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci == plci))
- {
- mixer_clear_config(plci);
- li_config_table[plci->adapter->li_base + (plci->li_bchannel_id - 1)].plci = NULL;
- plci->li_bchannel_id = 0;
- }
-
- plci->B1_resource = 0;
- plci->B1_facilities = 0;
-}
-
-
-/* -----------------------------------------------------------------
- XON protocol local helpers
- ----------------------------------------------------------------- */
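-/*
-   Per-channel flow-control state used by the helpers below, as inferred
-   from their usage (a summary sketch; the bit semantics are assumptions):
-     N_CH_XOFF         - the channel has been XOFF'ed (receiver throttled)
-     N_XON_REQ         - an X_ON request is pending for the channel
-     N_XON_SENT        - an N_XON message has been handed to the adapter
-     N_XON_CONNECT_IND - re-request X_ON when a connect indication arrives
-   ch_flow_plci[ch] remembers which PLCI throttled the channel and
-   ch_flow_control_pending counts outstanding XOFF conditions.
-*/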
-static void channel_flow_control_remove(PLCI *plci) {
- DIVA_CAPI_ADAPTER *a = plci->adapter;
- word i;
- for (i = 1; i < MAX_NL_CHANNEL + 1; i++) {
- if (a->ch_flow_plci[i] == plci->Id) {
- a->ch_flow_plci[i] = 0;
- a->ch_flow_control[i] = 0;
- }
- }
-}
-
-static void channel_x_on(PLCI *plci, byte ch) {
- DIVA_CAPI_ADAPTER *a = plci->adapter;
- if (a->ch_flow_control[ch] & N_XON_SENT) {
- a->ch_flow_control[ch] &= ~N_XON_SENT;
- }
-}
-
-static void channel_x_off(PLCI *plci, byte ch, byte flag) {
- DIVA_CAPI_ADAPTER *a = plci->adapter;
- if ((a->ch_flow_control[ch] & N_RX_FLOW_CONTROL_MASK) == 0) {
- a->ch_flow_control[ch] |= (N_CH_XOFF | flag);
- a->ch_flow_plci[ch] = plci->Id;
- a->ch_flow_control_pending++;
- }
-}
-
-static void channel_request_xon(PLCI *plci, byte ch) {
- DIVA_CAPI_ADAPTER *a = plci->adapter;
-
- if (a->ch_flow_control[ch] & N_CH_XOFF) {
- a->ch_flow_control[ch] |= N_XON_REQ;
- a->ch_flow_control[ch] &= ~N_CH_XOFF;
- a->ch_flow_control[ch] &= ~N_XON_CONNECT_IND;
- }
-}
-
-static void channel_xmit_extended_xon(PLCI *plci) {
- DIVA_CAPI_ADAPTER *a;
- int max_ch = ARRAY_SIZE(a->ch_flow_control);
- int i, one_requested = 0;
-
- if ((!plci) || (!plci->Id) || ((a = plci->adapter) == NULL)) {
- return;
- }
-
- for (i = 0; i < max_ch; i++) {
- if ((a->ch_flow_control[i] & N_CH_XOFF) &&
- (a->ch_flow_control[i] & N_XON_CONNECT_IND) &&
- (plci->Id == a->ch_flow_plci[i])) {
- channel_request_xon(plci, (byte)i);
- one_requested = 1;
- }
- }
-
- if (one_requested) {
- channel_xmit_xon(plci);
- }
-}
-
-/*
-  Try to transmit the next X_ON: scan the channels round-robin, starting
-  at last_flow_control_ch, so that no channel starves
-*/
-static int find_channel_with_pending_x_on(DIVA_CAPI_ADAPTER *a, PLCI *plci) {
- int max_ch = ARRAY_SIZE(a->ch_flow_control);
- int i;
-
- if (!(plci->adapter->manufacturer_features & MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL)) {
- return (0);
- }
-
- if (a->last_flow_control_ch >= max_ch) {
- a->last_flow_control_ch = 1;
- }
- for (i = a->last_flow_control_ch; i < max_ch; i++) {
- if ((a->ch_flow_control[i] & N_XON_REQ) &&
- (plci->Id == a->ch_flow_plci[i])) {
- a->last_flow_control_ch = i + 1;
- return (i);
- }
- }
-
- for (i = 1; i < a->last_flow_control_ch; i++) {
- if ((a->ch_flow_control[i] & N_XON_REQ) &&
- (plci->Id == a->ch_flow_plci[i])) {
- a->last_flow_control_ch = i + 1;
- return (i);
- }
- }
-
- return (0);
-}
-
-static void channel_xmit_xon(PLCI *plci) {
- DIVA_CAPI_ADAPTER *a = plci->adapter;
- byte ch;
-
- if (plci->nl_req || !plci->NL.Id || plci->nl_remove_id) {
- return;
- }
- if ((ch = (byte)find_channel_with_pending_x_on(a, plci)) == 0) {
- return;
- }
- a->ch_flow_control[ch] &= ~N_XON_REQ;
- a->ch_flow_control[ch] |= N_XON_SENT;
-
- plci->NL.Req = plci->nl_req = (byte)N_XON;
- plci->NL.ReqCh = ch;
- plci->NL.X = plci->NData;
- plci->NL.XNum = 1;
- plci->NData[0].P = &plci->RBuffer[0];
- plci->NData[0].PLength = 0;
-
- plci->adapter->request(&plci->NL);
-}
-
-static int channel_can_xon(PLCI *plci, byte ch) {
- APPL *APPLptr;
- DIVA_CAPI_ADAPTER *a;
- word NCCIcode;
- dword count;
- word Num;
- word i;
-
- APPLptr = plci->appl;
- a = plci->adapter;
-
- if (!APPLptr)
- return (0);
-
- NCCIcode = a->ch_ncci[ch] | (((word) a->Id) << 8);
-
-	/* count all buffers within the application pool    */
-	/* that belong to the same NCCI; allow an XON only  */
-	/* while at most two of them are in use and a free  */
-	/* buffer remains                                   */
- count = 0;
- Num = 0xffff;
- for (i = 0; i < APPLptr->MaxBuffer; i++) {
- if (NCCIcode == APPLptr->DataNCCI[i]) count++;
- if (!APPLptr->DataNCCI[i] && Num == 0xffff) Num = i;
- }
- if ((count > 2) || (Num == 0xffff)) {
- return (0);
- }
- return (1);
-}
-
-
-/*------------------------------------------------------------------*/
-
-static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *a, word offset)
-{
- return 1;
-}
-
-
-
-/**********************************************************************************/
-/* This function groups the listening applications according to their CIP mask   */
-/* and Info_Mask. Each group gets just one Connect_Ind. Some application         */
-/* manufacturers are not multi-instance capable and start e.g. 30 applications,  */
-/* which causes big problems on the application level (one call, 30 Connect_Ind  */
-/* messages, etc.). For example, 30 identical listeners with the same masks end  */
-/* up in one group, so only one of them receives the Connect_Ind. The function   */
-/* must be enabled by setting "a->group_optimization_enabled" from the OS        */
-/* specific part (per adapter).                                                  */
-/**********************************************************************************/
-static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci)
-{
- word i, j, k, busy, group_found;
- dword info_mask_group[MAX_CIP_TYPES];
- dword cip_mask_group[MAX_CIP_TYPES];
- word appl_number_group_type[MAX_APPL];
- PLCI *auxplci;
-
- /* all APPLs within this inc. call are allowed to dial in */
- bitmap_fill(plci->group_optimization_mask_table, MAX_APPL);
-
- if (!a->group_optimization_enabled)
- {
- dbug(1, dprintf("No group optimization"));
- return;
- }
-
- dbug(1, dprintf("Group optimization = 0x%x...", a->group_optimization_enabled));
-
- for (i = 0; i < MAX_CIP_TYPES; i++)
- {
- info_mask_group[i] = 0;
- cip_mask_group[i] = 0;
- }
- for (i = 0; i < MAX_APPL; i++)
- {
- appl_number_group_type[i] = 0;
- }
-	for (i = 0; i < max_appl; i++) /* check if any multi-instance capable application is present */
-	{ /* group_optimization set to 1 means: do not optimize multi-instance capable applications (default) */
- if (application[i].Id && (application[i].MaxNCCI) > 1 && (a->CIP_Mask[i]) && (a->group_optimization_enabled == 1))
- {
- dbug(1, dprintf("Multi-Instance capable, no optimization required"));
- return; /* allow good application unfiltered access */
- }
- }
- for (i = 0; i < max_appl; i++) /* Build CIP Groups */
- {
- if (application[i].Id && a->CIP_Mask[i])
- {
- for (k = 0, busy = false; k < a->max_plci; k++)
- {
- if (a->plci[k].Id)
- {
- auxplci = &a->plci[k];
- if (auxplci->appl == &application[i]) {
- /* application has a busy PLCI */
- busy = true;
- dbug(1, dprintf("Appl 0x%x is busy", i + 1));
- } else if (test_bit(i, plci->c_ind_mask_table)) {
- /* application has an incoming call pending */
- busy = true;
- dbug(1, dprintf("Appl 0x%x has inc. call pending", i + 1));
- }
- }
- }
-
- for (j = 0, group_found = 0; j <= (MAX_CIP_TYPES) && !busy && !group_found; j++) /* build groups with free applications only */
- {
-				if (j == MAX_CIP_TYPES) /* all groups are in use but no matching group was found */
- { /* the MAX_CIP_TYPES group enables all calls because of field overflow */
- appl_number_group_type[i] = MAX_CIP_TYPES;
- group_found = true;
- dbug(1, dprintf("Field overflow appl 0x%x", i + 1));
- }
- else if ((info_mask_group[j] == a->CIP_Mask[i]) && (cip_mask_group[j] == a->Info_Mask[i]))
- { /* is group already present ? */
- appl_number_group_type[i] = j | 0x80; /* store the group number for each application */
- group_found = true;
- dbug(1, dprintf("Group 0x%x found with appl 0x%x, CIP=0x%lx", appl_number_group_type[i], i + 1, info_mask_group[j]));
- }
- else if (!info_mask_group[j])
- { /* establish a new group */
- appl_number_group_type[i] = j | 0x80; /* store the group number for each application */
- info_mask_group[j] = a->CIP_Mask[i]; /* store the new CIP mask for the new group */
- cip_mask_group[j] = a->Info_Mask[i]; /* store the new Info_Mask for this new group */
- group_found = true;
- dbug(1, dprintf("New Group 0x%x established with appl 0x%x, CIP=0x%lx", appl_number_group_type[i], i + 1, info_mask_group[j]));
- }
- }
- }
- }
-
- for (i = 0; i < max_appl; i++) /* Build group_optimization_mask_table */
- {
- if (appl_number_group_type[i]) /* application is free, has listens and is member of a group */
- {
- if (appl_number_group_type[i] == MAX_CIP_TYPES)
- {
- dbug(1, dprintf("OverflowGroup 0x%x, valid appl = 0x%x, call enabled", appl_number_group_type[i], i + 1));
- }
- else
- {
- dbug(1, dprintf("Group 0x%x, valid appl = 0x%x", appl_number_group_type[i], i + 1));
- for (j = i + 1; j < max_appl; j++) /* search other group members and mark them as busy */
- {
- if (appl_number_group_type[i] == appl_number_group_type[j])
- {
- dbug(1, dprintf("Appl 0x%x is member of group 0x%x, no call", j + 1, appl_number_group_type[j]));
- /* disable call on other group members */
- __clear_bit(j, plci->group_optimization_mask_table);
- appl_number_group_type[j] = 0; /* remove disabled group member from group list */
- }
- }
- }
- }
- else /* application should not get a call */
- {
- __clear_bit(i, plci->group_optimization_mask_table);
- }
- }
-
-}
-
-
-
-/* OS notifies the driver about an application Capi_Register */
-word CapiRegister(word id)
-{
- word i, j, appls_found;
-
- PLCI *plci;
- DIVA_CAPI_ADAPTER *a;
-
- for (i = 0, appls_found = 0; i < max_appl; i++)
- {
- if (application[i].Id && (application[i].Id != id))
- {
- appls_found++; /* an application has been found */
- }
- }
-
- if (appls_found) return true;
- for (i = 0; i < max_adapter; i++) /* scan all adapters... */
- {
- a = &adapter[i];
- if (a->request)
- {
- if (a->flag_dynamic_l1_down) /* remove adapter from L1 tristate (Huntgroup) */
- {
- if (!appls_found) /* first application does a capi register */
- {
- if ((j = get_plci(a))) /* activate L1 of all adapters */
- {
- plci = &a->plci[j - 1];
- plci->command = 0;
- add_p(plci, OAD, "\x01\xfd");
- add_p(plci, CAI, "\x01\x80");
- add_p(plci, UID, "\x06\x43\x61\x70\x69\x32\x30");
- add_p(plci, SHIFT | 6, NULL);
- add_p(plci, SIN, "\x02\x00\x00");
- plci->internal_command = START_L1_SIG_ASSIGN_PEND;
- sig_req(plci, ASSIGN, DSIG_ID);
- add_p(plci, FTY, "\x02\xff\x07"); /* l1 start */
- sig_req(plci, SIG_CTRL, 0);
- send_req(plci);
- }
- }
- }
- }
- }
- return false;
-}
-
-/*------------------------------------------------------------------*/
-
-/* Functions for virtual switching, e.g. transfer by join, conference */
-
-static void VSwitchReqInd(PLCI *plci, dword Id, byte **parms)
-{
- word i;
- /* Format of vswitch_t:
- 0 byte length
- 1 byte VSWITCHIE
- 2 byte VSWITCH_REQ/VSWITCH_IND
- 3 byte reserved
- 4 word VSwitchcommand
- 6 word returnerror
- 8... Params
- */
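-	/* A rough C-struct view of the layout above (illustrative sketch
-	   only; the code below indexes the raw byte array directly):
-
-		struct vswitch_t {
-			byte length;
-			byte ie;          VSWITCHIE
-			byte type;        VSWITCH_REQ / VSWITCH_IND
-			byte reserved;
-			word command;     VSwitchcommand
-			word error;       returnerror
-			byte params[];    Params
-		};
-	*/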
- if (!plci ||
- !plci->appl ||
- !plci->State ||
- plci->Sig.Ind == NCR_FACILITY
- )
- return;
-
- for (i = 0; i < MAX_MULTI_IE; i++)
- {
- if (!parms[i][0]) continue;
- if (parms[i][0] < 7)
- {
- parms[i][0] = 0; /* kill it */
- continue;
- }
- dbug(1, dprintf("VSwitchReqInd(%d)", parms[i][4]));
- switch (parms[i][4])
- {
- case VSJOIN:
- if (!plci->relatedPTYPLCI ||
- (plci->ptyState != S_ECT && plci->relatedPTYPLCI->ptyState != S_ECT))
- { /* Error */
- break;
- }
-			/* remember all necessary information */
- if (parms[i][0] != 11 || parms[i][8] != 3) /* Length Test */
- {
- break;
- }
- if (parms[i][2] == VSWITCH_IND && parms[i][9] == 1)
- { /* first indication after ECT-Request on Consultation Call */
- plci->vswitchstate = parms[i][9];
- parms[i][9] = 2; /* State */
- /* now ask first Call to join */
- }
- else if (parms[i][2] == VSWITCH_REQ && parms[i][9] == 3)
- { /* Answer of VSWITCH_REQ from first Call */
- plci->vswitchstate = parms[i][9];
-				/* tell the consultation call to join and pass along
-				   the protocol capabilities of the first call */
- }
- else
- { /* Error */
- break;
- }
- plci->vsprot = parms[i][10]; /* protocol */
- plci->vsprotdialect = parms[i][11]; /* protocoldialect */
- /* send join request to related PLCI */
- parms[i][1] = VSWITCHIE;
- parms[i][2] = VSWITCH_REQ;
-
- plci->relatedPTYPLCI->command = 0;
- plci->relatedPTYPLCI->internal_command = VSWITCH_REQ_PEND;
- add_p(plci->relatedPTYPLCI, ESC, &parms[i][0]);
- sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
- send_req(plci->relatedPTYPLCI);
- break;
- case VSTRANSPORT:
- default:
- if (plci->relatedPTYPLCI &&
- plci->vswitchstate == 3 &&
- plci->relatedPTYPLCI->vswitchstate == 3)
- {
- add_p(plci->relatedPTYPLCI, ESC, &parms[i][0]);
- sig_req(plci->relatedPTYPLCI, VSWITCH_REQ, 0);
- send_req(plci->relatedPTYPLCI);
- }
- break;
- }
- parms[i][0] = 0; /* kill it */
- }
-}
-
-
-/*------------------------------------------------------------------*/
-
-static int diva_get_dma_descriptor(PLCI *plci, dword *dma_magic) {
- ENTITY e;
- IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e;
-
- if (!(diva_xdi_extended_features & DIVA_CAPI_XDI_PROVIDES_RX_DMA)) {
- return (-1);
- }
-
- pReq->xdi_dma_descriptor_operation.Req = 0;
- pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
-
- pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_ALLOC;
- pReq->xdi_dma_descriptor_operation.info.descriptor_number = -1;
- pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
- pReq->xdi_dma_descriptor_operation.info.descriptor_magic = 0;
-
- e.user[0] = plci->adapter->Id - 1;
- plci->adapter->request((ENTITY *)pReq);
-
- if (!pReq->xdi_dma_descriptor_operation.info.operation &&
- (pReq->xdi_dma_descriptor_operation.info.descriptor_number >= 0) &&
- pReq->xdi_dma_descriptor_operation.info.descriptor_magic) {
- *dma_magic = pReq->xdi_dma_descriptor_operation.info.descriptor_magic;
- dbug(3, dprintf("dma_alloc, a:%d (%d-%08x)",
- plci->adapter->Id,
- pReq->xdi_dma_descriptor_operation.info.descriptor_number,
- *dma_magic));
- return (pReq->xdi_dma_descriptor_operation.info.descriptor_number);
- } else {
- dbug(1, dprintf("dma_alloc failed"));
- return (-1);
- }
-}
-
-static void diva_free_dma_descriptor(PLCI *plci, int nr) {
- ENTITY e;
- IDI_SYNC_REQ *pReq = (IDI_SYNC_REQ *)&e;
-
- if (nr < 0) {
- return;
- }
-
- pReq->xdi_dma_descriptor_operation.Req = 0;
- pReq->xdi_dma_descriptor_operation.Rc = IDI_SYNC_REQ_DMA_DESCRIPTOR_OPERATION;
-
- pReq->xdi_dma_descriptor_operation.info.operation = IDI_SYNC_REQ_DMA_DESCRIPTOR_FREE;
- pReq->xdi_dma_descriptor_operation.info.descriptor_number = nr;
- pReq->xdi_dma_descriptor_operation.info.descriptor_address = NULL;
- pReq->xdi_dma_descriptor_operation.info.descriptor_magic = 0;
-
- e.user[0] = plci->adapter->Id - 1;
- plci->adapter->request((ENTITY *)pReq);
-
- if (!pReq->xdi_dma_descriptor_operation.info.operation) {
- dbug(1, dprintf("dma_free(%d)", nr));
- } else {
- dbug(1, dprintf("dma_free failed (%d)", nr));
- }
-}
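-/*
-  A minimal usage sketch for the two helpers above (assumes the caller
-  holds a valid PLCI; the surrounding transfer code and its error
-  handling are omitted):
-
-	dword dma_magic;
-	int nr = diva_get_dma_descriptor(plci, &dma_magic);
-	if (nr >= 0) {
-		... pass "nr" and "dma_magic" to the adapter for RX DMA ...
-		diva_free_dma_descriptor(plci, nr);
-	}
-*/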
-
-/*------------------------------------------------------------------*/
diff --git a/drivers/isdn/hardware/eicon/mi_pc.h b/drivers/isdn/hardware/eicon/mi_pc.h
deleted file mode 100644
index 83e9ed8c1bf3..000000000000
--- a/drivers/isdn/hardware/eicon/mi_pc.h
+++ /dev/null
@@ -1,204 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-/*----------------------------------------------------------------------------
-// MAESTRA ISA PnP */
-#define BRI_MEMORY_BASE 0x1f700000
-#define BRI_MEMORY_SIZE 0x00100000 /* 1MB on the BRI */
-#define BRI_SHARED_RAM_SIZE 0x00010000 /* 64k shared RAM */
-#define BRI_RAY_TAYLOR_DSP_CODE_SIZE 0x00020000 /* max 128k DSP-Code (Ray Taylor's code) */
-#define BRI_ORG_MAX_DSP_CODE_SIZE 0x00050000 /* max 320k DSP-Code (Telindus) */
-#define BRI_V90D_MAX_DSP_CODE_SIZE 0x00060000 /* max 384k DSP-Code if V.90D included */
-#define BRI_CACHED_ADDR(x) (((x) & 0x1fffffffL) | 0x80000000L)
-#define BRI_UNCACHED_ADDR(x) (((x) & 0x1fffffffL) | 0xa0000000L)
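-/* The two macros above map a physical address into the MIPS KSEG0
-   (cached, base 0x80000000) and KSEG1 (uncached, base 0xa0000000)
-   segments. */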
-#define ADDR 4
-#define ADDRH 6
-#define DATA 0
-#define RESET 7
-#define DEFAULT_ADDRESS 0x240
-#define DEFAULT_IRQ 3
-#define M_PCI_ADDR 0x04 /* MAESTRA BRI PCI */
-#define M_PCI_ADDRH 0x0c /* MAESTRA BRI PCI */
-#define M_PCI_DATA 0x00 /* MAESTRA BRI PCI */
-#define M_PCI_RESET 0x10 /* MAESTRA BRI PCI */
-/*----------------------------------------------------------------------------
-// MAESTRA PRI PCI */
-#define MP_IRQ_RESET 0xc18 /* offset of isr in the CONFIG memory bar */
-#define MP_IRQ_RESET_VAL 0xfe /* value to clear an interrupt */
-#define MP_MEMORY_SIZE 0x00400000 /* 4MB on standard PRI */
-#define MP2_MEMORY_SIZE 0x00800000 /* 8MB on PRI Rev. 2 */
-#define MP_SHARED_RAM_OFFSET 0x00001000 /* offset of shared RAM base in the DRAM memory bar */
-#define MP_SHARED_RAM_SIZE 0x00010000 /* 64k shared RAM */
-#define MP_PROTOCOL_OFFSET (MP_SHARED_RAM_OFFSET + MP_SHARED_RAM_SIZE)
-#define MP_RAY_TAYLOR_DSP_CODE_SIZE 0x00040000 /* max 256k DSP-Code (Ray Taylor's code) */
-#define MP_ORG_MAX_DSP_CODE_SIZE 0x00060000 /* max 384k DSP-Code (Telindus) */
-#define MP_V90D_MAX_DSP_CODE_SIZE 0x00070000 /* max 448k DSP-Code if V.90D included */
-#define MP_VOIP_MAX_DSP_CODE_SIZE 0x00090000 /* max 576k DSP-Code if voice over IP included */
-#define MP_CACHED_ADDR(x) (((x) & 0x1fffffffL) | 0x80000000L)
-#define MP_UNCACHED_ADDR(x) (((x) & 0x1fffffffL) | 0xa0000000L)
-#define MP_RESET 0x20 /* offset of RESET register in the DEVICES memory bar */
-/* RESET register bits */
-#define _MP_S2M_RESET 0x10 /* active lo */
-#define _MP_LED2 0x08 /* 1 = on */
-#define _MP_LED1 0x04 /* 1 = on */
-#define _MP_DSP_RESET 0x02 /* active lo */
-#define _MP_RISC_RESET 0x81 /* active hi, bit 7 for compatibility with old boards */
-/* CPU exception context structure in MP shared ram after trap */
-typedef struct mp_xcptcontext_s MP_XCPTC;
-struct mp_xcptcontext_s {
- dword sr;
- dword cr;
- dword epc;
- dword vaddr;
- dword regs[32];
- dword mdlo;
- dword mdhi;
- dword reseverd;
- dword xclass;
-};
-/* boot interface structure for PRI */
-struct mp_load {
- dword volatile cmd;
- dword volatile addr;
- dword volatile len;
- dword volatile err;
- dword volatile live;
- dword volatile res1[0x1b];
- dword volatile TrapId; /* has value 0x999999XX on a CPU trap */
- dword volatile res2[0x03];
- MP_XCPTC volatile xcpt; /* contains register dump */
- dword volatile rest[((0x1020 >> 2) - 6) - 0x1b - 1 - 0x03 - (sizeof(MP_XCPTC) >> 2)];
- dword volatile signature;
- dword data[60000]; /* real interface description */
-};
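-/* The "rest" array pads mp_load so that "signature" occupies the last
-   dword of the first 0x1020 bytes: (0x1020 >> 2) dwords in total, minus
-   the five dwords cmd..live plus signature itself (the "- 6"), minus
-   res1, TrapId, res2 and the exception context (a reading of the size
-   arithmetic, stated here for clarity). */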
-/*----------------------------------------------------------------------------*/
-/* SERVER 4BRI (Quattro PCI) */
-#define MQ_BOARD_REG_OFFSET 0x800000 /* PC relative On board registers offset */
-#define MQ_BREG_RISC 0x1200 /* RISC Reset etc. */
-#define MQ_RISC_COLD_RESET_MASK 0x0001 /* RISC Cold reset */
-#define MQ_RISC_WARM_RESET_MASK 0x0002 /* RISC Warm reset */
-#define MQ_BREG_IRQ_TEST 0x0608 /* Interrupt request, no CPU interaction */
-#define MQ_IRQ_REQ_ON 0x1
-#define MQ_IRQ_REQ_OFF 0x0
-#define MQ_BOARD_DSP_OFFSET 0xa00000 /* PC relative On board DSP regs offset */
-#define MQ_DSP1_ADDR_OFFSET 0x0008 /* Addr register offset DSP 1 subboard 1 */
-#define MQ_DSP2_ADDR_OFFSET 0x0208 /* Addr register offset DSP 2 subboard 1 */
-#define MQ_DSP1_DATA_OFFSET 0x0000 /* Data register offset DSP 1 subboard 1 */
-#define MQ_DSP2_DATA_OFFSET 0x0200 /* Data register offset DSP 2 subboard 1 */
-#define MQ_DSP_JUNK_OFFSET 0x0400 /* DSP Data/Addr regs subboard offset */
-#define MQ_ISAC_DSP_RESET 0x0028 /* ISAC and DSP reset address offset */
-#define MQ_BOARD_ISAC_DSP_RESET 0x800028 /* ISAC and DSP reset address offset */
-#define MQ_INSTANCE_COUNT 4 /* 4BRI consists of four instances */
-#define MQ_MEMORY_SIZE 0x00400000 /* 4MB on standard 4BRI */
-#define MQ_CTRL_SIZE 0x00002000 /* 8K memory mapped registers */
-#define MQ_SHARED_RAM_SIZE 0x00010000 /* 64k shared RAM */
-#define MQ_ORG_MAX_DSP_CODE_SIZE 0x00050000 /* max 320k DSP-Code (Telindus) */
-#define MQ_V90D_MAX_DSP_CODE_SIZE 0x00060000 /* max 384K DSP-Code if V.90D included */
-#define MQ_VOIP_MAX_DSP_CODE_SIZE 0x00028000 /* max 4*160k = 640K DSP-Code if voice over IP included */
-#define MQ_CACHED_ADDR(x) (((x) & 0x1fffffffL) | 0x80000000L)
-#define MQ_UNCACHED_ADDR(x) (((x) & 0x1fffffffL) | 0xa0000000L)
-/*--------------------------------------------------------------------------------------------*/
-/* Additional definitions reflecting the different address map of the SERVER 4BRI V2 */
-#define MQ2_BREG_RISC 0x0200 /* RISC Reset etc. */
-#define MQ2_BREG_IRQ_TEST 0x0400 /* Interrupt request, no CPU interaction */
-#define MQ2_BOARD_DSP_OFFSET 0x800000 /* PC relative On board DSP regs offset */
-#define MQ2_DSP1_DATA_OFFSET 0x1800 /* Data register offset DSP 1 subboard 1 */
-#define MQ2_DSP1_ADDR_OFFSET 0x1808 /* Addr register offset DSP 1 subboard 1 */
-#define MQ2_DSP2_DATA_OFFSET 0x1810 /* Data register offset DSP 2 subboard 1 */
-#define MQ2_DSP2_ADDR_OFFSET 0x1818 /* Addr register offset DSP 2 subboard 1 */
-#define MQ2_DSP_JUNK_OFFSET 0x1000 /* DSP Data/Addr regs subboard offset */
-#define MQ2_ISAC_DSP_RESET 0x0000 /* ISAC and DSP reset address offset */
-#define MQ2_BOARD_ISAC_DSP_RESET 0x800000 /* ISAC and DSP reset address offset */
-#define MQ2_IPACX_CONFIG 0x0300 /* IPACX Configuration TE(0)/NT(1) */
-#define MQ2_BOARD_IPACX_CONFIG 0x800300 /* "" */
-#define MQ2_MEMORY_SIZE 0x01000000 /* 16MB code/data memory */
-#define MQ2_CTRL_SIZE 0x00008000 /* 32K memory mapped registers */
-/*----------------------------------------------------------------------------*/
-/* SERVER BRI 2M/2F as derived from 4BRI V2 */
-#define BRI2_MEMORY_SIZE 0x00800000 /* 8MB code/data memory */
-#define BRI2_PROTOCOL_MEMORY_SIZE (MQ2_MEMORY_SIZE >> 2) /* same as one 4BRI Rev.2 task */
-#define BRI2_CTRL_SIZE 0x00008000 /* 32K memory mapped registers */
-#define M_INSTANCE_COUNT 1 /* BRI consists of one instance */
-/*
- * Some useful constants for proper initialization of the GT6401x
- */
-#define ID_REG 0x0000 /* PCI reg - contains the Dev & Ven ID of the card */
-#define RAS0_BASEREG 0x0010 /* Ras0 register - contains the Ras0 base addr */
-#define RAS2_BASEREG 0x0014
-#define CS_BASEREG 0x0018
-#define BOOT_BASEREG 0x001c
-#define GTREGS_BASEREG 0x0024 /* GTRegsBase reg - contains the base addr where */
-				/* the GT64010 internal regs are mapped */
-/*
- * GT64010 internal registers
- */
-/* DRAM device coding */
-#define LOW_RAS0_DREG 0x0400 /*Ras0 low decode address*/
-#define HI_RAS0_DREG 0x0404 /*Ras0 high decode address*/
-#define LOW_RAS1_DREG 0x0408 /*Ras1 low decode address*/
-#define HI_RAS1_DREG 0x040c /*Ras1 high decode address*/
-#define LOW_RAS2_DREG 0x0410 /*Ras2 low decode address*/
-#define HI_RAS2_DREG 0x0414 /*Ras2 high decode address*/
-#define LOW_RAS3_DREG 0x0418 /*Ras3 low decode address*/
-#define HI_RAS3_DREG 0x041c /*Ras3 high decode address*/
-/* I/O CS device coding */
-#define LOW_CS0_DREG 0x0420 /* CS0* low decode register */
-#define HI_CS0_DREG 0x0424 /* CS0* high decode register */
-#define LOW_CS1_DREG 0x0428 /* CS1* low decode register */
-#define HI_CS1_DREG 0x042c /* CS1* high decode register */
-#define LOW_CS2_DREG 0x0430 /* CS2* low decode register */
-#define HI_CS2_DREG 0x0434 /* CS2* high decode register */
-#define LOW_CS3_DREG 0x0438 /* CS3* low decode register */
-#define HI_CS3_DREG 0x043c /* CS3* high decode register */
-/* Boot PROM device coding */
-#define LOW_BOOTCS_DREG 0x0440 /* Boot CS low decode register */
-#define HI_BOOTCS_DREG 0x0444 /* Boot CS High decode register */
-/* DRAM group coding (for CPU) */
-#define LO_RAS10_GREG 0x0008 /*Ras1..0 group low decode address*/
-#define HI_RAS10_GREG 0x0010 /*Ras1..0 group high decode address*/
-#define LO_RAS32_GREG 0x0018 /*Ras3..2 group low decode address */
-#define HI_RAS32_GREG 0x0020 /*Ras3..2 group high decode address */
-/* I/O CS group coding for (CPU) */
-#define LO_CS20_GREG 0x0028 /* CS2..0 group low decode register */
-#define HI_CS20_GREG 0x0030 /* CS2..0 group high decode register */
-#define LO_CS3B_GREG 0x0038 /* CS3 & PROM group low decode register */
-#define HI_CS3B_GREG 0x0040 /* CS3 & PROM group high decode register */
-/* Galileo specific PCI config. */
-#define PCI_TIMEOUT_RET 0x0c04 /* Time Out and retry register */
-#define RAS10_BANKSIZE 0x0c08 /* RAS 1..0 group PCI bank size */
-#define RAS32_BANKSIZE 0x0c0c /* RAS 3..2 group PCI bank size */
-#define CS20_BANKSIZE 0x0c10 /* CS 2..0 group PCI bank size */
-#define CS3B_BANKSIZE 0x0c14 /* CS 3 & Boot group PCI bank size */
-#define DRAM_SIZE 0x0001 /* DRAM size in megabytes */
-#define PROM_SIZE 0x08000 /*Prom size in bytes*/
-/*--------------------------------------------------------------------------*/
-#define OFFS_DIVA_INIT_TASK_COUNT 0x68
-#define OFFS_DSP_CODE_BASE_ADDR 0x6c
-#define OFFS_XLOG_BUF_ADDR 0x70
-#define OFFS_XLOG_COUNT_ADDR 0x74
-#define OFFS_XLOG_OUT_ADDR 0x78
-#define OFFS_PROTOCOL_END_ADDR 0x7c
-#define OFFS_PROTOCOL_ID_STRING 0x80
-/*--------------------------------------------------------------------------*/
diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
deleted file mode 100644
index 1cd9affb6058..000000000000
--- a/drivers/isdn/hardware/eicon/mntfunc.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/* $Id: mntfunc.c,v 1.19.6.4 2005/01/31 12:22:20 armin Exp $
- *
- * Driver for Eicon DIVA Server ISDN cards.
- * Maint module
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-
-#include "platform.h"
-#include "di_defs.h"
-#include "divasync.h"
-#include "debug_if.h"
-
-extern char *DRIVERRELEASE_MNT;
-
-#define DBG_MINIMUM (DL_LOG + DL_FTL + DL_ERR)
-#define DBG_DEFAULT (DBG_MINIMUM + DL_XLOG + DL_REG)
-
-extern void DIVA_DIDD_Read(void *, int);
-
-static dword notify_handle;
-static DESCRIPTOR DAdapter;
-static DESCRIPTOR MAdapter;
-static DESCRIPTOR MaintDescriptor =
-{ IDI_DIMAINT, 0, 0, (IDI_CALL) diva_maint_prtComp };
-
-extern int diva_os_copy_to_user(void *os_handle, void __user *dst,
- const void *src, int length);
-extern int diva_os_copy_from_user(void *os_handle, void *dst,
- const void __user *src, int length);
-
-static void no_printf(unsigned char *x, ...)
-{
- /* dummy debug function */
-}
-
-#include "debuglib.c"
-
-/*
- * DIDD callback function
- */
-static void *didd_callback(void *context, DESCRIPTOR *adapter,
- int removal)
-{
- if (adapter->type == IDI_DADAPTER) {
- DBG_ERR(("cb: Change in DAdapter ? Oops ?."));
- } else if (adapter->type == IDI_DIMAINT) {
- if (removal) {
- DbgDeregister();
- memset(&MAdapter, 0, sizeof(MAdapter));
- dprintf = no_printf;
- } else {
- memcpy(&MAdapter, adapter, sizeof(MAdapter));
- dprintf = (DIVA_DI_PRINTF) MAdapter.request;
- DbgRegister("MAINT", DRIVERRELEASE_MNT, DBG_DEFAULT);
- }
- } else if ((adapter->type > 0) && (adapter->type < 16)) {
- if (removal) {
- diva_mnt_remove_xdi_adapter(adapter);
- } else {
- diva_mnt_add_xdi_adapter(adapter);
- }
- }
- return (NULL);
-}
-
-/*
- * connect to didd
- */
-static int __init connect_didd(void)
-{
- int x = 0;
- int dadapter = 0;
- IDI_SYNC_REQ req;
- DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
-
- DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
-
- for (x = 0; x < MAX_DESCRIPTORS; x++) {
- if (DIDD_Table[x].type == IDI_DADAPTER) { /* DADAPTER found */
- dadapter = 1;
- memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter));
- req.didd_notify.e.Req = 0;
- req.didd_notify.e.Rc =
- IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
- req.didd_notify.info.callback = (void *)didd_callback;
- req.didd_notify.info.context = NULL;
- DAdapter.request((ENTITY *)&req);
- if (req.didd_notify.e.Rc != 0xff)
- return (0);
- notify_handle = req.didd_notify.info.handle;
- /* Register MAINT (me) */
- req.didd_add_adapter.e.Req = 0;
- req.didd_add_adapter.e.Rc =
- IDI_SYNC_REQ_DIDD_ADD_ADAPTER;
- req.didd_add_adapter.info.descriptor =
- (void *) &MaintDescriptor;
- DAdapter.request((ENTITY *)&req);
- if (req.didd_add_adapter.e.Rc != 0xff)
- return (0);
- } else if ((DIDD_Table[x].type > 0)
- && (DIDD_Table[x].type < 16)) {
- diva_mnt_add_xdi_adapter(&DIDD_Table[x]);
- }
- }
- return (dadapter);
-}
-
-/*
- * disconnect from didd
- */
-static void __exit disconnect_didd(void)
-{
- IDI_SYNC_REQ req;
-
- req.didd_notify.e.Req = 0;
- req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
- req.didd_notify.info.handle = notify_handle;
- DAdapter.request((ENTITY *)&req);
-
- req.didd_remove_adapter.e.Req = 0;
- req.didd_remove_adapter.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER;
- req.didd_remove_adapter.info.p_request =
- (IDI_CALL) MaintDescriptor.request;
- DAdapter.request((ENTITY *)&req);
-}
-
-/*
- * read/write maint
- */
-int maint_read_write(void __user *buf, int count)
-{
- byte data[128];
- dword cmd, id, mask;
- int ret = 0;
-
- if (count < (3 * sizeof(dword)))
- return (-EFAULT);
-
- if (diva_os_copy_from_user(NULL, (void *) &data[0],
- buf, 3 * sizeof(dword))) {
- return (-EFAULT);
- }
-
- cmd = *(dword *)&data[0]; /* command */
- id = *(dword *)&data[4]; /* driver id */
- mask = *(dword *)&data[8]; /* mask or size */
-
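-	/*
-	  Request header layout (12 bytes): dword 0 = command, dword 1 =
-	  driver id, dword 2 = mask or size. Any payload follows at
-	  offset 12 (see the selective trace filter write below).
-	*/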
- switch (cmd) {
- case DITRACE_CMD_GET_DRIVER_INFO:
- if ((ret = diva_get_driver_info(id, data, sizeof(data))) > 0) {
- if ((count < ret) || diva_os_copy_to_user
- (NULL, buf, (void *) &data[0], ret))
- ret = -EFAULT;
- } else {
- ret = -EINVAL;
- }
- break;
-
- case DITRACE_READ_DRIVER_DBG_MASK:
- if ((ret = diva_get_driver_dbg_mask(id, (byte *) data)) > 0) {
- if ((count < ret) || diva_os_copy_to_user
- (NULL, buf, (void *) &data[0], ret))
- ret = -EFAULT;
- } else {
- ret = -ENODEV;
- }
- break;
-
- case DITRACE_WRITE_DRIVER_DBG_MASK:
- if ((ret = diva_set_driver_dbg_mask(id, mask)) <= 0) {
- ret = -ENODEV;
- }
- break;
-
- /*
-	  Filter commands ignore the ID because filtering affects the
-	  B-channel and Audio Tap trace levels only. The MAINT driver will
-	  also select the right trace ID by itself
- */
- case DITRACE_WRITE_SELECTIVE_TRACE_FILTER:
- if (!mask) {
- ret = diva_set_trace_filter(1, "*");
- } else if (mask < sizeof(data)) {
- if (diva_os_copy_from_user(NULL, data, (char __user *)buf + 12, mask)) {
- ret = -EFAULT;
- } else {
- ret = diva_set_trace_filter((int)mask, data);
- }
- } else {
- ret = -EINVAL;
- }
- break;
-
- case DITRACE_READ_SELECTIVE_TRACE_FILTER:
- if ((ret = diva_get_trace_filter(sizeof(data), data)) > 0) {
- if (diva_os_copy_to_user(NULL, buf, data, ret))
- ret = -EFAULT;
- } else {
- ret = -ENODEV;
- }
- break;
-
- case DITRACE_READ_TRACE_ENTRY:{
- diva_os_spin_lock_magic_t old_irql;
- word size;
- diva_dbg_entry_head_t *pmsg;
- byte *pbuf;
-
- if (!(pbuf = diva_os_malloc(0, mask))) {
- return (-ENOMEM);
- }
-
- for (;;) {
- if (!(pmsg =
- diva_maint_get_message(&size, &old_irql))) {
- break;
- }
- if (size > mask) {
- diva_maint_ack_message(0, &old_irql);
- ret = -EINVAL;
- break;
- }
- ret = size;
- memcpy(pbuf, pmsg, size);
- diva_maint_ack_message(1, &old_irql);
- if ((count < size) ||
- diva_os_copy_to_user(NULL, buf, (void *) pbuf, size))
- ret = -EFAULT;
- break;
- }
- diva_os_free(0, pbuf);
- }
- break;
-
- case DITRACE_READ_TRACE_ENTRYS:{
- diva_os_spin_lock_magic_t old_irql;
- word size;
- diva_dbg_entry_head_t *pmsg;
- byte *pbuf = NULL;
- int written = 0;
-
- if (mask < 4096) {
- ret = -EINVAL;
- break;
- }
- if (!(pbuf = diva_os_malloc(0, mask))) {
- return (-ENOMEM);
- }
-
- for (;;) {
- if (!(pmsg =
- diva_maint_get_message(&size, &old_irql))) {
- break;
- }
- if ((size + 8) > mask) {
- diva_maint_ack_message(0, &old_irql);
- break;
- }
- /*
- Write entry length
- */
- pbuf[written++] = (byte) size;
- pbuf[written++] = (byte) (size >> 8);
- pbuf[written++] = 0;
- pbuf[written++] = 0;
- /*
- Write message
- */
- memcpy(&pbuf[written], pmsg, size);
- diva_maint_ack_message(1, &old_irql);
- written += size;
- mask -= (size + 4);
- }
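-				/* terminate the list with a zero-length entry header */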
- pbuf[written++] = 0;
- pbuf[written++] = 0;
- pbuf[written++] = 0;
- pbuf[written++] = 0;
-
- if ((count < written) || diva_os_copy_to_user(NULL, buf, (void *) pbuf, written)) {
- ret = -EFAULT;
- } else {
- ret = written;
- }
- diva_os_free(0, pbuf);
- }
- break;
-
- default:
- ret = -EINVAL;
- }
- return (ret);
-}
-
-/*
- * init
- */
-int __init mntfunc_init(int *buffer_length, void **buffer,
- unsigned long diva_dbg_mem)
-{
- if (*buffer_length < 64) {
- *buffer_length = 64;
- }
- if (*buffer_length > 512) {
- *buffer_length = 512;
- }
- *buffer_length *= 1024;
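-	/* the caller passes buffer_length in KB; it is clamped to the range
-	   64..512 KB above and converted to bytes here */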
-
- if (diva_dbg_mem) {
- *buffer = (void *) diva_dbg_mem;
- } else {
- while ((*buffer_length >= (64 * 1024))
- &&
- (!(*buffer = diva_os_malloc(0, *buffer_length)))) {
- *buffer_length -= 1024;
- }
-
- if (!*buffer) {
- DBG_ERR(("init: Can not alloc trace buffer"));
- return (0);
- }
- }
-
- if (diva_maint_init(*buffer, *buffer_length, (diva_dbg_mem == 0))) {
- if (!diva_dbg_mem) {
- diva_os_free(0, *buffer);
- }
- DBG_ERR(("init: maint init failed"));
- return (0);
- }
-
- if (!connect_didd()) {
- DBG_ERR(("init: failed to connect to DIDD."));
- diva_maint_finit();
- if (!diva_dbg_mem) {
- diva_os_free(0, *buffer);
- }
- return (0);
- }
- return (1);
-}
-
-/*
- * exit
- */
-void __exit mntfunc_finit(void)
-{
- void *buffer;
- int i = 100;
-
- DbgDeregister();
-
- while (diva_mnt_shutdown_xdi_adapters() && i--) {
- diva_os_sleep(10);
- }
-
- disconnect_didd();
-
- if ((buffer = diva_maint_finit())) {
- diva_os_free(0, buffer);
- }
-
- memset(&MAdapter, 0, sizeof(MAdapter));
- dprintf = no_printf;
-}
diff --git a/drivers/isdn/hardware/eicon/os_4bri.c b/drivers/isdn/hardware/eicon/os_4bri.c
deleted file mode 100644
index 87db5f4df27d..000000000000
--- a/drivers/isdn/hardware/eicon/os_4bri.c
+++ /dev/null
@@ -1,1132 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* $Id: os_4bri.c,v 1.28.4.4 2005/02/11 19:40:25 armin Exp $ */
-
-#include "platform.h"
-#include "debuglib.h"
-#include "cardtype.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di_defs.h"
-#include "dsp_defs.h"
-#include "di.h"
-#include "io.h"
-
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "os_4bri.h"
-#include "diva_pci.h"
-#include "mi_pc.h"
-#include "dsrv4bri.h"
-#include "helpers.h"
-
-static void *diva_xdiLoadFileFile = NULL;
-static dword diva_xdiLoadFileLength = 0;
-
-/*
-** IMPORTS
-*/
-extern void prepare_qBri_functions(PISDN_ADAPTER IoAdapter);
-extern void prepare_qBri2_functions(PISDN_ADAPTER IoAdapter);
-extern void diva_xdi_display_adapter_features(int card);
-extern void diva_add_slave_adapter(diva_os_xdi_adapter_t *a);
-
-extern int qBri_FPGA_download(PISDN_ADAPTER IoAdapter);
-extern void start_qBri_hardware(PISDN_ADAPTER IoAdapter);
-
-extern int diva_card_read_xlog(diva_os_xdi_adapter_t *a);
-
-/*
-** LOCALS
-*/
-static unsigned long _4bri_bar_length[4] = {
- 0x100,
- 0x100, /* I/O */
- MQ_MEMORY_SIZE,
- 0x2000
-};
-static unsigned long _4bri_v2_bar_length[4] = {
- 0x100,
- 0x100, /* I/O */
- MQ2_MEMORY_SIZE,
- 0x10000
-};
-static unsigned long _4bri_v2_bri_bar_length[4] = {
- 0x100,
- 0x100, /* I/O */
- BRI2_MEMORY_SIZE,
- 0x10000
-};
-
-
-static int diva_4bri_cleanup_adapter(diva_os_xdi_adapter_t *a);
-static int _4bri_get_serial_number(diva_os_xdi_adapter_t *a);
-static int diva_4bri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
- diva_xdi_um_cfg_cmd_t *cmd,
- int length);
-static int diva_4bri_cleanup_slave_adapters(diva_os_xdi_adapter_t *a);
-static int diva_4bri_write_fpga_image(diva_os_xdi_adapter_t *a,
- byte *data, dword length);
-static int diva_4bri_reset_adapter(PISDN_ADAPTER IoAdapter);
-static int diva_4bri_write_sdram_block(PISDN_ADAPTER IoAdapter,
- dword address,
- const byte *data,
- dword length, dword limit);
-static int diva_4bri_start_adapter(PISDN_ADAPTER IoAdapter,
- dword start_address, dword features);
-static int check_qBri_interrupt(PISDN_ADAPTER IoAdapter);
-static int diva_4bri_stop_adapter(diva_os_xdi_adapter_t *a);
-
-static int _4bri_is_rev_2_card(int card_ordinal)
-{
- switch (card_ordinal) {
- case CARDTYPE_DIVASRV_Q_8M_V2_PCI:
- case CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI:
- case CARDTYPE_DIVASRV_B_2M_V2_PCI:
- case CARDTYPE_DIVASRV_B_2F_PCI:
- case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI:
- return (1);
- }
- return (0);
-}
-
-static int _4bri_is_rev_2_bri_card(int card_ordinal)
-{
- switch (card_ordinal) {
- case CARDTYPE_DIVASRV_B_2M_V2_PCI:
- case CARDTYPE_DIVASRV_B_2F_PCI:
- case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI:
- return (1);
- }
- return (0);
-}
-
-static void diva_4bri_set_addresses(diva_os_xdi_adapter_t *a)
-{
- dword offset = a->resources.pci.qoffset;
- dword c_offset = offset * a->xdi_adapter.ControllerNumber;
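-	/* each controller instance gets its own qoffset-sized slice of the
-	   shared SDRAM BAR; qoffset is set to (MemorySize >> factor) in
-	   diva_4bri_init_card */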
-
- a->resources.pci.mem_type_id[MEM_TYPE_RAM] = 2;
- a->resources.pci.mem_type_id[MEM_TYPE_ADDRESS] = 2;
- a->resources.pci.mem_type_id[MEM_TYPE_CONTROL] = 2;
- a->resources.pci.mem_type_id[MEM_TYPE_RESET] = 0;
- a->resources.pci.mem_type_id[MEM_TYPE_CTLREG] = 3;
- a->resources.pci.mem_type_id[MEM_TYPE_PROM] = 0;
-
- /*
- Set up hardware related pointers
- */
- a->xdi_adapter.Address = a->resources.pci.addr[2]; /* BAR2 SDRAM */
- a->xdi_adapter.Address += c_offset;
-
- a->xdi_adapter.Control = a->resources.pci.addr[2]; /* BAR2 SDRAM */
-
- a->xdi_adapter.ram = a->resources.pci.addr[2]; /* BAR2 SDRAM */
- a->xdi_adapter.ram += c_offset + (offset - MQ_SHARED_RAM_SIZE);
-
- a->xdi_adapter.reset = a->resources.pci.addr[0]; /* BAR0 CONFIG */
- /*
- ctlReg contains the register address for the MIPS CPU reset control
- */
- a->xdi_adapter.ctlReg = a->resources.pci.addr[3]; /* BAR3 CNTRL */
- /*
- prom contains the register address for FPGA and EEPROM programming
- */
- a->xdi_adapter.prom = &a->xdi_adapter.reset[0x6E];
-}
-
-/*
-** BAR0 - MEM - 0x100 - CONFIG MEM
-** BAR1 - I/O - 0x100 - UNUSED
-** BAR2 - MEM - MQ_MEMORY_SIZE (MQ2_MEMORY_SIZE on Rev.2) - SDRAM
-** BAR3 - MEM - 0x2000 (0x10000 on Rev.2) - CNTRL
-**
-** Called by the master adapter, which will initialize and add the slave adapters
-*/
-int diva_4bri_init_card(diva_os_xdi_adapter_t *a)
-{
- int bar, i;
- byte __iomem *p;
- PADAPTER_LIST_ENTRY quadro_list;
- diva_os_xdi_adapter_t *diva_current;
- diva_os_xdi_adapter_t *adapter_list[4];
- PISDN_ADAPTER Slave;
- unsigned long bar_length[ARRAY_SIZE(_4bri_bar_length)];
- int v2 = _4bri_is_rev_2_card(a->CardOrdinal);
- int tasks = _4bri_is_rev_2_bri_card(a->CardOrdinal) ? 1 : MQ_INSTANCE_COUNT;
- int factor = (tasks == 1) ? 1 : 2;
-
- if (v2) {
- if (_4bri_is_rev_2_bri_card(a->CardOrdinal)) {
- memcpy(bar_length, _4bri_v2_bri_bar_length,
- sizeof(bar_length));
- } else {
- memcpy(bar_length, _4bri_v2_bar_length,
- sizeof(bar_length));
- }
- } else {
- memcpy(bar_length, _4bri_bar_length, sizeof(bar_length));
- }
- DBG_TRC(("SDRAM_LENGTH=%08x, tasks=%d, factor=%d",
- bar_length[2], tasks, factor))
-
- /*
-	   Get the serial number.
-	   The serial number of the 4BRI is accessible, in accordance with
-	   the PCI spec, via a command register located in configuration
-	   space, so we do not have to map any BAR before we can access it
- */
- if (!_4bri_get_serial_number(a)) {
- DBG_ERR(("A: 4BRI can't get Serial Number"))
- diva_4bri_cleanup_adapter(a);
- return (-1);
- }
-
- /*
- Set properties
- */
- a->xdi_adapter.Properties = CardProperties[a->CardOrdinal];
- DBG_LOG(("Load %s, SN:%ld, bus:%02x, func:%02x",
- a->xdi_adapter.Properties.Name,
- a->xdi_adapter.serialNo,
- a->resources.pci.bus, a->resources.pci.func))
-
- /*
-	   First initialization step: get and check the hardware resources.
-	   Do not map resources and do not access the card at this step
- */
- for (bar = 0; bar < 4; bar++) {
- a->resources.pci.bar[bar] =
- divasa_get_pci_bar(a->resources.pci.bus,
- a->resources.pci.func, bar,
- a->resources.pci.hdev);
- if (!a->resources.pci.bar[bar]
- || (a->resources.pci.bar[bar] == 0xFFFFFFF0)) {
- DBG_ERR(
- ("A: invalid bar[%d]=%08x", bar,
- a->resources.pci.bar[bar]))
- return (-1);
- }
- }
- a->resources.pci.irq =
- (byte) divasa_get_pci_irq(a->resources.pci.bus,
- a->resources.pci.func,
- a->resources.pci.hdev);
- if (!a->resources.pci.irq) {
- DBG_ERR(("A: invalid irq"));
- return (-1);
- }
-
- a->xdi_adapter.sdram_bar = a->resources.pci.bar[2];
-
- /*
-	   Map all memory BARs
- */
- for (bar = 0; bar < 4; bar++) {
- if (bar != 1) { /* ignore I/O */
- a->resources.pci.addr[bar] =
- divasa_remap_pci_bar(a, bar, a->resources.pci.bar[bar],
- bar_length[bar]);
- if (!a->resources.pci.addr[bar]) {
- DBG_ERR(("A: 4BRI: can't map bar[%d]", bar))
- diva_4bri_cleanup_adapter(a);
- return (-1);
- }
- }
- }
-
- /*
- Register I/O port
- */
- sprintf(&a->port_name[0], "DIVA 4BRI %ld", (long) a->xdi_adapter.serialNo);
-
- if (diva_os_register_io_port(a, 1, a->resources.pci.bar[1],
- bar_length[1], &a->port_name[0], 1)) {
- DBG_ERR(("A: 4BRI: can't register bar[1]"))
- diva_4bri_cleanup_adapter(a);
- return (-1);
- }
-
- a->resources.pci.addr[1] =
- (void *) (unsigned long) a->resources.pci.bar[1];
-
- /*
-	   Set the cleanup pointer for the base adapter only, so that the
-	   slave adapters cannot be cleaned up individually
- */
- a->interface.cleanup_adapter_proc = diva_4bri_cleanup_adapter;
-
- /*
- Create slave adapters
- */
- if (tasks > 1) {
- if (!(a->slave_adapters[0] =
- (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a))))
- {
- diva_4bri_cleanup_adapter(a);
- return (-1);
- }
- if (!(a->slave_adapters[1] =
- (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a))))
- {
- diva_os_free(0, a->slave_adapters[0]);
- a->slave_adapters[0] = NULL;
- diva_4bri_cleanup_adapter(a);
- return (-1);
- }
- if (!(a->slave_adapters[2] =
- (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a))))
- {
- diva_os_free(0, a->slave_adapters[0]);
- diva_os_free(0, a->slave_adapters[1]);
- a->slave_adapters[0] = NULL;
- a->slave_adapters[1] = NULL;
- diva_4bri_cleanup_adapter(a);
- return (-1);
- }
- memset(a->slave_adapters[0], 0x00, sizeof(*a));
- memset(a->slave_adapters[1], 0x00, sizeof(*a));
- memset(a->slave_adapters[2], 0x00, sizeof(*a));
- }
-
- adapter_list[0] = a;
- adapter_list[1] = a->slave_adapters[0];
- adapter_list[2] = a->slave_adapters[1];
- adapter_list[3] = a->slave_adapters[2];
-
- /*
- Allocate slave list
- */
- quadro_list =
- (PADAPTER_LIST_ENTRY) diva_os_malloc(0, sizeof(*quadro_list));
- if (!(a->slave_list = quadro_list)) {
- for (i = 0; i < (tasks - 1); i++) {
- diva_os_free(0, a->slave_adapters[i]);
- a->slave_adapters[i] = NULL;
- }
- diva_4bri_cleanup_adapter(a);
- return (-1);
- }
- memset(quadro_list, 0x00, sizeof(*quadro_list));
-
- /*
- Set interfaces
- */
- a->xdi_adapter.QuadroList = quadro_list;
- for (i = 0; i < tasks; i++) {
- adapter_list[i]->xdi_adapter.ControllerNumber = i;
- adapter_list[i]->xdi_adapter.tasks = tasks;
- quadro_list->QuadroAdapter[i] =
- &adapter_list[i]->xdi_adapter;
- }
-
- for (i = 0; i < tasks; i++) {
- diva_current = adapter_list[i];
-
- diva_current->dsp_mask = 0x00000003;
-
- diva_current->xdi_adapter.a.io =
- &diva_current->xdi_adapter;
- diva_current->xdi_adapter.DIRequest = request;
- diva_current->interface.cmd_proc = diva_4bri_cmd_card_proc;
- diva_current->xdi_adapter.Properties =
- CardProperties[a->CardOrdinal];
- diva_current->CardOrdinal = a->CardOrdinal;
-
- diva_current->xdi_adapter.Channels =
- CardProperties[a->CardOrdinal].Channels;
- diva_current->xdi_adapter.e_max =
- CardProperties[a->CardOrdinal].E_info;
- diva_current->xdi_adapter.e_tbl =
- diva_os_malloc(0,
- diva_current->xdi_adapter.e_max *
- sizeof(E_INFO));
-
- if (!diva_current->xdi_adapter.e_tbl) {
- diva_4bri_cleanup_slave_adapters(a);
- diva_4bri_cleanup_adapter(a);
- for (i = 1; i < (tasks - 1); i++) {
- diva_os_free(0, adapter_list[i]);
- }
- return (-1);
- }
- memset(diva_current->xdi_adapter.e_tbl, 0x00,
- diva_current->xdi_adapter.e_max * sizeof(E_INFO));
-
- if (diva_os_initialize_spin_lock(&diva_current->xdi_adapter.isr_spin_lock, "isr")) {
- diva_4bri_cleanup_slave_adapters(a);
- diva_4bri_cleanup_adapter(a);
- for (i = 1; i < (tasks - 1); i++) {
- diva_os_free(0, adapter_list[i]);
- }
- return (-1);
- }
- if (diva_os_initialize_spin_lock(&diva_current->xdi_adapter.data_spin_lock, "data")) {
- diva_4bri_cleanup_slave_adapters(a);
- diva_4bri_cleanup_adapter(a);
- for (i = 1; i < (tasks - 1); i++) {
- diva_os_free(0, adapter_list[i]);
- }
- return (-1);
- }
-
-		strcpy(diva_current->xdi_adapter.req_soft_isr.dpc_thread_name, "kdivas4brid");
-
- if (diva_os_initialize_soft_isr(&diva_current->xdi_adapter.req_soft_isr, DIDpcRoutine,
- &diva_current->xdi_adapter)) {
- diva_4bri_cleanup_slave_adapters(a);
- diva_4bri_cleanup_adapter(a);
- for (i = 1; i < (tasks - 1); i++) {
- diva_os_free(0, adapter_list[i]);
- }
- return (-1);
- }
-
- /*
- Do not initialize second DPC - only one thread will be created
- */
- diva_current->xdi_adapter.isr_soft_isr.object =
- diva_current->xdi_adapter.req_soft_isr.object;
- }
-
- if (v2) {
- prepare_qBri2_functions(&a->xdi_adapter);
- } else {
- prepare_qBri_functions(&a->xdi_adapter);
- }
-
- for (i = 0; i < tasks; i++) {
- diva_current = adapter_list[i];
- if (i)
- memcpy(&diva_current->resources, &a->resources, sizeof(divas_card_resources_t));
- diva_current->resources.pci.qoffset = (a->xdi_adapter.MemorySize >> factor);
- }
-
- /*
- Set up hardware related pointers
- */
- a->xdi_adapter.cfg = (void *) (unsigned long) a->resources.pci.bar[0]; /* BAR0 CONFIG */
- a->xdi_adapter.port = (void *) (unsigned long) a->resources.pci.bar[1]; /* BAR1 */
- a->xdi_adapter.ctlReg = (void *) (unsigned long) a->resources.pci.bar[3]; /* BAR3 CNTRL */
-
- for (i = 0; i < tasks; i++) {
- diva_current = adapter_list[i];
- diva_4bri_set_addresses(diva_current);
- Slave = a->xdi_adapter.QuadroList->QuadroAdapter[i];
- Slave->MultiMaster = &a->xdi_adapter;
- Slave->sdram_bar = a->xdi_adapter.sdram_bar;
- if (i) {
- Slave->serialNo = ((dword) (Slave->ControllerNumber << 24)) |
- a->xdi_adapter.serialNo;
- Slave->cardType = a->xdi_adapter.cardType;
- }
- }
-
- /*
- reset contains the base address for the PLX 9054 register set
- */
- p = DIVA_OS_MEM_ATTACH_RESET(&a->xdi_adapter);
- WRITE_BYTE(&p[PLX9054_INTCSR], 0x00); /* disable PCI interrupts */
- DIVA_OS_MEM_DETACH_RESET(&a->xdi_adapter, p);
-
- /*
- Set IRQ handler
- */
- a->xdi_adapter.irq_info.irq_nr = a->resources.pci.irq;
- sprintf(a->xdi_adapter.irq_info.irq_name, "DIVA 4BRI %ld",
- (long) a->xdi_adapter.serialNo);
-
- if (diva_os_register_irq(a, a->xdi_adapter.irq_info.irq_nr,
- a->xdi_adapter.irq_info.irq_name)) {
- diva_4bri_cleanup_slave_adapters(a);
- diva_4bri_cleanup_adapter(a);
-		for (i = 1; i < tasks; i++) {
- diva_os_free(0, adapter_list[i]);
- }
- return (-1);
- }
-
- a->xdi_adapter.irq_info.registered = 1;
-
- /*
- Add three slave adapters
- */
- if (tasks > 1) {
- diva_add_slave_adapter(adapter_list[1]);
- diva_add_slave_adapter(adapter_list[2]);
- diva_add_slave_adapter(adapter_list[3]);
- }
-
- diva_log_info("%s IRQ:%d SerNo:%d", a->xdi_adapter.Properties.Name,
- a->resources.pci.irq, a->xdi_adapter.serialNo);
-
- return (0);
-}
-
-/*
-** Cleanup function will be called for the master adapter only;
-** this is guaranteed by design: the cleanup callback is set
-** by the master adapter only
-*/
-static int diva_4bri_cleanup_adapter(diva_os_xdi_adapter_t *a)
-{
- int bar;
-
- /*
- Stop adapter if running
- */
- if (a->xdi_adapter.Initialized) {
- diva_4bri_stop_adapter(a);
- }
-
- /*
- Remove IRQ handler
- */
- if (a->xdi_adapter.irq_info.registered) {
- diva_os_remove_irq(a, a->xdi_adapter.irq_info.irq_nr);
- }
- a->xdi_adapter.irq_info.registered = 0;
-
- /*
- Free DPC's and spin locks on all adapters
- */
- diva_4bri_cleanup_slave_adapters(a);
-
- /*
- Unmap all BARS
- */
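-	/* BAR1 is an I/O port range; it is unregistered, not unmapped, below */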
- for (bar = 0; bar < 4; bar++) {
- if (bar != 1) {
- if (a->resources.pci.bar[bar]
- && a->resources.pci.addr[bar]) {
- divasa_unmap_pci_bar(a->resources.pci.addr[bar]);
- a->resources.pci.bar[bar] = 0;
- a->resources.pci.addr[bar] = NULL;
- }
- }
- }
-
- /*
- Unregister I/O
- */
- if (a->resources.pci.bar[1] && a->resources.pci.addr[1]) {
- diva_os_register_io_port(a, 0, a->resources.pci.bar[1],
-					 _4bri_is_rev_2_card(a->CardOrdinal) ?
-					 _4bri_v2_bar_length[1] :
-					 _4bri_bar_length[1],
- &a->port_name[0], 1);
- a->resources.pci.bar[1] = 0;
- a->resources.pci.addr[1] = NULL;
- }
-
- if (a->slave_list) {
- diva_os_free(0, a->slave_list);
- a->slave_list = NULL;
- }
-
- return (0);
-}
-
-static int _4bri_get_serial_number(diva_os_xdi_adapter_t *a)
-{
- dword data[64];
- dword serNo;
- word addr, status, i, j;
- byte Bus, Slot;
- void *hdev;
-
- Bus = a->resources.pci.bus;
- Slot = a->resources.pci.func;
- hdev = a->resources.pci.hdev;
-
- for (i = 0; i < 64; ++i) {
- addr = i * 4;
- for (j = 0; j < 5; ++j) {
- PCIwrite(Bus, Slot, 0x4E, &addr, sizeof(addr),
- hdev);
- diva_os_wait(1);
- PCIread(Bus, Slot, 0x4E, &status, sizeof(status),
- hdev);
- if (status & 0x8000)
- break;
- }
- if (j >= 5) {
- DBG_ERR(("EEPROM[%d] read failed (0x%x)", i * 4, addr))
- return (0);
- }
- PCIread(Bus, Slot, 0x50, &data[i], sizeof(data[i]), hdev);
- }
- DBG_BLK(((char *) &data[0], sizeof(data)))
-
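-	/*
-	  The serial number is expected in EEPROM dword 32; a blank or
-	  erased word falls back to dword 63, and as a last resort a
-	  pseudo serial is synthesized from BAR1/bus/function below.
-	*/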
- serNo = data[32];
- if (serNo == 0 || serNo == 0xffffffff)
- serNo = data[63];
-
- if (!serNo) {
-		DBG_LOG(("W: Serial Number == 0, generating one"));
- serNo = a->resources.pci.bar[1] & 0xffff0000;
- serNo |= a->resources.pci.bus << 8;
- serNo |= a->resources.pci.func;
- }
-
- a->xdi_adapter.serialNo = serNo;
-
- DBG_REG(("Serial No. : %ld", a->xdi_adapter.serialNo))
-
- return (serNo);
-}
-
-/*
-** Release resources of slave adapters
-*/
-static int diva_4bri_cleanup_slave_adapters(diva_os_xdi_adapter_t *a)
-{
- diva_os_xdi_adapter_t *adapter_list[4];
- diva_os_xdi_adapter_t *diva_current;
- int i;
-
- adapter_list[0] = a;
- adapter_list[1] = a->slave_adapters[0];
- adapter_list[2] = a->slave_adapters[1];
- adapter_list[3] = a->slave_adapters[2];
-
- for (i = 0; i < a->xdi_adapter.tasks; i++) {
- diva_current = adapter_list[i];
- if (diva_current) {
-			diva_os_destroy_spin_lock(&diva_current->xdi_adapter.isr_spin_lock,
-						  "unload");
-			diva_os_destroy_spin_lock(&diva_current->xdi_adapter.data_spin_lock,
-						  "unload");
-
-			diva_os_cancel_soft_isr(&diva_current->xdi_adapter.req_soft_isr);
-			diva_os_cancel_soft_isr(&diva_current->xdi_adapter.isr_soft_isr);
-
-			diva_os_remove_soft_isr(&diva_current->xdi_adapter.req_soft_isr);
- diva_current->xdi_adapter.isr_soft_isr.object = NULL;
-
- if (diva_current->xdi_adapter.e_tbl) {
-				diva_os_free(0, diva_current->xdi_adapter.e_tbl);
- }
- diva_current->xdi_adapter.e_tbl = NULL;
- diva_current->xdi_adapter.e_max = 0;
- diva_current->xdi_adapter.e_count = 0;
- }
- }
-
- return (0);
-}
-
-static int
-diva_4bri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
- diva_xdi_um_cfg_cmd_t *cmd, int length)
-{
- int ret = -1;
-
- if (cmd->adapter != a->controller) {
- DBG_ERR(("A: 4bri_cmd, invalid controller=%d != %d",
- cmd->adapter, a->controller))
- return (-1);
- }
-
- switch (cmd->command) {
- case DIVA_XDI_UM_CMD_GET_CARD_ORDINAL:
- a->xdi_mbox.data_length = sizeof(dword);
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- *(dword *) a->xdi_mbox.data =
- (dword) a->CardOrdinal;
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- break;
-
- case DIVA_XDI_UM_CMD_GET_SERIAL_NR:
- a->xdi_mbox.data_length = sizeof(dword);
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- *(dword *) a->xdi_mbox.data =
- (dword) a->xdi_adapter.serialNo;
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- break;
-
- case DIVA_XDI_UM_CMD_GET_PCI_HW_CONFIG:
- if (!a->xdi_adapter.ControllerNumber) {
- /*
- Only master adapter can access hardware config
- */
- a->xdi_mbox.data_length = sizeof(dword) * 9;
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- int i;
- dword *data = (dword *) a->xdi_mbox.data;
-
- for (i = 0; i < 8; i++) {
- *data++ = a->resources.pci.bar[i];
- }
- *data++ = (dword) a->resources.pci.irq;
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- }
- break;
-
- case DIVA_XDI_UM_CMD_GET_CARD_STATE:
- if (!a->xdi_adapter.ControllerNumber) {
- a->xdi_mbox.data_length = sizeof(dword);
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- dword *data = (dword *) a->xdi_mbox.data;
- if (!a->xdi_adapter.ram
- || !a->xdi_adapter.reset
- || !a->xdi_adapter.cfg) {
- *data = 3;
- } else if (a->xdi_adapter.trapped) {
- *data = 2;
- } else if (a->xdi_adapter.Initialized) {
- *data = 1;
- } else {
- *data = 0;
- }
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- }
- break;
-
- case DIVA_XDI_UM_CMD_WRITE_FPGA:
- if (!a->xdi_adapter.ControllerNumber) {
-			ret = diva_4bri_write_fpga_image(a, (byte *)&cmd[1],
-							 cmd->command_data.write_fpga.image_length);
- }
- break;
-
- case DIVA_XDI_UM_CMD_RESET_ADAPTER:
- if (!a->xdi_adapter.ControllerNumber) {
- ret = diva_4bri_reset_adapter(&a->xdi_adapter);
- }
- break;
-
- case DIVA_XDI_UM_CMD_WRITE_SDRAM_BLOCK:
- if (!a->xdi_adapter.ControllerNumber) {
-			ret = diva_4bri_write_sdram_block(&a->xdi_adapter,
-							  cmd->command_data.write_sdram.offset,
-							  (byte *)&cmd[1],
-							  cmd->command_data.write_sdram.length,
-							  a->xdi_adapter.MemorySize);
- }
- break;
-
- case DIVA_XDI_UM_CMD_START_ADAPTER:
- if (!a->xdi_adapter.ControllerNumber) {
-			ret = diva_4bri_start_adapter(&a->xdi_adapter,
-						      cmd->command_data.start.offset,
-						      cmd->command_data.start.features);
- }
- break;
-
- case DIVA_XDI_UM_CMD_SET_PROTOCOL_FEATURES:
- if (!a->xdi_adapter.ControllerNumber) {
- a->xdi_adapter.features =
- cmd->command_data.features.features;
- a->xdi_adapter.a.protocol_capabilities =
- a->xdi_adapter.features;
- DBG_TRC(("Set raw protocol features (%08x)",
- a->xdi_adapter.features))
- ret = 0;
- }
- break;
-
- case DIVA_XDI_UM_CMD_STOP_ADAPTER:
- if (!a->xdi_adapter.ControllerNumber) {
- ret = diva_4bri_stop_adapter(a);
- }
- break;
-
- case DIVA_XDI_UM_CMD_READ_XLOG_ENTRY:
- ret = diva_card_read_xlog(a);
- break;
-
- case DIVA_XDI_UM_CMD_READ_SDRAM:
- if (!a->xdi_adapter.ControllerNumber
- && a->xdi_adapter.Address) {
-			if ((a->xdi_mbox.data_length =
-			     cmd->command_data.read_sdram.length)) {
-				if ((a->xdi_mbox.data_length +
-				     cmd->command_data.read_sdram.offset) <
-				    a->xdi_adapter.MemorySize) {
-					a->xdi_mbox.data =
-						diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(&a->xdi_adapter);
- byte __iomem *src = p;
- byte *dst = a->xdi_mbox.data;
- dword len = a->xdi_mbox.data_length;
-
- src += cmd->command_data.read_sdram.offset;
-
- while (len--) {
- *dst++ = READ_BYTE(src++);
- }
- DIVA_OS_MEM_DETACH_ADDRESS(&a->xdi_adapter, p);
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- }
- }
- }
- break;
-
- default:
- DBG_ERR(("A: A(%d) invalid cmd=%d", a->controller,
- cmd->command))
- }
-
- return (ret);
-}
-
-void *xdiLoadFile(char *FileName, dword *FileLength,
- unsigned long lim)
-{
- void *ret = diva_xdiLoadFileFile;
-
- if (FileLength) {
- *FileLength = diva_xdiLoadFileLength;
- }
- diva_xdiLoadFileFile = NULL;
- diva_xdiLoadFileLength = 0;
-
- return (ret);
-}
-
-void diva_os_set_qBri_functions(PISDN_ADAPTER IoAdapter)
-{
-}
-
-void diva_os_set_qBri2_functions(PISDN_ADAPTER IoAdapter)
-{
-}
-
-static int
-diva_4bri_write_fpga_image(diva_os_xdi_adapter_t *a, byte *data,
- dword length)
-{
- int ret;
-
- diva_xdiLoadFileFile = data;
- diva_xdiLoadFileLength = length;
-
- ret = qBri_FPGA_download(&a->xdi_adapter);
-
- diva_xdiLoadFileFile = NULL;
- diva_xdiLoadFileLength = 0;
-
- return (ret ? 0 : -1);
-}
-
-static int diva_4bri_reset_adapter(PISDN_ADAPTER IoAdapter)
-{
- PISDN_ADAPTER Slave;
- int i;
-
- if (!IoAdapter->Address || !IoAdapter->reset) {
- return (-1);
- }
- if (IoAdapter->Initialized) {
- DBG_ERR(("A: A(%d) can't reset 4BRI adapter - please stop first",
- IoAdapter->ANum))
- return (-1);
- }
-
- /*
- Forget all entities on all adapters
- */
- for (i = 0; ((i < IoAdapter->tasks) && IoAdapter->QuadroList); i++) {
- Slave = IoAdapter->QuadroList->QuadroAdapter[i];
- Slave->e_count = 0;
- if (Slave->e_tbl) {
- memset(Slave->e_tbl, 0x00,
- Slave->e_max * sizeof(E_INFO));
- }
- Slave->head = 0;
- Slave->tail = 0;
- Slave->assign = 0;
- Slave->trapped = 0;
-
- memset(&Slave->a.IdTable[0], 0x00,
- sizeof(Slave->a.IdTable));
- memset(&Slave->a.IdTypeTable[0], 0x00,
- sizeof(Slave->a.IdTypeTable));
- memset(&Slave->a.FlowControlIdTable[0], 0x00,
- sizeof(Slave->a.FlowControlIdTable));
- memset(&Slave->a.FlowControlSkipTable[0], 0x00,
- sizeof(Slave->a.FlowControlSkipTable));
- memset(&Slave->a.misc_flags_table[0], 0x00,
- sizeof(Slave->a.misc_flags_table));
- memset(&Slave->a.rx_stream[0], 0x00,
- sizeof(Slave->a.rx_stream));
- memset(&Slave->a.tx_stream[0], 0x00,
- sizeof(Slave->a.tx_stream));
- memset(&Slave->a.tx_pos[0], 0x00, sizeof(Slave->a.tx_pos));
- memset(&Slave->a.rx_pos[0], 0x00, sizeof(Slave->a.rx_pos));
- }
-
- return (0);
-}
-
-
-static int
-diva_4bri_write_sdram_block(PISDN_ADAPTER IoAdapter,
- dword address,
- const byte *data, dword length, dword limit)
-{
- byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
- byte __iomem *mem = p;
-
- if (((address + length) >= limit) || !mem) {
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p);
- DBG_ERR(("A: A(%d) write 4BRI address=0x%08lx",
- IoAdapter->ANum, address + length))
- return (-1);
- }
- mem += address;
-
- while (length--) {
- WRITE_BYTE(mem++, *data++);
- }
-
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p);
- return (0);
-}
-
-static int
-diva_4bri_start_adapter(PISDN_ADAPTER IoAdapter,
- dword start_address, dword features)
-{
- volatile word __iomem *signature;
- int started = 0;
- int i;
- byte __iomem *p;
-
- /*
- start adapter
- */
- start_qBri_hardware(IoAdapter);
-
- p = DIVA_OS_MEM_ATTACH_RAM(IoAdapter);
- /*
- wait for signature in shared memory (max. 3 seconds)
- */
- signature = (volatile word __iomem *) (&p[0x1E]);
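-	/* the protocol code writes 0x4447 at offset 0x1E of shared RAM once it is up */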
-
- for (i = 0; i < 300; ++i) {
- diva_os_wait(10);
- if (READ_WORD(&signature[0]) == 0x4447) {
- DBG_TRC(("Protocol startup time %d.%02d seconds",
- (i / 100), (i % 100)))
- started = 1;
- break;
- }
- }
-
- for (i = 1; i < IoAdapter->tasks; i++) {
- IoAdapter->QuadroList->QuadroAdapter[i]->features =
- IoAdapter->features;
- IoAdapter->QuadroList->QuadroAdapter[i]->a.
- protocol_capabilities = IoAdapter->features;
- }
-
- if (!started) {
- DBG_FTL(("%s: Adapter selftest failed, signature=%04x",
- IoAdapter->Properties.Name,
- READ_WORD(&signature[0])))
- DIVA_OS_MEM_DETACH_RAM(IoAdapter, p);
- (*(IoAdapter->trapFnc)) (IoAdapter);
- IoAdapter->stop(IoAdapter);
- return (-1);
- }
- DIVA_OS_MEM_DETACH_RAM(IoAdapter, p);
-
- for (i = 0; i < IoAdapter->tasks; i++) {
- IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 1;
- IoAdapter->QuadroList->QuadroAdapter[i]->IrqCount = 0;
- }
-
- if (check_qBri_interrupt(IoAdapter)) {
- DBG_ERR(("A: A(%d) interrupt test failed",
- IoAdapter->ANum))
- for (i = 0; i < IoAdapter->tasks; i++) {
- IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 0;
- }
- IoAdapter->stop(IoAdapter);
- return (-1);
- }
-
- IoAdapter->Properties.Features = (word) features;
- diva_xdi_display_adapter_features(IoAdapter->ANum);
-
- for (i = 0; i < IoAdapter->tasks; i++) {
- DBG_LOG(("A(%d) %s adapter successfully started",
- IoAdapter->QuadroList->QuadroAdapter[i]->ANum,
- (IoAdapter->tasks == 1) ? "BRI 2.0" : "4BRI"))
- diva_xdi_didd_register_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum);
- IoAdapter->QuadroList->QuadroAdapter[i]->Properties.Features = (word) features;
- }
-
- return (0);
-}
-
-static int check_qBri_interrupt(PISDN_ADAPTER IoAdapter)
-{
-#ifdef SUPPORT_INTERRUPT_TEST_ON_4BRI
- int i;
- ADAPTER *a = &IoAdapter->a;
- byte __iomem *p;
-
- IoAdapter->IrqCount = 0;
-
- if (IoAdapter->ControllerNumber > 0)
- return (-1);
-
- p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
- WRITE_BYTE(&p[PLX9054_INTCSR], PLX9054_INT_ENABLE);
- DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
- /*
- interrupt test
- */
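-	/* ask the card to raise a "ready" interrupt and wait up to 1 s for it */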
- a->ReadyInt = 1;
- a->ram_out(a, &PR_RAM->ReadyInt, 1);
-
- for (i = 100; !IoAdapter->IrqCount && (i-- > 0); diva_os_wait(10));
-
- return ((IoAdapter->IrqCount > 0) ? 0 : -1);
-#else
- dword volatile __iomem *qBriIrq;
- byte __iomem *p;
- /*
- Reset on-board interrupt register
- */
- IoAdapter->IrqCount = 0;
- p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
-	qBriIrq = (dword volatile __iomem *)
-		(&p[_4bri_is_rev_2_card(IoAdapter->cardType) ?
-		    (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]);
-
- WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-
- p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
- WRITE_BYTE(&p[PLX9054_INTCSR], PLX9054_INT_ENABLE);
- DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-
- diva_os_wait(100);
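-	/*
-	  No loopback test in this configuration: just clear the test
-	  IRQ register, enable PCI interrupts and report success.
-	*/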
-
- return (0);
-#endif /* SUPPORT_INTERRUPT_TEST_ON_4BRI */
-}
-
-static void diva_4bri_clear_interrupts(diva_os_xdi_adapter_t *a)
-{
- PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
-
- /*
- clear any pending interrupt
- */
- IoAdapter->disIrq(IoAdapter);
-
- IoAdapter->tst_irq(&IoAdapter->a);
- IoAdapter->clr_irq(&IoAdapter->a);
- IoAdapter->tst_irq(&IoAdapter->a);
-
- /*
- kill pending dpcs
- */
- diva_os_cancel_soft_isr(&IoAdapter->req_soft_isr);
- diva_os_cancel_soft_isr(&IoAdapter->isr_soft_isr);
-}
-
-static int diva_4bri_stop_adapter(diva_os_xdi_adapter_t *a)
-{
- PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
- int i;
-
- if (!IoAdapter->ram) {
- return (-1);
- }
-
- if (!IoAdapter->Initialized) {
-		DBG_ERR(("A: A(%d) can't stop 4BRI adapter - not running",
-			 IoAdapter->ANum))
- return (-1); /* nothing to stop */
- }
-
- for (i = 0; i < IoAdapter->tasks; i++) {
- IoAdapter->QuadroList->QuadroAdapter[i]->Initialized = 0;
- }
-
- /*
- Disconnect Adapters from DIDD
- */
- for (i = 0; i < IoAdapter->tasks; i++) {
- diva_xdi_didd_remove_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum);
- }
-
- i = 100;
-
- /*
- Stop interrupts
- */
- a->clear_interrupts_proc = diva_4bri_clear_interrupts;
- IoAdapter->a.ReadyInt = 1;
- IoAdapter->a.ram_inc(&IoAdapter->a, &PR_RAM->ReadyInt);
- do {
- diva_os_sleep(10);
- } while (i-- && a->clear_interrupts_proc);
-
- if (a->clear_interrupts_proc) {
- diva_4bri_clear_interrupts(a);
- a->clear_interrupts_proc = NULL;
- DBG_ERR(("A: A(%d) no final interrupt from 4BRI adapter",
- IoAdapter->ANum))
- }
- IoAdapter->a.ReadyInt = 0;
-
- /*
- Stop and reset adapter
- */
- IoAdapter->stop(IoAdapter);
-
- return (0);
-}
diff --git a/drivers/isdn/hardware/eicon/os_4bri.h b/drivers/isdn/hardware/eicon/os_4bri.h
deleted file mode 100644
index 94b2709537d8..000000000000
--- a/drivers/isdn/hardware/eicon/os_4bri.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: os_4bri.h,v 1.1.2.2 2001/02/08 12:25:44 armin Exp $ */
-
-#ifndef __DIVA_OS_4_BRI_H__
-#define __DIVA_OS_4_BRI_H__
-
-int diva_4bri_init_card(diva_os_xdi_adapter_t *a);
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/os_bri.c b/drivers/isdn/hardware/eicon/os_bri.c
deleted file mode 100644
index de93090bcacb..000000000000
--- a/drivers/isdn/hardware/eicon/os_bri.c
+++ /dev/null
@@ -1,815 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* $Id: os_bri.c,v 1.21 2004/03/21 17:26:01 armin Exp $ */
-
-#include "platform.h"
-#include "debuglib.h"
-#include "cardtype.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di_defs.h"
-#include "dsp_defs.h"
-#include "di.h"
-#include "io.h"
-
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "os_bri.h"
-#include "diva_pci.h"
-#include "mi_pc.h"
-#include "pc_maint.h"
-#include "dsrv_bri.h"
-
-/*
-** IMPORTS
-*/
-extern void prepare_maestra_functions(PISDN_ADAPTER IoAdapter);
-extern void diva_xdi_display_adapter_features(int card);
-extern int diva_card_read_xlog(diva_os_xdi_adapter_t *a);
-
-/*
-** LOCALS
-*/
-static int bri_bar_length[3] = {
- 0x80,
- 0x80,
- 0x20
-};
-static int diva_bri_cleanup_adapter(diva_os_xdi_adapter_t *a);
-static dword diva_bri_get_serial_number(diva_os_xdi_adapter_t *a);
-static int diva_bri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
- diva_xdi_um_cfg_cmd_t *cmd, int length);
-static int diva_bri_reregister_io(diva_os_xdi_adapter_t *a);
-static int diva_bri_reset_adapter(PISDN_ADAPTER IoAdapter);
-static int diva_bri_write_sdram_block(PISDN_ADAPTER IoAdapter,
- dword address,
- const byte *data, dword length);
-static int diva_bri_start_adapter(PISDN_ADAPTER IoAdapter,
- dword start_address, dword features);
-static int diva_bri_stop_adapter(diva_os_xdi_adapter_t *a);
-
-static void diva_bri_set_addresses(diva_os_xdi_adapter_t *a)
-{
- a->resources.pci.mem_type_id[MEM_TYPE_RAM] = 0;
- a->resources.pci.mem_type_id[MEM_TYPE_CFG] = 1;
- a->resources.pci.mem_type_id[MEM_TYPE_ADDRESS] = 2;
- a->resources.pci.mem_type_id[MEM_TYPE_RESET] = 1;
- a->resources.pci.mem_type_id[MEM_TYPE_PORT] = 2;
- a->resources.pci.mem_type_id[MEM_TYPE_CTLREG] = 2;
-
- a->xdi_adapter.ram = a->resources.pci.addr[0];
- a->xdi_adapter.cfg = a->resources.pci.addr[1];
- a->xdi_adapter.Address = a->resources.pci.addr[2];
-
- a->xdi_adapter.reset = a->xdi_adapter.cfg;
- a->xdi_adapter.port = a->xdi_adapter.Address;
-
- a->xdi_adapter.ctlReg = a->xdi_adapter.port + M_PCI_RESET;
-
- a->xdi_adapter.reset += 0x4C; /* PLX 9050 !! */
-}
-
-/*
-** BAR0 - MEM Addr - 0x80 - NOT USED
-** BAR1 - I/O Addr - 0x80
-** BAR2 - I/O Addr - 0x20
-*/
-int diva_bri_init_card(diva_os_xdi_adapter_t *a)
-{
- int bar;
- dword bar2 = 0, bar2_length = 0xffffffff;
- word cmd = 0, cmd_org;
- byte Bus, Slot;
- void *hdev;
- byte __iomem *p;
-
- /*
- Set properties
- */
- a->xdi_adapter.Properties = CardProperties[a->CardOrdinal];
- DBG_LOG(("Load %s", a->xdi_adapter.Properties.Name))
-
- /*
- Get resources
- */
- for (bar = 0; bar < 3; bar++) {
- a->resources.pci.bar[bar] =
- divasa_get_pci_bar(a->resources.pci.bus,
- a->resources.pci.func, bar,
- a->resources.pci.hdev);
- if (!a->resources.pci.bar[bar]) {
- DBG_ERR(("A: can't get BAR[%d]", bar))
- return (-1);
- }
- }
-
- a->resources.pci.irq =
- (byte) divasa_get_pci_irq(a->resources.pci.bus,
- a->resources.pci.func,
- a->resources.pci.hdev);
- if (!a->resources.pci.irq) {
- DBG_ERR(("A: invalid irq"));
- return (-1);
- }
-
- /*
-	  Get length of I/O BAR 2 - it differs depending on the
-	  EEPROM version
- */
- Bus = a->resources.pci.bus;
- Slot = a->resources.pci.func;
- hdev = a->resources.pci.hdev;
-
- /*
-	  Save the original values of the BAR2 and CMD registers
- */
- PCIread(Bus, Slot, 0x18, &bar2, sizeof(bar2), hdev);
- PCIread(Bus, Slot, 0x04, &cmd_org, sizeof(cmd_org), hdev);
- /*
- Disable device and get BAR2 length
- */
- PCIwrite(Bus, Slot, 0x04, &cmd, sizeof(cmd), hdev);
- PCIwrite(Bus, Slot, 0x18, &bar2_length, sizeof(bar2_length), hdev);
- PCIread(Bus, Slot, 0x18, &bar2_length, sizeof(bar2_length), hdev);
- /*
- Restore BAR2 and CMD registers
- */
- PCIwrite(Bus, Slot, 0x18, &bar2, sizeof(bar2), hdev);
- PCIwrite(Bus, Slot, 0x04, &cmd_org, sizeof(cmd_org), hdev);
-
- /*
- Calculate BAR2 length
- */
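-	/*
-	  Standard PCI BAR sizing: after writing all ones the read-back
-	  value has the size-dependent address bits forced to zero, so
-	  masking the low flag bits, inverting and adding one yields the
-	  decoded window length.
-	*/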
- bar2_length = (~(bar2_length & ~7)) + 1;
- DBG_LOG(("BAR[2] length=%lx", bar2_length))
-
- /*
- Map and register resources
- */
- if (!(a->resources.pci.addr[0] =
- divasa_remap_pci_bar(a, 0, a->resources.pci.bar[0],
- bri_bar_length[0]))) {
- DBG_ERR(("A: BRI, can't map BAR[0]"))
- diva_bri_cleanup_adapter(a);
- return (-1);
- }
-
- sprintf(&a->port_name[0], "BRI %02x:%02x",
- a->resources.pci.bus, a->resources.pci.func);
-
- if (diva_os_register_io_port(a, 1, a->resources.pci.bar[1],
- bri_bar_length[1], &a->port_name[0], 1)) {
- DBG_ERR(("A: BRI, can't register BAR[1]"))
- diva_bri_cleanup_adapter(a);
- return (-1);
- }
- a->resources.pci.addr[1] = (void *) (unsigned long) a->resources.pci.bar[1];
- a->resources.pci.length[1] = bri_bar_length[1];
-
- if (diva_os_register_io_port(a, 1, a->resources.pci.bar[2],
- bar2_length, &a->port_name[0], 2)) {
- DBG_ERR(("A: BRI, can't register BAR[2]"))
- diva_bri_cleanup_adapter(a);
- return (-1);
- }
- a->resources.pci.addr[2] = (void *) (unsigned long) a->resources.pci.bar[2];
- a->resources.pci.length[2] = bar2_length;
-
- /*
- Set all memory areas
- */
- diva_bri_set_addresses(a);
-
- /*
- Get Serial Number
- */
- a->xdi_adapter.serialNo = diva_bri_get_serial_number(a);
-
- /*
- Register I/O ports with correct name now
- */
- if (diva_bri_reregister_io(a)) {
- diva_bri_cleanup_adapter(a);
- return (-1);
- }
-
- /*
- Initialize OS dependent objects
- */
- if (diva_os_initialize_spin_lock
- (&a->xdi_adapter.isr_spin_lock, "isr")) {
- diva_bri_cleanup_adapter(a);
- return (-1);
- }
- if (diva_os_initialize_spin_lock
- (&a->xdi_adapter.data_spin_lock, "data")) {
- diva_bri_cleanup_adapter(a);
- return (-1);
- }
-
- strcpy(a->xdi_adapter.req_soft_isr.dpc_thread_name, "kdivasbrid");
-
- if (diva_os_initialize_soft_isr(&a->xdi_adapter.req_soft_isr,
- DIDpcRoutine, &a->xdi_adapter)) {
- diva_bri_cleanup_adapter(a);
- return (-1);
- }
- /*
- Do not initialize second DPC - only one thread will be created
- */
- a->xdi_adapter.isr_soft_isr.object = a->xdi_adapter.req_soft_isr.object;
-
- /*
- Create entity table
- */
- a->xdi_adapter.Channels = CardProperties[a->CardOrdinal].Channels;
- a->xdi_adapter.e_max = CardProperties[a->CardOrdinal].E_info;
- a->xdi_adapter.e_tbl = diva_os_malloc(0, a->xdi_adapter.e_max * sizeof(E_INFO));
- if (!a->xdi_adapter.e_tbl) {
- diva_bri_cleanup_adapter(a);
- return (-1);
- }
- memset(a->xdi_adapter.e_tbl, 0x00, a->xdi_adapter.e_max * sizeof(E_INFO));
-
- /*
- Set up interface
- */
- a->xdi_adapter.a.io = &a->xdi_adapter;
- a->xdi_adapter.DIRequest = request;
- a->interface.cleanup_adapter_proc = diva_bri_cleanup_adapter;
- a->interface.cmd_proc = diva_bri_cmd_card_proc;
-
- p = DIVA_OS_MEM_ATTACH_RESET(&a->xdi_adapter);
- outpp(p, 0x41);
- DIVA_OS_MEM_DETACH_RESET(&a->xdi_adapter, p);
-
- prepare_maestra_functions(&a->xdi_adapter);
-
- a->dsp_mask = 0x00000003;
-
- /*
- Set IRQ handler
- */
- a->xdi_adapter.irq_info.irq_nr = a->resources.pci.irq;
- sprintf(a->xdi_adapter.irq_info.irq_name, "DIVA BRI %ld",
- (long) a->xdi_adapter.serialNo);
- if (diva_os_register_irq(a, a->xdi_adapter.irq_info.irq_nr,
- a->xdi_adapter.irq_info.irq_name)) {
- diva_bri_cleanup_adapter(a);
- return (-1);
- }
- a->xdi_adapter.irq_info.registered = 1;
-
- diva_log_info("%s IRQ:%d SerNo:%d", a->xdi_adapter.Properties.Name,
- a->resources.pci.irq, a->xdi_adapter.serialNo);
-
- return (0);
-}
-
-
-static int diva_bri_cleanup_adapter(diva_os_xdi_adapter_t *a)
-{
- int i;
-
- if (a->xdi_adapter.Initialized) {
- diva_bri_stop_adapter(a);
- }
-
- /*
- Remove ISR Handler
- */
- if (a->xdi_adapter.irq_info.registered) {
- diva_os_remove_irq(a, a->xdi_adapter.irq_info.irq_nr);
- }
- a->xdi_adapter.irq_info.registered = 0;
-
- if (a->resources.pci.addr[0] && a->resources.pci.bar[0]) {
- divasa_unmap_pci_bar(a->resources.pci.addr[0]);
- a->resources.pci.addr[0] = NULL;
- a->resources.pci.bar[0] = 0;
- }
-
- for (i = 1; i < 3; i++) {
- if (a->resources.pci.addr[i] && a->resources.pci.bar[i]) {
- diva_os_register_io_port(a, 0,
- a->resources.pci.bar[i],
-						 a->resources.pci.length[i],
- &a->port_name[0], i);
- a->resources.pci.addr[i] = NULL;
- a->resources.pci.bar[i] = 0;
- }
- }
-
- /*
- Free OS objects
- */
- diva_os_cancel_soft_isr(&a->xdi_adapter.req_soft_isr);
- diva_os_cancel_soft_isr(&a->xdi_adapter.isr_soft_isr);
-
- diva_os_remove_soft_isr(&a->xdi_adapter.req_soft_isr);
- a->xdi_adapter.isr_soft_isr.object = NULL;
-
- diva_os_destroy_spin_lock(&a->xdi_adapter.isr_spin_lock, "rm");
- diva_os_destroy_spin_lock(&a->xdi_adapter.data_spin_lock, "rm");
-
- /*
- Free memory
- */
- if (a->xdi_adapter.e_tbl) {
- diva_os_free(0, a->xdi_adapter.e_tbl);
- a->xdi_adapter.e_tbl = NULL;
- }
-
- return (0);
-}
-
-void diva_os_prepare_maestra_functions(PISDN_ADAPTER IoAdapter)
-{
-}
-
-/*
-** Get serial number
-*/
-static dword diva_bri_get_serial_number(diva_os_xdi_adapter_t *a)
-{
- dword serNo = 0;
- byte __iomem *confIO;
- word serHi, serLo;
- word __iomem *confMem;
-
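-	/*
-	  The serial number is stored as two 12-bit halves. Try the
-	  config I/O space first and fall back to the shared RAM
-	  window (BAR0) if the EEPROM copy is blank.
-	*/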
- confIO = DIVA_OS_MEM_ATTACH_CFG(&a->xdi_adapter);
- serHi = (word) (inppw(&confIO[0x22]) & 0x0FFF);
- serLo = (word) (inppw(&confIO[0x26]) & 0x0FFF);
- serNo = ((dword) serHi << 16) | (dword) serLo;
- DIVA_OS_MEM_DETACH_CFG(&a->xdi_adapter, confIO);
-
- if ((serNo == 0) || (serNo == 0xFFFFFFFF)) {
-		DBG_FTL(("W: BRI using BAR[0] to get card serial number"))
-
- confMem = (word __iomem *)DIVA_OS_MEM_ATTACH_RAM(&a->xdi_adapter);
- serHi = (word) (READ_WORD(&confMem[0x11]) & 0x0FFF);
- serLo = (word) (READ_WORD(&confMem[0x13]) & 0x0FFF);
- serNo = (((dword) serHi) << 16) | ((dword) serLo);
- DIVA_OS_MEM_DETACH_RAM(&a->xdi_adapter, confMem);
- }
-
- DBG_LOG(("Serial Number=%ld", serNo))
-
- return (serNo);
-}
-
-/*
-** Unregister I/O and register it with new name,
-** based on Serial Number
-*/
-static int diva_bri_reregister_io(diva_os_xdi_adapter_t *a)
-{
- int i;
-
- for (i = 1; i < 3; i++) {
- diva_os_register_io_port(a, 0, a->resources.pci.bar[i],
- a->resources.pci.length[i],
- &a->port_name[0], i);
- a->resources.pci.addr[i] = NULL;
- }
-
- sprintf(a->port_name, "DIVA BRI %ld",
- (long) a->xdi_adapter.serialNo);
-
- for (i = 1; i < 3; i++) {
- if (diva_os_register_io_port(a, 1, a->resources.pci.bar[i],
- a->resources.pci.length[i],
- &a->port_name[0], i)) {
- DBG_ERR(("A: failed to reregister BAR[%d]", i))
- return (-1);
- }
- a->resources.pci.addr[i] =
- (void *) (unsigned long) a->resources.pci.bar[i];
- }
-
- return (0);
-}
-
-/*
-** Process command from user mode
-*/
-static int
-diva_bri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
- diva_xdi_um_cfg_cmd_t *cmd, int length)
-{
- int ret = -1;
-
- if (cmd->adapter != a->controller) {
-		DBG_ERR(("A: bri_cmd, invalid controller=%d != %d",
- cmd->adapter, a->controller))
- return (-1);
- }
-
- switch (cmd->command) {
- case DIVA_XDI_UM_CMD_GET_CARD_ORDINAL:
- a->xdi_mbox.data_length = sizeof(dword);
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- *(dword *) a->xdi_mbox.data =
- (dword) a->CardOrdinal;
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- break;
-
- case DIVA_XDI_UM_CMD_GET_SERIAL_NR:
- a->xdi_mbox.data_length = sizeof(dword);
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- *(dword *) a->xdi_mbox.data =
- (dword) a->xdi_adapter.serialNo;
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- break;
-
- case DIVA_XDI_UM_CMD_GET_PCI_HW_CONFIG:
- a->xdi_mbox.data_length = sizeof(dword) * 9;
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- int i;
- dword *data = (dword *) a->xdi_mbox.data;
-
- for (i = 0; i < 8; i++) {
- *data++ = a->resources.pci.bar[i];
- }
- *data++ = (dword) a->resources.pci.irq;
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- break;
-
- case DIVA_XDI_UM_CMD_GET_CARD_STATE:
- a->xdi_mbox.data_length = sizeof(dword);
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- dword *data = (dword *) a->xdi_mbox.data;
- if (!a->xdi_adapter.port) {
- *data = 3;
- } else if (a->xdi_adapter.trapped) {
- *data = 2;
- } else if (a->xdi_adapter.Initialized) {
- *data = 1;
- } else {
- *data = 0;
- }
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- break;
-
- case DIVA_XDI_UM_CMD_RESET_ADAPTER:
- ret = diva_bri_reset_adapter(&a->xdi_adapter);
- break;
-
- case DIVA_XDI_UM_CMD_WRITE_SDRAM_BLOCK:
- ret = diva_bri_write_sdram_block(&a->xdi_adapter,
-						 cmd->command_data.write_sdram.offset,
-						 (byte *)&cmd[1],
-						 cmd->command_data.write_sdram.length);
- break;
-
- case DIVA_XDI_UM_CMD_START_ADAPTER:
- ret = diva_bri_start_adapter(&a->xdi_adapter,
-					     cmd->command_data.start.offset,
-					     cmd->command_data.start.features);
- break;
-
- case DIVA_XDI_UM_CMD_SET_PROTOCOL_FEATURES:
- a->xdi_adapter.features =
- cmd->command_data.features.features;
- a->xdi_adapter.a.protocol_capabilities =
- a->xdi_adapter.features;
-		DBG_TRC(("Set raw protocol features (%08x)",
-			 a->xdi_adapter.features))
-		ret = 0;
- break;
-
- case DIVA_XDI_UM_CMD_STOP_ADAPTER:
- ret = diva_bri_stop_adapter(a);
- break;
-
- case DIVA_XDI_UM_CMD_READ_XLOG_ENTRY:
- ret = diva_card_read_xlog(a);
- break;
-
- default:
-		DBG_ERR(("A: A(%d) invalid cmd=%d", a->controller,
-			 cmd->command))
-	}
-
- return (ret);
-}
-
-static int diva_bri_reset_adapter(PISDN_ADAPTER IoAdapter)
-{
- byte __iomem *addrHi, *addrLo, *ioaddr;
- dword i;
- byte __iomem *Port;
-
- if (!IoAdapter->port) {
- return (-1);
- }
- if (IoAdapter->Initialized) {
-		DBG_ERR(("A: A(%d) can't reset BRI adapter - please stop first",
-			 IoAdapter->ANum))
-		return (-1);
- }
- (*(IoAdapter->rstFnc)) (IoAdapter);
- diva_os_wait(100);
- Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
- addrHi = Port +
- ((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH);
- addrLo = Port + ADDR;
- ioaddr = Port + DATA;
- /*
- recover
- */
- outpp(addrHi, (byte) 0);
- outppw(addrLo, (word) 0);
- outppw(ioaddr, (word) 0);
- /*
- clear shared memory
- */
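-	/*
-	  One address setup is enough: the data port advances the
-	  shared RAM address on every write, so 0x8000 word writes
-	  clear the whole window.
-	*/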
- outpp(addrHi,
- (byte) (
- (IoAdapter->MemoryBase + IoAdapter->MemorySize -
- BRI_SHARED_RAM_SIZE) >> 16));
- outppw(addrLo, 0);
- for (i = 0; i < 0x8000; outppw(ioaddr, 0), ++i);
- diva_os_wait(100);
-
- /*
- clear signature
- */
- outpp(addrHi,
- (byte) (
- (IoAdapter->MemoryBase + IoAdapter->MemorySize -
- BRI_SHARED_RAM_SIZE) >> 16));
- outppw(addrLo, 0x1e);
- outpp(ioaddr, 0);
- outpp(ioaddr, 0);
-
- outpp(addrHi, (byte) 0);
- outppw(addrLo, (word) 0);
- outppw(ioaddr, (word) 0);
-
- DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port);
-
- /*
- Forget all outstanding entities
- */
- IoAdapter->e_count = 0;
- if (IoAdapter->e_tbl) {
- memset(IoAdapter->e_tbl, 0x00,
- IoAdapter->e_max * sizeof(E_INFO));
- }
- IoAdapter->head = 0;
- IoAdapter->tail = 0;
- IoAdapter->assign = 0;
- IoAdapter->trapped = 0;
-
- memset(&IoAdapter->a.IdTable[0], 0x00,
- sizeof(IoAdapter->a.IdTable));
- memset(&IoAdapter->a.IdTypeTable[0], 0x00,
- sizeof(IoAdapter->a.IdTypeTable));
- memset(&IoAdapter->a.FlowControlIdTable[0], 0x00,
- sizeof(IoAdapter->a.FlowControlIdTable));
- memset(&IoAdapter->a.FlowControlSkipTable[0], 0x00,
- sizeof(IoAdapter->a.FlowControlSkipTable));
- memset(&IoAdapter->a.misc_flags_table[0], 0x00,
- sizeof(IoAdapter->a.misc_flags_table));
- memset(&IoAdapter->a.rx_stream[0], 0x00,
- sizeof(IoAdapter->a.rx_stream));
- memset(&IoAdapter->a.tx_stream[0], 0x00,
- sizeof(IoAdapter->a.tx_stream));
- memset(&IoAdapter->a.tx_pos[0], 0x00, sizeof(IoAdapter->a.tx_pos));
- memset(&IoAdapter->a.rx_pos[0], 0x00, sizeof(IoAdapter->a.rx_pos));
-
- return (0);
-}
-
-static int
-diva_bri_write_sdram_block(PISDN_ADAPTER IoAdapter,
- dword address, const byte *data, dword length)
-{
- byte __iomem *addrHi, *addrLo, *ioaddr;
- byte __iomem *Port;
-
- if (!IoAdapter->port) {
- return (-1);
- }
-
- Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
- addrHi = Port +
- ((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH);
- addrLo = Port + ADDR;
- ioaddr = Port + DATA;
-
- while (length--) {
- outpp(addrHi, (word) (address >> 16));
- outppw(addrLo, (word) (address & 0x0000ffff));
- outpp(ioaddr, *data++);
- address++;
- }
-
- DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port);
- return (0);
-}
-
-static int
-diva_bri_start_adapter(PISDN_ADAPTER IoAdapter,
- dword start_address, dword features)
-{
- byte __iomem *Port;
- dword i, test;
- byte __iomem *addrHi, *addrLo, *ioaddr;
- int started = 0;
- ADAPTER *a = &IoAdapter->a;
-
- if (IoAdapter->Initialized) {
-		DBG_ERR(("A: A(%d) bri_start_adapter, adapter already running",
-			 IoAdapter->ANum))
-		return (-1);
- }
- if (!IoAdapter->port) {
-		DBG_ERR(("A: A(%d) bri_start_adapter, adapter not mapped",
-			 IoAdapter->ANum))
-		return (-1);
- }
-
- sprintf(IoAdapter->Name, "A(%d)", (int) IoAdapter->ANum);
- DBG_LOG(("A(%d) start BRI", IoAdapter->ANum))
-
- Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
- addrHi = Port +
- ((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH);
- addrLo = Port + ADDR;
- ioaddr = Port + DATA;
-
- outpp(addrHi,
- (byte) (
- (IoAdapter->MemoryBase + IoAdapter->MemorySize -
- BRI_SHARED_RAM_SIZE) >> 16));
- outppw(addrLo, 0x1e);
- outppw(ioaddr, 0x00);
- DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port);
-
- /*
- start the protocol code
- */
- Port = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
- outpp(Port, 0x08);
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, Port);
-
- Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
- addrHi = Port +
- ((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH);
- addrLo = Port + ADDR;
- ioaddr = Port + DATA;
- /*
- wait for signature (max. 3 seconds)
- */
- for (i = 0; i < 300; ++i) {
- diva_os_wait(10);
- outpp(addrHi,
- (byte) (
- (IoAdapter->MemoryBase +
- IoAdapter->MemorySize -
- BRI_SHARED_RAM_SIZE) >> 16));
- outppw(addrLo, 0x1e);
- test = (dword) inppw(ioaddr);
- if (test == 0x4447) {
-			DBG_LOG(("Protocol startup time %d.%02d seconds",
-				 (i / 100), (i % 100)))
- started = 1;
- break;
- }
- }
- DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port);
-
- if (!started) {
- DBG_FTL(("A: A(%d) %s: Adapter selftest failed 0x%04X",
- IoAdapter->ANum, IoAdapter->Properties.Name,
- test))
- (*(IoAdapter->trapFnc)) (IoAdapter);
- return (-1);
- }
-
- IoAdapter->Initialized = 1;
-
- /*
- Check Interrupt
- */
- IoAdapter->IrqCount = 0;
- a->ReadyInt = 1;
-
- if (IoAdapter->reset) {
- Port = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
- outpp(Port, 0x41);
- DIVA_OS_MEM_DETACH_RESET(IoAdapter, Port);
- }
-
- a->ram_out(a, &PR_RAM->ReadyInt, 1);
- for (i = 0; ((!IoAdapter->IrqCount) && (i < 100)); i++) {
- diva_os_wait(10);
- }
- if (!IoAdapter->IrqCount) {
-		DBG_ERR(("A: A(%d) interrupt test failed",
-			 IoAdapter->ANum))
- IoAdapter->Initialized = 0;
- IoAdapter->stop(IoAdapter);
- return (-1);
- }
-
- IoAdapter->Properties.Features = (word) features;
- diva_xdi_display_adapter_features(IoAdapter->ANum);
- DBG_LOG(("A(%d) BRI adapter successfully started", IoAdapter->ANum))
- /*
- Register with DIDD
- */
- diva_xdi_didd_register_adapter(IoAdapter->ANum);
-
- return (0);
-}
-
-static void diva_bri_clear_interrupts(diva_os_xdi_adapter_t *a)
-{
- PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
-
- /*
- clear any pending interrupt
- */
- IoAdapter->disIrq(IoAdapter);
-
- IoAdapter->tst_irq(&IoAdapter->a);
- IoAdapter->clr_irq(&IoAdapter->a);
- IoAdapter->tst_irq(&IoAdapter->a);
-
- /*
- kill pending dpcs
- */
- diva_os_cancel_soft_isr(&IoAdapter->req_soft_isr);
- diva_os_cancel_soft_isr(&IoAdapter->isr_soft_isr);
-}
-
-/*
-** Stop card
-*/
-static int diva_bri_stop_adapter(diva_os_xdi_adapter_t *a)
-{
- PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
- int i = 100;
-
- if (!IoAdapter->port) {
- return (-1);
- }
- if (!IoAdapter->Initialized) {
- DBG_ERR(("A: A(%d) can't stop BRI adapter - not running",
- IoAdapter->ANum))
- return (-1); /* nothing to stop */
- }
- IoAdapter->Initialized = 0;
-
- /*
- Disconnect Adapter from DIDD
- */
- diva_xdi_didd_remove_adapter(IoAdapter->ANum);
-
- /*
- Stop interrupts
- */
- a->clear_interrupts_proc = diva_bri_clear_interrupts;
- IoAdapter->a.ReadyInt = 1;
- IoAdapter->a.ram_inc(&IoAdapter->a, &PR_RAM->ReadyInt);
- do {
- diva_os_sleep(10);
- } while (i-- && a->clear_interrupts_proc);
- if (a->clear_interrupts_proc) {
- diva_bri_clear_interrupts(a);
- a->clear_interrupts_proc = NULL;
- DBG_ERR(("A: A(%d) no final interrupt from BRI adapter",
- IoAdapter->ANum))
- }
- IoAdapter->a.ReadyInt = 0;
-
- /*
- Stop and reset adapter
- */
- IoAdapter->stop(IoAdapter);
-
- return (0);
-}
diff --git a/drivers/isdn/hardware/eicon/os_bri.h b/drivers/isdn/hardware/eicon/os_bri.h
deleted file mode 100644
index 37c92cc53ded..000000000000
--- a/drivers/isdn/hardware/eicon/os_bri.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: os_bri.h,v 1.1.2.2 2001/02/08 12:25:44 armin Exp $ */
-
-#ifndef __DIVA_OS_BRI_REV_1_H__
-#define __DIVA_OS_BRI_REV_1_H__
-
-int diva_bri_init_card(diva_os_xdi_adapter_t *a);
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/os_capi.h b/drivers/isdn/hardware/eicon/os_capi.h
deleted file mode 100644
index e72394b95d50..000000000000
--- a/drivers/isdn/hardware/eicon/os_capi.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* $Id: os_capi.h,v 1.7 2003/04/12 21:40:49 schindler Exp $
- *
- * ISDN interface module for Eicon active cards DIVA.
- * CAPI Interface OS include files
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-#ifndef __OS_CAPI_H__
-#define __OS_CAPI_H__
-
-#include <linux/capi.h>
-#include <linux/kernelcapi.h>
-#include <linux/isdn/capiutil.h>
-#include <linux/isdn/capilli.h>
-
-#endif /* __OS_CAPI_H__ */
diff --git a/drivers/isdn/hardware/eicon/os_pri.c b/drivers/isdn/hardware/eicon/os_pri.c
deleted file mode 100644
index b20f1fb89d14..000000000000
--- a/drivers/isdn/hardware/eicon/os_pri.c
+++ /dev/null
@@ -1,1053 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* $Id: os_pri.c,v 1.32 2004/03/21 17:26:01 armin Exp $ */
-
-#include "platform.h"
-#include "debuglib.h"
-#include "cardtype.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di_defs.h"
-#include "dsp_defs.h"
-#include "di.h"
-#include "io.h"
-
-#include "xdi_msg.h"
-#include "xdi_adapter.h"
-#include "os_pri.h"
-#include "diva_pci.h"
-#include "mi_pc.h"
-#include "pc_maint.h"
-#include "dsp_tst.h"
-#include "diva_dma.h"
-#include "dsrv_pri.h"
-
-/* --------------------------------------------------------------------------
- OS Dependent part of XDI driver for DIVA PRI Adapter
-
- DSP detection/validation by Anthony Booth (Eicon Networks, www.eicon.com)
- -------------------------------------------------------------------------- */
-
-#define DIVA_PRI_NO_PCI_BIOS_WORKAROUND 1
-
-extern int diva_card_read_xlog(diva_os_xdi_adapter_t *a);
-
-/*
-** IMPORTS
-*/
-extern void prepare_pri_functions(PISDN_ADAPTER IoAdapter);
-extern void prepare_pri2_functions(PISDN_ADAPTER IoAdapter);
-extern void diva_xdi_display_adapter_features(int card);
-
-static int diva_pri_cleanup_adapter(diva_os_xdi_adapter_t *a);
-static int diva_pri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
- diva_xdi_um_cfg_cmd_t *cmd, int length);
-static int pri_get_serial_number(diva_os_xdi_adapter_t *a);
-static int diva_pri_stop_adapter(diva_os_xdi_adapter_t *a);
-static dword diva_pri_detect_dsps(diva_os_xdi_adapter_t *a);
-
-/*
-** Check card revision
-*/
-static int pri_is_rev_2_card(int card_ordinal)
-{
- switch (card_ordinal) {
- case CARDTYPE_DIVASRV_P_30M_V2_PCI:
- case CARDTYPE_DIVASRV_VOICE_P_30M_V2_PCI:
- return (1);
- }
- return (0);
-}
-
-static void diva_pri_set_addresses(diva_os_xdi_adapter_t *a)
-{
- a->resources.pci.mem_type_id[MEM_TYPE_ADDRESS] = 0;
- a->resources.pci.mem_type_id[MEM_TYPE_CONTROL] = 2;
- a->resources.pci.mem_type_id[MEM_TYPE_CONFIG] = 4;
- a->resources.pci.mem_type_id[MEM_TYPE_RAM] = 0;
- a->resources.pci.mem_type_id[MEM_TYPE_RESET] = 2;
- a->resources.pci.mem_type_id[MEM_TYPE_CFG] = 4;
- a->resources.pci.mem_type_id[MEM_TYPE_PROM] = 3;
-
- a->xdi_adapter.Address = a->resources.pci.addr[0];
- a->xdi_adapter.Control = a->resources.pci.addr[2];
- a->xdi_adapter.Config = a->resources.pci.addr[4];
-
- a->xdi_adapter.ram = a->resources.pci.addr[0];
- a->xdi_adapter.ram += MP_SHARED_RAM_OFFSET;
-
- a->xdi_adapter.reset = a->resources.pci.addr[2];
- a->xdi_adapter.reset += MP_RESET;
-
- a->xdi_adapter.cfg = a->resources.pci.addr[4];
- a->xdi_adapter.cfg += MP_IRQ_RESET;
-
- a->xdi_adapter.sdram_bar = a->resources.pci.bar[0];
-
- a->xdi_adapter.prom = a->resources.pci.addr[3];
-}
-
-/*
-** BAR0 - SDRAM, MP_MEMORY_SIZE (MP2_MEMORY_SIZE on Rev.2)
-** BAR1 - DEVICES, 0x1000
-** BAR2 - CONTROL (REG), 0x2000
-** BAR3 - FLASH (REG), 0x8000
-** BAR4 - CONFIG (CFG), 0x1000
-*/
-int diva_pri_init_card(diva_os_xdi_adapter_t *a)
-{
- int bar = 0;
- int pri_rev_2;
- unsigned long bar_length[5] = {
- MP_MEMORY_SIZE,
- 0x1000,
- 0x2000,
- 0x8000,
- 0x1000
- };
-
- pri_rev_2 = pri_is_rev_2_card(a->CardOrdinal);
-
- if (pri_rev_2) {
- bar_length[0] = MP2_MEMORY_SIZE;
- }
- /*
- Set properties
- */
- a->xdi_adapter.Properties = CardProperties[a->CardOrdinal];
- DBG_LOG(("Load %s", a->xdi_adapter.Properties.Name))
-
- /*
-	  First initialization step: get and check hardware resources.
-	  Do not map resources and do not access the card at this step
- */
- for (bar = 0; bar < 5; bar++) {
- a->resources.pci.bar[bar] =
- divasa_get_pci_bar(a->resources.pci.bus,
- a->resources.pci.func, bar,
- a->resources.pci.hdev);
- if (!a->resources.pci.bar[bar]
- || (a->resources.pci.bar[bar] == 0xFFFFFFF0)) {
- DBG_ERR(("A: invalid bar[%d]=%08x", bar,
- a->resources.pci.bar[bar]))
- return (-1);
- }
- }
- a->resources.pci.irq =
- (byte) divasa_get_pci_irq(a->resources.pci.bus,
- a->resources.pci.func,
- a->resources.pci.hdev);
- if (!a->resources.pci.irq) {
- DBG_ERR(("A: invalid irq"));
- return (-1);
- }
-
- /*
- Map all BAR's
- */
- for (bar = 0; bar < 5; bar++) {
- a->resources.pci.addr[bar] =
- divasa_remap_pci_bar(a, bar, a->resources.pci.bar[bar],
- bar_length[bar]);
- if (!a->resources.pci.addr[bar]) {
- DBG_ERR(("A: A(%d), can't map bar[%d]",
- a->controller, bar))
- diva_pri_cleanup_adapter(a);
- return (-1);
- }
- }
-
- /*
- Set all memory areas
- */
- diva_pri_set_addresses(a);
-
- /*
- Get Serial Number of this adapter
- */
- if (pri_get_serial_number(a)) {
- dword serNo;
- serNo = a->resources.pci.bar[1] & 0xffff0000;
- serNo |= ((dword) a->resources.pci.bus) << 8;
- serNo += (a->resources.pci.func + a->controller + 1);
- a->xdi_adapter.serialNo = serNo & ~0xFF000000;
- DBG_ERR(("A: A(%d) can't get Serial Number, generated serNo=%ld",
- a->controller, a->xdi_adapter.serialNo))
- }
-
-
- /*
- Initialize os objects
- */
- if (diva_os_initialize_spin_lock(&a->xdi_adapter.isr_spin_lock, "isr")) {
- diva_pri_cleanup_adapter(a);
- return (-1);
- }
- if (diva_os_initialize_spin_lock
- (&a->xdi_adapter.data_spin_lock, "data")) {
- diva_pri_cleanup_adapter(a);
- return (-1);
- }
-
- strcpy(a->xdi_adapter.req_soft_isr.dpc_thread_name, "kdivasprid");
-
- if (diva_os_initialize_soft_isr(&a->xdi_adapter.req_soft_isr,
- DIDpcRoutine, &a->xdi_adapter)) {
- diva_pri_cleanup_adapter(a);
- return (-1);
- }
-
- /*
- Do not initialize second DPC - only one thread will be created
- */
- a->xdi_adapter.isr_soft_isr.object =
- a->xdi_adapter.req_soft_isr.object;
-
- /*
- Next step of card initialization:
- set up all interface pointers
- */
- a->xdi_adapter.Channels = CardProperties[a->CardOrdinal].Channels;
- a->xdi_adapter.e_max = CardProperties[a->CardOrdinal].E_info;
-
- a->xdi_adapter.e_tbl =
- diva_os_malloc(0, a->xdi_adapter.e_max * sizeof(E_INFO));
- if (!a->xdi_adapter.e_tbl) {
- diva_pri_cleanup_adapter(a);
- return (-1);
- }
- memset(a->xdi_adapter.e_tbl, 0x00, a->xdi_adapter.e_max * sizeof(E_INFO));
-
- a->xdi_adapter.a.io = &a->xdi_adapter;
- a->xdi_adapter.DIRequest = request;
- a->interface.cleanup_adapter_proc = diva_pri_cleanup_adapter;
- a->interface.cmd_proc = diva_pri_cmd_card_proc;
-
- if (pri_rev_2) {
- prepare_pri2_functions(&a->xdi_adapter);
- } else {
- prepare_pri_functions(&a->xdi_adapter);
- }
-
- a->dsp_mask = diva_pri_detect_dsps(a);
-
- /*
- Allocate DMA map
- */
- if (pri_rev_2) {
- diva_init_dma_map(a->resources.pci.hdev,
- (struct _diva_dma_map_entry **) &a->xdi_adapter.dma_map, 32);
- }
-
- /*
- Set IRQ handler
- */
- a->xdi_adapter.irq_info.irq_nr = a->resources.pci.irq;
- sprintf(a->xdi_adapter.irq_info.irq_name,
- "DIVA PRI %ld", (long) a->xdi_adapter.serialNo);
-
- if (diva_os_register_irq(a, a->xdi_adapter.irq_info.irq_nr,
- a->xdi_adapter.irq_info.irq_name)) {
- diva_pri_cleanup_adapter(a);
- return (-1);
- }
- a->xdi_adapter.irq_info.registered = 1;
-
- diva_log_info("%s IRQ:%d SerNo:%d", a->xdi_adapter.Properties.Name,
- a->resources.pci.irq, a->xdi_adapter.serialNo);
-
- return (0);
-}
-
-static int diva_pri_cleanup_adapter(diva_os_xdi_adapter_t *a)
-{
- int bar = 0;
-
- /*
- Stop Adapter if adapter is running
- */
- if (a->xdi_adapter.Initialized) {
- diva_pri_stop_adapter(a);
- }
-
- /*
- Remove ISR Handler
- */
- if (a->xdi_adapter.irq_info.registered) {
- diva_os_remove_irq(a, a->xdi_adapter.irq_info.irq_nr);
- }
- a->xdi_adapter.irq_info.registered = 0;
-
- /*
- Step 1: unmap all BAR's, if any was mapped
- */
- for (bar = 0; bar < 5; bar++) {
- if (a->resources.pci.bar[bar]
- && a->resources.pci.addr[bar]) {
- divasa_unmap_pci_bar(a->resources.pci.addr[bar]);
- a->resources.pci.bar[bar] = 0;
- a->resources.pci.addr[bar] = NULL;
- }
- }
-
- /*
- Free OS objects
- */
- diva_os_cancel_soft_isr(&a->xdi_adapter.isr_soft_isr);
- diva_os_cancel_soft_isr(&a->xdi_adapter.req_soft_isr);
-
- diva_os_remove_soft_isr(&a->xdi_adapter.req_soft_isr);
- a->xdi_adapter.isr_soft_isr.object = NULL;
-
- diva_os_destroy_spin_lock(&a->xdi_adapter.isr_spin_lock, "rm");
- diva_os_destroy_spin_lock(&a->xdi_adapter.data_spin_lock, "rm");
-
- /*
-	  Free memory occupied by XDI adapter
- */
- if (a->xdi_adapter.e_tbl) {
- diva_os_free(0, a->xdi_adapter.e_tbl);
- a->xdi_adapter.e_tbl = NULL;
- }
- a->xdi_adapter.Channels = 0;
- a->xdi_adapter.e_max = 0;
-
-
- /*
- Free adapter DMA map
- */
- diva_free_dma_map(a->resources.pci.hdev,
-			  (struct _diva_dma_map_entry *) a->xdi_adapter.dma_map);
- a->xdi_adapter.dma_map = NULL;
-
-
- /*
- Detach this adapter from debug driver
- */
-
- return (0);
-}
-
-/*
-** Activate On Board Boot Loader
-*/
-static int diva_pri_reset_adapter(PISDN_ADAPTER IoAdapter)
-{
- dword i;
- struct mp_load __iomem *boot;
-
- if (!IoAdapter->Address || !IoAdapter->reset) {
- return (-1);
- }
- if (IoAdapter->Initialized) {
- DBG_ERR(("A: A(%d) can't reset PRI adapter - please stop first",
- IoAdapter->ANum))
- return (-1);
- }
-
- boot = (struct mp_load __iomem *) DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
- WRITE_DWORD(&boot->err, 0);
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
-
- IoAdapter->rstFnc(IoAdapter);
-
- diva_os_wait(10);
-
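-	/*
-	  The boot code keeps advancing 'live'; an unchanged counter
-	  after 10 ms means the on-board CPU never came up.
-	*/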
- boot = (struct mp_load __iomem *) DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
- i = READ_DWORD(&boot->live);
-
- diva_os_wait(10);
- if (i == READ_DWORD(&boot->live)) {
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
- DBG_ERR(("A: A(%d) CPU on PRI %ld is not alive!",
- IoAdapter->ANum, IoAdapter->serialNo))
- return (-1);
- }
- if (READ_DWORD(&boot->err)) {
- DBG_ERR(("A: A(%d) PRI %ld Board Selftest failed, error=%08lx",
- IoAdapter->ANum, IoAdapter->serialNo,
- READ_DWORD(&boot->err)))
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
- return (-1);
- }
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
-
- /*
- Forget all outstanding entities
- */
- IoAdapter->e_count = 0;
- if (IoAdapter->e_tbl) {
- memset(IoAdapter->e_tbl, 0x00,
- IoAdapter->e_max * sizeof(E_INFO));
- }
- IoAdapter->head = 0;
- IoAdapter->tail = 0;
- IoAdapter->assign = 0;
- IoAdapter->trapped = 0;
-
- memset(&IoAdapter->a.IdTable[0], 0x00,
- sizeof(IoAdapter->a.IdTable));
- memset(&IoAdapter->a.IdTypeTable[0], 0x00,
- sizeof(IoAdapter->a.IdTypeTable));
- memset(&IoAdapter->a.FlowControlIdTable[0], 0x00,
- sizeof(IoAdapter->a.FlowControlIdTable));
- memset(&IoAdapter->a.FlowControlSkipTable[0], 0x00,
- sizeof(IoAdapter->a.FlowControlSkipTable));
- memset(&IoAdapter->a.misc_flags_table[0], 0x00,
- sizeof(IoAdapter->a.misc_flags_table));
- memset(&IoAdapter->a.rx_stream[0], 0x00,
- sizeof(IoAdapter->a.rx_stream));
- memset(&IoAdapter->a.tx_stream[0], 0x00,
- sizeof(IoAdapter->a.tx_stream));
- memset(&IoAdapter->a.tx_pos[0], 0x00, sizeof(IoAdapter->a.tx_pos));
- memset(&IoAdapter->a.rx_pos[0], 0x00, sizeof(IoAdapter->a.rx_pos));
-
- return (0);
-}
-
-static int
-diva_pri_write_sdram_block(PISDN_ADAPTER IoAdapter,
- dword address,
- const byte *data, dword length, dword limit)
-{
- byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
- byte __iomem *mem = p;
-
- if (((address + length) >= limit) || !mem) {
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p);
- DBG_ERR(("A: A(%d) write PRI address=0x%08lx",
- IoAdapter->ANum, address + length))
- return (-1);
- }
- mem += address;
-
- /* memcpy_toio(), maybe? */
- while (length--) {
- WRITE_BYTE(mem++, *data++);
- }
-
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, p);
- return (0);
-}
-
-static int
-diva_pri_start_adapter(PISDN_ADAPTER IoAdapter,
- dword start_address, dword features)
-{
- dword i;
- int started = 0;
- byte __iomem *p;
- struct mp_load __iomem *boot = (struct mp_load __iomem *) DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
- ADAPTER *a = &IoAdapter->a;
-
- if (IoAdapter->Initialized) {
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
- DBG_ERR(("A: A(%d) pri_start_adapter, adapter already running",
- IoAdapter->ANum))
- return (-1);
- }
- if (!boot) {
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
- DBG_ERR(("A: PRI %ld can't start, adapter not mapped",
- IoAdapter->serialNo))
- return (-1);
- }
-
- sprintf(IoAdapter->Name, "A(%d)", (int) IoAdapter->ANum);
- DBG_LOG(("A(%d) start PRI at 0x%08lx", IoAdapter->ANum,
- start_address))
-
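-	/*
-	  Hand the start address to the boot loader; command 3 starts
-	  execution, and the protocol code reports startup with 0x4447
-	  in the high word of the signature field.
-	*/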
- WRITE_DWORD(&boot->addr, start_address);
- WRITE_DWORD(&boot->cmd, 3);
-
- for (i = 0; i < 300; ++i) {
- diva_os_wait(10);
- if ((READ_DWORD(&boot->signature) >> 16) == 0x4447) {
- DBG_LOG(("A(%d) Protocol startup time %d.%02d seconds",
- IoAdapter->ANum, (i / 100), (i % 100)))
- started = 1;
- break;
- }
- }
-
- if (!started) {
- byte __iomem *p = (byte __iomem *)boot;
- dword TrapId;
- dword debug;
- TrapId = READ_DWORD(&p[0x80]);
- debug = READ_DWORD(&p[0x1c]);
- DBG_ERR(("A(%d) Adapter start failed 0x%08lx, TrapId=%08lx, debug=%08lx",
- IoAdapter->ANum, READ_DWORD(&boot->signature),
- TrapId, debug))
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
- if (IoAdapter->trapFnc) {
- (*(IoAdapter->trapFnc)) (IoAdapter);
- }
- IoAdapter->stop(IoAdapter);
- return (-1);
- }
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, boot);
-
- IoAdapter->Initialized = true;
-
- /*
- Check Interrupt
- */
- IoAdapter->IrqCount = 0;
- p = DIVA_OS_MEM_ATTACH_CFG(IoAdapter);
- WRITE_DWORD(p, (dword)~0x03E00000);
- DIVA_OS_MEM_DETACH_CFG(IoAdapter, p);
- a->ReadyInt = 1;
- a->ram_out(a, &PR_RAM->ReadyInt, 1);
-
- for (i = 100; !IoAdapter->IrqCount && (i-- > 0); diva_os_wait(10));
-
- if (!IoAdapter->IrqCount) {
- DBG_ERR(("A: A(%d) interrupt test failed",
- IoAdapter->ANum))
- IoAdapter->Initialized = false;
- IoAdapter->stop(IoAdapter);
- return (-1);
- }
-
- IoAdapter->Properties.Features = (word) features;
-
- diva_xdi_display_adapter_features(IoAdapter->ANum);
-
- DBG_LOG(("A(%d) PRI adapter successfully started", IoAdapter->ANum))
- /*
- Register with DIDD
- */
- diva_xdi_didd_register_adapter(IoAdapter->ANum);
-
- return (0);
-}
-
-static void diva_pri_clear_interrupts(diva_os_xdi_adapter_t *a)
-{
- PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
-
- /*
- clear any pending interrupt
- */
- IoAdapter->disIrq(IoAdapter);
-
- IoAdapter->tst_irq(&IoAdapter->a);
- IoAdapter->clr_irq(&IoAdapter->a);
- IoAdapter->tst_irq(&IoAdapter->a);
-
- /*
- kill pending dpcs
- */
- diva_os_cancel_soft_isr(&IoAdapter->req_soft_isr);
- diva_os_cancel_soft_isr(&IoAdapter->isr_soft_isr);
-}
-
-/*
-** Stop Adapter, but do not unmap/unregister - adapter
-** will be restarted later
-*/
-static int diva_pri_stop_adapter(diva_os_xdi_adapter_t *a)
-{
- PISDN_ADAPTER IoAdapter = &a->xdi_adapter;
- int i = 100;
-
- if (!IoAdapter->ram) {
- return (-1);
- }
- if (!IoAdapter->Initialized) {
- DBG_ERR(("A: A(%d) can't stop PRI adapter - not running",
- IoAdapter->ANum))
- return (-1); /* nothing to stop */
- }
- IoAdapter->Initialized = 0;
-
- /*
- Disconnect Adapter from DIDD
- */
- diva_xdi_didd_remove_adapter(IoAdapter->ANum);
-
- /*
- Stop interrupts
- */
- a->clear_interrupts_proc = diva_pri_clear_interrupts;
- IoAdapter->a.ReadyInt = 1;
- IoAdapter->a.ram_inc(&IoAdapter->a, &PR_RAM->ReadyInt);
- do {
- diva_os_sleep(10);
- } while (i-- && a->clear_interrupts_proc);
-
- if (a->clear_interrupts_proc) {
- diva_pri_clear_interrupts(a);
- a->clear_interrupts_proc = NULL;
- DBG_ERR(("A: A(%d) no final interrupt from PRI adapter",
- IoAdapter->ANum))
- }
- IoAdapter->a.ReadyInt = 0;
-
- /*
- Stop and reset adapter
- */
- IoAdapter->stop(IoAdapter);
-
- return (0);
-}
-
-/*
-** Process commands from configuration/download framework and from
-** user mode
-**
-** return 0 on success
-*/
-static int
-diva_pri_cmd_card_proc(struct _diva_os_xdi_adapter *a,
- diva_xdi_um_cfg_cmd_t *cmd, int length)
-{
- int ret = -1;
-
- if (cmd->adapter != a->controller) {
- DBG_ERR(("A: pri_cmd, invalid controller=%d != %d",
- cmd->adapter, a->controller))
- return (-1);
- }
-
- switch (cmd->command) {
- case DIVA_XDI_UM_CMD_GET_CARD_ORDINAL:
- a->xdi_mbox.data_length = sizeof(dword);
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- *(dword *) a->xdi_mbox.data =
- (dword) a->CardOrdinal;
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- break;
-
- case DIVA_XDI_UM_CMD_GET_SERIAL_NR:
- a->xdi_mbox.data_length = sizeof(dword);
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- *(dword *) a->xdi_mbox.data =
- (dword) a->xdi_adapter.serialNo;
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- break;
-
- case DIVA_XDI_UM_CMD_GET_PCI_HW_CONFIG:
- a->xdi_mbox.data_length = sizeof(dword) * 9;
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- int i;
- dword *data = (dword *) a->xdi_mbox.data;
-
- for (i = 0; i < 8; i++) {
- *data++ = a->resources.pci.bar[i];
- }
- *data++ = (dword) a->resources.pci.irq;
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- break;
-
- case DIVA_XDI_UM_CMD_RESET_ADAPTER:
- ret = diva_pri_reset_adapter(&a->xdi_adapter);
- break;
-
- case DIVA_XDI_UM_CMD_WRITE_SDRAM_BLOCK:
- ret = diva_pri_write_sdram_block(&a->xdi_adapter,
-						 cmd->command_data.write_sdram.offset,
-						 (byte *)&cmd[1],
-						 cmd->command_data.write_sdram.length,
-						 pri_is_rev_2_card(a->CardOrdinal)
-						 ? MP2_MEMORY_SIZE : MP_MEMORY_SIZE);
- break;
-
- case DIVA_XDI_UM_CMD_STOP_ADAPTER:
- ret = diva_pri_stop_adapter(a);
- break;
-
- case DIVA_XDI_UM_CMD_START_ADAPTER:
- ret = diva_pri_start_adapter(&a->xdi_adapter,
-					     cmd->command_data.start.offset,
-					     cmd->command_data.start.features);
- break;
-
- case DIVA_XDI_UM_CMD_SET_PROTOCOL_FEATURES:
- a->xdi_adapter.features =
- cmd->command_data.features.features;
- a->xdi_adapter.a.protocol_capabilities =
- a->xdi_adapter.features;
- DBG_TRC(("Set raw protocol features (%08x)",
- a->xdi_adapter.features))
- ret = 0;
- break;
-
- case DIVA_XDI_UM_CMD_GET_CARD_STATE:
- a->xdi_mbox.data_length = sizeof(dword);
- a->xdi_mbox.data =
- diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- dword *data = (dword *) a->xdi_mbox.data;
- if (!a->xdi_adapter.ram ||
- !a->xdi_adapter.reset ||
- !a->xdi_adapter.cfg) {
- *data = 3;
- } else if (a->xdi_adapter.trapped) {
- *data = 2;
- } else if (a->xdi_adapter.Initialized) {
- *data = 1;
- } else {
- *data = 0;
- }
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- ret = 0;
- }
- break;
-
- case DIVA_XDI_UM_CMD_READ_XLOG_ENTRY:
- ret = diva_card_read_xlog(a);
- break;
-
- case DIVA_XDI_UM_CMD_READ_SDRAM:
- if (a->xdi_adapter.Address) {
-			if ((a->xdi_mbox.data_length =
-			     cmd->command_data.read_sdram.length)) {
-				if ((a->xdi_mbox.data_length +
-				     cmd->command_data.read_sdram.offset) <
-				    a->xdi_adapter.MemorySize) {
-					a->xdi_mbox.data =
-						diva_os_malloc(0, a->xdi_mbox.data_length);
- if (a->xdi_mbox.data) {
- byte __iomem *p = DIVA_OS_MEM_ATTACH_ADDRESS(&a->xdi_adapter);
- byte __iomem *src = p;
- byte *dst = a->xdi_mbox.data;
- dword len = a->xdi_mbox.data_length;
-
- src += cmd->command_data.read_sdram.offset;
-
- while (len--) {
- *dst++ = READ_BYTE(src++);
- }
- a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
- DIVA_OS_MEM_DETACH_ADDRESS(&a->xdi_adapter, p);
- ret = 0;
- }
- }
- }
- }
- break;
-
- default:
- DBG_ERR(("A: A(%d) invalid cmd=%d", a->controller,
- cmd->command))
- }
-
- return (ret);
-}
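
The dispatch above answers query commands through a small mailbox: the handler allocates a buffer, stores the value, and flags the mailbox busy until user mode collects it. Below is a minimal stand-alone sketch of that round-trip, not the driver's actual API: the types and the BUSY constant are stand-ins, and diva_os_malloc is replaced by plain malloc.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint32_t dword;

#define MBOX_BUSY 1              /* stand-in for DIVA_XDI_MBOX_BUSY */

struct xdi_mbox {
	dword data_length;
	void *data;
	int status;              /* MBOX_BUSY while an answer is pending */
};

/* Models DIVA_XDI_UM_CMD_GET_SERIAL_NR: fill the mailbox with one dword. */
static int cmd_get_serial(struct xdi_mbox *mbox, dword serialNo)
{
	mbox->data_length = sizeof(dword);
	mbox->data = malloc(mbox->data_length);
	if (!mbox->data)
		return -1;
	*(dword *)mbox->data = serialNo;
	mbox->status = MBOX_BUSY;
	return 0;
}

int main(void)
{
	struct xdi_mbox mbox = { 0, NULL, 0 };
	if (cmd_get_serial(&mbox, 12345) == 0) {
		/* the read side consumes the answer and releases the buffer */
		printf("serial=%u\n", (unsigned)*(dword *)mbox.data);
		free(mbox.data);
	}
	return 0;
}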
-
-/*
-** Get Serial Number
-*/
-static int pri_get_serial_number(diva_os_xdi_adapter_t *a)
-{
- byte data[64];
- int i;
- dword len = sizeof(data);
- volatile byte __iomem *config;
- volatile byte __iomem *flash;
- byte c;
-
-/*
- * First set some GT6401x config registers before accessing the BOOT-ROM
- */
- config = DIVA_OS_MEM_ATTACH_CONFIG(&a->xdi_adapter);
- c = READ_BYTE(&config[0xc3c]);
- if (!(c & 0x08)) {
-		WRITE_BYTE(&config[0xc3c], c | 0x08);	/* set the Base Address enable bit */
- }
- WRITE_BYTE(&config[LOW_BOOTCS_DREG], 0x00);
- WRITE_BYTE(&config[HI_BOOTCS_DREG], 0xFF);
- DIVA_OS_MEM_DETACH_CONFIG(&a->xdi_adapter, config);
-/*
- * Read only the last 64 bytes of manufacturing data
- */
- memset(data, '\0', len);
- flash = DIVA_OS_MEM_ATTACH_PROM(&a->xdi_adapter);
- for (i = 0; i < len; i++) {
- data[i] = READ_BYTE(&flash[0x8000 - len + i]);
- }
- DIVA_OS_MEM_DETACH_PROM(&a->xdi_adapter, flash);
-
- config = DIVA_OS_MEM_ATTACH_CONFIG(&a->xdi_adapter);
- WRITE_BYTE(&config[LOW_BOOTCS_DREG], 0xFC); /* Disable FLASH EPROM access */
- WRITE_BYTE(&config[HI_BOOTCS_DREG], 0xFF);
- DIVA_OS_MEM_DETACH_CONFIG(&a->xdi_adapter, config);
-
- if (memcmp(&data[48], "DIVAserverPR", 12)) {
-#if !defined(DIVA_PRI_NO_PCI_BIOS_WORKAROUND) /* { */
- word cmd = 0, cmd_org;
- void *addr;
- dword addr1, addr3, addr4;
- byte Bus, Slot;
- void *hdev;
- addr4 = a->resources.pci.bar[4];
- addr3 = a->resources.pci.bar[3]; /* flash */
- addr1 = a->resources.pci.bar[1]; /* unused */
-
- DBG_ERR(("A: apply Compaq BIOS workaround"))
- DBG_LOG(("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]))
-
- Bus = a->resources.pci.bus;
- Slot = a->resources.pci.func;
- hdev = a->resources.pci.hdev;
- PCIread(Bus, Slot, 0x04, &cmd_org, sizeof(cmd_org), hdev);
- PCIwrite(Bus, Slot, 0x04, &cmd, sizeof(cmd), hdev);
-
- PCIwrite(Bus, Slot, 0x14, &addr4, sizeof(addr4), hdev);
- PCIwrite(Bus, Slot, 0x20, &addr1, sizeof(addr1), hdev);
-
- PCIwrite(Bus, Slot, 0x04, &cmd_org, sizeof(cmd_org), hdev);
-
- addr = a->resources.pci.addr[1];
- a->resources.pci.addr[1] = a->resources.pci.addr[4];
- a->resources.pci.addr[4] = addr;
-
- addr1 = a->resources.pci.bar[1];
- a->resources.pci.bar[1] = a->resources.pci.bar[4];
- a->resources.pci.bar[4] = addr1;
-
- /*
- Try to read Flash again
- */
- len = sizeof(data);
-
- config = DIVA_OS_MEM_ATTACH_CONFIG(&a->xdi_adapter);
- if (!(config[0xc3c] & 0x08)) {
- config[0xc3c] |= 0x08; /* Base Address enable register */
- }
- config[LOW_BOOTCS_DREG] = 0x00;
- config[HI_BOOTCS_DREG] = 0xFF;
- DIVA_OS_MEM_DETACH_CONFIG(&a->xdi_adapter, config);
-
- memset(data, '\0', len);
- flash = DIVA_OS_MEM_ATTACH_PROM(&a->xdi_adapter);
- for (i = 0; i < len; i++) {
- data[i] = flash[0x8000 - len + i];
- }
-		DIVA_OS_MEM_DETACH_PROM(&a->xdi_adapter, flash);	/* detach the mapping taken above */
- config = DIVA_OS_MEM_ATTACH_CONFIG(&a->xdi_adapter);
- config[LOW_BOOTCS_DREG] = 0xFC;
- config[HI_BOOTCS_DREG] = 0xFF;
- DIVA_OS_MEM_DETACH_CONFIG(&a->xdi_adapter, config);
-
- if (memcmp(&data[48], "DIVAserverPR", 12)) {
- DBG_ERR(("A: failed to read serial number"))
- DBG_LOG(("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]))
- return (-1);
- }
-#else /* } { */
- DBG_ERR(("A: failed to read DIVA signature word"))
- DBG_LOG(("%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
- data[0], data[1], data[2], data[3],
- data[4], data[5], data[6], data[7]))
- DBG_LOG(("%02x:%02x:%02x:%02x", data[47], data[46],
- data[45], data[44]))
-#endif /* } */
- }
-
- a->xdi_adapter.serialNo =
- (data[47] << 24) | (data[46] << 16) | (data[45] << 8) |
- data[44];
- if (!a->xdi_adapter.serialNo
- || (a->xdi_adapter.serialNo == 0xffffffff)) {
- a->xdi_adapter.serialNo = 0;
- DBG_ERR(("A: failed to read serial number"))
- return (-1);
- }
-
- DBG_LOG(("Serial No. : %ld", a->xdi_adapter.serialNo))
- DBG_TRC(("Board Revision : %d.%02d", (int) data[41],
- (int) data[40]))
- DBG_TRC(("PLD revision : %d.%02d", (int) data[33],
- (int) data[32]))
- DBG_TRC(("Boot loader version : %d.%02d", (int) data[37],
- (int) data[36]))
-
- DBG_TRC(("Manufacturing Date : %d/%02d/%02d (yyyy/mm/dd)",
- (int) ((data[28] > 90) ? 1900 : 2000) +
- (int) data[28], (int) data[29], (int) data[30]))
-
- return (0);
-}
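
The function above locates the serial number and build data in the last 64 bytes of the boot flash. A small sketch of just the decode step, following the byte layout used above (the sample byte values are hypothetical):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t byte;
typedef uint32_t dword;

int main(void)
{
	byte data[64] = { 0 };
	/* hypothetical sample: serial 74565, manufactured 2003/07/15 */
	data[44] = 0x45; data[45] = 0x23; data[46] = 0x01; data[47] = 0x00;
	data[28] = 3; data[29] = 7; data[30] = 15;

	/* bytes 44..47 hold the serial number, least significant first */
	dword serial = ((dword)data[47] << 24) | ((dword)data[46] << 16) |
		       ((dword)data[45] << 8) | (dword)data[44];
	/* two-digit year: values above 90 mean 19xx, the rest 20xx */
	int year = ((data[28] > 90) ? 1900 : 2000) + data[28];

	printf("serial=%u date=%d/%02d/%02d\n",
	       (unsigned)serial, year, data[29], data[30]);
	return 0;
}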
-
-void diva_os_prepare_pri2_functions(PISDN_ADAPTER IoAdapter)
-{
-}
-
-void diva_os_prepare_pri_functions(PISDN_ADAPTER IoAdapter)
-{
-}
-
-/*
-** Checks presence of DSP on board
-*/
-static int
-dsp_check_presence(volatile byte __iomem *addr, volatile byte __iomem *data, int dsp)
-{
- word pattern;
-
- WRITE_WORD(addr, 0x4000);
- WRITE_WORD(data, DSP_SIGNATURE_PROBE_WORD);
-
- WRITE_WORD(addr, 0x4000);
- pattern = READ_WORD(data);
-
- if (pattern != DSP_SIGNATURE_PROBE_WORD) {
- DBG_TRC(("W: DSP[%d] %04x(is) != %04x(should)",
- dsp, pattern, DSP_SIGNATURE_PROBE_WORD))
- return (-1);
- }
-
- WRITE_WORD(addr, 0x4000);
- WRITE_WORD(data, ~DSP_SIGNATURE_PROBE_WORD);
-
- WRITE_WORD(addr, 0x4000);
- pattern = READ_WORD(data);
-
- if (pattern != (word)~DSP_SIGNATURE_PROBE_WORD) {
- DBG_ERR(("A: DSP[%d] %04x(is) != %04x(should)",
- dsp, pattern, (word)~DSP_SIGNATURE_PROBE_WORD))
- return (-2);
- }
-
- DBG_TRC(("DSP[%d] present", dsp))
-
- return (0);
-}
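
The probe above is the classic write/read-back pattern: latch a signature word, verify it, then verify its complement to rule out stuck bits. A self-contained sketch of the same logic against a fake latch instead of the MMIO address/data port pair (the probe word value is a stand-in):

#include <stdint.h>

typedef uint16_t word;

#define PROBE_WORD ((word)0x5a5a)   /* stand-in for DSP_SIGNATURE_PROBE_WORD */

struct fake_port { word latch; };

static void port_write(struct fake_port *p, word v) { p->latch = v; }
static word port_read(const struct fake_port *p)    { return p->latch; }

static int probe(struct fake_port *p)
{
	port_write(p, PROBE_WORD);
	if (port_read(p) != PROBE_WORD)
		return -1;                 /* nothing latched the pattern */
	port_write(p, (word)~PROBE_WORD);
	if (port_read(p) != (word)~PROBE_WORD)
		return -2;                 /* stuck bits: not operating */
	return 0;                          /* present and responding */
}

int main(void)
{
	struct fake_port p = { 0 };
	return probe(&p) ? 1 : 0;
}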
-
-
-/*
-** Check if DSPs are present and operating
-** Information about the detected DSPs is returned as a bit mask:
-** Bit 0  - DSP1
-** ...
-** Bit 29 - DSP30
-*/
-static dword diva_pri_detect_dsps(diva_os_xdi_adapter_t *a)
-{
- byte __iomem *base;
- byte __iomem *p;
- dword ret = 0;
- dword row_offset[7] = {
- 0x00000000,
- 0x00000800, /* 1 - ROW 1 */
- 0x00000840, /* 2 - ROW 2 */
- 0x00001000, /* 3 - ROW 3 */
- 0x00001040, /* 4 - ROW 4 */
-		0x00000000  /* 5 - ROW 0 (on-board DSPs) */
- };
-
- byte __iomem *dsp_addr_port;
- byte __iomem *dsp_data_port;
- byte row_state;
- int dsp_row = 0, dsp_index, dsp_num;
-
- if (!a->xdi_adapter.Control || !a->xdi_adapter.reset) {
- return (0);
- }
-
- p = DIVA_OS_MEM_ATTACH_RESET(&a->xdi_adapter);
- WRITE_BYTE(p, _MP_RISC_RESET | _MP_DSP_RESET);
- DIVA_OS_MEM_DETACH_RESET(&a->xdi_adapter, p);
- diva_os_wait(5);
-
- base = DIVA_OS_MEM_ATTACH_CONTROL(&a->xdi_adapter);
-
- for (dsp_num = 0; dsp_num < 30; dsp_num++) {
- dsp_row = dsp_num / 7 + 1;
- dsp_index = dsp_num % 7;
-
- dsp_data_port = base;
- dsp_addr_port = base;
-
- dsp_data_port += row_offset[dsp_row];
- dsp_addr_port += row_offset[dsp_row];
-
- dsp_data_port += (dsp_index * 8);
- dsp_addr_port += (dsp_index * 8) + 0x80;
-
- if (!dsp_check_presence
- (dsp_addr_port, dsp_data_port, dsp_num + 1)) {
- ret |= (1 << dsp_num);
- }
- }
- DIVA_OS_MEM_DETACH_CONTROL(&a->xdi_adapter, base);
-
- p = DIVA_OS_MEM_ATTACH_RESET(&a->xdi_adapter);
- WRITE_BYTE(p, _MP_RISC_RESET | _MP_LED1 | _MP_LED2);
- DIVA_OS_MEM_DETACH_RESET(&a->xdi_adapter, p);
- diva_os_wait(5);
-
- /*
- Verify modules
- */
- for (dsp_row = 0; dsp_row < 4; dsp_row++) {
- row_state = ((ret >> (dsp_row * 7)) & 0x7F);
- if (row_state && (row_state != 0x7F)) {
- for (dsp_index = 0; dsp_index < 7; dsp_index++) {
- if (!(row_state & (1 << dsp_index))) {
- DBG_ERR(("A: MODULE[%d]-DSP[%d] failed",
- dsp_row + 1,
- dsp_index + 1))
- }
- }
- }
- }
-
- if (!(ret & 0x10000000)) {
- DBG_ERR(("A: ON BOARD-DSP[1] failed"))
- }
- if (!(ret & 0x20000000)) {
- DBG_ERR(("A: ON BOARD-DSP[2] failed"))
- }
-
- /*
- Print module population now
- */
- DBG_LOG(("+-----------------------+"))
- DBG_LOG(("| DSP MODULE POPULATION |"))
- DBG_LOG(("+-----------------------+"))
- DBG_LOG(("| 1 | 2 | 3 | 4 |"))
- DBG_LOG(("+-----------------------+"))
- DBG_LOG(("| %s | %s | %s | %s |",
- ((ret >> (0 * 7)) & 0x7F) ? "Y" : "N",
- ((ret >> (1 * 7)) & 0x7F) ? "Y" : "N",
- ((ret >> (2 * 7)) & 0x7F) ? "Y" : "N",
- ((ret >> (3 * 7)) & 0x7F) ? "Y" : "N"))
- DBG_LOG(("+-----------------------+"))
-
- DBG_LOG(("DSP's(present-absent):%08x-%08x", ret,
- ~ret & 0x3fffffff))
-
- return (ret);
-}
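
Decoding the returned mask follows directly from the bit assignment above: seven bits per plug-in module, then two bits for the on-board DSPs. A sketch with a hypothetical mask value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0x3000007f;  /* hypothetical: module 1 + both on-board DSPs */

	for (int row = 0; row < 4; row++) {
		unsigned rs = (mask >> (row * 7)) & 0x7F;
		printf("module %d: %s\n", row + 1,
		       rs == 0x7F ? "fully populated" :
		       rs ? "partial (failed DSPs)" : "absent");
	}
	printf("on-board DSP1 %s, DSP2 %s\n",
	       (mask & 0x10000000) ? "ok" : "failed",
	       (mask & 0x20000000) ? "ok" : "failed");
	return 0;
}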
diff --git a/drivers/isdn/hardware/eicon/os_pri.h b/drivers/isdn/hardware/eicon/os_pri.h
deleted file mode 100644
index 0e91855b171a..000000000000
--- a/drivers/isdn/hardware/eicon/os_pri.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: os_pri.h,v 1.1.2.2 2001/02/08 12:25:44 armin Exp $ */
-
-#ifndef __DIVA_OS_PRI_REV_1_H__
-#define __DIVA_OS_PRI_REV_1_H__
-
-int diva_pri_init_card(diva_os_xdi_adapter_t *a);
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/pc.h b/drivers/isdn/hardware/eicon/pc.h
deleted file mode 100644
index 329c0c26abfb..000000000000
--- a/drivers/isdn/hardware/eicon/pc.h
+++ /dev/null
@@ -1,738 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef PC_H_INCLUDED /* { */
-#define PC_H_INCLUDED
-/*------------------------------------------------------------------*/
-/* buffer definition */
-/*------------------------------------------------------------------*/
-typedef struct {
- word length; /* length of data/parameter field */
- byte P[270]; /* data/parameter field */
-} PBUFFER;
-/*------------------------------------------------------------------*/
-/* dual port ram structure */
-/*------------------------------------------------------------------*/
-struct dual
-{
- byte Req; /* request register */
- byte ReqId; /* request task/entity identification */
- byte Rc; /* return code register */
- byte RcId; /* return code task/entity identification */
- byte Ind; /* Indication register */
- byte IndId; /* Indication task/entity identification */
- byte IMask; /* Interrupt Mask Flag */
- byte RNR; /* Receiver Not Ready (set by PC) */
- byte XLock; /* XBuffer locked Flag */
- byte Int; /* ISDN-S interrupt */
- byte ReqCh; /* Channel field for layer-3 Requests */
- byte RcCh; /* Channel field for layer-3 Returncodes */
- byte IndCh; /* Channel field for layer-3 Indications */
- byte MInd; /* more data indication field */
- word MLength; /* more data total packet length */
- byte ReadyInt; /* request field for ready interrupt */
- byte SWReg; /* Software register for special purposes */
- byte Reserved[11]; /* reserved space */
- byte InterfaceType; /* interface type 1=16K interface */
- word Signature; /* ISDN-S adapter Signature (GD) */
- PBUFFER XBuffer; /* Transmit Buffer */
- PBUFFER RBuffer; /* Receive Buffer */
-};
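
Because 'struct dual' mirrors a fixed dual-port RAM layout, its field offsets are part of the contract with the card firmware. One way to sanity-check them on the host side is offsetof(); a sketch with stand-in types (the printed offsets hold on common ABIs where this struct needs no padding, which the real driver relies on):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t byte;
typedef uint16_t word;

typedef struct { word length; byte P[270]; } PBUFFER;

struct dual {
	byte Req, ReqId, Rc, RcId, Ind, IndId, IMask, RNR, XLock, Int;
	byte ReqCh, RcCh, IndCh, MInd;
	word MLength;
	byte ReadyInt, SWReg, Reserved[11], InterfaceType;
	word Signature;
	PBUFFER XBuffer, RBuffer;
};

int main(void)
{
	printf("ReadyInt @ %zu, Signature @ %zu, XBuffer @ %zu\n",
	       offsetof(struct dual, ReadyInt),
	       offsetof(struct dual, Signature),
	       offsetof(struct dual, XBuffer));
	return 0;
}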
-/*------------------------------------------------------------------*/
-/* SWReg Values (0 means no command) */
-/*------------------------------------------------------------------*/
-#define SWREG_DIE_WITH_LEDON 0x01
-#define SWREG_HALT_CPU 0x02 /* Push CPU into a while (1) loop */
-/*------------------------------------------------------------------*/
-/* Id Fields Coding */
-/*------------------------------------------------------------------*/
-#define ID_MASK 0xe0 /* Mask for the ID field */
-#define GL_ERR_ID 0x1f /* ID for error reporting on global requests*/
-#define DSIG_ID 0x00 /* ID for D-channel signaling */
-#define NL_ID 0x20 /* ID for network-layer access (B or D) */
-#define BLLC_ID 0x60 /* ID for B-channel link level access */
-#define TASK_ID 0x80 /* ID for dynamic user tasks */
-#define TIMER_ID 0xa0 /* ID for timer task */
-#define TEL_ID 0xc0 /* ID for telephone support */
-#define MAN_ID 0xe0 /* ID for management */
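
With ID_MASK covering the top three bits, an Id byte splits into an entity class plus low bits; reading the low five bits as the entity instance is an assumption here (GL_ERR_ID occupies exactly those bits). A minimal classification sketch:

#include <stdint.h>
#include <stdio.h>

#define ID_MASK 0xe0
#define DSIG_ID 0x00
#define NL_ID   0x20
#define BLLC_ID 0x60

int main(void)
{
	uint8_t id = 0x23;               /* hypothetical ReqId value */

	switch (id & ID_MASK) {
	case DSIG_ID: puts("D-channel signaling entity");  break;
	case NL_ID:   puts("network-layer entity");        break;
	case BLLC_ID: puts("B-channel link-level entity"); break;
	default:      puts("other entity class");          break;
	}
	printf("low bits: 0x%02x\n", id & ~ID_MASK & 0xff);
	return 0;
}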
-/*------------------------------------------------------------------*/
-/* ASSIGN and REMOVE requests are the same for all entities */
-/*------------------------------------------------------------------*/
-#define ASSIGN 0x01
-#define UREMOVE 0xfe /* without return code */
-#define REMOVE 0xff
-/*------------------------------------------------------------------*/
-/* Timer Interrupt Task Interface */
-/*------------------------------------------------------------------*/
-#define ASSIGN_TIM 0x01
-#define REMOVE_TIM 0xff
-/*------------------------------------------------------------------*/
-/* dynamic user task interface */
-/*------------------------------------------------------------------*/
-#define ASSIGN_TSK 0x01
-#define REMOVE_TSK 0xff
-#define LOAD 0xf0
-#define RELOCATE 0xf1
-#define START 0xf2
-#define LOAD2 0xf3
-#define RELOCATE2 0xf4
-/*------------------------------------------------------------------*/
-/* dynamic user task messages */
-/*------------------------------------------------------------------*/
-#define TSK_B2 0x0000
-#define TSK_WAKEUP 0x2000
-#define TSK_TIMER 0x4000
-#define TSK_TSK 0x6000
-#define TSK_PC 0xe000
-/*------------------------------------------------------------------*/
-/* LL management primitives */
-/*------------------------------------------------------------------*/
-#define ASSIGN_LL 1 /* assign logical link */
-#define REMOVE_LL 0xff /* remove logical link */
-/*------------------------------------------------------------------*/
-/* LL service primitives */
-/*------------------------------------------------------------------*/
-#define LL_UDATA 1 /* link unit data request/indication */
-#define LL_ESTABLISH 2 /* link establish request/indication */
-#define LL_RELEASE 3 /* link release request/indication */
-#define LL_DATA 4 /* data request/indication */
-#define LL_LOCAL 5 /* switch to local operation (COM only) */
-#define LL_DATA_PEND 5 /* data pending indication (SDLC SHM only) */
-#define LL_REMOTE 6 /* switch to remote operation (COM only) */
-#define LL_TEST 8 /* link test request */
-#define LL_MDATA 9 /* more data request/indication */
-#define LL_BUDATA 10 /* broadcast unit data request/indication */
-#define LL_XID 12 /* XID command request/indication */
-#define LL_XID_R 13 /* XID response request/indication */
-/*------------------------------------------------------------------*/
-/* NL service primitives */
-/*------------------------------------------------------------------*/
-#define N_MDATA 1 /* more data to come REQ/IND */
-#define N_CONNECT 2 /* OSI N-CONNECT REQ/IND */
-#define N_CONNECT_ACK 3 /* OSI N-CONNECT CON/RES */
-#define N_DISC 4 /* OSI N-DISC REQ/IND */
-#define N_DISC_ACK 5 /* OSI N-DISC CON/RES */
-#define N_RESET 6 /* OSI N-RESET REQ/IND */
-#define N_RESET_ACK 7 /* OSI N-RESET CON/RES */
-#define N_DATA 8 /* OSI N-DATA REQ/IND */
-#define N_EDATA 9 /* OSI N-EXPEDITED DATA REQ/IND */
-#define N_UDATA 10 /* OSI D-UNIT-DATA REQ/IND */
-#define N_BDATA 11 /* BROADCAST-DATA REQ/IND */
-#define N_DATA_ACK 12 /* data ack ind for D-bit procedure */
-#define N_EDATA_ACK 13 /* data ack ind for INTERRUPT */
-#define N_XON 15 /* clear RNR state */
-#define N_COMBI_IND N_XON /* combined indication */
-#define N_Q_BIT 0x10 /* Q-bit for req/ind */
-#define N_M_BIT 0x20 /* M-bit for req/ind */
-#define N_D_BIT 0x40 /* D-bit for req/ind */
-/*------------------------------------------------------------------*/
-/* Signaling management primitives */
-/*------------------------------------------------------------------*/
-#define ASSIGN_SIG 1 /* assign signaling task */
-#define UREMOVE_SIG 0xfe /* remove signaling task without return code*/
-#define REMOVE_SIG 0xff /* remove signaling task */
-/*------------------------------------------------------------------*/
-/* Signaling service primitives */
-/*------------------------------------------------------------------*/
-#define CALL_REQ 1 /* call request */
-#define CALL_CON 1 /* call confirmation */
-#define CALL_IND 2 /* incoming call connected */
-#define LISTEN_REQ 2 /* listen request */
-#define HANGUP 3 /* hangup request/indication */
-#define SUSPEND 4 /* call suspend request/confirm */
-#define RESUME 5 /* call resume request/confirm */
-#define SUSPEND_REJ 6 /* suspend rejected indication */
-#define USER_DATA 8 /* user data for user to user signaling */
-#define CONGESTION 9 /* network congestion indication */
-#define INDICATE_REQ 10 /* request to indicate an incoming call */
-#define INDICATE_IND 10 /* indicates that there is an incoming call */
-#define CALL_RES 11 /* accept an incoming call */
-#define CALL_ALERT 12 /* send ALERT for incoming call */
-#define INFO_REQ 13 /* INFO request */
-#define INFO_IND 13 /* INFO indication */
-#define REJECT 14 /* reject an incoming call */
-#define RESOURCES 15 /* reserve B-Channel hardware resources */
-#define HW_CTRL 16 /* B-Channel hardware IOCTL req/ind */
-#define TEL_CTRL 16 /* Telephone control request/indication */
-#define STATUS_REQ 17 /* Request D-State (returned in INFO_IND) */
-#define FAC_REG_REQ 18 /* 1TR6 connection independent fac reg */
-#define FAC_REG_ACK 19 /* 1TR6 fac registration acknowledge */
-#define FAC_REG_REJ 20 /* 1TR6 fac registration reject */
-#define CALL_COMPLETE 21/* send a CALL_PROC for incoming call */
-#define SW_CTRL 22 /* extended software features */
-#define REGISTER_REQ 23 /* Q.931 connection independent reg req */
-#define REGISTER_IND 24 /* Q.931 connection independent reg ind */
-#define FACILITY_REQ 25 /* Q.931 connection independent fac req */
-#define FACILITY_IND 26 /* Q.931 connection independent fac ind */
-#define NCR_INFO_REQ 27 /* INFO_REQ with NULL CR */
-#define GCR_MIM_REQ 28 /* MANAGEMENT_INFO_REQ with global CR */
-#define SIG_CTRL 29 /* Control for Signalling Hardware */
-#define DSP_CTRL 30 /* Control for DSPs */
-#define LAW_REQ 31 /* Law config request for (returns info_i) */
-#define SPID_CTRL 32 /* Request/indication SPID related */
-#define NCR_FACILITY 33 /* Request/indication with NULL/DUMMY CR */
-#define CALL_HOLD 34 /* Request/indication to hold a CALL */
-#define CALL_RETRIEVE 35 /* Request/indication to retrieve a CALL */
-#define CALL_HOLD_ACK 36 /* OK of hold a CALL */
-#define CALL_RETRIEVE_ACK 37 /* OK of retrieve a CALL */
-#define CALL_HOLD_REJ 38 /* Reject of hold a CALL */
-#define CALL_RETRIEVE_REJ 39 /* Reject of retrieve a call */
-#define GCR_RESTART 40 /* Send/Receive Restart message */
-#define S_SERVICE 41 /* Send/Receive Supplementary Service */
-#define S_SERVICE_REJ 42 /* Reject Supplementary Service indication */
-#define S_SUPPORTED 43 /* Req/Ind to get Supported Services */
-#define STATUS_ENQ 44 /* Req to send the D-ch request if !state0 */
-#define CALL_GUARD 45 /* Req/Ind to use the FLAGS_CALL_OUTCHECK */
-#define CALL_GUARD_HP 46 /* Call Guard function to reject a call */
-#define CALL_GUARD_IF 47 /* Call Guard function, inform the appl */
-#define SSEXT_REQ 48 /* Supplem.Serv./QSIG specific request */
-#define SSEXT_IND 49 /* Supplem.Serv./QSIG specific indication */
-/* reserved commands for the US protocols */
-#define INT_3PTY_NIND 50 /* US specific indication */
-#define INT_CF_NIND 51 /* US specific indication */
-#define INT_3PTY_DROP 52 /* US specific indication */
-#define INT_MOVE_CONF 53 /* US specific indication */
-#define INT_MOVE_RC 54 /* US specific indication */
-#define INT_MOVE_FLIPPED_CONF 55 /* US specific indication */
-#define INT_X5NI_OK 56 /* internal transfer OK indication */
-#define INT_XDMS_START 57 /* internal transfer OK indication */
-#define INT_XDMS_STOP 58 /* internal transfer finish indication */
-#define INT_XDMS_STOP2 59 /* internal transfer send FA */
-#define INT_CUSTCONF_REJ 60 /* internal conference reject */
-#define INT_CUSTXFER 61 /* internal transfer request */
-#define INT_CUSTX_NIND 62 /* internal transfer ack */
-#define INT_CUSTXREJ_NIND 63 /* internal transfer rej */
-#define INT_X5NI_CF_XFER 64 /* internal transfer OK indication */
-#define VSWITCH_REQ 65 /* communication between protocol and */
-#define VSWITCH_IND 66 /* capifunctions for D-CH-switching */
-#define MWI_POLL 67 /* Message Waiting Status Request fkt */
-#define CALL_PEND_NOTIFY 68 /* notify capi to set new listen */
-#define DO_NOTHING              69 /* don't do anything if you get this       */
-#define INT_CT_REJ 70 /* ECT rejected internal command */
-#define CALL_HOLD_COMPLETE 71 /* In NT Mode indicate hold complete */
-#define CALL_RETRIEVE_COMPLETE 72 /* In NT Mode indicate retrieve complete */
-/*------------------------------------------------------------------*/
-/* management service primitives */
-/*------------------------------------------------------------------*/
-#define MAN_READ 2
-#define MAN_WRITE 3
-#define MAN_EXECUTE 4
-#define MAN_EVENT_ON 5
-#define MAN_EVENT_OFF 6
-#define MAN_LOCK 7
-#define MAN_UNLOCK 8
-#define MAN_INFO_IND 2
-#define MAN_EVENT_IND 3
-#define MAN_TRACE_IND 4
-#define MAN_COMBI_IND 9
-#define MAN_ESC 0x80
-/*------------------------------------------------------------------*/
-/* return code coding */
-/*------------------------------------------------------------------*/
-#define UNKNOWN_COMMAND 0x01 /* unknown command */
-#define WRONG_COMMAND 0x02 /* wrong command */
-#define WRONG_ID 0x03 /* unknown task/entity id */
-#define WRONG_CH 0x04 /* wrong task/entity id */
-#define UNKNOWN_IE 0x05 /* unknown information el. */
-#define WRONG_IE 0x06 /* wrong information el. */
-#define OUT_OF_RESOURCES 0x07 /* ISDN-S card out of res. */
-#define ISDN_GUARD_REJ 0x09 /* ISDN-Guard SuppServ rej */
-#define N_FLOW_CONTROL 0x10 /* Flow-Control, retry */
-#define ASSIGN_RC 0xe0 /* ASSIGN acknowledgement */
-#define ASSIGN_OK 0xef /* ASSIGN OK */
-#define OK_FC 0xfc /* Flow-Control RC */
-#define READY_INT 0xfd /* Ready interrupt */
-#define TIMER_INT 0xfe /* timer interrupt */
-#define OK 0xff /* command accepted */
-/*------------------------------------------------------------------*/
-/* information elements */
-/*------------------------------------------------------------------*/
-#define SHIFT 0x90 /* codeset shift */
-#define MORE 0xa0 /* more data */
-#define SDNCMPL 0xa1 /* sending complete */
-#define CL 0xb0 /* congestion level */
-/* codeset 0 */
-#define SMSG 0x00 /* segmented message */
-#define BC 0x04 /* Bearer Capability */
-#define CAU 0x08 /* cause */
-#define CAD 0x0c /* Connected address */
-#define CAI 0x10 /* call identity */
-#define CHI 0x18 /* channel identification */
-#define LLI 0x19 /* logical link id */
-#define CHA 0x1a /* charge advice */
-#define FTY 0x1c /* Facility */
-#define DT 0x29 /* ETSI date/time */
-#define KEY 0x2c /* keypad information element */
-#define UID 0x2d /* User id information element */
-#define DSP 0x28 /* display */
-#define SIG 0x34 /* signalling hardware control */
-#define OAD 0x6c /* origination address */
-#define OSA 0x6d /* origination sub-address */
-#define CPN 0x70 /* called party number */
-#define DSA 0x71 /* destination sub-address */
-#define RDX 0x73 /* redirecting number extended */
-#define RDN 0x74 /* redirecting number */
-#define RIN 0x76 /* redirection number */
-#define IUP 0x76 /* VN6 rerouter->PCS (codeset 6) */
-#define IPU 0x77 /* VN6 PCS->rerouter (codeset 6) */
-#define RI 0x79 /* restart indicator */
-#define MIE 0x7a /* management info element */
-#define LLC 0x7c /* low layer compatibility */
-#define HLC 0x7d /* high layer compatibility */
-#define UUI 0x7e /* user user information */
-#define ESC 0x7f /* escape extension */
-#define DLC 0x20 /* data link layer configuration */
-#define NLC 0x21 /* network layer configuration */
-#define REDIRECT_IE 0x22 /* redirection request/indication data */
-#define REDIRECT_NET_IE 0x23 /* redirection network override data */
-/* codeset 6 */
-#define SIN 0x01 /* service indicator */
-#define CIF 0x02 /* charging information */
-#define DATE 0x03 /* date */
-#define CPS 0x07 /* called party status */
-/*------------------------------------------------------------------*/
-/* ESC information elements */
-/*------------------------------------------------------------------*/
-#define MSGTYPEIE 0x7a /* Messagetype info element */
-#define CRIE 0x7b /* INFO info element */
-#define CODESET6IE 0xec /* Tunnel for Codeset 6 IEs */
-#define VSWITCHIE 0xed /* VSwitch info element */
-#define SSEXTIE 0xee /* Supplem. Service info element */
-#define PROFILEIE 0xef /* Profile info element */
-/*------------------------------------------------------------------*/
-/* TEL_CTRL contents */
-/*------------------------------------------------------------------*/
-#define RING_ON 0x01
-#define RING_OFF 0x02
-#define HANDS_FREE_ON 0x03
-#define HANDS_FREE_OFF 0x04
-#define ON_HOOK 0x80
-#define OFF_HOOK 0x90
-/* operation values used by ETSI supplementary services */
-#define THREE_PTY_BEGIN 0x04
-#define THREE_PTY_END 0x05
-#define ECT_EXECUTE 0x06
-#define ACTIVATION_DIVERSION 0x07
-#define DEACTIVATION_DIVERSION 0x08
-#define CALL_DEFLECTION 0x0D
-#define INTERROGATION_DIVERSION 0x0B
-#define INTERROGATION_SERV_USR_NR 0x11
-#define ACTIVATION_MWI 0x20
-#define DEACTIVATION_MWI 0x21
-#define MWI_INDICATION 0x22
-#define MWI_RESPONSE 0x23
-#define CONF_BEGIN 0x28
-#define CONF_ADD 0x29
-#define CONF_SPLIT 0x2a
-#define CONF_DROP 0x2b
-#define CONF_ISOLATE 0x2c
-#define CONF_REATTACH 0x2d
-#define CONF_PARTYDISC 0x2e
-#define CCBS_INFO_RETAIN 0x2f
-#define CCBS_ERASECALLLINKAGEID 0x30
-#define CCBS_STOP_ALERTING 0x31
-#define CCBS_REQUEST 0x32
-#define CCBS_DEACTIVATE 0x33
-#define CCBS_INTERROGATE 0x34
-#define CCBS_STATUS 0x35
-#define CCBS_ERASE 0x36
-#define CCBS_B_FREE 0x37
-#define CCNR_INFO_RETAIN 0x38
-#define CCBS_REMOTE_USER_FREE 0x39
-#define CCNR_REQUEST 0x3a
-#define CCNR_INTERROGATE 0x3b
-#define GET_SUPPORTED_SERVICES 0xff
-#define DIVERSION_PROCEDURE_CFU 0x70
-#define DIVERSION_PROCEDURE_CFB 0x71
-#define DIVERSION_PROCEDURE_CFNR 0x72
-#define DIVERSION_DEACTIVATION_CFU 0x80
-#define DIVERSION_DEACTIVATION_CFB 0x81
-#define DIVERSION_DEACTIVATION_CFNR 0x82
-#define DIVERSION_INTERROGATE_NUM 0x11
-#define DIVERSION_INTERROGATE_CFU 0x60
-#define DIVERSION_INTERROGATE_CFB 0x61
-#define DIVERSION_INTERROGATE_CFNR 0x62
-/* Service Masks */
-#define SMASK_HOLD_RETRIEVE 0x00000001
-#define SMASK_TERMINAL_PORTABILITY 0x00000002
-#define SMASK_ECT 0x00000004
-#define SMASK_3PTY 0x00000008
-#define SMASK_CALL_FORWARDING 0x00000010
-#define SMASK_CALL_DEFLECTION 0x00000020
-#define SMASK_MCID 0x00000040
-#define SMASK_CCBS 0x00000080
-#define SMASK_MWI 0x00000100
-#define SMASK_CCNR 0x00000200
-#define SMASK_CONF 0x00000400
-/* ----------------------------------------------
-   Types of transfers used to pass the
-   information in 'struct RC->Reserved2[8]'.
-   The information is transferred as 2 dwords
-   (2 4-byte unsigned values).
-   The first of them is the transfer type, so
-   2^32-1 different messages can be encoded.
-   The meaning of the second dword depends on
-   the transfer type.
-   ---------------------------------------------- */
-#define DIVA_RC_TYPE_NONE 0x00000000
-#define DIVA_RC_TYPE_REMOVE_COMPLETE 0x00000008
-#define DIVA_RC_TYPE_STREAM_PTR 0x00000009
-#define DIVA_RC_TYPE_CMA_PTR 0x0000000a
-#define DIVA_RC_TYPE_OK_FC 0x0000000b
-#define DIVA_RC_TYPE_RX_DMA 0x0000000c
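
Per the comment above, a return code carries a type dword followed by a type-dependent dword in Reserved2[8]. A sketch of packing and unpacking that pair (the filling side is simulated, and the second dword's value is hypothetical):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef uint8_t byte;
typedef uint32_t dword;

#define DIVA_RC_TYPE_REMOVE_COMPLETE 0x00000008

int main(void)
{
	byte Reserved2[8];
	const dword fill[2] = { DIVA_RC_TYPE_REMOVE_COMPLETE, 0xdeadbeef };
	memcpy(Reserved2, fill, sizeof(Reserved2)); /* as the card would fill it */

	dword type, arg;
	memcpy(&type, &Reserved2[0], sizeof(type));
	memcpy(&arg, &Reserved2[4], sizeof(arg));
	printf("type=0x%08x arg=0x%08x\n", (unsigned)type, (unsigned)arg);
	return 0;
}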
-/* ------------------------------------------------------
- IO Control codes for IN BAND SIGNALING
- ------------------------------------------------------ */
-#define CTRL_L1_SET_SIG_ID 5
-#define CTRL_L1_SET_DAD 6
-#define CTRL_L1_RESOURCES 7
-/* ------------------------------------------------------ */
-/* ------------------------------------------------------
- Layer 2 types
- ------------------------------------------------------ */
-#define X75T 1 /* x.75 for ttx */
-#define TRF 2 /* transparent with hdlc framing */
-#define TRF_IN 3 /* transparent with hdlc fr. inc. */
-#define SDLC 4 /* sdlc, sna layer-2 */
-#define X75 5 /* x.75 for btx */
-#define LAPD 6 /* lapd (Q.921) */
-#define X25_L2 7 /* x.25 layer-2 */
-#define V120_L2 8 /* V.120 layer-2 protocol */
-#define V42_IN 9 /* V.42 layer-2 protocol, incoming */
-#define V42 10 /* V.42 layer-2 protocol */
-#define MDM_ATP 11 /* AT Parser built in the L2 */
-#define X75_V42BIS 12 /* x.75 with V.42bis */
-#define RTPL2_IN 13 /* RTP layer-2 protocol, incoming */
-#define RTPL2 14 /* RTP layer-2 protocol */
-#define V120_V42BIS 15 /* V.120 asynchronous mode supporting V.42bis compression */
-#define LISTENER 27 /* Layer 2 to listen line */
-#define MTP2 28 /* MTP2 Layer 2 */
-#define PIAFS_CRC 29 /* PIAFS Layer 2 with CRC calculation at L2 */
-/* ------------------------------------------------------
- PIAFS DLC DEFINITIONS
- ------------------------------------------------------ */
-#define PIAFS_64K 0x01
-#define PIAFS_VARIABLE_SPEED 0x02
-#define PIAFS_CHINESE_SPEED 0x04
-#define PIAFS_UDATA_ABILITY_ID 0x80
-#define PIAFS_UDATA_ABILITY_DCDON 0x01
-#define PIAFS_UDATA_ABILITY_DDI 0x80
-/*
- DLC of PIAFS :
- Byte | 8 7 6 5 4 3 2 1
- -----+--------------------------------------------------------
- 0 | 0 0 1 0 0 0 0 0 Data Link Configuration
- 1 | X X X X X X X X Length of IE (at least 15 Bytes)
- 2 | 0 0 0 0 0 0 0 0 max. information field, LOW byte (not used, fix 73 Bytes)
- 3 | 0 0 0 0 0 0 0 0 max. information field, HIGH byte (not used, fix 73 Bytes)
- 4 | 0 0 0 0 0 0 0 0 address A (not used)
- 5 | 0 0 0 0 0 0 0 0 address B (not used)
- 6 | 0 0 0 0 0 0 0 0 Mode (not used, fix 128)
- 7 | 0 0 0 0 0 0 0 0 Window Size (not used, fix 127)
- 8 | X X X X X X X X XID Length, Low Byte (at least 7 Bytes)
- 9 | X X X X X X X X XID Length, High Byte
- 10 | 0 0 0 0 0 C V S PIAFS Protocol Speed configuration -> Note(1)
- | S = 0 -> Protocol Speed is 32K
- | S = 1 -> Protocol Speed is 64K
- | V = 0 -> Protocol Speed is fixed
- | V = 1 -> Protocol Speed is variable
- | C = 0 -> speed setting according to standard
- | C = 1 -> speed setting for chinese implementation
- 11 | 0 0 0 0 0 0 R T P0 - V42bis Compression enable/disable, Low Byte
- | T = 0 -> Transmit Direction enable
- | T = 1 -> Transmit Direction disable
- | R = 0 -> Receive Direction enable
- | R = 1 -> Receive Direction disable
- 13 | 0 0 0 0 0 0 0 0 P0 - V42bis Compression enable/disable, High Byte
- 14 | X X X X X X X X P1 - V42bis Dictionary Size, Low Byte
- 15 | X X X X X X X X P1 - V42bis Dictionary Size, High Byte
- 16 | X X X X X X X X P2 - V42bis String Length, Low Byte
- 17 | X X X X X X X X P2 - V42bis String Length, High Byte
- 18 | X X X X X X X X PIAFS extension length
- 19 | 1 0 0 0 0 0 0 0 PIAFS extension Id (0x80) - UDATA abilities
- 20 | U 0 0 0 0 0 0 D UDATA abilities -> Note (2)
- | up to now the following Bits are defined:
- | D - signal DCD ON
- | U - use extensive UDATA control communication
- | for DDI test application
- + Note (1): ----------+------+-----------------------------------------+
- | PIAFS Protocol | Bit | |
- | Speed configuration | S | Bit 1 - Protocol Speed |
- | | | 0 - 32K |
- | | | 1 - 64K (default) |
- | | V | Bit 2 - Variable Protocol Speed |
- | | | 0 - Speed is fix |
- | | | 1 - Speed is variable (default) |
- | | | OVERWRITES 32k Bit 1 |
- | | C | Bit 3 0 - Speed Settings according to |
- | | | PIAFS specification |
- | | | 1 - Speed setting for chinese |
- | | | PIAFS implementation |
- | | | Explanation for chinese speed settings: |
- | | | if Bit 3 is set the following |
- | | | rules apply: |
- | | | Bit1=0 Bit2=0: 32k fix |
- | | | Bit1=1 Bit2=0: 64k fix |
- | | | Bit1=0 Bit2=1: PIAFS is trying |
- |                     |      |           to negotiate 32k; if that is  |
- | | | not possible it tries to |
- | | | negotiate 64k |
- | | | Bit1=1 Bit2=1: PIAFS is trying |
- |                     |      |           to negotiate 64k; if that is  |
- | | | not possible it tries to |
- | | | negotiate 32k |
- + Note (2): ----------+------+-----------------------------------------+
- | PIAFS | Bit | this byte defines the usage of UDATA |
- | Implementation | | control communication |
- | UDATA usage | D | Bit 1 - DCD-ON signalling |
- | | | 0 - no DCD-ON is signalled |
- | | | (default) |
- | | | 1 - DCD-ON will be signalled |
- | | U | Bit 8 - DDI test application UDATA |
- | | | control communication |
- | | | 0 - no UDATA control |
- | | | communication (default) |
- | | | sets as well the DCD-ON |
- | | | signalling |
- | | | 1 - UDATA control communication |
- |                     |      |           ATTENTION: Do not use this    |
- |                     |      |                      setting unless     |
- |                     |      |                      you are really     |
- |                     |      |                      sure that you      |
- |                     |      |                      need it and you    |
- |                     |      |                      know exactly       |
- |                     |      |                      what you are       |
- |                     |      |                      doing.             |
- | | | You can easily |
- | | | disable any |
- | | | data transfer. |
- +---------------------+------+-----------------------------------------+
-*/
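
The speed-configuration byte (byte 10 of the DLC) is just the S/V/C bits ORed together. A sketch using the PIAFS defines above, with the defaults stated in Note (1):

#include <stdint.h>
#include <stdio.h>

#define PIAFS_64K            0x01 /* S */
#define PIAFS_VARIABLE_SPEED 0x02 /* V */
#define PIAFS_CHINESE_SPEED  0x04 /* C */

int main(void)
{
	/* defaults per Note (1): 64k, variable speed, standard rules */
	uint8_t byte10 = PIAFS_64K | PIAFS_VARIABLE_SPEED;

	printf("byte10=0x%02x (%s, %s%s)\n", byte10,
	       (byte10 & PIAFS_64K) ? "64k" : "32k",
	       (byte10 & PIAFS_VARIABLE_SPEED) ? "variable" : "fixed",
	       (byte10 & PIAFS_CHINESE_SPEED) ? ", chinese rules" : "");
	return 0;
}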
-/* ------------------------------------------------------
- LISTENER DLC DEFINITIONS
- ------------------------------------------------------ */
-#define LISTENER_FEATURE_MASK_CUMMULATIVE 0x0001
-/* ------------------------------------------------------
- LISTENER META-FRAME CODE/PRIMITIVE DEFINITIONS
- ------------------------------------------------------ */
-#define META_CODE_LL_UDATA_RX 0x01
-#define META_CODE_LL_UDATA_TX 0x02
-#define META_CODE_LL_DATA_RX 0x03
-#define META_CODE_LL_DATA_TX 0x04
-#define META_CODE_LL_MDATA_RX 0x05
-#define META_CODE_LL_MDATA_TX 0x06
-#define META_CODE_EMPTY 0x10
-#define META_CODE_LOST_FRAMES 0x11
-#define META_FLAG_TRUNCATED 0x0001
-/*------------------------------------------------------------------*/
-/* CAPI-like profile to indicate features on LAW_REQ */
-/*------------------------------------------------------------------*/
-#define GL_INTERNAL_CONTROLLER_SUPPORTED 0x00000001L
-#define GL_EXTERNAL_EQUIPMENT_SUPPORTED 0x00000002L
-#define GL_HANDSET_SUPPORTED 0x00000004L
-#define GL_DTMF_SUPPORTED 0x00000008L
-#define GL_SUPPLEMENTARY_SERVICES_SUPPORTED 0x00000010L
-#define GL_CHANNEL_ALLOCATION_SUPPORTED 0x00000020L
-#define GL_BCHANNEL_OPERATION_SUPPORTED 0x00000040L
-#define GL_LINE_INTERCONNECT_SUPPORTED 0x00000080L
-#define B1_HDLC_SUPPORTED 0x00000001L
-#define B1_TRANSPARENT_SUPPORTED 0x00000002L
-#define B1_V110_ASYNC_SUPPORTED 0x00000004L
-#define B1_V110_SYNC_SUPPORTED 0x00000008L
-#define B1_T30_SUPPORTED 0x00000010L
-#define B1_HDLC_INVERTED_SUPPORTED 0x00000020L
-#define B1_TRANSPARENT_R_SUPPORTED 0x00000040L
-#define B1_MODEM_ALL_NEGOTIATE_SUPPORTED 0x00000080L
-#define B1_MODEM_ASYNC_SUPPORTED 0x00000100L
-#define B1_MODEM_SYNC_HDLC_SUPPORTED 0x00000200L
-#define B2_X75_SUPPORTED 0x00000001L
-#define B2_TRANSPARENT_SUPPORTED 0x00000002L
-#define B2_SDLC_SUPPORTED 0x00000004L
-#define B2_LAPD_SUPPORTED 0x00000008L
-#define B2_T30_SUPPORTED 0x00000010L
-#define B2_PPP_SUPPORTED 0x00000020L
-#define B2_TRANSPARENT_NO_CRC_SUPPORTED 0x00000040L
-#define B2_MODEM_EC_COMPRESSION_SUPPORTED 0x00000080L
-#define B2_X75_V42BIS_SUPPORTED 0x00000100L
-#define B2_V120_ASYNC_SUPPORTED 0x00000200L
-#define B2_V120_ASYNC_V42BIS_SUPPORTED 0x00000400L
-#define B2_V120_BIT_TRANSPARENT_SUPPORTED 0x00000800L
-#define B2_LAPD_FREE_SAPI_SEL_SUPPORTED 0x00001000L
-#define B3_TRANSPARENT_SUPPORTED 0x00000001L
-#define B3_T90NL_SUPPORTED 0x00000002L
-#define B3_ISO8208_SUPPORTED 0x00000004L
-#define B3_X25_DCE_SUPPORTED 0x00000008L
-#define B3_T30_SUPPORTED 0x00000010L
-#define B3_T30_WITH_EXTENSIONS_SUPPORTED 0x00000020L
-#define B3_RESERVED_SUPPORTED 0x00000040L
-#define B3_MODEM_SUPPORTED 0x00000080L
-#define MANUFACTURER_FEATURE_SLAVE_CODEC 0x00000001L
-#define MANUFACTURER_FEATURE_FAX_MORE_DOCUMENTS 0x00000002L
-#define MANUFACTURER_FEATURE_HARDDTMF 0x00000004L
-#define MANUFACTURER_FEATURE_SOFTDTMF_SEND 0x00000008L
-#define MANUFACTURER_FEATURE_DTMF_PARAMETERS 0x00000010L
-#define MANUFACTURER_FEATURE_SOFTDTMF_RECEIVE 0x00000020L
-#define MANUFACTURER_FEATURE_FAX_SUB_SEP_PWD 0x00000040L
-#define MANUFACTURER_FEATURE_V18 0x00000080L
-#define MANUFACTURER_FEATURE_MIXER_CH_CH 0x00000100L
-#define MANUFACTURER_FEATURE_MIXER_CH_PC 0x00000200L
-#define MANUFACTURER_FEATURE_MIXER_PC_CH 0x00000400L
-#define MANUFACTURER_FEATURE_MIXER_PC_PC 0x00000800L
-#define MANUFACTURER_FEATURE_ECHO_CANCELLER 0x00001000L
-#define MANUFACTURER_FEATURE_RTP 0x00002000L
-#define MANUFACTURER_FEATURE_T38 0x00004000L
-#define MANUFACTURER_FEATURE_TRANSP_DELIVERY_CONF 0x00008000L
-#define MANUFACTURER_FEATURE_XONOFF_FLOW_CONTROL 0x00010000L
-#define MANUFACTURER_FEATURE_OOB_CHANNEL 0x00020000L
-#define MANUFACTURER_FEATURE_IN_BAND_CHANNEL 0x00040000L
-#define MANUFACTURER_FEATURE_IN_BAND_FEATURE 0x00080000L
-#define MANUFACTURER_FEATURE_PIAFS 0x00100000L
-#define MANUFACTURER_FEATURE_DTMF_TONE 0x00200000L
-#define MANUFACTURER_FEATURE_FAX_PAPER_FORMATS 0x00400000L
-#define MANUFACTURER_FEATURE_OK_FC_LABEL 0x00800000L
-#define MANUFACTURER_FEATURE_VOWN 0x01000000L
-#define MANUFACTURER_FEATURE_XCONNECT 0x02000000L
-#define MANUFACTURER_FEATURE_DMACONNECT 0x04000000L
-#define MANUFACTURER_FEATURE_AUDIO_TAP 0x08000000L
-#define MANUFACTURER_FEATURE_FAX_NONSTANDARD 0x10000000L
-#define MANUFACTURER_FEATURE_SS7 0x20000000L
-#define MANUFACTURER_FEATURE_MADAPTER 0x40000000L
-#define MANUFACTURER_FEATURE_MEASURE 0x80000000L
-#define MANUFACTURER_FEATURE2_LISTENING 0x00000001L
-#define MANUFACTURER_FEATURE2_SS_DIFFCONTPOSSIBLE 0x00000002L
-#define MANUFACTURER_FEATURE2_GENERIC_TONE 0x00000004L
-#define MANUFACTURER_FEATURE2_COLOR_FAX 0x00000008L
-#define MANUFACTURER_FEATURE2_SS_ECT_DIFFCONTPOSSIBLE 0x00000010L
-#define RTP_PRIM_PAYLOAD_PCMU_8000 0
-#define RTP_PRIM_PAYLOAD_1016_8000 1
-#define RTP_PRIM_PAYLOAD_G726_32_8000 2
-#define RTP_PRIM_PAYLOAD_GSM_8000 3
-#define RTP_PRIM_PAYLOAD_G723_8000 4
-#define RTP_PRIM_PAYLOAD_DVI4_8000 5
-#define RTP_PRIM_PAYLOAD_DVI4_16000 6
-#define RTP_PRIM_PAYLOAD_LPC_8000 7
-#define RTP_PRIM_PAYLOAD_PCMA_8000 8
-#define RTP_PRIM_PAYLOAD_G722_16000 9
-#define RTP_PRIM_PAYLOAD_QCELP_8000 12
-#define RTP_PRIM_PAYLOAD_G728_8000 14
-#define RTP_PRIM_PAYLOAD_G729_8000 18
-#define RTP_PRIM_PAYLOAD_GSM_HR_8000 30
-#define RTP_PRIM_PAYLOAD_GSM_EFR_8000 31
-#define RTP_ADD_PAYLOAD_BASE 32
-#define RTP_ADD_PAYLOAD_RED 32
-#define RTP_ADD_PAYLOAD_CN_8000 33
-#define RTP_ADD_PAYLOAD_DTMF 34
-#define RTP_PRIM_PAYLOAD_PCMU_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_PCMU_8000)
-#define RTP_PRIM_PAYLOAD_1016_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_1016_8000)
-#define RTP_PRIM_PAYLOAD_G726_32_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_G726_32_8000)
-#define RTP_PRIM_PAYLOAD_GSM_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_GSM_8000)
-#define RTP_PRIM_PAYLOAD_G723_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_G723_8000)
-#define RTP_PRIM_PAYLOAD_DVI4_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_DVI4_8000)
-#define RTP_PRIM_PAYLOAD_DVI4_16000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_DVI4_16000)
-#define RTP_PRIM_PAYLOAD_LPC_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_LPC_8000)
-#define RTP_PRIM_PAYLOAD_PCMA_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_PCMA_8000)
-#define RTP_PRIM_PAYLOAD_G722_16000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_G722_16000)
-#define RTP_PRIM_PAYLOAD_QCELP_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_QCELP_8000)
-#define RTP_PRIM_PAYLOAD_G728_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_G728_8000)
-#define RTP_PRIM_PAYLOAD_G729_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_G729_8000)
-#define RTP_PRIM_PAYLOAD_GSM_HR_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_GSM_HR_8000)
-#define RTP_PRIM_PAYLOAD_GSM_EFR_8000_SUPPORTED (1L << RTP_PRIM_PAYLOAD_GSM_EFR_8000)
-#define RTP_ADD_PAYLOAD_RED_SUPPORTED (1L << (RTP_ADD_PAYLOAD_RED - RTP_ADD_PAYLOAD_BASE))
-#define RTP_ADD_PAYLOAD_CN_8000_SUPPORTED (1L << (RTP_ADD_PAYLOAD_CN_8000 - RTP_ADD_PAYLOAD_BASE))
-#define RTP_ADD_PAYLOAD_DTMF_SUPPORTED (1L << (RTP_ADD_PAYLOAD_DTMF - RTP_ADD_PAYLOAD_BASE))
-/* virtual switching definitions */
-#define VSJOIN 1
-#define VSTRANSPORT 2
-#define VSGETPARAMS 3
-#define VSCAD 1
-#define VSRXCPNAME 2
-#define VSCALLSTAT 3
-#define VSINVOKEID 4
-#define VSCLMRKS 5
-#define VSTBCTIDENT 6
-#define VSETSILINKID 7
-#define VSSAMECONTROLLER 8
-/* Errorcodes for VSETSILINKID begin */
-#define VSETSILINKIDRRWC 1
-#define VSETSILINKIDREJECT 2
-#define VSETSILINKIDTIMEOUT 3
-#define VSETSILINKIDFAILCOUNT 4
-#define VSETSILINKIDERROR 5
-/* Errorcodes for VSETSILINKID end */
-/* -----------------------------------------------------------**
-** The PROTOCOL_FEATURE_STRING in feature.h (included **
-** in prstart.sx and astart.sx) defines capabilities and **
-** features of the actual protocol code. It's used as a bit **
-** mask. **
-** The following Bits are defined: **
-** -----------------------------------------------------------*/
-#define PROTCAP_TELINDUS 0x0001 /* Telindus Variant of protocol code */
-#define PROTCAP_MAN_IF 0x0002 /* Management interface implemented */
-#define PROTCAP_V_42 0x0004 /* V42 implemented */
-#define PROTCAP_V90D 0x0008 /* V.90D (implies up to 384k DSP code) */
-#define PROTCAP_EXTD_FAX 0x0010 /* Extended FAX (ECM, 2D, T6, Polling) */
-#define PROTCAP_EXTD_RXFC 0x0020 /* RxFC (Extd Flow Control), OOB Chnl */
-#define PROTCAP_VOIP 0x0040 /* VoIP (implies up to 512k DSP code) */
-#define PROTCAP_CMA_ALLPR 0x0080 /* CMA support for all NL primitives */
-#define PROTCAP_FREE8 0x0100 /* not used */
-#define PROTCAP_FREE9 0x0200 /* not used */
-#define PROTCAP_FREE10 0x0400 /* not used */
-#define PROTCAP_FREE11 0x0800 /* not used */
-#define PROTCAP_FREE12 0x1000 /* not used */
-#define PROTCAP_FREE13 0x2000 /* not used */
-#define PROTCAP_FREE14 0x4000 /* not used */
-#define PROTCAP_EXTENSION 0x8000 /* used for future extensions */
-/* -----------------------------------------------------------* */
-/* Onhook data transmission ETS30065901 */
-/* Message Type */
-/*#define RESERVED4 0x4*/
-#define CALL_SETUP 0x80
-#define MESSAGE_WAITING_INDICATOR 0x82
-/*#define RESERVED84 0x84*/
-/*#define RESERVED85 0x85*/
-#define ADVICE_OF_CHARGE 0x86
-/* 1111 0001 (F1H)
-   to
-   1111 1111 (FFH) - Reserved for network operator use */
-/* Parameter Types */
-#define DATE_AND_TIME 1
-#define CLI_PARAMETER_TYPE 2
-#define CALLED_DIRECTORY_NUMBER_PARAMETER_TYPE 3
-#define REASON_FOR_ABSENCE_OF_CLI_PARAMETER_TYPE 4
-#define NAME_PARAMETER_TYPE 7
-#define REASON_FOR_ABSENCE_OF_CALLING_PARTY_NAME_PARAMETER_TYPE 8
-#define VISUAL_INDICATOR_PARAMETER_TYPE 0xb
-#define COMPLEMENTARY_CLI_PARAMETER_TYPE 0x10
-#define CALL_TYPE_PARAMETER_TYPE 0x11
-#define FIRST_CALLED_LINE_DIRECTORY_NUMBER_PARAMETER_TYPE 0x12
-#define NETWORK_MESSAGE_SYSTEM_STATUS_PARAMETER_TYPE 0x13
-#define FORWARDED_CALL_TYPE_PARAMETER_TYPE 0x15
-#define TYPE_OF_CALLING_USER_PARAMETER_TYPE 0x16
-#define REDIRECTING_NUMBER_PARAMETER_TYPE 0x1a
-#define EXTENSION_FOR_NETWORK_OPERATOR_USE_PARAMETER_TYPE 0xe0
-/* -----------------------------------------------------------* */
-#else
-#endif /* PC_H_INCLUDED } */
diff --git a/drivers/isdn/hardware/eicon/pc_init.h b/drivers/isdn/hardware/eicon/pc_init.h
deleted file mode 100644
index d1d00866e8d4..000000000000
--- a/drivers/isdn/hardware/eicon/pc_init.h
+++ /dev/null
@@ -1,267 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef PC_INIT_H_
-#define PC_INIT_H_
-/*------------------------------------------------------------------*/
-/*
- Initialisation parameters for the card
- 0x0008 <byte> TEI
- 0x0009 <byte> NT2 flag
- 0x000a <byte> Default DID length
- 0x000b <byte> Disable watchdog flag
- 0x000c <byte> Permanent connection flag
- 0x000d <byte> Bit 3-8: L1 Hunt Group/Tristate
- 0x000d <byte> Bit 1: QSig small CR length if set to 1
- 0x000d <byte> Bit 2: QSig small CHI length if set to 1
- 0x000e <byte> Bit 1-3: Stable L2, 0=OnDemand,1=NoDisc,2=permanent
- 0x000e <byte> Bit 4: NT mode
- 0x000e <byte> Bit 5: QSig Channel ID format
- 0x000e <byte> Bit 6: QSig Call Forwarding Allowed Flag
- 0x000e <byte> Bit 7: Disable AutoSPID Flag
- 0x000f <byte> No order check flag
- 0x0010 <byte> Force companding type:0=default,1=a-law,2=u-law
- 0x0012 <byte> Low channel flag
- 0x0013 <byte> Protocol version
- 0x0014 <byte> CRC4 option:0=default,1=double_frm,2=multi_frm,3=auto
- 0x0015 <byte> Bit 0: NoHscx30, Bit 1: Loopback flag, Bit 2: ForceHscx30
- 0x0016 <byte> DSP info
- 0x0017-0x0019 Serial number
- 0x001a <byte> Card type
- 0x0020 <string> OAD 0
- 0x0040 <string> OSA 0
- 0x0060 <string> SPID 0 (if not T.1)
- 0x0060 <struct> if T.1: Robbed Bit Configuration
- 0x0060 length (8)
- 0x0061 RBS Answer Delay
- 0x0062 RBS Config Bit 3, 4:
- 0 0 -> Wink Start
- 1 0 -> Loop Start
- 0 1 -> Ground Start
- 1 1 -> reserved
- Bit 5, 6:
- 0 0 -> Pulse Dial -> Rotary
- 1 0 -> DTMF
- 0 1 -> MF
- 1 1 -> reserved
- 0x0063 RBS RX Digit Timeout
- 0x0064 RBS Bearer Capability
- 0x0065-0x0069 RBS Debug Mask
- 0x0080 <string> OAD 1
- 0x00a0 <string> OSA 1
- 0x00c0 <string> SPID 1
- 0x00e0 <w-element list> Additional configuration
-*/
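
The table above is a flat offset map, so building an initialisation image amounts to poking bytes at fixed positions. A sketch with a few of the single-byte parameters (the buffer size and chosen values are hypothetical):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	uint8_t cfg[0x100];
	memset(cfg, 0, sizeof(cfg));

	cfg[0x0008] = 0;   /* TEI */
	cfg[0x0009] = 0;   /* NT2 flag */
	cfg[0x000a] = 0;   /* default DID length */
	cfg[0x0010] = 1;   /* force companding: 1 = a-law */
	cfg[0x0014] = 3;   /* CRC4 option: 3 = auto */

	printf("companding=%u crc4=%u\n", cfg[0x0010], cfg[0x0014]);
	return 0;
}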
-#define PCINIT_END_OF_LIST 0x00
-#define PCINIT_MODEM_GUARD_TONE 0x01
-#define PCINIT_MODEM_MIN_SPEED 0x02
-#define PCINIT_MODEM_MAX_SPEED 0x03
-#define PCINIT_MODEM_PROTOCOL_OPTIONS 0x04
-#define PCINIT_FAX_OPTIONS 0x05
-#define PCINIT_FAX_MAX_SPEED 0x06
-#define PCINIT_MODEM_OPTIONS 0x07
-#define PCINIT_MODEM_NEGOTIATION_MODE 0x08
-#define PCINIT_MODEM_MODULATIONS_MASK 0x09
-#define PCINIT_MODEM_TRANSMIT_LEVEL 0x0a
-#define PCINIT_FAX_DISABLED_RESOLUTIONS 0x0b
-#define PCINIT_FAX_MAX_RECORDING_WIDTH 0x0c
-#define PCINIT_FAX_MAX_RECORDING_LENGTH 0x0d
-#define PCINIT_FAX_MIN_SCANLINE_TIME 0x0e
-#define PCINIT_US_EKTS_CACH_HANDLES 0x0f
-#define PCINIT_US_EKTS_BEGIN_CONF 0x10
-#define PCINIT_US_EKTS_DROP_CONF 0x11
-#define PCINIT_US_EKTS_CALL_TRANSFER 0x12
-#define PCINIT_RINGERTONE_OPTION 0x13
-#define PCINIT_CARD_ADDRESS 0x14
-#define PCINIT_FPGA_FEATURES 0x15
-#define PCINIT_US_EKTS_MWI 0x16
-#define PCINIT_MODEM_SPEAKER_CONTROL 0x17
-#define PCINIT_MODEM_SPEAKER_VOLUME 0x18
-#define PCINIT_MODEM_CARRIER_WAIT_TIME 0x19
-#define PCINIT_MODEM_CARRIER_LOSS_TIME 0x1a
-#define PCINIT_UNCHAN_B_MASK 0x1b
-#define PCINIT_PART68_LIMITER 0x1c
-#define PCINIT_XDI_FEATURES 0x1d
-#define PCINIT_QSIG_DIALECT 0x1e
-#define PCINIT_DISABLE_AUTOSPID_FLAG 0x1f
-#define PCINIT_FORCE_VOICE_MAIL_ALERT 0x20
-#define PCINIT_PIAFS_TURNAROUND_FRAMES 0x21
-#define PCINIT_L2_COUNT 0x22
-#define PCINIT_QSIG_FEATURES 0x23
-#define PCINIT_NO_SIGNALLING 0x24
-#define PCINIT_CARD_SN 0x25
-#define PCINIT_CARD_PORT 0x26
-#define PCINIT_ALERTTO 0x27
-#define PCINIT_MODEM_EYE_SETUP 0x28
-#define PCINIT_FAX_V34_OPTIONS 0x29
-/*------------------------------------------------------------------*/
-#define PCINIT_MODEM_GUARD_TONE_NONE 0x00
-#define PCINIT_MODEM_GUARD_TONE_550HZ 0x01
-#define PCINIT_MODEM_GUARD_TONE_1800HZ 0x02
-#define PCINIT_MODEM_GUARD_TONE_CHOICES 0x03
-#define PCINIT_MODEMPROT_DISABLE_V42_V42BIS 0x0001
-#define PCINIT_MODEMPROT_DISABLE_MNP_MNP5 0x0002
-#define PCINIT_MODEMPROT_REQUIRE_PROTOCOL 0x0004
-#define PCINIT_MODEMPROT_DISABLE_V42_DETECT 0x0008
-#define PCINIT_MODEMPROT_DISABLE_COMPRESSION 0x0010
-#define PCINIT_MODEMPROT_REQUIRE_PROTOCOL_V34UP 0x0020
-#define PCINIT_MODEMPROT_NO_PROTOCOL_IF_1200 0x0100
-#define PCINIT_MODEMPROT_BUFFER_IN_V42_DETECT 0x0200
-#define PCINIT_MODEMPROT_DISABLE_V42_SREJ 0x0400
-#define PCINIT_MODEMPROT_DISABLE_MNP3 0x0800
-#define PCINIT_MODEMPROT_DISABLE_MNP4 0x1000
-#define PCINIT_MODEMPROT_DISABLE_MNP10 0x2000
-#define PCINIT_MODEMPROT_NO_PROTOCOL_IF_V22BIS 0x4000
-#define PCINIT_MODEMPROT_NO_PROTOCOL_IF_V32BIS 0x8000
-#define PCINIT_MODEMCONFIG_LEASED_LINE_MODE 0x00000001L
-#define PCINIT_MODEMCONFIG_4_WIRE_OPERATION 0x00000002L
-#define PCINIT_MODEMCONFIG_DISABLE_BUSY_DETECT 0x00000004L
-#define PCINIT_MODEMCONFIG_DISABLE_CALLING_TONE 0x00000008L
-#define PCINIT_MODEMCONFIG_DISABLE_ANSWER_TONE 0x00000010L
-#define PCINIT_MODEMCONFIG_ENABLE_DIAL_TONE_DET 0x00000020L
-#define PCINIT_MODEMCONFIG_USE_POTS_INTERFACE 0x00000040L
-#define PCINIT_MODEMCONFIG_FORCE_RAY_TAYLOR_FAX 0x00000080L
-#define PCINIT_MODEMCONFIG_DISABLE_RETRAIN 0x00000100L
-#define PCINIT_MODEMCONFIG_DISABLE_STEPDOWN 0x00000200L
-#define PCINIT_MODEMCONFIG_DISABLE_SPLIT_SPEED 0x00000400L
-#define PCINIT_MODEMCONFIG_DISABLE_TRELLIS 0x00000800L
-#define PCINIT_MODEMCONFIG_ALLOW_RDL_TEST_LOOP 0x00001000L
-#define PCINIT_MODEMCONFIG_DISABLE_STEPUP 0x00002000L
-#define PCINIT_MODEMCONFIG_DISABLE_FLUSH_TIMER 0x00004000L
-#define PCINIT_MODEMCONFIG_REVERSE_DIRECTION 0x00008000L
-#define PCINIT_MODEMCONFIG_DISABLE_TX_REDUCTION 0x00010000L
-#define PCINIT_MODEMCONFIG_DISABLE_PRECODING 0x00020000L
-#define PCINIT_MODEMCONFIG_DISABLE_PREEMPHASIS 0x00040000L
-#define PCINIT_MODEMCONFIG_DISABLE_SHAPING 0x00080000L
-#define PCINIT_MODEMCONFIG_DISABLE_NONLINEAR_EN 0x00100000L
-#define PCINIT_MODEMCONFIG_DISABLE_MANUALREDUCT 0x00200000L
-#define PCINIT_MODEMCONFIG_DISABLE_16_POINT_TRN 0x00400000L
-#define PCINIT_MODEMCONFIG_DISABLE_2400_SYMBOLS 0x01000000L
-#define PCINIT_MODEMCONFIG_DISABLE_2743_SYMBOLS 0x02000000L
-#define PCINIT_MODEMCONFIG_DISABLE_2800_SYMBOLS 0x04000000L
-#define PCINIT_MODEMCONFIG_DISABLE_3000_SYMBOLS 0x08000000L
-#define PCINIT_MODEMCONFIG_DISABLE_3200_SYMBOLS 0x10000000L
-#define PCINIT_MODEMCONFIG_DISABLE_3429_SYMBOLS 0x20000000L
-#define PCINIT_MODEM_NEGOTIATE_HIGHEST 0x00
-#define PCINIT_MODEM_NEGOTIATE_DISABLED 0x01
-#define PCINIT_MODEM_NEGOTIATE_IN_CLASS 0x02
-#define PCINIT_MODEM_NEGOTIATE_V100 0x03
-#define PCINIT_MODEM_NEGOTIATE_V8 0x04
-#define PCINIT_MODEM_NEGOTIATE_V8BIS 0x05
-#define PCINIT_MODEM_NEGOTIATE_CHOICES 0x06
-#define PCINIT_MODEMMODULATION_DISABLE_V21 0x00000001L
-#define PCINIT_MODEMMODULATION_DISABLE_V23 0x00000002L
-#define PCINIT_MODEMMODULATION_DISABLE_V22 0x00000004L
-#define PCINIT_MODEMMODULATION_DISABLE_V22BIS 0x00000008L
-#define PCINIT_MODEMMODULATION_DISABLE_V32 0x00000010L
-#define PCINIT_MODEMMODULATION_DISABLE_V32BIS 0x00000020L
-#define PCINIT_MODEMMODULATION_DISABLE_V34 0x00000040L
-#define PCINIT_MODEMMODULATION_DISABLE_V90 0x00000080L
-#define PCINIT_MODEMMODULATION_DISABLE_BELL103 0x00000100L
-#define PCINIT_MODEMMODULATION_DISABLE_BELL212A 0x00000200L
-#define PCINIT_MODEMMODULATION_DISABLE_VFC 0x00000400L
-#define PCINIT_MODEMMODULATION_DISABLE_K56FLEX 0x00000800L
-#define PCINIT_MODEMMODULATION_DISABLE_X2 0x00001000L
-#define PCINIT_MODEMMODULATION_ENABLE_V29FDX 0x00010000L
-#define PCINIT_MODEMMODULATION_ENABLE_V33 0x00020000L
-#define PCINIT_MODEMMODULATION_ENABLE_V90A 0x00040000L
-#define PCINIT_MODEM_TRANSMIT_LEVEL_CHOICES 0x10
-#define PCINIT_MODEM_SPEAKER_OFF 0x00
-#define PCINIT_MODEM_SPEAKER_DURING_TRAIN 0x01
-#define PCINIT_MODEM_SPEAKER_TIL_CONNECT 0x02
-#define PCINIT_MODEM_SPEAKER_ALWAYS_ON 0x03
-#define PCINIT_MODEM_SPEAKER_CHOICES 0x04
-#define PCINIT_MODEM_SPEAKER_VOLUME_MIN 0x00
-#define PCINIT_MODEM_SPEAKER_VOLUME_LOW 0x01
-#define PCINIT_MODEM_SPEAKER_VOLUME_HIGH 0x02
-#define PCINIT_MODEM_SPEAKER_VOLUME_MAX 0x03
-#define PCINIT_MODEM_SPEAKER_VOLUME_CHOICES 0x04
-/*------------------------------------------------------------------*/
-#define PCINIT_FAXCONFIG_DISABLE_FINE 0x0001
-#define PCINIT_FAXCONFIG_DISABLE_ECM 0x0002
-#define PCINIT_FAXCONFIG_ECM_64_BYTES 0x0004
-#define PCINIT_FAXCONFIG_DISABLE_2D_CODING 0x0008
-#define PCINIT_FAXCONFIG_DISABLE_T6_CODING 0x0010
-#define PCINIT_FAXCONFIG_DISABLE_UNCOMPR 0x0020
-#define PCINIT_FAXCONFIG_REFUSE_POLLING 0x0040
-#define PCINIT_FAXCONFIG_HIDE_TOTAL_PAGES 0x0080
-#define PCINIT_FAXCONFIG_HIDE_ALL_HEADLINE 0x0100
-#define PCINIT_FAXCONFIG_HIDE_PAGE_INFO 0x0180
-#define PCINIT_FAXCONFIG_HEADLINE_OPTIONS_MASK 0x0180
-#define PCINIT_FAXCONFIG_DISABLE_FEATURE_FALLBACK 0x0200
-#define PCINIT_FAXCONFIG_V34FAX_CONTROL_RATE_1200 0x0800
-#define PCINIT_FAXCONFIG_DISABLE_V34FAX 0x1000
-#define PCINIT_FAXCONFIG_DISABLE_R8_0770_OR_200 0x01
-#define PCINIT_FAXCONFIG_DISABLE_R8_1540 0x02
-#define PCINIT_FAXCONFIG_DISABLE_R16_1540_OR_400 0x04
-#define PCINIT_FAXCONFIG_DISABLE_R4_0385_OR_100 0x08
-#define PCINIT_FAXCONFIG_DISABLE_300_300 0x10
-#define PCINIT_FAXCONFIG_DISABLE_INCH_BASED 0x40
-#define PCINIT_FAXCONFIG_DISABLE_METRIC_BASED 0x80
-#define PCINIT_FAXCONFIG_REC_WIDTH_ISO_A3 0
-#define PCINIT_FAXCONFIG_REC_WIDTH_ISO_B4 1
-#define PCINIT_FAXCONFIG_REC_WIDTH_ISO_A4 2
-#define PCINIT_FAXCONFIG_REC_WIDTH_COUNT 3
-#define PCINIT_FAXCONFIG_REC_LENGTH_UNLIMITED 0
-#define PCINIT_FAXCONFIG_REC_LENGTH_ISO_B4 1
-#define PCINIT_FAXCONFIG_REC_LENGTH_ISO_A4 2
-#define PCINIT_FAXCONFIG_REC_LENGTH_COUNT 3
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_00_00_00 0
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_05_05_05 1
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_10_05_05 2
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_10_10_10 3
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_20_10_10 4
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_20_20_20 5
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_40_20_20 6
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_40_40_40 7
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_RES_8 8
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_RES_9 9
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_RES_10 10
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_10_10_05 11
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_20_10_05 12
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_20_20_10 13
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_40_20_10 14
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_40_40_20 15
-#define PCINIT_FAXCONFIG_SCANLINE_TIME_COUNT 16
-#define PCINIT_FAXCONFIG_DISABLE_TX_REDUCTION 0x00010000L
-#define PCINIT_FAXCONFIG_DISABLE_PRECODING 0x00020000L
-#define PCINIT_FAXCONFIG_DISABLE_PREEMPHASIS 0x00040000L
-#define PCINIT_FAXCONFIG_DISABLE_SHAPING 0x00080000L
-#define PCINIT_FAXCONFIG_DISABLE_NONLINEAR_EN 0x00100000L
-#define PCINIT_FAXCONFIG_DISABLE_MANUALREDUCT 0x00200000L
-#define PCINIT_FAXCONFIG_DISABLE_16_POINT_TRN 0x00400000L
-#define PCINIT_FAXCONFIG_DISABLE_2400_SYMBOLS 0x01000000L
-#define PCINIT_FAXCONFIG_DISABLE_2743_SYMBOLS 0x02000000L
-#define PCINIT_FAXCONFIG_DISABLE_2800_SYMBOLS 0x04000000L
-#define PCINIT_FAXCONFIG_DISABLE_3000_SYMBOLS 0x08000000L
-#define PCINIT_FAXCONFIG_DISABLE_3200_SYMBOLS 0x10000000L
-#define PCINIT_FAXCONFIG_DISABLE_3429_SYMBOLS 0x20000000L
-/*--------------------------------------------------------------------------*/
-#define PCINIT_XDI_CMA_FOR_ALL_NL_PRIMITIVES 0x01
-/*--------------------------------------------------------------------------*/
-#define PCINIT_FPGA_PLX_ACCESS_SUPPORTED 0x01
-/*--------------------------------------------------------------------------*/
-#endif
-/*--------------------------------------------------------------------------*/
diff --git a/drivers/isdn/hardware/eicon/pc_maint.h b/drivers/isdn/hardware/eicon/pc_maint.h
deleted file mode 100644
index 496f018fb5a2..000000000000
--- a/drivers/isdn/hardware/eicon/pc_maint.h
+++ /dev/null
@@ -1,160 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifdef PLATFORM_GT_32BIT
-/* #define POINTER_32BIT byte * __ptr32 */
-#define POINTER_32BIT dword
-#else
-#define POINTER_32BIT byte *
-#endif
-#if !defined(MIPS_SCOM)
-#define BUFFER_SZ 48
-#define MAINT_OFFS 0x380
-#else
-#define BUFFER_SZ 128
-#if defined(PRI)
-#define MAINT_OFFS 0xef00
-#else
-#define MAINT_OFFS 0xff00
-#endif
-#endif
-#define MIPS_BUFFER_SZ 128
-#if defined(PRI)
-#define MIPS_MAINT_OFFS 0xef00
-#else
-#define MIPS_MAINT_OFFS 0xff00
-#endif
-#define LOG 1
-#define MEMR 2
-#define MEMW 3
-#define IOR 4
-#define IOW 5
-#define B1TEST 6
-#define B2TEST 7
-#define BTESTOFF 8
-#define DSIG_STATS 9
-#define B_CH_STATS 10
-#define D_CH_STATS 11
-#define BL1_STATS 12
-#define BL1_STATS_C 13
-#define GET_VERSION 14
-#define OS_STATS 15
-#define XLOG_SET_MASK 16
-#define XLOG_GET_MASK 17
-#define DSP_READ 20
-#define DSP_WRITE 21
-#define OK 0xff
-#define MORE_EVENTS 0xfe
-#define NO_EVENT 1
-struct DSigStruc
-{
- byte Id;
- byte u;
- byte listen;
- byte active;
- byte sin[3];
- byte bc[6];
- byte llc[6];
- byte hlc[6];
- byte oad[20];
-};
-struct BL1Struc {
- dword cx_b1;
- dword cx_b2;
- dword cr_b1;
- dword cr_b2;
- dword px_b1;
- dword px_b2;
- dword pr_b1;
- dword pr_b2;
- word er_b1;
- word er_b2;
-};
-struct L2Struc {
- dword XTotal;
- dword RTotal;
- word XError;
- word RError;
-};
-struct OSStruc {
- dword free_n;
-};
-typedef union
-{
- struct DSigStruc DSigStats;
- struct BL1Struc BL1Stats;
- struct L2Struc L2Stats;
- struct OSStruc OSStats;
- byte b[BUFFER_SZ];
- word w[BUFFER_SZ >> 1];
- word l[BUFFER_SZ >> 2]; /* word is wrong, do not use! Use 'd' instead. */
- dword d[BUFFER_SZ >> 2];
-} BUFFER;
-typedef union
-{
- struct DSigStruc DSigStats;
- struct BL1Struc BL1Stats;
- struct L2Struc L2Stats;
- struct OSStruc OSStats;
- byte b[MIPS_BUFFER_SZ];
- word w[MIPS_BUFFER_SZ >> 1];
- word l[BUFFER_SZ >> 2]; /* word is wrong, do not use! Use 'd' instead. */
- dword d[MIPS_BUFFER_SZ >> 2];
-} MIPS_BUFFER;
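The two unions above overlay one storage area with byte-, word- and dword-sized views. Note that the 'l' member is a mis-declaration (word-typed elements with a dword-sized element count, so it spans only half the buffer), and in MIPS_BUFFER it is even sized with BUFFER_SZ rather than MIPS_BUFFER_SZ. A minimal sketch of the intended overlay, reusing the byte/word/dword typedefs from platform.h; BUF_SZ and overlay_sketch_t are illustrative names:

/* Overlay sketch: every valid view spans the same 48 bytes. */
#define BUF_SZ 48

typedef union {
	byte  b[BUF_SZ];        /* 48 elements x 1 byte  = 48 bytes */
	word  w[BUF_SZ >> 1];   /* 24 elements x 2 bytes = 48 bytes */
	dword d[BUF_SZ >> 2];   /* 12 elements x 4 bytes = 48 bytes */
	/* word l[BUF_SZ >> 2];    12 elements x 2 bytes = 24 bytes:
	   covers only half the buffer, hence "use 'd' instead"      */
} overlay_sketch_t;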
-#if !defined(MIPS_SCOM)
-struct pc_maint
-{
- byte req;
- byte rc;
- POINTER_32BIT mem;
- short length;
- word port;
- byte fill[6];
- BUFFER data;
-};
-#else
-struct pc_maint
-{
- byte req;
- byte rc;
- byte reserved[2]; /* R3000 alignment ... */
- POINTER_32BIT mem;
- short length;
- word port;
- byte fill[4]; /* data at offset 16 */
- BUFFER data;
-};
-#endif
-struct mi_pc_maint
-{
- byte req;
- byte rc;
- byte reserved[2]; /* R3000 alignment ... */
- POINTER_32BIT mem;
- short length;
- word port;
- byte fill[4]; /* data at offset 16 */
- MIPS_BUFFER data;
-};
diff --git a/drivers/isdn/hardware/eicon/pkmaint.h b/drivers/isdn/hardware/eicon/pkmaint.h
deleted file mode 100644
index cf3fb14a8e6f..000000000000
--- a/drivers/isdn/hardware/eicon/pkmaint.h
+++ /dev/null
@@ -1,43 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_XDI_OS_DEPENDENT_PACK_MAIN_ON_BYTE_INC__
-#define __DIVA_XDI_OS_DEPENDENT_PACK_MAIN_ON_BYTE_INC__
-
-
-/*
-  The only purpose of this compiler-dependent file is to pack the
-  structures described in pc_maint.h so that no padding
-  is included.
-
-  With the Microsoft compiler this is done by "pshpack1.h" and
-  afterwards restored by "poppack.h"
-*/
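A hedged sketch of what that packing amounts to; the pragma pair is what "pshpack1.h"/"poppack.h" expand to under the Microsoft compiler, and the attribute form is the GCC/Clang equivalent (struct names are illustrative):

/* Force 1-byte alignment so the in-memory layout matches the adapter. */
#pragma pack(push, 1)                 /* effect of pshpack1.h */
struct packed_sketch {
	byte  req;
	dword mem;                    /* no padding inserted before this */
};
#pragma pack(pop)                     /* effect of poppack.h */

/* GCC/Clang spelling of the same thing: */
struct packed_sketch2 {
	byte  req;
	dword mem;
} __attribute__((packed));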
-
-
-#include "pc_maint.h"
-
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/platform.h b/drivers/isdn/hardware/eicon/platform.h
deleted file mode 100644
index 62e2073c3690..000000000000
--- a/drivers/isdn/hardware/eicon/platform.h
+++ /dev/null
@@ -1,369 +0,0 @@
-/* $Id: platform.h,v 1.37.4.6 2005/01/31 12:22:20 armin Exp $
- *
- * platform.h
- *
- *
- * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
- * Copyright 2000 Eicon Networks
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- */
-
-
-#ifndef __PLATFORM_H__
-#define __PLATFORM_H__
-
-#if !defined(DIVA_BUILD)
-#define DIVA_BUILD "local"
-#endif
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/vmalloc.h>
-#include <linux/proc_fs.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/list.h>
-#include <asm/types.h>
-#include <asm/io.h>
-
-#include "cardtype.h"
-
-/* activate debuglib for modules only */
-#ifndef MODULE
-#define DIVA_NO_DEBUGLIB
-#endif
-
-#define DIVA_USER_MODE_CARD_CONFIG 1
-#define USE_EXTENDED_DEBUGS 1
-
-#define MAX_ADAPTER 32
-
-#define DIVA_ISTREAM 1
-
-#define MEMORY_SPACE_TYPE 0
-#define PORT_SPACE_TYPE 1
-
-
-#include <linux/string.h>
-
-#ifndef byte
-#define byte u8
-#endif
-
-#ifndef word
-#define word u16
-#endif
-
-#ifndef dword
-#define dword u32
-#endif
-
-#ifndef qword
-#define qword u64
-#endif
-
-#ifndef NULL
-#define NULL ((void *) 0)
-#endif
-
-#ifndef far
-#define far
-#endif
-
-#ifndef _pascal
-#define _pascal
-#endif
-
-#ifndef _loadds
-#define _loadds
-#endif
-
-#ifndef _cdecl
-#define _cdecl
-#endif
-
-#define MEM_TYPE_RAM 0
-#define MEM_TYPE_PORT 1
-#define MEM_TYPE_PROM 2
-#define MEM_TYPE_CTLREG 3
-#define MEM_TYPE_RESET 4
-#define MEM_TYPE_CFG 5
-#define MEM_TYPE_ADDRESS 6
-#define MEM_TYPE_CONFIG 7
-#define MEM_TYPE_CONTROL 8
-
-#define MAX_MEM_TYPE 10
-
-#define DIVA_OS_MEM_ATTACH_RAM(a) ((a)->ram)
-#define DIVA_OS_MEM_ATTACH_PORT(a) ((a)->port)
-#define DIVA_OS_MEM_ATTACH_PROM(a) ((a)->prom)
-#define DIVA_OS_MEM_ATTACH_CTLREG(a) ((a)->ctlReg)
-#define DIVA_OS_MEM_ATTACH_RESET(a) ((a)->reset)
-#define DIVA_OS_MEM_ATTACH_CFG(a) ((a)->cfg)
-#define DIVA_OS_MEM_ATTACH_ADDRESS(a) ((a)->Address)
-#define DIVA_OS_MEM_ATTACH_CONFIG(a) ((a)->Config)
-#define DIVA_OS_MEM_ATTACH_CONTROL(a) ((a)->Control)
-
-#define DIVA_OS_MEM_DETACH_RAM(a, x) do { } while (0)
-#define DIVA_OS_MEM_DETACH_PORT(a, x) do { } while (0)
-#define DIVA_OS_MEM_DETACH_PROM(a, x) do { } while (0)
-#define DIVA_OS_MEM_DETACH_CTLREG(a, x) do { } while (0)
-#define DIVA_OS_MEM_DETACH_RESET(a, x) do { } while (0)
-#define DIVA_OS_MEM_DETACH_CFG(a, x) do { } while (0)
-#define DIVA_OS_MEM_DETACH_ADDRESS(a, x) do { } while (0)
-#define DIVA_OS_MEM_DETACH_CONFIG(a, x) do { } while (0)
-#define DIVA_OS_MEM_DETACH_CONTROL(a, x) do { } while (0)
-
-#define DIVA_INVALID_FILE_HANDLE ((dword)(-1))
-
-#define DIVAS_CONTAINING_RECORD(address, type, field) \
- ((type *)((char *)(address) - (char *)(&((type *)0)->field)))
-
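DIVAS_CONTAINING_RECORD is the usual container_of idiom: it recovers a pointer to the enclosing structure from a pointer to one of its members by subtracting the member's offset. A small usage sketch with an illustrative type:

/* Given a pointer to the embedded 'link' member, recover the adapter. */
struct demo_adapter {
	int              id;
	struct list_head link;    /* embedded in the larger structure */
};

static inline struct demo_adapter *demo_from_link(struct list_head *lh)
{
	return DIVAS_CONTAINING_RECORD(lh, struct demo_adapter, link);
}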
-extern int sprintf(char *, const char *, ...);
-
-typedef void *LIST_ENTRY;
-
-typedef char DEVICE_NAME[64];
-typedef struct _ISDN_ADAPTER ISDN_ADAPTER;
-typedef struct _ISDN_ADAPTER *PISDN_ADAPTER;
-
-typedef void (*DIVA_DI_PRINTF)(unsigned char *, ...);
-#include "debuglib.h"
-
-#define dtrc(p) DBG_PRV0(p)
-#define dbug(a, p) DBG_PRV1(p)
-
-
-typedef struct e_info_s E_INFO;
-
-typedef char diva_os_dependent_devica_name_t[64];
-typedef void *PDEVICE_OBJECT;
-
-struct _diva_os_soft_isr;
-struct _diva_os_timer;
-struct _ISDN_ADAPTER;
-
-void diva_log_info(unsigned char *, ...);
-
-/*
-** XDI DIDD Interface
-*/
-void diva_xdi_didd_register_adapter(int card);
-void diva_xdi_didd_remove_adapter(int card);
-
-/*
-** memory allocation
-*/
-static __inline__ void *diva_os_malloc(unsigned long flags, unsigned long size)
-{
- void *ret = NULL;
-
- if (size) {
- ret = (void *) vmalloc((unsigned int) size);
- }
- return (ret);
-}
-static __inline__ void diva_os_free(unsigned long flags, void *ptr)
-{
- vfree(ptr);
-}
-
-/*
-** use skbuffs for message buffer
-*/
-typedef struct sk_buff diva_os_message_buffer_s;
-diva_os_message_buffer_s *diva_os_alloc_message_buffer(unsigned long size, void **data_buf);
-void diva_os_free_message_buffer(diva_os_message_buffer_s *dmb);
-#define DIVA_MESSAGE_BUFFER_LEN(x) x->len
-#define DIVA_MESSAGE_BUFFER_DATA(x) x->data
-
-/*
-** waiting in milliseconds
-*/
-static __inline__ void diva_os_sleep(dword mSec)
-{
- msleep(mSec);
-}
-static __inline__ void diva_os_wait(dword mSec)
-{
- mdelay(mSec);
-}
-
-/*
-** PCI Configuration space access
-*/
-void PCIwrite(byte bus, byte func, int offset, void *data, int length, void *pci_dev_handle);
-void PCIread(byte bus, byte func, int offset, void *data, int length, void *pci_dev_handle);
-
-/*
-** I/O Port utilities
-*/
-int diva_os_register_io_port(void *adapter, int reg, unsigned long port,
- unsigned long length, const char *name, int id);
-/*
-** I/O port access abstraction
-*/
-byte inpp(void __iomem *);
-word inppw(void __iomem *);
-void inppw_buffer(void __iomem *, void *, int);
-void outppw(void __iomem *, word);
-void outppw_buffer(void __iomem * , void*, int);
-void outpp(void __iomem *, word);
-
-/*
-** IRQ
-*/
-typedef struct _diva_os_adapter_irq_info {
- byte irq_nr;
- int registered;
- char irq_name[24];
-} diva_os_adapter_irq_info_t;
-int diva_os_register_irq(void *context, byte irq, const char *name);
-void diva_os_remove_irq(void *context, byte irq);
-
-#define diva_os_in_irq() in_irq()
-
-/*
-** Spin Lock framework
-*/
-typedef long diva_os_spin_lock_magic_t;
-typedef spinlock_t diva_os_spin_lock_t;
-static __inline__ int diva_os_initialize_spin_lock(spinlock_t *lock, void *unused) { \
- spin_lock_init(lock); return (0); }
-static __inline__ void diva_os_enter_spin_lock(diva_os_spin_lock_t *a, \
- diva_os_spin_lock_magic_t *old_irql, \
- void *dbg) { spin_lock_bh(a); }
-static __inline__ void diva_os_leave_spin_lock(diva_os_spin_lock_t *a, \
- diva_os_spin_lock_magic_t *old_irql, \
- void *dbg) { spin_unlock_bh(a); }
-
-#define diva_os_destroy_spin_lock(a, b) do { } while (0)
-
-/*
-** Deferred processing framework
-*/
-typedef int (*diva_os_isr_callback_t)(struct _ISDN_ADAPTER *);
-typedef void (*diva_os_soft_isr_callback_t)(struct _diva_os_soft_isr *psoft_isr, void *context);
-
-typedef struct _diva_os_soft_isr {
- void *object;
- diva_os_soft_isr_callback_t callback;
- void *callback_context;
- char dpc_thread_name[24];
-} diva_os_soft_isr_t;
-
-int diva_os_initialize_soft_isr(diva_os_soft_isr_t *psoft_isr, diva_os_soft_isr_callback_t callback, void *callback_context);
-int diva_os_schedule_soft_isr(diva_os_soft_isr_t *psoft_isr);
-int diva_os_cancel_soft_isr(diva_os_soft_isr_t *psoft_isr);
-void diva_os_remove_soft_isr(diva_os_soft_isr_t *psoft_isr);
-
-/*
- Get time service
-*/
-void diva_os_get_time(dword *sec, dword *usec);
-
-/*
-** atomic operation, fake because we use threads
-*/
-typedef int diva_os_atomic_t;
-static inline diva_os_atomic_t
-diva_os_atomic_increment(diva_os_atomic_t *pv)
-{
- *pv += 1;
- return (*pv);
-}
-static inline diva_os_atomic_t
-diva_os_atomic_decrement(diva_os_atomic_t *pv)
-{
- *pv -= 1;
- return (*pv);
-}
-
-/*
-** CAPI SECTION
-*/
-#define NO_CORNETN
-#define IMPLEMENT_DTMF 1
-#define IMPLEMENT_ECHO_CANCELLER 1
-#define IMPLEMENT_RTP 1
-#define IMPLEMENT_T38 1
-#define IMPLEMENT_FAX_SUB_SEP_PWD 1
-#define IMPLEMENT_V18 1
-#define IMPLEMENT_DTMF_TONE 1
-#define IMPLEMENT_PIAFS 1
-#define IMPLEMENT_FAX_PAPER_FORMATS 1
-#define IMPLEMENT_VOWN 1
-#define IMPLEMENT_CAPIDTMF 1
-#define IMPLEMENT_FAX_NONSTANDARD 1
-#define VSWITCH_SUPPORT 1
-
-#define IMPLEMENT_MARKED_OK_AFTER_FC 1
-
-#define DIVA_IDI_RX_DMA 1
-
-/*
-** endian macros
-**
-** If only... In some cases we did use them for endianness conversion;
-** unfortunately, other uses were real iomem accesses.
-*/
-#define READ_BYTE(addr) readb(addr)
-#define READ_WORD(addr) readw(addr)
-#define READ_DWORD(addr) readl(addr)
-
-#define WRITE_BYTE(addr, v) writeb(v, addr)
-#define WRITE_WORD(addr, v) writew(v, addr)
-#define WRITE_DWORD(addr, v) writel(v, addr)
-
-static inline __u16 GET_WORD(void *addr)
-{
- return le16_to_cpu(*(__le16 *)addr);
-}
-static inline __u32 GET_DWORD(void *addr)
-{
- return le32_to_cpu(*(__le32 *)addr);
-}
-static inline void PUT_WORD(void *addr, __u16 v)
-{
- *(__le16 *)addr = cpu_to_le16(v);
-}
-static inline void PUT_DWORD(void *addr, __u32 v)
-{
- *(__le32 *)addr = cpu_to_le32(v);
-}
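The distinction above is load-bearing: GET_WORD/PUT_WORD do little-endian conversion on ordinary memory, while READ_WORD/WRITE_WORD go through readw()/writew() and are only valid on __iomem mappings. A short sketch of picking the right accessor (function and parameter names are illustrative):

/* shared_buf: ordinary memory holding little-endian data.
 * regs:       an __iomem mapping obtained via ioremap(). */
static void accessor_sketch(void *shared_buf, void __iomem *regs)
{
	__u16 v = GET_WORD(shared_buf);       /* LE -> CPU endianness */

	PUT_WORD(shared_buf, (__u16)(v + 1)); /* stored little-endian */
	WRITE_WORD(regs, v);                  /* MMIO via writew()    */
}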
-
-/*
-** 32/64 bit macros
-*/
-#ifdef BITS_PER_LONG
-#if BITS_PER_LONG > 32
-#define PLATFORM_GT_32BIT
-#define ULongToPtr(x) (void *)(unsigned long)(x)
-#endif
-#endif
-
-/*
-** undef os definitions of macros we use
-*/
-#undef ID_MASK
-#undef N_DATA
-#undef ADDR
-
-/*
-** dump file
-*/
-#define diva_os_dump_file_t char
-#define diva_os_board_trace_t char
-#define diva_os_dump_file(__x__) do { } while (0)
-
-/*
-** size of internal arrays
-*/
-#define MAX_DESCRIPTORS 64
-
-#endif /* __PLATFORM_H__ */
diff --git a/drivers/isdn/hardware/eicon/pr_pc.h b/drivers/isdn/hardware/eicon/pr_pc.h
deleted file mode 100644
index a08d6d57a486..000000000000
--- a/drivers/isdn/hardware/eicon/pr_pc.h
+++ /dev/null
@@ -1,76 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-struct pr_ram {
- word NextReq; /* pointer to next Req Buffer */
- word NextRc; /* pointer to next Rc Buffer */
- word NextInd; /* pointer to next Ind Buffer */
- byte ReqInput; /* number of Req Buffers sent */
- byte ReqOutput; /* number of Req Buffers returned */
- byte ReqReserved; /* number of Req Buffers reserved */
- byte Int; /* ISDN-P interrupt */
- byte XLock; /* Lock field for arbitration */
- byte RcOutput; /* number of Rc buffers received */
- byte IndOutput; /* number of Ind buffers received */
- byte IMask; /* Interrupt Mask Flag */
- byte Reserved1[2]; /* reserved field, do not use */
- byte ReadyInt; /* request field for ready interrupt */
- byte Reserved2[12]; /* reserved field, do not use */
- byte InterfaceType; /* interface type 1=16K interface */
- word Signature; /* ISDN-P initialized indication */
- byte B[1]; /* buffer space for Req,Ind and Rc */
-};
-typedef struct {
- word next;
- byte Req;
- byte ReqId;
- byte ReqCh;
- byte Reserved1;
- word Reference;
- byte Reserved[8];
- PBUFFER XBuffer;
-} REQ;
-typedef struct {
- word next;
- byte Rc;
- byte RcId;
- byte RcCh;
- byte Reserved1;
- word Reference;
- byte Reserved2[8];
-} RC;
-typedef struct {
- word next;
- byte Ind;
- byte IndId;
- byte IndCh;
- byte MInd;
- word MLength;
- word Reference;
- byte RNR;
- byte Reserved;
- dword Ack;
- PBUFFER RBuffer;
-} IND;
diff --git a/drivers/isdn/hardware/eicon/s_4bri.c b/drivers/isdn/hardware/eicon/s_4bri.c
deleted file mode 100644
index ec12165fbf62..000000000000
--- a/drivers/isdn/hardware/eicon/s_4bri.c
+++ /dev/null
@@ -1,510 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di.h"
-#include "mi_pc.h"
-#include "pc_maint.h"
-#include "divasync.h"
-#include "pc_init.h"
-#include "io.h"
-#include "helpers.h"
-#include "dsrv4bri.h"
-#include "dsp_defs.h"
-#include "sdp_hdr.h"
-
-/*****************************************************************************/
-#define MAX_XLOG_SIZE (64 * 1024)
-
-/* --------------------------------------------------------------------------
-   Recover XLOG from the QBRI Card
- -------------------------------------------------------------------------- */
-static void qBri_cpu_trapped(PISDN_ADAPTER IoAdapter) {
- byte __iomem *base;
- word *Xlog;
- dword regs[4], TrapID, offset, size;
- Xdesc xlogDesc;
- int factor = (IoAdapter->tasks == 1) ? 1 : 2;
-
-/*
- * check for trapped MIPS 46xx CPU, dump exception frame
- */
-
- base = DIVA_OS_MEM_ATTACH_CONTROL(IoAdapter);
- offset = IoAdapter->ControllerNumber * (IoAdapter->MemorySize >> factor);
-
- TrapID = READ_DWORD(&base[0x80]);
-
- if ((TrapID == 0x99999999) || (TrapID == 0x99999901))
- {
- dump_trap_frame(IoAdapter, &base[0x90]);
- IoAdapter->trapped = 1;
- }
-
- regs[0] = READ_DWORD((base + offset) + 0x70);
- regs[1] = READ_DWORD((base + offset) + 0x74);
- regs[2] = READ_DWORD((base + offset) + 0x78);
- regs[3] = READ_DWORD((base + offset) + 0x7c);
- regs[0] &= IoAdapter->MemorySize - 1;
-
- if ((regs[0] >= offset)
- && (regs[0] < offset + (IoAdapter->MemorySize >> factor) - 1))
- {
- if (!(Xlog = (word *)diva_os_malloc(0, MAX_XLOG_SIZE))) {
- DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base);
- return;
- }
-
- size = offset + (IoAdapter->MemorySize >> factor) - regs[0];
- if (size > MAX_XLOG_SIZE)
- size = MAX_XLOG_SIZE;
- memcpy_fromio(Xlog, &base[regs[0]], size);
- xlogDesc.buf = Xlog;
- xlogDesc.cnt = READ_WORD(&base[regs[1] & (IoAdapter->MemorySize - 1)]);
- xlogDesc.out = READ_WORD(&base[regs[2] & (IoAdapter->MemorySize - 1)]);
- dump_xlog_buffer(IoAdapter, &xlogDesc);
- diva_os_free(0, Xlog);
- IoAdapter->trapped = 2;
- }
- DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base);
-}
-
-/* --------------------------------------------------------------------------
- Reset QBRI Hardware
- -------------------------------------------------------------------------- */
-static void reset_qBri_hardware(PISDN_ADAPTER IoAdapter) {
- word volatile __iomem *qBriReset;
- byte volatile __iomem *qBriCntrl;
- byte volatile __iomem *p;
-
- qBriReset = (word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter);
- WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_SOFT_RESET);
- diva_os_wait(1);
- WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_SOFT_RESET);
- diva_os_wait(1);
- WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_RELOAD_EEPROM);
- diva_os_wait(1);
- WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_RELOAD_EEPROM);
- diva_os_wait(1);
- DIVA_OS_MEM_DETACH_PROM(IoAdapter, qBriReset);
-
- qBriCntrl = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
- p = &qBriCntrl[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)];
- WRITE_DWORD(p, 0);
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, qBriCntrl);
-
- DBG_TRC(("resetted board @ reset addr 0x%08lx", qBriReset))
- DBG_TRC(("resetted board @ cntrl addr 0x%08lx", p))
- }
-
-/* --------------------------------------------------------------------------
- Start Card CPU
- -------------------------------------------------------------------------- */
-void start_qBri_hardware(PISDN_ADAPTER IoAdapter) {
- byte volatile __iomem *qBriReset;
- byte volatile __iomem *p;
-
- p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
- qBriReset = &p[(DIVA_4BRI_REVISION(IoAdapter)) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)];
- WRITE_DWORD(qBriReset, MQ_RISC_COLD_RESET_MASK);
- diva_os_wait(2);
- WRITE_DWORD(qBriReset, MQ_RISC_WARM_RESET_MASK | MQ_RISC_COLD_RESET_MASK);
- diva_os_wait(10);
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-
- DBG_TRC(("started processor @ addr 0x%08lx", qBriReset))
- }
-
-/* --------------------------------------------------------------------------
- Stop Card CPU
- -------------------------------------------------------------------------- */
-static void stop_qBri_hardware(PISDN_ADAPTER IoAdapter) {
- byte volatile __iomem *p;
- dword volatile __iomem *qBriReset;
- dword volatile __iomem *qBriIrq;
- dword volatile __iomem *qBriIsacDspReset;
- int rev2 = DIVA_4BRI_REVISION(IoAdapter);
- int reset_offset = rev2 ? (MQ2_BREG_RISC) : (MQ_BREG_RISC);
- int irq_offset = rev2 ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST);
- int hw_offset = rev2 ? (MQ2_ISAC_DSP_RESET) : (MQ_ISAC_DSP_RESET);
-
- if (IoAdapter->ControllerNumber > 0)
- return;
- p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
- qBriReset = (dword volatile __iomem *)&p[reset_offset];
- qBriIsacDspReset = (dword volatile __iomem *)&p[hw_offset];
-/*
- * clear interrupt line (reset Local Interrupt Test Register)
- */
- WRITE_DWORD(qBriReset, 0);
- WRITE_DWORD(qBriIsacDspReset, 0);
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-
- p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
- WRITE_BYTE(&p[PLX9054_INTCSR], 0x00); /* disable PCI interrupts */
- DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-
- p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
- qBriIrq = (dword volatile __iomem *)&p[irq_offset];
- WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-
- DBG_TRC(("stopped processor @ addr 0x%08lx", qBriReset))
-
- }
-
-/* --------------------------------------------------------------------------
- FPGA download
- -------------------------------------------------------------------------- */
-#define FPGA_NAME_OFFSET 0x10
-
-static byte *qBri_check_FPGAsrc(PISDN_ADAPTER IoAdapter, char *FileName,
- dword *Length, dword *code) {
- byte *File;
- char *fpgaFile, *fpgaType, *fpgaDate, *fpgaTime;
- dword fpgaFlen, fpgaTlen, fpgaDlen, cnt, year, i;
-
- if (!(File = (byte *)xdiLoadFile(FileName, Length, 0))) {
- return (NULL);
- }
-/*
- * scan file until FF and put id string into buffer
- */
- for (i = 0; File[i] != 0xff;)
- {
- if (++i >= *Length)
- {
- DBG_FTL(("FPGA download: start of data header not found"))
- xdiFreeFile(File);
- return (NULL);
- }
- }
- *code = i++;
-
- if ((File[i] & 0xF0) != 0x20)
- {
- DBG_FTL(("FPGA download: data header corrupted"))
- xdiFreeFile(File);
- return (NULL);
- }
- fpgaFlen = (dword)File[FPGA_NAME_OFFSET - 1];
- if (fpgaFlen == 0)
- fpgaFlen = 12;
- fpgaFile = (char *)&File[FPGA_NAME_OFFSET];
- fpgaTlen = (dword)fpgaFile[fpgaFlen + 2];
- if (fpgaTlen == 0)
- fpgaTlen = 10;
- fpgaType = (char *)&fpgaFile[fpgaFlen + 3];
- fpgaDlen = (dword) fpgaType[fpgaTlen + 2];
- if (fpgaDlen == 0)
- fpgaDlen = 11;
- fpgaDate = (char *)&fpgaType[fpgaTlen + 3];
- fpgaTime = (char *)&fpgaDate[fpgaDlen + 3];
- cnt = (dword)(((File[i] & 0x0F) << 20) + (File[i + 1] << 12)
- + (File[i + 2] << 4) + (File[i + 3] >> 4));
-
- if ((dword)(i + (cnt / 8)) > *Length)
- {
- DBG_FTL(("FPGA download: '%s' file too small (%ld < %ld)",
- FileName, *Length, code + ((cnt + 7) / 8)))
- xdiFreeFile(File);
- return (NULL);
- }
- i = 0;
- do
- {
- while ((fpgaDate[i] != '\0')
- && ((fpgaDate[i] < '0') || (fpgaDate[i] > '9')))
- {
- i++;
- }
- year = 0;
- while ((fpgaDate[i] >= '0') && (fpgaDate[i] <= '9'))
- year = year * 10 + (fpgaDate[i++] - '0');
- } while ((year < 2000) && (fpgaDate[i] != '\0'));
-
- switch (IoAdapter->cardType) {
- case CARDTYPE_DIVASRV_B_2F_PCI:
- break;
-
- default:
- if (year >= 2001) {
- IoAdapter->fpga_features |= PCINIT_FPGA_PLX_ACCESS_SUPPORTED;
- }
- }
-
- DBG_LOG(("FPGA[%s] file %s (%s %s) len %d",
- fpgaType, fpgaFile, fpgaDate, fpgaTime, cnt))
- return (File);
-}
-
-/******************************************************************************/
-
-#define FPGA_PROG 0x0001 /* PROG enable low */
-#define FPGA_BUSY 0x0002 /* BUSY high, DONE low */
-#define FPGA_CS 0x000C /* Enable I/O pins */
-#define FPGA_CCLK 0x0100
-#define FPGA_DOUT 0x0400
-#define FPGA_DIN FPGA_DOUT /* bidirectional I/O */
-
-int qBri_FPGA_download(PISDN_ADAPTER IoAdapter) {
- int bit;
- byte *File;
- dword code, FileLength;
- word volatile __iomem *addr = (word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter);
- word val, baseval = FPGA_CS | FPGA_PROG;
-
-
-
- if (DIVA_4BRI_REVISION(IoAdapter))
- {
- char *name;
-
- switch (IoAdapter->cardType) {
- case CARDTYPE_DIVASRV_B_2F_PCI:
- name = "dsbri2f.bit";
- break;
-
- case CARDTYPE_DIVASRV_B_2M_V2_PCI:
- case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI:
- name = "dsbri2m.bit";
- break;
-
- default:
- name = "ds4bri2.bit";
- }
-
- File = qBri_check_FPGAsrc(IoAdapter, name,
- &FileLength, &code);
- }
- else
- {
- File = qBri_check_FPGAsrc(IoAdapter, "ds4bri.bit",
- &FileLength, &code);
- }
- if (!File) {
- DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
- return (0);
- }
-/*
- * prepare download, pulse PROGRAM pin down.
- */
- WRITE_WORD(addr, baseval & ~FPGA_PROG); /* PROGRAM low pulse */
- WRITE_WORD(addr, baseval); /* release */
- diva_os_wait(50); /* wait until FPGA finished internal memory clear */
-/*
- * check done pin, must be low
- */
- if (READ_WORD(addr) & FPGA_BUSY)
- {
- DBG_FTL(("FPGA download: acknowledge for FPGA memory clear missing"))
- xdiFreeFile(File);
- DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
- return (0);
- }
-/*
- * put data onto the FPGA
- */
- while (code < FileLength)
- {
- val = ((word)File[code++]) << 3;
-
- for (bit = 8; bit-- > 0; val <<= 1) /* put byte onto FPGA */
- {
- baseval &= ~FPGA_DOUT; /* clr data bit */
- baseval |= (val & FPGA_DOUT); /* copy data bit */
- WRITE_WORD(addr, baseval);
- WRITE_WORD(addr, baseval | FPGA_CCLK); /* set CCLK hi */
- WRITE_WORD(addr, baseval | FPGA_CCLK); /* set CCLK hi */
- WRITE_WORD(addr, baseval); /* set CCLK lo */
- }
- }
- xdiFreeFile(File);
- diva_os_wait(100);
- val = READ_WORD(addr);
-
- DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
-
- if (!(val & FPGA_BUSY))
- {
- DBG_FTL(("FPGA download: chip remains in busy state (0x%04x)", val))
- return (0);
- }
-
- return (1);
-}
-
-static int load_qBri_hardware(PISDN_ADAPTER IoAdapter) {
- return (0);
-}
-
-/* --------------------------------------------------------------------------
- Card ISR
- -------------------------------------------------------------------------- */
-static int qBri_ISR(struct _ISDN_ADAPTER *IoAdapter) {
- dword volatile __iomem *qBriIrq;
-
- PADAPTER_LIST_ENTRY QuadroList = IoAdapter->QuadroList;
-
- word i;
- int serviced = 0;
- byte __iomem *p;
-
- p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
-
- if (!(READ_BYTE(&p[PLX9054_INTCSR]) & 0x80)) {
- DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
- return (0);
- }
- DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-
-/*
- * clear interrupt line (reset Local Interrupt Test Register)
- */
- p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
- qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]);
- WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-
- for (i = 0; i < IoAdapter->tasks; ++i)
- {
- IoAdapter = QuadroList->QuadroAdapter[i];
-
- if (IoAdapter && IoAdapter->Initialized
- && IoAdapter->tst_irq(&IoAdapter->a))
- {
- IoAdapter->IrqCount++;
- serviced = 1;
- diva_os_schedule_soft_isr(&IoAdapter->isr_soft_isr);
- }
- }
-
- return (serviced);
-}
-
-/* --------------------------------------------------------------------------
-   Disable the interrupt on the card
- -------------------------------------------------------------------------- */
-static void disable_qBri_interrupt(PISDN_ADAPTER IoAdapter) {
- dword volatile __iomem *qBriIrq;
- byte __iomem *p;
-
- if (IoAdapter->ControllerNumber > 0)
- return;
-/*
- * clear interrupt line (reset Local Interrupt Test Register)
- */
- p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
- WRITE_BYTE(&p[PLX9054_INTCSR], 0x00); /* disable PCI interrupts */
- DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-
- p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
- qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]);
- WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF);
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-}
-
-/* --------------------------------------------------------------------------
- Install Adapter Entry Points
- -------------------------------------------------------------------------- */
-static void set_common_qBri_functions(PISDN_ADAPTER IoAdapter) {
- ADAPTER *a;
-
- a = &IoAdapter->a;
-
- a->ram_in = mem_in;
- a->ram_inw = mem_inw;
- a->ram_in_buffer = mem_in_buffer;
- a->ram_look_ahead = mem_look_ahead;
- a->ram_out = mem_out;
- a->ram_outw = mem_outw;
- a->ram_out_buffer = mem_out_buffer;
- a->ram_inc = mem_inc;
-
- IoAdapter->out = pr_out;
- IoAdapter->dpc = pr_dpc;
- IoAdapter->tst_irq = scom_test_int;
- IoAdapter->clr_irq = scom_clear_int;
- IoAdapter->pcm = (struct pc_maint *)MIPS_MAINT_OFFS;
-
- IoAdapter->load = load_qBri_hardware;
-
- IoAdapter->disIrq = disable_qBri_interrupt;
- IoAdapter->rstFnc = reset_qBri_hardware;
- IoAdapter->stop = stop_qBri_hardware;
- IoAdapter->trapFnc = qBri_cpu_trapped;
-
- IoAdapter->diva_isr_handler = qBri_ISR;
-
- IoAdapter->a.io = (void *)IoAdapter;
-}
-
-static void set_qBri_functions(PISDN_ADAPTER IoAdapter) {
- if (!IoAdapter->tasks) {
- IoAdapter->tasks = MQ_INSTANCE_COUNT;
- }
- IoAdapter->MemorySize = MQ_MEMORY_SIZE;
- set_common_qBri_functions(IoAdapter);
- diva_os_set_qBri_functions(IoAdapter);
-}
-
-static void set_qBri2_functions(PISDN_ADAPTER IoAdapter) {
- if (!IoAdapter->tasks) {
- IoAdapter->tasks = MQ_INSTANCE_COUNT;
- }
- IoAdapter->MemorySize = (IoAdapter->tasks == 1) ? BRI2_MEMORY_SIZE : MQ2_MEMORY_SIZE;
- set_common_qBri_functions(IoAdapter);
- diva_os_set_qBri2_functions(IoAdapter);
-}
-
-/******************************************************************************/
-
-void prepare_qBri_functions(PISDN_ADAPTER IoAdapter) {
-
- set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[0]);
- set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[1]);
- set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[2]);
- set_qBri_functions(IoAdapter->QuadroList->QuadroAdapter[3]);
-
-}
-
-void prepare_qBri2_functions(PISDN_ADAPTER IoAdapter) {
- if (!IoAdapter->tasks) {
- IoAdapter->tasks = MQ_INSTANCE_COUNT;
- }
-
- set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[0]);
- if (IoAdapter->tasks > 1) {
- set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[1]);
- set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[2]);
- set_qBri2_functions(IoAdapter->QuadroList->QuadroAdapter[3]);
- }
-
-}
-
-/* -------------------------------------------------------------------------- */
diff --git a/drivers/isdn/hardware/eicon/s_bri.c b/drivers/isdn/hardware/eicon/s_bri.c
deleted file mode 100644
index 6a5bb7462339..000000000000
--- a/drivers/isdn/hardware/eicon/s_bri.c
+++ /dev/null
@@ -1,191 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di.h"
-#include "mi_pc.h"
-#include "pc_maint.h"
-#include "divasync.h"
-#include "io.h"
-#include "helpers.h"
-#include "dsrv_bri.h"
-#include "dsp_defs.h"
-/*****************************************************************************/
-#define MAX_XLOG_SIZE (64 * 1024)
-/* --------------------------------------------------------------------------
-   Investigate card state, recover the trace buffer
- -------------------------------------------------------------------------- */
-static void bri_cpu_trapped(PISDN_ADAPTER IoAdapter) {
- byte __iomem *addrHi, *addrLo, *ioaddr;
- word *Xlog;
- dword regs[4], i, size;
- Xdesc xlogDesc;
- byte __iomem *Port;
-/*
- * first read pointers and trap frame
- */
- if (!(Xlog = (word *)diva_os_malloc(0, MAX_XLOG_SIZE)))
- return;
- Port = DIVA_OS_MEM_ATTACH_PORT(IoAdapter);
- addrHi = Port + ((IoAdapter->Properties.Bus == BUS_PCI) ? M_PCI_ADDRH : ADDRH);
- addrLo = Port + ADDR;
- ioaddr = Port + DATA;
- outpp(addrHi, 0);
- outppw(addrLo, 0);
- for (i = 0; i < 0x100; Xlog[i++] = inppw(ioaddr));
-/*
- * check for trapped MIPS 3xxx CPU, dump only exception frame
- */
- if (GET_DWORD(&Xlog[0x80 / sizeof(Xlog[0])]) == 0x99999999)
- {
- dump_trap_frame(IoAdapter, &((byte *)Xlog)[0x90]);
- IoAdapter->trapped = 1;
- }
- regs[0] = GET_DWORD(&((byte *)Xlog)[0x70]);
- regs[1] = GET_DWORD(&((byte *)Xlog)[0x74]);
- regs[2] = GET_DWORD(&((byte *)Xlog)[0x78]);
- regs[3] = GET_DWORD(&((byte *)Xlog)[0x7c]);
- outpp(addrHi, (regs[1] >> 16) & 0x7F);
- outppw(addrLo, regs[1] & 0xFFFF);
- xlogDesc.cnt = inppw(ioaddr);
- outpp(addrHi, (regs[2] >> 16) & 0x7F);
- outppw(addrLo, regs[2] & 0xFFFF);
- xlogDesc.out = inppw(ioaddr);
- xlogDesc.buf = Xlog;
- regs[0] &= IoAdapter->MemorySize - 1;
- if ((regs[0] < IoAdapter->MemorySize - 1))
- {
- size = IoAdapter->MemorySize - regs[0];
- if (size > MAX_XLOG_SIZE)
- size = MAX_XLOG_SIZE;
- for (i = 0; i < (size / sizeof(*Xlog)); regs[0] += 2)
- {
- outpp(addrHi, (regs[0] >> 16) & 0x7F);
- outppw(addrLo, regs[0] & 0xFFFF);
- Xlog[i++] = inppw(ioaddr);
- }
- dump_xlog_buffer(IoAdapter, &xlogDesc);
- diva_os_free(0, Xlog);
- IoAdapter->trapped = 2;
- }
- outpp(addrHi, (byte)((BRI_UNCACHED_ADDR(IoAdapter->MemoryBase + IoAdapter->MemorySize -
- BRI_SHARED_RAM_SIZE)) >> 16));
- outppw(addrLo, 0x00);
- DIVA_OS_MEM_DETACH_PORT(IoAdapter, Port);
-}
-/* ---------------------------------------------------------------------
- Reset hardware
- --------------------------------------------------------------------- */
-static void reset_bri_hardware(PISDN_ADAPTER IoAdapter) {
- byte __iomem *p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
- outpp(p, 0x00);
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-}
-/* ---------------------------------------------------------------------
- Halt system
- --------------------------------------------------------------------- */
-static void stop_bri_hardware(PISDN_ADAPTER IoAdapter) {
- byte __iomem *p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
- if (p) {
- outpp(p, 0x00); /* disable interrupts ! */
- }
- DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
- p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
- outpp(p, 0x00); /* clear int, halt cpu */
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-}
-static int load_bri_hardware(PISDN_ADAPTER IoAdapter) {
- return (0);
-}
-/******************************************************************************/
-static int bri_ISR(struct _ISDN_ADAPTER *IoAdapter) {
- byte __iomem *p;
-
- p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
- if (!(inpp(p) & 0x01)) {
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
- return (0);
- }
- /*
- clear interrupt line
- */
- outpp(p, 0x08);
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
- IoAdapter->IrqCount++;
- if (IoAdapter->Initialized) {
- diva_os_schedule_soft_isr(&IoAdapter->isr_soft_isr);
- }
- return (1);
-}
-/* --------------------------------------------------------------------------
- Disable IRQ in the card hardware
- -------------------------------------------------------------------------- */
-static void disable_bri_interrupt(PISDN_ADAPTER IoAdapter) {
- byte __iomem *p;
- p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
- if (p)
- {
- outpp(p, 0x00); /* disable interrupts ! */
- }
- DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
- p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
- outpp(p, 0x00); /* clear int, halt cpu */
- DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
-}
-/* -------------------------------------------------------------------------
- Fill card entry points
- ------------------------------------------------------------------------- */
-void prepare_maestra_functions(PISDN_ADAPTER IoAdapter) {
- ADAPTER *a = &IoAdapter->a;
- a->ram_in = io_in;
- a->ram_inw = io_inw;
- a->ram_in_buffer = io_in_buffer;
- a->ram_look_ahead = io_look_ahead;
- a->ram_out = io_out;
- a->ram_outw = io_outw;
- a->ram_out_buffer = io_out_buffer;
- a->ram_inc = io_inc;
- IoAdapter->MemoryBase = BRI_MEMORY_BASE;
- IoAdapter->MemorySize = BRI_MEMORY_SIZE;
- IoAdapter->out = pr_out;
- IoAdapter->dpc = pr_dpc;
- IoAdapter->tst_irq = scom_test_int;
- IoAdapter->clr_irq = scom_clear_int;
- IoAdapter->pcm = (struct pc_maint *)MIPS_MAINT_OFFS;
- IoAdapter->load = load_bri_hardware;
- IoAdapter->disIrq = disable_bri_interrupt;
- IoAdapter->rstFnc = reset_bri_hardware;
- IoAdapter->stop = stop_bri_hardware;
- IoAdapter->trapFnc = bri_cpu_trapped;
- IoAdapter->diva_isr_handler = bri_ISR;
- /*
- Prepare OS dependent functions
- */
- diva_os_prepare_maestra_functions(IoAdapter);
-}
-/* -------------------------------------------------------------------------- */
diff --git a/drivers/isdn/hardware/eicon/s_pri.c b/drivers/isdn/hardware/eicon/s_pri.c
deleted file mode 100644
index ddd0e0ef8ed7..000000000000
--- a/drivers/isdn/hardware/eicon/s_pri.c
+++ /dev/null
@@ -1,205 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "pr_pc.h"
-#include "di.h"
-#include "mi_pc.h"
-#include "pc_maint.h"
-#include "divasync.h"
-#include "io.h"
-#include "helpers.h"
-#include "dsrv_pri.h"
-#include "dsp_defs.h"
-/*****************************************************************************/
-#define MAX_XLOG_SIZE (64 * 1024)
-/* -------------------------------------------------------------------------
-   Return the offset between ADAPTER->ram and the real start of memory
- ------------------------------------------------------------------------- */
-static dword pri_ram_offset(ADAPTER *a) {
- return ((dword)MP_SHARED_RAM_OFFSET);
-}
-/* -------------------------------------------------------------------------
-   Recover the XLOG buffer from the card
- ------------------------------------------------------------------------- */
-static void pri_cpu_trapped(PISDN_ADAPTER IoAdapter) {
- byte __iomem *base;
- word *Xlog;
- dword regs[4], TrapID, size;
- Xdesc xlogDesc;
-/*
- * check for trapped MIPS 46xx CPU, dump exception frame
- */
- base = DIVA_OS_MEM_ATTACH_ADDRESS(IoAdapter);
- TrapID = READ_DWORD(&base[0x80]);
- if ((TrapID == 0x99999999) || (TrapID == 0x99999901))
- {
- dump_trap_frame(IoAdapter, &base[0x90]);
- IoAdapter->trapped = 1;
- }
- regs[0] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x70]);
- regs[1] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x74]);
- regs[2] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x78]);
- regs[3] = READ_DWORD(&base[MP_PROTOCOL_OFFSET + 0x7c]);
- regs[0] &= IoAdapter->MemorySize - 1;
- if ((regs[0] < IoAdapter->MemorySize - 1))
- {
- if (!(Xlog = (word *)diva_os_malloc(0, MAX_XLOG_SIZE))) {
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, base);
- return;
- }
- size = IoAdapter->MemorySize - regs[0];
- if (size > MAX_XLOG_SIZE)
- size = MAX_XLOG_SIZE;
- memcpy_fromio(Xlog, &base[regs[0]], size);
- xlogDesc.buf = Xlog;
- xlogDesc.cnt = READ_WORD(&base[regs[1] & (IoAdapter->MemorySize - 1)]);
- xlogDesc.out = READ_WORD(&base[regs[2] & (IoAdapter->MemorySize - 1)]);
- dump_xlog_buffer(IoAdapter, &xlogDesc);
- diva_os_free(0, Xlog);
- IoAdapter->trapped = 2;
- }
- DIVA_OS_MEM_DETACH_ADDRESS(IoAdapter, base);
-}
-/* -------------------------------------------------------------------------
- Hardware reset of PRI card
- ------------------------------------------------------------------------- */
-static void reset_pri_hardware(PISDN_ADAPTER IoAdapter) {
- byte __iomem *p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
- WRITE_BYTE(p, _MP_RISC_RESET | _MP_LED1 | _MP_LED2);
- diva_os_wait(50);
- WRITE_BYTE(p, 0x00);
- diva_os_wait(50);
- DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-}
-/* -------------------------------------------------------------------------
- Stop Card Hardware
- ------------------------------------------------------------------------- */
-static void stop_pri_hardware(PISDN_ADAPTER IoAdapter) {
- dword i;
- byte __iomem *p;
- dword volatile __iomem *cfgReg = (void __iomem *)DIVA_OS_MEM_ATTACH_CFG(IoAdapter);
- WRITE_DWORD(&cfgReg[3], 0);
- WRITE_DWORD(&cfgReg[1], 0);
- DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfgReg);
- IoAdapter->a.ram_out(&IoAdapter->a, &RAM->SWReg, SWREG_HALT_CPU);
- i = 0;
- while ((i < 100) && (IoAdapter->a.ram_in(&IoAdapter->a, &RAM->SWReg) != 0))
- {
- diva_os_wait(1);
- i++;
- }
- DBG_TRC(("%s: PRI stopped (%d)", IoAdapter->Name, i))
- cfgReg = (void __iomem *)DIVA_OS_MEM_ATTACH_CFG(IoAdapter);
- WRITE_DWORD(&cfgReg[0], ((dword)(~0x03E00000)));
- DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfgReg);
- diva_os_wait(1);
- p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
- WRITE_BYTE(p, _MP_RISC_RESET | _MP_LED1 | _MP_LED2);
- DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
-}
-static int load_pri_hardware(PISDN_ADAPTER IoAdapter) {
- return (0);
-}
-/* --------------------------------------------------------------------------
- PRI Adapter interrupt Service Routine
- -------------------------------------------------------------------------- */
-static int pri_ISR(struct _ISDN_ADAPTER *IoAdapter) {
- byte __iomem *cfg = DIVA_OS_MEM_ATTACH_CFG(IoAdapter);
- if (!(READ_DWORD(cfg) & 0x80000000)) {
- DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfg);
- return (0);
- }
- /*
- clear interrupt line
- */
- WRITE_DWORD(cfg, (dword)~0x03E00000);
- DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfg);
- IoAdapter->IrqCount++;
- if (IoAdapter->Initialized)
- {
- diva_os_schedule_soft_isr(&IoAdapter->isr_soft_isr);
- }
- return (1);
-}
-/* -------------------------------------------------------------------------
- Disable interrupt in the card hardware
- ------------------------------------------------------------------------- */
-static void disable_pri_interrupt(PISDN_ADAPTER IoAdapter) {
- dword volatile __iomem *cfgReg = (dword volatile __iomem *)DIVA_OS_MEM_ATTACH_CFG(IoAdapter);
- WRITE_DWORD(&cfgReg[3], 0);
- WRITE_DWORD(&cfgReg[1], 0);
- WRITE_DWORD(&cfgReg[0], (dword)(~0x03E00000));
- DIVA_OS_MEM_DETACH_CFG(IoAdapter, cfgReg);
-}
-/* -------------------------------------------------------------------------
- Install entry points for PRI Adapter
- ------------------------------------------------------------------------- */
-static void prepare_common_pri_functions(PISDN_ADAPTER IoAdapter) {
- ADAPTER *a = &IoAdapter->a;
- a->ram_in = mem_in;
- a->ram_inw = mem_inw;
- a->ram_in_buffer = mem_in_buffer;
- a->ram_look_ahead = mem_look_ahead;
- a->ram_out = mem_out;
- a->ram_outw = mem_outw;
- a->ram_out_buffer = mem_out_buffer;
- a->ram_inc = mem_inc;
- a->ram_offset = pri_ram_offset;
- a->ram_out_dw = mem_out_dw;
- a->ram_in_dw = mem_in_dw;
- a->istream_wakeup = pr_stream;
- IoAdapter->out = pr_out;
- IoAdapter->dpc = pr_dpc;
- IoAdapter->tst_irq = scom_test_int;
- IoAdapter->clr_irq = scom_clear_int;
- IoAdapter->pcm = (struct pc_maint *)(MIPS_MAINT_OFFS
- - MP_SHARED_RAM_OFFSET);
- IoAdapter->load = load_pri_hardware;
- IoAdapter->disIrq = disable_pri_interrupt;
- IoAdapter->rstFnc = reset_pri_hardware;
- IoAdapter->stop = stop_pri_hardware;
- IoAdapter->trapFnc = pri_cpu_trapped;
- IoAdapter->diva_isr_handler = pri_ISR;
-}
-/* -------------------------------------------------------------------------
- Install entry points for PRI Adapter
- ------------------------------------------------------------------------- */
-void prepare_pri_functions(PISDN_ADAPTER IoAdapter) {
- IoAdapter->MemorySize = MP_MEMORY_SIZE;
- prepare_common_pri_functions(IoAdapter);
- diva_os_prepare_pri_functions(IoAdapter);
-}
-/* -------------------------------------------------------------------------
- Install entry points for PRI Rev.2 Adapter
- ------------------------------------------------------------------------- */
-void prepare_pri2_functions(PISDN_ADAPTER IoAdapter) {
- IoAdapter->MemorySize = MP2_MEMORY_SIZE;
- prepare_common_pri_functions(IoAdapter);
- diva_os_prepare_pri2_functions(IoAdapter);
-}
-/* ------------------------------------------------------------------------- */
diff --git a/drivers/isdn/hardware/eicon/sdp_hdr.h b/drivers/isdn/hardware/eicon/sdp_hdr.h
deleted file mode 100644
index 5e20f8d68673..000000000000
--- a/drivers/isdn/hardware/eicon/sdp_hdr.h
+++ /dev/null
@@ -1,117 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-#ifndef __DIVA_SOFT_DSP_TASK_ENTRY_H__
-#define __DIVA_SOFT_DSP_TASK_ENTRY_H__
-/*
-  The soft DSP image is described by a binary header at the start of the
-  image:
- OFFSET FROM IMAGE START | VARIABLE
- ------------------------------------------------------------------------
- DIVA_MIPS_TASK_IMAGE_LINK_OFFS | link to the next image
- ----------------------------------------------------------------------
- DIVA_MIPS_TASK_IMAGE_GP_OFFS | image gp register value, void*
- ----------------------------------------------------------------------
- DIVA_MIPS_TASK_IMAGE_ENTRY_OFFS | diva_mips_sdp_task_entry_t*
- ----------------------------------------------------------------------
-  DIVA_MIPS_TASK_IMAGE_LOAD_ADDR_OFFS | image start address (void*)
-  ----------------------------------------------------------------------
-  DIVA_MIPS_TASK_IMAGE_END_ADDR_OFFS | image end address (void*)
- ----------------------------------------------------------------------
- DIVA_MIPS_TASK_IMAGE_ID_STRING_OFFS | image id string char[...];
- ----------------------------------------------------------------------
-*/
-#define DIVA_MIPS_TASK_IMAGE_LINK_OFFS 0x6C
-#define DIVA_MIPS_TASK_IMAGE_GP_OFFS 0x70
-#define DIVA_MIPS_TASK_IMAGE_ENTRY_OFFS 0x74
-#define DIVA_MIPS_TASK_IMAGE_LOAD_ADDR_OFFS 0x78
-#define DIVA_MIPS_TASK_IMAGE_END_ADDR_OFFS 0x7c
-#define DIVA_MIPS_TASK_IMAGE_ID_STRING_OFFS 0x80
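Given these fixed offsets, reading the header of a loaded task image is plain pointer arithmetic. A minimal sketch, assuming img points at the start of one image and reusing GET_DWORD from platform.h (function names and the chain-walking convention are assumptions):

/* Fetch the id string and the link field of a soft DSP task image. */
static const char *sdp_image_id(const byte *img)
{
	return (const char *)&img[DIVA_MIPS_TASK_IMAGE_ID_STRING_OFFS];
}

static dword sdp_image_link(const byte *img)
{
	/* link to the next image in the chain */
	return GET_DWORD((void *)&img[DIVA_MIPS_TASK_IMAGE_LINK_OFFS]);
}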
-/*
-  This function is called in order to set the GP register of this task.
-  It should always be called before any function of the
-  task is called
-*/
-typedef void (*diva_task_set_prog_gp_proc_t)(void *new_gp);
-/*
- This function is called to clear .bss at task initialization step
-*/
-typedef void (*diva_task_sys_reset_proc_t)(void);
-/*
-  This function is called in order to provide the master's GP to the
-  task; it will be used by calls from the task to the master
-*/
-typedef void (*diva_task_set_main_gp_proc_t)(void *main_gp);
-/*
- This function is called to provide address of 'dprintf' function
-  This function is called to provide the address of the 'dprintf' function
-*/
-typedef word (*diva_prt_proc_t)(char *, ...);
-typedef void (*diva_task_set_prt_proc_t)(diva_prt_proc_t fn);
-/*
- This function is called to set task PID
-*/
-typedef void (*diva_task_set_pid_proc_t)(dword id);
-/*
- This function is called for run-time task init
-*/
-typedef int (*diva_task_run_time_init_proc_t)(void*, dword);
-/*
- This function is called from system scheduler or from timer
-*/
-typedef void (*diva_task_callback_proc_t)(void);
-/*
-  This callback is used by the task to get the current time in ms
-*/
-typedef dword (*diva_task_get_tick_count_proc_t)(void);
-typedef void (*diva_task_set_get_time_proc_t)(\
- diva_task_get_tick_count_proc_t fn);
-typedef struct _diva_mips_sdp_task_entry {
- diva_task_set_prog_gp_proc_t set_gp_proc;
- diva_task_sys_reset_proc_t sys_reset_proc;
- diva_task_set_main_gp_proc_t set_main_gp_proc;
- diva_task_set_prt_proc_t set_dprintf_proc;
- diva_task_set_pid_proc_t set_pid_proc;
- diva_task_run_time_init_proc_t run_time_init_proc;
- diva_task_callback_proc_t task_callback_proc;
- diva_task_callback_proc_t timer_callback_proc;
- diva_task_set_get_time_proc_t set_get_time_proc;
- void *last_entry_proc;
-} diva_mips_sdp_task_entry_t;
-/*
-  'last_entry_proc' should be set to zero and is reserved for future extensions
-*/
-typedef struct _diva_mips_sw_task {
- diva_mips_sdp_task_entry_t sdp_entry;
- void *sdp_gp_reg;
- void *own_gp_reg;
-} diva_mips_sw_task_t;
-#if !defined(DIVA_BRI2F_SDP_1_NAME)
-#define DIVA_BRI2F_SDP_1_NAME "sdp0.2q0"
-#endif
-#if !defined(DIVA_BRI2F_SDP_2_NAME)
-#define DIVA_BRI2F_SDP_2_NAME "sdp1.2q0"
-#endif
-#endif
diff --git a/drivers/isdn/hardware/eicon/um_idi.c b/drivers/isdn/hardware/eicon/um_idi.c
deleted file mode 100644
index db4dd4ff3642..000000000000
--- a/drivers/isdn/hardware/eicon/um_idi.c
+++ /dev/null
@@ -1,886 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* $Id: um_idi.c,v 1.14 2004/03/21 17:54:37 armin Exp $ */
-
-#include "platform.h"
-#include "di_defs.h"
-#include "pc.h"
-#include "dqueue.h"
-#include "adapter.h"
-#include "entity.h"
-#include "um_xdi.h"
-#include "um_idi.h"
-#include "debuglib.h"
-#include "divasync.h"
-
-#define DIVAS_MAX_XDI_ADAPTERS 64
-
-/* --------------------------------------------------------------------------
- IMPORTS
- -------------------------------------------------------------------------- */
-extern void diva_os_wakeup_read(void *os_context);
-extern void diva_os_wakeup_close(void *os_context);
-/* --------------------------------------------------------------------------
- LOCALS
- -------------------------------------------------------------------------- */
-static LIST_HEAD(adapter_q);
-static diva_os_spin_lock_t adapter_lock;
-
-static diva_um_idi_adapter_t *diva_um_idi_find_adapter(dword nr);
-static void cleanup_adapter(diva_um_idi_adapter_t *a);
-static void cleanup_entity(divas_um_idi_entity_t *e);
-static int diva_user_mode_idi_adapter_features(diva_um_idi_adapter_t *a,
- diva_um_idi_adapter_features_t
- *features);
-static int process_idi_request(divas_um_idi_entity_t *e,
- const diva_um_idi_req_hdr_t *req);
-static int process_idi_rc(divas_um_idi_entity_t *e, byte rc);
-static int process_idi_ind(divas_um_idi_entity_t *e, byte ind);
-static int write_return_code(divas_um_idi_entity_t *e, byte rc);
-
-/* --------------------------------------------------------------------------
- MAIN
- -------------------------------------------------------------------------- */
-int diva_user_mode_idi_init(void)
-{
- diva_os_initialize_spin_lock(&adapter_lock, "adapter");
- return (0);
-}
-
-/* --------------------------------------------------------------------------
- Copy adapter features to user supplied buffer
- -------------------------------------------------------------------------- */
-static int
-diva_user_mode_idi_adapter_features(diva_um_idi_adapter_t *a,
- diva_um_idi_adapter_features_t *
- features)
-{
- IDI_SYNC_REQ sync_req;
-
- if ((a) && (a->d.request)) {
- features->type = a->d.type;
- features->features = a->d.features;
- features->channels = a->d.channels;
- memset(features->name, 0, sizeof(features->name));
-
- sync_req.GetName.Req = 0;
- sync_req.GetName.Rc = IDI_SYNC_REQ_GET_NAME;
- (*(a->d.request)) ((ENTITY *)&sync_req);
- strlcpy(features->name, sync_req.GetName.name,
- sizeof(features->name));
-
- sync_req.GetSerial.Req = 0;
- sync_req.GetSerial.Rc = IDI_SYNC_REQ_GET_SERIAL;
- sync_req.GetSerial.serial = 0;
- (*(a->d.request))((ENTITY *)&sync_req);
- features->serial_number = sync_req.GetSerial.serial;
- }
-
- return ((a) ? 0 : -1);
-}
-
-/* --------------------------------------------------------------------------
- REMOVE ADAPTER
- -------------------------------------------------------------------------- */
-void diva_user_mode_idi_remove_adapter(int adapter_nr)
-{
- struct list_head *tmp;
- diva_um_idi_adapter_t *a;
-
- list_for_each(tmp, &adapter_q) {
- a = list_entry(tmp, diva_um_idi_adapter_t, link);
- if (a->adapter_nr == adapter_nr) {
- list_del(tmp);
- cleanup_adapter(a);
- DBG_LOG(("DIDD: del adapter(%d)", a->adapter_nr));
- diva_os_free(0, a);
- break;
- }
- }
-}
-
-/* --------------------------------------------------------------------------
- CALLED ON DRIVER EXIT (UNLOAD)
- -------------------------------------------------------------------------- */
-void diva_user_mode_idi_finit(void)
-{
- struct list_head *tmp, *safe;
- diva_um_idi_adapter_t *a;
-
- list_for_each_safe(tmp, safe, &adapter_q) {
- a = list_entry(tmp, diva_um_idi_adapter_t, link);
- list_del(tmp);
- cleanup_adapter(a);
- DBG_LOG(("DIDD: del adapter(%d)", a->adapter_nr));
- diva_os_free(0, a);
- }
- diva_os_destroy_spin_lock(&adapter_lock, "adapter");
-}
-
-/* -------------------------------------------------------------------------
- CREATE AND INIT IDI ADAPTER
- ------------------------------------------------------------------------- */
-int diva_user_mode_idi_create_adapter(const DESCRIPTOR *d, int adapter_nr)
-{
- diva_os_spin_lock_magic_t old_irql;
- diva_um_idi_adapter_t *a =
- (diva_um_idi_adapter_t *) diva_os_malloc(0,
- sizeof
- (diva_um_idi_adapter_t));
-
- if (!a) {
- return (-1);
- }
- memset(a, 0x00, sizeof(*a));
- INIT_LIST_HEAD(&a->entity_q);
-
- a->d = *d;
- a->adapter_nr = adapter_nr;
-
- DBG_LOG(("DIDD_ADD A(%d), type:%02x, features:%04x, channels:%d",
- adapter_nr, a->d.type, a->d.features, a->d.channels));
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "create_adapter");
- list_add_tail(&a->link, &adapter_q);
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "create_adapter");
- return (0);
-}
-
-/* ------------------------------------------------------------------------
- Find adapter by Adapter number
- ------------------------------------------------------------------------ */
-static diva_um_idi_adapter_t *diva_um_idi_find_adapter(dword nr)
-{
- diva_um_idi_adapter_t *a = NULL;
- struct list_head *tmp;
-
- list_for_each(tmp, &adapter_q) {
- a = list_entry(tmp, diva_um_idi_adapter_t, link);
- DBG_TRC(("find_adapter: (%d)-(%d)", nr, a->adapter_nr));
- if (a->adapter_nr == (int)nr)
- break;
- a = NULL;
- }
- return (a);
-}
-
-/* ------------------------------------------------------------------------
- Cleanup this adapter and cleanup/delete all entities assigned
- to this adapter
- ------------------------------------------------------------------------ */
-static void cleanup_adapter(diva_um_idi_adapter_t *a)
-{
- struct list_head *tmp, *safe;
- divas_um_idi_entity_t *e;
-
- list_for_each_safe(tmp, safe, &a->entity_q) {
- e = list_entry(tmp, divas_um_idi_entity_t, link);
- list_del(tmp);
- cleanup_entity(e);
- if (e->os_context) {
- diva_os_wakeup_read(e->os_context);
- diva_os_wakeup_close(e->os_context);
- }
- }
- memset(&a->d, 0x00, sizeof(DESCRIPTOR));
-}
-
-/* ------------------------------------------------------------------------
- Cleanup, but NOT delete this entity
- ------------------------------------------------------------------------ */
-static void cleanup_entity(divas_um_idi_entity_t *e)
-{
- e->os_ref = NULL;
- e->status = 0;
- e->adapter = NULL;
- e->e.Id = 0;
- e->rc_count = 0;
-
- e->status |= DIVA_UM_IDI_REMOVED;
- e->status |= DIVA_UM_IDI_REMOVE_PENDING;
-
- diva_data_q_finit(&e->data);
- diva_data_q_finit(&e->rc);
-}
-
-
-/* ------------------------------------------------------------------------
-   Create ENTITY, link it to the adapter and return pointer to entity
- ------------------------------------------------------------------------ */
-void *divas_um_idi_create_entity(dword adapter_nr, void *file)
-{
- divas_um_idi_entity_t *e;
- diva_um_idi_adapter_t *a;
- diva_os_spin_lock_magic_t old_irql;
-
- if ((e = (divas_um_idi_entity_t *) diva_os_malloc(0, sizeof(*e)))) {
- memset(e, 0x00, sizeof(*e));
- if (!
- (e->os_context =
- diva_os_malloc(0, diva_os_get_context_size()))) {
- DBG_LOG(("E(%08x) no memory for os context", e));
- diva_os_free(0, e);
- return NULL;
- }
- memset(e->os_context, 0x00, diva_os_get_context_size());
-
- if ((diva_data_q_init(&e->data, 2048 + 512, 16))) {
- diva_os_free(0, e->os_context);
- diva_os_free(0, e);
- return NULL;
- }
- if ((diva_data_q_init(&e->rc, sizeof(diva_um_idi_ind_hdr_t), 2))) {
- diva_data_q_finit(&e->data);
- diva_os_free(0, e->os_context);
- diva_os_free(0, e);
- return NULL;
- }
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "create_entity");
- /*
- Look for Adapter requested
- */
- if (!(a = diva_um_idi_find_adapter(adapter_nr))) {
- /*
- No adapter was found, or this adapter was removed
- */
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "create_entity");
-
- DBG_LOG(("A: no adapter(%ld)", adapter_nr));
-
- cleanup_entity(e);
- diva_os_free(0, e->os_context);
- diva_os_free(0, e);
-
- return NULL;
- }
-
- e->os_ref = file; /* link to os handle */
- e->adapter = a; /* link to adapter */
-
- list_add_tail(&e->link, &a->entity_q); /* link from adapter */
-
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "create_entity");
-
- DBG_LOG(("A(%ld), create E(%08x)", adapter_nr, e));
- }
-
- return (e);
-}
-
-/* ------------------------------------------------------------------------
- Unlink entity and free memory
- ------------------------------------------------------------------------ */
-int divas_um_idi_delete_entity(int adapter_nr, void *entity)
-{
- divas_um_idi_entity_t *e;
- diva_um_idi_adapter_t *a;
- diva_os_spin_lock_magic_t old_irql;
-
- if (!(e = (divas_um_idi_entity_t *) entity))
- return (-1);
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "delete_entity");
- if ((a = e->adapter)) {
- list_del(&e->link);
- }
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "delete_entity");
-
- diva_um_idi_stop_wdog(entity);
- cleanup_entity(e);
- diva_os_free(0, e->os_context);
- memset(e, 0x00, sizeof(*e));
-
- DBG_LOG(("A(%d) remove E:%08x", adapter_nr, e));
- diva_os_free(0, e);
-
- return (0);
-}
-
-/* --------------------------------------------------------------------------
- Called by application to read data from IDI
- -------------------------------------------------------------------------- */
-int diva_um_idi_read(void *entity,
- void *os_handle,
- void *dst,
- int max_length, divas_um_idi_copy_to_user_fn_t cp_fn)
-{
- divas_um_idi_entity_t *e;
- diva_um_idi_adapter_t *a;
- const void *data;
- int length, ret = 0;
- diva_um_idi_data_queue_t *q;
- diva_os_spin_lock_magic_t old_irql;
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "read");
-
- e = (divas_um_idi_entity_t *) entity;
- if (!e || (!(a = e->adapter)) ||
- (e->status & DIVA_UM_IDI_REMOVE_PENDING) ||
- (e->status & DIVA_UM_IDI_REMOVED) ||
- (a->status & DIVA_UM_IDI_ADAPTER_REMOVED)) {
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "read");
- DBG_ERR(("E(%08x) read failed - adapter removed", e))
- return (-1);
- }
-
- DBG_TRC(("A(%d) E(%08x) read(%d)", a->adapter_nr, e, max_length));
-
- /*
- Try to read return code first
- */
- data = diva_data_q_get_segment4read(&e->rc);
- q = &e->rc;
-
- /*
- No return codes available, read indications now
- */
- if (!data) {
- if (!(e->status & DIVA_UM_IDI_RC_PENDING)) {
- DBG_TRC(("A(%d) E(%08x) read data", a->adapter_nr, e));
- data = diva_data_q_get_segment4read(&e->data);
- q = &e->data;
- }
- } else {
- e->status &= ~DIVA_UM_IDI_RC_PENDING;
- DBG_TRC(("A(%d) E(%08x) read rc", a->adapter_nr, e));
- }
-
- if (data) {
- if ((length = diva_data_q_get_segment_length(q)) >
- max_length) {
- /*
- Not enough space to read message
- */
- DBG_ERR(("A: A(%d) E(%08x) read small buffer",
- a->adapter_nr, e, ret));
- diva_os_leave_spin_lock(&adapter_lock, &old_irql,
- "read");
- return (-2);
- }
- /*
-		  Copy it to user; this function accesses ONLY locked and verified
-		  memory, so we can access it with the spin lock held
- */
-
- if ((ret = (*cp_fn) (os_handle, dst, data, length)) >= 0) {
- /*
- Acknowledge only if read was successful
- */
- diva_data_q_ack_segment4read(q);
- }
- }
-
-
- DBG_TRC(("A(%d) E(%08x) read=%d", a->adapter_nr, e, ret));
-
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "read");
-
- return (ret);
-}
-
-
-int diva_um_idi_write(void *entity,
- void *os_handle,
- const void *src,
- int length, divas_um_idi_copy_from_user_fn_t cp_fn)
-{
- divas_um_idi_entity_t *e;
- diva_um_idi_adapter_t *a;
- diva_um_idi_req_hdr_t *req;
- void *data;
- int ret = 0;
- diva_os_spin_lock_magic_t old_irql;
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "write");
-
- e = (divas_um_idi_entity_t *) entity;
- if (!e || (!(a = e->adapter)) ||
- (e->status & DIVA_UM_IDI_REMOVE_PENDING) ||
- (e->status & DIVA_UM_IDI_REMOVED) ||
- (a->status & DIVA_UM_IDI_ADAPTER_REMOVED)) {
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
- DBG_ERR(("E(%08x) write failed - adapter removed", e))
- return (-1);
- }
-
- DBG_TRC(("A(%d) E(%08x) write(%d)", a->adapter_nr, e, length));
-
- if ((length < sizeof(*req)) || (length > sizeof(e->buffer))) {
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
- return (-2);
- }
-
- if (e->status & DIVA_UM_IDI_RC_PENDING) {
- DBG_ERR(("A: A(%d) E(%08x) rc pending", a->adapter_nr, e));
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
- return (-1); /* should wait for RC code first */
- }
-
- /*
- Copy function does access only locked verified memory,
- also it can be called with spin lock held
- */
- if ((ret = (*cp_fn) (os_handle, e->buffer, src, length)) < 0) {
- DBG_TRC(("A: A(%d) E(%08x) write error=%d", a->adapter_nr,
- e, ret));
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
- return (ret);
- }
-
- req = (diva_um_idi_req_hdr_t *)&e->buffer[0];
-
- switch (req->type) {
- case DIVA_UM_IDI_GET_FEATURES:{
- DBG_LOG(("A(%d) get_features", a->adapter_nr));
- if (!(data =
- diva_data_q_get_segment4write(&e->data))) {
- DBG_ERR(("A(%d) get_features, no free buffer",
- a->adapter_nr));
- diva_os_leave_spin_lock(&adapter_lock,
- &old_irql,
- "write");
- return (0);
- }
- diva_user_mode_idi_adapter_features(a, &(((diva_um_idi_ind_hdr_t
- *) data)->hdr.features));
- ((diva_um_idi_ind_hdr_t *) data)->type =
- DIVA_UM_IDI_IND_FEATURES;
- ((diva_um_idi_ind_hdr_t *) data)->data_length = 0;
- diva_data_q_ack_segment4write(&e->data,
- sizeof(diva_um_idi_ind_hdr_t));
-
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
-
- diva_os_wakeup_read(e->os_context);
- }
- break;
-
- case DIVA_UM_IDI_REQ:
- case DIVA_UM_IDI_REQ_MAN:
- case DIVA_UM_IDI_REQ_SIG:
- case DIVA_UM_IDI_REQ_NET:
- DBG_TRC(("A(%d) REQ(%02d)-(%02d)-(%08x)", a->adapter_nr,
- req->Req, req->ReqCh,
- req->type & DIVA_UM_IDI_REQ_TYPE_MASK));
- switch (process_idi_request(e, req)) {
- case -1:
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
- return (-1);
- case -2:
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
- diva_os_wakeup_read(e->os_context);
- break;
- default:
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
- break;
- }
- break;
-
- default:
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "write");
- return (-1);
- }
-
- DBG_TRC(("A(%d) E(%08x) write=%d", a->adapter_nr, e, ret));
-
- return (ret);
-}
-
-/* --------------------------------------------------------------------------
- CALLBACK FROM XDI
- -------------------------------------------------------------------------- */
-static void diva_um_idi_xdi_callback(ENTITY *entity)
-{
- divas_um_idi_entity_t *e = DIVAS_CONTAINING_RECORD(entity,
- divas_um_idi_entity_t,
- e);
- diva_os_spin_lock_magic_t old_irql;
- int call_wakeup = 0;
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "xdi_callback");
-
- if (e->e.complete == 255) {
- if (!(e->status & DIVA_UM_IDI_REMOVE_PENDING)) {
- diva_um_idi_stop_wdog(e);
- }
- if ((call_wakeup = process_idi_rc(e, e->e.Rc))) {
- if (e->rc_count) {
- e->rc_count--;
- }
- }
- e->e.Rc = 0;
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "xdi_callback");
-
- if (call_wakeup) {
- diva_os_wakeup_read(e->os_context);
- diva_os_wakeup_close(e->os_context);
- }
- } else {
- if (e->status & DIVA_UM_IDI_REMOVE_PENDING) {
- e->e.RNum = 0;
- e->e.RNR = 2;
- } else {
- call_wakeup = process_idi_ind(e, e->e.Ind);
- }
- e->e.Ind = 0;
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "xdi_callback");
- if (call_wakeup) {
- diva_os_wakeup_read(e->os_context);
- }
- }
-}
-
-static int process_idi_request(divas_um_idi_entity_t *e,
- const diva_um_idi_req_hdr_t *req)
-{
- int assign = 0;
- byte Req = (byte) req->Req;
- dword type = req->type & DIVA_UM_IDI_REQ_TYPE_MASK;
-
- if (!e->e.Id || !e->e.callback) { /* not assigned */
- if (Req != ASSIGN) {
- DBG_ERR(("A: A(%d) E(%08x) not assigned",
- e->adapter->adapter_nr, e));
- return (-1); /* NOT ASSIGNED */
- } else {
- switch (type) {
- case DIVA_UM_IDI_REQ_TYPE_MAN:
- e->e.Id = MAN_ID;
- DBG_TRC(("A(%d) E(%08x) assign MAN",
- e->adapter->adapter_nr, e));
- break;
-
- case DIVA_UM_IDI_REQ_TYPE_SIG:
- e->e.Id = DSIG_ID;
- DBG_TRC(("A(%d) E(%08x) assign SIG",
- e->adapter->adapter_nr, e));
- break;
-
- case DIVA_UM_IDI_REQ_TYPE_NET:
- e->e.Id = NL_ID;
- DBG_TRC(("A(%d) E(%08x) assign NET",
- e->adapter->adapter_nr, e));
- break;
-
- default:
- DBG_ERR(("A: A(%d) E(%08x) unknown type=%08x",
- e->adapter->adapter_nr, e,
- type));
- return (-1);
- }
- }
- e->e.XNum = 1;
- e->e.RNum = 1;
- e->e.callback = diva_um_idi_xdi_callback;
- e->e.X = &e->XData;
- e->e.R = &e->RData;
- assign = 1;
- }
- e->status |= DIVA_UM_IDI_RC_PENDING;
- e->e.Req = Req;
- e->e.ReqCh = (byte) req->ReqCh;
- e->e.X->PLength = (word) req->data_length;
- e->e.X->P = (byte *)&req[1]; /* Our buffer is safe */
-
- DBG_TRC(("A(%d) E(%08x) request(%02x-%02x-%02x (%d))",
- e->adapter->adapter_nr, e, e->e.Id, e->e.Req,
- e->e.ReqCh, e->e.X->PLength));
-
- e->rc_count++;
-
- if (e->adapter && e->adapter->d.request) {
- diva_um_idi_start_wdog(e);
- (*(e->adapter->d.request)) (&e->e);
- }
-
- if (assign) {
- if (e->e.Rc == OUT_OF_RESOURCES) {
- /*
-			  XDI has no more entities; the call was not forwarded to the card,
-			  so no callback will be scheduled
- */
- DBG_ERR(("A: A(%d) E(%08x) XDI out of entities",
- e->adapter->adapter_nr, e));
-
- e->e.Id = 0;
- e->e.ReqCh = 0;
- e->e.RcCh = 0;
- e->e.Ind = 0;
- e->e.IndCh = 0;
- e->e.XNum = 0;
- e->e.RNum = 0;
- e->e.callback = NULL;
- e->e.X = NULL;
- e->e.R = NULL;
- write_return_code(e, ASSIGN_RC | OUT_OF_RESOURCES);
- return (-2);
- } else {
- e->status |= DIVA_UM_IDI_ASSIGN_PENDING;
- }
- }
-
- return (0);
-}
-
-static int process_idi_rc(divas_um_idi_entity_t *e, byte rc)
-{
- DBG_TRC(("A(%d) E(%08x) rc(%02x-%02x-%02x)",
- e->adapter->adapter_nr, e, e->e.Id, rc, e->e.RcCh));
-
- if (e->status & DIVA_UM_IDI_ASSIGN_PENDING) {
- e->status &= ~DIVA_UM_IDI_ASSIGN_PENDING;
- if (rc != ASSIGN_OK) {
- DBG_ERR(("A: A(%d) E(%08x) ASSIGN failed",
- e->adapter->adapter_nr, e));
- e->e.callback = NULL;
- e->e.Id = 0;
- e->e.Req = 0;
- e->e.ReqCh = 0;
- e->e.Rc = 0;
- e->e.RcCh = 0;
- e->e.Ind = 0;
- e->e.IndCh = 0;
- e->e.X = NULL;
- e->e.R = NULL;
- e->e.XNum = 0;
- e->e.RNum = 0;
- }
- }
- if ((e->e.Req == REMOVE) && e->e.Id && (rc == 0xff)) {
- DBG_ERR(("A: A(%d) E(%08x) discard OK in REMOVE",
- e->adapter->adapter_nr, e));
- return (0); /* let us do it in the driver */
- }
- if ((e->e.Req == REMOVE) && (!e->e.Id)) { /* REMOVE COMPLETE */
- e->e.callback = NULL;
- e->e.Id = 0;
- e->e.Req = 0;
- e->e.ReqCh = 0;
- e->e.Rc = 0;
- e->e.RcCh = 0;
- e->e.Ind = 0;
- e->e.IndCh = 0;
- e->e.X = NULL;
- e->e.R = NULL;
- e->e.XNum = 0;
- e->e.RNum = 0;
- e->rc_count = 0;
- }
- if ((e->e.Req == REMOVE) && (rc != 0xff)) { /* REMOVE FAILED */
- DBG_ERR(("A: A(%d) E(%08x) REMOVE FAILED",
- e->adapter->adapter_nr, e));
- }
- write_return_code(e, rc);
-
- return (1);
-}
-
-static int process_idi_ind(divas_um_idi_entity_t *e, byte ind)
-{
- int do_wakeup = 0;
-
- if (e->e.complete != 0x02) {
- diva_um_idi_ind_hdr_t *pind =
- (diva_um_idi_ind_hdr_t *)
- diva_data_q_get_segment4write(&e->data);
- if (pind) {
- e->e.RNum = 1;
- e->e.R->P = (byte *)&pind[1];
- e->e.R->PLength =
- (word) (diva_data_q_get_max_length(&e->data) -
- sizeof(*pind));
- DBG_TRC(("A(%d) E(%08x) ind_1(%02x-%02x-%02x)-[%d-%d]",
- e->adapter->adapter_nr, e, e->e.Id, ind,
- e->e.IndCh, e->e.RLength,
- e->e.R->PLength));
-
- } else {
- DBG_TRC(("A(%d) E(%08x) ind(%02x-%02x-%02x)-RNR",
- e->adapter->adapter_nr, e, e->e.Id, ind,
- e->e.IndCh));
- e->e.RNum = 0;
- e->e.RNR = 1;
- do_wakeup = 1;
- }
- } else {
- diva_um_idi_ind_hdr_t *pind =
- (diva_um_idi_ind_hdr_t *) (e->e.R->P);
-
- DBG_TRC(("A(%d) E(%08x) ind(%02x-%02x-%02x)-[%d]",
- e->adapter->adapter_nr, e, e->e.Id, ind,
- e->e.IndCh, e->e.R->PLength));
-
- pind--;
- pind->type = DIVA_UM_IDI_IND;
- pind->hdr.ind.Ind = ind;
- pind->hdr.ind.IndCh = e->e.IndCh;
- pind->data_length = e->e.R->PLength;
- diva_data_q_ack_segment4write(&e->data,
- (int) (sizeof(*pind) +
- e->e.R->PLength));
- do_wakeup = 1;
- }
-
- if ((e->status & DIVA_UM_IDI_RC_PENDING) && !e->rc.count) {
- do_wakeup = 0;
- }
-
- return (do_wakeup);
-}
-
-/* --------------------------------------------------------------------------
- Write return code to the return code queue of entity
- -------------------------------------------------------------------------- */
-static int write_return_code(divas_um_idi_entity_t *e, byte rc)
-{
- diva_um_idi_ind_hdr_t *prc;
-
- if (!(prc =
- (diva_um_idi_ind_hdr_t *) diva_data_q_get_segment4write(&e->rc)))
- {
- DBG_ERR(("A: A(%d) E(%08x) rc(%02x) lost",
- e->adapter->adapter_nr, e, rc));
- e->status &= ~DIVA_UM_IDI_RC_PENDING;
- return (-1);
- }
-
- prc->type = DIVA_UM_IDI_IND_RC;
- prc->hdr.rc.Rc = rc;
- prc->hdr.rc.RcCh = e->e.RcCh;
- prc->data_length = 0;
- diva_data_q_ack_segment4write(&e->rc, sizeof(*prc));
-
- return (0);
-}
-
-/* --------------------------------------------------------------------------
-   Return the number of entries that can be read from this entity, or
- -1 if adapter was removed
- -------------------------------------------------------------------------- */
-int diva_user_mode_idi_ind_ready(void *entity, void *os_handle)
-{
- divas_um_idi_entity_t *e;
- diva_um_idi_adapter_t *a;
- diva_os_spin_lock_magic_t old_irql;
- int ret;
-
- if (!entity)
- return (-1);
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "ind_ready");
- e = (divas_um_idi_entity_t *) entity;
- a = e->adapter;
-
- if ((!a) || (a->status & DIVA_UM_IDI_ADAPTER_REMOVED)) {
- /*
- Adapter was unloaded
- */
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "ind_ready");
- return (-1); /* adapter was removed */
- }
- if (e->status & DIVA_UM_IDI_REMOVED) {
- /*
- entity was removed as result of adapter removal
- user should assign this entity again
- */
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "ind_ready");
- return (-1);
- }
-
- ret = e->rc.count + e->data.count;
-
- if ((e->status & DIVA_UM_IDI_RC_PENDING) && !e->rc.count) {
- ret = 0;
- }
-
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "ind_ready");
-
- return (ret);
-}
-
-void *diva_um_id_get_os_context(void *entity)
-{
- return (((divas_um_idi_entity_t *) entity)->os_context);
-}
-
-int divas_um_idi_entity_assigned(void *entity)
-{
- divas_um_idi_entity_t *e;
- diva_um_idi_adapter_t *a;
- int ret;
- diva_os_spin_lock_magic_t old_irql;
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "assigned?");
-
-
- e = (divas_um_idi_entity_t *) entity;
- if (!e || (!(a = e->adapter)) ||
- (e->status & DIVA_UM_IDI_REMOVED) ||
- (a->status & DIVA_UM_IDI_ADAPTER_REMOVED)) {
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "assigned?");
- return (0);
- }
-
- e->status |= DIVA_UM_IDI_REMOVE_PENDING;
-
- ret = (e->e.Id || e->rc_count
- || (e->status & DIVA_UM_IDI_ASSIGN_PENDING));
-
- DBG_TRC(("Id:%02x, rc_count:%d, status:%08x", e->e.Id, e->rc_count,
- e->status))
-
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "assigned?");
-
- return (ret);
-}
-
-int divas_um_idi_entity_start_remove(void *entity)
-{
- divas_um_idi_entity_t *e;
- diva_um_idi_adapter_t *a;
- diva_os_spin_lock_magic_t old_irql;
-
- diva_os_enter_spin_lock(&adapter_lock, &old_irql, "start_remove");
-
- e = (divas_um_idi_entity_t *) entity;
- if (!e || (!(a = e->adapter)) ||
- (e->status & DIVA_UM_IDI_REMOVED) ||
- (a->status & DIVA_UM_IDI_ADAPTER_REMOVED)) {
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "start_remove");
- return (0);
- }
-
- if (e->rc_count) {
- /*
- Entity BUSY
- */
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "start_remove");
- return (1);
- }
-
- if (!e->e.Id) {
- /*
-		  A remove request was already pending and has since completed
- */
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "start_remove");
- return (0); /* REMOVE was pending */
- }
-
- /*
- Now send remove request
- */
- e->e.Req = REMOVE;
- e->e.ReqCh = 0;
-
- e->rc_count++;
-
- DBG_TRC(("A(%d) E(%08x) request(%02x-%02x-%02x (%d))",
- e->adapter->adapter_nr, e, e->e.Id, e->e.Req,
- e->e.ReqCh, e->e.X->PLength));
-
- if (a->d.request)
- (*(a->d.request)) (&e->e);
-
- diva_os_leave_spin_lock(&adapter_lock, &old_irql, "start_remove");
-
- return (0);
-}
diff --git a/drivers/isdn/hardware/eicon/um_idi.h b/drivers/isdn/hardware/eicon/um_idi.h
deleted file mode 100644
index 9aedd9e351a3..000000000000
--- a/drivers/isdn/hardware/eicon/um_idi.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: um_idi.h,v 1.6 2004/03/21 17:26:01 armin Exp $ */
-
-#ifndef __DIVA_USER_MODE_IDI_CORE_H__
-#define __DIVA_USER_MODE_IDI_CORE_H__
-
-
-/*
- interface between UM IDI core and OS dependent part
-*/
-int diva_user_mode_idi_init(void);
-void diva_user_mode_idi_finit(void);
-void *divas_um_idi_create_entity(dword adapter_nr, void *file);
-int divas_um_idi_delete_entity(int adapter_nr, void *entity);
-
-typedef int (*divas_um_idi_copy_to_user_fn_t) (void *os_handle,
- void *dst,
- const void *src,
- int length);
-typedef int (*divas_um_idi_copy_from_user_fn_t) (void *os_handle,
- void *dst,
- const void *src,
- int length);
-
-int diva_um_idi_read(void *entity,
- void *os_handle,
- void *dst,
- int max_length, divas_um_idi_copy_to_user_fn_t cp_fn);
-
-int diva_um_idi_write(void *entity,
- void *os_handle,
- const void *src,
- int length, divas_um_idi_copy_from_user_fn_t cp_fn);
-
-int diva_user_mode_idi_ind_ready(void *entity, void *os_handle);
-void *diva_um_id_get_os_context(void *entity);
-int diva_os_get_context_size(void);
-int divas_um_idi_entity_assigned(void *entity);
-int divas_um_idi_entity_start_remove(void *entity);
-
-void diva_um_idi_start_wdog(void *entity);
-void diva_um_idi_stop_wdog(void *entity);
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/um_xdi.h b/drivers/isdn/hardware/eicon/um_xdi.h
deleted file mode 100644
index 1f37aa4efd18..000000000000
--- a/drivers/isdn/hardware/eicon/um_xdi.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: um_xdi.h,v 1.1.2.2 2002/10/02 14:38:38 armin Exp $ */
-
-#ifndef __DIVA_USER_MODE_XDI_H__
-#define __DIVA_USER_MODE_XDI_H__
-
-/*
-  Contains declaration of structures shared between application
- and user mode idi driver
-*/
-
-typedef struct _diva_um_idi_adapter_features {
- dword type;
- dword features;
- dword channels;
- dword serial_number;
- char name[128];
-} diva_um_idi_adapter_features_t;
-
-#define DIVA_UM_IDI_REQ_MASK 0x0000FFFF
-#define DIVA_UM_IDI_REQ_TYPE_MASK (~(DIVA_UM_IDI_REQ_MASK))
-#define DIVA_UM_IDI_GET_FEATURES 1 /* trigger features indication */
-#define DIVA_UM_IDI_REQ 2
-#define DIVA_UM_IDI_REQ_TYPE_MAN 0x10000000
-#define DIVA_UM_IDI_REQ_TYPE_SIG 0x20000000
-#define DIVA_UM_IDI_REQ_TYPE_NET 0x30000000
-#define DIVA_UM_IDI_REQ_MAN (DIVA_UM_IDI_REQ | DIVA_UM_IDI_REQ_TYPE_MAN)
-#define DIVA_UM_IDI_REQ_SIG (DIVA_UM_IDI_REQ | DIVA_UM_IDI_REQ_TYPE_SIG)
-#define DIVA_UM_IDI_REQ_NET (DIVA_UM_IDI_REQ | DIVA_UM_IDI_REQ_TYPE_NET)
-/*
- data_length bytes will follow this structure
-*/
-typedef struct _diva_um_idi_req_hdr {
- dword type;
- dword Req;
- dword ReqCh;
- dword data_length;
-} diva_um_idi_req_hdr_t;
-
-typedef struct _diva_um_idi_ind_parameters {
- dword Ind;
- dword IndCh;
-} diva_um_idi_ind_parameters_t;
-
-typedef struct _diva_um_idi_rc_parameters {
- dword Rc;
- dword RcCh;
-} diva_um_idi_rc_parameters_t;
-
-typedef union _diva_um_idi_ind {
- diva_um_idi_adapter_features_t features;
- diva_um_idi_ind_parameters_t ind;
- diva_um_idi_rc_parameters_t rc;
-} diva_um_idi_ind_t;
-
-#define DIVA_UM_IDI_IND_FEATURES 1 /* features indication */
-#define DIVA_UM_IDI_IND 2
-#define DIVA_UM_IDI_IND_RC 3
-/*
- data_length bytes of data follow
- this structure
-*/
-typedef struct _diva_um_idi_ind_hdr {
- dword type;
- diva_um_idi_ind_t hdr;
- dword data_length;
-} diva_um_idi_ind_hdr_t;
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
deleted file mode 100644
index b036e217c659..000000000000
--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: xdi_adapter.h,v 1.7 2004/03/21 17:26:01 armin Exp $ */
-
-#ifndef __DIVA_OS_XDI_ADAPTER_H__
-#define __DIVA_OS_XDI_ADAPTER_H__
-
-#define DIVAS_XDI_ADAPTER_BUS_PCI 0
-#define DIVAS_XDI_ADAPTER_BUS_ISA 1
-
-typedef struct _divas_pci_card_resources {
- byte bus;
- byte func;
- void *hdev;
-
- dword bar[8]; /* contains context of appropriate BAR Register */
- void __iomem *addr[8]; /* same bar, but mapped into memory */
- dword length[8]; /* bar length */
- int mem_type_id[MAX_MEM_TYPE];
- unsigned int qoffset;
- byte irq;
-} divas_pci_card_resources_t;
-
-typedef union _divas_card_resources {
- divas_pci_card_resources_t pci;
-} divas_card_resources_t;
-
-struct _diva_os_xdi_adapter;
-typedef int (*diva_init_card_proc_t)(struct _diva_os_xdi_adapter *a);
-typedef int (*diva_cmd_card_proc_t)(struct _diva_os_xdi_adapter *a,
- diva_xdi_um_cfg_cmd_t *data,
- int length);
-typedef void (*diva_xdi_clear_interrupts_proc_t)(struct
- _diva_os_xdi_adapter *);
-
-#define DIVA_XDI_MBOX_BUSY 1
-#define DIVA_XDI_MBOX_WAIT_XLOG 2
-
-typedef struct _xdi_mbox_t {
- dword status;
- diva_xdi_um_cfg_cmd_data_t cmd_data;
- dword data_length;
- void *data;
-} xdi_mbox_t;
-
-typedef struct _diva_os_idi_adapter_interface {
- diva_init_card_proc_t cleanup_adapter_proc;
- diva_cmd_card_proc_t cmd_proc;
-} diva_os_idi_adapter_interface_t;
-
-typedef struct _diva_os_xdi_adapter {
- struct list_head link;
- int CardIndex;
- int CardOrdinal;
- int controller; /* number of this controller */
- int Bus; /* PCI, ISA, ... */
- divas_card_resources_t resources;
- char port_name[24];
- ISDN_ADAPTER xdi_adapter;
- xdi_mbox_t xdi_mbox;
- diva_os_idi_adapter_interface_t interface;
- struct _diva_os_xdi_adapter *slave_adapters[3];
- void *slave_list;
- void *proc_adapter_dir; /* adapterX proc entry */
- void *proc_info; /* info proc entry */
- void *proc_grp_opt; /* group_optimization */
- void *proc_d_l1_down; /* dynamic_l1_down */
- volatile diva_xdi_clear_interrupts_proc_t clear_interrupts_proc;
- dword dsp_mask;
-} diva_os_xdi_adapter_t;
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/xdi_msg.h b/drivers/isdn/hardware/eicon/xdi_msg.h
deleted file mode 100644
index 0646079bf466..000000000000
--- a/drivers/isdn/hardware/eicon/xdi_msg.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* $Id: xdi_msg.h,v 1.1.2.2 2001/02/16 08:40:36 armin Exp $ */
-
-#ifndef __DIVA_XDI_UM_CFG_MESSAGE_H__
-#define __DIVA_XDI_UM_CFG_MESSAGE_H__
-
-/*
- Definition of messages used to communicate between
- XDI device driver and user mode configuration utility
-*/
-
-/*
-  As acknowledge, one DWORD (the card ordinal) will be read from the card
-*/
-#define DIVA_XDI_UM_CMD_GET_CARD_ORDINAL 0
-
-/*
-  no acknowledge will be generated; the memory block will be written
-  to memory at the given offset
-*/
-#define DIVA_XDI_UM_CMD_WRITE_SDRAM_BLOCK 1
-
-/*
-  no acknowledge will be generated; the FPGA will be programmed
-*/
-#define DIVA_XDI_UM_CMD_WRITE_FPGA 2
-
-/*
-  As acknowledge, a block of SDRAM will be read into the user buffer
-*/
-#define DIVA_XDI_UM_CMD_READ_SDRAM 3
-
-/*
-  As acknowledge, a dword with the serial number will be read into the user buffer
-*/
-#define DIVA_XDI_UM_CMD_GET_SERIAL_NR 4
-
-/*
-  As acknowledge, a struct consisting of 9 dwords with PCI info.
-  dword[0...7] = 8 PCI BARs
-  dword[8] = IRQ
-*/
-#define DIVA_XDI_UM_CMD_GET_PCI_HW_CONFIG 5
-
-/*
- Reset of the board + activation of primary
- boot loader
-*/
-#define DIVA_XDI_UM_CMD_RESET_ADAPTER 6
-
-/*
-  Called after code download to start the adapter
-  at the specified address.
-  Start sets a new set of features because we do not know
-  whether the protocol features have changed
-*/
-#define DIVA_XDI_UM_CMD_START_ADAPTER 7
-
-/*
-  Stop adapter; called if the user
-  wishes to stop the adapter without unloading
-  the driver, e.g. to reload the adapter with a
-  different protocol
-*/
-#define DIVA_XDI_UM_CMD_STOP_ADAPTER 8
-
-/*
- Get state of current adapter
-  Acknowledge is one dword with the following values:
- 0 - adapter ready for download
- 1 - adapter running
- 2 - adapter dead
- 3 - out of service, driver should be restarted or hardware problem
-*/
-#define DIVA_XDI_UM_CMD_GET_CARD_STATE 9
-
-/*
- Reads XLOG entry from the card
-*/
-#define DIVA_XDI_UM_CMD_READ_XLOG_ENTRY 10
-
-/*
- Set untranslated protocol code features
-*/
-#define DIVA_XDI_UM_CMD_SET_PROTOCOL_FEATURES 11
-
-typedef struct _diva_xdi_um_cfg_cmd_data_set_features {
- dword features;
-} diva_xdi_um_cfg_cmd_data_set_features_t;
-
-typedef struct _diva_xdi_um_cfg_cmd_data_start {
- dword offset;
- dword features;
-} diva_xdi_um_cfg_cmd_data_start_t;
-
-typedef struct _diva_xdi_um_cfg_cmd_data_write_sdram {
- dword ram_number;
- dword offset;
- dword length;
-} diva_xdi_um_cfg_cmd_data_write_sdram_t;
-
-typedef struct _diva_xdi_um_cfg_cmd_data_write_fpga {
- dword fpga_number;
- dword image_length;
-} diva_xdi_um_cfg_cmd_data_write_fpga_t;
-
-typedef struct _diva_xdi_um_cfg_cmd_data_read_sdram {
- dword ram_number;
- dword offset;
- dword length;
-} diva_xdi_um_cfg_cmd_data_read_sdram_t;
-
-typedef union _diva_xdi_um_cfg_cmd_data {
- diva_xdi_um_cfg_cmd_data_write_sdram_t write_sdram;
- diva_xdi_um_cfg_cmd_data_write_fpga_t write_fpga;
- diva_xdi_um_cfg_cmd_data_read_sdram_t read_sdram;
- diva_xdi_um_cfg_cmd_data_start_t start;
- diva_xdi_um_cfg_cmd_data_set_features_t features;
-} diva_xdi_um_cfg_cmd_data_t;
-
-typedef struct _diva_xdi_um_cfg_cmd {
- dword adapter; /* Adapter number 1...N */
- dword command;
- diva_xdi_um_cfg_cmd_data_t command_data;
- dword data_length; /* Plain binary data will follow */
-} diva_xdi_um_cfg_cmd_t;
-
-#endif
diff --git a/drivers/isdn/hardware/eicon/xdi_vers.h b/drivers/isdn/hardware/eicon/xdi_vers.h
deleted file mode 100644
index b3479e59c7c5..000000000000
--- a/drivers/isdn/hardware/eicon/xdi_vers.h
+++ /dev/null
@@ -1,26 +0,0 @@
-
-/*
- *
- Copyright (c) Eicon Networks, 2002.
- *
- This source file is supplied for the use with
- Eicon Networks range of DIVA Server Adapters.
- *
- Eicon File Revision : 2.1
- *
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
- *
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
- implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
- *
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-static char diva_xdi_common_code_build[] = "102-52";
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 5acf6ab67cd3..6f60aced11c5 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -52,10 +52,7 @@ static const struct w6692map w6692_map[] =
{W6692_USR, "USR W6692"}
};
-#ifndef PCI_VENDOR_ID_USR
-#define PCI_VENDOR_ID_USR 0x16ec
#define PCI_DEVICE_ID_USR_6692 0x3409
-#endif
struct w6692_ch {
struct bchannel bch;
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index ea0e4c6de3fb..5b719b561860 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -274,7 +274,7 @@ hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type *bz, u_char *bdata, int count
u_char *ptr, *ptr1, new_f2;
struct sk_buff *skb;
struct IsdnCardState *cs = bcs->cs;
- int total, maxlen, new_z2;
+ int maxlen, new_z2;
z_type *zp;
if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
@@ -297,7 +297,6 @@ hfcpci_empty_fifo(struct BCState *bcs, bzfifo_type *bz, u_char *bdata, int count
} else if (!(skb = dev_alloc_skb(count - 3)))
printk(KERN_WARNING "HFCPCI: receive out of memory\n");
else {
- total = count;
count -= 3;
ptr = skb_put(skb, count);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 17d73db1456e..2d451b6c24af 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -200,8 +200,11 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
down_read(&triggers_list_lock);
down_write(&led_cdev->trigger_lock);
list_for_each_entry(trig, &trigger_list, next_trig) {
- if (!strcmp(led_cdev->default_trigger, trig->name))
+ if (!strcmp(led_cdev->default_trigger, trig->name)) {
+ led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER;
led_trigger_set(led_cdev, trig);
+ break;
+ }
}
up_write(&led_cdev->trigger_lock);
up_read(&triggers_list_lock);
@@ -248,8 +251,10 @@ int led_trigger_register(struct led_trigger *trig)
list_for_each_entry(led_cdev, &leds_list, node) {
down_write(&led_cdev->trigger_lock);
if (!led_cdev->trigger && led_cdev->default_trigger &&
- !strcmp(led_cdev->default_trigger, trig->name))
+ !strcmp(led_cdev->default_trigger, trig->name)) {
+ led_cdev->flags |= LED_INIT_DEFAULT_TRIGGER;
led_trigger_set(led_cdev, trig);
+ }
up_write(&led_cdev->trigger_lock);
}
up_read(&leds_list_lock);
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 77a104d2b124..036d4a536697 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -130,7 +130,7 @@ static int pm860x_led_dt_init(struct platform_device *pdev,
return -ENODEV;
}
for_each_child_of_node(nproot, np) {
- if (!of_node_cmp(np->name, data->name)) {
+ if (of_node_name_eq(np, data->name)) {
of_property_read_u32(np, "marvell,88pm860x-iset",
&iset);
data->iset = PM8606_LED_CURRENT(iset);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 45e012093865..998f2ff6914d 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -190,7 +190,6 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev)
fwnode_handle_put(child);
return ERR_PTR(ret);
}
- led_dat->cdev.dev->of_node = np;
priv->num_leds++;
}
diff --git a/drivers/leds/leds-powernv.c b/drivers/leds/leds-powernv.c
index b1adbd70ce2e..fbab86cb3cc7 100644
--- a/drivers/leds/leds-powernv.c
+++ b/drivers/leds/leds-powernv.c
@@ -285,6 +285,7 @@ static int powernv_led_probe(struct platform_device *pdev)
struct device_node *led_node;
struct powernv_led_common *powernv_led_common;
struct device *dev = &pdev->dev;
+ int rc;
led_node = of_find_node_by_path("/ibm,opal/leds");
if (!led_node) {
@@ -295,15 +296,20 @@ static int powernv_led_probe(struct platform_device *pdev)
powernv_led_common = devm_kzalloc(dev, sizeof(*powernv_led_common),
GFP_KERNEL);
- if (!powernv_led_common)
- return -ENOMEM;
+ if (!powernv_led_common) {
+ rc = -ENOMEM;
+ goto out;
+ }
mutex_init(&powernv_led_common->lock);
powernv_led_common->max_led_type = cpu_to_be64(OPAL_SLOT_LED_TYPE_MAX);
platform_set_drvdata(pdev, powernv_led_common);
- return powernv_led_classdev(pdev, led_node, powernv_led_common);
+ rc = powernv_led_classdev(pdev, led_node, powernv_led_common);
+out:
+ of_node_put(led_node);
+ return rc;
}
/* Platform driver remove */
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 5d3faae51d59..af08bcdc4fd8 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -74,12 +74,6 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
(sizeof(struct led_pwm_data) * num_leds);
}
-static void led_pwm_cleanup(struct led_pwm_priv *priv)
-{
- while (priv->num_leds--)
- led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
-}
-
static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
struct led_pwm *led, struct device_node *child)
{
@@ -120,7 +114,7 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
if (!led_data->period && (led->pwm_period_ns > 0))
led_data->period = led->pwm_period_ns;
- ret = led_classdev_register(dev, &led_data->cdev);
+ ret = devm_of_led_classdev_register(dev, child, &led_data->cdev);
if (ret == 0) {
priv->num_leds++;
led_pwm_set(&led_data->cdev, led_data->cdev.brightness);
@@ -191,25 +185,14 @@ static int led_pwm_probe(struct platform_device *pdev)
ret = led_pwm_create_of(&pdev->dev, priv);
}
- if (ret) {
- led_pwm_cleanup(priv);
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, priv);
return 0;
}
-static int led_pwm_remove(struct platform_device *pdev)
-{
- struct led_pwm_priv *priv = platform_get_drvdata(pdev);
-
- led_pwm_cleanup(priv);
-
- return 0;
-}
-
static const struct of_device_id of_pwm_leds_match[] = {
{ .compatible = "pwm-leds", },
{},
@@ -218,7 +201,6 @@ MODULE_DEVICE_TABLE(of, of_pwm_leds_match);
static struct platform_driver led_pwm_driver = {
.probe = led_pwm_probe,
- .remove = led_pwm_remove,
.driver = {
.name = "leds_pwm",
.of_match_table = of_pwm_leds_match,
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index b76fc3cdc8f8..23cc85e2e0e5 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -136,4 +136,11 @@ config LEDS_TRIGGER_PATTERN
which is a series of tuples, of brightness and duration (ms).
If unsure, say N
+config LEDS_TRIGGER_AUDIO
+ tristate "Audio Mute LED Trigger"
+ help
+	  This allows LEDs to be controlled by audio drivers so that they
+	  follow audio mute and mic-mute changes.
+ If unsure, say N
+
endif # LEDS_TRIGGERS
diff --git a/drivers/leds/trigger/Makefile b/drivers/leds/trigger/Makefile
index 9bcb64ee8123..733a83e2a718 100644
--- a/drivers/leds/trigger/Makefile
+++ b/drivers/leds/trigger/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_LEDS_TRIGGER_CAMERA) += ledtrig-camera.o
obj-$(CONFIG_LEDS_TRIGGER_PANIC) += ledtrig-panic.o
obj-$(CONFIG_LEDS_TRIGGER_NETDEV) += ledtrig-netdev.o
obj-$(CONFIG_LEDS_TRIGGER_PATTERN) += ledtrig-pattern.o
+obj-$(CONFIG_LEDS_TRIGGER_AUDIO) += ledtrig-audio.o
diff --git a/drivers/leds/trigger/ledtrig-audio.c b/drivers/leds/trigger/ledtrig-audio.c
new file mode 100644
index 000000000000..f76621e88482
--- /dev/null
+++ b/drivers/leds/trigger/ledtrig-audio.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Audio Mute LED trigger
+//
+
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+
+static struct led_trigger *ledtrig_audio[NUM_AUDIO_LEDS];
+static enum led_brightness audio_state[NUM_AUDIO_LEDS];
+
+enum led_brightness ledtrig_audio_get(enum led_audio type)
+{
+ return audio_state[type];
+}
+EXPORT_SYMBOL_GPL(ledtrig_audio_get);
+
+void ledtrig_audio_set(enum led_audio type, enum led_brightness state)
+{
+ audio_state[type] = state;
+ led_trigger_event(ledtrig_audio[type], state);
+}
+EXPORT_SYMBOL_GPL(ledtrig_audio_set);
+
+static int __init ledtrig_audio_init(void)
+{
+ led_trigger_register_simple("audio-mute",
+ &ledtrig_audio[LED_AUDIO_MUTE]);
+ led_trigger_register_simple("audio-micmute",
+ &ledtrig_audio[LED_AUDIO_MICMUTE]);
+ return 0;
+}
+module_init(ledtrig_audio_init);
+
+static void __exit ledtrig_audio_exit(void)
+{
+ led_trigger_unregister_simple(ledtrig_audio[LED_AUDIO_MUTE]);
+ led_trigger_unregister_simple(ledtrig_audio[LED_AUDIO_MICMUTE]);
+}
+module_exit(ledtrig_audio_exit);
+
+MODULE_DESCRIPTION("LED trigger for audio mute control");
+MODULE_LICENSE("GPL v2");
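
The new trigger exports ledtrig_audio_set()/ledtrig_audio_get() for sound drivers to mirror mute state onto LEDs. A minimal consumer sketch follows; the my_codec_* wrappers are illustrative only, while the ledtrig_audio_* calls, LED_AUDIO_MUTE and the led_brightness values come from the API added above:

#include <linux/leds.h>

/* Hypothetical codec hook: mirror the mute state onto any LED
 * bound to the "audio-mute" trigger.
 */
static void my_codec_update_mute_led(bool muted)
{
	ledtrig_audio_set(LED_AUDIO_MUTE, muted ? LED_ON : LED_OFF);
}

/* Query the state last pushed to the trigger */
static bool my_codec_mute_led_is_on(void)
{
	return ledtrig_audio_get(LED_AUDIO_MUTE) != LED_OFF;
}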
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index efb976a863d2..5f82036fe322 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -389,7 +389,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
goto err_dev;
}
- tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
+ tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
if (!tqueue) {
ret = -ENOMEM;
goto err_disk;
@@ -974,7 +974,7 @@ static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
struct ppa_addr ppa;
u8 *blks;
int ch, lun, nr_blks;
- int ret;
+ int ret = 0;
ppa.ppa = slba;
ppa = dev_to_generic_addr(dev, ppa);
@@ -1140,30 +1140,33 @@ EXPORT_SYMBOL(nvm_alloc_dev);
int nvm_register(struct nvm_dev *dev)
{
- int ret;
+ int ret, exp_pool_size;
if (!dev->q || !dev->ops)
return -EINVAL;
- dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist");
+ ret = nvm_init(dev);
+ if (ret)
+ return ret;
+
+ exp_pool_size = max_t(int, PAGE_SIZE,
+ (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
+ exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);
+
+ dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
+ exp_pool_size);
if (!dev->dma_pool) {
pr_err("nvm: could not create dma pool\n");
+ nvm_free(dev);
return -ENOMEM;
}
- ret = nvm_init(dev);
- if (ret)
- goto err_init;
-
/* register device with a supported media manager */
down_write(&nvm_lock);
list_add(&dev->devices, &nvm_devices);
up_write(&nvm_lock);
return 0;
-err_init:
- dev->ops->destroy_dma_pool(dev->dma_pool);
- return ret;
}
EXPORT_SYMBOL(nvm_register);
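
As a worked example of the pool sizing above: assuming NVM_MAX_VLBA is 64 and the device reports a 16-byte per-sector OOB area (dev->geo.sos), the pool needs 64 * (8 + 16) = 1536 bytes, so exp_pool_size becomes max(PAGE_SIZE, 1536) = 4096 on a 4 KiB-page system, and round_up() leaves it at one page.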
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 6944aac43b01..1ff165351180 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -250,8 +250,8 @@ int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
if (rqd->nr_ppas == 1)
return 0;
- rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
- rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
+ rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
+ rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);
return 0;
}
@@ -376,7 +376,7 @@ void pblk_write_should_kick(struct pblk *pblk)
{
unsigned int secs_avail = pblk_rb_read_count(&pblk->rwb);
- if (secs_avail >= pblk->min_write_pgs)
+ if (secs_avail >= pblk->min_write_pgs_data)
pblk_write_kick(pblk);
}
@@ -407,7 +407,9 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct list_head *move_list = NULL;
- int vsc = le32_to_cpu(*line->vsc);
+ int packed_meta = (le32_to_cpu(*line->vsc) / pblk->min_write_pgs_data)
+ * (pblk->min_write_pgs - pblk->min_write_pgs_data);
+ int vsc = le32_to_cpu(*line->vsc) + packed_meta;
lockdep_assert_held(&line->lock);
@@ -531,7 +533,7 @@ void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
if (caddr == 0)
trace_pblk_chunk_state(pblk_disk_name(pblk),
ppa, NVM_CHK_ST_OPEN);
- else if (caddr == chunk->cnlb)
+ else if (caddr == (chunk->cnlb - 1))
trace_pblk_chunk_state(pblk_disk_name(pblk),
ppa, NVM_CHK_ST_CLOSED);
}
@@ -620,12 +622,15 @@ out:
}
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
- unsigned long secs_to_flush)
+ unsigned long secs_to_flush, bool skip_meta)
{
int max = pblk->sec_per_write;
int min = pblk->min_write_pgs;
int secs_to_sync = 0;
+ if (skip_meta && pblk->min_write_pgs_data != pblk->min_write_pgs)
+ min = max = pblk->min_write_pgs_data;
+
if (secs_avail >= max)
secs_to_sync = max;
else if (secs_avail >= min)
@@ -796,10 +801,11 @@ static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
rqd.is_seq = 1;
for (i = 0; i < lm->smeta_sec; i++, paddr++) {
- struct pblk_sec_meta *meta_list = rqd.meta_list;
+ struct pblk_sec_meta *meta = pblk_get_meta(pblk,
+ rqd.meta_list, i);
rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
- meta_list[i].lba = lba_list[paddr] = addr_empty;
+ meta->lba = lba_list[paddr] = addr_empty;
}
ret = pblk_submit_io_sync_sem(pblk, &rqd);
@@ -845,13 +851,13 @@ int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
if (!meta_list)
return -ENOMEM;
- ppa_list = meta_list + pblk_dma_meta_size;
- dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+ ppa_list = meta_list + pblk_dma_meta_size(pblk);
+ dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
next_rq:
memset(&rqd, 0, sizeof(struct nvm_rq));
- rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
+ rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
rq_len = rq_ppas * geo->csecs;
bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
@@ -1276,6 +1282,7 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
return 0;
}
+/* Line allocations in the recovery path are always single threaded */
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
{
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
@@ -1295,15 +1302,22 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
ret = pblk_line_alloc_bitmaps(pblk, line);
if (ret)
- return ret;
+ goto fail;
if (!pblk_line_init_bb(pblk, line, 0)) {
- list_add(&line->list, &l_mg->free_list);
- return -EINTR;
+ ret = -EINTR;
+ goto fail;
}
pblk_rl_free_lines_dec(&pblk->rl, line, true);
return 0;
+
+fail:
+ spin_lock(&l_mg->free_lock);
+ list_add(&line->list, &l_mg->free_list);
+ spin_unlock(&l_mg->free_lock);
+
+ return ret;
}
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
@@ -2160,3 +2174,38 @@ void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
}
spin_unlock(&pblk->trans_lock);
}
+
+void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ void *buffer;
+
+ if (pblk_is_oob_meta_supported(pblk)) {
+ /* Just use OOB metadata buffer as always */
+ buffer = rqd->meta_list;
+ } else {
+		/* We need to reuse the last page of the request (packed
+		 * metadata) in a similar way to traditional OOB metadata
+ */
+ buffer = page_to_virt(
+ rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
+ }
+
+ return buffer;
+}
+
+void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ void *meta_list = rqd->meta_list;
+ void *page;
+ int i = 0;
+
+ if (pblk_is_oob_meta_supported(pblk))
+ return;
+
+ page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);
+ /* We need to fill oob meta buffer with data from packed metadata */
+ for (; i < rqd->nr_ppas; i++)
+ memcpy(pblk_get_meta(pblk, meta_list, i),
+ page + (i * sizeof(struct pblk_sec_meta)),
+ sizeof(struct pblk_sec_meta));
+}
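
The pblk_get_meta() accessor used throughout these hunks is defined outside this diff; given the new per-target pblk->oob_meta_size field, a plausible shape is simple strided pointer arithmetic (a sketch under that assumption, not the verbatim helper):

/* Assumed accessor: step through the metadata buffer in
 * device-OOB-sized strides rather than sizeof(struct pblk_sec_meta).
 */
static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
						  void *meta, int index)
{
	return meta + pblk->oob_meta_size * index;
}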
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 13822594647c..f9a3e47b6a93 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -207,9 +207,6 @@ static int pblk_rwb_init(struct pblk *pblk)
return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
}
-/* Minimum pages needed within a lun */
-#define ADDR_POOL_SIZE 64
-
static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
struct nvm_addrf_12 *dst)
{
@@ -350,23 +347,19 @@ fail_destroy_ws:
static int pblk_get_global_caches(void)
{
- int ret;
+ int ret = 0;
mutex_lock(&pblk_caches.mutex);
- if (kref_read(&pblk_caches.kref) > 0) {
- kref_get(&pblk_caches.kref);
- mutex_unlock(&pblk_caches.mutex);
- return 0;
- }
+ if (kref_get_unless_zero(&pblk_caches.kref))
+ goto out;
ret = pblk_create_global_caches();
-
if (!ret)
- kref_get(&pblk_caches.kref);
+ kref_init(&pblk_caches.kref);
+out:
mutex_unlock(&pblk_caches.mutex);
-
return ret;
}
@@ -406,12 +399,45 @@ static int pblk_core_init(struct pblk *pblk)
pblk->nr_flush_rst = 0;
pblk->min_write_pgs = geo->ws_opt;
+ pblk->min_write_pgs_data = pblk->min_write_pgs;
max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
+ pblk->oob_meta_size = geo->sos;
+ if (!pblk_is_oob_meta_supported(pblk)) {
+	/* For drives which do not have the OOB metadata feature,
+	 * we need to use so-called packed metadata in order to
+	 * support recovery. Packed metadata stores the same
+	 * information as OOB metadata (the l2p table mapping), but
+	 * in the form of a single page at the end of
+	 * every write request.
+ */
+ if (pblk->min_write_pgs
+ * sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
+ /* We want to keep all the packed metadata on single
+ * page per write requests. So we need to ensure that
+			/* We want to keep all the packed metadata on a single
+			 * page per write request, so we need to ensure that
+			 * it will fit.
+			 *
+			 * This is more of a sanity check, since there is
+			 * no device with such a big minimal write size
+			 * (above 1 megabyte).
+ return -EINVAL;
+ }
+		/* For the packed metadata approach we make a simplification:
+		 * on the read path we always issue requests whose size
+		 * equals max_write_pgs, with all pages filled with
+		 * user payload except the last page, which is
+		 * filled with packed metadata.
+ */
+ pblk->max_write_pgs = pblk->min_write_pgs;
+ pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
+ }
+
pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
GFP_KERNEL);
if (!pblk->pad_dist)
@@ -635,40 +661,61 @@ static unsigned int calc_emeta_len(struct pblk *pblk)
return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}
-static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
+static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
struct nvm_geo *geo = &dev->geo;
sector_t provisioned;
- int sec_meta, blk_meta;
+ int sec_meta, blk_meta, clba;
+ int minimum;
if (geo->op == NVM_TARGET_DEFAULT_OP)
pblk->op = PBLK_DEFAULT_OP;
else
pblk->op = geo->op;
- provisioned = nr_free_blks;
+ minimum = pblk_get_min_chks(pblk);
+ provisioned = nr_free_chks;
provisioned *= (100 - pblk->op);
sector_div(provisioned, 100);
- pblk->op_blks = nr_free_blks - provisioned;
+ if ((nr_free_chks - provisioned) < minimum) {
+ if (geo->op != NVM_TARGET_DEFAULT_OP) {
+ pblk_err(pblk, "OP too small to create a sane instance\n");
+ return -EINTR;
+ }
+
+ /* If the user did not specify an OP value, and PBLK_DEFAULT_OP
+ * is not enough, calculate and set sane value
+ */
+
+ provisioned = nr_free_chks - minimum;
+ pblk->op = (100 * minimum) / nr_free_chks;
+ pblk_info(pblk, "Default OP insufficient, adjusting OP to %d\n",
+ pblk->op);
+ }
+
+ pblk->op_blks = nr_free_chks - provisioned;
/* Internally pblk manages all free blocks, but all calculations based
* on user capacity consider only provisioned blocks
*/
- pblk->rl.total_blocks = nr_free_blks;
- pblk->rl.nr_secs = nr_free_blks * geo->clba;
+ pblk->rl.total_blocks = nr_free_chks;
+ pblk->rl.nr_secs = nr_free_chks * geo->clba;
/* Consider sectors used for metadata */
sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
- pblk->capacity = (provisioned - blk_meta) * geo->clba;
+ clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data;
+ pblk->capacity = (provisioned - blk_meta) * clba;
- atomic_set(&pblk->rl.free_blocks, nr_free_blks);
- atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
+ atomic_set(&pblk->rl.free_blocks, nr_free_chks);
+ atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);
+
+ return 0;
}
static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
@@ -984,7 +1031,7 @@ static int pblk_lines_init(struct pblk *pblk)
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *line;
void *chunk_meta;
- long nr_free_chks = 0;
+ int nr_free_chks = 0;
int i, ret;
ret = pblk_line_meta_init(pblk);
@@ -1031,7 +1078,9 @@ static int pblk_lines_init(struct pblk *pblk)
goto fail_free_lines;
}
- pblk_set_provision(pblk, nr_free_chks);
+ ret = pblk_set_provision(pblk, nr_free_chks);
+ if (ret)
+ goto fail_free_lines;
vfree(chunk_meta);
return 0;
@@ -1041,7 +1090,7 @@ fail_free_lines:
pblk_line_meta_free(l_mg, &pblk->lines[i]);
kfree(pblk->lines);
fail_free_chunk_meta:
- kfree(chunk_meta);
+ vfree(chunk_meta);
fail_free_luns:
kfree(pblk->luns);
fail_free_meta:
@@ -1154,6 +1203,12 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
return ERR_PTR(-EINVAL);
}
+ if (geo->ext) {
+ pblk_err(pblk, "extended metadata not supported\n");
+ kfree(pblk);
+ return ERR_PTR(-EINVAL);
+ }
+
spin_lock_init(&pblk->resubmit_lock);
spin_lock_init(&pblk->trans_lock);
spin_lock_init(&pblk->lock);
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 6dcbd44e3acb..79df583ea709 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -22,7 +22,7 @@
static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
struct ppa_addr *ppa_list,
unsigned long *lun_bitmap,
- struct pblk_sec_meta *meta_list,
+ void *meta_list,
unsigned int valid_secs)
{
struct pblk_line *line = pblk_line_get_data(pblk);
@@ -33,6 +33,9 @@ static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
int nr_secs = pblk->min_write_pgs;
int i;
+ if (!line)
+ return -ENOSPC;
+
if (pblk_line_is_full(line)) {
struct pblk_line *prev_line = line;
@@ -42,8 +45,11 @@ static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
line = pblk_line_replace_data(pblk);
pblk_line_close_meta(pblk, prev_line);
- if (!line)
- return -EINTR;
+ if (!line) {
+ pblk_pipeline_stop(pblk);
+ return -ENOSPC;
+ }
+
}
emeta = line->emeta;
@@ -52,6 +58,7 @@ static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
paddr = pblk_alloc_page(pblk, line, nr_secs);
for (i = 0; i < nr_secs; i++, paddr++) {
+ struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
/* ppa to be sent to the device */
@@ -68,14 +75,15 @@ static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
kref_get(&line->ref);
w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
w_ctx->ppa = ppa_list[i];
- meta_list[i].lba = cpu_to_le64(w_ctx->lba);
+ meta->lba = cpu_to_le64(w_ctx->lba);
lba_list[paddr] = cpu_to_le64(w_ctx->lba);
if (lba_list[paddr] != addr_empty)
line->nr_valid_lbas++;
else
atomic64_inc(&pblk->pad_wa);
} else {
- lba_list[paddr] = meta_list[i].lba = addr_empty;
+ lba_list[paddr] = addr_empty;
+ meta->lba = addr_empty;
__pblk_map_invalidate(pblk, line, paddr);
}
}
@@ -84,50 +92,57 @@ static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
return 0;
}
-void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
+int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
unsigned long *lun_bitmap, unsigned int valid_secs,
unsigned int off)
{
- struct pblk_sec_meta *meta_list = rqd->meta_list;
+ void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
+ void *meta_buffer;
struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
unsigned int map_secs;
int min = pblk->min_write_pgs;
int i;
+ int ret;
for (i = off; i < rqd->nr_ppas; i += min) {
map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
- if (pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
- lun_bitmap, &meta_list[i], map_secs)) {
- bio_put(rqd->bio);
- pblk_free_rqd(pblk, rqd, PBLK_WRITE);
- pblk_pipeline_stop(pblk);
- }
+ meta_buffer = pblk_get_meta(pblk, meta_list, i);
+
+ ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
+ lun_bitmap, meta_buffer, map_secs);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
/* only if erase_ppa is set, acquire erase semaphore */
-void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
+int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
unsigned int sentry, unsigned long *lun_bitmap,
unsigned int valid_secs, struct ppa_addr *erase_ppa)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_line_meta *lm = &pblk->lm;
- struct pblk_sec_meta *meta_list = rqd->meta_list;
+ void *meta_list = pblk_get_meta_for_writes(pblk, rqd);
+ void *meta_buffer;
struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
struct pblk_line *e_line, *d_line;
unsigned int map_secs;
int min = pblk->min_write_pgs;
int i, erase_lun;
+ int ret;
+
for (i = 0; i < rqd->nr_ppas; i += min) {
map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
- if (pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
- lun_bitmap, &meta_list[i], map_secs)) {
- bio_put(rqd->bio);
- pblk_free_rqd(pblk, rqd, PBLK_WRITE);
- pblk_pipeline_stop(pblk);
- }
+ meta_buffer = pblk_get_meta(pblk, meta_list, i);
+
+ ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
+ lun_bitmap, meta_buffer, map_secs);
+ if (ret)
+ return ret;
erase_lun = pblk_ppa_to_pos(geo, ppa_list[i]);
@@ -163,7 +178,7 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
*/
e_line = pblk_line_get_erase(pblk);
if (!e_line)
- return;
+ return -ENOSPC;
/* Erase blocks that are bad in this line but might not be in next */
if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
@@ -174,7 +189,7 @@ retry:
bit = find_next_bit(d_line->blk_bitmap,
lm->blk_per_line, bit + 1);
if (bit >= lm->blk_per_line)
- return;
+ return 0;
spin_lock(&e_line->lock);
if (test_bit(bit, e_line->erase_bitmap)) {
@@ -188,4 +203,6 @@ retry:
*erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
erase_ppa->a.blk = e_line->id;
}
+
+ return 0;
}
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index b1f4b51783f4..d4ca8c64ee0f 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -147,7 +147,7 @@ int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
/*
* Initialize rate-limiter, which controls access to the write buffer
- * but user and GC I/O
+ * by user and GC I/O
*/
pblk_rl_init(&pblk->rl, rb->nr_entries);
@@ -552,6 +552,9 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
to_read = count;
}
+	/* Add space for packed metadata if in use */
+ pad += (pblk->min_write_pgs - pblk->min_write_pgs_data);
+
c_ctx->sentry = pos;
c_ctx->nr_valid = to_read;
c_ctx->nr_padded = pad;
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 9fba614adeeb..3789185144da 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -43,7 +43,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct bio *bio, sector_t blba,
unsigned long *read_bitmap)
{
- struct pblk_sec_meta *meta_list = rqd->meta_list;
+ void *meta_list = rqd->meta_list;
struct ppa_addr ppas[NVM_MAX_VLBA];
int nr_secs = rqd->nr_ppas;
bool advanced_bio = false;
@@ -53,12 +53,15 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
for (i = 0; i < nr_secs; i++) {
struct ppa_addr p = ppas[i];
+ struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
sector_t lba = blba + i;
retry:
if (pblk_ppa_empty(p)) {
+ __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+
WARN_ON(test_and_set_bit(i, read_bitmap));
- meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
+ meta->lba = addr_empty;
if (unlikely(!advanced_bio)) {
bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
@@ -78,7 +81,7 @@ retry:
goto retry;
}
WARN_ON(test_and_set_bit(i, read_bitmap));
- meta_list[i].lba = cpu_to_le64(lba);
+ meta->lba = cpu_to_le64(lba);
advanced_bio = true;
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->cache_reads);
@@ -105,12 +108,16 @@ next:
static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
sector_t blba)
{
- struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
+ void *meta_list = rqd->meta_list;
int nr_lbas = rqd->nr_ppas;
int i;
+ if (!pblk_is_oob_meta_supported(pblk))
+ return;
+
for (i = 0; i < nr_lbas; i++) {
- u64 lba = le64_to_cpu(meta_lba_list[i].lba);
+ struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
+ u64 lba = le64_to_cpu(meta->lba);
if (lba == ADDR_EMPTY)
continue;
@@ -134,17 +141,22 @@ static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
u64 *lba_list, int nr_lbas)
{
- struct pblk_sec_meta *meta_lba_list = rqd->meta_list;
+ void *meta_lba_list = rqd->meta_list;
int i, j;
+ if (!pblk_is_oob_meta_supported(pblk))
+ return;
+
for (i = 0, j = 0; i < nr_lbas; i++) {
+ struct pblk_sec_meta *meta = pblk_get_meta(pblk,
+ meta_lba_list, j);
u64 lba = lba_list[i];
u64 meta_lba;
if (lba == ADDR_EMPTY)
continue;
- meta_lba = le64_to_cpu(meta_lba_list[j].lba);
+ meta_lba = le64_to_cpu(meta->lba);
if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
@@ -216,15 +228,15 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
struct pblk *pblk = rqd->private;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct pblk_pr_ctx *pr_ctx = r_ctx->private;
+ struct pblk_sec_meta *meta;
struct bio *new_bio = rqd->bio;
struct bio *bio = pr_ctx->orig_bio;
struct bio_vec src_bv, dst_bv;
- struct pblk_sec_meta *meta_list = rqd->meta_list;
+ void *meta_list = rqd->meta_list;
int bio_init_idx = pr_ctx->bio_init_idx;
unsigned long *read_bitmap = pr_ctx->bitmap;
int nr_secs = pr_ctx->orig_nr_secs;
int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs);
- __le64 *lba_list_mem, *lba_list_media;
void *src_p, *dst_p;
int hole, i;
@@ -237,13 +249,10 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
rqd->ppa_list[0] = ppa;
}
- /* Re-use allocated memory for intermediate lbas */
- lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
- lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size);
-
for (i = 0; i < nr_secs; i++) {
- lba_list_media[i] = meta_list[i].lba;
- meta_list[i].lba = lba_list_mem[i];
+ meta = pblk_get_meta(pblk, meta_list, i);
+ pr_ctx->lba_list_media[i] = le64_to_cpu(meta->lba);
+ meta->lba = cpu_to_le64(pr_ctx->lba_list_mem[i]);
}
/* Fill the holes in the original bio */
@@ -255,7 +264,8 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
kref_put(&line->ref, pblk_line_put);
- meta_list[hole].lba = lba_list_media[i];
+ meta = pblk_get_meta(pblk, meta_list, hole);
+ meta->lba = cpu_to_le64(pr_ctx->lba_list_media[i]);
src_bv = new_bio->bi_io_vec[i++];
dst_bv = bio->bi_io_vec[bio_init_idx + hole];
@@ -291,17 +301,13 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
unsigned long *read_bitmap,
int nr_holes)
{
- struct pblk_sec_meta *meta_list = rqd->meta_list;
+ void *meta_list = rqd->meta_list;
struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd);
struct pblk_pr_ctx *pr_ctx;
struct bio *new_bio, *bio = r_ctx->private;
- __le64 *lba_list_mem;
int nr_secs = rqd->nr_ppas;
int i;
- /* Re-use allocated memory for intermediate lbas */
- lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size);
-
new_bio = bio_alloc(GFP_KERNEL, nr_holes);
if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes))
@@ -312,12 +318,15 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
goto fail_free_pages;
}
- pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
+ pr_ctx = kzalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL);
if (!pr_ctx)
goto fail_free_pages;
- for (i = 0; i < nr_secs; i++)
- lba_list_mem[i] = meta_list[i].lba;
+ for (i = 0; i < nr_secs; i++) {
+ struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
+
+ pr_ctx->lba_list_mem[i] = le64_to_cpu(meta->lba);
+ }
new_bio->bi_iter.bi_sector = 0; /* internal bio */
bio_set_op_attrs(new_bio, REQ_OP_READ, 0);
@@ -325,7 +334,6 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
rqd->bio = new_bio;
rqd->nr_ppas = nr_holes;
- pr_ctx->ppa_ptr = NULL;
pr_ctx->orig_bio = bio;
bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA);
pr_ctx->bio_init_idx = bio_init_idx;
@@ -383,7 +391,7 @@ err:
static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
sector_t lba, unsigned long *read_bitmap)
{
- struct pblk_sec_meta *meta_list = rqd->meta_list;
+ struct pblk_sec_meta *meta = pblk_get_meta(pblk, rqd->meta_list, 0);
struct ppa_addr ppa;
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
@@ -394,8 +402,10 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio,
retry:
if (pblk_ppa_empty(ppa)) {
+ __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+
WARN_ON(test_and_set_bit(0, read_bitmap));
- meta_list[0].lba = cpu_to_le64(ADDR_EMPTY);
+ meta->lba = addr_empty;
return;
}
@@ -409,7 +419,7 @@ retry:
}
WARN_ON(test_and_set_bit(0, read_bitmap));
- meta_list[0].lba = cpu_to_le64(lba);
+ meta->lba = cpu_to_le64(lba);
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_inc(&pblk->cache_reads);
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index 5740b7509bd8..3fcf062d752c 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -13,6 +13,9 @@
* General Public License for more details.
*
* pblk-recovery.c - pblk's recovery path
+ *
+ * The L2P recovery path is single-threaded, as the L2P table is updated in order
+ * following the line sequence ID.
*/
#include "pblk.h"
@@ -124,7 +127,7 @@ static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
struct pblk_recov_alloc {
struct ppa_addr *ppa_list;
- struct pblk_sec_meta *meta_list;
+ void *meta_list;
struct nvm_rq *rqd;
void *data;
dma_addr_t dma_ppa_list;
@@ -158,7 +161,7 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- struct pblk_sec_meta *meta_list;
+ void *meta_list;
struct pblk_pad_rq *pad_rq;
struct nvm_rq *rqd;
struct bio *bio;
@@ -188,7 +191,7 @@ static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
kref_init(&pad_rq->ref);
next_pad_rq:
- rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
+ rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
if (rq_ppas < pblk->min_write_pgs) {
pblk_err(pblk, "corrupted pad line %d\n", line->id);
goto fail_free_pad;
@@ -237,12 +240,15 @@ next_pad_rq:
for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++) {
struct ppa_addr dev_ppa;
+ struct pblk_sec_meta *meta;
__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
dev_ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
pblk_map_invalidate(pblk, dev_ppa);
- lba_list[w_ptr] = meta_list[i].lba = addr_empty;
+ lba_list[w_ptr] = addr_empty;
+ meta = pblk_get_meta(pblk, meta_list, i);
+ meta->lba = addr_empty;
rqd->ppa_list[i] = dev_ppa;
}
}
@@ -334,20 +340,21 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
struct pblk_recov_alloc p)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ struct pblk_line_meta *lm = &pblk->lm;
struct nvm_geo *geo = &dev->geo;
struct ppa_addr *ppa_list;
- struct pblk_sec_meta *meta_list;
+ void *meta_list;
struct nvm_rq *rqd;
struct bio *bio;
void *data;
dma_addr_t dma_ppa_list, dma_meta_list;
__le64 *lba_list;
- u64 paddr = 0;
+ u64 paddr = pblk_line_smeta_start(pblk, line) + lm->smeta_sec;
bool padded = false;
int rq_ppas, rq_len;
int i, j;
int ret;
- u64 left_ppas = pblk_sec_in_open_line(pblk, line);
+ u64 left_ppas = pblk_sec_in_open_line(pblk, line) - lm->smeta_sec;
if (pblk_line_wp_is_unbalanced(pblk, line))
pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);
@@ -364,17 +371,19 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
next_rq:
memset(rqd, 0, pblk_g_rq_size);
- rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
+ rq_ppas = pblk_calc_secs(pblk, left_ppas, 0, false);
if (!rq_ppas)
rq_ppas = pblk->min_write_pgs;
rq_len = rq_ppas * geo->csecs;
+retry_rq:
bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
if (IS_ERR(bio))
return PTR_ERR(bio);
bio->bi_iter.bi_sector = 0; /* internal bio */
bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio_get(bio);
rqd->bio = bio;
rqd->opcode = NVM_OP_PREAD;
@@ -387,7 +396,6 @@ next_rq:
if (pblk_io_aligned(pblk, rq_ppas))
rqd->is_seq = 1;
-retry_rq:
for (i = 0; i < rqd->nr_ppas; ) {
struct ppa_addr ppa;
int pos;
@@ -410,6 +418,7 @@ retry_rq:
if (ret) {
pblk_err(pblk, "I/O submission failed: %d\n", ret);
bio_put(bio);
+ bio_put(bio);
return ret;
}
@@ -421,20 +430,28 @@ retry_rq:
if (padded) {
pblk_log_read_err(pblk, rqd);
+ bio_put(bio);
return -EINTR;
}
pad_distance = pblk_pad_distance(pblk, line);
ret = pblk_recov_pad_line(pblk, line, pad_distance);
- if (ret)
+ if (ret) {
+ bio_put(bio);
return ret;
+ }
padded = true;
+ bio_put(bio);
goto retry_rq;
}
+ pblk_get_packed_meta(pblk, rqd);
+ bio_put(bio);
+
for (i = 0; i < rqd->nr_ppas; i++) {
- u64 lba = le64_to_cpu(meta_list[i].lba);
+ struct pblk_sec_meta *meta = pblk_get_meta(pblk, meta_list, i);
+ u64 lba = le64_to_cpu(meta->lba);
lba_list[paddr++] = cpu_to_le64(lba);
@@ -463,7 +480,7 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
struct nvm_geo *geo = &dev->geo;
struct nvm_rq *rqd;
struct ppa_addr *ppa_list;
- struct pblk_sec_meta *meta_list;
+ void *meta_list;
struct pblk_recov_alloc p;
void *data;
dma_addr_t dma_ppa_list, dma_meta_list;
@@ -473,8 +490,8 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
if (!meta_list)
return -ENOMEM;
- ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
- dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+ ppa_list = (void *)(meta_list) + pblk_dma_meta_size(pblk);
+ dma_ppa_list = dma_meta_list + pblk_dma_meta_size(pblk);
data = kcalloc(pblk->max_write_pgs, geo->csecs, GFP_KERNEL);
if (!data) {
@@ -804,7 +821,6 @@ next:
WARN_ON_ONCE(!test_and_clear_bit(meta_line,
&l_mg->meta_bitmap));
spin_unlock(&l_mg->free_lock);
- pblk_line_replace_data(pblk);
} else {
spin_lock(&l_mg->free_lock);
/* Allocate next line for preparation */
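A note on the bio_get()/bio_put() changes in pblk_recov_scan_oob() above: a bio built by bio_map_kern() drops its last reference in its completion handler, so the code now takes one extra reference before submitting, which lets it inspect the result and retry after padding; each exit path must then drop whatever references are still held (two puts when submission failed and completion never ran, one otherwise). A toy userspace model of that flow, with a hand-rolled refcount standing in for the bio machinery:

	#include <stdio.h>

	struct bio { int refs; };

	static void bio_get(struct bio *b) { b->refs++; }

	static void bio_put(struct bio *b)
	{
		if (--b->refs == 0)
			printf("bio freed\n");
	}

	int main(void)
	{
		struct bio b = { .refs = 1 };	/* ref from bio_map_kern() */
		int submit_failed = 0;		/* set to 1 to model the error path */

		bio_get(&b);			/* survive the completion handler */
		if (submit_failed) {
			bio_put(&b);		/* our extra reference */
			bio_put(&b);		/* the never-completed original */
			return 1;
		}
		bio_put(&b);			/* the completion handler's put */
		/* ...check the read result, maybe pad the line and retry... */
		bio_put(&b);			/* drop the extra reference */
		return 0;
	}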
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index db55a1c89997..76116d5f78e4 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -214,11 +214,10 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
struct nvm_geo *geo = &dev->geo;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
- int min_blocks = lm->blk_per_line * PBLK_GC_RSV_LINE;
int sec_meta, blk_meta;
-
unsigned int rb_windows;
+
/* Consider sectors used for metadata */
sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
@@ -226,7 +225,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
rl->high_pw = get_count_order(rl->high);
- rl->rsv_blocks = min_blocks;
+ rl->rsv_blocks = pblk_get_min_chks(pblk);
/* This will always be a power-of-2 */
rb_windows = budget / NVM_MAX_VLBA;
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index 2d2818155aa8..7d8958df9472 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -479,6 +479,13 @@ static ssize_t pblk_sysfs_set_sec_per_write(struct pblk *pblk,
if (kstrtouint(page, 0, &sec_per_write))
return -EINVAL;
+ if (!pblk_is_oob_meta_supported(pblk)) {
+ /* In the packed metadata case it is
+ * not allowed to change sec_per_write.
+ */
+ return -EINVAL;
+ }
+
if (sec_per_write < pblk->min_write_pgs
|| sec_per_write > pblk->max_write_pgs
|| sec_per_write % pblk->min_write_pgs != 0)
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index fa8726493b39..06d56deb645d 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -105,14 +105,20 @@ retry:
}
/* Map remaining sectors in chunk, starting from ppa */
-static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
+static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
+ int rqd_ppas)
{
struct pblk_line *line;
struct ppa_addr map_ppa = *ppa;
+ __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
+ __le64 *lba_list;
u64 paddr;
int done = 0;
+ int n = 0;
line = pblk_ppa_to_line(pblk, *ppa);
+ lba_list = emeta_to_lbas(pblk, line->emeta->buf);
+
spin_lock(&line->lock);
while (!done) {
@@ -121,10 +127,17 @@ static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
if (!test_and_set_bit(paddr, line->map_bitmap))
line->left_msecs--;
+ if (n < rqd_ppas && lba_list[paddr] != addr_empty)
+ line->nr_valid_lbas--;
+
+ lba_list[paddr] = addr_empty;
+
if (!test_and_set_bit(paddr, line->invalid_bitmap))
le32_add_cpu(line->vsc, -1);
done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);
+
+ n++;
}
line->w_err_gc->has_write_err = 1;
@@ -148,9 +161,11 @@ static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
w_ctx = &entry->w_ctx;
/* Check if the lba has been overwritten */
- ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
- if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
- w_ctx->lba = ADDR_EMPTY;
+ if (w_ctx->lba != ADDR_EMPTY) {
+ ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
+ if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
+ w_ctx->lba = ADDR_EMPTY;
+ }
/* Mark up the entry as submittable again */
flags = READ_ONCE(w_ctx->flags);
@@ -200,7 +215,7 @@ static void pblk_submit_rec(struct work_struct *work)
pblk_log_write_err(pblk, rqd);
- pblk_map_remaining(pblk, ppa_list);
+ pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
pblk_queue_resubmit(pblk, c_ctx);
pblk_up_rq(pblk, c_ctx->lun_bitmap);
@@ -319,12 +334,13 @@ static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
}
if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
- pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap, valid, 0);
+ ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
+ valid, 0);
else
- pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
+ ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
valid, erase_ppa);
- return 0;
+ return ret;
}
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
@@ -332,7 +348,7 @@ static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
{
int secs_to_sync;
- secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush);
+ secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);
#ifdef CONFIG_NVM_PBLK_DEBUG
if ((!secs_to_sync && secs_to_flush)
@@ -548,15 +564,17 @@ static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
c_ctx->nr_padded);
}
-static int pblk_submit_write(struct pblk *pblk)
+static int pblk_submit_write(struct pblk *pblk, int *secs_left)
{
struct bio *bio;
struct nvm_rq *rqd;
unsigned int secs_avail, secs_to_sync, secs_to_com;
- unsigned int secs_to_flush;
+ unsigned int secs_to_flush, packed_meta_pgs;
unsigned long pos;
unsigned int resubmit;
+ *secs_left = 0;
+
spin_lock(&pblk->resubmit_lock);
resubmit = !list_empty(&pblk->resubmit_list);
spin_unlock(&pblk->resubmit_lock);
@@ -586,17 +604,17 @@ static int pblk_submit_write(struct pblk *pblk)
*/
secs_avail = pblk_rb_read_count(&pblk->rwb);
if (!secs_avail)
- return 1;
+ return 0;
secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
- if (!secs_to_flush && secs_avail < pblk->min_write_pgs)
- return 1;
+ if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
+ return 0;
secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
secs_to_flush);
if (secs_to_sync > pblk->max_write_pgs) {
pblk_err(pblk, "bad buffer sync calculation\n");
- return 1;
+ return 0;
}
secs_to_com = (secs_to_sync > secs_avail) ?
@@ -604,7 +622,8 @@ static int pblk_submit_write(struct pblk *pblk)
pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
}
- bio = bio_alloc(GFP_KERNEL, secs_to_sync);
+ packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
+ bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);
bio->bi_iter.bi_sector = 0; /* internal bio */
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@@ -625,6 +644,7 @@ static int pblk_submit_write(struct pblk *pblk)
atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif
+ *secs_left = 1;
return 0;
fail_free_bio:
@@ -633,16 +653,22 @@ fail_put_bio:
bio_put(bio);
pblk_free_rqd(pblk, rqd, PBLK_WRITE);
- return 1;
+ return -EINTR;
}
int pblk_write_ts(void *data)
{
struct pblk *pblk = data;
+ int secs_left;
+ int write_failure = 0;
while (!kthread_should_stop()) {
- if (!pblk_submit_write(pblk))
- continue;
+ if (!write_failure) {
+ write_failure = pblk_submit_write(pblk, &secs_left);
+
+ if (secs_left)
+ continue;
+ }
set_current_state(TASK_INTERRUPTIBLE);
io_schedule();
}
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 02bb2e98f8a9..85e38ed62f85 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -104,7 +104,6 @@ enum {
PBLK_RL_LOW = 4
};
-#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * NVM_MAX_VLBA)
#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)
/* write buffer completion context */
@@ -132,6 +131,8 @@ struct pblk_pr_ctx {
unsigned int bio_init_idx;
void *ppa_ptr;
dma_addr_t dma_ppa_list;
+ __le64 lba_list_mem[NVM_MAX_VLBA];
+ __le64 lba_list_media[NVM_MAX_VLBA];
};
/* Pad context */
@@ -631,7 +632,9 @@ struct pblk {
int state; /* pblk line state */
int min_write_pgs; /* Minimum amount of pages required by controller */
+ int min_write_pgs_data; /* Minimum amount of payload pages */
int max_write_pgs; /* Maximum amount of pages supported by controller */
+ int oob_meta_size; /* Size of OOB sector metadata */
sector_t capacity; /* Device capacity when bad blocks are subtracted */
@@ -836,7 +839,7 @@ void pblk_dealloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
- unsigned long secs_to_flush);
+ unsigned long secs_to_flush, bool skip_meta);
void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
unsigned long *lun_bitmap);
void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
@@ -860,6 +863,8 @@ void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
u64 *lba_list, int nr_secs);
void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
sector_t blba, int nr_secs);
+void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
+void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
/*
* pblk user I/O write path
@@ -871,10 +876,10 @@ int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
/*
* pblk map
*/
-void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
+int pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
unsigned int sentry, unsigned long *lun_bitmap,
unsigned int valid_secs, struct ppa_addr *erase_ppa);
-void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
+int pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
unsigned long *lun_bitmap, unsigned int valid_secs,
unsigned int off);
@@ -905,7 +910,6 @@ int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta);
#define PBLK_GC_MAX_READERS 8 /* Max number of outstanding GC reader jobs */
#define PBLK_GC_RQ_QD 128 /* Queue depth for inflight GC requests */
#define PBLK_GC_L_QD 4 /* Queue depth for inflight GC lines */
-#define PBLK_GC_RSV_LINE 1 /* Reserved lines for GC */
int pblk_gc_init(struct pblk *pblk);
void pblk_gc_exit(struct pblk *pblk, bool graceful);
@@ -1370,4 +1374,33 @@ static inline char *pblk_disk_name(struct pblk *pblk)
return disk->disk_name;
}
+
+static inline unsigned int pblk_get_min_chks(struct pblk *pblk)
+{
+ struct pblk_line_meta *lm = &pblk->lm;
+ /* In the worst case every line will have OP% invalid sectors.
+ * We will then need a minimum of 100/OP lines to free up a single line.
+ */
+
+ return DIV_ROUND_UP(100, pblk->op) * lm->blk_per_line;
+}
+
+static inline struct pblk_sec_meta *pblk_get_meta(struct pblk *pblk,
+ void *meta, int index)
+{
+ return meta +
+ max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
+ * index;
+}
+
+static inline int pblk_dma_meta_size(struct pblk *pblk)
+{
+ return max_t(int, sizeof(struct pblk_sec_meta), pblk->oob_meta_size)
+ * NVM_MAX_VLBA;
+}
+
+static inline int pblk_is_oob_meta_supported(struct pblk *pblk)
+{
+ return pblk->oob_meta_size >= sizeof(struct pblk_sec_meta);
+}
#endif /* PBLK_H_ */
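The helpers added above decouple pblk from a fixed OOB layout: the per-sector stride becomes max(sizeof(struct pblk_sec_meta), oob_meta_size), and pblk_get_min_chks() reserves roughly 100/OP lines' worth of chunks for GC, where OP is the over-provision percentage (e.g. OP = 11 gives DIV_ROUND_UP(100, 11) = 10 lines). A userspace model of the stride math, with made-up sizes:

	#include <stdio.h>
	#include <stddef.h>

	#define NVM_MAX_VLBA 64

	struct pblk_sec_meta {
		unsigned long long reserved;
		unsigned long long lba;
	};

	/* device-reported OOB bytes per sector; illustrative value */
	static int oob_meta_size = 16;

	static size_t meta_stride(void)
	{
		size_t s = sizeof(struct pblk_sec_meta);

		return s > (size_t)oob_meta_size ? s : (size_t)oob_meta_size;
	}

	/* model of pblk_get_meta(): index into the flat DMA meta buffer */
	static void *get_meta(void *meta, int index)
	{
		return (char *)meta + meta_stride() * index;
	}

	int main(void)
	{
		char buf[16 * NVM_MAX_VLBA];	/* 16-byte stride x 64 sectors */

		printf("dma_meta_size = %zu\n", meta_stride() * NVM_MAX_VLBA);
		printf("sector 3 meta at offset %td\n",
		       (char *)get_meta(buf, 3) - buf);
		return 0;
	}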
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index c8e078b911c7..ef0c2366cf59 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -160,7 +160,7 @@ anslcd_init(void)
struct device_node* node;
node = of_find_node_by_name(NULL, "lcd");
- if (!node || !node->parent || strcmp(node->parent->name, "gc")) {
+ if (!node || !of_node_name_eq(node->parent, "gc")) {
of_node_put(node);
return -ENODEV;
}
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 17d3bc917562..3543a82081de 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -190,11 +190,11 @@ static int macio_resource_quirks(struct device_node *np, struct resource *res,
return 0;
/* Grand Central has too large resource 0 on some machines */
- if (index == 0 && !strcmp(np->name, "gc"))
+ if (index == 0 && of_node_name_eq(np, "gc"))
res->end = res->start + 0x1ffff;
/* Airport has bogus resource 2 */
- if (index >= 2 && !strcmp(np->name, "radio"))
+ if (index >= 2 && of_node_name_eq(np, "radio"))
return 1;
#ifndef CONFIG_PPC64
@@ -207,21 +207,21 @@ static int macio_resource_quirks(struct device_node *np, struct resource *res,
* level of hierarchy, but I don't really feel the need
* for it
*/
- if (!strcmp(np->name, "escc"))
+ if (of_node_name_eq(np, "escc"))
return 1;
/* ESCC has bogus resources >= 3 */
- if (index >= 3 && !(strcmp(np->name, "ch-a") &&
- strcmp(np->name, "ch-b")))
+ if (index >= 3 && (of_node_name_eq(np, "ch-a") ||
+ of_node_name_eq(np, "ch-b")))
return 1;
/* Media bay has too many resources, keep only first one */
- if (index > 0 && !strcmp(np->name, "media-bay"))
+ if (index > 0 && of_node_name_eq(np, "media-bay"))
return 1;
/* Some older IDE resources have bogus sizes */
- if (!(strcmp(np->name, "IDE") && strcmp(np->name, "ATA") &&
- strcmp(np->type, "ide") && strcmp(np->type, "ata"))) {
+ if (of_node_name_eq(np, "IDE") || of_node_name_eq(np, "ATA") ||
+ of_node_is_type(np, "ide") || of_node_is_type(np, "ata")) {
if (index == 0 && (res->end - res->start) > 0xfff)
res->end = res->start + 0xfff;
if (index == 1 && (res->end - res->start) > 0xff)
@@ -260,7 +260,7 @@ static void macio_add_missing_resources(struct macio_dev *dev)
irq_base = 64;
/* Fix SCC */
- if (strcmp(np->name, "ch-a") == 0) {
+ if (of_node_name_eq(np, "ch-a")) {
macio_create_fixup_irq(dev, 0, 15 + irq_base);
macio_create_fixup_irq(dev, 1, 4 + irq_base);
macio_create_fixup_irq(dev, 2, 5 + irq_base);
@@ -268,18 +268,18 @@ static void macio_add_missing_resources(struct macio_dev *dev)
}
/* Fix media-bay */
- if (strcmp(np->name, "media-bay") == 0) {
+ if (of_node_name_eq(np, "media-bay")) {
macio_create_fixup_irq(dev, 0, 29 + irq_base);
printk(KERN_INFO "macio: fixed media-bay irq on gatwick\n");
}
/* Fix left media bay children */
- if (dev->media_bay != NULL && strcmp(np->name, "floppy") == 0) {
+ if (dev->media_bay != NULL && of_node_name_eq(np, "floppy")) {
macio_create_fixup_irq(dev, 0, 19 + irq_base);
macio_create_fixup_irq(dev, 1, 1 + irq_base);
printk(KERN_INFO "macio: fixed left floppy irqs\n");
}
- if (dev->media_bay != NULL && strcasecmp(np->name, "ata4") == 0) {
+ if (dev->media_bay != NULL && of_node_name_eq(np, "ata4")) {
macio_create_fixup_irq(dev, 0, 14 + irq_base);
macio_create_fixup_irq(dev, 0, 3 + irq_base);
printk(KERN_INFO "macio: fixed left ide irqs\n");
@@ -438,11 +438,8 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
static int macio_skip_device(struct device_node *np)
{
- if (strncmp(np->name, "battery", 7) == 0)
- return 1;
- if (strncmp(np->name, "escc-legacy", 11) == 0)
- return 1;
- return 0;
+ return of_node_name_prefix(np, "battery") ||
+ of_node_name_prefix(np, "escc-legacy");
}
/**
@@ -489,9 +486,9 @@ static void macio_pci_add_devices(struct macio_chip *chip)
root_res);
if (mdev == NULL)
of_node_put(np);
- else if (strncmp(np->name, "media-bay", 9) == 0)
+ else if (of_node_name_prefix(np, "media-bay"))
mbdev = mdev;
- else if (strncmp(np->name, "escc", 4) == 0)
+ else if (of_node_name_prefix(np, "escc"))
sdev = mdev;
}
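The conversions above replace raw strcmp() on np->name with of_node_name_eq()/of_node_name_prefix(), which tolerate a NULL node and compare against the node name without its '@unit-address' suffix (behaviour assumed from the helpers' usual semantics, not spelled out in this diff). A rough userspace model of the name match:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* model of of_node_name_eq(): compare a node's name up to any
	 * '@unit-address' suffix; a NULL node never matches.
	 */
	static bool node_name_eq(const char *node_name, const char *name)
	{
		const char *at;
		size_t len;

		if (!node_name)
			return false;
		at = strchr(node_name, '@');
		len = at ? (size_t)(at - node_name) : strlen(node_name);
		return strlen(name) == len &&
		       strncmp(node_name, name, len) == 0;
	}

	int main(void)
	{
		printf("%d\n", node_name_eq("media-bay@34", "media-bay")); /* 1 */
		printf("%d\n", node_name_eq("gc", "gc"));                   /* 1 */
		printf("%d\n", node_name_eq(NULL, "gc"));                   /* 0 */
		return 0;
	}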
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index d2451e58acb9..27f5eefc508f 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -3,17 +3,6 @@
#include <linux/stat.h>
#include <asm/macio.h>
-
-#define macio_config_of_attr(field, format_string) \
-static ssize_t \
-field##_show (struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct macio_dev *mdev = to_macio_device (dev); \
- return sprintf (buf, format_string, mdev->ofdev.dev.of_node->field); \
-} \
-static DEVICE_ATTR_RO(field);
-
static ssize_t
compatible_show (struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -65,7 +54,12 @@ static ssize_t name_show(struct device *dev,
}
static DEVICE_ATTR_RO(name);
-macio_config_of_attr (type, "%s\n");
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", of_node_get_device_type(dev->of_node));
+}
+static DEVICE_ATTR_RO(type);
static struct attribute *macio_dev_attrs[] = {
&dev_attr_name.attr,
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 1f29d2413c74..3940e2a032f7 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -376,18 +376,19 @@ static int rackmeter_probe(struct macio_dev* mdev,
pr_debug("rackmeter_probe()\n");
/* Get i2s-a node */
- while ((i2s = of_get_next_child(mdev->ofdev.dev.of_node, i2s)) != NULL)
- if (strcmp(i2s->name, "i2s-a") == 0)
- break;
+ for_each_child_of_node(mdev->ofdev.dev.of_node, i2s)
+ if (of_node_name_eq(i2s, "i2s-a"))
+ break;
+
if (i2s == NULL) {
pr_debug(" i2s-a child not found\n");
goto bail;
}
/* Get lightshow or virtual sound */
- while ((np = of_get_next_child(i2s, np)) != NULL) {
- if (strcmp(np->name, "lightshow") == 0)
+ for_each_child_of_node(i2s, np) {
+ if (of_node_name_eq(np, "lightshow"))
break;
- if ((strcmp(np->name, "sound") == 0) &&
+ if (of_node_name_eq(np, "sound") &&
of_get_property(np, "virtual", NULL) != NULL)
break;
}
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index 60f57e2abf21..ac0cf37d6239 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -318,8 +318,8 @@ int __init find_via_pmu(void)
PMU_INT_ADB |
PMU_INT_TICK;
- if (vias->parent->name && ((strcmp(vias->parent->name, "ohare") == 0)
- || of_device_is_compatible(vias->parent, "ohare")))
+ if (of_node_name_eq(vias->parent, "ohare") ||
+ of_device_is_compatible(vias->parent, "ohare"))
pmu_kind = PMU_OHARE_BASED;
else if (of_device_is_compatible(vias->parent, "paddington"))
pmu_kind = PMU_PADDINGTON_BASED;
diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
index fab7a21e9577..629f19875d7f 100644
--- a/drivers/macintosh/windfarm_fcu_controls.c
+++ b/drivers/macintosh/windfarm_fcu_controls.c
@@ -425,25 +425,25 @@ static void wf_fcu_lookup_fans(struct wf_fcu_priv *pv)
{ "CPU B 2", "cpu-fan-b-1", },
{ "CPU B 3", "cpu-fan-c-1", },
};
- struct device_node *np = NULL, *fcu = pv->i2c->dev.of_node;
+ struct device_node *np, *fcu = pv->i2c->dev.of_node;
int i;
DBG("Looking up FCU controls in device-tree...\n");
- while ((np = of_get_next_child(fcu, np)) != NULL) {
+ for_each_child_of_node(fcu, np) {
int id, type = -1;
const char *loc;
const char *name;
const u32 *reg;
- DBG(" control: %s, type: %s\n", np->name, np->type);
+ DBG(" control: %pOFn, type: %s\n", np, of_node_get_device_type(np));
/* Detect control type */
- if (!strcmp(np->type, "fan-rpm-control") ||
- !strcmp(np->type, "fan-rpm"))
+ if (of_node_is_type(np, "fan-rpm-control") ||
+ of_node_is_type(np, "fan-rpm"))
type = FCU_FAN_RPM;
- if (!strcmp(np->type, "fan-pwm-control") ||
- !strcmp(np->type, "fan-pwm"))
+ if (of_node_is_type(np, "fan-pwm-control") ||
+ of_node_is_type(np, "fan-pwm"))
type = FCU_FAN_PWM;
/* Only care about fans for now */
if (type == -1)
diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c
index 35aa571d498a..09724acd70b6 100644
--- a/drivers/macintosh/windfarm_lm87_sensor.c
+++ b/drivers/macintosh/windfarm_lm87_sensor.c
@@ -110,8 +110,8 @@ static int wf_lm87_probe(struct i2c_client *client,
* the Xserve G5 has several lm87's. However, for now we only
* care about the internal temperature sensor
*/
- while ((np = of_get_next_child(client->dev.of_node, np)) != NULL) {
- if (strcmp(np->name, "int-temp"))
+ for_each_child_of_node(client->dev.of_node, np) {
+ if (!of_node_name_eq(np, "int-temp"))
continue;
loc = of_get_property(np, "location", NULL);
if (!loc)
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index 86d65462a61c..2cb9652a9998 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -267,7 +267,7 @@ static int __init smu_controls_init(void)
/* Look for RPM fans */
for (fans = NULL; (fans = of_get_next_child(smu, fans)) != NULL;)
- if (!strcmp(fans->name, "rpm-fans") ||
+ if (of_node_name_eq(fans, "rpm-fans") ||
of_device_is_compatible(fans, "smu-rpm-fans"))
break;
for (fan = NULL;
@@ -287,7 +287,7 @@ static int __init smu_controls_init(void)
/* Look for PWM fans */
for (fans = NULL; (fans = of_get_next_child(smu, fans)) != NULL;)
- if (!strcmp(fans->name, "pwm-fans"))
+ if (of_node_name_eq(fans, "pwm-fans"))
break;
for (fan = NULL;
fans && (fan = of_get_next_child(fans, fan)) != NULL;) {
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index a0f61eb853c5..b4be718beba8 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -197,7 +197,7 @@ static int wf_sat_probe(struct i2c_client *client,
struct wf_sat *sat;
struct wf_sat_sensor *sens;
const u32 *reg;
- const char *loc, *type;
+ const char *loc;
u8 chip, core;
struct device_node *child;
int shift, cpu, index;
@@ -220,7 +220,6 @@ static int wf_sat_probe(struct i2c_client *client,
child = NULL;
while ((child = of_get_next_child(dev, child)) != NULL) {
reg = of_get_property(child, "reg", NULL);
- type = of_get_property(child, "device_type", NULL);
loc = of_get_property(child, "location", NULL);
if (reg == NULL || loc == NULL)
continue;
@@ -249,15 +248,15 @@ static int wf_sat_probe(struct i2c_client *client,
continue;
}
- if (strcmp(type, "voltage-sensor") == 0) {
+ if (of_node_is_type(child, "voltage-sensor")) {
name = "cpu-voltage";
shift = 4;
vsens[core] = index;
- } else if (strcmp(type, "current-sensor") == 0) {
+ } else if (of_node_is_type(child, "current-sensor")) {
name = "cpu-current";
shift = 8;
isens[core] = index;
- } else if (strcmp(type, "temp-sensor") == 0) {
+ } else if (of_node_is_type(child, "temp-sensor")) {
name = "cpu-temp";
shift = 10;
} else
diff --git a/drivers/macintosh/windfarm_smu_sensors.c b/drivers/macintosh/windfarm_smu_sensors.c
index 172fd267dcf6..a58f6733381a 100644
--- a/drivers/macintosh/windfarm_smu_sensors.c
+++ b/drivers/macintosh/windfarm_smu_sensors.c
@@ -197,15 +197,14 @@ static const struct wf_sensor_ops smu_slotspow_ops = {
static struct smu_ad_sensor *smu_ads_create(struct device_node *node)
{
struct smu_ad_sensor *ads;
- const char *c, *l;
+ const char *l;
const u32 *v;
ads = kmalloc(sizeof(struct smu_ad_sensor), GFP_KERNEL);
if (ads == NULL)
return NULL;
- c = of_get_property(node, "device_type", NULL);
l = of_get_property(node, "location", NULL);
- if (c == NULL || l == NULL)
+ if (l == NULL)
goto fail;
/* We currently pick the sensors based on the OF name and location
@@ -215,7 +214,7 @@ static struct smu_ad_sensor *smu_ads_create(struct device_node *node)
* the names and locations consistent so I'll stick with the names
* and locations for now.
*/
- if (!strcmp(c, "temp-sensor") &&
+ if (of_node_is_type(node, "temp-sensor") &&
!strcmp(l, "CPU T-Diode")) {
ads->sens.ops = &smu_cputemp_ops;
ads->sens.name = "cpu-temp";
@@ -224,7 +223,7 @@ static struct smu_ad_sensor *smu_ads_create(struct device_node *node)
SMU_SDB_CPUDIODE_ID);
goto fail;
}
- } else if (!strcmp(c, "current-sensor") &&
+ } else if (of_node_is_type(node, "current-sensor") &&
!strcmp(l, "CPU Current")) {
ads->sens.ops = &smu_cpuamp_ops;
ads->sens.name = "cpu-current";
@@ -233,7 +232,7 @@ static struct smu_ad_sensor *smu_ads_create(struct device_node *node)
SMU_SDB_CPUVCP_ID);
goto fail;
}
- } else if (!strcmp(c, "voltage-sensor") &&
+ } else if (of_node_is_type(node, "voltage-sensor") &&
!strcmp(l, "CPU Voltage")) {
ads->sens.ops = &smu_cpuvolt_ops;
ads->sens.name = "cpu-voltage";
@@ -242,7 +241,7 @@ static struct smu_ad_sensor *smu_ads_create(struct device_node *node)
SMU_SDB_CPUVCP_ID);
goto fail;
}
- } else if (!strcmp(c, "power-sensor") &&
+ } else if (of_node_is_type(node, "power-sensor") &&
!strcmp(l, "Slots Power")) {
ads->sens.ops = &smu_slotspow_ops;
ads->sens.name = "slots-power";
@@ -425,7 +424,7 @@ static int __init smu_sensors_init(void)
/* Look for sensors subdir */
for (sensors = NULL;
(sensors = of_get_next_child(smu, sensors)) != NULL;)
- if (!strcmp(sensors->name, "sensors"))
+ if (of_node_name_eq(sensors, "sensors"))
break;
of_node_put(smu);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index b61b83bbcfff..fdf75352e16a 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -627,6 +627,20 @@ struct cache_set {
struct bkey gc_done;
/*
+ * For automatic garbage collection after writeback completes, this
+ * variable is used as a bit field:
+ * - 0000 0001b (BCH_ENABLE_AUTO_GC): enable gc after writeback
+ * - 0000 0010b (BCH_DO_AUTO_GC): do gc after writeback
+ * This is an optimization for write requests that follow writeback,
+ * whose read hit rate would otherwise drop because clean data in the
+ * cache is discarded. Unless the user explicitly enables it via
+ * sysfs, it stays off.
+ */
+#define BCH_ENABLE_AUTO_GC 1
+#define BCH_DO_AUTO_GC 2
+ uint8_t gc_after_writeback;
+
+ /*
* The allocation code needs gc_mark in struct bucket to be correct, but
* it's not while a gc is in progress. Protected by bucket_lock.
*/
@@ -658,7 +672,11 @@ struct cache_set {
/*
* A btree node on disk could have too many bsets for an iterator to fit
- * on the stack - have to dynamically allocate them
+ * on the stack - have to dynamically allocate them.
+ * bch_cache_set_alloc() will make sure the pool can allocate iterators
+ * equipped with enough room that can host
+ * (sb.bucket_size / sb.block_size)
+ * btree_iter_sets, which is more than static MAX_BSETS.
*/
mempool_t fill_iter;
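For scale (made-up but typical geometry): with sb.bucket_size = 1024 sectors and sb.block_size = 8 sectors, a fill_iter iterator must hold 1024 / 8 = 128 btree_iter_sets, far above the static MAX_BSETS of 4.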
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 3f4211b5cd33..23cb1dc7296b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -207,6 +207,11 @@ void bch_btree_node_read_done(struct btree *b)
struct bset *i = btree_bset_first(b);
struct btree_iter *iter;
+ /*
+ * c->fill_iter can allocate an iterator with more room than the
+ * static MAX_BSETS would allow.
+ * See the comment around cache_set->fill_iter.
+ */
iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
iter->used = 0;
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index a68d6c55783b..d1c72ef64edf 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -266,6 +266,24 @@ static inline void wake_up_gc(struct cache_set *c)
wake_up(&c->gc_wait);
}
+static inline void force_wake_up_gc(struct cache_set *c)
+{
+ /*
+ * The garbage collection thread only works when sectors_to_gc < 0;
+ * calling wake_up_gc() won't start the gc thread if sectors_to_gc is
+ * not negative.
+ * Therefore sectors_to_gc is set to -1 here before waking up the gc
+ * thread with wake_up_gc(), so that gc_should_run() gets a chance to
+ * permit the gc thread to run. "A chance" because, before
+ * gc_should_run() is reached, c->sectors_to_gc may still be set to
+ * another positive value, so this routine cannot guarantee that the
+ * gc thread will actually be woken up to run.
+ */
+ atomic_set(&c->sectors_to_gc, -1);
+ wake_up_gc(c);
+}
+
#define MAP_DONE 0
#define MAP_CONTINUE 1
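With force_wake_up_gc() factored out, the sysfs trigger_gc store (converted later in this patch) and the post-writeback path share one wakeup routine; from userspace the manual trigger remains, e.g., echo 1 > /sys/fs/bcache/<uuid>/internal/trigger_gc (path assumed from the existing bcache sysfs layout).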
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 8f448b9c96a1..8b123be05254 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -249,8 +249,7 @@ void bch_debug_init_cache_set(struct cache_set *c)
void bch_debug_exit(void)
{
- if (!IS_ERR_OR_NULL(bcache_debug))
- debugfs_remove_recursive(bcache_debug);
+ debugfs_remove_recursive(bcache_debug);
}
void __init bch_debug_init(void)
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 522c7426f3a0..b2fd412715b1 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -663,7 +663,7 @@ static void journal_write_unlocked(struct closure *cl)
REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
bch_bio_map(bio, w->data);
- trace_bcache_journal_write(bio);
+ trace_bcache_journal_write(bio, w->data->keys);
bio_list_add(&list, bio);
SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 3bf35914bb57..15070412a32e 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -311,11 +311,11 @@ err:
* data is written it calls bch_journal, and after the keys have been added to
* the next journal write they're inserted into the btree.
*
- * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
+ * It inserts the data in op->bio; bi_sector is used for the key offset,
* and op->inode is used for the key inode.
*
- * If s->bypass is true, instead of inserting the data it invalidates the
- * region of the cache represented by s->cache_bio and op->inode.
+ * If op->bypass is true, instead of inserting the data it invalidates the
+ * region of the cache represented by op->bio and op->inode.
*/
void bch_data_insert(struct closure *cl)
{
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 7bbd670a5a84..4dee119c3664 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -25,8 +25,8 @@
#include <linux/reboot.h>
#include <linux/sysfs.h>
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
+unsigned int bch_cutoff_writeback;
+unsigned int bch_cutoff_writeback_sync;
static const char bcache_magic[] = {
0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
@@ -1510,8 +1510,7 @@ static void cache_set_free(struct closure *cl)
struct cache *ca;
unsigned int i;
- if (!IS_ERR_OR_NULL(c->debug))
- debugfs_remove(c->debug);
+ debugfs_remove(c->debug);
bch_open_buckets_free(c);
bch_btree_cache_free(c);
@@ -2424,6 +2423,32 @@ static void bcache_exit(void)
mutex_destroy(&bch_register_lock);
}
+/* Check and fixup module parameters */
+static void check_module_parameters(void)
+{
+ if (bch_cutoff_writeback_sync == 0)
+ bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
+ else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
+ pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u",
+ bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
+ bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
+ }
+
+ if (bch_cutoff_writeback == 0)
+ bch_cutoff_writeback = CUTOFF_WRITEBACK;
+ else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
+ pr_warn("set bch_cutoff_writeback (%u) to max value %u",
+ bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
+ bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
+ }
+
+ if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
+ pr_warn("set bch_cutoff_writeback (%u) to %u",
+ bch_cutoff_writeback, bch_cutoff_writeback_sync);
+ bch_cutoff_writeback = bch_cutoff_writeback_sync;
+ }
+}
+
static int __init bcache_init(void)
{
static const struct attribute *files[] = {
@@ -2432,6 +2457,8 @@ static int __init bcache_init(void)
NULL
};
+ check_module_parameters();
+
mutex_init(&bch_register_lock);
init_waitqueue_head(&unregister_wait);
register_reboot_notifier(&reboot);
@@ -2468,5 +2495,18 @@ err:
return -ENOMEM;
}
+/*
+ * Module hooks
+ */
module_exit(bcache_exit);
module_init(bcache_init);
+
+module_param(bch_cutoff_writeback, uint, 0);
+MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");
+
+module_param(bch_cutoff_writeback_sync, uint, 0);
+MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");
+
+MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
+MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
+MODULE_LICENSE("GPL");
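The two writeback cutoffs become load-time tunables, clamped by check_module_parameters(): bch_cutoff_writeback is capped at 70, bch_cutoff_writeback_sync at 90, the former is never allowed above the latter, and unset (zero) values fall back to the old CUTOFF_WRITEBACK/CUTOFF_WRITEBACK_SYNC defaults of 40 and 70. An assumed invocation, not taken from this patch:

	modprobe bcache bch_cutoff_writeback=50 bch_cutoff_writeback_sync=80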
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 26f035a0c5b9..557a8a3270a1 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -16,7 +16,7 @@
#include <linux/sort.h>
#include <linux/sched/clock.h>
-/* Default is -1; we skip past it for struct cached_dev's cache mode */
+/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
"writethrough",
"writeback",
@@ -25,7 +25,7 @@ static const char * const bch_cache_modes[] = {
NULL
};
-/* Default is -1; we skip past it for stop_when_cache_set_failed */
+/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
"auto",
"always",
@@ -88,6 +88,8 @@ read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
+read_attribute(cutoff_writeback);
+read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);
@@ -128,6 +130,7 @@ rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
+rw_attribute(gc_after_writeback);
rw_attribute(size);
static ssize_t bch_snprint_string_list(char *buf,
@@ -264,7 +267,8 @@ STORE(__cached_dev)
d_strtoul(writeback_running);
d_strtoul(writeback_delay);
- sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
+ sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
+ 0, bch_cutoff_writeback);
if (attr == &sysfs_writeback_rate) {
ssize_t ret;
@@ -384,8 +388,25 @@ STORE(bch_cached_dev)
mutex_lock(&bch_register_lock);
size = __cached_dev_store(kobj, attr, buf, size);
- if (attr == &sysfs_writeback_running)
- bch_writeback_queue(dc);
+ if (attr == &sysfs_writeback_running) {
+ /* dc->writeback_running changed in __cached_dev_store() */
+ if (IS_ERR_OR_NULL(dc->writeback_thread)) {
+ /*
+ * reject setting it to 1 via sysfs if writeback
+ * kthread is not created yet.
+ */
+ if (dc->writeback_running) {
+ dc->writeback_running = false;
+ pr_err("%s: failed to run non-existent writeback thread",
+ dc->disk.disk->disk_name);
+ }
+ } else
+ /*
+ * writeback kthread will check if dc->writeback_running
+ * is true or false.
+ */
+ bch_writeback_queue(dc);
+ }
if (attr == &sysfs_writeback_percent)
if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
@@ -668,6 +689,9 @@ SHOW(__bch_cache_set)
sysfs_print(congested_write_threshold_us,
c->congested_write_threshold_us);
+ sysfs_print(cutoff_writeback, bch_cutoff_writeback);
+ sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);
+
sysfs_print(active_journal_entries, fifo_used(&c->journal.pin));
sysfs_printf(verify, "%i", c->verify);
sysfs_printf(key_merging_disabled, "%i", c->key_merging_disabled);
@@ -676,6 +700,7 @@ SHOW(__bch_cache_set)
sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite);
sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled);
sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
+ sysfs_printf(gc_after_writeback, "%i", c->gc_after_writeback);
sysfs_printf(io_disable, "%i",
test_bit(CACHE_SET_IO_DISABLE, &c->flags));
@@ -725,21 +750,8 @@ STORE(__bch_cache_set)
bch_cache_accounting_clear(&c->accounting);
}
- if (attr == &sysfs_trigger_gc) {
- /*
- * Garbage collection thread only works when sectors_to_gc < 0,
- * when users write to sysfs entry trigger_gc, most of time
- * they want to forcibly triger gargage collection. Here -1 is
- * set to c->sectors_to_gc, to make gc_should_run() give a
- * chance to permit gc thread to run. "give a chance" means
- * before going into gc_should_run(), there is still chance
- * that c->sectors_to_gc being set to other positive value. So
- * writing sysfs entry trigger_gc won't always make sure gc
- * thread takes effect.
- */
- atomic_set(&c->sectors_to_gc, -1);
- wake_up_gc(c);
- }
+ if (attr == &sysfs_trigger_gc)
+ force_wake_up_gc(c);
if (attr == &sysfs_prune_cache) {
struct shrink_control sc;
@@ -789,6 +801,12 @@ STORE(__bch_cache_set)
sysfs_strtoul(gc_always_rewrite, c->gc_always_rewrite);
sysfs_strtoul(btree_shrinker_disabled, c->shrinker_disabled);
sysfs_strtoul(copy_gc_enabled, c->copy_gc_enabled);
+ /*
+ * Writing gc_after_writeback here may overwrite an already-set
+ * BCH_DO_AUTO_GC; that doesn't matter, because the flag will be
+ * set again at the next opportunity.
+ */
+ sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);
return size;
}
@@ -869,7 +887,10 @@ static struct attribute *bch_cache_set_internal_files[] = {
&sysfs_gc_always_rewrite,
&sysfs_btree_shrinker_disabled,
&sysfs_copy_gc_enabled,
+ &sysfs_gc_after_writeback,
&sysfs_io_disable,
+ &sysfs_cutoff_writeback,
+ &sysfs_cutoff_writeback_sync,
NULL
};
KTYPE(bch_cache_set_internal);
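Note that gc_after_writeback is exposed read-write in the cache set's internal directory and is clamped to 0 or 1 on store, so userspace can only toggle BCH_ENABLE_AUTO_GC; BCH_DO_AUTO_GC is armed internally once dirty usage reaches BCH_AUTO_GC_DIRTY_THRESHOLD (50, per the writeback.h hunk below). Assuming the usual bcache sysfs layout: echo 1 > /sys/fs/bcache/<uuid>/internal/gc_after_writeback.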
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 08c3a9f9676c..73f0efac2b9f 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -17,6 +17,15 @@
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>
+static void update_gc_after_writeback(struct cache_set *c)
+{
+ if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
+ c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
+ return;
+
+ c->gc_after_writeback |= BCH_DO_AUTO_GC;
+}
+
/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
@@ -191,6 +200,7 @@ static void update_writeback_rate(struct work_struct *work)
if (!set_at_max_writeback_rate(c, dc)) {
down_read(&dc->writeback_lock);
__update_writeback_rate(dc);
+ update_gc_after_writeback(c);
up_read(&dc->writeback_lock);
}
}
@@ -689,6 +699,23 @@ static int bch_writeback_thread(void *arg)
up_write(&dc->writeback_lock);
break;
}
+
+ /*
+ * When the dirty data rate is high (e.g. 50%+), buckets may
+ * be heavily fragmented after writeback finishes, which
+ * hurts subsequent write performance. Users who really care
+ * about write performance may set BCH_ENABLE_AUTO_GC via
+ * sysfs; then, when BCH_DO_AUTO_GC is set, the garbage
+ * collection thread is woken up here. After the moving gc,
+ * the shrunk btree and the discarded free bucket space may
+ * help subsequent write requests.
+ */
+ if (c->gc_after_writeback ==
+ (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
+ c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
+ force_wake_up_gc(c);
+ }
}
up_write(&dc->writeback_lock);
@@ -777,7 +804,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
bch_keybuf_init(&dc->writeback_keys);
dc->writeback_metadata = true;
- dc->writeback_running = true;
+ dc->writeback_running = false;
dc->writeback_percent = 10;
dc->writeback_delay = 30;
atomic_long_set(&dc->writeback_rate.rate, 1024);
@@ -805,6 +832,7 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc)
cached_dev_put(dc);
return PTR_ERR(dc->writeback_thread);
}
+ dc->writeback_running = true;
WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
schedule_delayed_work(&dc->writeback_rate_update,
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index d2b9fdbc8994..6a743d3bb338 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -5,12 +5,17 @@
#define CUTOFF_WRITEBACK 40
#define CUTOFF_WRITEBACK_SYNC 70
+#define CUTOFF_WRITEBACK_MAX 70
+#define CUTOFF_WRITEBACK_SYNC_MAX 90
+
#define MAX_WRITEBACKS_IN_PASS 5
#define MAX_WRITESIZE_IN_PASS 5000 /* *512b */
#define WRITEBACK_RATE_UPDATE_SECS_MAX 60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT 5
+#define BCH_AUTO_GC_DIRTY_THRESHOLD 50
+
/*
* 14 (16384ths) is chosen here as something that each backing device
* should be a reasonable fraction of the share, and not to blow up
@@ -53,6 +58,9 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
}
}
+extern unsigned int bch_cutoff_writeback;
+extern unsigned int bch_cutoff_writeback_sync;
+
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
unsigned int cache_mode, bool would_skip)
{
@@ -60,7 +68,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
if (cache_mode != CACHE_MODE_WRITEBACK ||
test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
- in_use > CUTOFF_WRITEBACK_SYNC)
+ in_use > bch_cutoff_writeback_sync)
return false;
if (dc->partial_stripes_expensive &&
@@ -73,7 +81,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
return (op_is_sync(bio->bi_opf) ||
bio->bi_opf & (REQ_META|REQ_PRIO) ||
- in_use <= CUTOFF_WRITEBACK);
+ in_use <= bch_cutoff_writeback);
}
static inline void bch_writeback_queue(struct cached_dev *dc)
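With the cutoffs now variables, the writeback decision reads as follows, in a simplified userspace model (the real should_writeback() also considers cache mode, the detaching flag, partial stripes and REQ_META/REQ_PRIO):

	#include <stdbool.h>
	#include <stdio.h>

	/* thresholds mirror the new module parameters; defaults 40/70 */
	static unsigned cutoff_writeback = 40;
	static unsigned cutoff_writeback_sync = 70;

	static bool should_writeback(unsigned in_use, bool sync_io)
	{
		if (in_use > cutoff_writeback_sync)
			return false;		/* cache too full: write around */
		if (sync_io)
			return true;		/* sync I/O tolerates more dirty data */
		return in_use <= cutoff_writeback;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       should_writeback(30, false),	/* 1 */
		       should_writeback(60, false),	/* 0 */
		       should_writeback(60, true));	/* 1 */
		return 0;
	}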
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 5936de71883f..6fc93834da44 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -930,6 +930,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
bool dirty_flag;
*result = true;
+ if (from_cblock(cmd->cache_blocks) == 0)
+ /* Nothing to do */
+ return 0;
+
r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
if (r) {
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 224d44503a06..95c6d86ab5e8 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -65,7 +65,6 @@ struct mapped_device {
*/
struct work_struct work;
wait_queue_head_t wait;
- atomic_t pending[2];
spinlock_t deferred_lock;
struct bio_list deferred;
@@ -107,9 +106,6 @@ struct mapped_device {
struct block_device *bdev;
- /* zero-length flush that will be cloned and submitted to targets */
- struct bio flush_bio;
-
struct dm_stats stats;
/* for blk-mq request-based DM support */
@@ -119,7 +115,6 @@ struct mapped_device {
struct srcu_struct io_barrier;
};
-int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index b8eec515a003..a7195eb5b8d8 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -377,7 +377,7 @@ static struct crypto_cipher *alloc_essiv_cipher(struct crypt_config *cc,
int err;
/* Setup the essiv_tfm with the given salt */
- essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+ essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, 0);
if (IS_ERR(essiv_tfm)) {
ti->error = "Error allocating crypto tfm for ESSIV";
return essiv_tfm;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index bb3096bf2cc6..d4ad0bfee251 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2804,7 +2804,7 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
int r;
if (a->alg_string) {
- *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ASYNC);
+ *hash = crypto_alloc_shash(a->alg_string, 0, 0);
if (IS_ERR(*hash)) {
*error = error_alg;
r = PTR_ERR(*hash);
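Both the dm-crypt and dm-integrity hunks drop CRYPTO_ALG_ASYNC from the allocation mask. The (type, mask) pair filters candidate algorithms, and crypto_alloc_shash()/crypto_alloc_cipher() only ever return synchronous implementations, so masking out the ASYNC bit was redundant (rationale inferred; the diff itself does not state it). The equivalent call is simply, with an illustrative algorithm name:

	hash = crypto_alloc_shash("hmac(sha256)", 0, 0);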
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 7cd36e4d1310..4e06be4f0a62 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -43,7 +43,7 @@ static unsigned dm_get_blk_mq_queue_depth(void)
int dm_request_based(struct mapped_device *md)
{
- return queue_is_rq_based(md->queue);
+ return queue_is_mq(md->queue);
}
void dm_start_queue(struct request_queue *q)
@@ -130,10 +130,8 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
- atomic_dec(&md->pending[rw]);
-
/* nudge anyone waiting on suspend queue */
- if (!md_in_flight(md))
+ if (unlikely(waitqueue_active(&md->wait)))
wake_up(&md->wait);
/*
@@ -436,7 +434,6 @@ ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
blk_mq_start_request(orig);
- atomic_inc(&md->pending[rq_data_dir(orig)]);
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
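Under blk-mq the block core already tracks in-flight requests, so request-based DM no longer needs its own pending[] counters or md_in_flight() (both removed from dm-core.h in the hunks above); rq_completed() only has to nudge md->wait, and the waitqueue_active() check keeps the common completion path from paying the wakeup cost when nobody is waiting in suspend.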
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 9038c302d5c2..844f7d0f2ef8 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -919,12 +919,12 @@ static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
struct request_queue *q = bdev_get_queue(dev->bdev);
struct verify_rq_based_data *v = data;
- if (q->mq_ops)
+ if (queue_is_mq(q))
v->mq_count++;
else
v->sq_count++;
- return queue_is_rq_based(q);
+ return queue_is_mq(q);
}
static int dm_table_determine_type(struct dm_table *t)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 0bd8d498b3b9..dadd9696340c 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t)
struct dm_thin_new_mapping;
/*
- * The pool runs in 4 modes. Ordered in degraded order for comparisons.
+ * The pool runs in various modes. Ordered from least to most degraded, for comparisons.
*/
enum pool_mode {
PM_WRITE, /* metadata may be changed */
@@ -282,9 +282,38 @@ struct pool {
mempool_t mapping_pool;
};
-static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
+static enum pool_mode get_pool_mode(struct pool *pool)
+{
+ return pool->pf.mode;
+}
+
+static void notify_of_pool_mode_change(struct pool *pool)
+{
+ const char *descs[] = {
+ "write",
+ "out-of-data-space",
+ "read-only",
+ "read-only",
+ "fail"
+ };
+ const char *extra_desc = NULL;
+ enum pool_mode mode = get_pool_mode(pool);
+
+ if (mode == PM_OUT_OF_DATA_SPACE) {
+ if (!pool->pf.error_if_no_space)
+ extra_desc = " (queue IO)";
+ else
+ extra_desc = " (error IO)";
+ }
+
+ dm_table_event(pool->ti->table);
+ DMINFO("%s: switching pool to %s%s mode",
+ dm_device_name(pool->pool_md),
+ descs[(int)mode], extra_desc ? : "");
+}
+
/*
* Target context for a pool.
*/
@@ -2351,8 +2380,6 @@ static void do_waker(struct work_struct *ws)
queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}
-static void notify_of_pool_mode_change_to_oods(struct pool *pool);
-
/*
* We're holding onto IO to allow userland time to react. After the
* timeout either the pool will have been resized (and thus back in
@@ -2365,7 +2392,7 @@ static void do_no_space_timeout(struct work_struct *ws)
if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
pool->pf.error_if_no_space = true;
- notify_of_pool_mode_change_to_oods(pool);
+ notify_of_pool_mode_change(pool);
error_retry_list_with_code(pool, BLK_STS_NOSPC);
}
}
@@ -2433,26 +2460,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
/*----------------------------------------------------------------*/
-static enum pool_mode get_pool_mode(struct pool *pool)
-{
- return pool->pf.mode;
-}
-
-static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
-{
- dm_table_event(pool->ti->table);
- DMINFO("%s: switching pool to %s mode",
- dm_device_name(pool->pool_md), new_mode);
-}
-
-static void notify_of_pool_mode_change_to_oods(struct pool *pool)
-{
- if (!pool->pf.error_if_no_space)
- notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
- else
- notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
-}
-
static bool passdown_enabled(struct pool_c *pt)
{
return pt->adjusted_pf.discard_passdown;
@@ -2501,8 +2508,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
switch (new_mode) {
case PM_FAIL:
- if (old_mode != new_mode)
- notify_of_pool_mode_change(pool, "failure");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
@@ -2516,8 +2521,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
- if (!is_read_only_pool_mode(old_mode))
- notify_of_pool_mode_change(pool, "read-only");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_read_only;
pool->process_discard = process_bio_success;
@@ -2538,8 +2541,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
* alarming rate. Adjust your low water mark if you're
* frequently seeing this mode.
*/
- if (old_mode != new_mode)
- notify_of_pool_mode_change_to_oods(pool);
pool->out_of_data_space = true;
pool->process_bio = process_bio_read_only;
pool->process_discard = process_discard_bio;
@@ -2552,8 +2553,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
break;
case PM_WRITE:
- if (old_mode != new_mode)
- notify_of_pool_mode_change(pool, "write");
if (old_mode == PM_OUT_OF_DATA_SPACE)
cancel_delayed_work_sync(&pool->no_space_timeout);
pool->out_of_data_space = false;
@@ -2573,6 +2572,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
* doesn't cause an unexpected mode transition on resume.
*/
pt->adjusted_pf.mode = new_mode;
+
+ if (old_mode != new_mode)
+ notify_of_pool_mode_change(pool);
}
static void abort_transaction(struct pool *pool)
@@ -4023,7 +4025,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 20, 0},
+ .version = {1, 21, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -4397,7 +4399,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 20, 0},
+ .version = {1, 21, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
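
The consolidated notify_of_pool_mode_change() above derives its log string by
indexing descs[] with the current enum pool_mode value, so the table order must
mirror the enum. A minimal standalone sketch of that pattern (plain C, not
kernel code; the enum members beyond those visible in this hunk are assumed
from the five descs[] entries):

    #include <stdio.h>

    enum pool_mode {
        PM_WRITE,
        PM_OUT_OF_DATA_SPACE,
        PM_OUT_OF_METADATA_SPACE,
        PM_READ_ONLY,
        PM_FAIL,
    };

    static const char *const descs[] = {
        "write",
        "out-of-data-space",
        "read-only",    /* metadata-space exhaustion is reported as read-only */
        "read-only",
        "fail",
    };

    static void notify(enum pool_mode mode, int error_if_no_space)
    {
        const char *extra = "";

        if (mode == PM_OUT_OF_DATA_SPACE)
            extra = error_if_no_space ? " (error IO)" : " (queue IO)";

        printf("switching pool to %s%s mode\n", descs[mode], extra);
    }

    int main(void)
    {
        notify(PM_OUT_OF_DATA_SPACE, 0);    /* out-of-data-space (queue IO) */
        notify(PM_FAIL, 0);                 /* fail */
        return 0;
    }
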
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 981154e59461..6af5babe6837 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -20,7 +20,6 @@ struct dmz_bioctx {
struct dm_zone *zone;
struct bio *bio;
refcount_t ref;
- blk_status_t status;
};
/*
@@ -78,65 +77,66 @@ static inline void dmz_bio_endio(struct bio *bio, blk_status_t status)
{
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
- if (bioctx->status == BLK_STS_OK && status != BLK_STS_OK)
- bioctx->status = status;
- bio_endio(bio);
+ if (status != BLK_STS_OK && bio->bi_status == BLK_STS_OK)
+ bio->bi_status = status;
+
+ if (refcount_dec_and_test(&bioctx->ref)) {
+ struct dm_zone *zone = bioctx->zone;
+
+ if (zone) {
+ if (bio->bi_status != BLK_STS_OK &&
+ bio_op(bio) == REQ_OP_WRITE &&
+ dmz_is_seq(zone))
+ set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
+ dmz_deactivate_zone(zone);
+ }
+ bio_endio(bio);
+ }
}
/*
- * Partial clone read BIO completion callback. This terminates the
+ * Completion callback for an internally cloned target BIO. This terminates the
* target BIO when there are no more references to its context.
*/
-static void dmz_read_bio_end_io(struct bio *bio)
+static void dmz_clone_endio(struct bio *clone)
{
- struct dmz_bioctx *bioctx = bio->bi_private;
- blk_status_t status = bio->bi_status;
+ struct dmz_bioctx *bioctx = clone->bi_private;
+ blk_status_t status = clone->bi_status;
- bio_put(bio);
+ bio_put(clone);
dmz_bio_endio(bioctx->bio, status);
}
/*
- * Issue a BIO to a zone. The BIO may only partially process the
+ * Issue a clone of a target BIO. The clone may only partially process the
* original target BIO.
*/
-static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
- struct bio *bio, sector_t chunk_block,
- unsigned int nr_blocks)
+static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
+ struct bio *bio, sector_t chunk_block,
+ unsigned int nr_blocks)
{
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
- sector_t sector;
struct bio *clone;
- /* BIO remap sector */
- sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
-
- /* If the read is not partial, there is no need to clone the BIO */
- if (nr_blocks == dmz_bio_blocks(bio)) {
- /* Setup and submit the BIO */
- bio->bi_iter.bi_sector = sector;
- refcount_inc(&bioctx->ref);
- generic_make_request(bio);
- return 0;
- }
-
- /* Partial BIO: we need to clone the BIO */
clone = bio_clone_fast(bio, GFP_NOIO, &dmz->bio_set);
if (!clone)
return -ENOMEM;
- /* Setup the clone */
- clone->bi_iter.bi_sector = sector;
+ bio_set_dev(clone, dmz->dev->bdev);
+ clone->bi_iter.bi_sector =
+ dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
- clone->bi_end_io = dmz_read_bio_end_io;
+ clone->bi_end_io = dmz_clone_endio;
clone->bi_private = bioctx;
bio_advance(bio, clone->bi_iter.bi_size);
- /* Submit the clone */
refcount_inc(&bioctx->ref);
generic_make_request(clone);
+ if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
+ zone->wp_block += nr_blocks;
+
return 0;
}
@@ -214,7 +214,7 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
if (nr_blocks) {
/* Valid blocks found: read them */
nr_blocks = min_t(unsigned int, nr_blocks, end_block - chunk_block);
- ret = dmz_submit_read_bio(dmz, rzone, bio, chunk_block, nr_blocks);
+ ret = dmz_submit_bio(dmz, rzone, bio, chunk_block, nr_blocks);
if (ret)
return ret;
chunk_block += nr_blocks;
@@ -229,25 +229,6 @@ static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
}
/*
- * Issue a write BIO to a zone.
- */
-static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
- struct bio *bio, sector_t chunk_block,
- unsigned int nr_blocks)
-{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
- /* Setup and submit the BIO */
- bio_set_dev(bio, dmz->dev->bdev);
- bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
- refcount_inc(&bioctx->ref);
- generic_make_request(bio);
-
- if (dmz_is_seq(zone))
- zone->wp_block += nr_blocks;
-}
-
-/*
* Write blocks directly in a data zone, at the write pointer.
* If a buffer zone is assigned, invalidate the blocks written
* in place.
@@ -265,7 +246,9 @@ static int dmz_handle_direct_write(struct dmz_target *dmz,
return -EROFS;
/* Submit write */
- dmz_submit_write_bio(dmz, zone, bio, chunk_block, nr_blocks);
+ ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
+ if (ret)
+ return ret;
/*
* Validate the blocks in the data zone and invalidate
@@ -301,7 +284,9 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
return -EROFS;
/* Submit write */
- dmz_submit_write_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+ ret = dmz_submit_bio(dmz, bzone, bio, chunk_block, nr_blocks);
+ if (ret)
+ return ret;
/*
* Validate the blocks in the buffer zone
@@ -600,7 +585,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bioctx->zone = NULL;
bioctx->bio = bio;
refcount_set(&bioctx->ref, 1);
- bioctx->status = BLK_STS_OK;
/* Set the BIO pending in the flush list */
if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) {
@@ -624,35 +608,6 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
}
/*
- * Completed target BIO processing.
- */
-static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
-{
- struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
-
- if (bioctx->status == BLK_STS_OK && *error)
- bioctx->status = *error;
-
- if (!refcount_dec_and_test(&bioctx->ref))
- return DM_ENDIO_INCOMPLETE;
-
- /* Done */
- bio->bi_status = bioctx->status;
-
- if (bioctx->zone) {
- struct dm_zone *zone = bioctx->zone;
-
- if (*error && bio_op(bio) == REQ_OP_WRITE) {
- if (dmz_is_seq(zone))
- set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
- }
- dmz_deactivate_zone(zone);
- }
-
- return DM_ENDIO_DONE;
-}
-
-/*
* Get zoned device information.
*/
static int dmz_get_zoned_device(struct dm_target *ti, char *path)
@@ -946,7 +901,6 @@ static struct target_type dmz_type = {
.ctr = dmz_ctr,
.dtr = dmz_dtr,
.map = dmz_map,
- .end_io = dmz_end_io,
.io_hints = dmz_io_hints,
.prepare_ioctl = dmz_prepare_ioctl,
.postsuspend = dmz_suspend,
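
The reworked dmz_bio_endio() folds the old end_io hook into the refcount drop:
whichever completion releases the last reference to the per-bio context
finishes the parent BIO, and the first error recorded wins. A single-threaded
userspace sketch of that completion pattern (illustrative names, not the
kernel API):

    #include <stdatomic.h>
    #include <stdio.h>

    struct parent {
        atomic_int ref;    /* like the per-bio context refcount */
        int status;        /* first error wins, like bio->bi_status */
    };

    static void child_done(struct parent *p, int status)
    {
        if (status && !p->status)
            p->status = status;    /* single-threaded demo; keep first error */

        /* Whoever drops the last reference completes the parent. */
        if (atomic_fetch_sub(&p->ref, 1) == 1)
            printf("parent completed, status=%d\n", p->status);
    }

    int main(void)
    {
        struct parent p = { .ref = 1, .status = 0 };  /* submitter holds one ref */

        atomic_fetch_add(&p.ref, 1);    /* one clone issued */
        child_done(&p, 0);              /* the clone completes */
        child_done(&p, 0);              /* submitter drops its own reference */
        return 0;
    }
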
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c510179a7f84..a4a06982ed91 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -646,26 +646,38 @@ static void free_tio(struct dm_target_io *tio)
bio_put(&tio->clone);
}
-int md_in_flight(struct mapped_device *md)
+static bool md_in_flight_bios(struct mapped_device *md)
{
- return atomic_read(&md->pending[READ]) +
- atomic_read(&md->pending[WRITE]);
+ int cpu;
+ struct hd_struct *part = &dm_disk(md)->part0;
+ long sum = 0;
+
+ for_each_possible_cpu(cpu) {
+ sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
+ sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
+ }
+
+ return sum != 0;
+}
+
+static bool md_in_flight(struct mapped_device *md)
+{
+ if (queue_is_mq(md->queue))
+ return blk_mq_queue_inflight(md->queue);
+ else
+ return md_in_flight_bios(md);
}
static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
- int rw = bio_data_dir(bio);
io->start_time = jiffies;
generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio),
&dm_disk(md)->part0);
- atomic_set(&dm_disk(md)->part0.in_flight[rw],
- atomic_inc_return(&md->pending[rw]));
-
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
@@ -677,8 +689,6 @@ static void end_io_acct(struct dm_io *io)
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
unsigned long duration = jiffies - io->start_time;
- int pending;
- int rw = bio_data_dir(bio);
generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0,
io->start_time);
@@ -688,16 +698,8 @@ static void end_io_acct(struct dm_io *io)
bio->bi_iter.bi_sector, bio_sectors(bio),
true, duration, &io->stats_aux);
- /*
- * After this is decremented the bio must not be touched if it is
- * a flush.
- */
- pending = atomic_dec_return(&md->pending[rw]);
- atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
- pending += atomic_read(&md->pending[rw^0x1]);
-
/* nudge anyone waiting on suspend queue */
- if (!pending)
+ if (unlikely(waitqueue_active(&md->wait)))
wake_up(&md->wait);
}
@@ -1417,10 +1419,21 @@ static int __send_empty_flush(struct clone_info *ci)
unsigned target_nr = 0;
struct dm_target *ti;
+ /*
+ * Empty flush uses a statically initialized bio, as the base for
+ * cloning. However, blkg association requires that a bdev is
+ * associated with a gendisk, which doesn't happen until the bdev is
+ * opened. So, blkg association is done at issue time of the flush
+ * rather than when the device is created in alloc_dev().
+ */
+ bio_set_dev(ci->bio, ci->io->md->bdev);
+
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
+ bio_disassociate_blkg(ci->bio);
+
return 0;
}
@@ -1593,10 +1606,21 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
return ret;
}
+ blk_queue_split(md->queue, &bio);
+
init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
- ci.bio = &ci.io->md->flush_bio;
+ struct bio flush_bio;
+
+ /*
+ * Use an on-stack bio for this, it's safe since we don't
+ * need to reference it after submit. It's just used as
+ * the basis for the clone(s).
+ */
+ bio_init(&flush_bio, NULL, 0);
+ flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+ ci.bio = &flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
@@ -1652,7 +1676,16 @@ static blk_qc_t __process_bio(struct mapped_device *md,
init_clone_info(&ci, md, map, bio);
if (bio->bi_opf & REQ_PREFLUSH) {
- ci.bio = &ci.io->md->flush_bio;
+ struct bio flush_bio;
+
+ /*
+ * Use an on-stack bio for this, it's safe since we don't
+ * need to reference it after submit. It's just used as
+ * the basis for the clone(s).
+ */
+ bio_init(&flush_bio, NULL, 0);
+ flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+ ci.bio = &flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
@@ -1896,7 +1929,7 @@ static struct mapped_device *alloc_dev(int minor)
INIT_LIST_HEAD(&md->table_devices);
spin_lock_init(&md->uevent_lock);
- md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL);
+ md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
if (!md->queue)
goto bad;
md->queue->queuedata = md;
@@ -1906,8 +1939,6 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->disk)
goto bad;
- atomic_set(&md->pending[0], 0);
- atomic_set(&md->pending[1], 0);
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
@@ -1938,10 +1969,6 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->bdev)
goto bad;
- bio_init(&md->flush_bio, NULL, 0);
- bio_set_dev(&md->flush_bio, md->bdev);
- md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
-
dm_stats_init(&md->stats);
/* Populate the mapping, nobody knows we exist yet */
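
md_in_flight_bios() above replaces the old atomic pending[] counters with a
sum over per-CPU in-flight counters. A single per-CPU slot can go negative
when a bio is submitted on one CPU and completed on another; only the total
is meaningful. A standalone sketch (NR_CPUS and the array layout are
stand-ins for the kernel's percpu machinery):

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static long in_flight[NR_CPUS][2];    /* [cpu][read/write] */

    static bool any_in_flight(void)
    {
        long sum = 0;

        /* Submission increments locally; completion may decrement on a
         * different CPU, so a single slot can be negative. Only the
         * total across all CPUs is meaningful. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            sum += in_flight[cpu][0] + in_flight[cpu][1];

        return sum != 0;
    }

    int main(void)
    {
        in_flight[0][1]++;    /* a write submitted on CPU 0 */
        printf("busy=%d\n", any_in_flight());    /* 1 */
        in_flight[2][1]--;    /* ...completes on CPU 2 */
        printf("busy=%d\n", any_in_flight());    /* 0 */
        return 0;
    }
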
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fc488cb30a94..9a0a1e0934d5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -334,7 +334,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
const int sgrp = op_stat_group(bio_op(bio));
struct mddev *mddev = q->queuedata;
unsigned int sectors;
- int cpu;
blk_queue_split(q, &bio);
@@ -359,9 +358,9 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
md_handle_request(mddev, bio);
- cpu = part_stat_lock();
- part_stat_inc(cpu, &mddev->gendisk->part0, ios[sgrp]);
- part_stat_add(cpu, &mddev->gendisk->part0, sectors[sgrp], sectors);
+ part_stat_lock();
+ part_stat_inc(&mddev->gendisk->part0, ios[sgrp]);
+ part_stat_add(&mddev->gendisk->part0, sectors[sgrp], sectors);
part_stat_unlock();
return BLK_QC_T_NONE;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ac1cffd2a09b..f3fb5bb8c82a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -542,7 +542,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
!discard_bio)
continue;
bio_chain(discard_bio, bio);
- bio_clone_blkcg_association(discard_bio, bio);
+ bio_clone_blkg_association(discard_bio, bio);
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rdev->bdev),
discard_bio, disk_devt(mddev->gendisk),
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 8add62a18293..102eb35fcf3f 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -110,6 +110,19 @@ config MEDIA_CONTROLLER_DVB
This is currently experimental.
+config MEDIA_CONTROLLER_REQUEST_API
+ bool "Enable Media controller Request API (EXPERIMENTAL)"
+ depends on MEDIA_CONTROLLER && STAGING_MEDIA
+ default n
+ ---help---
+ DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+
+ This option enables the Request API for the Media controller and V4L2
+ interfaces. It is currently needed by a few stateless codec drivers.
+
+ There is currently no intention to provide API or ABI stability for
+ this new API.
+
#
# Video4Linux support
# Only enables if one of the V4L2 types (ATV, webcam, radio) is selected
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 65a933a21e68..f1261cc2b6fa 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -455,7 +455,7 @@ int cec_thread_func(void *_adap)
(adap->needs_hpd &&
(!adap->is_configured && !adap->is_configuring)) ||
kthread_should_stop() ||
- (!adap->transmitting &&
+ (!adap->transmit_in_progress &&
!list_empty(&adap->transmit_queue)),
msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
timeout = err == 0;
@@ -463,7 +463,7 @@ int cec_thread_func(void *_adap)
/* Otherwise we just wait for something to happen. */
wait_event_interruptible(adap->kthread_waitq,
kthread_should_stop() ||
- (!adap->transmitting &&
+ (!adap->transmit_in_progress &&
!list_empty(&adap->transmit_queue)));
}
@@ -488,6 +488,7 @@ int cec_thread_func(void *_adap)
pr_warn("cec-%s: message %*ph timed out\n", adap->name,
adap->transmitting->msg.len,
adap->transmitting->msg.msg);
+ adap->transmit_in_progress = false;
adap->tx_timeouts++;
/* Just give up on this. */
cec_data_cancel(adap->transmitting,
@@ -499,7 +500,7 @@ int cec_thread_func(void *_adap)
* If we are still transmitting, or there is nothing new to
* transmit, then just continue waiting.
*/
- if (adap->transmitting || list_empty(&adap->transmit_queue))
+ if (adap->transmit_in_progress || list_empty(&adap->transmit_queue))
goto unlock;
/* Get a new message to transmit */
@@ -545,6 +546,8 @@ int cec_thread_func(void *_adap)
if (adap->ops->adap_transmit(adap, data->attempts,
signal_free_time, &data->msg))
cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
+ else
+ adap->transmit_in_progress = true;
unlock:
mutex_unlock(&adap->lock);
@@ -575,14 +578,17 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
data = adap->transmitting;
if (!data) {
/*
- * This can happen if a transmit was issued and the cable is
+ * This might happen if a transmit was issued and the cable is
* unplugged while the transmit is ongoing. Ignore this
* transmit in that case.
*/
- dprintk(1, "%s was called without an ongoing transmit!\n",
- __func__);
- goto unlock;
+ if (!adap->transmit_in_progress)
+ dprintk(1, "%s was called without an ongoing transmit!\n",
+ __func__);
+ adap->transmit_in_progress = false;
+ goto wake_thread;
}
+ adap->transmit_in_progress = false;
msg = &data->msg;
@@ -648,7 +654,6 @@ wake_thread:
* for transmitting or to retry the current message.
*/
wake_up_interruptible(&adap->kthread_waitq);
-unlock:
mutex_unlock(&adap->lock);
}
EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
@@ -1432,6 +1437,13 @@ configured:
las->log_addr[i],
cec_phys_addr_exp(adap->phys_addr));
cec_transmit_msg_fh(adap, &msg, NULL, false);
+
+ /* Report Vendor ID */
+ if (adap->log_addrs.vendor_id != CEC_VENDOR_ID_NONE) {
+ cec_msg_device_vendor_id(&msg,
+ adap->log_addrs.vendor_id);
+ cec_transmit_msg_fh(adap, &msg, NULL, false);
+ }
}
adap->kthread_config = NULL;
complete(&adap->config_completion);
@@ -1496,8 +1508,11 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
if (adap->monitor_all_cnt)
WARN_ON(call_op(adap, adap_monitor_all_enable, false));
mutex_lock(&adap->devnode.lock);
- if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
+ if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) {
WARN_ON(adap->ops->adap_enable(adap, false));
+ adap->transmit_in_progress = false;
+ wake_up_interruptible(&adap->kthread_waitq);
+ }
mutex_unlock(&adap->devnode.lock);
if (phys_addr == CEC_PHYS_ADDR_INVALID)
return;
@@ -1505,6 +1520,7 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
mutex_lock(&adap->devnode.lock);
adap->last_initiator = 0xff;
+ adap->transmit_in_progress = false;
if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
adap->ops->adap_enable(adap, true)) {
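
The new transmit_in_progress flag above separates "a message is being
processed" (adap->transmitting, which can be cancelled on timeout) from "the
hardware is still busy on the bus", so the worker no longer starts a new
transmit while the previous one is still signalling. A toy sketch of the
two-state split (field names mirror the driver; the logic is illustrative):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct adapter {
        const char *transmitting;     /* message owned by the worker, or NULL */
        bool transmit_in_progress;    /* hardware still driving the bus */
    };

    static bool can_start_new_transmit(const struct adapter *a, bool queue_empty)
    {
        /* The worker may only pick a new message once the hardware is
         * idle, even if the previous message was already cancelled. */
        return !a->transmit_in_progress && !queue_empty;
    }

    int main(void)
    {
        /* A transmit timed out and was cancelled (transmitting == NULL),
         * but the hardware has not reported completion yet: */
        struct adapter a = { .transmitting = NULL, .transmit_in_progress = true };

        printf("start new? %d\n", can_start_new_transmit(&a, false));    /* 0 */
        a.transmit_in_progress = false;    /* cec_transmit_done_ts() arrived */
        printf("start new? %d\n", can_start_new_transmit(&a, false));    /* 1 */
        return 0;
    }
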
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c
index e4edc930d4ed..cc875dabd765 100644
--- a/drivers/media/cec/cec-core.c
+++ b/drivers/media/cec/cec-core.c
@@ -24,6 +24,10 @@ int cec_debug;
module_param_named(debug, cec_debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
+static bool debug_phys_addr;
+module_param(debug_phys_addr, bool, 0644);
+MODULE_PARM_DESC(debug_phys_addr, "add CEC_CAP_PHYS_ADDR if set");
+
static dev_t cec_dev_t;
/* Active devices */
@@ -270,6 +274,8 @@ struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
adap->log_addrs.cec_version = CEC_OP_CEC_VERSION_2_0;
adap->log_addrs.vendor_id = CEC_VENDOR_ID_NONE;
adap->capabilities = caps;
+ if (debug_phys_addr)
+ adap->capabilities |= CEC_CAP_PHYS_ADDR;
adap->needs_hpd = caps & CEC_CAP_NEEDS_HPD;
adap->available_log_addrs = available_las;
adap->sequence = 0;
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
index 635db8e70ead..8f987bc0dd88 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/cec-pin.c
@@ -601,8 +601,9 @@ static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts)
break;
/* Was the message ACKed? */
ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v;
- if (!ack && !pin->tx_ignore_nack_until_eom &&
- pin->tx_bit / 10 < pin->tx_msg.len && !pin->tx_post_eom) {
+ if (!ack && (!pin->tx_ignore_nack_until_eom ||
+ pin->tx_bit / 10 == pin->tx_msg.len - 1) &&
+ !pin->tx_post_eom) {
/*
* Note: the CEC spec is ambiguous regarding
* what action to take when a NACK appears
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index fa483b95bc5a..d9a590ae7545 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -1769,7 +1769,7 @@ typedef struct { u16 __; u8 _; } __packed x24;
unsigned s; \
\
for (s = 0; s < len; s++) { \
- u8 chr = font8x16[text[s] * 16 + line]; \
+ u8 chr = font8x16[(u8)text[s] * 16 + line]; \
\
if (hdiv == 2 && tpg->hflip) { \
pos[3] = (chr & (0x01 << 6) ? fg : bg); \
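
The (u8) cast added above matters because plain char may be signed: a text
byte of 0x80 or above would otherwise produce a negative font-table index.
A standalone demonstration (the 16-bytes-per-glyph layout follows the macro;
the "bad" result depends on whether char is signed on the target):

    #include <stdio.h>

    static unsigned char font8x16[256 * 16];  /* one 16-byte glyph per byte value */

    int main(void)
    {
        char text[] = { (char)0xe9, 0 };    /* e.g. Latin-1 'é' */

        /* Where char is signed, text[0] is -23 and the index is negative. */
        int bad = text[0] * 16;
        /* The cast forces the 0..255 range, as the patched macro does. */
        int good = (unsigned char)text[0] * 16;

        printf("bad=%d good=%d (table size %zu)\n", bad, good, sizeof(font8x16));
        return 0;
    }
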
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 975ff5669f72..70e8c3366f9c 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -679,11 +679,9 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
* are not in use and can be freed.
*/
mutex_lock(&q->mmap_lock);
- if (q->memory == VB2_MEMORY_MMAP && __buffers_in_use(q)) {
- mutex_unlock(&q->mmap_lock);
- dprintk(1, "memory in use, cannot free\n");
- return -EBUSY;
- }
+ if (debug && q->memory == VB2_MEMORY_MMAP &&
+ __buffers_in_use(q))
+ dprintk(1, "memory in use, orphaning buffers\n");
/*
* Call queue_cancel to clean up any buffers in the
@@ -812,6 +810,9 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q->memory = memory;
q->waiting_for_buffers = !q->is_output;
+ } else if (q->memory != memory) {
+ dprintk(1, "memory model mismatch\n");
+ return -EINVAL;
}
num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
@@ -947,7 +948,7 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
}
atomic_dec(&q->owned_by_drv_count);
- if (vb->req_obj.req) {
+ if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
/* This is not supported at the moment */
WARN_ON(state == VB2_BUF_STATE_REQUEUEING);
media_request_object_unbind(&vb->req_obj);
@@ -1359,8 +1360,12 @@ static void vb2_req_release(struct media_request_object *obj)
{
struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
- if (vb->state == VB2_BUF_STATE_IN_REQUEST)
+ if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
vb->state = VB2_BUF_STATE_DEQUEUED;
+ if (vb->request)
+ media_request_put(vb->request);
+ vb->request = NULL;
+ }
}
static const struct media_request_object_ops vb2_core_req_ops = {
@@ -1528,6 +1533,18 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
return ret;
vb->state = VB2_BUF_STATE_IN_REQUEST;
+
+ /*
+ * Increment the refcount and store the request.
+ * The request refcount is decremented again when the
+ * buffer is dequeued. This is to prevent vb2_buffer_done()
+ * from freeing the request from interrupt context, which can
+ * happen if the application closed the request fd after
+ * queueing the request.
+ */
+ media_request_get(req);
+ vb->request = req;
+
/* Fill buffer information for the userspace */
if (pb) {
call_void_bufop(q, copy_timestamp, vb, pb);
@@ -1749,10 +1766,6 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
vb->planes[i].dbuf_mapped = 0;
}
- if (vb->req_obj.req) {
- media_request_object_unbind(&vb->req_obj);
- media_request_object_put(&vb->req_obj);
- }
call_void_bufop(q, init_buffer, vb);
}
@@ -1797,6 +1810,14 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
/* go back to dequeued state */
__vb2_dqbuf(vb);
+ if (WARN_ON(vb->req_obj.req)) {
+ media_request_object_unbind(&vb->req_obj);
+ media_request_object_put(&vb->req_obj);
+ }
+ if (vb->request)
+ media_request_put(vb->request);
+ vb->request = NULL;
+
dprintk(2, "dqbuf of buffer %d, with state %d\n",
vb->index, vb->state);
@@ -1903,6 +1924,14 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
vb->prepared = false;
}
__vb2_dqbuf(vb);
+
+ if (vb->req_obj.req) {
+ media_request_object_unbind(&vb->req_obj);
+ media_request_object_put(&vb->req_obj);
+ }
+ if (vb->request)
+ media_request_put(vb->request);
+ vb->request = NULL;
}
}
@@ -1940,10 +1969,8 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
if (ret)
return ret;
ret = vb2_start_streaming(q);
- if (ret) {
- __vb2_queue_cancel(q);
+ if (ret)
return ret;
- }
}
q->streaming = 1;
@@ -2117,9 +2144,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
return -EINVAL;
}
}
+
+ mutex_lock(&q->mmap_lock);
+
if (vb2_fileio_is_active(q)) {
dprintk(1, "mmap: file io in progress\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto unlock;
}
/*
@@ -2127,7 +2158,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
*/
ret = __find_plane_by_offset(q, off, &buffer, &plane);
if (ret)
- return ret;
+ goto unlock;
vb = q->bufs[buffer];
@@ -2140,11 +2171,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
if (length < (vma->vm_end - vma->vm_start)) {
dprintk(1,
"MMAP invalid, as it would overflow buffer length\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
- mutex_lock(&q->mmap_lock);
ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+
+unlock:
mutex_unlock(&q->mmap_lock);
if (ret)
return ret;
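
The vb2_mmap() change above widens the mmap_lock critical section to cover
the fileio and plane-lookup checks and routes every failure through a single
unlock label. A minimal userspace sketch of that single-exit locking shape
(pthreads stand in for the kernel mutex):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;
    static int fileio_active;

    static int do_mmap(int bad_offset)
    {
        int ret = 0;

        /* Take the lock before the checks it protects... */
        pthread_mutex_lock(&mmap_lock);

        if (fileio_active) {
            ret = -16;    /* -EBUSY */
            goto unlock;
        }
        if (bad_offset) {
            ret = -22;    /* -EINVAL */
            goto unlock;
        }
        /* ...perform the mapping under the lock... */

    unlock:
        /* ...and leave through one unlock label on every path. */
        pthread_mutex_unlock(&mmap_lock);
        return ret;
    }

    int main(void)
    {
        printf("%d\n", do_mmap(0));    /* 0 */
        printf("%d\n", do_mmap(1));    /* -22 */
        return 0;
    }
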
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index a17033ab2c22..3a0ca2f9854f 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -158,7 +158,6 @@ static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
return;
check_once = true;
- WARN_ON(1);
pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
if (vb->vb2_queue->allow_zero_bytesused)
@@ -333,10 +332,10 @@ static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b
}
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
- struct v4l2_buffer *b,
- const char *opname,
+ struct v4l2_buffer *b, bool is_prepare,
struct media_request **p_req)
{
+ const char *opname = is_prepare ? "prepare_buf" : "qbuf";
struct media_request *req;
struct vb2_v4l2_buffer *vbuf;
struct vb2_buffer *vb;
@@ -378,6 +377,9 @@ static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *md
return ret;
}
+ if (is_prepare)
+ return 0;
+
if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
if (q->uses_requests) {
dprintk(1, "%s: queue uses requests\n", opname);
@@ -624,15 +626,17 @@ EXPORT_SYMBOL(vb2_querybuf);
static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
{
- *caps = 0;
+ *caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
if (q->io_modes & VB2_MMAP)
*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
if (q->io_modes & VB2_USERPTR)
*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
if (q->io_modes & VB2_DMABUF)
*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
if (q->supports_requests)
*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
+#endif
}
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
@@ -657,7 +661,7 @@ int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
return -EINVAL;
- ret = vb2_queue_or_prepare_buf(q, mdev, b, "prepare_buf", NULL);
+ ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);
return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
@@ -705,6 +709,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
requested_sizes[0] = f->fmt.sdr.buffersize;
break;
case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
requested_sizes[0] = f->fmt.meta.buffersize;
break;
default:
@@ -729,7 +734,7 @@ int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
return -EBUSY;
}
- ret = vb2_queue_or_prepare_buf(q, mdev, b, "qbuf", &req);
+ ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
if (ret)
return ret;
ret = vb2_core_qbuf(q, b->index, b, req);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 961207cf09eb..27a1d4a98d73 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -917,6 +917,9 @@ static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe,
"DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n",
fe->dvb->num, fe->id);
+ dprintk("frequency interval: tuner: %u...%u, frontend: %u...%u",
+ tuner_min, tuner_max, frontend_min, frontend_max);
+
/* If the standard is for satellite, convert frequencies to kHz */
switch (c->delivery_system) {
case SYS_DVBS:
@@ -2587,8 +2590,8 @@ static int dvb_frontend_handle_ioctl(struct file *file,
u8 last = 1;
if (dvb_frontend_debug)
- dprintk("%s switch command: 0x%04lx\n",
- __func__, swcmd);
+ dprintk("switch command: 0x%04lx\n",
+ swcmd);
nexttime = ktime_get_boottime();
if (dvb_frontend_debug)
tv[0] = nexttime;
@@ -2611,8 +2614,8 @@ static int dvb_frontend_handle_ioctl(struct file *file,
dvb_frontend_sleep_until(&nexttime, 8000);
}
if (dvb_frontend_debug) {
- dprintk("%s(%d): switch delay (should be 32k followed by all 8k)\n",
- __func__, fe->dvb->num);
+ dprintk("(adapter %d): switch delay (should be 32k followed by all 8k)\n",
+ fe->dvb->num);
for (i = 1; i < 10; i++)
pr_info("%d: %d\n", i,
(int)ktime_us_delta(tv[i], tv[i - 1]));
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 0cd57013ea25..23b831ce3439 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -1137,16 +1137,8 @@ static int af9033_probe(struct i2c_client *client,
buf[4], buf[5], buf[6], buf[7]);
/* Sleep as chip seems to be partly active by default */
- switch (dev->cfg.tuner) {
- case AF9033_TUNER_IT9135_38:
- case AF9033_TUNER_IT9135_51:
- case AF9033_TUNER_IT9135_52:
- case AF9033_TUNER_IT9135_60:
- case AF9033_TUNER_IT9135_61:
- case AF9033_TUNER_IT9135_62:
- /* IT9135 did not like to sleep at that early */
- break;
- default:
+ /* IT9135 does not like to sleep that early */
+ if (dev->is_af9035) {
ret = regmap_write(dev->regmap, 0x80004c, 0x01);
if (ret)
goto err_regmap_exit;
diff --git a/drivers/media/dvb-frontends/dib0090.c b/drivers/media/dvb-frontends/dib0090.c
index 44a074261e69..4813a88eb9f7 100644
--- a/drivers/media/dvb-frontends/dib0090.c
+++ b/drivers/media/dvb-frontends/dib0090.c
@@ -1072,45 +1072,45 @@ static void dib0090_set_bbramp_pwm(struct dib0090_state *state, const u16 * cfg)
void dib0090_pwm_gain_reset(struct dvb_frontend *fe)
{
struct dib0090_state *state = fe->tuner_priv;
- u16 *bb_ramp = (u16 *)&bb_ramp_pwm_normal; /* default baseband config */
- u16 *rf_ramp = NULL;
+ const u16 *bb_ramp = bb_ramp_pwm_normal; /* default baseband config */
+ const u16 *rf_ramp = NULL;
u8 en_pwm_rf_mux = 1;
/* reset the AGC */
if (state->config->use_pwm_agc) {
if (state->current_band == BAND_CBAND) {
if (state->identity.in_soc) {
- bb_ramp = (u16 *)&bb_ramp_pwm_normal_socs;
+ bb_ramp = bb_ramp_pwm_normal_socs;
if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1)
- rf_ramp = (u16 *)&rf_ramp_pwm_cband_8090;
+ rf_ramp = rf_ramp_pwm_cband_8090;
else if (state->identity.version == SOC_7090_P1G_11R1 || state->identity.version == SOC_7090_P1G_21R1) {
if (state->config->is_dib7090e) {
if (state->rf_ramp == NULL)
- rf_ramp = (u16 *)&rf_ramp_pwm_cband_7090e_sensitivity;
+ rf_ramp = rf_ramp_pwm_cband_7090e_sensitivity;
else
- rf_ramp = (u16 *)state->rf_ramp;
+ rf_ramp = state->rf_ramp;
} else
- rf_ramp = (u16 *)&rf_ramp_pwm_cband_7090p;
+ rf_ramp = rf_ramp_pwm_cband_7090p;
}
} else
- rf_ramp = (u16 *)&rf_ramp_pwm_cband;
+ rf_ramp = rf_ramp_pwm_cband;
} else
if (state->current_band == BAND_VHF) {
if (state->identity.in_soc) {
- bb_ramp = (u16 *)&bb_ramp_pwm_normal_socs;
+ bb_ramp = bb_ramp_pwm_normal_socs;
/* rf_ramp = &rf_ramp_pwm_vhf_socs; */ /* TODO */
} else
- rf_ramp = (u16 *)&rf_ramp_pwm_vhf;
+ rf_ramp = rf_ramp_pwm_vhf;
} else if (state->current_band == BAND_UHF) {
if (state->identity.in_soc) {
- bb_ramp = (u16 *)&bb_ramp_pwm_normal_socs;
+ bb_ramp = bb_ramp_pwm_normal_socs;
if (state->identity.version == SOC_8090_P1G_11R1 || state->identity.version == SOC_8090_P1G_21R1)
- rf_ramp = (u16 *)&rf_ramp_pwm_uhf_8090;
+ rf_ramp = rf_ramp_pwm_uhf_8090;
else if (state->identity.version == SOC_7090_P1G_11R1 || state->identity.version == SOC_7090_P1G_21R1)
- rf_ramp = (u16 *)&rf_ramp_pwm_uhf_7090;
+ rf_ramp = rf_ramp_pwm_uhf_7090;
} else
- rf_ramp = (u16 *)&rf_ramp_pwm_uhf;
+ rf_ramp = rf_ramp_pwm_uhf;
}
if (rf_ramp)
dib0090_set_rframp_pwm(state, rf_ramp);
@@ -1416,9 +1416,9 @@ int dib0090_update_rframp_7090(struct dvb_frontend *fe, u8 cfg_sensitivity)
}
if (cfg_sensitivity)
- state->rf_ramp = (const u16 *)&rf_ramp_pwm_cband_7090e_sensitivity;
+ state->rf_ramp = rf_ramp_pwm_cband_7090e_sensitivity;
else
- state->rf_ramp = (const u16 *)&rf_ramp_pwm_cband_7090e_aci;
+ state->rf_ramp = rf_ramp_pwm_cband_7090e_aci;
dib0090_pwm_gain_reset(fe);
return 0;
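
The dib0090 change is purely about const-correctness: once the local
bb_ramp/rf_ramp pointers are declared const u16 *, the (u16 *) casts on the
static ramp tables become unnecessary, because an array of const u16 decays
to exactly that pointer type. A tiny illustration (hypothetical table names):

    static const unsigned short bb_ramp_normal[] = { 0, 1, 2 };
    static const unsigned short rf_ramp_sensitivity[] = { 3, 4, 5 };

    int main(void)
    {
        int sensitivity = 1;
        /* const-qualified pointer: the const arrays decay to it directly,
         * so no cast is needed and no constness is discarded. */
        const unsigned short *ramp = bb_ramp_normal;

        if (sensitivity)
            ramp = rf_ramp_sensitivity;

        return ramp[0] == 3 ? 0 : 1;
    }
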
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index 58387860b62d..2818e8def1b3 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -1871,10 +1871,13 @@ static u32 dib7000p_get_time_us(struct dvb_frontend *demod)
break;
}
- interleaving = interleaving;
-
denom = bits_per_symbol * rate_num * fft_div * 384;
+ /*
+ * FIXME: check if the math makes sense. If so, fill the
+ * interleaving var.
+ */
+
/* If calculus gets wrong, wait for 1s for the next stats */
if (!denom)
return 0;
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 84ac3f73f8fe..8ea1e45be710 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1474,9 +1474,11 @@ static int scu_command(struct drxk_state *state,
/* assume that the command register is ready
since it is checked afterwards */
- for (ii = parameter_len - 1; ii >= 0; ii -= 1) {
- buffer[cnt++] = (parameter[ii] & 0xFF);
- buffer[cnt++] = ((parameter[ii] >> 8) & 0xFF);
+ if (parameter) {
+ for (ii = parameter_len - 1; ii >= 0; ii -= 1) {
+ buffer[cnt++] = (parameter[ii] & 0xFF);
+ buffer[cnt++] = ((parameter[ii] >> 8) & 0xFF);
+ }
}
buffer[cnt++] = (cmd & 0xFF);
buffer[cnt++] = ((cmd >> 8) & 0xFF);
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 6d4b2eec67b4..29836c1a40e9 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -80,8 +80,8 @@ struct dvb_pll_desc {
static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
.name = "Thomson dtt7579",
- .min = 177000000,
- .max = 858000000,
+ .min = 177 * MHz,
+ .max = 858 * MHz,
.iffreq= 36166667,
.sleepdata = (u8[]){ 2, 0xb4, 0x03 },
.count = 4,
@@ -102,8 +102,8 @@ static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf)
static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
.name = "Thomson dtt759x",
- .min = 177000000,
- .max = 896000000,
+ .min = 177 * MHz,
+ .max = 896 * MHz,
.set = thomson_dtt759x_bw,
.iffreq= 36166667,
.sleepdata = (u8[]){ 2, 0x84, 0x03 },
@@ -126,8 +126,8 @@ static void thomson_dtt7520x_bw(struct dvb_frontend *fe, u8 *buf)
static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
.name = "Thomson dtt7520x",
- .min = 185000000,
- .max = 900000000,
+ .min = 185 * MHz,
+ .max = 900 * MHz,
.set = thomson_dtt7520x_bw,
.iffreq = 36166667,
.count = 7,
@@ -144,8 +144,8 @@ static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
static const struct dvb_pll_desc dvb_pll_lg_z201 = {
.name = "LG z201",
- .min = 174000000,
- .max = 862000000,
+ .min = 174 * MHz,
+ .max = 862 * MHz,
.iffreq= 36166667,
.sleepdata = (u8[]){ 2, 0xbc, 0x03 },
.count = 5,
@@ -160,8 +160,8 @@ static const struct dvb_pll_desc dvb_pll_lg_z201 = {
static const struct dvb_pll_desc dvb_pll_unknown_1 = {
.name = "unknown 1", /* used by dntv live dvb-t */
- .min = 174000000,
- .max = 862000000,
+ .min = 174 * MHz,
+ .max = 862 * MHz,
.iffreq= 36166667,
.count = 9,
.entries = {
@@ -182,8 +182,8 @@ static const struct dvb_pll_desc dvb_pll_unknown_1 = {
*/
static const struct dvb_pll_desc dvb_pll_tua6010xs = {
.name = "Infineon TUA6010XS",
- .min = 44250000,
- .max = 858000000,
+ .min = 44250 * kHz,
+ .max = 858 * MHz,
.iffreq= 36125000,
.count = 3,
.entries = {
@@ -196,8 +196,8 @@ static const struct dvb_pll_desc dvb_pll_tua6010xs = {
/* Panasonic env57h1xd5 (some Philips PLL ?) */
static const struct dvb_pll_desc dvb_pll_env57h1xd5 = {
.name = "Panasonic ENV57H1XD5",
- .min = 44250000,
- .max = 858000000,
+ .min = 44250 * kHz,
+ .max = 858 * MHz,
.iffreq= 36125000,
.count = 4,
.entries = {
@@ -220,8 +220,8 @@ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf)
static const struct dvb_pll_desc dvb_pll_tda665x = {
.name = "Philips TDA6650/TDA6651",
- .min = 44250000,
- .max = 858000000,
+ .min = 44250 * kHz,
+ .max = 858 * MHz,
.set = tda665x_bw,
.iffreq= 36166667,
.initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab },
@@ -254,8 +254,8 @@ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf)
static const struct dvb_pll_desc dvb_pll_tua6034 = {
.name = "Infineon TUA6034",
- .min = 44250000,
- .max = 858000000,
+ .min = 44250 * kHz,
+ .max = 858 * MHz,
.iffreq= 36166667,
.count = 3,
.set = tua6034_bw,
@@ -278,8 +278,8 @@ static void tded4_bw(struct dvb_frontend *fe, u8 *buf)
static const struct dvb_pll_desc dvb_pll_tded4 = {
.name = "ALPS TDED4",
- .min = 47000000,
- .max = 863000000,
+ .min = 47 * MHz,
+ .max = 863 * MHz,
.iffreq= 36166667,
.set = tded4_bw,
.count = 4,
@@ -296,8 +296,8 @@ static const struct dvb_pll_desc dvb_pll_tded4 = {
*/
static const struct dvb_pll_desc dvb_pll_tdhu2 = {
.name = "ALPS TDHU2",
- .min = 54000000,
- .max = 864000000,
+ .min = 54 * MHz,
+ .max = 864 * MHz,
.iffreq= 44000000,
.count = 4,
.entries = {
@@ -313,8 +313,8 @@ static const struct dvb_pll_desc dvb_pll_tdhu2 = {
*/
static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
.name = "Samsung TBMV30111IN / TBMV30712IN1",
- .min = 54000000,
- .max = 860000000,
+ .min = 54 * MHz,
+ .max = 860 * MHz,
.iffreq= 44000000,
.count = 6,
.entries = {
@@ -332,8 +332,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
*/
static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
.name = "Philips SD1878",
- .min = 950000,
- .max = 2150000,
+ .min = 950 * MHz,
+ .max = 2150 * MHz,
.iffreq= 249, /* zero-IF, offset 249 is to round up */
.count = 4,
.entries = {
@@ -398,8 +398,8 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf)
static const struct dvb_pll_desc dvb_pll_opera1 = {
.name = "Opera Tuner",
- .min = 900000,
- .max = 2250000,
+ .min = 900 * MHz,
+ .max = 2250 * MHz,
.initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 },
.initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 },
.iffreq= 0,
@@ -445,8 +445,8 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf)
/* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */
static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
.name = "Samsung DTOS403IH102A",
- .min = 44250000,
- .max = 858000000,
+ .min = 44250 * kHz,
+ .max = 858 * MHz,
.iffreq = 36125000,
.count = 8,
.set = samsung_dtos403ih102a_set,
@@ -465,8 +465,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
/* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */
static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
.name = "Samsung TDTC9251DH0",
- .min = 48000000,
- .max = 863000000,
+ .min = 48 * MHz,
+ .max = 863 * MHz,
.iffreq = 36166667,
.count = 3,
.entries = {
@@ -479,8 +479,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
/* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */
static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
.name = "Samsung TBDU18132",
- .min = 950000,
- .max = 2150000, /* guesses */
+ .min = 950 * MHz,
+ .max = 2150 * MHz, /* guesses */
.iffreq = 0,
.count = 2,
.entries = {
@@ -500,8 +500,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
/* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */
static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
.name = "Samsung TBMU24112",
- .min = 950000,
- .max = 2150000, /* guesses */
+ .min = 950 * MHz,
+ .max = 2150 * MHz, /* guesses */
.iffreq = 0,
.count = 2,
.entries = {
@@ -521,8 +521,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
* 822 - 862 1 * 0 0 1 0 0 0 0x88 */
static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
.name = "ALPS TDEE4",
- .min = 47000000,
- .max = 862000000,
+ .min = 47 * MHz,
+ .max = 862 * MHz,
.iffreq = 36125000,
.count = 4,
.entries = {
@@ -537,8 +537,8 @@ static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
/* CP cur. 50uA, AGC takeover: 103dBuV, PORT3 on */
static const struct dvb_pll_desc dvb_pll_tua6034_friio = {
.name = "Infineon TUA6034 ISDB-T (Friio)",
- .min = 90000000,
- .max = 770000000,
+ .min = 90 * MHz,
+ .max = 770 * MHz,
.iffreq = 57000000,
.initdata = (u8[]){ 4, 0x9a, 0x50, 0xb2, 0x08 },
.sleepdata = (u8[]){ 4, 0x9a, 0x70, 0xb3, 0x0b },
@@ -553,8 +553,8 @@ static const struct dvb_pll_desc dvb_pll_tua6034_friio = {
/* Philips TDA6651 ISDB-T, used in Earthsoft PT1 */
static const struct dvb_pll_desc dvb_pll_tda665x_earth_pt1 = {
.name = "Philips TDA6651 ISDB-T (EarthSoft PT1)",
- .min = 90000000,
- .max = 770000000,
+ .min = 90 * MHz,
+ .max = 770 * MHz,
.iffreq = 57000000,
.initdata = (u8[]){ 5, 0x0e, 0x7f, 0xc1, 0x80, 0x80 },
.count = 10,
@@ -610,9 +610,6 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
u32 div;
int i;
- if (frequency && (frequency < desc->min || frequency > desc->max))
- return -EINVAL;
-
for (i = 0; i < desc->count; i++) {
if (frequency > desc->entries[i].limit)
continue;
@@ -799,7 +796,6 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
struct dvb_pll_priv *priv = NULL;
int ret;
const struct dvb_pll_desc *desc;
- struct dtv_frontend_properties *c = &fe->dtv_property_cache;
b1 = kmalloc(1, GFP_KERNEL);
if (!b1)
@@ -845,18 +841,12 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
strncpy(fe->ops.tuner_ops.info.name, desc->name,
sizeof(fe->ops.tuner_ops.info.name));
- switch (c->delivery_system) {
- case SYS_DVBS:
- case SYS_DVBS2:
- case SYS_TURBO:
- case SYS_ISDBS:
- fe->ops.tuner_ops.info.frequency_min_hz = desc->min * kHz;
- fe->ops.tuner_ops.info.frequency_max_hz = desc->max * kHz;
- break;
- default:
- fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
- fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
- }
+
+ fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
+ fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
+
+ dprintk("%s tuner, frequency range: %u...%u\n",
+ desc->name, desc->min, desc->max);
if (!desc->initdata)
fe->ops.tuner_ops.init = NULL;
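
The dvb-pll table rewrite stores .min/.max uniformly in Hz via kHz/MHz
multiplier macros, which is what lets dvb_pll_attach() above drop the
per-delivery-system unit conversion. A sketch of the convention (assuming
kHz/MHz are plain numeric multipliers, as the table arithmetic implies):

    #include <stdio.h>

    #define kHz 1000UL
    #define MHz (1000UL * kHz)

    struct pll_desc {
        unsigned long min;    /* Hz */
        unsigned long max;    /* Hz */
    };

    int main(void)
    {
        /* 44250 * kHz and 858 * MHz read as units and are already in Hz,
         * so no per-delivery-system scaling is needed at attach time. */
        struct pll_desc d = { .min = 44250 * kHz, .max = 858 * MHz };

        printf("%lu...%lu Hz\n", d.min, d.max);
        return 0;
    }
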
diff --git a/drivers/media/dvb-frontends/lgdt3306a.c b/drivers/media/dvb-frontends/lgdt3306a.c
index 0e1f5daaf20c..cee9c83e48de 100644
--- a/drivers/media/dvb-frontends/lgdt3306a.c
+++ b/drivers/media/dvb-frontends/lgdt3306a.c
@@ -2205,15 +2205,13 @@ static int lgdt3306a_probe(struct i2c_client *client,
struct dvb_frontend *fe;
int ret;
- config = kzalloc(sizeof(struct lgdt3306a_config), GFP_KERNEL);
+ config = kmemdup(client->dev.platform_data,
+ sizeof(struct lgdt3306a_config), GFP_KERNEL);
if (config == NULL) {
ret = -ENOMEM;
goto fail;
}
- memcpy(config, client->dev.platform_data,
- sizeof(struct lgdt3306a_config));
-
config->i2c_addr = client->addr;
fe = lgdt3306a_attach(config, client->adapter);
if (fe == NULL) {
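
The lgdt3306a change swaps an open-coded kzalloc()+memcpy() pair for
kmemdup(). A userspace equivalent for illustration (the kernel helper
additionally takes gfp flags; kzalloc's zeroing was redundant since the
whole buffer was overwritten anyway):

    #include <stdlib.h>
    #include <string.h>

    /* Userspace stand-in for kmemdup(src, len, gfp). */
    static void *memdup(const void *src, size_t len)
    {
        void *p = malloc(len);

        if (p)
            memcpy(p, src, len);
        return p;
    }

    int main(void)
    {
        int src = 42;
        int *copy = memdup(&src, sizeof(src));
        int ok = copy && *copy == 42;

        free(copy);
        return ok ? 0 : 1;
    }
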
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 6191315f5970..290b9eab099f 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -781,7 +781,7 @@ static int set_input(struct dvb_frontend *fe, int input)
return 0;
}
-static struct dvb_frontend_ops mxl_ops = {
+static const struct dvb_frontend_ops mxl_ops = {
.delsys = { SYS_DVBS, SYS_DVBS2, SYS_DSS },
.info = {
.name = "MaxLinear MxL5xx DVB-S/S2 tuner-demodulator",
diff --git a/drivers/media/dvb-frontends/tda18271c2dd.c b/drivers/media/dvb-frontends/tda18271c2dd.c
index 5ce58612315d..eeb2318c102f 100644
--- a/drivers/media/dvb-frontends/tda18271c2dd.c
+++ b/drivers/media/dvb-frontends/tda18271c2dd.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/firmware.h>
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 1c933b2cf760..3ef5df1648d7 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -968,7 +968,8 @@ static int get_ca_object_length(struct avc_response_frame *r)
return r->operand[7];
}
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
struct avc_response_frame *r = (void *)fdtv->avc_data;
@@ -1009,7 +1010,8 @@ out:
return ret;
}
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
struct avc_response_frame *r = (void *)fdtv->avc_data;
diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
index 876cdec8329b..009905a19947 100644
--- a/drivers/media/firewire/firedtv.h
+++ b/drivers/media/firewire/firedtv.h
@@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
struct dvb_diseqc_master_cmd *diseqcmd);
void avc_remote_ctrl_work(struct work_struct *work);
int avc_register_remote_control(struct firedtv *fdtv);
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len);
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len);
int avc_ca_reset(struct firedtv *fdtv);
int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 704af210e270..4c936e129500 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -61,6 +61,7 @@ config VIDEO_TDA1997X
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
depends on SND_SOC
select SND_PCM
+ select HDMI
---help---
V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
@@ -595,6 +596,18 @@ config VIDEO_APTINA_PLL
config VIDEO_SMIAPP_PLL
tristate
+config VIDEO_IMX214
+ tristate "Sony IMX214 sensor support"
+ depends on GPIOLIB && I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ depends on V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX214 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx214.
+
config VIDEO_IMX258
tristate "Sony IMX258 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
@@ -610,6 +623,7 @@ config VIDEO_IMX274
tristate "Sony IMX274 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
+ select REGMAP_I2C
---help---
This is a V4L2 sensor driver for the Sony IMX274
CMOS image sensor.
@@ -846,6 +860,7 @@ config VIDEO_MT9M032
config VIDEO_MT9M111
tristate "mt9m111, mt9m112 and mt9m131 support"
depends on I2C && VIDEO_V4L2
+ select V4L2_FWNODE
help
This driver supports MT9M111, MT9M112 and MT9M131 cameras from
Micron/Aptina
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 260d4d9ec2a1..65fae7732de0 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_VIDEO_I2C) += video-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
+obj-$(CONFIG_VIDEO_IMX214) += imx214.o
obj-$(CONFIG_VIDEO_IMX258) += imx258.o
obj-$(CONFIG_VIDEO_IMX274) += imx274.o
obj-$(CONFIG_VIDEO_IMX319) += imx319.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 5b008b0002c0..aa8b04cfed0f 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -578,7 +578,7 @@ static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 99697baad2ea..6f3dc8862622 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -180,6 +180,9 @@
#define V4L2_CID_ADV_FAST_SWITCH (V4L2_CID_USER_ADV7180_BASE + 0x00)
+/* Initial number of frames to skip to avoid possible garbage */
+#define ADV7180_NUM_OF_SKIP_FRAMES 2
+
struct adv7180_state;
#define ADV7180_FLAG_RESET_POWERED BIT(0)
@@ -769,6 +772,13 @@ static int adv7180_g_mbus_config(struct v4l2_subdev *sd,
return 0;
}
+static int adv7180_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
+{
+ *frames = ADV7180_NUM_OF_SKIP_FRAMES;
+
+ return 0;
+}
+
static int adv7180_g_pixelaspect(struct v4l2_subdev *sd, struct v4l2_fract *aspect)
{
struct adv7180_state *state = to_state(sd);
@@ -849,10 +859,15 @@ static const struct v4l2_subdev_pad_ops adv7180_pad_ops = {
.get_fmt = adv7180_get_pad_format,
};
+static const struct v4l2_subdev_sensor_ops adv7180_sensor_ops = {
+ .g_skip_frames = adv7180_get_skip_frames,
+};
+
static const struct v4l2_subdev_ops adv7180_ops = {
.core = &adv7180_core_ops,
.video = &adv7180_video_ops,
.pad = &adv7180_pad_ops,
+ .sensor = &adv7180_sensor_ops,
};
static irqreturn_t adv7180_irq(int irq, void *devid)
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index f3899cc84e27..cec5ebb1c9e6 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -130,7 +130,7 @@ static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
+ V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT,
ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
@@ -550,7 +550,7 @@ static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_
buffer[3] = 0;
buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
- if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
return;
}
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 9eb7c70a7712..28a84bf9f8a9 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -27,6 +27,7 @@
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <linux/regmap.h>
+#include <linux/interrupt.h>
#include <media/i2c/adv7604.h>
#include <media/cec.h>
@@ -114,6 +115,11 @@ struct adv76xx_chip_info {
unsigned int fmt_change_digital_mask;
unsigned int cp_csc;
+ unsigned int cec_irq_status;
+ unsigned int cec_rx_enable;
+ unsigned int cec_rx_enable_mask;
+ bool cec_irq_swap;
+
const struct adv76xx_format_info *formats;
unsigned int nformats;
@@ -766,7 +772,7 @@ static const struct v4l2_dv_timings_cap adv7604_timings_cap_analog = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
@@ -777,7 +783,7 @@ static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
@@ -2003,10 +2009,11 @@ static void adv76xx_cec_tx_raw_status(struct v4l2_subdev *sd, u8 tx_raw_status)
static void adv76xx_cec_isr(struct v4l2_subdev *sd, bool *handled)
{
struct adv76xx_state *state = to_state(sd);
+ const struct adv76xx_chip_info *info = state->info;
u8 cec_irq;
/* cec controller */
- cec_irq = io_read(sd, 0x4d) & 0x0f;
+ cec_irq = io_read(sd, info->cec_irq_status) & 0x0f;
if (!cec_irq)
return;
@@ -2024,15 +2031,21 @@ static void adv76xx_cec_isr(struct v4l2_subdev *sd, bool *handled)
for (i = 0; i < msg.len; i++)
msg.msg[i] = cec_read(sd, i + 0x15);
- cec_write(sd, 0x26, 0x01); /* re-enable rx */
+ cec_write(sd, info->cec_rx_enable,
+ info->cec_rx_enable_mask); /* re-enable rx */
cec_received_msg(state->cec_adap, &msg);
}
}
- /* note: the bit order is swapped between 0x4d and 0x4e */
- cec_irq = ((cec_irq & 0x08) >> 3) | ((cec_irq & 0x04) >> 1) |
- ((cec_irq & 0x02) << 1) | ((cec_irq & 0x01) << 3);
- io_write(sd, 0x4e, cec_irq);
+ if (info->cec_irq_swap) {
+ /*
+ * Note: the bit order is swapped between 0x4d and 0x4e
+ * on adv7604
+ */
+ cec_irq = ((cec_irq & 0x08) >> 3) | ((cec_irq & 0x04) >> 1) |
+ ((cec_irq & 0x02) << 1) | ((cec_irq & 0x01) << 3);
+ }
+ io_write(sd, info->cec_irq_status + 1, cec_irq);
if (handled)
*handled = true;
@@ -2041,6 +2054,7 @@ static void adv76xx_cec_isr(struct v4l2_subdev *sd, bool *handled)
static int adv76xx_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
struct adv76xx_state *state = cec_get_drvdata(adap);
+ const struct adv76xx_chip_info *info = state->info;
struct v4l2_subdev *sd = &state->sd;
if (!state->cec_enabled_adap && enable) {
@@ -2052,11 +2066,11 @@ static int adv76xx_cec_adap_enable(struct cec_adapter *adap, bool enable)
/* tx: arbitration lost */
/* tx: retry timeout */
/* rx: ready */
- io_write_clr_set(sd, 0x50, 0x0f, 0x0f);
- cec_write(sd, 0x26, 0x01); /* enable rx */
+ io_write_clr_set(sd, info->cec_irq_status + 3, 0x0f, 0x0f);
+ cec_write(sd, info->cec_rx_enable, info->cec_rx_enable_mask);
} else if (state->cec_enabled_adap && !enable) {
/* disable cec interrupts */
- io_write_clr_set(sd, 0x50, 0x0f, 0x00);
+ io_write_clr_set(sd, info->cec_irq_status + 3, 0x0f, 0x00);
/* disable address mask 1-3 */
cec_write_clr_set(sd, 0x27, 0x70, 0x00);
/* power down cec section */
@@ -2221,6 +2235,16 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
return 0;
}
+static irqreturn_t adv76xx_irq_handler(int irq, void *dev_id)
+{
+ struct adv76xx_state *state = dev_id;
+ bool handled = false;
+
+ adv76xx_isr(&state->sd, 0, &handled);
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
static int adv76xx_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
{
struct adv76xx_state *state = to_state(sd);
@@ -2420,7 +2444,7 @@ static int adv76xx_read_infoframe(struct v4l2_subdev *sd, int index,
buffer[i + 3] = infoframe_read(sd,
adv76xx_cri[index].payload_addr + i);
- if (hdmi_infoframe_unpack(frame, buffer) < 0) {
+ if (hdmi_infoframe_unpack(frame, buffer, sizeof(buffer)) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__,
adv76xx_cri[index].desc);
return -ENOENT;
@@ -2960,6 +2984,10 @@ static const struct adv76xx_chip_info adv76xx_chip_info[] = {
.cable_det_mask = 0x1e,
.fmt_change_digital_mask = 0xc1,
.cp_csc = 0xfc,
+ .cec_irq_status = 0x4d,
+ .cec_rx_enable = 0x26,
+ .cec_rx_enable_mask = 0x01,
+ .cec_irq_swap = true,
.formats = adv7604_formats,
.nformats = ARRAY_SIZE(adv7604_formats),
.set_termination = adv7604_set_termination,
@@ -3006,6 +3034,9 @@ static const struct adv76xx_chip_info adv76xx_chip_info[] = {
.cable_det_mask = 0x01,
.fmt_change_digital_mask = 0x03,
.cp_csc = 0xf4,
+ .cec_irq_status = 0x93,
+ .cec_rx_enable = 0x2c,
+ .cec_rx_enable_mask = 0x02,
.formats = adv7611_formats,
.nformats = ARRAY_SIZE(adv7611_formats),
.set_termination = adv7611_set_termination,
@@ -3047,6 +3078,9 @@ static const struct adv76xx_chip_info adv76xx_chip_info[] = {
.cable_det_mask = 0x01,
.fmt_change_digital_mask = 0x03,
.cp_csc = 0xf4,
+ .cec_irq_status = 0x93,
+ .cec_rx_enable = 0x2c,
+ .cec_rx_enable_mask = 0x02,
.formats = adv7612_formats,
.nformats = ARRAY_SIZE(adv7612_formats),
.set_termination = adv7611_set_termination,
@@ -3134,7 +3168,7 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
state->pdata.insert_av_codes = 1;
-	/* Disable the interrupt for now as no DT-based board uses it. */
-	state->pdata.int1_config = ADV76XX_INT1_CONFIG_DISABLED;
+	/* Use the default INT1 pin active level. */
+	state->pdata.int1_config = ADV76XX_INT1_CONFIG_ACTIVE_HIGH;
/* Hardcode the remaining platform data fields. */
state->pdata.disable_pwrdnb = 0;
@@ -3517,6 +3551,16 @@ static int adv76xx_probe(struct i2c_client *client,
if (err)
goto err_entity;
+ if (client->irq) {
+ err = devm_request_threaded_irq(&client->dev,
+ client->irq,
+ NULL, adv76xx_irq_handler,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ client->name, state);
+ if (err)
+ goto err_entity;
+ }
+
#if IS_ENABLED(CONFIG_VIDEO_ADV7604_CEC)
state->cec_adap = cec_allocate_adapter(&adv76xx_cec_adap_ops,
state, dev_name(&client->dev),
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 4721d49dcf0f..989259488e3d 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -663,7 +663,7 @@ static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
@@ -674,7 +674,7 @@ static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
@@ -2574,7 +2574,7 @@ static void log_infoframe(struct v4l2_subdev *sd, struct adv7842_cfg_read_infofr
for (i = 0; i < len; i++)
buffer[i + 3] = infoframe_read(sd, cri->payload_addr + i);
- if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
return;
}
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
new file mode 100644
index 000000000000..ec3d1b855f62
--- /dev/null
+++ b/drivers/media/i2c/imx214.c
@@ -0,0 +1,1118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * imx214.c - imx214 sensor driver
+ *
+ * Copyright 2018 Qtechnology A/S
+ *
+ * Ricardo Ribalda <ricardo.ribalda@gmail.com>
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <media/media-entity.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define IMX214_DEFAULT_CLK_FREQ 24000000
+#define IMX214_DEFAULT_LINK_FREQ 480000000
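+/* pixel rate = 2 * link frequency * 4 lanes / 10 bits per pixel */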
+#define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10)
+#define IMX214_FPS 30
+#define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10
+
+static const char * const imx214_supply_name[] = {
+ "vdda",
+ "vddd",
+ "vdddo",
+};
+
+#define IMX214_NUM_SUPPLIES ARRAY_SIZE(imx214_supply_name)
+
+struct imx214 {
+ struct device *dev;
+ struct clk *xclk;
+ struct regmap *regmap;
+
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_rect crop;
+
+ struct v4l2_ctrl_handler ctrls;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *exposure;
+
+ struct regulator_bulk_data supplies[IMX214_NUM_SUPPLIES];
+
+ struct gpio_desc *enable_gpio;
+
+ /*
+ * Serialize control access, get/set format, get selection
+ * and start streaming.
+ */
+ struct mutex mutex;
+
+ bool streaming;
+};
+
+struct reg_8 {
+ u16 addr;
+ u8 val;
+};
+
+enum {
+ IMX214_TABLE_WAIT_MS = 0,
+ IMX214_TABLE_END,
+ IMX214_MAX_RETRIES,
+ IMX214_WAIT_MS
+};
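+
+/*
+ * IMX214_TABLE_WAIT_MS entries encode a delay (in ms) in their val field;
+ * IMX214_TABLE_END terminates a register table.
+ */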
+
+/* From imx214_mode_tbls.h */
+static const struct reg_8 mode_4096x2304[] = {
+ {0x0114, 0x03},
+ {0x0220, 0x00},
+ {0x0221, 0x11},
+ {0x0222, 0x01},
+ {0x0340, 0x0C},
+ {0x0341, 0x7A},
+ {0x0342, 0x13},
+ {0x0343, 0x90},
+ {0x0344, 0x00},
+ {0x0345, 0x38},
+ {0x0346, 0x01},
+ {0x0347, 0x98},
+ {0x0348, 0x10},
+ {0x0349, 0x37},
+ {0x034A, 0x0A},
+ {0x034B, 0x97},
+ {0x0381, 0x01},
+ {0x0383, 0x01},
+ {0x0385, 0x01},
+ {0x0387, 0x01},
+ {0x0900, 0x00},
+ {0x0901, 0x00},
+ {0x0902, 0x00},
+ {0x3000, 0x35},
+ {0x3054, 0x01},
+ {0x305C, 0x11},
+
+ {0x0112, 0x0A},
+ {0x0113, 0x0A},
+ {0x034C, 0x10},
+ {0x034D, 0x00},
+ {0x034E, 0x09},
+ {0x034F, 0x00},
+ {0x0401, 0x00},
+ {0x0404, 0x00},
+ {0x0405, 0x10},
+ {0x0408, 0x00},
+ {0x0409, 0x00},
+ {0x040A, 0x00},
+ {0x040B, 0x00},
+ {0x040C, 0x10},
+ {0x040D, 0x00},
+ {0x040E, 0x09},
+ {0x040F, 0x00},
+
+ {0x0301, 0x05},
+ {0x0303, 0x02},
+ {0x0305, 0x03},
+ {0x0306, 0x00},
+ {0x0307, 0x96},
+ {0x0309, 0x0A},
+ {0x030B, 0x01},
+ {0x0310, 0x00},
+
+ {0x0820, 0x12},
+ {0x0821, 0xC0},
+ {0x0822, 0x00},
+ {0x0823, 0x00},
+
+ {0x3A03, 0x09},
+ {0x3A04, 0x50},
+ {0x3A05, 0x01},
+
+ {0x0B06, 0x01},
+ {0x30A2, 0x00},
+
+ {0x30B4, 0x00},
+
+ {0x3A02, 0xFF},
+
+ {0x3011, 0x00},
+ {0x3013, 0x01},
+
+ {0x0202, 0x0C},
+ {0x0203, 0x70},
+ {0x0224, 0x01},
+ {0x0225, 0xF4},
+
+ {0x0204, 0x00},
+ {0x0205, 0x00},
+ {0x020E, 0x01},
+ {0x020F, 0x00},
+ {0x0210, 0x01},
+ {0x0211, 0x00},
+ {0x0212, 0x01},
+ {0x0213, 0x00},
+ {0x0214, 0x01},
+ {0x0215, 0x00},
+ {0x0216, 0x00},
+ {0x0217, 0x00},
+
+ {0x4170, 0x00},
+ {0x4171, 0x10},
+ {0x4176, 0x00},
+ {0x4177, 0x3C},
+ {0xAE20, 0x04},
+ {0xAE21, 0x5C},
+
+ {IMX214_TABLE_WAIT_MS, 10},
+ {0x0138, 0x01},
+ {IMX214_TABLE_END, 0x00}
+};
+
+static const struct reg_8 mode_1920x1080[] = {
+ {0x0114, 0x03},
+ {0x0220, 0x00},
+ {0x0221, 0x11},
+ {0x0222, 0x01},
+ {0x0340, 0x0C},
+ {0x0341, 0x7A},
+ {0x0342, 0x13},
+ {0x0343, 0x90},
+ {0x0344, 0x04},
+ {0x0345, 0x78},
+ {0x0346, 0x03},
+ {0x0347, 0xFC},
+ {0x0348, 0x0B},
+ {0x0349, 0xF7},
+ {0x034A, 0x08},
+ {0x034B, 0x33},
+ {0x0381, 0x01},
+ {0x0383, 0x01},
+ {0x0385, 0x01},
+ {0x0387, 0x01},
+ {0x0900, 0x00},
+ {0x0901, 0x00},
+ {0x0902, 0x00},
+ {0x3000, 0x35},
+ {0x3054, 0x01},
+ {0x305C, 0x11},
+
+ {0x0112, 0x0A},
+ {0x0113, 0x0A},
+ {0x034C, 0x07},
+ {0x034D, 0x80},
+ {0x034E, 0x04},
+ {0x034F, 0x38},
+ {0x0401, 0x00},
+ {0x0404, 0x00},
+ {0x0405, 0x10},
+ {0x0408, 0x00},
+ {0x0409, 0x00},
+ {0x040A, 0x00},
+ {0x040B, 0x00},
+ {0x040C, 0x07},
+ {0x040D, 0x80},
+ {0x040E, 0x04},
+ {0x040F, 0x38},
+
+ {0x0301, 0x05},
+ {0x0303, 0x02},
+ {0x0305, 0x03},
+ {0x0306, 0x00},
+ {0x0307, 0x96},
+ {0x0309, 0x0A},
+ {0x030B, 0x01},
+ {0x0310, 0x00},
+
+ {0x0820, 0x12},
+ {0x0821, 0xC0},
+ {0x0822, 0x00},
+ {0x0823, 0x00},
+
+ {0x3A03, 0x04},
+ {0x3A04, 0xF8},
+ {0x3A05, 0x02},
+
+ {0x0B06, 0x01},
+ {0x30A2, 0x00},
+
+ {0x30B4, 0x00},
+
+ {0x3A02, 0xFF},
+
+ {0x3011, 0x00},
+ {0x3013, 0x01},
+
+ {0x0202, 0x0C},
+ {0x0203, 0x70},
+ {0x0224, 0x01},
+ {0x0225, 0xF4},
+
+ {0x0204, 0x00},
+ {0x0205, 0x00},
+ {0x020E, 0x01},
+ {0x020F, 0x00},
+ {0x0210, 0x01},
+ {0x0211, 0x00},
+ {0x0212, 0x01},
+ {0x0213, 0x00},
+ {0x0214, 0x01},
+ {0x0215, 0x00},
+ {0x0216, 0x00},
+ {0x0217, 0x00},
+
+ {0x4170, 0x00},
+ {0x4171, 0x10},
+ {0x4176, 0x00},
+ {0x4177, 0x3C},
+ {0xAE20, 0x04},
+ {0xAE21, 0x5C},
+
+ {IMX214_TABLE_WAIT_MS, 10},
+ {0x0138, 0x01},
+ {IMX214_TABLE_END, 0x00}
+};
+
+static const struct reg_8 mode_table_common[] = {
+ /* software reset */
+
+ /* software standby settings */
+ {0x0100, 0x00},
+
+ /* ATR setting */
+ {0x9300, 0x02},
+
+ /* external clock setting */
+ {0x0136, 0x18},
+ {0x0137, 0x00},
+
+ /* global setting */
+ /* basic config */
+ {0x0101, 0x00},
+ {0x0105, 0x01},
+ {0x0106, 0x01},
+ {0x4550, 0x02},
+ {0x4601, 0x00},
+ {0x4642, 0x05},
+ {0x6227, 0x11},
+ {0x6276, 0x00},
+ {0x900E, 0x06},
+ {0xA802, 0x90},
+ {0xA803, 0x11},
+ {0xA804, 0x62},
+ {0xA805, 0x77},
+ {0xA806, 0xAE},
+ {0xA807, 0x34},
+ {0xA808, 0xAE},
+ {0xA809, 0x35},
+ {0xA80A, 0x62},
+ {0xA80B, 0x83},
+ {0xAE33, 0x00},
+
+ /* analog setting */
+ {0x4174, 0x00},
+ {0x4175, 0x11},
+ {0x4612, 0x29},
+ {0x461B, 0x12},
+ {0x461F, 0x06},
+ {0x4635, 0x07},
+ {0x4637, 0x30},
+ {0x463F, 0x18},
+ {0x4641, 0x0D},
+ {0x465B, 0x12},
+ {0x465F, 0x11},
+ {0x4663, 0x11},
+ {0x4667, 0x0F},
+ {0x466F, 0x0F},
+ {0x470E, 0x09},
+ {0x4909, 0xAB},
+ {0x490B, 0x95},
+ {0x4915, 0x5D},
+ {0x4A5F, 0xFF},
+ {0x4A61, 0xFF},
+ {0x4A73, 0x62},
+ {0x4A85, 0x00},
+ {0x4A87, 0xFF},
+
+ /* embedded data */
+ {0x5041, 0x04},
+ {0x583C, 0x04},
+ {0x620E, 0x04},
+ {0x6EB2, 0x01},
+ {0x6EB3, 0x00},
+ {0x9300, 0x02},
+
+	/* image quality */
+ /* HDR setting */
+ {0x3001, 0x07},
+ {0x6D12, 0x3F},
+ {0x6D13, 0xFF},
+ {0x9344, 0x03},
+ {0x9706, 0x10},
+ {0x9707, 0x03},
+ {0x9708, 0x03},
+ {0x9E04, 0x01},
+ {0x9E05, 0x00},
+ {0x9E0C, 0x01},
+ {0x9E0D, 0x02},
+ {0x9E24, 0x00},
+ {0x9E25, 0x8C},
+ {0x9E26, 0x00},
+ {0x9E27, 0x94},
+ {0x9E28, 0x00},
+ {0x9E29, 0x96},
+
+ /* CNR parameter setting */
+ {0x69DB, 0x01},
+
+ /* Moire reduction */
+ {0x6957, 0x01},
+
+	/* image enhancement */
+ {0x6987, 0x17},
+ {0x698A, 0x03},
+ {0x698B, 0x03},
+
+	/* white balance */
+ {0x0B8E, 0x01},
+ {0x0B8F, 0x00},
+ {0x0B90, 0x01},
+ {0x0B91, 0x00},
+ {0x0B92, 0x01},
+ {0x0B93, 0x00},
+ {0x0B94, 0x01},
+ {0x0B95, 0x00},
+
+ /* ATR setting */
+ {0x6E50, 0x00},
+ {0x6E51, 0x32},
+ {0x9340, 0x00},
+ {0x9341, 0x3C},
+ {0x9342, 0x03},
+ {0x9343, 0xFF},
+ {IMX214_TABLE_END, 0x00}
+};
+
+/*
+ * Declare modes in order, from biggest
+ * to smallest height.
+ */
+static const struct imx214_mode {
+ u32 width;
+ u32 height;
+ const struct reg_8 *reg_table;
+} imx214_modes[] = {
+ {
+ .width = 4096,
+ .height = 2304,
+ .reg_table = mode_4096x2304,
+ },
+ {
+ .width = 1920,
+ .height = 1080,
+ .reg_table = mode_1920x1080,
+ },
+};
+
+static inline struct imx214 *to_imx214(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct imx214, sd);
+}
+
+static int __maybe_unused imx214_power_on(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx214 *imx214 = to_imx214(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(IMX214_NUM_SUPPLIES, imx214->supplies);
+ if (ret < 0) {
+ dev_err(imx214->dev, "failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ usleep_range(2000, 3000);
+
+ ret = clk_prepare_enable(imx214->xclk);
+ if (ret < 0) {
+ regulator_bulk_disable(IMX214_NUM_SUPPLIES, imx214->supplies);
+ dev_err(imx214->dev, "clk prepare enable failed\n");
+ return ret;
+ }
+
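+	/* assert the sensor enable GPIO and give the chip time to power up */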
+ gpiod_set_value_cansleep(imx214->enable_gpio, 1);
+ usleep_range(12000, 15000);
+
+ return 0;
+}
+
+static int __maybe_unused imx214_power_off(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx214 *imx214 = to_imx214(sd);
+
+ gpiod_set_value_cansleep(imx214->enable_gpio, 0);
+
+ clk_disable_unprepare(imx214->xclk);
+
+ regulator_bulk_disable(IMX214_NUM_SUPPLIES, imx214->supplies);
+ usleep_range(10, 20);
+
+ return 0;
+}
+
+static int imx214_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = IMX214_MBUS_CODE;
+
+ return 0;
+}
+
+static int imx214_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->code != IMX214_MBUS_CODE)
+ return -EINVAL;
+
+ if (fse->index >= ARRAY_SIZE(imx214_modes))
+ return -EINVAL;
+
+ fse->min_width = fse->max_width = imx214_modes[fse->index].width;
+ fse->min_height = fse->max_height = imx214_modes[fse->index].height;
+
+ return 0;
+}
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static int imx214_s_register(struct v4l2_subdev *subdev,
+ const struct v4l2_dbg_register *reg)
+{
+ struct imx214 *imx214 = container_of(subdev, struct imx214, sd);
+
+ return regmap_write(imx214->regmap, reg->reg, reg->val);
+}
+
+static int imx214_g_register(struct v4l2_subdev *subdev,
+ struct v4l2_dbg_register *reg)
+{
+ struct imx214 *imx214 = container_of(subdev, struct imx214, sd);
+ unsigned int aux;
+ int ret;
+
+ reg->size = 1;
+ ret = regmap_read(imx214->regmap, reg->reg, &aux);
+ reg->val = aux;
+
+ return ret;
+}
+#endif
+
+static const struct v4l2_subdev_core_ops imx214_core_ops = {
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = imx214_g_register,
+ .s_register = imx214_s_register,
+#endif
+};
+
+static struct v4l2_mbus_framefmt *
+__imx214_get_pad_format(struct imx214 *imx214,
+ struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(&imx214->sd, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &imx214->fmt;
+ default:
+ return NULL;
+ }
+}
+
+static int imx214_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct imx214 *imx214 = to_imx214(sd);
+
+ mutex_lock(&imx214->mutex);
+ format->format = *__imx214_get_pad_format(imx214, cfg, format->pad,
+ format->which);
+ mutex_unlock(&imx214->mutex);
+
+ return 0;
+}
+
+static struct v4l2_rect *
+__imx214_get_pad_crop(struct imx214 *imx214, struct v4l2_subdev_pad_config *cfg,
+ unsigned int pad, enum v4l2_subdev_format_whence which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_crop(&imx214->sd, cfg, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &imx214->crop;
+ default:
+ return NULL;
+ }
+}
+
+static int imx214_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct imx214 *imx214 = to_imx214(sd);
+ struct v4l2_mbus_framefmt *__format;
+ struct v4l2_rect *__crop;
+ const struct imx214_mode *mode;
+
+ mutex_lock(&imx214->mutex);
+
+ __crop = __imx214_get_pad_crop(imx214, cfg, format->pad, format->which);
+
+ if (format)
+ mode = v4l2_find_nearest_size(imx214_modes,
+ ARRAY_SIZE(imx214_modes), width, height,
+ format->format.width, format->format.height);
+ else
+ mode = &imx214_modes[0];
+
+ __crop->width = mode->width;
+ __crop->height = mode->height;
+
+ __format = __imx214_get_pad_format(imx214, cfg, format->pad,
+ format->which);
+ __format->width = __crop->width;
+ __format->height = __crop->height;
+ __format->code = IMX214_MBUS_CODE;
+ __format->field = V4L2_FIELD_NONE;
+ __format->colorspace = V4L2_COLORSPACE_SRGB;
+ __format->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(__format->colorspace);
+ __format->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
+ __format->colorspace, __format->ycbcr_enc);
+ __format->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(__format->colorspace);
+
+ format->format = *__format;
+
+ mutex_unlock(&imx214->mutex);
+
+ return 0;
+}
+
+static int imx214_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct imx214 *imx214 = to_imx214(sd);
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ mutex_lock(&imx214->mutex);
+ sel->r = *__imx214_get_pad_crop(imx214, cfg, sel->pad,
+ sel->which);
+ mutex_unlock(&imx214->mutex);
+ return 0;
+}
+
+static int imx214_entity_init_cfg(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg)
+{
+ struct v4l2_subdev_format fmt = { };
+
+ fmt.which = cfg ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt.format.width = imx214_modes[0].width;
+ fmt.format.height = imx214_modes[0].height;
+
+ imx214_set_format(subdev, cfg, &fmt);
+
+ return 0;
+}
+
+static int imx214_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx214 *imx214 = container_of(ctrl->handler,
+ struct imx214, ctrls);
+ u8 vals[2];
+ int ret;
+
+ /*
+	 * Applying a V4L2 control value only happens
+	 * when power is up for streaming.
+ */
+ if (!pm_runtime_get_if_in_use(imx214->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
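+		/* exposure: 16-bit value, written MSB first starting at 0x0202 */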
+ vals[1] = ctrl->val;
+ vals[0] = ctrl->val >> 8;
+ ret = regmap_bulk_write(imx214->regmap, 0x202, vals, 2);
+ if (ret < 0)
+ dev_err(imx214->dev, "Error %d\n", ret);
+ ret = 0;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put(imx214->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops imx214_ctrl_ops = {
+ .s_ctrl = imx214_set_ctrl,
+};
+
+#define MAX_CMD 4
+static int imx214_write_table(struct imx214 *imx214,
+ const struct reg_8 table[])
+{
+ u8 vals[MAX_CMD];
+ int i;
+ int ret;
+
+ for (; table->addr != IMX214_TABLE_END ; table++) {
+ if (table->addr == IMX214_TABLE_WAIT_MS) {
+ usleep_range(table->val * 1000,
+ table->val * 1000 + 500);
+ continue;
+ }
+
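+		/* coalesce writes to consecutive registers into one bulk write */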
+ for (i = 0; i < MAX_CMD; i++) {
+ if (table[i].addr != (table[0].addr + i))
+ break;
+ vals[i] = table[i].val;
+ }
+
+ ret = regmap_bulk_write(imx214->regmap, table->addr, vals, i);
+
+ if (ret) {
+ dev_err(imx214->dev, "write_table error: %d\n", ret);
+ return ret;
+ }
+
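+		/* skip the registers just written; the loop increment adds the last one */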
+ table += i - 1;
+ }
+
+ return 0;
+}
+
+static int imx214_start_streaming(struct imx214 *imx214)
+{
+ const struct imx214_mode *mode;
+ int ret;
+
+ mutex_lock(&imx214->mutex);
+ ret = imx214_write_table(imx214, mode_table_common);
+ if (ret < 0) {
+		dev_err(imx214->dev, "could not send common table %d\n", ret);
+ goto error;
+ }
+
+ mode = v4l2_find_nearest_size(imx214_modes,
+ ARRAY_SIZE(imx214_modes), width, height,
+ imx214->fmt.width, imx214->fmt.height);
+ ret = imx214_write_table(imx214, mode->reg_table);
+ if (ret < 0) {
+		dev_err(imx214->dev, "could not send mode table %d\n", ret);
+ goto error;
+ }
+ ret = __v4l2_ctrl_handler_setup(&imx214->ctrls);
+ if (ret < 0) {
+ dev_err(imx214->dev, "could not sync v4l2 controls\n");
+ goto error;
+ }
+ ret = regmap_write(imx214->regmap, 0x100, 1);
+ if (ret < 0) {
+		dev_err(imx214->dev, "could not send start table %d\n", ret);
+ goto error;
+ }
+
+ mutex_unlock(&imx214->mutex);
+ return 0;
+
+error:
+ mutex_unlock(&imx214->mutex);
+ return ret;
+}
+
+static int imx214_stop_streaming(struct imx214 *imx214)
+{
+ int ret;
+
+ ret = regmap_write(imx214->regmap, 0x100, 0);
+ if (ret < 0)
+		dev_err(imx214->dev, "could not send stop table %d\n", ret);
+
+ return ret;
+}
+
+static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct imx214 *imx214 = to_imx214(subdev);
+ int ret;
+
+ if (imx214->streaming == enable)
+ return 0;
+
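+	/* hold a runtime PM reference while streaming, drop it when stopping */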
+ if (enable) {
+ ret = pm_runtime_get_sync(imx214->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(imx214->dev);
+ return ret;
+ }
+
+ ret = imx214_start_streaming(imx214);
+ if (ret < 0)
+ goto err_rpm_put;
+ } else {
+		ret = imx214_stop_streaming(imx214);
+ if (ret < 0)
+ goto err_rpm_put;
+ pm_runtime_put(imx214->dev);
+ }
+
+ imx214->streaming = enable;
+ return 0;
+
+err_rpm_put:
+ pm_runtime_put(imx214->dev);
+ return ret;
+}
+
+static int imx214_g_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_frame_interval *fival)
+{
+ fival->pad = 0;
+ fival->interval.numerator = 1;
+ fival->interval.denominator = IMX214_FPS;
+
+ return 0;
+}
+
+static int imx214_enum_frame_interval(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ const struct imx214_mode *mode;
+
+ if (fie->index != 0)
+ return -EINVAL;
+
+ mode = v4l2_find_nearest_size(imx214_modes,
+ ARRAY_SIZE(imx214_modes), width, height,
+ fie->width, fie->height);
+
+ fie->code = IMX214_MBUS_CODE;
+ fie->width = mode->width;
+ fie->height = mode->height;
+ fie->interval.numerator = 1;
+ fie->interval.denominator = IMX214_FPS;
+
+ return 0;
+}
+
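+/* the frame interval is fixed at 30 fps, so setting it simply reports it back */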
+static const struct v4l2_subdev_video_ops imx214_video_ops = {
+ .s_stream = imx214_s_stream,
+ .g_frame_interval = imx214_g_frame_interval,
+ .s_frame_interval = imx214_g_frame_interval,
+};
+
+static const struct v4l2_subdev_pad_ops imx214_subdev_pad_ops = {
+ .enum_mbus_code = imx214_enum_mbus_code,
+ .enum_frame_size = imx214_enum_frame_size,
+ .enum_frame_interval = imx214_enum_frame_interval,
+ .get_fmt = imx214_get_format,
+ .set_fmt = imx214_set_format,
+ .get_selection = imx214_get_selection,
+ .init_cfg = imx214_entity_init_cfg,
+};
+
+static const struct v4l2_subdev_ops imx214_subdev_ops = {
+ .core = &imx214_core_ops,
+ .video = &imx214_video_ops,
+ .pad = &imx214_subdev_pad_ops,
+};
+
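+/* the sensor exposes 16-bit register addresses holding 8-bit values */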
+static const struct regmap_config sensor_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int imx214_get_regulators(struct device *dev, struct imx214 *imx214)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMX214_NUM_SUPPLIES; i++)
+ imx214->supplies[i].supply = imx214_supply_name[i];
+
+ return devm_regulator_bulk_get(dev, IMX214_NUM_SUPPLIES,
+ imx214->supplies);
+}
+
+static int imx214_parse_fwnode(struct device *dev)
+{
+ struct fwnode_handle *endpoint;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ unsigned int i;
+ int ret;
+
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!endpoint) {
+ dev_err(dev, "endpoint node not found\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg);
+ if (ret) {
+ dev_err(dev, "parsing endpoint node failed\n");
+ goto done;
+ }
+
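+	/* the endpoint must advertise the one link frequency this driver supports */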
+ for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
+ if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ)
+ break;
+
+ if (i == bus_cfg.nr_of_link_frequencies) {
+		dev_err(dev, "link-frequencies %d not supported, please review your DT\n",
+ IMX214_DEFAULT_LINK_FREQ);
+ ret = -EINVAL;
+ goto done;
+ }
+
+done:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ fwnode_handle_put(endpoint);
+ return ret;
+}
+
+static int __maybe_unused imx214_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx214 *imx214 = to_imx214(sd);
+
+ if (imx214->streaming)
+ imx214_stop_streaming(imx214);
+
+ return 0;
+}
+
+static int __maybe_unused imx214_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx214 *imx214 = to_imx214(sd);
+ int ret;
+
+ if (imx214->streaming) {
+ ret = imx214_start_streaming(imx214);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ imx214_stop_streaming(imx214);
+ imx214->streaming = 0;
+ return ret;
+}
+
+static int imx214_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct imx214 *imx214;
+ static const s64 link_freq[] = {
+ IMX214_DEFAULT_LINK_FREQ,
+ };
+ int ret;
+
+ ret = imx214_parse_fwnode(dev);
+ if (ret)
+ return ret;
+
+ imx214 = devm_kzalloc(dev, sizeof(*imx214), GFP_KERNEL);
+ if (!imx214)
+ return -ENOMEM;
+
+ imx214->dev = dev;
+
+ imx214->xclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(imx214->xclk)) {
+		dev_err(dev, "could not get xclk\n");
+ return PTR_ERR(imx214->xclk);
+ }
+
+ ret = clk_set_rate(imx214->xclk, IMX214_DEFAULT_CLK_FREQ);
+ if (ret) {
+ dev_err(dev, "could not set xclk frequency\n");
+ return ret;
+ }
+
+ ret = imx214_get_regulators(dev, imx214);
+ if (ret < 0) {
+ dev_err(dev, "cannot get regulators\n");
+ return ret;
+ }
+
+ imx214->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(imx214->enable_gpio)) {
+ dev_err(dev, "cannot get enable gpio\n");
+ return PTR_ERR(imx214->enable_gpio);
+ }
+
+ imx214->regmap = devm_regmap_init_i2c(client, &sensor_regmap_config);
+ if (IS_ERR(imx214->regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(imx214->regmap);
+ }
+
+ v4l2_i2c_subdev_init(&imx214->sd, client, &imx214_subdev_ops);
+
+ /*
+ * Enable power initially, to avoid warnings
+ * from clk_disable on power_off
+ */
+ imx214_power_on(imx214->dev);
+
+ pm_runtime_set_active(imx214->dev);
+ pm_runtime_enable(imx214->dev);
+ pm_runtime_idle(imx214->dev);
+
+ v4l2_ctrl_handler_init(&imx214->ctrls, 3);
+
+ imx214->pixel_rate = v4l2_ctrl_new_std(&imx214->ctrls, NULL,
+ V4L2_CID_PIXEL_RATE, 0,
+ IMX214_DEFAULT_PIXEL_RATE, 1,
+ IMX214_DEFAULT_PIXEL_RATE);
+ imx214->link_freq = v4l2_ctrl_new_int_menu(&imx214->ctrls, NULL,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq) - 1,
+ 0, link_freq);
+ if (imx214->link_freq)
+ imx214->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /*
+ * WARNING!
+	 * Values obtained by reverse engineering blobs and/or devices.
+ * Ranges and functionality might be wrong.
+ *
+ * Sony, please release some register set documentation for the
+ * device.
+ *
+ * Yours sincerely, Ricardo.
+ */
+ imx214->exposure = v4l2_ctrl_new_std(&imx214->ctrls, &imx214_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ 0, 3184, 1, 0x0c70);
+
+ ret = imx214->ctrls.error;
+ if (ret) {
+ dev_err(&client->dev, "%s control init failed (%d)\n",
+ __func__, ret);
+ goto free_ctrl;
+ }
+
+ imx214->sd.ctrl_handler = &imx214->ctrls;
+ mutex_init(&imx214->mutex);
+ imx214->ctrls.lock = &imx214->mutex;
+
+ imx214->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ imx214->pad.flags = MEDIA_PAD_FL_SOURCE;
+ imx214->sd.dev = &client->dev;
+ imx214->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ ret = media_entity_pads_init(&imx214->sd.entity, 1, &imx214->pad);
+ if (ret < 0) {
+ dev_err(dev, "could not register media entity\n");
+ goto free_ctrl;
+ }
+
+ imx214_entity_init_cfg(&imx214->sd, NULL);
+
+ ret = v4l2_async_register_subdev_sensor_common(&imx214->sd);
+ if (ret < 0) {
+ dev_err(dev, "could not register v4l2 device\n");
+ goto free_entity;
+ }
+
+ return 0;
+
+free_entity:
+ media_entity_cleanup(&imx214->sd.entity);
+free_ctrl:
+ mutex_destroy(&imx214->mutex);
+ v4l2_ctrl_handler_free(&imx214->ctrls);
+ pm_runtime_disable(imx214->dev);
+
+ return ret;
+}
+
+static int imx214_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx214 *imx214 = to_imx214(sd);
+
+ v4l2_async_unregister_subdev(&imx214->sd);
+ media_entity_cleanup(&imx214->sd.entity);
+ v4l2_ctrl_handler_free(&imx214->ctrls);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ mutex_destroy(&imx214->mutex);
+
+ return 0;
+}
+
+static const struct of_device_id imx214_of_match[] = {
+ { .compatible = "sony,imx214" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx214_of_match);
+
+static const struct dev_pm_ops imx214_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx214_suspend, imx214_resume)
+ SET_RUNTIME_PM_OPS(imx214_power_off, imx214_power_on, NULL)
+};
+
+static struct i2c_driver imx214_i2c_driver = {
+ .driver = {
+ .of_match_table = imx214_of_match,
+ .pm = &imx214_pm_ops,
+ .name = "imx214",
+ },
+ .probe_new = imx214_probe,
+ .remove = imx214_remove,
+};
+
+module_i2c_driver(imx214_i2c_driver);
+
+MODULE_DESCRIPTION("Sony IMX214 Camera driver");
+MODULE_AUTHOR("Ricardo Ribalda <ricardo.ribalda@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index 31a1e2294843..f86ae18bc104 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -62,11 +62,6 @@
/* Test Pattern Control */
#define IMX258_REG_TEST_PATTERN 0x0600
-#define IMX258_TEST_PATTERN_DISABLE 0
-#define IMX258_TEST_PATTERN_SOLID_COLOR 1
-#define IMX258_TEST_PATTERN_COLOR_BARS 2
-#define IMX258_TEST_PATTERN_GREY_COLOR 3
-#define IMX258_TEST_PATTERN_PN9 4
/* Orientation */
#define REG_MIRROR_FLIP_CONTROL 0x0101
@@ -504,18 +499,10 @@ static const struct imx258_reg mode_1048_780_regs[] = {
static const char * const imx258_test_pattern_menu[] = {
"Disabled",
- "Color Bars",
- "Solid Color",
- "Grey Color Bars",
- "PN9"
-};
-
-static const int imx258_test_pattern_val[] = {
- IMX258_TEST_PATTERN_DISABLE,
- IMX258_TEST_PATTERN_COLOR_BARS,
- IMX258_TEST_PATTERN_SOLID_COLOR,
- IMX258_TEST_PATTERN_GREY_COLOR,
- IMX258_TEST_PATTERN_PN9,
+ "Solid Colour",
+ "Eight Vertical Colour Bars",
+ "Colour Bars With Fade to Grey",
+ "Pseudorandom Sequence (PN9)",
};
/* Configurations for supported link frequencies */
@@ -778,13 +765,10 @@ static int imx258_set_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_TEST_PATTERN:
ret = imx258_write_reg(imx258, IMX258_REG_TEST_PATTERN,
IMX258_REG_VALUE_16BIT,
- imx258_test_pattern_val[ctrl->val]);
-
+ ctrl->val);
ret = imx258_write_reg(imx258, REG_MIRROR_FLIP_CONTROL,
IMX258_REG_VALUE_08BIT,
- ctrl->val == imx258_test_pattern_val
- [IMX258_TEST_PATTERN_DISABLE] ?
- REG_CONFIG_MIRROR_FLIP :
+ !ctrl->val ? REG_CONFIG_MIRROR_FLIP :
REG_CONFIG_FLIP_TEST_PATTERN);
break;
default:
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 11c69281692e..5fac7fd32634 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -619,16 +619,19 @@ static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
{
+ unsigned int uint_val;
int err;
- err = regmap_read(priv->regmap, addr, (unsigned int *)val);
+ err = regmap_read(priv->regmap, addr, &uint_val);
if (err)
dev_err(&priv->client->dev,
"%s : i2c read failed, addr = %x\n", __func__, addr);
else
dev_dbg(&priv->client->dev,
"%s : addr 0x%x, val=0x%x\n", __func__,
- addr, *val);
+ addr, uint_val);
+
+ *val = uint_val;
return err;
}
@@ -1901,7 +1904,7 @@ static int imx274_probe(struct i2c_client *client,
imx274_reset(imx274, 1);
/* initialize controls */
- ret = v4l2_ctrl_handler_init(&imx274->ctrls.handler, 2);
+ ret = v4l2_ctrl_handler_init(&imx274->ctrls.handler, 4);
if (ret < 0) {
dev_err(&client->dev,
"%s : ctrl handler init Failed\n", __func__);
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index 0d3e27812b93..17c2e4b41221 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -1648,10 +1648,10 @@ static const struct imx319_reg mode_1280x720_regs[] = {
static const char * const imx319_test_pattern_menu[] = {
"Disabled",
- "100% color bars",
- "Solid color",
- "Fade to gray color bars",
- "PN9"
+ "Solid Colour",
+ "Eight Vertical Colour Bars",
+ "Colour Bars With Fade to Grey",
+ "Pseudorandom Sequence (PN9)",
};
/* supported link frequencies */
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index 20c8eea5db4b..bed293b60e50 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -876,10 +876,10 @@ static const struct imx355_reg mode_820x616_regs[] = {
static const char * const imx355_test_pattern_menu[] = {
"Disabled",
- "100% color bars",
- "Solid color",
- "Fade to gray color bars",
- "PN9"
+ "Solid Colour",
+ "Eight Vertical Colour Bars",
+ "Colour Bars With Fade to Grey",
+ "Pseudorandom Sequence (PN9)",
};
/* supported link frequencies */
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 1395986a07bb..d639b9bcf64a 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -15,12 +15,15 @@
#include <linux/delay.h>
#include <linux/v4l2-mediabus.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <media/v4l2-async.h>
#include <media/v4l2-clk.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
/*
* MT9M111, MT9M112 and MT9M131:
@@ -101,6 +104,7 @@
#define MT9M111_REDUCER_XSIZE_A 0x1a7
#define MT9M111_REDUCER_YZOOM_A 0x1a9
#define MT9M111_REDUCER_YSIZE_A 0x1aa
+#define MT9M111_EFFECTS_MODE 0x1e2
#define MT9M111_OUTPUT_FORMAT_CTRL2_A 0x13a
#define MT9M111_OUTPUT_FORMAT_CTRL2_B 0x19b
@@ -126,6 +130,9 @@
#define MT9M111_OUTFMT_SWAP_YCbCr_C_Y_RGB_EVEN (1 << 1)
#define MT9M111_OUTFMT_SWAP_YCbCr_Cb_Cr_RGB_R_B (1 << 0)
#define MT9M111_TPG_SEL_MASK GENMASK(2, 0)
+#define MT9M111_EFFECTS_MODE_MASK GENMASK(2, 0)
+#define MT9M111_RM_PWR_MASK BIT(10)
+#define MT9M111_RM_SKIP2_MASK GENMASK(3, 2)
/*
* Camera control register addresses (0x200..0x2ff not implemented)
@@ -204,6 +211,23 @@ static const struct mt9m111_datafmt mt9m111_colour_fmts[] = {
{MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB},
};
+enum mt9m111_mode_id {
+ MT9M111_MODE_SXGA_8FPS,
+ MT9M111_MODE_SXGA_15FPS,
+ MT9M111_MODE_QSXGA_30FPS,
+ MT9M111_NUM_MODES,
+};
+
+struct mt9m111_mode_info {
+ unsigned int sensor_w;
+ unsigned int sensor_h;
+ unsigned int max_image_w;
+ unsigned int max_image_h;
+ unsigned int max_fps;
+ unsigned int reg_val;
+ unsigned int reg_mask;
+};
+
struct mt9m111 {
struct v4l2_subdev subdev;
struct v4l2_ctrl_handler hdl;
@@ -213,15 +237,51 @@ struct mt9m111 {
struct v4l2_clk *clk;
unsigned int width; /* output */
unsigned int height; /* sizes */
+ struct v4l2_fract frame_interval;
+ const struct mt9m111_mode_info *current_mode;
struct mutex power_lock; /* lock to protect power_count */
int power_count;
const struct mt9m111_datafmt *fmt;
int lastpage; /* PageMap cache value */
+ bool is_streaming;
+ /* user point of view - 0: falling 1: rising edge */
+ unsigned int pclk_sample:1;
#ifdef CONFIG_MEDIA_CONTROLLER
struct media_pad pad;
#endif
};
+static const struct mt9m111_mode_info mt9m111_mode_data[MT9M111_NUM_MODES] = {
+ [MT9M111_MODE_SXGA_8FPS] = {
+ .sensor_w = 1280,
+ .sensor_h = 1024,
+ .max_image_w = 1280,
+ .max_image_h = 1024,
+ .max_fps = 8,
+ .reg_val = MT9M111_RM_LOW_POWER_RD,
+ .reg_mask = MT9M111_RM_PWR_MASK | MT9M111_RM_SKIP2_MASK,
+ },
+ [MT9M111_MODE_SXGA_15FPS] = {
+ .sensor_w = 1280,
+ .sensor_h = 1024,
+ .max_image_w = 1280,
+ .max_image_h = 1024,
+ .max_fps = 15,
+ .reg_val = MT9M111_RM_FULL_POWER_RD,
+ .reg_mask = MT9M111_RM_PWR_MASK | MT9M111_RM_SKIP2_MASK,
+ },
+ [MT9M111_MODE_QSXGA_30FPS] = {
+ .sensor_w = 1280,
+ .sensor_h = 1024,
+ .max_image_w = 640,
+ .max_image_h = 512,
+ .max_fps = 30,
+ .reg_val = MT9M111_RM_LOW_POWER_RD | MT9M111_RM_COL_SKIP_2X |
+ MT9M111_RM_ROW_SKIP_2X,
+ .reg_mask = MT9M111_RM_PWR_MASK | MT9M111_RM_SKIP2_MASK,
+ },
+};
+
/* Find a data format by a pixel code */
static const struct mt9m111_datafmt *mt9m111_find_datafmt(struct mt9m111 *mt9m111,
u32 code)
@@ -538,6 +598,10 @@ static int mt9m111_set_pixfmt(struct mt9m111 *mt9m111,
return -EINVAL;
}
+ /* receiver samples on falling edge, chip-hw default is rising */
+ if (mt9m111->pclk_sample == 0)
+ mask_outfmt2 |= MT9M111_OUTFMT_INV_PIX_CLOCK;
+
ret = mt9m111_reg_mask(client, context_a.output_fmt_ctrl2,
data_outfmt2, mask_outfmt2);
if (!ret)
@@ -559,6 +623,9 @@ static int mt9m111_set_fmt(struct v4l2_subdev *sd,
bool bayer;
int ret;
+ if (mt9m111->is_streaming)
+ return -EBUSY;
+
if (format->pad)
return -EINVAL;
@@ -611,6 +678,61 @@ static int mt9m111_set_fmt(struct v4l2_subdev *sd,
return ret;
}
+static const struct mt9m111_mode_info *
+mt9m111_find_mode(struct mt9m111 *mt9m111, unsigned int req_fps,
+ unsigned int width, unsigned int height)
+{
+ const struct mt9m111_mode_info *mode;
+ struct v4l2_rect *sensor_rect = &mt9m111->rect;
+ unsigned int gap, gap_best = (unsigned int) -1;
+ int i, best_gap_idx = MT9M111_MODE_SXGA_15FPS;
+ bool skip_30fps = false;
+
+ /*
+	 * The fps selection is based on the row/column skipping mechanism,
+	 * so ensure that the sensor window is set to its default; otherwise
+	 * the fps is not calculated correctly within the sensor hw.
+ */
+ if (sensor_rect->width != MT9M111_MAX_WIDTH ||
+ sensor_rect->height != MT9M111_MAX_HEIGHT) {
+ dev_info(mt9m111->subdev.dev,
+			 "Framerate selection is not supported for cropped images\n");
+ return NULL;
+ }
+
+ /* 30fps only supported for images not exceeding 640x512 */
+ if (width > MT9M111_MAX_WIDTH / 2 || height > MT9M111_MAX_HEIGHT / 2) {
+ dev_dbg(mt9m111->subdev.dev,
+			"Framerates > 15fps are supported only for images not exceeding 640x512\n");
+ skip_30fps = true;
+ }
+
+ /* find best matched fps */
+ for (i = 0; i < MT9M111_NUM_MODES; i++) {
+ unsigned int fps = mt9m111_mode_data[i].max_fps;
+
+ if (fps == 30 && skip_30fps)
+ continue;
+
+ gap = abs(fps - req_fps);
+ if (gap < gap_best) {
+ best_gap_idx = i;
+ gap_best = gap;
+ }
+ }
+
+ /*
+	 * Use context a/b default timing values instead of calculating
+	 * blanking timing values.
+ */
+ mode = &mt9m111_mode_data[best_gap_idx];
+ mt9m111->ctx = (best_gap_idx == MT9M111_MODE_QSXGA_30FPS) ? &context_a :
+ &context_b;
+ return mode;
+}
+
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int mt9m111_g_register(struct v4l2_subdev *sd,
struct v4l2_dbg_register *reg)
@@ -726,6 +848,29 @@ static int mt9m111_set_test_pattern(struct mt9m111 *mt9m111, int val)
MT9M111_TPG_SEL_MASK);
}
+static int mt9m111_set_colorfx(struct mt9m111 *mt9m111, int val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
+ static const struct v4l2_control colorfx[] = {
+ { V4L2_COLORFX_NONE, 0 },
+ { V4L2_COLORFX_BW, 1 },
+ { V4L2_COLORFX_SEPIA, 2 },
+ { V4L2_COLORFX_NEGATIVE, 3 },
+ { V4L2_COLORFX_SOLARIZATION, 4 },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(colorfx); i++) {
+ if (colorfx[i].id == val) {
+ return mt9m111_reg_mask(client, MT9M111_EFFECTS_MODE,
+ colorfx[i].value,
+ MT9M111_EFFECTS_MODE_MASK);
+ }
+ }
+
+ return -EINVAL;
+}
+
static int mt9m111_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct mt9m111 *mt9m111 = container_of(ctrl->handler,
@@ -746,6 +891,8 @@ static int mt9m111_s_ctrl(struct v4l2_ctrl *ctrl)
return mt9m111_set_autowhitebalance(mt9m111, ctrl->val);
case V4L2_CID_TEST_PATTERN:
return mt9m111_set_test_pattern(mt9m111, ctrl->val);
+ case V4L2_CID_COLORFX:
+ return mt9m111_set_colorfx(mt9m111, ctrl->val);
}
return -EINVAL;
@@ -771,11 +918,16 @@ static int mt9m111_suspend(struct mt9m111 *mt9m111)
static void mt9m111_restore_state(struct mt9m111 *mt9m111)
{
+ struct i2c_client *client = v4l2_get_subdevdata(&mt9m111->subdev);
+
mt9m111_set_context(mt9m111, mt9m111->ctx);
mt9m111_set_pixfmt(mt9m111, mt9m111->fmt->code);
mt9m111_setup_geometry(mt9m111, &mt9m111->rect,
mt9m111->width, mt9m111->height, mt9m111->fmt->code);
v4l2_ctrl_handler_setup(&mt9m111->hdl);
+ mt9m111_reg_mask(client, mt9m111->ctx->read_mode,
+ mt9m111->current_mode->reg_val,
+ mt9m111->current_mode->reg_mask);
}
static int mt9m111_resume(struct mt9m111 *mt9m111)
@@ -862,12 +1014,62 @@ static const struct v4l2_ctrl_ops mt9m111_ctrl_ops = {
static const struct v4l2_subdev_core_ops mt9m111_subdev_core_ops = {
.s_power = mt9m111_s_power,
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = mt9m111_g_register,
.s_register = mt9m111_s_register,
#endif
};
+static int mt9m111_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+
+ fi->interval = mt9m111->frame_interval;
+
+ return 0;
+}
+
+static int mt9m111_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+ const struct mt9m111_mode_info *mode;
+ struct v4l2_fract *fract = &fi->interval;
+ int fps;
+
+ if (mt9m111->is_streaming)
+ return -EBUSY;
+
+ if (fi->pad != 0)
+ return -EINVAL;
+
+ if (fract->numerator == 0) {
+ fract->denominator = 30;
+ fract->numerator = 1;
+ }
+
+ fps = DIV_ROUND_CLOSEST(fract->denominator, fract->numerator);
+
+	/* Find the best fitting mode. Do not update the mode if none was found. */
+ mode = mt9m111_find_mode(mt9m111, fps, mt9m111->width, mt9m111->height);
+ if (!mode)
+ return 0;
+
+ if (mode->max_fps != fps) {
+ fract->denominator = mode->max_fps;
+ fract->numerator = 1;
+ }
+
+ mt9m111->current_mode = mode;
+ mt9m111->frame_interval = fi->interval;
+
+ return 0;
+}
+
static int mt9m111_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_mbus_code_enum *code)
@@ -879,12 +1081,26 @@ static int mt9m111_enum_mbus_code(struct v4l2_subdev *sd,
return 0;
}
+static int mt9m111_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+
+ mt9m111->is_streaming = !!enable;
+ return 0;
+}
+
static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
struct v4l2_mbus_config *cfg)
{
- cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING |
+ struct mt9m111 *mt9m111 = container_of(sd, struct mt9m111, subdev);
+
+ cfg->flags = V4L2_MBUS_MASTER |
V4L2_MBUS_HSYNC_ACTIVE_HIGH | V4L2_MBUS_VSYNC_ACTIVE_HIGH |
V4L2_MBUS_DATA_ACTIVE_HIGH;
+
+ cfg->flags |= mt9m111->pclk_sample ? V4L2_MBUS_PCLK_SAMPLE_RISING :
+ V4L2_MBUS_PCLK_SAMPLE_FALLING;
+
cfg->type = V4L2_MBUS_PARALLEL;
return 0;
@@ -892,6 +1108,9 @@ static int mt9m111_g_mbus_config(struct v4l2_subdev *sd,
static const struct v4l2_subdev_video_ops mt9m111_subdev_video_ops = {
.g_mbus_config = mt9m111_g_mbus_config,
+ .s_stream = mt9m111_s_stream,
+ .g_frame_interval = mt9m111_g_frame_interval,
+ .s_frame_interval = mt9m111_s_frame_interval,
};
static const struct v4l2_subdev_pad_ops mt9m111_subdev_pad_ops = {
@@ -951,6 +1170,30 @@ done:
return ret;
}
+static int mt9m111_probe_fw(struct i2c_client *client, struct mt9m111 *mt9m111)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_PARALLEL
+ };
+ struct fwnode_handle *np;
+ int ret;
+
+ np = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
+ if (!np)
+ return -EINVAL;
+
+ ret = v4l2_fwnode_endpoint_parse(np, &bus_cfg);
+ if (ret)
+ goto out_put_fw;
+
+ mt9m111->pclk_sample = !!(bus_cfg.bus.parallel.flags &
+ V4L2_MBUS_PCLK_SAMPLE_RISING);
+
+out_put_fw:
+ fwnode_handle_put(np);
+ return ret;
+}
+
static int mt9m111_probe(struct i2c_client *client,
const struct i2c_device_id *did)
{
@@ -968,6 +1211,10 @@ static int mt9m111_probe(struct i2c_client *client,
if (!mt9m111)
return -ENOMEM;
+ ret = mt9m111_probe_fw(client, mt9m111);
+ if (ret)
+ return ret;
+
mt9m111->clk = v4l2_clk_get(&client->dev, "mclk");
if (IS_ERR(mt9m111->clk))
return PTR_ERR(mt9m111->clk);
@@ -976,9 +1223,10 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->ctx = &context_b;
v4l2_i2c_subdev_init(&mt9m111->subdev, client, &mt9m111_subdev_ops);
- mt9m111->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ mt9m111->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
- v4l2_ctrl_handler_init(&mt9m111->hdl, 5);
+ v4l2_ctrl_handler_init(&mt9m111->hdl, 7);
v4l2_ctrl_new_std(&mt9m111->hdl, &mt9m111_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(&mt9m111->hdl, &mt9m111_ctrl_ops,
@@ -994,6 +1242,14 @@ static int mt9m111_probe(struct i2c_client *client,
&mt9m111_ctrl_ops, V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(mt9m111_test_pattern_menu) - 1, 0, 0,
mt9m111_test_pattern_menu);
+ v4l2_ctrl_new_std_menu(&mt9m111->hdl, &mt9m111_ctrl_ops,
+ V4L2_CID_COLORFX, V4L2_COLORFX_SOLARIZATION,
+ ~(BIT(V4L2_COLORFX_NONE) |
+ BIT(V4L2_COLORFX_BW) |
+ BIT(V4L2_COLORFX_SEPIA) |
+ BIT(V4L2_COLORFX_NEGATIVE) |
+ BIT(V4L2_COLORFX_SOLARIZATION)),
+ V4L2_COLORFX_NONE);
mt9m111->subdev.ctrl_handler = &mt9m111->hdl;
if (mt9m111->hdl.error) {
ret = mt9m111->hdl.error;
@@ -1008,6 +1264,10 @@ static int mt9m111_probe(struct i2c_client *client,
goto out_hdlfree;
#endif
+ mt9m111->current_mode = &mt9m111_mode_data[MT9M111_MODE_SXGA_15FPS];
+ mt9m111->frame_interval.numerator = 1;
+ mt9m111->frame_interval.denominator = mt9m111->current_mode->max_fps;
+
/* Second stage probe - when a capture adapter is there */
mt9m111->rect.left = MT9M111_MIN_DARK_COLS;
mt9m111->rect.top = MT9M111_MIN_DARK_ROWS;
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index c8bbc1f52261..45bb872db3c5 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1612,7 +1612,8 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
OV13858_NUM_OF_LINK_FREQS - 1,
0,
link_freq_menu_items);
- ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ if (ov13858->link_freq)
+ ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
pixel_rate_min = link_freq_to_pixel_rate(link_freq_menu_items[1]);
@@ -1635,7 +1636,8 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
ov13858->hblank = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK,
hblank, hblank, 1, hblank);
- ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ if (ov13858->hblank)
+ ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
exposure_max = mode->vts_def - 8;
ov13858->exposure = v4l2_ctrl_new_std(
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index 20a8853ba1e2..5d2d6735cc78 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -26,6 +26,7 @@
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-image-sizes.h>
@@ -705,6 +706,11 @@ err:
return ret;
}
+static const char * const ov2640_test_pattern_menu[] = {
+ "Disabled",
+ "Eight Vertical Colour Bars",
+};
+
/*
* functions
*/
@@ -740,6 +746,9 @@ static int ov2640_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_HFLIP:
val = ctrl->val ? REG04_HFLIP_IMG : 0x00;
return ov2640_mask_set(client, REG04, REG04_HFLIP_IMG, val);
+ case V4L2_CID_TEST_PATTERN:
+ val = ctrl->val ? COM7_COLOR_BAR_TEST : 0x00;
+ return ov2640_mask_set(client, COM7, COM7_COLOR_BAR_TEST, val);
}
return -EINVAL;
@@ -1088,6 +1097,9 @@ static const struct v4l2_ctrl_ops ov2640_ctrl_ops = {
};
static const struct v4l2_subdev_core_ops ov2640_subdev_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov2640_g_register,
.s_register = ov2640_s_register,
@@ -1182,14 +1194,19 @@ static int ov2640_probe(struct i2c_client *client,
goto err_clk;
v4l2_i2c_subdev_init(&priv->subdev, client, &ov2640_subdev_ops);
- priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
mutex_init(&priv->lock);
- v4l2_ctrl_handler_init(&priv->hdl, 2);
+ v4l2_ctrl_handler_init(&priv->hdl, 3);
priv->hdl.lock = &priv->lock;
v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(&priv->hdl, &ov2640_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std_menu_items(&priv->hdl, &ov2640_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov2640_test_pattern_menu) - 1, 0, 0,
+ ov2640_test_pattern_menu);
priv->subdev.ctrl_handler = &priv->hdl;
if (priv->hdl.error) {
ret = priv->hdl.error;
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 0e34e15b67b3..b10bcfabaeeb 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -568,10 +568,6 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
if (ret < 0)
return ret;
- ret = ov2680_mode_restore(sensor);
- if (ret < 0)
- goto disable;
-
sensor->is_enabled = true;
/* Set clock lane into LP-11 state */
@@ -580,12 +576,6 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
ov2680_stream_disable(sensor);
return 0;
-
-disable:
- dev_err(dev, "failed to enable sensor: %d\n", ret);
- ov2680_power_off(sensor);
-
- return ret;
}
static int ov2680_s_power(struct v4l2_subdev *sd, int on)
@@ -606,6 +596,8 @@ static int ov2680_s_power(struct v4l2_subdev *sd, int on)
ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
if (ret < 0)
return ret;
+
+ ret = ov2680_mode_restore(sensor);
}
return ret;
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index eaefdb58653b..bef3f3aae0ed 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -25,6 +25,7 @@
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
@@ -94,9 +95,6 @@
#define OV5640_REG_SDE_CTRL5 0x5585
#define OV5640_REG_AVG_READOUT 0x56a1
-#define OV5640_SCLK2X_ROOT_DIVIDER_DEFAULT 1
-#define OV5640_SCLK_ROOT_DIVIDER_DEFAULT 2
-
enum ov5640_mode_id {
OV5640_MODE_QCIF_176_144 = 0,
OV5640_MODE_QVGA_320_240,
@@ -113,6 +111,7 @@ enum ov5640_mode_id {
enum ov5640_frame_rate {
OV5640_15_FPS = 0,
OV5640_30_FPS,
+ OV5640_60_FPS,
OV5640_NUM_FRAMERATES,
};
@@ -141,6 +140,7 @@ MODULE_PARM_DESC(virtual_channel,
static const int ov5640_framerates[] = {
[OV5640_15_FPS] = 15,
[OV5640_30_FPS] = 30,
+ [OV5640_60_FPS] = 60,
};
/* regulator supplies */
@@ -261,8 +261,7 @@ static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
{0x3103, 0x11, 0, 0}, {0x3008, 0x82, 0, 5}, {0x3008, 0x42, 0, 0},
{0x3103, 0x03, 0, 0}, {0x3017, 0x00, 0, 0}, {0x3018, 0x00, 0, 0},
- {0x3034, 0x18, 0, 0}, {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0},
- {0x3037, 0x13, 0, 0}, {0x3630, 0x36, 0, 0},
+ {0x3630, 0x36, 0, 0},
{0x3631, 0x0e, 0, 0}, {0x3632, 0xe2, 0, 0}, {0x3633, 0x12, 0, 0},
{0x3621, 0xe0, 0, 0}, {0x3704, 0xa0, 0, 0}, {0x3703, 0x5a, 0, 0},
{0x3715, 0x78, 0, 0}, {0x3717, 0x01, 0, 0}, {0x370b, 0x60, 0, 0},
@@ -344,27 +343,8 @@ static const struct reg_value ov5640_init_setting_30fps_VGA[] = {
{0x3a1f, 0x14, 0, 0}, {0x3008, 0x02, 0, 0}, {0x3c00, 0x04, 0, 300},
};
-static const struct reg_value ov5640_setting_30fps_VGA_640_480[] = {
- {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x0e, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x3503, 0x00, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_15fps_VGA_640_480[] = {
- {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+static const struct reg_value ov5640_setting_VGA_640_480[] = {
+ {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
@@ -382,28 +362,8 @@ static const struct reg_value ov5640_setting_15fps_VGA_640_480[] = {
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
-static const struct reg_value ov5640_setting_30fps_XGA_1024_768[] = {
- {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x0e, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x3503, 0x00, 0, 0},
- {0x3035, 0x12, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_15fps_XGA_1024_768[] = {
- {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+static const struct reg_value ov5640_setting_XGA_1024_768[] = {
+ {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
@@ -421,8 +381,8 @@ static const struct reg_value ov5640_setting_15fps_XGA_1024_768[] = {
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
-static const struct reg_value ov5640_setting_30fps_QVGA_320_240[] = {
- {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+static const struct reg_value ov5640_setting_QVGA_320_240[] = {
+ {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
@@ -440,8 +400,8 @@ static const struct reg_value ov5640_setting_30fps_QVGA_320_240[] = {
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
-static const struct reg_value ov5640_setting_15fps_QVGA_320_240[] = {
- {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+static const struct reg_value ov5640_setting_QCIF_176_144[] = {
+ {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
@@ -459,46 +419,8 @@ static const struct reg_value ov5640_setting_15fps_QVGA_320_240[] = {
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
-static const struct reg_value ov5640_setting_30fps_QCIF_176_144[] = {
- {0x3035, 0x14, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_15fps_QCIF_176_144[] = {
- {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_30fps_NTSC_720_480[] = {
- {0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+static const struct reg_value ov5640_setting_NTSC_720_480[] = {
+ {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
@@ -516,27 +438,8 @@ static const struct reg_value ov5640_setting_30fps_NTSC_720_480[] = {
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
-static const struct reg_value ov5640_setting_15fps_NTSC_720_480[] = {
- {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x3c, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_30fps_PAL_720_576[] = {
- {0x3035, 0x12, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
+static const struct reg_value ov5640_setting_PAL_720_576[] = {
+ {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
@@ -554,48 +457,8 @@ static const struct reg_value ov5640_setting_30fps_PAL_720_576[] = {
{0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
-static const struct reg_value ov5640_setting_15fps_PAL_720_576[] = {
- {0x3035, 0x22, 0, 0}, {0x3036, 0x38, 0, 0}, {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x04, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9b, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x38, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x06, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x03, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_30fps_720P_1280_720[] = {
- {0x3008, 0x42, 0, 0},
- {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x31, 0, 0},
- {0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0xfa, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x06, 0, 0}, {0x3807, 0xa9, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
- {0x3618, 0x00, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3709, 0x52, 0, 0}, {0x370c, 0x03, 0, 0}, {0x3a02, 0x02, 0, 0},
- {0x3a03, 0xe4, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0xbc, 0, 0},
- {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x72, 0, 0}, {0x3a0e, 0x01, 0, 0},
- {0x3a0d, 0x02, 0, 0}, {0x3a14, 0x02, 0, 0}, {0x3a15, 0xe4, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x4713, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0},
- {0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0}, {0x4005, 0x1a, 0, 0},
- {0x3008, 0x02, 0, 0}, {0x3503, 0, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_15fps_720P_1280_720[] = {
- {0x3035, 0x41, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0},
+static const struct reg_value ov5640_setting_720P_1280_720[] = {
+ {0x3c07, 0x07, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x31, 0, 0},
{0x3815, 0x31, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
@@ -613,9 +476,9 @@ static const struct reg_value ov5640_setting_15fps_720P_1280_720[] = {
{0x3824, 0x04, 0, 0}, {0x5001, 0x83, 0, 0},
};
-static const struct reg_value ov5640_setting_30fps_1080P_1920_1080[] = {
+static const struct reg_value ov5640_setting_1080P_1920_1080[] = {
{0x3008, 0x42, 0, 0},
- {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
+ {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x11, 0, 0},
{0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
@@ -630,8 +493,8 @@ static const struct reg_value ov5640_setting_30fps_1080P_1920_1080[] = {
{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
{0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
{0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0}, {0x3035, 0x11, 0, 0},
- {0x3036, 0x54, 0, 0}, {0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0},
+ {0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3800, 0x01, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3802, 0x01, 0, 0},
{0x3803, 0xb2, 0, 0}, {0x3804, 0x08, 0, 0}, {0x3805, 0xef, 0, 0},
@@ -643,43 +506,10 @@ static const struct reg_value ov5640_setting_30fps_1080P_1920_1080[] = {
{0x3a15, 0x60, 0, 0}, {0x4713, 0x02, 0, 0}, {0x4407, 0x04, 0, 0},
{0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
{0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0},
- {0x3503, 0, 0, 0},
-};
-
-static const struct reg_value ov5640_setting_15fps_1080P_1920_1080[] = {
- {0x3008, 0x42, 0, 0},
- {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3814, 0x11, 0, 0},
- {0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
- {0x3802, 0x00, 0, 0}, {0x3803, 0x00, 0, 0}, {0x3804, 0x0a, 0, 0},
- {0x3805, 0x3f, 0, 0}, {0x3806, 0x07, 0, 0}, {0x3807, 0x9f, 0, 0},
- {0x3810, 0x00, 0, 0},
- {0x3811, 0x10, 0, 0}, {0x3812, 0x00, 0, 0}, {0x3813, 0x04, 0, 0},
- {0x3618, 0x04, 0, 0}, {0x3612, 0x29, 0, 0}, {0x3708, 0x21, 0, 0},
- {0x3709, 0x12, 0, 0}, {0x370c, 0x00, 0, 0}, {0x3a02, 0x03, 0, 0},
- {0x3a03, 0xd8, 0, 0}, {0x3a08, 0x01, 0, 0}, {0x3a09, 0x27, 0, 0},
- {0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
- {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
- {0x4001, 0x02, 0, 0}, {0x4004, 0x06, 0, 0}, {0x4713, 0x03, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x3824, 0x02, 0, 0}, {0x5001, 0x83, 0, 0}, {0x3035, 0x21, 0, 0},
- {0x3036, 0x54, 0, 1}, {0x3c07, 0x07, 0, 0}, {0x3c08, 0x00, 0, 0},
- {0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
- {0x3800, 0x01, 0, 0}, {0x3801, 0x50, 0, 0}, {0x3802, 0x01, 0, 0},
- {0x3803, 0xb2, 0, 0}, {0x3804, 0x08, 0, 0}, {0x3805, 0xef, 0, 0},
- {0x3806, 0x05, 0, 0}, {0x3807, 0xf1, 0, 0},
- {0x3612, 0x2b, 0, 0}, {0x3708, 0x64, 0, 0},
- {0x3a02, 0x04, 0, 0}, {0x3a03, 0x60, 0, 0}, {0x3a08, 0x01, 0, 0},
- {0x3a09, 0x50, 0, 0}, {0x3a0a, 0x01, 0, 0}, {0x3a0b, 0x18, 0, 0},
- {0x3a0e, 0x03, 0, 0}, {0x3a0d, 0x04, 0, 0}, {0x3a14, 0x04, 0, 0},
- {0x3a15, 0x60, 0, 0}, {0x4713, 0x02, 0, 0}, {0x4407, 0x04, 0, 0},
- {0x460b, 0x37, 0, 0}, {0x460c, 0x20, 0, 0}, {0x3824, 0x04, 0, 0},
- {0x4005, 0x1a, 0, 0}, {0x3008, 0x02, 0, 0}, {0x3503, 0, 0, 0},
};
-static const struct reg_value ov5640_setting_15fps_QSXGA_2592_1944[] = {
- {0x3035, 0x21, 0, 0}, {0x3036, 0x54, 0, 0}, {0x3c07, 0x08, 0, 0},
+static const struct reg_value ov5640_setting_QSXGA_2592_1944[] = {
+ {0x3c07, 0x08, 0, 0},
{0x3c09, 0x1c, 0, 0}, {0x3c0a, 0x9c, 0, 0}, {0x3c0b, 0x40, 0, 0},
{0x3814, 0x11, 0, 0},
{0x3815, 0x11, 0, 0}, {0x3800, 0x00, 0, 0}, {0x3801, 0x00, 0, 0},
@@ -705,79 +535,43 @@ static const struct ov5640_mode_info ov5640_mode_init_data = {
};
static const struct ov5640_mode_info
-ov5640_mode_data[OV5640_NUM_FRAMERATES][OV5640_NUM_MODES] = {
- {
- {OV5640_MODE_QCIF_176_144, SUBSAMPLING,
- 176, 1896, 144, 984,
- ov5640_setting_15fps_QCIF_176_144,
- ARRAY_SIZE(ov5640_setting_15fps_QCIF_176_144)},
- {OV5640_MODE_QVGA_320_240, SUBSAMPLING,
- 320, 1896, 240, 984,
- ov5640_setting_15fps_QVGA_320_240,
- ARRAY_SIZE(ov5640_setting_15fps_QVGA_320_240)},
- {OV5640_MODE_VGA_640_480, SUBSAMPLING,
- 640, 1896, 480, 1080,
- ov5640_setting_15fps_VGA_640_480,
- ARRAY_SIZE(ov5640_setting_15fps_VGA_640_480)},
- {OV5640_MODE_NTSC_720_480, SUBSAMPLING,
- 720, 1896, 480, 984,
- ov5640_setting_15fps_NTSC_720_480,
- ARRAY_SIZE(ov5640_setting_15fps_NTSC_720_480)},
- {OV5640_MODE_PAL_720_576, SUBSAMPLING,
- 720, 1896, 576, 984,
- ov5640_setting_15fps_PAL_720_576,
- ARRAY_SIZE(ov5640_setting_15fps_PAL_720_576)},
- {OV5640_MODE_XGA_1024_768, SUBSAMPLING,
- 1024, 1896, 768, 1080,
- ov5640_setting_15fps_XGA_1024_768,
- ARRAY_SIZE(ov5640_setting_15fps_XGA_1024_768)},
- {OV5640_MODE_720P_1280_720, SUBSAMPLING,
- 1280, 1892, 720, 740,
- ov5640_setting_15fps_720P_1280_720,
- ARRAY_SIZE(ov5640_setting_15fps_720P_1280_720)},
- {OV5640_MODE_1080P_1920_1080, SCALING,
- 1920, 2500, 1080, 1120,
- ov5640_setting_15fps_1080P_1920_1080,
- ARRAY_SIZE(ov5640_setting_15fps_1080P_1920_1080)},
- {OV5640_MODE_QSXGA_2592_1944, SCALING,
- 2592, 2844, 1944, 1968,
- ov5640_setting_15fps_QSXGA_2592_1944,
- ARRAY_SIZE(ov5640_setting_15fps_QSXGA_2592_1944)},
- }, {
- {OV5640_MODE_QCIF_176_144, SUBSAMPLING,
- 176, 1896, 144, 984,
- ov5640_setting_30fps_QCIF_176_144,
- ARRAY_SIZE(ov5640_setting_30fps_QCIF_176_144)},
- {OV5640_MODE_QVGA_320_240, SUBSAMPLING,
- 320, 1896, 240, 984,
- ov5640_setting_30fps_QVGA_320_240,
- ARRAY_SIZE(ov5640_setting_30fps_QVGA_320_240)},
- {OV5640_MODE_VGA_640_480, SUBSAMPLING,
- 640, 1896, 480, 1080,
- ov5640_setting_30fps_VGA_640_480,
- ARRAY_SIZE(ov5640_setting_30fps_VGA_640_480)},
- {OV5640_MODE_NTSC_720_480, SUBSAMPLING,
- 720, 1896, 480, 984,
- ov5640_setting_30fps_NTSC_720_480,
- ARRAY_SIZE(ov5640_setting_30fps_NTSC_720_480)},
- {OV5640_MODE_PAL_720_576, SUBSAMPLING,
- 720, 1896, 576, 984,
- ov5640_setting_30fps_PAL_720_576,
- ARRAY_SIZE(ov5640_setting_30fps_PAL_720_576)},
- {OV5640_MODE_XGA_1024_768, SUBSAMPLING,
- 1024, 1896, 768, 1080,
- ov5640_setting_30fps_XGA_1024_768,
- ARRAY_SIZE(ov5640_setting_30fps_XGA_1024_768)},
- {OV5640_MODE_720P_1280_720, SUBSAMPLING,
- 1280, 1892, 720, 740,
- ov5640_setting_30fps_720P_1280_720,
- ARRAY_SIZE(ov5640_setting_30fps_720P_1280_720)},
- {OV5640_MODE_1080P_1920_1080, SCALING,
- 1920, 2500, 1080, 1120,
- ov5640_setting_30fps_1080P_1920_1080,
- ARRAY_SIZE(ov5640_setting_30fps_1080P_1920_1080)},
- {OV5640_MODE_QSXGA_2592_1944, -1, 0, 0, 0, 0, NULL, 0},
- },
+ov5640_mode_data[OV5640_NUM_MODES] = {
+ {OV5640_MODE_QCIF_176_144, SUBSAMPLING,
+ 176, 1896, 144, 984,
+ ov5640_setting_QCIF_176_144,
+ ARRAY_SIZE(ov5640_setting_QCIF_176_144)},
+ {OV5640_MODE_QVGA_320_240, SUBSAMPLING,
+ 320, 1896, 240, 984,
+ ov5640_setting_QVGA_320_240,
+ ARRAY_SIZE(ov5640_setting_QVGA_320_240)},
+ {OV5640_MODE_VGA_640_480, SUBSAMPLING,
+ 640, 1896, 480, 1080,
+ ov5640_setting_VGA_640_480,
+ ARRAY_SIZE(ov5640_setting_VGA_640_480)},
+ {OV5640_MODE_NTSC_720_480, SUBSAMPLING,
+ 720, 1896, 480, 984,
+ ov5640_setting_NTSC_720_480,
+ ARRAY_SIZE(ov5640_setting_NTSC_720_480)},
+ {OV5640_MODE_PAL_720_576, SUBSAMPLING,
+ 720, 1896, 576, 984,
+ ov5640_setting_PAL_720_576,
+ ARRAY_SIZE(ov5640_setting_PAL_720_576)},
+ {OV5640_MODE_XGA_1024_768, SUBSAMPLING,
+ 1024, 1896, 768, 1080,
+ ov5640_setting_XGA_1024_768,
+ ARRAY_SIZE(ov5640_setting_XGA_1024_768)},
+ {OV5640_MODE_720P_1280_720, SUBSAMPLING,
+ 1280, 1892, 720, 740,
+ ov5640_setting_720P_1280_720,
+ ARRAY_SIZE(ov5640_setting_720P_1280_720)},
+ {OV5640_MODE_1080P_1920_1080, SCALING,
+ 1920, 2500, 1080, 1120,
+ ov5640_setting_1080P_1920_1080,
+ ARRAY_SIZE(ov5640_setting_1080P_1920_1080)},
+ {OV5640_MODE_QSXGA_2592_1944, SCALING,
+ 2592, 2844, 1944, 1968,
+ ov5640_setting_QSXGA_2592_1944,
+ ARRAY_SIZE(ov5640_setting_QSXGA_2592_1944)},
};
static int ov5640_init_slave_id(struct ov5640_dev *sensor)
@@ -909,6 +703,333 @@ static int ov5640_mod_reg(struct ov5640_dev *sensor, u16 reg,
return ov5640_write_reg(sensor, reg, val);
}
+/*
+ * After trying the various combinations, reading various
+ * documentation spread around the net, and gathering various
+ * feedback, the clock tree is probably as follows:
+ *
+ * +--------------+
+ * | Ext. Clock |
+ * +-+------------+
+ * | +----------+
+ * +->| PLL1 | - reg 0x3036, for the multiplier
+ * +-+--------+ - reg 0x3037, bits 0-3 for the pre-divider
+ * | +--------------+
+ * +->| System Clock | - reg 0x3035, bits 4-7
+ * +-+------------+
+ * | +--------------+
+ * +->| MIPI Divider | - reg 0x3035, bits 0-3
+ * | +-+------------+
+ * | +----------------> MIPI SCLK
+ * | + +-----+
+ * | +->| / 2 |-------> MIPI BIT CLK
+ * | +-----+
+ * | +--------------+
+ * +->| PLL Root Div | - reg 0x3037, bit 4
+ * +-+------------+
+ * | +---------+
+ * +->| Bit Div | - reg 0x3035, bits 0-3
+ * +-+-------+
+ * | +-------------+
+ * +->| SCLK Div | - reg 0x3108, bits 0-1
+ * | +-+-----------+
+ * | +---------------> SCLK
+ * | +-------------+
+ * +->| SCLK 2X Div | - reg 0x3108, bits 2-3
+ * | +-+-----------+
+ * | +---------------> SCLK 2X
+ * | +-------------+
+ * +->| PCLK Div | - reg 0x3108, bits 4-5
+ * ++------------+
+ * + +-----------+
+ * +->| P_DIV | - reg 0x3035, bits 0-3
+ * +-----+-----+
+ * +------------> PCLK
+ *
+ * This deviates from the datasheet at least for register 0x3108,
+ * since it is said here that the PCLK would be clocked from
+ * the PLL.
+ *
+ * There also seem to be some (unverified) constraints:
+ * - the PLL pre-divider output rate should be in the 4-27MHz range
+ * - the PLL multiplier output rate should be in the 500-1000MHz range
+ * - PCLK >= SCLK * 2 in YUV, >= SCLK in Raw or JPEG
+ *
+ * In the two latter cases, these constraints are met since our
+ * factors are hardcoded. If we were to change that, we would need to
+ * take this into account. The only varying parts are the PLL
+ * multiplier and the system clock divider, which are shared between
+ * all these clocks so won't cause any issue.
+ */
+
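+/*
+ * A worked example (hypothetical numbers, for illustration only): with
+ * a 24MHz external clock, OV5640_PLL_PREDIV = 3 and a PLL multiplier
+ * of 105, PLL1 outputs 24 / 3 * 105 = 840MHz. This satisfies both the
+ * 4-27MHz pre-divider constraint (24 / 3 = 8MHz) and the 500-1000MHz
+ * multiplier constraint. With a system divider of 1, the PLL root
+ * divider (2) and bit divider (2) bring this to 840 / 2 / 2 = 210MHz,
+ * and the SCLK divider (2) then yields a 105MHz SCLK.
+ */
+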
+/*
+ * This is supposed to range from 1 to 8, but the value is always
+ * set to 3 in the vendor kernels.
+ */
+#define OV5640_PLL_PREDIV 3
+
+#define OV5640_PLL_MULT_MIN 4
+#define OV5640_PLL_MULT_MAX 252
+
+/*
+ * This is supposed to range from 1 to 16, but the value is
+ * always set to either 1 or 2 in the vendor kernels.
+ */
+#define OV5640_SYSDIV_MIN 1
+#define OV5640_SYSDIV_MAX 16
+
+/*
+ * Hardcode these values for scaler and non-scaler modes.
+ * FIXME: to be re-calculated for 1 data lane setups
+ */
+#define OV5640_MIPI_DIV_PCLK 2
+#define OV5640_MIPI_DIV_SCLK 1
+
+/*
+ * This is supposed to range from 1 to 2, but the value is always
+ * set to 2 in the vendor kernels.
+ */
+#define OV5640_PLL_ROOT_DIV 2
+#define OV5640_PLL_CTRL3_PLL_ROOT_DIV_2 BIT(4)
+
+/*
+ * We only support 8-bit formats at the moment
+ */
+#define OV5640_BIT_DIV 2
+#define OV5640_PLL_CTRL0_MIPI_MODE_8BIT 0x08
+
+/*
+ * This is supposed to range from 1 to 8, but the value is always
+ * set to 2 in the vendor kernels.
+ */
+#define OV5640_SCLK_ROOT_DIV 2
+
+/*
+ * This is hardcoded to keep SCLK and SCLK 2x consistent.
+ */
+#define OV5640_SCLK2X_ROOT_DIV (OV5640_SCLK_ROOT_DIV / 2)
+
+/*
+ * This is supposed to range from 1 to 8, but the value is always
+ * set to 1 in the vendor kernels.
+ */
+#define OV5640_PCLK_ROOT_DIV 1
+#define OV5640_PLL_SYS_ROOT_DIVIDER_BYPASS 0x00
+
+static unsigned long ov5640_compute_sys_clk(struct ov5640_dev *sensor,
+ u8 pll_prediv, u8 pll_mult,
+ u8 sysdiv)
+{
+ unsigned long sysclk = sensor->xclk_freq / pll_prediv * pll_mult;
+
+ /* PLL1 output cannot exceed 1GHz. */
+ if (sysclk / 1000000 > 1000)
+ return 0;
+
+ return sysclk / sysdiv;
+}
+
+static unsigned long ov5640_calc_sys_clk(struct ov5640_dev *sensor,
+ unsigned long rate,
+ u8 *pll_prediv, u8 *pll_mult,
+ u8 *sysdiv)
+{
+ unsigned long best = ~0;
+ u8 best_sysdiv = 1, best_mult = 1;
+ u8 _sysdiv, _pll_mult;
+
+ for (_sysdiv = OV5640_SYSDIV_MIN;
+ _sysdiv <= OV5640_SYSDIV_MAX;
+ _sysdiv++) {
+ for (_pll_mult = OV5640_PLL_MULT_MIN;
+ _pll_mult <= OV5640_PLL_MULT_MAX;
+ _pll_mult++) {
+ unsigned long _rate;
+
+ /*
+ * The PLL multiplier cannot be odd if above
+ * 127.
+ */
+ if (_pll_mult > 127 && (_pll_mult % 2))
+ continue;
+
+ _rate = ov5640_compute_sys_clk(sensor,
+ OV5640_PLL_PREDIV,
+ _pll_mult, _sysdiv);
+
+ /*
+ * We have reached the maximum allowed PLL1 output,
+ * increase sysdiv.
+ */
+			if (!_rate)
+ break;
+
+ /*
+			 * Prefer rates above the expected clock rate over
+			 * rates below it, even if that means being less precise.
+ */
+ if (_rate < rate)
+ continue;
+
+ if (abs(rate - _rate) < abs(rate - best)) {
+ best = _rate;
+ best_sysdiv = _sysdiv;
+ best_mult = _pll_mult;
+ }
+
+ if (_rate == rate)
+ goto out;
+ }
+ }
+
+out:
+ *sysdiv = best_sysdiv;
+ *pll_prediv = OV5640_PLL_PREDIV;
+ *pll_mult = best_mult;
+
+ return best;
+}
+
+/*
+ * ov5640_set_mipi_pclk() - Calculate the clock tree configuration values
+ * for the MIPI CSI-2 output.
+ *
+ * @rate: The requested bandwidth per lane in bits per second.
+ * 'Bandwidth Per Lane' is calculated as:
+ * bpl = HTOT * VTOT * FPS * bpp / num_lanes;
+ *
+ * This function uses the requested bandwidth to calculate:
+ * - sample_rate = bpl / (bpp / num_lanes);
+ * = bpl / (PLL_RDIV * BIT_DIV * PCLK_DIV * MIPI_DIV / num_lanes);
+ *
+ * - mipi_sclk = bpl / MIPI_DIV / 2; ( / 2 is for CSI-2 DDR)
+ *
+ * with these fixed parameters:
+ * PLL_RDIV = 2;
+ *	 BIT_DIV = 2; (MIPI_BIT_MODE == 8 ? 2 : 2.5);
+ * PCLK_DIV = 1;
+ *
+ * The MIPI clock generation differs for modes that use the scaler and modes
+ * that do not. In case the scaler is in use, the MIPI_SCLK generates the MIPI
+ * BIT CLK, and thus:
+ *
+ * - mipi_sclk = bpl / MIPI_DIV / 2;
+ * MIPI_DIV = 1;
+ *
+ * For modes that do not go through the scaler, the MIPI BIT CLOCK is generated
+ * from the pixel clock, and thus:
+ *
+ * - sample_rate = bpl / (bpp / num_lanes);
+ * = bpl / (2 * 2 * 1 * MIPI_DIV / num_lanes);
+ * = bpl / (4 * MIPI_DIV / num_lanes);
+ * - MIPI_DIV = bpp / (4 * num_lanes);
+ *
+ * FIXME: this has been tested with 16bpp and 2-lane setups only.
+ * MIPI_DIV is fixed to value 2, but it -might- be changed according to the
+ * above formula for setups with 1 lane or image formats with different bpp.
+ *
+ * FIXME: this deviates from the sensor manual documentation, which is
+ * quite thin on the MIPI clock tree generation part.
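+ *
+ * As a worked example, using the 1080P mode totals from the table
+ * above (HTOT = 2500, VTOT = 1120) at 30fps, 16 bpp over 2 data lanes:
+ * bpl = 2500 * 1120 * 30 * 16 / 2 = 672000000, i.e. 672Mbps per lane.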
+ */
+static int ov5640_set_mipi_pclk(struct ov5640_dev *sensor,
+ unsigned long rate)
+{
+ const struct ov5640_mode_info *mode = sensor->current_mode;
+ u8 prediv, mult, sysdiv;
+ u8 mipi_div;
+ int ret;
+
+ /*
+ * 1280x720 is reported to use 'SUBSAMPLING' only,
+ * but according to the sensor manual it goes through the
+ * scaler before subsampling.
+ */
+ if (mode->dn_mode == SCALING ||
+ (mode->id == OV5640_MODE_720P_1280_720))
+ mipi_div = OV5640_MIPI_DIV_SCLK;
+ else
+ mipi_div = OV5640_MIPI_DIV_PCLK;
+
+ ov5640_calc_sys_clk(sensor, rate, &prediv, &mult, &sysdiv);
+
+	ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL0,
+			     0x0f, OV5640_PLL_CTRL0_MIPI_MODE_8BIT);
+	if (ret)
+		return ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL1,
+ 0xff, sysdiv << 4 | mipi_div);
+ if (ret)
+ return ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL2, 0xff, mult);
+ if (ret)
+ return ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL3,
+ 0x1f, OV5640_PLL_CTRL3_PLL_ROOT_DIV_2 | prediv);
+ if (ret)
+ return ret;
+
+ return ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER,
+ 0x30, OV5640_PLL_SYS_ROOT_DIVIDER_BYPASS);
+}
+
+static unsigned long ov5640_calc_pclk(struct ov5640_dev *sensor,
+ unsigned long rate,
+ u8 *pll_prediv, u8 *pll_mult, u8 *sysdiv,
+ u8 *pll_rdiv, u8 *bit_div, u8 *pclk_div)
+{
+ unsigned long _rate = rate * OV5640_PLL_ROOT_DIV * OV5640_BIT_DIV *
+ OV5640_PCLK_ROOT_DIV;
+
+ _rate = ov5640_calc_sys_clk(sensor, _rate, pll_prediv, pll_mult,
+ sysdiv);
+ *pll_rdiv = OV5640_PLL_ROOT_DIV;
+ *bit_div = OV5640_BIT_DIV;
+ *pclk_div = OV5640_PCLK_ROOT_DIV;
+
+ return _rate / *pll_rdiv / *bit_div / *pclk_div;
+}
+
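+/*
+ * A worked sketch of ov5640_calc_pclk() with a hypothetical rate: for
+ * a requested pixel clock of 96MHz, the system clock target becomes
+ * 96 * 2 * 2 * 1 = 384MHz (PLL root, bit and PCLK root dividers), and
+ * whatever rate ov5640_calc_sys_clk() actually achieves is divided
+ * back by those same three dividers before being returned.
+ */
+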
+static int ov5640_set_dvp_pclk(struct ov5640_dev *sensor, unsigned long rate)
+{
+ u8 prediv, mult, sysdiv, pll_rdiv, bit_div, pclk_div;
+ int ret;
+
+ ov5640_calc_pclk(sensor, rate, &prediv, &mult, &sysdiv, &pll_rdiv,
+ &bit_div, &pclk_div);
+
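+	/*
+	 * A bit divider of 2 is programmed as the 8-bit MIPI mode value
+	 * (0x08) in PLL CTRL0 bits [3:0], matching the 8-bit define above.
+	 */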
+ if (bit_div == 2)
+ bit_div = 8;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL0,
+ 0x0f, bit_div);
+ if (ret)
+ return ret;
+
+ /*
+ * We need to set sysdiv according to the clock, and to clear
+ * the MIPI divider.
+ */
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL1,
+ 0xff, sysdiv << 4);
+ if (ret)
+ return ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL2,
+ 0xff, mult);
+ if (ret)
+ return ret;
+
+ ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL3,
+ 0x1f, prediv | ((pll_rdiv - 1) << 4));
+ if (ret)
+ return ret;
+
+ return ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x30,
+ (ilog2(pclk_div) << 4));
+}
+
/* download ov5640 settings to sensor through i2c */
static int ov5640_set_timings(struct ov5640_dev *sensor,
const struct ov5640_mode_info *mode)
@@ -1062,16 +1183,6 @@ static int ov5640_set_stream_dvp(struct ov5640_dev *sensor, bool on)
if (on) {
/*
- * reset MIPI PCLK/SERCLK divider
- *
- * SC PLL CONTRL1 0
- * - [3..0]: MIPI PCLK/SERCLK divider
- */
- ret = ov5640_mod_reg(sensor, OV5640_REG_SC_PLL_CTRL1, 0x0f, 0);
- if (ret)
- return ret;
-
- /*
* configure parallel port control lines polarity
*
* POLARITY CTRL0
@@ -1444,8 +1555,8 @@ ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
{
const struct ov5640_mode_info *mode;
- mode = v4l2_find_nearest_size(ov5640_mode_data[fr],
- ARRAY_SIZE(ov5640_mode_data[fr]),
+ mode = v4l2_find_nearest_size(ov5640_mode_data,
+ ARRAY_SIZE(ov5640_mode_data),
hact, vact,
width, height);
@@ -1453,6 +1564,11 @@ ov5640_find_mode(struct ov5640_dev *sensor, enum ov5640_frame_rate fr,
(!nearest && (mode->hact != width || mode->vact != height)))
return NULL;
+ /* Only 640x480 can operate at 60fps (for now) */
+ if (fr == OV5640_60_FPS &&
+ !(mode->hact == 640 && mode->vact == 480))
+ return NULL;
+
return mode;
}
@@ -1637,6 +1753,7 @@ static int ov5640_set_mode(struct ov5640_dev *sensor)
enum ov5640_downsize_mode dn_mode, orig_dn_mode;
bool auto_gain = sensor->ctrls.auto_gain->val == 1;
bool auto_exp = sensor->ctrls.auto_exp->val == V4L2_EXPOSURE_AUTO;
+ unsigned long rate;
int ret;
dn_mode = mode->dn_mode;
@@ -1655,6 +1772,23 @@ static int ov5640_set_mode(struct ov5640_dev *sensor)
goto restore_auto_gain;
}
+ /*
+	 * All the formats we support have 16 bits per pixel and seem to
+	 * require the same rate as YUV, so we can just use 16 bpp all the time.
+ */
+ rate = mode->vtot * mode->htot * 16;
+ rate *= ov5640_framerates[sensor->current_fr];
+ if (sensor->ep.bus_type == V4L2_MBUS_CSI2_DPHY) {
+ rate = rate / sensor->ep.bus.mipi_csi2.num_data_lanes;
+ ret = ov5640_set_mipi_pclk(sensor, rate);
+ } else {
+ rate = rate / sensor->ep.bus.parallel.bus_width;
+ ret = ov5640_set_dvp_pclk(sensor, rate);
+ }
+
+	if (ret < 0)
+		return ret;
+
if ((dn_mode == SUBSAMPLING && orig_dn_mode == SCALING) ||
(dn_mode == SCALING && orig_dn_mode == SUBSAMPLING)) {
/*
@@ -1724,8 +1858,8 @@ static int ov5640_restore_mode(struct ov5640_dev *sensor)
sensor->last_mode = &ov5640_mode_init_data;
ret = ov5640_mod_reg(sensor, OV5640_REG_SYS_ROOT_DIVIDER, 0x3f,
- (ilog2(OV5640_SCLK2X_ROOT_DIVIDER_DEFAULT) << 2) |
- ilog2(OV5640_SCLK_ROOT_DIVIDER_DEFAULT));
+ (ilog2(OV5640_SCLK2X_ROOT_DIV) << 2) |
+ ilog2(OV5640_SCLK_ROOT_DIV));
if (ret)
return ret;
@@ -1925,34 +2059,39 @@ static int ov5640_try_frame_interval(struct ov5640_dev *sensor,
u32 width, u32 height)
{
const struct ov5640_mode_info *mode;
- u32 minfps, maxfps, fps;
- int ret;
+ enum ov5640_frame_rate rate = OV5640_30_FPS;
+ int minfps, maxfps, best_fps, fps;
+ int i;
minfps = ov5640_framerates[OV5640_15_FPS];
- maxfps = ov5640_framerates[OV5640_30_FPS];
+ maxfps = ov5640_framerates[OV5640_60_FPS];
if (fi->numerator == 0) {
fi->denominator = maxfps;
fi->numerator = 1;
- return OV5640_30_FPS;
+ rate = OV5640_60_FPS;
+ goto find_mode;
}
- fps = DIV_ROUND_CLOSEST(fi->denominator, fi->numerator);
+ fps = clamp_val(DIV_ROUND_CLOSEST(fi->denominator, fi->numerator),
+ minfps, maxfps);
- fi->numerator = 1;
- if (fps > maxfps)
- fi->denominator = maxfps;
- else if (fps < minfps)
- fi->denominator = minfps;
- else if (2 * fps >= 2 * minfps + (maxfps - minfps))
- fi->denominator = maxfps;
- else
- fi->denominator = minfps;
+ best_fps = minfps;
+ for (i = 0; i < ARRAY_SIZE(ov5640_framerates); i++) {
+ int curr_fps = ov5640_framerates[i];
- ret = (fi->denominator == minfps) ? OV5640_15_FPS : OV5640_30_FPS;
+ if (abs(curr_fps - fps) < abs(best_fps - fps)) {
+ best_fps = curr_fps;
+ rate = i;
+ }
+ }
- mode = ov5640_find_mode(sensor, ret, width, height, false);
- return mode ? ret : -EINVAL;
+ fi->numerator = 1;
+ fi->denominator = best_fps;
+
+find_mode:
+ mode = ov5640_find_mode(sensor, rate, width, height, false);
+ return mode ? rate : -EINVAL;
}
static int ov5640_get_fmt(struct v4l2_subdev *sd,
@@ -2020,6 +2159,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
struct ov5640_dev *sensor = to_ov5640_dev(sd);
const struct ov5640_mode_info *new_mode;
struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
+ struct v4l2_mbus_framefmt *fmt;
int ret;
if (format->pad != 0)
@@ -2037,22 +2177,20 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
if (ret)
goto out;
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *fmt =
- v4l2_subdev_get_try_format(sd, cfg, 0);
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ else
+ fmt = &sensor->fmt;
- *fmt = *mbus_fmt;
- goto out;
- }
+ *fmt = *mbus_fmt;
if (new_mode != sensor->current_mode) {
sensor->current_mode = new_mode;
sensor->pending_mode_change = true;
}
- if (mbus_fmt->code != sensor->fmt.code) {
- sensor->fmt = *mbus_fmt;
+ if (mbus_fmt->code != sensor->fmt.code)
sensor->pending_fmt_change = true;
- }
+
out:
mutex_unlock(&sensor->lock);
return ret;
@@ -2502,10 +2640,10 @@ static int ov5640_enum_frame_size(struct v4l2_subdev *sd,
return -EINVAL;
fse->min_width =
- ov5640_mode_data[0][fse->index].hact;
+ ov5640_mode_data[fse->index].hact;
fse->max_width = fse->min_width;
fse->min_height =
- ov5640_mode_data[0][fse->index].vact;
+ ov5640_mode_data[fse->index].vact;
fse->max_height = fse->min_height;
return 0;
@@ -2570,8 +2708,11 @@ static int ov5640_s_frame_interval(struct v4l2_subdev *sd,
frame_rate = ov5640_try_frame_interval(sensor, &fi->interval,
mode->hact, mode->vact);
- if (frame_rate < 0)
- frame_rate = OV5640_15_FPS;
+ if (frame_rate < 0) {
+ /* Always return a valid frame interval value */
+ fi->interval = sensor->frame_interval;
+ goto out;
+ }
mode = ov5640_find_mode(sensor, frame_rate, mode->hact,
mode->vact, true);
@@ -2641,6 +2782,9 @@ out:
static const struct v4l2_subdev_core_ops ov5640_core_ops = {
.s_power = ov5640_s_power,
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops ov5640_video_ops = {
@@ -2736,7 +2880,7 @@ static int ov5640_probe(struct i2c_client *client,
sensor->frame_interval.denominator = ov5640_framerates[OV5640_30_FPS];
sensor->current_fr = OV5640_30_FPS;
sensor->current_mode =
- &ov5640_mode_data[OV5640_30_FPS][OV5640_MODE_VGA_640_480];
+ &ov5640_mode_data[OV5640_MODE_VGA_640_480];
sensor->last_mode = sensor->current_mode;
sensor->ae_target = 52;
@@ -2795,7 +2939,8 @@ static int ov5640_probe(struct i2c_client *client,
v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
- sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 5eba8dd7222b..785f326ac519 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -886,7 +886,7 @@ static int ov5645_s_ctrl(struct v4l2_ctrl *ctrl)
return ret;
}
-static struct v4l2_ctrl_ops ov5645_ctrl_ops = {
+static const struct v4l2_ctrl_ops ov5645_ctrl_ops = {
.s_ctrl = ov5645_s_ctrl,
};
diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c
index bc68a3a5b4ec..a70a6ff7b36e 100644
--- a/drivers/media/i2c/ov7670.c
+++ b/drivers/media/i2c/ov7670.c
@@ -20,6 +20,7 @@
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mediabus.h>
@@ -1651,6 +1652,9 @@ static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static const struct v4l2_subdev_core_ops ov7670_core_ops = {
.reset = ov7670_reset,
.init = ov7670_init,
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov7670_g_register,
.s_register = ov7670_s_register,
@@ -1773,7 +1777,7 @@ static int ov7670_probe(struct i2c_client *client,
#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
sd->internal_ops = &ov7670_subdev_internal_ops;
- sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
#endif
info->clock_speed = 30; /* default: a guess */
diff --git a/drivers/media/i2c/ov772x.c b/drivers/media/i2c/ov772x.c
index fefff7fd7d68..2e9a758736a1 100644
--- a/drivers/media/i2c/ov772x.c
+++ b/drivers/media/i2c/ov772x.c
@@ -30,6 +30,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-subdev.h>
@@ -1287,6 +1288,9 @@ static const struct v4l2_ctrl_ops ov772x_ctrl_ops = {
};
static const struct v4l2_subdev_core_ops ov772x_subdev_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov772x_g_register,
.s_register = ov772x_s_register,
@@ -1379,7 +1383,8 @@ static int ov772x_probe(struct i2c_client *client,
mutex_init(&priv->lock);
v4l2_i2c_subdev_init(&priv->subdev, client, &ov772x_subdev_ops);
- priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ priv->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
v4l2_ctrl_handler_init(&priv->hdl, 3);
/* Use our mutex for the controls */
priv->hdl.lock = &priv->lock;
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 6e9c233cfbe3..177688afd9a6 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -322,7 +322,7 @@ static int ov7740_set_power(struct ov7740 *ov7740, int on)
return 0;
}
-static struct v4l2_subdev_core_ops ov7740_subdev_core_ops = {
+static const struct v4l2_subdev_core_ops ov7740_subdev_core_ops = {
.log_status = v4l2_ctrl_subdev_log_status,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.g_register = ov7740_get_register,
@@ -648,7 +648,7 @@ static int ov7740_s_frame_interval(struct v4l2_subdev *sd,
return 0;
}
-static struct v4l2_subdev_video_ops ov7740_subdev_video_ops = {
+static const struct v4l2_subdev_video_ops ov7740_subdev_video_ops = {
.s_stream = ov7740_set_stream,
.s_frame_interval = ov7740_s_frame_interval,
.g_frame_interval = ov7740_g_frame_interval,
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 41d470d9ca94..bc2e35e5ce61 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -59,7 +59,7 @@ static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
/* Pixel clock from REF_01 p. 20. Min/max height/width are unknown */
- V4L2_INIT_BT_TIMINGS(1, 10000, 1, 10000, 0, 165000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 13000000, 165000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE |
@@ -444,7 +444,7 @@ static void print_avi_infoframe(struct v4l2_subdev *sd)
i2c_rd(sd, PK_AVI_0HEAD, buffer, HDMI_INFOFRAME_SIZE(AVI));
- if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
v4l2_err(sd, "%s: unpack of AVI infoframe failed\n", __func__);
return;
}
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index c4c2a6134e1e..e8613e364403 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -1253,7 +1253,7 @@ tda1997x_parse_infoframe(struct tda1997x_state *state, u16 addr)
/* read data */
len = io_readn(sd, addr, sizeof(buffer), buffer);
- err = hdmi_infoframe_unpack(&frame, buffer);
+ err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
if (err) {
v4l_err(state->client,
"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
@@ -1928,7 +1928,7 @@ static int tda1997x_log_infoframe(struct v4l2_subdev *sd, int addr)
/* read data */
len = io_readn(sd, addr, sizeof(buffer), buffer);
v4l2_dbg(1, debug, sd, "infoframe: addr=%d len=%d\n", addr, len);
- err = hdmi_infoframe_unpack(&frame, buffer);
+ err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
if (err) {
v4l_err(state->client,
"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
index 9b4f21237810..06a78c2cdaab 100644
--- a/drivers/media/i2c/tda7432.c
+++ b/drivers/media/i2c/tda7432.c
@@ -19,7 +19,7 @@
*
* loudness - set between 0 and 15 for varying degrees of loudness effect
*
- * maxvol - set maximium volume to +20db (1), default is 0db(0)
+ * maxvol - set maximum volume to +20db (1), default is 0db(0)
*/
#include <linux/module.h>
@@ -53,7 +53,7 @@ MODULE_PARM_DESC(debug, "Set debugging level from 0 to 3. Default is off(0).");
module_param(loudness, int, S_IRUGO);
MODULE_PARM_DESC(loudness, "Turn loudness on(1) else off(0). Default is off(0).");
module_param(maxvol, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(maxvol, "Set maximium volume to +20dB(0) else +0dB(1). Default is +20dB(0).");
+MODULE_PARM_DESC(maxvol, "Set maximum volume to +20dB(0) else +0dB(1). Default is +20dB(0).");
/* Structure of address and subaddresses for the tda7432 */
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index 498ad2368cbc..f5ee28058ea2 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -49,7 +49,7 @@ static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1080, 25000000, 148500000,
V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
};
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 0e91b9949c3a..eaddd977ba40 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -1790,7 +1790,7 @@ static int tvp5150_probe(struct i2c_client *c,
tvp5150_isr, IRQF_TRIGGER_HIGH |
IRQF_ONESHOT, "tvp5150", core);
if (res)
- return res;
+ goto err;
}
res = v4l2_async_register_subdev(sd);
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 4d49af86c15e..01dcf179f203 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -17,6 +17,8 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
@@ -38,7 +40,7 @@ struct video_i2c_buffer {
};
struct video_i2c_data {
- struct i2c_client *client;
+ struct regmap *regmap;
const struct video_i2c_chip *chip;
struct mutex lock;
spinlock_t slock;
@@ -51,6 +53,8 @@ struct video_i2c_data {
struct task_struct *kthread_vid_cap;
struct list_head vid_cap_active;
+
+ struct v4l2_fract frame_interval;
};
static const struct v4l2_fmtdesc amg88xx_format = {
@@ -62,13 +66,20 @@ static const struct v4l2_frmsize_discrete amg88xx_size = {
.height = 8,
};
+static const struct regmap_config amg88xx_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff
+};
+
struct video_i2c_chip {
/* video dimensions */
const struct v4l2_fmtdesc *format;
const struct v4l2_frmsize_discrete *size;
- /* max frames per second */
- unsigned int max_fps;
+ /* available frame intervals */
+ const struct v4l2_fract *frame_intervals;
+ unsigned int num_frame_intervals;
/* pixel buffer size */
unsigned int buffer_size;
@@ -76,33 +87,111 @@ struct video_i2c_chip {
/* pixel size in bits */
unsigned int bpp;
+ const struct regmap_config *regmap_config;
+
+ /* setup function */
+ int (*setup)(struct video_i2c_data *data);
+
/* xfer function */
int (*xfer)(struct video_i2c_data *data, char *buf);
+ /* power control function */
+ int (*set_power)(struct video_i2c_data *data, bool on);
+
/* hwmon init function */
int (*hwmon_init)(struct video_i2c_data *data);
};
+/* Power control register */
+#define AMG88XX_REG_PCTL 0x00
+#define AMG88XX_PCTL_NORMAL 0x00
+#define AMG88XX_PCTL_SLEEP 0x10
+
+/* Reset register */
+#define AMG88XX_REG_RST 0x01
+#define AMG88XX_RST_FLAG 0x30
+#define AMG88XX_RST_INIT 0x3f
+
+/* Frame rate register */
+#define AMG88XX_REG_FPSC 0x02
+#define AMG88XX_FPSC_1FPS BIT(0)
+
+/* Thermistor register */
+#define AMG88XX_REG_TTHL 0x0e
+
+/* Temperature register */
+#define AMG88XX_REG_T01L 0x80
+
static int amg88xx_xfer(struct video_i2c_data *data, char *buf)
{
- struct i2c_client *client = data->client;
- struct i2c_msg msg[2];
- u8 reg = 0x80;
+ return regmap_bulk_read(data->regmap, AMG88XX_REG_T01L, buf,
+ data->chip->buffer_size);
+}
+
+static int amg88xx_setup(struct video_i2c_data *data)
+{
+ unsigned int mask = AMG88XX_FPSC_1FPS;
+ unsigned int val;
+
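+	/* FPSC bit 0 set selects 1fps; cleared, the sensor runs at 10fps */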
+ if (data->frame_interval.numerator == data->frame_interval.denominator)
+ val = mask;
+ else
+ val = 0;
+
+ return regmap_update_bits(data->regmap, AMG88XX_REG_FPSC, mask, val);
+}
+
+static int amg88xx_set_power_on(struct video_i2c_data *data)
+{
+ int ret;
+
+ ret = regmap_write(data->regmap, AMG88XX_REG_PCTL, AMG88XX_PCTL_NORMAL);
+ if (ret)
+ return ret;
+
+ msleep(50);
+
+ ret = regmap_write(data->regmap, AMG88XX_REG_RST, AMG88XX_RST_INIT);
+ if (ret)
+ return ret;
+
+ usleep_range(2000, 3000);
+
+ ret = regmap_write(data->regmap, AMG88XX_REG_RST, AMG88XX_RST_FLAG);
+ if (ret)
+ return ret;
+
+ /*
+ * Wait two frames before reading thermistor and temperature registers
+ */
+ msleep(200);
+
+ return 0;
+}
+
+static int amg88xx_set_power_off(struct video_i2c_data *data)
+{
int ret;
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = (char *)&reg;
+ ret = regmap_write(data->regmap, AMG88XX_REG_PCTL, AMG88XX_PCTL_SLEEP);
+ if (ret)
+ return ret;
+ /*
+	 * Wait a while to avoid resuming normal mode immediately after
+	 * entering sleep mode; otherwise the device occasionally misbehaves
+	 * (thermistor and temperature registers are not updated at all).
+ */
+ msleep(100);
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = data->chip->buffer_size;
- msg[1].buf = (char *)buf;
+ return 0;
+}
- ret = i2c_transfer(client->adapter, msg, 2);
+static int amg88xx_set_power(struct video_i2c_data *data, bool on)
+{
+ if (on)
+ return amg88xx_set_power_on(data);
- return (ret == 2) ? 0 : -EIO;
+ return amg88xx_set_power_off(data);
}
#if IS_ENABLED(CONFIG_HWMON)
@@ -133,12 +222,23 @@ static int amg88xx_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
struct video_i2c_data *data = dev_get_drvdata(dev);
- struct i2c_client *client = data->client;
- int tmp = i2c_smbus_read_word_data(client, 0x0e);
+ __le16 buf;
+ int tmp;
+
+ tmp = pm_runtime_get_sync(regmap_get_device(data->regmap));
+ if (tmp < 0) {
+ pm_runtime_put_noidle(regmap_get_device(data->regmap));
+ return tmp;
+ }
- if (tmp < 0)
+ tmp = regmap_bulk_read(data->regmap, AMG88XX_REG_TTHL, &buf, 2);
+ pm_runtime_mark_last_busy(regmap_get_device(data->regmap));
+ pm_runtime_put_autosuspend(regmap_get_device(data->regmap));
+ if (tmp)
return tmp;
+ tmp = le16_to_cpu(buf);
+
/*
* Check for sign bit, this isn't a two's complement value but an
* absolute temperature that needs to be inverted in the case of being
@@ -164,8 +264,9 @@ static const struct hwmon_chip_info amg88xx_chip_info = {
static int amg88xx_hwmon_init(struct video_i2c_data *data)
{
- void *hwmon = devm_hwmon_device_register_with_info(&data->client->dev,
- "amg88xx", data, &amg88xx_chip_info, NULL);
+ struct device *dev = regmap_get_device(data->regmap);
+ void *hwmon = devm_hwmon_device_register_with_info(dev, "amg88xx", data,
+ &amg88xx_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon);
}
@@ -175,14 +276,23 @@ static int amg88xx_hwmon_init(struct video_i2c_data *data)
#define AMG88XX 0
+static const struct v4l2_fract amg88xx_frame_intervals[] = {
+ { 1, 10 },
+ { 1, 1 },
+};
+
static const struct video_i2c_chip video_i2c_chip[] = {
[AMG88XX] = {
.size = &amg88xx_size,
.format = &amg88xx_format,
- .max_fps = 10,
+ .frame_intervals = amg88xx_frame_intervals,
+ .num_frame_intervals = ARRAY_SIZE(amg88xx_frame_intervals),
.buffer_size = 128,
.bpp = 16,
+ .regmap_config = &amg88xx_regmap_config,
+ .setup = &amg88xx_setup,
.xfer = &amg88xx_xfer,
+ .set_power = amg88xx_set_power,
.hwmon_init = amg88xx_hwmon_init,
},
};
@@ -246,7 +356,8 @@ static void buffer_queue(struct vb2_buffer *vb)
static int video_i2c_thread_vid_cap(void *priv)
{
struct video_i2c_data *data = priv;
- unsigned int delay = msecs_to_jiffies(1000 / data->chip->max_fps);
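+	/* e.g. a 1/10s frame interval works out to a delay of HZ / 10 jiffies */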
+ unsigned int delay = mult_frac(HZ, data->frame_interval.numerator,
+ data->frame_interval.denominator);
set_freezable();
@@ -308,19 +419,36 @@ static void video_i2c_del_list(struct vb2_queue *vq, enum vb2_buffer_state state
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct video_i2c_data *data = vb2_get_drv_priv(vq);
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
if (data->kthread_vid_cap)
return 0;
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ goto error_del_list;
+ }
+
+ ret = data->chip->setup(data);
+ if (ret)
+ goto error_rpm_put;
+
data->sequence = 0;
data->kthread_vid_cap = kthread_run(video_i2c_thread_vid_cap, data,
"%s-vid-cap", data->v4l2_dev.name);
- if (!IS_ERR(data->kthread_vid_cap))
+ ret = PTR_ERR_OR_ZERO(data->kthread_vid_cap);
+ if (!ret)
return 0;
+error_rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+error_del_list:
video_i2c_del_list(vq, VB2_BUF_STATE_QUEUED);
- return PTR_ERR(data->kthread_vid_cap);
+ return ret;
}
static void stop_streaming(struct vb2_queue *vq)
@@ -332,11 +460,13 @@ static void stop_streaming(struct vb2_queue *vq)
kthread_stop(data->kthread_vid_cap);
data->kthread_vid_cap = NULL;
+ pm_runtime_mark_last_busy(regmap_get_device(data->regmap));
+ pm_runtime_put_autosuspend(regmap_get_device(data->regmap));
video_i2c_del_list(vq, VB2_BUF_STATE_ERROR);
}
-static struct vb2_ops video_i2c_video_qops = {
+static const struct vb2_ops video_i2c_video_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
@@ -350,7 +480,8 @@ static int video_i2c_querycap(struct file *file, void *priv,
struct v4l2_capability *vcap)
{
struct video_i2c_data *data = video_drvdata(file);
- struct i2c_client *client = data->client;
+ struct device *dev = regmap_get_device(data->regmap);
+ struct i2c_client *client = to_i2c_client(dev);
strscpy(vcap->driver, data->v4l2_dev.name, sizeof(vcap->driver));
strscpy(vcap->card, data->vdev.name, sizeof(vcap->card));
@@ -426,15 +557,14 @@ static int video_i2c_enum_frameintervals(struct file *file, void *priv,
const struct video_i2c_data *data = video_drvdata(file);
const struct v4l2_frmsize_discrete *size = data->chip->size;
- if (fe->index > 0)
+ if (fe->index >= data->chip->num_frame_intervals)
return -EINVAL;
if (fe->width != size->width || fe->height != size->height)
return -EINVAL;
fe->type = V4L2_FRMIVAL_TYPE_DISCRETE;
- fe->discrete.numerator = 1;
- fe->discrete.denominator = data->chip->max_fps;
+ fe->discrete = data->chip->frame_intervals[fe->index];
return 0;
}
@@ -479,12 +609,27 @@ static int video_i2c_g_parm(struct file *filp, void *priv,
parm->parm.capture.readbuffers = 1;
parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
- parm->parm.capture.timeperframe.numerator = 1;
- parm->parm.capture.timeperframe.denominator = data->chip->max_fps;
+ parm->parm.capture.timeperframe = data->frame_interval;
return 0;
}
+static int video_i2c_s_parm(struct file *filp, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct video_i2c_data *data = video_drvdata(filp);
+ int i;
+
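+	/*
+	 * Pick the shortest supported interval that is not shorter than
+	 * the request; e.g. on amg88xx ({ 1/10, 1/1 }) a requested 1/30s
+	 * snaps to 1/10s, while 1/5s falls through to the last and
+	 * slowest entry, 1s.
+	 */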
+ for (i = 0; i < data->chip->num_frame_intervals - 1; i++) {
+ if (V4L2_FRACT_COMPARE(parm->parm.capture.timeperframe, <=,
+ data->chip->frame_intervals[i]))
+ break;
+ }
+ data->frame_interval = data->chip->frame_intervals[i];
+
+ return video_i2c_g_parm(filp, priv, parm);
+}
+
static const struct v4l2_ioctl_ops video_i2c_ioctl_ops = {
.vidioc_querycap = video_i2c_querycap,
.vidioc_g_input = video_i2c_g_input,
@@ -496,7 +641,7 @@ static const struct v4l2_ioctl_ops video_i2c_ioctl_ops = {
.vidioc_g_fmt_vid_cap = video_i2c_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = video_i2c_s_fmt_vid_cap,
.vidioc_g_parm = video_i2c_g_parm,
- .vidioc_s_parm = video_i2c_g_parm,
+ .vidioc_s_parm = video_i2c_s_parm,
.vidioc_try_fmt_vid_cap = video_i2c_try_fmt_vid_cap,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
@@ -510,7 +655,13 @@ static const struct v4l2_ioctl_ops video_i2c_ioctl_ops = {
static void video_i2c_release(struct video_device *vdev)
{
- kfree(video_get_drvdata(vdev));
+ struct video_i2c_data *data = video_get_drvdata(vdev);
+
+ v4l2_device_unregister(&data->v4l2_dev);
+ mutex_destroy(&data->lock);
+ mutex_destroy(&data->queue_lock);
+ regmap_exit(data->regmap);
+ kfree(data);
}
static int video_i2c_probe(struct i2c_client *client,
@@ -532,13 +683,18 @@ static int video_i2c_probe(struct i2c_client *client,
else
goto error_free_device;
- data->client = client;
+ data->regmap = regmap_init_i2c(client, data->chip->regmap_config);
+ if (IS_ERR(data->regmap)) {
+ ret = PTR_ERR(data->regmap);
+ goto error_free_device;
+ }
+
v4l2_dev = &data->v4l2_dev;
strscpy(v4l2_dev->name, VIDEO_I2C_DRIVER, sizeof(v4l2_dev->name));
ret = v4l2_device_register(&client->dev, v4l2_dev);
if (ret < 0)
- goto error_free_device;
+ goto error_regmap_exit;
mutex_init(&data->lock);
mutex_init(&data->queue_lock);
@@ -575,9 +731,23 @@ static int video_i2c_probe(struct i2c_client *client,
spin_lock_init(&data->slock);
INIT_LIST_HEAD(&data->vid_cap_active);
+ data->frame_interval = data->chip->frame_intervals[0];
+
video_set_drvdata(&data->vdev, data);
i2c_set_clientdata(client, data);
+ if (data->chip->set_power) {
+ ret = data->chip->set_power(data, true);
+ if (ret)
+ goto error_unregister_device;
+ }
+
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev, 2000);
+ pm_runtime_use_autosuspend(&client->dev);
+
if (data->chip->hwmon_init) {
ret = data->chip->hwmon_init(data);
if (ret < 0) {
@@ -588,15 +758,29 @@ static int video_i2c_probe(struct i2c_client *client,
ret = video_register_device(&data->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0)
- goto error_unregister_device;
+ goto error_pm_disable;
+
+ pm_runtime_mark_last_busy(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
return 0;
+error_pm_disable:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
+ if (data->chip->set_power)
+ data->chip->set_power(data, false);
+
error_unregister_device:
v4l2_device_unregister(v4l2_dev);
mutex_destroy(&data->lock);
mutex_destroy(&data->queue_lock);
+error_regmap_exit:
+ regmap_exit(data->regmap);
+
error_free_device:
kfree(data);
@@ -607,15 +791,48 @@ static int video_i2c_remove(struct i2c_client *client)
{
struct video_i2c_data *data = i2c_get_clientdata(client);
- video_unregister_device(&data->vdev);
- v4l2_device_unregister(&data->v4l2_dev);
+ pm_runtime_get_sync(&client->dev);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
- mutex_destroy(&data->lock);
- mutex_destroy(&data->queue_lock);
+ if (data->chip->set_power)
+ data->chip->set_power(data, false);
+
+ video_unregister_device(&data->vdev);
return 0;
}
+#ifdef CONFIG_PM
+
+static int video_i2c_pm_runtime_suspend(struct device *dev)
+{
+ struct video_i2c_data *data = i2c_get_clientdata(to_i2c_client(dev));
+
+ if (!data->chip->set_power)
+ return 0;
+
+ return data->chip->set_power(data, false);
+}
+
+static int video_i2c_pm_runtime_resume(struct device *dev)
+{
+ struct video_i2c_data *data = i2c_get_clientdata(to_i2c_client(dev));
+
+ if (!data->chip->set_power)
+ return 0;
+
+ return data->chip->set_power(data, true);
+}
+
+#endif
+
+static const struct dev_pm_ops video_i2c_pm_ops = {
+ SET_RUNTIME_PM_OPS(video_i2c_pm_runtime_suspend,
+ video_i2c_pm_runtime_resume, NULL)
+};
+
static const struct i2c_device_id video_i2c_id_table[] = {
{ "amg88xx", AMG88XX },
{}
@@ -632,6 +849,7 @@ static struct i2c_driver video_i2c_driver = {
.driver = {
.name = VIDEO_I2C_DRIVER,
.of_match_table = video_i2c_of_match,
+ .pm = &video_i2c_pm_ops,
},
.probe = video_i2c_probe,
.remove = video_i2c_remove,
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index bed24372e61f..b8ec88612df7 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -381,10 +381,14 @@ static long media_device_get_topology(struct media_device *mdev, void *arg)
static long media_device_request_alloc(struct media_device *mdev,
int *alloc_fd)
{
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
return -ENOTTY;
return media_request_alloc(mdev, alloc_fd);
+#else
+ return -ENOTTY;
+#endif
}
static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
diff --git a/drivers/media/media-request.c b/drivers/media/media-request.c
index 4e9db1fed697..c71a34ae6383 100644
--- a/drivers/media/media-request.c
+++ b/drivers/media/media-request.c
@@ -238,6 +238,9 @@ static const struct file_operations request_fops = {
.owner = THIS_MODULE,
.poll = media_request_poll,
.unlocked_ioctl = media_request_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = media_request_ioctl,
+#endif /* CONFIG_COMPAT */
.release = media_request_close,
};
diff --git a/drivers/media/pci/b2c2/flexcop-dma.c b/drivers/media/pci/b2c2/flexcop-dma.c
index f07610a1646d..ba45b378d739 100644
--- a/drivers/media/pci/b2c2/flexcop-dma.c
+++ b/drivers/media/pci/b2c2/flexcop-dma.c
@@ -17,7 +17,8 @@ int flexcop_dma_allocate(struct pci_dev *pdev,
return -EINVAL;
}
- if ((tcpu = pci_alloc_consistent(pdev, size, &tdma)) != NULL) {
+ tcpu = pci_alloc_consistent(pdev, size, &tdma);
+ if (tcpu != NULL) {
dma->pdev = pdev;
dma->cpu_addr0 = tcpu;
dma->dma_addr0 = tdma;
@@ -34,7 +35,7 @@ void flexcop_dma_free(struct flexcop_dma *dma)
{
pci_free_consistent(dma->pdev, dma->size*2,
dma->cpu_addr0, dma->dma_addr0);
- memset(dma,0,sizeof(struct flexcop_dma));
+ memset(dma, 0, sizeof(struct flexcop_dma));
}
EXPORT_SYMBOL(flexcop_dma_free);
@@ -42,23 +43,24 @@ int flexcop_dma_config(struct flexcop_device *fc,
struct flexcop_dma *dma,
flexcop_dma_index_t dma_idx)
{
- flexcop_ibi_value v0x0,v0x4,v0xc;
- v0x0.raw = v0x4.raw = v0xc.raw = 0;
+ flexcop_ibi_value v0x0, v0x4, v0xc;
+ v0x0.raw = v0x4.raw = v0xc.raw = 0;
v0x0.dma_0x0.dma_address0 = dma->dma_addr0 >> 2;
v0xc.dma_0xc.dma_address1 = dma->dma_addr1 >> 2;
v0x4.dma_0x4_write.dma_addr_size = dma->size / 4;
if ((dma_idx & FC_DMA_1) == dma_idx) {
- fc->write_ibi_reg(fc,dma1_000,v0x0);
- fc->write_ibi_reg(fc,dma1_004,v0x4);
- fc->write_ibi_reg(fc,dma1_00c,v0xc);
+ fc->write_ibi_reg(fc, dma1_000, v0x0);
+ fc->write_ibi_reg(fc, dma1_004, v0x4);
+ fc->write_ibi_reg(fc, dma1_00c, v0xc);
} else if ((dma_idx & FC_DMA_2) == dma_idx) {
- fc->write_ibi_reg(fc,dma2_010,v0x0);
- fc->write_ibi_reg(fc,dma2_014,v0x4);
- fc->write_ibi_reg(fc,dma2_01c,v0xc);
+ fc->write_ibi_reg(fc, dma2_010, v0x0);
+ fc->write_ibi_reg(fc, dma2_014, v0x4);
+ fc->write_ibi_reg(fc, dma2_01c, v0xc);
} else {
- err("either DMA1 or DMA2 can be configured within one flexcop_dma_config call.");
+ err("either DMA1 or DMA2 can be configured within one %s call.",
+ __func__);
return -EINVAL;
}
@@ -72,8 +74,8 @@ int flexcop_dma_xfer_control(struct flexcop_device *fc,
flexcop_dma_addr_index_t index,
int onoff)
{
- flexcop_ibi_value v0x0,v0xc;
- flexcop_ibi_register r0x0,r0xc;
+ flexcop_ibi_value v0x0, v0xc;
+ flexcop_ibi_register r0x0, r0xc;
if ((dma_idx & FC_DMA_1) == dma_idx) {
r0x0 = dma1_000;
@@ -82,15 +84,16 @@ int flexcop_dma_xfer_control(struct flexcop_device *fc,
r0x0 = dma2_010;
r0xc = dma2_01c;
} else {
- err("either transfer DMA1 or DMA2 can be started within one flexcop_dma_xfer_control call.");
+ err("transfer DMA1 or DMA2 can be started within one %s call.",
+ __func__);
return -EINVAL;
}
- v0x0 = fc->read_ibi_reg(fc,r0x0);
- v0xc = fc->read_ibi_reg(fc,r0xc);
+ v0x0 = fc->read_ibi_reg(fc, r0x0);
+ v0xc = fc->read_ibi_reg(fc, r0xc);
- deb_rdump("reg: %03x: %x\n",r0x0,v0x0.raw);
- deb_rdump("reg: %03x: %x\n",r0xc,v0xc.raw);
+ deb_rdump("reg: %03x: %x\n", r0x0, v0x0.raw);
+ deb_rdump("reg: %03x: %x\n", r0xc, v0xc.raw);
if (index & FC_DMA_SUBADDR_0)
v0x0.dma_0x0.dma_0start = onoff;
@@ -98,11 +101,11 @@ int flexcop_dma_xfer_control(struct flexcop_device *fc,
if (index & FC_DMA_SUBADDR_1)
v0xc.dma_0xc.dma_1start = onoff;
- fc->write_ibi_reg(fc,r0x0,v0x0);
- fc->write_ibi_reg(fc,r0xc,v0xc);
+ fc->write_ibi_reg(fc, r0x0, v0x0);
+ fc->write_ibi_reg(fc, r0xc, v0xc);
- deb_rdump("reg: %03x: %x\n",r0x0,v0x0.raw);
- deb_rdump("reg: %03x: %x\n",r0xc,v0xc.raw);
+ deb_rdump("reg: %03x: %x\n", r0x0, v0x0.raw);
+ deb_rdump("reg: %03x: %x\n", r0xc, v0xc.raw);
return 0;
}
EXPORT_SYMBOL(flexcop_dma_xfer_control);
@@ -112,10 +115,11 @@ static int flexcop_dma_remap(struct flexcop_device *fc,
int onoff)
{
flexcop_ibi_register r = (dma_idx & FC_DMA_1) ? dma1_00c : dma2_01c;
- flexcop_ibi_value v = fc->read_ibi_reg(fc,r);
- deb_info("%s\n",__func__);
+ flexcop_ibi_value v = fc->read_ibi_reg(fc, r);
+
+ deb_info("%s\n", __func__);
v.dma_0xc.remap_enable = onoff;
- fc->write_ibi_reg(fc,r,v);
+ fc->write_ibi_reg(fc, r, v);
return 0;
}
@@ -123,7 +127,7 @@ int flexcop_dma_control_size_irq(struct flexcop_device *fc,
flexcop_dma_index_t no,
int onoff)
{
- flexcop_ibi_value v = fc->read_ibi_reg(fc,ctrl_208);
+ flexcop_ibi_value v = fc->read_ibi_reg(fc, ctrl_208);
if (no & FC_DMA_1)
v.ctrl_208.DMA1_IRQ_Enable_sig = onoff;
@@ -131,7 +135,7 @@ int flexcop_dma_control_size_irq(struct flexcop_device *fc,
if (no & FC_DMA_2)
v.ctrl_208.DMA2_IRQ_Enable_sig = onoff;
- fc->write_ibi_reg(fc,ctrl_208,v);
+ fc->write_ibi_reg(fc, ctrl_208, v);
return 0;
}
EXPORT_SYMBOL(flexcop_dma_control_size_irq);
@@ -140,7 +144,7 @@ int flexcop_dma_control_timer_irq(struct flexcop_device *fc,
flexcop_dma_index_t no,
int onoff)
{
- flexcop_ibi_value v = fc->read_ibi_reg(fc,ctrl_208);
+ flexcop_ibi_value v = fc->read_ibi_reg(fc, ctrl_208);
if (no & FC_DMA_1)
v.ctrl_208.DMA1_Timer_Enable_sig = onoff;
@@ -148,7 +152,7 @@ int flexcop_dma_control_timer_irq(struct flexcop_device *fc,
if (no & FC_DMA_2)
v.ctrl_208.DMA2_Timer_Enable_sig = onoff;
- fc->write_ibi_reg(fc,ctrl_208,v);
+ fc->write_ibi_reg(fc, ctrl_208, v);
return 0;
}
EXPORT_SYMBOL(flexcop_dma_control_timer_irq);
@@ -158,13 +162,13 @@ int flexcop_dma_config_timer(struct flexcop_device *fc,
flexcop_dma_index_t dma_idx, u8 cycles)
{
flexcop_ibi_register r = (dma_idx & FC_DMA_1) ? dma1_004 : dma2_014;
- flexcop_ibi_value v = fc->read_ibi_reg(fc,r);
+ flexcop_ibi_value v = fc->read_ibi_reg(fc, r);
- flexcop_dma_remap(fc,dma_idx,0);
+ flexcop_dma_remap(fc, dma_idx, 0);
- deb_info("%s\n",__func__);
+ deb_info("%s\n", __func__);
v.dma_0x4_write.dmatimer = cycles;
- fc->write_ibi_reg(fc,r,v);
+ fc->write_ibi_reg(fc, r, v);
return 0;
}
EXPORT_SYMBOL(flexcop_dma_config_timer);
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index d4906c04dc6e..d09785fd37a8 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -2792,19 +2792,17 @@ static int bttv_g_tuner(struct file *file, void *priv,
return 0;
}
-static int bttv_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *cap)
+static int bttv_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct bttv_fh *fh = priv;
struct bttv *btv = fh->btv;
- if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
/* defrect and bounds are set via g_selection */
- cap->pixelaspect = bttv_tvnorms[btv->tvnorm].cropcap.pixelaspect;
-
+ *f = bttv_tvnorms[btv->tvnorm].cropcap.pixelaspect;
return 0;
}
@@ -3162,7 +3160,7 @@ static const struct v4l2_ioctl_ops bttv_ioctl_ops = {
.vidioc_g_fmt_vbi_cap = bttv_g_fmt_vbi_cap,
.vidioc_try_fmt_vbi_cap = bttv_try_fmt_vbi_cap,
.vidioc_s_fmt_vbi_cap = bttv_s_fmt_vbi_cap,
- .vidioc_cropcap = bttv_cropcap,
+ .vidioc_g_pixelaspect = bttv_g_pixelaspect,
.vidioc_reqbufs = bttv_reqbufs,
.vidioc_querybuf = bttv_querybuf,
.vidioc_qbuf = bttv_qbuf,
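
The cropcap-to-pixelaspect conversions above do not break the legacy ioctl: the V4L2 core is expected to service VIDIOC_CROPCAP from a driver's g_pixelaspect and g_selection callbacks. A minimal userspace sketch of the unchanged ABI, assuming a capture node at /dev/video0 (the path is an assumption, not part of the patch):

    /* Query the pixel aspect through the legacy VIDIOC_CROPCAP ioctl. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_cropcap cc = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
            int fd = open("/dev/video0", O_RDWR);

            if (fd < 0 || ioctl(fd, VIDIOC_CROPCAP, &cc) < 0) {
                    perror("VIDIOC_CROPCAP");
                    return 1;
            }
            printf("pixelaspect %u/%u\n", cc.pixelaspect.numerator,
                   cc.pixelaspect.denominator);
            return 0;
    }
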
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index 0525f5e1565b..c088de551081 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -1077,33 +1077,65 @@ static int cobalt_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
return 0;
}
-static int cobalt_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cc)
+static int cobalt_g_pixelaspect(struct file *file, void *fh,
+ int type, struct v4l2_fract *f)
{
struct cobalt_stream *s = video_drvdata(file);
struct v4l2_dv_timings timings;
int err = 0;
- if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
+
if (s->input == 1)
timings = cea1080p60;
else
err = v4l2_subdev_call(s->sd, video, g_dv_timings, &timings);
- if (!err) {
- cc->bounds.width = cc->defrect.width = timings.bt.width;
- cc->bounds.height = cc->defrect.height = timings.bt.height;
- cc->pixelaspect = v4l2_dv_timings_aspect_ratio(&timings);
- }
+ if (!err)
+ *f = v4l2_dv_timings_aspect_ratio(&timings);
return err;
}
+static int cobalt_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct cobalt_stream *s = video_drvdata(file);
+ struct v4l2_dv_timings timings;
+ int err = 0;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ if (s->input == 1)
+ timings = cea1080p60;
+ else
+ err = v4l2_subdev_call(s->sd, video, g_dv_timings, &timings);
+
+ if (err)
+ return err;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = timings.bt.width;
+ sel->r.height = timings.bt.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static const struct v4l2_ioctl_ops cobalt_ioctl_ops = {
.vidioc_querycap = cobalt_querycap,
.vidioc_g_parm = cobalt_g_parm,
.vidioc_log_status = cobalt_log_status,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_cropcap = cobalt_cropcap,
+ .vidioc_g_pixelaspect = cobalt_g_pixelaspect,
+ .vidioc_g_selection = cobalt_g_selection,
.vidioc_enum_input = cobalt_enum_input,
.vidioc_g_input = cobalt_g_input,
.vidioc_s_input = cobalt_s_input,
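
cobalt now also implements g_selection for the crop bounds and default rectangle; a companion sketch (same /dev/video0 assumption as above) querying the default crop:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            struct v4l2_selection sel = {
                    .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
                    .target = V4L2_SEL_TGT_CROP_DEFAULT,
            };
            int fd = open("/dev/video0", O_RDWR);

            if (fd < 0 || ioctl(fd, VIDIOC_G_SELECTION, &sel) < 0) {
                    perror("VIDIOC_G_SELECTION");
                    return 1;
            }
            printf("default crop %ux%u@(%d,%d)\n", sel.r.width,
                   sel.r.height, sel.r.left, sel.r.top);
            return 0;
    }
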
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 854116375a7c..8c54b17f382a 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -441,15 +441,16 @@ static int cx18_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
return cx18_get_input(cx, vin->index, vin);
}
-static int cx18_cropcap(struct file *file, void *fh,
- struct v4l2_cropcap *cropcap)
+static int cx18_g_pixelaspect(struct file *file, void *fh,
+ int type, struct v4l2_fract *f)
{
struct cx18 *cx = fh2id(fh)->cx;
- if (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- cropcap->pixelaspect.numerator = cx->is_50hz ? 54 : 11;
- cropcap->pixelaspect.denominator = cx->is_50hz ? 59 : 10;
+
+ f->numerator = cx->is_50hz ? 54 : 11;
+ f->denominator = cx->is_50hz ? 59 : 10;
return 0;
}
@@ -1079,7 +1080,7 @@ static const struct v4l2_ioctl_ops cx18_ioctl_ops = {
.vidioc_g_audio = cx18_g_audio,
.vidioc_enumaudio = cx18_enumaudio,
.vidioc_enum_input = cx18_enum_input,
- .vidioc_cropcap = cx18_cropcap,
+ .vidioc_g_pixelaspect = cx18_g_pixelaspect,
.vidioc_g_selection = cx18_g_selection,
.vidioc_g_input = cx18_g_input,
.vidioc_s_input = cx18_s_input,
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 39804d830305..fd5c52b21436 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -23,6 +23,7 @@
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
@@ -41,6 +42,18 @@ MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);
+/*
+ * Some platforms have been found to require periodic resetting of the DMA
+ * engine. Ryzen and Xeon platforms are known to be affected. The symptom
+ * encountered is "mpeg risc op code error". With the default setting of 1,
+ * only Ryzen platforms employ the workaround. Setting the option to 0
+ * explicitly disables the workaround on all platforms; setting it to 2
+ * forces it on for any platform.
+ */
+static unsigned int dma_reset_workaround = 1;
+module_param(dma_reset_workaround, int, 0644);
+MODULE_PARM_DESC(dma_reset_workaround, "periodic RISC DMA engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
+
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
@@ -603,8 +616,13 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port,
static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
{
- uint32_t reg1_val = cx_read(TC_REQ); /* read-only */
- uint32_t reg2_val = cx_read(TC_REQ_SET);
+ uint32_t reg1_val, reg2_val;
+
+ if (!dev->need_dma_reset)
+ return;
+
+ reg1_val = cx_read(TC_REQ); /* read-only */
+ reg2_val = cx_read(TC_REQ_SET);
if (reg1_val && reg2_val) {
cx_write(TC_REQ, reg1_val);
@@ -2058,6 +2076,37 @@ void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
/* TODO: 23-19 */
}
+static struct {
+ int vendor, dev;
+} const broken_dev_id[] = {
+ /* According to
+ * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
+ * 0x1451 is the PCI ID for the IOMMU found on Ryzen
+ */
+ { PCI_VENDOR_ID_AMD, 0x1451 },
+};
+
+static bool cx23885_does_need_dma_reset(void)
+{
+ int i;
+ struct pci_dev *pdev = NULL;
+
+ if (dma_reset_workaround == 0)
+ return false;
+ else if (dma_reset_workaround == 2)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
+ pdev = pci_get_device(broken_dev_id[i].vendor,
+ broken_dev_id[i].dev, NULL);
+ if (pdev) {
+ pci_dev_put(pdev);
+ return true;
+ }
+ }
+ return false;
+}
+
static int cx23885_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
@@ -2069,6 +2118,8 @@ static int cx23885_initdev(struct pci_dev *pci_dev,
if (NULL == dev)
return -ENOMEM;
+ dev->need_dma_reset = cx23885_does_need_dma_reset();
+
err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
if (err < 0)
goto fail_free;
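
dma_reset_workaround is registered with mode 0644, so it can be set at load time (for example cx23885.dma_reset_workaround=2 on the kernel command line) or rewritten through sysfs afterwards; since cx23885_initdev() samples the value once at probe, runtime writes only affect devices probed later. A small sketch, with the sysfs path derived from the module and parameter names above:

    /* Force the workaround on for subsequently probed devices. */
    #include <stdio.h>

    int main(void)
    {
            const char *p =
                    "/sys/module/cx23885/parameters/dma_reset_workaround";
            FILE *f = fopen(p, "w");

            if (!f) {
                    perror(p);
                    return 1;
            }
            fputs("2\n", f);
            return fclose(f) ? 1 : 0;
    }
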
diff --git a/drivers/media/pci/cx23885/cx23885-i2c.c b/drivers/media/pci/cx23885/cx23885-i2c.c
index d0df3dfff694..de6809b950ce 100644
--- a/drivers/media/pci/cx23885/cx23885-i2c.c
+++ b/drivers/media/pci/cx23885/cx23885-i2c.c
@@ -18,7 +18,6 @@
#include "cx23885.h"
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/io.h>
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 92d32a733f1b..168178c1e574 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -668,26 +668,43 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *cc)
+static int vidioc_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct cx23885_dev *dev = video_drvdata(file);
bool is_50hz = dev->tvnorm & V4L2_STD_625_50;
- if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- cc->bounds.left = 0;
- cc->bounds.top = 0;
- cc->bounds.width = 720;
- cc->bounds.height = norm_maxh(dev->tvnorm);
- cc->defrect = cc->bounds;
- cc->pixelaspect.numerator = is_50hz ? 54 : 11;
- cc->pixelaspect.denominator = is_50hz ? 59 : 10;
+ f->numerator = is_50hz ? 54 : 11;
+ f->denominator = is_50hz ? 59 : 10;
return 0;
}
+static int vidioc_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct cx23885_dev *dev = video_drvdata(file);
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = 720;
+ sel->r.height = norm_maxh(dev->tvnorm);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
struct cx23885_dev *dev = video_drvdata(file);
@@ -1122,7 +1139,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_pixelaspect = vidioc_g_pixelaspect,
+ .vidioc_g_selection = vidioc_g_selection,
.vidioc_s_std = vidioc_s_std,
.vidioc_g_std = vidioc_g_std,
.vidioc_enum_input = vidioc_enum_input,
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index d54c7ee1ab21..cf965efabe66 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -451,6 +451,8 @@ struct cx23885_dev {
/* Analog raw audio */
struct cx23885_audio_dev *audio_dev;
+ /* Does the system require periodic DMA resets? */
+ unsigned int need_dma_reset:1;
};
static inline struct cx23885_dev *to_cx23885(struct v4l2_device *v4l2_dev)
diff --git a/drivers/media/pci/ddbridge/ddbridge.h b/drivers/media/pci/ddbridge/ddbridge.h
index f137155bf79e..b834449e78f8 100644
--- a/drivers/media/pci/ddbridge/ddbridge.h
+++ b/drivers/media/pci/ddbridge/ddbridge.h
@@ -18,47 +18,43 @@
#ifndef _DDBRIDGE_H_
#define _DDBRIDGE_H_
-#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dvb/ca.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
-#include <linux/timer.h>
-#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/spi/spi.h>
#include <linux/swab.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
-#include <linux/kthread.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/spi/spi.h>
-#include <linux/gpio.h>
-#include <linux/completion.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
#include <asm/dma.h>
#include <asm/irq.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#include <linux/dvb/ca.h>
-#include <linux/socket.h>
-#include <linux/device.h>
-#include <linux/io.h>
#include <media/dmxdev.h>
-#include <media/dvbdev.h>
+#include <media/dvb_ca_en50221.h>
#include <media/dvb_demux.h>
+#include <media/dvbdev.h>
#include <media/dvb_frontend.h>
-#include <media/dvb_ringbuffer.h>
-#include <media/dvb_ca_en50221.h>
#include <media/dvb_net.h>
+#include <media/dvb_ringbuffer.h>
#define DDBRIDGE_VERSION "0.9.33-integrated"
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
index 240635be7a31..7caab9b8c2b9 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -10,8 +10,6 @@
#define CIO2_PCI_ID 0x9d32
#define CIO2_PCI_BAR 0
#define CIO2_DMA_MASK DMA_BIT_MASK(39)
-#define CIO2_IMAGE_MAX_WIDTH 4224
-#define CIO2_IMAGE_MAX_LENGTH 3136
#define CIO2_IMAGE_MAX_WIDTH 4224
#define CIO2_IMAGE_MAX_LENGTH 3136
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index a66f8b872520..6c269ecd8d05 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -829,17 +829,18 @@ static int ivtv_enum_output(struct file *file, void *fh, struct v4l2_output *vou
return ivtv_get_output(itv, vout->index, vout);
}
-static int ivtv_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
+static int ivtv_g_pixelaspect(struct file *file, void *fh,
+ int type, struct v4l2_fract *f)
{
struct ivtv_open_id *id = fh2id(fh);
struct ivtv *itv = id->itv;
- if (cropcap->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- cropcap->pixelaspect.numerator = itv->is_50hz ? 54 : 11;
- cropcap->pixelaspect.denominator = itv->is_50hz ? 59 : 10;
- } else if (cropcap->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- cropcap->pixelaspect.numerator = itv->is_out_50hz ? 54 : 11;
- cropcap->pixelaspect.denominator = itv->is_out_50hz ? 59 : 10;
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ f->numerator = itv->is_50hz ? 54 : 11;
+ f->denominator = itv->is_50hz ? 59 : 10;
+ } else if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ f->numerator = itv->is_out_50hz ? 54 : 11;
+ f->denominator = itv->is_out_50hz ? 59 : 10;
} else {
return -EINVAL;
}
@@ -1923,7 +1924,7 @@ static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
.vidioc_enum_input = ivtv_enum_input,
.vidioc_enum_output = ivtv_enum_output,
.vidioc_enumaudout = ivtv_enumaudout,
- .vidioc_cropcap = ivtv_cropcap,
+ .vidioc_g_pixelaspect = ivtv_g_pixelaspect,
.vidioc_s_selection = ivtv_s_selection,
.vidioc_g_selection = ivtv_g_selection,
.vidioc_g_input = ivtv_g_input,
diff --git a/drivers/media/pci/mantis/mantis_cards.c b/drivers/media/pci/mantis/mantis_cards.c
index 7eb75cb7d75a..e544bb9bab90 100644
--- a/drivers/media/pci/mantis/mantis_cards.c
+++ b/drivers/media/pci/mantis/mantis_cards.c
@@ -19,7 +19,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
index 8984b1bf57a5..aa98ea49558c 100644
--- a/drivers/media/pci/saa7134/saa7134-core.c
+++ b/drivers/media/pci/saa7134/saa7134-core.c
@@ -1419,8 +1419,8 @@ static int saa7134_suspend(struct pci_dev *pci_dev , pm_message_t state)
del_timer(&dev->vbi_q.timeout);
del_timer(&dev->ts_q.timeout);
- if (dev->remote)
- saa7134_ir_stop(dev);
+ if (dev->remote && dev->remote->dev->users)
+ saa7134_ir_close(dev->remote->dev);
pci_save_state(pci_dev);
pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
@@ -1447,8 +1447,8 @@ static int saa7134_resume(struct pci_dev *pci_dev)
saa7134_videoport_init(dev);
if (card_has_mpeg(dev))
saa7134_ts_init_hw(dev);
- if (dev->remote)
- saa7134_ir_start(dev);
+ if (dev->remote && dev->remote->dev->users)
+ saa7134_ir_open(dev->remote->dev);
saa7134_hw_enable1(dev);
msleep(100);
diff --git a/drivers/media/pci/saa7134/saa7134-input.c b/drivers/media/pci/saa7134/saa7134-input.c
index 999b2774b220..35884d5b8337 100644
--- a/drivers/media/pci/saa7134/saa7134-input.c
+++ b/drivers/media/pci/saa7134/saa7134-input.c
@@ -299,43 +299,6 @@ static int get_key_purpletv(struct IR_i2c *ir, enum rc_proto *protocol,
return 1;
}
-static int get_key_hvr1110(struct IR_i2c *ir, enum rc_proto *protocol,
- u32 *scancode, u8 *toggle)
-{
- int rc;
- unsigned char buf[5];
-
- /* poll IR chip */
- rc = i2c_master_recv(ir->c, buf, 5);
- if (rc != 5) {
- ir_dbg(ir, "read error\n");
- if (rc < 0)
- return rc;
- return -EIO;
- }
-
- /* Check if some key were pressed */
- if (!(buf[0] & 0x80))
- return 0;
-
- /*
- * buf[3] & 0x80 is always high.
- * buf[3] & 0x40 is a parity bit. A repeat event is marked
- * by preserving it into two separate readings
- * buf[4] bits 0 and 1, and buf[1] and buf[2] are always
- * zero.
- *
- * Note that the keymap which the hvr1110 uses is RC5.
- *
- * FIXME: start bits could maybe be used...?
- */
- *protocol = RC_PROTO_RC5;
- *scancode = RC_SCANCODE_RC5(buf[3] & 0x1f, buf[4] >> 2);
- *toggle = !!(buf[3] & 0x40);
- return 1;
-}
-
-
static int get_key_beholdm6xx(struct IR_i2c *ir, enum rc_proto *protocol,
u32 *scancode, u8 *toggle)
{
@@ -485,17 +448,10 @@ static void saa7134_input_timer(struct timer_list *t)
mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
}
-static int __saa7134_ir_start(void *priv)
+int saa7134_ir_open(struct rc_dev *rc)
{
- struct saa7134_dev *dev = priv;
- struct saa7134_card_ir *ir;
-
- if (!dev || !dev->remote)
- return -EINVAL;
-
- ir = dev->remote;
- if (ir->running)
- return 0;
+ struct saa7134_dev *dev = rc->priv;
+ struct saa7134_card_ir *ir = dev->remote;
/* Moved here from saa7134_input_init1() because the latter
* is not called on device resume */
@@ -544,55 +500,15 @@ static int __saa7134_ir_start(void *priv)
return 0;
}
-static void __saa7134_ir_stop(void *priv)
+void saa7134_ir_close(struct rc_dev *rc)
{
- struct saa7134_dev *dev = priv;
- struct saa7134_card_ir *ir;
-
- if (!dev || !dev->remote)
- return;
-
- ir = dev->remote;
- if (!ir->running)
- return;
+ struct saa7134_dev *dev = rc->priv;
+ struct saa7134_card_ir *ir = dev->remote;
if (ir->polling)
del_timer_sync(&ir->timer);
ir->running = false;
-
- return;
-}
-
-int saa7134_ir_start(struct saa7134_dev *dev)
-{
- if (dev->remote->users)
- return __saa7134_ir_start(dev);
-
- return 0;
-}
-
-void saa7134_ir_stop(struct saa7134_dev *dev)
-{
- if (dev->remote->users)
- __saa7134_ir_stop(dev);
-}
-
-static int saa7134_ir_open(struct rc_dev *rc)
-{
- struct saa7134_dev *dev = rc->priv;
-
- dev->remote->users++;
- return __saa7134_ir_start(dev);
-}
-
-static void saa7134_ir_close(struct rc_dev *rc)
-{
- struct saa7134_dev *dev = rc->priv;
-
- dev->remote->users--;
- if (!dev->remote->users)
- __saa7134_ir_stop(dev);
}
int saa7134_input_init1(struct saa7134_dev *dev)
@@ -661,7 +577,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
mask_keycode = 0x0007C8;
mask_keydown = 0x000010;
polling = 50; // ms
- /* GPIO stuff moved to __saa7134_ir_start() */
+ /* GPIO stuff moved to saa7134_ir_open() */
break;
case SAA7134_BOARD_AVERMEDIA_M135A:
ir_codes = RC_MAP_AVERMEDIA_M135A;
@@ -683,14 +599,14 @@ int saa7134_input_init1(struct saa7134_dev *dev)
mask_keycode = 0x02F200;
mask_keydown = 0x000400;
polling = 50; // ms
- /* GPIO stuff moved to __saa7134_ir_start() */
+ /* GPIO stuff moved to saa7134_ir_open() */
break;
case SAA7134_BOARD_AVERMEDIA_A16D:
ir_codes = RC_MAP_AVERMEDIA_A16D;
mask_keycode = 0x02F200;
mask_keydown = 0x000400;
polling = 50; /* ms */
- /* GPIO stuff moved to __saa7134_ir_start() */
+ /* GPIO stuff moved to saa7134_ir_open() */
break;
case SAA7134_BOARD_KWORLD_TERMINATOR:
ir_codes = RC_MAP_PIXELVIEW;
@@ -742,7 +658,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
mask_keycode = 0x0003CC;
mask_keydown = 0x000010;
polling = 5; /* ms */
- /* GPIO stuff moved to __saa7134_ir_start() */
+ /* GPIO stuff moved to saa7134_ir_open() */
break;
case SAA7134_BOARD_VIDEOMATE_TV_PVR:
case SAA7134_BOARD_VIDEOMATE_GOLD_PLUS:
@@ -880,8 +796,6 @@ int saa7134_input_init1(struct saa7134_dev *dev)
ir->raw_decode = raw_decode;
/* init input device */
- snprintf(ir->name, sizeof(ir->name), "saa7134 IR (%s)",
- saa7134_boards[dev->board].name);
snprintf(ir->phys, sizeof(ir->phys), "pci-%s/ir0",
pci_name(dev->pci));
@@ -893,7 +807,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
}
- rc->device_name = ir->name;
+ rc->device_name = saa7134_boards[dev->board].name;
rc->input_phys = ir->phys;
rc->input_id.bustype = BUS_PCI;
rc->input_id.version = 1;
@@ -929,7 +843,6 @@ void saa7134_input_fini(struct saa7134_dev *dev)
if (NULL == dev->remote)
return;
- saa7134_ir_stop(dev);
rc_unregister_device(dev->remote->dev);
kfree(dev->remote);
dev->remote = NULL;
@@ -1031,9 +944,11 @@ void saa7134_probe_i2c_ir(struct saa7134_dev *dev)
(1 == rc) ? "yes" : "no");
break;
case SAA7134_BOARD_HAUPPAUGE_HVR1110:
- dev->init_data.name = "HVR 1110";
- dev->init_data.get_key = get_key_hvr1110;
+ dev->init_data.name = saa7134_boards[dev->board].name;
dev->init_data.ir_codes = RC_MAP_HAUPPAUGE;
+ dev->init_data.type = RC_PROTO_BIT_RC5 |
+ RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_RC6_6A_32;
+ dev->init_data.internal_get_key_func = IR_KBD_GET_KEY_HAUP_XVR;
info.addr = 0x71;
break;
case SAA7134_BOARD_BEHOLD_607FM_MK3:
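
This rework relies on rc-core calling the new handlers on first open and last close of the rc device; the wiring itself lives in saa7134_input_init1(), outside the hunks shown. A rough sketch of the assumed assignments:

    /* Assumed wiring; rc-core invokes the handlers on first open and
     * last close, replacing the manual "users" refcounting removed
     * above. */
    rc->priv = dev;
    rc->open = saa7134_ir_open;
    rc->close = saa7134_ir_close;
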
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index 8f28741ebb35..5bc4b8fc8ebf 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1650,23 +1650,22 @@ int saa7134_querystd(struct file *file, void *priv, v4l2_std_id *std)
}
EXPORT_SYMBOL_GPL(saa7134_querystd);
-static int saa7134_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *cap)
+static int saa7134_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct saa7134_dev *dev = video_drvdata(file);
- if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ type != V4L2_BUF_TYPE_VIDEO_OVERLAY)
return -EINVAL;
- cap->pixelaspect.numerator = 1;
- cap->pixelaspect.denominator = 1;
+
if (dev->tvnorm->id & V4L2_STD_525_60) {
- cap->pixelaspect.numerator = 11;
- cap->pixelaspect.denominator = 10;
+ f->numerator = 11;
+ f->denominator = 10;
}
if (dev->tvnorm->id & V4L2_STD_625_50) {
- cap->pixelaspect.numerator = 54;
- cap->pixelaspect.denominator = 59;
+ f->numerator = 54;
+ f->denominator = 59;
}
return 0;
}
@@ -1987,7 +1986,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_g_fmt_vbi_cap = saa7134_try_get_set_fmt_vbi_cap,
.vidioc_try_fmt_vbi_cap = saa7134_try_get_set_fmt_vbi_cap,
.vidioc_s_fmt_vbi_cap = saa7134_try_get_set_fmt_vbi_cap,
- .vidioc_cropcap = saa7134_cropcap,
+ .vidioc_g_pixelaspect = saa7134_g_pixelaspect,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index 480228456014..50b1d07d2ac1 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -123,9 +123,7 @@ struct saa7134_format {
struct saa7134_card_ir {
struct rc_dev *dev;
- char name[32];
char phys[32];
- unsigned users;
u32 polling;
u32 last_gpio;
@@ -923,13 +921,13 @@ int saa7134_input_init1(struct saa7134_dev *dev);
void saa7134_input_fini(struct saa7134_dev *dev);
void saa7134_input_irq(struct saa7134_dev *dev);
void saa7134_probe_i2c_ir(struct saa7134_dev *dev);
-int saa7134_ir_start(struct saa7134_dev *dev);
-void saa7134_ir_stop(struct saa7134_dev *dev);
+int saa7134_ir_open(struct rc_dev *dev);
+void saa7134_ir_close(struct rc_dev *dev);
#else
#define saa7134_input_init1(dev) ((void)0)
#define saa7134_input_fini(dev) ((void)0)
#define saa7134_input_irq(dev) ((void)0)
#define saa7134_probe_i2c_ir(dev) ((void)0)
-#define saa7134_ir_start(dev) ((void)0)
-#define saa7134_ir_stop(dev) ((void)0)
+#define saa7134_ir_open(dev) ((void)0)
+#define saa7134_ir_close(dev) ((void)0)
#endif
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 70c4f6c54881..a505e9f5a1e2 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -32,6 +32,15 @@ source "drivers/media/platform/davinci/Kconfig"
source "drivers/media/platform/omap/Kconfig"
+config VIDEO_ASPEED
+ tristate "Aspeed AST2400 and AST2500 Video Engine driver"
+ depends on VIDEO_V4L2
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ Support for the Aspeed Video Engine (VE) embedded in the Aspeed
+ AST2400 and AST2500 SoCs. The VE can capture and compress video data
+ from digital or analog sources.
+
config VIDEO_SH_VOU
tristate "SuperH VOU video output driver"
depends on MEDIA_CAMERA_SUPPORT
@@ -138,6 +147,7 @@ source "drivers/media/platform/am437x/Kconfig"
source "drivers/media/platform/xilinx/Kconfig"
source "drivers/media/platform/rcar-vin/Kconfig"
source "drivers/media/platform/atmel/Kconfig"
+source "drivers/media/platform/sunxi/sun6i-csi/Kconfig"
config VIDEO_TI_CAL
tristate "TI CAL (Camera Adaptation Layer) driver"
@@ -625,6 +635,28 @@ config VIDEO_TEGRA_HDMI_CEC
The CEC bus is present in the HDMI connector and enables communication
between compatible devices.
+config VIDEO_SECO_CEC
+ tristate "SECO Boards HDMI CEC driver"
+ depends on (X86 || IA64) || COMPILE_TEST
+ depends on PCI && DMI
+ select CEC_CORE
+ select CEC_NOTIFIER
+ help
+ This is a driver for the SECO Boards integrated CEC interface.
+ Selecting it will enable support for this device.
+ The CEC bus is present in the HDMI connector and enables
+ communication between compatible devices.
+
+config VIDEO_SECO_RC
+ bool "SECO Boards IR RC5 support"
+ depends on VIDEO_SECO_CEC
+ select RC_CORE
+ help
+ If you say yes here you will get support for the
+ SECO Boards Consumer-IR in seco-cec driver.
+ The embedded controller supports RC5 protocol only, default mapping
+ is set to rc-hauppauge.
+
endif #CEC_PLATFORM_DRIVERS
menuconfig SDR_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 6ab6200dd9c9..e6deb2597738 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -3,6 +3,7 @@
# Makefile for the video capture/playback device drivers.
#
+obj-$(CONFIG_VIDEO_ASPEED) += aspeed-video.o
obj-$(CONFIG_VIDEO_CADENCE) += cadence/
obj-$(CONFIG_VIDEO_VIA_CAMERA) += via-camera.o
obj-$(CONFIG_VIDEO_CAFE_CCIC) += marvell-ccic/
@@ -55,6 +56,8 @@ obj-$(CONFIG_VIDEO_TEGRA_HDMI_CEC) += tegra-cec/
obj-y += stm32/
+obj-$(CONFIG_VIDEO_SECO_CEC) += seco-cec/
+
obj-y += davinci/
obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
@@ -98,3 +101,5 @@ obj-$(CONFIG_VIDEO_QCOM_VENUS) += qcom/venus/
obj-y += meson/
obj-y += cros-ec-cec/
+
+obj-$(CONFIG_VIDEO_SUN6I_CSI) += sunxi/sun6i-csi/
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index e13d2b3a7168..5c17624aaade 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -2081,24 +2081,18 @@ static void vpfe_stop_streaming(struct vb2_queue *vq)
spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags);
}
-static int vpfe_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *crop)
+static int vpfe_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct vpfe_device *vpfe = video_drvdata(file);
- vpfe_dbg(2, vpfe, "vpfe_cropcap\n");
+ vpfe_dbg(2, vpfe, "vpfe_g_pixelaspect\n");
- if (vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
return -EINVAL;
- memset(crop, 0, sizeof(struct v4l2_cropcap));
-
- crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- crop->defrect.width = vpfe_standards[vpfe->std_index].width;
- crop->bounds.width = crop->defrect.width;
- crop->defrect.height = vpfe_standards[vpfe->std_index].height;
- crop->bounds.height = crop->defrect.height;
- crop->pixelaspect = vpfe_standards[vpfe->std_index].pixelaspect;
+ *f = vpfe_standards[vpfe->std_index].pixelaspect;
return 0;
}
@@ -2108,12 +2102,17 @@ vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
struct vpfe_device *vpfe = video_drvdata(file);
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ vpfe->std_index >= ARRAY_SIZE(vpfe_standards))
+ return -EINVAL;
+
switch (s->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
- s->r.left = s->r.top = 0;
- s->r.width = vpfe->crop.width;
- s->r.height = vpfe->crop.height;
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = vpfe_standards[vpfe->std_index].width;
+ s->r.height = vpfe_standards[vpfe->std_index].height;
break;
case V4L2_SEL_TGT_CROP:
@@ -2282,7 +2281,7 @@ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
- .vidioc_cropcap = vpfe_cropcap,
+ .vidioc_g_pixelaspect = vpfe_g_pixelaspect,
.vidioc_g_selection = vpfe_g_selection,
.vidioc_s_selection = vpfe_s_selection,
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
new file mode 100644
index 000000000000..dfec813f50a9
--- /dev/null
+++ b/drivers/media/platform/aspeed-video.c
@@ -0,0 +1,1729 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define DEVICE_NAME "aspeed-video"
+
+#define ASPEED_VIDEO_JPEG_NUM_QUALITIES 12
+#define ASPEED_VIDEO_JPEG_HEADER_SIZE 10
+#define ASPEED_VIDEO_JPEG_QUANT_SIZE 116
+#define ASPEED_VIDEO_JPEG_DCT_SIZE 34
+
+#define MAX_FRAME_RATE 60
+#define MAX_HEIGHT 1200
+#define MAX_WIDTH 1920
+#define MIN_HEIGHT 480
+#define MIN_WIDTH 640
+
+#define NUM_POLARITY_CHECKS 10
+#define INVALID_RESOLUTION_RETRIES 2
+#define INVALID_RESOLUTION_DELAY msecs_to_jiffies(250)
+#define RESOLUTION_CHANGE_DELAY msecs_to_jiffies(500)
+#define MODE_DETECT_TIMEOUT msecs_to_jiffies(500)
+#define STOP_TIMEOUT msecs_to_jiffies(1000)
+#define DIRECT_FETCH_THRESHOLD 0x0c0000 /* 1024 * 768 */
+
+#define VE_MAX_SRC_BUFFER_SIZE 0x8ca000 /* 1920 * 1200, 32bpp */
+#define VE_JPEG_HEADER_SIZE 0x006000 /* 512 * 12 * 4 */
+
+#define VE_PROTECTION_KEY 0x000
+#define VE_PROTECTION_KEY_UNLOCK 0x1a038aa8
+
+#define VE_SEQ_CTRL 0x004
+#define VE_SEQ_CTRL_TRIG_MODE_DET BIT(0)
+#define VE_SEQ_CTRL_TRIG_CAPTURE BIT(1)
+#define VE_SEQ_CTRL_FORCE_IDLE BIT(2)
+#define VE_SEQ_CTRL_MULT_FRAME BIT(3)
+#define VE_SEQ_CTRL_TRIG_COMP BIT(4)
+#define VE_SEQ_CTRL_AUTO_COMP BIT(5)
+#define VE_SEQ_CTRL_EN_WATCHDOG BIT(7)
+#define VE_SEQ_CTRL_YUV420 BIT(10)
+#define VE_SEQ_CTRL_COMP_FMT GENMASK(11, 10)
+#define VE_SEQ_CTRL_HALT BIT(12)
+#define VE_SEQ_CTRL_EN_WATCHDOG_COMP BIT(14)
+#define VE_SEQ_CTRL_TRIG_JPG BIT(15)
+#define VE_SEQ_CTRL_CAP_BUSY BIT(16)
+#define VE_SEQ_CTRL_COMP_BUSY BIT(18)
+
+#ifdef CONFIG_MACH_ASPEED_G5
+#define VE_SEQ_CTRL_JPEG_MODE BIT(13) /* AST2500 */
+#else
+#define VE_SEQ_CTRL_JPEG_MODE BIT(8) /* AST2400 */
+#endif /* CONFIG_MACH_ASPEED_G5 */
+
+#define VE_CTRL 0x008
+#define VE_CTRL_HSYNC_POL BIT(0)
+#define VE_CTRL_VSYNC_POL BIT(1)
+#define VE_CTRL_SOURCE BIT(2)
+#define VE_CTRL_INT_DE BIT(4)
+#define VE_CTRL_DIRECT_FETCH BIT(5)
+#define VE_CTRL_YUV BIT(6)
+#define VE_CTRL_RGB BIT(7)
+#define VE_CTRL_CAPTURE_FMT GENMASK(7, 6)
+#define VE_CTRL_AUTO_OR_CURSOR BIT(8)
+#define VE_CTRL_CLK_INVERSE BIT(11)
+#define VE_CTRL_CLK_DELAY GENMASK(11, 9)
+#define VE_CTRL_INTERLACE BIT(14)
+#define VE_CTRL_HSYNC_POL_CTRL BIT(15)
+#define VE_CTRL_FRC GENMASK(23, 16)
+
+#define VE_TGS_0 0x00c
+#define VE_TGS_1 0x010
+#define VE_TGS_FIRST GENMASK(28, 16)
+#define VE_TGS_LAST GENMASK(12, 0)
+
+#define VE_SCALING_FACTOR 0x014
+#define VE_SCALING_FILTER0 0x018
+#define VE_SCALING_FILTER1 0x01c
+#define VE_SCALING_FILTER2 0x020
+#define VE_SCALING_FILTER3 0x024
+
+#define VE_CAP_WINDOW 0x030
+#define VE_COMP_WINDOW 0x034
+#define VE_COMP_PROC_OFFSET 0x038
+#define VE_COMP_OFFSET 0x03c
+#define VE_JPEG_ADDR 0x040
+#define VE_SRC0_ADDR 0x044
+#define VE_SRC_SCANLINE_OFFSET 0x048
+#define VE_SRC1_ADDR 0x04c
+#define VE_COMP_ADDR 0x054
+
+#define VE_STREAM_BUF_SIZE 0x058
+#define VE_STREAM_BUF_SIZE_N_PACKETS GENMASK(5, 3)
+#define VE_STREAM_BUF_SIZE_P_SIZE GENMASK(2, 0)
+
+#define VE_COMP_CTRL 0x060
+#define VE_COMP_CTRL_VQ_DCT_ONLY BIT(0)
+#define VE_COMP_CTRL_VQ_4COLOR BIT(1)
+#define VE_COMP_CTRL_QUANTIZE BIT(2)
+#define VE_COMP_CTRL_EN_BQ BIT(4)
+#define VE_COMP_CTRL_EN_CRYPTO BIT(5)
+#define VE_COMP_CTRL_DCT_CHR GENMASK(10, 6)
+#define VE_COMP_CTRL_DCT_LUM GENMASK(15, 11)
+#define VE_COMP_CTRL_EN_HQ BIT(16)
+#define VE_COMP_CTRL_RSVD BIT(19)
+#define VE_COMP_CTRL_ENCODE GENMASK(21, 20)
+#define VE_COMP_CTRL_HQ_DCT_CHR GENMASK(26, 22)
+#define VE_COMP_CTRL_HQ_DCT_LUM GENMASK(31, 27)
+
+#define VE_OFFSET_COMP_STREAM 0x078
+
+#define VE_SRC_LR_EDGE_DET 0x090
+#define VE_SRC_LR_EDGE_DET_LEFT GENMASK(11, 0)
+#define VE_SRC_LR_EDGE_DET_NO_V BIT(12)
+#define VE_SRC_LR_EDGE_DET_NO_H BIT(13)
+#define VE_SRC_LR_EDGE_DET_NO_DISP BIT(14)
+#define VE_SRC_LR_EDGE_DET_NO_CLK BIT(15)
+#define VE_SRC_LR_EDGE_DET_RT_SHF 16
+#define VE_SRC_LR_EDGE_DET_RT GENMASK(27, VE_SRC_LR_EDGE_DET_RT_SHF)
+#define VE_SRC_LR_EDGE_DET_INTERLACE BIT(31)
+
+#define VE_SRC_TB_EDGE_DET 0x094
+#define VE_SRC_TB_EDGE_DET_TOP GENMASK(12, 0)
+#define VE_SRC_TB_EDGE_DET_BOT_SHF 16
+#define VE_SRC_TB_EDGE_DET_BOT GENMASK(28, VE_SRC_TB_EDGE_DET_BOT_SHF)
+
+#define VE_MODE_DETECT_STATUS 0x098
+#define VE_MODE_DETECT_H_PIXELS GENMASK(11, 0)
+#define VE_MODE_DETECT_V_LINES_SHF 16
+#define VE_MODE_DETECT_V_LINES GENMASK(27, VE_MODE_DETECT_V_LINES_SHF)
+#define VE_MODE_DETECT_STATUS_VSYNC BIT(28)
+#define VE_MODE_DETECT_STATUS_HSYNC BIT(29)
+
+#define VE_SYNC_STATUS 0x09c
+#define VE_SYNC_STATUS_HSYNC GENMASK(11, 0)
+#define VE_SYNC_STATUS_VSYNC_SHF 16
+#define VE_SYNC_STATUS_VSYNC GENMASK(27, VE_SYNC_STATUS_VSYNC_SHF)
+
+#define VE_INTERRUPT_CTRL 0x304
+#define VE_INTERRUPT_STATUS 0x308
+#define VE_INTERRUPT_MODE_DETECT_WD BIT(0)
+#define VE_INTERRUPT_CAPTURE_COMPLETE BIT(1)
+#define VE_INTERRUPT_COMP_READY BIT(2)
+#define VE_INTERRUPT_COMP_COMPLETE BIT(3)
+#define VE_INTERRUPT_MODE_DETECT BIT(4)
+#define VE_INTERRUPT_FRAME_COMPLETE BIT(5)
+#define VE_INTERRUPT_DECODE_ERR BIT(6)
+#define VE_INTERRUPT_HALT_READY BIT(8)
+#define VE_INTERRUPT_HANG_WD BIT(9)
+#define VE_INTERRUPT_STREAM_DESC BIT(10)
+#define VE_INTERRUPT_VSYNC_DESC BIT(11)
+
+#define VE_MODE_DETECT 0x30c
+#define VE_MEM_RESTRICT_START 0x310
+#define VE_MEM_RESTRICT_END 0x314
+
+enum {
+ VIDEO_MODE_DETECT_DONE,
+ VIDEO_RES_CHANGE,
+ VIDEO_RES_DETECT,
+ VIDEO_STREAMING,
+ VIDEO_FRAME_INPRG,
+ VIDEO_STOPPED,
+};
+
+struct aspeed_video_addr {
+ unsigned int size;
+ dma_addr_t dma;
+ void *virt;
+};
+
+struct aspeed_video_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head link;
+};
+
+#define to_aspeed_video_buffer(x) \
+ container_of((x), struct aspeed_video_buffer, vb)
+
+struct aspeed_video {
+ void __iomem *base;
+ struct clk *eclk;
+ struct clk *vclk;
+ struct reset_control *rst;
+
+ struct device *dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_pix_format pix_fmt;
+ struct v4l2_bt_timings active_timings;
+ struct v4l2_bt_timings detected_timings;
+ u32 v4l2_input_status;
+ struct vb2_queue queue;
+ struct video_device vdev;
+ struct mutex video_lock; /* v4l2 and videobuf2 lock */
+
+ wait_queue_head_t wait;
+ spinlock_t lock; /* buffer list lock */
+ struct delayed_work res_work;
+ struct list_head buffers;
+ unsigned long flags;
+ unsigned int sequence;
+
+ unsigned int max_compressed_size;
+ struct aspeed_video_addr srcs[2];
+ struct aspeed_video_addr jpeg;
+
+ bool yuv420;
+ unsigned int frame_rate;
+ unsigned int jpeg_quality;
+
+ unsigned int frame_bottom;
+ unsigned int frame_left;
+ unsigned int frame_right;
+ unsigned int frame_top;
+};
+
+#define to_aspeed_video(x) container_of((x), struct aspeed_video, v4l2_dev)
+
+static const u32 aspeed_video_jpeg_header[ASPEED_VIDEO_JPEG_HEADER_SIZE] = {
+ 0xe0ffd8ff, 0x464a1000, 0x01004649, 0x60000101, 0x00006000, 0x0f00feff,
+ 0x00002d05, 0x00000000, 0x00000000, 0x00dbff00
+};
+
+static const u32 aspeed_video_jpeg_quant[ASPEED_VIDEO_JPEG_QUANT_SIZE] = {
+ 0x081100c0, 0x00000000, 0x00110103, 0x03011102, 0xc4ff0111, 0x00001f00,
+ 0x01010501, 0x01010101, 0x00000000, 0x00000000, 0x04030201, 0x08070605,
+ 0xff0b0a09, 0x10b500c4, 0x03010200, 0x03040203, 0x04040505, 0x7d010000,
+ 0x00030201, 0x12051104, 0x06413121, 0x07615113, 0x32147122, 0x08a19181,
+ 0xc1b14223, 0xf0d15215, 0x72623324, 0x160a0982, 0x1a191817, 0x28272625,
+ 0x35342a29, 0x39383736, 0x4544433a, 0x49484746, 0x5554534a, 0x59585756,
+ 0x6564635a, 0x69686766, 0x7574736a, 0x79787776, 0x8584837a, 0x89888786,
+ 0x9493928a, 0x98979695, 0xa3a29a99, 0xa7a6a5a4, 0xb2aaa9a8, 0xb6b5b4b3,
+ 0xbab9b8b7, 0xc5c4c3c2, 0xc9c8c7c6, 0xd4d3d2ca, 0xd8d7d6d5, 0xe2e1dad9,
+ 0xe6e5e4e3, 0xeae9e8e7, 0xf4f3f2f1, 0xf8f7f6f5, 0xc4fffaf9, 0x00011f00,
+ 0x01010103, 0x01010101, 0x00000101, 0x00000000, 0x04030201, 0x08070605,
+ 0xff0b0a09, 0x11b500c4, 0x02010200, 0x04030404, 0x04040507, 0x77020100,
+ 0x03020100, 0x21050411, 0x41120631, 0x71610751, 0x81322213, 0x91421408,
+ 0x09c1b1a1, 0xf0523323, 0xd1726215, 0x3424160a, 0x17f125e1, 0x261a1918,
+ 0x2a292827, 0x38373635, 0x44433a39, 0x48474645, 0x54534a49, 0x58575655,
+ 0x64635a59, 0x68676665, 0x74736a69, 0x78777675, 0x83827a79, 0x87868584,
+ 0x928a8988, 0x96959493, 0x9a999897, 0xa5a4a3a2, 0xa9a8a7a6, 0xb4b3b2aa,
+ 0xb8b7b6b5, 0xc3c2bab9, 0xc7c6c5c4, 0xd2cac9c8, 0xd6d5d4d3, 0xdad9d8d7,
+ 0xe5e4e3e2, 0xe9e8e7e6, 0xf4f3f2ea, 0xf8f7f6f5, 0xdafffaf9, 0x01030c00,
+ 0x03110200, 0x003f0011
+};
+
+static const u32 aspeed_video_jpeg_dct[ASPEED_VIDEO_JPEG_NUM_QUALITIES]
+ [ASPEED_VIDEO_JPEG_DCT_SIZE] = {
+ { 0x0d140043, 0x0c0f110f, 0x11101114, 0x17141516, 0x1e20321e,
+ 0x3d1e1b1b, 0x32242e2b, 0x4b4c3f48, 0x44463f47, 0x61735a50,
+ 0x566c5550, 0x88644644, 0x7a766c65, 0x4d808280, 0x8c978d60,
+ 0x7e73967d, 0xdbff7b80, 0x1f014300, 0x272d2121, 0x3030582d,
+ 0x697bb958, 0xb8b9b97b, 0xb9b8a6a6, 0xb9b9b9b9, 0xb9b9b9b9,
+ 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9,
+ 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9, 0xffb9b9b9 },
+ { 0x0c110043, 0x0a0d0f0d, 0x0f0e0f11, 0x14111213, 0x1a1c2b1a,
+ 0x351a1818, 0x2b1f2826, 0x4142373f, 0x3c3d373e, 0x55644e46,
+ 0x4b5f4a46, 0x77573d3c, 0x6b675f58, 0x43707170, 0x7a847b54,
+ 0x6e64836d, 0xdbff6c70, 0x1b014300, 0x22271d1d, 0x2a2a4c27,
+ 0x5b6ba04c, 0xa0a0a06b, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0,
+ 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0,
+ 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xffa0a0a0 },
+ { 0x090e0043, 0x090a0c0a, 0x0c0b0c0e, 0x110e0f10, 0x15172415,
+ 0x2c151313, 0x241a211f, 0x36372e34, 0x31322e33, 0x4653413a,
+ 0x3e4e3d3a, 0x62483231, 0x58564e49, 0x385d5e5d, 0x656d6645,
+ 0x5b536c5a, 0xdbff595d, 0x16014300, 0x1c201818, 0x22223f20,
+ 0x4b58853f, 0x85858558, 0x85858585, 0x85858585, 0x85858585,
+ 0x85858585, 0x85858585, 0x85858585, 0x85858585, 0x85858585,
+ 0x85858585, 0x85858585, 0x85858585, 0xff858585 },
+ { 0x070b0043, 0x07080a08, 0x0a090a0b, 0x0d0b0c0c, 0x11121c11,
+ 0x23110f0f, 0x1c141a19, 0x2b2b2429, 0x27282428, 0x3842332e,
+ 0x313e302e, 0x4e392827, 0x46443e3a, 0x2c4a4a4a, 0x50565137,
+ 0x48425647, 0xdbff474a, 0x12014300, 0x161a1313, 0x1c1c331a,
+ 0x3d486c33, 0x6c6c6c48, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c,
+ 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c,
+ 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0xff6c6c6c },
+ { 0x06090043, 0x05060706, 0x07070709, 0x0a09090a, 0x0d0e160d,
+ 0x1b0d0c0c, 0x16101413, 0x21221c20, 0x1e1f1c20, 0x2b332824,
+ 0x26302624, 0x3d2d1f1e, 0x3735302d, 0x22393a39, 0x3f443f2b,
+ 0x38334338, 0xdbff3739, 0x0d014300, 0x11130e0e, 0x15152613,
+ 0x2d355026, 0x50505035, 0x50505050, 0x50505050, 0x50505050,
+ 0x50505050, 0x50505050, 0x50505050, 0x50505050, 0x50505050,
+ 0x50505050, 0x50505050, 0x50505050, 0xff505050 },
+ { 0x04060043, 0x03040504, 0x05040506, 0x07060606, 0x09090f09,
+ 0x12090808, 0x0f0a0d0d, 0x16161315, 0x14151315, 0x1d221b18,
+ 0x19201918, 0x281e1514, 0x2423201e, 0x17262726, 0x2a2d2a1c,
+ 0x25222d25, 0xdbff2526, 0x09014300, 0x0b0d0a0a, 0x0e0e1a0d,
+ 0x1f25371a, 0x37373725, 0x37373737, 0x37373737, 0x37373737,
+ 0x37373737, 0x37373737, 0x37373737, 0x37373737, 0x37373737,
+ 0x37373737, 0x37373737, 0x37373737, 0xff373737 },
+ { 0x02030043, 0x01020202, 0x02020203, 0x03030303, 0x04040704,
+ 0x09040404, 0x07050606, 0x0b0b090a, 0x0a0a090a, 0x0e110d0c,
+ 0x0c100c0c, 0x140f0a0a, 0x1211100f, 0x0b131313, 0x1516150e,
+ 0x12111612, 0xdbff1213, 0x04014300, 0x05060505, 0x07070d06,
+ 0x0f121b0d, 0x1b1b1b12, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b,
+ 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b,
+ 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0xff1b1b1b },
+ { 0x01020043, 0x01010101, 0x01010102, 0x02020202, 0x03030503,
+ 0x06030202, 0x05030404, 0x07070607, 0x06070607, 0x090b0908,
+ 0x080a0808, 0x0d0a0706, 0x0c0b0a0a, 0x070c0d0c, 0x0e0f0e09,
+ 0x0c0b0f0c, 0xdbff0c0c, 0x03014300, 0x03040303, 0x04040804,
+ 0x0a0c1208, 0x1212120c, 0x12121212, 0x12121212, 0x12121212,
+ 0x12121212, 0x12121212, 0x12121212, 0x12121212, 0x12121212,
+ 0x12121212, 0x12121212, 0x12121212, 0xff121212 },
+ { 0x01020043, 0x01010101, 0x01010102, 0x02020202, 0x03030503,
+ 0x06030202, 0x05030404, 0x07070607, 0x06070607, 0x090b0908,
+ 0x080a0808, 0x0d0a0706, 0x0c0b0a0a, 0x070c0d0c, 0x0e0f0e09,
+ 0x0c0b0f0c, 0xdbff0c0c, 0x02014300, 0x03030202, 0x04040703,
+ 0x080a0f07, 0x0f0f0f0a, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f,
+ 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xff0f0f0f },
+ { 0x01010043, 0x01010101, 0x01010101, 0x01010101, 0x02020302,
+ 0x04020202, 0x03020303, 0x05050405, 0x05050405, 0x07080606,
+ 0x06080606, 0x0a070505, 0x09080807, 0x05090909, 0x0a0b0a07,
+ 0x09080b09, 0xdbff0909, 0x02014300, 0x02030202, 0x03030503,
+ 0x07080c05, 0x0c0c0c08, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c,
+ 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c,
+ 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0xff0c0c0c },
+ { 0x01010043, 0x01010101, 0x01010101, 0x01010101, 0x01010201,
+ 0x03010101, 0x02010202, 0x03030303, 0x03030303, 0x04050404,
+ 0x04050404, 0x06050303, 0x06050505, 0x03060606, 0x07070704,
+ 0x06050706, 0xdbff0606, 0x01014300, 0x01020101, 0x02020402,
+ 0x05060904, 0x09090906, 0x09090909, 0x09090909, 0x09090909,
+ 0x09090909, 0x09090909, 0x09090909, 0x09090909, 0x09090909,
+ 0x09090909, 0x09090909, 0x09090909, 0xff090909 },
+ { 0x01010043, 0x01010101, 0x01010101, 0x01010101, 0x01010101,
+ 0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x02020202,
+ 0x02020202, 0x03020101, 0x03020202, 0x01030303, 0x03030302,
+ 0x03020303, 0xdbff0403, 0x01014300, 0x01010101, 0x01010201,
+ 0x03040602, 0x06060604, 0x06060606, 0x06060606, 0x06060606,
+ 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606,
+ 0x06060606, 0x06060606, 0x06060606, 0xff060606 }
+};
+
+static const struct v4l2_dv_timings_cap aspeed_video_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ .bt = {
+ .min_width = MIN_WIDTH,
+ .max_width = MAX_WIDTH,
+ .min_height = MIN_HEIGHT,
+ .max_height = MAX_HEIGHT,
+ .min_pixelclock = 6574080, /* 640 x 480 x 24Hz */
+ .max_pixelclock = 138240000, /* 1920 x 1200 x 60Hz */
+ .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
+ .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_REDUCED_BLANKING |
+ V4L2_DV_BT_CAP_CUSTOM,
+ },
+};
+
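+/*
+ * Layout note, derived from the sizes above: each of the twelve quality
+ * levels occupies a 256-dword slot in the table built below, filled with
+ * 10 dwords of header, 34 of DCT and 116 of quantisation data (160 in
+ * total); the remainder of each slot is padding required by the engine's
+ * fixed header spacing.
+ */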
+static void aspeed_video_init_jpeg_table(u32 *table, bool yuv420)
+{
+ int i;
+ unsigned int base;
+
+ for (i = 0; i < ASPEED_VIDEO_JPEG_NUM_QUALITIES; i++) {
+ base = 256 * i; /* AST HW requires this header spacing */
+ memcpy(&table[base], aspeed_video_jpeg_header,
+ sizeof(aspeed_video_jpeg_header));
+
+ base += ASPEED_VIDEO_JPEG_HEADER_SIZE;
+ memcpy(&table[base], aspeed_video_jpeg_dct[i],
+ sizeof(aspeed_video_jpeg_dct[i]));
+
+ base += ASPEED_VIDEO_JPEG_DCT_SIZE;
+ memcpy(&table[base], aspeed_video_jpeg_quant,
+ sizeof(aspeed_video_jpeg_quant));
+
+ if (yuv420)
+ table[base + 2] = 0x00220103;
+ }
+}
+
+static void aspeed_video_update(struct aspeed_video *video, u32 reg, u32 clear,
+ u32 bits)
+{
+ u32 t = readl(video->base + reg);
+ u32 before = t;
+
+ t &= ~clear;
+ t |= bits;
+ writel(t, video->base + reg);
+ dev_dbg(video->dev, "update %03x[%08x -> %08x]\n", reg, before,
+ readl(video->base + reg));
+}
+
+static u32 aspeed_video_read(struct aspeed_video *video, u32 reg)
+{
+ u32 t = readl(video->base + reg);
+
+ dev_dbg(video->dev, "read %03x[%08x]\n", reg, t);
+ return t;
+}
+
+static void aspeed_video_write(struct aspeed_video *video, u32 reg, u32 val)
+{
+ writel(val, video->base + reg);
+ dev_dbg(video->dev, "write %03x[%08x]\n", reg,
+ readl(video->base + reg));
+}
+
+static int aspeed_video_start_frame(struct aspeed_video *video)
+{
+ dma_addr_t addr;
+ unsigned long flags;
+ struct aspeed_video_buffer *buf;
+ u32 seq_ctrl = aspeed_video_read(video, VE_SEQ_CTRL);
+
+ if (video->v4l2_input_status) {
+ dev_dbg(video->dev, "No signal; don't start frame\n");
+ return 0;
+ }
+
+ if (!(seq_ctrl & VE_SEQ_CTRL_COMP_BUSY) ||
+ !(seq_ctrl & VE_SEQ_CTRL_CAP_BUSY)) {
+ dev_err(video->dev, "Engine busy; don't start frame\n");
+ return -EBUSY;
+ }
+
+ spin_lock_irqsave(&video->lock, flags);
+ buf = list_first_entry_or_null(&video->buffers,
+ struct aspeed_video_buffer, link);
+ if (!buf) {
+ spin_unlock_irqrestore(&video->lock, flags);
+ dev_dbg(video->dev, "No buffers; don't start frame\n");
+ return -EPROTO;
+ }
+
+ set_bit(VIDEO_FRAME_INPRG, &video->flags);
+ addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+ spin_unlock_irqrestore(&video->lock, flags);
+
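+ /*
+ * Point the compression output at the first queued vb2 buffer and arm
+ * one capture + compression cycle; completion is reported through the
+ * two interrupts enabled below.
+ */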
+ aspeed_video_write(video, VE_COMP_PROC_OFFSET, 0);
+ aspeed_video_write(video, VE_COMP_OFFSET, 0);
+ aspeed_video_write(video, VE_COMP_ADDR, addr);
+
+ aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
+ VE_INTERRUPT_COMP_COMPLETE |
+ VE_INTERRUPT_CAPTURE_COMPLETE);
+
+ aspeed_video_update(video, VE_SEQ_CTRL, 0,
+ VE_SEQ_CTRL_TRIG_CAPTURE | VE_SEQ_CTRL_TRIG_COMP);
+
+ return 0;
+}
+
+static void aspeed_video_enable_mode_detect(struct aspeed_video *video)
+{
+ /* Enable mode detect interrupts */
+ aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
+ VE_INTERRUPT_MODE_DETECT);
+
+ /* Trigger mode detect */
+ aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_TRIG_MODE_DET);
+}
+
+static void aspeed_video_reset(struct aspeed_video *video)
+{
+ /* Reset the engine */
+ reset_control_assert(video->rst);
+
+ /* Don't usleep here; function may be called in interrupt context */
+ udelay(100);
+ reset_control_deassert(video->rst);
+}
+
+static void aspeed_video_off(struct aspeed_video *video)
+{
+ aspeed_video_reset(video);
+
+ /* Turn off the relevant clocks */
+ clk_disable_unprepare(video->vclk);
+ clk_disable_unprepare(video->eclk);
+}
+
+static void aspeed_video_on(struct aspeed_video *video)
+{
+ /* Turn on the relevant clocks */
+ clk_prepare_enable(video->eclk);
+ clk_prepare_enable(video->vclk);
+
+ aspeed_video_reset(video);
+}
+
+static void aspeed_video_bufs_done(struct aspeed_video *video,
+ enum vb2_buffer_state state)
+{
+ unsigned long flags;
+ struct aspeed_video_buffer *buf;
+
+ spin_lock_irqsave(&video->lock, flags);
+ list_for_each_entry(buf, &video->buffers, link)
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ INIT_LIST_HEAD(&video->buffers);
+ spin_unlock_irqrestore(&video->lock, flags);
+}
+
+static void aspeed_video_irq_res_change(struct aspeed_video *video)
+{
+ dev_dbg(video->dev, "Resolution changed; resetting\n");
+
+ set_bit(VIDEO_RES_CHANGE, &video->flags);
+ clear_bit(VIDEO_FRAME_INPRG, &video->flags);
+
+ aspeed_video_off(video);
+ aspeed_video_bufs_done(video, VB2_BUF_STATE_ERROR);
+
+ schedule_delayed_work(&video->res_work, RESOLUTION_CHANGE_DELAY);
+}
+
+static irqreturn_t aspeed_video_irq(int irq, void *arg)
+{
+ struct aspeed_video *video = arg;
+ u32 sts = aspeed_video_read(video, VE_INTERRUPT_STATUS);
+
+ /*
+ * Resolution changed or signal was lost; reset the engine and
+ * re-initialize
+ */
+ if (sts & VE_INTERRUPT_MODE_DETECT_WD) {
+ aspeed_video_irq_res_change(video);
+ return IRQ_HANDLED;
+ }
+
+ if (sts & VE_INTERRUPT_MODE_DETECT) {
+ if (test_bit(VIDEO_RES_DETECT, &video->flags)) {
+ aspeed_video_update(video, VE_INTERRUPT_CTRL,
+ VE_INTERRUPT_MODE_DETECT, 0);
+ aspeed_video_write(video, VE_INTERRUPT_STATUS,
+ VE_INTERRUPT_MODE_DETECT);
+
+ set_bit(VIDEO_MODE_DETECT_DONE, &video->flags);
+ wake_up_interruptible_all(&video->wait);
+ } else {
+ /*
+ * Signal acquired while NOT doing resolution
+ * detection; reset the engine and re-initialize
+ */
+ aspeed_video_irq_res_change(video);
+ return IRQ_HANDLED;
+ }
+ }
+
+ if ((sts & VE_INTERRUPT_COMP_COMPLETE) &&
+ (sts & VE_INTERRUPT_CAPTURE_COMPLETE)) {
+ struct aspeed_video_buffer *buf;
+ u32 frame_size = aspeed_video_read(video,
+ VE_OFFSET_COMP_STREAM);
+
+ spin_lock(&video->lock);
+ clear_bit(VIDEO_FRAME_INPRG, &video->flags);
+ buf = list_first_entry_or_null(&video->buffers,
+ struct aspeed_video_buffer,
+ link);
+ if (buf) {
+ vb2_set_plane_payload(&buf->vb.vb2_buf, 0, frame_size);
+
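+ /*
+ * Complete the buffer only when another one is queued behind it;
+ * otherwise it is retained (and simply overwritten by the next
+ * capture) so the engine always has a buffer to write into.
+ */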
+ if (!list_is_last(&buf->link, &video->buffers)) {
+ buf->vb.vb2_buf.timestamp = ktime_get_ns();
+ buf->vb.sequence = video->sequence++;
+ buf->vb.field = V4L2_FIELD_NONE;
+ vb2_buffer_done(&buf->vb.vb2_buf,
+ VB2_BUF_STATE_DONE);
+ list_del(&buf->link);
+ }
+ }
+ spin_unlock(&video->lock);
+
+ aspeed_video_update(video, VE_SEQ_CTRL,
+ VE_SEQ_CTRL_TRIG_CAPTURE |
+ VE_SEQ_CTRL_FORCE_IDLE |
+ VE_SEQ_CTRL_TRIG_COMP, 0);
+ aspeed_video_update(video, VE_INTERRUPT_CTRL,
+ VE_INTERRUPT_COMP_COMPLETE |
+ VE_INTERRUPT_CAPTURE_COMPLETE, 0);
+ aspeed_video_write(video, VE_INTERRUPT_STATUS,
+ VE_INTERRUPT_COMP_COMPLETE |
+ VE_INTERRUPT_CAPTURE_COMPLETE);
+
+ if (test_bit(VIDEO_STREAMING, &video->flags) && buf)
+ aspeed_video_start_frame(video);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
+{
+ int i;
+ int hsync_counter = 0;
+ int vsync_counter = 0;
+ u32 sts;
+
+ for (i = 0; i < NUM_POLARITY_CHECKS; ++i) {
+ sts = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
+ if (sts & VE_MODE_DETECT_STATUS_VSYNC)
+ vsync_counter--;
+ else
+ vsync_counter++;
+
+ if (sts & VE_MODE_DETECT_STATUS_HSYNC)
+ hsync_counter--;
+ else
+ hsync_counter++;
+ }
+
+ if (hsync_counter < 0 || vsync_counter < 0) {
+ u32 ctrl = 0;
+
+ if (hsync_counter < 0) {
+ ctrl |= VE_CTRL_HSYNC_POL;
+ video->detected_timings.polarities &=
+ ~V4L2_DV_HSYNC_POS_POL;
+ } else {
+ video->detected_timings.polarities |=
+ V4L2_DV_HSYNC_POS_POL;
+ }
+
+ if (vsync_counter < 0) {
+ ctrl |= VE_CTRL_VSYNC_POL;
+ video->detected_timings.polarities &=
+ ~V4L2_DV_VSYNC_POS_POL;
+ } else {
+ video->detected_timings.polarities |=
+ V4L2_DV_VSYNC_POS_POL;
+ }
+
+ aspeed_video_update(video, VE_CTRL, 0, ctrl);
+ }
+}
+
+static bool aspeed_video_alloc_buf(struct aspeed_video *video,
+ struct aspeed_video_addr *addr,
+ unsigned int size)
+{
+ addr->virt = dma_alloc_coherent(video->dev, size, &addr->dma,
+ GFP_KERNEL);
+ if (!addr->virt)
+ return false;
+
+ addr->size = size;
+ return true;
+}
+
+static void aspeed_video_free_buf(struct aspeed_video *video,
+ struct aspeed_video_addr *addr)
+{
+ dma_free_coherent(video->dev, addr->size, addr->virt, addr->dma);
+ addr->size = 0;
+ addr->dma = 0ULL;
+ addr->virt = NULL;
+}
+
+/*
+ * Get the minimum HW-supported compression buffer size for the frame size.
+ * Assume worst-case JPEG compression size is 1/8 raw size. This should be
+ * plenty even for maximum quality; any worse and the engine will simply return
+ * incomplete JPEGs.
+ */
+static void aspeed_video_calc_compressed_size(struct aspeed_video *video,
+ unsigned int frame_size)
+{
+ int i, j;
+ u32 compression_buffer_size_reg = 0;
+ unsigned int size;
+ const unsigned int num_compression_packets = 4;
+ const unsigned int compression_packet_size = 1024;
+ const unsigned int max_compressed_size = frame_size / 2; /* 4bpp / 8 */
+
+ video->max_compressed_size = UINT_MAX;
+
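+ /*
+ * Worked example: a 1920x1200 frame gives frame_size = 2304000 pixels,
+ * so max_compressed_size = 1152000 bytes; the smallest
+ * (4 << i) * (1024 << j) = 2^(12 + i + j) covering that is 2^21 bytes
+ * (2 MiB), reached at i + j = 9.
+ */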
+ for (i = 0; i < 6; ++i) {
+ for (j = 0; j < 8; ++j) {
+ size = (num_compression_packets << i) *
+ (compression_packet_size << j);
+ if (size < max_compressed_size)
+ continue;
+
+ if (size < video->max_compressed_size) {
+ compression_buffer_size_reg = (i << 3) | j;
+ video->max_compressed_size = size;
+ }
+ }
+ }
+
+ aspeed_video_write(video, VE_STREAM_BUF_SIZE,
+ compression_buffer_size_reg);
+
+ dev_dbg(video->dev, "Max compressed size: %x\n",
+ video->max_compressed_size);
+}
+
+#define res_check(v) test_and_clear_bit(VIDEO_MODE_DETECT_DONE, &(v)->flags)
+
+static void aspeed_video_get_resolution(struct aspeed_video *video)
+{
+ bool invalid_resolution = true;
+ int rc;
+ int tries = 0;
+ u32 mds;
+ u32 src_lr_edge;
+ u32 src_tb_edge;
+ u32 sync;
+ struct v4l2_bt_timings *det = &video->detected_timings;
+
+ det->width = MIN_WIDTH;
+ det->height = MIN_HEIGHT;
+ video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+
+ /*
+ * Since we need max buffer size for detection, free the second source
+ * buffer first.
+ */
+ if (video->srcs[1].size)
+ aspeed_video_free_buf(video, &video->srcs[1]);
+
+ if (video->srcs[0].size < VE_MAX_SRC_BUFFER_SIZE) {
+ if (video->srcs[0].size)
+ aspeed_video_free_buf(video, &video->srcs[0]);
+
+ if (!aspeed_video_alloc_buf(video, &video->srcs[0],
+ VE_MAX_SRC_BUFFER_SIZE)) {
+ dev_err(video->dev,
+ "Failed to allocate source buffers\n");
+ return;
+ }
+ }
+
+ aspeed_video_write(video, VE_SRC0_ADDR, video->srcs[0].dma);
+
+ do {
+ if (tries) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(INVALID_RESOLUTION_DELAY))
+ return;
+ }
+
+ set_bit(VIDEO_RES_DETECT, &video->flags);
+ aspeed_video_enable_mode_detect(video);
+
+ rc = wait_event_interruptible_timeout(video->wait,
+ res_check(video),
+ MODE_DETECT_TIMEOUT);
+ if (!rc) {
+ dev_err(video->dev, "Timed out; first mode detect\n");
+ clear_bit(VIDEO_RES_DETECT, &video->flags);
+ return;
+ }
+
+ /* Disable mode detect in order to re-trigger */
+ aspeed_video_update(video, VE_SEQ_CTRL,
+ VE_SEQ_CTRL_TRIG_MODE_DET, 0);
+
+ aspeed_video_check_and_set_polarity(video);
+
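+ /*
+ * Run mode detection a second time, now that the sync polarities have
+ * been programmed, before sampling the edge and sync measurements
+ * below.
+ */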
+ aspeed_video_enable_mode_detect(video);
+
+ rc = wait_event_interruptible_timeout(video->wait,
+ res_check(video),
+ MODE_DETECT_TIMEOUT);
+ clear_bit(VIDEO_RES_DETECT, &video->flags);
+ if (!rc) {
+ dev_err(video->dev, "Timed out; second mode detect\n");
+ return;
+ }
+
+ src_lr_edge = aspeed_video_read(video, VE_SRC_LR_EDGE_DET);
+ src_tb_edge = aspeed_video_read(video, VE_SRC_TB_EDGE_DET);
+ mds = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
+ sync = aspeed_video_read(video, VE_SYNC_STATUS);
+
+ video->frame_bottom = (src_tb_edge & VE_SRC_TB_EDGE_DET_BOT) >>
+ VE_SRC_TB_EDGE_DET_BOT_SHF;
+ video->frame_top = src_tb_edge & VE_SRC_TB_EDGE_DET_TOP;
+ det->vfrontporch = video->frame_top;
+ det->vbackporch = ((mds & VE_MODE_DETECT_V_LINES) >>
+ VE_MODE_DETECT_V_LINES_SHF) - video->frame_bottom;
+ det->vsync = (sync & VE_SYNC_STATUS_VSYNC) >>
+ VE_SYNC_STATUS_VSYNC_SHF;
+ if (video->frame_top > video->frame_bottom)
+ continue;
+
+ video->frame_right = (src_lr_edge & VE_SRC_LR_EDGE_DET_RT) >>
+ VE_SRC_LR_EDGE_DET_RT_SHF;
+ video->frame_left = src_lr_edge & VE_SRC_LR_EDGE_DET_LEFT;
+ det->hfrontporch = video->frame_left;
+ det->hbackporch = (mds & VE_MODE_DETECT_H_PIXELS) -
+ video->frame_right;
+ det->hsync = sync & VE_SYNC_STATUS_HSYNC;
+ if (video->frame_left > video->frame_right)
+ continue;
+
+ invalid_resolution = false;
+ } while (invalid_resolution && (tries++ < INVALID_RESOLUTION_RETRIES));
+
+ if (invalid_resolution) {
+ dev_err(video->dev, "Invalid resolution detected\n");
+ return;
+ }
+
+ det->height = (video->frame_bottom - video->frame_top) + 1;
+ det->width = (video->frame_right - video->frame_left) + 1;
+ video->v4l2_input_status = 0;
+
+ /*
+ * Enable mode-detect watchdog, resolution-change watchdog and
+ * automatic compression after frame capture.
+ */
+ aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
+ VE_INTERRUPT_MODE_DETECT_WD);
+ aspeed_video_update(video, VE_SEQ_CTRL, 0,
+ VE_SEQ_CTRL_AUTO_COMP | VE_SEQ_CTRL_EN_WATCHDOG);
+
+ dev_dbg(video->dev, "Got resolution: %dx%d\n", det->width,
+ det->height);
+}
+
+static void aspeed_video_set_resolution(struct aspeed_video *video)
+{
+ struct v4l2_bt_timings *act = &video->active_timings;
+ unsigned int size = act->width * act->height;
+
+ aspeed_video_calc_compressed_size(video, size);
+
+ /* Don't use direct mode below 1024 x 768 (irqs don't fire) */
+ if (size < DIRECT_FETCH_THRESHOLD) {
+ aspeed_video_write(video, VE_TGS_0,
+ FIELD_PREP(VE_TGS_FIRST,
+ video->frame_left - 1) |
+ FIELD_PREP(VE_TGS_LAST,
+ video->frame_right));
+ aspeed_video_write(video, VE_TGS_1,
+ FIELD_PREP(VE_TGS_FIRST, video->frame_top) |
+ FIELD_PREP(VE_TGS_LAST,
+ video->frame_bottom + 1));
+ aspeed_video_update(video, VE_CTRL, 0, VE_CTRL_INT_DE);
+ } else {
+ aspeed_video_update(video, VE_CTRL, 0, VE_CTRL_DIRECT_FETCH);
+ }
+
+ /* Set capture/compression frame sizes */
+ aspeed_video_write(video, VE_CAP_WINDOW,
+ act->width << 16 | act->height);
+ aspeed_video_write(video, VE_COMP_WINDOW,
+ act->width << 16 | act->height);
+ aspeed_video_write(video, VE_SRC_SCANLINE_OFFSET, act->width * 4);
+
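+	/*
+	 * Source buffers hold raw capture data at 4 bytes per pixel. Reuse
+	 * the max-size detection buffer where possible: either split it in
+	 * half for the two source buffers, pair it with a newly allocated
+	 * second buffer of equal size, or reallocate both buffers at the
+	 * exact frame size.
+	 */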
+ size *= 4;
+
+ if (size == video->srcs[0].size / 2) {
+ aspeed_video_write(video, VE_SRC1_ADDR,
+ video->srcs[0].dma + size);
+ } else if (size == video->srcs[0].size) {
+ if (!aspeed_video_alloc_buf(video, &video->srcs[1], size))
+ goto err_mem;
+
+ aspeed_video_write(video, VE_SRC1_ADDR, video->srcs[1].dma);
+ } else {
+ aspeed_video_free_buf(video, &video->srcs[0]);
+
+ if (!aspeed_video_alloc_buf(video, &video->srcs[0], size))
+ goto err_mem;
+
+ if (!aspeed_video_alloc_buf(video, &video->srcs[1], size))
+ goto err_mem;
+
+ aspeed_video_write(video, VE_SRC0_ADDR, video->srcs[0].dma);
+ aspeed_video_write(video, VE_SRC1_ADDR, video->srcs[1].dma);
+ }
+
+ return;
+
+err_mem:
+ dev_err(video->dev, "Failed to allocate source buffers\n");
+
+ if (video->srcs[0].size)
+ aspeed_video_free_buf(video, &video->srcs[0]);
+}
+
+static void aspeed_video_init_regs(struct aspeed_video *video)
+{
+ u32 comp_ctrl = VE_COMP_CTRL_RSVD |
+ FIELD_PREP(VE_COMP_CTRL_DCT_LUM, video->jpeg_quality) |
+ FIELD_PREP(VE_COMP_CTRL_DCT_CHR, video->jpeg_quality | 0x10);
+ u32 ctrl = VE_CTRL_AUTO_OR_CURSOR;
+ u32 seq_ctrl = VE_SEQ_CTRL_JPEG_MODE;
+
+ if (video->frame_rate)
+ ctrl |= FIELD_PREP(VE_CTRL_FRC, video->frame_rate);
+
+ if (video->yuv420)
+ seq_ctrl |= VE_SEQ_CTRL_YUV420;
+
+ /* Unlock VE registers */
+ aspeed_video_write(video, VE_PROTECTION_KEY, VE_PROTECTION_KEY_UNLOCK);
+
+ /* Disable interrupts */
+ aspeed_video_write(video, VE_INTERRUPT_CTRL, 0);
+ aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff);
+
+ /* Clear the offset */
+ aspeed_video_write(video, VE_COMP_PROC_OFFSET, 0);
+ aspeed_video_write(video, VE_COMP_OFFSET, 0);
+
+ aspeed_video_write(video, VE_JPEG_ADDR, video->jpeg.dma);
+
+ /* Set control registers */
+ aspeed_video_write(video, VE_SEQ_CTRL, seq_ctrl);
+ aspeed_video_write(video, VE_CTRL, ctrl);
+ aspeed_video_write(video, VE_COMP_CTRL, comp_ctrl);
+
+ /* Don't downscale */
+ aspeed_video_write(video, VE_SCALING_FACTOR, 0x10001000);
+ aspeed_video_write(video, VE_SCALING_FILTER0, 0x00200000);
+ aspeed_video_write(video, VE_SCALING_FILTER1, 0x00200000);
+ aspeed_video_write(video, VE_SCALING_FILTER2, 0x00200000);
+ aspeed_video_write(video, VE_SCALING_FILTER3, 0x00200000);
+
+ /* Set mode detection defaults */
+ aspeed_video_write(video, VE_MODE_DETECT, 0x22666500);
+}
+
+static void aspeed_video_start(struct aspeed_video *video)
+{
+ aspeed_video_on(video);
+
+ aspeed_video_init_regs(video);
+
+	/* Resolution is set to 640x480 if no signal is found */
+ aspeed_video_get_resolution(video);
+
+ /* Set timings since the device is being opened for the first time */
+ video->active_timings = video->detected_timings;
+ aspeed_video_set_resolution(video);
+
+ video->pix_fmt.width = video->active_timings.width;
+ video->pix_fmt.height = video->active_timings.height;
+ video->pix_fmt.sizeimage = video->max_compressed_size;
+}
+
+static void aspeed_video_stop(struct aspeed_video *video)
+{
+ set_bit(VIDEO_STOPPED, &video->flags);
+ cancel_delayed_work_sync(&video->res_work);
+
+ aspeed_video_off(video);
+
+ if (video->srcs[0].size)
+ aspeed_video_free_buf(video, &video->srcs[0]);
+
+ if (video->srcs[1].size)
+ aspeed_video_free_buf(video, &video->srcs[1]);
+
+ video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+ video->flags = 0;
+}
+
+static int aspeed_video_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DEVICE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "Aspeed Video Engine", sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ DEVICE_NAME);
+
+ return 0;
+}
+
+static int aspeed_video_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_PIX_FMT_JPEG;
+
+ return 0;
+}
+
+static int aspeed_video_get_format(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct aspeed_video *video = video_drvdata(file);
+
+ f->fmt.pix = video->pix_fmt;
+
+ return 0;
+}
+
+static int aspeed_video_enum_input(struct file *file, void *fh,
+ struct v4l2_input *inp)
+{
+ struct aspeed_video *video = video_drvdata(file);
+
+ if (inp->index)
+ return -EINVAL;
+
+ strscpy(inp->name, "Host VGA capture", sizeof(inp->name));
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+ inp->status = video->v4l2_input_status;
+
+ return 0;
+}
+
+static int aspeed_video_get_input(struct file *file, void *fh, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int aspeed_video_set_input(struct file *file, void *fh, unsigned int i)
+{
+ if (i)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int aspeed_video_get_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct aspeed_video *video = video_drvdata(file);
+
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.capture.readbuffers = 3;
+ a->parm.capture.timeperframe.numerator = 1;
+ if (!video->frame_rate)
+ a->parm.capture.timeperframe.denominator = MAX_FRAME_RATE;
+ else
+ a->parm.capture.timeperframe.denominator = video->frame_rate;
+
+ return 0;
+}
+
+static int aspeed_video_set_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ unsigned int frame_rate = 0;
+ struct aspeed_video *video = video_drvdata(file);
+
+ a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ a->parm.capture.readbuffers = 3;
+
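+	/*
+	 * Convert the requested timeperframe to an integer frame rate, e.g.
+	 * 1/30 s per frame yields 30 fps; out-of-range requests fall back
+	 * to the hardware maximum.
+	 */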
+ if (a->parm.capture.timeperframe.numerator)
+ frame_rate = a->parm.capture.timeperframe.denominator /
+ a->parm.capture.timeperframe.numerator;
+
+ if (!frame_rate || frame_rate > MAX_FRAME_RATE) {
+ frame_rate = 0;
+ a->parm.capture.timeperframe.denominator = MAX_FRAME_RATE;
+ a->parm.capture.timeperframe.numerator = 1;
+ }
+
+ if (video->frame_rate != frame_rate) {
+ video->frame_rate = frame_rate;
+ aspeed_video_update(video, VE_CTRL, VE_CTRL_FRC,
+ FIELD_PREP(VE_CTRL_FRC, frame_rate));
+ }
+
+ return 0;
+}
+
+static int aspeed_video_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct aspeed_video *video = video_drvdata(file);
+
+ if (fsize->index)
+ return -EINVAL;
+
+ if (fsize->pixel_format != V4L2_PIX_FMT_JPEG)
+ return -EINVAL;
+
+ fsize->discrete.width = video->pix_fmt.width;
+ fsize->discrete.height = video->pix_fmt.height;
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+
+ return 0;
+}
+
+static int aspeed_video_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *fival)
+{
+ struct aspeed_video *video = video_drvdata(file);
+
+ if (fival->index)
+ return -EINVAL;
+
+ if (fival->width != video->detected_timings.width ||
+ fival->height != video->detected_timings.height)
+ return -EINVAL;
+
+ if (fival->pixel_format != V4L2_PIX_FMT_JPEG)
+ return -EINVAL;
+
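+	/*
+	 * Report a continuous interval range from 1/MAX_FRAME_RATE up to
+	 * one second per frame.
+	 */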
+ fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
+
+ fival->stepwise.min.denominator = MAX_FRAME_RATE;
+ fival->stepwise.min.numerator = 1;
+ fival->stepwise.max.denominator = 1;
+ fival->stepwise.max.numerator = 1;
+ fival->stepwise.step = fival->stepwise.max;
+
+ return 0;
+}
+
+static int aspeed_video_set_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct aspeed_video *video = video_drvdata(file);
+
+ if (timings->bt.width == video->active_timings.width &&
+ timings->bt.height == video->active_timings.height)
+ return 0;
+
+ if (vb2_is_busy(&video->queue))
+ return -EBUSY;
+
+ video->active_timings = timings->bt;
+
+ aspeed_video_set_resolution(video);
+
+ video->pix_fmt.width = timings->bt.width;
+ video->pix_fmt.height = timings->bt.height;
+ video->pix_fmt.sizeimage = video->max_compressed_size;
+
+ timings->type = V4L2_DV_BT_656_1120;
+
+ return 0;
+}
+
+static int aspeed_video_get_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ struct aspeed_video *video = video_drvdata(file);
+
+ timings->type = V4L2_DV_BT_656_1120;
+ timings->bt = video->active_timings;
+
+ return 0;
+}
+
+static int aspeed_video_query_dv_timings(struct file *file, void *fh,
+ struct v4l2_dv_timings *timings)
+{
+ int rc;
+ struct aspeed_video *video = video_drvdata(file);
+
+	/*
+	 * This blocks only while the driver is detecting a new resolution;
+	 * the wait is also woken on signal loss or timeout, so it cannot
+	 * block indefinitely.
+	 */
+ if (file->f_flags & O_NONBLOCK) {
+ if (test_bit(VIDEO_RES_CHANGE, &video->flags))
+ return -EAGAIN;
+ } else {
+ rc = wait_event_interruptible(video->wait,
+ !test_bit(VIDEO_RES_CHANGE,
+ &video->flags));
+ if (rc)
+ return -EINTR;
+ }
+
+ timings->type = V4L2_DV_BT_656_1120;
+ timings->bt = video->detected_timings;
+
+ return video->v4l2_input_status ? -ENOLINK : 0;
+}
+
+static int aspeed_video_enum_dv_timings(struct file *file, void *fh,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings, &aspeed_video_timings_cap,
+ NULL, NULL);
+}
+
+static int aspeed_video_dv_timings_cap(struct file *file, void *fh,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = aspeed_video_timings_cap;
+
+ return 0;
+}
+
+static int aspeed_video_sub_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_SOURCE_CHANGE:
+ return v4l2_src_change_event_subscribe(fh, sub);
+ }
+
+ return v4l2_ctrl_subscribe_event(fh, sub);
+}
+
+static const struct v4l2_ioctl_ops aspeed_video_ioctl_ops = {
+ .vidioc_querycap = aspeed_video_querycap,
+
+ .vidioc_enum_fmt_vid_cap = aspeed_video_enum_format,
+ .vidioc_g_fmt_vid_cap = aspeed_video_get_format,
+ .vidioc_s_fmt_vid_cap = aspeed_video_get_format,
+ .vidioc_try_fmt_vid_cap = aspeed_video_get_format,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_enum_input = aspeed_video_enum_input,
+ .vidioc_g_input = aspeed_video_get_input,
+ .vidioc_s_input = aspeed_video_set_input,
+
+ .vidioc_g_parm = aspeed_video_get_parm,
+ .vidioc_s_parm = aspeed_video_set_parm,
+ .vidioc_enum_framesizes = aspeed_video_enum_framesizes,
+ .vidioc_enum_frameintervals = aspeed_video_enum_frameintervals,
+
+ .vidioc_s_dv_timings = aspeed_video_set_dv_timings,
+ .vidioc_g_dv_timings = aspeed_video_get_dv_timings,
+ .vidioc_query_dv_timings = aspeed_video_query_dv_timings,
+ .vidioc_enum_dv_timings = aspeed_video_enum_dv_timings,
+ .vidioc_dv_timings_cap = aspeed_video_dv_timings_cap,
+
+ .vidioc_subscribe_event = aspeed_video_sub_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static void aspeed_video_update_jpeg_quality(struct aspeed_video *video)
+{
+ u32 comp_ctrl = FIELD_PREP(VE_COMP_CTRL_DCT_LUM, video->jpeg_quality) |
+ FIELD_PREP(VE_COMP_CTRL_DCT_CHR, video->jpeg_quality | 0x10);
+
+ aspeed_video_update(video, VE_COMP_CTRL,
+ VE_COMP_CTRL_DCT_LUM | VE_COMP_CTRL_DCT_CHR,
+ comp_ctrl);
+}
+
+static void aspeed_video_update_subsampling(struct aspeed_video *video)
+{
+ if (video->jpeg.virt)
+ aspeed_video_init_jpeg_table(video->jpeg.virt, video->yuv420);
+
+ if (video->yuv420)
+ aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_YUV420);
+ else
+ aspeed_video_update(video, VE_SEQ_CTRL, VE_SEQ_CTRL_YUV420, 0);
+}
+
+static int aspeed_video_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct aspeed_video *video = container_of(ctrl->handler,
+ struct aspeed_video,
+ ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ video->jpeg_quality = ctrl->val;
+ aspeed_video_update_jpeg_quality(video);
+ break;
+	case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
+		video->yuv420 = (ctrl->val == V4L2_JPEG_CHROMA_SUBSAMPLING_420);
+		aspeed_video_update_subsampling(video);
+		break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops aspeed_video_ctrl_ops = {
+ .s_ctrl = aspeed_video_set_ctrl,
+};
+
+static void aspeed_video_resolution_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct aspeed_video *video = container_of(dwork, struct aspeed_video,
+ res_work);
+ u32 input_status = video->v4l2_input_status;
+
+ aspeed_video_on(video);
+
+ /* Exit early in case no clients remain */
+ if (test_bit(VIDEO_STOPPED, &video->flags))
+ goto done;
+
+ aspeed_video_init_regs(video);
+
+ aspeed_video_get_resolution(video);
+
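+	/*
+	 * Send a source-change event if the detected geometry or the signal
+	 * status changed during re-detection.
+	 */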
+ if (video->detected_timings.width != video->active_timings.width ||
+ video->detected_timings.height != video->active_timings.height ||
+ input_status != video->v4l2_input_status) {
+ static const struct v4l2_event ev = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ v4l2_event_queue(&video->vdev, &ev);
+ } else if (test_bit(VIDEO_STREAMING, &video->flags)) {
+ /* No resolution change so just restart streaming */
+ aspeed_video_start_frame(video);
+ }
+
+done:
+ clear_bit(VIDEO_RES_CHANGE, &video->flags);
+ wake_up_interruptible_all(&video->wait);
+}
+
+static int aspeed_video_open(struct file *file)
+{
+ int rc;
+ struct aspeed_video *video = video_drvdata(file);
+
+ mutex_lock(&video->video_lock);
+
+ rc = v4l2_fh_open(file);
+ if (rc) {
+ mutex_unlock(&video->video_lock);
+ return rc;
+ }
+
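+	/* Power on and configure the engine only for the first opener */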
+ if (v4l2_fh_is_singular_file(file))
+ aspeed_video_start(video);
+
+ mutex_unlock(&video->video_lock);
+
+ return 0;
+}
+
+static int aspeed_video_release(struct file *file)
+{
+ int rc;
+ struct aspeed_video *video = video_drvdata(file);
+
+ mutex_lock(&video->video_lock);
+
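+	/* Shut the engine down when the last file handle is released */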
+ if (v4l2_fh_is_singular_file(file))
+ aspeed_video_stop(video);
+
+ rc = _vb2_fop_release(file, NULL);
+
+ mutex_unlock(&video->video_lock);
+
+ return rc;
+}
+
+static const struct v4l2_file_operations aspeed_video_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .open = aspeed_video_open,
+ .release = aspeed_video_release,
+};
+
+static int aspeed_video_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct aspeed_video *video = vb2_get_drv_priv(q);
+
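+	/*
+	 * When *num_planes is already set this is a VIDIOC_CREATE_BUFS
+	 * request; just validate the caller-provided plane size.
+	 */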
+ if (*num_planes) {
+ if (sizes[0] < video->max_compressed_size)
+ return -EINVAL;
+
+ return 0;
+ }
+
+ *num_planes = 1;
+ sizes[0] = video->max_compressed_size;
+
+ return 0;
+}
+
+static int aspeed_video_buf_prepare(struct vb2_buffer *vb)
+{
+ struct aspeed_video *video = vb2_get_drv_priv(vb->vb2_queue);
+
+ if (vb2_plane_size(vb, 0) < video->max_compressed_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int aspeed_video_start_streaming(struct vb2_queue *q,
+ unsigned int count)
+{
+ int rc;
+ struct aspeed_video *video = vb2_get_drv_priv(q);
+
+ video->sequence = 0;
+
+ rc = aspeed_video_start_frame(video);
+ if (rc) {
+ aspeed_video_bufs_done(video, VB2_BUF_STATE_QUEUED);
+ return rc;
+ }
+
+ set_bit(VIDEO_STREAMING, &video->flags);
+ return 0;
+}
+
+static void aspeed_video_stop_streaming(struct vb2_queue *q)
+{
+ int rc;
+ struct aspeed_video *video = vb2_get_drv_priv(q);
+
+ clear_bit(VIDEO_STREAMING, &video->flags);
+
+ rc = wait_event_timeout(video->wait,
+ !test_bit(VIDEO_FRAME_INPRG, &video->flags),
+ STOP_TIMEOUT);
+ if (!rc) {
+ dev_err(video->dev, "Timed out when stopping streaming\n");
+
+		/*
+		 * Force-stop any DMA and try to get the hardware back into a
+		 * good state so that streaming can be restarted later.
+		 */
+ aspeed_video_reset(video);
+ aspeed_video_init_regs(video);
+
+ aspeed_video_get_resolution(video);
+ }
+
+ aspeed_video_bufs_done(video, VB2_BUF_STATE_ERROR);
+}
+
+static void aspeed_video_buf_queue(struct vb2_buffer *vb)
+{
+ bool empty;
+ struct aspeed_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct aspeed_video_buffer *avb = to_aspeed_video_buffer(vbuf);
+ unsigned long flags;
+
+ spin_lock_irqsave(&video->lock, flags);
+ empty = list_empty(&video->buffers);
+ list_add_tail(&avb->link, &video->buffers);
+ spin_unlock_irqrestore(&video->lock, flags);
+
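+	/*
+	 * Start a capture if streaming is active, no frame is currently in
+	 * progress, and this was the first buffer queued.
+	 */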
+ if (test_bit(VIDEO_STREAMING, &video->flags) &&
+ !test_bit(VIDEO_FRAME_INPRG, &video->flags) && empty)
+ aspeed_video_start_frame(video);
+}
+
+static const struct vb2_ops aspeed_video_vb2_ops = {
+ .queue_setup = aspeed_video_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = aspeed_video_buf_prepare,
+ .start_streaming = aspeed_video_start_streaming,
+ .stop_streaming = aspeed_video_stop_streaming,
+ .buf_queue = aspeed_video_buf_queue,
+};
+
+static int aspeed_video_setup_video(struct aspeed_video *video)
+{
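+	/* Menu skip mask: allow only 4:4:4 and 4:2:0 chroma subsampling */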
+ const u64 mask = ~(BIT(V4L2_JPEG_CHROMA_SUBSAMPLING_444) |
+ BIT(V4L2_JPEG_CHROMA_SUBSAMPLING_420));
+ struct v4l2_device *v4l2_dev = &video->v4l2_dev;
+ struct vb2_queue *vbq = &video->queue;
+ struct video_device *vdev = &video->vdev;
+ int rc;
+
+ video->pix_fmt.pixelformat = V4L2_PIX_FMT_JPEG;
+ video->pix_fmt.field = V4L2_FIELD_NONE;
+ video->pix_fmt.colorspace = V4L2_COLORSPACE_SRGB;
+ video->pix_fmt.quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+
+ rc = v4l2_device_register(video->dev, v4l2_dev);
+ if (rc) {
+ dev_err(video->dev, "Failed to register v4l2 device\n");
+ return rc;
+ }
+
+ v4l2_ctrl_handler_init(&video->ctrl_handler, 2);
+ v4l2_ctrl_new_std(&video->ctrl_handler, &aspeed_video_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 0,
+ ASPEED_VIDEO_JPEG_NUM_QUALITIES - 1, 1, 0);
+ v4l2_ctrl_new_std_menu(&video->ctrl_handler, &aspeed_video_ctrl_ops,
+ V4L2_CID_JPEG_CHROMA_SUBSAMPLING,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420, mask,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_444);
+
+	if (video->ctrl_handler.error) {
+		rc = video->ctrl_handler.error;
+		v4l2_ctrl_handler_free(&video->ctrl_handler);
+		v4l2_device_unregister(v4l2_dev);
+
+		dev_err(video->dev, "Failed to init controls: %d\n", rc);
+		return rc;
+	}
+
+ v4l2_dev->ctrl_handler = &video->ctrl_handler;
+
+ vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vbq->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF;
+ vbq->dev = v4l2_dev->dev;
+ vbq->lock = &video->video_lock;
+ vbq->ops = &aspeed_video_vb2_ops;
+ vbq->mem_ops = &vb2_dma_contig_memops;
+ vbq->drv_priv = video;
+ vbq->buf_struct_size = sizeof(struct aspeed_video_buffer);
+ vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vbq->min_buffers_needed = 3;
+
+ rc = vb2_queue_init(vbq);
+ if (rc) {
+ v4l2_ctrl_handler_free(&video->ctrl_handler);
+ v4l2_device_unregister(v4l2_dev);
+
+ dev_err(video->dev, "Failed to init vb2 queue\n");
+ return rc;
+ }
+
+ vdev->queue = vbq;
+ vdev->fops = &aspeed_video_v4l2_fops;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
+ V4L2_CAP_STREAMING;
+ vdev->v4l2_dev = v4l2_dev;
+ strscpy(vdev->name, DEVICE_NAME, sizeof(vdev->name));
+ vdev->vfl_type = VFL_TYPE_GRABBER;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->release = video_device_release_empty;
+ vdev->ioctl_ops = &aspeed_video_ioctl_ops;
+ vdev->lock = &video->video_lock;
+
+ video_set_drvdata(vdev, video);
+ rc = video_register_device(vdev, VFL_TYPE_GRABBER, 0);
+ if (rc) {
+ vb2_queue_release(vbq);
+ v4l2_ctrl_handler_free(&video->ctrl_handler);
+ v4l2_device_unregister(v4l2_dev);
+
+ dev_err(video->dev, "Failed to register video device\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int aspeed_video_init(struct aspeed_video *video)
+{
+ int irq;
+ int rc;
+ struct device *dev = video->dev;
+
+ irq = irq_of_parse_and_map(dev->of_node, 0);
+ if (!irq) {
+ dev_err(dev, "Unable to find IRQ\n");
+ return -ENODEV;
+ }
+
+ rc = devm_request_irq(dev, irq, aspeed_video_irq, IRQF_SHARED,
+ DEVICE_NAME, video);
+ if (rc < 0) {
+ dev_err(dev, "Unable to request IRQ %d\n", irq);
+ return rc;
+ }
+
+ video->eclk = devm_clk_get(dev, "eclk");
+ if (IS_ERR(video->eclk)) {
+ dev_err(dev, "Unable to get ECLK\n");
+ return PTR_ERR(video->eclk);
+ }
+
+ video->vclk = devm_clk_get(dev, "vclk");
+ if (IS_ERR(video->vclk)) {
+ dev_err(dev, "Unable to get VCLK\n");
+ return PTR_ERR(video->vclk);
+ }
+
+ video->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(video->rst)) {
+ dev_err(dev, "Unable to get VE reset\n");
+ return PTR_ERR(video->rst);
+ }
+
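+	/* Map the device's reserved-memory region for DMA allocations */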
+ rc = of_reserved_mem_device_init(dev);
+ if (rc) {
+ dev_err(dev, "Unable to reserve memory\n");
+ return rc;
+ }
+
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(dev, "Failed to set DMA mask\n");
+ of_reserved_mem_device_release(dev);
+ return rc;
+ }
+
+ if (!aspeed_video_alloc_buf(video, &video->jpeg,
+ VE_JPEG_HEADER_SIZE)) {
+ dev_err(dev, "Failed to allocate DMA for JPEG header\n");
+ of_reserved_mem_device_release(dev);
+		return -ENOMEM;
+ }
+
+ aspeed_video_init_jpeg_table(video->jpeg.virt, video->yuv420);
+
+ return 0;
+}
+
+static int aspeed_video_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct resource *res;
+	struct aspeed_video *video = devm_kzalloc(&pdev->dev, sizeof(*video),
+						  GFP_KERNEL);
+
+ if (!video)
+ return -ENOMEM;
+
+ video->frame_rate = 30;
+ video->dev = &pdev->dev;
+ mutex_init(&video->video_lock);
+ init_waitqueue_head(&video->wait);
+ INIT_DELAYED_WORK(&video->res_work, aspeed_video_resolution_work);
+ INIT_LIST_HEAD(&video->buffers);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	video->base = devm_ioremap_resource(video->dev, res);
+	if (IS_ERR(video->base))
+ return PTR_ERR(video->base);
+
+ rc = aspeed_video_init(video);
+ if (rc)
+ return rc;
+
+ rc = aspeed_video_setup_video(video);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int aspeed_video_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
+ struct aspeed_video *video = to_aspeed_video(v4l2_dev);
+
+ video_unregister_device(&video->vdev);
+
+ vb2_queue_release(&video->queue);
+
+ v4l2_ctrl_handler_free(&video->ctrl_handler);
+
+ v4l2_device_unregister(v4l2_dev);
+
+ dma_free_coherent(video->dev, VE_JPEG_HEADER_SIZE, video->jpeg.virt,
+ video->jpeg.dma);
+
+ of_reserved_mem_device_release(dev);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_video_of_match[] = {
+ { .compatible = "aspeed,ast2400-video-engine" },
+ { .compatible = "aspeed,ast2500-video-engine" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, aspeed_video_of_match);
+
+static struct platform_driver aspeed_video_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = aspeed_video_of_match,
+ },
+ .probe = aspeed_video_probe,
+ .remove = aspeed_video_remove,
+};
+
+module_platform_driver(aspeed_video_driver);
+
+MODULE_DESCRIPTION("ASPEED Video Engine Driver");
+MODULE_AUTHOR("Eddie James");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index d26c2d85a009..8e0194993a52 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -253,7 +253,6 @@ void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list)
{
struct vb2_v4l2_buffer *src_buf;
struct coda_buffer_meta *meta;
- unsigned long flags;
u32 start;
if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG)
@@ -269,6 +268,23 @@ void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list)
ctx->num_metas > 1)
break;
+ if (ctx->num_internal_frames &&
+ ctx->num_metas >= ctx->num_internal_frames) {
+ meta = list_first_entry(&ctx->buffer_meta_list,
+ struct coda_buffer_meta, list);
+
+ /*
+ * If we managed to fill in at least a full reorder
+ * window of buffers (num_internal_frames is a
+ * conservative estimate for this) and the bitstream
+	 * prefetcher has at least two 256-byte periods beyond
+ * the first buffer to fetch, we can safely stop queuing
+ * in order to limit the decoder drain latency.
+ */
+ if (coda_bitstream_can_fetch_past(ctx, meta->end))
+ break;
+ }
+
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
/* Drop frames that do not start/end with a SOI/EOI markers */
@@ -299,8 +315,7 @@ void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list)
}
/* Buffer start position */
- start = ctx->bitstream_fifo.kfifo.in &
- ctx->bitstream_fifo.kfifo.mask;
+ start = ctx->bitstream_fifo.kfifo.in;
if (coda_bitstream_try_queue(ctx, src_buf)) {
/*
@@ -315,15 +330,12 @@ void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list)
meta->timecode = src_buf->timecode;
meta->timestamp = src_buf->vb2_buf.timestamp;
meta->start = start;
- meta->end = ctx->bitstream_fifo.kfifo.in &
- ctx->bitstream_fifo.kfifo.mask;
- spin_lock_irqsave(&ctx->buffer_meta_lock,
- flags);
+ meta->end = ctx->bitstream_fifo.kfifo.in;
+ spin_lock(&ctx->buffer_meta_lock);
list_add_tail(&meta->list,
&ctx->buffer_meta_list);
ctx->num_metas++;
- spin_unlock_irqrestore(&ctx->buffer_meta_lock,
- flags);
+ spin_unlock(&ctx->buffer_meta_lock);
trace_coda_bit_queue(ctx, src_buf, meta);
}
@@ -713,8 +725,7 @@ static void coda_setup_iram(struct coda_ctx *ctx)
out:
if (!(iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE))
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "IRAM smaller than needed\n");
+ coda_dbg(1, ctx, "IRAM smaller than needed\n");
if (dev->devtype->product == CODA_HX4 ||
dev->devtype->product == CODA_7541) {
@@ -991,16 +1002,15 @@ static int coda_start_encoding(struct coda_ctx *ctx)
else
coda_write(dev, CODA_STD_H264,
CODA_CMD_ENC_SEQ_COD_STD);
- if (ctx->params.h264_deblk_enabled) {
- value = ((ctx->params.h264_deblk_alpha &
- CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
- CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
- ((ctx->params.h264_deblk_beta &
- CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
- CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
- } else {
- value = 1 << CODA_264PARAM_DISABLEDEBLK_OFFSET;
- }
+ value = ((ctx->params.h264_disable_deblocking_filter_idc &
+ CODA_264PARAM_DISABLEDEBLK_MASK) <<
+ CODA_264PARAM_DISABLEDEBLK_OFFSET) |
+ ((ctx->params.h264_slice_alpha_c0_offset_div2 &
+ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
+ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
+ ((ctx->params.h264_slice_beta_offset_div2 &
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
break;
case V4L2_PIX_FMT_JPEG:
@@ -1201,6 +1211,12 @@ static int coda_start_encoding(struct coda_ctx *ctx)
goto out;
}
+ coda_dbg(1, ctx, "start encoding %dx%d %4.4s->%4.4s @ %d/%d Hz\n",
+ q_data_src->rect.width, q_data_src->rect.height,
+ (char *)&ctx->codec->src_fourcc, (char *)&dst_fourcc,
+ ctx->params.framerate & 0xffff,
+ (ctx->params.framerate >> 16) + 1);
+
/* Save stream headers */
buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
switch (dst_fourcc) {
@@ -1462,8 +1478,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr);
}
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n",
- wr_ptr - start_ptr);
+ coda_dbg(1, ctx, "frame size = %u\n", wr_ptr - start_ptr);
coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
coda_read(dev, CODA_RET_ENC_PIC_FLAG);
@@ -1492,11 +1507,9 @@ static void coda_finish_encode(struct coda_ctx *ctx)
if (ctx->gopcounter < 0)
ctx->gopcounter = ctx->params.gop_size - 1;
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "job finished: encoding frame (%d) (%s)\n",
- dst_buf->sequence,
- (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ?
- "KEYFRAME" : "PFRAME");
+ coda_dbg(1, ctx, "job finished: encoded %c frame (%d)\n",
+ (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ? 'I' : 'P',
+ dst_buf->sequence);
}
static void coda_seq_end_work(struct work_struct *work)
@@ -1510,9 +1523,7 @@ static void coda_seq_end_work(struct work_struct *work)
if (ctx->initialized == 0)
goto out;
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "%d: %s: sent command 'SEQ_END' to coda\n", ctx->idx,
- __func__);
+ coda_dbg(1, ctx, "%s: sent command 'SEQ_END' to coda\n", __func__);
if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
v4l2_err(&dev->v4l2_dev,
"CODA_COMMAND_SEQ_END failed\n");
@@ -1655,8 +1666,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
u32 val;
int ret;
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "Video Data Order Adapter: %s\n",
+ coda_dbg(1, ctx, "Video Data Order Adapter: %s\n",
ctx->use_vdoa ? "Enabled" : "Disabled");
/* Start decoding */
@@ -1736,7 +1746,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
if (coda_read(dev, CODA_RET_DEC_SEQ_SUCCESS) == 0) {
v4l2_err(&dev->v4l2_dev,
- "CODA_COMMAND_SEQ_INIT failed, error code = %d\n",
+ "CODA_COMMAND_SEQ_INIT failed, error code = 0x%x\n",
coda_read(dev, CODA_RET_DEC_SEQ_ERR_REASON));
return -EAGAIN;
}
@@ -1760,8 +1770,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
width = round_up(width, 16);
height = round_up(height, 16);
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "%s instance %d now: %dx%d\n",
- __func__, ctx->idx, width, height);
+ coda_dbg(1, ctx, "start decoding: %dx%d\n", width, height);
ctx->num_internal_frames = coda_read(dev, CODA_RET_DEC_SEQ_FRAME_NEED);
/*
@@ -1879,7 +1888,6 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
struct coda_dev *dev = ctx->dev;
struct coda_q_data *q_data_dst;
struct coda_buffer_meta *meta;
- unsigned long flags;
u32 rot_mode = 0;
u32 reg_addr, reg_stride;
@@ -1893,8 +1901,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
if (coda_get_bitstream_payload(ctx) < 512 &&
(!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "bitstream payload: %d, skipping\n",
+ coda_dbg(1, ctx, "bitstream payload: %d, skipping\n",
coda_get_bitstream_payload(ctx));
v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
return -EAGAIN;
@@ -1973,15 +1980,14 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
coda_write(dev, ctx->iram_info.axi_sram_use,
CODA7_REG_BIT_AXI_SRAM_USE);
- spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
+ spin_lock(&ctx->buffer_meta_lock);
meta = list_first_entry_or_null(&ctx->buffer_meta_list,
struct coda_buffer_meta, list);
if (meta && ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) {
/* If this is the last buffer in the bitstream, add padding */
- if (meta->end == (ctx->bitstream_fifo.kfifo.in &
- ctx->bitstream_fifo.kfifo.mask)) {
+ if (meta->end == ctx->bitstream_fifo.kfifo.in) {
static unsigned char buf[512];
unsigned int pad;
@@ -1993,7 +1999,7 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
kfifo_in(&ctx->bitstream_fifo, buf, pad);
}
}
- spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
+ spin_unlock(&ctx->buffer_meta_lock);
coda_kfifo_sync_to_device_full(ctx);
@@ -2015,7 +2021,6 @@ static void coda_finish_decode(struct coda_ctx *ctx)
struct vb2_v4l2_buffer *dst_buf;
struct coda_buffer_meta *meta;
unsigned long payload;
- unsigned long flags;
int width, height;
int decoded_idx;
int display_idx;
@@ -2100,8 +2105,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
val = coda_read(dev, CODA_RET_DEC_PIC_OPTION);
if (val == 0) {
/* not enough bitstream data */
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "prescan failed: %d\n", val);
+ coda_dbg(1, ctx, "prescan failed: %d\n", val);
ctx->hold = true;
return;
}
@@ -2147,13 +2151,13 @@ static void coda_finish_decode(struct coda_ctx *ctx)
} else {
val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
val -= ctx->sequence_offset;
- spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
+ spin_lock(&ctx->buffer_meta_lock);
if (!list_empty(&ctx->buffer_meta_list)) {
meta = list_first_entry(&ctx->buffer_meta_list,
struct coda_buffer_meta, list);
list_del(&meta->list);
ctx->num_metas--;
- spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
+ spin_unlock(&ctx->buffer_meta_lock);
/*
* Clamp counters to 16 bits for comparison, as the HW
* counter rolls over at this point for h.264. This
@@ -2170,7 +2174,7 @@ static void coda_finish_decode(struct coda_ctx *ctx)
ctx->frame_metas[decoded_idx] = *meta;
kfree(meta);
} else {
- spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
+ spin_unlock(&ctx->buffer_meta_lock);
v4l2_err(&dev->v4l2_dev, "empty timestamp list!\n");
memset(&ctx->frame_metas[decoded_idx], 0,
sizeof(struct coda_buffer_meta));
@@ -2243,18 +2247,28 @@ static void coda_finish_decode(struct coda_ctx *ctx)
else
coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_DONE);
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "job finished: decoding frame (%d) (%s)\n",
- dst_buf->sequence,
- (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ?
- "KEYFRAME" : "PFRAME");
+ coda_dbg(1, ctx, "job finished: decoded %c frame (%u/%u)\n",
+ (dst_buf->flags & V4L2_BUF_FLAG_KEYFRAME) ? 'I' :
+ ((dst_buf->flags & V4L2_BUF_FLAG_PFRAME) ? 'P' : 'B'),
+ dst_buf->sequence, ctx->qsequence);
} else {
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "job finished: no frame decoded\n");
+ coda_dbg(1, ctx, "job finished: no frame decoded (%u/%u)\n",
+ ctx->osequence, ctx->qsequence);
}
/* The rotator will copy the current display frame next time */
ctx->display_idx = display_idx;
+
+ /*
+ * The current decode run might have brought the bitstream fill level
+ * below the size where we can start the next decode run. As userspace
+ * might have filled the output queue completely and might thus be
+ * blocked, we can't rely on the next qbuf to trigger the bitstream
+ * refill. Check if we have data to refill the bitstream now.
+ */
+ mutex_lock(&ctx->bitstream_mutex);
+ coda_fill_bitstream(ctx, NULL);
+ mutex_unlock(&ctx->bitstream_mutex);
}
static void coda_decode_timeout(struct coda_ctx *ctx)
@@ -2308,13 +2322,11 @@ irqreturn_t coda_irq_handler(int irq, void *data)
trace_coda_bit_done(ctx);
if (ctx->aborting) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "task has been aborted\n");
+ coda_dbg(1, ctx, "task has been aborted\n");
}
if (coda_isbusy(ctx->dev)) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "coda is still busy!!!!\n");
+ coda_dbg(1, ctx, "coda is still busy!!!!\n");
return IRQ_NONE;
}
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index 2848ea5f464d..7518f01c48f7 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -17,6 +17,7 @@
#include <linux/firmware.h>
#include <linux/gcd.h>
#include <linux/genalloc.h>
+#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -50,8 +51,8 @@
#define CODA_ISRAM_SIZE (2048 * 2)
-#define MIN_W 176
-#define MIN_H 144
+#define MIN_W 48
+#define MIN_H 16
#define S_ALIGN 1 /* multiple of 2 */
#define W_ALIGN 1 /* multiple of 2 */
@@ -703,7 +704,8 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
return -EINVAL;
if (vb2_is_busy(vq)) {
- v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
+ v4l2_err(&ctx->dev->v4l2_dev, "%s: %s queue busy: %d\n",
+ __func__, v4l2_type_names[f->type], vq->num_buffers);
return -EBUSY;
}
@@ -749,11 +751,10 @@ static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
else
ctx->use_vdoa = false;
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "Setting format for type %d, wxh: %dx%d, fmt: %4.4s %c\n",
- f->type, q_data->width, q_data->height,
- (char *)&q_data->fourcc,
- (ctx->tiled_map_type == GDI_LINEAR_FRAME_MAP) ? 'L' : 'T');
+ coda_dbg(1, ctx, "Setting %s format, wxh: %dx%d, fmt: %4.4s %c\n",
+ v4l2_type_names[f->type], q_data->width, q_data->height,
+ (char *)&q_data->fourcc,
+ (ctx->tiled_map_type == GDI_LINEAR_FRAME_MAP) ? 'L' : 'T');
return 0;
}
@@ -938,32 +939,42 @@ static int coda_s_selection(struct file *file, void *fh,
struct coda_ctx *ctx = fh_to_ctx(fh);
struct coda_q_data *q_data;
- if (ctx->inst_type == CODA_INST_ENCODER &&
- s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
- s->target == V4L2_SEL_TGT_CROP) {
- q_data = get_q_data(ctx, s->type);
- if (!q_data)
- return -EINVAL;
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (ctx->inst_type == CODA_INST_ENCODER &&
+ s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ q_data = get_q_data(ctx, s->type);
+ if (!q_data)
+ return -EINVAL;
- s->r.left = 0;
- s->r.top = 0;
- s->r.width = clamp(s->r.width, 2U, q_data->width);
- s->r.height = clamp(s->r.height, 2U, q_data->height);
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = clamp(s->r.width, 2U, q_data->width);
+ s->r.height = clamp(s->r.height, 2U, q_data->height);
+
+ if (s->flags & V4L2_SEL_FLAG_LE) {
+ s->r.width = round_up(s->r.width, 2);
+ s->r.height = round_up(s->r.height, 2);
+ } else {
+ s->r.width = round_down(s->r.width, 2);
+ s->r.height = round_down(s->r.height, 2);
+ }
- if (s->flags & V4L2_SEL_FLAG_LE) {
- s->r.width = round_up(s->r.width, 2);
- s->r.height = round_up(s->r.height, 2);
- } else {
- s->r.width = round_down(s->r.width, 2);
- s->r.height = round_down(s->r.height, 2);
- }
+ q_data->rect = s->r;
- q_data->rect = s->r;
+ coda_dbg(1, ctx, "Setting crop rectangle: %dx%d\n",
+ s->r.width, s->r.height);
- return 0;
+ return 0;
+ }
+ /* else fall through */
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ case V4L2_SEL_TGT_COMPOSE:
+ return coda_g_selection(file, fh, s);
+ default:
+ /* v4l2-compliance expects this to fail for read-only targets */
+ return -EINVAL;
}
-
- return coda_g_selection(file, fh, s);
}
static int coda_try_encoder_cmd(struct file *file, void *fh,
@@ -1044,6 +1055,38 @@ static int coda_decoder_cmd(struct file *file, void *fh,
return 0;
}
+static int coda_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *f)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+ int i;
+
+ if (f->index)
+ return -EINVAL;
+
+ /* Disallow YUYV if the vdoa is not available */
+ if (!ctx->vdoa && f->pixel_format == V4L2_PIX_FMT_YUYV)
+ return -EINVAL;
+
+ for (i = 0; i < CODA_MAX_FORMATS; i++) {
+ if (f->pixel_format == ctx->cvd->src_formats[i] ||
+ f->pixel_format == ctx->cvd->dst_formats[i])
+ break;
+ }
+ if (i == CODA_MAX_FORMATS)
+ return -EINVAL;
+
+ f->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
+ f->stepwise.min.numerator = 1;
+ f->stepwise.min.denominator = 65535;
+ f->stepwise.max.numerator = 65536;
+ f->stepwise.max.denominator = 1;
+ f->stepwise.step.numerator = 1;
+ f->stepwise.step.denominator = 1;
+
+ return 0;
+}
+
static int coda_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
@@ -1080,10 +1123,10 @@ static void coda_approximate_timeperframe(struct v4l2_fract *timeperframe)
return;
}
- /* Upper bound is 65536/1, map everything above to infinity */
+ /* Upper bound is 65536/1 */
if (s.denominator == 0 || s.numerator / s.denominator > 65536) {
- timeperframe->numerator = 1;
- timeperframe->denominator = 0;
+ timeperframe->numerator = 65536;
+ timeperframe->denominator = 1;
return;
}
@@ -1135,6 +1178,7 @@ static int coda_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
+ a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
tpf = &a->parm.output.timeperframe;
coda_approximate_timeperframe(tpf);
ctx->params.framerate = coda_timeperframe_to_frate(tpf);
@@ -1189,6 +1233,8 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
.vidioc_g_parm = coda_g_parm,
.vidioc_s_parm = coda_s_parm,
+ .vidioc_enum_frameintervals = coda_enum_frameintervals,
+
.vidioc_subscribe_event = coda_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
@@ -1257,14 +1303,12 @@ static int coda_job_ready(void *m2m_priv)
* the compressed frame can be in the bitstream.
*/
if (!src_bufs && ctx->inst_type != CODA_INST_DECODER) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "not ready: not enough video buffers.\n");
+ coda_dbg(1, ctx, "not ready: not enough vid-out buffers.\n");
return 0;
}
if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "not ready: not enough video capture buffers.\n");
+ coda_dbg(1, ctx, "not ready: not enough vid-cap buffers.\n");
return 0;
}
@@ -1272,49 +1316,48 @@ static int coda_job_ready(void *m2m_priv)
bool stream_end = ctx->bit_stream_param &
CODA_BIT_STREAM_END_FLAG;
int num_metas = ctx->num_metas;
+ struct coda_buffer_meta *meta;
unsigned int count;
count = hweight32(ctx->frm_dis_flg);
if (ctx->use_vdoa && count >= (ctx->num_internal_frames - 1)) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "%d: not ready: all internal buffers in use: %d/%d (0x%x)",
- ctx->idx, count, ctx->num_internal_frames,
+ coda_dbg(1, ctx,
+ "not ready: all internal buffers in use: %d/%d (0x%x)",
+ count, ctx->num_internal_frames,
ctx->frm_dis_flg);
return 0;
}
if (ctx->hold && !src_bufs) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "%d: not ready: on hold for more buffers.\n",
- ctx->idx);
+ coda_dbg(1, ctx,
+ "not ready: on hold for more buffers.\n");
return 0;
}
if (!stream_end && (num_metas + src_bufs) < 2) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "%d: not ready: need 2 buffers available (%d, %d)\n",
- ctx->idx, num_metas, src_bufs);
+ coda_dbg(1, ctx,
+ "not ready: need 2 buffers available (queue:%d + bitstream:%d)\n",
+ num_metas, src_bufs);
return 0;
}
-
- if (!src_bufs && !stream_end &&
- (coda_get_bitstream_payload(ctx) < 512)) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "%d: not ready: not enough bitstream data (%d).\n",
- ctx->idx, coda_get_bitstream_payload(ctx));
+ meta = list_first_entry(&ctx->buffer_meta_list,
+ struct coda_buffer_meta, list);
+ if (!coda_bitstream_can_fetch_past(ctx, meta->end) &&
+ !stream_end) {
+ coda_dbg(1, ctx,
+ "not ready: not enough bitstream data to read past %u (%u)\n",
+ meta->end, ctx->bitstream_fifo.kfifo.in);
return 0;
}
}
if (ctx->aborting) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "not ready: aborting\n");
+ coda_dbg(1, ctx, "not ready: aborting\n");
return 0;
}
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "job ready\n");
+ coda_dbg(1, ctx, "job ready\n");
return 1;
}
@@ -1325,8 +1368,7 @@ static void coda_job_abort(void *priv)
ctx->aborting = 1;
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "Aborting task\n");
+ coda_dbg(1, ctx, "job abort\n");
}
static const struct v4l2_m2m_ops coda_m2m_ops = {
@@ -1403,8 +1445,8 @@ static int coda_queue_setup(struct vb2_queue *vq,
*nplanes = 1;
sizes[0] = size;
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "get %d buffer(s) of size %d each.\n", *nbuffers, size);
+ coda_dbg(1, ctx, "get %d buffer(s) of size %d each.\n", *nbuffers,
+ size);
return 0;
}
@@ -1469,8 +1511,7 @@ static void coda_update_h264_profile_ctrl(struct coda_ctx *ctx)
profile_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_PROFILE);
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "Parsed H264 Profile: %s\n",
- profile_names[profile]);
+ coda_dbg(1, ctx, "Parsed H264 Profile: %s\n", profile_names[profile]);
}
static void coda_update_h264_level_ctrl(struct coda_ctx *ctx)
@@ -1489,8 +1530,7 @@ static void coda_update_h264_level_ctrl(struct coda_ctx *ctx)
level_names = v4l2_ctrl_get_menu(V4L2_CID_MPEG_VIDEO_H264_LEVEL);
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "Parsed H264 Level: %s\n",
- level_names[level]);
+ coda_dbg(1, ctx, "Parsed H264 Level: %s\n", level_names[level]);
}
static void coda_buf_queue(struct vb2_buffer *vb)
@@ -1595,6 +1635,8 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
if (count < 1)
return -EINVAL;
+ coda_dbg(1, ctx, "start streaming %s\n", v4l2_type_names[q->type]);
+
INIT_LIST_HEAD(&list);
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
@@ -1687,14 +1729,13 @@ static void coda_stop_streaming(struct vb2_queue *q)
struct coda_ctx *ctx = vb2_get_drv_priv(q);
struct coda_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *buf;
- unsigned long flags;
bool stop;
stop = ctx->streamon_out && ctx->streamon_cap;
+ coda_dbg(1, ctx, "stop streaming %s\n", v4l2_type_names[q->type]);
+
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "%s: output\n", __func__);
ctx->streamon_out = 0;
coda_bit_stream_end_flag(ctx);
@@ -1704,8 +1745,6 @@ static void coda_stop_streaming(struct vb2_queue *q)
while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
} else {
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "%s: capture\n", __func__);
ctx->streamon_cap = 0;
ctx->osequence = 0;
@@ -1722,7 +1761,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
queue_work(dev->workqueue, &ctx->seq_end_work);
flush_work(&ctx->seq_end_work);
}
- spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
+ spin_lock(&ctx->buffer_meta_lock);
while (!list_empty(&ctx->buffer_meta_list)) {
meta = list_first_entry(&ctx->buffer_meta_list,
struct coda_buffer_meta, list);
@@ -1730,7 +1769,7 @@ static void coda_stop_streaming(struct vb2_queue *q)
kfree(meta);
}
ctx->num_metas = 0;
- spin_unlock_irqrestore(&ctx->buffer_meta_lock, flags);
+ spin_unlock(&ctx->buffer_meta_lock);
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
ctx->runcounter = 0;
@@ -1757,8 +1796,8 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
struct coda_ctx *ctx =
container_of(ctrl->handler, struct coda_ctx, ctrls);
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
+ coda_dbg(1, ctx, "s_ctrl: id = 0x%x, name = \"%s\", val = %d\n",
+ ctrl->id, ctrl->name, ctrl->val);
switch (ctrl->id) {
case V4L2_CID_HFLIP:
@@ -1792,14 +1831,13 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
ctx->params.h264_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
- ctx->params.h264_deblk_alpha = ctrl->val;
+ ctx->params.h264_slice_alpha_c0_offset_div2 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
- ctx->params.h264_deblk_beta = ctrl->val;
+ ctx->params.h264_slice_beta_offset_div2 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
- ctx->params.h264_deblk_enabled = (ctrl->val ==
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ ctx->params.h264_disable_deblocking_filter_idc = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
/* TODO: switch between baseline and constrained baseline */
@@ -1849,9 +1887,8 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
ctx->params.vbv_size = min(ctrl->val * 8192, 0x7fffffff);
break;
default:
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "Invalid control, id=%d, val=%d\n",
- ctrl->id, ctrl->val);
+ coda_dbg(1, ctx, "Invalid control, id=%d, val=%d\n",
+ ctrl->id, ctrl->val);
return -EINVAL;
}
@@ -1881,13 +1918,13 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, 0, 15, 1, 0);
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, 0, 15, 1, 0);
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED, 0x0,
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
+ 0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
@@ -2099,17 +2136,6 @@ int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
return coda_queue_init(priv, dst_vq);
}
-static int coda_next_free_instance(struct coda_dev *dev)
-{
- int idx = ffz(dev->instance_mask);
-
- if ((idx < 0) ||
- (dev->devtype->product == CODA_DX6 && idx > CODADX6_MAX_INSTANCES))
- return -EBUSY;
-
- return idx;
-}
-
/*
* File operations
*/
@@ -2118,7 +2144,8 @@ static int coda_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct coda_dev *dev = video_get_drvdata(vdev);
- struct coda_ctx *ctx = NULL;
+ struct coda_ctx *ctx;
+ unsigned int max = ~0;
char *name;
int ret;
int idx;
@@ -2127,12 +2154,13 @@ static int coda_open(struct file *file)
if (!ctx)
return -ENOMEM;
- idx = coda_next_free_instance(dev);
+ if (dev->devtype->product == CODA_DX6)
+ max = CODADX6_MAX_INSTANCES - 1;
+ idx = ida_alloc_max(&dev->ida, max, GFP_KERNEL);
if (idx < 0) {
ret = idx;
goto err_coda_max;
}
- set_bit(idx, &dev->instance_mask);
name = kasprintf(GFP_KERNEL, "context%d", idx);
if (!name) {
@@ -2156,6 +2184,9 @@ static int coda_open(struct file *file)
v4l2_fh_add(&ctx->fh);
ctx->dev = dev;
ctx->idx = idx;
+
+ coda_dbg(1, ctx, "open instance (%p)\n", ctx);
+
switch (dev->devtype->product) {
case CODA_960:
/*
@@ -2221,13 +2252,6 @@ static int coda_open(struct file *file)
INIT_LIST_HEAD(&ctx->buffer_meta_list);
spin_lock_init(&ctx->buffer_meta_lock);
- mutex_lock(&dev->dev_mutex);
- list_add(&ctx->list, &dev->instances);
- mutex_unlock(&dev->dev_mutex);
-
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Created instance %d (%p)\n",
- ctx->idx, ctx);
-
return 0;
err_ctrls_setup:
@@ -2241,8 +2265,8 @@ err_clk_per:
err_pm_get:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
- clear_bit(ctx->idx, &dev->instance_mask);
err_coda_name_init:
+ ida_free(&dev->ida, ctx->idx);
err_coda_max:
kfree(ctx);
return ret;
@@ -2253,8 +2277,7 @@ static int coda_release(struct file *file)
struct coda_dev *dev = video_drvdata(file);
struct coda_ctx *ctx = fh_to_ctx(file->private_data);
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Releasing instance %p\n",
- ctx);
+ coda_dbg(1, ctx, "release instance (%p)\n", ctx);
if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit)
coda_bit_stream_end_flag(ctx);
@@ -2271,10 +2294,6 @@ static int coda_release(struct file *file)
flush_work(&ctx->seq_end_work);
}
- mutex_lock(&dev->dev_mutex);
- list_del(&ctx->list);
- mutex_unlock(&dev->dev_mutex);
-
if (ctx->dev->devtype->product == CODA_DX6)
coda_free_aux_buf(dev, &ctx->workbuf);
@@ -2284,7 +2303,7 @@ static int coda_release(struct file *file)
pm_runtime_put_sync(&dev->plat_dev->dev);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
- clear_bit(ctx->idx, &dev->instance_mask);
+ ida_free(&dev->ida, ctx->idx);
if (ctx->ops->release)
ctx->ops->release(ctx);
debugfs_remove_recursive(ctx->debugfs_entry);
@@ -2679,7 +2698,6 @@ static int coda_probe(struct platform_device *pdev)
return -EINVAL;
spin_lock_init(&dev->irqlock);
- INIT_LIST_HEAD(&dev->instances);
dev->plat_dev = pdev;
dev->clk_per = devm_clk_get(&pdev->dev, "per");
@@ -2745,6 +2763,7 @@ static int coda_probe(struct platform_device *pdev)
mutex_init(&dev->dev_mutex);
mutex_init(&dev->coda_mutex);
+ ida_init(&dev->ida);
dev->debugfs_root = debugfs_create_dir("coda", NULL);
if (!dev->debugfs_root)
@@ -2832,6 +2851,7 @@ static int coda_remove(struct platform_device *pdev)
coda_free_aux_buf(dev, &dev->tempbuf);
coda_free_aux_buf(dev, &dev->workbuf);
debugfs_remove_recursive(dev->debugfs_root);
+ ida_destroy(&dev->ida);
return 0;
}
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 19ac0b9dc6eb..31cea72f5b2a 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -16,6 +16,7 @@
#define __CODA_H__
#include <linux/debugfs.h>
+#include <linux/idr.h>
#include <linux/irqreturn.h>
#include <linux/mutex.h>
#include <linux/kfifo.h>
@@ -94,8 +95,7 @@ struct coda_dev {
struct mutex coda_mutex;
struct workqueue_struct *workqueue;
struct v4l2_m2m_dev *m2m_dev;
- struct list_head instances;
- unsigned long instance_mask;
+ struct ida ida;
struct dentry *debugfs_root;
};
@@ -115,9 +115,9 @@ struct coda_params {
u8 h264_inter_qp;
u8 h264_min_qp;
u8 h264_max_qp;
- u8 h264_deblk_enabled;
- u8 h264_deblk_alpha;
- u8 h264_deblk_beta;
+ u8 h264_disable_deblocking_filter_idc;
+ s8 h264_slice_alpha_c0_offset_div2;
+ s8 h264_slice_beta_offset_div2;
u8 h264_profile_idc;
u8 h264_level_idc;
u8 mpeg4_intra_qp;
@@ -144,8 +144,8 @@ struct coda_buffer_meta {
u32 sequence;
struct v4l2_timecode timecode;
u64 timestamp;
- u32 start;
- u32 end;
+ unsigned int start;
+ unsigned int end;
};
/* Per-queue, driver-specific private data */
@@ -192,7 +192,6 @@ struct coda_context_ops {
struct coda_ctx {
struct coda_dev *dev;
struct mutex buffer_mutex;
- struct list_head list;
struct work_struct pic_run_work;
struct work_struct seq_end_work;
struct completion completion;
@@ -253,6 +252,13 @@ struct coda_ctx {
extern int coda_debug;
+#define coda_dbg(level, ctx, fmt, arg...) \
+ do { \
+ if (coda_debug >= (level)) \
+ v4l2_dbg((level), coda_debug, &(ctx)->dev->v4l2_dev, \
+ "%u: " fmt, (ctx)->idx, ##arg); \
+ } while (0)
+
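+/* Example: coda_dbg(1, ctx, "start decoding: %dx%d\n", width, height); */
+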
void coda_write(struct coda_dev *dev, u32 data, u32 reg);
unsigned int coda_read(struct coda_dev *dev, u32 reg);
void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
@@ -295,6 +301,18 @@ static inline unsigned int coda_get_bitstream_payload(struct coda_ctx *ctx)
return kfifo_len(&ctx->bitstream_fifo);
}
+/*
+ * The bitstream prefetcher needs to read at least two 256-byte periods past
+ * the desired bitstream position for all data to reach the decoder.
+ */
+static inline bool coda_bitstream_can_fetch_past(struct coda_ctx *ctx,
+ unsigned int pos)
+{
+ return (int)(ctx->bitstream_fifo.kfifo.in - ALIGN(pos, 256)) > 512;
+}
+
void coda_bit_stream_end_flag(struct coda_ctx *ctx);
void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index 5e7b00a97671..e675e38f3475 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -292,7 +292,7 @@
#define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET 8
#define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK 0x0f
#define CODA_264PARAM_DISABLEDEBLK_OFFSET 6
-#define CODA_264PARAM_DISABLEDEBLK_MASK 0x01
+#define CODA_264PARAM_DISABLEDEBLK_MASK 0x03
#define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET 5
#define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_MASK 0x01
#define CODA_264PARAM_CHROMAQPOFFSET_OFFSET 0
diff --git a/drivers/media/platform/coda/trace.h b/drivers/media/platform/coda/trace.h
index ca671e315ad0..a672bfc4c6ba 100644
--- a/drivers/media/platform/coda/trace.h
+++ b/drivers/media/platform/coda/trace.h
@@ -97,8 +97,8 @@ DECLARE_EVENT_CLASS(coda_buf_meta_class,
TP_fast_assign(
__entry->minor = ctx->fh.vdev->minor;
__entry->index = buf->vb2_buf.index;
- __entry->start = meta->start;
- __entry->end = meta->end;
+ __entry->start = meta->start & ctx->bitstream_fifo.kfifo.mask;
+ __entry->end = meta->end & ctx->bitstream_fifo.kfifo.mask;
__entry->ctx = ctx->idx;
),
@@ -127,8 +127,10 @@ DECLARE_EVENT_CLASS(coda_meta_class,
TP_fast_assign(
__entry->minor = ctx->fh.vdev->minor;
- __entry->start = meta ? meta->start : 0;
- __entry->end = meta ? meta->end : 0;
+ __entry->start = meta ? (meta->start &
+ ctx->bitstream_fifo.kfifo.mask) : 0;
+ __entry->end = meta ? (meta->end &
+ ctx->bitstream_fifo.kfifo.mask) : 0;
__entry->ctx = ctx->idx;
),
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 18c035ef84cf..4766a7a23d16 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -93,28 +93,6 @@ static int vpbe_find_encoder_sd_index(struct vpbe_config *cfg,
}
/**
- * vpbe_g_cropcap - Get crop capabilities of the display
- * @vpbe_dev: vpbe device ptr
- * @cropcap: cropcap is a ptr to struct v4l2_cropcap
- *
- * Update the crop capabilities in crop cap for current
- * mode
- */
-static int vpbe_g_cropcap(struct vpbe_device *vpbe_dev,
- struct v4l2_cropcap *cropcap)
-{
- if (!cropcap)
- return -EINVAL;
- cropcap->bounds.left = 0;
- cropcap->bounds.top = 0;
- cropcap->bounds.width = vpbe_dev->current_timings.xres;
- cropcap->bounds.height = vpbe_dev->current_timings.yres;
- cropcap->defrect = cropcap->bounds;
-
- return 0;
-}
-
-/**
* vpbe_enum_outputs - enumerate outputs
* @vpbe_dev: vpbe device ptr
* @output: ptr to v4l2_output structure
@@ -740,7 +718,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
def_output);
- return ret;
+ goto fail_kfree_amp;
}
printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
@@ -748,12 +726,15 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
def_mode);
- return ret;
+ goto fail_kfree_amp;
}
vpbe_dev->initialized = 1;
/* TBD handling of bootargs for default output and mode */
return 0;
+fail_kfree_amp:
+ mutex_lock(&vpbe_dev->lock);
+ kfree(vpbe_dev->amp);
fail_kfree_encoders:
kfree(vpbe_dev->encoders);
fail_dev_unregister:
@@ -793,7 +774,6 @@ static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
}
static const struct vpbe_device_ops vpbe_dev_ops = {
- .g_cropcap = vpbe_g_cropcap,
.enum_outputs = vpbe_enum_outputs,
.set_output = vpbe_set_output,
.get_output = vpbe_get_output,
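The error-path change above follows the standard kernel unwind idiom: each failure jumps to a label that releases only what was already set up, and labels fall through in reverse order of acquisition (here the new `fail_kfree_amp` falls through to `fail_kfree_encoders`). A minimal runnable sketch of the shape (hypothetical names):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int setup_b(void) { return -EINVAL; }	/* force the error path */

    static int init(void)
    {
    	void *a;
    	int ret;

    	a = malloc(16);
    	if (!a)
    		return -ENOMEM;

    	ret = setup_b();
    	if (ret)
    		goto fail_free_a;	/* undo only what already succeeded */

    	return 0;

    fail_free_a:
    	free(a);
    	return ret;
    }

    int main(void)
    {
    	printf("init() = %d\n", init());	/* prints -22 (-EINVAL) */
    	return 0;
    }
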
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 5c235898af7b..9e86b0d36640 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -759,18 +759,18 @@ static int vpbe_display_g_selection(struct file *file, void *priv,
return 0;
}
-static int vpbe_display_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *cropcap)
+static int vpbe_display_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct vpbe_layer *layer = video_drvdata(file);
struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
- if (cropcap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
- cropcap->pixelaspect = vpbe_dev->current_timings.aspect;
+ *f = vpbe_dev->current_timings.aspect;
return 0;
}
@@ -1263,7 +1263,7 @@ static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_expbuf = vb2_ioctl_expbuf,
- .vidioc_cropcap = vpbe_display_cropcap,
+ .vidioc_g_pixelaspect = vpbe_display_g_pixelaspect,
.vidioc_g_selection = vpbe_display_g_selection,
.vidioc_s_selection = vpbe_display_s_selection,
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index ea3ddd5a42bd..9996bab98fe3 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1558,20 +1558,20 @@ static int vpfe_streamoff(struct file *file, void *priv,
return ret;
}
-static int vpfe_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *crop)
+static int vpfe_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct vpfe_device *vpfe_dev = video_drvdata(file);
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_cropcap\n");
+ v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_pixelaspect\n");
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
/* If std_index is invalid, then just return (== 1:1 aspect) */
if (vpfe_dev->std_index >= ARRAY_SIZE(vpfe_standards))
return 0;
- crop->pixelaspect = vpfe_standards[vpfe_dev->std_index].pixelaspect;
+ *f = vpfe_standards[vpfe_dev->std_index].pixelaspect;
return 0;
}
@@ -1677,7 +1677,7 @@ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
.vidioc_dqbuf = vpfe_dqbuf,
.vidioc_streamon = vpfe_streamon,
.vidioc_streamoff = vpfe_streamoff,
- .vidioc_cropcap = vpfe_cropcap,
+ .vidioc_g_pixelaspect = vpfe_g_pixelaspect,
.vidioc_g_selection = vpfe_g_selection,
.vidioc_s_selection = vpfe_s_selection,
};
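These conversions drop the per-driver VIDIOC_CROPCAP handlers in favor of the simpler `vidioc_g_pixelaspect` operation; the V4L2 core can then synthesize CROPCAP replies from `g_pixelaspect` plus `g_selection`, so existing applications keep working. A user-space sketch (device path assumed):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
    	struct v4l2_cropcap cc;
    	int fd = open("/dev/video0", O_RDWR);	/* assumed capture node */

    	if (fd < 0)
    		return 1;

    	memset(&cc, 0, sizeof(cc));
    	cc.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

    	/* Still answered even though the driver now only implements
    	 * g_pixelaspect and g_selection. */
    	if (ioctl(fd, VIDIOC_CROPCAP, &cc) == 0)
    		printf("pixel aspect %u/%u\n",
    		       cc.pixelaspect.numerator, cc.pixelaspect.denominator);
    	return 0;
    }
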
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 838c5c53de37..0fa3ec04ab7b 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -541,20 +541,7 @@ void gsc_check_crop_change(u32 tmp_w, u32 tmp_h, u32 *w, u32 *h)
}
}
-int gsc_g_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr)
-{
- struct gsc_frame *frame;
-
- frame = ctx_get_frame(ctx, cr->type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
-
- cr->c = frame->crop;
-
- return 0;
-}
-
-int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr)
+int gsc_try_selection(struct gsc_ctx *ctx, struct v4l2_selection *s)
{
struct gsc_frame *f;
struct gsc_dev *gsc = ctx->gsc_dev;
@@ -562,25 +549,25 @@ int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr)
u32 mod_x = 0, mod_y = 0, tmp_w, tmp_h;
u32 min_w, min_h, max_w, max_h;
- if (cr->c.top < 0 || cr->c.left < 0) {
+ if (s->r.top < 0 || s->r.left < 0) {
pr_err("doesn't support negative values for top & left\n");
return -EINVAL;
}
- pr_debug("user put w: %d, h: %d", cr->c.width, cr->c.height);
+ pr_debug("user put w: %d, h: %d", s->r.width, s->r.height);
- if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
f = &ctx->d_frame;
- else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
f = &ctx->s_frame;
else
return -EINVAL;
max_w = f->f_width;
max_h = f->f_height;
- tmp_w = cr->c.width;
- tmp_h = cr->c.height;
+ tmp_w = s->r.width;
+ tmp_h = s->r.height;
- if (V4L2_TYPE_IS_OUTPUT(cr->type)) {
+ if (V4L2_TYPE_IS_OUTPUT(s->type)) {
if ((is_yuv422(f->fmt->color) && f->fmt->num_comp == 1) ||
is_rgb(f->fmt->color))
min_w = 32;
@@ -602,8 +589,8 @@ int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr)
max_h = f->f_width;
min_w = variant->pix_min->target_rot_en_w;
min_h = variant->pix_min->target_rot_en_h;
- tmp_w = cr->c.height;
- tmp_h = cr->c.width;
+ tmp_w = s->r.height;
+ tmp_h = s->r.width;
} else {
min_w = variant->pix_min->target_rot_dis_w;
min_h = variant->pix_min->target_rot_dis_h;
@@ -616,29 +603,29 @@ int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr)
v4l_bound_align_image(&tmp_w, min_w, max_w, mod_x,
&tmp_h, min_h, max_h, mod_y, 0);
- if (!V4L2_TYPE_IS_OUTPUT(cr->type) &&
- (ctx->gsc_ctrls.rotate->val == 90 ||
- ctx->gsc_ctrls.rotate->val == 270))
+ if (!V4L2_TYPE_IS_OUTPUT(s->type) &&
+ (ctx->gsc_ctrls.rotate->val == 90 ||
+ ctx->gsc_ctrls.rotate->val == 270))
gsc_check_crop_change(tmp_h, tmp_w,
- &cr->c.width, &cr->c.height);
+ &s->r.width, &s->r.height);
else
gsc_check_crop_change(tmp_w, tmp_h,
- &cr->c.width, &cr->c.height);
+ &s->r.width, &s->r.height);
/* adjust left/top if cropping rectangle is out of bounds */
/* Need to add code to align the left value to a multiple of 2 */
- if (cr->c.left + tmp_w > max_w)
- cr->c.left = max_w - tmp_w;
- if (cr->c.top + tmp_h > max_h)
- cr->c.top = max_h - tmp_h;
+ if (s->r.left + tmp_w > max_w)
+ s->r.left = max_w - tmp_w;
+ if (s->r.top + tmp_h > max_h)
+ s->r.top = max_h - tmp_h;
if ((is_yuv420(f->fmt->color) || is_yuv422(f->fmt->color)) &&
- cr->c.left & 1)
- cr->c.left -= 1;
+ s->r.left & 1)
+ s->r.left -= 1;
pr_debug("Aligned l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
- cr->c.left, cr->c.top, cr->c.width, cr->c.height, max_w, max_h);
+ s->r.left, s->r.top, s->r.width, s->r.height, max_w, max_h);
return 0;
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index 715d9c9d8d30..c81f0a17d286 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -392,8 +392,7 @@ int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f);
void gsc_set_frame_size(struct gsc_frame *frame, int width, int height);
int gsc_g_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f);
void gsc_check_crop_change(u32 tmp_w, u32 tmp_h, u32 *w, u32 *h);
-int gsc_g_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr);
-int gsc_try_crop(struct gsc_ctx *ctx, struct v4l2_crop *cr);
+int gsc_try_selection(struct gsc_ctx *ctx, struct v4l2_selection *s);
int gsc_cal_prescaler_ratio(struct gsc_variant *var, u32 src, u32 dst,
u32 *ratio);
void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *sh);
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index cc5d690818e1..c757f5d98bcc 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -494,30 +494,27 @@ static int gsc_m2m_s_selection(struct file *file, void *fh,
{
struct gsc_frame *frame;
struct gsc_ctx *ctx = fh_to_ctx(fh);
- struct v4l2_crop cr;
struct gsc_variant *variant = ctx->gsc_dev->variant;
+ struct v4l2_selection sel = *s;
int ret;
- cr.type = s->type;
- cr.c = s->r;
-
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
return -EINVAL;
- ret = gsc_try_crop(ctx, &cr);
+ ret = gsc_try_selection(ctx, &sel);
if (ret)
return ret;
if (s->flags & V4L2_SEL_FLAG_LE &&
- !is_rectangle_enclosed(&cr.c, &s->r))
+ !is_rectangle_enclosed(&sel.r, &s->r))
return -ERANGE;
if (s->flags & V4L2_SEL_FLAG_GE &&
- !is_rectangle_enclosed(&s->r, &cr.c))
+ !is_rectangle_enclosed(&s->r, &sel.r))
return -ERANGE;
- s->r = cr.c;
+ s->r = sel.r;
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -539,15 +536,15 @@ static int gsc_m2m_s_selection(struct file *file, void *fh,
/* Check to see if scaling ratio is within supported range */
if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) {
if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- ret = gsc_check_scaler_ratio(variant, cr.c.width,
- cr.c.height, ctx->d_frame.crop.width,
+ ret = gsc_check_scaler_ratio(variant, sel.r.width,
+ sel.r.height, ctx->d_frame.crop.width,
ctx->d_frame.crop.height,
ctx->gsc_ctrls.rotate->val, ctx->out_path);
} else {
ret = gsc_check_scaler_ratio(variant,
ctx->s_frame.crop.width,
- ctx->s_frame.crop.height, cr.c.width,
- cr.c.height, ctx->gsc_ctrls.rotate->val,
+ ctx->s_frame.crop.height, sel.r.width,
+ sel.r.height, ctx->gsc_ctrls.rotate->val,
ctx->out_path);
}
@@ -557,7 +554,7 @@ static int gsc_m2m_s_selection(struct file *file, void *fh,
}
}
- frame->crop = cr.c;
+ frame->crop = sel.r;
gsc_ctx_state_lock_set(GSC_PARAMS, ctx);
return 0;
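With the crop API gone, constraints are expressed through `struct v4l2_selection` flags: V4L2_SEL_FLAG_LE requires the adjusted rectangle to stay inside the requested one, V4L2_SEL_FLAG_GE requires it to contain the requested one, and the driver returns -ERANGE when it cannot comply (the `is_rectangle_enclosed()` checks above). A user-space sketch (device path assumed):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
    	struct v4l2_selection sel;
    	int fd = open("/dev/video0", O_RDWR);	/* assumed m2m node */

    	if (fd < 0)
    		return 1;

    	memset(&sel, 0, sizeof(sel));
    	sel.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
    	sel.target = V4L2_SEL_TGT_CROP;
    	sel.flags = V4L2_SEL_FLAG_LE;	/* result must fit inside .r */
    	sel.r.left = 0;
    	sel.r.top = 0;
    	sel.r.width = 640;
    	sel.r.height = 480;

    	if (ioctl(fd, VIDIOC_S_SELECTION, &sel) == 0)
    		printf("crop %ux%u@(%d,%d)\n", sel.r.width, sel.r.height,
    		       sel.r.left, sel.r.top);
    	return 0;
    }
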
diff --git a/drivers/media/platform/exynos4-is/fimc-core.h b/drivers/media/platform/exynos4-is/fimc-core.h
index 82d514df97f0..9f751a5efd64 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/exynos4-is/fimc-core.h
@@ -596,12 +596,14 @@ static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
{
struct fimc_frame *frame;
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE == type) {
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
+ type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
if (fimc_ctx_state_is_set(FIMC_CTX_M2M, ctx))
frame = &ctx->s_frame;
else
return ERR_PTR(-EINVAL);
- } else if (V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE == type) {
+ } else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
frame = &ctx->d_frame;
} else {
v4l2_err(ctx->fimc_dev->v4l2_dev,
diff --git a/drivers/media/platform/exynos4-is/fimc-is-errno.c b/drivers/media/platform/exynos4-is/fimc-is-errno.c
index e050e63fe358..bbb08576492e 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-errno.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-errno.c
@@ -90,8 +90,8 @@ const char *fimc_is_param_strerr(unsigned int error)
return "ERROR_SENSOR_INVALID_SIZE";
case ERROR_SENSOR_INVALID_SETTING:
return "ERROR_SENSOR_INVALID_SETTING";
- case ERROR_SENSOR_ACTURATOR_INIT_FAIL:
- return "ERROR_SENSOR_ACTURATOR_INIT_FAIL";
+ case ERROR_SENSOR_ACTUATOR_INIT_FAIL:
+ return "ERROR_SENSOR_ACTUATOR_INIT_FAIL";
case ERROR_SENSOR_INVALID_AF_POS:
return "ERROR_SENSOR_INVALID_AF_POS";
case ERROR_SENSOR_UNSUPPORT_FUNC:
diff --git a/drivers/media/platform/exynos4-is/fimc-is-errno.h b/drivers/media/platform/exynos4-is/fimc-is-errno.h
index ef981e74513a..77f4fc860be5 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-errno.h
+++ b/drivers/media/platform/exynos4-is/fimc-is-errno.h
@@ -189,7 +189,7 @@ enum fimc_is_error {
ERROR_SENSOR_INVALID_EXPOSURETIME,
ERROR_SENSOR_INVALID_SIZE,
ERROR_SENSOR_INVALID_SETTING,
- ERROR_SENSOR_ACTURATOR_INIT_FAIL,
+ ERROR_SENSOR_ACTUATOR_INIT_FAIL,
ERROR_SENSOR_INVALID_AF_POS,
ERROR_SENSOR_UNSUPPORT_FUNC,
ERROR_SENSOR_UNSUPPORT_PERI,
diff --git a/drivers/media/platform/exynos4-is/fimc-m2m.c b/drivers/media/platform/exynos4-is/fimc-m2m.c
index a19f8b164a47..61c8177409cf 100644
--- a/drivers/media/platform/exynos4-is/fimc-m2m.c
+++ b/drivers/media/platform/exynos4-is/fimc-m2m.c
@@ -383,60 +383,80 @@ static int fimc_m2m_s_fmt_mplane(struct file *file, void *fh,
return 0;
}
-static int fimc_m2m_cropcap(struct file *file, void *fh,
- struct v4l2_cropcap *cr)
+static int fimc_m2m_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_frame *frame;
- frame = ctx_get_frame(ctx, cr->type);
+ frame = ctx_get_frame(ctx, s->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
- cr->bounds.left = 0;
- cr->bounds.top = 0;
- cr->bounds.width = frame->o_width;
- cr->bounds.height = frame->o_height;
- cr->defrect = cr->bounds;
-
- return 0;
-}
-
-static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
-{
- struct fimc_ctx *ctx = fh_to_ctx(fh);
- struct fimc_frame *frame;
-
- frame = ctx_get_frame(ctx, cr->type);
- if (IS_ERR(frame))
- return PTR_ERR(frame);
-
- cr->c.left = frame->offs_h;
- cr->c.top = frame->offs_v;
- cr->c.width = frame->width;
- cr->c.height = frame->height;
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = frame->offs_h;
+ s->r.top = frame->offs_v;
+ s->r.width = frame->width;
+ s->r.height = frame->height;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->o_width;
+ s->r.height = frame->o_height;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
-static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
+static int fimc_m2m_try_selection(struct fimc_ctx *ctx,
+ struct v4l2_selection *s)
{
struct fimc_dev *fimc = ctx->fimc_dev;
struct fimc_frame *f;
u32 min_size, halign, depth = 0;
int i;
- if (cr->c.top < 0 || cr->c.left < 0) {
+ if (s->r.top < 0 || s->r.left < 0) {
v4l2_err(&fimc->m2m.vfd,
"doesn't support negative values for top & left\n");
return -EINVAL;
}
- if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
f = &ctx->d_frame;
- else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ if (s->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+ } else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
f = &ctx->s_frame;
- else
+ if (s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+ } else {
return -EINVAL;
+ }
min_size = (f == &ctx->s_frame) ?
fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
@@ -450,61 +470,61 @@ static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
for (i = 0; i < f->fmt->memplanes; i++)
depth += f->fmt->depth[i];
- v4l_bound_align_image(&cr->c.width, min_size, f->o_width,
+ v4l_bound_align_image(&s->r.width, min_size, f->o_width,
ffs(min_size) - 1,
- &cr->c.height, min_size, f->o_height,
+ &s->r.height, min_size, f->o_height,
halign, 64/(ALIGN(depth, 8)));
/* adjust left/top if cropping rectangle is out of bounds */
- if (cr->c.left + cr->c.width > f->o_width)
- cr->c.left = f->o_width - cr->c.width;
- if (cr->c.top + cr->c.height > f->o_height)
- cr->c.top = f->o_height - cr->c.height;
+ if (s->r.left + s->r.width > f->o_width)
+ s->r.left = f->o_width - s->r.width;
+ if (s->r.top + s->r.height > f->o_height)
+ s->r.top = f->o_height - s->r.height;
- cr->c.left = round_down(cr->c.left, min_size);
- cr->c.top = round_down(cr->c.top, fimc->variant->hor_offs_align);
+ s->r.left = round_down(s->r.left, min_size);
+ s->r.top = round_down(s->r.top, fimc->variant->hor_offs_align);
dbg("l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
- cr->c.left, cr->c.top, cr->c.width, cr->c.height,
+ s->r.left, s->r.top, s->r.width, s->r.height,
f->f_width, f->f_height);
return 0;
}
-static int fimc_m2m_s_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
+static int fimc_m2m_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
{
struct fimc_ctx *ctx = fh_to_ctx(fh);
struct fimc_dev *fimc = ctx->fimc_dev;
- struct v4l2_crop cr = *crop;
struct fimc_frame *f;
int ret;
- ret = fimc_m2m_try_crop(ctx, &cr);
+ ret = fimc_m2m_try_selection(ctx, s);
if (ret)
return ret;
- f = (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
+ f = (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
&ctx->s_frame : &ctx->d_frame;
/* Check to see if scaling ratio is within supported range */
- if (cr.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- ret = fimc_check_scaler_ratio(ctx, cr.c.width,
- cr.c.height, ctx->d_frame.width,
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ ret = fimc_check_scaler_ratio(ctx, s->r.width,
+ s->r.height, ctx->d_frame.width,
ctx->d_frame.height, ctx->rotation);
} else {
ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width,
- ctx->s_frame.height, cr.c.width,
- cr.c.height, ctx->rotation);
+ ctx->s_frame.height, s->r.width,
+ s->r.height, ctx->rotation);
}
if (ret) {
v4l2_err(&fimc->m2m.vfd, "Out of scaler range\n");
return -EINVAL;
}
- f->offs_h = cr.c.left;
- f->offs_v = cr.c.top;
- f->width = cr.c.width;
- f->height = cr.c.height;
+ f->offs_h = s->r.left;
+ f->offs_v = s->r.top;
+ f->width = s->r.width;
+ f->height = s->r.height;
fimc_ctx_state_set(FIMC_PARAMS, ctx);
@@ -528,9 +548,8 @@ static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
- .vidioc_g_crop = fimc_m2m_g_crop,
- .vidioc_s_crop = fimc_m2m_s_crop,
- .vidioc_cropcap = fimc_m2m_cropcap
+ .vidioc_g_selection = fimc_m2m_g_selection,
+ .vidioc_s_selection = fimc_m2m_s_selection,
};
@@ -717,6 +736,7 @@ int fimc_register_m2m_device(struct fimc_dev *fimc,
vfd->release = video_device_release_empty;
vfd->lock = &fimc->lock;
vfd->vfl_dir = VFL_DIR_M2M;
+ set_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "fimc.%d.m2m", fimc->id);
video_set_drvdata(vfd, fimc);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 870501b0f351..463f2d84553e 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -445,7 +445,7 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
*/
np = of_get_parent(rem);
- if (np && !of_node_cmp(np->name, "i2c-isp"))
+ if (of_node_name_eq(np, "i2c-isp"))
pd->fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK;
else
pd->fimc_bus_type = pd->sensor_bus_type;
@@ -495,7 +495,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
for_each_available_child_of_node(parent, node) {
struct device_node *port;
- if (of_node_cmp(node->name, "csis"))
+ if (!of_node_name_eq(node, "csis"))
continue;
/* The csis node can have only one port subnode. */
port = of_get_next_child(node, NULL);
@@ -720,13 +720,13 @@ static int fimc_md_register_platform_entities(struct fimc_md *fmd,
continue;
/* If driver of any entity isn't ready try all again later. */
- if (!strcmp(node->name, CSIS_OF_NODE_NAME))
+ if (of_node_name_eq(node, CSIS_OF_NODE_NAME))
plat_entity = IDX_CSIS;
- else if (!strcmp(node->name, FIMC_IS_OF_NODE_NAME))
+ else if (of_node_name_eq(node, FIMC_IS_OF_NODE_NAME))
plat_entity = IDX_IS_ISP;
- else if (!strcmp(node->name, FIMC_LITE_OF_NODE_NAME))
+ else if (of_node_name_eq(node, FIMC_LITE_OF_NODE_NAME))
plat_entity = IDX_FLITE;
- else if (!strcmp(node->name, FIMC_OF_NODE_NAME) &&
+ else if (of_node_name_eq(node, FIMC_OF_NODE_NAME) &&
!of_property_read_bool(node, "samsung,lcd-wb"))
plat_entity = IDX_FIMC;
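`of_node_name_eq()` is preferred over comparing `node->name` directly: it is NULL-safe (relevant for the `of_get_parent()` result above) and compares only up to the unit address, so a node named `csis@11880000` still matches "csis". A simplified standalone model of the comparison:

    #define _GNU_SOURCE
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified model of of_node_name_eq(): NULL-safe, unit-address aware. */
    static bool node_name_eq(const char *node_name, const char *name)
    {
    	size_t len;

    	if (!node_name)
    		return false;

    	len = strchrnul(node_name, '@') - node_name;
    	return strlen(name) == len && !strncmp(node_name, name, len);
    }

    int main(void)
    {
    	printf("%d\n", node_name_eq("csis@11880000", "csis"));	/* 1 */
    	printf("%d\n", node_name_eq(NULL, "csis"));		/* 0 */
    	return 0;
    }
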
diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c
index b76cd0e8313c..c1c255408d16 100644
--- a/drivers/media/platform/imx-pxp.c
+++ b/drivers/media/platform/imx-pxp.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/sched.h>
@@ -1607,7 +1606,7 @@ static const struct v4l2_m2m_ops m2m_ops = {
.job_abort = pxp_job_abort,
};
-static void pxp_soft_reset(struct pxp_dev *dev)
+static int pxp_soft_reset(struct pxp_dev *dev)
{
int ret;
u32 val;
@@ -1620,10 +1619,12 @@ static void pxp_soft_reset(struct pxp_dev *dev)
ret = readl_poll_timeout(dev->mmio + HW_PXP_CTRL, val,
val & BM_PXP_CTRL_CLKGATE, 0, 100);
if (ret < 0)
- pr_err("PXP reset timeout\n");
+ return ret;
writel(BM_PXP_CTRL_SFTRST, dev->mmio + HW_PXP_CTRL_CLR);
writel(BM_PXP_CTRL_CLKGATE, dev->mmio + HW_PXP_CTRL_CLR);
+
+ return 0;
}
static int pxp_probe(struct platform_device *pdev)
@@ -1666,8 +1667,15 @@ static int pxp_probe(struct platform_device *pdev)
return ret;
}
- clk_prepare_enable(dev->clk);
- pxp_soft_reset(dev);
+ ret = clk_prepare_enable(dev->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = pxp_soft_reset(dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "PXP reset timeout: %d\n", ret);
+ goto err_clk;
+ }
spin_lock_init(&dev->irqlock);
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index 2986cb4b88d0..8d00d9d8adff 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -4,7 +4,7 @@
* sensor.
*
* The data sheet for this device can be found at:
- * http://www.marvell.com/products/pc_connectivity/88alp01/
+ * http://wiki.laptop.org/images/5/5c/88ALP01_Datasheet_July_2007.pdf
*
* Copyright 2006-11 One Laptop Per Child Association, Inc.
* Copyright 2006-11 Jonathan Corbet <corbet@lwn.net>
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 54631ad1c71e..d1f12257bf66 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -1087,7 +1087,6 @@ static void mtk_venc_worker(struct work_struct *work)
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
memset(&frm_buf, 0, sizeof(frm_buf));
for (i = 0; i < src_buf->num_planes ; i++) {
- frm_buf.fb_addr[i].va = vb2_plane_vaddr(src_buf, i);
frm_buf.fb_addr[i].dma_addr =
vb2_dma_contig_plane_dma_addr(src_buf, i);
frm_buf.fb_addr[i].size =
@@ -1098,14 +1097,11 @@ static void mtk_venc_worker(struct work_struct *work)
bs_buf.size = (size_t)dst_buf->planes[0].length;
mtk_v4l2_debug(2,
- "Framebuf VA=%p PA=%llx Size=0x%zx;VA=%p PA=0x%llx Size=0x%zx;VA=%p PA=0x%llx Size=%zu",
- frm_buf.fb_addr[0].va,
+ "Framebuf PA=%llx Size=0x%zx;PA=0x%llx Size=0x%zx;PA=0x%llx Size=%zu",
(u64)frm_buf.fb_addr[0].dma_addr,
frm_buf.fb_addr[0].size,
- frm_buf.fb_addr[1].va,
(u64)frm_buf.fb_addr[1].dma_addr,
frm_buf.fb_addr[1].size,
- frm_buf.fb_addr[2].va,
(u64)frm_buf.fb_addr[2].dma_addr,
frm_buf.fb_addr[2].size);
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
index 3e73e9db781f..7c025045ea90 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
@@ -41,25 +41,27 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
node = of_parse_phandle(dev->of_node, "mediatek,larb", 0);
if (!node) {
mtk_v4l2_err("no mediatek,larb found");
- return -1;
+ return -ENODEV;
}
pdev = of_find_device_by_node(node);
+ of_node_put(node);
if (!pdev) {
mtk_v4l2_err("no mediatek,larb device found");
- return -1;
+ return -ENODEV;
}
pm->larbvenc = &pdev->dev;
node = of_parse_phandle(dev->of_node, "mediatek,larb", 1);
if (!node) {
mtk_v4l2_err("no mediatek,larb found");
- return -1;
+ return -ENODEV;
}
pdev = of_find_device_by_node(node);
+ of_node_put(node);
if (!pdev) {
mtk_v4l2_err("no mediatek,larb device found");
- return -1;
+ return -ENODEV;
}
pm->larbvenclt = &pdev->dev;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
index 06c254f5c171..9bf6e8d1b9c9 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.h
@@ -25,6 +25,11 @@ struct mtk_vcodec_mem {
dma_addr_t dma_addr;
};
+struct mtk_vcodec_fb {
+ size_t size;
+ dma_addr_t dma_addr;
+};
+
struct mtk_vcodec_ctx;
struct mtk_vcodec_dev;
diff --git a/drivers/media/platform/mtk-vcodec/venc_drv_if.h b/drivers/media/platform/mtk-vcodec/venc_drv_if.h
index a6e7d32e55cb..55ecda844894 100644
--- a/drivers/media/platform/mtk-vcodec/venc_drv_if.h
+++ b/drivers/media/platform/mtk-vcodec/venc_drv_if.h
@@ -106,7 +106,7 @@ struct venc_enc_param {
* @fb_addr: plane frame buffer addresses
*/
struct venc_frm_buf {
- struct mtk_vcodec_mem fb_addr[MTK_VCODEC_MAX_PLANES];
+ struct mtk_vcodec_fb fb_addr[MTK_VCODEC_MAX_PLANES];
};
/*
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index ed6a557de65d..a8c542fa647d 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -37,9 +37,9 @@
/* VFE halt timeout */
#define VFE_HALT_TIMEOUT_MS 100
/* Max number of frame drop updates per frame */
-#define VFE_FRAME_DROP_UPDATES 5
-/* Frame drop value. NOTE: VAL + UPDATES should not exceed 31 */
-#define VFE_FRAME_DROP_VAL 20
+#define VFE_FRAME_DROP_UPDATES 2
+/* Frame drop value. VAL + UPDATES - 1 should not exceed 31 */
+#define VFE_FRAME_DROP_VAL 30
#define VFE_NEXT_SOF_MS 500
@@ -659,7 +659,9 @@ static int vfe_enable_output(struct vfe_line *line)
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
const struct vfe_hw_ops *ops = vfe->ops;
+ struct media_entity *sensor;
unsigned long flags;
+ unsigned int frame_skip = 0;
unsigned int i;
u16 ub_size;
@@ -667,6 +669,17 @@ static int vfe_enable_output(struct vfe_line *line)
if (!ub_size)
return -EINVAL;
+ sensor = camss_find_sensor(&line->subdev.entity);
+ if (sensor) {
+ struct v4l2_subdev *subdev =
+ media_entity_to_v4l2_subdev(sensor);
+
+ v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
+ /* Max frame skip is 29 frames */
+ if (frame_skip > VFE_FRAME_DROP_VAL - 1)
+ frame_skip = VFE_FRAME_DROP_VAL - 1;
+ }
+
spin_lock_irqsave(&vfe->output_lock, flags);
ops->reg_update_clear(vfe, line->id);
@@ -695,10 +708,10 @@ static int vfe_enable_output(struct vfe_line *line)
switch (output->state) {
case VFE_OUTPUT_SINGLE:
- vfe_output_frame_drop(vfe, output, 1);
+ vfe_output_frame_drop(vfe, output, 1 << frame_skip);
break;
case VFE_OUTPUT_CONTINUOUS:
- vfe_output_frame_drop(vfe, output, 3);
+ vfe_output_frame_drop(vfe, output, 3 << frame_skip);
break;
default:
vfe_output_frame_drop(vfe, output, 0);
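The pattern passed to `vfe_output_frame_drop()` is, as used here, a bitmask of frames to capture within the frame-drop period, with bit 0 naming the next frame; shifting it left by `frame_skip` postpones capture past the startup frames the sensor reports via `g_skip_frames`. Clamping `frame_skip` to VFE_FRAME_DROP_VAL - 1 (29) keeps the two-bit continuous pattern, plus the allowed updates, at or below bit 31. A standalone sketch of the shift:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int frame_skip = 3;	/* sensor wants 3 startup frames dropped */

    	unsigned int single = 1u << frame_skip;	/* 0b1000: 4th frame only */
    	unsigned int cont   = 3u << frame_skip;	/* 0b11000: 4th + 5th frame */

    	printf("single 0x%x, continuous 0x%x\n", single, cont);
    	return 0;
    }
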
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 45978db3b0be..63da18773d24 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -346,7 +346,7 @@ void camss_disable_clocks(int nclocks, struct camss_clock *clock)
*
* Return a pointer to sensor media entity or NULL if not found
*/
-static struct media_entity *camss_find_sensor(struct media_entity *entity)
+struct media_entity *camss_find_sensor(struct media_entity *entity)
{
struct media_pad *pad;
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index 57b269ca93fd..1376b07889bf 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -106,6 +106,7 @@ void camss_add_clock_margin(u64 *rate);
int camss_enable_clocks(int nclocks, struct camss_clock *clock,
struct device *dev);
void camss_disable_clocks(int nclocks, struct camss_clock *clock);
+struct media_entity *camss_find_sensor(struct media_entity *entity);
int camss_get_pixel_clock(struct media_entity *entity, u32 *pixel_clock);
int camss_pm_domain_on(struct camss *camss, int id);
void camss_pm_domain_off(struct camss *camss, int id);
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index bb6add9d340e..cb411eb85ee4 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -76,7 +76,7 @@ static void venus_sys_error_handler(struct work_struct *work)
hfi_core_deinit(core, true);
hfi_destroy(core);
mutex_lock(&core->lock);
- venus_shutdown(core->dev);
+ venus_shutdown(core);
pm_runtime_put_sync(core->dev);
@@ -84,7 +84,7 @@ static void venus_sys_error_handler(struct work_struct *work)
pm_runtime_get_sync(core->dev);
- ret |= venus_boot(core->dev, core->res->fwname);
+ ret |= venus_boot(core);
ret |= hfi_core_resume(core, true);
@@ -264,6 +264,14 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (!dev->dma_parms) {
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+ }
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
INIT_LIST_HEAD(&core->instances);
mutex_init(&core->lock);
INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
@@ -284,7 +292,15 @@ static int venus_probe(struct platform_device *pdev)
if (ret < 0)
goto err_runtime_disable;
- ret = venus_boot(dev, core->res->fwname);
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret)
+ goto err_runtime_disable;
+
+ ret = venus_firmware_init(core);
+ if (ret)
+ goto err_runtime_disable;
+
+ ret = venus_boot(core);
if (ret)
goto err_runtime_disable;
@@ -308,10 +324,6 @@ static int venus_probe(struct platform_device *pdev)
if (ret)
goto err_core_deinit;
- ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (ret)
- goto err_dev_unregister;
-
ret = pm_runtime_put_sync(dev);
if (ret)
goto err_dev_unregister;
@@ -323,7 +335,7 @@ err_dev_unregister:
err_core_deinit:
hfi_core_deinit(core, false);
err_venus_shutdown:
- venus_shutdown(dev);
+ venus_shutdown(core);
err_runtime_disable:
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
@@ -344,9 +356,11 @@ static int venus_remove(struct platform_device *pdev)
WARN_ON(ret);
hfi_destroy(core);
- venus_shutdown(dev);
+ venus_shutdown(core);
of_platform_depopulate(dev);
+ venus_firmware_deinit(core);
+
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 2f02365f4818..6382cea29185 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -98,6 +98,7 @@ struct venus_caps {
* @dev: convenience struct device pointer
* @dev_dec: convenience struct device pointer for decoder device
* @dev_enc: convenience struct device pointer for encoder device
+ * @use_tz: a flag indicating whether the firmware is handled by TrustZone
* @lock: a lock for this structure
* @instances: a list_head of all instances
* @insts_count: num of instances
@@ -129,6 +130,11 @@ struct venus_core {
struct device *dev;
struct device *dev_dec;
struct device *dev_enc;
+ unsigned int use_tz;
+ struct video_firmware {
+ struct device *dev;
+ struct iommu_domain *iommu_domain;
+ } fw;
struct mutex lock;
struct list_head instances;
atomic_t insts_count;
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index c4a577848dd7..c29acfd70c1b 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -15,32 +15,66 @@
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
+#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
#include <linux/qcom_scm.h>
#include <linux/sizes.h>
#include <linux/soc/qcom/mdt_loader.h>
+#include "core.h"
#include "firmware.h"
+#include "hfi_venus_io.h"
#define VENUS_PAS_ID 9
#define VENUS_FW_MEM_SIZE (6 * SZ_1M)
+#define VENUS_FW_START_ADDR 0x0
-int venus_boot(struct device *dev, const char *fwname)
+static void venus_reset_cpu(struct venus_core *core)
+{
+ void __iomem *base = core->base;
+
+ writel(0, base + WRAPPER_FW_START_ADDR);
+ writel(VENUS_FW_MEM_SIZE, base + WRAPPER_FW_END_ADDR);
+ writel(0, base + WRAPPER_CPA_START_ADDR);
+ writel(VENUS_FW_MEM_SIZE, base + WRAPPER_CPA_END_ADDR);
+ writel(VENUS_FW_MEM_SIZE, base + WRAPPER_NONPIX_START_ADDR);
+ writel(VENUS_FW_MEM_SIZE, base + WRAPPER_NONPIX_END_ADDR);
+ writel(0x0, base + WRAPPER_CPU_CGC_DIS);
+ writel(0x0, base + WRAPPER_CPU_CLOCK_CONFIG);
+
+ /* Bring ARM9 out of reset */
+ writel(0, base + WRAPPER_A9SS_SW_RESET);
+}
+
+int venus_set_hw_state(struct venus_core *core, bool resume)
+{
+ if (core->use_tz)
+ return qcom_scm_set_remote_state(resume, 0);
+
+ if (resume)
+ venus_reset_cpu(core);
+ else
+ writel(1, core->base + WRAPPER_A9SS_SW_RESET);
+
+ return 0;
+}
+
+static int venus_load_fw(struct venus_core *core, const char *fwname,
+ phys_addr_t *mem_phys, size_t *mem_size)
{
const struct firmware *mdt;
struct device_node *node;
- phys_addr_t mem_phys;
+ struct device *dev;
struct resource r;
ssize_t fw_size;
- size_t mem_size;
void *mem_va;
int ret;
- if (!IS_ENABLED(CONFIG_QCOM_MDT_LOADER) || !qcom_scm_is_available())
- return -EPROBE_DEFER;
-
+ dev = core->dev;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!node) {
dev_err(dev, "no memory-region specified\n");
@@ -51,16 +85,16 @@ int venus_boot(struct device *dev, const char *fwname)
if (ret)
return ret;
- mem_phys = r.start;
- mem_size = resource_size(&r);
+ *mem_phys = r.start;
+ *mem_size = resource_size(&r);
- if (mem_size < VENUS_FW_MEM_SIZE)
+ if (*mem_size < VENUS_FW_MEM_SIZE)
return -EINVAL;
- mem_va = memremap(r.start, mem_size, MEMREMAP_WC);
+ mem_va = memremap(r.start, *mem_size, MEMREMAP_WC);
if (!mem_va) {
dev_err(dev, "unable to map memory region: %pa+%zx\n",
- &r.start, mem_size);
+ &r.start, *mem_size);
return -ENOMEM;
}
@@ -75,24 +109,181 @@ int venus_boot(struct device *dev, const char *fwname)
goto err_unmap;
}
- ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID, mem_va, mem_phys,
- mem_size, NULL);
+ if (core->use_tz)
+ ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID,
+ mem_va, *mem_phys, *mem_size, NULL);
+ else
+ ret = qcom_mdt_load_no_init(dev, mdt, fwname, VENUS_PAS_ID,
+ mem_va, *mem_phys, *mem_size, NULL);
release_firmware(mdt);
- if (ret)
- goto err_unmap;
-
- ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID);
- if (ret)
- goto err_unmap;
-
err_unmap:
memunmap(mem_va);
return ret;
}
-int venus_shutdown(struct device *dev)
+static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys,
+ size_t mem_size)
+{
+ struct iommu_domain *iommu;
+ struct device *dev;
+ int ret;
+
+ dev = core->fw.dev;
+ if (!dev)
+ return -EPROBE_DEFER;
+
+ iommu = core->fw.iommu_domain;
+
+ ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size,
+ IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
+ if (ret) {
+ dev_err(dev, "could not map video firmware region\n");
+ return ret;
+ }
+
+ venus_reset_cpu(core);
+
+ return 0;
+}
+
+static int venus_shutdown_no_tz(struct venus_core *core)
+{
+ struct iommu_domain *iommu;
+ size_t unmapped;
+ u32 reg;
+ struct device *dev = core->fw.dev;
+ void __iomem *base = core->base;
+
+ /* Assert the reset to ARM9 */
+ reg = readl_relaxed(base + WRAPPER_A9SS_SW_RESET);
+ reg |= WRAPPER_A9SS_SW_RESET_BIT;
+ writel_relaxed(reg, base + WRAPPER_A9SS_SW_RESET);
+
+ /* Make sure reset is asserted before the mapping is removed */
+ mb();
+
+ iommu = core->fw.iommu_domain;
+
+ unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, VENUS_FW_MEM_SIZE);
+ if (unmapped != VENUS_FW_MEM_SIZE)
+ dev_err(dev, "failed to unmap firmware\n");
+
+ return 0;
+}
+
+int venus_boot(struct venus_core *core)
{
- return qcom_scm_pas_shutdown(VENUS_PAS_ID);
+ struct device *dev = core->dev;
+ phys_addr_t mem_phys;
+ size_t mem_size;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_QCOM_MDT_LOADER) ||
+ (core->use_tz && !qcom_scm_is_available()))
+ return -EPROBE_DEFER;
+
+ ret = venus_load_fw(core, core->res->fwname, &mem_phys, &mem_size);
+ if (ret) {
+ dev_err(dev, "fail to load video firmware\n");
+ return -EINVAL;
+ }
+
+ if (core->use_tz)
+ ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID);
+ else
+ ret = venus_boot_no_tz(core, mem_phys, mem_size);
+
+ return ret;
+}
+
+int venus_shutdown(struct venus_core *core)
+{
+ int ret;
+
+ if (core->use_tz)
+ ret = qcom_scm_pas_shutdown(VENUS_PAS_ID);
+ else
+ ret = venus_shutdown_no_tz(core);
+
+ return ret;
+}
+
+int venus_firmware_init(struct venus_core *core)
+{
+ struct platform_device_info info;
+ struct iommu_domain *iommu_dom;
+ struct platform_device *pdev;
+ struct device_node *np;
+ int ret;
+
+ np = of_get_child_by_name(core->dev->of_node, "video-firmware");
+ if (!np) {
+ core->use_tz = true;
+ return 0;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.fwnode = &np->fwnode;
+ info.parent = core->dev;
+ info.name = np->name;
+ info.dma_mask = DMA_BIT_MASK(32);
+
+ pdev = platform_device_register_full(&info);
+ if (IS_ERR(pdev)) {
+ of_node_put(np);
+ return PTR_ERR(pdev);
+ }
+
+ pdev->dev.of_node = np;
+
+ ret = of_dma_configure(&pdev->dev, np, true);
+ if (ret) {
+ dev_err(core->dev, "dma configure failed\n");
+ goto err_unregister;
+ }
+
+ core->fw.dev = &pdev->dev;
+
+ iommu_dom = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu_dom) {
+ dev_err(core->fw.dev, "Failed to allocate iommu domain\n");
+ ret = -ENOMEM;
+ goto err_unregister;
+ }
+
+ ret = iommu_attach_device(iommu_dom, core->fw.dev);
+ if (ret) {
+ dev_err(core->fw.dev, "could not attach device\n");
+ goto err_iommu_free;
+ }
+
+ core->fw.iommu_domain = iommu_dom;
+
+ of_node_put(np);
+
+ return 0;
+
+err_iommu_free:
+ iommu_domain_free(iommu_dom);
+err_unregister:
+ platform_device_unregister(pdev);
+ of_node_put(np);
+ return ret;
+}
+
+void venus_firmware_deinit(struct venus_core *core)
+{
+ struct iommu_domain *iommu;
+
+ if (!core->fw.dev)
+ return;
+
+ iommu = core->fw.iommu_domain;
+
+ iommu_detach_device(iommu, core->fw.dev);
+ iommu_domain_free(iommu);
+
+ platform_device_unregister(to_platform_device(core->fw.dev));
}
diff --git a/drivers/media/platform/qcom/venus/firmware.h b/drivers/media/platform/qcom/venus/firmware.h
index 428efb56d339..119a9a4fc1a2 100644
--- a/drivers/media/platform/qcom/venus/firmware.h
+++ b/drivers/media/platform/qcom/venus/firmware.h
@@ -16,7 +16,20 @@
struct device;
-int venus_boot(struct device *dev, const char *fwname);
-int venus_shutdown(struct device *dev);
+int venus_firmware_init(struct venus_core *core);
+void venus_firmware_deinit(struct venus_core *core);
+int venus_boot(struct venus_core *core);
+int venus_shutdown(struct venus_core *core);
+int venus_set_hw_state(struct venus_core *core, bool suspend);
+
+static inline int venus_set_hw_state_suspend(struct venus_core *core)
+{
+ return venus_set_hw_state(core, false);
+}
+
+static inline int venus_set_hw_state_resume(struct venus_core *core)
+{
+ return venus_set_hw_state(core, true);
+}
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index e8389d8d8c48..87a441488e15 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -1215,7 +1215,7 @@ pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
}
case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
/* not implemented on Venus 4xx */
- break;
+ return -ENOTSUPP;
default:
return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
}
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index 124085556b94..5c1e5b4f767a 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -19,7 +19,6 @@
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
-#include <linux/qcom_scm.h>
#include <linux/slab.h>
#include "core.h"
@@ -27,6 +26,7 @@
#include "hfi_msgs.h"
#include "hfi_venus.h"
#include "hfi_venus_io.h"
+#include "firmware.h"
#define HFI_MASK_QHDR_TX_TYPE 0xff000000
#define HFI_MASK_QHDR_RX_TYPE 0x00ff0000
@@ -55,11 +55,6 @@
#define IFACEQ_VAR_LARGE_PKT_SIZE 512
#define IFACEQ_VAR_HUGE_PKT_SIZE (1024 * 12)
-enum tzbsp_video_state {
- TZBSP_VIDEO_STATE_SUSPEND = 0,
- TZBSP_VIDEO_STATE_RESUME
-};
-
struct hfi_queue_table_header {
u32 version;
u32 size;
@@ -575,7 +570,7 @@ static int venus_power_off(struct venus_hfi_device *hdev)
if (!hdev->power_enabled)
return 0;
- ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0);
+ ret = venus_set_hw_state_suspend(hdev->core);
if (ret)
return ret;
@@ -595,7 +590,7 @@ static int venus_power_on(struct venus_hfi_device *hdev)
if (hdev->power_enabled)
return 0;
- ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_RESUME, 0);
+ ret = venus_set_hw_state_resume(hdev->core);
if (ret)
goto err;
@@ -608,7 +603,7 @@ static int venus_power_on(struct venus_hfi_device *hdev)
return 0;
err_suspend:
- qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0);
+ venus_set_hw_state_suspend(hdev->core);
err:
hdev->power_enabled = false;
return ret;
@@ -1355,6 +1350,8 @@ static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
pkt = (struct hfi_session_set_property_pkt *)packet;
ret = pkt_session_set_property(pkt, inst, ptype, pdata);
+ if (ret == -ENOTSUPP)
+ return 0;
if (ret)
return ret;
diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h
index def0926a6dee..ef0c72a0c892 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus_io.h
+++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h
@@ -112,6 +112,14 @@
#define WRAPPER_CPU_STATUS (WRAPPER_BASE + 0x2014)
#define WRAPPER_CPU_STATUS_WFI BIT(0)
#define WRAPPER_SW_RESET (WRAPPER_BASE + 0x3000)
+#define WRAPPER_CPA_START_ADDR (WRAPPER_BASE + 0x1020)
+#define WRAPPER_CPA_END_ADDR (WRAPPER_BASE + 0x1024)
+#define WRAPPER_FW_START_ADDR (WRAPPER_BASE + 0x1028)
+#define WRAPPER_FW_END_ADDR (WRAPPER_BASE + 0x102C)
+#define WRAPPER_NONPIX_START_ADDR (WRAPPER_BASE + 0x1030)
+#define WRAPPER_NONPIX_END_ADDR (WRAPPER_BASE + 0x1034)
+#define WRAPPER_A9SS_SW_RESET (WRAPPER_BASE + 0x3000)
+#define WRAPPER_A9SS_SW_RESET_BIT BIT(4)
/* Venus 4xx */
#define WRAPPER_VCODEC0_MMCC_POWER_STATUS (WRAPPER_BASE + 0x90)
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 189ec975c6bb..282de21cf2e1 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -885,10 +885,8 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
vbuf->field = V4L2_FIELD_NONE;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- unsigned int opb_sz = venus_helper_get_opb_size(inst);
-
vb = &vbuf->vb2_buf;
- vb2_set_plane_payload(vb, 0, bytesused ? : opb_sz);
+ vb2_set_plane_payload(vb, 0, bytesused);
vb->planes[0].data_offset = data_offset;
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index ce85962b6adc..32cff294582f 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -651,6 +651,8 @@ static int venc_set_properties(struct venus_inst *inst)
struct hfi_framerate frate;
struct hfi_bitrate brate;
struct hfi_idr_period idrp;
+ struct hfi_quantization quant;
+ struct hfi_quantization_range quant_range;
u32 ptype, rate_control, bitrate, profile = 0, level = 0;
int ret;
@@ -770,6 +772,23 @@ static int venc_set_properties(struct venus_inst *inst)
if (ret)
return ret;
+ ptype = HFI_PROPERTY_PARAM_VENC_SESSION_QP;
+ quant.qp_i = ctr->h264_i_qp;
+ quant.qp_p = ctr->h264_p_qp;
+ quant.qp_b = ctr->h264_b_qp;
+ quant.layer_id = 0;
+ ret = hfi_session_set_property(inst, ptype, &quant);
+ if (ret)
+ return ret;
+
+ ptype = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE;
+ quant_range.min_qp = ctr->h264_min_qp;
+ quant_range.max_qp = ctr->h264_max_qp;
+ quant_range.layer_id = 0;
+ ret = hfi_session_set_property(inst, ptype, &quant_range);
+ if (ret)
+ return ret;
+
if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264) {
profile = venc_v4l2_to_hfi(V4L2_CID_MPEG_VIDEO_H264_PROFILE,
ctr->profile.h264);
@@ -1074,7 +1093,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
- src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->ops = &venc_vb2_ops;
src_vq->mem_ops = &vb2_dma_sg_memops;
@@ -1090,7 +1109,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
- dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->ops = &venc_vb2_ops;
dst_vq->mem_ops = &vb2_dma_sg_memops;
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 459101728d26..ac1e1d26f341 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -79,7 +79,10 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct venus_inst *inst = ctrl_to_inst(ctrl);
struct venc_controls *ctr = &inst->controls.enc;
+ struct hfi_enable en = { .enable = 1 };
+ struct hfi_bitrate brate;
u32 bframes;
+ u32 ptype;
int ret;
switch (ctrl->id) {
@@ -88,6 +91,19 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_MPEG_VIDEO_BITRATE:
ctr->bitrate = ctrl->val;
+ mutex_lock(&inst->lock);
+ if (inst->streamon_out && inst->streamon_cap) {
+ ptype = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
+ brate.bitrate = ctr->bitrate;
+ brate.layer_id = 0;
+
+ ret = hfi_session_set_property(inst, ptype, &brate);
+ if (ret) {
+ mutex_unlock(&inst->lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&inst->lock);
break;
case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
ctr->bitrate_peak = ctrl->val;
@@ -173,6 +189,19 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
ctr->num_b_frames = bframes;
break;
+ case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
+ mutex_lock(&inst->lock);
+ if (inst->streamon_out && inst->streamon_cap) {
+ ptype = HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
+ ret = hfi_session_set_property(inst, ptype, &en);
+
+ if (ret) {
+ mutex_unlock(&inst->lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&inst->lock);
+ break;
default:
return -EINVAL;
}
@@ -188,7 +217,7 @@ int venc_ctrl_init(struct venus_inst *inst)
{
int ret;
- ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 27);
+ ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 28);
if (ret)
return ret;
@@ -295,7 +324,7 @@ int venc_ctrl_init(struct venus_inst *inst)
0, INTRA_REFRESH_MBS_MAX, 1, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, (1 << 16) - 1, 1, 12);
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, (1 << 16) - 1, 1, 30);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_VPX_MIN_QP, 1, 128, 1, 1);
@@ -309,6 +338,9 @@ int venc_ctrl_init(struct venus_inst *inst)
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, 0, (1 << 16) - 1, 1, 0);
+ v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, 0, 0, 0, 0);
+
ret = inst->ctrl_handler.error;
if (ret)
goto err;
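Both additions make these controls live: once `streamon_out` and `streamon_cap` are both set, the new value is pushed to the firmware immediately instead of only being latched for the next stream start. From user space this is the ordinary control interface (device path assumed):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
    	struct v4l2_control ctrl;
    	int fd = open("/dev/video1", O_RDWR);	/* assumed encoder node */

    	if (fd < 0)
    		return 1;

    	memset(&ctrl, 0, sizeof(ctrl));
    	ctrl.id = V4L2_CID_MPEG_VIDEO_BITRATE;	/* now applied mid-stream */
    	ctrl.value = 4 * 1000 * 1000;		/* 4 Mbit/s */
    	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl))
    		perror("VIDIOC_S_CTRL bitrate");

    	ctrl.id = V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME;	/* button control */
    	ctrl.value = 1;
    	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl))
    		perror("VIDIOC_S_CTRL keyframe");
    	return 0;
    }
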
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index f476b2f1eb35..f0719ce24b97 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -1088,6 +1088,50 @@ static const struct rvin_info rcar_info_r8a77970 = {
.routes = rcar_info_r8a77970_routes,
};
+static const struct rvin_group_route rcar_info_r8a77980_routes[] = {
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 0, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 0, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 1, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 1, .mask = BIT(1) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 2, .mask = BIT(1) },
+ { .csi = RVIN_CSI40, .channel = 2, .vin = 2, .mask = BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 3, .mask = BIT(0) },
+ { .csi = RVIN_CSI40, .channel = 3, .vin = 3, .mask = BIT(3) },
+ { .csi = RVIN_CSI41, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI41, .channel = 1, .vin = 4, .mask = BIT(2) },
+ { .csi = RVIN_CSI41, .channel = 0, .vin = 5, .mask = BIT(2) },
+ { .csi = RVIN_CSI41, .channel = 1, .vin = 5, .mask = BIT(1) | BIT(3) },
+ { .csi = RVIN_CSI41, .channel = 0, .vin = 6, .mask = BIT(1) },
+ { .csi = RVIN_CSI41, .channel = 2, .vin = 6, .mask = BIT(3) },
+ { .csi = RVIN_CSI41, .channel = 1, .vin = 7, .mask = BIT(0) },
+ { .csi = RVIN_CSI41, .channel = 3, .vin = 7, .mask = BIT(3) },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77980 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77980_routes,
+};
+
+static const struct rvin_group_route rcar_info_r8a77990_routes[] = {
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 4, .mask = BIT(0) | BIT(3) },
+ { .csi = RVIN_CSI40, .channel = 0, .vin = 5, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 4, .mask = BIT(2) },
+ { .csi = RVIN_CSI40, .channel = 1, .vin = 5, .mask = BIT(1) | BIT(3) },
+ { /* Sentinel */ }
+};
+
+static const struct rvin_info rcar_info_r8a77990 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .max_width = 4096,
+ .max_height = 4096,
+ .routes = rcar_info_r8a77990_routes,
+};
+
static const struct rvin_group_route rcar_info_r8a77995_routes[] = {
{ /* Sentinel */ }
};
@@ -1146,6 +1190,14 @@ static const struct of_device_id rvin_of_id_table[] = {
.data = &rcar_info_r8a77970,
},
{
+ .compatible = "renesas,vin-r8a77980",
+ .data = &rcar_info_r8a77980,
+ },
+ {
+ .compatible = "renesas,vin-r8a77990",
+ .data = &rcar_info_r8a77990,
+ },
+ {
.compatible = "renesas,vin-r8a77995",
.data = &rcar_info_r8a77995,
},
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index b0044a08e71e..6d356f5a9456 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -152,37 +152,37 @@ static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = {
};
static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = {
- { .mbps = 80, .reg = 0x00 },
- { .mbps = 90, .reg = 0x20 },
- { .mbps = 100, .reg = 0x40 },
- { .mbps = 110, .reg = 0x02 },
- { .mbps = 130, .reg = 0x22 },
- { .mbps = 140, .reg = 0x42 },
- { .mbps = 150, .reg = 0x04 },
- { .mbps = 170, .reg = 0x24 },
- { .mbps = 180, .reg = 0x44 },
- { .mbps = 200, .reg = 0x06 },
- { .mbps = 220, .reg = 0x26 },
- { .mbps = 240, .reg = 0x46 },
- { .mbps = 250, .reg = 0x08 },
- { .mbps = 270, .reg = 0x28 },
- { .mbps = 300, .reg = 0x0a },
- { .mbps = 330, .reg = 0x2a },
- { .mbps = 360, .reg = 0x4a },
- { .mbps = 400, .reg = 0x0c },
- { .mbps = 450, .reg = 0x2c },
- { .mbps = 500, .reg = 0x0e },
- { .mbps = 550, .reg = 0x2e },
- { .mbps = 600, .reg = 0x10 },
- { .mbps = 650, .reg = 0x30 },
- { .mbps = 700, .reg = 0x12 },
- { .mbps = 750, .reg = 0x32 },
- { .mbps = 800, .reg = 0x52 },
- { .mbps = 850, .reg = 0x72 },
- { .mbps = 900, .reg = 0x14 },
- { .mbps = 950, .reg = 0x34 },
- { .mbps = 1000, .reg = 0x54 },
- { .mbps = 1050, .reg = 0x74 },
+ { .mbps = 89, .reg = 0x00 },
+ { .mbps = 99, .reg = 0x20 },
+ { .mbps = 109, .reg = 0x40 },
+ { .mbps = 129, .reg = 0x02 },
+ { .mbps = 139, .reg = 0x22 },
+ { .mbps = 149, .reg = 0x42 },
+ { .mbps = 169, .reg = 0x04 },
+ { .mbps = 179, .reg = 0x24 },
+ { .mbps = 199, .reg = 0x44 },
+ { .mbps = 219, .reg = 0x06 },
+ { .mbps = 239, .reg = 0x26 },
+ { .mbps = 249, .reg = 0x46 },
+ { .mbps = 269, .reg = 0x08 },
+ { .mbps = 299, .reg = 0x28 },
+ { .mbps = 329, .reg = 0x0a },
+ { .mbps = 359, .reg = 0x2a },
+ { .mbps = 399, .reg = 0x4a },
+ { .mbps = 449, .reg = 0x0c },
+ { .mbps = 499, .reg = 0x2c },
+ { .mbps = 549, .reg = 0x0e },
+ { .mbps = 599, .reg = 0x2e },
+ { .mbps = 649, .reg = 0x10 },
+ { .mbps = 699, .reg = 0x30 },
+ { .mbps = 749, .reg = 0x12 },
+ { .mbps = 799, .reg = 0x32 },
+ { .mbps = 849, .reg = 0x52 },
+ { .mbps = 899, .reg = 0x72 },
+ { .mbps = 949, .reg = 0x14 },
+ { .mbps = 999, .reg = 0x34 },
+ { .mbps = 1049, .reg = 0x54 },
+ { .mbps = 1099, .reg = 0x74 },
{ .mbps = 1125, .reg = 0x16 },
{ /* sentinel */ },
};
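Each `.mbps` entry in this table now names the upper bound of the range a register value covers rather than the nominal rate; the driver's lookup picks the first entry whose `.mbps` is greater than or equal to the requested rate, so intermediate rates select the correct setting instead of the next range up. A standalone sketch of the lookup (table truncated):

    #include <stdio.h>

    struct mbps_reg { unsigned int mbps; unsigned char reg; };

    /* First entries of the updated table: .mbps is a range upper bound. */
    static const struct mbps_reg tbl[] = {
    	{ .mbps = 89, .reg = 0x00 },
    	{ .mbps = 99, .reg = 0x20 },
    	{ .mbps = 109, .reg = 0x40 },
    	{ /* sentinel */ }
    };

    static int lookup(unsigned int mbps)
    {
    	const struct mbps_reg *p;

    	for (p = tbl; p->mbps; p++)
    		if (p->mbps >= mbps)	/* same search shape as the driver */
    			return p->reg;
    	return -1;
    }

    int main(void)
    {
    	/* 85 Mbps: previously matched the 90 entry (0x20), now 0x00. */
    	printf("85 Mbps -> reg 0x%02x\n", lookup(85));
    	return 0;
    }
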
@@ -342,6 +342,7 @@ struct rcar_csi2_info {
int (*confirm_start)(struct rcar_csi2 *priv);
const struct rcsi2_mbps_reg *hsfreqrange;
unsigned int csi0clkfreqrange;
+ unsigned int num_channels;
bool clear_ulps;
};
@@ -476,13 +477,14 @@ static int rcsi2_start(struct rcar_csi2 *priv)
format = rcsi2_code_to_fmt(priv->mf.code);
/*
- * Enable all Virtual Channels.
+ * Enable all supported CSI-2 channels with virtual channel and
+ * data type matching.
*
* NOTE: It's not possible to get individual datatype for each
* source virtual channel. Once this is possible in V4L2
* it should be used here.
*/
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < priv->info->num_channels; i++) {
u32 vcdt_part;
vcdt_part = VCDT_SEL_VC(i) | VCDT_VCDTN_EN | VCDT_SEL_DTN_ON |
@@ -511,7 +513,8 @@ static int rcsi2_start(struct rcar_csi2 *priv)
rcsi2_write(priv, FLD_REG, FLD_FLD_NUM(2) | FLD_FLD_EN4 |
FLD_FLD_EN3 | FLD_FLD_EN2 | FLD_FLD_EN);
rcsi2_write(priv, VCDT_REG, vcdt);
- rcsi2_write(priv, VCDT2_REG, vcdt2);
+ if (vcdt2)
+ rcsi2_write(priv, VCDT2_REG, vcdt2);
/* Lanes are zero indexed. */
rcsi2_write(priv, LSWAP_REG,
LSWAP_L0SEL(priv->lane_swap[0] - 1) |
@@ -940,27 +943,45 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a7795 = {
.init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
.hsfreqrange = hsfreqrange_h3_v3h_m3n,
.csi0clkfreqrange = 0x20,
+ .num_channels = 4,
.clear_ulps = true,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a7795es1 = {
.hsfreqrange = hsfreqrange_m3w_h3es1,
+ .num_channels = 4,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a7796 = {
.hsfreqrange = hsfreqrange_m3w_h3es1,
+ .num_channels = 4,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = {
.init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
.hsfreqrange = hsfreqrange_h3_v3h_m3n,
.csi0clkfreqrange = 0x20,
+ .num_channels = 4,
.clear_ulps = true,
};
static const struct rcar_csi2_info rcar_csi2_info_r8a77970 = {
.init_phtw = rcsi2_init_phtw_v3m_e3,
.confirm_start = rcsi2_confirm_start_v3m_e3,
+ .num_channels = 4,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a77980 = {
+ .init_phtw = rcsi2_init_phtw_h3_v3h_m3n,
+ .hsfreqrange = hsfreqrange_h3_v3h_m3n,
+ .csi0clkfreqrange = 0x20,
+ .num_channels = 4,
+ .clear_ulps = true,
+};
+
+static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = {
+ .init_phtw = rcsi2_init_phtw_v3m_e3,
+ .confirm_start = rcsi2_confirm_start_v3m_e3,
+ .num_channels = 2,
};
static const struct of_device_id rcar_csi2_of_table[] = {
@@ -980,6 +1001,14 @@ static const struct of_device_id rcar_csi2_of_table[] = {
.compatible = "renesas,r8a77970-csi2",
.data = &rcar_csi2_info_r8a77970,
},
+ {
+ .compatible = "renesas,r8a77980-csi2",
+ .data = &rcar_csi2_info_r8a77980,
+ },
+ {
+ .compatible = "renesas,r8a77990-csi2",
+ .data = &rcar_csi2_info_r8a77990,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rcar_csi2_of_table);
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index dc77682b4785..7a2851790b91 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -404,16 +404,16 @@ static int rvin_s_selection(struct file *file, void *fh,
return 0;
}
-static int rvin_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *crop)
+static int rvin_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct rvin_dev *vin = video_drvdata(file);
struct v4l2_subdev *sd = vin_to_source(vin);
- if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- return v4l2_subdev_call(sd, video, g_pixelaspect, &crop->pixelaspect);
+ return v4l2_subdev_call(sd, video, g_pixelaspect, f);
}
static int rvin_enum_input(struct file *file, void *priv,
@@ -620,7 +620,7 @@ static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
.vidioc_g_selection = rvin_g_selection,
.vidioc_s_selection = rvin_s_selection,
- .vidioc_cropcap = rvin_cropcap,
+ .vidioc_g_pixelaspect = rvin_g_pixelaspect,
.vidioc_enum_input = rvin_enum_input,
.vidioc_g_input = rvin_g_input,
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 9cc9db083870..5c653287185f 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -97,7 +97,7 @@ static irqreturn_t rga_isr(int irq, void *prv)
return IRQ_HANDLED;
}
-static struct v4l2_m2m_ops rga_m2m_ops = {
+static const struct v4l2_m2m_ops rga_m2m_ops = {
.device_run = device_run,
};
@@ -700,7 +700,7 @@ static const struct v4l2_ioctl_ops rga_ioctl_ops = {
.vidioc_s_selection = vidioc_s_selection,
};
-static struct video_device rga_videodev = {
+static const struct video_device rga_videodev = {
.name = "rockchip-rga",
.fops = &rga_fops,
.ioctl_ops = &rga_ioctl_ops,
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index e901201b6fcc..57ab1d1085d1 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -89,7 +89,7 @@ static struct g2d_fmt *find_fmt(struct v4l2_format *f)
static struct g2d_frame *get_frame(struct g2d_ctx *ctx,
- enum v4l2_buf_type type)
+ enum v4l2_buf_type type)
{
switch (type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
@@ -408,51 +408,76 @@ static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
return 0;
}
-static int vidioc_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *cr)
-{
- struct g2d_ctx *ctx = priv;
- struct g2d_frame *f;
-
- f = get_frame(ctx, cr->type);
- if (IS_ERR(f))
- return PTR_ERR(f);
-
- cr->bounds.left = 0;
- cr->bounds.top = 0;
- cr->bounds.width = f->width;
- cr->bounds.height = f->height;
- cr->defrect = cr->bounds;
- return 0;
-}
-
-static int vidioc_g_crop(struct file *file, void *prv, struct v4l2_crop *cr)
+static int vidioc_g_selection(struct file *file, void *prv,
+ struct v4l2_selection *s)
{
struct g2d_ctx *ctx = prv;
struct g2d_frame *f;
- f = get_frame(ctx, cr->type);
+ f = get_frame(ctx, s->type);
if (IS_ERR(f))
return PTR_ERR(f);
- cr->c.left = f->o_height;
- cr->c.top = f->o_width;
- cr->c.width = f->c_width;
- cr->c.height = f->c_height;
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_COMPOSE:
+ s->r.left = f->o_height;
+ s->r.top = f->o_width;
+ s->r.width = f->c_width;
+ s->r.height = f->c_height;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = f->width;
+ s->r.height = f->height;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
-static int vidioc_try_crop(struct file *file, void *prv, const struct v4l2_crop *cr)
+static int vidioc_try_selection(struct file *file, void *prv,
+ const struct v4l2_selection *s)
{
struct g2d_ctx *ctx = prv;
struct g2d_dev *dev = ctx->dev;
struct g2d_frame *f;
- f = get_frame(ctx, cr->type);
+ f = get_frame(ctx, s->type);
if (IS_ERR(f))
return PTR_ERR(f);
- if (cr->c.top < 0 || cr->c.left < 0) {
+ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (s->target != V4L2_SEL_TGT_COMPOSE)
+ return -EINVAL;
+ } else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (s->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+ }
+
+ if (s->r.top < 0 || s->r.left < 0) {
v4l2_err(&dev->v4l2_dev,
"doesn't support negative values for top & left\n");
return -EINVAL;
@@ -461,23 +486,24 @@ static int vidioc_try_crop(struct file *file, void *prv, const struct v4l2_crop
return 0;
}
-static int vidioc_s_crop(struct file *file, void *prv, const struct v4l2_crop *cr)
+static int vidioc_s_selection(struct file *file, void *prv,
+ struct v4l2_selection *s)
{
struct g2d_ctx *ctx = prv;
struct g2d_frame *f;
int ret;
- ret = vidioc_try_crop(file, prv, cr);
+ ret = vidioc_try_selection(file, prv, s);
if (ret)
return ret;
- f = get_frame(ctx, cr->type);
+ f = get_frame(ctx, s->type);
if (IS_ERR(f))
return PTR_ERR(f);
- f->c_width = cr->c.width;
- f->c_height = cr->c.height;
- f->o_width = cr->c.left;
- f->o_height = cr->c.top;
+ f->c_width = s->r.width;
+ f->c_height = s->r.height;
+ f->o_width = s->r.left;
+ f->o_height = s->r.top;
f->bottom = f->o_height + f->c_height;
f->right = f->o_width + f->c_width;
return 0;
@@ -585,9 +611,8 @@ static const struct v4l2_ioctl_ops g2d_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
- .vidioc_g_crop = vidioc_g_crop,
- .vidioc_s_crop = vidioc_s_crop,
- .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
};
static const struct video_device g2d_videodev = {
@@ -680,6 +705,7 @@ static int g2d_probe(struct platform_device *pdev)
goto unreg_v4l2_dev;
}
*vfd = g2d_videodev;
+ set_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags);
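+ /*
+ * Note: this driver keeps its historical, inverted crop handling (see
+ * vidioc_g_selection above); the quirk flag lets the v4l2 core keep
+ * emulating that behaviour for the legacy crop ioctls.
+ */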
vfd->lock = &dev->mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 927a1235408d..8a5ba3bec3af 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1342,6 +1342,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
vfd->lock = &dev->mfc_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->vfl_dir = VFL_DIR_M2M;
+ set_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags);
snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
dev->vfd_dec = vfd;
video_set_drvdata(vfd, dev);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index ece59ce1b149..f4c0e3a8f27d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -773,19 +773,23 @@ static const struct v4l2_ctrl_ops s5p_mfc_dec_ctrl_ops = {
.g_volatile_ctrl = s5p_mfc_dec_g_v_ctrl,
};
-/* Get cropping information */
-static int vidioc_g_crop(struct file *file, void *priv,
- struct v4l2_crop *cr)
+/* Get compose information */
+static int vidioc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
{
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
struct s5p_mfc_dev *dev = ctx->dev;
u32 left, right, top, bottom;
+ u32 width, height;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
if (ctx->state != MFCINST_HEAD_PARSED &&
ctx->state != MFCINST_RUNNING &&
ctx->state != MFCINST_FINISHING &&
ctx->state != MFCINST_FINISHED) {
- mfc_err("Can not get crop information\n");
+ mfc_err("Can not get compose information\n");
return -EINVAL;
}
if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) {
@@ -795,22 +799,33 @@ static int vidioc_g_crop(struct file *file, void *priv,
top = s5p_mfc_hw_call(dev->mfc_ops, get_crop_info_v, ctx);
bottom = top >> S5P_FIMV_SHARED_CROP_BOTTOM_SHIFT;
top = top & S5P_FIMV_SHARED_CROP_TOP_MASK;
- cr->c.left = left;
- cr->c.top = top;
- cr->c.width = ctx->img_width - left - right;
- cr->c.height = ctx->img_height - top - bottom;
- mfc_debug(2, "Cropping info [h264]: l=%d t=%d w=%d h=%d (r=%d b=%d fw=%d fh=%d\n",
- left, top, cr->c.width, cr->c.height, right, bottom,
+ width = ctx->img_width - left - right;
+ height = ctx->img_height - top - bottom;
+ mfc_debug(2, "Composing info [h264]: l=%d t=%d w=%d h=%d (r=%d b=%d fw=%d fh=%d\n",
+ left, top, width, height, right, bottom,
ctx->buf_width, ctx->buf_height);
} else {
- cr->c.left = 0;
- cr->c.top = 0;
- cr->c.width = ctx->img_width;
- cr->c.height = ctx->img_height;
- mfc_debug(2, "Cropping info: w=%d h=%d fw=%d fh=%d\n",
- cr->c.width, cr->c.height, ctx->buf_width,
+ left = 0;
+ top = 0;
+ width = ctx->img_width;
+ height = ctx->img_height;
+ mfc_debug(2, "Composing info: w=%d h=%d fw=%d fh=%d\n",
+ width, height, ctx->buf_width,
ctx->buf_height);
}
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE:
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ s->r.left = left;
+ s->r.top = top;
+ s->r.width = width;
+ s->r.height = height;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -887,7 +902,7 @@ static const struct v4l2_ioctl_ops s5p_mfc_dec_ioctl_ops = {
.vidioc_expbuf = vidioc_expbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
- .vidioc_g_crop = vidioc_g_crop,
+ .vidioc_g_selection = vidioc_g_selection,
.vidioc_decoder_cmd = vidioc_decoder_cmd,
.vidioc_subscribe_event = vidioc_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
diff --git a/drivers/media/platform/seco-cec/Makefile b/drivers/media/platform/seco-cec/Makefile
new file mode 100644
index 000000000000..a3f2c6bd3ac0
--- /dev/null
+++ b/drivers/media/platform/seco-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_SECO_CEC) += seco-cec.o
diff --git a/drivers/media/platform/seco-cec/seco-cec.c b/drivers/media/platform/seco-cec/seco-cec.c
new file mode 100644
index 000000000000..a425a10540c1
--- /dev/null
+++ b/drivers/media/platform/seco-cec/seco-cec.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * CEC driver for SECO X86 Boards
+ *
+ * Author: Ettore Chimenti <ek5.chimenti@gmail.com>
+ * Copyright (C) 2018, SECO SpA.
+ * Copyright (C) 2018, Aidilab Srl.
+ */
+
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+/* CEC Framework */
+#include <media/cec.h>
+
+#include "seco-cec.h"
+
+struct secocec_data {
+ struct device *dev;
+ struct platform_device *pdev;
+ struct cec_adapter *cec_adap;
+ struct cec_notifier *notifier;
+ struct rc_dev *ir;
+ char ir_input_phys[32];
+ int irq;
+};
+
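+/* Convenience wrappers around smb_word_op() for 16-bit register access */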
+#define smb_wr16(cmd, data) smb_word_op(CMD_WORD_DATA, SECOCEC_MICRO_ADDRESS, \
+ cmd, data, SMBUS_WRITE, NULL)
+#define smb_rd16(cmd, res) smb_word_op(CMD_WORD_DATA, SECOCEC_MICRO_ADDRESS, \
+ cmd, 0, SMBUS_READ, res)
+
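+/*
+ * Perform one SMBus byte/word transaction towards the board
+ * microcontroller by driving the Braswell SMBus host registers directly.
+ * Returns 0 on success or a negative errno.
+ */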
+static int smb_word_op(short data_format, u16 slave_addr, u8 cmd, u16 data,
+ u8 operation, u16 *result)
+{
+ unsigned int count;
+ short _data_format;
+ int status = 0;
+
+ switch (data_format) {
+ case CMD_BYTE_DATA:
+ _data_format = BRA_SMB_CMD_BYTE_DATA;
+ break;
+ case CMD_WORD_DATA:
+ _data_format = BRA_SMB_CMD_WORD_DATA;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Active wait until ready */
+ for (count = 0; count <= SMBTIMEOUT; ++count) {
+ if (!(inb(HSTS) & BRA_INUSE_STS))
+ break;
+ udelay(SMB_POLL_UDELAY);
+ }
+
+ if (count > SMBTIMEOUT)
+ /* Reset the lock instead of failing */
+ outb(0xff, HSTS);
+
+ outb(0x00, HCNT);
+ outb((u8)(slave_addr & 0xfe) | operation, XMIT_SLVA);
+ outb(cmd, HCMD);
+ inb(HCNT);
+
+ if (operation == SMBUS_WRITE) {
+ outb((u8)data, HDAT0);
+ outb((u8)(data >> 8), HDAT1);
+ }
+
+ outb(BRA_START + _data_format, HCNT);
+
+ for (count = 0; count <= SMBTIMEOUT; count++) {
+ if (!(inb(HSTS) & BRA_HOST_BUSY))
+ break;
+ udelay(SMB_POLL_UDELAY);
+ }
+
+ if (count > SMBTIMEOUT) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ if (inb(HSTS) & BRA_HSTS_ERR_MASK) {
+ status = -EIO;
+ goto err;
+ }
+
+ if (operation == SMBUS_READ)
+ *result = ((inb(HDAT0) & 0xff) + ((inb(HDAT1) & 0xff) << 8));
+
+err:
+ outb(0xff, HSTS);
+ return status;
+}
+
+static int secocec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct secocec_data *cec = cec_get_drvdata(adap);
+ struct device *dev = cec->dev;
+ u16 val = 0;
+ int status;
+
+ if (enable) {
+ /* Clear the status register */
+ status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
+ if (status)
+ goto err;
+
+ status = smb_wr16(SECOCEC_STATUS_REG_1, val);
+ if (status)
+ goto err;
+
+ /* Enable the interrupts */
+ status = smb_rd16(SECOCEC_ENABLE_REG_1, &val);
+ if (status)
+ goto err;
+
+ status = smb_wr16(SECOCEC_ENABLE_REG_1,
+ val | SECOCEC_ENABLE_REG_1_CEC);
+ if (status)
+ goto err;
+
+ dev_dbg(dev, "Device enabled");
+ } else {
+ /* Clear the status register */
+ status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
+ status = smb_wr16(SECOCEC_STATUS_REG_1, val);
+
+ /* Disable the interrupts */
+ status = smb_rd16(SECOCEC_ENABLE_REG_1, &val);
+ status = smb_wr16(SECOCEC_ENABLE_REG_1, val &
+ ~SECOCEC_ENABLE_REG_1_CEC &
+ ~SECOCEC_ENABLE_REG_1_IR);
+
+ dev_dbg(dev, "Device disabled");
+ }
+
+ return 0;
+err:
+ return status;
+}
+
+static int secocec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ u16 enable_val = 0;
+ int status;
+
+ /* Disable device */
+ status = smb_rd16(SECOCEC_ENABLE_REG_1, &enable_val);
+ if (status)
+ return status;
+
+ status = smb_wr16(SECOCEC_ENABLE_REG_1,
+ enable_val & ~SECOCEC_ENABLE_REG_1_CEC);
+ if (status)
+ return status;
+
+ /* Write logical address
+ * NOTE: CEC_LOG_ADDR_INVALID is mapped to the 'Unregistered' LA
+ */
+ status = smb_wr16(SECOCEC_DEVICE_LA, logical_addr & 0xf);
+ if (status)
+ return status;
+
+ /* Re-enable device */
+ status = smb_wr16(SECOCEC_ENABLE_REG_1,
+ enable_val | SECOCEC_ENABLE_REG_1_CEC);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+static int secocec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ u16 payload_len, payload_id_len, destination, val = 0;
+ u8 *payload_msg;
+ int status;
+ u8 i;
+
+ /* Device msg len already accounts for header */
+ payload_id_len = msg->len - 1;
+
+ /* Send data length */
+ status = smb_wr16(SECOCEC_WRITE_DATA_LENGTH, payload_id_len);
+ if (status)
+ goto err;
+
+ /* Send Operation ID if present */
+ if (payload_id_len > 0) {
+ status = smb_wr16(SECOCEC_WRITE_OPERATION_ID, msg->msg[1]);
+ if (status)
+ goto err;
+ }
+ /* Send data if present */
+ if (payload_id_len > 1) {
+ /* Only data; */
+ payload_len = msg->len - 2;
+ payload_msg = &msg->msg[2];
+
+ /* Copy message into registers */
+ for (i = 0; i < payload_len; i += 2) {
+ /* hi byte */
+ val = payload_msg[i + 1] << 8;
+
+ /* lo byte */
+ val |= payload_msg[i];
+
+ status = smb_wr16(SECOCEC_WRITE_DATA_00 + i / 2, val);
+ if (status)
+ goto err;
+ }
+ }
+ /* Send msg source/destination and fire msg */
+ destination = msg->msg[0];
+ status = smb_wr16(SECOCEC_WRITE_BYTE0, destination);
+ if (status)
+ goto err;
+
+ return 0;
+
+err:
+ return status;
+}
+
+static void secocec_tx_done(struct cec_adapter *adap, u16 status_val)
+{
+ if (status_val & SECOCEC_STATUS_TX_ERROR_MASK) {
+ if (status_val & SECOCEC_STATUS_TX_NACK_ERROR)
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK);
+ else
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_ERROR);
+ } else {
+ cec_transmit_attempt_done(adap, CEC_TX_STATUS_OK);
+ }
+
+ /* Reset status reg */
+ status_val = SECOCEC_STATUS_TX_ERROR_MASK |
+ SECOCEC_STATUS_MSG_SENT_MASK |
+ SECOCEC_STATUS_TX_NACK_ERROR;
+ smb_wr16(SECOCEC_STATUS, status_val);
+}
+
+static void secocec_rx_done(struct cec_adapter *adap, u16 status_val)
+{
+ struct secocec_data *cec = cec_get_drvdata(adap);
+ struct device *dev = cec->dev;
+ struct cec_msg msg = { };
+ bool flag_overflow = false;
+ u8 payload_len, i = 0;
+ u8 *payload_msg;
+ u16 val = 0;
+ int status;
+
+ if (status_val & SECOCEC_STATUS_RX_OVERFLOW_MASK) {
+ /* NOTE: Untested, it also might not be necessary */
+ dev_warn(dev, "Received more than 16 bytes. Discarding");
+ flag_overflow = true;
+ }
+
+ if (status_val & SECOCEC_STATUS_RX_ERROR_MASK) {
+ dev_warn(dev, "Message received with errors. Discarding");
+ status = -EIO;
+ goto rxerr;
+ }
+
+ /* Read message length */
+ status = smb_rd16(SECOCEC_READ_DATA_LENGTH, &val);
+ if (status)
+ return;
+
+ /* Device msg len already accounts for the header */
+ msg.len = min(val + 1, CEC_MAX_MSG_SIZE);
+
+ /* Read logical address */
+ status = smb_rd16(SECOCEC_READ_BYTE0, &val);
+ if (status)
+ return;
+
+ /* device stores source LA and destination */
+ msg.msg[0] = val;
+
+ /* Read operation ID */
+ status = smb_rd16(SECOCEC_READ_OPERATION_ID, &val);
+ if (status)
+ return;
+
+ msg.msg[1] = val;
+
+ /* Read data if present */
+ if (msg.len > 1) {
+ payload_len = msg.len - 2;
+ payload_msg = &msg.msg[2];
+
+ /* device stores 2 bytes in every 16-bit val */
+ for (i = 0; i < payload_len; i += 2) {
+ status = smb_rd16(SECOCEC_READ_DATA_00 + i / 2, &val);
+ if (status)
+ return;
+
+ /* low byte, skipping header */
+ payload_msg[i] = val & 0x00ff;
+
+ /* hi byte */
+ payload_msg[i + 1] = (val & 0xff00) >> 8;
+ }
+ }
+
+ cec_received_msg(cec->cec_adap, &msg);
+
+ /* Reset status reg */
+ status_val = SECOCEC_STATUS_MSG_RECEIVED_MASK;
+ if (flag_overflow)
+ status_val |= SECOCEC_STATUS_RX_OVERFLOW_MASK;
+
+ status = smb_wr16(SECOCEC_STATUS, status_val);
+
+ return;
+
+rxerr:
+ /* Reset error reg */
+ status_val = SECOCEC_STATUS_MSG_RECEIVED_MASK |
+ SECOCEC_STATUS_RX_ERROR_MASK;
+ if (flag_overflow)
+ status_val |= SECOCEC_STATUS_RX_OVERFLOW_MASK;
+ smb_wr16(SECOCEC_STATUS, status_val);
+}
+
+static const struct cec_adap_ops secocec_cec_adap_ops = {
+ /* Low-level callbacks */
+ .adap_enable = secocec_adap_enable,
+ .adap_log_addr = secocec_adap_log_addr,
+ .adap_transmit = secocec_adap_transmit,
+};
+
+#ifdef CONFIG_VIDEO_SECO_RC
+static int secocec_ir_probe(void *priv)
+{
+ struct secocec_data *cec = priv;
+ struct device *dev = cec->dev;
+ int status;
+ u16 val;
+
+ /* Prepare the RC input device */
+ cec->ir = devm_rc_allocate_device(dev, RC_DRIVER_SCANCODE);
+ if (!cec->ir)
+ return -ENOMEM;
+
+ snprintf(cec->ir_input_phys, sizeof(cec->ir_input_phys),
+ "%s/input0", dev_name(dev));
+
+ cec->ir->device_name = dev_name(dev);
+ cec->ir->input_phys = cec->ir_input_phys;
+ cec->ir->input_id.bustype = BUS_HOST;
+ cec->ir->input_id.vendor = 0;
+ cec->ir->input_id.product = 0;
+ cec->ir->input_id.version = 1;
+ cec->ir->driver_name = SECOCEC_DEV_NAME;
+ cec->ir->allowed_protocols = RC_PROTO_BIT_RC5;
+ cec->ir->priv = cec;
+ cec->ir->map_name = RC_MAP_HAUPPAUGE;
+ cec->ir->timeout = MS_TO_NS(100);
+
+ /* Clear the status register */
+ status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
+ if (status != 0)
+ goto err;
+
+ status = smb_wr16(SECOCEC_STATUS_REG_1, val);
+ if (status != 0)
+ goto err;
+
+ /* Enable the interrupts */
+ status = smb_rd16(SECOCEC_ENABLE_REG_1, &val);
+ if (status != 0)
+ goto err;
+
+ status = smb_wr16(SECOCEC_ENABLE_REG_1,
+ val | SECOCEC_ENABLE_REG_1_IR);
+ if (status != 0)
+ goto err;
+
+ dev_dbg(dev, "IR enabled");
+
+ status = devm_rc_register_device(dev, cec->ir);
+
+ if (status) {
+ dev_err(dev, "Failed to prepare input device");
+ cec->ir = NULL;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ smb_rd16(SECOCEC_ENABLE_REG_1, &val);
+
+ smb_wr16(SECOCEC_ENABLE_REG_1,
+ val & ~SECOCEC_ENABLE_REG_1_IR);
+
+ dev_dbg(dev, "IR disabled");
+ return status;
+}
+
+static int secocec_ir_rx(struct secocec_data *priv)
+{
+ struct secocec_data *cec = priv;
+ struct device *dev = cec->dev;
+ int status;
+ u16 val, key, addr, toggle;
+
+ if (!cec->ir)
+ return -ENODEV;
+
+ status = smb_rd16(SECOCEC_IR_READ_DATA, &val);
+ if (status != 0)
+ goto err;
+
+ key = val & SECOCEC_IR_COMMAND_MASK;
+ addr = (val & SECOCEC_IR_ADDRESS_MASK) >> SECOCEC_IR_ADDRESS_SHL;
+ toggle = (val & SECOCEC_IR_TOGGLE_MASK) >> SECOCEC_IR_TOGGLE_SHL;
+
+ rc_keydown(cec->ir, RC_PROTO_RC5, RC_SCANCODE_RC5(addr, key), toggle);
+
+ dev_dbg(dev, "IR key pressed: 0x%02x addr 0x%02x toggle 0x%02x", key,
+ addr, toggle);
+
+ return 0;
+
+err:
+ dev_err(dev, "IR Receive message failed (%d)", status);
+ return -EIO;
+}
+#else
+static int secocec_ir_rx(struct secocec_data *priv)
+{
+ return -ENODEV;
+}
+
+static int secocec_ir_probe(void *priv)
+{
+ return 0;
+}
+#endif
+
+static irqreturn_t secocec_irq_handler(int irq, void *priv)
+{
+ struct secocec_data *cec = priv;
+ struct device *dev = cec->dev;
+ u16 status_val, cec_val, val = 0;
+ int status;
+
+ /* Read status register */
+ status = smb_rd16(SECOCEC_STATUS_REG_1, &status_val);
+ if (status)
+ goto err;
+
+ if (status_val & SECOCEC_STATUS_REG_1_CEC) {
+ /* Read CEC status register */
+ status = smb_rd16(SECOCEC_STATUS, &cec_val);
+ if (status)
+ goto err;
+
+ if (cec_val & SECOCEC_STATUS_MSG_RECEIVED_MASK)
+ secocec_rx_done(cec->cec_adap, cec_val);
+
+ if (cec_val & SECOCEC_STATUS_MSG_SENT_MASK)
+ secocec_tx_done(cec->cec_adap, cec_val);
+
+ if ((~cec_val & SECOCEC_STATUS_MSG_SENT_MASK) &&
+ (~cec_val & SECOCEC_STATUS_MSG_RECEIVED_MASK))
+ dev_warn_once(dev,
+ "Message not received or sent, but interrupt fired");
+
+ val = SECOCEC_STATUS_REG_1_CEC;
+ }
+
+ if (status_val & SECOCEC_STATUS_REG_1_IR) {
+ val |= SECOCEC_STATUS_REG_1_IR;
+
+ secocec_ir_rx(cec);
+ }
+
+ /* Reset status register */
+ status = smb_wr16(SECOCEC_STATUS_REG_1, val);
+ if (status)
+ goto err;
+
+ return IRQ_HANDLED;
+
+err:
+ dev_err_once(dev, "IRQ: R/W SMBus operation failed (%d)", status);
+
+ /* Reset status register */
+ val = SECOCEC_STATUS_REG_1_CEC | SECOCEC_STATUS_REG_1_IR;
+ smb_wr16(SECOCEC_STATUS_REG_1, val);
+
+ return IRQ_HANDLED;
+}
+
+struct cec_dmi_match {
+ char *sys_vendor;
+ char *product_name;
+ char *devname;
+ char *conn;
+};
+
+static const struct cec_dmi_match secocec_dmi_match_table[] = {
+ /* UDOO X86 */
+ { "SECO", "UDOO x86", "0000:00:02.0", "Port B" },
+};
+
+static int secocec_cec_get_notifier(struct cec_notifier **notify)
+{
+ int i;
+
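+ /*
+ * Match the board via DMI, then look up its display controller so the
+ * CEC notifier can track the HDMI connector's physical address.
+ */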
+ for (i = 0; i < ARRAY_SIZE(secocec_dmi_match_table); ++i) {
+ const struct cec_dmi_match *m = &secocec_dmi_match_table[i];
+
+ if (dmi_match(DMI_SYS_VENDOR, m->sys_vendor) &&
+ dmi_match(DMI_PRODUCT_NAME, m->product_name)) {
+ struct device *d;
+
+ /* Find the device, bail out if not yet registered */
+ d = bus_find_device_by_name(&pci_bus_type, NULL,
+ m->devname);
+ if (!d)
+ return -EPROBE_DEFER;
+
+ *notify = cec_notifier_get_conn(d, m->conn);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int secocec_acpi_probe(struct secocec_data *sdev)
+{
+ struct device *dev = sdev->dev;
+ struct gpio_desc *gpio;
+ int irq = 0;
+
+ gpio = devm_gpiod_get(dev, NULL, GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ dev_err(dev, "Cannot request interrupt gpio");
+ return PTR_ERR(gpio);
+ }
+
+ irq = gpiod_to_irq(gpio);
+ if (irq < 0) {
+ dev_err(dev, "Cannot find valid irq");
+ return -ENODEV;
+ }
+ dev_dbg(dev, "irq-gpio is bound to IRQ %d", irq);
+
+ sdev->irq = irq;
+
+ return 0;
+}
+
+static int secocec_probe(struct platform_device *pdev)
+{
+ struct secocec_data *secocec;
+ struct device *dev = &pdev->dev;
+ int ret;
+ u16 val;
+
+ secocec = devm_kzalloc(dev, sizeof(*secocec), GFP_KERNEL);
+ if (!secocec)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, secocec);
+
+ /* Request SMBus regions */
+ if (!request_muxed_region(BRA_SMB_BASE_ADDR, 7, "CEC00001")) {
+ dev_err(dev, "Request memory region failed");
+ return -ENXIO;
+ }
+
+ secocec->pdev = pdev;
+ secocec->dev = dev;
+
+ if (!has_acpi_companion(dev)) {
+ dev_dbg(dev, "Cannot find any ACPI companion");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ret = secocec_acpi_probe(secocec);
+ if (ret) {
+ dev_err(dev, "Cannot assign gpio to IRQ");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ /* Firmware version check */
+ ret = smb_rd16(SECOCEC_VERSION, &val);
+ if (ret) {
+ dev_err(dev, "Cannot check fw version");
+ goto err;
+ }
+ if (val < SECOCEC_LATEST_FW) {
+ dev_err(dev, "CEC Firmware not supported (v.%04x). Use ver > v.%04x",
+ val, SECOCEC_LATEST_FW);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = secocec_cec_get_notifier(&secocec->notifier);
+ if (ret) {
+ dev_err(dev, "no CEC notifier available\n");
+ goto err;
+ }
+
+ ret = devm_request_threaded_irq(dev,
+ secocec->irq,
+ NULL,
+ secocec_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ dev_name(&pdev->dev), secocec);
+
+ if (ret) {
+ dev_err(dev, "Cannot request IRQ %d", secocec->irq);
+ ret = -EIO;
+ goto err;
+ }
+
+ /* Allocate CEC adapter */
+ secocec->cec_adap = cec_allocate_adapter(&secocec_cec_adap_ops,
+ secocec,
+ dev_name(dev),
+ CEC_CAP_DEFAULTS,
+ SECOCEC_MAX_ADDRS);
+
+ if (IS_ERR(secocec->cec_adap)) {
+ ret = PTR_ERR(secocec->cec_adap);
+ goto err;
+ }
+
+ ret = cec_register_adapter(secocec->cec_adap, dev);
+ if (ret)
+ goto err_delete_adapter;
+
+ if (secocec->notifier)
+ cec_register_cec_notifier(secocec->cec_adap, secocec->notifier);
+
+ ret = secocec_ir_probe(secocec);
+ if (ret)
+ goto err_delete_adapter;
+
+ platform_set_drvdata(pdev, secocec);
+
+ dev_dbg(dev, "Device registered");
+
+ return ret;
+
+err_delete_adapter:
+ cec_delete_adapter(secocec->cec_adap);
+err:
+ release_region(BRA_SMB_BASE_ADDR, 7);
+ dev_err(dev, "%s device probe failed\n", dev_name(dev));
+
+ return ret;
+}
+
+static int secocec_remove(struct platform_device *pdev)
+{
+ struct secocec_data *secocec = platform_get_drvdata(pdev);
+ u16 val;
+
+ if (secocec->ir) {
+ smb_rd16(SECOCEC_ENABLE_REG_1, &val);
+
+ smb_wr16(SECOCEC_ENABLE_REG_1, val & ~SECOCEC_ENABLE_REG_1_IR);
+
+ dev_dbg(&pdev->dev, "IR disabled");
+ }
+ cec_unregister_adapter(secocec->cec_adap);
+
+ if (secocec->notifier)
+ cec_notifier_put(secocec->notifier);
+
+ release_region(BRA_SMB_BASE_ADDR, 7);
+
+ dev_dbg(&pdev->dev, "CEC device removed");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int secocec_suspend(struct device *dev)
+{
+ int status;
+ u16 val;
+
+ dev_dbg(dev, "Device going to suspend, disabling");
+
+ /* Clear the status register */
+ status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
+ if (status)
+ goto err;
+
+ status = smb_wr16(SECOCEC_STATUS_REG_1, val);
+ if (status)
+ goto err;
+
+ /* Disable the interrupts */
+ status = smb_rd16(SECOCEC_ENABLE_REG_1, &val);
+ if (status)
+ goto err;
+
+ status = smb_wr16(SECOCEC_ENABLE_REG_1, val &
+ ~SECOCEC_ENABLE_REG_1_CEC & ~SECOCEC_ENABLE_REG_1_IR);
+ if (status)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(dev, "Suspend failed (err: %d)", status);
+ return status;
+}
+
+static int secocec_resume(struct device *dev)
+{
+ int status;
+ u16 val;
+
+ dev_dbg(dev, "Resuming device from suspend");
+
+ /* Clear the status register */
+ status = smb_rd16(SECOCEC_STATUS_REG_1, &val);
+ if (status)
+ goto err;
+
+ status = smb_wr16(SECOCEC_STATUS_REG_1, val);
+ if (status)
+ goto err;
+
+ /* Enable the interrupts */
+ status = smb_rd16(SECOCEC_ENABLE_REG_1, &val);
+ if (status)
+ goto err;
+
+ status = smb_wr16(SECOCEC_ENABLE_REG_1, val | SECOCEC_ENABLE_REG_1_CEC);
+ if (status)
+ goto err;
+
+ dev_dbg(dev, "Device resumed from suspend");
+
+ return 0;
+
+err:
+ dev_err(dev, "Resume failed (err: %d)", status);
+ return status;
+}
+
+static SIMPLE_DEV_PM_OPS(secocec_pm_ops, secocec_suspend, secocec_resume);
+#define SECOCEC_PM_OPS (&secocec_pm_ops)
+#else
+#define SECOCEC_PM_OPS NULL
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id secocec_acpi_match[] = {
+ {"CEC00001", 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(acpi, secocec_acpi_match);
+#endif
+
+static struct platform_driver secocec_driver = {
+ .driver = {
+ .name = SECOCEC_DEV_NAME,
+ .acpi_match_table = ACPI_PTR(secocec_acpi_match),
+ .pm = SECOCEC_PM_OPS,
+ },
+ .probe = secocec_probe,
+ .remove = secocec_remove,
+};
+
+module_platform_driver(secocec_driver);
+
+MODULE_DESCRIPTION("SECO CEC X86 Driver");
+MODULE_AUTHOR("Ettore Chimenti <ek5.chimenti@gmail.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/media/platform/seco-cec/seco-cec.h b/drivers/media/platform/seco-cec/seco-cec.h
new file mode 100644
index 000000000000..e632c4a2a044
--- /dev/null
+++ b/drivers/media/platform/seco-cec/seco-cec.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/*
+ * SECO X86 Boards CEC register defines
+ *
+ * Author: Ettore Chimenti <ek5.chimenti@gmail.com>
+ * Copyright (C) 2018, SECO SpA.
+ * Copyright (C) 2018, Aidilab Srl.
+ */
+
+#ifndef __SECO_CEC_H__
+#define __SECO_CEC_H__
+
+#define SECOCEC_MAX_ADDRS 1
+#define SECOCEC_DEV_NAME "secocec"
+#define SECOCEC_LATEST_FW 0x0f0b
+
+#define SMBTIMEOUT 0xfff
+#define SMB_POLL_UDELAY 10
+
+#define SMBUS_WRITE 0
+#define SMBUS_READ 1
+
+#define CMD_BYTE_DATA 0
+#define CMD_WORD_DATA 1
+
+/*
+ * SMBus definitions for Braswell
+ */
+
+#define BRA_DONE_STATUS BIT(7)
+#define BRA_INUSE_STS BIT(6)
+#define BRA_FAILED_OP BIT(4)
+#define BRA_BUS_ERR BIT(3)
+#define BRA_DEV_ERR BIT(2)
+#define BRA_INTR BIT(1)
+#define BRA_HOST_BUSY BIT(0)
+#define BRA_HSTS_ERR_MASK (BRA_FAILED_OP | BRA_BUS_ERR | BRA_DEV_ERR)
+
+#define BRA_PEC_EN BIT(7)
+#define BRA_START BIT(6)
+#define BRA_LAST_BYTE BIT(5)
+#define BRA_INTREN BIT(0)
+#define BRA_SMB_CMD (7 << 2)
+#define BRA_SMB_CMD_QUICK (0 << 2)
+#define BRA_SMB_CMD_BYTE (1 << 2)
+#define BRA_SMB_CMD_BYTE_DATA (2 << 2)
+#define BRA_SMB_CMD_WORD_DATA (3 << 2)
+#define BRA_SMB_CMD_PROCESS_CALL (4 << 2)
+#define BRA_SMB_CMD_BLOCK (5 << 2)
+#define BRA_SMB_CMD_I2CREAD (6 << 2)
+#define BRA_SMB_CMD_BLOCK_PROCESS (7 << 2)
+
+#define BRA_SMB_BASE_ADDR 0x2040
+#define HSTS (BRA_SMB_BASE_ADDR + 0)
+#define HCNT (BRA_SMB_BASE_ADDR + 2)
+#define HCMD (BRA_SMB_BASE_ADDR + 3)
+#define XMIT_SLVA (BRA_SMB_BASE_ADDR + 4)
+#define HDAT0 (BRA_SMB_BASE_ADDR + 5)
+#define HDAT1 (BRA_SMB_BASE_ADDR + 6)
+
+/*
+ * Microcontroller Address
+ */
+
+#define SECOCEC_MICRO_ADDRESS 0x40
+
+/*
+ * STM32 SMBus Registers
+ */
+
+#define SECOCEC_VERSION 0x00
+#define SECOCEC_ENABLE_REG_1 0x01
+#define SECOCEC_ENABLE_REG_2 0x02
+#define SECOCEC_STATUS_REG_1 0x03
+#define SECOCEC_STATUS_REG_2 0x04
+
+#define SECOCEC_STATUS 0x28
+#define SECOCEC_DEVICE_LA 0x29
+#define SECOCEC_READ_OPERATION_ID 0x2a
+#define SECOCEC_READ_DATA_LENGTH 0x2b
+#define SECOCEC_READ_DATA_00 0x2c
+#define SECOCEC_READ_DATA_02 0x2d
+#define SECOCEC_READ_DATA_04 0x2e
+#define SECOCEC_READ_DATA_06 0x2f
+#define SECOCEC_READ_DATA_08 0x30
+#define SECOCEC_READ_DATA_10 0x31
+#define SECOCEC_READ_DATA_12 0x32
+#define SECOCEC_READ_BYTE0 0x33
+#define SECOCEC_WRITE_OPERATION_ID 0x34
+#define SECOCEC_WRITE_DATA_LENGTH 0x35
+#define SECOCEC_WRITE_DATA_00 0x36
+#define SECOCEC_WRITE_DATA_02 0x37
+#define SECOCEC_WRITE_DATA_04 0x38
+#define SECOCEC_WRITE_DATA_06 0x39
+#define SECOCEC_WRITE_DATA_08 0x3a
+#define SECOCEC_WRITE_DATA_10 0x3b
+#define SECOCEC_WRITE_DATA_12 0x3c
+#define SECOCEC_WRITE_BYTE0 0x3d
+
+#define SECOCEC_IR_READ_DATA 0x3e
+
+/*
+ * IR
+ */
+
+#define SECOCEC_IR_COMMAND_MASK 0x007F
+#define SECOCEC_IR_COMMAND_SHL 0
+#define SECOCEC_IR_ADDRESS_MASK 0x1F00
+#define SECOCEC_IR_ADDRESS_SHL 7
+#define SECOCEC_IR_TOGGLE_MASK 0x8000
+#define SECOCEC_IR_TOGGLE_SHL 15
+
+/*
+ * Enabling register
+ */
+
+#define SECOCEC_ENABLE_REG_1_CEC 0x1000
+#define SECOCEC_ENABLE_REG_1_IR 0x2000
+#define SECOCEC_ENABLE_REG_1_IR_PASSTHROUGH 0x4000
+
+/*
+ * Status register
+ */
+
+#define SECOCEC_STATUS_REG_1_CEC SECOCEC_ENABLE_REG_1_CEC
+#define SECOCEC_STATUS_REG_1_IR SECOCEC_ENABLE_REG_1_IR
+#define SECOCEC_STATUS_REG_1_IR_PASSTHR SECOCEC_ENABLE_REG_1_IR_PASSTHROUGH
+
+/*
+ * Status data
+ */
+
+#define SECOCEC_STATUS_MSG_RECEIVED_MASK BIT(0)
+#define SECOCEC_STATUS_RX_ERROR_MASK BIT(1)
+#define SECOCEC_STATUS_MSG_SENT_MASK BIT(2)
+#define SECOCEC_STATUS_TX_ERROR_MASK BIT(3)
+
+#define SECOCEC_STATUS_TX_NACK_ERROR BIT(4)
+#define SECOCEC_STATUS_RX_OVERFLOW_MASK BIT(5)
+
+#endif /* __SECO_CEC_H__ */
diff --git a/drivers/media/platform/sh_vou.c b/drivers/media/platform/sh_vou.c
index cee58b125548..5799aa4b9323 100644
--- a/drivers/media/platform/sh_vou.c
+++ b/drivers/media/platform/sh_vou.c
@@ -1007,7 +1007,7 @@ static int sh_vou_s_selection(struct file *file, void *fh,
/*
* No down-scaling. According to the API, current call has precedence:
- * http://v4l2spec.bytesex.org/spec/x1904.htm#AEN1954 paragraph two.
+ * https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/crop.html#cropping-structures
*/
vou_adjust_input(&geo, vou_dev->std);
diff --git a/drivers/media/platform/sti/bdisp/bdisp-hw.c b/drivers/media/platform/sti/bdisp/bdisp-hw.c
index 26d9fa7aeb5f..4372abbb5950 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-hw.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-hw.c
@@ -510,7 +510,7 @@ int bdisp_hw_alloc_filters(struct device *dev)
/* Allocate all the filters within a single memory page */
size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
- base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA,
+ base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL,
DMA_ATTR_WRITE_COMBINE);
if (!base)
return -ENOMEM;
diff --git a/drivers/media/platform/sunxi/sun6i-csi/Kconfig b/drivers/media/platform/sunxi/sun6i-csi/Kconfig
new file mode 100644
index 000000000000..018e3ec788c0
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-csi/Kconfig
@@ -0,0 +1,9 @@
+config VIDEO_SUN6I_CSI
+ tristate "Allwinner V3s Camera Sensor Interface driver"
+ depends on VIDEO_V4L2 && COMMON_CLK && VIDEO_V4L2_SUBDEV_API && HAS_DMA
+ depends on ARCH_SUNXI || COMPILE_TEST
+ select VIDEOBUF2_DMA_CONTIG
+ select REGMAP_MMIO
+ select V4L2_FWNODE
+ help
+ Support for the Allwinner Camera Sensor Interface Controller on V3s.
diff --git a/drivers/media/platform/sunxi/sun6i-csi/Makefile b/drivers/media/platform/sunxi/sun6i-csi/Makefile
new file mode 100644
index 000000000000..213cb6be9e9c
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-csi/Makefile
@@ -0,0 +1,3 @@
+sun6i-csi-y += sun6i_video.o sun6i_csi.o
+
+obj-$(CONFIG_VIDEO_SUN6I_CSI) += sun6i-csi.o
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
new file mode 100644
index 000000000000..6950585edb5a
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -0,0 +1,913 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2011-2018 Magewell Electronics Co., Ltd. (Nanjing)
+ * All rights reserved.
+ * Author: Yong Deng <yong.deng@magewell.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include "sun6i_csi.h"
+#include "sun6i_csi_reg.h"
+
+#define MODULE_NAME "sun6i-csi"
+
+struct sun6i_csi_dev {
+ struct sun6i_csi csi;
+ struct device *dev;
+
+ struct regmap *regmap;
+ struct clk *clk_mod;
+ struct clk *clk_ram;
+ struct reset_control *rstc_bus;
+
+ int planar_offset[3];
+};
+
+static inline struct sun6i_csi_dev *sun6i_csi_to_dev(struct sun6i_csi *csi)
+{
+ return container_of(csi, struct sun6i_csi_dev, csi);
+}
+
+/* TODO add 10&12 bit YUV, RGB support */
+bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
+ u32 pixformat, u32 mbus_code)
+{
+ struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi);
+
+ /*
+ * Some video receivers support both 8-bit and 16-bit bus widths.
+ * The width actually wired up is described in the device tree.
+ */
+ if ((sdev->csi.v4l2_ep.bus_type == V4L2_MBUS_PARALLEL
+ || sdev->csi.v4l2_ep.bus_type == V4L2_MBUS_BT656)
+ && sdev->csi.v4l2_ep.bus.parallel.bus_width == 16) {
+ switch (pixformat) {
+ case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
+ switch (mbus_code) {
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
+ return true;
+ default:
+ dev_dbg(sdev->dev, "Unsupported mbus code: 0x%x\n",
+ mbus_code);
+ break;
+ }
+ break;
+ default:
+ dev_dbg(sdev->dev, "Unsupported pixformat: 0x%x\n",
+ pixformat);
+ break;
+ }
+ return false;
+ }
+
+ switch (pixformat) {
+ case V4L2_PIX_FMT_SBGGR8:
+ return (mbus_code == MEDIA_BUS_FMT_SBGGR8_1X8);
+ case V4L2_PIX_FMT_SGBRG8:
+ return (mbus_code == MEDIA_BUS_FMT_SGBRG8_1X8);
+ case V4L2_PIX_FMT_SGRBG8:
+ return (mbus_code == MEDIA_BUS_FMT_SGRBG8_1X8);
+ case V4L2_PIX_FMT_SRGGB8:
+ return (mbus_code == MEDIA_BUS_FMT_SRGGB8_1X8);
+ case V4L2_PIX_FMT_SBGGR10:
+ return (mbus_code == MEDIA_BUS_FMT_SBGGR10_1X10);
+ case V4L2_PIX_FMT_SGBRG10:
+ return (mbus_code == MEDIA_BUS_FMT_SGBRG10_1X10);
+ case V4L2_PIX_FMT_SGRBG10:
+ return (mbus_code == MEDIA_BUS_FMT_SGRBG10_1X10);
+ case V4L2_PIX_FMT_SRGGB10:
+ return (mbus_code == MEDIA_BUS_FMT_SRGGB10_1X10);
+ case V4L2_PIX_FMT_SBGGR12:
+ return (mbus_code == MEDIA_BUS_FMT_SBGGR12_1X12);
+ case V4L2_PIX_FMT_SGBRG12:
+ return (mbus_code == MEDIA_BUS_FMT_SGBRG12_1X12);
+ case V4L2_PIX_FMT_SGRBG12:
+ return (mbus_code == MEDIA_BUS_FMT_SGRBG12_1X12);
+ case V4L2_PIX_FMT_SRGGB12:
+ return (mbus_code == MEDIA_BUS_FMT_SRGGB12_1X12);
+
+ case V4L2_PIX_FMT_YUYV:
+ return (mbus_code == MEDIA_BUS_FMT_YUYV8_2X8);
+ case V4L2_PIX_FMT_YVYU:
+ return (mbus_code == MEDIA_BUS_FMT_YVYU8_2X8);
+ case V4L2_PIX_FMT_UYVY:
+ return (mbus_code == MEDIA_BUS_FMT_UYVY8_2X8);
+ case V4L2_PIX_FMT_VYUY:
+ return (mbus_code == MEDIA_BUS_FMT_VYUY8_2X8);
+
+ case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ case V4L2_PIX_FMT_YUV422P:
+ switch (mbus_code) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ return true;
+ default:
+ dev_dbg(sdev->dev, "Unsupported mbus code: 0x%x\n",
+ mbus_code);
+ break;
+ }
+ break;
+ default:
+ dev_dbg(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat);
+ break;
+ }
+
+ return false;
+}
+
+int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable)
+{
+ struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi);
+ struct regmap *regmap = sdev->regmap;
+ int ret;
+
+ if (!enable) {
+ regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, 0);
+
+ clk_disable_unprepare(sdev->clk_ram);
+ clk_disable_unprepare(sdev->clk_mod);
+ reset_control_assert(sdev->rstc_bus);
+ return 0;
+ }
+
+ ret = clk_prepare_enable(sdev->clk_mod);
+ if (ret) {
+ dev_err(sdev->dev, "Enable csi clk err %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(sdev->clk_ram);
+ if (ret) {
+ dev_err(sdev->dev, "Enable clk_dram_csi clk err %d\n", ret);
+ goto clk_mod_disable;
+ }
+
+ ret = reset_control_deassert(sdev->rstc_bus);
+ if (ret) {
+ dev_err(sdev->dev, "reset err %d\n", ret);
+ goto clk_ram_disable;
+ }
+
+ regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, CSI_EN_CSI_EN);
+
+ return 0;
+
+clk_ram_disable:
+ clk_disable_unprepare(sdev->clk_ram);
+clk_mod_disable:
+ clk_disable_unprepare(sdev->clk_mod);
+ return ret;
+}
+
+static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_dev *sdev,
+ u32 mbus_code, u32 pixformat)
+{
+ /* Bayer: the MEDIA_BUS_FMT Bayer codes live in the 0x3000 range */
+ if ((mbus_code & 0xF000) == 0x3000)
+ return CSI_INPUT_FORMAT_RAW;
+
+ switch (pixformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ return CSI_INPUT_FORMAT_RAW;
+ default:
+ break;
+ }
+
+ /* YUV420 input is not supported yet */
+ dev_dbg(sdev->dev, "Select YUV422 as default input format of CSI.\n");
+ return CSI_INPUT_FORMAT_YUV422;
+}
+
+static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev,
+ u32 pixformat, u32 field)
+{
+ bool buf_interlaced = false;
+
+ if (field == V4L2_FIELD_INTERLACED
+ || field == V4L2_FIELD_INTERLACED_TB
+ || field == V4L2_FIELD_INTERLACED_BT)
+ buf_interlaced = true;
+
+ switch (pixformat) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ return buf_interlaced ? CSI_FRAME_RAW_8 : CSI_FIELD_RAW_8;
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ return buf_interlaced ? CSI_FRAME_RAW_10 : CSI_FIELD_RAW_10;
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ return buf_interlaced ? CSI_FRAME_RAW_12 : CSI_FIELD_RAW_12;
+
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ return buf_interlaced ? CSI_FRAME_RAW_8 : CSI_FIELD_RAW_8;
+
+ case V4L2_PIX_FMT_HM12:
+ return buf_interlaced ? CSI_FRAME_MB_YUV420 :
+ CSI_FIELD_MB_YUV420;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ return buf_interlaced ? CSI_FRAME_UV_CB_YUV420 :
+ CSI_FIELD_UV_CB_YUV420;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ return buf_interlaced ? CSI_FRAME_PLANAR_YUV420 :
+ CSI_FIELD_PLANAR_YUV420;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ return buf_interlaced ? CSI_FRAME_UV_CB_YUV422 :
+ CSI_FIELD_UV_CB_YUV422;
+ case V4L2_PIX_FMT_YUV422P:
+ return buf_interlaced ? CSI_FRAME_PLANAR_YUV422 :
+ CSI_FIELD_PLANAR_YUV422;
+ default:
+ dev_warn(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat);
+ break;
+ }
+
+ return CSI_FIELD_RAW_8;
+}
+
+static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev,
+ u32 mbus_code, u32 pixformat)
+{
+ switch (pixformat) {
+ case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YUV422P:
+ switch (mbus_code) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ return CSI_INPUT_SEQ_UYVY;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
+ return CSI_INPUT_SEQ_VYUY;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ return CSI_INPUT_SEQ_YUYV;
+ case MEDIA_BUS_FMT_YVYU8_1X16:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ return CSI_INPUT_SEQ_YVYU;
+ default:
+ dev_warn(sdev->dev, "Unsupported mbus code: 0x%x\n",
+ mbus_code);
+ break;
+ }
+ break;
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_YVU420:
+ switch (mbus_code) {
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ return CSI_INPUT_SEQ_VYUY;
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
+ return CSI_INPUT_SEQ_UYVY;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ return CSI_INPUT_SEQ_YVYU;
+ case MEDIA_BUS_FMT_YVYU8_1X16:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ return CSI_INPUT_SEQ_YUYV;
+ default:
+ dev_warn(sdev->dev, "Unsupported mbus code: 0x%x\n",
+ mbus_code);
+ break;
+ }
+ break;
+
+ case V4L2_PIX_FMT_YUYV:
+ return CSI_INPUT_SEQ_YUYV;
+
+ default:
+ dev_warn(sdev->dev, "Unsupported pixformat: 0x%x, defaulting to YUYV\n",
+ pixformat);
+ break;
+ }
+
+ return CSI_INPUT_SEQ_YUYV;
+}
+
+static void sun6i_csi_setup_bus(struct sun6i_csi_dev *sdev)
+{
+ struct v4l2_fwnode_endpoint *endpoint = &sdev->csi.v4l2_ep;
+ struct sun6i_csi *csi = &sdev->csi;
+ unsigned char bus_width;
+ u32 flags;
+ u32 cfg;
+ bool input_interlaced = false;
+
+ if (csi->config.field == V4L2_FIELD_INTERLACED
+ || csi->config.field == V4L2_FIELD_INTERLACED_TB
+ || csi->config.field == V4L2_FIELD_INTERLACED_BT)
+ input_interlaced = true;
+
+ bus_width = endpoint->bus.parallel.bus_width;
+
+ regmap_read(sdev->regmap, CSI_IF_CFG_REG, &cfg);
+
+ cfg &= ~(CSI_IF_CFG_CSI_IF_MASK | CSI_IF_CFG_MIPI_IF_MASK |
+ CSI_IF_CFG_IF_DATA_WIDTH_MASK |
+ CSI_IF_CFG_CLK_POL_MASK | CSI_IF_CFG_VREF_POL_MASK |
+ CSI_IF_CFG_HREF_POL_MASK | CSI_IF_CFG_FIELD_MASK |
+ CSI_IF_CFG_SRC_TYPE_MASK);
+
+ if (input_interlaced)
+ cfg |= CSI_IF_CFG_SRC_TYPE_INTERLACED;
+ else
+ cfg |= CSI_IF_CFG_SRC_TYPE_PROGRESSED;
+
+ switch (endpoint->bus_type) {
+ case V4L2_MBUS_PARALLEL:
+ cfg |= CSI_IF_CFG_MIPI_IF_CSI;
+
+ flags = endpoint->bus.parallel.flags;
+
+ cfg |= (bus_width == 16) ? CSI_IF_CFG_CSI_IF_YUV422_16BIT :
+ CSI_IF_CFG_CSI_IF_YUV422_INTLV;
+
+ if (flags & V4L2_MBUS_FIELD_EVEN_LOW)
+ cfg |= CSI_IF_CFG_FIELD_POSITIVE;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
+ cfg |= CSI_IF_CFG_VREF_POL_POSITIVE;
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
+ cfg |= CSI_IF_CFG_HREF_POL_POSITIVE;
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ cfg |= CSI_IF_CFG_CLK_POL_FALLING_EDGE;
+ break;
+ case V4L2_MBUS_BT656:
+ cfg |= CSI_IF_CFG_MIPI_IF_CSI;
+
+ flags = endpoint->bus.parallel.flags;
+
+ cfg |= (bus_width == 16) ? CSI_IF_CFG_CSI_IF_BT1120 :
+ CSI_IF_CFG_CSI_IF_BT656;
+
+ if (flags & V4L2_MBUS_FIELD_EVEN_LOW)
+ cfg |= CSI_IF_CFG_FIELD_POSITIVE;
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ cfg |= CSI_IF_CFG_CLK_POL_FALLING_EDGE;
+ break;
+ default:
+ dev_warn(sdev->dev, "Unsupported bus type: %d\n",
+ endpoint->bus_type);
+ break;
+ }
+
+ switch (bus_width) {
+ case 8:
+ cfg |= CSI_IF_CFG_IF_DATA_WIDTH_8BIT;
+ break;
+ case 10:
+ cfg |= CSI_IF_CFG_IF_DATA_WIDTH_10BIT;
+ break;
+ case 12:
+ cfg |= CSI_IF_CFG_IF_DATA_WIDTH_12BIT;
+ break;
+ case 16: /* No need to configure DATA_WIDTH for 16bit */
+ break;
+ default:
+ dev_warn(sdev->dev, "Unsupported bus width: %u\n", bus_width);
+ break;
+ }
+
+ regmap_write(sdev->regmap, CSI_IF_CFG_REG, cfg);
+}
+
+static void sun6i_csi_set_format(struct sun6i_csi_dev *sdev)
+{
+ struct sun6i_csi *csi = &sdev->csi;
+ u32 cfg;
+ u32 val;
+
+ regmap_read(sdev->regmap, CSI_CH_CFG_REG, &cfg);
+
+ cfg &= ~(CSI_CH_CFG_INPUT_FMT_MASK |
+ CSI_CH_CFG_OUTPUT_FMT_MASK | CSI_CH_CFG_VFLIP_EN |
+ CSI_CH_CFG_HFLIP_EN | CSI_CH_CFG_FIELD_SEL_MASK |
+ CSI_CH_CFG_INPUT_SEQ_MASK);
+
+ val = get_csi_input_format(sdev, csi->config.code,
+ csi->config.pixelformat);
+ cfg |= CSI_CH_CFG_INPUT_FMT(val);
+
+ val = get_csi_output_format(sdev, csi->config.pixelformat,
+ csi->config.field);
+ cfg |= CSI_CH_CFG_OUTPUT_FMT(val);
+
+ val = get_csi_input_seq(sdev, csi->config.code,
+ csi->config.pixelformat);
+ cfg |= CSI_CH_CFG_INPUT_SEQ(val);
+
+ if (csi->config.field == V4L2_FIELD_TOP)
+ cfg |= CSI_CH_CFG_FIELD_SEL_FIELD0;
+ else if (csi->config.field == V4L2_FIELD_BOTTOM)
+ cfg |= CSI_CH_CFG_FIELD_SEL_FIELD1;
+ else
+ cfg |= CSI_CH_CFG_FIELD_SEL_BOTH;
+
+ regmap_write(sdev->regmap, CSI_CH_CFG_REG, cfg);
+}
+
+static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev)
+{
+ struct sun6i_csi_config *config = &sdev->csi.config;
+ u32 bytesperline_y;
+ u32 bytesperline_c;
+ int *planar_offset = sdev->planar_offset;
+ u32 width = config->width;
+ u32 height = config->height;
+ u32 hor_len = width;
+
+ switch (config->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ dev_dbg(sdev->dev,
+ "Horizontal length should be 2 times of width for packed YUV formats!\n");
+ hor_len = width * 2;
+ break;
+ default:
+ break;
+ }
+
+ regmap_write(sdev->regmap, CSI_CH_HSIZE_REG,
+ CSI_CH_HSIZE_HOR_LEN(hor_len) |
+ CSI_CH_HSIZE_HOR_START(0));
+ regmap_write(sdev->regmap, CSI_CH_VSIZE_REG,
+ CSI_CH_VSIZE_VER_LEN(height) |
+ CSI_CH_VSIZE_VER_START(0));
+
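+ /* A planar_offset of -1 marks a plane the pixel format does not use. */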
+ planar_offset[0] = 0;
+ switch (config->pixelformat) {
+ case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ bytesperline_y = width;
+ bytesperline_c = width;
+ planar_offset[1] = bytesperline_y * height;
+ planar_offset[2] = -1;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ bytesperline_y = width;
+ bytesperline_c = width / 2;
+ planar_offset[1] = bytesperline_y * height;
+ planar_offset[2] = planar_offset[1] +
+ bytesperline_c * height / 2;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ bytesperline_y = width;
+ bytesperline_c = width / 2;
+ planar_offset[1] = bytesperline_y * height;
+ planar_offset[2] = planar_offset[1] +
+ bytesperline_c * height;
+ break;
+ default: /* raw */
+ dev_dbg(sdev->dev,
+ "Calculating pixelformat(0x%x)'s bytesperline as a packed format\n",
+ config->pixelformat);
+ bytesperline_y = (sun6i_csi_get_bpp(config->pixelformat) *
+ config->width) / 8;
+ bytesperline_c = 0;
+ planar_offset[1] = -1;
+ planar_offset[2] = -1;
+ break;
+ }
+
+ regmap_write(sdev->regmap, CSI_CH_BUF_LEN_REG,
+ CSI_CH_BUF_LEN_BUF_LEN_C(bytesperline_c) |
+ CSI_CH_BUF_LEN_BUF_LEN_Y(bytesperline_y));
+}
+
+int sun6i_csi_update_config(struct sun6i_csi *csi,
+ struct sun6i_csi_config *config)
+{
+ struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi);
+
+ if (!config)
+ return -EINVAL;
+
+ memcpy(&csi->config, config, sizeof(csi->config));
+
+ sun6i_csi_setup_bus(sdev);
+ sun6i_csi_set_format(sdev);
+ sun6i_csi_set_window(sdev);
+
+ return 0;
+}
+
+void sun6i_csi_update_buf_addr(struct sun6i_csi *csi, dma_addr_t addr)
+{
+ struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi);
+
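+ /* The buffer address registers take the DMA address in 32-bit words. */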
+ regmap_write(sdev->regmap, CSI_CH_F0_BUFA_REG,
+ (addr + sdev->planar_offset[0]) >> 2);
+ if (sdev->planar_offset[1] != -1)
+ regmap_write(sdev->regmap, CSI_CH_F1_BUFA_REG,
+ (addr + sdev->planar_offset[1]) >> 2);
+ if (sdev->planar_offset[2] != -1)
+ regmap_write(sdev->regmap, CSI_CH_F2_BUFA_REG,
+ (addr + sdev->planar_offset[2]) >> 2);
+}
+
+void sun6i_csi_set_stream(struct sun6i_csi *csi, bool enable)
+{
+ struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi);
+ struct regmap *regmap = sdev->regmap;
+
+ if (!enable) {
+ regmap_update_bits(regmap, CSI_CAP_REG, CSI_CAP_CH0_VCAP_ON, 0);
+ regmap_write(regmap, CSI_CH_INT_EN_REG, 0);
+ return;
+ }
+
+ regmap_write(regmap, CSI_CH_INT_STA_REG, 0xFF);
+ regmap_write(regmap, CSI_CH_INT_EN_REG,
+ CSI_CH_INT_EN_HB_OF_INT_EN |
+ CSI_CH_INT_EN_FIFO2_OF_INT_EN |
+ CSI_CH_INT_EN_FIFO1_OF_INT_EN |
+ CSI_CH_INT_EN_FIFO0_OF_INT_EN |
+ CSI_CH_INT_EN_FD_INT_EN |
+ CSI_CH_INT_EN_CD_INT_EN);
+
+ regmap_update_bits(regmap, CSI_CAP_REG, CSI_CAP_CH0_VCAP_ON,
+ CSI_CAP_CH0_VCAP_ON);
+}
+
+/* -----------------------------------------------------------------------------
+ * Media Controller and V4L2
+ */
+static int sun6i_csi_link_entity(struct sun6i_csi *csi,
+ struct media_entity *entity,
+ struct fwnode_handle *fwnode)
+{
+ struct media_entity *sink;
+ struct media_pad *sink_pad;
+ int src_pad_index;
+ int ret;
+
+ ret = media_entity_get_fwnode_pad(entity, fwnode, MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(csi->dev, "%s: no source pad in external entity %s\n",
+ __func__, entity->name);
+ return -EINVAL;
+ }
+
+ src_pad_index = ret;
+
+ sink = &csi->video.vdev.entity;
+ sink_pad = &csi->video.pad;
+
+ dev_dbg(csi->dev, "creating %s:%u -> %s:%u link\n",
+ entity->name, src_pad_index, sink->name, sink_pad->index);
+ ret = media_create_pad_link(entity, src_pad_index, sink,
+ sink_pad->index,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0) {
+ dev_err(csi->dev, "failed to create %s:%u -> %s:%u link\n",
+ entity->name, src_pad_index,
+ sink->name, sink_pad->index);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sun6i_subdev_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct sun6i_csi *csi = container_of(notifier, struct sun6i_csi,
+ notifier);
+ struct v4l2_device *v4l2_dev = &csi->v4l2_dev;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ dev_dbg(csi->dev, "notify complete, all subdevs registered\n");
+
+ sd = list_first_entry_or_null(&v4l2_dev->subdevs,
+ struct v4l2_subdev, list);
+ if (!sd)
+ return -EINVAL;
+
+ ret = sun6i_csi_link_entity(csi, &sd->entity, sd->fwnode);
+ if (ret < 0)
+ return ret;
+
+ ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
+ if (ret < 0)
+ return ret;
+
+ return media_device_register(&csi->media_dev);
+}
+
+static const struct v4l2_async_notifier_operations sun6i_csi_async_ops = {
+ .complete = sun6i_subdev_notify_complete,
+};
+
+static int sun6i_csi_fwnode_parse(struct device *dev,
+ struct v4l2_fwnode_endpoint *vep,
+ struct v4l2_async_subdev *asd)
+{
+ struct sun6i_csi *csi = dev_get_drvdata(dev);
+
+ if (vep->base.port || vep->base.id) {
+ dev_warn(dev, "Only support a single port with one endpoint\n");
+ return -ENOTCONN;
+ }
+
+ switch (vep->bus_type) {
+ case V4L2_MBUS_PARALLEL:
+ case V4L2_MBUS_BT656:
+ csi->v4l2_ep = *vep;
+ return 0;
+ default:
+ dev_err(dev, "Unsupported media bus type\n");
+ return -ENOTCONN;
+ }
+}
+
+static void sun6i_csi_v4l2_cleanup(struct sun6i_csi *csi)
+{
+ media_device_unregister(&csi->media_dev);
+ v4l2_async_notifier_unregister(&csi->notifier);
+ v4l2_async_notifier_cleanup(&csi->notifier);
+ sun6i_video_cleanup(&csi->video);
+ v4l2_device_unregister(&csi->v4l2_dev);
+ v4l2_ctrl_handler_free(&csi->ctrl_handler);
+ media_device_cleanup(&csi->media_dev);
+}
+
+static int sun6i_csi_v4l2_init(struct sun6i_csi *csi)
+{
+ int ret;
+
+ csi->media_dev.dev = csi->dev;
+ strscpy(csi->media_dev.model, "Allwinner Video Capture Device",
+ sizeof(csi->media_dev.model));
+ csi->media_dev.hw_revision = 0;
+
+ media_device_init(&csi->media_dev);
+ v4l2_async_notifier_init(&csi->notifier);
+
+ ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 0);
+ if (ret) {
+ dev_err(csi->dev, "V4L2 controls handler init failed (%d)\n",
+ ret);
+ goto clean_media;
+ }
+
+ csi->v4l2_dev.mdev = &csi->media_dev;
+ csi->v4l2_dev.ctrl_handler = &csi->ctrl_handler;
+ ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
+ if (ret) {
+ dev_err(csi->dev, "V4L2 device registration failed (%d)\n",
+ ret);
+ goto free_ctrl;
+ }
+
+ ret = sun6i_video_init(&csi->video, csi, "sun6i-csi");
+ if (ret)
+ goto unreg_v4l2;
+
+ ret = v4l2_async_notifier_parse_fwnode_endpoints(csi->dev,
+ &csi->notifier,
+ sizeof(struct v4l2_async_subdev),
+ sun6i_csi_fwnode_parse);
+ if (ret)
+ goto clean_video;
+
+ csi->notifier.ops = &sun6i_csi_async_ops;
+
+ ret = v4l2_async_notifier_register(&csi->v4l2_dev, &csi->notifier);
+ if (ret) {
+ dev_err(csi->dev, "notifier registration failed\n");
+ goto clean_video;
+ }
+
+ return 0;
+
+clean_video:
+ sun6i_video_cleanup(&csi->video);
+unreg_v4l2:
+ v4l2_device_unregister(&csi->v4l2_dev);
+free_ctrl:
+ v4l2_ctrl_handler_free(&csi->ctrl_handler);
+clean_media:
+ v4l2_async_notifier_cleanup(&csi->notifier);
+ media_device_cleanup(&csi->media_dev);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Resources and IRQ
+ */
+static irqreturn_t sun6i_csi_isr(int irq, void *dev_id)
+{
+ struct sun6i_csi_dev *sdev = (struct sun6i_csi_dev *)dev_id;
+ struct regmap *regmap = sdev->regmap;
+ u32 status;
+
+ regmap_read(regmap, CSI_CH_INT_STA_REG, &status);
+
+ if (!(status & 0xFF))
+ return IRQ_NONE;
+
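+ /* On a FIFO or bandwidth overflow, ack the interrupt and restart the CSI. */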
+ if ((status & CSI_CH_INT_STA_FIFO0_OF_PD) ||
+ (status & CSI_CH_INT_STA_FIFO1_OF_PD) ||
+ (status & CSI_CH_INT_STA_FIFO2_OF_PD) ||
+ (status & CSI_CH_INT_STA_HB_OF_PD)) {
+ regmap_write(regmap, CSI_CH_INT_STA_REG, status);
+ regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, 0);
+ regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN,
+ CSI_EN_CSI_EN);
+ return IRQ_HANDLED;
+ }
+
+ if (status & CSI_CH_INT_STA_FD_PD)
+ sun6i_video_frame_done(&sdev->csi.video);
+
+ regmap_write(regmap, CSI_CH_INT_STA_REG, status);
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config sun6i_csi_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1000,
+};
+
+static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *io_base;
+ int ret;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ sdev->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "bus", io_base,
+ &sun6i_csi_regmap_config);
+ if (IS_ERR(sdev->regmap)) {
+ dev_err(&pdev->dev, "Failed to init register map\n");
+ return PTR_ERR(sdev->regmap);
+ }
+
+ sdev->clk_mod = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(sdev->clk_mod)) {
+ dev_err(&pdev->dev, "Unable to acquire csi clock\n");
+ return PTR_ERR(sdev->clk_mod);
+ }
+
+ sdev->clk_ram = devm_clk_get(&pdev->dev, "ram");
+ if (IS_ERR(sdev->clk_ram)) {
+ dev_err(&pdev->dev, "Unable to acquire dram-csi clock\n");
+ return PTR_ERR(sdev->clk_ram);
+ }
+
+ sdev->rstc_bus = devm_reset_control_get_shared(&pdev->dev, NULL);
+ if (IS_ERR(sdev->rstc_bus)) {
+ dev_err(&pdev->dev, "Cannot get reset controller\n");
+ return PTR_ERR(sdev->rstc_bus);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No csi IRQ specified\n");
+ ret = -ENXIO;
+ return ret;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sun6i_csi_isr, 0, MODULE_NAME,
+ sdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request csi IRQ\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * PHYS_OFFSET isn't available on all architectures. In order to
+ * accommodate COMPILE_TEST, let's define it to something dumb.
+ */
+#if defined(CONFIG_COMPILE_TEST) && !defined(PHYS_OFFSET)
+#define PHYS_OFFSET 0
+#endif
+
+static int sun6i_csi_probe(struct platform_device *pdev)
+{
+ struct sun6i_csi_dev *sdev;
+ int ret;
+
+ sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return -ENOMEM;
+
+ sdev->dev = &pdev->dev;
+ /* The DMA bus has the memory mapped at 0 */
+ sdev->dev->dma_pfn_offset = PHYS_OFFSET >> PAGE_SHIFT;
+
+ ret = sun6i_csi_resource_request(sdev, pdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, sdev);
+
+ sdev->csi.dev = &pdev->dev;
+ return sun6i_csi_v4l2_init(&sdev->csi);
+}
+
+static int sun6i_csi_remove(struct platform_device *pdev)
+{
+ struct sun6i_csi_dev *sdev = platform_get_drvdata(pdev);
+
+ sun6i_csi_v4l2_cleanup(&sdev->csi);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_csi_of_match[] = {
+ { .compatible = "allwinner,sun6i-a31-csi", },
+ { .compatible = "allwinner,sun8i-v3s-csi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sun6i_csi_of_match);
+
+static struct platform_driver sun6i_csi_platform_driver = {
+ .probe = sun6i_csi_probe,
+ .remove = sun6i_csi_remove,
+ .driver = {
+ .name = MODULE_NAME,
+ .of_match_table = of_match_ptr(sun6i_csi_of_match),
+ },
+};
+module_platform_driver(sun6i_csi_platform_driver);
+
+MODULE_DESCRIPTION("Allwinner V3s Camera Sensor Interface driver");
+MODULE_AUTHOR("Yong Deng <yong.deng@magewell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
new file mode 100644
index 000000000000..0bb000712c33
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2011-2018 Magewell Electronics Co., Ltd. (Nanjing)
+ * All rights reserved.
+ * Author: Yong Deng <yong.deng@magewell.com>
+ */
+
+#ifndef __SUN6I_CSI_H__
+#define __SUN6I_CSI_H__
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#include "sun6i_video.h"
+
+struct sun6i_csi;
+
+/**
+ * struct sun6i_csi_config - configs for sun6i csi
+ * @pixelformat: v4l2 pixel format (V4L2_PIX_FMT_*)
+ * @code: media bus format code (MEDIA_BUS_FMT_*)
+ * @field: used interlacing type (enum v4l2_field)
+ * @width: frame width
+ * @height: frame height
+ */
+struct sun6i_csi_config {
+ u32 pixelformat;
+ u32 code;
+ u32 field;
+ u32 width;
+ u32 height;
+};
+
+struct sun6i_csi {
+ struct device *dev;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+
+ struct v4l2_async_notifier notifier;
+
+ /* video port settings */
+ struct v4l2_fwnode_endpoint v4l2_ep;
+
+ struct sun6i_csi_config config;
+
+ struct sun6i_video video;
+};
+
+/**
+ * sun6i_csi_is_format_supported() - check whether the format is supported by the CSI
+ * @csi: pointer to the csi
+ * @pixformat: v4l2 pixel format (V4L2_PIX_FMT_*)
+ * @mbus_code: media bus format code (MEDIA_BUS_FMT_*)
+ */
+bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, u32 pixformat,
+ u32 mbus_code);
+
+/**
+ * sun6i_csi_set_power() - power on/off the csi
+ * @csi: pointer to the csi
+ * @enable: on/off
+ */
+int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable);
+
+/**
+ * sun6i_csi_update_config() - update the csi register settings
+ * @csi: pointer to the csi
+ * @config: see struct sun6i_csi_config
+ */
+int sun6i_csi_update_config(struct sun6i_csi *csi,
+ struct sun6i_csi_config *config);
+
+/**
+ * sun6i_csi_update_buf_addr() - update the csi frame buffer address
+ * @csi: pointer to the csi
+ * @addr: frame buffer's physical address
+ */
+void sun6i_csi_update_buf_addr(struct sun6i_csi *csi, dma_addr_t addr);
+
+/**
+ * sun6i_csi_set_stream() - start/stop csi streaming
+ * @csi: pointer to the csi
+ * @enable: start/stop
+ */
+void sun6i_csi_set_stream(struct sun6i_csi *csi, bool enable);
+
+/* get bpp from v4l2 pixformat */
+static inline int sun6i_csi_get_bpp(unsigned int pixformat)
+{
+ switch (pixformat) {
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ return 8;
+ case V4L2_PIX_FMT_SBGGR10:
+ case V4L2_PIX_FMT_SGBRG10:
+ case V4L2_PIX_FMT_SGRBG10:
+ case V4L2_PIX_FMT_SRGGB10:
+ return 10;
+ case V4L2_PIX_FMT_SBGGR12:
+ case V4L2_PIX_FMT_SGBRG12:
+ case V4L2_PIX_FMT_SGRBG12:
+ case V4L2_PIX_FMT_SRGGB12:
+ case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ return 12;
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_YVYU:
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_VYUY:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_YUV422P:
+ return 16;
+ case V4L2_PIX_FMT_RGB24:
+ case V4L2_PIX_FMT_BGR24:
+ return 24;
+ case V4L2_PIX_FMT_RGB32:
+ case V4L2_PIX_FMT_BGR32:
+ return 32;
+ default:
+ WARN(1, "Unsupported pixformat: 0x%x\n", pixformat);
+ break;
+ }
+
+ return 0;
+}
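+
+/*
+ * Example (a sketch): V4L2_PIX_FMT_NV12 yields 12, so the capture code
+ * computes bytesperline = (640 * 12) >> 3 = 960 for a 640-pixel-wide
+ * frame, and sizeimage = bytesperline * height.
+ */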
+
+#endif /* __SUN6I_CSI_H__ */
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_reg.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_reg.h
new file mode 100644
index 000000000000..703fa14bb313
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_reg.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2011-2018 Magewell Electronics Co., Ltd. (Nanjing)
+ * All rights reserved.
+ * Author: Yong Deng <yong.deng@magewell.com>
+ */
+
+#ifndef __SUN6I_CSI_REG_H__
+#define __SUN6I_CSI_REG_H__
+
+#include <linux/kernel.h>
+
+#define CSI_EN_REG 0x0
+#define CSI_EN_VER_EN BIT(30)
+#define CSI_EN_CSI_EN BIT(0)
+
+#define CSI_IF_CFG_REG 0x4
+#define CSI_IF_CFG_SRC_TYPE_MASK BIT(21)
+#define CSI_IF_CFG_SRC_TYPE_PROGRESSED ((0 << 21) & CSI_IF_CFG_SRC_TYPE_MASK)
+#define CSI_IF_CFG_SRC_TYPE_INTERLACED ((1 << 21) & CSI_IF_CFG_SRC_TYPE_MASK)
+#define CSI_IF_CFG_FPS_DS_EN BIT(20)
+#define CSI_IF_CFG_FIELD_MASK BIT(19)
+#define CSI_IF_CFG_FIELD_NEGATIVE ((0 << 19) & CSI_IF_CFG_FIELD_MASK)
+#define CSI_IF_CFG_FIELD_POSITIVE ((1 << 19) & CSI_IF_CFG_FIELD_MASK)
+#define CSI_IF_CFG_VREF_POL_MASK BIT(18)
+#define CSI_IF_CFG_VREF_POL_NEGATIVE ((0 << 18) & CSI_IF_CFG_VREF_POL_MASK)
+#define CSI_IF_CFG_VREF_POL_POSITIVE ((1 << 18) & CSI_IF_CFG_VREF_POL_MASK)
+#define CSI_IF_CFG_HREF_POL_MASK BIT(17)
+#define CSI_IF_CFG_HREF_POL_NEGATIVE ((0 << 17) & CSI_IF_CFG_HREF_POL_MASK)
+#define CSI_IF_CFG_HREF_POL_POSITIVE ((1 << 17) & CSI_IF_CFG_HREF_POL_MASK)
+#define CSI_IF_CFG_CLK_POL_MASK BIT(16)
+#define CSI_IF_CFG_CLK_POL_RISING_EDGE ((0 << 16) & CSI_IF_CFG_CLK_POL_MASK)
+#define CSI_IF_CFG_CLK_POL_FALLING_EDGE ((1 << 16) & CSI_IF_CFG_CLK_POL_MASK)
+#define CSI_IF_CFG_IF_DATA_WIDTH_MASK GENMASK(10, 8)
+#define CSI_IF_CFG_IF_DATA_WIDTH_8BIT ((0 << 8) & CSI_IF_CFG_IF_DATA_WIDTH_MASK)
+#define CSI_IF_CFG_IF_DATA_WIDTH_10BIT ((1 << 8) & CSI_IF_CFG_IF_DATA_WIDTH_MASK)
+#define CSI_IF_CFG_IF_DATA_WIDTH_12BIT ((2 << 8) & CSI_IF_CFG_IF_DATA_WIDTH_MASK)
+#define CSI_IF_CFG_MIPI_IF_MASK BIT(7)
+#define CSI_IF_CFG_MIPI_IF_CSI (0 << 7)
+#define CSI_IF_CFG_MIPI_IF_MIPI BIT(7)
+#define CSI_IF_CFG_CSI_IF_MASK GENMASK(4, 0)
+#define CSI_IF_CFG_CSI_IF_YUV422_INTLV ((0 << 0) & CSI_IF_CFG_CSI_IF_MASK)
+#define CSI_IF_CFG_CSI_IF_YUV422_16BIT ((1 << 0) & CSI_IF_CFG_CSI_IF_MASK)
+#define CSI_IF_CFG_CSI_IF_BT656 ((4 << 0) & CSI_IF_CFG_CSI_IF_MASK)
+#define CSI_IF_CFG_CSI_IF_BT1120 ((5 << 0) & CSI_IF_CFG_CSI_IF_MASK)
+
+#define CSI_CAP_REG 0x8
+#define CSI_CAP_CH0_CAP_MASK_MASK GENMASK(5, 2)
+#define CSI_CAP_CH0_CAP_MASK(count) (((count) << 2) & CSI_CAP_CH0_CAP_MASK_MASK)
+#define CSI_CAP_CH0_VCAP_ON BIT(1)
+#define CSI_CAP_CH0_SCAP_ON BIT(0)
+
+#define CSI_SYNC_CNT_REG 0xc
+#define CSI_FIFO_THRS_REG 0x10
+#define CSI_BT656_HEAD_CFG_REG 0x14
+#define CSI_PTN_LEN_REG 0x30
+#define CSI_PTN_ADDR_REG 0x34
+#define CSI_VER_REG 0x3c
+
+#define CSI_CH_CFG_REG 0x44
+#define CSI_CH_CFG_INPUT_FMT_MASK GENMASK(23, 20)
+#define CSI_CH_CFG_INPUT_FMT(fmt) (((fmt) << 20) & CSI_CH_CFG_INPUT_FMT_MASK)
+#define CSI_CH_CFG_OUTPUT_FMT_MASK GENMASK(19, 16)
+#define CSI_CH_CFG_OUTPUT_FMT(fmt) (((fmt) << 16) & CSI_CH_CFG_OUTPUT_FMT_MASK)
+#define CSI_CH_CFG_VFLIP_EN BIT(13)
+#define CSI_CH_CFG_HFLIP_EN BIT(12)
+#define CSI_CH_CFG_FIELD_SEL_MASK GENMASK(11, 10)
+#define CSI_CH_CFG_FIELD_SEL_FIELD0 ((0 << 10) & CSI_CH_CFG_FIELD_SEL_MASK)
+#define CSI_CH_CFG_FIELD_SEL_FIELD1 ((1 << 10) & CSI_CH_CFG_FIELD_SEL_MASK)
+#define CSI_CH_CFG_FIELD_SEL_BOTH ((2 << 10) & CSI_CH_CFG_FIELD_SEL_MASK)
+#define CSI_CH_CFG_INPUT_SEQ_MASK GENMASK(9, 8)
+#define CSI_CH_CFG_INPUT_SEQ(seq) (((seq) << 8) & CSI_CH_CFG_INPUT_SEQ_MASK)
+
+#define CSI_CH_SCALE_REG 0x4c
+#define CSI_CH_SCALE_QUART_EN BIT(0)
+
+#define CSI_CH_F0_BUFA_REG 0x50
+
+#define CSI_CH_F1_BUFA_REG 0x58
+
+#define CSI_CH_F2_BUFA_REG 0x60
+
+#define CSI_CH_STA_REG 0x6c
+#define CSI_CH_STA_FIELD_STA_MASK BIT(2)
+#define CSI_CH_STA_FIELD_STA_FIELD0 ((0 << 2) & CSI_CH_STA_FIELD_STA_MASK)
+#define CSI_CH_STA_FIELD_STA_FIELD1 ((1 << 2) & CSI_CH_STA_FIELD_STA_MASK)
+#define CSI_CH_STA_VCAP_STA BIT(1)
+#define CSI_CH_STA_SCAP_STA BIT(0)
+
+#define CSI_CH_INT_EN_REG 0x70
+#define CSI_CH_INT_EN_VS_INT_EN BIT(7)
+#define CSI_CH_INT_EN_HB_OF_INT_EN BIT(6)
+#define CSI_CH_INT_EN_MUL_ERR_INT_EN BIT(5)
+#define CSI_CH_INT_EN_FIFO2_OF_INT_EN BIT(4)
+#define CSI_CH_INT_EN_FIFO1_OF_INT_EN BIT(3)
+#define CSI_CH_INT_EN_FIFO0_OF_INT_EN BIT(2)
+#define CSI_CH_INT_EN_FD_INT_EN BIT(1)
+#define CSI_CH_INT_EN_CD_INT_EN BIT(0)
+
+#define CSI_CH_INT_STA_REG 0x74
+#define CSI_CH_INT_STA_VS_PD BIT(7)
+#define CSI_CH_INT_STA_HB_OF_PD BIT(6)
+#define CSI_CH_INT_STA_MUL_ERR_PD BIT(5)
+#define CSI_CH_INT_STA_FIFO2_OF_PD BIT(4)
+#define CSI_CH_INT_STA_FIFO1_OF_PD BIT(3)
+#define CSI_CH_INT_STA_FIFO0_OF_PD BIT(2)
+#define CSI_CH_INT_STA_FD_PD BIT(1)
+#define CSI_CH_INT_STA_CD_PD BIT(0)
+
+#define CSI_CH_FLD1_VSIZE_REG 0x78
+
+#define CSI_CH_HSIZE_REG 0x80
+#define CSI_CH_HSIZE_HOR_LEN_MASK GENMASK(28, 16)
+#define CSI_CH_HSIZE_HOR_LEN(len) (((len) << 16) & CSI_CH_HSIZE_HOR_LEN_MASK)
+#define CSI_CH_HSIZE_HOR_START_MASK GENMASK(12, 0)
+#define CSI_CH_HSIZE_HOR_START(start) (((start) << 0) & CSI_CH_HSIZE_HOR_START_MASK)
+
+#define CSI_CH_VSIZE_REG 0x84
+#define CSI_CH_VSIZE_VER_LEN_MASK GENMASK(28, 16)
+#define CSI_CH_VSIZE_VER_LEN(len) (((len) << 16) & CSI_CH_VSIZE_VER_LEN_MASK)
+#define CSI_CH_VSIZE_VER_START_MASK GENMASK(12, 0)
+#define CSI_CH_VSIZE_VER_START(start) (((start) << 0) & CSI_CH_VSIZE_VER_START_MASK)
+
+#define CSI_CH_BUF_LEN_REG 0x88
+#define CSI_CH_BUF_LEN_BUF_LEN_C_MASK GENMASK(29, 16)
+#define CSI_CH_BUF_LEN_BUF_LEN_C(len) (((len) << 16) & CSI_CH_BUF_LEN_BUF_LEN_C_MASK)
+#define CSI_CH_BUF_LEN_BUF_LEN_Y_MASK GENMASK(13, 0)
+#define CSI_CH_BUF_LEN_BUF_LEN_Y(len) (((len) << 0) & CSI_CH_BUF_LEN_BUF_LEN_Y_MASK)
+
+#define CSI_CH_FLIP_SIZE_REG 0x8c
+#define CSI_CH_FLIP_SIZE_VER_LEN_MASK GENMASK(28, 16)
+#define CSI_CH_FLIP_SIZE_VER_LEN(len) (((len) << 16) & CSI_CH_FLIP_SIZE_VER_LEN_MASK)
+#define CSI_CH_FLIP_SIZE_VALID_LEN_MASK GENMASK(12, 0)
+#define CSI_CH_FLIP_SIZE_VALID_LEN(len) (((len) << 0) & CSI_CH_FLIP_SIZE_VALID_LEN_MASK)
+
+#define CSI_CH_FRM_CLK_CNT_REG 0x90
+#define CSI_CH_ACC_ITNL_CLK_CNT_REG 0x94
+#define CSI_CH_FIFO_STAT_REG 0x98
+#define CSI_CH_PCLK_STAT_REG 0x9c
+
+/*
+ * csi input data format
+ */
+enum csi_input_fmt {
+ CSI_INPUT_FORMAT_RAW = 0,
+ CSI_INPUT_FORMAT_YUV422 = 3,
+ CSI_INPUT_FORMAT_YUV420 = 4,
+};
+
+/*
+ * csi output data format
+ */
+enum csi_output_fmt {
+ /* only when input format is RAW */
+ CSI_FIELD_RAW_8 = 0,
+ CSI_FIELD_RAW_10 = 1,
+ CSI_FIELD_RAW_12 = 2,
+ CSI_FIELD_RGB565 = 4,
+ CSI_FIELD_RGB888 = 5,
+ CSI_FIELD_PRGB888 = 6,
+ CSI_FRAME_RAW_8 = 8,
+ CSI_FRAME_RAW_10 = 9,
+ CSI_FRAME_RAW_12 = 10,
+ CSI_FRAME_RGB565 = 12,
+ CSI_FRAME_RGB888 = 13,
+ CSI_FRAME_PRGB888 = 14,
+
+ /* only when input format is YUV422 */
+ CSI_FIELD_PLANAR_YUV422 = 0,
+ CSI_FIELD_PLANAR_YUV420 = 1,
+ CSI_FRAME_PLANAR_YUV420 = 2,
+ CSI_FRAME_PLANAR_YUV422 = 3,
+ CSI_FIELD_UV_CB_YUV422 = 4,
+ CSI_FIELD_UV_CB_YUV420 = 5,
+ CSI_FRAME_UV_CB_YUV420 = 6,
+ CSI_FRAME_UV_CB_YUV422 = 7,
+ CSI_FIELD_MB_YUV422 = 8,
+ CSI_FIELD_MB_YUV420 = 9,
+ CSI_FRAME_MB_YUV420 = 10,
+ CSI_FRAME_MB_YUV422 = 11,
+ CSI_FIELD_UV_CB_YUV422_10 = 12,
+ CSI_FIELD_UV_CB_YUV420_10 = 13,
+};
+
+/*
+ * csi YUV input data sequence
+ */
+enum csi_input_seq {
+ /* only when input format is YUV422 */
+ CSI_INPUT_SEQ_YUYV = 0,
+ CSI_INPUT_SEQ_YVYU,
+ CSI_INPUT_SEQ_UYVY,
+ CSI_INPUT_SEQ_VYUY,
+};
+
+#endif /* __SUN6I_CSI_REG_H__ */
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
new file mode 100644
index 000000000000..b04300c3811f
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2011-2018 Magewell Electronics Co., Ltd. (Nanjing)
+ * All rights reserved.
+ * Author: Yong Deng <yong.deng@magewell.com>
+ */
+
+#include <linux/of.h>
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-v4l2.h>
+
+#include "sun6i_csi.h"
+#include "sun6i_video.h"
+
+/* These values are taken from the BSP sources. */
+#define MIN_WIDTH (32)
+#define MIN_HEIGHT (32)
+#define MAX_WIDTH (4800)
+#define MAX_HEIGHT (4800)
+
+struct sun6i_csi_buffer {
+ struct vb2_v4l2_buffer vb;
+ struct list_head list;
+
+ dma_addr_t dma_addr;
+ bool queued_to_csi;
+};
+
+static const u32 supported_pixformats[] = {
+ V4L2_PIX_FMT_SBGGR8,
+ V4L2_PIX_FMT_SGBRG8,
+ V4L2_PIX_FMT_SGRBG8,
+ V4L2_PIX_FMT_SRGGB8,
+ V4L2_PIX_FMT_SBGGR10,
+ V4L2_PIX_FMT_SGBRG10,
+ V4L2_PIX_FMT_SGRBG10,
+ V4L2_PIX_FMT_SRGGB10,
+ V4L2_PIX_FMT_SBGGR12,
+ V4L2_PIX_FMT_SGBRG12,
+ V4L2_PIX_FMT_SGRBG12,
+ V4L2_PIX_FMT_SRGGB12,
+ V4L2_PIX_FMT_YUYV,
+ V4L2_PIX_FMT_YVYU,
+ V4L2_PIX_FMT_UYVY,
+ V4L2_PIX_FMT_VYUY,
+ V4L2_PIX_FMT_HM12,
+ V4L2_PIX_FMT_NV12,
+ V4L2_PIX_FMT_NV21,
+ V4L2_PIX_FMT_YUV420,
+ V4L2_PIX_FMT_YVU420,
+ V4L2_PIX_FMT_NV16,
+ V4L2_PIX_FMT_NV61,
+ V4L2_PIX_FMT_YUV422P,
+};
+
+static bool is_pixformat_valid(unsigned int pixformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_pixformats); i++)
+ if (supported_pixformats[i] == pixformat)
+ return true;
+
+ return false;
+}
+
+static struct v4l2_subdev *
+sun6i_video_remote_subdev(struct sun6i_video *video, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(&video->pad);
+
+ if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+static int sun6i_video_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers,
+ unsigned int *nplanes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct sun6i_video *video = vb2_get_drv_priv(vq);
+ unsigned int size = video->fmt.fmt.pix.sizeimage;
+
+ if (*nplanes)
+ return sizes[0] < size ? -EINVAL : 0;
+
+ *nplanes = 1;
+ sizes[0] = size;
+
+ return 0;
+}
+
+static int sun6i_video_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct sun6i_csi_buffer *buf =
+ container_of(vbuf, struct sun6i_csi_buffer, vb);
+ struct sun6i_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long size = video->fmt.fmt.pix.sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ v4l2_err(video->vdev.v4l2_dev, "buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, 0), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ buf->dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+
+ vbuf->field = video->fmt.fmt.pix.field;
+
+ return 0;
+}
+
+static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct sun6i_video *video = vb2_get_drv_priv(vq);
+ struct sun6i_csi_buffer *buf;
+ struct sun6i_csi_buffer *next_buf;
+ struct sun6i_csi_config config;
+ struct v4l2_subdev *subdev;
+ unsigned long flags;
+ int ret;
+
+ video->sequence = 0;
+
+ ret = media_pipeline_start(&video->vdev.entity, &video->vdev.pipe);
+ if (ret < 0)
+ goto clear_dma_queue;
+
+ if (video->mbus_code == 0) {
+ ret = -EINVAL;
+ goto stop_media_pipeline;
+ }
+
+ subdev = sun6i_video_remote_subdev(video, NULL);
+	if (!subdev) {
+		ret = -EINVAL;
+		goto stop_media_pipeline;
+	}
+
+ config.pixelformat = video->fmt.fmt.pix.pixelformat;
+ config.code = video->mbus_code;
+ config.field = video->fmt.fmt.pix.field;
+ config.width = video->fmt.fmt.pix.width;
+ config.height = video->fmt.fmt.pix.height;
+
+ ret = sun6i_csi_update_config(video->csi, &config);
+ if (ret < 0)
+ goto stop_media_pipeline;
+
+ spin_lock_irqsave(&video->dma_queue_lock, flags);
+
+ buf = list_first_entry(&video->dma_queue,
+ struct sun6i_csi_buffer, list);
+ buf->queued_to_csi = true;
+ sun6i_csi_update_buf_addr(video->csi, buf->dma_addr);
+
+ sun6i_csi_set_stream(video->csi, true);
+
+	/*
+	 * The CSI looks up the DMA buffer for the next frame before the
+	 * current frame done IRQ is triggered. This is not documented
+	 * but was reported by Ondřej Jirman.
+	 * The BSP code has a workaround for this too: it skips marking the
+	 * first buffer as frame done for VB2 and passes the second buffer
+	 * to the CSI in the first frame done ISR call. Then, in the second
+	 * frame done ISR call, it marks the first buffer as frame done for
+	 * VB2 and passes the third buffer to the CSI, and so on. The bad
+	 * thing is that the first buffer is written twice and the first
+	 * frame is dropped even when enough buffers are queued.
+	 * So, improve on this here: pass the next buffer to the CSI right
+	 * after starting it. This way the first frame is stored in the
+	 * first buffer and the second frame in the second buffer. This
+	 * avoids dropping the first frame; frames are still dropped when
+	 * there are not enough queued buffers.
+	 */
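+	/*
+	 * Illustrative hand-off timeline (a sketch, assuming at least
+	 * three queued buffers):
+	 *   start streaming: buf[0] and buf[1] are passed to the CSI
+	 *   frame done #1:   buf[0] returned to VB2, buf[2] passed to CSI
+	 *   frame done #2:   buf[1] returned to VB2, buf[3] passed to CSI
+	 *   and so on.
+	 */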
+ next_buf = list_next_entry(buf, list);
+ next_buf->queued_to_csi = true;
+ sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr);
+
+ spin_unlock_irqrestore(&video->dma_queue_lock, flags);
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, 1);
+ if (ret && ret != -ENOIOCTLCMD)
+ goto stop_csi_stream;
+
+ return 0;
+
+stop_csi_stream:
+ sun6i_csi_set_stream(video->csi, false);
+stop_media_pipeline:
+ media_pipeline_stop(&video->vdev.entity);
+clear_dma_queue:
+ spin_lock_irqsave(&video->dma_queue_lock, flags);
+ list_for_each_entry(buf, &video->dma_queue, list)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+ INIT_LIST_HEAD(&video->dma_queue);
+ spin_unlock_irqrestore(&video->dma_queue_lock, flags);
+
+ return ret;
+}
+
+static void sun6i_video_stop_streaming(struct vb2_queue *vq)
+{
+ struct sun6i_video *video = vb2_get_drv_priv(vq);
+ struct v4l2_subdev *subdev;
+ unsigned long flags;
+ struct sun6i_csi_buffer *buf;
+
+ subdev = sun6i_video_remote_subdev(video, NULL);
+ if (subdev)
+ v4l2_subdev_call(subdev, video, s_stream, 0);
+
+ sun6i_csi_set_stream(video->csi, false);
+
+ media_pipeline_stop(&video->vdev.entity);
+
+ /* Release all active buffers */
+ spin_lock_irqsave(&video->dma_queue_lock, flags);
+ list_for_each_entry(buf, &video->dma_queue, list)
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&video->dma_queue);
+ spin_unlock_irqrestore(&video->dma_queue_lock, flags);
+}
+
+static void sun6i_video_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct sun6i_csi_buffer *buf =
+ container_of(vbuf, struct sun6i_csi_buffer, vb);
+ struct sun6i_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&video->dma_queue_lock, flags);
+ buf->queued_to_csi = false;
+ list_add_tail(&buf->list, &video->dma_queue);
+ spin_unlock_irqrestore(&video->dma_queue_lock, flags);
+}
+
+void sun6i_video_frame_done(struct sun6i_video *video)
+{
+ struct sun6i_csi_buffer *buf;
+ struct sun6i_csi_buffer *next_buf;
+ struct vb2_v4l2_buffer *vbuf;
+
+ spin_lock(&video->dma_queue_lock);
+
+ buf = list_first_entry(&video->dma_queue,
+ struct sun6i_csi_buffer, list);
+ if (list_is_last(&buf->list, &video->dma_queue)) {
+ dev_dbg(video->csi->dev, "Frame dropped!\n");
+ goto unlock;
+ }
+
+ next_buf = list_next_entry(buf, list);
+	/*
+	 * If the next buffer (#next_buf) has not been queued to the CSI yet,
+	 * the old buffer (#buf) is still held by the CSI for storing the
+	 * next frame. So, queue the new buffer (#next_buf) to the CSI and
+	 * wait for the next ISR call.
+	 */
+ if (!next_buf->queued_to_csi) {
+ next_buf->queued_to_csi = true;
+ sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr);
+ dev_dbg(video->csi->dev, "Frame dropped!\n");
+ goto unlock;
+ }
+
+ list_del(&buf->list);
+ vbuf = &buf->vb;
+ vbuf->vb2_buf.timestamp = ktime_get_ns();
+ vbuf->sequence = video->sequence;
+ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
+
+	/* Prepare the buffer for the frame after next. */
+ if (!list_is_last(&next_buf->list, &video->dma_queue)) {
+ next_buf = list_next_entry(next_buf, list);
+ next_buf->queued_to_csi = true;
+ sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr);
+ } else {
+ dev_dbg(video->csi->dev, "Next frame will be dropped!\n");
+ }
+
+unlock:
+ video->sequence++;
+ spin_unlock(&video->dma_queue_lock);
+}
+
+static const struct vb2_ops sun6i_csi_vb2_ops = {
+ .queue_setup = sun6i_video_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = sun6i_video_buffer_prepare,
+ .start_streaming = sun6i_video_start_streaming,
+ .stop_streaming = sun6i_video_stop_streaming,
+ .buf_queue = sun6i_video_buffer_queue,
+};
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct sun6i_video *video = video_drvdata(file);
+
+ strscpy(cap->driver, "sun6i-video", sizeof(cap->driver));
+ strscpy(cap->card, video->vdev.name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ video->csi->dev->of_node->name);
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ u32 index = f->index;
+
+ if (index >= ARRAY_SIZE(supported_pixformats))
+ return -EINVAL;
+
+ f->pixelformat = supported_pixformats[index];
+
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *fmt)
+{
+ struct sun6i_video *video = video_drvdata(file);
+
+ *fmt = video->fmt;
+
+ return 0;
+}
+
+static int sun6i_video_try_fmt(struct sun6i_video *video,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format *pixfmt = &f->fmt.pix;
+ int bpp;
+
+ if (!is_pixformat_valid(pixfmt->pixelformat))
+ pixfmt->pixelformat = supported_pixformats[0];
+
+ v4l_bound_align_image(&pixfmt->width, MIN_WIDTH, MAX_WIDTH, 1,
+			      &pixfmt->height, MIN_HEIGHT, MAX_HEIGHT, 1, 1);
+
+ bpp = sun6i_csi_get_bpp(pixfmt->pixelformat);
+ pixfmt->bytesperline = (pixfmt->width * bpp) >> 3;
+ pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
+
+ if (pixfmt->field == V4L2_FIELD_ANY)
+ pixfmt->field = V4L2_FIELD_NONE;
+
+ pixfmt->colorspace = V4L2_COLORSPACE_RAW;
+ pixfmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pixfmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ pixfmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ return 0;
+}
+
+static int sun6i_video_set_fmt(struct sun6i_video *video, struct v4l2_format *f)
+{
+ int ret;
+
+ ret = sun6i_video_try_fmt(video, f);
+ if (ret)
+ return ret;
+
+ video->fmt = *f;
+
+ return 0;
+}
+
+static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct sun6i_video *video = video_drvdata(file);
+
+ if (vb2_is_busy(&video->vb2_vidq))
+ return -EBUSY;
+
+ return sun6i_video_set_fmt(video, f);
+}
+
+static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct sun6i_video *video = video_drvdata(file);
+
+ return sun6i_video_try_fmt(video, f);
+}
+
+static int vidioc_enum_input(struct file *file, void *fh,
+ struct v4l2_input *inp)
+{
+ if (inp->index != 0)
+ return -EINVAL;
+
+ strlcpy(inp->name, "camera", sizeof(inp->name));
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int vidioc_s_input(struct file *file, void *fh, unsigned int i)
+{
+ if (i != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops sun6i_video_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_g_input = vidioc_g_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_log_status = v4l2_ctrl_log_status,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 file operations
+ */
+static int sun6i_video_open(struct file *file)
+{
+ struct sun6i_video *video = video_drvdata(file);
+ int ret;
+
+ if (mutex_lock_interruptible(&video->lock))
+ return -ERESTARTSYS;
+
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto unlock;
+
+ ret = v4l2_pipeline_pm_use(&video->vdev.entity, 1);
+ if (ret < 0)
+ goto fh_release;
+
+ /* check if already powered */
+ if (!v4l2_fh_is_singular_file(file))
+ goto unlock;
+
+ ret = sun6i_csi_set_power(video->csi, true);
+ if (ret < 0)
+ goto fh_release;
+
+ mutex_unlock(&video->lock);
+ return 0;
+
+fh_release:
+ v4l2_fh_release(file);
+unlock:
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
+static int sun6i_video_close(struct file *file)
+{
+ struct sun6i_video *video = video_drvdata(file);
+ bool last_fh;
+
+ mutex_lock(&video->lock);
+
+ last_fh = v4l2_fh_is_singular_file(file);
+
+ _vb2_fop_release(file, NULL);
+
+ v4l2_pipeline_pm_use(&video->vdev.entity, 0);
+
+ if (last_fh)
+ sun6i_csi_set_power(video->csi, false);
+
+ mutex_unlock(&video->lock);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations sun6i_video_fops = {
+ .owner = THIS_MODULE,
+ .open = sun6i_video_open,
+ .release = sun6i_video_close,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+static int sun6i_video_link_validate_get_format(struct media_pad *pad,
+ struct v4l2_subdev_format *fmt)
+{
+ if (is_media_entity_v4l2_subdev(pad->entity)) {
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(pad->entity);
+
+ fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ fmt->pad = pad->index;
+ return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
+ }
+
+ return -EINVAL;
+}
+
+static int sun6i_video_link_validate(struct media_link *link)
+{
+ struct video_device *vdev = container_of(link->sink->entity,
+ struct video_device, entity);
+ struct sun6i_video *video = video_get_drvdata(vdev);
+ struct v4l2_subdev_format source_fmt;
+ int ret;
+
+ video->mbus_code = 0;
+
+ if (!media_entity_remote_pad(link->sink->entity->pads)) {
+ dev_info(video->csi->dev,
+ "video node %s pad not connected\n", vdev->name);
+ return -ENOLINK;
+ }
+
+ ret = sun6i_video_link_validate_get_format(link->source, &source_fmt);
+ if (ret < 0)
+ return ret;
+
+ if (!sun6i_csi_is_format_supported(video->csi,
+ video->fmt.fmt.pix.pixelformat,
+ source_fmt.format.code)) {
+ dev_err(video->csi->dev,
+ "Unsupported pixformat: 0x%x with mbus code: 0x%x!\n",
+ video->fmt.fmt.pix.pixelformat,
+ source_fmt.format.code);
+ return -EPIPE;
+ }
+
+ if (source_fmt.format.width != video->fmt.fmt.pix.width ||
+ source_fmt.format.height != video->fmt.fmt.pix.height) {
+ dev_err(video->csi->dev,
+ "Wrong width or height %ux%u (%ux%u expected)\n",
+ video->fmt.fmt.pix.width, video->fmt.fmt.pix.height,
+ source_fmt.format.width, source_fmt.format.height);
+ return -EPIPE;
+ }
+
+ video->mbus_code = source_fmt.format.code;
+
+ return 0;
+}
+
+static const struct media_entity_operations sun6i_video_media_ops = {
+ .link_validate = sun6i_video_link_validate
+};
+
+int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi,
+ const char *name)
+{
+ struct video_device *vdev = &video->vdev;
+ struct vb2_queue *vidq = &video->vb2_vidq;
+ struct v4l2_format fmt = { 0 };
+ int ret;
+
+ video->csi = csi;
+
+ /* Initialize the media entity... */
+ video->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
+ vdev->entity.ops = &sun6i_video_media_ops;
+ ret = media_entity_pads_init(&vdev->entity, 1, &video->pad);
+ if (ret < 0)
+ return ret;
+
+ mutex_init(&video->lock);
+
+ INIT_LIST_HEAD(&video->dma_queue);
+ spin_lock_init(&video->dma_queue_lock);
+
+ video->sequence = 0;
+
+ /* Setup default format */
+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ fmt.fmt.pix.pixelformat = supported_pixformats[0];
+ fmt.fmt.pix.width = 1280;
+ fmt.fmt.pix.height = 720;
+ fmt.fmt.pix.field = V4L2_FIELD_NONE;
+ sun6i_video_set_fmt(video, &fmt);
+
+ /* Initialize videobuf2 queue */
+ vidq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vidq->io_modes = VB2_MMAP | VB2_DMABUF;
+ vidq->drv_priv = video;
+ vidq->buf_struct_size = sizeof(struct sun6i_csi_buffer);
+ vidq->ops = &sun6i_csi_vb2_ops;
+ vidq->mem_ops = &vb2_dma_contig_memops;
+ vidq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vidq->lock = &video->lock;
+	/* Three buffers are needed to avoid dropping frames */
+ vidq->min_buffers_needed = 3;
+ vidq->dev = csi->dev;
+
+ ret = vb2_queue_init(vidq);
+ if (ret) {
+ v4l2_err(&csi->v4l2_dev, "vb2_queue_init failed: %d\n", ret);
+ goto clean_entity;
+ }
+
+ /* Register video device */
+ strlcpy(vdev->name, name, sizeof(vdev->name));
+ vdev->release = video_device_release_empty;
+ vdev->fops = &sun6i_video_fops;
+ vdev->ioctl_ops = &sun6i_video_ioctl_ops;
+ vdev->vfl_type = VFL_TYPE_GRABBER;
+ vdev->vfl_dir = VFL_DIR_RX;
+ vdev->v4l2_dev = &csi->v4l2_dev;
+ vdev->queue = vidq;
+ vdev->lock = &video->lock;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
+ video_set_drvdata(vdev, video);
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ v4l2_err(&csi->v4l2_dev,
+ "video_register_device failed: %d\n", ret);
+ goto release_vb2;
+ }
+
+ return 0;
+
+release_vb2:
+ vb2_queue_release(&video->vb2_vidq);
+clean_entity:
+ media_entity_cleanup(&video->vdev.entity);
+ mutex_destroy(&video->lock);
+ return ret;
+}
+
+void sun6i_video_cleanup(struct sun6i_video *video)
+{
+ video_unregister_device(&video->vdev);
+ media_entity_cleanup(&video->vdev.entity);
+ vb2_queue_release(&video->vb2_vidq);
+ mutex_destroy(&video->lock);
+}
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h
new file mode 100644
index 000000000000..b9cd919c24ac
--- /dev/null
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2011-2018 Magewell Electronics Co., Ltd. (Nanjing)
+ * All rights reserved.
+ * Author: Yong Deng <yong.deng@magewell.com>
+ */
+
+#ifndef __SUN6I_VIDEO_H__
+#define __SUN6I_VIDEO_H__
+
+#include <media/v4l2-dev.h>
+#include <media/videobuf2-core.h>
+
+struct sun6i_csi;
+
+struct sun6i_video {
+ struct video_device vdev;
+ struct media_pad pad;
+ struct sun6i_csi *csi;
+
+ struct mutex lock;
+
+ struct vb2_queue vb2_vidq;
+ spinlock_t dma_queue_lock;
+ struct list_head dma_queue;
+
+ unsigned int sequence;
+ struct v4l2_format fmt;
+ u32 mbus_code;
+};
+
+int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi,
+ const char *name);
+void sun6i_video_cleanup(struct sun6i_video *video);
+
+void sun6i_video_frame_done(struct sun6i_video *video);
+
+#endif /* __SUN6I_VIDEO_H__ */
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 95a093f41905..fc3c212b96e1 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -1615,7 +1615,7 @@ of_get_next_port(const struct device_node *parent,
return NULL;
}
prev = port;
- } while (of_node_cmp(port->name, "port") != 0);
+ } while (!of_node_name_eq(port, "port"));
}
return port;
@@ -1635,7 +1635,7 @@ of_get_next_endpoint(const struct device_node *parent,
if (!ep)
return NULL;
prev = ep;
- } while (of_node_cmp(ep->name, "endpoint") != 0);
+ } while (!of_node_name_eq(ep, "endpoint"));
return ep;
}
diff --git a/drivers/media/platform/vicodec/codec-fwht.c b/drivers/media/platform/vicodec/codec-fwht.c
index 36656031b295..5630f1dc45e6 100644
--- a/drivers/media/platform/vicodec/codec-fwht.c
+++ b/drivers/media/platform/vicodec/codec-fwht.c
@@ -753,31 +753,49 @@ u32 fwht_encode_frame(struct fwht_raw_frame *frm,
__be16 *rlco = cf->rlc_data;
__be16 *rlco_max;
u32 encoding;
- u32 chroma_h = frm->height / frm->height_div;
- u32 chroma_w = frm->width / frm->width_div;
- unsigned int chroma_size = chroma_h * chroma_w;
rlco_max = rlco + size / 2 - 256;
encoding = encode_plane(frm->luma, ref_frm->luma, &rlco, rlco_max, cf,
frm->height, frm->width,
- frm->luma_step, is_intra, next_is_intra);
+ frm->luma_alpha_step, is_intra, next_is_intra);
if (encoding & FWHT_FRAME_UNENCODED)
encoding |= FWHT_LUMA_UNENCODED;
encoding &= ~FWHT_FRAME_UNENCODED;
- rlco_max = rlco + chroma_size / 2 - 256;
- encoding |= encode_plane(frm->cb, ref_frm->cb, &rlco, rlco_max, cf,
- chroma_h, chroma_w,
- frm->chroma_step, is_intra, next_is_intra);
- if (encoding & FWHT_FRAME_UNENCODED)
- encoding |= FWHT_CB_UNENCODED;
- encoding &= ~FWHT_FRAME_UNENCODED;
- rlco_max = rlco + chroma_size / 2 - 256;
- encoding |= encode_plane(frm->cr, ref_frm->cr, &rlco, rlco_max, cf,
- chroma_h, chroma_w,
- frm->chroma_step, is_intra, next_is_intra);
- if (encoding & FWHT_FRAME_UNENCODED)
- encoding |= FWHT_CR_UNENCODED;
- encoding &= ~FWHT_FRAME_UNENCODED;
+
+ if (frm->components_num >= 3) {
+ u32 chroma_h = frm->height / frm->height_div;
+ u32 chroma_w = frm->width / frm->width_div;
+ unsigned int chroma_size = chroma_h * chroma_w;
+
+ rlco_max = rlco + chroma_size / 2 - 256;
+ encoding |= encode_plane(frm->cb, ref_frm->cb, &rlco, rlco_max,
+ cf, chroma_h, chroma_w,
+ frm->chroma_step,
+ is_intra, next_is_intra);
+ if (encoding & FWHT_FRAME_UNENCODED)
+ encoding |= FWHT_CB_UNENCODED;
+ encoding &= ~FWHT_FRAME_UNENCODED;
+ rlco_max = rlco + chroma_size / 2 - 256;
+ encoding |= encode_plane(frm->cr, ref_frm->cr, &rlco, rlco_max,
+ cf, chroma_h, chroma_w,
+ frm->chroma_step,
+ is_intra, next_is_intra);
+ if (encoding & FWHT_FRAME_UNENCODED)
+ encoding |= FWHT_CR_UNENCODED;
+ encoding &= ~FWHT_FRAME_UNENCODED;
+ }
+
+ if (frm->components_num == 4) {
+ rlco_max = rlco + size / 2 - 256;
+		encoding |= encode_plane(frm->alpha, ref_frm->alpha, &rlco,
+ rlco_max, cf, frm->height, frm->width,
+ frm->luma_alpha_step,
+ is_intra, next_is_intra);
+ if (encoding & FWHT_FRAME_UNENCODED)
+ encoding |= FWHT_ALPHA_UNENCODED;
+ encoding &= ~FWHT_FRAME_UNENCODED;
+ }
+
cf->size = (rlco - cf->rlc_data) * sizeof(*rlco);
return encoding;
}
@@ -836,20 +854,28 @@ static void decode_plane(struct fwht_cframe *cf, const __be16 **rlco, u8 *ref,
}
void fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
- u32 hdr_flags)
+ u32 hdr_flags, unsigned int components_num)
{
const __be16 *rlco = cf->rlc_data;
- u32 h = cf->height / 2;
- u32 w = cf->width / 2;
- if (hdr_flags & FWHT_FL_CHROMA_FULL_HEIGHT)
- h *= 2;
- if (hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH)
- w *= 2;
decode_plane(cf, &rlco, ref->luma, cf->height, cf->width,
hdr_flags & FWHT_FL_LUMA_IS_UNCOMPRESSED);
- decode_plane(cf, &rlco, ref->cb, h, w,
- hdr_flags & FWHT_FL_CB_IS_UNCOMPRESSED);
- decode_plane(cf, &rlco, ref->cr, h, w,
- hdr_flags & FWHT_FL_CR_IS_UNCOMPRESSED);
+
+ if (components_num >= 3) {
+ u32 h = cf->height;
+ u32 w = cf->width;
+
+ if (!(hdr_flags & FWHT_FL_CHROMA_FULL_HEIGHT))
+ h /= 2;
+ if (!(hdr_flags & FWHT_FL_CHROMA_FULL_WIDTH))
+ w /= 2;
+ decode_plane(cf, &rlco, ref->cb, h, w,
+ hdr_flags & FWHT_FL_CB_IS_UNCOMPRESSED);
+ decode_plane(cf, &rlco, ref->cr, h, w,
+ hdr_flags & FWHT_FL_CR_IS_UNCOMPRESSED);
+ }
+
+ if (components_num == 4)
+ decode_plane(cf, &rlco, ref->alpha, cf->height, cf->width,
+ hdr_flags & FWHT_FL_ALPHA_IS_UNCOMPRESSED);
}
diff --git a/drivers/media/platform/vicodec/codec-fwht.h b/drivers/media/platform/vicodec/codec-fwht.h
index 3e9391fec5fe..90ff8962fca7 100644
--- a/drivers/media/platform/vicodec/codec-fwht.h
+++ b/drivers/media/platform/vicodec/codec-fwht.h
@@ -56,7 +56,7 @@
#define FWHT_MAGIC1 0x4f4f4f4f
#define FWHT_MAGIC2 0xffffffff
-#define FWHT_VERSION 1
+#define FWHT_VERSION 2
/* Set if this is an interlaced format */
#define FWHT_FL_IS_INTERLACED BIT(0)
@@ -75,6 +75,11 @@
#define FWHT_FL_CR_IS_UNCOMPRESSED BIT(6)
#define FWHT_FL_CHROMA_FULL_HEIGHT BIT(7)
#define FWHT_FL_CHROMA_FULL_WIDTH BIT(8)
+#define FWHT_FL_ALPHA_IS_UNCOMPRESSED BIT(9)
+
+/* A 2-bit field holding the number of components minus 1 */
+#define FWHT_FL_COMPONENTS_NUM_MSK GENMASK(17, 16)
+#define FWHT_FL_COMPONENTS_NUM_OFFSET 16
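+
+/*
+ * Example (a sketch): a 3-component YUV stream stores 3 - 1 = 2 in this
+ * field, i.e. flags |= (3 - 1) << FWHT_FL_COMPONENTS_NUM_OFFSET; a
+ * decoder recovers the count as
+ * 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >> FWHT_FL_COMPONENTS_NUM_OFFSET).
+ */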
struct fwht_cframe_hdr {
u32 magic1;
@@ -104,9 +109,10 @@ struct fwht_raw_frame {
unsigned int width, height;
unsigned int width_div;
unsigned int height_div;
- unsigned int luma_step;
+ unsigned int luma_alpha_step;
unsigned int chroma_step;
- u8 *luma, *cb, *cr;
+ unsigned int components_num;
+ u8 *luma, *cb, *cr, *alpha;
};
#define FWHT_FRAME_PCODED BIT(0)
@@ -114,12 +120,13 @@ struct fwht_raw_frame {
#define FWHT_LUMA_UNENCODED BIT(2)
#define FWHT_CB_UNENCODED BIT(3)
#define FWHT_CR_UNENCODED BIT(4)
+#define FWHT_ALPHA_UNENCODED BIT(5)
u32 fwht_encode_frame(struct fwht_raw_frame *frm,
struct fwht_raw_frame *ref_frm,
struct fwht_cframe *cf,
bool is_intra, bool next_is_intra);
void fwht_decode_frame(struct fwht_cframe *cf, struct fwht_raw_frame *ref,
- u32 hdr_flags);
+ u32 hdr_flags, unsigned int components_num);
#endif
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
index e5b68fb38aac..8cb0212df67f 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c
+++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
@@ -11,27 +11,30 @@
#include "codec-v4l2-fwht.h"
static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
- { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2 },
- { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2 },
- { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1 },
- { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2 },
- { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2 },
- { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1 },
- { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1 },
- { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1 },
- { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1 },
- { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1 },
- { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1 },
- { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1 },
- { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1 },
- { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1 },
- { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1 },
- { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1 },
- { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1 },
- { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1 },
- { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1 },
- { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1 },
- { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1 },
+ { V4L2_PIX_FMT_YUV420, 1, 3, 2, 1, 1, 2, 2, 3},
+ { V4L2_PIX_FMT_YVU420, 1, 3, 2, 1, 1, 2, 2, 3},
+ { V4L2_PIX_FMT_YUV422P, 1, 2, 1, 1, 1, 2, 1, 3},
+ { V4L2_PIX_FMT_NV12, 1, 3, 2, 1, 2, 2, 2, 3},
+ { V4L2_PIX_FMT_NV21, 1, 3, 2, 1, 2, 2, 2, 3},
+ { V4L2_PIX_FMT_NV16, 1, 2, 1, 1, 2, 2, 1, 3},
+ { V4L2_PIX_FMT_NV61, 1, 2, 1, 1, 2, 2, 1, 3},
+ { V4L2_PIX_FMT_NV24, 1, 3, 1, 1, 2, 1, 1, 3},
+ { V4L2_PIX_FMT_NV42, 1, 3, 1, 1, 2, 1, 1, 3},
+ { V4L2_PIX_FMT_YUYV, 2, 2, 1, 2, 4, 2, 1, 3},
+ { V4L2_PIX_FMT_YVYU, 2, 2, 1, 2, 4, 2, 1, 3},
+ { V4L2_PIX_FMT_UYVY, 2, 2, 1, 2, 4, 2, 1, 3},
+ { V4L2_PIX_FMT_VYUY, 2, 2, 1, 2, 4, 2, 1, 3},
+ { V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3},
+ { V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3},
+ { V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3},
+ { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3},
+ { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3},
+ { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3},
+ { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3},
+ { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3},
+ { V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4},
+ { V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4},
+ { V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1},
};
const struct v4l2_fwht_pixfmt_info *v4l2_fwht_find_pixfmt(u32 pixelformat)
@@ -68,10 +71,16 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
rf.luma = p_in;
rf.width_div = info->width_div;
rf.height_div = info->height_div;
- rf.luma_step = info->luma_step;
+ rf.luma_alpha_step = info->luma_alpha_step;
rf.chroma_step = info->chroma_step;
+ rf.alpha = NULL;
+ rf.components_num = info->components_num;
switch (info->id) {
+ case V4L2_PIX_FMT_GREY:
+ rf.cb = NULL;
+ rf.cr = NULL;
+ break;
case V4L2_PIX_FMT_YUV420:
rf.cb = rf.luma + size;
rf.cr = rf.cb + size / 4;
@@ -138,6 +147,18 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
rf.cr = rf.cb + 2;
rf.luma++;
break;
+ case V4L2_PIX_FMT_ARGB32:
+ rf.alpha = rf.luma;
+ rf.cr = rf.luma + 1;
+ rf.cb = rf.cr + 2;
+ rf.luma += 2;
+ break;
+ case V4L2_PIX_FMT_ABGR32:
+ rf.cb = rf.luma;
+ rf.cr = rf.cb + 2;
+ rf.luma++;
+ rf.alpha = rf.cr + 1;
+ break;
default:
return -EINVAL;
}
@@ -162,12 +183,15 @@ int v4l2_fwht_encode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
p_hdr->version = htonl(FWHT_VERSION);
p_hdr->width = htonl(cf.width);
p_hdr->height = htonl(cf.height);
+ flags |= (info->components_num - 1) << FWHT_FL_COMPONENTS_NUM_OFFSET;
if (encoding & FWHT_LUMA_UNENCODED)
flags |= FWHT_FL_LUMA_IS_UNCOMPRESSED;
if (encoding & FWHT_CB_UNENCODED)
flags |= FWHT_FL_CB_IS_UNCOMPRESSED;
if (encoding & FWHT_CR_UNENCODED)
flags |= FWHT_FL_CR_IS_UNCOMPRESSED;
+ if (encoding & FWHT_ALPHA_UNENCODED)
+ flags |= FWHT_FL_ALPHA_IS_UNCOMPRESSED;
if (rf.height_div == 1)
flags |= FWHT_FL_CHROMA_FULL_HEIGHT;
if (rf.width_div == 1)
@@ -192,6 +216,8 @@ int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
struct fwht_cframe_hdr *p_hdr;
struct fwht_cframe cf;
u8 *p;
+ unsigned int components_num = 3;
+ unsigned int version;
if (!state->info)
return -EINVAL;
@@ -199,16 +225,16 @@ int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
p_hdr = (struct fwht_cframe_hdr *)p_in;
cf.width = ntohl(p_hdr->width);
cf.height = ntohl(p_hdr->height);
- flags = ntohl(p_hdr->flags);
- state->colorspace = ntohl(p_hdr->colorspace);
- state->xfer_func = ntohl(p_hdr->xfer_func);
- state->ycbcr_enc = ntohl(p_hdr->ycbcr_enc);
- state->quantization = ntohl(p_hdr->quantization);
- cf.rlc_data = (__be16 *)(p_in + sizeof(*p_hdr));
+
+ version = ntohl(p_hdr->version);
+ if (!version || version > FWHT_VERSION) {
+ pr_err("version %d is not supported, current version is %d\n",
+ version, FWHT_VERSION);
+ return -EINVAL;
+ }
if (p_hdr->magic1 != FWHT_MAGIC1 ||
p_hdr->magic2 != FWHT_MAGIC2 ||
- ntohl(p_hdr->version) != FWHT_VERSION ||
(cf.width & 7) || (cf.height & 7))
return -EINVAL;
@@ -216,14 +242,34 @@ int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
if (cf.width != state->width || cf.height != state->height)
return -EINVAL;
+ flags = ntohl(p_hdr->flags);
+
+ if (version == FWHT_VERSION) {
+ components_num = 1 + ((flags & FWHT_FL_COMPONENTS_NUM_MSK) >>
+ FWHT_FL_COMPONENTS_NUM_OFFSET);
+ }
+
+ state->colorspace = ntohl(p_hdr->colorspace);
+ state->xfer_func = ntohl(p_hdr->xfer_func);
+ state->ycbcr_enc = ntohl(p_hdr->ycbcr_enc);
+ state->quantization = ntohl(p_hdr->quantization);
+ cf.rlc_data = (__be16 *)(p_in + sizeof(*p_hdr));
+
if (!(flags & FWHT_FL_CHROMA_FULL_WIDTH))
chroma_size /= 2;
if (!(flags & FWHT_FL_CHROMA_FULL_HEIGHT))
chroma_size /= 2;
- fwht_decode_frame(&cf, &state->ref_frame, flags);
+ fwht_decode_frame(&cf, &state->ref_frame, flags, components_num);
+ /*
+ * TODO - handle the case where the compressed stream encodes a
+ * different format than the requested decoded format.
+ */
switch (state->info->id) {
+ case V4L2_PIX_FMT_GREY:
+ memcpy(p_out, state->ref_frame.luma, size);
+ break;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YUV422P:
memcpy(p_out, state->ref_frame.luma, size);
@@ -325,6 +371,22 @@ int v4l2_fwht_decode(struct v4l2_fwht_state *state, u8 *p_in, u8 *p_out)
*p++ = 0;
}
break;
+ case V4L2_PIX_FMT_ARGB32:
+ for (i = 0, p = p_out; i < size; i++) {
+ *p++ = state->ref_frame.alpha[i];
+ *p++ = state->ref_frame.cr[i];
+ *p++ = state->ref_frame.luma[i];
+ *p++ = state->ref_frame.cb[i];
+ }
+ break;
+ case V4L2_PIX_FMT_ABGR32:
+ for (i = 0, p = p_out; i < size; i++) {
+ *p++ = state->ref_frame.cb[i];
+ *p++ = state->ref_frame.luma[i];
+ *p++ = state->ref_frame.cr[i];
+ *p++ = state->ref_frame.alpha[i];
+ }
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.h b/drivers/media/platform/vicodec/codec-v4l2-fwht.h
index 162465b78067..ed53e28d4f9c 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.h
+++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.h
@@ -13,11 +13,12 @@ struct v4l2_fwht_pixfmt_info {
unsigned int bytesperline_mult;
unsigned int sizeimage_mult;
unsigned int sizeimage_div;
- unsigned int luma_step;
+ unsigned int luma_alpha_step;
unsigned int chroma_step;
/* Chroma plane subsampling */
unsigned int width_div;
unsigned int height_div;
+ unsigned int components_num;
};
struct v4l2_fwht_state {
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c
index b292cff26c86..0d7876f5acf0 100644
--- a/drivers/media/platform/vicodec/vicodec-core.c
+++ b/drivers/media/platform/vicodec/vicodec-core.c
@@ -61,7 +61,7 @@ struct pixfmt_info {
};
static const struct v4l2_fwht_pixfmt_info pixfmt_fwht = {
- V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1
+ V4L2_PIX_FMT_FWHT, 0, 3, 1, 1, 1, 1, 1, 0
};
static void vicodec_dev_release(struct device *dev)
@@ -151,52 +151,52 @@ static struct vicodec_q_data *get_q_data(struct vicodec_ctx *ctx,
}
static int device_process(struct vicodec_ctx *ctx,
- struct vb2_v4l2_buffer *in_vb,
- struct vb2_v4l2_buffer *out_vb)
+ struct vb2_v4l2_buffer *src_vb,
+ struct vb2_v4l2_buffer *dst_vb)
{
struct vicodec_dev *dev = ctx->dev;
- struct vicodec_q_data *q_cap;
+ struct vicodec_q_data *q_dst;
struct v4l2_fwht_state *state = &ctx->state;
- u8 *p_in, *p_out;
+ u8 *p_src, *p_dst;
int ret;
- q_cap = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ q_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
if (ctx->is_enc)
- p_in = vb2_plane_vaddr(&in_vb->vb2_buf, 0);
+ p_src = vb2_plane_vaddr(&src_vb->vb2_buf, 0);
else
- p_in = state->compressed_frame;
- p_out = vb2_plane_vaddr(&out_vb->vb2_buf, 0);
- if (!p_in || !p_out) {
+ p_src = state->compressed_frame;
+ p_dst = vb2_plane_vaddr(&dst_vb->vb2_buf, 0);
+ if (!p_src || !p_dst) {
v4l2_err(&dev->v4l2_dev,
"Acquiring kernel pointers to buffers failed\n");
return -EFAULT;
}
if (ctx->is_enc) {
- struct vicodec_q_data *q_out;
+ struct vicodec_q_data *q_src;
- q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- state->info = q_out->info;
- ret = v4l2_fwht_encode(state, p_in, p_out);
+ q_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ state->info = q_src->info;
+ ret = v4l2_fwht_encode(state, p_src, p_dst);
if (ret < 0)
return ret;
- vb2_set_plane_payload(&out_vb->vb2_buf, 0, ret);
+ vb2_set_plane_payload(&dst_vb->vb2_buf, 0, ret);
} else {
- state->info = q_cap->info;
- ret = v4l2_fwht_decode(state, p_in, p_out);
+ state->info = q_dst->info;
+ ret = v4l2_fwht_decode(state, p_src, p_dst);
if (ret < 0)
return ret;
- vb2_set_plane_payload(&out_vb->vb2_buf, 0, q_cap->sizeimage);
+ vb2_set_plane_payload(&dst_vb->vb2_buf, 0, q_dst->sizeimage);
}
- out_vb->sequence = q_cap->sequence++;
- out_vb->vb2_buf.timestamp = in_vb->vb2_buf.timestamp;
+ dst_vb->sequence = q_dst->sequence++;
+ dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
- if (in_vb->flags & V4L2_BUF_FLAG_TIMECODE)
- out_vb->timecode = in_vb->timecode;
- out_vb->field = in_vb->field;
- out_vb->flags &= ~V4L2_BUF_FLAG_LAST;
- out_vb->flags |= in_vb->flags &
+ if (src_vb->flags & V4L2_BUF_FLAG_TIMECODE)
+ dst_vb->timecode = src_vb->timecode;
+ dst_vb->field = src_vb->field;
+ dst_vb->flags &= ~V4L2_BUF_FLAG_LAST;
+ dst_vb->flags |= src_vb->flags &
(V4L2_BUF_FLAG_TIMECODE |
V4L2_BUF_FLAG_KEYFRAME |
V4L2_BUF_FLAG_PFRAME |
@@ -219,12 +219,12 @@ static void device_run(void *priv)
struct vicodec_ctx *ctx = priv;
struct vicodec_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
- struct vicodec_q_data *q_out;
+ struct vicodec_q_data *q_src;
u32 state;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
state = VB2_BUF_STATE_DONE;
if (device_process(ctx, src_buf, dst_buf))
@@ -237,11 +237,11 @@ static void device_run(void *priv)
v4l2_event_queue_fh(&ctx->fh, &eos_event);
}
if (ctx->is_enc) {
- src_buf->sequence = q_out->sequence++;
+ src_buf->sequence = q_src->sequence++;
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_buf_done(src_buf, state);
} else if (vb2_get_plane_payload(&src_buf->vb2_buf, 0) == ctx->cur_buf_offset) {
- src_buf->sequence = q_out->sequence++;
+ src_buf->sequence = q_src->sequence++;
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_buf_done(src_buf, state);
ctx->cur_buf_offset = 0;
@@ -259,15 +259,15 @@ static void device_run(void *priv)
v4l2_m2m_job_finish(dev->dec_dev, ctx->fh.m2m_ctx);
}
-static void job_remove_out_buf(struct vicodec_ctx *ctx, u32 state)
+static void job_remove_src_buf(struct vicodec_ctx *ctx, u32 state)
{
struct vb2_v4l2_buffer *src_buf;
- struct vicodec_q_data *q_out;
+ struct vicodec_q_data *q_src;
- q_out = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
spin_lock(ctx->lock);
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
- src_buf->sequence = q_out->sequence++;
+ src_buf->sequence = q_src->sequence++;
v4l2_m2m_buf_done(src_buf, state);
ctx->cur_buf_offset = 0;
spin_unlock(ctx->lock);
@@ -280,7 +280,7 @@ static int job_ready(void *priv)
};
struct vicodec_ctx *ctx = priv;
struct vb2_v4l2_buffer *src_buf;
- u8 *p_out;
+ u8 *p_src;
u8 *p;
u32 sz;
u32 state;
@@ -293,25 +293,27 @@ restart:
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
if (!src_buf)
return 0;
- p_out = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ p_src = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
sz = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
- p = p_out + ctx->cur_buf_offset;
+ p = p_src + ctx->cur_buf_offset;
state = VB2_BUF_STATE_DONE;
if (!ctx->comp_size) {
state = VB2_BUF_STATE_ERROR;
- for (; p < p_out + sz; p++) {
+ for (; p < p_src + sz; p++) {
u32 copy;
- p = memchr(p, magic[ctx->comp_magic_cnt], sz);
+ p = memchr(p, magic[ctx->comp_magic_cnt],
+ p_src + sz - p);
if (!p) {
ctx->comp_magic_cnt = 0;
break;
}
copy = sizeof(magic) - ctx->comp_magic_cnt;
- if (p_out + sz - p < copy)
- copy = p_out + sz - p;
+ if (p_src + sz - p < copy)
+ copy = p_src + sz - p;
+
memcpy(ctx->state.compressed_frame + ctx->comp_magic_cnt,
p, copy);
ctx->comp_magic_cnt += copy;
@@ -324,7 +326,7 @@ restart:
ctx->comp_magic_cnt = 0;
}
if (ctx->comp_magic_cnt < sizeof(magic)) {
- job_remove_out_buf(ctx, state);
+ job_remove_src_buf(ctx, state);
goto restart;
}
ctx->comp_size = sizeof(magic);
@@ -334,14 +336,14 @@ restart:
(struct fwht_cframe_hdr *)ctx->state.compressed_frame;
u32 copy = sizeof(struct fwht_cframe_hdr) - ctx->comp_size;
- if (copy > p_out + sz - p)
- copy = p_out + sz - p;
+ if (copy > p_src + sz - p)
+ copy = p_src + sz - p;
memcpy(ctx->state.compressed_frame + ctx->comp_size,
p, copy);
p += copy;
ctx->comp_size += copy;
if (ctx->comp_size < sizeof(struct fwht_cframe_hdr)) {
- job_remove_out_buf(ctx, state);
+ job_remove_src_buf(ctx, state);
goto restart;
}
ctx->comp_frame_size = ntohl(p_hdr->size) + sizeof(*p_hdr);
@@ -351,18 +353,19 @@ restart:
if (ctx->comp_size < ctx->comp_frame_size) {
u32 copy = ctx->comp_frame_size - ctx->comp_size;
- if (copy > p_out + sz - p)
- copy = p_out + sz - p;
+ if (copy > p_src + sz - p)
+ copy = p_src + sz - p;
+
memcpy(ctx->state.compressed_frame + ctx->comp_size,
p, copy);
p += copy;
ctx->comp_size += copy;
if (ctx->comp_size < ctx->comp_frame_size) {
- job_remove_out_buf(ctx, state);
+ job_remove_src_buf(ctx, state);
goto restart;
}
}
- ctx->cur_buf_offset = p - p_out;
+ ctx->cur_buf_offset = p - p_src;
ctx->comp_has_frame = true;
ctx->comp_has_next_frame = false;
if (sz - ctx->cur_buf_offset >= sizeof(struct fwht_cframe_hdr)) {
@@ -397,11 +400,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->card, VICODEC_NAME, sizeof(cap->card) - 1);
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", VICODEC_NAME);
- cap->device_caps = V4L2_CAP_STREAMING |
- (multiplanar ?
- V4L2_CAP_VIDEO_M2M_MPLANE :
- V4L2_CAP_VIDEO_M2M);
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -993,19 +991,36 @@ static int vicodec_start_streaming(struct vb2_queue *q,
unsigned int size = q_data->width * q_data->height;
const struct v4l2_fwht_pixfmt_info *info = q_data->info;
unsigned int chroma_div = info->width_div * info->height_div;
+ unsigned int total_planes_size;
+
+	/*
+	 * We don't know in advance how many components an encoded
+	 * V4L2_PIX_FMT_FWHT stream carries, so allocate space for 4 planes.
+	 */
+ if (info->id == V4L2_PIX_FMT_FWHT || info->components_num == 4)
+ total_planes_size = 2 * size + 2 * (size / chroma_div);
+ else if (info->components_num == 3)
+ total_planes_size = size + 2 * (size / chroma_div);
+ else
+ total_planes_size = size;
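+
+	/*
+	 * Worked example (a sketch): for a 640x480 V4L2_PIX_FMT_YUV420
+	 * frame, size = 307200 and chroma_div = 2 * 2 = 4, so
+	 * total_planes_size = 307200 + 2 * 76800 = 460800 bytes.
+	 */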
q_data->sequence = 0;
- if (!V4L2_TYPE_IS_OUTPUT(q->type))
+ if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
+ if (!ctx->is_enc) {
+ state->width = q_data->width;
+ state->height = q_data->height;
+ }
return 0;
+ }
- state->width = q_data->width;
- state->height = q_data->height;
+ if (ctx->is_enc) {
+ state->width = q_data->width;
+ state->height = q_data->height;
+ }
state->ref_frame.width = state->ref_frame.height = 0;
- state->ref_frame.luma = kvmalloc(size + 2 * size / chroma_div,
- GFP_KERNEL);
- ctx->comp_max_size = size + 2 * size / chroma_div +
- sizeof(struct fwht_cframe_hdr);
+ state->ref_frame.luma = kvmalloc(total_planes_size, GFP_KERNEL);
+ ctx->comp_max_size = total_planes_size + sizeof(struct fwht_cframe_hdr);
state->compressed_frame = kvmalloc(ctx->comp_max_size, GFP_KERNEL);
if (!state->ref_frame.luma || !state->compressed_frame) {
kvfree(state->ref_frame.luma);
@@ -1013,8 +1028,20 @@ static int vicodec_start_streaming(struct vb2_queue *q,
vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
return -ENOMEM;
}
- state->ref_frame.cb = state->ref_frame.luma + size;
- state->ref_frame.cr = state->ref_frame.cb + size / chroma_div;
+ if (info->id == V4L2_PIX_FMT_FWHT || info->components_num >= 3) {
+ state->ref_frame.cb = state->ref_frame.luma + size;
+ state->ref_frame.cr = state->ref_frame.cb + size / chroma_div;
+ } else {
+ state->ref_frame.cb = NULL;
+ state->ref_frame.cr = NULL;
+ }
+
+ if (info->id == V4L2_PIX_FMT_FWHT || info->components_num == 4)
+ state->ref_frame.alpha =
+ state->ref_frame.cr + size / chroma_div;
+ else
+ state->ref_frame.alpha = NULL;
+
ctx->last_src_buf = NULL;
ctx->last_dst_buf = NULL;
state->gop_cnt = 0;
@@ -1108,7 +1135,7 @@ static int vicodec_s_ctrl(struct v4l2_ctrl *ctrl)
return -EINVAL;
}
-static struct v4l2_ctrl_ops vicodec_ctrl_ops = {
+static const struct v4l2_ctrl_ops vicodec_ctrl_ops = {
.s_ctrl = vicodec_s_ctrl,
};
@@ -1311,6 +1338,8 @@ static int vicodec_probe(struct platform_device *pdev)
vfd->lock = &dev->enc_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
strscpy(vfd->name, "vicodec-enc", sizeof(vfd->name));
+ vfd->device_caps = V4L2_CAP_STREAMING |
+ (multiplanar ? V4L2_CAP_VIDEO_M2M_MPLANE : V4L2_CAP_VIDEO_M2M);
v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
video_set_drvdata(vfd, dev);
@@ -1327,6 +1356,8 @@ static int vicodec_probe(struct platform_device *pdev)
vfd = &dev->dec_vfd;
vfd->lock = &dev->dec_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
+ vfd->device_caps = V4L2_CAP_STREAMING |
+ (multiplanar ? V4L2_CAP_VIDEO_M2M_MPLANE : V4L2_CAP_VIDEO_M2M);
strscpy(vfd->name, "vicodec-dec", sizeof(vfd->name));
v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index d82db738f174..d01821a6906a 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -438,8 +438,6 @@ static int vidioc_querycap(struct file *file, void *priv,
strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
snprintf(cap->bus_info, sizeof(cap->bus_info),
"platform:%s", MEM2MEM_NAME);
- cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
- cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
@@ -805,10 +803,11 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
static void vim2m_stop_streaming(struct vb2_queue *q)
{
struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct vim2m_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *vbuf;
unsigned long flags;
- flush_scheduled_work();
+ cancel_delayed_work_sync(&dev->work_run);
for (;;) {
if (V4L2_TYPE_IS_OUTPUT(q->type))
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
@@ -999,6 +998,7 @@ static const struct video_device vim2m_videodev = {
.ioctl_ops = &vim2m_ioctl_ops,
.minor = -1,
.release = video_device_release_empty,
+ .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
};
static const struct v4l2_m2m_ops m2m_ops = {
diff --git a/drivers/media/platform/vimc/vimc-common.c b/drivers/media/platform/vimc/vimc-common.c
index dee1b9dfc4f6..867e24dbd6b5 100644
--- a/drivers/media/platform/vimc/vimc-common.c
+++ b/drivers/media/platform/vimc/vimc-common.c
@@ -276,6 +276,8 @@ int vimc_pipeline_s_stream(struct media_entity *ent, int enable)
/* Start the stream in the subdevice direct connected */
pad = media_entity_remote_pad(&ent->pads[i]);
+ if (!pad)
+ continue;
if (!is_media_entity_v4l2_subdev(pad->entity))
return -EINVAL;
diff --git a/drivers/media/platform/vimc/vimc-sensor.c b/drivers/media/platform/vimc/vimc-sensor.c
index edf4c85ae63d..32ca9c6172b1 100644
--- a/drivers/media/platform/vimc/vimc-sensor.c
+++ b/drivers/media/platform/vimc/vimc-sensor.c
@@ -286,7 +286,7 @@ static int vimc_sen_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static struct v4l2_subdev_core_ops vimc_sen_core_ops = {
+static const struct v4l2_subdev_core_ops vimc_sen_core_ops = {
.log_status = v4l2_ctrl_subdev_log_status,
.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 626e2b24a403..c931f007e5b0 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -324,13 +324,14 @@ static int vidioc_s_dv_timings(struct file *file, void *fh, struct v4l2_dv_timin
return vivid_vid_out_s_dv_timings(file, fh, timings);
}
-static int vidioc_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cc)
+static int vidioc_g_pixelaspect(struct file *file, void *fh,
+ int type, struct v4l2_fract *f)
{
struct video_device *vdev = video_devdata(file);
if (vdev->vfl_dir == VFL_DIR_RX)
- return vivid_vid_cap_cropcap(file, fh, cc);
- return vivid_vid_out_cropcap(file, fh, cc);
+ return vivid_vid_cap_g_pixelaspect(file, fh, type, f);
+ return vivid_vid_out_g_pixelaspect(file, fh, type, f);
}
static int vidioc_g_selection(struct file *file, void *fh,
@@ -519,7 +520,7 @@ static const struct v4l2_ioctl_ops vivid_ioctl_ops = {
.vidioc_g_selection = vidioc_g_selection,
.vidioc_s_selection = vidioc_s_selection,
- .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_pixelaspect = vidioc_g_pixelaspect,
.vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
.vidioc_try_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
@@ -624,12 +625,24 @@ static void vivid_dev_release(struct v4l2_device *v4l2_dev)
vfree(dev->bitmap_out);
tpg_free(&dev->tpg);
kfree(dev->query_dv_timings_qmenu);
+ kfree(dev->query_dv_timings_qmenu_strings);
kfree(dev);
}
#ifdef CONFIG_MEDIA_CONTROLLER
+static int vivid_req_validate(struct media_request *req)
+{
+ struct vivid_dev *dev = container_of(req->mdev, struct vivid_dev, mdev);
+
+ if (dev->req_validate_error) {
+ dev->req_validate_error = false;
+ return -EINVAL;
+ }
+ return vb2_request_validate(req);
+}
+
static const struct media_device_ops vivid_media_ops = {
- .req_validate = vb2_request_validate,
+ .req_validate = vivid_req_validate,
.req_queue = vb2_request_queue,
};
#endif
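
The req_validate hook above is one-shot fault injection: the button control arms a flag and the next request validation consumes it, failing exactly once. A minimal model of the pattern (hypothetical, outside the driver):

    #include <stdbool.h>

    /* Returns true exactly once after the flag has been armed. */
    static bool consume_injected_error(bool *armed)
    {
            if (!*armed)
                    return false;
            *armed = false;         /* fire once, then behave normally */
            return true;
    }
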
@@ -669,6 +682,8 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
/* Initialize media device */
strlcpy(dev->mdev.model, VIVID_MODULE_NAME, sizeof(dev->mdev.model));
+ snprintf(dev->mdev.bus_info, sizeof(dev->mdev.bus_info),
+ "platform:%s-%03d", VIVID_MODULE_NAME, inst);
dev->mdev.dev = &pdev->dev;
media_device_init(&dev->mdev);
dev->mdev.ops = &vivid_media_ops;
@@ -873,20 +888,31 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
if (!dev->edid)
goto free_dev;
- /* create a string array containing the names of all the preset timings */
while (v4l2_dv_timings_presets[dev->query_dv_timings_size].bt.width)
dev->query_dv_timings_size++;
+
+ /*
+ * Create a char pointer array that points to the names of all the
+ * preset timings
+ */
dev->query_dv_timings_qmenu = kmalloc_array(dev->query_dv_timings_size,
- (sizeof(void *) + 32),
- GFP_KERNEL);
- if (dev->query_dv_timings_qmenu == NULL)
+ sizeof(char *), GFP_KERNEL);
+ /*
+ * Create a string array containing the names of all the preset
+ * timings. Each name is max 31 chars long (+ terminating 0).
+ */
+ dev->query_dv_timings_qmenu_strings =
+ kmalloc_array(dev->query_dv_timings_size, 32, GFP_KERNEL);
+
+ if (!dev->query_dv_timings_qmenu ||
+ !dev->query_dv_timings_qmenu_strings)
goto free_dev;
+
for (i = 0; i < dev->query_dv_timings_size; i++) {
const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
- char *p = (char *)&dev->query_dv_timings_qmenu[dev->query_dv_timings_size];
+ char *p = dev->query_dv_timings_qmenu_strings + i * 32;
u32 htot, vtot;
- p += i * 32;
dev->query_dv_timings_qmenu[i] = p;
htot = V4L2_DV_BT_FRAME_WIDTH(bt);
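
The allocation change above replaces one mixed block (a pointer array with string storage appended) by two plain arrays, so entry i simply points into its own 32-byte name slot. A sketch of the layout (hypothetical helper, not the driver code):

    #include <linux/slab.h>

    static char **alloc_name_menu(unsigned int n, char **strings_ret)
    {
            char **menu = kmalloc_array(n, sizeof(char *), GFP_KERNEL);
            char *strings = kmalloc_array(n, 32, GFP_KERNEL); /* 31 chars + '\0' */
            unsigned int i;

            if (!menu || !strings) {
                    kfree(menu);
                    kfree(strings);
                    return NULL;
            }
            for (i = 0; i < n; i++)
                    menu[i] = strings + i * 32;
            *strings_ret = strings;         /* caller frees both blocks */
            return menu;
    }
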
diff --git a/drivers/media/platform/vivid/vivid-core.h b/drivers/media/platform/vivid/vivid-core.h
index 1891254c8f0b..6697c7009629 100644
--- a/drivers/media/platform/vivid/vivid-core.h
+++ b/drivers/media/platform/vivid/vivid-core.h
@@ -294,6 +294,7 @@ struct vivid_dev {
bool buf_prepare_error;
bool start_streaming_error;
bool dqbuf_error;
+ bool req_validate_error;
bool seq_wrap;
bool time_wrap;
u64 time_wrap_offset;
@@ -305,6 +306,7 @@ struct vivid_dev {
enum vivid_signal_mode dv_timings_signal_mode;
char **query_dv_timings_qmenu;
+ char *query_dv_timings_qmenu_strings;
unsigned query_dv_timings_size;
unsigned query_dv_timings_last;
unsigned query_dv_timings;
@@ -392,6 +394,9 @@ struct vivid_dev {
/* thread for generating video capture stream */
struct task_struct *kthread_vid_cap;
unsigned long jiffies_vid_cap;
+ u64 cap_stream_start;
+ u64 cap_frame_period;
+ u64 cap_frame_eof_offset;
u32 cap_seq_offset;
u32 cap_seq_count;
bool cap_seq_resync;
diff --git a/drivers/media/platform/vivid/vivid-ctrls.c b/drivers/media/platform/vivid/vivid-ctrls.c
index bfffeda12f14..4cd526ff248b 100644
--- a/drivers/media/platform/vivid/vivid-ctrls.c
+++ b/drivers/media/platform/vivid/vivid-ctrls.c
@@ -81,6 +81,7 @@
#define VIVID_CID_START_STR_ERROR (VIVID_CID_VIVID_BASE + 69)
#define VIVID_CID_QUEUE_ERROR (VIVID_CID_VIVID_BASE + 70)
#define VIVID_CID_CLEAR_FB (VIVID_CID_VIVID_BASE + 71)
+#define VIVID_CID_REQ_VALIDATE_ERROR (VIVID_CID_VIVID_BASE + 72)
#define VIVID_CID_RADIO_SEEK_MODE (VIVID_CID_VIVID_BASE + 90)
#define VIVID_CID_RADIO_SEEK_PROG_LIM (VIVID_CID_VIVID_BASE + 91)
@@ -1002,6 +1003,9 @@ static int vivid_streaming_s_ctrl(struct v4l2_ctrl *ctrl)
case VIVID_CID_START_STR_ERROR:
dev->start_streaming_error = true;
break;
+ case VIVID_CID_REQ_VALIDATE_ERROR:
+ dev->req_validate_error = true;
+ break;
case VIVID_CID_QUEUE_ERROR:
if (vb2_start_streaming_called(&dev->vb_vid_cap_q))
vb2_queue_error(&dev->vb_vid_cap_q);
@@ -1087,6 +1091,15 @@ static const struct v4l2_ctrl_config vivid_ctrl_queue_error = {
.type = V4L2_CTRL_TYPE_BUTTON,
};
+#ifdef CONFIG_MEDIA_CONTROLLER
+static const struct v4l2_ctrl_config vivid_ctrl_req_validate_error = {
+ .ops = &vivid_streaming_ctrl_ops,
+ .id = VIVID_CID_REQ_VALIDATE_ERROR,
+ .name = "Inject req_validate() Error",
+ .type = V4L2_CTRL_TYPE_BUTTON,
+};
+#endif
+
static const struct v4l2_ctrl_config vivid_ctrl_seq_wrap = {
.ops = &vivid_streaming_ctrl_ops,
.id = VIVID_CID_SEQ_WRAP,
@@ -1516,6 +1529,9 @@ int vivid_create_controls(struct vivid_dev *dev, bool show_ccs_cap,
v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_buf_prepare_error, NULL);
v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_start_streaming_error, NULL);
v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_queue_error, NULL);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_req_validate_error, NULL);
+#endif
v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_seq_wrap, NULL);
v4l2_ctrl_new_custom(hdl_streaming, &vivid_ctrl_time_wrap, NULL);
}
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index eebfff2126be..f8006a30c12f 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -425,12 +425,6 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
is_loop = true;
buf->vb.sequence = dev->vid_cap_seq_count;
- /*
- * Take the timestamp now if the timestamp source is set to
- * "Start of Exposure".
- */
- if (dev->tstamp_src_is_soe)
- buf->vb.vb2_buf.timestamp = ktime_get_ns();
if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
/*
* 60 Hz standards start with the bottom field, 50 Hz standards
@@ -554,14 +548,6 @@ static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
}
}
}
-
- /*
- * If "End of Frame" is specified at the timestamp source, then take
- * the timestamp now.
- */
- if (!dev->tstamp_src_is_soe)
- buf->vb.vb2_buf.timestamp = ktime_get_ns();
- buf->vb.vb2_buf.timestamp += dev->time_wrap_offset;
}
/*
@@ -667,10 +653,28 @@ static void vivid_overlay(struct vivid_dev *dev, struct vivid_buffer *buf)
}
}
+static void vivid_cap_update_frame_period(struct vivid_dev *dev)
+{
+ u64 f_period;
+
+ f_period = (u64)dev->timeperframe_vid_cap.numerator * 1000000000;
+ do_div(f_period, dev->timeperframe_vid_cap.denominator);
+ if (dev->field_cap == V4L2_FIELD_ALTERNATE)
+ f_period >>= 1;
+	/*
+	 * If the timestamp source is "End of Frame", then offset the
+	 * timestamp by 0.9 of the frame period.
+	 */
+ dev->cap_frame_eof_offset = f_period * 9;
+ do_div(dev->cap_frame_eof_offset, 10);
+ dev->cap_frame_period = f_period;
+}
+
static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
{
struct vivid_buffer *vid_cap_buf = NULL;
struct vivid_buffer *vbi_cap_buf = NULL;
+ u64 f_time = 0;
dprintk(dev, 1, "Video Capture Thread Tick\n");
@@ -702,6 +706,11 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
if (!vid_cap_buf && !vbi_cap_buf)
goto update_mv;
+ f_time = dev->cap_frame_period * dev->vid_cap_seq_count +
+ dev->cap_stream_start + dev->time_wrap_offset;
+ if (!dev->tstamp_src_is_soe)
+ f_time += dev->cap_frame_eof_offset;
+
if (vid_cap_buf) {
v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req,
&dev->ctrl_hdl_vid_cap);
@@ -721,9 +730,13 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
dprintk(dev, 2, "vid_cap buffer %d done\n",
vid_cap_buf->vb.vb2_buf.index);
+
+ vid_cap_buf->vb.vb2_buf.timestamp = f_time;
}
if (vbi_cap_buf) {
+ u64 vbi_period;
+
v4l2_ctrl_request_setup(vbi_cap_buf->vb.vb2_buf.req_obj.req,
&dev->ctrl_hdl_vbi_cap);
if (dev->stream_sliced_vbi_cap)
@@ -736,6 +749,11 @@ static void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs)
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
dprintk(dev, 2, "vbi_cap %d done\n",
vbi_cap_buf->vb.vb2_buf.index);
+
+ /* If capturing a VBI, offset by 0.05 */
+ vbi_period = dev->cap_frame_period * 5;
+ do_div(vbi_period, 100);
+ vbi_cap_buf->vb.vb2_buf.timestamp = f_time + vbi_period;
}
dev->dqbuf_error = false;
@@ -767,6 +785,8 @@ static int vivid_thread_vid_cap(void *data)
dev->cap_seq_count = 0;
dev->cap_seq_resync = false;
dev->jiffies_vid_cap = jiffies;
+ dev->cap_stream_start = ktime_get_ns();
+ vivid_cap_update_frame_period(dev);
for (;;) {
try_to_freeze();
@@ -779,6 +799,9 @@ static int vivid_thread_vid_cap(void *data)
dev->jiffies_vid_cap = cur_jiffies;
dev->cap_seq_offset = dev->cap_seq_count + 1;
dev->cap_seq_count = 0;
+ dev->cap_stream_start += dev->cap_frame_period *
+ dev->cap_seq_offset;
+ vivid_cap_update_frame_period(dev);
dev->cap_seq_resync = false;
}
numerator = dev->timeperframe_vid_cap.numerator;
@@ -873,8 +896,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
"%s-vid-cap", dev->v4l2_dev.name);
if (IS_ERR(dev->kthread_vid_cap)) {
+ int err = PTR_ERR(dev->kthread_vid_cap);
+
+ dev->kthread_vid_cap = NULL;
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
- return PTR_ERR(dev->kthread_vid_cap);
+ return err;
}
*pstreaming = true;
vivid_grab_controls(dev, true);
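
With cap_stream_start and cap_frame_period, capture timestamps are now computed rather than sampled per buffer: stream start plus sequence times frame period, plus 0.9 of a period when the timestamp source is End-of-Frame (and the period itself is halved for V4L2_FIELD_ALTERNATE). The arithmetic, as a hypothetical helper:

    #include <linux/math64.h>
    #include <linux/types.h>

    static u64 frame_timestamp(u64 stream_start_ns, u64 frame_period_ns,
                               u32 sequence, bool tstamp_is_eof)
    {
            u64 ts = stream_start_ns + frame_period_ns * sequence;

            if (tstamp_is_eof) {
                    u64 eof_offset = frame_period_ns * 9;

                    do_div(eof_offset, 10);         /* 0.9 of the period */
                    ts += eof_offset;
            }
            return ts;
    }
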
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index 5a14810eeb69..ce5bcda2348c 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -244,8 +244,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
"%s-vid-out", dev->v4l2_dev.name);
if (IS_ERR(dev->kthread_vid_out)) {
+ int err = PTR_ERR(dev->kthread_vid_out);
+
+ dev->kthread_vid_out = NULL;
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
- return PTR_ERR(dev->kthread_vid_out);
+ return err;
}
*pstreaming = true;
vivid_grab_controls(dev, true);
diff --git a/drivers/media/platform/vivid/vivid-sdr-cap.c b/drivers/media/platform/vivid/vivid-sdr-cap.c
index dcdc80e272c2..9acc709b0740 100644
--- a/drivers/media/platform/vivid/vivid-sdr-cap.c
+++ b/drivers/media/platform/vivid/vivid-sdr-cap.c
@@ -276,8 +276,6 @@ static int sdr_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->sdr_cap_active, list) {
list_del(&buf->list);
- v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
- &dev->ctrl_hdl_sdr_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
diff --git a/drivers/media/platform/vivid/vivid-vbi-cap.c b/drivers/media/platform/vivid/vivid-vbi-cap.c
index 903cebeb5ce5..40ecd7902b56 100644
--- a/drivers/media/platform/vivid/vivid-vbi-cap.c
+++ b/drivers/media/platform/vivid/vivid-vbi-cap.c
@@ -95,8 +95,6 @@ void vivid_raw_vbi_cap_process(struct vivid_dev *dev, struct vivid_buffer *buf)
if (!VIVID_INVALID_SIGNAL(dev->std_signal_mode))
vivid_vbi_gen_raw(&dev->vbi_gen, &vbi, vbuf);
-
- buf->vb.vb2_buf.timestamp = ktime_get_ns() + dev->time_wrap_offset;
}
@@ -119,8 +117,6 @@ void vivid_sliced_vbi_cap_process(struct vivid_dev *dev,
for (i = 0; i < 25; i++)
vbuf[i] = dev->vbi_gen.data[i];
}
-
- buf->vb.vb2_buf.timestamp = ktime_get_ns() + dev->time_wrap_offset;
}
static int vbi_cap_queue_setup(struct vb2_queue *vq,
@@ -204,8 +200,6 @@ static int vbi_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_cap_active, list) {
list_del(&buf->list);
- v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
- &dev->ctrl_hdl_vbi_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
diff --git a/drivers/media/platform/vivid/vivid-vbi-out.c b/drivers/media/platform/vivid/vivid-vbi-out.c
index 9357c07e30d6..cd56476902a2 100644
--- a/drivers/media/platform/vivid/vivid-vbi-out.c
+++ b/drivers/media/platform/vivid/vivid-vbi-out.c
@@ -96,8 +96,6 @@ static int vbi_out_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vbi_out_active, list) {
list_del(&buf->list);
- v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
- &dev->ctrl_hdl_vbi_out);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 9c8e8be81ce3..c059fc12668a 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -243,8 +243,6 @@ static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
list_del(&buf->list);
- v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
- &dev->ctrl_hdl_vid_cap);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -451,6 +449,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
break;
}
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
vivid_update_quality(dev);
tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
dev->crop_cap = dev->src_rect;
@@ -1016,26 +1016,24 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
return 0;
}
-int vivid_vid_cap_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *cap)
+int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct vivid_dev *dev = video_drvdata(file);
- if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
switch (vivid_get_pixel_aspect(dev)) {
case TPG_PIXEL_ASPECT_NTSC:
- cap->pixelaspect.numerator = 11;
- cap->pixelaspect.denominator = 10;
+ f->numerator = 11;
+ f->denominator = 10;
break;
case TPG_PIXEL_ASPECT_PAL:
- cap->pixelaspect.numerator = 54;
- cap->pixelaspect.denominator = 59;
+ f->numerator = 54;
+ f->denominator = 59;
break;
- case TPG_PIXEL_ASPECT_SQUARE:
- cap->pixelaspect.numerator = 1;
- cap->pixelaspect.denominator = 1;
+ default:
break;
}
return 0;
@@ -1837,9 +1835,6 @@ int vivid_vid_cap_g_parm(struct file *file, void *priv,
return 0;
}
-#define FRACT_CMP(a, OP, b) \
- ((u64)(a).numerator * (b).denominator OP (u64)(b).numerator * (a).denominator)
-
int vivid_vid_cap_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
@@ -1860,14 +1855,14 @@ int vivid_vid_cap_s_parm(struct file *file, void *priv,
if (tpf.denominator == 0)
tpf = webcam_intervals[ival_sz - 1];
for (i = 0; i < ival_sz; i++)
- if (FRACT_CMP(tpf, >=, webcam_intervals[i]))
+ if (V4L2_FRACT_COMPARE(tpf, >=, webcam_intervals[i]))
break;
if (i == ival_sz)
i = ival_sz - 1;
dev->webcam_ival_idx = i;
tpf = webcam_intervals[dev->webcam_ival_idx];
- tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
- tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;
+ tpf = V4L2_FRACT_COMPARE(tpf, <, tpf_min) ? tpf_min : tpf;
+ tpf = V4L2_FRACT_COMPARE(tpf, >, tpf_max) ? tpf_max : tpf;
/* resync the thread's timings */
dev->cap_seq_resync = true;
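
V4L2_FRACT_COMPARE behaves exactly like the removed local FRACT_CMP macro: it compares two rationals without division by cross-multiplying in 64 bits, i.e. a/b OP c/d iff a*d OP c*b for positive denominators. A standalone illustration:

    #include <stdbool.h>
    #include <stdint.h>

    struct fract { uint32_t numerator, denominator; };

    static bool fract_ge(struct fract a, struct fract b)
    {
            return (uint64_t)a.numerator * b.denominator >=
                   (uint64_t)b.numerator * a.denominator;
    }
    /* E.g. 1/25 s >= 1/30 s: a 25 fps frame interval is longer than a
     * 30 fps one, which is what the lookup loop above relies on. */
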
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.h b/drivers/media/platform/vivid/vivid-vid-cap.h
index 47d8b48820df..1e422a59eeab 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.h
+++ b/drivers/media/platform/vivid/vivid-vid-cap.h
@@ -28,7 +28,7 @@ int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f)
int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f);
int vivid_vid_cap_g_selection(struct file *file, void *priv, struct v4l2_selection *sel);
int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s);
-int vivid_vid_cap_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cap);
+int vivid_vid_cap_g_pixelaspect(struct file *file, void *priv, int type, struct v4l2_fract *f);
int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_fmtdesc *f);
int vidioc_g_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_try_fmt_vid_overlay(struct file *file, void *priv, struct v4l2_format *f);
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 9645a91b8782..661f4015fba1 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -21,7 +21,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
+ V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
diff --git a/drivers/media/platform/vivid/vivid-vid-out.c b/drivers/media/platform/vivid/vivid-vid-out.c
index aaf13f03d5d4..ea250aee2b2e 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.c
+++ b/drivers/media/platform/vivid/vivid-vid-out.c
@@ -162,8 +162,6 @@ static int vid_out_start_streaming(struct vb2_queue *vq, unsigned count)
list_for_each_entry_safe(buf, tmp, &dev->vid_out_active, list) {
list_del(&buf->list);
- v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req,
- &dev->ctrl_hdl_vid_out);
vb2_buffer_done(&buf->vb.vb2_buf,
VB2_BUF_STATE_QUEUED);
}
@@ -795,26 +793,24 @@ int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection
return 0;
}
-int vivid_vid_out_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *cap)
+int vivid_vid_out_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct vivid_dev *dev = video_drvdata(file);
- if (cap->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ if (type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
switch (vivid_get_pixel_aspect(dev)) {
case TPG_PIXEL_ASPECT_NTSC:
- cap->pixelaspect.numerator = 11;
- cap->pixelaspect.denominator = 10;
+ f->numerator = 11;
+ f->denominator = 10;
break;
case TPG_PIXEL_ASPECT_PAL:
- cap->pixelaspect.numerator = 54;
- cap->pixelaspect.denominator = 59;
+ f->numerator = 54;
+ f->denominator = 59;
break;
- case TPG_PIXEL_ASPECT_SQUARE:
- cap->pixelaspect.numerator = 1;
- cap->pixelaspect.denominator = 1;
+ default:
break;
}
return 0;
diff --git a/drivers/media/platform/vivid/vivid-vid-out.h b/drivers/media/platform/vivid/vivid-vid-out.h
index e87aacf843c5..8d56314f4ea1 100644
--- a/drivers/media/platform/vivid/vivid-vid-out.h
+++ b/drivers/media/platform/vivid/vivid-vid-out.h
@@ -23,7 +23,7 @@ int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f)
int vidioc_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f);
int vivid_vid_out_g_selection(struct file *file, void *priv, struct v4l2_selection *sel);
int vivid_vid_out_s_selection(struct file *file, void *fh, struct v4l2_selection *s);
-int vivid_vid_out_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cap);
+int vivid_vid_out_g_pixelaspect(struct file *file, void *priv, int type, struct v4l2_fract *f);
int vidioc_enum_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_fmtdesc *f);
int vidioc_g_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
int vidioc_try_fmt_vid_out_overlay(struct file *file, void *priv, struct v4l2_format *f);
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
index 0b18f0bd7419..8b0a26335d70 100644
--- a/drivers/media/platform/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -95,7 +95,7 @@ static void lif_configure_stream(struct vsp1_entity *entity,
format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
LIF_PAD_SOURCE);
- switch (entity->vsp1->version & VI6_IP_VERSION_SOC_MASK) {
+ switch (entity->vsp1->version & VI6_IP_VERSION_MODEL_MASK) {
case VI6_IP_VERSION_MODEL_VSPD_GEN2:
case VI6_IP_VERSION_MODEL_VSPD_V2H:
hbth = 1536;
diff --git a/drivers/media/platform/xilinx/Kconfig b/drivers/media/platform/xilinx/Kconfig
index a5d21b7c6e0b..74ec8aaa5ae0 100644
--- a/drivers/media/platform/xilinx/Kconfig
+++ b/drivers/media/platform/xilinx/Kconfig
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
config VIDEO_XILINX
tristate "Xilinx Video IP (EXPERIMENTAL)"
depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && OF && HAS_DMA
diff --git a/drivers/media/platform/xilinx/Makefile b/drivers/media/platform/xilinx/Makefile
index e8a0f2a9f733..4cdc0b1ec7a5 100644
--- a/drivers/media/platform/xilinx/Makefile
+++ b/drivers/media/platform/xilinx/Makefile
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
xilinx-video-objs += xilinx-dma.o xilinx-vip.o xilinx-vipp.o
obj-$(CONFIG_VIDEO_XILINX) += xilinx-video.o
diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c
index 4ae9d38c9433..c9d5fdb2d407 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.c
+++ b/drivers/media/platform/xilinx/xilinx-dma.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Video DMA
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/dma/xilinx_dma.h>
diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h
index e95d136c153a..5aec4d17eb21 100644
--- a/drivers/media/platform/xilinx/xilinx-dma.h
+++ b/drivers/media/platform/xilinx/xilinx-dma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video DMA
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __XILINX_VIP_DMA_H__
diff --git a/drivers/media/platform/xilinx/xilinx-tpg.c b/drivers/media/platform/xilinx/xilinx-tpg.c
index 851d20dcd550..ed01bedb5db6 100644
--- a/drivers/media/platform/xilinx/xilinx-tpg.c
+++ b/drivers/media/platform/xilinx/xilinx-tpg.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Test Pattern Generator
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/device.h>
@@ -725,7 +722,7 @@ static int xtpg_parse_of(struct xtpg_device *xtpg)
const struct xvip_video_format *format;
struct device_node *endpoint;
- if (!port->name || of_node_cmp(port->name, "port"))
+ if (!of_node_name_eq(port, "port"))
continue;
format = xvip_of_get_format(port);
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 311259129504..18f98838111b 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Video IP Core
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
@@ -36,7 +33,7 @@ static const struct xvip_video_format xvip_video_formats[] = {
{ XVIP_VF_MONO_SENSOR, 8, "mono", MEDIA_BUS_FMT_Y8_1X8,
1, V4L2_PIX_FMT_GREY, "Greyscale 8-bit" },
{ XVIP_VF_MONO_SENSOR, 8, "rggb", MEDIA_BUS_FMT_SRGGB8_1X8,
- 1, V4L2_PIX_FMT_SGRBG8, "Bayer 8-bit RGGB" },
+ 1, V4L2_PIX_FMT_SRGGB8, "Bayer 8-bit RGGB" },
{ XVIP_VF_MONO_SENSOR, 8, "grbg", MEDIA_BUS_FMT_SGRBG8_1X8,
1, V4L2_PIX_FMT_SGRBG8, "Bayer 8-bit GRBG" },
{ XVIP_VF_MONO_SENSOR, 8, "gbrg", MEDIA_BUS_FMT_SGBRG8_1X8,
diff --git a/drivers/media/platform/xilinx/xilinx-vip.h b/drivers/media/platform/xilinx/xilinx-vip.h
index 42fee2026815..ba939dd52818 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.h
+++ b/drivers/media/platform/xilinx/xilinx-vip.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video IP Core
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __XILINX_VIP_H__
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index 99e016d35d91..edce0402155d 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Video IP Composite Device
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/list.h>
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.h b/drivers/media/platform/xilinx/xilinx-vipp.h
index 7e9c4cff33b4..e65fce9538f9 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.h
+++ b/drivers/media/platform/xilinx/xilinx-vipp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Xilinx Video IP Composite Device
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __XILINX_VIPP_H__
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.c b/drivers/media/platform/xilinx/xilinx-vtc.c
index 01c750edcac5..0ae0208d7529 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.c
+++ b/drivers/media/platform/xilinx/xilinx-vtc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Video Timing Controller
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/media/platform/xilinx/xilinx-vtc.h b/drivers/media/platform/xilinx/xilinx-vtc.h
index e1bb2cfcf428..90cf44245283 100644
--- a/drivers/media/platform/xilinx/xilinx-vtc.h
+++ b/drivers/media/platform/xilinx/xilinx-vtc.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Video Timing Controller
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __XILINX_VTC_H__
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 1021c08a9ba4..8a216068a35a 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -493,6 +493,18 @@ config IR_TANGO
The HW decoder supports NEC, RC-5, RC-6 IR protocols.
When compiled as a module, look for tango-ir.
+config RC_XBOX_DVD
+ tristate "Xbox DVD Movie Playback Kit"
+ depends on RC_CORE
+ depends on USB_ARCH_HAS_HCD
+ select USB
+ help
+ Say Y here if you want to use the Xbox DVD Movie Playback Kit.
+ These are IR remotes with USB receivers for the Original Xbox (2001).
+
+ To compile this driver as a module, choose M here: the module will be
+ called xbox_remote.
+
config IR_ZX
tristate "ZTE ZX IR remote control"
depends on RC_CORE
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index e0340d043fe8..92c163816849 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -48,3 +48,4 @@ obj-$(CONFIG_IR_SIR) += sir_ir.o
obj-$(CONFIG_IR_MTK) += mtk-cir.o
obj-$(CONFIG_IR_ZX) += zx-irdec.o
obj-$(CONFIG_IR_TANGO) += tango-ir.o
+obj-$(CONFIG_RC_XBOX_DVD) += xbox_remote.o
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 8b97fd1f0cea..390a722e6211 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -59,6 +59,28 @@ static const struct bpf_func_proto rc_keydown_proto = {
.arg4_type = ARG_ANYTHING,
};
+BPF_CALL_3(bpf_rc_pointer_rel, u32*, sample, s32, rel_x, s32, rel_y)
+{
+ struct ir_raw_event_ctrl *ctrl;
+
+ ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);
+
+ input_report_rel(ctrl->dev->input_dev, REL_X, rel_x);
+ input_report_rel(ctrl->dev->input_dev, REL_Y, rel_y);
+ input_sync(ctrl->dev->input_dev);
+
+ return 0;
+}
+
+static const struct bpf_func_proto rc_pointer_rel_proto = {
+ .func = bpf_rc_pointer_rel,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+};
+
static const struct bpf_func_proto *
lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -67,6 +89,8 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &rc_repeat_proto;
case BPF_FUNC_rc_keydown:
return &rc_keydown_proto;
+ case BPF_FUNC_rc_pointer_rel:
+ return &rc_pointer_rel_proto;
case BPF_FUNC_map_lookup_elem:
return &bpf_map_lookup_elem_proto;
case BPF_FUNC_map_update_elem:
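
The new helper is callable from lirc_mode2 BPF programs. A minimal sketch of such a program (the dx/dy encoding is invented for illustration; assumes libbpf's bpf_helpers.h for SEC() and the helper declaration, plus LIRC_IS_PULSE()/LIRC_VALUE() from <linux/lirc.h>):

    #include <linux/bpf.h>
    #include <linux/lirc.h>
    #include <bpf/bpf_helpers.h>

    SEC("lirc_mode2")
    int ir_mouse(unsigned int *sample)
    {
            if (LIRC_IS_PULSE(*sample)) {
                    unsigned int d = LIRC_VALUE(*sample);

                    /* hypothetical remote: high byte = dx, low byte = dy */
                    bpf_rc_pointer_rel(sample, (d >> 8) & 0xff, d & 0xff);
            }
            return 0;
    }

    /* rc_pointer_rel_proto sets gpl_only, so the program must be GPL. */
    char _license[] SEC("license") = "GPL";
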
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 1041c056854d..989d2554ec72 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -772,9 +772,9 @@ static ssize_t show_associate_remote(struct device *d,
mutex_lock(&ictx->lock);
if (ictx->rf_isassociating)
- strcpy(buf, "associating\n");
+ strscpy(buf, "associating\n", PAGE_SIZE);
else
- strcpy(buf, "closed\n");
+ strscpy(buf, "closed\n", PAGE_SIZE);
dev_info(d, "Visit http://www.lirc.org/html/imon-24g.html for instructions on how to associate your iMON 2.4G DT/LT remote\n");
mutex_unlock(&ictx->lock);
diff --git a/drivers/media/rc/imon_raw.c b/drivers/media/rc/imon_raw.c
index 7796098d9c30..25e56c5b13c0 100644
--- a/drivers/media/rc/imon_raw.c
+++ b/drivers/media/rc/imon_raw.c
@@ -14,51 +14,50 @@ struct imon {
struct device *dev;
struct urb *ir_urb;
struct rc_dev *rcdev;
- u8 ir_buf[8];
+ u8 ir_buf[8] __aligned(__alignof__(u64));
char phys[64];
};
/*
- * ffs/find_next_bit() searches in the wrong direction, so open-code our own.
+ * The first 5 bytes of data represent IR pulse or space. Each bit, starting
+ * from the highest bit in the first byte, represents 250µs of data. It is 1
+ * for space and 0 for pulse.
+ *
+ * The station sends 10 packets, and ir_buf[7] counts from 1 to 10, so once
+ * it reaches 10 (0x0a) we assume all the data has arrived.
*/
-static inline int is_bit_set(const u8 *buf, int bit)
-{
- return buf[bit / 8] & (0x80 >> (bit & 7));
-}
-
static void imon_ir_data(struct imon *imon)
{
struct ir_raw_event rawir = {};
- int offset = 0, size = 5 * 8;
+ u64 d = be64_to_cpup((__be64 *)imon->ir_buf) >> 24;
+ int offset = 40;
int bit;
dev_dbg(imon->dev, "data: %*ph", 8, imon->ir_buf);
- while (offset < size) {
- bit = offset;
- while (!is_bit_set(imon->ir_buf, bit) && bit < size)
- bit++;
- dev_dbg(imon->dev, "pulse: %d bits", bit - offset);
- if (bit > offset) {
+ do {
+ bit = fls64(d & (BIT_ULL(offset) - 1));
+ if (bit < offset) {
+ dev_dbg(imon->dev, "pulse: %d bits", offset - bit);
rawir.pulse = true;
- rawir.duration = (bit - offset) * BIT_DURATION;
+ rawir.duration = (offset - bit) * BIT_DURATION;
ir_raw_event_store_with_filter(imon->rcdev, &rawir);
- }
- if (bit >= size)
- break;
+ if (bit == 0)
+ break;
- offset = bit;
- while (is_bit_set(imon->ir_buf, bit) && bit < size)
- bit++;
- dev_dbg(imon->dev, "space: %d bits", bit - offset);
+ offset = bit;
+ }
+
+ bit = fls64(~d & (BIT_ULL(offset) - 1));
+ dev_dbg(imon->dev, "space: %d bits", offset - bit);
rawir.pulse = false;
- rawir.duration = (bit - offset) * BIT_DURATION;
+ rawir.duration = (offset - bit) * BIT_DURATION;
ir_raw_event_store_with_filter(imon->rcdev, &rawir);
offset = bit;
- }
+ } while (offset > 0);
if (imon->ir_buf[7] == 0x0a) {
ir_raw_event_set_idle(imon->rcdev, true);
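
The rewritten loop run-length scans with fls64() instead of testing one bit at a time: the 40 sample bits sit in the low bits of a u64, a scan of d finds where the current pulse run (0-bits) ends, and a scan of ~d finds where the space run (1-bits) ends. A userspace model of the same loop (hypothetical; it prints durations instead of queueing raw events):

    #include <stdint.h>
    #include <stdio.h>

    static int fls64_(uint64_t x)   /* 1-based index of the highest set bit */
    {
            return x ? 64 - __builtin_clzll(x) : 0;
    }

    static void decode(uint64_t d)  /* MSB-first: 0 = pulse, 1 = space */
    {
            int offset = 40, bit;

            do {
                    bit = fls64_(d & ((1ULL << offset) - 1));
                    if (bit < offset) {
                            printf("pulse: %d bits\n", offset - bit);
                            if (bit == 0)
                                    break;
                            offset = bit;
                    }
                    bit = fls64_(~d & ((1ULL << offset) - 1));
                    printf("space: %d bits\n", offset - bit);
                    offset = bit;
            } while (offset > 0);
    }

    int main(void)
    {
            decode(0xF0F0F0F0F0ULL); /* alternating 4-bit space/pulse runs */
            return 0;
    }
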
diff --git a/drivers/media/rc/keymaps/Makefile b/drivers/media/rc/keymaps/Makefile
index d6b913a3032d..5b1399af6b3a 100644
--- a/drivers/media/rc/keymaps/Makefile
+++ b/drivers/media/rc/keymaps/Makefile
@@ -116,4 +116,5 @@ obj-$(CONFIG_RC_MAP) += rc-adstech-dvb-t-pci.o \
rc-winfast.o \
rc-winfast-usbii-deluxe.o \
rc-su3000.o \
+ rc-xbox-dvd.o \
rc-zx-irdec.o
diff --git a/drivers/media/rc/keymaps/rc-xbox-dvd.c b/drivers/media/rc/keymaps/rc-xbox-dvd.c
new file mode 100644
index 000000000000..af387244636b
--- /dev/null
+++ b/drivers/media/rc/keymaps/rc-xbox-dvd.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Keytable for Xbox DVD remote
+// Copyright (c) 2018 by Benjamin Valentin <benpicco@googlemail.com>
+
+#include <media/rc-map.h>
+#include <linux/module.h>
+
+/* based on lircd.conf.xbox */
+static struct rc_map_table xbox_dvd[] = {
+ {0xa0b, KEY_OK},
+ {0xaa6, KEY_UP},
+ {0xaa7, KEY_DOWN},
+ {0xaa8, KEY_RIGHT},
+ {0xaa9, KEY_LEFT},
+ {0xac3, KEY_INFO},
+
+ {0xac6, KEY_9},
+ {0xac7, KEY_8},
+ {0xac8, KEY_7},
+ {0xac9, KEY_6},
+ {0xaca, KEY_5},
+ {0xacb, KEY_4},
+ {0xacc, KEY_3},
+ {0xacd, KEY_2},
+ {0xace, KEY_1},
+ {0xacf, KEY_0},
+
+ {0xad5, KEY_ANGLE},
+ {0xad8, KEY_BACK},
+ {0xadd, KEY_PREVIOUSSONG},
+ {0xadf, KEY_NEXTSONG},
+ {0xae0, KEY_STOP},
+ {0xae2, KEY_REWIND},
+ {0xae3, KEY_FASTFORWARD},
+ {0xae5, KEY_TITLE},
+ {0xae6, KEY_PAUSE},
+ {0xaea, KEY_PLAY},
+ {0xaf7, KEY_MENU},
+};
+
+static struct rc_map_list xbox_dvd_map = {
+ .map = {
+ .scan = xbox_dvd,
+ .size = ARRAY_SIZE(xbox_dvd),
+ .rc_proto = RC_PROTO_UNKNOWN,
+ .name = RC_MAP_XBOX_DVD,
+ }
+};
+
+static int __init init_rc_map(void)
+{
+ return rc_map_register(&xbox_dvd_map);
+}
+
+static void __exit exit_rc_map(void)
+{
+ rc_map_unregister(&xbox_dvd_map);
+}
+
+module_init(init_rc_map)
+module_exit(exit_rc_map)
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index c9293696dc2d..8d7d3ef88862 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -432,6 +432,15 @@ static const struct usb_device_id mceusb_dev_table[] = {
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_HAUPPAUGE, 0xb139),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
+ /* Hauppauge WinTV-HVR-935C - based on cx231xx */
+ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb151),
+ .driver_info = HAUPPAUGE_CX_HYBRID_TV },
+ /* Hauppauge WinTV-HVR-955Q - based on cx231xx */
+ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb123),
+ .driver_info = HAUPPAUGE_CX_HYBRID_TV },
+ /* Hauppauge WinTV-HVR-975 - based on cx231xx */
+ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb150),
+ .driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_PCTV, 0x0259),
.driver_info = HAUPPAUGE_CX_HYBRID_TV },
{ USB_DEVICE(VENDOR_PCTV, 0x025e),
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 552bbe82a160..66a174979b3c 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -695,7 +695,8 @@ void rc_repeat(struct rc_dev *dev)
(dev->last_toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0)
};
- ir_lirc_scancode_event(dev, &sc);
+ if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
+ ir_lirc_scancode_event(dev, &sc);
spin_lock_irqsave(&dev->keylock, flags);
@@ -735,7 +736,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
.keycode = keycode
};
- ir_lirc_scancode_event(dev, &sc);
+ if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
+ ir_lirc_scancode_event(dev, &sc);
if (new_event && dev->keypressed)
ir_do_keyup(dev, false);
@@ -1950,6 +1952,8 @@ void rc_unregister_device(struct rc_dev *dev)
rc_free_rx_device(dev);
mutex_lock(&dev->lock);
+ if (dev->users && dev->close)
+ dev->close(dev);
dev->registered = false;
mutex_unlock(&dev->lock);
diff --git a/drivers/media/rc/xbox_remote.c b/drivers/media/rc/xbox_remote.c
new file mode 100644
index 000000000000..f959cbb94744
--- /dev/null
+++ b/drivers/media/rc/xbox_remote.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Driver for Xbox DVD Movie Playback Kit
+// Copyright (c) 2018 by Benjamin Valentin <benpicco@googlemail.com>
+
+/*
+ * Xbox DVD Movie Playback Kit USB IR dongle support
+ *
+ * The driver was derived from the ati_remote driver 2.2.1
+ * and used information from lirc_xbox.c
+ *
+ * Copyright (c) 2011, 2012 Anssi Hannula <anssi.hannula@iki.fi>
+ * Copyright (c) 2004 Torrey Hoffman <thoffman@arnor.net>
+ * Copyright (c) 2002 Vladimir Dergachev
+ * Copyright (c) 2003-2004 Paul Miller <pmiller9@users.sourceforge.net>
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/usb/input.h>
+#include <media/rc-core.h>
+
+/*
+ * Module and Version Information
+ */
+#define DRIVER_VERSION "1.0.0"
+#define DRIVER_AUTHOR "Benjamin Valentin <benpicco@googlemail.com>"
+#define DRIVER_DESC "Xbox DVD USB Remote Control"
+
+#define NAME_BUFSIZE 80 /* size of product name, path buffers */
+#define DATA_BUFSIZE 8 /* size of URB data buffers */
+
+/*
+ * USB vendor ids for XBOX DVD Dongles
+ */
+#define VENDOR_GAMESTER 0x040b
+#define VENDOR_MICROSOFT 0x045e
+
+static const struct usb_device_id xbox_remote_table[] = {
+ /* Gamester Xbox DVD Movie Playback Kit IR */
+ {
+ USB_DEVICE(VENDOR_GAMESTER, 0x6521),
+ },
+ /* Microsoft Xbox DVD Movie Playback Kit IR */
+ {
+ USB_DEVICE(VENDOR_MICROSOFT, 0x0284),
+ },
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, xbox_remote_table);
+
+struct xbox_remote {
+ struct rc_dev *rdev;
+ struct usb_device *udev;
+ struct usb_interface *interface;
+
+ struct urb *irq_urb;
+ unsigned char inbuf[DATA_BUFSIZE] __aligned(sizeof(u16));
+
+ char rc_name[NAME_BUFSIZE];
+ char rc_phys[NAME_BUFSIZE];
+};
+
+static int xbox_remote_rc_open(struct rc_dev *rdev)
+{
+ struct xbox_remote *xbox_remote = rdev->priv;
+
+ /* On first open, submit the read urb which was set up previously. */
+ xbox_remote->irq_urb->dev = xbox_remote->udev;
+ if (usb_submit_urb(xbox_remote->irq_urb, GFP_KERNEL)) {
+ dev_err(&xbox_remote->interface->dev,
+ "%s: usb_submit_urb failed!\n", __func__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void xbox_remote_rc_close(struct rc_dev *rdev)
+{
+ struct xbox_remote *xbox_remote = rdev->priv;
+
+ usb_kill_urb(xbox_remote->irq_urb);
+}
+
+/*
+ * xbox_remote_report_input
+ */
+static void xbox_remote_input_report(struct urb *urb)
+{
+ struct xbox_remote *xbox_remote = urb->context;
+ unsigned char *data = xbox_remote->inbuf;
+
+ /*
+ * data[0] = 0x00
+ * data[1] = length - always 0x06
+ * data[2] = the key code
+ * data[3] = high part of key code
+ * data[4] = last_press_ms (low)
+ * data[5] = last_press_ms (high)
+ */
+
+ /* Deal with strange looking inputs */
+ if (urb->actual_length != 6 || urb->actual_length != data[1]) {
+ dev_warn(&urb->dev->dev, "Weird data, len=%d: %*ph\n",
+ urb->actual_length, urb->actual_length, data);
+ return;
+ }
+
+ rc_keydown(xbox_remote->rdev, RC_PROTO_UNKNOWN,
+ le16_to_cpup((__le16 *)(data + 2)), 0);
+}
+
+/*
+ * xbox_remote_irq_in
+ */
+static void xbox_remote_irq_in(struct urb *urb)
+{
+ struct xbox_remote *xbox_remote = urb->context;
+ int retval;
+
+ switch (urb->status) {
+ case 0: /* success */
+ xbox_remote_input_report(urb);
+ break;
+ case -ECONNRESET: /* unlink */
+ case -ENOENT:
+ case -ESHUTDOWN:
+ dev_dbg(&xbox_remote->interface->dev,
+ "%s: urb error status, unlink?\n",
+ __func__);
+ return;
+ default: /* error */
+ dev_dbg(&xbox_remote->interface->dev,
+ "%s: Nonzero urb status %d\n",
+ __func__, urb->status);
+ }
+
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
+ if (retval)
+ dev_err(&xbox_remote->interface->dev,
+ "%s: usb_submit_urb()=%d\n",
+ __func__, retval);
+}
+
+static void xbox_remote_rc_init(struct xbox_remote *xbox_remote)
+{
+ struct rc_dev *rdev = xbox_remote->rdev;
+
+ rdev->priv = xbox_remote;
+ rdev->allowed_protocols = RC_PROTO_BIT_UNKNOWN;
+ rdev->driver_name = "xbox_remote";
+
+ rdev->open = xbox_remote_rc_open;
+ rdev->close = xbox_remote_rc_close;
+
+ rdev->device_name = xbox_remote->rc_name;
+ rdev->input_phys = xbox_remote->rc_phys;
+
+ usb_to_input_id(xbox_remote->udev, &rdev->input_id);
+ rdev->dev.parent = &xbox_remote->interface->dev;
+}
+
+static int xbox_remote_initialize(struct xbox_remote *xbox_remote,
+ struct usb_endpoint_descriptor *endpoint_in)
+{
+ struct usb_device *udev = xbox_remote->udev;
+ int pipe, maxp;
+
+ /* Set up irq_urb */
+ pipe = usb_rcvintpipe(udev, endpoint_in->bEndpointAddress);
+ maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ maxp = (maxp > DATA_BUFSIZE) ? DATA_BUFSIZE : maxp;
+
+ usb_fill_int_urb(xbox_remote->irq_urb, udev, pipe, xbox_remote->inbuf,
+ maxp, xbox_remote_irq_in, xbox_remote,
+ endpoint_in->bInterval);
+
+ return 0;
+}
+
+/*
+ * xbox_remote_probe
+ */
+static int xbox_remote_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct usb_host_interface *iface_host = interface->cur_altsetting;
+ struct usb_endpoint_descriptor *endpoint_in;
+ struct xbox_remote *xbox_remote;
+ struct rc_dev *rc_dev;
+ int err = -ENOMEM;
+
+ // why is there also a device with no endpoints?
+ if (iface_host->desc.bNumEndpoints == 0)
+ return -ENODEV;
+
+ if (iface_host->desc.bNumEndpoints != 1) {
+ pr_err("%s: Unexpected desc.bNumEndpoints: %d\n",
+ __func__, iface_host->desc.bNumEndpoints);
+ return -ENODEV;
+ }
+
+ endpoint_in = &iface_host->endpoint[0].desc;
+
+ if (!usb_endpoint_is_int_in(endpoint_in)) {
+ pr_err("%s: Unexpected endpoint_in\n", __func__);
+ return -ENODEV;
+ }
+ if (le16_to_cpu(endpoint_in->wMaxPacketSize) == 0) {
+ pr_err("%s: endpoint_in message size==0?\n", __func__);
+ return -ENODEV;
+ }
+
+ xbox_remote = kzalloc(sizeof(*xbox_remote), GFP_KERNEL);
+ rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE);
+ if (!xbox_remote || !rc_dev)
+ goto exit_free_dev_rdev;
+
+ /* Allocate URB buffer */
+ xbox_remote->irq_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!xbox_remote->irq_urb)
+ goto exit_free_buffers;
+
+ xbox_remote->udev = udev;
+ xbox_remote->rdev = rc_dev;
+ xbox_remote->interface = interface;
+
+ usb_make_path(udev, xbox_remote->rc_phys, sizeof(xbox_remote->rc_phys));
+		/* If capturing a VBI, offset its timestamp by 0.05 of the frame period */
+ strlcat(xbox_remote->rc_phys, "/input0", sizeof(xbox_remote->rc_phys));
+
+ snprintf(xbox_remote->rc_name, sizeof(xbox_remote->rc_name), "%s%s%s",
+ udev->manufacturer ?: "",
+ udev->manufacturer && udev->product ? " " : "",
+ udev->product ?: "");
+
+ if (!strlen(xbox_remote->rc_name))
+ snprintf(xbox_remote->rc_name, sizeof(xbox_remote->rc_name),
+ DRIVER_DESC "(%04x,%04x)",
+ le16_to_cpu(xbox_remote->udev->descriptor.idVendor),
+ le16_to_cpu(xbox_remote->udev->descriptor.idProduct));
+
+ rc_dev->map_name = RC_MAP_XBOX_DVD; /* default map */
+
+ xbox_remote_rc_init(xbox_remote);
+
+ /* Device Hardware Initialization */
+ err = xbox_remote_initialize(xbox_remote, endpoint_in);
+ if (err)
+ goto exit_kill_urbs;
+
+ /* Set up and register rc device */
+ err = rc_register_device(xbox_remote->rdev);
+ if (err)
+ goto exit_kill_urbs;
+
+ usb_set_intfdata(interface, xbox_remote);
+
+ return 0;
+
+exit_kill_urbs:
+ usb_kill_urb(xbox_remote->irq_urb);
+exit_free_buffers:
+ usb_free_urb(xbox_remote->irq_urb);
+exit_free_dev_rdev:
+ rc_free_device(rc_dev);
+ kfree(xbox_remote);
+
+ return err;
+}
+
+/*
+ * xbox_remote_disconnect
+ */
+static void xbox_remote_disconnect(struct usb_interface *interface)
+{
+ struct xbox_remote *xbox_remote;
+
+ xbox_remote = usb_get_intfdata(interface);
+ usb_set_intfdata(interface, NULL);
+ if (!xbox_remote) {
+ dev_warn(&interface->dev, "%s - null device?\n", __func__);
+ return;
+ }
+
+ usb_kill_urb(xbox_remote->irq_urb);
+ rc_unregister_device(xbox_remote->rdev);
+ usb_free_urb(xbox_remote->irq_urb);
+ kfree(xbox_remote);
+}
+
+/* usb specific object to register with the usb subsystem */
+static struct usb_driver xbox_remote_driver = {
+ .name = "xbox_remote",
+ .probe = xbox_remote_probe,
+ .disconnect = xbox_remote_disconnect,
+ .id_table = xbox_remote_table,
+};
+
+module_usb_driver(xbox_remote_driver);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
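
A userspace model of the 6-byte report parsing in xbox_remote_input_report() (hypothetical test harness; per the layout comment in the driver, buf[1] carries the length and buf[2]/buf[3] the little-endian scancode):

    #include <stdint.h>
    #include <stdio.h>

    static int parse_xbox_report(const uint8_t *buf, int len, uint16_t *scancode)
    {
            if (len != 6 || buf[1] != len)
                    return -1;                      /* malformed packet */
            *scancode = (uint16_t)buf[2] | ((uint16_t)buf[3] << 8);
            return 0;
    }

    int main(void)
    {
            const uint8_t pkt[6] = { 0x00, 0x06, 0x0b, 0x0a, 0x10, 0x00 };
            uint16_t sc;

            if (!parse_xbox_report(pkt, sizeof(pkt), &sc))
                    printf("scancode 0x%03x\n", sc); /* 0xa0b: KEY_OK above */
            return 0;
    }
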
diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
index 11ce5101e19f..d5c433e20d4a 100644
--- a/drivers/media/spi/cxd2880-spi.c
+++ b/drivers/media/spi/cxd2880-spi.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
#include <linux/ktime.h>
#include <media/dvb_demux.h>
@@ -51,6 +52,7 @@ struct cxd2880_dvb_spi {
struct mutex spi_mutex; /* For SPI access exclusive control */
int feed_count;
int all_pid_feed_count;
+ struct regulator *vcc_supply;
u8 *ts_buf;
struct cxd2880_pid_filter_config filter_config;
};
@@ -518,6 +520,17 @@ cxd2880_spi_probe(struct spi_device *spi)
if (!dvb_spi)
return -ENOMEM;
+ dvb_spi->vcc_supply = devm_regulator_get_optional(&spi->dev, "vcc");
+ if (IS_ERR(dvb_spi->vcc_supply)) {
+ if (PTR_ERR(dvb_spi->vcc_supply) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dvb_spi->vcc_supply = NULL;
+ } else {
+ ret = regulator_enable(dvb_spi->vcc_supply);
+ if (ret)
+ return ret;
+ }
+
dvb_spi->spi = spi;
mutex_init(&dvb_spi->spi_mutex);
dev_set_drvdata(&spi->dev, dvb_spi);
@@ -536,6 +549,7 @@ cxd2880_spi_probe(struct spi_device *spi)
if (!dvb_attach(cxd2880_attach, &dvb_spi->dvb_fe, &config)) {
pr_err("cxd2880_attach failed\n");
+ ret = -ENODEV;
goto fail_attach;
}
@@ -630,6 +644,9 @@ cxd2880_spi_remove(struct spi_device *spi)
dvb_frontend_detach(&dvb_spi->dvb_fe);
dvb_unregister_adapter(&dvb_spi->adapter);
+ if (dvb_spi->vcc_supply)
+ regulator_disable(dvb_spi->vcc_supply);
+
kfree(dvb_spi);
pr_info("cxd2880_spi remove ok.\n");
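
The probe change follows the usual optional-supply pattern: -EPROBE_DEFER is propagated, any other error is treated as "no supply wired up", and an acquired supply is enabled now and disabled again in remove(). A condensed sketch (hypothetical helper):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    static int attach_optional_vcc(struct device *dev, struct regulator **vcc)
    {
            *vcc = devm_regulator_get_optional(dev, "vcc");
            if (IS_ERR(*vcc)) {
                    if (PTR_ERR(*vcc) == -EPROBE_DEFER)
                            return -EPROBE_DEFER;   /* supply not ready yet */
                    *vcc = NULL;                    /* absent: run without it */
                    return 0;
            }
            return regulator_enable(*vcc);          /* disable in remove() */
    }
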
diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
index efbf210147c7..7876c897cc1d 100644
--- a/drivers/media/usb/au0828/au0828-video.c
+++ b/drivers/media/usb/au0828/au0828-video.c
@@ -1616,27 +1616,42 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
return 0;
}
-static int vidioc_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *cc)
+static int vidioc_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct au0828_dev *dev = video_drvdata(file);
- if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
dev->std_set_in_tuner_core, dev->dev_state);
- cc->bounds.left = 0;
- cc->bounds.top = 0;
- cc->bounds.width = dev->width;
- cc->bounds.height = dev->height;
+ f->numerator = 54;
+ f->denominator = 59;
- cc->defrect = cc->bounds;
+ return 0;
+}
+
+static int vidioc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct au0828_dev *dev = video_drvdata(file);
- cc->pixelaspect.numerator = 54;
- cc->pixelaspect.denominator = 59;
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = dev->width;
+ s->r.height = dev->height;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -1762,7 +1777,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_enumaudio = vidioc_enumaudio,
.vidioc_g_audio = vidioc_g_audio,
.vidioc_s_audio = vidioc_s_audio,
- .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_pixelaspect = vidioc_g_pixelaspect,
+ .vidioc_g_selection = vidioc_g_selection,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
index 3f401fbd0ecc..748739c2b8b2 100644
--- a/drivers/media/usb/cpia2/cpia2_v4l.c
+++ b/drivers/media/usb/cpia2/cpia2_v4l.c
@@ -479,24 +479,25 @@ static int cpia2_g_fmt_vid_cap(struct file *file, void *fh,
*
*****************************************************************************/
-static int cpia2_cropcap(struct file *file, void *fh, struct v4l2_cropcap *c)
+static int cpia2_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
{
struct camera_data *cam = video_drvdata(file);
- if (c->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- c->bounds.left = 0;
- c->bounds.top = 0;
- c->bounds.width = cam->width;
- c->bounds.height = cam->height;
- c->defrect.left = 0;
- c->defrect.top = 0;
- c->defrect.width = cam->width;
- c->defrect.height = cam->height;
- c->pixelaspect.numerator = 1;
- c->pixelaspect.denominator = 1;
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = cam->width;
+ s->r.height = cam->height;
+ break;
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -1047,7 +1048,7 @@ static const struct v4l2_ioctl_ops cpia2_ioctl_ops = {
.vidioc_try_fmt_vid_cap = cpia2_try_fmt_vid_cap,
.vidioc_g_jpegcomp = cpia2_g_jpegcomp,
.vidioc_s_jpegcomp = cpia2_s_jpegcomp,
- .vidioc_cropcap = cpia2_cropcap,
+ .vidioc_g_selection = cpia2_g_selection,
.vidioc_reqbufs = cpia2_reqbufs,
.vidioc_querybuf = cpia2_querybuf,
.vidioc_qbuf = cpia2_qbuf,
diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c
index 2641e23d946b..1c48c497bd6a 100644
--- a/drivers/media/usb/cx231xx/cx231xx-417.c
+++ b/drivers/media/usb/cx231xx/cx231xx-417.c
@@ -1500,27 +1500,45 @@ static const struct videobuf_queue_ops cx231xx_qops = {
/* ------------------------------------------------------------------ */
-static int vidioc_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *cc)
+static int vidioc_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
bool is_50hz = dev->encodernorm.id & V4L2_STD_625_50;
- if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- cc->bounds.left = 0;
- cc->bounds.top = 0;
- cc->bounds.width = dev->ts1.width;
- cc->bounds.height = dev->ts1.height;
- cc->defrect = cc->bounds;
- cc->pixelaspect.numerator = is_50hz ? 54 : 11;
- cc->pixelaspect.denominator = is_50hz ? 59 : 10;
+ f->numerator = is_50hz ? 54 : 11;
+ f->denominator = is_50hz ? 59 : 10;
return 0;
}
+static int vidioc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = dev->ts1.width;
+ s->r.height = dev->ts1.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int vidioc_g_std(struct file *file, void *fh0, v4l2_std_id *norm)
{
struct cx231xx_fh *fh = file->private_data;
@@ -1865,7 +1883,8 @@ static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
.vidioc_g_input = cx231xx_g_input,
.vidioc_s_input = cx231xx_s_input,
.vidioc_s_ctrl = vidioc_s_ctrl,
- .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_pixelaspect = vidioc_g_pixelaspect,
+ .vidioc_g_selection = vidioc_g_selection,
.vidioc_querycap = cx231xx_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
diff --git a/drivers/media/usb/cx231xx/cx231xx-video.c b/drivers/media/usb/cx231xx/cx231xx-video.c
index c990f70c0ea6..0d451c4ea3b9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-video.c
+++ b/drivers/media/usb/cx231xx/cx231xx-video.c
@@ -1482,27 +1482,45 @@ int cx231xx_s_register(struct file *file, void *priv,
}
#endif
-static int vidioc_cropcap(struct file *file, void *priv,
- struct v4l2_cropcap *cc)
+static int vidioc_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct cx231xx_fh *fh = priv;
struct cx231xx *dev = fh->dev;
bool is_50hz = dev->norm & V4L2_STD_625_50;
- if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- cc->bounds.left = 0;
- cc->bounds.top = 0;
- cc->bounds.width = dev->width;
- cc->bounds.height = dev->height;
- cc->defrect = cc->bounds;
- cc->pixelaspect.numerator = is_50hz ? 54 : 11;
- cc->pixelaspect.denominator = is_50hz ? 59 : 10;
+ f->numerator = is_50hz ? 54 : 11;
+ f->denominator = is_50hz ? 59 : 10;
return 0;
}
+static int vidioc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *s)
+{
+ struct cx231xx_fh *fh = priv;
+ struct cx231xx *dev = fh->dev;
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = dev->width;
+ s->r.height = dev->height;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
@@ -2093,7 +2111,8 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_g_fmt_vbi_cap = vidioc_g_fmt_vbi_cap,
.vidioc_try_fmt_vbi_cap = vidioc_try_fmt_vbi_cap,
.vidioc_s_fmt_vbi_cap = vidioc_s_fmt_vbi_cap,
- .vidioc_cropcap = vidioc_cropcap,
+ .vidioc_g_pixelaspect = vidioc_g_pixelaspect,
+ .vidioc_g_selection = vidioc_g_selection,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
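
Both cx231xx conversions above follow the same recipe: the single vidioc_cropcap callback is split into vidioc_g_pixelaspect and vidioc_g_selection, while the V4L2 core keeps servicing the legacy VIDIOC_CROPCAP ioctl by combining the two. A minimal userspace sketch of what the new callbacks expose; the device node and the thin error handling are illustrative assumptions, not part of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_selection sel = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.target = V4L2_SEL_TGT_CROP_DEFAULT,	/* served by vidioc_g_selection */
	};
	struct v4l2_cropcap cc = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
	int fd = open("/dev/video0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_G_SELECTION, &sel) == 0)
		printf("default crop: %ux%u\n", sel.r.width, sel.r.height);
	/* VIDIOC_CROPCAP is emulated by the core from the two new callbacks. */
	if (ioctl(fd, VIDIOC_CROPCAP, &cc) == 0)
		printf("pixel aspect: %u/%u\n",
		       cc.pixelaspect.numerator, cc.pixelaspect.denominator);
	close(fd);
	return 0;
}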
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index df4412245a8a..511e3f270308 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -133,6 +133,7 @@ config DVB_USB_RTL28XXU
depends on DVB_USB_V2 && I2C_MUX
select DVB_MN88472 if MEDIA_SUBDRV_AUTOSELECT
select DVB_MN88473 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_CXD2841ER if MEDIA_SUBDRV_AUTOSELECT
select DVB_RTL2830
select DVB_RTL2832
select DVB_RTL2832_SDR if (MEDIA_SUBDRV_AUTOSELECT && MEDIA_SDR_SUPPORT)
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 3b8f7931b730..d55ef016d418 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -957,9 +957,7 @@ int dvb_usbv2_probe(struct usb_interface *intf,
if (d->props->identify_state) {
const char *name = NULL;
ret = d->props->identify_state(d, &name);
- if (ret == 0) {
- ;
- } else if (ret == COLD) {
+ if (ret == COLD) {
dev_info(&d->udev->dev,
"%s: found a '%s' in cold state\n",
KBUILD_MODNAME, d->name);
@@ -984,7 +982,7 @@ int dvb_usbv2_probe(struct usb_interface *intf,
} else {
goto err_free_all;
}
- } else {
+ } else if (ret != WARM) {
goto err_free_all;
}
}
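
The dvb_usbv2_probe() cleanup above removes an empty ret == 0 arm and tightens the trailing else so that only an explicit WARM result skips the firmware download path; any other identify_state() return now aborts the probe instead of falling through. A reduced model of the resulting control flow, with illustrative stand-ins for the driver-internal COLD/WARM constants:

#include <stdio.h>

enum { COLD = 0, WARM = 1 };	/* illustrative values */

static int probe_after(int ret)
{
	if (ret == COLD)
		return 100;	/* download firmware, device re-enumerates */
	else if (ret != WARM)
		return -1;	/* anything unexpected: abort the probe */
	return 0;		/* WARM: use the device as-is */
}

int main(void)
{
	printf("cold=%d warm=%d junk=%d\n",
	       probe_after(COLD), probe_after(WARM), probe_after(7));
	return 0;
}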
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index 0559417c8af4..80fed4494736 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -200,11 +200,10 @@ gl861_i2c_write_ex(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen)
u8 *buf;
int ret;
- buf = kmalloc(wlen, GFP_KERNEL);
+ buf = kmemdup(wbuf, wlen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- memcpy(buf, wbuf, wlen);
ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
GL861_REQ_I2C_RAW, GL861_WRITE,
addr << (8 + 1), 0x0100, buf, wlen, 2000);
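
kmemdup() is a drop-in replacement for the kmalloc()-plus-memcpy() pair removed here, allocating and copying in one call with the same GFP semantics. A minimal kernel-module sketch of the pattern; the module name and message are illustrative:

#include <linux/module.h>
#include <linux/slab.h>

static int __init dup_demo_init(void)
{
	static const u8 src[4] = { 0xde, 0xad, 0xbe, 0xef };
	u8 *copy;

	/* One call replaces kmalloc() followed by memcpy(). */
	copy = kmemdup(src, sizeof(src), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	pr_info("dup_demo: first byte %#x\n", copy[0]);
	kfree(copy);
	return 0;
}

static void __exit dup_demo_exit(void)
{
}

module_init(dup_demo_init);
module_exit(dup_demo_exit);
MODULE_LICENSE("GPL");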
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index f109c04f05ae..602013cf3e69 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -134,9 +134,9 @@ struct lme2510_state {
u8 stream_on;
u8 pid_size;
u8 pid_off;
- void *buffer;
+ u8 int_buffer[128];
struct urb *lme_urb;
- void *usb_buffer;
+ u8 usb_buffer[64];
/* Frontend original calls */
int (*fe_read_status)(struct dvb_frontend *, enum fe_status *);
int (*fe_read_signal_strength)(struct dvb_frontend *, u16 *);
@@ -147,59 +147,30 @@ struct lme2510_state {
u8 dvb_usb_lme2510_firmware;
};
-static int lme2510_bulk_write(struct usb_device *dev,
- u8 *snd, int len, u8 pipe)
-{
- int actual_l;
-
- return usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe),
- snd, len, &actual_l, 100);
-}
-
-static int lme2510_bulk_read(struct usb_device *dev,
- u8 *rev, int len, u8 pipe)
-{
- int actual_l;
-
- return usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe),
- rev, len, &actual_l, 200);
-}
-
static int lme2510_usb_talk(struct dvb_usb_device *d,
- u8 *wbuf, int wlen, u8 *rbuf, int rlen)
+ u8 *wbuf, int wlen, u8 *rbuf, int rlen)
{
struct lme2510_state *st = d->priv;
- u8 *buff;
int ret = 0;
- if (st->usb_buffer == NULL) {
- st->usb_buffer = kmalloc(64, GFP_KERNEL);
- if (st->usb_buffer == NULL) {
- info("MEM Error no memory");
- return -ENOMEM;
- }
- }
- buff = st->usb_buffer;
+ if (max(wlen, rlen) > sizeof(st->usb_buffer))
+ return -EINVAL;
ret = mutex_lock_interruptible(&d->usb_mutex);
-
if (ret < 0)
return -EAGAIN;
- /* the read/write capped at 64 */
- memcpy(buff, wbuf, (wlen < 64) ? wlen : 64);
+ memcpy(st->usb_buffer, wbuf, wlen);
- ret |= lme2510_bulk_write(d->udev, buff, wlen , 0x01);
+ ret = dvb_usbv2_generic_rw_locked(d, st->usb_buffer, wlen,
+ st->usb_buffer, rlen);
- ret |= lme2510_bulk_read(d->udev, buff, (rlen < 64) ?
- rlen : 64 , 0x01);
-
- if (rlen > 0)
- memcpy(rbuf, buff, rlen);
+ if (rlen)
+ memcpy(rbuf, st->usb_buffer, rlen);
mutex_unlock(&d->usb_mutex);
- return (ret < 0) ? -ENODEV : 0;
+ return ret;
}
static int lme2510_stream_restart(struct dvb_usb_device *d)
@@ -417,20 +388,14 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
if (lme_int->lme_urb == NULL)
return -ENOMEM;
- lme_int->buffer = usb_alloc_coherent(d->udev, 128, GFP_ATOMIC,
- &lme_int->lme_urb->transfer_dma);
-
- if (lme_int->buffer == NULL)
- return -ENOMEM;
-
usb_fill_int_urb(lme_int->lme_urb,
- d->udev,
- usb_rcvintpipe(d->udev, 0xa),
- lme_int->buffer,
- 128,
- lme2510_int_response,
- adap,
- 8);
+ d->udev,
+ usb_rcvintpipe(d->udev, 0xa),
+ lme_int->int_buffer,
+ sizeof(lme_int->int_buffer),
+ lme2510_int_response,
+ adap,
+ 8);
/* Quirk of pipe reporting PIPE_BULK but behaving as interrupt */
ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
@@ -438,8 +403,6 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
- lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
info("INT Interrupt Service Started");
@@ -1245,41 +1208,20 @@ static int lme2510_get_rc_config(struct dvb_usb_device *d,
return 0;
}
-static void *lme2510_exit_int(struct dvb_usb_device *d)
+static void lme2510_exit(struct dvb_usb_device *d)
{
struct lme2510_state *st = d->priv;
struct dvb_usb_adapter *adap = &d->adapter[0];
- void *buffer = NULL;
if (adap != NULL) {
lme2510_kill_urb(&adap->stream);
}
- if (st->usb_buffer != NULL) {
- st->i2c_talk_onoff = 1;
- st->signal_level = 0;
- st->signal_sn = 0;
- buffer = st->usb_buffer;
- }
-
- if (st->lme_urb != NULL) {
+ if (st->lme_urb) {
usb_kill_urb(st->lme_urb);
- usb_free_coherent(d->udev, 128, st->buffer,
- st->lme_urb->transfer_dma);
+ usb_free_urb(st->lme_urb);
info("Interrupt Service Stopped");
}
-
- return buffer;
-}
-
-static void lme2510_exit(struct dvb_usb_device *d)
-{
- void *usb_buffer;
-
- if (d != NULL) {
- usb_buffer = lme2510_exit_int(d);
- kfree(usb_buffer);
- }
}
static struct dvb_usb_device_properties lme2510_props = {
@@ -1288,6 +1230,8 @@ static struct dvb_usb_device_properties lme2510_props = {
.bInterfaceNumber = 0,
.adapter_nr = adapter_nr,
.size_of_priv = sizeof(struct lme2510_state),
+ .generic_bulk_ctrl_endpoint = 0x01,
+ .generic_bulk_ctrl_endpoint_response = 0x01,
.download_firmware = lme2510_download_firmware,
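
The lmedm04 rework above replaces a lazily allocated 64-byte scratch buffer with one embedded in the device state and routes transfers through the generic bulk-control helper, so the length check at the top of lme2510_usb_talk() is now what keeps callers from overrunning the fixed buffer. A userspace-style sketch of that guard; the names and the simulated transfer are illustrative:

#include <stdio.h>
#include <string.h>

#define USB_BUF_SIZE 64	/* mirrors u8 usb_buffer[64] in lme2510_state */

static int talk(unsigned char *dev_buf, const unsigned char *wbuf, size_t wlen,
		unsigned char *rbuf, size_t rlen)
{
	if (wlen > USB_BUF_SIZE || rlen > USB_BUF_SIZE)
		return -1;	/* -EINVAL in the driver */
	memcpy(dev_buf, wbuf, wlen);
	/* ... dvb_usbv2_generic_rw_locked() would do the transfer here ... */
	if (rlen)
		memcpy(rbuf, dev_buf, rlen);
	return 0;
}

int main(void)
{
	unsigned char dev_buf[USB_BUF_SIZE], cmd[2] = { 0x06, 0x00 }, resp[8];

	printf("small: %d\n", talk(dev_buf, cmd, sizeof(cmd), resp, sizeof(resp)));
	printf("oversized: %d\n", talk(dev_buf, cmd, 100, resp, 0));
	return 0;
}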
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 8a83b10e50e0..d0075cb743b2 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -384,6 +384,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
struct rtl28xxu_req req_r828d = {0x0074, CMD_I2C_RD, 1, buf};
struct rtl28xxu_req req_mn88472 = {0xff38, CMD_I2C_RD, 1, buf};
struct rtl28xxu_req req_mn88473 = {0xff38, CMD_I2C_RD, 1, buf};
+ struct rtl28xxu_req req_cxd2837er = {0xfdd8, CMD_I2C_RD, 1, buf};
struct rtl28xxu_req req_si2157 = {0x00c0, CMD_I2C_RD, 1, buf};
struct rtl28xxu_req req_si2168 = {0x00c8, CMD_I2C_RD, 1, buf};
@@ -540,7 +541,18 @@ tuner_found:
/* probe slave demod */
if (dev->tuner == TUNER_RTL2832_R828D) {
- /* power on MN88472 demod on GPIO0 */
+ /* power off slave demod on GPIO0 to reset CXD2837ER */
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_OUT_EN, 0x00, 0x01);
+ if (ret)
+ goto err;
+
+ msleep(50);
+
+ /* power on slave demod on GPIO0 */
ret = rtl28xxu_wr_reg_mask(d, SYS_GPIO_OUT_VAL, 0x01, 0x01);
if (ret)
goto err;
@@ -553,7 +565,7 @@ tuner_found:
if (ret)
goto err;
- /* check MN88472 answers */
+ /* check slave answers */
ret = rtl28xxu_ctrl_msg(d, &req_mn88472);
if (ret == 0 && buf[0] == 0x02) {
dev_dbg(&d->intf->dev, "MN88472 found\n");
@@ -567,6 +579,13 @@ tuner_found:
dev->slave_demod = SLAVE_DEMOD_MN88473;
goto demod_found;
}
+
+ ret = rtl28xxu_ctrl_msg(d, &req_cxd2837er);
+ if (ret == 0 && buf[0] == 0xb1) {
+ dev_dbg(&d->intf->dev, "CXD2837ER found\n");
+ dev->slave_demod = SLAVE_DEMOD_CXD2837ER;
+ goto demod_found;
+ }
}
if (dev->tuner == TUNER_RTL2832_SI2157) {
/* check Si2168 ID register; reg=c8 val=80 */
@@ -989,6 +1008,23 @@ static int rtl2832u_frontend_attach(struct dvb_usb_adapter *adap)
}
dev->i2c_client_slave_demod = client;
+ } else if (dev->slave_demod == SLAVE_DEMOD_CXD2837ER) {
+ struct cxd2841er_config cxd2837er_config = {};
+
+ cxd2837er_config.i2c_addr = 0xd8;
+ cxd2837er_config.xtal = SONY_XTAL_20500;
+ cxd2837er_config.flags = (CXD2841ER_AUTO_IFHZ |
+ CXD2841ER_NO_AGCNEG | CXD2841ER_TSBITS |
+ CXD2841ER_EARLY_TUNE | CXD2841ER_TS_SERIAL);
+ adap->fe[1] = dvb_attach(cxd2841er_attach_t_c,
+ &cxd2837er_config,
+ &d->i2c_adap);
+ if (!adap->fe[1]) {
+ dev->slave_demod = SLAVE_DEMOD_NONE;
+ goto err_slave_demod_failed;
+ }
+ adap->fe[1]->id = 1;
+ dev->i2c_client_slave_demod = NULL;
} else {
struct si2168_config si2168_config = {};
struct i2c_adapter *adapter;
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
index 138062960a73..197f4e339605 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
@@ -31,6 +31,7 @@
#include "rtl2832_sdr.h"
#include "mn88472.h"
#include "mn88473.h"
+#include "cxd2841er.h"
#include "qt1010.h"
#include "mt2060.h"
@@ -87,7 +88,8 @@ struct rtl28xxu_dev {
#define SLAVE_DEMOD_MN88472 1
#define SLAVE_DEMOD_MN88473 2
#define SLAVE_DEMOD_SI2168 3
- unsigned int slave_demod:2;
+ #define SLAVE_DEMOD_CXD2837ER 4
+ unsigned int slave_demod:3;
union {
struct rtl2830_platform_data rtl2830_platform_data;
struct rtl2832_platform_data rtl2832_platform_data;
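
Adding SLAVE_DEMOD_CXD2837ER (value 4) is why slave_demod grows from two bits to three: a 2-bit field keeps only the low two bits, so storing 4 would silently read back as 0, i.e. SLAVE_DEMOD_NONE. A standalone demonstration:

#include <stdio.h>

struct narrow { unsigned int slave_demod:2; };
struct wide { unsigned int slave_demod:3; };

int main(void)
{
	unsigned int cxd2837er = 4;	/* SLAVE_DEMOD_CXD2837ER */
	struct narrow n = { 0 };
	struct wide w = { 0 };

	n.slave_demod = cxd2837er;	/* truncated: reads back as 0 */
	w.slave_demod = cxd2837er;	/* fits: reads back as 4 */
	printf("2-bit field: %u, 3-bit field: %u\n",
	       n.slave_demod, w.slave_demod);
	return 0;
}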
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
index 024c751eb165..2ad2ddeaff51 100644
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -155,7 +155,6 @@ static int usb_urb_alloc_bulk_urbs(struct usb_data_stream *stream)
stream->props.u.bulk.buffersize,
usb_urb_complete, stream);
- stream->urb_list[i]->transfer_flags = URB_FREE_BUFFER;
stream->urbs_initialized++;
}
return 0;
@@ -186,7 +185,7 @@ static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream)
urb->complete = usb_urb_complete;
urb->pipe = usb_rcvisocpipe(stream->udev,
stream->props.endpoint);
- urb->transfer_flags = URB_ISO_ASAP | URB_FREE_BUFFER;
+ urb->transfer_flags = URB_ISO_ASAP;
urb->interval = stream->props.u.isoc.interval;
urb->number_of_packets = stream->props.u.isoc.framesperurb;
urb->transfer_buffer_length = stream->props.u.isoc.framesize *
@@ -210,7 +209,7 @@ static int usb_free_stream_buffers(struct usb_data_stream *stream)
if (stream->state & USB_STATE_URB_BUF) {
while (stream->buf_num) {
stream->buf_num--;
- stream->buf_list[stream->buf_num] = NULL;
+ kfree(stream->buf_list[stream->buf_num]);
}
}
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index 7551dce96f64..9311f7d4bba5 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -29,7 +29,7 @@
static int force_lna_activation;
module_param(force_lna_activation, int, 0644);
-MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplifyer(s) (LNA), if applicable for the device (default: 0=automatic/off).");
+MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplifier(s) (LNA), if applicable for the device (default: 0=automatic/off).");
struct dib0700_adapter_state {
int (*set_param_save) (struct dvb_frontend *);
diff --git a/drivers/media/usb/dvb-usb/friio-fe.c b/drivers/media/usb/dvb-usb/friio-fe.c
deleted file mode 100644
index e6bd0ed8d789..000000000000
--- a/drivers/media/usb/dvb-usb/friio-fe.c
+++ /dev/null
@@ -1,440 +0,0 @@
-/* DVB USB compliant Linux driver for the Friio USB2.0 ISDB-T receiver.
- *
- * Copyright (C) 2009 Akihiro Tsukada <tskd2@yahoo.co.jp>
- *
- * This module is based on the gl861 and vp702x modules.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
- */
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-
-#include "friio.h"
-
-struct jdvbt90502_state {
- struct i2c_adapter *i2c;
- struct dvb_frontend frontend;
- struct jdvbt90502_config config;
-};
-
-/* NOTE: TC90502 has 16bit register-address? */
-/* register 0x0100 is used for reading PLL status, so reg is u16 here */
-static int jdvbt90502_reg_read(struct jdvbt90502_state *state,
- const u16 reg, u8 *buf, const size_t count)
-{
- int ret;
- u8 wbuf[3];
- struct i2c_msg msg[2];
-
- wbuf[0] = reg & 0xFF;
- wbuf[1] = 0;
- wbuf[2] = reg >> 8;
-
- msg[0].addr = state->config.demod_address;
- msg[0].flags = 0;
- msg[0].buf = wbuf;
- msg[0].len = sizeof(wbuf);
-
- msg[1].addr = msg[0].addr;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = buf;
- msg[1].len = count;
-
- ret = i2c_transfer(state->i2c, msg, 2);
- if (ret != 2) {
- deb_fe(" reg read failed.\n");
- return -EREMOTEIO;
- }
- return 0;
-}
-
-/* currently 16bit register-address is not used, so reg is u8 here */
-static int jdvbt90502_single_reg_write(struct jdvbt90502_state *state,
- const u8 reg, const u8 val)
-{
- struct i2c_msg msg;
- u8 wbuf[2];
-
- wbuf[0] = reg;
- wbuf[1] = val;
-
- msg.addr = state->config.demod_address;
- msg.flags = 0;
- msg.buf = wbuf;
- msg.len = sizeof(wbuf);
-
- if (i2c_transfer(state->i2c, &msg, 1) != 1) {
- deb_fe(" reg write failed.");
- return -EREMOTEIO;
- }
- return 0;
-}
-
-static int _jdvbt90502_write(struct dvb_frontend *fe, const u8 buf[], int len)
-{
- struct jdvbt90502_state *state = fe->demodulator_priv;
- int err, i;
- for (i = 0; i < len - 1; i++) {
- err = jdvbt90502_single_reg_write(state,
- buf[0] + i, buf[i + 1]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/* read pll status byte via the demodulator's I2C register */
-/* note: Win box reads it by 8B block at the I2C addr 0x30 from reg:0x80 */
-static int jdvbt90502_pll_read(struct jdvbt90502_state *state, u8 *result)
-{
- int ret;
-
- /* +1 for reading */
- u8 pll_addr_byte = (state->config.pll_address << 1) + 1;
-
- *result = 0;
-
- ret = jdvbt90502_single_reg_write(state, JDVBT90502_2ND_I2C_REG,
- pll_addr_byte);
- if (ret)
- goto error;
-
- ret = jdvbt90502_reg_read(state, 0x0100, result, 1);
- if (ret)
- goto error;
-
- deb_fe("PLL read val:%02x\n", *result);
- return 0;
-
-error:
- deb_fe("%s:ret == %d\n", __func__, ret);
- return -EREMOTEIO;
-}
-
-
-/* set pll frequency via the demodulator's I2C register */
-static int jdvbt90502_pll_set_freq(struct jdvbt90502_state *state, u32 freq)
-{
- int ret;
- int retry;
- u8 res1;
- u8 res2[9];
-
- u8 pll_freq_cmd[PLL_CMD_LEN];
- u8 pll_agc_cmd[PLL_CMD_LEN];
- struct i2c_msg msg[2];
- u32 f;
-
- deb_fe("%s: freq=%d, step=%d\n", __func__, freq,
- state->frontend.ops.info.frequency_stepsize_hz);
- /* freq -> oscilator frequency conversion. */
- /* freq: 473,000,000 + n*6,000,000 [+ 142857 (center freq. shift)] */
- f = freq / state->frontend.ops.info.frequency_stepsize_hz;
- /* add 399[1/7 MHZ] = 57MHz for the IF */
- f += 399;
- /* add center frequency shift if necessary */
- if (f % 7 == 0)
- f++;
- pll_freq_cmd[DEMOD_REDIRECT_REG] = JDVBT90502_2ND_I2C_REG; /* 0xFE */
- pll_freq_cmd[ADDRESS_BYTE] = state->config.pll_address << 1;
- pll_freq_cmd[DIVIDER_BYTE1] = (f >> 8) & 0x7F;
- pll_freq_cmd[DIVIDER_BYTE2] = f & 0xFF;
- pll_freq_cmd[CONTROL_BYTE] = 0xB2; /* ref.divider:28, 4MHz/28=1/7MHz */
- pll_freq_cmd[BANDSWITCH_BYTE] = 0x08; /* UHF band */
-
- msg[0].addr = state->config.demod_address;
- msg[0].flags = 0;
- msg[0].buf = pll_freq_cmd;
- msg[0].len = sizeof(pll_freq_cmd);
-
- ret = i2c_transfer(state->i2c, &msg[0], 1);
- if (ret != 1)
- goto error;
-
- udelay(50);
-
- pll_agc_cmd[DEMOD_REDIRECT_REG] = pll_freq_cmd[DEMOD_REDIRECT_REG];
- pll_agc_cmd[ADDRESS_BYTE] = pll_freq_cmd[ADDRESS_BYTE];
- pll_agc_cmd[DIVIDER_BYTE1] = pll_freq_cmd[DIVIDER_BYTE1];
- pll_agc_cmd[DIVIDER_BYTE2] = pll_freq_cmd[DIVIDER_BYTE2];
- pll_agc_cmd[CONTROL_BYTE] = 0x9A; /* AGC_CTRL instead of BANDSWITCH */
- pll_agc_cmd[AGC_CTRL_BYTE] = 0x50;
- /* AGC Time Constant 2s, AGC take-over point:103dBuV(lowest) */
-
- msg[1].addr = msg[0].addr;
- msg[1].flags = 0;
- msg[1].buf = pll_agc_cmd;
- msg[1].len = sizeof(pll_agc_cmd);
-
- ret = i2c_transfer(state->i2c, &msg[1], 1);
- if (ret != 1)
- goto error;
-
- /* I don't know what these cmds are for, */
- /* but the USB log on a windows box contains them */
- ret = jdvbt90502_single_reg_write(state, 0x01, 0x40);
- ret |= jdvbt90502_single_reg_write(state, 0x01, 0x00);
- if (ret)
- goto error;
- udelay(100);
-
- /* wait for the demod to be ready? */
-#define RETRY_COUNT 5
- for (retry = 0; retry < RETRY_COUNT; retry++) {
- ret = jdvbt90502_reg_read(state, 0x0096, &res1, 1);
- if (ret)
- goto error;
- /* if (res1 != 0x00) goto error; */
- ret = jdvbt90502_reg_read(state, 0x00B0, res2, sizeof(res2));
- if (ret)
- goto error;
- if (res2[0] >= 0xA7)
- break;
- msleep(100);
- }
- if (retry >= RETRY_COUNT) {
- deb_fe("%s: FE does not get ready after freq setting.\n",
- __func__);
- return -EREMOTEIO;
- }
-
- return 0;
-error:
- deb_fe("%s:ret == %d\n", __func__, ret);
- return -EREMOTEIO;
-}
-
-static int jdvbt90502_read_status(struct dvb_frontend *fe,
- enum fe_status *state)
-{
- u8 result;
- int ret;
-
- *state = FE_HAS_SIGNAL;
-
- ret = jdvbt90502_pll_read(fe->demodulator_priv, &result);
- if (ret) {
- deb_fe("%s:ret == %d\n", __func__, ret);
- return -EREMOTEIO;
- }
-
- *state = FE_HAS_SIGNAL
- | FE_HAS_CARRIER
- | FE_HAS_VITERBI
- | FE_HAS_SYNC;
-
- if (result & PLL_STATUS_LOCKED)
- *state |= FE_HAS_LOCK;
-
- return 0;
-}
-
-static int jdvbt90502_read_signal_strength(struct dvb_frontend *fe,
- u16 *strength)
-{
- int ret;
- u8 rbuf[37];
-
- *strength = 0;
-
- /* status register (incl. signal strength) : 0x89 */
- /* TODO: read just the necessary registers [0x8B..0x8D]? */
- ret = jdvbt90502_reg_read(fe->demodulator_priv, 0x0089,
- rbuf, sizeof(rbuf));
-
- if (ret) {
- deb_fe("%s:ret == %d\n", __func__, ret);
- return -EREMOTEIO;
- }
-
- /* signal_strength: rbuf[2-4] (24bit BE), use lower 16bit for now. */
- *strength = (rbuf[3] << 8) + rbuf[4];
- if (rbuf[2])
- *strength = 0xffff;
-
- return 0;
-}
-
-static int jdvbt90502_set_frontend(struct dvb_frontend *fe)
-{
- struct dtv_frontend_properties *p = &fe->dtv_property_cache;
-
- /**
- * NOTE: ignore all the parameters except frequency.
- * others should be fixed to the proper value for ISDB-T,
- * but don't check here.
- */
-
- struct jdvbt90502_state *state = fe->demodulator_priv;
- int ret;
-
- deb_fe("%s: Freq:%d\n", __func__, p->frequency);
-
- /* This driver only works on auto mode */
- p->inversion = INVERSION_AUTO;
- p->bandwidth_hz = 6000000;
- p->code_rate_HP = FEC_AUTO;
- p->code_rate_LP = FEC_AUTO;
- p->modulation = QAM_64;
- p->transmission_mode = TRANSMISSION_MODE_AUTO;
- p->guard_interval = GUARD_INTERVAL_AUTO;
- p->hierarchy = HIERARCHY_AUTO;
- p->delivery_system = SYS_ISDBT;
-
- ret = jdvbt90502_pll_set_freq(state, p->frequency);
- if (ret) {
- deb_fe("%s:ret == %d\n", __func__, ret);
- return -EREMOTEIO;
- }
-
- return 0;
-}
-
-
-/*
- * (reg, val) command list to initialize this module.
- * captured on a Windows box.
- */
-static u8 init_code[][2] = {
- {0x01, 0x40},
- {0x04, 0x38},
- {0x05, 0x40},
- {0x07, 0x40},
- {0x0F, 0x4F},
- {0x11, 0x21},
- {0x12, 0x0B},
- {0x13, 0x2F},
- {0x14, 0x31},
- {0x16, 0x02},
- {0x21, 0xC4},
- {0x22, 0x20},
- {0x2C, 0x79},
- {0x2D, 0x34},
- {0x2F, 0x00},
- {0x30, 0x28},
- {0x31, 0x31},
- {0x32, 0xDF},
- {0x38, 0x01},
- {0x39, 0x78},
- {0x3B, 0x33},
- {0x3C, 0x33},
- {0x48, 0x90},
- {0x51, 0x68},
- {0x5E, 0x38},
- {0x71, 0x00},
- {0x72, 0x08},
- {0x77, 0x00},
- {0xC0, 0x21},
- {0xC1, 0x10},
- {0xE4, 0x1A},
- {0xEA, 0x1F},
- {0x77, 0x00},
- {0x71, 0x00},
- {0x71, 0x00},
- {0x76, 0x0C},
-};
-
-static int jdvbt90502_init(struct dvb_frontend *fe)
-{
- int i = -1;
- int ret;
- struct i2c_msg msg;
-
- struct jdvbt90502_state *state = fe->demodulator_priv;
-
- deb_fe("%s called.\n", __func__);
-
- msg.addr = state->config.demod_address;
- msg.flags = 0;
- msg.len = 2;
- for (i = 0; i < ARRAY_SIZE(init_code); i++) {
- msg.buf = init_code[i];
- ret = i2c_transfer(state->i2c, &msg, 1);
- if (ret != 1)
- goto error;
- }
- fe->dtv_property_cache.delivery_system = SYS_ISDBT;
- msleep(100);
-
- return 0;
-
-error:
- deb_fe("%s: init_code[%d] failed. ret==%d\n", __func__, i, ret);
- return -EREMOTEIO;
-}
-
-
-static void jdvbt90502_release(struct dvb_frontend *fe)
-{
- struct jdvbt90502_state *state = fe->demodulator_priv;
- kfree(state);
-}
-
-
-static const struct dvb_frontend_ops jdvbt90502_ops;
-
-struct dvb_frontend *jdvbt90502_attach(struct dvb_usb_device *d)
-{
- struct jdvbt90502_state *state = NULL;
-
- deb_info("%s called.\n", __func__);
-
- /* allocate memory for the internal state */
- state = kzalloc(sizeof(struct jdvbt90502_state), GFP_KERNEL);
- if (state == NULL)
- goto error;
-
- /* setup the state */
- state->i2c = &d->i2c_adap;
- state->config = friio_fe_config;
-
- /* create dvb_frontend */
- state->frontend.ops = jdvbt90502_ops;
- state->frontend.demodulator_priv = state;
-
- if (jdvbt90502_init(&state->frontend) < 0)
- goto error;
-
- return &state->frontend;
-
-error:
- kfree(state);
- return NULL;
-}
-
-static const struct dvb_frontend_ops jdvbt90502_ops = {
- .delsys = { SYS_ISDBT },
- .info = {
- .name = "Comtech JDVBT90502 ISDB-T",
- .frequency_min_hz = 473000000, /* UHF 13ch, center */
- .frequency_max_hz = 767142857, /* UHF 62ch, center */
- .frequency_stepsize_hz = JDVBT90502_PLL_CLK / JDVBT90502_PLL_DIVIDER,
-
- /* NOTE: this driver ignores all parameters but frequency. */
- .caps = FE_CAN_INVERSION_AUTO |
- FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
- FE_CAN_FEC_4_5 | FE_CAN_FEC_5_6 | FE_CAN_FEC_6_7 |
- FE_CAN_FEC_7_8 | FE_CAN_FEC_8_9 | FE_CAN_FEC_AUTO |
- FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
- FE_CAN_TRANSMISSION_MODE_AUTO |
- FE_CAN_GUARD_INTERVAL_AUTO |
- FE_CAN_HIERARCHY_AUTO,
- },
-
- .release = jdvbt90502_release,
-
- .init = jdvbt90502_init,
- .write = _jdvbt90502_write,
-
- .set_frontend = jdvbt90502_set_frontend,
-
- .read_status = jdvbt90502_read_status,
- .read_signal_strength = jdvbt90502_read_signal_strength,
-};
diff --git a/drivers/media/usb/dvb-usb/friio.c b/drivers/media/usb/dvb-usb/friio.c
deleted file mode 100644
index fe799a7ad44b..000000000000
--- a/drivers/media/usb/dvb-usb/friio.c
+++ /dev/null
@@ -1,522 +0,0 @@
-/* DVB USB compliant Linux driver for the Friio USB2.0 ISDB-T receiver.
- *
- * Copyright (C) 2009 Akihiro Tsukada <tskd2@yahoo.co.jp>
- *
- * This module is based on the gl861 and vp702x modules.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
- */
-#include "friio.h"
-
-/* debug */
-int dvb_usb_friio_debug;
-module_param_named(debug, dvb_usb_friio_debug, int, 0644);
-MODULE_PARM_DESC(debug,
- "set debugging level (1=info,2=xfer,4=rc,8=fe (or-able))."
- DVB_USB_DEBUG_STATUS);
-
-DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
-
-/*
- * Indirect I2C access to the PLL via FE.
- * whole I2C protocol data to the PLL is sent via the FE's I2C register.
- * This is done by a control msg to the FE with the I2C data accompanied, and
- * a specific USB request number is assigned for that purpose.
- *
- * this func sends wbuf[1..] to the I2C register wbuf[0] at addr (= at FE).
- * TODO: refactor into smarter i2c functions.
- */
-static int gl861_i2c_ctrlmsg_data(struct dvb_usb_device *d, u8 addr,
- u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
-{
- u16 index = wbuf[0]; /* must be JDVBT90502_2ND_I2C_REG(=0xFE) */
- u16 value = addr << (8 + 1);
- int wo = (rbuf == NULL || rlen == 0); /* write only */
- u8 req, type;
-
- deb_xfer("write to PLL:0x%02x via FE reg:0x%02x, len:%d\n",
- wbuf[1], wbuf[0], wlen - 1);
-
- if (wo && wlen >= 2) {
- req = GL861_REQ_I2C_DATA_CTRL_WRITE;
- type = GL861_WRITE;
- udelay(20);
- return usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
- req, type, value, index,
- &wbuf[1], wlen - 1, 2000);
- }
-
- deb_xfer("not supported ctrl-msg, aborting.");
- return -EINVAL;
-}
-
-/* normal I2C access (without extra data arguments).
- * write to the register wbuf[0] at I2C address addr with the value wbuf[1],
- * or read from the register wbuf[0].
- * register address can be 16bit (wbuf[2]<<8 | wbuf[0]) if wlen==3
- */
-static int gl861_i2c_msg(struct dvb_usb_device *d, u8 addr,
- u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen)
-{
- u16 index;
- u16 value = addr << (8 + 1);
- int wo = (rbuf == NULL || rlen == 0); /* write-only */
- u8 req, type;
- unsigned int pipe;
-
- /* special case for the indirect I2C access to the PLL via FE, */
- if (addr == friio_fe_config.demod_address &&
- wbuf[0] == JDVBT90502_2ND_I2C_REG)
- return gl861_i2c_ctrlmsg_data(d, addr, wbuf, wlen, rbuf, rlen);
-
- if (wo) {
- req = GL861_REQ_I2C_WRITE;
- type = GL861_WRITE;
- pipe = usb_sndctrlpipe(d->udev, 0);
- } else { /* rw */
- req = GL861_REQ_I2C_READ;
- type = GL861_READ;
- pipe = usb_rcvctrlpipe(d->udev, 0);
- }
-
- switch (wlen) {
- case 1:
- index = wbuf[0];
- break;
- case 2:
- index = wbuf[0];
- value = value + wbuf[1];
- break;
- case 3:
- /* special case for 16bit register-address */
- index = (wbuf[2] << 8) | wbuf[0];
- value = value + wbuf[1];
- break;
- default:
- deb_xfer("wlen = %x, aborting.", wlen);
- return -EINVAL;
- }
- msleep(1);
- return usb_control_msg(d->udev, pipe, req, type,
- value, index, rbuf, rlen, 2000);
-}
-
-/* I2C */
-static int gl861_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
- int num)
-{
- struct dvb_usb_device *d = i2c_get_adapdata(adap);
- int i;
-
-
- if (num > 2)
- return -EINVAL;
-
- if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
- return -EAGAIN;
-
- for (i = 0; i < num; i++) {
- /* write/read request */
- if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) {
- if (gl861_i2c_msg(d, msg[i].addr,
- msg[i].buf, msg[i].len,
- msg[i + 1].buf, msg[i + 1].len) < 0)
- break;
- i++;
- } else
- if (gl861_i2c_msg(d, msg[i].addr, msg[i].buf,
- msg[i].len, NULL, 0) < 0)
- break;
- }
-
- mutex_unlock(&d->i2c_mutex);
- return i;
-}
-
-static u32 gl861_i2c_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C;
-}
-
-static int friio_ext_ctl(struct dvb_usb_adapter *adap,
- u32 sat_color, int lnb_on)
-{
- int i;
- int ret;
- struct i2c_msg msg;
- u8 *buf;
- u32 mask;
- u8 lnb = (lnb_on) ? FRIIO_CTL_LNB : 0;
-
- buf = kmalloc(2, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- msg.addr = 0x00;
- msg.flags = 0;
- msg.len = 2;
- msg.buf = buf;
-
- buf[0] = 0x00;
-
- /* send 2bit header (&B10) */
- buf[1] = lnb | FRIIO_CTL_LED | FRIIO_CTL_STROBE;
- ret = gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
- buf[1] |= FRIIO_CTL_CLK;
- ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
-
- buf[1] = lnb | FRIIO_CTL_STROBE;
- ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
- buf[1] |= FRIIO_CTL_CLK;
- ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
-
- /* send 32bit(satur, R, G, B) data in serial */
- mask = 1 << 31;
- for (i = 0; i < 32; i++) {
- buf[1] = lnb | FRIIO_CTL_STROBE;
- if (sat_color & mask)
- buf[1] |= FRIIO_CTL_LED;
- ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
- buf[1] |= FRIIO_CTL_CLK;
- ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
- mask >>= 1;
- }
-
- /* set the strobe off */
- buf[1] = lnb;
- ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
- buf[1] |= FRIIO_CTL_CLK;
- ret += gl861_i2c_xfer(&adap->dev->i2c_adap, &msg, 1);
-
- kfree(buf);
- return (ret == 70);
-}
-
-
-static int friio_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff);
-
-/* TODO: move these init cmds to the FE's init routine? */
-static u8 streaming_init_cmds[][2] = {
- {0x33, 0x08},
- {0x37, 0x40},
- {0x3A, 0x1F},
- {0x3B, 0xFF},
- {0x3C, 0x1F},
- {0x3D, 0xFF},
- {0x38, 0x00},
- {0x35, 0x00},
- {0x39, 0x00},
- {0x36, 0x00},
-};
-static int cmdlen = sizeof(streaming_init_cmds) / 2;
-
-/*
- * Command sequence in this init function is a replay
- * of the captured USB commands from the Windows proprietary driver.
- */
-static int friio_initialize(struct dvb_usb_device *d)
-{
- int ret;
- int i;
- int retry = 0;
- u8 *rbuf, *wbuf;
-
- deb_info("%s called.\n", __func__);
-
- wbuf = kmalloc(3, GFP_KERNEL);
- if (!wbuf)
- return -ENOMEM;
-
- rbuf = kmalloc(2, GFP_KERNEL);
- if (!rbuf) {
- kfree(wbuf);
- return -ENOMEM;
- }
-
- /* use gl861_i2c_msg instead of gl861_i2c_xfer(), */
- /* because the i2c device is not set up yet. */
- wbuf[0] = 0x11;
- wbuf[1] = 0x02;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
- if (ret < 0)
- goto error;
- msleep(2);
-
- wbuf[0] = 0x11;
- wbuf[1] = 0x00;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
- if (ret < 0)
- goto error;
- msleep(1);
-
- /* following msgs should be in the FE's init code? */
- /* cmd sequence to identify the device type? (friio black/white) */
- wbuf[0] = 0x03;
- wbuf[1] = 0x80;
- /* can't use gl861_i2c_cmd, as the register-addr is 16bit(0x0100) */
- ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
- GL861_REQ_I2C_DATA_CTRL_WRITE, GL861_WRITE,
- 0x1200, 0x0100, wbuf, 2, 2000);
- if (ret < 0)
- goto error;
-
- msleep(2);
- wbuf[0] = 0x00;
- wbuf[2] = 0x01; /* reg.0x0100 */
- wbuf[1] = 0x00;
- ret = gl861_i2c_msg(d, 0x12 >> 1, wbuf, 3, rbuf, 2);
- /* my Friio White returns 0xffff. */
- if (ret < 0 || rbuf[0] != 0xff || rbuf[1] != 0xff)
- goto error;
-
- msleep(2);
- wbuf[0] = 0x03;
- wbuf[1] = 0x80;
- ret = usb_control_msg(d->udev, usb_sndctrlpipe(d->udev, 0),
- GL861_REQ_I2C_DATA_CTRL_WRITE, GL861_WRITE,
- 0x9000, 0x0100, wbuf, 2, 2000);
- if (ret < 0)
- goto error;
-
- msleep(2);
- wbuf[0] = 0x00;
- wbuf[2] = 0x01; /* reg.0x0100 */
- wbuf[1] = 0x00;
- ret = gl861_i2c_msg(d, 0x90 >> 1, wbuf, 3, rbuf, 2);
- /* my Friio White returns 0xffff again. */
- if (ret < 0 || rbuf[0] != 0xff || rbuf[1] != 0xff)
- goto error;
-
- msleep(1);
-
-restart:
- /* ============ start DEMOD init cmds ================== */
- /* read PLL status to clear the POR bit */
- wbuf[0] = JDVBT90502_2ND_I2C_REG;
- wbuf[1] = (FRIIO_PLL_ADDR << 1) + 1; /* +1 for reading */
- ret = gl861_i2c_msg(d, FRIIO_DEMOD_ADDR, wbuf, 2, NULL, 0);
- if (ret < 0)
- goto error;
-
- msleep(5);
- /* note: DEMODULATOR has 16bit register-address. */
- wbuf[0] = 0x00;
- wbuf[2] = 0x01; /* reg addr: 0x0100 */
- wbuf[1] = 0x00; /* val: not used */
- ret = gl861_i2c_msg(d, FRIIO_DEMOD_ADDR, wbuf, 3, rbuf, 1);
- if (ret < 0)
- goto error;
-/*
- msleep(1);
- wbuf[0] = 0x80;
- wbuf[1] = 0x00;
- ret = gl861_i2c_msg(d, FRIIO_DEMOD_ADDR, wbuf, 2, rbuf, 1);
- if (ret < 0)
- goto error;
- */
- if (rbuf[0] & 0x80) { /* still in PowerOnReset state? */
- if (++retry > 3) {
- deb_info("failed to get the correct FE demod status:0x%02x\n",
- rbuf[0]);
- goto error;
- }
- msleep(100);
- goto restart;
- }
-
- /* TODO: check return value in rbuf */
- /* =========== end DEMOD init cmds ===================== */
- msleep(1);
-
- wbuf[0] = 0x30;
- wbuf[1] = 0x04;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
- if (ret < 0)
- goto error;
-
- msleep(2);
- /* following 2 cmds unnecessary? */
- wbuf[0] = 0x00;
- wbuf[1] = 0x01;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
- if (ret < 0)
- goto error;
-
- wbuf[0] = 0x06;
- wbuf[1] = 0x0F;
- ret = gl861_i2c_msg(d, 0x00, wbuf, 2, NULL, 0);
- if (ret < 0)
- goto error;
-
- /* some streaming ctl cmds (maybe) */
- msleep(10);
- for (i = 0; i < cmdlen; i++) {
- ret = gl861_i2c_msg(d, 0x00, streaming_init_cmds[i], 2,
- NULL, 0);
- if (ret < 0)
- goto error;
- msleep(1);
- }
- msleep(20);
-
- /* change the LED color etc. */
- ret = friio_streaming_ctrl(&d->adapter[0], 0);
- if (ret < 0)
- goto error;
-
- return 0;
-
-error:
- kfree(wbuf);
- kfree(rbuf);
- deb_info("%s:ret == %d\n", __func__, ret);
- return -EIO;
-}
-
-/* Callbacks for DVB USB */
-
-static int friio_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
-{
- int ret;
-
- deb_info("%s called.(%d)\n", __func__, onoff);
-
- /* set the LED color and saturation (and LNB on) */
- if (onoff)
- ret = friio_ext_ctl(adap, 0x6400ff64, 1);
- else
- ret = friio_ext_ctl(adap, 0x96ff00ff, 1);
-
- if (ret != 1) {
- deb_info("%s failed to send cmdx. ret==%d\n", __func__, ret);
- return -EREMOTEIO;
- }
- return 0;
-}
-
-static int friio_frontend_attach(struct dvb_usb_adapter *adap)
-{
- if (friio_initialize(adap->dev) < 0)
- return -EIO;
-
- adap->fe_adap[0].fe = jdvbt90502_attach(adap->dev);
- if (adap->fe_adap[0].fe == NULL)
- return -EIO;
-
- return 0;
-}
-
-/* DVB USB Driver stuff */
-static struct dvb_usb_device_properties friio_properties;
-
-static int friio_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct dvb_usb_device *d;
- struct usb_host_interface *alt;
- int ret;
-
- if (intf->num_altsetting < GL861_ALTSETTING_COUNT)
- return -ENODEV;
-
- alt = usb_altnum_to_altsetting(intf, FRIIO_BULK_ALTSETTING);
- if (alt == NULL) {
- deb_rc("not alt found!\n");
- return -ENODEV;
- }
- ret = usb_set_interface(interface_to_usbdev(intf),
- alt->desc.bInterfaceNumber,
- alt->desc.bAlternateSetting);
- if (ret != 0) {
- deb_rc("failed to set alt-setting!\n");
- return ret;
- }
-
- ret = dvb_usb_device_init(intf, &friio_properties,
- THIS_MODULE, &d, adapter_nr);
- if (ret == 0)
- friio_streaming_ctrl(&d->adapter[0], 1);
-
- return ret;
-}
-
-
-struct jdvbt90502_config friio_fe_config = {
- .demod_address = FRIIO_DEMOD_ADDR,
- .pll_address = FRIIO_PLL_ADDR,
-};
-
-static struct i2c_algorithm gl861_i2c_algo = {
- .master_xfer = gl861_i2c_xfer,
- .functionality = gl861_i2c_func,
-};
-
-static struct usb_device_id friio_table[] = {
- { USB_DEVICE(USB_VID_774, USB_PID_FRIIO_WHITE) },
- { } /* Terminating entry */
-};
-MODULE_DEVICE_TABLE(usb, friio_table);
-
-
-static struct dvb_usb_device_properties friio_properties = {
- .caps = DVB_USB_IS_AN_I2C_ADAPTER,
- .usb_ctrl = DEVICE_SPECIFIC,
-
- .size_of_priv = 0,
-
- .num_adapters = 1,
- .adapter = {
- /* caps:0 => no pid filter, 188B TS packet */
- /* GL861 has a HW pid filter, but no info available. */
- {
- .num_frontends = 1,
- .fe = {{
- .caps = 0,
-
- .frontend_attach = friio_frontend_attach,
- .streaming_ctrl = friio_streaming_ctrl,
-
- .stream = {
- .type = USB_BULK,
- /* count <= MAX_NO_URBS_FOR_DATA_STREAM(10) */
- .count = 8,
- .endpoint = 0x01,
- .u = {
- /* GL861 has 6KB buf inside */
- .bulk = {
- .buffersize = 16384,
- }
- }
- },
- }},
- }
- },
- .i2c_algo = &gl861_i2c_algo,
-
- .num_device_descs = 1,
- .devices = {
- {
- .name = "774 Friio ISDB-T USB2.0",
- .cold_ids = { NULL },
- .warm_ids = { &friio_table[0], NULL },
- },
- }
-};
-
-static struct usb_driver friio_driver = {
- .name = "dvb_usb_friio",
- .probe = friio_probe,
- .disconnect = dvb_usb_device_exit,
- .id_table = friio_table,
-};
-
-module_usb_driver(friio_driver);
-
-MODULE_AUTHOR("Akihiro Tsukada <tskd2@yahoo.co.jp>");
-MODULE_DESCRIPTION("Driver for Friio ISDB-T USB2.0 Receiver");
-MODULE_VERSION("0.2");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb/friio.h b/drivers/media/usb/dvb-usb/friio.h
deleted file mode 100644
index a53af56d035c..000000000000
--- a/drivers/media/usb/dvb-usb/friio.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* DVB USB compliant Linux driver for the Friio USB2.0 ISDB-T receiver.
- *
- * Copyright (C) 2009 Akihiro Tsukada <tskd2@yahoo.co.jp>
- *
- * This module is based on the gl861 and vp702x modules.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation, version 2.
- *
- * see Documentation/media/dvb-drivers/dvb-usb.rst for more information
- */
-#ifndef _DVB_USB_FRIIO_H_
-#define _DVB_USB_FRIIO_H_
-
-/**
- * Friio Components
- * USB hub: AU4254
- * USB controller(+ TS dmx & streaming): GL861
- * Frontend: comtech JDVBT-90502
- * (tuner PLL: tua6034, I2C addr:(0xC0 >> 1))
- * (OFDM demodulator: TC90502, I2C addr:(0x30 >> 1))
- * LED x3 (+LNB) control: PIC 16F676
- * EEPROM: 24C08
- *
- * (USB smart card reader: AU9522)
- *
- */
-
-#define DVB_USB_LOG_PREFIX "friio"
-#include "dvb-usb.h"
-
-extern int dvb_usb_friio_debug;
-#define deb_info(args...) dprintk(dvb_usb_friio_debug, 0x01, args)
-#define deb_xfer(args...) dprintk(dvb_usb_friio_debug, 0x02, args)
-#define deb_rc(args...) dprintk(dvb_usb_friio_debug, 0x04, args)
-#define deb_fe(args...) dprintk(dvb_usb_friio_debug, 0x08, args)
-
-/* Vendor requests */
-#define GL861_WRITE 0x40
-#define GL861_READ 0xc0
-
-/* command bytes */
-#define GL861_REQ_I2C_WRITE 0x01
-#define GL861_REQ_I2C_READ 0x02
-/* For control msg with data argument */
-/* Used for accessing the PLL on the secondary I2C bus of FE via GL861 */
-#define GL861_REQ_I2C_DATA_CTRL_WRITE 0x03
-
-#define GL861_ALTSETTING_COUNT 2
-#define FRIIO_BULK_ALTSETTING 0
-#define FRIIO_ISOC_ALTSETTING 1
-
-/* LED & LNB control via PIC. */
-/* basically, it's serial control with clock and strobe. */
-/* write the below 4bit control data to the reg 0x00 at the I2C addr 0x00 */
-/* when controlling the LEDs, 32bit(saturation, R, G, B) is sent on the bit3*/
-#define FRIIO_CTL_LNB (1 << 0)
-#define FRIIO_CTL_STROBE (1 << 1)
-#define FRIIO_CTL_CLK (1 << 2)
-#define FRIIO_CTL_LED (1 << 3)
-
-/* Front End related */
-
-#define FRIIO_DEMOD_ADDR (0x30 >> 1)
-#define FRIIO_PLL_ADDR (0xC0 >> 1)
-
-#define JDVBT90502_PLL_CLK 4000000
-#define JDVBT90502_PLL_DIVIDER 28
-
-#define JDVBT90502_2ND_I2C_REG 0xFE
-
-/* byte index for pll i2c command data structure*/
-/* see datasheet for tua6034 */
-#define DEMOD_REDIRECT_REG 0
-#define ADDRESS_BYTE 1
-#define DIVIDER_BYTE1 2
-#define DIVIDER_BYTE2 3
-#define CONTROL_BYTE 4
-#define BANDSWITCH_BYTE 5
-#define AGC_CTRL_BYTE 5
-#define PLL_CMD_LEN 6
-
-/* bit masks for PLL STATUS response */
-#define PLL_STATUS_POR_MODE 0x80 /* 1: Power on Reset (test) Mode */
-#define PLL_STATUS_LOCKED 0x40 /* 1: locked */
-#define PLL_STATUS_AGC_ACTIVE 0x08 /* 1:active */
-#define PLL_STATUS_TESTMODE 0x07 /* digital output level (5 level) */
- /* 0.15Vcc step 0x00: < 0.15Vcc, ..., 0x04: >= 0.6Vcc (<= 1Vcc) */
-
-
-struct jdvbt90502_config {
- u8 demod_address; /* i2c addr for demodulator IC */
- u8 pll_address; /* PLL addr on the secondary i2c*/
-};
-extern struct jdvbt90502_config friio_fe_config;
-
-extern struct dvb_frontend *jdvbt90502_attach(struct dvb_usb_device *d);
-#endif
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 87b887b7604e..1283c7ca9ad5 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -1958,7 +1958,7 @@ const struct em28xx_board em28xx_boards[] = {
} },
},
[EM2882_BOARD_TERRATEC_HYBRID_XS] = {
- .name = "Terratec Cinnergy Hybrid T USB XS (em2882)",
+ .name = "Terratec Cinergy Hybrid T USB XS (em2882)",
.tuner_type = TUNER_XC2028,
.tuner_gpio = default_tuner_gpio,
.mts_firmware = 1,
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index fce9d6f4b7c9..3137f5d89d80 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -426,10 +426,10 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
/* append the packet to the frame buffer */
if (len > 0) {
- if (gspca_dev->image_len + len > gspca_dev->pixfmt.sizeimage) {
+ if (gspca_dev->image_len + len > PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)) {
gspca_err(gspca_dev, "frame overflow %d > %d\n",
gspca_dev->image_len + len,
- gspca_dev->pixfmt.sizeimage);
+ PAGE_ALIGN(gspca_dev->pixfmt.sizeimage));
packet_type = DISCARD_PACKET;
} else {
/* !! image is NULL only when last pkt is LAST or DISCARD
@@ -1297,18 +1297,19 @@ static int gspca_queue_setup(struct vb2_queue *vq,
unsigned int sizes[], struct device *alloc_devs[])
{
struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq);
+ unsigned int size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage);
if (*nplanes)
- return sizes[0] < gspca_dev->pixfmt.sizeimage ? -EINVAL : 0;
+ return sizes[0] < size ? -EINVAL : 0;
*nplanes = 1;
- sizes[0] = gspca_dev->pixfmt.sizeimage;
+ sizes[0] = size;
return 0;
}
static int gspca_buffer_prepare(struct vb2_buffer *vb)
{
struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue);
- unsigned long size = gspca_dev->pixfmt.sizeimage;
+ unsigned long size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage);
if (vb2_plane_size(vb, 0) < size) {
gspca_err(gspca_dev, "buffer too small (%lu < %lu)\n",
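
The gspca changes bound each frame by PAGE_ALIGN(sizeimage) instead of the raw sizeimage, tolerating devices that deliver slightly more data than the nominal image size while staying inside the page-aligned plane allocation. PAGE_ALIGN just rounds up to the next page boundary; a userspace re-implementation for illustration (PAGE_SIZE is per-architecture in the kernel, 4096 is assumed here):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed; arch-specific in the kernel */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long aligned = 640UL * 480 * 2;	/* 614400, a page multiple */
	unsigned long odd = aligned + 1;

	printf("%lu -> %lu\n", aligned, PAGE_ALIGN(aligned));	/* unchanged */
	printf("%lu -> %lu\n", odd, PAGE_ALIGN(odd));	/* rounds up to 618496 */
	return 0;
}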
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index 365c78b748dd..b085b14f3f87 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -586,7 +586,7 @@ unlock:
else
pulse8->config_pending = true;
mutex_unlock(&pulse8->config_lock);
- return err;
+ return log_addr == CEC_LOG_ADDR_INVALID ? 0 : err;
}
static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 7702285c1519..446a999dd2ce 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -1698,7 +1698,7 @@ static int pvr2_hdw_untrip_unlocked(struct pvr2_hdw *hdw)
if (!hdw->flag_tripped) return 0;
hdw->flag_tripped = 0;
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "Clearing driver error statuss");
+ "Clearing driver error status");
return !0;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 97a93ed4bcda..08d5b7aa3537 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -703,16 +703,19 @@ static int pvr2_try_ext_ctrls(struct file *file, void *priv,
return 0;
}
-static int pvr2_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cap)
+static int pvr2_g_pixelaspect(struct file *file, void *priv,
+ int type, struct v4l2_fract *f)
{
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ struct v4l2_cropcap cap = { .type = type };
int ret;
- if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
- ret = pvr2_hdw_get_cropcap(hdw, cap);
- cap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* paranoia */
+ ret = pvr2_hdw_get_cropcap(hdw, &cap);
+ if (!ret)
+ *f = cap.pixelaspect;
return ret;
}
@@ -815,7 +818,7 @@ static const struct v4l2_ioctl_ops pvr2_ioctl_ops = {
.vidioc_g_audio = pvr2_g_audio,
.vidioc_enumaudio = pvr2_enumaudio,
.vidioc_enum_input = pvr2_enum_input,
- .vidioc_cropcap = pvr2_cropcap,
+ .vidioc_g_pixelaspect = pvr2_g_pixelaspect,
.vidioc_s_selection = pvr2_s_selection,
.vidioc_g_selection = pvr2_g_selection,
.vidioc_g_input = pvr2_g_input,
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index be3634407f1f..2ffded08407b 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -225,10 +225,9 @@ static int smsusb_sendrequest(void *context, void *buffer, size_t size)
return -ENOENT;
}
- phdr = kmalloc(size, GFP_KERNEL);
+ phdr = kmemdup(buffer, size, GFP_KERNEL);
if (!phdr)
return -ENOMEM;
- memcpy(phdr, buffer, size);
pr_debug("sending %s(%d) size: %d\n",
smscore_translate_msg(phdr->msg_type), phdr->msg_type,
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index e11d5d5b7c26..b8ec74d98e8d 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -116,6 +116,13 @@ static const struct dmi_system_id stk_upside_down_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "T12Rg-H")
}
},
+ {
+ .ident = "ASUS A6VM",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "A6VM")
+ }
+ },
{}
};
@@ -164,7 +171,11 @@ int stk_camera_read_reg(struct stk_camera *dev, u16 index, u8 *value)
*value = *buf;
kfree(buf);
- return ret;
+
+ if (ret < 0)
+ return ret;
+ else
+ return 0;
}
static int stk_start_stream(struct stk_camera *dev)
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index bc369a0934a3..b62cbd800111 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -214,6 +214,11 @@ static struct uvc_format_desc uvc_fmts[] = {
.guid = UVC_GUID_FORMAT_INZI,
.fcc = V4L2_PIX_FMT_INZI,
},
+ {
+ .name = "4-bit Depth Confidence (Packed)",
+ .guid = UVC_GUID_FORMAT_CNF4,
+ .fcc = V4L2_PIX_FMT_CNF4,
+ },
};
/* ------------------------------------------------------------------------
@@ -391,6 +396,50 @@ static struct uvc_streaming *uvc_stream_by_id(struct uvc_device *dev, int id)
}
/* ------------------------------------------------------------------------
+ * Streaming Object Management
+ */
+
+static void uvc_stream_delete(struct uvc_streaming *stream)
+{
+ if (stream->async_wq)
+ destroy_workqueue(stream->async_wq);
+
+ mutex_destroy(&stream->mutex);
+
+ usb_put_intf(stream->intf);
+
+ kfree(stream->format);
+ kfree(stream->header.bmaControls);
+ kfree(stream);
+}
+
+static struct uvc_streaming *uvc_stream_new(struct uvc_device *dev,
+ struct usb_interface *intf)
+{
+ struct uvc_streaming *stream;
+
+ stream = kzalloc(sizeof(*stream), GFP_KERNEL);
+ if (stream == NULL)
+ return NULL;
+
+ mutex_init(&stream->mutex);
+
+ stream->dev = dev;
+ stream->intf = usb_get_intf(intf);
+ stream->intfnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ /* Allocate a stream specific work queue for asynchronous tasks. */
+ stream->async_wq = alloc_workqueue("uvcvideo", WQ_UNBOUND | WQ_HIGHPRI,
+ 0);
+ if (!stream->async_wq) {
+ uvc_stream_delete(stream);
+ return NULL;
+ }
+
+ return stream;
+}
+
+/* ------------------------------------------------------------------------
* Descriptors parsing
*/
@@ -682,17 +731,12 @@ static int uvc_parse_streaming(struct uvc_device *dev,
return -EINVAL;
}
- streaming = kzalloc(sizeof(*streaming), GFP_KERNEL);
+ streaming = uvc_stream_new(dev, intf);
if (streaming == NULL) {
usb_driver_release_interface(&uvc_driver.driver, intf);
- return -EINVAL;
+ return -ENOMEM;
}
- mutex_init(&streaming->mutex);
- streaming->dev = dev;
- streaming->intf = usb_get_intf(intf);
- streaming->intfnum = intf->cur_altsetting->desc.bInterfaceNumber;
-
/* The Pico iMage webcam has its class-specific interface descriptors
* after the endpoint descriptors.
*/
@@ -899,10 +943,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
error:
usb_driver_release_interface(&uvc_driver.driver, intf);
- usb_put_intf(intf);
- kfree(streaming->format);
- kfree(streaming->header.bmaControls);
- kfree(streaming);
+ uvc_stream_delete(streaming);
return ret;
}
@@ -1810,7 +1851,7 @@ static int uvc_scan_device(struct uvc_device *dev)
* is released.
*
* As this function is called after or during disconnect(), all URBs have
- * already been canceled by the USB core. There is no need to kill the
+ * already been cancelled by the USB core. There is no need to kill the
* interrupt URB manually.
*/
static void uvc_delete(struct kref *kref)
@@ -1824,11 +1865,7 @@ static void uvc_delete(struct kref *kref)
usb_put_intf(dev->intf);
usb_put_dev(dev->udev);
- if (dev->vdev.dev)
- v4l2_device_unregister(&dev->vdev);
#ifdef CONFIG_MEDIA_CONTROLLER
- if (media_devnode_is_registered(dev->mdev.devnode))
- media_device_unregister(&dev->mdev);
media_device_cleanup(&dev->mdev);
#endif
@@ -1852,10 +1889,7 @@ static void uvc_delete(struct kref *kref)
streaming = list_entry(p, struct uvc_streaming, list);
usb_driver_release_interface(&uvc_driver.driver,
streaming->intf);
- usb_put_intf(streaming->intf);
- kfree(streaming->format);
- kfree(streaming->header.bmaControls);
- kfree(streaming);
+ uvc_stream_delete(streaming);
}
kfree(dev);
@@ -1885,6 +1919,15 @@ static void uvc_unregister_video(struct uvc_device *dev)
uvc_debugfs_cleanup_stream(stream);
}
+
+ uvc_status_unregister(dev);
+
+ if (dev->vdev.dev)
+ v4l2_device_unregister(&dev->vdev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+ if (media_devnode_is_registered(dev->mdev.devnode))
+ media_device_unregister(&dev->mdev);
+#endif
}
int uvc_register_video_device(struct uvc_device *dev,
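
The new uvc_stream_new()/uvc_stream_delete() helpers pair each streaming object with a dedicated workqueue for the asynchronous decode work introduced later in this series, and tear it down (draining pending work) before the stream memory is freed. A minimal kernel-module sketch of that allocate/queue/destroy pattern; the names are illustrative:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: running in process context\n");
}

static int __init wq_demo_init(void)
{
	/* Same flags uvc_stream_new() passes for its per-stream queue. */
	demo_wq = alloc_workqueue("wq_demo", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit wq_demo_exit(void)
{
	/* Drains pending work before freeing, as uvc_stream_delete() relies on. */
	destroy_workqueue(demo_wq);
}

module_init(wq_demo_init);
module_exit(wq_demo_exit);
MODULE_LICENSE("GPL");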
diff --git a/drivers/media/usb/uvc/uvc_isight.c b/drivers/media/usb/uvc/uvc_isight.c
index 81e6f2187bfb..39a4e4482b23 100644
--- a/drivers/media/usb/uvc/uvc_isight.c
+++ b/drivers/media/usb/uvc/uvc_isight.c
@@ -99,9 +99,11 @@ static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
return 0;
}
-void uvc_video_decode_isight(struct urb *urb, struct uvc_streaming *stream,
- struct uvc_buffer *buf, struct uvc_buffer *meta_buf)
+void uvc_video_decode_isight(struct uvc_urb *uvc_urb, struct uvc_buffer *buf,
+ struct uvc_buffer *meta_buf)
{
+ struct urb *urb = uvc_urb->urb;
+ struct uvc_streaming *stream = uvc_urb->stream;
int ret, i;
for (i = 0; i < urb->number_of_packets; ++i) {
diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
index 8964e16f2b22..682698ec1118 100644
--- a/drivers/media/usb/uvc/uvc_queue.c
+++ b/drivers/media/usb/uvc/uvc_queue.c
@@ -142,6 +142,7 @@ static void uvc_buffer_queue(struct vb2_buffer *vb)
spin_lock_irqsave(&queue->irqlock, flags);
if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
+ kref_init(&buf->ref);
list_add_tail(&buf->queue, &queue->irqqueue);
} else {
/* If the device is disconnected return the buffer to userspace
@@ -169,18 +170,19 @@ static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
struct uvc_streaming *stream = uvc_queue_to_stream(queue);
- unsigned long flags;
int ret;
+ lockdep_assert_irqs_enabled();
+
queue->buf_used = 0;
- ret = uvc_video_enable(stream, 1);
+ ret = uvc_video_start_streaming(stream);
if (ret == 0)
return 0;
- spin_lock_irqsave(&queue->irqlock, flags);
+ spin_lock_irq(&queue->irqlock);
uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED);
- spin_unlock_irqrestore(&queue->irqlock, flags);
+ spin_unlock_irq(&queue->irqlock);
return ret;
}
@@ -188,14 +190,15 @@ static int uvc_start_streaming(struct vb2_queue *vq, unsigned int count)
static void uvc_stop_streaming(struct vb2_queue *vq)
{
struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
- unsigned long flags;
+
+ lockdep_assert_irqs_enabled();
if (vq->type != V4L2_BUF_TYPE_META_CAPTURE)
- uvc_video_enable(uvc_queue_to_stream(queue), 0);
+ uvc_video_stop_streaming(uvc_queue_to_stream(queue));
- spin_lock_irqsave(&queue->irqlock, flags);
+ spin_lock_irq(&queue->irqlock);
uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR);
- spin_unlock_irqrestore(&queue->irqlock, flags);
+ spin_unlock_irq(&queue->irqlock);
}
static const struct vb2_ops uvc_queue_qops = {
@@ -430,32 +433,93 @@ void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
spin_unlock_irqrestore(&queue->irqlock, flags);
}
-struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
- struct uvc_buffer *buf)
+/*
+ * uvc_queue_get_current_buffer: Obtain the current working output buffer
+ *
+ * Buffers may span multiple packets, and even URBs; therefore the active buffer
+ * remains on the queue until the EOF marker.
+ */
+static struct uvc_buffer *
+__uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
+{
+ if (list_empty(&queue->irqqueue))
+ return NULL;
+
+ return list_first_entry(&queue->irqqueue, struct uvc_buffer, queue);
+}
+
+struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue)
{
struct uvc_buffer *nextbuf;
unsigned long flags;
+ spin_lock_irqsave(&queue->irqlock, flags);
+ nextbuf = __uvc_queue_get_current_buffer(queue);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
+ return nextbuf;
+}
+
+/*
+ * uvc_queue_buffer_requeue: Requeue a buffer on our internal irqqueue
+ *
+ * Reuse a buffer through our internal queue without the need to 'prepare'.
+ * The buffer will be returned to userspace through the uvc_buffer_queue call if
+ * the device has been disconnected.
+ */
+static void uvc_queue_buffer_requeue(struct uvc_video_queue *queue,
+ struct uvc_buffer *buf)
+{
+ buf->error = 0;
+ buf->state = UVC_BUF_STATE_QUEUED;
+ buf->bytesused = 0;
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
+
+ uvc_buffer_queue(&buf->buf.vb2_buf);
+}
+
+static void uvc_queue_buffer_complete(struct kref *ref)
+{
+ struct uvc_buffer *buf = container_of(ref, struct uvc_buffer, ref);
+ struct vb2_buffer *vb = &buf->buf.vb2_buf;
+ struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
+
if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) {
- buf->error = 0;
- buf->state = UVC_BUF_STATE_QUEUED;
- buf->bytesused = 0;
- vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
- return buf;
+ uvc_queue_buffer_requeue(queue, buf);
+ return;
}
+ buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
+ vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
+ vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+/*
+ * Release a reference on the buffer. Complete the buffer when the last
+ * reference is released.
+ */
+void uvc_queue_buffer_release(struct uvc_buffer *buf)
+{
+ kref_put(&buf->ref, uvc_queue_buffer_complete);
+}
+
+/*
+ * Remove this buffer from the queue. Lifetime will persist while async actions
+ * are still running (if any), and uvc_queue_buffer_release will give the buffer
+ * back to VB2 when all users have completed.
+ */
+struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
+ struct uvc_buffer *buf)
+{
+ struct uvc_buffer *nextbuf;
+ unsigned long flags;
+
spin_lock_irqsave(&queue->irqlock, flags);
list_del(&buf->queue);
- if (!list_empty(&queue->irqqueue))
- nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
- queue);
- else
- nextbuf = NULL;
+ nextbuf = __uvc_queue_get_current_buffer(queue);
spin_unlock_irqrestore(&queue->irqlock, flags);
- buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
- vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
- vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
+ uvc_queue_buffer_release(buf);
return nextbuf;
}
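
The queue rework above hangs buffer lifetime off a struct kref: uvc_buffer_queue() initialises the count, every deferred copy takes an extra reference, and the buffer is only handed back to vb2 when the final uvc_queue_buffer_release() drops the count to zero. A minimal kernel-module sketch of the same pattern; the names are illustrative, not the uvcvideo ones:

#include <linux/kref.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_buf {
	struct kref ref;
	int id;
};

static void demo_buf_complete(struct kref *ref)
{
	struct demo_buf *buf = container_of(ref, struct demo_buf, ref);

	pr_info("buf %d: last reference gone, completing\n", buf->id);
	kfree(buf);
}

static int __init kref_demo_init(void)
{
	struct demo_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	kref_init(&buf->ref);	/* count = 1, as in uvc_buffer_queue() */
	kref_get(&buf->ref);	/* async user, as in uvc_video_decode_data() */

	kref_put(&buf->ref, demo_buf_complete);	/* async work finished */
	kref_put(&buf->ref, demo_buf_complete);	/* queue done: completes now */
	return 0;
}

static void __exit kref_demo_exit(void)
{
}

module_init(kref_demo_init);
module_exit(kref_demo_exit);
MODULE_LICENSE("GPL");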
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 0722dc684378..883e4cab45e7 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -54,7 +54,7 @@ error:
return ret;
}
-static void uvc_input_cleanup(struct uvc_device *dev)
+static void uvc_input_unregister(struct uvc_device *dev)
{
if (dev->input)
input_unregister_device(dev->input);
@@ -71,7 +71,7 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
#else
#define uvc_input_init(dev)
-#define uvc_input_cleanup(dev)
+#define uvc_input_unregister(dev)
#define uvc_input_report_key(dev, code, value)
#endif /* CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV */
@@ -292,12 +292,16 @@ int uvc_status_init(struct uvc_device *dev)
return 0;
}
-void uvc_status_cleanup(struct uvc_device *dev)
+void uvc_status_unregister(struct uvc_device *dev)
{
usb_kill_urb(dev->int_urb);
+ uvc_input_unregister(dev);
+}
+
+void uvc_status_cleanup(struct uvc_device *dev)
+{
usb_free_urb(dev->int_urb);
kfree(dev->status);
- uvc_input_cleanup(dev);
}
int uvc_status_start(struct uvc_device *dev, gfp_t flags)
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 86a99f461fd8..84525ff04745 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1094,21 +1094,54 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return data[0];
}
-static void uvc_video_decode_data(struct uvc_streaming *stream,
+/*
+ * uvc_video_copy_data_work: Asynchronous memcpy processing
+ *
+ * Copy URB data to video buffers in process context, releasing buffer
+ * references and requeuing the URB when done.
+ */
+static void uvc_video_copy_data_work(struct work_struct *work)
+{
+ struct uvc_urb *uvc_urb = container_of(work, struct uvc_urb, work);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < uvc_urb->async_operations; i++) {
+ struct uvc_copy_op *op = &uvc_urb->copy_operations[i];
+
+ memcpy(op->dst, op->src, op->len);
+
+ /* Release reference taken on this buffer. */
+ uvc_queue_buffer_release(op->buf);
+ }
+
+ ret = usb_submit_urb(uvc_urb->urb, GFP_KERNEL);
+ if (ret < 0)
+ uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n",
+ ret);
+}
+
+static void uvc_video_decode_data(struct uvc_urb *uvc_urb,
struct uvc_buffer *buf, const u8 *data, int len)
{
- unsigned int maxlen, nbytes;
- void *mem;
+ unsigned int active_op = uvc_urb->async_operations;
+ struct uvc_copy_op *op = &uvc_urb->copy_operations[active_op];
+ unsigned int maxlen;
if (len <= 0)
return;
- /* Copy the video data to the buffer. */
maxlen = buf->length - buf->bytesused;
- mem = buf->mem + buf->bytesused;
- nbytes = min((unsigned int)len, maxlen);
- memcpy(mem, data, nbytes);
- buf->bytesused += nbytes;
+
+ /* Take a buffer reference for async work. */
+ kref_get(&buf->ref);
+
+ op->buf = buf;
+ op->src = data;
+ op->dst = buf->mem + buf->bytesused;
+ op->len = min_t(unsigned int, len, maxlen);
+
+ buf->bytesused += op->len;
/* Complete the current frame if the buffer size was exceeded. */
if (len > maxlen) {
@@ -1116,6 +1149,8 @@ static void uvc_video_decode_data(struct uvc_streaming *stream,
buf->error = 1;
buf->state = UVC_BUF_STATE_READY;
}
+
+ uvc_urb->async_operations++;
}
static void uvc_video_decode_end(struct uvc_streaming *stream,
@@ -1291,9 +1326,11 @@ static void uvc_video_next_buffers(struct uvc_streaming *stream,
*video_buf = uvc_queue_next_buffer(&stream->queue, *video_buf);
}
-static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
+static void uvc_video_decode_isoc(struct uvc_urb *uvc_urb,
struct uvc_buffer *buf, struct uvc_buffer *meta_buf)
{
+ struct urb *urb = uvc_urb->urb;
+ struct uvc_streaming *stream = uvc_urb->stream;
u8 *mem;
int ret, i;
@@ -1322,7 +1359,7 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
uvc_video_decode_meta(stream, meta_buf, mem, ret);
/* Decode the payload data. */
- uvc_video_decode_data(stream, buf, mem + ret,
+ uvc_video_decode_data(uvc_urb, buf, mem + ret,
urb->iso_frame_desc[i].actual_length - ret);
/* Process the header again. */
@@ -1334,9 +1371,11 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
}
}
-static void uvc_video_decode_bulk(struct urb *urb, struct uvc_streaming *stream,
+static void uvc_video_decode_bulk(struct uvc_urb *uvc_urb,
struct uvc_buffer *buf, struct uvc_buffer *meta_buf)
{
+ struct urb *urb = uvc_urb->urb;
+ struct uvc_streaming *stream = uvc_urb->stream;
u8 *mem;
int len, ret;
@@ -1380,9 +1419,9 @@ static void uvc_video_decode_bulk(struct urb *urb, struct uvc_streaming *stream,
* sure buf is never dereferenced if NULL.
*/
- /* Process video data. */
+ /* Prepare video data for processing. */
if (!stream->bulk.skip_payload && buf != NULL)
- uvc_video_decode_data(stream, buf, mem, len);
+ uvc_video_decode_data(uvc_urb, buf, mem, len);
/* Detect the payload end by a URB smaller than the maximum size (or
* a payload size equal to the maximum) and process the header again.
@@ -1402,9 +1441,12 @@ static void uvc_video_decode_bulk(struct urb *urb, struct uvc_streaming *stream,
}
}
-static void uvc_video_encode_bulk(struct urb *urb, struct uvc_streaming *stream,
+static void uvc_video_encode_bulk(struct uvc_urb *uvc_urb,
struct uvc_buffer *buf, struct uvc_buffer *meta_buf)
{
+ struct urb *urb = uvc_urb->urb;
+ struct uvc_streaming *stream = uvc_urb->stream;
+
u8 *mem = urb->transfer_buffer;
int len = stream->urb_size, ret;
@@ -1447,7 +1489,8 @@ static void uvc_video_encode_bulk(struct urb *urb, struct uvc_streaming *stream,
static void uvc_video_complete(struct urb *urb)
{
- struct uvc_streaming *stream = urb->context;
+ struct uvc_urb *uvc_urb = urb->context;
+ struct uvc_streaming *stream = uvc_urb->stream;
struct uvc_video_queue *queue = &stream->queue;
struct uvc_video_queue *qmeta = &stream->meta.queue;
struct vb2_queue *vb2_qmeta = stream->meta.vdev.queue;
@@ -1464,7 +1507,7 @@ static void uvc_video_complete(struct urb *urb)
uvc_printk(KERN_WARNING, "Non-zero status (%d) in video "
"completion handler.\n", urb->status);
/* fall through */
- case -ENOENT: /* usb_kill_urb() called. */
+ case -ENOENT: /* usb_poison_urb() called. */
if (stream->frozen)
return;
/* fall through */
@@ -1476,11 +1519,7 @@ static void uvc_video_complete(struct urb *urb)
return;
}
- spin_lock_irqsave(&queue->irqlock, flags);
- if (!list_empty(&queue->irqqueue))
- buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
- queue);
- spin_unlock_irqrestore(&queue->irqlock, flags);
+ buf = uvc_queue_get_current_buffer(queue);
if (vb2_qmeta) {
spin_lock_irqsave(&qmeta->irqlock, flags);
@@ -1490,12 +1529,26 @@ static void uvc_video_complete(struct urb *urb)
spin_unlock_irqrestore(&qmeta->irqlock, flags);
}
- stream->decode(urb, stream, buf, buf_meta);
+ /* Re-initialise the URB async work. */
+ uvc_urb->async_operations = 0;
- if ((ret = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
- uvc_printk(KERN_ERR, "Failed to resubmit video URB (%d).\n",
- ret);
+ /*
+ * Process the URB headers, and optionally queue expensive memcpy tasks
+ * to be deferred to a work queue.
+ */
+ stream->decode(uvc_urb, buf, buf_meta);
+
+ /* If no async work is needed, resubmit the URB immediately. */
+ if (!uvc_urb->async_operations) {
+ ret = usb_submit_urb(uvc_urb->urb, GFP_ATOMIC);
+ if (ret < 0)
+ uvc_printk(KERN_ERR,
+ "Failed to resubmit video URB (%d).\n",
+ ret);
+ return;
}
+
+ queue_work(stream->async_wq, &uvc_urb->work);
}
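
Note the allocation-flag asymmetry this creates: a resubmit from the completion handler must use GFP_ATOMIC (interrupt context), while the deferred resubmit in the work handler can use GFP_KERNEL. A condensed sketch of the decision, reusing the hypothetical demo_ctx from the previous sketch extended with a workqueue pointer wq:

    #include <linux/usb.h>
    #include <linux/printk.h>

    /* Sketch only; mirrors the structure of uvc_video_complete(). */
    static void demo_complete(struct urb *urb)
    {
        struct demo_ctx *ctx = urb->context;

        if (!ctx->nr_ops) {
            /* Nothing deferred: resubmit directly, atomic context. */
            if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
                pr_err("demo: resubmit failed\n");
            return;
        }

        /* Let the work handler copy the data and resubmit with GFP_KERNEL. */
        queue_work(ctx->wq, &ctx->work);
    }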
/*
@@ -1503,18 +1556,19 @@ static void uvc_video_complete(struct urb *urb)
*/
static void uvc_free_urb_buffers(struct uvc_streaming *stream)
{
- unsigned int i;
+ struct uvc_urb *uvc_urb;
+
+ for_each_uvc_urb(uvc_urb, stream) {
+ if (!uvc_urb->buffer)
+ continue;
- for (i = 0; i < UVC_URBS; ++i) {
- if (stream->urb_buffer[i]) {
#ifndef CONFIG_DMA_NONCOHERENT
- usb_free_coherent(stream->dev->udev, stream->urb_size,
- stream->urb_buffer[i], stream->urb_dma[i]);
+ usb_free_coherent(stream->dev->udev, stream->urb_size,
+ uvc_urb->buffer, uvc_urb->dma);
#else
- kfree(stream->urb_buffer[i]);
+ kfree(uvc_urb->buffer);
#endif
- stream->urb_buffer[i] = NULL;
- }
+ uvc_urb->buffer = NULL;
}
stream->urb_size = 0;
@@ -1551,19 +1605,23 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
/* Retry allocations until one succeeds. */
for (; npackets > 1; npackets /= 2) {
for (i = 0; i < UVC_URBS; ++i) {
+ struct uvc_urb *uvc_urb = &stream->uvc_urb[i];
+
stream->urb_size = psize * npackets;
#ifndef CONFIG_DMA_NONCOHERENT
- stream->urb_buffer[i] = usb_alloc_coherent(
+ uvc_urb->buffer = usb_alloc_coherent(
stream->dev->udev, stream->urb_size,
- gfp_flags | __GFP_NOWARN, &stream->urb_dma[i]);
+ gfp_flags | __GFP_NOWARN, &uvc_urb->dma);
#else
- stream->urb_buffer[i] =
+ uvc_urb->buffer =
kmalloc(stream->urb_size, gfp_flags | __GFP_NOWARN);
#endif
- if (!stream->urb_buffer[i]) {
+ if (!uvc_urb->buffer) {
uvc_free_urb_buffers(stream);
break;
}
+
+ uvc_urb->stream = stream;
}
if (i == UVC_URBS) {
@@ -1582,21 +1640,26 @@ static int uvc_alloc_urb_buffers(struct uvc_streaming *stream,
/*
* Uninitialize isochronous/bulk URBs and free transfer buffers.
*/
-static void uvc_uninit_video(struct uvc_streaming *stream, int free_buffers)
+static void uvc_video_stop_transfer(struct uvc_streaming *stream,
+ int free_buffers)
{
- struct urb *urb;
- unsigned int i;
+ struct uvc_urb *uvc_urb;
uvc_video_stats_stop(stream);
- for (i = 0; i < UVC_URBS; ++i) {
- urb = stream->urb[i];
- if (urb == NULL)
- continue;
+ /*
+ * We must poison the URBs rather than kill them to ensure that even
+ * after the completion handler returns, any asynchronous work item
+ * is prevented from resubmitting the URBs.
+ */
+ for_each_uvc_urb(uvc_urb, stream)
+ usb_poison_urb(uvc_urb->urb);
- usb_kill_urb(urb);
- usb_free_urb(urb);
- stream->urb[i] = NULL;
+ flush_workqueue(stream->async_wq);
+
+ for_each_uvc_urb(uvc_urb, stream) {
+ usb_free_urb(uvc_urb->urb);
+ uvc_urb->urb = NULL;
}
if (free_buffers)
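
The comment above is the key behavioural change: usb_kill_urb() only guarantees that the URB is no longer running, while usb_poison_urb() additionally makes any later usb_submit_urb() fail with -EPERM, which stops the async work handler from racing a resubmit. A hedged sketch of the shutdown ordering, with hypothetical names:

    #include <linux/usb.h>
    #include <linux/workqueue.h>

    /* Sketch: stop a stream whose work handler resubmits URBs. */
    static void demo_stop(struct urb *urbs[], int n,
                          struct workqueue_struct *wq)
    {
        int i;

        /* 1. Poison first: in-flight completions finish, resubmits fail. */
        for (i = 0; i < n; i++)
            usb_poison_urb(urbs[i]);

        /* 2. Wait for any queued work (which may try to resubmit). */
        flush_workqueue(wq);

        /* 3. Only now is it safe to free the URBs. */
        for (i = 0; i < n; i++)
            usb_free_urb(urbs[i]);
    }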
@@ -1637,7 +1700,8 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream,
struct usb_host_endpoint *ep, gfp_t gfp_flags)
{
struct urb *urb;
- unsigned int npackets, i, j;
+ struct uvc_urb *uvc_urb;
+ unsigned int npackets, i;
u16 psize;
u32 size;
@@ -1650,35 +1714,35 @@ static int uvc_init_video_isoc(struct uvc_streaming *stream,
size = npackets * psize;
- for (i = 0; i < UVC_URBS; ++i) {
+ for_each_uvc_urb(uvc_urb, stream) {
urb = usb_alloc_urb(npackets, gfp_flags);
if (urb == NULL) {
- uvc_uninit_video(stream, 1);
+ uvc_video_stop_transfer(stream, 1);
return -ENOMEM;
}
urb->dev = stream->dev->udev;
- urb->context = stream;
+ urb->context = uvc_urb;
urb->pipe = usb_rcvisocpipe(stream->dev->udev,
ep->desc.bEndpointAddress);
#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
- urb->transfer_dma = stream->urb_dma[i];
+ urb->transfer_dma = uvc_urb->dma;
#else
urb->transfer_flags = URB_ISO_ASAP;
#endif
urb->interval = ep->desc.bInterval;
- urb->transfer_buffer = stream->urb_buffer[i];
+ urb->transfer_buffer = uvc_urb->buffer;
urb->complete = uvc_video_complete;
urb->number_of_packets = npackets;
urb->transfer_buffer_length = size;
- for (j = 0; j < npackets; ++j) {
- urb->iso_frame_desc[j].offset = j * psize;
- urb->iso_frame_desc[j].length = psize;
+ for (i = 0; i < npackets; ++i) {
+ urb->iso_frame_desc[i].offset = i * psize;
+ urb->iso_frame_desc[i].length = psize;
}
- stream->urb[i] = urb;
+ uvc_urb->urb = urb;
}
return 0;
@@ -1692,7 +1756,8 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
struct usb_host_endpoint *ep, gfp_t gfp_flags)
{
struct urb *urb;
- unsigned int npackets, pipe, i;
+ struct uvc_urb *uvc_urb;
+ unsigned int npackets, pipe;
u16 psize;
u32 size;
@@ -1716,22 +1781,21 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
if (stream->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
size = 0;
- for (i = 0; i < UVC_URBS; ++i) {
+ for_each_uvc_urb(uvc_urb, stream) {
urb = usb_alloc_urb(0, gfp_flags);
if (urb == NULL) {
- uvc_uninit_video(stream, 1);
+ uvc_video_stop_transfer(stream, 1);
return -ENOMEM;
}
- usb_fill_bulk_urb(urb, stream->dev->udev, pipe,
- stream->urb_buffer[i], size, uvc_video_complete,
- stream);
+ usb_fill_bulk_urb(urb, stream->dev->udev, pipe, uvc_urb->buffer,
+ size, uvc_video_complete, uvc_urb);
#ifndef CONFIG_DMA_NONCOHERENT
urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
- urb->transfer_dma = stream->urb_dma[i];
+ urb->transfer_dma = uvc_urb->dma;
#endif
- stream->urb[i] = urb;
+ uvc_urb->urb = urb;
}
return 0;
@@ -1740,10 +1804,12 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
/*
* Initialize isochronous/bulk URBs and allocate transfer buffers.
*/
-static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
+static int uvc_video_start_transfer(struct uvc_streaming *stream,
+ gfp_t gfp_flags)
{
struct usb_interface *intf = stream->intf;
struct usb_host_endpoint *ep;
+ struct uvc_urb *uvc_urb;
unsigned int i;
int ret;
@@ -1821,12 +1887,12 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
return ret;
/* Submit the URBs. */
- for (i = 0; i < UVC_URBS; ++i) {
- ret = usb_submit_urb(stream->urb[i], gfp_flags);
+ for_each_uvc_urb(uvc_urb, stream) {
+ ret = usb_submit_urb(uvc_urb->urb, gfp_flags);
if (ret < 0) {
- uvc_printk(KERN_ERR, "Failed to submit URB %u "
- "(%d).\n", i, ret);
- uvc_uninit_video(stream, 1);
+ uvc_printk(KERN_ERR, "Failed to submit URB %u (%d).\n",
+ uvc_urb_index(uvc_urb), ret);
+ uvc_video_stop_transfer(stream, 1);
return ret;
}
}
@@ -1857,7 +1923,7 @@ int uvc_video_suspend(struct uvc_streaming *stream)
return 0;
stream->frozen = 1;
- uvc_uninit_video(stream, 0);
+ uvc_video_stop_transfer(stream, 0);
usb_set_interface(stream->dev->udev, stream->intfnum, 0);
return 0;
}
@@ -1893,7 +1959,7 @@ int uvc_video_resume(struct uvc_streaming *stream, int reset)
if (ret < 0)
return ret;
- return uvc_init_video(stream, GFP_NOIO);
+ return uvc_video_start_transfer(stream, GFP_NOIO);
}
/* ------------------------------------------------------------------------
@@ -1915,6 +1981,7 @@ int uvc_video_init(struct uvc_streaming *stream)
struct uvc_streaming_control *probe = &stream->ctrl;
struct uvc_format *format = NULL;
struct uvc_frame *frame = NULL;
+ struct uvc_urb *uvc_urb;
unsigned int i;
int ret;
@@ -2000,41 +2067,17 @@ int uvc_video_init(struct uvc_streaming *stream)
}
}
+ /* Prepare asynchronous work items. */
+ for_each_uvc_urb(uvc_urb, stream)
+ INIT_WORK(&uvc_urb->work, uvc_video_copy_data_work);
+
return 0;
}
-/*
- * Enable or disable the video stream.
- */
-int uvc_video_enable(struct uvc_streaming *stream, int enable)
+int uvc_video_start_streaming(struct uvc_streaming *stream)
{
int ret;
- if (!enable) {
- uvc_uninit_video(stream, 1);
- if (stream->intf->num_altsetting > 1) {
- usb_set_interface(stream->dev->udev,
- stream->intfnum, 0);
- } else {
- /* UVC doesn't specify how to inform a bulk-based device
- * when the video stream is stopped. Windows sends a
- * CLEAR_FEATURE(HALT) request to the video streaming
- * bulk endpoint, mimic the same behaviour.
- */
- unsigned int epnum = stream->header.bEndpointAddress
- & USB_ENDPOINT_NUMBER_MASK;
- unsigned int dir = stream->header.bEndpointAddress
- & USB_ENDPOINT_DIR_MASK;
- unsigned int pipe;
-
- pipe = usb_sndbulkpipe(stream->dev->udev, epnum) | dir;
- usb_clear_halt(stream->dev->udev, pipe);
- }
-
- uvc_video_clock_cleanup(stream);
- return 0;
- }
-
ret = uvc_video_clock_init(stream);
if (ret < 0)
return ret;
@@ -2044,7 +2087,7 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
if (ret < 0)
goto error_commit;
- ret = uvc_init_video(stream, GFP_KERNEL);
+ ret = uvc_video_start_transfer(stream, GFP_KERNEL);
if (ret < 0)
goto error_video;
@@ -2057,3 +2100,28 @@ error_commit:
return ret;
}
+
+void uvc_video_stop_streaming(struct uvc_streaming *stream)
+{
+ uvc_video_stop_transfer(stream, 1);
+
+ if (stream->intf->num_altsetting > 1) {
+ usb_set_interface(stream->dev->udev, stream->intfnum, 0);
+ } else {
+ /* UVC doesn't specify how to inform a bulk-based device
+ * when the video stream is stopped. Windows sends a
+ * CLEAR_FEATURE(HALT) request to the video streaming
+ * bulk endpoint, so mimic the same behaviour.
+ */
+ unsigned int epnum = stream->header.bEndpointAddress
+ & USB_ENDPOINT_NUMBER_MASK;
+ unsigned int dir = stream->header.bEndpointAddress
+ & USB_ENDPOINT_DIR_MASK;
+ unsigned int pipe;
+
+ pipe = usb_sndbulkpipe(stream->dev->udev, epnum) | dir;
+ usb_clear_halt(stream->dev->udev, pipe);
+ }
+
+ uvc_video_clock_cleanup(stream);
+}
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index c0cbd833d0a4..9b41b14ce076 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -154,6 +154,9 @@
#define UVC_GUID_FORMAT_INVI \
{ 'I', 'N', 'V', 'I', 0xdb, 0x57, 0x49, 0x5e, \
0x8e, 0x3f, 0xf4, 0x79, 0x53, 0x2b, 0x94, 0x6f}
+#define UVC_GUID_FORMAT_CNF4 \
+ { 'C', ' ', ' ', ' ', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_D3DFMT_L8 \
{0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \
@@ -410,6 +413,9 @@ struct uvc_buffer {
unsigned int bytesused;
u32 pts;
+
+ /* Asynchronous buffer handling. */
+ struct kref ref;
};
#define UVC_QUEUE_DISCONNECTED (1 << 0)
@@ -487,6 +493,44 @@ struct uvc_stats_stream {
#define UVC_METATADA_BUF_SIZE 1024
+/**
+ * struct uvc_copy_op - Context structure to schedule asynchronous memcpy
+ *
+ * @buf: active buf object for this operation
+ * @dst: copy destination address
+ * @src: copy source address
+ * @len: copy length
+ */
+struct uvc_copy_op {
+ struct uvc_buffer *buf;
+ void *dst;
+ const __u8 *src;
+ size_t len;
+};
+
+/**
+ * struct uvc_urb - URB context management structure
+ *
+ * @urb: the URB described by this context structure
+ * @stream: UVC streaming context
+ * @buffer: memory storage for the URB
+ * @dma: DMA coherent addressing for the urb_buffer
+ * @async_operations: counter to indicate the number of copy operations
+ * @copy_operations: work descriptors for asynchronous copy operations
+ * @work: work queue entry for asynchronous decode
+ */
+struct uvc_urb {
+ struct urb *urb;
+ struct uvc_streaming *stream;
+
+ char *buffer;
+ dma_addr_t dma;
+
+ unsigned int async_operations;
+ struct uvc_copy_op copy_operations[UVC_MAX_PACKETS];
+ struct work_struct work;
+};
+
struct uvc_streaming {
struct list_head list;
struct uvc_device *dev;
@@ -517,8 +561,9 @@ struct uvc_streaming {
/* Buffers queue. */
unsigned int frozen : 1;
struct uvc_video_queue queue;
- void (*decode) (struct urb *urb, struct uvc_streaming *video,
- struct uvc_buffer *buf, struct uvc_buffer *meta_buf);
+ struct workqueue_struct *async_wq;
+ void (*decode)(struct uvc_urb *uvc_urb, struct uvc_buffer *buf,
+ struct uvc_buffer *meta_buf);
struct {
struct video_device vdev;
@@ -535,9 +580,7 @@ struct uvc_streaming {
u32 max_payload_size;
} bulk;
- struct urb *urb[UVC_URBS];
- char *urb_buffer[UVC_URBS];
- dma_addr_t urb_dma[UVC_URBS];
+ struct uvc_urb uvc_urb[UVC_URBS];
unsigned int urb_size;
u32 sequence;
@@ -572,6 +615,14 @@ struct uvc_streaming {
} clock;
};
+#define for_each_uvc_urb(uvc_urb, uvc_streaming) \
+ for ((uvc_urb) = &(uvc_streaming)->uvc_urb[0]; \
+ (uvc_urb) < &(uvc_streaming)->uvc_urb[UVC_URBS]; \
+ ++(uvc_urb))
+
+#define uvc_urb_index(uvc_urb) \
+ (unsigned int)((uvc_urb) - (&(uvc_urb)->stream->uvc_urb[0]))
+
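for_each_uvc_urb() walks the embedded uvc_urb array by address, and uvc_urb_index() recovers the array index by pointer arithmetic against the array base, so no separate loop counter is needed. Illustrative usage, assuming a struct uvc_streaming *stream:

    struct uvc_urb *uvc_urb;

    for_each_uvc_urb(uvc_urb, stream)
        pr_debug("URB %u buffer %p\n",
                 uvc_urb_index(uvc_urb), uvc_urb->buffer);
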
struct uvc_device_info {
u32 quirks;
u32 meta_format;
@@ -711,6 +762,8 @@ int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type);
void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect);
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
struct uvc_buffer *buf);
+struct uvc_buffer *uvc_queue_get_current_buffer(struct uvc_video_queue *queue);
+void uvc_queue_buffer_release(struct uvc_buffer *buf);
int uvc_queue_mmap(struct uvc_video_queue *queue,
struct vm_area_struct *vma);
__poll_t uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
@@ -737,7 +790,8 @@ void uvc_mc_cleanup_entity(struct uvc_entity *entity);
int uvc_video_init(struct uvc_streaming *stream);
int uvc_video_suspend(struct uvc_streaming *stream);
int uvc_video_resume(struct uvc_streaming *stream, int reset);
-int uvc_video_enable(struct uvc_streaming *stream, int enable);
+int uvc_video_start_streaming(struct uvc_streaming *stream);
+void uvc_video_stop_streaming(struct uvc_streaming *stream);
int uvc_probe_video(struct uvc_streaming *stream,
struct uvc_streaming_control *probe);
int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
@@ -757,6 +811,7 @@ int uvc_register_video_device(struct uvc_device *dev,
/* Status */
int uvc_status_init(struct uvc_device *dev);
+void uvc_status_unregister(struct uvc_device *dev);
void uvc_status_cleanup(struct uvc_device *dev);
int uvc_status_start(struct uvc_device *dev, gfp_t flags);
void uvc_status_stop(struct uvc_device *dev);
@@ -806,7 +861,7 @@ struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts,
u8 epaddr);
/* Quirks support */
-void uvc_video_decode_isight(struct urb *urb, struct uvc_streaming *stream,
+void uvc_video_decode_isight(struct uvc_urb *uvc_urb,
struct uvc_buffer *buf,
struct uvc_buffer *meta_buf);
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index b97090e85996..c0940f5c69b4 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -30,6 +30,7 @@ config VIDEO_FIXED_MINOR_RANGES
config VIDEO_PCI_SKELETON
tristate "Skeleton PCI V4L2 driver"
depends on PCI
+ depends on SAMPLES
depends on VIDEO_V4L2 && VIDEOBUF2_CORE
depends on VIDEOBUF2_MEMOPS && VIDEOBUF2_DMA_CONTIG
---help---
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index a6d91370838d..15b0c44a76e7 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -424,11 +424,7 @@ static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
{
- mutex_lock(&list_lock);
-
INIT_LIST_HEAD(&notifier->asd_list);
-
- mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_notifier_init);
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index f4325329fbd6..fe4577a46869 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -323,6 +323,7 @@ static int __get_v4l2_format32(struct v4l2_format __user *p64,
return copy_in_user(&p64->fmt.sdr, &p32->fmt.sdr,
sizeof(p64->fmt.sdr)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
return copy_in_user(&p64->fmt.meta, &p32->fmt.meta,
sizeof(p64->fmt.meta)) ? -EFAULT : 0;
default:
@@ -392,6 +393,7 @@ static int __put_v4l2_format32(struct v4l2_format __user *p64,
return copy_in_user(&p32->fmt.sdr, &p64->fmt.sdr,
sizeof(p64->fmt.sdr)) ? -EFAULT : 0;
case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
return copy_in_user(&p32->fmt.meta, &p64->fmt.meta,
sizeof(p64->fmt.meta)) ? -EFAULT : 0;
default:
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 5f2b033a7a42..5e3806feb5d7 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1563,7 +1563,7 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
u64 offset;
s64 val;
- switch (ctrl->type) {
+ switch ((u32)ctrl->type) {
case V4L2_CTRL_TYPE_INTEGER:
return ROUND_TO_RANGE(ptr.p_s32[idx], u32, ctrl);
case V4L2_CTRL_TYPE_INTEGER64:
@@ -1636,7 +1636,8 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
switch (p_mpeg2_slice_params->picture.intra_dc_precision) {
case 0: /* 8 bits */
case 1: /* 9 bits */
- case 11: /* 11 bits */
+ case 2: /* 10 bits */
+ case 3: /* 11 bits */
break;
default:
return -EINVAL;
@@ -2232,7 +2233,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
is_array = nr_of_dims > 0;
/* Prefill elem_size for all types handled by std_type_ops */
- switch (type) {
+ switch ((u32)type) {
case V4L2_CTRL_TYPE_INTEGER64:
elem_size = sizeof(s64);
break;
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index feb749aaaa42..d7528f82a66a 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -597,7 +597,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_enum_fmt_vid_overlay ||
ops->vidioc_enum_fmt_meta_cap)) ||
(is_tx && (ops->vidioc_enum_fmt_vid_out ||
- ops->vidioc_enum_fmt_vid_out_mplane)))
+ ops->vidioc_enum_fmt_vid_out_mplane ||
+ ops->vidioc_enum_fmt_meta_out)))
set_bit(_IOC_NR(VIDIOC_ENUM_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_g_fmt_vid_cap ||
ops->vidioc_g_fmt_vid_cap_mplane ||
@@ -605,7 +606,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_g_fmt_meta_cap)) ||
(is_tx && (ops->vidioc_g_fmt_vid_out ||
ops->vidioc_g_fmt_vid_out_mplane ||
- ops->vidioc_g_fmt_vid_out_overlay)))
+ ops->vidioc_g_fmt_vid_out_overlay ||
+ ops->vidioc_g_fmt_meta_out)))
set_bit(_IOC_NR(VIDIOC_G_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_s_fmt_vid_cap ||
ops->vidioc_s_fmt_vid_cap_mplane ||
@@ -613,7 +615,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_s_fmt_meta_cap)) ||
(is_tx && (ops->vidioc_s_fmt_vid_out ||
ops->vidioc_s_fmt_vid_out_mplane ||
- ops->vidioc_s_fmt_vid_out_overlay)))
+ ops->vidioc_s_fmt_vid_out_overlay ||
+ ops->vidioc_s_fmt_meta_out)))
set_bit(_IOC_NR(VIDIOC_S_FMT), valid_ioctls);
if ((is_rx && (ops->vidioc_try_fmt_vid_cap ||
ops->vidioc_try_fmt_vid_cap_mplane ||
@@ -621,7 +624,8 @@ static void determine_valid_ioctls(struct video_device *vdev)
ops->vidioc_try_fmt_meta_cap)) ||
(is_tx && (ops->vidioc_try_fmt_vid_out ||
ops->vidioc_try_fmt_vid_out_mplane ||
- ops->vidioc_try_fmt_vid_out_overlay)))
+ ops->vidioc_try_fmt_vid_out_overlay ||
+ ops->vidioc_try_fmt_meta_out)))
set_bit(_IOC_NR(VIDIOC_TRY_FMT), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_OVERLAY, vidioc_overlay);
SET_VALID_IOCTL(ops, VIDIOC_G_FBUF, vidioc_g_fbuf);
@@ -635,14 +639,14 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMESIZES, vidioc_enum_framesizes);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_FRAMEINTERVALS, vidioc_enum_frameintervals);
- if (ops->vidioc_g_crop || ops->vidioc_g_selection)
+ if (ops->vidioc_g_selection) {
set_bit(_IOC_NR(VIDIOC_G_CROP), valid_ioctls);
- if (ops->vidioc_s_crop || ops->vidioc_s_selection)
+ set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
+ }
+ if (ops->vidioc_s_selection)
set_bit(_IOC_NR(VIDIOC_S_CROP), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_G_SELECTION, vidioc_g_selection);
SET_VALID_IOCTL(ops, VIDIOC_S_SELECTION, vidioc_s_selection);
- if (ops->vidioc_cropcap || ops->vidioc_g_selection)
- set_bit(_IOC_NR(VIDIOC_CROPCAP), valid_ioctls);
} else if (is_vbi) {
/* vbi specific ioctls */
if ((is_rx && (ops->vidioc_g_fmt_vbi_cap ||
diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
index df0ac38c4050..e0ddb9a52bd1 100644
--- a/drivers/media/v4l2-core/v4l2-device.c
+++ b/drivers/media/v4l2-core/v4l2-device.c
@@ -247,6 +247,7 @@ int v4l2_device_register_subdev_nodes(struct v4l2_device *v4l2_dev)
video_set_drvdata(vdev, sd);
strscpy(vdev->name, sd->name, sizeof(vdev->name));
+ vdev->dev_parent = sd->dev;
vdev->v4l2_dev = v4l2_dev;
vdev->fops = &v4l2_subdev_fops;
vdev->release = v4l2_device_release_subdev_node;
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 218f0da0ce76..9bfedd7596a1 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -310,8 +310,8 @@ v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode,
}
if (!fwnode_property_read_u32(fwnode, "data-active", &v)) {
- flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING |
- V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_LOW);
flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
V4L2_MBUS_DATA_ACTIVE_LOW;
pr_debug("data-active %s\n", v ? "high" : "low");
@@ -564,8 +564,7 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
fwnode = fwnode_get_parent(__fwnode);
fwnode_property_read_u32(fwnode, port_prop, &link->local_port);
fwnode = fwnode_get_next_parent(fwnode);
- if (is_of_node(fwnode) &&
- of_node_cmp(to_of_node(fwnode)->name, "ports") == 0)
+ if (is_of_node(fwnode) && of_node_name_eq(to_of_node(fwnode), "ports"))
fwnode = fwnode_get_next_parent(fwnode);
link->local_node = fwnode;
@@ -578,8 +577,7 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *__fwnode,
fwnode = fwnode_get_parent(fwnode);
fwnode_property_read_u32(fwnode, port_prop, &link->remote_port);
fwnode = fwnode_get_next_parent(fwnode);
- if (is_of_node(fwnode) &&
- of_node_cmp(to_of_node(fwnode)->name, "ports") == 0)
+ if (is_of_node(fwnode) && of_node_name_eq(to_of_node(fwnode), "ports"))
fwnode = fwnode_get_next_parent(fwnode);
link->remote_node = fwnode;
@@ -613,7 +611,7 @@ v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev,
asd->match.fwnode =
fwnode_graph_get_remote_port_parent(endpoint);
if (!asd->match.fwnode) {
- dev_warn(dev, "bad remote port parent\n");
+ dev_dbg(dev, "no remote endpoint found\n");
ret = -ENOTCONN;
goto out_err;
}
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index c63746968fa3..1441a73ce64c 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -194,6 +194,7 @@ const char *v4l2_type_names[] = {
[V4L2_BUF_TYPE_SDR_CAPTURE] = "sdr-cap",
[V4L2_BUF_TYPE_SDR_OUTPUT] = "sdr-out",
[V4L2_BUF_TYPE_META_CAPTURE] = "meta-cap",
+ [V4L2_BUF_TYPE_META_OUTPUT] = "meta-out",
};
EXPORT_SYMBOL(v4l2_type_names);
@@ -366,6 +367,7 @@ static void v4l_print_format(const void *arg, bool write_only)
(sdr->pixelformat >> 24) & 0xff);
break;
case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
meta = &p->fmt.meta;
pr_cont(", dataformat=%c%c%c%c, buffersize=%u\n",
(meta->dataformat >> 0) & 0xff,
@@ -999,6 +1001,10 @@ static int check_fmt(struct file *file, enum v4l2_buf_type type)
if (is_vid && is_rx && ops->vidioc_g_fmt_meta_cap)
return 0;
break;
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (is_vid && is_tx && ops->vidioc_g_fmt_meta_out)
+ return 0;
+ break;
default:
break;
}
@@ -1189,6 +1195,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_Y12I: descr = "Interleaved 12-bit Greyscale"; break;
case V4L2_PIX_FMT_Z16: descr = "16-bit Depth"; break;
case V4L2_PIX_FMT_INZI: descr = "Planar 10:16 Greyscale Depth"; break;
+ case V4L2_PIX_FMT_CNF4: descr = "4-bit Depth Confidence (Packed)"; break;
case V4L2_PIX_FMT_PAL8: descr = "8-bit Palette"; break;
case V4L2_PIX_FMT_UV8: descr = "8-bit Chrominance UV 4-4"; break;
case V4L2_PIX_FMT_YVU410: descr = "Planar YVU 4:1:0"; break;
@@ -1339,9 +1346,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
case V4L2_PIX_FMT_SUNXI_TILED_NV12: descr = "Sunxi Tiled NV12 Format"; break;
default:
- WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
if (fmt->description[0])
return;
+ WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
flags = 0;
snprintf(fmt->description, sz, "%c%c%c%c%s",
(char)(fmt->pixelformat & 0x7f),
@@ -1409,6 +1416,11 @@ static int v4l_enum_fmt(const struct v4l2_ioctl_ops *ops,
break;
ret = ops->vidioc_enum_fmt_meta_cap(file, fh, arg);
break;
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (unlikely(!ops->vidioc_enum_fmt_meta_out))
+ break;
+ ret = ops->vidioc_enum_fmt_meta_out(file, fh, arg);
+ break;
}
if (ret == 0)
v4l_fill_fmtdesc(p);
@@ -1487,6 +1499,8 @@ static int v4l_g_fmt(const struct v4l2_ioctl_ops *ops,
return ops->vidioc_g_fmt_sdr_out(file, fh, arg);
case V4L2_BUF_TYPE_META_CAPTURE:
return ops->vidioc_g_fmt_meta_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ return ops->vidioc_g_fmt_meta_out(file, fh, arg);
}
return -EINVAL;
}
@@ -1512,6 +1526,7 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
struct v4l2_format *p = arg;
struct video_device *vfd = video_devdata(file);
int ret = check_fmt(file, p->type);
+ unsigned int i;
if (ret)
return ret;
@@ -1536,6 +1551,8 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_s_fmt_vid_cap_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
return ops->vidioc_s_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_overlay))
@@ -1564,6 +1581,8 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_s_fmt_vid_out_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
return ops->vidioc_s_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_s_fmt_vid_out_overlay))
@@ -1595,6 +1614,11 @@ static int v4l_s_fmt(const struct v4l2_ioctl_ops *ops,
break;
CLEAR_AFTER_FIELD(p, fmt.meta);
return ops->vidioc_s_fmt_meta_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (unlikely(!ops->vidioc_s_fmt_meta_out))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.meta);
+ return ops->vidioc_s_fmt_meta_out(file, fh, arg);
}
return -EINVAL;
}
@@ -1604,6 +1628,7 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
{
struct v4l2_format *p = arg;
int ret = check_fmt(file, p->type);
+ unsigned int i;
if (ret)
return ret;
@@ -1623,6 +1648,8 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_try_fmt_vid_cap_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
return ops->vidioc_try_fmt_vid_cap_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_overlay))
@@ -1651,6 +1678,8 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
if (unlikely(!ops->vidioc_try_fmt_vid_out_mplane))
break;
CLEAR_AFTER_FIELD(p, fmt.pix_mp.xfer_func);
+ for (i = 0; i < p->fmt.pix_mp.num_planes; i++)
+ CLEAR_AFTER_FIELD(p, fmt.pix_mp.plane_fmt[i].bytesperline);
return ops->vidioc_try_fmt_vid_out_mplane(file, fh, arg);
case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
if (unlikely(!ops->vidioc_try_fmt_vid_out_overlay))
@@ -1682,6 +1711,11 @@ static int v4l_try_fmt(const struct v4l2_ioctl_ops *ops,
break;
CLEAR_AFTER_FIELD(p, fmt.meta);
return ops->vidioc_try_fmt_meta_cap(file, fh, arg);
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ if (unlikely(!ops->vidioc_try_fmt_meta_out))
+ break;
+ CLEAR_AFTER_FIELD(p, fmt.meta);
+ return ops->vidioc_try_fmt_meta_out(file, fh, arg);
}
return -EINVAL;
}
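
CLEAR_AFTER_FIELD(p, field) zeroes every byte of *p that follows the named member, so fields the caller is not allowed to set cannot leak through to the driver; the new per-plane loops apply the same policy to each plane_fmt entry past bytesperline. Roughly how v4l2-ioctl.c defines it (quoted from memory, simplified for illustration):

    #include <linux/stddef.h>
    #include <linux/string.h>
    #include <linux/types.h>

    #define CLEAR_AFTER_FIELD(p, field)                                   \
            memset((u8 *)(p) + offsetof(typeof(*(p)), field) +            \
                   sizeof((p)->field), 0,                                 \
                   sizeof(*(p)) - offsetof(typeof(*(p)), field) -         \
                   sizeof((p)->field))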
@@ -2202,21 +2236,24 @@ static int v4l_s_selection(const struct v4l2_ioctl_ops *ops,
static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_crop *p = arg;
struct v4l2_selection s = {
.type = p->type,
};
int ret;
- if (ops->vidioc_g_crop)
- return ops->vidioc_g_crop(file, fh, p);
/* simulate capture crop using selection api */
/* crop means compose for output devices */
if (V4L2_TYPE_IS_OUTPUT(p->type))
- s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
+ s.target = V4L2_SEL_TGT_COMPOSE;
else
- s.target = V4L2_SEL_TGT_CROP_ACTIVE;
+ s.target = V4L2_SEL_TGT_CROP;
+
+ if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
+ s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
+ V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
ret = v4l_g_selection(ops, file, fh, &s);
@@ -2229,21 +2266,24 @@ static int v4l_g_crop(const struct v4l2_ioctl_ops *ops,
static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_crop *p = arg;
struct v4l2_selection s = {
.type = p->type,
.r = p->c,
};
- if (ops->vidioc_s_crop)
- return ops->vidioc_s_crop(file, fh, p);
/* simulate capture crop using selection api */
/* crop means compose for output devices */
if (V4L2_TYPE_IS_OUTPUT(p->type))
- s.target = V4L2_SEL_TGT_COMPOSE_ACTIVE;
+ s.target = V4L2_SEL_TGT_COMPOSE;
else
- s.target = V4L2_SEL_TGT_CROP_ACTIVE;
+ s.target = V4L2_SEL_TGT_CROP;
+
+ if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
+ s.target = s.target == V4L2_SEL_TGT_COMPOSE ?
+ V4L2_SEL_TGT_CROP : V4L2_SEL_TGT_COMPOSE;
return v4l_s_selection(ops, file, fh, &s);
}
@@ -2251,6 +2291,7 @@ static int v4l_s_crop(const struct v4l2_ioctl_ops *ops,
static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
struct file *file, void *fh, void *arg)
{
+ struct video_device *vfd = video_devdata(file);
struct v4l2_cropcap *p = arg;
struct v4l2_selection s = { .type = p->type };
int ret = 0;
@@ -2259,18 +2300,21 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
p->pixelaspect.numerator = 1;
p->pixelaspect.denominator = 1;
+ if (s.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ s.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ else if (s.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ s.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+
/*
* The determine_valid_ioctls() call should already ensure
* that this can never happen, but just in case...
*/
- if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_g_selection))
+ if (WARN_ON(!ops->vidioc_g_selection))
return -ENOTTY;
- if (ops->vidioc_cropcap)
- ret = ops->vidioc_cropcap(file, fh, p);
-
- if (!ops->vidioc_g_selection)
- return ret;
+ if (ops->vidioc_g_pixelaspect)
+ ret = ops->vidioc_g_pixelaspect(file, fh, s.type,
+ &p->pixelaspect);
/*
* Ignore ENOTTY or ENOIOCTLCMD error returns, just use the
@@ -2287,13 +2331,17 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
else
s.target = V4L2_SEL_TGT_CROP_BOUNDS;
+ if (test_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags))
+ s.target = s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS ?
+ V4L2_SEL_TGT_CROP_BOUNDS : V4L2_SEL_TGT_COMPOSE_BOUNDS;
+
ret = v4l_g_selection(ops, file, fh, &s);
if (ret)
return ret;
p->bounds = s.r;
/* obtaining defrect */
- if (V4L2_TYPE_IS_OUTPUT(p->type))
+ if (s.target == V4L2_SEL_TGT_COMPOSE_BOUNDS)
s.target = V4L2_SEL_TGT_COMPOSE_DEFAULT;
else
s.target = V4L2_SEL_TGT_CROP_DEFAULT;
@@ -2586,7 +2634,7 @@ DEFINE_V4L_STUB_FUNC(enum_dv_timings)
DEFINE_V4L_STUB_FUNC(query_dv_timings)
DEFINE_V4L_STUB_FUNC(dv_timings_cap)
-static struct v4l2_ioctl_info v4l2_ioctls[] = {
+static const struct v4l2_ioctl_info v4l2_ioctls[] = {
IOCTL_INFO(VIDIOC_QUERYCAP, v4l_querycap, v4l_print_querycap, 0),
IOCTL_INFO(VIDIOC_ENUM_FMT, v4l_enum_fmt, v4l_print_fmtdesc, INFO_FL_CLEAR(v4l2_fmtdesc, type)),
IOCTL_INFO(VIDIOC_G_FMT, v4l_g_fmt, v4l_print_format, 0),
@@ -2679,45 +2727,6 @@ static bool v4l2_is_known_ioctl(unsigned int cmd)
return v4l2_ioctls[_IOC_NR(cmd)].ioctl == cmd;
}
-#if IS_ENABLED(CONFIG_V4L2_MEM2MEM_DEV)
-static bool v4l2_ioctl_m2m_queue_is_output(unsigned int cmd, void *arg)
-{
- switch (cmd) {
- case VIDIOC_CREATE_BUFS: {
- struct v4l2_create_buffers *cbufs = arg;
-
- return V4L2_TYPE_IS_OUTPUT(cbufs->format.type);
- }
- case VIDIOC_REQBUFS: {
- struct v4l2_requestbuffers *rbufs = arg;
-
- return V4L2_TYPE_IS_OUTPUT(rbufs->type);
- }
- case VIDIOC_QBUF:
- case VIDIOC_DQBUF:
- case VIDIOC_QUERYBUF:
- case VIDIOC_PREPARE_BUF: {
- struct v4l2_buffer *buf = arg;
-
- return V4L2_TYPE_IS_OUTPUT(buf->type);
- }
- case VIDIOC_EXPBUF: {
- struct v4l2_exportbuffer *expbuf = arg;
-
- return V4L2_TYPE_IS_OUTPUT(expbuf->type);
- }
- case VIDIOC_STREAMON:
- case VIDIOC_STREAMOFF: {
- int *type = arg;
-
- return V4L2_TYPE_IS_OUTPUT(*type);
- }
- default:
- return false;
- }
-}
-#endif
-
static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev,
struct v4l2_fh *vfh, unsigned int cmd,
void *arg)
@@ -2727,12 +2736,8 @@ static struct mutex *v4l2_ioctl_get_lock(struct video_device *vdev,
#if IS_ENABLED(CONFIG_V4L2_MEM2MEM_DEV)
if (vfh && vfh->m2m_ctx &&
(v4l2_ioctls[_IOC_NR(cmd)].flags & INFO_FL_QUEUE)) {
- bool is_output = v4l2_ioctl_m2m_queue_is_output(cmd, arg);
- struct v4l2_m2m_queue_ctx *ctx = is_output ?
- &vfh->m2m_ctx->out_q_ctx : &vfh->m2m_ctx->cap_q_ctx;
-
- if (ctx->q.lock)
- return ctx->q.lock;
+ if (vfh->m2m_ctx->q_lock)
+ return vfh->m2m_ctx->q_lock;
}
#endif
if (vdev->queue && vdev->queue->lock &&
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 1ed2465972ac..5bbdec55b7d7 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -87,6 +87,7 @@ static const char * const m2m_entity_name[] = {
* @curr_ctx: currently running instance
* @job_queue: instances queued to run
* @job_spinlock: protects job_queue
+ * @job_work: worker to run queued jobs.
* @m2m_ops: driver callbacks
*/
struct v4l2_m2m_dev {
@@ -103,6 +104,7 @@ struct v4l2_m2m_dev {
struct list_head job_queue;
spinlock_t job_spinlock;
+ struct work_struct job_work;
const struct v4l2_m2m_ops *m2m_ops;
};
@@ -244,6 +246,9 @@ EXPORT_SYMBOL(v4l2_m2m_get_curr_priv);
* @m2m_dev: per-device context
*
* Get next transaction (if present) from the waiting jobs list and run it.
+ *
+ * Note that this function can run on a given v4l2_m2m_ctx context,
+ * but may call .device_run for another context.
*/
static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
{
@@ -297,51 +302,48 @@ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev,
/* If the context is aborted then don't schedule it */
if (m2m_ctx->job_flags & TRANS_ABORT) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("Aborted context\n");
- return;
+ goto job_unlock;
}
if (m2m_ctx->job_flags & TRANS_QUEUED) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("On job queue already\n");
- return;
+ goto job_unlock;
}
spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
&& !m2m_ctx->out_q_ctx.buffered) {
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
- flags_out);
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("No input buffers available\n");
- return;
+ goto out_unlock;
}
spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
&& !m2m_ctx->cap_q_ctx.buffered) {
- spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
- flags_cap);
- spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
- flags_out);
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("No output buffers available\n");
- return;
+ goto cap_unlock;
}
spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
if (m2m_dev->m2m_ops->job_ready
&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
dprintk("Driver not ready\n");
- return;
+ goto job_unlock;
}
list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
m2m_ctx->job_flags |= TRANS_QUEUED;
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
+ return;
+
+cap_unlock:
+ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
+out_unlock:
+ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
+job_unlock:
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
}
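
This rewrite is the common kernel unwind idiom: take locks in order, and on failure jump to a label that releases only what is already held, so each lock has a single unlock site per path. In miniature, with hypothetical locks:

    #include <linux/spinlock.h>

    static void demo_try_queue(spinlock_t *a, spinlock_t *b,
                               bool fail_early, bool fail_late)
    {
        spin_lock(a);
        if (fail_early)
            goto a_unlock;

        spin_lock(b);
        if (fail_late)
            goto b_unlock;

        /* success path: do the work, drop locks in reverse order */
        spin_unlock(b);
        spin_unlock(a);
        return;

    b_unlock:
        spin_unlock(b);
    a_unlock:
        spin_unlock(a);
    }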
/**
@@ -366,6 +368,18 @@ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule);
/**
+ * v4l2_m2m_device_run_work() - run pending jobs for the context
+ * @work: Work structure used for scheduling the execution of this function.
+ */
+static void v4l2_m2m_device_run_work(struct work_struct *work)
+{
+ struct v4l2_m2m_dev *m2m_dev =
+ container_of(work, struct v4l2_m2m_dev, job_work);
+
+ v4l2_m2m_try_run(m2m_dev);
+}
+
+/**
* v4l2_m2m_cancel_job() - cancel pending jobs for the context
* @m2m_ctx: m2m context with jobs to be canceled
*
@@ -424,7 +438,12 @@ void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
/* This instance might have more buffers ready, but since we do not
* allow more than one job on the job_queue per instance, each has
* to be scheduled separately after the previous one finishes. */
- v4l2_m2m_try_schedule(m2m_ctx);
+ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx);
+
+ /* We might be running in atomic context,
+ * but the job must be run in non-atomic context.
+ */
+ schedule_work(&m2m_dev->job_work);
}
EXPORT_SYMBOL(v4l2_m2m_job_finish);
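
The reason for the indirection: v4l2_m2m_job_finish() is typically called from a driver's interrupt handler, but .device_run implementations are allowed to sleep, so the next job must be launched from process context. schedule_work() provides exactly that hop; a minimal hedged sketch with hypothetical names:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct demo_m2m_dev {
        struct work_struct job_work;
    };

    static void demo_device_run(struct demo_m2m_dev *dev)
    {
        might_sleep(); /* would splat if reached from an IRQ handler */
        /* program the hardware, possibly blocking ... */
    }

    static void demo_job_work(struct work_struct *work)
    {
        demo_device_run(container_of(work, struct demo_m2m_dev, job_work));
    }

    /* Safe to call from interrupt context: */
    static void demo_job_finish(struct demo_m2m_dev *dev)
    {
        schedule_work(&dev->job_work);
    }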
@@ -866,6 +885,7 @@ struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops)
m2m_dev->m2m_ops = m2m_ops;
INIT_LIST_HEAD(&m2m_dev->job_queue);
spin_lock_init(&m2m_dev->job_spinlock);
+ INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work);
return m2m_dev;
}
@@ -908,12 +928,14 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
if (ret)
goto err;
/*
- * If both queues use same mutex assign it as the common buffer
- * queues lock to the m2m context. This lock is used in the
- * v4l2_m2m_ioctl_* helpers.
+ * Both queues should use the same mutex to lock the m2m context.
+ * This lock is used in some v4l2_m2m_* helpers.
*/
- if (out_q_ctx->q.lock == cap_q_ctx->q.lock)
- m2m_ctx->q_lock = out_q_ctx->q.lock;
+ if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ m2m_ctx->q_lock = out_q_ctx->q.lock;
return m2m_ctx;
err:
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 8a02f11076f9..82daccc9ea62 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -15,7 +15,7 @@
#define pr_fmt(fmt) DRIVER_NAME ": " fmt
#include <linux/module.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/memstick.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
@@ -1873,69 +1873,65 @@ static void msb_io_work(struct work_struct *work)
struct msb_data *msb = container_of(work, struct msb_data, io_work);
int page, error, len;
sector_t lba;
- unsigned long flags;
struct scatterlist *sg = msb->prealloc_sg;
+ struct request *req;
dbg_verbose("IO: work started");
while (1) {
- spin_lock_irqsave(&msb->q_lock, flags);
+ spin_lock_irq(&msb->q_lock);
if (msb->need_flush_cache) {
msb->need_flush_cache = false;
- spin_unlock_irqrestore(&msb->q_lock, flags);
+ spin_unlock_irq(&msb->q_lock);
msb_cache_flush(msb);
continue;
}
- if (!msb->req) {
- msb->req = blk_fetch_request(msb->queue);
- if (!msb->req) {
- dbg_verbose("IO: no more requests exiting");
- spin_unlock_irqrestore(&msb->q_lock, flags);
- return;
- }
+ req = msb->req;
+ if (!req) {
+ dbg_verbose("IO: no more requests exiting");
+ spin_unlock_irq(&msb->q_lock);
+ return;
}
- spin_unlock_irqrestore(&msb->q_lock, flags);
-
- /* If card was removed meanwhile */
- if (!msb->req)
- return;
+ spin_unlock_irq(&msb->q_lock);
/* process the request */
dbg_verbose("IO: processing new request");
- blk_rq_map_sg(msb->queue, msb->req, sg);
+ blk_rq_map_sg(msb->queue, req, sg);
- lba = blk_rq_pos(msb->req);
+ lba = blk_rq_pos(req);
sector_div(lba, msb->page_size / 512);
page = sector_div(lba, msb->pages_in_block);
if (rq_data_dir(msb->req) == READ)
error = msb_do_read_request(msb, lba, page, sg,
- blk_rq_bytes(msb->req), &len);
+ blk_rq_bytes(req), &len);
else
error = msb_do_write_request(msb, lba, page, sg,
- blk_rq_bytes(msb->req), &len);
-
- spin_lock_irqsave(&msb->q_lock, flags);
+ blk_rq_bytes(req), &len);
- if (len)
- if (!__blk_end_request(msb->req, BLK_STS_OK, len))
- msb->req = NULL;
+ if (len && !blk_update_request(req, BLK_STS_OK, len)) {
+ __blk_mq_end_request(req, BLK_STS_OK);
+ spin_lock_irq(&msb->q_lock);
+ msb->req = NULL;
+ spin_unlock_irq(&msb->q_lock);
+ }
if (error && msb->req) {
blk_status_t ret = errno_to_blk_status(error);
+
dbg_verbose("IO: ending one sector of the request with error");
- if (!__blk_end_request(msb->req, ret, msb->page_size))
- msb->req = NULL;
+ blk_mq_end_request(req, ret);
+ spin_lock_irq(&msb->q_lock);
+ msb->req = NULL;
+ spin_unlock_irq(&msb->q_lock);
}
if (msb->req)
dbg_verbose("IO: request still pending");
-
- spin_unlock_irqrestore(&msb->q_lock, flags);
}
}
@@ -2002,29 +1998,40 @@ static int msb_bd_getgeo(struct block_device *bdev,
return 0;
}
-static void msb_submit_req(struct request_queue *q)
+static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct memstick_dev *card = q->queuedata;
+ struct memstick_dev *card = hctx->queue->queuedata;
struct msb_data *msb = memstick_get_drvdata(card);
- struct request *req = NULL;
+ struct request *req = bd->rq;
dbg_verbose("Submit request");
+ spin_lock_irq(&msb->q_lock);
+
if (msb->card_dead) {
dbg("Refusing requests on removed card");
WARN_ON(!msb->io_queue_stopped);
- while ((req = blk_fetch_request(q)) != NULL)
- __blk_end_request_all(req, BLK_STS_IOERR);
- return;
+ spin_unlock_irq(&msb->q_lock);
+ blk_mq_start_request(req);
+ return BLK_STS_IOERR;
}
- if (msb->req)
- return;
+ if (msb->req) {
+ spin_unlock_irq(&msb->q_lock);
+ return BLK_STS_DEV_RESOURCE;
+ }
+
+ blk_mq_start_request(req);
+ msb->req = req;
if (!msb->io_queue_stopped)
queue_work(msb->io_queue, &msb->io_work);
+
+ spin_unlock_irq(&msb->q_lock);
+ return BLK_STS_OK;
}
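
.queue_rq replaces the old request_fn pull model: blk-mq pushes one request at a time, and the driver either accepts it (blk_mq_start_request() plus BLK_STS_OK) or pushes back with BLK_STS_DEV_RESOURCE so the core requeues and retries later. A minimal sketch of the shape msb_queue_rq() follows, with hypothetical demo_* names:

    #include <linux/blk-mq.h>
    #include <linux/spinlock.h>

    struct demo_disk {
        spinlock_t lock;
        struct request *inflight; /* single outstanding request */
    };

    static blk_status_t demo_queue_rq(struct blk_mq_hw_ctx *hctx,
                                      const struct blk_mq_queue_data *bd)
    {
        struct demo_disk *d = hctx->queue->queuedata;
        struct request *req = bd->rq;

        spin_lock_irq(&d->lock);
        if (d->inflight) {
            spin_unlock_irq(&d->lock);
            return BLK_STS_DEV_RESOURCE; /* core requeues, retries later */
        }
        blk_mq_start_request(req);
        d->inflight = req;
        spin_unlock_irq(&d->lock);

        /* kick the machinery that will eventually complete the request */
        return BLK_STS_OK;
    }

    static const struct blk_mq_ops demo_mq_ops = {
        .queue_rq = demo_queue_rq,
    };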
static int msb_check_card(struct memstick_dev *card)
@@ -2040,21 +2047,20 @@ static void msb_stop(struct memstick_dev *card)
dbg("Stopping all msblock IO");
+ blk_mq_stop_hw_queues(msb->queue);
spin_lock_irqsave(&msb->q_lock, flags);
- blk_stop_queue(msb->queue);
msb->io_queue_stopped = true;
spin_unlock_irqrestore(&msb->q_lock, flags);
del_timer_sync(&msb->cache_flush_timer);
flush_workqueue(msb->io_queue);
+ spin_lock_irqsave(&msb->q_lock, flags);
if (msb->req) {
- spin_lock_irqsave(&msb->q_lock, flags);
- blk_requeue_request(msb->queue, msb->req);
+ blk_mq_requeue_request(msb->req, false);
msb->req = NULL;
- spin_unlock_irqrestore(&msb->q_lock, flags);
}
-
+ spin_unlock_irqrestore(&msb->q_lock, flags);
}
static void msb_start(struct memstick_dev *card)
@@ -2077,9 +2083,7 @@ static void msb_start(struct memstick_dev *card)
msb->need_flush_cache = true;
msb->io_queue_stopped = false;
- spin_lock_irqsave(&msb->q_lock, flags);
- blk_start_queue(msb->queue);
- spin_unlock_irqrestore(&msb->q_lock, flags);
+ blk_mq_start_hw_queues(msb->queue);
queue_work(msb->io_queue, &msb->io_work);
@@ -2092,6 +2096,10 @@ static const struct block_device_operations msb_bdops = {
.owner = THIS_MODULE
};
+static const struct blk_mq_ops msb_mq_ops = {
+ .queue_rq = msb_queue_rq,
+};
+
/* Registers the block device */
static int msb_init_disk(struct memstick_dev *card)
{
@@ -2112,9 +2120,11 @@ static int msb_init_disk(struct memstick_dev *card)
goto out_release_id;
}
- msb->queue = blk_init_queue(msb_submit_req, &msb->q_lock);
- if (!msb->queue) {
- rc = -ENOMEM;
+ msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &msb_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(msb->queue)) {
+ rc = PTR_ERR(msb->queue);
+ msb->queue = NULL;
goto out_put_disk;
}
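
blk_mq_init_sq_queue(), available in this kernel generation, is a convenience helper that sets up a tag set with a single hardware queue and returns the request queue; unlike blk_init_queue() it reports failure via ERR_PTR(), hence the IS_ERR()/PTR_ERR() handling above. Roughly, assuming demo_mq_ops and demo_disk from the previous sketch:

    #include <linux/blk-mq.h>
    #include <linux/err.h>

    static int demo_init_queue(struct demo_disk *d,
                               struct blk_mq_tag_set *set)
    {
        struct request_queue *q;

        q = blk_mq_init_sq_queue(set, &demo_mq_ops, 2,
                                 BLK_MQ_F_SHOULD_MERGE);
        if (IS_ERR(q))
            return PTR_ERR(q); /* ERR_PTR, not NULL, on failure */

        q->queuedata = d;
        return 0;
    }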
@@ -2202,12 +2212,13 @@ static void msb_remove(struct memstick_dev *card)
/* Take care of unhandled + new requests from now on */
spin_lock_irqsave(&msb->q_lock, flags);
msb->card_dead = true;
- blk_start_queue(msb->queue);
spin_unlock_irqrestore(&msb->q_lock, flags);
+ blk_mq_start_hw_queues(msb->queue);
/* Remove the disk */
del_gendisk(msb->disk);
blk_cleanup_queue(msb->queue);
+ blk_mq_free_tag_set(&msb->tag_set);
msb->queue = NULL;
mutex_lock(&msb_disk_lock);
diff --git a/drivers/memstick/core/ms_block.h b/drivers/memstick/core/ms_block.h
index 53962c3b21df..9ba84e0ced63 100644
--- a/drivers/memstick/core/ms_block.h
+++ b/drivers/memstick/core/ms_block.h
@@ -152,6 +152,7 @@ struct msb_data {
struct gendisk *disk;
struct request_queue *queue;
spinlock_t q_lock;
+ struct blk_mq_tag_set tag_set;
struct hd_geometry geometry;
struct attribute_group attr_group;
struct request *req;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 0cd30dcb6801..aba50ec98b4d 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -12,7 +12,7 @@
*
*/
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/kthread.h>
@@ -142,6 +142,7 @@ struct mspro_block_data {
struct gendisk *disk;
struct request_queue *queue;
struct request *block_req;
+ struct blk_mq_tag_set tag_set;
spinlock_t q_lock;
unsigned short page_size;
@@ -152,7 +153,6 @@ struct mspro_block_data {
unsigned char system;
unsigned char read_only:1,
eject:1,
- has_request:1,
data_dir:1,
active:1;
unsigned char transfer_cmd;
@@ -694,13 +694,12 @@ static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset,
/*** Data transfer ***/
-static int mspro_block_issue_req(struct memstick_dev *card, int chunk)
+static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
u64 t_off;
unsigned int count;
-try_again:
while (chunk) {
msb->current_page = 0;
msb->current_seg = 0;
@@ -709,9 +708,17 @@ try_again:
msb->req_sg);
if (!msb->seg_count) {
- chunk = __blk_end_request_cur(msb->block_req,
- BLK_STS_RESOURCE);
- continue;
+ unsigned int bytes = blk_rq_cur_bytes(msb->block_req);
+
+ chunk = blk_update_request(msb->block_req,
+ BLK_STS_RESOURCE,
+ bytes);
+ if (chunk)
+ continue;
+ __blk_mq_end_request(msb->block_req,
+ BLK_STS_RESOURCE);
+ msb->block_req = NULL;
+ break;
}
t_off = blk_rq_pos(msb->block_req);
@@ -729,30 +736,22 @@ try_again:
return 0;
}
- dev_dbg(&card->dev, "blk_fetch\n");
- msb->block_req = blk_fetch_request(msb->queue);
- if (!msb->block_req) {
- dev_dbg(&card->dev, "issue end\n");
- return -EAGAIN;
- }
-
- dev_dbg(&card->dev, "trying again\n");
- chunk = 1;
- goto try_again;
+ return 1;
}
static int mspro_block_complete_req(struct memstick_dev *card, int error)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
- int chunk, cnt;
+ int cnt;
+ bool chunk;
unsigned int t_len = 0;
unsigned long flags;
spin_lock_irqsave(&msb->q_lock, flags);
- dev_dbg(&card->dev, "complete %d, %d\n", msb->has_request ? 1 : 0,
+ dev_dbg(&card->dev, "complete %d, %d\n", msb->block_req ? 1 : 0,
error);
- if (msb->has_request) {
+ if (msb->block_req) {
/* Nothing to do - not really an error */
if (error == -EAGAIN)
error = 0;
@@ -777,15 +776,17 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
if (error && !t_len)
t_len = blk_rq_cur_bytes(msb->block_req);
- chunk = __blk_end_request(msb->block_req,
+ chunk = blk_update_request(msb->block_req,
errno_to_blk_status(error), t_len);
-
- error = mspro_block_issue_req(card, chunk);
-
- if (!error)
- goto out;
- else
- msb->has_request = 0;
+ if (chunk) {
+ error = mspro_block_issue_req(card, chunk);
+ if (!error)
+ goto out;
+ } else {
+ __blk_mq_end_request(msb->block_req,
+ errno_to_blk_status(error));
+ msb->block_req = NULL;
+ }
} else {
if (!error)
error = -EAGAIN;
@@ -806,8 +807,8 @@ static void mspro_block_stop(struct memstick_dev *card)
while (1) {
spin_lock_irqsave(&msb->q_lock, flags);
- if (!msb->has_request) {
- blk_stop_queue(msb->queue);
+ if (!msb->block_req) {
+ blk_mq_stop_hw_queues(msb->queue);
rc = 1;
}
spin_unlock_irqrestore(&msb->q_lock, flags);
@@ -822,32 +823,37 @@ static void mspro_block_stop(struct memstick_dev *card)
static void mspro_block_start(struct memstick_dev *card)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
- unsigned long flags;
- spin_lock_irqsave(&msb->q_lock, flags);
- blk_start_queue(msb->queue);
- spin_unlock_irqrestore(&msb->q_lock, flags);
+ blk_mq_start_hw_queues(msb->queue);
}
-static void mspro_block_submit_req(struct request_queue *q)
+static blk_status_t mspro_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct memstick_dev *card = q->queuedata;
+ struct memstick_dev *card = hctx->queue->queuedata;
struct mspro_block_data *msb = memstick_get_drvdata(card);
- struct request *req = NULL;
- if (msb->has_request)
- return;
+ spin_lock_irq(&msb->q_lock);
- if (msb->eject) {
- while ((req = blk_fetch_request(q)) != NULL)
- __blk_end_request_all(req, BLK_STS_IOERR);
+ if (msb->block_req) {
+ spin_unlock_irq(&msb->q_lock);
+ return BLK_STS_DEV_RESOURCE;
+ }
- return;
+ if (msb->eject) {
+ spin_unlock_irq(&msb->q_lock);
+ blk_mq_start_request(bd->rq);
+ return BLK_STS_IOERR;
}
- msb->has_request = 1;
- if (mspro_block_issue_req(card, 0))
- msb->has_request = 0;
+ msb->block_req = bd->rq;
+ blk_mq_start_request(bd->rq);
+
+ if (mspro_block_issue_req(card, true))
+ msb->block_req = NULL;
+
+ spin_unlock_irq(&msb->q_lock);
+ return BLK_STS_OK;
}
/*** Initialization ***/
@@ -1167,6 +1173,10 @@ static int mspro_block_init_card(struct memstick_dev *card)
}
+static const struct blk_mq_ops mspro_mq_ops = {
+ .queue_rq = mspro_queue_rq,
+};
+
static int mspro_block_init_disk(struct memstick_dev *card)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
@@ -1206,9 +1216,11 @@ static int mspro_block_init_disk(struct memstick_dev *card)
goto out_release_id;
}
- msb->queue = blk_init_queue(mspro_block_submit_req, &msb->q_lock);
- if (!msb->queue) {
- rc = -ENOMEM;
+ msb->queue = blk_mq_init_sq_queue(&msb->tag_set, &mspro_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(msb->queue)) {
+ rc = PTR_ERR(msb->queue);
+ msb->queue = NULL;
goto out_put_disk;
}
@@ -1318,13 +1330,14 @@ static void mspro_block_remove(struct memstick_dev *card)
spin_lock_irqsave(&msb->q_lock, flags);
msb->eject = 1;
- blk_start_queue(msb->queue);
spin_unlock_irqrestore(&msb->q_lock, flags);
+ blk_mq_start_hw_queues(msb->queue);
del_gendisk(msb->disk);
dev_dbg(&card->dev, "mspro block remove\n");
blk_cleanup_queue(msb->queue);
+ blk_mq_free_tag_set(&msb->tag_set);
msb->queue = NULL;
sysfs_remove_group(&card->dev.kobj, &msb->attr_group);
@@ -1344,8 +1357,9 @@ static int mspro_block_suspend(struct memstick_dev *card, pm_message_t state)
struct mspro_block_data *msb = memstick_get_drvdata(card);
unsigned long flags;
+ blk_mq_stop_hw_queues(msb->queue);
+
spin_lock_irqsave(&msb->q_lock, flags);
- blk_stop_queue(msb->queue);
msb->active = 0;
spin_unlock_irqrestore(&msb->q_lock, flags);
@@ -1355,7 +1369,6 @@ static int mspro_block_suspend(struct memstick_dev *card, pm_message_t state)
static int mspro_block_resume(struct memstick_dev *card)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
- unsigned long flags;
int rc = 0;
#ifdef CONFIG_MEMSTICK_UNSAFE_RESUME
@@ -1401,9 +1414,7 @@ out_unlock:
#endif /* CONFIG_MEMSTICK_UNSAFE_RESUME */
- spin_lock_irqsave(&msb->q_lock, flags);
- blk_start_queue(msb->queue);
- spin_unlock_irqrestore(&msb->q_lock, flags);
+ blk_mq_start_hw_queues(msb->queue);
return rc;
}
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 0be511dd93d0..e1450a56fc07 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -16,20 +16,21 @@
* published by the Free Software Foundation.
*/
-#include <linux/err.h>
+#include <linux/acpi.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mfd/axp20x.h>
+#include <linux/mfd/core.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
-#include <linux/mfd/axp20x.h>
-#include <linux/mfd/core.h>
-#include <linux/of_device.h>
-#include <linux/acpi.h>
-#define AXP20X_OFF 0x80
+#define AXP20X_OFF BIT(7)
#define AXP806_REG_ADDR_EXT_ADDR_MASTER_MODE 0
#define AXP806_REG_ADDR_EXT_ADDR_SLAVE_MODE BIT(4)
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 8f9d6964173e..b99a194ce5a4 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -263,6 +263,11 @@ static const struct file_operations fops = {
#endif
};
+static void cros_ec_class_release(struct device *dev)
+{
+ kfree(to_cros_ec_dev(dev));
+}
+
static void cros_ec_sensors_register(struct cros_ec_dev *ec)
{
/*
@@ -395,7 +400,7 @@ static int ec_device_probe(struct platform_device *pdev)
int retval = -ENOMEM;
struct device *dev = &pdev->dev;
struct cros_ec_platform *ec_platform = dev_get_platdata(dev);
- struct cros_ec_dev *ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
+ struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL);
if (!ec)
return retval;
@@ -417,6 +422,7 @@ static int ec_device_probe(struct platform_device *pdev)
ec->class_dev.devt = MKDEV(ec_major, pdev->id);
ec->class_dev.class = &cros_class;
ec->class_dev.parent = dev;
+ ec->class_dev.release = cros_ec_class_release;
retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name);
if (retval) {
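
cros_ec_dev switches from devm_kzalloc() to kzalloc() because the allocation embeds a struct device: devm memory is freed when the driver unbinds, but a refcounted device must stay alive until the last reference is dropped, so it has to be freed from its .release() callback instead. The general pattern, with hypothetical names:

struct my_ec {
	struct device class_dev;	/* refcounted, may outlive unbind */
	/* ... driver state ... */
};

static void my_ec_release(struct device *dev)
{
	/* runs when the last reference is dropped, not at unbind */
	kfree(container_of(dev, struct my_ec, class_dev));
}

/* probe */
struct my_ec *ec = kzalloc(sizeof(*ec), GFP_KERNEL);	/* not devm_ */
if (!ec)
	return -ENOMEM;
ec->class_dev.release = my_ec_release;
device_initialize(&ec->class_dev);
/* on any later error, put_device(&ec->class_dev) frees via release() */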
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 22bd6525e09c..04a177efd245 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -21,7 +21,6 @@
#include <linux/mfd/core.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -306,14 +305,6 @@ static int wm8994_set_pdata_from_of(struct wm8994 *wm8994)
pdata->csnaddr_pd = of_property_read_bool(np, "wlf,csnaddr-pd");
- pdata->ldo[0].enable = of_get_named_gpio(np, "wlf,ldo1ena", 0);
- if (pdata->ldo[0].enable < 0)
- pdata->ldo[0].enable = 0;
-
- pdata->ldo[1].enable = of_get_named_gpio(np, "wlf,ldo2ena", 0);
- if (pdata->ldo[1].enable < 0)
- pdata->ldo[1].enable = 0;
-
return 0;
}
#else
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index b66d832d3233..c79ba1c699ad 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1718,7 +1718,6 @@ int cxl_slot_is_switched(struct pci_dev *dev)
{
struct device_node *np;
int depth = 0;
- const __be32 *prop;
if (!(np = pci_device_to_OF_node(dev))) {
pr_err("cxl: np = NULL\n");
@@ -1727,8 +1726,7 @@ int cxl_slot_is_switched(struct pci_dev *dev)
of_node_get(np);
while (np) {
np = of_get_next_parent(np);
- prop = of_get_property(np, "device_type", NULL);
- if (!prop || strcmp((char *)prop, "pciex"))
+ if (!of_node_is_type(np, "pciex"))
break;
depth++;
}
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 7908633d9204..49da2f744bbf 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -11,17 +11,6 @@
#include <misc/cxl.h>
#include "cxl.h"
-static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
-{
- if (dma_mask < DMA_BIT_MASK(64)) {
- pr_info("%s only 64bit DMA supported on CXL", __func__);
- return -EIO;
- }
-
- *(pdev->dev.dma_mask) = dma_mask;
- return 0;
-}
-
static int cxl_pci_probe_mode(struct pci_bus *bus)
{
return PCI_PROBE_NORMAL;
@@ -220,7 +209,6 @@ static struct pci_controller_ops cxl_pci_controller_ops =
.reset_secondary_bus = cxl_pci_reset_secondary_bus,
.setup_msi_irqs = cxl_setup_msi_irqs,
.teardown_msi_irqs = cxl_teardown_msi_irqs,
- .dma_set_mask = cxl_dma_set_mask,
};
int cxl_pci_vphb_add(struct cxl_afu *afu)
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index c824329f7012..0e4193cb08cf 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -416,7 +416,7 @@ static int scif_create_remote_lookup(struct scif_dev *remote_dev,
if (err)
goto error_window;
err = scif_map_page(&window->num_pages_lookup.lookup[j],
- vmalloc_dma_phys ?
+ vmalloc_num_pages ?
vmalloc_to_page(&window->num_pages[i]) :
virt_to_page(&window->num_pages[i]),
remote_dev);
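
The scif_rma change is a copy-and-paste fix: the lookup table being mapped lives in window->num_pages, so vmalloc_num_pages, not vmalloc_dma_phys, must decide how its struct page is found. The distinction matters because vmalloc memory is only virtually contiguous and needs a page-table walk, while linear-map addresses convert arithmetically. A generic helper illustrating the split (sketch):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct page *addr_to_page(const void *addr)
{
	if (is_vmalloc_addr(addr))		/* vmalloc area: walk page tables */
		return vmalloc_to_page(addr);
	return virt_to_page(addr);		/* lowmem: simple arithmetic */
}

Using virt_to_page() on a vmalloc address silently yields the wrong struct page, which is exactly the class of bug fixed here.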
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 3633202e18f4..6b212c8b78e7 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -129,6 +129,16 @@ static u64 vop_get_features(struct virtio_device *vdev)
return features;
}
+static void vop_transport_features(struct virtio_device *vdev)
+{
+ /*
+ * Packed ring isn't enabled on virtio_vop for now,
+ * because virtio_vop uses vring_new_virtqueue() which
+ * creates virtio rings on preallocated memory.
+ */
+ __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
+}
+
static int vop_finalize_features(struct virtio_device *vdev)
{
unsigned int i, bits;
@@ -141,6 +151,9 @@ static int vop_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
+ /* Give virtio_vop a chance to accept features. */
+ vop_transport_features(vdev);
+
memset_io(out_features, 0, feature_len);
bits = min_t(unsigned, feature_len,
sizeof(vdev->features)) * 8;
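
vop_finalize_features() already lets the ring code veto features via vring_transport_features(); the new vop_transport_features() hook does the same at the transport level. The mechanism is simply clearing the feature bit before the accepted set is written back to the device, e.g. (illustrative):

static void my_transport_features(struct virtio_device *vdev)
{
	/* rings built with vring_new_virtqueue() live in preallocated
	 * memory, so the packed ring layout cannot be negotiated here */
	__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
}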
diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c
index e70cfa24577f..11ab996657a2 100644
--- a/drivers/misc/ocxl/afu_irq.c
+++ b/drivers/misc/ocxl/afu_irq.c
@@ -2,7 +2,6 @@
// Copyright 2017 IBM Corp.
#include <linux/interrupt.h>
#include <linux/eventfd.h>
-#include <asm/pnv-ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index 57a6bb1fd3c9..8f2c5d8bd2ee 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -318,7 +318,7 @@ static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn,
if (rc)
return rc;
ptr = (u32 *) &afu->name[i];
- *ptr = val;
+ *ptr = le32_to_cpu((__force __le32) val);
}
afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */
return 0;
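
read_afu_name() assembles a byte string from 32-bit config reads, which arrive in CPU endianness; the fix stores them back in little-endian byte order so the name is not scrambled on big-endian hosts. An equivalent formulation as a worked example (sketch):

u32 val = 0x44434241;	/* config word carrying "ABCD", 'A' in the low byte */
u8 buf[4];

*(__le32 *)buf = cpu_to_le32(val);	/* buf = "ABCD" on LE and BE alike */

A plain *(u32 *)buf = val would store "DCBA" on a big-endian machine, which is the bug the __force cast and le32_to_cpu() in the patch avoid.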
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index 31695a078485..d50b861d7e57 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -273,9 +273,9 @@ static int setup_xsl_irq(struct pci_dev *dev, struct link *link)
spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
link->domain, link->bus, link->dev);
if (!spa->irq_name) {
- unmap_irq_registers(spa);
dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_xsl;
}
/*
* At some point, we'll need to look into allowing a higher
@@ -283,11 +283,10 @@ static int setup_xsl_irq(struct pci_dev *dev, struct link *link)
*/
spa->virq = irq_create_mapping(NULL, hwirq);
if (!spa->virq) {
- kfree(spa->irq_name);
- unmap_irq_registers(spa);
dev_err(&dev->dev,
"irq_create_mapping failed for translation interrupt\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto err_name;
}
dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);
@@ -295,15 +294,21 @@ static int setup_xsl_irq(struct pci_dev *dev, struct link *link)
rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
link);
if (rc) {
- irq_dispose_mapping(spa->virq);
- kfree(spa->irq_name);
- unmap_irq_registers(spa);
dev_err(&dev->dev,
"request_irq failed for translation interrupt: %d\n",
rc);
- return -EINVAL;
+ rc = -EINVAL;
+ goto err_mapping;
}
return 0;
+
+err_mapping:
+ irq_dispose_mapping(spa->virq);
+err_name:
+ kfree(spa->irq_name);
+err_xsl:
+ unmap_irq_registers(spa);
+ return rc;
}
static void release_xsl_irq(struct link *link)
@@ -566,7 +571,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
mutex_lock(&spa->spa_lock);
- pe->tid = tid;
+ pe->tid = cpu_to_be32(tid);
/*
* The barrier makes sure the PE is updated
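
The setup_xsl_irq() rework above replaces duplicated cleanup in each error branch with the kernel's usual goto ladder: every label undoes exactly the steps that had succeeded before the failure, in reverse order. (The final hunk separately fixes an endianness annotation: the PE tid field is stored big-endian in the shared process area, hence cpu_to_be32().) The skeleton of the unwinding pattern, with illustrative names:

static int my_setup(void)
{
	int rc;

	rc = map_registers();		/* step 1 */
	if (rc)
		return rc;		/* nothing to unwind yet */

	rc = alloc_name();		/* step 2 */
	if (rc)
		goto err_unmap;

	rc = request_the_irq();		/* step 3 */
	if (rc)
		goto err_free_name;

	return 0;

err_free_name:
	free_name();
err_unmap:
	unmap_registers();
	return rc;
}

This keeps each failure path to a single goto and makes it hard to forget a release step when the function grows, which is precisely what the old open-coded copies risked.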
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index c35b5b08bb33..62e7619d5a4d 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -100,7 +100,6 @@ static DEFINE_IDA(mmc_rpmb_ida);
* There is one mmc_blk_data per slot.
*/
struct mmc_blk_data {
- spinlock_t lock;
struct device *parent;
struct gendisk *disk;
struct mmc_queue queue;
@@ -472,7 +471,7 @@ out:
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_blk_ioc_data *idata)
{
- struct mmc_command cmd = {};
+ struct mmc_command cmd = {}, sbc = {};
struct mmc_data data = {};
struct mmc_request mrq = {};
struct scatterlist sg;
@@ -550,10 +549,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
}
if (idata->rpmb) {
- err = mmc_set_blockcount(card, data.blocks,
- idata->ic.write_flag & (1 << 31));
- if (err)
- return err;
+ sbc.opcode = MMC_SET_BLOCK_COUNT;
+ /*
+ * We don't do any blockcount validation because the max size
+ * may be increased by a future standard. We just copy the
+ * 'Reliable Write' bit here.
+ */
+ sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31));
+ sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ mrq.sbc = &sbc;
}
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
@@ -1483,7 +1487,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
blk_mq_end_request(req, BLK_STS_OK);
}
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&mq->lock, flags);
mq->in_flight[mmc_issue_type(mq, req)] -= 1;
@@ -1491,7 +1495,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
mmc_cqe_check_busy(mq);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&mq->lock, flags);
if (!mq->cqe_busy)
blk_mq_run_hw_queues(q, true);
@@ -1988,17 +1992,16 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
{
- struct request_queue *q = req->q;
unsigned long flags;
bool put_card;
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&mq->lock, flags);
mq->in_flight[mmc_issue_type(mq, req)] -= 1;
put_card = (mmc_tot_in_flight(mq) == 0);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&mq->lock, flags);
if (put_card)
mmc_put_card(mq->card, &mq->ctx);
@@ -2094,11 +2097,11 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
* request does not need to wait (although it does need to
* complete complete_req first).
*/
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&mq->lock, flags);
mq->complete_req = req;
mq->rw_wait = false;
waiting = mq->waiting;
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&mq->lock, flags);
/*
* If 'waiting' then the waiting task will complete this
@@ -2117,10 +2120,10 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
/* Take the recovery path for errors or urgent background operations */
if (mmc_blk_rq_error(&mqrq->brq) ||
mmc_blk_urgent_bkops_needed(mq, mqrq)) {
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&mq->lock, flags);
mq->recovery_needed = true;
mq->recovery_req = req;
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&mq->lock, flags);
wake_up(&mq->wait);
schedule_work(&mq->recovery_work);
return;
@@ -2136,7 +2139,6 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
{
- struct request_queue *q = mq->queue;
unsigned long flags;
bool done;
@@ -2144,7 +2146,7 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
* Wait while there is another request in progress, but not if recovery
* is needed. Also indicate whether there is a request waiting to start.
*/
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&mq->lock, flags);
if (mq->recovery_needed) {
*err = -EBUSY;
done = true;
@@ -2152,7 +2154,7 @@ static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
done = !mq->rw_wait;
}
mq->waiting = !done;
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&mq->lock, flags);
return done;
}
@@ -2329,12 +2331,11 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
goto err_kfree;
}
- spin_lock_init(&md->lock);
INIT_LIST_HEAD(&md->part);
INIT_LIST_HEAD(&md->rpmbs);
md->usage = 1;
- ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+ ret = mmc_init_queue(&md->queue, card);
if (ret)
goto err_putdisk;
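
In the RPMB hunk of block.c above, SET_BLOCK_COUNT is no longer sent as a separate request; attaching it as mrq.sbc makes the core issue CMD23 back to back with the data command, which RPMB requires since no other command may intervene between the two. The shape of the pattern (sketch; write_flag and the cmd/data setup are assumed to be filled in as in the ioctl path):

struct mmc_command sbc = {}, cmd = {};
struct mmc_data data = {};
struct mmc_request mrq = {};

/* ... fill cmd/data from the ioctl as before ... */

sbc.opcode = MMC_SET_BLOCK_COUNT;		/* CMD23 */
sbc.arg = data.blocks | (write_flag & BIT(31));	/* count + Reliable Write */
sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

mrq.sbc = &sbc;		/* issued immediately before mrq.cmd */
mrq.cmd = &cmd;
mrq.data = &data;
mmc_wait_for_req(card->host, &mrq);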
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index bc1bd2c25613..55997cf84b39 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -30,6 +30,7 @@
#include "pwrseq.h"
#define DEFAULT_CMD6_TIMEOUT_MS 500
+#define MIN_CACHE_EN_TIMEOUT_MS 1600
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
@@ -526,8 +527,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->cid.year += 16;
/* check whether the eMMC card supports BKOPS */
- if (!mmc_card_broken_hpi(card) &&
- ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+ if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
card->ext_csd.bkops = 1;
card->ext_csd.man_bkops_en =
(ext_csd[EXT_CSD_BKOPS_EN] &
@@ -1782,20 +1782,26 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (err) {
pr_warn("%s: Enabling HPI failed\n",
mmc_hostname(card->host));
+ card->ext_csd.hpi_en = 0;
err = 0;
- } else
+ } else {
card->ext_csd.hpi_en = 1;
+ }
}
/*
- * If cache size is higher than 0, this indicates
- * the existence of cache and it can be turned on.
+ * If cache size is higher than 0, this indicates the existence of cache
+ * and it can be turned on. Note that some eMMCs from Micron have been
+ * reported to need a ~800 ms timeout while enabling the cache after
+ * sudden power failure tests. Let's extend the timeout to a minimum of
+ * MIN_CACHE_EN_TIMEOUT_MS and do it for all cards.
*/
- if (!mmc_card_broken_hpi(card) &&
- card->ext_csd.cache_size > 0) {
+ if (card->ext_csd.cache_size > 0) {
+ unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;
+
+ timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_CACHE_CTRL, 1,
- card->ext_csd.generic_cmd6_time);
+ EXT_CSD_CACHE_CTRL, 1, timeout_ms);
if (err && err != -EBADMSG)
goto free_card;
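
The cache-enable hunk clamps the CMD6 timeout with max() rather than replacing it, so cards advertising more than the new floor keep their own value. Worked example (sketch):

unsigned int timeout_ms = MIN_CACHE_EN_TIMEOUT_MS;	/* 1600 */

timeout_ms = max(card->ext_csd.generic_cmd6_time, timeout_ms);
/* generic_cmd6_time =  500 -> timeout_ms = 1600 (floor applies)
 * generic_cmd6_time = 3000 -> timeout_ms = 3000 (card value wins) */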
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 6edffeed9953..35cc138b096d 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -89,9 +89,9 @@ void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
struct mmc_queue *mq = q->queuedata;
unsigned long flags;
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&mq->lock, flags);
__mmc_cqe_recovery_notifier(mq);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&mq->lock, flags);
}
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
@@ -128,14 +128,14 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
unsigned long flags;
int ret;
- spin_lock_irqsave(q->queue_lock, flags);
+ spin_lock_irqsave(&mq->lock, flags);
if (mq->recovery_needed || !mq->use_cqe)
ret = BLK_EH_RESET_TIMER;
else
ret = mmc_cqe_timed_out(req);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ spin_unlock_irqrestore(&mq->lock, flags);
return ret;
}
@@ -157,9 +157,9 @@ static void mmc_mq_recovery_handler(struct work_struct *work)
mq->in_recovery = false;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&mq->lock);
mq->recovery_needed = false;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&mq->lock);
mmc_put_card(mq->card, &mq->ctx);
@@ -258,10 +258,10 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
issue_type = mmc_issue_type(mq, req);
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&mq->lock);
if (mq->recovery_needed || mq->busy) {
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&mq->lock);
return BLK_STS_RESOURCE;
}
@@ -269,7 +269,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
case MMC_ISSUE_DCMD:
if (mmc_cqe_dcmd_busy(mq)) {
mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&mq->lock);
return BLK_STS_RESOURCE;
}
break;
@@ -294,7 +294,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
get_card = (mmc_tot_in_flight(mq) == 1);
cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&mq->lock);
if (!(req->rq_flags & RQF_DONTPREP)) {
req_to_mmc_queue_req(req)->retries = 0;
@@ -328,12 +328,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
if (issued != MMC_REQ_STARTED) {
bool put_card = false;
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(&mq->lock);
mq->in_flight[issue_type] -= 1;
if (mmc_tot_in_flight(mq) == 0)
put_card = true;
mq->busy = false;
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(&mq->lock);
if (put_card)
mmc_put_card(card, &mq->ctx);
} else {
@@ -378,14 +378,37 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
init_waitqueue_head(&mq->wait);
}
-static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
- const struct blk_mq_ops *mq_ops, spinlock_t *lock)
+/* Set queue depth to get a reasonable value for q->nr_requests */
+#define MMC_QUEUE_DEPTH 64
+
+/**
+ * mmc_init_queue - initialise a queue structure.
+ * @mq: mmc queue
+ * @card: mmc card to attach this queue
+ *
+ * Initialise a MMC card request queue.
+ */
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
int ret;
+ mq->card = card;
+ mq->use_cqe = host->cqe_enabled;
+
+ spin_lock_init(&mq->lock);
+
memset(&mq->tag_set, 0, sizeof(mq->tag_set));
- mq->tag_set.ops = mq_ops;
- mq->tag_set.queue_depth = q_depth;
+ mq->tag_set.ops = &mmc_mq_ops;
+ /*
+ * The queue depth for CQE must match the hardware because the request
+ * tag is used to index the hardware queue.
+ */
+ if (mq->use_cqe)
+ mq->tag_set.queue_depth =
+ min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
+ else
+ mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
mq->tag_set.numa_node = NUMA_NO_NODE;
mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
BLK_MQ_F_BLOCKING;
@@ -403,68 +426,17 @@ static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
goto free_tag_set;
}
- mq->queue->queue_lock = lock;
mq->queue->queuedata = mq;
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
+ mmc_setup_queue(mq, card);
return 0;
free_tag_set:
blk_mq_free_tag_set(&mq->tag_set);
-
return ret;
}
-/* Set queue depth to get a reasonable value for q->nr_requests */
-#define MMC_QUEUE_DEPTH 64
-
-static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
- spinlock_t *lock)
-{
- struct mmc_host *host = card->host;
- int q_depth;
- int ret;
-
- /*
- * The queue depth for CQE must match the hardware because the request
- * tag is used to index the hardware queue.
- */
- if (mq->use_cqe)
- q_depth = min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
- else
- q_depth = MMC_QUEUE_DEPTH;
-
- ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
- if (ret)
- return ret;
-
- blk_queue_rq_timeout(mq->queue, 60 * HZ);
-
- mmc_setup_queue(mq, card);
-
- return 0;
-}
-
-/**
- * mmc_init_queue - initialise a queue structure.
- * @mq: mmc queue
- * @card: mmc card to attach this queue
- * @lock: queue lock
- * @subname: partition subname
- *
- * Initialise a MMC card request queue.
- */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
- spinlock_t *lock, const char *subname)
-{
- struct mmc_host *host = card->host;
-
- mq->card = card;
-
- mq->use_cqe = host->cqe_enabled;
-
- return mmc_mq_init(mq, card, lock);
-}
-
void mmc_queue_suspend(struct mmc_queue *mq)
{
blk_mq_quiesce_queue(mq->queue);
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 9bf3c9245075..fd11491ced9f 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -77,6 +77,7 @@ struct mmc_queue {
struct blk_mq_tag_set tag_set;
struct mmc_blk_data *blkdata;
struct request_queue *queue;
+ spinlock_t lock;
int in_flight[MMC_ISSUE_MAX];
unsigned int cqe_busy;
#define MMC_CQE_DCMD_BUSY BIT(0)
@@ -95,8 +96,7 @@ struct mmc_queue {
struct work_struct complete_work;
};
-extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
- const char *);
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
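
Two themes run through the queue.c/queue.h changes: the queue now serializes on a spinlock embedded in struct mmc_queue instead of the legacy q->queue_lock, and the tag-set depth choice is consolidated into mmc_init_queue(). Worked example of the CQE clamp (sketch): a card advertising cmdq_depth = 32 on a host with cqe_qdepth = 16 gets a depth of 16, so a blk-mq request tag can never exceed what the CQE hardware can index, while non-CQE hosts simply use the software default:

if (mq->use_cqe)
	mq->tag_set.queue_depth =
		min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);	/* 16 */
else
	mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;			/* 64 */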
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index adf32682f27a..c60a7625b1fa 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -104,6 +104,7 @@ struct mmc_omap_slot {
unsigned int vdd;
u16 saved_con;
u16 bus_mode;
+ u16 power_mode;
unsigned int fclk_freq;
struct tasklet_struct cover_tasklet;
@@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct mmc_omap_slot *slot = mmc_priv(mmc);
struct mmc_omap_host *host = slot->host;
int i, dsor;
- int clk_enabled;
+ int clk_enabled, init_stream;
mmc_omap_select_slot(slot, 0);
@@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
slot->vdd = ios->vdd;
clk_enabled = 0;
+ init_stream = 0;
switch (ios->power_mode) {
case MMC_POWER_OFF:
mmc_omap_set_power(slot, 0, ios->vdd);
@@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_UP:
/* Cannot touch dsor yet, just power up MMC */
mmc_omap_set_power(slot, 1, ios->vdd);
+ slot->power_mode = ios->power_mode;
goto exit;
case MMC_POWER_ON:
mmc_omap_fclk_enable(host, 1);
clk_enabled = 1;
dsor |= 1 << 11;
+ if (slot->power_mode != MMC_POWER_ON)
+ init_stream = 1;
break;
}
+ slot->power_mode = ios->power_mode;
if (slot->bus_mode != ios->bus_mode) {
if (slot->pdata->set_bus_mode != NULL)
@@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
for (i = 0; i < 2; i++)
OMAP_MMC_WRITE(host, CON, dsor);
slot->saved_con = dsor;
- if (ios->power_mode == MMC_POWER_ON) {
+ if (init_stream) {
/* worst case at 400kHz, 80 cycles makes 200 microsecs */
int usecs = 250;
@@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
slot->host = host;
slot->mmc = mmc;
slot->id = id;
+ slot->power_mode = MMC_POWER_UNDEFINED;
slot->pdata = &host->pdata->slots[id];
host->slots[id] = slot;
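
The omap driver previously keyed the 80-clock initialization stream off ios->power_mode == MMC_POWER_ON alone, so it was re-sent on every set_ios() call while powered; caching the previous mode turns it into an edge trigger. The generic guard (sketch):

/* send the one-time init stream only on the transition into POWER_ON */
init_stream = (ios->power_mode == MMC_POWER_ON &&
	       slot->power_mode != MMC_POWER_ON);
slot->power_mode = ios->power_mode;	/* remember for the next call */

Seeding slot->power_mode with MMC_POWER_UNDEFINED at slot creation guarantees the very first power-up is still seen as an edge.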
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 467d889a1638..3f4ea8f624be 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1909,7 +1909,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
- mmc->max_seg_size = mmc->max_req_size;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE | MMC_CAP_CMD23;
@@ -1939,6 +1938,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
goto err_irq;
}
+ /*
+ * Limit the maximum segment size to the lower of the request size
+ * and the DMA engine device segment size limits. In reality, with
+ * 32-bit transfers, the DMA engine can do longer segments than this
+ * but there is no way to represent that in the DMA model - if we
+ * increase this figure here, we get warnings from the DMA API debug.
+ */
+ mmc->max_seg_size = min3(mmc->max_req_size,
+ dma_get_max_seg_size(host->rx_chan->device->dev),
+ dma_get_max_seg_size(host->tx_chan->device->dev));
+
/* Request IRQ for MMC operations */
ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
mmc_hostname(mmc), host);
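
omap_hsmmc now bounds max_seg_size by the DMA engine limits as well as the request size; min3() simply picks the smallest of the three. Worked example (sketch), assuming the kernel's 64 KiB default when a DMA device sets no explicit segment limit:

/* max_req_size = 512 * 0xFFFF = 33553920
 * rx/tx dma_get_max_seg_size() = 65536 (SZ_64K default)
 * -> max_seg_size = 65536 */
mmc->max_seg_size = min3(mmc->max_req_size,
			 dma_get_max_seg_size(host->rx_chan->device->dev),
			 dma_get_max_seg_size(host->tx_chan->device->dev));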
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 88347ce78f23..d264391616f9 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -288,9 +288,9 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
struct device *dev = omap_host->dev;
struct mmc_ios *ios = &mmc->ios;
u32 start_window = 0, max_window = 0;
+ bool dcrc_was_enabled = false;
u8 cur_match, prev_match = 0;
u32 length = 0, max_len = 0;
- u32 ier = host->ier;
u32 phase_delay = 0;
int ret = 0;
u32 reg;
@@ -317,9 +317,10 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
* during the tuning procedure. So disable it during the
* tuning procedure.
*/
- ier &= ~SDHCI_INT_DATA_CRC;
- sdhci_writel(host, ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+ if (host->ier & SDHCI_INT_DATA_CRC) {
+ host->ier &= ~SDHCI_INT_DATA_CRC;
+ dcrc_was_enabled = true;
+ }
while (phase_delay <= MAX_PHASE_DELAY) {
sdhci_omap_set_dll(omap_host, phase_delay);
@@ -366,6 +367,9 @@ tuning_error:
ret:
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
+ /* Re-enable the DATA CRC interrupt if it was disabled for tuning */
+ if (dcrc_was_enabled)
+ host->ier |= SDHCI_INT_DATA_CRC;
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
return ret;
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 7b95d088fdef..e6ace31e2a41 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -510,25 +510,25 @@ static void tegra_sdhci_parse_pad_autocal_dt(struct sdhci_host *host)
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-up-offset-3v3-timeout",
- &autocal->pull_up_3v3);
+ &autocal->pull_up_3v3_timeout);
if (err)
autocal->pull_up_3v3_timeout = 0;
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-down-offset-3v3-timeout",
- &autocal->pull_down_3v3);
+ &autocal->pull_down_3v3_timeout);
if (err)
autocal->pull_down_3v3_timeout = 0;
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-up-offset-1v8-timeout",
- &autocal->pull_up_1v8);
+ &autocal->pull_up_1v8_timeout);
if (err)
autocal->pull_up_1v8_timeout = 0;
err = device_property_read_u32(host->mmc->parent,
"nvidia,pad-autocal-pull-down-offset-1v8-timeout",
- &autocal->pull_down_1v8);
+ &autocal->pull_down_1v8_timeout);
if (err)
autocal->pull_down_1v8_timeout = 0;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 99bdae53fa2e..df05352b6a4a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -127,12 +127,12 @@ static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
u16 ctrl2;
- ctrl2 = sdhci_readb(host, SDHCI_HOST_CONTROL2);
+ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (ctrl2 & SDHCI_CTRL_V4_MODE)
return;
ctrl2 |= SDHCI_CTRL_V4_MODE;
- sdhci_writeb(host, ctrl2, SDHCI_HOST_CONTROL);
+ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}
/*
@@ -216,8 +216,12 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
timeout = ktime_add_ms(ktime_get(), 100);
/* hw clears the bit when it's done */
- while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
- if (ktime_after(ktime_get(), timeout)) {
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
+ break;
+ if (timedout) {
pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
sdhci_dumpregs(host);
@@ -1608,9 +1612,13 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
/* Wait max 20 ms */
timeout = ktime_add_ms(ktime_get(), 20);
- while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
- & SDHCI_CLOCK_INT_STABLE)) {
- if (ktime_after(ktime_get(), timeout)) {
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (clk & SDHCI_CLOCK_INT_STABLE)
+ break;
+ if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
sdhci_dumpregs(host);
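
Both sdhci loops above are rewritten to the race-free polling idiom: sample the deadline first, then read the hardware, and only declare failure when the register still shows busy on a read that provably happened past the deadline. In the old form, a thread preempted between the register read and the time check could misreport a long-finished operation as a timeout. Generic shape (sketch; my_hw_ready() is a hypothetical predicate):

ktime_t timeout = ktime_add_ms(ktime_get(), 20);

while (1) {
	bool timedout = ktime_after(ktime_get(), timeout);

	if (my_hw_ready())	/* hardware read happens after the stamp */
		break;
	if (timedout)
		return -ETIMEDOUT;
	udelay(10);
}

The ordering matters: by the time timedout is true, the loop is guaranteed one final hardware read before giving up.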
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index c77f537323ec..1e18c9639c3e 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -22,56 +22,6 @@ config MTD_TESTS
WARNING: some of the tests will ERASE the entire MTD device which they
test. Do not use these tests unless you really know what you are doing.
-config MTD_REDBOOT_PARTS
- tristate "RedBoot partition table parsing"
- help
- RedBoot is a ROM monitor and bootloader which deals with multiple
- 'images' in flash devices by putting a table one of the erase
- blocks on the device, similar to a partition table, which gives
- the offsets, lengths and names of all the images stored in the
- flash.
-
- If you need code which can detect and parse this table, and register
- MTD 'partitions' corresponding to each image in the table, enable
- this option.
-
- You will still need the parsing functions to be called by the driver
- for your particular device. It won't happen automatically. The
- SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
- example.
-
-if MTD_REDBOOT_PARTS
-
-config MTD_REDBOOT_DIRECTORY_BLOCK
- int "Location of RedBoot partition table"
- default "-1"
- help
- This option is the Linux counterpart to the
- CYGNUM_REDBOOT_FIS_DIRECTORY_BLOCK RedBoot compile time
- option.
-
- The option specifies which Flash sectors holds the RedBoot
- partition table. A zero or positive value gives an absolute
- erase block number. A negative value specifies a number of
- sectors before the end of the device.
-
- For example "2" means block number 2, "-1" means the last
- block and "-2" means the penultimate block.
-
-config MTD_REDBOOT_PARTS_UNALLOCATED
- bool "Include unallocated flash regions"
- help
- If you need to register each unallocated flash region as a MTD
- 'partition', enable this option.
-
-config MTD_REDBOOT_PARTS_READONLY
- bool "Force read-only for RedBoot system images"
- help
- If you need to force read-only for 'RedBoot', 'RedBoot Config' and
- 'FIS directory' images, enable this option.
-
-endif # MTD_REDBOOT_PARTS
-
config MTD_CMDLINE_PARTS
tristate "Command line partition table parsing"
depends on MTD
@@ -144,7 +94,7 @@ config MTD_BCM63XX_PARTS
depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
select CRC32
help
- This provides partions parsing for BCM63xx devices with CFE
+ This provides partition parsing for BCM63xx devices with CFE
bootloaders.
config MTD_BCM47XX_PARTS
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 93473d215a38..58fc327a5276 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_MTD) += mtd.o
mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o
obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
-obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 35aa72b720a6..e752067526a5 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -324,6 +324,7 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
case FL_JEDEC_QUERY:
map_write(map, CMD(0x70), cmd_addr);
chip->state = FL_STATUS;
+ /* Fall through */
case FL_STATUS:
status = map_read(map, cmd_addr);
@@ -461,6 +462,7 @@ static int do_write_buffer(struct map_info *map, struct flchip *chip,
#ifdef DEBUG_CFI_FEATURES
printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr));
#endif
+ /* Fall through */
case FL_STATUS:
status = map_read(map, cmd_adr);
@@ -754,6 +756,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
+ /* Fall through */
case FL_STATUS:
status = map_read(map, adr);
@@ -995,6 +998,7 @@ static void cfi_staa_sync (struct mtd_info *mtd)
* as the whole point is that nobody can do anything
* with the chip now anyway.
*/
+ /* Fall through */
case FL_SYNCING:
mutex_unlock(&chip->mutex);
break;
@@ -1050,6 +1054,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
+ /* Fall through */
case FL_STATUS:
status = map_read(map, adr);
@@ -1196,6 +1201,7 @@ retry:
case FL_READY:
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
+ /* Fall through */
case FL_STATUS:
status = map_read(map, adr);
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index c9e424993e37..410a321682e6 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -329,8 +329,10 @@ static int ustrtoul(const char *cp, char **endp, unsigned int base)
switch (**endp) {
case 'G' :
result *= 1024;
+ /* fall through */
case 'M':
result *= 1024;
+ /* fall through */
case 'K':
case 'k':
result *= 1024;
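
The /* Fall through */ and /* fall through */ comments added in cfi_cmdset_0020.c and block2mtd.c mark deliberate case fallthrough so GCC's -Wimplicit-fallthrough stays quiet (this series predates the kernel-wide fallthrough; pseudo-keyword). block2mtd's suffix parser shows why the construct is worth annotating rather than restructuring: each case intentionally cascades into the next multiplication. Worked example:

switch (suffix) {
case 'G':
	result *= 1024;
	/* fall through */
case 'M':
	result *= 1024;
	/* fall through */
case 'K':
case 'k':
	result *= 1024;
}
/* "4k" -> 4 << 10;  "4M" -> 4 << 20;  "4G" -> 4 << 30 */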
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 512bd4c2eec0..4c94fc096696 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1603,7 +1603,7 @@ static void doc_unregister_sysfs(struct platform_device *pdev,
/*
* Debug sysfs entries
*/
-static int dbg_flashctrl_show(struct seq_file *s, void *p)
+static int flashcontrol_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
@@ -1623,9 +1623,9 @@ static int dbg_flashctrl_show(struct seq_file *s, void *p)
return 0;
}
-DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
+DEFINE_SHOW_ATTRIBUTE(flashcontrol);
-static int dbg_asicmode_show(struct seq_file *s, void *p)
+static int asic_mode_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
@@ -1660,9 +1660,9 @@ static int dbg_asicmode_show(struct seq_file *s, void *p)
seq_puts(s, ")\n");
return 0;
}
-DEBUGFS_RO_ATTR(asic_mode, dbg_asicmode_show);
+DEFINE_SHOW_ATTRIBUTE(asic_mode);
-static int dbg_device_id_show(struct seq_file *s, void *p)
+static int device_id_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
int id;
@@ -1674,9 +1674,9 @@ static int dbg_device_id_show(struct seq_file *s, void *p)
seq_printf(s, "DeviceId = %d\n", id);
return 0;
}
-DEBUGFS_RO_ATTR(device_id, dbg_device_id_show);
+DEFINE_SHOW_ATTRIBUTE(device_id);
-static int dbg_protection_show(struct seq_file *s, void *p)
+static int protection_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
@@ -1726,7 +1726,7 @@ static int dbg_protection_show(struct seq_file *s, void *p)
!!(dps1 & DOC_DPS_KEY_OK));
return 0;
}
-DEBUGFS_RO_ATTR(protection, dbg_protection_show);
+DEFINE_SHOW_ATTRIBUTE(protection);
static void __init doc_dbg_register(struct mtd_info *floor)
{
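
docg3 drops its private DEBUGFS_RO_ATTR() macro for the generic DEFINE_SHOW_ATTRIBUTE(), which generates the single_open() boilerplate around a function named <name>_show; that naming contract is why dbg_flashctrl_show() and friends are renamed above. Usage sketch (my_priv and the value are illustrative):

static int mystat_show(struct seq_file *s, void *unused)
{
	struct my_priv *priv = s->private;	/* from inode->i_private */

	seq_printf(s, "value = %d\n", priv->value);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(mystat);	/* emits mystat_open() and mystat_fops */

/* register it: */
debugfs_create_file("mystat", 0400, parent, priv, &mystat_fops);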
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
index e99946575398..e16dca23655b 100644
--- a/drivers/mtd/devices/docg3.h
+++ b/drivers/mtd/devices/docg3.h
@@ -317,17 +317,6 @@ struct docg3 {
#define doc_info(fmt, arg...) dev_info(docg3->dev, (fmt), ## arg)
#define doc_dbg(fmt, arg...) dev_dbg(docg3->dev, (fmt), ## arg)
#define doc_vdbg(fmt, arg...) dev_vdbg(docg3->dev, (fmt), ## arg)
-
-#define DEBUGFS_RO_ATTR(name, show_fct) \
- static int name##_open(struct inode *inode, struct file *file) \
- { return single_open(file, show_fct, inode->i_private); } \
- static const struct file_operations name##_fops = { \
- .owner = THIS_MODULE, \
- .open = name##_open, \
- .llseek = seq_lseek, \
- .read = seq_read, \
- .release = single_release \
- };
#endif
/*
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index afb36bff13a7..e0cf869c8544 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -66,15 +66,15 @@ config MTD_PHYSMAP_BANKWIDTH
used internally by the CFI drivers.
config MTD_PHYSMAP_OF
- tristate "Memory device in physical memory map based on OF description"
- depends on OF && (MTD_CFI || MTD_JEDECPROBE || MTD_ROM || MTD_RAM)
+ bool "Memory device in physical memory map based on OF description"
+ depends on OF && MTD_PHYSMAP
help
This provides a 'mapping' driver which allows the NOR Flash, ROM
and RAM driver code to communicate with chips which are mapped
physically into the CPU's memory. The mapping description here is
taken from OF device tree.
-config MTD_PHYSMAP_OF_VERSATILE
+config MTD_PHYSMAP_VERSATILE
bool "ARM Versatile OF-based physical memory map handling"
depends on MTD_PHYSMAP_OF
depends on MFD_SYSCON
@@ -84,16 +84,26 @@ config MTD_PHYSMAP_OF_VERSATILE
platforms, basically to add a VPP (write protection) callback so
the flash can be taken out of write protection.
-config MTD_PHYSMAP_OF_GEMINI
+config MTD_PHYSMAP_GEMINI
bool "Cortina Gemini OF-based physical memory map handling"
depends on MTD_PHYSMAP_OF
depends on MFD_SYSCON
+ select MTD_COMPLEX_MAPPINGS
default ARCH_GEMINI
help
This provides some extra DT physmap parsing for the Gemini
platforms, some detection and setting up parallel mode on the
external interface.
+config MTD_PHYSMAP_GPIO_ADDR
+ bool "GPIO-assisted Flash Chip Support"
+ depends on MTD_PHYSMAP
+ depends on GPIOLIB || COMPILE_TEST
+ depends on MTD_COMPLEX_MAPPINGS
+ help
+ Extend the physmap driver to allow flashes to be partially
+ physically addressed and assisted by GPIOs.
+
config MTD_PMC_MSP_EVM
tristate "CFI Flash device mapped on PMC-Sierra MSP"
depends on PMC_MSP && MTD_CFI
@@ -334,16 +344,6 @@ config MTD_PCMCIA_ANONYMOUS
If unsure, say N.
-config MTD_GPIO_ADDR
- tristate "GPIO-assisted Flash Chip Support"
- depends on GPIOLIB || COMPILE_TEST
- depends on MTD_COMPLEX_MAPPINGS
- help
- Map driver which allows flashes to be partially physically addressed
- and assisted by GPIOs.
-
- If compiled as a module, it will be called gpio-addr-flash.
-
config MTD_UCLINUX
bool "Generic uClinux RAM/ROM filesystem support"
depends on (MTD_RAM=y || MTD_ROM=y) && (!MMU || COLDFIRE)
@@ -400,13 +400,4 @@ config MTD_PISMO
When built as a module, it will be called pismo.ko
-config MTD_LATCH_ADDR
- tristate "Latch-assisted Flash Chip Support"
- depends on MTD_COMPLEX_MAPPINGS
- help
- Map driver which allows flashes to be partially physically addressed
- and have the upper address lines set by a board specific code.
-
- If compiled as a module, it will be called latch-addr-flash.
-
endmenu
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 51acf1fec19b..1146009f41df 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -17,12 +17,11 @@ obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o
obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o
obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o
obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o
+physmap-objs-y += physmap-core.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_VERSATILE) += physmap-versatile.o
+physmap-objs-$(CONFIG_MTD_PHYSMAP_GEMINI) += physmap-gemini.o
+physmap-objs := $(physmap-objs-y)
obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
-physmap_of-objs-y += physmap_of_core.o
-physmap_of-objs-$(CONFIG_MTD_PHYSMAP_OF_VERSATILE) += physmap_of_versatile.o
-physmap_of-objs-$(CONFIG_MTD_PHYSMAP_OF_GEMINI) += physmap_of_gemini.o
-physmap_of-objs := $(physmap_of-objs-y)
-obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PISMO) += pismo.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
@@ -44,6 +43,4 @@ obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
-obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
-obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
deleted file mode 100644
index a20e85aa770e..000000000000
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * drivers/mtd/maps/gpio-addr-flash.c
- *
- * Handle the case where a flash device is mostly addressed using physical
- * line and supplemented by GPIOs. This way you can hook up say a 8MiB flash
- * to a 2MiB memory range and use the GPIOs to select a particular range.
- *
- * Copyright © 2000 Nicolas Pitre <nico@cam.org>
- * Copyright © 2005-2009 Analog Devices Inc.
- *
- * Enter bugs at http://blackfin.uclinux.org/
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-#define win_mask(x) ((BIT(x)) - 1)
-
-#define DRIVER_NAME "gpio-addr-flash"
-
-/**
- * struct async_state - keep GPIO flash state
- * @mtd: MTD state for this mapping
- * @map: MTD map state for this flash
- * @gpios: Struct containing the array of GPIO descriptors
- * @gpio_values: cached GPIO values
- * @win_order: dedicated memory size (if no GPIOs)
- */
-struct async_state {
- struct mtd_info *mtd;
- struct map_info map;
- struct gpio_descs *gpios;
- unsigned int gpio_values;
- unsigned int win_order;
-};
-#define gf_map_info_to_state(mi) ((struct async_state *)(mi)->map_priv_1)
-
-/**
- * gf_set_gpios() - set GPIO address lines to access specified flash offset
- * @state: GPIO flash state
- * @ofs: desired offset to access
- *
- * Rather than call the GPIO framework every time, cache the last-programmed
- * value. This speeds up sequential accesses (which are by far the most common
- * type).
- */
-static void gf_set_gpios(struct async_state *state, unsigned long ofs)
-{
- int i;
-
- ofs >>= state->win_order;
-
- if (ofs == state->gpio_values)
- return;
-
- for (i = 0; i < state->gpios->ndescs; i++) {
- if ((ofs & BIT(i)) == (state->gpio_values & BIT(i)))
- continue;
-
- gpiod_set_value(state->gpios->desc[i], !!(ofs & BIT(i)));
- }
-
- state->gpio_values = ofs;
-}
-
-/**
- * gf_read() - read a word at the specified offset
- * @map: MTD map state
- * @ofs: desired offset to read
- */
-static map_word gf_read(struct map_info *map, unsigned long ofs)
-{
- struct async_state *state = gf_map_info_to_state(map);
- uint16_t word;
- map_word test;
-
- gf_set_gpios(state, ofs);
-
- word = readw(map->virt + (ofs & win_mask(state->win_order)));
- test.x[0] = word;
- return test;
-}
-
-/**
- * gf_copy_from() - copy a chunk of data from the flash
- * @map: MTD map state
- * @to: memory to copy to
- * @from: flash offset to copy from
- * @len: how much to copy
- *
- * The "from" region may straddle more than one window, so toggle the GPIOs for
- * each window region before reading its data.
- */
-static void gf_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
-{
- struct async_state *state = gf_map_info_to_state(map);
-
- int this_len;
-
- while (len) {
- this_len = from & win_mask(state->win_order);
- this_len = BIT(state->win_order) - this_len;
- this_len = min_t(int, len, this_len);
-
- gf_set_gpios(state, from);
- memcpy_fromio(to,
- map->virt + (from & win_mask(state->win_order)),
- this_len);
- len -= this_len;
- from += this_len;
- to += this_len;
- }
-}
-
-/**
- * gf_write() - write a word at the specified offset
- * @map: MTD map state
- * @ofs: desired offset to write
- */
-static void gf_write(struct map_info *map, map_word d1, unsigned long ofs)
-{
- struct async_state *state = gf_map_info_to_state(map);
- uint16_t d;
-
- gf_set_gpios(state, ofs);
-
- d = d1.x[0];
- writew(d, map->virt + (ofs & win_mask(state->win_order)));
-}
-
-/**
- * gf_copy_to() - copy a chunk of data to the flash
- * @map: MTD map state
- * @to: flash offset to copy to
- * @from: memory to copy from
- * @len: how much to copy
- *
- * See gf_copy_from() caveat.
- */
-static void gf_copy_to(struct map_info *map, unsigned long to,
- const void *from, ssize_t len)
-{
- struct async_state *state = gf_map_info_to_state(map);
-
- int this_len;
-
- while (len) {
- this_len = to & win_mask(state->win_order);
- this_len = BIT(state->win_order) - this_len;
- this_len = min_t(int, len, this_len);
-
- gf_set_gpios(state, to);
- memcpy_toio(map->virt + (to & win_mask(state->win_order)),
- from, len);
-
- len -= this_len;
- to += this_len;
- from += this_len;
- }
-}
-
-static const char * const part_probe_types[] = {
- "cmdlinepart", "RedBoot", NULL };
-
-/**
- * gpio_flash_probe() - setup a mapping for a GPIO assisted flash
- * @pdev: platform device
- *
- * The platform resource layout expected looks something like:
- * struct mtd_partition partitions[] = { ... };
- * struct physmap_flash_data flash_data = { ... };
- * static struct gpiod_lookup_table addr_flash_gpios = {
- * .dev_id = "gpio-addr-flash.0",
- * .table = {
- * GPIO_LOOKUP_IDX("gpio.0", 15, "addr", 0, GPIO_ACTIVE_HIGH),
- * GPIO_LOOKUP_IDX("gpio.0", 16, "addr", 1, GPIO_ACTIVE_HIGH),
- * );
- * };
- * gpiod_add_lookup_table(&addr_flash_gpios);
- *
- * struct resource flash_resource[] = {
- * {
- * .name = "cfi_probe",
- * .start = 0x20000000,
- * .end = 0x201fffff,
- * .flags = IORESOURCE_MEM,
- * },
- * };
- * struct platform_device flash_device = {
- * .name = "gpio-addr-flash",
- * .dev = { .platform_data = &flash_data, },
- * .num_resources = ARRAY_SIZE(flash_resource),
- * .resource = flash_resource,
- * ...
- * };
- */
-static int gpio_flash_probe(struct platform_device *pdev)
-{
- struct physmap_flash_data *pdata;
- struct resource *memory;
- struct async_state *state;
-
- pdata = dev_get_platdata(&pdev->dev);
- memory = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- if (!memory)
- return -EINVAL;
-
- state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL);
- if (!state)
- return -ENOMEM;
-
- state->gpios = devm_gpiod_get_array(&pdev->dev, "addr", GPIOD_OUT_LOW);
- if (IS_ERR(state->gpios))
- return PTR_ERR(state->gpios);
-
- state->win_order = get_bitmask_order(resource_size(memory)) - 1;
-
- state->map.name = DRIVER_NAME;
- state->map.read = gf_read;
- state->map.copy_from = gf_copy_from;
- state->map.write = gf_write;
- state->map.copy_to = gf_copy_to;
- state->map.bankwidth = pdata->width;
- state->map.size = BIT(state->win_order + state->gpios->ndescs);
- state->map.virt = devm_ioremap_resource(&pdev->dev, memory);
- if (IS_ERR(state->map.virt))
- return PTR_ERR(state->map.virt);
-
- state->map.phys = NO_XIP;
- state->map.map_priv_1 = (unsigned long)state;
-
- platform_set_drvdata(pdev, state);
-
- dev_notice(&pdev->dev, "probing %d-bit flash bus\n",
- state->map.bankwidth * 8);
- state->mtd = do_map_probe(memory->name, &state->map);
- if (!state->mtd)
- return -ENXIO;
- state->mtd->dev.parent = &pdev->dev;
-
- mtd_device_parse_register(state->mtd, part_probe_types, NULL,
- pdata->parts, pdata->nr_parts);
-
- return 0;
-}
-
-static int gpio_flash_remove(struct platform_device *pdev)
-{
- struct async_state *state = platform_get_drvdata(pdev);
-
- mtd_device_unregister(state->mtd);
- map_destroy(state->mtd);
- return 0;
-}
-
-static struct platform_driver gpio_flash_driver = {
- .probe = gpio_flash_probe,
- .remove = gpio_flash_remove,
- .driver = {
- .name = DRIVER_NAME,
- },
-};
-
-module_platform_driver(gpio_flash_driver);
-
-MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
-MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
deleted file mode 100644
index 51db24b7f88d..000000000000
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * Interface for NOR flash driver whose high address lines are latched
- *
- * Copyright © 2000 Nicolas Pitre <nico@cam.org>
- * Copyright © 2005-2008 Analog Devices Inc.
- * Copyright © 2008 MontaVista Software, Inc. <source@mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/latch-addr-flash.h>
-#include <linux/slab.h>
-
-#define DRIVER_NAME "latch-addr-flash"
-
-struct latch_addr_flash_info {
- struct mtd_info *mtd;
- struct map_info map;
- struct resource *res;
-
- void (*set_window)(unsigned long offset, void *data);
- void *data;
-
- /* cache; could be found out of res */
- unsigned long win_mask;
-
- spinlock_t lock;
-};
-
-static map_word lf_read(struct map_info *map, unsigned long ofs)
-{
- struct latch_addr_flash_info *info;
- map_word datum;
-
- info = (struct latch_addr_flash_info *)map->map_priv_1;
-
- spin_lock(&info->lock);
-
- info->set_window(ofs, info->data);
- datum = inline_map_read(map, info->win_mask & ofs);
-
- spin_unlock(&info->lock);
-
- return datum;
-}
-
-static void lf_write(struct map_info *map, map_word datum, unsigned long ofs)
-{
- struct latch_addr_flash_info *info;
-
- info = (struct latch_addr_flash_info *)map->map_priv_1;
-
- spin_lock(&info->lock);
-
- info->set_window(ofs, info->data);
- inline_map_write(map, datum, info->win_mask & ofs);
-
- spin_unlock(&info->lock);
-}
-
-static void lf_copy_from(struct map_info *map, void *to,
- unsigned long from, ssize_t len)
-{
- struct latch_addr_flash_info *info =
- (struct latch_addr_flash_info *) map->map_priv_1;
- unsigned n;
-
- while (len > 0) {
- n = info->win_mask + 1 - (from & info->win_mask);
- if (n > len)
- n = len;
-
- spin_lock(&info->lock);
-
- info->set_window(from, info->data);
- memcpy_fromio(to, map->virt + (from & info->win_mask), n);
-
- spin_unlock(&info->lock);
-
- to += n;
- from += n;
- len -= n;
- }
-}
-
-static char *rom_probe_types[] = { "cfi_probe", NULL };
-
-static int latch_addr_flash_remove(struct platform_device *dev)
-{
- struct latch_addr_flash_info *info;
- struct latch_addr_flash_data *latch_addr_data;
-
- info = platform_get_drvdata(dev);
- if (info == NULL)
- return 0;
-
- latch_addr_data = dev_get_platdata(&dev->dev);
-
- if (info->mtd != NULL) {
- mtd_device_unregister(info->mtd);
- map_destroy(info->mtd);
- }
-
- if (info->map.virt != NULL)
- iounmap(info->map.virt);
-
- if (info->res != NULL)
- release_mem_region(info->res->start, resource_size(info->res));
-
- kfree(info);
-
- if (latch_addr_data->done)
- latch_addr_data->done(latch_addr_data->data);
-
- return 0;
-}
-
-static int latch_addr_flash_probe(struct platform_device *dev)
-{
- struct latch_addr_flash_data *latch_addr_data;
- struct latch_addr_flash_info *info;
- resource_size_t win_base = dev->resource->start;
- resource_size_t win_size = resource_size(dev->resource);
- char **probe_type;
- int chipsel;
- int err;
-
- latch_addr_data = dev_get_platdata(&dev->dev);
- if (latch_addr_data == NULL)
- return -ENODEV;
-
- pr_notice("latch-addr platform flash device: %#llx byte "
- "window at %#.8llx\n",
- (unsigned long long)win_size, (unsigned long long)win_base);
-
- chipsel = dev->id;
-
- if (latch_addr_data->init) {
- err = latch_addr_data->init(latch_addr_data->data, chipsel);
- if (err != 0)
- return err;
- }
-
- info = kzalloc(sizeof(struct latch_addr_flash_info), GFP_KERNEL);
- if (info == NULL) {
- err = -ENOMEM;
- goto done;
- }
-
- platform_set_drvdata(dev, info);
-
- info->res = request_mem_region(win_base, win_size, DRIVER_NAME);
- if (info->res == NULL) {
- dev_err(&dev->dev, "Could not reserve memory region\n");
- err = -EBUSY;
- goto free_info;
- }
-
- info->map.name = DRIVER_NAME;
- info->map.size = latch_addr_data->size;
- info->map.bankwidth = latch_addr_data->width;
-
- info->map.phys = NO_XIP;
- info->map.virt = ioremap(win_base, win_size);
- if (!info->map.virt) {
- err = -ENOMEM;
- goto free_res;
- }
-
- info->map.map_priv_1 = (unsigned long)info;
-
- info->map.read = lf_read;
- info->map.copy_from = lf_copy_from;
- info->map.write = lf_write;
- info->set_window = latch_addr_data->set_window;
- info->data = latch_addr_data->data;
- info->win_mask = win_size - 1;
-
- spin_lock_init(&info->lock);
-
- for (probe_type = rom_probe_types; !info->mtd && *probe_type;
- probe_type++)
- info->mtd = do_map_probe(*probe_type, &info->map);
-
- if (info->mtd == NULL) {
- dev_err(&dev->dev, "map_probe failed\n");
- err = -ENODEV;
- goto iounmap;
- }
- info->mtd->dev.parent = &dev->dev;
-
- mtd_device_register(info->mtd, latch_addr_data->parts,
- latch_addr_data->nr_parts);
- return 0;
-
-iounmap:
- iounmap(info->map.virt);
-free_res:
- release_mem_region(info->res->start, resource_size(info->res));
-free_info:
- kfree(info);
-done:
- if (latch_addr_data->done)
- latch_addr_data->done(latch_addr_data->data);
- return err;
-}
-
-static struct platform_driver latch_addr_flash_driver = {
- .probe = latch_addr_flash_probe,
- .remove = latch_addr_flash_remove,
- .driver = {
- .name = DRIVER_NAME,
- },
-};
-
-module_platform_driver(latch_addr_flash_driver);
-
-MODULE_AUTHOR("David Griego <dgriego@mvista.com>");
-MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper "
- "address lines being set board specifically");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
new file mode 100644
index 000000000000..d9a3e4bebe5d
--- /dev/null
+++ b/drivers/mtd/maps/physmap-core.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Normal mappings of chips in physical memory
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * 031022 - [jsun] add run-time configure and partition setup
+ *
+ * Device tree support:
+ * Copyright (C) 2006 MontaVista Software Inc.
+ * Author: Vitaly Wool <vwool@ru.mvista.com>
+ *
+ * Revised to handle newer style flash binding by:
+ * Copyright (C) 2007 David Gibson, IBM Corporation.
+ *
+ * GPIO address extension:
+ * Handle the case where a flash device is mostly addressed using physical
+ * lines and supplemented by GPIOs. This way you can hook up, say, an 8MiB flash
+ * to a 2MiB memory range and use the GPIOs to select a particular range.
+ *
+ * Copyright © 2000 Nicolas Pitre <nico@cam.org>
+ * Copyright © 2005-2009 Analog Devices Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/concat.h>
+#include <linux/mtd/cfi_endian.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+
+#include "physmap-gemini.h"
+#include "physmap-versatile.h"
+
+struct physmap_flash_info {
+ unsigned int nmaps;
+ struct mtd_info **mtds;
+ struct mtd_info *cmtd;
+ struct map_info *maps;
+ spinlock_t vpp_lock;
+ int vpp_refcnt;
+ const char *probe_type;
+ const char * const *part_types;
+ unsigned int nparts;
+ const struct mtd_partition *parts;
+ struct gpio_descs *gpios;
+ unsigned int gpio_values;
+ unsigned int win_order;
+};
+
+static int physmap_flash_remove(struct platform_device *dev)
+{
+ struct physmap_flash_info *info;
+ struct physmap_flash_data *physmap_data;
+ int i, err;
+
+ info = platform_get_drvdata(dev);
+ if (!info)
+ return 0;
+
+ if (info->cmtd) {
+ err = mtd_device_unregister(info->cmtd);
+ if (err)
+ return err;
+
+ if (info->cmtd != info->mtds[0])
+ mtd_concat_destroy(info->cmtd);
+ }
+
+ for (i = 0; i < info->nmaps; i++) {
+ if (info->mtds[i])
+ map_destroy(info->mtds[i]);
+ }
+
+ physmap_data = dev_get_platdata(&dev->dev);
+ if (physmap_data && physmap_data->exit)
+ physmap_data->exit(dev);
+
+ return 0;
+}
+
+static void physmap_set_vpp(struct map_info *map, int state)
+{
+ struct platform_device *pdev;
+ struct physmap_flash_data *physmap_data;
+ struct physmap_flash_info *info;
+ unsigned long flags;
+
+ pdev = (struct platform_device *)map->map_priv_1;
+ physmap_data = dev_get_platdata(&pdev->dev);
+
+ if (!physmap_data->set_vpp)
+ return;
+
+ info = platform_get_drvdata(pdev);
+
+ spin_lock_irqsave(&info->vpp_lock, flags);
+ if (state) {
+ if (++info->vpp_refcnt == 1) /* first nested 'on' */
+ physmap_data->set_vpp(pdev, 1);
+ } else {
+ if (--info->vpp_refcnt == 0) /* last nested 'off' */
+ physmap_data->set_vpp(pdev, 0);
+ }
+ spin_unlock_irqrestore(&info->vpp_lock, flags);
+}
+
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP_GPIO_ADDR)
+static void physmap_set_addr_gpios(struct physmap_flash_info *info,
+ unsigned long ofs)
+{
+ unsigned int i;
+
+ ofs >>= info->win_order;
+ if (info->gpio_values == ofs)
+ return;
+
+ for (i = 0; i < info->gpios->ndescs; i++) {
+ if ((BIT(i) & ofs) == (BIT(i) & info->gpio_values))
+ continue;
+
+ gpiod_set_value(info->gpios->desc[i], !!(BIT(i) & ofs));
+ }
+}
+
+#define win_mask(order) (BIT(order) - 1)
+
+static map_word physmap_addr_gpios_read(struct map_info *map,
+ unsigned long ofs)
+{
+ struct platform_device *pdev;
+ struct physmap_flash_info *info;
+ map_word mw;
+ u16 word;
+
+ pdev = (struct platform_device *)map->map_priv_1;
+ info = platform_get_drvdata(pdev);
+ physmap_set_addr_gpios(info, ofs);
+
+ word = readw(map->virt + (ofs & win_mask(info->win_order)));
+ mw.x[0] = word;
+ return mw;
+}
+
+static void physmap_addr_gpios_copy_from(struct map_info *map, void *buf,
+ unsigned long ofs, ssize_t len)
+{
+ struct platform_device *pdev;
+ struct physmap_flash_info *info;
+
+ pdev = (struct platform_device *)map->map_priv_1;
+ info = platform_get_drvdata(pdev);
+
+ while (len) {
+ unsigned int winofs = ofs & win_mask(info->win_order);
+ unsigned int chunklen = min_t(unsigned int, len,
+ BIT(info->win_order) - winofs);
+
+ physmap_set_addr_gpios(info, ofs);
+ memcpy_fromio(buf, map->virt + winofs, chunklen);
+ len -= chunklen;
+ buf += chunklen;
+ ofs += chunklen;
+ }
+}
+
+static void physmap_addr_gpios_write(struct map_info *map, map_word mw,
+ unsigned long ofs)
+{
+ struct platform_device *pdev;
+ struct physmap_flash_info *info;
+ u16 word;
+
+ pdev = (struct platform_device *)map->map_priv_1;
+ info = platform_get_drvdata(pdev);
+ physmap_set_addr_gpios(info, ofs);
+
+ word = mw.x[0];
+ writew(word, map->virt + (ofs & win_mask(info->win_order)));
+}
+
+static void physmap_addr_gpios_copy_to(struct map_info *map, unsigned long ofs,
+ const void *buf, ssize_t len)
+{
+ struct platform_device *pdev;
+ struct physmap_flash_info *info;
+
+ pdev = (struct platform_device *)map->map_priv_1;
+ info = platform_get_drvdata(pdev);
+
+ while (len) {
+ unsigned int winofs = ofs & win_mask(info->win_order);
+ unsigned int chunklen = min_t(unsigned int, len,
+ BIT(info->win_order) - winofs);
+
+ physmap_set_addr_gpios(info, ofs);
+ memcpy_toio(map->virt + winofs, buf, chunklen);
+ len -= chunklen;
+ buf += chunklen;
+ ofs += chunklen;
+ }
+}
+
+static int physmap_addr_gpios_map_init(struct map_info *map)
+{
+ map->phys = NO_XIP;
+ map->read = physmap_addr_gpios_read;
+ map->copy_from = physmap_addr_gpios_copy_from;
+ map->write = physmap_addr_gpios_write;
+ map->copy_to = physmap_addr_gpios_copy_to;
+
+ return 0;
+}
+#else
+static int physmap_addr_gpios_map_init(struct map_info *map)
+{
+ return -ENOTSUPP;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_MTD_PHYSMAP_OF)
+static const struct of_device_id of_flash_match[] = {
+ {
+ .compatible = "cfi-flash",
+ .data = "cfi_probe",
+ },
+ {
+ /*
+ * FIXME: JEDEC chips can't be safely and reliably
+ * probed, although the mtd code gets it right in
+ * practice most of the time. We should use the
+ * vendor and device ids specified by the binding to
+ * bypass the heuristic probe code, but the mtd layer
+ * provides, at present, no interface for doing so
+ * :(.
+ */
+ .compatible = "jedec-flash",
+ .data = "jedec_probe",
+ },
+ {
+ .compatible = "mtd-ram",
+ .data = "map_ram",
+ },
+ {
+ .compatible = "mtd-rom",
+ .data = "map_rom",
+ },
+ {
+ .type = "rom",
+ .compatible = "direct-mapped"
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, of_flash_match);
+
+static const char * const of_default_part_probes[] = {
+ "cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL
+};
+
+static const char * const *of_get_part_probes(struct platform_device *dev)
+{
+ struct device_node *dp = dev->dev.of_node;
+ const char **res;
+ int count;
+
+ count = of_property_count_strings(dp, "linux,part-probe");
+ if (count < 0)
+ return of_default_part_probes;
+
+ res = devm_kcalloc(&dev->dev, count + 1, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return NULL;
+
+ count = of_property_read_string_array(dp, "linux,part-probe", res,
+ count);
+ if (count < 0)
+ return NULL;
+
+ return res;
+}
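
/*
 * A minimal standalone sketch of why of_get_part_probes() allocates
 * count + 1 entries: the extra slot keeps the array NULL-terminated, and
 * consumers walk it until they hit the sentinel. Names here are
 * illustrative, not part of this patch.
 */
#include <stdio.h>

static const char * const probes[] = {
	"cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL
};

int main(void)
{
	const char * const *p;

	for (p = probes; *p; p++)	/* stop at the NULL sentinel */
		printf("trying parser: %s\n", *p);
	return 0;
}
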
+
+static const char *of_select_probe_type(struct platform_device *dev)
+{
+ struct device_node *dp = dev->dev.of_node;
+ const struct of_device_id *match;
+ const char *probe_type;
+
+ match = of_match_device(of_flash_match, &dev->dev);
+ probe_type = match->data;
+ if (probe_type)
+ return probe_type;
+
+ dev_warn(&dev->dev,
+ "Device tree uses obsolete \"direct-mapped\" flash binding\n");
+
+ of_property_read_string(dp, "probe-type", &probe_type);
+ if (!probe_type)
+ return NULL;
+
+ if (!strcmp(probe_type, "CFI")) {
+ probe_type = "cfi_probe";
+ } else if (!strcmp(probe_type, "JEDEC")) {
+ probe_type = "jedec_probe";
+ } else if (!strcmp(probe_type, "ROM")) {
+ probe_type = "map_rom";
+ } else {
+ dev_warn(&dev->dev,
+ "obsolete_probe: don't know probe type '%s', mapping as rom\n",
+ probe_type);
+ probe_type = "map_rom";
+ }
+
+ return probe_type;
+}
+
+static int physmap_flash_of_init(struct platform_device *dev)
+{
+ struct physmap_flash_info *info = platform_get_drvdata(dev);
+ struct device_node *dp = dev->dev.of_node;
+ const char *mtd_name = NULL;
+ int err, swap = 0;
+ bool map_indirect;
+ unsigned int i;
+ u32 bankwidth;
+
+ if (!dp)
+ return -EINVAL;
+
+ info->probe_type = of_select_probe_type(dev);
+
+ info->part_types = of_get_part_probes(dev);
+ if (!info->part_types)
+ return -ENOMEM;
+
+ of_property_read_string(dp, "linux,mtd-name", &mtd_name);
+
+ map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
+
+ err = of_property_read_u32(dp, "bank-width", &bankwidth);
+ if (err) {
+ dev_err(&dev->dev, "Can't get bank width from device tree\n");
+ return err;
+ }
+
+ if (of_property_read_bool(dp, "big-endian"))
+ swap = CFI_BIG_ENDIAN;
+ else if (of_property_read_bool(dp, "little-endian"))
+ swap = CFI_LITTLE_ENDIAN;
+
+ for (i = 0; i < info->nmaps; i++) {
+ info->maps[i].name = mtd_name;
+ info->maps[i].swap = swap;
+ info->maps[i].bankwidth = bankwidth;
+ info->maps[i].device_node = dp;
+
+ err = of_flash_probe_gemini(dev, dp, &info->maps[i]);
+ if (err)
+ return err;
+
+ err = of_flash_probe_versatile(dev, dp, &info->maps[i]);
+ if (err)
+ return err;
+
+ /*
+ * On some platforms (e.g. MPC5200) a direct 1:1 mapping
+ * may cause problems with JFFS2 usage, as the local bus (LPB)
+ * doesn't support unaligned accesses as implemented in the
+ * JFFS2 code via memcpy(). By setting NO_XIP, the
+ * flash will not be exposed directly to the MTD users
+ * (e.g. JFFS2) any more.
+ */
+ if (map_indirect)
+ info->maps[i].phys = NO_XIP;
+ }
+
+ return 0;
+}
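
/*
 * A hypothetical device tree node (illustrative values, not from a real
 * board) exercising the properties parsed by physmap_flash_of_init()
 * above:
 *
 *	flash@0 {
 *		compatible = "cfi-flash";
 *		reg = <0x0 0x800000>;
 *		bank-width = <2>;
 *		big-endian;
 *		linux,mtd-name = "board-flash";
 *		linux,part-probe = "ofpart", "cmdlinepart";
 *	};
 */
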
+#else /* IS_ENABLED(CONFIG_MTD_PHYSMAP_OF) */
+#define of_flash_match NULL
+
+static int physmap_flash_of_init(struct platform_device *dev)
+{
+ return -ENOTSUPP;
+}
+#endif /* IS_ENABLED(CONFIG_MTD_PHYSMAP_OF) */
+
+static const char * const rom_probe_types[] = {
+ "cfi_probe", "jedec_probe", "qinfo_probe", "map_rom",
+};
+
+static const char * const part_probe_types[] = {
+ "cmdlinepart", "RedBoot", "afs", NULL
+};
+
+static int physmap_flash_pdata_init(struct platform_device *dev)
+{
+ struct physmap_flash_info *info = platform_get_drvdata(dev);
+ struct physmap_flash_data *physmap_data;
+ unsigned int i;
+ int err;
+
+ physmap_data = dev_get_platdata(&dev->dev);
+ if (!physmap_data)
+ return -EINVAL;
+
+ info->probe_type = physmap_data->probe_type;
+ info->part_types = physmap_data->part_probe_types ? : part_probe_types;
+ info->parts = physmap_data->parts;
+ info->nparts = physmap_data->nr_parts;
+
+ if (physmap_data->init) {
+ err = physmap_data->init(dev);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < info->nmaps; i++) {
+ info->maps[i].bankwidth = physmap_data->width;
+ info->maps[i].pfow_base = physmap_data->pfow_base;
+ info->maps[i].set_vpp = physmap_set_vpp;
+ }
+
+ return 0;
+}
+
+static int physmap_flash_probe(struct platform_device *dev)
+{
+ struct physmap_flash_info *info;
+ int err = 0;
+ int i;
+
+ if (!dev->dev.of_node && !dev_get_platdata(&dev->dev))
+ return -EINVAL;
+
+ info = devm_kzalloc(&dev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ while (platform_get_resource(dev, IORESOURCE_MEM, info->nmaps))
+ info->nmaps++;
+
+ if (!info->nmaps)
+ return -ENODEV;
+
+ info->maps = devm_kzalloc(&dev->dev,
+ sizeof(*info->maps) * info->nmaps,
+ GFP_KERNEL);
+ if (!info->maps)
+ return -ENOMEM;
+
+ info->mtds = devm_kzalloc(&dev->dev,
+ sizeof(*info->mtds) * info->nmaps,
+ GFP_KERNEL);
+ if (!info->mtds)
+ return -ENOMEM;
+
+ platform_set_drvdata(dev, info);
+
+ info->gpios = devm_gpiod_get_array_optional(&dev->dev, "addr",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(info->gpios))
+ return PTR_ERR(info->gpios);
+
+ if (info->gpios && info->nmaps > 1) {
+ dev_err(&dev->dev, "addr-gpios only supported for nmaps == 1\n");
+ return -EINVAL;
+ }
+
+ if (dev->dev.of_node)
+ err = physmap_flash_of_init(dev);
+ else
+ err = physmap_flash_pdata_init(dev);
+
+ if (err)
+ return err;
+
+ for (i = 0; i < info->nmaps; i++) {
+ struct resource *res;
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, i);
+ info->maps[i].virt = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(info->maps[i].virt)) {
+ err = PTR_ERR(info->maps[i].virt);
+ goto err_out;
+ }
+
+ dev_notice(&dev->dev, "physmap platform flash device: %pR\n",
+ res);
+
+ info->maps[i].name = dev_name(&dev->dev);
+
+ if (!info->maps[i].phys)
+ info->maps[i].phys = res->start;
+
+ info->win_order = get_bitmask_order(resource_size(res)) - 1;
+ info->maps[i].size = BIT(info->win_order +
+ (info->gpios ?
+ info->gpios->ndescs : 0));
+
+ info->maps[i].map_priv_1 = (unsigned long)dev;
+
+ if (info->gpios) {
+ err = physmap_addr_gpios_map_init(&info->maps[i]);
+ if (err)
+ goto err_out;
+ }
+
+#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
+ /*
+ * Only use the simple_map implementation if map hooks are not
+	 * implemented. Since map->read() is mandatory, checking for its
+ * presence is enough.
+ */
+ if (!info->maps[i].read)
+ simple_map_init(&info->maps[i]);
+#else
+ simple_map_init(&info->maps[i]);
+#endif
+
+ if (info->probe_type) {
+ info->mtds[i] = do_map_probe(info->probe_type,
+ &info->maps[i]);
+ } else {
+ int j;
+
+ for (j = 0; j < ARRAY_SIZE(rom_probe_types); j++) {
+ info->mtds[i] = do_map_probe(rom_probe_types[j],
+ &info->maps[i]);
+ if (info->mtds[i])
+ break;
+ }
+ }
+
+ if (!info->mtds[i]) {
+ dev_err(&dev->dev, "map_probe failed\n");
+ err = -ENXIO;
+ goto err_out;
+ }
+ info->mtds[i]->dev.parent = &dev->dev;
+ }
+
+ if (info->nmaps == 1) {
+ info->cmtd = info->mtds[0];
+ } else {
+ /*
+ * We detected multiple devices. Concatenate them together.
+ */
+ info->cmtd = mtd_concat_create(info->mtds, info->nmaps,
+ dev_name(&dev->dev));
+ if (!info->cmtd)
+ err = -ENXIO;
+ }
+ if (err)
+ goto err_out;
+
+ spin_lock_init(&info->vpp_lock);
+
+ mtd_set_of_node(info->cmtd, dev->dev.of_node);
+ err = mtd_device_parse_register(info->cmtd, info->part_types, NULL,
+ info->parts, info->nparts);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ physmap_flash_remove(dev);
+ return err;
+}
+
+#ifdef CONFIG_PM
+static void physmap_flash_shutdown(struct platform_device *dev)
+{
+ struct physmap_flash_info *info = platform_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < info->nmaps && info->mtds[i]; i++)
+ if (mtd_suspend(info->mtds[i]) == 0)
+ mtd_resume(info->mtds[i]);
+}
+#else
+#define physmap_flash_shutdown NULL
+#endif
+
+static struct platform_driver physmap_flash_driver = {
+ .probe = physmap_flash_probe,
+ .remove = physmap_flash_remove,
+ .shutdown = physmap_flash_shutdown,
+ .driver = {
+ .name = "physmap-flash",
+ .of_match_table = of_flash_match,
+ },
+};
+
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
+static struct physmap_flash_data physmap_flash_data = {
+ .width = CONFIG_MTD_PHYSMAP_BANKWIDTH,
+};
+
+static struct resource physmap_flash_resource = {
+ .start = CONFIG_MTD_PHYSMAP_START,
+ .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device physmap_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = {
+ .platform_data = &physmap_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &physmap_flash_resource,
+};
+#endif
+
+static int __init physmap_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&physmap_flash_driver);
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
+ if (err == 0) {
+ err = platform_device_register(&physmap_flash);
+ if (err)
+ platform_driver_unregister(&physmap_flash_driver);
+ }
+#endif
+
+ return err;
+}
+
+static void __exit physmap_exit(void)
+{
+#ifdef CONFIG_MTD_PHYSMAP_COMPAT
+ platform_device_unregister(&physmap_flash);
+#endif
+ platform_driver_unregister(&physmap_flash_driver);
+}
+
+module_init(physmap_init);
+module_exit(physmap_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
+MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
+MODULE_DESCRIPTION("Generic configurable MTD map driver");
+
+/* legacy platform drivers can't hotplug or coldplug */
+#ifndef CONFIG_MTD_PHYSMAP_COMPAT
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:physmap-flash");
+#endif
diff --git a/drivers/mtd/maps/physmap_of_gemini.c b/drivers/mtd/maps/physmap-gemini.c
index 9df62ca721d5..60775b208fc9 100644
--- a/drivers/mtd/maps/physmap_of_gemini.c
+++ b/drivers/mtd/maps/physmap-gemini.c
@@ -10,10 +10,12 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/mtd/map.h>
+#include <linux/mtd/xip.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
-#include "physmap_of_gemini.h"
+#include <linux/pinctrl/consumer.h>
+#include "physmap-gemini.h"
/*
* The Flash-relevant parts of the global status register
@@ -44,6 +46,82 @@
#define FLASH_PARALLEL_HIGH_PIN_CNT (1 << 20) /* else low pin cnt */
+static const struct of_device_id syscon_match[] = {
+ { .compatible = "cortina,gemini-syscon" },
+ { },
+};
+
+struct gemini_flash {
+ struct device *dev;
+ struct pinctrl *p;
+ struct pinctrl_state *enabled_state;
+ struct pinctrl_state *disabled_state;
+};
+
+/* Static local state */
+static struct gemini_flash *gf;
+
+static void gemini_flash_enable_pins(void)
+{
+ int ret;
+
+ if (IS_ERR(gf->enabled_state))
+ return;
+ ret = pinctrl_select_state(gf->p, gf->enabled_state);
+ if (ret)
+ dev_err(gf->dev, "failed to enable pins\n");
+}
+
+static void gemini_flash_disable_pins(void)
+{
+ int ret;
+
+ if (IS_ERR(gf->disabled_state))
+ return;
+ ret = pinctrl_select_state(gf->p, gf->disabled_state);
+ if (ret)
+ dev_err(gf->dev, "failed to disable pins\n");
+}
+
+static map_word __xipram gemini_flash_map_read(struct map_info *map,
+ unsigned long ofs)
+{
+ map_word __xipram ret;
+
+ gemini_flash_enable_pins();
+ ret = inline_map_read(map, ofs);
+ gemini_flash_disable_pins();
+
+ return ret;
+}
+
+static void __xipram gemini_flash_map_write(struct map_info *map,
+ const map_word datum,
+ unsigned long ofs)
+{
+ gemini_flash_enable_pins();
+ inline_map_write(map, datum, ofs);
+ gemini_flash_disable_pins();
+}
+
+static void __xipram gemini_flash_map_copy_from(struct map_info *map,
+ void *to, unsigned long from,
+ ssize_t len)
+{
+ gemini_flash_enable_pins();
+ inline_map_copy_from(map, to, from, len);
+ gemini_flash_disable_pins();
+}
+
+static void __xipram gemini_flash_map_copy_to(struct map_info *map,
+ unsigned long to,
+ const void *from, ssize_t len)
+{
+ gemini_flash_enable_pins();
+ inline_map_copy_to(map, to, from, len);
+ gemini_flash_disable_pins();
+}
+
int of_flash_probe_gemini(struct platform_device *pdev,
struct device_node *np,
struct map_info *map)
@@ -57,6 +135,11 @@ int of_flash_probe_gemini(struct platform_device *pdev,
if (!of_device_is_compatible(np, "cortina,gemini-flash"))
return 0;
+ gf = devm_kzalloc(dev, sizeof(*gf), GFP_KERNEL);
+ if (!gf)
+ return -ENOMEM;
+ gf->dev = dev;
+
rmap = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(rmap)) {
dev_err(dev, "no syscon\n");
@@ -91,7 +174,32 @@ int of_flash_probe_gemini(struct platform_device *pdev,
map->bankwidth * 8);
}
- dev_info(&pdev->dev, "initialized Gemini-specific physmap control\n");
+ gf->p = devm_pinctrl_get(dev);
+ if (IS_ERR(gf->p)) {
+ dev_err(dev, "no pinctrl handle\n");
+ ret = PTR_ERR(gf->p);
+ return ret;
+ }
+
+ gf->enabled_state = pinctrl_lookup_state(gf->p, "enabled");
+ if (IS_ERR(gf->enabled_state))
+ dev_err(dev, "no enabled pin control state\n");
+
+ gf->disabled_state = pinctrl_lookup_state(gf->p, "disabled");
+	if (IS_ERR(gf->disabled_state)) {
+ dev_err(dev, "no disabled pin control state\n");
+ } else {
+ ret = pinctrl_select_state(gf->p, gf->disabled_state);
+ if (ret)
+ dev_err(gf->dev, "failed to disable pins\n");
+ }
+
+ map->read = gemini_flash_map_read;
+ map->write = gemini_flash_map_write;
+ map->copy_from = gemini_flash_map_copy_from;
+ map->copy_to = gemini_flash_map_copy_to;
+
+ dev_info(dev, "initialized Gemini-specific physmap control\n");
return 0;
}
diff --git a/drivers/mtd/maps/physmap_of_gemini.h b/drivers/mtd/maps/physmap-gemini.h
index 60e13a689d6a..72bd04ce3fdb 100644
--- a/drivers/mtd/maps/physmap_of_gemini.h
+++ b/drivers/mtd/maps/physmap-gemini.h
@@ -2,7 +2,7 @@
#include <linux/of.h>
#include <linux/mtd/map.h>
-#ifdef CONFIG_MTD_PHYSMAP_OF_GEMINI
+#ifdef CONFIG_MTD_PHYSMAP_GEMINI
int of_flash_probe_gemini(struct platform_device *pdev,
struct device_node *np,
struct map_info *map);
diff --git a/drivers/mtd/maps/physmap_of_versatile.c b/drivers/mtd/maps/physmap-versatile.c
index 03f2b6e7bc7e..0179d710bb3f 100644
--- a/drivers/mtd/maps/physmap_of_versatile.c
+++ b/drivers/mtd/maps/physmap-versatile.c
@@ -28,7 +28,7 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
-#include "physmap_of_versatile.h"
+#include "physmap-versatile.h"
static struct regmap *syscon_regmap;
diff --git a/drivers/mtd/maps/physmap_of_versatile.h b/drivers/mtd/maps/physmap-versatile.h
index 0302502c9462..9cf39d031f5a 100644
--- a/drivers/mtd/maps/physmap_of_versatile.h
+++ b/drivers/mtd/maps/physmap-versatile.h
@@ -2,7 +2,7 @@
#include <linux/of.h>
#include <linux/mtd/map.h>
-#ifdef CONFIG_MTD_PHYSMAP_OF_VERSATILE
+#ifdef CONFIG_MTD_PHYSMAP_VERSATILE
int of_flash_probe_versatile(struct platform_device *pdev,
struct device_node *np,
struct map_info *map);
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
deleted file mode 100644
index cc2adbbcd60f..000000000000
--- a/drivers/mtd/maps/physmap.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Normal mappings of chips in physical memory
- *
- * Copyright (C) 2003 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- *
- * 031022 - [jsun] add run-time configure and partition setup
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/physmap.h>
-#include <linux/mtd/concat.h>
-#include <linux/io.h>
-
-#define MAX_RESOURCES 4
-
-struct physmap_flash_info {
- struct mtd_info *mtd[MAX_RESOURCES];
- struct mtd_info *cmtd;
- struct map_info map[MAX_RESOURCES];
- spinlock_t vpp_lock;
- int vpp_refcnt;
-};
-
-static int physmap_flash_remove(struct platform_device *dev)
-{
- struct physmap_flash_info *info;
- struct physmap_flash_data *physmap_data;
- int i;
-
- info = platform_get_drvdata(dev);
- if (info == NULL)
- return 0;
-
- physmap_data = dev_get_platdata(&dev->dev);
-
- if (info->cmtd) {
- mtd_device_unregister(info->cmtd);
- if (info->cmtd != info->mtd[0])
- mtd_concat_destroy(info->cmtd);
- }
-
- for (i = 0; i < MAX_RESOURCES; i++) {
- if (info->mtd[i] != NULL)
- map_destroy(info->mtd[i]);
- }
-
- if (physmap_data->exit)
- physmap_data->exit(dev);
-
- return 0;
-}
-
-static void physmap_set_vpp(struct map_info *map, int state)
-{
- struct platform_device *pdev;
- struct physmap_flash_data *physmap_data;
- struct physmap_flash_info *info;
- unsigned long flags;
-
- pdev = (struct platform_device *)map->map_priv_1;
- physmap_data = dev_get_platdata(&pdev->dev);
-
- if (!physmap_data->set_vpp)
- return;
-
- info = platform_get_drvdata(pdev);
-
- spin_lock_irqsave(&info->vpp_lock, flags);
- if (state) {
- if (++info->vpp_refcnt == 1) /* first nested 'on' */
- physmap_data->set_vpp(pdev, 1);
- } else {
- if (--info->vpp_refcnt == 0) /* last nested 'off' */
- physmap_data->set_vpp(pdev, 0);
- }
- spin_unlock_irqrestore(&info->vpp_lock, flags);
-}
-
-static const char * const rom_probe_types[] = {
- "cfi_probe", "jedec_probe", "qinfo_probe", "map_rom", NULL };
-
-static const char * const part_probe_types[] = {
- "cmdlinepart", "RedBoot", "afs", NULL };
-
-static int physmap_flash_probe(struct platform_device *dev)
-{
- struct physmap_flash_data *physmap_data;
- struct physmap_flash_info *info;
- const char * const *probe_type;
- const char * const *part_types;
- int err = 0;
- int i;
- int devices_found = 0;
-
- physmap_data = dev_get_platdata(&dev->dev);
- if (physmap_data == NULL)
- return -ENODEV;
-
- info = devm_kzalloc(&dev->dev, sizeof(struct physmap_flash_info),
- GFP_KERNEL);
- if (info == NULL) {
- err = -ENOMEM;
- goto err_out;
- }
-
- if (physmap_data->init) {
- err = physmap_data->init(dev);
- if (err)
- goto err_out;
- }
-
- platform_set_drvdata(dev, info);
-
- for (i = 0; i < dev->num_resources; i++) {
- printk(KERN_NOTICE "physmap platform flash device: %.8llx at %.8llx\n",
- (unsigned long long)resource_size(&dev->resource[i]),
- (unsigned long long)dev->resource[i].start);
-
- if (!devm_request_mem_region(&dev->dev,
- dev->resource[i].start,
- resource_size(&dev->resource[i]),
- dev_name(&dev->dev))) {
- dev_err(&dev->dev, "Could not reserve memory region\n");
- err = -ENOMEM;
- goto err_out;
- }
-
- info->map[i].name = dev_name(&dev->dev);
- info->map[i].phys = dev->resource[i].start;
- info->map[i].size = resource_size(&dev->resource[i]);
- info->map[i].bankwidth = physmap_data->width;
- info->map[i].set_vpp = physmap_set_vpp;
- info->map[i].pfow_base = physmap_data->pfow_base;
- info->map[i].map_priv_1 = (unsigned long)dev;
-
- info->map[i].virt = devm_ioremap(&dev->dev, info->map[i].phys,
- info->map[i].size);
- if (info->map[i].virt == NULL) {
- dev_err(&dev->dev, "Failed to ioremap flash region\n");
- err = -EIO;
- goto err_out;
- }
-
- simple_map_init(&info->map[i]);
-
- probe_type = rom_probe_types;
- if (physmap_data->probe_type == NULL) {
- for (; info->mtd[i] == NULL && *probe_type != NULL; probe_type++)
- info->mtd[i] = do_map_probe(*probe_type, &info->map[i]);
- } else
- info->mtd[i] = do_map_probe(physmap_data->probe_type, &info->map[i]);
-
- if (info->mtd[i] == NULL) {
- dev_err(&dev->dev, "map_probe failed\n");
- err = -ENXIO;
- goto err_out;
- } else {
- devices_found++;
- }
- info->mtd[i]->dev.parent = &dev->dev;
- }
-
- if (devices_found == 1) {
- info->cmtd = info->mtd[0];
- } else if (devices_found > 1) {
- /*
- * We detected multiple devices. Concatenate them together.
- */
- info->cmtd = mtd_concat_create(info->mtd, devices_found, dev_name(&dev->dev));
- if (info->cmtd == NULL)
- err = -ENXIO;
- }
- if (err)
- goto err_out;
-
- spin_lock_init(&info->vpp_lock);
-
- part_types = physmap_data->part_probe_types ? : part_probe_types;
-
- mtd_device_parse_register(info->cmtd, part_types, NULL,
- physmap_data->parts, physmap_data->nr_parts);
- return 0;
-
-err_out:
- physmap_flash_remove(dev);
- return err;
-}
-
-#ifdef CONFIG_PM
-static void physmap_flash_shutdown(struct platform_device *dev)
-{
- struct physmap_flash_info *info = platform_get_drvdata(dev);
- int i;
-
- for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (mtd_suspend(info->mtd[i]) == 0)
- mtd_resume(info->mtd[i]);
-}
-#else
-#define physmap_flash_shutdown NULL
-#endif
-
-static struct platform_driver physmap_flash_driver = {
- .probe = physmap_flash_probe,
- .remove = physmap_flash_remove,
- .shutdown = physmap_flash_shutdown,
- .driver = {
- .name = "physmap-flash",
- },
-};
-
-
-#ifdef CONFIG_MTD_PHYSMAP_COMPAT
-static struct physmap_flash_data physmap_flash_data = {
- .width = CONFIG_MTD_PHYSMAP_BANKWIDTH,
-};
-
-static struct resource physmap_flash_resource = {
- .start = CONFIG_MTD_PHYSMAP_START,
- .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct platform_device physmap_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &physmap_flash_data,
- },
- .num_resources = 1,
- .resource = &physmap_flash_resource,
-};
-#endif
-
-static int __init physmap_init(void)
-{
- int err;
-
- err = platform_driver_register(&physmap_flash_driver);
-#ifdef CONFIG_MTD_PHYSMAP_COMPAT
- if (err == 0) {
- err = platform_device_register(&physmap_flash);
- if (err)
- platform_driver_unregister(&physmap_flash_driver);
- }
-#endif
-
- return err;
-}
-
-static void __exit physmap_exit(void)
-{
-#ifdef CONFIG_MTD_PHYSMAP_COMPAT
- platform_device_unregister(&physmap_flash);
-#endif
- platform_driver_unregister(&physmap_flash_driver);
-}
-
-module_init(physmap_init);
-module_exit(physmap_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("Generic configurable MTD map driver");
-
-/* legacy platform drivers can't hotplug or coldplug */
-#ifndef CONFIG_MTD_PHYSMAP_COMPAT
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:physmap-flash");
-#endif
diff --git a/drivers/mtd/maps/physmap_of_core.c b/drivers/mtd/maps/physmap_of_core.c
deleted file mode 100644
index ece605d78c21..000000000000
--- a/drivers/mtd/maps/physmap_of_core.c
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Flash mappings described by the OF (or flattened) device tree
- *
- * Copyright (C) 2006 MontaVista Software Inc.
- * Author: Vitaly Wool <vwool@ru.mvista.com>
- *
- * Revised to handle newer style flash binding by:
- * Copyright (C) 2007 David Gibson, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/concat.h>
-#include <linux/mtd/cfi_endian.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/slab.h>
-#include "physmap_of_gemini.h"
-#include "physmap_of_versatile.h"
-
-struct of_flash_list {
- struct mtd_info *mtd;
- struct map_info map;
-};
-
-struct of_flash {
- struct mtd_info *cmtd;
- int list_size; /* number of elements in of_flash_list */
- struct of_flash_list list[0];
-};
-
-static int of_flash_remove(struct platform_device *dev)
-{
- struct of_flash *info;
- int i;
-
- info = dev_get_drvdata(&dev->dev);
- if (!info)
- return 0;
- dev_set_drvdata(&dev->dev, NULL);
-
- if (info->cmtd) {
- mtd_device_unregister(info->cmtd);
- if (info->cmtd != info->list[0].mtd)
- mtd_concat_destroy(info->cmtd);
- }
-
- for (i = 0; i < info->list_size; i++)
- if (info->list[i].mtd)
- map_destroy(info->list[i].mtd);
-
- return 0;
-}
-
-static const char * const rom_probe_types[] = {
- "cfi_probe", "jedec_probe", "map_rom" };
-
-/* Helper function to handle probing of the obsolete "direct-mapped"
- * compatible binding, which has an extra "probe-type" property
- * describing the type of flash probe necessary. */
-static struct mtd_info *obsolete_probe(struct platform_device *dev,
- struct map_info *map)
-{
- struct device_node *dp = dev->dev.of_node;
- const char *of_probe;
- struct mtd_info *mtd;
- int i;
-
- dev_warn(&dev->dev, "Device tree uses obsolete \"direct-mapped\" "
- "flash binding\n");
-
- of_probe = of_get_property(dp, "probe-type", NULL);
- if (!of_probe) {
- for (i = 0; i < ARRAY_SIZE(rom_probe_types); i++) {
- mtd = do_map_probe(rom_probe_types[i], map);
- if (mtd)
- return mtd;
- }
- return NULL;
- } else if (strcmp(of_probe, "CFI") == 0) {
- return do_map_probe("cfi_probe", map);
- } else if (strcmp(of_probe, "JEDEC") == 0) {
- return do_map_probe("jedec_probe", map);
- } else {
- if (strcmp(of_probe, "ROM") != 0)
- dev_warn(&dev->dev, "obsolete_probe: don't know probe "
- "type '%s', mapping as rom\n", of_probe);
- return do_map_probe("map_rom", map);
- }
-}
-
-/* When partitions are set we look for a linux,part-probe property which
- specifies the list of partition probers to use. If none is given then the
-   default is used. These take precedence over other device tree
- information. */
-static const char * const part_probe_types_def[] = {
- "cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL };
-
-static const char * const *of_get_probes(struct device_node *dp)
-{
- const char **res;
- int count;
-
- count = of_property_count_strings(dp, "linux,part-probe");
- if (count < 0)
- return part_probe_types_def;
-
- res = kcalloc(count + 1, sizeof(*res), GFP_KERNEL);
- if (!res)
- return NULL;
-
- count = of_property_read_string_array(dp, "linux,part-probe", res,
- count);
- if (count < 0)
- return NULL;
-
- return res;
-}
-
-static void of_free_probes(const char * const *probes)
-{
- if (probes != part_probe_types_def)
- kfree(probes);
-}
-
-static const struct of_device_id of_flash_match[];
-static int of_flash_probe(struct platform_device *dev)
-{
- const char * const *part_probe_types;
- const struct of_device_id *match;
- struct device_node *dp = dev->dev.of_node;
- struct resource res;
- struct of_flash *info;
- const char *probe_type;
- const __be32 *width;
- int err;
- int i;
- int count;
- const __be32 *p;
- int reg_tuple_size;
- struct mtd_info **mtd_list = NULL;
- resource_size_t res_size;
- bool map_indirect;
- const char *mtd_name = NULL;
-
- match = of_match_device(of_flash_match, &dev->dev);
- if (!match)
- return -EINVAL;
- probe_type = match->data;
-
- reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
-
- of_property_read_string(dp, "linux,mtd-name", &mtd_name);
-
- /*
-	 * Get number of "reg" tuples. Scan for MTD devices on areas
-	 * described by each "reg" region. This makes it possible (including
-	 * the concat support) to support the Intel P30 48F4400 chips, which
-	 * consist internally of 2 non-identical NOR chips on one die.
- */
- p = of_get_property(dp, "reg", &count);
- if (!p || count % reg_tuple_size != 0) {
- dev_err(&dev->dev, "Malformed reg property on %pOF\n",
- dev->dev.of_node);
- err = -EINVAL;
- goto err_flash_remove;
- }
- count /= reg_tuple_size;
-
- map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
-
- err = -ENOMEM;
- info = devm_kzalloc(&dev->dev,
- sizeof(struct of_flash) +
- sizeof(struct of_flash_list) * count, GFP_KERNEL);
- if (!info)
- goto err_flash_remove;
-
- dev_set_drvdata(&dev->dev, info);
-
- mtd_list = kcalloc(count, sizeof(*mtd_list), GFP_KERNEL);
- if (!mtd_list)
- goto err_flash_remove;
-
- for (i = 0; i < count; i++) {
- err = -ENXIO;
- if (of_address_to_resource(dp, i, &res)) {
- /*
- * Continue with next register tuple if this
- * one is not mappable
- */
- continue;
- }
-
- dev_dbg(&dev->dev, "of_flash device: %pR\n", &res);
-
- err = -EBUSY;
- res_size = resource_size(&res);
- info->list[i].map.virt = devm_ioremap_resource(&dev->dev, &res);
- if (IS_ERR(info->list[i].map.virt)) {
- err = PTR_ERR(info->list[i].map.virt);
- goto err_out;
- }
-
- err = -ENXIO;
- width = of_get_property(dp, "bank-width", NULL);
- if (!width) {
- dev_err(&dev->dev, "Can't get bank width from device"
- " tree\n");
- goto err_out;
- }
-
- info->list[i].map.name = mtd_name ?: dev_name(&dev->dev);
- info->list[i].map.phys = res.start;
- info->list[i].map.size = res_size;
- info->list[i].map.bankwidth = be32_to_cpup(width);
- info->list[i].map.device_node = dp;
-
- if (of_property_read_bool(dp, "big-endian"))
- info->list[i].map.swap = CFI_BIG_ENDIAN;
- else if (of_property_read_bool(dp, "little-endian"))
- info->list[i].map.swap = CFI_LITTLE_ENDIAN;
-
- err = of_flash_probe_gemini(dev, dp, &info->list[i].map);
- if (err)
- goto err_out;
- err = of_flash_probe_versatile(dev, dp, &info->list[i].map);
- if (err)
- goto err_out;
-
- simple_map_init(&info->list[i].map);
-
- /*
- * On some platforms (e.g. MPC5200) a direct 1:1 mapping
- * may cause problems with JFFS2 usage, as the local bus (LPB)
- * doesn't support unaligned accesses as implemented in the
- * JFFS2 code via memcpy(). By setting NO_XIP, the
- * flash will not be exposed directly to the MTD users
- * (e.g. JFFS2) any more.
- */
- if (map_indirect)
- info->list[i].map.phys = NO_XIP;
-
- if (probe_type) {
- info->list[i].mtd = do_map_probe(probe_type,
- &info->list[i].map);
- } else {
- info->list[i].mtd = obsolete_probe(dev,
- &info->list[i].map);
- }
-
- /* Fall back to mapping region as ROM */
- if (!info->list[i].mtd) {
- dev_warn(&dev->dev,
- "do_map_probe() failed for type %s\n",
- probe_type);
-
- info->list[i].mtd = do_map_probe("map_rom",
- &info->list[i].map);
- }
- mtd_list[i] = info->list[i].mtd;
-
- err = -ENXIO;
- if (!info->list[i].mtd) {
- dev_err(&dev->dev, "do_map_probe() failed\n");
- goto err_out;
- } else {
- info->list_size++;
- }
- info->list[i].mtd->dev.parent = &dev->dev;
- }
-
- err = 0;
- info->cmtd = NULL;
- if (info->list_size == 1) {
- info->cmtd = info->list[0].mtd;
- } else if (info->list_size > 1) {
- /*
- * We detected multiple devices. Concatenate them together.
- */
- info->cmtd = mtd_concat_create(mtd_list, info->list_size,
- dev_name(&dev->dev));
- }
- if (info->cmtd == NULL)
- err = -ENXIO;
-
- if (err)
- goto err_out;
-
- info->cmtd->dev.parent = &dev->dev;
- mtd_set_of_node(info->cmtd, dp);
- part_probe_types = of_get_probes(dp);
- if (!part_probe_types) {
- err = -ENOMEM;
- goto err_out;
- }
- mtd_device_parse_register(info->cmtd, part_probe_types, NULL,
- NULL, 0);
- of_free_probes(part_probe_types);
-
- kfree(mtd_list);
-
- return 0;
-
-err_out:
- kfree(mtd_list);
-err_flash_remove:
- of_flash_remove(dev);
-
- return err;
-}
-
-static const struct of_device_id of_flash_match[] = {
- {
- .compatible = "cfi-flash",
- .data = (void *)"cfi_probe",
- },
- {
- /* FIXME: JEDEC chips can't be safely and reliably
- * probed, although the mtd code gets it right in
- * practice most of the time. We should use the
- * vendor and device ids specified by the binding to
- * bypass the heuristic probe code, but the mtd layer
- * provides, at present, no interface for doing so
- * :(. */
- .compatible = "jedec-flash",
- .data = (void *)"jedec_probe",
- },
- {
- .compatible = "mtd-ram",
- .data = (void *)"map_ram",
- },
- {
- .compatible = "mtd-rom",
- .data = (void *)"map_rom",
- },
- {
- .type = "rom",
- .compatible = "direct-mapped"
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, of_flash_match);
-
-static struct platform_driver of_flash_driver = {
- .driver = {
- .name = "of-flash",
- .of_match_table = of_flash_match,
- },
- .probe = of_flash_probe,
- .remove = of_flash_remove,
-};
-
-module_platform_driver(of_flash_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
-MODULE_DESCRIPTION("Device tree based MTD map driver");
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index a5b1933c0490..b2d5ed1cbc94 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -56,7 +56,7 @@ struct mtdblk_dev {
*/
static int erase_write (struct mtd_info *mtd, unsigned long pos,
- int len, const char *buf)
+ unsigned int len, const char *buf)
{
struct erase_info erase;
size_t retlen;
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 97ac219c082e..b6b93291aba9 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -665,6 +665,8 @@ static void mtd_set_dev_defaults(struct mtd_info *mtd)
} else {
pr_debug("mtd device won't show a device symlink in sysfs\n");
}
+
+ mtd->orig_flags = mtd->flags;
}
/**
@@ -1136,13 +1138,13 @@ static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
return -EINVAL;
if (ops->ooblen) {
- u64 maxooblen;
+ size_t maxooblen;
if (ops->ooboffs >= mtd_oobavail(mtd, ops))
return -EINVAL;
- maxooblen = ((mtd_div_by_ws(mtd->size, mtd) -
- mtd_div_by_ws(offs, mtd)) *
+ maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
+ mtd_div_by_ws(offs, mtd)) *
mtd_oobavail(mtd, ops)) - ops->ooboffs;
if (ops->ooblen > maxooblen)
return -EINVAL;
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 99c460facd5e..b6af41b04622 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -61,6 +61,15 @@ static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
return container_of(mtd, struct mtd_part, mtd);
}
+static u64 part_absolute_offset(struct mtd_info *mtd)
+{
+ struct mtd_part *part = mtd_to_part(mtd);
+
+ if (!mtd_is_partition(mtd))
+ return 0;
+
+ return part_absolute_offset(part->parent) + part->offset;
+}
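
/*
 * A standalone sketch (hypothetical types, not part of this patch) of the
 * recursion above: a partition's absolute offset is its own offset plus
 * that of every ancestor, with the master device contributing zero.
 */
#include <stdio.h>

struct part {
	struct part *parent;		/* NULL for the master device */
	unsigned long long offset;
};

static unsigned long long absolute_offset(const struct part *p)
{
	if (!p->parent)			/* master device */
		return 0;
	return absolute_offset(p->parent) + p->offset;
}

int main(void)
{
	struct part master = { NULL, 0 };
	struct part outer = { &master, 0x100000 };	/* 1 MiB into master */
	struct part inner = { &outer, 0x20000 };	/* 128 KiB into outer */

	printf("0x%llx\n", absolute_offset(&inner));	/* prints 0x120000 */
	return 0;
}
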
/*
* MTD methods which simply translate the effective address and pass through
@@ -346,7 +355,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
/* set up the MTD object for this partition */
slave->mtd.type = parent->type;
- slave->mtd.flags = parent->flags & ~part->mask_flags;
+ slave->mtd.flags = parent->orig_flags & ~part->mask_flags;
+ slave->mtd.orig_flags = slave->mtd.flags;
slave->mtd.size = part->size;
slave->mtd.writesize = parent->writesize;
slave->mtd.writebufsize = parent->writebufsize;
@@ -513,7 +523,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
if (!(slave->mtd.flags & MTD_NO_ERASE))
wr_alignment = slave->mtd.erasesize;
- tmp = slave->offset;
+ tmp = part_absolute_offset(parent) + slave->offset;
remainder = do_div(tmp, wr_alignment);
if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
/* Doesn't start on a boundary of major erase size */
@@ -524,7 +534,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
part->name);
}
- tmp = slave->mtd.size;
+ tmp = part_absolute_offset(parent) + slave->mtd.size;
remainder = do_div(tmp, wr_alignment);
if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
slave->mtd.flags &= ~MTD_WRITEABLE;
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index d9dcb2d051b4..d162d1717fad 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -1265,18 +1265,7 @@ static int mtdswap_show(struct seq_file *s, void *data)
return 0;
}
-
-static int mtdswap_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mtdswap_show, inode->i_private);
-}
-
-static const struct file_operations mtdswap_fops = {
- .open = mtdswap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mtdswap);
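
/*
 * For reference: DEFINE_SHOW_ATTRIBUTE(mtdswap) from <linux/seq_file.h>
 * expands to roughly the boilerplate removed above (abridged sketch, not
 * part of this patch):
 */
static int mtdswap_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtdswap_show, inode->i_private);
}

static const struct file_operations mtdswap_fops = {
	.owner		= THIS_MODULE,
	.open		= mtdswap_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
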
static int mtdswap_add_debugfs(struct mtdswap_dev *d)
{
diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c
index 56cde38b92c0..044adf913854 100644
--- a/drivers/mtd/nand/bbt.c
+++ b/drivers/mtd/nand/bbt.c
@@ -27,7 +27,8 @@ int nanddev_bbt_init(struct nand_device *nand)
unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
BITS_PER_LONG);
- nand->bbt.cache = kzalloc(nwords, GFP_KERNEL);
+ nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
+ GFP_KERNEL);
if (!nand->bbt.cache)
return -ENOMEM;
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index c7efc31384d5..1a55d3e3d4c5 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -70,7 +70,7 @@ config MTD_NAND_GPIO
config MTD_NAND_AMS_DELTA
tristate "NAND Flash device on Amstrad E3"
- depends on MACH_AMS_DELTA
+ depends on MACH_AMS_DELTA || COMPILE_TEST
default y
help
Support for NAND flash on Amstrad E3 (Delta).
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index 5ba180a291eb..8312182088c1 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
*
@@ -8,10 +9,6 @@
* Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
* Partially stolen from plat_nand.c
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Overview:
* This is a device driver for the NAND flash device found on the
* Amstrad E3 (Delta).
@@ -24,18 +21,14 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/platform_data/gpio-omap.h>
-
-#include <asm/io.h>
-#include <asm/sizes.h>
-
-#include <mach/hardware.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
/*
* MTD structure for E3 (Delta)
*/
-
struct ams_delta_nand {
+ struct nand_controller base;
struct nand_chip nand_chip;
struct gpio_desc *gpiod_rdy;
struct gpio_desc *gpiod_nce;
@@ -44,7 +37,7 @@ struct ams_delta_nand {
struct gpio_desc *gpiod_nwe;
struct gpio_desc *gpiod_ale;
struct gpio_desc *gpiod_cle;
- void __iomem *io_base;
+ struct gpio_descs *data_gpiods;
bool data_in;
};
@@ -73,99 +66,154 @@ static const struct mtd_partition partition_info[] = {
.size = 3 * SZ_256K },
};
-static void ams_delta_io_write(struct ams_delta_nand *priv, u_char byte)
+static void ams_delta_write_commit(struct ams_delta_nand *priv)
{
- writew(byte, priv->nand_chip.legacy.IO_ADDR_W);
gpiod_set_value(priv->gpiod_nwe, 0);
ndelay(40);
gpiod_set_value(priv->gpiod_nwe, 1);
}
-static u_char ams_delta_io_read(struct ams_delta_nand *priv)
+static void ams_delta_io_write(struct ams_delta_nand *priv, u8 byte)
+{
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
+
+ gpiod_set_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
+ data_gpiods->info, values);
+
+ ams_delta_write_commit(priv);
+}
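
/*
 * A standalone sketch of the byte-to-GPIO fan-out performed by the bitmap
 * plus gpiod_set_raw_array_value() call above: data GPIO i carries bit i
 * of the byte. Plain userspace C, not part of this patch.
 */
#include <stdio.h>

int main(void)
{
	unsigned char byte = 0xA5;
	int i;

	for (i = 0; i < 8; i++)
		printf("data%d = %d\n", i, (byte >> i) & 1);
	return 0;
}
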
+
+static void ams_delta_dir_output(struct ams_delta_nand *priv, u8 byte)
+{
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(byte)) = { byte, };
+ int i;
+
+ for (i = 0; i < data_gpiods->ndescs; i++)
+ gpiod_direction_output_raw(data_gpiods->desc[i],
+ test_bit(i, values));
+
+ ams_delta_write_commit(priv);
+
+ priv->data_in = false;
+}
+
+static u8 ams_delta_io_read(struct ams_delta_nand *priv)
{
- u_char res;
+ u8 res;
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ DECLARE_BITMAP(values, BITS_PER_TYPE(res)) = { 0, };
gpiod_set_value(priv->gpiod_nre, 0);
ndelay(40);
- res = readw(priv->nand_chip.legacy.IO_ADDR_R);
+
+ gpiod_get_raw_array_value(data_gpiods->ndescs, data_gpiods->desc,
+ data_gpiods->info, values);
+
gpiod_set_value(priv->gpiod_nre, 1);
+ res = values[0];
return res;
}
-static void ams_delta_dir_input(struct ams_delta_nand *priv, bool in)
+static void ams_delta_dir_input(struct ams_delta_nand *priv)
{
- writew(in ? ~0 : 0, priv->io_base + OMAP_MPUIO_IO_CNTL);
- priv->data_in = in;
+ struct gpio_descs *data_gpiods = priv->data_gpiods;
+ int i;
+
+ for (i = 0; i < data_gpiods->ndescs; i++)
+ gpiod_direction_input(data_gpiods->desc[i]);
+
+ priv->data_in = true;
}
-static void ams_delta_write_buf(struct nand_chip *this, const u_char *buf,
+static void ams_delta_write_buf(struct ams_delta_nand *priv, const u8 *buf,
int len)
{
- struct ams_delta_nand *priv = nand_get_controller_data(this);
- int i;
+ int i = 0;
- if (priv->data_in)
- ams_delta_dir_input(priv, false);
+ if (len > 0 && priv->data_in)
+ ams_delta_dir_output(priv, buf[i++]);
- for (i = 0; i < len; i++)
- ams_delta_io_write(priv, buf[i]);
+ while (i < len)
+ ams_delta_io_write(priv, buf[i++]);
}
-static void ams_delta_read_buf(struct nand_chip *this, u_char *buf, int len)
+static void ams_delta_read_buf(struct ams_delta_nand *priv, u8 *buf, int len)
{
- struct ams_delta_nand *priv = nand_get_controller_data(this);
int i;
if (!priv->data_in)
- ams_delta_dir_input(priv, true);
+ ams_delta_dir_input(priv);
for (i = 0; i < len; i++)
buf[i] = ams_delta_io_read(priv);
}
-static u_char ams_delta_read_byte(struct nand_chip *this)
+static void ams_delta_ctrl_cs(struct ams_delta_nand *priv, bool assert)
{
- u_char res;
-
- ams_delta_read_buf(this, &res, 1);
-
- return res;
+ gpiod_set_value(priv->gpiod_nce, assert ? 0 : 1);
}
-/*
- * Command control function
- *
- * ctrl:
- * NAND_NCE: bit 0 -> bit 2
- * NAND_CLE: bit 1 -> bit 7
- * NAND_ALE: bit 2 -> bit 6
- */
-static void ams_delta_hwcontrol(struct nand_chip *this, int cmd,
- unsigned int ctrl)
+static int ams_delta_exec_op(struct nand_chip *this,
+ const struct nand_operation *op, bool check_only)
{
struct ams_delta_nand *priv = nand_get_controller_data(this);
-
- if (ctrl & NAND_CTRL_CHANGE) {
- gpiod_set_value(priv->gpiod_nce, !(ctrl & NAND_NCE));
- gpiod_set_value(priv->gpiod_cle, !!(ctrl & NAND_CLE));
- gpiod_set_value(priv->gpiod_ale, !!(ctrl & NAND_ALE));
+ const struct nand_op_instr *instr;
+ int ret = 0;
+
+ if (check_only)
+ return 0;
+
+ ams_delta_ctrl_cs(priv, 1);
+
+ for (instr = op->instrs; instr < op->instrs + op->ninstrs; instr++) {
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ gpiod_set_value(priv->gpiod_cle, 1);
+ ams_delta_write_buf(priv, &instr->ctx.cmd.opcode, 1);
+ gpiod_set_value(priv->gpiod_cle, 0);
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ gpiod_set_value(priv->gpiod_ale, 1);
+ ams_delta_write_buf(priv, instr->ctx.addr.addrs,
+ instr->ctx.addr.naddrs);
+ gpiod_set_value(priv->gpiod_ale, 0);
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ ams_delta_read_buf(priv, instr->ctx.data.buf.in,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_DATA_OUT_INSTR:
+ ams_delta_write_buf(priv, instr->ctx.data.buf.out,
+ instr->ctx.data.len);
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ ret = priv->gpiod_rdy ?
+ nand_gpio_waitrdy(this, priv->gpiod_rdy,
+ instr->ctx.waitrdy.timeout_ms) :
+ nand_soft_waitrdy(this,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+
+ if (ret)
+ break;
}
- if (cmd != NAND_CMD_NONE) {
- u_char byte = cmd;
+ ams_delta_ctrl_cs(priv, 0);
- ams_delta_write_buf(this, &byte, 1);
- }
-}
-
-static int ams_delta_nand_ready(struct nand_chip *this)
-{
- struct ams_delta_nand *priv = nand_get_controller_data(this);
-
- return gpiod_get_value(priv->gpiod_rdy);
+ return ret;
}
+static const struct nand_controller_ops ams_delta_ops = {
+ .exec_op = ams_delta_exec_op,
+};
/*
* Main initialization routine
@@ -175,61 +223,29 @@ static int ams_delta_init(struct platform_device *pdev)
struct ams_delta_nand *priv;
struct nand_chip *this;
struct mtd_info *mtd;
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- void __iomem *io_base;
+ struct gpio_descs *data_gpiods;
int err = 0;
- if (!res)
- return -ENXIO;
-
/* Allocate memory for MTD device structure and private data */
priv = devm_kzalloc(&pdev->dev, sizeof(struct ams_delta_nand),
GFP_KERNEL);
- if (!priv) {
- pr_warn("Unable to allocate E3 NAND MTD device structure.\n");
+ if (!priv)
return -ENOMEM;
- }
+
this = &priv->nand_chip;
mtd = nand_to_mtd(this);
mtd->dev.parent = &pdev->dev;
- /*
- * Don't try to request the memory region from here,
- * it should have been already requested from the
- * gpio-omap driver and requesting it again would fail.
- */
-
- io_base = ioremap(res->start, resource_size(res));
- if (io_base == NULL) {
- dev_err(&pdev->dev, "ioremap failed\n");
- err = -EIO;
- goto out_free;
- }
-
- priv->io_base = io_base;
nand_set_controller_data(this, priv);
- /* Set address of NAND IO lines */
- this->legacy.IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
- this->legacy.IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
- this->legacy.read_byte = ams_delta_read_byte;
- this->legacy.write_buf = ams_delta_write_buf;
- this->legacy.read_buf = ams_delta_read_buf;
- this->legacy.cmd_ctrl = ams_delta_hwcontrol;
-
priv->gpiod_rdy = devm_gpiod_get_optional(&pdev->dev, "rdy", GPIOD_IN);
if (IS_ERR(priv->gpiod_rdy)) {
err = PTR_ERR(priv->gpiod_rdy);
dev_warn(&pdev->dev, "RDY GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
- if (priv->gpiod_rdy)
- this->legacy.dev_ready = ams_delta_nand_ready;
-
- /* 25 us command delay time */
- this->legacy.chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
this->ecc.algo = NAND_ECC_HAMMING;
@@ -240,61 +256,75 @@ static int ams_delta_init(struct platform_device *pdev)
if (IS_ERR(priv->gpiod_nwp)) {
err = PTR_ERR(priv->gpiod_nwp);
dev_err(&pdev->dev, "NWP GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
priv->gpiod_nce = devm_gpiod_get(&pdev->dev, "nce", GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpiod_nce)) {
err = PTR_ERR(priv->gpiod_nce);
dev_err(&pdev->dev, "NCE GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
priv->gpiod_nre = devm_gpiod_get(&pdev->dev, "nre", GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpiod_nre)) {
err = PTR_ERR(priv->gpiod_nre);
dev_err(&pdev->dev, "NRE GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
priv->gpiod_nwe = devm_gpiod_get(&pdev->dev, "nwe", GPIOD_OUT_HIGH);
if (IS_ERR(priv->gpiod_nwe)) {
err = PTR_ERR(priv->gpiod_nwe);
dev_err(&pdev->dev, "NWE GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
priv->gpiod_ale = devm_gpiod_get(&pdev->dev, "ale", GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_ale)) {
err = PTR_ERR(priv->gpiod_ale);
dev_err(&pdev->dev, "ALE GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
priv->gpiod_cle = devm_gpiod_get(&pdev->dev, "cle", GPIOD_OUT_LOW);
if (IS_ERR(priv->gpiod_cle)) {
err = PTR_ERR(priv->gpiod_cle);
dev_err(&pdev->dev, "CLE GPIO request failed (%d)\n", err);
- goto out_mtd;
+ return err;
}
- /* Initialize data port direction to a known state */
- ams_delta_dir_input(priv, true);
+ /* Request array of data pins, initialize them as input */
+ data_gpiods = devm_gpiod_get_array(&pdev->dev, "data", GPIOD_IN);
+ if (IS_ERR(data_gpiods)) {
+ err = PTR_ERR(data_gpiods);
+ dev_err(&pdev->dev, "data GPIO request failed: %d\n", err);
+ return err;
+ }
+ priv->data_gpiods = data_gpiods;
+ priv->data_in = true;
+
+ /* Initialize the NAND controller object embedded in ams_delta_nand. */
+ priv->base.ops = &ams_delta_ops;
+ nand_controller_init(&priv->base);
+ this->controller = &priv->base;
/* Scan to find existence of the device */
err = nand_scan(this, 1);
if (err)
- goto out_mtd;
+ return err;
/* Register the partitions */
- mtd_device_register(mtd, partition_info, ARRAY_SIZE(partition_info));
+ err = mtd_device_register(mtd, partition_info,
+ ARRAY_SIZE(partition_info));
+ if (err)
+ goto err_nand_cleanup;
- goto out;
+ return 0;
+
+err_nand_cleanup:
+ nand_cleanup(this);
- out_mtd:
- iounmap(io_base);
-out_free:
- out:
return err;
}
@@ -305,13 +335,10 @@ static int ams_delta_cleanup(struct platform_device *pdev)
{
struct ams_delta_nand *priv = platform_get_drvdata(pdev);
struct mtd_info *mtd = nand_to_mtd(&priv->nand_chip);
- void __iomem *io_base = priv->io_base;
- /* Release resources, unregister device */
+ /* Unregister device */
nand_release(mtd_to_nand(mtd));
- iounmap(io_base);
-
return 0;
}
@@ -325,6 +352,6 @@ static struct platform_driver ams_delta_nand_driver = {
module_platform_driver(ams_delta_nand_driver);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
MODULE_DESCRIPTION("Glue layer for NAND flash on Amstrad E3 (Delta)");
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index ad720494e8f7..5781fcf6b76c 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1477,10 +1477,10 @@ static void atmel_nand_init(struct atmel_nand_controller *nc,
chip->legacy.write_byte = atmel_nand_write_byte;
chip->legacy.read_buf = atmel_nand_read_buf;
chip->legacy.write_buf = atmel_nand_write_buf;
- chip->select_chip = atmel_nand_select_chip;
+ chip->legacy.select_chip = atmel_nand_select_chip;
- if (nc->mck && nc->caps->ops->setup_data_interface)
- chip->setup_data_interface = atmel_nand_setup_data_interface;
+ if (!nc->mck || !nc->caps->ops->setup_data_interface)
+ chip->options |= NAND_KEEP_TIMINGS;
/* Some NANDs require a longer delay than the default one (20us). */
chip->legacy.chip_delay = 40;
@@ -1525,7 +1525,7 @@ static void atmel_hsmc_nand_init(struct atmel_nand_controller *nc,
/* Overload some methods for the HSMC controller. */
chip->legacy.cmd_ctrl = atmel_hsmc_nand_cmd_ctrl;
- chip->select_chip = atmel_hsmc_nand_select_chip;
+ chip->legacy.select_chip = atmel_hsmc_nand_select_chip;
}
static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
@@ -1908,6 +1908,7 @@ static int atmel_nand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops atmel_nand_controller_ops = {
.attach_chip = atmel_nand_attach_chip,
+ .setup_data_interface = atmel_nand_setup_data_interface,
};
static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
diff --git a/drivers/mtd/nand/raw/au1550nd.c b/drivers/mtd/nand/raw/au1550nd.c
index 9731c1c487f6..a963002663ed 100644
--- a/drivers/mtd/nand/raw/au1550nd.c
+++ b/drivers/mtd/nand/raw/au1550nd.c
@@ -430,7 +430,7 @@ static int au1550nd_probe(struct platform_device *pdev)
ctx->cs = cs;
this->legacy.dev_ready = au1550_device_ready;
- this->select_chip = au1550_select_chip;
+ this->legacy.select_chip = au1550_select_chip;
this->legacy.cmdfunc = au1550_command;
/* 30 us command delay time */
diff --git a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
index 9095a79ebc7d..a37cbfe56567 100644
--- a/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/raw/bcm47xxnflash/ops_bcm4706.c
@@ -383,7 +383,7 @@ int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
u8 tbits, col_bits, col_size, row_bits, row_bsize;
u32 val;
- b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
+ nand_chip->legacy.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
nand_chip->legacy.cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
nand_chip->legacy.dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
b47n->nand_chip.legacy.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
diff --git a/drivers/mtd/nand/raw/cafe_nand.c b/drivers/mtd/nand/raw/cafe_nand.c
index c1a745940d12..b1c0cd6b49da 100644
--- a/drivers/mtd/nand/raw/cafe_nand.c
+++ b/drivers/mtd/nand/raw/cafe_nand.c
@@ -708,7 +708,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe->nand.legacy.read_byte = cafe_read_byte;
cafe->nand.legacy.read_buf = cafe_read_buf;
cafe->nand.legacy.write_buf = cafe_write_buf;
- cafe->nand.select_chip = cafe_select_chip;
+ cafe->nand.legacy.select_chip = cafe_select_chip;
cafe->nand.legacy.set_features = nand_get_set_features_notsupp;
cafe->nand.legacy.get_features = nand_get_set_features_notsupp;
@@ -780,7 +780,7 @@ static int cafe_nand_probe(struct pci_dev *pdev,
cafe->usedma = 0;
/* Scan to find existence of the device */
- cafe->nand.dummy_controller.ops = &cafe_nand_controller_ops;
+ cafe->nand.legacy.dummy_controller.ops = &cafe_nand_controller_ops;
err = nand_scan(&cafe->nand, 2);
if (err)
goto out_irq;
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 80f228d23cd2..27bafa5e1ca1 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -762,7 +762,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
info->chip.legacy.IO_ADDR_R = vaddr;
info->chip.legacy.IO_ADDR_W = vaddr;
info->chip.legacy.chip_delay = 0;
- info->chip.select_chip = nand_davinci_select_chip;
+ info->chip.legacy.select_chip = nand_davinci_select_chip;
/* options such as NAND_BBT_USE_FLASH */
info->chip.bbt_options = pdata->bbt_options;
@@ -801,7 +801,7 @@ static int nand_davinci_probe(struct platform_device *pdev)
spin_unlock_irq(&davinci_nand_lock);
/* Scan to find existence of the device(s) */
- info->chip.dummy_controller.ops = &davinci_nand_controller_ops;
+ info->chip.legacy.dummy_controller.ops = &davinci_nand_controller_ops;
ret = nand_scan(&info->chip, pdata->mask_chipsel ? 2 : 1);
if (ret < 0) {
dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 830ea247277b..eebac35304c6 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -204,18 +204,6 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali,
return denali->irq_status;
}
-static uint32_t denali_check_irq(struct denali_nand_info *denali)
-{
- unsigned long flags;
- uint32_t irq_status;
-
- spin_lock_irqsave(&denali->irq_lock, flags);
- irq_status = denali->irq_status;
- spin_unlock_irqrestore(&denali->irq_lock, flags);
-
- return irq_status;
-}
-
static void denali_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -288,8 +276,7 @@ static void denali_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
return;
/*
- * Some commands are followed by chip->legacy.dev_ready or
- * chip->legacy.waitfunc.
+ * Some commands are followed by chip->legacy.waitfunc.
* irq_status must be cleared here to catch the R/B# interrupt later.
*/
if (ctrl & NAND_CTRL_CHANGE)
@@ -298,13 +285,6 @@ static void denali_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl)
denali->host_write(denali, DENALI_BANK(denali) | type, dat);
}
-static int denali_dev_ready(struct nand_chip *chip)
-{
- struct denali_nand_info *denali = mtd_to_denali(nand_to_mtd(chip));
-
- return !!(denali_check_irq(denali) & INTR__INT_ACT);
-}
-
static int denali_check_erased_page(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t *buf,
unsigned long uncor_ecc_flags,
@@ -1065,29 +1045,6 @@ static int denali_setup_data_interface(struct nand_chip *chip, int chipnr,
return 0;
}
-static void denali_reset_banks(struct denali_nand_info *denali)
-{
- u32 irq_status;
- int i;
-
- for (i = 0; i < denali->max_banks; i++) {
- denali->active_bank = i;
-
- denali_reset_irq(denali);
-
- iowrite32(DEVICE_RESET__BANK(i),
- denali->reg + DEVICE_RESET);
-
- irq_status = denali_wait_for_irq(denali,
- INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT);
- if (!(irq_status & INTR__INT_ACT))
- break;
- }
-
- dev_dbg(denali->dev, "%d chips connected\n", i);
- denali->max_banks = i;
-}
-
static void denali_hw_init(struct denali_nand_info *denali)
{
/*
@@ -1316,6 +1273,7 @@ static void denali_detach_chip(struct nand_chip *chip)
static const struct nand_controller_ops denali_controller_ops = {
.attach_chip = denali_attach_chip,
.detach_chip = denali_detach_chip,
+ .setup_data_interface = denali_setup_data_interface,
};
int denali_init(struct denali_nand_info *denali)
@@ -1341,12 +1299,6 @@ int denali_init(struct denali_nand_info *denali)
}
denali_enable_irq(denali);
- denali_reset_banks(denali);
- if (!denali->max_banks) {
- /* Error out earlier if no chip is found for some reasons. */
- ret = -ENODEV;
- goto disable_irq;
- }
denali->active_bank = DENALI_INVALID_BANK;
@@ -1355,11 +1307,10 @@ int denali_init(struct denali_nand_info *denali)
if (!mtd->name)
mtd->name = "denali-nand";
- chip->select_chip = denali_select_chip;
+ chip->legacy.select_chip = denali_select_chip;
chip->legacy.read_byte = denali_read_byte;
chip->legacy.write_byte = denali_write_byte;
chip->legacy.cmd_ctrl = denali_cmd_ctrl;
- chip->legacy.dev_ready = denali_dev_ready;
chip->legacy.waitfunc = denali_waitfunc;
if (features & FEATURES__INDEX_ADDR) {
@@ -1372,9 +1323,9 @@ int denali_init(struct denali_nand_info *denali)
/* clk rate info is needed for setup_data_interface */
- if (denali->clk_rate && denali->clk_x_rate)
- chip->setup_data_interface = denali_setup_data_interface;
+ if (!denali->clk_rate || !denali->clk_x_rate)
+ chip->options |= NAND_KEEP_TIMINGS;
- chip->dummy_controller.ops = &denali_controller_ops;
+ chip->legacy.dummy_controller.ops = &denali_controller_ops;
ret = nand_scan(chip, denali->max_banks);
if (ret)
goto disable_irq;
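Two related denali changes land here: ->setup_data_interface() stops being a
per-chip hook and becomes a nand_controller_ops member, and the old trick of
conditionally not assigning the hook is replaced by the NAND_KEEP_TIMINGS
chip option (set when the clock rates needed to compute timings are unknown).
The driver also drops its ->dev_ready() hook (the R/B# interrupt is caught
via ->waitfunc() instead, as the edited comment notes) and its own bank-reset
loop, since nand_scan() already detects how many chips respond. A minimal
sketch of the new wiring, assuming a hypothetical foo driver:

	static const struct nand_controller_ops foo_controller_ops = {
		.attach_chip = foo_attach_chip,
		.setup_data_interface = foo_setup_data_interface,
	};

	/* probe path: opt out of timing reconfiguration when unsupported */
	if (!foo_timings_configurable(priv))
		chip->options |= NAND_KEEP_TIMINGS;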
diff --git a/drivers/mtd/nand/raw/denali.h b/drivers/mtd/nand/raw/denali.h
index 57a5498f58bb..25c00601b8b3 100644
--- a/drivers/mtd/nand/raw/denali.h
+++ b/drivers/mtd/nand/raw/denali.h
@@ -7,7 +7,7 @@
#ifndef __DENALI_H__
#define __DENALI_H__
-#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/mtd/rawnand.h>
#include <linux/spinlock_types.h>
diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c
index 3a4c373affab..53f57e0f007e 100644
--- a/drivers/mtd/nand/raw/diskonchip.c
+++ b/drivers/mtd/nand/raw/diskonchip.c
@@ -1390,7 +1390,7 @@ static inline int __init doc2001plus_init(struct mtd_info *mtd)
this->legacy.read_buf = doc2001plus_readbuf;
doc->late_init = inftl_scan_bbt;
this->legacy.cmd_ctrl = NULL;
- this->select_chip = doc2001plus_select_chip;
+ this->legacy.select_chip = doc2001plus_select_chip;
this->legacy.cmdfunc = doc2001plus_command;
this->ecc.hwctl = doc2001plus_enable_hwecc;
@@ -1568,7 +1568,7 @@ static int __init doc_probe(unsigned long physadr)
mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
nand_set_controller_data(nand, doc);
- nand->select_chip = doc200x_select_chip;
+ nand->legacy.select_chip = doc200x_select_chip;
nand->legacy.cmd_ctrl = doc200x_hwcontrol;
nand->legacy.dev_ready = doc200x_dev_ready;
nand->legacy.waitfunc = doc200x_wait;
diff --git a/drivers/mtd/nand/raw/fsl_elbc_nand.c b/drivers/mtd/nand/raw/fsl_elbc_nand.c
index d6ed697fcfe6..70f0d2b450ea 100644
--- a/drivers/mtd/nand/raw/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_elbc_nand.c
@@ -779,7 +779,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->legacy.read_byte = fsl_elbc_read_byte;
chip->legacy.write_buf = fsl_elbc_write_buf;
chip->legacy.read_buf = fsl_elbc_read_buf;
- chip->select_chip = fsl_elbc_select_chip;
+ chip->legacy.select_chip = fsl_elbc_select_chip;
chip->legacy.cmdfunc = fsl_elbc_cmdfunc;
chip->legacy.waitfunc = fsl_elbc_wait;
chip->legacy.set_features = nand_get_set_features_notsupp;
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index 6f4afc44381a..e65d274399f9 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -864,7 +864,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->legacy.write_buf = fsl_ifc_write_buf;
chip->legacy.read_buf = fsl_ifc_read_buf;
- chip->select_chip = fsl_ifc_select_chip;
+ chip->legacy.select_chip = fsl_ifc_select_chip;
chip->legacy.cmdfunc = fsl_ifc_cmdfunc;
chip->legacy.waitfunc = fsl_ifc_wait;
chip->legacy.set_features = nand_get_set_features_notsupp;
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index 673c5a0c9345..5ccc28ec0985 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -170,7 +170,7 @@ static int fun_chip_init(struct fsl_upm_nand *fun,
fun->chip.ecc.mode = NAND_ECC_SOFT;
fun->chip.ecc.algo = NAND_ECC_HAMMING;
if (fun->mchip_count > 1)
- fun->chip.select_chip = fun_select_chip;
+ fun->chip.legacy.select_chip = fun_select_chip;
if (fun->rnb_gpio[0] >= 0)
fun->chip.legacy.dev_ready = fun_chip_ready;
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 70ac8d875218..325b4414dccc 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ST Microelectronics
* Flexible Static Memory Controller (FSMC)
@@ -10,10 +11,6 @@
* Based on drivers/mtd/nand/nomadik_nand.c (removed in v3.8)
* Copyright © 2007 STMicroelectronics Pvt. Ltd.
* Copyright © 2009 Alessandro Rubini
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/clk.h>
@@ -41,15 +38,14 @@
/* fsmc controller registers for NOR flash */
#define CTRL 0x0
/* ctrl register definitions */
- #define BANK_ENABLE (1 << 0)
- #define MUXED (1 << 1)
+ #define BANK_ENABLE BIT(0)
+ #define MUXED BIT(1)
#define NOR_DEV (2 << 2)
- #define WIDTH_8 (0 << 4)
- #define WIDTH_16 (1 << 4)
- #define RSTPWRDWN (1 << 6)
- #define WPROT (1 << 7)
- #define WRT_ENABLE (1 << 12)
- #define WAIT_ENB (1 << 13)
+ #define WIDTH_16 BIT(4)
+ #define RSTPWRDWN BIT(6)
+ #define WPROT BIT(7)
+ #define WRT_ENABLE BIT(12)
+ #define WAIT_ENB BIT(13)
#define CTRL_TIM 0x4
/* ctrl_tim register definitions */
@@ -57,43 +53,35 @@
#define FSMC_NOR_BANK_SZ 0x8
#define FSMC_NOR_REG_SIZE 0x40
-#define FSMC_NOR_REG(base, bank, reg) (base + \
- FSMC_NOR_BANK_SZ * (bank) + \
- reg)
+#define FSMC_NOR_REG(base, bank, reg) ((base) + \
+ (FSMC_NOR_BANK_SZ * (bank)) + \
+ (reg))
/* fsmc controller registers for NAND flash */
#define FSMC_PC 0x00
/* pc register definitions */
- #define FSMC_RESET (1 << 0)
- #define FSMC_WAITON (1 << 1)
- #define FSMC_ENABLE (1 << 2)
- #define FSMC_DEVTYPE_NAND (1 << 3)
- #define FSMC_DEVWID_8 (0 << 4)
- #define FSMC_DEVWID_16 (1 << 4)
- #define FSMC_ECCEN (1 << 6)
- #define FSMC_ECCPLEN_512 (0 << 7)
- #define FSMC_ECCPLEN_256 (1 << 7)
- #define FSMC_TCLR_1 (1)
+ #define FSMC_RESET BIT(0)
+ #define FSMC_WAITON BIT(1)
+ #define FSMC_ENABLE BIT(2)
+ #define FSMC_DEVTYPE_NAND BIT(3)
+ #define FSMC_DEVWID_16 BIT(4)
+ #define FSMC_ECCEN BIT(6)
+ #define FSMC_ECCPLEN_256 BIT(7)
#define FSMC_TCLR_SHIFT (9)
#define FSMC_TCLR_MASK (0xF)
- #define FSMC_TAR_1 (1)
#define FSMC_TAR_SHIFT (13)
#define FSMC_TAR_MASK (0xF)
#define STS 0x04
/* sts register definitions */
- #define FSMC_CODE_RDY (1 << 15)
+ #define FSMC_CODE_RDY BIT(15)
#define COMM 0x08
/* comm register definitions */
- #define FSMC_TSET_0 0
#define FSMC_TSET_SHIFT 0
#define FSMC_TSET_MASK 0xFF
- #define FSMC_TWAIT_6 6
#define FSMC_TWAIT_SHIFT 8
#define FSMC_TWAIT_MASK 0xFF
- #define FSMC_THOLD_4 4
#define FSMC_THOLD_SHIFT 16
#define FSMC_THOLD_MASK 0xFF
- #define FSMC_THIZ_1 1
#define FSMC_THIZ_SHIFT 24
#define FSMC_THIZ_MASK 0xFF
#define ATTRIB 0x0C
@@ -106,12 +94,12 @@
#define FSMC_BUSY_WAIT_TIMEOUT (1 * HZ)
struct fsmc_nand_timings {
- uint8_t tclr;
- uint8_t tar;
- uint8_t thiz;
- uint8_t thold;
- uint8_t twait;
- uint8_t tset;
+ u8 tclr;
+ u8 tar;
+ u8 thiz;
+ u8 thold;
+ u8 twait;
+ u8 tset;
};
enum access_mode {
@@ -122,19 +110,21 @@ enum access_mode {
/**
* struct fsmc_nand_data - structure for FSMC NAND device state
*
+ * @base: Inherit from the nand_controller struct
* @pid: Part ID on the AMBA PrimeCell format
- * @mtd: MTD info for a NAND flash.
* @nand: Chip related info for a NAND flash.
- * @partitions: Partition info for a NAND Flash.
- * @nr_partitions: Total number of partition of a NAND flash.
*
* @bank: Bank number for probed device.
+ * @dev: Parent device
+ * @mode: Access mode
* @clk: Clock structure for FSMC.
*
* @read_dma_chan: DMA channel for read access
* @write_dma_chan: DMA channel for write access to NAND
* @dma_access_complete: Completion structure
*
+ * @dev_timings: NAND timings
+ *
* @data_pa: NAND Physical port for Data.
* @data_va: NAND port for Data.
* @cmd_va: NAND port for Command.
@@ -142,6 +132,7 @@ enum access_mode {
* @regs_va: Registers base address for a given bank.
*/
struct fsmc_nand_data {
+ struct nand_controller base;
u32 pid;
struct nand_chip nand;
@@ -248,9 +239,9 @@ static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
.free = fsmc_ecc4_ooblayout_free,
};
-static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd)
+static inline struct fsmc_nand_data *nand_to_fsmc(struct nand_chip *chip)
{
- return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand);
+ return container_of(chip, struct fsmc_nand_data, nand);
}
/*
@@ -262,8 +253,8 @@ static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd)
static void fsmc_nand_setup(struct fsmc_nand_data *host,
struct fsmc_nand_timings *tims)
{
- uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
- uint32_t tclr, tar, thiz, thold, twait, tset;
+ u32 value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
+ u32 tclr, tar, thiz, thold, twait, tset;
tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
@@ -273,13 +264,9 @@ static void fsmc_nand_setup(struct fsmc_nand_data *host,
tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
if (host->nand.options & NAND_BUSWIDTH_16)
- writel_relaxed(value | FSMC_DEVWID_16,
- host->regs_va + FSMC_PC);
- else
- writel_relaxed(value | FSMC_DEVWID_8, host->regs_va + FSMC_PC);
+ value |= FSMC_DEVWID_16;
- writel_relaxed(readl(host->regs_va + FSMC_PC) | tclr | tar,
- host->regs_va + FSMC_PC);
+ writel_relaxed(value | tclr | tar, host->regs_va + FSMC_PC);
writel_relaxed(thiz | thold | twait | tset, host->regs_va + COMM);
writel_relaxed(thiz | thold | twait | tset, host->regs_va + ATTRIB);
}
@@ -290,7 +277,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
{
unsigned long hclk = clk_get_rate(host->clk);
unsigned long hclkn = NSEC_PER_SEC / hclk;
- uint32_t thiz, thold, twait, tset;
+ u32 thiz, thold, twait, tset;
if (sdrt->tRC_min < 30000)
return -EOPNOTSUPP;
@@ -343,7 +330,7 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host,
static int fsmc_setup_data_interface(struct nand_chip *nand, int csline,
const struct nand_data_interface *conf)
{
- struct fsmc_nand_data *host = nand_get_controller_data(nand);
+ struct fsmc_nand_data *host = nand_to_fsmc(nand);
struct fsmc_nand_timings tims;
const struct nand_sdr_timings *sdrt;
int ret;
@@ -369,7 +356,7 @@ static int fsmc_setup_data_interface(struct nand_chip *nand, int csline,
*/
static void fsmc_enable_hwecc(struct nand_chip *chip, int mode)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCPLEN_256,
host->regs_va + FSMC_PC);
@@ -384,18 +371,18 @@ static void fsmc_enable_hwecc(struct nand_chip *chip, int mode)
* FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction up to
* max of 8-bits)
*/
-static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const uint8_t *data,
- uint8_t *ecc)
+static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
- uint32_t ecc_tmp;
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 ecc_tmp;
unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
do {
if (readl_relaxed(host->regs_va + STS) & FSMC_CODE_RDY)
break;
- else
- cond_resched();
+
+ cond_resched();
} while (!time_after_eq(jiffies, deadline));
if (time_after_eq(jiffies, deadline)) {
@@ -404,25 +391,25 @@ static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const uint8_t *data,
}
ecc_tmp = readl_relaxed(host->regs_va + ECC1);
- ecc[0] = (uint8_t) (ecc_tmp >> 0);
- ecc[1] = (uint8_t) (ecc_tmp >> 8);
- ecc[2] = (uint8_t) (ecc_tmp >> 16);
- ecc[3] = (uint8_t) (ecc_tmp >> 24);
+ ecc[0] = ecc_tmp;
+ ecc[1] = ecc_tmp >> 8;
+ ecc[2] = ecc_tmp >> 16;
+ ecc[3] = ecc_tmp >> 24;
ecc_tmp = readl_relaxed(host->regs_va + ECC2);
- ecc[4] = (uint8_t) (ecc_tmp >> 0);
- ecc[5] = (uint8_t) (ecc_tmp >> 8);
- ecc[6] = (uint8_t) (ecc_tmp >> 16);
- ecc[7] = (uint8_t) (ecc_tmp >> 24);
+ ecc[4] = ecc_tmp;
+ ecc[5] = ecc_tmp >> 8;
+ ecc[6] = ecc_tmp >> 16;
+ ecc[7] = ecc_tmp >> 24;
ecc_tmp = readl_relaxed(host->regs_va + ECC3);
- ecc[8] = (uint8_t) (ecc_tmp >> 0);
- ecc[9] = (uint8_t) (ecc_tmp >> 8);
- ecc[10] = (uint8_t) (ecc_tmp >> 16);
- ecc[11] = (uint8_t) (ecc_tmp >> 24);
+ ecc[8] = ecc_tmp;
+ ecc[9] = ecc_tmp >> 8;
+ ecc[10] = ecc_tmp >> 16;
+ ecc[11] = ecc_tmp >> 24;
ecc_tmp = readl_relaxed(host->regs_va + STS);
- ecc[12] = (uint8_t) (ecc_tmp >> 16);
+ ecc[12] = ecc_tmp >> 16;
return 0;
}
@@ -432,22 +419,22 @@ static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const uint8_t *data,
* FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction up to
* max of 1-bit)
*/
-static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const uint8_t *data,
- uint8_t *ecc)
+static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const u8 *data,
+ u8 *ecc)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
- uint32_t ecc_tmp;
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 ecc_tmp;
ecc_tmp = readl_relaxed(host->regs_va + ECC1);
- ecc[0] = (uint8_t) (ecc_tmp >> 0);
- ecc[1] = (uint8_t) (ecc_tmp >> 8);
- ecc[2] = (uint8_t) (ecc_tmp >> 16);
+ ecc[0] = ecc_tmp;
+ ecc[1] = ecc_tmp >> 8;
+ ecc[2] = ecc_tmp >> 16;
return 0;
}
/* Count the number of 0's in buff up to a max of max_bits */
-static int count_written_bits(uint8_t *buff, int size, int max_bits)
+static int count_written_bits(u8 *buff, int size, int max_bits)
{
int k, written_bits = 0;
@@ -468,7 +455,7 @@ static void dma_complete(void *param)
}
static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
- enum dma_data_direction direction)
+ enum dma_data_direction direction)
{
struct dma_chan *chan;
struct dma_device *dma_dev;
@@ -519,7 +506,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
time_left =
wait_for_completion_timeout(&host->dma_access_complete,
- msecs_to_jiffies(3000));
+ msecs_to_jiffies(3000));
if (time_left == 0) {
dmaengine_terminate_all(chan);
dev_err(host->dev, "wait_for_completion_timeout\n");
@@ -537,18 +524,19 @@ unmap_dma:
/*
* fsmc_write_buf - write buffer to chip
- * @mtd: MTD device structure
+ * @host: FSMC NAND controller
* @buf: data buffer
* @len: number of bytes to write
*/
-static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+static void fsmc_write_buf(struct fsmc_nand_data *host, const u8 *buf,
+ int len)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
int i;
- if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
- IS_ALIGNED(len, sizeof(uint32_t))) {
- uint32_t *p = (uint32_t *)buf;
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ IS_ALIGNED(len, sizeof(u32))) {
+ u32 *p = (u32 *)buf;
+
len = len >> 2;
for (i = 0; i < len; i++)
writel_relaxed(p[i], host->data_va);
@@ -560,18 +548,18 @@ static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
/*
* fsmc_read_buf - read chip data into buffer
- * @mtd: MTD device structure
+ * @host: FSMC NAND controller
* @buf: buffer to store data
* @len: number of bytes to read
*/
-static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+static void fsmc_read_buf(struct fsmc_nand_data *host, u8 *buf, int len)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
int i;
- if (IS_ALIGNED((uintptr_t)buf, sizeof(uint32_t)) &&
- IS_ALIGNED(len, sizeof(uint32_t))) {
- uint32_t *p = (uint32_t *)buf;
+ if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
+ IS_ALIGNED(len, sizeof(u32))) {
+ u32 *p = (u32 *)buf;
+
len = len >> 2;
for (i = 0; i < len; i++)
p[i] = readl_relaxed(host->data_va);
@@ -583,48 +571,42 @@ static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
/*
* fsmc_read_buf_dma - read chip data into buffer
- * @mtd: MTD device structure
+ * @host: FSMC NAND controller
* @buf: buffer to store data
* @len: number of bytes to read
*/
-static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
+static void fsmc_read_buf_dma(struct fsmc_nand_data *host, u8 *buf,
+ int len)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
-
dma_xfer(host, buf, len, DMA_FROM_DEVICE);
}
/*
* fsmc_write_buf_dma - write buffer to chip
- * @mtd: MTD device structure
+ * @host: FSMC NAND controller
* @buf: data buffer
* @len: number of bytes to write
*/
-static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
- int len)
+static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf,
+ int len)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
-
dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
}
/* fsmc_select_chip - assert or deassert nCE */
-static void fsmc_select_chip(struct nand_chip *chip, int chipnr)
+static void fsmc_ce_ctrl(struct fsmc_nand_data *host, bool assert)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
- u32 pc;
-
- /* Support only one CS */
- if (chipnr > 0)
- return;
+ u32 pc = readl(host->regs_va + FSMC_PC);
- pc = readl(host->regs_va + FSMC_PC);
- if (chipnr < 0)
+ if (!assert)
writel_relaxed(pc & ~FSMC_ENABLE, host->regs_va + FSMC_PC);
else
writel_relaxed(pc | FSMC_ENABLE, host->regs_va + FSMC_PC);
- /* nCE line must be asserted before starting any operation */
+ /*
+ * nCE line changes must be applied before returning from this
+ * function.
+ */
mb();
}
@@ -637,14 +619,16 @@ static void fsmc_select_chip(struct nand_chip *chip, int chipnr)
static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
bool check_only)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
const struct nand_op_instr *instr = NULL;
int ret = 0;
unsigned int op_id;
int i;
pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
+
+ fsmc_ce_ctrl(host, true);
+
for (op_id = 0; op_id < op->ninstrs; op_id++) {
instr = &op->instrs[op_id];
@@ -671,10 +655,10 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
", force 8-bit" : "");
if (host->mode == USE_DMA_ACCESS)
- fsmc_read_buf_dma(mtd, instr->ctx.data.buf.in,
+ fsmc_read_buf_dma(host, instr->ctx.data.buf.in,
instr->ctx.data.len);
else
- fsmc_read_buf(mtd, instr->ctx.data.buf.in,
+ fsmc_read_buf(host, instr->ctx.data.buf.in,
instr->ctx.data.len);
break;
@@ -684,10 +668,11 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
", force 8-bit" : "");
if (host->mode == USE_DMA_ACCESS)
- fsmc_write_buf_dma(mtd, instr->ctx.data.buf.out,
+ fsmc_write_buf_dma(host,
+ instr->ctx.data.buf.out,
instr->ctx.data.len);
else
- fsmc_write_buf(mtd, instr->ctx.data.buf.out,
+ fsmc_write_buf(host, instr->ctx.data.buf.out,
instr->ctx.data.len);
break;
@@ -701,6 +686,8 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
}
}
+ fsmc_ce_ctrl(host, false);
+
return ret;
}
@@ -717,34 +704,35 @@ static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
* After this read, fsmc hardware generates and reports error data bits(up to a
* max of 8 bits)
*/
-static int fsmc_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+static int fsmc_read_page_hwecc(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
int i, j, s, stat, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *ecc_calc = chip->ecc.calc_buf;
- uint8_t *ecc_code = chip->ecc.code_buf;
- int off, len, group = 0;
+ u8 *p = buf;
+ u8 *ecc_calc = chip->ecc.calc_buf;
+ u8 *ecc_code = chip->ecc.code_buf;
+ int off, len, ret, group = 0;
/*
- * ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
+ * ecc_oob is intentionally taken as u16. In 16bit devices, we
* end up reading 14 bytes (7 words) from oob. The local array is
* to maintain word alignment
*/
- uint16_t ecc_oob[7];
- uint8_t *oob = (uint8_t *)&ecc_oob[0];
+ u16 ecc_oob[7];
+ u8 *oob = (u8 *)&ecc_oob[0];
unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
nand_read_page_op(chip, page, s * eccsize, NULL, 0);
chip->ecc.hwctl(chip, NAND_ECC_READ);
- nand_read_data_op(chip, p, eccsize, false);
+ ret = nand_read_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
for (j = 0; j < eccbytes;) {
struct mtd_oob_region oobregion;
- int ret;
ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
if (ret)
@@ -788,15 +776,15 @@ static int fsmc_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
* @calc_ecc: ecc calculated from read data
*
* calc_ecc is a 104 bit information containing maximum of 8 error
- * offset informations of 13 bits each in 512 bytes of read data.
+ * offset information of 13 bits each in 512 bytes of read data.
*/
-static int fsmc_bch8_correct_data(struct nand_chip *chip, uint8_t *dat,
- uint8_t *read_ecc, uint8_t *calc_ecc)
+static int fsmc_bch8_correct_data(struct nand_chip *chip, u8 *dat,
+ u8 *read_ecc, u8 *calc_ecc)
{
- struct fsmc_nand_data *host = mtd_to_fsmc(nand_to_mtd(chip));
- uint32_t err_idx[8];
- uint32_t num_err, i;
- uint32_t ecc1, ecc2, ecc3, ecc4;
+ struct fsmc_nand_data *host = nand_to_fsmc(chip);
+ u32 err_idx[8];
+ u32 num_err, i;
+ u32 ecc1, ecc2, ecc3, ecc4;
num_err = (readl_relaxed(host->regs_va + STS) >> 10) & 0xF;
@@ -837,8 +825,8 @@ static int fsmc_bch8_correct_data(struct nand_chip *chip, uint8_t *dat,
* |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
*
* calc_ecc is a 104 bit information containing maximum of 8 error
- * offset informations of 13 bits each. calc_ecc is copied into a
- * uint64_t array and error offset indexes are populated in err_idx
+ * offset information of 13 bits each. calc_ecc is copied into a
+ * u64 array and error offset indexes are populated in err_idx
* array
*/
ecc1 = readl_relaxed(host->regs_va + ECC1);
@@ -897,11 +885,13 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
nand->options |= NAND_SKIP_BBTSCAN;
host->dev_timings = devm_kzalloc(&pdev->dev,
- sizeof(*host->dev_timings), GFP_KERNEL);
+ sizeof(*host->dev_timings),
+ GFP_KERNEL);
if (!host->dev_timings)
return -ENOMEM;
+
ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings,
- sizeof(*host->dev_timings));
+ sizeof(*host->dev_timings));
if (ret)
host->dev_timings = NULL;
@@ -920,7 +910,7 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
static int fsmc_nand_attach_chip(struct nand_chip *nand)
{
struct mtd_info *mtd = nand_to_mtd(nand);
- struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+ struct fsmc_nand_data *host = nand_to_fsmc(nand);
if (AMBA_REV_BITS(host->pid) >= 8) {
switch (mtd->oobsize) {
@@ -992,6 +982,8 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
static const struct nand_controller_ops fsmc_nand_controller_ops = {
.attach_chip = fsmc_nand_attach_chip,
+ .exec_op = fsmc_exec_op,
+ .setup_data_interface = fsmc_setup_data_interface,
};
/*
@@ -1061,10 +1053,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
* AMBA PrimeCell bus. However it is not a PrimeCell.
*/
for (pid = 0, i = 0; i < 4; i++)
- pid |= (readl(base + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
+ pid |= (readl(base + resource_size(res) - 0x20 + 4 * i) &
+ 255) << (i * 8);
+
host->pid = pid;
- dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
- "revision %02x, config %02x\n",
+
+ dev_info(&pdev->dev,
+ "FSMC device partno %03x, manufacturer %02x, revision %02x, config %02x\n",
AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
@@ -1075,12 +1070,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
/* Link all private pointers */
mtd = nand_to_mtd(&host->nand);
- nand_set_controller_data(nand, host);
nand_set_flash_node(nand, pdev->dev.of_node);
mtd->dev.parent = &pdev->dev;
- nand->exec_op = fsmc_exec_op;
- nand->select_chip = fsmc_select_chip;
/*
* Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
@@ -1106,10 +1098,10 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
}
}
- if (host->dev_timings)
+ if (host->dev_timings) {
fsmc_nand_setup(host, host->dev_timings);
- else
- nand->setup_data_interface = fsmc_setup_data_interface;
+ nand->options |= NAND_KEEP_TIMINGS;
+ }
if (AMBA_REV_BITS(host->pid) >= 8) {
nand->ecc.read_page = fsmc_read_page_hwecc;
@@ -1119,10 +1111,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
nand->ecc.strength = 8;
}
+ nand_controller_init(&host->base);
+ host->base.ops = &fsmc_nand_controller_ops;
+ nand->controller = &host->base;
+
/*
* Scan to find existence of the device
*/
- nand->dummy_controller.ops = &fsmc_nand_controller_ops;
ret = nand_scan(nand, 1);
if (ret)
goto release_dma_write_chan;
@@ -1175,19 +1170,23 @@ static int fsmc_nand_remove(struct platform_device *pdev)
static int fsmc_nand_suspend(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
+
if (host)
clk_disable_unprepare(host->clk);
+
return 0;
}
static int fsmc_nand_resume(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
+
if (host) {
clk_prepare_enable(host->clk);
if (host->dev_timings)
fsmc_nand_setup(host, host->dev_timings);
}
+
return 0;
}
#endif
@@ -1212,6 +1211,6 @@ static struct platform_driver fsmc_nand_driver = {
module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
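fsmc goes one step further than most drivers in this series: it drops the
legacy hooks entirely. ->exec_op() and ->setup_data_interface() are exposed
through fsmc_nand_controller_ops, a real nand_controller instance embedded in
fsmc_nand_data replaces the dummy controller, and chip-enable handling moves
into the exec_op path (fsmc_ce_ctrl) now that ->select_chip() is gone. A
stripped-down sketch of that exec_op skeleton, with hypothetical foo_*
helpers standing in for the controller-specific accessors:

	static int foo_exec_op(struct nand_chip *chip,
			       const struct nand_operation *op, bool check_only)
	{
		struct foo_nand_data *host = foo_from_chip(chip);
		unsigned int i;

		if (check_only)
			return 0;	/* all instruction types supported */

		foo_set_ce(host, true);	/* formerly done in ->select_chip() */

		for (i = 0; i < op->ninstrs; i++) {
			const struct nand_op_instr *instr = &op->instrs[i];

			switch (instr->type) {
			case NAND_OP_CMD_INSTR:
				writeb(instr->ctx.cmd.opcode, host->cmd_va);
				break;
			case NAND_OP_ADDR_INSTR:
				foo_write_addrs(host, instr);
				break;
			case NAND_OP_DATA_IN_INSTR:
				foo_read_buf(host, instr->ctx.data.buf.in,
					     instr->ctx.data.len);
				break;
			case NAND_OP_DATA_OUT_INSTR:
				foo_write_buf(host, instr->ctx.data.buf.out,
					      instr->ctx.data.len);
				break;
			case NAND_OP_WAITRDY_INSTR:
				nand_soft_waitrdy(chip,
						  instr->ctx.waitrdy.timeout_ms);
				break;
			}
		}

		foo_set_ce(host, false);
		return 0;
	}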
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 94c2b7525c85..ed405c9434fe 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -1549,7 +1549,7 @@ static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
int column, page, chipnr;
chipnr = (int)(ofs >> chip->chip_shift);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
@@ -1562,7 +1562,7 @@ static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
ret = nand_prog_page_op(chip, page, column, block_mark, 1);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
return ret;
}
@@ -1610,7 +1610,7 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
saved_chip_number = this->current_chip;
- chip->select_chip(chip, 0);
+ nand_select_target(chip, 0);
/*
* Loop through the first search area, looking for the NCB fingerprint.
@@ -1638,7 +1638,10 @@ static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
}
- chip->select_chip(chip, saved_chip_number);
+ if (saved_chip_number >= 0)
+ nand_select_target(chip, saved_chip_number);
+ else
+ nand_deselect_target(chip);
if (found_an_ncb_fingerprint)
dev_dbg(dev, "\tFound a fingerprint\n");
@@ -1681,7 +1684,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Select chip 0. */
saved_chip_number = this->current_chip;
- chip->select_chip(chip, 0);
+ nand_select_target(chip, 0);
/* Loop over blocks in the first search area, erasing them. */
dev_dbg(dev, "Erasing the search area...\n");
@@ -1713,7 +1716,11 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
}
/* Deselect chip 0. */
- chip->select_chip(chip, saved_chip_number);
+ if (saved_chip_number >= 0)
+ nand_select_target(chip, saved_chip_number);
+ else
+ nand_deselect_target(chip);
+
return 0;
}
@@ -1762,10 +1769,10 @@ static int mx23_boot_init(struct gpmi_nand_data *this)
byte = block << chip->phys_erase_shift;
/* Send the command to read the conventional block mark. */
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
nand_read_page_op(chip, page, mtd->writesize, NULL, 0);
block_mark = chip->legacy.read_byte(chip);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
/*
* Check if the block is marked bad. If so, we need to mark it
@@ -1882,6 +1889,7 @@ static int gpmi_nand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops gpmi_nand_controller_ops = {
.attach_chip = gpmi_nand_attach_chip,
+ .setup_data_interface = gpmi_setup_data_interface,
};
static int gpmi_nand_init(struct gpmi_nand_data *this)
@@ -1900,8 +1908,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
nand_set_controller_data(chip, this);
nand_set_flash_node(chip, this->pdev->dev.of_node);
- chip->select_chip = gpmi_select_chip;
- chip->setup_data_interface = gpmi_setup_data_interface;
+ chip->legacy.select_chip = gpmi_select_chip;
chip->legacy.cmd_ctrl = gpmi_cmd_ctrl;
chip->legacy.dev_ready = gpmi_dev_ready;
chip->legacy.read_byte = gpmi_read_byte;
@@ -1924,7 +1931,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
- chip->dummy_controller.ops = &gpmi_nand_controller_ops;
+ chip->legacy.dummy_controller.ops = &gpmi_nand_controller_ops;
ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
if (ret)
goto err_out;
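gpmi illustrates the caller-side rule of the ->select_chip() deprecation:
code outside the core must not invoke the hook directly anymore and uses the
new nand_select_target()/nand_deselect_target() helpers instead. Because the
target index is now unsigned, the "restore previous selection" sites need an
explicit branch for a saved value of -1:

	/* before: chip->select_chip(chip, saved);  (saved may be -1) */
	if (saved >= 0)
		nand_select_target(chip, saved);
	else
		nand_deselect_target(chip);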
diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c
index f043938ee36b..f3f9aa160cff 100644
--- a/drivers/mtd/nand/raw/hisi504_nand.c
+++ b/drivers/mtd/nand/raw/hisi504_nand.c
@@ -783,7 +783,7 @@ static int hisi_nfc_probe(struct platform_device *pdev)
nand_set_controller_data(chip, host);
nand_set_flash_node(chip, np);
chip->legacy.cmdfunc = hisi_nfc_cmdfunc;
- chip->select_chip = hisi_nfc_select_chip;
+ chip->legacy.select_chip = hisi_nfc_select_chip;
chip->legacy.read_byte = hisi_nfc_read_byte;
chip->legacy.write_buf = hisi_nfc_write_buf;
chip->legacy.read_buf = hisi_nfc_read_buf;
@@ -799,7 +799,7 @@ static int hisi_nfc_probe(struct platform_device *pdev)
return ret;
}
- chip->dummy_controller.ops = &hisi_nfc_controller_ops;
+ chip->legacy.dummy_controller.ops = &hisi_nfc_controller_ops;
ret = nand_scan(chip, max_chips);
if (ret)
return ret;
diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h
index 04c2cf74eff3..fbf6ca015cd7 100644
--- a/drivers/mtd/nand/raw/internals.h
+++ b/drivers/mtd/nand/raw/internals.h
@@ -95,6 +95,39 @@ void nand_decode_ext_id(struct nand_chip *chip);
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo);
void sanitize_string(uint8_t *s, size_t len);
+static inline bool nand_has_exec_op(struct nand_chip *chip)
+{
+ if (!chip->controller || !chip->controller->ops ||
+ !chip->controller->ops->exec_op)
+ return false;
+
+ return true;
+}
+
+static inline int nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!nand_has_exec_op(chip))
+ return -ENOTSUPP;
+
+ if (WARN_ON(op->cs >= chip->numchips))
+ return -EINVAL;
+
+ return chip->controller->ops->exec_op(chip, op, false);
+}
+
+static inline bool nand_has_setup_data_iface(struct nand_chip *chip)
+{
+ if (!chip->controller || !chip->controller->ops ||
+ !chip->controller->ops->setup_data_interface)
+ return false;
+
+ if (chip->options & NAND_KEEP_TIMINGS)
+ return false;
+
+ return true;
+}
+
/* BBT functions */
int nand_markbad_bbt(struct nand_chip *chip, loff_t offs);
int nand_isreserved_bbt(struct nand_chip *chip, loff_t offs);
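These internals.h helpers are the core-side counterpart of the driver
conversions: nand_exec_op() refuses to run without a controller-provided
->exec_op() and sanity-checks op->cs, while nand_has_setup_data_iface()
returns false either when the hook is missing or when the driver set
NAND_KEEP_TIMINGS, which is exactly how the drivers above opt out of timing
reconfiguration. A hypothetical, simplified call site:

	struct nand_op_instr instrs[2];	/* CMD + WAITRDY, filled elsewhere */
	struct nand_operation op = {
		.cs = chip->cur_cs,
		.instrs = instrs,
		.ninstrs = ARRAY_SIZE(instrs),
	};
	int ret;

	ret = nand_exec_op(chip, &op);
	if (ret == -ENOTSUPP)
		ret = foo_legacy_fallback(chip);	/* hypothetical */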
diff --git a/drivers/mtd/nand/raw/jz4740_nand.c b/drivers/mtd/nand/raw/jz4740_nand.c
index fb59cfca11a7..f92ae5aa2a54 100644
--- a/drivers/mtd/nand/raw/jz4740_nand.c
+++ b/drivers/mtd/nand/raw/jz4740_nand.c
@@ -335,14 +335,14 @@ static int jz_nand_detect_bank(struct platform_device *pdev,
goto notfound_id;
/* Retrieve the IDs from the first chip. */
- chip->select_chip(chip, 0);
+ nand_select_target(chip, 0);
nand_reset_op(chip);
nand_readid_op(chip, 0, id, sizeof(id));
*nand_maf_id = id[0];
*nand_dev_id = id[1];
} else {
/* Detect additional chip. */
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
nand_reset_op(chip);
nand_readid_op(chip, 0, id, sizeof(id));
if (*nand_maf_id != id[0] || *nand_dev_id != id[1]) {
@@ -427,8 +427,8 @@ static int jz_nand_probe(struct platform_device *pdev)
chip->legacy.chip_delay = 50;
chip->legacy.cmd_ctrl = jz_nand_cmd_ctrl;
- chip->select_chip = jz_nand_select_chip;
- chip->dummy_controller.ops = &jz_nand_controller_ops;
+ chip->legacy.select_chip = jz_nand_select_chip;
+ chip->legacy.dummy_controller.ops = &jz_nand_controller_ops;
if (nand->busy_gpio)
chip->legacy.dev_ready = jz_nand_dev_ready;
diff --git a/drivers/mtd/nand/raw/jz4780_bch.c b/drivers/mtd/nand/raw/jz4780_bch.c
index 731c6051d91e..7201827809e9 100644
--- a/drivers/mtd/nand/raw/jz4780_bch.c
+++ b/drivers/mtd/nand/raw/jz4780_bch.c
@@ -136,8 +136,10 @@ static void jz4780_bch_read_parity(struct jz4780_bch *bch, void *buf,
switch (size8) {
case 3:
dest8[2] = (val >> 16) & 0xff;
+ /* fall through */
case 2:
dest8[1] = (val >> 8) & 0xff;
+ /* fall through */
case 1:
dest8[0] = val & 0xff;
break;
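The bare case labels in jz4780_bch_read_parity() cascade on purpose:
depending on size8 they write three, two, or one trailing byte(s). The added
comments document that intent and silence GCC's -Wimplicit-fallthrough. The
resulting switch, reconstructed from the hunk above:

	switch (size8) {
	case 3:
		dest8[2] = (val >> 16) & 0xff;
		/* fall through */
	case 2:
		dest8[1] = (val >> 8) & 0xff;
		/* fall through */
	case 1:
		dest8[0] = val & 0xff;
		break;
	}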
diff --git a/drivers/mtd/nand/raw/jz4780_nand.c b/drivers/mtd/nand/raw/jz4780_nand.c
index cdf22100ab77..22e58975f0d5 100644
--- a/drivers/mtd/nand/raw/jz4780_nand.c
+++ b/drivers/mtd/nand/raw/jz4780_nand.c
@@ -279,7 +279,7 @@ static int jz4780_nand_init_chip(struct platform_device *pdev,
chip->legacy.IO_ADDR_W = cs->base + OFFSET_DATA;
chip->legacy.chip_delay = RB_DELAY_US;
chip->options = NAND_NO_SUBPAGE_WRITE;
- chip->select_chip = jz4780_nand_select_chip;
+ chip->legacy.select_chip = jz4780_nand_select_chip;
chip->legacy.cmd_ctrl = jz4780_nand_cmd_ctrl;
chip->ecc.mode = NAND_ECC_HW;
chip->controller = &nfc->controller;
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index abbb655fe154..086964f8d424 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -799,7 +799,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
* Scan to find existence of the device and get the type of NAND device:
* SMALL block or LARGE block.
*/
- nand_chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ nand_chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
res = nand_scan(nand_chip, 1);
if (res)
goto free_irq;
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index f2f2cdbb9d04..a2c5fdc875bd 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -924,7 +924,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
/* Find NAND device */
- chip->dummy_controller.ops = &lpc32xx_nand_controller_ops;
+ chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
res = nand_scan(chip, 1);
if (res)
goto release_dma;
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 650f2b490a05..84283c6bb0ff 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -378,7 +378,7 @@ struct marvell_nfc_caps {
* @dev: Parent device (used to print error messages)
* @regs: NAND controller registers
* @core_clk: Core clock
- * @reg_clk: Regiters clock
+ * @reg_clk: Registers clock
* @complete: Completion object to wait for NAND controller events
* @assigned_cs: Bitmask describing already assigned CS lines
* @chips: List containing all the NAND chips attached to
@@ -514,9 +514,14 @@ static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
}
-static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDSR);
writel_relaxed(int_mask, nfc->regs + NDSR);
+
+ return reg & int_mask;
}
static void marvell_nfc_force_byte_access(struct nand_chip *chip,
@@ -683,6 +688,7 @@ static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 pending;
int ret;
/* Timeout is expressed in ms */
@@ -695,8 +701,13 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
ret = wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(timeout_ms));
marvell_nfc_disable_int(nfc, NDCR_RDYM);
- marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
- if (!ret) {
+ pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+
+ /*
+ * In case the interrupt was not served in the required time frame, check
+ * whether the ISR was simply late or whether something actually went wrong.
+ */
+ if (!ret && !pending) {
dev_err(nfc->dev, "Timeout waiting for RB signal\n");
return -ETIMEDOUT;
}
@@ -704,7 +715,8 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
return 0;
}
-static void marvell_nfc_select_chip(struct nand_chip *chip, int die_nr)
+static void marvell_nfc_select_target(struct nand_chip *chip,
+ unsigned int die_nr)
{
struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
@@ -713,12 +725,6 @@ static void marvell_nfc_select_chip(struct nand_chip *chip, int die_nr)
if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die)
return;
- if (die_nr < 0 || die_nr >= marvell_nand->nsels) {
- nfc->selected_chip = NULL;
- marvell_nand->selected_die = -1;
- return;
- }
-
writel_relaxed(marvell_nand->ndtr0, nfc->regs + NDTR0);
writel_relaxed(marvell_nand->ndtr1, nfc->regs + NDTR1);
@@ -1024,13 +1030,13 @@ static int marvell_nfc_hw_ecc_hmg_do_read_page(struct nand_chip *chip,
}
ret = marvell_nfc_wait_cmdd(chip);
-
return ret;
}
static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
+ marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
true, page);
}
@@ -1043,6 +1049,7 @@ static int marvell_nfc_hw_ecc_hmg_read_page(struct nand_chip *chip, u8 *buf,
int max_bitflips = 0, ret;
u8 *raw_buf;
+ marvell_nfc_select_target(chip, chip->cur_cs);
marvell_nfc_enable_hw_ecc(chip);
marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi, false,
page);
@@ -1079,6 +1086,7 @@ static int marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
/* Invalidate page cache */
chip->pagebuf = -1;
+ marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_read_page(chip, chip->data_buf,
chip->oob_poi, true, page);
}
@@ -1142,6 +1150,7 @@ static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
const u8 *buf,
int oob_required, int page)
{
+ marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
true, page);
}
@@ -1152,6 +1161,7 @@ static int marvell_nfc_hw_ecc_hmg_write_page(struct nand_chip *chip,
{
int ret;
+ marvell_nfc_select_target(chip, chip->cur_cs);
marvell_nfc_enable_hw_ecc(chip);
ret = marvell_nfc_hw_ecc_hmg_do_write_page(chip, buf, chip->oob_poi,
false, page);
@@ -1175,6 +1185,7 @@ static int marvell_nfc_hw_ecc_hmg_write_oob_raw(struct nand_chip *chip,
memset(chip->data_buf, 0xFF, mtd->writesize);
+ marvell_nfc_select_target(chip, chip->cur_cs);
return marvell_nfc_hw_ecc_hmg_do_write_page(chip, chip->data_buf,
chip->oob_poi, true, page);
}
@@ -1194,6 +1205,8 @@ static int marvell_nfc_hw_ecc_bch_read_page_raw(struct nand_chip *chip, u8 *buf,
int ecc_len = lt->ecc_bytes;
int chunk;
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
if (oob_required)
memset(chip->oob_poi, 0xFF, mtd->oobsize);
@@ -1304,6 +1317,8 @@ static int marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
u32 failure_mask = 0;
int chunk, ret;
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
/*
* With BCH, OOB is not fully used (and thus not read entirely), not
* expected bytes could show up at the end of the OOB buffer if not
@@ -1448,6 +1463,8 @@ static int marvell_nfc_hw_ecc_bch_write_page_raw(struct nand_chip *chip,
lt->last_spare_bytes;
int chunk;
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
for (chunk = 0; chunk < lt->nchunks; chunk++) {
@@ -1559,6 +1576,8 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
int spare_len = lt->spare_bytes;
int chunk, ret;
+ marvell_nfc_select_target(chip, chip->cur_cs);
+
/* Spare data will be written anyway, so clear it to avoid garbage */
if (!oob_required)
memset(chip->oob_poi, 0xFF, mtd->oobsize);
@@ -2097,6 +2116,8 @@ static int marvell_nfc_exec_op(struct nand_chip *chip,
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ marvell_nfc_select_target(chip, op->cs);
+
if (nfc->caps->is_nfcv2)
return nand_op_parser_exec_op(chip, &marvell_nfcv2_op_parser,
op, check_only);
@@ -2495,6 +2516,8 @@ static int marvell_nand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops marvell_nand_controller_ops = {
.attach_chip = marvell_nand_attach_chip,
+ .exec_op = marvell_nfc_exec_op,
+ .setup_data_interface = marvell_nfc_setup_data_interface,
};
static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
@@ -2617,10 +2640,8 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
chip->controller = &nfc->controller;
nand_set_flash_node(chip, np);
- chip->exec_op = marvell_nfc_exec_op;
- chip->select_chip = marvell_nfc_select_chip;
- if (!of_property_read_bool(np, "marvell,nand-keep-config"))
- chip->setup_data_interface = marvell_nfc_setup_data_interface;
+ if (of_property_read_bool(np, "marvell,nand-keep-config"))
+ chip->options |= NAND_KEEP_TIMINGS;
mtd = nand_to_mtd(chip);
mtd->dev.parent = dev;
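Two independent marvell changes sit in this file. First, per-target selection
moves out of a ->select_chip() hook: every page accessor and the exec_op path
now call marvell_nfc_select_target(chip, chip->cur_cs) themselves. Second,
marvell_nfc_clear_int() now returns which of the requested NDSR bits were
actually latched, so marvell_nfc_wait_op() can tell a late-served interrupt
from a genuine timeout; only "no completion and no pending bit" is an error.
A simplified sketch of that disambiguation:

	ret = wait_for_completion_timeout(&nfc->complete,
					  msecs_to_jiffies(timeout_ms));
	pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
	if (!ret && !pending)
		return -ETIMEDOUT;	/* the RDY event never happened */

	return 0;	/* it did happen, possibly after the ISR window */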
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index 86a0aabe08df..062cd1eb2861 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -697,7 +697,7 @@ static int mpc5121_nfc_probe(struct platform_device *op)
chip->legacy.read_byte = mpc5121_nfc_read_byte;
chip->legacy.read_buf = mpc5121_nfc_read_buf;
chip->legacy.write_buf = mpc5121_nfc_write_buf;
- chip->select_chip = mpc5121_nfc_select_chip;
+ chip->legacy.select_chip = mpc5121_nfc_select_chip;
chip->legacy.set_features = nand_get_set_features_notsupp;
chip->legacy.get_features = nand_get_set_features_notsupp;
chip->bbt_options = NAND_BBT_USE_FLASH;
@@ -712,7 +712,7 @@ static int mpc5121_nfc_probe(struct platform_device *op)
return retval;
}
- chip->select_chip = ads5121_select_chip;
+ chip->legacy.select_chip = ads5121_select_chip;
}
/* Enable NFC clock */
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index 2bb0df1b7244..b6b4602f5132 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -1288,6 +1288,7 @@ static int mtk_nfc_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops mtk_nfc_controller_ops = {
.attach_chip = mtk_nfc_attach_chip,
+ .setup_data_interface = mtk_nfc_setup_data_interface,
};
static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
@@ -1333,13 +1334,12 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
nand->legacy.dev_ready = mtk_nfc_dev_ready;
- nand->select_chip = mtk_nfc_select_chip;
+ nand->legacy.select_chip = mtk_nfc_select_chip;
nand->legacy.write_byte = mtk_nfc_write_byte;
nand->legacy.write_buf = mtk_nfc_write_buf;
nand->legacy.read_byte = mtk_nfc_read_byte;
nand->legacy.read_buf = mtk_nfc_read_buf;
nand->legacy.cmd_ctrl = mtk_nfc_cmd_ctrl;
- nand->setup_data_interface = mtk_nfc_setup_data_interface;
/* set default mode in case dt entry is missing */
nand->ecc.mode = NAND_ECC_HW;
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 88bd3f6a499c..59554c187e01 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -1738,8 +1738,17 @@ static int mxcnd_attach_chip(struct nand_chip *chip)
return 0;
}
+static int mxcnd_setup_data_interface(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf)
+{
+ struct mxc_nand_host *host = nand_get_controller_data(chip);
+
+ return host->devtype_data->setup_data_interface(chip, chipnr, conf);
+}
+
static const struct nand_controller_ops mxcnd_controller_ops = {
.attach_chip = mxcnd_attach_chip,
+ .setup_data_interface = mxcnd_setup_data_interface,
};
static int mxcnd_probe(struct platform_device *pdev)
@@ -1800,7 +1809,8 @@ static int mxcnd_probe(struct platform_device *pdev)
if (err < 0)
return err;
- this->setup_data_interface = host->devtype_data->setup_data_interface;
+ if (!host->devtype_data->setup_data_interface)
+ this->options |= NAND_KEEP_TIMINGS;
if (host->devtype_data->needs_ip) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1828,7 +1838,7 @@ static int mxcnd_probe(struct platform_device *pdev)
this->ecc.bytes = host->devtype_data->eccbytes;
host->eccsize = host->devtype_data->eccsize;
- this->select_chip = host->devtype_data->select_chip;
+ this->legacy.select_chip = host->devtype_data->select_chip;
this->ecc.size = 512;
mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
@@ -1881,7 +1891,7 @@ static int mxcnd_probe(struct platform_device *pdev)
}
/* Scan the NAND device */
- this->dummy_controller.ops = &mxcnd_controller_ops;
+ this->legacy.dummy_controller.ops = &mxcnd_controller_ops;
err = nand_scan(this, is_imx25_nfc(host) ? 4 : 1);
if (err)
goto escan;
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 71050a0b31df..cca4b24d2ffa 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -45,14 +45,10 @@
#include <linux/io.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
+#include <linux/gpio/consumer.h>
#include "internals.h"
-static int nand_get_device(struct mtd_info *mtd, int new_state);
-
-static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops);
-
/* Define default oob placement schemes for large and small page devices */
static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
struct mtd_oob_region *oobregion)
@@ -213,10 +209,8 @@ static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
.free = nand_ooblayout_free_lp_hamming,
};
-static int check_offs_len(struct mtd_info *mtd,
- loff_t ofs, uint64_t len)
+static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
int ret = 0;
/* Start address must align on block boundary */
@@ -235,15 +229,54 @@ static int check_offs_len(struct mtd_info *mtd,
}
/**
+ * nand_select_target() - Select a NAND target (A.K.A. die)
+ * @chip: NAND chip object
+ * @cs: the CS line to select. Note that this CS id is always from the chip
+ * PoV, not the controller one
+ *
+ * Select a NAND target so that further operations executed on @chip go to the
+ * selected NAND target.
+ */
+void nand_select_target(struct nand_chip *chip, unsigned int cs)
+{
+ /*
+ * cs should always lie between 0 and chip->numchips, when that's not
+ * the case it's a bug and the caller should be fixed.
+ */
+ if (WARN_ON(cs > chip->numchips))
+ return;
+
+ chip->cur_cs = cs;
+
+ if (chip->legacy.select_chip)
+ chip->legacy.select_chip(chip, cs);
+}
+EXPORT_SYMBOL_GPL(nand_select_target);
+
+/**
+ * nand_deselect_target() - Deselect the currently selected target
+ * @chip: NAND chip object
+ *
+ * Deselect the currently selected NAND target. The result of operations
+ * executed on @chip after the target has been deselected is undefined.
+ */
+void nand_deselect_target(struct nand_chip *chip)
+{
+ if (chip->legacy.select_chip)
+ chip->legacy.select_chip(chip, -1);
+
+ chip->cur_cs = -1;
+}
+EXPORT_SYMBOL_GPL(nand_deselect_target);
+
+/**
* nand_release_device - [GENERIC] release chip
- * @mtd: MTD device structure
+ * @chip: NAND chip object
*
* Release chip lock and wake up anyone waiting on the device.
*/
-static void nand_release_device(struct mtd_info *mtd)
+static void nand_release_device(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
/* Release the controller and the chip */
spin_lock(&chip->controller->lock);
chip->controller->active = NULL;
@@ -289,6 +322,197 @@ static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
return 0;
}
+static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
+{
+ if (chip->legacy.block_bad)
+ return chip->legacy.block_bad(chip, ofs);
+
+ return nand_block_bad(chip, ofs);
+}
+
+/**
+ * panic_nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: the nand chip descriptor
+ * @new_state: the state which is requested
+ *
+ * Used when in panic, no locks are taken.
+ */
+static void panic_nand_get_device(struct nand_chip *chip, int new_state)
+{
+ /* Hardware controller shared among independent devices */
+ chip->controller->active = chip;
+ chip->state = new_state;
+}
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: NAND chip structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int
+nand_get_device(struct nand_chip *chip, int new_state)
+{
+ spinlock_t *lock = &chip->controller->lock;
+ wait_queue_head_t *wq = &chip->controller->wq;
+ DECLARE_WAITQUEUE(wait, current);
+retry:
+ spin_lock(lock);
+
+ /* Hardware controller shared among independent devices */
+ if (!chip->controller->active)
+ chip->controller->active = chip;
+
+ if (chip->controller->active == chip && chip->state == FL_READY) {
+ chip->state = new_state;
+ spin_unlock(lock);
+ return 0;
+ }
+ if (new_state == FL_PM_SUSPENDED) {
+ if (chip->controller->active->state == FL_PM_SUSPENDED) {
+ chip->state = FL_PM_SUSPENDED;
+ spin_unlock(lock);
+ return 0;
+ }
+ }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(wq, &wait);
+ spin_unlock(lock);
+ schedule();
+ remove_wait_queue(wq, &wait);
+ goto retry;
+}
+
+/**
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @chip: NAND chip object
+ *
+ * Check if the device is write protected. The function expects that the
+ * device is already selected.
+ */
+static int nand_check_wp(struct nand_chip *chip)
+{
+ u8 status;
+ int ret;
+
+ /* Broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
+ /* Check the WP bit */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_WP ? 0 : 1;
+}
+
+/**
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
+ */
+static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ /*
+ * Initialise to all 0xFF, to avoid the possibility of left over OOB
+ * data from a previous OOB read.
+ */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB:
+ ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
+ ops->ooboffs, len);
+ BUG_ON(ret);
+ return oob + len;
+
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+/**
+ * nand_do_write_oob - [MTD Interface] NAND write out-of-band
+ * @chip: NAND chip object
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * NAND write out-of-band.
+ */
+static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int chipnr, page, status, len;
+
+ pr_debug("%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int)to, (int)ops->ooblen);
+
+ len = mtd_oobavail(mtd, ops);
+
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + ops->ooblen) > len) {
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ chipnr = (int)(to >> chip->chip_shift);
+
+ /*
+ * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
+ * of my DiskOnChip 2000 test units) will clear the whole data page too
+ * if we don't do this. I have no clue why, but I seem to have 'fixed'
+ * it in the doc2000 driver in August 1999. dwmw2.
+ */
+ nand_reset(chip, chipnr);
+
+ nand_select_target(chip, chipnr);
+
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
+
+ /* Check if it is write protected */
+ if (nand_check_wp(chip)) {
+ nand_deselect_target(chip);
+ return -EROFS;
+ }
+
+ /* Invalidate the page cache if we write to the cached page */
+ if (page == chip->pagebuf)
+ chip->pagebuf = -1;
+
+ nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
+
+ if (ops->mode == MTD_OPS_RAW)
+ status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
+ else
+ status = chip->ecc.write_oob(chip, page & chip->pagemask);
+
+ nand_deselect_target(chip);
+
+ if (status)
+ return status;
+
+ ops->oobretlen = ops->ooblen;
+
+ return 0;
+}
+
/**
* nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
* @chip: NAND chip object
@@ -320,7 +544,7 @@ static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
ofs += mtd->erasesize - mtd->writesize;
do {
- res = nand_do_write_oob(mtd, ofs, &ops);
+ res = nand_do_write_oob(chip, ofs, &ops);
if (!ret)
ret = res;
@@ -344,17 +568,9 @@ int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
return nand_default_block_markbad(chip, ofs);
}
-static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
-{
- if (chip->legacy.block_bad)
- return chip->legacy.block_bad(chip, ofs);
-
- return nand_block_bad(chip, ofs);
-}
-
/**
* nand_block_markbad_lowlevel - mark a block bad
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @ofs: offset from device start
*
* This function performs the generic NAND bad block marking steps (i.e., bad
@@ -371,9 +587,9 @@ static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
* Note that we retain the first error encountered in (2) or (3), finish the
* procedures, and dump the error in the end.
*/
-static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
+static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
int res, ret = 0;
if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
@@ -386,9 +602,9 @@ static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
nand_erase_nand(chip, &einfo, 0);
/* Write bad block marker to OOB */
- nand_get_device(mtd, FL_WRITING);
+ nand_get_device(chip, FL_WRITING);
ret = nand_markbad_bbm(chip, ofs);
- nand_release_device(mtd);
+ nand_release_device(chip);
}
/* Mark block bad in BBT */
@@ -405,31 +621,6 @@ static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
}
/**
- * nand_check_wp - [GENERIC] check if the chip is write protected
- * @mtd: MTD device structure
- *
- * Check, if the device is write protected. The function expects, that the
- * device is already selected.
- */
-static int nand_check_wp(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- u8 status;
- int ret;
-
- /* Broken xD cards report WP despite being writable */
- if (chip->options & NAND_BROKEN_XD)
- return 0;
-
- /* Check the WP bit */
- ret = nand_status_op(chip, &status);
- if (ret)
- return ret;
-
- return status & NAND_STATUS_WP ? 0 : 1;
-}
-
-/**
* nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
* @mtd: MTD device structure
* @ofs: offset from device start
@@ -448,17 +639,15 @@ static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
/**
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @ofs: offset from device start
 * @allowbbt: 1, if it is allowed to access the bbt area
*
* Check, if the block is bad. Either by reading the bad block table or
* calling of the scan function.
*/
-static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
+static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
/* Return info from the table */
if (chip->bbt)
return nand_isbad_bbt(chip, ofs, allowbbt);
@@ -489,7 +678,7 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
u8 status = 0;
int ret;
- if (!chip->exec_op)
+ if (!nand_has_exec_op(chip))
return -ENOTSUPP;
/* Wait tWB before polling the STATUS reg. */
@@ -532,61 +721,34 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
/**
- * panic_nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
- * @mtd: MTD device structure
- * @new_state: the state which is requested
+ * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
+ * @chip: NAND chip structure
+ * @gpiod: GPIO descriptor of R/B pin
+ * @timeout_ms: Timeout in ms
*
- * Used when in panic, no locks are taken.
- */
-static void panic_nand_get_device(struct nand_chip *chip,
- struct mtd_info *mtd, int new_state)
-{
- /* Hardware controller shared among independent devices */
- chip->controller->active = chip;
- chip->state = new_state;
-}
-
-/**
- * nand_get_device - [GENERIC] Get chip for selected access
- * @mtd: MTD device structure
- * @new_state: the state which is requested
+ * Poll the R/B GPIO pin until it becomes ready. If that does not happen
+ * within the specified timeout, -ETIMEDOUT is returned.
*
- * Get the device and lock it for exclusive access
+ * This helper is intended to be used when the controller has access to the
+ * NAND R/B pin over GPIO.
+ *
+ * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
*/
-static int
-nand_get_device(struct mtd_info *mtd, int new_state)
+int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
+ unsigned long timeout_ms)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- spinlock_t *lock = &chip->controller->lock;
- wait_queue_head_t *wq = &chip->controller->wq;
- DECLARE_WAITQUEUE(wait, current);
-retry:
- spin_lock(lock);
+ /* Wait until R/B pin indicates chip is ready or timeout occurs */
+ timeout_ms = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ if (gpiod_get_value_cansleep(gpiod))
+ return 0;
- /* Hardware controller shared among independent devices */
- if (!chip->controller->active)
- chip->controller->active = chip;
+ cond_resched();
+ } while (time_before(jiffies, timeout_ms));
- if (chip->controller->active == chip && chip->state == FL_READY) {
- chip->state = new_state;
- spin_unlock(lock);
- return 0;
- }
- if (new_state == FL_PM_SUSPENDED) {
- if (chip->controller->active->state == FL_PM_SUSPENDED) {
- chip->state = FL_PM_SUSPENDED;
- spin_unlock(lock);
- return 0;
- }
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(wq, &wait);
- spin_unlock(lock);
- schedule();
- remove_wait_queue(wq, &wait);
- goto retry;
-}
+ return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
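A sketch of how a controller driver owning an R/B GPIO might call this helper from its instruction dispatch; struct my_ctrl, its rb_gpio field and my_ctrl_exec_instr() are hypothetical:

	struct my_ctrl {			/* hypothetical driver state */
		struct gpio_desc *rb_gpio;
	};

	static int my_ctrl_exec_instr(struct nand_chip *chip,
				      const struct nand_op_instr *instr)
	{
		struct my_ctrl *ctrl = nand_get_controller_data(chip);

		switch (instr->type) {
		case NAND_OP_WAITRDY_INSTR:
			return nand_gpio_waitrdy(chip, ctrl->rb_gpio,
						 instr->ctx.waitrdy.timeout_ms);
		default:
			return -ENOTSUPP;	/* other instruction types elided */
		}
	}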
/**
* panic_nand_wait - [GENERIC] wait until the command is done
@@ -645,7 +807,7 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
int ret;
- if (!chip->setup_data_interface)
+ if (!nand_has_setup_data_iface(chip))
return 0;
/*
@@ -663,7 +825,8 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
*/
onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
- ret = chip->setup_data_interface(chip, chipnr, &chip->data_interface);
+ ret = chip->controller->ops->setup_data_interface(chip, chipnr,
+ &chip->data_interface);
if (ret)
pr_err("Failed to configure data interface to SDR timing mode 0\n");
@@ -690,21 +853,22 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
};
int ret;
- if (!chip->setup_data_interface)
+ if (!nand_has_setup_data_iface(chip))
return 0;
/* Change the mode on the chip side (if supported by the NAND chip) */
if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
tmode_param);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
if (ret)
return ret;
}
/* Change the mode on the controller side */
- ret = chip->setup_data_interface(chip, chipnr, &chip->data_interface);
+ ret = chip->controller->ops->setup_data_interface(chip, chipnr,
+ &chip->data_interface);
if (ret)
return ret;
@@ -713,10 +877,10 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
return 0;
memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
tmode_param);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
if (ret)
goto err_reset_chip;
@@ -734,9 +898,9 @@ err_reset_chip:
* timing mode.
*/
nand_reset_data_interface(chip, chipnr);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
nand_reset_op(chip);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
return ret;
}
@@ -759,7 +923,7 @@ static int nand_init_data_interface(struct nand_chip *chip)
{
int modes, mode, ret;
- if (!chip->setup_data_interface)
+ if (!nand_has_setup_data_iface(chip))
return 0;
/*
@@ -785,7 +949,7 @@ static int nand_init_data_interface(struct nand_chip *chip)
* Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the
* controller supports the requested timings.
*/
- ret = chip->setup_data_interface(chip,
+ ret = chip->controller->ops->setup_data_interface(chip,
NAND_DATA_IFACE_CHECK_ONLY,
&chip->data_interface);
if (!ret) {
@@ -866,7 +1030,7 @@ static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
PSEC_TO_NSEC(sdr->tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
/* Drop the DATA_IN instruction if len is set to 0. */
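All NAND_OPERATION() users below gain a first argument: the macro now records which chip-select the operation targets, taken from chip->cur_cs, which nand_select_target() maintains. A minimal sketch (reset_example is hypothetical):

	static int reset_example(struct nand_chip *chip)
	{
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, 0),
		};
		/* The operation is bound to the currently selected die. */
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}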
@@ -909,7 +1073,7 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
PSEC_TO_NSEC(sdr->tRR_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
/* Drop the DATA_IN instruction if len is set to 0. */
@@ -955,7 +1119,7 @@ int nand_read_page_op(struct nand_chip *chip, unsigned int page,
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
if (mtd->writesize > 512)
return nand_lp_exec_read_page_op(chip, page,
offset_in_page, buf,
@@ -994,7 +1158,7 @@ int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
if (len && !buf)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1004,7 +1168,7 @@ int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
PSEC_TO_NSEC(sdr->tRR_min)),
NAND_OP_8BIT_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
@@ -1049,7 +1213,7 @@ int nand_change_read_column_op(struct nand_chip *chip,
if (mtd->writesize <= 512)
return -ENOTSUPP;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
u8 addrs[2] = {};
@@ -1060,7 +1224,7 @@ int nand_change_read_column_op(struct nand_chip *chip,
PSEC_TO_NSEC(sdr->tCCS_min)),
NAND_OP_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
@@ -1108,7 +1272,7 @@ int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
if (offset_in_oob + len > mtd->oobsize)
return -EINVAL;
- if (chip->exec_op)
+ if (nand_has_exec_op(chip))
return nand_read_page_op(chip, page,
mtd->writesize + offset_in_oob,
buf, len);
@@ -1142,7 +1306,7 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
int ret;
u8 status;
@@ -1221,7 +1385,7 @@ int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
- if (chip->exec_op)
+ if (nand_has_exec_op(chip))
return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
len, false);
@@ -1248,7 +1412,7 @@ int nand_prog_page_end_op(struct nand_chip *chip)
int ret;
u8 status;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1256,7 +1420,7 @@ int nand_prog_page_end_op(struct nand_chip *chip)
PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
ret = nand_exec_op(chip, &op);
if (ret)
@@ -1307,7 +1471,7 @@ int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
if (offset_in_page + len > mtd->writesize + mtd->oobsize)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
len, true);
} else {
@@ -1355,7 +1519,7 @@ int nand_change_write_column_op(struct nand_chip *chip,
if (mtd->writesize <= 512)
return -ENOTSUPP;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
u8 addrs[2];
@@ -1364,7 +1528,7 @@ int nand_change_write_column_op(struct nand_chip *chip,
NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
NAND_OP_DATA_OUT(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
int ret;
ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
@@ -1410,7 +1574,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
if (len && !buf)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1418,7 +1582,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
NAND_OP_8BIT_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
/* Drop the DATA_IN instruction if len is set to 0. */
if (!len)
@@ -1449,7 +1613,7 @@ EXPORT_SYMBOL_GPL(nand_readid_op);
*/
int nand_status_op(struct nand_chip *chip, u8 *status)
{
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1457,7 +1621,7 @@ int nand_status_op(struct nand_chip *chip, u8 *status)
PSEC_TO_NSEC(sdr->tADL_min)),
NAND_OP_8BIT_DATA_IN(1, status, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
if (!status)
op.ninstrs--;
@@ -1486,11 +1650,11 @@ EXPORT_SYMBOL_GPL(nand_status_op);
*/
int nand_exit_status_op(struct nand_chip *chip)
{
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_READ0, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -1518,7 +1682,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
int ret;
u8 status;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
u8 addrs[3] = { page, page >> 8, page >> 16 };
@@ -1529,7 +1693,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
PSEC_TO_MSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
if (chip->options & NAND_ROW_ADDR_3)
instrs[1].ctx.addr.naddrs++;
@@ -1577,7 +1741,7 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
const u8 *params = data;
int i, ret;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1587,7 +1751,7 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -1624,7 +1788,7 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature,
u8 *params = data;
int i;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
@@ -1635,7 +1799,7 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature,
NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
data, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -1650,12 +1814,12 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature,
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
unsigned int delay_ns)
{
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
PSEC_TO_NSEC(delay_ns)),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -1681,14 +1845,14 @@ static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
*/
int nand_reset_op(struct nand_chip *chip)
{
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
const struct nand_sdr_timings *sdr =
nand_get_sdr_timings(&chip->data_interface);
struct nand_op_instr instrs[] = {
NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -1718,11 +1882,11 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
if (!len || !buf)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_DATA_IN(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
instrs[0].ctx.data.force_8bit = force_8bit;
@@ -1762,11 +1926,11 @@ int nand_write_data_op(struct nand_chip *chip, const void *buf,
if (!len || !buf)
return -EINVAL;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_DATA_OUT(len, buf, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
instrs[0].ctx.data.force_8bit = force_8bit;
@@ -2224,11 +2388,12 @@ int nand_reset(struct nand_chip *chip, int chipnr)
/*
* The CS line has to be released before we can apply the new NAND
- * interface settings, hence this weird ->select_chip() dance.
+ * interface settings, hence this weird nand_select_target()
+ * nand_deselect_target() dance.
*/
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
ret = nand_reset_op(chip);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
if (ret)
return ret;
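nand_select_target()/nand_deselect_target() replace every open-coded ->select_chip(chip, n)/->select_chip(chip, -1) pair; the canonical bracketing, sketched:

	static int do_op_on_die(struct nand_chip *chip, int die)
	{
		int ret;

		nand_select_target(chip, die);
		ret = nand_reset_op(chip);	/* any chip access goes here */
		nand_deselect_target(chip);

		return ret;
	}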
@@ -2924,15 +3089,15 @@ static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
/**
* nand_transfer_oob - [INTERN] Transfer oob to client buffer
- * @mtd: mtd info structure
+ * @chip: NAND chip object
* @oob: oob destination address
* @ops: oob ops structure
* @len: size of oob to transfer
*/
-static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
+static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
struct mtd_oob_ops *ops, size_t len)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret;
switch (ops->mode) {
@@ -2989,17 +3154,17 @@ static void nand_wait_readrdy(struct nand_chip *chip)
/**
* nand_do_read_ops - [INTERN] Read data with ECC
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @from: offset to read from
* @ops: oob ops structure
*
* Internal function. Called with chip held.
*/
-static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
+static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
struct mtd_oob_ops *ops)
{
int chipnr, page, realpage, col, bytes, aligned, oob_required;
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
@@ -3012,7 +3177,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
bool ecc_fail = false;
chipnr = (int)(from >> chip->chip_shift);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
realpage = (int)(from >> chip->page_shift);
page = realpage & chip->pagemask;
@@ -3087,8 +3252,8 @@ read_retry:
int toread = min(oobreadlen, max_oobsize);
if (toread) {
- oob = nand_transfer_oob(mtd,
- oob, ops, toread);
+ oob = nand_transfer_oob(chip, oob, ops,
+ toread);
oobreadlen -= toread;
}
}
@@ -3143,11 +3308,11 @@ read_retry:
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
- chip->select_chip(chip, -1);
- chip->select_chip(chip, chipnr);
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
}
}
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
ops->retlen = ops->len - (size_t) readlen;
if (oob)
@@ -3318,18 +3483,18 @@ static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
/**
* nand_do_read_oob - [INTERN] NAND read out-of-band
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @from: offset to read from
* @ops: oob operations description structure
*
* NAND read out-of-band data from the spare area.
*/
-static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
+static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
struct mtd_oob_ops *ops)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int max_bitflips = 0;
int page, realpage, chipnr;
- struct nand_chip *chip = mtd_to_nand(mtd);
struct mtd_ecc_stats stats;
int readlen = ops->ooblen;
int len;
@@ -3344,7 +3509,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
len = mtd_oobavail(mtd, ops);
chipnr = (int)(from >> chip->chip_shift);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
/* Shift to get page */
realpage = (int)(from >> chip->page_shift);
@@ -3360,7 +3525,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
break;
len = min(len, readlen);
- buf = nand_transfer_oob(mtd, buf, ops, len);
+ buf = nand_transfer_oob(chip, buf, ops, len);
nand_wait_readrdy(chip);
@@ -3377,11 +3542,11 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
- chip->select_chip(chip, -1);
- chip->select_chip(chip, chipnr);
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
}
}
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
ops->oobretlen = ops->ooblen - readlen;
@@ -3405,6 +3570,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
static int nand_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
ops->retlen = 0;
@@ -3414,14 +3580,14 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from,
ops->mode != MTD_OPS_RAW)
return -ENOTSUPP;
- nand_get_device(mtd, FL_READING);
+ nand_get_device(chip, FL_READING);
if (!ops->datbuf)
- ret = nand_do_read_oob(mtd, from, ops);
+ ret = nand_do_read_oob(chip, from, ops);
else
- ret = nand_do_read_ops(mtd, from, ops);
+ ret = nand_do_read_ops(chip, from, ops);
- nand_release_device(mtd);
+ nand_release_device(chip);
return ret;
}
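The mtd_oob_ops plumbing seen in nand_read_oob() is unchanged for callers; a sketch of reading a few spare-area bytes through the MTD API (read_some_oob and the length are illustrative):

	static int read_some_oob(struct mtd_info *mtd, loff_t offs, u8 *oobbuf)
	{
		struct mtd_oob_ops ops = {
			.mode	= MTD_OPS_AUTO_OOB,
			.ooblen	= 16,
			.oobbuf	= oobbuf,	/* datbuf left NULL: OOB-only read */
		};

		return mtd_read_oob(mtd, offs, &ops);
	}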
@@ -3749,7 +3915,6 @@ static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
/**
* nand_write_page - write one page
- * @mtd: MTD device structure
* @chip: NAND chip descriptor
* @offset: address offset within the page
* @data_len: length of actual data to be written
@@ -3758,10 +3923,11 @@ static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
* @page: page number to write
* @raw: use _raw version of write_page
*/
-static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int raw)
+static int nand_write_page(struct nand_chip *chip, uint32_t offset,
+ int data_len, const uint8_t *buf, int oob_required,
+ int page, int raw)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int status, subpage;
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
@@ -3785,59 +3951,21 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
return 0;
}
-/**
- * nand_fill_oob - [INTERN] Transfer client buffer to oob
- * @mtd: MTD device structure
- * @oob: oob data buffer
- * @len: oob data write length
- * @ops: oob ops structure
- */
-static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
- struct mtd_oob_ops *ops)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int ret;
-
- /*
- * Initialise to all 0xFF, to avoid the possibility of left over OOB
- * data from a previous OOB read.
- */
- memset(chip->oob_poi, 0xff, mtd->oobsize);
-
- switch (ops->mode) {
-
- case MTD_OPS_PLACE_OOB:
- case MTD_OPS_RAW:
- memcpy(chip->oob_poi + ops->ooboffs, oob, len);
- return oob + len;
-
- case MTD_OPS_AUTO_OOB:
- ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
- ops->ooboffs, len);
- BUG_ON(ret);
- return oob + len;
-
- default:
- BUG();
- }
- return NULL;
-}
-
#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
/**
* nand_do_write_ops - [INTERN] NAND write with ECC
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @to: offset to write to
* @ops: oob operations description structure
*
* NAND write with ECC.
*/
-static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
+static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
struct mtd_oob_ops *ops)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int chipnr, realpage, page, column;
- struct nand_chip *chip = mtd_to_nand(mtd);
uint32_t writelen = ops->len;
uint32_t oobwritelen = ops->ooblen;
@@ -3862,10 +3990,10 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
column = to & (mtd->writesize - 1);
chipnr = (int)(to >> chip->chip_shift);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
/* Check, if it is write protected */
- if (nand_check_wp(mtd)) {
+ if (nand_check_wp(chip)) {
ret = -EIO;
goto err_out;
}
@@ -3913,14 +4041,14 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (unlikely(oob)) {
size_t len = min(oobwritelen, oobmaxlen);
- oob = nand_fill_oob(mtd, oob, len, ops);
+ oob = nand_fill_oob(chip, oob, len, ops);
oobwritelen -= len;
} else {
/* We still need to erase leftover OOB data */
memset(chip->oob_poi, 0xff, mtd->oobsize);
}
- ret = nand_write_page(mtd, chip, column, bytes, wbuf,
+ ret = nand_write_page(chip, column, bytes, wbuf,
oob_required, page,
(ops->mode == MTD_OPS_RAW));
if (ret)
@@ -3938,8 +4066,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
/* Check, if we cross a chip boundary */
if (!page) {
chipnr++;
- chip->select_chip(chip, -1);
- chip->select_chip(chip, chipnr);
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
}
}
@@ -3948,7 +4076,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
ops->oobretlen = ops->ooblen;
err_out:
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
return ret;
}
@@ -3972,9 +4100,9 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
int ret;
/* Grab the device */
- panic_nand_get_device(chip, mtd, FL_WRITING);
+ panic_nand_get_device(chip, FL_WRITING);
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
/* Wait for the device to get ready */
panic_nand_wait(chip, 400);
@@ -3984,81 +4112,13 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
ops.datbuf = (uint8_t *)buf;
ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_write_ops(mtd, to, &ops);
+ ret = nand_do_write_ops(chip, to, &ops);
*retlen = ops.retlen;
return ret;
}
/**
- * nand_do_write_oob - [MTD Interface] NAND write out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
- *
- * NAND write out-of-band.
- */
-static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
-{
- int chipnr, page, status, len;
- struct nand_chip *chip = mtd_to_nand(mtd);
-
- pr_debug("%s: to = 0x%08x, len = %i\n",
- __func__, (unsigned int)to, (int)ops->ooblen);
-
- len = mtd_oobavail(mtd, ops);
-
- /* Do not allow write past end of page */
- if ((ops->ooboffs + ops->ooblen) > len) {
- pr_debug("%s: attempt to write past end of page\n",
- __func__);
- return -EINVAL;
- }
-
- chipnr = (int)(to >> chip->chip_shift);
-
- /*
- * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
- * of my DiskOnChip 2000 test units) will clear the whole data page too
- * if we don't do this. I have no clue why, but I seem to have 'fixed'
- * it in the doc2000 driver in August 1999. dwmw2.
- */
- nand_reset(chip, chipnr);
-
- chip->select_chip(chip, chipnr);
-
- /* Shift to get page */
- page = (int)(to >> chip->page_shift);
-
- /* Check, if it is write protected */
- if (nand_check_wp(mtd)) {
- chip->select_chip(chip, -1);
- return -EROFS;
- }
-
- /* Invalidate the page cache, if we write to the cached page */
- if (page == chip->pagebuf)
- chip->pagebuf = -1;
-
- nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
-
- if (ops->mode == MTD_OPS_RAW)
- status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
- else
- status = chip->ecc.write_oob(chip, page & chip->pagemask);
-
- chip->select_chip(chip, -1);
-
- if (status)
- return status;
-
- ops->oobretlen = ops->ooblen;
-
- return 0;
-}
-
-/**
* nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
* @mtd: MTD device structure
* @to: offset to write to
@@ -4067,11 +4127,12 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
int ret = -ENOTSUPP;
ops->retlen = 0;
- nand_get_device(mtd, FL_WRITING);
+ nand_get_device(chip, FL_WRITING);
switch (ops->mode) {
case MTD_OPS_PLACE_OOB:
@@ -4084,12 +4145,12 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to,
}
if (!ops->datbuf)
- ret = nand_do_write_oob(mtd, to, ops);
+ ret = nand_do_write_oob(chip, to, ops);
else
- ret = nand_do_write_ops(mtd, to, ops);
+ ret = nand_do_write_ops(chip, to, ops);
out:
- nand_release_device(mtd);
+ nand_release_device(chip);
return ret;
}
@@ -4133,7 +4194,6 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
int allowbbt)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
int page, status, pages_per_block, ret, chipnr;
loff_t len;
@@ -4141,11 +4201,11 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
__func__, (unsigned long long)instr->addr,
(unsigned long long)instr->len);
- if (check_offs_len(mtd, instr->addr, instr->len))
+ if (check_offs_len(chip, instr->addr, instr->len))
return -EINVAL;
/* Grab the lock and see if the device is available */
- nand_get_device(mtd, FL_ERASING);
+ nand_get_device(chip, FL_ERASING);
/* Shift to get first page */
page = (int)(instr->addr >> chip->page_shift);
@@ -4155,10 +4215,10 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
/* Select the NAND device */
- chip->select_chip(chip, chipnr);
+ nand_select_target(chip, chipnr);
/* Check, if it is write protected */
- if (nand_check_wp(mtd)) {
+ if (nand_check_wp(chip)) {
pr_debug("%s: device is write protected!\n",
__func__);
ret = -EIO;
@@ -4170,7 +4230,7 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
while (len) {
/* Check if we have a bad block, we do not erase bad blocks! */
- if (nand_block_checkbad(mtd, ((loff_t) page) <<
+ if (nand_block_checkbad(chip, ((loff_t) page) <<
chip->page_shift, allowbbt)) {
pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
__func__, page);
@@ -4209,8 +4269,8 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
/* Check, if we cross a chip boundary */
if (len && !(page & chip->pagemask)) {
chipnr++;
- chip->select_chip(chip, -1);
- chip->select_chip(chip, chipnr);
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
}
}
@@ -4218,8 +4278,8 @@ int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
erase_exit:
/* Deselect and wake up anyone waiting on the device */
- chip->select_chip(chip, -1);
- nand_release_device(mtd);
+ nand_deselect_target(chip);
+ nand_release_device(chip);
/* Return more or less happy */
return ret;
@@ -4233,12 +4293,14 @@ erase_exit:
*/
static void nand_sync(struct mtd_info *mtd)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
pr_debug("%s: called\n", __func__);
/* Grab the lock and see if the device is available */
- nand_get_device(mtd, FL_SYNCING);
+ nand_get_device(chip, FL_SYNCING);
/* Release it and go back */
- nand_release_device(mtd);
+ nand_release_device(chip);
}
/**
@@ -4253,13 +4315,13 @@ static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
int ret;
/* Select the NAND device */
- nand_get_device(mtd, FL_READING);
- chip->select_chip(chip, chipnr);
+ nand_get_device(chip, FL_READING);
+ nand_select_target(chip, chipnr);
- ret = nand_block_checkbad(mtd, offs, 0);
+ ret = nand_block_checkbad(chip, offs, 0);
- chip->select_chip(chip, -1);
- nand_release_device(mtd);
+ nand_deselect_target(chip);
+ nand_release_device(chip);
return ret;
}
@@ -4281,7 +4343,7 @@ static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
return ret;
}
- return nand_block_markbad_lowlevel(mtd, ofs);
+ return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
}
/**
@@ -4326,7 +4388,7 @@ static int nand_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
*/
static int nand_suspend(struct mtd_info *mtd)
{
- return nand_get_device(mtd, FL_PM_SUSPENDED);
+ return nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
}
/**
@@ -4338,7 +4400,7 @@ static void nand_resume(struct mtd_info *mtd)
struct nand_chip *chip = mtd_to_nand(mtd);
if (chip->state == FL_PM_SUSPENDED)
- nand_release_device(mtd);
+ nand_release_device(chip);
else
pr_err("%s called for a chip which is not in suspended state\n",
__func__);
@@ -4351,19 +4413,20 @@ static void nand_resume(struct mtd_info *mtd)
*/
static void nand_shutdown(struct mtd_info *mtd)
{
- nand_get_device(mtd, FL_PM_SUSPENDED);
+ nand_get_device(mtd_to_nand(mtd), FL_PM_SUSPENDED);
}
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
- nand_legacy_set_defaults(chip);
-
+ /* If no controller is provided, use the dummy, legacy one. */
if (!chip->controller) {
- chip->controller = &chip->dummy_controller;
+ chip->controller = &chip->legacy.dummy_controller;
nand_controller_init(chip->controller);
}
+ nand_legacy_set_defaults(chip);
+
if (!chip->buf_align)
chip->buf_align = 1;
}
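With the dummy controller now hidden behind chip->legacy, a driver with real hardware attaches its own controller before scanning; a sketch, where my_controller and my_ctrl_ops are hypothetical:

	static struct nand_controller my_controller;

	static int my_attach_and_scan(struct nand_chip *chip)
	{
		nand_controller_init(&my_controller);
		my_controller.ops = &my_ctrl_ops;	/* hypothetical ops table */
		chip->controller = &my_controller;

		return nand_scan(chip, 1);		/* single-die setup assumed */
	}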
@@ -4627,7 +4690,7 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
return ret;
/* Select the device */
- chip->select_chip(chip, 0);
+ nand_select_target(chip, 0);
/* Send the command for reading device ID */
ret = nand_readid_op(chip, 0, id_data, 2);
@@ -4952,6 +5015,9 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
unsigned int i;
int ret;
+ /* Assume all dies are deselected when we enter nand_scan_ident(). */
+ chip->cur_cs = -1;
+
/* Enforce the right timings for reset/detection */
onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0);
@@ -4962,31 +5028,32 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
if (!mtd->name && mtd->dev.parent)
mtd->name = dev_name(mtd->dev.parent);
- if (chip->exec_op && !chip->select_chip) {
- pr_err("->select_chip() is mandatory when implementing ->exec_op()\n");
- return -EINVAL;
- }
+ /*
+	 * Start with chip->numchips = maxchips to let nand_select_target() do
+	 * its job. chip->numchips will be adjusted afterwards.
+ */
+ chip->numchips = maxchips;
+
+ /* Set the default functions */
+ nand_set_defaults(chip);
ret = nand_legacy_check_hooks(chip);
if (ret)
return ret;
- /* Set the default functions */
- nand_set_defaults(chip);
-
/* Read the flash type */
ret = nand_detect(chip, table);
if (ret) {
if (!(chip->options & NAND_SCAN_SILENT_NODEV))
pr_warn("No NAND device found\n");
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
return ret;
}
nand_maf_id = chip->id.data[0];
nand_dev_id = chip->id.data[1];
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
/* Check for a chip array */
for (i = 1; i < maxchips; i++) {
@@ -4995,15 +5062,15 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
/* See comment in nand_get_flash_type for reset */
nand_reset(chip, i);
- chip->select_chip(chip, i);
+ nand_select_target(chip, i);
/* Send the command for reading device ID */
nand_readid_op(chip, 0, id, sizeof(id));
/* Read manufacturer and device IDs */
if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
break;
}
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
}
if (i > 1)
pr_info("%d chips detected\n", i);
@@ -5021,9 +5088,9 @@ static void nand_scan_ident_cleanup(struct nand_chip *chip)
kfree(chip->parameters.onfi);
}
-static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
+static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
@@ -5379,9 +5446,9 @@ EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
* Requirement (2) ensures we can correct even when all bitflips are clumped
* in the same sector.
*/
-static bool nand_ecc_strength_good(struct mtd_info *mtd)
+static bool nand_ecc_strength_good(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
int corr, ds_corr;
@@ -5429,9 +5496,9 @@ static int nand_scan_tail(struct nand_chip *chip)
 * to explicitly select the relevant die when interacting with the NAND
* chip.
*/
- chip->select_chip(chip, 0);
+ nand_select_target(chip, 0);
ret = nand_manufacturer_init(chip);
- chip->select_chip(chip, -1);
+ nand_deselect_target(chip);
if (ret)
goto err_free_buf;
@@ -5546,7 +5613,7 @@ static int nand_scan_tail(struct nand_chip *chip)
ecc->algo = NAND_ECC_HAMMING;
case NAND_ECC_SOFT:
- ret = nand_set_ecc_soft_ops(mtd);
+ ret = nand_set_ecc_soft_ops(chip);
if (ret) {
ret = -EINVAL;
goto err_nand_manuf_cleanup;
@@ -5631,7 +5698,7 @@ static int nand_scan_tail(struct nand_chip *chip)
mtd->oobavail = ret;
/* ECC sanity check: warn if it's too weak */
- if (!nand_ecc_strength_good(mtd))
+ if (!nand_ecc_strength_good(chip))
pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
mtd->name);
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c
index 98a826838b60..1b722fe9213c 100644
--- a/drivers/mtd/nand/raw/nand_bbt.c
+++ b/drivers/mtd/nand/raw/nand_bbt.c
@@ -77,8 +77,6 @@
#define BBT_ENTRY_MASK 0x03
#define BBT_ENTRY_SHIFT 2
-static int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
-
static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
{
uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
@@ -160,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
/**
* read_bbt - [GENERIC] Read the bad block table starting from page
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @buf: temporary buffer
* @page: the starting page
* @num: the number of bbt descriptors to read
@@ -169,11 +167,11 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
*
* Read the bad block table starting from page.
*/
-static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
- struct nand_bbt_descr *td, int offs)
+static int read_bbt(struct nand_chip *this, uint8_t *buf, int page, int num,
+ struct nand_bbt_descr *td, int offs)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
int res, ret = 0, i, j, act = 0;
- struct nand_chip *this = mtd_to_nand(mtd);
size_t retlen, len, totlen;
loff_t from;
int bits = td->options & NAND_BBT_NRBITS_MSK;
@@ -253,7 +251,7 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
/**
* read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @chip: read the table for a specific chip, -1 read all chips; applies only if
@@ -262,16 +260,17 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
* Read the bad block table for all chips starting at a given page. We assume
* that the bbt bits are in consecutive order.
*/
-static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
+static int read_abs_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td, int chip)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
int res = 0, i;
if (td->options & NAND_BBT_PERCHIP) {
int offs = 0;
for (i = 0; i < this->numchips; i++) {
if (chip == -1 || chip == i)
- res = read_bbt(mtd, buf, td->pages[i],
+ res = read_bbt(this, buf, td->pages[i],
this->chipsize >> this->bbt_erase_shift,
td, offs);
if (res)
@@ -279,7 +278,7 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
offs += this->chipsize >> this->bbt_erase_shift;
}
} else {
- res = read_bbt(mtd, buf, td->pages[0],
+ res = read_bbt(this, buf, td->pages[0],
mtd->size >> this->bbt_erase_shift, td, 0);
if (res)
return res;
@@ -288,9 +287,10 @@ static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
}
/* BBT marker is in the first page, no OOB */
-static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
- struct nand_bbt_descr *td)
+static int scan_read_data(struct nand_chip *this, uint8_t *buf, loff_t offs,
+ struct nand_bbt_descr *td)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
size_t retlen;
size_t len;
@@ -303,7 +303,7 @@ static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
/**
* scan_read_oob - [GENERIC] Scan data+OOB region to buffer
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @offs: offset at which to scan
* @len: length of data region to read
@@ -312,9 +312,10 @@ static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
* page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
* ECC condition (error or bitflip). May quit on the first (non-ECC) error.
*/
-static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+static int scan_read_oob(struct nand_chip *this, uint8_t *buf, loff_t offs,
size_t len)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
struct mtd_oob_ops ops;
int res, ret = 0;
@@ -342,19 +343,20 @@ static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
return ret;
}
-static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
- size_t len, struct nand_bbt_descr *td)
+static int scan_read(struct nand_chip *this, uint8_t *buf, loff_t offs,
+ size_t len, struct nand_bbt_descr *td)
{
if (td->options & NAND_BBT_NO_OOB)
- return scan_read_data(mtd, buf, offs, td);
+ return scan_read_data(this, buf, offs, td);
else
- return scan_read_oob(mtd, buf, offs, len);
+ return scan_read_oob(this, buf, offs, len);
}
/* Scan write data with oob to flash */
-static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
+static int scan_write_bbt(struct nand_chip *this, loff_t offs, size_t len,
uint8_t *buf, uint8_t *oob)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
struct mtd_oob_ops ops;
ops.mode = MTD_OPS_PLACE_OOB;
@@ -367,8 +369,9 @@ static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
return mtd_write_oob(mtd, offs, &ops);
}
-static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
+static u32 bbt_get_ver_offs(struct nand_chip *this, struct nand_bbt_descr *td)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
u32 ver_offs = td->veroffs;
if (!(td->options & NAND_BBT_NO_OOB))
@@ -378,7 +381,7 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
/**
* read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
@@ -386,34 +389,35 @@ static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
* Read the bad block table(s) for all chips starting at a given page. We
* assume that the bbt bits are in consecutive order.
*/
-static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+static void read_abs_bbts(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
/* Read the primary version, if available */
if (td->options & NAND_BBT_VERSION) {
- scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
- mtd->writesize, td);
- td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
+ scan_read(this, buf, (loff_t)td->pages[0] << this->page_shift,
+ mtd->writesize, td);
+ td->version[0] = buf[bbt_get_ver_offs(this, td)];
pr_info("Bad block table at page %d, version 0x%02X\n",
td->pages[0], td->version[0]);
}
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
- scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
- mtd->writesize, md);
- md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
+ scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift,
+ mtd->writesize, md);
+ md->version[0] = buf[bbt_get_ver_offs(this, md)];
pr_info("Bad block table at page %d, version 0x%02X\n",
md->pages[0], md->version[0]);
}
}
/* Scan a given block partially */
-static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
loff_t offs, uint8_t *buf, int numpages)
{
+ struct mtd_info *mtd = nand_to_mtd(this);
struct mtd_oob_ops ops;
int j, ret;
@@ -443,7 +447,7 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @bd: descriptor for the good/bad block search pattern
* @chip: create the table for a specific chip, -1 read all chips; applies only
@@ -452,10 +456,10 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
* Create a bad block table by scanning the device for the given good/bad block
 * identification pattern.
*/
-static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
- struct nand_bbt_descr *bd, int chip)
+static int create_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *bd, int chip)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
int i, numblocks, numpages;
int startblock;
loff_t from;
@@ -491,7 +495,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
BUG_ON(bd->options & NAND_BBT_NO_OOB);
- ret = scan_block_fast(mtd, bd, from, buf, numpages);
+ ret = scan_block_fast(this, bd, from, buf, numpages);
if (ret < 0)
return ret;
@@ -509,7 +513,7 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
/**
* search_bbt - [GENERIC] scan the device for a specific bad block table
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
*
@@ -522,9 +526,10 @@ static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
*
* The bbt ident pattern resides in the oob area of the first page in a block.
*/
-static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
+static int search_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
int i, chips;
int startblock, block, dir;
int scanlen = mtd->writesize + mtd->oobsize;
@@ -561,11 +566,11 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
/* Read first page */
- scan_read(mtd, buf, offs, mtd->writesize, td);
+ scan_read(this, buf, offs, mtd->writesize, td);
if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
td->pages[i] = actblock << blocktopage;
if (td->options & NAND_BBT_VERSION) {
- offs = bbt_get_ver_offs(mtd, td);
+ offs = bbt_get_ver_offs(this, td);
td->version[i] = buf[offs];
}
break;
@@ -586,23 +591,23 @@ static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr
/**
* search_read_bbts - [GENERIC] scan the device for bad block table(s)
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
*
* Search and read the bad block table(s).
*/
-static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
+static void search_read_bbts(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td,
struct nand_bbt_descr *md)
{
/* Search the primary table */
- search_bbt(mtd, buf, td);
+ search_bbt(this, buf, td);
/* Search the mirror table */
if (md)
- search_bbt(mtd, buf, md);
+ search_bbt(this, buf, md);
}
/**
@@ -700,7 +705,7 @@ static void mark_bbt_block_bad(struct nand_chip *this,
/**
* write_bbt - [GENERIC] (Re)write the bad block table
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
@@ -708,11 +713,11 @@ static void mark_bbt_block_bad(struct nand_chip *this,
*
* (Re)write the bad block table.
*/
-static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
+static int write_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md,
int chipsel)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
struct erase_info einfo;
int i, res, chip = 0;
int bits, page, offs, numblocks, sft, sftmsk;
@@ -862,9 +867,9 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
continue;
}
- res = scan_write_bbt(mtd, to, len, buf,
- td->options & NAND_BBT_NO_OOB ? NULL :
- &buf[len]);
+ res = scan_write_bbt(this, to, len, buf,
+ td->options & NAND_BBT_NO_OOB ?
+ NULL : &buf[len]);
if (res < 0) {
pr_warn("nand_bbt: error while writing BBT block %d\n",
res);
@@ -887,22 +892,21 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/**
* nand_memory_bbt - [GENERIC] create a memory based bad block table
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @bd: descriptor for the good/bad block search pattern
*
* The function creates a memory based bbt by scanning the device for
* manufacturer / software marked good / bad blocks.
*/
-static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+static inline int nand_memory_bbt(struct nand_chip *this,
+ struct nand_bbt_descr *bd)
{
- struct nand_chip *this = mtd_to_nand(mtd);
-
- return create_bbt(mtd, this->data_buf, bd, -1);
+ return create_bbt(this, this->data_buf, bd, -1);
}
/**
* check_create - [GENERIC] create and write bbt(s) if necessary
- * @mtd: MTD device structure
+ * @this: the NAND device
* @buf: temporary buffer
* @bd: descriptor for the good/bad block search pattern
*
@@ -911,10 +915,10 @@ static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *b
* for the chip/device. Update is necessary if one of the tables is missing or
* the version nr. of one table is less than the other.
*/
-static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
+static int check_create(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *bd)
{
int i, chips, writeops, create, chipsel, res, res2;
- struct nand_chip *this = mtd_to_nand(mtd);
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
struct nand_bbt_descr *rd, *rd2;
@@ -971,7 +975,7 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
/* Create the table in memory by scanning the chip(s) */
if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
- create_bbt(mtd, buf, bd, chipsel);
+ create_bbt(this, buf, bd, chipsel);
td->version[i] = 1;
if (md)
@@ -980,7 +984,7 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
/* Read back first? */
if (rd) {
- res = read_abs_bbt(mtd, buf, rd, chipsel);
+ res = read_abs_bbt(this, buf, rd, chipsel);
if (mtd_is_eccerr(res)) {
/* Mark table as invalid */
rd->pages[i] = -1;
@@ -991,7 +995,7 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
}
/* If they weren't versioned, read both */
if (rd2) {
- res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
+ res2 = read_abs_bbt(this, buf, rd2, chipsel);
if (mtd_is_eccerr(res2)) {
/* Mark table as invalid */
rd2->pages[i] = -1;
@@ -1013,14 +1017,14 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
/* Write the bad block table to the device? */
if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
- res = write_bbt(mtd, buf, td, md, chipsel);
+ res = write_bbt(this, buf, td, md, chipsel);
if (res < 0)
return res;
}
/* Write the mirror bad block table to the device? */
if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
- res = write_bbt(mtd, buf, md, td, chipsel);
+ res = write_bbt(this, buf, md, td, chipsel);
if (res < 0)
return res;
}
@@ -1029,16 +1033,71 @@ static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_desc
}
/**
+ * nand_update_bbt - update bad block table(s)
+ * @this: the NAND device
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s).
+ */
+static int nand_update_bbt(struct nand_chip *this, loff_t offs)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int len, res = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chip = (int)(offs >> this->chip_shift);
+ chipsel = chip;
+ } else {
+ chip = 0;
+ chipsel = -1;
+ }
+
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
+ res = write_bbt(this, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, md, td, chipsel);
+ }
+
+ out:
+ kfree(buf);
+ return res;
+}
+
+/**
* mark_bbt_regions - [GENERIC] mark the bad block table regions
- * @mtd: MTD device structure
+ * @this: the NAND device
* @td: bad block table descriptor
*
* The bad block table regions are marked as "bad" to prevent accidental
* erasures / writes. The regions are identified by the mark 0x02.
*/
-static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
+static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
int i, j, chips, block, nrblocks, update;
uint8_t oldval;
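Moving nand_update_bbt() above its first user lets the forward declaration removed earlier in this diff go away. Its typical call site mirrors nand_markbad_bbt(); a sketch with a hypothetical wrapper:

	static int mark_and_sync_bbt(struct nand_chip *this, int block)
	{
		int ret = 0;

		bbt_mark_entry(this, block, BBT_BLOCK_WORN);
		if (this->bbt_options & NAND_BBT_USE_FLASH)
			ret = nand_update_bbt(this,
					      (loff_t)block << this->bbt_erase_shift);
		return ret;
	}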
@@ -1061,7 +1120,7 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
if ((oldval != BBT_BLOCK_RESERVED) &&
td->reserved_block_code)
- nand_update_bbt(mtd, (loff_t)block <<
+ nand_update_bbt(this, (loff_t)block <<
this->bbt_erase_shift);
continue;
}
@@ -1083,22 +1142,22 @@ static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
* bbts. This should only happen once.
*/
if (update && td->reserved_block_code)
- nand_update_bbt(mtd, (loff_t)(block - 1) <<
+ nand_update_bbt(this, (loff_t)(block - 1) <<
this->bbt_erase_shift);
}
}
/**
* verify_bbt_descr - verify the bad block description
- * @mtd: MTD device structure
+ * @this: the NAND device
* @bd: the table to verify
*
* This functions performs a few sanity checks on the bad block description
* table.
*/
-static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
u32 pattern_len;
u32 bits;
u32 table_size;
@@ -1138,7 +1197,7 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
/**
* nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
- * @mtd: MTD device structure
+ * @this: the NAND device
* @bd: descriptor for the good/bad block search pattern
*
* The function checks, if a bad block table(s) is/are already available. If
@@ -1148,9 +1207,9 @@ static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
* The bad block table memory is allocated here. It must be freed by calling
* the nand_free_bbt function.
*/
-static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
{
- struct nand_chip *this = mtd_to_nand(mtd);
+ struct mtd_info *mtd = nand_to_mtd(this);
int len, res;
uint8_t *buf;
struct nand_bbt_descr *td = this->bbt_td;
@@ -1170,14 +1229,14 @@ static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
* memory based bad block table.
*/
if (!td) {
- if ((res = nand_memory_bbt(mtd, bd))) {
+ if ((res = nand_memory_bbt(this, bd))) {
pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
goto err;
}
return 0;
}
- verify_bbt_descr(mtd, td);
- verify_bbt_descr(mtd, md);
+ verify_bbt_descr(this, td);
+ verify_bbt_descr(this, md);
/* Allocate a temporary buffer for one eraseblock incl. oob */
len = (1 << this->bbt_erase_shift);
@@ -1190,20 +1249,20 @@ static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
/* Is the bbt at a given page? */
if (td->options & NAND_BBT_ABSPAGE) {
- read_abs_bbts(mtd, buf, td, md);
+ read_abs_bbts(this, buf, td, md);
} else {
/* Search the bad block table using a pattern in oob */
- search_read_bbts(mtd, buf, td, md);
+ search_read_bbts(this, buf, td, md);
}
- res = check_create(mtd, buf, bd);
+ res = check_create(this, buf, bd);
if (res)
goto err;
/* Prevent the bbt regions from erasing / writing */
- mark_bbt_region(mtd, td);
+ mark_bbt_region(this, td);
if (md)
- mark_bbt_region(mtd, md);
+ mark_bbt_region(this, md);
vfree(buf);
return 0;
@@ -1214,61 +1273,6 @@ err:
return res;
}
-/**
- * nand_update_bbt - update bad block table(s)
- * @mtd: MTD device structure
- * @offs: the offset of the newly marked block
- *
- * The function updates the bad block table(s).
- */
-static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
-{
- struct nand_chip *this = mtd_to_nand(mtd);
- int len, res = 0;
- int chip, chipsel;
- uint8_t *buf;
- struct nand_bbt_descr *td = this->bbt_td;
- struct nand_bbt_descr *md = this->bbt_md;
-
- if (!this->bbt || !td)
- return -EINVAL;
-
- /* Allocate a temporary buffer for one eraseblock incl. oob */
- len = (1 << this->bbt_erase_shift);
- len += (len >> this->page_shift) * mtd->oobsize;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* Do we have a bbt per chip? */
- if (td->options & NAND_BBT_PERCHIP) {
- chip = (int)(offs >> this->chip_shift);
- chipsel = chip;
- } else {
- chip = 0;
- chipsel = -1;
- }
-
- td->version[chip]++;
- if (md)
- md->version[chip]++;
-
- /* Write the bad block table to the device? */
- if (td->options & NAND_BBT_WRITE) {
- res = write_bbt(mtd, buf, td, md, chipsel);
- if (res < 0)
- goto out;
- }
- /* Write the mirror bad block table to the device? */
- if (md && (md->options & NAND_BBT_WRITE)) {
- res = write_bbt(mtd, buf, md, td, chipsel);
- }
-
- out:
- kfree(buf);
- return res;
-}
-
/*
* Define some generic bad / good block scan patterns which are used
* while scanning a device for factory marked good / bad blocks.
@@ -1382,7 +1386,7 @@ int nand_create_bbt(struct nand_chip *this)
return ret;
}
- return nand_scan_bbt(nand_to_mtd(this), this->badblock_pattern);
+ return nand_scan_bbt(this, this->badblock_pattern);
}
EXPORT_SYMBOL(nand_create_bbt);
@@ -1433,7 +1437,6 @@ int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt)
*/
int nand_markbad_bbt(struct nand_chip *this, loff_t offs)
{
- struct mtd_info *mtd = nand_to_mtd(this);
int block, ret = 0;
block = (int)(offs >> this->bbt_erase_shift);
@@ -1443,7 +1446,7 @@ int nand_markbad_bbt(struct nand_chip *this, loff_t offs)
/* Update flash-based bad block table */
if (this->bbt_options & NAND_BBT_USE_FLASH)
- ret = nand_update_bbt(mtd, offs);
+ ret = nand_update_bbt(this, offs);
return ret;
}
diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c
index ac1b5c103968..343f477362d1 100644
--- a/drivers/mtd/nand/raw/nand_hynix.c
+++ b/drivers/mtd/nand/raw/nand_hynix.c
@@ -80,11 +80,11 @@ static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
{
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_CMD(cmd, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
@@ -98,12 +98,12 @@ static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
{
u16 column = ((u16)addr << 8) | addr;
- if (chip->exec_op) {
+ if (nand_has_exec_op(chip)) {
struct nand_op_instr instrs[] = {
NAND_OP_ADDR(1, &addr, 0),
NAND_OP_8BIT_DATA_OUT(1, &val, 0),
};
- struct nand_operation op = NAND_OPERATION(instrs);
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
return nand_exec_op(chip, &op);
}
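/*
 * Hedged sketch of nand_has_exec_op(), which replaces the direct
 * chip->exec_op test above; it is assumed to merely check that the
 * controller now exposes ->exec_op() through its ops:
 */
static inline bool example_has_exec_op(const struct nand_chip *chip)
{
	return chip->controller && chip->controller->ops &&
	       chip->controller->ops->exec_op;
}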
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index 5c26492c841d..38b5dc22cb30 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -107,6 +107,8 @@ int nand_jedec_detect(struct nand_chip *chip)
pr_warn("Invalid codeword size\n");
}
+ ret = 1;
+
free_jedec_param_page:
kfree(p);
return ret;
diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c
index c5ddc86cd98c..43575943f13b 100644
--- a/drivers/mtd/nand/raw/nand_legacy.c
+++ b/drivers/mtd/nand/raw/nand_legacy.c
@@ -165,15 +165,14 @@ static void nand_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
/**
* panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @timeo: Timeout
*
* Helper function for nand_wait_ready used when needing to wait in interrupt
* context.
*/
-static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
+static void panic_nand_wait_ready(struct nand_chip *chip, unsigned long timeo)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
int i;
/* Wait for the device to get ready */
@@ -193,11 +192,10 @@ static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
*/
void nand_wait_ready(struct nand_chip *chip)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
unsigned long timeo = 400;
if (in_interrupt() || oops_in_progress)
- return panic_nand_wait_ready(mtd, timeo);
+ return panic_nand_wait_ready(chip, timeo);
/* Wait until command is processed or timeout occurs */
timeo = jiffies + msecs_to_jiffies(timeo);
@@ -214,14 +212,13 @@ EXPORT_SYMBOL_GPL(nand_wait_ready);
/**
* nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @timeo: Timeout in ms
*
* Wait for status ready (i.e. command done) or timeout.
*/
-static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
+static void nand_wait_status_ready(struct nand_chip *chip, unsigned long timeo)
{
- register struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
timeo = jiffies + msecs_to_jiffies(timeo);
@@ -321,7 +318,7 @@ static void nand_command(struct nand_chip *chip, unsigned int command,
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
- nand_wait_status_ready(mtd, 250);
+ nand_wait_status_ready(chip, 250);
return;
/* This applies to read commands */
@@ -367,7 +364,7 @@ static void nand_ccs_delay(struct nand_chip *chip)
* Wait tCCS_min if it is correctly defined, otherwise wait 500ns
* (which should be safe for all NANDs).
*/
- if (chip->setup_data_interface)
+ if (nand_has_setup_data_iface(chip))
ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000);
else
ndelay(500);
@@ -458,7 +455,7 @@ static void nand_command_lp(struct nand_chip *chip, unsigned int command,
chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
NAND_NCE | NAND_CTRL_CHANGE);
/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
- nand_wait_status_ready(mtd, 250);
+ nand_wait_status_ready(chip, 250);
return;
case NAND_CMD_RNDOUT:
@@ -525,7 +522,6 @@ EXPORT_SYMBOL(nand_get_set_features_notsupp);
/**
* nand_wait - [DEFAULT] wait until the command is done
- * @mtd: MTD device structure
* @chip: NAND chip structure
*
* Wait for command done. This applies to erase and program only.
@@ -581,7 +577,7 @@ void nand_legacy_set_defaults(struct nand_chip *chip)
{
unsigned int busw = chip->options & NAND_BUSWIDTH_16;
- if (chip->exec_op)
+ if (nand_has_exec_op(chip))
return;
/* check for proper chip_delay setup, set 20us if not */
@@ -589,15 +585,15 @@ void nand_legacy_set_defaults(struct nand_chip *chip)
chip->legacy.chip_delay = 20;
/* check if a user-supplied command function was given */
- if (!chip->legacy.cmdfunc && !chip->exec_op)
+ if (!chip->legacy.cmdfunc)
chip->legacy.cmdfunc = nand_command;
/* check, if a user supplied wait function given */
if (chip->legacy.waitfunc == NULL)
chip->legacy.waitfunc = nand_wait;
- if (!chip->select_chip)
- chip->select_chip = nand_select_chip;
+ if (!chip->legacy.select_chip)
+ chip->legacy.select_chip = nand_select_chip;
/* If called twice, pointers that depend on busw may need to be reset */
if (!chip->legacy.read_byte || chip->legacy.read_byte == nand_read_byte)
@@ -625,14 +621,15 @@ int nand_legacy_check_hooks(struct nand_chip *chip)
* ->legacy.cmdfunc() is legacy and will only be used if ->exec_op() is
* not populated.
*/
- if (chip->exec_op)
+ if (nand_has_exec_op(chip))
return 0;
/*
* Default functions assigned for ->legacy.cmdfunc() and
- * ->select_chip() both expect ->legacy.cmd_ctrl() to be populated.
+ * ->legacy.select_chip() both expect ->legacy.cmd_ctrl() to be
+ * populated.
*/
- if ((!chip->legacy.cmdfunc || !chip->select_chip) &&
+ if ((!chip->legacy.cmdfunc || !chip->legacy.select_chip) &&
!chip->legacy.cmd_ctrl) {
pr_err("->legacy.cmd_ctrl() should be provided\n");
return -EINVAL;
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 358dcc957bb2..47d8cda547cf 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -33,6 +33,13 @@ static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
"MX30LF4G18AC",
"MX30LF4G28AC",
"MX60LF8G18AC",
+ "MX30UF1G18AC",
+ "MX30UF1G16AC",
+ "MX30UF2G18AC",
+ "MX30UF2G16AC",
+ "MX30UF4G18AC",
+ "MX30UF4G16AC",
+ "MX30UF4G28AC",
};
if (!chip->parameters.supports_set_get_features)
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index c452819f6123..933d1a629c51 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -443,7 +443,7 @@ static unsigned long total_wear = 0;
/* MTD structure for NAND controller */
static struct mtd_info *nsmtd;
-static int nandsim_debugfs_show(struct seq_file *m, void *private)
+static int nandsim_show(struct seq_file *m, void *private)
{
unsigned long wmin = -1, wmax = 0, avg;
unsigned long deciles[10], decile_max[10], tot = 0;
@@ -494,18 +494,7 @@ static int nandsim_debugfs_show(struct seq_file *m, void *private)
return 0;
}
-
-static int nandsim_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nandsim_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations dfs_fops = {
- .open = nandsim_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(nandsim);
/**
* nandsim_debugfs_create - initialize debugfs
@@ -531,7 +520,7 @@ static int nandsim_debugfs_create(struct nandsim *dev)
}
dent = debugfs_create_file("nandsim_wear_report", S_IRUSR,
- root, dev, &dfs_fops);
+ root, dev, &nandsim_fops);
if (IS_ERR_OR_NULL(dent)) {
NS_ERR("cannot create \"nandsim_wear_report\" debugfs entry\n");
return -1;
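/*
 * For reference, DEFINE_SHOW_ATTRIBUTE(nandsim) expands (roughly, per
 * include/linux/seq_file.h) to the boilerplate deleted above, with an
 * .owner field thrown in:
 */
static int nandsim_open(struct inode *inode, struct file *file)
{
	return single_open(file, nandsim_show, inode->i_private);
}

static const struct file_operations nandsim_fops = {
	.owner   = THIS_MODULE,
	.open    = nandsim_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};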
@@ -2304,7 +2293,7 @@ static int __init ns_init_module(void)
if ((retval = parse_gravepages()) != 0)
goto error;
- chip->dummy_controller.ops = &ns_controller_ops;
+ chip->legacy.dummy_controller.ops = &ns_controller_ops;
retval = nand_scan(chip, 1);
if (retval) {
NS_ERR("Could not scan NAND Simulator device\n");
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index d49a7a17146c..9857e0e5acd4 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -146,7 +146,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->legacy.IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
chip->legacy.cmd_ctrl = ndfc_hwcontrol;
chip->legacy.dev_ready = ndfc_ready;
- chip->select_chip = ndfc_select_chip;
+ chip->legacy.select_chip = ndfc_select_chip;
chip->legacy.chip_delay = 50;
chip->controller = &ndfc->ndfc_control;
chip->legacy.read_buf = ndfc_read_buf;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 886d05c391ef..68e8b9f7f372 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1944,7 +1944,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case NAND_OMAP_PREFETCH_DMA:
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- info->dma = dma_request_chan(dev, "rxtx");
+ info->dma = dma_request_chan(dev->parent, "rxtx");
if (IS_ERR(info->dma)) {
dev_err(dev, "DMA engine request failed\n");
diff --git a/drivers/mtd/nand/raw/plat_nand.c b/drivers/mtd/nand/raw/plat_nand.c
index 86c536ddaf24..a994b76daa50 100644
--- a/drivers/mtd/nand/raw/plat_nand.c
+++ b/drivers/mtd/nand/raw/plat_nand.c
@@ -63,7 +63,7 @@ static int plat_nand_probe(struct platform_device *pdev)
data->chip.legacy.IO_ADDR_W = data->io_base;
data->chip.legacy.cmd_ctrl = pdata->ctrl.cmd_ctrl;
data->chip.legacy.dev_ready = pdata->ctrl.dev_ready;
- data->chip.select_chip = pdata->ctrl.select_chip;
+ data->chip.legacy.select_chip = pdata->ctrl.select_chip;
data->chip.legacy.write_buf = pdata->ctrl.write_buf;
data->chip.legacy.read_buf = pdata->ctrl.read_buf;
data->chip.legacy.chip_delay = pdata->chip.chip_delay;
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 699d3cf49c6d..46c62a31fa46 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2804,7 +2804,7 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
mtd->dev.parent = dev;
chip->legacy.cmdfunc = qcom_nandc_command;
- chip->select_chip = qcom_nandc_select_chip;
+ chip->legacy.select_chip = qcom_nandc_select_chip;
chip->legacy.read_byte = qcom_nandc_read_byte;
chip->legacy.read_buf = qcom_nandc_read_buf;
chip->legacy.write_buf = qcom_nandc_write_buf;
diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c
index 39be65b35ac2..c01422d953dd 100644
--- a/drivers/mtd/nand/raw/r852.c
+++ b/drivers/mtd/nand/raw/r852.c
@@ -151,8 +151,9 @@ static void r852_dma_done(struct r852_device *dev, int error)
dev->dma_stage = 0;
if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
- pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
- dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ dma_unmap_single(&dev->pci_dev->dev, dev->phys_dma_addr,
+ R852_DMA_LEN,
+ dev->dma_dir ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
/*
@@ -197,11 +198,10 @@ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
bounce = 1;
if (!bounce) {
- dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
+ dev->phys_dma_addr = dma_map_single(&dev->pci_dev->dev, buf,
R852_DMA_LEN,
- (do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
-
- if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
+ do_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pci_dev->dev, dev->phys_dma_addr))
bounce = 1;
}
@@ -835,7 +835,7 @@ static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
pci_set_master(pci_dev);
- error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ error = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (error)
goto error2;
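/*
 * Editorial summary of the legacy-PCI-DMA to generic-DMA conversion in
 * the r852 hunks above and below; semantics are unchanged, the dma_*
 * calls just take a struct device and, for allocations, an explicit gfp_t:
 *
 *   pci_set_dma_mask(pdev, m)         ->  dma_set_mask(&pdev->dev, m)
 *   pci_map_single / pci_unmap_single ->  dma_map_single / dma_unmap_single
 *   pci_dma_mapping_error(pdev, a)    ->  dma_mapping_error(&pdev->dev, a)
 *   pci_alloc_consistent(...)         ->  dma_alloc_coherent(..., GFP_KERNEL)
 *   pci_free_consistent(...)          ->  dma_free_coherent(...)
 *   PCI_DMA_FROMDEVICE / TODEVICE     ->  DMA_FROM_DEVICE / DMA_TO_DEVICE
 */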
@@ -885,8 +885,8 @@ static int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
dev->pci_dev = pci_dev;
pci_set_drvdata(pci_dev, dev);
- dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
- &dev->phys_bounce_buffer);
+ dev->bounce_buffer = dma_alloc_coherent(&pci_dev->dev, R852_DMA_LEN,
+ &dev->phys_bounce_buffer, GFP_KERNEL);
if (!dev->bounce_buffer)
goto error6;
@@ -946,8 +946,8 @@ error9:
error8:
pci_iounmap(pci_dev, dev->mmio);
error7:
- pci_free_consistent(pci_dev, R852_DMA_LEN,
- dev->bounce_buffer, dev->phys_bounce_buffer);
+ dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
+ dev->phys_bounce_buffer);
error6:
kfree(dev);
error5:
@@ -980,8 +980,8 @@ static void r852_remove(struct pci_dev *pci_dev)
/* Cleanup */
kfree(dev->tmp_buffer);
pci_iounmap(pci_dev, dev->mmio);
- pci_free_consistent(pci_dev, R852_DMA_LEN,
- dev->bounce_buffer, dev->phys_bounce_buffer);
+ dma_free_coherent(&pci_dev->dev, R852_DMA_LEN, dev->bounce_buffer,
+ dev->phys_bounce_buffer);
kfree(dev->chip);
kfree(dev);
@@ -1045,9 +1045,9 @@ static int r852_resume(struct device *device)
/* Otherwise, initialize the card */
if (dev->card_registered) {
r852_engine_enable(dev);
- dev->chip->select_chip(dev->chip, 0);
+ nand_select_target(dev->chip, 0);
nand_reset_op(dev->chip);
- dev->chip->select_chip(dev->chip, -1);
+ nand_deselect_target(dev->chip);
}
/* Program card detection IRQ */
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index d2e42e9d0e8c..adc7a196e383 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -866,7 +866,7 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
chip->legacy.write_buf = s3c2410_nand_write_buf;
chip->legacy.read_buf = s3c2410_nand_read_buf;
- chip->select_chip = s3c2410_nand_select_chip;
+ chip->legacy.select_chip = s3c2410_nand_select_chip;
chip->legacy.chip_delay = 50;
nand_set_controller_data(chip, nmtd);
chip->options = set->options;
@@ -876,8 +876,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
* let's keep behavior unchanged for legacy boards booting via pdata and
* auto-detect timings only when booting with a device tree.
*/
- if (np)
- chip->setup_data_interface = s3c2410_nand_setup_data_interface;
+ if (!np)
+ chip->options |= NAND_KEEP_TIMINGS;
switch (info->cpu_type) {
case TYPE_S3C2410:
@@ -1011,6 +1011,7 @@ static int s3c2410_nand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops s3c24xx_nand_controller_ops = {
.attach_chip = s3c2410_nand_attach_chip,
+ .setup_data_interface = s3c2410_nand_setup_data_interface,
};
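/*
 * Hedged note on the s3c2410 change above: with ->setup_data_interface
 * now always reachable through the controller ops, the old "only set
 * the hook when booting from DT" policy is inverted - pdata boards set
 * NAND_KEEP_TIMINGS, which is assumed to tell the core to leave the
 * bootloader-programmed timings alone instead of renegotiating them.
 */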
static const struct of_device_id s3c24xx_nand_dt_ids[] = {
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index 4d20d033de7b..cf6b1be1cf9c 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH FLCTL nand controller
*
@@ -5,20 +6,6 @@
* Copyright (c) 2008 Atom Create Engineering Co., Ltd.
*
* Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#include <linux/module.h>
@@ -1183,7 +1170,7 @@ static int flctl_probe(struct platform_device *pdev)
nand->legacy.read_byte = flctl_read_byte;
nand->legacy.write_buf = flctl_write_buf;
nand->legacy.read_buf = flctl_read_buf;
- nand->select_chip = flctl_select_chip;
+ nand->legacy.select_chip = flctl_select_chip;
nand->legacy.cmdfunc = flctl_cmdfunc;
nand->legacy.set_features = nand_get_set_features_notsupp;
nand->legacy.get_features = nand_get_set_features_notsupp;
@@ -1196,7 +1183,7 @@ static int flctl_probe(struct platform_device *pdev)
flctl_setup_dma(flctl);
- nand->dummy_controller.ops = &flctl_nand_controller_ops;
+ nand->legacy.dummy_controller.ops = &flctl_nand_controller_ops;
ret = nand_scan(nand, 1);
if (ret)
goto err_chip;
@@ -1236,7 +1223,7 @@ static struct platform_driver flctl_driver = {
module_platform_driver_probe(flctl_driver, flctl_probe);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_DESCRIPTION("SuperH FLCTL driver");
MODULE_ALIAS("platform:sh_flctl");
diff --git a/drivers/mtd/nand/raw/sm_common.c b/drivers/mtd/nand/raw/sm_common.c
index 6f063ef57640..409d036858dc 100644
--- a/drivers/mtd/nand/raw/sm_common.c
+++ b/drivers/mtd/nand/raw/sm_common.c
@@ -194,7 +194,7 @@ int sm_register_device(struct mtd_info *mtd, int smartmedia)
chip->options |= NAND_SKIP_BBTSCAN;
/* Scan for card properties */
- chip->dummy_controller.ops = &sm_controller_ops;
+ chip->legacy.dummy_controller.ops = &sm_controller_ops;
flash_ids = smartmedia ? nand_smartmedia_flash_ids : nand_xd_flash_ids;
ret = nand_scan_with_ids(chip, 1, flash_ids);
if (ret)
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 51b1a548064b..e828ee50a201 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -1393,7 +1393,7 @@ static int sunxi_nfc_hw_ecc_write_page_dma(struct nand_chip *chip,
sunxi_nfc_randomizer_enable(mtd);
writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
- nfc->regs + NFC_REG_RCMD_SET);
+ nfc->regs + NFC_REG_WCMD_SET);
dma_async_issue_pending(nfc->dmac);
@@ -1847,6 +1847,7 @@ static int sunxi_nand_attach_chip(struct nand_chip *nand)
static const struct nand_controller_ops sunxi_nand_controller_ops = {
.attach_chip = sunxi_nand_attach_chip,
+ .setup_data_interface = sunxi_nfc_setup_data_interface,
};
static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
@@ -1922,12 +1923,11 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
*/
nand->ecc.mode = NAND_ECC_HW;
nand_set_flash_node(nand, np);
- nand->select_chip = sunxi_nfc_select_chip;
+ nand->legacy.select_chip = sunxi_nfc_select_chip;
nand->legacy.cmd_ctrl = sunxi_nfc_cmd_ctrl;
nand->legacy.read_buf = sunxi_nfc_read_buf;
nand->legacy.write_buf = sunxi_nfc_write_buf;
nand->legacy.read_byte = sunxi_nfc_read_byte;
- nand->setup_data_interface = sunxi_nfc_setup_data_interface;
mtd = nand_to_mtd(nand);
mtd->dev.parent = dev;
diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c
index 8818f893f300..cb3beda88789 100644
--- a/drivers/mtd/nand/raw/tango_nand.c
+++ b/drivers/mtd/nand/raw/tango_nand.c
@@ -530,6 +530,7 @@ static int tango_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops tango_controller_ops = {
.attach_chip = tango_attach_chip,
+ .setup_data_interface = tango_set_timings,
};
static int chip_init(struct device *dev, struct device_node *np)
@@ -567,10 +568,9 @@ static int chip_init(struct device *dev, struct device_node *np)
chip->legacy.read_byte = tango_read_byte;
chip->legacy.write_buf = tango_write_buf;
chip->legacy.read_buf = tango_read_buf;
- chip->select_chip = tango_select_chip;
+ chip->legacy.select_chip = tango_select_chip;
chip->legacy.cmd_ctrl = tango_cmd_ctrl;
chip->legacy.dev_ready = tango_dev_ready;
- chip->setup_data_interface = tango_set_timings;
chip->options = NAND_USE_BOUNCE_BUFFER |
NAND_NO_SUBPAGE_WRITE |
NAND_WAIT_TCCS;
diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c
index 9767e29d74e2..13be32c38194 100644
--- a/drivers/mtd/nand/raw/tegra_nand.c
+++ b/drivers/mtd/nand/raw/tegra_nand.c
@@ -454,29 +454,24 @@ static const struct nand_op_parser tegra_nand_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4)),
);
+static void tegra_nand_select_target(struct nand_chip *chip,
+ unsigned int die_nr)
+{
+ struct tegra_nand_chip *nand = to_tegra_chip(chip);
+ struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
+
+ ctrl->cur_cs = nand->cs[die_nr];
+}
+
static int tegra_nand_exec_op(struct nand_chip *chip,
const struct nand_operation *op,
bool check_only)
{
+ tegra_nand_select_target(chip, op->cs);
return nand_op_parser_exec_op(chip, &tegra_nand_op_parser, op,
check_only);
}
-static void tegra_nand_select_chip(struct nand_chip *chip, int die_nr)
-{
- struct tegra_nand_chip *nand = to_tegra_chip(chip);
- struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller);
-
- WARN_ON(die_nr >= (int)ARRAY_SIZE(nand->cs));
-
- if (die_nr < 0 || die_nr > 0) {
- ctrl->cur_cs = -1;
- return;
- }
-
- ctrl->cur_cs = nand->cs[die_nr];
-}
-
static void tegra_nand_hw_ecc(struct tegra_nand_controller *ctrl,
struct nand_chip *chip, bool enable)
{
@@ -503,6 +498,8 @@ static int tegra_nand_page_xfer(struct mtd_info *mtd, struct nand_chip *chip,
u32 addr1, cmd, dma_ctrl;
int ret;
+ tegra_nand_select_target(chip, chip->cur_cs);
+
if (read) {
writel_relaxed(NAND_CMD_READ0, ctrl->regs + CMD_REG1);
writel_relaxed(NAND_CMD_READSTART, ctrl->regs + CMD_REG2);
@@ -1053,6 +1050,8 @@ static int tegra_nand_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops tegra_nand_controller_ops = {
.attach_chip = &tegra_nand_attach_chip,
+ .exec_op = tegra_nand_exec_op,
+ .setup_data_interface = tegra_nand_setup_data_interface,
};
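/*
 * Minimal sketch (assumptions marked) of the chip-select model the
 * Tegra rework above illustrates: rather than a ->select_chip() hook
 * toggling CS around every access, each nand_operation carries its
 * target die in op->cs and the controller resolves it in ->exec_op():
 */
static void example_select_target(struct nand_chip *chip, unsigned int die)
{
	/* hypothetical: latch the CS line for @die in a controller register */
}

static int example_exec_op(struct nand_chip *chip,
			   const struct nand_operation *op, bool check_only)
{
	example_select_target(chip, op->cs);	/* resolve CS per operation */
	return 0;				/* then run the instruction sequence */
}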
static int tegra_nand_chips_init(struct device *dev,
@@ -1115,9 +1114,6 @@ static int tegra_nand_chips_init(struct device *dev,
mtd->name = "tegra_nand";
chip->options = NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER;
- chip->exec_op = tegra_nand_exec_op;
- chip->select_chip = tegra_nand_select_chip;
- chip->setup_data_interface = tegra_nand_setup_data_interface;
ret = nand_scan(chip, 1);
if (ret)
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index 9814fd4a84cf..a662ca1970e5 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2009-2015 Freescale Semiconductor, Inc. and others
*
@@ -10,11 +11,6 @@
*
* Based on original driver mpc5121_nfc.c.
*
- * This is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* Limitations:
* - Untested on MPC5125 and M54418.
* - DMA and pipelining not used.
@@ -152,6 +148,7 @@ enum vf610_nfc_variant {
};
struct vf610_nfc {
+ struct nand_controller base;
struct nand_chip chip;
struct device *dev;
void __iomem *regs;
@@ -168,11 +165,6 @@ struct vf610_nfc {
u32 ecc_mode;
};
-static inline struct vf610_nfc *mtd_to_nfc(struct mtd_info *mtd)
-{
- return container_of(mtd_to_nand(mtd), struct vf610_nfc, chip);
-}
-
static inline struct vf610_nfc *chip_to_nfc(struct nand_chip *chip)
{
return container_of(chip, struct vf610_nfc, chip);
@@ -316,8 +308,7 @@ static void vf610_nfc_done(struct vf610_nfc *nfc)
static irqreturn_t vf610_nfc_irq(int irq, void *data)
{
- struct mtd_info *mtd = data;
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = data;
vf610_nfc_clear(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
complete(&nfc->cmd_done);
@@ -487,40 +478,40 @@ static const struct nand_op_parser vf610_nfc_op_parser = NAND_OP_PARSER(
NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, PAGE_2K + OOB_MAX)),
);
-static int vf610_nfc_exec_op(struct nand_chip *chip,
- const struct nand_operation *op,
- bool check_only)
-{
- return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op,
- check_only);
-}
-
/*
* This function supports Vybrid only (MPC5125 would have full RB and four CS)
*/
-static void vf610_nfc_select_chip(struct nand_chip *chip, int cs)
+static void vf610_nfc_select_target(struct nand_chip *chip, unsigned int cs)
{
- struct vf610_nfc *nfc = mtd_to_nfc(nand_to_mtd(chip));
- u32 tmp = vf610_nfc_read(nfc, NFC_ROW_ADDR);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ u32 tmp;
/* Vybrid only (MPC5125 would have full RB and four CS) */
if (nfc->variant != NFC_VFC610)
return;
+ tmp = vf610_nfc_read(nfc, NFC_ROW_ADDR);
tmp &= ~(ROW_ADDR_CHIP_SEL_RB_MASK | ROW_ADDR_CHIP_SEL_MASK);
-
- if (cs >= 0) {
- tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
- tmp |= BIT(cs) << ROW_ADDR_CHIP_SEL_SHIFT;
- }
+ tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
+ tmp |= BIT(cs) << ROW_ADDR_CHIP_SEL_SHIFT;
vf610_nfc_write(nfc, NFC_ROW_ADDR, tmp);
}
-static inline int vf610_nfc_correct_data(struct mtd_info *mtd, uint8_t *dat,
+static int vf610_nfc_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+{
+ vf610_nfc_select_target(chip, op->cs);
+ return nand_op_parser_exec_op(chip, &vf610_nfc_op_parser, op,
+ check_only);
+}
+
+static inline int vf610_nfc_correct_data(struct nand_chip *chip, uint8_t *dat,
uint8_t *oob, int page)
{
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
u32 ecc_status_off = NFC_MAIN_AREA(0) + ECC_SRAM_ADDR + ECC_STATUS;
u8 ecc_status;
u8 ecc_count;
@@ -560,12 +551,14 @@ static void vf610_nfc_fill_row(struct nand_chip *chip, int page, u32 *code,
static int vf610_nfc_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
int trfr_sz = mtd->writesize + mtd->oobsize;
u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
int stat;
+ vf610_nfc_select_target(chip, chip->cur_cs);
+
cmd2 |= NAND_CMD_READ0 << CMD_BYTE1_SHIFT;
code |= COMMAND_CMD_BYTE1 | COMMAND_CAR_BYTE1 | COMMAND_CAR_BYTE2;
@@ -592,7 +585,7 @@ static int vf610_nfc_read_page(struct nand_chip *chip, uint8_t *buf,
mtd->writesize,
mtd->oobsize, false);
- stat = vf610_nfc_correct_data(mtd, buf, chip->oob_poi, page);
+ stat = vf610_nfc_correct_data(chip, buf, chip->oob_poi, page);
if (stat < 0) {
mtd->ecc_stats.failed++;
@@ -606,13 +599,15 @@ static int vf610_nfc_read_page(struct nand_chip *chip, uint8_t *buf,
static int vf610_nfc_write_page(struct nand_chip *chip, const uint8_t *buf,
int oob_required, int page)
{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
int trfr_sz = mtd->writesize + mtd->oobsize;
u32 row = 0, cmd1 = 0, cmd2 = 0, code = 0;
u8 status;
int ret;
+ vf610_nfc_select_target(chip, chip->cur_cs);
+
cmd2 |= NAND_CMD_SEQIN << CMD_BYTE1_SHIFT;
code |= COMMAND_CMD_BYTE1 | COMMAND_CAR_BYTE1 | COMMAND_CAR_BYTE2;
@@ -648,8 +643,7 @@ static int vf610_nfc_write_page(struct nand_chip *chip, const uint8_t *buf,
static int vf610_nfc_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
int ret;
nfc->data_access = true;
@@ -662,8 +656,8 @@ static int vf610_nfc_read_page_raw(struct nand_chip *chip, u8 *buf,
static int vf610_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
int ret;
nfc->data_access = true;
@@ -681,7 +675,7 @@ static int vf610_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf,
static int vf610_nfc_read_oob(struct nand_chip *chip, int page)
{
- struct vf610_nfc *nfc = mtd_to_nfc(nand_to_mtd(chip));
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
int ret;
nfc->data_access = true;
@@ -694,7 +688,7 @@ static int vf610_nfc_read_oob(struct nand_chip *chip, int page)
static int vf610_nfc_write_oob(struct nand_chip *chip, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
int ret;
nfc->data_access = true;
@@ -751,7 +745,7 @@ static void vf610_nfc_init_controller(struct vf610_nfc *nfc)
static int vf610_nfc_attach_chip(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = chip_to_nfc(chip);
vf610_nfc_init_controller(nfc);
@@ -809,6 +803,8 @@ static int vf610_nfc_attach_chip(struct nand_chip *chip)
static const struct nand_controller_ops vf610_nfc_controller_ops = {
.attach_chip = vf610_nfc_attach_chip,
+ .exec_op = vf610_nfc_exec_op,
};
static int vf610_nfc_probe(struct platform_device *pdev)
@@ -876,14 +872,11 @@ static int vf610_nfc_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- chip->exec_op = vf610_nfc_exec_op;
- chip->select_chip = vf610_nfc_select_chip;
-
chip->options |= NAND_NO_SUBPAGE_WRITE;
init_completion(&nfc->cmd_done);
- err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, mtd);
+ err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, nfc);
if (err) {
dev_err(nfc->dev, "Error requesting IRQ!\n");
goto err_disable_clk;
@@ -891,13 +884,16 @@ static int vf610_nfc_probe(struct platform_device *pdev)
vf610_nfc_preinit_controller(nfc);
+ nand_controller_init(&nfc->base);
+ nfc->base.ops = &vf610_nfc_controller_ops;
+ chip->controller = &nfc->base;
+
/* Scan the NAND chip */
- chip->dummy_controller.ops = &vf610_nfc_controller_ops;
err = nand_scan(chip, 1);
if (err)
goto err_disable_clk;
- platform_set_drvdata(pdev, mtd);
+ platform_set_drvdata(pdev, nfc);
/* Register device in MTD */
err = mtd_device_register(mtd, NULL, 0);
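/*
 * Sketch of the pattern the vf610 probe now follows (and that other
 * drivers in this series adopt): embed a real nand_controller instead
 * of leaning on chip->legacy.dummy_controller, then point the chip at
 * it before nand_scan():
 */
struct example_nfc {
	struct nand_controller base;	/* initialized before use */
	struct nand_chip chip;
};

static void example_bind(struct example_nfc *nfc,
			 const struct nand_controller_ops *ops)
{
	nand_controller_init(&nfc->base);
	nfc->base.ops = ops;
	nfc->chip.controller = &nfc->base;
}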
@@ -914,10 +910,9 @@ err_disable_clk:
static int vf610_nfc_remove(struct platform_device *pdev)
{
- struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = platform_get_drvdata(pdev);
- nand_release(mtd_to_nand(mtd));
+ nand_release(&nfc->chip);
clk_disable_unprepare(nfc->clk);
return 0;
}
@@ -925,8 +920,7 @@ static int vf610_nfc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
static int vf610_nfc_suspend(struct device *dev)
{
- struct mtd_info *mtd = dev_get_drvdata(dev);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+ struct vf610_nfc *nfc = dev_get_drvdata(dev);
clk_disable_unprepare(nfc->clk);
return 0;
@@ -934,11 +928,9 @@ static int vf610_nfc_suspend(struct device *dev)
static int vf610_nfc_resume(struct device *dev)
{
+ struct vf610_nfc *nfc = dev_get_drvdata(dev);
int err;
- struct mtd_info *mtd = dev_get_drvdata(dev);
- struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-
err = clk_prepare_enable(nfc->clk);
if (err)
return err;
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index a234a5cb4868..4cb78106af14 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -176,7 +176,7 @@ static int xway_nand_probe(struct platform_device *pdev)
data->chip.legacy.cmd_ctrl = xway_cmd_ctrl;
data->chip.legacy.dev_ready = xway_dev_ready;
- data->chip.select_chip = xway_select_chip;
+ data->chip.legacy.select_chip = xway_select_chip;
data->chip.legacy.write_buf = xway_write_buf;
data->chip.legacy.read_buf = xway_read_buf;
data->chip.legacy.read_byte = xway_read_byte;
diff --git a/drivers/mtd/nand/spi/Makefile b/drivers/mtd/nand/spi/Makefile
index b74e074b363a..753125082640 100644
--- a/drivers/mtd/nand/spi/Makefile
+++ b/drivers/mtd/nand/spi/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o macronix.o micron.o winbond.o
+spinand-objs := core.o gigadevice.o macronix.o micron.o toshiba.o winbond.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 30f83649c481..479c2f2cf17f 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -764,8 +764,10 @@ static const struct nand_ops spinand_ops = {
};
static const struct spinand_manufacturer *spinand_manufacturers[] = {
+ &gigadevice_spinand_manufacturer,
&macronix_spinand_manufacturer,
&micron_spinand_manufacturer,
+ &toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
};
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
new file mode 100644
index 000000000000..e4141c20947a
--- /dev/null
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author:
+ * Chuanhong Guo <gch981213@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_GIGADEVICE 0xC8
+#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS (1 << 4)
+#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS (3 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ region->offset = (16 * section) + 8;
+ region->length = 8;
+
+ return 0;
+}
+
+static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 3)
+ return -ERANGE;
+
+ if (section) {
+ region->offset = 16 * section;
+ region->length = 8;
+ } else {
+ /* section 0 has one byte reserved for bad block mark */
+ region->offset = 1;
+ region->length = 7;
+ }
+ return 0;
+}
+
+static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
+ /* 1-7 bits are flipped. Return the maximum. */
+ return 7;
+
+ case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
+ return 8;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
+ .ecc = gd5fxgq4xa_ooblayout_ecc,
+ .free = gd5fxgq4xa_ooblayout_free,
+};
+
+static const struct spinand_info gigadevice_spinand_table[] = {
+ SPINAND_INFO("GD5F1GQ4xA", 0xF1,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F2GQ4xA", 0xF2,
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F4GQ4xA", 0xF4,
+ NAND_MEMORG(1, 2048, 64, 64, 4096, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
+ gd5fxgq4xa_ecc_get_status)),
+};
+
+static int gigadevice_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * For GD NANDs, an address byte needs to be shifted in before the IDs
+ * are read out, so the first byte in raw_id is dummy.
+ */
+ if (id[1] != SPINAND_MFR_GIGADEVICE)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
+ ARRAY_SIZE(gigadevice_spinand_table),
+ id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
+ .detect = gigadevice_spinand_detect,
+};
+
+const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
+ .id = SPINAND_MFR_GIGADEVICE,
+ .name = "GigaDevice",
+ .ops = &gigadevice_spinand_manuf_ops,
+};
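/*
 * Hedged sketch of how the spinand core is assumed to consume ->detect()
 * hooks like the one above: 0 means "not mine", 1 means "matched and
 * initialized", and a negative errno aborts the walk.
 */
static int example_detect_walk(struct spinand_device *spinand,
			       const struct spinand_manufacturer **mfrs,
			       unsigned int n)
{
	unsigned int i;
	int ret;

	for (i = 0; i < n; i++) {
		ret = mfrs[i]->ops->detect(spinand);
		if (ret < 0)
			return ret;	/* hard failure */
		if (ret > 0)
			return 0;	/* manufacturer claimed the device */
	}

	return -ENOTSUPP;	/* nobody recognized the ID */
}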
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
new file mode 100644
index 000000000000..081265557e70
--- /dev/null
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 exceet electronics GmbH
+ * Copyright (c) 2018 Kontron Electronics GmbH
+ *
+ * Author: Frieder Schrempf <frieder.schrempf@kontron.de>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_TOSHIBA 0x98
+#define TOSH_STATUS_ECC_HAS_BITFLIPS_T (3 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int tc58cvg2s0h_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 7)
+ return -ERANGE;
+
+ region->offset = 128 + 16 * section;
+ region->length = 16;
+
+ return 0;
+}
+
+static int tc58cvg2s0h_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section > 0)
+ return -ERANGE;
+
+ /* 2 bytes reserved for BBM */
+ region->offset = 2;
+ region->length = 126;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops tc58cvg2s0h_ooblayout = {
+ .ecc = tc58cvg2s0h_ooblayout_ecc,
+ .free = tc58cvg2s0h_ooblayout_free,
+};
+
+static int tc58cvg2s0h_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ struct nand_device *nand = spinand_to_nand(spinand);
+ u8 mbf = 0;
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf);
+
+ switch (status & STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+ return 0;
+
+ case STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+
+ case STATUS_ECC_HAS_BITFLIPS:
+ case TOSH_STATUS_ECC_HAS_BITFLIPS_T:
+ /*
+ * Let's try to retrieve the real maximum number of bitflips
+ * in order to avoid forcing the wear-leveling layer to move
+ * data around if it's not necessary.
+ */
+ if (spi_mem_exec_op(spinand->spimem, &op))
+ return nand->eccreq.strength;
+
+ mbf >>= 4;
+
+ if (WARN_ON(mbf > nand->eccreq.strength || !mbf))
+ return nand->eccreq.strength;
+
+ return mbf;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
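/*
 * Hedged note: 0x30 is taken here to be Toshiba's "maximum bitflip
 * count" feature register, reported in the upper nibble (hence the
 * mbf >>= 4 in the status handler above). When that read fails, the
 * code conservatively returns the full ECC strength so wear-leveling
 * layers treat the block as nearly worn out.
 */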
+static const struct spinand_info toshiba_spinand_table[] = {
+ SPINAND_INFO("TC58CVG2S0H", 0xCD,
+ NAND_MEMORG(1, 4096, 256, 64, 2048, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tc58cvg2s0h_ooblayout,
+ tc58cvg2s0h_ecc_get_status)),
+};
+
+static int toshiba_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ /*
+ * Reading the ID of a Toshiba SPI NAND requires a dummy byte,
+ * so the first byte in id is garbage.
+ */
+ if (id[1] != SPINAND_MFR_TOSHIBA)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, toshiba_spinand_table,
+ ARRAY_SIZE(toshiba_spinand_table),
+ id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
+ .detect = toshiba_spinand_detect,
+};
+
+const struct spinand_manufacturer toshiba_spinand_manufacturer = {
+ .id = SPINAND_MFR_TOSHIBA,
+ .name = "Toshiba",
+ .ops = &toshiba_spinand_manuf_ops,
+};
diff --git a/drivers/mtd/nand/spi/winbond.c b/drivers/mtd/nand/spi/winbond.c
index 67baa1b32c00..5d944580b898 100644
--- a/drivers/mtd/nand/spi/winbond.c
+++ b/drivers/mtd/nand/spi/winbond.c
@@ -84,6 +84,14 @@ static const struct spinand_info winbond_spinand_table[] = {
0,
SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL),
SPINAND_SELECT_TARGET(w25m02gv_select_target)),
+ SPINAND_INFO("W25N01GV", 0xAA,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
+ NAND_ECCREQ(1, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&w25m02gv_ooblayout, NULL)),
};
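/*
 * Hedged reading of the W25N01GV entry above, assuming the 2018-era
 * NAND_MEMORG(bits_per_cell, pagesize, oobsize, pages_per_eraseblock,
 * eraseblocks_per_lun, planes_per_lun, luns_per_target, ntargets)
 * layout: an SLC die with 2KiB pages plus 64B OOB, 64 pages per block
 * and 1024 blocks per LUN, i.e. 128MiB, requiring 1-bit/512B ECC.
 */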
/**
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index 91b7fb326f9a..334aa5b3a655 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -346,25 +346,26 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
goto fail;
}
- /* increase and write Wear-Leveling info */
- nb_erases = le32_to_cpu(uci.WearInfo);
- nb_erases++;
-
- /* wrap (almost impossible with current flash) or free block */
- if (nb_erases == 0)
- nb_erases = 1;
-
- /* check the "freeness" of Erase Unit before updating metadata
- * FixMe: is this check really necessary ? since we have check the
- * return code after the erase operation. */
- if (check_free_sectors(nftl, instr->addr, nftl->EraseSize, 1) != 0)
- goto fail;
-
- uci.WearInfo = le32_to_cpu(nb_erases);
- if (nftl_write_oob(mtd, block * nftl->EraseSize + SECTORSIZE +
- 8, 8, &retlen, (char *)&uci) < 0)
- goto fail;
- return 0;
+ /* increase and write Wear-Leveling info */
+ nb_erases = le32_to_cpu(uci.WearInfo);
+ nb_erases++;
+
+ /* wrap (almost impossible with current flash) or free block */
+ if (nb_erases == 0)
+ nb_erases = 1;
+
+ /* check the "freeness" of Erase Unit before updating metadata
+ * FixMe: is this check really necessary? We already check the
+ * return code after the erase operation.
+ */
+ if (check_free_sectors(nftl, instr->addr, nftl->EraseSize, 1) != 0)
+ goto fail;
+
+ uci.WearInfo = le32_to_cpu(nb_erases);
+ if (nftl_write_oob(mtd, block * nftl->EraseSize + SECTORSIZE +
+ 8, 8, &retlen, (char *)&uci) < 0)
+ goto fail;
+ return 0;
fail:
/* could not format, update the bad block table (caller is responsible
for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig
index ee5ab994132f..fccf1950e92d 100644
--- a/drivers/mtd/parsers/Kconfig
+++ b/drivers/mtd/parsers/Kconfig
@@ -14,3 +14,53 @@ config MTD_SHARPSL_PARTS
This provides the read-only FTL logic necessary to read the partition
table from the NAND flash of Sharp SL Series (Zaurus) and the MTD
partition parser using this code.
+
+config MTD_REDBOOT_PARTS
+ tristate "RedBoot partition table parsing"
+ help
+ RedBoot is a ROM monitor and bootloader which deals with multiple
+ 'images' in flash devices by putting a table in one of the erase
+ blocks on the device, similar to a partition table, which gives
+ the offsets, lengths and names of all the images stored in the
+ flash.
+
+ If you need code which can detect and parse this table, and register
+ MTD 'partitions' corresponding to each image in the table, enable
+ this option.
+
+ You will still need the parsing functions to be called by the driver
+ for your particular device. It won't happen automatically. The
+ SA1100 map driver (CONFIG_MTD_SA1100) has an option for this, for
+ example.
+
+if MTD_REDBOOT_PARTS
+
+config MTD_REDBOOT_DIRECTORY_BLOCK
+ int "Location of RedBoot partition table"
+ default "-1"
+ help
+ This option is the Linux counterpart to the
+ CYGNUM_REDBOOT_FIS_DIRECTORY_BLOCK RedBoot compile time
+ option.
+
+ The option specifies which flash sector holds the RedBoot
+ partition table. A zero or positive value gives an absolute
+ erase block number. A negative value specifies a number of
+ sectors before the end of the device.
+
+ For example "2" means block number 2, "-1" means the last
+ block and "-2" means the penultimate block.
+
+config MTD_REDBOOT_PARTS_UNALLOCATED
+ bool "Include unallocated flash regions"
+ help
+ If you need to register each unallocated flash region as an MTD
+ 'partition', enable this option.
+
+config MTD_REDBOOT_PARTS_READONLY
+ bool "Force read-only for RedBoot system images"
+ help
+ If you need to force read-only for 'RedBoot', 'RedBoot Config' and
+ 'FIS directory' images, enable this option.
+
+endif # MTD_REDBOOT_PARTS
diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile
index 5b1bcc3d90d9..d8418bf6804a 100644
--- a/drivers/mtd/parsers/Makefile
+++ b/drivers/mtd/parsers/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o
obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o
+obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/parsers/redboot.c
index 7623ac5fc586..957538d57725 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/parsers/redboot.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
-
+#include <linux/of.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/module.h>
@@ -56,6 +56,27 @@ static inline int redboot_checksum(struct fis_image_desc *img)
return 1;
}
+static void parse_redboot_of(struct mtd_info *master)
+{
+ struct device_node *np;
+ u32 dirblock;
+ int ret;
+
+ np = mtd_get_of_node(master);
+ if (!np)
+ return;
+
+ ret = of_property_read_u32(np, "fis-index-block", &dirblock);
+ if (ret)
+ return;
+
+ /*
+ * Assign the FIS directory block found in the device tree to
+ * the module-wide directory block index.
+ */
+ directory = dirblock;
+}
+
static int parse_redboot_partitions(struct mtd_info *master,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
@@ -76,6 +97,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
static char nullstring[] = "unallocated";
#endif
+ parse_redboot_of(master);
+
if ( directory < 0 ) {
offset = master->size + directory * master->erasesize;
while (mtd_block_isbad(master, offset)) {
@@ -289,9 +312,16 @@ static int parse_redboot_partitions(struct mtd_info *master,
return ret;
}
+static const struct of_device_id mtd_parser_redboot_of_match_table[] = {
+ { .compatible = "redboot-fis" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtd_parser_redboot_of_match_table);
+
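/*
 * Hedged device-tree sketch for the new "redboot-fis" binding wired up
 * below; treat the node placement as illustrative, not normative:
 *
 *	partitions {
 *		compatible = "redboot-fis";
 *		fis-index-block = <0>;	(FIS directory in erase block 0)
 *	};
 */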
static struct mtd_part_parser redboot_parser = {
.parse_fn = parse_redboot_partitions,
.name = "RedBoot",
+ .of_match_table = mtd_parser_redboot_of_match_table,
};
module_mtd_part_parser(redboot_parser);
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 6cc9c929ff57..44fe8018733c 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -39,15 +39,6 @@ config SPI_ASPEED_SMC
and support for the SPI flash memory controller (SPI) for
the host firmware. The implementation only supports SPI NOR.
-config SPI_ATMEL_QUADSPI
- tristate "Atmel Quad SPI Controller"
- depends on ARCH_AT91 || (ARM && COMPILE_TEST)
- depends on OF && HAS_IOMEM
- help
- This enables support for the Quad SPI controller in master mode.
- This driver does not support generic SPI. The implementation only
- supports SPI NOR.
-
config SPI_CADENCE_QUADSPI
tristate "Cadence Quad SPI controller"
depends on OF && (ARM || ARM64 || COMPILE_TEST)
diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile
index f4c61d282abd..a552efd22958 100644
--- a/drivers/mtd/spi-nor/Makefile
+++ b/drivers/mtd/spi-nor/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
-obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 93c9bc8931fc..6e13bbd1aaa5 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
* influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
*
* Copyright (C) 2005, Intec Automation Inc.
* Copyright (C) 2014, Freescale Semiconductor, Inc.
- *
- * This code is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/err.h>
@@ -42,6 +39,197 @@
#define SPI_NOR_MAX_ID_LEN 6
#define SPI_NOR_MAX_ADDR_WIDTH 4
+struct spi_nor_read_command {
+ u8 num_mode_clocks;
+ u8 num_wait_states;
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+struct spi_nor_pp_command {
+ u8 opcode;
+ enum spi_nor_protocol proto;
+};
+
+enum spi_nor_read_command_index {
+ SNOR_CMD_READ,
+ SNOR_CMD_READ_FAST,
+ SNOR_CMD_READ_1_1_1_DTR,
+
+ /* Dual SPI */
+ SNOR_CMD_READ_1_1_2,
+ SNOR_CMD_READ_1_2_2,
+ SNOR_CMD_READ_2_2_2,
+ SNOR_CMD_READ_1_2_2_DTR,
+
+ /* Quad SPI */
+ SNOR_CMD_READ_1_1_4,
+ SNOR_CMD_READ_1_4_4,
+ SNOR_CMD_READ_4_4_4,
+ SNOR_CMD_READ_1_4_4_DTR,
+
+ /* Octo SPI */
+ SNOR_CMD_READ_1_1_8,
+ SNOR_CMD_READ_1_8_8,
+ SNOR_CMD_READ_8_8_8,
+ SNOR_CMD_READ_1_8_8_DTR,
+
+ SNOR_CMD_READ_MAX
+};
+
+enum spi_nor_pp_command_index {
+ SNOR_CMD_PP,
+
+ /* Quad SPI */
+ SNOR_CMD_PP_1_1_4,
+ SNOR_CMD_PP_1_4_4,
+ SNOR_CMD_PP_4_4_4,
+
+ /* Octo SPI */
+ SNOR_CMD_PP_1_1_8,
+ SNOR_CMD_PP_1_8_8,
+ SNOR_CMD_PP_8_8_8,
+
+ SNOR_CMD_PP_MAX
+};
+
+struct spi_nor_flash_parameter {
+ u64 size;
+ u32 page_size;
+
+ struct spi_nor_hwcaps hwcaps;
+ struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
+ struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
+
+ int (*quad_enable)(struct spi_nor *nor);
+};
+
+struct sfdp_parameter_header {
+ u8 id_lsb;
+ u8 minor;
+ u8 major;
+ u8 length; /* in double words */
+ u8 parameter_table_pointer[3]; /* byte address */
+ u8 id_msb;
+};
+
+#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
+#define SFDP_PARAM_HEADER_PTP(p) \
+ (((p)->parameter_table_pointer[2] << 16) | \
+ ((p)->parameter_table_pointer[1] << 8) | \
+ ((p)->parameter_table_pointer[0] << 0))
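/*
 * Worked example (editorial): the table pointer is stored little-endian,
 * so parameter_table_pointer[] = { 0x30, 0x01, 0x00 } makes
 * SFDP_PARAM_HEADER_PTP() return (0x00 << 16) | (0x01 << 8) | 0x30,
 * i.e. byte address 0x000130.
 */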
+
+#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
+#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
+#define SFDP_4BAIT_ID 0xff84 /* 4-byte Address Instruction Table */
+
+#define SFDP_SIGNATURE 0x50444653U
+#define SFDP_JESD216_MAJOR 1
+#define SFDP_JESD216_MINOR 0
+#define SFDP_JESD216A_MINOR 5
+#define SFDP_JESD216B_MINOR 6
+
+struct sfdp_header {
+ u32 signature; /* 0x50444653U <=> "SFDP" */
+ u8 minor;
+ u8 major;
+ u8 nph; /* 0-based number of parameter headers */
+ u8 unused;
+
+ /* Basic Flash Parameter Table. */
+ struct sfdp_parameter_header bfpt_header;
+};
+
+/* Basic Flash Parameter Table */
+
+/*
+ * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
+ * They are indexed from 1 but C arrays are indexed from 0.
+ */
+#define BFPT_DWORD(i) ((i) - 1)
+#define BFPT_DWORD_MAX 16
+
+/* The first version of JESD216 defined only 9 DWORDs. */
+#define BFPT_DWORD_MAX_JESD216 9
+
+/* 1st DWORD. */
+#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
+#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
+#define BFPT_DWORD1_DTR BIT(19)
+#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
+#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
+#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
+
+/* 5th DWORD. */
+#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
+#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
+
+/* 11th DWORD. */
+#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
+#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
+
+/* 15th DWORD. */
+
+/*
+ * (from JESD216 rev B)
+ * Quad Enable Requirements (QER):
+ * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
+ * reads based on instruction. DQ3/HOLD# functions are hold during
+ * instruction phase.
+ * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * Writing only one byte to the status register has the side-effect of
+ * clearing status register 2, including the QE bit. The 100b code is
+ * used if writing one byte to the status register does not modify
+ * status register 2.
+ * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
+ * one data byte where bit 6 is one.
+ * [...]
+ * - 011b: QE is bit 7 of status register 2. It is set via Write status
+ * register 2 instruction 3Eh with one data byte where bit 7 is one.
+ * [...]
+ * The status register 2 is read using instruction 3Fh.
+ * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ * In contrast to the 001b code, writing one byte to the status
+ * register does not modify status register 2.
+ * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
+ * Read Status instruction 05h. Status register 2 is read using
+ * instruction 35h. QE is set via Write Status instruction 01h with
+ * two data bytes where bit 1 of the second byte is one.
+ * [...]
+ */
+#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
+#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
+#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
+#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
+#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
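/*
 * Editorial sketch of how a BFPT consumer might dispatch on the QER
 * field; the spi-nor core is assumed to map each code to a matching
 * quad_enable() implementation (the "style" labels are only the vendor
 * hints from the comments above).
 */
static int example_qer_style(u32 bfpt_dword15)
{
	switch (bfpt_dword15 & BFPT_DWORD15_QER_MASK) {
	case BFPT_DWORD15_QER_NONE:
		return 0;	/* no QE bit: nothing to do */
	case BFPT_DWORD15_QER_SR1_BIT6:
		return 1;	/* set bit 6 of SR1 (Macronix style) */
	case BFPT_DWORD15_QER_SR2_BIT7:
		return 2;	/* set bit 7 of SR2 via opcode 3Eh */
	case BFPT_DWORD15_QER_SR2_BIT1:
	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
		return 3;	/* set bit 1 of SR2 (Spansion style) */
	default:
		return -EINVAL;
	}
}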
+
+struct sfdp_bfpt {
+ u32 dwords[BFPT_DWORD_MAX];
+};
+
+/**
+ * struct spi_nor_fixups - SPI NOR fixup hooks
+ * @post_bfpt: called after the BFPT table has been parsed
+ *
+ * These hooks can be used to tweak the SPI NOR configuration when the SFDP
+ * table is broken or not available.
+ */
+struct spi_nor_fixups {
+ int (*post_bfpt)(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params);
+};
+
struct flash_info {
char *name;
@@ -91,13 +279,14 @@ struct flash_info {
#define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */
#define USE_CLSR BIT(14) /* use CLSR command */
+ /* Part specific fixup hooks. */
+ const struct spi_nor_fixups *fixups;
+
int (*quad_enable)(struct spi_nor *nor);
};
#define JEDEC_MFR(info) ((info)->id[0])
-static const struct flash_info *spi_nor_match_id(const char *name);
-
/*
* Read the status register, returning its value in the location
* Return the status register value.
@@ -159,7 +348,7 @@ static int read_cr(struct spi_nor *nor)
* Write status register 1 byte
* Returns negative if error occurred.
*/
-static inline int write_sr(struct spi_nor *nor, u8 val)
+static int write_sr(struct spi_nor *nor, u8 val)
{
nor->cmd_buf[0] = val;
return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
@@ -169,7 +358,7 @@ static inline int write_sr(struct spi_nor *nor, u8 val)
* Set write enable latch with Write Enable command.
* Returns negative if error occurred.
*/
-static inline int write_enable(struct spi_nor *nor)
+static int write_enable(struct spi_nor *nor)
{
return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
}
@@ -177,12 +366,12 @@ static inline int write_enable(struct spi_nor *nor)
/*
* Send write disable instruction to the chip.
*/
-static inline int write_disable(struct spi_nor *nor)
+static int write_disable(struct spi_nor *nor)
{
return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
}
-static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
{
return mtd->priv;
}
@@ -200,7 +389,7 @@ static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
return opcode;
}
-static inline u8 spi_nor_convert_3to4_read(u8 opcode)
+static u8 spi_nor_convert_3to4_read(u8 opcode)
{
static const u8 spi_nor_3to4_read[][2] = {
{ SPINOR_OP_READ, SPINOR_OP_READ_4B },
@@ -219,7 +408,7 @@ static inline u8 spi_nor_convert_3to4_read(u8 opcode)
ARRAY_SIZE(spi_nor_3to4_read));
}
-static inline u8 spi_nor_convert_3to4_program(u8 opcode)
+static u8 spi_nor_convert_3to4_program(u8 opcode)
{
static const u8 spi_nor_3to4_program[][2] = {
{ SPINOR_OP_PP, SPINOR_OP_PP_4B },
@@ -231,7 +420,7 @@ static inline u8 spi_nor_convert_3to4_program(u8 opcode)
ARRAY_SIZE(spi_nor_3to4_program));
}
-static inline u8 spi_nor_convert_3to4_erase(u8 opcode)
+static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
static const u8 spi_nor_3to4_erase[][2] = {
{ SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
@@ -243,15 +432,14 @@ static inline u8 spi_nor_convert_3to4_erase(u8 opcode)
ARRAY_SIZE(spi_nor_3to4_erase));
}
-static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
- const struct flash_info *info)
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
/* Do some manufacturer fixups first */
- switch (JEDEC_MFR(info)) {
+ switch (JEDEC_MFR(nor->info)) {
case SNOR_MFR_SPANSION:
/* No small sector erase for 4-byte command set */
nor->erase_opcode = SPINOR_OP_SE;
- nor->mtd.erasesize = info->sector_size;
+ nor->mtd.erasesize = nor->info->sector_size;
break;
default:
@@ -276,17 +464,18 @@ static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
}
/* Enable/disable 4-byte addressing mode. */
-static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
- int enable)
+static int set_4byte(struct spi_nor *nor, bool enable)
{
int status;
bool need_wren = false;
u8 cmd;
- switch (JEDEC_MFR(info)) {
+ switch (JEDEC_MFR(nor->info)) {
+ case SNOR_MFR_ST:
case SNOR_MFR_MICRON:
/* Some Micron need WREN command; all will accept it */
need_wren = true;
+ /* fall through */
case SNOR_MFR_MACRONIX:
case SNOR_MFR_WINBOND:
if (need_wren)
@@ -298,7 +487,7 @@ static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
write_disable(nor);
if (!status && !enable &&
- JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
+ JEDEC_MFR(nor->info) == SNOR_MFR_WINBOND) {
/*
* On Winbond W25Q256FV, leaving 4byte mode causes
* the Extended Address Register to be set to 1, so all
@@ -333,7 +522,7 @@ static int s3an_sr_ready(struct spi_nor *nor)
return !!(val & XSR_RDY);
}
-static inline int spi_nor_sr_ready(struct spi_nor *nor)
+static int spi_nor_sr_ready(struct spi_nor *nor)
{
int sr = read_sr(nor);
if (sr < 0)
@@ -352,7 +541,7 @@ static inline int spi_nor_sr_ready(struct spi_nor *nor)
return !(sr & SR_WIP);
}
-static inline int spi_nor_fsr_ready(struct spi_nor *nor)
+static int spi_nor_fsr_ready(struct spi_nor *nor)
{
int fsr = read_fsr(nor);
if (fsr < 0)
@@ -1200,7 +1389,247 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return ret;
}
-static int macronix_quad_enable(struct spi_nor *nor);
+/*
+ * Write the Status Register and the Configuration Register with 2 bytes.
+ * The first byte will be written to the status register, while the
+ * second byte will be written to the configuration register.
+ * Returns negative if an error occurred.
+ */
+static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
+{
+ int ret;
+
+ write_enable(nor);
+
+ ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
+ if (ret < 0) {
+ dev_err(nor->dev,
+ "error while writing configuration register\n");
+ return -EINVAL;
+ }
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret) {
+ dev_err(nor->dev,
+ "timeout while writing configuration register\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * macronix_quad_enable() - set QE bit in Status Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register.
+ *
+ * Bit 6 of the Status Register is the QE bit for Macronix-like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int macronix_quad_enable(struct spi_nor *nor)
+{
+ int ret, val;
+
+ val = read_sr(nor);
+ if (val < 0)
+ return val;
+ if (val & SR_QUAD_EN_MX)
+ return 0;
+
+ write_enable(nor);
+
+ write_sr(nor, val | SR_QUAD_EN_MX);
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret)
+ return ret;
+
+ ret = read_sr(nor);
+ if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
+ dev_err(nor->dev, "Macronix Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * spansion_quad_enable() - set QE bit in Configuration Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function is kept for legacy purposes: it has been used for a long
+ * time without anybody complaining, but it should be considered
+ * deprecated and possibly buggy.
+ * First, this function doesn't care about the previous values of the Status
+ * and Configuration Registers when it sets the QE bit (bit 1) in the
+ * Configuration Register: all other bits are cleared, which may have unwanted
+ * side effects like removing some block protections.
+ * Secondly, it uses the Read Configuration Register (35h) instruction, though
+ * a few very old memories don't support it. If a pull-up resistor is present
+ * on the MISO/IO1 line, we might still pass the "read back" test: the QSPI
+ * memory doesn't recognize the command, so it leaves the MISO/IO1 line state
+ * unchanged, hence read_cr() returns 0xFF.
+ *
+ * Bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_quad_enable(struct spi_nor *nor)
+{
+ u8 sr_cr[2] = {0, CR_QUAD_EN_SPAN};
+ int ret;
+
+ ret = write_sr_cr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ /* read back and check it */
+ ret = read_cr(nor);
+ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+ dev_err(nor->dev, "Spansion Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function should be used with QSPI memories not supporting the Read
+ * Configuration Register (35h) instruction.
+ *
+ * Bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
+{
+ u8 sr_cr[2];
+ int ret;
+
+ /* Keep the current value of the Status Register. */
+ ret = read_sr(nor);
+ if (ret < 0) {
+ dev_err(nor->dev, "error while reading status register\n");
+ return -EINVAL;
+ }
+ sr_cr[0] = ret;
+ sr_cr[1] = CR_QUAD_EN_SPAN;
+
+ return write_sr_cr(nor, sr_cr);
+}
+
+/**
+ * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function should be used with QSPI memories supporting the Read
+ * Configuration Register (35h) instruction.
+ *
+ * Bit 1 of the Configuration Register is the QE bit for Spansion-like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_read_cr_quad_enable(struct spi_nor *nor)
+{
+ struct device *dev = nor->dev;
+ u8 sr_cr[2];
+ int ret;
+
+ /* Check current Quad Enable bit value. */
+ ret = read_cr(nor);
+ if (ret < 0) {
+ dev_err(dev, "error while reading configuration register\n");
+ return -EINVAL;
+ }
+
+ if (ret & CR_QUAD_EN_SPAN)
+ return 0;
+
+ sr_cr[1] = ret | CR_QUAD_EN_SPAN;
+
+ /* Keep the current value of the Status Register. */
+ ret = read_sr(nor);
+ if (ret < 0) {
+ dev_err(dev, "error while reading status register\n");
+ return -EINVAL;
+ }
+ sr_cr[0] = ret;
+
+ ret = write_sr_cr(nor, sr_cr);
+ if (ret)
+ return ret;
+
+ /* Read back and check it. */
+ ret = read_cr(nor);
+ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+ dev_err(nor->dev, "Spansion Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * sr2_bit7_quad_enable() - set QE bit in Status Register 2.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register 2.
+ *
+ * This is one of the procedures to set the QE bit described in the SFDP
+ * (JESD216 rev B) specification but no manufacturer using this procedure has
+ * been identified yet, hence the name of the function.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int sr2_bit7_quad_enable(struct spi_nor *nor)
+{
+ u8 sr2;
+ int ret;
+
+ /* Check current Quad Enable bit value. */
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
+ if (ret)
+ return ret;
+ if (sr2 & SR2_QUAD_EN_BIT7)
+ return 0;
+
+ /* Update the Quad Enable bit. */
+ sr2 |= SR2_QUAD_EN_BIT7;
+
+ write_enable(nor);
+
+ ret = nor->write_reg(nor, SPINOR_OP_WRSR2, &sr2, 1);
+ if (ret < 0) {
+ dev_err(nor->dev, "error while writing status register 2\n");
+ return -EINVAL;
+ }
+
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret < 0) {
+ dev_err(nor->dev, "timeout while writing status register 2\n");
+ return ret;
+ }
+
+ /* Read back and check it. */
+ ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
+ if (!(ret > 0 && (sr2 & SR2_QUAD_EN_BIT7))) {
+ dev_err(nor->dev, "SR2 Quad bit not set\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
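
Together, the quad-enable hooks above cover the QER codes from BFPT DWORD 15. A minimal sketch of how a BFPT parser might dispatch on that field (mirroring the BFPT_DWORD15_QER_* defines earlier in this file; params is assumed to be a 'struct spi_nor_flash_parameter *'):

    switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
    case BFPT_DWORD15_QER_NONE:
        params->quad_enable = NULL;	/* no QE bit to set */
        break;
    case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
    case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
        params->quad_enable = spansion_no_read_cr_quad_enable;
        break;
    case BFPT_DWORD15_QER_SR1_BIT6:
        params->quad_enable = macronix_quad_enable;
        break;
    case BFPT_DWORD15_QER_SR2_BIT7:
        params->quad_enable = sr2_bit7_quad_enable;
        break;
    case BFPT_DWORD15_QER_SR2_BIT1:
        params->quad_enable = spansion_read_cr_quad_enable;
        break;
    default:
        return -EINVAL;
    }
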
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \
@@ -1252,6 +1681,31 @@ static int macronix_quad_enable(struct spi_nor *nor);
.addr_width = 3, \
.flags = SPI_NOR_NO_FR | SPI_S3AN,
+static int
+mx25l25635_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ /*
+ * MX25L25635F supports 4B opcodes but MX25L25635E does not.
+ * Unfortunately, Macronix has re-used the same JEDEC ID for both
+ * variants which prevents us from defining a new entry in the parts
+ * table.
+ * We need a way to differentiate MX25L25635E and MX25L25635F, and it
+ * seems that the F version advertises support for Fast Read 4-4-4 in
+ * its BFPT table.
+ */
+ if (bfpt->dwords[BFPT_DWORD(5)] & BFPT_DWORD5_FAST_READ_4_4_4)
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ return 0;
+}
+
+static struct spi_nor_fixups mx25l25635_fixups = {
+ .post_bfpt = mx25l25635_post_bfpt_fixups,
+};
+
/* NOTE: double check command sets and memory organization when you add
* more nor chips. This current list focusses on newer chips, which
* have been converging on command sets which including JEDEC ID.
@@ -1352,12 +1806,19 @@ static const struct flash_info spi_nor_ids[] = {
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024, 8,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024, 32,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25lp080d", INFO(0x9d6014, 0, 64 * 1024, 16,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25lp032", INFO(0x9d6016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ) },
+ { "is25lp064", INFO(0x9d6017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ) },
{ "is25lp128", INFO(0x9d6018, 0, 64 * 1024, 256,
SECT_4K | SPI_NOR_DUAL_READ) },
{ "is25lp256", INFO(0x9d6019, 0, 64 * 1024, 512,
- SECT_4K | SPI_NOR_DUAL_READ) },
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_4B_OPCODES) },
{ "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
@@ -1380,7 +1841,11 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
- { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512,
+ SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
+ .fixups = &mx25l25635_fixups },
{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
@@ -1388,7 +1853,7 @@ static const struct flash_info spi_nor_ids[] = {
{ "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },
- /* Micron */
+ /* Micron <--> ST Micro */
{ "n25q016a", INFO(0x20bb15, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_QUAD_READ) },
{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
{ "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) },
@@ -1404,6 +1869,12 @@ static const struct flash_info spi_nor_ids[] = {
{ "n25q00a", INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
{ "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+ /* Micron */
+ {
+ "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512,
+ SECT_4K | USE_FSR | SPI_NOR_4B_OPCODES)
+ },
+
/* PMC */
{ "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) },
{ "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) },
@@ -1531,6 +2002,11 @@ static const struct flash_info spi_nor_ids[] = {
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
+ {
+ "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
{ "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) },
{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
@@ -1763,248 +2239,6 @@ write_err:
return ret;
}
-/**
- * macronix_quad_enable() - set QE bit in Status Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Status Register.
- *
- * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int macronix_quad_enable(struct spi_nor *nor)
-{
- int ret, val;
-
- val = read_sr(nor);
- if (val < 0)
- return val;
- if (val & SR_QUAD_EN_MX)
- return 0;
-
- write_enable(nor);
-
- write_sr(nor, val | SR_QUAD_EN_MX);
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret)
- return ret;
-
- ret = read_sr(nor);
- if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
- dev_err(nor->dev, "Macronix Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * Write status Register and configuration register with 2 bytes
- * The first byte will be written to the status register, while the
- * second byte will be written to the configuration register.
- * Return negative if error occurred.
- */
-static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
-{
- int ret;
-
- write_enable(nor);
-
- ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
- if (ret < 0) {
- dev_err(nor->dev,
- "error while writing configuration register\n");
- return -EINVAL;
- }
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret) {
- dev_err(nor->dev,
- "timeout while writing configuration register\n");
- return ret;
- }
-
- return 0;
-}
-
-/**
- * spansion_quad_enable() - set QE bit in Configuraiton Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function is kept for legacy purpose because it has been used for a
- * long time without anybody complaining but it should be considered as
- * deprecated and maybe buggy.
- * First, this function doesn't care about the previous values of the Status
- * and Configuration Registers when it sets the QE bit (bit 1) in the
- * Configuration Register: all other bits are cleared, which may have unwanted
- * side effects like removing some block protections.
- * Secondly, it uses the Read Configuration Register (35h) instruction though
- * some very old and few memories don't support this instruction. If a pull-up
- * resistor is present on the MISO/IO1 line, we might still be able to pass the
- * "read back" test because the QSPI memory doesn't recognize the command,
- * so leaves the MISO/IO1 line state unchanged, hence read_cr() returns 0xFF.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_quad_enable(struct spi_nor *nor)
-{
- u8 sr_cr[2] = {0, CR_QUAD_EN_SPAN};
- int ret;
-
- ret = write_sr_cr(nor, sr_cr);
- if (ret)
- return ret;
-
- /* read back and check it */
- ret = read_cr(nor);
- if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
- dev_err(nor->dev, "Spansion Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function should be used with QSPI memories not supporting the Read
- * Configuration Register (35h) instruction.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
-{
- u8 sr_cr[2];
- int ret;
-
- /* Keep the current value of the Status Register. */
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(nor->dev, "error while reading status register\n");
- return -EINVAL;
- }
- sr_cr[0] = ret;
- sr_cr[1] = CR_QUAD_EN_SPAN;
-
- return write_sr_cr(nor, sr_cr);
-}
-
-/**
- * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Configuration Register.
- * This function should be used with QSPI memories supporting the Read
- * Configuration Register (35h) instruction.
- *
- * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
- * memories.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int spansion_read_cr_quad_enable(struct spi_nor *nor)
-{
- struct device *dev = nor->dev;
- u8 sr_cr[2];
- int ret;
-
- /* Check current Quad Enable bit value. */
- ret = read_cr(nor);
- if (ret < 0) {
- dev_err(dev, "error while reading configuration register\n");
- return -EINVAL;
- }
-
- if (ret & CR_QUAD_EN_SPAN)
- return 0;
-
- sr_cr[1] = ret | CR_QUAD_EN_SPAN;
-
- /* Keep the current value of the Status Register. */
- ret = read_sr(nor);
- if (ret < 0) {
- dev_err(dev, "error while reading status register\n");
- return -EINVAL;
- }
- sr_cr[0] = ret;
-
- ret = write_sr_cr(nor, sr_cr);
- if (ret)
- return ret;
-
- /* Read back and check it. */
- ret = read_cr(nor);
- if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
- dev_err(nor->dev, "Spansion Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * sr2_bit7_quad_enable() - set QE bit in Status Register 2.
- * @nor: pointer to a 'struct spi_nor'
- *
- * Set the Quad Enable (QE) bit in the Status Register 2.
- *
- * This is one of the procedures to set the QE bit described in the SFDP
- * (JESD216 rev B) specification but no manufacturer using this procedure has
- * been identified yet, hence the name of the function.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int sr2_bit7_quad_enable(struct spi_nor *nor)
-{
- u8 sr2;
- int ret;
-
- /* Check current Quad Enable bit value. */
- ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
- if (ret)
- return ret;
- if (sr2 & SR2_QUAD_EN_BIT7)
- return 0;
-
- /* Update the Quad Enable bit. */
- sr2 |= SR2_QUAD_EN_BIT7;
-
- write_enable(nor);
-
- ret = nor->write_reg(nor, SPINOR_OP_WRSR2, &sr2, 1);
- if (ret < 0) {
- dev_err(nor->dev, "error while writing status register 2\n");
- return -EINVAL;
- }
-
- ret = spi_nor_wait_till_ready(nor);
- if (ret < 0) {
- dev_err(nor->dev, "timeout while writing status register 2\n");
- return ret;
- }
-
- /* Read back and check it. */
- ret = nor->read_reg(nor, SPINOR_OP_RDSR2, &sr2, 1);
- if (!(ret > 0 && (sr2 & SR2_QUAD_EN_BIT7))) {
- dev_err(nor->dev, "SR2 Quad bit not set\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int spi_nor_check(struct spi_nor *nor)
{
if (!nor->dev || !nor->read || !nor->write ||
@@ -2016,7 +2250,7 @@ static int spi_nor_check(struct spi_nor *nor)
return 0;
}
-static int s3an_nor_scan(const struct flash_info *info, struct spi_nor *nor)
+static int s3an_nor_scan(struct spi_nor *nor)
{
int ret;
u8 val;
@@ -2047,7 +2281,7 @@ static int s3an_nor_scan(const struct flash_info *info, struct spi_nor *nor)
/* Flash in Power of 2 mode */
nor->page_size = (nor->page_size == 264) ? 256 : 512;
nor->mtd.writebufsize = nor->page_size;
- nor->mtd.size = 8 * nor->page_size * info->n_sectors;
+ nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;
nor->mtd.erasesize = 8 * nor->page_size;
} else {
/* Flash in Default addressing mode */
@@ -2057,71 +2291,6 @@ static int s3an_nor_scan(const struct flash_info *info, struct spi_nor *nor)
return 0;
}
-struct spi_nor_read_command {
- u8 num_mode_clocks;
- u8 num_wait_states;
- u8 opcode;
- enum spi_nor_protocol proto;
-};
-
-struct spi_nor_pp_command {
- u8 opcode;
- enum spi_nor_protocol proto;
-};
-
-enum spi_nor_read_command_index {
- SNOR_CMD_READ,
- SNOR_CMD_READ_FAST,
- SNOR_CMD_READ_1_1_1_DTR,
-
- /* Dual SPI */
- SNOR_CMD_READ_1_1_2,
- SNOR_CMD_READ_1_2_2,
- SNOR_CMD_READ_2_2_2,
- SNOR_CMD_READ_1_2_2_DTR,
-
- /* Quad SPI */
- SNOR_CMD_READ_1_1_4,
- SNOR_CMD_READ_1_4_4,
- SNOR_CMD_READ_4_4_4,
- SNOR_CMD_READ_1_4_4_DTR,
-
- /* Octo SPI */
- SNOR_CMD_READ_1_1_8,
- SNOR_CMD_READ_1_8_8,
- SNOR_CMD_READ_8_8_8,
- SNOR_CMD_READ_1_8_8_DTR,
-
- SNOR_CMD_READ_MAX
-};
-
-enum spi_nor_pp_command_index {
- SNOR_CMD_PP,
-
- /* Quad SPI */
- SNOR_CMD_PP_1_1_4,
- SNOR_CMD_PP_1_4_4,
- SNOR_CMD_PP_4_4_4,
-
- /* Octo SPI */
- SNOR_CMD_PP_1_1_8,
- SNOR_CMD_PP_1_8_8,
- SNOR_CMD_PP_8_8_8,
-
- SNOR_CMD_PP_MAX
-};
-
-struct spi_nor_flash_parameter {
- u64 size;
- u32 page_size;
-
- struct spi_nor_hwcaps hwcaps;
- struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
- struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
-
- int (*quad_enable)(struct spi_nor *nor);
-};
-
static void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
u8 num_mode_clocks,
@@ -2144,6 +2313,57 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
pp->proto = proto;
}
+static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (table[i][0] == (int)hwcaps)
+ return table[i][1];
+
+ return -EINVAL;
+}
+
+static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
+{
+ static const int hwcaps_read2cmd[][2] = {
+ { SNOR_HWCAPS_READ, SNOR_CMD_READ },
+ { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
+ { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
+ { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
+ { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
+ { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
+ { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
+ { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
+ { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
+ { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
+ { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
+ { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
+ ARRAY_SIZE(hwcaps_read2cmd));
+}
+
+static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
+{
+ static const int hwcaps_pp2cmd[][2] = {
+ { SNOR_HWCAPS_PP, SNOR_CMD_PP },
+ { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
+ { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
+ { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
+ { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
+ { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
+ { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
+ };
+
+ return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
+ ARRAY_SIZE(hwcaps_pp2cmd));
+}
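
A hedged usage sketch for these lookup helpers, assuming the SNOR_HWCAPS_READ_* bits are ordered from slowest to fastest protocol (as laid out in spi-nor.h): a caller intersects controller and flash capabilities, keeps the highest set bit, and translates it into a read-command index:

    u32 best = shared_hwcaps & SNOR_HWCAPS_READ_MASK;
    int cmd;

    if (!best)
        return -EINVAL;	/* no read command supported by both sides */

    cmd = spi_nor_hwcaps_read2cmd(BIT(fls(best) - 1));
    if (cmd < 0)
        return -EINVAL;
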
+
/*
* Serial Flash Discoverable Parameters (SFDP) parsing.
*/
@@ -2244,120 +2464,9 @@ static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
return ret;
}
-struct sfdp_parameter_header {
- u8 id_lsb;
- u8 minor;
- u8 major;
- u8 length; /* in double words */
- u8 parameter_table_pointer[3]; /* byte address */
- u8 id_msb;
-};
-
-#define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
-#define SFDP_PARAM_HEADER_PTP(p) \
- (((p)->parameter_table_pointer[2] << 16) | \
- ((p)->parameter_table_pointer[1] << 8) | \
- ((p)->parameter_table_pointer[0] << 0))
-
-#define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
-#define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
-
-#define SFDP_SIGNATURE 0x50444653U
-#define SFDP_JESD216_MAJOR 1
-#define SFDP_JESD216_MINOR 0
-#define SFDP_JESD216A_MINOR 5
-#define SFDP_JESD216B_MINOR 6
-
-struct sfdp_header {
- u32 signature; /* Ox50444653U <=> "SFDP" */
- u8 minor;
- u8 major;
- u8 nph; /* 0-base number of parameter headers */
- u8 unused;
-
- /* Basic Flash Parameter Table. */
- struct sfdp_parameter_header bfpt_header;
-};
-
-/* Basic Flash Parameter Table */
-
-/*
- * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
- * They are indexed from 1 but C arrays are indexed from 0.
- */
-#define BFPT_DWORD(i) ((i) - 1)
-#define BFPT_DWORD_MAX 16
-
-/* The first version of JESB216 defined only 9 DWORDs. */
-#define BFPT_DWORD_MAX_JESD216 9
-
-/* 1st DWORD. */
-#define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
-#define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
-#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
-#define BFPT_DWORD1_DTR BIT(19)
-#define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
-#define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
-#define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
-
-/* 5th DWORD. */
-#define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
-#define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
-
-/* 11th DWORD. */
-#define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
-#define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
-
-/* 15th DWORD. */
-
-/*
- * (from JESD216 rev B)
- * Quad Enable Requirements (QER):
- * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
- * reads based on instruction. DQ3/HOLD# functions are hold during
- * instruction phase.
- * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- * Writing only one byte to the status register has the side-effect of
- * clearing status register 2, including the QE bit. The 100b code is
- * used if writing one byte to the status register does not modify
- * status register 2.
- * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
- * one data byte where bit 6 is one.
- * [...]
- * - 011b: QE is bit 7 of status register 2. It is set via Write status
- * register 2 instruction 3Eh with one data byte where bit 7 is one.
- * [...]
- * The status register 2 is read using instruction 3Fh.
- * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- * In contrast to the 001b code, writing one byte to the status
- * register does not modify status register 2.
- * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
- * Read Status instruction 05h. Status register2 is read using
- * instruction 35h. QE is set via Writ Status instruction 01h with
- * two data bytes where bit 1 of the second byte is one.
- * [...]
- */
-#define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
-#define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
-#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
-#define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
-#define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
-#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
-#define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
-
-struct sfdp_bfpt {
- u32 dwords[BFPT_DWORD_MAX];
-};
-
/* Fast Read settings. */
-static inline void
+static void
spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
u16 half,
enum spi_nor_protocol proto)
@@ -2464,8 +2573,6 @@ static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
{BFPT_DWORD(9), 16},
};
-static int spi_nor_hwcaps_read2cmd(u32 hwcaps);
-
/**
* spi_nor_set_erase_type() - set a SPI NOR erase type
* @erase: pointer to a structure that describes a SPI NOR erase type
@@ -2598,6 +2705,19 @@ static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
map->uniform_erase_type = erase_mask;
}
+static int
+spi_nor_post_bfpt_fixups(struct spi_nor *nor,
+ const struct sfdp_parameter_header *bfpt_header,
+ const struct sfdp_bfpt *bfpt,
+ struct spi_nor_flash_parameter *params)
+{
+ if (nor->info->fixups && nor->info->fixups->post_bfpt)
+ return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
+ params);
+
+ return 0;
+}
+
/**
* spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
* @nor: pointer to a 'struct spi_nor'
@@ -2750,7 +2870,8 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
/* Stop here if not JESD216 rev A or later. */
if (bfpt_header->length < BFPT_DWORD_MAX)
- return 0;
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
+ params);
/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
params->page_size = bfpt.dwords[BFPT_DWORD(11)];
@@ -2785,7 +2906,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
return -EINVAL;
}
- return 0;
+ return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
}
#define SMPT_CMD_ADDRESS_LEN_MASK GENMASK(23, 22)
@@ -2995,12 +3116,13 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
const u32 *smpt)
{
struct spi_nor_erase_map *map = &nor->erase_map;
- const struct spi_nor_erase_type *erase = map->erase_type;
+ struct spi_nor_erase_type *erase = map->erase_type;
struct spi_nor_erase_region *region;
u64 offset;
u32 region_count;
int i, j;
- u8 erase_type, uniform_erase_type;
+ u8 uniform_erase_type, save_uniform_erase_type;
+ u8 erase_type, regions_erase_type;
region_count = SMPT_MAP_REGION_COUNT(*smpt);
/*
@@ -3014,6 +3136,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
map->regions = region;
uniform_erase_type = 0xff;
+ regions_erase_type = 0;
offset = 0;
/* Populate regions. */
for (i = 0; i < region_count; i++) {
@@ -3030,13 +3153,38 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
*/
uniform_erase_type &= erase_type;
+ /*
+ * regions_erase_type mask will indicate all the erase types
+ * supported in this configuration map.
+ */
+ regions_erase_type |= erase_type;
+
offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
region[i].size;
}
+ save_uniform_erase_type = map->uniform_erase_type;
map->uniform_erase_type = spi_nor_sort_erase_mask(map,
uniform_erase_type);
+ if (!regions_erase_type) {
+ /*
+ * Roll back to the previous uniform_erase_type mask, SMPT is
+ * broken.
+ */
+ map->uniform_erase_type = save_uniform_erase_type;
+ return -EINVAL;
+ }
+
+ /*
+ * BFPT advertises all the erase types supported by all the possible
+ * map configurations. Mask out the erase types that are not supported
+ * by the current map configuration.
+ */
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+ if (!(regions_erase_type & BIT(erase[i].idx)))
+ spi_nor_set_erase_type(&erase[i], 0, 0xFF);
+
spi_nor_region_mark_end(&region[i - 1]);
return 0;
@@ -3064,7 +3212,7 @@ static int spi_nor_parse_smpt(struct spi_nor *nor,
/* Read the Sector Map Parameter Table. */
len = smpt_header->length * sizeof(*smpt);
- smpt = kzalloc(len, GFP_KERNEL);
+ smpt = kmalloc(len, GFP_KERNEL);
if (!smpt)
return -ENOMEM;
@@ -3094,6 +3242,191 @@ out:
return ret;
}
+#define SFDP_4BAIT_DWORD_MAX 2
+
+struct sfdp_4bait {
+ /* The hardware capability. */
+ u32 hwcaps;
+
+ /*
+ * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
+ * the associated 4-byte address op code is supported.
+ */
+ u32 supported_bit;
+};
+
+/**
+ * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
+ * @nor: pointer to a 'struct spi_nor'.
+ * @param_header: pointer to the 'struct sfdp_parameter_header' describing
+ * the 4-Byte Address Instruction Table length and version.
+ * @params: pointer to the 'struct spi_nor_flash_parameter' to be filled.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_4bait(struct spi_nor *nor,
+ const struct sfdp_parameter_header *param_header,
+ struct spi_nor_flash_parameter *params)
+{
+ static const struct sfdp_4bait reads[] = {
+ { SNOR_HWCAPS_READ, BIT(0) },
+ { SNOR_HWCAPS_READ_FAST, BIT(1) },
+ { SNOR_HWCAPS_READ_1_1_2, BIT(2) },
+ { SNOR_HWCAPS_READ_1_2_2, BIT(3) },
+ { SNOR_HWCAPS_READ_1_1_4, BIT(4) },
+ { SNOR_HWCAPS_READ_1_4_4, BIT(5) },
+ { SNOR_HWCAPS_READ_1_1_1_DTR, BIT(13) },
+ { SNOR_HWCAPS_READ_1_2_2_DTR, BIT(14) },
+ { SNOR_HWCAPS_READ_1_4_4_DTR, BIT(15) },
+ };
+ static const struct sfdp_4bait programs[] = {
+ { SNOR_HWCAPS_PP, BIT(6) },
+ { SNOR_HWCAPS_PP_1_1_4, BIT(7) },
+ { SNOR_HWCAPS_PP_1_4_4, BIT(8) },
+ };
+ static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
+ { 0u /* not used */, BIT(9) },
+ { 0u /* not used */, BIT(10) },
+ { 0u /* not used */, BIT(11) },
+ { 0u /* not used */, BIT(12) },
+ };
+ struct spi_nor_pp_command *params_pp = params->page_programs;
+ struct spi_nor_erase_map *map = &nor->erase_map;
+ struct spi_nor_erase_type *erase_type = map->erase_type;
+ u32 *dwords;
+ size_t len;
+ u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
+ int i, ret;
+
+ if (param_header->major != SFDP_JESD216_MAJOR ||
+ param_header->length < SFDP_4BAIT_DWORD_MAX)
+ return -EINVAL;
+
+ /* Read the 4-byte Address Instruction Table. */
+ len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;
+
+ /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+ dwords = kmalloc(len, GFP_KERNEL);
+ if (!dwords)
+ return -ENOMEM;
+
+ addr = SFDP_PARAM_HEADER_PTP(param_header);
+ ret = spi_nor_read_sfdp(nor, addr, len, dwords);
+ if (ret)
+ goto out; /* free the bounce buffer on error */
+
+ /* Fix endianness of the 4BAIT DWORDs. */
+ for (i = 0; i < SFDP_4BAIT_DWORD_MAX; i++)
+ dwords[i] = le32_to_cpu(dwords[i]);
+
+ /*
+ * Compute the subset of (Fast) Read commands for which the 4-byte
+ * version is supported.
+ */
+ discard_hwcaps = 0;
+ read_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(reads); i++) {
+ const struct sfdp_4bait *read = &reads[i];
+
+ discard_hwcaps |= read->hwcaps;
+ if ((params->hwcaps.mask & read->hwcaps) &&
+ (dwords[0] & read->supported_bit))
+ read_hwcaps |= read->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Page Program commands for which the 4-byte
+ * version is supported.
+ */
+ pp_hwcaps = 0;
+ for (i = 0; i < ARRAY_SIZE(programs); i++) {
+ const struct sfdp_4bait *program = &programs[i];
+
+ /*
+ * The 4 Byte Address Instruction (Optional) Table is the only
+ * SFDP table that indicates support for Page Program Commands.
+ * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
+ * authority for specifying Page Program support.
+ */
+ discard_hwcaps |= program->hwcaps;
+ if (dwords[0] & program->supported_bit)
+ pp_hwcaps |= program->hwcaps;
+ }
+
+ /*
+ * Compute the subset of Sector Erase commands for which the 4-byte
+ * version is supported.
+ */
+ erase_mask = 0;
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ const struct sfdp_4bait *erase = &erases[i];
+
+ if (dwords[0] & erase->supported_bit)
+ erase_mask |= BIT(i);
+ }
+
+ /* Replicate the sort done for the map's erase types in BFPT. */
+ erase_mask = spi_nor_sort_erase_mask(map, erase_mask);
+
+ /*
+ * We need at least one 4-byte op code per read, program and erase
+ * operation; the .read(), .write() and .erase() hooks share the
+ * nor->addr_width value.
+ */
+ if (!read_hwcaps || !pp_hwcaps || !erase_mask)
+ goto out;
+
+ /*
+ * Discard all operations from the 4-byte instruction set which are
+ * not supported by this memory.
+ */
+ params->hwcaps.mask &= ~discard_hwcaps;
+ params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);
+
+ /* Use the 4-byte address instruction set. */
+ for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
+ struct spi_nor_read_command *read_cmd = &params->reads[i];
+
+ read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
+ }
+
+ /* 4BAIT is the only SFDP table that indicates page program support. */
+ if (pp_hwcaps & SNOR_HWCAPS_PP)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
+ SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
+ SPINOR_OP_PP_1_1_4_4B,
+ SNOR_PROTO_1_1_4);
+ if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
+ spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
+ SPINOR_OP_PP_1_4_4_4B,
+ SNOR_PROTO_1_4_4);
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+ if (erase_mask & BIT(i))
+ erase_type[i].opcode = (dwords[1] >>
+ erase_type[i].idx * 8) & 0xFF;
+ else
+ spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
+ }
+
+ /*
+ * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
+ * later because we already did the conversion to 4-byte opcodes. Also,
+ * the latter function implements a legacy quirk for the erase size of
+ * Spansion memories. However, this quirk is no longer needed with new
+ * SFDP-compliant memories.
+ */
+ nor->addr_width = 4;
+ nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;
+
+ /* fall through */
+out:
+ kfree(dwords);
+ return ret;
+}
+
/**
* spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
* @nor: pointer to a 'struct spi_nor'
@@ -3191,6 +3524,10 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
err = spi_nor_parse_smpt(nor, param_header);
break;
+ case SFDP_4BAIT_ID:
+ err = spi_nor_parse_4bait(nor, param_header, params);
+ break;
+
default:
break;
}
@@ -3214,17 +3551,17 @@ exit:
}
static int spi_nor_init_params(struct spi_nor *nor,
- const struct flash_info *info,
struct spi_nor_flash_parameter *params)
{
struct spi_nor_erase_map *map = &nor->erase_map;
+ const struct flash_info *info = nor->info;
u8 i, erase_mask;
/* Set legacy flash parameters as default. */
memset(params, 0, sizeof(*params));
/* Set SPI NOR sizes. */
- params->size = info->sector_size * info->n_sectors;
+ params->size = (u64)info->sector_size * info->n_sectors;
params->page_size = info->page_size;
/* (Fast) Read settings. */
@@ -3289,6 +3626,7 @@ static int spi_nor_init_params(struct spi_nor *nor,
params->quad_enable = macronix_quad_enable;
break;
+ case SNOR_MFR_ST:
case SNOR_MFR_MICRON:
break;
@@ -3318,6 +3656,7 @@ static int spi_nor_init_params(struct spi_nor *nor,
if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
nor->addr_width = 0;
+ nor->flags &= ~SNOR_F_4B_OPCODES;
/* restore previous erase map */
memcpy(&nor->erase_map, &prev_map,
sizeof(nor->erase_map));
@@ -3329,57 +3668,6 @@ static int spi_nor_init_params(struct spi_nor *nor,
return 0;
}
-static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
-{
- size_t i;
-
- for (i = 0; i < size; i++)
- if (table[i][0] == (int)hwcaps)
- return table[i][1];
-
- return -EINVAL;
-}
-
-static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
-{
- static const int hwcaps_read2cmd[][2] = {
- { SNOR_HWCAPS_READ, SNOR_CMD_READ },
- { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
- { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
- { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
- { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
- { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
- { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
- { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
- { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
- { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
- { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
- { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
- { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
- { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
- { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
- };
-
- return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
- ARRAY_SIZE(hwcaps_read2cmd));
-}
-
-static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
-{
- static const int hwcaps_pp2cmd[][2] = {
- { SNOR_HWCAPS_PP, SNOR_CMD_PP },
- { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
- { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
- { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
- { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
- { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
- { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
- };
-
- return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
- ARRAY_SIZE(hwcaps_pp2cmd));
-}
-
static int spi_nor_select_read(struct spi_nor *nor,
const struct spi_nor_flash_parameter *params,
u32 shared_hwcaps)
@@ -3532,7 +3820,7 @@ static int spi_nor_select_erase(struct spi_nor *nor, u32 wanted_size)
return 0;
}
-static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
+static int spi_nor_setup(struct spi_nor *nor,
const struct spi_nor_flash_parameter *params,
const struct spi_nor_hwcaps *hwcaps)
{
@@ -3575,7 +3863,7 @@ static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
}
/* Select the Sector Erase command. */
- err = spi_nor_select_erase(nor, info->sector_size);
+ err = spi_nor_select_erase(nor, nor->info->sector_size);
if (err) {
dev_err(nor->dev,
"can't select erase settings supported by both the SPI controller and memory.\n");
@@ -3618,9 +3906,7 @@ static int spi_nor_init(struct spi_nor *nor)
}
}
- if ((nor->addr_width == 4) &&
- (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
- !(nor->info->flags & SPI_NOR_4B_OPCODES)) {
+ if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
/*
* If the RESET# pin isn't hooked up properly, or the system
* otherwise doesn't perform a reset command in the boot
@@ -3630,7 +3916,7 @@ static int spi_nor_init(struct spi_nor *nor)
*/
WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
"enabling reset hack; may not recover from unexpected reboots\n");
- set_4byte(nor, nor->info, 1);
+ set_4byte(nor, true);
}
return 0;
@@ -3652,14 +3938,24 @@ static void spi_nor_resume(struct mtd_info *mtd)
void spi_nor_restore(struct spi_nor *nor)
{
/* restore the addressing mode */
- if ((nor->addr_width == 4) &&
- (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
- !(nor->info->flags & SPI_NOR_4B_OPCODES) &&
- (nor->flags & SNOR_F_BROKEN_RESET))
- set_4byte(nor, nor->info, 0);
+ if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
+ nor->flags & SNOR_F_BROKEN_RESET)
+ set_4byte(nor, false);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);
+static const struct flash_info *spi_nor_match_id(const char *name)
+{
+ const struct flash_info *id = spi_nor_ids;
+
+ while (id->name) {
+ if (!strcmp(name, id->name))
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
int spi_nor_scan(struct spi_nor *nor, const char *name,
const struct spi_nor_hwcaps *hwcaps)
{
@@ -3712,6 +4008,8 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
}
}
+ nor->info = info;
+
mutex_init(&nor->lock);
/*
@@ -3723,7 +4021,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
nor->flags |= SNOR_F_READY_XSR_RDY;
/* Parse the Serial Flash Discoverable Parameters table. */
- ret = spi_nor_init_params(nor, info, &params);
+ ret = spi_nor_init_params(nor, &params);
if (ret)
return ret;
@@ -3739,8 +4037,9 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
mtd->_resume = spi_nor_resume;
/* NOR protection support for STmicro/Micron chips and similar */
- if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
- info->flags & SPI_NOR_HAS_LOCK) {
+ if (JEDEC_MFR(info) == SNOR_MFR_ST ||
+ JEDEC_MFR(info) == SNOR_MFR_MICRON ||
+ info->flags & SPI_NOR_HAS_LOCK) {
nor->flash_lock = stm_lock;
nor->flash_unlock = stm_unlock;
nor->flash_is_locked = stm_is_locked;
@@ -3799,7 +4098,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
* - set the SPI protocols for register and memory accesses.
* - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
*/
- ret = spi_nor_setup(nor, info, &params, hwcaps);
+ ret = spi_nor_setup(nor, &params, hwcaps);
if (ret)
return ret;
@@ -3810,13 +4109,18 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
} else if (mtd->size > 0x1000000) {
/* enable 4-byte addressing if the device exceeds 16MiB */
nor->addr_width = 4;
- if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
- info->flags & SPI_NOR_4B_OPCODES)
- spi_nor_set_4byte_opcodes(nor, info);
} else {
nor->addr_width = 3;
}
+ if (info->flags & SPI_NOR_4B_OPCODES ||
+ (JEDEC_MFR(info) == SNOR_MFR_SPANSION && mtd->size > SZ_16M))
+ nor->flags |= SNOR_F_4B_OPCODES;
+
+ if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
+ !(nor->flags & SNOR_F_HAS_4BAIT))
+ spi_nor_set_4byte_opcodes(nor);
+
if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
dev_err(dev, "address width is too large: %u\n",
nor->addr_width);
@@ -3824,13 +4128,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
}
if (info->flags & SPI_S3AN) {
- ret = s3an_nor_scan(info, nor);
+ ret = s3an_nor_scan(nor);
if (ret)
return ret;
}
/* Send all the required SPI flash commands to initialize device */
- nor->info = info;
ret = spi_nor_init(nor);
if (ret)
return ret;
@@ -3858,19 +4161,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
-static const struct flash_info *spi_nor_match_id(const char *name)
-{
- const struct flash_info *id = spi_nor_ids;
-
- while (id->name) {
- if (!strcmp(name, id->name))
- return id;
- id++;
- }
- return NULL;
-}
-
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a4e3454133a4..09170b707339 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1101,10 +1101,10 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
- put_mtd_device(ubi->mtd);
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
+ put_mtd_device(ubi->mtd);
put_device(&ubi->dev);
return 0;
}
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index e9e9ecbcedcc..0b8f0c46268d 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -227,9 +227,9 @@ out_unlock:
out_free:
kfree(desc);
out_put_ubi:
- ubi_put_device(ubi);
ubi_err(ubi, "cannot open device %d, volume %d, error %d",
ubi_num, vol_id, err);
+ ubi_put_device(ubi);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ubi_open_volume);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d03775100f7d..6371958dd170 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -397,10 +397,10 @@ config NET_SB1000
At present this driver only compiles as a module, so say M here if
you have this card. The module will be called sb1000. Then read
- <file:Documentation/networking/README.sb1000> for information on how
- to use this module, as it needs special ppp scripts for establishing
- a connection. Further documentation and the necessary scripts can be
- found at:
+ <file:Documentation/networking/device_drivers/sb1000.txt> for
+ information on how to use this module, as it needs special ppp
+ scripts for establishing a connection. Further documentation
+ and the necessary scripts can be found at:
<http://www.jacksonville.net/~fventuri/>
<http://home.adelphia.net/~siglercm/sb1000.html>
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index bb49f6e40a19..f90bb723985f 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -777,10 +777,7 @@ static void cops_rx(struct net_device *dev)
}
/* Get response length. */
- if(lp->board==DAYNA)
- pkt_len = inb(ioaddr) & 0xFF;
- else
- pkt_len = inb(ioaddr) & 0x00FF;
+ pkt_len = inb(ioaddr);
pkt_len |= (inb(ioaddr) << 8);
/* Input IO code. */
rsp_type=inb(ioaddr);
@@ -892,10 +889,7 @@ static netdev_tx_t cops_send_packet(struct sk_buff *skb,
/* Output IO length. */
outb(skb->len, ioaddr);
- if(lp->board == DAYNA)
- outb(skb->len >> 8, ioaddr);
- else
- outb((skb->len >> 8)&0x0FF, ioaddr);
+ outb(skb->len >> 8, ioaddr);
/* Output IO code. */
outb(LAP_WRITE, ioaddr);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index f43fb2f958a5..7c46d9f4fefd 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1220,7 +1220,7 @@ static void ad_churn_machine(struct port *port)
port->sm_churn_partner_state = AD_CHURN_MONITOR;
port->sm_churn_actor_timer_counter =
__ad_timer_to_ticks(AD_ACTOR_CHURN_TIMER, 0);
- port->sm_churn_partner_timer_counter =
+ port->sm_churn_partner_timer_counter =
__ad_timer_to_ticks(AD_PARTNER_CHURN_TIMER, 0);
return;
}
@@ -2086,6 +2086,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
aggregator->aggregator_identifier);
/* Tell the partner that this port is not suitable for aggregation */
+ port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
__update_lacpdu_from_port(port);
ad_lacpdu_send(port);
@@ -2125,7 +2128,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
if ((new_aggregator->lag_ports == port) &&
new_aggregator->is_active) {
netdev_info(bond->dev, "Removing an active aggregator\n");
- select_new_active_agg = 1;
+ select_new_active_agg = 1;
}
new_aggregator->is_individual = aggregator->is_individual;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index e82108c917a6..9431127bbc60 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1031,7 +1031,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[],
*/
memcpy(ss.__data, addr, len);
ss.ss_family = dev->type;
- if (dev_set_mac_address(dev, (struct sockaddr *)&ss)) {
+ if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
netdev_err(slave->bond->dev, "dev_set_mac_address of dev %s failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
dev->name);
return -EOPNOTSUPP;
@@ -1250,7 +1250,7 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
bond_hw_addr_copy(tmp_addr, slave->dev->dev_addr,
slave->dev->addr_len);
- res = dev_set_mac_address(slave->dev, addr);
+ res = dev_set_mac_address(slave->dev, addr, NULL);
/* restore net_device's hw address */
bond_hw_addr_copy(slave->dev->dev_addr, tmp_addr,
@@ -1273,7 +1273,7 @@ unwind:
bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
rollback_slave->dev->addr_len);
dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&ss);
+ (struct sockaddr *)&ss, NULL);
bond_hw_addr_copy(rollback_slave->dev->dev_addr, tmp_addr,
rollback_slave->dev->addr_len);
}
@@ -1732,7 +1732,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
bond->dev->addr_len);
ss.ss_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
- dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss);
+ dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
+ NULL);
bond_hw_addr_copy(new_slave->dev->dev_addr, tmp_addr,
new_slave->dev->addr_len);
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 3868e1a5126d..1360f1ffe070 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -45,19 +45,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
return 0;
}
-
-static int bond_debug_rlb_hash_open(struct inode *inode, struct file *file)
-{
- return single_open(file, bond_debug_rlb_hash_show, inode->i_private);
-}
-
-static const struct file_operations bond_debug_rlb_hash_fops = {
- .owner = THIS_MODULE,
- .open = bond_debug_rlb_hash_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(bond_debug_rlb_hash);
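
For reference, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> expands to roughly the boilerplate removed above, generating both the open() helper and the file_operations from the _show() function name:

    static int bond_debug_rlb_hash_open(struct inode *inode, struct file *file)
    {
        return single_open(file, bond_debug_rlb_hash_show, inode->i_private);
    }

    static const struct file_operations bond_debug_rlb_hash_fops = {
        .owner   = THIS_MODULE,
        .open    = bond_debug_rlb_hash_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };
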
void bond_debug_register(struct bonding *bond)
{
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 333387f1f1fe..a9d597f28023 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -609,14 +609,21 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
*
* Should be called with RTNL held.
*/
-static void bond_set_dev_addr(struct net_device *bond_dev,
- struct net_device *slave_dev)
+static int bond_set_dev_addr(struct net_device *bond_dev,
+ struct net_device *slave_dev)
{
+ int err;
+
netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
+ err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
+ if (err)
+ return err;
+
memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
bond_dev->addr_assign_type = NET_ADDR_STOLEN;
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
+ return 0;
}
static struct slave *bond_get_old_active(struct bonding *bond,
@@ -652,8 +659,12 @@ static void bond_do_fail_over_mac(struct bonding *bond,
switch (bond->params.fail_over_mac) {
case BOND_FOM_ACTIVE:
- if (new_active)
- bond_set_dev_addr(bond->dev, new_active->dev);
+ if (new_active) {
+ rv = bond_set_dev_addr(bond->dev, new_active->dev);
+ if (rv)
+ netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
+ -rv, bond->dev->name);
+ }
break;
case BOND_FOM_FOLLOW:
/* if new_active && old_active, swap them
@@ -680,7 +691,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
}
rv = dev_set_mac_address(new_active->dev,
- (struct sockaddr *)&ss);
+ (struct sockaddr *)&ss, NULL);
if (rv) {
netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
-rv, new_active->dev->name);
@@ -695,7 +706,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
ss.ss_family = old_active->dev->type;
rv = dev_set_mac_address(old_active->dev,
- (struct sockaddr *)&ss);
+ (struct sockaddr *)&ss, NULL);
if (rv)
netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
-rv, new_active->dev->name);
@@ -1489,8 +1500,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
* address to be the same as the slave's.
*/
if (!bond_has_slaves(bond) &&
- bond->dev->addr_assign_type == NET_ADDR_RANDOM)
- bond_set_dev_addr(bond->dev, slave_dev);
+ bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
+ res = bond_set_dev_addr(bond->dev, slave_dev);
+ if (res)
+ goto err_undo_flags;
+ }
new_slave = bond_alloc_slave(bond);
if (!new_slave) {
@@ -1527,7 +1541,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
*/
memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
ss.ss_family = slave_dev->type;
- res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
+ res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
+ extack);
if (res) {
netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
goto err_restore_mtu;
@@ -1538,7 +1553,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
slave_dev->flags |= IFF_SLAVE;
/* open the slave since the application closed it */
- res = dev_open(slave_dev);
+ res = dev_open(slave_dev, extack);
if (res) {
netdev_dbg(bond_dev, "Opening slave %s failed\n", slave_dev->name);
goto err_restore_mac;
@@ -1818,7 +1833,7 @@ err_restore_mac:
bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
new_slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
+ dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
}
err_restore_mtu:
@@ -1999,7 +2014,7 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
slave->dev->addr_len);
ss.ss_family = slave_dev->type;
- dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
+ dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
}
if (unregister)
@@ -3544,8 +3559,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
break;
case BOND_SETHWADDR_OLD:
case SIOCBONDSETHWADDR:
- bond_set_dev_addr(bond_dev, slave_dev);
- res = 0;
+ res = bond_set_dev_addr(bond_dev, slave_dev);
break;
case BOND_CHANGE_ACTIVE_OLD:
case SIOCBONDCHANGEACTIVE:
@@ -3732,7 +3746,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
bond_for_each_slave(bond, slave, iter) {
netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name);
- res = dev_set_mac_address(slave->dev, addr);
+ res = dev_set_mac_address(slave->dev, addr, NULL);
if (res) {
/* TODO: consider downing the slave
* and retry ?
@@ -3761,7 +3775,7 @@ unwind:
break;
tmp_res = dev_set_mac_address(rollback_slave->dev,
- (struct sockaddr *)&tmp_ss);
+ (struct sockaddr *)&tmp_ss, NULL);
if (tmp_res) {
netdev_dbg(bond_dev, "unwind err %d dev %s\n",
tmp_res, rollback_slave->dev->name);
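The extra argument threaded through dev_set_mac_address() is a netlink extack, and dev_pre_changeaddr_notify() lets listeners veto an address before it is committed, which is why bond_set_dev_addr() can now fail. A minimal sketch of such a listener, assuming the netdev_notifier_pre_changeaddr_info structure introduced with this series:

    static int sketch_pre_changeaddr(struct notifier_block *nb,
    				 unsigned long event, void *ptr)
    {
    	struct netdev_notifier_pre_changeaddr_info *info = ptr;

    	if (event != NETDEV_PRE_CHANGEADDR)
    		return NOTIFY_DONE;

    	/* info->dev_addr is the address about to be installed; an
    	 * errno returned here propagates back out of
    	 * dev_pre_changeaddr_notify() and aborts the change.
    	 */
    	if (is_multicast_ether_addr(info->dev_addr))
    		return notifier_from_errno(-EINVAL);

    	return NOTIFY_DONE;
    }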
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 7cdd0cead693..e0f0ad7a550a 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -96,7 +96,7 @@ config CAN_AT91
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
- depends on ARM || PPC
+ depends on OF && HAS_IOMEM
---help---
 	  Say Y here if you want support for Freescale FlexCAN.
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 75ce11395ee8..0f36eafe3ac1 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -19,11 +19,13 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
#define DRV_NAME "flexcan"
@@ -131,16 +133,15 @@
(FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
#define FLEXCAN_ESR_ALL_INT \
(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \
- FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
+ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT | \
+ FLEXCAN_ESR_WAK_INT)
/* FLEXCAN interrupt flag register (IFLAG) bits */
/* Errata ERR005829 step7: Reserve first valid MB */
#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO 8
#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP 0
-#define FLEXCAN_TX_MB 63
#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST (FLEXCAN_TX_MB - 1)
-#define FLEXCAN_IFLAG_MB(x) BIT(x & 0x1f)
+#define FLEXCAN_IFLAG_MB(x) BIT((x) & 0x1f)
#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
#define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6)
#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5)
@@ -189,12 +190,13 @@
#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) /* default to BE register access */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE BIT(8) /* Setup stop mode to support wakeup */
/* Structure of the message buffer */
struct flexcan_mb {
u32 can_ctrl;
u32 can_id;
- u32 data[2];
+ u32 data[];
};
/* Structure of the hardware registers */
@@ -223,7 +225,7 @@ struct flexcan_regs {
u32 rxfgmask; /* 0x48 */
u32 rxfir; /* 0x4c */
u32 _reserved3[12]; /* 0x50 */
- struct flexcan_mb mb[64]; /* 0x80 */
+ u8 mb[2][512]; /* 0x80 */
/* FIFO-mode:
* MB
* 0x080...0x08f 0 RX message buffer
@@ -253,12 +255,24 @@ struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
};
+struct flexcan_stop_mode {
+ struct regmap *gpr;
+ u8 req_gpr;
+ u8 req_bit;
+ u8 ack_gpr;
+ u8 ack_bit;
+};
+
struct flexcan_priv {
struct can_priv can;
struct can_rx_offload offload;
struct flexcan_regs __iomem *regs;
+ struct flexcan_mb __iomem *tx_mb;
struct flexcan_mb __iomem *tx_mb_reserved;
+ u8 tx_mb_idx;
+ u8 mb_count;
+ u8 mb_size;
u32 reg_ctrl_default;
u32 reg_imask1_default;
u32 reg_imask2_default;
@@ -267,6 +281,7 @@ struct flexcan_priv {
struct clk *clk_per;
const struct flexcan_devtype_data *devtype_data;
struct regulator *reg_xceiver;
+ struct flexcan_stop_mode stm;
/* Read and Write APIs */
u32 (*read)(void __iomem *addr);
@@ -290,7 +305,8 @@ static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SETUP_STOP_MODE,
};
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
@@ -350,6 +366,68 @@ static inline void flexcan_write_le(u32 val, void __iomem *addr)
iowrite32(val, addr);
}
+static struct flexcan_mb __iomem *flexcan_get_mb(const struct flexcan_priv *priv,
+ u8 mb_index)
+{
+ u8 bank_size;
+ bool bank;
+
+ if (WARN_ON(mb_index >= priv->mb_count))
+ return NULL;
+
+ bank_size = sizeof(priv->regs->mb[0]) / priv->mb_size;
+
+ bank = mb_index >= bank_size;
+ if (bank)
+ mb_index -= bank_size;
+
+ return (struct flexcan_mb __iomem *)
+ (&priv->regs->mb[bank][priv->mb_size * mb_index]);
+}
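flexcan_get_mb() maps a flat mailbox index onto the two 512-byte mailbox RAM banks declared in struct flexcan_regs. A worked example for the classic 8-byte payload, using the mb_size/mb_count values computed later in flexcan_open():

    /* mb_size   = sizeof(struct flexcan_mb) + CAN_MAX_DLEN = 8 + 8 = 16
     * bank_size = sizeof(regs->mb[0]) / mb_size = 512 / 16 = 32
     * mb_count  = 32 + 32 = 64
     *
     * Mailbox 63 is therefore bank 1, slot 63 - 32 = 31, and the
     * returned pointer is &regs->mb[1][16 * 31].
     */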
+
+static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
+{
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_mcr;
+
+ reg_mcr = priv->read(&regs->mcr);
+
+ if (enable)
+ reg_mcr |= FLEXCAN_MCR_WAK_MSK;
+ else
+ reg_mcr &= ~FLEXCAN_MCR_WAK_MSK;
+
+ priv->write(reg_mcr, &regs->mcr);
+}
+
+static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_mcr;
+
+ reg_mcr = priv->read(&regs->mcr);
+ reg_mcr |= FLEXCAN_MCR_SLF_WAK;
+ priv->write(reg_mcr, &regs->mcr);
+
+ /* enable stop request */
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+}
+
+static inline void flexcan_exit_stop_mode(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_mcr;
+
+ /* remove stop request */
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 0);
+
+ reg_mcr = priv->read(&regs->mcr);
+ reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
+ priv->write(reg_mcr, &regs->mcr);
+}
+
static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
@@ -512,11 +590,11 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
- struct flexcan_regs __iomem *regs = priv->regs;
struct can_frame *cf = (struct can_frame *)skb->data;
u32 can_id;
u32 data;
u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
+ int i;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
@@ -533,27 +611,23 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
if (cf->can_id & CAN_RTR_FLAG)
ctrl |= FLEXCAN_MB_CNT_RTR;
- if (cf->can_dlc > 0) {
- data = be32_to_cpup((__be32 *)&cf->data[0]);
- priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[0]);
- }
- if (cf->can_dlc > 4) {
- data = be32_to_cpup((__be32 *)&cf->data[4]);
- priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[1]);
+ for (i = 0; i < cf->can_dlc; i += sizeof(u32)) {
+ data = be32_to_cpup((__be32 *)&cf->data[i]);
+ priv->write(data, &priv->tx_mb->data[i / sizeof(u32)]);
}
can_put_echo_skb(skb, dev, 0);
- priv->write(can_id, &regs->mb[FLEXCAN_TX_MB].can_id);
- priv->write(ctrl, &regs->mb[FLEXCAN_TX_MB].can_ctrl);
+ priv->write(can_id, &priv->tx_mb->can_id);
+ priv->write(ctrl, &priv->tx_mb->can_ctrl);
/* Errata ERR005829 step8:
* Write twice INACTIVE(0x8) code to first MB.
*/
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb_reserved->can_ctrl);
+ &priv->tx_mb_reserved->can_ctrl);
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &priv->tx_mb_reserved->can_ctrl);
+ &priv->tx_mb_reserved->can_ctrl);
return NETDEV_TX_OK;
}
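The rewritten copy loop rounds the DLC up to whole 32-bit words instead of hard-coding two writes, which is what lets the same code later serve mailboxes with larger payloads. A small host-side demonstration of the loop bound (plain userspace C, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned int dlc, i, words;

    	for (dlc = 0; dlc <= 8; dlc++) {
    		words = 0;
    		for (i = 0; i < dlc; i += sizeof(uint32_t))
    			words++;	/* one register write per word */
    		printf("dlc=%u -> %u write(s)\n", dlc, words);
    	}
    	return 0;
    }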
@@ -672,8 +746,11 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
{
struct flexcan_priv *priv = rx_offload_to_priv(offload);
struct flexcan_regs __iomem *regs = priv->regs;
- struct flexcan_mb __iomem *mb = &regs->mb[n];
+ struct flexcan_mb __iomem *mb;
u32 reg_ctrl, reg_id, reg_iflag1;
+ int i;
+
+ mb = flexcan_get_mb(priv, n);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
u32 code;
@@ -714,8 +791,10 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
cf->can_id |= CAN_RTR_FLAG;
cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
- *(__be32 *)(cf->data + 0) = cpu_to_be32(priv->read(&mb->data[0]));
- *(__be32 *)(cf->data + 4) = cpu_to_be32(priv->read(&mb->data[1]));
+ for (i = 0; i < cf->can_dlc; i += sizeof(u32)) {
+ __be32 data = cpu_to_be32(priv->read(&mb->data[i / sizeof(u32)]));
+ *(__be32 *)(cf->data + i) = data;
+ }
/* mark as read */
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@ -744,7 +823,7 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
u32 iflag1, iflag2;
iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
- ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
+ ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
return (u64)iflag2 << 32 | iflag1;
@@ -794,8 +873,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
reg_iflag2 = priv->read(&regs->iflag2);
/* transmission complete interrupt */
- if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) {
- u32 reg_ctrl = priv->read(&regs->mb[FLEXCAN_TX_MB].can_ctrl);
+ if (reg_iflag2 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+ u32 reg_ctrl = priv->read(&priv->tx_mb->can_ctrl);
handled = IRQ_HANDLED;
stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
@@ -805,8 +884,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
/* after sending a RTR frame MB is in RX mode */
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &regs->mb[FLEXCAN_TX_MB].can_ctrl);
- priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), &regs->iflag2);
+ &priv->tx_mb->can_ctrl);
+ priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag2);
netif_wake_queue(dev);
}
@@ -821,7 +900,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
/* state change interrupt or broken error state quirk fix is enabled */
if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
(priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
- FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
flexcan_irq_state(dev, reg_esr);
/* bus error IRQ - handle if bus error reporting is activated */
@@ -919,6 +998,7 @@ static int flexcan_chip_start(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->regs;
u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
int err, i;
+ struct flexcan_mb __iomem *mb;
/* enable module */
err = flexcan_chip_enable(priv);
@@ -935,11 +1015,9 @@ static int flexcan_chip_start(struct net_device *dev)
/* MCR
*
* enable freeze
- * enable fifo
* halt now
* only supervisor access
* enable warning int
- * disable local echo
* enable individual RX masking
* choose format C
* set max mailbox number
@@ -947,14 +1025,37 @@ static int flexcan_chip_start(struct net_device *dev)
reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
- FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
- FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB);
+ FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | FLEXCAN_MCR_IDAM_C |
+ FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
+ /* MCR
+ *
+ * FIFO:
+ * - disable for timestamp mode
+ * - enable for FIFO mode
+ */
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
reg_mcr &= ~FLEXCAN_MCR_FEN;
else
reg_mcr |= FLEXCAN_MCR_FEN;
+ /* MCR
+ *
+ * NOTE: In loopback mode, the CAN_MCR[SRXDIS] cannot be
+ * asserted because this will impede the self reception
+ * of a transmitted message. This is not documented in
+	 * earlier versions of the FlexCAN block guide.
+ *
+ * Self Reception:
+ * - enable Self Reception for loopback mode
+ * (by clearing "Self Reception Disable" bit)
+ * - disable for normal operation
+ */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ reg_mcr &= ~FLEXCAN_MCR_SRX_DIS;
+ else
+ reg_mcr |= FLEXCAN_MCR_SRX_DIS;
+
netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
priv->write(reg_mcr, &regs->mcr);
@@ -999,14 +1100,16 @@ static int flexcan_chip_start(struct net_device *dev)
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
+ mb = flexcan_get_mb(priv, i);
priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
- &regs->mb[i].can_ctrl);
+ &mb->can_ctrl);
}
} else {
/* clear and invalidate unused mailboxes first */
- for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
+		for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < priv->mb_count; i++) {
+ mb = flexcan_get_mb(priv, i);
priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
- &regs->mb[i].can_ctrl);
+ &mb->can_ctrl);
}
}
@@ -1016,7 +1119,7 @@ static int flexcan_chip_start(struct net_device *dev)
/* mark TX mailbox as INACTIVE */
priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
- &regs->mb[FLEXCAN_TX_MB].can_ctrl);
+ &priv->tx_mb->can_ctrl);
/* acceptance mask/acceptance code (accept everything) */
priv->write(0x0, &regs->rxgmask);
@@ -1027,7 +1130,7 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(0x0, &regs->rxfgmask);
/* clear acceptance filters */
- for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
+ for (i = 0; i < priv->mb_count; i++)
priv->write(0, &regs->rximr[i]);
/* On Vybrid, disable memory error detection interrupts
@@ -1128,10 +1231,49 @@ static int flexcan_open(struct net_device *dev)
if (err)
goto out_close;
+ priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
+ priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
+ (sizeof(priv->regs->mb[1]) / priv->mb_size);
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+ priv->tx_mb_reserved =
+ flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP);
+ else
+ priv->tx_mb_reserved =
+ flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO);
+ priv->tx_mb_idx = priv->mb_count - 1;
+ priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx);
+
+ priv->reg_imask1_default = 0;
+ priv->reg_imask2_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+
+ priv->offload.mailbox_read = flexcan_mailbox_read;
+
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+ u64 imask;
+
+ priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
+ priv->offload.mb_last = priv->mb_count - 2;
+
+ imask = GENMASK_ULL(priv->offload.mb_last,
+ priv->offload.mb_first);
+ priv->reg_imask1_default |= imask;
+ priv->reg_imask2_default |= imask >> 32;
+
+ err = can_rx_offload_add_timestamp(dev, &priv->offload);
+ } else {
+ priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
+ FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
+ err = can_rx_offload_add_fifo(dev, &priv->offload,
+ FLEXCAN_NAPI_WEIGHT);
+ }
+ if (err)
+ goto out_free_irq;
+
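For the 8-byte-payload case in timestamp mode, the assignments above work out to the following mailbox layout (a consequence of the code, spelled out for reference):

    /* mb 0      reserved per errata ERR005829 (tx_mb_reserved)
     * mb 1..62  RX, owned by rx-offload; imask = GENMASK_ULL(62, 1)
     * mb 63     TX, tx_mb_idx = mb_count - 1
     */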
/* start chip and queuing */
err = flexcan_chip_start(dev);
if (err)
- goto out_free_irq;
+ goto out_offload_del;
can_led_event(dev, CAN_LED_EVENT_OPEN);
@@ -1140,6 +1282,8 @@ static int flexcan_open(struct net_device *dev)
return 0;
+ out_offload_del:
+ can_rx_offload_del(&priv->offload);
out_free_irq:
free_irq(dev->irq, dev);
out_close:
@@ -1160,6 +1304,7 @@ static int flexcan_close(struct net_device *dev)
can_rx_offload_disable(&priv->offload);
flexcan_chip_stop(dev);
+ can_rx_offload_del(&priv->offload);
free_irq(dev->irq, dev);
clk_disable_unprepare(priv->clk_per);
clk_disable_unprepare(priv->clk_ipg);
@@ -1260,6 +1405,59 @@ static void unregister_flexcandev(struct net_device *dev)
unregister_candev(dev);
}
+static int flexcan_setup_stop_mode(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *gpr_np;
+ struct flexcan_priv *priv;
+ phandle phandle;
+ u32 out_val[5];
+ int ret;
+
+ if (!np)
+ return -EINVAL;
+
+ /* stop mode property format is:
+ * <&gpr req_gpr req_bit ack_gpr ack_bit>.
+ */
+ ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
+ ARRAY_SIZE(out_val));
+ if (ret) {
+ dev_dbg(&pdev->dev, "no stop-mode property\n");
+ return ret;
+ }
+ phandle = *out_val;
+
+ gpr_np = of_find_node_by_phandle(phandle);
+ if (!gpr_np) {
+ dev_dbg(&pdev->dev, "could not find gpr node by phandle\n");
+		return -ENODEV;
+ }
+
+ priv = netdev_priv(dev);
+ priv->stm.gpr = syscon_node_to_regmap(gpr_np);
+ of_node_put(gpr_np);
+ if (IS_ERR(priv->stm.gpr)) {
+ dev_dbg(&pdev->dev, "could not find gpr regmap\n");
+ return PTR_ERR(priv->stm.gpr);
+ }
+
+ priv->stm.req_gpr = out_val[1];
+ priv->stm.req_bit = out_val[2];
+ priv->stm.ack_gpr = out_val[3];
+ priv->stm.ack_bit = out_val[4];
+
+ dev_dbg(&pdev->dev,
+		"gpr %s req_gpr=0x%02x req_bit=%u ack_gpr=0x%02x ack_bit=%u\n",
+ gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit,
+ priv->stm.ack_gpr, priv->stm.ack_bit);
+
+ device_set_wakeup_capable(&pdev->dev, true);
+
+ return 0;
+}
+
static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
@@ -1371,35 +1569,6 @@ static int flexcan_probe(struct platform_device *pdev)
priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
- priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
- else
- priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
-
- priv->reg_imask1_default = 0;
- priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
-
- priv->offload.mailbox_read = flexcan_mailbox_read;
-
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
- u64 imask;
-
- priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
- priv->offload.mb_last = FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST;
-
- imask = GENMASK_ULL(priv->offload.mb_last, priv->offload.mb_first);
- priv->reg_imask1_default |= imask;
- priv->reg_imask2_default |= imask >> 32;
-
- err = can_rx_offload_add_timestamp(dev, &priv->offload);
- } else {
- priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
- FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
- err = can_rx_offload_add_fifo(dev, &priv->offload, FLEXCAN_NAPI_WEIGHT);
- }
- if (err)
- goto failed_offload;
-
err = register_flexcandev(dev);
if (err) {
dev_err(&pdev->dev, "registering netdev failed\n");
@@ -1408,12 +1577,17 @@ static int flexcan_probe(struct platform_device *pdev)
devm_can_led_init(dev);
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE) {
+ err = flexcan_setup_stop_mode(pdev);
+ if (err)
+ dev_dbg(&pdev->dev, "failed to setup stop-mode\n");
+ }
+
dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
priv->regs, dev->irq);
return 0;
- failed_offload:
failed_register:
free_candev(dev);
return err;
@@ -1422,10 +1596,8 @@ static int flexcan_probe(struct platform_device *pdev)
static int flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct flexcan_priv *priv = netdev_priv(dev);
unregister_flexcandev(dev);
- can_rx_offload_del(&priv->offload);
free_candev(dev);
return 0;
@@ -1438,9 +1610,17 @@ static int __maybe_unused flexcan_suspend(struct device *device)
int err;
if (netif_running(dev)) {
- err = flexcan_chip_disable(priv);
- if (err)
- return err;
+ /* if wakeup is enabled, enter stop mode
+ * else enter disabled mode.
+ */
+ if (device_may_wakeup(device)) {
+ enable_irq_wake(dev->irq);
+ flexcan_enter_stop_mode(priv);
+ } else {
+ err = flexcan_chip_disable(priv);
+ if (err)
+ return err;
+ }
netif_stop_queue(dev);
netif_device_detach(dev);
}
@@ -1459,14 +1639,45 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
- err = flexcan_chip_enable(priv);
- if (err)
- return err;
+ if (device_may_wakeup(device)) {
+ disable_irq_wake(dev->irq);
+ } else {
+ err = flexcan_chip_enable(priv);
+ if (err)
+ return err;
+ }
}
return 0;
}
-static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
+static int __maybe_unused flexcan_noirq_suspend(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ if (netif_running(dev) && device_may_wakeup(device))
+ flexcan_enable_wakeup_irq(priv, true);
+
+ return 0;
+}
+
+static int __maybe_unused flexcan_noirq_resume(struct device *device)
+{
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ if (netif_running(dev) && device_may_wakeup(device)) {
+ flexcan_enable_wakeup_irq(priv, false);
+ flexcan_exit_stop_mode(priv);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops flexcan_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(flexcan_suspend, flexcan_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(flexcan_noirq_suspend, flexcan_noirq_resume)
+};
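SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() fills in the noirq-phase callbacks, which the PM core runs after the regular ->suspend() path and before the regular ->resume() path, so the wakeup interrupt is armed only once flexcan_suspend() has already entered stop mode. It expands to:

    .suspend_noirq  = flexcan_noirq_suspend,
    .freeze_noirq   = flexcan_noirq_suspend,
    .poweroff_noirq = flexcan_noirq_suspend,
    .resume_noirq   = flexcan_noirq_resume,
    .thaw_noirq     = flexcan_noirq_resume,
    .restore_noirq  = flexcan_noirq_resume,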
static struct platform_driver flexcan_driver = {
.driver = {
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
index 7b03a3a37db7..bd5a8fcd83e1 100644
--- a/drivers/net/can/rcar/Kconfig
+++ b/drivers/net/can/rcar/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
config CAN_RCAR
tristate "Renesas R-Car CAN controller"
depends on ARCH_RENESAS || ARM
diff --git a/drivers/net/can/rcar/Makefile b/drivers/net/can/rcar/Makefile
index 08de36a4cfcc..c9185b0c04a8 100644
--- a/drivers/net/can/rcar/Makefile
+++ b/drivers/net/can/rcar/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Renesas R-Car CAN & CAN FD controller drivers
#
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 771a46083739..13e66297b65f 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Renesas R-Car CAN device driver
*
* Copyright (C) 2013 Cogent Embedded, Inc. <source@cogentembedded.com>
* Copyright (C) 2013 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/module.h>
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 602c19e23f05..05410008aa6b 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/* Renesas R-Car CAN FD device driver
*
* Copyright (C) 2015 Renesas Electronics Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
 /* The R-Car CAN FD controller can operate in either of the two modes below
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 1e65cb6c2591..f6dc89927ece 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -88,6 +88,7 @@ config CAN_PLX_PCI
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
- IXXAT Automation PC-I 04/PCI card (http://www.ixxat.com/)
- Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card (http://www.connecttech.com)
+ - ASEM CAN raw - 2 isolated CAN channels (www.asem.it)
config CAN_TSCAN1
tristate "TS-CAN1 PC104 boards"
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index f8ff25c8ee2e..9bcdefea138a 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -46,7 +46,8 @@ MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"esd CAN-PCIe/2000, "
"Connect Tech Inc. CANpro/104-Plus Opto (CRG001), "
"IXXAT PC-I 04/PCI, "
- "ELCUS CAN-200-PCI")
+ "ELCUS CAN-200-PCI, "
+ "ASEM DUAL CAN-RAW")
MODULE_LICENSE("GPL v2");
#define PLX_PCI_MAX_CHAN 2
@@ -70,7 +71,9 @@ struct plx_pci_card {
*/
#define PLX_LINT1_EN 0x1 /* Local interrupt 1 enable */
+#define PLX_LINT1_POL (1 << 1) /* Local interrupt 1 polarity */
#define PLX_LINT2_EN (1 << 3) /* Local interrupt 2 enable */
+#define PLX_LINT2_POL (1 << 4) /* Local interrupt 2 polarity */
#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
@@ -92,6 +95,9 @@ struct plx_pci_card {
*/
#define PLX_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
+/* OCR setting for ASEM Dual CAN raw */
+#define ASEM_PCI_OCR 0xfe
+
/*
* In the CDR register, you should set CBP to 1.
* You will probably also want to set the clock divider value to 7
@@ -145,10 +151,20 @@ struct plx_pci_card {
#define MOXA_PCI_VENDOR_ID 0x1393
#define MOXA_PCI_DEVICE_ID 0x0100
+#define ASEM_RAW_CAN_VENDOR_ID 0x10b5
+#define ASEM_RAW_CAN_DEVICE_ID 0x9030
+#define ASEM_RAW_CAN_SUB_VENDOR_ID 0x3000
+#define ASEM_RAW_CAN_SUB_DEVICE_ID 0x1001
+#define ASEM_RAW_CAN_SUB_DEVICE_ID_BIS 0x1002
+#define ASEM_RAW_CAN_RST_REGISTER 0x54
+#define ASEM_RAW_CAN_RST_MASK_CAN1 0x20
+#define ASEM_RAW_CAN_RST_MASK_CAN2 0x04
+
static void plx_pci_reset_common(struct pci_dev *pdev);
static void plx9056_pci_reset_common(struct pci_dev *pdev);
static void plx_pci_reset_marathon_pci(struct pci_dev *pdev);
static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev);
+static void plx_pci_reset_asem_dual_can_raw(struct pci_dev *pdev);
struct plx_pci_channel_map {
u32 bar;
@@ -269,6 +285,14 @@ static struct plx_pci_card_info plx_pci_card_info_moxa = {
/* based on PLX9052 */
};
+static struct plx_pci_card_info plx_pci_card_info_asem_dual_can = {
+ "ASEM Dual CAN raw PCI", 2,
+ PLX_PCI_CAN_CLOCK, ASEM_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
+ &plx_pci_reset_asem_dual_can_raw
+ /* based on PLX9030 */
+};
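Read against struct plx_pci_channel_map ({ bar, offset, size }), the initializer places the PLX local configuration registers in BAR0 and the two SJA1000 channels in BAR2 and BAR4, each at offset 0; a size of 0 makes pci_iomap() map the whole BAR:

    /* conf_map    = { 0, 0x00, 0x00 }  -> PLX registers, BAR0 */
    /* chan map #1 = { 2, 0x00, 0x00 }  -> SJA1000 #1, BAR2    */
    /* chan map #2 = { 4, 0x00, 0x00 }  -> SJA1000 #2, BAR4    */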
+
static const struct pci_device_id plx_pci_tbl[] = {
{
/* Adlink PCI-7841/cPCI-7841 */
@@ -375,6 +399,20 @@ static const struct pci_device_id plx_pci_tbl[] = {
0, 0,
(kernel_ulong_t)&plx_pci_card_info_moxa
},
+ {
+ /* ASEM Dual CAN raw */
+ ASEM_RAW_CAN_VENDOR_ID, ASEM_RAW_CAN_DEVICE_ID,
+ ASEM_RAW_CAN_SUB_VENDOR_ID, ASEM_RAW_CAN_SUB_DEVICE_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_asem_dual_can
+ },
+ {
+ /* ASEM Dual CAN raw -new model */
+ ASEM_RAW_CAN_VENDOR_ID, ASEM_RAW_CAN_DEVICE_ID,
+ ASEM_RAW_CAN_SUB_VENDOR_ID, ASEM_RAW_CAN_SUB_DEVICE_ID_BIS,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_asem_dual_can
+ },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
@@ -524,6 +562,31 @@ static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev)
}
}
+/* Special reset function for ASEM Dual CAN raw card */
+static void plx_pci_reset_asem_dual_can_raw(struct pci_dev *pdev)
+{
+ void __iomem *bar0_addr;
+ u8 tmpval;
+
+ plx_pci_reset_common(pdev);
+
+ bar0_addr = pci_iomap(pdev, 0, 0);
+ if (!bar0_addr) {
+ dev_err(&pdev->dev, "Failed to remap reset space 0 (BAR0)\n");
+ return;
+ }
+
+ /* reset the two SJA1000 chips */
+ tmpval = ioread8(bar0_addr + ASEM_RAW_CAN_RST_REGISTER);
+ tmpval &= ~(ASEM_RAW_CAN_RST_MASK_CAN1 | ASEM_RAW_CAN_RST_MASK_CAN2);
+ iowrite8(tmpval, bar0_addr + ASEM_RAW_CAN_RST_REGISTER);
+ usleep_range(300, 400);
+ tmpval |= ASEM_RAW_CAN_RST_MASK_CAN1 | ASEM_RAW_CAN_RST_MASK_CAN2;
+ iowrite8(tmpval, bar0_addr + ASEM_RAW_CAN_RST_REGISTER);
+ usleep_range(300, 400);
+ pci_iounmap(pdev, bar0_addr);
+}
+
static void plx_pci_del_card(struct pci_dev *pdev)
{
struct plx_pci_card *card = pci_get_drvdata(pdev);
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index f3d5bda012a1..04aac3bb54ef 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -715,7 +715,7 @@ static void ucan_read_bulk_callback(struct urb *urb)
up->in_ep_size,
urb->transfer_buffer,
urb->transfer_dma);
- netdev_dbg(up->netdev, "not resumbmitting urb; status: %d\n",
+ netdev_dbg(up->netdev, "not resubmitting urb; status: %d\n",
urb->status);
return;
default:
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index ed6828821fbd..80af658e530d 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -207,7 +207,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
return PTR_ERR(peer_net);
peer = rtnl_create_link(peer_net, ifname, name_assign_type,
- &vxcan_link_ops, tbp);
+ &vxcan_link_ops, tbp, extack);
if (IS_ERR(peer)) {
put_net(peer_net);
return PTR_ERR(peer);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 045f0845e665..97d0933d9bd9 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -63,6 +63,7 @@ enum xcan_reg {
XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */
XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */
XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */
+ XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */
};
#define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00)
@@ -75,6 +76,8 @@ enum xcan_reg {
XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \
XCAN_CANFD_FRAME_SIZE * (n))
+#define XCAN_RXMSG_2_FRAME_OFFSET(n) (XCAN_RXMSG_2_BASE_OFFSET + \
+ XCAN_CANFD_FRAME_SIZE * (n))
/* the single TX mailbox used by this driver on CAN FD HW */
#define XCAN_TX_MAILBOX_IDX 0
@@ -152,6 +155,7 @@ enum xcan_reg {
* instead of the regular FIFO at 0x50
*/
#define XCAN_FLAG_RX_FIFO_MULTI 0x0010
+#define XCAN_FLAG_CANFD_2 0x0020
struct xcan_devtype_data {
unsigned int flags;
@@ -221,6 +225,18 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = {
.brp_inc = 1,
};
+static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
+ .name = DRIVER_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
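These limits feed the standard CAN bit-time relation; a worked example (the 80 MHz clock is illustrative, not a value taken from this patch):

    /* bitrate = can_clk / (brp * (1 + tseg1 + tseg2))
     *
     * e.g. 80 MHz / (1 * (1 + 63 + 16)) = 1 Mbit/s, comfortably inside
     * the tseg1_max = 256 / tseg2_max = 128 window declared above.
     */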
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -612,7 +628,7 @@ static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
*
* Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
*/
-static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
@@ -973,7 +989,10 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
if (!(fsr & XCAN_FSR_FL_MASK))
return -ENOENT;
- offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
+ if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
+ offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
+ else
+ offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
} else {
/* check if RX FIFO is empty */
@@ -1430,11 +1449,24 @@ static const struct xcan_devtype_data xcan_canfd_data = {
.bus_clk_name = "s_axi_aclk",
};
+static const struct xcan_devtype_data xcan_canfd2_data = {
+ .flags = XCAN_FLAG_EXT_FILTERS |
+ XCAN_FLAG_RXMNF |
+ XCAN_FLAG_TX_MAILBOXES |
+ XCAN_FLAG_CANFD_2 |
+ XCAN_FLAG_RX_FIFO_MULTI,
+ .bittiming_const = &xcan_bittiming_const_canfd2,
+ .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
+ .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
+ .bus_clk_name = "s_axi_aclk",
+};
+
/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
+ { .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 2eb68769562c..aa4a1f5206f1 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -710,6 +710,10 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
return ret;
}
+ ret = bcm_sf2_cfp_resume(ds);
+ if (ret)
+ return ret;
+
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, true);
@@ -1061,6 +1065,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
spin_lock_init(&priv->indir_lock);
mutex_init(&priv->stats_mutex);
mutex_init(&priv->cfp.lock);
+ INIT_LIST_HEAD(&priv->cfp.rules_list);
/* CFP rule #0 cannot be used for specific classifications, flag it as
* permanently used
@@ -1090,12 +1095,16 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
return ret;
}
+ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+
ret = bcm_sf2_mdio_register(ds);
if (ret) {
pr_err("failed to register MDIO bus\n");
return ret;
}
+ bcm_sf2_gphy_enable_set(priv->dev->ds, false);
+
ret = bcm_sf2_cfp_rst(priv);
if (ret) {
pr_err("failed to reset CFP\n");
@@ -1166,6 +1175,7 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
priv->wol_ports_mask = 0;
dsa_unregister_switch(priv->dev->ds);
+ bcm_sf2_cfp_exit(priv->dev->ds);
/* Disable all ports and interrupts */
bcm_sf2_sw_suspend(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index cc31e986e6e3..faaef320ec48 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -56,6 +56,7 @@ struct bcm_sf2_cfp_priv {
DECLARE_BITMAP(used, CFP_NUM_RULES);
DECLARE_BITMAP(unique, CFP_NUM_RULES);
unsigned int rules_cnt;
+ struct list_head rules_list;
};
struct bcm_sf2_priv {
@@ -213,5 +214,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc);
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
+void bcm_sf2_cfp_exit(struct dsa_switch *ds);
+int bcm_sf2_cfp_resume(struct dsa_switch *ds);
#endif /* __BCM_SF2_H */
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 47c5f272a084..e14663ab6dbc 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -20,6 +20,12 @@
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
+struct cfp_rule {
+ int port;
+ struct ethtool_rx_flow_spec fs;
+ struct list_head next;
+};
+
struct cfp_udf_slice_layout {
u8 slices[UDFS_PER_SLICE];
u32 mask_value;
@@ -515,6 +521,61 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
core_writel(priv, reg, offset);
}
+static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
+					      int port, u32 location)
+{
+	struct cfp_rule *rule;
+
+	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+		if (rule->port == port && rule->fs.location == location)
+			return rule;
+	}
+
+	/* list_for_each_entry() never leaves a NULL cursor behind, so
+	 * return NULL explicitly for the not-found case.
+	 */
+	return NULL;
+}
+
+static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct cfp_rule *rule = NULL;
+ size_t fs_size = 0;
+ int ret = 1;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return ret;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ ret = 1;
+ if (rule->port != port)
+ continue;
+
+ if (rule->fs.flow_type != fs->flow_type ||
+ rule->fs.ring_cookie != fs->ring_cookie ||
+ rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+ continue;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip6_spec);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip4_spec);
+ break;
+ default:
+ continue;
+ }
+
+ ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
+ ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
+ if (ret == 0)
+ break;
+ }
+
+ return ret;
+}
+
static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
unsigned int port_num,
unsigned int queue_num,
@@ -728,27 +789,14 @@ out_err:
return ret;
}
-static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
- struct ethtool_rx_flow_spec *fs)
+static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->ports[port].cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
unsigned int queue_num, port_num;
- int ret = -EINVAL;
-
- /* Check for unsupported extensions */
- if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
- fs->m_ext.data[1]))
- return -EINVAL;
-
- if (fs->location != RX_CLS_LOC_ANY &&
- test_bit(fs->location, priv->cfp.used))
- return -EBUSY;
-
- if (fs->location != RX_CLS_LOC_ANY &&
- fs->location > bcm_sf2_cfp_rule_size(priv))
- return -EINVAL;
+ int ret;
/* This rule is a Wake-on-LAN filter and we must specifically
* target the CPU port in order for it to be working.
@@ -787,12 +835,54 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
queue_num, fs);
break;
default:
+ ret = -EINVAL;
break;
}
return ret;
}
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule = NULL;
+ int ret = -EINVAL;
+
+ /* Check for unsupported extensions */
+ if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
+ fs->m_ext.data[1]))
+ return -EINVAL;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ test_bit(fs->location, priv->cfp.used))
+ return -EBUSY;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ fs->location > bcm_sf2_cfp_rule_size(priv))
+ return -EINVAL;
+
+ ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
+ if (ret == 0)
+ return -EEXIST;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ rule->port = port;
+ memcpy(&rule->fs, fs, sizeof(*fs));
+ list_add_tail(&rule->next, &priv->cfp.rules_list);
+
+ return ret;
+}
+
static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
u32 loc, u32 *next_loc)
{
@@ -830,19 +920,12 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
return 0;
}
-static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
- u32 loc)
+static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
+ u32 loc)
{
u32 next_loc = 0;
int ret;
- /* Refuse deleting unused rules, and those that are not unique since
- * that could leave IPv6 rules with one of the chained rule in the
- * table.
- */
- if (!test_bit(loc, priv->cfp.unique) || loc == 0)
- return -EINVAL;
-
ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
if (ret)
return ret;
@@ -854,318 +937,54 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
return ret;
}
-static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
- unsigned int i;
-
- for (i = 0; i < sizeof(flow->m_u); i++)
- flow->m_u.hdata[i] ^= 0xff;
-
- flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
- flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
- flow->m_ext.data[0] ^= cpu_to_be32(~0);
- flow->m_ext.data[1] ^= cpu_to_be32(~0);
-}
-
-static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
- struct ethtool_tcpip4_spec *v4_spec,
- bool mask)
-{
- u32 reg, offset, ipv4;
- u16 src_dst_port;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(3);
- else
- offset = CORE_CFP_DATA_PORT(3);
-
- reg = core_readl(priv, offset);
- /* src port [15:8] */
- src_dst_port = reg << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(2);
- else
- offset = CORE_CFP_DATA_PORT(2);
-
- reg = core_readl(priv, offset);
- /* src port [7:0] */
- src_dst_port |= (reg >> 24);
-
- v4_spec->pdst = cpu_to_be16(src_dst_port);
- v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
-
- /* IPv4 dst [15:8] */
- ipv4 = (reg & 0xff) << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(1);
- else
- offset = CORE_CFP_DATA_PORT(1);
-
- reg = core_readl(priv, offset);
- /* IPv4 dst [31:16] */
- ipv4 |= ((reg >> 8) & 0xffff) << 16;
- /* IPv4 dst [7:0] */
- ipv4 |= (reg >> 24) & 0xff;
- v4_spec->ip4dst = cpu_to_be32(ipv4);
-
- /* IPv4 src [15:8] */
- ipv4 = (reg & 0xff) << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(0);
- else
- offset = CORE_CFP_DATA_PORT(0);
- reg = core_readl(priv, offset);
+ struct cfp_rule *rule;
+ int ret;
- /* Once the TCAM is programmed, the mask reflects the slice number
- * being matched, don't bother checking it when reading back the
- * mask spec
+ /* Refuse deleting unused rules, and those that are not unique since
+ * that could leave IPv6 rules with one of the chained rule in the
+ * table.
*/
- if (!mask && !(reg & SLICE_VALID))
+ if (!test_bit(loc, priv->cfp.unique) || loc == 0)
return -EINVAL;
- /* IPv4 src [7:0] */
- ipv4 |= (reg >> 24) & 0xff;
- /* IPv4 src [31:16] */
- ipv4 |= ((reg >> 8) & 0xffff) << 16;
- v4_spec->ip4src = cpu_to_be32(ipv4);
-
- return 0;
-}
-
-static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
- struct ethtool_rx_flow_spec *fs)
-{
- struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
- u32 reg;
- int ret;
-
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
- switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
- case IPPROTO_TCP:
- fs->flow_type = TCP_V4_FLOW;
- v4_spec = &fs->h_u.tcp_ip4_spec;
- v4_m_spec = &fs->m_u.tcp_ip4_spec;
- break;
- case IPPROTO_UDP:
- fs->flow_type = UDP_V4_FLOW;
- v4_spec = &fs->h_u.udp_ip4_spec;
- v4_m_spec = &fs->m_u.udp_ip4_spec;
- break;
- default:
+ rule = bcm_sf2_cfp_rule_find(priv, port, loc);
+ if (!rule)
return -EINVAL;
- }
-
- fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
- v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;
-
- ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
- if (ret)
- return ret;
-
- return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
-}
-
-static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
- __be32 *ip6_addr, __be16 *port,
- bool mask)
-{
- u32 reg, tmp, offset;
-
- /* C-Tag [31:24]
- * UDF_n_B8 [23:8] (port)
- * UDF_n_B7 (upper) [7:0] (addr[15:8])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(4);
- else
- offset = CORE_CFP_DATA_PORT(4);
- reg = core_readl(priv, offset);
- *port = cpu_to_be32(reg) >> 8;
- tmp = (u32)(reg & 0xff) << 8;
-
- /* UDF_n_B7 (lower) [31:24] (addr[7:0])
- * UDF_n_B6 [23:8] (addr[31:16])
- * UDF_n_B5 (upper) [7:0] (addr[47:40])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(3);
- else
- offset = CORE_CFP_DATA_PORT(3);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[3] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
-
- /* UDF_n_B5 (lower) [31:24] (addr[39:32])
- * UDF_n_B4 [23:8] (addr[63:48])
- * UDF_n_B3 (upper) [7:0] (addr[79:72])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(2);
- else
- offset = CORE_CFP_DATA_PORT(2);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[2] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
- /* UDF_n_B3 (lower) [31:24] (addr[71:64])
- * UDF_n_B2 [23:8] (addr[95:80])
- * UDF_n_B1 (upper) [7:0] (addr[111:104])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(1);
- else
- offset = CORE_CFP_DATA_PORT(1);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[1] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
+ ret = bcm_sf2_cfp_rule_remove(priv, port, loc);
- /* UDF_n_B1 (lower) [31:24] (addr[103:96])
- * UDF_n_B0 [23:8] (addr[127:112])
- * Reserved [7:4]
- * Slice ID [3:2]
- * Slice valid [1:0]
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(0);
- else
- offset = CORE_CFP_DATA_PORT(0);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[0] = cpu_to_be32(tmp);
+ list_del(&rule->next);
+ kfree(rule);
- if (!mask && !(reg & SLICE_VALID))
- return -EINVAL;
-
- return 0;
+ return ret;
}
-static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
- struct ethtool_rx_flow_spec *fs,
- u32 next_loc)
+static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
- struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
- u32 reg;
- int ret;
-
- /* UDPv6 and TCPv6 both use ethtool_tcpip6_spec so we are fine
- * assuming tcp_ip6_spec here being an union.
- */
- v6_spec = &fs->h_u.tcp_ip6_spec;
- v6_m_spec = &fs->m_u.tcp_ip6_spec;
-
- /* Read the second half first */
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
- false);
- if (ret)
- return ret;
-
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
- &v6_m_spec->pdst, true);
- if (ret)
- return ret;
-
- /* Read last to avoid next entry clobbering the results during search
- * operations. We would not have the port enabled for this rule, so
- * don't bother checking it.
- */
- (void)core_readl(priv, CORE_CFP_DATA_PORT(7));
-
- /* The slice number is valid, so read the rule we are chained from now
- * which is our first half.
- */
- bcm_sf2_cfp_rule_addr_set(priv, next_loc);
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
- if (ret)
- return ret;
-
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
- switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
- case IPPROTO_TCP:
- fs->flow_type = TCP_V6_FLOW;
- break;
- case IPPROTO_UDP:
- fs->flow_type = UDP_V6_FLOW;
- break;
- default:
- return -EINVAL;
- }
+ unsigned int i;
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
- false);
- if (ret)
- return ret;
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xff;
- return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
- &v6_m_spec->psrc, true);
+ flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
struct ethtool_rxnfc *nfc)
{
- u32 reg, ipv4_or_chain_id;
- unsigned int queue_num;
- int ret;
-
- bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
-
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
- if (ret)
- return ret;
-
- reg = core_readl(priv, CORE_ACT_POL_DATA0);
-
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
- if (ret)
- return ret;
-
- /* Extract the destination port */
- nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
- DST_MAP_IB_MASK) - 1;
-
- /* There is no Port 6, so we compensate for that here */
- if (nfc->fs.ring_cookie >= 6)
- nfc->fs.ring_cookie++;
- nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;
-
- /* Extract the destination queue */
- queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
- nfc->fs.ring_cookie += queue_num;
-
- /* Extract the L3_FRAMING or CHAIN_ID */
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+ struct cfp_rule *rule;
- /* With IPv6 rules this would contain a non-zero chain ID since
- * we reserve entry 0 and it cannot be used. So if we read 0 here
- * this means an IPv4 rule.
- */
- ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
- if (ipv4_or_chain_id == 0)
- ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
- else
- ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
- ipv4_or_chain_id);
- if (ret)
- return ret;
-
- /* Read last to avoid next entry clobbering the results during search
- * operations
- */
- reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
- if (!(reg & 1 << port))
+ rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
+ if (!rule)
return -EINVAL;
+ memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));
+
bcm_sf2_invert_masks(&nfc->fs);
/* Put the TCAM size here */
@@ -1302,3 +1121,51 @@ int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
return 0;
}
+
+void bcm_sf2_cfp_exit(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule, *n;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return;
+
+ list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
+ bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
+}
+
+int bcm_sf2_cfp_resume(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule;
+ int ret = 0;
+ u32 reg;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return ret;
+
+ reg = core_readl(priv, CORE_CFP_CTL_REG);
+ reg &= ~CFP_EN_MAP_MASK;
+ core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+ ret = bcm_sf2_cfp_rst(priv);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
+ rule->fs.location);
+ if (ret) {
+ dev_err(ds->dev, "failed to remove rule\n");
+ return ret;
+ }
+
+ ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
+ if (ret) {
+ dev_err(ds->dev, "failed to restore rule\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index a8b8f59099ce..bea29fde9f3d 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -1,12 +1,16 @@
-menuconfig MICROCHIP_KSZ
- tristate "Microchip KSZ series switch support"
+config NET_DSA_MICROCHIP_KSZ_COMMON
+ tristate
+
+menuconfig NET_DSA_MICROCHIP_KSZ9477
+ tristate "Microchip KSZ9477 series switch support"
depends on NET_DSA
- select NET_DSA_TAG_KSZ
+ select NET_DSA_TAG_KSZ9477
+ select NET_DSA_MICROCHIP_KSZ_COMMON
help
- This driver adds support for Microchip KSZ switch chips.
+ This driver adds support for Microchip KSZ9477 switch chips.
-config MICROCHIP_KSZ_SPI_DRIVER
- tristate "KSZ series SPI connected switch driver"
- depends on MICROCHIP_KSZ && SPI
+config NET_DSA_MICROCHIP_KSZ9477_SPI
+ tristate "KSZ9477 series SPI connected switch driver"
+ depends on NET_DSA_MICROCHIP_KSZ9477 && SPI
help
Select to enable support for registering switches configured through SPI.
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index ed335e29fae8..3142c18b8f57 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_MICROCHIP_KSZ) += ksz_common.o
-obj-$(CONFIG_MICROCHIP_KSZ_SPI_DRIVER) += ksz_spi.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o
+obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
new file mode 100644
index 000000000000..89ed059bb576
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -0,0 +1,1316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ9477 switch driver main logic
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/gpio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/microchip-ksz.h>
+#include <linux/phy.h>
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <net/dsa.h>
+#include <net/switchdev.h>
+
+#include "ksz_priv.h"
+#include "ksz_common.h"
+#include "ksz9477_reg.h"
+
+static const struct {
+ int index;
+ char string[ETH_GSTRING_LEN];
+} ksz9477_mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
+ { 0x00, "rx_hi" },
+ { 0x01, "rx_undersize" },
+ { 0x02, "rx_fragments" },
+ { 0x03, "rx_oversize" },
+ { 0x04, "rx_jabbers" },
+ { 0x05, "rx_symbol_err" },
+ { 0x06, "rx_crc_err" },
+ { 0x07, "rx_align_err" },
+ { 0x08, "rx_mac_ctrl" },
+ { 0x09, "rx_pause" },
+ { 0x0A, "rx_bcast" },
+ { 0x0B, "rx_mcast" },
+ { 0x0C, "rx_ucast" },
+ { 0x0D, "rx_64_or_less" },
+ { 0x0E, "rx_65_127" },
+ { 0x0F, "rx_128_255" },
+ { 0x10, "rx_256_511" },
+ { 0x11, "rx_512_1023" },
+ { 0x12, "rx_1024_1522" },
+ { 0x13, "rx_1523_2000" },
+ { 0x14, "rx_2001" },
+ { 0x15, "tx_hi" },
+ { 0x16, "tx_late_col" },
+ { 0x17, "tx_pause" },
+ { 0x18, "tx_bcast" },
+ { 0x19, "tx_mcast" },
+ { 0x1A, "tx_ucast" },
+ { 0x1B, "tx_deferred" },
+ { 0x1C, "tx_total_col" },
+ { 0x1D, "tx_exc_col" },
+ { 0x1E, "tx_single_col" },
+ { 0x1F, "tx_mult_col" },
+ { 0x80, "rx_total" },
+ { 0x81, "tx_total" },
+ { 0x82, "rx_discards" },
+ { 0x83, "tx_discards" },
+};
+
+static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
+{
+ u32 data;
+
+ ksz_read32(dev, addr, &data);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ ksz_write32(dev, addr, data);
+}
+
+static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset,
+ u32 bits, bool set)
+{
+ u32 addr;
+ u32 data;
+
+ addr = PORT_CTRL_ADDR(port, offset);
+ ksz_read32(dev, addr, &data);
+
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+
+ ksz_write32(dev, addr, data);
+}
+
+static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev, u32 waiton,
+ int timeout)
+{
+ u8 data;
+
+ do {
+ ksz_read8(dev, REG_SW_VLAN_CTRL, &data);
+ if (!(data & waiton))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (timeout <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid,
+ u32 *vlan_table)
+{
+ int ret;
+
+ mutex_lock(&dev->vlan_mutex);
+
+ ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
+ ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
+
+ /* wait to be cleared */
+ ret = ksz9477_wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to read vlan table\n");
+ goto exit;
+ }
+
+ ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
+ ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
+ ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
+
+ ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
+
+exit:
+ mutex_unlock(&dev->vlan_mutex);
+
+ return ret;
+}
+
+static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid,
+ u32 *vlan_table)
+{
+ int ret;
+
+ mutex_lock(&dev->vlan_mutex);
+
+ ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
+ ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
+ ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
+
+ ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
+ ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
+
+ /* wait to be cleared */
+ ret = ksz9477_wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to write vlan table\n");
+ goto exit;
+ }
+
+ ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
+
+ /* update vlan cache table */
+ dev->vlan_cache[vid].table[0] = vlan_table[0];
+ dev->vlan_cache[vid].table[1] = vlan_table[1];
+ dev->vlan_cache[vid].table[2] = vlan_table[2];
+
+exit:
+ mutex_unlock(&dev->vlan_mutex);
+
+ return ret;
+}
+
+static void ksz9477_read_table(struct ksz_device *dev, u32 *table)
+{
+ ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
+ ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
+ ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
+ ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
+}
+
+static void ksz9477_write_table(struct ksz_device *dev, u32 *table)
+{
+ ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
+ ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
+ ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
+ ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
+}
+
+static int ksz9477_wait_alu_ready(struct ksz_device *dev, u32 waiton,
+ int timeout)
+{
+ u32 data;
+
+ do {
+ ksz_read32(dev, REG_SW_ALU_CTRL__4, &data);
+ if (!(data & waiton))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (timeout <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev, u32 waiton,
+ int timeout)
+{
+ u32 data;
+
+ do {
+ ksz_read32(dev, REG_SW_ALU_STAT_CTRL__4, &data);
+ if (!(data & waiton))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (timeout <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
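The two 32-bit wait helpers above differ only in the register they poll; a refactoring sketch of a shared core (not part of this patch) that also avoids the post-decrement edge case where a success on the final iteration leaves timeout at 0:

    static int ksz9477_wait32_cleared(struct ksz_device *dev, u32 reg,
    				  u32 waiton, int timeout)
    {
    	u32 data;

    	do {
    		ksz_read32(dev, reg, &data);
    		if (!(data & waiton))
    			return 0;	/* bits cleared: success */
    		usleep_range(1, 10);
    	} while (timeout-- > 0);

    	return -ETIMEDOUT;
    }

ksz9477_wait_alu_ready() would then reduce to a one-line call with REG_SW_ALU_CTRL__4, and ksz9477_wait_alu_sta_ready() to one with REG_SW_ALU_STAT_CTRL__4.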
+
+static int ksz9477_reset_switch(struct ksz_device *dev)
+{
+ u8 data8;
+ u16 data16;
+ u32 data32;
+
+ /* reset switch */
+ ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
+
+ /* turn off SPI DO Edge select */
+ ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
+ data8 &= ~SPI_AUTO_EDGE_DETECTION;
+ ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
+
+ /* default configuration */
+ ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
+ data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
+ SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
+ ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+
+ /* disable interrupts */
+ ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
+ ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
+ ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+
+ /* set broadcast storm protection to a 10% rate */
+ ksz_read16(dev, REG_SW_MAC_CTRL_2, &data16);
+ data16 &= ~BROADCAST_STORM_RATE;
+ data16 |= (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
+ ksz_write16(dev, REG_SW_MAC_CTRL_2, data16);
+
+ return 0;
+}
+
+static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds,
+ int port)
+{
+ return DSA_TAG_PROTO_KSZ9477;
+}
+
+static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg)
+{
+ struct ksz_device *dev = ds->priv;
+ u16 val = 0xffff;
+
+ /* No real PHY after this point, so simulate one.
+ * A fixed PHY can be set up in the device tree, but this function is
+ * still called for that port during initialization.
+ * An RGMII PHY cannot be accessed at all, so a fixed PHY should be
+ * used there. For an SGMII PHY the supporting code will be added
+ * later.
+ */
+ if (addr >= dev->phy_port_cnt) {
+ struct ksz_port *p = &dev->ports[addr];
+
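+ /* The constants below simulate an internal gigabit PHY: 0x1140 is
+ * BMCR_ANENABLE | BMCR_FULLDPLX | BMCR_SPEED1000, 0x05e1 advertises
+ * 10/100 half/full duplex plus pause, and 0x0022/0x1631 appear to
+ * form a Microchip PHY identifier. (Decoding inferred from mii.h,
+ * not from a datasheet.)
+ */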
+ switch (reg) {
+ case MII_BMCR:
+ val = 0x1140;
+ break;
+ case MII_BMSR:
+ val = 0x796d;
+ break;
+ case MII_PHYSID1:
+ val = 0x0022;
+ break;
+ case MII_PHYSID2:
+ val = 0x1631;
+ break;
+ case MII_ADVERTISE:
+ val = 0x05e1;
+ break;
+ case MII_LPA:
+ val = 0xc5e1;
+ break;
+ case MII_CTRL1000:
+ val = 0x0700;
+ break;
+ case MII_STAT1000:
+ if (p->phydev.speed == SPEED_1000)
+ val = 0x3800;
+ else
+ val = 0;
+ break;
+ }
+ } else {
+ ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ }
+
+ return val;
+}
+
+static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg,
+ u16 val)
+{
+ struct ksz_device *dev = ds->priv;
+
+ /* No real PHY after this. */
+ if (addr >= dev->phy_port_cnt)
+ return 0;
+ ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+
+ return 0;
+}
+
+static void ksz9477_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, uint8_t *buf)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
+ memcpy(buf + i * ETH_GSTRING_LEN, ksz9477_mib_names[i].string,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *buf)
+{
+ struct ksz_device *dev = ds->priv;
+ int i;
+ u32 data;
+ int timeout;
+
+ mutex_lock(&dev->stats_mutex);
+
+ for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
+ data = MIB_COUNTER_READ;
+ data |= ((ksz9477_mib_names[i].index & 0xFF) <<
+ MIB_COUNTER_INDEX_S);
+ ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
+
+ timeout = 1000;
+ do {
+ ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
+ &data);
+ usleep_range(1, 10);
+ if (!(data & MIB_COUNTER_READ))
+ break;
+ } while (timeout-- > 0);
+
+ /* Failed to read the MIB counter; get out of the loop. */
+ if (timeout < 0) {
+ dev_dbg(dev->dev, "Failed to get MIB\n");
+ break;
+ }
+
+ /* the hardware count resets upon read */
+ ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
+
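+ /* The readout is only 32 bits wide and the hardware count clears
+ * on read, so accumulate into a 64-bit software counter to keep
+ * the value reported to ethtool monotonic.
+ */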
+ dev->mib_value[i] += (uint64_t)data;
+ buf[i] = dev->mib_value[i];
+ }
+
+ mutex_unlock(&dev->stats_mutex);
+}
+
+static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
+ u8 member)
+{
+ ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
+ dev->ports[port].member = member;
+}
+
+static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p = &dev->ports[port];
+ u8 data;
+ int member = -1;
+
+ ksz_pread8(dev, port, P_STP_CTRL, &data);
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ data |= PORT_LEARN_DISABLE;
+ if (port != dev->cpu_port)
+ member = 0;
+ break;
+ case BR_STATE_LISTENING:
+ data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
+ if (port != dev->cpu_port &&
+ p->stp_state == BR_STATE_DISABLED)
+ member = dev->host_mask | p->vid_member;
+ break;
+ case BR_STATE_LEARNING:
+ data |= PORT_RX_ENABLE;
+ break;
+ case BR_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+
+ /* This function is also used internally. */
+ if (port == dev->cpu_port)
+ break;
+
+ member = dev->host_mask | p->vid_member;
+
+ /* Port is a member of a bridge. */
+ if (dev->br_member & (1 << port)) {
+ dev->member |= (1 << port);
+ member = dev->member;
+ }
+ break;
+ case BR_STATE_BLOCKING:
+ data |= PORT_LEARN_DISABLE;
+ if (port != dev->cpu_port &&
+ p->stp_state == BR_STATE_DISABLED)
+ member = dev->host_mask | p->vid_member;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ p->stp_state = state;
+ if (data & PORT_RX_ENABLE)
+ dev->rx_ports |= (1 << port);
+ else
+ dev->rx_ports &= ~(1 << port);
+ if (data & PORT_TX_ENABLE)
+ dev->tx_ports |= (1 << port);
+ else
+ dev->tx_ports &= ~(1 << port);
+
+ /* Port membership may share a register with the STP state. */
+ if (member >= 0 && member != p->member)
+ ksz9477_cfg_port_member(dev, port, (u8)member);
+
+ /* Check if forwarding needs to be updated. */
+ if (state != BR_STATE_FORWARDING) {
+ if (dev->br_member & (1 << port))
+ dev->member &= ~(1 << port);
+ }
+
+ /* When the topology has changed, ksz_update_port_member() should be
+ * called to modify the port forwarding behavior. However, as the
+ * offload_fwd_mark indication cannot be reported here, the switch
+ * forwarding function is not enabled.
+ */
+}
+
+static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
+{
+ u8 data;
+
+ ksz_read8(dev, REG_SW_LUE_CTRL_2, &data);
+ data &= ~(SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S);
+ data |= (SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S);
+ ksz_write8(dev, REG_SW_LUE_CTRL_2, data);
+ if (port < dev->mib_port_cnt) {
+ /* flush individual port */
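+ /* The flush option presumably acts on ports whose learning is
+ * disabled, so learning is turned off briefly here and restored
+ * right after the flush is triggered.
+ */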
+ ksz_pread8(dev, port, P_STP_CTRL, &data);
+ if (!(data & PORT_LEARN_DISABLE))
+ ksz_pwrite8(dev, port, P_STP_CTRL,
+ data | PORT_LEARN_DISABLE);
+ ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true);
+ ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ } else {
+ /* flush all */
+ ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true);
+ }
+}
+
+static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool flag)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (flag) {
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
+ PORT_VLAN_LOOKUP_VID_0, true);
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
+ } else {
+ ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
+ ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
+ PORT_VLAN_LOOKUP_VID_0, false);
+ }
+
+ return 0;
+}
+
+static void ksz9477_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 vlan_table[3];
+ u16 vid;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return;
+ }
+
+ vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M);
+ if (untagged)
+ vlan_table[1] |= BIT(port);
+ else
+ vlan_table[1] &= ~BIT(port);
+ vlan_table[1] &= ~(BIT(dev->cpu_port));
+
+ vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
+
+ if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return;
+ }
+
+ /* change PVID */
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
+ ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid);
+ }
+}
+
+static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct ksz_device *dev = ds->priv;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ u32 vlan_table[3];
+ u16 vid;
+ u16 pvid;
+
+ ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
+ pvid = pvid & 0xFFF;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ if (ksz9477_get_vlan_table(dev, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to get vlan table\n");
+ return -ETIMEDOUT;
+ }
+
+ vlan_table[2] &= ~BIT(port);
+
+ if (pvid == vid)
+ pvid = 1;
+
+ if (untagged)
+ vlan_table[1] &= ~BIT(port);
+
+ if (ksz9477_set_vlan_table(dev, vid, vlan_table)) {
+ dev_dbg(dev->dev, "Failed to set vlan table\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
+
+ return 0;
+}
+
+static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 alu_table[4];
+ u32 data;
+ int ret = 0;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* find any entry with mac & vid */
+ data = vid << ALU_FID_INDEX_S;
+ data |= ((addr[0] << 8) | addr[1]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
+
+ data = ((addr[2] << 24) | (addr[3] << 16));
+ data |= ((addr[4] << 8) | addr[5]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
+
+ /* start read operation */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU\n");
+ goto exit;
+ }
+
+ /* read ALU entry */
+ ksz9477_read_table(dev, alu_table);
+
+ /* update ALU entry */
+ alu_table[0] = ALU_V_STATIC_VALID;
+ alu_table[1] |= BIT(port);
+ if (vid)
+ alu_table[1] |= ALU_V_USE_FID;
+ alu_table[2] = (vid << ALU_V_FID_S);
+ alu_table[2] |= ((addr[0] << 8) | addr[1]);
+ alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
+ alu_table[3] |= ((addr[4] << 8) | addr[5]);
+
+ ksz9477_write_table(dev, alu_table);
+
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+ if (ret < 0)
+ dev_dbg(dev->dev, "Failed to write ALU\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 alu_table[4];
+ u32 data;
+ int ret = 0;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* read any entry with mac & vid */
+ data = vid << ALU_FID_INDEX_S;
+ data |= ((addr[0] << 8) | addr[1]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
+
+ data = ((addr[2] << 24) | (addr[3] << 16));
+ data |= ((addr[4] << 8) | addr[5]);
+ ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
+
+ /* start read operation */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU\n");
+ goto exit;
+ }
+
+ ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
+ if (alu_table[0] & ALU_V_STATIC_VALID) {
+ ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
+ ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
+ ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
+
+ /* clear forwarding port */
+ alu_table[2] &= ~BIT(port);
+
+ /* if there is no port to forward to, clear the table */
+ if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
+ alu_table[0] = 0;
+ alu_table[1] = 0;
+ alu_table[2] = 0;
+ alu_table[3] = 0;
+ }
+ } else {
+ alu_table[0] = 0;
+ alu_table[1] = 0;
+ alu_table[2] = 0;
+ alu_table[3] = 0;
+ }
+
+ ksz9477_write_table(dev, alu_table);
+
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000);
+ if (ret < 0)
+ dev_dbg(dev->dev, "Failed to write ALU\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table)
+{
+ alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
+ alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
+ alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
+ alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
+ ALU_V_PRIO_AGE_CNT_M;
+ alu->mstp = alu_table[0] & ALU_V_MSTP_M;
+
+ alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
+ alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
+ alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
+
+ alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
+
+ alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
+ alu->mac[1] = alu_table[2] & 0xFF;
+ alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
+ alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
+ alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
+ alu->mac[5] = alu_table[3] & 0xFF;
+}
+
+static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+ u32 ksz_data;
+ u32 alu_table[4];
+ struct alu_struct alu;
+ int timeout;
+
+ mutex_lock(&dev->alu_mutex);
+
+ /* start ALU search */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
+
+ do {
+ timeout = 1000;
+ do {
+ ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
+ if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
+ break;
+ usleep_range(1, 10);
+ } while (timeout-- > 0);
+
+ if (timeout < 0) {
+ dev_dbg(dev->dev, "Failed to search ALU\n");
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ /* read ALU table */
+ ksz9477_read_table(dev, alu_table);
+
+ ksz9477_convert_alu(&alu, alu_table);
+
+ if (alu.port_forward & BIT(port)) {
+ ret = cb(alu.mac, alu.fid, alu.is_static, data);
+ if (ret)
+ goto exit;
+ }
+ } while (ksz_data & ALU_START);
+
+exit:
+
+ /* stop ALU search */
+ ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
+
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 static_table[4];
+ u32 data;
+ int index;
+ u32 mac_hi, mac_lo;
+
+ mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
+ mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
+ mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
+
+ mutex_lock(&dev->alu_mutex);
+
+ for (index = 0; index < dev->num_statics; index++) {
+ /* find a matching entry or an empty slot */
+ data = (index << ALU_STAT_INDEX_S) |
+ ALU_STAT_READ | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ if (ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ goto exit;
+ }
+
+ /* read ALU static table */
+ ksz9477_read_table(dev, static_table);
+
+ if (static_table[0] & ALU_V_STATIC_VALID) {
+ /* check whether this entry has the same vid & mac address */
+ if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
+ ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
+ static_table[3] == mac_lo) {
+ /* found matching one */
+ break;
+ }
+ } else {
+ /* found empty one */
+ break;
+ }
+ }
+
+ /* no available entry */
+ if (index == dev->num_statics)
+ goto exit;
+
+ /* add entry */
+ static_table[0] = ALU_V_STATIC_VALID;
+ static_table[1] |= BIT(port);
+ if (mdb->vid)
+ static_table[1] |= ALU_V_USE_FID;
+ static_table[2] = (mdb->vid << ALU_V_FID_S);
+ static_table[2] |= mac_hi;
+ static_table[3] = mac_lo;
+
+ ksz9477_write_table(dev, static_table);
+
+ data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ if (ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0)
+ dev_dbg(dev->dev, "Failed to write ALU STATIC\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+}
+
+static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ksz_device *dev = ds->priv;
+ u32 static_table[4];
+ u32 data;
+ int index;
+ int ret = 0;
+ u32 mac_hi, mac_lo;
+
+ mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
+ mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
+ mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
+
+ mutex_lock(&dev->alu_mutex);
+
+ for (index = 0; index < dev->num_statics; index++) {
+ /* find the matching entry */
+ data = (index << ALU_STAT_INDEX_S) |
+ ALU_STAT_READ | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
+ if (ret < 0) {
+ dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ goto exit;
+ }
+
+ /* read ALU static table */
+ ksz9477_read_table(dev, static_table);
+
+ if (static_table[0] & ALU_V_STATIC_VALID) {
+ /* check whether this entry has the same vid & mac address */
+ if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) &&
+ ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
+ static_table[3] == mac_lo) {
+ /* found matching one */
+ break;
+ }
+ }
+ }
+
+ /* no matching entry found */
+ if (index == dev->num_statics)
+ goto exit;
+
+ /* clear port */
+ static_table[1] &= ~BIT(port);
+
+ if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
+ /* delete entry */
+ static_table[0] = 0;
+ static_table[1] = 0;
+ static_table[2] = 0;
+ static_table[3] = 0;
+ }
+
+ ksz9477_write_table(dev, static_table);
+
+ data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
+ ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
+
+ /* wait to be finished */
+ ret = ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
+ if (ret < 0)
+ dev_dbg(dev->dev, "Failed to write ALU STATIC\n");
+
+exit:
+ mutex_unlock(&dev->alu_mutex);
+
+ return ret;
+}
+
+static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (ingress)
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
+ else
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
+
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
+
+ /* configure mirror port */
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, true);
+
+ ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+
+ return 0;
+}
+
+static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct ksz_device *dev = ds->priv;
+ u8 data;
+
+ if (mirror->ingress)
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
+ else
+ ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
+
+ ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+
+ if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX)))
+ ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
+ PORT_MIRROR_SNIFFER, false);
+}
+
+static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
+{
+ u8 data8;
+ u8 member;
+ u16 data16;
+ struct ksz_port *p = &dev->ports[port];
+
+ /* enable tail tag for the host port */
+ if (cpu_port)
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
+ true);
+
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
+
+ /* set back pressure */
+ ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
+
+ /* enable broadcast storm limit */
+ ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
+
+ /* disable DiffServ priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
+
+ /* replace priority */
+ ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
+ false);
+ ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
+ MTI_PVID_REPLACE, false);
+
+ /* enable 802.1p priority */
+ ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
+
+ if (port < dev->phy_port_cnt) {
+ /* do not force flow control */
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
+ false);
+
+ } else {
+ /* force flow control */
+ ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
+ PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
+ true);
+
+ /* configure the MAC interface mode and speed */
+ ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
+ data8 &= ~PORT_MII_NOT_1GBIT;
+ data8 &= ~PORT_MII_SEL_M;
+ switch (dev->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ data8 |= PORT_MII_NOT_1GBIT;
+ data8 |= PORT_MII_SEL;
+ p->phydev.speed = SPEED_100;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ data8 |= PORT_MII_NOT_1GBIT;
+ data8 |= PORT_RMII_SEL;
+ p->phydev.speed = SPEED_100;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ data8 |= PORT_GMII_SEL;
+ p->phydev.speed = SPEED_1000;
+ break;
+ default:
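+ /* Treat everything else as RGMII and program the internal
+ * delays: the -ID and -RXID variants enable the ingress delay,
+ * the -ID and -TXID variants the egress delay.
+ */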
+ data8 &= ~PORT_RGMII_ID_IG_ENABLE;
+ data8 &= ~PORT_RGMII_ID_EG_ENABLE;
+ if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ dev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ data8 |= PORT_RGMII_ID_IG_ENABLE;
+ if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ dev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ data8 |= PORT_RGMII_ID_EG_ENABLE;
+ data8 |= PORT_RGMII_SEL;
+ p->phydev.speed = SPEED_1000;
+ break;
+ }
+ ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
+ p->phydev.duplex = 1;
+ }
+ if (cpu_port) {
+ member = dev->port_mask;
+ dev->on_ports = dev->host_mask;
+ dev->live_ports = dev->host_mask;
+ } else {
+ member = dev->host_mask | p->vid_member;
+ dev->on_ports |= (1 << port);
+
+ /* Link was detected before the port was enabled. */
+ if (p->phydev.link)
+ dev->live_ports |= (1 << port);
+ }
+ ksz9477_cfg_port_member(dev, port, member);
+
+ /* clear pending interrupts */
+ if (port < dev->phy_port_cnt)
+ ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
+}
+
+static void ksz9477_config_cpu_port(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
+ int i;
+
+ ds->num_ports = dev->port_cnt;
+
+ for (i = 0; i < dev->port_cnt; i++) {
+ if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
+ dev->cpu_port = i;
+ dev->host_mask = (1 << dev->cpu_port);
+ dev->port_mask |= dev->host_mask;
+
+ /* enable cpu port */
+ ksz9477_port_setup(dev, i, true);
+ p = &dev->ports[dev->cpu_port];
+ p->vid_member = dev->port_mask;
+ p->on = 1;
+ }
+ }
+
+ dev->member = dev->host_mask;
+
+ for (i = 0; i < dev->mib_port_cnt; i++) {
+ if (i == dev->cpu_port)
+ continue;
+ p = &dev->ports[i];
+
+ /* Initialize to a non-zero value so that ksz9477_cfg_port_member()
+ * will be called.
+ */
+ p->vid_member = (1 << i);
+ p->member = dev->port_mask;
+ ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
+ p->on = 1;
+ if (i < dev->phy_port_cnt)
+ p->phy = 1;
+ if (dev->chip_id == 0x00947700 && i == 6) {
+ p->sgmii = 1;
+
+ /* SGMII PHY detection code is not implemented yet. */
+ p->phy = 0;
+ }
+ }
+}
+
+static int ksz9477_setup(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret = 0;
+
+ dev->vlan_cache = devm_kcalloc(dev->dev, dev->num_vlans,
+ sizeof(struct vlan_table), GFP_KERNEL);
+ if (!dev->vlan_cache)
+ return -ENOMEM;
+
+ ret = ksz9477_reset_switch(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+ return ret;
+ }
+
+ /* Required for port partitioning. */
+ ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY,
+ true);
+
+ /* accept packets up to 2000 bytes */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
+
+ ksz9477_config_cpu_port(ds);
+
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
+
+ /* queue based egress rate limit */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
+
+ /* start switch */
+ ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
+
+ return 0;
+}
+
+static const struct dsa_switch_ops ksz9477_switch_ops = {
+ .get_tag_protocol = ksz9477_get_tag_protocol,
+ .setup = ksz9477_setup,
+ .phy_read = ksz9477_phy_read16,
+ .phy_write = ksz9477_phy_write16,
+ .port_enable = ksz_enable_port,
+ .port_disable = ksz_disable_port,
+ .get_strings = ksz9477_get_strings,
+ .get_ethtool_stats = ksz_get_ethtool_stats,
+ .get_sset_count = ksz_sset_count,
+ .port_bridge_join = ksz_port_bridge_join,
+ .port_bridge_leave = ksz_port_bridge_leave,
+ .port_stp_state_set = ksz9477_port_stp_state_set,
+ .port_fast_age = ksz_port_fast_age,
+ .port_vlan_filtering = ksz9477_port_vlan_filtering,
+ .port_vlan_prepare = ksz_port_vlan_prepare,
+ .port_vlan_add = ksz9477_port_vlan_add,
+ .port_vlan_del = ksz9477_port_vlan_del,
+ .port_fdb_dump = ksz9477_port_fdb_dump,
+ .port_fdb_add = ksz9477_port_fdb_add,
+ .port_fdb_del = ksz9477_port_fdb_del,
+ .port_mdb_prepare = ksz_port_mdb_prepare,
+ .port_mdb_add = ksz9477_port_mdb_add,
+ .port_mdb_del = ksz9477_port_mdb_del,
+ .port_mirror_add = ksz9477_port_mirror_add,
+ .port_mirror_del = ksz9477_port_mirror_del,
+};
+
+static u32 ksz9477_get_port_addr(int port, int offset)
+{
+ return PORT_CTRL_ADDR(port, offset);
+}
+
+static int ksz9477_switch_detect(struct ksz_device *dev)
+{
+ u8 data8;
+ u32 id32;
+ int ret;
+
+ /* turn off SPI DO Edge select */
+ ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
+ if (ret)
+ return ret;
+
+ data8 &= ~SPI_AUTO_EDGE_DETECTION;
+ ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
+ if (ret)
+ return ret;
+
+ /* read chip id */
+ ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
+ if (ret)
+ return ret;
+
+ /* The number of ports can be reduced depending on the chip. */
+ dev->mib_port_cnt = TOTAL_PORT_NUM;
+ dev->phy_port_cnt = 5;
+
+ dev->chip_id = id32;
+
+ return 0;
+}
+
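+/* The 32-bit value read from REG_CHIP_ID0__1 is matched verbatim against
+ * the chip_id fields below; 0x00947700, for instance, appears to encode
+ * the "9477" part number in its middle bytes.
+ */
+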
+struct ksz_chip_data {
+ u32 chip_id;
+ const char *dev_name;
+ int num_vlans;
+ int num_alus;
+ int num_statics;
+ int cpu_ports;
+ int port_cnt;
+};
+
+static const struct ksz_chip_data ksz9477_switch_chips[] = {
+ {
+ .chip_id = 0x00947700,
+ .dev_name = "KSZ9477",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ },
+ {
+ .chip_id = 0x00989700,
+ .dev_name = "KSZ9897",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x7F, /* can be configured as cpu port */
+ .port_cnt = 7, /* total physical port count */
+ },
+};
+
+static int ksz9477_switch_init(struct ksz_device *dev)
+{
+ int i;
+
+ dev->ds->ops = &ksz9477_switch_ops;
+
+ for (i = 0; i < ARRAY_SIZE(ksz9477_switch_chips); i++) {
+ const struct ksz_chip_data *chip = &ksz9477_switch_chips[i];
+
+ if (dev->chip_id == chip->chip_id) {
+ dev->name = chip->dev_name;
+ dev->num_vlans = chip->num_vlans;
+ dev->num_alus = chip->num_alus;
+ dev->num_statics = chip->num_statics;
+ dev->port_cnt = chip->port_cnt;
+ dev->cpu_ports = chip->cpu_ports;
+
+ break;
+ }
+ }
+
+ /* no switch found */
+ if (!dev->port_cnt)
+ return -ENODEV;
+
+ dev->port_mask = (1 << dev->port_cnt) - 1;
+
+ dev->reg_mib_cnt = SWITCH_COUNTER_NUM;
+ dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM;
+
+ i = dev->mib_port_cnt;
+ dev->ports = devm_kzalloc(dev->dev, sizeof(struct ksz_port) * i,
+ GFP_KERNEL);
+ if (!dev->ports)
+ return -ENOMEM;
+ for (i = 0; i < dev->mib_port_cnt; i++) {
+ dev->ports[i].mib.counters =
+ devm_kzalloc(dev->dev,
+ sizeof(u64) *
+ (TOTAL_SWITCH_COUNTER_NUM + 1),
+ GFP_KERNEL);
+ if (!dev->ports[i].mib.counters)
+ return -ENOMEM;
+ }
+ dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
+
+ return 0;
+}
+
+static void ksz9477_switch_exit(struct ksz_device *dev)
+{
+ ksz9477_reset_switch(dev);
+}
+
+static const struct ksz_dev_ops ksz9477_dev_ops = {
+ .get_port_addr = ksz9477_get_port_addr,
+ .cfg_port_member = ksz9477_cfg_port_member,
+ .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
+ .port_setup = ksz9477_port_setup,
+ .shutdown = ksz9477_reset_switch,
+ .detect = ksz9477_switch_detect,
+ .init = ksz9477_switch_init,
+ .exit = ksz9477_switch_exit,
+};
+
+int ksz9477_switch_register(struct ksz_device *dev)
+{
+ return ksz_switch_register(dev, &ksz9477_dev_ops);
+}
+EXPORT_SYMBOL(ksz9477_switch_register);
+
+MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index 6aa6752035a1..2938e892b631 100644
--- a/drivers/net/dsa/microchip/ksz_9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -1,19 +1,8 @@
-/*
- * Microchip KSZ9477 register definitions
- *
- * Copyright (C) 2017
+/* SPDX-License-Identifier: GPL-2.0
*
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
+ * Microchip KSZ9477 register definitions
*
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
*/
#ifndef __KSZ9477_REGS_H
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
new file mode 100644
index 000000000000..d757ba151cb1
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Microchip KSZ9477 series register access through SPI
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ */
+
+#include <asm/unaligned.h>
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include "ksz_priv.h"
+#include "ksz_spi.h"
+
+/* SPI frame opcodes */
+#define KS_SPIOP_RD 3
+#define KS_SPIOP_WR 2
+
+#define SPI_ADDR_SHIFT 24
+#define SPI_ADDR_MASK (BIT(SPI_ADDR_SHIFT) - 1)
+#define SPI_TURNAROUND_SHIFT 5
+
+/* Enough to read all switch port registers. */
+#define SPI_TX_BUF_LEN 0x100
+
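+/* A command word is assembled as ((opcode << 24 | reg) << 5), i.e. a
+ * 2-bit opcode, a 24-bit register address and 5 turnaround bits. As a
+ * worked example (layout inferred from the shifts below, not quoted from
+ * a datasheet), reading register 0x0300 sends the big-endian word
+ * ((KS_SPIOP_RD << 24 | 0x0300) << 5) = 0x60006000.
+ */
+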
+static int ksz9477_spi_read_reg(struct spi_device *spi, u32 reg, u8 *val,
+ unsigned int len)
+{
+ u32 txbuf;
+ int ret;
+
+ txbuf = reg & SPI_ADDR_MASK;
+ txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT;
+ txbuf <<= SPI_TURNAROUND_SHIFT;
+ txbuf = cpu_to_be32(txbuf);
+
+ ret = spi_write_then_read(spi, &txbuf, 4, val, len);
+ return ret;
+}
+
+static int ksz9477_spi_write_reg(struct spi_device *spi, u32 reg, u8 *val,
+ unsigned int len)
+{
+ u32 *txbuf = (u32 *)val;
+
+ *txbuf = reg & SPI_ADDR_MASK;
+ *txbuf |= (KS_SPIOP_WR << SPI_ADDR_SHIFT);
+ *txbuf <<= SPI_TURNAROUND_SHIFT;
+ *txbuf = cpu_to_be32(*txbuf);
+
+ return spi_write(spi, txbuf, 4 + len);
+}
+
+static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
+ unsigned int len)
+{
+ struct spi_device *spi = dev->priv;
+
+ return ksz9477_spi_read_reg(spi, reg, data, len);
+}
+
+static int ksz_spi_write(struct ksz_device *dev, u32 reg, void *data,
+ unsigned int len)
+{
+ struct spi_device *spi = dev->priv;
+
+ if (len > SPI_TX_BUF_LEN)
+ len = SPI_TX_BUF_LEN;
+ memcpy(&dev->txbuf[4], data, len);
+ return ksz9477_spi_write_reg(spi, reg, dev->txbuf, len);
+}
+
+static int ksz_spi_read24(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ *val = 0;
+ ret = ksz_spi_read(dev, reg, (u8 *)val, 3);
+ if (!ret) {
+ *val = be32_to_cpu(*val);
+ /* convert to 24-bit */
+ *val >>= 8;
+ }
+
+ return ret;
+}
+
+static int ksz_spi_write24(struct ksz_device *dev, u32 reg, u32 value)
+{
+ /* convert to big-endian 24-bit, aligned to the MSB */
+ value <<= 8;
+ value = cpu_to_be32(value);
+ return ksz_spi_write(dev, reg, &value, 3);
+}
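+
+/* For example, ksz_spi_write24(dev, reg, 0x123456) shifts the value to
+ * 0x12345600, converts it to big endian and writes the first three
+ * bytes, so 0x12 0x34 0x56 go on the wire after the command word.
+ */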
+
+static const struct ksz_io_ops ksz9477_spi_ops = {
+ .read8 = ksz_spi_read8,
+ .read16 = ksz_spi_read16,
+ .read24 = ksz_spi_read24,
+ .read32 = ksz_spi_read32,
+ .write8 = ksz_spi_write8,
+ .write16 = ksz_spi_write16,
+ .write24 = ksz_spi_write24,
+ .write32 = ksz_spi_write32,
+ .get = ksz_spi_get,
+ .set = ksz_spi_set,
+};
+
+static int ksz9477_spi_probe(struct spi_device *spi)
+{
+ struct ksz_device *dev;
+ int ret;
+
+ dev = ksz_switch_alloc(&spi->dev, &ksz9477_spi_ops, spi);
+ if (!dev)
+ return -ENOMEM;
+
+ if (spi->dev.platform_data)
+ dev->pdata = spi->dev.platform_data;
+
+ dev->txbuf = devm_kzalloc(dev->dev, 4 + SPI_TX_BUF_LEN, GFP_KERNEL);
+ if (!dev->txbuf)
+ return -ENOMEM;
+
+ ret = ksz9477_switch_register(dev);
+
+ /* Main DSA driver may not be started yet. */
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, dev);
+
+ return 0;
+}
+
+static int ksz9477_spi_remove(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ ksz_switch_remove(dev);
+
+ return 0;
+}
+
+static void ksz9477_spi_shutdown(struct spi_device *spi)
+{
+ struct ksz_device *dev = spi_get_drvdata(spi);
+
+ if (dev && dev->dev_ops->shutdown)
+ dev->dev_ops->shutdown(dev);
+}
+
+static const struct of_device_id ksz9477_dt_ids[] = {
+ { .compatible = "microchip,ksz9477" },
+ { .compatible = "microchip,ksz9897" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+
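+/* A minimal sketch of a matching device tree node (property values are
+ * board-specific assumptions, not requirements of this driver):
+ *
+ *   ksz9477: switch@0 {
+ *           compatible = "microchip,ksz9477";
+ *           reg = <0>;
+ *           spi-max-frequency = <44000000>;
+ *   };
+ */
+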
+static struct spi_driver ksz9477_spi_driver = {
+ .driver = {
+ .name = "ksz9477-switch",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ksz9477_dt_ids),
+ },
+ .probe = ksz9477_spi_probe,
+ .remove = ksz9477_spi_remove,
+ .shutdown = ksz9477_spi_shutdown,
+};
+
+module_spi_driver(ksz9477_spi_driver);
+
+MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
+MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 86b6464b4525..3b12e2dcff31 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -1,1145 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Microchip switch driver main logic
*
- * Copyright (C) 2017
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
*/
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "ksz_priv.h"
-static const struct {
- int index;
- char string[ETH_GSTRING_LEN];
-} mib_names[TOTAL_SWITCH_COUNTER_NUM] = {
- { 0x00, "rx_hi" },
- { 0x01, "rx_undersize" },
- { 0x02, "rx_fragments" },
- { 0x03, "rx_oversize" },
- { 0x04, "rx_jabbers" },
- { 0x05, "rx_symbol_err" },
- { 0x06, "rx_crc_err" },
- { 0x07, "rx_align_err" },
- { 0x08, "rx_mac_ctrl" },
- { 0x09, "rx_pause" },
- { 0x0A, "rx_bcast" },
- { 0x0B, "rx_mcast" },
- { 0x0C, "rx_ucast" },
- { 0x0D, "rx_64_or_less" },
- { 0x0E, "rx_65_127" },
- { 0x0F, "rx_128_255" },
- { 0x10, "rx_256_511" },
- { 0x11, "rx_512_1023" },
- { 0x12, "rx_1024_1522" },
- { 0x13, "rx_1523_2000" },
- { 0x14, "rx_2001" },
- { 0x15, "tx_hi" },
- { 0x16, "tx_late_col" },
- { 0x17, "tx_pause" },
- { 0x18, "tx_bcast" },
- { 0x19, "tx_mcast" },
- { 0x1A, "tx_ucast" },
- { 0x1B, "tx_deferred" },
- { 0x1C, "tx_total_col" },
- { 0x1D, "tx_exc_col" },
- { 0x1E, "tx_single_col" },
- { 0x1F, "tx_mult_col" },
- { 0x80, "rx_total" },
- { 0x81, "tx_total" },
- { 0x82, "rx_discards" },
- { 0x83, "tx_discards" },
-};
-
-static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
-{
- u8 data;
-
- ksz_read8(dev, addr, &data);
- if (set)
- data |= bits;
- else
- data &= ~bits;
- ksz_write8(dev, addr, data);
-}
-
-static void ksz_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set)
-{
- u32 data;
-
- ksz_read32(dev, addr, &data);
- if (set)
- data |= bits;
- else
- data &= ~bits;
- ksz_write32(dev, addr, data);
-}
-
-static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
- bool set)
-{
- u32 addr;
- u8 data;
-
- addr = PORT_CTRL_ADDR(port, offset);
- ksz_read8(dev, addr, &data);
-
- if (set)
- data |= bits;
- else
- data &= ~bits;
-
- ksz_write8(dev, addr, data);
-}
-
-static void ksz_port_cfg32(struct ksz_device *dev, int port, int offset,
- u32 bits, bool set)
-{
- u32 addr;
- u32 data;
-
- addr = PORT_CTRL_ADDR(port, offset);
- ksz_read32(dev, addr, &data);
-
- if (set)
- data |= bits;
- else
- data &= ~bits;
-
- ksz_write32(dev, addr, data);
-}
-
-static int wait_vlan_ctrl_ready(struct ksz_device *dev, u32 waiton, int timeout)
-{
- u8 data;
-
- do {
- ksz_read8(dev, REG_SW_VLAN_CTRL, &data);
- if (!(data & waiton))
- break;
- usleep_range(1, 10);
- } while (timeout-- > 0);
-
- if (timeout <= 0)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int get_vlan_table(struct dsa_switch *ds, u16 vid, u32 *vlan_table)
-{
- struct ksz_device *dev = ds->priv;
- int ret;
-
- mutex_lock(&dev->vlan_mutex);
-
- ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
- ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START);
-
- /* wait to be cleared */
- ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
- if (ret < 0) {
- dev_dbg(dev->dev, "Failed to read vlan table\n");
- goto exit;
- }
-
- ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]);
- ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]);
- ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]);
-
- ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
-
-exit:
- mutex_unlock(&dev->vlan_mutex);
-
- return ret;
-}
-
-static int set_vlan_table(struct dsa_switch *ds, u16 vid, u32 *vlan_table)
-{
- struct ksz_device *dev = ds->priv;
- int ret;
-
- mutex_lock(&dev->vlan_mutex);
-
- ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]);
- ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]);
- ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]);
-
- ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M);
- ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE);
-
- /* wait to be cleared */
- ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000);
- if (ret < 0) {
- dev_dbg(dev->dev, "Failed to write vlan table\n");
- goto exit;
- }
-
- ksz_write8(dev, REG_SW_VLAN_CTRL, 0);
-
- /* update vlan cache table */
- dev->vlan_cache[vid].table[0] = vlan_table[0];
- dev->vlan_cache[vid].table[1] = vlan_table[1];
- dev->vlan_cache[vid].table[2] = vlan_table[2];
-
-exit:
- mutex_unlock(&dev->vlan_mutex);
-
- return ret;
-}
-
-static void read_table(struct dsa_switch *ds, u32 *table)
-{
- struct ksz_device *dev = ds->priv;
-
- ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]);
- ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]);
- ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]);
- ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]);
-}
-
-static void write_table(struct dsa_switch *ds, u32 *table)
-{
- struct ksz_device *dev = ds->priv;
-
- ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]);
- ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]);
- ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]);
- ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]);
-}
-
-static int wait_alu_ready(struct ksz_device *dev, u32 waiton, int timeout)
-{
- u32 data;
-
- do {
- ksz_read32(dev, REG_SW_ALU_CTRL__4, &data);
- if (!(data & waiton))
- break;
- usleep_range(1, 10);
- } while (timeout-- > 0);
-
- if (timeout <= 0)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int wait_alu_sta_ready(struct ksz_device *dev, u32 waiton, int timeout)
-{
- u32 data;
-
- do {
- ksz_read32(dev, REG_SW_ALU_STAT_CTRL__4, &data);
- if (!(data & waiton))
- break;
- usleep_range(1, 10);
- } while (timeout-- > 0);
-
- if (timeout <= 0)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int ksz_reset_switch(struct dsa_switch *ds)
+void ksz_update_port_member(struct ksz_device *dev, int port)
{
- struct ksz_device *dev = ds->priv;
- u8 data8;
- u16 data16;
- u32 data32;
-
- /* reset switch */
- ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true);
-
- /* turn off SPI DO Edge select */
- ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
- data8 &= ~SPI_AUTO_EDGE_DETECTION;
- ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
-
- /* default configuration */
- ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
- data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
- SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
-
- /* disable interrupts */
- ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
- ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
- ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
-
- /* set broadcast storm protection 10% rate */
- ksz_read16(dev, REG_SW_MAC_CTRL_2, &data16);
- data16 &= ~BROADCAST_STORM_RATE;
- data16 |= (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100;
- ksz_write16(dev, REG_SW_MAC_CTRL_2, data16);
-
- return 0;
-}
-
-static void port_setup(struct ksz_device *dev, int port, bool cpu_port)
-{
- u8 data8;
- u16 data16;
-
- /* enable tag tail for host port */
- if (cpu_port)
- ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
- true);
-
- ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
-
- /* set back pressure */
- ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true);
-
- /* set flow control */
- ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
- PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL, true);
-
- /* enable broadcast storm limit */
- ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
-
- /* disable DiffServ priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
-
- /* replace priority */
- ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
- false);
- ksz_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
- MTI_PVID_REPLACE, false);
-
- /* enable 802.1p priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
-
- /* configure MAC to 1G & RGMII mode */
- ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8);
- data8 |= PORT_RGMII_ID_EG_ENABLE;
- data8 &= ~PORT_MII_NOT_1GBIT;
- data8 &= ~PORT_MII_SEL_M;
- data8 |= PORT_RGMII_SEL;
- ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
-
- /* clear pending interrupts */
- ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
-}
-
-static void ksz_config_cpu_port(struct dsa_switch *ds)
-{
- struct ksz_device *dev = ds->priv;
+ struct ksz_port *p;
int i;
- ds->num_ports = dev->port_cnt;
-
- for (i = 0; i < ds->num_ports; i++) {
- if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
- dev->cpu_port = i;
-
- /* enable cpu port */
- port_setup(dev, i, true);
- }
- }
-}
-
-static int ksz_setup(struct dsa_switch *ds)
-{
- struct ksz_device *dev = ds->priv;
- int ret = 0;
-
- dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table),
- dev->num_vlans, GFP_KERNEL);
- if (!dev->vlan_cache)
- return -ENOMEM;
-
- ret = ksz_reset_switch(ds);
- if (ret) {
- dev_err(ds->dev, "failed to reset switch\n");
- return ret;
+ for (i = 0; i < dev->port_cnt; i++) {
+ if (i == port || i == dev->cpu_port)
+ continue;
+ p = &dev->ports[i];
+ if (!(dev->member & (1 << i)))
+ continue;
+
+ /* Port is a member of the bridge and is forwarding. */
+ if (p->stp_state == BR_STATE_FORWARDING &&
+ p->member != dev->member)
+ dev->dev_ops->cfg_port_member(dev, i, dev->member);
}
-
- /* accept packet up to 2000bytes */
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true);
-
- ksz_config_cpu_port(ds);
-
- ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true);
-
- /* queue based egress rate limit */
- ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true);
-
- /* start switch */
- ksz_cfg(dev, REG_SW_OPERATION, SW_START, true);
-
- return 0;
}
+EXPORT_SYMBOL_GPL(ksz_update_port_member);
-static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
- int port)
-{
- return DSA_TAG_PROTO_KSZ;
-}
-
-static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
+int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
- u16 val = 0;
+ u16 val = 0xffff;
- ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ dev->dev_ops->r_phy(dev, addr, reg, &val);
return val;
}
+EXPORT_SYMBOL_GPL(ksz_phy_read16);
-static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
+int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct ksz_device *dev = ds->priv;
- ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+ dev->dev_ops->w_phy(dev, addr, reg, val);
return 0;
}
+EXPORT_SYMBOL_GPL(ksz_phy_write16);
-static int ksz_enable_port(struct dsa_switch *ds, int port,
- struct phy_device *phy)
+int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
struct ksz_device *dev = ds->priv;
- /* setup slave port */
- port_setup(dev, port, false);
-
- return 0;
-}
-
-static void ksz_disable_port(struct dsa_switch *ds, int port,
- struct phy_device *phy)
-{
- struct ksz_device *dev = ds->priv;
-
- /* there is no port disable */
- ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true);
-}
-
-static int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
-{
if (sset != ETH_SS_STATS)
return 0;
- return TOTAL_SWITCH_COUNTER_NUM;
+ return dev->mib_cnt;
}
+EXPORT_SYMBOL_GPL(ksz_sset_count);
-static void ksz_get_strings(struct dsa_switch *ds, int port,
- u32 stringset, uint8_t *buf)
-{
- int i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
- memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string,
- ETH_GSTRING_LEN);
- }
-}
-
-static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *buf)
+int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
{
struct ksz_device *dev = ds->priv;
- int i;
- u32 data;
- int timeout;
-
- mutex_lock(&dev->stats_mutex);
-
- for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) {
- data = MIB_COUNTER_READ;
- data |= ((mib_names[i].index & 0xFF) << MIB_COUNTER_INDEX_S);
- ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data);
-
- timeout = 1000;
- do {
- ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4,
- &data);
- usleep_range(1, 10);
- if (!(data & MIB_COUNTER_READ))
- break;
- } while (timeout-- > 0);
-
- /* failed to read MIB. get out of loop */
- if (!timeout) {
- dev_dbg(dev->dev, "Failed to get MIB\n");
- break;
- }
- /* count resets upon read */
- ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data);
+ dev->br_member |= (1 << port);
- dev->mib_value[i] += (uint64_t)data;
- buf[i] = dev->mib_value[i];
- }
-
- mutex_unlock(&dev->stats_mutex);
-}
-
-static void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
-{
- struct ksz_device *dev = ds->priv;
- u8 data;
-
- ksz_pread8(dev, port, P_STP_CTRL, &data);
- data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
-
- switch (state) {
- case BR_STATE_DISABLED:
- data |= PORT_LEARN_DISABLE;
- break;
- case BR_STATE_LISTENING:
- data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- break;
- case BR_STATE_LEARNING:
- data |= PORT_RX_ENABLE;
- break;
- case BR_STATE_FORWARDING:
- data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
- break;
- case BR_STATE_BLOCKING:
- data |= PORT_LEARN_DISABLE;
- break;
- default:
- dev_err(ds->dev, "invalid STP state: %d\n", state);
- return;
- }
+ /* port_stp_state_set() will be called afterwards to put the port in
+ * the appropriate state, so there is no need to do anything here.
+ */
- ksz_pwrite8(dev, port, P_STP_CTRL, data);
+ return 0;
}
+EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
-static void ksz_port_fast_age(struct dsa_switch *ds, int port)
+void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
{
struct ksz_device *dev = ds->priv;
- u8 data8;
- ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
- data8 |= SW_FAST_AGING;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+ dev->br_member &= ~(1 << port);
+ dev->member &= ~(1 << port);
- data8 &= ~SW_FAST_AGING;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+ /* port_stp_state_set() will be called afterwards to put the port in
+ * the forwarding state, so there is no need to do anything here.
+ */
}
+EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);
-static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag)
+void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
- if (flag) {
- ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
- PORT_VLAN_LOOKUP_VID_0, true);
- ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, true);
- ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true);
- } else {
- ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false);
- ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, false);
- ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
- PORT_VLAN_LOOKUP_VID_0, false);
- }
-
- return 0;
+ dev->dev_ops->flush_dyn_mac_table(dev, port);
}
+EXPORT_SYMBOL_GPL(ksz_port_fast_age);
-static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
/* nothing needed */
return 0;
}
+EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);
-static void ksz_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct ksz_device *dev = ds->priv;
- u32 vlan_table[3];
- u16 vid;
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (get_vlan_table(ds, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to get vlan table\n");
- return;
- }
-
- vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M);
- if (untagged)
- vlan_table[1] |= BIT(port);
- else
- vlan_table[1] &= ~BIT(port);
- vlan_table[1] &= ~(BIT(dev->cpu_port));
-
- vlan_table[2] |= BIT(port) | BIT(dev->cpu_port);
-
- if (set_vlan_table(ds, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to set vlan table\n");
- return;
- }
-
- /* change PVID */
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID)
- ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid);
- }
-}
-
-static int ksz_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- struct ksz_device *dev = ds->priv;
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- u32 vlan_table[3];
- u16 vid;
- u16 pvid;
-
- ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid);
- pvid = pvid & 0xFFF;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (get_vlan_table(ds, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to get vlan table\n");
- return -ETIMEDOUT;
- }
-
- vlan_table[2] &= ~BIT(port);
-
- if (pvid == vid)
- pvid = 1;
-
- if (untagged)
- vlan_table[1] &= ~BIT(port);
-
- if (set_vlan_table(ds, vid, vlan_table)) {
- dev_dbg(dev->dev, "Failed to set vlan table\n");
- return -ETIMEDOUT;
- }
- }
-
- ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid);
-
- return 0;
-}
-
-struct alu_struct {
- /* entry 1 */
- u8 is_static:1;
- u8 is_src_filter:1;
- u8 is_dst_filter:1;
- u8 prio_age:3;
- u32 _reserv_0_1:23;
- u8 mstp:3;
- /* entry 2 */
- u8 is_override:1;
- u8 is_use_fid:1;
- u32 _reserv_1_1:23;
- u8 port_forward:7;
- /* entry 3 & 4*/
- u32 _reserv_2_1:9;
- u8 fid:7;
- u8 mac[ETH_ALEN];
-};
-
-static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
-{
- struct ksz_device *dev = ds->priv;
- u32 alu_table[4];
- u32 data;
- int ret = 0;
-
- mutex_lock(&dev->alu_mutex);
-
- /* find any entry with mac & vid */
- data = vid << ALU_FID_INDEX_S;
- data |= ((addr[0] << 8) | addr[1]);
- ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
-
- data = ((addr[2] << 24) | (addr[3] << 16));
- data |= ((addr[4] << 8) | addr[5]);
- ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
-
- /* start read operation */
- ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
-
- /* wait to be finished */
- ret = wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0) {
- dev_dbg(dev->dev, "Failed to read ALU\n");
- goto exit;
- }
-
- /* read ALU entry */
- read_table(ds, alu_table);
-
- /* update ALU entry */
- alu_table[0] = ALU_V_STATIC_VALID;
- alu_table[1] |= BIT(port);
- if (vid)
- alu_table[1] |= ALU_V_USE_FID;
- alu_table[2] = (vid << ALU_V_FID_S);
- alu_table[2] |= ((addr[0] << 8) | addr[1]);
- alu_table[3] = ((addr[2] << 24) | (addr[3] << 16));
- alu_table[3] |= ((addr[4] << 8) | addr[5]);
-
- write_table(ds, alu_table);
-
- ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
-
- /* wait to be finished */
- ret = wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0)
- dev_dbg(dev->dev, "Failed to write ALU\n");
-
-exit:
- mutex_unlock(&dev->alu_mutex);
-
- return ret;
-}
-
-static int ksz_port_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid)
+int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
+ void *data)
{
struct ksz_device *dev = ds->priv;
- u32 alu_table[4];
- u32 data;
int ret = 0;
-
- mutex_lock(&dev->alu_mutex);
-
- /* read any entry with mac & vid */
- data = vid << ALU_FID_INDEX_S;
- data |= ((addr[0] << 8) | addr[1]);
- ksz_write32(dev, REG_SW_ALU_INDEX_0, data);
-
- data = ((addr[2] << 24) | (addr[3] << 16));
- data |= ((addr[4] << 8) | addr[5]);
- ksz_write32(dev, REG_SW_ALU_INDEX_1, data);
-
- /* start read operation */
- ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START);
-
- /* wait to be finished */
- ret = wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0) {
- dev_dbg(dev->dev, "Failed to read ALU\n");
- goto exit;
- }
-
- ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]);
- if (alu_table[0] & ALU_V_STATIC_VALID) {
- ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]);
- ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]);
- ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
-
- /* clear forwarding port */
- alu_table[2] &= ~BIT(port);
-
- /* if there is no port to forward, clear table */
- if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
- alu_table[0] = 0;
- alu_table[1] = 0;
- alu_table[2] = 0;
- alu_table[3] = 0;
- }
- } else {
- alu_table[0] = 0;
- alu_table[1] = 0;
- alu_table[2] = 0;
- alu_table[3] = 0;
- }
-
- write_table(ds, alu_table);
-
- ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START);
-
- /* wait to be finished */
- ret = wait_alu_ready(dev, ALU_START, 1000);
- if (ret < 0)
- dev_dbg(dev->dev, "Failed to write ALU\n");
-
-exit:
- mutex_unlock(&dev->alu_mutex);
-
- return ret;
-}
-
-static void convert_alu(struct alu_struct *alu, u32 *alu_table)
-{
- alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID);
- alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER);
- alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER);
- alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) &
- ALU_V_PRIO_AGE_CNT_M;
- alu->mstp = alu_table[0] & ALU_V_MSTP_M;
-
- alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE);
- alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID);
- alu->port_forward = alu_table[1] & ALU_V_PORT_MAP;
-
- alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M;
-
- alu->mac[0] = (alu_table[2] >> 8) & 0xFF;
- alu->mac[1] = alu_table[2] & 0xFF;
- alu->mac[2] = (alu_table[3] >> 24) & 0xFF;
- alu->mac[3] = (alu_table[3] >> 16) & 0xFF;
- alu->mac[4] = (alu_table[3] >> 8) & 0xFF;
- alu->mac[5] = alu_table[3] & 0xFF;
-}
-
-static int ksz_port_fdb_dump(struct dsa_switch *ds, int port,
- dsa_fdb_dump_cb_t *cb, void *data)
-{
- struct ksz_device *dev = ds->priv;
- int ret = 0;
- u32 ksz_data;
- u32 alu_table[4];
+ u16 i = 0;
+ u16 entries = 0;
+ u8 timestamp = 0;
+ u8 fid;
+ u8 member;
struct alu_struct alu;
- int timeout;
-
- mutex_lock(&dev->alu_mutex);
-
- /* start ALU search */
- ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH);
do {
- timeout = 1000;
- do {
- ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data);
- if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START))
- break;
- usleep_range(1, 10);
- } while (timeout-- > 0);
-
- if (!timeout) {
- dev_dbg(dev->dev, "Failed to search ALU\n");
- ret = -ETIMEDOUT;
- goto exit;
- }
-
- /* read ALU table */
- read_table(ds, alu_table);
-
- convert_alu(&alu, alu_table);
-
- if (alu.port_forward & BIT(port)) {
+ alu.is_static = false;
+ ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
+ &member, &timestamp,
+ &entries);
+ if (!ret && (member & BIT(port))) {
ret = cb(alu.mac, alu.fid, alu.is_static, data);
if (ret)
- goto exit;
+ break;
}
- } while (ksz_data & ALU_START);
-
-exit:
-
- /* stop ALU search */
- ksz_write32(dev, REG_SW_ALU_CTRL__4, 0);
-
- mutex_unlock(&dev->alu_mutex);
+ i++;
+ } while (i < entries);
+ if (i >= entries)
+ ret = 0;
return ret;
}
+EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);
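A minimal sketch of a dsa_fdb_dump_cb_t callback for the rewritten dump path above; r_dyn_mac_table() reports the table size back through 'entries', and a non-zero return from the callback stops the walk (the callback name and the counter cookie are illustrative, not part of this patch):

	static int example_fdb_cb(const unsigned char *addr, u16 vid,
				  bool is_static, void *data)
	{
		int *count = data;	/* caller-provided cookie */

		pr_info("fdb: %pM vid %u %s\n", addr, vid,
			is_static ? "static" : "dynamic");
		(*count)++;
		return 0;	/* non-zero stops ksz_port_fdb_dump() */
	}
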
-static int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
/* nothing to do */
return 0;
}
+EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);
-static void ksz_port_mdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+void ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
- u32 static_table[4];
- u32 data;
+ struct alu_struct alu;
int index;
- u32 mac_hi, mac_lo;
-
- mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
- mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
- mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
-
- mutex_lock(&dev->alu_mutex);
+ int empty = 0;
+
+ alu.port_forward = 0;
for (index = 0; index < dev->num_statics; index++) {
- /* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
- ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
- /* wait to be finished */
- if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) {
- dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
- goto exit;
- }
-
- /* read ALU static table */
- read_table(ds, static_table);
-
- if (static_table[0] & ALU_V_STATIC_VALID) {
- /* check this has same vid & mac address */
- if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) &&
- ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
- (static_table[3] == mac_lo)) {
- /* found matching one */
+ if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
+ /* Found one already in static MAC table. */
+ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+ alu.fid == mdb->vid)
break;
- }
- } else {
- /* found empty one */
- break;
+ /* Remember the first empty entry. */
+ } else if (!empty) {
+ empty = index + 1;
}
}
/* no available entry */
- if (index == dev->num_statics)
- goto exit;
+ if (index == dev->num_statics && !empty)
+ return;
/* add entry */
- static_table[0] = ALU_V_STATIC_VALID;
- static_table[1] |= BIT(port);
- if (mdb->vid)
- static_table[1] |= ALU_V_USE_FID;
- static_table[2] = (mdb->vid << ALU_V_FID_S);
- static_table[2] |= mac_hi;
- static_table[3] = mac_lo;
-
- write_table(ds, static_table);
-
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
- ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
- /* wait to be finished */
- if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0)
- dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ if (index == dev->num_statics) {
+ index = empty - 1;
+ memset(&alu, 0, sizeof(alu));
+ memcpy(alu.mac, mdb->addr, ETH_ALEN);
+ alu.is_static = true;
+ }
+ alu.port_forward |= BIT(port);
+ if (mdb->vid) {
+ alu.is_use_fid = true;
-exit:
- mutex_unlock(&dev->alu_mutex);
+ /* Need a way to map VID to FID. */
+ alu.fid = mdb->vid;
+ }
+ dev->dev_ops->w_sta_mac_table(dev, index, &alu);
}
+EXPORT_SYMBOL_GPL(ksz_port_mdb_add);
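The search above stores the first free static-table slot as 'index + 1' so that zero can mean "no free slot seen yet"; when the loop finds no matching entry, the remembered slot, if any, is reused. The same pattern in isolation, with the table-access helpers assumed for illustration:

	int empty = 0, index;

	for (index = 0; index < num_statics; index++) {
		if (!read_entry(index, &entry)) {	/* assumed helper */
			if (entry_matches(&entry))	/* assumed helper */
				break;			/* update in place */
		} else if (!empty) {
			empty = index + 1;		/* remember first hole */
		}
	}
	if (index == num_statics) {
		if (!empty)
			return;				/* table is full */
		index = empty - 1;			/* reuse the hole */
	}
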
-static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_mdb *mdb)
+int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
{
struct ksz_device *dev = ds->priv;
- u32 static_table[4];
- u32 data;
+ struct alu_struct alu;
int index;
int ret = 0;
- u32 mac_hi, mac_lo;
-
- mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]);
- mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16));
- mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]);
-
- mutex_lock(&dev->alu_mutex);
for (index = 0; index < dev->num_statics; index++) {
- /* find empty slot first */
- data = (index << ALU_STAT_INDEX_S) |
- ALU_STAT_READ | ALU_STAT_START;
- ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
- /* wait to be finished */
- ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
- if (ret < 0) {
- dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
- goto exit;
- }
-
- /* read ALU static table */
- read_table(ds, static_table);
-
- if (static_table[0] & ALU_V_STATIC_VALID) {
- /* check this has same vid & mac address */
-
- if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) &&
- ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) &&
- (static_table[3] == mac_lo)) {
- /* found matching one */
+ if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
+ /* Found one already in static MAC table. */
+ if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
+ alu.fid == mdb->vid)
break;
- }
}
}
 /* no matching entry */
- if (index == dev->num_statics) {
- ret = -EINVAL;
+ if (index == dev->num_statics)
goto exit;
- }
/* clear port */
- static_table[1] &= ~BIT(port);
-
- if ((static_table[1] & ALU_V_PORT_MAP) == 0) {
- /* delete entry */
- static_table[0] = 0;
- static_table[1] = 0;
- static_table[2] = 0;
- static_table[3] = 0;
- }
-
- write_table(ds, static_table);
-
- data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START;
- ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data);
-
- /* wait to be finished */
- ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000);
- if (ret < 0)
- dev_dbg(dev->dev, "Failed to read ALU STATIC\n");
+ alu.port_forward &= ~BIT(port);
+ if (!alu.port_forward)
+ alu.is_static = false;
+ dev->dev_ops->w_sta_mac_table(dev, index, &alu);
exit:
- mutex_unlock(&dev->alu_mutex);
-
return ret;
}
+EXPORT_SYMBOL_GPL(ksz_port_mdb_del);
-static int ksz_port_mirror_add(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror,
- bool ingress)
+int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct ksz_device *dev = ds->priv;
- if (ingress)
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
- else
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true);
-
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
-
- /* configure mirror port */
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
- PORT_MIRROR_SNIFFER, true);
+ /* setup slave port */
+ dev->dev_ops->port_setup(dev, port, false);
- ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false);
+ /* port_stp_state_set() will be called after this to enable the
+ * port, so there is no need to do anything here.
+ */
return 0;
}
+EXPORT_SYMBOL_GPL(ksz_enable_port);
-static void ksz_port_mirror_del(struct dsa_switch *ds, int port,
- struct dsa_mall_mirror_tc_entry *mirror)
+void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
struct ksz_device *dev = ds->priv;
- u8 data;
-
- if (mirror->ingress)
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false);
- else
- ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false);
- ksz_pread8(dev, port, P_MIRROR_CTRL, &data);
+ dev->on_ports &= ~(1 << port);
+ dev->live_ports &= ~(1 << port);
- if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX)))
- ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL,
- PORT_MIRROR_SNIFFER, false);
-}
-
-static const struct dsa_switch_ops ksz_switch_ops = {
- .get_tag_protocol = ksz_get_tag_protocol,
- .setup = ksz_setup,
- .phy_read = ksz_phy_read16,
- .phy_write = ksz_phy_write16,
- .port_enable = ksz_enable_port,
- .port_disable = ksz_disable_port,
- .get_strings = ksz_get_strings,
- .get_ethtool_stats = ksz_get_ethtool_stats,
- .get_sset_count = ksz_sset_count,
- .port_stp_state_set = ksz_port_stp_state_set,
- .port_fast_age = ksz_port_fast_age,
- .port_vlan_filtering = ksz_port_vlan_filtering,
- .port_vlan_prepare = ksz_port_vlan_prepare,
- .port_vlan_add = ksz_port_vlan_add,
- .port_vlan_del = ksz_port_vlan_del,
- .port_fdb_dump = ksz_port_fdb_dump,
- .port_fdb_add = ksz_port_fdb_add,
- .port_fdb_del = ksz_port_fdb_del,
- .port_mdb_prepare = ksz_port_mdb_prepare,
- .port_mdb_add = ksz_port_mdb_add,
- .port_mdb_del = ksz_port_mdb_del,
- .port_mirror_add = ksz_port_mirror_add,
- .port_mirror_del = ksz_port_mirror_del,
-};
-
-struct ksz_chip_data {
- u32 chip_id;
- const char *dev_name;
- int num_vlans;
- int num_alus;
- int num_statics;
- int cpu_ports;
- int port_cnt;
-};
-
-static const struct ksz_chip_data ksz_switch_chips[] = {
- {
- .chip_id = 0x00947700,
- .dev_name = "KSZ9477",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- },
- {
- .chip_id = 0x00989700,
- .dev_name = "KSZ9897",
- .num_vlans = 4096,
- .num_alus = 4096,
- .num_statics = 16,
- .cpu_ports = 0x7F, /* can be configured as cpu port */
- .port_cnt = 7, /* total physical port count */
- },
-};
-
-static int ksz_switch_init(struct ksz_device *dev)
-{
- int i;
-
- dev->ds->ops = &ksz_switch_ops;
-
- for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
- const struct ksz_chip_data *chip = &ksz_switch_chips[i];
-
- if (dev->chip_id == chip->chip_id) {
- dev->name = chip->dev_name;
- dev->num_vlans = chip->num_vlans;
- dev->num_alus = chip->num_alus;
- dev->num_statics = chip->num_statics;
- dev->port_cnt = chip->port_cnt;
- dev->cpu_ports = chip->cpu_ports;
-
- break;
- }
- }
-
- /* no switch found */
- if (!dev->port_cnt)
- return -ENODEV;
-
- return 0;
+ /* port_stp_state_set() will be called after this to disable the
+ * port, so there is no need to do anything here.
+ */
}
+EXPORT_SYMBOL_GPL(ksz_disable_port);
struct ksz_device *ksz_switch_alloc(struct device *base,
const struct ksz_io_ops *ops,
@@ -1167,59 +288,64 @@ struct ksz_device *ksz_switch_alloc(struct device *base,
}
EXPORT_SYMBOL(ksz_switch_alloc);
-int ksz_switch_detect(struct ksz_device *dev)
-{
- u8 data8;
- u32 id32;
- int ret;
-
- /* turn off SPI DO Edge select */
- ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8);
- if (ret)
- return ret;
-
- data8 &= ~SPI_AUTO_EDGE_DETECTION;
- ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8);
- if (ret)
- return ret;
-
- /* read chip id */
- ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32);
- if (ret)
- return ret;
-
- dev->chip_id = id32;
-
- return 0;
-}
-EXPORT_SYMBOL(ksz_switch_detect);
-
-int ksz_switch_register(struct ksz_device *dev)
+int ksz_switch_register(struct ksz_device *dev,
+ const struct ksz_dev_ops *ops)
{
int ret;
if (dev->pdata)
dev->chip_id = dev->pdata->chip_id;
+ dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(dev->reset_gpio))
+ return PTR_ERR(dev->reset_gpio);
+
+ if (dev->reset_gpio) {
+ gpiod_set_value(dev->reset_gpio, 1);
+ mdelay(10);
+ gpiod_set_value(dev->reset_gpio, 0);
+ }
+
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
mutex_init(&dev->alu_mutex);
mutex_init(&dev->vlan_mutex);
- if (ksz_switch_detect(dev))
+ dev->dev_ops = ops;
+
+ if (dev->dev_ops->detect(dev))
return -EINVAL;
- ret = ksz_switch_init(dev);
+ ret = dev->dev_ops->init(dev);
if (ret)
return ret;
- return dsa_register_switch(dev->ds);
+ dev->interface = PHY_INTERFACE_MODE_MII;
+ if (dev->dev->of_node) {
+ ret = of_get_phy_mode(dev->dev->of_node);
+ if (ret >= 0)
+ dev->interface = ret;
+ }
+
+ ret = dsa_register_switch(dev->ds);
+ if (ret) {
+ dev->dev_ops->exit(dev);
+ return ret;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(ksz_switch_register);
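ksz_switch_register() now takes the chip-specific ksz_dev_ops, pulses the optional reset GPIO, picks the PHY interface mode from the device tree, and unwinds through dev_ops->exit() if DSA registration fails. A sketch of the chip glue that would call it; every 'example_' name is an assumption, not part of this patch:

	static const struct ksz_dev_ops example_dev_ops = {
		.get_port_addr	= example_get_port_addr,
		.detect		= example_detect,
		.init		= example_init,
		.exit		= example_exit,
		/* ... remaining hooks ... */
	};

	int example_switch_register(struct ksz_device *dev)
	{
		/* hand the chip's ops to the common registration path */
		return ksz_switch_register(dev, &example_dev_ops);
	}
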
void ksz_switch_remove(struct ksz_device *dev)
{
+ dev->dev_ops->exit(dev);
dsa_unregister_switch(dev->ds);
+
+ if (dev->reset_gpio)
+ gpiod_set_value(dev->reset_gpio, 1);
+
}
EXPORT_SYMBOL(ksz_switch_remove);
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
new file mode 100644
index 000000000000..2dd832de0d52
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Microchip switch driver common header
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ_COMMON_H
+#define __KSZ_COMMON_H
+
+void ksz_update_port_member(struct ksz_device *dev, int port);
+
+/* Common DSA access functions */
+
+int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg);
+int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val);
+int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
+int ksz_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br);
+void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br);
+void ksz_port_fast_age(struct dsa_switch *ds, int port);
+int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
+ void *data);
+int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+void ksz_port_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+int ksz_port_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb);
+int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+
+/* Common register access functions */
+
+static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read8(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read16(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_read24(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read24(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->read32(dev, reg, val);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write8(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write16(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write24(struct ksz_device *dev, u32 reg, u32 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write24(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->write32(dev, reg, value);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_get(struct ksz_device *dev, u32 reg, void *data,
+ size_t len)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->get(dev, reg, data, len);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
+static inline int ksz_set(struct ksz_device *dev, u32 reg, void *data,
+ size_t len)
+{
+ int ret;
+
+ mutex_lock(&dev->reg_mutex);
+ ret = dev->ops->set(dev, reg, data, len);
+ mutex_unlock(&dev->reg_mutex);
+
+ return ret;
+}
+
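Every accessor above serializes on dev->reg_mutex, and ksz_get()/ksz_set() move a whole buffer in one locked transaction, which suits multi-register reads such as table or counter dumps. Usage sketch; the register name is assumed for illustration:

	u8 buf[8];

	if (!ksz_get(dev, EXAMPLE_TABLE_REG, buf, sizeof(buf)))
		print_hex_dump_bytes("tbl: ", DUMP_PREFIX_OFFSET,
				     buf, sizeof(buf));
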
+static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
+ u8 *data)
+{
+ ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
+ u16 *data)
+{
+ ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
+ u32 *data)
+{
+ ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
+ u8 data)
+{
+ ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
+ u16 data)
+{
+ ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
+ u32 data)
+{
+ ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+}
+
+static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
+{
+ u8 data;
+
+ ksz_read8(dev, addr, &data);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ ksz_write8(dev, addr, data);
+}
+
+static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits,
+ bool set)
+{
+ u32 addr;
+ u8 data;
+
+ addr = dev->dev_ops->get_port_addr(port, offset);
+ ksz_read8(dev, addr, &data);
+
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+
+ ksz_write8(dev, addr, data);
+}
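ksz_port_cfg() is the read-modify-write helper behind per-port bit flips; for example, the mirror configuration removed from ksz_common.c earlier in this patch reduces to calls like:

	/* enable RX mirroring on the mirrored port, and make sure the
	 * port itself is not marked as the sniffer
	 */
	ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true);
	ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false);
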
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h
index 2a98dbd51456..60b49010904b 100644
--- a/drivers/net/dsa/microchip/ksz_priv.h
+++ b/drivers/net/dsa/microchip/ksz_priv.h
@@ -1,19 +1,8 @@
-/*
- * Microchip KSZ series switch common definitions
- *
- * Copyright (C) 2017
+/* SPDX-License-Identifier: GPL-2.0
*
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
+ * Microchip KSZ series switch common definitions
*
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
*/
#ifndef __KSZ_PRIV_H
@@ -25,7 +14,7 @@
#include <linux/etherdevice.h>
#include <net/dsa.h>
-#include "ksz_9477_reg.h"
+#include "ksz9477_reg.h"
struct ksz_io_ops;
@@ -33,6 +22,27 @@ struct vlan_table {
u32 table[3];
};
+struct ksz_port_mib {
+ u8 cnt_ptr;
+ u64 *counters;
+};
+
+struct ksz_port {
+ u16 member;
+ u16 vid_member;
+ int stp_state;
+ struct phy_device phydev;
+
+ u32 on:1; /* port is not disabled by hardware */
+ u32 phy:1; /* port has a PHY */
+ u32 fiber:1; /* port is fiber */
+ u32 sgmii:1; /* port is SGMII */
+ u32 force:1;
+ u32 link_just_down:1; /* link just went down */
+
+ struct ksz_port_mib mib;
+};
+
struct ksz_device {
struct dsa_switch *ds;
struct ksz_platform_data *pdata;
@@ -43,11 +53,14 @@ struct ksz_device {
struct mutex alu_mutex; /* ALU access */
struct mutex vlan_mutex; /* vlan access */
const struct ksz_io_ops *ops;
+ const struct ksz_dev_ops *dev_ops;
struct device *dev;
void *priv;
+ struct gpio_desc *reset_gpio; /* Optional reset GPIO */
+
/* chip specific data */
u32 chip_id;
int num_vlans;
@@ -55,11 +68,37 @@ struct ksz_device {
int num_statics;
int cpu_port; /* port connected to CPU */
int cpu_ports; /* port bitmap can be cpu port */
+ int phy_port_cnt;
int port_cnt;
+ int reg_mib_cnt;
+ int mib_cnt;
+ int mib_port_cnt;
+ int last_port; /* ports after this one are not used */
+ phy_interface_t interface;
+ u32 regs_size;
struct vlan_table *vlan_cache;
u64 mib_value[TOTAL_SWITCH_COUNTER_NUM];
+
+ u8 *txbuf;
+
+ struct ksz_port *ports;
+ struct timer_list mib_read_timer;
+ struct work_struct mib_read;
+ unsigned long mib_read_interval;
+ u16 br_member;
+ u16 member;
+ u16 live_ports;
+ u16 on_ports; /* ports enabled by DSA */
+ u16 rx_ports;
+ u16 tx_ports;
+ u16 mirror_rx;
+ u16 mirror_tx;
+ u32 features; /* chip specific features */
+ u32 overrides; /* chip functions set by user */
+ u16 host_mask;
+ u16 port_mask;
};
struct ksz_io_ops {
@@ -71,140 +110,60 @@ struct ksz_io_ops {
int (*write16)(struct ksz_device *dev, u32 reg, u16 value);
int (*write24)(struct ksz_device *dev, u32 reg, u32 value);
int (*write32)(struct ksz_device *dev, u32 reg, u32 value);
- int (*phy_read16)(struct ksz_device *dev, int addr, int reg,
- u16 *value);
- int (*phy_write16)(struct ksz_device *dev, int addr, int reg,
- u16 value);
+ int (*get)(struct ksz_device *dev, u32 reg, void *data, size_t len);
+ int (*set)(struct ksz_device *dev, u32 reg, void *data, size_t len);
+};
+
+struct alu_struct {
+ /* entry 1 */
+ u8 is_static:1;
+ u8 is_src_filter:1;
+ u8 is_dst_filter:1;
+ u8 prio_age:3;
+ u32 _reserv_0_1:23;
+ u8 mstp:3;
+ /* entry 2 */
+ u8 is_override:1;
+ u8 is_use_fid:1;
+ u32 _reserv_1_1:23;
+ u8 port_forward:7;
+ /* entry 3 & 4 */
+ u32 _reserv_2_1:9;
+ u8 fid:7;
+ u8 mac[ETH_ALEN];
+};
+
+struct ksz_dev_ops {
+ u32 (*get_port_addr)(int port, int offset);
+ void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
+ void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
+ void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
+ void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+ int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u8 *timestamp,
+ u16 *entries);
+ int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+ void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr,
+ struct alu_struct *alu);
+ void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
+ u64 *cnt);
+ void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
+ u64 *dropped, u64 *cnt);
+ void (*port_init_cnt)(struct ksz_device *dev, int port);
+ int (*shutdown)(struct ksz_device *dev);
+ int (*detect)(struct ksz_device *dev);
+ int (*init)(struct ksz_device *dev);
+ void (*exit)(struct ksz_device *dev);
};
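The common code above (ksz_port_mdb_add/del and ksz_port_fdb_dump) relies on an implicit contract: r_sta_mac_table() returns 0 and fills *alu for a valid entry, non-zero for an empty slot. A skeleton of a conforming implementation; the helpers and register layout are assumptions:

	static int example_r_sta_mac_table(struct ksz_device *dev, u16 addr,
					   struct alu_struct *alu)
	{
		u32 table[4];

		if (example_read_static_entry(dev, addr, table))	/* assumed */
			return -ENXIO;
		if (!(table[0] & EXAMPLE_STATIC_VALID))	/* assumed valid bit */
			return -ENXIO;			/* empty slot */
		example_fill_alu(alu, table);		/* assumed decoder */
		return 0;
	}
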
struct ksz_device *ksz_switch_alloc(struct device *base,
const struct ksz_io_ops *ops, void *priv);
-int ksz_switch_detect(struct ksz_device *dev);
-int ksz_switch_register(struct ksz_device *dev);
+int ksz_switch_register(struct ksz_device *dev,
+ const struct ksz_dev_ops *ops);
void ksz_switch_remove(struct ksz_device *dev);
-static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read8(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read16(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_read24(struct ksz_device *dev, u32 reg, u32 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read24(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->read32(dev, reg, val);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write8(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write16(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_write24(struct ksz_device *dev, u32 reg, u32 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write24(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
-{
- int ret;
-
- mutex_lock(&dev->reg_mutex);
- ret = dev->ops->write32(dev, reg, value);
- mutex_unlock(&dev->reg_mutex);
-
- return ret;
-}
-
-static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
- u8 *data)
-{
- ksz_read8(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
- u16 *data)
-{
- ksz_read16(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
- u32 *data)
-{
- ksz_read32(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
- u8 data)
-{
- ksz_write8(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
- u16 data)
-{
- ksz_write16(dev, PORT_CTRL_ADDR(port, offset), data);
-}
-
-static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
- u32 data)
-{
- ksz_write32(dev, PORT_CTRL_ADDR(port, offset), data);
-}
+int ksz9477_switch_register(struct ksz_device *dev);
#endif
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
deleted file mode 100644
index 8c1778b42701..000000000000
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Microchip KSZ series register access through SPI
- *
- * Copyright (C) 2017
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <asm/unaligned.h>
-
-#include <linux/delay.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-
-#include "ksz_priv.h"
-
-/* SPI frame opcodes */
-#define KS_SPIOP_RD 3
-#define KS_SPIOP_WR 2
-
-#define SPI_ADDR_SHIFT 24
-#define SPI_ADDR_MASK (BIT(SPI_ADDR_SHIFT) - 1)
-#define SPI_TURNAROUND_SHIFT 5
-
-static int ksz_spi_read_reg(struct spi_device *spi, u32 reg, u8 *val,
- unsigned int len)
-{
- u32 txbuf;
- int ret;
-
- txbuf = reg & SPI_ADDR_MASK;
- txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT;
- txbuf <<= SPI_TURNAROUND_SHIFT;
- txbuf = cpu_to_be32(txbuf);
-
- ret = spi_write_then_read(spi, &txbuf, 4, val, len);
- return ret;
-}
-
-static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
- unsigned int len)
-{
- struct spi_device *spi = dev->priv;
-
- return ksz_spi_read_reg(spi, reg, data, len);
-}
-
-static int ksz_spi_read8(struct ksz_device *dev, u32 reg, u8 *val)
-{
- return ksz_spi_read(dev, reg, val, 1);
-}
-
-static int ksz_spi_read16(struct ksz_device *dev, u32 reg, u16 *val)
-{
- int ret = ksz_spi_read(dev, reg, (u8 *)val, 2);
-
- if (!ret)
- *val = be16_to_cpu(*val);
-
- return ret;
-}
-
-static int ksz_spi_read24(struct ksz_device *dev, u32 reg, u32 *val)
-{
- int ret;
-
- *val = 0;
- ret = ksz_spi_read(dev, reg, (u8 *)val, 3);
- if (!ret) {
- *val = be32_to_cpu(*val);
- /* convert to 24bit */
- *val >>= 8;
- }
-
- return ret;
-}
-
-static int ksz_spi_read32(struct ksz_device *dev, u32 reg, u32 *val)
-{
- int ret = ksz_spi_read(dev, reg, (u8 *)val, 4);
-
- if (!ret)
- *val = be32_to_cpu(*val);
-
- return ret;
-}
-
-static int ksz_spi_write_reg(struct spi_device *spi, u32 reg, u8 *val,
- unsigned int len)
-{
- u32 txbuf;
- u8 data[12];
- int i;
-
- txbuf = reg & SPI_ADDR_MASK;
- txbuf |= (KS_SPIOP_WR << SPI_ADDR_SHIFT);
- txbuf <<= SPI_TURNAROUND_SHIFT;
- txbuf = cpu_to_be32(txbuf);
-
- data[0] = txbuf & 0xFF;
- data[1] = (txbuf & 0xFF00) >> 8;
- data[2] = (txbuf & 0xFF0000) >> 16;
- data[3] = (txbuf & 0xFF000000) >> 24;
- for (i = 0; i < len; i++)
- data[i + 4] = val[i];
-
- return spi_write(spi, &data, 4 + len);
-}
-
-static int ksz_spi_write8(struct ksz_device *dev, u32 reg, u8 value)
-{
- struct spi_device *spi = dev->priv;
-
- return ksz_spi_write_reg(spi, reg, &value, 1);
-}
-
-static int ksz_spi_write16(struct ksz_device *dev, u32 reg, u16 value)
-{
- struct spi_device *spi = dev->priv;
-
- value = cpu_to_be16(value);
- return ksz_spi_write_reg(spi, reg, (u8 *)&value, 2);
-}
-
-static int ksz_spi_write24(struct ksz_device *dev, u32 reg, u32 value)
-{
- struct spi_device *spi = dev->priv;
-
- /* make it to big endian 24bit from MSB */
- value <<= 8;
- value = cpu_to_be32(value);
- return ksz_spi_write_reg(spi, reg, (u8 *)&value, 3);
-}
-
-static int ksz_spi_write32(struct ksz_device *dev, u32 reg, u32 value)
-{
- struct spi_device *spi = dev->priv;
-
- value = cpu_to_be32(value);
- return ksz_spi_write_reg(spi, reg, (u8 *)&value, 4);
-}
-
-static const struct ksz_io_ops ksz_spi_ops = {
- .read8 = ksz_spi_read8,
- .read16 = ksz_spi_read16,
- .read24 = ksz_spi_read24,
- .read32 = ksz_spi_read32,
- .write8 = ksz_spi_write8,
- .write16 = ksz_spi_write16,
- .write24 = ksz_spi_write24,
- .write32 = ksz_spi_write32,
-};
-
-static int ksz_spi_probe(struct spi_device *spi)
-{
- struct ksz_device *dev;
- int ret;
-
- dev = ksz_switch_alloc(&spi->dev, &ksz_spi_ops, spi);
- if (!dev)
- return -ENOMEM;
-
- if (spi->dev.platform_data)
- dev->pdata = spi->dev.platform_data;
-
- ret = ksz_switch_register(dev);
- if (ret)
- return ret;
-
- spi_set_drvdata(spi, dev);
-
- return 0;
-}
-
-static int ksz_spi_remove(struct spi_device *spi)
-{
- struct ksz_device *dev = spi_get_drvdata(spi);
-
- if (dev)
- ksz_switch_remove(dev);
-
- return 0;
-}
-
-static const struct of_device_id ksz_dt_ids[] = {
- { .compatible = "microchip,ksz9477" },
- { .compatible = "microchip,ksz9897" },
- {},
-};
-MODULE_DEVICE_TABLE(of, ksz_dt_ids);
-
-static struct spi_driver ksz_spi_driver = {
- .driver = {
- .name = "ksz9477-switch",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(ksz_dt_ids),
- },
- .probe = ksz_spi_probe,
- .remove = ksz_spi_remove,
-};
-
-module_spi_driver(ksz_spi_driver);
-
-MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
-MODULE_DESCRIPTION("Microchip KSZ Series Switch SPI access Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_spi.h b/drivers/net/dsa/microchip/ksz_spi.h
new file mode 100644
index 000000000000..427811bd60b3
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_spi.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Microchip KSZ series SPI access common header
+ *
+ * Copyright (C) 2017-2018 Microchip Technology Inc.
+ * Tristram Ha <Tristram.Ha@microchip.com>
+ */
+
+#ifndef __KSZ_SPI_H
+#define __KSZ_SPI_H
+
+/* Chip dependent SPI access */
+static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
+ unsigned int len);
+static int ksz_spi_write(struct ksz_device *dev, u32 reg, void *data,
+ unsigned int len);
+
+static int ksz_spi_read8(struct ksz_device *dev, u32 reg, u8 *val)
+{
+ return ksz_spi_read(dev, reg, val, 1);
+}
+
+static int ksz_spi_read16(struct ksz_device *dev, u32 reg, u16 *val)
+{
+ int ret = ksz_spi_read(dev, reg, (u8 *)val, 2);
+
+ if (!ret)
+ *val = be16_to_cpu(*val);
+
+ return ret;
+}
+
+static int ksz_spi_read32(struct ksz_device *dev, u32 reg, u32 *val)
+{
+ int ret = ksz_spi_read(dev, reg, (u8 *)val, 4);
+
+ if (!ret)
+ *val = be32_to_cpu(*val);
+
+ return ret;
+}
+
+static int ksz_spi_write8(struct ksz_device *dev, u32 reg, u8 value)
+{
+ return ksz_spi_write(dev, reg, &value, 1);
+}
+
+static int ksz_spi_write16(struct ksz_device *dev, u32 reg, u16 value)
+{
+ value = cpu_to_be16(value);
+ return ksz_spi_write(dev, reg, &value, 2);
+}
+
+static int ksz_spi_write32(struct ksz_device *dev, u32 reg, u32 value)
+{
+ value = cpu_to_be32(value);
+ return ksz_spi_write(dev, reg, &value, 4);
+}
+
+static int ksz_spi_get(struct ksz_device *dev, u32 reg, void *data, size_t len)
+{
+ return ksz_spi_read(dev, reg, data, len);
+}
+
+static int ksz_spi_set(struct ksz_device *dev, u32 reg, void *data, size_t len)
+{
+ return ksz_spi_write(dev, reg, data, len);
+}
+
+#endif
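The header above only forward-declares ksz_spi_read()/ksz_spi_write(); the command framing is chip dependent and lives in each chip's SPI file. A sketch modelled on the KSZ9477 framing from the deleted ksz_spi.c, reusing the opcode and shift macros defined there:

	static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data,
				unsigned int len)
	{
		struct spi_device *spi = dev->priv;
		u32 txbuf;

		/* build the 32-bit read command: opcode, address, turnaround */
		txbuf = reg & SPI_ADDR_MASK;
		txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT;
		txbuf <<= SPI_TURNAROUND_SHIFT;
		txbuf = cpu_to_be32(txbuf);

		return spi_write_then_read(spi, &txbuf, 4, data, len);
	}
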
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index a5de9bffe5be..74547f43b938 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -658,7 +658,8 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(
+ phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 65f10fec25b3..0b3e51f248c2 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -116,8 +116,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
/* Reset the switch. */
REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
GLOBAL_ATU_CONTROL_SWRESET |
- GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
- GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+ GLOBAL_ATU_CONTROL_LEARNDIS);
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
@@ -142,13 +141,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
*/
REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
- /* Enable automatic address learning, set the address
- * database size to 1024 entries, and set the default aging
- * time to 5 minutes.
+ /* Disable automatic address learning.
*/
REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
- GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
- GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+ GLOBAL_ATU_CONTROL_LEARNDIS);
return 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index e05d4eddc935..8a517d8fb9d1 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1124,7 +1124,7 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
u16 *p = _p;
int i;
- regs->version = 0;
+ regs->version = chip->info->prod_num;
memset(p, 0xff, 32 * sizeof(u16));
@@ -2524,11 +2524,22 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
mutex_unlock(&chip->reg_lock);
if (reg == MII_PHYSID2) {
- /* Some internal PHYS don't have a model number. Use
- * the mv88e6390 family model number instead.
- */
- if (!(val & 0x3f0))
- val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
+ /* Some internal PHYs don't have a model number. */
+ if (chip->info->family != MV88E6XXX_FAMILY_6165)
+ /* Then there is the 6165 family. It gets its
+ * PHYs correct. But it can also have two
+ * SERDES interfaces in the PHY address
+ * space. And these don't have a model
+ * number. But they are not PHYs, so we don't
+ * want to give them something a PHY driver
+ * will recognise.
+ *
+ * Use the mv88e6390 family model number
+ * instead, for anything which really could be
+ * a PHY.
+ */
+ if (!(val & 0x3f0))
+ val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
}
return err ? err : val;
@@ -3234,6 +3245,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3276,6 +3288,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3291,8 +3304,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390x_serdes_power,
- .serdes_irq_setup = mv88e6390_serdes_irq_setup,
- .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .serdes_irq_setup = mv88e6390x_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390x_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390x_phylink_validate,
};
@@ -3318,6 +3331,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3405,11 +3419,11 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_limit = mv88e6390_port_pause_limit,
- .port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3464,6 +3478,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.stats_get_stats = mv88e6320_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
@@ -3506,6 +3521,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.stats_get_stats = mv88e6320_stats_get_stats,
.set_cpu_port = mv88e6095_g1_set_cpu_port,
.set_egress_port = mv88e6095_g1_set_egress_port,
+ .watchdog_ops = &mv88e6390_watchdog_ops,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
@@ -3710,11 +3726,11 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6390_port_pause_limit,
- .port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3757,11 +3773,11 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6390_port_pause_limit,
- .port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3777,8 +3793,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390x_serdes_power,
- .serdes_irq_setup = mv88e6390_serdes_irq_setup,
- .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .serdes_irq_setup = mv88e6390x_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390x_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index cd7db60a508b..ebd26b6a93e6 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -368,12 +368,15 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
u16 reg;
int err;
- if (mode == PHY_INTERFACE_MODE_NA)
- return 0;
-
if (port != 9 && port != 10)
return -EOPNOTSUPP;
+ /* Default to a slow mode, freeing up SERDES interfaces for
+ * other ports which might use them for SFPs.
+ */
+ if (mode == PHY_INTERFACE_MODE_NA)
+ mode = PHY_INTERFACE_MODE_1000BASEX;
+
switch (mode) {
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASE_X;
@@ -437,6 +440,21 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return 0;
}
+int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ switch (mode) {
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ return -EINVAL;
+ default:
+ break;
+ }
+
+ return mv88e6390x_port_set_cmode(chip, port, mode);
+}
+
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
{
int err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 36904c9bf955..0d81866d0e4a 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -310,6 +310,8 @@ int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out);
int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out);
+int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index bb69650ff772..2caa8c8b4b55 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -619,15 +619,11 @@ out:
return ret;
}
-int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
{
int lane;
int err;
- /* Only support ports 9 and 10 at the moment */
- if (port < 9)
- return 0;
-
lane = mv88e6390x_serdes_get_lane(chip, port);
if (lane == -ENODEV)
@@ -663,11 +659,19 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
return mv88e6390_serdes_irq_enable(chip, port, lane);
}
-void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
+{
+ if (port < 9)
+ return 0;
+
+ return mv88e6390x_serdes_irq_setup(chip, port);
+}
+
+void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
{
int lane = mv88e6390x_serdes_get_lane(chip, port);
- if (port < 9)
+ if (lane == -ENODEV)
return;
if (lane < 0)
@@ -685,6 +689,14 @@ void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
chip->ports[port].serdes_irq = 0;
}
+void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+{
+ if (port < 9)
+ return;
+
+ mv88e6390x_serdes_irq_free(chip, port);
+}
+
int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
{
u8 cmode = chip->ports[port].cmode;
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 7870c5a9ef12..573dce8b1eb4 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -77,6 +77,8 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
+void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 5bc168314ea2..40f421dbdf57 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1151,7 +1151,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
print_info = (vortex_debug > 1);
if (print_info)
- pr_info("See Documentation/networking/vortex.txt\n");
+ pr_info("See Documentation/networking/device_drivers/3com/vortex.txt\n");
pr_info("%s: 3Com %s %s at %p.\n",
print_name,
@@ -1956,7 +1956,7 @@ vortex_error(struct net_device *dev, int status)
dev->name, tx_status);
if (tx_status == 0x82) {
pr_err("Probably a duplex mismatch. See "
- "Documentation/networking/vortex.txt\n");
+ "Documentation/networking/device_drivers/3com/vortex.txt\n");
}
dump_tx_ring(dev);
}
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 5c3ef9fc8207..0ac44ef1f7a9 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -75,8 +75,9 @@ config VORTEX
"Hurricane" (3c555/3cSOHO) PCI
If you have such a card, say Y here. More specific information is in
- <file:Documentation/networking/vortex.txt> and in the comments at
- the beginning of <file:drivers/net/ethernet/3com/3c59x.c>.
+ <file:Documentation/networking/device_drivers/3com/vortex.txt> and
+ in the comments at the beginning of
+ <file:drivers/net/ethernet/3com/3c59x.c>.
To compile this support as a module, choose M here.
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 7c9348a26cbb..91fc64c1145e 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1283,7 +1283,7 @@ static int greth_mdio_probe(struct net_device *dev)
else
phy_set_max_speed(phy, SPEED_100);
- phy->advertising = phy->supported;
+ linkmode_copy(phy->advertising, phy->supported);
greth->link = 0;
greth->speed = 0;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 7c1eb304c27e..e833d1b3fe18 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -940,11 +940,8 @@ static int au1000_open(struct net_device *dev)
return retval;
}
- if (dev->phydev) {
- /* cause the PHY state machine to schedule a link state check */
- dev->phydev->state = PHY_CHANGELINK;
+ if (dev->phydev)
phy_start(dev->phydev);
- }
netif_start_queue(dev);
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 9d4899826823..bd6589de93d9 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1488,9 +1488,9 @@ static int sunlance_sbus_probe(struct platform_device *op)
struct device_node *parent_dp = parent->dev.of_node;
int err;
- if (!strcmp(parent_dp->name, "ledma")) {
+ if (of_node_name_eq(parent_dp, "ledma")) {
err = sparc_lance_probe_one(op, parent, NULL);
- } else if (!strcmp(parent_dp->name, "lebuffer")) {
+ } else if (of_node_name_eq(parent_dp, "lebuffer")) {
err = sparc_lance_probe_one(op, NULL, parent);
} else
err = sparc_lance_probe_one(op, NULL, NULL);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 151bdb629e8a..128cd648ba99 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -857,6 +857,7 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -878,9 +879,15 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- phy_data->phydev->supported = PHY_10BT_FEATURES |
- PHY_100BT_FEATURES |
- PHY_1000BT_FEATURES;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ supported);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ supported);
+
+ linkmode_copy(phy_data->phydev->supported, supported);
+
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -891,6 +898,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -951,9 +959,13 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- phy_data->phydev->supported = (PHY_10BT_FEATURES |
- PHY_100BT_FEATURES |
- PHY_1000BT_FEATURES);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ supported);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ supported);
+ linkmode_copy(phy_data->phydev->supported, supported);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -976,7 +988,6 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct phy_device *phydev;
- u32 advertising;
int ret;
/* If we already have a PHY, just return */
@@ -1036,9 +1047,8 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
xgbe_phy_external_phy_quirks(pdata);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- lks->link_modes.advertising);
- phydev->advertising &= advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ lks->link_modes.advertising);
phy_start_aneg(phy_data->phydev);
@@ -1497,7 +1507,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
if (!phy_data->phydev)
return;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phy_data->phydev->advertising);
if (phy_data->phydev->pause) {
XGBE_SET_LP_ADV(lks, Pause);
@@ -1815,7 +1825,6 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
{
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
struct xgbe_phy_data *phy_data = pdata->phy_data;
- u32 advertising;
int ret;
ret = xgbe_phy_find_phy_device(pdata);
@@ -1825,12 +1834,10 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
if (!phy_data->phydev)
return 0;
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- lks->link_modes.advertising);
-
phy_data->phydev->autoneg = pdata->phy.autoneg;
- phy_data->phydev->advertising = phy_data->phydev->supported &
- advertising;
+ linkmode_and(phy_data->phydev->advertising,
+ phy_data->phydev->supported,
+ lks->link_modes.advertising);
if (pdata->phy.autoneg != AUTONEG_ENABLE) {
phy_data->phydev->speed = pdata->phy.speed;
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index f5fe3bb2e59d..53529cd85162 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -109,6 +109,7 @@ void xge_mdio_remove(struct net_device *ndev)
int xge_mdio_config(struct net_device *ndev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct xge_pdata *pdata = netdev_priv(ndev);
struct device *dev = &pdata->pdev->dev;
struct mii_bus *mdio_bus;
@@ -148,16 +149,17 @@ int xge_mdio_config(struct net_device *ndev)
goto err;
}
- phydev->supported &= ~(SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_AUI |
- SUPPORTED_MII |
- SUPPORTED_FIBRE |
- SUPPORTED_BNC);
- phydev->advertising = phydev->supported;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
+
+ linkmode_andnot(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
pdata->phy_speed = SPEED_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3b889efddf78..50dd6bf176d0 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -29,9 +29,6 @@
#define RES_RING_CSR 1
#define RES_RING_CMD 2
-static const struct of_device_id xgene_enet_of_match[];
-static const struct acpi_device_id xgene_enet_acpi_match[];
-
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
struct xgene_enet_raw_desc16 *raw_desc;
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 686f6d8c9e79..4556630ee286 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -36,6 +36,7 @@ atlantic-objs := aq_main.o \
aq_ring.o \
aq_hw_utils.o \
aq_ethtool.o \
+ aq_filters.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 91eb8910b1c9..3944ce7f0870 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -12,7 +12,7 @@
#ifndef AQ_CFG_H
#define AQ_CFG_H
-#define AQ_CFG_VECS_DEF 4U
+#define AQ_CFG_VECS_DEF 8U
#define AQ_CFG_TCS_DEF 1U
#define AQ_CFG_TXDS_DEF 4096U
@@ -42,8 +42,8 @@
#define AQ_CFG_IS_LRO_DEF 1U
/* RSS */
-#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 128U
-#define AQ_CFG_RSS_HASHKEY_SIZE 320U
+#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 64U
+#define AQ_CFG_RSS_HASHKEY_SIZE 40U
#define AQ_CFG_IS_RSS_DEF 1U
#define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index becb578211ed..6b6d1724676e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -14,7 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/pci.h>
-
+#include <linux/if_vlan.h>
#include "ver.h"
#include "aq_cfg.h"
#include "aq_utils.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 99ef1daaa4d8..38e87eed76b9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -12,6 +12,7 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
#include "aq_vec.h"
+#include "aq_filters.h"
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
@@ -201,6 +202,41 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
return 0;
}
+static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(netdev);
+ struct aq_nic_cfg_s *cfg;
+ unsigned int i = 0U;
+ u32 rss_entries;
+ int err = 0;
+
+ cfg = aq_nic_get_cfg(aq_nic);
+ rss_entries = cfg->aq_rss.indirection_table_size;
+
+ /* We do not allow changing unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+ /* Fill out the redirection table */
+ if (indir)
+ for (i = 0; i < rss_entries; i++)
+ cfg->aq_rss.indirection_table[i] = indir[i];
+
+ /* Fill out the RSS hash key */
+ if (key) {
+ memcpy(cfg->aq_rss.hash_secret_key, key,
+ sizeof(cfg->aq_rss.hash_secret_key));
+ err = aq_nic->aq_hw_ops->hw_rss_hash_set(aq_nic->aq_hw,
+ &cfg->aq_rss);
+ if (err)
+ return err;
+ }
+
+ err = aq_nic->aq_hw_ops->hw_rss_set(aq_nic->aq_hw, &cfg->aq_rss);
+
+ return err;
+}
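aq_ethtool_set_rss() copies a caller-supplied indirection table and hash key into the NIC config before programming the hardware. A caller-side sketch of building such a table, spreading traffic round-robin over the configured queues (variable names are illustrative):

	u32 indir[AQ_CFG_RSS_INDIRECTION_TABLE_MAX];
	unsigned int i, vecs = 8;	/* number of RX queues in use */

	for (i = 0; i < ARRAY_SIZE(indir); i++)
		indir[i] = i % vecs;	/* round-robin queue assignment */
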
+
static int aq_ethtool_get_rxnfc(struct net_device *ndev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
@@ -213,7 +249,36 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
case ETHTOOL_GRXRINGS:
cmd->data = cfg->vecs;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = aq_get_rxnfc_count_all_rules(aq_nic);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = aq_get_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = aq_get_rxnfc_all_rules(aq_nic, cmd, rule_locs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int aq_ethtool_set_rxnfc(struct net_device *ndev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+	struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = aq_add_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = aq_del_rxnfc_rule(aq_nic, cmd);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -495,7 +560,7 @@ static int aq_set_ringparam(struct net_device *ndev,
}
}
if (ndev_running)
- err = dev_open(ndev);
+ err = dev_open(ndev, NULL);
err_exit:
return err;
@@ -519,7 +584,9 @@ const struct ethtool_ops aq_ethtool_ops = {
.set_pauseparam = aq_ethtool_set_pauseparam,
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
.get_rxfh = aq_ethtool_get_rss,
+ .set_rxfh = aq_ethtool_set_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
+ .set_rxnfc = aq_ethtool_set_rxnfc,
.get_sset_count = aq_ethtool_get_sset_count,
.get_ethtool_stats = aq_ethtool_stats,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
new file mode 100644
index 000000000000..18bc035da850
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.c: RX filters related functions. */
+
+#include "aq_filters.h"
+
+static bool __must_check
+aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return false;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ return true;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ case IPV6_USER_FLOW:
+ switch (fsp->h_u.usr_ip6_spec.l4_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static bool __must_check
+aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
+ struct ethtool_rx_flow_spec *fsp2)
+{
+ if (fsp1->flow_type != fsp2->flow_type ||
+ memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
+ memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
+ memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
+ memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
+ return false;
+
+ return true;
+}
+
+static bool __must_check
+aq_rule_already_exists(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct aq_rx_filter *rule;
+ struct hlist_node *aq_node2;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == fsp->location)
+ continue;
+ if (aq_match_filter(&rule->aq_fsp, fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: This filter is already set\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
+ fsp->location > AQ_RX_LAST_LOC_FL3L4) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FL3L4,
+ AQ_RX_LAST_LOC_FL3L4);
+ return -EINVAL;
+ }
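+	/* An IPv6 rule occupies four consecutive L3L4 slots, so IPv4 and
+	 * IPv6 rules cannot be mixed, and an IPv6 rule may start only at
+	 * the first slot of a group of four.
+	 */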
+ if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (rx_fltrs->fl3l4.is_ipv6 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified location for ipv6 must be %d or %d",
+ AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fl2(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
+ fsp->location > AQ_RX_LAST_LOC_FETHERT) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FETHERT,
+ AQ_RX_LAST_LOC_FETHERT);
+ return -EINVAL;
+ }
+
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
+ fsp->m_u.ether_spec.h_proto == 0U) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: proto (ether_type) parameter must be specified");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
+ fsp->location > AQ_RX_LAST_LOC_FVLANID) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FVLANID,
+ AQ_RX_LAST_LOC_FVLANID);
+ return -EINVAL;
+ }
+
+ if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
+ aq_nic->active_vlans))) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown vlan-id specified");
+ return -EINVAL;
+ }
+
+ if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: queue number must be in range [0, %d]",
+ aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __must_check
+aq_check_filter(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
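+	/* A full VID mask selects the VLAN filter; a priority-only mask
+	 * selects the ethertype (L2) filter.
+	 */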
+ if (fsp->flow_type & FLOW_EXT) {
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
+ err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
+ } else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ } else {
+ netdev_err(aq_nic->ndev,
+ "ethtool: invalid vlan mask 0x%x specified",
+ be16_to_cpu(fsp->m_ext.vlan_tci));
+ err = -EINVAL;
+ }
+ } else {
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IP_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ default:
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown flow-type specified");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static bool __must_check
+aq_rule_is_not_support(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ bool rule_is_not_support = false;
+
+ if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: Please, to enable the RX flow control:\n"
+ "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
+ rule_is_not_support = true;
+ } else if (!aq_rule_is_approve(fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified flow type is not supported\n");
+ rule_is_not_support = true;
+ } else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
+ (fsp->h_u.tcp_ip4_spec.tos ||
+ fsp->h_u.tcp_ip6_spec.tclass)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified tos tclass are not supported\n");
+ rule_is_not_support = true;
+ } else if (fsp->flow_type & FLOW_MAC_EXT) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: MAC_EXT is not supported");
+ rule_is_not_support = true;
+ }
+
+ return rule_is_not_support;
+}
+
+static bool __must_check
+aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ bool rule_is_not_correct = false;
+
+ if (!aq_nic) {
+ rule_is_not_correct = true;
+ } else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified number %u rule is invalid\n",
+ fsp->location);
+ rule_is_not_correct = true;
+ } else if (aq_check_filter(aq_nic, fsp)) {
+ rule_is_not_correct = true;
+ } else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified action is invalid.\n"
+ "Maximum allowable value action is %u.\n",
+ aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ rule_is_not_correct = true;
+ }
+ }
+
+ return rule_is_not_correct;
+}
+
+static int __must_check
+aq_check_rule(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+
+ if (aq_rule_is_not_correct(aq_nic, fsp))
+ err = -EINVAL;
+ else if (aq_rule_is_not_support(aq_nic, fsp))
+ err = -EOPNOTSUPP;
+ else if (aq_rule_already_exists(aq_nic, fsp))
+ err = -EEXIST;
+
+ return err;
+}
+
+static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l2 *data, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;
+
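+	/* A negative queue value encodes "discard" for the L2 filter. */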
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ data->queue = fsp->ring_cookie;
+ else
+ data->queue = -1;
+
+ data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
+ data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
+ == VLAN_PRIO_MASK;
+ data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+static int aq_add_del_fether(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ struct aq_rx_filter_l2 data;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);
+
+ if (unlikely(!aq_hw_ops->hw_filter_l2_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
+ return -EOPNOTSUPP;
+
+ if (add)
+ return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
+ else
+ return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
+}
+
+static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
+ aq_vlans[i].vlan_id == vlan) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Rebuild the array of vlan filters so that filters with an assigned
+ * queue take precedence over plain VLANs present on the interface.
+ */
+static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
+ unsigned long *active_vlans,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ bool vlan_busy = false;
+ int vlan = -1;
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
+ continue;
+ do {
+ vlan = find_next_bit(active_vlans,
+ VLAN_N_VID,
+ vlan + 1);
+ if (vlan == VLAN_N_VID) {
+ aq_vlans[i].enable = 0U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = 0;
+ continue;
+ }
+
+ vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
+ if (!vlan_busy) {
+ aq_vlans[i].enable = 1U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = vlan;
+ }
+ } while (vlan_busy && vlan != VLAN_N_VID);
+ }
+}
+
+static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_vlan *aq_vlans, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+ int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
+ int i;
+
+ memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));
+
+ if (!add)
+ return 0;
+
+ /* remove vlan if it was in table without queue assignment */
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].vlan_id ==
+ (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
+ aq_vlans[i].enable = false;
+ }
+ }
+
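+	/* The hardware RX queue field is 5 bits wide. */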
+ aq_vlans[location].location = location;
+ aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_VID_MASK;
+ aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
+ aq_vlans[location].enable = 1U;
+
+ return 0;
+}
+
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
+ break;
+ }
+ if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+ struct ethtool_rxnfc cmd;
+
+ cmd.fs.location = rule->aq_fsp.location;
+ return aq_del_rxnfc_rule(aq_nic, &cmd);
+ }
+
+ return -ENOENT;
+}
+
+static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+
+ aq_set_data_fvlan(aq_nic,
+ aq_rx_fltr,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
+ add);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l3l4 *data, bool add)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
+ data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);
+
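+	/* Each IPv6 rule spans four consecutive hardware slots, hence the
+	 * active_ipv6 mask is indexed by location / 4.
+	 */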
+ if (!add) {
+ if (!data->is_ipv6)
+ rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
+ else
+ rx_fltrs->fl3l4.active_ipv6 &=
+ ~BIT((data->location) / 4);
+
+ return 0;
+ }
+
+ data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_UDP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_SCTP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ default:
+ break;
+ }
+
+ if (!data->is_ipv6) {
+ data->ip_src[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
+ data->ip_dst[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
+ } else {
+ int i;
+
+ rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ data->ip_dst[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
+ data->ip_src[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
+ }
+ data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
+ }
+ if (fsp->flow_type != IP_USER_FLOW &&
+ fsp->flow_type != IPV6_USER_FLOW) {
+ if (!data->is_ipv6) {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip4_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip4_spec.psrc);
+ } else {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip6_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip6_spec.psrc);
+ }
+ }
+ if (data->ip_src[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
+ if (data->ip_dst[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
+ if (data->p_dst)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
+ if (data->p_src)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
+ data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
+ } else {
+ data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ }
+
+ return 0;
+}
+
+static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
+ const struct aq_hw_ops *aq_hw_ops,
+ struct aq_rx_filter_l3l4 *data)
+{
+ if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
+ return -EOPNOTSUPP;
+
+ return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
+}
+
+static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ struct aq_rx_filter_l3l4 data;
+
+ if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
+ aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4 ||
+ aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
+ return -EINVAL;
+
+ return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
+}
+
+static int aq_add_del_rule(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ int err = -EINVAL;
+
+ if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
+ if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_VID_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_vlan;
+ err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
+ } else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_PRIO_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ }
+ } else {
+ switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IP_USER_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_l3l4;
+ err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int aq_update_table_filters(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, u16 index,
+ struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL, *parent = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
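+	/* The filter list is kept sorted by location: any existing rule at
+	 * this location is removed first, then the new rule is linked in
+	 * behind its predecessor.
+	 */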
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location >= index)
+ break;
+ parent = rule;
+ }
+
+ if (rule && rule->aq_fsp.location == index) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
+ if (unlikely(!aq_rx_fltr))
+ return err;
+
+ INIT_HLIST_NODE(&aq_rx_fltr->aq_node);
+
+ if (parent)
+ hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
+ else
+ hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);
+
+ ++rx_fltrs->active_filters;
+
+ return 0;
+}
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ return rx_fltrs->active_filters;
+}
+
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
+{
+ return &aq_nic->aq_hw_rx_fltrs;
+}
+
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *aq_rx_fltr;
+ int err = 0;
+
+ err = aq_check_rule(aq_nic, fsp);
+ if (err)
+ goto err_exit;
+
+ aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
+ if (unlikely(!aq_rx_fltr)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));
+
+ err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
+ if (unlikely(err))
+ goto err_free;
+
+ err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
+ if (unlikely(err)) {
+ hlist_del(&aq_rx_fltr->aq_node);
+ --rx_fltrs->active_filters;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ kfree(aq_rx_fltr);
+err_exit:
+ return err;
+}
+
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == cmd->fs.location)
+ break;
+ }
+
+ if (rule && rule->aq_fsp.location == cmd->fs.location) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+ return err;
+}
+
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node)
+ if (fsp->location <= rule->aq_fsp.location)
+ break;
+
+ if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
+ return -EINVAL;
+
+ memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));
+
+ return 0;
+}
+
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int count = 0;
+
+ cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (unlikely(count == cmd->rule_cnt))
+ return -EMSGSIZE;
+
+ rule_locs[count++] = rule->aq_fsp.location;
+ }
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ if (err)
+ goto err_exit;
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, true);
+ if (err)
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int hweight = 0;
+ int err = 0;
+ int i;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
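+	/* Count the active VLANs; if they cannot all be served by the
+	 * hardware filters, the device is left in vlan promiscuous mode
+	 * (see the check below).
+	 */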
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
+ hweight += hweight_long(aq_nic->active_vlans[i]);
+
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+ }
+
+	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+ if (err)
+ return err;
+
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (hweight < AQ_VLAN_MAX_FILTERS)
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true);
+		/* otherwise the device is left in vlan promiscuous mode */
+ }
+
+ return err;
+}
+
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int err = 0;
+
+ memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+	err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+					    aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.h b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
new file mode 100644
index 000000000000..c6a08c6585d5
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.h: RX filters related functions. */
+
+#ifndef AQ_FILTERS_H
+#define AQ_FILTERS_H
+
+#include "aq_nic.h"
+
+enum aq_rx_filter_type {
+ aq_rx_filter_ethertype,
+ aq_rx_filter_vlan,
+ aq_rx_filter_l3l4
+};
+
+struct aq_rx_filter {
+ struct hlist_node aq_node;
+ enum aq_rx_filter_type type;
+ struct ethtool_rx_flow_spec aq_fsp;
+};
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic);
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic);
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id);
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic);
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic);
+
+#endif /* AQ_FILTERS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index a1e70da358ca..81aab73dc22f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -18,6 +18,17 @@
#include "aq_rss.h"
#include "hw_atl/hw_atl_utils.h"
+#define AQ_RX_FIRST_LOC_FVLANID 0U
+#define AQ_RX_LAST_LOC_FVLANID 15U
+#define AQ_RX_FIRST_LOC_FETHERT 16U
+#define AQ_RX_LAST_LOC_FETHERT 31U
+#define AQ_RX_FIRST_LOC_FL3L4 32U
+#define AQ_RX_LAST_LOC_FL3L4 39U
+#define AQ_RX_MAX_RXNFC_LOC AQ_RX_LAST_LOC_FL3L4
+#define AQ_VLAN_MAX_FILTERS \
+ (AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
+#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
+
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
@@ -130,6 +141,7 @@ struct aq_hw_s {
struct aq_ring_s;
struct aq_ring_param_s;
struct sk_buff;
+struct aq_rx_filter_l3l4;
struct aq_hw_ops {
@@ -183,6 +195,23 @@ struct aq_hw_ops {
int (*hw_packet_filter_set)(struct aq_hw_s *self,
unsigned int packet_filter);
+ int (*hw_filter_l3l4_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l3l4_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l2_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_l2_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_vlan_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans);
+
+ int (*hw_filter_vlan_ctrl)(struct aq_hw_s *self, bool enable);
+
int (*hw_multicast_list_set)(struct aq_hw_s *self,
u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 7c07eef275eb..2a11c1eefd8f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,6 +13,7 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
+#include "aq_filters.h"
#include <linux/netdevice.h>
#include <linux/module.h>
@@ -49,6 +50,11 @@ static int aq_ndev_open(struct net_device *ndev)
err = aq_nic_init(aq_nic);
if (err < 0)
goto err_exit;
+
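+	/* Filter rules are kept in software; re-program them into the
+	 * freshly initialized hardware.
+	 */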
+ err = aq_reapply_rxnfc_all_rules(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
err = aq_nic_start(aq_nic);
if (err < 0)
goto err_exit;
@@ -101,6 +107,21 @@ static int aq_ndev_set_features(struct net_device *ndev,
bool is_lro = false;
int err = 0;
+ if (!(features & NETIF_F_NTUPLE)) {
+ if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
+ err = aq_clear_rxnfc_all_rules(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+ if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ err = aq_filters_vlan_offload_off(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+
aq_cfg->features = features;
if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
@@ -119,6 +140,7 @@ static int aq_ndev_set_features(struct net_device *ndev,
err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
aq_cfg);
+err_exit:
return err;
}
@@ -147,6 +169,35 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
aq_nic_set_multicast_list(aq_nic, ndev);
}
+static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ set_bit(vid, aq_nic->active_vlans);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ clear_bit(vid, aq_nic->active_vlans);
+
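+	/* If no explicit ethtool rule referenced this VID, just rebuild
+	 * the plain VLAN filter table.
+	 */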
+	if (aq_del_fvlan_by_vlan(aq_nic, vid) == -ENOENT)
+ return aq_filters_vlans_update(aq_nic);
+
+ return 0;
+}
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -154,5 +205,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
- .ndo_set_features = aq_ndev_set_features
+ .ndo_set_features = aq_ndev_set_features,
+ .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 7abdc0952425..0147c037ca96 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -44,7 +44,7 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
struct aq_rss_parameters *rss_params = &cfg->aq_rss;
int i = 0;
- static u8 rss_key[40] = {
+ static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
@@ -84,10 +84,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_lro = AQ_CFG_IS_LRO_DEF;
- cfg->vlan_id = 0U;
-
- aq_nic_rss_init(self, cfg->num_rss_queues);
-
/*descriptors */
cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);
@@ -108,6 +104,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+ aq_nic_rss_init(self, cfg->num_rss_queues);
+
cfg->irq_type = aq_pci_func_get_irq_type(self);
if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 44ec47a3d60a..8e34c1e49bf2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -35,7 +35,6 @@ struct aq_nic_cfg_s {
u32 mtu;
u32 flow_control;
u32 link_speed_msk;
- u32 vlan_id;
u32 wol;
u16 is_mc_list_enabled;
u16 mc_list_count;
@@ -61,6 +60,23 @@ struct aq_nic_cfg_s {
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
+struct aq_hw_rx_fl2 {
+ struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
+};
+
+struct aq_hw_rx_fl3l4 {
+ u8 active_ipv4;
+ u8 active_ipv6:2;
+ u8 is_ipv6;
+};
+
+struct aq_hw_rx_fltrs_s {
+ struct hlist_head filter_list;
+ u16 active_filters;
+ struct aq_hw_rx_fl2 fl2;
+ struct aq_hw_rx_fl3l4 fl3l4;
+};
+
struct aq_nic_s {
atomic_t flags;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
@@ -81,10 +97,13 @@ struct aq_nic_s {
u32 count;
u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
} mc_list;
+	/* Bitmask of VLAN IDs currently assigned by the network stack */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct pci_dev *pdev;
unsigned int msix_entry_mask;
u32 irqvecs;
+ struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 1d5d6b8df855..c8b44cdb91c1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -19,6 +19,7 @@
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
+#include "aq_filters.h"
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
@@ -309,6 +310,7 @@ static void aq_pci_remove(struct pci_dev *pdev)
struct aq_nic_s *self = pci_get_drvdata(pdev);
if (self->ndev) {
+ aq_clear_rxnfc_all_rules(self);
if (self->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(self->ndev);
aq_nic_free_vectors(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index f02592f43fe3..b58ca7cb8e9d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -21,7 +21,7 @@
#define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
.is_64_dma = true, \
- .msix_irqs = 4U, \
+ .msix_irqs = 8U, \
.irq_mask = ~0U, \
.vecs = HW_ATL_B0_RSS_MAX, \
.tcs = HW_ATL_B0_TC_MAX, \
@@ -41,7 +41,9 @@
NETIF_F_RXHASH | \
NETIF_F_SG | \
NETIF_F_TSO | \
- NETIF_F_LRO, \
+ NETIF_F_LRO | \
+ NETIF_F_NTUPLE | \
+ NETIF_F_HW_VLAN_CTAG_FILTER, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
@@ -319,20 +321,11 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
- if (cfg->vlan_id) {
- hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
- hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
- hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
- hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
- hw_atl_rpf_vlan_untagged_act_set(self, 1U);
-
- hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
- hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
- hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
- } else {
- hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
- }
+	/* Always accept untagged packets */
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
/* Rx Interrupts */
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
@@ -674,7 +667,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
- is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
+ is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;
pkt_type = 0xFFU & (rxd_wb->type >> 4);
@@ -945,6 +938,142 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_cmd_clear(self, location);
+ hw_atl_rpf_l4_spd_set(self, 0U, location);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location);
+ hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
+ } else {
+ int i;
+
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ hw_atl_rpfl3l4_cmd_clear(self, location + i);
+ hw_atl_rpf_l4_spd_set(self, 0U, location + i);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
+ }
+ hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
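+	/* Reset the slot(s) first so stale address/port state from a
+	 * previous rule cannot leak into the new configuration.
+	 */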
+ hw_atl_b0_hw_fl3l4_clear(self, data);
+
+ if (data->cmd) {
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
+ location,
+ data->ip_dst[0]);
+ hw_atl_rpfl3l4_ipv4_src_addr_set(self,
+ location,
+ data->ip_src[0]);
+ } else {
+ hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
+ location,
+ data->ip_dst);
+ hw_atl_rpfl3l4_ipv6_src_addr_set(self,
+ location,
+ data->ip_src);
+ }
+ }
+ hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+ hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+ hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self,
+ !!data->user_priority_en,
+ data->location);
+ if (data->user_priority_en)
+ hw_atl_rpf_etht_user_priority_set(self,
+ data->user_priority,
+ data->location);
+
+ if (data->queue < 0) {
+ hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
+ } else {
+ hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
+ hw_atl_rpf_etht_flr_set(self, 0U, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);
+
+ return aq_hw_err_from_flags(self);
+}
+
+/**
+ * @brief Set VLAN filter table
+ * @details Configure the VLAN filter table to accept (and assign a queue
+ * for) traffic carrying the given vlan ids.
+ * Note: call this function while in vlan promisc mode to avoid losing traffic
+ *
+ * @param aq_hw_s
+ * @param aq_rx_filter_vlan VLAN filter configuration
+ * @return 0 - OK, <0 - error
+ */
+static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
+ if (aq_vlans[i].enable) {
+ hw_atl_rpf_vlan_id_flr_set(self,
+ aq_vlans[i].vlan_id,
+ i);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
+ if (aq_vlans[i].queue != 0xFF) {
+ hw_atl_rpf_vlan_rxq_flr_set(self,
+ aq_vlans[i].queue,
+ i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
+{
+	/* Fall back to promiscuous mode when disabling the vlan filter */
+	hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
+
+ return aq_hw_err_from_flags(self);
+}
+
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
@@ -969,6 +1098,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
.hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
.hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
+ .hw_filter_l2_set = hw_atl_b0_hw_fl2_set,
+ .hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear,
+ .hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set,
+ .hw_filter_vlan_set = hw_atl_b0_hw_vlan_set,
+ .hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl,
.hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_b0_hw_rss_set,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 5502ec5f0f69..939f77e2e117 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -898,6 +898,24 @@ void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
vlan_id_flr);
}
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_EN_F_MSK,
+ HW_ATL_RPF_VL_RXQ_EN_F_SHIFT,
+ vlan_rxq_en);
+}
+
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_F_MSK,
+ HW_ATL_RPF_VL_RXQ_F_SHIFT,
+ vlan_rxq);
+}
+
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter)
{
@@ -965,6 +983,20 @@ void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPD_ADR(filter),
+ HW_ATL_RPF_L4_SPD_MSK,
+ HW_ATL_RPF_L4_SPD_SHIFT, val);
+}
+
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPD_ADR(filter),
+ HW_ATL_RPF_L4_DPD_MSK,
+ HW_ATL_RPF_L4_DPD_SHIFT, val);
+}
+
/* RPO: rx packet offload */
void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en)
@@ -1476,3 +1508,80 @@ void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT,
up_force_intr);
}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location),
+ ipv4_dest);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src)
+{
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location),
+ ipv4_src);
+}
+
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), cmd);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ ipv6_src[i]);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ ipv6_dest[i]);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 41f239928c15..03c570d115fe 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -441,6 +441,14 @@ void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
u32 filter);
+/* Set VLAN RX queue assignment enable */
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter);
+
+/* Set VLAN RX queue */
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter);
+
/* set ethertype filter enable */
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter);
@@ -475,6 +483,12 @@ void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
/* set ethertype filter */
void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+/* set L4 source port */
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
+/* set L4 destination port */
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
/* rpo */
/* set ipv4 header checksum offload enable */
@@ -704,4 +718,38 @@ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
/* set uP Force Interrupt */
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
+/* clear ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* set ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest);
+
+/* set ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src);
+
+/* set command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd);
+
+/* set ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src);
+
+/* set ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest);
+
#endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index a715fa317b1c..8470d92db812 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1092,24 +1092,43 @@
/* Default value of bitfield vl_id{F}[B:0] */
#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
-/* RX et_en{F} Bitfield Definitions
- * Preprocessor definitions for the bitfield "et_en{F}".
+/* RX vl_rxq_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq_en{F}".
* Parameter: filter {F} | stride size 0x4 | range [0, 15]
- * PORT="pif_rpf_et_en_i[0]"
- */
-
-/* Register address for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
-/* Bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
-/* Inverted bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
-/* Lower bit position of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_SHIFT 31
-/* Width of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_WIDTH 1
-/* Default value of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
+ * PORT="pif_rpf_vl_rxq_en_i"
+ */
+
+/* Register address for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSK 0x10000000
+/* Inverted bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSKN 0xEFFFFFFF
+/* Lower bit position of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_SHIFT 28
+/* Width of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_WIDTH 1
+/* Default value of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_DEFAULT 0x0
+
+/* RX vl_rxq{F}[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq{F}[4:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_rxq0_i[4:0]"
+ */
+
+/* Register address for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSK 0x01F00000
+/* Inverted bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSKN 0xFE0FFFFF
+/* Lower bit position of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_SHIFT 20
+/* Width of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_WIDTH 5
+/* Default value of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_DEFAULT 0x0
/* rx et_en{f} bitfield definitions
* preprocessor definitions for the bitfield "et_en{f}".
@@ -1263,6 +1282,44 @@
/* default value of bitfield et_val{f}[f:0] */
#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
+/* RX l4_sp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
+ * Parameter: srcport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_sp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_ADR(srcport) (0x00005400u + (srcport) * 0x4)
+/* Bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_SHIFT 0
+/* Width of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_WIDTH 16
+/* Default value of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_DEFAULT 0x0
+
+/* RX l4_dp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_dp{D}[F:0]".
+ * Parameter: destport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_dp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_ADR(destport) (0x00005420u + (destport) * 0x4)
+/* Bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_SHIFT 0
+/* Width of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_WIDTH 16
+/* Default value of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_DEFAULT 0x0
+
/* rx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
* port="pif_rpo_ipv4_chk_en_i"
@@ -2418,4 +2475,48 @@
/* default value of bitfield uP Force Interrupt */
#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
+#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380
+#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0
+#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0
+
+#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4)
+
+/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_sa0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
+#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4)
+/* Bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_SHIFT 0
+/* Width of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_WIDTH 32
+/* Default value of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
+
+/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_da0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
+#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053D0 + (location) * 0x4)
+/* Bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_SHIFT 0
+/* Width of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_WIDTH 32
+/* Default value of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
+
#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 7def1cb8ab9d..9b74a3197d7f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -263,6 +263,8 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
10, 1000U);
+ if (err)
+ return err;
}
if (self->rbl_enabled)
@@ -454,8 +456,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
(fw.val =
aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
fw.tid), 1000U, 100U);
- if (err < 0)
- goto err_exit;
if (fw.len == 0xFFFFU) {
err = hw_atl_utils_fw_rpc_call(self, sw.len);
@@ -463,8 +463,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
}
} while (sw.tid != fw.tid || 0xFFFFU == fw.len);
- if (err < 0)
- goto err_exit;
if (rpc) {
if (fw.len) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 3613fca64b58..48278e333462 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -240,6 +240,64 @@ struct __packed offload_info {
u8 buf[0];
};
+enum hw_atl_rx_action_with_traffic {
+ HW_ATL_RX_DISCARD,
+ HW_ATL_RX_HOST,
+};
+
+struct aq_rx_filter_vlan {
+ u8 enable;
+ u8 location;
+ u16 vlan_id;
+ u8 queue;
+};
+
+struct aq_rx_filter_l2 {
+ s8 queue;
+ u8 location;
+ u8 user_priority_en;
+ u8 user_priority;
+ u16 ethertype;
+};
+
+struct aq_rx_filter_l3l4 {
+ u32 cmd;
+ u8 location;
+ u32 ip_dst[4];
+ u32 ip_src[4];
+ u16 p_dst;
+ u16 p_src;
+ u8 is_ipv6;
+};
+
+enum hw_atl_rx_protocol_value_l3l4 {
+ HW_ATL_RX_TCP,
+ HW_ATL_RX_UDP,
+ HW_ATL_RX_SCTP,
+ HW_ATL_RX_ICMP
+};
+
+enum hw_atl_rx_ctrl_registers_l3l4 {
+ HW_ATL_RX_ENABLE_MNGMNT_QUEUE_L3L4 = BIT(22),
+ HW_ATL_RX_ENABLE_QUEUE_L3L4 = BIT(23),
+ HW_ATL_RX_ENABLE_ARP_FLTR_L3 = BIT(24),
+ HW_ATL_RX_ENABLE_CMP_PROT_L4 = BIT(25),
+ HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 = BIT(26),
+ HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4 = BIT(27),
+ HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 = BIT(28),
+ HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3 = BIT(29),
+ HW_ATL_RX_ENABLE_L3_IPV6 = BIT(30),
+ HW_ATL_RX_ENABLE_FLTR_L3L4 = BIT(31)
+};
+
+#define HW_ATL_RX_QUEUE_FL3L4_SHIFT 8U
+#define HW_ATL_RX_ACTION_FL3F4_SHIFT 16U
+
+#define HW_ATL_RX_CNT_REG_ADDR_IPV6 4U
+
+#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
+ ((location) - AQ_RX_FIRST_LOC_FL3L4)
+
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index bd277b0dc615..4406325fdd9f 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -432,7 +432,8 @@ static int arc_emac_open(struct net_device *ndev)
phy_dev->autoneg = AUTONEG_ENABLE;
phy_dev->speed = 0;
phy_dev->duplex = 0;
- phy_dev->advertising &= phy_dev->supported;
+ linkmode_and(phy_dev->advertising, phy_dev->advertising,
+ phy_dev->supported);
priv->last_rx_bd = 0;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index e445ab724827..f44808959ff3 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2248,6 +2248,7 @@ static void b44_adjust_link(struct net_device *dev)
static int b44_register_phy_one(struct b44 *bp)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mii_bus *mii_bus;
struct ssb_device *sdev = bp->sdev;
struct phy_device *phydev;
@@ -2303,11 +2304,12 @@ static int b44_register_phy_one(struct b44 *bp)
}
/* mask with MAC supported features */
- phydev->supported &= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_MII);
- phydev->advertising = phydev->supported;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
bp->old_link = 0;
bp->phy_addr = phydev->mdio.addr;
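(The arc_emac and b44 hunks above both replace arithmetic on the legacy u32 supported/advertising fields with the linkmode bitmap helpers, which scale past 32 link modes. A simplified, runnable userspace model of the mask-and-copy pattern follows; the mode names are invented, the kernel uses the ETHTOOL_LINK_MODE_* bit numbers.)

#include <stdint.h>
#include <stdio.h>

/* each link mode is one bit; masking replaces the old u32 field math */
enum { MODE_100HALF, MODE_100FULL, MODE_1000FULL, MODE_AUTONEG, MODE_MII };
#define MODE_BIT(m) (1ULL << (m))

int main(void)
{
        uint64_t supported = MODE_BIT(MODE_100HALF) | MODE_BIT(MODE_100FULL) |
                             MODE_BIT(MODE_1000FULL) | MODE_BIT(MODE_AUTONEG);
        uint64_t mask = MODE_BIT(MODE_100HALF) | MODE_BIT(MODE_100FULL) |
                        MODE_BIT(MODE_AUTONEG) | MODE_BIT(MODE_MII);
        uint64_t advertising;

        supported &= mask;       /* linkmode_and(supported, supported, mask) */
        advertising = supported; /* linkmode_copy(advertising, supported)    */

        printf("advertising=0x%llx (1000FULL masked off)\n",
               (unsigned long long)advertising);
        return 0;
}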
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0e2d99c737e3..4574275ef445 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1068,6 +1068,7 @@ static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
+ unsigned int index;
u32 reg;
/* Disable RXCHK, active filters and Broadcom tag matching */
@@ -1076,6 +1077,15 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
rxchk_writel(priv, reg, RXCHK_CONTROL);
+ /* Make sure we restore the correct CID index in case the HW lost
+ * its context during a deep idle state
+ */
+ for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+ rxchk_writel(priv, priv->filters_loc[index] <<
+ RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
+ rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+ }
+
/* Clear the MagicPacket detection logic */
mpd_enable_set(priv, false);
@@ -2189,6 +2199,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+ priv->filters_loc[index] = nfc->fs.location;
set_bit(index, priv->filters);
return 0;
@@ -2208,6 +2219,7 @@ static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
* be taken care of during suspend time by bcm_sysport_suspend_to_wol
*/
clear_bit(index, priv->filters);
+ priv->filters_loc[index] = 0;
return 0;
}
@@ -2312,7 +2324,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
struct bcm_sysport_priv *priv;
struct net_device *slave_dev;
unsigned int num_tx_queues;
- unsigned int q, start, port;
+ unsigned int q, qp, port;
struct net_device *dev;
priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
@@ -2351,20 +2363,61 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
priv->per_port_num_tx_queues = num_tx_queues;
- start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
- for (q = 0; q < num_tx_queues; q++) {
- ring = &priv->tx_rings[q + start];
+ for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
+ q++) {
+ ring = &priv->tx_rings[q];
+
+ if (ring->inspect)
+ continue;
/* Just remember the mapping; the actual programming is done
* during bcm_sysport_init_tx_ring
*/
- ring->switch_queue = q;
+ ring->switch_queue = qp;
ring->switch_port = port;
ring->inspect = true;
priv->ring_map[q + port * num_tx_queues] = ring;
+ qp++;
+ }
+
+ return 0;
+}
+
+static int bcm_sysport_unmap_queues(struct notifier_block *nb,
+ struct dsa_notifier_register_info *info)
+{
+ struct bcm_sysport_tx_ring *ring;
+ struct bcm_sysport_priv *priv;
+ struct net_device *slave_dev;
+ unsigned int num_tx_queues;
+ struct net_device *dev;
+ unsigned int q, port;
+
+ priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
+ if (priv->netdev != info->master)
+ return 0;
+
+ dev = info->master;
+
+ if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+ return 0;
+
+ port = info->port_number;
+ slave_dev = info->info.dev;
+
+ num_tx_queues = slave_dev->real_num_tx_queues;
+
+ for (q = 0; q < dev->num_tx_queues; q++) {
+ ring = &priv->tx_rings[q];
- /* Set all queues as being used now */
- set_bit(q + start, &priv->queue_bitmap);
+ if (ring->switch_port != port)
+ continue;
+
+ if (!ring->inspect)
+ continue;
+
+ ring->inspect = false;
+ priv->ring_map[q + port * num_tx_queues] = NULL;
}
return 0;
@@ -2373,14 +2426,18 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct dsa_notifier_register_info *info;
-
- if (event != DSA_PORT_REGISTER)
- return NOTIFY_DONE;
+ int ret = NOTIFY_DONE;
- info = ptr;
+ switch (event) {
+ case DSA_PORT_REGISTER:
+ ret = bcm_sysport_map_queues(nb, ptr);
+ break;
+ case DSA_PORT_UNREGISTER:
+ ret = bcm_sysport_unmap_queues(nb, ptr);
+ break;
+ }
- return notifier_from_errno(bcm_sysport_map_queues(nb, info));
+ return notifier_from_errno(ret);
}
#define REV_FMT "v%2x.%02x"
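(The rework above walks every master TX ring with two indices: q scans the rings, qp counts the ones actually claimed for this port, and rings already marked inspect are skipped, so DSA ports can now map and unmap independently instead of consuming a shared queue bitmap. A runnable toy model of that walk, with made-up sizes and pre-claimed rings:)

#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 8

int main(void)
{
        bool inspect[NUM_RINGS] = { true, false, true, false };
        unsigned int num_tx_queues = 2, q, qp;

        for (q = 0, qp = 0; q < NUM_RINGS && qp < num_tx_queues; q++) {
                if (inspect[q])
                        continue; /* already claimed by another port */
                inspect[q] = true;
                printf("ring %u -> switch queue %u\n", q, qp);
                qp++;
        }
        return 0;
}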
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index a7a230884a87..0887e6356649 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -786,6 +786,7 @@ struct bcm_sysport_priv {
/* Ethtool */
u32 msg_enable;
DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX);
+ u32 filters_loc[RXCHK_BRCM_TAG_MAX];
struct bcm_sysport_stats64 stats64;
@@ -795,7 +796,6 @@ struct bcm_sysport_priv {
/* map information between switch port queues and local queues */
struct notifier_block dsa_notifier;
unsigned int per_port_num_tx_queues;
- unsigned long queue_bitmap;
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 0de487a8f0eb..5cd3135dfe30 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1282,6 +1282,7 @@ enum sp_rtnl_flag {
BNX2X_SP_RTNL_TX_STOP,
BNX2X_SP_RTNL_GET_DRV_VERSION,
BNX2X_SP_RTNL_CHANGE_UDP_PORT,
+ BNX2X_SP_RTNL_UPDATE_SVID,
};
enum bnx2x_iov_flag {
@@ -2520,6 +2521,7 @@ void bnx2x_update_mfw_dump(struct bnx2x *bp);
void bnx2x_init_ptp(struct bnx2x *bp);
int bnx2x_configure_ptp_filters(struct bnx2x *bp);
void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb);
+void bnx2x_register_phc(struct bnx2x *bp);
#define BNX2X_MAX_PHC_DRIFT 31000000
#define BNX2X_PTP_TX_TIMEOUT
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 686899d7e555..ecb1bd7eb508 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2842,6 +2842,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_set_rx_mode_inner(bp);
if (bp->flags & PTP_SUPPORTED) {
+ bnx2x_register_phc(bp);
bnx2x_init_ptp(bp);
bnx2x_configure_ptp_filters(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index a4a90b6cdb46..749d0ef44371 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1105,11 +1105,39 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct bnx2x *bp = netdev_priv(dev);
+ char version[ETHTOOL_FWVERS_LEN];
+ int ext_dev_info_offset;
+ u32 mbi;
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
- bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
+ memset(version, 0, sizeof(version));
+ snprintf(version, ETHTOOL_FWVERS_LEN, " storm %d.%d.%d.%d",
+ BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
+ BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION);
+ strlcat(info->version, version, sizeof(info->version));
+
+ if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
+ ext_dev_info_offset = SHMEM2_RD(bp,
+ extended_dev_info_shared_addr);
+ mbi = REG_RD(bp, ext_dev_info_offset +
+ offsetof(struct extended_dev_info_shared_cfg,
+ mbi_version));
+ if (mbi) {
+ memset(version, 0, sizeof(version));
+ snprintf(version, ETHTOOL_FWVERS_LEN, "mbi %d.%d.%d ",
+ (mbi & 0xff000000) >> 24,
+ (mbi & 0x00ff0000) >> 16,
+ (mbi & 0x0000ff00) >> 8);
+ strlcpy(info->fw_version, version,
+ sizeof(info->fw_version));
+ }
+ }
+
+ memset(version, 0, sizeof(version));
+ bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
+ strlcat(info->fw_version, version, sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
}
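(The mbi register packs three version fields into its top three bytes. A standalone demonstration of the decode used above; the register value is an arbitrary example:)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t mbi = 0x07080900; /* arbitrary example register value */
        char version[32];

        snprintf(version, sizeof(version), "mbi %u.%u.%u ",
                 (mbi & 0xff000000) >> 24,
                 (mbi & 0x00ff0000) >> 16,
                 (mbi & 0x0000ff00) >> 8);
        printf("%s\n", version); /* prints "mbi 7.8.9 " */
        return 0;
}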
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index f8b810313094..d9057c8bbeef 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1140,6 +1140,11 @@ struct shm_dev_info { /* size */
};
+struct extended_dev_info_shared_cfg {
+ u32 reserved[18];
+ u32 mbi_version;
+ u32 mbi_date;
+};
#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
#error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
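(Since mbi_version follows 18 reserved u32 words, the offsetof() used by the ethtool hunk above resolves to byte 72 past extended_dev_info_shared_addr. A runnable check:)

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct extended_dev_info_shared_cfg {
        uint32_t reserved[18];
        uint32_t mbi_version;
        uint32_t mbi_date;
};

int main(void)
{
        /* 18 * 4 = 72: the REG_RD() above reads shmem base + 72 */
        printf("mbi_version offset = %zu\n",
               offsetof(struct extended_dev_info_shared_cfg, mbi_version));
        return 0;
}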
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 95309b27c7d1..3b5b47e98c73 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2925,6 +2925,10 @@ static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
+ /* Prepare parameters for function state transitions */
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
int func = BP_ABS_FUNC(bp);
u32 val;
@@ -4311,7 +4315,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
bnx2x_handle_eee_event(bp);
if (val & DRV_STATUS_OEM_UPDATE_SVID)
- bnx2x_handle_update_svid_cmd(bp);
+ bnx2x_schedule_sp_rtnl(bp,
+ BNX2X_SP_RTNL_UPDATE_SVID, 0);
if (bp->link_vars.periodic_flags &
PERIODIC_FLAGS_LINK_EVENT) {
@@ -7723,6 +7728,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
REG_WR(bp, reg_addr, val);
}
+ if (CHIP_IS_E3B0(bp))
+ bp->flags |= PTP_SUPPORTED;
+
return 0;
}
@@ -8472,6 +8480,7 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
/* Fill a user request section if needed */
if (!test_bit(RAMROD_CONT, ramrod_flags)) {
ramrod_param.user_req.u.vlan.vlan = vlan;
+ __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
/* Set the command: ADD or DEL */
if (set)
ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
@@ -8492,6 +8501,27 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
return rc;
}
+static int bnx2x_del_all_vlans(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
+ unsigned long ramrod_flags = 0, vlan_flags = 0;
+ struct bnx2x_vlan_entry *vlan;
+ int rc;
+
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ __set_bit(BNX2X_VLAN, &vlan_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
+ if (rc)
+ return rc;
+
+ /* Mark that hw forgot all entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+ bp->vlan_cnt = 0;
+
+ return 0;
+}
+
int bnx2x_del_all_macs(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *mac_obj,
int mac_type, bool wait_for_comp)
@@ -9330,6 +9360,17 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
rc);
+ /* The whole *vlan_obj structure may not be initialized if VLAN
+ * filtering offload is not supported by hardware. Currently this is
+ * true for all hardware covered by CHIP_IS_E1x().
+ */
+ if (!CHIP_IS_E1x(bp)) {
+ /* Remove all currently configured VLANs */
+ rc = bnx2x_del_all_vlans(bp);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all VLANs\n");
+ }
+
/* Disable LLH */
if (!CHIP_IS_E1(bp))
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
@@ -9417,8 +9458,13 @@ unload_error:
* function stop ramrod is sent, since as part of this ramrod FW access
* PTP registers.
*/
- if (bp->flags & PTP_SUPPORTED)
+ if (bp->flags & PTP_SUPPORTED) {
bnx2x_stop_ptp(bp);
+ if (bp->ptp_clock) {
+ ptp_clock_unregister(bp->ptp_clock);
+ bp->ptp_clock = NULL;
+ }
+ }
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
@@ -10359,6 +10405,9 @@ sp_rtnl_not_reset:
&bp->sp_rtnl_state))
bnx2x_update_mng_version(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
+ bnx2x_handle_update_svid_cmd(bp);
+
if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
&bp->sp_rtnl_state)) {
if (bnx2x_udp_port_update(bp)) {
@@ -11750,8 +11799,10 @@ static void bnx2x_get_fcoe_info(struct bnx2x *bp)
* If maximum allowed number of connections is zero -
* disable the feature.
*/
- if (!bp->cnic_eth_dev.max_fcoe_conn)
+ if (!bp->cnic_eth_dev.max_fcoe_conn) {
bp->flags |= NO_FCOE_FLAG;
+ eth_zero_addr(bp->fip_mac);
+ }
}
static void bnx2x_get_cnic_info(struct bnx2x *bp)
@@ -12494,9 +12545,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->dump_preset_idx = 1;
- if (CHIP_IS_E3B0(bp))
- bp->flags |= PTP_SUPPORTED;
-
return rc;
}
@@ -13024,13 +13072,6 @@ static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
{
- struct bnx2x_vlan_entry *vlan;
-
- /* The hw forgot all entries after reload */
- list_for_each_entry(vlan, &bp->vlan_reg, link)
- vlan->hw = false;
- bp->vlan_cnt = 0;
-
/* Don't set rx mode here. Our caller will do it. */
bnx2x_vlan_configure(bp, false);
@@ -13895,7 +13936,7 @@ static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
return -ENOTSUPP;
}
-static void bnx2x_register_phc(struct bnx2x *bp)
+void bnx2x_register_phc(struct bnx2x *bp)
{
/* Fill the ptp_clock_info struct and register PTP clock */
bp->ptp_clock_info.owner = THIS_MODULE;
@@ -14097,8 +14138,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev->base_addr, bp->pdev->irq, dev->dev_addr);
pcie_print_link_status(bp->pdev);
- bnx2x_register_phc(bp);
-
if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
@@ -14131,11 +14170,6 @@ static void __bnx2x_remove(struct pci_dev *pdev,
struct bnx2x *bp,
bool remove_netdev)
{
- if (bp->ptp_clock) {
- ptp_clock_unregister(bp->ptp_clock);
- bp->ptp_clock = NULL;
- }
-
/* Delete storage MAC address */
if (!NO_FCOE(bp)) {
rtnl_lock();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 0bf2fd470819..7a6e82db4231 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -265,6 +265,7 @@ enum {
BNX2X_ETH_MAC,
BNX2X_ISCSI_ETH_MAC,
BNX2X_NETQ_ETH_MAC,
+ BNX2X_VLAN,
BNX2X_DONT_CONSUME_CAM_CREDIT,
BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
};
@@ -272,7 +273,8 @@ enum {
#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
1 << BNX2X_ETH_MAC | \
1 << BNX2X_ISCSI_ETH_MAC | \
- 1 << BNX2X_NETQ_ETH_MAC)
+ 1 << BNX2X_NETQ_ETH_MAC | \
+ 1 << BNX2X_VLAN)
#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
((flags) & BNX2X_VLAN_MAC_CMP_MASK)
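(Adding BNX2X_VLAN to BNX2X_VLAN_MAC_CMP_MASK is what lets the new bnx2x_del_all_vlans() above match VLAN-only entries through delete_all(). A minimal userspace sketch of the flag test; the enum is a trimmed copy and the exact bit positions are an assumption for illustration:)

#include <stdio.h>

enum {
        BNX2X_UC_LIST_MAC,
        BNX2X_ETH_MAC,
        BNX2X_ISCSI_ETH_MAC,
        BNX2X_NETQ_ETH_MAC,
        BNX2X_VLAN,
};

#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
                                 1 << BNX2X_ETH_MAC | \
                                 1 << BNX2X_ISCSI_ETH_MAC | \
                                 1 << BNX2X_NETQ_ETH_MAC | \
                                 1 << BNX2X_VLAN)
#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) ((flags) & BNX2X_VLAN_MAC_CMP_MASK)

int main(void)
{
        unsigned long flags = 1 << BNX2X_VLAN;

        /* delete_all() compares entries on these bits only */
        printf("cmp flags: 0x%lx\n", BNX2X_VLAN_MAC_CMP_FLAGS(flags));
        return 0;
}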
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d4c300117529..3aa80da973d7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -118,6 +118,7 @@ enum board_idx {
NETXTREME_E_VF,
NETXTREME_C_VF,
NETXTREME_S_VF,
+ NETXTREME_E_P5_VF,
};
/* indexed by enum above */
@@ -160,6 +161,7 @@ static const struct {
[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
+ [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -210,6 +212,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -237,7 +240,7 @@ static struct workqueue_struct *bnxt_pf_wq;
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
- idx == NETXTREME_S_VF);
+ idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -1809,7 +1812,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
case CMPL_BASE_TYPE_HWRM_DONE:
seq_id = le16_to_cpu(h_cmpl->sequence_id);
if (seq_id == bp->hwrm_intr_seq_id)
- bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
+ bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
else
netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
break;
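(This drops the HWRM_SEQ_ID_INVALID sentinel: the completion handler now stores the bitwise complement of the expected sequence id, and the sender, in the bnxt_hwrm_do_send_msg changes further below, polls until it observes that complement. A tiny runnable model of the handshake:)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t seq_id = 0x0001;   /* id assigned when the request is sent */
        uint16_t intr_seq = seq_id; /* bp->hwrm_intr_seq_id */

        /* completion handler: mark the id consumed by complementing it */
        intr_seq = (uint16_t)~intr_seq;

        /* waiter: done once the stored value equals the complement */
        printf("done=%d\n", intr_seq == (uint16_t)~seq_id);
        return 0;
}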
@@ -2372,7 +2375,11 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
rmem->pg_arr[i] = NULL;
}
if (rmem->pg_tbl) {
- dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ dma_free_coherent(&pdev->dev, pg_tbl_size,
rmem->pg_tbl, rmem->pg_tbl_map);
rmem->pg_tbl = NULL;
}
@@ -2390,9 +2397,12 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
valid_bit = PTU_PTE_VALID;
- if (rmem->nr_pages > 1) {
- rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
- rmem->nr_pages * 8,
+ if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
+ size_t pg_tbl_size = rmem->nr_pages * 8;
+
+ if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
+ pg_tbl_size = rmem->page_size;
+ rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
&rmem->pg_tbl_map,
GFP_KERNEL);
if (!rmem->pg_tbl)
@@ -2409,7 +2419,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
- if (rmem->nr_pages > 1) {
+ if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
extra_bits |= PTU_PTE_NEXT_TO_LAST;
@@ -3276,6 +3286,27 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr);
bp->hwrm_cmd_resp_addr = NULL;
}
+
+ if (bp->hwrm_cmd_kong_resp_addr) {
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ bp->hwrm_cmd_kong_resp_addr,
+ bp->hwrm_cmd_kong_resp_dma_addr);
+ bp->hwrm_cmd_kong_resp_addr = NULL;
+ }
+}
+
+static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+
+ bp->hwrm_cmd_kong_resp_addr =
+ dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &bp->hwrm_cmd_kong_resp_dma_addr,
+ GFP_KERNEL);
+ if (!bp->hwrm_cmd_kong_resp_addr)
+ return -ENOMEM;
+
+ return 0;
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -3317,9 +3348,8 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
return 0;
}
-static void bnxt_free_stats(struct bnxt *bp)
+static void bnxt_free_port_stats(struct bnxt *bp)
{
- u32 size, i;
struct pci_dev *pdev = bp->pdev;
bp->flags &= ~BNXT_FLAG_PORT_STATS;
@@ -3345,6 +3375,12 @@ static void bnxt_free_stats(struct bnxt *bp)
bp->hw_rx_port_stats_ext_map);
bp->hw_rx_port_stats_ext = NULL;
}
+}
+
+static void bnxt_free_ring_stats(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int size, i;
if (!bp->bnapi)
return;
@@ -3384,6 +3420,9 @@ static int bnxt_alloc_stats(struct bnxt *bp)
}
if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
+ if (bp->hw_rx_port_stats)
+ goto alloc_ext_stats;
+
bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
sizeof(struct tx_port_stats) + 1024;
@@ -3400,11 +3439,15 @@ static int bnxt_alloc_stats(struct bnxt *bp)
sizeof(struct rx_port_stats) + 512;
bp->flags |= BNXT_FLAG_PORT_STATS;
+alloc_ext_stats:
/* Display extended statistics only if FW supports it */
if (bp->hwrm_spec_code < 0x10804 ||
bp->hwrm_spec_code == 0x10900)
return 0;
+ if (bp->hw_rx_port_stats_ext)
+ goto alloc_tx_ext_stats;
+
bp->hw_rx_port_stats_ext =
dma_zalloc_coherent(&pdev->dev,
sizeof(struct rx_port_stats_ext),
@@ -3413,6 +3456,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
if (!bp->hw_rx_port_stats_ext)
return 0;
+alloc_tx_ext_stats:
+ if (bp->hw_tx_port_stats_ext)
+ return 0;
+
if (bp->hwrm_spec_code >= 0x10902) {
bp->hw_tx_port_stats_ext =
dma_zalloc_coherent(&pdev->dev,
@@ -3520,7 +3567,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
bnxt_free_cp_rings(bp);
bnxt_free_ntp_fltrs(bp, irq_re_init);
if (irq_re_init) {
- bnxt_free_stats(bp);
+ bnxt_free_ring_stats(bp);
bnxt_free_ring_grps(bp);
bnxt_free_vnics(bp);
kfree(bp->tx_ring_map);
@@ -3721,7 +3768,10 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
req->req_type = cpu_to_le16(req_type);
req->cmpl_ring = cpu_to_le16(cmpl_ring);
req->target_id = cpu_to_le16(target_id);
- req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
+ if (bnxt_kong_hwrm_message(bp, req))
+ req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
+ else
+ req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
}
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
@@ -3736,11 +3786,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
struct hwrm_short_input short_input = {0};
-
- req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
- memset(resp, 0, PAGE_SIZE);
- cp_ring_id = le16_to_cpu(req->cmpl_ring);
- intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+ u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
+ u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
+ u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
+ u16 dst = BNXT_HWRM_CHNL_CHIMP;
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
if (msg_len > bp->hwrm_max_ext_req_len ||
@@ -3748,6 +3797,23 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
return -EINVAL;
}
+ if (bnxt_hwrm_kong_chnl(bp, req)) {
+ dst = BNXT_HWRM_CHNL_KONG;
+ bar_offset = BNXT_GRCPF_REG_KONG_COMM;
+ doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
+ resp = bp->hwrm_cmd_kong_resp_addr;
+ resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
+ }
+
+ memset(resp, 0, PAGE_SIZE);
+ cp_ring_id = le16_to_cpu(req->cmpl_ring);
+ intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
+
+ req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
+ /* currently supports only one outstanding message */
+ if (intr_process)
+ bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
+
if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
msg_len > BNXT_HWRM_MAX_REQ_LEN) {
void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
@@ -3781,17 +3847,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
/* Write request msg to hwrm channel */
- __iowrite32_copy(bp->bar0, data, msg_len / 4);
+ __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
for (i = msg_len; i < max_req_len; i += 4)
- writel(0, bp->bar0 + i);
-
- /* currently supports only one outstanding message */
- if (intr_process)
- bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
+ writel(0, bp->bar0 + bar_offset + i);
/* Ring channel doorbell */
- writel(1, bp->bar0 + 0x100);
+ writel(1, bp->bar0 + doorbell_offset);
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
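(Requests routed to the new Kong mailbox get their own BAR0 communication window and doorbell instead of the Chimp defaults. A standalone sketch of the selection, using the offsets this patch adds to bnxt.h; the routing decision itself is simply assumed true here:)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BNXT_GRCPF_REG_CHIMP_COMM         0x0
#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
#define BNXT_GRCPF_REG_KONG_COMM          0xA00
#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER  0xB00

int main(void)
{
        bool kong = true; /* stands in for bnxt_hwrm_kong_chnl(bp, req) */
        uint32_t bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
        uint32_t doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;

        if (kong) {
                bar_offset = BNXT_GRCPF_REG_KONG_COMM;
                doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
        }
        /* the request is copied to bar0 + bar_offset, then the doorbell rung */
        printf("msg @ +0x%x, doorbell @ +0x%x\n",
               (unsigned int)bar_offset, (unsigned int)doorbell_offset);
        return 0;
}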
@@ -3806,10 +3868,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
- resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+ resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
+
if (intr_process) {
+ u16 seq_id = bp->hwrm_intr_seq_id;
+
/* Wait until hwrm response cmpl interrupt is processed */
- while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+ while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
i++ < tmo_count) {
/* on first few passes, just barely sleep */
if (i < HWRM_SHORT_TIMEOUT_COUNTER)
@@ -3820,14 +3885,14 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
HWRM_MAX_TIMEOUT);
}
- if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
+ if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
le16_to_cpu(req->req_type));
return -1;
}
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
- valid = bp->hwrm_cmd_resp_addr + len - 1;
+ valid = resp_addr + len - 1;
} else {
int j;
@@ -3855,7 +3920,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
/* Last byte of resp contains valid bit */
- valid = bp->hwrm_cmd_resp_addr + len - 1;
+ valid = resp_addr + len - 1;
for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
/* make sure we read from updated DMA memory */
dma_rmb();
@@ -3990,6 +4055,10 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
}
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+ req.flags |= cpu_to_le32(
+ FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
+
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
@@ -4118,12 +4187,11 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
- int rc = 0;
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
- struct hwrm_cfa_ntuple_filter_alloc_output *resp =
- bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct flow_keys *keys = &fltr->fkeys;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
+ int rc = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
@@ -4169,8 +4237,10 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc)
+ if (!rc) {
+ resp = bnxt_get_hwrm_resp_addr(bp, &req);
fltr->filter_id = resp->ntuple_filter_id;
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -5161,7 +5231,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
cp = le16_to_cpu(resp->alloc_cmpl_rings);
stats = le16_to_cpu(resp->alloc_stat_ctx);
- cp = min_t(u16, cp, stats);
+ hw_resc->resv_irqs = cp;
if (bp->flags & BNXT_FLAG_CHIP_P5) {
int rx = hw_resc->resv_rx_rings;
int tx = hw_resc->resv_tx_rings;
@@ -5175,10 +5245,11 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
hw_resc->resv_rx_rings = rx;
hw_resc->resv_tx_rings = tx;
}
- cp = le16_to_cpu(resp->alloc_msix);
+ hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
hw_resc->resv_hw_ring_grps = rx;
}
hw_resc->resv_cp_rings = cp;
+ hw_resc->resv_stat_ctxs = stats;
}
mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
@@ -5208,7 +5279,7 @@ static bool bnxt_rfs_supported(struct bnxt *bp);
static void
__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
int tx_rings, int rx_rings, int ring_grps,
- int cp_rings, int vnics)
+ int cp_rings, int stats, int vnics)
{
u32 enables = 0;
@@ -5250,7 +5321,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
req->num_rsscos_ctxs =
cpu_to_le16(ring_grps + 1);
}
- req->num_stat_ctxs = req->num_cmpl_rings;
+ req->num_stat_ctxs = cpu_to_le16(stats);
req->num_vnics = cpu_to_le16(vnics);
}
req->enables = cpu_to_le32(enables);
@@ -5260,7 +5331,7 @@ static void
__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
struct hwrm_func_vf_cfg_input *req, int tx_rings,
int rx_rings, int ring_grps, int cp_rings,
- int vnics)
+ int stats, int vnics)
{
u32 enables = 0;
@@ -5293,7 +5364,7 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
}
- req->num_stat_ctxs = req->num_cmpl_rings;
+ req->num_stat_ctxs = cpu_to_le16(stats);
req->num_vnics = cpu_to_le16(vnics);
req->enables = cpu_to_le32(enables);
@@ -5301,13 +5372,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
static int
bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int vnics)
+ int ring_grps, int cp_rings, int stats, int vnics)
{
struct hwrm_func_cfg_input req = {0};
int rc;
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, vnics);
+ cp_rings, stats, vnics);
if (!req.enables)
return 0;
@@ -5324,7 +5395,7 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
static int
bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int vnics)
+ int ring_grps, int cp_rings, int stats, int vnics)
{
struct hwrm_func_vf_cfg_input req = {0};
int rc;
@@ -5335,7 +5406,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, vnics);
+ cp_rings, stats, vnics);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return -ENOMEM;
@@ -5345,15 +5416,17 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
- int cp, int vnic)
+ int cp, int stat, int vnic)
{
if (BNXT_PF(bp))
- return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
+ return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
+ vnic);
else
- return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
+ return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
+ vnic);
}
-static int bnxt_cp_rings_in_use(struct bnxt *bp)
+int bnxt_nq_rings_in_use(struct bnxt *bp)
{
int cp = bp->cp_nr_rings;
int ulp_msix, ulp_base;
@@ -5368,11 +5441,28 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
return cp;
}
+static int bnxt_cp_rings_in_use(struct bnxt *bp)
+{
+ int cp;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ return bnxt_nq_rings_in_use(bp);
+
+ cp = bp->tx_nr_rings + bp->rx_nr_rings;
+ return cp;
+}
+
+static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
+{
+ return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
+}
+
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int cp = bnxt_cp_rings_in_use(bp);
- int rx = bp->rx_nr_rings;
+ int nq = bnxt_nq_rings_in_use(bp);
+ int rx = bp->rx_nr_rings, stat;
int vnic = 1, grp = rx;
if (bp->hwrm_spec_code < 0x10601)
@@ -5385,9 +5475,11 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
+ stat = bnxt_get_func_stat_ctxs(bp);
if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_vnics != vnic ||
+ hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
+ hw_resc->resv_stat_ctxs != stat ||
(hw_resc->resv_hw_ring_grps != grp &&
!(bp->flags & BNXT_FLAG_CHIP_P5))))
return true;
@@ -5397,12 +5489,12 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int cp = bnxt_cp_rings_in_use(bp);
+ int cp = bnxt_nq_rings_in_use(bp);
int tx = bp->tx_nr_rings;
int rx = bp->rx_nr_rings;
int grp, rx_rings, rc;
+ int vnic = 1, stat;
bool sh = false;
- int vnic = 1;
if (!bnxt_need_reserve_rings(bp))
return 0;
@@ -5414,17 +5506,19 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
grp = bp->rx_nr_rings;
+ stat = bnxt_get_func_stat_ctxs(bp);
- rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
+ rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
if (rc)
return rc;
tx = hw_resc->resv_tx_rings;
if (BNXT_NEW_RM(bp)) {
rx = hw_resc->resv_rx_rings;
- cp = hw_resc->resv_cp_rings;
+ cp = hw_resc->resv_irqs;
grp = hw_resc->resv_hw_ring_grps;
vnic = hw_resc->resv_vnics;
+ stat = hw_resc->resv_stat_ctxs;
}
rx_rings = rx;
@@ -5443,6 +5537,10 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
}
}
rx_rings = min_t(int, rx_rings, grp);
+ cp = min_t(int, cp, bp->cp_nr_rings);
+ if (stat > bnxt_get_ulp_stat_ctxs(bp))
+ stat -= bnxt_get_ulp_stat_ctxs(bp);
+ cp = min_t(int, cp, stat);
rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx = rx_rings << 1;
@@ -5451,14 +5549,15 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
bp->rx_nr_rings = rx_rings;
bp->cp_nr_rings = cp;
- if (!tx || !rx || !cp || !grp || !vnic)
+ if (!tx || !rx || !cp || !grp || !vnic || !stat)
return -ENOMEM;
return rc;
}
static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int vnics)
+ int ring_grps, int cp_rings, int stats,
+ int vnics)
{
struct hwrm_func_vf_cfg_input req = {0};
u32 flags;
@@ -5468,7 +5567,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
return 0;
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, vnics);
+ cp_rings, stats, vnics);
flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
@@ -5486,14 +5585,15 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int vnics)
+ int ring_grps, int cp_rings, int stats,
+ int vnics)
{
struct hwrm_func_cfg_input req = {0};
u32 flags;
int rc;
__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
- cp_rings, vnics);
+ cp_rings, stats, vnics);
flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
if (BNXT_NEW_RM(bp)) {
flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
@@ -5514,17 +5614,19 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
}
static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
- int ring_grps, int cp_rings, int vnics)
+ int ring_grps, int cp_rings, int stats,
+ int vnics)
{
if (bp->hwrm_spec_code < 0x10801)
return 0;
if (BNXT_PF(bp))
return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
- ring_grps, cp_rings, vnics);
+ ring_grps, cp_rings, stats,
+ vnics);
return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
- cp_rings, vnics);
+ cp_rings, stats, vnics);
}
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
@@ -5949,8 +6051,11 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
pg_size = 2 << 4;
*pg_attr = pg_size;
- if (rmem->nr_pages > 1) {
- *pg_attr |= 1;
+ if (rmem->depth >= 1) {
+ if (rmem->depth == 2)
+ *pg_attr |= 2;
+ else
+ *pg_attr |= 1;
*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
} else {
*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
@@ -6027,6 +6132,22 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
&req.stat_pg_size_stat_lvl,
&req.stat_page_dir);
}
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
+ ctx_pg = &ctx->mrav_mem;
+ req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.mrav_pg_size_mrav_lvl,
+ &req.mrav_page_dir);
+ }
+ if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
+ ctx_pg = &ctx->tim_mem;
+ req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.tim_pg_size_tim_lvl,
+ &req.tim_page_dir);
+ }
for (i = 0, num_entries = &req.tqm_sp_num_entries,
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
@@ -6047,25 +6168,104 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
}
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
- struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size)
+ struct bnxt_ctx_pg_info *ctx_pg)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
- if (!mem_size)
- return 0;
-
- rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
- if (rmem->nr_pages > MAX_CTX_PAGES) {
- rmem->nr_pages = 0;
- return -EINVAL;
- }
rmem->page_size = BNXT_PAGE_SIZE;
rmem->pg_arr = ctx_pg->ctx_pg_arr;
rmem->dma_arr = ctx_pg->ctx_dma_arr;
rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
+ if (rmem->depth >= 1)
+ rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
return bnxt_alloc_ring(bp, rmem);
}
+static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
+ u8 depth)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+ int rc;
+
+ if (!mem_size)
+ return 0;
+
+ ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
+ if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
+ ctx_pg->nr_pages = 0;
+ return -EINVAL;
+ }
+ if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
+ int nr_tbls, i;
+
+ rmem->depth = 2;
+ ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
+ GFP_KERNEL);
+ if (!ctx_pg->ctx_pg_tbl)
+ return -ENOMEM;
+ nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
+ rmem->nr_pages = nr_tbls;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
+ if (rc)
+ return rc;
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnxt_ctx_pg_info *pg_tbl;
+
+ pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
+ if (!pg_tbl)
+ return -ENOMEM;
+ ctx_pg->ctx_pg_tbl[i] = pg_tbl;
+ rmem = &pg_tbl->ring_mem;
+ rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
+ rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
+ rmem->depth = 1;
+ rmem->nr_pages = MAX_CTX_PAGES;
+ if (i == (nr_tbls - 1))
+ rmem->nr_pages = ctx_pg->nr_pages %
+ MAX_CTX_PAGES;
+ rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
+ if (rc)
+ break;
+ }
+ } else {
+ rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
+ if (rmem->nr_pages > 1 || depth)
+ rmem->depth = 1;
+ rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
+ }
+ return rc;
+}
+
+static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
+ struct bnxt_ctx_pg_info *ctx_pg)
+{
+ struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
+
+ if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
+ ctx_pg->ctx_pg_tbl) {
+ int i, nr_tbls = rmem->nr_pages;
+
+ for (i = 0; i < nr_tbls; i++) {
+ struct bnxt_ctx_pg_info *pg_tbl;
+ struct bnxt_ring_mem_info *rmem2;
+
+ pg_tbl = ctx_pg->ctx_pg_tbl[i];
+ if (!pg_tbl)
+ continue;
+ rmem2 = &pg_tbl->ring_mem;
+ bnxt_free_ring(bp, rmem2);
+ ctx_pg->ctx_pg_arr[i] = NULL;
+ kfree(pg_tbl);
+ ctx_pg->ctx_pg_tbl[i] = NULL;
+ }
+ kfree(ctx_pg->ctx_pg_tbl);
+ ctx_pg->ctx_pg_tbl = NULL;
+ }
+ bnxt_free_ring(bp, rmem);
+ ctx_pg->nr_pages = 0;
+}
+
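(With 4 KB pages, assumed below, and 8-byte PTEs, a single-level page table covers MAX_CTX_PAGES = 512 pages, i.e. 2 MB of context memory; the second indirection level added here multiplies that by another 512, which is the MAX_CTX_TOTAL_PAGES bound introduced in bnxt.h later in this patch. The arithmetic, as a runnable check:)

#include <stdio.h>

#define BNXT_PAGE_SIZE      4096 /* assumed 4K kernel pages */
#define MAX_CTX_PAGES       (BNXT_PAGE_SIZE / 8)
#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)

int main(void)
{
        unsigned long one_level = (unsigned long)MAX_CTX_PAGES * BNXT_PAGE_SIZE;
        unsigned long two_level = (unsigned long)MAX_CTX_TOTAL_PAGES * BNXT_PAGE_SIZE;

        /* prints "1-level: 2 MB, 2-level: 1024 MB" */
        printf("1-level: %lu MB, 2-level: %lu MB\n",
               one_level >> 20, two_level >> 20);
        return 0;
}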
static void bnxt_free_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
@@ -6076,16 +6276,18 @@ static void bnxt_free_ctx_mem(struct bnxt *bp)
if (ctx->tqm_mem[0]) {
for (i = 0; i < bp->max_q + 1; i++)
- bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem);
+ bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
kfree(ctx->tqm_mem[0]);
ctx->tqm_mem[0] = NULL;
}
- bnxt_free_ring(bp, &ctx->stat_mem.ring_mem);
- bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem);
- bnxt_free_ring(bp, &ctx->cq_mem.ring_mem);
- bnxt_free_ring(bp, &ctx->srq_mem.ring_mem);
- bnxt_free_ring(bp, &ctx->qp_mem.ring_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
+ bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
}
@@ -6094,6 +6296,9 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
u32 mem_size, ena, entries;
+ u32 extra_srqs = 0;
+ u32 extra_qps = 0;
+ u8 pg_lvl = 1;
int i, rc;
rc = bnxt_hwrm_func_backing_store_qcaps(bp);
@@ -6106,24 +6311,31 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
+ if (bp->flags & BNXT_FLAG_ROCE_CAP) {
+ pg_lvl = 2;
+ extra_qps = 65536;
+ extra_srqs = 8192;
+ }
+
ctx_pg = &ctx->qp_mem;
- ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
+ ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
+ extra_qps;
mem_size = ctx->qp_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
if (rc)
return rc;
ctx_pg = &ctx->srq_mem;
- ctx_pg->entries = ctx->srq_max_l2_entries;
+ ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
mem_size = ctx->srq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
if (rc)
return rc;
ctx_pg = &ctx->cq_mem;
- ctx_pg->entries = ctx->cq_max_l2_entries;
+ ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
mem_size = ctx->cq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
if (rc)
return rc;
@@ -6131,26 +6343,47 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg->entries = ctx->vnic_max_vnic_entries +
ctx->vnic_max_ring_table_entries;
mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
if (rc)
return rc;
ctx_pg = &ctx->stat_mem;
ctx_pg->entries = ctx->stat_max_entries;
mem_size = ctx->stat_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
+ if (rc)
+ return rc;
+
+ ena = 0;
+ if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+ goto skip_rdma;
+
+ ctx_pg = &ctx->mrav_mem;
+ ctx_pg->entries = extra_qps * 4;
+ mem_size = ctx->mrav_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
+ if (rc)
+ return rc;
+ ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+
+ ctx_pg = &ctx->tim_mem;
+ ctx_pg->entries = ctx->qp_mem.entries;
+ mem_size = ctx->tim_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
if (rc)
return rc;
+ ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
- entries = ctx->qp_max_l2_entries;
+skip_rdma:
+ entries = ctx->qp_max_l2_entries + extra_qps;
entries = roundup(entries, ctx->tqm_entries_multiple);
entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
ctx->tqm_max_entries_per_ring);
- for (i = 0, ena = 0; i < bp->max_q + 1; i++) {
+ for (i = 0; i < bp->max_q + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
ctx_pg->entries = entries;
mem_size = ctx->tqm_entry_size * entries;
- rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
if (rc)
return rc;
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
@@ -6177,7 +6410,8 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
req.fid = cpu_to_le16(0xffff);
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
if (rc) {
rc = -EIO;
goto hwrm_func_resc_qcaps_exit;
@@ -6207,7 +6441,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
if (bp->flags & BNXT_FLAG_CHIP_P5) {
u16 max_msix = le16_to_cpu(resp->max_msix);
- hw_resc->max_irqs = min_t(u16, hw_resc->max_irqs, max_msix);
+ hw_resc->max_nqs = max_msix;
hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
}
@@ -6292,6 +6526,8 @@ hwrm_func_qcaps_exit:
return rc;
}
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
+
static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc;
@@ -6299,6 +6535,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
rc = __bnxt_hwrm_func_qcaps(bp);
if (rc)
return rc;
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
+ return rc;
+ }
if (bp->hwrm_spec_code >= 0x10803) {
rc = bnxt_alloc_ctx_mem(bp);
if (rc)
@@ -6422,6 +6663,13 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
(dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
+ if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
+
+ if (dev_caps_cfg &
+ VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -6468,6 +6716,7 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
{
struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
struct hwrm_port_qstats_ext_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
int rc;
@@ -6490,6 +6739,34 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
bp->fw_rx_stats_ext_size = 0;
bp->fw_tx_stats_ext_size = 0;
}
+ if (bp->fw_tx_stats_ext_size <=
+ offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ bp->pri2cos_valid = 0;
+ return rc;
+ }
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
+ req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
+
+ rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ struct hwrm_queue_pri2cos_qcfg_output *resp2;
+ u8 *pri2cos;
+ int i, j;
+
+ resp2 = bp->hwrm_cmd_resp_addr;
+ pri2cos = &resp2->pri0_cos_queue_id;
+ for (i = 0; i < 8; i++) {
+ u8 queue_id = pri2cos[i];
+
+ for (j = 0; j < bp->max_q; j++) {
+ if (bp->q_ids[j] == queue_id)
+ bp->pri2cos[i] = j;
+ }
+ }
+ bp->pri2cos_valid = 1;
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
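(The PRI2COS qcfg response reports, for each of the eight priorities, a hardware CoS queue id; the loop above inverts that into an index into bp->q_ids. A runnable toy version with made-up register contents:)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* example values; the real ones come from HWRM_QUEUE_PRI2COS_QCFG */
        uint8_t pri2cos_raw[8] = { 0, 0, 0, 4, 4, 0, 0, 0 };
        uint8_t q_ids[2] = { 0, 4 }; /* bp->q_ids with bp->max_q == 2 */
        uint8_t pri2cos[8];
        int i, j;

        for (i = 0; i < 8; i++)
                for (j = 0; j < 2; j++)
                        if (q_ids[j] == pri2cos_raw[i])
                                pri2cos[i] = j;

        for (i = 0; i < 8; i++)
                printf("prio %d -> cos idx %d\n", i, (int)pri2cos[i]);
        return 0;
}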
@@ -7014,25 +7291,28 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
return bp->hw_resc.max_stat_ctxs;
}
-void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
-{
- bp->hw_resc.max_stat_ctxs = max;
-}
-
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
{
return bp->hw_resc.max_cp_rings;
}
-unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
+static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
{
- return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
+ unsigned int cp = bp->hw_resc.max_cp_rings;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ cp -= bnxt_get_ulp_msix_num(bp);
+
+ return cp;
}
static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
+
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}
@@ -7041,6 +7321,26 @@ static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
bp->hw_resc.max_irqs = max_irqs;
}
+unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
+{
+ unsigned int cp;
+
+ cp = bnxt_get_max_func_cp_rings_for_en(bp);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ return cp - bp->rx_nr_rings - bp->tx_nr_rings;
+ else
+ return cp - bp->cp_nr_rings;
+}
+
+unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
+{
+ unsigned int stat;
+
+ stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp);
+ stat -= bp->cp_nr_rings;
+ return stat;
+}
+
int bnxt_get_avail_msix(struct bnxt *bp, int num)
{
int max_cp = bnxt_get_max_func_cp_rings(bp);
@@ -7048,7 +7348,9 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
int total_req = bp->cp_nr_rings + num;
int max_idx, avail_msix;
- max_idx = min_t(int, bp->total_irqs, max_cp);
+ max_idx = bp->total_irqs;
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ max_idx = min_t(int, bp->total_irqs, max_cp);
avail_msix = max_idx - bp->cp_nr_rings;
if (!BNXT_NEW_RM(bp) || avail_msix >= num)
return avail_msix;
@@ -7066,7 +7368,7 @@ static int bnxt_get_num_msix(struct bnxt *bp)
if (!BNXT_NEW_RM(bp))
return bnxt_get_max_func_irqs(bp);
- return bnxt_cp_rings_in_use(bp);
+ return bnxt_nq_rings_in_use(bp);
}
static int bnxt_init_msix(struct bnxt *bp)
@@ -7176,23 +7478,26 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
int bnxt_reserve_rings(struct bnxt *bp)
{
int tcs = netdev_get_num_tc(bp->dev);
+ bool reinit_irq = false;
int rc;
if (!bnxt_need_reserve_rings(bp))
return 0;
- rc = __bnxt_reserve_rings(bp);
- if (rc) {
- netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
- return rc;
- }
if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
- rc = bnxt_init_int_mode(bp);
+ reinit_irq = true;
+ }
+ rc = __bnxt_reserve_rings(bp);
+ if (reinit_irq) {
+ if (!rc)
+ rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
- if (rc)
- return rc;
+ }
+ if (rc) {
+ netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
+ return rc;
}
if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
@@ -7200,7 +7505,6 @@ int bnxt_reserve_rings(struct bnxt *bp)
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
return -ENOMEM;
}
- bp->num_stat_ctxs = bp->cp_nr_rings;
return 0;
}
@@ -7794,6 +8098,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_stat_ctxs = 0;
+ hw_resc->resv_irqs = 0;
hw_resc->resv_tx_rings = 0;
hw_resc->resv_rx_rings = 0;
hw_resc->resv_hw_ring_grps = 0;
@@ -8232,6 +8538,9 @@ static bool bnxt_drv_busy(struct bnxt *bp)
test_bit(BNXT_STATE_READ_STATS, &bp->state));
}
+static void bnxt_get_ring_stats(struct bnxt *bp,
+ struct rtnl_link_stats64 *stats);
+
static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
bool link_re_init)
{
@@ -8257,6 +8566,9 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
del_timer_sync(&bp->timer);
bnxt_free_skbs(bp);
+ /* Save ring stats before shutdown */
+ if (bp->bnapi)
+ bnxt_get_ring_stats(bp, &bp->net_stats_prev);
if (irq_re_init) {
bnxt_free_irq(bp);
bnxt_del_napi(bp);
@@ -8318,23 +8630,12 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
-static void
-bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+static void bnxt_get_ring_stats(struct bnxt *bp,
+ struct rtnl_link_stats64 *stats)
{
- u32 i;
- struct bnxt *bp = netdev_priv(dev);
+ int i;
- set_bit(BNXT_STATE_READ_STATS, &bp->state);
- /* Make sure bnxt_close_nic() sees that we are reading stats before
- * we check the BNXT_STATE_OPEN flag.
- */
- smp_mb__after_atomic();
- if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
- clear_bit(BNXT_STATE_READ_STATS, &bp->state);
- return;
- }
- /* TODO check if we need to synchronize with bnxt_close path */
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -8363,6 +8664,40 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
}
+}
+
+static void bnxt_add_prev_stats(struct bnxt *bp,
+ struct rtnl_link_stats64 *stats)
+{
+ struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
+
+ stats->rx_packets += prev_stats->rx_packets;
+ stats->tx_packets += prev_stats->tx_packets;
+ stats->rx_bytes += prev_stats->rx_bytes;
+ stats->tx_bytes += prev_stats->tx_bytes;
+ stats->rx_missed_errors += prev_stats->rx_missed_errors;
+ stats->multicast += prev_stats->multicast;
+ stats->tx_dropped += prev_stats->tx_dropped;
+}
+
+static void
+bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ set_bit(BNXT_STATE_READ_STATS, &bp->state);
+ /* Make sure bnxt_close_nic() sees that we are reading stats before
+ * we check the BNXT_STATE_OPEN flag.
+ */
+ smp_mb__after_atomic();
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ clear_bit(BNXT_STATE_READ_STATS, &bp->state);
+ *stats = bp->net_stats_prev;
+ return;
+ }
+
+ bnxt_get_ring_stats(bp, stats);
+ bnxt_add_prev_stats(bp, stats);
if (bp->flags & BNXT_FLAG_PORT_STATS) {
struct rx_port_stats *rx = bp->hw_rx_port_stats;
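(Ring counters live in ring memory and are lost when the rings are freed on close, so __bnxt_close_nic() above snapshots them into net_stats_prev and bnxt_get_stats64() adds the snapshot back, keeping the netdev counters monotonic across down/up cycles. In miniature, with invented values:)

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t prev_rx = 1000; /* saved at close (net_stats_prev) */
        uint64_t ring_rx = 42;   /* counted since the last open */

        /* bnxt_get_stats64(): current ring totals plus the pre-close snapshot */
        printf("rx_packets=%llu\n", (unsigned long long)(ring_rx + prev_rx));
        return 0;
}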
@@ -8598,12 +8933,12 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
if (vnics == bp->hw_resc.resv_vnics)
return true;
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
if (vnics <= bp->hw_resc.resv_vnics)
return true;
netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
- bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
+ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
return false;
#else
return false;
@@ -9014,7 +9349,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp)
{
int max_rx, max_tx, tx_sets = 1;
- int tx_rings_needed;
+ int tx_rings_needed, stats;
int rx_rings = rx;
int cp, vnics, rc;
@@ -9039,10 +9374,13 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx_rings <<= 1;
cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
- if (BNXT_NEW_RM(bp))
+ stats = cp;
+ if (BNXT_NEW_RM(bp)) {
cp += bnxt_get_ulp_msix_num(bp);
+ stats += bnxt_get_ulp_stat_ctxs(bp);
+ }
return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
- vnics);
+ stats, vnics);
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -9078,7 +9416,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
* 1 coal_buf x bufs_per_record = 1 completion record.
*/
coal = &bp->rx_coal;
- coal->coal_ticks = 14;
+ coal->coal_ticks = 10;
coal->coal_bufs = 30;
coal->coal_ticks_irq = 1;
coal->coal_bufs_irq = 2;
@@ -9266,7 +9604,6 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
bp->tx_nr_rings += bp->tx_nr_rings_xdp;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
- bp->num_stat_ctxs = bp->cp_nr_rings;
if (netif_running(bp->dev))
return bnxt_open_nic(bp, true, false);
@@ -9589,7 +9926,7 @@ static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
}
static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
- u16 flags)
+ u16 flags, struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
struct nlattr *attr, *br_spec;
@@ -9732,6 +10069,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
kfree(bp->ctx);
bp->ctx = NULL;
bnxt_cleanup_pci(bp);
+ bnxt_free_port_stats(bp);
free_netdev(dev);
}
@@ -9799,13 +10137,16 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int *max_cp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int max_ring_grps = 0;
+ int max_ring_grps = 0, max_irq;
*max_tx = hw_resc->max_tx_rings;
*max_rx = hw_resc->max_rx_rings;
- *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
- hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
- *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+ *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
+ max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
+ bnxt_get_ulp_msix_num(bp),
+ hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ *max_cp = min_t(int, *max_cp, max_irq);
max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
*max_cp -= 1;
@@ -9813,6 +10154,11 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
}
if (bp->flags & BNXT_FLAG_AGG_RINGS)
*max_rx >>= 1;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+ /* On P5 chips, the max_cp output param should be the available NQs */
+ *max_cp = max_irq;
+ }
*max_rx = min_t(int, *max_rx, max_ring_grps);
}
@@ -9929,7 +10275,6 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
netdev_warn(bp->dev, "2nd rings reservation failed.\n");
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
}
- bp->num_stat_ctxs = bp->cp_nr_rings;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bp->rx_nr_rings++;
bp->cp_nr_rings++;
@@ -10063,6 +10408,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
+ rc = bnxt_alloc_kong_hwrm_resources(bp);
+ if (rc)
+ bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
+ }
+
if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
rc = bnxt_alloc_hwrm_short_cmd_req(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9e99d4ab3e06..a451796deefe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -567,7 +567,6 @@ struct nqe_cn {
#define HWRM_RESP_LEN_MASK 0xffff0000
#define HWRM_RESP_LEN_SFT 16
#define HWRM_RESP_VALID_MASK 0xff000000
-#define HWRM_SEQ_ID_INVALID -1
#define BNXT_HWRM_REQ_MAX_SIZE 128
#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \
BNXT_HWRM_REQ_MAX_SIZE)
@@ -585,6 +584,9 @@ struct nqe_cn {
#define HWRM_VALID_BIT_DELAY_USEC 20
+#define BNXT_HWRM_CHNL_CHIMP 0
+#define BNXT_HWRM_CHNL_KONG 1
+
#define BNXT_RX_EVENT 1
#define BNXT_AGG_EVENT 2
#define BNXT_TX_EVENT 4
@@ -615,9 +617,12 @@ struct bnxt_sw_rx_agg_bd {
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
- u32 flags;
+ u16 flags;
#define BNXT_RMEM_VALID_PTE_FLAG 1
#define BNXT_RMEM_RING_PTE_FLAG 2
+#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
+
+ u16 depth;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -927,7 +932,10 @@ struct bnxt_hw_resc {
u16 resv_vnics;
u16 min_stat_ctxs;
u16 max_stat_ctxs;
+ u16 resv_stat_ctxs;
+ u16 max_nqs;
u16 max_irqs;
+ u16 resv_irqs;
};
#if defined(CONFIG_BNXT_SRIOV)
@@ -1110,9 +1118,14 @@ struct bnxt_test_info {
char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
};
-#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
-#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
-#define BNXT_CAG_REG_BASE 0x300000
+#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
+#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
+#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
+#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
+#define BNXT_CAG_REG_BASE 0x300000
+
+#define BNXT_GRCPF_REG_KONG_COMM 0xA00
+#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
struct bnxt_tc_flow_stats {
u64 packets;
@@ -1180,12 +1193,15 @@ struct bnxt_vf_rep {
#define PTU_PTE_NEXT_TO_LAST 0x4UL
#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
+#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
struct bnxt_ctx_pg_info {
u32 entries;
+ u32 nr_pages;
void *ctx_pg_arr[MAX_CTX_PAGES];
dma_addr_t ctx_dma_arr[MAX_CTX_PAGES];
struct bnxt_ring_mem_info ring_mem;
+ struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
struct bnxt_ctx_mem_info {
@@ -1221,6 +1237,8 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info cq_mem;
struct bnxt_ctx_pg_info vnic_mem;
struct bnxt_ctx_pg_info stat_mem;
+ struct bnxt_ctx_pg_info mrav_mem;
+ struct bnxt_ctx_pg_info tim_mem;
struct bnxt_ctx_pg_info *tqm_mem[9];
};
@@ -1415,8 +1433,6 @@ struct bnxt {
int cp_nr_pages;
int cp_nr_rings;
- int num_stat_ctxs;
-
/* grp_info indexed by completion ring index */
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
@@ -1456,21 +1472,27 @@ struct bnxt {
u32 msg_enable;
u32 fw_cap;
- #define BNXT_FW_CAP_SHORT_CMD 0x00000001
- #define BNXT_FW_CAP_LLDP_AGENT 0x00000002
- #define BNXT_FW_CAP_DCBX_AGENT 0x00000004
- #define BNXT_FW_CAP_NEW_RM 0x00000008
- #define BNXT_FW_CAP_IF_CHANGE 0x00000010
+ #define BNXT_FW_CAP_SHORT_CMD 0x00000001
+ #define BNXT_FW_CAP_LLDP_AGENT 0x00000002
+ #define BNXT_FW_CAP_DCBX_AGENT 0x00000004
+ #define BNXT_FW_CAP_NEW_RM 0x00000008
+ #define BNXT_FW_CAP_IF_CHANGE 0x00000010
+ #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
+ #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
- u32 hwrm_intr_seq_id;
+ u16 hwrm_cmd_kong_seq;
+ u16 hwrm_intr_seq_id;
void *hwrm_short_cmd_req_addr;
dma_addr_t hwrm_short_cmd_req_dma_addr;
void *hwrm_cmd_resp_addr;
dma_addr_t hwrm_cmd_resp_dma_addr;
+ void *hwrm_cmd_kong_resp_addr;
+ dma_addr_t hwrm_cmd_kong_resp_dma_addr;
+ struct rtnl_link_stats64 net_stats_prev;
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext;
@@ -1482,6 +1504,8 @@ struct bnxt {
int hw_port_stats_size;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
+ u8 pri2cos[8];
+ u8 pri2cos_valid;
u16 hwrm_max_req_len;
u16 hwrm_max_ext_req_len;
@@ -1668,6 +1692,66 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
}
}
+static inline bool bnxt_cfa_hwrm_message(u16 req_type)
+{
+ switch (req_type) {
+ case HWRM_CFA_ENCAP_RECORD_ALLOC:
+ case HWRM_CFA_ENCAP_RECORD_FREE:
+ case HWRM_CFA_DECAP_FILTER_ALLOC:
+ case HWRM_CFA_DECAP_FILTER_FREE:
+ case HWRM_CFA_NTUPLE_FILTER_ALLOC:
+ case HWRM_CFA_NTUPLE_FILTER_FREE:
+ case HWRM_CFA_NTUPLE_FILTER_CFG:
+ case HWRM_CFA_EM_FLOW_ALLOC:
+ case HWRM_CFA_EM_FLOW_FREE:
+ case HWRM_CFA_EM_FLOW_CFG:
+ case HWRM_CFA_FLOW_ALLOC:
+ case HWRM_CFA_FLOW_FREE:
+ case HWRM_CFA_FLOW_INFO:
+ case HWRM_CFA_FLOW_FLUSH:
+ case HWRM_CFA_FLOW_STATS:
+ case HWRM_CFA_METER_PROFILE_ALLOC:
+ case HWRM_CFA_METER_PROFILE_FREE:
+ case HWRM_CFA_METER_PROFILE_CFG:
+ case HWRM_CFA_METER_INSTANCE_ALLOC:
+ case HWRM_CFA_METER_INSTANCE_FREE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req)
+{
+ return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
+ bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)));
+}
+
+static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req)
+{
+ return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
+ req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr));
+}
+
+static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req)
+{
+ if (bnxt_hwrm_kong_chnl(bp, (struct input *)req))
+ return bp->hwrm_cmd_kong_resp_addr;
+ else
+ return bp->hwrm_cmd_resp_addr;
+}
+
+static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst)
+{
+ u16 seq_id;
+
+ if (dst == BNXT_HWRM_CHNL_CHIMP)
+ seq_id = bp->hwrm_cmd_seq++;
+ else
+ seq_id = bp->hwrm_cmd_kong_seq++;
+ return seq_id;
+}
+
extern const u16 bnxt_lhint_arr[];
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
@@ -1685,11 +1769,12 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
int bmap_size);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
+int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
-void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
+unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
-unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
+unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp);
void bnxt_tx_disable(struct bnxt *bp);
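The inline helpers added to bnxt.h above steer CFA commands to the secondary (Kong) firmware mailbox when BNXT_FW_CAP_KONG_MB_CHNL is set, with each channel keeping its own sequence counter and response buffer. A minimal sketch of the resulting dispatch, assuming a hypothetical wrapper bnxt_send_one_msg() around the real mailbox write:

/* Sketch only: bnxt_send_one_msg() is illustrative, not driver code. */
static int bnxt_send_one_msg(struct bnxt *bp, struct input *req)
{
	u16 dst = BNXT_HWRM_CHNL_CHIMP;

	if (bnxt_kong_hwrm_message(bp, req)) {
		/* CFA requests ride the Kong channel when the fw supports it */
		dst = BNXT_HWRM_CHNL_KONG;
		req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
	}
	req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
	/* ... then ring the BNXT_GRCPF_REG_{CHIMP,KONG}_COMM_TRIGGER doorbell ... */
	return 0;
}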
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index a85d2be986af..15c7041e937b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -471,7 +471,10 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
if (total_ets_bw > 100)
return -EINVAL;
- *tc = max_tc + 1;
+ if (max_tc >= bp->max_tc)
+ *tc = bp->max_tc;
+ else
+ *tc = max_tc + 1;
return 0;
}
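The bnxt_ets_validate() change above caps the returned TC count at the device maximum instead of returning max_tc + 1 unconditionally. The clamp in isolation, as a sketch (bnxt_clamp_tc() is illustrative, not driver code):

/* Equivalent clamp: with hw_max_tc == 4, a priority mapped to TC 5
 * no longer yields a count of 6.
 */
static u8 bnxt_clamp_tc(u8 max_tc_seen, u8 hw_max_tc)
{
	return min_t(u8, max_tc_seen + 1, hw_max_tc);
}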
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6cc69a58478a..adabbe94a259 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -207,6 +207,34 @@ reset_coalesce:
BNXT_TX_STATS_EXT_COS_ENTRY(6), \
BNXT_TX_STATS_EXT_COS_ENTRY(7) \
+#define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
+ { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
+ __stringify(counter##_pri##n) }
+
+#define BNXT_TX_STATS_PRI_ENTRY(counter, n) \
+ { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \
+ __stringify(counter##_pri##n) }
+
+#define BNXT_RX_STATS_PRI_ENTRIES(counter) \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 0), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 1), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 2), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 3), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 4), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 5), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 6), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 7)
+
+#define BNXT_TX_STATS_PRI_ENTRIES(counter) \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 0), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 1), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 2), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 3), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 4), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 5), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 7)
+
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
@@ -327,8 +355,41 @@ static const struct {
BNXT_TX_STATS_EXT_PFC_ENTRIES,
};
+static const struct {
+ long base_off;
+ char string[ETH_GSTRING_LEN];
+} bnxt_rx_bytes_pri_arr[] = {
+ BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
+};
+
+static const struct {
+ long base_off;
+ char string[ETH_GSTRING_LEN];
+} bnxt_rx_pkts_pri_arr[] = {
+ BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
+};
+
+static const struct {
+ long base_off;
+ char string[ETH_GSTRING_LEN];
+} bnxt_tx_bytes_pri_arr[] = {
+ BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
+};
+
+static const struct {
+ long base_off;
+ char string[ETH_GSTRING_LEN];
+} bnxt_tx_pkts_pri_arr[] = {
+ BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
+};
+
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
+#define BNXT_NUM_STATS_PRI \
+ (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \
+ ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
+ ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
+ ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
static int bnxt_get_num_stats(struct bnxt *bp)
{
@@ -339,9 +400,12 @@ static int bnxt_get_num_stats(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_PORT_STATS)
num_stats += BNXT_NUM_PORT_STATS;
- if (bp->flags & BNXT_FLAG_PORT_STATS_EXT)
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
num_stats += bp->fw_rx_stats_ext_size +
bp->fw_tx_stats_ext_size;
+ if (bp->pri2cos_valid)
+ num_stats += BNXT_NUM_STATS_PRI;
+ }
return num_stats;
}
@@ -369,8 +433,10 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
- if (!bp->bnapi)
- return;
+ if (!bp->bnapi) {
+ j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS;
+ goto skip_ring_stats;
+ }
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
bnxt_sw_func_stats[i].counter = 0;
@@ -395,6 +461,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
buf[j] = bnxt_sw_func_stats[i].counter;
+skip_ring_stats:
if (bp->flags & BNXT_FLAG_PORT_STATS) {
__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
@@ -415,6 +482,32 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
buf[j] = le64_to_cpu(*(tx_port_stats_ext +
bnxt_tx_port_stats_ext_arr[i].offset));
}
+ if (bp->pri2cos_valid) {
+ for (i = 0; i < 8; i++, j++) {
+ long n = bnxt_rx_bytes_pri_arr[i].base_off +
+ bp->pri2cos[i];
+
+ buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ }
+ for (i = 0; i < 8; i++, j++) {
+ long n = bnxt_rx_pkts_pri_arr[i].base_off +
+ bp->pri2cos[i];
+
+ buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
+ }
+ for (i = 0; i < 8; i++, j++) {
+ long n = bnxt_tx_bytes_pri_arr[i].base_off +
+ bp->pri2cos[i];
+
+ buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ }
+ for (i = 0; i < 8; i++, j++) {
+ long n = bnxt_tx_pkts_pri_arr[i].base_off +
+ bp->pri2cos[i];
+
+ buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
+ }
+ }
}
}
@@ -493,6 +586,28 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
bnxt_tx_port_stats_ext_arr[i].string);
buf += ETH_GSTRING_LEN;
}
+ if (bp->pri2cos_valid) {
+ for (i = 0; i < 8; i++) {
+ strcpy(buf,
+ bnxt_rx_bytes_pri_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < 8; i++) {
+ strcpy(buf,
+ bnxt_rx_pkts_pri_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < 8; i++) {
+ strcpy(buf,
+ bnxt_tx_bytes_pri_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < 8; i++) {
+ strcpy(buf,
+ bnxt_tx_pkts_pri_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
}
break;
case ETH_SS_TEST:
@@ -663,8 +778,6 @@ static int bnxt_set_channels(struct net_device *dev,
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
- bp->num_stat_ctxs = bp->cp_nr_rings;
-
/* After changing number of rx channels, update NTUPLE feature. */
netdev_update_features(dev);
if (netif_running(dev)) {
@@ -1526,14 +1639,22 @@ static int bnxt_flash_nvram(struct net_device *dev,
rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ netdev_info(dev,
+ "PF does not have admin privileges to flash the device\n");
+ rc = -EACCES;
+ } else if (rc) {
+ rc = -EIO;
+ }
return rc;
}
static int bnxt_firmware_reset(struct net_device *dev,
u16 dir_type)
{
- struct bnxt *bp = netdev_priv(dev);
struct hwrm_fw_reset_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
@@ -1573,7 +1694,15 @@ static int bnxt_firmware_reset(struct net_device *dev,
return -EINVAL;
}
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ netdev_info(dev,
+ "PF does not have admin privileges to reset the device\n");
+ rc = -EACCES;
+ } else if (rc) {
+ rc = -EIO;
+ }
+ return rc;
}
static int bnxt_flash_firmware(struct net_device *dev,
@@ -1780,9 +1909,9 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_nvm_install_update_input install = {0};
const struct firmware *fw;
+ int rc, hwrm_err = 0;
u32 item_len;
u16 index;
- int rc;
bnxt_hwrm_fw_set_time(bp);
@@ -1825,15 +1954,16 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
memcpy(kmem, fw->data, fw->size);
modify.host_src_addr = cpu_to_le64(dma_handle);
- rc = hwrm_send_message(bp, &modify, sizeof(modify),
- FLASH_PACKAGE_TIMEOUT);
+ hwrm_err = hwrm_send_message(bp, &modify,
+ sizeof(modify),
+ FLASH_PACKAGE_TIMEOUT);
dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
dma_handle);
}
}
release_firmware(fw);
- if (rc)
- return rc;
+ if (rc || hwrm_err)
+ goto err_exit;
if ((install_type & 0xffff) == 0)
install_type >>= 16;
@@ -1841,12 +1971,10 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
install.install_type = cpu_to_le32(install_type);
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &install, sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
- if (rc) {
- rc = -EOPNOTSUPP;
+ hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (hwrm_err)
goto flash_pkg_exit;
- }
if (resp->error_code) {
u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
@@ -1854,12 +1982,11 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
install.flags |= cpu_to_le16(
NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
- rc = _hwrm_send_message(bp, &install, sizeof(install),
- INSTALL_PACKAGE_TIMEOUT);
- if (rc) {
- rc = -EOPNOTSUPP;
+ hwrm_err = _hwrm_send_message(bp, &install,
+ sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+ if (hwrm_err)
goto flash_pkg_exit;
- }
}
}
@@ -1870,6 +1997,14 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
}
flash_pkg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
+err_exit:
+ if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) {
+ netdev_info(dev,
+ "PF does not have admin privileges to flash the device\n");
+ rc = -EACCES;
+ } else if (hwrm_err) {
+ rc = -EOPNOTSUPP;
+ }
return rc;
}
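The flash and reset paths above now translate the firmware status into standard errnos instead of returning raw HWRM codes to user space. The repeated pattern, factored into a sketch (bnxt_hwrm_to_errno() is illustrative, not driver code):

static int bnxt_hwrm_to_errno(int hwrm_err)
{
	if (!hwrm_err)
		return 0;
	if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED)
		return -EACCES;		/* PF lacks admin privilege */
	return -EIO;			/* flash paths; NVM install uses -EOPNOTSUPP */
}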
@@ -2450,17 +2585,37 @@ static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
+{
+ struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_phy_qcaps_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);
+
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
struct hwrm_port_phy_cfg_input *req)
{
struct bnxt_link_info *link_info = &bp->link_info;
- u16 fw_advertising = link_info->advertising;
+ u16 fw_advertising;
u16 fw_speed;
int rc;
if (!link_info->autoneg)
return 0;
+ rc = bnxt_query_force_speeds(bp, &fw_advertising);
+ if (rc)
+ return rc;
+
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
if (netif_carrier_ok(bp->dev))
fw_speed = bp->link_info.link_speed;
@@ -2572,6 +2727,7 @@ static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
static int bnxt_run_loopback(struct bnxt *bp)
{
struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
struct bnxt_cp_ring_info *cpr;
int pkt_size, i = 0;
struct sk_buff *skb;
@@ -2579,7 +2735,9 @@ static int bnxt_run_loopback(struct bnxt *bp)
u8 *data;
int rc;
- cpr = &txr->bnapi->cp_ring;
+ cpr = &rxr->bnapi->cp_ring;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
skb = netdev_alloc_skb(bp->dev, pkt_size);
if (!skb)
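The ethtool changes above synthesize per-priority counters by indexing the per-CoS extended port stats with the firmware-supplied pri2cos[] map (only when pri2cos_valid is set). A sketch of one such lookup, assuming rx_stats_ext points at the DMA-ed rx_port_stats_ext block:

/* Sketch: resolve the rx_bytes counter for one priority by adding the
 * priority's CoS queue to the cos0 base offset recorded in the array.
 */
static u64 bnxt_rx_bytes_for_pri(struct bnxt *bp, __le64 *rx_stats_ext, int pri)
{
	long idx = bnxt_rx_bytes_pri_arr[pri].base_off + bp->pri2cos[pri];

	return le64_to_cpu(*(rx_stats_ext + idx));
}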
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 5dd086059568..f1aaac8e6268 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -194,6 +194,8 @@ struct cmd_nums {
#define HWRM_STAT_CTX_QUERY 0xb2UL
#define HWRM_STAT_CTX_CLR_STATS 0xb3UL
#define HWRM_PORT_QSTATS_EXT 0xb4UL
+ #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL
+ #define HWRM_PORT_PHY_MDIO_READ 0xb6UL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -213,6 +215,7 @@ struct cmd_nums {
#define HWRM_WOL_FILTER_FREE 0xf1UL
#define HWRM_WOL_FILTER_QCFG 0xf2UL
#define HWRM_WOL_REASON_QCFG 0xf3UL
+ #define HWRM_CFA_METER_QCAPS 0xf4UL
#define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
#define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
#define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
@@ -239,6 +242,24 @@ struct cmd_nums {
#define HWRM_FW_IPC_MSG 0x110UL
#define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
#define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL
+ #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL
+ #define HWRM_CFA_FLOW_AGING_CFG 0x114UL
+ #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL
+ #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL
+ #define HWRM_CFA_CTX_MEM_RGTR 0x117UL
+ #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL
+ #define HWRM_CFA_CTX_MEM_QCTX 0x119UL
+ #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL
+ #define HWRM_CFA_COUNTER_QCAPS 0x11bUL
+ #define HWRM_CFA_COUNTER_CFG 0x11cUL
+ #define HWRM_CFA_COUNTER_QCFG 0x11dUL
+ #define HWRM_CFA_COUNTER_QSTATS 0x11eUL
+ #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL
+ #define HWRM_CFA_EEM_QCAPS 0x120UL
+ #define HWRM_CFA_EEM_CFG 0x121UL
+ #define HWRM_CFA_EEM_QCFG 0x122UL
+ #define HWRM_CFA_EEM_OP 0x123UL
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
#define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
@@ -335,6 +356,8 @@ struct ret_codes {
#define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
#define HWRM_ERR_CODE_NO_BUFFER 0x8UL
#define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
+ #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
@@ -363,8 +386,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 3
-#define HWRM_VERSION_STR "1.10.0.3"
+#define HWRM_VERSION_RSVD 33
+#define HWRM_VERSION_STR "1.10.0.33"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -411,6 +434,10 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL
#define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL
#define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -465,14 +492,27 @@ struct hwrm_ver_get_output {
/* eject_cmpl (size:128b/16B) */
struct eject_cmpl {
__le16 type;
- #define EJECT_CMPL_TYPE_MASK 0x3fUL
- #define EJECT_CMPL_TYPE_SFT 0
- #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
- #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ #define EJECT_CMPL_TYPE_MASK 0x3fUL
+ #define EJECT_CMPL_TYPE_SFT 0
+ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
+ #define EJECT_CMPL_FLAGS_MASK 0xffc0UL
+ #define EJECT_CMPL_FLAGS_SFT 6
+ #define EJECT_CMPL_FLAGS_ERROR 0x40UL
__le16 len;
__le32 opaque;
- __le32 v;
- #define EJECT_CMPL_V 0x1UL
+ __le16 v;
+ #define EJECT_CMPL_V 0x1UL
+ #define EJECT_CMPL_ERRORS_MASK 0xfffeUL
+ #define EJECT_CMPL_ERRORS_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
+ #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH
+ __le16 reserved16;
__le32 unused_2;
};
@@ -552,6 +592,10 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
@@ -647,6 +691,39 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change {
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
};
+/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */
+struct hwrm_async_event_cmpl_reset_notify {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
+};
+
/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
@@ -672,6 +749,74 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
};
+/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */
+struct hwrm_async_event_cmpl_hw_flow_aged {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31)
+ #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_req {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
+/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */
+struct hwrm_async_event_cmpl_eem_cache_flush_done {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -867,6 +1012,8 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL
#define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL
#define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL
+ #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
+ #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -902,7 +1049,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:640b/80B) */
+/* hwrm_func_qcfg_output (size:704b/88B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -919,6 +1066,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
#define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL
+ #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1000,7 +1148,11 @@ struct hwrm_func_qcfg_output {
__le16 alloc_sp_tx_rings;
__le16 alloc_stat_ctx;
__le16 alloc_msix;
- u8 unused_2[5];
+ __le16 registered_vfs;
+ u8 unused_1[3];
+ u8 always_1;
+ __le32 reset_addr_poll;
+ u8 unused_2[3];
u8 valid;
};
@@ -1031,6 +1183,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
#define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
#define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL
+ #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1235,6 +1388,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
#define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
#define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1888,7 +2042,8 @@ struct hwrm_func_drv_if_change_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
u8 unused_0[3];
u8 valid;
};
@@ -2864,6 +3019,60 @@ struct hwrm_port_phy_i2c_read_output {
u8 valid;
};
+/* hwrm_port_phy_mdio_write_input (size:320b/40B) */
+struct hwrm_port_phy_mdio_write_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ __le16 reg_data;
+ u8 cl45_mdio;
+ u8 unused_1[7];
+};
+
+/* hwrm_port_phy_mdio_write_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_write_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_port_phy_mdio_read_input (size:256b/32B) */
+struct hwrm_port_phy_mdio_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[2];
+ __le16 port_id;
+ u8 phy_addr;
+ u8 dev_addr;
+ __le16 reg_addr;
+ u8 cl45_mdio;
+ u8 unused_1;
+};
+
+/* hwrm_port_phy_mdio_read_output (size:128b/16B) */
+struct hwrm_port_phy_mdio_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 reg_data;
+ u8 unused_0[5];
+ u8 valid;
+};
+
/* hwrm_port_led_cfg_input (size:512b/64B) */
struct hwrm_port_led_cfg_input {
__le16 req_type;
@@ -4869,6 +5078,10 @@ struct hwrm_ring_grp_free_output {
u8 unused_0[7];
u8 valid;
};
+#define DEFAULT_FLOW_ID 0xFFFFFFFFUL
+#define ROCEV1_FLOW_ID 0xFFFFFFFEUL
+#define ROCEV2_FLOW_ID 0xFFFFFFFDUL
+#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL
/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
struct hwrm_cfa_l2_filter_alloc_input {
@@ -4937,20 +5150,21 @@ struct hwrm_cfa_l2_filter_alloc_input {
u8 unused_3;
__le32 src_id;
u8 tunnel_type;
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_4;
__le16 dst_id;
__le16 mirror_vnic_id;
@@ -5108,20 +5322,21 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
u8 l3_addr_type;
u8 t_l3_addr_type;
u8 tunnel_type;
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 tunnel_flags;
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
@@ -5326,20 +5541,21 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 pri_hint;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
@@ -5459,20 +5675,21 @@ struct hwrm_cfa_decap_filter_alloc_input {
#define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
__be32 tunnel_id;
u8 tunnel_type;
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_0;
__le16 unused_1;
u8 src_macaddr[6];
@@ -5559,20 +5776,23 @@ struct hwrm_cfa_flow_alloc_input {
#define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL
#define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL
__le16 src_fid;
__le32 tunnel_handle;
__le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
__le16 dst_fid;
__be16 l2_rewrite_vlan_tpid;
__be16 l2_rewrite_vlan_tci;
@@ -5597,20 +5817,21 @@ struct hwrm_cfa_flow_alloc_input {
__be16 l2_rewrite_smac[3];
u8 ip_proto;
u8 tunnel_type;
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
- #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
};
/* hwrm_cfa_flow_alloc_output (size:256b/32B) */
@@ -5623,7 +5844,8 @@ struct hwrm_cfa_flow_alloc_output {
u8 unused_0[2];
__le32 flow_id;
__le64 ext_flow_handle;
- u8 unused_1[7];
+ __le32 flow_counter_id;
+ u8 unused_1[3];
u8 valid;
};
@@ -5651,6 +5873,46 @@ struct hwrm_cfa_flow_free_output {
u8 valid;
};
+/* hwrm_cfa_flow_info_input (size:256b/32B) */
+struct hwrm_cfa_flow_info_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL
+ #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL
+ u8 unused_0[6];
+ __le64 ext_flow_handle;
+};
+
+/* hwrm_cfa_flow_info_output (size:448b/56B) */
+struct hwrm_cfa_flow_info_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ u8 profile;
+ __le16 src_fid;
+ __le16 dst_fid;
+ __le16 l2_ctxt_id;
+ __le64 em_info;
+ __le64 tcam_info;
+ __le64 vfp_tcam_info;
+ __le16 ar_id;
+ __le16 flow_handle;
+ __le32 tunnel_handle;
+ __le16 flow_timer;
+ u8 unused_0[5];
+ u8 valid;
+};
+
/* hwrm_cfa_flow_stats_input (size:640b/80B) */
struct hwrm_cfa_flow_stats_input {
__le16 req_type;
@@ -5757,6 +6019,128 @@ struct hwrm_cfa_vfr_free_output {
u8 valid;
};
+/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */
+struct hwrm_cfa_eem_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
+ __le32 unused_0;
+ __le32 supported;
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL
+ #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL
+ __le32 max_entries_supported;
+ __le16 key_entry_size;
+ __le16 record_entry_size;
+ __le16 efc_entry_size;
+ u8 unused_1;
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_cfg_input (size:320b/40B) */
+struct hwrm_cfa_eem_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 unused_0;
+ __le32 num_entries;
+ __le32 unused_1;
+ __le16 key0_ctx_id;
+ __le16 key1_ctx_id;
+ __le16 record_ctx_id;
+ __le16 efc_ctx_id;
+};
+
+/* hwrm_cfa_eem_cfg_output (size:128b/16B) */
+struct hwrm_cfa_eem_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */
+struct hwrm_cfa_eem_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL
+ __le32 unused_0;
+};
+
+/* hwrm_cfa_eem_qcfg_output (size:128b/16B) */
+struct hwrm_cfa_eem_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
+ __le32 num_entries;
+};
+
+/* hwrm_cfa_eem_op_input (size:192b/24B) */
+struct hwrm_cfa_eem_op_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL
+ __le16 unused_0;
+ __le16 op;
+ #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL
+ #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL
+ #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL
+ #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL
+ #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP
+};
+
+/* hwrm_cfa_eem_op_output (size:128b/16B) */
+struct hwrm_cfa_eem_op_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
@@ -5765,12 +6149,13 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
u8 unused_0[7];
};
@@ -5794,12 +6179,13 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
u8 unused_0;
__be16 tunnel_dst_port_val;
u8 unused_1[4];
@@ -5824,12 +6210,13 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6
u8 unused_0;
__le16 tunnel_dst_port_id;
u8 unused_1[4];
@@ -6040,7 +6427,9 @@ struct hwrm_fw_reset_input {
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL
#define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE
u8 host_idx;
- u8 unused_0[5];
+ u8 flags;
+ #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL
+ u8 unused_0[4];
};
/* hwrm_fw_reset_output (size:128b/16B) */
@@ -6137,6 +6526,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
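The new reset-notify async completion above packs the requested driver action, a reason code, and a delay into event_data1. A sketch of unpacking it with the masks defined in this header (bnxt_reset_notify_delay_ms() is illustrative, not driver code):

static u32 bnxt_reset_notify_delay_ms(u32 data1)
{
	u32 delay = (data1 & ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK)
		    >> ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT;

	/* The low byte (DRIVER_ACTION) asks the driver to stop its TX
	 * queues or go through ifdown; bits 8-15 (REASON_CODE) separate a
	 * management-requested reset from fatal/non-fatal fw exceptions.
	 */
	return delay * 100;	/* ticks are 100 ms each */
}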
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 3962f6fd543c..d80f5c981d90 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -448,16 +448,22 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
struct bnxt_pf_info *pf = &bp->pf;
int i, rc = 0, min = 1;
+ u16 vf_msix = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
- vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
- vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
+ vf_ring_grps = 0;
+ } else {
+ vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
+ }
+ vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp);
+ vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
else
vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
- vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
@@ -476,7 +482,8 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.min_l2_ctxs = cpu_to_le16(min);
req.min_vnics = cpu_to_le16(min);
req.min_stat_ctx = cpu_to_le16(min);
- req.min_hw_ring_grps = cpu_to_le16(min);
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+ req.min_hw_ring_grps = cpu_to_le16(min);
} else {
vf_cp_rings /= num_vfs;
vf_tx_rings /= num_vfs;
@@ -500,6 +507,8 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
req.max_vnics = cpu_to_le16(vf_vnics);
req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ req.max_msix = cpu_to_le16(vf_msix / num_vfs);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < num_vfs; i++) {
@@ -525,6 +534,8 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
hw_resc->max_rsscos_ctxs -= pf->active_vfs;
hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ hw_resc->max_irqs -= vf_msix * n;
rc = pf->active_vfs;
}
@@ -539,19 +550,16 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
u32 rc = 0, mtu, i;
u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- u16 vf_ring_grps, max_stat_ctxs;
struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
int total_vf_tx_rings = 0;
+ u16 vf_ring_grps;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
- max_stat_ctxs = hw_resc->max_stat_ctxs;
-
/* Remaining rings are distributed equally amongst VFs for now */
- vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
- bp->cp_nr_rings) / num_vfs;
- vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+ vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
+ vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
num_vfs;
@@ -644,8 +652,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
*/
vfs_supported = *num_vfs;
- avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
- avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ avail_cp = bnxt_get_avail_cp_rings_for_en(bp);
+ avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp);
avail_cp = min_t(int, avail_cp, avail_stat);
while (vfs_supported) {
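The SR-IOV path above now sizes spare completion rings and stat contexts through the bnxt_get_avail_*_for_en() helpers before splitting them evenly across VFs. The division step as a sketch (bnxt_split_vf_resources() is illustrative, not driver code):

static void bnxt_split_vf_resources(struct bnxt *bp, int num_vfs,
				    u16 *cp_per_vf, u16 *stat_per_vf)
{
	/* Whatever the PF has not claimed is shared equally */
	*cp_per_vf = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
	*stat_per_vf = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs;
}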
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 749f63beddd8..c683b5e96b1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -337,18 +337,21 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}
-static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
+static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
+ struct bnxt_tc_flow_node *flow_node)
{
struct hwrm_cfa_flow_free_input req = { 0 };
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
- req.flow_handle = flow_handle;
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+ req.ext_flow_handle = flow_node->ext_flow_handle;
+ else
+ req.flow_handle = flow_node->flow_handle;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
- __func__, flow_handle, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
if (rc)
rc = -EIO;
@@ -418,13 +421,14 @@ static bool bits_set(void *key, int len)
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
__le16 ref_flow_handle,
- __le32 tunnel_handle, __le16 *flow_handle)
+ __le32 tunnel_handle,
+ struct bnxt_tc_flow_node *flow_node)
{
- struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_tc_actions *actions = &flow->actions;
struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
struct hwrm_cfa_flow_alloc_input req = { 0 };
+ struct hwrm_cfa_flow_alloc_output *resp;
u16 flow_flags = 0, action_flags = 0;
int rc;
@@ -527,8 +531,23 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc)
- *flow_handle = resp->flow_handle;
+ if (!rc) {
+ resp = bnxt_get_hwrm_resp_addr(bp, &req);
+ /* CFA_FLOW_ALLOC response interpretation:
+  *                  fw with          fw with
+  *                  16-bit           64-bit
+  *                  flow handle      flow handle
+  *                  ===========      ===========
+  * flow_handle      flow handle      flow context id
+  * ext_flow_handle  INVALID          flow handle
+  * flow_id          INVALID          flow counter id
+  */
+ flow_node->flow_handle = resp->flow_handle;
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
+ flow_node->ext_flow_handle = resp->ext_flow_handle;
+ flow_node->flow_id = resp->flow_id;
+ }
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
@@ -544,9 +563,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
__le32 ref_decap_handle,
__le32 *decap_filter_handle)
{
- struct hwrm_cfa_decap_filter_alloc_output *resp =
- bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
+ struct hwrm_cfa_decap_filter_alloc_output *resp;
struct ip_tunnel_key *tun_key = &flow->tun_key;
u32 enables = 0;
int rc;
@@ -599,10 +617,12 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc)
+ if (!rc) {
+ resp = bnxt_get_hwrm_resp_addr(bp, &req);
*decap_filter_handle = resp->decap_filter_id;
- else
+ } else {
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
@@ -633,9 +653,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
struct bnxt_tc_l2_key *l2_info,
__le32 *encap_record_handle)
{
- struct hwrm_cfa_encap_record_alloc_output *resp =
- bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_encap_record_alloc_input req = { 0 };
+ struct hwrm_cfa_encap_record_alloc_output *resp;
struct hwrm_cfa_encap_data_vxlan *encap =
(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
@@ -667,10 +686,12 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (!rc)
+ if (!rc) {
+ resp = bnxt_get_hwrm_resp_addr(bp, &req);
*encap_record_handle = resp->encap_record_id;
- else
+ } else {
netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ }
mutex_unlock(&bp->hwrm_cmd_lock);
if (rc)
@@ -1224,7 +1245,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
int rc;
/* send HWRM cmd to free the flow-id */
- bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);
+ bnxt_hwrm_cfa_flow_free(bp, flow_node);
mutex_lock(&tc_info->lock);
@@ -1246,6 +1267,12 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
return 0;
}
+static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
+ u16 src_fid)
+{
+ flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
+}
+
static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
u16 src_fid)
{
@@ -1293,6 +1320,9 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
bnxt_tc_set_src_fid(bp, flow, src_fid);
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+ bnxt_tc_set_flow_dir(bp, flow, src_fid);
+
if (!bnxt_tc_can_offload(bp, flow)) {
rc = -ENOSPC;
goto free_node;
@@ -1320,7 +1350,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
/* send HWRM cmd to alloc the flow */
rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
- tunnel_handle, &new_node->flow_handle);
+ tunnel_handle, new_node);
if (rc)
goto put_tunnel;
@@ -1336,7 +1366,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
return 0;
hwrm_flow_free:
- bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
+ bnxt_hwrm_cfa_flow_free(bp, new_node);
put_tunnel:
bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2:
@@ -1397,13 +1427,40 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
return 0;
}
+static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
+ struct bnxt_tc_flow_node *flow_node,
+ __le16 *flow_handle, __le32 *flow_id)
+{
+ u16 handle;
+
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
+ *flow_id = flow_node->flow_id;
+
+ /* If flow_id is used to fetch flow stats then:
+ * 1. lower 12 bits of flow_handle must be set to all 1s.
+ * 2. 15th bit of flow_handle must specify the flow
+ * direction (TX/RX).
+ */
+ if (flow_node->flow.dir == BNXT_DIR_RX)
+ handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
+ CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
+ else
+ handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
+
+ *flow_handle = cpu_to_le16(handle);
+ } else {
+ *flow_handle = flow_node->flow_handle;
+ }
+}
+
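
A minimal sketch of the stats key the helper above builds when the fw
reports 64-bit handles, assuming the usual values behind the
CFA_FLOW_INFO_REQ_FLOW_HANDLE_* constants (low 12 bits all ones, bit 15
for direction); the local names and numeric values are illustrative:

    #define SKETCH_HANDLE_MAX_MASK 0x0fff /* lower 12 bits set to all 1s */
    #define SKETCH_HANDLE_DIR_RX   0x8000 /* bit 15 marks an RX flow     */

    static unsigned short sketch_stats_handle(int is_rx)
    {
            return SKETCH_HANDLE_MAX_MASK |
                   (is_rx ? SKETCH_HANDLE_DIR_RX : 0);
    }
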
static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
struct bnxt_tc_stats_batch stats_batch[])
{
- struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_flow_stats_input req = { 0 };
+ struct hwrm_cfa_flow_stats_output *resp;
__le16 *req_flow_handles = &req.flow_handle_0;
+ __le32 *req_flow_ids = &req.flow_id_0;
int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
@@ -1411,14 +1468,19 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
for (i = 0; i < num_flows; i++) {
struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
- req_flow_handles[i] = flow_node->flow_handle;
+ bnxt_fill_cfa_stats_req(bp, flow_node,
+ &req_flow_handles[i], &req_flow_ids[i]);
}
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
- __le64 *resp_packets = &resp->packet_0;
- __le64 *resp_bytes = &resp->byte_0;
+ __le64 *resp_packets;
+ __le64 *resp_bytes;
+
+ resp = bnxt_get_hwrm_resp_addr(bp, &req);
+ resp_packets = &resp->packet_0;
+ resp_bytes = &resp->byte_0;
for (i = 0; i < num_flows; i++) {
stats_batch[i].hw_stats.packets =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
index 97e09a880693..8a0968967bc5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -98,6 +98,9 @@ struct bnxt_tc_flow {
/* flow applicable to pkts ingressing on this fid */
u16 src_fid;
+ u8 dir;
+#define BNXT_DIR_RX 1
+#define BNXT_DIR_TX 0
struct bnxt_tc_l2_key l2_key;
struct bnxt_tc_l2_key l2_mask;
struct bnxt_tc_l3_key l3_key;
@@ -170,7 +173,9 @@ struct bnxt_tc_flow_node {
struct bnxt_tc_flow flow;
+ __le64 ext_flow_handle;
__le16 flow_handle;
+ __le32 flow_id;
/* L2 node in l2 hashtable that shares flow's l2 key */
struct bnxt_tc_l2_node *l2_node;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index b59b382d34f9..ea45a9b8179e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -48,10 +48,8 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->num_stat_ctxs == max_stat_ctxs)
+ bp->cp_nr_rings == max_stat_ctxs)
return -ENOMEM;
- bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs -
- BNXT_MIN_ROCE_STAT_CTXS);
}
atomic_set(&ulp->ref_count, 0);
@@ -82,14 +80,9 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
return -EINVAL;
}
- if (ulp_id == BNXT_ROCE_ULP) {
- unsigned int max_stat_ctxs;
+ if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
+ edev->en_ops->bnxt_free_msix(edev, ulp_id);
- max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
- bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
- if (ulp->msix_requested)
- edev->en_ops->bnxt_free_msix(edev, ulp_id);
- }
if (ulp->max_async_event_id)
bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
@@ -168,7 +161,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
if (BNXT_NEW_RM(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
+ avail_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
}
bnxt_fill_msix_vecs(bp, ent);
@@ -218,6 +211,14 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
return 0;
}
+int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ return BNXT_MIN_ROCE_STAT_CTXS;
+
+ return 0;
+}
+
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_fw_msg *fw_msg)
{
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index d9bea37cd211..cd78453d0bf0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -90,6 +90,7 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
int bnxt_get_ulp_msix_num(struct bnxt *bp);
int bnxt_get_ulp_msix_base(struct bnxt *bp);
+int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index bf6de02be396..0184ef6f05a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -199,7 +199,6 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
bp->tx_nr_rings_xdp = tx_xdp;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
- bp->num_stat_ctxs = bp->cp_nr_rings;
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index d83233ae4a15..510dfc1c236b 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5731,7 +5731,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
if (realdev) {
dev = cnic_from_netdev(realdev);
if (dev) {
- vid |= VLAN_TAG_PRESENT;
+ vid |= VLAN_CFI_MASK; /* make non-zero */
cnic_rcv_netevent(dev->cnic_priv, event, vid);
cnic_put(dev);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2d6f090bf644..983245c0867c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1169,7 +1169,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
break;
}
- return 0;
+ return ret;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
@@ -3612,36 +3612,6 @@ static int bcmgenet_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int bcmgenet_suspend(struct device *d)
-{
- struct net_device *dev = dev_get_drvdata(d);
- struct bcmgenet_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- if (!netif_running(dev))
- return 0;
-
- netif_device_detach(dev);
-
- bcmgenet_netif_stop(dev);
-
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
-
- /* Prepare the device for Wake-on-LAN and switch to the slow clock */
- if (device_may_wakeup(d) && priv->wolopts) {
- ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
- clk_prepare_enable(priv->clk_wol);
- } else if (priv->internal_phy) {
- ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
- }
-
- /* Turn off the clocks */
- clk_disable_unprepare(priv->clk);
-
- return ret;
-}
-
static int bcmgenet_resume(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
@@ -3719,6 +3689,39 @@ out_clk_disable:
clk_disable_unprepare(priv->clk);
return ret;
}
+
+static int bcmgenet_suspend(struct device *d)
+{
+ struct net_device *dev = dev_get_drvdata(d);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+
+ bcmgenet_netif_stop(dev);
+
+ if (!device_may_wakeup(d))
+ phy_suspend(dev->phydev);
+
+ /* Prepare the device for Wake-on-LAN and switch to the slow clock */
+ if (device_may_wakeup(d) && priv->wolopts) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+ clk_prepare_enable(priv->clk_wol);
+ } else if (priv->internal_phy) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+ }
+
+ /* Turn off the clocks */
+ clk_disable_unprepare(priv->clk);
+
+ if (ret)
+ bcmgenet_resume(d);
+
+ return ret;
+}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
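
The suspend body was moved below bcmgenet_resume() so that the new
failure path can call it; a minimal sketch of that unwind pattern, with
hypothetical names:

    static int sketch_power_down(struct device *d);
    static int sketch_resume(struct device *d);

    static int sketch_suspend(struct device *d)
    {
            int ret = sketch_power_down(d);

            if (ret)
                    sketch_resume(d); /* undo the partial suspend */
            return ret;
    }
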
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 2fbd027f0148..57582efa362d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -186,6 +186,8 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
}
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+ if (!(reg & MPD_EN))
+ return; /* already powered up so skip the rest */
reg &= ~MPD_EN;
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index a6cbaca37e94..aceb9b7b55bd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -226,7 +226,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* capabilities, use that knowledge to also configure the
* Reverse MII interface correctly.
*/
- if (dev->phydev->supported & PHY_1000BT_FEATURES)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ dev->phydev->supported))
port_ctrl = PORT_MODE_EXT_RVMII_50;
else
port_ctrl = PORT_MODE_EXT_RVMII_25;
@@ -317,7 +318,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
return ret;
}
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 432c3b867084..3b1397af81f7 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -66,11 +66,6 @@
#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
-
#define BAR_0 0
#define BAR_2 2
@@ -2157,7 +2152,8 @@ static void tg3_phy_start(struct tg3 *tp)
phydev->speed = tp->link_config.speed;
phydev->duplex = tp->link_config.duplex;
phydev->autoneg = tp->link_config.autoneg;
- phydev->advertising = tp->link_config.advertising;
+ ethtool_convert_legacy_u32_to_link_mode(
+ phydev->advertising, tp->link_config.advertising);
}
phy_start(phydev);
@@ -4057,8 +4053,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
do_low_power = false;
if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
struct phy_device *phydev;
- u32 phyid, advertising;
+ u32 phyid;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
@@ -4067,25 +4064,33 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tp->link_config.speed = phydev->speed;
tp->link_config.duplex = phydev->duplex;
tp->link_config.autoneg = phydev->autoneg;
- tp->link_config.advertising = phydev->advertising;
-
- advertising = ADVERTISED_TP |
- ADVERTISED_Pause |
- ADVERTISED_Autoneg |
- ADVERTISED_10baseT_Half;
+ ethtool_convert_link_mode_to_legacy_u32(
+ &tp->link_config.advertising,
+ phydev->advertising);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ advertising);
if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
- if (tg3_flag(tp, WOL_SPEED_100MB))
- advertising |=
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full;
- else
- advertising |= ADVERTISED_10baseT_Full;
+ if (tg3_flag(tp, WOL_SPEED_100MB)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising);
+ } else {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising);
+ }
}
- phydev->advertising = advertising;
-
+ linkmode_copy(phydev->advertising, advertising);
phy_start_aneg(phydev);
phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
@@ -6135,10 +6140,16 @@ static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
}
/* tp->lock must be held */
-static u64 tg3_refclk_read(struct tg3 *tp)
+static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
{
- u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
- return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+ u64 stamp;
+
+ ptp_read_system_prets(sts);
+ stamp = tr32(TG3_EAV_REF_CLCK_LSB);
+ ptp_read_system_postts(sts);
+ stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+
+ return stamp;
}
/* tp->lock must be held */
@@ -6229,13 +6240,14 @@ static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
u64 ns;
struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
tg3_full_lock(tp, 0);
- ns = tg3_refclk_read(tp);
+ ns = tg3_refclk_read(tp, sts);
ns += tp->ptp_adjust;
tg3_full_unlock(tp);
@@ -6330,7 +6342,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.pps = 0,
.adjfreq = tg3_ptp_adjfreq,
.adjtime = tg3_ptp_adjtime,
- .gettime64 = tg3_ptp_gettime,
+ .gettimex64 = tg3_ptp_gettimex,
.settime64 = tg3_ptp_settime,
.enable = tg3_ptp_enable,
};
@@ -16973,32 +16985,6 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
return err;
}
-#ifdef CONFIG_SPARC
-static int tg3_get_macaddr_sparc(struct tg3 *tp)
-{
- struct net_device *dev = tp->dev;
- struct pci_dev *pdev = tp->pdev;
- struct device_node *dp = pci_device_to_OF_node(pdev);
- const unsigned char *addr;
- int len;
-
- addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == ETH_ALEN) {
- memcpy(dev->dev_addr, addr, ETH_ALEN);
- return 0;
- }
- return -ENODEV;
-}
-
-static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
-{
- struct net_device *dev = tp->dev;
-
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
- return 0;
-}
-#endif
-
static int tg3_get_device_address(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
@@ -17006,10 +16992,8 @@ static int tg3_get_device_address(struct tg3 *tp)
int addr_ok = 0;
int err;
-#ifdef CONFIG_SPARC
- if (!tg3_get_macaddr_sparc(tp))
+ if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
return 0;
-#endif
if (tg3_flag(tp, IS_SSB_CORE)) {
err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
@@ -17071,13 +17055,8 @@ static int tg3_get_device_address(struct tg3 *tp)
}
}
- if (!is_valid_ether_addr(&dev->dev_addr[0])) {
-#ifdef CONFIG_SPARC
- if (!tg3_get_default_macaddr_sparc(tp))
- return 0;
-#endif
+ if (!is_valid_ether_addr(&dev->dev_addr[0]))
return -EINVAL;
- }
return 0;
}
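
A minimal sketch of the legacy-u32 to linkmode conversion this patch
applies throughout tg3; the linkmode helpers are the in-kernel API from
<linux/linkmode.h>, while the wrapper function itself is illustrative:

    static void sketch_restrict_advertising(struct phy_device *phydev)
    {
            __ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = { 0, };

            linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, adv);
            linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, adv);
            linkmode_copy(phydev->advertising, adv); /* replace, not OR */
    }
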
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 1d86b4d5645a..b126926ef7f5 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -61,7 +61,8 @@
#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
| MACB_BIT(ISR_RLE) \
| MACB_BIT(TXERR))
-#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \
+ | MACB_BIT(TXUBR))
/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN 8
@@ -680,6 +681,11 @@ static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_
if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
desc_64 = macb_64b_desc(bp, desc);
desc_64->addrh = upper_32_bits(addr);
+ /* The low bits of RX address contain the RX_USED bit, clearing
+ * of which allows packet RX. Make sure the high bits are also
+ * visible to HW at that point.
+ */
+ dma_wmb();
}
#endif
desc->addr = lower_32_bits(addr);
@@ -928,14 +934,19 @@ static void gem_rx_refill(struct macb_queue *queue)
if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
- macb_set_addr(bp, desc, paddr);
desc->ctrl = 0;
+ /* Setting addr clears RX_USED and allows reception,
+ * make sure ctrl is cleared first to avoid a race.
+ */
+ dma_wmb();
+ macb_set_addr(bp, desc, paddr);
/* properly align Ethernet header */
skb_reserve(skb, NET_IP_ALIGN);
} else {
- desc->addr &= ~MACB_BIT(RX_USED);
desc->ctrl = 0;
+ dma_wmb();
+ desc->addr &= ~MACB_BIT(RX_USED);
}
}
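
A minimal sketch of the descriptor hand-off ordering these macb hunks
enforce: everything the controller may read must be visible before the
RX_USED ownership bit is cleared. The struct and helper names are
illustrative; dma_wmb() is the kernel barrier used above:

    struct sketch_desc {
            u32 addr; /* bit 0 is the RX_USED ownership bit */
            u32 ctrl;
    };

    static void sketch_give_to_hw(struct sketch_desc *desc, u32 paddr)
    {
            desc->ctrl = 0;     /* scrub stale status first           */
            dma_wmb();          /* order it before the ownership flip */
            desc->addr = paddr; /* clearing RX_USED lets HW fill desc */
    }
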
@@ -989,11 +1000,15 @@ static int gem_rx(struct macb_queue *queue, int budget)
rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
addr = macb_get_addr(bp, desc);
- ctrl = desc->ctrl;
if (!rxused)
break;
+ /* Ensure ctrl is at least as up-to-date as rxused */
+ dma_rmb();
+
+ ctrl = desc->ctrl;
+
queue->rx_tail++;
count++;
@@ -1168,11 +1183,14 @@ static int macb_rx(struct macb_queue *queue, int budget)
/* Make hw descriptor updates visible to CPU */
rmb();
- ctrl = desc->ctrl;
-
if (!(desc->addr & MACB_BIT(RX_USED)))
break;
+ /* Ensure ctrl is at least as up-to-date as addr */
+ dma_rmb();
+
+ ctrl = desc->ctrl;
+
if (ctrl & MACB_BIT(RX_SOF)) {
if (first_frag != -1)
discard_partial_frame(queue, first_frag, tail);
@@ -1312,6 +1330,21 @@ static void macb_hresp_error_task(unsigned long data)
netif_tx_start_all_queues(dev);
}
+static void macb_tx_restart(struct macb_queue *queue)
+{
+ unsigned int head = queue->tx_head;
+ unsigned int tail = queue->tx_tail;
+ struct macb *bp = queue->bp;
+
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(TXUBR));
+
+ if (head == tail)
+ return;
+
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+}
+
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
@@ -1369,6 +1402,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(TCOMP))
macb_tx_interrupt(queue);
+ if (status & MACB_BIT(TXUBR))
+ macb_tx_restart(queue);
+
/* Link change detection isn't possible with RMII, so we'll
* add that if/when we get our hands on a full-blown MII PHY.
*/
@@ -4055,7 +4091,7 @@ static int macb_probe(struct platform_device *pdev)
if (mac) {
ether_addr_copy(bp->dev->dev_addr, mac);
} else {
- err = of_get_nvmem_mac_address(np, bp->dev->dev_addr);
+ err = nvmem_get_mac_address(&pdev->dev, bp->dev->dev_addr);
if (err) {
if (err == -EPROBE_DEFER)
goto err_out_free_netdev;
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index cd5296b84229..a6dc47edc4cf 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -319,6 +319,8 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
desc_ptp = macb_ptp_desc(queue->bp, desc);
tx_timestamp = &queue->tx_timestamps[head];
tx_timestamp->skb = skb;
+ /* ensure ts_1/ts_2 are loaded after ctrl (TX_USED check) */
+ dma_rmb();
tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
/* move head */
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 6aeb1045c302..73632b843749 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -277,10 +277,6 @@ static int cavium_ptp_probe(struct pci_dev *pdev,
writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
- if (!clock->ptp_clock) {
- err = -ENODEV;
- goto error_stop;
- }
if (IS_ERR(clock->ptp_clock)) {
err = PTR_ERR(clock->ptp_clock);
goto error_stop;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 4c3925af53bc..abe5d0dac851 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -111,7 +111,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
"mac_tx_one_collision",
"mac_tx_multi_collision",
"mac_tx_max_collision_fail",
- "mac_tx_max_deferal_fail",
+ "mac_tx_max_deferral_fail",
"mac_tx_fifo_err",
"mac_tx_runts",
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index ea9859e028d4..de61060721c4 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -349,13 +349,15 @@ lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
struct sk_buff *skb = sc->ctxptr;
struct net_device *ndev = skb->dev;
+ u32 iq_no;
dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
sc->datasize, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
+ iq_no = sc->iq_no;
octeon_free_soft_command(oct, sc);
- if (octnet_iq_is_full(oct, sc->iq_no))
+ if (octnet_iq_is_full(oct, iq_no))
return;
if (netif_queue_stopped(ndev))
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4b3aecf98f2a..5359c1021f42 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1080,8 +1080,11 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* Set the mode of the interface, RGMII/MII. */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
union cvmx_agl_prtx_ctl agl_prtx_ctl;
- int rgmii_mode = (netdev->phydev->supported &
- (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
+ int rgmii_mode =
+ (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ netdev->phydev->supported) |
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ netdev->phydev->supported)) != 0;
agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 55af04fa03a7..6c8dcb65ff03 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1441,6 +1441,9 @@ static void nic_remove(struct pci_dev *pdev)
{
struct nicpf *nic = pci_get_drvdata(pdev);
+ if (!nic)
+ return;
+
if (nic->flags & NIC_SRIOV_ENABLED)
pci_disable_sriov(pdev);
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index e2cdfa75673f..e8001e974411 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -24,7 +24,8 @@ config CHELSIO_T1
---help---
This driver supports Chelsio gigabit and 10-gigabit
Ethernet cards. More information about adapter features and
- performance tuning is in <file:Documentation/networking/cxgb.txt>.
+ performance tuning is in
+ <file:Documentation/networking/device_drivers/chelsio/cxgb.txt>.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index b16f4b3ef4c5..2d1ca920601e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -404,6 +404,7 @@ struct adapter_params {
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
u8 fw_caps_support; /* 32-bit Port Capabilities */
bool filter2_wr_support; /* FW support for FILTER2_WR */
+ unsigned int viid_smt_extn_support:1; /* FW returns vin and smt index */
/* MPS Buffer Group Map[per Port]. Bit i is set if buffer group i is
* used by the Port
@@ -592,6 +593,13 @@ struct port_info {
bool ptp_enable;
struct sched_table *sched_tbl;
u32 eth_flags;
+
+ /* viid and smt fields either returned by fw
+ * or decoded by parsing viid by driver.
+ */
+ u8 vin;
+ u8 vivld;
+ u8 smt_idx;
};
struct dentry;
@@ -1757,7 +1765,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
- unsigned int *rss_size);
+ unsigned int *rss_size, u8 *vivld, u8 *vin);
int t4_free_vi(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int viid);
@@ -1783,7 +1791,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, unsigned int naddr,
const u8 **addr, bool sleep_ok);
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int idx, const u8 *addr, bool persist, bool add_smt);
+ int idx, const u8 *addr, bool persist, u8 *smt_idx);
int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
bool ucast, u64 vec, bool sleep_ok);
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index cab492ec8f59..b0ff9fa183f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -378,19 +378,7 @@ static int cim_qcfg_show(struct seq_file *seq, void *v)
QUEREMFLITS_G(p[2]) * 16);
return 0;
}
-
-static int cim_qcfg_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cim_qcfg_show, inode->i_private);
-}
-
-static const struct file_operations cim_qcfg_fops = {
- .owner = THIS_MODULE,
- .open = cim_qcfg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(cim_qcfg);
static int cimq_show(struct seq_file *seq, void *v, int idx)
{
@@ -860,8 +848,7 @@ static int tx_rate_show(struct seq_file *seq, void *v)
}
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate);
+DEFINE_SHOW_ATTRIBUTE(tx_rate);
static int cctrl_tbl_show(struct seq_file *seq, void *v)
{
@@ -893,8 +880,7 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
kfree(incr);
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl);
+DEFINE_SHOW_ATTRIBUTE(cctrl_tbl);
/* Format a value in a unit that differs from the value's native unit by the
* given factor.
@@ -955,8 +941,7 @@ static int clk_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(clk);
+DEFINE_SHOW_ATTRIBUTE(clk);
/* Firmware Device Log dump. */
static const char * const devlog_level_strings[] = {
@@ -1990,22 +1975,10 @@ static int sensors_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(sensors);
+DEFINE_SHOW_ATTRIBUTE(sensors);
#if IS_ENABLED(CONFIG_IPV6)
-static int clip_tbl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, clip_tbl_show, inode->i_private);
-}
-
-static const struct file_operations clip_tbl_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = clip_tbl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release
-};
+DEFINE_SHOW_ATTRIBUTE(clip_tbl);
#endif
/* RSS Table.
@@ -2208,8 +2181,7 @@ static int rss_config_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(rss_config);
+DEFINE_SHOW_ATTRIBUTE(rss_config);
/* RSS Secret Key.
*/
@@ -2628,19 +2600,7 @@ static int resources_show(struct seq_file *seq, void *v)
return 0;
}
-
-static int resources_open(struct inode *inode, struct file *file)
-{
- return single_open(file, resources_show, inode->i_private);
-}
-
-static const struct file_operations resources_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = resources_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SHOW_ATTRIBUTE(resources);
/**
* ethqset2pinfo - return port_info of an Ethernet Queue Set
@@ -3233,8 +3193,7 @@ static int tid_info_show(struct seq_file *seq, void *v)
t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
+DEFINE_SHOW_ATTRIBUTE(tid_info);
static void add_debugfs_mem(struct adapter *adap, const char *name,
unsigned int idx, unsigned int size_mb)
@@ -3364,21 +3323,9 @@ static int meminfo_show(struct seq_file *seq, void *v)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(meminfo);
-static int meminfo_open(struct inode *inode, struct file *file)
-{
- return single_open(file, meminfo_show, inode->i_private);
-}
-
-static const struct file_operations meminfo_fops = {
- .owner = THIS_MODULE,
- .open = meminfo_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int chcr_show(struct seq_file *seq, void *v)
+static int chcr_stats_show(struct seq_file *seq, void *v)
{
struct adapter *adap = seq->private;
@@ -3399,20 +3346,7 @@ static int chcr_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.ipsec_cnt));
return 0;
}
-
-
-static int chcr_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, chcr_show, inode->i_private);
-}
-
-static const struct file_operations chcr_stats_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = chcr_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(chcr_stats);
#define PRINT_ADAP_STATS(string, value) \
seq_printf(seq, "%-25s %-20llu\n", (string), \
@@ -3573,8 +3507,7 @@ static int tp_stats_show(struct seq_file *seq, void *v)
return 0;
}
-
-DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats);
+DEFINE_SHOW_ATTRIBUTE(tp_stats);
/* Add an array of Debug FS files.
*/
@@ -3603,7 +3536,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "cim_pif_la", &cim_pif_la_fops, 0400, 0 },
{ "cim_ma_la", &cim_ma_la_fops, 0400, 0 },
{ "cim_qcfg", &cim_qcfg_fops, 0400, 0 },
- { "clk", &clk_debugfs_fops, 0400, 0 },
+ { "clk", &clk_fops, 0400, 0 },
{ "devlog", &devlog_fops, 0400, 0 },
{ "mboxlog", &mboxlog_fops, 0400, 0 },
{ "mbox0", &mbox_debugfs_fops, 0600, 0 },
@@ -3621,11 +3554,11 @@ int t4_setup_debugfs(struct adapter *adap)
{ "l2t", &t4_l2t_fops, 0400, 0},
{ "mps_tcam", &mps_tcam_debugfs_fops, 0400, 0 },
{ "rss", &rss_debugfs_fops, 0400, 0 },
- { "rss_config", &rss_config_debugfs_fops, 0400, 0 },
+ { "rss_config", &rss_config_fops, 0400, 0 },
{ "rss_key", &rss_key_debugfs_fops, 0400, 0 },
{ "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 },
{ "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 },
- { "resources", &resources_debugfs_fops, 0400, 0 },
+ { "resources", &resources_fops, 0400, 0 },
#ifdef CONFIG_CHELSIO_T4_DCB
{ "dcb_info", &dcb_info_debugfs_fops, 0400, 0 },
#endif
@@ -3644,18 +3577,18 @@ int t4_setup_debugfs(struct adapter *adap)
{ "obq_ncsi", &cim_obq_fops, 0400, 5 },
{ "tp_la", &tp_la_fops, 0400, 0 },
{ "ulprx_la", &ulprx_la_fops, 0400, 0 },
- { "sensors", &sensors_debugfs_fops, 0400, 0 },
+ { "sensors", &sensors_fops, 0400, 0 },
{ "pm_stats", &pm_stats_debugfs_fops, 0400, 0 },
- { "tx_rate", &tx_rate_debugfs_fops, 0400, 0 },
- { "cctrl", &cctrl_tbl_debugfs_fops, 0400, 0 },
+ { "tx_rate", &tx_rate_fops, 0400, 0 },
+ { "cctrl", &cctrl_tbl_fops, 0400, 0 },
#if IS_ENABLED(CONFIG_IPV6)
- { "clip_tbl", &clip_tbl_debugfs_fops, 0400, 0 },
+ { "clip_tbl", &clip_tbl_fops, 0400, 0 },
#endif
- { "tids", &tid_info_debugfs_fops, 0400, 0},
+ { "tids", &tid_info_fops, 0400, 0},
{ "blocked_fl", &blocked_fl_fops, 0600, 0 },
{ "meminfo", &meminfo_fops, 0400, 0 },
- { "crypto", &chcr_stats_debugfs_fops, 0400, 0 },
- { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 },
+ { "crypto", &chcr_stats_fops, 0400, 0 },
+ { "tp_stats", &tp_stats_fops, 0400, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
index 23f43a0f8950..ba95e13d52da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h
@@ -37,19 +37,6 @@
#include <linux/export.h>
-#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \
-static int name##_open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, name##_show, inode->i_private); \
-} \
-static const struct file_operations name##_debugfs_fops = { \
- .owner = THIS_MODULE, \
- .open = name##_open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release \
-}
-
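
The deleted helper above is superseded by DEFINE_SHOW_ATTRIBUTE(name)
from <linux/seq_file.h>, which generates roughly the same boilerplate;
expanded for "clk" as an example, it is approximately:

    static int clk_open(struct inode *inode, struct file *file)
    {
            return single_open(file, clk_show, inode->i_private);
    }

    static const struct file_operations clk_fops = {
            .owner   = THIS_MODULE,
            .open    = clk_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };
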
struct t4_debugfs_entry {
const char *name;
const struct file_operations *ops;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d49db46254cd..6ba9099ca7fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -453,7 +453,7 @@ static int link_start(struct net_device *dev)
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
- true);
+ &pi->smt_idx);
if (ret >= 0) {
pi->xact_addr_filt = ret;
ret = 0;
@@ -1585,28 +1585,6 @@ unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
/**
- * cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
- * @chip: chip type
- * @viid: VI id of the given port
- *
- * Return the SMT index for this VI.
- */
-unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
-{
- /* In T4/T5, SMT contains 256 SMAC entries organized in
- * 128 rows of 2 entries each.
- * In T6, SMT contains 256 SMAC entries in 256 rows.
- * TODO: The below code needs to be updated when we add support
- * for 256 VFs.
- */
- if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
- return ((viid & 0x7f) << 1);
- else
- return (viid & 0x7f);
-}
-EXPORT_SYMBOL(cxgb4_tp_smt_idx);
-
-/**
* cxgb4_port_chan - get the HW channel of a port
* @dev: the net device for the port
*
@@ -2280,8 +2258,6 @@ static int cxgb_up(struct adapter *adap)
#if IS_ENABLED(CONFIG_IPV6)
update_clip(adap);
#endif
- /* Initialize hash mac addr list*/
- INIT_LIST_HEAD(&adap->mac_hlist);
return err;
irq_err:
@@ -2303,6 +2279,7 @@ static void cxgb_down(struct adapter *adapter)
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
+
adapter->flags &= ~FULL_INIT_DONE;
}
@@ -2669,7 +2646,7 @@ static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
vf < nvfs; vf++) {
- macaddr[5] = adap->pf * 16 + vf;
+ macaddr[5] = adap->pf * nvfs + vf;
ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
}
}
@@ -2863,7 +2840,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return -EADDRNOTAVAIL;
ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
- pi->xact_addr_filt, addr->sa_data, true, true);
+ pi->xact_addr_filt, addr->sa_data, true,
+ &pi->smt_idx);
if (ret < 0)
return ret;
@@ -4467,6 +4445,15 @@ static int adap_init0(struct adapter *adap)
adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
}
+ /* Check if FW supports returning vin and smt index.
+ * If this is not supported, the driver will derive
+ * these values from the viid.
+ */
+ params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
+
/*
* Get device capabilities so we can determine what resources we need
* to manage.
@@ -4777,14 +4764,26 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
for_each_port(adap, i) {
- struct port_info *p = adap2pinfo(adap, i);
+ struct port_info *pi = adap2pinfo(adap, i);
+ u8 vivld = 0, vin = 0;
- ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
- NULL, NULL);
+ ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
+ NULL, NULL, &vivld, &vin);
if (ret < 0)
return PCI_ERS_RESULT_DISCONNECT;
- p->viid = ret;
- p->xact_addr_filt = -1;
+ pi->viid = ret;
+ pi->xact_addr_filt = -1;
+ /* If fw supports returning the VIN as part of FW_VI_CMD,
+ * save the returned values.
+ */
+ if (adap->params.viid_smt_extn_support) {
+ pi->vivld = vivld;
+ pi->vin = vin;
+ } else {
+ /* Retrieve the values from VIID */
+ pi->vivld = FW_VIID_VIVLD_G(pi->viid);
+ pi->vin = FW_VIID_VIN_G(pi->viid);
+ }
}
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
@@ -5621,6 +5620,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(is_t5(adapter->params.chip) ? STATMODE_V(0) :
T6_STATMODE_V(0)));
+ /* Initialize hash mac addr list */
+ INIT_LIST_HEAD(&adapter->mac_hlist);
+
for_each_port(adapter, i) {
netdev = alloc_etherdev_mq(sizeof(struct port_info),
MAX_ETH_QSETS);
@@ -5899,6 +5901,7 @@ fw_attach_fail:
static void remove_one(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
+ struct hash_mac_addr *entry, *tmp;
if (!adapter) {
pci_release_regions(pdev);
@@ -5948,6 +5951,12 @@ static void remove_one(struct pci_dev *pdev)
if (adapter->num_uld || adapter->num_ofld_uld)
t4_uld_mem_free(adapter);
free_some_resources(adapter);
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
#if IS_ENABLED(CONFIG_IPV6)
t4_cleanup_clip_tbl(adapter);
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 99022c0898b5..4852febbfec3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -495,14 +495,11 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
- u32 viid = cxgb4_port_viid(dev);
- u32 vf = FW_VIID_VIN_G(viid);
- u32 pf = FW_VIID_PFN_G(viid);
- u32 vld = FW_VIID_VIVLD_G(viid);
-
- ntuple |= (u64)(FT_VNID_ID_VF_V(vf) |
- FT_VNID_ID_PF_V(pf) |
- FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift;
+ struct port_info *pi = (struct port_info *)netdev_priv(dev);
+
+ ntuple |= (u64)(FT_VNID_ID_VF_V(pi->vin) |
+ FT_VNID_ID_PF_V(adap->pf) |
+ FT_VNID_ID_VLD_V(pi->vivld)) << tp->vnic_shift;
}
return ntuple;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cb523949c812..e8c34292a0ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -5880,7 +5880,6 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
{
int i, ofst = idx * 4;
u32 data_reg, mask_reg, cfg;
- u32 multitrc = TRCMULTIFILTER_F;
if (!enable) {
t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
@@ -5900,7 +5899,6 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
* maximum packet capture size of 9600 bytes is recommended.
* Also in this mode, only trace0 can be enabled and running.
*/
- multitrc = 0;
if (tp->snap_len > 9600 || idx)
return -EINVAL;
}
@@ -7141,21 +7139,10 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size)
{
unsigned int page_shift = fls(page_size) - 1;
- unsigned int sge_hps = page_shift - 10;
unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
- t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
- HOSTPAGESIZEPF0_V(sge_hps) |
- HOSTPAGESIZEPF1_V(sge_hps) |
- HOSTPAGESIZEPF2_V(sge_hps) |
- HOSTPAGESIZEPF3_V(sge_hps) |
- HOSTPAGESIZEPF4_V(sge_hps) |
- HOSTPAGESIZEPF5_V(sge_hps) |
- HOSTPAGESIZEPF6_V(sge_hps) |
- HOSTPAGESIZEPF7_V(sge_hps));
-
if (is_t4(adap->params.chip)) {
t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
@@ -7488,7 +7475,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
*/
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
- unsigned int *rss_size)
+ unsigned int *rss_size, u8 *vivld, u8 *vin)
{
int ret;
struct fw_vi_cmd c;
@@ -7523,6 +7510,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
}
if (rss_size)
*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
+
+ if (vivld)
+ *vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16));
+
+ if (vin)
+ *vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16));
+
return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
}
@@ -7980,7 +7974,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
* MAC value.
*/
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
- int idx, const u8 *addr, bool persist, bool add_smt)
+ int idx, const u8 *addr, bool persist, u8 *smt_idx)
{
int ret, mode;
struct fw_vi_mac_cmd c;
@@ -7989,7 +7983,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
- mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+ mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
memset(&c, 0, sizeof(c));
c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
@@ -8006,6 +8000,23 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
if (ret >= max_mac_addr)
ret = -ENOMEM;
+ if (smt_idx) {
+ if (adap->params.viid_smt_extn_support) {
+ *smt_idx = FW_VI_MAC_CMD_SMTID_G
+ (be32_to_cpu(c.op_to_viid));
+ } else {
+ /* In T4/T5, SMT contains 256 SMAC entries
+ * organized in 128 rows of 2 entries each.
+ * In T6, SMT contains 256 SMAC entries in
+ * 256 rows.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <=
+ CHELSIO_T5)
+ *smt_idx = (viid & FW_VIID_VIN_M) << 1;
+ else
+ *smt_idx = (viid & FW_VIID_VIN_M);
+ }
+ }
}
return ret;
}
@@ -8593,7 +8604,7 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
{
unsigned int fw_caps = pi->adapter->params.fw_caps_support;
struct fw_port_cmd port_cmd;
- unsigned int action, link_ok, speed, mtu;
+ unsigned int action, link_ok, mtu;
fw_port_cap32_t linkattr;
int ret;
@@ -8627,7 +8638,6 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
mtu = FW_PORT_CMD_MTU32_G(
be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
}
- speed = fwcap_to_speed(linkattr);
*link_okp = link_ok;
*speedp = fwcap_to_speed(linkattr);
@@ -9374,6 +9384,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
enum fw_port_type port_type;
int mdio_addr;
fw_port_cap32_t pcaps, acaps;
+ u8 vivld = 0, vin = 0;
int ret;
/* If we haven't yet determined whether we're talking to Firmware
@@ -9428,7 +9439,8 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
acaps = be32_to_cpu(cmd.u.info32.acaps32);
}
- ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
+ ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size,
+ &vivld, &vin);
if (ret < 0)
return ret;
@@ -9437,6 +9449,18 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
pi->lport = port;
pi->rss_size = rss_size;
+ /* If fw supports returning the VIN as part of FW_VI_CMD,
+ * save the returned values.
+ */
+ if (adapter->params.viid_smt_extn_support) {
+ pi->vivld = vivld;
+ pi->vin = vin;
+ } else {
+ /* Retrieve the values from VIID */
+ pi->vivld = FW_VIID_VIVLD_G(pi->viid);
+ pi->vin = FW_VIID_VIN_G(pi->viid);
+ }
+
pi->port_type = port_type;
pi->mdio_addr = mdio_addr;
pi->mod_type = FW_PORT_MOD_TYPE_NA;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index f152da1ce046..c62a0c830705 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1453,6 +1453,9 @@ struct cpl_tx_data {
#define T6_TX_FORCE_V(x) ((x) << T6_TX_FORCE_S)
#define T6_TX_FORCE_F T6_TX_FORCE_V(1U)
+#define TX_URG_S 16
+#define TX_URG_V(x) ((x) << TX_URG_S)
+
#define TX_SHOVE_S 14
#define TX_SHOVE_V(x) ((x) << TX_SHOVE_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 60df66f4d21c..bf7325f6d553 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -217,6 +217,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 57584ab32043..1d9b3e1e5f94 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1253,6 +1253,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20,
FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21,
FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24,
+ FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
};
/*
@@ -2109,6 +2110,19 @@ struct fw_vi_cmd {
#define FW_VI_CMD_FREE_V(x) ((x) << FW_VI_CMD_FREE_S)
#define FW_VI_CMD_FREE_F FW_VI_CMD_FREE_V(1U)
+#define FW_VI_CMD_VFVLD_S 24
+#define FW_VI_CMD_VFVLD_M 0x1
+#define FW_VI_CMD_VFVLD_V(x) ((x) << FW_VI_CMD_VFVLD_S)
+#define FW_VI_CMD_VFVLD_G(x) \
+ (((x) >> FW_VI_CMD_VFVLD_S) & FW_VI_CMD_VFVLD_M)
+#define FW_VI_CMD_VFVLD_F FW_VI_CMD_VFVLD_V(1U)
+
+#define FW_VI_CMD_VIN_S 16
+#define FW_VI_CMD_VIN_M 0xff
+#define FW_VI_CMD_VIN_V(x) ((x) << FW_VI_CMD_VIN_S)
+#define FW_VI_CMD_VIN_G(x) \
+ (((x) >> FW_VI_CMD_VIN_S) & FW_VI_CMD_VIN_M)
+
#define FW_VI_CMD_VIID_S 0
#define FW_VI_CMD_VIID_M 0xfff
#define FW_VI_CMD_VIID_V(x) ((x) << FW_VI_CMD_VIID_S)
@@ -2182,6 +2196,12 @@ struct fw_vi_mac_cmd {
} u;
};
+#define FW_VI_MAC_CMD_SMTID_S 12
+#define FW_VI_MAC_CMD_SMTID_M 0xff
+#define FW_VI_MAC_CMD_SMTID_V(x) ((x) << FW_VI_MAC_CMD_SMTID_S)
+#define FW_VI_MAC_CMD_SMTID_G(x) \
+ (((x) >> FW_VI_MAC_CMD_SMTID_S) & FW_VI_MAC_CMD_SMTID_M)
+
#define FW_VI_MAC_CMD_VIID_S 0
#define FW_VI_MAC_CMD_VIID_V(x) ((x) << FW_VI_MAC_CMD_VIID_S)
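
The _S/_M/_V/_G quartets above follow the usual cxgb4 pattern: _V()
shifts a value into its field and _G() extracts it back out. A minimal
round-trip sketch using the FW_VI_CMD_VIN field defined above (helper
name illustrative):

    static u8 sketch_roundtrip_vin(u8 vin)
    {
            u32 word = FW_VI_CMD_VIN_V(vin); /* pack into alloc_to_len16 */

            return FW_VI_CMD_VIN_G(word);    /* unpack: returns vin      */
    }
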
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ff84791a0ff8..2fab87e86561 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -722,6 +722,7 @@ static int adapter_up(struct adapter *adapter)
if (adapter->flags & USING_MSIX)
name_msix_vecs(adapter);
+
adapter->flags |= FULL_INIT_DONE;
}
@@ -747,8 +748,6 @@ static int adapter_up(struct adapter *adapter)
enable_rx(adapter);
t4vf_sge_start(adapter);
- /* Initialize hash mac addr list*/
- INIT_LIST_HEAD(&adapter->mac_hlist);
return 0;
}
@@ -2324,19 +2323,7 @@ static int resources_show(struct seq_file *seq, void *v)
return 0;
}
-
-static int resources_open(struct inode *inode, struct file *file)
-{
- return single_open(file, resources_show, inode->i_private);
-}
-
-static const struct file_operations resources_proc_fops = {
- .owner = THIS_MODULE,
- .open = resources_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(resources);
/*
* Show Virtual Interfaces.
@@ -2420,7 +2407,7 @@ static struct cxgb4vf_debugfs_entry debugfs_files[] = {
{ "mboxlog", 0444, &mboxlog_fops },
{ "sge_qinfo", 0444, &sge_qinfo_debugfs_fops },
{ "sge_qstats", 0444, &sge_qstats_proc_fops },
- { "resources", 0444, &resources_proc_fops },
+ { "resources", 0444, &resources_fops },
{ "interfaces", 0444, &interfaces_proc_fops },
};
@@ -3036,6 +3023,9 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
if (err)
goto err_unmap_bar;
+ /* Initialize hash mac addr list */
+ INIT_LIST_HEAD(&adapter->mac_hlist);
+
/*
* Allocate our "adapter ports" and stitch everything together.
*/
@@ -3287,6 +3277,7 @@ err_disable_device:
static void cxgb4vf_pci_remove(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
+ struct hash_mac_addr *entry, *tmp;
/*
* Tear down driver state associated with device.
@@ -3337,6 +3328,11 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
kfree(adapter->mbox_log);
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
kfree(adapter);
}
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index ec0b545197e2..e9a0213b08c4 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -23,7 +23,7 @@ config CS89x0
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file
- <file:Documentation/networking/cs89x0.txt>.
+ <file:Documentation/networking/device_drivers/cirrus/cs89x0.txt>.
To compile this driver as a module, choose M here. The module
will be called cs89x0.
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f42f7a6e1559..ebd5c2cf1efe 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -241,7 +241,7 @@ static int enic_set_ringparam(struct net_device *netdev,
}
enic_init_vnic_resources(enic);
if (running) {
- err = dev_open(netdev);
+ err = dev_open(netdev, NULL);
if (err)
goto err_out;
}
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 1003201b5d80..264e9b413e94 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -113,7 +113,7 @@ config DE4X5
These include the DE425, DE434, DE435, DE450 and DE500 models. If
you have a network card of this type, say Y. More specific
information is contained in
- <file:Documentation/networking/de4x5.txt>.
+ <file:Documentation/networking/device_drivers/dec/de4x5.txt>.
To compile this driver as a module, choose M here. The module will
be called de4x5.
@@ -137,7 +137,7 @@ config DM9102
This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
Davicom (<http://www.davicom.com.tw/>). If you have such a network
(Ethernet) card, say Y. Some information is contained in the file
- <file:Documentation/networking/dmfe.txt>.
+ <file:Documentation/networking/device_drivers/dec/dmfe.txt>.
To compile this driver as a module, choose M here. The module will
be called dmfe.
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index f0536b16b3c3..d8d423f22c4f 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1881,7 +1881,7 @@ Compile command:
gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
-Read Documentation/networking/dl2k.txt for details.
+Read Documentation/networking/device_drivers/dlink/dl2k.txt for details.
*/
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c5ad7a4f4d83..852f5bfe5f6d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -796,7 +796,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
u16 vlan_tag;
vlan_tag = skb_vlan_tag_get(skb);
- vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ vlan_prio = skb_vlan_tag_get_prio(skb);
/* If vlan priority provided by OS is NOT in available bmap */
if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
@@ -1049,30 +1049,35 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
struct be_wrb_params
*wrb_params)
{
+ bool insert_vlan = false;
u16 vlan_tag = 0;
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return skb;
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ insert_vlan = true;
+ }
if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
- if (!vlan_tag)
+ if (!insert_vlan) {
vlan_tag = adapter->pvid;
+ insert_vlan = true;
+ }
/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
* skip VLAN insertion
*/
BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
}
- if (vlan_tag) {
+ if (insert_vlan) {
skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
vlan_tag);
if (unlikely(!skb))
return skb;
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
}
/* Insert the outer VLAN, if any */
@@ -4950,7 +4955,7 @@ fw_exit:
}
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
- u16 flags)
+ u16 flags, struct netlink_ext_ack *extack)
{
struct be_adapter *adapter = netdev_priv(dev);
struct nlattr *attr, *br_spec;
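
The be_insert_vlan_in_pkt() hunk above fixes a subtle case: a VLAN tag with ID 0 (priority-only) is legal, so "a tag is present" must be tracked in a flag rather than inferred from a non-zero tag value; the same hunk also switches the open-coded priority shift to the skb_vlan_tag_get_prio() helper. A minimal fragment of the resulting pattern, with illustrative variable names:

        u16 tci = 0;
        bool insert = false;

        if (skb_vlan_tag_present(skb)) {
                tci = skb_vlan_tag_get(skb);    /* may be 0: priority-only tag */
                insert = true;
        }
        if (insert) {
                skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), tci);
                if (skb)
                        __vlan_hwaccel_clear_tag(skb); /* tag now in payload */
        }
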
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6e0f47f2c8a3..f53090cde041 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -51,9 +51,9 @@
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
+#include <linux/phy_fixed.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>
-
#include "fman.h"
#include "fman_port.h"
#include "mac.h"
@@ -2475,6 +2475,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
static int dpaa_phy_init(struct net_device *net_dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mac_device *mac_dev;
struct phy_device *phy_dev;
struct dpaa_priv *priv;
@@ -2491,7 +2492,9 @@ static int dpaa_phy_init(struct net_device *net_dev)
}
/* Remove any features not supported by the controller */
- phy_dev->supported &= mac_dev->if_support;
+ ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+
phy_support_asym_pause(phy_dev);
mac_dev->phy_dev = phy_dev;
@@ -2613,6 +2616,7 @@ static const struct net_device_ops dpaa_ops = {
.ndo_stop = dpaa_eth_stop,
.ndo_tx_timeout = dpaa_tx_timeout,
.ndo_get_stats64 = dpaa_get_stats64,
+ .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = dpaa_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = dpaa_set_rx_mode,
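
dpaa_phy_init() above shows the new ethtool link-mode handling: phydev->supported is now a bitmap rather than a u32, so legacy capability masks must be converted before being combined. A short sketch, where legacy_if_support stands in for a driver-specific u32 mask:

        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

        ethtool_convert_legacy_u32_to_link_mode(mask, legacy_if_support);
        linkmode_and(phy_dev->supported, phy_dev->supported, mask);
        linkmode_copy(phy_dev->advertising, phy_dev->supported);
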
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 13d6e2272ece..62497119c85f 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -529,6 +529,75 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
return 0;
}
+static int dpaa_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ struct qman_portal *portal;
+ u32 period;
+ u8 thresh;
+
+ portal = qman_get_affine_portal(smp_processor_id());
+ qman_portal_get_iperiod(portal, &period);
+ qman_dqrr_get_ithresh(portal, &thresh);
+
+ c->rx_coalesce_usecs = period;
+ c->rx_max_coalesced_frames = thresh;
+ c->use_adaptive_rx_coalesce = false;
+
+ return 0;
+}
+
+static int dpaa_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *c)
+{
+ const cpumask_t *cpus = qman_affine_cpus();
+ bool needs_revert[NR_CPUS] = {false};
+ struct qman_portal *portal;
+ u32 period, prev_period;
+ u8 thresh, prev_thresh;
+ int cpu, res;
+
+ if (c->use_adaptive_rx_coalesce)
+ return -EINVAL;
+
+ period = c->rx_coalesce_usecs;
+ thresh = c->rx_max_coalesced_frames;
+
+ /* save previous values */
+ portal = qman_get_affine_portal(smp_processor_id());
+ qman_portal_get_iperiod(portal, &prev_period);
+ qman_dqrr_get_ithresh(portal, &prev_thresh);
+
+ /* set new values */
+ for_each_cpu(cpu, cpus) {
+ portal = qman_get_affine_portal(cpu);
+ res = qman_portal_set_iperiod(portal, period);
+ if (res)
+ goto revert_values;
+ res = qman_dqrr_set_ithresh(portal, thresh);
+ if (res) {
+ qman_portal_set_iperiod(portal, prev_period);
+ goto revert_values;
+ }
+ needs_revert[cpu] = true;
+ }
+
+ return 0;
+
+revert_values:
+ /* restore previous values */
+ for_each_cpu(cpu, cpus) {
+ if (!needs_revert[cpu])
+ continue;
+ portal = qman_get_affine_portal(cpu);
+ /* restoring the previous values cannot fail, ignore the return value */

+ qman_portal_set_iperiod(portal, prev_period);
+ qman_dqrr_set_ithresh(portal, prev_thresh);
+ }
+
+ return res;
+}
+
const struct ethtool_ops dpaa_ethtool_ops = {
.get_drvinfo = dpaa_get_drvinfo,
.get_msglevel = dpaa_get_msglevel,
@@ -545,4 +614,6 @@ const struct ethtool_ops dpaa_ethtool_ops = {
.get_rxnfc = dpaa_get_rxnfc,
.set_rxnfc = dpaa_set_rxnfc,
.get_ts_info = dpaa_get_ts_info,
+ .get_coalesce = dpaa_get_coalesce,
+ .set_coalesce = dpaa_set_coalesce,
};
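
With dpaa_{get,set}_coalesce wired up above, the standard ethtool interfaces work against this driver. A self-contained userspace sketch (not part of the patch; "eth0" is a placeholder interface name) querying the Rx coalescing parameters through the classic ioctl:

        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <sys/socket.h>
        #include <net/if.h>
        #include <linux/ethtool.h>
        #include <linux/sockios.h>

        int main(void)
        {
                struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
                struct ifreq ifr;
                int fd = socket(AF_INET, SOCK_DGRAM, 0);

                if (fd < 0)
                        return 1;
                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
                ifr.ifr_data = (void *)&ec;
                if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                        perror("ETHTOOL_GCOALESCE");
                        close(fd);
                        return 1;
                }
                printf("rx-usecs=%u rx-frames=%u\n",
                       ec.rx_coalesce_usecs, ec.rx_max_coalesced_frames);
                close(fd);
                return 0;
        }

This is equivalent to `ethtool -c eth0` for the two fields the driver exposes.
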
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 88f7acce38dc..1ca9a18139ec 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -13,7 +13,8 @@
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/mc.h>
-
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <net/sock.h>
#include "dpaa2-eth.h"
@@ -86,7 +87,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
addr = dpaa2_sg_get_addr(&sgt[i]);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
skb_free_frag(sg_vaddr);
if (dpaa2_sg_is_final(&sgt[i]))
@@ -144,7 +145,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
sg_addr = dpaa2_sg_get_addr(sge);
sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
sg_length = dpaa2_sg_get_len(sge);
@@ -199,12 +200,148 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
return skb;
}
+/* Free buffers acquired from the buffer pool, or which were meant
+ * to be released back into the pool
+ */
+static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ void *vaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
+ dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ skb_free_frag(vaddr);
+ }
+}
+
+static void xdp_release_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
+{
+ int err;
+
+ ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
+ if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
+ return;
+
+ while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
+ ch->xdp.drop_bufs,
+ ch->xdp.drop_cnt)) == -EBUSY)
+ cpu_relax();
+
+ if (err) {
+ free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
+ ch->buf_count -= ch->xdp.drop_cnt;
+ }
+
+ ch->xdp.drop_cnt = 0;
+}
+
+static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
+{
+ struct dpaa2_eth_fq *fq;
+ struct dpaa2_faead *faead;
+ u32 ctrl, frc;
+ int i, err;
+
+ /* Mark the egress frame hardware annotation area as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
+ dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
+
+ /* Instruct hardware to release the FD buffer directly into
+ * the buffer pool once transmission is completed, instead of
+ * sending a Tx confirmation frame to us
+ */
+ ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
+ faead = dpaa2_get_faead(buf_start, false);
+ faead->ctrl = cpu_to_le32(ctrl);
+ faead->conf_fqid = 0;
+
+ fq = &priv->fq[queue_id];
+ for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
+ err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+ priv->tx_qdid, 0,
+ fq->tx_qdbin, fd);
+ if (err != -EBUSY)
+ break;
+ }
+
+ return err;
+}
+
+static u32 run_xdp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *rx_fq,
+ struct dpaa2_fd *fd, void *vaddr)
+{
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct bpf_prog *xdp_prog;
+ struct xdp_buff xdp;
+ u32 xdp_act = XDP_PASS;
+ int err;
+
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+
+ rcu_read_lock();
+
+ xdp_prog = READ_ONCE(ch->xdp.prog);
+ if (!xdp_prog)
+ goto out;
+
+ xdp.data = vaddr + dpaa2_fd_get_offset(fd);
+ xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
+ xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+ xdp_set_data_meta_invalid(&xdp);
+
+ xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+ /* xdp.data pointer may have changed */
+ dpaa2_fd_set_offset(fd, xdp.data - vaddr);
+ dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
+
+ switch (xdp_act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
+ if (err) {
+ xdp_release_buf(priv, ch, addr);
+ percpu_stats->tx_errors++;
+ ch->stats.xdp_tx_err++;
+ } else {
+ percpu_stats->tx_packets++;
+ percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.xdp_tx++;
+ }
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(xdp_act);
+ /* fall through */
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+ /* fall through */
+ case XDP_DROP:
+ xdp_release_buf(priv, ch, addr);
+ ch->stats.xdp_drop++;
+ break;
+ }
+
+out:
+ rcu_read_unlock();
+ return xdp_act;
+}
+
/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
- struct napi_struct *napi,
- u16 queue_id)
+ struct dpaa2_eth_fq *fq)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
u8 fd_format = dpaa2_fd_get_format(fd);
@@ -216,12 +353,14 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct dpaa2_fas *fas;
void *buf_data;
u32 status = 0;
+ u32 xdp_act;
/* Tracing point */
trace_dpaa2_rx_fd(priv->net_dev, fd);
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
- dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
fas = dpaa2_get_fas(vaddr, false);
prefetch(fas);
@@ -232,8 +371,21 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) {
+ xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+ if (xdp_act != XDP_PASS) {
+ percpu_stats->rx_packets++;
+ percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ return;
+ }
+
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
skb = build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
+ WARN_ON(priv->xdp_prog);
+
+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
skb = build_frag_skb(priv, ch, buf_data);
skb_free_frag(vaddr);
percpu_extras->rx_sg_frames++;
@@ -267,12 +419,12 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
}
skb->protocol = eth_type_trans(skb, priv->net_dev);
- skb_record_rx_queue(skb, queue_id);
+ skb_record_rx_queue(skb, fq->flowid);
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
- napi_gro_receive(napi, skb);
+ napi_gro_receive(&ch->napi, skb);
return;
@@ -289,7 +441,7 @@ err_frame_format:
* Observance of NAPI budget is not our concern, leaving that to the caller.
*/
static int consume_frames(struct dpaa2_eth_channel *ch,
- enum dpaa2_eth_fq_type *type)
+ struct dpaa2_eth_fq **src)
{
struct dpaa2_eth_priv *priv = ch->priv;
struct dpaa2_eth_fq *fq = NULL;
@@ -312,7 +464,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
fd = dpaa2_dq_fd(dq);
fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
- fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
+ fq->consume(priv, ch, fd, fq);
cleaned++;
} while (!is_last);
@@ -320,13 +472,12 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
return 0;
fq->stats.frames += cleaned;
- ch->stats.frames += cleaned;
/* A dequeue operation only pulls frames from a single queue
- * into the store. Return the frame queue type as an out param.
+ * into the store. Return the frame queue as an out param.
*/
- if (type)
- *type = fq->type;
+ if (src)
+ *src = fq;
return cleaned;
}
@@ -571,8 +722,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
struct dpaa2_eth_fq *fq;
+ struct netdev_queue *nq;
u16 queue_mapping;
unsigned int needed_headroom;
+ u32 fd_len;
int err, i;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
@@ -644,8 +797,12 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
/* Clean up everything, including freeing the skb */
free_tx_fd(priv, &fd);
} else {
+ fd_len = dpaa2_fd_get_len(&fd);
percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+ percpu_stats->tx_bytes += fd_len;
+
+ nq = netdev_get_tx_queue(net_dev, queue_mapping);
+ netdev_tx_sent_queue(nq, fd_len);
}
return NETDEV_TX_OK;
@@ -661,11 +818,11 @@ err_alloc_headroom:
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch __always_unused,
const struct dpaa2_fd *fd,
- struct napi_struct *napi __always_unused,
- u16 queue_id __always_unused)
+ struct dpaa2_eth_fq *fq)
{
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
+ u32 fd_len = dpaa2_fd_get_len(fd);
u32 fd_errors;
/* Tracing point */
@@ -673,7 +830,10 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
percpu_extras->tx_conf_frames++;
- percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+ percpu_extras->tx_conf_bytes += fd_len;
+
+ fq->dq_frames++;
+ fq->dq_bytes += fd_len;
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
@@ -735,23 +895,6 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
return 0;
}
-/* Free buffers acquired from the buffer pool or which were meant to
- * be released in the pool
- */
-static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
-{
- struct device *dev = priv->net_dev->dev.parent;
- void *vaddr;
- int i;
-
- for (i = 0; i < count; i++) {
- vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
- dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
- skb_free_frag(vaddr);
- }
-}
-
/* Perform a single release command to add buffers
* to the specified buffer pool
*/
@@ -775,7 +918,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
buf = PTR_ALIGN(buf, priv->rx_buf_align);
addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(dev, addr)))
goto err_map;
@@ -934,8 +1077,9 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_channel *ch;
struct dpaa2_eth_priv *priv;
int rx_cleaned = 0, txconf_cleaned = 0;
- enum dpaa2_eth_fq_type type = 0;
- int store_cleaned;
+ struct dpaa2_eth_fq *fq, *txc_fq = NULL;
+ struct netdev_queue *nq;
+ int store_cleaned, work_done;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
@@ -949,18 +1093,25 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
/* Refill pool if appropriate */
refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch, &type);
- if (type == DPAA2_RX_FQ)
+ store_cleaned = consume_frames(ch, &fq);
+ if (!store_cleaned)
+ break;
+ if (fq->type == DPAA2_RX_FQ) {
rx_cleaned += store_cleaned;
- else
+ } else {
txconf_cleaned += store_cleaned;
+ /* We have a single Tx conf FQ on this channel */
+ txc_fq = fq;
+ }
/* If we either consumed the whole NAPI budget with Rx frames
* or we reached the Tx confirmations threshold, we're done.
*/
if (rx_cleaned >= budget ||
- txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI)
- return budget;
+ txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
+ work_done = budget;
+ goto out;
+ }
} while (store_cleaned);
/* We didn't consume the entire budget, so finish napi and
@@ -974,7 +1125,18 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
ch->nctx.desired_cpu);
- return max(rx_cleaned, 1);
+ work_done = max(rx_cleaned, 1);
+
+out:
+ if (txc_fq) {
+ nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
+ netdev_tx_completed_queue(nq, txc_fq->dq_frames,
+ txc_fq->dq_bytes);
+ txc_fq->dq_frames = 0;
+ txc_fq->dq_bytes = 0;
+ }
+
+ return work_done;
}
static void enable_ch_napi(struct dpaa2_eth_priv *priv)
@@ -1400,6 +1562,174 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return -EINVAL;
}
+static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
+{
+ int mfl, linear_mfl;
+
+ mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+ linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
+ dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
+
+ if (mfl > linear_mfl) {
+ netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
+ linear_mfl - VLAN_ETH_HLEN);
+ return false;
+ }
+
+ return true;
+}
+
+static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
+{
+ int mfl, err;
+
+ /* We enforce a maximum Rx frame length based on MTU only if we have
+ * an XDP program attached (in order to avoid Rx S/G frames).
+ * Otherwise, we accept all incoming frames as long as they are not
+ * larger than the maximum size supported by the hardware.
+ */
+ if (has_xdp)
+ mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
+ else
+ mfl = DPAA2_ETH_MFL;
+
+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (!priv->xdp_prog)
+ goto out;
+
+ if (!xdp_mtu_valid(priv, new_mtu))
+ return -EINVAL;
+
+ err = set_rx_mfl(priv, new_mtu, true);
+ if (err)
+ return err;
+
+out:
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
+{
+ struct dpni_buffer_layout buf_layout = {0};
+ int err;
+
+ err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
+ return err;
+ }
+
+ /* Reserve extra headroom for XDP header size changes */
+ buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
+ (has_xdp ? XDP_PACKET_HEADROOM : 0);
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+ err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX, &buf_layout);
+ if (err) {
+ netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_eth_channel *ch;
+ struct bpf_prog *old;
+ bool up, need_update;
+ int i, err;
+
+ if (prog && !xdp_mtu_valid(priv, dev->mtu))
+ return -EINVAL;
+
+ if (prog) {
+ prog = bpf_prog_add(prog, priv->num_channels);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ }
+
+ up = netif_running(dev);
+ need_update = (!!priv->xdp_prog != !!prog);
+
+ if (up)
+ dpaa2_eth_stop(dev);
+
+ /* While in XDP mode, enforce a maximum Rx frame size based on MTU.
+ * Also, when switching between XDP and non-XDP modes we need to
+ * reconfigure our Rx buffer layout. The buffer pool was drained in
+ * dpaa2_eth_stop(), so we are sure no old-format buffers will be
+ * used from now on.
+ */
+ if (need_update) {
+ err = set_rx_mfl(priv, dev->mtu, !!prog);
+ if (err)
+ goto out_err;
+ err = update_rx_buffer_headroom(priv, !!prog);
+ if (err)
+ goto out_err;
+ }
+
+ old = xchg(&priv->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ for (i = 0; i < priv->num_channels; i++) {
+ ch = priv->channel[i];
+ old = xchg(&ch->xdp.prog, prog);
+ if (old)
+ bpf_prog_put(old);
+ }
+
+ if (up) {
+ err = dpaa2_eth_open(dev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+
+out_err:
+ if (prog)
+ bpf_prog_sub(prog, priv->num_channels);
+ if (up)
+ dpaa2_eth_open(dev);
+
+ return err;
+}
+
+static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return setup_xdp(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct net_device_ops dpaa2_eth_ops = {
.ndo_open = dpaa2_eth_open,
.ndo_start_xmit = dpaa2_eth_tx,
@@ -1409,6 +1739,8 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
.ndo_set_features = dpaa2_eth_set_features,
.ndo_do_ioctl = dpaa2_eth_ioctl,
+ .ndo_change_mtu = dpaa2_eth_change_mtu,
+ .ndo_bpf = dpaa2_eth_xdp,
};
static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -1434,8 +1766,11 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
FSL_MC_POOL_DPCON, &dpcon);
if (err) {
- dev_info(dev, "Not enough DPCONs, will go on as-is\n");
- return NULL;
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+ return ERR_PTR(err);
}
err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
@@ -1493,8 +1828,10 @@ alloc_channel(struct dpaa2_eth_priv *priv)
return NULL;
channel->dpcon = setup_dpcon(priv);
- if (!channel->dpcon)
+ if (IS_ERR_OR_NULL(channel->dpcon)) {
+ err = PTR_ERR(channel->dpcon);
goto err_setup;
+ }
err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
&attr);
@@ -1513,7 +1850,7 @@ err_get_attr:
free_dpcon(priv, channel->dpcon);
err_setup:
kfree(channel);
- return NULL;
+ return ERR_PTR(err);
}
static void free_channel(struct dpaa2_eth_priv *priv,
@@ -1547,10 +1884,11 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
for_each_online_cpu(i) {
/* Try to allocate a channel */
channel = alloc_channel(priv);
- if (!channel) {
- dev_info(dev,
- "No affine channel for cpu %d and above\n", i);
- err = -ENODEV;
+ if (IS_ERR_OR_NULL(channel)) {
+ err = PTR_ERR(channel);
+ if (err != -EPROBE_DEFER)
+ dev_info(dev,
+ "No affine channel for cpu %d and above\n", i);
goto err_alloc_ch;
}
@@ -1597,7 +1935,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
/* Stop if we already have enough channels to accommodate all
* RX and TX conf queues
*/
- if (priv->num_channels == dpaa2_eth_queue_count(priv))
+ if (priv->num_channels == priv->dpni_attrs.num_queues)
break;
}
@@ -1608,9 +1946,12 @@ err_set_cdan:
err_service_reg:
free_channel(priv, channel);
err_alloc_ch:
+ if (err == -EPROBE_DEFER)
+ return err;
+
if (cpumask_empty(&priv->dpio_cpumask)) {
dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
- return err;
+ return -ENODEV;
}
dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
@@ -1732,7 +2073,10 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv)
err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
&dpbp_dev);
if (err) {
- dev_err(dev, "DPBP device allocation failed\n");
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "DPBP device allocation failed\n");
return err;
}
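
run_xdp() above handles the standard verdicts (XDP_PASS, XDP_TX, XDP_DROP, with XDP_ABORTED traced as an exception). For reference, the smallest possible BPF-side counterpart (not part of the patch; built with clang -O2 -target bpf):

        #include <linux/bpf.h>

        #define SEC(name) __attribute__((section(name), used))

        SEC("xdp")
        int xdp_drop_all(struct xdp_md *ctx)
        {
                return XDP_DROP;        /* or XDP_PASS / XDP_TX */
        }

        char _license[] SEC("license") = "GPL";

Attached through the new .ndo_bpf hook, every frame then takes the xdp_release_buf() path and shows up in the new "xdp drop" counter.
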
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 452a8e9c4f0e..69c965de192b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -139,7 +139,9 @@ struct dpaa2_faead {
};
#define DPAA2_FAEAD_A2V 0x20000000
+#define DPAA2_FAEAD_A4V 0x08000000
#define DPAA2_FAEAD_UPDV 0x00001000
+#define DPAA2_FAEAD_EBDDV 0x00002000
#define DPAA2_FAEAD_UPD 0x00000010
/* Accessors for the hardware annotation fields that we use */
@@ -243,12 +245,14 @@ struct dpaa2_eth_fq_stats {
struct dpaa2_eth_ch_stats {
/* Volatile dequeues retried due to portal busy */
__u64 dequeue_portal_busy;
- /* Number of CDANs; useful to estimate avg NAPI len */
- __u64 cdan;
- /* Number of frames received on queues from this channel */
- __u64 frames;
/* Pull errors */
__u64 pull_err;
+ /* Number of CDANs; useful to estimate avg NAPI len */
+ __u64 cdan;
+ /* XDP counters */
+ __u64 xdp_drop;
+ __u64 xdp_tx;
+ __u64 xdp_tx_err;
};
/* Maximum number of queues associated with a DPNI */
@@ -271,17 +275,24 @@ struct dpaa2_eth_fq {
u32 tx_qdbin;
u16 flowid;
int target_cpu;
+ u32 dq_frames;
+ u32 dq_bytes;
struct dpaa2_eth_channel *channel;
enum dpaa2_eth_fq_type type;
void (*consume)(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
- struct napi_struct *napi,
- u16 queue_id);
+ struct dpaa2_eth_fq *fq);
struct dpaa2_eth_fq_stats stats;
};
+struct dpaa2_eth_ch_xdp {
+ struct bpf_prog *prog;
+ u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD];
+ int drop_cnt;
+};
+
struct dpaa2_eth_channel {
struct dpaa2_io_notification_ctx nctx;
struct fsl_mc_device *dpcon;
@@ -293,6 +304,7 @@ struct dpaa2_eth_channel {
struct dpaa2_eth_priv *priv;
int buf_count;
struct dpaa2_eth_ch_stats stats;
+ struct dpaa2_eth_ch_xdp xdp;
};
struct dpaa2_eth_dist_fields {
@@ -352,6 +364,7 @@ struct dpaa2_eth_priv {
u64 rx_hash_fields;
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
+ struct bpf_prog *xdp_prog;
};
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
@@ -434,9 +447,10 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
DPAA2_ETH_RX_HWA_SIZE;
}
+/* We have exactly one {Rx, Tx conf} queue per channel */
static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
{
- return priv->dpni_attrs.num_queues;
+ return priv->num_channels;
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
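
The dq_frames/dq_bytes fields added above accumulate Tx-confirmation totals so the driver can participate in byte queue limits (BQL). The contract, in a generic fragment with hypothetical variable names, is that every byte reported at transmit time must later be reported as completed on the same queue:

        /* transmit path, once the frame is accepted by hardware */
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), bytes_sent);

        /* completion path, e.g. at the end of NAPI poll */
        netdev_tx_completed_queue(netdev_get_tx_queue(dev, queue),
                                  frames_completed, bytes_completed);
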
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 26bd5a2bd8ed..a7389e722c49 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -45,6 +45,15 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
"[drv] dequeue portal busy",
"[drv] channel pull errors",
"[drv] cdan",
+ "[drv] xdp drop",
+ "[drv] xdp tx",
+ "[drv] xdp tx errors",
+ /* FQ stats */
+ "[qbman] rx pending frames",
+ "[qbman] rx pending bytes",
+ "[qbman] tx conf pending frames",
+ "[qbman] tx conf pending bytes",
+ "[qbman] buffer count",
};
#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)
@@ -174,8 +183,10 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
int j, k, err;
int num_cnt;
union dpni_statistics dpni_stats;
- u64 cdan = 0;
- u64 portal_busy = 0, pull_err = 0;
+ u32 fcnt, bcnt;
+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
+ u32 buf_cnt;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_drv_stats *extras;
struct dpaa2_eth_ch_stats *ch_stats;
@@ -212,16 +223,43 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
}
i += j;
- for (j = 0; j < priv->num_channels; j++) {
- ch_stats = &priv->channel[j]->stats;
- cdan += ch_stats->cdan;
- portal_busy += ch_stats->dequeue_portal_busy;
- pull_err += ch_stats->pull_err;
+ /* Per-channel stats */
+ for (k = 0; k < priv->num_channels; k++) {
+ ch_stats = &priv->channel[k]->stats;
+ for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
+ *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
}
+ i += j;
+
+ for (j = 0; j < priv->num_fqs; j++) {
+ /* Collect the instantaneous frame and byte counts for each FQ */
+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
+ &fcnt, &bcnt);
+ if (err) {
+ netdev_warn(net_dev, "FQ query error %d", err);
+ return;
+ }
- *(data + i++) = portal_busy;
- *(data + i++) = pull_err;
- *(data + i++) = cdan;
+ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
+ fcnt_tx_total += fcnt;
+ bcnt_tx_total += bcnt;
+ } else {
+ fcnt_rx_total += fcnt;
+ bcnt_rx_total += bcnt;
+ }
+ }
+
+ *(data + i++) = fcnt_rx_total;
+ *(data + i++) = bcnt_rx_total;
+ *(data + i++) = fcnt_tx_total;
+ *(data + i++) = bcnt_tx_total;
+
+ err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
+ if (err) {
+ netdev_warn(net_dev, "Buffer count query error %d\n", err);
+ return;
+ }
+ *(data + i++) = buf_cnt;
}
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 84b942b1eccc..9b150db3b510 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -140,7 +140,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
if (err) {
- dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
goto err_exit;
}
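
The -ENXIO to -EPROBE_DEFER translation above recurs throughout this series (see also setup_dpcon() and setup_dpbp()): the fsl-mc allocator returns -ENXIO while the bus has not yet populated its object pools, and mapping that onto -EPROBE_DEFER lets the driver core retry the probe later instead of failing it outright. Condensed sketch:

        err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
        if (err)
                return err == -ENXIO ? -EPROBE_DEFER : err;
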
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index bf80855dd0dd..f79e57f735b3 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -531,7 +531,6 @@ struct fec_enet_private {
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
- int mii_timeout;
uint phy_speed;
phy_interface_t phy_interface;
struct device_node *phy_node;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 6db69ba30dcd..ae0f88bce9aa 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1714,12 +1714,6 @@ static void fec_enet_adjust_link(struct net_device *ndev)
struct phy_device *phy_dev = ndev->phydev;
int status_change = 0;
- /* Prevent a state halted on mii error */
- if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
- phy_dev->state = PHY_RESUMING;
- return;
- }
-
/*
* If the netdev is down, or is going down, we're not interested
* in link state events, so just mark our idea of the link as down
@@ -1779,7 +1773,6 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (ret < 0)
return ret;
- fep->mii_timeout = 0;
reinit_completion(&fep->mdio_done);
/* start a read op */
@@ -1791,7 +1784,6 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
time_left = wait_for_completion_timeout(&fep->mdio_done,
usecs_to_jiffies(FEC_MII_TIMEOUT));
if (time_left == 0) {
- fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO read timeout\n");
ret = -ETIMEDOUT;
goto out;
@@ -1820,7 +1812,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
else
ret = 0;
- fep->mii_timeout = 0;
reinit_completion(&fep->mdio_done);
/* start a write op */
@@ -1833,7 +1824,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
time_left = wait_for_completion_timeout(&fep->mdio_done,
usecs_to_jiffies(FEC_MII_TIMEOUT));
if (time_left == 0) {
- fep->mii_timeout = 1;
netdev_err(fep->netdev, "MDIO write timeout\n");
ret = -ETIMEDOUT;
}
@@ -2001,8 +1991,6 @@ static int fec_enet_mii_init(struct platform_device *pdev)
return -ENOENT;
}
- fep->mii_timeout = 0;
-
/*
* Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
*
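
With the PHY_HALTED workaround removed from fec_enet_adjust_link(), the mii_timeout flag has no remaining reader, so an MDIO timeout only needs to be reported, not latched in driver state. The surviving pattern, sketched:

        time_left = wait_for_completion_timeout(&fep->mdio_done,
                                                usecs_to_jiffies(FEC_MII_TIMEOUT));
        if (!time_left) /* returns 0 on timeout, remaining jiffies otherwise */
                return -ETIMEDOUT;
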
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index c415ac67cb7b..e80fedb27cee 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2786,7 +2786,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
if (!muram_node) {
dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
__func__);
- goto fman_node_put;
+ goto fman_free;
}
err = of_address_to_resource(muram_node, 0,
@@ -2795,11 +2795,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
of_node_put(muram_node);
dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
__func__, err);
- goto fman_node_put;
+ goto fman_free;
}
of_node_put(muram_node);
- of_node_put(fm_node);
err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
"fman", fman);
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index d79e4e009d63..71f4205f14e7 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -393,7 +393,7 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
*/
/* get local capabilities */
- lcl_adv = ethtool_adv_to_lcl_adv_t(phy_dev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
/* get link partner capabilities */
rmt_adv = 0;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 82722d05fedb..88a396fd242f 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -473,7 +473,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
if (data->get_tbipa) {
for_each_child_of_node(np, tbi) {
- if (strcmp(tbi->type, "tbi-phy") == 0) {
+ if (of_node_is_type(tbi, "tbi-phy")) {
dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n",
tbi);
break;
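
The fsl_pq_mdio and gianfar hunks switch to the new NULL-safe OF accessors, part of hiding struct device_node internals from drivers: of_node_is_type(np, s) replaces strcmp(np->type, s) == 0, and of_node_name_eq(np, s) replaces !of_node_cmp(np->name, s). A sketch of the counting idiom used below in gianfar:

        static int count_queue_groups(struct device_node *np)
        {
                struct device_node *child;
                int num = 0;

                for_each_available_child_of_node(np, child)
                        if (of_node_name_eq(child, "queue-group"))
                                num++;
                return num;
        }
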
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3c8da1a18ba0..45fcc96be90e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -500,6 +500,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
.ndo_get_stats = gfar_get_stats,
+ .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -720,7 +721,7 @@ static int gfar_of_group_count(struct device_node *np)
int num = 0;
for_each_available_child_of_node(np, child)
- if (!of_node_cmp(child->name, "queue-group"))
+ if (of_node_name_eq(child, "queue-group"))
num++;
return num;
@@ -838,7 +839,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
/* Parse and initialize group specific information */
if (priv->mode == MQ_MG_MODE) {
for_each_available_child_of_node(np, child) {
- if (of_node_cmp(child->name, "queue-group"))
+ if (!of_node_name_eq(child, "queue-group"))
continue;
err = gfar_parse_group(child, priv, model);
@@ -1784,14 +1785,20 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
*/
static int init_phy(struct net_device *dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
- uint gigabit_support =
- priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
- GFAR_SUPPORTED_GBIT : 0;
phy_interface_t interface;
struct phy_device *phydev;
struct ethtool_eee edata;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
+
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
@@ -1809,8 +1816,8 @@ static int init_phy(struct net_device *dev)
gfar_configure_serdes(dev);
/* Remove any features not supported by the controller */
- phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
- phydev->advertising = phydev->supported;
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
/* Add support for flow control */
phy_support_asym_pause(phydev);
@@ -3656,7 +3663,7 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
val |= MACCFG1_TX_FLOW;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 0d76e15cd6dd..241325c35cb4 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1134,11 +1134,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
prio = vlan_tci_prio(rule);
prio_mask = vlan_tci_priom(rule);
- if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
- vlan |= RQFPR_CFI;
- vlan_mask |= RQFPR_CFI;
- } else if (cfi != VLAN_TAG_PRESENT &&
- cfi_mask == VLAN_TAG_PRESENT) {
+ if (cfi_mask) {
+ if (cfi)
+ vlan |= RQFPR_CFI;
vlan_mask |= RQFPR_CFI;
}
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 32e02700feaa..c3d539e209ed 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -30,6 +30,7 @@
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/phy_fixed.h>
#include <linux/workqueue.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -1742,12 +1743,7 @@ static int init_phy(struct net_device *dev)
if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
uec_configure_serdes(dev);
- phy_set_max_speed(phydev, SPEED_100);
-
- if (priv->max_speed == SPEED_1000)
- phydev->supported |= ADVERTISED_1000baseT_Full;
-
- phydev->advertising = phydev->supported;
+ phy_set_max_speed(phydev, priv->max_speed);
priv->phydev = phydev;
@@ -3681,6 +3677,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
.ndo_stop = ucc_geth_close,
.ndo_start_xmit = ucc_geth_start_xmit,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = ucc_geth_set_mac_addr,
.ndo_set_rx_mode = ucc_geth_set_multi,
.ndo_tx_timeout = ucc_geth_timeout,
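
The ucc_geth init_phy() cleanup above collapses the manual supported-mask surgery into a single call: phy_set_max_speed() prunes every link mode faster than the given speed from the PHY's supported set. Sketch:

        /* cap the PHY at the MAC's capability, e.g. SPEED_100 or SPEED_1000 */
        phy_set_max_speed(phydev, priv->max_speed);
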
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 25152715396b..fee4664c9189 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -118,6 +118,7 @@ config HNS3_ENET
tristate "Hisilicon HNS3 Ethernet Device Support"
default m
depends on 64BIT && PCI
+ depends on INET
---help---
This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index be268dcde8fa..f9a4e76c5a8b 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -915,10 +915,8 @@ static int hip04_mac_probe(struct platform_device *pdev)
}
ret = register_netdev(ndev);
- if (ret) {
- free_netdev(ndev);
+ if (ret)
goto alloc_fail;
- }
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index b52029e26d15..ad1779fc410e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -379,6 +379,9 @@ static void hns_ae_stop(struct hnae_handle *handle)
hns_ae_ring_enable_all(handle, 0);
+ /* Wait for the Rx frame buffer descriptors (FBDs) to drain. */
+ hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
+
(void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index aaf72c055711..1790cdafd9b8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -67,11 +67,14 @@ static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
struct mac_driver *drv = (struct mac_driver *)mac_drv;
/*enable GE rX/tX */
- if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
- if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+ /* enable rx pcs */
+ dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+ }
}
static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
@@ -79,11 +82,14 @@ static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
struct mac_driver *drv = (struct mac_driver *)mac_drv;
/*disable GE rX/tX */
- if ((mode == MAC_COMM_MODE_TX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
- if ((mode == MAC_COMM_MODE_RX) || (mode == MAC_COMM_MODE_RX_AND_TX))
+ if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+ /* disable rx pcs */
+ dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+ }
}
/* hns_gmac_get_en - get port enable
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 3613e400e816..a97228c93831 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -778,6 +778,17 @@ static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
return rc;
}
+static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
+{
+ if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
+ return;
+
+ phy_device_remove(mac_cb->phy_dev);
+ phy_device_free(mac_cb->phy_dev);
+
+ mac_cb->phy_dev = NULL;
+}
+
#define MAC_MEDIA_TYPE_MAX_LEN 16
static const struct {
@@ -1117,7 +1128,11 @@ void hns_mac_uninit(struct dsaf_device *dsaf_dev)
int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
for (i = 0; i < max_port_num; i++) {
+ if (!dsaf_dev->mac_cb[i])
+ continue;
+
dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
+ hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
dsaf_dev->mac_cb[i] = NULL;
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index e557a4ef5996..3b9e74be5fbd 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -935,6 +935,62 @@ static void hns_dsaf_tcam_mc_cfg(
}
/**
+ * hns_dsaf_tcam_uc_cfg_vague - set a unicast TCAM entry with a lookup mask
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: TCAM entry index
+ * @tcam_data: TCAM entry data
+ * @tcam_mask: TCAM lookup mask for the entry
+ * @tcam_uc: unicast entry configuration
+ */
+static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
+ u32 address,
+ struct dsaf_tbl_tcam_data *tcam_data,
+ struct dsaf_tbl_tcam_data *tcam_mask,
+ struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+ hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+ hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+ /* Restore Match Data */
+ tcam_mask->tbl_tcam_data_high = 0xffffffff;
+ tcam_mask->tbl_tcam_data_low = 0xffffffff;
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg_vague - set a multicast TCAM entry with a lookup mask
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: TCAM entry index
+ * @tcam_data: TCAM entry data
+ * @tcam_mask: TCAM lookup mask for the entry
+ * @tcam_mc: multicast entry configuration
+ */
+static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
+ u32 address,
+ struct dsaf_tbl_tcam_data *tcam_data,
+ struct dsaf_tbl_tcam_data *tcam_mask,
+ struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+ hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+ hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+ /* Restore Match Data */
+ tcam_mask->tbl_tcam_data_high = 0xffffffff;
+ tcam_mask->tbl_tcam_data_low = 0xffffffff;
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
* hns_dsaf_tcam_mc_invld - INT
* @dsaf_id: dsa fabric id
* @address
@@ -1493,6 +1549,27 @@ static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
}
/**
+ * hns_dsaf_find_empty_mac_entry_reverse - search the dsa fabric soft
+ * MAC table for an empty entry, starting from the last index
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ int i;
+
+ soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
+ for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
+ /* Search all entries from the end towards the start. */
+ if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+ return i;
+ soft_mac_entry--;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
* hns_dsaf_set_mac_key - set mac key
* @dsaf_dev: dsa fabric device struct pointer
* @mac_key: tcam key pointer
@@ -2166,9 +2243,9 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
- DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 0x80 * (u64)node_num);
+ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
- DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+ DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
/* pfc pause frame statistics stored in dsaf inode*/
if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
@@ -2285,237 +2362,237 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
p[223 + i] = dsaf_read_dev(ddev,
DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
- p[224 + i] = dsaf_read_dev(ddev,
+ p[226 + i] = dsaf_read_dev(ddev,
DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
}
- p[227] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+ p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
j = i * DSAF_COMM_CHN + port;
- p[228 + i] = dsaf_read_dev(ddev,
+ p[230 + i] = dsaf_read_dev(ddev,
DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
}
- p[231] = dsaf_read_dev(ddev,
- DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 4);
+ p[233] = dsaf_read_dev(ddev,
+ DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
/* dsaf inode registers */
for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
j = i * DSAF_COMM_CHN + port;
- p[232 + i] = dsaf_read_dev(ddev,
+ p[234 + i] = dsaf_read_dev(ddev,
DSAF_SBM_CFG_REG_0_REG + j * 0x80);
- p[235 + i] = dsaf_read_dev(ddev,
+ p[237 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
- p[238 + i] = dsaf_read_dev(ddev,
+ p[240 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
- p[241 + i] = dsaf_read_dev(ddev,
+ p[243 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
- p[244 + i] = dsaf_read_dev(ddev,
+ p[246 + i] = dsaf_read_dev(ddev,
DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
- p[245 + i] = dsaf_read_dev(ddev,
+ p[249 + i] = dsaf_read_dev(ddev,
DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
- p[248 + i] = dsaf_read_dev(ddev,
+ p[252 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
- p[251 + i] = dsaf_read_dev(ddev,
+ p[255 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
- p[254 + i] = dsaf_read_dev(ddev,
+ p[258 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
- p[257 + i] = dsaf_read_dev(ddev,
+ p[261 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
- p[260 + i] = dsaf_read_dev(ddev,
+ p[264 + i] = dsaf_read_dev(ddev,
DSAF_SBM_INER_ST_0_REG + j * 0x80);
- p[263 + i] = dsaf_read_dev(ddev,
+ p[267 + i] = dsaf_read_dev(ddev,
DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
- p[266 + i] = dsaf_read_dev(ddev,
+ p[270 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
- p[269 + i] = dsaf_read_dev(ddev,
+ p[273 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
- p[272 + i] = dsaf_read_dev(ddev,
+ p[276 + i] = dsaf_read_dev(ddev,
DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
- p[275 + i] = dsaf_read_dev(ddev,
+ p[279 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
- p[278 + i] = dsaf_read_dev(ddev,
+ p[282 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
- p[281 + i] = dsaf_read_dev(ddev,
+ p[285 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
- p[284 + i] = dsaf_read_dev(ddev,
+ p[288 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
- p[287 + i] = dsaf_read_dev(ddev,
+ p[291 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
- p[290 + i] = dsaf_read_dev(ddev,
+ p[294 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
- p[293 + i] = dsaf_read_dev(ddev,
+ p[297 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
- p[296 + i] = dsaf_read_dev(ddev,
+ p[300 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
- p[299 + i] = dsaf_read_dev(ddev,
+ p[303 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
- p[302 + i] = dsaf_read_dev(ddev,
+ p[306 + i] = dsaf_read_dev(ddev,
DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
- p[305 + i] = dsaf_read_dev(ddev,
+ p[309 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
- p[308 + i] = dsaf_read_dev(ddev,
+ p[312 + i] = dsaf_read_dev(ddev,
DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
}
/* dsaf onode registers */
for (i = 0; i < DSAF_XOD_NUM; i++) {
- p[311 + i] = dsaf_read_dev(ddev,
+ p[315 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
- p[319 + i] = dsaf_read_dev(ddev,
+ p[323 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
- p[327 + i] = dsaf_read_dev(ddev,
+ p[331 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
- p[335 + i] = dsaf_read_dev(ddev,
+ p[339 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
- p[343 + i] = dsaf_read_dev(ddev,
+ p[347 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
- p[351 + i] = dsaf_read_dev(ddev,
+ p[355 + i] = dsaf_read_dev(ddev,
DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
}
- p[359] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
- p[360] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
- p[361] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+ p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+ p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+ p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
j = i * DSAF_COMM_CHN + port;
- p[362 + i] = dsaf_read_dev(ddev,
+ p[366 + i] = dsaf_read_dev(ddev,
DSAF_XOD_GNT_L_0_REG + j * 0x90);
- p[365 + i] = dsaf_read_dev(ddev,
+ p[369 + i] = dsaf_read_dev(ddev,
DSAF_XOD_GNT_H_0_REG + j * 0x90);
- p[368 + i] = dsaf_read_dev(ddev,
+ p[372 + i] = dsaf_read_dev(ddev,
DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
- p[371 + i] = dsaf_read_dev(ddev,
+ p[375 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
- p[374 + i] = dsaf_read_dev(ddev,
+ p[378 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
- p[377 + i] = dsaf_read_dev(ddev,
+ p[381 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
- p[380 + i] = dsaf_read_dev(ddev,
+ p[384 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
- p[383 + i] = dsaf_read_dev(ddev,
+ p[387 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
- p[386 + i] = dsaf_read_dev(ddev,
+ p[390 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
- p[389 + i] = dsaf_read_dev(ddev,
+ p[393 + i] = dsaf_read_dev(ddev,
DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
}
- p[392] = dsaf_read_dev(ddev,
+ p[396] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
- p[393] = dsaf_read_dev(ddev,
+ p[397] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
- p[394] = dsaf_read_dev(ddev,
+ p[398] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
- p[395] = dsaf_read_dev(ddev,
+ p[399] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
- p[396] = dsaf_read_dev(ddev,
+ p[400] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
- p[397] = dsaf_read_dev(ddev,
+ p[401] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
- p[398] = dsaf_read_dev(ddev,
+ p[402] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
- p[399] = dsaf_read_dev(ddev,
+ p[403] = dsaf_read_dev(ddev,
DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
- p[400] = dsaf_read_dev(ddev,
+ p[404] = dsaf_read_dev(ddev,
DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
- p[401] = dsaf_read_dev(ddev,
+ p[405] = dsaf_read_dev(ddev,
DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
- p[402] = dsaf_read_dev(ddev,
+ p[406] = dsaf_read_dev(ddev,
DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
- p[403] = dsaf_read_dev(ddev,
+ p[407] = dsaf_read_dev(ddev,
DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
- p[404] = dsaf_read_dev(ddev,
+ p[408] = dsaf_read_dev(ddev,
DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
/* dsaf voq registers */
for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
j = (i * DSAF_COMM_CHN + port) * 0x90;
- p[405 + i] = dsaf_read_dev(ddev,
+ p[409 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
- p[408 + i] = dsaf_read_dev(ddev,
+ p[412 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
- p[411 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
- p[414 + i] = dsaf_read_dev(ddev,
+ p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+ p[418 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
- p[417 + i] = dsaf_read_dev(ddev,
+ p[421 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
- p[420 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
- p[423 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
- p[426 + i] = dsaf_read_dev(ddev,
+ p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+ p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+ p[430 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
- p[429 + i] = dsaf_read_dev(ddev,
+ p[433 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
- p[432 + i] = dsaf_read_dev(ddev,
+ p[436 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
- p[435 + i] = dsaf_read_dev(ddev,
+ p[439 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
- p[438 + i] = dsaf_read_dev(ddev,
+ p[442 + i] = dsaf_read_dev(ddev,
DSAF_VOQ_BP_ALL_THRD_0_REG + j);
}
/* dsaf tbl registers */
- p[441] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
- p[442] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
- p[443] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
- p[444] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
- p[445] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
- p[446] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
- p[447] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
- p[448] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
- p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
- p[450] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
- p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
- p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
- p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
- p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
- p[455] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
- p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
- p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
- p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
- p[459] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
- p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
- p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
- p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
- p[463] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+ p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+ p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+ p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+ p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+ p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+ p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+ p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+ p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+ p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+ p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+ p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+ p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+ p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+ p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+ p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+ p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+ p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+ p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+ p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+ p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+ p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
j = i * 0x8;
- p[464 + 2 * i] = dsaf_read_dev(ddev,
+ p[468 + 2 * i] = dsaf_read_dev(ddev,
DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
- p[465 + 2 * i] = dsaf_read_dev(ddev,
+ p[469 + 2 * i] = dsaf_read_dev(ddev,
DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
}
- p[480] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
- p[481] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
- p[482] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
- p[483] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
- p[484] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
- p[485] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
- p[486] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
- p[487] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
- p[488] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
- p[489] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
- p[490] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
- p[491] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+ p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+ p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+ p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+ p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+ p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+ p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+ p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+ p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+ p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+ p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+ p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+ p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
/* dsaf other registers */
- p[492] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
- p[493] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
- p[494] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
- p[495] = dsaf_read_dev(ddev,
+ p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+ p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+ p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+ p[499] = dsaf_read_dev(ddev,
DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
- p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
- p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+ p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+ p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
if (!is_ver1)
- p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+ p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
/* mark end of dsaf regs */
- for (i = 499; i < 504; i++)
+ for (i = 503; i < 504; i++)
p[i] = 0xdddddddd;
}
@@ -2673,58 +2750,156 @@ int hns_dsaf_get_regs_count(void)
return DSAF_DUMP_REGS_NUM;
}
-/* Reserve the last TCAM entry for promisc support */
-#define dsaf_promisc_tcam_entry(port) \
- (DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM + (port))
-void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
- u32 port, bool enable)
+static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
{
+ struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
+ struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+ struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
+ struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
- struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
- u16 entry_index;
- struct dsaf_drv_tbl_tcam_key tbl_tcam_data, tbl_tcam_mask;
- struct dsaf_tbl_tcam_mcast_cfg mac_data = {0};
+ struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
+ struct dsaf_drv_mac_single_dest_entry mask_entry;
+ struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct hns_mac_cb *mac_cb;
+ u8 addr[ETH_ALEN] = {0};
+ u8 port_num;
+ u16 mskid;
+
+ /* promisc uses a vague table match with vlanid = 0 & macaddr = 0 */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index != DSAF_INVALID_ENTRY_IDX)
+ return;
+
+ /* put the promisc tcam entries at the end. */
+ /* 1. set promisc unicast vague tcam entry. */
+ entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ dev_err(dsaf_dev->dev,
+ "enable uc promisc failed (port:%#x)\n",
+ port);
+ return;
+ }
+
+ mac_cb = dsaf_dev->mac_cb[port];
+ (void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
+ tbl_tcam_ucast.tbl_ucast_out_port = port_num;
- if ((AE_IS_VER1(dsaf_dev->dsaf_ver)) || HNS_DSAF_IS_DEBUG(dsaf_dev))
+ /* config uc vague table */
+ hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+ &tbl_tcam_mask_uc, &tbl_tcam_ucast);
+
+ /* update software entry */
+ soft_mac_entry = priv->soft_mac_tbl;
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+ /* step back to the START for mc. */
+ soft_mac_entry = priv->soft_mac_tbl;
+
+ /* 2. set promisc multicast vague tcam entry. */
+ entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ dev_err(dsaf_dev->dev,
+ "enable mc promisc failed (port:%#x)\n",
+ port);
return;
+ }
+
+ memset(&mask_entry, 0x0, sizeof(mask_entry));
+ memset(&mask_key, 0x0, sizeof(mask_key));
+ memset(&temp_key, 0x0, sizeof(temp_key));
+ mask_entry.addr[0] = 0x01;
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
+ port, mask_entry.addr);
+ tbl_tcam_mcast.tbl_mcast_item_vld = 1;
+ tbl_tcam_mcast.tbl_mcast_old_en = 0;
- /* find the tcam entry index for promisc */
- entry_index = dsaf_promisc_tcam_entry(port);
-
- memset(&tbl_tcam_data, 0, sizeof(tbl_tcam_data));
- memset(&tbl_tcam_mask, 0, sizeof(tbl_tcam_mask));
-
- /* config key mask */
- if (enable) {
- dsaf_set_field(tbl_tcam_data.low.bits.port_vlan,
- DSAF_TBL_TCAM_KEY_PORT_M,
- DSAF_TBL_TCAM_KEY_PORT_S, port);
- dsaf_set_field(tbl_tcam_mask.low.bits.port_vlan,
- DSAF_TBL_TCAM_KEY_PORT_M,
- DSAF_TBL_TCAM_KEY_PORT_S, 0xf);
-
- /* SUB_QID */
- dsaf_set_bit(mac_data.tbl_mcast_port_msk[0],
- DSAF_SERVICE_NW_NUM, true);
- mac_data.tbl_mcast_item_vld = true; /* item_vld bit */
+ if (port < DSAF_SERVICE_NW_NUM) {
+ mskid = port;
+ } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
+ mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
} else {
- mac_data.tbl_mcast_item_vld = false; /* item_vld bit */
+ dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, port,
+ mask_key.high.val, mask_key.low.val);
+ return;
}
- dev_dbg(dsaf_dev->dev,
- "set_promisc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
- dsaf_dev->ae_dev.name, tbl_tcam_data.high.val,
- tbl_tcam_data.low.val, entry_index);
+ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+ mskid % 32, 1);
+ memcpy(&temp_key, &mask_key, sizeof(mask_key));
+ hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+ (struct dsaf_tbl_tcam_data *)(&mask_key),
+ &tbl_tcam_mcast);
+
+ /* update software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = temp_key.high.val;
+ soft_mac_entry->tcam_key.low.val = temp_key.low.val;
+}
- /* config promisc entry with mask */
- hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
- (struct dsaf_tbl_tcam_data *)&tbl_tcam_data,
- (struct dsaf_tbl_tcam_data *)&tbl_tcam_mask,
- &mac_data);
+static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
+{
+ struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+ struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
+ struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
+ struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ u8 addr[ETH_ALEN] = {0};
- /* config software entry */
+ /* 1. delete uc vague tcam entry. */
+ /* promisc uses a vague table match with vlanid = 0 & macaddr = 0 */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+ if (entry_index == DSAF_INVALID_ENTRY_IDX)
+ return;
+
+ /* config uc vague table */
+ hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+ &tbl_tcam_mask, &tbl_tcam_ucast);
+ /* update soft management table. */
+ soft_mac_entry = priv->soft_mac_tbl;
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+ /* step back to the START for mc. */
+ soft_mac_entry = priv->soft_mac_tbl;
+
+ /* 2. delete mc vague tcam entry. */
+ addr[0] = 0x01;
+ memset(&mac_key, 0x0, sizeof(mac_key));
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+ if (entry_index == DSAF_INVALID_ENTRY_IDX)
+ return;
+
+ /* config mc vague table */
+ hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+ &tbl_tcam_mask, &tbl_tcam_mcast);
+ /* update soft management table. */
soft_mac_entry += entry_index;
- soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+}
+
+/* Reserve the last TCAM entry for promisc support */
+void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
+ u32 port, bool enable)
+{
+ if (enable)
+ set_promisc_tcam_enable(dsaf_dev, port);
+ else
+ set_promisc_tcam_disable(dsaf_dev, port);
}
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
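
The mskid arithmetic in set_promisc_tcam_enable() above — mskid / 32 to pick the word, mskid % 32 to pick the bit — is an ordinary word-indexed bitmap store into tbl_mcast_port_msk[]. A minimal standalone sketch of the same operation (array size and test values are hypothetical, not driver constants):

#include <stdint.h>
#include <stdio.h>

/* Set bit 'bit' in a bitmap stored as an array of 32-bit words. */
static void bitmap_set(uint32_t *words, unsigned int bit)
{
	words[bit / 32] |= 1U << (bit % 32);
}

int main(void)
{
	uint32_t port_msk[2] = { 0 };	/* hypothetical 64-bit mask */

	bitmap_set(port_msk, 5);	/* word 0, bit 5 */
	bitmap_set(port_msk, 37);	/* word 1, bit 5 */
	printf("%#x %#x\n", port_msk[0], port_msk[1]);	/* 0x20 0x20 */
	return 0;
}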
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 74d935d82cbc..b9733b0b8482 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -176,7 +176,7 @@
#define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50
#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
-#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x103C
#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00
#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100
#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50
@@ -404,11 +404,11 @@
#define RCB_ECC_ERR_ADDR4_REG 0x460
#define RCB_ECC_ERR_ADDR5_REG 0x464
-#define RCB_COM_SF_CFG_INTMASK_RING 0x480
-#define RCB_COM_SF_CFG_RING_STS 0x484
-#define RCB_COM_SF_CFG_RING 0x488
-#define RCB_COM_SF_CFG_INTMASK_BD 0x48C
-#define RCB_COM_SF_CFG_BD_RINT_STS 0x470
+#define RCB_COM_SF_CFG_INTMASK_RING 0x470
+#define RCB_COM_SF_CFG_RING_STS 0x474
+#define RCB_COM_SF_CFG_RING 0x478
+#define RCB_COM_SF_CFG_INTMASK_BD 0x47C
+#define RCB_COM_SF_CFG_BD_RINT_STS 0x480
#define RCB_COM_RCB_RD_BD_BUSY 0x490
#define RCB_COM_RCB_FBD_CRT_EN 0x494
#define RCB_COM_AXI_WR_ERR_INTMASK 0x498
@@ -534,6 +534,7 @@
#define GMAC_LD_LINK_COUNTER_REG 0x01D0UL
#define GMAC_LOOP_REG 0x01DCUL
#define GMAC_RECV_CONTROL_REG 0x01E0UL
+#define GMAC_PCS_RX_EN_REG 0x01E4UL
#define GMAC_VLAN_CODE_REG 0x01E8UL
#define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL
#define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL
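
The rebased RCB_COM_SF_CFG_* offsets above form a block of consecutive 32-bit registers; tables like that can be guarded against future off-by-one edits with compile-time checks. A hedged sketch using C11 _Static_assert — the EX_CFG_* names are invented stand-ins, not the driver's macros:

/* Hypothetical offsets mirroring the fixed RCB_COM_SF_CFG_* block. */
enum {
	EX_CFG_INTMASK_RING = 0x470,
	EX_CFG_RING_STS     = 0x474,
	EX_CFG_RING         = 0x478,
	EX_CFG_INTMASK_BD   = 0x47C,
	EX_CFG_BD_RINT_STS  = 0x480,
};

/* Compile-time guard: the five registers are consecutive 32-bit words. */
_Static_assert(EX_CFG_RING_STS    == EX_CFG_INTMASK_RING + 0x4, "stride");
_Static_assert(EX_CFG_RING        == EX_CFG_RING_STS     + 0x4, "stride");
_Static_assert(EX_CFG_INTMASK_BD  == EX_CFG_RING         + 0x4, "stride");
_Static_assert(EX_CFG_BD_RINT_STS == EX_CFG_INTMASK_BD   + 0x4, "stride");

int main(void) { return 0; }	/* nothing to run; the checks are compile-time */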
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 28e907831b0e..5748d3f722f6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1163,6 +1163,7 @@ static void hns_nic_adjust_link(struct net_device *ndev)
*/
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct phy_device *phy_dev = h->phy_dev;
int ret;
@@ -1180,12 +1181,16 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (unlikely(ret))
return -ENODEV;
- phy_dev->supported &= h->if_support;
- phy_dev->advertising = phy_dev->supported;
+ ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, supported);
+ linkmode_copy(phy_dev->advertising, phy_dev->supported);
if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
phy_dev->autoneg = false;
+ if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
+ phy_stop(phy_dev);
+
return 0;
}
@@ -1281,6 +1286,22 @@ static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
return cpu;
}
+static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < q_num * 2; i++) {
+ if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+ irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+ NULL);
+ free_irq(priv->ring_data[i].ring->irq,
+ &priv->ring_data[i]);
+ priv->ring_data[i].ring->irq_init_flag =
+ RCB_IRQ_NOT_INITED;
+ }
+ }
+}
+
static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
struct hnae_handle *h = priv->ae_handle;
@@ -1306,7 +1327,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
if (ret) {
netdev_err(priv->netdev, "request irq(%d) fail\n",
rd->ring->irq);
- return ret;
+ goto out_free_irq;
}
disable_irq(rd->ring->irq);
@@ -1321,6 +1342,10 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
}
return 0;
+
+out_free_irq:
+ hns_nic_free_irq(h->q_num, priv);
+ return ret;
}
static int hns_nic_net_up(struct net_device *ndev)
@@ -1330,6 +1355,9 @@ static int hns_nic_net_up(struct net_device *ndev)
int i, j;
int ret;
+ if (!test_bit(NIC_STATE_DOWN, &priv->state))
+ return 0;
+
ret = hns_nic_init_irq(priv);
if (ret != 0) {
netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
@@ -1365,6 +1393,7 @@ out_has_some_queues:
for (j = i - 1; j >= 0; j--)
hns_nic_ring_close(ndev, j);
+ hns_nic_free_irq(h->q_num, priv);
set_bit(NIC_STATE_DOWN, &priv->state);
return ret;
@@ -1482,11 +1511,19 @@ static int hns_nic_net_stop(struct net_device *ndev)
}
static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+#define HNS_TX_TIMEO_LIMIT (40 * HZ)
static void hns_nic_net_timeout(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- hns_tx_timeout_reset(priv);
+ if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
+ ndev->watchdog_timeo *= 2;
+ netdev_info(ndev, "watchdog_timo changed to %d.\n",
+ ndev->watchdog_timeo);
+ } else {
+ ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+ hns_tx_timeout_reset(priv);
+ }
}
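
The timeout change above doubles the watchdog interval on each expiry until HNS_TX_TIMEO_LIMIT and only then requests a reset, so transient stalls back off instead of resetting the NIC. A self-contained sketch of that double-with-cap policy (constants are hypothetical and units simplified to seconds):

#include <stdio.h>

#define TIMEO_INIT  5	/* hypothetical initial timeout, seconds */
#define TIMEO_LIMIT 40	/* cap, mirroring HNS_TX_TIMEO_LIMIT / HZ */

/* Double the timeout until the cap; only then report "reset". */
static int on_timeout(int *timeo)
{
	if (*timeo < TIMEO_LIMIT) {
		*timeo *= 2;
		return 0;		/* just back off */
	}
	*timeo = TIMEO_INIT;		/* restore the default ... */
	return 1;			/* ... and trigger the reset */
}

int main(void)
{
	int timeo = TIMEO_INIT;

	while (!on_timeout(&timeo))
		printf("timeout grown to %d s\n", timeo);
	printf("reset requested, timeout back to %d s\n", timeo);
	return 0;
}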
static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
@@ -2049,11 +2086,11 @@ static void hns_nic_service_task(struct work_struct *work)
= container_of(work, struct hns_nic_priv, service_task);
struct hnae_handle *h = priv->ae_handle;
+ hns_nic_reset_subtask(priv);
hns_nic_update_link_status(priv->netdev);
h->dev->ops->update_led_status(h);
hns_nic_update_stats(priv->netdev);
- hns_nic_reset_subtask(priv);
hns_nic_service_event_complete(priv);
}
@@ -2339,7 +2376,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
ndev->min_mtu = MAC_MIN_MTU;
switch (priv->enet_ver) {
case AE_VERSION_2:
- ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 774beda040a1..8e9b95871d30 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -624,7 +624,7 @@ static void hns_nic_self_test(struct net_device *ndev,
clear_bit(NIC_STATE_TESTING, &priv->state);
if (if_running)
- (void)dev_open(ndev);
+ (void)dev_open(ndev, NULL);
}
/* Online tests aren't run; pass by default */
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index 002534f12b66..d01bf536eb86 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -9,6 +9,6 @@ obj-$(CONFIG_HNS3) += hns3vf/
obj-$(CONFIG_HNS3) += hnae3.o
obj-$(CONFIG_HNS3_ENET) += hns3.o
-hns3-objs = hns3_enet.o hns3_ethtool.o
+hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 038326cfda93..691d12174902 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -36,6 +36,10 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
+ HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */
+ HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */
+ HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */
+ HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
};
/* below are per-VF mac-vlan subcodes */
@@ -85,6 +89,12 @@ struct hclge_mbx_pf_to_vf_cmd {
u16 msg[8];
};
+struct hclge_vf_rst_cmd {
+ u8 dest_vfid;
+ u8 vf_rst;
+ u8 rsv[22];
+};
+
/* used by VF to store the received Async responses from PF */
struct hclgevf_mbx_arq_ring {
#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 055b40606dbc..36eab37d8a40 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -52,6 +52,7 @@
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_FD_B 0x6
+#define HNAE3_DEV_SUPPORT_GRO_B 0x7
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -65,6 +66,9 @@
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+#define hnae3_dev_gro_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B)
+
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
@@ -124,14 +128,23 @@ enum hnae3_reset_notify_type {
enum hnae3_reset_type {
HNAE3_VF_RESET,
+ HNAE3_VF_FUNC_RESET,
+ HNAE3_VF_PF_FUNC_RESET,
HNAE3_VF_FULL_RESET,
+ HNAE3_FLR_RESET,
HNAE3_FUNC_RESET,
HNAE3_CORE_RESET,
HNAE3_GLOBAL_RESET,
HNAE3_IMP_RESET,
+ HNAE3_UNKNOWN_RESET,
HNAE3_NONE_RESET,
};
+enum hnae3_flr_state {
+ HNAE3_FLR_DOWN,
+ HNAE3_FLR_DONE,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -162,6 +175,7 @@ struct hnae3_client_ops {
int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
int (*reset_notify)(struct hnae3_handle *handle,
enum hnae3_reset_notify_type type);
+ enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle);
};
#define HNAE3_CLIENT_NAME_LENGTH 16
@@ -197,6 +211,10 @@ struct hnae3_ae_dev {
* Enable the hardware
* stop()
* Disable the hardware
+ * client_start()
+ * Inform the hclge that client has been started
+ * client_stop()
+ * Inform the hclge that client has been stopped
* get_status()
* Get the carrier state of the back channel of the handle, 1 for ok, 0 for
* non-ok
@@ -292,17 +310,22 @@ struct hnae3_ae_dev {
* Set vlan filter config of vf
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
+ * set_gro_en()
+ * Enable/disable HW GRO
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
-
+ void (*flr_prepare)(struct hnae3_ae_dev *ae_dev);
+ void (*flr_done)(struct hnae3_ae_dev *ae_dev);
int (*init_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
void (*uninit_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
int (*start)(struct hnae3_handle *handle);
void (*stop)(struct hnae3_handle *handle);
+ int (*client_start)(struct hnae3_handle *handle);
+ void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex);
@@ -403,6 +426,8 @@ struct hnae3_ae_ops {
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
+ void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type);
void (*get_channels)(struct hnae3_handle *handle,
struct ethtool_channels *ch);
void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
@@ -429,7 +454,14 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd, u32 *rule_locs);
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
- pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev);
+ int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
+ pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
+ bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
+ bool (*ae_dev_resetting)(struct hnae3_handle *handle);
+ unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
+ int (*set_gro_en)(struct hnae3_handle *handle, int enable);
+ u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id);
+ void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
};
struct hnae3_dcb_ops {
@@ -488,6 +520,14 @@ struct hnae3_roce_private_info {
void __iomem *roce_io_base;
int base_vector;
int num_vectors;
+
+ /* The attributes below are defined for the RoCE client; hnae3 gives
+ * them initial values, and the RoCE client may modify and use
+ * them.
+ */
+ unsigned long reset_state;
+ unsigned long instance_state;
+ unsigned long state;
};
struct hnae3_unic_private_info {
@@ -520,9 +560,6 @@ struct hnae3_handle {
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
u64 flags; /* Indicate the capabilities for this handle*/
- unsigned long last_reset_time;
- enum hnae3_reset_type reset_level;
-
union {
struct net_device *netdev; /* first member */
struct hnae3_knic_private_info kinfo;
@@ -533,6 +570,7 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
u8 netdev_flags;
+ struct dentry *hnae3_dbgfs;
};
#define hnae3_set_field(origin, mask, shift, val) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index ea5f8a84070d..b6fabbbdfd5b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -9,6 +9,9 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_getets)
return h->kinfo.dcb_ops->ieee_getets(h, ets);
@@ -20,6 +23,9 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_setets)
return h->kinfo.dcb_ops->ieee_setets(h, ets);
@@ -31,6 +37,9 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_getpfc)
return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
@@ -42,6 +51,9 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_setpfc)
return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
new file mode 100644
index 000000000000..0de543faa5b1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#include "hnae3.h"
+#include "hns3_enet.h"
+
+#define HNS3_DBG_READ_LEN 256
+
+static struct dentry *hns3_dbgfs_root;
+
+static int hns3_dbg_queue_info(struct hnae3_handle *h, char *cmd_buf)
+{
+ struct hns3_nic_priv *priv = h->priv;
+ struct hns3_nic_ring_data *ring_data;
+ struct hns3_enet_ring *ring;
+ u32 base_add_l, base_add_h;
+ u32 queue_num, queue_max;
+ u32 value, i = 0;
+ int cnt;
+
+ if (!priv->ring_data) {
+ dev_err(&h->pdev->dev, "ring_data is NULL\n");
+ return -EFAULT;
+ }
+
+ queue_max = h->kinfo.num_tqps;
+ cnt = kstrtouint(&cmd_buf[11], 0, &queue_num);
+ if (cnt)
+ queue_num = 0;
+ else
+ queue_max = queue_num + 1;
+
+ dev_info(&h->pdev->dev, "queue info\n");
+
+ if (queue_num >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev,
+ "Queue number(%u) is out of range(%u)\n", queue_num,
+ h->kinfo.num_tqps - 1);
+ return -EINVAL;
+ }
+
+ ring_data = priv->ring_data;
+ for (i = queue_num; i < queue_max; i++) {
+ /* Each iteration must check whether the instance is being reset,
+ * to prevent references to invalid memory, and the code below
+ * must complete within 100 ms.
+ */
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+
+ ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring;
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_L_REG);
+ dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i,
+ base_add_h, base_add_l);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_NUM_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_LEN_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_TAIL_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_FBDNUM_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
+ dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
+
+ ring = ring_data[i].ring;
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_L_REG);
+ dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i,
+ base_add_h, base_add_l);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_NUM_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TC_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TAIL_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_HEAD_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_FBDNUM_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_OFFSET_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
+ dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i,
+ value);
+ }
+
+ return 0;
+}
+
+static int hns3_dbg_queue_map(struct hnae3_handle *h)
+{
+ struct hns3_nic_priv *priv = h->priv;
+ struct hns3_nic_ring_data *ring_data;
+ int i;
+
+ if (!h->ae_algo->ops->get_global_queue_id)
+ return -EOPNOTSUPP;
+
+ dev_info(&h->pdev->dev, "map info for queue id and vector id\n");
+ dev_info(&h->pdev->dev,
+ "local queue id | global queue id | vector id\n");
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ u16 global_qid;
+
+ global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
+ ring_data = &priv->ring_data[i];
+ if (!ring_data || !ring_data->ring ||
+ !ring_data->ring->tqp_vector)
+ continue;
+
+ dev_info(&h->pdev->dev,
+ " %4d %4d %4d\n",
+ i, global_qid,
+ ring_data->ring->tqp_vector->vector_irq);
+ }
+
+ return 0;
+}
+
+static int hns3_dbg_bd_info(struct hnae3_handle *h, char *cmd_buf)
+{
+ struct hns3_nic_priv *priv = h->priv;
+ struct hns3_nic_ring_data *ring_data;
+ struct hns3_desc *rx_desc, *tx_desc;
+ struct device *dev = &h->pdev->dev;
+ struct hns3_enet_ring *ring;
+ u32 tx_index, rx_index;
+ u32 q_num, value;
+ int cnt;
+
+ cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index);
+ if (cnt == 2) {
+ rx_index = tx_index;
+ } else if (cnt != 1) {
+ dev_err(dev, "bd info: bad command string, cnt=%d\n", cnt);
+ return -EINVAL;
+ }
+
+ if (q_num >= h->kinfo.num_tqps) {
+ dev_err(dev, "Queue number(%u) is out of range(%u)\n", q_num,
+ h->kinfo.num_tqps - 1);
+ return -EINVAL;
+ }
+
+ ring_data = priv->ring_data;
+ ring = ring_data[q_num].ring;
+ value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
+ tx_index = (cnt == 1) ? value : tx_index;
+
+ if (tx_index >= ring->desc_num) {
+ dev_err(dev, "bd index (%u) is out of range(%u)\n", tx_index,
+ ring->desc_num - 1);
+ return -EINVAL;
+ }
+
+ tx_desc = &ring->desc[tx_index];
+ dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
+ dev_info(dev, "(TX) addr: 0x%llx\n", tx_desc->addr);
+ dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag);
+ dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size);
+ dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
+ dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
+ dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
+ dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
+ dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.outer_vlan_tag);
+ dev_info(dev, "(TX)tv: %u\n", tx_desc->tx.tv);
+ dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec);
+ dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
+ dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
+ dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
+ dev_info(dev, "(TX)paylen: %u\n", tx_desc->tx.paylen);
+ dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri);
+ dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss);
+
+ ring = ring_data[q_num + h->kinfo.num_tqps].ring;
+ value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
+ rx_index = (cnt == 1) ? value : tx_index;
+ rx_desc = &ring->desc[rx_index];
+
+ dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
+ dev_info(dev, "(RX)addr: 0x%llx\n", rx_desc->addr);
+ dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len);
+ dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size);
+ dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash);
+ dev_info(dev, "(RX)fd_id: %u\n", rx_desc->rx.fd_id);
+ dev_info(dev, "(RX)vlan_tag: %u\n", rx_desc->rx.vlan_tag);
+ dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", rx_desc->rx.o_dm_vlan_id_fb);
+ dev_info(dev, "(RX)ot_vlan_tag: %u\n", rx_desc->rx.ot_vlan_tag);
+ dev_info(dev, "(RX)bd_base_info: %u\n", rx_desc->rx.bd_base_info);
+
+ return 0;
+}
+
+static void hns3_dbg_help(struct hnae3_handle *h)
+{
+#define HNS3_DBG_BUF_LEN 256
+
+ char printf_buf[HNS3_DBG_BUF_LEN];
+
+ dev_info(&h->pdev->dev, "available commands\n");
+ dev_info(&h->pdev->dev, "queue info [number]\n");
+ dev_info(&h->pdev->dev, "queue map\n");
+ dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n");
+ dev_info(&h->pdev->dev, "dump fd tcam\n");
+ dev_info(&h->pdev->dev, "dump tc\n");
+ dev_info(&h->pdev->dev, "dump tm map [q_num]\n");
+ dev_info(&h->pdev->dev, "dump tm\n");
+ dev_info(&h->pdev->dev, "dump qos pause cfg\n");
+ dev_info(&h->pdev->dev, "dump qos pri map\n");
+ dev_info(&h->pdev->dev, "dump qos buf cfg\n");
+ dev_info(&h->pdev->dev, "dump mng tbl\n");
+
+ memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
+ strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
+ HNS3_DBG_BUF_LEN - 1);
+ strncat(printf_buf + strlen(printf_buf),
+ " [igu egu <prt_id>] [rpu <tc_queue_num>]",
+ HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
+ strncat(printf_buf + strlen(printf_buf),
+ " [rtc] [ppp] [rcb] [tqp <q_num>]]\n",
+ HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
+ dev_info(&h->pdev->dev, "%s", printf_buf);
+
+ memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
+ strncat(printf_buf, "dump reg dcb [port_id] [pri_id] [pg_id]",
+ HNS3_DBG_BUF_LEN - 1);
+ strncat(printf_buf + strlen(printf_buf), " [rq_id] [nq_id] [qset_id]\n",
+ HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
+ dev_info(&h->pdev->dev, "%s", printf_buf);
+}
+
+static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int uncopy_bytes;
+ char *buf;
+ int len;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count < HNS3_DBG_READ_LEN)
+ return -ENOSPC;
+
+ buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
+ uncopy_bytes = copy_to_user(buffer, buf, len);
+
+ kfree(buf);
+
+ if (uncopy_bytes)
+ return -EFAULT;
+
+ return (*ppos = len);
+}
+
+static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct hnae3_handle *handle = filp->private_data;
+ struct hns3_nic_priv *priv = handle->priv;
+ char *cmd_buf, *cmd_buf_tmp;
+ int uncopied_bytes;
+ int ret = 0;
+
+ if (*ppos != 0)
+ return 0;
+
+ /* Check whether the instance is being reset. */
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return 0;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+
+ uncopied_bytes = copy_from_user(cmd_buf, buffer, count);
+ if (uncopied_bytes) {
+ kfree(cmd_buf);
+ return -EFAULT;
+ }
+
+ cmd_buf[count] = '\0';
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ if (strncmp(cmd_buf, "help", 4) == 0)
+ hns3_dbg_help(handle);
+ else if (strncmp(cmd_buf, "queue info", 10) == 0)
+ ret = hns3_dbg_queue_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "queue map", 9) == 0)
+ ret = hns3_dbg_queue_map(handle);
+ else if (strncmp(cmd_buf, "bd info", 7) == 0)
+ ret = hns3_dbg_bd_info(handle, cmd_buf);
+ else if (handle->ae_algo->ops->dbg_run_cmd)
+ ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+
+ if (ret)
+ hns3_dbg_help(handle);
+
+ kfree(cmd_buf);
+ cmd_buf = NULL;
+
+ return count;
+}
+
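
The write handler above trims the trailing newline with strchr() and dispatches on command prefixes via strncmp(), falling back to the help text on anything unknown. A standalone userspace sketch of the same parse-and-dispatch shape (the command set here is illustrative only):

#include <stdio.h>
#include <string.h>

/* Strip a trailing newline, then dispatch on the command prefix. */
static int dispatch(char *cmd)
{
	char *nl = strchr(cmd, '\n');

	if (nl)
		*nl = '\0';

	if (strncmp(cmd, "help", 4) == 0)
		printf("available commands: help, queue info\n");
	else if (strncmp(cmd, "queue info", 10) == 0)
		printf("queue info for:%s\n", &cmd[10]);
	else
		return -1;	/* unknown: caller prints the help */
	return 0;
}

int main(void)
{
	char buf[] = "queue info 3\n";

	if (dispatch(buf))
		printf("unknown command\n");
	return 0;
}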
+static const struct file_operations hns3_dbg_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hns3_dbg_cmd_read,
+ .write = hns3_dbg_cmd_write,
+};
+
+void hns3_dbg_init(struct hnae3_handle *handle)
+{
+ const char *name = pci_name(handle->pdev);
+ struct dentry *pfile;
+
+ handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
+ if (!handle->hnae3_dbgfs)
+ return;
+
+ pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
+ &hns3_dbg_cmd_fops);
+ if (!pfile) {
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+ dev_warn(&handle->pdev->dev, "failed to create cmd file for %s\n",
+ name);
+ }
+}
+
+void hns3_dbg_uninit(struct hnae3_handle *handle)
+{
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+}
+
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
+{
+ hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+ if (!hns3_dbgfs_root) {
+ pr_warn("Register debugfs for %s fail\n", debugfs_dir_name);
+ return;
+ }
+}
+
+void hns3_dbg_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hns3_dbgfs_root);
+ hns3_dbgfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 20fcf0d1c2ce..d3b9aaf96c1c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -15,6 +15,7 @@
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
+#include <net/tcp.h>
#include <net/vxlan.h>
#include "hnae3.h"
@@ -239,7 +240,6 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
- tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}
@@ -312,6 +312,24 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
return min_t(u16, rss_size, max_rss_size);
}
+static void hns3_tqp_enable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg |= BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
+static void hns3_tqp_disable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -334,6 +352,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
+ /* enable rcb */
+ for (j = 0; j < h->kinfo.num_tqps; j++)
+ hns3_tqp_enable(h->kinfo.tqp[j]);
+
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret)
@@ -344,6 +366,9 @@ static int hns3_nic_net_up(struct net_device *netdev)
return 0;
out_start_err:
+ while (j--)
+ hns3_tqp_disable(h->kinfo.tqp[j]);
+
for (j = i - 1; j >= 0; j--)
hns3_vector_disable(&priv->tqp_vector[j]);
@@ -359,6 +384,9 @@ static int hns3_nic_net_open(struct net_device *netdev)
struct hnae3_knic_private_info *kinfo;
int i, ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
netif_carrier_off(netdev);
ret = hns3_nic_set_real_num_queue(netdev);
@@ -378,23 +406,27 @@ static int hns3_nic_net_open(struct net_device *netdev)
kinfo->prio_tc[i]);
}
- priv->ae_handle->last_reset_time = jiffies;
+ if (h->ae_algo->ops->set_timer_task)
+ h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
+
return 0;
}
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
int i;
- if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
- return;
-
/* disable vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
+ /* disable rcb */
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
@@ -408,6 +440,15 @@ static void hns3_nic_net_down(struct net_device *netdev)
static int hns3_nic_net_stop(struct net_device *netdev)
{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return 0;
+
+ if (h->ae_algo->ops->set_timer_task)
+ h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
+
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
@@ -1312,6 +1353,15 @@ static int hns3_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+ if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
+ if (features & NETIF_F_GRO_HW)
+ ret = h->ae_algo->ops->set_gro_en(h, true);
+ else
+ ret = h->ae_algo->ops->set_gro_en(h, false);
+ if (ret)
+ return ret;
+ }
+
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
h->ae_algo->ops->enable_vlan_filter) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1530,18 +1580,11 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- bool if_running = netif_running(netdev);
int ret;
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
- /* if this was called with netdev up then bring netdevice down */
- if (if_running) {
- (void)hns3_nic_net_stop(netdev);
- msleep(100);
- }
-
ret = h->ae_algo->ops->set_mtu(h, new_mtu);
if (ret)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
@@ -1549,10 +1592,6 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
else
netdev->mtu = new_mtu;
- /* if the netdev was running earlier, bring it up again */
- if (if_running && hns3_nic_net_open(netdev))
- ret = -EINVAL;
-
return ret;
}
@@ -1615,10 +1654,9 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
priv->tx_timeout_count++;
- if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
- return;
-
- /* request the reset */
+ /* request the reset, and let the hclge determine
+ * which reset level should be done
+ */
if (h->ae_algo->ops->reset_event)
h->ae_algo->ops->reset_event(h->pdev, h);
}
@@ -1682,8 +1720,10 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
static void hns3_get_dev_capability(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev)
{
- if (pdev->revision >= 0x21)
+ if (pdev->revision >= 0x21) {
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
+ }
}
/* hns3_probe - Device initialization routine
@@ -1795,8 +1835,8 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NONE;
}
- if (ae_dev->ops->process_hw_error)
- ret = ae_dev->ops->process_hw_error(ae_dev);
+ if (ae_dev->ops->handle_hw_ras_error)
+ ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
else
return PCI_ERS_RESULT_NONE;
@@ -1819,9 +1859,29 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+static void hns3_reset_prepare(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "hns3 flr prepare\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
+ ae_dev->ops->flr_prepare(ae_dev);
+}
+
+static void hns3_reset_done(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "hns3 flr done\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
+ ae_dev->ops->flr_done(ae_dev);
+}
+
static const struct pci_error_handlers hns3_err_handler = {
.error_detected = hns3_error_detected,
.slot_reset = hns3_slot_reset,
+ .reset_prepare = hns3_reset_prepare,
+ .reset_done = hns3_reset_done,
};
static struct pci_driver hns3_driver = {
@@ -1875,7 +1935,9 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
if (pdev->revision >= 0x21) {
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_GRO_HW;
+ netdev->features |= NETIF_F_GRO_HW;
if (!(h->flags & HNAE3_SUPPORT_VF)) {
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2253,6 +2315,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
+ /* We MUST enable hardware checksum before enabling hardware GRO */
+ if (skb_shinfo(skb)->gso_size) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+
/* check if hardware has done checksum */
if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
@@ -2296,6 +2364,9 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
+ if (skb_has_frag_list(skb))
+ napi_gro_flush(&ring->tqp_vector->napi, false);
+
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
@@ -2329,12 +2400,166 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
+ unsigned char *va)
+{
+#define HNS3_NEED_ADD_FRAG 1
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb;
+
+ ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
+ skb = ring->skb;
+ if (unlikely(!skb)) {
+ netdev_err(netdev, "alloc rx skb fail\n");
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -ENOMEM;
+ }
+
+ prefetchw(skb->data);
+
+ ring->pending_buf = 1;
+ ring->frag_num = 0;
+ ring->tail_skb = NULL;
+ if (length <= HNS3_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* We can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ desc_cb->reuse_flag = 1;
+ else /* This page cannot be reused so discard it */
+ put_page(desc_cb->priv);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+ return 0;
+ }
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.seg_pkt_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+ __skb_put(skb, ring->pull_len);
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
+ desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+
+ return HNS3_NEED_ADD_FRAG;
+}
+
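
hns3_alloc_skb() above applies a copybreak policy: payloads up to HNS3_RX_HEAD_SIZE are memcpy'd into the fresh skb so the page can be recycled immediately, while larger payloads keep the page attached as a fragment. A minimal sketch of that threshold decision, with a hypothetical COPYBREAK standing in for the driver's constant:

#include <stdio.h>
#include <string.h>

#define COPYBREAK 256	/* hypothetical stand-in for HNS3_RX_HEAD_SIZE */

/* Small packets: memcpy and recycle the buffer. Large: hand it off. */
static int rx_one(const char *buf, int len, char *small_copy)
{
	if (len <= COPYBREAK) {
		memcpy(small_copy, buf, len);
		return 0;	/* buffer may be reused immediately */
	}
	return 1;		/* buffer ownership moves to the stack */
}

int main(void)
{
	char buf[512] = "payload", copy[COPYBREAK];

	printf("64B  -> %s\n", rx_one(buf, 64, copy) ? "handoff" : "copy");
	printf("512B -> %s\n", rx_one(buf, 512, copy) ? "handoff" : "copy");
	return 0;
}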
+static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
+ struct sk_buff **out_skb, bool pending)
+{
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *head_skb = *out_skb;
+ struct sk_buff *new_skb;
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc *pre_desc;
+ u32 bd_base_info;
+ int pre_bd;
+
+ /* if there is a pending BD, next_to_clean has already moved past
+ * it, so read bd_base_info from the previous descriptor
+ */
+ if (pending) {
+ pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
+ ring->desc_num;
+ pre_desc = &ring->desc[pre_bd];
+ bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
+ } else {
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ }
+
+ while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ return -ENXIO;
+
+ if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
+ new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
+ HNS3_RX_HEAD_SIZE);
+ if (unlikely(!new_skb)) {
+ netdev_err(ring->tqp->handle->kinfo.netdev,
+ "alloc rx skb frag fail\n");
+ return -ENXIO;
+ }
+ ring->frag_num = 0;
+
+ if (ring->tail_skb) {
+ ring->tail_skb->next = new_skb;
+ ring->tail_skb = new_skb;
+ } else {
+ skb_shinfo(skb)->frag_list = new_skb;
+ ring->tail_skb = new_skb;
+ }
+ }
+
+ if (ring->tail_skb) {
+ head_skb->truesize += hnae3_buf_size(ring);
+ head_skb->data_len += le16_to_cpu(desc->rx.size);
+ head_skb->len += le16_to_cpu(desc->rx.size);
+ skb = ring->tail_skb;
+ }
+
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+ ring->pending_buf++;
+ }
+
+ return 0;
+}
+
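
hns3_add_frag() above copes with packets spanning more buffers than MAX_SKB_FRAGS by allocating a fresh skb and linking it through frag_list/tail_skb: whenever the tail fills, chain a new fixed-capacity node. A standalone sketch of that pattern with a deliberately tiny capacity (NODE_CAP and the node layout are hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define NODE_CAP 4	/* hypothetical stand-in for MAX_SKB_FRAGS */

struct node {
	int frags[NODE_CAP];
	int frag_num;
	struct node *next;
};

/* Append one fragment, chaining a new node when the tail is full. */
static struct node *add_frag(struct node *tail, int frag)
{
	if (tail->frag_num == NODE_CAP) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return NULL;
		tail->next = n;
		tail = n;
	}
	tail->frags[tail->frag_num++] = frag;
	return tail;
}

int main(void)
{
	struct node head = { .frag_num = 0 }, *tail = &head;
	int i;

	/* 10 fragments overflow the head node twice (nodes leak; demo only) */
	for (i = 0; i < 10 && tail; i++)
		tail = add_frag(tail, i);
	printf("chained %s\n", tail ? "ok" : "alloc failed");
	return 0;
}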
+static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info)
+{
+ u16 gro_count;
+ u32 l3_type;
+
+ gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
+ HNS3_RXD_GRO_COUNT_S);
+ /* if there is no HW GRO, do not set gro params */
+ if (!gro_count)
+ return;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = gro_count;
+
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ if (l3_type == HNS3_L3_TYPE_IPV4)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ else if (l3_type == HNS3_L3_TYPE_IPV6)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ return;
+
+ skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+ HNS3_RXD_GRO_SIZE_M,
+ HNS3_RXD_GRO_SIZE_S);
+ if (skb_shinfo(skb)->gso_size)
+ tcp_gro_complete(skb);
+}
+
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
struct sk_buff *skb)
{
- struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
struct hnae3_handle *handle = ring->tqp->handle;
enum pkt_hash_types rss_type;
+ struct hns3_desc *desc;
+ int last_bd;
+
+ /* By the time the driver handles the RSS type, ring->next_to_clean
+ * already points to the first descriptor of the next packet, so
+ * step back one descriptor here.
+ */
+ last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
+ desc = &ring->desc[last_bd];
if (le32_to_cpu(desc->rx.rss_hash))
rss_type = handle->kinfo.rss_type;
@@ -2345,18 +2570,16 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb, int *out_bnum)
+ struct sk_buff **out_skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
- struct sk_buff *skb;
- unsigned char *va;
u32 bd_base_info;
- int pull_len;
u32 l234info;
int length;
- int bnum;
+ int ret;
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
@@ -2368,9 +2591,10 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
/* Check valid BD */
if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
- return -EFAULT;
+ return -ENXIO;
- va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+ if (!skb)
+ ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* Prefetch first cache line of first page
* Idea is to cache few bytes of the header of the packet. Our L1 Cache
@@ -2379,62 +2603,42 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
* lines. In such a case, single fetch would suffice to cache in the
* relevant part of the header.
*/
- prefetch(va);
+ prefetch(ring->va);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(ring->va + L1_CACHE_BYTES);
#endif
- skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
- HNS3_RX_HEAD_SIZE);
- if (unlikely(!skb)) {
- netdev_err(netdev, "alloc rx skb fail\n");
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
-
- return -ENOMEM;
- }
-
- prefetchw(skb->data);
-
- bnum = 1;
- if (length <= HNS3_RX_HEAD_SIZE) {
- memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+ if (!skb) {
+ ret = hns3_alloc_skb(ring, length, ring->va);
+ *out_skb = skb = ring->skb;
- /* We can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
- desc_cb->reuse_flag = 1;
- else /* This page cannot be reused so discard it */
- put_page(desc_cb->priv);
+ if (ret < 0) /* alloc buffer fail */
+ return ret;
+ if (ret > 0) { /* need add frag */
+ ret = hns3_add_frag(ring, desc, &skb, false);
+ if (ret)
+ return ret;
- ring_ptr_move_fw(ring, next_to_clean);
+ /* As the head data may be changed when GRO is enabled, copy
+ * it in only after the rest of the packet has been received
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
+ }
} else {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.seg_pkt_cnt++;
- u64_stats_update_end(&ring->syncp);
-
- pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
-
- memcpy(__skb_put(skb, pull_len), va,
- ALIGN(pull_len, sizeof(long)));
-
- hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
+ ret = hns3_add_frag(ring, desc, &skb, true);
+ if (ret)
+ return ret;
- while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
- desc = &ring->desc[ring->next_to_clean];
- desc_cb = &ring->desc_cb[ring->next_to_clean];
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
- bnum++;
- }
+ /* As the head data may be changed when GRO is enabled, copy
+ * it in only after the rest of the packet has been received
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
}
- *out_bnum = bnum;
-
l234info = le32_to_cpu(desc->rx.l234_info);
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -2484,7 +2688,11 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->tqp_vector->rx_group.total_bytes += skb->len;
+ /* This is needed in order to enable forwarding support */
+ hns3_set_gro_param(skb, l234info, bd_base_info);
+
hns3_rx_checksum(ring, skb, desc);
+ *out_skb = skb;
hns3_set_rx_skb_rss_type(ring, skb);
return 0;
@@ -2497,9 +2705,9 @@ int hns3_clean_rx_ring(
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
- int unused_count = hns3_desc_unused(ring);
- struct sk_buff *skb = NULL;
- int num, bnum = 0;
+ int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+ struct sk_buff *skb = ring->skb;
+ int num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
rmb(); /* Make sure num taken effect before the other data is touched */
@@ -2513,24 +2721,32 @@ int hns3_clean_rx_ring(
hns3_nic_alloc_rx_buffers(ring,
clean_count + unused_count);
clean_count = 0;
- unused_count = hns3_desc_unused(ring);
+ unused_count = hns3_desc_unused(ring) -
+ ring->pending_buf;
}
/* Poll one pkt */
- err = hns3_handle_rx_bd(ring, &skb, &bnum);
+ err = hns3_handle_rx_bd(ring, &skb);
if (unlikely(!skb)) /* This fault cannot be repaired */
goto out;
- recv_bds += bnum;
- clean_count += bnum;
- if (unlikely(err)) { /* Do jump the err */
- recv_pkts++;
+ if (err == -ENXIO) { /* FE bit not yet seen for this packet */
+ goto out;
+ } else if (unlikely(err)) { /* skip this erroneous packet */
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
continue;
}
/* Do update ip stack process */
skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
recv_pkts++;
}
@@ -2644,10 +2860,10 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
bool rx_update, tx_update;
- if (tqp_vector->int_adapt_down > 0) {
- tqp_vector->int_adapt_down--;
+ /* update param every 1000ms */
+ if (time_before(jiffies,
+ tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
return;
- }
if (rx_group->coal.gl_adapt_enable) {
rx_update = hns3_get_new_int_gl(rx_group);
@@ -2664,11 +2880,11 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
}
tqp_vector->last_jiffies = jiffies;
- tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
}
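
The hunk above swaps a per-poll countdown for a time_before() test against last_jiffies, so the coalescing parameters are recomputed at most once per second regardless of poll rate. A standalone sketch of the same timestamp throttle using a monotonic clock (the interval mirrors the patch's 1000 ms; everything else is invented):

#include <stdio.h>
#include <time.h>

#define INTERVAL_MS 1000	/* mirroring the 1000 ms in the patch */

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Run the expensive update only if INTERVAL_MS elapsed since last time. */
static int maybe_update(long long *last)
{
	long long now = now_ms();

	if (now - *last < INTERVAL_MS)
		return 0;
	*last = now;
	return 1;	/* caller performs the update */
}

int main(void)
{
	long long last = 0;
	int i, ran = 0;

	for (i = 0; i < 5; i++)
		ran += maybe_update(&last);
	printf("updates run: %d\n", ran);	/* 1 within one interval */
	return 0;
}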
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
+ struct hns3_nic_priv *priv = netdev_priv(napi->dev);
struct hns3_enet_ring *ring;
int rx_pkt_total = 0;
@@ -2677,6 +2893,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
int rx_budget;
+ if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ napi_complete(napi);
+ return 0;
+ }
+
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
@@ -2701,9 +2922,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- napi_complete(napi);
- hns3_update_new_int_gl(tqp_vector);
- hns3_mask_vector_irq(tqp_vector, 1);
+ if (napi_complete(napi) &&
+ likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ hns3_update_new_int_gl(tqp_vector);
+ hns3_mask_vector_irq(tqp_vector, 1);
+ }
return rx_pkt_total;
}
@@ -2783,9 +3006,10 @@ err_free_chain:
cur_chain = head->next;
while (cur_chain) {
chain = cur_chain->next;
- devm_kfree(&pdev->dev, chain);
+ devm_kfree(&pdev->dev, cur_chain);
cur_chain = chain;
}
+ head->next = NULL;
return -ENOMEM;
}
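The error path previously freed chain, the next node, instead of cur_chain, leaking the node just visited; the fix also resets head->next so a retry starts from an empty list. The corrected teardown pattern as a small self-contained sketch:

#include <stdlib.h>

struct node {
	struct node *next;
};

/* Free every node after head; keep head itself and leave
 * head->next NULL so the chain can be rebuilt safely. */
static void free_chain(struct node *head)
{
	struct node *cur = head->next;
	struct node *next;

	while (cur) {
		next = cur->next;	/* save the link first */
		free(cur);		/* free the node just visited */
		cur = next;
	}
	head->next = NULL;
}

int main(void)
{
	struct node head = {0};
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = head.next;
		head.next = n;
	}
	free_chain(&head);
	return 0;
}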
@@ -2876,7 +3100,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
ret = hns3_get_vector_ring_chain(tqp_vector,
&vector_ring_chain);
if (ret)
- return ret;
+ goto map_ring_fail;
ret = h->ae_algo->ops->map_ring_to_vector(h,
tqp_vector->vector_irq, &vector_ring_chain);
@@ -2901,6 +3125,8 @@ map_ring_fail:
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
+#define HNS3_VECTOR_PF_MAX_NUM 64
+
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
struct hnae3_vector_info *vector;
@@ -2913,6 +3139,8 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
/* RSS size, cpu online and vector_num should be the same */
/* Should consider 2p/4p later */
vector_num = min_t(u16, num_online_cpus(), tqp_num);
+ vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
+
vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
GFP_KERNEL);
if (!vector)
@@ -2970,12 +3198,12 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
- if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
- (void)irq_set_affinity_hint(
- priv->tqp_vector[i].vector_irq,
- NULL);
- free_irq(priv->tqp_vector[i].vector_irq,
- &priv->tqp_vector[i]);
+ if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
+ irq_set_affinity_notifier(tqp_vector->vector_irq,
+ NULL);
+ irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
+ free_irq(tqp_vector->vector_irq, tqp_vector);
+ tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
}
priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -3319,6 +3547,22 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+static int hns3_client_start(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_start)
+ return 0;
+
+ return handle->ae_algo->ops->client_start(handle);
+}
+
+static void hns3_client_stop(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_stop)
+ return;
+
+ handle->ae_algo->ops->client_stop(handle);
+}
+
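These wrappers let callers invoke optional ae_algo ops without testing for NULL at every call site, treating a missing op as success. The same guard pattern in a standalone sketch (struct and names are hypothetical):

#include <stdio.h>

struct client_ops {
	int (*client_start)(void *handle);	/* optional callback */
};

/* A missing optional op counts as success, as in hns3_client_start(). */
static int client_start(const struct client_ops *ops, void *handle)
{
	if (!ops->client_start)
		return 0;
	return ops->client_start(handle);
}

int main(void)
{
	struct client_ops ops = {0};	/* no callback installed */

	printf("start without op: %d\n", client_start(&ops, NULL));
	return 0;
}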
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -3337,7 +3581,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->dev = &pdev->dev;
priv->netdev = netdev;
priv->ae_handle = handle;
- priv->ae_handle->last_reset_time = jiffies;
priv->tx_timeout_count = 0;
handle->kinfo.netdev = netdev;
@@ -3357,11 +3600,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- if (handle->flags & HNAE3_SUPPORT_VF)
- handle->reset_level = HNAE3_VF_RESET;
- else
- handle->reset_level = HNAE3_FUNC_RESET;
-
ret = hns3_get_ring_config(priv);
if (ret) {
ret = -ENOMEM;
@@ -3392,10 +3630,20 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_reg_netdev_fail;
}
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto out_reg_netdev_fail;
+ }
+
hns3_dcbnl_setup(handle);
- /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
- netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ hns3_dbg_init(handle);
+
+ /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
+ netdev->max_mtu = HNS3_MAX_MTU;
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
@@ -3418,11 +3666,18 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ hns3_client_stop(handle);
+
hns3_remove_hw_addr(netdev);
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ goto out_netdev_free;
+ }
+
hns3_del_all_fd_rules(netdev, true);
hns3_force_clear_all_rx_ring(handle);
@@ -3441,8 +3696,11 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_put_ring_config(priv);
+ hns3_dbg_uninit(handle);
+
priv->ring_data = NULL;
+out_netdev_free:
free_netdev(netdev);
}
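test_and_clear_bit() on HNS3_NIC_STATE_INITED makes the teardown idempotent: only the caller that actually clears the flag does the work, and a repeat call just warns and skips to freeing the netdev. The pattern modelled with a C11 atomic exchange:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool inited = true;

static void client_uninit(void)
{
	/* atomic_exchange returns the old value, like test_and_clear_bit */
	if (!atomic_exchange(&inited, false)) {
		puts("already uninitialized");
		return;
	}
	puts("tearing down rings and vectors");
}

int main(void)
{
	client_uninit();	/* performs the teardown */
	client_uninit();	/* second call is a harmless no-op */
	return 0;
}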
@@ -3708,8 +3966,22 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv)
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct net_device *ndev = kinfo->netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return 0;
+
+ /* The hardware cannot selectively delete entries from its table
+ * space, so for a function reset software must remove them itself.
+ */
+ if (hns3_dev_ongoing_func_reset(ae_dev)) {
+ hns3_remove_hw_addr(ndev);
+ hns3_del_all_fd_rules(ndev, false);
+ }
if (!netif_running(ndev))
return 0;
@@ -3720,6 +3992,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
int ret = 0;
if (netif_running(kinfo->netdev)) {
@@ -3729,9 +4002,10 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
"hns net up fail, ret=%d!\n", ret);
return ret;
}
- handle->last_reset_time = jiffies;
}
+ clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+
return ret;
}
@@ -3771,28 +4045,44 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ ret = hns3_nic_alloc_vector_data(priv);
+ if (ret)
+ return ret;
+
hns3_restore_coal(priv);
ret = hns3_nic_init_vector_data(priv);
if (ret)
- return ret;
+ goto err_dealloc_vector;
ret = hns3_init_all_ring(priv);
- if (ret) {
- hns3_nic_uninit_vector_data(priv);
- priv->ring_data = NULL;
- }
+ if (ret)
+ goto err_uninit_vector;
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+ return ret;
+
+err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+ priv->ring_data = NULL;
+err_dealloc_vector:
+ hns3_nic_dealloc_vector_data(priv);
return ret;
}
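The init path now unwinds with a reverse-order goto ladder, so each failure releases exactly what was set up before it, including the newly added vector allocation. A compact sketch of the pattern (all helper names invented, init_rings() forced to fail for demonstration):

#include <stdio.h>

static int alloc_vectors(void)		{ return 0; }
static int init_vectors(void)		{ return 0; }
static int init_rings(void)		{ return -1; }	/* simulated failure */
static void uninit_vectors(void)	{ puts("uninit vectors"); }
static void dealloc_vectors(void)	{ puts("dealloc vectors"); }

/* Reverse-order unwind: each label undoes exactly one earlier step. */
static int reinit(void)
{
	int ret;

	ret = alloc_vectors();
	if (ret)
		return ret;
	ret = init_vectors();
	if (ret)
		goto err_dealloc;
	ret = init_rings();
	if (ret)
		goto err_uninit;
	return 0;

err_uninit:
	uninit_vectors();
err_dealloc:
	dealloc_vectors();
	return ret;
}

int main(void)
{
	return reinit() ? 1 : 0;
}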
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ return 0;
+ }
+
hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
@@ -3803,18 +4093,15 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_store_coal(priv);
+ ret = hns3_nic_dealloc_vector_data(priv);
+ if (ret)
+ netdev_err(netdev, "dealloc vector error\n");
+
ret = hns3_uninit_all_ring(priv);
if (ret)
netdev_err(netdev, "uninit ring error\n");
- /* it is cumbersome for hardware to pick-and-choose entries for deletion
- * from table space. Hence, for function reset software intervention is
- * required to delete the entries
- */
- if (hns3_dev_ongoing_func_reset(ae_dev)) {
- hns3_remove_hw_addr(netdev);
- hns3_del_all_fd_rules(netdev, false);
- }
+ clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
}
@@ -3980,15 +4267,23 @@ static int __init hns3_init_module(void)
INIT_LIST_HEAD(&client.node);
+ hns3_dbg_register_debugfs(hns3_driver_name);
+
ret = hnae3_register_client(&client);
if (ret)
- return ret;
+ goto err_reg_client;
ret = pci_register_driver(&hns3_driver);
if (ret)
- hnae3_unregister_client(&client);
+ goto err_reg_driver;
return ret;
+
+err_reg_driver:
+ hnae3_unregister_client(&client);
+err_reg_client:
+ hns3_dbg_unregister_debugfs();
+ return ret;
}
module_init(hns3_init_module);
@@ -4000,6 +4295,7 @@ static void __exit hns3_exit_module(void)
{
pci_unregister_driver(&hns3_driver);
hnae3_unregister_client(&client);
+ hns3_dbg_unregister_debugfs();
}
module_exit(hns3_exit_module);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d3636d088aa3..e55995e93bb0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -15,7 +15,7 @@ extern const char hns3_driver_version[];
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
- HNS3_NIC_STATE_REINITING,
+ HNS3_NIC_STATE_INITED,
HNS3_NIC_STATE_DOWN,
HNS3_NIC_STATE_DISABLED,
HNS3_NIC_STATE_REMOVING,
@@ -47,7 +47,7 @@ enum hns3_nic_state {
#define HNS3_RING_PREFETCH_EN_REG 0x0007C
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
-#define HNS3_RING_RX_VM_REG 0x00090
+#define HNS3_RING_EN_REG 0x00090
#define HNS3_RING_T0_BE_RST 0x00094
#define HNS3_RING_COULD_BE_RST 0x00098
#define HNS3_RING_WRR_WEIGHT_REG 0x0009c
@@ -76,7 +76,10 @@ enum hns3_nic_state {
#define HNS3_RING_MAX_PENDING 32768
#define HNS3_RING_MIN_PENDING 8
#define HNS3_RING_BD_MULTIPLE 8
-#define HNS3_MAX_MTU 9728
+/* max frame size of mac */
+#define HNS3_MAC_MAX_FRAME 9728
+#define HNS3_MAX_MTU \
+ (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
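As a quick cross-check of the new macro, assuming the usual kernel values ETH_HLEN = 14, ETH_FCS_LEN = 4 and VLAN_HLEN = 4: 9728 - (14 + 4 + 2 * 4) = 9728 - 26 = 9702 bytes, which matches the updated MTU-range comment in hns3_client_init().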
#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
@@ -109,6 +112,10 @@ enum hns3_nic_state {
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
+#define HNS3_RXD_GRO_COUNT_S 24
+#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
+#define HNS3_RXD_GRO_FIXID_B 30
+#define HNS3_RXD_GRO_ECN_B 31
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
@@ -135,9 +142,8 @@ enum hns3_nic_state {
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
-#define HNS3_RXD_HDL_S 16
-#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S)
-#define HNS3_RXD_HSIND_B 31
+#define HNS3_RXD_GRO_SIZE_S 16
+#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
@@ -194,6 +200,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6
+#define HNS3_RING_EN_B 0
+
enum hns3_pkt_l3t_type {
HNS3_L3T_NONE,
HNS3_L3T_IPV6,
@@ -399,11 +407,19 @@ struct hns3_enet_ring {
*/
int next_to_clean;
+ int pull_len; /* head length for current packet */
+ u32 frag_num;
+ unsigned char *va; /* first buffer address for current packet */
+
u32 flag; /* ring attribute */
int irq_init_flag;
int numa_node;
cpumask_t affinity_mask;
+
+ int pending_buf;
+ struct sk_buff *skb;
+ struct sk_buff *tail_skb;
};
struct hns_queue;
@@ -460,8 +476,6 @@ enum hns3_link_mode_bits {
#define HNS3_INT_RL_MAX 0x00EC
#define HNS3_INT_RL_ENABLE_MASK 0x40
-#define HNS3_INT_ADAPT_DOWN_START 100
-
struct hns3_enet_coalesce {
u16 int_gl;
u8 gl_adapt_enable;
@@ -496,8 +510,6 @@ struct hns3_enet_tqp_vector {
char name[HNAE3_INT_NAME_LEN];
- /* when 0 should adjust interrupt coalesce parameter */
- u8 int_adapt_down;
unsigned long last_jiffies;
} ____cacheline_internodealigned_in_smp;
@@ -577,6 +589,11 @@ static inline int is_ring_empty(struct hns3_enet_ring *ring)
return ring->next_to_use == ring->next_to_clean;
}
+static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
u8 __iomem *reg_addr = READ_ONCE(base);
@@ -586,7 +603,21 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
{
- return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
+ return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
+ ae_dev->reset_type == HNAE3_FLR_RESET ||
+ ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
+ ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
+ ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
+}
+
+#define hns3_read_dev(a, reg) \
+ hns3_read_reg((a)->io_base, (reg))
+
+static inline bool hns3_nic_resetting(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}
#define hns3_write_dev(a, reg, value) \
@@ -648,4 +679,8 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle);
static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
#endif
+void hns3_dbg_init(struct hnae3_handle *handle);
+void hns3_dbg_uninit(struct hnae3_handle *handle);
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
+void hns3_dbg_unregister_debugfs(void);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a4762c2b8ba1..e678b6939da3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -291,6 +291,11 @@ static void hns3_self_test(struct net_device *ndev,
int test_index = 0;
u32 i;
+ if (hns3_nic_resetting(ndev)) {
+ netdev_err(ndev, "dev resetting!\n");
+ return;
+ }
+
/* Only do offline selftest, or pass by default */
if (eth_test->flags != ETH_TEST_FL_OFFLINE)
return;
@@ -530,6 +535,11 @@ static void hns3_get_ringparam(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int queue_num = h->kinfo.num_tqps;
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!\n");
+ return;
+ }
+
param->tx_max_pending = HNS3_RING_MAX_PENDING;
param->rx_max_pending = HNS3_RING_MAX_PENDING;
@@ -760,6 +770,9 @@ static int hns3_set_ringparam(struct net_device *ndev,
u32 old_desc_num, new_desc_num;
int ret;
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
@@ -808,7 +821,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
}
if (if_running)
- ret = dev_open(ndev);
+ ret = dev_open(ndev, NULL);
return ret;
}
@@ -872,6 +885,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
struct hnae3_handle *h = priv->ae_handle;
u16 queue_num = h->kinfo.num_tqps;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (queue >= queue_num) {
netdev_err(netdev,
"Invalid queue value %d! Queue max id=%d\n",
@@ -1033,6 +1049,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
int ret;
int i;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
ret = hns3_check_coalesce_para(netdev, cmd);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index 580e81743681..fffe8c1c45d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -6,6 +6,6 @@
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 690f62ed87dc..8af0cef5609b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -350,11 +350,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
hdev->hw.cmq.crq.next_to_use = 0;
hclge_cmd_init_regs(&hdev->hw);
- clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+ clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+ /* Check if there is a new reset pending: a higher level reset
+ * may be requested while a lower level reset is being processed.
+ */
+ if (hclge_is_reset_pending(hdev)) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ return -EBUSY;
+ }
+
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
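Clearing HCLGE_STATE_CMD_DISABLE only after both ring locks are dropped, then immediately re-checking for a pending reset, keeps commands from being issued into a queue that a higher level reset is about to tear down. A toy model of the check-after-enable pattern (plain bools stand in for the driver's state bits):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool reset_pending;	/* models hclge_is_reset_pending() */
static bool cmd_disabled = true;

static int cmd_init(void)
{
	cmd_disabled = false;	/* re-enable the command queue */
	if (reset_pending) {	/* a new, higher level reset arrived */
		cmd_disabled = true;
		return -EBUSY;	/* let the reset path rerun init */
	}
	return 0;
}

int main(void)
{
	reset_pending = true;
	printf("cmd_init: %d\n", cmd_init());
	return 0;
}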
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 872cd4bdd70d..f23042b24c09 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -86,11 +86,24 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_REG_NUM = 0x0040,
HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
+ HCLGE_OPC_DFX_BD_NUM = 0x0043,
+ HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
+ HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
+ HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
+ HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
+ HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
+ HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
+ HCLGE_OPC_DFX_NCSI_REG = 0x004A,
+ HCLGE_OPC_DFX_RTC_REG = 0x004B,
+ HCLGE_OPC_DFX_PPP_REG = 0x004C,
+ HCLGE_OPC_DFX_RCB_REG = 0x004D,
+ HCLGE_OPC_DFX_TQP_REG = 0x004E,
+ HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
+ HCLGE_OPC_DFX_QUERY_CHIP_CAP = 0x0050,
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
- HCLGE_OPC_QUERY_AN_RESULT = 0x0306,
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
@@ -126,6 +139,16 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
+ HCLGE_OPC_QSET_DFX_STS = 0x0844,
+ HCLGE_OPC_PRI_DFX_STS = 0x0845,
+ HCLGE_OPC_PG_DFX_STS = 0x0846,
+ HCLGE_OPC_PORT_DFX_STS = 0x0847,
+ HCLGE_OPC_SCH_NQ_CNT = 0x0848,
+ HCLGE_OPC_SCH_RQ_CNT = 0x0849,
+ HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
+ HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
+ HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
/* Packet buffer allocate commands */
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
@@ -142,6 +165,7 @@ enum hclge_opcode_type {
HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
HCLGE_OPC_QUERY_TX_STATUS = 0x0B03,
+ HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
HCLGE_OPC_QUERY_RX_STATUS = 0x0B13,
@@ -152,6 +176,7 @@ enum hclge_opcode_type {
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
@@ -210,27 +235,34 @@ enum hclge_opcode_type {
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+ /* SFP command */
+ HCLGE_OPC_SFP_GET_SPEED = 0x7104,
+
/* Error INT commands */
+ HCLGE_MAC_COMMON_INT_EN = 0x030E,
HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
- HCLGE_TM_SCH_ECC_ERR_RINT_CMD = 0x082d,
- HCLGE_TM_SCH_ECC_ERR_RINT_CE = 0x082f,
- HCLGE_TM_SCH_ECC_ERR_RINT_NFE = 0x0830,
- HCLGE_TM_SCH_ECC_ERR_RINT_FE = 0x0831,
- HCLGE_TM_SCH_MBIT_ECC_INFO_CMD = 0x0833,
+ HCLGE_SSU_ECC_INT_CMD = 0x0989,
+ HCLGE_SSU_COMMON_INT_CMD = 0x098C,
+ HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
+ HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
+ HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
HCLGE_COMMON_ECC_INT_CFG = 0x1505,
- HCLGE_IGU_EGU_TNL_INT_QUERY = 0x1802,
+ HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
+ HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
+ HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
+ HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
- HCLGE_IGU_EGU_TNL_INT_CLR = 0x1804,
- HCLGE_IGU_COMMON_INT_QUERY = 0x1805,
HCLGE_IGU_COMMON_INT_EN = 0x1806,
- HCLGE_IGU_COMMON_INT_CLR = 0x1807,
HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
- HCLGE_TM_QCN_MEM_INT_INFO_CMD = 0x1A17,
HCLGE_PPP_CMD0_INT_CMD = 0x2100,
HCLGE_PPP_CMD1_INT_CMD = 0x2101,
- HCLGE_NCSI_INT_QUERY = 0x2400,
+ HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
HCLGE_NCSI_INT_EN = 0x2401,
- HCLGE_NCSI_INT_CLR = 0x2402,
};
#define HCLGE_TQP_REG_OFFSET 0x80000
@@ -388,7 +420,9 @@ struct hclge_pf_res_cmd {
#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0)
__le16 pf_intr_vector_number;
__le16 pf_own_fun_number;
- __le32 rsv[3];
+ __le16 tx_buf_size;
+ __le16 dv_buf_size;
+ __le32 rsv[2];
};
#define HCLGE_CFG_OFFSET_S 0
@@ -542,20 +576,6 @@ struct hclge_config_mac_speed_dup_cmd {
u8 rsv[22];
};
-#define HCLGE_QUERY_SPEED_S 3
-#define HCLGE_QUERY_AN_B 0
-#define HCLGE_QUERY_DUPLEX_B 2
-
-#define HCLGE_QUERY_SPEED_M GENMASK(4, 0)
-#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B)
-#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B)
-
-struct hclge_query_an_speed_dup_cmd {
- u8 an_syn_dup_speed;
- u8 pause;
- u8 rsv[23];
-};
-
#define HCLGE_RING_ID_MASK GENMASK(9, 0)
#define HCLGE_TQP_ENABLE_B 0
@@ -572,6 +592,11 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
+struct hclge_sfp_speed_cmd {
+ __le32 sfp_speed;
+ u32 rsv[5];
+};
+
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
@@ -746,6 +771,24 @@ struct hclge_cfg_tx_queue_pointer_cmd {
u8 rsv[14];
};
+#pragma pack(1)
+struct hclge_mac_ethertype_idx_rd_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ u8 mac_add[6];
+ __le16 index;
+ __le16 ethter_type;
+ __le16 egress_port;
+ __le16 egress_queue;
+ __le16 rev0;
+ u8 i_port_bitmap;
+ u8 i_port_direction;
+ u8 rev1[2];
+};
+
+#pragma pack()
+
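#pragma pack(1) removes compiler padding so the struct matches the firmware's byte layout exactly; the fields above sum to the 24 data bytes one command descriptor carries. A sketch showing how a C11 compile-time size check can guard such a layout (this mirror struct is for illustration only, not the driver's definition):

#include <assert.h>
#include <stdint.h>

#pragma pack(1)
struct mac_ethertype_idx_rd {	/* illustrative mirror of the command */
	uint8_t  flags;
	uint8_t  resp_code;
	uint16_t vlan_tag;
	uint8_t  mac_add[6];
	uint16_t index;
	uint16_t ethter_type;
	uint16_t egress_port;
	uint16_t egress_queue;
	uint16_t rev0;
	uint8_t  i_port_bitmap;
	uint8_t  i_port_direction;
	uint8_t  rev1[2];
};
#pragma pack()

/* Each descriptor carries 24 data bytes; packing must make the
 * struct exactly that size, or the firmware fields shift. */
static_assert(sizeof(struct mac_ethertype_idx_rd) == 24,
	      "command layout must be 24 bytes");

int main(void)
{
	return 0;
}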
#define HCLGE_TSO_MSS_MIN_S 0
#define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0)
@@ -758,6 +801,12 @@ struct hclge_cfg_tso_status_cmd {
u8 rsv[20];
};
+#define HCLGE_GRO_EN_B 0
+struct hclge_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
#define HCLGE_TSO_MSS_MIN 256
#define HCLGE_TSO_MSS_MAX 9668
@@ -792,6 +841,7 @@ struct hclge_serdes_lb_cmd {
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
+#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x200 /* 512 byte */
#define HCLGE_TYPE_CRQ 0
#define HCLGE_TYPE_CSQ 1
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index e72f724123d7..f6323b2501dc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -35,7 +35,9 @@ static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
}
}
- return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
+ hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
+
+ return 0;
}
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
@@ -70,25 +72,61 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
return 0;
}
+static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
+ u8 *prio_tc)
+{
+ int i;
+
+ if (num_tc > hdev->tc_max) {
+ dev_err(&hdev->pdev->dev,
+ "tc num checking failed, %u > tc_max(%u)\n",
+ num_tc, hdev->tc_max);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ if (prio_tc[i] >= num_tc) {
+ dev_err(&hdev->pdev->dev,
+ "prio_tc[%u] checking failed, %u >= num_tc(%u)\n",
+ i, prio_tc[i], num_tc);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ if (num_tc > hdev->vport[i].alloc_tqps) {
+ dev_err(&hdev->pdev->dev,
+ "allocated tqp(%u) checking failed, %u > tqp(%u)\n",
+ i, num_tc, hdev->vport[i].alloc_tqps);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
u8 *tc, bool *changed)
{
bool has_ets_tc = false;
u32 total_ets_bw = 0;
u8 max_tc = 0;
+ int ret;
u8 i;
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (ets->prio_tc[i] >= hdev->tc_max ||
- i >= hdev->tc_max)
- return -EINVAL;
-
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
*changed = true;
if (ets->prio_tc[i] > max_tc)
max_tc = ets->prio_tc[i];
+ }
+ ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
@@ -184,9 +222,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
return ret;
- ret = hclge_tm_schd_info_update(hdev, num_tc);
- if (ret)
- return ret;
+ hclge_tm_schd_info_update(hdev, num_tc);
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -305,20 +341,12 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
return -EINVAL;
- if (tc > hdev->tc_max) {
- dev_err(&hdev->pdev->dev,
- "setup tc failed, tc(%u) > tc_max(%u)\n",
- tc, hdev->tc_max);
- return -EINVAL;
- }
-
- ret = hclge_tm_schd_info_update(hdev, tc);
+ ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
if (ret)
- return ret;
+ return -EINVAL;
- ret = hclge_tm_prio_tc_info_update(hdev, prio_tc);
- if (ret)
- return ret;
+ hclge_tm_schd_info_update(hdev, tc);
+ hclge_tm_prio_tc_info_update(hdev, prio_tc);
ret = hclge_tm_init_hw(hdev);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
new file mode 100644
index 000000000000..26d80504c730
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -0,0 +1,933 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/device.h>
+
+#include "hclge_debugfs.h"
+#include "hclge_cmd.h"
+#include "hclge_main.h"
+#include "hclge_tm.h"
+#include "hnae3.h"
+
+static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
+{
+ struct hclge_desc desc[4];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true);
+ desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 4);
+ if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+ dev_err(&hdev->pdev->dev,
+ "get dfx bdnum fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ return (int)desc[offset / 6].data[offset % 6];
+}
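The DFX BD-number query spans four descriptors of six 32-bit data words each, so flat entry offset lives at desc[offset / 6].data[offset % 6]. A tiny sketch printing that mapping:

#include <stdio.h>

/* Map a flat data-word offset onto (descriptor, word), the way
 * hclge_dbg_get_dfx_bd_num() indexes its four-descriptor reply. */
int main(void)
{
	int offset;

	for (offset = 0; offset < 13; offset++)
		printf("offset %2d -> desc[%d].data[%d]\n",
		       offset, offset / 6, offset % 6);
	return 0;
}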
+
+static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src,
+ int index, int bd_num,
+ enum hclge_opcode_type cmd)
+{
+ struct hclge_desc *desc = desc_src;
+ int ret, i;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ desc->data[0] = cpu_to_le32(index);
+
+ for (i = 1; i < bd_num; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "read reg cmd send fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
+ struct hclge_dbg_dfx_message *dfx_message,
+ char *cmd_buf, int msg_num, int offset,
+ enum hclge_opcode_type cmd)
+{
+ struct hclge_desc *desc_src;
+ struct hclge_desc *desc;
+ int bd_num, buf_len;
+ int ret, i;
+ int index;
+ int max;
+
+ ret = kstrtouint(cmd_buf, 10, &index);
+ index = (ret != 0) ? 0 : index;
+
+ bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset);
+ if (bd_num <= 0)
+ return;
+
+ buf_len = sizeof(struct hclge_desc) * bd_num;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
+ return;
+ }
+
+ desc = desc_src;
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, cmd);
+ if (ret != HCLGE_CMD_EXEC_SUCCESS) {
+ kfree(desc_src);
+ return;
+ }
+
+ max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num;
+
+ desc = desc_src;
+ for (i = 0; i < max; i++) {
+ if (i > 0 && (i % 6) == 0)
+ desc++;
+ if (dfx_message->flag)
+ dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
+ dfx_message->message, desc->data[i % 6]);
+
+ dfx_message++;
+ }
+
+ kfree(desc_src);
+}
+
+static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_dbg_bitmap_cmd *bitmap;
+ int rq_id, pri_id, qset_id;
+ int port_id, nq_id, pg_id;
+ struct hclge_desc desc[2];
+
+ int cnt, ret;
+
+ cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
+ &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
+ if (cnt != 6) {
+ dev_err(&hdev->pdev->dev,
+ "dump dcb: bad command parameter, cnt=%d\n", cnt);
+ return;
+ }
+
+ ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1,
+ HCLGE_OPC_QSET_DFX_STS);
+ if (ret)
+ return;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
+ dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
+ dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS);
+ if (ret)
+ return;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
+ dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS);
+ if (ret)
+ return;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
+ dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
+ HCLGE_OPC_PORT_DFX_STS);
+ if (ret)
+ return;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT);
+ if (ret)
+ return;
+
+ dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
+ if (ret)
+ return;
+
+ dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
+ if (ret)
+ return;
+
+ dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]);
+ dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]);
+ dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]);
+ dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]);
+ dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]);
+ dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
+ HCLGE_OPC_TM_INTERNAL_CNT);
+ if (ret)
+ return;
+
+ dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]);
+
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
+ HCLGE_OPC_TM_INTERNAL_STS_1);
+ if (ret)
+ return;
+
+ dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]);
+ dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]);
+ dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]);
+ dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]);
+ dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]);
+}
+
+static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf)
+{
+ int msg_num;
+
+ if (strncmp(&cmd_buf[9], "bios common", 11) == 0) {
+ msg_num = sizeof(hclge_dbg_bios_common_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg,
+ &cmd_buf[21], msg_num,
+ HCLGE_DBG_DFX_BIOS_OFFSET,
+ HCLGE_OPC_DFX_BIOS_COMMON_REG);
+ } else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_ssu_reg_0) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_SSU_0_OFFSET,
+ HCLGE_OPC_DFX_SSU_REG_0);
+
+ msg_num = sizeof(hclge_dbg_ssu_reg_1) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_SSU_1_OFFSET,
+ HCLGE_OPC_DFX_SSU_REG_1);
+
+ msg_num = sizeof(hclge_dbg_ssu_reg_2) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_SSU_2_OFFSET,
+ HCLGE_OPC_DFX_SSU_REG_2);
+ } else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) {
+ msg_num = sizeof(hclge_dbg_igu_egu_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg,
+ &cmd_buf[17], msg_num,
+ HCLGE_DBG_DFX_IGU_OFFSET,
+ HCLGE_OPC_DFX_IGU_EGU_REG);
+ } else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_rpu_reg_0) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_RPU_0_OFFSET,
+ HCLGE_OPC_DFX_RPU_REG_0);
+
+ msg_num = sizeof(hclge_dbg_rpu_reg_1) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_RPU_1_OFFSET,
+ HCLGE_OPC_DFX_RPU_REG_1);
+ } else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) {
+ msg_num = sizeof(hclge_dbg_ncsi_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg,
+ &cmd_buf[14], msg_num,
+ HCLGE_DBG_DFX_NCSI_OFFSET,
+ HCLGE_OPC_DFX_NCSI_REG);
+ } else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_rtc_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_RTC_OFFSET,
+ HCLGE_OPC_DFX_RTC_REG);
+ } else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_ppp_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_PPP_OFFSET,
+ HCLGE_OPC_DFX_PPP_REG);
+ } else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_rcb_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_RCB_OFFSET,
+ HCLGE_OPC_DFX_RCB_REG);
+ } else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) {
+ msg_num = sizeof(hclge_dbg_tqp_reg) /
+ sizeof(struct hclge_dbg_dfx_message);
+ hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg,
+ &cmd_buf[13], msg_num,
+ HCLGE_DBG_DFX_TQP_OFFSET,
+ HCLGE_OPC_DFX_TQP_REG);
+ } else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) {
+ hclge_dbg_dump_dcb(hdev, &cmd_buf[13]);
+ } else {
+ dev_info(&hdev->pdev->dev, "unknown command\n");
+ return;
+ }
+}
+
+static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
+ char *title_buf, char *true_buf,
+ char *false_buf)
+{
+ if (flag)
+ dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
+ true_buf);
+ else
+ dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
+ false_buf);
+}
+
+static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
+{
+ struct hclge_ets_tc_weight_cmd *ets_weight;
+ struct hclge_desc desc;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret);
+ return;
+ }
+
+ ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "dump tc\n");
+ dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
+ ets_weight->weight_offset);
+
+ for (i = 0; i < HNAE3_MAX_TC; i++)
+ hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
+ "tc", "no sp mode", "sp mode");
+}
+
+static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
+{
+ struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
+ struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+ struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
+ pg_shap_cfg_cmd->pg_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
+ pg_shap_cfg_cmd->pg_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PORT_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
+ port_shap_cfg_cmd->port_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);
+
+ cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]);
+
+ cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]);
+
+ cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_id: %u\n",
+ bp_to_qs_map_cmd->tc_id);
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_shapping: 0x%x\n",
+ bp_to_qs_map_cmd->qs_group_id);
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
+ bp_to_qs_map_cmd->qs_bit_map);
+ return;
+
+err_tm_pg_cmd_send:
+ dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n",
+ cmd, ret);
+}
+
+static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
+{
+ struct hclge_priority_weight_cmd *priority_weight;
+ struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
+ struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
+ struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
+ struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+ struct hclge_pg_weight_cmd *pg_weight;
+ struct hclge_qs_weight_cmd *qs_weight;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump tm\n");
+ dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n",
+ pg_to_pri_map->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
+ pg_to_pri_map->pri_bit_map);
+
+ cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
+ qs_to_pri_map->qs_id);
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
+ qs_to_pri_map->priority);
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
+ qs_to_pri_map->link_vld);
+
+ cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n",
+ nq_to_qs_map->qset_id);
+
+ cmd = HCLGE_OPC_TM_PG_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
+ dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_QS_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
+ dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_PRI_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
+ shap_cfg_cmd->pri_shapping_para);
+
+ cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
+ shap_cfg_cmd->pri_shapping_para);
+
+ hclge_dbg_dump_tm_pg(hdev);
+
+ return;
+
+err_tm_cmd_send:
+ dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n",
+ cmd, ret);
+}
+
+static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf)
+{
+ struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+ struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
+ struct hclge_qs_to_pri_link_cmd *map;
+ struct hclge_tqp_tx_queue_tc_cmd *tc;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc;
+ int queue_id, group_id;
+ u32 qset_maping[32];
+ int tc_id, qset_id;
+ int pri_id, ret;
+ u32 i;
+
+ ret = kstrtouint(&cmd_buf[12], 10, &queue_id);
+ queue_id = (ret != 0) ? 0 : queue_id;
+
+ cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
+ nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
+ qset_id = nq_to_qs_map->qset_id & 0x3FF;
+
+ cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
+ map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ map->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
+ pri_id = map->priority;
+
+ cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
+ tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ tc->queue_id = cpu_to_le16(queue_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
+ tc_id = tc->tc_id & 0x7;
+
+ dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
+ dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
+ queue_id, qset_id, pri_id, tc_id);
+
+ cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
+ bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+ for (group_id = 0; group_id < 32; group_id++) {
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ bp_to_qs_map_cmd->tc_id = tc_id;
+ bp_to_qs_map_cmd->qs_group_id = group_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
+
+ qset_maping[group_id] = bp_to_qs_map_cmd->qs_bit_map;
+ }
+
+ dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n");
+
+ i = 0;
+ for (group_id = 0; group_id < 4; group_id++) {
+ dev_info(&hdev->pdev->dev,
+ "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
+ group_id * 256, qset_maping[(u32)(i + 7)],
+ qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)],
+ qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)],
+ qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)],
+ qset_maping[i]);
+ i += 8;
+ }
+
+ return;
+
+err_tm_map_cmd_send:
+ dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), status is %d\n",
+ cmd, ret);
+}
+
+static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "dump checksum fail, status is %d.\n",
+ ret);
+ return;
+ }
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
+ dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
+ pause_param->pause_trans_gap);
+ dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
+ pause_param->pause_trans_time);
+}
+
+static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
+{
+ struct hclge_qos_pri_map_cmd *pri_map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "dump qos pri map fail, status is %d.\n", ret);
+ return;
+ }
+
+ pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump qos pri map\n");
+ dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
+ dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
+ dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
+ dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
+ dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
+ dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
+ dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
+ dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
+ dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
+}
+
+static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
+ struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+ struct hclge_rx_priv_wl_buf *rx_priv_wl;
+ struct hclge_rx_com_wl *rx_packet_cnt;
+ struct hclge_rx_com_thrd *rx_com_thrd;
+ struct hclge_rx_com_wl *rx_com_wl;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ cmd = HCLGE_OPC_TX_BUFF_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
+
+ tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM; i++)
+ dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
+ tx_buf_cmd->tx_pkt_buff[i]);
+
+ cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM; i++)
+ dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
+ rx_buf_cmd->buf_num[i]);
+
+ dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
+ rx_buf_cmd->shared_buf);
+
+ cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+
+ rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);
+
+ cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ rx_com_thrd->com_thrd[i].high,
+ rx_com_thrd->com_thrd[i].low);
+
+ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4,
+ rx_com_thrd->com_thrd[i].high,
+ rx_com_thrd->com_thrd[i].low);
+
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev, "\n");
+ dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev,
+ "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
+
+ return;
+
+err_qos_cmd_send:
+ dev_err(&hdev->pdev->dev,
+ "dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret);
+}
+
+static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
+{
+ struct hclge_mac_ethertype_idx_rd_cmd *req0;
+ char printf_buf[HCLGE_DBG_BUF_LEN];
+ struct hclge_desc desc;
+ int ret, i;
+
+ dev_info(&hdev->pdev->dev, "mng tab:\n");
+ memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
+ strncat(printf_buf,
+ "entry|mac_addr |mask|ether|mask|vlan|mask",
+ HCLGE_DBG_BUF_LEN - 1);
+ strncat(printf_buf + strlen(printf_buf),
+ "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
+ HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);
+
+ dev_info(&hdev->pdev->dev, "%s", printf_buf);
+
+ for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
+ true);
+ req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
+ req0->index = cpu_to_le16(i);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "call hclge_cmd_send fail, ret = %d\n", ret);
+ return;
+ }
+
+ if (!req0->resp_code)
+ continue;
+
+ memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
+ snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
+ "%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
+ req0->index, req0->mac_add[0], req0->mac_add[1],
+ req0->mac_add[2], req0->mac_add[3], req0->mac_add[4],
+ req0->mac_add[5]);
+
+ snprintf(printf_buf + strlen(printf_buf),
+ HCLGE_DBG_BUF_LEN - strlen(printf_buf),
+ "%x |%04x |%x |%04x|%x |%02x |%02x |",
+ !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
+ req0->ethter_type,
+ !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
+ req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG,
+ !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
+ req0->i_port_bitmap, req0->i_port_direction);
+
+ snprintf(printf_buf + strlen(printf_buf),
+ HCLGE_DBG_BUF_LEN - strlen(printf_buf),
+ "%d |%d |%02d |%04d|%x\n",
+ !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ req0->egress_port & HCLGE_DBG_MNG_PF_ID,
+ (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ req0->egress_queue,
+ !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B));
+
+ dev_info(&hdev->pdev->dev, "%s", printf_buf);
+ }
+}
+
+static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
+ bool sel_x, u32 loc)
+{
+ struct hclge_fd_tcam_config_1_cmd *req1;
+ struct hclge_fd_tcam_config_2_cmd *req2;
+ struct hclge_fd_tcam_config_3_cmd *req3;
+ struct hclge_desc desc[3];
+ int ret, i;
+ u32 *req;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
+
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
+ req1->stage = stage;
+ req1->xy_sel = sel_x ? 1 : 0;
+ req1->index = cpu_to_le32(loc);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ if (ret)
+ return;
+
+ dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
+ sel_x ? "x" : "y", loc);
+
+ req = (u32 *)req1->tcam_data;
+ for (i = 0; i < 2; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ req = (u32 *)req2->tcam_data;
+ for (i = 0; i < 6; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ req = (u32 *)req3->tcam_data;
+ for (i = 0; i < 5; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+}
+
+static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
+{
+ u32 i;
+
+ for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
+ hclge_dbg_fd_tcam_read(hdev, 0, true, i);
+ hclge_dbg_fd_tcam_read(hdev, 0, false, i);
+ }
+}
+
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
+ hclge_dbg_fd_tcam(hdev);
+ } else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
+ hclge_dbg_dump_tc(hdev);
+ } else if (strncmp(cmd_buf, "dump tm map", 11) == 0) {
+ hclge_dbg_dump_tm_map(hdev, cmd_buf);
+ } else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
+ hclge_dbg_dump_tm(hdev);
+ } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
+ hclge_dbg_dump_qos_pause_cfg(hdev);
+ } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
+ hclge_dbg_dump_qos_pri_map(hdev);
+ } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
+ hclge_dbg_dump_qos_buf_cfg(hdev);
+ } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
+ hclge_dbg_dump_mng_table(hdev);
+ } else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
+ hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
+ } else {
+ dev_info(&hdev->pdev->dev, "unknown command\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
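One ordering subtlety in this strncmp dispatch: longer commands must be tested before their prefixes, which is why "dump tm map" precedes "dump tm" above; the 7-byte "dump tm" match would otherwise swallow it. A minimal sketch of the rule:

#include <stdio.h>
#include <string.h>

/* Longest-prefix-first dispatch: order matters with strncmp. */
static const char *dispatch(const char *cmd)
{
	if (strncmp(cmd, "dump tm map", 11) == 0)
		return "tm map";	/* must come before "dump tm" */
	if (strncmp(cmd, "dump tm", 7) == 0)
		return "tm";
	return "unknown";
}

int main(void)
{
	printf("%s\n", dispatch("dump tm map 3"));	/* -> tm map */
	printf("%s\n", dispatch("dump tm"));		/* -> tm */
	return 0;
}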
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
new file mode 100644
index 000000000000..d055fda41775
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -0,0 +1,713 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#ifndef __HCLGE_DEBUGFS_H
+#define __HCLGE_DEBUGFS_H
+
+#define HCLGE_DBG_BUF_LEN 256
+#define HCLGE_DBG_MNG_TBL_MAX 64
+
+#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0)
+#define HCLGE_DBG_MNG_MAC_MASK_B BIT(1)
+#define HCLGE_DBG_MNG_ETHER_MASK_B BIT(2)
+#define HCLGE_DBG_MNG_E_TYPE_B BIT(11)
+#define HCLGE_DBG_MNG_DROP_B BIT(13)
+#define HCLGE_DBG_MNG_VLAN_TAG 0x0FFF
+#define HCLGE_DBG_MNG_PF_ID 0x0007
+#define HCLGE_DBG_MNG_VF_ID 0x00FF
+
+/* Get DFX BD number offset */
+#define HCLGE_DBG_DFX_BIOS_OFFSET 1
+#define HCLGE_DBG_DFX_SSU_0_OFFSET 2
+#define HCLGE_DBG_DFX_SSU_1_OFFSET 3
+#define HCLGE_DBG_DFX_IGU_OFFSET 4
+#define HCLGE_DBG_DFX_RPU_0_OFFSET 5
+
+#define HCLGE_DBG_DFX_RPU_1_OFFSET 6
+#define HCLGE_DBG_DFX_NCSI_OFFSET 7
+#define HCLGE_DBG_DFX_RTC_OFFSET 8
+#define HCLGE_DBG_DFX_PPP_OFFSET 9
+#define HCLGE_DBG_DFX_RCB_OFFSET 10
+#define HCLGE_DBG_DFX_TQP_OFFSET 11
+
+#define HCLGE_DBG_DFX_SSU_2_OFFSET 12
+
+#pragma pack(1)
+
+struct hclge_qos_pri_map_cmd {
+ u8 pri0_tc : 4,
+ pri1_tc : 4;
+ u8 pri2_tc : 4,
+ pri3_tc : 4;
+ u8 pri4_tc : 4,
+ pri5_tc : 4;
+ u8 pri6_tc : 4,
+ pri7_tc : 4;
+ u8 vlan_pri : 4,
+ rev : 4;
+};
+
+struct hclge_dbg_bitmap_cmd {
+ union {
+ u8 bitmap;
+ struct {
+ u8 bit0 : 1,
+ bit1 : 1,
+ bit2 : 1,
+ bit3 : 1,
+ bit4 : 1,
+ bit5 : 1,
+ bit6 : 1,
+ bit7 : 1;
+ };
+ };
+};
+
+struct hclge_dbg_dfx_message {
+ int flag;
+ char message[60];
+};
+
+#pragma pack()
+
+static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+ {false, "Reserved"},
+ {true, "BP_CPU_STATE"},
+ {true, "DFX_MSIX_INFO_NIC_0"},
+ {true, "DFX_MSIX_INFO_NIC_1"},
+ {true, "DFX_MSIX_INFO_NIC_2"},
+ {true, "DFX_MSIX_INFO_NIC_3"},
+
+ {true, "DFX_MSIX_INFO_ROC_0"},
+ {true, "DFX_MSIX_INFO_ROC_1"},
+ {true, "DFX_MSIX_INFO_ROC_2"},
+ {true, "DFX_MSIX_INFO_ROC_3"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+ {false, "Reserved"},
+ {true, "SSU_ETS_PORT_STATUS"},
+ {true, "SSU_ETS_TCG_STATUS"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {true, "SSU_BP_STATUS_0"},
+
+ {true, "SSU_BP_STATUS_1"},
+ {true, "SSU_BP_STATUS_2"},
+ {true, "SSU_BP_STATUS_3"},
+ {true, "SSU_BP_STATUS_4"},
+ {true, "SSU_BP_STATUS_5"},
+ {true, "SSU_MAC_TX_PFC_IND"},
+
+ {true, "MAC_SSU_RX_PFC_IND"},
+ {true, "BTMP_AGEING_ST_B0"},
+ {true, "BTMP_AGEING_ST_B1"},
+ {true, "BTMP_AGEING_ST_B2"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "FULL_DROP_NUM"},
+ {true, "PART_DROP_NUM"},
+ {true, "PPP_KEY_DROP_NUM"},
+ {true, "PPP_RLT_DROP_NUM"},
+ {true, "LO_PRI_UNICAST_RLT_DROP_NUM"},
+ {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"},
+
+ {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"},
+ {true, "NCSI_PACKET_CURR_BUFFER_CNT"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK0"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK1"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK2"},
+ {true, "SSU_MB_RD_RLT_DROP_CNT"},
+
+ {true, "SSU_PPP_MAC_KEY_NUM_L"},
+ {true, "SSU_PPP_MAC_KEY_NUM_H"},
+ {true, "SSU_PPP_HOST_KEY_NUM_L"},
+ {true, "SSU_PPP_HOST_KEY_NUM_H"},
+ {true, "PPP_SSU_MAC_RLT_NUM_L"},
+ {true, "PPP_SSU_MAC_RLT_NUM_H"},
+
+ {true, "PPP_SSU_HOST_RLT_NUM_L"},
+ {true, "PPP_SSU_HOST_RLT_NUM_H"},
+ {true, "NCSI_RX_PACKET_IN_CNT_L"},
+ {true, "NCSI_RX_PACKET_IN_CNT_H"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_L"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_H"},
+
+ {true, "SSU_KEY_DROP_NUM"},
+ {true, "MB_UNCOPY_NUM"},
+ {true, "RX_OQ_DROP_PKT_CNT"},
+ {true, "TX_OQ_DROP_PKT_CNT"},
+ {true, "BANK_UNBALANCE_DROP_CNT"},
+ {true, "BANK_UNBALANCE_RX_DROP_CNT"},
+
+ {true, "NIC_L2_ERR_DROP_PKT_CNT"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT"},
+ {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "RX_OQ_GLB_DROP_PKT_CNT"},
+ {false, "Reserved"},
+
+ {true, "LO_PRI_UNICAST_CUR_CNT"},
+ {true, "HI_PRI_MULTICAST_CUR_CNT"},
+ {true, "LO_PRI_MULTICAST_CUR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
+ {true, "prt_id"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_0"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_1"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_2"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_3"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_4"},
+
+ {true, "PACKET_TC_CURR_BUFFER_CNT_5"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_6"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_7"},
+ {true, "PACKET_CURR_BUFFER_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "RX_PACKET_IN_CNT_L"},
+ {true, "RX_PACKET_IN_CNT_H"},
+ {true, "RX_PACKET_OUT_CNT_L"},
+ {true, "RX_PACKET_OUT_CNT_H"},
+ {true, "TX_PACKET_IN_CNT_L"},
+ {true, "TX_PACKET_IN_CNT_H"},
+
+ {true, "TX_PACKET_OUT_CNT_L"},
+ {true, "TX_PACKET_OUT_CNT_H"},
+ {true, "ROC_RX_PACKET_IN_CNT_L"},
+ {true, "ROC_RX_PACKET_IN_CNT_H"},
+ {true, "ROC_TX_PACKET_OUT_CNT_L"},
+ {true, "ROC_TX_PACKET_OUT_CNT_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_0_L"},
+ {true, "RX_PACKET_TC_IN_CNT_0_H"},
+ {true, "RX_PACKET_TC_IN_CNT_1_L"},
+ {true, "RX_PACKET_TC_IN_CNT_1_H"},
+ {true, "RX_PACKET_TC_IN_CNT_2_L"},
+ {true, "RX_PACKET_TC_IN_CNT_2_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_3_L"},
+ {true, "RX_PACKET_TC_IN_CNT_3_H"},
+ {true, "RX_PACKET_TC_IN_CNT_4_L"},
+ {true, "RX_PACKET_TC_IN_CNT_4_H"},
+ {true, "RX_PACKET_TC_IN_CNT_5_L"},
+ {true, "RX_PACKET_TC_IN_CNT_5_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_6_L"},
+ {true, "RX_PACKET_TC_IN_CNT_6_H"},
+ {true, "RX_PACKET_TC_IN_CNT_7_L"},
+ {true, "RX_PACKET_TC_IN_CNT_7_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_7_H"},
+ {true, "TX_PACKET_TC_IN_CNT_0_L"},
+ {true, "TX_PACKET_TC_IN_CNT_0_H"},
+ {true, "TX_PACKET_TC_IN_CNT_1_L"},
+ {true, "TX_PACKET_TC_IN_CNT_1_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_2_L"},
+ {true, "TX_PACKET_TC_IN_CNT_2_H"},
+ {true, "TX_PACKET_TC_IN_CNT_3_L"},
+ {true, "TX_PACKET_TC_IN_CNT_3_H"},
+ {true, "TX_PACKET_TC_IN_CNT_4_L"},
+ {true, "TX_PACKET_TC_IN_CNT_4_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_5_L"},
+ {true, "TX_PACKET_TC_IN_CNT_5_H"},
+ {true, "TX_PACKET_TC_IN_CNT_6_L"},
+ {true, "TX_PACKET_TC_IN_CNT_6_H"},
+ {true, "TX_PACKET_TC_IN_CNT_7_L"},
+ {true, "TX_PACKET_TC_IN_CNT_7_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_0_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_3_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_6_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
+ {true, "OQ_INDEX"},
+ {true, "QUEUE_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
+ {true, "prt_id"},
+ {true, "IGU_RX_ERR_PKT"},
+ {true, "IGU_RX_NO_SOF_PKT"},
+ {true, "EGU_TX_1588_SHORT_PKT"},
+ {true, "EGU_TX_1588_PKT"},
+ {true, "EGU_TX_ERR_PKT"},
+
+ {true, "IGU_RX_OUT_L2_PKT"},
+ {true, "IGU_RX_OUT_L3_PKT"},
+ {true, "IGU_RX_OUT_L4_PKT"},
+ {true, "IGU_RX_IN_L2_PKT"},
+ {true, "IGU_RX_IN_L3_PKT"},
+ {true, "IGU_RX_IN_L4_PKT"},
+
+ {true, "IGU_RX_EL3E_PKT"},
+ {true, "IGU_RX_EL4E_PKT"},
+ {true, "IGU_RX_L3E_PKT"},
+ {true, "IGU_RX_L4E_PKT"},
+ {true, "IGU_RX_ROCEE_PKT"},
+ {true, "IGU_RX_OUT_UDP0_PKT"},
+
+ {true, "IGU_RX_IN_UDP0_PKT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "IGU_RX_OVERSIZE_PKT_L"},
+ {true, "IGU_RX_OVERSIZE_PKT_H"},
+ {true, "IGU_RX_UNDERSIZE_PKT_L"},
+ {true, "IGU_RX_UNDERSIZE_PKT_H"},
+ {true, "IGU_RX_OUT_ALL_PKT_L"},
+ {true, "IGU_RX_OUT_ALL_PKT_H"},
+
+ {true, "IGU_TX_OUT_ALL_PKT_L"},
+ {true, "IGU_TX_OUT_ALL_PKT_H"},
+ {true, "IGU_RX_UNI_PKT_L"},
+ {true, "IGU_RX_UNI_PKT_H"},
+ {true, "IGU_RX_MULTI_PKT_L"},
+ {true, "IGU_RX_MULTI_PKT_H"},
+
+ {true, "IGU_RX_BROAD_PKT_L"},
+ {true, "IGU_RX_BROAD_PKT_H"},
+ {true, "EGU_TX_OUT_ALL_PKT_L"},
+ {true, "EGU_TX_OUT_ALL_PKT_H"},
+ {true, "EGU_TX_UNI_PKT_L"},
+ {true, "EGU_TX_UNI_PKT_H"},
+
+ {true, "EGU_TX_MULTI_PKT_L"},
+ {true, "EGU_TX_MULTI_PKT_H"},
+ {true, "EGU_TX_BROAD_PKT_L"},
+ {true, "EGU_TX_BROAD_PKT_H"},
+ {true, "IGU_TX_KEY_NUM_L"},
+ {true, "IGU_TX_KEY_NUM_H"},
+
+ {true, "IGU_RX_NON_TUN_PKT_L"},
+ {true, "IGU_RX_NON_TUN_PKT_H"},
+ {true, "IGU_RX_TUN_PKT_L"},
+ {true, "IGU_RX_TUN_PKT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
+ {true, "tc_queue_num"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "RPU_RX_PKT_DROP_CNT"},
+ {true, "BUF_WAIT_TIMEOUT"},
+ {true, "BUF_WAIT_TIMEOUT_QID"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
+ {false, "Reserved"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+
+ {true, "FIFO_DFX_ST5"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
+ {false, "Reserved"},
+ {true, "NCSI_EGU_TX_FIFO_STS"},
+ {true, "NCSI_PAUSE_STATUS"},
+ {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_CKS_ERR_CNT"},
+
+ {true, "NCSI_RX_CTRL_PKT_CNT"},
+ {true, "NCSI_RX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_PKT_CNT"},
+ {true, "NCSI_RX_FCS_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"},
+
+ {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_CNT"},
+ {true, "NCSI_TX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_PKT_CNT"},
+ {true, "NCSI_TX_PT_PKT_TRUNC_CNT"},
+
+ {true, "NCSI_TX_PT_PKT_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "NCSI_MAC_RX_OCTETS_OK"},
+ {true, "NCSI_MAC_RX_OCTETS_BAD"},
+ {true, "NCSI_MAC_RX_UC_PKTS"},
+ {true, "NCSI_MAC_RX_MC_PKTS"},
+ {true, "NCSI_MAC_RX_BC_PKTS"},
+ {true, "NCSI_MAC_RX_PKTS_64OCTETS"},
+
+ {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"},
+
+ {true, "NCSI_MAC_RX_FCS_ERRORS"},
+ {true, "NCSI_MAC_RX_LONG_ERRORS"},
+ {true, "NCSI_MAC_RX_JABBER_ERRORS"},
+ {true, "NCSI_MAC_RX_RUNT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_SHORT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_FILT_PKT_CNT"},
+
+ {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"},
+ {true, "NCSI_MAC_TX_OCTETS_OK"},
+ {true, "NCSI_MAC_TX_OCTETS_BAD"},
+ {true, "NCSI_MAC_TX_UC_PKTS"},
+ {true, "NCSI_MAC_TX_MC_PKTS"},
+ {true, "NCSI_MAC_TX_BC_PKTS"},
+
+ {true, "NCSI_MAC_TX_PKTS_64OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"},
+
+ {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"},
+ {true, "NCSI_MAC_TX_UNDERRUN"},
+ {true, "NCSI_MAC_TX_CRC_ERROR"},
+ {true, "NCSI_MAC_TX_PAUSE_FRAMES"},
+ {true, "NCSI_MAC_RX_PAD_PKTS"},
+ {true, "NCSI_MAC_RX_PAUSE_FRAMES"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
+ {false, "Reserved"},
+ {true, "LGE_IGU_AFIFO_DFX_0"},
+ {true, "LGE_IGU_AFIFO_DFX_1"},
+ {true, "LGE_IGU_AFIFO_DFX_2"},
+ {true, "LGE_IGU_AFIFO_DFX_3"},
+ {true, "LGE_IGU_AFIFO_DFX_4"},
+
+ {true, "LGE_IGU_AFIFO_DFX_5"},
+ {true, "LGE_IGU_AFIFO_DFX_6"},
+ {true, "LGE_IGU_AFIFO_DFX_7"},
+ {true, "LGE_EGU_AFIFO_DFX_0"},
+ {true, "LGE_EGU_AFIFO_DFX_1"},
+ {true, "LGE_EGU_AFIFO_DFX_2"},
+
+ {true, "LGE_EGU_AFIFO_DFX_3"},
+ {true, "LGE_EGU_AFIFO_DFX_4"},
+ {true, "LGE_EGU_AFIFO_DFX_5"},
+ {true, "LGE_EGU_AFIFO_DFX_6"},
+ {true, "LGE_EGU_AFIFO_DFX_7"},
+ {true, "CGE_IGU_AFIFO_DFX_0"},
+
+ {true, "CGE_IGU_AFIFO_DFX_1"},
+ {true, "CGE_EGU_AFIFO_DFX_0"},
+ {true, "CGE_EGU_AFIFO_DFX_1"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
+ {false, "Reserved"},
+ {true, "DROP_FROM_PRT_PKT_CNT"},
+ {true, "DROP_FROM_HOST_PKT_CNT"},
+ {true, "DROP_TX_VLAN_PROC_CNT"},
+ {true, "DROP_MNG_CNT"},
+ {true, "DROP_FD_CNT"},
+
+ {true, "DROP_NO_DST_CNT"},
+ {true, "DROP_MC_MBID_FULL_CNT"},
+ {true, "DROP_SC_FILTERED"},
+ {true, "PPP_MC_DROP_PKT_CNT"},
+ {true, "DROP_PT_CNT"},
+ {true, "DROP_MAC_ANTI_SPOOF_CNT"},
+
+ {true, "DROP_IG_VFV_CNT"},
+ {true, "DROP_IG_PRTV_CNT"},
+ {true, "DROP_CNM_PFC_PAUSE_CNT"},
+ {true, "DROP_TORUS_TC_CNT"},
+ {true, "DROP_TORUS_LPBK_CNT"},
+ {true, "PPP_HFS_STS"},
+
+ {true, "PPP_MC_RSLT_STS"},
+ {true, "PPP_P3U_STS"},
+ {true, "PPP_RSLT_DESCR_STS"},
+ {true, "PPP_UMV_STS_0"},
+ {true, "PPP_UMV_STS_1"},
+ {true, "PPP_VFV_STS"},
+
+ {true, "PPP_GRO_KEY_CNT"},
+ {true, "PPP_GRO_INFO_CNT"},
+ {true, "PPP_GRO_DROP_CNT"},
+ {true, "PPP_GRO_OUT_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_DATA_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"},
+
+ {true, "PPP_GRO_INFO_MATCH_CNT"},
+ {true, "PPP_GRO_FREE_ENTRY_CNT"},
+ {true, "PPP_GRO_INNER_DFX_SIGNAL"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "GET_RX_PKT_CNT_L"},
+ {true, "GET_RX_PKT_CNT_H"},
+ {true, "GET_TX_PKT_CNT_L"},
+ {true, "GET_TX_PKT_CNT_H"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_H"},
+
+ {true, "SEND_MC_FROM_PRT_CNT_L"},
+ {true, "SEND_MC_FROM_PRT_CNT_H"},
+ {true, "SEND_MC_FROM_HOST_CNT_L"},
+ {true, "SEND_MC_FROM_HOST_CNT_H"},
+ {true, "SSU_MC_RD_CNT_L"},
+ {true, "SSU_MC_RD_CNT_H"},
+
+ {true, "SSU_MC_DROP_CNT_L"},
+ {true, "SSU_MC_DROP_CNT_H"},
+ {true, "SSU_MC_RD_PKT_CNT_L"},
+ {true, "SSU_MC_RD_PKT_CNT_H"},
+ {true, "PPP_MC_2HOST_PKT_CNT_L"},
+ {true, "PPP_MC_2HOST_PKT_CNT_H"},
+
+ {true, "PPP_MC_2PRT_PKT_CNT_L"},
+ {true, "PPP_MC_2PRT_PKT_CNT_H"},
+ {true, "NTSNOS_PKT_CNT_L"},
+ {true, "NTSNOS_PKT_CNT_H"},
+ {true, "NTUP_PKT_CNT_L"},
+ {true, "NTUP_PKT_CNT_H"},
+
+ {true, "NTLCL_PKT_CNT_L"},
+ {true, "NTLCL_PKT_CNT_H"},
+ {true, "NTTGT_PKT_CNT_L"},
+ {true, "NTTGT_PKT_CNT_H"},
+ {true, "RTNS_PKT_CNT_L"},
+ {true, "RTNS_PKT_CNT_H"},
+
+ {true, "RTLPBK_PKT_CNT_L"},
+ {true, "RTLPBK_PKT_CNT_H"},
+ {true, "NR_PKT_CNT_L"},
+ {true, "NR_PKT_CNT_H"},
+ {true, "RR_PKT_CNT_L"},
+ {true, "RR_PKT_CNT_H"},
+
+ {true, "MNG_TBL_HIT_CNT_L"},
+ {true, "MNG_TBL_HIT_CNT_H"},
+ {true, "FD_TBL_HIT_CNT_L"},
+ {true, "FD_TBL_HIT_CNT_H"},
+ {true, "FD_LKUP_CNT_L"},
+ {true, "FD_LKUP_CNT_H"},
+
+ {true, "BC_HIT_CNT_L"},
+ {true, "BC_HIT_CNT_H"},
+ {true, "UM_TBL_UC_HIT_CNT_L"},
+ {true, "UM_TBL_UC_HIT_CNT_H"},
+ {true, "UM_TBL_MC_HIT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_CNT_H"},
+
+ {true, "UM_TBL_VMDQ1_HIT_CNT_L"},
+ {true, "UM_TBL_VMDQ1_HIT_CNT_H"},
+ {true, "MTA_TBL_HIT_CNT_L"},
+ {true, "MTA_TBL_HIT_CNT_H"},
+ {true, "FWD_BONDING_HIT_CNT_L"},
+ {true, "FWD_BONDING_HIT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_CNT_H"},
+ {true, "GET_TUNL_PKT_CNT_L"},
+ {true, "GET_TUNL_PKT_CNT_H"},
+ {true, "GET_BMC_PKT_CNT_L"},
+ {true, "GET_BMC_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_H"},
+ {true, "PPP_MC_2BMC_PKT_CNT_L"},
+ {true, "PPP_MC_2BMC_PKT_CNT_H"},
+ {true, "VLAN_MIRR_CNT_L"},
+ {true, "VLAN_MIRR_CNT_H"},
+
+ {true, "IG_MIRR_CNT_L"},
+ {true, "IG_MIRR_CNT_H"},
+ {true, "EG_MIRR_CNT_L"},
+ {true, "EG_MIRR_CNT_H"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_L"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_H"},
+
+ {true, "LAN_PAIR_CNT_L"},
+ {true, "LAN_PAIR_CNT_H"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_H"},
+ {true, "MTA_TBL_HIT_PKT_CNT_L"},
+ {true, "MTA_TBL_HIT_PKT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_PKT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_PKT_CNT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
+ {false, "Reserved"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "FSM_DFX_ST2"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+ {true, "FIFO_DFX_ST5"},
+ {true, "FIFO_DFX_ST6"},
+ {true, "FIFO_DFX_ST7"},
+
+ {true, "FIFO_DFX_ST8"},
+ {true, "FIFO_DFX_ST9"},
+ {true, "FIFO_DFX_ST10"},
+ {true, "FIFO_DFX_ST11"},
+ {true, "Q_CREDIT_VLD_0"},
+ {true, "Q_CREDIT_VLD_1"},
+
+ {true, "Q_CREDIT_VLD_2"},
+ {true, "Q_CREDIT_VLD_3"},
+ {true, "Q_CREDIT_VLD_4"},
+ {true, "Q_CREDIT_VLD_5"},
+ {true, "Q_CREDIT_VLD_6"},
+ {true, "Q_CREDIT_VLD_7"},
+
+ {true, "Q_CREDIT_VLD_8"},
+ {true, "Q_CREDIT_VLD_9"},
+ {true, "Q_CREDIT_VLD_10"},
+ {true, "Q_CREDIT_VLD_11"},
+ {true, "Q_CREDIT_VLD_12"},
+ {true, "Q_CREDIT_VLD_13"},
+
+ {true, "Q_CREDIT_VLD_14"},
+ {true, "Q_CREDIT_VLD_15"},
+ {true, "Q_CREDIT_VLD_16"},
+ {true, "Q_CREDIT_VLD_17"},
+ {true, "Q_CREDIT_VLD_18"},
+ {true, "Q_CREDIT_VLD_19"},
+
+ {true, "Q_CREDIT_VLD_20"},
+ {true, "Q_CREDIT_VLD_21"},
+ {true, "Q_CREDIT_VLD_22"},
+ {true, "Q_CREDIT_VLD_23"},
+ {true, "Q_CREDIT_VLD_24"},
+ {true, "Q_CREDIT_VLD_25"},
+
+ {true, "Q_CREDIT_VLD_26"},
+ {true, "Q_CREDIT_VLD_27"},
+ {true, "Q_CREDIT_VLD_28"},
+ {true, "Q_CREDIT_VLD_29"},
+ {true, "Q_CREDIT_VLD_30"},
+ {true, "Q_CREDIT_VLD_31"},
+
+ {true, "GRO_BD_SERR_CNT"},
+ {true, "GRO_CONTEXT_SERR_CNT"},
+ {true, "RX_STASH_CFG_SERR_CNT"},
+ {true, "AXI_RD_FBD_SERR_CNT"},
+ {true, "GRO_BD_MERR_CNT"},
+ {true, "GRO_CONTEXT_MERR_CNT"},
+
+ {true, "RX_STASH_CFG_MERR_CNT"},
+ {true, "AXI_RD_FBD_MERR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
+ {true, "q_num"},
+ {true, "RCB_CFG_RX_RING_TAIL"},
+ {true, "RCB_CFG_RX_RING_HEAD"},
+ {true, "RCB_CFG_RX_RING_FBDNUM"},
+ {true, "RCB_CFG_RX_RING_OFFSET"},
+ {true, "RCB_CFG_RX_RING_FBDOFFSET"},
+
+ {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"},
+ {true, "RCB_CFG_TX_RING_TAIL"},
+ {true, "RCB_CFG_TX_RING_HEAD"},
+ {true, "RCB_CFG_TX_RING_FBDNUM"},
+ {true, "RCB_CFG_TX_RING_OFFSET"},
+ {true, "RCB_CFG_TX_RING_EBDNUM"},
+};
+
+#endif
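The hclge_dbg_dfx_message tables above pair each DFX register word, in read-out order, with a printable name, with flag == false marking reserved slots to skip. A sketch (not driver code) of how a dump routine could walk one of these tables against a buffer of values read back from firmware; dbg_print_dfx, 'val' and 'n' are hypothetical names:

static void dbg_print_dfx(struct device *dev,
			  const struct hclge_dbg_dfx_message *msg,
			  const u32 *val, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!msg[i].flag)	/* skip "Reserved" slots */
			continue;
		dev_info(dev, "%-40s: 0x%08x\n", msg[i].message, val[i]);
	}
}

Used as, e.g., dbg_print_dfx(dev, hclge_dbg_ssu_reg_2, vals, ARRAY_SIZE(hclge_dbg_ssu_reg_2)), which keeps the name table and the firmware read-out count in lockstep.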
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 123c37e653f3..d0f654123b9b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -4,78 +4,39 @@
#include "hclge_err.h"
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
- { .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" },
{ .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" },
{ .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" },
{ .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" },
{ .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" },
{ .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" },
{ .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
- { .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" },
{ .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" },
{ .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
- { /* sentinel */ }
-};
-
-static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = {
- { .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" },
+ { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
- { .int_msk = BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" },
{ .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" },
{ .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" },
{ .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" },
{ .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" },
{ .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" },
{ .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
- { .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" },
{ .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" },
{ .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
- { /* sentinel */ }
-};
-
-static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = {
- { .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" },
- { .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" },
- { .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" },
- { .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" },
- { .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" },
- { .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
- { .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" },
- { .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" },
- { .int_msk = BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
+ { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
+ { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
+ { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
+ { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
- { .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" },
- { .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" },
- { .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" },
- { .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" },
- { .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" },
{ .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
{ .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
{ .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
@@ -85,15 +46,19 @@ static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_igu_com_err_int[] = {
+static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
+ { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" },
+ { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_igu_int[] = {
{ .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
- { .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" },
{ .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" },
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = {
+static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
{ .int_msk = BIT(0), .msg = "rx_buf_overflow" },
{ .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
{ .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
@@ -104,51 +69,11 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = {
};
static const struct hclge_hw_error hclge_ncsi_err_int[] = {
- { .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" },
{ .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_ppp_mpf_int0[] = {
- { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" },
- { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" },
- { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" },
- { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" },
- { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" },
- { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" },
- { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" },
- { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" },
- { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" },
- { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" },
- { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" },
- { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" },
- { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" },
- { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" },
- { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" },
- { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" },
- { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" },
- { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" },
- { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" },
- { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" },
- { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" },
- { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" },
- { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" },
- { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" },
- { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_1bit_err" },
- { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" },
- { .int_msk = BIT(27),
- .msg = "flow_director_ad_mem0_ecc_1bit_err" },
- { .int_msk = BIT(28),
- .msg = "flow_director_ad_mem1_ecc_1bit_err" },
- { .int_msk = BIT(29),
- .msg = "rx_vlan_tag_memory_ecc_1bit_err" },
- { .int_msk = BIT(30),
- .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" },
- { /* sentinel */ }
-};
-
-static const struct hclge_hw_error hclge_ppp_mpf_int1[] = {
+static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
{ .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
@@ -187,23 +112,13 @@ static const struct hclge_hw_error hclge_ppp_mpf_int1[] = {
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_ppp_pf_int[] = {
- { .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" },
+static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
+ { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" },
{ .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_ppp_mpf_int2[] = {
- { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" },
- { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" },
- { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" },
- { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" },
- { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" },
- { /* sentinel */ }
-};
-
-static const struct hclge_hw_error hclge_ppp_mpf_int3[] = {
+static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
{ .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
{ .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
{ .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
@@ -213,145 +128,248 @@ static const struct hclge_hw_error hclge_ppp_mpf_int3[] = {
{ /* sentinel */ }
};
-struct hclge_tm_sch_ecc_info {
- const char *name;
-};
-
-static const struct hclge_tm_sch_ecc_info hclge_tm_sch_ecc_err[7][15] = {
- {
- { .name = "QSET_QUEUE_CTRL:PRI_LEN TAB" },
- { .name = "QSET_QUEUE_CTRL:SPA_LEN TAB" },
- { .name = "QSET_QUEUE_CTRL:SPB_LEN TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRA_LEN TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRB_LEN TAB" },
- { .name = "QSET_QUEUE_CTRL:SPA_HPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:SPB_HPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRA_HPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRB_HPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:QS_LINKLIST TAB" },
- { .name = "QSET_QUEUE_CTRL:SPA_TPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:SPB_TPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRA_TPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:WRRB_TPTR TAB" },
- { .name = "QSET_QUEUE_CTRL:QS_DEFICITCNT TAB" },
- },
- {
- { .name = "ROCE_QUEUE_CTRL:QS_LEN TAB" },
- { .name = "ROCE_QUEUE_CTRL:QS_TPTR TAB" },
- { .name = "ROCE_QUEUE_CTRL:QS_HPTR TAB" },
- { .name = "ROCE_QUEUE_CTRL:QLINKLIST TAB" },
- { .name = "ROCE_QUEUE_CTRL:QCLEN TAB" },
- },
- {
- { .name = "NIC_QUEUE_CTRL:QS_LEN TAB" },
- { .name = "NIC_QUEUE_CTRL:QS_TPTR TAB" },
- { .name = "NIC_QUEUE_CTRL:QS_HPTR TAB" },
- { .name = "NIC_QUEUE_CTRL:QLINKLIST TAB" },
- { .name = "NIC_QUEUE_CTRL:QCLEN TAB" },
- },
- {
- { .name = "RAM_CFG_CTRL:CSHAP TAB" },
- { .name = "RAM_CFG_CTRL:PSHAP TAB" },
- },
- {
- { .name = "SHAPER_CTRL:PSHAP TAB" },
- },
- {
- { .name = "MSCH_CTRL" },
- },
- {
- { .name = "TOP_CTRL" },
- },
-};
-
-static const struct hclge_hw_error hclge_tm_sch_err_int[] = {
- { .int_msk = BIT(0), .msg = "tm_sch_ecc_1bit_err" },
+static const struct hclge_hw_error hclge_tm_sch_rint[] = {
{ .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_full_err" },
- { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_empty_err" },
- { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_full_err" },
- { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_empty_err" },
- { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_full_err" },
- { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_empty_err" },
- { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_full_err" },
- { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_empty_err" },
- { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_full_err" },
- { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_empty_err" },
+ { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" },
+ { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" },
+ { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" },
+ { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" },
+ { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" },
+ { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" },
+ { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" },
+ { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" },
+ { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" },
+ { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" },
{ .int_msk = BIT(12),
- .msg = "tm_sch_port_shap_offset_fifo_wr_full_err" },
+ .msg = "tm_sch_port_shap_offset_fifo_wr_err" },
{ .int_msk = BIT(13),
- .msg = "tm_sch_port_shap_offset_fifo_rd_empty_err" },
+ .msg = "tm_sch_port_shap_offset_fifo_rd_err" },
{ .int_msk = BIT(14),
- .msg = "tm_sch_pg_pshap_offset_fifo_wr_full_err" },
+ .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" },
{ .int_msk = BIT(15),
- .msg = "tm_sch_pg_pshap_offset_fifo_rd_empty_err" },
+ .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" },
{ .int_msk = BIT(16),
- .msg = "tm_sch_pg_cshap_offset_fifo_wr_full_err" },
+ .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" },
{ .int_msk = BIT(17),
- .msg = "tm_sch_pg_cshap_offset_fifo_rd_empty_err" },
+ .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" },
{ .int_msk = BIT(18),
- .msg = "tm_sch_pri_pshap_offset_fifo_wr_full_err" },
+ .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" },
{ .int_msk = BIT(19),
- .msg = "tm_sch_pri_pshap_offset_fifo_rd_empty_err" },
+ .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" },
{ .int_msk = BIT(20),
- .msg = "tm_sch_pri_cshap_offset_fifo_wr_full_err" },
+ .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" },
{ .int_msk = BIT(21),
- .msg = "tm_sch_pri_cshap_offset_fifo_rd_empty_err" },
- { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_full_err" },
- { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_empty_err" },
- { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_full_err" },
- { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_empty_err" },
- { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_full_err" },
- { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_empty_err" },
- { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_full_err" },
- { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_empty_err" },
- { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_full_err" },
- { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_empty_err" },
+ .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" },
+ { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" },
+ { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" },
+ { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" },
+ { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" },
+ { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" },
+ { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" },
+ { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" },
+ { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" },
+ { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" },
+ { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
+ { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" },
+ { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" },
+ { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" },
+ { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" },
+ { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" },
+ { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" },
+ { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" },
+ { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" },
+ { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" },
+ { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" },
+ { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" },
+ { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" },
+ { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" },
+ { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" },
+ { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" },
+ { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" },
+ { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" },
+ { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" },
{ /* sentinel */ }
};
-static const struct hclge_hw_error hclge_qcn_ecc_err_int[] = {
- { .int_msk = BIT(0), .msg = "qcn_byte_mem_ecc_1bit_err" },
+static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "qcn_time_mem_ecc_1bit_err" },
{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "qcn_fb_mem_ecc_1bit_err" },
{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "qcn_link_mem_ecc_1bit_err" },
{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "qcn_rate_mem_ecc_1bit_err" },
{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "qcn_tmplt_mem_ecc_1bit_err" },
{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
- { .int_msk = BIT(12), .msg = "qcn_shap_cfg_mem_ecc_1bit_err" },
{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "qcn_gp0_barrel_mem_ecc_1bit_err" },
{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(16), .msg = "qcn_gp1_barrel_mem_ecc_1bit_err" },
{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(18), .msg = "qcn_gp2_barrel_mem_ecc_1bit_err" },
{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(20), .msg = "qcn_gp3_barral_mem_ecc_1bit_err" },
 { .int_msk = BIT(21), .msg = "qcn_gp3_barrel_mem_ecc_mbit_err" },
{ /* sentinel */ }
};
-static void hclge_log_error(struct device *dev,
- const struct hclge_hw_error *err_list,
+static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
+ { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" },
+ { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" },
+ { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" },
+ { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" },
+ { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
+ { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
+ { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" },
+ { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" },
+ { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" },
+ { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" },
+ { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" },
+ { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" },
+ { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" },
+ { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" },
+ { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" },
+ { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" },
+ { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" },
+ { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" },
+ { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" },
+ { .int_msk = BIT(26), .msg = "rd_bus_err" },
+ { .int_msk = BIT(27), .msg = "wr_bus_err" },
+ { .int_msk = BIT(28), .msg = "reg_search_miss" },
+ { .int_msk = BIT(29), .msg = "rx_q_search_miss" },
+ { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" },
+ { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
+ { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" },
+ { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" },
+ { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
+ { .int_msk = BIT(0), .msg = "over_8bd_no_fe" },
+ { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" },
+ { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" },
+ { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" },
+ { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" },
+ { .int_msk = BIT(5), .msg = "buf_wait_timeout" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
+ { .int_msk = BIT(0), .msg = "buf_sum_err" },
+ { .int_msk = BIT(1), .msg = "ppp_mb_num_err" },
+ { .int_msk = BIT(2), .msg = "ppp_mbid_err" },
+ { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" },
+ { .int_msk = BIT(4), .msg = "ppp_rlt_host_err" },
+ { .int_msk = BIT(5), .msg = "cks_edit_position_err" },
+ { .int_msk = BIT(6), .msg = "cks_edit_condition_err" },
+ { .int_msk = BIT(7), .msg = "vlan_edit_condition_err" },
+ { .int_msk = BIT(8), .msg = "vlan_num_ot_err" },
+ { .int_msk = BIT(9), .msg = "vlan_num_in_err" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
+ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
+ { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
+ { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" },
+ { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" },
+ { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" },
+ { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" },
+ { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" },
+ { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" },
+ { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" },
+ { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" },
+ { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" },
+ { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" },
+ { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
+ { .int_msk = BIT(0), .msg = "ig_mac_inf_int" },
+ { .int_msk = BIT(1), .msg = "ig_host_inf_int" },
+ { .int_msk = BIT(2), .msg = "ig_roc_buf_int" },
+ { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" },
+ { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" },
+ { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" },
+ { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" },
+ { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" },
+ { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" },
+ { .int_msk = BIT(9), .msg = "qm_eof_fifo_int" },
+ { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" },
+ { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" },
+ { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" },
+ { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" },
+ { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" },
+ { .int_msk = BIT(15), .msg = "host_cmd_fifo_int" },
+ { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" },
+ { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" },
+ { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" },
+ { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" },
+ { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" },
+ { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" },
+ { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" },
+ { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
+ { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" },
+ { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" },
+ { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" },
+ { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
+ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
+ { .int_msk = BIT(9), .msg = "low_water_line_err_port" },
+ { .int_msk = BIT(10), .msg = "hi_water_line_err_port" },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
+ { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" },
+ { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" },
+ { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" },
+ { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" },
+ { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" },
+ { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" },
+ { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" },
+ { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" },
+ { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" },
+ { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" },
+ { .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" },
+ { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" },
+ { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" },
+ { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" },
+ { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" },
+ { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" },
+ { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" },
+ { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" },
+ { .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" },
+ { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" },
+ { /* sentinel */ }
+};
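Unlike the BIT()-based tables, hclge_rocee_qmm_ovf_err_int stores enumerated event codes (0x0 through 0x1F) in int_msk, so it is presumably matched by equality against a reported overflow type rather than ANDed as a bitmask. A hypothetical lookup helper under that assumption (not part of this patch):

static const char *rocee_qmm_ovf_msg(u32 event)
{
	const struct hclge_hw_error *err = hclge_rocee_qmm_ovf_err_int;

	for (; err->msg; err++)		/* sentinel entry has msg == NULL */
		if (err->int_msk == event)
			return err->msg;
	return NULL;			/* unknown event code */
}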
+
+static void hclge_log_error(struct device *dev, char *reg,
+ const struct hclge_hw_error *err,
u32 err_sts)
{
- const struct hclge_hw_error *err;
- int i = 0;
-
- while (err_list[i].msg) {
- err = &err_list[i];
- if (!(err->int_msk & err_sts)) {
- i++;
- continue;
- }
- dev_warn(dev, "%s [error status=0x%x] found\n",
- err->msg, err_sts);
- i++;
+ while (err->msg) {
+ if (err->int_msk & err_sts)
+ dev_warn(dev, "%s %s found [error status=0x%x]\n",
+ reg, err->msg, err_sts);
+ err++;
}
}
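With the reworked hclge_log_error() above, callers pass the register name once and the helper walks the sentinel-terminated table, emitting one warning per asserted bit; several bits can fire at once, so every matching entry is reported. A usage sketch (the register-name string and status value are illustrative only):

	u32 err_sts = BIT(1) | BIT(3);	/* two multi-bit ECC errors at once */

	hclge_log_error(&hdev->pdev->dev, "IMP_TCM_ECC_INT_STS",
			hclge_imp_tcm_ecc_int, err_sts);
	/* logs: "IMP_TCM_ECC_INT_STS imp_itcm0_ecc_mbit_err found ..."
	 *       "IMP_TCM_ECC_INT_STS imp_itcm1_ecc_mbit_err found ..."
	 */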
@@ -391,96 +409,44 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
return ret;
}
-/* hclge_cmd_clear_error: clear the error status
- * @hdev: pointer to struct hclge_dev
- * @desc: descriptor for describing the command
- * @desc_src: prefilled descriptor from the previous command for reusing
- * @cmd: command opcode
- * @flag: flag for extended command structure
- *
- * This function clear the error status in the hw register/s using command
- */
-static int hclge_cmd_clear_error(struct hclge_dev *hdev,
- struct hclge_desc *desc,
- struct hclge_desc *desc_src,
- u32 cmd, u16 flag)
-{
- struct device *dev = &hdev->pdev->dev;
- int num = 1;
- int ret, i;
-
- if (cmd) {
- hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
- if (flag) {
- desc[0].flag |= cpu_to_le16(flag);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
- num = 2;
- }
- if (desc_src) {
- for (i = 0; i < 6; i++) {
- desc[0].data[i] = desc_src[0].data[i];
- if (flag)
- desc[1].data[i] = desc_src[1].data[i];
- }
- }
- } else {
- hclge_cmd_reuse_desc(&desc[0], false);
- if (flag) {
- desc[0].flag |= cpu_to_le16(flag);
- hclge_cmd_reuse_desc(&desc[1], false);
- num = 2;
- }
- }
- ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
- if (ret)
- dev_err(dev, "clear error cmd failed (%d)\n", ret);
-
- return ret;
-}
-
-static int hclge_enable_common_error(struct hclge_dev *hdev, bool en)
+static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2];
int ret;
+ /* configure common error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
if (en) {
- /* enable COMMON error interrupts */
desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
- desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN |
+ HCLGE_MSIX_SRAM_ECC_ERR_INT_EN);
desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
- } else {
- /* disable COMMON error interrupts */
- desc[0].data[0] = 0;
- desc[0].data[2] = 0;
- desc[0].data[3] = 0;
- desc[0].data[4] = 0;
- desc[0].data[5] = 0;
}
+
desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
- desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK);
+ desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK |
+ HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK);
desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);
ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
if (ret)
dev_err(dev,
- "failed(%d) to enable/disable COMMON err interrupts\n",
- ret);
+ "fail(%d) to configure common err interrupts\n", ret);
return ret;
}
-static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en)
+static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc;
@@ -489,74 +455,65 @@ static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en)
if (hdev->pdev->revision < 0x21)
return 0;
- /* enable/disable NCSI error interrupts */
+ /* configure NCSI error interrupts */
hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
if (en)
desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
- else
- desc.data[0] = 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(dev,
- "failed(%d) to enable/disable NCSI error interrupts\n",
- ret);
+ "fail(%d) to configure NCSI error interrupts\n", ret);
return ret;
}
-static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en)
+static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc;
int ret;
- /* enable/disable error interrupts */
+ /* configure IGU,EGU error interrupts */
hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
if (en)
desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
- else
- desc.data[0] = 0;
+
desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(dev,
- "failed(%d) to enable/disable IGU common interrupts\n",
- ret);
+ "fail(%d) to configure IGU common interrupts\n", ret);
return ret;
}
hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
if (en)
desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
- else
- desc.data[0] = 0;
+
desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(dev,
- "failed(%d) to enable/disable IGU-EGU TNL interrupts\n",
- ret);
+ "fail(%d) to configure IGU-EGU TNL interrupts\n", ret);
return ret;
}
- ret = hclge_enable_ncsi_error(hdev, en);
- if (ret)
- dev_err(dev, "fail(%d) to en/disable err int\n", ret);
+ ret = hclge_config_ncsi_hw_err_int(hdev, en);
return ret;
}
-static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
+static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc[2];
int ret;
- /* enable/disable PPP error interrupts */
+ /* configure PPP error interrupts */
hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
@@ -567,24 +524,24 @@ static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
desc[0].data[1] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
- } else {
- desc[0].data[0] = 0;
- desc[0].data[1] = 0;
+ desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN);
}
+
desc[1].data[0] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
desc[1].data[1] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
+ if (hdev->pdev->revision >= 0x21)
+ desc[1].data[2] =
+ cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
} else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
if (en) {
desc[0].data[0] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
desc[0].data[1] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
- } else {
- desc[0].data[0] = 0;
- desc[0].data[1] = 0;
}
+
desc[1].data[0] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
desc[1].data[1] =
@@ -593,498 +550,863 @@ static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
if (ret)
- dev_err(dev,
- "failed(%d) to enable/disable PPP error interrupts\n",
- ret);
+ dev_err(dev, "fail(%d) to configure PPP error intr\n", ret);
return ret;
}
-static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en)
+static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en)
{
- struct device *dev = &hdev->pdev->dev;
int ret;
- ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
+ ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
en);
- if (ret) {
- dev_err(dev,
- "failed(%d) to enable/disable PPP error intr 0,1\n",
- ret);
+ if (ret)
return ret;
- }
- ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
+ ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
en);
- if (ret)
- dev_err(dev,
- "failed(%d) to enable/disable PPP error intr 2,3\n",
- ret);
return ret;
}
-int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en)
+static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc;
int ret;
- /* enable TM SCH hw errors */
+ /* configure TM SCH hw errors */
hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
if (en)
desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);
- else
- desc.data[0] = 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(dev, "failed(%d) to configure TM SCH errors\n", ret);
+ dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret);
return ret;
}
- /* enable TM QCN hw errors */
+ /* configure TM QCN hw errors */
ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG,
0, 0, 0);
if (ret) {
- dev_err(dev, "failed(%d) to read TM QCN CFG status\n", ret);
+ dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
return ret;
}
hclge_cmd_reuse_desc(&desc, false);
if (en)
desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
- else
- desc.data[1] = 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(dev,
- "failed(%d) to configure TM QCN mem errors\n", ret);
+ "fail(%d) to configure TM QCN mem errors\n", ret);
return ret;
}
-static void hclge_process_common_error(struct hclge_dev *hdev,
- enum hclge_err_int_type type)
+static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
- struct hclge_desc desc[2];
- u32 err_sts;
+ struct hclge_desc desc;
int ret;
- /* read err sts */
- ret = hclge_cmd_query_error(hdev, &desc[0],
- HCLGE_COMMON_ECC_INT_CFG,
- HCLGE_CMD_FLAG_NEXT, 0, 0);
- if (ret) {
+ /* configure MAC common error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN);
+
+ desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
dev_err(dev,
- "failed(=%d) to query COMMON error interrupt status\n",
- ret);
- return;
- }
+ "fail(%d) to configure MAC COMMON error intr\n", ret);
- /* log err */
- err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK;
- hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts);
+ return ret;
+}
- err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK;
- hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts);
+static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
+ bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int num = 1;
+ int ret;
- err_sts = (le32_to_cpu(desc[0].data[1]) >> HCLGE_CMDQ_ROC_ECC_INT_SHIFT)
- & HCLGE_CMDQ_ECC_INT_MASK;
- hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts);
+ /* configure PPU error interrupts */
+ if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ desc[0].flag |= HCLGE_CMD_FLAG_NEXT;
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+ if (en) {
+ desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN;
+ desc[0].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN;
+ desc[1].data[3] = HCLGE_PPU_MPF_ABNORMAL_INT3_EN;
+ desc[1].data[4] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN;
+ }
- if ((le32_to_cpu(desc[0].data[3])) & BIT(0))
- dev_warn(dev, "imp_rd_data_poison_err found\n");
+ desc[1].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK;
+ desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK;
+ desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK;
+ desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK;
+ num = 2;
+ } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ if (en)
+ desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2;
- err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) &
- HCLGE_TQP_ECC_INT_MASK;
- hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts);
+ desc[0].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK;
+ } else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ if (en)
+ desc[0].data[0] = HCLGE_PPU_PF_ABNORMAL_INT_EN;
- err_sts = (le32_to_cpu(desc[0].data[5])) &
- HCLGE_IMP_ITCM4_ECC_INT_MASK;
- hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts);
+ desc[0].data[2] = HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK;
+ } else {
+ dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
+ return -EINVAL;
+ }
- /* clear error interrupts */
- desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK);
- desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK |
- HCLGE_CMDQ_ROCEE_ECC_CLR_MASK);
- desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK);
- desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
- ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
- HCLGE_CMD_FLAG_NEXT);
- if (ret)
- dev_err(dev,
- "failed(%d) to clear COMMON error interrupt status\n",
- ret);
+ return ret;
}
-static void hclge_process_ncsi_error(struct hclge_dev *hdev,
- enum hclge_err_int_type type)
+static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
- struct hclge_desc desc_rd;
- struct hclge_desc desc_wr;
- u32 err_sts;
int ret;
- if (hdev->pdev->revision < 0x21)
- return;
-
- /* read NCSI error status */
- ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY,
- 0, 1, HCLGE_NCSI_ERR_INT_TYPE);
+ ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD,
+ en);
if (ret) {
- dev_err(dev,
- "failed(=%d) to query NCSI error interrupt status\n",
+ dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n",
ret);
- return;
+ return ret;
}
- /* log err */
- err_sts = le32_to_cpu(desc_rd.data[0]);
- hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts);
+ ret = hclge_config_ppu_error_interrupts(hdev,
+ HCLGE_PPU_MPF_OTHER_INT_CMD,
+ en);
+ if (ret) {
+ dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret);
+ return ret;
+ }
- /* clear err int */
- ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
- HCLGE_NCSI_INT_CLR, 0);
+ ret = hclge_config_ppu_error_interrupts(hdev,
+ HCLGE_PPU_PF_OTHER_INT_CMD, en);
if (ret)
- dev_err(dev, "failed(=%d) to clear NCSI interrupt status\n",
+ dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n",
ret);
+ return ret;
}
-static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
- enum hclge_err_int_type int_type)
+static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
- struct hclge_desc desc_rd;
- struct hclge_desc desc_wr;
- u32 err_sts;
+ struct hclge_desc desc[2];
int ret;
- /* read IGU common err sts */
- ret = hclge_cmd_query_error(hdev, &desc_rd,
- HCLGE_IGU_COMMON_INT_QUERY,
- 0, 1, int_type);
- if (ret) {
- dev_err(dev, "failed(=%d) to query IGU common int status\n",
- ret);
- return;
+ /* configure SSU ecc error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
+ if (en) {
+ desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN);
}
- /* log err */
- err_sts = le32_to_cpu(desc_rd.data[0]) &
- HCLGE_IGU_COM_INT_MASK;
- hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts);
+ desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK);
+ desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK);
+ desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK);
- /* clear err int */
- ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
- HCLGE_IGU_COMMON_INT_CLR, 0);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
if (ret) {
- dev_err(dev, "failed(=%d) to clear IGU common int status\n",
- ret);
- return;
+ dev_err(dev,
+ "fail(%d) to configure SSU ECC error interrupt\n", ret);
+ return ret;
}
- /* read IGU-EGU TNL err sts */
- ret = hclge_cmd_query_error(hdev, &desc_rd,
- HCLGE_IGU_EGU_TNL_INT_QUERY,
- 0, 1, int_type);
- if (ret) {
- dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n",
- ret);
- return;
+ /* configure SSU common error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
+
+ if (en) {
+ if (hdev->pdev->revision >= 0x21)
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
+ else
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5));
+ desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN);
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN);
}
- /* log err */
- err_sts = le32_to_cpu(desc_rd.data[0]) &
- HCLGE_IGU_EGU_TNL_INT_MASK;
- hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts);
+ desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK |
+ HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK);
+ desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK);
- /* clear err int */
- ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
- HCLGE_IGU_EGU_TNL_INT_CLR, 0);
- if (ret) {
- dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n",
- ret);
- return;
- }
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure SSU COMMON error intr\n", ret);
- hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE);
+ return ret;
}
-static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
- enum hclge_err_int_type int_type)
+#define HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type) \
+ do { \
+ if (ae_dev->ops->set_default_reset_request) \
+ ae_dev->ops->set_default_reset_request(ae_dev, \
+ reset_type); \
+ } while (0)
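+
+/* Illustrative note: the macro guards the optional callback, so a bare
+ *	HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ * is safe even when ae_dev->ops->set_default_reset_request is NULL;
+ * ae_dev must be in scope at the expansion site.
+ */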
+
+/* hclge_handle_mpf_ras_error: handle all main PF RAS errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @num: number of extended command structures
+ *
+ * This function handles all the main PF RAS errors present in the
+ * hardware registers, using the command interface.
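+ *
+ * Layout of the returned descriptors, as consumed below: desc[0]
+ * carries the IMP TCM/CMDQ/TQP/MSI-X ECC status words, desc[2] SSU,
+ * desc[3] IGU, desc[4] PPP, desc[5] PPU, desc[6] TM, desc[7] QCN and
+ * desc[9] NCSI status.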
+ */
+static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int num)
{
- enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct device *dev = &hdev->pdev->dev;
- const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3;
- struct hclge_desc desc[2];
- u32 err_sts;
+ __le32 *desc_data;
+ u32 status;
int ret;
- /* read PPP INT sts */
- ret = hclge_cmd_query_error(hdev, &desc[0], cmd,
- HCLGE_CMD_FLAG_NEXT, 5, int_type);
+ /* query all main PF RAS errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
if (ret) {
- dev_err(dev, "failed(=%d) to query PPP interrupt status\n",
- ret);
- return -EIO;
+ dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
+ return ret;
}
- /* log error */
- if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
- hw_err_lst1 = &hclge_ppp_mpf_int0[0];
- hw_err_lst2 = &hclge_ppp_mpf_int1[0];
- hw_err_lst3 = &hclge_ppp_pf_int[0];
- } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
- hw_err_lst1 = &hclge_ppp_mpf_int2[0];
- hw_err_lst2 = &hclge_ppp_mpf_int3[0];
- } else {
- dev_err(dev, "invalid command(=%d)\n", cmd);
- return -EINVAL;
+ /* log HNS common errors */
+ status = le32_to_cpu(desc[0].data[0]);
+ if (status) {
+ hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+ &hclge_imp_tcm_ecc_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
- err_sts = le32_to_cpu(desc[0].data[2]);
- if (err_sts) {
- hclge_log_error(dev, hw_err_lst1, err_sts);
- reset_level = HNAE3_FUNC_RESET;
+ status = le32_to_cpu(desc[0].data[1]);
+ if (status) {
+ hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
+ &hclge_cmdq_nic_mem_ecc_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
- err_sts = le32_to_cpu(desc[0].data[3]);
- if (err_sts) {
- hclge_log_error(dev, hw_err_lst2, err_sts);
- reset_level = HNAE3_FUNC_RESET;
+ if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
+ dev_warn(dev, "imp_rd_data_poison_err found\n");
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
- if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
- err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
- if (err_sts) {
- hclge_log_error(dev, hw_err_lst3, err_sts);
- reset_level = HNAE3_FUNC_RESET;
- }
+ status = le32_to_cpu(desc[0].data[3]);
+ if (status) {
+ hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
+ &hclge_tqp_int_ecc_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
}
- /* clear PPP INT */
- ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0,
- HCLGE_CMD_FLAG_NEXT);
- if (ret) {
- dev_err(dev, "failed(=%d) to clear PPP interrupt status\n",
- ret);
- return -EIO;
+ status = le32_to_cpu(desc[0].data[4]);
+ if (status) {
+ hclge_log_error(dev, "MSIX_ECC_INT_STS",
+ &hclge_msix_sram_ecc_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
}
- return 0;
+ /* log SSU(Storage Switch Unit) errors */
+ desc_data = (__le32 *)&desc[2];
+ status = le32_to_cpu(*(desc_data + 2));
+ if (status) {
+ dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n");
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
+ if (status) {
+ dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n");
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
+ if (status) {
+ hclge_log_error(dev, "SSU_COMMON_ERR_INT",
+ &hclge_ssu_com_err_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ }
+
+ /* log IGU(Ingress Unit) errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "IGU_INT_STS",
+ &hclge_igu_int[0], status);
+
+ /* log PPP(Programmable Packet Process) errors */
+ desc_data = (__le32 *)&desc[4];
+ status = le32_to_cpu(*(desc_data + 1));
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
+ &hclge_ppp_mpf_abnormal_int_st1[0], status);
+
+ status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppp_mpf_abnormal_int_st3[0], status);
+
+ /* log PPU(RCB) errors */
+ desc_data = (__le32 *)&desc[5];
+ status = le32_to_cpu(*(desc_data + 1));
+ if (status) {
+ dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
+ "rpu_rx_pkt_ecc_mbit_err");
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 2));
+ if (status) {
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+ &hclge_ppu_mpf_abnormal_int_st2[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
+ if (status) {
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppu_mpf_abnormal_int_st3[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ /* log TM(Traffic Manager) errors */
+ desc_data = (__le32 *)&desc[6];
+ status = le32_to_cpu(*desc_data);
+ if (status) {
+ hclge_log_error(dev, "TM_SCH_RINT",
+ &hclge_tm_sch_rint[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ /* log QCN(Quantized Congestion Control) errors */
+ desc_data = (__le32 *)&desc[7];
+ status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
+ if (status) {
+ hclge_log_error(dev, "QCN_FIFO_RINT",
+ &hclge_qcn_fifo_rint[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
+ if (status) {
+ hclge_log_error(dev, "QCN_ECC_RINT",
+ &hclge_qcn_ecc_rint[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ /* log NCSI errors */
+ desc_data = (__le32 *)&desc[9];
+ status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
+ if (status) {
+ hclge_log_error(dev, "NCSI_ECC_INT_RPT",
+ &hclge_ncsi_err_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ }
+
+ /* clear all main PF RAS errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
+
+ return ret;
}
-static void hclge_process_ppp_error(struct hclge_dev *hdev,
- enum hclge_err_int_type int_type)
+/* hclge_handle_pf_ras_error: handle all PF RAS errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @num: number of extended command structures
+ *
+ * This function handles all the PF RAS errors present in the
+ * hardware registers, using the command interface.
+ */
+static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int num)
{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
int ret;
- /* read PPP INT0,1 sts */
- ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD,
- int_type);
- if (ret < 0) {
- dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n",
- ret);
- return;
+ /* query all PF RAS errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret) {
+ dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
+ return ret;
}
- /* read err PPP INT2,3 sts */
- ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD,
- int_type);
- if (ret < 0)
- dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n",
- ret);
+ /* log SSU(Storage Switch Unit) errors */
+ status = le32_to_cpu(desc[0].data[0]);
+ if (status) {
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_err_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ }
+
+ status = le32_to_cpu(desc[0].data[1]);
+ if (status) {
+ hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
+ &hclge_ssu_fifo_overflow_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ }
+
+ status = le32_to_cpu(desc[0].data[2]);
+ if (status) {
+ hclge_log_error(dev, "SSU_ETS_TCG_INT",
+ &hclge_ssu_ets_tcg_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ }
+
+ /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
+ desc_data = (__le32 *)&desc[1];
+ status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
+ &hclge_igu_egu_tnl_int[0], status);
+
+ /* clear all PF RAS errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
+
+ return ret;
}
-static void hclge_process_tm_sch_error(struct hclge_dev *hdev)
+static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
{
struct device *dev = &hdev->pdev->dev;
- const struct hclge_tm_sch_ecc_info *tm_sch_ecc_info;
- struct hclge_desc desc;
- u32 ecc_info;
- u8 module_no;
- u8 ram_no;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc desc_bd;
+ struct hclge_desc *desc;
int ret;
- /* read TM scheduler errors */
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_SCH_MBIT_ECC_INFO_CMD, 0, 0, 0);
+ /* query the number of BDs holding the RAS int status */
+ hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
if (ret) {
- dev_err(dev, "failed(%d) to read SCH mbit ECC err info\n", ret);
- return;
+ dev_err(dev, "fail(%d) to query ras int status bd num\n", ret);
+ return ret;
}
- ecc_info = le32_to_cpu(desc.data[0]);
+ mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
+ pf_bd_num = le32_to_cpu(desc_bd.data[1]);
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
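+ /* one allocation sized for the larger query serves both the MPF
+ * and PF passes; it is zeroed between the two uses below
+ */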
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_SCH_ECC_ERR_RINT_CMD, 0, 0, 0);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ /* handle all main PF RAS errors */
+ ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num);
if (ret) {
- dev_err(dev, "failed(%d) to read SCH ECC err status\n", ret);
- return;
+ kfree(desc);
+ return ret;
}
+ memset(desc, 0, bd_num * sizeof(struct hclge_desc));
+
+ /* handle all PF RAS errors */
+ ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num);
+ kfree(desc);
- /* log TM scheduler errors */
- if (le32_to_cpu(desc.data[0])) {
- hclge_log_error(dev, &hclge_tm_sch_err_int[0],
- le32_to_cpu(desc.data[0]));
- if (le32_to_cpu(desc.data[0]) & 0x2) {
- module_no = (ecc_info >> 20) & 0xF;
- ram_no = (ecc_info >> 16) & 0xF;
- tm_sch_ecc_info =
- &hclge_tm_sch_ecc_err[module_no][ram_no];
- dev_warn(dev, "ecc err module:ram=%s\n",
- tm_sch_ecc_info->name);
- dev_warn(dev, "ecc memory address = 0x%x\n",
- ecc_info & 0xFFFF);
+ return ret;
+}
+
+static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* read overflow error status */
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_ROCEE_PF_RAS_INT_CMD,
+ 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
+ return ret;
+ }
+
+ /* log overflow error */
+ if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ const struct hclge_hw_error *err;
+ u32 err_sts;
+
+ err = &hclge_rocee_qmm_ovf_err_int[0];
+ err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK &
+ le32_to_cpu(desc[0].data[0]);
+ while (err->msg) {
+ if (err->int_msk == err_sts) {
+ dev_warn(dev, "%s [error status=0x%x] found\n",
+ err->msg,
+ le32_to_cpu(desc[0].data[0]));
+ break;
+ }
+ err++;
}
}
- /* clear TM scheduler errors */
- ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
- if (ret) {
- dev_err(dev, "failed(%d) to clear TM SCH error status\n", ret);
- return;
+ if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[1]));
}
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_SCH_ECC_ERR_RINT_CE, 0, 0, 0);
- if (ret) {
- dev_err(dev, "failed(%d) to read SCH CE status\n", ret);
- return;
+ if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[2]));
}
- ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ return 0;
+}
+
+static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
+{
+ enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET;
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ unsigned int status;
+ int ret;
+
+ /* read RAS error interrupt status */
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT,
+ 0, 0, 0);
if (ret) {
- dev_err(dev, "failed(%d) to clear TM SCH CE status\n", ret);
- return;
+ dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
+ /* reset everything for now */
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ return ret;
}
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_SCH_ECC_ERR_RINT_NFE, 0, 0, 0);
- if (ret) {
- dev_err(dev, "failed(%d) to read SCH NFE status\n", ret);
- return;
+ status = le32_to_cpu(desc[0].data[0]);
+
+ if (status & HCLGE_ROCEE_RERR_INT_MASK)
+ dev_warn(dev, "ROCEE RAS AXI rresp error\n");
+
+ if (status & HCLGE_ROCEE_BERR_INT_MASK)
+ dev_warn(dev, "ROCEE RAS AXI bresp error\n");
+
+ if (status & HCLGE_ROCEE_ECC_INT_MASK) {
+ dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
+ reset_type = HNAE3_GLOBAL_RESET;
}
- ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
- if (ret) {
- dev_err(dev, "failed(%d) to clear TM SCH NFE status\n", ret);
- return;
+ if (status & HCLGE_ROCEE_OVF_INT_MASK) {
+ ret = hclge_log_rocee_ovf_error(hdev);
+ if (ret) {
+ dev_err(dev, "failed(%d) to process ovf error\n", ret);
+ /* reset everything for now */
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ return ret;
+ }
}
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_SCH_ECC_ERR_RINT_FE, 0, 0, 0);
+ /* clear error status */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
if (ret) {
- dev_err(dev, "failed(%d) to read SCH FE status\n", ret);
- return;
+ dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
+ /* reset everything for now */
+ reset_type = HNAE3_GLOBAL_RESET;
}
- ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
- if (ret)
- dev_err(dev, "failed(%d) to clear TM SCH FE status\n", ret);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
+
+ return ret;
}
-static void hclge_process_tm_qcn_error(struct hclge_dev *hdev)
+static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
struct hclge_desc desc;
int ret;
- /* read QCN errors */
- ret = hclge_cmd_query_error(hdev, &desc,
- HCLGE_TM_QCN_MEM_INT_INFO_CMD, 0, 0, 0);
- if (ret) {
- dev_err(dev, "failed(%d) to read QCN ECC err status\n", ret);
- return;
- }
+ if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
+ if (en) {
+ /* enable ROCEE hw error interrupts */
+ desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN);
+ desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN);
- /* log QCN errors */
- if (le32_to_cpu(desc.data[0]))
- hclge_log_error(dev, &hclge_qcn_ecc_err_int[0],
- le32_to_cpu(desc.data[0]));
+ hclge_log_and_clear_rocee_ras_error(hdev);
+ }
+ desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK);
+ desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK);
- /* clear QCN errors */
- ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- dev_err(dev, "failed(%d) to clear QCN error status\n", ret);
+ dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret);
+
+ return ret;
}
-static void hclge_process_tm_error(struct hclge_dev *hdev,
- enum hclge_err_int_type type)
+static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
{
- hclge_process_tm_sch_error(hdev);
- hclge_process_tm_qcn_error(hdev);
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ hdev->pdev->revision < 0x21)
+ return HNAE3_NONE_RESET;
+
+ return hclge_log_and_clear_rocee_ras_error(hdev);
}
static const struct hclge_hw_blk hw_blk[] = {
- { .msk = BIT(0), .name = "IGU_EGU",
- .enable_error = hclge_enable_igu_egu_error,
- .process_error = hclge_process_igu_egu_error, },
- { .msk = BIT(5), .name = "COMMON",
- .enable_error = hclge_enable_common_error,
- .process_error = hclge_process_common_error, },
- { .msk = BIT(4), .name = "TM",
- .enable_error = hclge_enable_tm_hw_error,
- .process_error = hclge_process_tm_error, },
- { .msk = BIT(1), .name = "PPP",
- .enable_error = hclge_enable_ppp_error,
- .process_error = hclge_process_ppp_error, },
+ {
+ .msk = BIT(0), .name = "IGU_EGU",
+ .config_err_int = hclge_config_igu_egu_hw_err_int,
+ },
+ {
+ .msk = BIT(1), .name = "PPP",
+ .config_err_int = hclge_config_ppp_hw_err_int,
+ },
+ {
+ .msk = BIT(2), .name = "SSU",
+ .config_err_int = hclge_config_ssu_hw_err_int,
+ },
+ {
+ .msk = BIT(3), .name = "PPU",
+ .config_err_int = hclge_config_ppu_hw_err_int,
+ },
+ {
+ .msk = BIT(4), .name = "TM",
+ .config_err_int = hclge_config_tm_hw_err_int,
+ },
+ {
+ .msk = BIT(5), .name = "COMMON",
+ .config_err_int = hclge_config_common_hw_err_int,
+ },
+ {
+ .msk = BIT(8), .name = "MAC",
+ .config_err_int = hclge_config_mac_err_int,
+ },
{ /* sentinel */ }
};
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state)
{
+ const struct hclge_hw_blk *module = hw_blk;
struct device *dev = &hdev->pdev->dev;
int ret = 0;
- int i = 0;
- while (hw_blk[i].name) {
- if (!hw_blk[i].enable_error) {
- i++;
- continue;
+ while (module->name) {
+ if (module->config_err_int) {
+ ret = module->config_err_int(hdev, state);
+ if (ret)
+ return ret;
}
- ret = hw_blk[i].enable_error(hdev, state);
- if (ret) {
- dev_err(dev, "fail(%d) to en/disable err int\n", ret);
- return ret;
- }
- i++;
+ module++;
}
+ ret = hclge_config_rocee_ras_interrupt(hdev, state);
+ if (ret)
+ dev_err(dev, "fail(%d) to configure ROCEE err int\n", ret);
+
return ret;
}
-pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev)
+pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
struct device *dev = &hdev->pdev->dev;
- u32 sts, val;
- int i = 0;
-
- sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
-
- /* Processing Non-fatal errors */
- if (sts & HCLGE_RAS_REG_NFE_MASK) {
- val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF;
- i = 0;
- while (hw_blk[i].name) {
- if (!(hw_blk[i].msk & val)) {
- i++;
- continue;
- }
- dev_warn(dev, "%s ras non-fatal error identified\n",
- hw_blk[i].name);
- if (hw_blk[i].process_error)
- hw_blk[i].process_error(hdev,
- HCLGE_ERR_INT_RAS_NFE);
- i++;
- }
+ u32 status;
+
+ status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
+ /* Handling Non-fatal HNS RAS errors */
+ if (status & HCLGE_RAS_REG_NFE_MASK) {
+ dev_warn(dev,
+ "HNS Non-Fatal RAS error(status=0x%x) identified\n",
+ status);
+ hclge_handle_all_ras_errors(hdev);
+ } else {
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ hdev->pdev->revision < 0x21)
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
+ dev_warn(dev, "ROCEE uncorrected RAS error identified\n");
+ hclge_handle_rocee_ras_error(ae_dev);
+ }
+
+ if (status & HCLGE_RAS_REG_NFE_MASK ||
+ status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
+ return PCI_ERS_RESULT_NEED_RESET;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc desc_bd;
+ struct hclge_desc *desc;
+ __le32 *desc_data;
+ int ret = 0;
+ u32 status;
+
+ /* set default handling */
+ set_bit(HNAE3_FUNC_RESET, reset_requests);
+
+ /* query the number of BDs for the MSI-X int status */
+ hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ if (ret) {
+ dev_err(dev, "fail(%d) to query msix int status bd num\n",
+ ret);
+ /* reset everything for now */
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ return ret;
+ }
+
+ mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
+ pf_bd_num = le32_to_cpu(desc_bd.data[1]);
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM; /* do not report allocation failure as success */
+ goto out;
+ }
+
+ /* query all main PF MSIx errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
+ if (ret) {
+ dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
+ ret);
+ /* reset everything for now */
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ goto msi_error;
+ }
+
+ /* log MAC errors */
+ desc_data = (__le32 *)&desc[1];
+ status = le32_to_cpu(*desc_data);
+ if (status) {
+ hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
+ &hclge_mac_afifo_tnl_int[0], status);
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
}
- return PCI_ERS_RESULT_NEED_RESET;
+ /* log PPU(RCB) errors */
+ desc_data = (__le32 *)&desc[5];
+ status = le32_to_cpu(*(desc_data + 2)) &
+ HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
+ if (status) {
+ dev_warn(dev,
+ "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n",
+ status);
+ set_bit(HNAE3_CORE_RESET, reset_requests);
+ }
+
+ /* clear all main PF MSIx errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
+ if (ret) {
+ dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
+ ret);
+ /* reset everything for now */
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ goto msi_error;
+ }
+
+ /* query all PF MSIx errors */
+ memset(desc, 0, bd_num * sizeof(struct hclge_desc));
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
+ if (ret) {
+ dev_err(dev, "query all pf msix int cmd failed (%d)\n",
+ ret);
+ /* reset everything for now */
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ goto msi_error;
+ }
+
+ /* log SSU PF errors */
+ status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
+ if (status) {
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_pf_int[0], status);
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ }
+
+ /* read and log PPP PF errors */
+ desc_data = (__le32 *)&desc[2];
+ status = le32_to_cpu(*desc_data);
+ if (status)
+ hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
+ &hclge_ppp_pf_abnormal_int[0], status);
+
+ /* PPU(RCB) PF errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
+ if (status)
+ hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
+ &hclge_ppu_pf_abnormal_int[0], status);
+
+ /* clear all PF MSIx errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
+ if (ret) {
+ dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
+ ret);
+ /* reset everything for now */
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ }
+
+msi_error:
+ kfree(desc);
+out:
+ return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index e0e3b5861495..51a7d4eb066a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -7,9 +7,11 @@
#include "hclge_main.h"
#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
-#define HCLGE_RAS_REG_FE_MASK 0xFF
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
-#define HCLGE_RAS_REG_NFE_SHIFT 8
+#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
+
+#define HCLGE_VECTOR0_PF_OTHER_INT_STS_REG 0x20800
+#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00
#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000
@@ -23,6 +25,8 @@
#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100
#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF
#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF
+#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000
+#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000
#define HCLGE_IGU_ERR_INT_EN 0x0000066F
#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F
#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF
@@ -41,21 +45,55 @@
#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
#define HCLGE_NCSI_ERR_INT_EN 0x3
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
+#define HCLGE_MAC_COMMON_ERR_INT_EN GENMASK(7, 0)
+#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK GENMASK(7, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN 0x3FFF3FFF
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK 0x3FFF3FFF
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2 0xB
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK 0xB
+#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN GENMASK(7, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK GENMASK(23, 16)
+#define HCLGE_PPU_PF_ABNORMAL_INT_EN GENMASK(5, 0)
+#define HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK GENMASK(5, 0)
+#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN GENMASK(31, 0)
+#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0)
+#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN GENMASK(31, 0)
+#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0)
+#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN 0x0101
+#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK 0x0101
+#define HCLGE_SSU_COMMON_INT_EN GENMASK(9, 0)
+#define HCLGE_SSU_COMMON_INT_EN_MASK GENMASK(9, 0)
+#define HCLGE_SSU_PORT_BASED_ERR_INT_EN 0x0BFF
+#define HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK 0x0BFF0000
+#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN GENMASK(23, 0)
+#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK GENMASK(23, 0)
+
+#define HCLGE_SSU_COMMON_ERR_INT_MASK GENMASK(9, 0)
+#define HCLGE_SSU_PORT_INT_MSIX_MASK 0x7BFF
+#define HCLGE_IGU_INT_MASK GENMASK(3, 0)
+#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0)
+#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0)
+#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
+#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28)
+#define HCLGE_PPU_PF_INT_MSIX_MASK 0x27
+#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0)
+#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0)
+#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0)
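+
+/* For reference: GENMASK(h, l) sets bits l..h inclusive, e.g.
+ * GENMASK(9, 0) == 0x3FF and GENMASK(23, 16) == 0x00FF0000
+ */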
-#define HCLGE_IMP_TCM_ECC_INT_MASK 0xFFFF
-#define HCLGE_IMP_ITCM4_ECC_INT_MASK 0x3
-#define HCLGE_CMDQ_ECC_INT_MASK 0xFFFF
-#define HCLGE_CMDQ_ROC_ECC_INT_SHIFT 16
-#define HCLGE_TQP_ECC_INT_MASK 0xFFF
-#define HCLGE_TQP_ECC_INT_SHIFT 16
-#define HCLGE_IMP_TCM_ECC_CLR_MASK 0xFFFF
-#define HCLGE_IMP_ITCM4_ECC_CLR_MASK 0x3
-#define HCLGE_CMDQ_NIC_ECC_CLR_MASK 0xFFFF
-#define HCLGE_CMDQ_ROCEE_ECC_CLR_MASK 0xFFFF0000
-#define HCLGE_TQP_IMP_ERR_CLR_MASK 0x0FFF0001
-#define HCLGE_IGU_COM_INT_MASK 0xF
-#define HCLGE_IGU_EGU_TNL_INT_MASK 0x3F
-#define HCLGE_PPP_PF_INT_MASK 0x100
+#define HCLGE_ROCEE_RAS_NFE_INT_EN 0xF
+#define HCLGE_ROCEE_RAS_CE_INT_EN 0x1
+#define HCLGE_ROCEE_RAS_NFE_INT_EN_MASK 0xF
+#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1
+#define HCLGE_ROCEE_RERR_INT_MASK BIT(0)
+#define HCLGE_ROCEE_BERR_INT_MASK BIT(1)
+#define HCLGE_ROCEE_ECC_INT_MASK BIT(2)
+#define HCLGE_ROCEE_OVF_INT_MASK BIT(3)
+#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000
+#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F
enum hclge_err_int_type {
HCLGE_ERR_INT_MSIX = 0,
@@ -67,9 +105,7 @@ enum hclge_err_int_type {
struct hclge_hw_blk {
u32 msk;
const char *name;
- int (*enable_error)(struct hclge_dev *hdev, bool en);
- void (*process_error)(struct hclge_dev *hdev,
- enum hclge_err_int_type type);
+ int (*config_err_int)(struct hclge_dev *hdev, bool en);
};
struct hclge_hw_error {
@@ -78,6 +114,7 @@ struct hclge_hw_error {
};
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
-int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en);
-pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev);
+pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ffdd96020860..f7637c08bb3a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -26,7 +26,9 @@
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
+#define HCLGE_BUF_SIZE_UNIT 256
+
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
@@ -48,6 +50,62 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
+static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
+ HCLGE_CMDQ_TX_ADDR_H_REG,
+ HCLGE_CMDQ_TX_DEPTH_REG,
+ HCLGE_CMDQ_TX_TAIL_REG,
+ HCLGE_CMDQ_TX_HEAD_REG,
+ HCLGE_CMDQ_RX_ADDR_L_REG,
+ HCLGE_CMDQ_RX_ADDR_H_REG,
+ HCLGE_CMDQ_RX_DEPTH_REG,
+ HCLGE_CMDQ_RX_TAIL_REG,
+ HCLGE_CMDQ_RX_HEAD_REG,
+ HCLGE_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_CMDQ_INTR_STS_REG,
+ HCLGE_CMDQ_INTR_EN_REG,
+ HCLGE_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
+ HCLGE_VECTOR0_OTER_EN_REG,
+ HCLGE_MISC_RESET_STS_REG,
+ HCLGE_MISC_VECTOR_INT_STS,
+ HCLGE_GLOBAL_RESET_REG,
+ HCLGE_FUN_RST_ING,
+ HCLGE_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
+ HCLGE_RING_RX_ADDR_H_REG,
+ HCLGE_RING_RX_BD_NUM_REG,
+ HCLGE_RING_RX_BD_LENGTH_REG,
+ HCLGE_RING_RX_MERGE_EN_REG,
+ HCLGE_RING_RX_TAIL_REG,
+ HCLGE_RING_RX_HEAD_REG,
+ HCLGE_RING_RX_FBD_NUM_REG,
+ HCLGE_RING_RX_OFFSET_REG,
+ HCLGE_RING_RX_FBD_OFFSET_REG,
+ HCLGE_RING_RX_STASH_REG,
+ HCLGE_RING_RX_BD_ERR_REG,
+ HCLGE_RING_TX_ADDR_L_REG,
+ HCLGE_RING_TX_ADDR_H_REG,
+ HCLGE_RING_TX_BD_NUM_REG,
+ HCLGE_RING_TX_PRIORITY_REG,
+ HCLGE_RING_TX_TC_REG,
+ HCLGE_RING_TX_MERGE_EN_REG,
+ HCLGE_RING_TX_TAIL_REG,
+ HCLGE_RING_TX_HEAD_REG,
+ HCLGE_RING_TX_FBD_NUM_REG,
+ HCLGE_RING_TX_OFFSET_REG,
+ HCLGE_RING_TX_EBD_NUM_REG,
+ HCLGE_RING_TX_EBD_OFFSET_REG,
+ HCLGE_RING_TX_BD_ERR_REG,
+ HCLGE_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
+ HCLGE_TQP_INTR_GL0_REG,
+ HCLGE_TQP_INTR_GL1_REG,
+ HCLGE_TQP_INTR_GL2_REG,
+ HCLGE_TQP_INTR_RL_REG};
+
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
"App Loopback test",
"Serdes serial Loopback test",
@@ -631,6 +689,22 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->num_tqps = __le16_to_cpu(req->tqp_num);
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
+ if (req->tx_buf_size)
+ hdev->tx_buf_size =
+ __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+ else
+ hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+
+ hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
+
+ if (req->dv_buf_size)
+ hdev->dv_buf_size =
+ __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+ else
+ hdev->dv_buf_size = HCLGE_DEFAULT_DV;
+
+ hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
+
if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_base_msix_offset =
hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
@@ -886,7 +960,7 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->pfc_max = hdev->tc_max;
}
- hdev->tm_info.num_tc = hdev->tc_max;
+ hdev->tm_info.num_tc = 1;
/* Currently non-contiguous TCs are not supported */
for (i = 0; i < hdev->tm_info.num_tc; i++)
@@ -921,6 +995,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_cfg_gro_status_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
+ req = (struct hclge_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "GRO hardware config cmd failed, ret = %d\n", ret);
+
+ return ret;
+}
+
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
struct hclge_tqp *tqp;
@@ -1144,6 +1240,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
vport->vport_id = i;
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -1289,40 +1386,51 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
{
u32 shared_buf_min, shared_buf_tc, shared_std;
int tc_num, pfc_enable_num;
- u32 shared_buf;
+ u32 shared_buf, aligned_mps;
u32 rx_priv;
int i;
tc_num = hclge_get_tc_num(hdev);
pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
+ aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
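+ /* mps rounded up to the 256 byte HCLGE_BUF_SIZE_UNIT */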
if (hnae3_dev_dcb_supported(hdev))
- shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+ shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
else
- shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
+ shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ + hdev->dv_buf_size;
- shared_buf_tc = pfc_enable_num * hdev->mps +
- (tc_num - pfc_enable_num) * hdev->mps / 2 +
- hdev->mps;
- shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
+ shared_buf_tc = pfc_enable_num * aligned_mps +
+ (tc_num - pfc_enable_num) * aligned_mps / 2 +
+ aligned_mps;
+ shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
+ HCLGE_BUF_SIZE_UNIT);
rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
- if (rx_all <= rx_priv + shared_std)
+ if (rx_all < rx_priv + shared_std)
return false;
- shared_buf = rx_all - rx_priv;
+ shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
buf_alloc->s_buf.buf_size = shared_buf;
- buf_alloc->s_buf.self.high = shared_buf;
- buf_alloc->s_buf.self.low = 2 * hdev->mps;
+ if (hnae3_dev_dcb_supported(hdev)) {
+ buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
+ buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
+ - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+ } else {
+ buf_alloc->s_buf.self.high = aligned_mps +
+ HCLGE_NON_DCB_ADDITIONAL_BUF;
+ buf_alloc->s_buf.self.low =
+ roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+ }
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
if ((hdev->hw_tc_map & BIT(i)) &&
(hdev->tm_info.hw_pfc_map & BIT(i))) {
- buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
- buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
+ buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
+ buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
} else {
buf_alloc->s_buf.tc_thrd[i].low = 0;
- buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
+ buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
}
}
@@ -1340,11 +1448,11 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
- if (total_size < HCLGE_DEFAULT_TX_BUF)
+ if (total_size < hdev->tx_buf_size)
return -ENOMEM;
if (hdev->hw_tc_map & BIT(i))
- priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+ priv->tx_buf_size = hdev->tx_buf_size;
else
priv->tx_buf_size = 0;
@@ -1362,7 +1470,6 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
-#define HCLGE_BUF_SIZE_UNIT 128
u32 rx_all = hdev->pkt_buf_size, aligned_mps;
int no_pfc_priv_num, pfc_priv_num;
struct hclge_priv_buf *priv;
@@ -1388,13 +1495,16 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
priv->wl.low = aligned_mps;
- priv->wl.high = priv->wl.low + aligned_mps;
+ priv->wl.high =
+ roundup(priv->wl.low + aligned_mps,
+ HCLGE_BUF_SIZE_UNIT);
priv->buf_size = priv->wl.high +
- HCLGE_DEFAULT_DV;
+ hdev->dv_buf_size;
} else {
priv->wl.low = 0;
priv->wl.high = 2 * aligned_mps;
- priv->buf_size = priv->wl.high;
+ priv->buf_size = priv->wl.high +
+ hdev->dv_buf_size;
}
} else {
priv->enable = 0;
@@ -1424,13 +1534,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
- priv->wl.low = 128;
+ priv->wl.low = 256;
priv->wl.high = priv->wl.low + aligned_mps;
- priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
+ priv->buf_size = priv->wl.high + hdev->dv_buf_size;
} else {
priv->wl.low = 0;
priv->wl.high = aligned_mps;
- priv->buf_size = priv->wl.high;
+ priv->buf_size = priv->wl.high + hdev->dv_buf_size;
}
}
@@ -1873,37 +1983,6 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}
-static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
- u8 *duplex)
-{
- struct hclge_query_an_speed_dup_cmd *req;
- struct hclge_desc desc;
- int speed_tmp;
- int ret;
-
- req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
-
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac speed/autoneg/duplex query cmd failed %d\n",
- ret);
- return ret;
- }
-
- *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B);
- speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M,
- HCLGE_QUERY_SPEED_S);
-
- ret = hclge_parse_speed(speed_tmp, speed);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "could not parse speed(=%d), %d\n", speed_tmp, ret);
-
- return ret;
-}
-
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
struct hclge_config_auto_neg_cmd *req;
@@ -1947,12 +2026,10 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
static int hclge_mac_init(struct hclge_dev *hdev)
{
- struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
- int mtu;
int ret;
+ hdev->support_sfp_query = true;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex);
@@ -1964,15 +2041,16 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
- if (netdev)
- mtu = netdev->mtu;
- else
- mtu = ETH_DATA_LEN;
+ ret = hclge_set_mac_mtu(hdev, hdev->mps);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
- ret = hclge_set_mtu(handle, mtu);
+ ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
- "set mtu failed ret=%d\n", ret);
+ "allocate buffer fail, ret=%d\n", ret);
return ret;
}
@@ -2061,34 +2139,58 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
}
}
+static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
+{
+ struct hclge_sfp_speed_cmd *resp = NULL;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
+ resp = (struct hclge_sfp_speed_cmd *)desc.data;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "IMP do not support get SFP speed %d\n", ret);
+ return ret;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
+ return ret;
+ }
+
+ *speed = resp->sfp_speed;
+
+ return 0;
+}
+
static int hclge_update_speed_duplex(struct hclge_dev *hdev)
{
struct hclge_mac mac = hdev->hw.mac;
- u8 duplex;
int speed;
int ret;
- /* get the speed and duplex as autoneg'result from mac cmd when phy
+ /* get the speed from the SFP cmd when the phy
 * doesn't exist.
*/
- if (mac.phydev || !mac.autoneg)
+ if (mac.phydev)
return 0;
- ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac autoneg/speed/duplex query failed %d\n", ret);
- return ret;
- }
+ /* if the IMP does not support getting SFP/qSFP speed, return directly */
+ if (!hdev->support_sfp_query)
+ return 0;
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "mac speed/duplex config failed %d\n", ret);
+ ret = hclge_get_sfp_speed(hdev, &speed);
+ if (ret == -EOPNOTSUPP) {
+ hdev->support_sfp_query = false;
+ return ret;
+ } else if (ret) {
return ret;
}
- return 0;
+ if (speed == HCLGE_MAC_SPEED_UNKNOWN)
+ return 0; /* do nothing if no SFP */
+
+ /* must configure full duplex for SFP */
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
}
static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
@@ -2129,12 +2231,13 @@ static void hclge_service_complete(struct hclge_dev *hdev)
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
- u32 rst_src_reg;
- u32 cmdq_src_reg;
+ u32 rst_src_reg, cmdq_src_reg, msix_src_reg;
/* fetch the events from their corresponding regs */
rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+ msix_src_reg = hclge_read_dev(&hdev->hw,
+ HCLGE_VECTOR0_PF_OTHER_INT_STS_REG);
/* Assumption: If by any chance reset and mailbox events are reported
* together then we will only process reset event in this go and will
@@ -2144,7 +2247,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
/* check for vector0 reset event sources */
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
@@ -2152,17 +2264,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
}
if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "core reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
- *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
- return HCLGE_VECTOR0_EVENT_RST;
- }
+ /* check for vector0 msix event source */
+ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
+ return HCLGE_VECTOR0_EVENT_ERR;
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
@@ -2214,6 +2325,19 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
/* vector 0 interrupt is shared with reset and mailbox source events.*/
switch (event_cause) {
+ case HCLGE_VECTOR0_EVENT_ERR:
+ /* we do not know what type of reset is required now. This can
+ * only be decided after we fetch the type of errors which
+ * caused this event. Therefore, do the following for now:
+ * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means the
+ * actual type of reset to be used is deferred.
+ * 2. Schedule the reset service task.
+ * 3. When the service task sees HNAE3_UNKNOWN_RESET it will
+ * fetch the correct type of reset by first decoding the
+ * types of errors.
+ */
+ set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
+ /* fall through */
case HCLGE_VECTOR0_EVENT_RST:
hclge_reset_task_schedule(hdev);
break;
@@ -2308,21 +2432,56 @@ static int hclge_notify_client(struct hclge_dev *hdev,
int ret;
ret = client->ops->reset_notify(handle, type);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify nic client failed %d(%d)\n", type, ret);
return ret;
+ }
}
return 0;
}
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->roce_client;
+ int ret = 0;
+ u16 i;
+
+ if (!client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ struct hnae3_handle *handle = &hdev->vport[i].roce;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify roce client failed %d(%d)",
+ type, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS 100
-#define HCLGE_RESET_WAIT_CNT 5
+#define HCLGE_RESET_WAIT_CNT 200
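+/* worst case the loop below polls HCLGE_RESET_WAIT_CNT times at
+ * HCLGE_RESET_WATI_MS intervals, i.e. 200 * 100 ms = ~20 s
+ */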
u32 val, reg, reg_bit;
u32 cnt = 0;
switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ reg = HCLGE_GLOBAL_RESET_REG;
+ reg_bit = HCLGE_IMP_RESET_BIT;
+ break;
case HNAE3_GLOBAL_RESET:
reg = HCLGE_GLOBAL_RESET_REG;
reg_bit = HCLGE_GLOBAL_RESET_BIT;
@@ -2335,6 +2494,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
break;
+ case HNAE3_FLR_RESET:
+ break;
default:
dev_err(&hdev->pdev->dev,
"Wait for unsupported reset type: %d\n",
@@ -2342,6 +2503,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return -EINVAL;
}
+ if (hdev->reset_type == HNAE3_FLR_RESET) {
+ while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+ cnt++ < HCLGE_RESET_WAIT_CNT)
+ msleep(HCLGE_RESET_WATI_MS);
+
+ if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+ dev_err(&hdev->pdev->dev,
+ "flr wait timeout: %d\n", cnt);
+ return -EBUSY;
+ }
+
+ return 0;
+ }
+
val = hclge_read_dev(&hdev->hw, reg);
while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
@@ -2358,6 +2533,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return 0;
}
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
+{
+ struct hclge_vf_rst_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclge_vf_rst_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
+ req->dest_vfid = func_id;
+
+ if (reset)
+ req->vf_rst = 0x1;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
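+/* Set or clear FUNC_RST_ING for every VF. Called with reset=true from
+ * hclge_reset_prepare_down() before a function-level/FLR reset, and
+ * with reset=false from hclge_reset_prepare_up() once it completes.
+ */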
+int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+{
+ int i;
+
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to set/clear VF's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set vf(%d) rst failed %d!\n",
+ vport->vport_id, ret);
+ return ret;
+ }
+
+ if (!reset)
+ continue;
+
+ /* Inform VF to process the reset.
+ * hclge_inform_reset_assert_to_vf may fail if VF
+ * driver is not loaded.
+ */
+ ret = hclge_inform_reset_assert_to_vf(vport);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "inform reset to vf(%d) failed %d!\n",
+ vport->vport_id, ret);
+ }
+
+ return 0;
+}
+
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
@@ -2396,11 +2620,16 @@ static void hclge_do_reset(struct hclge_dev *hdev)
break;
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
- hclge_func_reset_cmd(hdev, 0);
/* schedule again to check later */
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
+ case HNAE3_FLR_RESET:
+ dev_info(&pdev->dev, "FLR requested\n");
+ /* schedule again to check later */
+ set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ break;
default:
dev_warn(&pdev->dev,
"Unsupported reset type: %d\n", hdev->reset_type);
@@ -2413,21 +2642,46 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
{
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+ /* first, resolve any unknown reset type to the known type(s) */
+ if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
+ /* we will intentionally ignore any errors from this function
+ * as we will end up in *some* reset request in any case
+ */
+ hclge_handle_hw_msix_error(hdev, addr);
+ clear_bit(HNAE3_UNKNOWN_RESET, addr);
+ /* We deferred clearing the error event which caused the
+ * interrupt since it was not possible to do that in
+ * interrupt context (this is the reason we introduced the
+ * new UNKNOWN reset type). Now that the errors have been
+ * handled and cleared in hardware, we can safely re-enable
+ * interrupts. This is an exception to the norm.
+ */
+ hclge_enable_vector(&hdev->misc_vector, true);
+ }
+
/* return the highest priority reset level amongst all */
- if (test_bit(HNAE3_GLOBAL_RESET, addr))
+ if (test_bit(HNAE3_IMP_RESET, addr)) {
+ rst_level = HNAE3_IMP_RESET;
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
rst_level = HNAE3_GLOBAL_RESET;
- else if (test_bit(HNAE3_CORE_RESET, addr))
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_CORE_RESET, addr)) {
rst_level = HNAE3_CORE_RESET;
- else if (test_bit(HNAE3_IMP_RESET, addr))
- rst_level = HNAE3_IMP_RESET;
- else if (test_bit(HNAE3_FUNC_RESET, addr))
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
rst_level = HNAE3_FUNC_RESET;
-
- /* now, clear all other resets */
- clear_bit(HNAE3_GLOBAL_RESET, addr);
- clear_bit(HNAE3_CORE_RESET, addr);
- clear_bit(HNAE3_IMP_RESET, addr);
- clear_bit(HNAE3_FUNC_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
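+
+ /* a higher-priority reset supersedes the lower ones, which is why
+ * each branch above also clears the lower-priority pending bits
+ */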
return rst_level;
}
@@ -2457,39 +2711,209 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
hclge_enable_vector(&hdev->misc_vector, true);
}
+static int hclge_reset_prepare_down(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* fall through */
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, true);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* There is no mechanism for the PF to know whether the VF has
+ * stopped IO; for now, just wait 100 ms for the VF to stop IO
+ */
+ msleep(100);
+ ret = hclge_func_reset_cmd(hdev, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "asserting function reset fail %d!\n", ret);
+ return ret;
+ }
+
+ /* After performing a PF reset, it is not necessary to handle
+ * mailbox events or send any command to the firmware, because
+ * neither is valid until hclge_cmd_init is called again.
+ */
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ break;
+ case HNAE3_FLR_RESET:
+ /* There is no mechanism for the PF to know whether the VF has
+ * stopped IO; for now, just wait 100 ms for the VF to stop IO
+ */
+ msleep(100);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ break;
+ case HNAE3_IMP_RESET:
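+ /* presumably a ready-flag handshake: signal the IMP firmware
+ * that this PF is prepared for the coming IMP reset
+ */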
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
+ BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
+ break;
+ default:
+ break;
+ }
+
+ dev_info(&hdev->pdev->dev, "prepare wait ok\n");
+
+ return ret;
+}
+
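+/* Decide how to handle a failed or timed-out reset: returns true when the
+ * reset task should be rescheduled to retry, false when this attempt is
+ * abandoned (cause cleared; escalation may follow via reset_timer).
+ */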
+static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+{
+#define MAX_RESET_FAIL_CNT 5
+#define RESET_UPGRADE_DELAY_SEC 10
+
+ if (hdev->reset_pending) {
+ dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
+ hdev->reset_pending);
+ return true;
+ } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
+ (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
+ BIT(HCLGE_IMP_RESET_BIT))) {
+ dev_info(&hdev->pdev->dev,
+ "reset failed because IMP Reset is pending\n");
+ hclge_clear_reset_cause(hdev);
+ return false;
+ } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+ hdev->reset_fail_cnt++;
+ if (is_timeout) {
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule to wait for hw reset done\n");
+ return true;
+ }
+
+ dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
+ hclge_clear_reset_cause(hdev);
+ mod_timer(&hdev->reset_timer,
+ jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
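+ /* when reset_timer fires, hclge_reset_timer requests a global reset */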
+
+ return false;
+ }
+
+ hclge_clear_reset_cause(hdev);
+ dev_err(&hdev->pdev->dev, "Reset fail!\n");
+ return false;
+}
+
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* fall through */
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, false);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- struct hnae3_handle *handle;
+ bool is_timeout = false;
+ int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
+ hdev->reset_count++;
/* perform reset of the stack & ae device for a client */
- handle = &hdev->vport[0].nic;
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ ret = hclge_reset_prepare_down(hdev);
+ if (ret)
+ goto err_reset;
+
rtnl_lock();
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
rtnl_unlock();
- if (!hclge_reset_wait(hdev)) {
- rtnl_lock();
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- hclge_reset_ae_dev(hdev->ae_dev);
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ ret = hclge_reset_prepare_wait(hdev);
+ if (ret)
+ goto err_reset;
- hclge_clear_reset_cause(hdev);
- } else {
- rtnl_lock();
- /* schedule again to check pending resets later */
- set_bit(hdev->reset_type, &hdev->reset_pending);
- hclge_reset_task_schedule(hdev);
+ if (hclge_reset_wait(hdev)) {
+ is_timeout = true;
+ goto err_reset;
}
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- handle->last_reset_time = jiffies;
+ ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ rtnl_lock();
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ hclge_clear_reset_cause(hdev);
+
+ ret = hclge_reset_prepare_up(hdev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
rtnl_unlock();
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ hdev->last_reset_time = jiffies;
+ hdev->reset_fail_cnt = 0;
ae_dev->reset_type = HNAE3_NONE_RESET;
+
+ return;
+
+err_reset_lock:
+ rtnl_unlock();
+err_reset:
+ if (hclge_reset_err_handle(hdev, is_timeout))
+ hclge_reset_task_schedule(hdev);
}
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
@@ -2515,20 +2939,42 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
if (!handle)
handle = &hdev->vport[0].nic;
- if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+ if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
return;
- else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
- handle->reset_level = HNAE3_FUNC_RESET;
+ else if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclge_get_reset_level(hdev,
+ &hdev->default_reset_request);
+ else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+ hdev->reset_level = HNAE3_FUNC_RESET;
dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
- handle->reset_level);
+ hdev->reset_level);
/* request reset & schedule reset task */
- set_bit(handle->reset_level, &hdev->reset_request);
+ set_bit(hdev->reset_level, &hdev->reset_request);
hclge_reset_task_schedule(hdev);
- if (handle->reset_level < HNAE3_GLOBAL_RESET)
- handle->reset_level++;
+ if (hdev->reset_level < HNAE3_GLOBAL_RESET)
+ hdev->reset_level++;
+}
+
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclge_reset_timer(struct timer_list *t)
+{
+ struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+
+ dev_info(&hdev->pdev->dev,
+ "triggering global reset in reset timer\n");
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
+ hclge_reset_event(hdev->pdev, NULL);
}
static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -2542,6 +2988,7 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
* b. else, we can come back later to check this status so re-sched
* now.
*/
+ hdev->last_reset_time = jiffies;
hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclge_reset(hdev);
@@ -2584,6 +3031,23 @@ static void hclge_mailbox_service_task(struct work_struct *work)
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
+static void hclge_update_vport_alive(struct hclge_dev *hdev)
+{
+ int i;
+
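+ /* a VF refreshes vport->last_active_jiffies via the KEEP_ALIVE
+ * mailbox message; treat the VF as dead after 8 seconds of silence
+ */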
+ /* start from vport 1, since the PF (vport 0) is always alive */
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+
+ /* If the VF is not alive, reset its MPS to the default value */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ }
+}
+
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
@@ -2596,6 +3060,7 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
+ hclge_update_vport_alive(hdev);
hclge_service_complete(hdev);
}
@@ -4212,6 +4677,13 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
u16 tqps;
+ if (vf > hdev->num_req_vfs) {
+ dev_err(&hdev->pdev->dev,
+ "Error: vf id (%d) > max vf num (%d)\n",
+ vf, hdev->num_req_vfs);
+ return -EINVAL;
+ }
+
dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
@@ -4222,13 +4694,6 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
return -EINVAL;
}
- if (vf > hdev->num_req_vfs) {
- dev_err(&hdev->pdev->dev,
- "Error: vf id (%d) > max vf num (%d)\n",
- vf, hdev->num_req_vfs);
- return -EINVAL;
- }
-
action = HCLGE_FD_ACTION_ACCEPT_PACKET;
q_index = ring;
}
@@ -4336,8 +4801,16 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
struct hlist_node *node;
int ret;
+ /* Return ok here, because reset error handling will check this
+ * return value. If error is returned here, the reset process will
+ * fail.
+ */
if (!hnae3_dev_fd_supported(hdev))
- return -EOPNOTSUPP;
+ return 0;
+
+ /* if fd is disabled, the rules should not be restored during reset */
+ if (!hdev->fd_cfg.fd_en)
+ return 0;
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
@@ -4592,6 +5065,31 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
return 0;
}
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
+}
+
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->reset_count;
+}
+
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4801,19 +5299,28 @@ static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
}
}
-static int hclge_ae_start(struct hnae3_handle *handle)
+static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i;
- for (i = 0; i < vport->alloc_tqps; i++)
- hclge_tqp_enable(hdev, i, 0, true);
+ if (enable) {
+ mod_timer(&hdev->service_timer, jiffies + HZ);
+ } else {
+ del_timer_sync(&hdev->service_timer);
+ cancel_work_sync(&hdev->service_task);
+ clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
+ }
+}
+
+static int hclge_ae_start(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
/* mac enable */
hclge_cfg_mac_mode(hdev, true);
clear_bit(HCLGE_STATE_DOWN, &hdev->state);
- mod_timer(&hdev->service_timer, jiffies + HZ);
hdev->hw.mac.link = 0;
/* reset tqp stats */
@@ -4832,17 +5339,17 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
- clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
-
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ /* If it is not a PF reset, the firmware will disable the MAC,
+ * so we only need to stop the PHY here.
+ */
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+ hdev->reset_type != HNAE3_FUNC_RESET) {
hclge_mac_stop_phy(hdev);
return;
}
- for (i = 0; i < vport->alloc_tqps; i++)
- hclge_tqp_enable(hdev, i, 0, false);
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
+ hclge_reset_tqp(handle, i);
/* Mac disable */
hclge_cfg_mac_mode(hdev, false);
@@ -4851,11 +5358,35 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
/* reset tqp stats */
hclge_reset_tqp_stats(handle);
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
hclge_update_link_status(hdev);
}
+int hclge_vport_start(struct hclge_vport *vport)
+{
+ set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->last_active_jiffies = jiffies;
+ return 0;
+}
+
+void hclge_vport_stop(struct hclge_vport *vport)
+{
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+}
+
+static int hclge_client_start(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_vport_start(vport);
+}
+
+static void hclge_client_stop(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ hclge_vport_stop(vport);
+}
+
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
u16 cmdq_resp, u8 resp_code,
enum hclge_mac_vlan_tbl_opcode op)
@@ -6003,54 +6534,76 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclge_set_vlan_rx_offload_cfg(vport);
}
-static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
- int max_frm_size;
- int ret;
-
- max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
- if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
- max_frm_size > HCLGE_MAC_MAX_FRAME)
- return -EINVAL;
-
- max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- req->max_frm_size = cpu_to_le16(max_frm_size);
+ req->max_frm_size = cpu_to_le16(new_mps);
req->min_frm_size = HCLGE_MAC_MIN_FRAME;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
- else
- hdev->mps = max_frm_size;
-
- return ret;
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_set_vport_mtu(vport, new_mtu);
+}
+
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
struct hclge_dev *hdev = vport->back;
- int ret;
+ int i, max_frm_size, ret = 0;
+
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
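+ /* e.g. new_mtu = 1500: 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) +
+ * 8 (two VLAN tags) = 1526 bytes
+ */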
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
+ return -EINVAL;
+
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+ mutex_lock(&hdev->vport_lock);
+ /* VF's mps must fit within hdev->mps */
+ if (vport->vport_id && max_frm_size > hdev->mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ } else if (vport->vport_id) {
+ vport->mps = max_frm_size;
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ /* PF's mps must be no smaller than any VF's mps */
+ for (i = 1; i < hdev->num_alloc_vport; i++)
+ if (max_frm_size < hdev->vport[i].mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ }
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- ret = hclge_set_mac_mtu(hdev, new_mtu);
+ ret = hclge_set_mac_mtu(hdev, max_frm_size);
if (ret) {
dev_err(&hdev->pdev->dev,
"Change mtu fail, ret =%d\n", ret);
- return ret;
+ goto out;
}
+ hdev->mps = max_frm_size;
+ vport->mps = max_frm_size;
+
ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
"Allocate buffer fail, ret =%d\n", ret);
+out:
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
@@ -6098,8 +6651,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
}
-static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
- u16 queue_id)
+u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
{
struct hnae3_queue *queue;
struct hclge_tqp *tqp;
@@ -6250,7 +6802,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
if (!phydev->link || !phydev->autoneg)
return 0;
- local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
if (phydev->pause)
remote_advertising = LPA_PAUSE_CAP;
@@ -6612,6 +7164,8 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
+ if (hdev->reset_timer.function)
+ del_timer_sync(&hdev->reset_timer);
if (hdev->service_task.func)
cancel_work_sync(&hdev->service_task);
if (hdev->rst_service_task.func)
@@ -6620,6 +7174,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
cancel_work_sync(&hdev->mbx_service_task);
}
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_FLR_WAIT_MS 100
+#define HCLGE_FLR_WAIT_CNT 50
+ struct hclge_dev *hdev = ae_dev->priv;
+ int cnt = 0;
+
+ clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+ hclge_reset_event(hdev->pdev, NULL);
+
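+ /* poll for up to HCLGE_FLR_WAIT_CNT * HCLGE_FLR_WAIT_MS = 5 s for
+ * the reset task to bring the device down for FLR
+ */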
+ while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+ cnt++ < HCLGE_FLR_WAIT_CNT)
+ msleep(HCLGE_FLR_WAIT_MS);
+
+ if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+ dev_err(&hdev->pdev->dev,
+ "flr wait down timeout: %d\n", cnt);
+}
+
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -6635,7 +7217,11 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_level = HNAE3_FUNC_RESET;
ae_dev->priv = hdev;
+ hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+
+ mutex_init(&hdev->vport_lock);
ret = hclge_pci_init(hdev);
if (ret) {
@@ -6727,6 +7313,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ goto err_mdiobus_unreg;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6762,13 +7352,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_hw_error_set_state(hdev, true);
if (ret) {
dev_err(&pdev->dev,
- "hw error interrupts enable failed, ret =%d\n", ret);
+ "fail(%d) to enable hw error interrupts\n", ret);
goto err_mdiobus_unreg;
}
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
+ timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_WORK(&hdev->service_task, hclge_service_task);
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
@@ -6779,6 +7370,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_enable_vector(&hdev->misc_vector, true);
hclge_state_init(hdev);
+ hdev->last_reset_time = jiffies;
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -6806,6 +7398,17 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
+static void hclge_reset_vport_state(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ hclge_vport_start(vport);
+ vport++;
+ }
+}
+
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
@@ -6823,19 +7426,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- ret = hclge_get_cap(hdev);
- if (ret) {
- dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
- ret);
- return ret;
- }
-
- ret = hclge_configure(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
- return ret;
- }
-
ret = hclge_map_tqp(hdev);
if (ret) {
dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
@@ -6856,6 +7446,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6881,11 +7475,17 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- /* Re-enable the TM hw error interrupts because
- * they get disabled on core/global reset.
+ /* Re-enable the hw error interrupts because
+ * the interrupts get disabled on core/global reset.
*/
- if (hclge_enable_tm_hw_error(hdev, true))
- dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
+ ret = hclge_hw_error_set_state(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail(%d) to re-enable HNS hw error interrupts\n", ret);
+ return ret;
+ }
+
+ hclge_reset_vport_state(hdev);
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -6913,6 +7513,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_destroy_cmd_queue(&hdev->hw);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
+ mutex_destroy(&hdev->vport_lock);
ae_dev->priv = NULL;
}
@@ -7166,8 +7767,15 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
return 0;
}
+#define MAX_SEPARATE_NUM 4
+#define SEPARATOR_VALUE 0xFFFFFFFF
+#define REG_NUM_PER_LINE 4
+#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
+
static int hclge_get_regs_len(struct hnae3_handle *handle)
{
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u32 regs_num_32_bit, regs_num_64_bit;
@@ -7180,15 +7788,25 @@ static int hclge_get_regs_len(struct hnae3_handle *handle)
return -EOPNOTSUPP;
}
- return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+
+ return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
+ tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE +
+ regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64);
}
static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
void *data)
{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u32 regs_num_32_bit, regs_num_64_bit;
+ int i, j, reg_um, separator_num;
+ u32 *reg = data;
int ret;
*version = hdev->fw_version;
@@ -7200,16 +7818,53 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
return;
}
- ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data);
+ /* fetch per-PF register values from the PF's PCIe register space */
+ reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
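+ /* pad each register group to a full 4-word line with SEPARATOR_VALUE;
+ * an already-aligned group still gets one whole separator line
+ */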
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (j = 0; j < kinfo->num_tqps; j++) {
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ 0x200 * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+
+ reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ 4 * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+
+ /* fetch PF common register values from firmware */
+ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get 32 bit register failed, ret = %d.\n", ret);
return;
}
- data = (u32 *)data + regs_num_32_bit;
- ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit,
- data);
+ reg += regs_num_32_bit;
+ ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
if (ret)
dev_err(&hdev->pdev->dev,
"Get 64 bit register failed, ret = %d.\n", ret);
@@ -7272,9 +7927,19 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
}
}
+static int hclge_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_config_gro(hdev, enable);
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
+ .flr_prepare = hclge_flr_prepare,
+ .flr_done = hclge_flr_done,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
.map_ring_to_vector = hclge_map_ring_to_vector,
@@ -7285,6 +7950,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_loopback = hclge_set_loopback,
.start = hclge_ae_start,
.stop = hclge_ae_stop,
+ .client_start = hclge_client_start,
+ .client_stop = hclge_client_stop,
.get_status = hclge_get_status,
.get_ksettings_an_result = hclge_get_ksettings_an_result,
.update_speed_duplex_h = hclge_update_speed_duplex_h,
@@ -7321,6 +7988,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .set_default_reset_request = hclge_set_def_reset_request,
.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
.set_channels = hclge_set_channels,
.get_channels = hclge_get_channels,
@@ -7336,7 +8004,14 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fd_all_rules = hclge_get_all_rules,
.restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
- .process_hw_error = hclge_process_ras_hw_error,
+ .dbg_run_cmd = hclge_dbg_run_cmd,
+ .handle_hw_ras_error = hclge_handle_hw_ras_error,
+ .get_hw_reset_stat = hclge_get_hw_reset_stat,
+ .ae_dev_resetting = hclge_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
+ .set_gro_en = hclge_gro_en,
+ .get_global_queue_id = hclge_covert_handle_qid_global,
+ .set_timer_task = hclge_set_timer_task,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0d9215404269..6615b85a1c52 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -28,6 +28,62 @@
#define HCLGE_VECTOR_REG_OFFSET 0x4
#define HCLGE_VECTOR_VF_OFFSET 0x100000
+#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000
+#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004
+#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008
+#define HCLGE_CMDQ_TX_TAIL_REG 0x27010
+#define HCLGE_CMDQ_TX_HEAD_REG 0x27014
+#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018
+#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C
+#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
+#define HCLGE_CMDQ_RX_TAIL_REG 0x27024
+#define HCLGE_CMDQ_RX_HEAD_REG 0x27028
+#define HCLGE_CMDQ_INTR_SRC_REG 0x27100
+#define HCLGE_CMDQ_INTR_STS_REG 0x27104
+#define HCLGE_CMDQ_INTR_EN_REG 0x27108
+#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
+
+/* bar registers for common func */
+#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
+#define HCLGE_RAS_OTHER_STS_REG 0x20B00
+#define HCLGE_FUNC_RESET_STS_REG 0x20C00
+#define HCLGE_GRO_EN_REG 0x28000
+
+/* bar registers for rcb */
+#define HCLGE_RING_RX_ADDR_L_REG 0x80000
+#define HCLGE_RING_RX_ADDR_H_REG 0x80004
+#define HCLGE_RING_RX_BD_NUM_REG 0x80008
+#define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C
+#define HCLGE_RING_RX_MERGE_EN_REG 0x80014
+#define HCLGE_RING_RX_TAIL_REG 0x80018
+#define HCLGE_RING_RX_HEAD_REG 0x8001C
+#define HCLGE_RING_RX_FBD_NUM_REG 0x80020
+#define HCLGE_RING_RX_OFFSET_REG 0x80024
+#define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028
+#define HCLGE_RING_RX_STASH_REG 0x80030
+#define HCLGE_RING_RX_BD_ERR_REG 0x80034
+#define HCLGE_RING_TX_ADDR_L_REG 0x80040
+#define HCLGE_RING_TX_ADDR_H_REG 0x80044
+#define HCLGE_RING_TX_BD_NUM_REG 0x80048
+#define HCLGE_RING_TX_PRIORITY_REG 0x8004C
+#define HCLGE_RING_TX_TC_REG 0x80050
+#define HCLGE_RING_TX_MERGE_EN_REG 0x80054
+#define HCLGE_RING_TX_TAIL_REG 0x80058
+#define HCLGE_RING_TX_HEAD_REG 0x8005C
+#define HCLGE_RING_TX_FBD_NUM_REG 0x80060
+#define HCLGE_RING_TX_OFFSET_REG 0x80064
+#define HCLGE_RING_TX_EBD_NUM_REG 0x80068
+#define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070
+#define HCLGE_RING_TX_BD_ERR_REG 0x80074
+#define HCLGE_RING_EN_REG 0x80090
+
+/* bar registers for tqp interrupt */
+#define HCLGE_TQP_INTR_CTRL_REG 0x20000
+#define HCLGE_TQP_INTR_GL0_REG 0x20100
+#define HCLGE_TQP_INTR_GL1_REG 0x20200
+#define HCLGE_TQP_INTR_GL2_REG 0x20300
+#define HCLGE_TQP_INTR_RL_REG 0x20900
+
#define HCLGE_RSS_IND_TBL_SIZE 512
#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
@@ -97,11 +153,13 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
/* Reset related Registers */
+#define HCLGE_PF_OTHER_INT_REG 0x20600
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
+#define HCLGE_IMP_RESET_BIT 2
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -115,8 +173,10 @@ enum HLCGE_PORT_TYPE {
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
+
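+/* default frame now reserves room for two (QinQ) VLAN tags:
+ * 1500 (ETH_DATA_LEN) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 8 = 1526 bytes
+ */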
#define HCLGE_MAC_DEFAULT_FRAME \
- (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728
@@ -145,12 +205,14 @@ enum HCLGE_DEV_STATE {
enum hclge_evt_cause {
HCLGE_VECTOR0_EVENT_RST,
HCLGE_VECTOR0_EVENT_MBX,
+ HCLGE_VECTOR0_EVENT_ERR,
HCLGE_VECTOR0_EVENT_OTHER,
};
#define HCLGE_MPF_ENBALE 1
enum HCLGE_MAC_SPEED {
+ HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */
HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */
HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */
@@ -593,10 +655,16 @@ struct hclge_dev {
struct hclge_misc_vector misc_vector;
struct hclge_hw_stats hw_stats;
unsigned long state;
+ unsigned long flr_state;
+ unsigned long last_reset_time;
enum hnae3_reset_type reset_type;
+ enum hnae3_reset_type reset_level;
+ unsigned long default_reset_request;
unsigned long reset_request; /* reset has been requested */
unsigned long reset_pending; /* client rst is pending to be served */
+ unsigned long reset_count; /* the number of reset has been done */
+ u32 reset_fail_cnt;
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -614,6 +682,7 @@ struct hclge_dev {
u8 hw_tc_map;
u8 tc_num_last_time;
enum hclge_fc_mode fc_mode_last_time;
+ u8 support_sfp_query;
#define HCLGE_FLAG_TC_BASE_SCH_MODE 1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
@@ -644,6 +713,7 @@ struct hclge_dev {
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list service_timer;
+ struct timer_list reset_timer;
struct work_struct service_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -666,7 +736,12 @@ struct hclge_dev {
u32 flag;
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
+ u32 tx_buf_size; /* Tx buffer size for each TC */
+ u32 dv_buf_size; /* Dv buffer size for each TC */
+
u32 mps; /* Max packet size */
+ /* vport_lock protect resource shared by vports */
+ struct mutex vport_lock;
struct hclge_vlan_type_cfg vlan_type_cfg;
@@ -717,6 +792,11 @@ struct hclge_rss_tuple_cfg {
u8 ipv6_fragment_en;
};
+enum HCLGE_VPORT_STATE {
+ HCLGE_VPORT_STATE_ALIVE,
+ HCLGE_VPORT_STATE_MAX
+};
+
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
@@ -742,6 +822,10 @@ struct hclge_vport {
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
+
+ unsigned long state;
+ unsigned long last_active_jiffies;
+ u32 mps; /* Max packet size */
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -768,6 +852,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
return tqp->index;
}
+static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
+{
+ return !!hdev->reset_pending;
+}
+
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
@@ -777,9 +867,15 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
+int hclge_vport_start(struct hclge_vport *vport);
+void hclge_vport_stop(struct hclge_vport *vport);
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf);
+u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f890022938d9..a1de451a85df 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -79,15 +79,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
return status;
}
-static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
+ struct hclge_dev *hdev = vport->back;
+ enum hnae3_reset_type reset_type;
u8 msg_data[2];
u8 dest_vfid;
dest_vfid = (u8)vport->vport_id;
+ if (hdev->reset_type == HNAE3_FUNC_RESET)
+ reset_type = HNAE3_VF_PF_FUNC_RESET;
+ else if (hdev->reset_type == HNAE3_FLR_RESET)
+ reset_type = HNAE3_VF_FULL_RESET;
+ else
+ return -EINVAL;
+
+ memcpy(&msg_data[0], &reset_type, sizeof(u16));
+
/* send this requested info to VF */
- return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}
@@ -290,6 +301,21 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
return status;
}
+static int hclge_set_vf_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ bool alive = !!mbx_req->msg[2];
+ int ret = 0;
+
+ if (alive)
+ ret = hclge_vport_start(vport);
+ else
+ hclge_vport_stop(vport);
+
+ return ret;
+}
+
static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -363,24 +389,41 @@ static void hclge_reset_vf(struct hclge_vport *vport,
int ret;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
- mbx_req->mbx_src_vfid);
-
- /* Acknowledge VF that PF is now about to assert the reset for the VF.
- * On receiving this message VF will get into pending state and will
- * start polling for the hardware reset completion status.
- */
- ret = hclge_inform_reset_assert_to_vf(vport);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to inform VF(%d)of reset, reset failed!\n",
- ret, vport->vport_id);
- return;
- }
+ vport->vport_id);
- dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n",
- mbx_req->mbx_src_vfid);
- /* reset this virtual function */
- hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
+ ret = hclge_func_reset_cmd(hdev, vport->vport_id);
+ hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+}
+
+static void hclge_vf_keep_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ vport->last_active_jiffies = jiffies;
+}
+
+static int hclge_set_vf_mtu(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ int ret;
+ u32 mtu;
+
+ memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
+ ret = hclge_set_vport_mtu(vport, mtu);
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+}
+
+static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ u16 queue_id, qid_in_pf;
+ u8 resp_data[2];
+
+ memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id));
+ qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
+ memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf));
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
@@ -460,6 +503,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF failed(%d) to config VF's VLAN\n",
ret);
break;
+ case HCLGE_MBX_SET_ALIVE:
+ ret = hclge_set_vf_alive(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ break;
case HCLGE_MBX_GET_QINFO:
ret = hclge_get_vf_queue_info(vport, req, true);
if (ret)
@@ -487,6 +537,22 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_RESET:
hclge_reset_vf(vport, req);
break;
+ case HCLGE_MBX_KEEP_ALIVE:
+ hclge_vf_keep_alive(vport, req);
+ break;
+ case HCLGE_MBX_SET_MTU:
+ ret = hclge_set_vf_mtu(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ break;
+ case HCLGE_MBX_GET_QID_IN_PF:
+ ret = hclge_get_queue_id_in_pf(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to get qid for VF\n",
+ ret);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 03018638f701..dabb8437f8dc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -12,7 +12,7 @@
SUPPORTED_TP | \
PHY_10BT_FEATURES | \
PHY_100BT_FEATURES | \
- PHY_1000BT_FEATURES)
+ SUPPORTED_1000baseT_Full)
enum hclge_mdio_c22_op_seq {
HCLGE_MDIO_C22_WRITE = 1,
@@ -179,6 +179,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
int duplex, speed;
int ret;
+ /* When the PHY link is down, do nothing */
+ if (netdev->phydev->link == 0)
+ return;
+
speed = netdev->phydev->speed;
duplex = netdev->phydev->duplex;
@@ -195,12 +199,13 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
{
struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
int ret;
if (!phydev)
return 0;
- phydev->supported &= ~SUPPORTED_FIBRE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
@@ -210,7 +215,15 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
return ret;
}
- phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ mask);
+ linkmode_and(phydev->supported, phydev->supported, mask);
phy_support_asym_pause(phydev);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 494e562fe8c7..00458da67503 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1259,15 +1259,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
return 0;
}
-int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
struct hclge_vport *vport = hdev->vport;
struct hnae3_knic_private_info *kinfo;
u32 i, k;
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
- if (prio_tc[i] >= hdev->tm_info.num_tc)
- return -EINVAL;
hdev->tm_info.prio_tc[i] = prio_tc[i];
for (k = 0; k < hdev->num_alloc_vport; k++) {
@@ -1275,18 +1273,12 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
kinfo->prio_tc[i] = prio_tc[i];
}
}
- return 0;
}
-int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
u8 i, bit_map = 0;
- for (i = 0; i < hdev->num_alloc_vport; i++) {
- if (num_tc > hdev->vport[i].alloc_tqps)
- return -EINVAL;
- }
-
hdev->tm_info.num_tc = num_tc;
for (i = 0; i < hdev->tm_info.num_tc; i++)
@@ -1300,8 +1292,6 @@ int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hdev->hw_tc_map = bit_map;
hclge_tm_schd_info_init(hdev);
-
- return 0;
}
int hclge_tm_init_hw(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 25eef13a3e14..b6496a439304 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -40,6 +40,13 @@ struct hclge_nq_to_qs_link_cmd {
__le16 qset_id;
};
+struct hclge_tqp_tx_queue_tc_cmd {
+ __le16 queue_id;
+ __le16 rsvd;
+ u8 tc_id;
+ u8 rev[3];
+};
+
struct hclge_pg_weight_cmd {
u8 pg_id;
u8 dwrr;
@@ -55,6 +62,12 @@ struct hclge_qs_weight_cmd {
u8 dwrr;
};
+struct hclge_ets_tc_weight_cmd {
+ u8 tc_weight[HNAE3_MAX_TC];
+ u8 weight_offset;
+ u8 rsvd[15];
+};
+
#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0)
#define HCLGE_TM_SHAP_IR_B_LSH 0
#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8)
@@ -131,8 +144,8 @@ struct hclge_port_shapping_cmd {
int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_pause_setup_hw(struct hclge_dev *hdev);
int hclge_tm_schd_mode_hw(struct hclge_dev *hdev);
-int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
-int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
int hclge_tm_map_cfg(struct hclge_dev *hdev);
int hclge_tm_init_hw(struct hclge_dev *hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 0d3b445f6799..d5765c8cf3a3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -72,6 +72,45 @@ static bool hclgevf_is_special_opcode(u16 opcode)
return false;
}
+static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
+{
+ struct hclgevf_dev *hdev = ring->dev;
+ struct hclgevf_hw *hw = &hdev->hw;
+ u32 reg_val;
+
+ if (ring->flag == HCLGEVF_TYPE_CSQ) {
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
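+ /* upper 32 bits of the DMA address; the double shift avoids
+ * shifting a 32-bit dma_addr_t by its full width (undefined)
+ */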
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ } else {
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+ }
+}
+
+static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
+{
+ hclgevf_cmd_config_regs(&hw->cmq.csq);
+ hclgevf_cmd_config_regs(&hw->cmq.crq);
+}
+
static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
@@ -96,61 +135,23 @@ static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
}
}
-static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
- struct hclgevf_cmq_ring *ring)
+static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
struct hclgevf_hw *hw = &hdev->hw;
- int ring_type = ring->flag;
- u32 reg_val;
+ struct hclgevf_cmq_ring *ring =
+ (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
int ret;
- ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- spin_lock_init(&ring->lock);
- ring->next_to_clean = 0;
- ring->next_to_use = 0;
ring->dev = hdev;
+ ring->flag = ring_type;
/* allocate CSQ/CRQ descriptor */
ret = hclgevf_alloc_cmd_desc(ring);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
- return ret;
- }
- /* initialize the hardware registers with csq/crq dma-address,
- * descriptor number, head & tail pointers
- */
- switch (ring_type) {
- case HCLGEVF_TYPE_CSQ:
- reg_val = (u32)ring->desc_dma_addr;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
- return 0;
- case HCLGEVF_TYPE_CRQ:
- reg_val = (u32)ring->desc_dma_addr;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
- return 0;
- default:
- return -EINVAL;
- }
+ return ret;
}
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
@@ -188,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ if (num > hclgevf_ring_space(&hw->cmq.csq) ||
+ test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
@@ -282,55 +284,83 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
return status;
}
-int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
- u32 version;
int ret;
- /* setup Tx write back timeout */
+ /* Setup the lock for command queue */
+ spin_lock_init(&hdev->hw.cmq.csq.lock);
+ spin_lock_init(&hdev->hw.cmq.crq.lock);
+
hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+ hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+ hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- /* setup queue CSQ/CRQ rings */
- hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
- ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize CSQ ring\n", ret);
+ "CSQ ring setup error %d\n", ret);
return ret;
}
- hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
- ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize CRQ ring\n", ret);
+ "CRQ ring setup error %d\n", ret);
goto err_csq;
}
+ return 0;
+err_csq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+ return ret;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+ u32 version;
+ int ret;
+
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock_bh(&hdev->hw.cmq.crq.lock);
+
/* initialize the pointers of async rx queue of mailbox */
hdev->arq.hdev = hdev;
hdev->arq.head = 0;
hdev->arq.tail = 0;
hdev->arq.count = 0;
+ hdev->hw.cmq.csq.next_to_clean = 0;
+ hdev->hw.cmq.csq.next_to_use = 0;
+ hdev->hw.cmq.crq.next_to_clean = 0;
+ hdev->hw.cmq.crq.next_to_use = 0;
+
+ hclgevf_cmd_init_regs(&hdev->hw);
+
+ spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ /* Check if there is a new reset pending, because a higher level
+ * reset may happen while a lower level reset is being processed.
+ */
+ if (hclgevf_is_reset_pending(hdev)) {
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return -EBUSY;
+ }
/* get firmware version */
ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to query firmware version\n", ret);
- goto err_crq;
+ return ret;
}
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
return 0;
-err_crq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-err_csq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
-
- return ret;
}
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index bc294b0c8b62..47030b42341f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -87,6 +87,8 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ /* GRO command */
+ HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
@@ -149,6 +151,12 @@ struct hclgevf_query_res_cmd {
__le16 rsv[7];
};
+#define HCLGEVF_GRO_EN_B 0
+struct hclgevf_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
@@ -256,6 +264,7 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
int hclgevf_cmd_init(struct hclgevf_dev *hdev);
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 085edb945389..82103d5fa815 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2,6 +2,7 @@
// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
@@ -10,8 +11,7 @@
#define HCLGEVF_NAME "hclgevf"
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
-static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;
static const struct pci_device_id ae_algovf_pci_tbl[] = {
@@ -23,6 +23,58 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
+static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
+ HCLGEVF_CMDQ_TX_ADDR_H_REG,
+ HCLGEVF_CMDQ_TX_DEPTH_REG,
+ HCLGEVF_CMDQ_TX_TAIL_REG,
+ HCLGEVF_CMDQ_TX_HEAD_REG,
+ HCLGEVF_CMDQ_RX_ADDR_L_REG,
+ HCLGEVF_CMDQ_RX_ADDR_H_REG,
+ HCLGEVF_CMDQ_RX_DEPTH_REG,
+ HCLGEVF_CMDQ_RX_TAIL_REG,
+ HCLGEVF_CMDQ_RX_HEAD_REG,
+ HCLGEVF_VECTOR0_CMDQ_SRC_REG,
+ HCLGEVF_CMDQ_INTR_STS_REG,
+ HCLGEVF_CMDQ_INTR_EN_REG,
+ HCLGEVF_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
+ HCLGEVF_RST_ING,
+ HCLGEVF_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
+ HCLGEVF_RING_RX_ADDR_H_REG,
+ HCLGEVF_RING_RX_BD_NUM_REG,
+ HCLGEVF_RING_RX_BD_LENGTH_REG,
+ HCLGEVF_RING_RX_MERGE_EN_REG,
+ HCLGEVF_RING_RX_TAIL_REG,
+ HCLGEVF_RING_RX_HEAD_REG,
+ HCLGEVF_RING_RX_FBD_NUM_REG,
+ HCLGEVF_RING_RX_OFFSET_REG,
+ HCLGEVF_RING_RX_FBD_OFFSET_REG,
+ HCLGEVF_RING_RX_STASH_REG,
+ HCLGEVF_RING_RX_BD_ERR_REG,
+ HCLGEVF_RING_TX_ADDR_L_REG,
+ HCLGEVF_RING_TX_ADDR_H_REG,
+ HCLGEVF_RING_TX_BD_NUM_REG,
+ HCLGEVF_RING_TX_PRIORITY_REG,
+ HCLGEVF_RING_TX_TC_REG,
+ HCLGEVF_RING_TX_MERGE_EN_REG,
+ HCLGEVF_RING_TX_TAIL_REG,
+ HCLGEVF_RING_TX_HEAD_REG,
+ HCLGEVF_RING_TX_FBD_NUM_REG,
+ HCLGEVF_RING_TX_OFFSET_REG,
+ HCLGEVF_RING_TX_EBD_NUM_REG,
+ HCLGEVF_RING_TX_EBD_OFFSET_REG,
+ HCLGEVF_RING_TX_BD_ERR_REG,
+ HCLGEVF_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
+ HCLGEVF_TQP_INTR_GL0_REG,
+ HCLGEVF_TQP_INTR_GL1_REG,
+ HCLGEVF_TQP_INTR_GL2_REG,
+ HCLGEVF_TQP_INTR_RL_REG};
+
static inline struct hclgevf_dev *hclgevf_ae_get_hdev(
struct hnae3_handle *handle)
{
@@ -204,17 +256,28 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
return 0;
}
+static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data[2], resp_data[2];
+ u16 qid_in_pf = 0;
+ int ret;
+
+ memcpy(&msg_data[0], &queue_id, sizeof(queue_id));
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data,
+ 2, true, resp_data, 2);
+ if (!ret)
+ qid_in_pf = *(u16 *)resp_data;
+
+ return qid_in_pf;
+}
+
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
struct hclgevf_tqp *tqp;
int i;
- /* if this is on going reset then we need to re-allocate the TPQs
- * since we cannot assume we would get same number of TPQs back from PF
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- devm_kfree(&hdev->pdev->dev, hdev->htqp);
-
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
sizeof(struct hclgevf_tqp), GFP_KERNEL);
if (!hdev->htqp)
@@ -258,12 +321,6 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
new_tqps = kinfo->rss_size * kinfo->num_tc;
kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
- /* if this is on going reset then we need to re-allocate the hnae queues
- * as well since number of TPQs from PF might have changed.
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- devm_kfree(&hdev->pdev->dev, kinfo->tqp);
-
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
if (!kinfo->tqp)
@@ -868,6 +925,9 @@ static int hclgevf_unmap_ring_from_vector(
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret, vector_id;
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
vector_id = hclgevf_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&handle->pdev->dev,
@@ -956,13 +1016,6 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
return status;
}
-static int hclgevf_get_queue_id(struct hnae3_queue *queue)
-{
- struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
-
- return tqp->index;
-}
-
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -1097,38 +1150,87 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
2, true, NULL, 0);
}
+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
+ sizeof(new_mtu), true, NULL, 0);
+}
+
static int hclgevf_notify_client(struct hclgevf_dev *hdev,
enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->nic_client;
struct hnae3_handle *handle = &hdev->nic;
+ int ret;
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
- return client->ops->reset_notify(handle, type);
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
+ type, ret);
+
+ return ret;
+}
+
+static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
+static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
+ unsigned long delay_us,
+ unsigned long wait_cnt)
+{
+ unsigned long cnt = 0;
+
+ while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+ cnt++ < wait_cnt)
+ usleep_range(delay_us, delay_us * 2);
+
+ if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+ dev_err(&hdev->pdev->dev,
+ "flr wait timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_RESET_WAIT_MS 500
-#define HCLGEVF_RESET_WAIT_CNT 20
- u32 val, cnt = 0;
+#define HCLGEVF_RESET_WAIT_US 20000
+#define HCLGEVF_RESET_WAIT_CNT 2000
+#define HCLGEVF_RESET_WAIT_TIMEOUT_US \
+ (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
+
+ u32 val;
+ int ret;
/* wait to check the hardware reset completion status */
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
- (cnt < HCLGEVF_RESET_WAIT_CNT)) {
- msleep(HCLGEVF_RESET_WAIT_MS);
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- cnt++;
- }
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
+
+ if (hdev->reset_type == HNAE3_FLR_RESET)
+ return hclgevf_flr_poll_timeout(hdev,
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_CNT);
+
+ ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
+ !(val & HCLGEVF_RST_ING_BITS),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
/* hardware completion status should be available by this time */
- if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
- dev_warn(&hdev->pdev->dev,
- "could'nt get reset done status from h/w, timeout!\n");
- return -EBUSY;
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could'nt get reset done status from h/w, timeout!\n");
+ return ret;
}
/* we will wait a bit more to let reset of the stack to complete. This
@@ -1145,10 +1247,12 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
int ret;
/* uninitialize the nic client */
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
/* re-initialize the hclge device */
- ret = hclgevf_init_hdev(hdev);
+ ret = hclgevf_reset_hdev(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
"hclge device re-init failed, VF is disabled!\n");
@@ -1156,22 +1260,60 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
}
/* bring up the nic client again */
- hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
return 0;
}
+static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
+{
+ int ret = 0;
+
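+ /* a VF function reset is requested from the PF over the mailbox;
+ * an FLR is driven by the PCI stack, so only mark HNAE3_FLR_DOWN
+ * here for hclgevf_flr_prepare() to observe
+ */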
+ switch (hdev->reset_type) {
+ case HNAE3_VF_FUNC_RESET:
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
+ 0, true, NULL, sizeof(u8));
+ break;
+ case HNAE3_FLR_RESET:
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ break;
+ default:
+ break;
+ }
+
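+ /* block command queue usage until it is re-initialized after reset */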
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
+ hdev->reset_type, ret);
+
+ return ret;
+}
+
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
+ /* Initialize ae_dev reset status as well, in case the enet layer
+ * wants to know if the device is undergoing a reset
+ */
+ ae_dev->reset_type = hdev->reset_type;
+ hdev->reset_count++;
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
rtnl_unlock();
+ ret = hclgevf_reset_prepare_wait(hdev);
+ if (ret)
+ goto err_reset;
+
/* check if VF could successfully fetch the hardware reset completion
* status from the hardware
*/
@@ -1181,58 +1323,121 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
dev_err(&hdev->pdev->dev,
"VF failed(=%d) to fetch H/W reset completion status\n",
ret);
-
- dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
- rtnl_lock();
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-
- rtnl_unlock();
- return ret;
+ goto err_reset;
}
rtnl_lock();
/* now, re-initialize the nic client and ae device*/
ret = hclgevf_reset_stack(hdev);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
+ goto err_reset_lock;
+ }
/* bring up the nic to enable TX/RX again */
- hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
rtnl_unlock();
+ hdev->last_reset_time = jiffies;
+ ae_dev->reset_type = HNAE3_NONE_RESET;
+
return ret;
-}
+err_reset_lock:
+ rtnl_unlock();
+err_reset:
+ /* When a VF reset fails, only a higher-level reset asserted by the
+ * PF can recover it, so re-initialize the command queue so that the
+ * VF can still receive that higher-level reset event.
+ */
+ hclgevf_cmd_init(hdev);
+ dev_err(&hdev->pdev->dev, "failed to reset VF\n");
-static int hclgevf_do_reset(struct hclgevf_dev *hdev)
-{
- int status;
- u8 respmsg;
+ return ret;
+}
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
- 0, false, &respmsg, sizeof(u8));
- if (status)
- dev_err(&hdev->pdev->dev,
- "VF reset request to PF failed(=%d)\n", status);
+static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
+ unsigned long *addr)
+{
+ enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_VF_RESET, addr)) {
+ rst_level = HNAE3_VF_RESET;
+ clear_bit(HNAE3_VF_RESET, addr);
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
+ rst_level = HNAE3_VF_FULL_RESET;
+ clear_bit(HNAE3_VF_FULL_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_PF_FUNC_RESET;
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_FUNC_RESET;
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
- return status;
+ return rst_level;
}
static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_handle *handle)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclgevf_dev *hdev = ae_dev->priv;
dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
- handle->reset_level = HNAE3_VF_RESET;
+ if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclgevf_get_reset_level(hdev,
+ &hdev->default_reset_request);
+ else
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
- handle->last_reset_time = jiffies;
+ hdev->last_reset_time = jiffies;
+}
+
+static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGEVF_FLR_WAIT_MS 100
+#define HCLGEVF_FLR_WAIT_CNT 50
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int cnt = 0;
+
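+ /* request an FLR-type reset, then wait for the reset task to bring
+ * the device down before the PCI core proceeds with the FLR
+ */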
+ clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+ hclgevf_reset_event(hdev->pdev, NULL);
+
+ while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+ cnt++ < HCLGEVF_FLR_WAIT_CNT)
+ msleep(HCLGEVF_FLR_WAIT_MS);
+
+ if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+ dev_err(&hdev->pdev->dev,
+ "flr wait down timeout: %d\n", cnt);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -1321,9 +1526,15 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
hdev->reset_attempts = 0;
- ret = hclgevf_reset(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
+ hdev->last_reset_time = jiffies;
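+ /* drain every pending reset request, highest priority level first */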
+ while ((hdev->reset_type =
+ hclgevf_get_reset_level(hdev, &hdev->reset_pending))
+ != HNAE3_NONE_RESET) {
+ ret = hclgevf_reset(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF stack reset failed %d.\n", ret);
+ }
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of below happens:
@@ -1352,19 +1563,17 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
if (hdev->reset_attempts > 3) {
/* prepare for full reset of stack + pcie interface */
- hdev->nic.reset_level = HNAE3_VF_FULL_RESET;
+ set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- /* request PF for resetting this VF via mailbox */
- ret = hclgevf_do_reset(hdev);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "VF rst fail, stack will call\n");
+ set_bit(hdev->reset_level, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
+ hclgevf_reset_task_schedule(hdev);
}
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
@@ -1386,6 +1595,28 @@ static void hclgevf_mailbox_service_task(struct work_struct *work)
clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}
+static void hclgevf_keep_alive_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
+
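+ /* re-arm every 2 seconds; the work item sends a keep-alive mailbox
+ * message to the PF
+ */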
+ schedule_work(&hdev->keep_alive_task);
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+}
+
+static void hclgevf_keep_alive_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+ u8 respmsg;
+ int ret;
+
+ hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
+ 0, false, &respmsg, sizeof(u8));
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF sends keep alive cmd failed(=%d)\n", ret);
+}
+
static void hclgevf_service_task(struct work_struct *work)
{
struct hclgevf_dev *hdev;
@@ -1407,24 +1638,37 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}
-static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ u32 *clearval)
{
- u32 cmdq_src_reg;
+ u32 cmdq_src_reg, rst_ing_reg;
/* fetch the events from their corresponding regs */
cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
+ rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev,
+ "receive reset interrupt 0x%x!\n", rst_ing_reg);
+ set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGEVF_VECTOR0_EVENT_RST;
+ }
+
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
*clearval = cmdq_src_reg;
- return true;
+ return HCLGEVF_VECTOR0_EVENT_MBX;
}
dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
- return false;
+ return HCLGEVF_VECTOR0_EVENT_OTHER;
}
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
@@ -1434,19 +1678,28 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
+ enum hclgevf_evt_cause event_cause;
struct hclgevf_dev *hdev = data;
u32 clearval;
hclgevf_enable_vector(&hdev->misc_vector, false);
- if (!hclgevf_check_event_cause(hdev, &clearval))
- goto skip_sched;
-
- hclgevf_mbx_handler(hdev);
+ event_cause = hclgevf_check_evt_cause(hdev, &clearval);
- hclgevf_clear_event_cause(hdev, clearval);
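+ /* dispatch: reset events are deferred to the reset task, mailbox
+ * events are handled inline
+ */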
+ switch (event_cause) {
+ case HCLGEVF_VECTOR0_EVENT_RST:
+ hclgevf_reset_task_schedule(hdev);
+ break;
+ case HCLGEVF_VECTOR0_EVENT_MBX:
+ hclgevf_mbx_handler(hdev);
+ break;
+ default:
+ break;
+ }
-skip_sched:
- hclgevf_enable_vector(&hdev->misc_vector, true);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
+ hclgevf_clear_event_cause(hdev, clearval);
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+ }
return IRQ_HANDLED;
}
@@ -1468,7 +1721,7 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
- struct hclgevf_dev *hdev = ae_dev->priv;
+ struct hclgevf_dev *hdev;
hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
if (!hdev)
@@ -1504,6 +1757,29 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
return 0;
}
+static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+{
+ struct hclgevf_cfg_gro_status_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
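+ /* GRO on/off is programmed through a generic command descriptor;
+ * silently succeed on hardware without GRO support
+ */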
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+ false);
+ req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF GRO hardware config cmd failed, ret = %d.\n", ret);
+
+ return ret;
+}
+
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
@@ -1564,23 +1840,22 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
false);
}
-static int hclgevf_ae_start(struct hnae3_handle *handle)
+static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, queue_id;
- for (i = 0; i < kinfo->num_tqps; i++) {
- /* ring enable */
- queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
- if (queue_id < 0) {
- dev_warn(&hdev->pdev->dev,
- "Get invalid queue id, ignore it\n");
- continue;
- }
-
- hclgevf_tqp_enable(hdev, queue_id, 0, true);
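+ /* start or stop the periodic service task; invoked by the enet
+ * layer through the set_timer_task op
+ */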
+ if (enable) {
+ mod_timer(&hdev->service_timer, jiffies + HZ);
+ } else {
+ del_timer_sync(&hdev->service_timer);
+ cancel_work_sync(&hdev->service_task);
+ clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
}
+}
+
+static int hclgevf_ae_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
@@ -1588,45 +1863,59 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
hclgevf_request_link_info(hdev);
clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- mod_timer(&hdev->service_timer, jiffies + HZ);
return 0;
}
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, queue_id;
+ int i;
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- for (i = 0; i < kinfo->num_tqps; i++) {
- /* Ring disable */
- queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
- if (queue_id < 0) {
- dev_warn(&hdev->pdev->dev,
- "Get invalid queue id, ignore it\n");
- continue;
- }
-
- hclgevf_tqp_enable(hdev, queue_id, 0, false);
- }
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
+ hclgevf_reset_tqp(handle, i);
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
- del_timer_sync(&hdev->service_timer);
- cancel_work_sync(&hdev->service_task);
- clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state);
hclgevf_update_link_status(hdev, 0);
}
-static void hclgevf_state_init(struct hclgevf_dev *hdev)
+static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data;
+
+ msg_data = alive ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
+ 0, &msg_data, 1, false, NULL, 0);
+}
+
+static int hclgevf_client_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+ return hclgevf_set_alive(handle, true);
+}
+
+static void hclgevf_client_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_set_alive(handle, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "%s failed %d\n", __func__, ret);
+ del_timer_sync(&hdev->keep_alive_timer);
+ cancel_work_sync(&hdev->keep_alive_task);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
/* setup tasks for the MBX */
INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
@@ -1668,10 +1957,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
int vectors;
int i;
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
vectors = pci_alloc_irq_vectors(pdev,
hdev->roce_base_msix_offset + 1,
@@ -1710,6 +1995,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(int), GFP_KERNEL);
if (!hdev->vector_irq) {
+ devm_kfree(&pdev->dev, hdev->vector_status);
pci_free_irq_vectors(pdev);
return -ENOMEM;
}
@@ -1721,6 +2007,8 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
+ devm_kfree(&pdev->dev, hdev->vector_status);
+ devm_kfree(&pdev->dev, hdev->vector_irq);
pci_free_irq_vectors(pdev);
}
@@ -1728,10 +2016,6 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
int ret = 0;
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
hclgevf_get_misc_vector(hdev);
ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
@@ -1861,14 +2145,6 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
struct hclgevf_hw *hw;
int ret;
- /* check if we need to skip initialization of pci. This will happen if
- * device is undergoing VF reset. Otherwise, we would need to
- * re-initialize pci interface again i.e. when device is not going
- * through *any* reset or actually undergoing full reset.
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
@@ -1957,23 +2233,98 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
return 0;
}
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = 0;
+
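+ /* a full VF reset also resets the PCI function, so tear down the
+ * MSI/misc IRQ setup before rebuilding it below
+ */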
+ if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
+ test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ pci_set_master(pdev);
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed(%d) to init MSI/MSI-X\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret) {
+ hclgevf_uninit_msi(hdev);
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+ ret);
+ return ret;
+ }
+
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ return ret;
+}
+
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int ret;
- /* check if device is on-going full reset(i.e. pcie as well) */
- if (hclgevf_dev_ongoing_full_reset(hdev)) {
- dev_warn(&pdev->dev, "device is going full reset\n");
- hclgevf_uninit_hdev(hdev);
+ ret = hclgevf_pci_reset(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci reset failed %d\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_cmd_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "cmd failed %d\n", ret);
+ return ret;
}
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ return ret;
+ }
+
+ dev_info(&hdev->pdev->dev, "Reset done\n");
+
+ return 0;
+}
+
+static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
ret = hclgevf_pci_init(hdev);
if (ret) {
dev_err(&pdev->dev, "PCI initialization failed\n");
return ret;
}
+ ret = hclgevf_cmd_queue_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+ goto err_cmd_queue_init;
+ }
+
ret = hclgevf_cmd_init(hdev);
if (ret)
goto err_cmd_init;
@@ -1983,16 +2334,17 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"Query vf status error, ret = %d.\n", ret);
- goto err_query_vf;
+ goto err_cmd_init;
}
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
- goto err_query_vf;
+ goto err_cmd_init;
}
hclgevf_state_init(hdev);
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
@@ -2001,6 +2353,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_misc_irq_init;
}
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+
ret = hclgevf_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
@@ -2019,6 +2373,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ goto err_config;
+
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -2034,6 +2392,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ hdev->last_reset_time = jiffies;
pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
return 0;
@@ -2043,25 +2402,31 @@ err_config:
err_misc_irq_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
-err_query_vf:
- hclgevf_cmd_uninit(hdev);
err_cmd_init:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_queue_init:
hclgevf_pci_uninit(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
}
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
hclgevf_state_uninit(hdev);
- hclgevf_misc_irq_uninit(hdev);
- hclgevf_cmd_uninit(hdev);
- hclgevf_uninit_msi(hdev);
+
+ if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ }
+
hclgevf_pci_uninit(hdev);
+ hclgevf_cmd_uninit(hdev);
}
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
+ struct hclgevf_dev *hdev;
int ret;
ret = hclgevf_alloc_hdev(ae_dev);
@@ -2071,10 +2436,16 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclgevf_init_hdev(ae_dev->priv);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "hclge device initialization failed\n");
+ return ret;
+ }
- return ret;
+ hdev = ae_dev->priv;
+ timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
+ INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
+
+ return 0;
}
static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
@@ -2151,6 +2522,13 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
hdev->hw.mac.duplex = duplex;
}
+static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_config_gro(hdev, enable);
+}
+
static void hclgevf_get_media_type(struct hnae3_handle *handle,
u8 *media_type)
{
@@ -2159,13 +2537,104 @@ static void hclgevf_get_media_type(struct hnae3_handle *handle,
*media_type = hdev->hw.mac.media_type;
}
+static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+}
+
+static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->reset_count;
+}
+
+#define MAX_SEPARATE_NUM 4
+#define SEPARATOR_VALUE 0xFFFFFFFF
+#define REG_NUM_PER_LINE 4
+#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
+
+static int hclgevf_get_regs_len(struct hnae3_handle *handle)
+{
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
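+ /* the "+ 1" per block accounts for the separator values appended
+ * after each register block in hclgevf_get_regs()
+ */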
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+
+ return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
+ tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
+}
+
+static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, j, reg_um, separator_num;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+
+ /* fetch per-VF register values from the VF PCIe register space */
+ reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (j = 0; j < hdev->num_tqps; j++) {
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ 0x200 * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+
+ reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ 4 * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+}
+
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
+ .flr_prepare = hclgevf_flr_prepare,
+ .flr_done = hclgevf_flr_done,
.init_client_instance = hclgevf_init_client_instance,
.uninit_client_instance = hclgevf_uninit_client_instance,
.start = hclgevf_ae_start,
.stop = hclgevf_ae_stop,
+ .client_start = hclgevf_client_start,
+ .client_stop = hclgevf_client_stop,
.map_ring_to_vector = hclgevf_map_ring_to_vector,
.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
.get_vector = hclgevf_get_vector,
@@ -2193,11 +2662,21 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_vlan_filter = hclgevf_set_vlan_filter,
.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
.reset_event = hclgevf_reset_event,
+ .set_default_reset_request = hclgevf_set_def_reset_request,
.get_channels = hclgevf_get_channels,
.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
+ .get_regs_len = hclgevf_get_regs_len,
+ .get_regs = hclgevf_get_regs,
.get_status = hclgevf_get_status,
.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
.get_media_type = hclgevf_get_media_type,
+ .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
+ .ae_dev_resetting = hclgevf_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
+ .set_gro_en = hclgevf_gro_en,
+ .set_mtu = hclgevf_set_mtu,
+ .get_global_queue_id = hclgevf_get_qid_global,
+ .set_timer_task = hclgevf_set_timer_task,
};
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index aed241e8ffab..787bc06944e5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -27,15 +27,77 @@
#define HCLGEVF_VECTOR_REG_OFFSET 0x4
#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
+/* bar registers for cmdq */
+#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000
+#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004
+#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008
+#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010
+#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014
+#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018
+#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C
+#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020
+#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024
+#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028
+#define HCLGEVF_CMDQ_INTR_SRC_REG 0x27100
+#define HCLGEVF_CMDQ_INTR_STS_REG 0x27104
+#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
+#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
+
+/* bar registers for common func */
+#define HCLGEVF_GRO_EN_REG 0x28000
+
+/* bar registers for rcb */
+#define HCLGEVF_RING_RX_ADDR_L_REG 0x80000
+#define HCLGEVF_RING_RX_ADDR_H_REG 0x80004
+#define HCLGEVF_RING_RX_BD_NUM_REG 0x80008
+#define HCLGEVF_RING_RX_BD_LENGTH_REG 0x8000C
+#define HCLGEVF_RING_RX_MERGE_EN_REG 0x80014
+#define HCLGEVF_RING_RX_TAIL_REG 0x80018
+#define HCLGEVF_RING_RX_HEAD_REG 0x8001C
+#define HCLGEVF_RING_RX_FBD_NUM_REG 0x80020
+#define HCLGEVF_RING_RX_OFFSET_REG 0x80024
+#define HCLGEVF_RING_RX_FBD_OFFSET_REG 0x80028
+#define HCLGEVF_RING_RX_STASH_REG 0x80030
+#define HCLGEVF_RING_RX_BD_ERR_REG 0x80034
+#define HCLGEVF_RING_TX_ADDR_L_REG 0x80040
+#define HCLGEVF_RING_TX_ADDR_H_REG 0x80044
+#define HCLGEVF_RING_TX_BD_NUM_REG 0x80048
+#define HCLGEVF_RING_TX_PRIORITY_REG 0x8004C
+#define HCLGEVF_RING_TX_TC_REG 0x80050
+#define HCLGEVF_RING_TX_MERGE_EN_REG 0x80054
+#define HCLGEVF_RING_TX_TAIL_REG 0x80058
+#define HCLGEVF_RING_TX_HEAD_REG 0x8005C
+#define HCLGEVF_RING_TX_FBD_NUM_REG 0x80060
+#define HCLGEVF_RING_TX_OFFSET_REG 0x80064
+#define HCLGEVF_RING_TX_EBD_NUM_REG 0x80068
+#define HCLGEVF_RING_TX_EBD_OFFSET_REG 0x80070
+#define HCLGEVF_RING_TX_BD_ERR_REG 0x80074
+#define HCLGEVF_RING_EN_REG 0x80090
+
+/* bar registers for tqp interrupt */
+#define HCLGEVF_TQP_INTR_CTRL_REG 0x20000
+#define HCLGEVF_TQP_INTR_GL0_REG 0x20100
+#define HCLGEVF_TQP_INTR_GL1_REG 0x20200
+#define HCLGEVF_TQP_INTR_GL2_REG 0x20300
+#define HCLGEVF_TQP_INTR_RL_REG 0x20900
+
/* Vector0 interrupt CMDQ event source register(RW) */
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+/* RST register bits for RESET event */
+#define HCLGEVF_VECTOR0_RST_INT_B 2
#define HCLGEVF_TQP_RESET_TRY_TIMES 10
/* Reset related Registers */
-#define HCLGEVF_FUN_RST_ING 0x20C00
-#define HCLGEVF_FUN_RST_ING_B 0
+#define HCLGEVF_RST_ING 0x20C00
+#define HCLGEVF_FUN_RST_ING_BIT BIT(0)
+#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5)
+#define HCLGEVF_CORE_RST_ING_BIT BIT(6)
+#define HCLGEVF_IMP_RST_ING_BIT BIT(7)
+#define HCLGEVF_RST_ING_BITS \
+ (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
+ HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
@@ -54,17 +116,25 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+enum hclgevf_evt_cause {
+ HCLGEVF_VECTOR0_EVENT_RST,
+ HCLGEVF_VECTOR0_EVENT_MBX,
+ HCLGEVF_VECTOR0_EVENT_OTHER,
+};
+
/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
HCLGEVF_STATE_DOWN,
HCLGEVF_STATE_DISABLED,
+ HCLGEVF_STATE_IRQ_INITED,
/* task states */
HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
+ HCLGEVF_STATE_CMD_DISABLE,
};
#define HCLGEVF_MPF_ENBALE 1
@@ -145,10 +215,17 @@ struct hclgevf_dev {
struct hclgevf_misc_vector misc_vector;
struct hclgevf_rss_cfg rss_cfg;
unsigned long state;
+ unsigned long flr_state;
+ unsigned long default_reset_request;
+ unsigned long last_reset_time;
+ enum hnae3_reset_type reset_level;
+ unsigned long reset_pending;
+ enum hnae3_reset_type reset_type;
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
unsigned long reset_state; /* requested, pending */
+ unsigned long reset_count; /* number of resets completed */
u32 reset_attempts;
u32 fw_version;
@@ -178,7 +255,9 @@ struct hclgevf_dev {
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
struct timer_list service_timer;
+ struct timer_list keep_alive_timer;
struct work_struct service_task;
+ struct work_struct keep_alive_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -192,18 +271,9 @@ struct hclgevf_dev {
u32 flag;
};
-static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
-{
- return (hdev &&
- (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
- (hdev->nic.reset_level == HNAE3_VF_RESET));
-}
-
-static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
+static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
{
- return (hdev &&
- (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
- (hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
+ return !!hdev->reset_pending;
}
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index e9d5a4f96304..84653f58b2d1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -26,7 +26,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
u8 *resp_data, u16 resp_len)
{
#define HCLGEVF_MAX_TRY_TIMES 500
-#define HCLGEVF_SLEEP_USCOEND 1000
+#define HCLGEVF_SLEEP_USECOND 1000
struct hclgevf_mbx_resp_status *mbx_resp;
u16 r_code0, r_code1;
int i = 0;
@@ -40,7 +40,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
}
while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
- udelay(HCLGEVF_SLEEP_USCOEND);
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ return -EIO;
+
+ usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
i++;
}
@@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
crq = &hdev->hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev, "vf crq need init\n");
+ return;
+ }
+
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
@@ -233,6 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
+ enum hnae3_reset_type reset_type;
u16 link_status;
u16 *msg_q;
u8 duplex;
@@ -248,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
/* process all the async queue messages */
while (tail != hdev->arq.head) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev,
+ "vf crq need init in async\n");
+ return;
+ }
+
msg_q = hdev->arq.msg_q[hdev->arq.head];
switch (msg_q[0]) {
@@ -267,7 +282,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this stack should
* eventually be re-initialized.
*/
- hdev->nic.reset_level = HNAE3_VF_RESET;
+ reset_type = le16_to_cpu(msg_q[1]);
+ set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 097b5502603f..d1a7d2522d82 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -50,6 +50,8 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_GET_LINK_STATE = 24,
+ HINIC_PORT_CMD_SET_RX_CSUM = 26,
+
HINIC_PORT_CMD_SET_PORT_STATE = 41,
HINIC_PORT_CMD_FWCTXT_INIT = 69,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index f92f1bf3901a..1dfa7eb05c10 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -74,12 +74,6 @@
((void *)((cmdq_pages)->shadow_page_vaddr) \
+ (wq)->block_idx * CMDQ_BLOCK_SIZE)
-#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
- (wq)->wqebb_size)
-
-#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \
- & ((wq)->num_q_pages - 1))
-
#define WQ_PAGE_ADDR(wq, idx) \
((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
@@ -93,6 +87,17 @@
(((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
/ (wq)->max_wqe_size)
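+/* shift-based replacements for the old divide/modulo macros; valid
+ * because wqebb size and wqebbs-per-page are powers of 2
+ */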
+static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
+{
+ return (idx & (wq->num_wqebbs_per_page - 1)) << wq->wqebb_size_shift;
+}
+
+static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
+{
+ return (idx >> wq->wqebbs_per_page_shift) & (wq->num_q_pages - 1);
+}
+
/**
* queue_alloc_page - allocate page for Queue
* @hwif: HW interface for allocating DMA
@@ -513,10 +518,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
struct hinic_hwif *hwif = wqs->hwif;
struct pci_dev *pdev = hwif->pdev;
u16 num_wqebbs_per_page;
+ u16 wqebb_size_shift;
int err;
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+ if (!is_power_of_2(wqebb_size)) {
+ dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}
@@ -530,9 +536,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
return -EINVAL;
}
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+ wqebb_size_shift = ilog2(wqebb_size);
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+ >> wqebb_size_shift;
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+ if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
@@ -550,7 +558,8 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
wq->q_depth = q_depth;
wq->max_wqe_size = max_wqe_size;
wq->num_wqebbs_per_page = num_wqebbs_per_page;
-
+ wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
+ wq->wqebb_size_shift = wqebb_size_shift;
wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
wq->block_paddr = WQ_BASE_PADDR(wqs, wq);
@@ -604,11 +613,13 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
u16 q_depth, u16 max_wqe_size)
{
struct pci_dev *pdev = hwif->pdev;
+ u16 num_wqebbs_per_page_shift;
u16 num_wqebbs_per_page;
+ u16 wqebb_size_shift;
int i, j, err = -ENOMEM;
- if (wqebb_size == 0) {
- dev_err(&pdev->dev, "wqebb_size must be > 0\n");
+ if (!is_power_of_2(wqebb_size)) {
+ dev_err(&pdev->dev, "wqebb_size must be power of 2\n");
return -EINVAL;
}
@@ -622,9 +633,11 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
return -EINVAL;
}
- num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;
+ wqebb_size_shift = ilog2(wqebb_size);
+ num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size)
+ >> wqebb_size_shift;
- if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
+ if (!is_power_of_2(num_wqebbs_per_page)) {
dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
return -EINVAL;
}
@@ -636,6 +649,7 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
return err;
}
+ num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page);
for (i = 0; i < cmdq_blocks; i++) {
wq[i].hwif = hwif;
@@ -647,7 +661,8 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
wq[i].q_depth = q_depth;
wq[i].max_wqe_size = max_wqe_size;
wq[i].num_wqebbs_per_page = num_wqebbs_per_page;
-
+ wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift;
+ wq[i].wqebb_size_shift = wqebb_size_shift;
wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);
@@ -741,7 +756,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
*prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));
- num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;
if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
atomic_add(num_wqebbs, &wq->delta);
@@ -795,7 +810,8 @@ void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
**/
void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+ >> wq->wqebb_size_shift;
atomic_add(num_wqebbs, &wq->cons_idx);
@@ -813,7 +829,8 @@ void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
u16 *cons_idx)
{
- int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+ int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size)
+ >> wq->wqebb_size_shift;
u16 curr_cons_idx, end_cons_idx;
int curr_pg, end_pg;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 9b66545ba563..0a936cd6709b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -39,7 +39,8 @@ struct hinic_wq {
u16 q_depth;
u16 max_wqe_size;
u16 num_wqebbs_per_page;
-
+ u16 wqebbs_per_page_shift;
+ u16 wqebb_size_shift;
/* The addresses are 64 bit in the HW */
u64 block_paddr;
void **shadow_block_vaddr;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index 9754d6ed5f4a..138941527872 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -170,6 +170,10 @@
#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1
+#define HINIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT 0
+
+#define HINIC_RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU
+
#define HINIC_RQ_CQE_STATUS_GET(val, member) \
(((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \
HINIC_RQ_CQE_STATUS_##member##_MASK)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index fdf2bdb6b0d0..6d48dc62a44b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -600,9 +600,6 @@ static int add_mac_addr(struct net_device *netdev, const u8 *addr)
u16 vid = 0;
int err;
- if (!is_valid_ether_addr(addr))
- return -EADDRNOTAVAIL;
-
netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
@@ -726,6 +723,7 @@ static void set_rx_mode(struct work_struct *work)
{
struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work);
struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work);
+ struct netdev_hw_addr *ha;
netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n");
@@ -733,6 +731,9 @@ static void set_rx_mode(struct work_struct *work)
__dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
__dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr);
+
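+ /* push each address on the current multicast list to the device */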
+ netdev_for_each_mc_addr(ha, nic_dev->netdev)
+ add_mac_addr(nic_dev->netdev, ha->addr);
}
static void hinic_set_rx_mode(struct net_device *netdev)
@@ -806,7 +807,8 @@ static const struct net_device_ops hinic_netdev_ops = {
static void netdev_features_init(struct net_device *netdev)
{
netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
netdev->vlan_features = netdev->hw_features;
@@ -869,12 +871,16 @@ static int set_features(struct hinic_dev *nic_dev,
netdev_features_t features, bool force_change)
{
netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
+ u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
int err = 0;
if (changed & NETIF_F_TSO)
err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
+ if (changed & NETIF_F_RXCSUM)
+ err = hinic_set_rx_csum_offload(nic_dev, csum_en);
+
return err;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 7575a7d3bd9f..122c93597268 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -409,3 +409,33 @@ int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
return 0;
}
+
+int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en)
+{
+ struct hinic_checksum_offload rx_csum_cfg = {0};
+ struct hinic_hwdev *hwdev = nic_dev->hwdev;
+ struct hinic_hwif *hwif;
+ struct pci_dev *pdev;
+ u16 out_size;
+ int err;
+
+ if (!hwdev)
+ return -EINVAL;
+
+ hwif = hwdev->hwif;
+ pdev = hwif->pdev;
+ rx_csum_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+ rx_csum_cfg.rx_csum_offload = en;
+
+ err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_CSUM,
+ &rx_csum_cfg, sizeof(rx_csum_cfg),
+ &rx_csum_cfg, &out_size);
+ if (err || !out_size || rx_csum_cfg.status) {
+ dev_err(&pdev->dev,
+ "Failed to set rx csum offload, ret = %d\n",
+ rx_csum_cfg.status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index f6e3220fe28f..02d896eed455 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -183,6 +183,15 @@ struct hinic_tso_config {
u8 resv2[3];
};
+struct hinic_checksum_offload {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ u16 func_id;
+ u16 rsvd1;
+ u32 rx_csum_offload;
+};
int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
u16 vlan_id);
@@ -213,4 +222,5 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state);
+int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en);
#endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 4c0f7eda1166..0098b206e7e9 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -43,6 +43,7 @@
#define RX_IRQ_NO_LLI_TIMER 0
#define RX_IRQ_NO_CREDIT 0
#define RX_IRQ_NO_RESEND_TIMER 0
+#define HINIC_RX_BUFFER_WRITE 16
/**
* hinic_rxq_clean_stats - Clean the statistics of specific queue
@@ -89,6 +90,28 @@ static void rxq_stats_init(struct hinic_rxq *rxq)
hinic_rxq_clean_stats(rxq);
}
+static void rx_csum(struct hinic_rxq *rxq, u16 cons_idx,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rxq->netdev;
+ struct hinic_rq_cqe *cqe;
+ struct hinic_rq *rq;
+ u32 csum_err;
+ u32 status;
+
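+ /* hardware reports per-packet checksum results in the CQE status word */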
+ rq = rxq->rq;
+ cqe = rq->cqe[cons_idx];
+ status = be32_to_cpu(cqe->status);
+ csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ if (!csum_err)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+}
/**
* rx_alloc_skb - allocate skb and map it to dma address
* @rxq: rx queue
@@ -209,7 +232,6 @@ skb_out:
hinic_rq_update(rxq->rq, prod_idx);
}
- tasklet_schedule(&rxq->rx_task);
return i;
}
@@ -237,17 +259,6 @@ static void free_all_rx_skbs(struct hinic_rxq *rxq)
}
/**
- * rx_alloc_task - tasklet for queue allocation
- * @data: rx queue
- **/
-static void rx_alloc_task(unsigned long data)
-{
- struct hinic_rxq *rxq = (struct hinic_rxq *)data;
-
- (void)rx_alloc_pkts(rxq);
-}
-
-/**
* rx_recv_jumbo_pkt - Rx handler for jumbo pkt
* @rxq: rx queue
* @head_skb: the first skb in the list
@@ -311,6 +322,7 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
u64 pkt_len = 0, rx_bytes = 0;
struct hinic_rq_wqe *rq_wqe;
+ unsigned int free_wqebbs;
int num_wqes, pkts = 0;
struct hinic_sge sge;
struct sk_buff *skb;
@@ -328,6 +340,8 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));
+ rx_csum(rxq, ci, skb);
+
prefetch(skb->data);
pkt_len = sge.len;
@@ -352,8 +366,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
rx_bytes += pkt_len;
}
- if (pkts)
- tasklet_schedule(&rxq->rx_task); /* rx_alloc_pkts */
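+ /* refill rx buffers synchronously once enough WQEBBs are free,
+ * replacing the old refill tasklet
+ */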
+ free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
+ if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
+ rx_alloc_pkts(rxq);
u64_stats_update_begin(&rxq->rxq_stats.syncp);
rxq->rxq_stats.pkts += pkts;
@@ -470,8 +485,6 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
sprintf(rxq->irq_name, "hinic_rxq%d", qp->q_id);
- tasklet_init(&rxq->rx_task, rx_alloc_task, (unsigned long)rxq);
-
pkts = rx_alloc_pkts(rxq);
if (!pkts) {
err = -ENOMEM;
@@ -488,7 +501,6 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
err_req_rx_irq:
err_rx_pkts:
- tasklet_kill(&rxq->rx_task);
free_all_rx_skbs(rxq);
devm_kfree(&netdev->dev, rxq->irq_name);
return err;
@@ -504,7 +516,6 @@ void hinic_clean_rxq(struct hinic_rxq *rxq)
rx_free_irq(rxq);
- tasklet_kill(&rxq->rx_task);
free_all_rx_skbs(rxq);
devm_kfree(&netdev->dev, rxq->irq_name);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index 27c9af4b1c12..f8ed3fa6c8ee 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -23,6 +23,10 @@
#include "hinic_hw_qp.h"
+#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF
+#define HINIC_RX_CSUM_HW_CHECK_NONE BIT(7)
+#define HINIC_RX_CSUM_IPSU_OTHER_ERR BIT(8)
+
struct hinic_rxq_stats {
u64 pkts;
u64 bytes;
@@ -38,8 +42,6 @@ struct hinic_rxq {
char *irq_name;
- struct tasklet_struct rx_task;
-
struct napi_struct napi;
};
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 760b2ad8e295..209255495bc9 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2455,7 +2455,8 @@ static void emac_adjust_link(struct net_device *ndev)
dev->phy.duplex = phy->duplex;
dev->phy.pause = phy->pause;
dev->phy.asym_pause = phy->asym_pause;
- dev->phy.advertising = phy->advertising;
+ ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising,
+ phy->advertising);
}
static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
@@ -2490,7 +2491,8 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
phy_dev->autoneg = phy->autoneg;
phy_dev->speed = phy->speed;
phy_dev->duplex = phy->duplex;
- phy_dev->advertising = phy->advertising;
+ ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising,
+ phy->advertising);
return phy_start_aneg(phy_dev);
}
@@ -2624,7 +2626,8 @@ static int emac_dt_phy_connect(struct emac_instance *dev,
dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
dev->phy.def->name = dev->phy_dev->drv->name;
dev->phy.def->ops = &emac_dt_mdio_phy_ops;
- dev->phy.features = dev->phy_dev->supported;
+ ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
+ dev->phy_dev->supported);
dev->phy.address = dev->phy_dev->mdio.addr;
dev->phy.mode = dev->phy_dev->interface;
return 0;
diff --git a/drivers/net/ethernet/ibm/emac/emac.h b/drivers/net/ethernet/ibm/emac/emac.h
index e2f80cca9bed..0d2de6f67676 100644
--- a/drivers/net/ethernet/ibm/emac/emac.h
+++ b/drivers/net/ethernet/ibm/emac/emac.h
@@ -231,7 +231,7 @@ struct emac_regs {
#define EMAC_STACR_PHYE 0x00004000
#define EMAC_STACR_STAC_MASK 0x00003000
#define EMAC_STACR_STAC_READ 0x00001000
-#define EMAC_STACR_STAC_WRITE 0x00000800
+#define EMAC_STACR_STAC_WRITE 0x00002000
#define EMAC_STACR_OPBC_MASK 0x00000C00
#define EMAC_STACR_OPBC_50 0x00000000
#define EMAC_STACR_OPBC_66 0x00000400
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index c0203a0d5e3b..5ecbb1adcf3b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -773,11 +773,8 @@ static void release_napi(struct ibmvnic_adapter *adapter)
return;
for (i = 0; i < adapter->num_active_rx_napi; i++) {
- if (&adapter->napi[i]) {
- netdev_dbg(adapter->netdev,
- "Releasing napi[%d]\n", i);
- netif_napi_del(&adapter->napi[i]);
- }
+ netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
+ netif_napi_del(&adapter->napi[i]);
}
kfree(adapter->napi);
@@ -1859,7 +1856,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
- netdev_notify_peers(netdev);
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
netif_carrier_on(netdev);
@@ -1939,8 +1936,9 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rwi *rwi;
+ unsigned long flags;
- mutex_lock(&adapter->rwi_lock);
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
if (!list_empty(&adapter->rwi_list)) {
rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
@@ -1950,7 +1948,7 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
rwi = NULL;
}
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
return rwi;
}
@@ -2025,6 +2023,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
struct list_head *entry, *tmp_entry;
struct ibmvnic_rwi *rwi, *tmp;
struct net_device *netdev = adapter->netdev;
+ unsigned long flags;
int ret;
if (adapter->state == VNIC_REMOVING ||
@@ -2041,21 +2040,21 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}
- mutex_lock(&adapter->rwi_lock);
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
list_for_each(entry, &adapter->rwi_list) {
tmp = list_entry(entry, struct ibmvnic_rwi, list);
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset\n");
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ret = EBUSY;
goto err;
}
}
- rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
+ rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
if (!rwi) {
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ibmvnic_close(netdev);
ret = ENOMEM;
goto err;
@@ -2069,7 +2068,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
}
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
- mutex_unlock(&adapter->rwi_lock);
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
adapter->resetting = true;
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);
@@ -4759,7 +4758,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
- mutex_init(&adapter->rwi_lock);
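+ /* the rwi list is now manipulated from atomic context (note the
+ * GFP_ATOMIC allocation), so a spinlock replaces the mutex
+ */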
+ spin_lock_init(&adapter->rwi_lock);
adapter->resetting = false;
adapter->mac_change_pending = false;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 99c4f8d331ce..f2018dbebfa5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
struct tasklet_struct tasklet;
enum vnic_state state;
enum ibmvnic_reset_reason reset_reason;
- struct mutex rwi_lock;
+ spinlock_t rwi_lock;
struct list_head rwi_list;
struct work_struct ibmvnic_reset;
bool resetting;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 59e1bc0f609e..31fb76ee9d82 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -33,7 +33,7 @@ config E100
to identify the adapter.
More specific information on configuring the driver is in
- <file:Documentation/networking/e100.rst>.
+ <file:Documentation/networking/device_drivers/intel/e100.rst>.
To compile this driver as a module, choose M here. The module
will be called e100.
@@ -49,7 +49,7 @@ config E1000
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/e1000.rst>.
+ <file:Documentation/networking/device_drivers/intel/e1000.rst>.
To compile this driver as a module, choose M here. The module
will be called e1000.
@@ -69,7 +69,7 @@ config E1000E
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/e1000e.rst>.
+ <file:Documentation/networking/device_drivers/intel/e1000e.rst>.
To compile this driver as a module, choose M here. The module
will be called e1000e.
@@ -97,7 +97,7 @@ config IGB
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/igb.rst>.
+ <file:Documentation/networking/device_drivers/intel/igb.rst>.
To compile this driver as a module, choose M here. The module
will be called igb.
@@ -133,7 +133,7 @@ config IGBVF
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/igbvf.rst>.
+ <file:Documentation/networking/device_drivers/intel/igbvf.rst>.
To compile this driver as a module, choose M here. The module
will be called igbvf.
@@ -150,7 +150,7 @@ config IXGB
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ixgb.rst>.
+ <file:Documentation/networking/device_drivers/intel/ixgb.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgb.
@@ -159,6 +159,7 @@ config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
depends on PCI
select MDIO
+ select MDIO_DEVICE
imply PTP_1588_CLOCK
---help---
This driver supports Intel(R) 10GbE PCI Express family of
@@ -168,7 +169,7 @@ config IXGBE
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ixgbe.rst>.
+ <file:Documentation/networking/device_drivers/intel/ixgbe.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgbe.
@@ -220,7 +221,7 @@ config IXGBEVF
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ixgbevf.rst>.
+ <file:Documentation/networking/device_drivers/intel/ixgbevf.rst>.
To compile this driver as a module, choose M here. The module
will be called ixgbevf. MSI-X interrupt support is required
@@ -247,7 +248,7 @@ config I40E
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/i40e.rst>.
+ <file:Documentation/networking/device_drivers/intel/i40e.rst>.
To compile this driver as a module, choose M here. The module
will be called i40e.
@@ -282,7 +283,7 @@ config I40EVF
This driver was formerly named i40evf.
More specific information on configuring the driver is in
- <file:Documentation/networking/iavf.rst>.
+ <file:Documentation/networking/device_drivers/intel/iavf.rst>.
To compile this driver as a module, choose M here. The module
will be called iavf. MSI-X interrupt support is required
@@ -300,7 +301,7 @@ config ICE
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/ice.rst>.
+ <file:Documentation/networking/device_drivers/intel/ice.rst>.
To compile this driver as a module, choose M here. The module
will be called ice.
@@ -318,7 +319,7 @@ config FM10K
<http://support.intel.com>
More specific information on configuring the driver is in
- <file:Documentation/networking/fm10k.rst>.
+ <file:Documentation/networking/device_drivers/intel/fm10k.rst>.
To compile this driver as a module, choose M here. The module
will be called fm10k. MSI-X interrupt support is required
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 7c4b55482f72..0fd268070fb4 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1345,8 +1345,8 @@ static inline int e100_load_ucode_wait(struct nic *nic)
fw = e100_request_firmware(nic);
/* If it's NULL, then no ucode is required */
- if (!fw || IS_ERR(fw))
- return PTR_ERR(fw);
+ if (IS_ERR_OR_NULL(fw))
+ return PTR_ERR_OR_ZERO(fw);
if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
netif_err(nic, probe, nic->netdev,
@@ -2225,11 +2225,13 @@ static int e100_poll(struct napi_struct *napi, int budget)
e100_rx_clean(nic, &work_done, budget);
e100_tx_clean(nic);
- /* If budget not fully consumed, exit the polling mode */
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ /* If budget fully consumed, continue polling */
+ if (work_done == budget)
+ return budget;
+
+ /* only re-enable interrupt if stack agrees polling is really done */
+ if (likely(napi_complete_done(napi, work_done)))
e100_enable_irq(nic);
- }
return work_done;
}
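This is the first of several identical NAPI conversions in this series (e1000, e1000e, fm10k, i40e and iavf follow below). napi_complete_done() returns false when the stack is busy-polling the queue, in which case the interrupt must stay masked; returning the full budget keeps the driver in polled mode. A hedged sketch of the shared shape, with demo_* helpers standing in for the per-driver clean/enable routines:

	static int demo_poll(struct napi_struct *napi, int budget)
	{
		int work_done = demo_clean_rx(napi, budget);	/* assumed helper */

		/* budget exhausted: more work remains, stay in polling mode */
		if (work_done == budget)
			return budget;

		/* re-arm the IRQ only if the stack agrees polling is really done */
		if (likely(napi_complete_done(napi, work_done)))
			demo_enable_irq(napi);			/* assumed helper */

		return work_done;
	}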
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 43b6d3cec3b3..8fe9af0e2ab7 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3803,14 +3803,15 @@ static int e1000_clean(struct napi_struct *napi, int budget)
adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
- if (!tx_clean_complete)
- work_done = budget;
+ if (!tx_clean_complete || work_done == budget)
+ return budget;
- /* If budget not fully consumed, exit the polling mode */
- if (work_done < budget) {
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
if (likely(adapter->itr_setting & 3))
e1000_set_itr(adapter);
- napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c760dc72c520..be13227f1697 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -505,6 +505,9 @@ extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
void e1000e_ptp_remove(struct e1000_adapter *adapter);
+u64 e1000e_read_systim(struct e1000_adapter *adapter,
+ struct ptp_system_timestamp *sts);
+
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
return hw->phy.ops.reset(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 16a73bd9f4cb..308c006cb41d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2651,9 +2651,9 @@ err:
/**
* e1000e_poll - NAPI Rx polling callback
* @napi: struct associated with this polling callback
- * @weight: number of packets driver is allowed to process this poll
+ * @budget: number of packets driver is allowed to process this poll
**/
-static int e1000e_poll(struct napi_struct *napi, int weight)
+static int e1000e_poll(struct napi_struct *napi, int budget)
{
struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
napi);
@@ -2667,16 +2667,17 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
- adapter->clean_rx(adapter->rx_ring, &work_done, weight);
+ adapter->clean_rx(adapter->rx_ring, &work_done, budget);
- if (!tx_cleaned)
- work_done = weight;
+ if (!tx_cleaned || work_done == budget)
+ return budget;
- /* If weight not fully consumed, exit the polling mode */
- if (work_done < weight) {
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
if (adapter->itr_setting & 3)
e1000_set_itr(adapter);
- napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
if (adapter->msix_entries)
ew32(IMS, adapter->rx_ring->ims_val);
@@ -4319,13 +4320,16 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
/**
* e1000e_sanitize_systim - sanitize raw cycle counter reads
* @hw: pointer to the HW structure
- * @systim: time value read, sanitized and returned
+ * @systim: PHC time value read, sanitized and returned
+ * @sts: structure to hold system time before and after reading SYSTIML,
+ * may be NULL
*
* Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
* check to see that the time is incrementing at a reasonable
* rate and is a multiple of incvalue.
**/
-static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
+static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim,
+ struct ptp_system_timestamp *sts)
{
u64 time_delta, rem, temp;
u64 systim_next;
@@ -4335,7 +4339,9 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
/* latch SYSTIMH on read of SYSTIML */
+ ptp_read_system_prets(sts);
systim_next = (u64)er32(SYSTIML);
+ ptp_read_system_postts(sts);
systim_next |= (u64)er32(SYSTIMH) << 32;
time_delta = systim_next - systim;
@@ -4353,15 +4359,16 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
}
/**
- * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
- * @cc: cyclecounter structure
+ * e1000e_read_systim - read SYSTIM register
+ * @adapter: board private structure
+ * @sts: structure which will contain system time before and after reading
+ * SYSTIML, may be NULL
**/
-static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+u64 e1000e_read_systim(struct e1000_adapter *adapter,
+ struct ptp_system_timestamp *sts)
{
- struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
- cc);
struct e1000_hw *hw = &adapter->hw;
- u32 systimel, systimeh;
+ u32 systimel, systimel_2, systimeh;
u64 systim;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
@@ -4369,11 +4376,15 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
* will experience a huge non linear increment in the systime value
* to fix that we test for overflow and if true, we re-read systime.
*/
+ ptp_read_system_prets(sts);
systimel = er32(SYSTIML);
+ ptp_read_system_postts(sts);
systimeh = er32(SYSTIMH);
/* Is systimel so large that overflow is possible? */
if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
- u32 systimel_2 = er32(SYSTIML);
+ ptp_read_system_prets(sts);
+ systimel_2 = er32(SYSTIML);
+ ptp_read_system_postts(sts);
if (systimel > systimel_2) {
/* There was an overflow, read again SYSTIMH, and use
* systimel_2
@@ -4386,12 +4397,24 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
systim |= (u64)systimeh << 32;
if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
- systim = e1000e_sanitize_systim(hw, systim);
+ systim = e1000e_sanitize_systim(hw, systim, sts);
return systim;
}
/**
+ * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
+ * @cc: cyclecounter structure
+ **/
+static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
+ cc);
+
+ return e1000e_read_systim(adapter, NULL);
+}
+
+/**
* e1000_sw_init - Initialize general software structures (struct e1000_adapter)
* @adapter: board private structure to initialize
*
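The refactored e1000e_read_systim() above centralizes an overflow-safe read of a 64-bit counter split across two 32-bit registers whose hardware latching is unreliable. The idea, as a reduced sketch (rd_lo()/rd_hi() and INC_MASK are assumed stand-ins for the register accessors and per-tick increment mask):

	static u64 demo_read_systim(void)
	{
		u32 lo = rd_lo();
		u32 hi = rd_hi();

		/* If 'lo' was near wraparound, it may have overflowed between
		 * the two reads, leaving 'hi' one epoch ahead of 'lo'.
		 * Re-read and keep the consistent pair.
		 */
		if (lo >= 0xffffffffu - INC_MASK) {
			u32 lo2 = rd_lo();

			if (lo > lo2) {		/* wrapped: take the fresh pair */
				hi = rd_hi();
				lo = lo2;
			}
		}
		return ((u64)hi << 32) | lo;
	}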
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 37c76945ad9b..1a4c65d9feb4 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -161,22 +161,30 @@ static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp,
#endif/*CONFIG_E1000E_HWTS*/
/**
- * e1000e_phc_gettime - Reads the current time from the hardware clock
+ * e1000e_phc_gettimex - Reads the current time from the hardware clock and
+ * system clock
* @ptp: ptp clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec structure to hold the current PHC time
+ * @sts: structure to hold the current system time
*
* Read the timecounter and return the correct value in ns after converting
* it into a struct timespec.
**/
-static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int e1000e_phc_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
- u64 ns;
+ u64 cycles, ns;
spin_lock_irqsave(&adapter->systim_lock, flags);
- ns = timecounter_read(&adapter->tc);
+
+ /* NOTE: Non-monotonic SYSTIM readings may be returned */
+ cycles = e1000e_read_systim(adapter, sts);
+ ns = timecounter_cyc2time(&adapter->tc, cycles);
+
spin_unlock_irqrestore(&adapter->systim_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -232,9 +240,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
struct timespec64 ts;
+ u64 ns;
- adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
+ /* Update the timecounter */
+ ns = timecounter_read(&adapter->tc);
+ ts = ns_to_timespec64(ns);
e_dbg("SYSTIM overflow check at %lld.%09lu\n",
(long long) ts.tv_sec, ts.tv_nsec);
@@ -251,7 +262,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.pps = 0,
.adjfreq = e1000e_phc_adjfreq,
.adjtime = e1000e_phc_adjtime,
- .gettime64 = e1000e_phc_gettime,
+ .gettimex64 = e1000e_phc_gettimex,
.settime64 = e1000e_phc_settime,
.enable = e1000e_phc_enable,
};
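gettimex64 is the PTP core's extension of gettime64: the driver brackets its raw device-clock read with system-clock samples, letting user space (e.g. phc2sys) bound the uncertainty of the reading. ptp_read_system_prets()/ptp_read_system_postts() accept a NULL sts, which is why the same read routine can also back the plain cyclecounter path above. A minimal sketch, assuming a hypothetical read_phc_ns() hardware accessor:

	static int demo_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
				 struct ptp_system_timestamp *sts)
	{
		u64 ns;

		ptp_read_system_prets(sts);	/* system time just before the read */
		ns = read_phc_ns();		/* assumed hardware accessor */
		ptp_read_system_postts(sts);	/* system time just after the read */

		*ts = ns_to_timespec64(ns);
		return 0;
	}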
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5b2a50e5798f..6fd15a734324 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1465,11 +1465,11 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* all work done, exit the polling mode */
- napi_complete_done(napi, work_done);
-
- /* re-enable the q_vector */
- fm10k_qv_enable(q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ fm10k_qv_enable(q_vector);
return min(work_done, budget - 1);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 876cac317e79..8de9085bba9e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -122,6 +122,7 @@ enum i40e_state_t {
__I40E_MDD_EVENT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
+ __I40E_TIMEOUT_RECOVERY_PENDING,
__I40E_MISC_IRQ_REQUESTED,
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
@@ -146,6 +147,7 @@ enum i40e_state_t {
__I40E_CLIENT_SERVICE_REQUESTED,
__I40E_CLIENT_L2_CHANGE,
__I40E_CLIENT_RESET,
+ __I40E_VIRTCHNL_OP_PENDING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
@@ -494,7 +496,6 @@ struct i40e_pf {
#define I40E_HW_STOP_FW_LLDP BIT(16)
#define I40E_HW_PORT_ID_VALID BIT(17)
#define I40E_HW_RESTART_AUTONEG BIT(18)
-#define I40E_HW_STOPPABLE_FW_LLDP BIT(19)
u32 flags;
#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 501ee718177f..7ab61f6ebb5f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -588,6 +588,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ }
+ if (hw->mac.type == I40E_MAC_X722 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
}
/* Newer versions of firmware require lock when reading the NVM */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 80e3eec6134e..11506102471c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,7 +11,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0006
#define I40E_FW_API_VERSION_MINOR_X710 0x0007
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
@@ -20,6 +20,8 @@
/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
struct i40e_aq_desc {
__le16 flags;
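The hunks above move the "can the FW LLDP agent be stopped" decision into a hardware flag computed once at AdminQ init, so consumers test the flag instead of re-deriving (MAC type, API major, API minor) tuples everywhere. Distilled, the pattern is:

	/* set once, at init (values mirror the hunks above) */
	if (hw->mac.type == I40E_MAC_X722 &&
	    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722)
		hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;

	/* test everywhere else, e.g. before issuing the AQ command */
	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
		return I40E_ERR_DEVICE_NOT_SUPPORTED;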
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 85f75b5978fc..97a9b1fb4763 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3723,6 +3723,9 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
i40e_status status;
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9f8464f80783..a6bc7847346b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -906,6 +906,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ks->base.speed = SPEED_100;
break;
default:
+ ks->base.speed = SPEED_UNKNOWN;
break;
}
ks->base.duplex = DUPLEX_FULL;
@@ -1335,6 +1336,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
i40e_status status;
u8 aq_failures;
int err = 0;
+ u32 is_an;
/* Changing the port's flow control is not supported if this isn't the
* port's controlling PF
@@ -1347,15 +1349,14 @@ static int i40e_set_pauseparam(struct net_device *netdev,
if (vsi != pf->vsi[pf->lan_vsi])
return -EOPNOTSUPP;
- if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE)) {
+ is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
+ if (pause->autoneg != is_an) {
netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
return -EOPNOTSUPP;
}
/* If we have link and don't have autoneg */
- if (!test_bit(__I40E_DOWN, pf->state) &&
- !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
+ if (!test_bit(__I40E_DOWN, pf->state) && !is_an) {
/* Send message that it might not necessarily work */
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
}
@@ -1406,7 +1407,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
err = -EAGAIN;
}
- if (!test_bit(__I40E_DOWN, pf->state)) {
+ if (!test_bit(__I40E_DOWN, pf->state) && is_an) {
/* Give it a little more time to try to come back */
msleep(75);
if (!test_bit(__I40E_DOWN, pf->state))
@@ -2377,7 +2378,8 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
/* only magic packet is supported */
- if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+ if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
/* is this a new value? */
@@ -4659,14 +4661,15 @@ flags_complete:
return -EOPNOTSUPP;
/* If the driver detected FW LLDP was disabled on init, this flag could
- * be set, however we do not support _changing_ the flag if NPAR is
- * enabled or FW API version < 1.7. There are situations where older
- * FW versions/NPAR enabled PFs could disable LLDP, however we _must_
- * not allow the user to enable/disable LLDP with this flag on
- * unsupported FW versions.
+ * be set, however we do not support _changing_ the flag:
+ * - on XL710 if NPAR is enabled or FW API version < 1.7
+ * - on X722 with FW API version < 1.6
+ * There are situations where older FW versions/NPAR enabled PFs could
+ * disable LLDP, however we _must_ not allow the user to enable/disable
+ * LLDP with this flag on unsupported FW versions.
*/
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (!(pf->hw_features & I40E_HW_STOPPABLE_FW_LLDP)) {
+ if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
dev_warn(&pf->pdev->dev,
"Device does not support changing FW LLDP\n");
return -EOPNOTSUPP;
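The WoL check above uses the mask idiom: any requested wake bit outside the supported set is rejected in a single test, which avoids chaining per-flag comparisons (where a mistyped operator, such as a bitwise OR of two inequality tests, silently becomes always-true and rejects even the supported flag). The idiom generalizes to multiple supported bits; DEMO_SUPPORTED_WAKE here is hypothetical:

	#define DEMO_SUPPORTED_WAKE	(WAKE_MAGIC | WAKE_PHY)

	if (wol->wolopts & ~DEMO_SUPPORTED_WAKE)
		return -EOPNOTSUPP;	/* some requested wake bit is unsupported */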
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 21c2688d6308..4d40878e395a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -26,8 +26,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_MINOR 7
+#define DRV_VERSION_BUILD 6
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -338,6 +338,10 @@ static void i40e_tx_timeout(struct net_device *netdev)
(pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
return; /* don't do any new action before the next timeout */
+ /* don't kick off another recovery if one is already pending */
+ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
+ return;
+
if (tx_ring) {
head = i40e_get_head(tx_ring);
/* Read interrupt register */
@@ -1413,7 +1417,7 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
}
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->state);
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}
/**
@@ -1493,8 +1497,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
bool found = false;
int bkt;
- WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
- "Missing mac_filter_hash_lock\n");
+ lockdep_assert_held(&vsi->mac_filter_hash_lock);
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (ether_addr_equal(macaddr, f->macaddr)) {
__i40e_del_filter(vsi, f);
@@ -1543,17 +1546,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
/* Copy the address first, so that we avoid a possible race with
- * .set_rx_mode(). If we copy after changing the address in the filter
- * list, we might open ourselves to a narrow race window where
- * .set_rx_mode could delete our dev_addr filter and prevent traffic
- * from passing.
+ * .set_rx_mode().
+ * - Remove old address from MAC filter
+ * - Copy new address
+ * - Add new address to MAC filter
*/
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
- i40e_add_mac_filter(vsi, addr->sa_data);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ i40e_add_mac_filter(vsi, netdev->dev_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
@@ -9632,6 +9635,7 @@ end_core_reset:
clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}
/**
@@ -11332,16 +11336,15 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
- /* Stopping the FW LLDP engine is only supported on the
- * XL710 with a FW ver >= 1.7. Also, stopping FW LLDP
- * engine is not supported if NPAR is functioning on this
- * part
+ /* Stopping FW LLDP engine is supported on XL710 and X722
+ * starting from FW versions determined in i40e_init_adminq.
+ * Stopping the FW LLDP engine is not supported on XL710
+ * if NPAR is functioning so unset this hw flag in this case.
*/
if (pf->hw.mac.type == I40E_MAC_XL710 &&
- !pf->hw.func_caps.npar_enable &&
- (pf->hw.aq.api_maj_ver > 1 ||
- (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
- pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;
+ pf->hw.func_caps.npar_enable &&
+ (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
@@ -11682,6 +11685,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* @dev: the netdev being configured
* @nlh: RTNL message
* @flags: bridge flags
+ * @extack: netlink extended ack
*
* Inserts a new hardware bridge if not already created and
* enables the bridging mode requested (VEB or VEPA). If the
@@ -11694,7 +11698,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
**/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh,
- u16 flags)
+ u16 flags,
+ struct netlink_ext_ack *extack)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
@@ -12334,6 +12339,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
+ /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
+ netdev->neigh_priv_len = sizeof(u32) * 4;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
/* Setup netdev TC information */
@@ -14302,23 +14310,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (hw->bus.speed) {
case i40e_bus_speed_8000:
- strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_5000:
- strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_2500:
- strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
default:
break;
}
switch (hw->bus.width) {
case i40e_bus_width_pcie_x8:
- strncpy(width, "8", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "8", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x4:
- strncpy(width, "4", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "4", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x2:
- strncpy(width, "2", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "2", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x1:
- strncpy(width, "1", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "1", PCI_WIDTH_SIZE); break;
default:
break;
}
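The strncpy()-to-strlcpy() swap above matters for short destination buffers: strncpy() does not NUL-terminate when the source is at least as long as the size argument, while strlcpy() copies at most size - 1 bytes and always terminates. A sketch with a hypothetical four-byte buffer:

	char speed[4];

	strncpy(speed, "8.0GT", sizeof(speed));	/* 'speed' left unterminated */
	strlcpy(speed, "8.0GT", sizeof(speed));	/* speed = "8.0" plus '\0' */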
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 1199f0502d6d..5fb4353c742b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -28,19 +28,23 @@
* i40e_ptp_read - Read the PHC time from the device
* @pf: Board private structure
* @ts: timespec structure to hold the current time value
+ * @sts: structure to hold the system time before and after reading the PHC
*
* This function reads the PRTTSYN_TIME registers and stores them in a
* timespec. However, since the registers are 64 bits of nanoseconds, we must
* convert the result to a timespec before we can return.
**/
-static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts)
+static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct i40e_hw *hw = &pf->hw;
u32 hi, lo;
u64 ns;
/* The timer latches on the lowest register read. */
+ ptp_read_system_prets(sts);
lo = rd32(hw, I40E_PRTTSYN_TIME_L);
+ ptp_read_system_postts(sts);
hi = rd32(hw, I40E_PRTTSYN_TIME_H);
ns = (((u64)hi) << 32) | lo;
@@ -146,7 +150,7 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
mutex_lock(&pf->tmreg_lock);
- i40e_ptp_read(pf, &now);
+ i40e_ptp_read(pf, &now, NULL);
timespec64_add_ns(&now, delta);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
@@ -156,19 +160,21 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
/**
- * i40e_ptp_gettime - Get the time of the PHC
+ * i40e_ptp_gettimex - Get the time of the PHC
* @ptp: The PTP clock structure
* @ts: timespec structure to hold the current time value
+ * @sts: structure to hold the system time before and after reading the PHC
*
* Read the device clock and return the correct value on ns, after converting it
* into a timespec struct.
**/
-static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int i40e_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
mutex_lock(&pf->tmreg_lock);
- i40e_ptp_read(pf, ts);
+ i40e_ptp_read(pf, ts, sts);
mutex_unlock(&pf->tmreg_lock);
return 0;
@@ -694,7 +700,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
- strncpy(pf->ptp_caps.name, i40e_driver_name,
+ strlcpy(pf->ptp_caps.name, i40e_driver_name,
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
@@ -702,7 +708,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
pf->ptp_caps.pps = 0;
pf->ptp_caps.adjfreq = i40e_ptp_adjfreq;
pf->ptp_caps.adjtime = i40e_ptp_adjtime;
- pf->ptp_caps.gettime64 = i40e_ptp_gettime;
+ pf->ptp_caps.gettimex64 = i40e_ptp_gettimex;
pf->ptp_caps.settime64 = i40e_ptp_settime;
pf->ptp_caps.enable = i40e_ptp_feature_enable;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index aef3c89ee79c..a7e14e98889f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1559,24 +1559,6 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
}
/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
-{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
- if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
* i40e_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
@@ -1793,8 +1775,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* other fields within the skb.
**/
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb)
{
u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
@@ -1802,6 +1783,8 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
u32 tsynvalid = rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK;
u32 tsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
+ u8 rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
if (unlikely(tsynvalid))
i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn);
@@ -1812,6 +1795,13 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
skb_record_rx_queue(skb, rx_ring->queue_index);
+ if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) {
+ u16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ le16_to_cpu(vlan_tag));
+ }
+
/* modifies the skb - consumes the enet header */
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
@@ -2350,8 +2340,6 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
struct i40e_rx_buffer *rx_buffer;
union i40e_rx_desc *rx_desc;
unsigned int size;
- u16 vlan_tag;
- u8 rx_ptype;
u64 qword;
/* return some buffers to hardware, one at a time is too slow */
@@ -2444,18 +2432,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
-
/* populate checksum, VLAN, and protocol */
- i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
- le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
- i40e_receive_skb(rx_ring, skb, vlan_tag);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
skb = NULL;
/* update budget accounting */
@@ -2667,10 +2648,11 @@ tx_only:
if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
- /* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete_done(napi, work_done);
-
- i40e_update_enable_itr(vsi, q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ i40e_update_enable_itr(vsi, q_vector);
return min(work_done, budget - 1);
}
@@ -3473,6 +3455,8 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*
@@ -3526,6 +3510,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
u16 i = xdp_ring->next_to_use;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
+ void *data = xdpf->data;
u32 size = xdpf->len;
dma_addr_t dma;
@@ -3533,8 +3518,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
xdp_ring->tx_stats.tx_busy++;
return I40E_XDP_CONSUMED;
}
-
- dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
if (dma_mapping_error(xdp_ring->dev, dma))
return I40E_XDP_CONSUMED;
@@ -3652,8 +3636,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
- skb_tx_timestamp(skb);
-
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
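The Rx hunks above fold the ptype and VLAN-tag extraction into i40e_process_skb_fields() so the regular and AF_XDP zero-copy receive paths share one helper and call napi_gro_receive() directly, retiring the i40e_receive_skb() wrapper. The extraction itself is the usual little-endian mask/shift work over the writeback descriptor, roughly:

	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	u8 ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;

	if (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT))	/* VLAN tag present? */
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1));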
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 09809dffe399..8af0e99c6c0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -12,10 +12,7 @@ struct i40e_rx_buffer *i40e_clean_programming_status(
union i40e_rx_desc *rx_desc,
u64 qw);
void i40e_process_skb_fields(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype);
-void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag);
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb);
void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring);
void i40e_update_rx_stats(struct i40e_ring *rx_ring,
unsigned int total_rx_bytes,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 7df969c59855..2781ab91ca82 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -615,6 +615,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
u64 flags;
/* Used in set switch config AQ command */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index ac5698ed0b11..2ac23ebfbf31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1112,7 +1112,8 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
return I40E_ERR_PARAM;
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+ (allmulti || alluni)) {
dev_err(&pf->pdev->dev,
"Unprivileged VF %d is attempting to configure promiscuous mode\n",
vf->vf_id);
@@ -1675,13 +1676,20 @@ err_out:
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
}
- return i40e_pci_sriov_enable(pdev, num_vfs);
+ ret = i40e_pci_sriov_enable(pdev, num_vfs);
+ goto sriov_configure_out;
}
if (!pci_vfs_assigned(pf->pdev)) {
@@ -1690,9 +1698,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto sriov_configure_out;
}
- return 0;
+sriov_configure_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
}
/***********************virtual channel routines******************/
@@ -3893,6 +3904,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
@@ -3941,6 +3957,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -3992,6 +4009,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4107,6 +4129,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
ret = 0;
error_pvid:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4128,6 +4151,11 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4154,6 +4182,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
vf->tx_rate = max_tx_rate;
error:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4174,6 +4203,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4209,6 +4243,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ret = 0;
error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4230,6 +4265,11 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
int abs_vf_id;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
@@ -4273,6 +4313,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
0, (u8 *)&pfe, sizeof(pfe), NULL);
error_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4294,6 +4335,11 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
@@ -4327,6 +4373,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
ret = -EIO;
}
out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4345,15 +4392,22 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
vf = &pf->vf[vf_id];
@@ -4376,5 +4430,6 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
}
out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
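Every .ndo VF callback above now funnels through the same exclusive-entry guard: test_and_set_bit() atomically claims __I40E_VIRTCHNL_OP_PENDING, losers bail out with -EAGAIN, and every exit path clears the bit. The shape, reduced to a sketch with hypothetical demo_* names:

	static int demo_vf_op(unsigned long *state)
	{
		int ret;

		if (test_and_set_bit(DEMO_OP_PENDING, state))
			return -EAGAIN;		/* another VF operation in flight */

		ret = demo_do_op();		/* assumed helper */

		clear_bit(DEMO_OP_PENDING, state);	/* release on every exit */
		return ret;
	}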
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index bf67d62e2b5f..f9621026beef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -13,9 +13,9 @@
#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
-#define I40E_VLAN_PRIORITY_SHIFT 12
+#define I40E_VLAN_PRIORITY_SHIFT 13
#define I40E_VLAN_MASK 0xFFF
-#define I40E_PRIORITY_MASK 0x7000
+#define I40E_PRIORITY_MASK 0xE000
/* Various queue ctrls */
enum i40e_queue_ctrl {
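The shift/mask correction above follows from the 802.1Q TCI layout: PCP occupies bits 15:13, DEI bit 12, and the VID bits 11:0, so the priority field is (tci & 0xE000) >> 13; the old 12/0x7000 values straddled the DEI bit. Worked example:

	u16 tci  = 0xA00A;			/* PCP = 5, DEI = 0, VID = 10 */
	u8  prio = (tci & I40E_PRIORITY_MASK) >> I40E_VLAN_PRIORITY_SHIFT; /* 5 */
	u16 vid  = tci & I40E_VLAN_MASK;	/* 10 */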
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index add1e457886d..870cf654e436 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -33,7 +33,7 @@ static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
}
/**
- * i40e_add_xsk_umem - Store an UMEM for a certain ring/qid
+ * i40e_add_xsk_umem - Store a UMEM for a certain ring/qid
* @vsi: Current VSI
* @umem: UMEM to store
* @qid: Ring/qid to associate with the UMEM
@@ -56,7 +56,7 @@ static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
}
/**
- * i40e_remove_xsk_umem - Remove an UMEM for a certain ring/qid
+ * i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid
* @vsi: Current VSI
* @qid: Ring/qid associated with the UMEM
**/
@@ -130,7 +130,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
}
/**
- * i40e_xsk_umem_enable - Enable/associate an UMEM to a certain ring/qid
+ * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
* @vsi: Current VSI
* @umem: UMEM
* @qid: Rx ring to associate UMEM to
@@ -189,7 +189,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
}
/**
- * i40e_xsk_umem_disable - Diassociate an UMEM from a certain ring/qid
+ * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
* @vsi: Current VSI
* @qid: Rx ring to associate UMEM to
*
@@ -255,12 +255,12 @@ int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
}
/**
- * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM
+ * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
* @vsi: Current VSI
* @umem: UMEM to enable/associate to a ring, or NULL to disable
* @qid: Rx ring to associate the UMEM with or disassociate it from
*
- * This function enables or disables an UMEM to a certain ring.
+ * This function enables or disables a UMEM to a certain ring.
*
* Returns 0 on success, <0 on failure
**/
@@ -276,7 +276,7 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
*
- * This function enables or disables an UMEM to a certain ring.
+ * This function enables or disables a UMEM to a certain ring.
*
* Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
**/
@@ -634,8 +634,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
struct i40e_rx_buffer *bi;
union i40e_rx_desc *rx_desc;
unsigned int size;
- u16 vlan_tag;
- u8 rx_ptype;
u64 qword;
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
@@ -713,14 +711,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
total_rx_bytes += skb->len;
total_rx_packets++;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
-
- vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
- le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
- i40e_receive_skb(rx_ring, skb, vlan_tag);
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index fb9bfad96daf..9b4d7cec2e18 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1761,10 +1761,11 @@ tx_only:
if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
- /* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete_done(napi, work_done);
-
- iavf_update_enable_itr(vsi, q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ iavf_update_enable_itr(vsi, q_vector);
return min(work_done, budget - 1);
}
@@ -2343,6 +2344,8 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*
@@ -2461,8 +2464,6 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
if (tso < 0)
goto out_drop;
- skb_tx_timestamp(skb);
-
/* always enable CRC insertion offload */
td_cmd |= IAVF_TX_DESC_CMD_ICRC;
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b8548370f1c7..a385575600f6 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -52,7 +52,6 @@ extern const char ice_drv_ver[];
#define ICE_MBXQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
-#define ICE_MAX_VSI_ALLOC 130
#define ICE_MAX_TXQS 2048
#define ICE_MAX_RXQS 2048
#define ICE_VSI_MAP_CONTIG 0
@@ -97,14 +96,14 @@ extern const char ice_drv_ver[];
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
-/* Macros for each tx/rx ring in a VSI */
+/* Macros for each Tx/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
-/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
@@ -113,7 +112,9 @@ extern const char ice_drv_ver[];
struct ice_tc_info {
u16 qoffset;
- u16 qcount;
+ u16 qcount_tx;
+ u16 qcount_rx;
+ u8 netdev_tc;
};
struct ice_tc_cfg {
@@ -149,10 +150,10 @@ enum ice_state {
__ICE_RESET_FAILED, /* set by reset/rebuild */
/* When checking for the PF to be in a nominal operating state, the
* bits that are grouped at the beginning of the list need to be
- * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
- * be checked. If you need to add a bit into consideration for nominal
+ * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
+ * be checked. If you need to add a bit into consideration for nominal
* operating state, it must be added before
- * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
+ * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
* without appropriate consideration.
*/
__ICE_STATE_NOMINAL_CHECK_BITS,
@@ -182,8 +183,8 @@ struct ice_vsi {
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
struct ice_port_info *port_info; /* back pointer to port_info */
- struct ice_ring **rx_rings; /* rx ring array */
- struct ice_ring **tx_rings; /* tx ring array */
+ struct ice_ring **rx_rings; /* Rx ring array */
+ struct ice_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -200,8 +201,8 @@ struct ice_vsi {
int sw_base_vector; /* Irq base for OS reserved vectors */
int hw_base_vector; /* HW (absolute) index of a vector */
enum ice_vsi_type type;
- u16 vsi_num; /* HW (absolute) index of this VSI */
- u16 idx; /* software index in pf->vsi[] */
+ u16 vsi_num; /* HW (absolute) index of this VSI */
+ u16 idx; /* software index in pf->vsi[] */
/* Interrupt thresholds */
u16 work_lmt;
@@ -254,8 +255,8 @@ struct ice_q_vector {
struct ice_ring_container tx;
struct irq_affinity_notify affinity_notify;
u16 v_idx; /* index in the vsi->q_vector array. */
- u8 num_ring_tx; /* total number of tx rings in vector */
- u8 num_ring_rx; /* total number of rx rings in vector */
+ u8 num_ring_tx; /* total number of Tx rings in vector */
+ u8 num_ring_rx; /* total number of Rx rings in vector */
char name[ICE_INT_NAME_STR_LEN];
/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
* value to the device
@@ -307,10 +308,10 @@ struct ice_pf {
u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
- u16 num_lan_tx; /* num lan tx queues setup */
- u16 num_lan_rx; /* num lan rx queues setup */
- u16 q_left_tx; /* remaining num tx queues left unclaimed */
- u16 q_left_rx; /* remaining num rx queues left unclaimed */
+ u16 num_lan_tx; /* num lan Tx queues setup */
+ u16 num_lan_rx; /* num lan Rx queues setup */
+ u16 q_left_tx; /* remaining num Tx queues left unclaimed */
+ u16 q_left_rx; /* remaining num Rx queues left unclaimed */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
u16 num_alloc_vsi;
u16 corer_count; /* Core reset count */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 6653555f55dd..fcdcd80b18e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -5,7 +5,7 @@
#define _ICE_ADMINQ_CMD_H_
/* This header file defines the Admin Queue commands, error codes and
- * descriptor format. It is shared between Firmware and Software.
+ * descriptor format. It is shared between Firmware and Software.
*/
#define ICE_MAX_VSI 768
@@ -87,6 +87,7 @@ struct ice_aqc_list_caps {
/* Device/Function buffer entry, repeated per reported capability */
struct ice_aqc_list_caps_elem {
__le16 cap;
+#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005
#define ICE_AQC_CAPS_SRIOV 0x0012
#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VSI 0x0017
@@ -462,7 +463,7 @@ struct ice_aqc_sw_rules {
};
/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
- * This structures describes the lookup rules and associated actions. "index"
+ * This structure describes the lookup rules and associated actions. "index"
* is returned as part of a response to a successful Add command, and can be
* used to identify the rule for Update/Get/Remove commands.
*/
@@ -1065,10 +1066,10 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LAST_CMD BIT(0)
#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */
#define ICE_AQC_NVM_PRESERVATION_S 1
-#define ICE_AQC_NVM_PRESERVATION_M (3 << CSR_AQ_NVM_PRESERVATION_S)
-#define ICE_AQC_NVM_NO_PRESERVATION (0 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
-#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
__le16 module_typeid;
__le16 length;
@@ -1110,7 +1111,7 @@ struct ice_aqc_get_set_rss_keys {
};
/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
-struct ice_aqc_get_set_rss_lut {
+struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
@@ -1314,10 +1315,10 @@ struct ice_aqc_get_clear_fw_log {
* @params: command-specific parameters
*
* Descriptor format for commands the driver posts on the Admin Transmit Queue
- * (ATQ). The firmware writes back onto the command descriptor and returns
- * the result of the command. Asynchronous events that are not an immediate
+ * (ATQ). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
* result of the command are written to the Admin Receive Queue (ARQ) using
- * the same descriptor format. Descriptors are in little-endian notation with
+ * the same descriptor format. Descriptors are in little-endian notation with
* 32-bit words.
*/
struct ice_aq_desc {
@@ -1379,10 +1380,10 @@ struct ice_aq_desc {
/* error codes */
enum ice_aq_err {
- ICE_AQ_RC_OK = 0, /* success */
+ ICE_AQ_RC_OK = 0, /* Success */
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
- ICE_AQ_RC_EEXIST = 13, /* object already exists */
+ ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 554fd707a6d6..4c1d35da940d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -405,9 +405,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
- ice_init_def_sw_recp(hw);
-
- return 0;
+ return ice_init_def_sw_recp(hw);
}
/**
@@ -715,7 +713,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
hw->evb_veb = true;
- /* Query the allocated resources for tx scheduler */
+ /* Query the allocated resources for Tx scheduler */
status = ice_sched_query_res_alloc(hw);
if (status) {
ice_debug(hw, ICE_DBG_SCHED,
@@ -958,7 +956,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
* @ice_rxq_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
*
* Copies rxq context from dense structure to hw register space
*/
@@ -1014,7 +1012,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* ice_write_rxq_ctx
* @hw: pointer to the hardware structure
* @rlan_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
* it to hw register space
@@ -1387,6 +1385,27 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
+ * ice_get_guar_num_vsi - determine number of guar VSI for a PF
+ * @hw: pointer to the hw structure
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of VSI per PF.
+ */
+static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
+{
+ u8 funcs;
+
+#define ICE_CAPS_VALID_FUNCS_M 0xFF
+ funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+ ICE_CAPS_VALID_FUNCS_M);
+
+ if (!funcs)
+ return 0;
+
+ return ICE_MAX_VSI / funcs;
+}
+
+/**
* ice_parse_caps - parse function/device capabilities
* @hw: pointer to the hw struct
* @buf: pointer to a buffer containing function/device capability records
@@ -1428,6 +1447,12 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
u16 cap = le16_to_cpu(cap_resp->cap);
switch (cap) {
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Valid Functions = %d\n",
+ caps->valid_functions);
+ break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
@@ -1457,10 +1482,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"HW caps: Dev.VSI cnt = %d\n",
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
- func_p->guaranteed_num_vsi = number;
+ func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Func.VSI cnt = %d\n",
- func_p->guaranteed_num_vsi);
+ number);
}
break;
case ICE_AQC_CAPS_RSS:
@@ -1688,8 +1713,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
*/
-static u16
-ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
{
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 84c967294eaf..2bf5e11f559a 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -3,6 +3,26 @@
#include "ice_common.h"
+#define ICE_CQ_INIT_REGS(qinfo, prefix) \
+do { \
+ (qinfo)->sq.head = prefix##_ATQH; \
+ (qinfo)->sq.tail = prefix##_ATQT; \
+ (qinfo)->sq.len = prefix##_ATQLEN; \
+ (qinfo)->sq.bah = prefix##_ATQBAH; \
+ (qinfo)->sq.bal = prefix##_ATQBAL; \
+ (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
+ (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
+ (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
+ (qinfo)->rq.head = prefix##_ARQH; \
+ (qinfo)->rq.tail = prefix##_ARQT; \
+ (qinfo)->rq.len = prefix##_ARQLEN; \
+ (qinfo)->rq.bah = prefix##_ARQBAH; \
+ (qinfo)->rq.bal = prefix##_ARQBAL; \
+ (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \
+ (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
+ (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \
+} while (0)
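/* For illustration (not part of this patch): the token pasting above means
 * ICE_CQ_INIT_REGS(cq, PF_FW) expands in part to:
 *
 *	cq->sq.head = PF_FW_ATQH;
 *	cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
 *
 * so the same macro serves both the AdminQ (PF_FW) and mailbox (PF_MBX)
 * register sets below.
 */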
+
/**
* ice_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
@@ -13,23 +33,7 @@ static void ice_adminq_init_regs(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->adminq;
- cq->sq.head = PF_FW_ATQH;
- cq->sq.tail = PF_FW_ATQT;
- cq->sq.len = PF_FW_ATQLEN;
- cq->sq.bah = PF_FW_ATQBAH;
- cq->sq.bal = PF_FW_ATQBAL;
- cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
- cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
- cq->sq.head_mask = PF_FW_ATQH_ATQH_M;
-
- cq->rq.head = PF_FW_ARQH;
- cq->rq.tail = PF_FW_ARQT;
- cq->rq.len = PF_FW_ARQLEN;
- cq->rq.bah = PF_FW_ARQBAH;
- cq->rq.bal = PF_FW_ARQBAL;
- cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
- cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
- cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
+ ICE_CQ_INIT_REGS(cq, PF_FW);
}
/**
@@ -42,24 +46,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->mailboxq;
- /* set head and tail registers in our local struct */
- cq->sq.head = PF_MBX_ATQH;
- cq->sq.tail = PF_MBX_ATQT;
- cq->sq.len = PF_MBX_ATQLEN;
- cq->sq.bah = PF_MBX_ATQBAH;
- cq->sq.bal = PF_MBX_ATQBAL;
- cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
- cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
- cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;
-
- cq->rq.head = PF_MBX_ARQH;
- cq->rq.tail = PF_MBX_ARQT;
- cq->rq.len = PF_MBX_ARQLEN;
- cq->rq.bah = PF_MBX_ARQBAH;
- cq->rq.bal = PF_MBX_ARQBAL;
- cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
- cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
- cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
+ ICE_CQ_INIT_REGS(cq, PF_MBX);
}
/**
@@ -131,37 +118,20 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
}
/**
- * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
+ * ice_free_cq_ring - Free control queue ring
* @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
+ * @ring: pointer to the specific control queue ring
*
- * This assumes the posted send buffers have already been cleaned
+ * This assumes the posted buffers have already been cleaned
* and de-allocated
*/
-static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
- dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
- cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
- cq->sq.desc_buf.va = NULL;
- cq->sq.desc_buf.pa = 0;
- cq->sq.desc_buf.size = 0;
-}
-
-/**
- * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- *
- * This assumes the posted receive buffers have already been cleaned
- * and de-allocated
- */
-static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
-{
- dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
- cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
- cq->rq.desc_buf.va = NULL;
- cq->rq.desc_buf.pa = 0;
- cq->rq.desc_buf.size = 0;
+ dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
+ ring->desc_buf.va, ring->desc_buf.pa);
+ ring->desc_buf.va = NULL;
+ ring->desc_buf.pa = 0;
+ ring->desc_buf.size = 0;
}
/**
@@ -280,54 +250,23 @@ unwind_alloc_sq_bufs:
return ICE_ERR_NO_MEMORY;
}
-/**
- * ice_free_rq_bufs - Free ARQ buffer info elements
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- */
-static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
-{
- int i;
-
- /* free descriptors */
- for (i = 0; i < cq->num_rq_entries; i++) {
- dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
- cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
- cq->rq.r.rq_bi[i].va = NULL;
- cq->rq.r.rq_bi[i].pa = 0;
- cq->rq.r.rq_bi[i].size = 0;
- }
-
- /* free the dma header */
- devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
-}
-
-/**
- * ice_free_sq_bufs - Free ATQ buffer info elements
- * @hw: pointer to the hardware structure
- * @cq: pointer to the specific Control queue
- */
-static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static enum ice_status
+ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
- int i;
+ /* Clear Head and Tail */
+ wr32(hw, ring->head, 0);
+ wr32(hw, ring->tail, 0);
- /* only unmap if the address is non-NULL */
- for (i = 0; i < cq->num_sq_entries; i++)
- if (cq->sq.r.sq_bi[i].pa) {
- dmam_free_coherent(ice_hw_to_dev(hw),
- cq->sq.r.sq_bi[i].size,
- cq->sq.r.sq_bi[i].va,
- cq->sq.r.sq_bi[i].pa);
- cq->sq.r.sq_bi[i].va = NULL;
- cq->sq.r.sq_bi[i].pa = 0;
- cq->sq.r.sq_bi[i].size = 0;
- }
+ /* set starting point */
+ wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
+ wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
+ wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));
- /* free the buffer info list */
- devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);
+ /* Check one register to verify that config was applied */
+ if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
+ return ICE_ERR_AQ_ERROR;
- /* free the dma header */
- devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
+ return 0;
}
/**
@@ -340,23 +279,7 @@ static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- u32 reg = 0;
-
- /* Clear Head and Tail */
- wr32(hw, cq->sq.head, 0);
- wr32(hw, cq->sq.tail, 0);
-
- /* set starting point */
- wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
- wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
- wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));
-
- /* Check one register to verify that config was applied */
- reg = rd32(hw, cq->sq.bal);
- if (reg != lower_32_bits(cq->sq.desc_buf.pa))
- return ICE_ERR_AQ_ERROR;
-
- return 0;
+ return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
/**
@@ -369,25 +292,15 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- u32 reg = 0;
-
- /* Clear Head and Tail */
- wr32(hw, cq->rq.head, 0);
- wr32(hw, cq->rq.tail, 0);
+ enum ice_status status;
- /* set starting point */
- wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
- wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
- wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));
+ status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
+ if (status)
+ return status;
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
- /* Check one register to verify that config was applied */
- reg = rd32(hw, cq->rq.bal);
- if (reg != lower_32_bits(cq->rq.desc_buf.pa))
- return ICE_ERR_AQ_ERROR;
-
return 0;
}
@@ -444,7 +357,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
- ice_free_ctrlq_sq_ring(hw, cq);
+ ice_free_cq_ring(hw, &cq->sq);
init_ctrlq_exit:
return ret_code;
@@ -503,12 +416,33 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
goto init_ctrlq_exit;
init_ctrlq_free_rings:
- ice_free_ctrlq_rq_ring(hw, cq);
+ ice_free_cq_ring(hw, &cq->rq);
init_ctrlq_exit:
return ret_code;
}
+#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
+do { \
+ int i; \
+ /* free descriptors */ \
+ for (i = 0; i < (qi)->num_##ring##_entries; i++) \
+ if ((qi)->ring.r.ring##_bi[i].pa) { \
+ dmam_free_coherent(ice_hw_to_dev(hw), \
+ (qi)->ring.r.ring##_bi[i].size,\
+ (qi)->ring.r.ring##_bi[i].va,\
+ (qi)->ring.r.ring##_bi[i].pa);\
+ (qi)->ring.r.ring##_bi[i].va = NULL; \
+ (qi)->ring.r.ring##_bi[i].pa = 0; \
+ (qi)->ring.r.ring##_bi[i].size = 0; \
+ } \
+ /* free the buffer info list */ \
+ if ((qi)->ring.cmd_buf) \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
+ /* free dma head */ \
+ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
+} while (0)
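/* For illustration (not part of this patch): the ring argument is pasted
 * into both the entry counter and the buffer-array member, so
 * ICE_FREE_CQ_BUFS(hw, cq, sq) iterates cq->num_sq_entries entries of
 * cq->sq.r.sq_bi[], then frees cq->sq.cmd_buf and cq->sq.dma_head.
 */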
+
/**
* ice_shutdown_sq - shutdown the Control ATQ
* @hw: pointer to the hardware structure
@@ -538,8 +472,8 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->sq.count = 0; /* to indicate uninitialized queue */
/* free ring buffers and the ring itself */
- ice_free_sq_bufs(hw, cq);
- ice_free_ctrlq_sq_ring(hw, cq);
+ ICE_FREE_CQ_BUFS(hw, cq, sq);
+ ice_free_cq_ring(hw, &cq->sq);
shutdown_sq_out:
mutex_unlock(&cq->sq_lock);
@@ -606,8 +540,8 @@ ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->rq.count = 0;
/* free ring buffers and the ring itself */
- ice_free_rq_bufs(hw, cq);
- ice_free_ctrlq_rq_ring(hw, cq);
+ ICE_FREE_CQ_BUFS(hw, cq, rq);
+ ice_free_cq_ring(hw, &cq->rq);
shutdown_rq_out:
mutex_unlock(&cq->rq_lock);
@@ -657,7 +591,6 @@ init_ctrlq_free_rq:
* - cq->num_rq_entries
* - cq->rq_buf_size
* - cq->sq_buf_size
- *
*/
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
@@ -841,7 +774,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure
*
- * This is the main send command routine for the ATQ. It runs the q,
+ * This is the main send command routine for the ATQ. It runs the queue,
* cleans the queue, etc.
*/
enum ice_status
@@ -1035,7 +968,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* @pending: number of events that could be left to process
*
* This function cleans one Admin Receive Queue element and returns
- * the contents through e. It can also return how many events are
+ * the contents through e. It can also return how many events are
* left to process through 'pending'.
*/
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 648acdb4c644..3b6e387f5440 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -62,7 +62,7 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
* The PF_STATs are appended to the netdev stats only when ethtool -S
* is queried on the base PF netdev.
*/
-static struct ice_stats ice_gstrings_pf_stats[] = {
+static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
@@ -104,7 +104,7 @@ static struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
};
-static u32 ice_regs_dump_list[] = {
+static const u32 ice_regs_dump_list[] = {
PFGEN_STATE,
PRTGEN_STATUS,
QRX_CTRL(0),
@@ -260,10 +260,10 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
* a private ethtool flag). This is due to the nature of the
* ethtool stats API.
*
- * User space programs such as ethtool must make 3 separate
+ * Userspace programs such as ethtool must make 3 separate
* ioctl requests, one for size, one for the strings, and
* finally one for the stats. Since these cross into
- * user space, changes to the number or size could result in
+ * userspace, changes to the number or size could result in
* undefined memory access or incorrect string<->value
* correlations for statistics.
*
@@ -1392,17 +1392,17 @@ static int ice_nway_reset(struct net_device *netdev)
{
/* restart autonegotiation */
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
enum ice_status status;
- bool link_up;
pi = vsi->port_info;
- hw_link_info = &pi->phy.link_info;
- link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
+ /* If VSI state is up, then restart autoneg with link up */
+ if (!test_bit(__ICE_DOWN, vsi->back->state))
+ status = ice_aq_set_link_restart_an(pi, true, NULL);
+ else
+ status = ice_aq_set_link_restart_an(pi, false, NULL);
- status = ice_aq_set_link_restart_an(pi, link_up, NULL);
if (status) {
netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
status, pi->hw->adminq.sq_last_status);
@@ -1441,7 +1441,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
/**
* ice_set_pauseparam - Set Flow Control parameter
* @netdev: network interface device structure
- * @pause: return tx/rx flow control status
+ * @pause: return Tx/Rx flow control status
*/
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
@@ -1543,7 +1543,7 @@ static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
}
/**
- * ice_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
* @netdev: network interface device structure
*
* Returns the table size.
@@ -1556,7 +1556,7 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
}
/**
- * ice_get_rxfh - get the rx flow hash indirection table
+ * ice_get_rxfh - get the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
@@ -1603,7 +1603,7 @@ out:
}
/**
- * ice_set_rxfh - set the rx flow hash indirection table
+ * ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 596b9fb1c510..5507928c8fbe 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -7,6 +7,9 @@
#define _ICE_HW_AUTOGEN_H_
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD_HEAD_S 0
+#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
#define PF_FW_ARQBAH 0x00080180
#define PF_FW_ARQBAL 0x00080080
#define PF_FW_ARQH 0x00080380
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 7d2a66739e3f..bb51dd7defb5 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -6,11 +6,11 @@
union ice_32byte_rx_desc {
struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
/* bit 0 of hdr_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
+ __le64 rsvd1;
+ __le64 rsvd2;
} read;
struct {
struct {
@@ -105,11 +105,11 @@ enum ice_rx_ptype_payload_layer {
*/
union ice_32b_rx_flex_desc {
struct {
- __le64 pkt_addr; /* Packet buffer address */
- __le64 hdr_addr; /* Header buffer address */
- /* bit 0 of hdr_addr is DD bit */
- __le64 rsvd1;
- __le64 rsvd2;
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ /* bit 0 of hdr_addr is DD bit */
+ __le64 rsvd1;
+ __le64 rsvd2;
} read;
struct {
/* Qword 0 */
@@ -256,6 +256,9 @@ enum ice_rx_flex_desc_status_error_0_bits {
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
+#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
+#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
/* RLAN Rx queue context data
*
@@ -274,18 +277,18 @@ struct ice_rlan_ctx {
u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
u16 hbuf; /* bigger than needed, see above for reason */
- u8 dtype;
- u8 dsize;
- u8 crcstrip;
- u8 l2tsel;
- u8 hsplit_0;
- u8 hsplit_1;
- u8 showiv;
+ u8 dtype;
+ u8 dsize;
+ u8 crcstrip;
+ u8 l2tsel;
+ u8 hsplit_0;
+ u8 hsplit_1;
+ u8 showiv;
u32 rxmax; /* bigger than needed, see above for reason */
- u8 tphrdesc_ena;
- u8 tphwdesc_ena;
- u8 tphdata_ena;
- u8 tphhead_ena;
+ u8 tphrdesc_ena;
+ u8 tphwdesc_ena;
+ u8 tphdata_ena;
+ u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
};
@@ -413,35 +416,35 @@ enum ice_tx_ctx_desc_cmd_bits {
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
- u8 port_num;
+ u8 port_num;
u16 cgd_num; /* bigger than needed, see above for reason */
- u8 pf_num;
+ u8 pf_num;
u16 vmvf_num;
- u8 vmvf_type;
+ u8 vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
u16 src_vsi;
- u8 tsyn_ena;
- u8 alt_vlan;
+ u8 tsyn_ena;
+ u8 alt_vlan;
u16 cpuid; /* bigger than needed, see above for reason */
- u8 wb_mode;
- u8 tphrd_desc;
- u8 tphrd;
- u8 tphwr_desc;
+ u8 wb_mode;
+ u8 tphrd_desc;
+ u8 tphrd;
+ u8 tphwr_desc;
u16 cmpq_id;
u16 qnum_in_func;
- u8 itr_notification_mode;
- u8 adjust_prof_id;
+ u8 itr_notification_mode;
+ u8 adjust_prof_id;
u32 qlen; /* bigger than needed, see above for reason */
- u8 quanta_prof_idx;
- u8 tso_ena;
+ u8 quanta_prof_idx;
+ u8 tso_ena;
u16 tso_qnum;
- u8 legacy_int;
- u8 drop_ena;
- u8 cache_prof_idx;
- u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal do not write */
+ u8 legacy_int;
+ u8 drop_ena;
+ u8 cache_prof_idx;
+ u8 pkt_shaper_prof_idx;
+ u8 int_q_state; /* width not needed - internal do not write */
};
/* macro to make the table lines short */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 1041fa2a7767..29b1dcfd4331 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -20,7 +20,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
u16 pf_q;
int err;
- /* what is RX queue number in global space of 2K Rx queues */
+ /* what is Rx queue number in global space of 2K Rx queues */
pf_q = vsi->rxq_map[ring->q_index];
/* clear the context structure first */
@@ -174,15 +174,15 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
int i;
- for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+ for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
break;
- usleep_range(10, 20);
+ usleep_range(20, 40);
}
- if (i >= ICE_Q_WAIT_RETRY_LIMIT)
+ if (i >= ICE_Q_WAIT_MAX_RETRY)
return -ETIMEDOUT;
return 0;
@@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
*/
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, numq_tc;
- u16 pow = 0, max_rss = 0, qcount;
+ u16 offset = 0, qmap = 0, tx_count = 0;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
+ u16 tx_numq_tc, rx_numq_tc;
+ u16 pow = 0, max_rss = 0;
bool ena_tc0 = false;
+ u8 netdev_tc = 0;
int i;
/* at least TC0 should be enabled by default */
@@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->tc_cfg.ena_tc |= 1;
}
- numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+ rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+ if (!rx_numq_tc)
+ rx_numq_tc = 1;
+ tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
+ if (!tx_numq_tc)
+ tx_numq_tc = 1;
/* TC mapping is a function of the number of Rx queues assigned to the
* VSI for each traffic class and the offset of these queues.
@@ -808,7 +815,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* Setup number and offset of Rx queues for all TCs for the VSI
*/
- qcount = numq_tc;
+ qcount_rx = rx_numq_tc;
+
/* qcount will change if RSS is enabled */
if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
@@ -816,37 +824,41 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
max_rss = ICE_MAX_LG_RSS_QS;
else
max_rss = ICE_MAX_SMALL_RSS_QS;
- qcount = min_t(int, numq_tc, max_rss);
- qcount = min_t(int, qcount, vsi->rss_size);
+ qcount_rx = min_t(int, rx_numq_tc, max_rss);
+ qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
}
}
/* find the (rounded up) power-of-2 of qcount */
- pow = order_base_2(qcount);
+ pow = order_base_2(qcount_rx);
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
vsi->tc_cfg.tc_info[i].qoffset = 0;
- vsi->tc_cfg.tc_info[i].qcount = 1;
+ vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+ vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+ vsi->tc_cfg.tc_info[i].netdev_tc = 0;
ctxt->info.tc_mapping[i] = 0;
continue;
}
/* TC is enabled */
vsi->tc_cfg.tc_info[i].qoffset = offset;
- vsi->tc_cfg.tc_info[i].qcount = qcount;
+ vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+ vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
+ vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
ICE_AQ_VSI_TC_Q_OFFSET_M) |
((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
ICE_AQ_VSI_TC_Q_NUM_M);
- offset += qcount;
+ offset += qcount_rx;
+ tx_count += tx_numq_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
-
- vsi->num_txq = qcount_tx;
vsi->num_rxq = offset;
+ vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
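/* Worked example for the qmap encoding above (illustrative; assumes the
 * usual ice layout with the queue offset in the low bits and the
 * power-of-2 queue count in the ICE_AQ_VSI_TC_Q_NUM_S field): a TC with
 * offset = 4 and qcount_rx = 4 gives pow = order_base_2(4) = 2, so the TC
 * owns queues 4..7 and both values are packed into a single le16 qmap.
 */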
@@ -1000,7 +1012,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @vsi: the VSI being configured
* @v_idx: index of the vector in the VSI struct
*
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
*/
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
@@ -1039,7 +1051,7 @@ out:
* ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
+ * We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
*/
static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
@@ -1188,7 +1200,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int i;
- /* Allocate tx_rings */
+ /* Allocate Tx rings */
for (i = 0; i < vsi->alloc_txq; i++) {
struct ice_ring *ring;
@@ -1207,7 +1219,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
vsi->tx_rings[i] = ring;
}
- /* Allocate rx_rings */
+ /* Allocate Rx rings */
for (i = 0; i < vsi->alloc_rxq; i++) {
struct ice_ring *ring;
@@ -1611,55 +1623,62 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
+ u8 num_q_grps, q_idx = 0;
enum ice_status status;
u16 buf_len, i, pf_q;
int err = 0, tc = 0;
- u8 num_q_grps;
buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
- if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
- err = -EINVAL;
- goto err_cfg_txqs;
- }
qg_buf->num_txqs = 1;
num_q_grps = 1;
- /* set up and configure the Tx queues */
- ice_for_each_txq(vsi, i) {
- struct ice_tlan_ctx tlan_ctx = { 0 };
+ /* set up and configure the Tx queues for each enabled TC */
+ for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+ break;
- pf_q = vsi->txq_map[i];
- ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
+ for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+
+ pf_q = vsi->txq_map[q_idx];
+ ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
+ pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+ /* init queue specific tail reg. It is referred as
+ * transmit comm scheduler queue doorbell.
+ */
+ vsi->tx_rings[q_idx]->tail =
+ pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+ num_q_grps, qg_buf, buf_len,
+ NULL);
+ if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ err = -ENODEV;
+ goto err_cfg_txqs;
+ }
- /* init queue specific tail reg. It is referred as transmit
- * comm scheduler queue doorbell.
- */
- vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- num_q_grps, qg_buf, buf_len, NULL);
- if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- err = -ENODEV;
- goto err_cfg_txqs;
- }
+ /* Add Tx Queue TEID into the VSI Tx ring from the
+ * response. This will complete configuring and
+ * enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ vsi->tx_rings[q_idx]->txq_teid =
+ le32_to_cpu(txq->q_teid);
- /* Add Tx Queue TEID into the VSI Tx ring from the response
- * This will complete configuring and enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- vsi->tx_rings[i]->txq_teid =
- le32_to_cpu(txq->q_teid);
+ q_idx++;
+ }
}
err_cfg_txqs:
devm_kfree(&pf->pdev->dev, qg_buf);
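/* Worked example for the per-TC loop above (illustrative): with TC0 and
 * TC1 enabled and tc_info[tc].qcount_tx = 2 for each, the inner loop
 * programs txq_map[0..1] for TC0 and txq_map[2..3] for TC1, leaving
 * q_idx equal to vsi->num_txq = 4 when the walk completes.
 */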
@@ -1908,7 +1927,8 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
ice_for_each_txq(vsi, i) {
u16 v_idx;
- if (!vsi->tx_rings || !vsi->tx_rings[i]) {
+ if (!vsi->tx_rings || !vsi->tx_rings[i] ||
+ !vsi->tx_rings[i]->q_vector) {
err = -EINVAL;
goto err_out;
}
@@ -2056,6 +2076,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* set RSS capabilities */
ice_vsi_set_rss_params(vsi);
+ /* set tc configuration */
+ ice_vsi_set_tc_cfg(vsi);
+
/* create the VSI */
ret = ice_vsi_init(vsi);
if (ret)
@@ -2113,17 +2136,13 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
pf->q_left_rx -= vsi->alloc_rxq;
break;
default:
- /* if VSI type is not recognized, clean up the resources and
- * exit
- */
+ /* clean up the resources and exit */
goto unroll_vsi_init;
}
- ice_vsi_set_tc_cfg(vsi);
-
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
+ max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -2314,7 +2333,7 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
int start = res->search_hint;
int end = start;
- if ((start + needed) > res->num_entries)
+ if ((start + needed) > res->num_entries)
return -ENOMEM;
id |= ICE_RES_VALID_BIT;
@@ -2491,6 +2510,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
ice_vsi_clear_rings(vsi);
@@ -2518,11 +2538,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_pf *pf;
int ret, i;
if (!vsi)
return -EINVAL;
+ pf = vsi->back;
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
@@ -2532,6 +2555,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_free_arrays(vsi, false);
ice_dev_onetime_setup(&vsi->back->hw);
ice_vsi_set_num_qs(vsi);
+ ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
ret = ice_vsi_init(vsi);
@@ -2578,11 +2602,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
break;
}
- ice_vsi_set_tc_cfg(vsi);
-
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
+ max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 333312a1d595..8725569d11f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -349,6 +349,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf);
+ if (hw->port_info)
+ ice_sched_clear_port(hw->port_info);
+
ice_shutdown_all_ctrlq(hw);
set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
@@ -405,7 +408,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
* OICR interrupt. The OICR handler (ice_misc_intr) determines what type
* of reset is pending and sets bits in pf->state indicating the reset
- * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
+ * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
* prepare for pending reset if not already (for PF software-initiated
* global resets the software should already be prepared for it as
* indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
@@ -1379,7 +1382,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
* @pf: board private structure
*
* This sets up the handler for MSIX 0, which is used to manage the
- * non-queue interrupts, e.g. AdminQ and errors. This is not used
+ * non-queue interrupts, e.g. AdminQ and errors. This is not used
* when in MSI or Legacy interrupt mode.
*/
static int ice_req_irq_msix_misc(struct ice_pf *pf)
@@ -1783,7 +1786,7 @@ static void ice_determine_q_usage(struct ice_pf *pf)
pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
- /* only 1 rx queue unless RSS is enabled */
+ /* only 1 Rx queue unless RSS is enabled */
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
pf->num_lan_rx = 1;
else
@@ -2091,8 +2094,7 @@ static int ice_probe(struct pci_dev *pdev,
ice_determine_q_usage(pf);
- pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
- hw->func_caps.guaranteed_num_vsi);
+ pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
goto err_init_pf_unroll;
@@ -2544,7 +2546,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err)
return err;
}
-
err = ice_vsi_cfg_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
@@ -2563,8 +2564,12 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_enable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_enable(&q_vector->napi);
+ }
}
/**
@@ -2931,8 +2936,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_disable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_disable(&q_vector->napi);
+ }
}
/**
@@ -3138,8 +3147,9 @@ static void ice_vsi_release_all(struct ice_pf *pf)
/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
*/
-static void ice_dis_vsi(struct ice_vsi *vsi)
+static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
if (test_bit(__ICE_DOWN, vsi->state))
return;
@@ -3148,9 +3158,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
- rtnl_lock();
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
- rtnl_unlock();
+ if (!locked) {
+ rtnl_lock();
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ }
} else {
ice_vsi_close(vsi);
}
@@ -3189,7 +3203,7 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf)
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
- ice_dis_vsi(pf->vsi[v]);
+ ice_dis_vsi(pf->vsi[v], false);
}
/**
@@ -3618,6 +3632,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
* @dev: the netdev being configured
* @nlh: RTNL message
* @flags: bridge setlink flags
+ * @extack: netlink extended ack
*
* Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is
* hooked up to. Iterates through the PF VSI list and sets the loopback mode (if
@@ -3626,7 +3641,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
*/
static int
ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
- u16 __always_unused flags)
+ u16 __always_unused flags, struct netlink_ext_ack *extack)
{
struct ice_netdev_priv *np = netdev_priv(dev);
struct ice_pf *pf = np->vsi->back;
@@ -3668,7 +3683,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
*/
status = ice_update_sw_rule_bridge_mode(hw);
if (status) {
- netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n",
+ netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
mode, status, hw->adminq.sq_last_status);
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
@@ -3691,40 +3706,36 @@ static void ice_tx_timeout(struct net_device *netdev)
struct ice_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- u32 head, val = 0, i;
int hung_queue = -1;
+ u32 i;
pf->tx_timeout_count++;
- /* find the stopped queue the same way the stack does */
+ /* find the stopped queue the same way dev_watchdog() does */
for (i = 0; i < netdev->num_tx_queues; i++) {
- struct netdev_queue *q;
unsigned long trans_start;
+ struct netdev_queue *q;
q = netdev_get_tx_queue(netdev, i);
trans_start = q->trans_start;
if (netif_xmit_stopped(q) &&
time_after(jiffies,
- (trans_start + netdev->watchdog_timeo))) {
+ trans_start + netdev->watchdog_timeo)) {
hung_queue = i;
break;
}
}
- if (i == netdev->num_tx_queues) {
+ if (i == netdev->num_tx_queues)
netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
- } else {
+ else
/* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_txq; i++) {
- if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
- if (hung_queue ==
- vsi->tx_rings[i]->q_index) {
+ for (i = 0; i < vsi->num_txq; i++)
+ if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+ if (hung_queue == vsi->tx_rings[i]->q_index) {
tx_ring = vsi->tx_rings[i];
break;
}
- }
- }
- }
/* Reset recovery level if enough time has elapsed after last timeout.
* Also ensure no new reset action happens before next timeout period.
@@ -3736,17 +3747,20 @@ static void ice_tx_timeout(struct net_device *netdev)
return;
if (tx_ring) {
- head = tx_ring->next_to_clean;
+ struct ice_hw *hw = &pf->hw;
+ u32 head, val = 0;
+
+ head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
+ QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
/* Read interrupt register */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- val = rd32(&pf->hw,
+ val = rd32(hw,
GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
- tx_ring->vsi->hw_base_vector));
+ tx_ring->vsi->hw_base_vector));
- netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+ netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use,
- readl(tx_ring->tail), val);
+ head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
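/* For illustration (not part of this patch): the head read above follows
 * the driver-wide _M/_S register-field convention, i.e.
 *
 *	head = (rd32(hw, QTX_COMM_HEAD(q)) & QTX_COMM_HEAD_HEAD_M)
 *		>> QTX_COMM_HEAD_HEAD_S;
 *
 * With the 13-bit mask defined in ice_hw_autogen.h, a raw register value
 * of 0xDEAD1234 yields head = 0x1234.
 */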
@@ -3780,7 +3794,7 @@ static void ice_tx_timeout(struct net_device *netdev)
* @netdev: network interface device structure
*
* The open entry point is called when a network interface is made
- * active by the system (IFF_UP). At this point all resources needed
+ * active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
* handler is registered with the OS, the netdev watchdog is enabled,
* and the stack is notified that the interface is ready.
@@ -3813,7 +3827,7 @@ static int ice_open(struct net_device *netdev)
* @netdev: network interface device structure
*
* The stop entry point is called when an interface is de-activated by the OS,
- * and the netdevice enters the DOWN state. The hardware is still under the
+ * and the netdevice enters the DOWN state. The hardware is still under the
* driver's control, but the netdev interface is disabled.
*
* Returns success only - not allowed to fail
@@ -3842,14 +3856,14 @@ ice_features_check(struct sk_buff *skb,
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
- * being requested for this frame. We can rule out both by just
+ * being requested for this frame. We can rule out both by just
* checking for CHECKSUM_PARTIAL
*/
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
/* We cannot support GSO if the MSS is going to be less than
- * 64 bytes. If it is then we need to drop support for GSO.
+ * 64 bytes. If it is then we need to drop support for GSO.
*/
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
features &= ~NETIF_F_GSO_MASK;
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 7cc8aa18a22b..a1681853df2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -630,7 +630,7 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
*
* Cleanup scheduling elements from SW DB
*/
-static void ice_sched_clear_port(struct ice_port_info *pi)
+void ice_sched_clear_port(struct ice_port_info *pi)
{
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return;
@@ -894,8 +894,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* This function removes the leaf node that was created by the FW
* during initialization
*/
-static void
-ice_rm_dflt_leaf_node(struct ice_port_info *pi)
+static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
struct ice_sched_node *node;
@@ -923,8 +922,7 @@ ice_rm_dflt_leaf_node(struct ice_port_info *pi)
* This function frees all the nodes except root and TC that were created by
* the FW during initialization
*/
-static void
-ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
+static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
struct ice_sched_node *node;
@@ -1339,7 +1337,7 @@ ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
* @num_nodes: pointer to num nodes array
*
* This function calculates the number of supported nodes needed to add this
- * VSI into tx tree including the VSI, parent and intermediate nodes in below
+ * VSI into Tx tree including the VSI, parent and intermediate nodes in below
* layers
*/
static void
@@ -1376,13 +1374,13 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
}
/**
- * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
+ * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
- * This function adds the VSI supported nodes into tx tree including the
+ * This function adds the VSI supported nodes into Tx tree including the
* VSI, its parent and intermediate nodes in below layers
*/
static enum ice_status
@@ -1527,7 +1525,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
}
/**
- * ice_sched_cfg_vsi - configure the new/exisiting VSI
+ * ice_sched_cfg_vsi - configure the new/existing VSI
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: TC number
@@ -1605,3 +1603,109 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
return status;
}
+
+/**
+ * ice_sched_rm_agg_vsi_info - remove agg related VSI info entry
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function removes a single aggregator VSI info entry from the
+ * aggregator list.
+ */
+static void
+ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+{
+ struct ice_sched_agg_info *agg_info;
+ struct ice_sched_agg_info *atmp;
+
+ list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_vsi_info *vtmp;
+
+ list_for_each_entry_safe(agg_vsi_info, vtmp,
+ &agg_info->agg_vsi_list, list_entry)
+ if (agg_vsi_info->vsi_handle == vsi_handle) {
+ list_del(&agg_vsi_info->list_entry);
+ devm_kfree(ice_hw_to_dev(pi->hw),
+ agg_vsi_info);
+ return;
+ }
+ }
+}
+
+/**
+ * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @owner: LAN or RDMA
+ *
+ * This function removes the VSI and its LAN or RDMA children nodes from the
+ * scheduler tree.
+ */
+static enum ice_status
+ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_vsi_ctx *vsi_ctx;
+ u8 i, j = 0;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return status;
+ mutex_lock(&pi->sched_lock);
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ goto exit_sched_rm_vsi_cfg;
+
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ struct ice_sched_node *vsi_node, *tc_node;
+
+ tc_node = ice_sched_get_tc_node(pi, i);
+ if (!tc_node)
+ continue;
+
+ vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
+ if (!vsi_node)
+ continue;
+
+ while (j < vsi_node->num_children) {
+ if (vsi_node->children[j]->owner == owner) {
+ ice_free_sched_node(pi, vsi_node->children[j]);
+
+ /* reset the counter again since the num
+ * children will be updated after node removal
+ */
+ j = 0;
+ } else {
+ j++;
+ }
+ }
+ /* remove the VSI if it has no children */
+ if (!vsi_node->num_children) {
+ ice_free_sched_node(pi, vsi_node);
+ vsi_ctx->sched.vsi_node[i] = NULL;
+
+ /* clean up agg related vsi info if any */
+ ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+ }
+ if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ vsi_ctx->sched.max_lanq[i] = 0;
+ }
+ status = 0;
+
+exit_sched_rm_vsi_cfg:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its LAN children nodes from scheduler tree
+ * for all TCs.
+ */
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+ return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 5dc9cfa04c58..da5b4c166da8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -12,6 +12,7 @@
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ u16 vsi_handle;
};
struct ice_sched_agg_info {
@@ -25,6 +26,7 @@ struct ice_sched_agg_info {
/* FW AQ command calls */
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_clear_port(struct ice_port_info *pi);
void ice_sched_cleanup_all(struct ice_hw *hw);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
@@ -39,4 +41,5 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 027eba4e13f8..533b989a23e1 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -46,7 +46,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
* @link_speed: variable containing the link_speed to be converted
*
* Convert link speed supported by HW to link speed supported by virtchnl.
- * If adv_link_support is true, then return link speed in Mbps. Else return
+ * If adv_link_support is true, then return link speed in Mbps. Else return
* link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
* needs to cast back to an enum virtchnl_link_speed in the case where
* adv_link_support is false, but when adv_link_support is true the caller can
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 40c9c6558956..2e5693107fa4 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -92,8 +92,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
*/
-enum ice_status
-ice_init_def_sw_recp(struct ice_hw *hw)
+enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
{
struct ice_sw_recipe *recps;
u8 i;
@@ -130,7 +129,7 @@ ice_init_def_sw_recp(struct ice_hw *hw)
*
* NOTE: *req_desc is both an input/output parameter.
* The caller of this function first calls this function with *request_desc set
- * to 0. If the response from f/w has *req_desc set to 0, all the switch
+ * to 0. If the response from f/w has *req_desc set to 0, all the switch
* configuration information has been returned; if non-zero (meaning not all
* the information was returned), the caller should call this function again
* with *req_desc set to the previous value returned by f/w to get the
@@ -629,25 +628,36 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
/**
* ice_fill_sw_info - Helper function to populate lb_en and lan_en
* @hw: pointer to the hardware structure
- * @f_info: filter info structure to fill/update
+ * @fi: filter info structure to fill/update
*
* This helper function populates the lb_en and lan_en elements of the provided
* ice_fltr_info struct using the switch's type and characteristics of the
* switch rule being configured.
*/
-static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
+static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
- f_info->lb_en = false;
- f_info->lan_en = false;
- if ((f_info->flag & ICE_FLTR_TX) &&
- (f_info->fltr_act == ICE_FWD_TO_VSI ||
- f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
- f_info->fltr_act == ICE_FWD_TO_Q ||
- f_info->fltr_act == ICE_FWD_TO_QGRP)) {
- f_info->lb_en = true;
- if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
- is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
- f_info->lan_en = true;
+ fi->lb_en = false;
+ fi->lan_en = false;
+ if ((fi->flag & ICE_FLTR_TX) &&
+ (fi->fltr_act == ICE_FWD_TO_VSI ||
+ fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
+ fi->fltr_act == ICE_FWD_TO_Q ||
+ fi->fltr_act == ICE_FWD_TO_QGRP)) {
+ fi->lb_en = true;
+ /* Do not set lan_en to TRUE if
+ * 1. The switch is a VEB AND
+ * 2. Either:
+ * 2.1 The lookup is MAC with unicast addr for MAC, OR
+ * 2.2 The lookup is MAC_VLAN with unicast addr for MAC
+ *
+ * In all other cases, the LAN enable has to be set to true.
+ */
+ if (!(hw->evb_veb &&
+ ((fi->lkup_type == ICE_SW_LKUP_MAC &&
+ is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
+ is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
+ fi->lan_en = true;
}
}
@@ -817,7 +827,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/* Create two back-to-back switch rules and submit them to the HW using
* one memory buffer:
* 1. Large Action
- * 2. Look up tx rx
+ * 2. Look up Tx Rx
*/
lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
@@ -861,7 +871,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
- /* call the fill switch rule to fill the lookup tx rx structure */
+ /* call the fill switch rule to fill the lookup Tx Rx structure */
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
@@ -1158,8 +1168,8 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Call AQ command to add or update previously created VSI list with new VSI.
*
* Helper function to do book keeping associated with adding filter information
- * The algorithm to do the booking keeping is described below :
- * When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
+ * The algorithm to do the book keeping is described below:
+ * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
* if only one VSI has been added till now
* Allocate a new VSI list and add two VSIs
* to this list using switch rule command
@@ -1237,6 +1247,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle = new_fltr->vsi_handle;
enum ice_adminq_opc opcode;
+ if (!m_entry->vsi_list_info)
+ return ICE_ERR_CFG;
+
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
return 0;
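/* Sketch of the book keeping described in the comment above (illustrative
 * pseudo-code; field names are hypothetical): the second subscriber
 * converts a single-VSI rule into a VSI-list rule, later ones only grow
 * the existing list.
 *
 *	if (m_entry->vsi_count == 1) {
 *		// allocate a VSI list, add old + new VSI to it,
 *		// then repoint the switch rule at the list ID
 *	} else {
 *		// list already exists: just add the new VSI to it
 *	}
 */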
@@ -1853,7 +1866,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
/* Update the previous switch rule to a new VSI list which
- * includes current VSI thats requested
+ * includes current VSI that is requested
*/
status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index fe5bbabbb41e..49fc38094185 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -219,7 +219,7 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
/**
* ice_setup_tx_ring - Allocate the Tx descriptors
- * @tx_ring: the tx ring to set up
+ * @tx_ring: the Tx ring to set up
*
* Return 0 on success, negative on error
*/
@@ -324,7 +324,7 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
/**
* ice_setup_rx_ring - Allocate the Rx descriptors
- * @rx_ring: the rx ring to set up
+ * @rx_ring: the Rx ring to set up
*
* Return 0 on success, negative on error
*/
@@ -377,7 +377,7 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
rx_ring->next_to_alloc = val;
/* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
+ * know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
@@ -586,7 +586,7 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
/**
* ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
+ * @rx_ring: Rx descriptor ring to store buffers on
* @old_buf: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
@@ -609,7 +609,7 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
/**
* ice_fetch_rx_buf - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
* @rx_desc: descriptor containing info written by hardware
*
* This function allocates an skb on the fly, and populates it with the page
@@ -686,7 +686,7 @@ static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
* ice_pull_tail - ice specific version of skb_pull_tail
* @skb: pointer to current skb being adjusted
*
- * This function is an ice specific version of __pskb_pull_tail. The
+ * This function is an ice specific version of __pskb_pull_tail. The
* main difference between this version and the original function is that
* this function can make several assumptions about the state of things
* that allow for significant optimizations versus the standard function.
@@ -768,7 +768,7 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
* @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
*
- * This function updates next to clean. If the buffer is an EOP buffer
+ * This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
@@ -904,7 +904,7 @@ checksum_fail:
/**
* ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_ring: Rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
* @ptype: the packet type decoded by hardware
@@ -927,7 +927,7 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
/**
* ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
+ * @rx_ring: Rx ring in play
* @skb: packet to send up
* @vlan_tag: vlan tag for packet
*
@@ -946,11 +946,11 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
*
* This function provides a "bounce buffer" approach to Rx interrupt
- * processing. The advantage to this is that on systems that have
+ * processing. The advantage to this is that on systems that have
* expensive overhead for IOMMU access this provides a means of avoiding
* it by maintaining the mapping of the page to the system.
*
@@ -1103,11 +1103,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* Work is done so exit the polling mode and re-enable the interrupt */
- napi_complete_done(napi, work_done);
- if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
- return 0;
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+ ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
+
+ return min(work_done, budget - 1);
}
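/* For illustration (not part of this patch): this is the standard NAPI
 * busy-poll contract. napi_complete_done() returns false while the stack
 * is busy-polling the queue, so interrupts must stay masked in that case,
 * and a poll routine that has completed must never report the full
 * budget, hence the pattern:
 *
 *	if (likely(napi_complete_done(napi, work_done)))
 *		reenable_irq();			// hypothetical helper
 *	return min(work_done, budget - 1);
 */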
/* helper function for building cmd/type/offset */
@@ -1122,7 +1125,7 @@ build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
- * __ice_maybe_stop_tx - 2nd level check for tx stop conditions
+ * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
@@ -1145,7 +1148,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
}
/**
- * ice_maybe_stop_tx - 1st level check for tx stop conditions
+ * ice_maybe_stop_tx - 1st level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
@@ -1155,6 +1158,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
return 0;
+
return __ice_maybe_stop_tx(tx_ring, size);
}
@@ -1552,7 +1556,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* Finally, we add one to round up. Because 256 isn't an exact multiple of
* 3, we'll underestimate near each multiple of 12K. This is actually more
* accurate as we have 4K - 1 of wiggle room that we can fit into the last
- * segment.  For our purposes this is accurate out to 1M which is orders of
+ * segment. For our purposes this is accurate out to 1M which is orders of
* magnitude greater than our largest possible GSO size.
*
* This would then be implemented as:
@@ -1568,7 +1572,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
}
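
The comment block above approximates division by 12K with a multiply and a shift: 85/2^20 is slightly below 1/12K, so the product underestimates the quotient and the trailing +1 rounds it back up. A quick check of the arithmetic the comment describes (values illustrative, helper name hypothetical):

	/* ceil(size / 12K), approximated as ((size * 85) >> 20) + 1 */
	txd_use_count(12288)  -> (1044480 >> 20) + 1 = 0 + 1 = 1
	txd_use_count(24576)  -> (2088960 >> 20) + 1 = 1 + 1 = 2
	txd_use_count(100000) -> (8500000 >> 20) + 1 = 8 + 1 = 9
	                         exact: ceil(100000 / 12288) = 9
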
/**
- * ice_xmit_desc_count - calculate number of tx descriptors needed
+ * ice_xmit_desc_count - calculate number of Tx descriptors needed
* @skb: send buffer
*
* Returns number of data descriptors needed for this skb.
@@ -1620,7 +1624,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
nr_frags -= ICE_MAX_BUF_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
- /* Initialize size to the negative value of gso_size minus 1.  We
+ /* Initialize size to the negative value of gso_size minus 1. We
 * use this as the worst case scenario in which the frag ahead
* of us only provides one byte which is why we are limited to 6
* descriptors for a single transmit as the header and previous
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f4dbc81c1988..0ea428104215 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -124,6 +124,8 @@ struct ice_phy_info {
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
+ u32 valid_functions;
+
/* TX/RX queues */
u16 num_rxq; /* Number/Total RX queues */
u16 rxq_first_id; /* First queue ID for RX queues */
@@ -150,7 +152,7 @@ struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
u32 num_allocd_vfs; /* Number of allocated VFs */
u32 vf_base_id; /* Logical ID of the first VF */
- u32 guaranteed_num_vsi;
+ u32 guar_num_vsi;
};
/* Device wide capabilities */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index e71065f9d391..05ff4f910649 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -156,8 +156,6 @@ static void ice_free_vf_res(struct ice_vf *vf)
clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}
-/***********************enable_vf routines*****************************/
-
/**
* ice_dis_vf_mappings
* @vf: pointer to the VF structure
@@ -215,6 +213,15 @@ void ice_free_vfs(struct ice_pf *pf)
while (test_and_set_bit(__ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
/* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
@@ -228,15 +235,6 @@ void ice_free_vfs(struct ice_pf *pf)
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
}
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
-
tmp = pf->num_alloc_vfs;
pf->num_vf_qps = 0;
pf->num_alloc_vfs = 0;
@@ -454,7 +452,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
- * We dont want to reconfigure interrupts since AVF driver doesn't
+ * We don't want to reconfigure interrupts since AVF driver doesn't
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
@@ -1105,7 +1103,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
* ice_process_vflr_event - Free VF resources via IRQ calls
* @pf: pointer to the PF structure
*
- * called from the VLFR IRQ handler to
+ * called from the VFLR IRQ handler to
* free up VF resources and state variables
*/
void ice_process_vflr_event(struct ice_pf *pf)
@@ -1764,7 +1762,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Tx queue info from VF into VSI */
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
- /* copy Rx queue info from VF into vsi */
+ /* copy Rx queue info from VF into VSI */
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
@@ -1830,7 +1828,7 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
* @msg: pointer to the msg buffer
* @set: true if mac filters are being set, false otherwise
*
- * add guest mac address filter
+ * add guest MAC address filter
*/
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
@@ -1968,9 +1966,9 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
* @msg: pointer to the msg buffer
*
* VFs get a default number of queues but can use this message to request a
- * different number.  If the request is successful, PF will reset the VF and
+ * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
- * available queue pairs via virtchnl message response to vf.
+ * available queue pairs via virtchnl message response to VF.
*/
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
@@ -1991,7 +1989,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
if (req_queues <= 0) {
dev_err(&pf->pdev->dev,
- "VF %d tried to request %d queues. Ignoring.\n",
+ "VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
} else if (req_queues > ICE_MAX_QS_PER_VF) {
dev_err(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 10131e0180f9..01470a8ee03a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -70,7 +70,7 @@ struct ice_vf {
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
- u8 num_req_qs; /* num of queue pairs requested by VF */
+ u8 num_req_qs; /* num of queue pairs requested by VF */
};
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 8a28f3388f69..01fcfc6f3415 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -334,6 +334,7 @@
#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I210_RXPBSIZE_MASK 0x0000003F
+#define I210_RXPBSIZE_PB_30KB 0x0000001E
#define I210_RXPBSIZE_PB_32KB 0x00000020
#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
#define I210_TXPBSIZE_MASK 0xC0FFFFFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index c54ebedca6da..c393cb2c0f16 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -842,6 +842,7 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
nvm_word = E1000_INVM_DEFAULT_AL;
tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
+ phy_word = E1000_PHY_PLL_UNCONF;
for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
/* check current state directly from internal PHY */
igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ca54e268d157..fe1592ae8769 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -515,7 +515,7 @@ struct igb_adapter {
/* OS defined structs */
struct pci_dev *pdev;
- spinlock_t stats64_lock;
+ struct mutex stats64_lock;
struct rtnl_link_stats64 stats64;
/* structs defined in e1000_hw.h */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 5acf3b743876..7426060b678f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2113,7 +2113,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
return -EOPNOTSUPP;
if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
@@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
int i, j;
char *p;
- spin_lock(&adapter->stats64_lock);
+ mutex_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
@@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
i += IGB_RX_QUEUE_STATS_LEN;
}
- spin_unlock(&adapter->stats64_lock);
+ mutex_unlock(&adapter->stats64_lock);
}
static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5df88ad8ac81..87bdf1604ae2 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1850,13 +1850,12 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
* configuration' in respect to these parameters.
*/
- netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \
- idleslope %d sendslope %d hiCredit %d \
- locredit %d\n",
- (ring->cbs_enable) ? "enabled" : "disabled",
- (ring->launchtime_enable) ? "enabled" : "disabled", queue,
- ring->idleslope, ring->sendslope, ring->hicredit,
- ring->locredit);
+ netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
+ ring->cbs_enable ? "enabled" : "disabled",
+ ring->launchtime_enable ? "enabled" : "disabled",
+ queue,
+ ring->idleslope, ring->sendslope,
+ ring->hicredit, ring->locredit);
}
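
The netdev_dbg() rewrite above fixes a classic C pitfall: a backslash-newline inside a string literal splices the next source line into the string, indentation included, so the old format string contained long runs of whitespace. Adjacent string literals concatenate without that problem, though kernel style prefers one unbroken literal for the format so messages stay greppable. An illustration with hypothetical strings:

	/* Broken: the continuation splices in the next line's indentation */
	pr_info("part one \
	part two\n");		/* format becomes "part one \tpart two\n" */

	/* Fine: adjacent literals concatenate cleanly */
	pr_info("part one "
		"part two\n");	/* format becomes "part one part two\n" */
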
static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
@@ -1935,7 +1934,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter)
val = rd32(E1000_RXPBS);
val &= ~I210_RXPBSIZE_MASK;
- val |= I210_RXPBSIZE_PB_32KB;
+ val |= I210_RXPBSIZE_PB_30KB;
wr32(E1000_RXPBS, val);
/* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
@@ -2204,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter)
del_timer_sync(&adapter->phy_info_timer);
/* record the stats before reset*/
- spin_lock(&adapter->stats64_lock);
+ mutex_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
- spin_unlock(&adapter->stats64_lock);
+ mutex_unlock(&adapter->stats64_lock);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -3841,7 +3840,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
spin_lock_init(&adapter->nfc_lock);
- spin_lock_init(&adapter->stats64_lock);
+ mutex_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
switch (hw->mac.type) {
case e1000_82576:
@@ -5407,9 +5406,9 @@ no_wait:
}
}
- spin_lock(&adapter->stats64_lock);
+ mutex_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
- spin_unlock(&adapter->stats64_lock);
+ mutex_unlock(&adapter->stats64_lock);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -6019,6 +6018,8 @@ static int igb_tx_map(struct igb_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
@@ -6147,8 +6148,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
else if (!tso)
igb_tx_csum(tx_ring, first);
- skb_tx_timestamp(skb);
-
if (igb_tx_map(tx_ring, first, hdr_len))
goto cleanup_tx_tstamp;
@@ -6236,10 +6235,10 @@ static void igb_get_stats64(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
- spin_lock(&adapter->stats64_lock);
+ mutex_lock(&adapter->stats64_lock);
igb_update_stats(adapter);
memcpy(stats, &adapter->stats64, sizeof(*stats));
- spin_unlock(&adapter->stats64_lock);
+ mutex_unlock(&adapter->stats64_lock);
}
/**
@@ -7753,11 +7752,13 @@ static int igb_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* If not enough Rx work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- igb_ring_irq_enable(q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ igb_ring_irq_enable(q_vector);
- return 0;
+ return min(work_done, budget - 1);
}
/**
@@ -8770,9 +8771,11 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
rtnl_unlock();
#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
+ if (!runtime) {
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+ }
#endif
status = rd32(E1000_STATUS);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 2b95dc9c7a6a..fd3071f55bd3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -277,17 +277,53 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int igb_ptp_gettimex_82576(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
unsigned long flags;
+ u32 lo, hi;
u64 ns;
spin_lock_irqsave(&igb->tmreg_lock, flags);
- ns = timecounter_read(&igb->tc);
+ ptp_read_system_prets(sts);
+ lo = rd32(E1000_SYSTIML);
+ ptp_read_system_postts(sts);
+ hi = rd32(E1000_SYSTIMH);
+
+ ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int igb_ptp_gettimex_82580(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
+ unsigned long flags;
+ u32 lo, hi;
+ u64 ns;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ ptp_read_system_prets(sts);
+ rd32(E1000_SYSTIMR);
+ ptp_read_system_postts(sts);
+ lo = rd32(E1000_SYSTIML);
+ hi = rd32(E1000_SYSTIMH);
+
+ ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
@@ -296,16 +332,22 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
return 0;
}
-static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int igb_ptp_gettimex_i210(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
unsigned long flags;
spin_lock_irqsave(&igb->tmreg_lock, flags);
- igb_ptp_read_i210(igb, ts);
+ ptp_read_system_prets(sts);
+ rd32(E1000_SYSTIMR);
+ ptp_read_system_postts(sts);
+ ts->tv_nsec = rd32(E1000_SYSTIML);
+ ts->tv_sec = rd32(E1000_SYSTIMH);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
@@ -658,9 +700,12 @@ static void igb_ptp_overflow_check(struct work_struct *work)
struct igb_adapter *igb =
container_of(work, struct igb_adapter, ptp_overflow_work.work);
struct timespec64 ts;
+ u64 ns;
- igb->ptp_caps.gettime64(&igb->ptp_caps, &ts);
+ /* Update the timecounter */
+ ns = timecounter_read(&igb->tc);
+ ts = ns_to_timespec64(ns);
pr_debug("igb overflow check at %lld.%09lu\n",
(long long) ts.tv_sec, ts.tv_nsec);
@@ -1126,7 +1171,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82576;
@@ -1145,7 +1190,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82580;
@@ -1173,7 +1218,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_i210;
adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
adapter->ptp_caps.verify = igb_ptp_verify_pin;
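
The igb conversions above replace the .gettime64 PTP callback with .gettimex64, whose extra ptp_system_timestamp argument lets the driver snapshot the system clock immediately before and after the hardware register read that latches the PHC. User space (via PTP_SYS_OFFSET_EXTENDED) can then bound the PHC/system-time offset far more tightly than by timing the whole ioctl. The general shape of such a callback, with hypothetical foo_* names:

	static int foo_ptp_gettimex(struct ptp_clock_info *ptp,
				    struct timespec64 *ts,
				    struct ptp_system_timestamp *sts)
	{
		struct foo_adapter *ad = container_of(ptp, struct foo_adapter,
						      ptp_caps);
		unsigned long flags;
		u32 lo, hi;
		u64 ns;

		spin_lock_irqsave(&ad->tmreg_lock, flags);

		/* Bracket the latching read with system-time snapshots */
		ptp_read_system_prets(sts);
		lo = foo_read_systim_low(ad);	/* this read latches */
		ptp_read_system_postts(sts);
		hi = foo_read_systim_high(ad);

		ns = timecounter_cyc2time(&ad->tc, ((u64)hi << 32) | lo);
		spin_unlock_irqrestore(&ad->tmreg_lock, flags);

		*ts = ns_to_timespec64(ns);
		return 0;
	}
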
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 163e5838f7c2..a3cd7ac48d4b 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -241,7 +241,7 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
s32 err;
u16 i;
- WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+ lockdep_assert_held(&hw->mbx_lock);
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
@@ -279,7 +279,7 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
s32 err;
u16 i;
- WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+ lockdep_assert_held(&hw->mbx_lock);
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
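
Swapping WARN_ON_ONCE(!spin_is_locked(...)) for lockdep_assert_held() fixes two defects at once: spin_is_locked() only reports that someone holds the lock, not the current context, and on !CONFIG_SMP builds spinlocks compile away so spin_is_locked() is always false, making the old assertion fire spuriously on every call. lockdep_assert_held() verifies the actual owner and costs nothing unless lockdep is enabled. Usage sketch, names hypothetical:

	static void foo_mbx_access(struct foo_hw *hw)
	{
		/* No-op without CONFIG_LOCKDEP; with it, splats unless the
		 * current task holds hw->mbx_lock.
		 */
		lockdep_assert_held(&hw->mbx_lock);

		/* ... touch mailbox registers ... */
	}
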
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 820d49eb41ab..4eab83faec62 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1186,10 +1186,13 @@ static int igbvf_poll(struct napi_struct *napi, int budget)
igbvf_clean_rx_irq(adapter, &work_done, budget);
- /* If not enough Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ if (work_done == budget)
+ return budget;
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
if (adapter->requested_itr & 3)
igbvf_set_itr(adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index cdf18a5d9e08..b1039dd3dd13 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -5,23 +5,12 @@
#define _IGC_H_
#include <linux/kobject.h>
-
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-
#include <linux/ethtool.h>
-
#include <linux/sctp.h>
-#define IGC_ERR(args...) pr_err("igc: " args)
-
-#define PFX "igc: "
-
-#include <linux/timecounter.h>
-#include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
-
#include "igc_hw.h"
/* main */
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 832da609d9a7..df40af759542 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -237,7 +237,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val = 0;
- u32 ctrl_ext;
if (hw->phy.media_type != igc_media_type_copper) {
phy->type = igc_phy_none;
@@ -247,8 +246,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
phy->reset_delay_us = 100;
- ctrl_ext = rd32(IGC_CTRL_EXT);
-
/* set lan id */
hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
IGC_STATUS_FUNC_SHIFT;
@@ -287,8 +284,6 @@ out:
static s32 igc_get_invariants_base(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
- u32 link_mode = 0;
- u32 ctrl_ext = 0;
s32 ret_val = 0;
switch (hw->device_id) {
@@ -302,9 +297,6 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
hw->phy.media_type = igc_media_type_copper;
- ctrl_ext = rd32(IGC_CTRL_EXT);
- link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK;
-
/* mac initialization and operations */
ret_val = igc_init_mac_params_base(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9d85707e8a81..f20183037fb2 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -865,6 +865,8 @@ static int igc_tx_map(struct igc_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
@@ -959,8 +961,6 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
- skb_tx_timestamp(skb);
-
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = protocol;
@@ -1108,7 +1108,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
/* update pointers within the skb to store the data */
skb_reserve(skb, IGC_SKB_PAD);
- __skb_put(skb, size);
+ __skb_put(skb, size);
/* update buffer offset */
#if (PAGE_SIZE < 8192)
@@ -1160,9 +1160,9 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
(va + headlen) - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
+ rx_buffer->page_offset ^= truesize;
#else
- rx_buffer->page_offset += truesize;
+ rx_buffer->page_offset += truesize;
#endif
} else {
rx_buffer->pagecnt_bias++;
@@ -1668,8 +1668,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
tx_buffer->next_to_watch,
jiffies,
tx_buffer->next_to_watch->wb.status);
- netif_stop_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
/* we are about to reset, no point in enabling stuff */
return true;
@@ -1700,20 +1700,6 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
}
/**
- * igc_ioctl - I/O control method
- * @netdev: network interface device structure
- * @ifreq: frequency
- * @cmd: command
- */
-static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
*/
@@ -2866,11 +2852,13 @@ static int igc_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- /* If not enough Rx work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- igc_ring_irq_enable(q_vector);
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ igc_ring_irq_enable(q_vector);
- return 0;
+ return min(work_done, budget - 1);
}
/**
@@ -3358,7 +3346,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
if (err)
goto err_set_queues;
@@ -3445,7 +3433,6 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_get_stats = igc_get_stats,
- .ndo_do_ioctl = igc_ioctl,
};
/* PCIe configuration access */
@@ -3532,26 +3519,23 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
} else {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
- IGC_ERR("Wrong DMA configuration, aborting\n");
+ dev_err(&pdev->dev, "igc: Wrong DMA config\n");
goto err_dma;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 143bdd5ee2a0..08d85e336bd4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -12,6 +12,7 @@
#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
+#include <linux/phy.h>
#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
@@ -561,6 +562,7 @@ struct ixgbe_adapter {
struct net_device *netdev;
struct bpf_prog *xdp_prog;
struct pci_dev *pdev;
+ struct mii_bus *mii_bus;
unsigned long state;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 732b1e6ecc43..acba067cc15a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2206,7 +2206,8 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
+ WAKE_FILTER))
return -EOPNOTSUPP;
if (ixgbe_wol_exclusion(adapter, wol))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index fd1b0546fd67..ff85ce5791a3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -4,6 +4,7 @@
#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>
+#include <linux/if_bridge.h>
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
@@ -693,7 +694,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
} else {
struct tx_sa tsa;
- if (adapter->num_vfs)
+ if (adapter->num_vfs &&
+ adapter->bridge_mode != BRIDGE_MODE_VEPA)
return -EOPNOTSUPP;
/* find the first unused index */
@@ -1063,11 +1065,13 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
struct ixgbe_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs;
+ struct sec_path *sp;
struct tx_sa *tsa;
- if (unlikely(!first->skb->sp->len)) {
+ sp = skb_sec_path(first->skb);
+ if (unlikely(!sp->len)) {
netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
- __func__, first->skb->sp->len);
+ __func__, sp->len);
return 0;
}
@@ -1157,6 +1161,7 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
struct xfrm_state *xs = NULL;
struct ipv6hdr *ip6 = NULL;
struct iphdr *ip4 = NULL;
+ struct sec_path *sp;
void *daddr;
__be32 spi;
u8 *c_hdr;
@@ -1196,12 +1201,12 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
if (unlikely(!xs))
return;
- skb->sp = secpath_dup(skb->sp);
- if (unlikely(!skb->sp))
+ sp = secpath_set(skb);
+ if (unlikely(!sp))
return;
- skb->sp->xvec[skb->sp->len++] = xs;
- skb->sp->olen++;
+ sp->xvec[sp->len++] = xs;
+ sp->olen++;
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
xo->status = CRYPTO_SUCCESS;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 113b38e0defb..daff8183534b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -39,6 +39,7 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_phy.h"
#include "ixgbe_sriov.h"
#include "ixgbe_model.h"
#include "ixgbe_txrx_common.h"
@@ -6077,9 +6078,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* Disable Rx */
ixgbe_disable_rx(adapter);
- /* synchronize_sched() needed for pending XDP buffers to drain */
+ /* synchronize_rcu() needed for pending XDP buffers to drain */
if (adapter->xdp_ring[0])
- synchronize_sched();
+ synchronize_rcu();
ixgbe_irq_disable(adapter);
@@ -8269,6 +8270,8 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/*
* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
@@ -8646,8 +8649,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
}
}
- skb_tx_timestamp(skb);
-
#ifdef CONFIG_PCI_IOV
/*
* Use the l2switch_enable flag - would be false if the DMA
@@ -8695,7 +8696,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#endif /* IXGBE_FCOE */
#ifdef CONFIG_IXGBE_IPSEC
- if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
+ if (secpath_exists(skb) &&
+ !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
@@ -8789,6 +8791,15 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
u16 value;
int rc;
+ if (adapter->mii_bus) {
+ int regnum = addr;
+
+ if (devad != MDIO_DEVAD_NONE)
+ regnum |= (devad << 16) | MII_ADDR_C45;
+
+ return mdiobus_read(adapter->mii_bus, prtad, regnum);
+ }
+
if (prtad != hw->phy.mdio.prtad)
return -EINVAL;
rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
@@ -8803,6 +8814,15 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ if (adapter->mii_bus) {
+ int regnum = addr;
+
+ if (devad != MDIO_DEVAD_NONE)
+ regnum |= (devad << 16) | MII_ADDR_C45;
+
+ return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
+ }
+
if (prtad != hw->phy.mdio.prtad)
return -EINVAL;
return hw->phy.ops.write_reg(hw, addr, devad, value);
@@ -9979,7 +9999,8 @@ static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter,
}
static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh, u16 flags)
+ struct nlmsghdr *nlh, u16 flags,
+ struct netlink_ext_ack *extack)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct nlattr *attr, *br_spec;
@@ -10191,7 +10212,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
*/
if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
#ifdef CONFIG_IXGBE_IPSEC
- if (!skb->sp)
+ if (!secpath_exists(skb))
#endif
features &= ~NETIF_F_TSO;
}
@@ -10476,7 +10497,7 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring)
ixgbe_disable_rxr_hw(adapter, rx_ring);
if (xdp_ring)
- synchronize_sched();
+ synchronize_rcu();
/* Rx/Tx/XDP Tx share the same napi context. */
napi_disable(&rx_ring->q_vector->napi);
@@ -10517,7 +10538,8 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
ixgbe_configure_rx_ring(adapter, rx_ring);
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
- clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+ if (xdp_ring)
+ clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
}
/**
@@ -11119,6 +11141,8 @@ skip_sriov:
IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
true);
+ ixgbe_mii_bus_init(hw);
+
return 0;
err_register:
@@ -11169,6 +11193,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
set_bit(__IXGBE_REMOVING, &adapter->state);
cancel_work_sync(&adapter->service_task);
+ if (adapter->mii_bus)
+ mdiobus_unregister(adapter->mii_bus);
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 919a7af84b42..cc4907f9ff02 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -3,6 +3,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/sched.h>
#include "ixgbe.h"
@@ -658,6 +659,304 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
return status;
}
+#define IXGBE_HW_READ_REG(addr) IXGBE_READ_REG(hw, addr)
+
+/**
+ * ixgbe_msca_cmd - Write the command register and poll for completion/timeout
+ * @hw: pointer to hardware structure
+ * @cmd: command register value to write
+ **/
+static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd);
+
+ return readx_poll_timeout(IXGBE_HW_READ_REG, IXGBE_MSCA, cmd,
+ !(cmd & IXGBE_MSCA_MDI_COMMAND), 10,
+ 10 * IXGBE_MDIO_COMMAND_TIMEOUT);
+}
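
readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> repeatedly stores op(addr) into val until cond becomes true or timeout_us microseconds elapse, sleeping sleep_us between reads; it returns 0 on success and -ETIMEDOUT otherwise. The IXGBE_HW_READ_REG wrapper exists only because the macro expects a single-argument accessor. Open-coded, the call above behaves roughly like this sketch:

	u32 cmd;
	unsigned long timeout = jiffies +
		usecs_to_jiffies(10 * IXGBE_MDIO_COMMAND_TIMEOUT);

	for (;;) {
		cmd = IXGBE_READ_REG(hw, IXGBE_MSCA);
		if (!(cmd & IXGBE_MSCA_MDI_COMMAND))
			break;			/* command completed */
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;	/* macro does one final re-read */
		usleep_range(10, 20);		/* the sleep_us argument */
	}
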
+
+/**
+ * ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @regnum: register number
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
+ int regnum, u32 gssr)
+{
+ u32 hwaddr, cmd;
+ s32 data;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return -EBUSY;
+
+ hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+ if (regnum & MII_ADDR_C45) {
+ hwaddr |= regnum & GENMASK(21, 0);
+ cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
+ } else {
+ hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+ cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
+ IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
+ }
+
+ data = ixgbe_msca_cmd(hw, cmd);
+ if (data < 0)
+ goto mii_bus_read_done;
+
+ /* For a clause 45 access the address cycle just completed; we still
+ * need to issue the read command. Otherwise just fetch the data.
+ */
+ if (!(regnum & MII_ADDR_C45))
+ goto do_mii_bus_read;
+
+ cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND;
+ data = ixgbe_msca_cmd(hw, cmd);
+ if (data < 0)
+ goto mii_bus_read_done;
+
+do_mii_bus_read:
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
+
+mii_bus_read_done:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return data;
+}
+
+/**
+ * ixgbe_mii_bus_write_generic - Write a clause 22/45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
+ int regnum, u16 val, u32 gssr)
+{
+ u32 hwaddr, cmd;
+ s32 err;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return -EBUSY;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
+
+ hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+ if (regnum & MII_ADDR_C45) {
+ hwaddr |= regnum & GENMASK(21, 0);
+ cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
+ } else {
+ hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+ cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+ }
+
+ /* For clause 45 this is an address cycle, for clause 22 this is the
+ * entire transaction
+ */
+ err = ixgbe_msca_cmd(hw, cmd);
+ if (err < 0 || !(regnum & MII_ADDR_C45))
+ goto mii_bus_write_done;
+
+ cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND;
+ err = ixgbe_msca_cmd(hw, cmd);
+
+mii_bus_write_done:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return err;
+}
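
Both helpers key off MII_ADDR_C45 in regnum: phylib's convention (at the time of this patch) packs a clause 45 access as MII_ADDR_C45 | (devad << 16) | reg, while plain clause 22 uses just the 5-bit register number. The ixgbe_mdio_read()/ixgbe_mdio_write() hunks in ixgbe_main.c build exactly this encoding before calling into the mii_bus. For illustration:

	#include <linux/mdio.h>
	#include <linux/mii.h>

	/* Clause 22: PHY ID high word from PHY address 4 */
	int id1 = mdiobus_read(bus, 4, MII_PHYSID1);

	/* Clause 45: PMA/PMD device, register 0, PHY address 4 */
	int c45 = mdiobus_read(bus, 4,
			       MII_ADDR_C45 | (MDIO_MMD_PMAPMD << 16) | 0);
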
+
+/**
+ * ixgbe_mii_bus_read - Read a clause 22/45 register
+ * @bus: pointer to the mii_bus structure
+ * @addr: address
+ * @regnum: register number
+ **/
+static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+}
+
+/**
+ * ixgbe_mii_bus_write - Write a clause 22/45 register
+ * @bus: pointer to the mii_bus structure
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a
+ * @bus: pointer to the mii_bus structure
+ * @addr: address
+ * @regnum: register number
+ **/
+static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
+ int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+ return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a
+ * @bus: pointer to the mii_bus structure
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr,
+ int regnum, u16 val)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+ return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+}
+
+/**
+ * ixgbe_get_first_secondary_devfn - get first device downstream of root port
+ * @devfn: PCI_DEVFN of root port on domain 0, bus 0
+ *
+ * Returns the pci_dev pointer for PCI_DEVFN(0, 0) on the subordinate side
+ * of the root port at domain 0, bus 0, devfn = 'devfn'
+ **/
+static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
+{
+ struct pci_dev *rp_pdev;
+ int bus;
+
+ rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
+ if (rp_pdev && rp_pdev->subordinate) {
+ bus = rp_pdev->subordinate->number;
+ return pci_get_domain_bus_and_slot(0, bus, 0);
+ }
+
+ return NULL;
+}
+
+/**
+ * ixgbe_x550em_a_has_mii - is this the first ixgbe x550em_a PCI function?
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if hw points to the lowest numbered PCI B:D.F x550em_a device in
+ * the SoC. There are up to 4 MACs sharing a single MDIO bus on the x550em_a,
+ * but we only want to register one MDIO bus.
+ **/
+static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *func0_pdev;
+
+ /* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
+ * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0.
+ * It's not valid for function 0 to be disabled while function 1 is up,
+ * so the lowest numbered ixgbe dev will be device 0 function 0 on one
+ * of those two root ports
+ */
+ func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
+ if (func0_pdev) {
+ if (func0_pdev == pdev)
+ return true;
+ else
+ return false;
+ }
+ func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
+ if (func0_pdev == pdev)
+ return true;
+
+ return false;
+}
+
+/**
+ * ixgbe_mii_bus_init - mii_bus structure setup
+ * @hw: pointer to hardware structure
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbe_mii_bus_init initializes a mii_bus structure in adapter
+ **/
+s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = &adapter->netdev->dev;
+ struct mii_bus *bus;
+
+ adapter->mii_bus = devm_mdiobus_alloc(dev);
+ if (!adapter->mii_bus)
+ return -ENOMEM;
+
+ bus = adapter->mii_bus;
+
+ switch (hw->device_id) {
+ /* C3000 SoCs */
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ if (!ixgbe_x550em_a_has_mii(hw))
+ goto ixgbe_no_mii_bus;
+ bus->read = &ixgbe_x550em_a_mii_bus_read;
+ bus->write = &ixgbe_x550em_a_mii_bus_write;
+ break;
+ default:
+ bus->read = &ixgbe_mii_bus_read;
+ bus->write = &ixgbe_mii_bus_write;
+ break;
+ }
+
+ /* Use the position of the device in the PCI hierarchy as the id */
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
+ pci_name(pdev));
+
+ bus->name = "ixgbe-mdio";
+ bus->priv = adapter;
+ bus->parent = dev;
+ bus->phy_mask = GENMASK(31, 0);
+
+ /* Support clause 22/45 natively. ixgbe_probe() sets MDIO_EMULATE_C22;
+ * unfortunately that causes some clause 22 frames to be sent with
+ * clause 45 addressing. We don't want that.
+ */
+ hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
+
+ return mdiobus_register(bus);
+
+ixgbe_no_mii_bus:
+ devm_mdiobus_free(dev, bus);
+ adapter->mii_bus = NULL;
+ return -ENODEV;
+}
+
/**
* ixgbe_setup_phy_link_generic - Set and restart autoneg
* @hw: pointer to hardware structure
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 64e44e01c973..214b01085718 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -120,6 +120,8 @@
/* SFP+ SFF-8472 Compliance code */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
+s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw);
+
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index b3e0d8bb5cbd..d81a50dc9535 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -443,22 +443,52 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
/**
- * ixgbe_ptp_gettime
+ * ixgbe_ptp_gettimex
* @ptp: the ptp clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec to hold the PHC timestamp
+ * @sts: structure to hold the system time before and after reading the PHC
*
* read the timecounter and return the correct value in ns,
* after converting it into a struct timespec64.
*/
-static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
+ struct ixgbe_hw *hw = &adapter->hw;
unsigned long flags;
- u64 ns;
+ u64 ns, stamp;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
- ns = timecounter_read(&adapter->hw_tc);
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ /* Upper 32 bits represent billions of cycles, lower 32 bits
+ * represent cycles. However, we use timespec64_to_ns for the
+ * correct math even though the units haven't been corrected
+ * yet.
+ */
+ ptp_read_system_prets(sts);
+ IXGBE_READ_REG(hw, IXGBE_SYSTIMR);
+ ptp_read_system_postts(sts);
+ ts->tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ ts->tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH);
+ stamp = timespec64_to_ns(ts);
+ break;
+ default:
+ ptp_read_system_prets(sts);
+ stamp = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ ptp_read_system_postts(sts);
+ stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
+ break;
+ }
+
+ ns = timecounter_cyc2time(&adapter->hw_tc, stamp);
+
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -567,10 +597,14 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
{
bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
IXGBE_OVERFLOW_PERIOD);
- struct timespec64 ts;
+ unsigned long flags;
if (timeout) {
- ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
+ /* Update the timecounter */
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ timecounter_read(&adapter->hw_tc);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
adapter->last_overflow_check = jiffies;
}
}
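
Both PTP hunks above reduce the periodic overflow check to a bare timecounter_read() under the lock. That is all the watchdog has to do: timecounter_read() folds elapsed hardware cycles into the accumulated nanoseconds and advances cycle_last, and as long as it runs at least once per wrap period of the underlying cycle counter, no cycles are lost; converting the result to a timespec or taking system timestamps there was wasted work. Minimal form, names hypothetical:

	static void foo_overflow_check(struct work_struct *work)
	{
		struct foo_adapter *ad = container_of(work, struct foo_adapter,
						      overflow_work.work);
		unsigned long flags;

		/* Must run at least once per counter wrap period */
		spin_lock_irqsave(&ad->tmreg_lock, flags);
		timecounter_read(&ad->tc);
		spin_unlock_irqrestore(&ad->tmreg_lock, flags);

		schedule_delayed_work(&ad->overflow_work,
				      FOO_OVERFLOW_CHECK_PERIOD);
	}
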
@@ -1216,7 +1250,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540;
@@ -1233,7 +1267,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
@@ -1249,7 +1283,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
adapter->ptp_setup_sdp = NULL;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 5dacfc870259..345701af7749 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -700,7 +700,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
u8 num_tcs = adapter->hw_tcs;
u32 reg_val;
u32 queue;
- u32 word;
/* remove VLAN filters belonging to this VF */
ixgbe_clear_vf_vlans(adapter, vf);
@@ -758,6 +757,14 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
}
}
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+static void ixgbe_vf_clear_mbx(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 word;
+
/* Clear VF's mailbox memory */
for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
@@ -831,6 +838,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* reset the filters for the device */
ixgbe_vf_reset_event(adapter, vf);
+ ixgbe_vf_clear_mbx(adapter, vf);
+
/* set vf mac address */
if (!is_zero_ether_addr(vf_mac))
ixgbe_set_vf_mac(adapter, vf, vf_mac);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 10dbaf4f6e80..9c42f741ed5e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -2262,7 +2262,9 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
*autoneg = false;
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index e8a3231be0bf..5170dd9d8705 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -450,12 +450,14 @@ int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
struct ixgbevf_ipsec *ipsec = adapter->ipsec;
struct xfrm_state *xs;
+ struct sec_path *sp;
struct tx_sa *tsa;
u16 sa_idx;
- if (unlikely(!first->skb->sp->len)) {
+ sp = skb_sec_path(first->skb);
+ if (unlikely(!sp->len)) {
netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
- __func__, first->skb->sp->len);
+ __func__, sp->len);
return 0;
}
@@ -546,6 +548,7 @@ void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
struct xfrm_state *xs = NULL;
struct ipv6hdr *ip6 = NULL;
struct iphdr *ip4 = NULL;
+ struct sec_path *sp;
void *daddr;
__be32 spi;
u8 *c_hdr;
@@ -585,12 +588,12 @@ void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
if (unlikely(!xs))
return;
- skb->sp = secpath_dup(skb->sp);
- if (unlikely(!skb->sp))
+ sp = secpath_set(skb);
+ if (unlikely(!sp))
return;
- skb->sp->xvec[skb->sp->len++] = xs;
- skb->sp->olen++;
+ sp->xvec[sp->len++] = xs;
+ sp->olen++;
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
xo->status = CRYPTO_SUCCESS;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 5e47ede7e832..49e23afa05a2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1293,16 +1293,20 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
- /* all work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- if (adapter->rx_itr_setting == 1)
- ixgbevf_set_itr(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
- !test_bit(__IXGBEVF_REMOVING, &adapter->state))
- ixgbevf_irq_enable_queues(adapter,
- BIT(q_vector->v_idx));
- return 0;
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done))) {
+ if (adapter->rx_itr_setting == 1)
+ ixgbevf_set_itr(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
+ !test_bit(__IXGBEVF_REMOVING, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter,
+ BIT(q_vector->v_idx));
+ }
+
+ return min(work_done, budget - 1);
}
/**
@@ -4016,6 +4020,8 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
@@ -4151,7 +4157,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
first->protocol = vlan_get_protocol(skb);
#ifdef CONFIG_IXGBEVF_IPSEC
- if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
+ if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
goto out_drop;
#endif
tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1e9bcbdc6a90..2f427271a793 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1499,23 +1499,16 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
struct ethtool_link_ksettings *cmd)
{
struct net_device *dev = mp->dev;
- u32 supported, advertising;
phy_ethtool_ksettings_get(dev->phydev, cmd);
/*
* The MAC does not support 1000baseT_Half.
*/
- ethtool_convert_link_mode_to_legacy_u32(&supported,
- cmd->link_modes.supported);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
- supported &= ~SUPPORTED_1000baseT_Half;
- advertising &= ~ADVERTISED_1000baseT_Half;
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ cmd->link_modes.supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ cmd->link_modes.advertising);
return 0;
}
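
The legacy u32 round trip removed above was inherently lossy: ethtool link modes past bit 31 simply do not fit in the old SUPPORTED_*/ADVERTISED_* masks, so converting to u32 and back silently dropped them. Operating on the bitmap directly with the linkmode_* helpers is both shorter and safe for future modes:

	#include <linux/linkmode.h>

	__ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };

	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, modes);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, modes);

	/* MAC can't do 1000baseT_Half: clear only that bit, leaving any
	 * high (>31) link modes untouched.
	 */
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, modes);
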
@@ -3031,10 +3024,12 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
phy->duplex = 0;
- phy->advertising = phy->supported | ADVERTISED_Autoneg;
+ linkmode_copy(phy->advertising, phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phy->advertising);
} else {
phy->autoneg = AUTONEG_DISABLE;
- phy->advertising = 0;
+ linkmode_zero(phy->advertising);
phy->speed = speed;
phy->duplex = duplex;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index e5397c8197b9..9d4568eb2297 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -408,7 +408,6 @@ struct mvneta_port {
struct mvneta_pcpu_stats __percpu *stats;
int pkt_size;
- unsigned int frag_size;
void __iomem *base;
struct mvneta_rx_queue *rxqs;
struct mvneta_tx_queue *txqs;
@@ -2905,7 +2904,9 @@ static void mvneta_rxq_hw_init(struct mvneta_port *pp,
if (!pp->bm_priv) {
/* Set Offset */
mvneta_rxq_offset_set(pp, rxq, 0);
- mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
+ mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
+ PAGE_SIZE :
+ MVNETA_RX_BUF_SIZE(pp->pkt_size));
mvneta_rxq_bm_disable(pp, rxq);
mvneta_rxq_fill(pp, rxq, rxq->size);
} else {
@@ -3760,7 +3761,6 @@ static int mvneta_open(struct net_device *dev)
int ret;
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
- pp->frag_size = PAGE_SIZE;
ret = mvneta_setup_rxqs(pp);
if (ret)
@@ -4248,8 +4248,7 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
/* The Armada 37x documents do not give limits for this other than
* it being an 8-bit register. */
- if (eee->tx_lpi_enabled &&
- (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
+ if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
return -EINVAL;
lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 7a37a37e3fb3..f1dab0b55769 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4375,8 +4375,27 @@ static void mvpp2_phylink_validate(struct net_device *dev,
unsigned long *supported,
struct phylink_link_state *state)
{
+ struct mvpp2_port *port = netdev_priv(dev);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ /* Invalid combinations */
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_10GKR:
+ case PHY_INTERFACE_MODE_XAUI:
+ if (port->gop_id != 0)
+ goto empty_set;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (port->gop_id == 0)
+ goto empty_set;
+ break;
+ default:
+ break;
+ }
+
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
phylink_set(mask, Pause);
@@ -4384,30 +4403,45 @@ static void mvpp2_phylink_validate(struct net_device *dev,
switch (state->interface) {
case PHY_INTERFACE_MODE_10GKR:
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
- phylink_set(mask, 10000baseKR_Full);
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_NA:
+ if (port->gop_id == 0) {
+ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseER_Full);
+ phylink_set(mask, 10000baseKR_Full);
+ }
/* Fall-through */
- default:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_SGMII:
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 10000baseT_Full);
/* Fall-through */
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
phylink_set(mask, 2500baseX_Full);
+ break;
+ default:
+ goto empty_set;
}
bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+
+empty_set:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
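
The reworked mvpp2_phylink_validate() shows the general phylink validate contract: build a mask of what the MAC can do for the given state->interface, AND it into both supported and advertising, and return an all-zero supported mask to reject an interface mode outright (here: 10GKR/XAUI only on GoP 0, RGMII only on the other GoPs). A skeleton of the pattern, foo_* names hypothetical:

	static void foo_validate(struct net_device *dev, unsigned long *supported,
				 struct phylink_link_state *state)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

		if (!foo_interface_ok(dev, state->interface)) {
			/* An empty supported mask means "mode rejected" */
			bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
			return;
		}

		phylink_set(mask, Autoneg);
		phylink_set(mask, 1000baseT_Full);

		bitmap_and(supported, supported, mask,
			   __ETHTOOL_LINK_MODE_MASK_NBITS);
		bitmap_and(state->advertising, state->advertising, mask,
			   __ETHTOOL_LINK_MODE_MASK_NBITS);
	}
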
static void mvpp22_xlg_link_state(struct mvpp2_port *port,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 12db256c8c9f..742f0c1f60df 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -31,6 +31,7 @@
* @resp: command response
* @link_info: link related information
* @event_cb: callback for linkchange events
+ * @event_cb_lock: lock for serializing callback with unregister
* @cmd_pend: flag set before new command is started
* flag cleared after command response is received
* @cgx: parent cgx port
@@ -43,6 +44,7 @@ struct lmac {
u64 resp;
struct cgx_link_user_info link_info;
struct cgx_event_cb event_cb;
+ spinlock_t event_cb_lock;
bool cmd_pend;
struct cgx *cgx;
u8 lmac_id;
@@ -55,6 +57,8 @@ struct cgx {
u8 cgx_id;
u8 lmac_count;
struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct work_struct cgx_cmd_work;
+ struct workqueue_struct *cgx_cmd_workq;
struct list_head cgx_list;
};
@@ -66,6 +70,9 @@ static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX];
/* Convert firmware lmac type encoding to string */
static char *cgx_lmactype_string[LMAC_MODE_MAX];
+/* CGX PHY management internal APIs */
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
+
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
@@ -92,17 +99,21 @@ static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
return cgx->lmac_idmap[lmac_id];
}
-int cgx_get_cgx_cnt(void)
+int cgx_get_cgxcnt_max(void)
{
struct cgx *cgx_dev;
- int count = 0;
+ int idmax = -ENODEV;
list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
- count++;
+ if (cgx_dev->cgx_id > idmax)
+ idmax = cgx_dev->cgx_id;
+
+ if (idmax < 0)
+ return 0;
- return count;
+ return idmax + 1;
}
-EXPORT_SYMBOL(cgx_get_cgx_cnt);
+EXPORT_SYMBOL(cgx_get_cgxcnt_max);
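[Editor's note: why max ID + 1 rather than a plain device count: once cgx_id comes from the hardware (see the probe change later in this patch) the probed IDs can be sparse, and callers index tables by cgx_id. A small standalone illustration with a made-up probe set:]

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical probe result: only CGX2 came up. */
		const int probed_ids[] = { 2 };
		const int n = sizeof(probed_ids) / sizeof(probed_ids[0]);
		int i, count = 0, idmax = -1;

		for (i = 0; i < n; i++) {
			count++;
			if (probed_ids[i] > idmax)
				idmax = probed_ids[i];
		}
		/* A table sized by 'count' (1) breaks indexing by cgx_id (2);
		 * idmax + 1 (3) is safe, as cgx_get_cgxcnt_max() returns. */
		printf("count=%d, cgxcnt_max=%d\n", count, idmax + 1);
		return 0;
	}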
int cgx_get_lmac_cnt(void *cgxd)
{
@@ -445,6 +456,9 @@ static inline void cgx_link_change_handler(u64 lstat,
lmac->link_info = event.link_uinfo;
linfo = &lmac->link_info;
+ /* Ensure callback doesn't get unregistered until we finish it */
+ spin_lock(&lmac->event_cb_lock);
+
if (!lmac->event_cb.notify_link_chg) {
dev_dbg(dev, "cgx port %d:%d Link change handler null",
cgx->cgx_id, lmac->lmac_id);
@@ -455,11 +469,13 @@ static inline void cgx_link_change_handler(u64 lstat,
dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
cgx->cgx_id, lmac->lmac_id,
linfo->link_up ? "UP" : "DOWN", linfo->speed);
- return;
+ goto err;
}
if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
dev_err(dev, "event notification failure\n");
+err:
+ spin_unlock(&lmac->event_cb_lock);
}
static inline bool cgx_cmdresp_is_linkevent(u64 event)
@@ -482,6 +498,60 @@ static inline bool cgx_event_is_linkevent(u64 event)
return false;
}
+static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz,
+ struct cgx *cgx)
+{
+ u64 req = 0;
+ u64 resp;
+ int err;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ if (!err)
+ *prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp);
+
+ return err;
+}
+
+static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr,
+ struct cgx *cgx)
+{
+ u64 req = 0;
+ u64 resp;
+ int err;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ if (!err)
+ *prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp);
+
+ return err;
+}
+
+int cgx_get_mkex_prfl_info(u64 *addr, u64 *size)
+{
+ struct cgx *cgx_dev;
+ int err;
+
+ if (!addr || !size)
+ return -EINVAL;
+
+ cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list);
+ if (!cgx_dev)
+ return -ENXIO;
+
+ err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev);
+ if (err)
+ return -EIO;
+
+ err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev);
+ if (err)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_get_mkex_prfl_info);
+
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
struct lmac *lmac = data;
@@ -548,6 +618,38 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
}
EXPORT_SYMBOL(cgx_lmac_evh_register);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
+{
+ struct lmac *lmac;
+ unsigned long flags;
+ struct cgx *cgx = cgxd;
+
+ lmac = lmac_pdata(lmac_id, cgx);
+ if (!lmac)
+ return -ENODEV;
+
+ spin_lock_irqsave(&lmac->event_cb_lock, flags);
+ lmac->event_cb.notify_link_chg = NULL;
+ lmac->event_cb.data = NULL;
+ spin_unlock_irqrestore(&lmac->event_cb_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_evh_unregister);
+
+static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
+{
+ u64 req = 0;
+ u64 resp;
+
+ if (enable)
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
+ else
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
+
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
u64 req = 0;
@@ -581,6 +683,34 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
return 0;
}
+static void cgx_lmac_linkup_work(struct work_struct *work)
+{
+ struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
+ struct device *dev = &cgx->pdev->dev;
+ int i, err;
+
+ /* Do Link up for all the lmacs */
+ for (i = 0; i < cgx->lmac_count; i++) {
+ err = cgx_fwi_link_change(cgx, i, true);
+ if (err)
+ dev_info(dev, "cgx port %d:%d Link up command failed\n",
+ cgx->cgx_id, i);
+ }
+}
+
+int cgx_lmac_linkup_start(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ if (!cgx)
+ return -ENODEV;
+
+ queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(cgx_lmac_linkup_start);
+
static int cgx_lmac_init(struct cgx *cgx)
{
struct lmac *lmac;
@@ -602,6 +732,7 @@ static int cgx_lmac_init(struct cgx *cgx)
lmac->cgx = cgx;
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
+ spin_lock_init(&lmac->event_cb_lock);
err = request_irq(pci_irq_vector(cgx->pdev,
CGX_LMAC_FWI + i * 9),
cgx_fwi_event_handler, 0, lmac->name, lmac);
@@ -624,6 +755,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
struct lmac *lmac;
int i;
+ if (cgx->cgx_cmd_workq) {
+ flush_workqueue(cgx->cgx_cmd_workq);
+ destroy_workqueue(cgx->cgx_cmd_workq);
+ cgx->cgx_cmd_workq = NULL;
+ }
+
/* Free all lmac related resources */
for (i = 0; i < cgx->lmac_count; i++) {
lmac = cgx->lmac_idmap[i];
@@ -679,8 +816,19 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & CGX_ID_MASK;
+
+ /* init wq for processing linkup requests */
+ INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
+ cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
+ if (!cgx->cgx_cmd_workq) {
+ dev_err(dev, "alloc workqueue failed for cgx cmd");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
list_add(&cgx->cgx_list, &cgx_list);
- cgx->cgx_id = cgx_get_cgx_cnt() - 1;
cgx_link_usertable_init();
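[Editor's note: deriving cgx_id from the BAR0 address (bits 26:24, per the shift and CGX_ID_MASK above) rather than from the list length makes the ID stable regardless of probe order. The same extraction written with the kernel's bitfield helpers, as a sketch:]

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	/* Equivalent to (pci_resource_start(...) >> 24) & CGX_ID_MASK */
	static u8 cgx_id_from_bar0(u64 bar0_addr)
	{
		return FIELD_GET(GENMASK_ULL(26, 24), bar0_addr);
	}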
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 0a66d2717442..206dc5dc1df8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -20,40 +20,41 @@
/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM 0
-#define MAX_CGX 3
+#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
+#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
/* Registers */
#define CGXX_CMRX_CFG 0x00
-#define CMR_EN BIT_ULL(55)
-#define DATA_PKT_TX_EN BIT_ULL(53)
-#define DATA_PKT_RX_EN BIT_ULL(54)
-#define CGX_LMAC_TYPE_SHIFT 40
-#define CGX_LMAC_TYPE_MASK 0xF
+#define CMR_EN BIT_ULL(55)
+#define DATA_PKT_TX_EN BIT_ULL(53)
+#define DATA_PKT_RX_EN BIT_ULL(54)
+#define CGX_LMAC_TYPE_SHIFT 40
+#define CGX_LMAC_TYPE_MASK 0xF
#define CGXX_CMRX_INT 0x040
-#define FW_CGX_INT BIT_ULL(1)
+#define FW_CGX_INT BIT_ULL(1)
#define CGXX_CMRX_INT_ENA_W1S 0x058
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
#define CGXX_CMRX_RX_LMACS 0x128
#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
-#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
-#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
-#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
-#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
+#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
+#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
+#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
#define CGXX_CMRX_RX_DMAC_CAM0 0x200
-#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
-#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
+#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
#define CGXX_CMRX_TX_STAT0 0x700
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
#define CGXX_SPUX_CONTROL1 0x10000
-#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
+#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
#define CGXX_GMP_PCS_MRX_CTL 0x30000
-#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
+#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
#define CGX_COMMAND_REG CGXX_SCRATCH1_REG
#define CGX_EVENT_REG CGXX_SCRATCH0_REG
@@ -94,11 +95,12 @@ struct cgx_event_cb {
extern struct pci_driver cgx_driver;
-int cgx_get_cgx_cnt(void);
+int cgx_get_cgxcnt_max(void);
int cgx_get_lmac_cnt(void *cgxd);
void *cgx_get_pdata(int cgx_id);
int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind);
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
+int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
@@ -108,4 +110,6 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
int cgx_get_link_info(void *cgxd, int lmac_id,
struct cgx_link_user_info *linfo);
+int cgx_lmac_linkup_start(void *cgxd);
+int cgx_get_mkex_prfl_info(u64 *addr, u64 *size);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index fa17af3f4ba7..fb3ba4968a9b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -78,8 +78,8 @@ enum cgx_cmd_id {
CGX_CMD_LINK_STATE_CHANGE,
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
- CGX_CMD_IRQ_ENABLE,
- CGX_CMD_IRQ_DISABLE,
+ CGX_CMD_GET_MKEX_PRFL_SIZE,
+ CGX_CMD_GET_MKEX_PRFL_ADDR
};
/* async event ids */
@@ -139,6 +139,16 @@ enum cgx_cmd_own {
*/
#define RESP_MAC_ADDR GENMASK_ULL(56, 9)
+/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_SIZE with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_SIZE GENMASK_ULL(63, 9)
+
+/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_ADDR with cmd status as
+ * CGX_STAT_SUCCESS
+ */
+#define RESP_MKEX_PRFL_ADDR GENMASK_ULL(63, 9)
+
/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
* status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
*
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index d39ada404c8f..ec50a21c5aaf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -143,6 +143,14 @@ enum nix_scheduler {
NIX_TXSCH_LVL_CNT = 0x5,
};
+#define TXSCH_TL1_DFLT_RR_QTM ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull)
+
+/* Min/Max packet sizes, excluding FCS */
+#define NIC_HW_MIN_FRS 40
+#define NIC_HW_MAX_FRS 9212
+#define SDP_HW_MAX_FRS 65535
+
/* NIX RX action operation*/
#define NIX_RX_ACTIONOP_DROP (0x0ull)
#define NIX_RX_ACTIONOP_UCAST (0x1ull)
@@ -169,7 +177,9 @@ enum nix_scheduler {
#define MAX_LMAC_PKIND 12
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
+#define NIX_LINK_LBK(a) (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
+#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
/* NIX LSO format indices.
* As of now TSO is the only one using, so statically assigning indices.
@@ -186,26 +196,4 @@ enum nix_scheduler {
#define DEFAULT_RSS_CONTEXT_GROUP 0
#define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */
-/* NIX flow tag, key type flags */
-#define FLOW_KEY_TYPE_PORT BIT(0)
-#define FLOW_KEY_TYPE_IPV4 BIT(1)
-#define FLOW_KEY_TYPE_IPV6 BIT(2)
-#define FLOW_KEY_TYPE_TCP BIT(3)
-#define FLOW_KEY_TYPE_UDP BIT(4)
-#define FLOW_KEY_TYPE_SCTP BIT(5)
-
-/* NIX flow tag algorithm indices, max is 31 */
-enum {
- FLOW_KEY_ALG_PORT,
- FLOW_KEY_ALG_IP,
- FLOW_KEY_ALG_TCP,
- FLOW_KEY_ALG_UDP,
- FLOW_KEY_ALG_SCTP,
- FLOW_KEY_ALG_TCP_UDP,
- FLOW_KEY_ALG_TCP_SCTP,
- FLOW_KEY_ALG_UDP_SCTP,
- FLOW_KEY_ALG_TCP_UDP_SCTP,
- FLOW_KEY_ALG_MAX,
-};
-
#endif /* COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 85ba24a05774..d6f9ed8ea966 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -290,7 +290,7 @@ EXPORT_SYMBOL(otx2_mbox_nonempty);
const char *otx2_mbox_id2name(u16 id)
{
switch (id) {
-#define M(_name, _id, _1, _2) case _id: return # _name;
+#define M(_name, _id, _1, _2, _3) case _id: return # _name;
MBOX_MESSAGES
#undef M
default:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index a15a59c9a239..76a4575d18ff 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -120,54 +120,101 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
#define MBOX_MESSAGES \
/* Generic mbox IDs (range 0x000 - 0x1FF) */ \
-M(READY, 0x001, msg_req, ready_msg_rsp) \
-M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \
-M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \
-M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \
+M(READY, 0x001, ready, msg_req, ready_msg_rsp) \
+M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
+M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
+M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \
+M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
-M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \
-M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \
-M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \
-M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \
+M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
+M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
+M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp) \
+M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
-M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \
+M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get, \
cgx_mac_addr_set_or_get) \
-M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \
-M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \
-M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \
-M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \
-M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \
-M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \
-M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \
+M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp) \
+M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp) \
+M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp) \
+M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
+M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
+M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
+M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
-M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \
-M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \
-M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \
-M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \
+M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
+ npa_lf_alloc_req, npa_lf_alloc_rsp) \
+M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \
+M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \
+M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\
/* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \
/* TIM mbox IDs (range 0x800 - 0x9FF) */ \
/* CPT mbox IDs (range 0xA00 - 0xBFF) */ \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
+M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
+ npc_mcam_alloc_entry_rsp) \
+M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry, \
+ npc_mcam_free_entry_req, msg_rsp) \
+M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry, \
+ npc_mcam_write_entry_req, msg_rsp) \
+M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry, \
+ npc_mcam_ena_dis_entry_req, msg_rsp) \
+M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, npc_mcam_shift_entry_req,\
+ npc_mcam_shift_entry_rsp) \
+M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter, \
+ npc_mcam_alloc_counter_req, \
+ npc_mcam_alloc_counter_rsp) \
+M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter, \
+ npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter, \
+ npc_mcam_unmap_counter_req, msg_rsp) \
+M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter, \
+ npc_mcam_oper_counter_req, msg_rsp) \
+M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats, \
+ npc_mcam_oper_counter_req, \
+ npc_mcam_oper_counter_rsp) \
+M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \
+ npc_mcam_alloc_and_write_entry_req, \
+ npc_mcam_alloc_and_write_entry_rsp) \
+M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \
+ msg_req, npc_get_kex_cfg_rsp) \
/* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \
-M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \
-M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
-M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \
-M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp) \
-M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
-M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp) \
-M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_config, msg_rsp) \
-M(NIX_STATS_RST, 0x8007, msg_req, msg_rsp) \
-M(NIX_VTAG_CFG, 0x8008, nix_vtag_config, msg_rsp) \
-M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, msg_rsp) \
-M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, msg_rsp) \
-M(NIX_SET_RX_MODE, 0x800b, nix_rx_mode, msg_rsp)
+M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \
+ nix_lf_alloc_req, nix_lf_alloc_rsp) \
+M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \
+M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \
+M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \
+ hwctx_disable_req, msg_rsp) \
+M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \
+ nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
+M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \
+M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \
+M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \
+M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg, \
+ nix_rss_flowkey_cfg_rsp) \
+M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \
+M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp) \
+M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp) \
+M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp) \
+M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp) \
+M(NIX_MARK_FORMAT_CFG, 0x800f, nix_mark_format_cfg, \
+ nix_mark_format_cfg, \
+ nix_mark_format_cfg_rsp) \
+M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \
+M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
+ nix_lso_format_cfg, \
+ nix_lso_format_cfg_rsp) \
+M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
-M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp)
+M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
enum {
-#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id,
+#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
#undef M
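[Editor's note: the mbox table above is the classic "X macro" pattern: every M(...) row now carries a fifth column (the handler name), and each expansion of M picks out the columns it needs — the enum here takes only the ID, while rvu_process_mbox_msg() later in this patch takes the handler and the request/response types. A minimal standalone example of the same technique:]

	#include <stdio.h>

	#define MESSAGES				\
	M(READY,  0x001, ready)				\
	M(VF_FLR, 0x006, vf_flr)

	enum {
	#define M(_name, _id, _fn) MBOX_MSG_ ## _name = _id,
	MESSAGES
	#undef M
	};

	static int handle_ready(void)  { return 1; }
	static int handle_vf_flr(void) { return 2; }

	static int dispatch(int id)
	{
		switch (id) {
	#define M(_name, _id, _fn) case _id: return handle_ ## _fn();
		MESSAGES
	#undef M
		default:
			return -1;
		}
	}

	int main(void)
	{
		printf("VF_FLR handler returned %d\n",
		       dispatch(MBOX_MSG_VF_FLR));
		return 0;
	}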
@@ -191,6 +238,13 @@ struct msg_rsp {
struct mbox_msghdr hdr;
};
+/* RVU mailbox error codes
+ * Range 256 - 300.
+ */
+enum rvu_af_status {
+ RVU_INVALID_VF_ID = -256,
+};
+
struct ready_msg_rsp {
struct mbox_msghdr hdr;
u16 sclk_feq; /* SCLK frequency */
@@ -347,6 +401,8 @@ struct hwctx_disable_req {
u8 ctype;
};
+/* NIX mbox message formats */
+
/* NIX mailbox error codes
* Range 401 - 500.
*/
@@ -365,6 +421,12 @@ enum nix_af_status {
NIX_AF_INVAL_TXSCHQ_CFG = -412,
NIX_AF_SMQ_FLUSH_FAILED = -413,
NIX_AF_ERR_LF_RESET = -414,
+ NIX_AF_ERR_RSS_NOSPC_FIELD = -415,
+ NIX_AF_ERR_RSS_NOSPC_ALGO = -416,
+ NIX_AF_ERR_MARK_CFG_FAIL = -417,
+ NIX_AF_ERR_LSO_CFG_FAIL = -418,
+ NIX_AF_INVAL_NPA_PF_FUNC = -419,
+ NIX_AF_INVAL_SSO_PF_FUNC = -420,
};
/* For NIX LF context alloc and init */
@@ -392,6 +454,10 @@ struct nix_lf_alloc_rsp {
u8 lso_tsov4_idx;
u8 lso_tsov6_idx;
u8 mac_addr[ETH_ALEN];
+ u8 lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */
+ u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */
+ u16 cints; /* NIX_AF_CONST2::CINTS */
+ u16 qints; /* NIX_AF_CONST2::QINTS */
};
/* NIX AQ enqueue msg */
@@ -472,6 +538,7 @@ struct nix_txschq_config {
struct nix_vtag_config {
struct mbox_msghdr hdr;
+ /* '0' for 4 octet VTAG, '1' for 8 octet VTAG */
u8 vtag_size;
/* cfg_type is '0' for tx vlan cfg
* cfg_type is '1' for rx vlan cfg
@@ -492,7 +559,7 @@ struct nix_vtag_config {
/* valid when cfg_type is '1' */
struct {
- /* rx vtag type index */
+		/* rx vtag type index, valid values are in the 0..7 range */
u8 vtag_type;
/* rx vtag strip */
u8 strip_vtag :1;
@@ -505,15 +572,40 @@ struct nix_vtag_config {
struct nix_rss_flowkey_cfg {
struct mbox_msghdr hdr;
int mcam_index; /* MCAM entry index to modify */
+#define NIX_FLOW_KEY_TYPE_PORT BIT(0)
+#define NIX_FLOW_KEY_TYPE_IPV4 BIT(1)
+#define NIX_FLOW_KEY_TYPE_IPV6 BIT(2)
+#define NIX_FLOW_KEY_TYPE_TCP BIT(3)
+#define NIX_FLOW_KEY_TYPE_UDP BIT(4)
+#define NIX_FLOW_KEY_TYPE_SCTP BIT(5)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
+struct nix_rss_flowkey_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 alg_idx; /* Selected algo index */
+};
+
struct nix_set_mac_addr {
struct mbox_msghdr hdr;
u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */
};
+struct nix_mark_format_cfg {
+ struct mbox_msghdr hdr;
+ u8 offset;
+ u8 y_mask;
+ u8 y_val;
+ u8 r_mask;
+ u8 r_val;
+};
+
+struct nix_mark_format_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 mark_format_idx;
+};
+
struct nix_rx_mode {
struct mbox_msghdr hdr;
#define NIX_RX_MODE_UCAST BIT(0)
@@ -522,4 +614,182 @@ struct nix_rx_mode {
u16 mode;
};
+struct nix_rx_cfg {
+ struct mbox_msghdr hdr;
+#define NIX_RX_OL3_VERIFY BIT(0)
+#define NIX_RX_OL4_VERIFY BIT(1)
+ u8 len_verify; /* Outer L3/L4 len check */
+#define NIX_RX_CSUM_OL4_VERIFY BIT(0)
+ u8 csum_verify; /* Outer L4 checksum verification */
+};
+
+struct nix_frs_cfg {
+ struct mbox_msghdr hdr;
+ u8 update_smq; /* Update SMQ's min/max lens */
+ u8 update_minlen; /* Set minlen also */
+ u8 sdp_link; /* Set SDP RX link */
+ u16 maxlen;
+ u16 minlen;
+};
+
+struct nix_lso_format_cfg {
+ struct mbox_msghdr hdr;
+ u64 field_mask;
+#define NIX_LSO_FIELD_MAX 8
+ u64 fields[NIX_LSO_FIELD_MAX];
+};
+
+struct nix_lso_format_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u8 lso_format_idx;
+};
+
+/* NPC mbox message structs */
+
+#define NPC_MCAM_ENTRY_INVALID 0xFFFF
+#define NPC_MCAM_INVALID_MAP 0xFFFF
+
+/* NPC mailbox error codes
+ * Range 701 - 800.
+ */
+enum npc_af_status {
+ NPC_MCAM_INVALID_REQ = -701,
+ NPC_MCAM_ALLOC_DENIED = -702,
+ NPC_MCAM_ALLOC_FAILED = -703,
+ NPC_MCAM_PERM_DENIED = -704,
+};
+
+struct npc_mcam_alloc_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MAX_NONCONTIG_ENTRIES 256
+ u8 contig; /* Contiguous entries ? */
+#define NPC_MCAM_ANY_PRIO 0
+#define NPC_MCAM_LOWER_PRIO 1
+#define NPC_MCAM_HIGHER_PRIO 2
+ u8 priority; /* Lower or higher w.r.t ref_entry */
+ u16 ref_entry;
+ u16 count; /* Number of entries requested */
+};
+
+struct npc_mcam_alloc_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 entry; /* Entry allocated or start index if contiguous.
+		    * Invalid in case of non-contiguous.
+ */
+ u16 count; /* Number of entries allocated */
+ u16 free_count; /* Number of entries available */
+ u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+};
+
+struct npc_mcam_free_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry; /* Entry index to be freed */
+	u8 all; /* Set to free all entries allocated to this PFVF */
+};
+
+struct mcam_entry {
+#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
+ u64 kw[NPC_MAX_KWS_IN_KEY];
+ u64 kw_mask[NPC_MAX_KWS_IN_KEY];
+ u64 action;
+ u64 vtag_action;
+};
+
+struct npc_mcam_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u16 entry; /* MCAM entry to write this match key */
+ u16 cntr; /* Counter for this MCAM entry */
+ u8 intf; /* Rx or Tx interface */
+	u8 enable_entry; /* Enable this MCAM entry? */
+ u8 set_cntr; /* Set counter for this entry ? */
+};
+
+/* Enable/Disable a given entry */
+struct npc_mcam_ena_dis_entry_req {
+ struct mbox_msghdr hdr;
+ u16 entry;
+};
+
+struct npc_mcam_shift_entry_req {
+ struct mbox_msghdr hdr;
+#define NPC_MCAM_MAX_SHIFTS 64
+ u16 curr_entry[NPC_MCAM_MAX_SHIFTS];
+ u16 new_entry[NPC_MCAM_MAX_SHIFTS];
+ u16 shift_count; /* Number of entries to shift */
+};
+
+struct npc_mcam_shift_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 failed_entry_idx; /* Index in 'curr_entry', not entry itself */
+};
+
+struct npc_mcam_alloc_counter_req {
+ struct mbox_msghdr hdr;
+ u8 contig; /* Contiguous counters ? */
+#define NPC_MAX_NONCONTIG_COUNTERS 64
+ u16 count; /* Number of counters requested */
+};
+
+struct npc_mcam_alloc_counter_rsp {
+ struct mbox_msghdr hdr;
+ u16 cntr; /* Counter allocated or start index if contiguous.
+		   * Invalid in case of non-contiguous.
+ */
+ u16 count; /* Number of counters allocated */
+ u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS];
+};
+
+struct npc_mcam_oper_counter_req {
+ struct mbox_msghdr hdr;
+	u16 cntr; /* Free a counter or clear/fetch its stats */
+};
+
+struct npc_mcam_oper_counter_rsp {
+ struct mbox_msghdr hdr;
+ u64 stat; /* valid only while fetching counter's stats */
+};
+
+struct npc_mcam_unmap_counter_req {
+ struct mbox_msghdr hdr;
+ u16 cntr;
+ u16 entry; /* Entry and counter to be unmapped */
+ u8 all; /* Unmap all entries using this counter ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_req {
+ struct mbox_msghdr hdr;
+ struct mcam_entry entry_data;
+ u16 ref_entry;
+ u8 priority; /* Lower or higher w.r.t ref_entry */
+ u8 intf; /* Rx or Tx interface */
+	u8 enable_entry; /* Enable this MCAM entry? */
+ u8 alloc_cntr; /* Allocate counter and map ? */
+};
+
+struct npc_mcam_alloc_and_write_entry_rsp {
+ struct mbox_msghdr hdr;
+ u16 entry;
+ u16 cntr;
+};
+
+struct npc_get_kex_cfg_rsp {
+ struct mbox_msghdr hdr;
+ u64 rx_keyx_cfg; /* NPC_AF_INTF(0)_KEX_CFG */
+ u64 tx_keyx_cfg; /* NPC_AF_INTF(1)_KEX_CFG */
+#define NPC_MAX_INTF 2
+#define NPC_MAX_LID 8
+#define NPC_MAX_LT 16
+#define NPC_MAX_LD 2
+#define NPC_MAX_LFL 16
+ /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+ u64 kex_ld_flags[NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+ u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+#define MKEX_NAME_LEN 128
+ u8 mkex_pfl_name[MKEX_NAME_LEN];
+};
+
#endif /* MBOX_H */
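[Editor's note: a hedged usage sketch for the MCAM allocation mailbox defined above — a consumer asking for four contiguous entries at higher priority than an existing reference entry. The mbox send/receive plumbing is elided and the field values are illustrative:]

	struct npc_mcam_alloc_entry_req req = { 0 };

	req.contig    = 1;			/* one contiguous block */
	req.priority  = NPC_MCAM_HIGHER_PRIO;	/* relative to ref_entry */
	req.ref_entry = 100;
	req.count     = 4;

	/* On success, rsp->entry is the start index of the block and
	 * rsp->count the number granted; for non-contiguous requests
	 * rsp->entry is NPC_MCAM_ENTRY_INVALID and entry_list[] holds
	 * the individual indices. */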
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index f98b0113def3..8d6d90fdfb73 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -259,4 +259,28 @@ struct nix_rx_action {
#endif
};
+/* NIX Receive Vtag Action Structure */
+#define VTAG0_VALID_BIT BIT_ULL(15)
+#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
+#define VTAG0_LID_MASK GENMASK_ULL(10, 8)
+#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0)
+
+struct npc_mcam_kex {
+	/* MKEX Profile Header */
+ u64 mkex_sign; /* "mcam-kex-profile" (8 bytes/ASCII characters) */
+ u8 name[MKEX_NAME_LEN]; /* MKEX Profile name */
+ u64 cpu_model; /* Format as profiled by CPU hardware */
+ u64 kpu_version; /* KPU firmware/profile version */
+ u64 reserved; /* Reserved for extension */
+
+	/* MKEX Profile Data */
+ u64 keyx_cfg[NPC_MAX_INTF]; /* NPC_AF_INTF(0..1)_KEX_CFG */
+ /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+ u64 kex_ld_flags[NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */
+ u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD];
+ /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */
+ u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+} __packed;
+
#endif /* NPC_H */
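[Editor's note: the VTAG0_* masks above describe one half of the NIX receive vtag action word. A sketch of packing it with the kernel's FIELD_PREP helper; the field values are illustrative, not from the patch:]

	#include <linux/bitfield.h>

	static u64 make_vtag0_action(u8 type, u8 lid, u8 relptr)
	{
		return VTAG0_VALID_BIT |
		       FIELD_PREP(VTAG0_TYPE_MASK, type) |   /* vtag type 0..7 */
		       FIELD_PREP(VTAG0_LID_MASK, lid) |     /* layer to anchor at */
		       FIELD_PREP(VTAG0_RELPTR_MASK, relptr);/* byte offset */
	}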
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index dc28fa2b9481..e581091c09c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -29,6 +29,16 @@ static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc);
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ int type, int num,
+ void (mbox_handler)(struct work_struct *),
+ void (mbox_up_handler)(struct work_struct *));
+enum {
+ TYPE_AFVF,
+ TYPE_AFPF,
+};
/* Supported devices */
static const struct pci_device_id rvu_id_table[] = {
@@ -42,6 +52,10 @@ MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, rvu_id_table);
+static char *mkex_profile; /* MKEX profile name */
+module_param(mkex_profile, charp, 0000);
+MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+
/* Poll a RVU block's register 'offset', for a 'zero'
* or 'nonzero' at bits specified by 'mask'
*/
@@ -153,17 +167,17 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
u16 match = 0;
int lf;
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
for (lf = 0; lf < block->lf.max; lf++) {
if (block->fn_map[lf] == pcifunc) {
if (slot == match) {
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return lf;
}
match++;
}
}
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return -ENODEV;
}
@@ -337,6 +351,28 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc)
return &rvu->pf[rvu_get_pf(pcifunc)];
}
+static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc)
+{
+ int pf, vf, nvfs;
+ u64 cfg;
+
+ pf = rvu_get_pf(pcifunc);
+ if (pf >= rvu->hw->total_pfs)
+ return false;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ return true;
+
+ /* Check if VF is within number of VFs attached to this PF */
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ nvfs = (cfg >> 12) & 0xFF;
+ if (vf >= nvfs)
+ return false;
+
+ return true;
+}
+
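[Editor's note: is_pf_func_valid() leans on the pcifunc encoding: the PF number lives in the upper bits and the function part is the VF index plus one, so zero means "the PF itself". A sketch of the encode/decode pair using the RVU_PFVF_* shift/mask names that appear later in this patch; treat the exact bit positions as an assumption here:]

	/* pcifunc layout (per rvu.h): | PF bits | FUNC = VF idx + 1 | */
	static inline u16 make_pcifunc(int pf, int vf)	/* vf < 0 => the PF */
	{
		return (pf << RVU_PFVF_PF_SHIFT) | (vf < 0 ? 0 : vf + 1);
	}

	static inline int pcifunc_to_vf(u16 pcifunc)	/* -1 => a PF */
	{
		return (int)(pcifunc & RVU_PFVF_FUNC_MASK) - 1;
	}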
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr)
{
struct rvu_block *block;
@@ -597,6 +633,8 @@ static void rvu_free_hw_resources(struct rvu *rvu)
dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
max_msix * PCI_MSIX_ENTRY_SIZE,
DMA_BIDIRECTIONAL, 0);
+
+ mutex_destroy(&rvu->rsrc_lock);
}
static int rvu_setup_hw_resources(struct rvu *rvu)
@@ -752,7 +790,7 @@ init:
if (!rvu->hwvf)
return -ENOMEM;
- spin_lock_init(&rvu->rsrc_lock);
+ mutex_init(&rvu->rsrc_lock);
err = rvu_setup_msix_resources(rvu);
if (err)
@@ -777,17 +815,26 @@ init:
err = rvu_npc_init(rvu);
if (err)
- return err;
+ goto exit;
+
+ err = rvu_cgx_init(rvu);
+ if (err)
+ goto exit;
err = rvu_npa_init(rvu);
if (err)
- return err;
+ goto cgx_err;
err = rvu_nix_init(rvu);
if (err)
- return err;
+ goto cgx_err;
return 0;
+
+cgx_err:
+ rvu_cgx_exit(rvu);
+exit:
+ return err;
}
/* NPA and NIX admin queue APIs */
@@ -830,7 +877,7 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
return 0;
}
-static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req,
+static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req,
struct ready_msg_rsp *rsp)
{
return 0;
@@ -858,6 +905,22 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype)
return 0;
}
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype)
+{
+ struct rvu_pfvf *pfvf;
+
+ if (!is_pf_func_valid(rvu, pcifunc))
+ return false;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+
+ /* Check if this PFFUNC has a LF of type blktype attached */
+ if (!rvu_get_rsrc_mapcount(pfvf, blktype))
+ return false;
+
+ return true;
+}
+
static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
int pcifunc, int slot)
{
@@ -926,7 +989,7 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
struct rvu_block *block;
int blkid;
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
/* Check for partial resource detach */
if (detach && detach->partial)
@@ -956,11 +1019,11 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
rvu_detach_block(rvu, pcifunc, block->type);
}
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return 0;
}
-static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu,
+static int rvu_mbox_handler_detach_resources(struct rvu *rvu,
struct rsrc_detach *detach,
struct msg_rsp *rsp)
{
@@ -1108,7 +1171,7 @@ fail:
return -ENOSPC;
}
-static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
+static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
struct rsrc_attach *attach,
struct msg_rsp *rsp)
{
@@ -1119,7 +1182,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
if (!attach->modify)
rvu_detach_rsrcs(rvu, NULL, pcifunc);
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
/* Check if the request can be accommodated */
err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
@@ -1163,7 +1226,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu,
}
exit:
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return err;
}
@@ -1231,7 +1294,7 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset);
}
-static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
+static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req,
struct msix_offset_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1280,22 +1343,51 @@ static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req,
return 0;
}
-static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
+static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 vf, numvfs;
+ u64 cfg;
+
+ vf = pcifunc & RVU_PFVF_FUNC_MASK;
+ cfg = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc)));
+ numvfs = (cfg >> 12) & 0xFF;
+
+ if (vf && vf <= numvfs)
+ __rvu_flr_handler(rvu, pcifunc);
+ else
+ return RVU_INVALID_VF_ID;
+
+ return 0;
+}
+
+static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
+ struct rvu *rvu = pci_get_drvdata(mbox->pdev);
+
	/* Check if valid, if not reply with an invalid msg */
if (req->sig != OTX2_MBOX_REQ_SIG)
goto bad_message;
switch (req->id) {
-#define M(_name, _id, _req_type, _rsp_type) \
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
case _id: { \
struct _rsp_type *rsp; \
int err; \
\
rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
- &rvu->mbox, devid, \
+ mbox, devid, \
sizeof(struct _rsp_type)); \
+ /* some handlers should complete even if reply */ \
+ /* could not be allocated */ \
+ if (!rsp && \
+ _id != MBOX_MSG_DETACH_RESOURCES && \
+ _id != MBOX_MSG_NIX_TXSCH_FREE && \
+ _id != MBOX_MSG_VF_FLR) \
+ return -ENOMEM; \
if (rsp) { \
rsp->hdr.id = _id; \
rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
@@ -1303,9 +1395,9 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
rsp->hdr.rc = 0; \
} \
\
- err = rvu_mbox_handler_ ## _name(rvu, \
- (struct _req_type *)req, \
- rsp); \
+ err = rvu_mbox_handler_ ## _fn_name(rvu, \
+ (struct _req_type *)req, \
+ rsp); \
if (rsp && err) \
rsp->hdr.rc = err; \
\
@@ -1313,29 +1405,38 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid,
}
MBOX_MESSAGES
#undef M
- break;
+
bad_message:
default:
- otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc,
- req->id);
+ otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id);
return -ENODEV;
}
}
-static void rvu_mbox_handler(struct work_struct *work)
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
{
- struct rvu_work *mwork = container_of(work, struct rvu_work, work);
struct rvu *rvu = mwork->rvu;
+ int offset, err, id, devid;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *req_hdr;
struct mbox_msghdr *msg;
+ struct mbox_wq_info *mw;
struct otx2_mbox *mbox;
- int offset, id, err;
- u16 pf;
- mbox = &rvu->mbox;
- pf = mwork - rvu->mbox_wrk;
- mdev = &mbox->dev[pf];
+ switch (type) {
+ case TYPE_AFPF:
+ mw = &rvu->afpf_wq_info;
+ break;
+ case TYPE_AFVF:
+ mw = &rvu->afvf_wq_info;
+ break;
+ default:
+ return;
+ }
+
+ devid = mwork - mw->mbox_wrk;
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[devid];
/* Process received mbox messages */
req_hdr = mdev->mbase + mbox->rx_start;
@@ -1347,10 +1448,21 @@ static void rvu_mbox_handler(struct work_struct *work)
for (id = 0; id < req_hdr->num_msgs; id++) {
msg = mdev->mbase + offset;
- /* Set which PF sent this message based on mbox IRQ */
- msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
- msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT);
- err = rvu_process_mbox_msg(rvu, pf, msg);
+ /* Set which PF/VF sent this message based on mbox IRQ */
+ switch (type) {
+ case TYPE_AFPF:
+ msg->pcifunc &=
+ ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
+ msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT);
+ break;
+ case TYPE_AFVF:
+ msg->pcifunc &=
+ ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
+ msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1;
+ break;
+ }
+
+ err = rvu_process_mbox_msg(mbox, devid, msg);
if (!err) {
offset = mbox->rx_start + msg->next_msgoff;
continue;
@@ -1358,31 +1470,57 @@ static void rvu_mbox_handler(struct work_struct *work)
if (msg->pcifunc & RVU_PFVF_FUNC_MASK)
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n",
- err, otx2_mbox_id2name(msg->id), msg->id, pf,
+ err, otx2_mbox_id2name(msg->id),
+ msg->id, devid,
(msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1);
else
dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n",
- err, otx2_mbox_id2name(msg->id), msg->id, pf);
+ err, otx2_mbox_id2name(msg->id),
+ msg->id, devid);
}
- /* Send mbox responses to PF */
- otx2_mbox_msg_send(mbox, pf);
+ /* Send mbox responses to VF/PF */
+ otx2_mbox_msg_send(mbox, devid);
+}
+
+static inline void rvu_afpf_mbox_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_handler(mwork, TYPE_AFPF);
}
-static void rvu_mbox_up_handler(struct work_struct *work)
+static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_handler(mwork, TYPE_AFVF);
+}
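[Editor's note: the 'devid = mwork - mw->mbox_wrk' line above recovers the worker's index purely from pointer arithmetic, since each rvu_work sits in a per-device array. A standalone illustration of the idiom:]

	#include <stdio.h>
	#include <stddef.h>

	struct rvu_work_example { int dummy; };

	int main(void)
	{
		struct rvu_work_example wrk[8];
		struct rvu_work_example *mwork = &wrk[5];

		/* Same idiom as 'devid = mwork - mw->mbox_wrk': element
		 * minus array base yields the index, scaled by sizeof
		 * automatically. */
		ptrdiff_t devid = mwork - wrk;

		printf("devid=%td\n", devid);
		return 0;
	}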
+
+static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
+{
struct rvu *rvu = mwork->rvu;
struct otx2_mbox_dev *mdev;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
+ struct mbox_wq_info *mw;
struct otx2_mbox *mbox;
- int offset, id;
- u16 pf;
+ int offset, id, devid;
+
+ switch (type) {
+ case TYPE_AFPF:
+ mw = &rvu->afpf_wq_info;
+ break;
+ case TYPE_AFVF:
+ mw = &rvu->afvf_wq_info;
+ break;
+ default:
+ return;
+ }
- mbox = &rvu->mbox_up;
- pf = mwork - rvu->mbox_wrk_up;
- mdev = &mbox->dev[pf];
+ devid = mwork - mw->mbox_wrk_up;
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[devid];
rsp_hdr = mdev->mbase + mbox->rx_start;
if (rsp_hdr->num_msgs == 0) {
@@ -1423,128 +1561,182 @@ end:
mdev->msgs_acked++;
}
- otx2_mbox_reset(mbox, 0);
+ otx2_mbox_reset(mbox, devid);
}
-static int rvu_mbox_init(struct rvu *rvu)
+static inline void rvu_afpf_mbox_up_handler(struct work_struct *work)
{
- struct rvu_hwinfo *hw = rvu->hw;
- void __iomem *hwbase = NULL;
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_up_handler(mwork, TYPE_AFPF);
+}
+
+static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
+{
+ struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+
+ __rvu_mbox_up_handler(mwork, TYPE_AFVF);
+}
+
+static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
+ int type, int num,
+ void (mbox_handler)(struct work_struct *),
+ void (mbox_up_handler)(struct work_struct *))
+{
+ void __iomem *hwbase = NULL, *reg_base;
+ int err, i, dir, dir_up;
struct rvu_work *mwork;
+ const char *name;
u64 bar4_addr;
- int err, pf;
- rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox",
- WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
- hw->total_pfs);
- if (!rvu->mbox_wq)
+ switch (type) {
+ case TYPE_AFPF:
+ name = "rvu_afpf_mailbox";
+ bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
+ dir = MBOX_DIR_AFPF;
+ dir_up = MBOX_DIR_AFPF_UP;
+ reg_base = rvu->afreg_base;
+ break;
+ case TYPE_AFVF:
+ name = "rvu_afvf_mailbox";
+ bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+ dir = MBOX_DIR_PFVF;
+ dir_up = MBOX_DIR_PFVF_UP;
+ reg_base = rvu->pfreg_base;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mw->mbox_wq = alloc_workqueue(name,
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ num);
+ if (!mw->mbox_wq)
return -ENOMEM;
- rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs,
- sizeof(struct rvu_work), GFP_KERNEL);
- if (!rvu->mbox_wrk) {
+ mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!mw->mbox_wrk) {
err = -ENOMEM;
goto exit;
}
- rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs,
- sizeof(struct rvu_work), GFP_KERNEL);
- if (!rvu->mbox_wrk_up) {
+ mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!mw->mbox_wrk_up) {
err = -ENOMEM;
goto exit;
}
- /* Map mbox region shared with PFs */
- bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
/* Mailbox is a reserved memory (in RAM) region shared between
* RVU devices, shouldn't be mapped as device memory to allow
* unaligned accesses.
*/
- hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs);
+ hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
if (!hwbase) {
dev_err(rvu->dev, "Unable to map mailbox region\n");
err = -ENOMEM;
goto exit;
}
- err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base,
- MBOX_DIR_AFPF, hw->total_pfs);
+ err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
if (err)
goto exit;
- err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base,
- MBOX_DIR_AFPF_UP, hw->total_pfs);
+ err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
+ reg_base, dir_up, num);
if (err)
goto exit;
- for (pf = 0; pf < hw->total_pfs; pf++) {
- mwork = &rvu->mbox_wrk[pf];
+ for (i = 0; i < num; i++) {
+ mwork = &mw->mbox_wrk[i];
mwork->rvu = rvu;
- INIT_WORK(&mwork->work, rvu_mbox_handler);
- }
+ INIT_WORK(&mwork->work, mbox_handler);
- for (pf = 0; pf < hw->total_pfs; pf++) {
- mwork = &rvu->mbox_wrk_up[pf];
+ mwork = &mw->mbox_wrk_up[i];
mwork->rvu = rvu;
- INIT_WORK(&mwork->work, rvu_mbox_up_handler);
+ INIT_WORK(&mwork->work, mbox_up_handler);
}
return 0;
exit:
if (hwbase)
iounmap((void __iomem *)hwbase);
- destroy_workqueue(rvu->mbox_wq);
+ destroy_workqueue(mw->mbox_wq);
return err;
}
-static void rvu_mbox_destroy(struct rvu *rvu)
+static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
- if (rvu->mbox_wq) {
- flush_workqueue(rvu->mbox_wq);
- destroy_workqueue(rvu->mbox_wq);
- rvu->mbox_wq = NULL;
+ if (mw->mbox_wq) {
+ flush_workqueue(mw->mbox_wq);
+ destroy_workqueue(mw->mbox_wq);
+ mw->mbox_wq = NULL;
}
- if (rvu->mbox.hwbase)
- iounmap((void __iomem *)rvu->mbox.hwbase);
+ if (mw->mbox.hwbase)
+ iounmap((void __iomem *)mw->mbox.hwbase);
- otx2_mbox_destroy(&rvu->mbox);
- otx2_mbox_destroy(&rvu->mbox_up);
+ otx2_mbox_destroy(&mw->mbox);
+ otx2_mbox_destroy(&mw->mbox_up);
}
-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+static void rvu_queue_work(struct mbox_wq_info *mw, int first,
+ int mdevs, u64 intr)
{
- struct rvu *rvu = (struct rvu *)rvu_irq;
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
+ int i;
+
+ for (i = first; i < mdevs; i++) {
+		/* Pending bits in 'intr' are indexed from 0 relative to 'first' */
+ if (!(intr & BIT_ULL(i - first)))
+ continue;
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[i];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work);
+
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[i];
+ hdr = mdev->mbase + mbox->rx_start;
+ if (hdr->num_msgs)
+ queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work);
+ }
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfs = rvu->vfs;
u64 intr;
- u8 pf;
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
/* Clear interrupts */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
/* Sync with mbox memory region */
- smp_wmb();
+ rmb();
- for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
- if (intr & (1ULL << pf)) {
- mbox = &rvu->mbox;
- mdev = &mbox->dev[pf];
- hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
- queue_work(rvu->mbox_wq,
- &rvu->mbox_wrk[pf].work);
- mbox = &rvu->mbox_up;
- mdev = &mbox->dev[pf];
- hdr = mdev->mbase + mbox->rx_start;
- if (hdr->num_msgs)
- queue_work(rvu->mbox_wq,
- &rvu->mbox_wrk_up[pf].work);
- }
+ rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+
+ /* Handle VF interrupts */
+ if (vfs > 64) {
+ intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr);
+
+ rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
+ vfs -= 64;
}
+ intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+
+ rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
+
return IRQ_HANDLED;
}
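[Editor's note: with more than 64 VFs the pending bits span two registers, and rvu_queue_work() rebases the bit test with BIT_ULL(i - first) so both registers are scanned from bit 0. A worked example of the indexing for 96 VFs; read_vfpf_intx() is a hypothetical register-read helper standing in for the rvupf_read64() calls above:]

	u64 intr;
	int vfs = 96;	/* example VF count */

	if (vfs > 64) {
		intr = read_vfpf_intx(1);	/* bits 0..31 => VFs 64..95 */
		rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr);
		vfs -= 64;
	}

	intr = read_vfpf_intx(0);		/* bits 0..63 => VFs 0..63 */
	rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);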
@@ -1561,6 +1753,216 @@ static void rvu_enable_mbox_intr(struct rvu *rvu)
INTR_MASK(hw->total_pfs) & ~1ULL);
}
+static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
+{
+ struct rvu_block *block;
+ int slot, lf, num_lfs;
+ int err;
+
+ block = &rvu->hw->block[blkaddr];
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ block->type);
+ if (!num_lfs)
+ return;
+ for (slot = 0; slot < num_lfs; slot++) {
+ lf = rvu_get_lf(rvu, block, pcifunc, slot);
+ if (lf < 0)
+ continue;
+
+ /* Cleanup LF and reset it */
+ if (block->addr == BLKADDR_NIX0)
+ rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
+ else if (block->addr == BLKADDR_NPA)
+ rvu_npa_lf_teardown(rvu, pcifunc, lf);
+
+ err = rvu_lf_reset(rvu, block, lf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, lf);
+ }
+ }
+}
+
+static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ mutex_lock(&rvu->flr_lock);
+ /* Reset order should reflect inter-block dependencies:
+ * 1. Reset any packet/work sources (NIX, CPT, TIM)
+ * 2. Flush and reset SSO/SSOW
+ * 3. Cleanup pools (NPA)
+ */
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
+ rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_detach_rsrcs(rvu, NULL, pcifunc);
+ mutex_unlock(&rvu->flr_lock);
+}
+
+static void rvu_afvf_flr_handler(struct rvu *rvu, int vf)
+{
+ int reg = 0;
+
+ /* pcifunc = 0(PF0) | (vf + 1) */
+ __rvu_flr_handler(rvu, vf + 1);
+
+ if (vf >= 64) {
+ reg = 1;
+ vf = vf - 64;
+ }
+
+ /* Signal FLR finish and enable IRQ */
+ rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
+}
+
+static void rvu_flr_handler(struct work_struct *work)
+{
+ struct rvu_work *flrwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = flrwork->rvu;
+ u16 pcifunc, numvfs, vf;
+ u64 cfg;
+ int pf;
+
+ pf = flrwork - rvu->flr_wrk;
+ if (pf >= rvu->hw->total_pfs) {
+ rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs);
+ return;
+ }
+
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ numvfs = (cfg >> 12) & 0xFF;
+ pcifunc = pf << RVU_PFVF_PF_SHIFT;
+
+ for (vf = 0; vf < numvfs; vf++)
+ __rvu_flr_handler(rvu, (pcifunc | (vf + 1)));
+
+ __rvu_flr_handler(rvu, pcifunc);
+
+ /* Signal FLR finish */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf));
+
+ /* Enable interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf));
+}
+
+static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs)
+{
+ int dev, vf, reg = 0;
+ u64 intr;
+
+ if (start_vf >= 64)
+ reg = 1;
+
+ intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg));
+ if (!intr)
+ return;
+
+ for (vf = 0; vf < numvfs; vf++) {
+ if (!(intr & BIT_ULL(vf)))
+ continue;
+ dev = vf + start_vf + rvu->hw->total_pfs;
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work);
+ /* Clear and disable the interrupt */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf));
+ }
+}
+
+static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT);
+ if (!intr)
+ goto afvf_flr;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+			/* PF is already dead, do only AF-related operations */
+ queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
+ /* clear interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT,
+ BIT_ULL(pf));
+ /* Disable the interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+ BIT_ULL(pf));
+ }
+ }
+
+afvf_flr:
+ rvu_afvf_queue_flr_work(rvu, 0, 64);
+ if (rvu->vfs > 64)
+ rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr)
+{
+ int vf;
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (vf = 0; vf < 64; vf++) {
+ if (intr & (1ULL << vf)) {
+			/* clear the trpend due to ME (master enable) */
+ rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf));
+ /* clear interrupt */
+ rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf));
+ }
+ }
+}
+
+/* Handles ME interrupts from VFs of AF */
+static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfset;
+ u64 intr;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ for (vfset = 0; vfset <= 1; vfset++) {
+ intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset));
+ if (intr)
+ rvu_me_handle_vfset(rvu, vfset, intr);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Handles ME interrupts from PFs */
+static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ u64 intr;
+ u8 pf;
+
+ intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT);
+
+ /* Nothing to be done here other than clearing the
+ * TRPEND bit.
+ */
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ if (intr & (1ULL << pf)) {
+			/* clear the trpend due to ME (master enable) */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND,
+ BIT_ULL(pf));
+ /* clear interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT,
+ BIT_ULL(pf));
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
static void rvu_unregister_interrupts(struct rvu *rvu)
{
int irq;
@@ -1569,6 +1971,14 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+ /* Disable the PF FLR interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Disable the PF ME interrupt */
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
for (irq = 0; irq < rvu->num_vec; irq++) {
if (rvu->irq_allocated[irq])
free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
@@ -1578,9 +1988,25 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
rvu->num_vec = 0;
}
+static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu)
+{
+	struct rvu_pfvf *pfvf = &rvu->pf[0];
+	int offset;
+
+ offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+ /* Make sure there are enough MSIX vectors configured so that
+	 * VF interrupts can be handled. An offset of zero means the
+	 * PF vectors are not configured and would overlap the AF vectors.
+ */
+ return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) &&
+ offset;
+}
+
static int rvu_register_interrupts(struct rvu *rvu)
{
- int ret;
+ int ret, offset, pf_vec_start;
rvu->num_vec = pci_msix_vec_count(rvu->pdev);
@@ -1620,13 +2046,331 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Enable mailbox interrupts from all PFs */
rvu_enable_mbox_intr(rvu);
+ /* Register FLR interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+ "RVUAF FLR");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for FLR\n");
+ goto fail;
+ }
+ rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true;
+
+	/* Enable FLR interrupt for all PFs */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ /* Register ME interrupt handler */
+ sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ "RVUAF ME");
+ ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME),
+ rvu_me_pf_intr_handler, 0,
+ &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE],
+ rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for ME\n");
+ }
+ rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true;
+
+	/* Enable ME interrupt for all PFs */
+ rvu_write64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs));
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S,
+ INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
+
+ if (!rvu_afvf_msix_vectors_num_ok(rvu))
+ return 0;
+
+ /* Get PF MSIX vectors offset. */
+ pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff;
+
+ /* Register MBOX0 interrupt. */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox0\n");
+
+ rvu->irq_allocated[offset] = true;
+
+ /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so
+ * simply increment current offset by 1.
+ */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_mbox_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE],
+ rvu);
+ if (ret)
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for Mbox1\n");
+
+ rvu->irq_allocated[offset] = true;
+
+ /* Register FLR interrupt handler for AF's VFs */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF FLR0\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_flr_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF FLR1\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ /* Register ME interrupt handler for AF's VFs */
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME0;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME0\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
+
+ offset = pf_vec_start + RVU_PF_INT_VEC_VFME1;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1");
+ ret = request_irq(pci_irq_vector(rvu->pdev, offset),
+ rvu_me_vf_intr_handler, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu);
+ if (ret) {
+ dev_err(rvu->dev,
+ "RVUAF: IRQ registration failed for RVUAFVF ME1\n");
+ goto fail;
+ }
+ rvu->irq_allocated[offset] = true;
return 0;
fail:
- pci_free_irq_vectors(rvu->pdev);
+ rvu_unregister_interrupts(rvu);
+ return ret;
+}
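For reference, INTR_MASK() above builds a dense bitmask with one bit per PF, and the "& ~1ULL" in the W1S enable writes clears bit 0 so the AF (PF0) never raises an FLR/ME interrupt against itself. A minimal user-space sketch of that arithmetic, assuming the macro mirrors the driver's BIT_ULL-based definition in rvu.h (the definition itself is not part of this hunk):

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)       (1ULL << (n))
/* Assumed to mirror the driver's INTR_MASK() in rvu.h; not in this hunk. */
#define INTR_MASK(pfvfs) (((pfvfs) < 64) ? (BIT_ULL(pfvfs) - 1) : ~0ULL)

int main(void)
{
	int total_pfs = 16;

	/* One bit per PF: bits 0..15 set. */
	printf("mask   = 0x%016llx\n",
	       (unsigned long long)INTR_MASK(total_pfs));
	/* PF0 (the AF itself) masked off before the W1S enable write. */
	printf("enable = 0x%016llx\n",
	       (unsigned long long)(INTR_MASK(total_pfs) & ~1ULL));
	return 0;
}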
+
+static void rvu_flr_wq_destroy(struct rvu *rvu)
+{
+ if (rvu->flr_wq) {
+ flush_workqueue(rvu->flr_wq);
+ destroy_workqueue(rvu->flr_wq);
+ rvu->flr_wq = NULL;
+ }
+}
+
+static int rvu_flr_init(struct rvu *rvu)
+{
+ int dev, num_devs;
+ u64 cfg;
+ int pf;
+
+ /* Enable FLR for all PFs */
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf));
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf),
+ cfg | BIT_ULL(22));
+ }
+
+ rvu->flr_wq = alloc_workqueue("rvu_afpf_flr",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ 1);
+ if (!rvu->flr_wq)
+ return -ENOMEM;
+
+ num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev);
+ rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs,
+ sizeof(struct rvu_work), GFP_KERNEL);
+ if (!rvu->flr_wrk) {
+ destroy_workqueue(rvu->flr_wq);
+ return -ENOMEM;
+ }
+
+ for (dev = 0; dev < num_devs; dev++) {
+ rvu->flr_wrk[dev].rvu = rvu;
+ INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler);
+ }
+
+ mutex_init(&rvu->flr_lock);
+
+ return 0;
+}
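rvu_flr_init() pre-allocates one rvu_work per PF/VF so the FLR interrupt handler can hand each pending function to the single-threaded, high-priority workqueue without allocating in IRQ context. The handler itself is not in this hunk; a hypothetical sketch of the expected dispatch follows, with the W1C enable register name inferred from the W1S counterpart used above:

/* Hypothetical dispatch from the FLR IRQ handler: queue one work item per
 * PF whose FLR bit is set, then ack and mask that bit until the FLR work
 * completes. The real handler (rvu_flr_intr_handler) is not in this hunk.
 */
static void flr_dispatch_sketch(struct rvu *rvu, u64 intr)
{
	int pf;

	for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
		if (!(intr & BIT_ULL(pf)))
			continue;
		queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work);
		/* Ack the interrupt and mask it until the FLR is handled. */
		rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, BIT_ULL(pf));
		rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C,
			    BIT_ULL(pf));
	}
}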
+
+static void rvu_disable_afvf_intr(struct rvu *rvu)
+{
+ int vfs = rvu->vfs;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1),
+ INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
+}
+
+static void rvu_enable_afvf_intr(struct rvu *rvu)
+{
+ int vfs = rvu->vfs;
+
+ /* Clear any pending interrupts and enable AF VF interrupts for
+ * the first 64 VFs.
+ */
+ /* Mbox */
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* FLR */
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs));
+
+ /* Same for remaining VFs, if any. */
+ if (vfs <= 64)
+ return;
+
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
+ INTR_MASK(vfs - 64));
+
+ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+ rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
+}
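Each of the VFPF mailbox/FLR/ME interrupt registers touched above is a pair: index 0 covers VFs 0-63 and index 1 covers VFs 64-127, which is why both helpers return early when vfs <= 64. A purely illustrative user-space sketch of that split, reusing the assumed INTR_MASK() definition:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)       (1ULL << (n))
#define INTR_MASK(n)     (((n) < 64) ? (BIT_ULL(n) - 1) : ~0ULL)

int main(void)
{
	int vfs = 96;   /* example: 96 AF VFs */

	/* Register index 0 covers VFs 0..63, index 1 covers VFs 64..127. */
	uint64_t lo = INTR_MASK(vfs > 64 ? 64 : vfs);
	uint64_t hi = vfs > 64 ? INTR_MASK(vfs - 64) : 0;

	printf("INT_ENA_W1SX(0) = 0x%016llx\n", (unsigned long long)lo);
	printf("INT_ENA_W1SX(1) = 0x%016llx\n", (unsigned long long)hi);
	return 0;
}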
+
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
+
+static int lbk_get_num_chans(void)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ int ret = -EIO;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK,
+ NULL);
+ if (!pdev)
+ goto err;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ /* Read number of available LBK channels from LBK(0)_CONST register. */
+ ret = (readq(base + 0x10) >> 32) & 0xffff;
+ iounmap(base);
+err_put:
+ pci_dev_put(pdev);
+err:
return ret;
}
+static int rvu_enable_sriov(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+ int err, chans, vfs;
+
+ if (!rvu_afvf_msix_vectors_num_ok(rvu)) {
+ dev_warn(&pdev->dev,
+ "Skipping SRIOV enablement since not enough IRQs are available\n");
+ return 0;
+ }
+
+ chans = lbk_get_num_chans();
+ if (chans < 0)
+ return chans;
+
+ vfs = pci_sriov_get_totalvfs(pdev);
+
+ /* Limit VFs in case we have more VFs than LBK channels available. */
+ if (vfs > chans)
+ vfs = chans;
+
+ /* AF's VFs work in pairs and talk over consecutive loopback channels.
+ * Thus we want to enable the maximum even number of VFs. If an odd
+ * number of VFs is available then the last VF on the list remains
+ * disabled.
+ */
+ if (vfs & 0x1) {
+ dev_warn(&pdev->dev,
+ "Number of VFs should be even. Enabling %d out of %d.\n",
+ vfs - 1, vfs);
+ vfs--;
+ }
+
+ if (!vfs)
+ return 0;
+
+ /* Save the number of VFs for reference in the VF interrupt handlers.
+ * Since interrupts might start arriving during SRIOV enablement,
+ * the ordinary API cannot be used to get the number of enabled VFs.
+ */
+ rvu->vfs = vfs;
+
+ err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs,
+ rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler);
+ if (err)
+ return err;
+
+ rvu_enable_afvf_intr(rvu);
+ /* Make sure IRQs are enabled before SRIOV. */
+ mb();
+
+ err = pci_enable_sriov(pdev, vfs);
+ if (err) {
+ rvu_disable_afvf_intr(rvu);
+ rvu_mbox_destroy(&rvu->afvf_wq_info);
+ return err;
+ }
+
+ return 0;
+}
+
+static void rvu_disable_sriov(struct rvu *rvu)
+{
+ rvu_disable_afvf_intr(rvu);
+ rvu_mbox_destroy(&rvu->afvf_wq_info);
+ pci_disable_sriov(rvu->pdev);
+}
+
+static void rvu_update_module_params(struct rvu *rvu)
+{
+ const char *default_pfl_name = "default";
+
+ strscpy(rvu->mkex_pfl_name,
+ mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+}
+
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -1680,6 +2424,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
+ /* Store module params in rvu structure */
+ rvu_update_module_params(rvu);
+
/* Check which blocks the HW supports */
rvu_check_block_implemented(rvu);
@@ -1689,24 +2436,35 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_release_regions;
- err = rvu_mbox_init(rvu);
+ /* Init mailbox between AF and PFs */
+ err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
+ rvu->hw->total_pfs, rvu_afpf_mbox_handler,
+ rvu_afpf_mbox_up_handler);
if (err)
goto err_hwsetup;
- err = rvu_cgx_probe(rvu);
+ err = rvu_flr_init(rvu);
if (err)
goto err_mbox;
err = rvu_register_interrupts(rvu);
if (err)
- goto err_cgx;
+ goto err_flr;
+
+ /* Enable AF's VFs (if any) */
+ err = rvu_enable_sriov(rvu);
+ if (err)
+ goto err_irq;
return 0;
-err_cgx:
- rvu_cgx_wq_destroy(rvu);
+err_irq:
+ rvu_unregister_interrupts(rvu);
+err_flr:
+ rvu_flr_wq_destroy(rvu);
err_mbox:
- rvu_mbox_destroy(rvu);
+ rvu_mbox_destroy(&rvu->afpf_wq_info);
err_hwsetup:
+ rvu_cgx_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
err_release_regions:
@@ -1725,8 +2483,10 @@ static void rvu_remove(struct pci_dev *pdev)
struct rvu *rvu = pci_get_drvdata(pdev);
rvu_unregister_interrupts(rvu);
- rvu_cgx_wq_destroy(rvu);
- rvu_mbox_destroy(rvu);
+ rvu_flr_wq_destroy(rvu);
+ rvu_cgx_exit(rvu);
+ rvu_mbox_destroy(&rvu->afpf_wq_info);
+ rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 2c0580cd2807..c9d60b0554c0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -11,6 +11,7 @@
#ifndef RVU_H
#define RVU_H
+#include <linux/pci.h>
#include "rvu_struct.h"
#include "common.h"
#include "mbox.h"
@@ -18,6 +19,9 @@
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+/* Subsystem Device ID */
+#define PCI_SUBSYS_DEVID_96XX 0xB200
+
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
#define PCI_PF_REG_BAR_NUM 2
@@ -64,7 +68,7 @@ struct nix_mcast {
struct qmem *mcast_buf;
int replay_pkind;
int next_free_mce;
- spinlock_t mce_lock; /* Serialize MCE updates */
+ struct mutex mce_lock; /* Serialize MCE updates */
};
struct nix_mce_list {
@@ -74,15 +78,27 @@ struct nix_mce_list {
};
struct npc_mcam {
- spinlock_t lock; /* MCAM entries and counters update lock */
+ struct rsrc_bmap counters;
+ struct mutex lock; /* MCAM entries and counters update lock */
+ unsigned long *bmap; /* bitmap, 0 => bmap_entries */
+ unsigned long *bmap_reverse; /* Reverse bitmap, bmap_entries => 0 */
+ u16 bmap_entries; /* Number of unreserved MCAM entries */
+ u16 bmap_fcnt; /* MCAM entries free count */
+ u16 *entry2pfvf_map;
+ u16 *entry2cntr_map;
+ u16 *cntr2pfvf_map;
+ u16 *cntr_refcnt;
u8 keysize; /* MCAM keysize 112/224/448 bits */
u8 banks; /* Number of MCAM banks */
u8 banks_per_entry;/* Number of keywords in key */
u16 banksize; /* Number of MCAM entries in each bank */
u16 total_entries; /* Total number of MCAM entries */
- u16 entries; /* Total minus reserved for NIX LFs */
u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */
u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */
+ u16 lprio_count;
+ u16 lprio_start;
+ u16 hprio_count;
+ u16 hprio_end;
};
/* Structure for per RVU func info ie PF/VF */
@@ -122,18 +138,35 @@ struct rvu_pfvf {
u16 tx_chan_base;
u8 rx_chan_cnt; /* total number of RX channels */
u8 tx_chan_cnt; /* total number of TX channels */
+ u16 maxlen;
+ u16 minlen;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
/* Broadcast pkt replication info */
u16 bcast_mce_idx;
struct nix_mce_list bcast_mce_list;
+
+ /* VLAN offload */
+ struct mcam_entry entry;
+ int rxvlan_index;
+ bool rxvlan;
};
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
- u16 *pfvf_map;
+#define NIX_TXSCHQ_TL1_CFG_DONE BIT_ULL(0)
+#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF)
+#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16)
+#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16))
+ u32 *pfvf_map;
+};
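A pfvf_map entry is now a u32 packing a 16-bit PF_FUNC in the low half and scheduler flags in the high half, per the TXSCH_MAP* macros above. A quick user-space round-trip of that packing, restated with fixed-width types for the sketch:

#include <stdio.h>
#include <stdint.h>

#define NIX_TXSCHQ_TL1_CFG_DONE  (1u << 0)
#define TXSCH_MAP_FUNC(m)        ((m) & 0xFFFF)
#define TXSCH_MAP_FLAGS(m)       ((m) >> 16)
#define TXSCH_MAP(func, flags)   ((((uint32_t)(func)) & 0xFFFF) | \
                                  (((uint32_t)(flags)) << 16))

int main(void)
{
	uint32_t map = TXSCH_MAP(0x1234, NIX_TXSCHQ_TL1_CFG_DONE);

	printf("func  = 0x%04x\n", TXSCH_MAP_FUNC(map));   /* 0x1234 */
	printf("flags = 0x%04x\n", TXSCH_MAP_FLAGS(map));  /* 0x0001 */
	return 0;
}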
+
+struct nix_mark_format {
+ u8 total;
+ u8 in_use;
+ u32 *cfg;
};
struct npc_pkind {
@@ -141,9 +174,23 @@ struct npc_pkind {
u32 *pfchan_map;
};
+struct nix_flowkey {
+#define NIX_FLOW_KEY_ALG_MAX 32
+ u32 flowkey[NIX_FLOW_KEY_ALG_MAX];
+ int in_use;
+};
+
+struct nix_lso {
+ u8 total;
+ u8 in_use;
+};
+
struct nix_hw {
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast;
+ struct nix_flowkey flowkey;
+ struct nix_mark_format mark_format;
+ struct nix_lso lso;
};
struct rvu_hwinfo {
@@ -164,6 +211,16 @@ struct rvu_hwinfo {
struct npc_mcam mcam;
};
+struct mbox_wq_info {
+ struct otx2_mbox mbox;
+ struct rvu_work *mbox_wrk;
+
+ struct otx2_mbox mbox_up;
+ struct rvu_work *mbox_wrk_up;
+
+ struct workqueue_struct *mbox_wq;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -172,14 +229,17 @@ struct rvu {
struct rvu_hwinfo *hw;
struct rvu_pfvf *pf;
struct rvu_pfvf *hwvf;
- spinlock_t rsrc_lock; /* Serialize resource alloc/free */
+ struct mutex rsrc_lock; /* Serialize resource alloc/free */
+ int vfs; /* Number of VFs attached to RVU */
/* Mbox */
- struct otx2_mbox mbox;
- struct rvu_work *mbox_wrk;
- struct otx2_mbox mbox_up;
- struct rvu_work *mbox_wrk_up;
- struct workqueue_struct *mbox_wq;
+ struct mbox_wq_info afpf_wq_info;
+ struct mbox_wq_info afvf_wq_info;
+
+ /* PF FLR */
+ struct rvu_work *flr_wrk;
+ struct workqueue_struct *flr_wq;
+ struct mutex flr_lock; /* Serialize FLRs */
/* MSI-X */
u16 num_vec;
@@ -190,7 +250,7 @@ struct rvu {
/* CGX */
#define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */
u8 cgx_mapped_pfs;
- u8 cgx_cnt; /* available cgx ports */
+ u8 cgx_cnt_max; /* CGX port count max */
u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */
u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for
* every cgx lmac port
@@ -201,6 +261,8 @@ struct rvu {
struct workqueue_struct *cgx_evh_wq;
spinlock_t cgx_evq_lock; /* cgx event queue lock */
struct list_head cgx_evq_head; /* cgx event queue head */
+
+ char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -223,9 +285,22 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
+static inline bool is_rvu_9xxx_A0(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ return (pdev->revision == 0x00) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
+}
+
/* Function Prototypes
* RVU
*/
+static inline int is_afvf(u16 pcifunc)
+{
+ return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
+}
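is_afvf() relies on the RVU PF_FUNC encoding: the PF number sits above the low RVU_PFVF_FUNC_MASK bits, so a zero PF field identifies one of the AF's own VFs. A sketch of that check with the shift/mask values the driver uses elsewhere (assumed here, as they are not in this hunk):

#include <stdio.h>
#include <stdint.h>

/* Field layout as in the driver's headers (values assumed here):
 * pcifunc[15:10] = PF number, pcifunc[9:0] = function (0 = PF, VF id + 1).
 */
#define RVU_PFVF_PF_SHIFT    10
#define RVU_PFVF_FUNC_MASK   0x3FF

static int is_afvf(uint16_t pcifunc)
{
	/* AF's VFs carry PF number 0, so only FUNC bits may be set. */
	return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}

int main(void)
{
	uint16_t af_vf0 = 1;                          /* PF0, VF0 */
	uint16_t pf2_vf0 = (2 << RVU_PFVF_PF_SHIFT) | 1;

	printf("af_vf0:  %d\n", is_afvf(af_vf0));     /* 1 */
	printf("pf2_vf0: %d\n", is_afvf(pf2_vf0));    /* 0 */
	return 0;
}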
+
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
@@ -236,6 +311,7 @@ int rvu_get_pf(u16 pcifunc);
struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc);
void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf);
bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr);
+bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype);
int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot);
int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
@@ -266,89 +342,110 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
-int rvu_cgx_probe(struct rvu *rvu);
-void rvu_cgx_wq_destroy(struct rvu *rvu);
+int rvu_cgx_init(struct rvu *rvu);
+int rvu_cgx_exit(struct rvu *rvu);
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu);
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start);
-int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp);
-int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp);
-int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp);
-int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
-int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf);
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp);
-int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
struct npa_lf_alloc_req *req,
struct npa_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
/* NIX APIs */
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc);
int rvu_nix_init(struct rvu *rvu);
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, u32 cfg);
void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
-int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_req *req,
struct nix_lf_alloc_rsp *rsp);
-int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp);
-int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
struct nix_txsch_free_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
struct nix_rss_flowkey_cfg *req,
- struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg_rsp *rsp);
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp);
-int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
+ struct nix_mark_format_cfg *req,
+ struct nix_mark_format_cfg_rsp *rsp);
+int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
+ struct nix_lso_format_cfg *req,
+ struct nix_lso_format_cfg_rsp *rsp);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -360,9 +457,48 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, bool allmulti);
void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
+int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index);
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+ struct npc_mcam_free_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+ struct npc_mcam_write_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+ struct npc_mcam_shift_entry_req *req,
+ struct npc_mcam_shift_entry_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+ struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req,
+ struct npc_mcam_oper_counter_rsp *rsp);
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_and_write_entry_req *req,
+ struct npc_mcam_alloc_and_write_entry_rsp *rsp);
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+ struct npc_get_kex_cfg_rsp *rsp);
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 188185c15b4a..7d7133c5f799 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -20,14 +20,14 @@ struct cgx_evq_entry {
struct cgx_link_event link_event;
};
-#define M(_name, _id, _req_type, _rsp_type) \
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
static struct _req_type __maybe_unused \
-*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
{ \
struct _req_type *req; \
\
req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
- &rvu->mbox_up, devid, sizeof(struct _req_type), \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
sizeof(struct _rsp_type)); \
if (!req) \
return NULL; \
@@ -52,7 +52,7 @@ static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
- if (cgx_id >= rvu->cgx_cnt)
+ if (cgx_id >= rvu->cgx_cnt_max)
return NULL;
return rvu->cgx_idmap[cgx_id];
@@ -61,38 +61,40 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
- int cgx_cnt = rvu->cgx_cnt;
+ int cgx_cnt_max = rvu->cgx_cnt_max;
int cgx, lmac_cnt, lmac;
int pf = PF_CGXMAP_BASE;
int size, free_pkind;
- if (!cgx_cnt)
+ if (!cgx_cnt_max)
return 0;
- if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+ if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
return -EINVAL;
/* Alloc map table
* An additional entry is required since PF id starts from 1 and
* hence entry at offset 0 is invalid.
*/
- size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
- rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+ size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+ rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
if (!rvu->pf2cgxlmac_map)
return -ENOMEM;
- /* Initialize offset 0 with an invalid cgx and lmac id */
- rvu->pf2cgxlmac_map[0] = 0xFF;
+ /* Initialize all entries with an invalid cgx and lmac id */
+ memset(rvu->pf2cgxlmac_map, 0xFF, size);
/* Reverse map table */
rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
- cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+ cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
GFP_KERNEL);
if (!rvu->cgxlmac2pf_map)
return -ENOMEM;
rvu->cgx_mapped_pfs = 0;
- for (cgx = 0; cgx < cgx_cnt; cgx++) {
+ for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+ if (!rvu_cgx_pdata(cgx, rvu))
+ continue;
lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -177,12 +179,12 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
}
/* Send mbox message to PF */
- msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid);
+ msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
if (!msg)
continue;
msg->link_info = *linfo;
- otx2_mbox_msg_send(&rvu->mbox_up, pfid);
- err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid);
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
if (err)
dev_warn(rvu->dev, "notification to pf %d failed\n",
pfid);
@@ -216,7 +218,7 @@ static void cgx_evhandler_task(struct work_struct *work)
} while (1);
}
-static void cgx_lmac_event_handler_init(struct rvu *rvu)
+static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
struct cgx_event_cb cb;
int cgx, lmac, err;
@@ -228,14 +230,16 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
if (!rvu->cgx_evh_wq) {
dev_err(rvu->dev, "alloc workqueue failed");
- return;
+ return -ENOMEM;
}
cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
cb.data = rvu;
- for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
@@ -244,9 +248,11 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu)
cgx, lmac);
}
}
+
+ return 0;
}
-void rvu_cgx_wq_destroy(struct rvu *rvu)
+static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
if (rvu->cgx_evh_wq) {
flush_workqueue(rvu->cgx_evh_wq);
@@ -255,25 +261,28 @@ void rvu_cgx_wq_destroy(struct rvu *rvu)
}
}
-int rvu_cgx_probe(struct rvu *rvu)
+int rvu_cgx_init(struct rvu *rvu)
{
- int i, err;
+ int cgx, err;
+ void *cgxd;
- /* find available cgx ports */
- rvu->cgx_cnt = cgx_get_cgx_cnt();
- if (!rvu->cgx_cnt) {
+ /* CGX port IDs start from 0 and are not necessarily contiguous.
+ * Hence we allocate resources based on the maximum port ID value.
+ */
+ rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+ if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
return -ENODEV;
}
- rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
- GFP_KERNEL);
+ rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+ sizeof(void *), GFP_KERNEL);
if (!rvu->cgx_idmap)
return -ENOMEM;
/* Initialize the cgxdata table */
- for (i = 0; i < rvu->cgx_cnt; i++)
- rvu->cgx_idmap[i] = cgx_get_pdata(i);
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
+ rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
/* Map CGX LMAC interfaces to RVU PFs */
err = rvu_map_cgx_lmac_pf(rvu);
@@ -281,7 +290,47 @@ int rvu_cgx_probe(struct rvu *rvu)
return err;
/* Register for CGX events */
- cgx_lmac_event_handler_init(rvu);
+ err = cgx_lmac_event_handler_init(rvu);
+ if (err)
+ return err;
+
+ /* Ensure event handler registration is completed before
+ * we turn on the links.
+ */
+ mb();
+
+ /* Do link up for all CGX ports */
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ err = cgx_lmac_linkup_start(cgxd);
+ if (err)
+ dev_err(rvu->dev,
+ "Link up process failed to start on cgx %d\n",
+ cgx);
+ }
+
+ return 0;
+}
+
+int rvu_cgx_exit(struct rvu *rvu)
+{
+ int cgx, lmac;
+ void *cgxd;
+
+ for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
+ cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
+ for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+ cgx_lmac_evh_unregister(cgxd, lmac);
+ }
+
+ /* Ensure event handler unregistration is completed */
+ mb();
+
+ rvu_cgx_wq_destroy(rvu);
return 0;
}
@@ -303,21 +352,21 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return 0;
}
-int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
return 0;
}
-int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
struct cgx_stats_rsp *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
@@ -354,7 +403,7 @@ int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
@@ -368,7 +417,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
+int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
{
@@ -387,7 +436,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
@@ -407,7 +456,7 @@ int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
@@ -451,21 +500,21 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
return 0;
}
-int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
return 0;
}
-int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
struct cgx_link_info_msg *rsp)
{
u8 cgx_id, lmac_id;
@@ -500,14 +549,14 @@ static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
lmac_id, en);
}
-int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
return 0;
}
-int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a5ab7eff2301..4a7609fd6dd0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -43,6 +43,19 @@ enum mc_buf_cnt {
MC_BUF_CNT_2048,
};
+enum nix_mark_fmt_indexes {
+ NIX_MARK_CFG_IP_DSCP_RED,
+ NIX_MARK_CFG_IP_DSCP_YELLOW,
+ NIX_MARK_CFG_IP_DSCP_YELLOW_RED,
+ NIX_MARK_CFG_IP_ECN_RED,
+ NIX_MARK_CFG_IP_ECN_YELLOW,
+ NIX_MARK_CFG_IP_ECN_YELLOW_RED,
+ NIX_MARK_CFG_VLAN_DEI_RED,
+ NIX_MARK_CFG_VLAN_DEI_YELLOW,
+ NIX_MARK_CFG_VLAN_DEI_YELLOW_RED,
+ NIX_MARK_CFG_MAX,
+};
+
/* For now considering MC resources needed for broadcast
* pkt replication only. i.e 256 HWVFs + 12 PFs.
*/
@@ -55,6 +68,17 @@ struct mce {
u16 pcifunc;
};
+bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return false;
+ return true;
+}
+
int rvu_get_nixlf_count(struct rvu *rvu)
{
struct rvu_block *block;
@@ -94,11 +118,29 @@ static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr)
return NULL;
}
+static void nix_rx_sync(struct rvu *rvu, int blkaddr)
+{
+ int err;
+
+ /* Sync all in-flight RX packets to LLC/DRAM */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0));
+ err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true);
+ if (err)
+ dev_err(rvu->dev, "NIX RX software sync failed\n");
+
+ /* As per a HW erratum in 9xxx A0 silicon, HW may clear the
+ * SW_SYNC[ENA] bit too early. Hence wait for 50us more.
+ */
+ if (is_rvu_9xxx_A0(rvu))
+ usleep_range(50, 60);
+}
+
static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
int lvl, u16 pcifunc, u16 schq)
{
struct nix_txsch *txsch;
struct nix_hw *nix_hw;
+ u16 map_func;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
@@ -109,12 +151,19 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
if (schq >= txsch->schq.max)
return false;
- spin_lock(&rvu->rsrc_lock);
- if (txsch->pfvf_map[schq] != pcifunc) {
- spin_unlock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
+ map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
+ mutex_unlock(&rvu->rsrc_lock);
+
+ /* For a TL1 schq, sharing across VFs of the same PF is OK */
+ if (lvl == NIX_TXSCH_LVL_TL1 &&
+ rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
return false;
- }
- spin_unlock(&rvu->rsrc_lock);
+
+ if (lvl != NIX_TXSCH_LVL_TL1 &&
+ map_func != pcifunc)
+ return false;
+
return true;
}
@@ -122,7 +171,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
u8 cgx_id, lmac_id;
- int pkind, pf;
+ int pkind, pf, vf;
int err;
pf = rvu_get_pf(pcifunc);
@@ -148,6 +197,14 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_npc_set_pkind(rvu, pkind, pfvf);
break;
case NIX_INTF_TYPE_LBK:
+ vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
+ pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf);
+ pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) :
+ NIX_CHAN_LBK_CHX(0, vf + 1);
+ pfvf->rx_chan_cnt = 1;
+ pfvf->tx_chan_cnt = 1;
+ rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base, false);
break;
}
@@ -168,14 +225,21 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
rvu_npc_install_bcast_match_entry(rvu, pcifunc,
nixlf, pfvf->rx_chan_base);
+ pfvf->maxlen = NIC_HW_MIN_FRS;
+ pfvf->minlen = NIC_HW_MIN_FRS;
return 0;
}
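The LBK case above gives each AF VF its own RX channel and its partner's channel for TX, so (VF0,VF1), (VF2,VF3), ... form loopback pairs; this is why rvu_enable_sriov() insists on an even VF count. A user-space sketch of the pairing, reducing NIX_CHAN_LBK_CHX() to a bare channel index (an assumption, for illustration only):

#include <stdio.h>

int main(void)
{
	int vf;

	/* Each VF receives on its own LBK channel and transmits on its
	 * partner's, so (VF0,VF1), (VF2,VF3), ... form loopback pairs.
	 */
	for (vf = 0; vf < 4; vf++) {
		int rx = vf;
		int tx = (vf & 0x1) ? vf - 1 : vf + 1;

		printf("VF%d: rx_chan=%d tx_chan=%d\n", vf, rx, tx);
	}
	return 0;
}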
static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int err;
+ pfvf->maxlen = 0;
+ pfvf->minlen = 0;
+ pfvf->rxvlan = false;
+
/* Remove this PF_FUNC from bcast pkt replication list */
err = nix_update_bcast_mce_list(rvu, pcifunc, false);
if (err) {
@@ -234,17 +298,21 @@ static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr,
/* TCP's flags field */
field.layer = NIX_TXLAYER_OL4;
field.offset = 12;
- field.sizem1 = 0; /* not needed */
+ field.sizem1 = 1; /* 2 bytes */
field.alg = NIX_LSOALG_TCP_FLAGS;
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++),
*(u64 *)&field);
}
-static void nix_setup_lso(struct rvu *rvu, int blkaddr)
+static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
{
u64 cfg, idx, fidx = 0;
+ /* Get max HW supported format indices */
+ cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF;
+ nix_hw->lso.total = cfg;
+
/* Enable LSO */
cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG);
/* For TSO, set first and middle segment flags to
@@ -254,7 +322,10 @@ static void nix_setup_lso(struct rvu *rvu, int blkaddr)
cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16);
rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63));
- /* Configure format fields for TCPv4 segmentation offload */
+ /* Setup default static LSO formats
+ *
+ * Configure format fields for TCPv4 segmentation offload
+ */
idx = NIX_LSO_FORMAT_IDX_TSOV4;
nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx);
nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx);
@@ -264,6 +335,7 @@ static void nix_setup_lso(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
}
+ nix_hw->lso.in_use++;
/* Configure format fields for TCPv6 segmentation offload */
idx = NIX_LSO_FORMAT_IDX_TSOV6;
@@ -276,6 +348,7 @@ static void nix_setup_lso(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr,
NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL);
}
+ nix_hw->lso.in_use++;
}
static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
@@ -388,9 +461,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
bool ena;
u64 cfg;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (!pfvf->nixlf || blkaddr < 0)
+ if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
block = &hw->block[blkaddr];
@@ -400,9 +472,14 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
return NIX_AF_ERR_AQ_ENQUEUE;
}
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* Skip NIXLF check for broadcast MCE entry init */
+ if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
+ if (!pfvf->nixlf || nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+ }
switch (req->ctype) {
case NIX_AQ_CTYPE_RQ:
@@ -447,7 +524,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
/* Check if SQ pointed SMQ belongs to this PF/VF or not */
if (req->ctype == NIX_AQ_CTYPE_SQ &&
- req->op != NIX_AQ_INSTOP_WRITE) {
+ ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
+ (req->op == NIX_AQ_INSTOP_WRITE &&
+ req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
pcifunc, req->sq.smq))
return NIX_AF_ERR_AQ_ENQUEUE;
@@ -637,25 +716,25 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
-int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
-int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp)
{
return nix_lf_hwctx_disable(rvu, req);
}
-int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
struct nix_lf_alloc_req *req,
struct nix_lf_alloc_rsp *rsp)
{
- int nixlf, qints, hwctx_size, err, rc = 0;
+ int nixlf, qints, hwctx_size, intf, err, rc = 0;
struct rvu_hwinfo *hw = rvu->hw;
u16 pcifunc = req->hdr.pcifunc;
struct rvu_block *block;
@@ -676,6 +755,24 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
if (nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
+ /* Check if requested 'NIXLF <=> NPALF' mapping is valid */
+ if (req->npa_func) {
+ /* If default, use 'this' NIXLF's PFFUNC */
+ if (req->npa_func == RVU_DEFAULT_PF_FUNC)
+ req->npa_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA))
+ return NIX_AF_INVAL_NPA_PF_FUNC;
+ }
+
+ /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */
+ if (req->sso_func) {
+ /* If default, use 'this' NIXLF's PFFUNC */
+ if (req->sso_func == RVU_DEFAULT_PF_FUNC)
+ req->sso_func = pcifunc;
+ if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO))
+ return NIX_AF_INVAL_SSO_PF_FUNC;
+ }
+
/* If RSS is being enabled, check if requested config is valid.
* RSS table size should be power of two, otherwise
* RSS_GRP::OFFSET + adder might go beyond that group or
@@ -777,21 +874,20 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
(u64)pfvf->nix_qints_ctx->iova);
rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36));
+ /* Setup VLANX TPIDs.
+ * Use VLAN1 for 802.1Q
+ * and VLAN0 for 802.1AD.
+ */
+ cfg = (0x8100ULL << 16) | 0x88A8ULL;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
/* Enable LMTST for this NIX LF */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0));
- /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC
- * If requester has sent a 'RVU_DEFAULT_PF_FUNC' use this NIX LF's
- * PCIFUNC itself.
- */
- if (req->npa_func == RVU_DEFAULT_PF_FUNC)
- cfg = pcifunc;
- else
+ /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */
+ if (req->npa_func)
cfg = req->npa_func;
-
- if (req->sso_func == RVU_DEFAULT_PF_FUNC)
- cfg |= (u64)pcifunc << 16;
- else
+ if (req->sso_func)
cfg |= (u64)req->sso_func << 16;
cfg |= (u64)req->xqe_sz << 33;
@@ -800,10 +896,14 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu,
/* Config Rx pkt length, csum checks and apad enable / disable */
rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg);
- err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf);
+ intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+ err = nix_interface_init(rvu, pcifunc, intf, nixlf);
if (err)
goto free_mem;
+ /* Disable NPC entries as NIXLF's contexts are not initialized yet */
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+
goto exit;
free_mem:
@@ -823,10 +923,18 @@ exit:
rsp->tx_chan_cnt = pfvf->tx_chan_cnt;
rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4;
rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6;
+ /* Get HW supported stat count */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ rsp->lf_rx_stats = ((cfg >> 32) & 0xFF);
+ rsp->lf_tx_stats = ((cfg >> 24) & 0xFF);
+ /* Get count of CQ IRQs and error IRQs supported per LF */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
+ rsp->qints = ((cfg >> 12) & 0xFFF);
+ rsp->cints = ((cfg >> 24) & 0xFFF);
return rc;
}
-int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -860,6 +968,41 @@ int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu,
+ struct nix_mark_format_cfg *req,
+ struct nix_mark_format_cfg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, rc;
+ u32 cfg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ cfg = (((u32)req->offset & 0x7) << 16) |
+ (((u32)req->y_mask & 0xF) << 12) |
+ (((u32)req->y_val & 0xF) << 8) |
+ (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF);
+
+ rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg);
+ if (rc < 0) {
+ dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)",
+ rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK);
+ return NIX_AF_ERR_MARK_CFG_FAIL;
+ }
+
+ rsp->mark_format_idx = rc;
+ return 0;
+}
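The cfg word assembled above packs five NIX_AF_MARK_FORMATX_CTL fields: offset[18:16], y_mask[15:12], y_val[11:8], r_mask[7:4] and r_val[3:0]. A small user-space decoder, handy for sanity-checking the defaults installed later by nix_af_mark_format_setup():

#include <stdio.h>
#include <stdint.h>

static void decode_mark_cfg(uint32_t cfg)
{
	printf("cfg=0x%05x: offset=%u y_mask=0x%x y_val=0x%x "
	       "r_mask=0x%x r_val=0x%x\n",
	       cfg, (cfg >> 16) & 0x7, (cfg >> 12) & 0xF,
	       (cfg >> 8) & 0xF, (cfg >> 4) & 0xF, cfg & 0xF);
}

int main(void)
{
	decode_mark_cfg(0x10003);  /* NIX_MARK_CFG_IP_DSCP_RED default */
	decode_mark_cfg(0x60c0c);  /* NIX_MARK_CFG_IP_ECN_YELLOW_RED default */
	return 0;
}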
+
/* Disable shaping of pkts by a scheduler queue
* at a given scheduler level.
*/
@@ -918,7 +1061,74 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
}
-int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
+static int
+rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
+ u16 *schq_list, u16 *schq_cnt)
+{
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ u8 cgx_id, lmac_id;
+ u16 schq_base;
+ u32 *pfvf_map;
+ int pf, intf;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -ENODEV;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
+ pfvf_map = txsch->pfvf_map;
+ pf = rvu_get_pf(pcifunc);
+
+ /* Static allocation: two TL1s per link */
+ intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+
+ switch (intf) {
+ case NIX_INTF_TYPE_CGX:
+ rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+ schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
+ break;
+ case NIX_INTF_TYPE_LBK:
+ schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (schq_base + 1 > txsch->schq.max)
+ return -ENODEV;
+
+ /* init pfvf_map as we store flags */
+ if (pfvf_map[schq_base] == U32_MAX) {
+ pfvf_map[schq_base] =
+ TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+ pfvf_map[schq_base + 1] =
+ TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+
+ /* One-time reset for TL1 */
+ nix_reset_tx_linkcfg(rvu, blkaddr,
+ NIX_TXSCH_LVL_TL1, schq_base);
+ nix_reset_tx_shaping(rvu, blkaddr,
+ NIX_TXSCH_LVL_TL1, schq_base);
+
+ nix_reset_tx_linkcfg(rvu, blkaddr,
+ NIX_TXSCH_LVL_TL1, schq_base + 1);
+ nix_reset_tx_shaping(rvu, blkaddr,
+ NIX_TXSCH_LVL_TL1, schq_base + 1);
+ }
+
+ if (schq_list && schq_cnt) {
+ schq_list[0] = schq_base;
+ schq_list[1] = schq_base + 1;
+ *schq_cnt = 2;
+ }
+
+ return 0;
+}
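With TL1 queues statically carved as two per link, the schq_base arithmetic above reduces to link_index * 2, with the LBK link placed after all CGX links. Illustrative numbers, assuming MAX_LMAC_PER_CGX is 4 (the driver constant is not shown in this hunk):

#include <stdio.h>

#define MAX_LMAC_PER_CGX  4   /* assumed value, for illustration */

int main(void)
{
	int cgx_cnt_max = 3;
	int cgx, lmac;

	for (cgx = 0; cgx < cgx_cnt_max; cgx++)
		for (lmac = 0; lmac < MAX_LMAC_PER_CGX; lmac++)
			printf("CGX%d LMAC%d -> TL1 %d,%d\n", cgx, lmac,
			       (cgx * MAX_LMAC_PER_CGX + lmac) * 2,
			       (cgx * MAX_LMAC_PER_CGX + lmac) * 2 + 1);

	/* The LBK link starts after all CGX links. */
	printf("LBK -> TL1 %d,%d\n",
	       cgx_cnt_max * MAX_LMAC_PER_CGX * 2,
	       cgx_cnt_max * MAX_LMAC_PER_CGX * 2 + 1);
	return 0;
}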
+
+int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
struct nix_txsch_alloc_req *req,
struct nix_txsch_alloc_rsp *rsp)
{
@@ -928,6 +1138,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
struct rvu_pfvf *pfvf;
struct nix_hw *nix_hw;
int blkaddr, rc = 0;
+ u32 *pfvf_map;
u16 schq;
pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -939,17 +1150,27 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
if (!nix_hw)
return -EINVAL;
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
req_schq = req->schq_contig[lvl] + req->schq[lvl];
+ pfvf_map = txsch->pfvf_map;
+
+ if (!req_schq)
+ continue;
/* There are only 28 TL1s */
- if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
- goto err;
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+ if (req->schq_contig[lvl] ||
+ req->schq[lvl] > 2 ||
+ rvu_get_tl1_schqs(rvu, blkaddr,
+ pcifunc, NULL, NULL))
+ goto err;
+ continue;
+ }
/* Check if request is valid */
- if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+ if (req_schq > MAX_TXSCHQ_PER_FUNC)
goto err;
/* If contiguous queues are needed, check for availability */
@@ -965,16 +1186,32 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
txsch = &nix_hw->txsch[lvl];
rsp->schq_contig[lvl] = req->schq_contig[lvl];
+ pfvf_map = txsch->pfvf_map;
rsp->schq[lvl] = req->schq[lvl];
- schq = 0;
+ if (!req->schq[lvl] && !req->schq_contig[lvl])
+ continue;
+
+ /* Handle TL1 specially as its allocation
+ * is restricted to 2 TL1s per link
+ */
+
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+ rsp->schq_contig[lvl] = 0;
+ rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
+ &rsp->schq_list[lvl][0],
+ &rsp->schq[lvl]);
+ continue;
+ }
+
/* Alloc contiguous queues first */
if (req->schq_contig[lvl]) {
schq = rvu_alloc_rsrc_contig(&txsch->schq,
req->schq_contig[lvl]);
for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
- txsch->pfvf_map[schq] = pcifunc;
+ pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
rsp->schq_contig_list[lvl][idx] = schq;
@@ -985,7 +1222,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
/* Alloc non-contiguous queues */
for (idx = 0; idx < req->schq[lvl]; idx++) {
schq = rvu_alloc_rsrc(&txsch->schq);
- txsch->pfvf_map[schq] = pcifunc;
+ pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
rsp->schq_list[lvl][idx] = schq;
@@ -995,7 +1232,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
err:
rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
exit:
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
return rc;
}
@@ -1020,14 +1257,14 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
return NIX_AF_ERR_AF_LF_INVALID;
/* Disable TL2/3 queue links before SMQ flush*/
- spin_lock(&rvu->rsrc_lock);
+ mutex_lock(&rvu->rsrc_lock);
for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
continue;
txsch = &nix_hw->txsch[lvl];
for (schq = 0; schq < txsch->schq.max; schq++) {
- if (txsch->pfvf_map[schq] != pcifunc)
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
}
@@ -1036,7 +1273,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
/* Flush SMQs */
txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
for (schq = 0; schq < txsch->schq.max; schq++) {
- if (txsch->pfvf_map[schq] != pcifunc)
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
/* Do SMQ flush and set enqueue xoff */
@@ -1054,15 +1291,21 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
/* Now free scheduler queues to free pool */
for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ /* Free all SCHQs except TL1, as TL1 is
+ * shared across all VFs of an RVU PF
+ */
+ if (lvl == NIX_TXSCH_LVL_TL1)
+ continue;
+
txsch = &nix_hw->txsch[lvl];
for (schq = 0; schq < txsch->schq.max; schq++) {
- if (txsch->pfvf_map[schq] != pcifunc)
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
continue;
rvu_free_rsrc(&txsch->schq, schq);
txsch->pfvf_map[schq] = 0;
}
}
- spin_unlock(&rvu->rsrc_lock);
+ mutex_unlock(&rvu->rsrc_lock);
/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
@@ -1073,11 +1316,81 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
return 0;
}
-int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
+static int nix_txschq_free_one(struct rvu *rvu,
+ struct nix_txsch_free_req *req)
+{
+ int lvl, schq, nixlf, blkaddr, rc;
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_txsch *txsch;
+ struct nix_hw *nix_hw;
+ u32 *pfvf_map;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ lvl = req->schq_lvl;
+ schq = req->schq;
+ txsch = &nix_hw->txsch[lvl];
+
+ /* Don't allow freeing TL1 */
+ if (lvl > NIX_TXSCH_LVL_TL2 ||
+ schq >= txsch->schq.max)
+ goto err;
+
+ pfvf_map = txsch->pfvf_map;
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) {
+ mutex_unlock(&rvu->rsrc_lock);
+ goto err;
+ }
+
+ /* Flush if it is an SMQ. The onus of disabling
+ * TL2/TL3 queue links before an SMQ flush is on the user.
+ */
+ if (lvl == NIX_TXSCH_LVL_SMQ) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+ /* Do SMQ flush and set enqueue xoff */
+ cfg |= BIT_ULL(50) | BIT_ULL(49);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+
+ /* Wait for flush to complete */
+ rc = rvu_poll_reg(rvu, blkaddr,
+ NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
+ if (rc) {
+ dev_err(rvu->dev,
+ "NIXLF%d: SMQ%d flush failed\n", nixlf, schq);
+ }
+ }
+
+ /* Free the resource */
+ rvu_free_rsrc(&txsch->schq, schq);
+ txsch->pfvf_map[schq] = 0;
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+err:
+ return NIX_AF_ERR_TLX_INVALID;
+}
+
+int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu,
struct nix_txsch_free_req *req,
struct msg_rsp *rsp)
{
- return nix_txschq_free(rvu, req->hdr.pcifunc);
+ if (req->flags & TXSCHQ_FREE_ALL)
+ return nix_txschq_free(rvu, req->hdr.pcifunc);
+ else
+ return nix_txschq_free_one(rvu, req);
}
static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
@@ -1118,16 +1431,73 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
return true;
}
-int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+static int
+nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
+{
+ u16 schq_list[2], schq_cnt, schq;
+ int blkaddr, idx, err = 0;
+ u16 map_func, map_flags;
+ struct nix_hw *nix_hw;
+ u64 reg, regval;
+ u32 *pfvf_map;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ err = rvu_get_tl1_schqs(rvu, blkaddr,
+ pcifunc, schq_list, &schq_cnt);
+ if (err)
+ goto unlock;
+
+ for (idx = 0; idx < schq_cnt; idx++) {
+ schq = schq_list[idx];
+ map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+ map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+ /* Check if config is already done or this is a PF */
+ if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
+ continue;
+
+ /* default configuration */
+ reg = NIX_AF_TL1X_TOPOLOGY(schq);
+ regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+ rvu_write64(rvu, blkaddr, reg, regval);
+ reg = NIX_AF_TL1X_SCHEDULE(schq);
+ regval = TXSCH_TL1_DFLT_RR_QTM;
+ rvu_write64(rvu, blkaddr, reg, regval);
+ reg = NIX_AF_TL1X_CIR(schq);
+ regval = 0;
+ rvu_write64(rvu, blkaddr, reg, regval);
+
+ map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+ pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+ }
+unlock:
+ mutex_unlock(&rvu->rsrc_lock);
+ return err;
+}
+
+int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
struct nix_txschq_config *req,
struct msg_rsp *rsp)
{
+ u16 schq, pcifunc = req->hdr.pcifunc;
struct rvu_hwinfo *hw = rvu->hw;
- u16 pcifunc = req->hdr.pcifunc;
u64 reg, regval, schq_regbase;
struct nix_txsch *txsch;
+ u16 map_func, map_flags;
struct nix_hw *nix_hw;
int blkaddr, idx, err;
+ u32 *pfvf_map;
int nixlf;
if (req->lvl >= NIX_TXSCH_LVL_CNT ||
@@ -1147,6 +1517,16 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
return NIX_AF_ERR_AF_LF_INVALID;
txsch = &nix_hw->txsch[req->lvl];
+ pfvf_map = txsch->pfvf_map;
+
+ /* A VF is only allowed to trigger
+ * setting the default cfg on TL1
+ */
+ if (pcifunc & RVU_PFVF_FUNC_MASK &&
+ req->lvl == NIX_TXSCH_LVL_TL1) {
+ return nix_tl1_default_cfg(rvu, pcifunc);
+ }
+
for (idx = 0; idx < req->num_regs; idx++) {
reg = req->reg[idx];
regval = req->regval[idx];
@@ -1164,6 +1544,21 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
regval |= ((u64)nixlf << 24);
}
+ /* Mark config as done for TL1 by PF */
+ if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
+ schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
+ schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+ map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+ map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+ pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+ mutex_unlock(&rvu->rsrc_lock);
+ }
+
rvu_write64(rvu, blkaddr, reg, regval);
/* Check for SMQ flush, if so, poll for its completion */
@@ -1181,35 +1576,22 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr,
struct nix_vtag_config *req)
{
- u64 regval = 0;
-
-#define NIX_VTAGTYPE_MAX 0x8ull
-#define NIX_VTAGSIZE_MASK 0x7ull
-#define NIX_VTAGSTRIP_CAP_MASK 0x30ull
+ u64 regval = req->vtag_size;
- if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX ||
- req->vtag_size > VTAGSIZE_T8)
+ if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8)
return -EINVAL;
- regval = rvu_read64(rvu, blkaddr,
- NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type));
-
- if (req->rx.strip_vtag && req->rx.capture_vtag)
- regval |= BIT_ULL(4) | BIT_ULL(5);
- else if (req->rx.strip_vtag)
+ if (req->rx.capture_vtag)
+ regval |= BIT_ULL(5);
+ if (req->rx.strip_vtag)
regval |= BIT_ULL(4);
- else
- regval &= ~(BIT_ULL(4) | BIT_ULL(5));
-
- regval &= ~NIX_VTAGSIZE_MASK;
- regval |= req->vtag_size & NIX_VTAGSIZE_MASK;
rvu_write64(rvu, blkaddr,
NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval);
return 0;
}
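The rewritten nix_rx_vtag_cfg() composes the NIX_AF_LFX_RX_VTAG_TYPEX value directly: vtag_size in the low bits, strip enable at bit 4, capture enable at bit 5. A user-space sketch of that composition, assuming VTAGSIZE_T4 is the zero encoding:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)  (1ULL << (n))

static uint64_t rx_vtag_regval(uint8_t vtag_size, int strip, int capture)
{
	uint64_t regval = vtag_size;   /* size field in the low bits */

	if (capture)
		regval |= BIT_ULL(5);
	if (strip)
		regval |= BIT_ULL(4);
	return regval;
}

int main(void)
{
	/* Strip and capture a 4-byte tag (VTAGSIZE_T4 == 0, assumed). */
	printf("regval = 0x%llx\n",
	       (unsigned long long)rx_vtag_regval(0, 1, 1));
	return 0;
}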
-int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu,
+int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu,
struct nix_vtag_config *req,
struct msg_rsp *rsp)
{
@@ -1243,7 +1625,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op,
struct nix_aq_enq_req aq_req;
int err;
- aq_req.hdr.pcifunc = pcifunc;
+ aq_req.hdr.pcifunc = 0;
aq_req.ctype = NIX_AQ_CTYPE_MCE;
aq_req.op = op;
aq_req.qidx = mce;
@@ -1294,7 +1676,7 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
return 0;
/* Add a new one to the list, at the tail */
- mce = kzalloc(sizeof(*mce), GFP_ATOMIC);
+ mce = kzalloc(sizeof(*mce), GFP_KERNEL);
if (!mce)
return -ENOMEM;
mce->idx = idx;
@@ -1317,6 +1699,10 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
struct rvu_pfvf *pfvf;
int blkaddr;
+ /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
+ if (is_afvf(pcifunc))
+ return 0;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return 0;
@@ -1340,7 +1726,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
return -EINVAL;
}
- spin_lock(&mcast->mce_lock);
+ mutex_lock(&mcast->mce_lock);
err = nix_update_mce_list(mce_list, pcifunc, idx, add);
if (err)
@@ -1370,7 +1756,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
}
end:
- spin_unlock(&mcast->mce_lock);
+ mutex_unlock(&mcast->mce_lock);
return err;
}
@@ -1455,7 +1841,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
BIT_ULL(63) | (mcast->replay_pkind << 24) |
BIT_ULL(20) | MC_BUF_CNT);
- spin_lock_init(&mcast->mce_lock);
+ mutex_init(&mcast->mce_lock);
return nix_setup_bcast_tables(rvu, nix_hw);
}
@@ -1499,14 +1885,66 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
* PF/VF pcifunc mapping info.
*/
txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
- sizeof(u16), GFP_KERNEL);
+ sizeof(u32), GFP_KERNEL);
if (!txsch->pfvf_map)
return -ENOMEM;
+ memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
}
return 0;
}
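A note on the memset() above: U8_MAX writes 0xFF into every byte, so each u32 pfvf_map word starts as 0xFFFFFFFF. With the 16/16 func/flags split sketched earlier, TXSCH_MAP_FUNC() then reads 0xFFFF, which no real PF_FUNC carries, so the queue is recognizably unowned (an inference from the map layout, not stated in the patch).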
-int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
+int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr, u32 cfg)
+{
+ int fmt_idx;
+
+ for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) {
+ if (nix_hw->mark_format.cfg[fmt_idx] == cfg)
+ return fmt_idx;
+ }
+ if (fmt_idx >= nix_hw->mark_format.total)
+ return -ERANGE;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg);
+ nix_hw->mark_format.cfg[fmt_idx] = cfg;
+ nix_hw->mark_format.in_use++;
+ return fmt_idx;
+}
+
+static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
+ int blkaddr)
+{
+ u64 cfgs[] = {
+ [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003,
+ [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200,
+ [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203,
+ [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c,
+ [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00,
+ [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c,
+ [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008,
+ [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800,
+ [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808,
+ };
+ int i, rc;
+ u64 total;
+
+ total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8;
+ nix_hw->mark_format.total = (u8)total;
+ nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32),
+ GFP_KERNEL);
+ if (!nix_hw->mark_format.cfg)
+ return -ENOMEM;
+ for (i = 0; i < NIX_MARK_CFG_MAX; i++) {
+ rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]);
+ if (rc < 0)
+ dev_err(rvu->dev, "Err %d in setup of mark format %d\n",
+ rc, i);
+ }
+
+ return 0;
+}
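rvu_nix_reserve_mark_format() is reuse-first: an already-programmed cfg returns its existing index instead of consuming a new NIX_AF_MARK_FORMATX_CTL slot, and -ERANGE signals a full table. An illustrative call sequence (variable names are hypothetical):

	/* Reserving the same marking cfg twice yields the same index */
	int a = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, 0x10003);
	int b = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, 0x10003);
	/* On success a == b; once mark_format.total slots are taken,
	 * any new cfg gets -ERANGE.
	 */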
+
+int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1537,190 +1975,287 @@ int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req,
}
/* Returns the ALG index to be set into NPC_RX_ACTION */
-static int get_flowkey_alg_idx(u32 flow_cfg)
-{
- u32 ip_cfg;
-
- flow_cfg &= ~FLOW_KEY_TYPE_PORT;
- ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
- if (flow_cfg == ip_cfg)
- return FLOW_KEY_ALG_IP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP))
- return FLOW_KEY_ALG_TCP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP))
- return FLOW_KEY_ALG_UDP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP))
- return FLOW_KEY_ALG_SCTP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP))
- return FLOW_KEY_ALG_TCP_UDP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP))
- return FLOW_KEY_ALG_TCP_SCTP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
- return FLOW_KEY_ALG_UDP_SCTP;
- else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP |
- FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP))
- return FLOW_KEY_ALG_TCP_UDP_SCTP;
-
- return FLOW_KEY_ALG_PORT;
-}
-
-int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu,
- struct nix_rss_flowkey_cfg *req,
- struct msg_rsp *rsp)
+static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
{
- struct rvu_hwinfo *hw = rvu->hw;
- u16 pcifunc = req->hdr.pcifunc;
- int alg_idx, nixlf, blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
-
- nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
- if (nixlf < 0)
- return NIX_AF_ERR_AF_LF_INVALID;
+ int i;
- alg_idx = get_flowkey_alg_idx(req->flowkey_cfg);
+ /* Scan over existing algo entries to find a match */
+ for (i = 0; i < nix_hw->flowkey.in_use; i++)
+ if (nix_hw->flowkey.flowkey[i] == flow_cfg)
+ return i;
- rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
- alg_idx, req->mcam_index);
- return 0;
+ return -ERANGE;
}
-static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
+static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
- struct nix_rx_flowkey_alg *field = NULL;
- int idx, key_type;
+ int idx, nr_field, key_off, field_marker, keyoff_marker;
+ int max_key_off, max_bit_pos, group_member;
+ struct nix_rx_flowkey_alg *field;
+ struct nix_rx_flowkey_alg tmp;
+ u32 key_type, valid_key;
if (!alg)
- return;
+ return -EINVAL;
- /* FIELD0: IPv4
- * FIELD1: IPv6
- * FIELD2: TCP/UDP/SCTP/ALL
- * FIELD3: Unused
- * FIELD4: Unused
- *
- * Each of the 32 possible flow key algorithm definitions should
+#define FIELDS_PER_ALG 5
+#define MAX_KEY_OFF 40
+ /* Clear all fields */
+ memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG);
+
+ /* Each of the 32 possible flow key algorithm definitions should
* fall into above incremental config (except ALG0). Otherwise a
* single NPC MCAM entry is not sufficient for supporting RSS.
*
* If a different definition or combination needed then NPC MCAM
* has to be programmed to filter such pkts and it's action should
* point to this definition to calculate flowtag or hash.
+ *
+ * The `for loop` goes over _all_ protocol fields and the following
+ * variables depict the state machine's forward progress logic.
+ *
+ * keyoff_marker - Enabled when hash byte length needs to be accounted
+ * in field->key_offset update.
+ * field_marker - Enabled when a new field needs to be selected.
+ * group_member - Enabled when protocol is part of a group.
*/
- for (idx = 0; idx < 32; idx++) {
- key_type = flow_cfg & BIT_ULL(idx);
- if (!key_type)
- continue;
+
+ keyoff_marker = 0; max_key_off = 0; group_member = 0;
+ nr_field = 0; key_off = 0; field_marker = 1;
+ field = &tmp; max_bit_pos = fls(flow_cfg);
+ for (idx = 0;
+ idx < max_bit_pos && nr_field < FIELDS_PER_ALG &&
+ key_off < MAX_KEY_OFF; idx++) {
+ key_type = BIT(idx);
+ valid_key = flow_cfg & key_type;
+ /* Found a field marker, reset the field values */
+ if (field_marker)
+ memset(&tmp, 0, sizeof(tmp));
+
switch (key_type) {
- case FLOW_KEY_TYPE_PORT:
- field = &alg[0];
+ case NIX_FLOW_KEY_TYPE_PORT:
field->sel_chan = true;
/* This should be set to 1, when SEL_CHAN is set */
field->bytesm1 = 1;
+ field_marker = true;
+ keyoff_marker = true;
break;
- case FLOW_KEY_TYPE_IPV4:
- field = &alg[0];
+ case NIX_FLOW_KEY_TYPE_IPV4:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP;
field->hdr_offset = 12; /* SIP offset */
field->bytesm1 = 7; /* SIP + DIP, 8 bytes */
field->ltype_mask = 0xF; /* Match only IPv4 */
+ field_marker = true;
+ keyoff_marker = false;
break;
- case FLOW_KEY_TYPE_IPV6:
- field = &alg[1];
+ case NIX_FLOW_KEY_TYPE_IPV6:
field->lid = NPC_LID_LC;
field->ltype_match = NPC_LT_LC_IP6;
field->hdr_offset = 8; /* SIP offset */
field->bytesm1 = 31; /* SIP + DIP, 32 bytes */
field->ltype_mask = 0xF; /* Match only IPv6 */
+ field_marker = true;
+ keyoff_marker = true;
break;
- case FLOW_KEY_TYPE_TCP:
- case FLOW_KEY_TYPE_UDP:
- case FLOW_KEY_TYPE_SCTP:
- field = &alg[2];
+ case NIX_FLOW_KEY_TYPE_TCP:
+ case NIX_FLOW_KEY_TYPE_UDP:
+ case NIX_FLOW_KEY_TYPE_SCTP:
field->lid = NPC_LID_LD;
field->bytesm1 = 3; /* Sport + Dport, 4 bytes */
- if (key_type == FLOW_KEY_TYPE_TCP)
+ if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) {
field->ltype_match |= NPC_LT_LD_TCP;
- else if (key_type == FLOW_KEY_TYPE_UDP)
+ group_member = true;
+ } else if (key_type == NIX_FLOW_KEY_TYPE_UDP &&
+ valid_key) {
field->ltype_match |= NPC_LT_LD_UDP;
- else if (key_type == FLOW_KEY_TYPE_SCTP)
+ group_member = true;
+ } else if (key_type == NIX_FLOW_KEY_TYPE_SCTP &&
+ valid_key) {
field->ltype_match |= NPC_LT_LD_SCTP;
- field->key_offset = 32; /* After IPv4/v6 SIP, DIP */
+ group_member = true;
+ }
field->ltype_mask = ~field->ltype_match;
+ if (key_type == NIX_FLOW_KEY_TYPE_SCTP) {
+ /* Handle the case where any of the group items
+ * is enabled in the group but not the final one
+ */
+ if (group_member) {
+ valid_key = true;
+ group_member = false;
+ }
+ field_marker = true;
+ keyoff_marker = true;
+ } else {
+ field_marker = false;
+ keyoff_marker = false;
+ }
break;
}
- if (field)
- field->ena = 1;
- field = NULL;
+ field->ena = 1;
+
+ /* Found a valid flow key type */
+ if (valid_key) {
+ field->key_offset = key_off;
+ memcpy(&alg[nr_field], field, sizeof(*field));
+ max_key_off = max(max_key_off, field->bytesm1 + 1);
+
+ /* Found a field marker, get the next field */
+ if (field_marker)
+ nr_field++;
+ }
+
+ /* Found a keyoff marker, update the new key_off */
+ if (keyoff_marker) {
+ key_off += max_key_off;
+ max_key_off = 0;
+ }
}
+ /* Processed all the flow key types */
+ if (idx == max_bit_pos && key_off <= MAX_KEY_OFF)
+ return 0;
+ else
+ return NIX_AF_ERR_RSS_NOSPC_FIELD;
}
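A worked pass clarifies the markers. Take flow_cfg = IPV4 | IPV6 | TCP | UDP, assuming the conventional bit order PORT, IPV4, IPV6, TCP, UDP, SCTP from bit 0 (the enum itself is not in this hunk). IPv4 is placed at key_off 0 for 8 bytes, but its keyoff_marker is false, so IPv6 also lands at key_off 0: a packet is either v4 or v6, so both can share the key region while max_key_off tracks the larger extraction (32 bytes). IPv6's keyoff_marker then advances key_off to 32. TCP and UDP are group members: field_marker stays false between them, so their ltype matches accumulate into a single field that extracts the 4-byte port pair at key_off 32 instead of each consuming one of the five FIELDX slots.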
-static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg)
{
-#define FIELDS_PER_ALG 5
- u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG];
- u32 flowkey_cfg, minkey_cfg;
- int alg, fid;
+ u64 field[FIELDS_PER_ALG];
+ struct nix_hw *hw;
+ int fid, rc;
- memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG);
+ hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!hw)
+ return -EINVAL;
- /* Only incoming channel number */
- flowkey_cfg = FLOW_KEY_TYPE_PORT;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg);
+ /* No room to add a new flow hash algorithm */
+ if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX)
+ return NIX_AF_ERR_RSS_NOSPC_ALGO;
- /* For a incoming pkt if none of the fields match then flowkey
- * will be zero, hence tag generated will also be zero.
- * RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will
- * be used to queue the packet.
- */
+ /* Generate algo fields for the given flow_cfg */
+ rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg);
+ if (rc)
+ return rc;
+
+ /* Update ALGX_FIELDX register with generated fields */
+ for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use,
+ fid), field[fid]);
+
+ /* Store the flow_cfg for further lookup */
+ rc = hw->flowkey.in_use;
+ hw->flowkey.flowkey[rc] = flow_cfg;
+ hw->flowkey.in_use++;
+
+ return rc;
+}
+
+int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu,
+ struct nix_rss_flowkey_cfg *req,
+ struct nix_rss_flowkey_cfg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int alg_idx, nixlf, blkaddr;
+ struct nix_hw *nix_hw;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg);
+ /* Failed to get algo index from the existing list, reserve a new one */
+ if (alg_idx < 0) {
+ alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr,
+ req->flowkey_cfg);
+ if (alg_idx < 0)
+ return alg_idx;
+ }
+ rsp->alg_idx = alg_idx;
+ rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group,
+ alg_idx, req->mcam_index);
+ return 0;
+}
+
+static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr)
+{
+ u32 flowkey_cfg, minkey_cfg;
+ int alg, fid, rc;
+
+ /* Disable all flow key algx fieldx */
+ for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) {
+ for (fid = 0; fid < FIELDS_PER_ALG; fid++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
+ 0);
+ }
/* IPv4/IPv6 SIP/DIPs */
- flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg);
+ flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
minkey_cfg = flowkey_cfg;
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_UDP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP |
+ NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
/* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */
- flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP |
- FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP;
- set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP],
- flowkey_cfg);
+ flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP |
+ NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP;
+ rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg);
+ if (rc < 0)
+ return rc;
- for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) {
- for (fid = 0; fid < FIELDS_PER_ALG; fid++)
- rvu_write64(rvu, blkaddr,
- NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid),
- field[alg][fid]);
- }
+ return 0;
}
-int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
+int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
struct nix_set_mac_addr *req,
struct msg_rsp *rsp)
{
@@ -1742,10 +2277,13 @@ int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu,
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+
return 0;
}
-int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
+int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
bool allmulti = false, disable_promisc = false;
@@ -1775,9 +2313,303 @@ int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req,
else
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, allmulti);
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+
+ return 0;
+}
+
+static void nix_find_link_frs(struct rvu *rvu,
+ struct nix_frs_cfg *req, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ struct rvu_pfvf *pfvf;
+ int maxlen, minlen;
+ int numvfs, hwvf;
+ int vf;
+
+ /* Update with requester's min/max lengths */
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ pfvf->maxlen = req->maxlen;
+ if (req->update_minlen)
+ pfvf->minlen = req->minlen;
+
+ maxlen = req->maxlen;
+ minlen = req->update_minlen ? req->minlen : 0;
+
+ /* Get this PF's numVFs and starting hwvf */
+ rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf);
+
+ /* For each VF, compare requested max/minlen */
+ for (vf = 0; vf < numvfs; vf++) {
+ pfvf = &rvu->hwvf[hwvf + vf];
+ if (pfvf->maxlen > maxlen)
+ maxlen = pfvf->maxlen;
+ if (req->update_minlen &&
+ pfvf->minlen && pfvf->minlen < minlen)
+ minlen = pfvf->minlen;
+ }
+
+ /* Compare requested max/minlen with PF's max/minlen */
+ pfvf = &rvu->pf[pf];
+ if (pfvf->maxlen > maxlen)
+ maxlen = pfvf->maxlen;
+ if (req->update_minlen &&
+ pfvf->minlen && pfvf->minlen < minlen)
+ minlen = pfvf->minlen;
+
+ /* Update the request with the max/min of the PF and its VFs */
+ req->maxlen = maxlen;
+ if (req->update_minlen)
+ req->minlen = minlen;
+}
+
+int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int pf = rvu_get_pf(pcifunc);
+ int blkaddr, schq, link = -1;
+ struct nix_txsch *txsch;
+ u64 cfg, lmac_fifo_len;
+ struct nix_hw *nix_hw;
+ u8 cgx = 0, lmac = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
+ return NIX_AF_ERR_FRS_INVALID;
+
+ if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
+ return NIX_AF_ERR_FRS_INVALID;
+
+ /* Check if requester wants to update SMQs */
+ if (!req->update_smq)
+ goto rx_frscfg;
+
+ /* Update min/maxlen in each of the SMQ attached to this PF/VF */
+ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
+ mutex_lock(&rvu->rsrc_lock);
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
+ continue;
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
+ cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);
+ if (req->update_minlen)
+ cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
+ rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+
+rx_frscfg:
+ /* Check if config is for SDP link */
+ if (req->sdp_link) {
+ if (!hw->sdp_links)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+ link = hw->cgx_links + hw->lbk_links;
+ goto linkcfg;
+ }
+
+ /* Check if the request is from CGX mapped RVU PF */
+ if (is_pf_cgxmapped(rvu, pf)) {
+ /* Get CGX and LMAC to which this PF is mapped and find link */
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac);
+ link = (cgx * hw->lmac_per_cgx) + lmac;
+ } else if (pf == 0) {
+ /* For VFs of PF0, ingress is the LBK port, so config the LBK link */
+ link = hw->cgx_links;
+ }
+
+ if (link < 0)
+ return NIX_AF_ERR_RX_LINK_INVALID;
+
+ nix_find_link_frs(rvu, req, pcifunc);
+
+linkcfg:
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link));
+ cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16);
+ if (req->update_minlen)
+ cfg = (cfg & ~0xFFFFULL) | req->minlen;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg);
+
+ if (req->sdp_link || pf == 0)
+ return 0;
+
+ /* Update transmit credits for CGX links */
+ lmac_fifo_len =
+ CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
+ cfg &= ~(0xFFFFFULL << 12);
+ cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
+ rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg);
+ rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg);
+
+ return 0;
+}
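Both the SMQ loop and the link updates in this handler follow the same read-modify-write pattern on packed register fields. A generic helper capturing that pattern (purely illustrative; the driver open-codes each masked update):

static inline u64 set_field(u64 cfg, unsigned int shift, u64 mask, u64 val)
{
	/* e.g. cfg = set_field(cfg, 8, 0xFFFF, req->maxlen) mirrors the
	 * SMQ maxlen update above
	 */
	return (cfg & ~(mask << shift)) | ((val & mask) << shift);
}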
+
+int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam_alloc_entry_req alloc_req = { };
+ struct npc_mcam_alloc_entry_rsp alloc_rsp = { };
+ struct npc_mcam_free_entry_req free_req = { };
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, nixlf, err;
+ struct rvu_pfvf *pfvf;
+
+ /* LBK VFs do not have a separate MCAM UCAST entry, hence
+ * skip allocating rxvlan for them
+ */
+ if (is_afvf(pcifunc))
+ return 0;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (pfvf->rxvlan)
+ return 0;
+
+ /* alloc new mcam entry */
+ alloc_req.hdr.pcifunc = pcifunc;
+ alloc_req.count = 1;
+
+ err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req,
+ &alloc_rsp);
+ if (err)
+ return err;
+
+ /* update entry to enable rxvlan offload */
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0) {
+ err = NIX_AF_ERR_AF_LF_INVALID;
+ goto free_entry;
+ }
+
+ nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0) {
+ err = NIX_AF_ERR_AF_LF_INVALID;
+ goto free_entry;
+ }
+
+ pfvf->rxvlan_index = alloc_rsp.entry_list[0];
+ /* all it means is that rxvlan_index is valid */
+ pfvf->rxvlan = true;
+
+ err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+ if (err)
+ goto free_entry;
+
+ return 0;
+free_entry:
+ free_req.hdr.pcifunc = pcifunc;
+ free_req.entry = alloc_rsp.entry_list[0];
+ rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp);
+ pfvf->rxvlan = false;
+ return err;
+}
+
+int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ struct rvu_pfvf *pfvf;
+ int nixlf, blkaddr;
+ u64 cfg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf));
+ /* Set the interface configuration */
+ if (req->len_verify & BIT(0))
+ cfg |= BIT_ULL(41);
+ else
+ cfg &= ~BIT_ULL(41);
+
+ if (req->len_verify & BIT(1))
+ cfg |= BIT_ULL(40);
+ else
+ cfg &= ~BIT_ULL(40);
+
+ if (req->csum_verify & BIT(0))
+ cfg |= BIT_ULL(37);
+ else
+ cfg &= ~BIT_ULL(37);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg);
+
return 0;
}
+static void nix_link_config(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int cgx, lmac_cnt, slink, link;
+ u64 tx_credits;
+
+ /* Set default min/max packet lengths allowed on NIX Rx links.
+ *
+ * With the HW reset minlen value of 60 bytes, HW will treat ARP pkts
+ * as undersized and report them to SW as error pkts, hence
+ * set it to 40 bytes.
+ */
+ for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ }
+
+ if (hw->sdp_links) {
+ link = hw->cgx_links + hw->lbk_links;
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ }
+
+ /* Set credits for Tx links assuming max packet length allowed.
+ * This will be reconfigured based on MTU set for PF/VF.
+ */
+ for (cgx = 0; cgx < hw->cgx; cgx++) {
+ lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
+ /* Enable credits and set credit pkt count to max allowed */
+ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ slink = cgx * hw->lmac_per_cgx;
+ for (link = slink; link < (slink + lmac_cnt); link++) {
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link),
+ tx_credits);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_EXPR_CREDIT(link),
+ tx_credits);
+ }
+ }
+
+ /* Set Tx credits for LBK link */
+ slink = hw->cgx_links;
+ for (link = slink; link < (slink + hw->lbk_links); link++) {
+ tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
+ /* Enable credits and set credit pkt count to max allowed */
+ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits);
+ }
+}
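The credit arithmetic is easy to sanity-check with numbers. Assuming CGX_FIFO_LEN is 64 KB (the constant is not shown here), 4 LMACs per CGX and NIC_HW_MAX_FRS = 9212 (also an assumption): each LMAC gets 16384 bytes of FIFO, so tx_credits = (16384 - 9212) / 16 = 448 credits of 16 bytes each, which is then shifted by 12 and OR'd with the enable bit and the max packet-count field.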
+
static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
{
int idx, err;
@@ -1796,8 +2628,10 @@ static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr)
status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS);
/* Check if CGX devices are ready */
- for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) {
- if (status & (BIT_ULL(16 + idx)))
+ for (idx = 0; idx < rvu->cgx_cnt_max; idx++) {
+ /* Skip when cgx port is not available */
+ if (!rvu_cgx_pdata(idx, rvu) ||
+ (status & (BIT_ULL(16 + idx))))
continue;
dev_err(rvu->dev,
"CGX%d didn't respond to NIX X2P calibration\n", idx);
@@ -1830,10 +2664,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
/* Set admin queue endianness */
cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG);
#ifdef __BIG_ENDIAN
- cfg |= BIT_ULL(1);
+ cfg |= BIT_ULL(8);
rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#else
- cfg &= ~BIT_ULL(1);
+ cfg &= ~BIT_ULL(8);
rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg);
#endif
@@ -1870,6 +2704,14 @@ int rvu_nix_init(struct rvu *rvu)
return 0;
block = &hw->block[blkaddr];
+ /* As per a HW erratum in 9xxx A0 silicon, NIX may corrupt
+ * internal state when conditional clocks are turned off.
+ * Hence enable them.
+ */
+ if (is_rvu_9xxx_A0(rvu))
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG,
+ rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL);
+
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
if (err)
@@ -1891,9 +2733,6 @@ int rvu_nix_init(struct rvu *rvu)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
- /* Configure segmentation offload formats */
- nix_setup_lso(rvu, blkaddr);
-
if (blkaddr == BLKADDR_NIX0) {
hw->nix0 = devm_kzalloc(rvu->dev,
sizeof(struct nix_hw), GFP_KERNEL);
@@ -1904,24 +2743,51 @@ int rvu_nix_init(struct rvu *rvu)
if (err)
return err;
+ err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr);
+ if (err)
+ return err;
+
err = nix_setup_mcast(rvu, hw->nix0, blkaddr);
if (err)
return err;
- /* Config Outer L2, IP, TCP and UDP's NPC layer info.
+ /* Configure segmentation offload formats */
+ nix_setup_lso(rvu, hw->nix0, blkaddr);
+
+ /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info.
* This helps HW protocol checker to identify headers
* and validate length and checksums.
*/
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
(NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
+ (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
+ (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
+ (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
+ (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
+ (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) |
+ 0x0F);
+
+ err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+ if (err)
+ return err;
- nix_rx_flowkey_alg_cfg(rvu, blkaddr);
+ /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */
+ nix_link_config(rvu, blkaddr);
}
return 0;
}
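Each NIX_AF_RX_DEF_* value programmed above uses one packing, (LID << 8) | (LTYPE << 4) | mask. Read as a decode (inferred from the pattern, not spelled out in the patch):

/* (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F
 *   bits [11:8] : layer id whose parse result to inspect (LC = outer L3)
 *   bits [7:4]  : layer type that must match (LC_IP = IPv4)
 *   bits [3:0]  : ltype mask; 0xF demands an exact 4-bit match
 */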
@@ -1955,5 +2821,139 @@ void rvu_nix_freemem(struct rvu *rvu)
mcast = &nix_hw->mcast;
qmem_free(rvu->dev, mcast->mce_ctx);
qmem_free(rvu->dev, mcast->mcast_buf);
+ mutex_destroy(&mcast->mce_lock);
+ }
+}
+
+static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (*nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ if (err)
+ return err;
+
+ rvu_npc_enable_default_entries(rvu, pcifunc, nixlf);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int nixlf, err;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf);
+ if (err)
+ return err;
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
+ return 0;
+}
+
+void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct hwctx_disable_req ctx_req;
+ int err;
+
+ ctx_req.hdr.pcifunc = pcifunc;
+
+ /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */
+ nix_interface_deinit(rvu, pcifunc, nixlf);
+ nix_rx_sync(rvu, blkaddr);
+ nix_txschq_free(rvu, pcifunc);
+
+ if (pfvf->sq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_SQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "SQ ctx disable failed\n");
+ }
+
+ if (pfvf->rq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_RQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "RQ ctx disable failed\n");
+ }
+
+ if (pfvf->cq_ctx) {
+ ctx_req.ctype = NIX_AQ_CTYPE_CQ;
+ err = nix_lf_hwctx_disable(rvu, &ctx_req);
+ if (err)
+ dev_err(rvu->dev, "CQ ctx disable failed\n");
}
+
+ nix_ctx_free(rvu, pfvf);
+}
+
+int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
+ struct nix_lso_format_cfg *req,
+ struct nix_lso_format_cfg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_hw *nix_hw;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, idx, f;
+ u64 reg;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return -EINVAL;
+
+ /* Find existing matching LSO format, if any */
+ for (idx = 0; idx < nix_hw->lso.in_use; idx++) {
+ for (f = 0; f < NIX_LSO_FIELD_MAX; f++) {
+ reg = rvu_read64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(idx, f));
+ if (req->fields[f] != (reg & req->field_mask))
+ break;
+ }
+
+ if (f == NIX_LSO_FIELD_MAX)
+ break;
+ }
+
+ if (idx < nix_hw->lso.in_use) {
+ /* Match found */
+ rsp->lso_format_idx = idx;
+ return 0;
+ }
+
+ if (nix_hw->lso.in_use == nix_hw->lso.total)
+ return NIX_AF_ERR_LSO_CFG_FAIL;
+
+ rsp->lso_format_idx = nix_hw->lso.in_use++;
+
+ for (f = 0; f < NIX_LSO_FIELD_MAX; f++)
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f),
+ req->fields[f]);
+
+ return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index 7531fdc54fa1..c0e165dfc403 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
@@ -241,14 +241,14 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
return err;
}
-int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu,
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
struct npa_aq_enq_req *req,
struct npa_aq_enq_rsp *rsp)
{
return rvu_npa_aq_enq_inst(rvu, req, rsp);
}
-int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu,
+int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
struct msg_rsp *rsp)
{
@@ -273,7 +273,7 @@ static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf)
pfvf->npa_qints_ctx = NULL;
}
-int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu,
+int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu,
struct npa_lf_alloc_req *req,
struct npa_lf_alloc_rsp *rsp)
{
@@ -372,7 +372,7 @@ exit:
return rc;
}
-int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req,
+int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -470,3 +470,20 @@ void rvu_npa_freemem(struct rvu *rvu)
block = &hw->block[blkaddr];
rvu_aq_free(rvu, block->aq);
}
+
+void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct hwctx_disable_req ctx_req;
+
+ /* Disable all pools */
+ ctx_req.hdr.pcifunc = pcifunc;
+ ctx_req.ctype = NPA_AQ_CTYPE_POOL;
+ npa_lf_hwctx_disable(rvu, &ctx_req);
+
+ /* Disable all auras */
+ ctx_req.ctype = NPA_AQ_CTYPE_AURA;
+ npa_lf_hwctx_disable(rvu, &ctx_req);
+
+ npa_ctx_free(rvu, pfvf);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 23ff47f7efc5..15f70273e29c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -15,6 +16,7 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "npc.h"
+#include "cgx.h"
#include "npc_profile.h"
#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */
@@ -26,13 +28,10 @@
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
-struct mcam_entry {
-#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */
- u64 kw[NPC_MAX_KWS_IN_KEY];
- u64 kw_mask[NPC_MAX_KWS_IN_KEY];
- u64 action;
- u64 vtag_action;
-};
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc);
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+ u16 pcifunc);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf)
{
@@ -256,6 +255,46 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false);
}
+static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 src, u16 dest)
+{
+ int dbank = npc_get_bank(mcam, dest);
+ int sbank = npc_get_bank(mcam, src);
+ u64 cfg, sreg, dreg;
+ int bank, i;
+
+ src &= (mcam->banksize - 1);
+ dest &= (mcam->banksize - 1);
+
+ /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */
+ for (bank = 0; bank < mcam->banks_per_entry; bank++) {
+ sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0);
+ dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0);
+ for (i = 0; i < 6; i++) {
+ cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8));
+ rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg);
+ }
+ }
+
+ /* Copy action */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg);
+
+ /* Copy TAG action */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg);
+
+ /* Enable or disable */
+ cfg = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(src, sbank));
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
+}
+
static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index)
{
@@ -269,12 +308,17 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_mcam *mcam = &rvu->hw->mcam;
struct mcam_entry entry = { {0} };
struct nix_rx_action action;
int blkaddr, index, kwi;
u64 mac = 0;
+ /* AF's VFs work in promiscuous mode */
+ if (is_afvf(pcifunc))
+ return;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
@@ -308,22 +352,33 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
NIX_INTF_RX, &entry, true);
+
+ /* Add VLAN matching, set up the action and save the entry for later */
+ entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20;
+ entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20;
+
+ entry.vtag_action = VTAG0_VALID_BIT |
+ FIELD_PREP(VTAG0_TYPE_MASK, 0) |
+ FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) |
+ FIELD_PREP(VTAG0_RELPTR_MASK, 12);
+
+ memcpy(&pfvf->entry, &entry, sizeof(entry));
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, bool allmulti)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, ucast_idx, index, kwi;
struct mcam_entry entry = { {0} };
- struct nix_rx_action action;
- int blkaddr, index, kwi;
+ struct nix_rx_action action = { };
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
- if (blkaddr < 0)
+ /* Only PF or AF VF can add a promiscuous entry */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
return;
- /* Only PF or AF VF can add a promiscuous entry */
- if (pcifunc & RVU_PFVF_FUNC_MASK)
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
return;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
@@ -338,16 +393,29 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
entry.kw_mask[kwi] = BIT_ULL(40);
}
- *(u64 *)&action = 0x00;
- action.op = NIX_RX_ACTIONOP_UCAST;
- action.pf_func = pcifunc;
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+
+ /* If the corresponding PF's ucast action is RSS,
+ * use the same action for promisc also
+ */
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
entry.action = *(u64 *)&action;
npc_config_mcam_entry(rvu, mcam, blkaddr, index,
NIX_INTF_RX, &entry, true);
}
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -362,7 +430,17 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
+}
+
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
}
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
@@ -390,9 +468,28 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_BCAST_ENTRY);
- /* Check for L2B bit and LMAC channel */
- entry.kw[0] = BIT_ULL(25) | chan;
- entry.kw_mask[0] = BIT_ULL(25) | 0xFFFULL;
+ /* Check for L2B bit and LMAC channel.
+ * NOTE: The MKEX default profile is a reduced version, a stop-gap
+ * that ignores a few bits to accommodate more capability.
+ * Per the HRM, NPC_PARSE_KEX_S puts L2B at BIT_POS[25]; here it is
+ * moved to BIT_POS[13], ignoring ERRCODE and ERRLEV instead, so that
+ * we do not lose the capability features needed for CoS (from an
+ * ODP PoV), e.g. VLAN, DSCP.
+ *
+ * Reduced layout of the MKEX default profile
+ * (i.e. CHAN, L2/3{B/M}, LA, LB, LC, LD):
+ *
+ * BIT_POS[31:28] : LD
+ * BIT_POS[27:24] : LC
+ * BIT_POS[23:20] : LB
+ * BIT_POS[19:16] : LA
+ * BIT_POS[15:12] : L3B, L3M, L2B, L2M
+ * BIT_POS[11:00] : CHAN
+ */
+ entry.kw[0] = BIT_ULL(13) | chan;
+ entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL;
*(u64 *)&action = 0x00;
#ifdef MCAST_MCE
@@ -454,51 +551,110 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
rvu_write64(rvu, blkaddr,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action);
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
+ /* If PF's promiscuous entry is enabled,
+ * set RSS action for that entry as well
+ */
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
+ *(u64 *)&action);
+ }
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
}
-void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
struct nix_rx_action action;
- int blkaddr, index, bank;
+ int index, bank, blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
- /* Disable ucast MCAM match entry of this PF/VF */
+ /* Ucast MCAM match entry of this PF/VF */
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
- /* For PF, disable promisc and bcast MCAM match entries */
- if (!(pcifunc & RVU_PFVF_FUNC_MASK)) {
- index = npc_get_nixlf_mcam_index(mcam, pcifunc,
- nixlf, NIXLF_BCAST_ENTRY);
- /* For bcast, disable only if it's action is not
- * packet replication, incase if action is replication
- * then this PF's nixlf is removed from bcast replication
- * list.
- */
- bank = npc_get_bank(mcam, index);
- index &= (mcam->banksize - 1);
- *(u64 *)&action = rvu_read64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
- if (action.op != NIX_RX_ACTIONOP_MCAST)
- npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+ /* For PF, ena/dis promisc and bcast MCAM match entries */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ return;
+ /* For bcast, enable/disable only if its action is not
+ * packet replication; in case the action is replication,
+ * this PF's nixlf is instead removed from the bcast
+ * replication list.
+ */
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_BCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ *(u64 *)&action = rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
+ if (action.op != NIX_RX_ACTIONOP_MCAST)
+ npc_enable_mcam_entry(rvu, mcam,
+ blkaddr, index, enable);
+ if (enable)
+ rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
+ else
rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
- }
+
+ rvu_npc_update_rxvlan(rvu, pcifunc, nixlf);
+}
+
+void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+}
+
+void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
+}
+
+void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ mutex_lock(&mcam->lock);
+
+ /* Disable and free all MCAM entries mapped to this 'pcifunc' */
+ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+
+ /* Free all MCAM counters mapped to this 'pcifunc' */
+ npc_mcam_free_all_counters(rvu, mcam, pcifunc);
+
+ mutex_unlock(&mcam->lock);
+
+ rvu_npc_disable_default_entries(rvu, pcifunc, nixlf);
}
-#define LDATA_EXTRACT_CONFIG(intf, lid, ltype, ld, cfg) \
+#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg)
-#define LDATA_FLAGS_CONFIG(intf, ld, flags, cfg) \
+#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
+#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
+ ((flags_ena) << 6) | ((key_ofs) & 0x3F))
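Plugging the first use below into KEX_LD_CFG() shows where each argument lands: KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET) evaluates to (5 << 16) | (1 << 7) | 8 = 0x50088, i.e. extract bytesm1 + 1 = 6 bytes (the DMAC) from header offset 0, entry enabled, flags disabled, placed at key offset 8.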
+
static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
@@ -514,28 +670,171 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
*/
for (lid = 0; lid < lid_count; lid++) {
for (ltype = 0; ltype < 16; ltype++) {
- LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 0, 0ULL);
- LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 1, 0ULL);
- LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 0, 0ULL);
- LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 1, 0ULL);
-
- LDATA_FLAGS_CONFIG(NIX_INTF_RX, 0, ltype, 0ULL);
- LDATA_FLAGS_CONFIG(NIX_INTF_RX, 1, ltype, 0ULL);
- LDATA_FLAGS_CONFIG(NIX_INTF_TX, 0, ltype, 0ULL);
- LDATA_FLAGS_CONFIG(NIX_INTF_TX, 1, ltype, 0ULL);
+ SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL);
+ SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL);
+ SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL);
+ SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL);
+
+ SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL);
+ SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL);
+ SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL);
+ SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL);
}
}
- /* If we plan to extract Outer IPv4 tuple for TCP/UDP pkts
- * then 112bit key is not sufficient
- */
if (mcam->keysize != NPC_MCAM_KEY_X2)
return;
- /* Start placing extracted data/flags from 64bit onwards, for now */
- /* Extract DMAC from the packet */
- cfg = (0x05 << 16) | BIT_ULL(7) | NPC_PARSE_RESULT_DMAC_OFFSET;
- LDATA_EXTRACT_CONFIG(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+ /* Default MCAM KEX profile */
+ /* Layer A: Ethernet: */
+
+ /* DMAC: 6 bytes, KW1[47:0] */
+ cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
+
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);
+
+ /* Layer B: Single VLAN (CTAG) */
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg);
+
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg);
+
+ /* Layer C: IPv4 */
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg);
+ /* TOS: 1 byte, KW1[63:56] */
+ cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg);
+
+ /* Layer D:UDP */
+ /* SPORT: 2 bytes, KW3[15:0] */
+ cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg);
+ /* DPORT: 2 bytes, KW3[31:16] */
+ cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg);
+
+ /* Layer D:TCP */
+ /* SPORT: 2 bytes, KW3[15:0] */
+ cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg);
+ /* DPORT: 2 bytes, KW3[31:16] */
+ cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
+ SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
+}
+
+static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
+ struct npc_mcam_kex *mkex)
+{
+ int lid, lt, ld, fl;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
+ mkex->keyx_cfg[NIX_INTF_RX]);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
+ mkex->keyx_cfg[NIX_INTF_TX]);
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld),
+ mkex->kex_ld_flags[ld]);
+
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ SET_KEX_LD(NIX_INTF_RX, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_RX]
+ [lid][lt][ld]);
+
+ SET_KEX_LD(NIX_INTF_TX, lid, lt, ld,
+ mkex->intf_lid_lt_ld[NIX_INTF_TX]
+ [lid][lt][ld]);
+ }
+ }
+ }
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+ SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl,
+ mkex->intf_ld_flags[NIX_INTF_RX]
+ [ld][fl]);
+
+ SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl,
+ mkex->intf_ld_flags[NIX_INTF_TX]
+ [ld][fl]);
+ }
+ }
+}
+
+/* strtoull of "mkexprof" with base 36 */
+#define MKEX_SIGN 0x19bbfdbd15f
+#define MKEX_END_SIGN 0xdeadbeef
+
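The signature really is the base-36 reading of that string; a tiny userspace check (illustrative, not part of the driver):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Prints 19bbfdbd15f, matching MKEX_SIGN above */
	printf("%llx\n", strtoull("mkexprof", NULL, 36));
	return 0;
}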
+static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
+{
+ const char *mkex_profile = rvu->mkex_pfl_name;
+ struct device *dev = &rvu->pdev->dev;
+ void __iomem *mkex_prfl_addr = NULL;
+ struct npc_mcam_kex *mcam_kex;
+ u64 prfl_addr;
+ u64 prfl_sz;
+
+ /* If the user has not selected an mkex profile */
+ if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
+ goto load_default;
+
+ if (cgx_get_mkex_prfl_info(&prfl_addr, &prfl_sz))
+ goto load_default;
+
+ if (!prfl_addr || !prfl_sz)
+ goto load_default;
+
+ mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz);
+ if (!mkex_prfl_addr)
+ goto load_default;
+
+ mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
+
+ while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
+ /* Compare with mkex mod_param name string */
+ if (mcam_kex->mkex_sign == MKEX_SIGN &&
+ !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) {
+ /* Due to an erratum (35786) in A0 pass silicon,
+ * parse nibble enable configuration has to be
+ * identical for both Rx and Tx interfaces.
+ */
+ if (is_rvu_9xxx_A0(rvu) &&
+ mcam_kex->keyx_cfg[NIX_INTF_RX] !=
+ mcam_kex->keyx_cfg[NIX_INTF_TX])
+ goto load_default;
+
+ /* Program selected mkex profile */
+ npc_program_mkex_profile(rvu, blkaddr, mcam_kex);
+
+ goto unmap;
+ }
+
+ mcam_kex++;
+ prfl_sz -= sizeof(struct npc_mcam_kex);
+ }
+ dev_warn(dev, "Failed to load requested profile: %s\n",
+ rvu->mkex_pfl_name);
+
+load_default:
+ dev_info(rvu->dev, "Using default mkex profile\n");
+ /* Config packet data and flags extraction into PARSE result */
+ npc_config_ldata_extract(rvu, blkaddr);
+
+unmap:
+ if (mkex_prfl_addr)
+ iounmap(mkex_prfl_addr);
}
static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
@@ -690,13 +989,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
{
int nixlf_count = rvu_get_nixlf_count(rvu);
struct npc_mcam *mcam = &rvu->hw->mcam;
- int rsvd;
+ int rsvd, err;
u64 cfg;
/* Get HW limits */
cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
mcam->banks = (cfg >> 44) & 0xF;
mcam->banksize = (cfg >> 28) & 0xFFFF;
+ mcam->counters.max = (cfg >> 48) & 0xFFFF;
/* Actual number of MCAM entries vary by entry size */
cfg = (rvu_read64(rvu, blkaddr,
@@ -728,20 +1028,82 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
return -ENOMEM;
}
- mcam->entries = mcam->total_entries - rsvd;
- mcam->nixlf_offset = mcam->entries;
+ mcam->bmap_entries = mcam->total_entries - rsvd;
+ mcam->nixlf_offset = mcam->bmap_entries;
mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
- spin_lock_init(&mcam->lock);
+ /* Allocate bitmaps for managing MCAM entries */
+ mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries),
+ sizeof(long), GFP_KERNEL);
+ if (!mcam->bmap)
+ return -ENOMEM;
+
+ mcam->bmap_reverse = devm_kcalloc(rvu->dev,
+ BITS_TO_LONGS(mcam->bmap_entries),
+ sizeof(long), GFP_KERNEL);
+ if (!mcam->bmap_reverse)
+ return -ENOMEM;
+
+ mcam->bmap_fcnt = mcam->bmap_entries;
+
+ /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */
+ mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2pfvf_map)
+ return -ENOMEM;
+
+ /* Reserve 1/8th of MCAM entries at the bottom for low priority
+ * allocations and another 1/8th at the top for high priority
+ * allocations.
+ */
+ mcam->lprio_count = mcam->bmap_entries / 8;
+ if (mcam->lprio_count > BITS_PER_LONG)
+ mcam->lprio_count = round_down(mcam->lprio_count,
+ BITS_PER_LONG);
+ mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;
+ mcam->hprio_count = mcam->lprio_count;
+ mcam->hprio_end = mcam->hprio_count;
+
+ /* Allocate bitmap for managing MCAM counters and memory
+ * for saving counter to RVU PFFUNC allocation mapping.
+ */
+ err = rvu_alloc_bitmap(&mcam->counters);
+ if (err)
+ return err;
+
+ mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->cntr2pfvf_map)
+ goto free_mem;
+
+ /* Alloc memory for MCAM entry to counter mapping and for tracking
+ * counter's reference count.
+ */
+ mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->entry2cntr_map)
+ goto free_mem;
+
+ mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!mcam->cntr_refcnt)
+ goto free_mem;
+
+ mutex_init(&mcam->lock);
return 0;
+
+free_mem:
+ kfree(mcam->counters.bmap);
+ return -ENOMEM;
}
int rvu_npc_init(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
u64 keyz = NPC_MCAM_KEY_X2;
- int blkaddr, err;
+ int blkaddr, entry, bank, err;
+ u64 cfg, nibble_ena;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0) {
@@ -749,6 +1111,14 @@ int rvu_npc_init(struct rvu *rvu)
return -ENODEV;
}
+ /* First disable all MCAM entries, to stop traffic towards NIXLFs */
+ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
+ for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) {
+ for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++)
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0);
+ }
+
/* Allocate resource bimap for pkind*/
pkind->rsrc.max = (rvu_read64(rvu, blkaddr,
NPC_AF_CONST1) >> 12) & 0xFF;
@@ -771,29 +1141,41 @@ int rvu_npc_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
(NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ /* Config Inner IPV4 NPC layer info */
+ rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
+ (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F);
+
/* Enable below for Rx pkts.
* - Outer IPv4 header checksum validation.
* - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
+ * - Inner IPv4 header checksum validation.
+ * - Set a non-zero checksum error code value
*/
rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
- BIT_ULL(6) | BIT_ULL(2));
+ BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
+ BIT_ULL(2) | BIT_ULL(1));
/* Set RX and TX side MCAM search key size.
- * Also enable parse key extract nibbles suchthat except
- * layer E to H, rest of the key is included for MCAM search.
+ * LA..LD (ltype only) + Channel
*/
+ nibble_ena = 0x49247;
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
- ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+ ((keyz & 0x3) << 32) | nibble_ena);
+ /* Due to an erratum (35786) in A0 pass silicon, parse nibble enable
+ * configuration has to be identical for both Rx and Tx interfaces.
+ */
+ if (!is_rvu_9xxx_A0(rvu))
+ nibble_ena = (1ULL << 19) - 1;
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
- ((keyz & 0x3) << 32) | ((1ULL << 20) - 1));
+ ((keyz & 0x3) << 32) | nibble_ena);
err = npc_mcam_rsrcs_init(rvu, blkaddr);
if (err)
return err;
- /* Config packet data and flags extraction into PARSE result */
- npc_config_ldata_extract(rvu, blkaddr);
+ /* Configure MKEX profile */
+ npc_load_mkex_profile(rvu, blkaddr);
/* Set TX miss action to UCAST_DEFAULT i.e
* transmit the packet on NIX LF SQ's default channel.
@@ -811,6 +1193,1020 @@ int rvu_npc_init(struct rvu *rvu)
void rvu_npc_freemem(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
kfree(pkind->rsrc.bmap);
+ kfree(mcam->counters.bmap);
+ mutex_destroy(&mcam->lock);
+}
+
+static int npc_mcam_verify_entry(struct npc_mcam *mcam,
+ u16 pcifunc, int entry)
+{
+ /* Verify if entry is valid and if it is indeed
+ * allocated to the requesting PFFUNC.
+ */
+ if (entry >= mcam->bmap_entries)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (pcifunc != mcam->entry2pfvf_map[entry])
+ return NPC_MCAM_PERM_DENIED;
+
+ return 0;
+}
+
+static int npc_mcam_verify_counter(struct npc_mcam *mcam,
+ u16 pcifunc, int cntr)
+{
+ /* Verify if counter is valid and if it is indeed
+ * allocated to the requesting PFFUNC.
+ */
+ if (cntr >= mcam->counters.max)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (pcifunc != mcam->cntr2pfvf_map[cntr])
+ return NPC_MCAM_PERM_DENIED;
+
+ return 0;
+}
+
+static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 entry, u16 cntr)
+{
+ u16 index = entry & (mcam->banksize - 1);
+ u16 bank = npc_get_bank(mcam, entry);
+
+ /* Set mapping and increment counter's refcnt */
+ mcam->entry2cntr_map[entry] = cntr;
+ mcam->cntr_refcnt[cntr]++;
+ /* Enable stats */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank),
+ BIT_ULL(9) | cntr);
+}
+
+static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
+ struct npc_mcam *mcam,
+ int blkaddr, u16 entry, u16 cntr)
+{
+ u16 index = entry & (mcam->banksize - 1);
+ u16 bank = npc_get_bank(mcam, entry);
+
+ /* Remove mapping and reduce counter's refcnt */
+ mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
+ mcam->cntr_refcnt[cntr]--;
+ /* Disable stats */
+ rvu_write64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00);
+}
+
+/* Sets MCAM entry in bitmap as used. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index)
+{
+ u16 entry, rentry;
+
+ entry = index;
+ rentry = mcam->bmap_entries - index - 1;
+
+ __set_bit(entry, mcam->bmap);
+ __set_bit(rentry, mcam->bmap_reverse);
+ mcam->bmap_fcnt--;
+}
+
+/* Sets MCAM entry in bitmap as free. Update
+ * reverse bitmap too. Should be called with
+ * 'mcam->lock' held.
+ */
+static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index)
+{
+ u16 entry, rentry;
+
+ entry = index;
+ rentry = mcam->bmap_entries - index - 1;
+
+ __clear_bit(entry, mcam->bmap);
+ __clear_bit(rentry, mcam->bmap_reverse);
+ mcam->bmap_fcnt++;
+}
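
A minimal standalone sketch (assumed numbers, not driver code) of the forward/reverse index relation that the two helpers above keep in sync:

#include <assert.h>

int main(void)
{
	unsigned int bmap_entries = 8;
	unsigned int entry = 2;
	unsigned int rentry = bmap_entries - entry - 1;	/* 5 */

	/* The mapping is an involution: applying it twice gives 'entry'
	 * back, so both bitmaps always mark the same physical entry.
	 */
	assert(bmap_entries - rentry - 1 == entry);
	return 0;
}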
+
+static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pcifunc)
+{
+ u16 index, cntr;
+
+ /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */
+ for (index = 0; index < mcam->bmap_entries; index++) {
+ if (mcam->entry2pfvf_map[index] == pcifunc) {
+ mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP;
+ /* Free the entry in bitmap */
+ npc_mcam_clear_bit(mcam, index);
+ /* Disable the entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false);
+
+ /* Update entry2counter mapping */
+ cntr = mcam->entry2cntr_map[index];
+ if (cntr != NPC_MCAM_INVALID_MAP)
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam,
+ blkaddr, index,
+ cntr);
+ }
+ }
+}
+
+static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam,
+ u16 pcifunc)
+{
+ u16 cntr;
+
+ /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */
+ for (cntr = 0; cntr < mcam->counters.max; cntr++) {
+ if (mcam->cntr2pfvf_map[cntr] == pcifunc) {
+ mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP;
+ mcam->cntr_refcnt[cntr] = 0;
+ rvu_free_rsrc(&mcam->counters, cntr);
+ /* This API is expected to be called after freeing
+ * MCAM entries, which in turn will remove
+ * 'entry to counter' mapping.
+ * No need to do it again.
+ */
+ }
+ }
+}
+
+/* Find area of contiguous free entries of size 'nr'.
+ * If not found, return max contiguous free entries available.
+ */
+static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start,
+ u16 nr, u16 *max_area)
+{
+ u16 max_area_start = 0;
+ u16 index, next, end;
+
+ *max_area = 0;
+
+again:
+ index = find_next_zero_bit(map, size, start);
+ if (index >= size)
+ return max_area_start;
+
+ end = ((index + nr) >= size) ? size : index + nr;
+ next = find_next_bit(map, end, index);
+ if (*max_area < (next - index)) {
+ *max_area = next - index;
+ max_area_start = index;
+ }
+
+ if (next < end) {
+ start = next + 1;
+ goto again;
+ }
+
+ return max_area_start;
+}
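
A standalone sketch of the fallback semantics (an assumed example, not the driver's find_next_zero_bit()-based code): look for 'nr' contiguous zero bits and, if no run is long enough, report the longest run seen so the caller can fall back to a partial contiguous allocation.

#include <stdio.h>

static unsigned int next_zero(unsigned long map, unsigned int size,
			      unsigned int start)
{
	while (start < size && (map & (1UL << start)))
		start++;
	return start;
}

static unsigned int next_one(unsigned long map, unsigned int size,
			     unsigned int start)
{
	while (start < size && !(map & (1UL << start)))
		start++;
	return start;
}

static unsigned int find_zero_area(unsigned long map, unsigned int size,
				   unsigned int start, unsigned int nr,
				   unsigned int *max_area)
{
	unsigned int max_start = 0, index, next, end;

	*max_area = 0;
	for (;;) {
		index = next_zero(map, size, start);
		if (index >= size)
			return max_start;
		end = (index + nr >= size) ? size : index + nr;
		next = next_one(map, end, index);
		if (*max_area < next - index) {
			*max_area = next - index;
			max_start = index;
		}
		if (next >= end)
			return max_start;
		start = next + 1;
	}
}

int main(void)
{
	unsigned int max_area;
	/* 0x32 = 0b00110010: zero runs of length 1, 2 and 2 */
	unsigned int start = find_zero_area(0x32UL, 8, 0, 3, &max_area);

	printf("start=%u max_area=%u\n", start, max_area);
	return 0;	/* prints: start=2 max_area=2 */
}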
+
+/* Find number of free MCAM entries available
+ * within range, i.e. between 'start' and 'end'.
+ */
+static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end)
+{
+ u16 index, next;
+ u16 fcnt = 0;
+
+again:
+ if (start >= end)
+ return fcnt;
+
+ index = find_next_zero_bit(map, end, start);
+ if (index >= end)
+ return fcnt;
+
+ next = find_next_bit(map, end, index);
+ if (next <= end) {
+ fcnt += next - index;
+ start = next + 1;
+ goto again;
+ }
+
+ fcnt += end - index;
+ return fcnt;
+}
+
+static void
+npc_get_mcam_search_range_priority(struct npc_mcam *mcam,
+ struct npc_mcam_alloc_entry_req *req,
+ u16 *start, u16 *end, bool *reverse)
+{
+ u16 fcnt;
+
+ if (req->priority == NPC_MCAM_HIGHER_PRIO)
+ goto hprio;
+
+ /* For a low priority entry allocation
+ * - If reference entry is not in hprio zone then
+ * search range: ref_entry to end.
+ * - If reference entry is in hprio zone and if
+ * request can be accommodated in non-hprio zone then
+ * search range: 'start of middle zone' to 'end'
+ * - else search in reverse, so that fewer hprio
+ * zone entries are allocated.
+ */
+
+ *reverse = false;
+ *start = req->ref_entry + 1;
+ *end = mcam->bmap_entries;
+
+ if (req->ref_entry >= mcam->hprio_end)
+ return;
+
+ fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->hprio_end, mcam->bmap_entries);
+ if (fcnt > req->count)
+ *start = mcam->hprio_end;
+ else
+ *reverse = true;
+ return;
+
+hprio:
+ /* For a high priority entry allocation, search is always
+ * in reverse to preserve hprio zone entries.
+ * - If reference entry is not in lprio zone then
+ * search range: 0 to ref_entry.
+ * - If reference entry is in lprio zone and if
+ * request can be accommodated in middle zone then
+ * search range: 'hprio_end' to 'lprio_start'
+ */
+
+ *reverse = true;
+ *start = 0;
+ *end = req->ref_entry;
+
+ if (req->ref_entry <= mcam->lprio_start)
+ return;
+
+ fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->hprio_end, mcam->lprio_start);
+ if (fcnt < req->count)
+ return;
+ *start = mcam->hprio_end;
+ *end = mcam->lprio_start;
+}
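
A standalone sketch (with the zone numbers assumed earlier, not driver code) of the LOWER_PRIO branch above, where ref_entry = 50 sits inside the hprio zone and the free count outside that zone is stubbed:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int hprio_end = 128, bmap_entries = 1024;
	unsigned int ref_entry = 50, count = 16;
	unsigned int free_cnt = 300;	/* stubbed middle+lprio free count */
	unsigned int start = ref_entry + 1, end = bmap_entries;
	bool reverse = false;

	if (ref_entry < hprio_end) {
		if (free_cnt > count)
			start = hprio_end;	/* spare the hprio zone */
		else
			reverse = true;		/* eat hprio from its bottom */
	}
	printf("search [%u, %u) reverse=%d\n", start, end, reverse);
	return 0;	/* prints: search [128, 1024) reverse=0 */
}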
+
+static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp)
+{
+ u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES];
+ u16 fcnt, hp_fcnt, lp_fcnt;
+ u16 start, end, index;
+ int entry, next_start;
+ bool reverse = false;
+ unsigned long *bmap;
+ u16 max_contig;
+
+ mutex_lock(&mcam->lock);
+
+ /* Check if there are any free entries */
+ if (!mcam->bmap_fcnt) {
+ mutex_unlock(&mcam->lock);
+ return NPC_MCAM_ALLOC_FAILED;
+ }
+
+ /* MCAM entries are divided into high priority, middle and
+ * low priority zones. The idea is to avoid allocating the top-most
+ * and bottom-most entries as far as possible, to increase the
+ * probability of honouring priority allocation requests.
+ *
+ * Two bitmaps are used for mcam entry management,
+ * mcam->bmap for forward search i.e '0 to mcam->bmap_entries'.
+ * mcam->bmap_reverse for reverse search i.e 'mcam->bmap_entries to 0'.
+ *
+ * Reverse bitmap is used to allocate entries
+ * - when a higher priority entry is requested
+ * - when available free entries are less.
+ * Out of the available free entries, lower priority ones are
+ * always chosen when a 'high vs low' choice arises.
+ */
+
+ /* Get the search range for priority allocation request */
+ if (req->priority) {
+ npc_get_mcam_search_range_priority(mcam, req,
+ &start, &end, &reverse);
+ goto alloc;
+ }
+
+ /* Find out the search range for non-priority allocation request
+ *
+ * Get MCAM free entry count in middle zone.
+ */
+ lp_fcnt = npc_mcam_get_free_count(mcam->bmap,
+ mcam->lprio_start,
+ mcam->bmap_entries);
+ hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end);
+ fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt;
+
+ /* Check if request can be accommodated in the middle zone */
+ if (fcnt > req->count) {
+ start = mcam->hprio_end;
+ end = mcam->lprio_start;
+ } else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) {
+ /* Expand search zone from half of hprio zone to
+ * half of lprio zone.
+ */
+ start = mcam->hprio_end / 2;
+ end = mcam->bmap_entries - (mcam->lprio_count / 2);
+ reverse = true;
+ } else {
+ /* Not enough free entries, search all entries in reverse,
+ * so that low priority ones will get used up.
+ */
+ reverse = true;
+ start = 0;
+ end = mcam->bmap_entries;
+ }
+
+alloc:
+ if (reverse) {
+ bmap = mcam->bmap_reverse;
+ start = mcam->bmap_entries - start;
+ end = mcam->bmap_entries - end;
+ index = start;
+ start = end;
+ end = index;
+ } else {
+ bmap = mcam->bmap;
+ }
+
+ if (req->contig) {
+ /* Allocate requested number of contiguous entries, if
+ * unsuccessful find max contiguous entries available.
+ */
+ index = npc_mcam_find_zero_area(bmap, end, start,
+ req->count, &max_contig);
+ rsp->count = max_contig;
+ if (reverse)
+ rsp->entry = mcam->bmap_entries - index - max_contig;
+ else
+ rsp->entry = index;
+ } else {
+ /* Allocate requested number of non-contiguous entries,
+ * if unsuccessful allocate as many as possible.
+ */
+ rsp->count = 0;
+ next_start = start;
+ for (entry = 0; entry < req->count; entry++) {
+ index = find_next_zero_bit(bmap, end, next_start);
+ if (index >= end)
+ break;
+
+ next_start = start + (index - start) + 1;
+
+ /* Save the entry's index */
+ if (reverse)
+ index = mcam->bmap_entries - index - 1;
+ entry_list[entry] = index;
+ rsp->count++;
+ }
+ }
+
+ /* If allocating the requested number of entries is unsuccessful,
+ * expand the search range to full bitmap length and retry.
+ */
+ if (!req->priority && (rsp->count < req->count) &&
+ ((end - start) != mcam->bmap_entries)) {
+ reverse = true;
+ start = 0;
+ end = mcam->bmap_entries;
+ goto alloc;
+ }
+
+ /* For priority entry allocation requests, if allocation has
+ * failed then expand search to max possible range and retry.
+ */
+ if (req->priority && rsp->count < req->count) {
+ if (req->priority == NPC_MCAM_LOWER_PRIO &&
+ (start != (req->ref_entry + 1))) {
+ start = req->ref_entry + 1;
+ end = mcam->bmap_entries;
+ reverse = false;
+ goto alloc;
+ } else if ((req->priority == NPC_MCAM_HIGHER_PRIO) &&
+ ((end - start) != req->ref_entry)) {
+ start = 0;
+ end = req->ref_entry;
+ reverse = true;
+ goto alloc;
+ }
+ }
+
+ /* Copy MCAM entry indices into mbox response entry_list.
+ * Requester always expects indices in ascending order, so
+ * reverse the list if reverse bitmap is used for allocation.
+ */
+ if (!req->contig && rsp->count) {
+ index = 0;
+ for (entry = rsp->count - 1; entry >= 0; entry--) {
+ if (reverse)
+ rsp->entry_list[index++] = entry_list[entry];
+ else
+ rsp->entry_list[entry] = entry_list[entry];
+ }
+ }
+
+ /* Mark the allocated entries as used and set nixlf mapping */
+ for (entry = 0; entry < rsp->count; entry++) {
+ index = req->contig ?
+ (rsp->entry + entry) : rsp->entry_list[entry];
+ npc_mcam_set_bit(mcam, index);
+ mcam->entry2pfvf_map[index] = pcifunc;
+ mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP;
+ }
+
+ /* Update available free count in mbox response */
+ rsp->free_count = mcam->bmap_fcnt;
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
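
A sketch with hypothetical numbers of the reverse-search remap performed at the 'alloc' label above, for bmap_entries = 100 and a forward range of [30, 90):

#include <stdio.h>

int main(void)
{
	unsigned int bmap_entries = 100, start = 30, end = 90, tmp;

	start = bmap_entries - start;	/* 70 */
	end = bmap_entries - end;	/* 10 */
	tmp = start;
	start = end;
	end = tmp;			/* reverse range: [10, 70) */

	/* A hit at reverse index r maps back to forward entry
	 * bmap_entries - r - 1, so r = 10 -> entry 89: the reverse
	 * search fills the original range from its high end first.
	 */
	printf("reverse range [%u, %u)\n", start, end);
	return 0;
}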
+
+int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_entry_req *req,
+ struct npc_mcam_alloc_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ rsp->entry = NPC_MCAM_ENTRY_INVALID;
+ rsp->free_count = 0;
+
+ /* Check if ref_entry is within range */
+ if (req->priority && req->ref_entry >= mcam->bmap_entries)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* ref_entry can't be '0' if requested priority is high.
+ * Can't be last entry if requested priority is low.
+ */
+ if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) ||
+ ((req->ref_entry == (mcam->bmap_entries - 1)) &&
+ req->priority == NPC_MCAM_LOWER_PRIO))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Since list of allocated indices needs to be sent to requester,
+ * max number of non-contiguous entries per mbox msg is limited.
+ */
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Alloc request from PFFUNC with no NIXLF attached should be denied */
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_ALLOC_DENIED;
+
+ return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp);
+}
+
+int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu,
+ struct npc_mcam_free_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc = 0;
+ u16 cntr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Free request from PFFUNC with no NIXLF attached, ignore */
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ if (req->all)
+ goto free_all;
+
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (rc)
+ goto exit;
+
+ mcam->entry2pfvf_map[req->entry] = 0;
+ npc_mcam_clear_bit(mcam, req->entry);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+ /* Update entry2counter mapping */
+ cntr = mcam->entry2cntr_map[req->entry];
+ if (cntr != NPC_MCAM_INVALID_MAP)
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, cntr);
+
+ goto exit;
+
+free_all:
+ /* Free up all entries allocated to requesting PFFUNC */
+ npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc);
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu,
+ struct npc_mcam_write_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ if (rc)
+ goto exit;
+
+ if (req->set_cntr &&
+ npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) {
+ rc = NPC_MCAM_INVALID_REQ;
+ goto exit;
+ }
+
+ npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf,
+ &req->entry_data, req->enable_entry);
+
+ if (req->set_cntr)
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, req->cntr);
+
+ rc = 0;
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ mutex_unlock(&mcam->lock);
+ if (rc)
+ return rc;
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu,
+ struct npc_mcam_ena_dis_entry_req *req,
+ struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry);
+ mutex_unlock(&mcam->lock);
+ if (rc)
+ return rc;
+
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
+ struct npc_mcam_shift_entry_req *req,
+ struct npc_mcam_shift_entry_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 old_entry, new_entry;
+ u16 index, cntr;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (req->shift_count > NPC_MCAM_MAX_SHIFTS)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ for (index = 0; index < req->shift_count; index++) {
+ old_entry = req->curr_entry[index];
+ new_entry = req->new_entry[index];
+
+ /* Check if both old and new entries are valid and
+ * belong to this PFFUNC.
+ */
+ rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry);
+ if (rc)
+ break;
+
+ rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry);
+ if (rc)
+ break;
+
+ /* new_entry should not have a counter mapped */
+ if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) {
+ rc = NPC_MCAM_PERM_DENIED;
+ break;
+ }
+
+ /* Disable the new_entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false);
+
+ /* Copy rule from old entry to new entry */
+ npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry);
+
+ /* Copy counter mapping, if any */
+ cntr = mcam->entry2cntr_map[old_entry];
+ if (cntr != NPC_MCAM_INVALID_MAP) {
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ old_entry, cntr);
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ new_entry, cntr);
+ }
+
+ /* Enable new_entry and disable old_entry */
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false);
+ }
+
+ /* If shift has failed then report the failed index */
+ if (index != req->shift_count) {
+ rc = NPC_MCAM_PERM_DENIED;
+ rsp->failed_entry_idx = index;
+ }
+
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu,
+ struct npc_mcam_alloc_counter_req *req,
+ struct npc_mcam_alloc_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 pcifunc = req->hdr.pcifunc;
+ u16 max_contig, cntr;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* If the request is from a PFFUNC with no NIXLF attached, ignore */
+ if (!is_nixlf_attached(rvu, pcifunc))
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Since list of allocated counter IDs needs to be sent to requester,
+ * max number of non-contiguous counters per mbox msg is limited.
+ */
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+
+ /* Check if unused counters are available or not */
+ if (!rvu_rsrc_free_count(&mcam->counters)) {
+ mutex_unlock(&mcam->lock);
+ return NPC_MCAM_ALLOC_FAILED;
+ }
+
+ rsp->count = 0;
+
+ if (req->contig) {
+ /* Allocate requested number of contiguous counters, if
+ * unsuccessful find max contiguous entries available.
+ */
+ index = npc_mcam_find_zero_area(mcam->counters.bmap,
+ mcam->counters.max, 0,
+ req->count, &max_contig);
+ rsp->count = max_contig;
+ rsp->cntr = index;
+ for (cntr = index; cntr < (index + max_contig); cntr++) {
+ __set_bit(cntr, mcam->counters.bmap);
+ mcam->cntr2pfvf_map[cntr] = pcifunc;
+ }
+ } else {
+ /* Allocate requested number of non-contiguous counters,
+ * if unsuccessful allocate as many as possible.
+ */
+ for (cntr = 0; cntr < req->count; cntr++) {
+ index = rvu_alloc_rsrc(&mcam->counters);
+ if (index < 0)
+ break;
+ rsp->cntr_list[cntr] = index;
+ rsp->count++;
+ mcam->cntr2pfvf_map[index] = pcifunc;
+ }
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, entry = 0;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ if (err) {
+ mutex_unlock(&mcam->lock);
+ return err;
+ }
+
+ /* Mark counter as free/unused */
+ mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP;
+ rvu_free_rsrc(&mcam->counters, req->cntr);
+
+ /* Disable stats of all MCAM entries which are using this counter */
+ while (entry < mcam->bmap_entries) {
+ if (!mcam->cntr_refcnt[req->cntr])
+ break;
+
+ index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+ if (index >= mcam->bmap_entries)
+ break;
+ /* Advance 'entry' before the skip check so 'continue' makes progress */
+ entry = index + 1;
+ if (mcam->entry2cntr_map[index] != req->cntr)
+ continue;
+
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ index, req->cntr);
+ }
+
+ mutex_unlock(&mcam->lock);
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
+ struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 index, entry = 0;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ if (rc)
+ goto exit;
+
+ /* Unmap the MCAM entry and counter */
+ if (!req->all) {
+ rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry);
+ if (rc)
+ goto exit;
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ req->entry, req->cntr);
+ goto exit;
+ }
+
+ /* Disable stats of all MCAM entries which are using this counter */
+ while (entry < mcam->bmap_entries) {
+ if (!mcam->cntr_refcnt[req->cntr])
+ break;
+
+ index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
+ if (index >= mcam->bmap_entries)
+ break;
+ /* Advance 'entry' before the skip check so 'continue' makes progress */
+ entry = index + 1;
+ if (mcam->entry2cntr_map[index] != req->cntr)
+ continue;
+
+ npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
+ index, req->cntr);
+ }
+exit:
+ mutex_unlock(&mcam->lock);
+ return rc;
+}
+
+int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ mutex_unlock(&mcam->lock);
+ if (err)
+ return err;
+
+ rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00);
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu,
+ struct npc_mcam_oper_counter_req *req,
+ struct npc_mcam_oper_counter_rsp *rsp)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, err;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ mutex_lock(&mcam->lock);
+ err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr);
+ mutex_unlock(&mcam->lock);
+ if (err)
+ return err;
+
+ rsp->stat = rvu_read64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr));
+ rsp->stat &= BIT_ULL(48) - 1;
+
+ return 0;
+}
+
+int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu,
+ struct npc_mcam_alloc_and_write_entry_req *req,
+ struct npc_mcam_alloc_and_write_entry_rsp *rsp)
+{
+ struct npc_mcam_alloc_counter_req cntr_req;
+ struct npc_mcam_alloc_counter_rsp cntr_rsp;
+ struct npc_mcam_alloc_entry_req entry_req;
+ struct npc_mcam_alloc_entry_rsp entry_rsp;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ u16 entry = NPC_MCAM_ENTRY_INVALID;
+ u16 cntr = NPC_MCAM_ENTRY_INVALID;
+ int blkaddr, rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NPC_MCAM_INVALID_REQ;
+
+ if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX)
+ return NPC_MCAM_INVALID_REQ;
+
+ /* Try to allocate a MCAM entry */
+ entry_req.hdr.pcifunc = req->hdr.pcifunc;
+ entry_req.contig = true;
+ entry_req.priority = req->priority;
+ entry_req.ref_entry = req->ref_entry;
+ entry_req.count = 1;
+
+ rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu,
+ &entry_req, &entry_rsp);
+ if (rc)
+ return rc;
+
+ if (!entry_rsp.count)
+ return NPC_MCAM_ALLOC_FAILED;
+
+ entry = entry_rsp.entry;
+
+ if (!req->alloc_cntr)
+ goto write_entry;
+
+ /* Now allocate counter */
+ cntr_req.hdr.pcifunc = req->hdr.pcifunc;
+ cntr_req.contig = true;
+ cntr_req.count = 1;
+
+ rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp);
+ if (rc) {
+ /* Free allocated MCAM entry */
+ mutex_lock(&mcam->lock);
+ mcam->entry2pfvf_map[entry] = 0;
+ npc_mcam_clear_bit(mcam, entry);
+ mutex_unlock(&mcam->lock);
+ return rc;
+ }
+
+ cntr = cntr_rsp.cntr;
+
+write_entry:
+ mutex_lock(&mcam->lock);
+ npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf,
+ &req->entry_data, req->enable_entry);
+
+ if (req->alloc_cntr)
+ npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr);
+ mutex_unlock(&mcam->lock);
+
+ rsp->entry = entry;
+ rsp->cntr = cntr;
+
+ return 0;
+}
+
+#define GET_KEX_CFG(intf) \
+ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf))
+
+#define GET_KEX_FLAGS(ld) \
+ rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld))
+
+#define GET_KEX_LD(intf, lid, lt, ld) \
+ rvu_read64(rvu, BLKADDR_NPC, \
+ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld))
+
+#define GET_KEX_LDFLAGS(intf, ld, fl) \
+ rvu_read64(rvu, BLKADDR_NPC, \
+ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl))
+
+int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
+ struct npc_get_kex_cfg_rsp *rsp)
+{
+ int lid, lt, ld, fl;
+
+ rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX);
+ rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX);
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] =
+ GET_KEX_LD(NIX_INTF_RX, lid, lt, ld);
+ rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] =
+ GET_KEX_LD(NIX_INTF_TX, lid, lt, ld);
+ }
+ }
+ }
+ for (ld = 0; ld < NPC_MAX_LD; ld++)
+ rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld);
+
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+ rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] =
+ GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl);
+ rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] =
+ GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl);
+ }
+ }
+ memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN);
+ return 0;
+}
+
+int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+ bool enable;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (!pfvf->rxvlan)
+ return 0;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_UCAST_ENTRY);
+ pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index);
+ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index);
+ npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index,
+ NIX_INTF_RX, &pfvf->entry, enable);
+
+ return 0;
}
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9c08c3650c02..04fd1f135011 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3732,19 +3732,7 @@ static int skge_debug_show(struct seq_file *seq, void *v)
return 0;
}
-
-static int skge_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, skge_debug_show, inode->i_private);
-}
-
-static const struct file_operations skge_debug_fops = {
- .owner = THIS_MODULE,
- .open = skge_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(skge_debug);
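
For reference, DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates essentially the boilerplate removed above; its expansion is roughly the following (paraphrased from memory, see seq_file.h for the authoritative definition):

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
};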
/*
* Use network device events to create/remove/rename
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 697d9b374f5e..f3a5fa84860f 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2485,13 +2485,11 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
skb->ip_summed = re->skb->ip_summed;
skb->csum = re->skb->csum;
skb_copy_hash(skb, re->skb);
- skb->vlan_proto = re->skb->vlan_proto;
- skb->vlan_tci = re->skb->vlan_tci;
+ __vlan_hwaccel_copy_tag(skb, re->skb);
pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
length, PCI_DMA_FROMDEVICE);
- re->skb->vlan_proto = 0;
- re->skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(re->skb);
skb_clear_hash(re->skb);
re->skb->ip_summed = CHECKSUM_NONE;
skb_put(skb, length);
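
The two helpers bundle exactly the field updates that were open-coded above; conceptually they amount to the following (a simplified sketch of the <linux/if_vlan.h> helpers, not their verbatim definitions):

static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst,
					   const struct sk_buff *src)
{
	dst->vlan_proto = src->vlan_proto;
	dst->vlan_tci = src->vlan_tci;
}

static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
{
	skb->vlan_proto = 0;
	skb->vlan_tci = 0;
}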
@@ -4623,19 +4621,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
napi_enable(&hw->napi);
return 0;
}
-
-static int sky2_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sky2_debug_show, inode->i_private);
-}
-
-static const struct file_operations sky2_debug_fops = {
- .owner = THIS_MODULE,
- .open = sky2_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sky2_debug);
/*
* Use network device events to create/remove/rename
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7dbfdac4067a..399f565dd85a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -243,7 +243,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (dev->phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
@@ -353,8 +353,9 @@ static int mtk_phy_connect(struct net_device *dev)
phy_set_max_speed(dev->phydev, SPEED_1000);
phy_support_asym_pause(dev->phydev);
- dev->phydev->advertising = dev->phydev->supported |
- ADVERTISED_Autoneg;
+ linkmode_copy(dev->phydev->advertising, dev->phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ dev->phydev->advertising);
phy_start_aneg(dev->phydev);
of_node_put(np);
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 36054e6fb9d3..f200b8c420d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -5,7 +5,7 @@
config MLX4_EN
tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
depends on MAY_USE_DEVLINK
- depends on PCI
+ depends on PCI && NETDEVICES && ETHERNET && INET
select MLX4_CORE
imply PTP_1588_CLOCK
---help---
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index d8e9a323122e..db909b6069b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -144,9 +144,9 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
- int cq_num)
+ int cq_num, u8 opmod)
{
- return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
+ return mlx4_cmd(dev, mailbox->dma, cq_num, opmod,
MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
@@ -287,11 +287,61 @@ static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
__mlx4_cq_free_icm(dev, cqn);
}
+static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
+{
+ int entries_per_copy = PAGE_SIZE / cqe_size;
+ void *init_ents;
+ int err = 0;
+ int i;
+
+ init_ents = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!init_ents)
+ return -ENOMEM;
+
+ /* Populate a list of CQ entries to reduce the number of
+ * copy_to_user calls. 0xcc is the initialization value
+ * required by the FW.
+ */
+ memset(init_ents, 0xcc, PAGE_SIZE);
+
+ if (entries_per_copy < entries) {
+ for (i = 0; i < entries / entries_per_copy; i++) {
+ err = copy_to_user(buf, init_ents, PAGE_SIZE);
+ if (err)
+ goto out;
+
+ buf += PAGE_SIZE;
+ }
+ } else {
+ err = copy_to_user(buf, init_ents, entries * cqe_size);
+ }
+
+out:
+ kfree(init_ents);
+
+ return err;
+}
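
An assumed illustration of the batching win described in the comment above: 4096 CQEs of 64 bytes each on a 4 KiB-page system take 64 page-sized copy_to_user() calls instead of one call per CQE.

#include <stdio.h>

int main(void)
{
	int page_size = 4096, cqe_size = 64, entries = 4096;
	int entries_per_copy = page_size / cqe_size;	/* 64 */

	printf("copies: %d (vs %d per-CQE copies)\n",
	       entries / entries_per_copy, entries);	/* 64 vs 4096 */
	return 0;
}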
+
+static void mlx4_init_kernel_cqes(struct mlx4_buf *buf,
+ int entries,
+ int cqe_size)
+{
+ int i;
+
+ if (buf->nbufs == 1)
+ memset(buf->direct.buf, 0xcc, entries * cqe_size);
+ else
+ for (i = 0; i < buf->npages; i++)
+ memset(buf->page_list[i].buf, 0xcc,
+ 1UL << buf->page_shift);
+}
+
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
struct mlx4_cq *cq, unsigned vector, int collapsed,
- int timestamp_en)
+ int timestamp_en, void *buf_addr, bool user_cq)
{
+ bool sw_cq_init = dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cq_table *cq_table = &priv->cq_table;
struct mlx4_cmd_mailbox *mailbox;
@@ -336,7 +386,20 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
cq_context->db_rec_addr = cpu_to_be64(db_rec);
- err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
+ if (sw_cq_init) {
+ if (user_cq) {
+ err = mlx4_init_user_cqes(buf_addr, nent,
+ dev->caps.cqe_size);
+ if (err)
+ sw_cq_init = false;
+ } else {
+ mlx4_init_kernel_cqes(buf_addr, nent,
+ dev->caps.cqe_size);
+ }
+ }
+
+ err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn, sw_cq_init);
+
mlx4_free_cmd_mailbox(dev, mailbox);
if (err)
goto err_radix;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 1e487acb4667..74d466796b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -54,11 +54,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
if (!cq) {
- cq = kzalloc(sizeof(*cq), GFP_KERNEL);
- if (!cq) {
- en_err(priv, "Failed to allocate CQ structure\n");
- return -ENOMEM;
- }
+ en_err(priv, "Failed to allocate CQ structure\n");
+ return -ENOMEM;
}
cq->size = entries;
@@ -143,7 +140,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
- cq->vector, 0, timestamp_en);
+ cq->vector, 0, timestamp_en, &cq->wqres.buf, false);
if (err)
goto free_eq;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index f11b45001cad..d290f0787dfb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1084,8 +1084,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
tx_pause = !!(pause->tx_pause);
rx_pause = !!(pause->rx_pause);
- rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
- tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+ rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
+ tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index b744cd49a785..6b88881b8e35 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3493,8 +3493,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
- /* MTU range: 46 - hw-specific max */
- dev->min_mtu = MLX4_EN_MIN_MTU;
+ /* MTU range: 68 - hw-specific max */
+ dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
mdev->pndev[port] = dev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index db00bf1c23f5..9a0881cb7f51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -271,11 +271,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
if (!ring) {
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring) {
- en_err(priv, "Failed to allocate RX ring structure\n");
- return -ENOMEM;
- }
+ en_err(priv, "Failed to allocate RX ring structure\n");
+ return -ENOMEM;
}
ring->prod = 0;
@@ -875,7 +872,7 @@ csum_none:
skb->data_len = length;
napi_gro_frags(&cq->napi);
} else {
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
skb_clear_hash(skb);
}
next:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 6f5153afcab4..2cbd2bd7c67c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -57,11 +57,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
if (!ring) {
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring) {
- en_err(priv, "Failed allocating TX ring\n");
- return -ENOMEM;
- }
+ en_err(priv, "Failed allocating TX ring\n");
+ return -ENOMEM;
}
ring->size = size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index babcfd9c0571..7df728f1e5b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -166,6 +166,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[37] = "sl to vl mapping table change event support",
[38] = "user MAC support",
[39] = "Report driver version to FW support",
+ [40] = "SW CQ initialization support",
};
int i;
@@ -1098,6 +1099,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
if (field32 & (1 << 21))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
+ if (field32 & (1 << 23))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;
for (i = 1; i <= dev_cap->num_ports; i++) {
err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 6a046030e873..bdb8dd161923 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -63,7 +63,7 @@ struct workqueue_struct *mlx4_wq;
#ifdef CONFIG_MLX4_DEBUG
-int mlx4_debug_level = 0;
+int mlx4_debug_level; /* 0 by default */
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
@@ -83,7 +83,7 @@ MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number o
static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
-module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
+module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
"num_vfs=port1,port2,port1+2");
@@ -313,7 +313,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
for (i = 0; i < dev->caps.num_ports - 1; i++) {
if (port_type[i] != port_type[i + 1]) {
mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
}
@@ -322,7 +322,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
if (!(port_type[i] & dev->caps.supported_type[i+1])) {
mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
i + 1);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
return 0;
@@ -1188,8 +1188,7 @@ static int __set_port_type(struct mlx4_port_info *info,
mlx4_err(mdev,
"Requested port type for port %d is not supported on this HCA\n",
info->port);
- err = -EINVAL;
- goto err_sup;
+ return -EOPNOTSUPP;
}
mlx4_stop_sense(mdev);
@@ -1211,7 +1210,7 @@ static int __set_port_type(struct mlx4_port_info *info,
for (i = 1; i <= mdev->caps.num_ports; i++) {
if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
mdev->caps.possible_type[i] = mdev->caps.port_type[i];
- err = -EINVAL;
+ err = -EOPNOTSUPP;
}
}
}
@@ -1237,7 +1236,7 @@ static int __set_port_type(struct mlx4_port_info *info,
out:
mlx4_start_sense(mdev);
mutex_unlock(&priv->port_mutex);
-err_sup:
+
return err;
}
@@ -3252,7 +3251,7 @@ disable_sriov:
free_mem:
dev->persist->num_vfs = 0;
kfree(dev->dev_vfs);
- dev->dev_vfs = NULL;
+ dev->dev_vfs = NULL;
return dev_flags & ~MLX4_FLAG_MASTER;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 485d856546c6..8137454e2534 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -161,7 +161,6 @@
#define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
ETH_HLEN + PREAMBLE_LEN)
-#define MLX4_EN_MIN_MTU 46
/* VLAN_HLEN is added twice, to support skb vlan tagged with multiple
* headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
*/
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 31bd56727022..eb13d3618162 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4729,7 +4729,6 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
struct res_srq *tmp;
int state;
u64 in_param;
- LIST_HEAD(tlist);
int srqn;
int err;
@@ -4795,7 +4794,6 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
struct res_cq *tmp;
int state;
u64 in_param;
- LIST_HEAD(tlist);
int cqn;
int err;
@@ -4858,7 +4856,6 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
struct res_mpt *tmp;
int state;
u64 in_param;
- LIST_HEAD(tlist);
int mptn;
int err;
@@ -4926,7 +4923,6 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
struct res_mtt *mtt;
struct res_mtt *tmp;
int state;
- LIST_HEAD(tlist);
int base;
int err;
@@ -5115,7 +5111,6 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
struct res_eq *tmp;
int err;
int state;
- LIST_HEAD(tlist);
int eqn;
err = move_all_busy(dev, slave, RES_EQ);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index d324a3884462..9de9abacf7f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -12,17 +12,17 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
# mlx5 core basic
#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
- health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
+ health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
- diag/fs_tracepoint.o diag/fw_tracer.o
+ fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
+ lib/devcom.o diag/fs_tracepoint.o diag/fw_tracer.o
#
# Netdev basic
#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
- en_selftest.o en/port.o
+ en_selftest.o en/port.o en/monitor_stats.o
#
# Netdev extra
@@ -30,7 +30,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o
#
# Core extra
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index a5a0823e5ada..d3125cdf69db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -40,9 +40,11 @@
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
enum {
CMD_IF_REV = 5,
@@ -313,6 +315,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_FPGA_DESTROY_QP:
case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
case MLX5_CMD_OP_DEALLOC_MEMIC:
+ case MLX5_CMD_OP_PAGE_FAULT_RESUME:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -326,7 +329,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_MKEY:
case MLX5_CMD_OP_QUERY_MKEY:
case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
- case MLX5_CMD_OP_PAGE_FAULT_RESUME:
case MLX5_CMD_OP_CREATE_EQ:
case MLX5_CMD_OP_QUERY_EQ:
case MLX5_CMD_OP_GEN_EQE:
@@ -371,6 +373,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_SET_MONITOR_COUNTER:
+ case MLX5_CMD_OP_ARM_MONITOR_COUNTER:
case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
case MLX5_CMD_OP_QUERY_RATE_LIMIT:
case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
@@ -520,6 +524,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER);
+ MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER);
MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
@@ -805,6 +811,8 @@ static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
return MLX5_GET(mbox_in, in->first.data, opcode);
}
+static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
+
static void cb_timeout_handler(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work,
@@ -1412,14 +1420,32 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
up(&cmd->sem);
}
+static int cmd_comp_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_core_dev *dev;
+ struct mlx5_cmd *cmd;
+ struct mlx5_eqe *eqe;
+
+ cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb);
+ dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ eqe = data;
+
+ mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
+
+ return NOTIFY_OK;
+}
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
+ MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
+ mlx5_eq_notifier_register(dev, &dev->cmd.nb);
mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
+ mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1435,7 +1461,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
}
}
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
+static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@@ -1533,7 +1559,29 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
}
}
}
-EXPORT_SYMBOL(mlx5_cmd_comp_handler);
+
+void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+{
+ unsigned long flags;
+ u64 vector;
+
+ /* wait for pending handlers to complete */
+ mlx5_eq_synchronize_cmd_irq(dev);
+ spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+ vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+ if (!vector)
+ goto no_trig;
+
+ vector |= MLX5_TRIGGERED_CMD_COMP;
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+ mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+ mlx5_cmd_comp_handler(dev, vector, true);
+ return;
+
+no_trig:
+ spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+}
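
An assumed illustration of the vector computation above: with cmd.log_sz = 4 there are 1 << 4 = 16 command slots, and cmd.bitmask carries a set bit per free slot, so ~bitmask masked to 16 bits selects the slots with commands still in flight.

#include <stdio.h>

int main(void)
{
	unsigned int log_sz = 4;
	unsigned long bitmask = 0xfff0;	/* slots 0-3 busy */
	unsigned long slot_mask = (1ul << (1 << log_sz)) - 1;	/* 0xffff */

	printf("trigger vector: 0x%lx\n", ~bitmask & slot_mask);
	return 0;	/* prints: trigger vector: 0xf */
}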
static int status_to_err(u8 status)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 4b85abb5c9f7..713a17ee3751 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -38,6 +38,7 @@
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
@@ -92,10 +93,10 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
- struct mlx5_eq *eq;
+ struct mlx5_eq_comp *eq;
int err;
- eq = mlx5_eqn2eq(dev, eqn);
+ eq = mlx5_eqn2comp_eq(dev, eqn);
if (IS_ERR(eq))
return PTR_ERR(eq);
@@ -119,12 +120,12 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
/* Add to comp EQ CQ tree to recv comp events */
- err = mlx5_eq_add_cq(eq, cq);
+ err = mlx5_eq_add_cq(&eq->core, cq);
if (err)
goto err_cmd;
/* Add to async EQ CQ tree to recv async events */
- err = mlx5_eq_add_cq(&dev->priv.eq_table.async_eq, cq);
+ err = mlx5_eq_add_cq(mlx5_get_async_eq(dev), cq);
if (err)
goto err_cq_add;
@@ -139,7 +140,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
return 0;
err_cq_add:
- mlx5_eq_del_cq(eq, cq);
+ mlx5_eq_del_cq(&eq->core, cq);
err_cmd:
memset(din, 0, sizeof(din));
memset(dout, 0, sizeof(dout));
@@ -157,11 +158,11 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
int err;
- err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq);
+ err = mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq);
if (err)
return err;
- err = mlx5_eq_del_cq(cq->eq, cq);
+ err = mlx5_eq_del_cq(&cq->eq->core, cq);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 90fabd612b6c..a11e22d0b0cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
enum {
QP_PID,
@@ -349,6 +350,16 @@ out:
return param;
}
+static int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};
+
+ MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
+ MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
int index)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 37ba7c78859d..ebc046fa97d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -45,75 +45,11 @@ struct mlx5_device_context {
unsigned long state;
};
-struct mlx5_delayed_event {
- struct list_head list;
- struct mlx5_core_dev *dev;
- enum mlx5_dev_event event;
- unsigned long param;
-};
-
enum {
MLX5_INTERFACE_ADDED,
MLX5_INTERFACE_ATTACHED,
};
-static void add_delayed_event(struct mlx5_priv *priv,
- struct mlx5_core_dev *dev,
- enum mlx5_dev_event event,
- unsigned long param)
-{
- struct mlx5_delayed_event *delayed_event;
-
- delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC);
- if (!delayed_event) {
- mlx5_core_err(dev, "event %d is missed\n", event);
- return;
- }
-
- mlx5_core_dbg(dev, "Accumulating event %d\n", event);
- delayed_event->dev = dev;
- delayed_event->event = event;
- delayed_event->param = param;
- list_add_tail(&delayed_event->list, &priv->waiting_events_list);
-}
-
-static void delayed_event_release(struct mlx5_device_context *dev_ctx,
- struct mlx5_priv *priv)
-{
- struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
- struct mlx5_delayed_event *de;
- struct mlx5_delayed_event *n;
- struct list_head temp;
-
- INIT_LIST_HEAD(&temp);
-
- spin_lock_irq(&priv->ctx_lock);
-
- priv->is_accum_events = false;
- list_splice_init(&priv->waiting_events_list, &temp);
- if (!dev_ctx->context)
- goto out;
- list_for_each_entry_safe(de, n, &temp, list)
- dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
-
-out:
- spin_unlock_irq(&priv->ctx_lock);
-
- list_for_each_entry_safe(de, n, &temp, list) {
- list_del(&de->list);
- kfree(de);
- }
-}
-
-/* accumulating events that can come after mlx5_ib calls to
- * ib_register_device, till adding that interface to the events list.
- */
-static void delayed_event_start(struct mlx5_priv *priv)
-{
- spin_lock_irq(&priv->ctx_lock);
- priv->is_accum_events = true;
- spin_unlock_irq(&priv->ctx_lock);
-}
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
@@ -129,8 +65,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
dev_ctx->intf = intf;
- delayed_event_start(priv);
-
dev_ctx->context = intf->add(dev);
if (dev_ctx->context) {
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@@ -139,22 +73,9 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (dev_ctx->intf->pfault) {
- if (priv->pfault) {
- mlx5_core_err(dev, "multiple page fault handlers not supported");
- } else {
- priv->pfault_ctx = dev_ctx->context;
- priv->pfault = dev_ctx->intf->pfault;
- }
- }
-#endif
spin_unlock_irq(&priv->ctx_lock);
}
- delayed_event_release(dev_ctx, priv);
-
if (!dev_ctx->context)
kfree(dev_ctx);
}
@@ -179,15 +100,6 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
if (!dev_ctx)
return;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- spin_lock_irq(&priv->ctx_lock);
- if (priv->pfault == dev_ctx->intf->pfault)
- priv->pfault = NULL;
- spin_unlock_irq(&priv->ctx_lock);
-
- synchronize_srcu(&priv->pfault_srcu);
-#endif
-
spin_lock_irq(&priv->ctx_lock);
list_del(&dev_ctx->list);
spin_unlock_irq(&priv->ctx_lock);
@@ -207,26 +119,20 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
if (!dev_ctx)
return;
- delayed_event_start(priv);
if (intf->attach) {
if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
- goto out;
+ return;
if (intf->attach(dev, dev_ctx->context))
- goto out;
-
+ return;
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
} else {
if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
- goto out;
+ return;
dev_ctx->context = intf->add(dev);
if (!dev_ctx->context)
- goto out;
-
+ return;
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
}
-
-out:
- delayed_event_release(dev_ctx, priv);
}
void mlx5_attach_device(struct mlx5_core_dev *dev)
@@ -350,28 +256,6 @@ void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
mutex_unlock(&mlx5_intf_mutex);
}
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
- struct mlx5_priv *priv = &mdev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
- void *result = NULL;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
- if ((dev_ctx->intf->protocol == protocol) &&
- dev_ctx->intf->get_dev) {
- result = dev_ctx->intf->get_dev(dev_ctx->context);
- break;
- }
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
- return result;
-}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
-
/* Must be called with intf_mutex held */
void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
{
@@ -422,44 +306,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
return res;
}
-void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
- unsigned long param)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_device_context *dev_ctx;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->ctx_lock, flags);
-
- if (priv->is_accum_events)
- add_delayed_event(priv, dev, event, param);
-
- /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
- * still in priv->ctx_list. In this case, only notify the dev_ctx if its
- * ADDED or ATTACHED bit are set.
- */
- list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf->event &&
- (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
- test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
- dev_ctx->intf->event(dev, dev_ctx->context, event, param);
-
- spin_unlock_irqrestore(&priv->ctx_lock, flags);
-}
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_core_page_fault(struct mlx5_core_dev *dev,
- struct mlx5_pagefault *pfault)
-{
- struct mlx5_priv *priv = &dev->priv;
- int srcu_idx;
-
- srcu_idx = srcu_read_lock(&priv->pfault_srcu);
- if (priv->pfault)
- priv->pfault(dev, priv->pfault_ctx, pfault);
- srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
-}
-#endif
void mlx5_dev_list_lock(void)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 0f11fff32a9b..424457ff9759 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -161,10 +161,10 @@ static void print_misc_parameters_hdrs(struct trace_seq *p,
PRINT_MASKED_VAL(name, p, format); \
}
DECLARE_MASK_VAL(u64, gre_key) = {
- .m = MLX5_GET(fte_match_set_misc, mask, gre_key_h) << 8 |
- MLX5_GET(fte_match_set_misc, mask, gre_key_l),
- .v = MLX5_GET(fte_match_set_misc, value, gre_key_h) << 8 |
- MLX5_GET(fte_match_set_misc, value, gre_key_l)};
+ .m = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi) << 8 |
+ MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo),
+ .v = MLX5_GET(fte_match_set_misc, value, gre_key.nvgre.hi) << 8 |
+ MLX5_GET(fte_match_set_misc, value, gre_key.nvgre.lo)};
PRINT_MASKED_VAL(gre_key, p, "%llu");
PRINT_MASKED_VAL_MISC(u32, source_sqn, source_sqn, p, "%u");
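The hunk above recombines the GRE key from the structured nvgre.hi/lo view: the 24 high bits are shifted left by 8 and OR'ed with the 8 low bits. A hedged standalone C sketch of that recombination (the values are illustrative, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hi = 0x123456;	/* upper 24 bits of the GRE key */
	uint32_t lo = 0x78;	/* lower 8 bits */
	uint64_t key = ((uint64_t)hi << 8) | lo;

	/* prints 0x12345678: the full 32-bit GRE key */
	printf("gre key = 0x%llx\n", (unsigned long long)key);
	return 0;
}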
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index d4ec93bde4de..6999f4486e9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
#define CREATE_TRACE_POINTS
+#include "lib/eq.h"
#include "fw_tracer.h"
#include "fw_tracer_tracepoint.h"
@@ -846,9 +847,9 @@ free_tracer:
return ERR_PTR(err);
}
-/* Create HW resources + start tracer
- * must be called before Async EQ is created
- */
+static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data);
+
+/* Create HW resources + start tracer */
int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
{
struct mlx5_core_dev *dev;
@@ -874,6 +875,9 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
goto err_dealloc_pd;
}
+ MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER);
+ mlx5_eq_notifier_register(dev, &tracer->nb);
+
mlx5_fw_tracer_start(tracer);
return 0;
@@ -883,9 +887,7 @@ err_dealloc_pd:
return err;
}
-/* Stop tracer + Cleanup HW resources
- * must be called after Async EQ is destroyed
- */
+/* Stop tracer + Cleanup HW resources */
void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
{
if (IS_ERR_OR_NULL(tracer))
@@ -893,7 +895,7 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n",
tracer->owner);
-
+ mlx5_eq_notifier_unregister(tracer->dev, &tracer->nb);
cancel_work_sync(&tracer->ownership_change_work);
cancel_work_sync(&tracer->handle_traces_work);
@@ -922,12 +924,11 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
kfree(tracer);
}
-void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
{
- struct mlx5_fw_tracer *tracer = dev->tracer;
-
- if (!tracer)
- return;
+ struct mlx5_fw_tracer *tracer = mlx5_nb_cof(nb, struct mlx5_fw_tracer, nb);
+ struct mlx5_core_dev *dev = tracer->dev;
+ struct mlx5_eqe *eqe = data;
switch (eqe->sub_type) {
case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE:
@@ -942,6 +943,8 @@ void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
eqe->sub_type);
}
+
+ return NOTIFY_OK;
}
EXPORT_TRACEPOINT_SYMBOL(mlx5_fw);
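The fw_tracer hunks above drop the exported mlx5_fw_tracer_event() hook in favor of a notifier block embedded in struct mlx5_fw_tracer and recovered with mlx5_nb_cof(). A hedged standalone sketch of that embedded-notifier pattern; the demo_* names are illustrative, not the driver's API:

#include <stddef.h>
#include <stdio.h>

struct demo_nb {
	int (*notifier_call)(struct demo_nb *nb, unsigned long action,
			     void *data);
};

struct demo_tracer {
	int owner;
	struct demo_nb nb;	/* embedded notifier */
};

/* recover the enclosing object from the embedded member, as
 * mlx5_nb_cof()/container_of() do
 */
#define demo_nb_cof(nb_ptr, type, member) \
	((type *)((char *)(nb_ptr) - offsetof(type, member)))

static int demo_tracer_event(struct demo_nb *nb, unsigned long action,
			     void *data)
{
	struct demo_tracer *tracer =
		demo_nb_cof(nb, struct demo_tracer, nb);

	printf("tracer (owner=%d) handles action %lu\n",
	       tracer->owner, action);
	return 0;
}

int main(void)
{
	struct demo_tracer t = { .owner = 1 };

	t.nb.notifier_call = demo_tracer_event;
	/* an EQ dispatcher would invoke this for each matching event */
	return t.nb.notifier_call(&t.nb, 7, NULL);
}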
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index 0347f2dd5cee..a8b8747f2b61 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -55,6 +55,7 @@
struct mlx5_fw_tracer {
struct mlx5_core_dev *dev;
+ struct mlx5_nb nb;
bool owner;
u8 trc_ver;
struct workqueue_struct *work_queue;
@@ -170,6 +171,5 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev);
int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer);
void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer);
-void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 118324802926..8fa8fdd30b85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -49,6 +49,7 @@
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/net_dim.h>
+#include <linux/bits.h>
#include "wq.h"
#include "mlx5_core.h"
#include "en_stats.h"
@@ -147,9 +148,6 @@ struct page_pool;
MLX5_UMR_MTT_ALIGNMENT))
#define MLX5E_UMR_WQEBBS \
(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
-#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS
-
-#define MLX5E_NUM_MAIN_GROUPS 9
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
@@ -178,8 +176,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
MLX5E_MIN_NUM_CHANNELS :
- min_t(int, mdev->priv.eq_table.num_comp_vectors,
- MLX5E_MAX_NUM_CHANNELS);
+ min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
/* Use this function to get max num channels after netdev was created */
@@ -214,22 +211,24 @@ struct mlx5e_umr_wqe {
extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
enum mlx5e_priv_flag {
- MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
- MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
- MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
- MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
- MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
+ MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ MLX5E_PFLAG_RX_CQE_COMPRESS,
+ MLX5E_PFLAG_RX_STRIDING_RQ,
+ MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
+ MLX5E_PFLAG_XDP_TX_MPWQE,
+ MLX5E_NUM_PFLAGS, /* Keep last */
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
do { \
if (enable) \
- (params)->pflags |= (pflag); \
+ (params)->pflags |= BIT(pflag); \
else \
- (params)->pflags &= ~(pflag); \
+ (params)->pflags &= ~(BIT(pflag)); \
} while (0)
-#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag)))
+#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
@@ -247,9 +246,6 @@ struct mlx5e_params {
bool lro_en;
u32 lro_wqe_sz;
u8 tx_min_inline_mode;
- u8 rss_hfunc;
- u8 toeplitz_hash_key[40];
- u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
bool vlan_strip_disable;
bool scatter_fcs_en;
bool rx_dim_enabled;
@@ -349,7 +345,6 @@ enum {
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
- MLX5E_SQ_STATE_REDIRECT,
};
struct mlx5e_sq_wqe_info {
@@ -410,24 +405,51 @@ struct mlx5e_xdp_info {
struct mlx5e_dma_info di;
};
+struct mlx5e_xdp_info_fifo {
+ struct mlx5e_xdp_info *xi;
+ u32 *cc;
+ u32 *pc;
+ u32 mask;
+};
+
+struct mlx5e_xdp_wqe_info {
+ u8 num_wqebbs;
+ u8 num_ds;
+};
+
+struct mlx5e_xdp_mpwqe {
+ /* Current MPWQE session */
+ struct mlx5e_tx_wqe *wqe;
+ u8 ds_count;
+ u8 max_ds_count;
+};
+
+struct mlx5e_xdpsq;
+typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq*,
+ struct mlx5e_xdp_info*);
struct mlx5e_xdpsq {
/* data path */
/* dirtied @completion */
+ u32 xdpi_fifo_cc;
u16 cc;
bool redirect_flush;
/* dirtied @xmit */
- u16 pc ____cacheline_aligned_in_smp;
- bool doorbell;
+ u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
+ u16 pc;
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg;
+ struct mlx5e_xdp_mpwqe mpwqe;
struct mlx5e_cq cq;
/* read only */
struct mlx5_wq_cyc wq;
struct mlx5e_xdpsq_stats *stats;
+ mlx5e_fp_xmit_xdp_frame xmit_xdp_frame;
struct {
- struct mlx5e_xdp_info *xdpi;
+ struct mlx5e_xdp_wqe_info *wqe_info;
+ struct mlx5e_xdp_info_fifo xdpi_fifo;
} db;
void __iomem *uar_map;
u32 sqn;
@@ -633,7 +655,6 @@ struct mlx5e_channel_stats {
} ____cacheline_aligned_in_smp;
enum {
- MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
};
@@ -654,6 +675,13 @@ enum {
MLX5E_NIC_PRIO
};
+struct mlx5e_rss_params {
+ u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+ u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
+ u8 toeplitz_hash_key[40];
+ u8 hfunc;
+};
+
struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
@@ -674,6 +702,7 @@ struct mlx5e_priv {
struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_rss_params rss_params;
u32 tx_rates[MLX5E_MAX_NUM_SQS];
struct mlx5e_flow_steering fs;
@@ -683,6 +712,8 @@ struct mlx5e_priv {
struct work_struct set_rx_mode_work;
struct work_struct tx_timeout_work;
struct work_struct update_stats_work;
+ struct work_struct monitor_counters_work;
+ struct mlx5_nb monitor_counters_nb;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
@@ -692,6 +723,8 @@ struct mlx5e_priv {
struct hwtstamp_config tstamp;
u16 q_counter;
u16 drop_rq_q_counter;
+ struct notifier_block events_nb;
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
#endif
@@ -769,6 +802,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
void mlx5e_update_stats(struct mlx5e_priv *priv);
+void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
@@ -799,9 +833,11 @@ struct mlx5e_redirect_rqt_param {
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
struct mlx5e_redirect_rqt_param rrp);
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
- enum mlx5e_traffic_types tt,
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
+ const struct mlx5e_tirc_config *ttconfig,
void *tirc, bool inner);
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen);
+struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
@@ -931,14 +967,16 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
int mlx5e_create_tises(struct mlx5e_priv *priv);
-void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
+void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
+void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);
void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
int mlx5e_bits_invert(unsigned long a, int size);
typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv);
+int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
change_hw_mtu_cb set_mtu_cb);
@@ -962,12 +1000,20 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal);
+int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+ struct ethtool_link_ksettings *link_ksettings);
+int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ const struct ethtool_link_ksettings *link_ksettings);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
+void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam);
+int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
int mlx5e_netdev_init(struct net_device *netdev,
@@ -983,12 +1029,26 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
u16 max_channels, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
-void mlx5e_build_rss_params(struct mlx5e_params *params);
+void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
+ u16 num_channels);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
+
+void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
+void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
+netdev_features_t mlx5e_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features);
+#ifdef CONFIG_MLX5_ESWITCH
+int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac);
+int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate);
+int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
+int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
+#endif
#endif /* __MLX5_EN_H__ */
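The MLX5E_SET_PFLAG/MLX5E_GET_PFLAG hunk earlier in this en.h section switches the private-flags enum from precomputed masks to bit indices, deriving masks with BIT() and letting the final MLX5E_NUM_PFLAGS member double as the flag count. A hedged userspace sketch of the same pattern (demo_* names are illustrative):

#include <stdio.h>

#define DEMO_BIT(i)	(1UL << (i))

enum demo_pflag {
	DEMO_PFLAG_RX_MODER,	/* bit index 0 */
	DEMO_PFLAG_TX_MODER,	/* bit index 1 */
	DEMO_NUM_PFLAGS,	/* keep last: doubles as the count */
};

#define DEMO_SET_PFLAG(flags, f, on)			\
	do {						\
		if (on)					\
			(flags) |= DEMO_BIT(f);		\
		else					\
			(flags) &= ~DEMO_BIT(f);	\
	} while (0)

#define DEMO_GET_PFLAG(flags, f) (!!((flags) & DEMO_BIT(f)))

int main(void)
{
	unsigned long flags = 0;

	DEMO_SET_PFLAG(flags, DEMO_PFLAG_TX_MODER, 1);
	printf("tx_moder=%d of %d flags\n",
	       DEMO_GET_PFLAG(flags, DEMO_PFLAG_TX_MODER),
	       DEMO_NUM_PFLAGS);
	return 0;
}

Sizing string tables and handler arrays from the closing enum member is what lets the ethtool hunks below replace ARRAY_SIZE() with MLX5E_NUM_PFLAGS.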
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 1431232c9a09..be5961ff24cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -73,6 +73,22 @@ enum mlx5e_traffic_types {
MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};
+struct mlx5e_tirc_config {
+ u8 l3_prot_type;
+ u8 l4_prot_type;
+ u32 rx_hash_fields;
+};
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
enum mlx5e_tunnel_types {
MLX5E_TT_IPV4_GRE,
MLX5E_TT_IPV6_GRE,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
new file mode 100644
index 000000000000..2ce420851e77
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include "en.h"
+#include "monitor_stats.h"
+#include "lib/eq.h"
+
+/* The driver sets the following list of watch counters:
+ * Ppcnt.802_3:
+ * a_in_range_length_errors Type: 0x0, Counter: 0x0, group_id = N/A
+ * a_out_of_range_length_field Type: 0x0, Counter: 0x1, group_id = N/A
+ * a_frame_too_long_errors Type: 0x0, Counter: 0x2, group_id = N/A
+ * a_frame_check_sequence_errors Type: 0x0, Counter: 0x3, group_id = N/A
+ * a_alignment_errors Type: 0x0, Counter: 0x4, group_id = N/A
+ * if_out_discards Type: 0x0, Counter: 0x5, group_id = N/A
+ * Q_Counters:
+ * Q[index].rx_out_of_buffer Type: 0x1, Counter: 0x4, group_id = counter_ix
+ */
+
+#define NUM_REQ_PPCNT_COUNTER_S1 MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1
+#define NUM_REQ_Q_COUNTERS_S1 MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1
+
+int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))
+ return false;
+ if (MLX5_CAP_PCAM_REG(mdev, ppcnt) &&
+ MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters) <
+ NUM_REQ_PPCNT_COUNTER_S1)
+ return false;
+ if (MLX5_CAP_GEN(mdev, num_q_monitor_counters) <
+ NUM_REQ_Q_COUNTERS_S1)
+ return false;
+ return true;
+}
+
+void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(arm_monitor_counter_out)] = {};
+
+ MLX5_SET(arm_monitor_counter_in, in, opcode,
+ MLX5_CMD_OP_ARM_MONITOR_COUNTER);
+ mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5e_monitor_counters_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ monitor_counters_work);
+
+ mutex_lock(&priv->state_lock);
+ mlx5e_update_ndo_stats(priv);
+ mutex_unlock(&priv->state_lock);
+ mlx5e_monitor_counter_arm(priv);
+}
+
+static int mlx5e_monitor_event_handler(struct notifier_block *nb,
+ unsigned long event, void *eqe)
+{
+ struct mlx5e_priv *priv = mlx5_nb_cof(nb, struct mlx5e_priv,
+ monitor_counters_nb);
+ queue_work(priv->wq, &priv->monitor_counters_work);
+ return NOTIFY_OK;
+}
+
+void mlx5e_monitor_counter_start(struct mlx5e_priv *priv)
+{
+ MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
+ MONITOR_COUNTER);
+ mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
+}
+
+static void mlx5e_monitor_counter_stop(struct mlx5e_priv *priv)
+{
+ mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
+ cancel_work_sync(&priv->monitor_counters_work);
+}
+
+static int fill_monitor_counter_ppcnt_set1(int cnt, u32 *in)
+{
+ enum mlx5_monitor_counter_ppcnt ppcnt_cnt;
+
+ for (ppcnt_cnt = 0;
+ ppcnt_cnt < NUM_REQ_PPCNT_COUNTER_S1;
+ ppcnt_cnt++, cnt++) {
+ MLX5_SET(set_monitor_counter_in, in,
+ monitor_counter[cnt].type,
+ MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT);
+ MLX5_SET(set_monitor_counter_in, in,
+ monitor_counter[cnt].counter,
+ ppcnt_cnt);
+ }
+ return ppcnt_cnt;
+}
+
+static int fill_monitor_counter_q_counter_set1(int cnt, int q_counter, u32 *in)
+{
+ MLX5_SET(set_monitor_counter_in, in,
+ monitor_counter[cnt].type,
+ MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER);
+ MLX5_SET(set_monitor_counter_in, in,
+ monitor_counter[cnt].counter,
+ MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER);
+ MLX5_SET(set_monitor_counter_in, in,
+ monitor_counter[cnt].counter_group_id,
+ q_counter);
+ return 1;
+}
+
+/* Check mlx5e_monitor_counter_supported() before calling this function. */
+static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);
+ int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
+ int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
+ MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
+ u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
+ int q_counter = priv->q_counter;
+ int cnt = 0;
+
+ if (num_ppcnt_counters >= NUM_REQ_PPCNT_COUNTER_S1 &&
+ max_num_of_counters >= (NUM_REQ_PPCNT_COUNTER_S1 + cnt))
+ cnt += fill_monitor_counter_ppcnt_set1(cnt, in);
+
+ if (num_q_counters >= NUM_REQ_Q_COUNTERS_S1 &&
+ max_num_of_counters >= (NUM_REQ_Q_COUNTERS_S1 + cnt) &&
+ q_counter)
+ cnt += fill_monitor_counter_q_counter_set1(cnt, q_counter, in);
+
+ MLX5_SET(set_monitor_counter_in, in, num_of_counters, cnt);
+ MLX5_SET(set_monitor_counter_in, in, opcode,
+ MLX5_CMD_OP_SET_MONITOR_COUNTER);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+/* Check mlx5e_monitor_counter_supported() before calling this function. */
+void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
+{
+ INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
+ mlx5e_monitor_counter_start(priv);
+ mlx5e_set_monitor_counter(priv);
+ mlx5e_monitor_counter_arm(priv);
+ queue_work(priv->wq, &priv->update_stats_work);
+}
+
+static void mlx5e_monitor_counter_disable(struct mlx5e_priv *priv)
+{
+ u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
+
+ MLX5_SET(set_monitor_counter_in, in, num_of_counters, 0);
+ MLX5_SET(set_monitor_counter_in, in, opcode,
+ MLX5_CMD_OP_SET_MONITOR_COUNTER);
+
+ mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+/* Check mlx5e_monitor_counter_supported() before calling this function. */
+void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
+{
+ mlx5e_monitor_counter_disable(priv);
+ mlx5e_monitor_counter_stop(priv);
+}
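monitor_stats.c above runs a one-shot cycle: arming the counter lets the firmware raise a single event, the notifier queues the stats work, and the work re-arms only after the update completes. A hedged single-threaded sketch of that control flow (demo_* names are illustrative; the real driver defers the update to a workqueue):

#include <stdbool.h>
#include <stdio.h>

static bool armed;

static void demo_arm(void)
{
	armed = true;	/* HW raises at most one event until re-armed */
}

static void demo_update_stats_work(void)
{
	printf("stats updated\n");
	demo_arm();	/* re-arm only after the update completes */
}

static void demo_event_handler(void)
{
	if (!armed)
		return;		/* spurious event */
	armed = false;
	demo_update_stats_work();	/* real code: queue_work() */
}

int main(void)
{
	demo_arm();
	demo_event_handler();	/* consumes the armed event, re-arms */
	demo_event_handler();	/* a later event is handled again */
	return 0;
}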
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
new file mode 100644
index 000000000000..e1ac4b3d22fb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef __MLX5_MONITOR_H__
+#define __MLX5_MONITOR_H__
+
+int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv);
+void mlx5e_monitor_counter_init(struct mlx5e_priv *priv);
+void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv);
+void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv);
+
+#endif /* __MLX5_MONITOR_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
new file mode 100644
index 000000000000..046948ead152
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#include <net/vxlan.h>
+#include <net/gre.h>
+#include "lib/vxlan.h"
+#include "en/tc_tun.h"
+
+static int get_route_and_out_devs(struct mlx5e_priv *priv,
+ struct net_device *dev,
+ struct net_device **route_dev,
+ struct net_device **out_dev)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct net_device *uplink_dev, *uplink_upper;
+ bool dst_is_lag_dev;
+
+ uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+ uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+ dst_is_lag_dev = (uplink_upper &&
+ netif_is_lag_master(uplink_upper) &&
+ dev == uplink_upper &&
+ mlx5_lag_is_sriov(priv->mdev));
+
+ /* if the egress device isn't on the same HW e-switch or
+ * it's a LAG device, use the uplink
+ */
+ if (!switchdev_port_same_parent_id(priv->netdev, dev) ||
+ dst_is_lag_dev) {
+ *route_dev = uplink_dev;
+ *out_dev = *route_dev;
+ } else {
+ *route_dev = dev;
+ if (is_vlan_dev(*route_dev))
+ *out_dev = uplink_dev;
+ else if (mlx5e_eswitch_rep(dev))
+ *out_dev = *route_dev;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi4 *fl4,
+ struct neighbour **out_n,
+ u8 *out_ttl)
+{
+ struct rtable *rt;
+ struct neighbour *n = NULL;
+
+#if IS_ENABLED(CONFIG_INET)
+ int ret;
+
+ rt = ip_route_output_key(dev_net(mirred_dev), fl4);
+ ret = PTR_ERR_OR_ZERO(rt);
+ if (ret)
+ return ret;
+#else
+ return -EOPNOTSUPP;
+#endif
+
+ ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
+ if (ret < 0)
+ return ret;
+
+ if (!(*out_ttl))
+ *out_ttl = ip4_dst_hoplimit(&rt->dst);
+ n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
+ ip_rt_put(rt);
+ if (!n)
+ return -ENOMEM;
+
+ *out_n = n;
+ return 0;
+}
+
+static const char *mlx5e_netdev_kind(struct net_device *dev)
+{
+ if (dev->rtnl_link_ops)
+ return dev->rtnl_link_ops->kind;
+ else
+ return "";
+}
+
+static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct net_device **route_dev,
+ struct flowi6 *fl6,
+ struct neighbour **out_n,
+ u8 *out_ttl)
+{
+ struct neighbour *n = NULL;
+ struct dst_entry *dst;
+
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ int ret;
+
+ ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
+ fl6);
+ if (ret < 0)
+ return ret;
+
+ if (!(*out_ttl))
+ *out_ttl = ip6_dst_hoplimit(dst);
+
+ ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
+ if (ret < 0)
+ return ret;
+#else
+ return -EOPNOTSUPP;
+#endif
+
+ n = dst_neigh_lookup(dst, &fl6->daddr);
+ dst_release(dst);
+ if (!n)
+ return -ENOMEM;
+
+ *out_n = n;
+ return 0;
+}
+
+static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key)
+{
+ __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+ struct udphdr *udp = (struct udphdr *)(buf);
+ struct vxlanhdr *vxh = (struct vxlanhdr *)
+ ((char *)udp + sizeof(struct udphdr));
+
+ udp->dest = tun_key->tp_dst;
+ vxh->vx_flags = VXLAN_HF_VNI;
+ vxh->vx_vni = vxlan_vni_field(tun_id);
+
+ return 0;
+}
+
+static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key)
+{
+ __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+ int hdr_len;
+ struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
+
+	/* the HW does not calculate GRE checksums or sequence numbers */
+ if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
+ return -EOPNOTSUPP;
+
+ greh->protocol = htons(ETH_P_TEB);
+
+ /* GRE key */
+ hdr_len = gre_calc_hlen(tun_key->tun_flags);
+ greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
+ if (tun_key->tun_flags & TUNNEL_KEY) {
+ __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
+
+ *ptr = tun_id;
+ }
+
+ return 0;
+}
+
+static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto,
+ struct mlx5e_encap_entry *e)
+{
+ int err = 0;
+ struct ip_tunnel_key *key = &e->tun_info.key;
+
+ if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ *ip_proto = IPPROTO_UDP;
+ err = mlx5e_gen_vxlan_header(buf, key);
+ } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
+ *ip_proto = IPPROTO_GRE;
+ err = mlx5e_gen_gre_header(buf, key);
+ } else {
+		pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n",
+			e->tunnel_type);
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev,
+ struct mlx5e_encap_entry *e,
+ u16 proto)
+{
+ struct ethhdr *eth = (struct ethhdr *)buf;
+ char *ip;
+
+ ether_addr_copy(eth->h_dest, e->h_dest);
+ ether_addr_copy(eth->h_source, dev->dev_addr);
+ if (is_vlan_dev(dev)) {
+ struct vlan_hdr *vlan = (struct vlan_hdr *)
+ ((char *)eth + ETH_HLEN);
+ ip = (char *)vlan + VLAN_HLEN;
+ eth->h_proto = vlan_dev_vlan_proto(dev);
+ vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev));
+ vlan->h_vlan_encapsulated_proto = htons(proto);
+ } else {
+ eth->h_proto = htons(proto);
+ ip = (char *)eth + ETH_HLEN;
+ }
+
+ return ip;
+}
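gen_eth_tnl_hdr() above writes the L2 (and optional VLAN) header and returns a cursor just past it, so the callers can stack the IP and tunnel headers behind it in one contiguous buffer. A hedged standalone sketch of that cursor-stacking layout; sizes and fill bytes are simplified placeholders, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_ETH_HLEN	14
#define DEMO_IP_HLEN	20
#define DEMO_TUN_HLEN	8	/* e.g. GRE base header + key */

static char *demo_gen_eth(char *buf)
{
	memset(buf, 0xee, DEMO_ETH_HLEN);	/* MACs + ethertype */
	return buf + DEMO_ETH_HLEN;	/* cursor for the IP layer */
}

static char *demo_gen_ip(char *buf)
{
	memset(buf, 0x45, DEMO_IP_HLEN);	/* version/IHL, addrs, ... */
	return buf + DEMO_IP_HLEN;	/* cursor for the tunnel layer */
}

int main(void)
{
	int encap_size = DEMO_ETH_HLEN + DEMO_IP_HLEN + DEMO_TUN_HLEN;
	char *hdr = calloc(1, encap_size);
	char *p;

	if (!hdr)
		return 1;
	p = demo_gen_eth(hdr);
	p = demo_gen_ip(p);
	memset(p, 0xab, DEMO_TUN_HLEN);	/* tunnel header goes last */
	printf("encap header is %d bytes\n", encap_size);
	free(hdr);
	return 0;
}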
+
+int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
+{
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ struct net_device *out_dev, *route_dev;
+ struct neighbour *n = NULL;
+ struct flowi4 fl4 = {};
+ int ipv4_encap_size;
+ char *encap_header;
+ u8 nud_state, ttl;
+ struct iphdr *ip;
+ int err;
+
+ /* add the IP fields */
+ fl4.flowi4_tos = tun_key->tos;
+ fl4.daddr = tun_key->u.ipv4.dst;
+ fl4.saddr = tun_key->u.ipv4.src;
+ ttl = tun_key->ttl;
+
+ err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
+ &fl4, &n, &ttl);
+ if (err)
+ return err;
+
+ ipv4_encap_size =
+ (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ sizeof(struct iphdr) +
+ e->tunnel_hlen;
+
+ if (max_encap_size < ipv4_encap_size) {
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+ ipv4_encap_size, max_encap_size);
+ return -EOPNOTSUPP;
+ }
+
+ encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
+ if (!encap_header)
+ return -ENOMEM;
+
+	/* used by mlx5e_detach_encap to look up the entry in the
+	 * neigh hash table when a user deletes a rule
+	 */
+ e->m_neigh.dev = n->dev;
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+
+	/* It's important to add the neigh to the hash table before checking
+	 * its validity state, so that if a notification arrives when the
+	 * neigh changes its validity state, we can find the relevant neigh
+	 * in the hash.
+	 */
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ if (err)
+ goto free_encap;
+
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(e->h_dest, n->ha);
+ read_unlock_bh(&n->lock);
+
+ /* add ethernet header */
+ ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
+ ETH_P_IP);
+
+ /* add ip header */
+ ip->tos = tun_key->tos;
+ ip->version = 0x4;
+ ip->ihl = 0x5;
+ ip->ttl = ttl;
+ ip->daddr = fl4.daddr;
+ ip->saddr = fl4.saddr;
+
+ /* add tunneling protocol header */
+ err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
+ &ip->protocol, e);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->encap_size = ipv4_encap_size;
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ err = -EAGAIN;
+ goto out;
+ }
+
+ err = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv4_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB,
+ &e->encap_id);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+ neigh_release(n);
+ return err;
+
+destroy_neigh_entry:
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
+free_encap:
+ kfree(encap_header);
+out:
+ if (n)
+ neigh_release(n);
+ return err;
+}
+
+int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
+{
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ struct net_device *out_dev, *route_dev;
+ struct neighbour *n = NULL;
+ struct flowi6 fl6 = {};
+ struct ipv6hdr *ip6h;
+ int ipv6_encap_size;
+ char *encap_header;
+ u8 nud_state, ttl;
+ int err;
+
+ ttl = tun_key->ttl;
+
+ fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ fl6.daddr = tun_key->u.ipv6.dst;
+ fl6.saddr = tun_key->u.ipv6.src;
+
+ err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
+ &fl6, &n, &ttl);
+ if (err)
+ return err;
+
+ ipv6_encap_size =
+ (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ sizeof(struct ipv6hdr) +
+ e->tunnel_hlen;
+
+ if (max_encap_size < ipv6_encap_size) {
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+ ipv6_encap_size, max_encap_size);
+ return -EOPNOTSUPP;
+ }
+
+ encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
+ if (!encap_header)
+ return -ENOMEM;
+
+	/* used by mlx5e_detach_encap to look up the entry in the
+	 * neigh hash table when a user deletes a rule
+	 */
+ e->m_neigh.dev = n->dev;
+ e->m_neigh.family = n->ops->family;
+ memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
+ e->out_dev = out_dev;
+
+	/* It's important to add the neigh to the hash table before checking
+	 * its validity state, so that if a notification arrives when the
+	 * neigh changes its validity state, we can find the relevant neigh
+	 * in the hash.
+	 */
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ if (err)
+ goto free_encap;
+
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ ether_addr_copy(e->h_dest, n->ha);
+ read_unlock_bh(&n->lock);
+
+ /* add ethernet header */
+ ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
+ ETH_P_IPV6);
+
+ /* add ip header */
+ ip6_flow_hdr(ip6h, tun_key->tos, 0);
+	/* the HW fills in the IPv6 payload length */
+ ip6h->hop_limit = ttl;
+ ip6h->daddr = fl6.daddr;
+ ip6h->saddr = fl6.saddr;
+
+ /* add tunneling protocol header */
+ err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
+ &ip6h->nexthdr, e);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->encap_size = ipv6_encap_size;
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ err = -EAGAIN;
+ goto out;
+ }
+
+ err = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv6_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB,
+ &e->encap_id);
+ if (err)
+ goto destroy_neigh_entry;
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
+ neigh_release(n);
+ return err;
+
+destroy_neigh_entry:
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
+free_encap:
+ kfree(encap_header);
+out:
+ if (n)
+ neigh_release(n);
+ return err;
+}
+
+int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev)
+{
+ if (netif_is_vxlan(tunnel_dev))
+ return MLX5E_TC_TUNNEL_TYPE_VXLAN;
+ else if (netif_is_gretap(tunnel_dev) ||
+ netif_is_ip6gretap(tunnel_dev))
+ return MLX5E_TC_TUNNEL_TYPE_GRETAP;
+ else
+ return MLX5E_TC_TUNNEL_TYPE_UNKNOWN;
+}
+
+bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
+ struct net_device *netdev)
+{
+ int tunnel_type = mlx5e_tc_tun_get_type(netdev);
+
+ if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN &&
+ MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
+ return true;
+ else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP &&
+ MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap))
+ return true;
+ else
+ return false;
+}
+
+int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev);
+
+ if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ int dst_port = be16_to_cpu(e->tun_info.key.tp_dst);
+
+ if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vxlan udp dport was not registered with the HW");
+ netdev_warn(priv->netdev,
+ "%d isn't an offloaded vxlan udp dport\n",
+ dst_port);
+ return -EOPNOTSUPP;
+ }
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
+ e->tunnel_hlen = VXLAN_HLEN;
+ } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
+ e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE;
+ e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags);
+ } else {
+ e->reformat_type = -1;
+ e->tunnel_hlen = -1;
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct flow_dissector_key_ports *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ f->key);
+ struct flow_dissector_key_ports *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_PORTS,
+ f->mask);
+ void *misc_c = MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ misc_parameters);
+
+ /* Full udp dst port must be given */
+ if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) ||
+ memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VXLAN decap filter must include enc_dst_port condition");
+ netdev_warn(priv->netdev,
+ "VXLAN decap filter must include enc_dst_port condition\n");
+ return -EOPNOTSUPP;
+ }
+
+	/* udp dst port must be known as a VXLAN port */
+ if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matched UDP port is not registered as a VXLAN port");
+ netdev_warn(priv->netdev,
+ "UDP port %d is not registered as a VXLAN port\n",
+ be16_to_cpu(key->dst));
+ return -EOPNOTSUPP;
+ }
+
+ /* dst UDP port is valid here */
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src));
+
+ /* match on VNI */
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_dissector_key_keyid *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->key);
+ struct flow_dissector_key_keyid *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->mask);
+ MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
+ be32_to_cpu(mask->keyid));
+ MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
+ be32_to_cpu(key->keyid));
+ }
+ return 0;
+}
+
+static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *outer_headers_c,
+ void *outer_headers_v)
+{
+ void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "GRE HW offloading is not supported");
+ netdev_warn(priv->netdev, "GRE HW offloading is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ ip_protocol, IPPROTO_GRE);
+
+	/* GRE protocol */
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB);
+
+ /* gre key */
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_dissector_key_keyid *mask = NULL;
+ struct flow_dissector_key_keyid *key = NULL;
+
+ mask = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->mask);
+ MLX5_SET(fte_match_set_misc, misc_c,
+ gre_key.key, be32_to_cpu(mask->keyid));
+
+ key = skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->key);
+ MLX5_SET(fte_match_set_misc, misc_v,
+ gre_key.key, be32_to_cpu(key->keyid));
+ }
+
+ return 0;
+}
+
+int mlx5e_tc_tun_parse(struct net_device *filter_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v)
+{
+ int tunnel_type;
+ int err = 0;
+
+ tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
+ if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
+ headers_c, headers_v);
+ } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
+ err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
+ headers_c, headers_v);
+ } else {
+ netdev_warn(priv->netdev,
+ "decapsulation offload is not supported for %s net device (%d)\n",
+ mlx5e_netdev_kind(filter_dev), tunnel_type);
+ return -EOPNOTSUPP;
+ }
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
new file mode 100644
index 000000000000..706ce7bf15e7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_TUNNEL_H__
+#define __MLX5_EN_TC_TUNNEL_H__
+
+#include <linux/netdevice.h>
+#include <linux/mlx5/fs.h>
+#include <net/pkt_cls.h>
+#include <linux/netlink.h>
+#include "en.h"
+#include "en_rep.h"
+
+enum {
+ MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
+ MLX5E_TC_TUNNEL_TYPE_VXLAN,
+ MLX5E_TC_TUNNEL_TYPE_GRETAP
+};
+
+int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack);
+
+int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e);
+
+int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e);
+
+int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev);
+bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
+ struct net_device *netdev);
+
+int mlx5e_tc_tun_parse(struct net_device *filter_dev,
+ struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct tc_cls_flower_offload *f,
+ void *headers_c,
+ void *headers_v);
+
+#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index ad6d471d00dd..3740177eed09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -47,7 +47,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
xdpi.xdpf->len, PCI_DMA_TODEVICE);
xdpi.di = *di;
- return mlx5e_xmit_xdp_frame(sq, &xdpi);
+ return sq->xmit_xdp_frame(sq, &xdpi);
}
/* returns true if packet was consumed by xdp */
@@ -102,7 +102,98 @@ xdp_abort:
}
}
-bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
+static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u8 wqebbs;
+ u16 pi;
+
+ mlx5e_xdpsq_fetch_wqe(sq, &session->wqe);
+
+ prefetchw(session->wqe->data);
+ session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
+
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+/* The product of MLX5_SEND_WQE_MAX_WQEBBS and MLX5_SEND_WQEBB_NUM_DS
+ * (16 * 4 == 64) does not fit in the 6-bit DS field of the Ctrl Segment.
+ * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
+ * full-session WQE be cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
+#else
+#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
+#endif
+
+ wqebbs = min_t(u16, mlx5_wq_cyc_get_contig_wqebbs(wq, pi),
+ MLX5E_XDP_MPW_MAX_WQEBBS);
+
+ session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs;
+}
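The bound chosen in mlx5e_xdp_mpwqe_session_start() above rests on simple arithmetic: a 6-bit DS field holds at most 63 descriptors, while a full 16-WQEBB session would need 16 * 4 = 64. A hedged check of that bound; the constants restate the comment rather than being read from the mlx5 headers:

#include <stdio.h>

int main(void)
{
	int max_wqebbs = 16;	/* per the MLX5_SEND_WQE_MAX_WQEBBS comment */
	int ds_per_wqebb = 4;	/* per the MLX5_SEND_WQEBB_NUM_DS comment */
	int ds_field_max = (1 << 6) - 1;	/* 6-bit field: 63 */

	printf("full session: %d DS, field max: %d\n",
	       max_wqebbs * ds_per_wqebb, ds_field_max);	/* 64 > 63 */
	printf("capped session: %d DS\n",
	       (max_wqebbs - 1) * ds_per_wqebb);	/* 60, fits */
	return 0;
}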
+
+static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
+ u16 ds_count = session->ds_count;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
+
+ cseg->opmod_idx_opcode =
+ cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);
+
+ wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
+ wi->num_ds = ds_count - MLX5E_XDP_TX_EMPTY_DS_COUNT;
+
+ sq->pc += wi->num_wqebbs;
+
+ sq->doorbell_cseg = cseg;
+
+ session->wqe = NULL; /* Close session */
+}
+
+static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
+ struct mlx5e_xdp_info *xdpi)
+{
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_xdpsq_stats *stats = sq->stats;
+
+ dma_addr_t dma_addr = xdpi->dma_addr;
+ struct xdp_frame *xdpf = xdpi->xdpf;
+ unsigned int dma_len = xdpf->len;
+
+ if (unlikely(sq->hw_mtu < dma_len)) {
+ stats->err++;
+ return false;
+ }
+
+ if (unlikely(!session->wqe)) {
+ if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
+ MLX5_SEND_WQE_MAX_WQEBBS))) {
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
+ stats->full++;
+ return false;
+ }
+
+ mlx5e_xdp_mpwqe_session_start(sq);
+ }
+
+ mlx5e_xdp_mpwqe_add_dseg(sq, dma_addr, dma_len);
+
+ if (unlikely(session->ds_count == session->max_ds_count))
+ mlx5e_xdp_mpwqe_complete(sq);
+
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
+ stats->xmit++;
+ return true;
+}
+
+static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -126,11 +217,8 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
}
if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) {
- if (sq->doorbell) {
- /* SQ is full, ring doorbell */
- mlx5e_xmit_xdp_doorbell(sq);
- sq->doorbell = false;
- }
+ /* SQ is full, ring doorbell */
+ mlx5e_xmit_xdp_doorbell(sq);
stats->full++;
return false;
}
@@ -152,23 +240,20 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
- /* move page to reference to sq responsibility,
- * and mark so it's not put back in page-cache.
- */
- sq->db.xdpi[pi] = *xdpi;
sq->pc++;
- sq->doorbell = true;
+ sq->doorbell_cseg = cseg;
+ mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
stats->xmit++;
return true;
}
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
+ struct mlx5e_xdp_info_fifo *xdpi_fifo;
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
- struct mlx5e_rq *rq;
bool is_redirect;
u16 sqcc;
int i;
@@ -182,8 +267,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
if (!cqe)
return false;
- is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
- rq = container_of(sq, struct mlx5e_rq, xdpsq);
+ is_redirect = !rq;
+ xdpi_fifo = &sq->db.xdpi_fifo;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
@@ -199,20 +284,33 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ))
+ netdev_WARN_ONCE(sq->channel->netdev,
+ "Bad OP in XDPSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+
do {
- u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
- struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+ struct mlx5e_xdp_wqe_info *wi;
+ u16 ci, j;
last_wqe = (sqcc == wqe_counter);
- sqcc++;
-
- if (is_redirect) {
- xdp_return_frame(xdpi->xdpf);
- dma_unmap_single(sq->pdev, xdpi->dma_addr,
- xdpi->xdpf->len, DMA_TO_DEVICE);
- } else {
- /* Recycle RX page */
- mlx5e_page_release(rq, &xdpi->di, true);
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ wi = &sq->db.wqe_info[ci];
+
+ sqcc += wi->num_wqebbs;
+
+ for (j = 0; j < wi->num_ds; j++) {
+ struct mlx5e_xdp_info xdpi =
+ mlx5e_xdpi_fifo_pop(xdpi_fifo);
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi.xdpf);
+ dma_unmap_single(sq->pdev, xdpi.dma_addr,
+ xdpi.xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi.di, true);
+ }
}
} while (!last_wqe);
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
@@ -228,27 +326,32 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
- struct mlx5e_rq *rq;
- bool is_redirect;
-
- is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
- rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq);
+ struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
+ bool is_redirect = !rq;
while (sq->cc != sq->pc) {
- u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
- struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
-
- sq->cc++;
-
- if (is_redirect) {
- xdp_return_frame(xdpi->xdpf);
- dma_unmap_single(sq->pdev, xdpi->dma_addr,
- xdpi->xdpf->len, DMA_TO_DEVICE);
- } else {
- /* Recycle RX page */
- mlx5e_page_release(rq, &xdpi->di, false);
+ struct mlx5e_xdp_wqe_info *wi;
+ u16 ci, i;
+
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+ wi = &sq->db.wqe_info[ci];
+
+ sq->cc += wi->num_wqebbs;
+
+ for (i = 0; i < wi->num_ds; i++) {
+ struct mlx5e_xdp_info xdpi =
+ mlx5e_xdpi_fifo_pop(xdpi_fifo);
+
+ if (is_redirect) {
+ xdp_return_frame(xdpi.xdpf);
+ dma_unmap_single(sq->pdev, xdpi.dma_addr,
+ xdpi.xdpf->len, DMA_TO_DEVICE);
+ } else {
+ /* Recycle RX page */
+ mlx5e_page_release(rq, &xdpi.di, false);
+ }
}
}
}
@@ -292,7 +395,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdpi.xdpf = xdpf;
- if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) {
+ if (unlikely(!sq->xmit_xdp_frame(sq, &xdpi))) {
dma_unmap_single(sq->pdev, xdpi.dma_addr,
xdpf->len, DMA_TO_DEVICE);
xdp_return_frame_rx_napi(xdpf);
@@ -300,8 +403,33 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
}
}
- if (flags & XDP_XMIT_FLUSH)
+ if (flags & XDP_XMIT_FLUSH) {
+ if (sq->mpwqe.wqe)
+ mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xmit_xdp_doorbell(sq);
+ }
return n - drops;
}
+
+void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
+{
+ struct mlx5e_xdpsq *xdpsq = &rq->xdpsq;
+
+ if (xdpsq->mpwqe.wqe)
+ mlx5e_xdp_mpwqe_complete(xdpsq);
+
+ mlx5e_xmit_xdp_doorbell(xdpsq);
+
+ if (xdpsq->redirect_flush) {
+ xdp_do_flush_map();
+ xdpsq->redirect_flush = false;
+ }
+}
+
+void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
+{
+ sq->xmit_xdp_frame = is_mpw ?
+ mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
+}
+
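mlx5e_set_xmit_fp() above selects the xmit routine once, when the MPWQE mode changes, so the per-packet path calls through sq->xmit_xdp_frame without re-testing the mode. A hedged sketch of that configure-time dispatch (demo_* names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct demo_sq;
typedef bool (*demo_xmit_fp)(struct demo_sq *sq, int frame);

struct demo_sq {
	demo_xmit_fp xmit;	/* chosen once, called per packet */
};

static bool demo_xmit_simple(struct demo_sq *sq, int frame)
{
	printf("simple xmit %d\n", frame);
	return true;
}

static bool demo_xmit_mpwqe(struct demo_sq *sq, int frame)
{
	printf("mpwqe xmit %d\n", frame);
	return true;
}

static void demo_set_xmit_fp(struct demo_sq *sq, bool is_mpw)
{
	sq->xmit = is_mpw ? demo_xmit_mpwqe : demo_xmit_simple;
}

int main(void)
{
	struct demo_sq sq;

	demo_set_xmit_fp(&sq, true);
	sq.xmit(&sq, 42);	/* hot path: no mode branch */
	return 0;
}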
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 6dfab045925f..3a67cb3cd179 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -37,27 +37,62 @@
#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_DS_COUNT \
- ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+#define MLX5E_XDP_TX_EMPTY_DS_COUNT \
+ (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
+#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
void *va, u16 *rx_headroom, u32 *len);
-bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
-void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
-
-bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
+bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq);
+void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq);
+void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
+void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
+ if (sq->doorbell_cseg) {
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
+ sq->doorbell_cseg = NULL;
+ }
+}
+
+static inline void
+mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, dma_addr_t dma_addr, u16 dma_len)
+{
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5_wqe_data_seg *dseg =
+ (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count++;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->byte_count = cpu_to_be32(dma_len);
+ dseg->lkey = sq->mkey_be;
+}
+
+static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq,
+ struct mlx5e_tx_wqe **wqe)
+{
struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_tx_wqe *wqe;
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+ *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ memset(*wqe, 0, sizeof(**wqe));
+}
- wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+static inline void
+mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
+ struct mlx5e_xdp_info *xi)
+{
+ u32 i = (*fifo->pc)++ & fifo->mask;
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl);
+ fifo->xi[i] = *xi;
+}
+
+static inline struct mlx5e_xdp_info
+mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
+{
+ return fifo->xi[(*fifo->cc)++ & fifo->mask];
}
#endif
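mlx5e_xdpi_fifo_push()/pop() above index a power-of-two ring with free-running producer/consumer counters, masking only on access; the counters never wrap explicitly, and (pc - cc) is the fill level. A hedged standalone sketch of that ring discipline (demo_* names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_SIZE 8	/* must be a power of two */

struct demo_fifo {
	int entries[DEMO_RING_SIZE];
	uint32_t cc;	/* consumer counter, free-running */
	uint32_t pc;	/* producer counter, free-running */
	uint32_t mask;	/* DEMO_RING_SIZE - 1 */
};

static void demo_push(struct demo_fifo *f, int v)
{
	f->entries[f->pc++ & f->mask] = v;	/* mask folds into array */
}

static int demo_pop(struct demo_fifo *f)
{
	return f->entries[f->cc++ & f->mask];
}

int main(void)
{
	struct demo_fifo f = { .mask = DEMO_RING_SIZE - 1 };
	int i;

	for (i = 0; i < 5; i++)
		demo_push(&f, i);
	while (f.cc != f.pc)	/* pc - cc entries are outstanding */
		printf("popped %d\n", demo_pop(&f));
	return 0;
}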
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 128a82b1dbfc..53608afd39b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -254,11 +254,13 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
struct mlx5e_ipsec_metadata *mdata;
struct mlx5e_ipsec_sa_entry *sa_entry;
struct xfrm_state *x;
+ struct sec_path *sp;
if (!xo)
return skb;
- if (unlikely(skb->sp->len != 1)) {
+ sp = skb_sec_path(skb);
+ if (unlikely(sp->len != 1)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
goto drop;
}
@@ -305,10 +307,11 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct xfrm_offload *xo;
struct xfrm_state *xs;
+ struct sec_path *sp;
u32 sa_handle;
- skb->sp = secpath_dup(skb->sp);
- if (unlikely(!skb->sp)) {
+ sp = secpath_set(skb);
+ if (unlikely(!sp)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
return NULL;
}
@@ -320,8 +323,9 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
return NULL;
}
- skb->sp->xvec[skb->sp->len++] = xs;
- skb->sp->olen++;
+ sp = skb_sec_path(skb);
+ sp->xvec[sp->len++] = xs;
+ sp->olen++;
xo = xfrm_offload(skb);
xo->flags = CRYPTO_DONE;
@@ -372,10 +376,11 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
netdev_features_t features)
{
+ struct sec_path *sp = skb_sec_path(skb);
struct xfrm_state *x;
- if (skb->sp && skb->sp->len) {
- x = skb->sp->xvec[0];
+ if (sp && sp->len) {
+ x = sp->xvec[0];
if (x && x->xso.offload_handle)
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 25c1c4f96841..c9df08133718 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -135,14 +135,15 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
}
-static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
- "rx_cqe_moder",
- "tx_cqe_moder",
- "rx_cqe_compress",
- "rx_striding_rq",
- "rx_no_csum_complete",
+typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
+
+struct pflag_desc {
+ char name[ETH_GSTRING_LEN];
+ mlx5e_pflag_handler handler;
};
+static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS];
+
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
int i, num_stats = 0;
@@ -153,7 +154,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
return num_stats;
case ETH_SS_PRIV_FLAGS:
- return ARRAY_SIZE(mlx5e_priv_flags);
+ return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
return mlx5e_self_test_num(priv);
/* fallthrough */
@@ -183,8 +184,9 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]);
+ for (i = 0; i < MLX5E_NUM_PFLAGS; i++)
+ strcpy(data + i * ETH_GSTRING_LEN,
+ mlx5e_priv_flags[i].name);
break;
case ETH_SS_TEST:
@@ -353,7 +355,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
new_channels.params = priv->channels.params;
new_channels.params.num_channels = count;
if (!netif_is_rxfh_configured(priv->netdev))
- mlx5e_build_default_indir_rqt(new_channels.params.indirection_rqt,
+ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, count);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
@@ -785,10 +787,9 @@ static void get_lp_advertising(u32 eth_proto_lp,
ptys2ethtool_adver_link(lp_advertising, eth_proto_lp);
}
-static int mlx5e_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *link_ksettings)
+int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+ struct ethtool_link_ksettings *link_ksettings)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
u32 rx_pause = 0;
@@ -804,7 +805,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
if (err) {
- netdev_err(netdev, "%s: query port ptys failed: %d\n",
+ netdev_err(priv->netdev, "%s: query port ptys failed: %d\n",
__func__, err);
goto err_query_regs;
}
@@ -824,7 +825,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
get_supported(eth_proto_cap, link_ksettings);
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings);
- get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
+ get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -844,7 +845,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
Autoneg);
if (get_fec_supported_advertised(mdev, link_ksettings))
- netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
+ netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n",
__func__, err);
if (!an_disable_admin)
@@ -855,6 +856,14 @@ err_query_regs:
return err;
}
+static int mlx5e_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
+}
+
static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
@@ -869,10 +878,9 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
return ptys_modes;
}
-static int mlx5e_set_link_ksettings(struct net_device *netdev,
- const struct ethtool_link_ksettings *link_ksettings)
+int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ const struct ethtool_link_ksettings *link_ksettings)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 eth_proto_cap, eth_proto_admin;
bool an_changes = false;
@@ -892,14 +900,14 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev,
err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
if (err) {
- netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+ netdev_err(priv->netdev, "%s: query port eth proto cap failed: %d\n",
__func__, err);
goto out;
}
link_modes = link_modes & eth_proto_cap;
if (!link_modes) {
- netdev_err(netdev, "%s: Not supported link mode(s) requested",
+ netdev_err(priv->netdev, "%s: Not supported link mode(s) requested",
__func__);
err = -EINVAL;
goto out;
@@ -907,7 +915,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev,
err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
if (err) {
- netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+ netdev_err(priv->netdev, "%s: query port eth proto admin failed: %d\n",
__func__, err);
goto out;
}
@@ -929,9 +937,17 @@ out:
return err;
}
+static int mlx5e_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
+}
+
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv)
{
- return sizeof(priv->channels.params.toeplitz_hash_key);
+ return sizeof(priv->rss_params.toeplitz_hash_key);
}
static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
@@ -957,50 +973,27 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rss_params *rss = &priv->rss_params;
if (indir)
- memcpy(indir, priv->channels.params.indirection_rqt,
- sizeof(priv->channels.params.indirection_rqt));
+ memcpy(indir, rss->indirection_rqt,
+ sizeof(rss->indirection_rqt));
if (key)
- memcpy(key, priv->channels.params.toeplitz_hash_key,
- sizeof(priv->channels.params.toeplitz_hash_key));
+ memcpy(key, rss->toeplitz_hash_key,
+ sizeof(rss->toeplitz_hash_key));
if (hfunc)
- *hfunc = priv->channels.params.rss_hfunc;
+ *hfunc = rss->hfunc;
return 0;
}
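Note that mlx5e_modify_tirs_hash is removed here but not deleted from the driver: it reappears in en_main.c later in this patch, rebuilt around the new RSS state. All RSS configuration (indirection table, Toeplitz key, hash function, per-traffic-type hash fields) now lives in priv->rss_params rather than in channels.params, so it survives channel reconfiguration. Roughly, per the en.h change in this series, the new struct looks like:

	/* roughly, as introduced in en.h by this series */
	struct mlx5e_rss_params {
		u32	indirection_rqt[MLX5E_INDIR_RQT_SIZE];
		u32	rx_hash_fields[MLX5E_NUM_INDIR_TIRS];
		u8	toeplitz_hash_key[40];
		u8	hfunc;
	};

Userspace continues to reach this state through the usual RXFH interface, e.g. ethtool -x <dev> to dump the table and key, or ethtool -X <dev> hfunc toeplitz to change the hash function.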
-static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
-{
- void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- struct mlx5_core_dev *mdev = priv->mdev;
- int ctxlen = MLX5_ST_SZ_BYTES(tirc);
- int tt;
-
- MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- memset(tirc, 0, ctxlen);
- mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
- mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
- }
-
- if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
- return;
-
- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- memset(tirc, 0, ctxlen);
- mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
- mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in, inlen);
- }
-}
-
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_rss_params *rss = &priv->rss_params;
int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
bool hash_changed = false;
void *in;
@@ -1016,15 +1009,14 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mutex_lock(&priv->state_lock);
- if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
- hfunc != priv->channels.params.rss_hfunc) {
- priv->channels.params.rss_hfunc = hfunc;
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) {
+ rss->hfunc = hfunc;
hash_changed = true;
}
if (indir) {
- memcpy(priv->channels.params.indirection_rqt, indir,
- sizeof(priv->channels.params.indirection_rqt));
+ memcpy(rss->indirection_rqt, indir,
+ sizeof(rss->indirection_rqt));
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
u32 rqtn = priv->indir_rqt.rqtn;
@@ -1032,7 +1024,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
.is_rss = true,
{
.rss = {
- .hfunc = priv->channels.params.rss_hfunc,
+ .hfunc = rss->hfunc,
.channels = &priv->channels,
},
},
@@ -1043,10 +1035,9 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
}
if (key) {
- memcpy(priv->channels.params.toeplitz_hash_key, key,
- sizeof(priv->channels.params.toeplitz_hash_key));
- hash_changed = hash_changed ||
- priv->channels.params.rss_hfunc == ETH_RSS_HASH_TOP;
+ memcpy(rss->toeplitz_hash_key, key,
+ sizeof(rss->toeplitz_hash_key));
+ hash_changed = hash_changed || rss->hfunc == ETH_RSS_HASH_TOP;
}
if (hash_changed)
@@ -1150,25 +1141,31 @@ static int mlx5e_set_tunable(struct net_device *dev,
return err;
}
-static void mlx5e_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pauseparam)
+void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
int err;
err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
&pauseparam->tx_pause);
if (err) {
- netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
+ netdev_err(priv->netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
__func__, err);
}
}
-static int mlx5e_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pauseparam)
+static void mlx5e_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_ethtool_get_pauseparam(priv, pauseparam);
+}
+
+int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -1179,22 +1176,25 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
pauseparam->rx_pause ? 1 : 0,
pauseparam->tx_pause ? 1 : 0);
if (err) {
- netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
+ netdev_err(priv->netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
__func__, err);
}
return err;
}
+static int mlx5e_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
+}
+
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info)
{
struct mlx5_core_dev *mdev = priv->mdev;
- int ret;
-
- ret = ethtool_op_get_ts_info(priv->netdev, info);
- if (ret)
- return ret;
info->phc_index = mlx5_clock_get_ptp_index(mdev);
@@ -1202,9 +1202,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
info->phc_index == -1)
return 0;
- info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = BIT(HWTSTAMP_TX_OFF) |
BIT(HWTSTAMP_TX_ON);
@@ -1510,8 +1510,6 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
return 0;
}
-typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
-
static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
bool is_rx_cq)
{
@@ -1674,23 +1672,58 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
return 0;
}
+static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channels new_channels = {};
+ int err;
+
+ if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
+ return -EOPNOTSUPP;
+
+ new_channels.params = priv->channels.params;
+
+ MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_XDP_TX_MPWQE, enable);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ return 0;
+ }
+
+ err = mlx5e_open_channels(priv, &new_channels);
+ if (err)
+ return err;
+
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+ return 0;
+}
+
+static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
+ { "rx_cqe_moder", set_pflag_rx_cqe_based_moder },
+ { "tx_cqe_moder", set_pflag_tx_cqe_based_moder },
+ { "rx_cqe_compress", set_pflag_rx_cqe_compress },
+ { "rx_striding_rq", set_pflag_rx_striding_rq },
+ { "rx_no_csum_complete", set_pflag_rx_no_csum_complete },
+ { "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe },
+};
+
static int mlx5e_handle_pflag(struct net_device *netdev,
u32 wanted_flags,
- enum mlx5e_priv_flag flag,
- mlx5e_pflag_handler pflag_handler)
+ enum mlx5e_priv_flag flag)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- bool enable = !!(wanted_flags & flag);
+ bool enable = !!(wanted_flags & BIT(flag));
u32 changes = wanted_flags ^ priv->channels.params.pflags;
int err;
- if (!(changes & flag))
+ if (!(changes & BIT(flag)))
return 0;
- err = pflag_handler(netdev, enable);
+ err = mlx5e_priv_flags[flag].handler(netdev, enable);
if (err) {
- netdev_err(netdev, "%s private flag 0x%x failed err %d\n",
- enable ? "Enable" : "Disable", flag, err);
+ netdev_err(netdev, "%s private flag '%s' failed err %d\n",
+ enable ? "Enable" : "Disable", mlx5e_priv_flags[flag].name, err);
return err;
}
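The private flags become table indices rather than masks: enum mlx5e_priv_flag now counts 0..MLX5E_NUM_PFLAGS-1, the pflag_desc table binds each index to its user-visible name and handler, and BIT(flag) is applied at the point of use, as in the hunk above. The helper macros are, roughly per en.h in this series:

	/* roughly: 'pflag' is an index, params->pflags the bitmask */
	#define MLX5E_SET_PFLAG(params, pflag, enable)			\
		do {							\
			if (enable)					\
				(params)->pflags |= BIT(pflag);		\
			else						\
				(params)->pflags &= ~(BIT(pflag));	\
		} while (0)

	#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))

From userspace the names in the table surface through ethtool --show-priv-flags <dev>, and toggle via e.g. ethtool --set-priv-flags <dev> xdp_tx_mpwqe on.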
@@ -1701,38 +1734,17 @@ static int mlx5e_handle_pflag(struct net_device *netdev,
static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ enum mlx5e_priv_flag pflag;
int err;
mutex_lock(&priv->state_lock);
- err = mlx5e_handle_pflag(netdev, pflags,
- MLX5E_PFLAG_RX_CQE_BASED_MODER,
- set_pflag_rx_cqe_based_moder);
- if (err)
- goto out;
- err = mlx5e_handle_pflag(netdev, pflags,
- MLX5E_PFLAG_TX_CQE_BASED_MODER,
- set_pflag_tx_cqe_based_moder);
- if (err)
- goto out;
-
- err = mlx5e_handle_pflag(netdev, pflags,
- MLX5E_PFLAG_RX_CQE_COMPRESS,
- set_pflag_rx_cqe_compress);
- if (err)
- goto out;
-
- err = mlx5e_handle_pflag(netdev, pflags,
- MLX5E_PFLAG_RX_STRIDING_RQ,
- set_pflag_rx_striding_rq);
- if (err)
- goto out;
-
- err = mlx5e_handle_pflag(netdev, pflags,
- MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
- set_pflag_rx_no_csum_complete);
+ for (pflag = 0; pflag < MLX5E_NUM_PFLAGS; pflag++) {
+ err = mlx5e_handle_pflag(netdev, pflags, pflag);
+ if (err)
+ break;
+ }
-out:
mutex_unlock(&priv->state_lock);
/* Need to fix some features.. */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index c18dcebe1462..4421c10f58ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -771,6 +771,112 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
INIT_LIST_HEAD(&priv->fs.ethtool.rules);
}
+static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type)
+{
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ return MLX5E_TT_IPV4_TCP;
+ case TCP_V6_FLOW:
+ return MLX5E_TT_IPV6_TCP;
+ case UDP_V4_FLOW:
+ return MLX5E_TT_IPV4_UDP;
+ case UDP_V6_FLOW:
+ return MLX5E_TT_IPV6_UDP;
+ case AH_V4_FLOW:
+ return MLX5E_TT_IPV4_IPSEC_AH;
+ case AH_V6_FLOW:
+ return MLX5E_TT_IPV6_IPSEC_AH;
+ case ESP_V4_FLOW:
+ return MLX5E_TT_IPV4_IPSEC_ESP;
+ case ESP_V6_FLOW:
+ return MLX5E_TT_IPV6_IPSEC_ESP;
+ case IPV4_FLOW:
+ return MLX5E_TT_IPV4;
+ case IPV6_FLOW:
+ return MLX5E_TT_IPV6;
+ default:
+ return MLX5E_NUM_INDIR_TIRS;
+ }
+}
+
+static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *nfc)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ enum mlx5e_traffic_types tt;
+ u8 rx_hash_field = 0;
+ void *in;
+
+ tt = flow_type_to_traffic_type(nfc->flow_type);
+ if (tt == MLX5E_NUM_INDIR_TIRS)
+ return -EINVAL;
+
+ /* RSS does not support anything other than hashing to queues
+ * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
+ * port.
+ */
+ if (nfc->flow_type != TCP_V4_FLOW &&
+ nfc->flow_type != TCP_V6_FLOW &&
+ nfc->flow_type != UDP_V4_FLOW &&
+ nfc->flow_type != UDP_V6_FLOW)
+ return -EOPNOTSUPP;
+
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EOPNOTSUPP;
+
+ if (nfc->data & RXH_IP_SRC)
+ rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
+ if (nfc->data & RXH_IP_DST)
+ rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
+ if (nfc->data & RXH_L4_B_0_1)
+ rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
+ if (nfc->data & RXH_L4_B_2_3)
+ rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mutex_lock(&priv->state_lock);
+
+ if (rx_hash_field == priv->rss_params.rx_hash_fields[tt])
+ goto out;
+
+ priv->rss_params.rx_hash_fields[tt] = rx_hash_field;
+ mlx5e_modify_tirs_hash(priv, in, inlen);
+
+out:
+ mutex_unlock(&priv->state_lock);
+ kvfree(in);
+ return 0;
+}
+
+static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *nfc)
+{
+ enum mlx5e_traffic_types tt;
+ u32 hash_field = 0;
+
+ tt = flow_type_to_traffic_type(nfc->flow_type);
+ if (tt == MLX5E_NUM_INDIR_TIRS)
+ return -EINVAL;
+
+ hash_field = priv->rss_params.rx_hash_fields[tt];
+ nfc->data = 0;
+
+ if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
+ nfc->data |= RXH_IP_SRC;
+ if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
+ nfc->data |= RXH_IP_DST;
+ if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
+ nfc->data |= RXH_L4_B_0_1;
+ if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
+ nfc->data |= RXH_L4_B_2_3;
+
+ return 0;
+}
+
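These two handlers expose the per-traffic-type hash fields kept in priv->rss_params.rx_hash_fields through the standard ETHTOOL_SRXFH/ETHTOOL_GRXFH commands. Set is limited to TCP/UDP over IPv4/IPv6; the other recognized flow types can be queried but not changed. In ethtool terms (sdfn selects src/dst IP plus L4 source/dest ports):

	ethtool -N eth0 rx-flow-hash udp4 sdfn
	ethtool -n eth0 rx-flow-hash udp4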
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
int err = 0;
@@ -783,6 +889,9 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
break;
+ case ETHTOOL_SRXFH:
+ err = mlx5e_set_rss_hash_opt(priv, cmd);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -810,6 +919,9 @@ int mlx5e_get_rxnfc(struct net_device *dev,
case ETHTOOL_GRXCLSRLALL:
err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
break;
+ case ETHTOOL_GRXFH:
+ err = mlx5e_get_rss_hash_opt(priv, info);
+ break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 871313d6b34d..8cfd2ec7c0a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -49,6 +49,8 @@
#include "lib/clock.h"
#include "en/port.h"
#include "en/xdp.h"
+#include "lib/eq.h"
+#include "en/monitor_stats.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -59,6 +61,7 @@ struct mlx5e_rq_param {
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
+ bool is_mpw;
};
struct mlx5e_cq_param {
@@ -128,6 +131,8 @@ static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
return !params->lro_en && frag_sz <= PAGE_SIZE;
}
+#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
+ MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
@@ -138,6 +143,9 @@ static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
if (!mlx5e_rx_is_linear_skb(mdev, params))
return false;
+ if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
+ return false;
+
if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
return true;
@@ -223,7 +231,7 @@ void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
MLX5_WQ_TYPE_CYCLIC;
}
-static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 port_state;
@@ -262,7 +270,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
mlx5e_stats_grps[i].update_stats(priv);
}
-static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
+void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
int i;
@@ -293,33 +301,35 @@ void mlx5e_queue_update_stats(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->update_stats_work);
}
-static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
- enum mlx5_dev_event event, unsigned long param)
+static int async_event(struct notifier_block *nb, unsigned long event, void *data)
{
- struct mlx5e_priv *priv = vpriv;
+ struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
+ struct mlx5_eqe *eqe = data;
- if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
- return;
+ if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
+ return NOTIFY_DONE;
- switch (event) {
- case MLX5_DEV_EVENT_PORT_UP:
- case MLX5_DEV_EVENT_PORT_DOWN:
+ switch (eqe->sub_type) {
+ case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
queue_work(priv->wq, &priv->update_carrier_work);
break;
default:
- break;
+ return NOTIFY_DONE;
}
+
+ return NOTIFY_OK;
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
- set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
+ priv->events_nb.notifier_call = async_event;
+ mlx5_notifier_register(priv->mdev, &priv->events_nb);
}
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
- clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
- synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
+ mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
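Port events now arrive over the core device's notifier chain instead of the old mlx5_interface .event callback (removed from mlx5e_interface at the bottom of this file). Returning NOTIFY_DONE for unhandled events keeps the chain walking, so multiple consumers can coexist. A minimal sketch of another consumer, with hypothetical names:

	/* sketch: subscribing a hypothetical sub-driver to the same chain */
	static int my_event_cb(struct notifier_block *nb, unsigned long event,
			       void *data)
	{
		struct my_priv *mp = container_of(nb, struct my_priv, nb);

		if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
			return NOTIFY_DONE;	/* not ours, keep walking */

		queue_work(mp->wq, &mp->work);
		return NOTIFY_OK;
	}

	static void my_enable_events(struct mlx5_core_dev *mdev, struct my_priv *mp)
	{
		mp->nb.notifier_call = my_event_cb;
		mlx5_notifier_register(mdev, &mp->nb);
	}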
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
@@ -983,18 +993,42 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
- kvfree(sq->db.xdpi);
+ kvfree(sq->db.xdpi_fifo.xi);
+ kvfree(sq->db.wqe_info);
+}
+
+static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
+{
+ struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
+ GFP_KERNEL, numa);
+ if (!xdpi_fifo->xi)
+ return -ENOMEM;
+
+ xdpi_fifo->pc = &sq->xdpi_fifo_pc;
+ xdpi_fifo->cc = &sq->xdpi_fifo_cc;
+ xdpi_fifo->mask = dsegs_per_wq - 1;
+
+ return 0;
}
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int err;
- sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
- GFP_KERNEL, numa);
- if (!sq->db.xdpi) {
- mlx5e_free_xdpsq_db(sq);
+ sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
+ GFP_KERNEL, numa);
+ if (!sq->db.wqe_info)
return -ENOMEM;
+
+ err = mlx5e_alloc_xdpsq_fifo(sq, numa);
+ if (err) {
+ mlx5e_free_xdpsq_db(sq);
+ return err;
}
return 0;
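The xdpi FIFO is a power-of-two ring with one mlx5e_xdp_info slot per potential data segment in the SQ (wq_sz * MLX5_SEND_WQEBB_NUM_DS), which is what makes the '& mask' indexing valid. The matching push/pop helpers live in en/xdp.h and look roughly like:

	static inline void
	mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
			     struct mlx5e_xdp_info *xi)
	{
		u32 i = (*fifo->pc)++ & fifo->mask;

		fifo->xi[i] = *xi;
	}

	static inline struct mlx5e_xdp_info
	mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
	{
		return fifo->xi[(*fifo->cc)++ & fifo->mask];
	}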
@@ -1396,6 +1430,7 @@ static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
struct mlx5_core_dev *mdev = c->mdev;
struct mlx5_rate_limit rl = {0};
+ cancel_work_sync(&sq->dim.work);
mlx5e_destroy_sq(mdev, sq->sqn);
if (sq->rate_limit) {
rl.rate = sq->rate_limit;
@@ -1552,11 +1587,8 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
struct mlx5e_xdpsq *sq,
bool is_redirect)
{
- unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
struct mlx5e_create_sq_param csp = {};
- unsigned int inline_hdr_sz = 0;
int err;
- int i;
err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect);
if (err)
@@ -1567,30 +1599,40 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
- if (is_redirect)
- set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state);
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
if (err)
goto err_free_xdpsq;
- if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
- ds_cnt++;
- }
+ mlx5e_set_xmit_fp(sq, param->is_mpw);
+
+ if (!param->is_mpw) {
+ unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+ unsigned int inline_hdr_sz = 0;
+ int i;
+
+ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+ inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
+ ds_cnt++;
+ }
+
+ /* Pre initialize fixed WQE fields */
+ for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
+ struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[i];
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
- /* Pre initialize fixed WQE fields */
- for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
- struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
- struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
- struct mlx5_wqe_data_seg *dseg;
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
- eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);
+ dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
+ dseg->lkey = sq->mkey_be;
- dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
- dseg->lkey = sq->mkey_be;
+ wi->num_wqebbs = 1;
+ wi->num_ds = 1;
+ }
}
return 0;
@@ -1602,7 +1644,7 @@ err_free_xdpsq:
return err;
}
-static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
+static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
struct mlx5e_channel *c = sq->channel;
@@ -1610,7 +1652,7 @@ static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
napi_synchronize(&c->napi);
mlx5e_destroy_sq(c->mdev, sq->sqn);
- mlx5e_free_xdpsq_descs(sq);
+ mlx5e_free_xdpsq_descs(sq, rq);
mlx5e_free_xdpsq(sq);
}
@@ -1763,11 +1805,6 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
mlx5e_free_cq(cq);
}
-static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
-{
- return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
-}
-
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@@ -1918,9 +1955,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
+ int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
struct net_dim_cq_moder icocq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
- int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
unsigned int irq;
int err;
@@ -2003,7 +2040,7 @@ err_close_rq:
err_close_xdp_sq:
if (c->xdp)
- mlx5e_close_xdpsq(&c->rq.xdpsq);
+ mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq);
err_close_sqs:
mlx5e_close_sqs(c);
@@ -2056,10 +2093,10 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
- mlx5e_close_xdpsq(&c->xdpsq);
+ mlx5e_close_xdpsq(&c->xdpsq, NULL);
mlx5e_close_rq(&c->rq);
if (c->xdp)
- mlx5e_close_xdpsq(&c->rq.xdpsq);
+ mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq);
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
napi_disable(&c->napi);
@@ -2226,6 +2263,8 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
void *cqc = param->cqc;
MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
@@ -2302,6 +2341,7 @@ static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
+ param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
@@ -2504,7 +2544,7 @@ static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, ilog2(sz));
- ix = priv->channels.params.indirection_rqt[ix];
+ ix = priv->rss_params.indirection_rqt[ix];
rqn = rrp.rss.channels->c[ix]->rq.rqn;
} else {
rqn = rrp.rqn;
@@ -2587,7 +2627,7 @@ static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
{
.rss = {
.channels = chs,
- .hfunc = chs->params.rss_hfunc,
+ .hfunc = priv->rss_params.hfunc,
}
},
};
@@ -2607,6 +2647,54 @@ static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
mlx5e_redirect_rqts(priv, drop_rrp);
}
+static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
+ [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_TCP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = MLX5_L4_PROT_TYPE_UDP,
+ .rx_hash_fields = MLX5_HASH_IP_L4PORTS,
+ },
+ [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI,
+ },
+ [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP,
+ },
+ [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6,
+ .l4_prot_type = 0,
+ .rx_hash_fields = MLX5_HASH_IP,
+ },
+};
+
+struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt)
+{
+ return tirc_default_config[tt];
+}
+
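tirc_default_config supersedes the long switch statement removed below: each traffic type carries its L3/L4 protocol selectors and its default set of hashed fields, and mlx5e_set_rss_hash_opt() overrides only rx_hash_fields. A caller might use the accessor like this (a sketch; the comments restate the table entry):

	static void example(void)
	{
		/* default hashing for IPv4/TCP, straight from the table above */
		struct mlx5e_tirc_config cfg =
			mlx5e_tirc_get_default_config(MLX5E_TT_IPV4_TCP);

		/* cfg.l3_prot_type == MLX5_L3_PROT_TYPE_IPV4
		 * cfg.l4_prot_type == MLX5_L4_PROT_TYPE_TCP
		 * cfg.rx_hash_fields == MLX5_HASH_IP_L4PORTS
		 */
	}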
static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
if (!params->lro_en)
@@ -2622,116 +2710,68 @@ static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
-void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
- enum mlx5e_traffic_types tt,
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params,
+ const struct mlx5e_tirc_config *ttconfig,
void *tirc, bool inner)
{
void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_L4_SPORT |\
- MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
- MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
- if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
+ MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc));
+ if (rss_params->hfunc == ETH_RSS_HASH_TOP) {
void *rss_key = MLX5_ADDR_OF(tirc, tirc,
rx_hash_toeplitz_key);
size_t len = MLX5_FLD_SZ_BYTES(tirc,
rx_hash_toeplitz_key);
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
- memcpy(rss_key, params->toeplitz_hash_key, len);
+ memcpy(rss_key, rss_params->toeplitz_hash_key, len);
}
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ ttconfig->l3_prot_type);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ ttconfig->l4_prot_type);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ ttconfig->rx_hash_fields);
+}
- switch (tt) {
- case MLX5E_TT_IPV4_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
+static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig,
+ enum mlx5e_traffic_types tt,
+ u32 rx_hash_fields)
+{
+ *ttconfig = tirc_default_config[tt];
+ ttconfig->rx_hash_fields = rx_hash_fields;
+}
- case MLX5E_TT_IPV6_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
+void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+{
+ void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+ struct mlx5e_rss_params *rss = &priv->rss_params;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+ struct mlx5e_tirc_config ttconfig;
+ int tt;
- case MLX5E_TT_IPV4_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
+ MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
- case MLX5E_TT_IPV6_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(tirc, 0, ctxlen);
+ mlx5e_update_rx_hash_fields(&ttconfig, tt,
+ rss->rx_hash_fields[tt]);
+ mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false);
+ mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+ }
- case MLX5E_TT_IPV4:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
+ if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+ return;
- case MLX5E_TT_IPV6:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
- default:
- WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(tirc, 0, ctxlen);
+ mlx5e_update_rx_hash_fields(&ttconfig, tt,
+ rss->rx_hash_fields[tt]);
+ mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true);
+ mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in,
+ inlen);
}
}
@@ -2788,7 +2828,8 @@ static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
- mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
+ mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
+ &tirc_default_config[tt], tirc, true);
}
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
@@ -2819,7 +2860,7 @@ static void mlx5e_query_mtu(struct mlx5_core_dev *mdev,
*mtu = MLX5E_HW2SW_MTU(params, hw_mtu);
}
-static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
+int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
struct mlx5e_params *params = &priv->channels.params;
struct net_device *netdev = priv->netdev;
@@ -2899,7 +2940,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_activate_channels(&priv->channels);
netif_tx_start_all_queues(priv->netdev);
- if (MLX5_ESWITCH_MANAGER(priv->mdev))
+ if (mlx5e_is_vport_rep(priv))
mlx5e_add_sqs_fwd_rules(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2910,7 +2951,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
mlx5e_redirect_rqts_to_drop(priv);
- if (MLX5_ESWITCH_MANAGER(priv->mdev))
+ if (mlx5e_is_vport_rep(priv))
mlx5e_remove_sqs_fwd_rules(priv);
/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -3162,7 +3203,7 @@ err_close_tises:
return err;
}
-void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
+static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
int tc;
@@ -3180,7 +3221,9 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
- mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
+
+ mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
+ &tirc_default_config[tt], tirc, false);
}
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
@@ -3385,11 +3428,14 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
{
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- return mlx5e_configure_flower(priv, cls_flower, flags);
+ return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
+ flags);
case TC_CLSFLOWER_DESTROY:
- return mlx5e_delete_flower(priv, cls_flower, flags);
+ return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
+ flags);
case TC_CLSFLOWER_STATS:
- return mlx5e_stats_flower(priv, cls_flower, flags);
+ return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
+ flags);
default:
return -EOPNOTSUPP;
}
@@ -3402,7 +3448,8 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
+ return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
+ MLX5E_TC_NIC_OFFLOAD);
default:
return -EOPNOTSUPP;
}
@@ -3445,7 +3492,7 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-static void
+void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -3453,8 +3500,10 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- /* update HW stats in background for next time */
- mlx5e_queue_update_stats(priv);
+ if (!mlx5e_monitor_counter_supported(priv)) {
+ /* update HW stats in background for next time */
+ mlx5e_queue_update_stats(priv);
+ }
if (mlx5e_is_uplink_rep(priv)) {
stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
@@ -3587,7 +3636,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- if (!enable && mlx5e_tc_num_filters(priv)) {
+ if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
@@ -3889,7 +3938,7 @@ static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
#ifdef CONFIG_MLX5_ESWITCH
-static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -3926,8 +3975,8 @@ static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}
-static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
- int max_tx_rate)
+int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+ int max_tx_rate)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -3968,8 +4017,8 @@ static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
mlx5_ifla_link2vport(link_state));
}
-static int mlx5e_get_vf_config(struct net_device *dev,
- int vf, struct ifla_vf_info *ivi)
+int mlx5e_get_vf_config(struct net_device *dev,
+ int vf, struct ifla_vf_info *ivi)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -3982,8 +4031,8 @@ static int mlx5e_get_vf_config(struct net_device *dev,
return 0;
}
-static int mlx5e_get_vf_stats(struct net_device *dev,
- int vf, struct ifla_vf_stats *vf_stats)
+int mlx5e_get_vf_stats(struct net_device *dev,
+ int vf, struct ifla_vf_stats *vf_stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -4044,8 +4093,7 @@ static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
queue_work(priv->wq, &vxlan_work->work);
}
-static void mlx5e_add_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4058,8 +4106,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
}
-static void mlx5e_del_vxlan_port(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4109,9 +4156,9 @@ out:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
-static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
- struct net_device *netdev,
- netdev_features_t features)
+netdev_features_t mlx5e_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4134,17 +4181,17 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
struct mlx5e_txqsq *sq)
{
- struct mlx5_eq *eq = sq->cq.mcq.eq;
+ struct mlx5_eq_comp *eq = sq->cq.mcq.eq;
u32 eqe_count;
netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
- eq->eqn, eq->cons_index, eq->irqn);
+ eq->core.eqn, eq->core.cons_index, eq->core.irqn);
eqe_count = mlx5_eq_poll_irq_disabled(eq);
if (!eqe_count)
return false;
- netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
+ netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn);
sq->channel->stats->eq_rearm++;
return true;
}
@@ -4371,8 +4418,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
- .ndo_has_offload_stats = mlx5e_has_offload_stats,
- .ndo_get_offload_stats = mlx5e_get_offload_stats,
#endif
};
@@ -4518,15 +4563,23 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
mlx5e_init_rq_type_params(mdev, params);
}
-void mlx5e_build_rss_params(struct mlx5e_params *params)
+void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
+ u16 num_channels)
{
- params->rss_hfunc = ETH_RSS_HASH_XOR;
- netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
- mlx5e_build_default_indir_rqt(params->indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, params->num_channels);
+ enum mlx5e_traffic_types tt;
+
+ rss_params->hfunc = ETH_RSS_HASH_XOR;
+ netdev_rss_key_fill(rss_params->toeplitz_hash_key,
+ sizeof(rss_params->toeplitz_hash_key));
+ mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, num_channels);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+ rss_params->rx_hash_fields[tt] =
+ tirc_default_config[tt].rx_hash_fields;
}
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
+ struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
u16 max_channels, u16 mtu)
{
@@ -4542,6 +4595,10 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ /* XDP SQ */
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
+ MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
+
/* set CQE compression */
params->rx_cqe_compress_def = false;
if (MLX5_CAP_GEN(mdev, cqe_compression) &&
@@ -4575,7 +4632,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */
- mlx5e_build_rss_params(params);
+ mlx5e_build_rss_params(rss_params, params->num_channels);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@@ -4590,12 +4647,6 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
}
}
-#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-static const struct switchdev_ops mlx5e_switchdev_ops = {
- .switchdev_port_attr_get = mlx5e_attr_get,
-};
-#endif
-
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4705,12 +4756,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
mlx5e_set_netdev_dev_addr(netdev);
-
-#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
- if (MLX5_ESWITCH_MANAGER(mdev))
- netdev->switchdev_ops = &mlx5e_switchdev_ops;
-#endif
-
mlx5e_ipsec_build_netdev(priv);
mlx5e_tls_build_netdev(priv);
}
@@ -4748,14 +4793,16 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rss_params *rss = &priv->rss_params;
int err;
err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
if (err)
return err;
- mlx5e_build_nic_params(mdev, &priv->channels.params,
- mlx5e_get_netdev_max_channels(netdev), netdev->mtu);
+ mlx5e_build_nic_params(mdev, rss, &priv->channels.params,
+ mlx5e_get_netdev_max_channels(netdev),
+ netdev->mtu);
mlx5e_timestamp_init(priv);
@@ -4885,9 +4932,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_lag_add(mdev, netdev);
mlx5e_enable_async_events(priv);
-
- if (MLX5_ESWITCH_MANAGER(priv->mdev))
- mlx5e_register_vport_reps(priv);
+ if (mlx5e_monitor_counter_supported(priv))
+ mlx5e_monitor_counter_init(priv);
if (netdev->reg_state != NETREG_REGISTERED)
return;
@@ -4921,8 +4967,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->set_rx_mode_work);
- if (MLX5_ESWITCH_MANAGER(priv->mdev))
- mlx5e_unregister_vport_reps(priv);
+ if (mlx5e_monitor_counter_supported(priv))
+ mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
@@ -4975,7 +5021,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
netif_carrier_off(netdev);
#ifdef CONFIG_MLX5_EN_ARFS
- netdev->rx_cpu_rmap = mdev->rmap;
+ netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
#endif
return 0;
@@ -5030,7 +5076,7 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
if (priv->channels.params.num_channels > max_nch) {
mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
priv->channels.params.num_channels = max_nch;
- mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
+ mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, max_nch);
}
@@ -5119,7 +5165,6 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
struct net_device *netdev;
- void *rpriv = NULL;
void *priv;
int err;
int nch;
@@ -5129,20 +5174,18 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
return NULL;
#ifdef CONFIG_MLX5_ESWITCH
- if (MLX5_ESWITCH_MANAGER(mdev)) {
- rpriv = mlx5e_alloc_nic_rep_priv(mdev);
- if (!rpriv) {
- mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
- return NULL;
- }
+ if (MLX5_ESWITCH_MANAGER(mdev) &&
+ mlx5_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+ mlx5e_rep_register_vport_reps(mdev);
+ return mdev;
}
#endif
nch = mlx5e_get_max_num_channels(mdev);
- netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, rpriv);
+ netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
- goto err_free_rpriv;
+ return NULL;
}
priv = netdev_priv(netdev);
@@ -5168,30 +5211,26 @@ err_detach:
mlx5e_detach(mdev, priv);
err_destroy_netdev:
mlx5e_destroy_netdev(priv);
-err_free_rpriv:
- kfree(rpriv);
return NULL;
}
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
- struct mlx5e_priv *priv = vpriv;
- void *ppriv = priv->ppriv;
+ struct mlx5e_priv *priv;
+#ifdef CONFIG_MLX5_ESWITCH
+ if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) {
+ mlx5e_rep_unregister_vport_reps(mdev);
+ return;
+ }
+#endif
+ priv = vpriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_delete_app(priv);
#endif
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(priv);
- kfree(ppriv);
-}
-
-static void *mlx5e_get_netdev(void *vpriv)
-{
- struct mlx5e_priv *priv = vpriv;
-
- return priv->netdev;
}
static struct mlx5_interface mlx5e_interface = {
@@ -5199,9 +5238,7 @@ static struct mlx5_interface mlx5e_interface = {
.remove = mlx5e_remove,
.attach = mlx5e_attach,
.detach = mlx5e_detach,
- .event = mlx5e_async_event,
.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
- .get_dev = mlx5e_get_netdev,
};
void mlx5e_init(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index c3c657548824..96cc0c6a4014 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -42,13 +42,24 @@
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
+#include "en/tc_tun.h"
#include "fs_core.h"
-#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
- max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
+ max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
+#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
+struct mlx5e_rep_indr_block_priv {
+ struct net_device *netdev;
+ struct mlx5e_rep_priv *rpriv;
+
+ struct list_head list;
+};
+
+static void mlx5e_rep_indr_unregister_block(struct net_device *netdev);
+
static void mlx5e_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
@@ -98,7 +109,7 @@ static void mlx5e_rep_get_strings(struct net_device *dev,
}
}
-static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
+static void mlx5e_vf_rep_update_hw_counters(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -121,6 +132,32 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
vport_stats->tx_bytes = vf_stats.rx_bytes;
}
+static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct rtnl_link_stats64 *vport_stats;
+
+ mlx5e_grp_802_3_update_stats(priv);
+
+ vport_stats = &priv->stats.vf_vport;
+
+ vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
+ vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
+ vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
+ vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
+}
+
+static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+ if (rep->vport == FDB_UPLINK_VPORT)
+ mlx5e_uplink_rep_update_hw_counters(priv);
+ else
+ mlx5e_vf_rep_update_hw_counters(priv);
+}
+
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -256,6 +293,22 @@ static int mlx5e_rep_set_channels(struct net_device *dev,
return 0;
}
+static int mlx5e_rep_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_coalesce(priv, coal);
+}
+
+static int mlx5e_rep_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_coalesce(priv, coal);
+}
+
static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -270,7 +323,55 @@ static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
+static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_ethtool_get_pauseparam(priv, pauseparam);
+}
+
+static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pauseparam)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
+}
+
+static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
+}
+
+static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
+}
+
+static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = {
+ .get_drvinfo = mlx5e_rep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx5e_rep_get_strings,
+ .get_sset_count = mlx5e_rep_get_sset_count,
+ .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
+ .get_ringparam = mlx5e_rep_get_ringparam,
+ .set_ringparam = mlx5e_rep_set_ringparam,
+ .get_channels = mlx5e_rep_get_channels,
+ .set_channels = mlx5e_rep_set_channels,
+ .get_coalesce = mlx5e_rep_get_coalesce,
+ .set_coalesce = mlx5e_rep_set_coalesce,
+ .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
+ .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
+};
+
+static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.get_drvinfo = mlx5e_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
@@ -280,24 +381,44 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.set_ringparam = mlx5e_rep_set_ringparam,
.get_channels = mlx5e_rep_get_channels,
.set_channels = mlx5e_rep_set_channels,
+ .get_coalesce = mlx5e_rep_get_coalesce,
+ .set_coalesce = mlx5e_rep_set_coalesce,
+ .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
+ .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
+ .get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
+ .set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
-int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct net_device *uplink_upper = NULL;
+ struct mlx5e_priv *uplink_priv = NULL;
+ struct net_device *uplink_dev;
if (esw->mode == SRIOV_NONE)
return -EOPNOTSUPP;
+ uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+ if (uplink_dev) {
+ uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+ uplink_priv = netdev_priv(uplink_dev);
+ }
+
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
attr->u.ppid.id_len = ETH_ALEN;
- ether_addr_copy(attr->u.ppid.id, rep->hw_id);
+ if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) {
+ ether_addr_copy(attr->u.ppid.id, uplink_upper->dev_addr);
+ } else {
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+ ether_addr_copy(attr->u.ppid.id, rep->hw_id);
+ }
break;
default:
return -EOPNOTSUPP;
@@ -466,8 +587,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
ASSERT_RTNL();
- if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
- !ether_addr_equal(e->h_dest, ha))
+ if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
+ (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
mlx5e_tc_encap_flows_del(priv, e);
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
@@ -518,6 +639,184 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
neigh_release(n);
}
+static struct mlx5e_rep_indr_block_priv *
+mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev)
+{
+ struct mlx5e_rep_indr_block_priv *cb_priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv,
+ &rpriv->uplink_priv.tc_indr_block_priv_list,
+ list)
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
+{
+ struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
+ struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
+
+ list_for_each_entry_safe(cb_priv, temp, head, list) {
+ mlx5e_rep_indr_unregister_block(cb_priv->netdev);
+ kfree(cb_priv);
+ }
+}
+
+static int
+mlx5e_rep_indr_offload(struct net_device *netdev,
+ struct tc_cls_flower_offload *flower,
+ struct mlx5e_rep_indr_block_priv *indr_priv)
+{
+ struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
+ int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
+ int err = 0;
+
+ switch (flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ err = mlx5e_configure_flower(netdev, priv, flower, flags);
+ break;
+ case TC_CLSFLOWER_DESTROY:
+ err = mlx5e_delete_flower(netdev, priv, flower, flags);
+ break;
+ case TC_CLSFLOWER_STATS:
+ err = mlx5e_stats_flower(netdev, priv, flower, flags);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
+ void *type_data, void *indr_priv)
+{
+ struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
+ struct mlx5e_rep_priv *rpriv,
+ struct tc_block_offload *f)
+{
+ struct mlx5e_rep_indr_block_priv *indr_priv;
+ int err = 0;
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ if (indr_priv)
+ return -EEXIST;
+
+ indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
+ if (!indr_priv)
+ return -ENOMEM;
+
+ indr_priv->netdev = netdev;
+ indr_priv->rpriv = rpriv;
+ list_add(&indr_priv->list,
+ &rpriv->uplink_priv.tc_indr_block_priv_list);
+
+ err = tcf_block_cb_register(f->block,
+ mlx5e_rep_indr_setup_block_cb,
+ netdev, indr_priv, f->extack);
+ if (err) {
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+ }
+
+ return err;
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block,
+ mlx5e_rep_indr_setup_block_cb,
+ netdev);
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ if (indr_priv) {
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+ }
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static
+int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
+ struct net_device *netdev)
+{
+ int err;
+
+ err = __tc_indr_block_cb_register(netdev, rpriv,
+ mlx5e_rep_indr_setup_tc_cb,
+ netdev);
+ if (err) {
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+
+ mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
+ netdev_name(netdev), err);
+ }
+ return err;
+}
+
+static void mlx5e_rep_indr_unregister_block(struct net_device *netdev)
+{
+ __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
+ netdev);
+}
+
+static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
+ uplink_priv.netdevice_nb);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (!mlx5e_tc_tun_device_to_offload(priv, netdev))
+ return NOTIFY_OK;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ mlx5e_rep_indr_register_block(rpriv, netdev);
+ break;
+ case NETDEV_UNREGISTER:
+ mlx5e_rep_indr_unregister_block(netdev);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
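This block teaches the uplink representor to accept TC block binds for netdevices mlx5 does not own, typically tunnel devices, so flower rules installed on e.g. a VXLAN device can be offloaded into the eswitch with MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD. A rule of roughly this shape would flow through mlx5e_rep_indr_offload() (interface names hypothetical):

	tc qdisc add dev vxlan0 ingress
	tc filter add dev vxlan0 ingress protocol ip \
		flower enc_key_id 100 enc_dst_port 4789 \
		action tunnel_key unset \
		action mirred egress redirect dev pf0vf1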
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
struct mlx5e_neigh *m_neigh);
@@ -779,7 +1078,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
mlx5e_rep_neigh_entry_destroy(priv, nhe);
}
-static int mlx5e_rep_open(struct net_device *dev)
+static int mlx5e_vf_rep_open(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -801,7 +1100,7 @@ unlock:
return err;
}
-static int mlx5e_rep_close(struct net_device *dev)
+static int mlx5e_vf_rep_close(struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -838,24 +1137,14 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
{
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
- return mlx5e_configure_flower(priv, cls_flower, flags);
+ return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
+ flags);
case TC_CLSFLOWER_DESTROY:
- return mlx5e_delete_flower(priv, cls_flower, flags);
+ return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
+ flags);
case TC_CLSFLOWER_STATS:
- return mlx5e_stats_flower(priv, cls_flower, flags);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- struct mlx5e_priv *priv = cb_priv;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
+ return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
+ flags);
default:
return -EOPNOTSUPP;
}
@@ -868,7 +1157,8 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
switch (type) {
case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
+ MLX5E_TC_ESW_OFFLOAD);
default:
return -EOPNOTSUPP;
}
@@ -907,43 +1197,23 @@ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
if (!MLX5_ESWITCH_MANAGER(priv->mdev))
return false;
- rep = rpriv->rep;
- if (esw->mode == SRIOV_OFFLOADS &&
- rep && rep->vport == FDB_UPLINK_VPORT)
- return true;
-
- return false;
-}
-
-static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
-{
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep;
-
- if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+ if (!rpriv) /* non-vport rep mlx5e instances don't use this field */
return false;
rep = rpriv->rep;
- if (rep && rep->vport != FDB_UPLINK_VPORT)
- return true;
-
- return false;
+ return (rep->vport == FDB_UPLINK_VPORT);
}
-bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
+static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
- if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
return true;
}
@@ -969,8 +1239,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev,
return 0;
}
-int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
- void *sp)
+static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
+ void *sp)
{
switch (attr_id) {
case IFLA_OFFLOAD_XSTATS_CPU_HIT:
@@ -981,7 +1251,7 @@ int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
}
static void
-mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -990,37 +1260,93 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}
+static int mlx5e_vf_rep_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ return mlx5e_change_mtu(netdev, new_mtu, NULL);
+}
+
+static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
+}
+
+static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ return 0;
+}
+
static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
.switchdev_port_attr_get = mlx5e_attr_get,
};
-static int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu)
-{
- return mlx5e_change_mtu(netdev, new_mtu, NULL);
-}
+static const struct net_device_ops mlx5e_netdev_ops_vf_rep = {
+ .ndo_open = mlx5e_vf_rep_open,
+ .ndo_stop = mlx5e_vf_rep_close,
+ .ndo_start_xmit = mlx5e_xmit,
+ .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
+ .ndo_setup_tc = mlx5e_rep_setup_tc,
+ .ndo_get_stats64 = mlx5e_vf_rep_get_stats,
+ .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
+ .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
+ .ndo_change_mtu = mlx5e_vf_rep_change_mtu,
+};
-static const struct net_device_ops mlx5e_netdev_ops_rep = {
- .ndo_open = mlx5e_rep_open,
- .ndo_stop = mlx5e_rep_close,
+static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
+ .ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
.ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
.ndo_setup_tc = mlx5e_rep_setup_tc,
- .ndo_get_stats64 = mlx5e_rep_get_stats,
- .ndo_has_offload_stats = mlx5e_has_offload_stats,
- .ndo_get_offload_stats = mlx5e_get_offload_stats,
- .ndo_change_mtu = mlx5e_change_rep_mtu,
+ .ndo_get_stats64 = mlx5e_get_stats,
+ .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
+ .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
+ .ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
+ .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
+ .ndo_features_check = mlx5e_features_check,
+ .ndo_set_vf_mac = mlx5e_set_vf_mac,
+ .ndo_set_vf_rate = mlx5e_set_vf_rate,
+ .ndo_get_vf_config = mlx5e_get_vf_config,
+ .ndo_get_vf_stats = mlx5e_get_vf_stats,
};
-static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params, u16 mtu)
+bool mlx5e_eswitch_rep(struct net_device *netdev)
+{
+ if (netdev->netdev_ops == &mlx5e_netdev_ops_vf_rep ||
+ netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
+ return true;
+
+ return false;
+}
+
+static void mlx5e_build_rep_params(struct net_device *netdev)
{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_params *params;
+
u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ params = &priv->channels.params;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
- params->sw_mtu = mtu;
- params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
+ params->sw_mtu = netdev->mtu;
+
+ /* SQ */
+ if (rep->vport == FDB_UPLINK_VPORT)
+ params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ else
+ params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
/* RQ */
mlx5e_build_rq_params(mdev, params);
@@ -1034,24 +1360,38 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
/* RSS */
- mlx5e_build_rss_params(params);
+ mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}
static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_mtu;
- netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+ if (rep->vport == FDB_UPLINK_VPORT) {
+ SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev);
+ netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
+ /* we want a persistent MAC for the uplink rep */
+ mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
+ netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (MLX5_CAP_GEN(mdev, qos))
+ netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
+#endif
+ } else {
+ netdev->netdev_ops = &mlx5e_netdev_ops_vf_rep;
+ eth_hw_addr_random(netdev);
+ netdev->ethtool_ops = &mlx5e_vf_rep_ethtool_ops;
+ }
netdev->watchdog_timeo = 15 * HZ;
- netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
- netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
+ netdev->features |= NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
netdev->hw_features |= NETIF_F_HW_TC;
netdev->hw_features |= NETIF_F_SG;
@@ -1062,13 +1402,10 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_TSO6;
netdev->hw_features |= NETIF_F_RXCSUM;
- netdev->features |= netdev->hw_features;
-
- eth_hw_addr_random(netdev);
+ if (rep->vport != FDB_UPLINK_VPORT)
+ netdev->features |= NETIF_F_VLAN_CHALLENGED;
- netdev->min_mtu = ETH_MIN_MTU;
- mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+ netdev->features |= netdev->hw_features;
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
@@ -1083,11 +1420,9 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
if (err)
return err;
+ priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
- priv->channels.params.num_channels =
- mlx5e_get_netdev_max_channels(netdev);
-
- mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
+ mlx5e_build_rep_params(netdev);
mlx5e_build_rep_netdev(netdev);
mlx5e_timestamp_init(priv);
@@ -1210,94 +1545,173 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
- int err;
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ int tc, err;
err = mlx5e_create_tises(priv);
if (err) {
mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
return err;
}
- return 0;
-}
-static const struct mlx5e_profile mlx5e_rep_profile = {
- .init = mlx5e_init_rep,
- .cleanup = mlx5e_cleanup_rep,
- .init_rx = mlx5e_init_rep_rx,
- .cleanup_rx = mlx5e_cleanup_rep_rx,
- .init_tx = mlx5e_init_rep_tx,
- .cleanup_tx = mlx5e_cleanup_nic_tx,
- .update_stats = mlx5e_rep_update_hw_counters,
- .update_carrier = NULL,
- .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
- .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
- .max_tc = 1,
-};
+ if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
+ uplink_priv = &rpriv->uplink_priv;
-/* e-Switch vport representors */
+ /* init shared tc flow table */
+ err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ if (err)
+ goto destroy_tises;
+
+ /* init indirect block notifications */
+ INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
+ uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
+ err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
+ if (err) {
+ mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
+ goto tc_esw_cleanup;
+ }
+ }
-static int
-mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+ return 0;
+
+tc_esw_cleanup:
+ mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
+destroy_tises:
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
+ return err;
+}
+
+static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
- struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
- struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ int tc;
- int err;
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
+ mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- err = mlx5e_add_sqs_fwd_rules(priv);
- if (err)
- return err;
+ if (rpriv->rep->vport == FDB_UPLINK_VPORT) {
+ /* clean indirect TC block notifications */
+ unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
+ mlx5e_rep_indr_clean_block_privs(rpriv);
+
+ /* delete shared tc flow table */
+ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
}
+}
- err = mlx5e_rep_neigh_init(rpriv);
- if (err)
- goto err_remove_sqs;
+static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_mtu;
- /* init shared tc flow table */
- err = mlx5e_tc_esw_init(&rpriv->tc_ht);
- if (err)
- goto err_neigh_cleanup;
+ netdev->min_mtu = ETH_MIN_MTU;
+ mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+}
- return 0;
+static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
+ struct mlx5_eqe *eqe = data;
-err_neigh_cleanup:
- mlx5e_rep_neigh_cleanup(rpriv);
-err_remove_sqs:
- mlx5e_remove_sqs_fwd_rules(priv);
- return err;
+ if (event != MLX5_EVENT_TYPE_PORT_CHANGE)
+ return NOTIFY_DONE;
+
+ switch (eqe->sub_type) {
+ case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ queue_work(priv->wq, &priv->update_carrier_work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
}
-static void
-mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
+static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
- struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
- struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_mtu;
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_remove_sqs_fwd_rules(priv);
+ netdev->min_mtu = ETH_MIN_MTU;
+ mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
+ netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+ mlx5e_set_dev_port_mtu(priv);
+
+ mlx5_lag_add(mdev, netdev);
+ priv->events_nb.notifier_call = uplink_rep_async_event;
+ mlx5_notifier_register(mdev, &priv->events_nb);
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_initialize(priv);
+ mlx5e_dcbnl_init_app(priv);
+#endif
+}
- /* clean uplink offloaded TC rules, delete shared tc flow table */
- mlx5e_tc_esw_cleanup(&rpriv->tc_ht);
+static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
- mlx5e_rep_neigh_cleanup(rpriv);
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_delete_app(priv);
+#endif
+ mlx5_notifier_unregister(mdev, &priv->events_nb);
+ mlx5_lag_remove(mdev);
}
+static const struct mlx5e_profile mlx5e_vf_rep_profile = {
+ .init = mlx5e_init_rep,
+ .cleanup = mlx5e_cleanup_rep,
+ .init_rx = mlx5e_init_rep_rx,
+ .cleanup_rx = mlx5e_cleanup_rep_rx,
+ .init_tx = mlx5e_init_rep_tx,
+ .cleanup_tx = mlx5e_cleanup_rep_tx,
+ .enable = mlx5e_vf_rep_enable,
+ .update_stats = mlx5e_vf_rep_update_hw_counters,
+ .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .max_tc = 1,
+};
+
+static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
+ .init = mlx5e_init_rep,
+ .cleanup = mlx5e_cleanup_rep,
+ .init_rx = mlx5e_init_rep_rx,
+ .cleanup_rx = mlx5e_cleanup_rep_rx,
+ .init_tx = mlx5e_init_rep_tx,
+ .cleanup_tx = mlx5e_cleanup_rep_tx,
+ .enable = mlx5e_uplink_rep_enable,
+ .disable = mlx5e_uplink_rep_disable,
+ .update_stats = mlx5e_uplink_rep_update_hw_counters,
+ .update_carrier = mlx5e_update_carrier,
+ .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
+ .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .max_tc = MLX5E_MAX_NUM_TC,
+};
+
+/* e-Switch vport representors */
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_rep_priv *uplink_rpriv;
+ const struct mlx5e_profile *profile;
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
- struct mlx5e_priv *upriv;
int nch, err;
rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
if (!rpriv)
return -ENOMEM;
+ /* rpriv->rep to be looked up when profile->init() is called */
+ rpriv->rep = rep;
+
nch = mlx5e_get_max_num_channels(dev);
- netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, nch, rpriv);
+ profile = (rep->vport == FDB_UPLINK_VPORT) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile;
+ netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
if (!netdev) {
pr_warn("Failed to create representor netdev for vport %d\n",
rep->vport);
@@ -1306,15 +1720,20 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
}
rpriv->netdev = netdev;
- rpriv->rep = rep;
rep->rep_if[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+ if (rep->vport == FDB_UPLINK_VPORT) {
+ err = mlx5e_create_mdev_resources(dev);
+ if (err)
+ goto err_destroy_netdev;
+ }
+
err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
pr_warn("Failed to attach representor netdev for vport %d\n",
rep->vport);
- goto err_destroy_netdev;
+ goto err_destroy_mdev_resources;
}
err = mlx5e_rep_neigh_init(rpriv);
@@ -1324,32 +1743,25 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
- upriv = netdev_priv(uplink_rpriv->netdev);
- err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
- upriv);
- if (err)
- goto err_neigh_cleanup;
-
err = register_netdev(netdev);
if (err) {
pr_warn("Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_egdev_cleanup;
+ goto err_neigh_cleanup;
}
return 0;
-err_egdev_cleanup:
- tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
- upriv);
-
err_neigh_cleanup:
mlx5e_rep_neigh_cleanup(rpriv);
err_detach_netdev:
mlx5e_detach_netdev(netdev_priv(netdev));
+err_destroy_mdev_resources:
+ if (rep->vport == FDB_UPLINK_VPORT)
+ mlx5e_destroy_mdev_resources(dev);
+
err_destroy_netdev:
mlx5e_destroy_netdev(netdev_priv(netdev));
kfree(rpriv);
@@ -1362,18 +1774,13 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *uplink_rpriv;
void *ppriv = priv->ppriv;
- struct mlx5e_priv *upriv;
unregister_netdev(netdev);
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
- REP_ETH);
- upriv = netdev_priv(uplink_rpriv->netdev);
- tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
- upriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
+ if (rep->vport == FDB_UPLINK_VPORT)
+ mlx5e_destroy_mdev_resources(priv->mdev);
mlx5e_destroy_netdev(priv);
kfree(ppriv); /* mlx5e_rep_priv */
}
@@ -1387,14 +1794,13 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
return rpriv->netdev;
}
-static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
+void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
int vport;
- for (vport = 1; vport < total_vfs; vport++) {
+ for (vport = 0; vport < total_vfs; vport++) {
struct mlx5_eswitch_rep_if rep_if = {};
rep_if.load = mlx5e_vport_rep_load;
@@ -1404,55 +1810,12 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
}
}
-static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
+void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
int vport;
- for (vport = 1; vport < total_vfs; vport++)
+ for (vport = total_vfs - 1; vport >= 0; vport--)
mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}
-
-void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5_eswitch_rep_if rep_if;
- struct mlx5e_rep_priv *rpriv;
-
- rpriv = priv->ppriv;
- rpriv->netdev = priv->netdev;
-
- rep_if.load = mlx5e_nic_rep_load;
- rep_if.unload = mlx5e_nic_rep_unload;
- rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
- rep_if.priv = rpriv;
- INIT_LIST_HEAD(&rpriv->vport_sqs_list);
- mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/
-
- mlx5e_rep_register_vf_vports(priv); /* VFs vports */
-}
-
-void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
-
- mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
- mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF*/
-}
-
-void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
-{
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
- struct mlx5e_rep_priv *rpriv;
-
- rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
- if (!rpriv)
- return NULL;
-
- rpriv->rep = &esw->offloads.vport_reps[0];
- return rpriv;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 844d32d5c29f..edd722824697 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -53,13 +53,33 @@ struct mlx5e_neigh_update_table {
unsigned long min_interval; /* jiffies */
};
+struct mlx5_rep_uplink_priv {
+ /* Filters DB - instantiated by the uplink representor and shared by
+ * the uplink's VFs
+ */
+ struct rhashtable tc_ht;
+
+ /* indirect block callbacks are invoked on bind/unbind events
+ * on registered higher-level devices (e.g. tunnel devices)
+ *
+ * tc_indr_block_priv_list is used to look up indirect callback
+ * private data
+ *
+ * netdevice_nb is the netdev events notifier - used to register
+ * tunnel devices for block events
+ */
+ struct list_head tc_indr_block_priv_list;
+ struct notifier_block netdevice_nb;
+};
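For illustration, a plausible implementation of the lookup helper that the bind/unbind handlers earlier in this patch call against tc_indr_block_priv_list; the body of mlx5e_rep_indr_block_priv_lookup() is not part of these hunks, so this is a sketch that assumes RTNL serializes list access:

static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
				 struct net_device *netdev)
{
	struct mlx5e_rep_indr_block_priv *cb_priv;

	/* sketch: callback list access assumed to be protected by RTNL */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv,
			    &rpriv->uplink_priv.tc_indr_block_priv_list,
			    list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}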
+
struct mlx5e_rep_priv {
struct mlx5_eswitch_rep *rep;
struct mlx5e_neigh_update_table neigh_update;
struct net_device *netdev;
struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list;
- struct rhashtable tc_ht; /* valid for uplink rep */
+ struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
};
static inline
@@ -129,6 +149,8 @@ struct mlx5e_encap_entry {
struct net_device *out_dev;
int tunnel_type;
+ int tunnel_hlen;
+ int reformat_type;
u8 flags;
char *encap_header;
int encap_size;
@@ -140,16 +162,12 @@ struct mlx5e_rep_sq {
};
void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
-void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
-void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
+void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev);
+void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev);
bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
-int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, void *sp);
-bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
-
-int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
@@ -158,12 +176,17 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e);
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
+
+bool mlx5e_eswitch_rep(struct net_device *netdev);
+
#else /* CONFIG_MLX5_ESWITCH */
-static inline void mlx5e_register_vport_reps(struct mlx5e_priv *priv) {}
-static inline void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) {}
static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {}
#endif
+static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv)
+{
+ return (MLX5_ESWITCH_MANAGER(priv->mdev) && priv->ppriv);
+}
#endif /* __MLX5E_REP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 16985ca3248d..1d0bb5ff8c26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -554,9 +554,9 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
mlx5_cqwq_pop(&cq->wq);
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
+ "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
return;
}
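The conversion above, and the matching ones in the hunks below, replace the open-coded opcode extraction with get_cqe_opcode(); the helper is introduced elsewhere in the series, but presumably it just wraps the same shift it replaces:

/* sketch: assumed body of the helper, matching the expression it
 * replaces in these hunks
 */
static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
{
	return cqe->op_own >> 4;
}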
@@ -724,9 +724,9 @@ static u32 mlx5e_get_fcs(const struct sk_buff *skb)
return __get_unaligned_cpu32(fcs_bytes);
}
-static u8 get_ip_proto(struct sk_buff *skb, __be16 proto)
+static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
{
- void *ip_p = skb->data + sizeof(struct ethhdr);
+ void *ip_p = skb->data + network_depth;
return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
((struct ipv6hdr *)ip_p)->nexthdr;
@@ -755,7 +755,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
goto csum_unnecessary;
if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
- if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP))
+ if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
goto csum_unnecessary;
skb->ip_summed = CHECKSUM_COMPLETE;
@@ -898,7 +898,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
prefetchw(va); /* xdp_frame data area */
prefetch(data);
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
rq->stats->wqe_err++;
return NULL;
}
@@ -930,7 +930,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
u16 byte_cnt = cqe_bcnt - headlen;
struct sk_buff *skb;
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
rq->stats->wqe_err++;
return NULL;
}
@@ -1154,7 +1154,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
wi->consumed_strides += cstrides;
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
rq->stats->wqe_err++;
goto mpwrq_cqe_out;
}
@@ -1190,7 +1190,6 @@ mpwrq_cqe_out:
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
- struct mlx5e_xdpsq *xdpsq;
struct mlx5_cqe64 *cqe;
int work_done = 0;
@@ -1201,10 +1200,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
cqe = mlx5_cqwq_get_cqe(&cq->wq);
- if (!cqe)
+ if (!cqe) {
+ if (unlikely(work_done))
+ goto out;
return 0;
-
- xdpsq = &rq->xdpsq;
+ }
do {
if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
@@ -1219,15 +1219,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
rq->handle_rx_cqe(rq, cqe);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
- if (xdpsq->doorbell) {
- mlx5e_xmit_xdp_doorbell(xdpsq);
- xdpsq->doorbell = false;
- }
-
- if (xdpsq->redirect_flush) {
- xdp_do_flush_map();
- xdpsq->redirect_flush = false;
- }
+out:
+ if (rq->xdp_prog)
+ mlx5e_xdp_rx_poll_complete(rq);
mlx5_cqwq_update_db_record(&cq->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 3e99d0728b2f..d3fe48ff9da9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include "lib/mlx5.h"
#include "en.h"
#include "en_accel/ipsec.h"
#include "en_accel/tls.h"
@@ -74,7 +75,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_recover) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
@@ -198,7 +198,6 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->tx_nop += sq_stats->nop;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
- s->tx_udp_seg_rem += sq_stats->udp_seg_rem;
s->tx_queue_dropped += sq_stats->dropped;
s->tx_cqe_err += sq_stats->cqe_err;
s->tx_recover += sq_stats->recover;
@@ -482,7 +481,10 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
return idx;
}
-static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
+#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
+ (MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
+
+void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
{
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -490,6 +492,9 @@ static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
void *out;
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+ return;
+
MLX5_SET(ppcnt_reg, in, local_port, 1);
out = pstats->IEEE_802_3_counters;
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
@@ -602,6 +607,9 @@ static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
void *out;
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+ return;
+
MLX5_SET(ppcnt_reg, in, local_port, 1);
out = pstats->RFC_2819_counters;
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
@@ -936,7 +944,7 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
};
static const struct counter_desc pport_pfc_stall_stats_desc[] = {
- { "tx_pause_storm_warning_events ", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
+ { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) },
{ "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) },
};
@@ -1077,6 +1085,9 @@ static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
int prio;
void *out;
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+ return;
+
MLX5_SET(ppcnt_reg, in, local_port, 1);
MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
@@ -1088,13 +1099,13 @@ static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
}
static const struct counter_desc mlx5e_pme_status_desc[] = {
- { "module_unplug", 8 },
+ { "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
};
static const struct counter_desc mlx5e_pme_error_desc[] = {
- { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
- { "module_high_temp", 48 }, /* high temperature */
- { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
+ { "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
+ { "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
+ { "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
};
#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
@@ -1122,15 +1133,17 @@ static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
{
- struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
+ struct mlx5_pme_stats pme_stats;
int i;
+ mlx5_get_pme_stats(priv->mdev, &pme_stats);
+
for (i = 0; i < NUM_PME_STATUS_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
+ data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
mlx5e_pme_status_desc, i);
for (i = 0; i < NUM_PME_ERR_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
+ data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
mlx5e_pme_error_desc, i);
return idx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 3f8e870ef4c9..fe91ec06e3c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -87,7 +87,6 @@ struct mlx5e_sw_stats {
u64 tx_recover;
u64 tx_cqes;
u64 tx_queue_wake;
- u64 tx_udp_seg_rem;
u64 tx_cqe_err;
u64 tx_xdp_xmit;
u64 tx_xdp_full;
@@ -221,7 +220,6 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
- u64 udp_seg_rem;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_ooo;
u64 tls_resync_bytes;
@@ -280,5 +278,6 @@ extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
extern const int mlx5e_num_stats_grps;
void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv);
+void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv);
#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fca6f4132c91..cae6c6d48984 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -44,15 +44,15 @@
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
-#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
-#include "lib/vxlan.h"
#include "fs_core.h"
#include "en/port.h"
+#include "en/tc_tun.h"
+#include "lib/devcom.h"
struct mlx5_nic_flow_attr {
u32 action;
@@ -69,25 +69,54 @@ struct mlx5_nic_flow_attr {
enum {
MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS,
MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS,
- MLX5E_TC_FLOW_ESWITCH = BIT(MLX5E_TC_FLOW_BASE),
- MLX5E_TC_FLOW_NIC = BIT(MLX5E_TC_FLOW_BASE + 1),
- MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
- MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3),
- MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
- MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 5),
+ MLX5E_TC_FLOW_ESWITCH = MLX5E_TC_ESW_OFFLOAD,
+ MLX5E_TC_FLOW_NIC = MLX5E_TC_NIC_OFFLOAD,
+ MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE),
+ MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 1),
+ MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2),
+ MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 3),
+ MLX5E_TC_FLOW_DUP = BIT(MLX5E_TC_FLOW_BASE + 4),
};
#define MLX5E_TC_MAX_SPLITS 1
+/* Helper struct for accessing a struct containing a list_head array.
+ * Containing struct
+ * |- Helper array
+ * [0] Helper item 0
+ * |- list_head item 0
+ * |- index (0)
+ * [1] Helper item 1
+ * |- list_head item 1
+ * |- index (1)
+ * To access the containing struct from one of the list_head items:
+ * 1. Get the helper item from the list_head item using
+ * helper item =
+ * container_of(list_head item, helper struct type, list_head field)
+ * 2. Get the containing struct from the helper item and its index in the array:
+ * containing struct =
+ * container_of(helper item, containing struct type, helper field[index])
+ */
+struct encap_flow_item {
+ struct list_head list;
+ int index;
+};
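A worked sketch of the two-step recovery the comment above spells out. The helper name flow_from_encap_entry is hypothetical, it assumes the list_head handed in is embedded in some flow's encaps[i], and it would have to follow the struct definitions below to compile; this is exactly the container_of pattern the encap add/del paths in this patch apply:

/* sketch: recover the owning mlx5e_tc_flow from one of its
 * encaps[] list_head items
 */
static struct mlx5e_tc_flow *
flow_from_encap_entry(struct list_head *item)
{
	/* step 1: list_head item -> helper item */
	struct encap_flow_item *efi =
		container_of(item, struct encap_flow_item, list);

	/* step 2: helper item + its array index -> containing struct */
	return container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
}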
+
struct mlx5e_tc_flow {
struct rhash_head node;
struct mlx5e_priv *priv;
u64 cookie;
u16 flags;
struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
- struct list_head encap; /* flows sharing the same encap ID */
+ /* Flow can be associated with multiple encap IDs.
+ * The number of encaps is bounded by the number of supported
+ * destinations.
+ */
+ struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5e_tc_flow *peer_flow;
struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
struct list_head hairpin; /* flows sharing the same hairpin */
+ struct list_head peer; /* flows with peer flow */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
@@ -95,11 +124,12 @@ struct mlx5e_tc_flow {
};
struct mlx5e_tc_flow_parse_attr {
- struct ip_tunnel_info tun_info;
+ struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct net_device *filter_dev;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
void *mod_hdr_actions;
- int mirred_ifindex;
+ int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
@@ -316,7 +346,7 @@ static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
for (i = 0; i < sz; i++) {
ix = i;
- if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
+ if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, ilog2(sz));
ix = indirection_rqt[ix];
rqn = hp->pair->rqn[ix];
@@ -360,13 +390,15 @@ static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
void *tirc;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);
+
memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
- mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
+ mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
err = mlx5_core_create_tir(hp->func_mdev, in,
MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
@@ -569,7 +601,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
- int peer_ifindex = parse_attr->mirred_ifindex;
+ int peer_ifindex = parse_attr->mirred_ifindex[0];
struct mlx5_hairpin_params params;
struct mlx5_core_dev *peer_mdev;
struct mlx5e_hairpin_entry *hpe;
@@ -802,7 +834,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
mlx5_del_flow_rules(flow->rule[0]);
mlx5_fc_destroy(priv->mdev, counter);
- if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
+ if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
}
@@ -815,14 +847,15 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow);
+ struct mlx5e_tc_flow *flow, int out_index);
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack);
+ struct netlink_ext_ack *extack,
+ int out_index);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
@@ -836,7 +869,7 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
if (IS_ERR(rule))
return rule;
- if (attr->mirror_count) {
+ if (attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
if (IS_ERR(flow->rule[1])) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
@@ -855,7 +888,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
{
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- if (attr->mirror_count)
+ if (attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
@@ -870,9 +903,9 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule;
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
- slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- slow_attr->mirror_count = 0,
- slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN,
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->split_count = 0;
+ slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
@@ -887,6 +920,9 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *slow_attr)
{
memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->split_count = 0;
+ slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow->flags &= ~MLX5E_TC_FLOW_SLOW;
}
@@ -906,12 +942,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
int err = 0, encap_err = 0;
+ int out_index;
- /* if prios are not supported, keep the old behaviour of using same prio
- * for all offloaded rules.
- */
- if (!mlx5_eswitch_prios_supported(esw))
- attr->prio = 1;
+ if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
+ NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
+ return -EOPNOTSUPP;
+ }
if (attr->chain > max_chain) {
NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
@@ -925,20 +961,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
goto err_max_prio_chain;
}
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ int mirred_ifindex;
+
+ if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+
+ mirred_ifindex = attr->parse_attr->mirred_ifindex[out_index];
out_dev = __dev_get_by_index(dev_net(priv->netdev),
- attr->parse_attr->mirred_ifindex);
- encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
- out_dev, &encap_dev, flow,
- extack);
- if (encap_err && encap_err != -EAGAIN) {
- err = encap_err;
+ mirred_ifindex);
+ err = mlx5e_attach_encap(priv,
+ &parse_attr->tun_info[out_index],
+ out_dev, &encap_dev, flow,
+ extack, out_index);
+ if (err && err != -EAGAIN)
goto err_attach_encap;
- }
+ if (err == -EAGAIN)
+ encap_err = err;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
- attr->out_rep[attr->out_count] = rpriv->rep;
- attr->out_mdev[attr->out_count++] = out_priv->mdev;
+ attr->dests[out_index].rep = rpriv->rep;
+ attr->dests[out_index].mdev = out_priv->mdev;
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
@@ -953,7 +996,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(esw->dev, true);
+ counter = mlx5_fc_create(attr->counter_dev, true);
if (IS_ERR(counter)) {
err = PTR_ERR(counter);
goto err_create_counter;
@@ -982,15 +1025,16 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return 0;
err_add_rule:
- mlx5_fc_destroy(esw->dev, counter);
+ mlx5_fc_destroy(attr->counter_dev, counter);
err_create_counter:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
- mlx5e_detach_encap(priv, flow);
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+ if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+ mlx5e_detach_encap(priv, flow, out_index);
err_attach_encap:
err_max_prio_chain:
return err;
@@ -1002,6 +1046,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5_esw_flow_attr slow_attr;
+ int out_index;
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
if (flow->flags & MLX5E_TC_FLOW_SLOW)
@@ -1012,16 +1057,16 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_eswitch_del_vlan_action(esw, attr);
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
- mlx5e_detach_encap(priv, flow);
- kvfree(attr->parse_attr);
- }
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+ if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+ mlx5e_detach_encap(priv, flow, out_index);
+ kvfree(attr->parse_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
- mlx5_fc_destroy(esw->dev, attr->counter);
+ mlx5_fc_destroy(attr->counter_dev, attr->counter);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -1031,10 +1076,12 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr slow_attr, *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
+ struct encap_flow_item *efi;
struct mlx5e_tc_flow *flow;
int err;
- err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+ err = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
e->encap_size, e->encap_header,
MLX5_FLOW_NAMESPACE_FDB,
&e->encap_id);
@@ -1046,11 +1093,31 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(priv);
- list_for_each_entry(flow, &e->flows, encap) {
+ list_for_each_entry(efi, &e->flows, list) {
+ bool all_flow_encaps_valid = true;
+ int i;
+
+ flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
esw_attr = flow->esw_attr;
- esw_attr->encap_id = e->encap_id;
spec = &esw_attr->parse_attr->spec;
+ esw_attr->dests[efi->index].encap_id = e->encap_id;
+ esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ /* Flow can be associated with multiple encap entries.
+ * Before offloading the flow, verify that all of them have
+ * a valid neighbour.
+ */
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+ if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
+ all_flow_encaps_valid = false;
+ break;
+ }
+ }
+ /* Do not offload flows with unresolved neighbors */
+ if (!all_flow_encaps_valid)
+ continue;
/* update from slow path rule to encap rule */
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
if (IS_ERR(rule)) {
@@ -1073,14 +1140,18 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr slow_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
+ struct encap_flow_item *efi;
struct mlx5e_tc_flow *flow;
int err;
- list_for_each_entry(flow, &e->flows, encap) {
+ list_for_each_entry(efi, &e->flows, list) {
+ flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
spec = &flow->esw_attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
+ /* mark the flow's encap dest as non-valid */
+ flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1094,10 +1165,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
flow->rule[0] = rule;
}
- if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
- e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
- mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
- }
+ /* we know that the encap is valid */
+ e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+ mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
@@ -1129,9 +1199,12 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
return;
list_for_each_entry(e, &nhe->encap_list, encap_list) {
+ struct encap_flow_item *efi;
if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
continue;
- list_for_each_entry(flow, &e->flows, encap) {
+ list_for_each_entry(efi, &e->flows, list) {
+ flow = container_of(efi, struct mlx5e_tc_flow,
+ encaps[efi->index]);
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
counter = mlx5e_tc_get_counter(flow);
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
@@ -1161,11 +1234,11 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow)
+ struct mlx5e_tc_flow *flow, int out_index)
{
- struct list_head *next = flow->encap.next;
+ struct list_head *next = flow->encaps[out_index].list.next;
- list_del(&flow->encap);
+ list_del(&flow->encaps[out_index].list);
if (list_empty(next)) {
struct mlx5e_encap_entry *e;
@@ -1181,49 +1254,55 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
}
}
-static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow)
+static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
- if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
- mlx5e_tc_del_fdb_flow(priv, flow);
- else
- mlx5e_tc_del_nic_flow(priv, flow);
+ struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
+
+ if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
+ !(flow->flags & MLX5E_TC_FLOW_DUP))
+ return;
+
+ mutex_lock(&esw->offloads.peer_mutex);
+ list_del(&flow->peer);
+ mutex_unlock(&esw->offloads.peer_mutex);
+
+ flow->flags &= ~MLX5E_TC_FLOW_DUP;
+
+ mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
+ kvfree(flow->peer_flow);
+ flow->peer_flow = NULL;
}
-static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f)
+static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
- void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- outer_headers);
- void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- outer_headers);
- void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters);
- void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters);
+ struct mlx5_core_dev *dev = flow->priv->mdev;
+ struct mlx5_devcom *devcom = dev->priv.devcom;
+ struct mlx5_eswitch *peer_esw;
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
+ peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ if (!peer_esw)
+ return;
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
- struct flow_dissector_key_keyid *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->key);
- struct flow_dissector_key_keyid *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_KEYID,
- f->mask);
- MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
- be32_to_cpu(mask->keyid));
- MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
- be32_to_cpu(key->keyid));
+ __mlx5e_tc_del_fdb_peer_flow(flow);
+ mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+}
+
+static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
+ mlx5e_tc_del_fdb_peer_flow(flow);
+ mlx5e_tc_del_fdb_flow(priv, flow);
+ } else {
+ mlx5e_tc_del_nic_flow(priv, flow);
}
}
+
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f,
+ struct net_device *filter_dev)
{
struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1235,48 +1314,14 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
f->key);
+ int err = 0;
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
- struct flow_dissector_key_ports *key =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- f->key);
- struct flow_dissector_key_ports *mask =
- skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_ENC_PORTS,
- f->mask);
-
- /* Full udp dst port must be given */
- if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
- goto vxlan_match_offload_err;
-
- if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) &&
- MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
- parse_vxlan_attr(spec, f);
- else {
- NL_SET_ERR_MSG_MOD(extack,
- "port isn't an offloaded vxlan udp dport");
- netdev_warn(priv->netdev,
- "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
- return -EOPNOTSUPP;
- }
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- udp_dport, ntohs(mask->dst));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- udp_dport, ntohs(key->dst));
-
- MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- udp_sport, ntohs(mask->src));
- MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- udp_sport, ntohs(key->src));
- } else { /* udp dst port must be given */
-vxlan_match_offload_err:
+ err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
+ headers_c, headers_v);
+ if (err) {
NL_SET_ERR_MSG_MOD(extack,
- "IP tunnel decap offload supported only for vxlan, must set UDP dport");
- netdev_warn(priv->netdev,
- "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
- return -EOPNOTSUPP;
+ "failed to parse tunnel attributes");
+ return err;
}
if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -1380,6 +1425,7 @@ vxlan_match_offload_err:
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
+ struct net_device *filter_dev,
u8 *match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
@@ -1431,7 +1477,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
switch (key->addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
- if (parse_tunnel_attr(priv, spec, f))
+ if (parse_tunnel_attr(priv, spec, f, filter_dev))
return -EOPNOTSUPP;
break;
default:
@@ -1773,7 +1819,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct tc_cls_flower_offload *f)
+ struct tc_cls_flower_offload *f,
+ struct net_device *filter_dev)
{
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_core_dev *dev = priv->mdev;
@@ -1783,7 +1830,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
u8 match_level;
int err;
- err = __parse_cls_flower(priv, spec, f, &match_level);
+ err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level);
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
@@ -2136,7 +2183,6 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
{
const struct tc_action *a;
bool modify_ip_header;
- LIST_HEAD(actions);
u8 htype, ip_proto;
void *headers_v;
u16 ethertype;
@@ -2225,7 +2271,6 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
{
struct mlx5_nic_flow_attr *attr = flow->nic_attr;
const struct tc_action *a;
- LIST_HEAD(actions);
u32 action = 0;
int err, i;
@@ -2268,7 +2313,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
same_hw_devs(priv, netdev_priv(peer_dev))) {
- parse_attr->mirred_ifindex = peer_dev->ifindex;
+ parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
@@ -2317,45 +2362,6 @@ static inline int hash_encap_info(struct ip_tunnel_key *key)
return jhash(key, sizeof(*key), 0);
}
-static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct flowi4 *fl4,
- struct neighbour **out_n,
- u8 *out_ttl)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct rtable *rt;
- struct neighbour *n = NULL;
-
-#if IS_ENABLED(CONFIG_INET)
- int ret;
-
- rt = ip_route_output_key(dev_net(mirred_dev), fl4);
- ret = PTR_ERR_OR_ZERO(rt);
- if (ret)
- return ret;
-#else
- return -EOPNOTSUPP;
-#endif
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- /* if the egress device isn't on the same HW e-switch, we use the uplink */
- if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
- *out_dev = uplink_rpriv->netdev;
- else
- *out_dev = rt->dst.dev;
-
- if (!(*out_ttl))
- *out_ttl = ip4_dst_hoplimit(&rt->dst);
- n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
- ip_rt_put(rt);
- if (!n)
- return -ENOMEM;
-
- *out_n = n;
- return 0;
-}
static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
struct net_device *peer_netdev)
@@ -2371,377 +2377,24 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
(peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
}
-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct flowi6 *fl6,
- struct neighbour **out_n,
- u8 *out_ttl)
-{
- struct neighbour *n = NULL;
- struct dst_entry *dst;
-
-#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
- struct mlx5e_rep_priv *uplink_rpriv;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- int ret;
-
- ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
- fl6);
- if (ret < 0)
- return ret;
-
- if (!(*out_ttl))
- *out_ttl = ip6_dst_hoplimit(dst);
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- /* if the egress device isn't on the same HW e-switch, we use the uplink */
- if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
- *out_dev = uplink_rpriv->netdev;
- else
- *out_dev = dst->dev;
-#else
- return -EOPNOTSUPP;
-#endif
-
- n = dst_neigh_lookup(dst, &fl6->daddr);
- dst_release(dst);
- if (!n)
- return -ENOMEM;
-
- *out_n = n;
- return 0;
-}
-
-static void gen_vxlan_header_ipv4(struct net_device *out_dev,
- char buf[], int encap_size,
- unsigned char h_dest[ETH_ALEN],
- u8 tos, u8 ttl,
- __be32 daddr,
- __be32 saddr,
- __be16 udp_dst_port,
- __be32 vx_vni)
-{
- struct ethhdr *eth = (struct ethhdr *)buf;
- struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
- struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
- struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
-
- memset(buf, 0, encap_size);
-
- ether_addr_copy(eth->h_dest, h_dest);
- ether_addr_copy(eth->h_source, out_dev->dev_addr);
- eth->h_proto = htons(ETH_P_IP);
-
- ip->daddr = daddr;
- ip->saddr = saddr;
-
- ip->tos = tos;
- ip->ttl = ttl;
- ip->protocol = IPPROTO_UDP;
- ip->version = 0x4;
- ip->ihl = 0x5;
-
- udp->dest = udp_dst_port;
- vxh->vx_flags = VXLAN_HF_VNI;
- vxh->vx_vni = vxlan_vni_field(vx_vni);
-}
-
-static void gen_vxlan_header_ipv6(struct net_device *out_dev,
- char buf[], int encap_size,
- unsigned char h_dest[ETH_ALEN],
- u8 tos, u8 ttl,
- struct in6_addr *daddr,
- struct in6_addr *saddr,
- __be16 udp_dst_port,
- __be32 vx_vni)
-{
- struct ethhdr *eth = (struct ethhdr *)buf;
- struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
- struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
- struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
-
- memset(buf, 0, encap_size);
-
- ether_addr_copy(eth->h_dest, h_dest);
- ether_addr_copy(eth->h_source, out_dev->dev_addr);
- eth->h_proto = htons(ETH_P_IPV6);
-
- ip6_flow_hdr(ip6h, tos, 0);
- /* the HW fills in the IPv6 payload length */
- ip6h->nexthdr = IPPROTO_UDP;
- ip6h->hop_limit = ttl;
- ip6h->daddr = *daddr;
- ip6h->saddr = *saddr;
-
- udp->dest = udp_dst_port;
- vxh->vx_flags = VXLAN_HF_VNI;
- vxh->vx_vni = vxlan_vni_field(vx_vni);
-}
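Both header generators write the stack back to back into one flat buffer, so the fixed encap sizes checked against max_encap_header_size below are plain sums; VXLAN_HLEN already covers the UDP and VXLAN headers. The arithmetic, using the same expressions as this file:

/* Buffer layout written by gen_vxlan_header_ipv{4,6}():
 *   [ethhdr][iphdr | ipv6hdr][udphdr][vxlanhdr]
 * VXLAN_HLEN == sizeof(struct udphdr) + sizeof(struct vxlanhdr) == 16
 */
int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;   /* 14 + 20 + 16 = 50 */
int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN; /* 14 + 40 + 16 = 70 */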
-
-static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct mlx5e_encap_entry *e)
-{
- int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
- int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
- struct ip_tunnel_key *tun_key = &e->tun_info.key;
- struct net_device *out_dev;
- struct neighbour *n = NULL;
- struct flowi4 fl4 = {};
- u8 nud_state, tos, ttl;
- char *encap_header;
- int err;
-
- if (max_encap_size < ipv4_encap_size) {
- mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
- ipv4_encap_size, max_encap_size);
- return -EOPNOTSUPP;
- }
-
- encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
-
- switch (e->tunnel_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- fl4.flowi4_proto = IPPROTO_UDP;
- fl4.fl4_dport = tun_key->tp_dst;
- break;
- default:
- err = -EOPNOTSUPP;
- goto free_encap;
- }
-
- tos = tun_key->tos;
- ttl = tun_key->ttl;
-
- fl4.flowi4_tos = tun_key->tos;
- fl4.daddr = tun_key->u.ipv4.dst;
- fl4.saddr = tun_key->u.ipv4.src;
-
- err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
- &fl4, &n, &ttl);
- if (err)
- goto free_encap;
-
- /* used by mlx5e_detach_encap to look up the entry in the neigh
- * hash table when a user deletes a rule
- */
- e->m_neigh.dev = n->dev;
- e->m_neigh.family = n->ops->family;
- memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
- e->out_dev = out_dev;
-
- /* It's important to add the neigh to the hash table before checking
- * its validity state, so that if we get a notification when the neigh
- * changes its validity state, we can find the relevant neigh in the
- * hash.
- */
- err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
- if (err)
- goto free_encap;
-
- read_lock_bh(&n->lock);
- nud_state = n->nud_state;
- ether_addr_copy(e->h_dest, n->ha);
- read_unlock_bh(&n->lock);
-
- switch (e->tunnel_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- gen_vxlan_header_ipv4(out_dev, encap_header,
- ipv4_encap_size, e->h_dest, tos, ttl,
- fl4.daddr,
- fl4.saddr, tun_key->tp_dst,
- tunnel_id_to_key32(tun_key->tun_id));
- break;
- default:
- err = -EOPNOTSUPP;
- goto destroy_neigh_entry;
- }
- e->encap_size = ipv4_encap_size;
- e->encap_header = encap_header;
-
- if (!(nud_state & NUD_VALID)) {
- neigh_event_send(n, NULL);
- err = -EAGAIN;
- goto out;
- }
-
- err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
- ipv4_encap_size, encap_header,
- MLX5_FLOW_NAMESPACE_FDB,
- &e->encap_id);
- if (err)
- goto destroy_neigh_entry;
-
- e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
- return err;
-
-destroy_neigh_entry:
- mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-free_encap:
- kfree(encap_header);
-out:
- if (n)
- neigh_release(n);
- return err;
-}
-
-static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
- struct net_device *mirred_dev,
- struct mlx5e_encap_entry *e)
-{
- int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
- int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
- struct ip_tunnel_key *tun_key = &e->tun_info.key;
- struct net_device *out_dev;
- struct neighbour *n = NULL;
- struct flowi6 fl6 = {};
- u8 nud_state, tos, ttl;
- char *encap_header;
- int err;
-
- if (max_encap_size < ipv6_encap_size) {
- mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
- ipv6_encap_size, max_encap_size);
- return -EOPNOTSUPP;
- }
-
- encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
- if (!encap_header)
- return -ENOMEM;
-
- switch (e->tunnel_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- fl6.flowi6_proto = IPPROTO_UDP;
- fl6.fl6_dport = tun_key->tp_dst;
- break;
- default:
- err = -EOPNOTSUPP;
- goto free_encap;
- }
-
- tos = tun_key->tos;
- ttl = tun_key->ttl;
- fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
- fl6.daddr = tun_key->u.ipv6.dst;
- fl6.saddr = tun_key->u.ipv6.src;
-
- err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
- &fl6, &n, &ttl);
- if (err)
- goto free_encap;
-
- /* used by mlx5e_detach_encap to look up the entry in the neigh
- * hash table when a user deletes a rule
- */
- e->m_neigh.dev = n->dev;
- e->m_neigh.family = n->ops->family;
- memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
- e->out_dev = out_dev;
-
- /* It's important to add the neigh to the hash table before checking
- * its validity state, so that if we get a notification when the neigh
- * changes its validity state, we can find the relevant neigh in the
- * hash.
- */
- err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
- if (err)
- goto free_encap;
-
- read_lock_bh(&n->lock);
- nud_state = n->nud_state;
- ether_addr_copy(e->h_dest, n->ha);
- read_unlock_bh(&n->lock);
-
- switch (e->tunnel_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- gen_vxlan_header_ipv6(out_dev, encap_header,
- ipv6_encap_size, e->h_dest, tos, ttl,
- &fl6.daddr,
- &fl6.saddr, tun_key->tp_dst,
- tunnel_id_to_key32(tun_key->tun_id));
- break;
- default:
- err = -EOPNOTSUPP;
- goto destroy_neigh_entry;
- }
-
- e->encap_size = ipv6_encap_size;
- e->encap_header = encap_header;
-
- if (!(nud_state & NUD_VALID)) {
- neigh_event_send(n, NULL);
- err = -EAGAIN;
- goto out;
- }
-
- err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
- ipv6_encap_size, encap_header,
- MLX5_FLOW_NAMESPACE_FDB,
- &e->encap_id);
- if (err)
- goto destroy_neigh_entry;
-
- e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- neigh_release(n);
- return err;
-
-destroy_neigh_entry:
- mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-free_encap:
- kfree(encap_header);
-out:
- if (n)
- neigh_release(n);
- return err;
-}
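Both creators share the same "arm now, finish later" neighbour handling: snapshot nud_state and the MAC under the neigh read lock, and if the entry is not yet NUD_VALID, kick resolution and return -EAGAIN so the encap is completed later from the neigh-update path. The shared fragment, as it appears in both functions:

/* Shared neigh-state handling used by both encap header creators. */
read_lock_bh(&n->lock);
nud_state = n->nud_state;
ether_addr_copy(e->h_dest, n->ha);
read_unlock_bh(&n->lock);

if (!(nud_state & NUD_VALID)) {
	neigh_event_send(n, NULL);	/* trigger ARP/ND resolution */
	err = -EAGAIN;			/* encap finished on neigh update */
	goto out;
}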
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct ip_tunnel_info *tun_info,
struct net_device *mirred_dev,
struct net_device **encap_dev,
struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ int out_index)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
unsigned short family = ip_tunnel_info_af(tun_info);
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct ip_tunnel_key *key = &tun_info->key;
struct mlx5e_encap_entry *e;
- int tunnel_type, err = 0;
uintptr_t hash_key;
bool found = false;
-
- /* udp dst port must be set */
- if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
- goto vxlan_encap_offload_err;
-
- /* setting udp src port isn't supported */
- if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
-vxlan_encap_offload_err:
- NL_SET_ERR_MSG_MOD(extack,
- "must set udp dst port and not set udp src port");
- netdev_warn(priv->netdev,
- "must set udp dst port and not set udp src port\n");
- return -EOPNOTSUPP;
- }
-
- if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
- MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
- tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "port isn't an offloaded vxlan udp dport");
- netdev_warn(priv->netdev,
- "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
- return -EOPNOTSUPP;
- }
+ int err = 0;
hash_key = hash_encap_info(key);
@@ -2762,13 +2415,16 @@ vxlan_encap_offload_err:
return -ENOMEM;
e->tun_info = *tun_info;
- e->tunnel_type = tunnel_type;
+ err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
+ if (err)
+ goto out_err;
+
INIT_LIST_HEAD(&e->flows);
if (family == AF_INET)
- err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
+ err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
else if (family == AF_INET6)
- err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
+ err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
if (err && err != -EAGAIN)
goto out_err;
@@ -2776,12 +2432,15 @@ vxlan_encap_offload_err:
hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
attach_flow:
- list_add(&flow->encap, &e->flows);
+ list_add(&flow->encaps[out_index].list, &e->flows);
+ flow->encaps[out_index].index = out_index;
*encap_dev = e->out_dev;
- if (e->flags & MLX5_ENCAP_ENTRY_VALID)
- attr->encap_id = e->encap_id;
- else
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+ attr->dests[out_index].encap_id = e->encap_id;
+ attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ } else {
err = -EAGAIN;
+ }
return err;
@@ -2850,7 +2509,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct ip_tunnel_info *info = NULL;
const struct tc_action *a;
- LIST_HEAD(actions);
bool encap = false;
u32 action = 0;
int err, i;
@@ -2875,7 +2533,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return err;
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- attr->mirror_count = attr->out_count;
+ attr->split_count = attr->out_count;
continue;
}
@@ -2893,6 +2551,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
struct net_device *out_dev;
out_dev = tcf_mirred_dev(a);
+ if (!out_dev) {
+ /* out_dev is NULL when filters with a
+ * non-existent mirred device are replayed to
+ * the driver.
+ */
+ return -EINVAL;
+ }
if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
@@ -2902,23 +2567,47 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return -EOPNOTSUPP;
}
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
if (switchdev_port_same_parent_id(priv->netdev,
out_dev) ||
is_merged_eswitch_dev(priv, out_dev)) {
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
+ struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev);
+
+ if (uplink_upper &&
+ netif_is_lag_master(uplink_upper) &&
+ uplink_upper == out_dev)
+ out_dev = uplink_dev;
+
+ if (!mlx5e_eswitch_rep(out_dev))
+ return -EOPNOTSUPP;
+
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
- attr->out_rep[attr->out_count] = rpriv->rep;
- attr->out_mdev[attr->out_count++] = out_priv->mdev;
+ attr->dests[attr->out_count].rep = rpriv->rep;
+ attr->dests[attr->out_count].mdev = out_priv->mdev;
+ attr->out_count++;
} else if (encap) {
- parse_attr->mirred_ifindex = out_dev->ifindex;
- parse_attr->tun_info = *info;
+ parse_attr->mirred_ifindex[attr->out_count] =
+ out_dev->ifindex;
+ parse_attr->tun_info[attr->out_count] = *info;
+ encap = false;
attr->parse_attr = parse_attr;
- action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
- /* attr->out_rep is resolved when we handle encap */
+ attr->dests[attr->out_count].flags |=
+ MLX5_ESW_DEST_ENCAP;
+ attr->out_count++;
+ /* attr->dests[].rep is resolved when we
+ * handle encap
+ */
+ } else if (parse_attr->filter_dev != priv->netdev) {
+ /* All mlx5 devices are called to configure
+ * high-level device filters. Therefore, an
+ * *attempt* to install a filter on an invalid
+ * eswitch should not trigger an explicit error
+ */
+ return -EINVAL;
} else {
NL_SET_ERR_MSG_MOD(extack,
"devices are not on same switch HW, can't offload forwarding");
@@ -2935,7 +2624,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
encap = true;
else
return -EOPNOTSUPP;
- attr->mirror_count = attr->out_count;
continue;
}
@@ -2945,7 +2633,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (err)
return err;
- attr->mirror_count = attr->out_count;
+ attr->split_count = attr->out_count;
continue;
}
@@ -2966,8 +2654,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
return -EOPNOTSUPP;
}
- action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = dest_chain;
continue;
@@ -2980,7 +2667,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
return -EOPNOTSUPP;
- if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ if (attr->dest_chain) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
+ }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
+ if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
@@ -2999,6 +2694,11 @@ static void get_flags(int flags, u16 *flow_flags)
if (flags & MLX5E_TC_EGRESS)
__flow_flags |= MLX5E_TC_FLOW_EGRESS;
+ if (flags & MLX5E_TC_ESW_OFFLOAD)
+ __flow_flags |= MLX5E_TC_FLOW_ESWITCH;
+ if (flags & MLX5E_TC_NIC_OFFLOAD)
+ __flow_flags |= MLX5E_TC_FLOW_NIC;
+
*flow_flags = __flow_flags;
}
@@ -3009,18 +2709,32 @@ static const struct rhashtable_params tc_ht_params = {
.automatic_shrinking = true,
};
-static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
+static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *uplink_rpriv;
- if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
+ if (flags & MLX5E_TC_ESW_OFFLOAD) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- return &uplink_rpriv->tc_ht;
- } else
+ return &uplink_rpriv->uplink_priv.tc_ht;
+ } else /* NIC offload */
return &priv->fs.tc.ht;
}
+static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ bool is_rep_ingress = attr->in_rep->vport != FDB_UPLINK_VPORT &&
+ flow->flags & MLX5E_TC_FLOW_INGRESS;
+ bool act_is_encap = !!(attr->action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
+ bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS);
+
+ return esw_paired && mlx5_lag_is_sriov(attr->in_mdev) &&
+ (is_rep_ingress || act_is_encap);
+}
+
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct tc_cls_flower_offload *f, u16 flow_flags,
@@ -3042,10 +2756,6 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
flow->flags = flow_flags;
flow->priv = priv;
- err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
- if (err)
- goto err_free;
-
*__flow = flow;
*__parse_attr = parse_attr;
@@ -3058,12 +2768,16 @@ err_free:
}
static int
-mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
- struct tc_cls_flower_offload *f,
- u16 flow_flags,
- struct mlx5e_tc_flow **__flow)
+__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f,
+ u16 flow_flags,
+ struct net_device *filter_dev,
+ struct mlx5_eswitch_rep *in_rep,
+ struct mlx5_core_dev *in_mdev,
+ struct mlx5e_tc_flow **__flow)
{
struct netlink_ext_ack *extack = f->common.extack;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
int attr_size, err;
@@ -3074,6 +2788,12 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
&parse_attr, &flow);
if (err)
goto out;
+ parse_attr->filter_dev = filter_dev;
+ flow->esw_attr->parse_attr = parse_attr;
+ err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
+ f, filter_dev);
+ if (err)
+ goto err_free;
flow->esw_attr->chain = f->common.chain_index;
flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
@@ -3081,14 +2801,19 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
+ flow->esw_attr->in_rep = in_rep;
+ flow->esw_attr->in_mdev = in_mdev;
+
+ if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
+ MLX5_COUNTER_SOURCE_ESWITCH)
+ flow->esw_attr->counter_dev = in_mdev;
+ else
+ flow->esw_attr->counter_dev = priv->mdev;
+
err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
if (err)
goto err_free;
- if (!(flow->esw_attr->action &
- MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
- kvfree(parse_attr);
-
*__flow = flow;
return 0;
@@ -3100,10 +2825,92 @@ out:
return err;
}
+static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_priv *priv = flow->priv, *peer_priv;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
+ struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_rep_priv *peer_urpriv;
+ struct mlx5e_tc_flow *peer_flow;
+ struct mlx5_core_dev *in_mdev;
+ int err = 0;
+
+ peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ if (!peer_esw)
+ return -ENODEV;
+
+ peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
+ peer_priv = netdev_priv(peer_urpriv->netdev);
+
+ /* in_mdev is assigned to the mdev from which the packet
+ * originated: packets redirected to the uplink use the same
+ * mdev as the original flow, and packets redirected from the
+ * uplink use the peer mdev.
+ */
+ if (flow->esw_attr->in_rep->vport == FDB_UPLINK_VPORT)
+ in_mdev = peer_priv->mdev;
+ else
+ in_mdev = priv->mdev;
+
+ parse_attr = flow->esw_attr->parse_attr;
+ err = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags,
+ parse_attr->filter_dev,
+ flow->esw_attr->in_rep, in_mdev, &peer_flow);
+ if (err)
+ goto out;
+
+ flow->peer_flow = peer_flow;
+ flow->flags |= MLX5E_TC_FLOW_DUP;
+ mutex_lock(&esw->offloads.peer_mutex);
+ list_add_tail(&flow->peer, &esw->offloads.peer_flows);
+ mutex_unlock(&esw->offloads.peer_mutex);
+
+out:
+ mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ return err;
+}
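Peer access through devcom is reference-counted: mlx5_devcom_get_peer_data() returns the paired eswitch or NULL, and every successful get must be balanced with mlx5_devcom_release_peer_data(), exactly as the stats path further down does. The minimal usage shape:

/* Minimal devcom usage sketch, mirroring the pattern in this file. */
peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
if (!peer_esw)
	return;		/* no paired device */
/* ... operate on the peer eswitch ... */
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);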
+
+static int
+mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f,
+ u16 flow_flags,
+ struct net_device *filter_dev,
+ struct mlx5e_tc_flow **__flow)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *in_rep = rpriv->rep;
+ struct mlx5_core_dev *in_mdev = priv->mdev;
+ struct mlx5e_tc_flow *flow;
+ int err;
+
+ err = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
+ in_mdev, &flow);
+ if (err)
+ goto out;
+
+ if (is_peer_flow_needed(flow)) {
+ err = mlx5e_tc_add_fdb_peer_flow(f, flow);
+ if (err) {
+ mlx5e_tc_del_fdb_flow(priv, flow);
+ goto out;
+ }
+ }
+
+ *__flow = flow;
+
+ return 0;
+
+out:
+ return err;
+}
+
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f,
u16 flow_flags,
+ struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
struct netlink_ext_ack *extack = f->common.extack;
@@ -3122,6 +2929,12 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
if (err)
goto out;
+ parse_attr->filter_dev = filter_dev;
+ err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
+ f, filter_dev);
+ if (err)
+ goto err_free;
+
err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
if (err)
goto err_free;
@@ -3147,6 +2960,7 @@ static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f,
int flags,
+ struct net_device *filter_dev,
struct mlx5e_tc_flow **flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -3159,18 +2973,20 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
if (esw && esw->mode == SRIOV_OFFLOADS)
- err = mlx5e_add_fdb_flow(priv, f, flow_flags, flow);
+ err = mlx5e_add_fdb_flow(priv, f, flow_flags,
+ filter_dev, flow);
else
- err = mlx5e_add_nic_flow(priv, f, flow_flags, flow);
+ err = mlx5e_add_nic_flow(priv, f, flow_flags,
+ filter_dev, flow);
return err;
}
-int mlx5e_configure_flower(struct mlx5e_priv *priv,
+int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags)
{
struct netlink_ext_ack *extack = f->common.extack;
- struct rhashtable *tc_ht = get_tc_ht(priv);
+ struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
int err = 0;
@@ -3184,7 +3000,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
goto out;
}
- err = mlx5e_tc_add_flow(priv, f, flags, &flow);
+ err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
if (err)
goto out;
@@ -3212,10 +3028,10 @@ static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
return false;
}
-int mlx5e_delete_flower(struct mlx5e_priv *priv,
+int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags)
{
- struct rhashtable *tc_ht = get_tc_ht(priv);
+ struct rhashtable *tc_ht = get_tc_ht(priv, flags);
struct mlx5e_tc_flow *flow;
flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
@@ -3231,10 +3047,12 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
return 0;
}
-int mlx5e_stats_flower(struct mlx5e_priv *priv,
+int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags)
{
- struct rhashtable *tc_ht = get_tc_ht(priv);
+ struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
+ struct rhashtable *tc_ht = get_tc_ht(priv, flags);
+ struct mlx5_eswitch *peer_esw;
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
u64 bytes;
@@ -3254,6 +3072,27 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ if (!peer_esw)
+ goto out;
+
+ if ((flow->flags & MLX5E_TC_FLOW_DUP) &&
+ (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) {
+ u64 bytes2;
+ u64 packets2;
+ u64 lastuse2;
+
+ counter = mlx5e_tc_get_counter(flow->peer_flow);
+ mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
+
+ bytes += bytes2;
+ packets += packets2;
+ lastuse = max_t(u64, lastuse, lastuse2);
+ }
+
+ mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+
+out:
tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
return 0;
@@ -3342,7 +3181,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier(&tc->netdevice_nb);
- rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
+ rhashtable_destroy(&tc->ht);
if (!IS_ERR_OR_NULL(tc->t)) {
mlx5_destroy_flow_table(tc->t);
@@ -3360,9 +3199,17 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags)
{
- struct rhashtable *tc_ht = get_tc_ht(priv);
+ struct rhashtable *tc_ht = get_tc_ht(priv, flags);
return atomic_read(&tc_ht->nelems);
}
+
+void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
+{
+ struct mlx5e_tc_flow *flow, *tmp;
+
+ list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
+ __mlx5e_tc_del_fdb_peer_flow(flow);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 49436bf3b80a..d2d87f978c06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -42,7 +42,9 @@
enum {
MLX5E_TC_INGRESS = BIT(0),
MLX5E_TC_EGRESS = BIT(1),
- MLX5E_TC_LAST_EXPORTED_BIT = 1,
+ MLX5E_TC_NIC_OFFLOAD = BIT(2),
+ MLX5E_TC_ESW_OFFLOAD = BIT(3),
+ MLX5E_TC_LAST_EXPORTED_BIT = 3,
};
int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
@@ -51,12 +53,12 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
-int mlx5e_configure_flower(struct mlx5e_priv *priv,
+int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags);
-int mlx5e_delete_flower(struct mlx5e_priv *priv,
+int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags);
-int mlx5e_stats_flower(struct mlx5e_priv *priv,
+int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f, int flags);
struct mlx5e_encap_entry;
@@ -68,12 +70,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5e_neigh_hash_entry;
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv);
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags);
+
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
-static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; }
+static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) { return 0; }
#endif
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 6dacaeba2fbf..598ad7e4d5c9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -127,7 +127,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
else
#endif
if (skb_vlan_tag_present(skb))
- up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+ up = skb_vlan_tag_get_prio(skb);
/* channel_ix can be larger than num_channels since
* dev->num_real_tx_queues = num_channels * num_tc
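skb_vlan_tag_get_prio() is the dedicated accessor for the 802.1p bits and, unlike the open-coded shift it replaces, also masks the field first. Effectively:

/* Effectively:
 *   up = (skb->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 * i.e. the 3-bit PCP field, 0..7.
 */
u16 up = skb_vlan_tag_get_prio(skb);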
@@ -459,9 +459,10 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);
netdev_err(sq->channel->netdev,
- "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
- sq->cq.mcq.cqn, ci, sq->sqn, err_cqe->syndrome,
- err_cqe->vendor_err_synd);
+ "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
+ sq->cq.mcq.cqn, ci, sq->sqn,
+ get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
+ err_cqe->syndrome, err_cqe->vendor_err_synd);
mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
}
@@ -507,7 +508,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
- if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) {
+ if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
&sq->state)) {
mlx5e_dump_error_cqe(sq,
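get_cqe_opcode() centralizes the op_own decoding that the old code open-coded: the opcode sits in the high nibble of op_own, with the ownership bit in the low bit. A sketch of the equivalence, using a hypothetical name to avoid clashing with the real helper:

/* sketch_cqe_opcode() is illustrative; get_cqe_opcode() does the same. */
static inline u8 sketch_cqe_opcode(struct mlx5_cqe64 *cqe)
{
	return cqe->op_own >> 4;	/* opcode in bits 7:4 of op_own */
}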
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 85d517360157..b4af5e19f6ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -76,6 +76,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
napi);
struct mlx5e_ch_stats *ch_stats = c->stats;
+ struct mlx5e_rq *rq = &c->rq;
bool busy = false;
int work_done = 0;
int i;
@@ -85,17 +86,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
- busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
+ busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq, NULL);
if (c->xdp)
- busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq);
+ busy |= mlx5e_poll_xdpsq_cq(&rq->xdpsq.cq, rq);
if (likely(budget)) { /* budget=0 means: don't poll rx rings */
- work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+ work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
busy |= work_done == budget;
}
- busy |= c->rq.post_wqes(&c->rq);
+ busy |= c->rq.post_wqes(rq);
if (busy) {
if (likely(mlx5e_channel_no_affinity_change(c)))
@@ -115,9 +116,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_cq_arm(&c->sq[i].cq);
}
- mlx5e_handle_rx_dim(&c->rq);
+ mlx5e_handle_rx_dim(rq);
- mlx5e_cq_arm(&c->rq.cq);
+ mlx5e_cq_arm(&rq->cq);
mlx5e_cq_arm(&c->icosq.cq);
mlx5e_cq_arm(&c->xdpsq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c1e1a16a9b07..ee04aab65a9f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -31,20 +31,22 @@
*/
#include <linux/interrupt.h>
+#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eq.h>
#include <linux/mlx5/cmd.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include "mlx5_core.h"
+#include "lib/eq.h"
#include "fpga/core.h"
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
enum {
- MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),
MLX5_EQE_OWNER_INIT_VAL = 0x1,
};
@@ -55,14 +57,32 @@ enum {
};
enum {
- MLX5_NUM_SPARE_EQE = 0x80,
- MLX5_NUM_ASYNC_EQE = 0x1000,
- MLX5_NUM_CMD_EQE = 32,
- MLX5_NUM_PF_DRAIN = 64,
+ MLX5_EQ_DOORBEL_OFFSET = 0x40,
};
-enum {
- MLX5_EQ_DOORBEL_OFFSET = 0x40,
+struct mlx5_irq_info {
+ cpumask_var_t mask;
+ char name[MLX5_MAX_IRQ_NAME];
+ void *context; /* dev_id provided to request_irq */
+};
+
+struct mlx5_eq_table {
+ struct list_head comp_eqs_list;
+ struct mlx5_eq pages_eq;
+ struct mlx5_eq cmd_eq;
+ struct mlx5_eq async_eq;
+
+ struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX];
+
+ /* Since CQ DB is stored in async_eq */
+ struct mlx5_nb cq_err_nb;
+
+ struct mutex lock; /* sync async eqs creations */
+ int num_comp_vectors;
+ struct mlx5_irq_info *irq_info;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap;
+#endif
};
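The nh[] array is the heart of the rework visible later in this hunk: instead of one big switch in the interrupt handler, each EQE type now has an atomic notifier chain that consumers subscribe to, with MLX5_EVENT_TYPE_NOTIFY_ANY as a catch-all chain invoked for every EQE. A hedged consumer sketch, with hypothetical names, modeled on the cq_err_event_notifier registration below:

/* Hypothetical consumer sketch; my_nb and my_port_handler are made-up
 * names.  Registration mirrors MLX5_NB_INIT()/mlx5_eq_notifier_register()
 * as used for cq_err_nb below.
 */
static int my_port_handler(struct notifier_block *nb,
			   unsigned long type, void *data)
{
	struct mlx5_eqe *eqe = data;	/* type == MLX5_EVENT_TYPE_PORT_CHANGE */

	/* ... react to the port event ... */
	return NOTIFY_OK;
}

static struct mlx5_nb my_nb;
/* at init: MLX5_NB_INIT(&my_nb, my_port_handler, PORT_CHANGE);
 *          mlx5_eq_notifier_register(dev, &my_nb);
 */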
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -78,17 +98,6 @@ enum {
(1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
(1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
-struct map_eq_in {
- u64 mask;
- u32 reserved;
- u32 unmap_eqn;
-};
-
-struct cre_des_eq {
- u8 reserved[15];
- u8 eqn;
-};
-
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
@@ -99,213 +108,56 @@ static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
-{
- return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
-}
-
-static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
-{
- struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
-
- return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
-}
-
-static const char *eqe_type_str(u8 type)
-{
- switch (type) {
- case MLX5_EVENT_TYPE_COMP:
- return "MLX5_EVENT_TYPE_COMP";
- case MLX5_EVENT_TYPE_PATH_MIG:
- return "MLX5_EVENT_TYPE_PATH_MIG";
- case MLX5_EVENT_TYPE_COMM_EST:
- return "MLX5_EVENT_TYPE_COMM_EST";
- case MLX5_EVENT_TYPE_SQ_DRAINED:
- return "MLX5_EVENT_TYPE_SQ_DRAINED";
- case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
- return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
- case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
- return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
- case MLX5_EVENT_TYPE_CQ_ERROR:
- return "MLX5_EVENT_TYPE_CQ_ERROR";
- case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
- return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
- case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
- return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
- case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
- return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
- case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
- return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
- case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
- return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
- case MLX5_EVENT_TYPE_INTERNAL_ERROR:
- return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
- case MLX5_EVENT_TYPE_PORT_CHANGE:
- return "MLX5_EVENT_TYPE_PORT_CHANGE";
- case MLX5_EVENT_TYPE_GPIO_EVENT:
- return "MLX5_EVENT_TYPE_GPIO_EVENT";
- case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
- return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
- case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
- return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
- case MLX5_EVENT_TYPE_REMOTE_CONFIG:
- return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
- case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
- return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
- case MLX5_EVENT_TYPE_STALL_EVENT:
- return "MLX5_EVENT_TYPE_STALL_EVENT";
- case MLX5_EVENT_TYPE_CMD:
- return "MLX5_EVENT_TYPE_CMD";
- case MLX5_EVENT_TYPE_PAGE_REQUEST:
- return "MLX5_EVENT_TYPE_PAGE_REQUEST";
- case MLX5_EVENT_TYPE_PAGE_FAULT:
- return "MLX5_EVENT_TYPE_PAGE_FAULT";
- case MLX5_EVENT_TYPE_PPS_EVENT:
- return "MLX5_EVENT_TYPE_PPS_EVENT";
- case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
- return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
- case MLX5_EVENT_TYPE_FPGA_ERROR:
- return "MLX5_EVENT_TYPE_FPGA_ERROR";
- case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
- return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
- case MLX5_EVENT_TYPE_GENERAL_EVENT:
- return "MLX5_EVENT_TYPE_GENERAL_EVENT";
- case MLX5_EVENT_TYPE_DEVICE_TRACER:
- return "MLX5_EVENT_TYPE_DEVICE_TRACER";
- default:
- return "Unrecognized event";
- }
-}
-
-static enum mlx5_dev_event port_subtype_event(u8 subtype)
-{
- switch (subtype) {
- case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
- return MLX5_DEV_EVENT_PORT_DOWN;
- case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
- return MLX5_DEV_EVENT_PORT_UP;
- case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
- return MLX5_DEV_EVENT_PORT_INITIALIZED;
- case MLX5_PORT_CHANGE_SUBTYPE_LID:
- return MLX5_DEV_EVENT_LID_CHANGE;
- case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
- return MLX5_DEV_EVENT_PKEY_CHANGE;
- case MLX5_PORT_CHANGE_SUBTYPE_GUID:
- return MLX5_DEV_EVENT_GUID_CHANGE;
- case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
- return MLX5_DEV_EVENT_CLIENT_REREG;
- }
- return -1;
-}
-
-static void eq_update_ci(struct mlx5_eq *eq, int arm)
+/* caller must eventually call mlx5_cq_put on the returned cq */
+static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
{
- __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
- u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
-
- __raw_writel((__force u32)cpu_to_be32(val), addr);
- /* We still want ordering, just not swabbing, so add a barrier */
- mb();
-}
+ struct mlx5_cq_table *table = &eq->cq_table;
+ struct mlx5_core_cq *cq = NULL;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-static void eqe_pf_action(struct work_struct *work)
-{
- struct mlx5_pagefault *pfault = container_of(work,
- struct mlx5_pagefault,
- work);
- struct mlx5_eq *eq = pfault->eq;
+ spin_lock(&table->lock);
+ cq = radix_tree_lookup(&table->tree, cqn);
+ if (likely(cq))
+ mlx5_cq_hold(cq);
+ spin_unlock(&table->lock);
- mlx5_core_page_fault(eq->dev, pfault);
- mempool_free(pfault, eq->pf_ctx.pool);
+ return cq;
}
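The comment is the contract: the lookup takes a reference under the table spinlock, which is what keeps the completion path safe against a concurrent CQ destroy. The caller-side shape, as used in mlx5_eq_comp_int() below:

/* Caller-side sketch of the refcounted lookup (see mlx5_eq_comp_int). */
cq = mlx5_eq_cq_get(eq, cqn);
if (likely(cq)) {
	++cq->arm_sn;		/* keep the arm sequence number in sync */
	cq->comp(cq);		/* run the completion handler */
	mlx5_cq_put(cq);	/* drop the reference taken by the lookup */
}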
-static void eq_pf_process(struct mlx5_eq *eq)
+static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr)
{
- struct mlx5_core_dev *dev = eq->dev;
- struct mlx5_eqe_page_fault *pf_eqe;
- struct mlx5_pagefault *pfault;
+ struct mlx5_eq_comp *eq_comp = eq_ptr;
+ struct mlx5_eq *eq = eq_ptr;
struct mlx5_eqe *eqe;
int set_ci = 0;
+ u32 cqn = -1;
while ((eqe = next_eqe_sw(eq))) {
- pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
- if (!pfault) {
- schedule_work(&eq->pf_ctx.work);
- break;
- }
-
+ struct mlx5_core_cq *cq;
+ /* Make sure we read EQ entry contents after we've
+ * checked the ownership bit.
+ */
dma_rmb();
- pf_eqe = &eqe->data.page_fault;
- pfault->event_subtype = eqe->sub_type;
- pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
-
- mlx5_core_dbg(dev,
- "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
- eqe->sub_type, pfault->bytes_committed);
-
- switch (eqe->sub_type) {
- case MLX5_PFAULT_SUBTYPE_RDMA:
- /* RDMA based event */
- pfault->type =
- be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
- pfault->token =
- be32_to_cpu(pf_eqe->rdma.pftype_token) &
- MLX5_24BIT_MASK;
- pfault->rdma.r_key =
- be32_to_cpu(pf_eqe->rdma.r_key);
- pfault->rdma.packet_size =
- be16_to_cpu(pf_eqe->rdma.packet_length);
- pfault->rdma.rdma_op_len =
- be32_to_cpu(pf_eqe->rdma.rdma_op_len);
- pfault->rdma.rdma_va =
- be64_to_cpu(pf_eqe->rdma.rdma_va);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
- pfault->type, pfault->token,
- pfault->rdma.r_key);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
- pfault->rdma.rdma_op_len,
- pfault->rdma.rdma_va);
- break;
-
- case MLX5_PFAULT_SUBTYPE_WQE:
- /* WQE based event */
- pfault->type =
- (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
- pfault->token =
- be32_to_cpu(pf_eqe->wqe.token);
- pfault->wqe.wq_num =
- be32_to_cpu(pf_eqe->wqe.pftype_wq) &
- MLX5_24BIT_MASK;
- pfault->wqe.wqe_index =
- be16_to_cpu(pf_eqe->wqe.wqe_index);
- pfault->wqe.packet_size =
- be16_to_cpu(pf_eqe->wqe.packet_length);
- mlx5_core_dbg(dev,
- "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
- pfault->type, pfault->token,
- pfault->wqe.wq_num,
- pfault->wqe.wqe_index);
- break;
-
- default:
- mlx5_core_warn(dev,
- "Unsupported page fault event sub-type: 0x%02hhx\n",
- eqe->sub_type);
- /* Unsupported page faults should still be
- * resolved by the page fault handler
- */
+ /* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */
+ cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
+
+ cq = mlx5_eq_cq_get(eq, cqn);
+ if (likely(cq)) {
+ ++cq->arm_sn;
+ cq->comp(cq);
+ mlx5_cq_put(cq);
+ } else {
+ mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
}
- pfault->eq = eq;
- INIT_WORK(&pfault->work, eqe_pf_action);
- queue_work(eq->pf_ctx.wq, &pfault->work);
-
++eq->cons_index;
++set_ci;
+ /* The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create our EQs with MLX5_NUM_SPARE_EQE extra
+ * entries, so we must update our consumer index at
+ * least that often.
+ */
if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
eq_update_ci(eq, 0);
set_ci = 0;
@@ -313,165 +165,41 @@ static void eq_pf_process(struct mlx5_eq *eq)
}
eq_update_ci(eq, 1);
-}
-static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
-{
- struct mlx5_eq *eq = eq_ptr;
- unsigned long flags;
-
- if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
- eq_pf_process(eq);
- spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
- } else {
- schedule_work(&eq->pf_ctx.work);
- }
+ if (cqn != -1)
+ tasklet_schedule(&eq_comp->tasklet_ctx.task);
return IRQ_HANDLED;
}
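The polling loop leans on the classic EQE ownership scheme: an entry is software-owned when its owner bit, XORed with the wrap ("generation") bit of the consumer index, is zero; dma_rmb() then orders the payload reads after that check. The helper removed from this file earlier (still called here, so it presumably now lives in the shared EQ headers) reads:

/* Ownership-bit check behind next_eqe_sw(), per the removed helper:
 * the EQE is software-owned when its owner bit differs from the
 * generation implied by the consumer-index wrap bit.
 */
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}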
-/* mempool_refill() was proposed but unfortunately wasn't accepted
- * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
- * Chip workaround.
+/* Some architectures don't latch interrupts when they are disabled, so using
+ * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
+ * avoid losing them. It is not recommended to use it, unless this is the last
+ * resort.
*/
-static void mempool_refill(mempool_t *pool)
-{
- while (pool->curr_nr < pool->min_nr)
- mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
-}
-
-static void eq_pf_action(struct work_struct *work)
-{
- struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);
-
- mempool_refill(eq->pf_ctx.pool);
-
- spin_lock_irq(&eq->pf_ctx.lock);
- eq_pf_process(eq);
- spin_unlock_irq(&eq->pf_ctx.lock);
-}
-
-static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
-{
- spin_lock_init(&pf_ctx->lock);
- INIT_WORK(&pf_ctx->work, eq_pf_action);
-
- pf_ctx->wq = alloc_ordered_workqueue(name,
- WQ_MEM_RECLAIM);
- if (!pf_ctx->wq)
- return -ENOMEM;
-
- pf_ctx->pool = mempool_create_kmalloc_pool
- (MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
- if (!pf_ctx->pool)
- goto err_wq;
-
- return 0;
-err_wq:
- destroy_workqueue(pf_ctx->wq);
- return -ENOMEM;
-}
-
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
- u32 wq_num, u8 type, int error)
-{
- u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
- u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0};
-
- MLX5_SET(page_fault_resume_in, in, opcode,
- MLX5_CMD_OP_PAGE_FAULT_RESUME);
- MLX5_SET(page_fault_resume_in, in, error, !!error);
- MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
- MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
- MLX5_SET(page_fault_resume_in, in, token, token);
-
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
-#endif
-
-static void general_event_handler(struct mlx5_core_dev *dev,
- struct mlx5_eqe *eqe)
-{
- switch (eqe->sub_type) {
- case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
- if (dev->event)
- dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0);
- break;
- default:
- mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n",
- eqe->sub_type);
- }
-}
-
-static void mlx5_temp_warning_event(struct mlx5_core_dev *dev,
- struct mlx5_eqe *eqe)
-{
- u64 value_lsb;
- u64 value_msb;
-
- value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
- value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
-
- mlx5_core_warn(dev,
- "High temperature on sensors with bit set %llx %llx",
- value_msb, value_lsb);
-}
-
-/* caller must eventually call mlx5_cq_put on the returned cq */
-static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
-{
- struct mlx5_cq_table *table = &eq->cq_table;
- struct mlx5_core_cq *cq = NULL;
-
- spin_lock(&table->lock);
- cq = radix_tree_lookup(&table->tree, cqn);
- if (likely(cq))
- mlx5_cq_hold(cq);
- spin_unlock(&table->lock);
-
- return cq;
-}
-
-static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
-{
- struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
-
- if (unlikely(!cq)) {
- mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
- return;
- }
-
- ++cq->arm_sn;
-
- cq->comp(cq);
-
- mlx5_cq_put(cq);
-}
-
-static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
{
- struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
-
- if (unlikely(!cq)) {
- mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
- return;
- }
+ u32 count_eqe;
- cq->event(cq, event_type);
+ disable_irq(eq->core.irqn);
+ count_eqe = eq->core.cons_index;
+ mlx5_eq_comp_int(eq->core.irqn, eq);
+ count_eqe = eq->core.cons_index - count_eqe;
+ enable_irq(eq->core.irqn);
- mlx5_cq_put(cq);
+ return count_eqe;
}
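Note the return value is a consumer-index delta, so it only counts EQEs consumed on the EQ being polled while its IRQ line was masked. A hypothetical last-resort caller might look like:

/* Hypothetical recovery-path sketch; eq is a struct mlx5_eq_comp *. */
u32 handled = mlx5_eq_poll_irq_disabled(eq);

if (!handled)
	pr_debug("no pending EQEs: stall is not a lost interrupt\n");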
-static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
+static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr)
{
struct mlx5_eq *eq = eq_ptr;
- struct mlx5_core_dev *dev = eq->dev;
+ struct mlx5_eq_table *eqt;
+ struct mlx5_core_dev *dev;
struct mlx5_eqe *eqe;
int set_ci = 0;
- u32 cqn = -1;
- u32 rsn;
- u8 port;
+
+ dev = eq->dev;
+ eqt = dev->priv.eq_table;
while ((eqe = next_eqe_sw(eq))) {
/*
@@ -480,116 +208,12 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
*/
dma_rmb();
- mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
- eq->eqn, eqe_type_str(eqe->type));
- switch (eqe->type) {
- case MLX5_EVENT_TYPE_COMP:
- cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
- mlx5_eq_cq_completion(eq, cqn);
- break;
- case MLX5_EVENT_TYPE_DCT_DRAINED:
- rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
- rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
- mlx5_rsc_event(dev, rsn, eqe->type);
- break;
- case MLX5_EVENT_TYPE_PATH_MIG:
- case MLX5_EVENT_TYPE_COMM_EST:
- case MLX5_EVENT_TYPE_SQ_DRAINED:
- case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
- case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
- case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
- case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
- case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
- rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
- rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
- mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
- eqe_type_str(eqe->type), eqe->type, rsn);
- mlx5_rsc_event(dev, rsn, eqe->type);
- break;
-
- case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
- case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
- rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
- mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
- eqe_type_str(eqe->type), eqe->type, rsn);
- mlx5_srq_event(dev, rsn, eqe->type);
- break;
-
- case MLX5_EVENT_TYPE_CMD:
- mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
- break;
+ if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
+ atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
+ else
+ mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);
- case MLX5_EVENT_TYPE_PORT_CHANGE:
- port = (eqe->data.port.port >> 4) & 0xf;
- switch (eqe->sub_type) {
- case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
- case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
- case MLX5_PORT_CHANGE_SUBTYPE_LID:
- case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
- case MLX5_PORT_CHANGE_SUBTYPE_GUID:
- case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
- case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
- if (dev->event)
- dev->event(dev, port_subtype_event(eqe->sub_type),
- (unsigned long)port);
- break;
- default:
- mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
- port, eqe->sub_type);
- }
- break;
- case MLX5_EVENT_TYPE_CQ_ERROR:
- cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
- mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
- cqn, eqe->data.cq_err.syndrome);
- mlx5_eq_cq_event(eq, cqn, eqe->type);
- break;
-
- case MLX5_EVENT_TYPE_PAGE_REQUEST:
- {
- u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
- s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
-
- mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
- func_id, npages);
- mlx5_core_req_pages_handler(dev, func_id, npages);
- }
- break;
-
- case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
- mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
- break;
-
- case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
- mlx5_port_module_event(dev, eqe);
- break;
-
- case MLX5_EVENT_TYPE_PPS_EVENT:
- mlx5_pps_event(dev, eqe);
- break;
-
- case MLX5_EVENT_TYPE_FPGA_ERROR:
- case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
- mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
- break;
-
- case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
- mlx5_temp_warning_event(dev, eqe);
- break;
-
- case MLX5_EVENT_TYPE_GENERAL_EVENT:
- general_event_handler(dev, eqe);
- break;
-
- case MLX5_EVENT_TYPE_DEVICE_TRACER:
- mlx5_fw_tracer_event(dev, eqe);
- break;
-
- default:
- mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
- eqe->type, eq->eqn);
- break;
- }
+ atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
++eq->cons_index;
++set_ci;
@@ -608,30 +232,9 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
eq_update_ci(eq, 1);
- if (cqn != -1)
- tasklet_schedule(&eq->tasklet_ctx.task);
-
return IRQ_HANDLED;
}
-/* Some architectures don't latch interrupts when they are disabled, so using
- * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
- * avoid losing them. It is not recommended to use it, unless this is the last
- * resort.
- */
-u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
-{
- u32 count_eqe;
-
- disable_irq(eq->irqn);
- count_eqe = eq->cons_index;
- mlx5_eq_int(eq->irqn, eq);
- count_eqe = eq->cons_index - count_eqe;
- enable_irq(eq->irqn);
-
- return count_eqe;
-}
-
static void init_eq_buf(struct mlx5_eq *eq)
{
struct mlx5_eqe *eqe;
@@ -643,39 +246,35 @@ static void init_eq_buf(struct mlx5_eq *eq)
}
}
-int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name,
- enum mlx5_eq_type type)
+static int
+create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name,
+ struct mlx5_eq_param *param)
{
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
struct mlx5_cq_table *cq_table = &eq->cq_table;
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv;
- irq_handler_t handler;
+ u8 vecidx = param->index;
__be64 *pas;
void *eqc;
int inlen;
u32 *in;
int err;
+ if (eq_table->irq_info[vecidx].context)
+ return -EEXIST;
+
/* Init CQ table */
memset(cq_table, 0, sizeof(*cq_table));
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
- eq->type = type;
- eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
+ eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
if (err)
return err;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (type == MLX5_EQ_TYPE_PF)
- handler = mlx5_eq_pf_int;
- else
-#endif
- handler = mlx5_eq_int;
-
init_eq_buf(eq);
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
@@ -691,7 +290,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
mlx5_fill_page_array(&eq->buf, pas);
MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
- MLX5_SET64(create_eq_in, in, event_bitmask, mask);
+ MLX5_SET64(create_eq_in, in, event_bitmask, param->mask);
eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
@@ -704,15 +303,17 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
if (err)
goto err_in;
- snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
+ snprintf(eq_table->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
+ eq_table->irq_info[vecidx].context = param->context;
+ eq->vecidx = vecidx;
eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
eq->irqn = pci_irq_vector(dev->pdev, vecidx);
eq->dev = dev;
eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(eq->irqn, handler, 0,
- priv->irq_info[vecidx].name, eq);
+ err = request_irq(eq->irqn, param->handler, 0,
+ eq_table->irq_info[vecidx].name, param->context);
if (err)
goto err_eq;
@@ -720,21 +321,6 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
if (err)
goto err_irq;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (type == MLX5_EQ_TYPE_PF) {
- err = init_pf_ctx(&eq->pf_ctx, name);
- if (err)
- goto err_irq;
- } else
-#endif
- {
- INIT_LIST_HEAD(&eq->tasklet_ctx.list);
- INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
- spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
- (unsigned long)&eq->tasklet_ctx);
- }
-
/* EQs are created in ARMED state
*/
eq_update_ci(eq, 1);
@@ -756,27 +342,25 @@ err_buf:
return err;
}
-int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+ struct mlx5_irq_info *irq_info;
int err;
+ irq_info = &eq_table->irq_info[eq->vecidx];
+
mlx5_debug_eq_remove(dev, eq);
- free_irq(eq->irqn, eq);
+
+ free_irq(eq->irqn, irq_info->context);
+ irq_info->context = NULL;
+
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
synchronize_irq(eq->irqn);
- if (eq->type == MLX5_EQ_TYPE_COMP) {
- tasklet_disable(&eq->tasklet_ctx.task);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- } else if (eq->type == MLX5_EQ_TYPE_PF) {
- cancel_work_sync(&eq->pf_ctx.work);
- destroy_workqueue(eq->pf_ctx.wq);
- mempool_destroy(eq->pf_ctx.pool);
-#endif
- }
mlx5_buf_free(dev, &eq->buf);
return err;
@@ -816,28 +400,106 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
return 0;
}
-int mlx5_eq_init(struct mlx5_core_dev *dev)
+int mlx5_eq_table_init(struct mlx5_core_dev *dev)
{
- int err;
+ struct mlx5_eq_table *eq_table;
+ int i, err;
- spin_lock_init(&dev->priv.eq_table.lock);
+ eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL);
+ if (!eq_table)
+ return -ENOMEM;
+
+ dev->priv.eq_table = eq_table;
err = mlx5_eq_debugfs_init(dev);
+ if (err)
+ goto kvfree_eq_table;
+
+ mutex_init(&eq_table->lock);
+ for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
+ ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
+ return 0;
+
+kvfree_eq_table:
+ kvfree(eq_table);
+ dev->priv.eq_table = NULL;
return err;
}
-void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
+void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev)
{
mlx5_eq_debugfs_cleanup(dev);
+ kvfree(dev->priv.eq_table);
}
-int mlx5_start_eqs(struct mlx5_core_dev *dev)
+/* Async EQs */
+
+static int create_async_eq(struct mlx5_core_dev *dev, const char *name,
+ struct mlx5_eq *eq, struct mlx5_eq_param *param)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+ int err;
+
+ mutex_lock(&eq_table->lock);
+ if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) {
+ err = -ENOSPC;
+ goto unlock;
+ }
+
+ err = create_map_eq(dev, eq, name, param);
+unlock:
+ mutex_unlock(&eq_table->lock);
+ return err;
+}
+
+static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+{
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
+ mutex_lock(&eq_table->lock);
+ err = destroy_unmap_eq(dev, eq);
+ mutex_unlock(&eq_table->lock);
+ return err;
+}
+
+static int cq_err_event_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_eq_table *eqt;
+ struct mlx5_core_cq *cq;
+ struct mlx5_eqe *eqe;
+ struct mlx5_eq *eq;
+ u32 cqn;
+
+ /* type == MLX5_EVENT_TYPE_CQ_ERROR */
+
+ eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb);
+ eq = &eqt->async_eq;
+ eqe = data;
+
+ cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
+ mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
+ cqn, eqe->data.cq_err.syndrome);
+
+ cq = mlx5_eq_cq_get(eq, cqn);
+ if (unlikely(!cq)) {
+ mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
+ return NOTIFY_OK;
+ }
+
+ cq->event(cq, type);
+
+ mlx5_cq_put(cq);
+
+ return NOTIFY_OK;
+}
+
+static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
+{
+ u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
+
if (MLX5_VPORT_MANAGER(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
@@ -865,127 +527,521 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
if (MLX5_CAP_MCAM_REG(dev, tracer_registers))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER);
- err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
- MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
- "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
+ if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER);
+
+ return async_event_mask;
+}
+
+static int create_async_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_param param = {};
+ int err;
+
+ MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR);
+ mlx5_eq_notifier_register(dev, &table->cq_err_nb);
+
+ param = (struct mlx5_eq_param) {
+ .index = MLX5_EQ_CMD_IDX,
+ .mask = 1ull << MLX5_EVENT_TYPE_CMD,
+ .nent = MLX5_NUM_CMD_EQE,
+ .context = &table->cmd_eq,
+ .handler = mlx5_eq_async_int,
+ };
+ err = create_async_eq(dev, "mlx5_cmd_eq", &table->cmd_eq, &param);
if (err) {
mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
- return err;
+ goto err0;
}
mlx5_cmd_use_events(dev);
- err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
- MLX5_NUM_ASYNC_EQE, async_event_mask,
- "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
+ param = (struct mlx5_eq_param) {
+ .index = MLX5_EQ_ASYNC_IDX,
+ .mask = gather_async_events_mask(dev),
+ .nent = MLX5_NUM_ASYNC_EQE,
+ .context = &table->async_eq,
+ .handler = mlx5_eq_async_int,
+ };
+ err = create_async_eq(dev, "mlx5_async_eq", &table->async_eq, &param);
if (err) {
mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
goto err1;
}
- err = mlx5_create_map_eq(dev, &table->pages_eq,
- MLX5_EQ_VEC_PAGES,
- /* TODO: sriov max_vf + */ 1,
- 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
- MLX5_EQ_TYPE_ASYNC);
+ param = (struct mlx5_eq_param) {
+ .index = MLX5_EQ_PAGEREQ_IDX,
+ .mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
+ .nent = /* TODO: sriov max_vf + */ 1,
+ .context = &table->pages_eq,
+ .handler = mlx5_eq_async_int,
+ };
+ err = create_async_eq(dev, "mlx5_pages_eq", &table->pages_eq, &param);
if (err) {
mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
goto err2;
}
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (MLX5_CAP_GEN(dev, pg)) {
- err = mlx5_create_map_eq(dev, &table->pfault_eq,
- MLX5_EQ_VEC_PFAULT,
- MLX5_NUM_ASYNC_EQE,
- 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
- "mlx5_page_fault_eq",
- MLX5_EQ_TYPE_PF);
- if (err) {
- mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
- err);
- goto err3;
- }
- }
-
- return err;
-err3:
- mlx5_destroy_unmap_eq(dev, &table->pages_eq);
-#else
return err;
-#endif
err2:
- mlx5_destroy_unmap_eq(dev, &table->async_eq);
+ destroy_async_eq(dev, &table->async_eq);
err1:
mlx5_cmd_use_polling(dev);
- mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
+ destroy_async_eq(dev, &table->cmd_eq);
+err0:
+ mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
return err;
}
-void mlx5_stop_eqs(struct mlx5_core_dev *dev)
+static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_eq_table *table = dev->priv.eq_table;
int err;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (MLX5_CAP_GEN(dev, pg)) {
- err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
- if (err)
- mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n",
- err);
- }
-#endif
-
- err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
+ err = destroy_async_eq(dev, &table->pages_eq);
if (err)
mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n",
err);
- err = mlx5_destroy_unmap_eq(dev, &table->async_eq);
+ err = destroy_async_eq(dev, &table->async_eq);
if (err)
mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n",
err);
+
mlx5_cmd_use_polling(dev);
- err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
+ err = destroy_async_eq(dev, &table->cmd_eq);
if (err)
mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n",
err);
+
+ mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
}
-int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- u32 *out, int outlen)
+struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
{
- u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
+ return &dev->priv.eq_table->async_eq;
+}
- MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
- MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev)
+{
+ synchronize_irq(dev->priv.eq_table->async_eq.irqn);
+}
+
+void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev)
+{
+ synchronize_irq(dev->priv.eq_table->cmd_eq.irqn);
+}
+
+/* Generic EQ API for mlx5_core consumers
+ * Needed for RDMA ODP EQ for now
+ */
+struct mlx5_eq *
+mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
+ struct mlx5_eq_param *param)
+{
+ struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
+ int err;
+
+ if (!eq)
+ return ERR_PTR(-ENOMEM);
+
+ err = create_async_eq(dev, name, eq, param);
+ if (err) {
+ kvfree(eq);
+ eq = ERR_PTR(err);
+ }
+
+ return eq;
+}
+EXPORT_SYMBOL(mlx5_eq_create_generic);
+
+int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+{
+ int err;
+
+ if (IS_ERR(eq))
+ return -EINVAL;
+
+ err = destroy_async_eq(dev, eq);
+ if (err)
+ goto out;
+
+ kvfree(eq);
+out:
+ return err;
+}
+EXPORT_SYMBOL(mlx5_eq_destroy_generic);
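A consumer-side sketch of the generic EQ API above; the ISR, the EQ depth, and the MLX5_EQ_PFAULT_IDX slot are assumptions, not part of this patch:

	static irqreturn_t hypothetical_odp_isr(int irq, void *eq_ptr)
	{
		/* poll the EQ here, see mlx5_eq_get_eqe()/mlx5_eq_update_ci() */
		return IRQ_HANDLED;
	}

	static struct mlx5_eq *hypothetical_odp_eq_create(struct mlx5_core_dev *dev)
	{
		struct mlx5_eq_param param = {
			.index = MLX5_EQ_PFAULT_IDX,	/* assumed async slot */
			.mask = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT,
			.nent = 256,			/* hypothetical depth */
			.handler = hypothetical_odp_isr,
		};

		return mlx5_eq_create_generic(dev, "hypothetical_odp_eq", &param);
		/* teardown: mlx5_eq_destroy_generic(dev, eq); */
	}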
+
+struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc)
+{
+ u32 ci = eq->cons_index + cc;
+ struct mlx5_eqe *eqe;
+
+ eqe = get_eqe(eq, ci & (eq->nent - 1));
+ eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;
+ /* Make sure we read EQ entry contents after we've
+ * checked the ownership bit.
+ */
+ if (eqe)
+ dma_rmb();
+
+ return eqe;
+}
+EXPORT_SYMBOL(mlx5_eq_get_eqe);
+
+void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
+{
+ __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
+ u32 val;
+
+ eq->cons_index += cc;
+ val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
+
+ __raw_writel((__force u32)cpu_to_be32(val), addr);
+ /* We still want ordering, just not swabbing, so add a barrier */
+ mb();
+}
+EXPORT_SYMBOL(mlx5_eq_update_ci);
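The two exports above form a polling pair: mlx5_eq_get_eqe() validates the ownership bit (issuing dma_rmb() before the payload may be read) and mlx5_eq_update_ci() publishes the new consumer index to the doorbell, optionally re-arming the EQ. A sketch of the expected consumer loop, with a hypothetical per-entry handler:

	static void hypothetical_eq_poll(struct mlx5_eq *eq)
	{
		struct mlx5_eqe *eqe;
		u32 cc = 0;	/* entries consumed in this pass */

		while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
			process_eqe(eqe);	/* hypothetical handler */
			cc++;
		}

		/* publish cons_index += cc and re-arm */
		mlx5_eq_update_ci(eq, cc, true);
	}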
+
+/* Completion EQs */
+
+static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+ int irq = pci_irq_vector(mdev->pdev, vecidx);
+ struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];
+
+ if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) {
+ mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+ return -ENOMEM;
+ }
+
+ cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
+ irq_info->mask);
+
+ if (IS_ENABLED(CONFIG_SMP) &&
+ irq_set_affinity_hint(irq, irq_info->mask))
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
+
+ return 0;
+}
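cpumask_local_spread(i, node) above returns the i-th CPU counting CPUs of the device's NUMA node first, then remote ones, so completion vectors land on near CPUs before spilling over. A worked example under an assumed topology:

	/* Assumed topology: 8 CPUs, node 0 = {0-3}, node 1 = {4-7},
	 * priv->numa_node == 0:
	 *   i = 0..3 -> CPUs 0,1,2,3   (local node first)
	 *   i = 4..7 -> CPUs 4,5,6,7   (then the remote node)
	 * Each vector's hint mask thus holds exactly one CPU.
	 */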
+
+static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+ struct mlx5_priv *priv = &mdev->priv;
+ int irq = pci_irq_vector(mdev->pdev, vecidx);
+ struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx];
+
+ irq_set_affinity_hint(irq, NULL);
+ free_cpumask_var(irq_info->mask);
+}
+
+static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) {
+ err = set_comp_irq_affinity_hint(mdev, i);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ for (i--; i >= 0; i--)
+ clear_comp_irq_affinity_hint(mdev, i);
+
+ return err;
+}
+
+static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++)
+ clear_comp_irq_affinity_hint(mdev, i);
+}
+
+static void destroy_comp_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq, *n;
+
+ clear_comp_irqs_affinity_hints(dev);
+
+#ifdef CONFIG_RFS_ACCEL
+ if (table->rmap) {
+ free_irq_cpu_rmap(table->rmap);
+ table->rmap = NULL;
+ }
+#endif
+ list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ list_del(&eq->list);
+ if (destroy_unmap_eq(dev, &eq->core))
+ mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n",
+ eq->core.eqn);
+ tasklet_disable(&eq->tasklet_ctx.task);
+ kfree(eq);
+ }
+}
+
+static int create_comp_eqs(struct mlx5_core_dev *dev)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ char name[MLX5_MAX_IRQ_NAME];
+ struct mlx5_eq_comp *eq;
+ int ncomp_vec;
+ int nent;
+ int err;
+ int i;
+
+ INIT_LIST_HEAD(&table->comp_eqs_list);
+ ncomp_vec = table->num_comp_vectors;
+ nent = MLX5_COMP_EQ_SIZE;
+#ifdef CONFIG_RFS_ACCEL
+ table->rmap = alloc_irq_cpu_rmap(ncomp_vec);
+ if (!table->rmap)
+ return -ENOMEM;
+#endif
+ for (i = 0; i < ncomp_vec; i++) {
+ int vecidx = i + MLX5_EQ_VEC_COMP_BASE;
+ struct mlx5_eq_param param = {};
+
+ eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ if (!eq) {
+ err = -ENOMEM;
+ goto clean;
+ }
+
+ INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+ INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+ spin_lock_init(&eq->tasklet_ctx.lock);
+ tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
+ (unsigned long)&eq->tasklet_ctx);
+
+#ifdef CONFIG_RFS_ACCEL
+ irq_cpu_rmap_add(table->rmap, pci_irq_vector(dev->pdev, vecidx));
+#endif
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
+ param = (struct mlx5_eq_param) {
+ .index = vecidx,
+ .mask = 0,
+ .nent = nent,
+ .context = &eq->core,
+ .handler = mlx5_eq_comp_int
+ };
+ err = create_map_eq(dev, &eq->core, name, &param);
+ if (err) {
+ kfree(eq);
+ goto clean;
+ }
+ mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
+ /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */
+ list_add_tail(&eq->list, &table->comp_eqs_list);
+ }
+
+ err = set_comp_irq_affinity_hints(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
+ goto clean;
+ }
+
+ return 0;
+
+clean:
+ destroy_comp_eqs(dev);
+ return err;
+}
+
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+ unsigned int *irqn)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq, *n;
+ int err = -ENOENT;
+ int i = 0;
+
+ list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ if (i++ == vector) {
+ *eqn = eq->core.eqn;
+ *irqn = eq->core.irqn;
+ err = 0;
+ break;
+ }
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_vector2eqn);
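Because completion EQs are added with list_add_tail() (see the comment in create_comp_eqs above), the list order matches vector numbering and mlx5_vector2eqn() can translate a 0-based completion vector into an EQN/IRQ pair. A consumer-side sketch; wrapping by the vector count is the caller's own policy here, not something the patch mandates:

	static int hypothetical_pick_eqn(struct mlx5_core_dev *dev, int hint)
	{
		int vector = hint % mlx5_comp_vectors_count(dev);
		unsigned int irqn;
		int eqn, err;

		err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
		if (err)
			return err;

		/* eqn would go into the CQ context; irqn can later be
		 * used e.g. for synchronize_irq() on teardown.
		 */
		return eqn;
	}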
+
+unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev)
+{
+ return dev->priv.eq_table->num_comp_vectors;
+}
+EXPORT_SYMBOL(mlx5_comp_vectors_count);
+
+struct cpumask *
+mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
+{
+ /* TODO: consider irq_get_affinity_mask(irq) */
+ return dev->priv.eq_table->irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
+}
+EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
+
+struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ return dev->priv.eq_table->rmap;
+#else
+ return NULL;
+#endif
+}
+
+struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
+{
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq;
+
+ list_for_each_entry(eq, &table->comp_eqs_list, list) {
+ if (eq->core.eqn == eqn)
+ return eq;
+ }
+
+ return ERR_PTR(-ENOENT);
}
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- struct mlx5_eq *eq;
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ int i, max_eqs;
+
+ clear_comp_irqs_affinity_hints(dev);
#ifdef CONFIG_RFS_ACCEL
- if (dev->rmap) {
- free_irq_cpu_rmap(dev->rmap);
- dev->rmap = NULL;
+ if (table->rmap) {
+ free_irq_cpu_rmap(table->rmap);
+ table->rmap = NULL;
}
#endif
- list_for_each_entry(eq, &table->comp_eqs_list, list)
- free_irq(eq->irqn, eq);
-
- free_irq(table->pages_eq.irqn, &table->pages_eq);
- free_irq(table->async_eq.irqn, &table->async_eq);
- free_irq(table->cmd_eq.irqn, &table->cmd_eq);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (MLX5_CAP_GEN(dev, pg))
- free_irq(table->pfault_eq.irqn, &table->pfault_eq);
-#endif
+
+ mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
+ max_eqs = table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
+ for (i = max_eqs - 1; i >= 0; i--) {
+ if (!table->irq_info[i].context)
+ continue;
+ free_irq(pci_irq_vector(dev->pdev, i), table->irq_info[i].context);
+ table->irq_info[i].context = NULL;
+ }
+ mutex_unlock(&table->lock);
+ pci_free_irq_vectors(dev->pdev);
+}
+
+static int alloc_irq_vectors(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_eq_table *table = priv->eq_table;
+ int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
+ MLX5_CAP_GEN(dev, max_num_eqs) :
+ 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int nvec;
+ int err;
+
+ nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+ MLX5_EQ_VEC_COMP_BASE;
+ nvec = min_t(int, nvec, num_eqs);
+ if (nvec <= MLX5_EQ_VEC_COMP_BASE)
+ return -ENOMEM;
+
+ table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL);
+ if (!table->irq_info)
+ return -ENOMEM;
+
+ nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1,
+ nvec, PCI_IRQ_MSIX);
+ if (nvec < 0) {
+ err = nvec;
+ goto err_free_irq_info;
+ }
+
+ table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
+
+ return 0;
+
+err_free_irq_info:
+ kfree(table->irq_info);
+ return err;
+}
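A worked sizing example for alloc_irq_vectors() above, with hypothetical numbers:

	/* Hypothetical walk-through:
	 *   num_ports = 1, num_online_cpus() = 16
	 *   nvec = 1 * 16 + MLX5_EQ_VEC_COMP_BASE
	 *   nvec = min(nvec, num_eqs)        - clamp to device EQ capability
	 *   pci_alloc_irq_vectors() may grant fewer, but never fewer than
	 *   MLX5_EQ_VEC_COMP_BASE + 1, so at least one completion vector
	 *   remains after the control EQs take their slots;
	 *   num_comp_vectors = granted - MLX5_EQ_VEC_COMP_BASE
	 */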
+
+static void free_irq_vectors(struct mlx5_core_dev *dev)
+{
+ struct mlx5_priv *priv = &dev->priv;
+
pci_free_irq_vectors(dev->pdev);
+ kfree(priv->eq_table->irq_info);
+}
+
+int mlx5_eq_table_create(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = alloc_irq_vectors(dev);
+ if (err) {
+ mlx5_core_err(dev, "alloc irq vectors failed\n");
+ return err;
+ }
+
+ err = create_async_eqs(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to create async EQs\n");
+ goto err_async_eqs;
+ }
+
+ err = create_comp_eqs(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to create completion EQs\n");
+ goto err_comp_eqs;
+ }
+
+ return 0;
+err_comp_eqs:
+ destroy_async_eqs(dev);
+err_async_eqs:
+ free_irq_vectors(dev);
+ return err;
+}
+
+void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
+{
+ destroy_comp_eqs(dev);
+ destroy_async_eqs(dev);
+ free_irq_vectors(dev);
+}
+
+int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
+{
+ struct mlx5_eq_table *eqt = dev->priv.eq_table;
+
+ if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
+ return -EINVAL;
+
+ return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
+}
+
+int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
+{
+ struct mlx5_eq_table *eqt = dev->priv.eq_table;
+
+ if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
+ return -EINVAL;
+
+ return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index d004957328f9..a44ea7b85614 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
@@ -1567,7 +1568,6 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
/* Mark this vport as disabled to discard new events */
vport->enabled = false;
- synchronize_irq(pci_irq_vector(esw->dev->pdev, MLX5_EQ_VEC_ASYNC));
/* Wait for current already scheduled events to complete */
flush_workqueue(esw->work_queue);
/* Disable events from this vport */
@@ -1593,10 +1593,25 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
mutex_unlock(&esw->state_lock);
}
+static int eswitch_vport_event(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
+ struct mlx5_eqe *eqe = data;
+ struct mlx5_vport *vport;
+ u16 vport_num;
+
+ vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
+ vport = &esw->vports[vport_num];
+ if (vport->enabled)
+ queue_work(esw->work_queue, &vport->vport_change_handler);
+
+ return NOTIFY_OK;
+}
+
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
-
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
int err;
@@ -1615,13 +1630,16 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
+
esw->mode = mode;
+ mlx5_lag_update(esw->dev);
+
if (mode == SRIOV_LEGACY) {
err = esw_create_legacy_fdb_table(esw);
} else {
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-
err = esw_offloads_init(esw, nvfs + 1);
}
@@ -1640,6 +1658,11 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
for (i = 0; i <= nvfs; i++)
esw_enable_vport(esw, i, enabled_events);
+ if (mode == SRIOV_LEGACY) {
+ MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+ mlx5_eq_notifier_register(esw->dev, &esw->nb);
+ }
+
esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
esw->enabled_vports);
return 0;
@@ -1647,8 +1670,10 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
abort:
esw->mode = SRIOV_NONE;
- if (mode == SRIOV_OFFLOADS)
+ if (mode == SRIOV_OFFLOADS) {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
+ }
return err;
}
@@ -1669,6 +1694,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
mc_promisc = &esw->mc_promisc;
nvports = esw->enabled_vports;
+ if (esw->mode == SRIOV_LEGACY)
+ mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+
for (i = 0; i < esw->total_vports; i++)
esw_disable_vport(esw, i);
@@ -1685,8 +1713,12 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
old_mode = esw->mode;
esw->mode = SRIOV_NONE;
- if (old_mode == SRIOV_OFFLOADS)
+ mlx5_lag_update(esw->dev);
+
+ if (old_mode == SRIOV_OFFLOADS) {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
+ }
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
@@ -1777,23 +1809,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
kfree(esw);
}
-void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
-{
- struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
- u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
- struct mlx5_vport *vport;
-
- if (!esw) {
- pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
- vport_num);
- return;
- }
-
- vport = &esw->vports[vport_num];
- if (vport->enabled)
- queue_work(esw->work_queue, &vport->vport_change_handler);
-}
-
/* Vport Administration */
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
@@ -2219,3 +2234,14 @@ u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
+
+bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
+{
+ if ((dev0->priv.eswitch->mode == SRIOV_NONE &&
+ dev1->priv.eswitch->mode == SRIOV_NONE) ||
+ (dev0->priv.eswitch->mode == SRIOV_OFFLOADS &&
+ dev1->priv.eswitch->mode == SRIOV_OFFLOADS))
+ return true;
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index aaafc9f17115..9c89eea9b2c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -143,6 +143,8 @@ struct mlx5_eswitch_fdb {
struct offloads_fdb {
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
+ struct mlx5_flow_group *peer_miss_grp;
+ struct mlx5_flow_handle **peer_miss_rules;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_handle *miss_rule_uni;
struct mlx5_flow_handle *miss_rule_multi;
@@ -165,6 +167,8 @@ struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
struct mlx5_eswitch_rep *vport_reps;
+ struct list_head peer_flows;
+ struct mutex peer_mutex;
DECLARE_HASHTABLE(encap_tbl, 8);
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
u8 inline_mode;
@@ -181,6 +185,7 @@ struct esw_mc_addr { /* SRIOV only */
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
+ struct mlx5_nb nb;
struct mlx5_eswitch_fdb fdb_table;
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
struct workqueue_struct *work_queue;
@@ -211,7 +216,6 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
-void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
@@ -281,13 +285,17 @@ enum mlx5_flow_match_level {
/* current maximum for flow based vport multicasting */
#define MLX5_MAX_FLOW_FWD_VPORTS 2
+enum {
+ MLX5_ESW_DEST_ENCAP = BIT(0),
+ MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
+};
+
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
- struct mlx5_eswitch_rep *out_rep[MLX5_MAX_FLOW_FWD_VPORTS];
- struct mlx5_core_dev *out_mdev[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5_core_dev *in_mdev;
+ struct mlx5_core_dev *counter_dev;
- int mirror_count;
+ int split_count;
int out_count;
int action;
@@ -296,7 +304,12 @@ struct mlx5_esw_flow_attr {
u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
u8 total_vlan;
bool vlan_handled;
- u32 encap_id;
+ struct {
+ u32 flags;
+ struct mlx5_eswitch_rep *rep;
+ struct mlx5_core_dev *mdev;
+ u32 encap_id;
+ } dests[MLX5_MAX_FLOW_FWD_VPORTS];
u32 mod_hdr_id;
u8 match_level;
struct mlx5_fc *counter;
@@ -338,6 +351,9 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2);
}
+bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0,
+ struct mlx5_core_dev *dev1);
+
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
#define esw_info(dev, format, ...) \
@@ -352,9 +368,9 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
-static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {}
static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; }
static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {}
+static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
#define FDB_MAX_CHAIN 1
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9eac137790f5..53065b6ae593 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -39,6 +39,7 @@
#include "eswitch.h"
#include "en.h"
#include "fs_core.h"
+#include "lib/devcom.h"
enum {
FDB_FAST_PATH = 0,
@@ -81,7 +82,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
- bool mirror = !!(attr->mirror_count);
+ bool split = !!(attr->split_count);
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int j, i = 0;
@@ -120,13 +121,21 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
dest[i].ft = ft;
i++;
} else {
- for (j = attr->mirror_count; j < attr->out_count; j++) {
+ for (j = attr->split_count; j < attr->out_count; j++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->out_rep[j]->vport;
+ dest[i].vport.num = attr->dests[j].rep->vport;
dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
- dest[i].vport.vhca_id_valid =
- !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+ MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ dest[i].vport.flags |=
+ MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act.reformat_id = attr->dests[j].encap_id;
+ dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+ dest[i].vport.reformat_id =
+ attr->dests[j].encap_id;
+ }
i++;
}
}
@@ -163,10 +172,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id;
- if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
- flow_act.reformat_id = attr->encap_id;
-
- fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
@@ -181,7 +187,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
return rule;
err_add_rule:
- esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
if (attr->dest_chain)
esw_put_prio_table(esw, attr->dest_chain, 1, 0);
@@ -215,12 +221,17 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- for (i = 0; i < attr->mirror_count; i++) {
+ for (i = 0; i < attr->split_count; i++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->out_rep[i]->vport;
+ dest[i].vport.num = attr->dests[i].rep->vport;
dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
- dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch);
+ MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
+ dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+ dest[i].vport.reformat_id = attr->dests[i].encap_id;
+ }
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = fwd_fdb,
@@ -268,7 +279,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr,
bool fwd_rule)
{
- bool mirror = (attr->mirror_count > 0);
+ bool split = (attr->split_count > 0);
mlx5_del_flow_rules(rule);
esw->offloads.num_flows--;
@@ -277,7 +288,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
esw_put_prio_table(esw, attr->chain, attr->prio, 1);
esw_put_prio_table(esw, attr->chain, attr->prio, 0);
} else {
- esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+ esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
if (attr->dest_chain)
esw_put_prio_table(esw, attr->dest_chain, 1, 0);
}
@@ -325,7 +336,7 @@ esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
in_rep = attr->in_rep;
- out_rep = attr->out_rep[0];
+ out_rep = attr->dests[0].rep;
if (push)
vport = in_rep;
@@ -346,7 +357,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
goto out_notsupp;
in_rep = attr->in_rep;
- out_rep = attr->out_rep[0];
+ out_rep = attr->dests[0].rep;
if (push && in_rep->vport == FDB_UPLINK_VPORT)
goto out_notsupp;
@@ -398,7 +409,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
+ if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) {
vport->vlan_refcount++;
attr->vlan_handled = true;
}
@@ -458,7 +469,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
+ if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT)
vport->vlan_refcount--;
return 0;
@@ -531,6 +542,98 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
+static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_destination *dest)
+{
+ void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(peer_dev, vhca_id));
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest->vport.num = 0;
+ dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
+ dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+}
+
+static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ struct mlx5_core_dev *peer_dev)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_handle **flows;
+ struct mlx5_flow_handle *flow;
+ struct mlx5_flow_spec *spec;
+ /* total vports is the same for both e-switches */
+ int nvports = esw->total_vports;
+ void *misc;
+ int err, i;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ peer_miss_rules_setup(peer_dev, spec, &dest);
+
+ flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
+ if (!flows) {
+ err = -ENOMEM;
+ goto alloc_flows_err;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ for (i = 1; i < nvports; i++) {
+ MLX5_SET(fte_match_set_misc, misc, source_port, i);
+ flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
+ goto add_flow_err;
+ }
+ flows[i] = flow;
+ }
+
+ esw->fdb_table.offloads.peer_miss_rules = flows;
+
+ kvfree(spec);
+ return 0;
+
+add_flow_err:
+ for (i--; i > 0; i--)
+ mlx5_del_flow_rules(flows[i]);
+ kvfree(flows);
+alloc_flows_err:
+ kvfree(spec);
+ return err;
+}
+
+static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_handle **flows;
+ int i;
+
+ flows = esw->fdb_table.offloads.peer_miss_rules;
+
+ for (i = 1; i < esw->total_vports; i++)
+ mlx5_del_flow_rules(flows[i]);
+
+ kvfree(flows);
+}
+
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
struct mlx5_flow_act flow_act = {0};
@@ -801,7 +904,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
esw->fdb_table.offloads.fdb_left[i] =
ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
- table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;
+ table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
+ esw->total_vports;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed if the FW allows that.
@@ -856,6 +960,34 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
}
esw->fdb_table.offloads.send_to_vport_grp = g;
+ /* create peer esw miss group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_port);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_eswitch_owner_vhca_id);
+
+ MLX5_SET(create_flow_group_in, flow_group_in,
+ source_eswitch_owner_vhca_id_valid, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ix + esw->total_vports - 1);
+ ix += esw->total_vports;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
+ goto peer_miss_err;
+ }
+ esw->fdb_table.offloads.peer_miss_grp = g;
+
/* create miss group */
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
@@ -888,6 +1020,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
miss_rule_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
+peer_miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
esw_destroy_offloads_fast_fdb_tables(esw);
@@ -907,6 +1041,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
@@ -1163,6 +1298,105 @@ err_reps:
return err;
}
+#define ESW_OFFLOADS_DEVCOM_PAIR (0)
+#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
+
+static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch *peer_esw)
+{
+ int err;
+
+ err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
+
+static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
+{
+ mlx5e_tc_clean_fdb_peer_flows(esw);
+ esw_del_fdb_peer_miss_rules(esw);
+}
+
+static int mlx5_esw_offloads_devcom_event(int event,
+ void *my_data,
+ void *event_data)
+{
+ struct mlx5_eswitch *esw = my_data;
+ struct mlx5_eswitch *peer_esw = event_data;
+ struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+ int err;
+
+ switch (event) {
+ case ESW_OFFLOADS_DEVCOM_PAIR:
+ err = mlx5_esw_offloads_pair(esw, peer_esw);
+ if (err)
+ goto err_out;
+
+ err = mlx5_esw_offloads_pair(peer_esw, esw);
+ if (err)
+ goto err_pair;
+
+ mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
+ break;
+
+ case ESW_OFFLOADS_DEVCOM_UNPAIR:
+ if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+ break;
+
+ mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+ mlx5_esw_offloads_unpair(peer_esw);
+ mlx5_esw_offloads_unpair(esw);
+ break;
+ }
+
+ return 0;
+
+err_pair:
+ mlx5_esw_offloads_unpair(esw);
+
+err_out:
+ mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
+ event, err);
+ return err;
+}
+
+static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+
+ INIT_LIST_HEAD(&esw->offloads.peer_flows);
+ mutex_init(&esw->offloads.peer_mutex);
+
+ if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ return;
+
+ mlx5_devcom_register_component(devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS,
+ mlx5_esw_offloads_devcom_event,
+ esw);
+
+ mlx5_devcom_send_event(devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS,
+ ESW_OFFLOADS_DEVCOM_PAIR, esw);
+}
+
+static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+
+ if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ return;
+
+ mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+ ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
+
+ mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+}
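Putting the devcom pieces together, the pairing handshake between two merged-eswitch PFs is assumed to run roughly as below; the exact side on which each callback fires is an assumption, not spelled out by this patch:

	/* PF0 probes:  register component, send PAIR
	 *              -> peer not registered yet, no-op
	 * PF1 probes:  register component, send PAIR
	 *              -> mlx5_esw_offloads_devcom_event(PAIR) pairs both:
	 *                   esw_add_fdb_peer_miss_rules(esw0, dev1)
	 *                   esw_add_fdb_peer_miss_rules(esw1, dev0)
	 *                   mlx5_devcom_set_paired(..., true)
	 * either PF removed:  send UNPAIR
	 *              -> set_paired(false), then unpair both e-switches
	 */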
+
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
@@ -1185,6 +1419,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
if (err)
goto err_reps;
+ esw_offloads_devcom_init(esw);
return 0;
err_reps:
@@ -1215,14 +1450,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
}
}
- /* enable back PF RoCE */
- mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-
return err;
}
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
+ esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_reps(esw, nvports);
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
new file mode 100644
index 000000000000..fbc42b7252a9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2018 Mellanox Technologies
+
+#include <linux/mlx5/driver.h>
+
+#include "mlx5_core.h"
+#include "lib/eq.h"
+#include "lib/mlx5.h"
+
+struct mlx5_event_nb {
+ struct mlx5_nb nb;
+ void *ctx;
+};
+
+/* General event handlers for the low-level mlx5_core driver
+ *
+ * Other major feature-specific events, such as
+ * clock/eswitch/fpga/FW trace and many others, are handled elsewhere, with
+ * separate notifier callbacks, specifically by those mlx5 components.
+ */
+static int any_notifier(struct notifier_block *, unsigned long, void *);
+static int temp_warn(struct notifier_block *, unsigned long, void *);
+static int port_module(struct notifier_block *, unsigned long, void *);
+
+/* handler which forwards the event to events->nh, driver notifiers */
+static int forward_event(struct notifier_block *, unsigned long, void *);
+
+static struct mlx5_nb events_nbs_ref[] = {
+	/* Events to be processed by mlx5_core */
+ {.nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY },
+ {.nb.notifier_call = temp_warn, .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT },
+ {.nb.notifier_call = port_module, .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT },
+
+ /* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT },
+ /* QP/WQ resource events to forward */
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_DCT_DRAINED },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_COMM_EST },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SQ_DRAINED },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_LAST_WQE },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_CATAS_ERROR },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG_FAILED },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_ACCESS_ERROR },
+ /* SRQ events */
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_CATAS_ERROR },
+ {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_RQ_LIMIT },
+};
+
+struct mlx5_events {
+ struct mlx5_core_dev *dev;
+ struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)];
+ /* driver notifier chain */
+ struct atomic_notifier_head nh;
+ /* port module events stats */
+ struct mlx5_pme_stats pme_stats;
+};
+
+static const char *eqe_type_str(u8 type)
+{
+ switch (type) {
+ case MLX5_EVENT_TYPE_COMP:
+ return "MLX5_EVENT_TYPE_COMP";
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ return "MLX5_EVENT_TYPE_PATH_MIG";
+ case MLX5_EVENT_TYPE_COMM_EST:
+ return "MLX5_EVENT_TYPE_COMM_EST";
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ return "MLX5_EVENT_TYPE_SQ_DRAINED";
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
+ case MLX5_EVENT_TYPE_CQ_ERROR:
+ return "MLX5_EVENT_TYPE_CQ_ERROR";
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
+ case MLX5_EVENT_TYPE_INTERNAL_ERROR:
+ return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
+ case MLX5_EVENT_TYPE_PORT_CHANGE:
+ return "MLX5_EVENT_TYPE_PORT_CHANGE";
+ case MLX5_EVENT_TYPE_GPIO_EVENT:
+ return "MLX5_EVENT_TYPE_GPIO_EVENT";
+ case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
+ return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
+ case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
+ return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
+ case MLX5_EVENT_TYPE_REMOTE_CONFIG:
+ return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
+ case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
+ return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
+ case MLX5_EVENT_TYPE_STALL_EVENT:
+ return "MLX5_EVENT_TYPE_STALL_EVENT";
+ case MLX5_EVENT_TYPE_CMD:
+ return "MLX5_EVENT_TYPE_CMD";
+ case MLX5_EVENT_TYPE_PAGE_REQUEST:
+ return "MLX5_EVENT_TYPE_PAGE_REQUEST";
+ case MLX5_EVENT_TYPE_PAGE_FAULT:
+ return "MLX5_EVENT_TYPE_PAGE_FAULT";
+ case MLX5_EVENT_TYPE_PPS_EVENT:
+ return "MLX5_EVENT_TYPE_PPS_EVENT";
+ case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
+ return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
+ case MLX5_EVENT_TYPE_FPGA_ERROR:
+ return "MLX5_EVENT_TYPE_FPGA_ERROR";
+ case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
+ return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
+ case MLX5_EVENT_TYPE_GENERAL_EVENT:
+ return "MLX5_EVENT_TYPE_GENERAL_EVENT";
+ case MLX5_EVENT_TYPE_MONITOR_COUNTER:
+ return "MLX5_EVENT_TYPE_MONITOR_COUNTER";
+ case MLX5_EVENT_TYPE_DEVICE_TRACER:
+ return "MLX5_EVENT_TYPE_DEVICE_TRACER";
+ default:
+ return "Unrecognized event";
+ }
+}
+
+/* handles all FW events, type == eqe->type */
+static int any_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
+ struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_eqe *eqe = data;
+
+ mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d)\n",
+ eqe_type_str(eqe->type), eqe->sub_type);
+ return NOTIFY_OK;
+}
+
+/* type == MLX5_EVENT_TYPE_TEMP_WARN_EVENT */
+static int temp_warn(struct notifier_block *nb, unsigned long type, void *data)
+{
+ struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
+ struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_eqe *eqe = data;
+ u64 value_lsb;
+ u64 value_msb;
+
+ value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb);
+ value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb);
+
+ mlx5_core_warn(events->dev,
+ "High temperature on sensors with bit set %llx %llx",
+ value_msb, value_lsb);
+
+ return NOTIFY_OK;
+}
+
+/* MLX5_EVENT_TYPE_PORT_MODULE_EVENT */
+static const char *mlx5_pme_status_to_string(enum port_module_event_status_type status)
+{
+ switch (status) {
+ case MLX5_MODULE_STATUS_PLUGGED:
+ return "Cable plugged";
+ case MLX5_MODULE_STATUS_UNPLUGGED:
+ return "Cable unplugged";
+ case MLX5_MODULE_STATUS_ERROR:
+ return "Cable error";
+ case MLX5_MODULE_STATUS_DISABLED:
+ return "Cable disabled";
+ default:
+ return "Unknown status";
+ }
+}
+
+static const char *mlx5_pme_error_to_string(enum port_module_event_error_type error)
+{
+ switch (error) {
+ case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
+ return "Power budget exceeded";
+ case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX:
+ return "Long Range for non MLNX cable";
+ case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
+ return "Bus stuck (I2C or data shorted)";
+ case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
+ return "No EEPROM/retry timeout";
+ case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
+ return "Enforce part number list";
+ case MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER:
+ return "Unknown identifier";
+ case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
+ return "High Temperature";
+ case MLX5_MODULE_EVENT_ERROR_BAD_CABLE:
+ return "Bad or shorted cable/module";
+ case MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED:
+ return "One or more network ports have been powered down due to insufficient/unadvertised power on the PCIe slot";
+ default:
+ return "Unknown error";
+ }
+}
+
+/* type == MLX5_EVENT_TYPE_PORT_MODULE_EVENT */
+static int port_module(struct notifier_block *nb, unsigned long type, void *data)
+{
+ struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
+ struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_eqe *eqe = data;
+
+ enum port_module_event_status_type module_status;
+ enum port_module_event_error_type error_type;
+ struct mlx5_eqe_port_module *module_event_eqe;
+ const char *status_str, *error_str;
+ u8 module_num;
+
+ module_event_eqe = &eqe->data.port_module;
+ module_num = module_event_eqe->module;
+ module_status = module_event_eqe->module_status &
+ PORT_MODULE_EVENT_MODULE_STATUS_MASK;
+ error_type = module_event_eqe->error_type &
+ PORT_MODULE_EVENT_ERROR_TYPE_MASK;
+
+ if (module_status < MLX5_MODULE_STATUS_NUM)
+ events->pme_stats.status_counters[module_status]++;
+ status_str = mlx5_pme_status_to_string(module_status);
+
+ if (module_status == MLX5_MODULE_STATUS_ERROR) {
+ if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
+ events->pme_stats.error_counters[error_type]++;
+ error_str = mlx5_pme_error_to_string(error_type);
+ }
+
+ if (!printk_ratelimit())
+ return NOTIFY_OK;
+
+ if (module_status == MLX5_MODULE_STATUS_ERROR)
+ mlx5_core_err(events->dev,
+ "Port module event[error]: module %u, %s, %s\n",
+ module_num, status_str, error_str);
+ else
+ mlx5_core_info(events->dev,
+ "Port module event: module %u, %s\n",
+ module_num, status_str);
+
+ return NOTIFY_OK;
+}
+
+void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats)
+{
+ *stats = dev->priv.events->pme_stats;
+}
+
+/* forward event as is to registered interfaces (mlx5e/mlx5_ib) */
+static int forward_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb);
+ struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_eqe *eqe = data;
+
+ mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d) forward to interfaces\n",
+ eqe_type_str(eqe->type), eqe->sub_type);
+ atomic_notifier_call_chain(&events->nh, event, data);
+ return NOTIFY_OK;
+}
+
+int mlx5_events_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_events *events = kzalloc(sizeof(*events), GFP_KERNEL);
+
+ if (!events)
+ return -ENOMEM;
+
+ ATOMIC_INIT_NOTIFIER_HEAD(&events->nh);
+ events->dev = dev;
+ dev->priv.events = events;
+ return 0;
+}
+
+void mlx5_events_cleanup(struct mlx5_core_dev *dev)
+{
+ kvfree(dev->priv.events);
+}
+
+void mlx5_events_start(struct mlx5_core_dev *dev)
+{
+ struct mlx5_events *events = dev->priv.events;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(events_nbs_ref); i++) {
+ events->notifiers[i].nb = events_nbs_ref[i];
+ events->notifiers[i].ctx = events;
+ mlx5_eq_notifier_register(dev, &events->notifiers[i].nb);
+ }
+}
+
+void mlx5_events_stop(struct mlx5_core_dev *dev)
+{
+ struct mlx5_events *events = dev->priv.events;
+ int i;
+
+ for (i = ARRAY_SIZE(events_nbs_ref) - 1; i >= 0 ; i--)
+ mlx5_eq_notifier_unregister(dev, &events->notifiers[i].nb);
+}
+
+int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return atomic_notifier_chain_register(&events->nh, nb);
+}
+EXPORT_SYMBOL(mlx5_notifier_register);
+
+int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return atomic_notifier_chain_unregister(&events->nh, nb);
+}
+EXPORT_SYMBOL(mlx5_notifier_unregister);
+
+int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data)
+{
+ return atomic_notifier_call_chain(&events->nh, event, data);
+}
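Consumers outside mlx5_core (mlx5e/mlx5_ib) are expected to use the exported pair above with a plain notifier_block and receive only the event types wired to forward_event() in events_nbs_ref. A hedged consumer sketch; names are hypothetical:

	static int hypothetical_iface_event(struct notifier_block *nb,
					    unsigned long event, void *data)
	{
		if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
			struct mlx5_eqe *eqe = data;

			pr_debug("port %d changed\n", eqe->data.port.port);
		}
		return NOTIFY_OK;
	}

	static struct notifier_block hypothetical_iface_nb = {
		.notifier_call = hypothetical_iface_event,
	};

	/* mlx5_notifier_register(dev, &hypothetical_iface_nb);
	 * ...
	 * mlx5_notifier_unregister(dev, &hypothetical_iface_nb);
	 */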
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 8ca1d1949d93..873541ef4c1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -334,7 +334,7 @@ static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn,
{
u8 opcode, status = 0;
- opcode = cqe->op_own >> 4;
+ opcode = get_cqe_opcode(cqe);
switch (opcode) {
case MLX5_CQE_REQ_ERR:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 436a8136f26f..27c5f6c7d36a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -36,6 +36,7 @@
#include "mlx5_core.h"
#include "lib/mlx5.h"
+#include "lib/eq.h"
#include "fpga/core.h"
#include "fpga/conn.h"
@@ -145,6 +146,22 @@ static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev)
return 0;
}
+static int mlx5_fpga_event(struct mlx5_fpga_device *, unsigned long, void *);
+
+static int fpga_err_event(struct notifier_block *nb, unsigned long event, void *eqe)
+{
+ struct mlx5_fpga_device *fdev = mlx5_nb_cof(nb, struct mlx5_fpga_device, fpga_err_nb);
+
+ return mlx5_fpga_event(fdev, event, eqe);
+}
+
+static int fpga_qp_err_event(struct notifier_block *nb, unsigned long event, void *eqe)
+{
+ struct mlx5_fpga_device *fdev = mlx5_nb_cof(nb, struct mlx5_fpga_device, fpga_qp_err_nb);
+
+ return mlx5_fpga_event(fdev, event, eqe);
+}
+
int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
{
struct mlx5_fpga_device *fdev = mdev->fpga;
@@ -185,6 +202,11 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
if (err)
goto out;
+ MLX5_NB_INIT(&fdev->fpga_err_nb, fpga_err_event, FPGA_ERROR);
+ MLX5_NB_INIT(&fdev->fpga_qp_err_nb, fpga_qp_err_event, FPGA_QP_ERROR);
+ mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_err_nb);
+ mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_qp_err_nb);
+
err = mlx5_fpga_conn_device_init(fdev);
if (err)
goto err_rsvd_gid;
@@ -201,6 +223,8 @@ err_conn_init:
mlx5_fpga_conn_device_cleanup(fdev);
err_rsvd_gid:
+ mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb);
+ mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb);
mlx5_core_unreserve_gids(mdev, max_num_qps);
out:
spin_lock_irqsave(&fdev->state_lock, flags);
@@ -256,6 +280,9 @@ void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
}
mlx5_fpga_conn_device_cleanup(fdev);
+ mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb);
+ mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb);
+
max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps);
mlx5_core_unreserve_gids(mdev, max_num_qps);
}
@@ -283,9 +310,10 @@ static const char *mlx5_fpga_qp_syndrome_to_string(u8 syndrome)
return "Unknown";
}
-void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
+static int mlx5_fpga_event(struct mlx5_fpga_device *fdev,
+ unsigned long event, void *eqe)
{
- struct mlx5_fpga_device *fdev = mdev->fpga;
+ void *data = ((struct mlx5_eqe *)eqe)->data.raw;
const char *event_name;
bool teardown = false;
unsigned long flags;
@@ -303,9 +331,7 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn);
break;
default:
- mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n",
- event);
- return;
+ return NOTIFY_DONE;
}
spin_lock_irqsave(&fdev->state_lock, flags);
@@ -326,4 +352,6 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
*/
if (teardown)
mlx5_trigger_health_work(fdev->mdev);
+
+ return NOTIFY_OK;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 3e2355c8df3f..7e2e871dbf83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -35,11 +35,16 @@
#ifdef CONFIG_MLX5_FPGA
+#include <linux/mlx5/eq.h>
+
+#include "lib/eq.h"
#include "fpga/cmd.h"
/* Represents an Innova device */
struct mlx5_fpga_device {
struct mlx5_core_dev *mdev;
+ struct mlx5_nb fpga_err_nb;
+ struct mlx5_nb fpga_qp_err_nb;
spinlock_t state_lock; /* Protects state transitions */
enum mlx5_fpga_status state;
enum mlx5_fpga_image last_admin_image;
@@ -82,7 +87,6 @@ int mlx5_fpga_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
int mlx5_fpga_device_start(struct mlx5_core_dev *mdev);
void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev);
-void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data);
#else
@@ -104,11 +108,6 @@ static inline void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev)
{
}
-static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event,
- void *data)
-{
-}
-
#endif
#endif /* __MLX5_FPGA_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 08a891f9aade..c44ccb67c4a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -308,22 +308,68 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
+static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
+ struct fs_fte *fte, bool *extended_dest)
+{
+ int fw_log_max_fdb_encap_uplink =
+ MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
+ int num_fwd_destinations = 0;
+ struct mlx5_flow_rule *dst;
+ int num_encap = 0;
+
+ *extended_dest = false;
+ if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return 0;
+
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+ dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+ num_encap++;
+ num_fwd_destinations++;
+ }
+ if (num_fwd_destinations > 1 && num_encap > 0)
+ *extended_dest = true;
+
+ if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
+ mlx5_core_warn(dev, "FW does not support extended destination");
+ return -EOPNOTSUPP;
+ }
+ if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
+ mlx5_core_warn(dev, "FW does not support more than %d encaps",
+ 1 << fw_log_max_fdb_encap_uplink);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
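A worked pass through the logic above, with a hypothetical FTE:

	/* dests: { vport A, flags = MLX5_FLOW_DEST_VPORT_REFORMAT_ID },
	 *        { vport B }, { counter }
	 *   -> counter skipped, num_fwd_destinations = 2, num_encap = 1
	 *   -> *extended_dest = true (multiple forwards + at least one encap)
	 * Requires MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink) > 0 and
	 * num_encap <= 1 << log_max_fdb_encap_uplink.
	 * mlx5_cmd_set_fte() below then sizes each destination as
	 * extended_dest_format and moves PACKET_REFORMAT out of the
	 * flow-context action bits into the per-destination fields.
	 */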
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
int opmod, int modify_mask,
struct mlx5_flow_table *ft,
unsigned group_id,
struct fs_fte *fte)
{
- unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
- fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+ bool extended_dest = false;
struct mlx5_flow_rule *dst;
void *in_flow_context, *vlan;
void *in_match_value;
+ unsigned int inlen;
+ int dst_cnt_size;
void *in_dests;
u32 *in;
int err;
+ if (mlx5_set_extended_dest(dev, fte, &extended_dest))
+ return -EOPNOTSUPP;
+
+ if (!extended_dest)
+ dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
+ else
+ dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
+
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -343,9 +389,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag);
- MLX5_SET(flow_context, in_flow_context, action, fte->action.action);
- MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
- fte->action.reformat_id);
+ MLX5_SET(flow_context, in_flow_context, extended_destination,
+ extended_dest);
+ if (extended_dest) {
+ u32 action;
+
+ action = fte->action.action &
+ ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ MLX5_SET(flow_context, in_flow_context, action, action);
+ } else {
+ MLX5_SET(flow_context, in_flow_context, action,
+ fte->action.action);
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ fte->action.reformat_id);
+ }
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_id);
@@ -387,10 +444,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
id = dst->dest_attr.vport.num;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
- dst->dest_attr.vport.vhca_id_valid);
+ !!(dst->dest_attr.vport.flags &
+ MLX5_FLOW_DEST_VPORT_VHCA_ID));
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
dst->dest_attr.vport.vhca_id);
+ if (extended_dest) {
+ MLX5_SET(dest_format_struct, in_dests,
+ packet_reformat,
+ !!(dst->dest_attr.vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
+ MLX5_SET(extended_dest_format, in_dests,
+ packet_reformat_id,
+ dst->dest_attr.vport.reformat_id);
+ }
break;
default:
id = dst->dest_attr.tir_num;
@@ -399,7 +466,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(dest_format_struct, in_dests, destination_type,
type);
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
- in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ in_dests += dst_cnt_size;
list_size++;
}
@@ -420,7 +487,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
dst->dest_attr.counter_id);
- in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ in_dests += dst_cnt_size;
list_size++;
}
if (list_size > max_list_size) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 9d73eb955f75..79f122b45def 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -452,7 +452,7 @@ static void del_sw_hw_rule(struct fs_node *node)
if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
- modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST),
+ modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
update_fte = true;
}
out:
@@ -1373,7 +1373,10 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
{
if (d1->type == d2->type) {
if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
- d1->vport.num == d2->vport.num) ||
+ d1->vport.num == d2->vport.num &&
+ d1->vport.flags == d2->vport.flags &&
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
+ (d1->vport.reformat_id == d2->vport.reformat_id) : true)) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
d1->ft == d2->ft) ||
(d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
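For orientation, here is a minimal sketch of how a caller would populate the per-destination fields that the comparison above now honors; vport_num, peer_vhca_id, and encap_id are hypothetical locals, and the flag bits are the ones this patch starts testing:

	struct mlx5_flow_destination dest = {};

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport_num;        /* hypothetical local */
	dest.vport.vhca_id = peer_vhca_id; /* hypothetical local */
	dest.vport.reformat_id = encap_id; /* hypothetical local */
	dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID |
			   MLX5_FLOW_DEST_VPORT_REFORMAT_ID;

With the reformat flag set, two such destinations only compare equal when their reformat IDs match as well, which is what keeps per-destination encap rules from being wrongly deduplicated.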
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index b51ad217da32..2dc86347af58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -145,29 +145,6 @@ struct mlx5_flow_table {
struct rhltable fgs_hash;
};
-struct mlx5_fc_cache {
- u64 packets;
- u64 bytes;
- u64 lastuse;
-};
-
-struct mlx5_fc {
- struct list_head list;
- struct llist_node addlist;
- struct llist_node dellist;
-
- /* last{packets,bytes} members are used when calculating the delta since
- * last reading
- */
- u64 lastpackets;
- u64 lastbytes;
-
- u32 id;
- bool aging;
-
- struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
-};
-
struct mlx5_ft_underlay_qp {
struct list_head list;
u32 qpn;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 32accd6b041b..c6c28f56aa29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -41,6 +41,29 @@
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ struct list_head list;
+ struct llist_node addlist;
+ struct llist_node dellist;
+
+ /* last{packets,bytes} members are used when calculating the delta since
+ * last reading
+ */
+ u64 lastpackets;
+ u64 lastbytes;
+
+ u32 id;
+ bool aging;
+
+ struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
+};
+
/* locking scheme:
*
* It is the responsibility of the user to prevent concurrent calls or bad
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 43118de8ee99..196c07383082 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -38,6 +38,8 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
+#include "lib/mlx5.h"
enum {
MLX5_HEALTH_POLL_INTERVAL = 2 * HZ,
@@ -78,29 +80,6 @@ void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state)
&dev->iseg->cmdq_addr_l_sz);
}
-static void trigger_cmd_completions(struct mlx5_core_dev *dev)
-{
- unsigned long flags;
- u64 vector;
-
- /* wait for pending handlers to complete */
- synchronize_irq(pci_irq_vector(dev->pdev, MLX5_EQ_VEC_CMD));
- spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
- vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
- if (!vector)
- goto no_trig;
-
- vector |= MLX5_TRIGGERED_CMD_COMP;
- spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
-
- mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
- mlx5_cmd_comp_handler(dev, vector, true);
- return;
-
-no_trig:
- spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
-}
-
static int in_fatal(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -124,10 +103,10 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
mlx5_core_err(dev, "start\n");
if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
- trigger_cmd_completions(dev);
+ mlx5_cmd_trigger_completions(dev);
}
- mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1);
+ mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
mlx5_core_err(dev, "end\n");
unlock:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 11dabd62e2c7..bfc0f6581729 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -87,7 +87,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
netdev->mtu = max_mtu;
- mlx5e_build_nic_params(mdev, &priv->channels.params,
+ mlx5e_build_nic_params(mdev, &priv->rss_params, &priv->channels.params,
mlx5e_get_netdev_max_channels(netdev),
netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 582b2f18010a..3a6baed722d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -34,11 +34,15 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
+#include "eswitch.h"
enum {
- MLX5_LAG_FLAG_BONDED = 1 << 0,
+ MLX5_LAG_FLAG_ROCE = 1 << 0,
+ MLX5_LAG_FLAG_SRIOV = 1 << 1,
};
+#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV)
+
struct lag_func {
struct mlx5_core_dev *dev;
struct net_device *netdev;
@@ -61,11 +65,6 @@ struct mlx5_lag {
struct lag_tracker tracker;
struct delayed_work bond_work;
struct notifier_block nb;
-
- /* Admin state. Allow lag only if allowed is true
- * even if network conditions for lag were met
- */
- bool allowed;
};
/* General purpose, use for short periods of time.
@@ -165,9 +164,19 @@ static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
return -1;
}
-static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
+}
+
+static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
+}
+
+static bool __mlx5_lag_is_active(struct mlx5_lag *ldev)
{
- return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
+ return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
}
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
@@ -186,36 +195,131 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
*port2 = 1;
}
-static void mlx5_activate_lag(struct mlx5_lag *ldev,
- struct lag_tracker *tracker)
+static void mlx5_modify_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker)
{
struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ u8 v2p_port1, v2p_port2;
int err;
- ldev->flags |= MLX5_LAG_FLAG_BONDED;
+ mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
+ &v2p_port2);
+
+ if (v2p_port1 != ldev->v2p_map[0] ||
+ v2p_port2 != ldev->v2p_map[1]) {
+ ldev->v2p_map[0] = v2p_port1;
+ ldev->v2p_map[1] = v2p_port2;
+
+ mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
+ ldev->v2p_map[0], ldev->v2p_map[1]);
+
+ err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+ if (err)
+ mlx5_core_err(dev0,
+ "Failed to modify LAG (%d)\n",
+ err);
+ }
+}
+
+static int mlx5_create_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ int err;
mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
&ldev->v2p_map[1]);
+ mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
+ ldev->v2p_map[0], ldev->v2p_map[1]);
+
err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
if (err)
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
err);
+ return err;
+}
+
+static int mlx5_activate_lag(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker,
+ u8 flags)
+{
+ bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
+ struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ int err;
+
+ err = mlx5_create_lag(ldev, tracker);
+ if (err) {
+ if (roce_lag) {
+ mlx5_core_err(dev0,
+ "Failed to activate RoCE LAG\n");
+ } else {
+ mlx5_core_err(dev0,
+ "Failed to activate VF LAG\n"
+ "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
+ }
+ return err;
+ }
+
+ ldev->flags |= flags;
+ return 0;
}
-static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
+static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+ bool roce_lag = __mlx5_lag_is_roce(ldev);
int err;
- ldev->flags &= ~MLX5_LAG_FLAG_BONDED;
+ ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
err = mlx5_cmd_destroy_lag(dev0);
- if (err)
- mlx5_core_err(dev0,
- "Failed to destroy LAG (%d)\n",
- err);
+ if (err) {
+ if (roce_lag) {
+ mlx5_core_err(dev0,
+ "Failed to deactivate RoCE LAG; driver restart required\n");
+ } else {
+ mlx5_core_err(dev0,
+ "Failed to deactivate VF LAG; driver restart required\n"
+ "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
+ }
+ }
+
+ return err;
+}
+
+static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+{
+ if (!ldev->pf[0].dev || !ldev->pf[1].dev)
+ return false;
+
+#ifdef CONFIG_MLX5_ESWITCH
+ return mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
+#else
+ return (!mlx5_sriov_is_enabled(ldev->pf[0].dev) &&
+ !mlx5_sriov_is_enabled(ldev->pf[1].dev));
+#endif
+}
+
+static void mlx5_lag_add_ib_devices(struct mlx5_lag *ldev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ mlx5_add_dev_by_protocol(ldev->pf[i].dev,
+ MLX5_INTERFACE_PROTOCOL_IB);
+}
+
+static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev)
+{
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (ldev->pf[i].dev)
+ mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
+ MLX5_INTERFACE_PROTOCOL_IB);
}
static void mlx5_do_bond(struct mlx5_lag *ldev)
@@ -223,9 +327,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
struct lag_tracker tracker;
- u8 v2p_port1, v2p_port2;
- int i, err;
- bool do_bond;
+ bool do_bond, roce_lag;
+ int err;
if (!dev0 || !dev1)
return;
@@ -234,42 +337,45 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
tracker = ldev->tracker;
mutex_unlock(&lag_mutex);
- do_bond = tracker.is_bonded && ldev->allowed;
+ do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
- if (do_bond && !mlx5_lag_is_bonded(ldev)) {
- for (i = 0; i < MLX5_MAX_PORTS; i++)
- mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
- MLX5_INTERFACE_PROTOCOL_IB);
+ if (do_bond && !__mlx5_lag_is_active(ldev)) {
+ roce_lag = !mlx5_sriov_is_enabled(dev0) &&
+ !mlx5_sriov_is_enabled(dev1);
- mlx5_activate_lag(ldev, &tracker);
+ if (roce_lag)
+ mlx5_lag_remove_ib_devices(ldev);
- mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_nic_vport_enable_roce(dev1);
- } else if (do_bond && mlx5_lag_is_bonded(ldev)) {
- mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
- &v2p_port2);
+ err = mlx5_activate_lag(ldev, &tracker,
+ roce_lag ? MLX5_LAG_FLAG_ROCE :
+ MLX5_LAG_FLAG_SRIOV);
+ if (err) {
+ if (roce_lag)
+ mlx5_lag_add_ib_devices(ldev);
- if ((v2p_port1 != ldev->v2p_map[0]) ||
- (v2p_port2 != ldev->v2p_map[1])) {
- ldev->v2p_map[0] = v2p_port1;
- ldev->v2p_map[1] = v2p_port2;
+ return;
+ }
- err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
- if (err)
- mlx5_core_err(dev0,
- "Failed to modify LAG (%d)\n",
- err);
+ if (roce_lag) {
+ mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_nic_vport_enable_roce(dev1);
+ }
+ } else if (do_bond && __mlx5_lag_is_active(ldev)) {
+ mlx5_modify_lag(ldev, &tracker);
+ } else if (!do_bond && __mlx5_lag_is_active(ldev)) {
+ roce_lag = __mlx5_lag_is_roce(ldev);
+
+ if (roce_lag) {
+ mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_nic_vport_disable_roce(dev1);
}
- } else if (!do_bond && mlx5_lag_is_bonded(ldev)) {
- mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_nic_vport_disable_roce(dev1);
- mlx5_deactivate_lag(ldev);
+ err = mlx5_deactivate_lag(ldev);
+ if (err)
+ return;
- for (i = 0; i < MLX5_MAX_PORTS; i++)
- if (ldev->pf[i].dev)
- mlx5_add_dev_by_protocol(ldev->pf[i].dev,
- MLX5_INTERFACE_PROTOCOL_IB);
+ if (roce_lag)
+ mlx5_lag_add_ib_devices(ldev);
}
}
@@ -419,15 +525,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
}
-static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
-{
- if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) ||
- (ldev->pf[1].dev && mlx5_sriov_is_enabled(ldev->pf[1].dev)))
- return false;
- else
- return true;
-}
-
static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
struct mlx5_lag *ldev;
@@ -437,7 +534,6 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(void)
return NULL;
INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
- ldev->allowed = mlx5_lag_check_prereq(ldev);
return ldev;
}
@@ -462,7 +558,6 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
ldev->tracker.netdev_state[fn].link_up = 0;
ldev->tracker.netdev_state[fn].tx_enabled = 0;
- ldev->allowed = mlx5_lag_check_prereq(ldev);
dev->priv.lag = ldev;
mutex_unlock(&lag_mutex);
@@ -484,7 +579,6 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
dev->priv.lag = NULL;
- ldev->allowed = mlx5_lag_check_prereq(ldev);
mutex_unlock(&lag_mutex);
}
@@ -532,7 +626,7 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
if (!ldev)
return;
- if (mlx5_lag_is_bonded(ldev))
+ if (__mlx5_lag_is_active(ldev))
mlx5_deactivate_lag(ldev);
mlx5_lag_dev_remove_pf(ldev, dev);
@@ -549,56 +643,61 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
}
}
-bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
+bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
bool res;
mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
- res = ldev && mlx5_lag_is_bonded(ldev);
+ res = ldev && __mlx5_lag_is_roce(ldev);
mutex_unlock(&lag_mutex);
return res;
}
-EXPORT_SYMBOL(mlx5_lag_is_active);
+EXPORT_SYMBOL(mlx5_lag_is_roce);
-static int mlx5_lag_set_state(struct mlx5_core_dev *dev, bool allow)
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
- int ret = 0;
- bool lag_active;
-
- mlx5_dev_list_lock();
+ bool res;
+ mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
- if (!ldev) {
- ret = -ENODEV;
- goto unlock;
- }
- lag_active = mlx5_lag_is_bonded(ldev);
- if (!mlx5_lag_check_prereq(ldev) && allow) {
- ret = -EINVAL;
- goto unlock;
- }
- if (ldev->allowed == allow)
- goto unlock;
- ldev->allowed = allow;
- if ((lag_active && !allow) || allow)
- mlx5_do_bond(ldev);
-unlock:
- mlx5_dev_list_unlock();
- return ret;
+ res = ldev && __mlx5_lag_is_active(ldev);
+ mutex_unlock(&lag_mutex);
+
+ return res;
}
+EXPORT_SYMBOL(mlx5_lag_is_active);
-int mlx5_lag_forbid(struct mlx5_core_dev *dev)
+bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
- return mlx5_lag_set_state(dev, false);
+ struct mlx5_lag *ldev;
+ bool res;
+
+ mutex_lock(&lag_mutex);
+ ldev = mlx5_lag_dev_get(dev);
+ res = ldev && __mlx5_lag_is_sriov(ldev);
+ mutex_unlock(&lag_mutex);
+
+ return res;
}
+EXPORT_SYMBOL(mlx5_lag_is_sriov);
-int mlx5_lag_allow(struct mlx5_core_dev *dev)
+void mlx5_lag_update(struct mlx5_core_dev *dev)
{
- return mlx5_lag_set_state(dev, true);
+ struct mlx5_lag *ldev;
+
+ mlx5_dev_list_lock();
+ ldev = mlx5_lag_dev_get(dev);
+ if (!ldev)
+ goto unlock;
+
+ mlx5_do_bond(ldev);
+
+unlock:
+ mlx5_dev_list_unlock();
}
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
@@ -609,7 +708,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
- if (!(ldev && mlx5_lag_is_bonded(ldev)))
+ if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
@@ -638,7 +737,7 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
return true;
ldev = mlx5_lag_dev_get(dev);
- if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
+ if (!ldev || !__mlx5_lag_is_roce(ldev) || ldev->pf[0].dev == dev)
return true;
/* If bonded, we do not add an IB device for PF1. */
@@ -665,7 +764,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
mutex_lock(&lag_mutex);
ldev = mlx5_lag_dev_get(dev);
- if (ldev && mlx5_lag_is_bonded(ldev)) {
+ if (ldev && __mlx5_lag_is_roce(ldev)) {
num_ports = MLX5_MAX_PORTS;
mdev[0] = ldev->pf[0].dev;
mdev[1] = ldev->pf[1].dev;
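As a usage note, a consumer above the core driver can now distinguish the two LAG flavors introduced here. A short sketch, assuming only the exported helpers this patch adds (the helper function itself is hypothetical):

	static const char *my_lag_mode_str(struct mlx5_core_dev *mdev)
	{
		if (!mlx5_lag_is_active(mdev))
			return "none";
		return mlx5_lag_is_roce(mdev) ? "roce" : "sriov";
	}

RoCE LAG keeps the old behavior (one IB device over the bond, PF1's IB device removed), while SRIOV LAG leaves both IB devices in place and defers to the eswitch.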
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 0d90b1b4a3d3..ca0ee9916e9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -33,6 +33,7 @@
#include <linux/clocksource.h>
#include <linux/highmem.h>
#include <rdma/mlx5-abi.h>
+#include "lib/eq.h"
#include "en.h"
#include "clock.h"
@@ -71,7 +72,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc)
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
clock);
- return mlx5_read_internal_timer(mdev) & cc->mask;
+ return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
}
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
@@ -155,15 +156,19 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
return 0;
}
-static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
ptp_info);
- u64 ns;
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
+ clock);
unsigned long flags;
+ u64 cycles, ns;
write_seqlock_irqsave(&clock->lock, flags);
- ns = timecounter_read(&clock->tc);
+ cycles = mlx5_read_internal_timer(mdev, sts);
+ ns = timecounter_cyc2time(&clock->tc, cycles);
write_sequnlock_irqrestore(&clock->lock, flags);
*ts = ns_to_timespec64(ns);
@@ -306,7 +311,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
ts.tv_sec = rq->perout.start.sec;
ts.tv_nsec = rq->perout.start.nsec;
ns = timespec64_to_ns(&ts);
- cycles_now = mlx5_read_internal_timer(mdev);
+ cycles_now = mlx5_read_internal_timer(mdev, NULL);
write_seqlock_irqsave(&clock->lock, flags);
nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
nsec_delta = ns - nsec_now;
@@ -383,7 +388,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.pps = 0,
.adjfreq = mlx5_ptp_adjfreq,
.adjtime = mlx5_ptp_adjtime,
- .gettime64 = mlx5_ptp_gettime,
+ .gettimex64 = mlx5_ptp_gettimex,
.settime64 = mlx5_ptp_settime,
.enable = NULL,
.verify = NULL,
@@ -439,16 +444,17 @@ static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
-void mlx5_pps_event(struct mlx5_core_dev *mdev,
- struct mlx5_eqe *eqe)
+static int mlx5_pps_event(struct notifier_block *nb,
+ unsigned long type, void *data)
{
- struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
+ struct mlx5_core_dev *mdev = clock->mdev;
struct ptp_clock_event ptp_event;
- struct timespec64 ts;
- u64 nsec_now, nsec_delta;
u64 cycles_now, cycles_delta;
+ u64 nsec_now, nsec_delta, ns;
+ struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
- s64 ns;
+ struct timespec64 ts;
unsigned long flags;
switch (clock->ptp_info.pin_config[pin].func) {
@@ -463,11 +469,12 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
} else {
ptp_event.type = PTP_CLOCK_EXTTS;
}
+ /* TODO: clock->ptp can be NULL if ptp_clock_register fails */
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
- mlx5_ptp_gettime(&clock->ptp_info, &ts);
- cycles_now = mlx5_read_internal_timer(mdev);
+ mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
+ cycles_now = mlx5_read_internal_timer(mdev, NULL);
ts.tv_sec += 1;
ts.tv_nsec = 0;
ns = timespec64_to_ns(&ts);
@@ -481,8 +488,11 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
write_sequnlock_irqrestore(&clock->lock, flags);
break;
default:
- mlx5_core_err(mdev, " Unhandled event\n");
+ mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
+ clock->ptp_info.pin_config[pin].func);
}
+
+ return NOTIFY_OK;
}
void mlx5_init_clock(struct mlx5_core_dev *mdev)
@@ -511,14 +521,14 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
ktime_to_ns(ktime_get_real()));
/* Calculate period in seconds to call the overflow watchdog - to make
- * sure counter is checked at least once every wrap around.
+ * sure counter is checked at least twice every wrap around.
* The period is calculated as the minimum between max HW cycles count
* (The clock source mask) and max amount of cycles that can be
* multiplied by clock multiplier where the result doesn't exceed
* 64bits.
*/
overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
- overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+ overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
frac, &frac);
@@ -567,6 +577,9 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
PTR_ERR(clock->ptp));
clock->ptp = NULL;
}
+
+ MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
+ mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
@@ -576,6 +589,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
return;
+ mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
if (clock->ptp) {
ptp_clock_unregister(clock->ptp);
clock->ptp = NULL;
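To make the new watchdog bound above concrete, a minimal numeric sketch under assumed example values (a 48-bit cycle counter and mult = 2^24; the real values depend on MLX5_CAP_GEN(mdev, device_frequency_khz)):

	/* Illustrative values only. */
	u64 mask = (1ULL << 48) - 1;  /* clock->cycles.mask */
	u32 mult = 1 << 24;           /* clock->cycles.mult */
	u64 overflow_cycles;

	/* (2^63 - 1) / 2^24 ~= 2^39 cycles before the cyc2ns product overflows */
	overflow_cycles = div64_u64(~0ULL >> 1, mult);
	/* mask / 3 ~= 2^46.4, so the multiplier bound wins here and the
	 * counter is still sampled well over twice per wrap-around.
	 */
	overflow_cycles = min(overflow_cycles, div_u64(mask, 3));

Dividing the mask by 3 instead of 2 leaves headroom so that even with scheduling jitter the overflow work runs at least twice per counter wrap.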
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index 263cb6e2aeee..31600924bdc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -36,7 +36,6 @@
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
-void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
@@ -60,8 +59,6 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
#else
static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
-static inline void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) {}
-
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
return -1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
new file mode 100644
index 000000000000..bced2efe9bef
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2018 Mellanox Technologies */
+
+#include <linux/mlx5/vport.h>
+#include "lib/devcom.h"
+
+static LIST_HEAD(devcom_list);
+
+#define devcom_for_each_component(priv, comp, iter) \
+ for (iter = 0; \
+ comp = &(priv)->components[iter], iter < MLX5_DEVCOM_NUM_COMPONENTS; \
+ iter++)
+
+struct mlx5_devcom_component {
+ struct {
+ void *data;
+ } device[MLX5_MAX_PORTS];
+
+ mlx5_devcom_event_handler_t handler;
+ struct rw_semaphore sem;
+ bool paired;
+};
+
+struct mlx5_devcom_list {
+ struct list_head list;
+
+ struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS];
+ struct mlx5_core_dev *devs[MLX5_MAX_PORTS];
+};
+
+struct mlx5_devcom {
+ struct mlx5_devcom_list *priv;
+ int idx;
+};
+
+static struct mlx5_devcom_list *mlx5_devcom_list_alloc(void)
+{
+ struct mlx5_devcom_component *comp;
+ struct mlx5_devcom_list *priv;
+ int i;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ devcom_for_each_component(priv, comp, i)
+ init_rwsem(&comp->sem);
+
+ return priv;
+}
+
+static struct mlx5_devcom *mlx5_devcom_alloc(struct mlx5_devcom_list *priv,
+ u8 idx)
+{
+ struct mlx5_devcom *devcom;
+
+ devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
+ if (!devcom)
+ return NULL;
+
+ devcom->priv = priv;
+ devcom->idx = idx;
+ return devcom;
+}
+
+/* Must be called with intf_mutex held */
+struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devcom_list *priv = NULL, *iter;
+ struct mlx5_devcom *devcom = NULL;
+ bool new_priv = false;
+ u64 sguid0, sguid1;
+ int idx, i;
+
+ if (!mlx5_core_is_pf(dev))
+ return NULL;
+
+ sguid0 = mlx5_query_nic_system_image_guid(dev);
+ list_for_each_entry(iter, &devcom_list, list) {
+ struct mlx5_core_dev *tmp_dev = NULL;
+
+ idx = -1;
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ if (iter->devs[i])
+ tmp_dev = iter->devs[i];
+ else
+ idx = i;
+ }
+
+ if (idx == -1)
+ continue;
+
+ sguid1 = mlx5_query_nic_system_image_guid(tmp_dev);
+ if (sguid0 != sguid1)
+ continue;
+
+ priv = iter;
+ break;
+ }
+
+ if (!priv) {
+ priv = mlx5_devcom_list_alloc();
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ idx = 0;
+ new_priv = true;
+ }
+
+ priv->devs[idx] = dev;
+ devcom = mlx5_devcom_alloc(priv, idx);
+ if (!devcom) {
+ kfree(priv);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (new_priv)
+ list_add(&priv->list, &devcom_list);
+
+ return devcom;
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
+{
+ struct mlx5_devcom_list *priv;
+ int i;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return;
+
+ priv = devcom->priv;
+ priv->devs[devcom->idx] = NULL;
+
+ kfree(devcom);
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (priv->devs[i])
+ break;
+
+ if (i != MLX5_MAX_PORTS)
+ return;
+
+ list_del(&priv->list);
+ kfree(priv);
+}
+
+void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ mlx5_devcom_event_handler_t handler,
+ void *data)
+{
+ struct mlx5_devcom_component *comp;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return;
+
+ WARN_ON(!data);
+
+ comp = &devcom->priv->components[id];
+ down_write(&comp->sem);
+ comp->handler = handler;
+ comp->device[devcom->idx].data = data;
+ up_write(&comp->sem);
+}
+
+void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id)
+{
+ struct mlx5_devcom_component *comp;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return;
+
+ comp = &devcom->priv->components[id];
+ down_write(&comp->sem);
+ comp->device[devcom->idx].data = NULL;
+ up_write(&comp->sem);
+}
+
+int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ int event,
+ void *event_data)
+{
+ struct mlx5_devcom_component *comp;
+ int err = -ENODEV, i;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return err;
+
+ comp = &devcom->priv->components[id];
+ down_write(&comp->sem);
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (i != devcom->idx && comp->device[i].data) {
+ err = comp->handler(event, comp->device[i].data,
+ event_data);
+ break;
+ }
+
+ up_write(&comp->sem);
+ return err;
+}
+
+void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ bool paired)
+{
+ struct mlx5_devcom_component *comp;
+
+ comp = &devcom->priv->components[id];
+ WARN_ON(!rwsem_is_locked(&comp->sem));
+
+ comp->paired = paired;
+}
+
+bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id)
+{
+ if (IS_ERR_OR_NULL(devcom))
+ return false;
+
+ return devcom->priv->components[id].paired;
+}
+
+void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id)
+{
+ struct mlx5_devcom_component *comp;
+ int i;
+
+ if (IS_ERR_OR_NULL(devcom))
+ return NULL;
+
+ comp = &devcom->priv->components[id];
+ down_read(&comp->sem);
+ if (!comp->paired) {
+ up_read(&comp->sem);
+ return NULL;
+ }
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (i != devcom->idx)
+ break;
+
+ return comp->device[i].data;
+}
+
+void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id)
+{
+ struct mlx5_devcom_component *comp = &devcom->priv->components[id];
+
+ up_read(&comp->sem);
+}
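For context, a minimal sketch of how a component might consume this devcom API; the handler and attach function are hypothetical, the event code 0 is a placeholder, and only the registration/send calls mirror the signatures above:

	static int my_devcom_event(int event, void *my_data, void *event_data)
	{
		/* my_data: what this side registered; event_data: what the
		 * peer passed to mlx5_devcom_send_event().
		 */
		return 0;
	}

	static void my_devcom_attach(struct mlx5_core_dev *dev, void *ctx)
	{
		struct mlx5_devcom *devcom = dev->priv.devcom;

		mlx5_devcom_register_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
					       my_devcom_event, ctx);
		/* -ENODEV from send_event just means no peer is registered yet */
		mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, 0, ctx);
	}

Pairing state is then toggled from inside the handler via mlx5_devcom_set_paired(), which asserts that the component semaphore is held.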
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
new file mode 100644
index 000000000000..939d5bf1581b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies */
+
+#ifndef __LIB_MLX5_DEVCOM_H__
+#define __LIB_MLX5_DEVCOM_H__
+
+#include <linux/mlx5/driver.h>
+
+enum mlx5_devcom_components {
+ MLX5_DEVCOM_ESW_OFFLOADS,
+
+ MLX5_DEVCOM_NUM_COMPONENTS,
+};
+
+typedef int (*mlx5_devcom_event_handler_t)(int event,
+ void *my_data,
+ void *event_data);
+
+struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev);
+void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom);
+
+void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ mlx5_devcom_event_handler_t handler,
+ void *data);
+void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id);
+
+int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ int event,
+ void *event_data);
+
+void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ bool paired);
+bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id);
+
+void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id);
+void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id);
+
+#endif
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
new file mode 100644
index 000000000000..c0fb6d72b695
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies */
+
+#ifndef __LIB_MLX5_EQ_H__
+#define __LIB_MLX5_EQ_H__
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eq.h>
+#include <linux/mlx5/cq.h>
+
+#define MLX5_MAX_IRQ_NAME (32)
+#define MLX5_EQE_SIZE (sizeof(struct mlx5_eqe))
+
+struct mlx5_eq_tasklet {
+ struct list_head list;
+ struct list_head process_list;
+ struct tasklet_struct task;
+ spinlock_t lock; /* lock completion tasklet list */
+};
+
+struct mlx5_cq_table {
+ spinlock_t lock; /* protect radix tree */
+ struct radix_tree_root tree;
+};
+
+struct mlx5_eq {
+ struct mlx5_core_dev *dev;
+ struct mlx5_cq_table cq_table;
+ __be32 __iomem *doorbell;
+ u32 cons_index;
+ struct mlx5_frag_buf buf;
+ int size;
+ unsigned int vecidx;
+ unsigned int irqn;
+ u8 eqn;
+ int nent;
+ struct mlx5_rsc_debug *dbg;
+};
+
+struct mlx5_eq_comp {
+ struct mlx5_eq core; /* Must be first */
+ struct mlx5_eq_tasklet tasklet_ctx;
+ struct list_head list;
+};
+
+static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
+{
+ return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
+}
+
+static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
+{
+ struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
+
+ return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
+}
+
+static inline void eq_update_ci(struct mlx5_eq *eq, int arm)
+{
+ __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
+ u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
+
+ __raw_writel((__force u32)cpu_to_be32(val), addr);
+ /* We still want ordering, just not swabbing, so add a barrier */
+ mb();
+}
+
+int mlx5_eq_table_init(struct mlx5_core_dev *dev);
+void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev);
+int mlx5_eq_table_create(struct mlx5_core_dev *dev);
+void mlx5_eq_table_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
+struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
+void mlx5_cq_tasklet_cb(unsigned long data);
+struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);
+
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
+void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev);
+void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev);
+
+int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
+
+/* This function should only be called after mlx5_cmd_force_teardown_hca */
+void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
+
+#ifdef CONFIG_RFS_ACCEL
+struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev);
+#endif
+
+int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
+
+#endif
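As an illustration of the notifier pattern this header enables, a sketch of a consumer with a hypothetical my_ctx structure; it mirrors the mlx5_nb_cof()/MLX5_NB_INIT() usage seen in clock.c and pagealloc.c elsewhere in this series:

	struct my_ctx {
		struct mlx5_core_dev *dev;
		struct mlx5_nb nb;  /* embedded, recovered via mlx5_nb_cof() */
	};

	static int my_port_event(struct notifier_block *nb,
				 unsigned long type, void *data)
	{
		struct my_ctx *ctx = mlx5_nb_cof(nb, struct my_ctx, nb);
		struct mlx5_eqe *eqe = data;  /* raw EQE for this event type */

		/* ... inspect eqe, use ctx->dev ... */
		return NOTIFY_OK;
	}

	/* registration, e.g. at init time:
	 *   MLX5_NB_INIT(&ctx->nb, my_port_event, PORT_CHANGE);
	 *   mlx5_eq_notifier_register(ctx->dev, &ctx->nb);
	 */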
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 7550b1cc8c6a..397a2847867a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -33,6 +33,8 @@
#ifndef __LIB_MLX5_H__
#define __LIB_MLX5_H__
+#include "mlx5_core.h"
+
void mlx5_init_reserved_gids(struct mlx5_core_dev *dev);
void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev);
int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count);
@@ -40,4 +42,38 @@ void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count);
int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index);
void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index);
+/* TODO move to lib/events.h */
+
+#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
+#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
+
+enum port_module_event_status_type {
+ MLX5_MODULE_STATUS_PLUGGED = 0x1,
+ MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
+ MLX5_MODULE_STATUS_ERROR = 0x3,
+ MLX5_MODULE_STATUS_DISABLED = 0x4,
+ MLX5_MODULE_STATUS_NUM,
+};
+
+enum port_module_event_error_type {
+ MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0,
+ MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX = 0x1,
+ MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
+ MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
+ MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5,
+ MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
+ MLX5_MODULE_EVENT_ERROR_BAD_CABLE = 0x7,
+ MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED = 0xc,
+ MLX5_MODULE_EVENT_ERROR_NUM,
+};
+
+struct mlx5_pme_stats {
+ u64 status_counters[MLX5_MODULE_STATUS_NUM];
+ u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+};
+
+void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
+int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 28132c7dc05f..77896c11f6f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -43,7 +43,6 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
@@ -53,6 +52,7 @@
#endif
#include <net/devlink.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
#include "fs_core.h"
#include "lib/mpfs.h"
#include "eswitch.h"
@@ -63,6 +63,7 @@
#include "accel/tls.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
+#include "lib/devcom.h"
#include "diag/fw_tracer.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
@@ -319,51 +320,6 @@ static void release_bar(struct pci_dev *pdev)
pci_release_regions(pdev);
}
-static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_eq_table *table = &priv->eq_table;
- int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
- MLX5_CAP_GEN(dev, max_num_eqs) :
- 1 << MLX5_CAP_GEN(dev, log_max_eq);
- int nvec;
- int err;
-
- nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
- MLX5_EQ_VEC_COMP_BASE;
- nvec = min_t(int, nvec, num_eqs);
- if (nvec <= MLX5_EQ_VEC_COMP_BASE)
- return -ENOMEM;
-
- priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
- if (!priv->irq_info)
- return -ENOMEM;
-
- nvec = pci_alloc_irq_vectors(dev->pdev,
- MLX5_EQ_VEC_COMP_BASE + 1, nvec,
- PCI_IRQ_MSIX);
- if (nvec < 0) {
- err = nvec;
- goto err_free_irq_info;
- }
-
- table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
-
- return 0;
-
-err_free_irq_info:
- kfree(priv->irq_info);
- return err;
-}
-
-static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev)
-{
- struct mlx5_priv *priv = &dev->priv;
-
- pci_free_irq_vectors(dev->pdev);
- kfree(priv->irq_info);
-}
-
struct mlx5_reg_host_endianness {
u8 he;
u8 rsvd[15];
@@ -624,188 +580,24 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
+u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
+ struct ptp_system_timestamp *sts)
{
u32 timer_h, timer_h1, timer_l;
timer_h = ioread32be(&dev->iseg->internal_timer_h);
+ ptp_read_system_prets(sts);
timer_l = ioread32be(&dev->iseg->internal_timer_l);
+ ptp_read_system_postts(sts);
timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
- if (timer_h != timer_h1) /* wrap around */
+ if (timer_h != timer_h1) {
+ /* wrap around */
+ ptp_read_system_prets(sts);
timer_l = ioread32be(&dev->iseg->internal_timer_l);
-
- return (u64)timer_l | (u64)timer_h1 << 32;
-}
-
-static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
-{
- struct mlx5_priv *priv = &mdev->priv;
- int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
-
- if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
- mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
- return -ENOMEM;
+ ptp_read_system_postts(sts);
}
- cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
- priv->irq_info[i].mask);
-
- if (IS_ENABLED(CONFIG_SMP) &&
- irq_set_affinity_hint(irq, priv->irq_info[i].mask))
- mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
-
- return 0;
-}
-
-static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
-{
- struct mlx5_priv *priv = &mdev->priv;
- int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
-
- irq_set_affinity_hint(irq, NULL);
- free_cpumask_var(priv->irq_info[i].mask);
-}
-
-static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
-{
- int err;
- int i;
-
- for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
- err = mlx5_irq_set_affinity_hint(mdev, i);
- if (err)
- goto err_out;
- }
-
- return 0;
-
-err_out:
- for (i--; i >= 0; i--)
- mlx5_irq_clear_affinity_hint(mdev, i);
-
- return err;
-}
-
-static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
-{
- int i;
-
- for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
- mlx5_irq_clear_affinity_hint(mdev, i);
-}
-
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
- unsigned int *irqn)
-{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- struct mlx5_eq *eq, *n;
- int err = -ENOENT;
-
- spin_lock(&table->lock);
- list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
- if (eq->index == vector) {
- *eqn = eq->eqn;
- *irqn = eq->irqn;
- err = 0;
- break;
- }
- }
- spin_unlock(&table->lock);
-
- return err;
-}
-EXPORT_SYMBOL(mlx5_vector2eqn);
-
-struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn)
-{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- struct mlx5_eq *eq;
-
- spin_lock(&table->lock);
- list_for_each_entry(eq, &table->comp_eqs_list, list)
- if (eq->eqn == eqn) {
- spin_unlock(&table->lock);
- return eq;
- }
-
- spin_unlock(&table->lock);
-
- return ERR_PTR(-ENOENT);
-}
-
-static void free_comp_eqs(struct mlx5_core_dev *dev)
-{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- struct mlx5_eq *eq, *n;
-
-#ifdef CONFIG_RFS_ACCEL
- if (dev->rmap) {
- free_irq_cpu_rmap(dev->rmap);
- dev->rmap = NULL;
- }
-#endif
- spin_lock(&table->lock);
- list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
- list_del(&eq->list);
- spin_unlock(&table->lock);
- if (mlx5_destroy_unmap_eq(dev, eq))
- mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
- eq->eqn);
- kfree(eq);
- spin_lock(&table->lock);
- }
- spin_unlock(&table->lock);
-}
-
-static int alloc_comp_eqs(struct mlx5_core_dev *dev)
-{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- char name[MLX5_MAX_IRQ_NAME];
- struct mlx5_eq *eq;
- int ncomp_vec;
- int nent;
- int err;
- int i;
-
- INIT_LIST_HEAD(&table->comp_eqs_list);
- ncomp_vec = table->num_comp_vectors;
- nent = MLX5_COMP_EQ_SIZE;
-#ifdef CONFIG_RFS_ACCEL
- dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
- if (!dev->rmap)
- return -ENOMEM;
-#endif
- for (i = 0; i < ncomp_vec; i++) {
- eq = kzalloc(sizeof(*eq), GFP_KERNEL);
- if (!eq) {
- err = -ENOMEM;
- goto clean;
- }
-
-#ifdef CONFIG_RFS_ACCEL
- irq_cpu_rmap_add(dev->rmap, pci_irq_vector(dev->pdev,
- MLX5_EQ_VEC_COMP_BASE + i));
-#endif
- snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
- err = mlx5_create_map_eq(dev, eq,
- i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
- name, MLX5_EQ_TYPE_COMP);
- if (err) {
- kfree(eq);
- goto clean;
- }
- mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
- eq->index = i;
- spin_lock(&table->lock);
- list_add_tail(&eq->list, &table->comp_eqs_list);
- spin_unlock(&table->lock);
- }
-
- return 0;
-
-clean:
- free_comp_eqs(dev);
- return err;
+ return (u64)timer_l | (u64)timer_h1 << 32;
}
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
@@ -938,28 +730,37 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
struct pci_dev *pdev = dev->pdev;
int err;
+ priv->devcom = mlx5_devcom_register_device(dev);
+ if (IS_ERR(priv->devcom))
+ dev_err(&pdev->dev, "failed to register with devcom (0x%p)\n",
+ priv->devcom);
+
err = mlx5_query_board_id(dev);
if (err) {
dev_err(&pdev->dev, "query board id failed\n");
- goto out;
+ goto err_devcom;
}
- err = mlx5_eq_init(dev);
+ err = mlx5_eq_table_init(dev);
if (err) {
dev_err(&pdev->dev, "failed to initialize eq\n");
- goto out;
+ goto err_devcom;
+ }
+
+ err = mlx5_events_init(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to initialize events\n");
+ goto err_eq_cleanup;
}
err = mlx5_cq_debugfs_init(dev);
if (err) {
dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
- goto err_eq_cleanup;
+ goto err_events_cleanup;
}
mlx5_init_qp_table(dev);
- mlx5_init_srq_table(dev);
-
mlx5_init_mkey_table(dev);
mlx5_init_reserved_gids(dev);
@@ -1013,14 +814,15 @@ err_rl_cleanup:
err_tables_cleanup:
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cleanup_mkey_table(dev);
- mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
-
+err_events_cleanup:
+ mlx5_events_cleanup(dev);
err_eq_cleanup:
- mlx5_eq_cleanup(dev);
+ mlx5_eq_table_cleanup(dev);
+err_devcom:
+ mlx5_devcom_unregister_device(dev->priv.devcom);
-out:
return err;
}
@@ -1036,10 +838,11 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
- mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cq_debugfs_cleanup(dev);
- mlx5_eq_cleanup(dev);
+ mlx5_events_cleanup(dev);
+ mlx5_eq_table_cleanup(dev);
+ mlx5_devcom_unregister_device(dev->priv.devcom);
}
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
@@ -1131,16 +934,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto reclaim_boot_pages;
}
- err = mlx5_pagealloc_start(dev);
- if (err) {
- dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
- goto reclaim_boot_pages;
- }
-
err = mlx5_cmd_init_hca(dev, sw_owner_id);
if (err) {
dev_err(&pdev->dev, "init hca failed\n");
- goto err_pagealloc_stop;
+ goto reclaim_boot_pages;
}
mlx5_set_driver_version(dev);
@@ -1161,23 +958,20 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
}
}
- err = mlx5_alloc_irq_vectors(dev);
- if (err) {
- dev_err(&pdev->dev, "alloc irq vectors failed\n");
- goto err_cleanup_once;
- }
-
dev->priv.uar = mlx5_get_uars_page(dev);
if (IS_ERR(dev->priv.uar)) {
dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
err = PTR_ERR(dev->priv.uar);
- goto err_disable_msix;
+ goto err_get_uars;
}
- err = mlx5_start_eqs(dev);
+ mlx5_events_start(dev);
+ mlx5_pagealloc_start(dev);
+
+ err = mlx5_eq_table_create(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
- goto err_put_uars;
+ dev_err(&pdev->dev, "Failed to create EQs\n");
+ goto err_eq_table;
}
err = mlx5_fw_tracer_init(dev->tracer);
@@ -1186,18 +980,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_fw_tracer;
}
- err = alloc_comp_eqs(dev);
- if (err) {
- dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
- goto err_comp_eqs;
- }
-
- err = mlx5_irq_set_affinity_hints(dev);
- if (err) {
- dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
- goto err_affinity_hints;
- }
-
err = mlx5_fpga_device_start(dev);
if (err) {
dev_err(&pdev->dev, "fpga device start failed %d\n", err);
@@ -1266,24 +1048,17 @@ err_ipsec_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
- mlx5_irq_clear_affinity_hints(dev);
-
-err_affinity_hints:
- free_comp_eqs(dev);
-
-err_comp_eqs:
mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
- mlx5_stop_eqs(dev);
+ mlx5_eq_table_destroy(dev);
-err_put_uars:
+err_eq_table:
+ mlx5_pagealloc_stop(dev);
+ mlx5_events_stop(dev);
mlx5_put_uars_page(dev, priv->uar);
-err_disable_msix:
- mlx5_free_irq_vectors(dev);
-
-err_cleanup_once:
+err_get_uars:
if (boot)
mlx5_cleanup_once(dev);
@@ -1294,9 +1069,6 @@ err_stop_poll:
goto out_err;
}
-err_pagealloc_stop:
- mlx5_pagealloc_stop(dev);
-
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
@@ -1340,21 +1112,20 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
mlx5_fpga_device_stop(dev);
- mlx5_irq_clear_affinity_hints(dev);
- free_comp_eqs(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
- mlx5_stop_eqs(dev);
+ mlx5_eq_table_destroy(dev);
+ mlx5_pagealloc_stop(dev);
+ mlx5_events_stop(dev);
mlx5_put_uars_page(dev, priv->uar);
- mlx5_free_irq_vectors(dev);
if (cleanup)
mlx5_cleanup_once(dev);
mlx5_stop_health_poll(dev, cleanup);
+
err = mlx5_cmd_teardown_hca(dev);
if (err) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
goto out;
}
- mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
mlx5_core_disable_hca(dev, 0);
mlx5_cmd_cleanup(dev);
@@ -1364,12 +1135,6 @@ out:
return err;
}
-struct mlx5_core_event_handler {
- void (*event)(struct mlx5_core_dev *dev,
- enum mlx5_dev_event event,
- void *data);
-};
-
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@@ -1403,7 +1168,6 @@ static int init_one(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
dev->pdev = pdev;
- dev->event = mlx5_core_event;
dev->profile = &profile[prof_sel];
INIT_LIST_HEAD(&priv->ctx_list);
@@ -1411,17 +1175,6 @@ static int init_one(struct pci_dev *pdev,
mutex_init(&dev->pci_status_mutex);
mutex_init(&dev->intf_state_mutex);
- INIT_LIST_HEAD(&priv->waiting_events_list);
- priv->is_accum_events = false;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- err = init_srcu_struct(&priv->pfault_srcu);
- if (err) {
- dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n",
- err);
- goto clean_dev;
- }
-#endif
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
@@ -1430,7 +1183,7 @@ static int init_one(struct pci_dev *pdev,
err = mlx5_pci_init(dev, priv);
if (err) {
dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
- goto clean_srcu;
+ goto clean_dev;
}
err = mlx5_health_init(dev);
@@ -1439,12 +1192,14 @@ static int init_one(struct pci_dev *pdev,
goto close_pci;
}
- mlx5_pagealloc_init(dev);
+ err = mlx5_pagealloc_init(dev);
+ if (err)
+ goto err_pagealloc_init;
err = mlx5_load_one(dev, priv, true);
if (err) {
dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
- goto clean_health;
+ goto err_load_one;
}
request_module_nowait(MLX5_IB_MOD);
@@ -1458,16 +1213,13 @@ static int init_one(struct pci_dev *pdev,
clean_load:
mlx5_unload_one(dev, priv, true);
-clean_health:
+err_load_one:
mlx5_pagealloc_cleanup(dev);
+err_pagealloc_init:
mlx5_health_cleanup(dev);
close_pci:
mlx5_pci_close(dev, priv);
-clean_srcu:
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- cleanup_srcu_struct(&priv->pfault_srcu);
clean_dev:
-#endif
devlink_free(devlink);
return err;
@@ -1491,9 +1243,6 @@ static void remove_one(struct pci_dev *pdev)
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_pci_close(dev, priv);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- cleanup_srcu_struct(&priv->pfault_srcu);
-#endif
devlink_free(devlink);
}
@@ -1637,7 +1386,6 @@ succeed:
* kexec. There is no need to cleanup the mlx5_core software
* contexts.
*/
- mlx5_irq_clear_affinity_hints(dev);
mlx5_core_eq_free_irqs(dev);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0594d0961cb3..c68dcea5985b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -38,6 +38,7 @@
#include <linux/sched.h>
#include <linux/if_link.h>
#include <linux/firmware.h>
+#include <linux/ptp_clock_kernel.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
@@ -78,6 +79,11 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
+#define mlx5_core_warn_once(__dev, format, ...) \
+ dev_warn_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
#define mlx5_core_info(__dev, format, ...) \
dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
@@ -97,12 +103,6 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
-
-void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
- unsigned long param);
-void mlx5_core_page_fault(struct mlx5_core_dev *dev,
- struct mlx5_pagefault *pfault);
-void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);
@@ -122,30 +122,10 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
-u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
-
-int mlx5_eq_init(struct mlx5_core_dev *dev);
-void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
-int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
- int nent, u64 mask, const char *name,
- enum mlx5_eq_type type);
-int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
-int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
-int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
- u32 *out, int outlen);
-int mlx5_start_eqs(struct mlx5_core_dev *dev);
-void mlx5_stop_eqs(struct mlx5_core_dev *dev);
-/* This function should only be called after mlx5_cmd_force_teardown_hca */
-void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev);
-struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
-u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
-void mlx5_cq_tasklet_cb(unsigned long data);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
-int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
+u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
+ struct ptp_system_timestamp *sts);
+
+void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev);
int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
@@ -159,6 +139,11 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
+int mlx5_events_init(struct mlx5_core_dev *dev);
+void mlx5_events_cleanup(struct mlx5_core_dev *dev);
+void mlx5_events_start(struct mlx5_core_dev *dev);
+void mlx5_events_stop(struct mlx5_core_dev *dev);
+
void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
void mlx5_attach_device(struct mlx5_core_dev *dev);
@@ -202,10 +187,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
MLX5_CAP_GEN(dev, lag_master);
}
-int mlx5_lag_allow(struct mlx5_core_dev *dev);
-int mlx5_lag_forbid(struct mlx5_core_dev *dev);
-
void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
+void mlx5_lag_update(struct mlx5_core_dev *dev);
enum {
MLX5_NIC_IFC_FULL = 0,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index e36d3e3675f9..a83b517b0714 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
enum {
MLX5_PAGES_CANT_GIVE = 0,
@@ -433,15 +434,28 @@ static void pages_work_handler(struct work_struct *work)
kfree(req);
}
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s32 npages)
+static int req_pages_handler(struct notifier_block *nb,
+ unsigned long type, void *data)
{
struct mlx5_pages_req *req;
-
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ struct mlx5_eqe *eqe;
+ u16 func_id;
+ s32 npages;
+
+ priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+ eqe = data;
+
+ func_id = be16_to_cpu(eqe->data.req_pages.func_id);
+ npages = be32_to_cpu(eqe->data.req_pages.num_pages);
+ mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
+ func_id, npages);
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
mlx5_core_warn(dev, "failed to allocate pages request\n");
- return;
+ return NOTIFY_DONE;
}
req->dev = dev;
@@ -449,6 +463,7 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
req->npages = npages;
INIT_WORK(&req->work, pages_work_handler);
queue_work(dev->priv.pg_wq, &req->work);
+ return NOTIFY_OK;
}
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
@@ -524,29 +539,32 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
return 0;
}
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
dev->priv.page_root = RB_ROOT;
INIT_LIST_HEAD(&dev->priv.free_list);
+ dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
+ if (!dev->priv.pg_wq)
+ return -ENOMEM;
+
+ return 0;
}
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
- /* nothing */
+ destroy_workqueue(dev->priv.pg_wq);
}
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
- dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
- if (!dev->priv.pg_wq)
- return -ENOMEM;
-
- return 0;
+ MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
+ mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
- destroy_workqueue(dev->priv.pg_wq);
+ mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
+ flush_workqueue(dev->priv.pg_wq);
}
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
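A sketch (not from the patch) of the reworked pagealloc lifecycle as mlx5_load_one()/mlx5_unload_one() now drive it, assuming a hypothetical wrapper function: init allocates the workqueue once, while start/stop only hook and unhook the PAGE_REQUEST notifier.

	static int my_pagealloc_lifecycle(struct mlx5_core_dev *dev)
	{
		int err = mlx5_pagealloc_init(dev); /* allocate the workqueue once */

		if (err)
			return err;
		mlx5_pagealloc_start(dev);   /* register pg_nb for PAGE_REQUEST EQEs */
		/* ... device runs; req_pages_handler() queues pages_work_handler() ... */
		mlx5_pagealloc_stop(dev);    /* unregister pg_nb, flush pending work */
		mlx5_pagealloc_cleanup(dev); /* destroy the workqueue */
		return 0;
	}

Flushing in stop rather than destroying the workqueue means page requests queued just before teardown still complete before the HCA is torn down.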
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 31a9cbd85689..2b82f35f4c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -915,63 +915,6 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
*enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
}
-static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = {
- "Cable plugged", /* MLX5_MODULE_STATUS_PLUGGED = 0x1 */
- "Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED = 0x2 */
- "Cable error", /* MLX5_MODULE_STATUS_ERROR = 0x3 */
-};
-
-static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = {
- "Power budget exceeded",
- "Long Range for non MLNX cable",
- "Bus stuck(I2C or data shorted)",
- "No EEPROM/retry timeout",
- "Enforce part number list",
- "Unknown identifier",
- "High Temperature",
- "Bad or shorted cable/module",
- "Unknown status",
-};
-
-void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
-{
- enum port_module_event_status_type module_status;
- enum port_module_event_error_type error_type;
- struct mlx5_eqe_port_module *module_event_eqe;
- struct mlx5_priv *priv = &dev->priv;
- u8 module_num;
-
- module_event_eqe = &eqe->data.port_module;
- module_num = module_event_eqe->module;
- module_status = module_event_eqe->module_status &
- PORT_MODULE_EVENT_MODULE_STATUS_MASK;
- error_type = module_event_eqe->error_type &
- PORT_MODULE_EVENT_ERROR_TYPE_MASK;
-
- if (module_status < MLX5_MODULE_STATUS_ERROR) {
- priv->pme_stats.status_counters[module_status - 1]++;
- } else if (module_status == MLX5_MODULE_STATUS_ERROR) {
- if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN)
- /* Unknown error type */
- error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN;
- priv->pme_stats.error_counters[error_type]++;
- }
-
- if (!printk_ratelimit())
- return;
-
- if (module_status < MLX5_MODULE_STATUS_ERROR)
- mlx5_core_info(dev,
- "Port module event: module %u, %s\n",
- module_num, mlx5_pme_status[module_status - 1]);
-
- else if (module_status == MLX5_MODULE_STATUS_ERROR)
- mlx5_core_info(dev,
- "Port module event[error]: module %u, %s, %s\n",
- module_num, mlx5_pme_status[module_status - 1],
- mlx5_pme_error[error_type]);
-}
-
int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
{
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 91b8139a388d..388f205a497f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -38,11 +38,11 @@
#include <linux/mlx5/transobj.h>
#include "mlx5_core.h"
+#include "lib/eq.h"
-static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
- u32 rsn)
+static struct mlx5_core_rsc_common *
+mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
{
- struct mlx5_qp_table *table = &dev->priv.qp_table;
struct mlx5_core_rsc_common *common;
spin_lock(&table->lock);
@@ -53,11 +53,6 @@ static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
spin_unlock(&table->lock);
- if (!common) {
- mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
- rsn);
- return NULL;
- }
return common;
}
@@ -120,19 +115,57 @@ static bool is_event_type_allowed(int rsc_type, int event_type)
}
}
-void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
+static int rsc_event_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
{
- struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
+ struct mlx5_core_rsc_common *common;
+ struct mlx5_qp_table *table;
+ struct mlx5_core_dev *dev;
struct mlx5_core_dct *dct;
+ u8 event_type = (u8)type;
struct mlx5_core_qp *qp;
+ struct mlx5_priv *priv;
+ struct mlx5_eqe *eqe;
+ u32 rsn;
+
+ switch (event_type) {
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ eqe = data;
+ rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
+ rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
+ break;
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ case MLX5_EVENT_TYPE_COMM_EST:
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ eqe = data;
+ rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+ rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ table = container_of(nb, struct mlx5_qp_table, nb);
+ priv = container_of(table, struct mlx5_priv, qp_table);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn);
- if (!common)
- return;
+ common = mlx5_get_rsc(table, rsn);
+ if (!common) {
+ mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn);
+ return NOTIFY_OK;
+ }
if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
event_type, rsn);
- return;
+ goto out;
}
switch (common->res) {
@@ -150,8 +183,10 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
default:
mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
}
-
+out:
mlx5_core_put_rsc(common);
+
+ return NOTIFY_OK;
}
static int create_resource_common(struct mlx5_core_dev *dev,
@@ -487,10 +522,16 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev)
spin_lock_init(&table->lock);
INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
mlx5_qp_debugfs_init(dev);
+
+ table->nb.notifier_call = rsc_event_notifier;
+ mlx5_notifier_register(dev, &table->nb);
}
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+
+ mlx5_notifier_unregister(dev, &table->nb);
mlx5_qp_debugfs_cleanup(dev);
}
@@ -670,3 +711,20 @@ int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
+
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type)
+{
+ u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+
+ return mlx5_get_rsc(table, rsn);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_res_hold);
+
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
+{
+ mlx5_core_put_rsc(res);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_res_put);
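
The rsn values handled above keep the 24-bit resource number in the low bits
and the resource type in the bits from MLX5_USER_INDEX_LEN upward (24 in
mainline, which is what the 0xffffff mask implies). A small sketch of that
pack/unpack arithmetic, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define USER_INDEX_LEN	24	/* matches the 0xffffff mask in the hunk */

enum res_type { RES_QP = 0, RES_SRQ, RES_XSRQ, RES_DCT };

/* Mirror "rsn |= (type << MLX5_USER_INDEX_LEN)" from rsc_event_notifier(). */
static uint32_t pack_rsn(uint32_t num, enum res_type type)
{
	return (num & 0xffffff) | ((uint32_t)type << USER_INDEX_LEN);
}

int main(void)
{
	uint32_t rsn = pack_rsn(0x1234, RES_DCT);

	printf("rsn=0x%x type=%u num=0x%x\n",
	       rsn, rsn >> USER_INDEX_LEN, rsn & 0xffffff);
	return 0;
}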
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index a0674962f02c..6e178030d8fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -216,20 +216,10 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!mlx5_core_is_pf(dev))
return -EPERM;
- if (num_vfs) {
- int ret;
-
- ret = mlx5_lag_forbid(dev);
- if (ret && (ret != -ENODEV))
- return ret;
- }
-
- if (num_vfs) {
+ if (num_vfs)
err = mlx5_sriov_enable(pdev, num_vfs);
- } else {
+ else
mlx5_sriov_disable(pdev);
- mlx5_lag_allow(dev);
- }
return err ? err : num_vfs;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index a1ee9a8a769e..c4d4b76096dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -258,115 +258,6 @@ void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
}
EXPORT_SYMBOL(mlx5_core_destroy_tis);
-int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn)
-{
- u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
- int err;
-
- MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- if (!err)
- *rmpn = MLX5_GET(create_rmp_out, out, rmpn);
-
- return err;
-}
-
-int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
-{
- u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};
-
- MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
- return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-
-int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0};
-
- MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
- MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out,
- sizeof(out));
-}
-
-int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
-{
- u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0};
- int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
-
- MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
- MLX5_SET(query_rmp_in, in, rmpn, rmpn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
-int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
-{
- void *in;
- void *rmpc;
- void *wq;
- void *bitmask;
- int err;
-
- in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
- bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
- wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
-
- MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
- MLX5_SET(modify_rmp_in, in, rmpn, rmpn);
- MLX5_SET(wq, wq, lwm, lwm);
- MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
- MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
-
- err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
-
- kvfree(in);
-
- return err;
-}
-
-int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *xsrqn)
-{
- u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
- int err;
-
- MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- if (!err)
- *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
-
- return err;
-}
-
-int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
-
- MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
- MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
-{
- u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
-
- MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
- MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
- MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
- MLX5_SET(arm_xrc_srq_in, in, op_mod,
- MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index cfbea66b4879..9b150ce9d315 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -1204,9 +1204,19 @@ EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
{
- if (!mdev->sys_image_guid)
- mlx5_query_nic_vport_system_image_guid(mdev, &mdev->sys_image_guid);
+ int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ u64 tmp = 0;
- return mdev->sys_image_guid;
+ if (mdev->sys_image_guid)
+ return mdev->sys_image_guid;
+
+ if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
+ mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
+ else
+ mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+
+ mdev->sys_image_guid = tmp;
+
+ return tmp;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 2dcbf1ebfd6a..953cc8efba69 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -155,7 +155,8 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *cqc, struct mlx5_cqwq *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
- u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) + 6;
+ /* CQE_STRIDE_128 and CQE_STRIDE_128_PAD both mean 128B stride */
+ u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;
u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index b1293d153a58..ea934a48c90a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -177,9 +177,14 @@ static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
return mlx5_cqwq_ctr2ix(wq, wq->cc);
}
-static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+static inline struct mlx5_cqe64 *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
{
- return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
+ struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix);
+
+ /* For 128B CQEs the data is in the last 64B */
+ cqe += wq->fbc.log_stride == 7;
+
+ return cqe;
}
static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 8a291eb36c64..080ddd1942ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -80,6 +80,7 @@ config MLXSW_SPECTRUM
depends on IPV6_GRE || IPV6_GRE=n
select GENERIC_ALLOCATOR
select PARMAN
+ select OBJAGG
select MLXFW
default m
---help---
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 1f77e97e2d7a..bbf45f10c208 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -20,7 +20,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_acl_tcam.o spectrum_acl_ctcam.o \
spectrum_acl_atcam.o spectrum_acl_erp.o \
spectrum1_acl_tcam.o spectrum2_acl_tcam.o \
- spectrum_acl.o \
+ spectrum_acl_bloom_filter.o spectrum_acl.o \
spectrum_flower.o spectrum_cnt.o \
spectrum_fid.o spectrum_ipip.o \
spectrum_acl_flex_actions.o \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 30f751e69698..ddedf8ab5b64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -81,6 +81,7 @@ struct mlxsw_core {
struct mlxsw_core_port *ports;
unsigned int max_ports;
bool reload_fail;
+ bool fw_flash_in_progress;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
@@ -428,12 +429,16 @@ struct mlxsw_reg_trans {
struct rcu_head rcu;
};
-#define MLXSW_EMAD_TIMEOUT_MS 200
+#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS 3000
+#define MLXSW_EMAD_TIMEOUT_MS 200
static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
+ if (trans->core->fw_flash_in_progress)
+ timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
+
queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}
@@ -965,10 +970,11 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
};
-int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
- const struct mlxsw_bus *mlxsw_bus,
- void *bus_priv, bool reload,
- struct devlink *devlink)
+static int
+__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv, bool reload,
+ struct devlink *devlink)
{
const char *device_kind = mlxsw_bus_info->device_kind;
struct mlxsw_core *mlxsw_core;
@@ -1035,6 +1041,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_devlink_register;
}
+ if (mlxsw_driver->params_register && !reload) {
+ err = mlxsw_driver->params_register(mlxsw_core);
+ if (err)
+ goto err_register_params;
+ }
+
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
goto err_hwmon_init;
@@ -1057,6 +1069,9 @@ err_driver_init:
err_thermal_init:
mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
+ if (mlxsw_driver->params_unregister && !reload)
+ mlxsw_driver->params_unregister(mlxsw_core);
+err_register_params:
if (!reload)
devlink_unregister(devlink);
err_devlink_register:
@@ -1076,6 +1091,29 @@ err_bus_init:
err_devlink_alloc:
return err;
}
+
+int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_bus *mlxsw_bus,
+ void *bus_priv, bool reload,
+ struct devlink *devlink)
+{
+ bool called_again = false;
+ int err;
+
+again:
+ err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus,
+ bus_priv, reload, devlink);
+ /* -EAGAIN is returned in case the FW was updated. FW needs
+	 * a reset, so let's try to call __mlxsw_core_bus_device_register()
+ * again.
+ */
+ if (err == -EAGAIN && !called_again) {
+ called_again = true;
+ goto again;
+ }
+
+ return err;
+}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
@@ -1097,6 +1135,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
+ if (mlxsw_core->driver->params_unregister && !reload)
+ mlxsw_core->driver->params_unregister(mlxsw_core);
if (!reload)
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
@@ -1109,6 +1149,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
reload_fail_deinit:
+ if (mlxsw_core->driver->params_unregister)
+ mlxsw_core->driver->params_unregister(mlxsw_core);
devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
@@ -1854,6 +1896,18 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core->fw_flash_in_progress = true;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
+
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core->fw_flash_in_progress = false;
+}
+EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
+
static int __init mlxsw_core_module_init(void)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c35be477856f..4e114f35ee0d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -282,6 +282,8 @@ struct mlxsw_driver {
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
+ int (*params_register)(struct mlxsw_core *mlxsw_core);
+ void (*params_unregister)(struct mlxsw_core *mlxsw_core);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool res_query_enabled;
@@ -292,6 +294,9 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
+void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
+void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
+
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 785bf01fe2be..df78d23b3ec3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -426,15 +426,17 @@ mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask, int block_start, int block_end)
+ char *key, char *mask)
{
+ unsigned int blocks_count =
+ mlxsw_afk_key_info_blocks_count_get(key_info);
char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
const struct mlxsw_afk_element_inst *elinst;
enum mlxsw_afk_element element;
int block_index, i;
- for (i = block_start; i <= block_end; i++) {
+ for (i = 0; i < blocks_count; i++) {
memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
@@ -451,10 +453,18 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
values->storage.mask);
}
- if (key)
- mlxsw_afk->ops->encode_block(block_key, i, key);
- if (mask)
- mlxsw_afk->ops->encode_block(block_mask, i, mask);
+ mlxsw_afk->ops->encode_block(key, i, block_key);
+ mlxsw_afk->ops->encode_block(mask, i, block_mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
+
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+ int block_start, int block_end)
+{
+ int i;
+
+ for (i = block_start; i <= block_end; i++)
+ mlxsw_afk->ops->clear_block(key, i);
+}
+EXPORT_SYMBOL(mlxsw_afk_clear);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index c29c045d826d..4a625cdf3e7c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -33,6 +33,8 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_IP_TTL_,
MLXSW_AFK_ELEMENT_IP_ECN,
MLXSW_AFK_ELEMENT_IP_DSCP,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
MLXSW_AFK_ELEMENT_MAX,
};
@@ -87,6 +89,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_8_10, 0x18, 17, 3),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_7, 0x18, 20, 8),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
@@ -188,7 +192,8 @@ struct mlxsw_afk;
struct mlxsw_afk_ops {
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
- void (*encode_block)(char *block, int block_index, char *output);
+ void (*encode_block)(char *output, int block_index, char *block);
+ void (*clear_block)(char *output, int block_index);
};
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
@@ -228,6 +233,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask, int block_start, int block_end);
+ char *key, char *mask);
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+ int block_start, int block_end);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 6d29dc428608..61f897b40f82 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -16,6 +16,15 @@
#define MLXSW_THERMAL_MAX_TEMP 110000 /* 110C */
#define MLXSW_THERMAL_MAX_STATE 10
#define MLXSW_THERMAL_MAX_DUTY 255
+/* Minimum and maximum allowed fan speed in percent: from 20% to 100%. Values
+ * MLXSW_THERMAL_MAX_STATE + x, where x is between 2 and 10, are used for
+ * setting the fan speed dynamic minimum. For example, if the value is set to
+ * 14 (40%), the cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7,
+ * 8, 9, 10 to introduce PWM speeds in percent: 40, 40, 40, 40, 40, 50, 60,
+ * 70, 80, 90, 100.
+ */
+#define MLXSW_THERMAL_SPEED_MIN (MLXSW_THERMAL_MAX_STATE + 2)
+#define MLXSW_THERMAL_SPEED_MAX (MLXSW_THERMAL_MAX_STATE * 2)
+#define MLXSW_THERMAL_SPEED_MIN_LEVEL 2 /* 20% */
struct mlxsw_thermal_trip {
int type;
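
Working the comment's example: a request of 14 maps to a dynamic minimum state
of 14 - MLXSW_THERMAL_MAX_STATE = 4, and max(state, i) over all states yields
the 4, 4, 4, 4, 4, 5, 6, 7, 8, 9, 10 vector. A minimal sketch of that
computation:

#include <stdio.h>

#define MAX_STATE 10

int main(void)
{
	unsigned long levels[MAX_STATE + 1];
	unsigned long state = 14 - MAX_STATE;	/* request 14 -> minimum 4 */
	unsigned long i;

	/* Every level below the new minimum is raised to it; levels above
	 * stay at their own index, as in mlxsw_thermal_set_cur_state(). */
	for (i = 0; i <= MAX_STATE; i++)
		levels[i] = state > i ? state : i;

	for (i = 0; i <= MAX_STATE; i++)
		printf("%lu ", levels[i]);	/* 4 4 4 4 4 5 6 7 8 9 10 */
	printf("\n");
	return 0;
}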
@@ -68,6 +77,7 @@ struct mlxsw_thermal {
const struct mlxsw_bus_info *bus_info;
struct thermal_zone_device *tzdev;
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+ u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
enum thermal_device_mode mode;
};
@@ -285,12 +295,51 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
struct mlxsw_thermal *thermal = cdev->devdata;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- int err, idx;
+ unsigned long cur_state, i;
+ int idx;
+ u8 duty;
+ int err;
idx = mlxsw_get_cooling_device_idx(thermal, cdev);
if (idx < 0)
return idx;
+	/* Verify if this request is for changing the allowed fan dynamic
+	 * minimum. If it is, update cooling levels accordingly and update
+	 * state, if current state is below the newly requested minimum state.
+ * For example, if current state is 5, and minimal state is to be
+ * changed from 4 to 6, thermal->cooling_levels[0 to 5] will be changed
+ * all from 4 to 6. And state 5 (thermal->cooling_levels[4]) should be
+ * overwritten.
+ */
+ if (state >= MLXSW_THERMAL_SPEED_MIN &&
+ state <= MLXSW_THERMAL_SPEED_MAX) {
+ state -= MLXSW_THERMAL_MAX_STATE;
+ for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++)
+ thermal->cooling_levels[i] = max(state, i);
+
+ mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
+ err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
+ if (err)
+ return err;
+
+ duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl);
+ cur_state = mlxsw_duty_to_state(duty);
+
+		/* If current fan state is lower than requested dynamic
+		 * minimum, increase fan speed up to dynamic minimum.
+ */
+ if (state < cur_state)
+ return 0;
+
+ state = cur_state;
+ }
+
+ if (state > MLXSW_THERMAL_MAX_STATE)
+ return -EINVAL;
+
+ /* Normalize the state to the valid speed range. */
+ state = thermal->cooling_levels[state];
mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
@@ -369,6 +418,11 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
}
}
+ /* Initialize cooling levels per PWM state. */
+ for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++)
+ thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL,
+ i);
+
thermal->tzdev = thermal_zone_device_register("mlxsw",
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 5890fdfd62c3..66b8098c6fd2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1720,7 +1720,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
struct mlxsw_pci *mlxsw_pci;
- bool called_again = false;
int err;
mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
@@ -1777,18 +1776,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->bus_info.dev = &pdev->dev;
mlxsw_pci->id = id;
-again:
err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
&mlxsw_pci_bus, mlxsw_pci, false,
NULL);
- /* -EAGAIN is returned in case the FW was updated. FW needs
- * a reset, so lets try to call mlxsw_core_bus_device_register()
- * again.
- */
- if (err == -EAGAIN && !called_again) {
- called_again = true;
- goto again;
- } else if (err) {
+ if (err) {
dev_err(&pdev->dev, "cannot register bus device\n");
goto err_bus_device_register;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index db3d2790aeec..9b48dffc9f63 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -641,6 +641,10 @@ enum mlxsw_reg_sfn_rec_type {
MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7,
/* Aged-out MAC address on a LAG port. */
MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG = 0x8,
+ /* Learned unicast tunnel record. */
+ MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL = 0xD,
+ /* Aged-out unicast tunnel record. */
+ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL = 0xE,
};
/* reg_sfn_rec_type
@@ -704,6 +708,66 @@ static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index,
*p_lag_id = mlxsw_reg_sfn_mac_lag_lag_id_get(payload, rec_index);
}
+/* reg_sfn_uc_tunnel_uip_msb
+ * When protocol is IPv4, the most significant byte of the underlay IPv4
+ * address of the remote VTEP.
+ * When protocol is IPv6, reserved.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_msb, MLXSW_REG_SFN_BASE_LEN, 24,
+ 8, MLXSW_REG_SFN_REC_LEN, 0x08, false);
+
+enum mlxsw_reg_sfn_uc_tunnel_protocol {
+ MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV4,
+ MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV6,
+};
+
+/* reg_sfn_uc_tunnel_protocol
+ * IP protocol.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_protocol, MLXSW_REG_SFN_BASE_LEN, 27,
+ 1, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+/* reg_sfn_uc_tunnel_uip_lsb
+ * When protocol is IPv4, the least significant bytes of the underlay
+ * IPv4 address of the remote VTEP.
+ * When protocol is IPv6, ipv6_id to be queried from TNIPSD.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_lsb, MLXSW_REG_SFN_BASE_LEN, 0,
+ 24, MLXSW_REG_SFN_REC_LEN, 0x0C, false);
+
+enum mlxsw_reg_sfn_tunnel_port {
+ MLXSW_REG_SFN_TUNNEL_PORT_NVE,
+ MLXSW_REG_SFN_TUNNEL_PORT_VPLS,
+ MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL0,
+ MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL1,
+};
+
+/* reg_sfn_uc_tunnel_port
+ * Tunnel port.
+ * Reserved on Spectrum.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sfn, tunnel_port, MLXSW_REG_SFN_BASE_LEN, 0, 4,
+ MLXSW_REG_SFN_REC_LEN, 0x10, false);
+
+static inline void
+mlxsw_reg_sfn_uc_tunnel_unpack(char *payload, int rec_index, char *mac,
+ u16 *p_fid, u32 *p_uip,
+ enum mlxsw_reg_sfn_uc_tunnel_protocol *p_proto)
+{
+ u32 uip_msb, uip_lsb;
+
+ mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac);
+ *p_fid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index);
+ uip_msb = mlxsw_reg_sfn_uc_tunnel_uip_msb_get(payload, rec_index);
+ uip_lsb = mlxsw_reg_sfn_uc_tunnel_uip_lsb_get(payload, rec_index);
+ *p_uip = uip_msb << 24 | uip_lsb;
+ *p_proto = mlxsw_reg_sfn_uc_tunnel_protocol_get(payload, rec_index);
+}
+
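
The unpack helper above splits the underlay IPv4 address across two register
fields and glues it back with "uip_msb << 24 | uip_lsb". A tiny sketch of the
reconstruction:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t uip_msb = 0xc0;	/* 192, from uc_tunnel_uip_msb */
	uint32_t uip_lsb = 0xa80001;	/* 168.0.1, from uc_tunnel_uip_lsb */
	uint32_t uip = uip_msb << 24 | uip_lsb;

	printf("underlay VTEP IP: %u.%u.%u.%u\n",
	       uip >> 24, (uip >> 16) & 0xff, (uip >> 8) & 0xff, uip & 0xff);
	return 0;
}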
/* SPMS - Switch Port MSTP/RSTP State Register
* -------------------------------------------
* Configures the spanning tree state of a physical port.
@@ -2431,6 +2495,43 @@ static inline void mlxsw_reg_pefa_unpack(char *payload, bool *p_a)
*p_a = mlxsw_reg_pefa_a_get(payload);
}
+/* PEMRBT - Policy-Engine Multicast Router Binding Table Register
+ * --------------------------------------------------------------
+ * This register is used for binding a multicast router to an ACL group
+ * that serves the MC router.
+ * This register is not supported by SwitchX/-2 and Spectrum.
+ */
+#define MLXSW_REG_PEMRBT_ID 0x3014
+#define MLXSW_REG_PEMRBT_LEN 0x14
+
+MLXSW_REG_DEFINE(pemrbt, MLXSW_REG_PEMRBT_ID, MLXSW_REG_PEMRBT_LEN);
+
+enum mlxsw_reg_pemrbt_protocol {
+ MLXSW_REG_PEMRBT_PROTO_IPV4,
+ MLXSW_REG_PEMRBT_PROTO_IPV6,
+};
+
+/* reg_pemrbt_protocol
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pemrbt, protocol, 0x00, 0, 1);
+
+/* reg_pemrbt_group_id
+ * ACL group identifier.
+ * Range 0..cap_max_acl_groups-1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pemrbt, group_id, 0x10, 0, 16);
+
+static inline void
+mlxsw_reg_pemrbt_pack(char *payload, enum mlxsw_reg_pemrbt_protocol protocol,
+ u16 group_id)
+{
+ MLXSW_REG_ZERO(pemrbt, payload);
+ mlxsw_reg_pemrbt_protocol_set(payload, protocol);
+ mlxsw_reg_pemrbt_group_id_set(payload, group_id);
+}
+
/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2
* -----------------------------------------------------
* This register is used for accessing rules within a TCAM region.
@@ -2642,7 +2743,7 @@ mlxsw_reg_perpt_pack(char *payload, u8 erpt_bank, u8 erpt_index,
mlxsw_reg_perpt_erpt_bank_set(payload, erpt_bank);
mlxsw_reg_perpt_erpt_index_set(payload, erpt_index);
mlxsw_reg_perpt_key_size_set(payload, key_size);
- mlxsw_reg_perpt_bf_bypass_set(payload, true);
+ mlxsw_reg_perpt_bf_bypass_set(payload, false);
mlxsw_reg_perpt_erp_id_set(payload, erp_id);
mlxsw_reg_perpt_erpt_base_bank_set(payload, erpt_base_bank);
mlxsw_reg_perpt_erpt_base_index_set(payload, erpt_base_index);
@@ -2834,8 +2935,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
u32 priority,
const char *tcam_region_info,
const char *key, u8 erp_id,
- bool large_exists, u32 lkey_id,
- u32 action_pointer)
+ u16 delta_start, u8 delta_mask,
+ u8 delta_value, bool large_exists,
+ u32 lkey_id, u32 action_pointer)
{
MLXSW_REG_ZERO(ptce3, payload);
mlxsw_reg_ptce3_v_set(payload, valid);
@@ -2844,6 +2946,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
+ mlxsw_reg_ptce3_delta_start_set(payload, delta_start);
+ mlxsw_reg_ptce3_delta_mask_set(payload, delta_mask);
+ mlxsw_reg_ptce3_delta_value_set(payload, delta_value);
mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);
@@ -2901,7 +3006,7 @@ static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id)
mlxsw_reg_percr_region_id_set(payload, region_id);
mlxsw_reg_percr_atcam_ignore_prune_set(payload, false);
mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false);
- mlxsw_reg_percr_bf_bypass_set(payload, true);
+ mlxsw_reg_percr_bf_bypass_set(payload, false);
}
/* PERERP - Policy-Engine Region eRP Register
@@ -2990,6 +3095,72 @@ static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id,
mlxsw_reg_pererp_master_rp_id_set(payload, master_rp_id);
}
+/* PEABFE - Policy-Engine Algorithmic Bloom Filter Entries Register
+ * ----------------------------------------------------------------
+ * This register configures the Bloom filter entries.
+ */
+#define MLXSW_REG_PEABFE_ID 0x3022
+#define MLXSW_REG_PEABFE_BASE_LEN 0x10
+#define MLXSW_REG_PEABFE_BF_REC_LEN 0x4
+#define MLXSW_REG_PEABFE_BF_REC_MAX_COUNT 256
+#define MLXSW_REG_PEABFE_LEN (MLXSW_REG_PEABFE_BASE_LEN + \
+ MLXSW_REG_PEABFE_BF_REC_LEN * \
+ MLXSW_REG_PEABFE_BF_REC_MAX_COUNT)
+
+MLXSW_REG_DEFINE(peabfe, MLXSW_REG_PEABFE_ID, MLXSW_REG_PEABFE_LEN);
+
+/* reg_peabfe_size
+ * Number of BF entries to be updated.
+ * Range 1..256
+ * Access: Op
+ */
+MLXSW_ITEM32(reg, peabfe, size, 0x00, 0, 9);
+
+/* reg_peabfe_bf_entry_state
+ * Bloom filter state
+ * 0 - Clear
+ * 1 - Set
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_state,
+ MLXSW_REG_PEABFE_BASE_LEN, 31, 1,
+ MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false);
+
+/* reg_peabfe_bf_entry_bank
+ * Bloom filter bank ID
+ * Range 0..cap_max_erp_table_banks-1
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_bank,
+ MLXSW_REG_PEABFE_BASE_LEN, 24, 4,
+ MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false);
+
+/* reg_peabfe_bf_entry_index
+ * Bloom filter entry index
+ * Range 0..2^cap_max_bf_log-1
+ * Access: Index
+ */
+MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_index,
+ MLXSW_REG_PEABFE_BASE_LEN, 0, 24,
+ MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false);
+
+static inline void mlxsw_reg_peabfe_pack(char *payload)
+{
+ MLXSW_REG_ZERO(peabfe, payload);
+}
+
+static inline void mlxsw_reg_peabfe_rec_pack(char *payload, int rec_index,
+ u8 state, u8 bank, u32 bf_index)
+{
+ u8 num_rec = mlxsw_reg_peabfe_size_get(payload);
+
+ if (rec_index >= num_rec)
+ mlxsw_reg_peabfe_size_set(payload, rec_index + 1);
+ mlxsw_reg_peabfe_bf_entry_state_set(payload, rec_index, state);
+ mlxsw_reg_peabfe_bf_entry_bank_set(payload, rec_index, bank);
+ mlxsw_reg_peabfe_bf_entry_index_set(payload, rec_index, bf_index);
+}
+
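
mlxsw_reg_peabfe_rec_pack() bumps the size field to cover the highest record
index packed so far, so records can be packed in any order and the count stays
consistent. A toy model of that bookkeeping (plain struct, not the generated
register accessors):

#include <stdint.h>
#include <stdio.h>

struct peabfe {
	unsigned int size;	/* number of records to update, 1..256 */
	struct { uint8_t state; uint8_t bank; uint32_t index; } rec[256];
};

static void rec_pack(struct peabfe *p, unsigned int i,
		     uint8_t state, uint8_t bank, uint32_t bf_index)
{
	if (i >= p->size)
		p->size = i + 1;	/* size tracks highest record used */
	p->rec[i].state = state;
	p->rec[i].bank = bank;
	p->rec[i].index = bf_index;
}

int main(void)
{
	struct peabfe pl = { 0 };

	rec_pack(&pl, 0, 1, 2, 0x100);	/* set entry 0x100 in bank 2 */
	rec_pack(&pl, 1, 0, 2, 0x2a0);	/* clear entry 0x2a0 in bank 2 */
	printf("records to update: %u\n", pl.size);	/* 2 */
	return 0;
}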
/* IEDR - Infrastructure Entry Delete Register
* ----------------------------------------------------
* This register is used for deleting entries from the entry tables.
@@ -4231,8 +4402,11 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_RFC_2863_CNT = 0x1,
MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2,
+ MLXSW_REG_PPCNT_RFC_3635_CNT = 0x3,
MLXSW_REG_PPCNT_EXT_CNT = 0x5,
+ MLXSW_REG_PPCNT_DISCARD_CNT = 0x6,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
MLXSW_REG_PPCNT_TC_CONG_TC = 0x13,
@@ -4247,6 +4421,7 @@ enum mlxsw_reg_ppcnt_grp {
* 0x2: RFC 2819 Counters
* 0x3: RFC 3635 Counters
* 0x5: Ethernet Extended Counters
+ * 0x6: Ethernet Discard Counters
* 0x8: Link Level Retransmission Counters
* 0x10: Per Priority Counters
* 0x11: Per Traffic Class Counters
@@ -4390,8 +4565,46 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+/* Ethernet RFC 2863 Counter Group */
+
+/* reg_ppcnt_if_in_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_in_discards,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_if_out_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_discards,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_if_out_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
/* Ethernet RFC 2819 Counter Group */
+/* reg_ppcnt_ether_stats_undersize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_undersize_pkts,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_ether_stats_oversize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_oversize_pkts,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_ether_stats_fragments
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_fragments,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
/* reg_ppcnt_ether_stats_pkts64octets
* Access: RO
*/
@@ -4452,6 +4665,32 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets,
MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64);
+/* Ethernet RFC 3635 Counter Group */
+
+/* reg_ppcnt_dot3stats_fcs_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_fcs_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_dot3stats_symbol_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_symbol_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_dot3control_in_unknown_opcodes
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3control_in_unknown_opcodes,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_dot3in_pause_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3in_pause_frames,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
/* Ethernet Extended Counter Group Counters */
/* reg_ppcnt_ecn_marked
@@ -4460,6 +4699,80 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
MLXSW_ITEM64(reg, ppcnt, ecn_marked,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+/* Ethernet Discard Counter Group Counters */
+
+/* reg_ppcnt_ingress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_general,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+
+/* reg_ppcnt_ingress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_policy_engine,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_ingress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_vlan_membership,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_ingress_tag_frame_type
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tag_frame_type,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x18, 0, 64);
+
+/* reg_ppcnt_egress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_vlan_membership,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
+
+/* reg_ppcnt_loopback_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, loopback_filter,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
+
+/* reg_ppcnt_egress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_general,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_egress_hoq
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_hoq,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
+/* reg_ppcnt_egress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_policy_engine,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
+
+/* reg_ppcnt_ingress_tx_link_down
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tx_link_down,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_egress_stp_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_stp_filter,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_egress_sll
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_sll,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
/* Ethernet Per Priority Group Counters */
/* reg_ppcnt_rx_octets
@@ -4862,6 +5175,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR,
};
/* reg_htgt_trap_group
@@ -9357,8 +9671,10 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(ppbs),
MLXSW_REG(prcr),
MLXSW_REG(pefa),
+ MLXSW_REG(pemrbt),
MLXSW_REG(ptce2),
MLXSW_REG(perpt),
+ MLXSW_REG(peabfe),
MLXSW_REG(perar),
MLXSW_REG(ptce3),
MLXSW_REG(percr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 99b341539870..b8b3a01c2a9e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -41,6 +41,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB,
MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB,
MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB,
+ MLXSW_RES_ID_ACL_MAX_BF_LOG,
MLXSW_RES_ID_MAX_CPU_POLICERS,
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
@@ -93,6 +94,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951,
[MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952,
[MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953,
+ [MLXSW_RES_ID_ACL_MAX_BF_LOG] = 0x2960,
[MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9bec940330a4..eed1045e4d96 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -45,8 +45,8 @@
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 1703
-#define MLXSW_SP1_FWREV_SUBMINOR 4
+#define MLXSW_SP1_FWREV_MINOR 1910
+#define MLXSW_SP1_FWREV_SUBMINOR 622
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -65,6 +65,13 @@ static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp_driver_version[] = "1.0";
+static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
+};
+static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
+};
+
/* tx_hdr_version
* Tx header version.
* Must be set to 1.
@@ -309,8 +316,13 @@ static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
},
.mlxsw_sp = mlxsw_sp
};
+ int err;
- return mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+ mlxsw_core_fw_flash_start(mlxsw_sp->core);
+ err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
+ mlxsw_core_fw_flash_end(mlxsw_sp->core);
+
+ return err;
}
static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
@@ -318,6 +330,7 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
const char *fw_filename = mlxsw_sp->fw_filename;
+ union devlink_param_value value;
const struct firmware *firmware;
int err;
@@ -325,6 +338,15 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
if (!req_rev || !fw_filename)
return 0;
+ /* Don't check if devlink 'fw_load_policy' param is 'flash' */
+ err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ &value);
+ if (err)
+ return err;
+ if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
+ return 0;
+
/* Validate driver & FW are compatible */
if (rev->major != req_rev->major) {
WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
@@ -1118,22 +1140,40 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
return 0;
}
-static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool flush_default)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
- &mlxsw_sp_port->vlans_list, list)
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ &mlxsw_sp_port->vlans_list, list) {
+ if (!flush_default &&
+ mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
+ continue;
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
+ }
}
-static struct mlxsw_sp_port_vlan *
+static void
+mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+{
+ if (mlxsw_sp_port_vlan->bridge_port)
+ mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
+ else if (mlxsw_sp_port_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+}
+
+struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- bool untagged = vid == 1;
+ bool untagged = vid == MLXSW_SP_DEFAULT_VID;
int err;
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (mlxsw_sp_port_vlan)
+ return ERR_PTR(-EEXIST);
+
err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
if (err)
return ERR_PTR(err);
@@ -1145,7 +1185,6 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
}
mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
- mlxsw_sp_port_vlan->ref_count = 1;
mlxsw_sp_port_vlan->vid = vid;
list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
@@ -1156,46 +1195,17 @@ err_port_vlan_alloc:
return ERR_PTR(err);
}
-static void
-mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
+void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
u16 vid = mlxsw_sp_port_vlan->vid;
+ mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
list_del(&mlxsw_sp_port_vlan->list);
kfree(mlxsw_sp_port_vlan);
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
-struct mlxsw_sp_port_vlan *
-mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
-{
- struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
-
- mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
- if (mlxsw_sp_port_vlan) {
- mlxsw_sp_port_vlan->ref_count++;
- return mlxsw_sp_port_vlan;
- }
-
- return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
-}
-
-void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
-{
- struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
-
- if (--mlxsw_sp_port_vlan->ref_count != 0)
- return;
-
- if (mlxsw_sp_port_vlan->bridge_port)
- mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
- else if (fid)
- mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
-
- mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
-}
-
static int mlxsw_sp_port_add_vid(struct net_device *dev,
__be16 __always_unused proto, u16 vid)
{
@@ -1207,7 +1217,7 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev,
if (!vid)
return 0;
- return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
+ return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
}
static int mlxsw_sp_port_kill_vid(struct net_device *dev,
@@ -1225,7 +1235,7 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (!mlxsw_sp_port_vlan)
return 0;
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
return 0;
}
@@ -1337,7 +1347,6 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
__be16 protocol = f->common.protocol;
const struct tc_action *a;
- LIST_HEAD(actions);
int err;
if (!tcf_exts_has_one_action(f->exts)) {
@@ -1876,8 +1885,38 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
+ {
+ .str = "if_in_discards",
+ .getter = mlxsw_reg_ppcnt_if_in_discards_get,
+ },
+ {
+ .str = "if_out_discards",
+ .getter = mlxsw_reg_ppcnt_if_out_discards_get,
+ },
+ {
+ .str = "if_out_errors",
+ .getter = mlxsw_reg_ppcnt_if_out_errors_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
{
+ .str = "ether_stats_undersize_pkts",
+ .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
+ },
+ {
+ .str = "ether_stats_oversize_pkts",
+ .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
+ },
+ {
+ .str = "ether_stats_fragments",
+ .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
+ },
+ {
.str = "ether_pkts64octets",
.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
},
@@ -1922,6 +1961,82 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
+ {
+ .str = "dot3stats_fcs_errors",
+ .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
+ },
+ {
+ .str = "dot3stats_symbol_errors",
+ .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
+ },
+ {
+ .str = "dot3control_in_unknown_opcodes",
+ .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
+ },
+ {
+ .str = "dot3in_pause_frames",
+ .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
+ {
+ .str = "discard_ingress_general",
+ .getter = mlxsw_reg_ppcnt_ingress_general_get,
+ },
+ {
+ .str = "discard_ingress_policy_engine",
+ .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
+ },
+ {
+ .str = "discard_ingress_vlan_membership",
+ .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
+ },
+ {
+ .str = "discard_ingress_tag_frame_type",
+ .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
+ },
+ {
+ .str = "discard_egress_vlan_membership",
+ .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
+ },
+ {
+ .str = "discard_loopback_filter",
+ .getter = mlxsw_reg_ppcnt_loopback_filter_get,
+ },
+ {
+ .str = "discard_egress_general",
+ .getter = mlxsw_reg_ppcnt_egress_general_get,
+ },
+ {
+ .str = "discard_egress_hoq",
+ .getter = mlxsw_reg_ppcnt_egress_hoq_get,
+ },
+ {
+ .str = "discard_egress_policy_engine",
+ .getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
+ },
+ {
+ .str = "discard_ingress_tx_link_down",
+ .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
+ },
+ {
+ .str = "discard_egress_stp_filter",
+ .getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
+ },
+ {
+ .str = "discard_egress_sll",
+ .getter = mlxsw_reg_ppcnt_egress_sll_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
{
.str = "rx_octets_prio",
@@ -1974,7 +2089,10 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
+ MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
+ MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
+ MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
(MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
IEEE_8021QAZ_MAX_TCS) + \
(MLXSW_SP_PORT_HW_TC_STATS_LEN * \
@@ -2015,12 +2133,31 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
mlxsw_sp_port_get_prio_strings(&p, i);
@@ -2063,10 +2200,22 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
*p_hw_stats = mlxsw_sp_port_hw_stats;
*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_2863_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_RFC_2819_CNT:
*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_3635_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_DISCARD_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_discard_stats;
+ *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_PRIO_CNT:
*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
@@ -2116,11 +2265,26 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+ /* RFC 2863 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+
/* RFC 2819 Counters */
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
data, data_index);
data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+ /* RFC 3635 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+
+ /* Discard Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+
/* Per-Priority Counters */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
@@ -2887,7 +3051,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->dev = dev;
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
- mlxsw_sp_port->pvid = 1;
+ mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port->split = split;
mlxsw_sp_port->mapping.module = module;
mlxsw_sp_port->mapping.width = width;
@@ -3026,13 +3190,22 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_nve_init;
}
- mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_pvid_set;
+ }
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
+ MLXSW_SP_DEFAULT_VID);
if (IS_ERR(mlxsw_sp_port_vlan)) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
mlxsw_sp_port->local_port);
err = PTR_ERR(mlxsw_sp_port_vlan);
- goto err_port_vlan_get;
+ goto err_port_vlan_create;
}
+ mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
@@ -3052,8 +3225,9 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
err_register_netdev:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
-err_port_vlan_get:
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
+err_port_vlan_create:
+err_port_pvid_set:
mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
@@ -3094,7 +3268,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
- mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
mlxsw_sp_port_nve_fini(mlxsw_sp_port);
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
@@ -3389,10 +3563,10 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
-static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb,
+static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
u8 local_port, void *priv)
{
- skb->offload_mr_fwd_mark = 1;
+ skb->offload_l3_fwd_mark = 1;
skb->offload_fwd_mark = 1;
return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
@@ -3440,8 +3614,8 @@ out:
MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
-#define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
- MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \
+#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
+ MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
#define MLXSW_SP_EVENTL(_func, _trap_id) \
@@ -3474,7 +3648,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
/* L3 traps */
MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
- MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
+ MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false),
MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
false),
@@ -3518,9 +3692,10 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false),
MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false),
MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
- MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
/* NVE traps */
MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false),
+ MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false),
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
@@ -3548,6 +3723,7 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
rate = 128;
burst_size = 7;
break;
@@ -3633,6 +3809,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
+ case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR:
priority = 1;
tc = 1;
break;
@@ -3835,6 +4012,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_nve_init;
}
+ err = mlxsw_sp_acl_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
+ goto err_acl_init;
+ }
+
err = mlxsw_sp_router_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
@@ -3852,12 +4035,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_netdev_notifier;
}
- err = mlxsw_sp_acl_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
- goto err_acl_init;
- }
-
err = mlxsw_sp_dpipe_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
@@ -3875,12 +4052,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
err_ports_create:
mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
- mlxsw_sp_acl_fini(mlxsw_sp);
-err_acl_init:
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_acl_fini(mlxsw_sp);
+err_acl_init:
mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
mlxsw_sp_afa_fini(mlxsw_sp);
@@ -3916,6 +4093,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -3931,6 +4109,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
+ mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info);
}
@@ -3941,9 +4120,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
- mlxsw_sp_acl_fini(mlxsw_sp);
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_acl_fini(mlxsw_sp);
mlxsw_sp_nve_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -3956,16 +4135,20 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_kvdl_fini(mlxsw_sp);
}
+/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated
+ * 802.1Q FIDs
+ */
+#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \
+ VLAN_VID_MASK - 1)
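+
+/* For scale: assuming MLXSW_SP_FID_8021D_MAX of 1024 and VLAN_VID_MASK of
+ * 0xfff, this works out to 1024 + 4094 = 5118 flood table entries - one
+ * per 802.1D FID plus one per usable emulated 802.1Q VID.
+ */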
+
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
.used_max_mid = 1,
.max_mid = MLXSW_SP_MID_MAX,
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
- .max_fid_offset_flood_tables = 3,
- .fid_offset_flood_table_size = VLAN_N_VID - 1,
.max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
+ .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
@@ -3988,10 +4171,8 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
.used_flood_tables = 1,
.used_flood_mode = 1,
.flood_mode = 3,
- .max_fid_offset_flood_tables = 3,
- .fid_offset_flood_table_size = VLAN_N_VID - 1,
.max_fid_flood_tables = 3,
- .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
+ .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE,
.used_max_ib_mc = 1,
.max_ib_mc = 0,
.used_max_pkey = 1,
@@ -4171,6 +4352,52 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
return 0;
}
+static int
+mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
+ (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
+ NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param mlxsw_sp_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL,
+ mlxsw_sp_devlink_param_fw_load_policy_validate),
+};
+
+static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ union devlink_param_value value;
+ int err;
+
+ err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
+ ARRAY_SIZE(mlxsw_sp_devlink_params));
+ if (err)
+ return err;
+
+ value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ value);
+ return 0;
+}
+
+static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ devlink_params_unregister(priv_to_devlink(mlxsw_core),
+ mlxsw_sp_devlink_params,
+ ARRAY_SIZE(mlxsw_sp_devlink_params));
+}
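+
+/* A minimal usage sketch, assuming the standard devlink CLI and a
+ * hypothetical device address:
+ *
+ *   # devlink dev param set pci/0000:03:00.0 name fw_load_policy \
+ *             value flash cmode driverinit
+ *   # devlink dev reload pci/0000:03:00.0
+ *
+ * The parameter only supports the driverinit cmode, so a devlink reload
+ * is needed before a new value takes effect.
+ */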
+
static struct mlxsw_driver mlxsw_sp1_driver = {
.kind = mlxsw_sp1_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
@@ -4192,6 +4419,8 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
+ .params_register = mlxsw_sp_params_register,
+ .params_unregister = mlxsw_sp_params_unregister,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
.res_query_enabled = true,
@@ -4217,6 +4446,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
+ .params_register = mlxsw_sp_params_register,
+ .params_unregister = mlxsw_sp_params_unregister,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
@@ -4292,6 +4523,25 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
dev_put(mlxsw_sp_port->dev);
}
+static void
+mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
+ struct net_device *upper_dev;
+ struct list_head *iter;
+
+ if (netif_is_bridge_port(lag_dev))
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!netif_is_bridge_port(upper_dev))
+ continue;
+ br_dev = netdev_master_upper_dev_get(upper_dev);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
+ }
+}
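+
+/* Topology sketch for the helper above (device names hypothetical): with
+ * swp1 -> bond0 -> br0 and a VLAN upper bond0.10 -> br1, the port first
+ * leaves br0 via the LAG device itself and then br1 via the VLAN upper,
+ * so no bridge state lingers after the port leaves the LAG.
+ */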
+
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
@@ -4419,7 +4669,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
struct mlxsw_sp_upper *lag;
u16 lag_id;
u8 port_index;
@@ -4453,9 +4702,8 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
lag->ref_count++;
/* Port is no longer usable as a router interface */
- mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
- if (mlxsw_sp_port_vlan->fid)
- mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
+ if (mlxsw_sp_port->default_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
return 0;
@@ -4483,7 +4731,12 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
/* Any VLANs configured on the port are no longer valid */
- mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
+ mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
+ /* Make the LAG and its directly linked uppers leave the bridges
+ * they are members of
+ */
+ mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
if (lag->ref_count == 1)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
@@ -4493,9 +4746,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lagged = 0;
lag->ref_count--;
- mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
/* Make sure untagged frames are allowed to ingress */
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -4573,7 +4825,7 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
if (err)
goto err_port_stp_set;
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
true, false);
if (err)
goto err_port_vlan_set;
@@ -4605,7 +4857,7 @@ static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
vid, true);
- mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
+ mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
false, false);
mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
@@ -4625,6 +4877,30 @@ static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
return num_vxlans > 1;
}
+static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
+{
+ DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ u16 pvid;
+ int err;
+
+ if (!netif_is_vxlan(dev))
+ continue;
+
+ err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
+ if (err || !pvid)
+ continue;
+
+ if (test_and_set_bit(pvid, vlans))
+ return false;
+ }
+
+ return true;
+}
+
static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
struct netlink_ext_ack *extack)
{
@@ -4633,13 +4909,15 @@ static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
return false;
}
- if (br_vlan_enabled(br_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "VLAN filtering can not be enabled on a bridge with a VxLAN device");
+ if (!br_vlan_enabled(br_dev) &&
+ mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
return false;
}
- if (mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
- NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
+ if (br_vlan_enabled(br_dev) &&
+ !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
return false;
}
@@ -4713,11 +4991,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
return -EINVAL;
}
- if (is_vlan_dev(upper_dev) &&
- vlan_dev_vlan_id(upper_dev) == 1) {
- NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
- return -EINVAL;
- }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4746,6 +5019,16 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
} else if (netif_is_macvlan(upper_dev)) {
if (!info->linking)
mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ } else if (is_vlan_dev(upper_dev)) {
+ struct net_device *br_dev;
+
+ if (!netif_is_bridge_port(upper_dev))
+ break;
+ if (info->linking)
+ break;
+ br_dev = netdev_master_upper_dev_get(upper_dev);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
+ br_dev);
}
break;
}
@@ -4902,6 +5185,48 @@ static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
return 0;
}
+static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev,
+ struct net_device *br_dev,
+ unsigned long event, void *ptr,
+ u16 vid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper_dev;
+
+ if (!mlxsw_sp)
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!netif_is_macvlan(upper_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EOPNOTSUPP;
+ }
+ if (!info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev) &&
+ !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (info->linking)
+ break;
+ if (netif_is_macvlan(upper_dev))
+ mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
+ break;
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
unsigned long event, void *ptr)
{
@@ -4915,6 +5240,9 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
real_dev, event,
ptr, vid);
+ else if (netif_is_bridge_master(real_dev))
+ return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev,
+ event, ptr, vid);
return 0;
}
@@ -5014,10 +5342,21 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
if (cu_info->linking) {
if (!netif_running(dev))
return 0;
+ /* When the bridge is VLAN-aware, the VNI of the VxLAN
+ * device needs to be mapped to a VLAN, but at this
+ * point no VLANs are configured on the VxLAN device
+ */
+ if (br_vlan_enabled(upper_dev))
+ return 0;
return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
- dev, extack);
+ dev, 0, extack);
} else {
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev);
+ /* VLANs were already flushed, which triggered the
+ * necessary cleanup
+ */
+ if (br_vlan_enabled(upper_dev))
+ return 0;
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
}
break;
case NETDEV_PRE_UP:
@@ -5028,7 +5367,7 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
if (!mlxsw_sp_lower_get(upper_dev))
return 0;
- return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev,
+ return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
extack);
case NETDEV_DOWN:
upper_dev = netdev_master_upper_dev_get(dev);
@@ -5038,7 +5377,7 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
if (!mlxsw_sp_lower_get(upper_dev))
return 0;
- mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev);
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
break;
}
@@ -5069,8 +5408,10 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
event, ptr);
- else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
- err = mlxsw_sp_netdevice_router_port_event(dev);
+ else if (event == NETDEV_PRE_CHANGEADDR ||
+ event == NETDEV_CHANGEADDR ||
+ event == NETDEV_CHANGEMTU)
+ err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
else if (mlxsw_sp_is_vrf_event(event, ptr))
err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
else if (mlxsw_sp_port_dev_check(dev))
@@ -5091,18 +5432,10 @@ static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
.notifier_call = mlxsw_sp_inetaddr_valid_event,
};
-static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
- .notifier_call = mlxsw_sp_inetaddr_event,
-};
-
static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
.notifier_call = mlxsw_sp_inet6addr_valid_event,
};
-static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
- .notifier_call = mlxsw_sp_inet6addr_event,
-};
-
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
{0, },
@@ -5128,9 +5461,7 @@ static int __init mlxsw_sp_module_init(void)
int err;
register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
- register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
- register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
if (err)
@@ -5157,9 +5488,7 @@ err_sp1_pci_driver_register:
err_sp2_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
err_sp1_core_driver_register:
- unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
- unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
return err;
}
@@ -5170,9 +5499,7 @@ static void __exit mlxsw_sp_module_exit(void)
mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
- unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
- unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 0875a79cbe7b..a1c32a81b011 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -8,6 +8,7 @@
#include <linux/netdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
+#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/dcbnl.h>
@@ -24,6 +25,8 @@
#include "core_acl_flex_actions.h"
#include "reg.h"
+#define MLXSW_SP_DEFAULT_VID (VLAN_N_VID - 1)
+
#define MLXSW_SP_FID_8021D_MAX 1024
#define MLXSW_SP_MID_MAX 7000
@@ -80,6 +83,10 @@ enum mlxsw_sp_fid_type {
MLXSW_SP_FID_TYPE_MAX,
};
+enum mlxsw_sp_nve_type {
+ MLXSW_SP_NVE_TYPE_VXLAN,
+};
+
struct mlxsw_sp_mid {
struct list_head list;
unsigned char addr[ETH_ALEN];
@@ -127,6 +134,7 @@ struct mlxsw_sp {
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
+ const unsigned char *mac_mask;
struct mlxsw_sp_upper *lags;
int *port_to_module;
struct mlxsw_sp_sb *sb;
@@ -184,7 +192,6 @@ struct mlxsw_sp_port_vlan {
struct list_head list;
struct mlxsw_sp_port *mlxsw_sp_port;
struct mlxsw_sp_fid *fid;
- unsigned int ref_count;
u16 vid;
struct mlxsw_sp_bridge_port *bridge_port;
struct list_head bridge_vlan_node;
@@ -235,6 +242,7 @@ struct mlxsw_sp_port {
} periodic_hw_stats;
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
+ struct mlxsw_sp_port_vlan *default_vlan;
struct mlxsw_sp_qdisc *root_qdisc;
struct mlxsw_sp_qdisc *tclass_qdiscs;
unsigned acl_rule_count;
@@ -261,6 +269,26 @@ static inline bool mlxsw_sp_bridge_has_vxlan(struct net_device *br_dev)
return !!mlxsw_sp_bridge_vxlan_dev_find(br_dev);
}
+static inline int
+mlxsw_sp_vxlan_mapped_vid(const struct net_device *vxlan_dev, u16 *p_vid)
+{
+ struct bridge_vlan_info vinfo;
+ u16 vid = 0;
+ int err;
+
+ err = br_vlan_get_pvid(vxlan_dev, &vid);
+ if (err || !vid)
+ goto out;
+
+ err = br_vlan_get_info(vxlan_dev, vid, &vinfo);
+ if (err || !(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED))
+ vid = 0;
+
+out:
+ *p_vid = vid;
+ return err;
+}
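+
+/* A configuration sketch for the helper above (iproute2 syntax, device
+ * name hypothetical): the VNI is considered mapped only when a single
+ * VLAN on the VxLAN bridge port is both PVID and egress untagged:
+ *
+ *   # bridge vlan add vid 10 dev vxlan0 pvid untagged
+ *
+ * br_vlan_get_pvid() then reports 10 and br_vlan_get_info() confirms
+ * BRIDGE_VLAN_INFO_UNTAGGED, so *p_vid is set to 10.
+ */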
+
static inline bool
mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -358,11 +386,15 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
- const struct net_device *vxlan_dev,
+ const struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack);
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
const struct net_device *vxlan_dev);
+struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ u16 vid,
+ struct netlink_ext_ack *extack);
+extern struct notifier_block mlxsw_sp_switchdev_notifier;
/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -384,8 +416,8 @@ int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
bool learn_enable);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
struct mlxsw_sp_port_vlan *
-mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
-void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
+mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
@@ -429,15 +461,12 @@ union mlxsw_sp_l3addr {
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_netdevice_router_port_event(struct net_device *dev);
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
+ unsigned long event, void *ptr);
void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
const struct net_device *macvlan_dev);
-int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
- unsigned long event, void *ptr);
int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
unsigned long event, void *ptr);
-int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
- unsigned long event, void *ptr);
int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
unsigned long event, void *ptr);
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
@@ -457,7 +486,6 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
struct netdev_notifier_info *info);
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
-void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev);
struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
@@ -538,6 +566,7 @@ struct mlxsw_sp_acl_rule_info {
unsigned int priority;
struct mlxsw_afk_element_values values;
struct mlxsw_afa_block *act_block;
+ u8 action_created:1;
unsigned int counter_index;
};
@@ -547,6 +576,7 @@ struct mlxsw_sp_acl_ruleset;
/* spectrum_acl.c */
enum mlxsw_sp_acl_profile {
MLXSW_SP_ACL_PROFILE_FLOWER,
+ MLXSW_SP_ACL_PROFILE_MR,
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
@@ -581,7 +611,8 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset);
struct mlxsw_sp_acl_rule_info *
-mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl);
+mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
+ struct mlxsw_afa_block *afa_block);
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
@@ -625,6 +656,7 @@ struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
unsigned long cookie,
+ struct mlxsw_afa_block *afa_block,
struct netlink_ext_ack *extack);
void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule);
@@ -632,6 +664,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule);
void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule *rule);
+int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ struct mlxsw_afa_block *afa_block);
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
@@ -676,6 +711,10 @@ struct mlxsw_sp_acl_tcam_ops {
void (*entry_del)(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *chunk_priv,
void *entry_priv);
+ int (*entry_action_replace)(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *entry_priv,
bool *activity);
@@ -721,6 +760,12 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p);
/* spectrum_fid.c */
+bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid);
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index);
+int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex);
+int mlxsw_sp_fid_nve_type(const struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_nve_type *p_type);
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
__be32 vni);
int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni);
@@ -728,9 +773,12 @@ int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid,
u32 nve_flood_index);
void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid);
bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid);
-int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni);
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
+ __be32 vni, int nve_ifindex);
void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid);
bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid);
+void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev);
int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type, u8 local_port,
bool member);
@@ -738,10 +786,10 @@ int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
-enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid);
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid);
void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif);
+struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid);
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_fid_type type);
@@ -749,6 +797,8 @@ u16 mlxsw_sp_fid_8021q_vid(const struct mlxsw_sp_fid *fid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
int br_ifindex);
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_lookup(struct mlxsw_sp *mlxsw_sp,
+ u16 vid);
struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp,
int br_ifindex);
struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp,
@@ -797,10 +847,6 @@ extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops;
extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops;
/* spectrum_nve.c */
-enum mlxsw_sp_nve_type {
- MLXSW_SP_NVE_TYPE_VXLAN,
-};
-
struct mlxsw_sp_nve_params {
enum mlxsw_sp_nve_type type;
__be32 vni;
@@ -810,6 +856,9 @@ struct mlxsw_sp_nve_params {
extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[];
extern const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[];
+int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr);
int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid,
enum mlxsw_sp_l3proto proto,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
index 2a9eac90002e..fe270c1a26a6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c
@@ -67,7 +67,7 @@ mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_ctcam_chunk_init(&region->cregion,
&region->catchall.cchunk,
MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
- rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+ rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, NULL);
if (IS_ERR(rulei)) {
err = PTR_ERR(rulei);
goto err_rulei_create;
@@ -193,6 +193,15 @@ static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp1_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *_region,
unsigned int offset,
@@ -240,5 +249,6 @@ const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = {
.entry_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_entry),
.entry_add = mlxsw_sp1_acl_tcam_entry_add,
.entry_del = mlxsw_sp1_acl_tcam_entry_del,
+ .entry_action_replace = mlxsw_sp1_acl_tcam_entry_action_replace,
.entry_activity_get = mlxsw_sp1_acl_tcam_entry_activity_get,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index 8ca77f3e8f27..234ab51916db 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -34,15 +34,15 @@ mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregio
{
struct mlxsw_sp_acl_atcam_region *aregion;
struct mlxsw_sp_acl_atcam_entry *aentry;
- struct mlxsw_sp_acl_erp *erp;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
- erp = mlxsw_sp_acl_erp_get(aregion, mask, true);
- if (IS_ERR(erp))
- return PTR_ERR(erp);
- aentry->erp = erp;
+ erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, true);
+ if (IS_ERR(erp_mask))
+ return PTR_ERR(erp_mask);
+ aentry->erp_mask = erp_mask;
return 0;
}
@@ -57,7 +57,7 @@ mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregio
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
- mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+ mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
}
static const struct mlxsw_sp_acl_ctcam_region_ops
@@ -211,6 +211,23 @@ static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp2_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *region_priv, void *chunk_priv,
+ void *entry_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp2_acl_tcam_region *region = region_priv;
+ struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
+ struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
+
+ entry->act_block = rulei->act_block;
+ return mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp,
+ &region->aregion,
+ &chunk->achunk,
+ &entry->aentry, rulei);
+}
+
+static int
mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
void *region_priv, void *entry_priv,
bool *activity)
@@ -235,5 +252,6 @@ const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = {
.entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry),
.entry_add = mlxsw_sp2_acl_tcam_entry_add,
.entry_del = mlxsw_sp2_acl_tcam_entry_del,
+ .entry_action_replace = mlxsw_sp2_acl_tcam_entry_action_replace,
.entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
index 4dd62478162e..e31ec75ac035 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -7,6 +7,201 @@
#include "spectrum.h"
#include "spectrum_mr.h"
+struct mlxsw_sp2_mr_tcam {
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_acl_block *acl_block;
+ struct mlxsw_sp_acl_ruleset *ruleset4;
+ struct mlxsw_sp_acl_ruleset *ruleset6;
+};
+
+struct mlxsw_sp2_mr_route {
+ struct mlxsw_sp2_mr_tcam *mr_tcam;
+};
+
+static struct mlxsw_sp_acl_ruleset *
+mlxsw_sp2_mr_tcam_proto_ruleset(struct mlxsw_sp2_mr_tcam *mr_tcam,
+ enum mlxsw_sp_l3proto proto)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mr_tcam->ruleset4;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return mr_tcam->ruleset6;
+ }
+ return NULL;
+}
+
+static int mlxsw_sp2_mr_tcam_bind_group(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_reg_pemrbt_protocol protocol,
+ struct mlxsw_sp_acl_ruleset *ruleset)
+{
+ char pemrbt_pl[MLXSW_REG_PEMRBT_LEN];
+ u16 group_id;
+
+ group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
+
+ mlxsw_reg_pemrbt_pack(pemrbt_pl, protocol, group_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pemrbt), pemrbt_pl);
+}
+
+static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv4[] = {
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
+};
+
+static int mlxsw_sp2_mr_tcam_ipv4_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ struct mlxsw_afk_element_usage elusage;
+ int err;
+
+ /* Initialize IPv4 ACL group. */
+ mlxsw_afk_element_usage_fill(&elusage,
+ mlxsw_sp2_mr_tcam_usage_ipv4,
+ ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv4));
+ mr_tcam->ruleset4 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
+ mr_tcam->acl_block,
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_ACL_PROFILE_MR,
+ &elusage);
+
+ if (IS_ERR(mr_tcam->ruleset4))
+ return PTR_ERR(mr_tcam->ruleset4);
+
+ /* MC Router groups should be bound before routes are inserted. */
+ err = mlxsw_sp2_mr_tcam_bind_group(mr_tcam->mlxsw_sp,
+ MLXSW_REG_PEMRBT_PROTO_IPV4,
+ mr_tcam->ruleset4);
+ if (err)
+ goto err_bind_group;
+
+ return 0;
+
+err_bind_group:
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset4);
+ return err;
+}
+
+static void mlxsw_sp2_mr_tcam_ipv4_fini(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset4);
+}
+
+static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv6[] = {
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ MLXSW_AFK_ELEMENT_DST_IP_0_31,
+};
+
+static int mlxsw_sp2_mr_tcam_ipv6_init(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ struct mlxsw_afk_element_usage elusage;
+ int err;
+
+ /* Initialize IPv6 ACL group */
+ mlxsw_afk_element_usage_fill(&elusage,
+ mlxsw_sp2_mr_tcam_usage_ipv6,
+ ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv6));
+ mr_tcam->ruleset6 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp,
+ mr_tcam->acl_block,
+ MLXSW_SP_L3_PROTO_IPV6,
+ MLXSW_SP_ACL_PROFILE_MR,
+ &elusage);
+
+ if (IS_ERR(mr_tcam->ruleset6))
+ return PTR_ERR(mr_tcam->ruleset6);
+
+ /* MC Router groups should be bound before routes are inserted. */
+ err = mlxsw_sp2_mr_tcam_bind_group(mr_tcam->mlxsw_sp,
+ MLXSW_REG_PEMRBT_PROTO_IPV6,
+ mr_tcam->ruleset6);
+ if (err)
+ goto err_bind_group;
+
+ return 0;
+
+err_bind_group:
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset6);
+ return err;
+}
+
+static void mlxsw_sp2_mr_tcam_ipv6_fini(struct mlxsw_sp2_mr_tcam *mr_tcam)
+{
+ mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset6);
+}
+
+static void
+mlxsw_sp2_mr_tcam_rule_parse4(struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ (char *) &key->source.addr4,
+ (char *) &key->source_mask.addr4, 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ (char *) &key->group.addr4,
+ (char *) &key->group_mask.addr4, 4);
+}
+
+static void
+mlxsw_sp2_mr_tcam_rule_parse6(struct mlxsw_sp_acl_rule_info *rulei,
+ struct mlxsw_sp_mr_route_key *key)
+{
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
+ &key->source.addr6.s6_addr[0x0],
+ &key->source_mask.addr6.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
+ &key->source.addr6.s6_addr[0x4],
+ &key->source_mask.addr6.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
+ &key->source.addr6.s6_addr[0x8],
+ &key->source_mask.addr6.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
+ &key->source.addr6.s6_addr[0xc],
+ &key->source_mask.addr6.s6_addr[0xc], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
+ &key->group.addr6.s6_addr[0x0],
+ &key->group_mask.addr6.s6_addr[0x0], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
+ &key->group.addr6.s6_addr[0x4],
+ &key->group_mask.addr6.s6_addr[0x4], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
+ &key->group.addr6.s6_addr[0x8],
+ &key->group_mask.addr6.s6_addr[0x8], 4);
+ mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
+ &key->group.addr6.s6_addr[0xc],
+ &key->group_mask.addr6.s6_addr[0xc], 4);
+}
+
+static void
+mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule,
+ struct mlxsw_sp_mr_route_key *key,
+ unsigned int priority)
+{
+ struct mlxsw_sp_acl_rule_info *rulei;
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ rulei->priority = priority;
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7,
+ key->vrid, GENMASK(7, 0));
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10,
+ key->vrid >> 8, GENMASK(2, 0));
+ switch (key->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return mlxsw_sp2_mr_tcam_rule_parse6(rulei, key);
+ }
+}
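+
+/* Worked example for the VRID split above (value hypothetical): for
+ * key->vrid = 0x2a5, the low byte 0xa5 is matched via VIRT_ROUTER_0_7
+ * under GENMASK(7, 0) and the remaining high bits 0x2 via
+ * VIRT_ROUTER_8_10 under GENMASK(2, 0), covering the full 11-bit
+ * virtual router ID.
+ */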
+
static int
mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
@@ -14,7 +209,33 @@ mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
struct mlxsw_afa_block *afa_block,
enum mlxsw_sp_mr_route_prio prio)
{
+ struct mlxsw_sp2_mr_route *mr_route = route_priv;
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+ int err;
+
+ mr_route->mr_tcam = mr_tcam;
+ ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto);
+ if (WARN_ON(!ruleset))
+ return -EINVAL;
+
+ rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset,
+ (unsigned long) route_priv, afa_block,
+ NULL);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+
+ mlxsw_sp2_mr_tcam_rule_parse(rule, key, prio);
+ err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
+ if (err)
+ goto err_rule_add;
+
return 0;
+
+err_rule_add:
+ mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
+ return err;
}
static void
@@ -22,6 +243,21 @@ mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv,
void *route_priv,
struct mlxsw_sp_mr_route_key *key)
{
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+
+ ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto);
+ if (WARN_ON(!ruleset))
+ return;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset,
+ (unsigned long) route_priv);
+ if (WARN_ON(!rule))
+ return;
+
+ mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
+ mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
}
static int
@@ -30,21 +266,64 @@ mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_route_key *key,
struct mlxsw_afa_block *afa_block)
{
- return 0;
+ struct mlxsw_sp2_mr_route *mr_route = route_priv;
+ struct mlxsw_sp2_mr_tcam *mr_tcam = mr_route->mr_tcam;
+ struct mlxsw_sp_acl_ruleset *ruleset;
+ struct mlxsw_sp_acl_rule *rule;
+
+ ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto);
+ if (WARN_ON(!ruleset))
+ return -EINVAL;
+
+ rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset,
+ (unsigned long) route_priv);
+ if (WARN_ON(!rule))
+ return -EINVAL;
+
+ return mlxsw_sp_acl_rule_action_replace(mlxsw_sp, rule, afa_block);
}
static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+ int err;
+
+ mr_tcam->mlxsw_sp = mlxsw_sp;
+ mr_tcam->acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, NULL);
+ if (!mr_tcam->acl_block)
+ return -ENOMEM;
+
+ err = mlxsw_sp2_mr_tcam_ipv4_init(mr_tcam);
+ if (err)
+ goto err_ipv4_init;
+
+ err = mlxsw_sp2_mr_tcam_ipv6_init(mr_tcam);
+ if (err)
+ goto err_ipv6_init;
+
return 0;
+
+err_ipv6_init:
+ mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
+err_ipv4_init:
+ mlxsw_sp_acl_block_destroy(mr_tcam->acl_block);
+ return err;
}
static void mlxsw_sp2_mr_tcam_fini(void *priv)
{
+ struct mlxsw_sp2_mr_tcam *mr_tcam = priv;
+
+ mlxsw_sp2_mr_tcam_ipv6_fini(mr_tcam);
+ mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam);
+ mlxsw_sp_acl_block_destroy(mr_tcam->acl_block);
}
const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = {
+ .priv_size = sizeof(struct mlxsw_sp2_mr_tcam),
.init = mlxsw_sp2_mr_tcam_init,
.fini = mlxsw_sp2_mr_tcam_fini,
+ .route_priv_size = sizeof(struct mlxsw_sp2_mr_route),
.route_create = mlxsw_sp2_mr_tcam_route_create,
.route_destroy = mlxsw_sp2_mr_tcam_route_destroy,
.route_update = mlxsw_sp2_mr_tcam_route_update,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index c4f9238591e6..695d33358988 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -435,7 +435,8 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
}
struct mlxsw_sp_acl_rule_info *
-mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
+mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl,
+ struct mlxsw_afa_block *afa_block)
{
struct mlxsw_sp_acl_rule_info *rulei;
int err;
@@ -443,11 +444,18 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
if (!rulei)
return NULL;
+
+ if (afa_block) {
+ rulei->act_block = afa_block;
+ return rulei;
+ }
+
rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa);
if (IS_ERR(rulei->act_block)) {
err = PTR_ERR(rulei->act_block);
goto err_afa_block_create;
}
+ rulei->action_created = 1;
return rulei;
err_afa_block_create:
@@ -457,7 +465,8 @@ err_afa_block_create:
void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
- mlxsw_afa_block_destroy(rulei->act_block);
+ if (rulei->action_created)
+ mlxsw_afa_block_destroy(rulei->act_block);
kfree(rulei);
}
@@ -623,6 +632,7 @@ struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
unsigned long cookie,
+ struct mlxsw_afa_block *afa_block,
struct netlink_ext_ack *extack)
{
const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
@@ -639,7 +649,7 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
rule->cookie = cookie;
rule->ruleset = ruleset;
- rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+ rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block);
if (IS_ERR(rule->rulei)) {
err = PTR_ERR(rule->rulei);
goto err_rulei_create;
@@ -721,6 +731,21 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
ops->rule_del(mlxsw_sp, rule->priv);
}
+int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_rule *rule,
+ struct mlxsw_afa_block *afa_block)
+{
+ struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+ const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+ struct mlxsw_sp_acl_rule_info *rulei;
+
+ rulei = mlxsw_sp_acl_rule_rulei(rule);
+ rulei->act_block = afa_block;
+
+ return ops->rule_action_replace(mlxsw_sp, ruleset->priv, rule->priv,
+ rule->rulei);
+}
+
struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ruleset *ruleset,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index 2dda028f94db..80fb268d51a5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -14,8 +14,8 @@
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
-#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6
-#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START 0
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END 5
struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
@@ -34,7 +34,7 @@ struct mlxsw_sp_acl_atcam_region_ops {
void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
struct mlxsw_sp_acl_atcam_lkey_id *
(*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id);
+ char *enc_key, u8 erp_id);
void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
};
@@ -64,7 +64,7 @@ static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
static bool
mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
{
- return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp);
+ return mlxsw_sp_acl_erp_mask_is_ctcam(aentry->erp_mask);
}
static int
@@ -90,8 +90,7 @@ mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei,
- u8 erp_id)
+ char *enc_key, u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_generic *region_generic;
@@ -220,8 +219,7 @@ mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei,
- u8 erp_id)
+ char *enc_key, u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
@@ -230,9 +228,10 @@ mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
- mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key,
- NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
- MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);
+ memcpy(ht_key.enc_key, enc_key, sizeof(ht_key.enc_key));
+ mlxsw_afk_clear(afk, ht_key.enc_key,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END);
ht_key.erp_id = erp_id;
lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
mlxsw_sp_acl_atcam_lkey_id_ht_params);
@@ -324,6 +323,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp,
aregion->region = region;
aregion->atcam = atcam;
mlxsw_sp_acl_atcam_region_type_init(aregion);
+ INIT_LIST_HEAD(&aregion->entries_list);
err = rhashtable_init(&aregion->entries_ht,
&mlxsw_sp_acl_atcam_entries_ht_params);
@@ -357,6 +357,7 @@ void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion)
mlxsw_sp_acl_erp_region_fini(aregion);
aregion->ops->fini(aregion);
rhashtable_destroy(&aregion->entries_ht);
+ WARN_ON(!list_empty(&aregion->entries_list));
}
void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion,
@@ -379,7 +380,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
- u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
u32 kvdl_index, priority;
@@ -389,7 +390,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id);
+ lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
+ erp_id);
if (IS_ERR(lkey_id))
return PTR_ERR(lkey_id);
aentry->lkey_id = lkey_id;
@@ -398,6 +400,9 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
priority, region->tcam_region_info,
aentry->ht_key.enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
kvdl_index);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
@@ -418,18 +423,51 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
- u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
+ char *enc_key = aentry->ht_key.enc_key;
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
- region->tcam_region_info, aentry->ht_key.enc_key,
- erp_id, refcount_read(&lkey_id->refcnt) != 1,
+ region->tcam_region_info,
+ enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+ refcount_read(&lkey_id->refcnt) != 1,
lkey_id->id, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
aregion->ops->lkey_id_put(aregion, lkey_id);
}
static int
+mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
+ struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+ char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+ u32 kvdl_index, priority;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true);
+ if (err)
+ return err;
+ kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+ mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE,
+ priority, region->tcam_region_info,
+ aentry->ht_key.enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+ refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
+ kvdl_index);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+}
+
+static int
__mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_entry *aentry,
@@ -438,19 +476,36 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
- struct mlxsw_sp_acl_erp *erp;
- unsigned int blocks_count;
+ const struct mlxsw_sp_acl_erp_delta *delta;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
int err;
- blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
mlxsw_afk_encode(afk, region->key_info, &rulei->values,
- aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
-
- erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
- if (IS_ERR(erp))
- return PTR_ERR(erp);
- aentry->erp = erp;
- aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
+ aentry->full_enc_key, mask);
+
+ erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
+ if (IS_ERR(erp_mask))
+ return PTR_ERR(erp_mask);
+ aentry->erp_mask = erp_mask;
+ aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
+ memcpy(aentry->ht_key.enc_key, aentry->full_enc_key,
+ sizeof(aentry->ht_key.enc_key));
+
+ /* Compute all needed delta information and clear the delta bits
+ * from the encoded key.
+ */
+ delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
+ aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
+ aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
+ aentry->delta_info.value =
+ mlxsw_sp_acl_erp_delta_value(delta, aentry->full_enc_key);
+ mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
+
+ /* Add the rule to the list of A-TCAM rules, assuming it is
+ * intended for the A-TCAM. If the rule does not fit into the
+ * A-TCAM, it will be removed from the list.
+ */
+ list_add(&aentry->list, &aregion->entries_list);
/* We can't insert identical rules into the A-TCAM, so fail and
* let the rule spill into C-TCAM
@@ -461,6 +516,13 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rhashtable_insert;
+ /* Bloom filter must be updated here, before inserting the rule into
+ * the A-TCAM.
+ */
+ err = mlxsw_sp_acl_erp_bf_insert(mlxsw_sp, aregion, erp_mask, aentry);
+ if (err)
+ goto err_bf_insert;
+
err = mlxsw_sp_acl_atcam_region_entry_insert(mlxsw_sp, aregion, aentry,
rulei);
if (err)
@@ -469,10 +531,13 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
return 0;
err_rule_insert:
+ mlxsw_sp_acl_erp_bf_remove(mlxsw_sp, aregion, erp_mask, aentry);
+err_bf_insert:
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
err_rhashtable_insert:
- mlxsw_sp_acl_erp_put(aregion, erp);
+ list_del(&aentry->list);
+ mlxsw_sp_acl_erp_mask_put(aregion, erp_mask);
return err;
}
@@ -482,9 +547,21 @@ __mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_entry *aentry)
{
mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
+ mlxsw_sp_acl_erp_bf_remove(mlxsw_sp, aregion, aentry->erp_mask, aentry);
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
- mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+ list_del(&aentry->list);
+ mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
+}
+
+static int
+__mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_sp_acl_atcam_region_entry_action_replace(mlxsw_sp, aregion,
+ aentry, rulei);
}
int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
@@ -523,6 +600,29 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
__mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, aregion, aentry);
}
+int
+mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ int err;
+
+ if (mlxsw_sp_acl_atcam_is_centry(aentry))
+ err = mlxsw_sp_acl_ctcam_entry_action_replace(mlxsw_sp,
+ &aregion->cregion,
+ &achunk->cchunk,
+ &aentry->centry,
+ rulei);
+ else
+ err = __mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp,
+ aregion, aentry,
+ rulei);
+
+ return err;
+}
+
int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
new file mode 100644
index 000000000000..505b87846acc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/refcount.h>
+
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+struct mlxsw_sp_acl_bf {
+ unsigned int bank_size;
+ refcount_t refcnt[0];
+};
+
+/* The Bloom filter uses a CRC-16 hash over chunks of data, each of which
+ * contains 4 key blocks, the eRP ID and the region ID. In Spectrum-2, a
+ * region key is composed of up to 12 key blocks, so there can be up to 3
+ * chunks in the Bloom filter key, depending on the actual number of key
+ * blocks used in the region.
+ * The layout of the Bloom filter key is as follows:
+ *
+ * +-------------------------+------------------------+------------------------+
+ * | Chunk 2 Key blocks 11-8 | Chunk 1 Key blocks 7-4 | Chunk 0 Key blocks 3-0 |
+ * +-------------------------+------------------------+------------------------+
+ */
+#define MLXSW_BLOOM_KEY_CHUNKS 3
+#define MLXSW_BLOOM_KEY_LEN 69
+
+/* Each chunk is 23 bytes: 18 bytes hold 4 key blocks of 36 bits each,
+ * 2 bytes hold the eRP ID and region ID, and 3 bytes are zero padding.
+ * The layout of each chunk is as follows:
+ *
+ * +---------+----------------------+-----------------------------------+
+ * | 3 bytes | 2 bytes | 18 bytes |
+ * +---------+-----------+----------+-----------------------------------+
+ * | 183:158 | 157:148 | 147:144 | 143:0 |
+ * +---------+-----------+----------+-----------------------------------+
+ * | 0 | region ID | eRP ID | 4 Key blocks (18 Bytes) |
+ * +---------+-----------+----------+-----------------------------------+
+ */
+#define MLXSW_BLOOM_CHUNK_PAD_BYTES 3
+#define MLXSW_BLOOM_CHUNK_KEY_BYTES 18
+#define MLXSW_BLOOM_KEY_CHUNK_BYTES 23
+
+/* The offset of the key blocks within a chunk is 5 bytes, as they come
+ * after 3 bytes of zero padding and 2 bytes of region ID and eRP ID.
+ */
+#define MLXSW_BLOOM_CHUNK_KEY_OFFSET 5
+
+/* Each chunk contains 4 key blocks. Chunk 2 uses key blocks 11-8 and is
+ * populated with 4 key blocks copied from the entry's encoded key. Since
+ * the encoded key starts with padding, key block 11 starts at offset 2.
+ * Key block 7, used in chunk 1, starts at offset 20, as 4 key blocks
+ * occupy 18 bytes.
+ * This array defines key offsets for easy access when copying key blocks from
+ * entry key to Bloom filter chunk.
+ */
+static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
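As a hedged aside, not part of the patch: the offsets follow from the key geometry, since 12 blocks of 36 bits occupy 54 of the 56 encoded-key bytes, leaving 2 leading padding bytes, and each chunk consumes 18 bytes. A minimal standalone sketch that re-derives the table under these assumptions:

/* Illustrative only: re-derive chunk_key_offsets[] from the key geometry. */
#include <assert.h>

int main(void)
{
	const int enc_key_len = 0x38;		/* MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN */
	const int chunk_bytes = 4 * 36 / 8;	/* 4 blocks of 36 bits = 18 bytes */
	const int pad = enc_key_len - 3 * chunk_bytes;	/* 56 - 54 = 2 */
	const int expected[] = {2, 20, 38};
	int i;

	for (i = 0; i < 3; i++)
		assert(pad + i * chunk_bytes == expected[i]);
	return 0;
}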
+
+/* This table is just the CRC of each possible byte. It is
+ * computed, MSB first, for the Bloom filter polynomial,
+ * which is 0x8529 (1 + x^3 + x^5 + x^8 + x^10 + x^15 and
+ * the implicit x^16).
+ */
+static const u16 mlxsw_sp_acl_bf_crc_tab[256] = {
+0x0000, 0x8529, 0x8f7b, 0x0a52, 0x9bdf, 0x1ef6, 0x14a4, 0x918d,
+0xb297, 0x37be, 0x3dec, 0xb8c5, 0x2948, 0xac61, 0xa633, 0x231a,
+0xe007, 0x652e, 0x6f7c, 0xea55, 0x7bd8, 0xfef1, 0xf4a3, 0x718a,
+0x5290, 0xd7b9, 0xddeb, 0x58c2, 0xc94f, 0x4c66, 0x4634, 0xc31d,
+0x4527, 0xc00e, 0xca5c, 0x4f75, 0xdef8, 0x5bd1, 0x5183, 0xd4aa,
+0xf7b0, 0x7299, 0x78cb, 0xfde2, 0x6c6f, 0xe946, 0xe314, 0x663d,
+0xa520, 0x2009, 0x2a5b, 0xaf72, 0x3eff, 0xbbd6, 0xb184, 0x34ad,
+0x17b7, 0x929e, 0x98cc, 0x1de5, 0x8c68, 0x0941, 0x0313, 0x863a,
+0x8a4e, 0x0f67, 0x0535, 0x801c, 0x1191, 0x94b8, 0x9eea, 0x1bc3,
+0x38d9, 0xbdf0, 0xb7a2, 0x328b, 0xa306, 0x262f, 0x2c7d, 0xa954,
+0x6a49, 0xef60, 0xe532, 0x601b, 0xf196, 0x74bf, 0x7eed, 0xfbc4,
+0xd8de, 0x5df7, 0x57a5, 0xd28c, 0x4301, 0xc628, 0xcc7a, 0x4953,
+0xcf69, 0x4a40, 0x4012, 0xc53b, 0x54b6, 0xd19f, 0xdbcd, 0x5ee4,
+0x7dfe, 0xf8d7, 0xf285, 0x77ac, 0xe621, 0x6308, 0x695a, 0xec73,
+0x2f6e, 0xaa47, 0xa015, 0x253c, 0xb4b1, 0x3198, 0x3bca, 0xbee3,
+0x9df9, 0x18d0, 0x1282, 0x97ab, 0x0626, 0x830f, 0x895d, 0x0c74,
+0x91b5, 0x149c, 0x1ece, 0x9be7, 0x0a6a, 0x8f43, 0x8511, 0x0038,
+0x2322, 0xa60b, 0xac59, 0x2970, 0xb8fd, 0x3dd4, 0x3786, 0xb2af,
+0x71b2, 0xf49b, 0xfec9, 0x7be0, 0xea6d, 0x6f44, 0x6516, 0xe03f,
+0xc325, 0x460c, 0x4c5e, 0xc977, 0x58fa, 0xddd3, 0xd781, 0x52a8,
+0xd492, 0x51bb, 0x5be9, 0xdec0, 0x4f4d, 0xca64, 0xc036, 0x451f,
+0x6605, 0xe32c, 0xe97e, 0x6c57, 0xfdda, 0x78f3, 0x72a1, 0xf788,
+0x3495, 0xb1bc, 0xbbee, 0x3ec7, 0xaf4a, 0x2a63, 0x2031, 0xa518,
+0x8602, 0x032b, 0x0979, 0x8c50, 0x1ddd, 0x98f4, 0x92a6, 0x178f,
+0x1bfb, 0x9ed2, 0x9480, 0x11a9, 0x8024, 0x050d, 0x0f5f, 0x8a76,
+0xa96c, 0x2c45, 0x2617, 0xa33e, 0x32b3, 0xb79a, 0xbdc8, 0x38e1,
+0xfbfc, 0x7ed5, 0x7487, 0xf1ae, 0x6023, 0xe50a, 0xef58, 0x6a71,
+0x496b, 0xcc42, 0xc610, 0x4339, 0xd2b4, 0x579d, 0x5dcf, 0xd8e6,
+0x5edc, 0xdbf5, 0xd1a7, 0x548e, 0xc503, 0x402a, 0x4a78, 0xcf51,
+0xec4b, 0x6962, 0x6330, 0xe619, 0x7794, 0xf2bd, 0xf8ef, 0x7dc6,
+0xbedb, 0x3bf2, 0x31a0, 0xb489, 0x2504, 0xa02d, 0xaa7f, 0x2f56,
+0x0c4c, 0x8965, 0x8337, 0x061e, 0x9793, 0x12ba, 0x18e8, 0x9dc1,
+};
+
+static u16 mlxsw_sp_acl_bf_crc_byte(u16 crc, u8 c)
+{
+ return (crc << 8) ^ mlxsw_sp_acl_bf_crc_tab[(crc >> 8) ^ c];
+}
+
+static u16 mlxsw_sp_acl_bf_crc(const u8 *buffer, size_t len)
+{
+ u16 crc = 0;
+
+ while (len--)
+ crc = mlxsw_sp_acl_bf_crc_byte(crc, *buffer++);
+ return crc;
+}
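Hedged cross-check, not part of the patch: the table above can be regenerated with an MSB-first bit-at-a-time CRC-16 over the stated polynomial, so the byte-at-a-time routine above is just its table-driven form; for instance, entry 1 of the table works out to 0x8529 itself.

/* Illustrative only: bitwise CRC-16 for polynomial 0x8529, MSB first.
 * mlxsw_sp_acl_bf_crc_tab[b] == bf_crc16_bitwise(0, b) for every byte b.
 */
#include <stdint.h>

static uint16_t bf_crc16_bitwise(uint16_t crc, uint8_t c)
{
	int i;

	crc ^= (uint16_t)(c << 8);
	for (i = 0; i < 8; i++)
		crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8529)
				     : (uint16_t)(crc << 1);
	return crc;
}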
+
+static void
+mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ char *output, u8 *len)
+{
+ struct mlxsw_afk_key_info *key_info = aregion->region->key_info;
+ u8 chunk_index, chunk_count, block_count;
+ char *chunk = output;
+ __be16 erp_region_id;
+
+ block_count = mlxsw_afk_key_info_blocks_count_get(key_info);
+ chunk_count = 1 + ((block_count - 1) >> 2);
+ erp_region_id = cpu_to_be16(aentry->ht_key.erp_id |
+ (aregion->region->id << 4));
+ for (chunk_index = MLXSW_BLOOM_KEY_CHUNKS - chunk_count;
+ chunk_index < MLXSW_BLOOM_KEY_CHUNKS; chunk_index++) {
+ memset(chunk, 0, MLXSW_BLOOM_CHUNK_PAD_BYTES);
+ memcpy(chunk + MLXSW_BLOOM_CHUNK_PAD_BYTES, &erp_region_id,
+ sizeof(erp_region_id));
+ memcpy(chunk + MLXSW_BLOOM_CHUNK_KEY_OFFSET,
+ &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
+ MLXSW_BLOOM_CHUNK_KEY_BYTES);
+ chunk += MLXSW_BLOOM_KEY_CHUNK_BYTES;
+ }
+ *len = chunk_count * MLXSW_BLOOM_KEY_CHUNK_BYTES;
+}
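A hedged note, not in the patch: the chunk_count expression above is a plain ceiling division by 4, equivalent to DIV_ROUND_UP(block_count, 4):

/* Illustrative only: 1 + ((bc - 1) >> 2) == ceil(bc / 4) for bc >= 1. */
#include <assert.h>

int main(void)
{
	unsigned int bc;

	for (bc = 1; bc <= 12; bc++)
		assert(1 + ((bc - 1) >> 2) == (bc + 3) / 4);
	return 0;
}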
+
+static unsigned int
+mlxsw_sp_acl_bf_rule_count_index_get(struct mlxsw_sp_acl_bf *bf,
+ unsigned int erp_bank,
+ unsigned int bf_index)
+{
+ return erp_bank * bf->bank_size + bf_index;
+}
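Hedged illustration, not in the patch: the refcounts for all eRP banks live in a single flat array laid out bank-major, so the add/del paths below touch hardware only on the 0 -> 1 and 1 -> 0 transitions of a slot. With an assumed bank size of 4096 (1 << ACL_MAX_BF_LOG):

/* Illustrative only: bank-major layout of bf->refcnt[]. */
#include <assert.h>

int main(void)
{
	const unsigned int bank_size = 4096;	/* assumed value */

	assert(0 * bank_size + 7 == 7);		/* bank 0, index 7 */
	assert(2 * bank_size + 7 == 8199);	/* bank 2, index 7 */
	return 0;
}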
+
+static unsigned int
+mlxsw_sp_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ char bf_key[MLXSW_BLOOM_KEY_LEN];
+ u8 bf_size;
+
+ mlxsw_sp_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size);
+ return mlxsw_sp_acl_bf_crc(bf_key, bf_size);
+}
+
+int
+mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ unsigned int rule_index;
+ char *peabfe_pl;
+ u16 bf_index;
+ int err;
+
+ bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
+ rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
+ bf_index);
+
+ if (refcount_inc_not_zero(&bf->refcnt[rule_index]))
+ return 0;
+
+ peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL);
+ if (!peabfe_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_peabfe_pack(peabfe_pl);
+ mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 1, erp_bank, bf_index);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl);
+ kfree(peabfe_pl);
+ if (err)
+ return err;
+
+ refcount_set(&bf->refcnt[rule_index], 1);
+ return 0;
+}
+
+void
+mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ unsigned int rule_index;
+ char *peabfe_pl;
+ u16 bf_index;
+
+ bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry);
+ rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank,
+ bf_index);
+
+ if (refcount_dec_and_test(&bf->refcnt[rule_index])) {
+ peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL);
+ if (!peabfe_pl)
+ return;
+
+ mlxsw_reg_peabfe_pack(peabfe_pl);
+ mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 0, erp_bank, bf_index);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl);
+ kfree(peabfe_pl);
+ }
+}
+
+struct mlxsw_sp_acl_bf *
+mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
+{
+ struct mlxsw_sp_acl_bf *bf;
+ unsigned int bf_bank_size;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_BF_LOG))
+ return ERR_PTR(-EIO);
+
+	/* The Bloom filter size per eRP table bank is 2^ACL_MAX_BF_LOG. */
+ bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
+ bf = kzalloc(sizeof(*bf) + bf_bank_size * num_erp_banks *
+ sizeof(*bf->refcnt), GFP_KERNEL);
+ if (!bf)
+ return ERR_PTR(-ENOMEM);
+
+ bf->bank_size = bf_bank_size;
+ return bf;
+}
+
+void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf)
+{
+ kfree(bf);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index e3c6fe8b1d40..b0f2d8e8ded0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -46,7 +46,6 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region = cregion->region;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- unsigned int blocks_count;
char *act_set;
u32 priority;
char *mask;
@@ -63,9 +62,7 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
centry->parman_item.index, priority);
key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
- blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
- mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0,
- blocks_count - 1);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
err = cregion->ops->entry_insert(cregion, centry, mask);
if (err)
@@ -92,6 +89,27 @@ mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
cregion->ops->entry_remove(cregion, centry);
}
+static int
+mlxsw_sp_acl_ctcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_afa_block *afa_block,
+ unsigned int priority)
+{
+ char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+ char *act_set;
+
+ mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_UPDATE,
+ cregion->region->tcam_region_info,
+ centry->parman_item.index, priority);
+
+ act_set = mlxsw_afa_block_first_set(afa_block);
+ mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv,
unsigned long new_count)
{
@@ -194,3 +212,15 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
parman_item_remove(cregion->parman, &cchunk->parman_prio,
&centry->parman_item);
}
+
+int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return mlxsw_sp_acl_ctcam_region_entry_action_replace(mlxsw_sp, cregion,
+ centry,
+ rulei->act_block,
+ rulei->priority);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 0a4fd3c8662a..1c19feefa5f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -7,7 +7,7 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/rhashtable.h>
+#include <linux/objagg.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
@@ -24,11 +24,14 @@ struct mlxsw_sp_acl_erp_core {
unsigned int erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX + 1];
struct gen_pool *erp_tables;
struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_acl_bf *bf;
unsigned int num_erp_banks;
};
struct mlxsw_sp_acl_erp_key {
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
+#define __MASK_LEN 0x38
+#define __MASK_IDX(i) (__MASK_LEN - (i) - 1)
bool ctcam;
};
@@ -36,10 +39,8 @@ struct mlxsw_sp_acl_erp {
struct mlxsw_sp_acl_erp_key key;
u8 id;
u8 index;
- refcount_t refcnt;
DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
struct list_head list;
- struct rhash_head ht_node;
struct mlxsw_sp_acl_erp_table *erp_table;
};
@@ -53,7 +54,6 @@ struct mlxsw_sp_acl_erp_table {
DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
struct list_head atcam_erps_list;
- struct rhashtable erp_ht;
struct mlxsw_sp_acl_erp_core *erp_core;
struct mlxsw_sp_acl_atcam_region *aregion;
const struct mlxsw_sp_acl_erp_table_ops *ops;
@@ -61,12 +61,8 @@ struct mlxsw_sp_acl_erp_table {
unsigned int num_atcam_erps;
unsigned int num_max_atcam_erps;
unsigned int num_ctcam_erps;
-};
-
-static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = {
- .key_len = sizeof(struct mlxsw_sp_acl_erp_key),
- .key_offset = offsetof(struct mlxsw_sp_acl_erp, key),
- .head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node),
+ unsigned int num_deltas;
+ struct objagg *objagg;
};
struct mlxsw_sp_acl_erp_table_ops {
@@ -119,14 +115,17 @@ static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
.erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
};
-bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp)
+static bool
+mlxsw_sp_acl_erp_table_is_used(const struct mlxsw_sp_acl_erp_table *erp_table)
{
- return erp->key.ctcam;
+ return erp_table->ops != &erp_single_mask_ops &&
+ erp_table->ops != &erp_no_mask_ops;
}
-u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp)
+static unsigned int
+mlxsw_sp_acl_erp_bank_get(const struct mlxsw_sp_acl_erp *erp)
{
- return erp->id;
+ return erp->index % erp->erp_table->erp_core->num_erp_banks;
}
static unsigned int
@@ -194,12 +193,15 @@ mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
static int
mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
- const struct mlxsw_sp_acl_erp *erp)
+ struct mlxsw_sp_acl_erp_key *key)
{
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
unsigned long bit;
int err;
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
@@ -210,7 +212,7 @@ mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
return 0;
err_master_mask_update:
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
return err;
@@ -218,12 +220,15 @@ err_master_mask_update:
static int
mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
- const struct mlxsw_sp_acl_erp *erp)
+ struct mlxsw_sp_acl_erp_key *key)
{
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
unsigned long bit;
int err;
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
@@ -234,7 +239,7 @@ mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
return 0;
err_master_mask_update:
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
return err;
@@ -256,26 +261,16 @@ mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
goto err_erp_id_get;
memcpy(&erp->key, key, sizeof(*key));
- bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
- MLXSW_SP_ACL_TCAM_MASK_LEN);
list_add(&erp->list, &erp_table->atcam_erps_list);
- refcount_set(&erp->refcnt, 1);
erp_table->num_atcam_erps++;
erp->erp_table = erp_table;
- err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
if (err)
goto err_master_mask_set;
- err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- if (err)
- goto err_rhashtable_insert;
-
return erp;
-err_rhashtable_insert:
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
err_master_mask_set:
erp_table->num_atcam_erps--;
list_del(&erp->list);
@@ -290,9 +285,7 @@ mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
erp_table->num_atcam_erps--;
list_del(&erp->list);
mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
@@ -525,6 +518,48 @@ err_table_relocate:
}
static int
+mlxsw_acl_erp_table_bf_add(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
+ unsigned int erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+ int err;
+
+ list_for_each_entry(aentry, &aregion->entries_list, list) {
+ err = mlxsw_sp_acl_bf_entry_add(aregion->region->mlxsw_sp,
+ erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+ if (err)
+ goto bf_entry_add_err;
+ }
+
+ return 0;
+
+bf_entry_add_err:
+ list_for_each_entry_continue_reverse(aentry, &aregion->entries_list,
+ list)
+ mlxsw_sp_acl_bf_entry_del(aregion->region->mlxsw_sp,
+ erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+ return err;
+}
+
+static void
+mlxsw_acl_erp_table_bf_del(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp *erp)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion;
+ unsigned int erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ struct mlxsw_sp_acl_atcam_entry *aentry;
+
+ list_for_each_entry_reverse(aentry, &aregion->entries_list, list)
+ mlxsw_sp_acl_bf_entry_del(aregion->region->mlxsw_sp,
+ erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+}
+
+static int
mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
{
struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core;
@@ -548,16 +583,24 @@ mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
goto err_table_master_rp;
}
- /* Maintain the same eRP bank for the master RP, so that we
- * wouldn't need to update the bloom filter
+ /* Make sure the master RP is using a valid index, as
+ * only a single eRP row is currently allocated.
*/
- master_rp->index = master_rp->index % erp_core->num_erp_banks;
+ master_rp->index = 0;
__set_bit(master_rp->index, erp_table->erp_index_bitmap);
err = mlxsw_sp_acl_erp_table_erp_add(erp_table, master_rp);
if (err)
goto err_table_master_rp_add;
+	/* Update the Bloom filter before enabling the eRP table, as
+	 * rules on the master RP were not added to the Bloom filter
+	 * up to this point.
+	 */
+ err = mlxsw_acl_erp_table_bf_add(erp_table, master_rp);
+ if (err)
+ goto err_table_bf_add;
+
err = mlxsw_sp_acl_erp_table_enable(erp_table, false);
if (err)
goto err_table_enable;
@@ -565,6 +608,8 @@ mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table)
return 0;
err_table_enable:
+ mlxsw_acl_erp_table_bf_del(erp_table, master_rp);
+err_table_bf_add:
mlxsw_sp_acl_erp_table_erp_del(master_rp);
err_table_master_rp_add:
__clear_bit(master_rp->index, erp_table->erp_index_bitmap);
@@ -585,6 +630,7 @@ mlxsw_sp_acl_erp_region_master_mask_trans(struct mlxsw_sp_acl_erp_table *erp_tab
master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table);
if (!master_rp)
return;
+ mlxsw_acl_erp_table_bf_del(erp_table, master_rp);
mlxsw_sp_acl_erp_table_erp_del(master_rp);
__clear_bit(master_rp->index, erp_table->erp_index_bitmap);
mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps,
@@ -647,9 +693,55 @@ mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
mlxsw_sp_acl_erp_table_enable(erp_table, false);
}
+static int
+__mlxsw_sp_acl_erp_table_other_inc(struct mlxsw_sp_acl_erp_table *erp_table,
+ unsigned int *inc_num)
+{
+ int err;
+
+	/* If there are C-TCAM eRPs or deltas in use, we need to transition
+	 * the region to use the eRP table, if this was not already done.
+	 */
+ if (!mlxsw_sp_acl_erp_table_is_used(erp_table)) {
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return err;
+ }
+
+ /* When C-TCAM or deltas are used, the eRP table must be used */
+ if (erp_table->ops != &erp_multiple_masks_ops)
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ (*inc_num)++;
+
+ return 0;
+}
+
+static int mlxsw_sp_acl_erp_ctcam_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+ &erp_table->num_ctcam_erps);
+}
+
+static int mlxsw_sp_acl_erp_delta_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+ &erp_table->num_deltas);
+}
+
static void
-mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
+__mlxsw_sp_acl_erp_table_other_dec(struct mlxsw_sp_acl_erp_table *erp_table,
+ unsigned int *dec_num)
{
+ (*dec_num)--;
+
+	/* If there are no C-TCAM eRPs or deltas in use, the state we
+	 * transition to depends on the number of A-TCAM eRPs currently
+	 * in use.
+	 */
+ if (erp_table->num_ctcam_erps > 0 || erp_table->num_deltas > 0)
+ return;
+
switch (erp_table->num_atcam_erps) {
case 2:
/* Keep using the eRP table, but correctly set the
@@ -683,9 +775,21 @@ mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
}
}
+static void mlxsw_sp_acl_erp_ctcam_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ __mlxsw_sp_acl_erp_table_other_dec(erp_table,
+ &erp_table->num_ctcam_erps);
+}
+
+static void mlxsw_sp_acl_erp_delta_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ __mlxsw_sp_acl_erp_table_other_dec(erp_table,
+ &erp_table->num_deltas);
+}
+
static struct mlxsw_sp_acl_erp *
-__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
- struct mlxsw_sp_acl_erp_key *key)
+mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
int err;
@@ -697,89 +801,41 @@ __mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
memcpy(&erp->key, key, sizeof(*key));
bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
MLXSW_SP_ACL_TCAM_MASK_LEN);
- refcount_set(&erp->refcnt, 1);
- erp_table->num_ctcam_erps++;
- erp->erp_table = erp_table;
- err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ err = mlxsw_sp_acl_erp_ctcam_inc(erp_table);
if (err)
- goto err_master_mask_set;
+ goto err_erp_ctcam_inc;
+
+ erp->erp_table = erp_table;
- err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
if (err)
- goto err_rhashtable_insert;
+ goto err_master_mask_set;
err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
if (err)
goto err_erp_region_ctcam_enable;
- /* When C-TCAM is used, the eRP table must be used */
- erp_table->ops = &erp_multiple_masks_ops;
-
return erp;
err_erp_region_ctcam_enable:
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
-err_rhashtable_insert:
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
err_master_mask_set:
- erp_table->num_ctcam_erps--;
+ mlxsw_sp_acl_erp_ctcam_dec(erp_table);
+err_erp_ctcam_inc:
kfree(erp);
return ERR_PTR(err);
}
-static struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
- struct mlxsw_sp_acl_erp_key *key)
-{
- struct mlxsw_sp_acl_erp *erp;
- int err;
-
- /* There is a special situation where we need to spill rules
- * into the C-TCAM, yet the region is still using a master
- * mask and thus not performing a lookup in the C-TCAM. This
- * can happen when two rules that only differ in priority - and
- * thus sharing the same key - are programmed. In this case
- * we transition the region to use an eRP table
- */
- err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
- if (err)
- return ERR_PTR(err);
-
- erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
- if (IS_ERR(erp)) {
- err = PTR_ERR(erp);
- goto err_erp_create;
- }
-
- return erp;
-
-err_erp_create:
- mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
- return ERR_PTR(err);
-}
-
static void
mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
- erp_table->num_ctcam_erps--;
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
+ mlxsw_sp_acl_erp_ctcam_dec(erp_table);
kfree(erp);
-
- /* Once the last C-TCAM eRP was destroyed, the state we
- * transition to depends on the number of A-TCAM eRPs currently
- * in use
- */
- if (erp_table->num_ctcam_erps > 0)
- return;
- mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table);
}
static struct mlxsw_sp_acl_erp *
@@ -790,7 +846,7 @@ mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
int err;
if (key->ctcam)
- return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+ return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
/* Expand the eRP table for the new eRP, if needed */
err = mlxsw_sp_acl_erp_table_expand(erp_table);
@@ -838,7 +894,8 @@ mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
mlxsw_sp_acl_erp_generic_destroy(erp);
- if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0)
+ if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0 &&
+ erp_table->num_deltas == 0)
erp_table->ops = &erp_two_masks_ops;
}
@@ -940,13 +997,12 @@ mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
WARN_ON(1);
}
-struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
- const char *mask, bool ctcam)
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam)
{
- struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp_acl_erp_key key;
- struct mlxsw_sp_acl_erp *erp;
+ struct objagg_obj *objagg_obj;
/* eRPs are allocated from a shared resource, but currently all
* allocations are done under RTNL.
@@ -955,29 +1011,276 @@ mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
key.ctcam = ctcam;
- erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key,
- mlxsw_sp_acl_erp_ht_params);
- if (erp) {
- refcount_inc(&erp->refcnt);
- return erp;
- }
+ objagg_obj = objagg_obj_get(aregion->erp_table->objagg, &key);
+ if (IS_ERR(objagg_obj))
+ return ERR_CAST(objagg_obj);
+ return (struct mlxsw_sp_acl_erp_mask *) objagg_obj;
+}
+
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
- return erp_table->ops->erp_create(erp_table, &key);
+ ASSERT_RTNL();
+ objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
}
-void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_erp *erp)
+int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
{
- struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+ unsigned int erp_bank;
ASSERT_RTNL();
+ if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
+ return 0;
+
+ erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ return mlxsw_sp_acl_bf_entry_add(mlxsw_sp,
+ erp->erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+}
+
+void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+ unsigned int erp_bank;
- if (!refcount_dec_and_test(&erp->refcnt))
+ ASSERT_RTNL();
+ if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table))
return;
- erp_table->ops->erp_destroy(erp_table, erp);
+ erp_bank = mlxsw_sp_acl_erp_bank_get(erp);
+ mlxsw_sp_acl_bf_entry_del(mlxsw_sp,
+ erp->erp_table->erp_core->bf,
+ aregion, erp_bank, aentry);
+}
+
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp_key *key = objagg_obj_raw(objagg_obj);
+
+ return key->ctcam;
+}
+
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+
+ return erp->id;
+}
+
+struct mlxsw_sp_acl_erp_delta {
+ struct mlxsw_sp_acl_erp_key key;
+ u16 start;
+ u8 mask;
+};
+
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+ return delta->start;
+}
+
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+ return delta->mask;
+}
+
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key)
+{
+ u16 start = delta->start;
+ u8 mask = delta->mask;
+ u16 tmp;
+
+ if (!mask)
+ return 0;
+
+ tmp = (unsigned char) enc_key[__MASK_IDX(start / 8)];
+ if (start / 8 + 1 < __MASK_LEN)
+ tmp |= (unsigned char) enc_key[__MASK_IDX(start / 8 + 1)] << 8;
+ tmp >>= start % 8;
+ tmp &= mask;
+ return tmp;
+}
+
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key)
+{
+ u16 start = delta->start;
+ u8 mask = delta->mask;
+ unsigned char *byte;
+ u16 tmp;
+
+ tmp = mask;
+ tmp <<= start % 8;
+ tmp = ~tmp;
+
+ byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8)];
+ *byte &= tmp & 0xff;
+ if (start / 8 + 1 < __MASK_LEN) {
+ byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8 + 1)];
+ *byte &= (tmp >> 8) & 0xff;
+ }
+}
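A hedged worked example, not part of the patch: with delta->start = 12 and delta->mask = 0x7, the delta occupies three bits starting at bit 12, straddling at most the two encoded-key bytes addressed by __MASK_IDX(1) and __MASK_IDX(2):

/* Illustrative only: mirrors the value-extraction above for start = 12,
 * mask = 0x7. __MASK_IDX maps logical byte i to 55 - i.
 */
#include <assert.h>
#include <string.h>

#define __MASK_LEN 0x38
#define __MASK_IDX(i) (__MASK_LEN - (i) - 1)

int main(void)
{
	unsigned char key[__MASK_LEN];
	unsigned int start = 12, mask = 0x7, tmp;

	memset(key, 0, sizeof(key));
	key[__MASK_IDX(1)] = 0x50;	/* delta bits 6:4 of logical byte 1 */

	tmp = key[__MASK_IDX(start / 8)];
	tmp |= key[__MASK_IDX(start / 8 + 1)] << 8;
	tmp >>= start % 8;
	tmp &= mask;
	assert(tmp == 0x5);		/* extracted delta value */
	return 0;
}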
+
+static const struct mlxsw_sp_acl_erp_delta
+mlxsw_sp_acl_erp_delta_default = {};
+
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp_delta *delta;
+
+ delta = objagg_obj_delta_priv(objagg_obj);
+ if (!delta)
+ delta = &mlxsw_sp_acl_erp_delta_default;
+ return delta;
+}
+
+static int
+mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key,
+ const struct mlxsw_sp_acl_erp_key *key,
+ u16 *delta_start, u8 *delta_mask)
+{
+ int offset = 0;
+ int si = -1;
+ u16 pmask;
+ u16 mask;
+ int i;
+
+ /* The difference between 2 masks can be up to 8 consecutive bits. */
+ for (i = 0; i < __MASK_LEN; i++) {
+ if (parent_key->mask[__MASK_IDX(i)] == key->mask[__MASK_IDX(i)])
+ continue;
+ if (si == -1)
+ si = i;
+ else if (si != i - 1)
+ return -EINVAL;
+ }
+ if (si == -1) {
+		/* The masks are the same. This cannot happen;
+		 * it means the caller is broken.
+		 */
+ WARN_ON(1);
+ *delta_start = 0;
+ *delta_mask = 0;
+ return 0;
+ }
+ pmask = (unsigned char) parent_key->mask[__MASK_IDX(si)];
+ mask = (unsigned char) key->mask[__MASK_IDX(si)];
+ if (si + 1 < __MASK_LEN) {
+ pmask |= (unsigned char) parent_key->mask[__MASK_IDX(si + 1)] << 8;
+ mask |= (unsigned char) key->mask[__MASK_IDX(si + 1)] << 8;
+ }
+
+ if ((pmask ^ mask) & pmask)
+ return -EINVAL;
+ mask &= ~pmask;
+ while (!(mask & (1 << offset)))
+ offset++;
+ while (!(mask & 1))
+ mask >>= 1;
+ if (mask & 0xff00)
+ return -EINVAL;
+
+ *delta_start = si * 8 + offset;
+ *delta_mask = mask;
+
+ return 0;
+}
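A hedged walkthrough of a legal input, not in the patch: parent and child masks that differ only in logical byte 2, where the child adds the bits 0x38, yield delta_start = 19 and delta_mask = 0x7:

/* Illustrative only: the arithmetic above for si = 2, pmask = 0x00,
 * mask = 0x38. The child may only add bits on top of the parent mask,
 * and the added run must fit in 8 bits.
 */
#include <assert.h>

int main(void)
{
	unsigned int pmask = 0x00, mask = 0x38, offset = 0, si = 2;

	assert(!((pmask ^ mask) & pmask));	/* child only adds bits */
	mask &= ~pmask;
	while (!(mask & (1u << offset)))
		offset++;			/* lowest added bit: 3 */
	mask >>= offset;
	assert(!(mask & 0xff00));		/* run fits in 8 bits */
	assert(si * 8 + offset == 19);		/* delta_start */
	assert(mask == 0x7);			/* delta_mask */
	return 0;
}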
+
+static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
+ void *obj)
+{
+ struct mlxsw_sp_acl_erp_key *parent_key = parent_obj;
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key *key = obj;
+ struct mlxsw_sp_acl_erp_delta *delta;
+ u16 delta_start;
+ u8 delta_mask;
+ int err;
+
+ if (parent_key->ctcam || key->ctcam)
+ return ERR_PTR(-EINVAL);
+ err = mlxsw_sp_acl_erp_delta_fill(parent_key, key,
+ &delta_start, &delta_mask);
+ if (err)
+ return ERR_PTR(-EINVAL);
+
+ delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+ if (!delta)
+ return ERR_PTR(-ENOMEM);
+ delta->start = delta_start;
+ delta->mask = delta_mask;
+
+ err = mlxsw_sp_acl_erp_delta_inc(erp_table);
+ if (err)
+ goto err_erp_delta_inc;
+
+ memcpy(&delta->key, key, sizeof(*key));
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &delta->key);
+ if (err)
+ goto err_master_mask_set;
+
+ return delta;
+
+err_master_mask_set:
+ mlxsw_sp_acl_erp_delta_dec(erp_table);
+err_erp_delta_inc:
+ kfree(delta);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_acl_erp_delta_destroy(void *priv, void *delta_priv)
+{
+ struct mlxsw_sp_acl_erp_delta *delta = delta_priv;
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &delta->key);
+ mlxsw_sp_acl_erp_delta_dec(erp_table);
+ kfree(delta);
+}
+
+static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key *key = obj;
+
+ return erp_table->ops->erp_create(erp_table, key);
+}
+
+static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ erp_table->ops->erp_destroy(erp_table, root_priv);
}
+static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
+ .obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
+ .delta_create = mlxsw_sp_acl_erp_delta_create,
+ .delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
+ .root_create = mlxsw_sp_acl_erp_root_create,
+ .root_destroy = mlxsw_sp_acl_erp_root_destroy,
+};
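For context, hedged and using only objagg calls that already appear in this patch: objagg hands each key either a root, created through root_create above, or a delta hanging off an existing root; the eRP wrappers then reach the private data of either side. A minimal sketch of the lifecycle behind erp_mask_get()/put():

/* Illustrative only; error handling trimmed. */
static int mlxsw_sp_acl_erp_objagg_demo(struct mlxsw_sp_acl_erp_table *erp_table,
					struct mlxsw_sp_acl_erp_key *key)
{
	const struct mlxsw_sp_acl_erp_delta *delta;
	const struct mlxsw_sp_acl_erp *erp;
	struct objagg_obj *objagg_obj;

	objagg_obj = objagg_obj_get(erp_table->objagg, key);
	if (IS_ERR(objagg_obj))
		return PTR_ERR(objagg_obj);

	erp = objagg_obj_root_priv(objagg_obj);	   /* the real eRP */
	delta = objagg_obj_delta_priv(objagg_obj); /* NULL for a root */
	(void)erp;
	(void)delta;

	objagg_obj_put(erp_table->objagg, objagg_obj);
	return 0;
}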
+
static struct mlxsw_sp_acl_erp_table *
mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
{
@@ -988,9 +1291,12 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
if (!erp_table)
return ERR_PTR(-ENOMEM);
- err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params);
- if (err)
- goto err_rhashtable_init;
+ erp_table->objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops,
+ aregion);
+ if (IS_ERR(erp_table->objagg)) {
+ err = PTR_ERR(erp_table->objagg);
+ goto err_objagg_create;
+ }
erp_table->erp_core = aregion->atcam->erp_core;
erp_table->ops = &erp_no_mask_ops;
@@ -999,7 +1305,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
return erp_table;
-err_rhashtable_init:
+err_objagg_create:
kfree(erp_table);
return ERR_PTR(err);
}
@@ -1008,7 +1314,7 @@ static void
mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
{
WARN_ON(!list_empty(&erp_table->atcam_erps_list));
- rhashtable_destroy(&erp_table->erp_ht);
+ objagg_destroy(erp_table->objagg);
kfree(erp_table);
}
@@ -1118,6 +1424,12 @@ static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_gen_pool_add;
+ erp_core->bf = mlxsw_sp_acl_bf_init(mlxsw_sp, erp_core->num_erp_banks);
+ if (IS_ERR(erp_core->bf)) {
+ err = PTR_ERR(erp_core->bf);
+ goto err_bf_init;
+ }
+
/* Different regions require masks of different sizes */
err = mlxsw_sp_acl_erp_tables_sizes_query(mlxsw_sp, erp_core);
if (err)
@@ -1126,6 +1438,8 @@ static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_erp_tables_sizes_query:
+ mlxsw_sp_acl_bf_fini(erp_core->bf);
+err_bf_init:
err_gen_pool_add:
gen_pool_destroy(erp_core->erp_tables);
return err;
@@ -1134,6 +1448,7 @@ err_gen_pool_add:
static void mlxsw_sp_acl_erp_tables_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_erp_core *erp_core)
{
+ mlxsw_sp_acl_bf_fini(erp_core->bf);
gen_pool_destroy(erp_core->erp_tables);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index d409b09ba8df..2a998dea4f39 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -98,8 +98,8 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
-static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
- char *output)
+static void mlxsw_sp1_afk_encode_block(char *output, int block_index,
+ char *block)
{
unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
char *output_indexed = output + offset;
@@ -107,10 +107,19 @@ static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
}
+static void mlxsw_sp1_afk_clear_block(char *output, int block_index)
+{
+ unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+ char *output_indexed = output + offset;
+
+ memset(output_indexed, 0, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
+}
+
const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
.blocks = mlxsw_sp1_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
.encode_block = mlxsw_sp1_afk_encode_block,
+ .clear_block = mlxsw_sp1_afk_clear_block,
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
@@ -158,6 +167,11 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
};
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_7, 0x04, 24, 8),
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_8_10, 0x00, 0, 3),
+};
+
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
};
@@ -201,6 +215,7 @@ static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
+ MLXSW_AFK_BLOCK(0x3C, mlxsw_sp_afk_element_info_ipv4_4),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2),
@@ -263,10 +278,9 @@ static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
};
-static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
- char *output)
+static void __mlxsw_sp2_afk_block_value_set(char *output, int block_index,
+ u64 block_value)
{
- u64 block_value = mlxsw_sp2_afk_block_value_get(block);
const struct mlxsw_sp2_afk_block_layout *block_layout;
if (WARN_ON(block_index < 0 ||
@@ -278,8 +292,22 @@ static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
&block_layout->item, 0, block_value);
}
+static void mlxsw_sp2_afk_encode_block(char *output, int block_index,
+ char *block)
+{
+ u64 block_value = mlxsw_sp2_afk_block_value_get(block);
+
+ __mlxsw_sp2_afk_block_value_set(output, block_index, block_value);
+}
+
+static void mlxsw_sp2_afk_clear_block(char *output, int block_index)
+{
+ __mlxsw_sp2_afk_block_value_set(output, block_index, 0);
+}
+
const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
.blocks = mlxsw_sp2_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks),
.encode_block = mlxsw_sp2_afk_encode_block,
+ .clear_block = mlxsw_sp2_afk_clear_block,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index e171513bb32a..fe230acf92a9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -95,8 +95,9 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
return -EIO;
- max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
- if (rulei->priority > max_priority)
+ /* Priority range is 1..cap_kvd_size-1. */
+ max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
+ if (rulei->priority >= max_priority)
return -EINVAL;
/* Unlike in TC, in HW, higher number means higher priority. */
@@ -779,6 +780,20 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
}
static int
+mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam_group *group,
+ struct mlxsw_sp_acl_tcam_entry *entry,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
+ struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+
+ return ops->entry_action_replace(mlxsw_sp, region->priv, chunk->priv,
+ entry->priv, rulei);
+}
+
+static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_entry *entry,
bool *activity)
@@ -848,6 +863,15 @@ struct mlxsw_sp_acl_tcam_flower_rule {
struct mlxsw_sp_acl_tcam_entry entry;
};
+struct mlxsw_sp_acl_tcam_mr_ruleset {
+ struct mlxsw_sp_acl_tcam_chunk *chunk;
+ struct mlxsw_sp_acl_tcam_group group;
+};
+
+struct mlxsw_sp_acl_tcam_mr_rule {
+ struct mlxsw_sp_acl_tcam_entry entry;
+};
+
static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam,
@@ -930,6 +954,15 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
}
static int
+mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv,
+ void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
void *rule_priv, bool *activity)
{
@@ -949,12 +982,146 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
.rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size,
.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
+ .rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace,
.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
+static int
+mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam,
+ void *ruleset_priv,
+ struct mlxsw_afk_element_usage *tmplt_elusage)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+ int err;
+
+ err = mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
+ mlxsw_sp_acl_tcam_patterns,
+ MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
+ tmplt_elusage);
+ if (err)
+ return err;
+
+	/* For most TCAM clients it would make sense to take a TCAM chunk
+	 * only when the first rule is written. This is not the case for
+	 * the multicast router, which must be bound to a specific ACL
+	 * group ID that already exists in HW before the multicast router
+	 * is initialized.
+	 */
+ ruleset->chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, &ruleset->group,
+ 1, tmplt_elusage);
+ if (IS_ERR(ruleset->chunk)) {
+ err = PTR_ERR(ruleset->chunk);
+ goto err_chunk_get;
+ }
+
+ return 0;
+
+err_chunk_get:
+ mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
+ return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+
+ mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, ruleset->chunk);
+ mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+	/* Binding is done when initializing the multicast router */
+ return 0;
+}
+
+static void
+mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv,
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ bool ingress)
+{
+}
+
+static u16
+mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+
+ return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
+}
+
+static size_t mlxsw_sp_acl_tcam_mr_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
+{
+ return sizeof(struct mlxsw_sp_acl_tcam_mr_rule) +
+ mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+ void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+ struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
+ &rule->entry, rulei);
+}
+
+static void
+mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
+{
+ struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+ mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei)
+{
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+ struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, &ruleset->group,
+ &rule->entry, rulei);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
+ void *rule_priv, bool *activity)
+{
+ struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+ return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
+ activity);
+}
+
+static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
+ .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
+ .ruleset_add = mlxsw_sp_acl_tcam_mr_ruleset_add,
+ .ruleset_del = mlxsw_sp_acl_tcam_mr_ruleset_del,
+ .ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind,
+ .ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
+ .ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
+ .rule_priv_size = mlxsw_sp_acl_tcam_mr_rule_priv_size,
+ .rule_add = mlxsw_sp_acl_tcam_mr_rule_add,
+ .rule_del = mlxsw_sp_acl_tcam_mr_rule_del,
+ .rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace,
+ .rule_activity_get = mlxsw_sp_acl_tcam_mr_rule_activity_get,
+};
+
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
+ [MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};
const struct mlxsw_sp_acl_profile_ops *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 219a4e26c332..0f1a9dee63de 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -48,6 +48,9 @@ struct mlxsw_sp_acl_profile_ops {
void *ruleset_priv, void *rule_priv,
struct mlxsw_sp_acl_rule_info *rulei);
void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+ int (*rule_action_replace)(struct mlxsw_sp *mlxsw_sp,
+ void *ruleset_priv, void *rule_priv,
+ struct mlxsw_sp_acl_rule_info *rulei);
int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv,
bool *activity);
};
@@ -121,6 +124,11 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_ctcam_region *cregion,
struct mlxsw_sp_acl_ctcam_chunk *cchunk,
struct mlxsw_sp_acl_ctcam_entry *centry);
+int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_ctcam_region *cregion,
+ struct mlxsw_sp_acl_ctcam_chunk *cchunk,
+ struct mlxsw_sp_acl_ctcam_entry *centry,
+ struct mlxsw_sp_acl_rule_info *rulei);
static inline unsigned int
mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry)
{
@@ -144,6 +152,7 @@ struct mlxsw_sp_acl_atcam {
struct mlxsw_sp_acl_atcam_region {
struct rhashtable entries_ht; /* A-TCAM only */
+ struct list_head entries_list; /* A-TCAM only */
struct mlxsw_sp_acl_ctcam_region cregion;
const struct mlxsw_sp_acl_atcam_region_ops *ops;
struct mlxsw_sp_acl_tcam_region *region;
@@ -154,7 +163,9 @@ struct mlxsw_sp_acl_atcam_region {
};
struct mlxsw_sp_acl_atcam_entry_ht_key {
- char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
+ * minus delta bits.
+ */
u8 erp_id;
};
@@ -164,10 +175,17 @@ struct mlxsw_sp_acl_atcam_chunk {
struct mlxsw_sp_acl_atcam_entry {
struct rhash_head ht_node;
+ struct list_head list; /* Member in entries_list */
struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+ char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ struct {
+ u16 start;
+ u8 mask;
+ u8 value;
+ } delta_info;
struct mlxsw_sp_acl_ctcam_entry centry;
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
- struct mlxsw_sp_acl_erp *erp;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
};
static inline struct mlxsw_sp_acl_atcam_region *
@@ -204,20 +222,45 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_chunk *achunk,
struct mlxsw_sp_acl_atcam_entry *aentry);
+int mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_atcam_chunk *achunk,
+ struct mlxsw_sp_acl_atcam_entry *aentry,
+ struct mlxsw_sp_acl_rule_info *rulei);
int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
-struct mlxsw_sp_acl_erp;
+struct mlxsw_sp_acl_erp_delta;
-bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp);
-u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp);
-struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
- const char *mask, bool ctcam);
-void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_erp *erp);
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key);
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key);
+
+struct mlxsw_sp_acl_erp_mask;
+
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam);
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask);
+int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
@@ -225,4 +268,22 @@ int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
+struct mlxsw_sp_acl_bf;
+
+int
+mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+void
+mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_bf *bf,
+ struct mlxsw_sp_acl_atcam_region *aregion,
+ unsigned int erp_bank,
+ struct mlxsw_sp_acl_atcam_entry *aentry);
+struct mlxsw_sp_acl_bf *
+mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks);
+void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index a3db033d7399..055cc6943b34 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -15,6 +15,7 @@
struct mlxsw_sp_fid_family;
struct mlxsw_sp_fid_core {
+ struct rhashtable fid_ht;
struct rhashtable vni_ht;
struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX];
unsigned int *port_fid_mappings;
@@ -26,10 +27,13 @@ struct mlxsw_sp_fid {
unsigned int ref_count;
u16 fid_index;
struct mlxsw_sp_fid_family *fid_family;
+ struct rhash_head ht_node;
struct rhash_head vni_ht_node;
+ enum mlxsw_sp_nve_type nve_type;
__be32 vni;
u32 nve_flood_index;
+ int nve_ifindex;
u8 vni_valid:1,
nve_flood_index_valid:1;
};
@@ -44,6 +48,12 @@ struct mlxsw_sp_fid_8021d {
int br_ifindex;
};
+static const struct rhashtable_params mlxsw_sp_fid_ht_params = {
+ .key_len = sizeof_field(struct mlxsw_sp_fid, fid_index),
+ .key_offset = offsetof(struct mlxsw_sp_fid, fid_index),
+ .head_offset = offsetof(struct mlxsw_sp_fid, ht_node),
+};
+
static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = {
.key_len = sizeof_field(struct mlxsw_sp_fid, vni),
.key_offset = offsetof(struct mlxsw_sp_fid, vni),
@@ -75,6 +85,8 @@ struct mlxsw_sp_fid_ops {
int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid,
u32 nve_flood_index);
void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid);
+ void (*fdb_clear_offload)(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev);
};
struct mlxsw_sp_fid_family {
@@ -89,6 +101,7 @@ struct mlxsw_sp_fid_family {
enum mlxsw_sp_rif_type rif_type;
const struct mlxsw_sp_fid_ops *ops;
struct mlxsw_sp *mlxsw_sp;
+ u8 lag_vid_valid:1;
};
static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = {
@@ -113,6 +126,45 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = {
[MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types,
};
+bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid)
+{
+ return fid->fid_family->lag_vid_valid;
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp,
+ u16 fid_index)
+{
+ struct mlxsw_sp_fid *fid;
+
+ fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->fid_ht, &fid_index,
+ mlxsw_sp_fid_ht_params);
+ if (fid)
+ fid->ref_count++;
+
+ return fid;
+}
+
+int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex)
+{
+ if (!fid->vni_valid)
+ return -EINVAL;
+
+ *nve_ifindex = fid->nve_ifindex;
+
+ return 0;
+}
+
+int mlxsw_sp_fid_nve_type(const struct mlxsw_sp_fid *fid,
+ enum mlxsw_sp_nve_type *p_type)
+{
+ if (!fid->vni_valid)
+ return -EINVAL;
+
+ *p_type = fid->nve_type;
+
+ return 0;
+}
+
struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp,
__be32 vni)
{
@@ -173,7 +225,8 @@ bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid)
return fid->nve_flood_index_valid;
}
-int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
+int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type,
+ __be32 vni, int nve_ifindex)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
@@ -183,6 +236,8 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni)
if (WARN_ON(!ops->vni_set || fid->vni_valid))
return -EINVAL;
+ fid->nve_type = type;
+ fid->nve_ifindex = nve_ifindex;
fid->vni = vni;
err = rhashtable_lookup_insert_fast(&mlxsw_sp->fid_core->vni_ht,
&fid->vni_ht_node,
@@ -224,6 +279,16 @@ bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid)
return fid->vni_valid;
}
+void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev)
+{
+ struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ const struct mlxsw_sp_fid_ops *ops = fid_family->ops;
+
+ if (ops->fdb_clear_offload)
+ ops->fdb_clear_offload(fid, nve_dev);
+}
+
static const struct mlxsw_sp_flood_table *
mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid,
enum mlxsw_sp_flood_type packet_type)
@@ -284,11 +349,6 @@ void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid,
fid->fid_family->ops->port_vid_unmap(fid, mlxsw_sp_port, vid);
}
-enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid)
-{
- return fid->fid_family->rif_type;
-}
-
u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid)
{
return fid->fid_index;
@@ -304,6 +364,11 @@ void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif)
fid->rif = rif;
}
+struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid)
+{
+ return fid->rif;
+}
+
enum mlxsw_sp_rif_type
mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_fid_type type)
@@ -568,7 +633,7 @@ mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg)
static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid)
{
- return fid->fid_index - fid->fid_family->start_index;
+ return fid->fid_index - VLAN_N_VID;
}
static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -713,6 +778,13 @@ static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid)
fid->vni_valid, 0, false);
}
+static void
+mlxsw_sp_fid_8021d_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev)
+{
+ br_fdb_clear_offload(nve_dev, 0);
+}
+
static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.setup = mlxsw_sp_fid_8021d_setup,
.configure = mlxsw_sp_fid_8021d_configure,
@@ -726,6 +798,7 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
.vni_clear = mlxsw_sp_fid_8021d_vni_clear,
.nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
.nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload,
};
static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
@@ -759,6 +832,48 @@ static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = {
.nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
.rif_type = MLXSW_SP_RIF_TYPE_FID,
.ops = &mlxsw_sp_fid_8021d_ops,
+ .lag_vid_valid = 1,
+};
+
+static void
+mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev)
+{
+ br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid));
+}
+
+static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_emu_ops = {
+ .setup = mlxsw_sp_fid_8021q_setup,
+ .configure = mlxsw_sp_fid_8021d_configure,
+ .deconfigure = mlxsw_sp_fid_8021d_deconfigure,
+ .index_alloc = mlxsw_sp_fid_8021d_index_alloc,
+ .compare = mlxsw_sp_fid_8021q_compare,
+ .flood_index = mlxsw_sp_fid_8021d_flood_index,
+ .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map,
+ .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap,
+ .vni_set = mlxsw_sp_fid_8021d_vni_set,
+ .vni_clear = mlxsw_sp_fid_8021d_vni_clear,
+ .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set,
+ .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear,
+ .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload,
+};
+
+/* There are 4K-2 emulated 802.1Q FIDs, starting right after the 802.1D FIDs */
+#define MLXSW_SP_FID_8021Q_EMU_START (VLAN_N_VID + MLXSW_SP_FID_8021D_MAX)
+#define MLXSW_SP_FID_8021Q_EMU_END (MLXSW_SP_FID_8021Q_EMU_START + \
+ VLAN_VID_MASK - 2)
+
+/* Range and flood configuration must match mlxsw_config_profile */
+static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_emu_family = {
+ .type = MLXSW_SP_FID_TYPE_8021Q,
+ .fid_size = sizeof(struct mlxsw_sp_fid_8021q),
+ .start_index = MLXSW_SP_FID_8021Q_EMU_START,
+ .end_index = MLXSW_SP_FID_8021Q_EMU_END,
+ .flood_tables = mlxsw_sp_fid_8021d_flood_tables,
+ .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables),
+ .rif_type = MLXSW_SP_RIF_TYPE_VLAN,
+ .ops = &mlxsw_sp_fid_8021q_emu_ops,
+ .lag_vid_valid = 1,
};
static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid)
@@ -888,7 +1003,7 @@ static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = {
};
static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = {
- [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_family,
+ [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_emu_family,
[MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family,
[MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family,
[MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family,
@@ -944,10 +1059,17 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_configure;
+ err = rhashtable_insert_fast(&mlxsw_sp->fid_core->fid_ht, &fid->ht_node,
+ mlxsw_sp_fid_ht_params);
+ if (err)
+ goto err_rhashtable_insert;
+
list_add(&fid->list, &fid_family->fids_list);
fid->ref_count++;
return fid;
+err_rhashtable_insert:
+ fid->fid_family->ops->deconfigure(fid);
err_configure:
__clear_bit(fid_index - fid_family->start_index,
fid_family->fids_bitmap);
@@ -959,19 +1081,18 @@ err_index_alloc:
void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid)
{
struct mlxsw_sp_fid_family *fid_family = fid->fid_family;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- if (--fid->ref_count == 1 && fid->rif) {
- /* Destroy the associated RIF and let it drop the last
- * reference on the FID.
- */
- return mlxsw_sp_rif_destroy(fid->rif);
- } else if (fid->ref_count == 0) {
- list_del(&fid->list);
- fid->fid_family->ops->deconfigure(fid);
- __clear_bit(fid->fid_index - fid_family->start_index,
- fid_family->fids_bitmap);
- kfree(fid);
- }
+ if (--fid->ref_count != 0)
+ return;
+
+ list_del(&fid->list);
+ rhashtable_remove_fast(&mlxsw_sp->fid_core->fid_ht,
+ &fid->ht_node, mlxsw_sp_fid_ht_params);
+ fid->fid_family->ops->deconfigure(fid);
+ __clear_bit(fid->fid_index - fid_family->start_index,
+ fid_family->fids_bitmap);
+ kfree(fid);
}
struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid)
@@ -985,6 +1106,12 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D, &br_ifindex);
}
+struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_lookup(struct mlxsw_sp *mlxsw_sp,
+ u16 vid)
+{
+ return mlxsw_sp_fid_lookup(mlxsw_sp, MLXSW_SP_FID_TYPE_8021Q, &vid);
+}
+
struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp,
int br_ifindex)
{
@@ -1126,9 +1253,13 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp)
return -ENOMEM;
mlxsw_sp->fid_core = fid_core;
+ err = rhashtable_init(&fid_core->fid_ht, &mlxsw_sp_fid_ht_params);
+ if (err)
+ goto err_rhashtable_fid_init;
+
err = rhashtable_init(&fid_core->vni_ht, &mlxsw_sp_fid_vni_ht_params);
if (err)
- goto err_rhashtable_init;
+ goto err_rhashtable_vni_init;
fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int),
GFP_KERNEL);
@@ -1157,7 +1288,9 @@ err_fid_ops_register:
kfree(fid_core->port_fid_mappings);
err_alloc_port_fid_mappings:
rhashtable_destroy(&fid_core->vni_ht);
-err_rhashtable_init:
+err_rhashtable_vni_init:
+ rhashtable_destroy(&fid_core->fid_ht);
+err_rhashtable_fid_init:
kfree(fid_core);
return err;
}
@@ -1172,5 +1305,6 @@ void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
fid_core->fid_family_arr[i]);
kfree(fid_core->port_fid_mappings);
rhashtable_destroy(&fid_core->vni_ht);
+ rhashtable_destroy(&fid_core->fid_ht);
kfree(fid_core);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 8d211972c5e9..ff072358d950 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -406,7 +406,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
- rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie,
+ rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
f->common.extack);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index ad06d9969bc1..0a31fff2516e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -174,6 +174,20 @@ mlxsw_sp_nve_mc_record_ops_arr[] = {
[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops,
};
+int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ addr->addr4 = cpu_to_be32(uip);
+ return 0;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+}
+
static struct mlxsw_sp_nve_mc_list *
mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_nve_mc_list_key *key)
@@ -560,7 +574,7 @@ static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp,
mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr,
&mc_entry);
- if (WARN_ON(!mc_record))
+ if (!mc_record)
return;
mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
@@ -647,7 +661,7 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
key.fid_index = mlxsw_sp_fid_index(fid);
mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
- if (WARN_ON(!mc_list))
+ if (!mc_list)
return;
mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list);
@@ -775,6 +789,21 @@ static void mlxsw_sp_nve_fdb_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
+static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid *fid,
+ const struct net_device *nve_dev,
+ __be32 vni)
+{
+ const struct mlxsw_sp_nve_ops *ops;
+ enum mlxsw_sp_nve_type type;
+
+ if (WARN_ON(mlxsw_sp_fid_nve_type(fid, &type)))
+ return;
+
+ ops = mlxsw_sp->nve->nve_ops_arr[type];
+ ops->fdb_clear_offload(nve_dev, vni);
+}
+
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
struct mlxsw_sp_nve_params *params,
struct netlink_ext_ack *extack)
@@ -803,7 +832,8 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
return err;
}
- err = mlxsw_sp_fid_vni_set(fid, params->vni);
+ err = mlxsw_sp_fid_vni_set(fid, params->type, params->vni,
+ params->dev->ifindex);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID");
goto err_fid_vni_set;
@@ -811,8 +841,16 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
nve->config = config;
+ err = ops->fdb_replay(params->dev, params->vni);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to offload the FDB");
+ goto err_fdb_replay;
+ }
+
return 0;
+err_fdb_replay:
+ mlxsw_sp_fid_vni_clear(fid);
err_fid_vni_set:
mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
return err;
@@ -822,9 +860,27 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fid *fid)
{
u16 fid_index = mlxsw_sp_fid_index(fid);
+ struct net_device *nve_dev;
+ int nve_ifindex;
+ __be32 vni;
mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
+
+ if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) ||
+ mlxsw_sp_fid_vni(fid, &vni)))
+ goto out;
+
+ nve_dev = dev_get_by_index(&init_net, nve_ifindex);
+ if (!nve_dev)
+ goto out;
+
+ mlxsw_sp_nve_fdb_clear_offload(mlxsw_sp, fid, nve_dev, vni);
+ mlxsw_sp_fid_fdb_clear_offload(fid, nve_dev);
+
+ dev_put(nve_dev);
+
+out:
mlxsw_sp_fid_vni_clear(fid);
mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
}
@@ -977,6 +1033,6 @@ void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
{
WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
- mlxsw_sp->nve = NULL;
kfree(mlxsw_sp->nve);
+ mlxsw_sp->nve = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 4cc3297e13d6..02937ea95bc3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -41,6 +41,8 @@ struct mlxsw_sp_nve_ops {
int (*init)(struct mlxsw_sp_nve *nve,
const struct mlxsw_sp_nve_config *config);
void (*fini)(struct mlxsw_sp_nve *nve);
+ int (*fdb_replay)(const struct net_device *nve_dev, __be32 vni);
+ void (*fdb_clear_offload)(const struct net_device *nve_dev, __be32 vni);
};
extern const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index d21c7be5b1c9..74e564c4ac19 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -17,7 +17,8 @@
#define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128
#define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96
-#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS VXLAN_F_UDP_ZERO_CSUM_TX
+#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
+ VXLAN_F_LEARN)
static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
const struct net_device *dev,
@@ -61,11 +62,6 @@ static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
return false;
}
- if (cfg->flags & VXLAN_F_LEARN) {
- NL_SET_ERR_MSG_MOD(extack, "VxLAN: Learning is not supported");
- return false;
- }
-
if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) {
NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported");
return false;
@@ -215,12 +211,30 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
config->udp_dport);
}
+static int
+mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni)
+{
+ if (WARN_ON(!netif_is_vxlan(nve_dev)))
+ return -EINVAL;
+ return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier);
+}
+
+static void
+mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni)
+{
+ if (WARN_ON(!netif_is_vxlan(nve_dev)))
+ return;
+ vxlan_fdb_clear_offload(nve_dev, vni);
+}
+
const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
.type = MLXSW_SP_NVE_TYPE_VXLAN,
.can_offload = mlxsw_sp1_nve_vxlan_can_offload,
.nve_config = mlxsw_sp_nve_vxlan_config,
.init = mlxsw_sp1_nve_vxlan_init,
.fini = mlxsw_sp1_nve_vxlan_fini,
+ .fdb_replay = mlxsw_sp_nve_vxlan_fdb_replay,
+ .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
static bool mlxsw_sp2_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve,
@@ -246,4 +260,6 @@ const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
.nve_config = mlxsw_sp_nve_vxlan_config,
.init = mlxsw_sp2_nve_vxlan_init,
.fini = mlxsw_sp2_nve_vxlan_fini,
+ .fdb_replay = mlxsw_sp_nve_vxlan_fdb_replay,
+ .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 9e9bb57134f2..98e5ffd71b91 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -15,6 +15,7 @@
#include <linux/gcd.h>
#include <linux/random.h>
#include <linux/if_macvlan.h>
+#include <linux/refcount.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -70,6 +71,8 @@ struct mlxsw_sp_router {
bool aborted;
struct notifier_block fib_nb;
struct notifier_block netevent_nb;
+ struct notifier_block inetaddr_nb;
+ struct notifier_block inet6addr_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
};
@@ -104,6 +107,7 @@ struct mlxsw_sp_rif_params {
struct mlxsw_sp_rif_subport {
struct mlxsw_sp_rif common;
+ refcount_t ref_count;
union {
u16 system_port;
u16 lag_id;
@@ -136,6 +140,7 @@ struct mlxsw_sp_rif_ops {
void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
+static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_lpm_tree *lpm_tree);
@@ -1275,15 +1280,12 @@ mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
{
u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
- struct net_device *ipip_ul_dev;
if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
return false;
- ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
- ul_tb_id, ipip_entry) &&
- (!ipip_ul_dev || ipip_ul_dev == ul_dev);
+ ul_tb_id, ipip_entry);
}
/* Given decap parameters, find the corresponding IPIP entry. */
@@ -6300,6 +6302,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
err = -ENOMEM;
goto err_rif_alloc;
}
+ dev_hold(rif->dev);
rif->mlxsw_sp = mlxsw_sp;
rif->ops = ops;
@@ -6338,6 +6341,7 @@ err_configure:
if (fid)
mlxsw_sp_fid_put(fid);
err_fid_get:
+ dev_put(rif->dev);
kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
@@ -6346,7 +6350,7 @@ err_rif_index_alloc:
return ERR_PTR(err);
}
-void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
+static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
const struct mlxsw_sp_rif_ops *ops = rif->ops;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
@@ -6365,6 +6369,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
if (fid)
/* Loopback RIFs are not associated with a FID. */
mlxsw_sp_fid_put(fid);
+ dev_put(rif->dev);
kfree(rif);
vr->rif_count--;
mlxsw_sp_vr_put(mlxsw_sp, vr);
@@ -6395,6 +6400,40 @@ mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
params->system_port = mlxsw_sp_port->local_port;
}
+static struct mlxsw_sp_rif_subport *
+mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
+{
+ return container_of(rif, struct mlxsw_sp_rif_subport, common);
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_rif_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+ struct mlxsw_sp_rif *rif;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
+ if (!rif)
+ return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ refcount_inc(&rif_subport->ref_count);
+ return rif;
+}
+
+static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp_rif_subport *rif_subport;
+
+ rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ if (!refcount_dec_and_test(&rif_subport->ref_count))
+ return;
+
+ mlxsw_sp_rif_destroy(rif);
+}
+
static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
struct net_device *l3_dev,
@@ -6402,22 +6441,18 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_rif_params params = {
+ .dev = l3_dev,
+ };
u16 vid = mlxsw_sp_port_vlan->vid;
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_fid *fid;
int err;
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
- if (!rif) {
- struct mlxsw_sp_rif_params params = {
- .dev = l3_dev,
- };
-
- mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
- rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
- if (IS_ERR(rif))
- return PTR_ERR(rif);
- }
+ mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
+ rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
+ if (IS_ERR(rif))
+ return PTR_ERR(rif);
/* FID was already created, just take a reference */
fid = rif->ops->fid_get(rif, extack);
@@ -6444,6 +6479,7 @@ err_port_vid_learning_set:
mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
mlxsw_sp_fid_put(fid);
+ mlxsw_sp_rif_subport_put(rif);
return err;
}
@@ -6452,6 +6488,7 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
+ struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
u16 vid = mlxsw_sp_port_vlan->vid;
if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
@@ -6461,10 +6498,8 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
- /* If router port holds the last reference on the rFID, then the
- * associated Sub-port RIF will be destroyed.
- */
mlxsw_sp_fid_put(fid);
+ mlxsw_sp_rif_subport_put(rif);
}
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
@@ -6500,8 +6535,8 @@ static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
netif_is_ovs_port(port_dev))
return 0;
- return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1,
- extack);
+ return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
+ MLXSW_SP_DEFAULT_VID, extack);
}
static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
@@ -6534,15 +6569,15 @@ static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
if (netif_is_bridge_port(lag_dev))
return 0;
- return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1,
- extack);
+ return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
+ MLXSW_SP_DEFAULT_VID, extack);
}
-static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
+static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
struct mlxsw_sp_rif_params params = {
.dev = l3_dev,
};
@@ -6563,7 +6598,8 @@ static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
return 0;
}
-static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
+static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
@@ -6580,7 +6616,8 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
vid, extack);
else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
- return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack);
+ return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
+ extack);
return 0;
}
@@ -6681,16 +6718,11 @@ void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fid_index(rif->fid), false);
}
-static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev,
+static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *macvlan_dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
- struct mlxsw_sp *mlxsw_sp;
-
- mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
- if (!mlxsw_sp)
- return 0;
-
switch (event) {
case NETDEV_UP:
return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
@@ -6702,7 +6734,35 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev,
return 0;
}
-static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
+static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
+ const unsigned char *dev_addr,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif *rif;
+ int i;
+
+ /* A RIF is not created for macvlan netdevs. Their MAC is used to
+ * populate the FDB
+ */
+ if (netif_is_macvlan(dev))
+ return 0;
+
+ for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
+ rif = mlxsw_sp->router->rifs[i];
+ if (rif && rif->dev != dev &&
+ !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
+ mlxsw_sp->mac_mask)) {
+ NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev,
unsigned long event,
struct netlink_ext_ack *extack)
{
@@ -6711,21 +6771,24 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
else if (netif_is_lag_master(dev))
return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
else if (netif_is_bridge_master(dev))
- return mlxsw_sp_inetaddr_bridge_event(dev, event, extack);
+ return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
+ extack);
else if (is_vlan_dev(dev))
- return mlxsw_sp_inetaddr_vlan_event(dev, event, extack);
+ return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
+ extack);
else if (netif_is_macvlan(dev))
- return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack);
+ return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
+ extack);
else
return 0;
}
-int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
struct net_device *dev = ifa->ifa_dev->dev;
- struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_router *router;
struct mlxsw_sp_rif *rif;
int err = 0;
@@ -6733,15 +6796,12 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
if (event == NETDEV_UP)
goto out;
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- goto out;
-
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
+ rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(dev, event, NULL);
+ err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
out:
return notifier_from_errno(err);
}
@@ -6763,13 +6823,19 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack);
+ err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
+ ivi->extack);
+ if (err)
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
out:
return notifier_from_errno(err);
}
struct mlxsw_sp_inet6addr_event_work {
struct work_struct work;
+ struct mlxsw_sp *mlxsw_sp;
struct net_device *dev;
unsigned long event;
};
@@ -6778,21 +6844,18 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
+ struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
struct net_device *dev = inet6addr_work->dev;
unsigned long event = inet6addr_work->event;
- struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *rif;
rtnl_lock();
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- goto out;
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- __mlxsw_sp_inetaddr_event(dev, event, NULL);
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
out:
rtnl_unlock();
dev_put(dev);
@@ -6800,25 +6863,25 @@ out:
}
/* Called with rcu_read_lock() */
-int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
struct net_device *dev = if6->idev->dev;
+ struct mlxsw_sp_router *router;
/* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
if (event == NETDEV_UP)
return NOTIFY_DONE;
- if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
- return NOTIFY_DONE;
-
inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
if (!inet6addr_work)
return NOTIFY_BAD;
+ router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
+ inet6addr_work->mlxsw_sp = router->mlxsw_sp;
inet6addr_work->dev = dev;
inet6addr_work->event = event;
dev_hold(dev);
@@ -6844,7 +6907,12 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack);
+ err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
+ i6vi->extack);
+ if (err)
+ goto out;
+
+ err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
return notifier_from_errno(err);
}
@@ -6866,20 +6934,14 @@ static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
-int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
+static int
+mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif)
{
- struct mlxsw_sp *mlxsw_sp;
- struct mlxsw_sp_rif *rif;
+ struct net_device *dev = rif->dev;
u16 fid_index;
int err;
- mlxsw_sp = mlxsw_sp_lower_get(dev);
- if (!mlxsw_sp)
- return 0;
-
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!rif)
- return 0;
fid_index = mlxsw_sp_fid_index(rif->fid);
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
@@ -6923,6 +6985,41 @@ err_rif_edit:
return err;
}
+static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
+ struct netdev_notifier_pre_changeaddr_info *info)
+{
+ struct netlink_ext_ack *extack;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
+ info->dev_addr, extack);
+}
+
+int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *rif;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!rif)
+ return 0;
+
+ switch (event) {
+ case NETDEV_CHANGEMTU: /* fall through */
+ case NETDEV_CHANGEADDR:
+ return mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
+ case NETDEV_PRE_CHANGEADDR:
+ return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
+ }
+
+ return 0;
+}
+
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
struct net_device *l3_dev,
struct netlink_ext_ack *extack)
@@ -6934,9 +7031,10 @@ static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
*/
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (rif)
- __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack);
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
+ extack);
- return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack);
+ return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
}
static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
@@ -6947,7 +7045,7 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (!rif)
return;
- __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL);
+ __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
}
int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
@@ -7001,18 +7099,13 @@ static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
__mlxsw_sp_rif_macvlan_flush, rif);
}
-static struct mlxsw_sp_rif_subport *
-mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
-{
- return container_of(rif, struct mlxsw_sp_rif_subport, common);
-}
-
static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
const struct mlxsw_sp_rif_params *params)
{
struct mlxsw_sp_rif_subport *rif_subport;
rif_subport = mlxsw_sp_rif_subport_rif(rif);
+ refcount_set(&rif_subport->ref_count, 1);
rif_subport->vid = params->vid;
rif_subport->lag = params->lag;
if (params->lag)
@@ -7167,11 +7260,15 @@ static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
+ struct net_device *br_dev = rif->dev;
u16 vid;
int err;
if (is_vlan_dev(rif->dev)) {
vid = vlan_dev_vlan_id(rif->dev);
+ br_dev = vlan_dev_real_dev(rif->dev);
+ if (WARN_ON(!netif_is_bridge_master(br_dev)))
+ return ERR_PTR(-EINVAL);
} else {
err = br_vlan_get_pvid(rif->dev, &vid);
if (err < 0 || !vid) {
@@ -7180,7 +7277,7 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
}
}
- return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
+ return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, br_dev, vid, extack);
}
static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
@@ -7270,7 +7367,7 @@ static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
- return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
+ return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, rif->dev, 0, extack);
}
static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
@@ -7296,6 +7393,15 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
.fdb_del = mlxsw_sp_rif_fid_fdb_del,
};
+static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
+ .type = MLXSW_SP_RIF_TYPE_VLAN,
+ .rif_size = sizeof(struct mlxsw_sp_rif),
+ .configure = mlxsw_sp_rif_fid_configure,
+ .deconfigure = mlxsw_sp_rif_fid_deconfigure,
+ .fid_get = mlxsw_sp_rif_vlan_fid_get,
+ .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
+};
+
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
@@ -7364,7 +7470,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = {
static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
- [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
+ [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
[MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops,
};
@@ -7555,6 +7661,16 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
+ router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
+ err = register_inet6addr_notifier(&router->inet6addr_nb);
+ if (err)
+ goto err_register_inet6addr_notifier;
+
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -7640,6 +7756,10 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ unregister_inet6addr_notifier(&router->inet6addr_nb);
+err_register_inet6addr_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
kfree(mlxsw_sp->router);
return err;
}
@@ -7657,5 +7777,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
+ unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index d965fd275c90..ad5a9b9e1466 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -383,7 +383,7 @@ mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
}
static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
- .can_handle = is_gretap_dev,
+ .can_handle = netif_is_gretap,
.parms = mlxsw_sp_span_entry_gretap4_parms,
.configure = mlxsw_sp_span_entry_gretap4_configure,
.deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
@@ -484,7 +484,7 @@ mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
static const
struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
- .can_handle = is_ip6gretap_dev,
+ .can_handle = netif_is_ip6gretap,
.parms = mlxsw_sp_span_entry_gretap6_parms,
.configure = mlxsw_sp_span_entry_gretap6_configure,
.deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 739a51f0a366..1bd2c6e15f8d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -85,13 +85,11 @@ struct mlxsw_sp_bridge_ops {
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port);
int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev,
+ const struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack);
- void (*vxlan_leave)(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev);
struct mlxsw_sp_fid *
(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
- u16 vid);
+ u16 vid, struct netlink_ext_ack *extack);
struct mlxsw_sp_fid *
(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid);
@@ -292,24 +290,6 @@ mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
kfree(bridge_port);
}
-static bool
-mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
- bridge_port)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
-
- /* In case ports were pulled from out of a bridged LAG, then
- * it's possible the reference count isn't zero, yet the bridge
- * port should be destroyed, as it's no longer an upper of ours.
- */
- if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
- return true;
- else if (bridge_port->ref_count == 0)
- return true;
- else
- return false;
-}
-
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
struct net_device *brport_dev)
@@ -347,8 +327,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
{
struct mlxsw_sp_bridge_device *bridge_device;
- bridge_port->ref_count--;
- if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
+ if (--bridge_port->ref_count != 0)
return;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_bridge_port_destroy(bridge_port);
@@ -929,7 +908,8 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
- struct mlxsw_sp_bridge_port *bridge_port)
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_bridge_device *bridge_device;
@@ -939,7 +919,7 @@ mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
int err;
bridge_device = bridge_port->bridge_device;
- fid = bridge_device->ops->fid_get(bridge_device, vid);
+ fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
if (IS_ERR(fid))
return PTR_ERR(fid);
@@ -1007,7 +987,8 @@ mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
- struct mlxsw_sp_bridge_port *bridge_port)
+ struct mlxsw_sp_bridge_port *bridge_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
struct mlxsw_sp_bridge_vlan *bridge_vlan;
@@ -1015,12 +996,11 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
int err;
/* No need to continue if only VLAN flags were changed */
- if (mlxsw_sp_port_vlan->bridge_port) {
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ if (mlxsw_sp_port_vlan->bridge_port)
return 0;
- }
- err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port);
+ err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
+ extack);
if (err)
return err;
@@ -1097,16 +1077,33 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port,
- u16 vid, bool is_untagged, bool is_pvid)
+ u16 vid, bool is_untagged, bool is_pvid,
+ struct netlink_ext_ack *extack,
+ struct switchdev_trans *trans)
{
u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
u16 old_pvid = mlxsw_sp_port->pvid;
int err;
- mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid);
- if (IS_ERR(mlxsw_sp_port_vlan))
- return PTR_ERR(mlxsw_sp_port_vlan);
+ /* The only valid scenario in which a port-vlan already exists is if
+ * the VLAN flags were changed and the port-vlan is associated with the
+ * correct bridge port
+ */
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
+ if (mlxsw_sp_port_vlan &&
+ mlxsw_sp_port_vlan->bridge_port != bridge_port)
+ return -EEXIST;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (!mlxsw_sp_port_vlan) {
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
+ vid);
+ if (IS_ERR(mlxsw_sp_port_vlan))
+ return PTR_ERR(mlxsw_sp_port_vlan);
+ }
err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
is_untagged);
@@ -1117,7 +1114,8 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
goto err_port_pvid_set;
- err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
+ err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
+ extack);
if (err)
goto err_port_vlan_bridge_join;
@@ -1128,7 +1126,7 @@ err_port_vlan_bridge_join:
err_port_pvid_set:
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
return err;
}
@@ -1167,7 +1165,8 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
@@ -1189,9 +1188,6 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
@@ -1204,7 +1200,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
vid, flag_untagged,
- flag_pvid);
+ flag_pvid, extack, trans);
if (err)
return err;
}
@@ -1773,7 +1769,8 @@ static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
const struct switchdev_obj_port_vlan *vlan;
@@ -1782,7 +1779,8 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans);
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
+ extack);
if (switchdev_trans_ph_prepare(trans)) {
/* The event is emitted before the changes are actually
@@ -1820,7 +1818,7 @@ mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -1968,8 +1966,6 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
.switchdev_port_attr_get = mlxsw_sp_port_attr_get,
.switchdev_port_attr_set = mlxsw_sp_port_attr_set,
- .switchdev_port_obj_add = mlxsw_sp_port_obj_add,
- .switchdev_port_obj_del = mlxsw_sp_port_obj_del,
};
static int
@@ -1978,19 +1974,14 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_port *mlxsw_sp_port,
struct netlink_ext_ack *extack)
{
- struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
-
if (is_vlan_dev(bridge_port->dev)) {
NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
return -EINVAL;
}
- mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
- if (WARN_ON(!mlxsw_sp_port_vlan))
- return -EINVAL;
-
- /* Let VLAN-aware bridge take care of its own VLANs */
- mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
+ /* Port is no longer usable as a router interface */
+ if (mlxsw_sp_port->default_vlan->fid)
+ mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
return 0;
}
@@ -2000,41 +1991,133 @@ mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port)
{
- mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
/* Make sure untagged frames are allowed to ingress */
- mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev,
+ const struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack)
{
- WARN_ON(1);
- return -EINVAL;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ struct mlxsw_sp_nve_params params = {
+ .type = MLXSW_SP_NVE_TYPE_VXLAN,
+ .vni = vxlan->cfg.vni,
+ .dev = vxlan_dev,
+ };
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ /* If the VLAN is 0, we need to find the VLAN that is configured as
+ * PVID and egress untagged on the bridge port of the VxLAN device.
+ * It is possible that no such VLAN exists
+ */
+ if (!vid) {
+ err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
+ if (err || !vid)
+ return err;
+ }
+
+ /* If no other port is a member of the VLAN, then the FID does not exist.
+ * NVE will be enabled on the FID once a port joins the VLAN
+ */
+ fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
+ if (!fid)
+ return 0;
+
+ if (mlxsw_sp_fid_vni_is_set(fid)) {
+ err = -EINVAL;
+ goto err_vni_exists;
+ }
+
+ err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
+ if (err)
+ goto err_nve_fid_enable;
+
+ /* The tunnel port does not hold a reference on the FID. Only
+ * local ports and the router port do.
+ */
+ mlxsw_sp_fid_put(fid);
+
+ return 0;
+
+err_nve_fid_enable:
+err_vni_exists:
+ mlxsw_sp_fid_put(fid);
+ return err;
}
-static void
-mlxsw_sp_bridge_8021q_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev)
+static struct net_device *
+mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
{
+ struct net_device *dev;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(br_dev, dev, iter) {
+ u16 pvid;
+ int err;
+
+ if (!netif_is_vxlan(dev))
+ continue;
+
+ err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
+ if (err || pvid != vid)
+ continue;
+
+ return dev;
+ }
+
+ return NULL;
}
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
- u16 vid)
+ u16 vid, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+ struct net_device *vxlan_dev;
+ struct mlxsw_sp_fid *fid;
+ int err;
+
+ fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+ if (IS_ERR(fid))
+ return fid;
+
+ if (mlxsw_sp_fid_vni_is_set(fid))
+ return fid;
+
+ /* Find the VxLAN device that has the specified VLAN configured as
+ * PVID and egress untagged. There can be at most one such device
+ */
+ vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev,
+ vid);
+ if (!vxlan_dev)
+ return fid;
+
+ if (!netif_running(vxlan_dev))
+ return fid;
+
+ err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
+ if (err)
+ goto err_vxlan_join;
+
+ return fid;
- return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
+err_vxlan_join:
+ mlxsw_sp_fid_put(fid);
+ return ERR_PTR(err);
}
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
u16 vid)
{
- WARN_ON(1);
- return NULL;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
+
+ return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
}
static u16
@@ -2048,7 +2131,6 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
.port_join = mlxsw_sp_bridge_8021q_port_join,
.port_leave = mlxsw_sp_bridge_8021q_port_leave,
.vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join,
- .vxlan_leave = mlxsw_sp_bridge_8021q_vxlan_leave,
.fid_get = mlxsw_sp_bridge_8021q_fid_get,
.fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
.fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
@@ -2081,7 +2163,7 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct net_device *dev = bridge_port->dev;
u16 vid;
- vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
@@ -2095,7 +2177,8 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
if (mlxsw_sp_port_vlan->fid)
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
- return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port);
+ return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
+ extack);
}
static void
@@ -2107,9 +2190,9 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
struct net_device *dev = bridge_port->dev;
u16 vid;
- vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
- if (WARN_ON(!mlxsw_sp_port_vlan))
+ if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
return;
mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
@@ -2117,7 +2200,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
static int
mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev,
+ const struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
@@ -2134,8 +2217,10 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
if (!fid)
return -EINVAL;
- if (mlxsw_sp_fid_vni_is_set(fid))
- return -EINVAL;
+ if (mlxsw_sp_fid_vni_is_set(fid)) {
+ err = -EINVAL;
+ goto err_vni_exists;
+ }
err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
if (err)
@@ -2149,33 +2234,14 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
return 0;
err_nve_fid_enable:
+err_vni_exists:
mlxsw_sp_fid_put(fid);
return err;
}
-static void
-mlxsw_sp_bridge_8021d_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device,
- const struct net_device *vxlan_dev)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
- struct mlxsw_sp_fid *fid;
-
- fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
- if (WARN_ON(!fid))
- return;
-
- /* If the VxLAN device is down, then the FID does not have a VNI */
- if (!mlxsw_sp_fid_vni_is_set(fid))
- goto out;
-
- mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
-out:
- mlxsw_sp_fid_put(fid);
-}
-
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
- u16 vid)
+ u16 vid, struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
struct net_device *vxlan_dev;
@@ -2196,7 +2262,8 @@ mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
if (!netif_running(vxlan_dev))
return fid;
- err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, NULL);
+ err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0,
+ extack);
if (err)
goto err_vxlan_join;
@@ -2231,7 +2298,6 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
.port_join = mlxsw_sp_bridge_8021d_port_join,
.port_leave = mlxsw_sp_bridge_8021d_port_leave,
.vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join,
- .vxlan_leave = mlxsw_sp_bridge_8021d_vxlan_leave,
.fid_get = mlxsw_sp_bridge_8021d_fid_get,
.fid_lookup = mlxsw_sp_bridge_8021d_fid_lookup,
.fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
@@ -2286,7 +2352,7 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev,
- const struct net_device *vxlan_dev,
+ const struct net_device *vxlan_dev, u16 vid,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_bridge_device *bridge_device;
@@ -2295,20 +2361,102 @@ int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
if (WARN_ON(!bridge_device))
return -EINVAL;
- return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, extack);
+ return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
}
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev,
const struct net_device *vxlan_dev)
{
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ struct mlxsw_sp_fid *fid;
+
+ /* If the VxLAN device is down, then the FID does not have a VNI */
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
+ if (!fid)
+ return;
+
+ mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
+ mlxsw_sp_fid_put(fid);
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev,
+ u16 vid,
+ struct netlink_ext_ack *extack)
+{
struct mlxsw_sp_bridge_device *bridge_device;
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
if (WARN_ON(!bridge_device))
- return;
+ return ERR_PTR(-EINVAL);
- bridge_device->ops->vxlan_leave(bridge_device, vxlan_dev);
+ return bridge_device->ops->fid_get(bridge_device, vid, extack);
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
+ enum mlxsw_sp_l3proto *proto,
+ union mlxsw_sp_l3addr *addr)
+{
+ if (vxlan_addr->sa.sa_family == AF_INET) {
+ addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV4;
+ } else {
+ addr->addr6 = vxlan_addr->sin6.sin6_addr;
+ *proto = MLXSW_SP_L3_PROTO_IPV6;
+ }
+}
+
+static void
+mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
+ const union mlxsw_sp_l3addr *addr,
+ union vxlan_addr *vxlan_addr)
+{
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ vxlan_addr->sa.sa_family = AF_INET;
+ vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ vxlan_addr->sa.sa_family = AF_INET6;
+ vxlan_addr->sin6.sin6_addr = addr->addr6;
+ break;
+ }
+}
+
+static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
+ const char *mac,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr,
+ __be32 vni, bool adding)
+{
+ struct switchdev_notifier_vxlan_fdb_info info;
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ enum switchdev_notifier_type type;
+
+ type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
+ SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
+ info.remote_port = vxlan->cfg.dst_port;
+ info.remote_vni = vni;
+ info.remote_ifindex = 0;
+ ether_addr_copy(info.eth_addr, mac);
+ info.vni = vni;
+ info.offloaded = adding;
+ call_switchdev_notifiers(type, dev, &info.info);
+}
+
+static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
+ const char *mac,
+ enum mlxsw_sp_l3proto proto,
+ union mlxsw_sp_l3addr *addr,
+ __be32 vni,
+ bool adding)
+{
+ if (netif_is_vxlan(dev))
+ mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
+ adding);
}
static void
@@ -2419,7 +2567,8 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
bridge_device = bridge_port->bridge_device;
vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
- lag_vid = mlxsw_sp_port_vlan->vid;
+ lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
+ mlxsw_sp_port_vlan->vid : 0;
do_fdb_op:
err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
@@ -2442,6 +2591,122 @@ just_remove:
goto do_fdb_op;
}
+static int
+__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_fid *fid,
+ bool adding,
+ struct net_device **nve_dev,
+ u16 *p_vid, __be32 *p_vni)
+{
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct net_device *br_dev, *dev;
+ int nve_ifindex;
+ int err;
+
+ err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_fid_vni(fid, p_vni);
+ if (err)
+ return err;
+
+ dev = __dev_get_by_index(&init_net, nve_ifindex);
+ if (!dev)
+ return -EINVAL;
+ *nve_dev = dev;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
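+ /* When adding, honour the learning flags: learning must be enabled on
+  * the bridge port and (checked below) on the VxLAN device itself.
+  */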
+ if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
+ return -EINVAL;
+
+ if (adding && netif_is_vxlan(dev)) {
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+
+ if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
+ return -EINVAL;
+ }
+
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev)
+ return -EINVAL;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return -EINVAL;
+
+ *p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
+
+ return 0;
+}
+
+static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
+ char *sfn_pl,
+ int rec_index,
+ bool adding)
+{
+ enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
+ enum switchdev_notifier_type type;
+ struct net_device *nve_dev;
+ union mlxsw_sp_l3addr addr;
+ struct mlxsw_sp_fid *fid;
+ char mac[ETH_ALEN];
+ u16 fid_index, vid;
+ __be32 vni;
+ u32 uip;
+ int err;
+
+ mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
+ &uip, &sfn_proto);
+
+ fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
+ if (!fid)
+ goto err_fid_lookup;
+
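+ /* The SFN register protocol values are assumed to map 1:1 onto
+  * enum mlxsw_sp_l3proto, which is what makes the casts below safe.
+  */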
+ err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr);
+ if (err)
+ goto err_ip_resolve;
+
+ err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
+ &nve_dev, &vid, &vni);
+ if (err)
+ goto err_fdb_process;
+
+ err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr, adding, true);
+ if (err)
+ goto err_fdb_op;
+
+ mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
+ (enum mlxsw_sp_l3proto) sfn_proto,
+ &addr, vni, adding);
+
+ type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
+ SWITCHDEV_FDB_DEL_TO_BRIDGE;
+ mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
+
+ mlxsw_sp_fid_put(fid);
+
+ return;
+
+err_fdb_op:
+err_fdb_process:
+err_ip_resolve:
+ mlxsw_sp_fid_put(fid);
+err_fid_lookup:
+ /* Remove an FDB entry in case we cannot process it. Otherwise the
+ * device will keep sending the same notification over and over again.
+ */
+ mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
+ (enum mlxsw_sp_l3proto) sfn_proto, &addr,
+ false, true);
+}
+
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
char *sfn_pl, int rec_index)
{
@@ -2462,6 +2727,14 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
rec_index, false);
break;
+ case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
+ mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
+ rec_index, true);
+ break;
+ case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
+ mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
+ rec_index, false);
+ break;
}
}
@@ -2517,20 +2790,6 @@ struct mlxsw_sp_switchdev_event_work {
};
static void
-mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
- enum mlxsw_sp_l3proto *proto,
- union mlxsw_sp_l3addr *addr)
-{
- if (vxlan_addr->sa.sa_family == AF_INET) {
- addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
- *proto = MLXSW_SP_L3_PROTO_IPV4;
- } else {
- addr->addr6 = vxlan_addr->sin6.sin6_addr;
- *proto = MLXSW_SP_L3_PROTO_IPV6;
- }
-}
-
-static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_switchdev_event_work *
switchdev_work,
@@ -2595,7 +2854,8 @@ mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
return;
- if (!switchdev_work->fdb_info.added_by_user)
+ if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
+ !switchdev_work->fdb_info.added_by_user)
return;
if (!netif_running(dev))
@@ -2938,10 +3198,274 @@ err_addr_alloc:
return NOTIFY_BAD;
}
-static struct notifier_block mlxsw_sp_switchdev_notifier = {
+struct notifier_block mlxsw_sp_switchdev_notifier = {
.notifier_call = mlxsw_sp_switchdev_event,
};
+static int
+mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid,
+ bool flag_untagged, bool flag_pvid,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ __be32 vni = vxlan->cfg.vni;
+ struct mlxsw_sp_fid *fid;
+ u16 old_vid;
+ int err;
+
+ /* We cannot have the same VLAN as PVID and egress untagged on multiple
+ * VxLAN devices. Note that we get this notification before the VLAN is
+ * actually added to the bridge's database, so it is not possible for
+ * the lookup function to return 'vxlan_dev'
+ */
+ if (flag_untagged && flag_pvid &&
+ mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid))
+ return -EINVAL;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ if (!netif_running(vxlan_dev))
+ return 0;
+
+ /* First case: FID is not associated with this VNI, but the new VLAN
+ * is both PVID and egress untagged. Need to enable NVE on the FID, if
+ * it exists
+ */
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
+ if (!fid) {
+ if (!flag_untagged || !flag_pvid)
+ return 0;
+ return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
+ vxlan_dev, vid, extack);
+ }
+
+ /* Second case: FID is associated with the VNI and the VLAN associated
+ * with the FID is the same as the notified VLAN. This means the flags
+ * (PVID / egress untagged) were toggled and that NVE should be
+ * disabled on the FID
+ */
+ old_vid = mlxsw_sp_fid_8021q_vid(fid);
+ if (vid == old_vid) {
+ if (WARN_ON(flag_untagged && flag_pvid)) {
+ mlxsw_sp_fid_put(fid);
+ return -EINVAL;
+ }
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ mlxsw_sp_fid_put(fid);
+ return 0;
+ }
+
+ /* Third case: A new VLAN was configured on the VxLAN device, but this
+ * VLAN is not PVID, so there is nothing to do.
+ */
+ if (!flag_pvid) {
+ mlxsw_sp_fid_put(fid);
+ return 0;
+ }
+
+ /* Fourth case: The new VLAN is PVID, which means the VLAN currently
+ * mapped to the VNI should be unmapped
+ */
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+ mlxsw_sp_fid_put(fid);
+
+ /* Fifth case: The new VLAN is also egress untagged, which means the
+ * VLAN needs to be mapped to the VNI
+ */
+ if (!flag_untagged)
+ return 0;
+
+ err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
+ extack);
+ if (err)
+ goto err_vxlan_join;
+
+ return 0;
+
+err_vxlan_join:
+ mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
+ NULL);
+ return err;
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ const struct net_device *vxlan_dev, u16 vid)
+{
+ struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
+ __be32 vni = vxlan->cfg.vni;
+ struct mlxsw_sp_fid *fid;
+
+ if (!netif_running(vxlan_dev))
+ return;
+
+ fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
+ if (!fid)
+ return;
+
+ /* A different VLAN than the one mapped to the VNI is deleted */
+ if (mlxsw_sp_fid_8021q_vid(fid) != vid)
+ goto out;
+
+ mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
+
+out:
+ mlxsw_sp_fid_put(fid);
+}
+
+static int
+mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ struct switchdev_obj_port_vlan *vlan =
+ SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
+ bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct switchdev_trans *trans = port_obj_info->trans;
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct netlink_ext_ack *extack;
+ struct mlxsw_sp *mlxsw_sp;
+ struct net_device *br_dev;
+ u16 vid;
+
+ extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+ br_dev = netdev_master_upper_dev_get(vxlan_dev);
+ if (!br_dev)
+ return 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ port_obj_info->handled = true;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return -EINVAL;
+
+ if (!bridge_device->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
+ vxlan_dev, vid,
+ flag_untagged,
+ flag_pvid, trans,
+ extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ struct switchdev_obj_port_vlan *vlan =
+ SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
+ struct mlxsw_sp_bridge_device *bridge_device;
+ struct mlxsw_sp *mlxsw_sp;
+ struct net_device *br_dev;
+ u16 vid;
+
+ br_dev = netdev_master_upper_dev_get(vxlan_dev);
+ if (!br_dev)
+ return;
+
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return;
+
+ port_obj_info->handled = true;
+
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
+ if (!bridge_device)
+ return;
+
+ if (!bridge_device->vlan_enabled)
+ return;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
+ vxlan_dev, vid);
+}
+
+static int
+mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ int err = 0;
+
+ switch (port_obj_info->obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
+ port_obj_info);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static void
+mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
+ struct switchdev_notifier_port_obj_info *
+ port_obj_info)
+{
+ switch (port_obj_info->obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
+ break;
+ default:
+ break;
+ }
+}
+
+static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err = 0;
+
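+ /* VxLAN devices are not mlxsw ports, so their objects are handled
+  * directly instead of through the port dispatch helpers.
+  */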
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ if (netif_is_vxlan(dev))
+ err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
+ else
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ if (netif_is_vxlan(dev))
+ mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
+ else
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ mlxsw_sp_port_dev_check,
+ mlxsw_sp_port_obj_del);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
+ .notifier_call = mlxsw_sp_switchdev_blocking_event,
+};
+
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
@@ -2951,6 +3475,7 @@ mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
+ struct notifier_block *nb;
int err;
err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
@@ -2965,17 +3490,33 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
return err;
}
+ nb = &mlxsw_sp_switchdev_blocking_notifier;
+ err = register_switchdev_blocking_notifier(nb);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
+ goto err_register_switchdev_blocking_notifier;
+ }
+
INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
return 0;
+
+err_register_switchdev_blocking_notifier:
+ unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+ return err;
}
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct notifier_block *nb;
+
cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
- unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
+ nb = &mlxsw_sp_switchdev_blocking_notifier;
+ unregister_switchdev_blocking_notifier(nb);
+
+ unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
}
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 6f18f4d3322a..451216dd7f6b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -60,6 +60,7 @@ enum {
MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91,
MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92,
MLXSW_TRAP_ID_IPIP_DECAP_ERROR = 0xB1,
+ MLXSW_TRAP_ID_NVE_DECAP_ARP = 0xB8,
MLXSW_TRAP_ID_NVE_ENCAP_ARP = 0xBD,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6,
MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7,
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 867cddba840f..20c9377e99cb 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -802,14 +802,8 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
u32 mac_addr_hi = 0;
u32 mac_addr_lo = 0;
u32 data;
- int ret;
netdev = adapter->netdev;
- lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_);
- ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_,
- 0, 1000, 20000, 100);
- if (ret)
- return ret;
/* set up auto duplex and speed detection */
data = lan743x_csr_read(adapter, MAC_CR);
@@ -1672,7 +1666,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
netif_wake_queue(adapter->netdev);
}
- if (!napi_complete_done(napi, weight))
+ if (!napi_complete(napi))
goto done;
/* enable isr */
@@ -1681,7 +1675,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
lan743x_csr_read(adapter, INT_STS);
done:
- return weight;
+ return 0;
}
static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
@@ -1870,9 +1864,9 @@ static int lan743x_tx_open(struct lan743x_tx *tx)
tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
INT_BIT_DMA_TX_
(tx->channel_number));
- netif_napi_add(adapter->netdev,
- &tx->napi, lan743x_tx_napi_poll,
- tx->ring_size - 1);
+ netif_tx_napi_add(adapter->netdev,
+ &tx->napi, lan743x_tx_napi_poll,
+ tx->ring_size - 1);
napi_enable(&tx->napi);
data = 0;
@@ -2719,8 +2713,9 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
"pci-%s", pci_name(adapter->pdev));
- /* set to internal PHY id */
- adapter->mdiobus->phy_mask = ~(u32)BIT(1);
+ if ((adapter->csr.id_rev & ID_REV_ID_MASK_) == ID_REV_ID_LAN7430_)
+ /* LAN7430 uses an internal PHY at address 1 */
+ adapter->mdiobus->phy_mask = ~(u32)BIT(1);
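+ /* LAN7431 presumably uses an external PHY, so phy_mask is left at
+  * zero and all bus addresses are scanned.
+  */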
/* register mdiobus */
ret = mdiobus_register(adapter->mdiobus);
@@ -3017,6 +3012,7 @@ static const struct dev_pm_ops lan743x_pm_ops = {
static const struct pci_device_id lan743x_pcidev_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
{ 0, }
};
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 0e82b6368798..2d6eea18973e 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -548,6 +548,7 @@ struct lan743x_adapter;
/* SMSC acquired EFAR in the late 1990s; MCHP acquired SMSC in 2012 */
#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR
#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430)
+#define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431)
#define PCI_CONFIG_LENGTH (0x1000)
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 3238b9ee42f3..0dca2fa51dc3 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -15,6 +15,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/skbuff.h>
+#include <linux/iopoll.h>
#include <net/arp.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
@@ -22,6 +23,9 @@
#include "ocelot.h"
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+
/* MAC table entry types.
* ENTRYTYPE_NORMAL is subject to aging.
* ENTRYTYPE_LOCKED is not subject to aging.
@@ -41,23 +45,20 @@ struct ocelot_mact_entry {
enum macaccess_entry_type type;
};
-static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
+static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
{
- unsigned int val, timeout = 10;
-
- /* Wait for the issued mac table command to be completed, or timeout.
- * When the command read from ANA_TABLES_MACACCESS is
- * MACACCESS_CMD_IDLE, the issued command completed successfully.
- */
- do {
- val = ocelot_read(ocelot, ANA_TABLES_MACACCESS);
- val &= ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M;
- } while (val != MACACCESS_CMD_IDLE && timeout--);
+ return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
+}
- if (!timeout)
- return -ETIMEDOUT;
+static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
+{
+ u32 val;
- return 0;
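+ /* Poll until the issued MAC table command completes (returns to
+  * IDLE), or time out.
+  */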
+ return readx_poll_timeout(ocelot_mact_read_macaccess,
+ ocelot, val,
+ (val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) ==
+ MACACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}
static void ocelot_mact_select(struct ocelot *ocelot,
@@ -129,23 +130,21 @@ static void ocelot_mact_init(struct ocelot *ocelot)
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
-static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
+static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
{
- unsigned int val, timeout = 10;
-
- /* Wait for the issued vlan table command to be completed, or timeout.
- * When the command read from ANA_TABLES_VLANACCESS is
- * VLANACCESS_CMD_IDLE, the issued command completed successfully.
- */
- do {
- val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
- val &= ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M;
- } while (val != ANA_TABLES_VLANACCESS_CMD_IDLE && timeout--);
+ return ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
+}
- if (!timeout)
- return -ETIMEDOUT;
+static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
+{
+ u32 val;
- return 0;
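+ /* Poll until the issued VLAN table command completes (returns to
+  * IDLE), or time out.
+  */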
+ return readx_poll_timeout(ocelot_vlant_read_vlanaccess,
+ ocelot,
+ val,
+ (val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) ==
+ ANA_TABLES_VLANACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}
static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
@@ -747,7 +746,7 @@ static int ocelot_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
}
return ocelot_mact_learn(ocelot, port->chip_port, addr, vid,
- ENTRYTYPE_NORMAL);
+ ENTRYTYPE_LOCKED);
}
static int ocelot_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
@@ -1293,7 +1292,8 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
static int ocelot_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
{
int ret = 0;
@@ -1337,8 +1337,6 @@ static int ocelot_port_obj_del(struct net_device *dev,
static const struct switchdev_ops ocelot_port_switchdev_ops = {
.switchdev_port_attr_get = ocelot_port_attr_get,
.switchdev_port_attr_set = ocelot_port_attr_set,
- .switchdev_port_obj_add = ocelot_port_obj_add,
- .switchdev_port_obj_del = ocelot_port_obj_del,
};
static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port,
@@ -1595,6 +1593,34 @@ struct notifier_block ocelot_netdevice_nb __read_mostly = {
};
EXPORT_SYMBOL(ocelot_netdevice_nb);
+static int ocelot_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ /* Blocking events. */
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ ocelot_netdevice_dev_check,
+ ocelot_port_obj_del);
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
+ .notifier_call = ocelot_switchdev_blocking_event,
+};
+EXPORT_SYMBOL(ocelot_switchdev_blocking_nb);
+
int ocelot_probe_port(struct ocelot *ocelot, u8 port,
void __iomem *regs,
struct phy_device *phy)
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 62c7c8eb00d9..086775f7b52f 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -499,5 +499,6 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port,
struct phy_device *phy);
extern struct notifier_block ocelot_netdevice_nb;
+extern struct notifier_block ocelot_switchdev_blocking_nb;
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 4c23d18bbf44..ca3ea2fbfcd0 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -12,6 +12,7 @@
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
#include <linux/skbuff.h>
+#include <net/switchdev.h>
#include "ocelot.h"
@@ -328,6 +329,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
}
register_netdevice_notifier(&ocelot_netdevice_nb);
+ register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
dev_info(&pdev->dev, "Ocelot switch probed\n");
@@ -342,6 +344,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
struct ocelot *ocelot = platform_get_drvdata(pdev);
ocelot_deinit(ocelot);
+ unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
return 0;
diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig
index c26e0f70c494..7df20561e3fa 100644
--- a/drivers/net/ethernet/neterion/Kconfig
+++ b/drivers/net/ethernet/neterion/Kconfig
@@ -26,7 +26,7 @@ config S2IO
on its age.
More specific information on configuring the driver is in
- <file:Documentation/networking/s2io.txt>.
+ <file:Documentation/networking/device_drivers/neterion/s2io.txt>.
To compile this driver as a module, choose M here. The module
will be called s2io.
@@ -41,7 +41,7 @@ config VXGE
labeled as either one, depending on its age.
More specific information on configuring the driver is in
- <file:Documentation/networking/vxge.txt>.
+ <file:Documentation/networking/device_drivers/neterion/vxge.txt>.
To compile this driver as a module, choose M here. The module
will be called vxge.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 4c1fb7e57888..7cde387e5ec6 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -808,7 +808,7 @@ __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
- u64 data0, data1 = 0, steer_ctrl = 0;
+ u64 data0 = 0, data1 = 0, steer_ctrl = 0;
enum vxge_hw_status status;
status = vxge_hw_vpath_fw_api(vpath,
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index f7a0d1d5885e..59e77e3086bb 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -1695,17 +1695,10 @@ exit:
*/
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
- struct __vxge_hw_fifo_txdl_priv *txdl_priv;
- u32 max_frags;
struct __vxge_hw_channel *channel;
channel = &fifo->channel;
- txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
- (struct vxge_hw_fifo_txd *)txdlh);
-
- max_frags = fifo->config->max_frags;
-
vxge_hw_channel_dtr_free(channel, txdlh);
}
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 4afb10375397..47c708f08ade 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -56,7 +56,9 @@ endif
ifeq ($(CONFIG_NFP_APP_ABM_NIC),y)
nfp-objs += \
+ abm/cls.o \
abm/ctrl.o \
+ abm/qdisc.o \
abm/main.o
endif
diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c
new file mode 100644
index 000000000000..9852080cf454
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/bitfield.h>
+#include <net/pkt_cls.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_net_repr.h"
+#include "main.h"
+
+struct nfp_abm_u32_match {
+ u32 handle;
+ u32 band;
+ u8 mask;
+ u8 val;
+ struct list_head list;
+};
+
+static bool
+nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode,
+ __be16 proto, struct netlink_ext_ack *extack)
+{
+ struct tc_u32_key *k;
+ unsigned int tos_off;
+
+ if (knode->exts && tcf_exts_has_actions(knode->exts)) {
+ NL_SET_ERR_MSG_MOD(extack, "action offload not supported");
+ return false;
+ }
+ if (knode->link_handle) {
+ NL_SET_ERR_MSG_MOD(extack, "linking not supported");
+ return false;
+ }
+ if (knode->sel->flags != TC_U32_TERMINAL) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "flags must be equal to TC_U32_TERMINAL");
+ return false;
+ }
+ if (knode->sel->off || knode->sel->offshift || knode->sel->offmask ||
+ knode->sel->offoff || knode->fshift) {
+ NL_SET_ERR_MSG_MOD(extack, "variable offseting not supported");
+ return false;
+ }
+ if (knode->sel->hoff || knode->sel->hmask) {
+ NL_SET_ERR_MSG_MOD(extack, "hashing not supported");
+ return false;
+ }
+ if (knode->val || knode->mask) {
+ NL_SET_ERR_MSG_MOD(extack, "matching on mark not supported");
+ return false;
+ }
+ if (knode->res && knode->res->class) {
+ NL_SET_ERR_MSG_MOD(extack, "setting non-0 class not supported");
+ return false;
+ }
+ if (knode->res && knode->res->classid >= abm->num_bands) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "classid higher than number of bands");
+ return false;
+ }
+ if (knode->sel->nkeys != 1) {
+ NL_SET_ERR_MSG_MOD(extack, "exactly one key required");
+ return false;
+ }
+
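+ /* Bit offset of the ToS (IPv4) / Traffic Class (IPv6) byte within
+  * the first 32-bit word of the IP header, once in host order.
+  */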
+ switch (proto) {
+ case htons(ETH_P_IP):
+ tos_off = 16;
+ break;
+ case htons(ETH_P_IPV6):
+ tos_off = 20;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "only IP and IPv6 supported as filter protocol");
+ return false;
+ }
+
+ k = &knode->sel->keys[0];
+ if (k->offmask) {
+ NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offseting not supported");
+ return false;
+ }
+ if (k->off) {
+ NL_SET_ERR_MSG_MOD(extack, "only DSCP fields can be matched");
+ return false;
+ }
+ if (k->val & ~k->mask) {
+ NL_SET_ERR_MSG_MOD(extack, "mask does not cover the key");
+ return false;
+ }
+ if (be32_to_cpu(k->mask) >> tos_off & ~abm->dscp_mask) {
+ NL_SET_ERR_MSG_MOD(extack, "only high DSCP class selector bits can be used");
+ nfp_err(abm->app->cpp,
+ "u32 offload: requested mask %x FW can support only %x\n",
+ be32_to_cpu(k->mask) >> tos_off, abm->dscp_mask);
+ return false;
+ }
+
+ return true;
+}
+
+/* This filter list -> map conversion is O(n * m), but we expect a single- or
+ * low double-digit number of prios and likewise for the filters. Also, u32
+ * doesn't report stats, so this is really only a setup-time cost.
+ */
+static unsigned int
+nfp_abm_find_band_for_prio(struct nfp_abm_link *alink, unsigned int prio)
+{
+ struct nfp_abm_u32_match *iter;
+
+ list_for_each_entry(iter, &alink->dscp_map, list)
+ if ((prio & iter->mask) == iter->val)
+ return iter->band;
+
+ return alink->def_band;
+}
+
+static int nfp_abm_update_band_map(struct nfp_abm_link *alink)
+{
+ unsigned int i, bits_per_prio, prios_per_word, base_shift;
+ struct nfp_abm *abm = alink->abm;
+ u32 field_mask;
+
+ alink->has_prio = !list_empty(&alink->dscp_map);
+
+ bits_per_prio = roundup_pow_of_two(order_base_2(abm->num_bands));
+ field_mask = (1 << bits_per_prio) - 1;
+ prios_per_word = sizeof(u32) * BITS_PER_BYTE / bits_per_prio;
+
+ /* FW mask applies from top bits */
+ base_shift = 8 - order_base_2(abm->num_prios);
+
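+ /* Pack each prio's band index into a bits_per_prio-wide field of
+  * the prio_map words.
+  */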
+ for (i = 0; i < abm->num_prios; i++) {
+ unsigned int offset;
+ u32 *word;
+ u8 band;
+
+ word = &alink->prio_map[i / prios_per_word];
+ offset = (i % prios_per_word) * bits_per_prio;
+
+ band = nfp_abm_find_band_for_prio(alink, i << base_shift);
+
+ *word &= ~(field_mask << offset);
+ *word |= band << offset;
+ }
+
+ /* Qdisc offload status may change if has_prio changed */
+ nfp_abm_qdisc_offload_update(alink);
+
+ return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
+}
+
+static void
+nfp_abm_u32_knode_delete(struct nfp_abm_link *alink,
+ struct tc_cls_u32_knode *knode)
+{
+ struct nfp_abm_u32_match *iter;
+
+ list_for_each_entry(iter, &alink->dscp_map, list)
+ if (iter->handle == knode->handle) {
+ list_del(&iter->list);
+ kfree(iter);
+ nfp_abm_update_band_map(alink);
+ return;
+ }
+}
+
+static int
+nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
+ struct tc_cls_u32_knode *knode,
+ __be16 proto, struct netlink_ext_ack *extack)
+{
+ struct nfp_abm_u32_match *match = NULL, *iter;
+ unsigned int tos_off;
+ u8 mask, val;
+ int err;
+
+ if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
+ goto err_delete;
+
+ tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
+
+ /* Extract the DSCP Class Selector bits */
+ val = be32_to_cpu(knode->sel->keys[0].val) >> tos_off & 0xff;
+ mask = be32_to_cpu(knode->sel->keys[0].mask) >> tos_off & 0xff;
+
+ /* Check that there is no conflicting mapping and find an existing match by handle */
+ list_for_each_entry(iter, &alink->dscp_map, list) {
+ u32 cmask;
+
+ if (iter->handle == knode->handle) {
+ match = iter;
+ continue;
+ }
+
+ cmask = iter->mask & mask;
+ if ((iter->val & cmask) == (val & cmask) &&
+ iter->band != knode->res->classid) {
+ NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
+ goto err_delete;
+ }
+ }
+
+ if (!match) {
+ match = kzalloc(sizeof(*match), GFP_KERNEL);
+ if (!match)
+ return -ENOMEM;
+ list_add(&match->list, &alink->dscp_map);
+ }
+ match->handle = knode->handle;
+ match->band = knode->res->classid;
+ match->mask = mask;
+ match->val = val;
+
+ err = nfp_abm_update_band_map(alink);
+ if (err)
+ goto err_delete;
+
+ return 0;
+
+err_delete:
+ nfp_abm_u32_knode_delete(alink, knode);
+ return -EOPNOTSUPP;
+}
+
+static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct tc_cls_u32_offload *cls_u32 = type_data;
+ struct nfp_repr *repr = cb_priv;
+ struct nfp_abm_link *alink;
+
+ alink = repr->app_priv;
+
+ if (type != TC_SETUP_CLSU32) {
+ NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
+ "only offload of u32 classifier supported");
+ return -EOPNOTSUPP;
+ }
+ if (!tc_cls_can_offload_and_chain0(repr->netdev, &cls_u32->common))
+ return -EOPNOTSUPP;
+
+ if (cls_u32->common.protocol != htons(ETH_P_IP) &&
+ cls_u32->common.protocol != htons(ETH_P_IPV6)) {
+ NL_SET_ERR_MSG_MOD(cls_u32->common.extack,
+ "only IP and IPv6 supported as filter protocol");
+ return -EOPNOTSUPP;
+ }
+
+ switch (cls_u32->command) {
+ case TC_CLSU32_NEW_KNODE:
+ case TC_CLSU32_REPLACE_KNODE:
+ return nfp_abm_u32_knode_replace(alink, &cls_u32->knode,
+ cls_u32->common.protocol,
+ cls_u32->common.extack);
+ case TC_CLSU32_DELETE_KNODE:
+ nfp_abm_u32_knode_delete(alink, &cls_u32->knode);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
+ struct tc_block_offload *f)
+{
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block,
+ nfp_abm_setup_tc_block_cb,
+ repr, repr, f->extack);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb,
+ repr);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 3c661f422688..9584f03f3efa 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
+#include <linux/bitops.h>
#include <linux/kernel.h>
+#include <linux/log2.h>
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
@@ -11,38 +13,58 @@
#include "../nfp_net.h"
#include "main.h"
-#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u"
+#define NFP_NUM_PRIOS_SYM_NAME "_abi_pci_dscp_num_prio_%u"
+#define NFP_NUM_BANDS_SYM_NAME "_abi_pci_dscp_num_band_%u"
+#define NFP_ACT_MASK_SYM_NAME "_abi_nfd_out_q_actions_%u"
+
+#define NFP_RED_SUPPORT_SYM_NAME "_abi_nfd_out_red_offload_%u"
+
+#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u%s"
#define NFP_QLVL_STRIDE 16
#define NFP_QLVL_BLOG_BYTES 0
#define NFP_QLVL_BLOG_PKTS 4
#define NFP_QLVL_THRS 8
+#define NFP_QLVL_ACT 12
-#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats"
+#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats%s"
#define NFP_QMSTAT_STRIDE 32
#define NFP_QMSTAT_NON_STO 0
#define NFP_QMSTAT_STO 8
#define NFP_QMSTAT_DROP 16
#define NFP_QMSTAT_ECN 24
+#define NFP_Q_STAT_SYM_NAME "_abi_nfd_rxq_stats%u%s"
+#define NFP_Q_STAT_STRIDE 16
+#define NFP_Q_STAT_PKTS 0
+#define NFP_Q_STAT_BYTES 8
+
+#define NFP_NET_ABM_MBOX_CMD NFP_NET_CFG_MBOX_SIMPLE_CMD
+#define NFP_NET_ABM_MBOX_RET NFP_NET_CFG_MBOX_SIMPLE_RET
+#define NFP_NET_ABM_MBOX_DATALEN NFP_NET_CFG_MBOX_SIMPLE_VAL
+#define NFP_NET_ABM_MBOX_RESERVED (NFP_NET_CFG_MBOX_SIMPLE_VAL + 4)
+#define NFP_NET_ABM_MBOX_DATA (NFP_NET_CFG_MBOX_SIMPLE_VAL + 8)
+
static int
nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
- unsigned int stride, unsigned int offset, unsigned int i,
- bool is_u64, u64 *res)
+ unsigned int stride, unsigned int offset, unsigned int band,
+ unsigned int queue, bool is_u64, u64 *res)
{
struct nfp_cpp *cpp = alink->abm->app->cpp;
u64 val, sym_offset;
+ unsigned int qid;
u32 val32;
int err;
- sym_offset = (alink->queue_base + i) * stride + offset;
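+ /* Per-band symbols are laid out as num_bands consecutive blocks of
+  * NFP_NET_MAX_RX_RINGS queue entries each.
+  */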
+ qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+ sym_offset = qid * stride + offset;
if (is_u64)
err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val);
else
err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32);
if (err) {
- nfp_err(cpp,
- "RED offload reading stat failed on vNIC %d queue %d\n",
- alink->id, i);
+ nfp_err(cpp, "RED offload reading stat failed on vNIC %d band %d queue %d (+ %d)\n",
+ alink->id, band, queue, alink->queue_base);
return err;
}
@@ -50,175 +72,179 @@ nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
return 0;
}
-static int
-nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
- unsigned int stride, unsigned int offset, bool is_u64,
- u64 *res)
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val)
{
- u64 val, sum = 0;
- unsigned int i;
+ struct nfp_cpp *cpp = abm->app->cpp;
+ u64 sym_offset;
int err;
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i,
- is_u64, &val);
- if (err)
- return err;
- sum += val;
+ __clear_bit(id, abm->threshold_undef);
+ if (abm->thresholds[id] == val)
+ return 0;
+
+ sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
+ err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, val);
+ if (err) {
+ nfp_err(cpp,
+ "RED offload setting level failed on subqueue %d\n",
+ id);
+ return err;
}
- *res = sum;
+ abm->thresholds[id] = val;
return 0;
}
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val)
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, u32 val)
{
- struct nfp_cpp *cpp = alink->abm->app->cpp;
+ unsigned int threshold;
+
+ threshold = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+ return __nfp_abm_ctrl_set_q_lvl(alink->abm, threshold, val);
+}
+
+int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
+ enum nfp_abm_q_action act)
+{
+ struct nfp_cpp *cpp = abm->app->cpp;
u64 sym_offset;
int err;
- sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
- err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0,
- sym_offset, val);
+ if (abm->actions[id] == act)
+ return 0;
+
+ sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_ACT;
+ err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, act);
if (err) {
- nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n",
- alink->id, i);
+ nfp_err(cpp,
+ "RED offload setting action failed on subqueue %d\n",
+ id);
return err;
}
+ abm->actions[id] = act;
return 0;
}
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val)
+int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, enum nfp_abm_q_action act)
+{
+ unsigned int qid;
+
+ qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue;
+
+ return __nfp_abm_ctrl_set_q_act(alink->abm, qid, act);
+}
+
+u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int queue)
{
- int i, err;
+ unsigned int band;
+ u64 val, sum = 0;
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_set_q_lvl(alink, i, val);
- if (err)
- return err;
+ for (band = 0; band < alink->abm->num_bands; band++) {
+ if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_NON_STO,
+ band, queue, true, &val))
+ return 0;
+ sum += val;
}
- return 0;
+ return sum;
}
-u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i)
+u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int queue)
{
- u64 val;
+ unsigned int band;
+ u64 val, sum = 0;
- if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
- NFP_QMSTAT_NON_STO, i, true, &val))
- return 0;
- return val;
+ for (band = 0; band < alink->abm->num_bands; band++) {
+ if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
+ NFP_QMSTAT_STRIDE, NFP_QMSTAT_STO,
+ band, queue, true, &val))
+ return 0;
+ sum += val;
+ }
+
+ return sum;
}
-u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i)
+static int
+nfp_abm_ctrl_stat_basic(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, unsigned int off, u64 *val)
{
- u64 val;
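+ /* Without prio support there are no per-band FW stats; fall back to
+  * the vNIC per-ring counters for band 0 and report zero otherwise.
+  */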
+ if (!nfp_abm_has_prio(alink->abm)) {
+ if (!band) {
+ unsigned int id = alink->queue_base + queue;
+
+ *val = nn_readq(alink->vnic,
+ NFP_NET_CFG_RXR_STATS(id) + off);
+ } else {
+ *val = 0;
+ }
- if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE,
- NFP_QMSTAT_STO, i, true, &val))
return 0;
- return val;
+ } else {
+ return nfp_abm_ctrl_stat(alink, alink->abm->q_stats,
+ NFP_Q_STAT_STRIDE, off, band, queue,
+ true, val);
+ }
}
-int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
- struct nfp_alink_stats *stats)
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, struct nfp_alink_stats *stats)
{
int err;
- stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
- stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
+ err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_PKTS,
+ &stats->tx_pkts);
+ if (err)
+ return err;
- err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
- i, false, &stats->backlog_bytes);
+ err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_BYTES,
+ &stats->tx_bytes);
+ if (err)
+ return err;
+
+ err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, NFP_QLVL_STRIDE,
+ NFP_QLVL_BLOG_BYTES, band, queue, false,
+ &stats->backlog_bytes);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls,
NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
- i, false, &stats->backlog_pkts);
+ band, queue, false, &stats->backlog_pkts);
if (err)
return err;
err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- i, true, &stats->drops);
+ band, queue, true, &stats->drops);
if (err)
return err;
return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- i, true, &stats->overlimits);
+ band, queue, true, &stats->overlimits);
}
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats)
-{
- u64 pkts = 0, bytes = 0;
- int i, err;
-
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
- bytes += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
- }
- stats->tx_pkts = pkts;
- stats->tx_bytes = bytes;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
- false, &stats->backlog_bytes);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
- false, &stats->backlog_pkts);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &stats->drops);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &stats->overlimits);
-}
-
-int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_xstats *xstats)
{
int err;
err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- i, true, &xstats->pdrop);
+ band, queue, true, &xstats->pdrop);
if (err)
return err;
return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats,
NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- i, true, &xstats->ecn_marked);
-}
-
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats)
-{
- int err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &xstats->pdrop);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &xstats->ecn_marked);
+ band, queue, true, &xstats->ecn_marked);
}
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm)
@@ -233,10 +259,64 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
NULL, 0, NULL, 0);
}
-void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
+int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
+{
+ struct nfp_net *nn = alink->vnic;
+ unsigned int i;
+ int err;
+
+ /* Write data_len and wipe reserved (the 64-bit write covers both words) */
+ nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
+ alink->abm->prio_map_len);
+
+ for (i = 0; i < alink->abm->prio_map_len; i += sizeof(u32))
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
+ packed[i / sizeof(u32)]);
+
+ err = nfp_net_reconfig_mbox(nn,
+ NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
+ if (err)
+ nfp_err(alink->abm->app->cpp,
+ "setting DSCP -> VQ map failed with error %d\n", err);
+ return err;
+}
+
+static int nfp_abm_ctrl_prio_check_params(struct nfp_abm_link *alink)
+{
+ struct nfp_abm *abm = alink->abm;
+ struct nfp_net *nn = alink->vnic;
+ unsigned int min_mbox_sz;
+
+ if (!nfp_abm_has_prio(alink->abm))
+ return 0;
+
+ min_mbox_sz = NFP_NET_ABM_MBOX_DATA + alink->abm->prio_map_len;
+ if (nn->tlv_caps.mbox_len < min_mbox_sz) {
+ nfp_err(abm->app->pf->cpp, "vNIC mailbox too small for prio offload: %u, need: %u\n",
+ nn->tlv_caps.mbox_len, min_mbox_sz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink)
{
alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ);
alink->queue_base /= alink->vnic->stride_rx;
+
+ return nfp_abm_ctrl_prio_check_params(alink);
+}
+
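+/* Each prio needs enough bits to hold a band index (rounded up to a power
+ * of two), and the packed map is padded out to whole 32-bit words.
+ */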
+static unsigned int nfp_abm_ctrl_prio_map_size(struct nfp_abm *abm)
+{
+ unsigned int size;
+
+ size = roundup_pow_of_two(order_base_2(abm->num_bands));
+ size = DIV_ROUND_UP(size * abm->num_prios, BITS_PER_BYTE);
+ size = round_up(size, sizeof(u32));
+
+ return size;
}
static const struct nfp_rtsym *
@@ -260,33 +340,86 @@ nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size)
}
static const struct nfp_rtsym *
-nfp_abm_ctrl_find_q_rtsym(struct nfp_pf *pf, const char *name,
- unsigned int size)
+nfp_abm_ctrl_find_q_rtsym(struct nfp_abm *abm, const char *name_fmt,
+ size_t size)
{
- return nfp_abm_ctrl_find_rtsym(pf, name, size * NFP_NET_MAX_RX_RINGS);
+ char pf_symbol[64];
+
+ size = array3_size(size, abm->num_bands, NFP_NET_MAX_RX_RINGS);
+ snprintf(pf_symbol, sizeof(pf_symbol), name_fmt,
+ abm->pf_id, nfp_abm_has_prio(abm) ? "_per_band" : "");
+
+ return nfp_abm_ctrl_find_rtsym(abm->app->pf, pf_symbol, size);
}
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm)
{
struct nfp_pf *pf = abm->app->pf;
const struct nfp_rtsym *sym;
- unsigned int pf_id;
- char pf_symbol[64];
+ int res;
+
+ abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp);
+
+ /* Check if Qdisc offloads are supported */
+ res = nfp_pf_rtsym_read_optional(pf, NFP_RED_SUPPORT_SYM_NAME, 1);
+ if (res < 0)
+ return res;
+ abm->red_support = res;
+
+ /* Read count of prios and prio bands */
+ res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_BANDS_SYM_NAME, 1);
+ if (res < 0)
+ return res;
+ abm->num_bands = res;
+
+ res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_PRIOS_SYM_NAME, 1);
+ if (res < 0)
+ return res;
+ abm->num_prios = res;
+
+ /* Read available actions */
+ res = nfp_pf_rtsym_read_optional(pf, NFP_ACT_MASK_SYM_NAME,
+ BIT(NFP_ABM_ACT_MARK_DROP));
+ if (res < 0)
+ return res;
+ abm->action_mask = res;
+
+ abm->prio_map_len = nfp_abm_ctrl_prio_map_size(abm);
+ abm->dscp_mask = GENMASK(7, 8 - order_base_2(abm->num_prios));
+
+ /* Check that the values are sane; U16_MAX is arbitrarily chosen as the max */
+ if (!is_power_of_2(abm->num_bands) || !is_power_of_2(abm->num_prios) ||
+ abm->num_bands > U16_MAX || abm->num_prios > U16_MAX ||
+ (abm->num_bands == 1) != (abm->num_prios == 1)) {
+ nfp_err(pf->cpp,
+ "invalid priomap description num bands: %u and num prios: %u\n",
+ abm->num_bands, abm->num_prios);
+ return -EINVAL;
+ }
- pf_id = nfp_cppcore_pcie_unit(pf->cpp);
- abm->pf_id = pf_id;
+ /* Find level and stat symbols */
+ if (!abm->red_support)
+ return 0;
- snprintf(pf_symbol, sizeof(pf_symbol), NFP_QLVL_SYM_NAME, pf_id);
- sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QLVL_STRIDE);
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QLVL_SYM_NAME,
+ NFP_QLVL_STRIDE);
if (IS_ERR(sym))
return PTR_ERR(sym);
abm->q_lvls = sym;
- snprintf(pf_symbol, sizeof(pf_symbol), NFP_QMSTAT_SYM_NAME, pf_id);
- sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QMSTAT_STRIDE);
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QMSTAT_SYM_NAME,
+ NFP_QMSTAT_STRIDE);
if (IS_ERR(sym))
return PTR_ERR(sym);
abm->qm_stats = sym;
+ if (nfp_abm_has_prio(abm)) {
+ sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_Q_STAT_SYM_NAME,
+ NFP_Q_STAT_STRIDE);
+ if (IS_ERR(sym))
+ return PTR_ERR(sym);
+ abm->q_stats = sym;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index c0830c0c2c3f..4d4ff5844c47 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -2,14 +2,13 @@
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
#include <linux/slab.h>
-#include <net/pkt_cls.h>
-#include <net/pkt_sched.h>
-#include <net/red.h>
#include "../nfpcore/nfp.h"
#include "../nfpcore/nfp_cpp.h"
@@ -28,269 +27,6 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id)
}
static int
-__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs, u32 init_val)
-{
- struct nfp_port *port = nfp_port_from_netdev(netdev);
- int ret;
-
- ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
- memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);
-
- alink->parent = handle;
- alink->num_qdiscs = qs;
- port->tc_offload_cnt = qs;
-
- return ret;
-}
-
-static void
-nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs)
-{
- __nfp_abm_reset_root(netdev, alink, handle, qs, ~0);
-}
-
-static int
-nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- unsigned int i = TC_H_MIN(opt->parent) - 1;
-
- if (opt->parent == TC_H_ROOT)
- i = 0;
- else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
- i = TC_H_MIN(opt->parent) - 1;
- else
- return -EOPNOTSUPP;
-
- if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
- return -EOPNOTSUPP;
-
- return i;
-}
-
-static void
-nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle)
-{
- unsigned int i;
-
- for (i = 0; i < alink->num_qdiscs; i++)
- if (handle == alink->qdiscs[i].handle)
- break;
- if (i == alink->num_qdiscs)
- return;
-
- if (alink->parent == TC_H_ROOT) {
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
- } else {
- nfp_abm_ctrl_set_q_lvl(alink, i, ~0);
- memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
- }
-}
-
-static int
-nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
-{
- bool existing;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- existing = i >= 0;
-
- if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
- nfp_warn(alink->abm->app->cpp,
- "RED offload failed - unsupported parameters\n");
- err = -EINVAL;
- goto err_destroy;
- }
-
- if (existing) {
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
- else
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- if (err)
- goto err_destroy;
- return 0;
- }
-
- if (opt->parent == TC_H_ROOT) {
- i = 0;
- err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
- opt->set.min);
- } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
- i = TC_H_MIN(opt->parent) - 1;
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- } else {
- return -EINVAL;
- }
- /* Set the handle to try full clean up, in case IO failed */
- alink->qdiscs[i].handle = opt->handle;
- if (err)
- goto err_destroy;
-
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i,
- &alink->qdiscs[i].stats);
- if (err)
- goto err_destroy;
-
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink,
- &alink->qdiscs[i].xstats);
- else
- err = nfp_abm_ctrl_read_q_xstats(alink, i,
- &alink->qdiscs[i].xstats);
- if (err)
- goto err_destroy;
-
- alink->qdiscs[i].stats.backlog_pkts = 0;
- alink->qdiscs[i].stats.backlog_bytes = 0;
-
- return 0;
-err_destroy:
- /* If the qdisc keeps on living, but we can't offload undo changes */
- if (existing) {
- opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
- opt->set.qstats->backlog -=
- alink->qdiscs[i].stats.backlog_bytes;
- }
- nfp_abm_red_destroy(netdev, alink, opt->handle);
-
- return err;
-}
-
-static void
-nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
- struct tc_qopt_offload_stats *stats)
-{
- _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
- new->tx_pkts - old->tx_pkts);
- stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
- stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
- stats->qstats->overlimits += new->overlimits - old->overlimits;
- stats->qstats->drops += new->drops - old->drops;
-}
-
-static int
-nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- struct nfp_alink_stats *prev_stats;
- struct nfp_alink_stats stats;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_stats = &alink->qdiscs[i].stats;
-
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
-
- nfp_abm_update_stats(&stats, prev_stats, &opt->stats);
-
- *prev_stats = stats;
-
- return 0;
-}
-
-static int
-nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- struct nfp_alink_xstats *prev_xstats;
- struct nfp_alink_xstats xstats;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_xstats = &alink->qdiscs[i].xstats;
-
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink, &xstats);
- else
- err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
- if (err)
- return err;
-
- opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
- opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;
-
- *prev_xstats = xstats;
-
- return 0;
-}
-
-static int
-nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
-{
- switch (opt->command) {
- case TC_RED_REPLACE:
- return nfp_abm_red_replace(netdev, alink, opt);
- case TC_RED_DESTROY:
- nfp_abm_red_destroy(netdev, alink, opt->handle);
- return 0;
- case TC_RED_STATS:
- return nfp_abm_red_stats(alink, opt);
- case TC_RED_XSTATS:
- return nfp_abm_red_xstats(alink, opt);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
-nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
-{
- struct nfp_alink_stats stats;
- unsigned int i;
- int err;
-
- for (i = 0; i < alink->num_qdiscs; i++) {
- if (alink->qdiscs[i].handle == TC_H_UNSPEC)
- continue;
-
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
-
- nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
- &opt->stats);
- }
-
- return 0;
-}
-
-static int
-nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_mq_qopt_offload *opt)
-{
- switch (opt->command) {
- case TC_MQ_CREATE:
- nfp_abm_reset_root(netdev, alink, opt->handle,
- alink->total_queues);
- return 0;
- case TC_MQ_DESTROY:
- if (opt->handle == alink->parent)
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
- return 0;
- case TC_MQ_STATS:
- return nfp_abm_mq_stats(alink, opt);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data)
{
@@ -302,10 +38,16 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
switch (type) {
+ case TC_SETUP_ROOT_QDISC:
+ return nfp_abm_setup_root(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_MQ:
return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_RED:
return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data);
+ case TC_SETUP_QDISC_GRED:
+ return nfp_abm_setup_tc_gred(netdev, repr->app_priv, type_data);
+ case TC_SETUP_BLOCK:
+ return nfp_abm_setup_cls_block(netdev, repr, type_data);
default:
return -EOPNOTSUPP;
}
@@ -384,7 +126,9 @@ nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink,
reprs = nfp_reprs_get_locked(app, rtype);
WARN(nfp_repr_get_locked(app, reprs, alink->id), "duplicate repr");
+ rtnl_lock();
rcu_assign_pointer(reprs->reprs[alink->id], netdev);
+ rtnl_unlock();
nfp_info(app->cpp, "%s Port %d Representor(%s) created\n",
ptype == NFP_PORT_PF_PORT ? "PCIe" : "Phys",
@@ -410,7 +154,9 @@ nfp_abm_kill_repr(struct nfp_app *app, struct nfp_abm_link *alink,
netdev = nfp_repr_get_locked(app, reprs, alink->id);
if (!netdev)
return;
+ rtnl_lock();
rcu_assign_pointer(reprs->reprs[alink->id], NULL);
+ rtnl_unlock();
synchronize_rcu();
/* Cast to make sure nfp_repr_clean_and_free() takes a nfp_repr */
nfp_repr_clean_and_free((struct nfp_repr *)netdev_priv(netdev));
@@ -461,6 +207,9 @@ static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm)
struct nfp_net *nn;
int err;
+ if (!abm->red_support)
+ return -EOPNOTSUPP;
+
err = nfp_abm_ctrl_qm_enable(abm);
if (err)
return err;
@@ -573,31 +322,34 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
alink->abm = abm;
alink->vnic = nn;
alink->id = id;
- alink->parent = TC_H_ROOT;
alink->total_queues = alink->vnic->max_rx_rings;
- alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs),
- GFP_KERNEL);
- if (!alink->qdiscs) {
- err = -ENOMEM;
+
+ INIT_LIST_HEAD(&alink->dscp_map);
+
+ err = nfp_abm_ctrl_read_params(alink);
+ if (err)
+ goto err_free_alink;
+
+ alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL);
+ if (!alink->prio_map)
goto err_free_alink;
- }
/* This is a multi-host app; make sure the MAC/PHY is up, but don't
* make the MAC/PHY state follow the state of any of the ports.
*/
err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
if (err < 0)
- goto err_free_qdiscs;
+ goto err_free_priomap;
netif_keep_dst(nn->dp.netdev);
nfp_abm_vnic_set_mac(app->pf, abm, nn, id);
- nfp_abm_ctrl_read_params(alink);
+ INIT_RADIX_TREE(&alink->qdiscs, GFP_KERNEL);
return 0;
-err_free_qdiscs:
- kvfree(alink->qdiscs);
+err_free_priomap:
+ kfree(alink->prio_map);
err_free_alink:
kfree(alink);
return err;
@@ -608,10 +360,20 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
struct nfp_abm_link *alink = nn->app_priv;
nfp_abm_kill_reprs(alink->abm, alink);
- kvfree(alink->qdiscs);
+ WARN(!radix_tree_empty(&alink->qdiscs), "left over qdiscs\n");
+ kfree(alink->prio_map);
kfree(alink);
}
+static int nfp_abm_vnic_init(struct nfp_app *app, struct nfp_net *nn)
+{
+ struct nfp_abm_link *alink = nn->app_priv;
+
+ if (nfp_abm_has_prio(alink->abm))
+ return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map);
+ return 0;
+}
+
static u64 *
nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data)
{
@@ -659,6 +421,21 @@ nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port,
return data;
}
+static int nfp_abm_fw_init_reset(struct nfp_abm *abm)
+{
+ unsigned int i;
+
+ if (!abm->red_support)
+ return 0;
+
+ for (i = 0; i < abm->num_bands * NFP_NET_MAX_RX_RINGS; i++) {
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+ __nfp_abm_ctrl_set_q_act(abm, i, NFP_ABM_ACT_DROP);
+ }
+
+ return nfp_abm_ctrl_qm_disable(abm);
+}
+
static int nfp_abm_init(struct nfp_app *app)
{
struct nfp_pf *pf = app->pf;
@@ -690,15 +467,31 @@ static int nfp_abm_init(struct nfp_app *app)
if (err)
goto err_free_abm;
+ err = -ENOMEM;
+ abm->num_thresholds = array_size(abm->num_bands, NFP_NET_MAX_RX_RINGS);
+ abm->threshold_undef = bitmap_zalloc(abm->num_thresholds, GFP_KERNEL);
+ if (!abm->threshold_undef)
+ goto err_free_abm;
+
+ abm->thresholds = kvcalloc(abm->num_thresholds,
+ sizeof(*abm->thresholds), GFP_KERNEL);
+ if (!abm->thresholds)
+ goto err_free_thresh_umap;
+
+ abm->actions = kvcalloc(abm->num_thresholds, sizeof(*abm->actions),
+ GFP_KERNEL);
+ if (!abm->actions)
+ goto err_free_thresh;
+
/* We start in legacy mode, make sure advanced queuing is disabled */
- err = nfp_abm_ctrl_qm_disable(abm);
+ err = nfp_abm_fw_init_reset(abm);
if (err)
- goto err_free_abm;
+ goto err_free_act;
err = -ENOMEM;
reprs = nfp_reprs_alloc(pf->max_data_vnics);
if (!reprs)
- goto err_free_abm;
+ goto err_free_act;
RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs);
reprs = nfp_reprs_alloc(pf->max_data_vnics);
@@ -710,6 +503,12 @@ static int nfp_abm_init(struct nfp_app *app)
err_free_phys:
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+err_free_act:
+ kvfree(abm->actions);
+err_free_thresh:
+ kvfree(abm->thresholds);
+err_free_thresh_umap:
+ bitmap_free(abm->threshold_undef);
err_free_abm:
kfree(abm);
app->priv = NULL;
@@ -723,6 +522,9 @@ static void nfp_abm_clean(struct nfp_app *app)
nfp_abm_eswitch_clean_up(abm);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+ bitmap_free(abm->threshold_undef);
+ kvfree(abm->actions);
+ kvfree(abm->thresholds);
kfree(abm);
app->priv = NULL;
}
@@ -736,6 +538,7 @@ const struct nfp_app_type app_abm = {
.vnic_alloc = nfp_abm_vnic_alloc,
.vnic_free = nfp_abm_vnic_free,
+ .vnic_init = nfp_abm_vnic_init,
.port_get_stats = nfp_abm_port_get_stats,
.port_get_stats_count = nfp_abm_port_get_stats_count,
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index f907b7d98917..49749c60885e 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -4,7 +4,19 @@
#ifndef __NFP_ABM_H__
#define __NFP_ABM_H__ 1
+#include <linux/bits.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
#include <net/devlink.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+/* Dump of 64 PRIOs and 256 REDs seems to take 850us on Xeon v4 @ 2.20GHz;
+ * 2.5ms / 400Hz seems more than sufficient for stats resolution.
+ */
+#define NFP_ABM_STATS_REFRESH_IVAL (2500 * 1000) /* ns */
+
+#define NFP_ABM_LVL_INFINITY S32_MAX
struct nfp_app;
struct nfp_net;
@@ -12,21 +24,64 @@ struct nfp_net;
#define NFP_ABM_PORTID_TYPE GENMASK(23, 16)
#define NFP_ABM_PORTID_ID GENMASK(7, 0)
+/* The possible actions if thresholds are exceeded */
+enum nfp_abm_q_action {
+ /* mark if ECN capable, otherwise drop */
+ NFP_ABM_ACT_MARK_DROP = 0,
+ /* mark if ECN capable, otherwise goto QM */
+ NFP_ABM_ACT_MARK_QUEUE = 1,
+ NFP_ABM_ACT_DROP = 2,
+ NFP_ABM_ACT_QUEUE = 3,
+ NFP_ABM_ACT_NOQUEUE = 4,
+};
+
/**
* struct nfp_abm - ABM NIC app structure
* @app: back pointer to nfp_app
* @pf_id: ID of our PF link
+ *
+ * @red_support: is RED offload supported
+ * @num_prios: number of supported DSCP priorities
+ * @num_bands: number of supported DSCP priority bands
+ * @action_mask: bitmask of supported actions
+ *
+ * @thresholds: current threshold configuration
+ * @threshold_undef: bitmap of thresholds which have not been set
+ * @actions: current FW action configuration
+ * @num_thresholds: number of @thresholds and bits in @threshold_undef
+ *
+ * @prio_map_len: computed length of FW priority map (in bytes)
+ * @dscp_mask: mask FW will apply on DSCP field
+ *
* @eswitch_mode: devlink eswitch mode, advanced functions only visible
* in switchdev mode
+ *
* @q_lvls: queue level control area
* @qm_stats: queue statistics symbol
+ * @q_stats: basic queue statistics (only in per-band case)
*/
struct nfp_abm {
struct nfp_app *app;
unsigned int pf_id;
+
+ unsigned int red_support;
+ unsigned int num_prios;
+ unsigned int num_bands;
+ unsigned int action_mask;
+
+ u32 *thresholds;
+ unsigned long *threshold_undef;
+ u8 *actions;
+ size_t num_thresholds;
+
+ unsigned int prio_map_len;
+ u8 dscp_mask;
+
enum devlink_eswitch_mode eswitch_mode;
+
const struct nfp_rtsym *q_lvls;
const struct nfp_rtsym *qm_stats;
+ const struct nfp_rtsym *q_stats;
};
/**
@@ -57,16 +112,76 @@ struct nfp_alink_xstats {
u64 pdrop;
};
+enum nfp_qdisc_type {
+ NFP_QDISC_NONE = 0,
+ NFP_QDISC_MQ,
+ NFP_QDISC_RED,
+ NFP_QDISC_GRED,
+};
+
+#define NFP_QDISC_UNTRACKED ((struct nfp_qdisc *)1UL)
+
/**
- * struct nfp_red_qdisc - representation of single RED Qdisc
- * @handle: handle of currently offloaded RED Qdisc
- * @stats: statistics from last refresh
- * @xstats: base of extended statistics
+ * struct nfp_qdisc - tracked TC Qdisc
+ * @netdev: netdev on which Qdisc was created
+ * @type: Qdisc type
+ * @handle: handle of this Qdisc
+ * @parent_handle: handle of the parent (unreliable if Qdisc was grafted)
+ * @use_cnt: number of attachment points in the hierarchy
+ * @num_children: current size of the @children array
+ * @children: pointers to children
+ *
+ * @params_ok: parameters of this Qdisc are OK for offload
+ * @offload_mark: offload refresh state - selected for offload
+ * @offloaded: Qdisc is currently offloaded to the HW
+ *
+ * @mq: MQ Qdisc specific parameters and state
+ * @mq.stats: current stats of the MQ Qdisc
+ * @mq.prev_stats: previously reported @mq.stats
+ *
+ * @red: RED Qdisc specific parameters and state
+ * @red.num_bands: Number of valid entries in the @red.band table
+ * @red.band: Per-band array of RED instances
+ * @red.band.ecn: ECN marking is enabled (rather than drop)
+ * @red.band.threshold: ECN marking threshold
+ * @red.band.stats: current stats of the RED Qdisc
+ * @red.band.prev_stats: previously reported @red.band.stats
+ * @red.band.xstats: extended stats for RED - current
+ * @red.band.prev_xstats: extended stats for RED - previously reported
*/
-struct nfp_red_qdisc {
+struct nfp_qdisc {
+ struct net_device *netdev;
+ enum nfp_qdisc_type type;
u32 handle;
- struct nfp_alink_stats stats;
- struct nfp_alink_xstats xstats;
+ u32 parent_handle;
+ unsigned int use_cnt;
+ unsigned int num_children;
+ struct nfp_qdisc **children;
+
+ bool params_ok;
+ bool offload_mark;
+ bool offloaded;
+
+ union {
+ /* NFP_QDISC_MQ */
+ struct {
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ } mq;
+ /* TC_SETUP_QDISC_RED, TC_SETUP_QDISC_GRED */
+ struct {
+ unsigned int num_bands;
+
+ struct {
+ bool ecn;
+ u32 threshold;
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ struct nfp_alink_xstats xstats;
+ struct nfp_alink_xstats prev_xstats;
+ } band[MAX_DPs];
+ } red;
+ };
};
/**
@@ -76,9 +191,17 @@ struct nfp_red_qdisc {
* @id: id of the data vNIC
* @queue_base: id of base to host queue within PCIe (not QC idx)
* @total_queues: number of PF queues
- * @parent: handle of expected parent, i.e. handle of MQ, or TC_H_ROOT
- * @num_qdiscs: number of currently used qdiscs
- * @qdiscs: array of qdiscs
+ *
+ * @last_stats_update: ktime of last stats update
+ *
+ * @prio_map: current map of priorities
+ * @has_prio: @prio_map is valid
+ *
+ * @def_band: default band to use
+ * @dscp_map: list of DSCP to band mappings
+ *
+ * @root_qdisc: pointer to the current root of the Qdisc hierarchy
+ * @qdiscs: all qdiscs, keyed by the major part of the handle
*/
struct nfp_abm_link {
struct nfp_abm *abm;
@@ -86,26 +209,65 @@ struct nfp_abm_link {
unsigned int id;
unsigned int queue_base;
unsigned int total_queues;
- u32 parent;
- unsigned int num_qdiscs;
- struct nfp_red_qdisc *qdiscs;
+
+ u64 last_stats_update;
+
+ u32 *prio_map;
+ bool has_prio;
+
+ u8 def_band;
+ struct list_head dscp_map;
+
+ struct nfp_qdisc *root_qdisc;
+ struct radix_tree_root qdiscs;
};
-void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
+static inline bool nfp_abm_has_prio(struct nfp_abm *abm)
+{
+ return abm->num_bands > 1;
+}
+
+static inline bool nfp_abm_has_drop(struct nfp_abm *abm)
+{
+ return abm->action_mask & BIT(NFP_ABM_ACT_DROP);
+}
+
+static inline bool nfp_abm_has_mark(struct nfp_abm *abm)
+{
+ return abm->action_mask & BIT(NFP_ABM_ACT_MARK_DROP);
+}
+
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink);
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt);
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt);
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt);
+int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt);
+int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr,
+ struct tc_block_offload *opt);
+
+int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val);
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i,
- u32 val);
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val);
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, u32 val);
+int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id,
+ enum nfp_abm_q_action act);
+int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, enum nfp_abm_q_action act);
+int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats);
-int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
+int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink,
+ unsigned int band, unsigned int queue,
struct nfp_alink_xstats *xstats);
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i);
u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i);
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm);
int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm);
+void nfp_abm_prio_map_update(struct nfp_abm *abm);
+int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
new file mode 100644
index 000000000000..2473fb5f75e5
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <net/red.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_port.h"
+#include "main.h"
+
+static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
+{
+ return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED;
+}
+
+static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
+{
+ return qdisc->children[id] &&
+ qdisc->children[id] != NFP_QDISC_UNTRACKED;
+}
+
+static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
+{
+ return rtnl_dereference(*slot);
+}
+
+static void
+nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
+ struct nfp_alink_stats *child)
+{
+ parent->tx_pkts += child->tx_pkts;
+ parent->tx_bytes += child->tx_bytes;
+ parent->backlog_pkts += child->backlog_pkts;
+ parent->backlog_bytes += child->backlog_bytes;
+ parent->overlimits += child->overlimits;
+ parent->drops += child->drops;
+}
+
+static void
+nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ unsigned int i;
+ int err;
+
+ if (!qdisc->offloaded)
+ return;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
+ &qdisc->red.band[i].stats);
+ if (err)
+ nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
+ i, queue, err);
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
+ &qdisc->red.band[i].xstats);
+ if (err)
+ nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
+ i, queue, err);
+ }
+}
+
+static void
+nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ if (qdisc->type != NFP_QDISC_MQ)
+ return;
+
+ for (i = 0; i < alink->total_queues; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i))
+ nfp_abm_stats_update_red(alink, qdisc->children[i], i);
+}
+
+static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
+{
+ alink->last_stats_update = time_now;
+ if (alink->root_qdisc)
+ nfp_abm_stats_update_mq(alink, alink->root_qdisc);
+}
+
+static void nfp_abm_stats_update(struct nfp_abm_link *alink)
+{
+ u64 now;
+
+ /* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
+ * of all their leaves, so we would read the same stat multiple times
+ * for every dump.
+ */
+ now = ktime_get();
+ if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
+ return;
+
+ __nfp_abm_stats_update(alink, now);
+}
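A quick sanity check of the interval chosen for the stats refresh above, as a standalone sketch; the 850 us dump cost and 2.5 ms interval are the illustrative figures quoted in the main.h comment, not values read from hardware:

#include <stdio.h>

int main(void)
{
	double dump_us = 850.0;    /* full PRIO+RED dump cost, from main.h */
	double ival_us = 2500.0;   /* NFP_ABM_STATS_REFRESH_IVAL, 2.5 ms */

	/* 400 Hz resolution; worst case ~34% of a core spent reading */
	printf("rate %.0f Hz, overhead %.0f%%\n",
	       1e6 / ival_us, 100.0 * dump_us / ival_us);
	return 0;
}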
+
+static void
+nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
+ unsigned int start, unsigned int end)
+{
+ unsigned int i;
+
+ for (i = start; i < end; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i)) {
+ qdisc->children[i]->use_cnt--;
+ qdisc->children[i] = NULL;
+ }
+}
+
+static void
+nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ /* Don't complain when qdisc is getting unlinked */
+ if (qdisc->use_cnt)
+ nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
+ qdisc->handle);
+
+ if (!nfp_abm_qdisc_is_red(qdisc))
+ return;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ qdisc->red.band[i].stats.backlog_pkts = 0;
+ qdisc->red.band[i].stats.backlog_bytes = 0;
+ }
+}
+
+static int
+__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
+ unsigned int queue, struct nfp_alink_stats *prev_stats,
+ struct nfp_alink_xstats *prev_xstats)
+{
+ u64 backlog_pkts, backlog_bytes;
+ int err;
+
+ /* Don't touch the backlog; it can only be reset after it has
+ * been reported back to the tc qdisc stats.
+ */
+ backlog_pkts = prev_stats->backlog_pkts;
+ backlog_bytes = prev_stats->backlog_bytes;
+
+ err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED stats init (%d, %d) failed with error %d\n",
+ band, queue, err);
+ return err;
+ }
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED xstats init (%d, %d) failed with error %d\n",
+ band, queue, err);
+ return err;
+ }
+
+ prev_stats->backlog_pkts = backlog_pkts;
+ prev_stats->backlog_bytes = backlog_bytes;
+ return 0;
+}
+
+static int
+nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ err = __nfp_abm_stats_init(alink, i, queue,
+ &qdisc->red.band[i].prev_stats,
+ &qdisc->red.band[i].prev_xstats);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ bool good_red, good_gred;
+ unsigned int i;
+
+ good_red = qdisc->type == NFP_QDISC_RED &&
+ qdisc->params_ok &&
+ qdisc->use_cnt == 1 &&
+ !alink->has_prio &&
+ !qdisc->children[0];
+ good_gred = qdisc->type == NFP_QDISC_GRED &&
+ qdisc->params_ok &&
+ qdisc->use_cnt == 1;
+ qdisc->offload_mark = good_red || good_gred;
+
+ /* If we are starting offload, init prev_stats */
+ if (qdisc->offload_mark && !qdisc->offloaded)
+ if (nfp_abm_stats_init(alink, qdisc, queue))
+ qdisc->offload_mark = false;
+
+ if (!qdisc->offload_mark)
+ return;
+
+ for (i = 0; i < alink->abm->num_bands; i++) {
+ enum nfp_abm_q_action act;
+
+ nfp_abm_ctrl_set_q_lvl(alink, i, queue,
+ qdisc->red.band[i].threshold);
+ act = qdisc->red.band[i].ecn ?
+ NFP_ABM_ACT_MARK_DROP : NFP_ABM_ACT_DROP;
+ nfp_abm_ctrl_set_q_act(alink, i, queue, act);
+ }
+}
+
+static void
+nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
+ if (!qdisc->offload_mark)
+ return;
+
+ for (i = 0; i < alink->total_queues; i++) {
+ struct nfp_qdisc *child = qdisc->children[i];
+
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
+ continue;
+
+ nfp_abm_offload_compile_red(alink, child, i);
+ }
+}
+
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
+{
+ struct nfp_abm *abm = alink->abm;
+ struct radix_tree_iter iter;
+ struct nfp_qdisc *qdisc;
+ void __rcu **slot;
+ size_t i;
+
+ /* Mark all thresholds as unconfigured */
+ for (i = 0; i < abm->num_bands; i++)
+ __bitmap_set(abm->threshold_undef,
+ i * NFP_NET_MAX_RX_RINGS + alink->queue_base,
+ alink->total_queues);
+
+ /* Clear offload marks */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ qdisc->offload_mark = false;
+ }
+
+ if (alink->root_qdisc)
+ nfp_abm_offload_compile_mq(alink, alink->root_qdisc);
+
+ /* Refresh offload status */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ if (!qdisc->offload_mark && qdisc->offloaded)
+ nfp_abm_qdisc_offload_stop(alink, qdisc);
+ qdisc->offloaded = qdisc->offload_mark;
+ }
+
+ /* Reset the unconfigured thresholds */
+ for (i = 0; i < abm->num_thresholds; i++)
+ if (test_bit(i, abm->threshold_undef))
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+
+ __nfp_abm_stats_update(alink, ktime_get());
+}
+
+static void
+nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct radix_tree_iter iter;
+ unsigned int mq_refs = 0;
+ void __rcu **slot;
+
+ if (!qdisc->use_cnt)
+ return;
+ /* MQ doesn't notify well on destruction; we need special handling of
+ * MQ's children.
+ */
+ if (qdisc->type == NFP_QDISC_MQ &&
+ qdisc == alink->root_qdisc &&
+ netdev->reg_state == NETREG_UNREGISTERING)
+ return;
+
+ /* Count refs held by MQ instances and clear pointers */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
+ unsigned int i;
+
+ if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
+ continue;
+ for (i = 0; i < mq->num_children; i++)
+ if (mq->children[i] == qdisc) {
+ mq->children[i] = NULL;
+ mq_refs++;
+ }
+ }
+
+ WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
+ qdisc->use_cnt, mq_refs);
+}
+
+static void
+nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+
+ if (!qdisc)
+ return;
+ nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
+ WARN_ON(radix_tree_delete(&alink->qdiscs,
+ TC_H_MAJ(qdisc->handle)) != qdisc);
+
+ kfree(qdisc->children);
+ kfree(qdisc);
+
+ port->tc_offload_cnt--;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+ struct nfp_qdisc *qdisc;
+ int err;
+
+ qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
+ if (!qdisc)
+ return NULL;
+
+ if (children) {
+ qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
+ if (!qdisc->children)
+ goto err_free_qdisc;
+ }
+
+ qdisc->netdev = netdev;
+ qdisc->type = type;
+ qdisc->parent_handle = parent_handle;
+ qdisc->handle = handle;
+ qdisc->num_children = children;
+
+ err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "Qdisc insertion into radix tree failed: %d\n", err);
+ goto err_free_child_tbl;
+ }
+
+ port->tc_offload_cnt++;
+ return qdisc;
+
+err_free_child_tbl:
+ kfree(qdisc->children);
+err_free_qdisc:
+ kfree(qdisc);
+ return NULL;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
+{
+ return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
+}
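Why keying the tree by TC_H_MAJ() works: TC handles pack a 16-bit major and a 16-bit minor, and qdisc handles always carry a zero minor, so the major half is a unique per-qdisc key that classes of the same qdisc share. A minimal user-space sketch, with TC_H_MAJ() expanded from its uapi definition:

#include <stdint.h>
#include <stdio.h>

#define TC_H_MAJ(h)	((h) & 0xFFFF0000U)

int main(void)
{
	uint32_t qdisc = 0x80010000;	/* qdisc 8001: */
	uint32_t cl = 0x80010003;	/* class 8001:3 of the same qdisc */

	/* both resolve to the same radix tree slot, 0x80010000 */
	printf("%08x %08x\n", TC_H_MAJ(qdisc), TC_H_MAJ(cl));
	return 0;
}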
+
+static int
+nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children, struct nfp_qdisc **qdisc)
+{
+ *qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (*qdisc) {
+ if (WARN_ON((*qdisc)->type != type))
+ return -EINVAL;
+ return 1;
+ }
+
+ *qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
+ children);
+ return *qdisc ? 0 : -ENOMEM;
+}
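Note the return convention relied on below: nfp_abm_qdisc_replace() returns 1 when the handle was already tracked, 0 when a fresh nfp_qdisc was just allocated, and a negative errno on failure; the RED replace path later uses the 0 case to decide whether the child slot still needs seeding.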
+
+static void
+nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
+ u32 handle)
+{
+ struct nfp_qdisc *qdisc;
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return;
+
+ /* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
+ if (alink->root_qdisc == qdisc)
+ qdisc->use_cnt--;
+
+ nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
+ nfp_abm_qdisc_free(netdev, alink, qdisc);
+
+ if (alink->root_qdisc == qdisc) {
+ alink->root_qdisc = NULL;
+ /* Only root change matters, other changes are acted upon on
+ * the graft notification.
+ */
+ nfp_abm_qdisc_offload_update(alink);
+ }
+}
+
+static int
+nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
+ unsigned int id)
+{
+ struct nfp_qdisc *parent, *child;
+
+ parent = nfp_abm_qdisc_find(alink, handle);
+ if (!parent)
+ return 0;
+
+ if (WARN(id >= parent->num_children,
+ "graft child out of bound %d >= %d\n",
+ id, parent->num_children))
+ return -EINVAL;
+
+ nfp_abm_qdisc_unlink_children(parent, id, id + 1);
+
+ child = nfp_abm_qdisc_find(alink, child_handle);
+ if (child)
+ child->use_cnt++;
+ else
+ child = NFP_QDISC_UNTRACKED;
+ parent->children[id] = child;
+
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
+static void
+nfp_abm_stats_calculate(struct nfp_alink_stats *new,
+ struct nfp_alink_stats *old,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_queue *qstats)
+{
+ _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
+ new->tx_pkts - old->tx_pkts);
+ qstats->qlen += new->backlog_pkts - old->backlog_pkts;
+ qstats->backlog += new->backlog_bytes - old->backlog_bytes;
+ qstats->overlimits += new->overlimits - old->overlimits;
+ qstats->drops += new->drops - old->drops;
+}
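The helper above turns the device's free-running counters into the increments the TC core expects: report new minus old, then let the caller snapshot new into the prev copy. The same pattern in a standalone sketch (single illustrative field):

#include <stdint.h>
#include <stdio.h>

struct cnt { uint64_t tx_pkts; };

static uint64_t report_and_snapshot(struct cnt *new, struct cnt *prev)
{
	uint64_t delta = new->tx_pkts - prev->tx_pkts;	/* wrap-safe on u64 */

	*prev = *new;
	return delta;
}

int main(void)
{
	struct cnt hw = { 1000 }, prev = { 0 };

	printf("%llu\n", (unsigned long long)report_and_snapshot(&hw, &prev));
	hw.tx_pkts = 1250;
	/* second dump reports only the 250-packet increment */
	printf("%llu\n", (unsigned long long)report_and_snapshot(&hw, &prev));
	return 0;
}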
+
+static void
+nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
+ struct nfp_alink_xstats *old,
+ struct red_stats *stats)
+{
+ stats->forced_mark += new->ecn_marked - old->ecn_marked;
+ stats->pdrop += new->pdrop - old->pdrop;
+}
+
+static int
+nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_gred_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc;
+ unsigned int i;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+ /* If the qdisc offload has stopped, we may need to adjust the backlog
+ * counters back, so carry on even if the qdisc is not currently offloaded.
+ */
+
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ if (!stats->xstats[i])
+ continue;
+
+ nfp_abm_stats_calculate(&qdisc->red.band[i].stats,
+ &qdisc->red.band[i].prev_stats,
+ &stats->bstats[i], &stats->qstats[i]);
+ qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats;
+
+ nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats,
+ &qdisc->red.band[i].prev_xstats,
+ stats->xstats[i]);
+ qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats;
+ }
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+static bool
+nfp_abm_gred_check_params(struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ struct nfp_abm *abm = alink->abm;
+ unsigned int i;
+
+ if (opt->set.grio_on || opt->set.wred_on) {
+ nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.dp_def != alink->def_band) {
+ nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
+ alink->def_band, opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.dp_cnt != abm->num_bands) {
+ nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
+ abm->num_bands, opt->parent, opt->handle);
+ return false;
+ }
+
+ for (i = 0; i < abm->num_bands; i++) {
+ struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i];
+
+ if (!band->present)
+ return false;
+ if (!band->is_ecn && !nfp_abm_has_drop(abm)) {
+ nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->is_ecn && !nfp_abm_has_mark(abm)) {
+ nfp_warn(cpp, "GRED offload failed - ECN marking not supported (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->is_harddrop) {
+ nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->min != band->max) {
+ nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
+ opt->parent, opt->handle, i);
+ return false;
+ }
+ if (band->min > S32_MAX) {
+ nfp_warn(cpp, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n",
+ band->min, S32_MAX, opt->parent, opt->handle,
+ i);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int
+nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ unsigned int i;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent,
+ opt->handle, 0, &qdisc);
+ if (ret < 0)
+ return ret;
+
+ qdisc->params_ok = nfp_abm_gred_check_params(alink, opt);
+ if (qdisc->params_ok) {
+ qdisc->red.num_bands = opt->set.dp_cnt;
+ for (i = 0; i < qdisc->red.num_bands; i++) {
+ qdisc->red.band[i].ecn = opt->set.tab[i].is_ecn;
+ qdisc->red.band[i].threshold = opt->set.tab[i].min;
+ }
+ }
+
+ if (qdisc->use_cnt)
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
+int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_gred_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_GRED_REPLACE:
+ return nfp_abm_gred_replace(netdev, alink, opt);
+ case TC_GRED_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_GRED_STATS:
+ return nfp_abm_gred_stats(alink, opt->handle, &opt->stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (!qdisc || !qdisc->offloaded)
+ return -EOPNOTSUPP;
+
+ nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats,
+ &qdisc->red.band[0].prev_xstats,
+ opt->xstats);
+ qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats;
+ return 0;
+}
+
+static int
+nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+ /* If the qdisc offload has stopped, we may need to adjust the backlog
+ * counters back, so carry on even if the qdisc is not currently offloaded.
+ */
+
+ nfp_abm_stats_calculate(&qdisc->red.band[0].stats,
+ &qdisc->red.band[0].prev_stats,
+ stats->bstats, stats->qstats);
+ qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats;
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+static bool
+nfp_abm_red_check_params(struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ struct nfp_abm *abm = alink->abm;
+
+ if (!opt->set.is_ecn && !nfp_abm_has_drop(abm)) {
+ nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.is_ecn && !nfp_abm_has_mark(abm)) {
+ nfp_warn(cpp, "RED offload failed - ECN marking not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.is_harddrop) {
+ nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min != opt->set.max) {
+ nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min > NFP_ABM_LVL_INFINITY) {
+ nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
+ opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
+ opt->handle);
+ return false;
+ }
+
+ return true;
+}
+
+static int
+nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
+ opt->handle, 1, &qdisc);
+ if (ret < 0)
+ return ret;
+
+ /* If limit != 0, the child gets reset */
+ if (opt->set.limit) {
+ if (nfp_abm_qdisc_child_valid(qdisc, 0))
+ qdisc->children[0]->use_cnt--;
+ qdisc->children[0] = NULL;
+ } else {
+ /* A Qdisc just allocated without a limit will use noop_qdisc,
+ * i.e. a black hole.
+ */
+ if (!ret)
+ qdisc->children[0] = NFP_QDISC_UNTRACKED;
+ }
+
+ qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
+ if (qdisc->params_ok) {
+ qdisc->red.num_bands = 1;
+ qdisc->red.band[0].ecn = opt->set.is_ecn;
+ qdisc->red.band[0].threshold = opt->set.min;
+ }
+
+ if (qdisc->use_cnt == 1)
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_RED_REPLACE:
+ return nfp_abm_red_replace(netdev, alink, opt);
+ case TC_RED_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_RED_STATS:
+ return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
+ case TC_RED_XSTATS:
+ return nfp_abm_red_xstats(alink, opt);
+ case TC_RED_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->child_handle, 0);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
+ TC_H_ROOT, opt->handle, alink->total_queues,
+ &qdisc);
+ if (ret < 0)
+ return ret;
+
+ qdisc->params_ok = true;
+ qdisc->offloaded = true;
+ nfp_abm_qdisc_offload_update(alink);
+ return 0;
+}
+
+static int
+nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc, *red;
+ unsigned int i, j;
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+
+ nfp_abm_stats_update(alink);
+
+ /* MQ stats are summed over the children in the core, so we need
+ * to add up the unreported child values.
+ */
+ memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
+ memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));
+
+ for (i = 0; i < qdisc->num_children; i++) {
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
+ continue;
+
+ if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
+ continue;
+ red = qdisc->children[i];
+
+ for (j = 0; j < red->red.num_bands; j++) {
+ nfp_abm_stats_propagate(&qdisc->mq.stats,
+ &red->red.band[j].stats);
+ nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
+ &red->red.band[j].prev_stats);
+ }
+ }
+
+ nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
+ stats->bstats, stats->qstats);
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_MQ_CREATE:
+ return nfp_abm_mq_create(netdev, alink, opt);
+ case TC_MQ_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_MQ_STATS:
+ return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
+ case TC_MQ_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->graft_params.child_handle,
+ opt->graft_params.queue);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt)
+{
+ if (opt->ingress)
+ return -EOPNOTSUPP;
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt--;
+ alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt++;
+
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 97d33bb4d84d..e23ca90289f7 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -2382,6 +2382,49 @@ static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
+static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
+{
+ /* Set signedness bit (MSB of result). */
+ emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, reg_imm(0));
+ emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst),
+ SHF_SC_R_SHF, shift_amt);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+
+ return 0;
+}
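For reference, the two instructions emitted above must implement the 32-bit BPF_ARSH semantic: an arithmetic (sign-preserving) shift of the low word, with the high word of the register pair cleared. An illustrative user-space check, assuming the usual arithmetic >> on signed integers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t dst = -256;			/* 0xffffff00 */
	uint8_t shift_amt = 4;

	/* ALU32 ARSH: low 32 bits shifted with sign, high 32 zeroed */
	uint64_t reg = (uint32_t)(dst >> shift_amt);

	printf("0x%016llx\n", (unsigned long long)reg);	/* 0x00000000fffffff0 */
	return 0;
}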
+
+static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u64 umin, umax;
+ u8 dst, src;
+
+ dst = insn->dst_reg * 2;
+ umin = meta->umin_src;
+ umax = meta->umax_src;
+ if (umin == umax)
+ return __ashr_imm(nfp_prog, dst, umin);
+
+ src = insn->src_reg * 2;
+ /* NOTE: the first insn will set both indirect shift amount (source A)
+ * and signedness bit (MSB of result).
+ */
+ emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst));
+ emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
+ reg_b(dst), SHF_SC_R_SHF);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+
+ return 0;
+}
+
+static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+{
+ const struct bpf_insn *insn = &meta->insn;
+ u8 dst = insn->dst_reg * 2;
+
+ return __ashr_imm(nfp_prog, dst, insn->imm);
+}
+
static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
@@ -3009,26 +3052,19 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
+ u8 dst_gpr = insn->dst_reg * 2;
swreg tmp_reg;
- if (!imm) {
- meta->skip = true;
- return 0;
- }
-
- if (imm & ~0U) {
- tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
- emit_alu(nfp_prog, reg_none(),
- reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
- emit_br(nfp_prog, BR_BNE, insn->off, 0);
- }
-
- if (imm >> 32) {
- tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
+ tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
+ emit_alu(nfp_prog, imm_b(nfp_prog),
+ reg_a(dst_gpr), ALU_OP_AND, tmp_reg);
+ /* Upper word of the mask can only be 0 or ~0 from sign extension,
+ * so either ignore it or OR the whole thing in.
+ */
+ if (imm >> 32)
emit_alu(nfp_prog, reg_none(),
- reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
- emit_br(nfp_prog, BR_BNE, insn->off, 0);
- }
+ reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog));
+ emit_br(nfp_prog, BR_BNE, insn->off, 0);
return 0;
}
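A worked example of the sign-extension argument in the comment above: for a negative immediate the upper mask word is all ones, so AND-ing it with the destination's high word is a no-op, and the high word can simply be OR-ed into the low-word AND result; the zero/non-zero outcome matches the full 64-bit test. Illustrative check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dst = 0x0000000100000000ULL;
	int32_t imm32 = -2;
	uint64_t imm = (uint64_t)(int64_t)imm32;	/* sign extend: 0xff..fe */

	uint32_t lo = (uint32_t)dst & (uint32_t)imm;
	uint32_t hi = (uint32_t)(dst >> 32);	/* mask hi is ~0, AND is a no-op */

	printf("%d %d\n", (dst & imm) != 0, (lo | hi) != 0);	/* both 1 */
	return 0;
}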
@@ -3286,6 +3322,8 @@ static const instr_cb_t instr_cb[256] = {
[BPF_ALU | BPF_DIV | BPF_K] = div_imm,
[BPF_ALU | BPF_NEG] = neg_reg,
[BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
+ [BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg,
+ [BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm,
[BPF_ALU | BPF_END | BPF_X] = end_reg32,
[BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
[BPF_LD | BPF_ABS | BPF_B] = data_ld1,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 6243af0ab025..dccae0319204 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -465,7 +465,7 @@ static int nfp_bpf_init(struct nfp_app *app)
app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
}
- bpf->bpf_dev = bpf_offload_dev_create();
+ bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops);
err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
if (err)
goto err_free_neutral_maps;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 7f591d71ab28..941277936475 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -509,7 +509,11 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
-extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
+int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
+ int prev_insn_idx);
+int nfp_bpf_finalize(struct bpf_verifier_env *env);
+
+extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops;
struct netdev_bpf;
struct nfp_app;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index ba8ceedcf6a2..f0283854fade 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -33,9 +33,6 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
struct nfp_bpf_neutral_map *record;
int err;
- /* Map record paths are entered via ndo, update side is protected. */
- ASSERT_RTNL();
-
/* Reuse path - other offloaded program is already tracking this map. */
record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
nfp_bpf_maps_neutral_params);
@@ -84,8 +81,6 @@ nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
bool freed = false;
int i;
- ASSERT_RTNL();
-
for (i = 0; i < nfp_prog->map_records_cnt; i++) {
if (--nfp_prog->map_records[i]->count) {
nfp_prog->map_records[i] = NULL;
@@ -187,11 +182,10 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog);
}
-static int
-nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
- struct netdev_bpf *bpf)
+static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
- struct bpf_prog *prog = bpf->verifier.prog;
+ struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
+ struct nfp_app *app = nn->app;
struct nfp_prog *nfp_prog;
int ret;
@@ -209,7 +203,6 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
goto err_free;
nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
- bpf->verifier.ops = &nfp_bpf_analyzer_ops;
return 0;
@@ -219,8 +212,9 @@ err_free:
return ret;
}
-static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
+static int nfp_bpf_translate(struct bpf_prog *prog)
{
+ struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_instr;
int err;
@@ -242,15 +236,13 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}
-static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
+static void nfp_bpf_destroy(struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
kvfree(nfp_prog->prog);
nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
nfp_prog_free(nfp_prog);
-
- return 0;
}
/* Atomic engine requires values to be in big endian, we need to byte swap
@@ -422,12 +414,6 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
switch (bpf->command) {
- case BPF_OFFLOAD_VERIFIER_PREP:
- return nfp_bpf_verifier_prep(app, nn, bpf);
- case BPF_OFFLOAD_TRANSLATE:
- return nfp_bpf_translate(nn, bpf->offload.prog);
- case BPF_OFFLOAD_DESTROY:
- return nfp_bpf_destroy(nn, bpf->offload.prog);
case BPF_OFFLOAD_MAP_ALLOC:
return nfp_bpf_map_alloc(app->priv, bpf->offmap);
case BPF_OFFLOAD_MAP_FREE:
@@ -489,14 +475,15 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- unsigned int max_mtu, max_stack, max_prog_len;
+ unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
dma_addr_t dma_addr;
void *img;
int err;
- max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- if (max_mtu < nn->dp.netdev->mtu) {
- NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
+ fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
+ if (fw_mtu < pkt_off) {
+ NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
return -EOPNOTSUPP;
}
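Illustrative numbers for the boundary check above (the inline-MTU register value of 32 is hypothetical): fw_mtu = 32 * 64 - 32 = 2016 bytes, so a program whose verifier-proven max packet offset is 128 is accepted even at MTU 1500, because the comparison uses the smaller of the two limits:

#include <stdio.h>

int main(void)
{
	unsigned int fw_mtu = 32 * 64 - 32;	/* 2016, hypothetical reg value */
	unsigned int max_pkt_offset = 128, mtu = 1500;
	unsigned int pkt_off = max_pkt_offset < mtu ? max_pkt_offset : mtu;

	printf("%s\n", fw_mtu < pkt_off ? "reject" : "offload");
	return 0;
}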
@@ -600,3 +587,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
return 0;
}
+
+const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
+ .insn_hook = nfp_verify_insn,
+ .finalize = nfp_bpf_finalize,
+ .prepare = nfp_bpf_verifier_prep,
+ .translate = nfp_bpf_translate,
+ .destroy = nfp_bpf_destroy,
+};
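With the ops table now registered on the offload device via bpf_offload_dev_create() (see the main.c hunk above), the BPF core calls prepare/translate/destroy through these callbacks directly, which is what allows the BPF_OFFLOAD_VERIFIER_PREP/TRANSLATE/DESTROY commands to disappear from nfp_ndo_bpf() earlier in this patch.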
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index 99f977bfd8cc..337bb862ec1d 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -623,8 +623,8 @@ nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return 0;
}
-static int
-nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
+int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
+ int prev_insn_idx)
{
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
@@ -745,7 +745,7 @@ continue_subprog:
goto continue_subprog;
}
-static int nfp_bpf_finalize(struct bpf_verifier_env *env)
+int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
struct bpf_subprog_info *info;
struct nfp_prog *nfp_prog;
@@ -788,8 +788,3 @@ static int nfp_bpf_finalize(struct bpf_verifier_env *env)
return 0;
}
-
-const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
- .insn_hook = nfp_verify_insn,
- .finalize = nfp_bpf_finalize,
-};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 244dc261006e..8d54b36afee8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -2,7 +2,6 @@
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
-#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
@@ -91,21 +90,6 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
return act_size;
}
-static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
- enum nfp_flower_tun_type tun_type)
-{
- if (!out_dev->rtnl_link_ops)
- return false;
-
- if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
- return tun_type == NFP_FL_TUNNEL_VXLAN;
-
- if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
- return tun_type == NFP_FL_TUNNEL_GENEVE;
-
- return false;
-}
-
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
@@ -151,11 +135,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
- /* Only offload if egress ports are on the same device as the
- * ingress port.
- */
- if (!switchdev_port_same_parent_id(in_dev, out_dev))
- return -EOPNOTSUPP;
+ if (nfp_netdev_is_nfp_repr(in_dev)) {
+ /* Confirm ingress and egress are on same device. */
+ if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ return -EOPNOTSUPP;
+ }
+
if (!nfp_netdev_is_nfp_repr(out_dev))
return -EOPNOTSUPP;
@@ -384,10 +369,21 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
return 0;
}
+struct ipv4_ttl_word {
+ __u8 ttl;
+ __u8 protocol;
+ __sum16 check;
+};
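tcf_pedit hands drivers 32-bit big-endian words, so a TTL rewrite arrives as the word at offsetof(struct iphdr, ttl), which also covers the protocol and checksum bytes; the overlay struct above splits the word so those bytes can be vetoed. The same masking in a standalone sketch (illustrative values):

#include <stdint.h>
#include <stdio.h>

struct ipv4_ttl_word {
	uint8_t ttl;
	uint8_t protocol;
	uint16_t check;
};

int main(void)
{
	/* "pedit ... set ttl 64": only the first byte of the word is masked */
	uint8_t exact[4] = { 64, 0x00, 0x00, 0x00 };
	uint8_t mask[4]  = { 0xff, 0x00, 0x00, 0x00 };
	const struct ipv4_ttl_word *w = (const void *)mask;

	/* a mask touching protocol or checksum makes the offload bail */
	printf("ttl=%u -> %s\n", exact[0],
	       (w->protocol || w->check) ? "reject" : "ok");
	return 0;
}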
+
static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
- struct nfp_fl_set_ip4_addrs *set_ip_addr)
+ struct nfp_fl_set_ip4_addrs *set_ip_addr,
+ struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
{
+ struct ipv4_ttl_word *ttl_word_mask;
+ struct ipv4_ttl_word *ttl_word;
+ struct iphdr *tos_word_mask;
+ struct iphdr *tos_word;
__be32 exact, mask;
/* We are expecting tcf_pedit to return a big endian value */
@@ -402,20 +398,53 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
set_ip_addr->ipv4_dst_mask |= mask;
set_ip_addr->ipv4_dst &= ~mask;
set_ip_addr->ipv4_dst |= exact & mask;
+ set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+ set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+ NFP_FL_LW_SIZ;
break;
case offsetof(struct iphdr, saddr):
set_ip_addr->ipv4_src_mask |= mask;
set_ip_addr->ipv4_src &= ~mask;
set_ip_addr->ipv4_src |= exact & mask;
+ set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+ set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+ NFP_FL_LW_SIZ;
+ break;
+ case offsetof(struct iphdr, ttl):
+ ttl_word_mask = (struct ipv4_ttl_word *)&mask;
+ ttl_word = (struct ipv4_ttl_word *)&exact;
+
+ if (ttl_word_mask->protocol || ttl_word_mask->check)
+ return -EOPNOTSUPP;
+
+ set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
+ set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
+ set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
+ set_ip_ttl_tos->head.jump_id =
+ NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+ set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+ NFP_FL_LW_SIZ;
+ break;
+ case round_down(offsetof(struct iphdr, tos), 4):
+ tos_word_mask = (struct iphdr *)&mask;
+ tos_word = (struct iphdr *)&exact;
+
+ if (tos_word_mask->version || tos_word_mask->ihl ||
+ tos_word_mask->tot_len)
+ return -EOPNOTSUPP;
+
+ set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
+ set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
+ set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
+ set_ip_ttl_tos->head.jump_id =
+ NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+ set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+ NFP_FL_LW_SIZ;
break;
default:
return -EOPNOTSUPP;
}
- set_ip_addr->reserved = cpu_to_be16(0);
- set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
- set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;
-
return 0;
}
@@ -432,12 +461,57 @@ nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}
+struct ipv6_hop_limit_word {
+ __be16 payload_len;
+ u8 nexthdr;
+ u8 hop_limit;
+};
+
+static int
+nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
+{
+ struct ipv6_hop_limit_word *fl_hl_mask;
+ struct ipv6_hop_limit_word *fl_hl;
+
+ switch (off) {
+ case offsetof(struct ipv6hdr, payload_len):
+ fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
+ fl_hl = (struct ipv6_hop_limit_word *)&exact;
+
+ if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len)
+ return -EOPNOTSUPP;
+
+ ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
+ ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
+ ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
+ fl_hl_mask->hop_limit;
+ break;
+ case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
+ if (mask & ~IPV6_FLOW_LABEL_MASK ||
+ exact & ~IPV6_FLOW_LABEL_MASK)
+ return -EOPNOTSUPP;
+
+ ip_hl_fl->ipv6_label_mask |= mask;
+ ip_hl_fl->ipv6_label &= ~mask;
+ ip_hl_fl->ipv6_label |= exact & mask;
+ break;
+ }
+
+ ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
+ ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
struct nfp_fl_set_ipv6_addr *ip_dst,
- struct nfp_fl_set_ipv6_addr *ip_src)
+ struct nfp_fl_set_ipv6_addr *ip_src,
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
{
__be32 exact, mask;
+ int err = 0;
u8 word;
/* We are expecting tcf_pedit to return a big endian value */
@@ -448,7 +522,8 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
return -EOPNOTSUPP;
if (off < offsetof(struct ipv6hdr, saddr)) {
- return -EOPNOTSUPP;
+ err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
+ ip_hl_fl);
} else if (off < offsetof(struct ipv6hdr, daddr)) {
word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
@@ -462,7 +537,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
return -EOPNOTSUPP;
}
- return 0;
+ return err;
}
static int
@@ -513,6 +588,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
char *nfp_action, int *a_len, u32 *csum_updated)
{
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
+ struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
+ struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
struct nfp_fl_set_ip4_addrs set_ip_addr;
struct nfp_fl_set_tport set_tport;
struct nfp_fl_set_eth set_eth;
@@ -522,6 +599,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
u32 offset, cmd;
u8 ip_proto = 0;
+ memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
+ memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
memset(&set_ip6_src, 0, sizeof(set_ip6_src));
memset(&set_ip_addr, 0, sizeof(set_ip_addr));
@@ -542,11 +621,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
err = nfp_fl_set_eth(action, idx, offset, &set_eth);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
+ err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr,
+ &set_ip_ttl_tos);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
- &set_ip6_src);
+ &set_ip6_src, &set_ip6_tc_hl_fl);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
err = nfp_fl_set_tport(action, idx, offset, &set_tport,
@@ -577,6 +657,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
memcpy(nfp_action, &set_eth, act_size);
*a_len += act_size;
}
+ if (set_ip_ttl_tos.head.len_lw) {
+ nfp_action += act_size;
+ act_size = sizeof(set_ip_ttl_tos);
+ memcpy(nfp_action, &set_ip_ttl_tos, act_size);
+ *a_len += act_size;
+
+ /* Hardware will automatically fix IPv4 and TCP/UDP checksums. */
+ *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
+ nfp_fl_csum_l4_to_flag(ip_proto);
+ }
if (set_ip_addr.head.len_lw) {
nfp_action += act_size;
act_size = sizeof(set_ip_addr);
@@ -587,6 +677,15 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
+ if (set_ip6_tc_hl_fl.head.len_lw) {
+ nfp_action += act_size;
+ act_size = sizeof(set_ip6_tc_hl_fl);
+ memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
+ *a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
+ }
if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
@@ -728,9 +827,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
*a_len += sizeof(struct nfp_fl_push_vlan);
} else if (is_tcf_tunnel_set(a)) {
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
- struct nfp_repr *repr = netdev_priv(netdev);
- *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+ *tun_type = nfp_fl_get_tun_from_act_l4_port(app, a);
if (*tun_type == NFP_FL_TUNNEL_NONE)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 29d673aa5277..15f41cfef9f1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -8,6 +8,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/geneve.h>
+#include <net/vxlan.h>
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
@@ -65,8 +66,10 @@
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10
#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL 13
#define NFP_FL_ACTION_OPCODE_SET_UDP 14
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
@@ -82,6 +85,8 @@
#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
+#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
+
/* LAG ports */
#define NFP_FL_LAG_OUT 0xC0DE0000
@@ -125,6 +130,26 @@ struct nfp_fl_set_ip4_addrs {
__be32 ipv4_dst;
};
+struct nfp_fl_set_ip4_ttl_tos {
+ struct nfp_fl_act_head head;
+ u8 ipv4_ttl_mask;
+ u8 ipv4_tos_mask;
+ u8 ipv4_ttl;
+ u8 ipv4_tos;
+ __be16 reserved;
+};
+
+struct nfp_fl_set_ipv6_tc_hl_fl {
+ struct nfp_fl_act_head head;
+ u8 ipv6_tc_mask;
+ u8 ipv6_hop_limit_mask;
+ __be16 reserved;
+ u8 ipv6_tc;
+ u8 ipv6_hop_limit;
+ __be32 ipv6_label_mask;
+ __be32 ipv6_label;
+};
+
struct nfp_fl_set_ipv6_addr {
struct nfp_fl_act_head head;
__be16 reserved;
@@ -475,6 +500,32 @@ static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
return skb->len - NFP_FLOWER_CMSG_HLEN;
}
+static inline bool
+nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
+ enum nfp_flower_tun_type tun_type)
+{
+ if (netif_is_vxlan(netdev))
+ return tun_type == NFP_FL_TUNNEL_VXLAN;
+ if (netif_is_geneve(netdev))
+ return tun_type == NFP_FL_TUNNEL_GENEVE;
+
+ return false;
+}
+
+static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
+{
+ if (!netdev->rtnl_link_ops)
+ return false;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+ return true;
+ if (netif_is_vxlan(netdev))
+ return true;
+ if (netif_is_geneve(netdev))
+ return true;
+
+ return false;
+}
+
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
void
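On IPV6_FLOW_LABEL_MASK and the nfp_fl_set_ipv6_tc_hl_fl layout added above: the first 32-bit word of an IPv6 header is version (4 bits), traffic class (8 bits) and flow label (20 bits), which is where the 0x000fffff mask comes from. A small illustrative sketch of splitting that word (these helpers are not part of the driver):

#include <linux/types.h>
#include <asm/byteorder.h>

#define IPV6_FLOW_LABEL_MASK	cpu_to_be32(0x000fffff)

/* Traffic class sits in bits 27..20 of the first IPv6 word. */
static inline u8 ipv6_first_word_tc(__be32 w)
{
	return (be32_to_cpu(w) >> 20) & 0xff;
}

/* Flow label is the low 20 bits, hence the 0x000fffff mask. */
static inline __be32 ipv6_first_word_label(__be32 w)
{
	return w & IPV6_FLOW_LABEL_MASK;
}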
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 81dcf5b318ba..5db838f45694 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -472,17 +472,25 @@ nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}
-static int
+static void
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
struct net_device *master)
{
struct nfp_fl_lag_group *group;
+ struct nfp_flower_priv *priv;
+
+ priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+ if (!netif_is_bond_master(master))
+ return;
mutex_lock(&lag->lock);
group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
if (!group) {
mutex_unlock(&lag->lock);
- return -ENOENT;
+ nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n",
+ netdev_name(master));
+ return;
}
group->to_remove = true;
@@ -490,7 +498,6 @@ nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
mutex_unlock(&lag->lock);
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
- return 0;
}
static int
@@ -575,7 +582,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
return 0;
}
-static int
+static void
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
struct netdev_notifier_changelowerstate_info *info)
{
@@ -586,18 +593,18 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
unsigned long *flags;
if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
- return 0;
+ return;
lag_lower_info = info->lower_state_info;
if (!lag_lower_info)
- return 0;
+ return;
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
repr = netdev_priv(netdev);
/* Verify that the repr is associated with this app. */
if (repr->app != priv->app)
- return 0;
+ return;
repr_priv = repr->app_priv;
flags = &repr_priv->lag_port_flags;
@@ -617,20 +624,15 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
mutex_unlock(&lag->lock);
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
- return 0;
}
-static int
-nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
- void *ptr)
+int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ unsigned long event, void *ptr)
{
- struct net_device *netdev;
- struct nfp_fl_lag *lag;
+ struct nfp_fl_lag *lag = &priv->nfp_lag;
int err;
- netdev = netdev_notifier_info_to_dev(ptr);
- lag = container_of(nb, struct nfp_fl_lag, lag_nb);
-
switch (event) {
case NETDEV_CHANGEUPPER:
err = nfp_fl_lag_changeupper_event(lag, ptr);
@@ -638,17 +640,11 @@ nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
return NOTIFY_BAD;
return NOTIFY_OK;
case NETDEV_CHANGELOWERSTATE:
- err = nfp_fl_lag_changels_event(lag, netdev, ptr);
- if (err)
- return NOTIFY_BAD;
+ nfp_fl_lag_changels_event(lag, netdev, ptr);
return NOTIFY_OK;
case NETDEV_UNREGISTER:
- if (netif_is_bond_master(netdev)) {
- err = nfp_fl_lag_schedule_group_delete(lag, netdev);
- if (err)
- return NOTIFY_BAD;
- return NOTIFY_OK;
- }
+ nfp_fl_lag_schedule_group_delete(lag, netdev);
+ return NOTIFY_OK;
}
return NOTIFY_DONE;
@@ -673,8 +669,6 @@ void nfp_flower_lag_init(struct nfp_fl_lag *lag)
/* 0 is a reserved batch version so increment to first valid value. */
nfp_fl_increment_version(lag);
-
- lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event;
}
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
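The warning path added to nfp_fl_lag_schedule_group_delete() recovers the flower private data from the embedded nfp_fl_lag via container_of(). A standalone illustration of that pattern, with placeholder struct names:

#include <stddef.h>

struct inner { int x; };

struct outer {
	int a;
	struct inner in;	/* embedded member, like nfp_lag above */
};

/* container_of() subtracts the member offset to recover the container;
 * the kernel's version in <linux/kernel.h> adds type checking on top.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct outer *outer_from_inner(struct inner *p)
{
	return container_of(p, struct outer, in);
}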
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 3a54728d2ea6..5059110a1768 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -146,23 +146,12 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}
-static int
-nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
-{
- return tc_setup_cb_egdev_register(netdev,
- nfp_flower_setup_tc_egress_cb,
- netdev_priv(netdev));
-}
-
static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
kfree(repr->app_priv);
-
- tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
- netdev_priv(netdev));
}
static void
@@ -568,6 +557,8 @@ static int nfp_flower_init(struct nfp_app *app)
goto err_cleanup_metadata;
}
+ INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
+
return 0;
err_cleanup_metadata:
@@ -661,10 +652,6 @@ static int nfp_flower_start(struct nfp_app *app)
err = nfp_flower_lag_reset(&app_priv->nfp_lag);
if (err)
return err;
-
- err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
- if (err)
- return err;
}
return nfp_tunnel_config_start(app);
@@ -672,12 +659,27 @@ static int nfp_flower_start(struct nfp_app *app)
static void nfp_flower_stop(struct nfp_app *app)
{
+ nfp_tunnel_config_stop(app);
+}
+
+static int
+nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
+ unsigned long event, void *ptr)
+{
struct nfp_flower_priv *app_priv = app->priv;
+ int ret;
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
- unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+ }
- nfp_tunnel_config_stop(app);
+ ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+
+ return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}
const struct nfp_app_type app_flower = {
@@ -698,7 +700,6 @@ const struct nfp_app_type app_flower = {
.vnic_init = nfp_flower_vnic_init,
.vnic_clean = nfp_flower_vnic_clean,
- .repr_init = nfp_flower_repr_netdev_init,
.repr_preclean = nfp_flower_repr_netdev_preclean,
.repr_clean = nfp_flower_repr_netdev_clean,
@@ -708,6 +709,8 @@ const struct nfp_app_type app_flower = {
.start = nfp_flower_start,
.stop = nfp_flower_stop,
+ .netdev_event = nfp_flower_netdev_event,
+
.ctrl_msg_rx = nfp_flower_cmsg_rx,
.sriov_enable = nfp_flower_sriov_enable,
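nfp_flower_netdev_event() above chains the LAG, indirect-block and tunnel-MAC handlers, bailing out as soon as one returns a value with NOTIFY_STOP_MASK set (both NOTIFY_BAD and NOTIFY_STOP carry that bit). A compact sketch of the dispatch pattern, with hypothetical handler names:

#include <linux/notifier.h>

typedef int (*ev_handler_t)(void *ctx, unsigned long event, void *ptr);

/* Hypothetical: run handlers in order, stopping at the first one that
 * consumed (or failed) the event.
 */
static int run_event_chain(ev_handler_t *handlers, int n, void *ctx,
			   unsigned long event, void *ptr)
{
	int i, ret = NOTIFY_DONE;

	for (i = 0; i < n; i++) {
		ret = handlers[i](ctx, event, ptr);
		if (ret & NOTIFY_STOP_MASK)
			break;
	}
	return ret;
}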
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 90045bab95bf..b858bac47621 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -20,7 +20,6 @@ struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;
-#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \
init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS 256
@@ -72,7 +71,6 @@ struct nfp_mtu_conf {
/**
* struct nfp_fl_lag - Flower APP priv data for link aggregation
- * @lag_nb: Notifier to track master/slave events
* @work: Work queue for writing configs to the HW
* @lock: Lock to protect lag_group_list
* @group_list: List of all master/slave groups offloaded
@@ -85,7 +83,6 @@ struct nfp_mtu_conf {
* retransmission
*/
struct nfp_fl_lag {
- struct notifier_block lag_nb;
struct delayed_work work;
struct mutex lock;
struct list_head group_list;
@@ -126,13 +123,13 @@ struct nfp_fl_lag {
* @nfp_neigh_off_lock: Lock for the neighbour address list
* @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs
* @nfp_mac_off_count: Number of MACs in address list
- * @nfp_tun_mac_nb: Notifier to monitor link state
* @nfp_tun_neigh_nb: Notifier to monitor neighbour state
* @reify_replies: atomically stores the number of replies received
* from firmware for repr reify
* @reify_wait_queue: wait queue for repr reify response counting
* @mtu_conf: Configuration of repr MTU value
* @nfp_lag: Link aggregation data block
+ * @indr_block_cb_priv: List of priv data passed to indirect block cbs
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -160,12 +157,12 @@ struct nfp_flower_priv {
spinlock_t nfp_neigh_off_lock;
struct ida nfp_mac_off_ids;
int nfp_mac_off_count;
- struct notifier_block nfp_tun_mac_nb;
struct notifier_block nfp_tun_neigh_nb;
atomic_t reify_replies;
wait_queue_head_t reify_wait_queue;
struct nfp_mtu_conf mtu_conf;
struct nfp_fl_lag nfp_lag;
+ struct list_head indr_block_cb_priv;
};
/**
@@ -209,7 +206,6 @@ struct nfp_fl_payload {
char *unmasked_data;
char *mask_data;
char *action_data;
- bool ingress_offload;
};
extern const struct rhashtable_params nfp_flower_table_params;
@@ -226,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
@@ -244,7 +241,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
- struct net_device *netdev, __be32 host_ctx);
+ struct net_device *netdev);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
@@ -252,21 +249,28 @@ void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
int nfp_tunnel_config_start(struct nfp_app *app);
void nfp_tunnel_config_stop(struct nfp_app *app);
+int nfp_tunnel_mac_event_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event, void *ptr);
void nfp_tunnel_write_macs(struct nfp_app *app);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
+int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ unsigned long event, void *ptr);
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
struct nfp_fl_pre_lag *pre_act);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e54fb6034326..cdf75595f627 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
return 0;
}
- if (tun_type)
+ if (tun_type) {
frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
- else
+ } else {
+ if (!cmsg_port)
+ return -EOPNOTSUPP;
frame->in_port = cpu_to_be32(cmsg_port);
+ }
return 0;
}
@@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
}
}
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type)
{
- struct nfp_repr *netdev_repr;
+ u32 cmsg_port = 0;
int err;
u8 *ext;
u8 *msk;
+ if (nfp_netdev_is_nfp_repr(netdev))
+ cmsg_port = nfp_repr_get_port_id(netdev);
+
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- nfp_repr_get_port_id(netdev),
- false, tun_type);
+ cmsg_port, false, tun_type);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- nfp_repr_get_port_id(netdev),
- true, tun_type);
+ cmsg_port, true, tun_type);
if (err)
return err;
@@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
/* Configure tunnel end point MAC. */
- if (nfp_netdev_is_nfp_repr(netdev)) {
- netdev_repr = netdev_priv(netdev);
- nfp_tunnel_write_macs(netdev_repr->app);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
- }
+ nfp_tunnel_write_macs(app);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+ nfp_tunnel_add_ipv4_off(app, tun_dst);
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
err = nfp_flower_compile_geneve_opt(ext, flow, false);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 48729bf171e0..573a4400a26c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -21,7 +21,6 @@ struct nfp_mask_id_table {
struct nfp_fl_flow_table_cmp_arg {
struct net_device *netdev;
unsigned long cookie;
- __be32 host_ctx;
};
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
@@ -76,14 +75,13 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
- struct net_device *netdev, __be32 host_ctx)
+ struct net_device *netdev)
{
struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
struct nfp_flower_priv *priv = app->priv;
flower_cmp_arg.netdev = netdev;
flower_cmp_arg.cookie = tc_flower_cookie;
- flower_cmp_arg.host_ctx = host_ctx;
return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
nfp_flower_table_params);
@@ -287,6 +285,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+ nfp_flow->ingress_dev = netdev;
new_mask_id = 0;
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
@@ -306,8 +305,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
priv->stats[stats_cxt].bytes = 0;
priv->stats[stats_cxt].used = jiffies;
- check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (check_entry) {
if (nfp_release_stats_entry(app, stats_cxt))
return -EINVAL;
@@ -352,9 +350,7 @@ static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
const struct nfp_fl_payload *flow_entry = obj;
- if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
- (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
- flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
+ if (flow_entry->ingress_dev == cmp_arg->netdev)
return flow_entry->tc_flower_cookie != cmp_arg->cookie;
return 1;
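Note the convention nfp_fl_obj_cmpfn() keeps after the simplification: an rhashtable obj_cmpfn returns 0 for a match and non-zero otherwise, memcmp-style, so the cookie inequality is the entire comparison once host_ctx is gone. A generic sketch of a compare function in that style (placeholder types, not driver code):

#include <linux/rhashtable.h>

struct flow_key { unsigned long cookie; };

struct flow_obj {
	unsigned long cookie;
	struct rhash_head node;
};

/* 0 => match, non-zero => no match (same convention as memcmp). */
static int flow_cmp(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct flow_key *key = arg->key;
	const struct flow_obj *fo = obj;

	return fo->cookie != key->cookie;
}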
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 29c95423ab64..2cdbf29ecbe7 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -56,11 +56,10 @@
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
static int
-nfp_flower_xmit_flow(struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow, u8 mtype)
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+ u8 mtype)
{
u32 meta_len, key_len, mask_len, act_len, tot_len;
- struct nfp_repr *priv = netdev_priv(netdev);
struct sk_buff *skb;
unsigned char *msg;
@@ -78,7 +77,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
- skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
+ skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -96,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
- nfp_ctrl_tx(priv->app->ctrl, skb);
+ nfp_ctrl_tx(app->ctrl, skb);
return 0;
}
@@ -129,9 +128,9 @@ nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
+ struct net_device *netdev,
struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
- bool egress,
enum nfp_flower_tun_type *tun_type)
{
struct flow_dissector_key_basic *mask_basic = NULL;
@@ -187,8 +186,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->key);
- if (!egress)
- return -EOPNOTSUPP;
if (mask_enc_ctl->addr_type != 0xffff ||
enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
@@ -251,9 +248,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
default:
return -EOPNOTSUPP;
}
- } else if (egress) {
- /* Reject non tunnel matches offloaded to egress repr. */
- return -EOPNOTSUPP;
+
+ /* Ensure the ingress netdev matches the expected tun type. */
+ if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
+ return -EOPNOTSUPP;
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -345,13 +343,29 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
!(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
return -EOPNOTSUPP;
- /* We need to store TCP flags in the IPv4 key space, thus
- * we need to ensure we include a IPv4 key layer if we have
- * not done so already.
+ /* We need to store TCP flags in either the IPv4 or IPv6 key
+ * space, thus we need to ensure we include an IPv4/IPv6 key
+ * layer if we have not done so already.
*/
- if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
- key_layer |= NFP_FLOWER_LAYER_IPV4;
- key_size += sizeof(struct nfp_flower_ipv4);
+ if (!key_basic)
+ return -EOPNOTSUPP;
+
+ if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
+ !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
+ switch (key_basic->n_proto) {
+ case cpu_to_be16(ETH_P_IP):
+ key_layer |= NFP_FLOWER_LAYER_IPV4;
+ key_size += sizeof(struct nfp_flower_ipv4);
+ break;
+
+ case cpu_to_be16(ETH_P_IPV6):
+ key_layer |= NFP_FLOWER_LAYER_IPV6;
+ key_size += sizeof(struct nfp_flower_ipv6);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
}
}
@@ -374,7 +388,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
static struct nfp_fl_payload *
-nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
struct nfp_fl_payload *flow_pay;
@@ -398,7 +412,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
- flow_pay->ingress_offload = !egress;
return flow_pay;
@@ -416,7 +429,6 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure.
- * @egress: NFP netdev is the egress.
*
* Adds a new flow to the repeated hash structure and action payload.
*
@@ -424,46 +436,35 @@ err_free_flow:
*/
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
- struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
- struct net_device *ingr_dev;
+ struct nfp_port *port = NULL;
int err;
- ingr_dev = egress ? NULL : netdev;
- flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
- if (flow_pay) {
- /* Ignore as duplicate if it has been added by different cb. */
- if (flow_pay->ingress_offload && egress)
- return 0;
- else
- return -EOPNOTSUPP;
- }
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+ err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
&tun_type);
if (err)
goto err_free_key_ls;
- flow_pay = nfp_flower_allocate_new(key_layer, egress);
+ flow_pay = nfp_flower_allocate_new(key_layer);
if (!flow_pay) {
err = -ENOMEM;
goto err_free_key_ls;
}
- flow_pay->ingress_dev = egress ? NULL : netdev;
-
- err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
- tun_type);
+ err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
+ flow_pay, tun_type);
if (err)
goto err_destroy_flow;
@@ -471,13 +472,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
- err = nfp_compile_flow_metadata(app, flow, flow_pay,
- flow_pay->ingress_dev);
- if (err)
- goto err_destroy_flow;
-
- err = nfp_flower_xmit_flow(netdev, flow_pay,
- NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+ err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
if (err)
goto err_destroy_flow;
@@ -485,15 +480,27 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
nfp_flower_table_params);
if (err)
- goto err_destroy_flow;
+ goto err_release_metadata;
+
+ err = nfp_flower_xmit_flow(app, flow_pay,
+ NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+ if (err)
+ goto err_remove_rhash;
- port->tc_offload_cnt++;
+ if (port)
+ port->tc_offload_cnt++;
/* Deallocate flow payload when flower rule has been destroyed. */
kfree(key_layer);
return 0;
+err_remove_rhash:
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &flow_pay->fl_node,
+ nfp_flower_table_params));
+err_release_metadata:
+ nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
kfree(flow_pay->action_data);
kfree(flow_pay->mask_data);
@@ -509,7 +516,6 @@ err_free_key_ls:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure
- * @egress: Netdev is the egress dev.
*
* Removes a flow from the repeated hash structure and clears the
* action payload.
@@ -518,19 +524,19 @@ err_free_key_ls:
*/
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
- struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
- struct net_device *ingr_dev;
+ struct nfp_port *port = NULL;
int err;
- ingr_dev = egress ? NULL : netdev;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
+
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow)
- return egress ? 0 : -ENOENT;
+ return -ENOENT;
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
@@ -539,13 +545,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
- err = nfp_flower_xmit_flow(netdev, nfp_flow,
+ err = nfp_flower_xmit_flow(app, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
if (err)
goto err_free_flow;
err_free_flow:
- port->tc_offload_cnt--;
+ if (port)
+ port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
kfree(nfp_flow->mask_data);
kfree(nfp_flow->unmasked_data);
@@ -561,7 +568,6 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: Netdev structure.
* @flow: TC flower classifier offload structure
- * @egress: Netdev is the egress dev.
*
* Populates a flow statistics structure which corresponds to a
* specific flow.
@@ -570,22 +576,16 @@ err_free_flow:
*/
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
- struct net_device *ingr_dev;
u32 ctx_id;
- ingr_dev = egress ? NULL : netdev;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow)
return -EINVAL;
- if (nfp_flow->ingress_offload && egress)
- return 0;
-
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
spin_lock_bh(&priv->stats_lock);
@@ -602,35 +602,18 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flower, bool egress)
+ struct tc_cls_flower_offload *flower)
{
if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
switch (flower->command) {
case TC_CLSFLOWER_REPLACE:
- return nfp_flower_add_offload(app, netdev, flower, egress);
+ return nfp_flower_add_offload(app, netdev, flower);
case TC_CLSFLOWER_DESTROY:
- return nfp_flower_del_offload(app, netdev, flower, egress);
+ return nfp_flower_del_offload(app, netdev, flower);
case TC_CLSFLOWER_STATS:
- return nfp_flower_get_stats(app, netdev, flower, egress);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- struct nfp_repr *repr = cb_priv;
-
- if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
- return -EOPNOTSUPP;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return nfp_flower_repr_offload(repr->app, repr->netdev,
- type_data, true);
+ return nfp_flower_get_stats(app, netdev, flower);
default:
return -EOPNOTSUPP;
}
@@ -647,7 +630,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSFLOWER:
return nfp_flower_repr_offload(repr->app, repr->netdev,
- type_data, false);
+ type_data);
default:
return -EOPNOTSUPP;
}
@@ -686,3 +669,130 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
}
+
+struct nfp_flower_indr_block_cb_priv {
+ struct net_device *netdev;
+ struct nfp_app *app;
+ struct list_head list;
+};
+
+static struct nfp_flower_indr_block_cb_priv *
+nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
+ struct net_device *netdev)
+{
+ struct nfp_flower_indr_block_cb_priv *cb_priv;
+ struct nfp_flower_priv *priv = app->priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+ struct tc_cls_flower_offload *flower = type_data;
+
+ if (flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return nfp_flower_repr_offload(priv->app, priv->netdev,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
+ struct tc_block_offload *f)
+{
+ struct nfp_flower_indr_block_cb_priv *cb_priv;
+ struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+ if (!cb_priv)
+ return -ENOMEM;
+
+ cb_priv->netdev = netdev;
+ cb_priv->app = app;
+ list_add(&cb_priv->list, &priv->indr_block_cb_priv);
+
+ err = tcf_block_cb_register(f->block,
+ nfp_flower_setup_indr_block_cb,
+ cb_priv, cb_priv, f->extack);
+ if (err) {
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+ }
+
+ return err;
+ case TC_BLOCK_UNBIND:
+ cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+ if (!cb_priv)
+ return -ENOENT;
+
+ tcf_block_cb_unregister(f->block,
+ nfp_flower_setup_indr_block_cb,
+ cb_priv);
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event)
+{
+ int err;
+
+ if (!nfp_fl_is_netdev_to_offload(netdev))
+ return NOTIFY_OK;
+
+ if (event == NETDEV_REGISTER) {
+ err = __tc_indr_block_cb_register(netdev, app,
+ nfp_flower_indr_setup_tc_cb,
+ app);
+ if (err)
+ nfp_flower_cmsg_warn(app,
+ "Indirect block reg failed - %s\n",
+ netdev->name);
+ } else if (event == NETDEV_UNREGISTER) {
+ __tc_indr_block_cb_unregister(netdev,
+ nfp_flower_indr_setup_tc_cb, app);
+ }
+
+ return NOTIFY_OK;
+}
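One detail in the TC_BLOCK_BIND path above: the cb_priv is linked into the list before tcf_block_cb_register() and unwound if registration fails. A generic sketch of that publish-then-rollback idiom (names are placeholders):

#include <linux/list.h>
#include <linux/slab.h>

struct cb_entry {
	struct list_head list;
	void *ctx;
};

static int add_cb_entry(struct list_head *head, void *ctx,
			int (*reg)(void *ctx))
{
	struct cb_entry *e;
	int err;

	e = kmalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->ctx = ctx;
	list_add(&e->list, head);	/* publish first */

	err = reg(ctx);
	if (err) {
		/* roll back the published entry on failure */
		list_del(&e->list);
		kfree(e);
	}
	return err;
}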
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 8e5bec04d1f9..2d9f26a725c2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -4,7 +4,6 @@
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
-#include <net/vxlan.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>
@@ -182,18 +181,6 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
}
}
-static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
-{
- if (!netdev->rtnl_link_ops)
- return false;
- if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
- return true;
- if (netif_is_vxlan(netdev))
- return true;
-
- return false;
-}
-
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
gfp_t flag)
@@ -615,7 +602,7 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_repr_get_port_id(netdev);
- else if (!nfp_tun_is_netdev_to_offload(netdev))
+ else if (!nfp_fl_is_netdev_to_offload(netdev))
return;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -652,29 +639,16 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
mutex_unlock(&priv->nfp_mac_off_lock);
}
-static int nfp_tun_mac_event_handler(struct notifier_block *nb,
- unsigned long event, void *ptr)
+int nfp_tunnel_mac_event_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event, void *ptr)
{
- struct nfp_flower_priv *app_priv;
- struct net_device *netdev;
- struct nfp_app *app;
-
if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
- app_priv = container_of(nb, struct nfp_flower_priv,
- nfp_tun_mac_nb);
- app = app_priv->app;
- netdev = netdev_notifier_info_to_dev(ptr);
-
/* If non-nfp netdev then free its offload index. */
- if (nfp_tun_is_netdev_to_offload(netdev))
+ if (nfp_fl_is_netdev_to_offload(netdev))
nfp_tun_del_mac_idx(app, netdev->ifindex);
} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
event == NETDEV_REGISTER) {
- app_priv = container_of(nb, struct nfp_flower_priv,
- nfp_tun_mac_nb);
- app = app_priv->app;
- netdev = netdev_notifier_info_to_dev(ptr);
-
nfp_tun_add_to_mac_offload_list(netdev, app);
/* Force a list write to keep NFP up to date. */
@@ -686,14 +660,11 @@ static int nfp_tun_mac_event_handler(struct notifier_block *nb,
int nfp_tunnel_config_start(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
- struct net_device *netdev;
- int err;
/* Initialise priv data for MAC offloading. */
priv->nfp_mac_off_count = 0;
mutex_init(&priv->nfp_mac_off_lock);
INIT_LIST_HEAD(&priv->nfp_mac_off_list);
- priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
mutex_init(&priv->nfp_mac_index_lock);
INIT_LIST_HEAD(&priv->nfp_mac_index_list);
ida_init(&priv->nfp_mac_off_ids);
@@ -707,27 +678,7 @@ int nfp_tunnel_config_start(struct nfp_app *app)
INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
- err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
- if (err)
- goto err_free_mac_ida;
-
- err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
- if (err)
- goto err_unreg_mac_nb;
-
- /* Parse netdevs already registered for MACs that need offloaded. */
- rtnl_lock();
- for_each_netdev(&init_net, netdev)
- nfp_tun_add_to_mac_offload_list(netdev, app);
- rtnl_unlock();
-
- return 0;
-
-err_unreg_mac_nb:
- unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
-err_free_mac_ida:
- ida_destroy(&priv->nfp_mac_off_ids);
- return err;
+ return register_netevent_notifier(&priv->nfp_tun_neigh_nb);
}
void nfp_tunnel_config_stop(struct nfp_app *app)
@@ -739,7 +690,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
struct nfp_ipv4_addr_entry *ip_entry;
struct list_head *ptr, *storage;
- unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
/* Free any memory that may be occupied by MAC list. */
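The for_each_netdev() walk removed from nfp_tunnel_config_start() is not lost functionality: register_netdevice_notifier() replays NETDEV_REGISTER (and NETDEV_UP for running devices) to a freshly registered notifier, so the app-level notifier registered in nfp_app_start() sees pre-existing netdevs too. A minimal sketch of relying on that replay, with a hypothetical handler:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int my_netdev_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	/* Also fires for devices that existed before registration. */
	if (event == NETDEV_REGISTER)
		pr_debug("saw netdev %s\n", netdev->name);
	return NOTIFY_DONE;
}

static struct notifier_block my_nb = {
	.notifier_call = my_netdev_event,
};

static int my_init(void)
{
	return register_netdevice_notifier(&my_nb);
}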
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 68a0991aac22..3a973282b2bb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -131,11 +131,100 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *old;
old = nfp_reprs_get_locked(app, type);
+ rtnl_lock();
rcu_assign_pointer(app->reprs[type], reprs);
+ rtnl_unlock();
return old;
}
+static void
+nfp_app_netdev_feat_change(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_net *nn;
+ unsigned int type;
+
+ if (!nfp_netdev_is_nfp_net(netdev))
+ return;
+ nn = netdev_priv(netdev);
+ if (nn->app != app)
+ return;
+
+ for (type = 0; type < __NFP_REPR_TYPE_MAX; type++) {
+ struct nfp_reprs *reprs;
+ unsigned int i;
+
+ reprs = rtnl_dereference(app->reprs[type]);
+ if (!reprs)
+ continue;
+
+ for (i = 0; i < reprs->num_reprs; i++) {
+ struct net_device *repr;
+
+ repr = rtnl_dereference(reprs->reprs[i]);
+ if (!repr)
+ continue;
+
+ nfp_repr_transfer_features(repr, netdev);
+ }
+ }
+}
+
+static int
+nfp_app_netdev_event(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct net_device *netdev;
+ struct nfp_app *app;
+
+ netdev = netdev_notifier_info_to_dev(ptr);
+ app = container_of(nb, struct nfp_app, netdev_nb);
+
+ /* Handle events the common code is interested in */
+ switch (event) {
+ case NETDEV_FEAT_CHANGE:
+ nfp_app_netdev_feat_change(app, netdev);
+ break;
+ }
+
+ /* Call offload specific handlers */
+ if (app->type->netdev_event)
+ return app->type->netdev_event(app, netdev, event, ptr);
+ return NOTIFY_DONE;
+}
+
+int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
+{
+ int err;
+
+ app->ctrl = ctrl;
+
+ if (app->type->start) {
+ err = app->type->start(app);
+ if (err)
+ return err;
+ }
+
+ app->netdev_nb.notifier_call = nfp_app_netdev_event;
+ err = register_netdevice_notifier(&app->netdev_nb);
+ if (err)
+ goto err_app_stop;
+
+ return 0;
+
+err_app_stop:
+ if (app->type->stop)
+ app->type->stop(app);
+ return err;
+}
+
+void nfp_app_stop(struct nfp_app *app)
+{
+ unregister_netdevice_notifier(&app->netdev_nb);
+
+ if (app->type->stop)
+ app->type->stop(app);
+}
+
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{
struct nfp_app *app;
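nfp_app_reprs_set() now publishes the reprs pointer under rtnl_lock() because nfp_app_netdev_feat_change() reads it with rtnl_dereference(): RTNL is the write-side lock that legitimizes those reads. A condensed sketch of the pairing (illustrative only):

#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>

struct reprs;
static struct reprs __rcu *reprs_ptr;

static void writer_set(struct reprs *new)
{
	rtnl_lock();
	rcu_assign_pointer(reprs_ptr, new);	/* publish under RTNL */
	rtnl_unlock();
}

static struct reprs *reader_get(void)
{
	ASSERT_RTNL();
	/* Legal because updates are also serialized by RTNL. */
	return rtnl_dereference(reprs_ptr);
}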
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 4d6ecf99b1cc..d578d856a009 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -69,6 +69,7 @@ extern const struct nfp_app_type app_abm;
* @port_get_stats_strings: get strings for extra statistics
* @start: start application logic
* @stop: stop application logic
+ * @netdev_event: Netdevice notifier event
* @ctrl_msg_rx: control message handler
* @ctrl_msg_rx_raw: handler for control messages from data queues
* @setup_tc: setup TC ndo
@@ -122,6 +123,9 @@ struct nfp_app_type {
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
+ int (*netdev_event)(struct nfp_app *app, struct net_device *netdev,
+ unsigned long event, void *ptr);
+
void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
void (*ctrl_msg_rx_raw)(struct nfp_app *app, const void *data,
unsigned int len);
@@ -151,6 +155,7 @@ struct nfp_app_type {
* @reprs: array of pointers to representors
* @type: pointer to const application ops and info
* @ctrl_mtu: MTU to set on the control vNIC (set in .init())
+ * @netdev_nb: Netdevice notifier block
* @priv: app-specific priv data
*/
struct nfp_app {
@@ -163,6 +168,9 @@ struct nfp_app {
const struct nfp_app_type *type;
unsigned int ctrl_mtu;
+
+ struct notifier_block netdev_nb;
+
void *priv;
};
@@ -264,21 +272,6 @@ nfp_app_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
return app->type->repr_change_mtu(app, netdev, new_mtu);
}
-static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
-{
- app->ctrl = ctrl;
- if (!app->type->start)
- return 0;
- return app->type->start(app);
-}
-
-static inline void nfp_app_stop(struct nfp_app *app)
-{
- if (!app->type->stop)
- return;
- app->type->stop(app);
-}
-
static inline const char *nfp_app_name(struct nfp_app *app)
{
if (!app)
@@ -430,6 +423,8 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority);
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
void nfp_app_free(struct nfp_app *app);
+int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl);
+void nfp_app_stop(struct nfp_app *app);
/* Callbacks shared between apps */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 6f0c37d09256..be37c2d6151c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -158,6 +158,7 @@ struct nfp_net_tx_desc {
__le16 data_len; /* Length of frame + meta data */
} __packed;
__le32 vals[4];
+ __le64 vals8[2];
};
};
@@ -543,6 +544,7 @@ struct nfp_net_dp {
* @reconfig_timer_active: Timer for reading reconfiguration results is pending
* @reconfig_sync_present: Some thread is performing synchronous reconfig
* @reconfig_timer: Timer for async reading of reconfig results
+ * @reconfig_in_progress_update: Update FW is processing now (debug only)
* @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -611,6 +613,7 @@ struct nfp_net {
bool reconfig_timer_active;
bool reconfig_sync_present;
struct timer_list reconfig_timer;
+ u32 reconfig_in_progress_update;
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
@@ -851,7 +854,7 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);
@@ -868,6 +871,7 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
+int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd);
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6bddfcfdec34..e97636d2e6ee 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -101,6 +101,7 @@ static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
/* ensure update is written before pinging HW */
nn_pci_flush(nn);
nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+ nn->reconfig_in_progress_update = update;
}
/* Pass 0 as update to run posted reconfigs. */
@@ -123,10 +124,14 @@ static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
if (reg == 0)
return true;
if (reg & NFP_NET_CFG_UPDATE_ERR) {
- nn_err(nn, "Reconfig error: 0x%08x\n", reg);
+ nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
+ reg, nn->reconfig_in_progress_update,
+ nn_readl(nn, NFP_NET_CFG_CTRL));
return true;
} else if (last_check) {
- nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
+ nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
+ reg, nn->reconfig_in_progress_update,
+ nn_readl(nn, NFP_NET_CFG_CTRL));
return true;
}
@@ -279,7 +284,7 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*
* Return: Negative errno on error, 0 on success
*/
-static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
{
u32 mbox = nn->tlv_caps.mbox_off;
int ret;
@@ -647,27 +652,29 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
* @txbuf: Pointer to driver soft TX descriptor
* @txd: Pointer to HW TX descriptor
* @skb: Pointer to SKB
+ * @md_bytes: Prepend length
*
* Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
* Return error on packet header greater than maximum supported LSO header size.
*/
static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_buf *txbuf,
- struct nfp_net_tx_desc *txd, struct sk_buff *skb)
+ struct nfp_net_tx_desc *txd, struct sk_buff *skb,
+ u32 md_bytes)
{
- u32 hdrlen;
+ u32 l3_offset, l4_offset, hdrlen;
u16 mss;
if (!skb_is_gso(skb))
return;
if (!skb->encapsulation) {
- txd->l3_offset = skb_network_offset(skb);
- txd->l4_offset = skb_transport_offset(skb);
+ l3_offset = skb_network_offset(skb);
+ l4_offset = skb_transport_offset(skb);
hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
} else {
- txd->l3_offset = skb_inner_network_offset(skb);
- txd->l4_offset = skb_inner_transport_offset(skb);
+ l3_offset = skb_inner_network_offset(skb);
+ l4_offset = skb_inner_transport_offset(skb);
hdrlen = skb_inner_transport_header(skb) - skb->data +
inner_tcp_hdrlen(skb);
}
@@ -676,7 +683,9 @@ static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
- txd->lso_hdrlen = hdrlen;
+ txd->l3_offset = l3_offset - md_bytes;
+ txd->l4_offset = l4_offset - md_bytes;
+ txd->lso_hdrlen = hdrlen - md_bytes;
txd->mss = cpu_to_le16(mss);
txd->flags |= PCIE_DESC_TX_LSO;
@@ -786,11 +795,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
const struct skb_frag_struct *frag;
- struct nfp_net_tx_desc *txd, txdg;
int f, nr_frags, wr_idx, md_bytes;
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_r_vector *r_vec;
struct nfp_net_tx_buf *txbuf;
+ struct nfp_net_tx_desc *txd;
struct netdev_queue *nd_q;
struct nfp_net_dp *dp;
dma_addr_t dma_addr;
@@ -801,13 +810,13 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
qidx = skb_get_queue_mapping(skb);
tx_ring = &dp->tx_rings[qidx];
r_vec = tx_ring->r_vec;
- nd_q = netdev_get_tx_queue(dp->netdev, qidx);
nr_frags = skb_shinfo(skb)->nr_frags;
if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
qidx, tx_ring->wr_p, tx_ring->rd_p);
+ nd_q = netdev_get_tx_queue(dp->netdev, qidx);
netif_tx_stop_queue(nd_q);
nfp_net_tx_xmit_more_flush(tx_ring);
u64_stats_update_begin(&r_vec->tx_sync);
@@ -851,7 +860,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
txd->lso_hdrlen = 0;
/* Do not reorder - tso may adjust pkt cnt, vlan may override fields */
- nfp_net_tx_tso(r_vec, txbuf, txd, skb);
+ nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes);
nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);
if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
txd->flags |= PCIE_DESC_TX_VLAN;
@@ -860,8 +869,10 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
/* Gather DMA */
if (nr_frags > 0) {
+ __le64 second_half;
+
/* all descs must match except for in addr, length and eop */
- txdg = *txd;
+ second_half = txd->vals8[1];
for (f = 0; f < nr_frags; f++) {
frag = &skb_shinfo(skb)->frags[f];
@@ -878,11 +889,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
tx_ring->txbufs[wr_idx].fidx = f;
txd = &tx_ring->txds[wr_idx];
- *txd = txdg;
txd->dma_len = cpu_to_le16(fsize);
nfp_desc_set_dma_addr(txd, dma_addr);
- txd->offset_eop |=
- (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
+ txd->offset_eop = md_bytes |
+ ((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0);
+ txd->vals8[1] = second_half;
}
u64_stats_update_begin(&r_vec->tx_sync);
@@ -890,16 +901,16 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
u64_stats_update_end(&r_vec->tx_sync);
}
- netdev_tx_sent_queue(nd_q, txbuf->real_len);
-
skb_tx_timestamp(skb);
+ nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
+
tx_ring->wr_p += nr_frags + 1;
if (nfp_net_tx_ring_should_stop(tx_ring))
nfp_net_tx_ring_stop(nd_q, tx_ring);
tx_ring->wr_ptr_add += nr_frags + 1;
- if (!skb->xmit_more || netif_xmit_stopped(nd_q))
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
nfp_net_tx_xmit_more_flush(tx_ring);
return NETDEV_TX_OK;
@@ -940,14 +951,10 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
- const struct skb_frag_struct *frag;
struct netdev_queue *nd_q;
u32 done_pkts = 0, done_bytes = 0;
- struct sk_buff *skb;
- int todo, nr_frags;
u32 qcp_rd_p;
- int fidx;
- int idx;
+ int todo;
if (tx_ring->wr_p == tx_ring->rd_p)
return;
@@ -961,26 +968,33 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p);
while (todo--) {
+ const struct skb_frag_struct *frag;
+ struct nfp_net_tx_buf *tx_buf;
+ struct sk_buff *skb;
+ int fidx, nr_frags;
+ int idx;
+
idx = D_IDX(tx_ring, tx_ring->rd_p++);
+ tx_buf = &tx_ring->txbufs[idx];
- skb = tx_ring->txbufs[idx].skb;
+ skb = tx_buf->skb;
if (!skb)
continue;
nr_frags = skb_shinfo(skb)->nr_frags;
- fidx = tx_ring->txbufs[idx].fidx;
+ fidx = tx_buf->fidx;
if (fidx == -1) {
/* unmap head */
- dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_single(dp->dev, tx_buf->dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
- done_pkts += tx_ring->txbufs[idx].pkt_cnt;
- done_bytes += tx_ring->txbufs[idx].real_len;
+ done_pkts += tx_buf->pkt_cnt;
+ done_bytes += tx_buf->real_len;
} else {
/* unmap fragment */
frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
+ dma_unmap_page(dp->dev, tx_buf->dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
}
@@ -988,9 +1002,9 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget)
if (fidx == nr_frags - 1)
napi_consume_skb(skb, budget);
- tx_ring->txbufs[idx].dma_addr = 0;
- tx_ring->txbufs[idx].skb = NULL;
- tx_ring->txbufs[idx].fidx = -2;
+ tx_buf->dma_addr = 0;
+ tx_buf->skb = NULL;
+ tx_buf->fidx = -2;
}
tx_ring->qcp_rd_p = qcp_rd_p;
@@ -3275,7 +3289,10 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
hdrlen = skb_inner_transport_header(skb) - skb->data +
inner_tcp_hdrlen(skb);
- if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
+ /* Assume the worst-case scenario of the longest possible
+ * metadata prepend - 8B.
+ */
+ if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
features &= ~NETIF_F_GSO_MASK;
}
@@ -3560,6 +3577,7 @@ void nfp_net_info(struct nfp_net *nn)
/**
* nfp_net_alloc() - Allocate netdev and related structure
* @pdev: PCI device
+ * @ctrl_bar: PCI IOMEM with vNIC config memory
* @needs_netdev: Whether to allocate a netdev for this vNIC
* @max_tx_rings: Maximum number of TX rings supported by device
* @max_rx_rings: Maximum number of RX rings supported by device
@@ -3570,11 +3588,12 @@ void nfp_net_info(struct nfp_net *nn)
*
* Return: NFP Net device structure, or ERR_PTR on error.
*/
-struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
- unsigned int max_tx_rings,
- unsigned int max_rx_rings)
+struct nfp_net *
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+ unsigned int max_tx_rings, unsigned int max_rx_rings)
{
struct nfp_net *nn;
+ int err;
if (needs_netdev) {
struct net_device *netdev;
@@ -3594,6 +3613,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
}
nn->dp.dev = &pdev->dev;
+ nn->dp.ctrl_bar = ctrl_bar;
nn->pdev = pdev;
nn->max_tx_rings = max_tx_rings;
@@ -3616,7 +3636,19 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
+ err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+ &nn->tlv_caps);
+ if (err)
+ goto err_free_nn;
+
return nn;
+
+err_free_nn:
+ if (nn->dp.netdev)
+ free_netdev(nn->dp.netdev);
+ else
+ vfree(nn);
+ return ERR_PTR(err);
}
/**
@@ -3889,11 +3921,6 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
- err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
- &nn->tlv_caps);
- if (err)
- return err;
-
if (nn->dp.netdev)
nfp_net_netdev_init(nn);
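A worked example of the md_bytes adjustment in nfp_net_tx_tso() above: the skb handed to the TX path already carries md_bytes of prepended metadata, while the descriptor wants offsets relative to the start of the Ethernet frame. With a 4B prepend, a plain TCP skb has its network header 18B in (4B metadata + 14B Ethernet), so the descriptor should carry l3_offset 14. Hedged sketch of the arithmetic (assumed field widths):

/* Descriptor offsets are frame-relative; the skb offsets include the
 * md_bytes of prepended metadata, so subtract it everywhere.
 */
static void fill_tso_offsets(u32 net_off, u32 trans_off, u32 tcp_hdr_len,
			     u32 md_bytes, u8 *l3, u8 *l4, u8 *lso_hdrlen)
{
	*l3 = net_off - md_bytes;			  /* 18 - 4 = 14 */
	*l4 = trans_off - md_bytes;			  /* 38 - 4 = 34 */
	*lso_hdrlen = trans_off + tcp_hdr_len - md_bytes; /* 38 + 20 - 4 = 54 */
}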
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
index f2aaef976c7d..6d5213b5bcb0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c
@@ -41,8 +41,8 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
data += 4;
if (length % NFP_NET_CFG_TLV_LENGTH_INC) {
- dev_err(dev, "TLV size not multiple of %u len:%u\n",
- NFP_NET_CFG_TLV_LENGTH_INC, length);
+ dev_err(dev, "TLV size not multiple of %u offset:%u len:%u\n",
+ NFP_NET_CFG_TLV_LENGTH_INC, offset, length);
return -EINVAL;
}
if (data + length > end) {
@@ -61,14 +61,14 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
if (!length)
return 0;
- dev_err(dev, "END TLV should be empty, has len:%d\n",
- length);
+ dev_err(dev, "END TLV should be empty, has offset:%u len:%d\n",
+ offset, length);
return -EINVAL;
case NFP_NET_CFG_TLV_TYPE_ME_FREQ:
if (length != 4) {
dev_err(dev,
- "ME FREQ TLV should be 4B, is %dB\n",
- length);
+ "ME FREQ TLV should be 4B, is %dB offset:%u\n",
+ length, offset);
return -EINVAL;
}
@@ -90,6 +90,15 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr),
offset, length);
break;
+ case NFP_NET_CFG_TLV_TYPE_REPR_CAP:
+ if (length < 4) {
+ dev_err(dev, "REPR CAP TLV short %dB < 4B offset:%u\n",
+ length, offset);
+ return -EINVAL;
+ }
+
+ caps->repr_cap = readl(data);
+ break;
default:
if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr))
break;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index d7c8518ac952..166d7f71442e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -397,6 +397,8 @@
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
+#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
+
/**
* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
@@ -464,6 +466,10 @@
* Variable, experimental IDs. IDs designated for internal development and
* experiments before a stable TLV ID has been allocated to a feature. Should
* never be present in production firmware.
+ *
+ * %NFP_NET_CFG_TLV_TYPE_REPR_CAP:
+ * Single word, equivalent of %NFP_NET_CFG_CAP for representors, features which
+ * can be used on representors.
*/
#define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0
#define NFP_NET_CFG_TLV_TYPE_RESERVED 1
@@ -472,6 +478,7 @@
#define NFP_NET_CFG_TLV_TYPE_MBOX 4
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5
#define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6
+#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7
struct device;
@@ -480,11 +487,13 @@ struct device;
* @me_freq_mhz: ME clock_freq (MHz)
* @mbox_off: vNIC mailbox area offset
* @mbox_len: vNIC mailbox area length
+ * @repr_cap: capabilities for representors
*/
struct nfp_net_tlv_caps {
u32 me_freq_mhz;
unsigned int mbox_off;
unsigned int mbox_len;
+ u32 repr_cap;
};
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 69b1c9b62e3d..ab7f2498e1c4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -8,7 +8,7 @@
static struct dentry *nfp_dir;
-static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
+static int nfp_rx_q_show(struct seq_file *file, void *data)
{
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_rx_ring *rx_ring;
@@ -65,31 +65,12 @@ out:
rtnl_unlock();
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(nfp_rx_q);
-static int nfp_net_debugfs_rx_q_open(struct inode *inode, struct file *f)
-{
- return single_open(f, nfp_net_debugfs_rx_q_read, inode->i_private);
-}
+static int nfp_tx_q_show(struct seq_file *file, void *data);
+DEFINE_SHOW_ATTRIBUTE(nfp_tx_q);
-static const struct file_operations nfp_rx_q_fops = {
- .owner = THIS_MODULE,
- .open = nfp_net_debugfs_rx_q_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek
-};
-
-static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f);
-
-static const struct file_operations nfp_tx_q_fops = {
- .owner = THIS_MODULE,
- .open = nfp_net_debugfs_tx_q_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek
-};
-
-static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
+static int nfp_tx_q_show(struct seq_file *file, void *data)
{
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_tx_ring *tx_ring;
@@ -158,18 +139,11 @@ out:
return 0;
}
-static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f)
+static int nfp_xdp_q_show(struct seq_file *file, void *data)
{
- return single_open(f, nfp_net_debugfs_tx_q_read, inode->i_private);
+ return nfp_tx_q_show(file, data);
}
-
-static const struct file_operations nfp_xdp_q_fops = {
- .owner = THIS_MODULE,
- .open = nfp_net_debugfs_tx_q_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek
-};
+DEFINE_SHOW_ATTRIBUTE(nfp_xdp_q);
void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir)
{
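DEFINE_SHOW_ATTRIBUTE(name), used above, generates the single_open() boilerplate the removed code spelled out by hand: given a name##_show() function it emits name##_open() and a const name##_fops. Roughly, per <linux/seq_file.h>:

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}

This is also why nfp_tx_q_show() only needs a forward declaration before DEFINE_SHOW_ATTRIBUTE(nfp_tx_q): the macro references the _show function by name.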
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 1e7d20468a34..08f5fdbd8e41 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -116,13 +116,13 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
/* Allocate and initialise the vNIC */
- nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
+ nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
+ n_tx_rings, n_rx_rings);
if (IS_ERR(nn))
return nn;
nn->app = pf->app;
nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
- nn->dp.ctrl_bar = ctrl_bar;
nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index c09b893c30dd..69d7aebda09b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -11,6 +11,7 @@
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
+#include "nfp_net.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_repr.h"
#include "nfp_net_sriov.h"
@@ -231,6 +232,27 @@ err_port_disable:
return err;
}
+static netdev_features_t
+nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+ netdev_features_t old_features = features;
+ netdev_features_t lower_features;
+ struct net_device *lower_dev;
+
+ lower_dev = repr->dst->u.port_info.lower_dev;
+
+ lower_features = lower_dev->features;
+ if (lower_features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+ lower_features |= NETIF_F_HW_CSUM;
+
+ features = netdev_intersect_features(features, lower_features);
+ features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC);
+ features |= NETIF_F_LLTX;
+
+ return features;
+}
+
const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_init = nfp_app_ndo_init,
.ndo_uninit = nfp_app_ndo_uninit,
@@ -248,10 +270,25 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
.ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
+ .ndo_fix_features = nfp_repr_fix_features,
.ndo_set_features = nfp_port_set_features,
.ndo_set_mac_address = eth_mac_addr,
};
+void
+nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower)
+{
+ struct nfp_repr *repr = netdev_priv(netdev);
+
+ if (repr->dst->u.port_info.lower_dev != lower)
+ return;
+
+ netdev->gso_max_size = lower->gso_max_size;
+ netdev->gso_max_segs = lower->gso_max_segs;
+
+ netdev_update_features(netdev);
+}
+
static void nfp_repr_clean(struct nfp_repr *repr)
{
unregister_netdev(repr->netdev);
@@ -281,6 +318,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
struct net_device *pf_netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_net *nn = netdev_priv(pf_netdev);
+ u32 repr_cap = nn->tlv_caps.repr_cap;
int err;
nfp_repr_set_lockdep_class(netdev);
@@ -299,6 +338,55 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
+ /* Set features the lower device can support with representors */
+ if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
+ netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
+ netdev->hw_features = NETIF_F_HIGHDMA;
+ if (repr_cap & NFP_NET_CFG_CTRL_RXCSUM_ANY)
+ netdev->hw_features |= NETIF_F_RXCSUM;
+ if (repr_cap & NFP_NET_CFG_CTRL_TXCSUM)
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ if (repr_cap & NFP_NET_CFG_CTRL_GATHER)
+ netdev->hw_features |= NETIF_F_SG;
+ if ((repr_cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
+ repr_cap & NFP_NET_CFG_CTRL_LSO2)
+ netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (repr_cap & NFP_NET_CFG_CTRL_RSS_ANY)
+ netdev->hw_features |= NETIF_F_RXHASH;
+ if (repr_cap & NFP_NET_CFG_CTRL_VXLAN) {
+ if (repr_cap & NFP_NET_CFG_CTRL_LSO)
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+ if (repr_cap & NFP_NET_CFG_CTRL_NVGRE) {
+ if (repr_cap & NFP_NET_CFG_CTRL_LSO)
+ netdev->hw_features |= NETIF_F_GSO_GRE;
+ }
+ if (repr_cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
+ netdev->hw_enc_features = netdev->hw_features;
+
+ netdev->vlan_features = netdev->hw_features;
+
+ if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN) {
+ if (repr_cap & NFP_NET_CFG_CTRL_LSO2)
+ netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
+ else
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ }
+ if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->features = netdev->hw_features;
+
+ /* Advertise but disable TSO by default. */
+ netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
+
+ netdev->priv_flags |= IFF_NO_QUEUE;
+ netdev->features |= NETIF_F_LLTX;
+
if (nfp_app_has_tc(app)) {
netdev->features |= NETIF_F_HW_TC;
netdev->hw_features |= NETIF_F_HW_TC;
@@ -442,7 +530,9 @@ int nfp_reprs_resync_phys_ports(struct nfp_app *app)
continue;
nfp_app_repr_preclean(app, netdev);
+ rtnl_lock();
rcu_assign_pointer(reprs->reprs[i], NULL);
+ rtnl_unlock();
synchronize_rcu();
nfp_repr_clean(repr);
}
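
The .ndo_fix_features hook registered above is called whenever the core
recomputes a device's feature set; nfp_repr_transfer_features() simply asks
for that recomputation when the lower device changes. A condensed sketch of
the core ordering (simplified from __netdev_update_features() in
net/core/dev.c; exact details vary by kernel version):

static void netdev_update_features_sketch(struct net_device *dev)
{
	netdev_features_t features = netdev_get_wanted_features(dev);

	/* driver adjust/veto step - nfp_repr_fix_features() runs here */
	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* core fixups for dependent feature combinations */
	features = netdev_fix_features(dev, features);

	if (dev->features != features && dev->netdev_ops->ndo_set_features)
		dev->netdev_ops->ndo_set_features(dev, features);
}
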
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index c412b94bfb97..e0f13dfe1f39 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -92,6 +92,8 @@ nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set,
unsigned int id);
void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
+void
+nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower);
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index d2c1e9ea5668..1145849ca7ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -172,7 +172,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
rx_bar_off = NFP_PCIE_QUEUE(startq);
/* Allocate and initialise the netdev */
- nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings);
+ nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_ctrl_unmap;
@@ -180,7 +180,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
vf->nn = nn;
nn->fw_ver = fw_ver;
- nn->dp.ctrl_bar = ctrl_bar;
nn->dp.is_vf = 1;
nn->stride_tx = stride;
nn->stride_rx = stride;
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 052b3d2c07a1..c662c6f5bee3 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -912,7 +912,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static void __init get_mac_address(struct net_device *dev)
+static void get_mac_address(struct net_device *dev)
{
struct w90p910_ether *ether = netdev_priv(dev);
struct platform_device *pdev;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 25382f8fbb70..89d17399fb5a 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -280,7 +280,7 @@
#define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
/*
- * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared
+ * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared
* register definitions
*/
#define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
@@ -291,7 +291,7 @@
#define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)
/*
- * rxfliterctrl register definitions
+ * rxfilterctrl register definitions
*/
#define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
#define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)
@@ -783,8 +783,6 @@ static int lpc_mii_probe(struct net_device *ndev)
phy_set_max_speed(phydev, SPEED_100);
- phydev->advertising = phydev->supported;
-
pldat->link = 0;
pldat->speed = 0;
pldat->duplex = -1;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 0ea141ece19e..6547a9dd5935 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1125,7 +1125,8 @@ netxen_validate_firmware(struct netxen_adapter *adapter)
return -EINVAL;
}
val = nx_get_bios_version(adapter);
- netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
+ if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios))
+ return -EIO;
if ((__force u32)val != bios) {
dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
fw_name[fw_type]);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index d9a03aba0e02..24a90163775e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -296,6 +296,12 @@ enum qed_wol_support {
QED_WOL_SUPPORT_PME,
};
+enum qed_db_rec_exec {
+ DB_REC_DRY_RUN,
+ DB_REC_REAL_DEAL,
+ DB_REC_ONCE,
+};
+
struct qed_hw_info {
/* PCI personality */
enum qed_pci_personality personality;
@@ -425,6 +431,14 @@ struct qed_qm_info {
u8 num_pf_rls;
};
+struct qed_db_recovery_info {
+ struct list_head list;
+
+ /* Lock to protect the doorbell recovery mechanism list */
+ spinlock_t lock;
+ u32 db_recovery_counter;
+};
+
struct storm_stats {
u32 address;
u32 len;
@@ -522,6 +536,7 @@ struct qed_simd_fp_handler {
enum qed_slowpath_wq_flag {
QED_SLOWPATH_MFW_TLV_REQ,
+ QED_SLOWPATH_PERIODIC_DB_REC,
};
struct qed_hwfn {
@@ -640,6 +655,9 @@ struct qed_hwfn {
/* L2-related */
struct qed_l2_info *p_l2_info;
+ /* Mechanism for recovering from doorbell drop */
+ struct qed_db_recovery_info db_recovery_info;
+
/* Nvm images number and attributes */
struct qed_nvm_image_info nvm_info;
@@ -652,11 +670,12 @@ struct qed_hwfn {
struct delayed_work iov_task;
unsigned long iov_task_flags;
#endif
-
- struct z_stream_s *stream;
+ struct z_stream_s *stream;
+ bool slowpath_wq_active;
struct workqueue_struct *slowpath_wq;
struct delayed_work slowpath_task;
unsigned long slowpath_task_flags;
+ u32 periodic_db_rec_count;
};
struct pci_params {
@@ -897,6 +916,12 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+/* doorbell recovery mechanism */
+void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
+ enum qed_db_rec_exec db_exec);
+bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
+
/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)
@@ -931,4 +956,6 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
union qed_mfw_tlv_data *tlv_data);
void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
+
+void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 78a638ec7c0a..979f1e4bc18b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -6071,7 +6071,7 @@ static const char * const s_igu_fifo_error_strs[] = {
"no error",
"length error",
"function disabled",
- "VF sent command to attnetion address",
+ "VF sent command to attention address",
"host sent prod update command",
"read of during interrupt register while in MIMD mode",
"access to PXP BAR reserved address",
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 88a8576ca9ce..8f6551421945 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -66,6 +66,318 @@
static DEFINE_SPINLOCK(qm_lock);
+/******************** Doorbell Recovery *******************/
+/* The doorbell recovery mechanism consists of a list of entries which represent
+ * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
+ * entity needs to register with the mechanism and provide the parameters
+ * describing its doorbell, including a location where the last used doorbell data
+ * can be found. The doorbell execute function will traverse the list and
+ * doorbell all of the registered entries.
+ */
+struct qed_db_recovery_entry {
+ struct list_head list_entry;
+ void __iomem *db_addr;
+ void *db_data;
+ enum qed_db_rec_width db_width;
+ enum qed_db_rec_space db_space;
+ u8 hwfn_idx;
+};
+
+/* Display a single doorbell recovery entry */
+static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
+ struct qed_db_recovery_entry *db_entry,
+ char *action)
+{
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
+ action,
+ db_entry,
+ db_entry->db_addr,
+ db_entry->db_data,
+ db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
+ db_entry->db_space == DB_REC_USER ? "user" : "kernel",
+ db_entry->hwfn_idx);
+}
+
+/* Doorbell address sanity (address within doorbell bar range) */
+static bool qed_db_rec_sanity(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data)
+{
+ /* Make sure doorbell address is within the doorbell bar */
+ if (db_addr < cdev->doorbells ||
+ (u8 __iomem *)db_addr >
+ (u8 __iomem *)cdev->doorbells + cdev->db_size) {
+ WARN(true,
+ "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
+ db_addr,
+ cdev->doorbells,
+ (u8 __iomem *)cdev->doorbells + cdev->db_size);
+ return false;
+ }
+
+ /* Make sure the doorbell data pointer is not NULL */
+ if (!db_data) {
+ WARN(true, "Illegal doorbell data pointer: %p", db_data);
+ return false;
+ }
+
+ return true;
+}
+
+/* Find hwfn according to the doorbell address */
+static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev,
+ void __iomem *db_addr)
+{
+ struct qed_hwfn *p_hwfn;
+
+ /* In CMT, the doorbell bar is split down the middle between engine 0 and engine 1 */
+ if (cdev->num_hwfns > 1)
+ p_hwfn = db_addr < cdev->hwfns[1].doorbells ?
+ &cdev->hwfns[0] : &cdev->hwfns[1];
+ else
+ p_hwfn = QED_LEADING_HWFN(cdev);
+
+ return p_hwfn;
+}
+
+/* Add a new entry to the doorbell recovery mechanism */
+int qed_db_recovery_add(struct qed_dev *cdev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space)
+{
+ struct qed_db_recovery_entry *db_entry;
+ struct qed_hwfn *p_hwfn;
+
+ /* Short-circuit VFs, for now */
+ if (IS_VF(cdev)) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return 0;
+ }
+
+ /* Sanitize doorbell address */
+ if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+ return -EINVAL;
+
+ /* Obtain hwfn from doorbell address */
+ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
+
+ /* Create entry */
+ db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL);
+ if (!db_entry) {
+ DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n");
+ return -ENOMEM;
+ }
+
+ /* Populate entry */
+ db_entry->db_addr = db_addr;
+ db_entry->db_data = db_data;
+ db_entry->db_width = db_width;
+ db_entry->db_space = db_space;
+ db_entry->hwfn_idx = p_hwfn->my_id;
+
+ /* Display */
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list);
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+
+ return 0;
+}
+
+/* Remove an entry from the doorbell recovery mechanism */
+int qed_db_recovery_del(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+ struct qed_hwfn *p_hwfn;
+ int rc = -EINVAL;
+
+ /* Short-circuit VFs, for now */
+ if (IS_VF(cdev)) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return 0;
+ }
+
+ /* Sanitize doorbell address */
+ if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+ return -EINVAL;
+
+ /* Obtain hwfn from doorbell address */
+ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_for_each_entry(db_entry,
+ &p_hwfn->db_recovery_info.list, list_entry) {
+ /* Search by db_data address, since db_addr is not unique (RoCE) */
+ if (db_entry->db_data == db_data) {
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
+ list_del(&db_entry->list_entry);
+ rc = 0;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+
+ if (rc == -EINVAL)
+ DP_NOTICE(p_hwfn,
+ "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
+ db_data, db_addr);
+ else
+ kfree(db_entry);
+
+ return rc;
+}
+
+/* Initialize the doorbell recovery mechanism */
+static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n");
+
+ /* Make sure db_size was set in cdev */
+ if (!p_hwfn->cdev->db_size) {
+ DP_ERR(p_hwfn->cdev, "db_size not set\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list);
+ spin_lock_init(&p_hwfn->db_recovery_info.lock);
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+
+ return 0;
+}
+
+/* Destroy the doorbell recovery mechanism */
+static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n");
+ if (!list_empty(&p_hwfn->db_recovery_info.list)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
+ while (!list_empty(&p_hwfn->db_recovery_info.list)) {
+ db_entry =
+ list_first_entry(&p_hwfn->db_recovery_info.list,
+ struct qed_db_recovery_entry,
+ list_entry);
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
+ list_del(&db_entry->list_entry);
+ kfree(db_entry);
+ }
+ }
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+}
+
+/* Print the content of the doorbell recovery mechanism */
+void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+
+ DP_NOTICE(p_hwfn,
+ "Displaying doorbell recovery database. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_for_each_entry(db_entry,
+ &p_hwfn->db_recovery_info.list, list_entry) {
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
+ }
+
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+}
+
+/* Ring the doorbell of a single doorbell recovery entry */
+static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
+ struct qed_db_recovery_entry *db_entry,
+ enum qed_db_rec_exec db_exec)
+{
+ if (db_exec != DB_REC_ONCE) {
+ /* Print according to width */
+ if (db_entry->db_width == DB_REC_WIDTH_32B) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "%s doorbell address %p data %x\n",
+ db_exec == DB_REC_DRY_RUN ?
+ "would have rung" : "ringing",
+ db_entry->db_addr,
+ *(u32 *)db_entry->db_data);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "%s doorbell address %p data %llx\n",
+ db_exec == DB_REC_DRY_RUN ?
+ "would have rung" : "ringing",
+ db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
+ }
+ }
+
+ /* Sanity */
+ if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
+ db_entry->db_data))
+ return;
+
+ /* Flush the write-combined buffer. Since there are multiple doorbelling
+ * entities using the same address, if we don't flush, a transaction
+ * could be lost.
+ */
+ wmb();
+
+ /* Ring the doorbell */
+ if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DIRECT_REG_WR(db_entry->db_addr,
+ *(u32 *)(db_entry->db_data));
+ else
+ DIRECT_REG_WR64(db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
+ }
+
+ /* Flush the write-combined buffer. The next doorbell may come from a
+ * different entity to the same address...
+ */
+ wmb();
+}
+
+/* Traverse the doorbell recovery entry list and ring all the doorbells */
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
+ enum qed_db_rec_exec db_exec)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+
+ if (db_exec != DB_REC_ONCE) {
+ DP_NOTICE(p_hwfn,
+ "Executing doorbell recovery. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* Track amount of times recovery was executed */
+ p_hwfn->db_recovery_info.db_recovery_counter++;
+ }
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_for_each_entry(db_entry,
+ &p_hwfn->db_recovery_info.list, list_entry) {
+ qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
+ if (db_exec == DB_REC_ONCE)
+ break;
+ }
+
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+}
+
+/******************** Doorbell Recovery end ****************/
+
#define QED_MIN_DPIS (4)
#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
@@ -194,6 +506,9 @@ void qed_resc_free(struct qed_dev *cdev)
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn);
+
+ /* Destroy doorbell recovery mechanism */
+ qed_db_recovery_teardown(p_hwfn);
}
}
@@ -969,6 +1284,11 @@ int qed_resc_alloc(struct qed_dev *cdev)
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
u32 n_eqes, num_cons;
+ /* Initialize the doorbell recovery mechanism */
+ rc = qed_db_recovery_setup(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
/* First allocate the context manager structure */
rc = qed_cxt_mngr_alloc(p_hwfn);
if (rc)
@@ -1468,6 +1788,14 @@ enum QED_ROCE_EDPM_MODE {
QED_ROCE_EDPM_MODE_DISABLE = 2,
};
+bool qed_edpm_enabled(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
+ return false;
+
+ return true;
+}
+
static int
qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
@@ -1537,13 +1865,13 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->wid_count = (u16) n_cpus;
DP_INFO(p_hwfn,
- "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
+ "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
norm_regsize,
pwm_regsize,
p_hwfn->dpi_size,
p_hwfn->dpi_count,
- ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
- "disabled" : "enabled");
+ (!qed_edpm_enabled(p_hwfn)) ?
+ "disabled" : "enabled", PAGE_SIZE);
if (rc) {
DP_ERR(p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index defdda1ffaa2..acccd85170aa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -472,6 +472,34 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
int
qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle);
+/**
+ * @brief db_recovery_add - add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @param cdev
+ * @param db_addr - doorbell address
+ * @param db_data - address where the doorbell data is stored
+ * @param db_width - doorbell is 32b or 64b
+ * @param db_space - doorbell recovery addresses are user or kernel space
+ */
+int qed_db_recovery_add(struct qed_dev *cdev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space);
+
+/**
+ * @brief db_recovery_del - remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+ * @param cdev
+ * @param db_addr - doorbell address
+ * @param db_data - address where db_data is stored. Serves as key for the
+ * entry to delete.
+ */
+int qed_db_recovery_del(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data);
+
const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
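
The two entry points declared above are meant to bracket a doorbelling
queue's lifetime: register when the queue is brought up, ring doorbells
through the registered data, delete on teardown. A minimal sketch with a
hypothetical queue type (the in-tree users - LL2, SPQ and qede Tx queues -
follow the same shape):

struct my_txq {				/* hypothetical example queue */
	void __iomem *db_addr;		/* within the doorbell BAR */
	struct core_db_data db_data;	/* last-used doorbell payload */
};

static int my_txq_start(struct qed_dev *cdev, struct my_txq *q)
{
	/* db_data must outlive the entry; recovery re-reads it on replay */
	return qed_db_recovery_add(cdev, q->db_addr, &q->db_data,
				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
}

static void my_txq_stop(struct qed_dev *cdev, struct my_txq *q)
{
	/* db_data is the lookup key - db_addr alone is not unique */
	qed_db_recovery_del(cdev, q->db_addr, &q->db_data);
}
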
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 5c221ebaa7b3..b13cfb449d8f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12655,6 +12655,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
@@ -12814,6 +12815,11 @@ struct public_drv_mb {
union drv_union_data union_data;
};
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24
+
enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_LINK_CHANGE,
MFW_DRV_MSG_FLR_FW_ACK_FAILED,
@@ -12831,8 +12837,9 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_BW_UPDATE11,
- MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_RESERVED,
MFW_DRV_MSG_GET_TLV_REQ,
+ MFW_DRV_MSG_OEM_CFG_UPDATE,
MFW_DRV_MSG_MAX
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index b22f464ea3fa..92340919d852 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -361,29 +361,147 @@ static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn)
return 0;
}
-#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
-#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
-#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f)
-#define QED_DORQ_ATTENTION_SIZE_SHIFT (16)
+#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
+#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define QED_DORQ_ATTENTION_SIZE_SHIFT (16)
+
+#define QED_DB_REC_COUNT 1000
+#define QED_DB_REC_INTERVAL 100
+
+static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 count = QED_DB_REC_COUNT;
+ u32 usage = 1;
+
+ /* Wait for usage to zero or count to run out. This is necessary since
+ * EDPM doorbell transactions can take multiple 64b cycles, and as such
+ * can "split" over the PCI. A doorbell drop can therefore happen with
+ * half an EDPM in the queue and the other half dropped. Another EDPM
+ * doorbell to the same address (from the doorbell recovery mechanism or
+ * from the doorbelling entity) could then have its first half dropped
+ * and its second half interpreted as a continuation of the first. To
+ * prevent such malformed doorbells from reaching the device, flush the
+ * queue before releasing the overflow sticky indication.
+ */
+ while (count-- && usage) {
+ usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
+ udelay(QED_DB_REC_INTERVAL);
+ }
+
+ /* should have been depleted by now */
+ if (usage) {
+ DP_NOTICE(p_hwfn->cdev,
+ "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
+ QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 overflow;
+ int rc;
+
+ overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
+ if (!overflow) {
+ qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+ return 0;
+ }
+
+ if (qed_edpm_enabled(p_hwfn)) {
+ rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
+ /* Flush any pending (e)dpm as they may never arrive */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
+ /* Release overflow sticky indication (stop silently dropping everything) */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+
+ /* Repeat all last doorbells (doorbell drop recovery) */
+ qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+
+ return 0;
+}
+
static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
{
- u32 reason;
+ u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+ struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+ int rc;
- reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
- QED_DORQ_ATTENTION_REASON_MASK;
- if (reason) {
- u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- DORQ_REG_DB_DROP_DETAILS);
+ int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
+ DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
- DP_INFO(p_hwfn->cdev,
- "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n",
- qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- DORQ_REG_DB_DROP_DETAILS_ADDRESS),
- (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK),
- GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
- reason);
+ /* int_sts may be zero since all PFs were interrupted for doorbell
+ * overflow but another one already handled it. We can abort here; if
+ * this PF also requires overflow recovery, we will be interrupted again.
+ * The masked almost-full indication may also be set; ignore it.
+ */
+ if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
+ return 0;
+
+ /* check if db_drop or overflow happened */
+ if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
+ /* Obtain data about db drop/overflow */
+ first_drop_reason = qed_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_REASON) &
+ QED_DORQ_ATTENTION_REASON_MASK;
+ details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
+ address = qed_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_ADDRESS);
+ all_drops_reason = qed_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_REASON);
+
+ /* Log info */
+ DP_NOTICE(p_hwfn->cdev,
+ "Doorbell drop occurred\n"
+ "Address\t\t0x%08x\t(second BAR address)\n"
+ "FID\t\t0x%04x\t\t(Opaque FID)\n"
+ "Size\t\t0x%04x\t\t(in bytes)\n"
+ "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
+ "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
+ address,
+ GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
+ GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
+ first_drop_reason, all_drops_reason);
+
+ rc = qed_db_rec_handler(p_hwfn, p_ptt);
+ qed_periodic_db_rec_start(p_hwfn);
+ if (rc)
+ return rc;
+
+ /* Clear the doorbell drop details and prepare for next drop */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
+
+ /* Mark the interrupt as handled (note: even if the drop was due to a
+ * reason other than overflow, we mark it as handled)
+ */
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_INT_STS_WR,
+ DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
+
+ /* If there are no indications other than drop indications, success */
+ if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
+ DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
+ return 0;
}
/* Some other indication was present - non-recoverable */
+ DP_INFO(p_hwfn, "DORQ fatal attention\n");
+
return -EINVAL;
}
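
The new QED_DORQ_ATTENTION_OPAQUE_SHIFT definition exists so the drop
details register can be decoded with qed's GET_FIELD() macro, which pairs
a value with the matching NAME##_MASK/NAME##_SHIFT definitions. An
illustrative decode (assuming GET_FIELD(value, name) expands to
(((value) >> name##_SHIFT) & name##_MASK), as elsewhere in qed):

static void dorq_drop_decode_sketch(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	u32 details, size_bytes;
	u16 opaque_fid;

	details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);

	/* opaque FID in bits 15:0, size (units of 4 bytes) in bits 22:16 */
	opaque_fid = GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE);
	size_bytes = GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4;

	DP_NOTICE(p_hwfn, "drop: fid 0x%04x, size %u bytes\n",
		  opaque_fid, size_bytes);
}
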
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 54b4ee0acfd7..d81a62ebd524 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -190,6 +190,16 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
*/
void qed_int_disable_post_isr_release(struct qed_dev *cdev);
+/**
+ * @brief - Doorbell Recovery handler.
+ * Run DB_REAL_DEAL doorbell recovery in case of PF overflow
+ * (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
#define QED_CAU_DEF_RX_TIMER_RES 0
#define QED_CAU_DEF_TX_TIMER_RES 0
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index aa633381aa47..90afd514ffe1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1085,7 +1085,14 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
- return qed_spq_post(p_hwfn, p_ent, NULL);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr,
+ &p_tx->db_msg, DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ return rc;
}
static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
@@ -1119,9 +1126,11 @@ static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
@@ -1542,6 +1551,13 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
qed_db_addr(p_ll2_conn->cid,
DQ_DEMS_LEGACY);
+ /* prepare db data */
+ SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_TX_BD_PROD_CMD);
+ p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
if (rc)
@@ -1780,7 +1796,6 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct qed_ll2_tx_packet *p_pkt = NULL;
- struct core_db_data db_msg = { 0, 0, 0 };
u16 bd_prod;
/* If there are missing BDs, don't do anything now */
@@ -1809,24 +1824,19 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
}
- SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
- SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
- SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
- DQ_XCM_CORE_TX_BD_PROD_CMD);
- db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
- db_msg.spq_prod = cpu_to_le16(bd_prod);
+ p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod);
/* Make sure the BDs data is updated before ringing the doorbell */
wmb();
- DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg));
+ DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg));
DP_VERBOSE(p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
p_ll2_conn->queue_id,
p_ll2_conn->cid,
- p_ll2_conn->input.conn_type, db_msg.spq_prod);
+ p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod);
}
int qed_ll2_prepare_tx_packet(void *cxt,
@@ -2496,6 +2506,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
DP_NOTICE(cdev,
"Unable to map frag - dropping packet\n");
+ rc = -ENOMEM;
goto err;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 1a5c1ae01474..5f01fbd3c073 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -103,6 +103,7 @@ struct qed_ll2_tx_queue {
struct qed_ll2_tx_packet cur_completing_packet;
u16 cur_completing_bd_idx;
void __iomem *doorbell_addr;
+ struct core_db_data db_msg;
u16 bds_idx;
u16 cur_send_frag_num;
u16 cur_completing_frag_num;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index fff7f04d4525..6adf5bda9811 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -966,9 +966,47 @@ static void qed_update_pf_params(struct qed_dev *cdev,
}
}
+#define QED_PERIODIC_DB_REC_COUNT 100
+#define QED_PERIODIC_DB_REC_INTERVAL_MS 100
+#define QED_PERIODIC_DB_REC_INTERVAL \
+ msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
+#define QED_PERIODIC_DB_REC_WAIT_COUNT 10
+#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
+ (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)
+
+static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
+ enum qed_slowpath_wq_flag wq_flag,
+ unsigned long delay)
+{
+ if (!hwfn->slowpath_wq_active)
+ return -EINVAL;
+
+ /* Memory barrier for setting atomic bit */
+ smp_mb__before_atomic();
+ set_bit(wq_flag, &hwfn->slowpath_task_flags);
+ smp_mb__after_atomic();
+ queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
+
+ return 0;
+}
+
+void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
+{
+ /* Reset periodic Doorbell Recovery counter */
+ p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;
+
+ /* Don't schedule periodic Doorbell Recovery if already scheduled */
+ if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
+ &p_hwfn->slowpath_task_flags))
+ return;
+
+ qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
+ QED_PERIODIC_DB_REC_INTERVAL);
+}
+
static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
- int i;
+ int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;
if (IS_VF(cdev))
return;
@@ -977,6 +1015,15 @@ static void qed_slowpath_wq_stop(struct qed_dev *cdev)
if (!cdev->hwfns[i].slowpath_wq)
continue;
+ /* Stop queuing new delayed works */
+ cdev->hwfns[i].slowpath_wq_active = false;
+
+ /* Wait until the last periodic doorbell recovery is executed */
+ while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
+ &cdev->hwfns[i].slowpath_task_flags) &&
+ sleep_count--)
+ msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);
+
flush_workqueue(cdev->hwfns[i].slowpath_wq);
destroy_workqueue(cdev->hwfns[i].slowpath_wq);
}
@@ -989,7 +1036,10 @@ static void qed_slowpath_task(struct work_struct *work)
struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
if (!ptt) {
- queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+ if (hwfn->slowpath_wq_active)
+ queue_delayed_work(hwfn->slowpath_wq,
+ &hwfn->slowpath_task, 0);
+
return;
}
@@ -997,6 +1047,15 @@ static void qed_slowpath_task(struct work_struct *work)
&hwfn->slowpath_task_flags))
qed_mfw_process_tlv_req(hwfn, ptt);
+ if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
+ &hwfn->slowpath_task_flags)) {
+ qed_db_rec_handler(hwfn, ptt);
+ if (hwfn->periodic_db_rec_count--)
+ qed_slowpath_delayed_work(hwfn,
+ QED_SLOWPATH_PERIODIC_DB_REC,
+ QED_PERIODIC_DB_REC_INTERVAL);
+ }
+
qed_ptt_release(hwfn, ptt);
}
@@ -1023,6 +1082,7 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
}
INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
+ hwfn->slowpath_wq_active = true;
}
return 0;
@@ -1939,21 +1999,30 @@ exit:
* 0B | 0x3 [command index] |
* 4B | b'0: check_response? | b'1-31 reserved |
* 8B | File-type | reserved |
+ * 12B | Image length in bytes |
* \----------------------------------------------------------------------/
* Start a new file of the provided type
*/
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
const u8 **data, bool *check_resp)
{
+ u32 file_type, file_size = 0;
int rc;
*data += 4;
*check_resp = !!(**data & BIT(0));
*data += 4;
+ file_type = **data;
DP_VERBOSE(cdev, NETIF_MSG_DRV,
- "About to start a new file of type %02x\n", **data);
- rc = qed_mcp_nvm_put_file_begin(cdev, **data);
+ "About to start a new file of type %02x\n", file_type);
+ if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
+ *data += 4;
+ file_size = *((u32 *)(*data));
+ }
+
+ rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
+ (u8 *)(&file_size), 4);
*data += 4;
return rc;
@@ -2315,6 +2384,8 @@ const struct qed_common_ops qed_common_ops_pass = {
.update_mac = &qed_update_mac,
.update_mtu = &qed_update_mtu,
.update_wol = &qed_update_wol,
+ .db_recovery_add = &qed_db_recovery_add,
+ .db_recovery_del = &qed_db_recovery_del,
.read_module_eeprom = &qed_read_module_eeprom,
};
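
The periodic recovery plumbing above is a standard delayed-work lifecycle:
a flag bit requests the work, the handler test-and-clears it and re-arms
itself until a budget runs out, and teardown flips slowpath_wq_active
before draining and flushing. The three phases side by side (names from
this patch, error paths trimmed):

/* arm: qed_periodic_db_rec_start() */
set_bit(QED_SLOWPATH_PERIODIC_DB_REC, &hwfn->slowpath_task_flags);
queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task,
		   QED_PERIODIC_DB_REC_INTERVAL);

/* handle: inside qed_slowpath_task() */
if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		       &hwfn->slowpath_task_flags)) {
	qed_db_rec_handler(hwfn, ptt);
	if (hwfn->periodic_db_rec_count--)	/* re-arm until budget spent */
		qed_slowpath_delayed_work(hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
					  QED_PERIODIC_DB_REC_INTERVAL);
}

/* stop: qed_slowpath_wq_stop() - refuse new work, drain, then flush */
hwfn->slowpath_wq_active = false;
while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		&hwfn->slowpath_task_flags) && sleep_count--)
	msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);
flush_workqueue(hwfn->slowpath_wq);
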
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index a96364df4320..e7f18e34ff0d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1619,7 +1619,7 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_sp_pf_update_stag(p_hwfn);
}
- DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
/* Acknowledge the MFW */
@@ -1641,7 +1641,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
OEM_CFG_CHANNEL_TYPE_OFFSET;
if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
- DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Incorrect UFP Channel type %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
if (val == OEM_CFG_SCHED_TYPE_ETS) {
@@ -1650,7 +1652,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
} else {
p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
- DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Unknown UFP scheduling mode %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
}
qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
@@ -1665,13 +1669,15 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
} else {
p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
- DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
+ DP_NOTICE(p_hwfn,
+ "Unknown Host priority control %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
}
DP_NOTICE(p_hwfn,
- "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
- p_hwfn->ufp_info.mode,
- p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
+ "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
+ p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+ p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
}
static int
@@ -2739,24 +2745,6 @@ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
return 0;
}
-int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr)
-{
- struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
- struct qed_ptt *p_ptt;
- u32 resp, param;
- int rc;
-
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return -EBUSY;
- rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
- &resp, &param);
- cdev->mcp_nvm_resp = resp;
- qed_ptt_release(p_hwfn, p_ptt);
-
- return rc;
-}
-
int qed_mcp_nvm_write(struct qed_dev *cdev,
u32 cmd, u32 addr, u8 *p_buf, u32 len)
{
@@ -2770,6 +2758,9 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
return -EBUSY;
switch (cmd) {
+ case QED_PUT_FILE_BEGIN:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
+ break;
case QED_PUT_FILE_DATA:
nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
break;
@@ -2782,10 +2773,14 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
goto out;
}
+ buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
while (buf_idx < len) {
- buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
- nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
- addr) + buf_idx;
+ if (cmd == QED_PUT_FILE_BEGIN)
+ nvm_offset = addr;
+ else
+ nvm_offset = ((buf_size <<
+ DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
+ buf_idx;
rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
&resp, &param, buf_size,
(u32 *)&p_buf[buf_idx]);
@@ -2810,7 +2805,19 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
usleep_range(1000, 2000);
- buf_idx += buf_size;
+ /* For MBI upgrade, MFW response includes the next buffer offset
+ * to be delivered to MFW.
+ */
+ if (param && cmd == QED_PUT_FILE_DATA) {
+ buf_idx = QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
+ buf_size = QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
+ } else {
+ buf_idx += buf_size;
+ buf_size = min_t(u32, (len - buf_idx),
+ MCP_DRV_NVM_BUF_LEN);
+ }
}
cdev->mcp_nvm_resp = resp;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 1adfe52b3905..eddf67798d6f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -543,16 +543,6 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
u32 cmd, u32 addr, u8 *p_buf, u32 len);
/**
- * @brief Put file begin
- *
- * @param cdev
- * @param addr - nvm offset
- *
- * @return int - 0 - operation was successful.
- */
-int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr);
-
-/**
* @brief Check latest response
*
* @param cdev
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 2440970882c4..8939ed6e08b7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -1243,6 +1243,56 @@
0x1701534UL
#define TSEM_REG_DBG_FORCE_FRAME \
0x1701538UL
+#define DORQ_REG_PF_USAGE_CNT \
+ 0x1009c0UL
+#define DORQ_REG_PF_OVFL_STICKY \
+ 0x1009d0UL
+#define DORQ_REG_DPM_FORCE_ABORT \
+ 0x1009d8UL
+#define DORQ_REG_INT_STS \
+ 0x100180UL
+#define DORQ_REG_INT_STS_ADDRESS_ERROR \
+ (0x1UL << 0)
+#define DORQ_REG_INT_STS_WR \
+ 0x100188UL
+#define DORQ_REG_DB_DROP_DETAILS_REL \
+ 0x100a28UL
+#define DORQ_REG_INT_STS_ADDRESS_ERROR_SHIFT \
+ 0
+#define DORQ_REG_INT_STS_DB_DROP \
+ (0x1UL << 1)
+#define DORQ_REG_INT_STS_DB_DROP_SHIFT \
+ 1
+#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR \
+ (0x1UL << 2)
+#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR_SHIFT \
+ 2
+#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL\
+ (0x1UL << 3)
+#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL_SHIFT \
+ 3
+#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR \
+ (0x1UL << 4)
+#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR_SHIFT \
+ 4
+#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR \
+ (0x1UL << 5)
+#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR_SHIFT \
+ 5
+#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR \
+ (0x1UL << 6)
+#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR_SHIFT \
+ 6
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR \
+ (0x1UL << 7)
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR_SHIFT \
+ 7
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR \
+ (0x1UL << 8)
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR_SHIFT \
+ 8
+#define DORQ_REG_DB_DROP_DETAILS_REASON \
+ 0x100a20UL
#define MSEM_REG_DBG_SELECT \
0x1801528UL
#define MSEM_REG_DBG_DWORD_ENABLE \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 3157c0d99441..4179c9013fc6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -227,7 +227,9 @@ struct qed_spq {
u32 comp_count;
u32 cid;
- qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
+ u32 db_addr_offset;
+ struct core_db_data db_data;
+ qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
};
/**
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 0a9c5bb0fa48..eb88bbc6b193 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -252,9 +252,9 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+ struct core_db_data *p_db_data = &p_spq->db_data;
u16 echo = qed_chain_get_prod_idx(p_chain);
struct slow_path_element *elem;
- struct core_db_data db;
p_ent->elem.hdr.echo = cpu_to_le16(echo);
elem = qed_chain_produce(p_chain);
@@ -266,27 +266,22 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
*elem = p_ent->elem; /* struct assignment */
/* send a doorbell on the slow hwfn session */
- memset(&db, 0, sizeof(db));
- SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
- SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
- SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
- DQ_XCM_CORE_SPQ_PROD_CMD);
- db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
- db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
+ p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
/* make sure the SPQE is updated before the doorbell */
wmb();
- DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+ DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
/* make sure doorbell is rang */
wmb();
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
- qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
- p_spq->cid, db.params, db.agg_flags,
- qed_chain_get_prod_idx(p_chain));
+ p_spq->db_addr_offset,
+ p_spq->cid,
+ p_db_data->params,
+ p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));
return 0;
}
@@ -490,8 +485,11 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_virt = NULL;
+ struct core_db_data *p_db_data;
+ void __iomem *db_addr;
dma_addr_t p_phys = 0;
u32 i, capacity;
+ int rc;
INIT_LIST_HEAD(&p_spq->pending);
INIT_LIST_HEAD(&p_spq->completion_pending);
@@ -528,6 +526,25 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
/* reset the chain itself */
qed_chain_reset(&p_spq->chain);
+
+ /* Initialize the address/data of the SPQ doorbell */
+ p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
+ p_db_data = &p_spq->db_data;
+ memset(p_db_data, 0, sizeof(*p_db_data));
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+ /* Register the SPQ doorbell with the doorbell recovery mechanism */
+ db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
+ p_spq->db_addr_offset);
+ rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
@@ -575,11 +592,17 @@ spq_allocate_fail:
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
+ void __iomem *db_addr;
u32 capacity;
if (!p_spq)
return;
+ /* Delete the SPQ doorbell from the doorbell recovery mechanism */
+ db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
+ p_spq->db_addr_offset);
+ qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);
+
if (p_spq->p_virt) {
capacity = qed_chain_get_capacity(&p_spq->chain);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
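
The recurring motif in this hunk - moving the doorbell payload from a
stack variable into the long-lived queue structure - is what makes
recovery possible at all: qed_db_recovery_ring() dereferences the
registered db_data pointer at replay time, so the payload must survive
the original doorbell. In miniature (illustrative fragment):

/* broken for recovery: the payload dies with the stack frame */
struct core_db_data db = { 0 };
DIRECT_REG_WR(db_addr, *(u32 *)&db);

/* recovery-friendly: payload lives in the queue, registered once */
p_spq->db_data.spq_prod = cpu_to_le16(prod);
DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)&p_spq->db_data);
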
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index de98a974673b..613249d1e967 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -168,6 +168,13 @@ struct qede_ptp;
#define QEDE_RFS_MAX_FLTR 256
+enum qede_flags_bit {
+ QEDE_FLAGS_IS_VF = 0,
+ QEDE_FLAGS_LINK_REQUESTED,
+ QEDE_FLAGS_PTP_TX_IN_PRORGESS,
+ QEDE_FLAGS_TX_TIMESTAMPING_EN
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -177,10 +184,7 @@ struct qede_dev {
u8 dp_level;
unsigned long flags;
-#define QEDE_FLAG_IS_VF BIT(0)
-#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
-#define QEDE_TX_TIMESTAMPING_EN BIT(1)
-#define QEDE_FLAGS_PTP_TX_IN_PRORGESS BIT(2)
+#define IS_VF(edev) (test_bit(QEDE_FLAGS_IS_VF, &(edev)->flags))
const struct qed_eth_ops *ops;
struct qede_ptp *ptp;
@@ -377,6 +381,7 @@ struct qede_tx_queue {
u64 xmit_pkts;
u64 stopped_cnt;
+ u64 tx_mem_alloc_err;
__le16 *hw_cons_ptr;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 8cbbd628fd73..16331c6c6fa7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -73,6 +73,7 @@ static const struct {
} qede_tqstats_arr[] = {
QEDE_TQSTAT(xmit_pkts),
QEDE_TQSTAT(stopped_cnt),
+ QEDE_TQSTAT(tx_mem_alloc_err),
};
#define QEDE_STAT_OFFSET(stat_name, type, base) \
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1a78027de071..bdf816fe5a16 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1466,8 +1466,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
if (qede_pkt_req_lin(skb, xmit_type)) {
if (skb_linearize(skb)) {
- DP_NOTICE(edev,
- "SKB linearization failed - silently dropping this SKB\n");
+ txq->tx_mem_alloc_err++;
+
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 46d0f2eaa0c0..5a74fcbdbc2b 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1086,7 +1086,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
}
if (is_vf)
- edev->flags |= QEDE_FLAG_IS_VF;
+ set_bit(QEDE_FLAGS_IS_VF, &edev->flags);
qede_init_ndev(edev);
@@ -1774,6 +1774,10 @@ static int qede_drain_txq(struct qede_dev *edev,
static int qede_stop_txq(struct qede_dev *edev,
struct qede_tx_queue *txq, int rss_id)
{
+ /* delete doorbell from doorbell recovery mechanism */
+ edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
+ &txq->tx_db);
+
return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}
@@ -1910,6 +1914,11 @@ static int qede_start_txq(struct qede_dev *edev,
DQ_XCM_ETH_TX_BD_PROD_CMD);
txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ /* register doorbell with doorbell recovery mechanism */
+ rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
+ &txq->tx_db, DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+
return rc;
}
@@ -2057,6 +2066,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
if (!is_locked)
__qede_lock(edev);
+ clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
+
edev->state = QEDE_STATE_CLOSED;
qede_rdma_dev_event_close(edev);
@@ -2163,6 +2174,8 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
/* Program un-configured VLANs */
qede_configure_vlan_filters(edev);
+ set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
+
/* Ask for link-up using current configuration */
memset(&link_params, 0, sizeof(link_params));
link_params.link_up = true;
@@ -2258,8 +2271,8 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
{
struct qede_dev *edev = dev;
- if (!netif_running(edev->ndev)) {
- DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
+ if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
+ DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
return;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 013ff567283c..5f3f42a25361 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -223,12 +223,12 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev)
switch (ptp->tx_type) {
case HWTSTAMP_TX_ON:
- edev->flags |= QEDE_TX_TIMESTAMPING_EN;
+ set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
tx_type = QED_PTP_HWTSTAMP_TX_ON;
break;
case HWTSTAMP_TX_OFF:
- edev->flags &= ~QEDE_TX_TIMESTAMPING_EN;
+ clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
tx_type = QED_PTP_HWTSTAMP_TX_OFF;
break;
@@ -518,7 +518,7 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags))
return;
- if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
+ if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
DP_NOTICE(edev,
"Tx timestamping was not enabled, this packet will not be timestamped\n");
} else if (unlikely(ptp->tx_skb)) {
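
The flags conversion running through qede.h, qede_main.c and qede_ptp.c
swaps open-coded read-modify-write mask updates for the atomic bitops API,
so concurrent paths (PTP configuration, link handling) can touch different
bits of ->flags without a shared lock. The two idioms side by side
(illustrative fragment):

bool on;

/* before: non-atomic RMW on the shared word - two contexts updating
 * different bits can lose an update
 */
edev->flags |= QEDE_TX_TIMESTAMPING_EN;
on = !!(edev->flags & QEDE_TX_TIMESTAMPING_EN);

/* after: set_bit()/test_bit() are atomic per-bit operations */
set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
on = test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
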
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index d42ba2293d8c..16d0479f6891 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2993,10 +2993,8 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
{
int i;
- struct cmd_desc_type0 *tx_desc_info;
for (i = 0; i < tx_ring->num_desc; i++) {
- tx_desc_info = &tx_ring->desc_head[i];
pr_info("TX Desc: %d\n", i);
print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
&tx_ring->desc_head[i],
@@ -4008,19 +4006,12 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
int queue_type)
{
struct net_device *netdev = adapter->netdev;
- u8 max_hw_rings = 0;
char buf[8];
- int cur_rings;
- if (queue_type == QLCNIC_RX_QUEUE) {
- max_hw_rings = adapter->max_sds_rings;
- cur_rings = adapter->drv_sds_rings;
+ if (queue_type == QLCNIC_RX_QUEUE)
strcpy(buf, "SDS");
- } else if (queue_type == QLCNIC_TX_QUEUE) {
- max_hw_rings = adapter->max_tx_rings;
- cur_rings = adapter->drv_tx_rings;
+ else
strcpy(buf, "Tx");
- }
if (!is_power_of_2(ring_cnt)) {
netdev_err(netdev, "%s rings value should be a power of 2\n",
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 50eaafa3eaba..af3b037fa442 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1067,9 +1067,6 @@ static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_adapter *adapter = vf->adapter;
int err = -EIO;
- u8 op;
-
- op = cmd->req.arg[1] & 0xff;
cmd->req.arg[1] |= vf->vp->handle << 16;
cmd->req.arg[1] |= BIT_31;
@@ -1339,14 +1336,13 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
{
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_vport *vp = vf->vp;
- u8 cmd_op, mode = vp->vlan_mode;
+ u8 mode = vp->vlan_mode;
struct qlcnic_adapter *adapter;
struct qlcnic_sriov *sriov;
adapter = vf->adapter;
sriov = adapter->ahw->sriov;
- cmd_op = trans->req_hdr->cmd_op;
cmd->rsp.arg[0] |= 1 << 25;
/* For 84xx adapter in case of PVID , PFD should send vlan mode as
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index a9f1bc013364..bcb890b18a94 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -61,6 +61,7 @@ static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = {
"Transmit ring full",
"SPI errors",
"Write verify errors",
+ "Buffer available errors",
};
#ifdef CONFIG_DEBUG_FS
@@ -125,19 +126,7 @@ qcaspi_info_show(struct seq_file *s, void *what)
return 0;
}
-
-static int
-qcaspi_info_open(struct inode *inode, struct file *file)
-{
- return single_open(file, qcaspi_info_show, inode->i_private);
-}
-
-static const struct file_operations qcaspi_info_ops = {
- .open = qcaspi_info_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(qcaspi_info);
void
qcaspi_init_device_debugfs(struct qcaspi *qca)
@@ -153,7 +142,7 @@ qcaspi_init_device_debugfs(struct qcaspi *qca)
return;
}
debugfs_create_file("info", S_IFREG | 0444, device_root, qca,
- &qcaspi_info_ops);
+ &qcaspi_info_fops);
}
void
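
For reference, DEFINE_SHOW_ATTRIBUTE(qcaspi_info) generates essentially the boilerplate deleted above from the corresponding _show() routine; simplified from include/linux/seq_file.h, it expands to roughly:

    static int qcaspi_info_open(struct inode *inode, struct file *file)
    {
        return single_open(file, qcaspi_info_show, inode->i_private);
    }

    static const struct file_operations qcaspi_info_fops = {
        .owner   = THIS_MODULE,
        .open    = qcaspi_info_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

which is why the debugfs_create_file() call switches from the hand-rolled qcaspi_info_ops to the generated qcaspi_info_fops.
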
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index d5310504f436..97f92953bdb9 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -289,6 +289,14 @@ qcaspi_transmit(struct qcaspi *qca)
qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available);
+ if (available > QCASPI_HW_BUF_LEN) {
+ /* This can only happen due to interference on the SPI line,
+ * so retry later.
+ */
+ qca->stats.buf_avail_err++;
+ return -1;
+ }
+
while (qca->txr.skb[qca->txr.head]) {
pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;
@@ -355,7 +363,13 @@ qcaspi_receive(struct qcaspi *qca)
netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
available);
- if (available == 0) {
+ if (available > QCASPI_HW_BUF_LEN) {
+ /* This can only happen due to interference on the SPI line,
+ * so retry later.
+ */
+ qca->stats.buf_avail_err++;
+ return -1;
+ } else if (available == 0) {
netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n");
return -1;
}
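
Both qca_spi hunks add the same defensive check: a buffer-availability value read back over SPI is compared against the fixed size of the on-chip buffer before the ring logic trusts it, and an impossible reading is counted and deferred. The shape of the pattern, all names hypothetical:

    static int mydev_read_avail(struct mydev *md, u16 reg, u16 *avail)
    {
        mydev_read_register(md, reg, avail);    /* hypothetical accessor */

        if (*avail > MYDEV_HW_BUF_LEN) {
            /* can only be noise on the SPI line: count it, retry later */
            md->stats.buf_avail_err++;
            return -EIO;
        }
        return 0;
    }
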
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 2d2c49726492..eb9af45fcc5e 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -74,6 +74,7 @@ struct qcaspi_stats {
u64 ring_full;
u64 spi_err;
u64 write_verify_failed;
+ u64 buf_avail_err;
};
struct qcaspi {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 5f4e447c5dce..b8bbee645f51 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -301,10 +301,13 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
struct rmnet_port *port;
u16 mux_id;
+ if (!dev)
+ return -ENODEV;
+
real_dev = __dev_get_by_index(dev_net(dev),
nla_get_u32(tb[IFLA_LINK]));
- if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev))
+ if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
return -ENODEV;
port = rmnet_get_port_rtnl(real_dev);
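
The rmnet fix matters because the old code dereferenced dev (inside dev_net(dev)) one statement before testing !dev; a NULL check only helps if it dominates every use. The corrected ordering in general form:

    static int mydrv_changelink(struct net_device *dev, struct nlattr *tb[])
    {
        struct net_device *real_dev;

        if (!dev)                       /* validate before any dereference */
            return -ENODEV;

        real_dev = __dev_get_by_index(dev_net(dev),     /* now safe */
                                      nla_get_u32(tb[IFLA_LINK]));
        if (!real_dev)
            return -ENODEV;

        return 0;
    }
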
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 3ee8ae9b6838..f6cf59aee212 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -20,17 +20,12 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
struct rmnet_port *port,
int enable)
{
- struct rmnet_map_control_command *cmd;
struct rmnet_endpoint *ep;
struct net_device *vnd;
- u16 ip_family;
- u16 fc_seq;
- u32 qos_id;
u8 mux_id;
int r;
mux_id = RMNET_MAP_GET_MUX_ID(skb);
- cmd = RMNET_MAP_GET_CMD_START(skb);
if (mux_id >= RMNET_MAX_LOGICAL_EP) {
kfree_skb(skb);
@@ -45,10 +40,6 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
vnd = ep->egress_dev;
- ip_family = cmd->flow_control.ip_family;
- fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
- qos_id = ntohl(cmd->flow_control.qos_id);
-
/* Ignore the ip family and pass the sequence number for both v4 and v6
* sequence. User space does not support creating dedicated flows for
* the 2 protocols
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 81045dfa1cd8..44f6e4873aad 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
struct cp_private *cp;
int handled = 0;
u16 status;
+ u16 mask;
if (unlikely(dev == NULL))
return IRQ_NONE;
@@ -578,6 +579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
spin_lock(&cp->lock);
+ mask = cpr16(IntrMask);
+ if (!mask)
+ goto out_unlock;
+
status = cpr16(IntrStatus);
if (!status || (status == 0xFFFF))
goto out_unlock;
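
Checking IntrMask first lets cp_interrupt() return early when the driver itself has masked all interrupts (during NAPI polling or teardown); on a shared IRQ line that prevents the handler from consuming a status word the masking context still expects to see. A hedged sketch of the handler shape, names hypothetical:

    static irqreturn_t mydev_interrupt(int irq, void *dev_instance)
    {
        struct mydev *md = dev_instance;
        irqreturn_t ret = IRQ_NONE;
        u16 mask, status;

        spin_lock(&md->lock);

        mask = ioread16(md->regs + MYDEV_INTR_MASK);
        if (!mask)              /* we masked everything: not ours now */
            goto out_unlock;

        status = ioread16(md->regs + MYDEV_INTR_STATUS);
        if (!status || status == 0xffff)   /* shared line or hot removal */
            goto out_unlock;

        /* ... ack and dispatch the events in 'status' ... */
        ret = IRQ_HANDLED;

    out_unlock:
        spin_unlock(&md->lock);
        return ret;
    }
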
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index ffd68a7bc9e1..69d752f0b621 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1661,7 +1661,7 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
napi_disable(&tp->napi);
netif_stop_queue(dev);
- synchronize_sched();
+ synchronize_rcu();
netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
RTL_R8(ChipCmd), RTL_R16(IntrStatus),
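
The synchronize_sched() to synchronize_rcu() substitutions here (and in r8169 and sis190 below) follow from the RCU flavor consolidation around v4.20: synchronize_rcu() now also waits for preempt- and BH-disabled regions, which is exactly what these drivers rely on, since ndo_start_xmit runs with BH disabled. The shutdown ordering, sketched with hypothetical names:

    static void mydev_quiesce_tx(struct net_device *dev)
    {
        netif_stop_queue(dev);      /* no new transmits are scheduled */

        /*
         * Any hard_start_xmit already in flight runs in a BH-disabled
         * section; after the flavor consolidation synchronize_rcu()
         * waits those out too.
         */
        synchronize_rcu();

        /* now safe to reset rings and free pending tx skbs */
    }
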
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 1fd01688d37b..99bc3de906e2 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -56,13 +56,6 @@
#define R8169_MSG_DEFAULT \
(NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
-#define TX_SLOTS_AVAIL(tp) \
- (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
-
-/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
-#define TX_FRAGS_READY_FOR(tp,nr_frags) \
- (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
-
/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
@@ -212,24 +205,24 @@ enum cfg_version {
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
- { PCI_VENDOR_ID_DLINK, 0x4300,
- PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
+ { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 },
+ { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 },
+ { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 },
+ { PCI_VDEVICE(REALTEK, 0x8167), RTL_CFG_0 },
+ { PCI_VDEVICE(REALTEK, 0x8168), RTL_CFG_1 },
+ { PCI_VDEVICE(NCUBE, 0x8168), RTL_CFG_1 },
+ { PCI_VDEVICE(REALTEK, 0x8169), RTL_CFG_0 },
+ { PCI_VENDOR_ID_DLINK, 0x4300,
+ PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
+ { PCI_VDEVICE(DLINK, 0x4300), RTL_CFG_0 },
+ { PCI_VDEVICE(DLINK, 0x4302), RTL_CFG_0 },
+ { PCI_VDEVICE(AT, 0xc107), RTL_CFG_0 },
+ { PCI_VDEVICE(USR, 0x0116), RTL_CFG_0 },
{ PCI_VENDOR_ID_LINKSYS, 0x1032,
PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
{ 0x0001, 0x8168,
PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
- {0,},
+ {}
};
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
@@ -603,7 +596,6 @@ struct RxDesc {
struct ring_info {
struct sk_buff *skb;
u32 len;
- u8 __pad[sizeof(void *) - sizeof(u32)];
};
struct rtl8169_counters {
@@ -661,7 +653,7 @@ struct rtl8169_private {
struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
u16 cp_cmd;
- u16 event_slow;
+ u16 irq_mask;
const struct rtl_coalesce_info *coalesce_info;
struct clk *clk;
@@ -1102,23 +1094,6 @@ static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
return rtl_eri_read(tp, reg, ERIAR_OOB);
}
-static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- return r8168dp_ocp_read(tp, mask, reg);
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
- return r8168ep_ocp_read(tp, mask, reg);
- default:
- BUG();
- return ~0;
- }
-}
-
static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
u32 data)
{
@@ -1134,30 +1109,11 @@ static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
data, ERIAR_OOB);
}
-static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- r8168dp_ocp_write(tp, mask, reg, data);
- break;
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
- r8168ep_ocp_write(tp, mask, reg, data);
- break;
- default:
- BUG();
- break;
- }
-}
-
-static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
+static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC);
- ocp_write(tp, 0x1, 0x30, 0x00000001);
+ r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
}
#define OOB_CMD_RESET 0x00
@@ -1169,18 +1125,18 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
}
-DECLARE_RTL_COND(rtl_ocp_read_cond)
+DECLARE_RTL_COND(rtl_dp_ocp_read_cond)
{
u16 reg;
reg = rtl8168_get_ocp_reg(tp);
- return ocp_read(tp, 0x0f, reg) & 0x00000800;
+ return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800;
}
DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
{
- return ocp_read(tp, 0x0f, 0x124) & 0x00000001;
+ return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001;
}
DECLARE_RTL_COND(rtl_ocp_tx_cond)
@@ -1198,14 +1154,15 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
{
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
+ r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START);
+ rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10);
}
static void rtl8168ep_driver_start(struct rtl8169_private *tp)
{
- ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
- ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
+ r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START);
+ r8168ep_ocp_write(tp, 0x01, 0x30,
+ r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
@@ -1230,15 +1187,16 @@ static void rtl8168_driver_start(struct rtl8169_private *tp)
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
{
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
+ r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+ rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10);
}
static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
{
rtl8168ep_stop_cmac(tp);
- ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
- ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01);
+ r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP);
+ r8168ep_ocp_write(tp, 0x01, 0x30,
+ r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01);
rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10);
}
@@ -1265,12 +1223,12 @@ static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
u16 reg = rtl8168_get_ocp_reg(tp);
- return !!(ocp_read(tp, 0x0f, reg) & 0x00008000);
+ return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000);
}
static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
- return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001);
+ return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001);
}
static bool r8168_check_dash(struct rtl8169_private *tp)
@@ -1325,27 +1283,20 @@ static u16 rtl_get_events(struct rtl8169_private *tp)
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
RTL_W16(tp, IntrStatus, bits);
- mmiowb();
}
static void rtl_irq_disable(struct rtl8169_private *tp)
{
RTL_W16(tp, IntrMask, 0);
- mmiowb();
-}
-
-static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
-{
- RTL_W16(tp, IntrMask, bits);
}
#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
#define RTL_EVENT_NAPI_TX (TxOK | TxErr)
#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
-static void rtl_irq_enable_all(struct rtl8169_private *tp)
+static void rtl_irq_enable(struct rtl8169_private *tp)
{
- rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
+ RTL_W16(tp, IntrMask, tp->irq_mask);
}
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
@@ -2051,8 +2002,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static void rtl8169_get_mac_version(struct rtl8169_private *tp,
- u8 default_version)
+static void rtl8169_get_mac_version(struct rtl8169_private *tp)
{
/*
* The driver currently handles the 8168Bf and the 8168Be identically
@@ -2066,120 +2016,107 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
* (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
*/
static const struct rtl_mac_info {
- u32 mask;
- u32 val;
- int mac_version;
+ u16 mask;
+ u16 val;
+ u16 mac_version;
} mac_info[] = {
/* 8168EP family. */
- { 0x7cf00000, 0x50200000, RTL_GIGA_MAC_VER_51 },
- { 0x7cf00000, 0x50100000, RTL_GIGA_MAC_VER_50 },
- { 0x7cf00000, 0x50000000, RTL_GIGA_MAC_VER_49 },
+ { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 },
+ { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 },
+ { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 },
/* 8168H family. */
- { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 },
- { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 },
+ { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 },
+ { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
/* 8168G family. */
- { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 },
- { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 },
- { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
- { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
+ { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
+ { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 },
+ { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 },
+ { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 },
/* 8168F family. */
- { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
- { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
- { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
+ { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
+ { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
+ { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
/* 8168E family. */
- { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
- { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
- { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
+ { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 },
+ { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 },
+ { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 },
/* 8168D family. */
- { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
- { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
+ { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 },
+ { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
/* 8168DP family. */
- { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
- { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
- { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
+ { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
+ { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
+ { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
/* 8168C family. */
- { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
- { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
- { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
- { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
- { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
- { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
- { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
+ { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 },
+ { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 },
+ { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 },
+ { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 },
+ { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 },
+ { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 },
+ { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 },
/* 8168B family. */
- { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
- { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
- { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
+ { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 },
+ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
+ { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
/* 8101 family. */
- { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
- { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
- { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
- { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
- { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
- { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
- { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
- { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
- { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
- { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
- { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
- { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
- { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
- { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
+ { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 },
+ { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 },
+ { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 },
+ { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 },
+ { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 },
+ { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 },
+ { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
+ { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
+ { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
+ { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
+ { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
+ { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
+ { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
+ { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 },
/* FIXME: where did these entries come from ? -- FR */
- { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
- { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
+ { 0xfc8, 0x388, RTL_GIGA_MAC_VER_15 },
+ { 0xfc8, 0x308, RTL_GIGA_MAC_VER_14 },
/* 8110 family. */
- { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
- { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
- { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
- { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
- { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
- { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
+ { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
+ { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 },
+ { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 },
+ { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 },
+ { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 },
+ { 0xfc8, 0x000, RTL_GIGA_MAC_VER_01 },
/* Catch-all */
- { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
+ { 0x000, 0x000, RTL_GIGA_MAC_NONE }
};
const struct rtl_mac_info *p = mac_info;
- u32 reg;
+ u16 reg = RTL_R32(tp, TxConfig) >> 20;
- reg = RTL_R32(tp, TxConfig);
while ((reg & p->mask) != p->val)
p++;
tp->mac_version = p->mac_version;
if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- dev_notice(tp_to_dev(tp),
- "unknown MAC, using family default\n");
- tp->mac_version = default_version;
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
- tp->mac_version = tp->supports_gmii ?
- RTL_GIGA_MAC_VER_42 :
- RTL_GIGA_MAC_VER_43;
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) {
- tp->mac_version = tp->supports_gmii ?
- RTL_GIGA_MAC_VER_45 :
- RTL_GIGA_MAC_VER_47;
- } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) {
- tp->mac_version = tp->supports_gmii ?
- RTL_GIGA_MAC_VER_46 :
- RTL_GIGA_MAC_VER_48;
+ dev_err(tp_to_dev(tp), "unknown chip XID %03x\n", reg & 0xfcf);
+ } else if (!tp->supports_gmii) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_42)
+ tp->mac_version = RTL_GIGA_MAC_VER_43;
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_45)
+ tp->mac_version = RTL_GIGA_MAC_VER_47;
+ else if (tp->mac_version == RTL_GIGA_MAC_VER_46)
+ tp->mac_version = RTL_GIGA_MAC_VER_48;
}
}
-static void rtl8169_print_mac_version(struct rtl8169_private *tp)
-{
- netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version);
-}
-
struct phy_reg {
u16 reg;
u16 val;
@@ -3902,8 +3839,6 @@ static void rtl_hw_phy_config(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl8169_print_mac_version(tp);
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_01:
break;
@@ -4643,7 +4578,7 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_mode(tp->dev);
/* no early-rx interrupts */
RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
- rtl_irq_enable_all(tp);
+ rtl_irq_enable(tp);
}
static void rtl_hw_start_8169(struct rtl8169_private *tp)
@@ -5394,8 +5329,8 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
/* Work around for RxFIFO overflow. */
if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
- tp->event_slow |= RxFIFOOver | PCSTimeout;
- tp->event_slow &= ~RxOverflow;
+ tp->irq_mask |= RxFIFOOver;
+ tp->irq_mask &= ~RxOverflow;
}
switch (tp->mac_version) {
@@ -5632,7 +5567,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
static void rtl_hw_start_8101(struct rtl8169_private *tp)
{
if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
- tp->event_slow &= ~RxFIFOOver;
+ tp->irq_mask &= ~RxFIFOOver;
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16)
@@ -5866,7 +5801,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
napi_disable(&tp->napi);
netif_stop_queue(dev);
- synchronize_sched();
+ synchronize_rcu();
rtl8169_hw_reset(tp);
@@ -5888,6 +5823,16 @@ static void rtl8169_tx_timeout(struct net_device *dev)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
+static __le32 rtl8169_get_txd_opts1(u32 opts0, u32 len, unsigned int entry)
+{
+ u32 status = opts0 | len;
+
+ if (entry == NUM_TX_DESC - 1)
+ status |= RingEnd;
+
+ return cpu_to_le32(status);
+}
+
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
u32 *opts)
{
@@ -5900,7 +5845,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
const skb_frag_t *frag = info->frags + cur_frag;
dma_addr_t mapping;
- u32 status, len;
+ u32 len;
void *addr;
entry = (entry + 1) % NUM_TX_DESC;
@@ -5916,11 +5861,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
goto err_out;
}
- /* Anti gcc 2.95.3 bugware (sic) */
- status = opts[0] | len |
- (RingEnd * !((entry + 1) % NUM_TX_DESC));
-
- txd->opts1 = cpu_to_le32(status);
+ txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
txd->opts2 = cpu_to_le32(opts[1]);
txd->addr = cpu_to_le64(mapping);
@@ -6108,6 +6049,15 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
return true;
}
+static bool rtl_tx_slots_avail(struct rtl8169_private *tp,
+ unsigned int nr_frags)
+{
+ unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx;
+
+ /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+ return slots_avail > nr_frags;
+}
+
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -6116,11 +6066,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct TxDesc *txd = tp->TxDescArray + entry;
struct device *d = tp_to_dev(tp);
dma_addr_t mapping;
- u32 status, len;
- u32 opts[2];
+ u32 opts[2], len;
+ bool stop_queue;
int frags;
- if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
+ if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0;
}
@@ -6159,32 +6109,26 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
/* Force memory writes to complete before releasing descriptor */
dma_wmb();
- /* Anti gcc 2.95.3 bugware (sic) */
- status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
- txd->opts1 = cpu_to_le32(status);
+ txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry);
/* Force all memory writes to complete before notifying device */
wmb();
tp->cur_tx += frags + 1;
- RTL_W8(tp, TxPoll, NPQ);
+ stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+ if (unlikely(stop_queue))
+ netif_stop_queue(dev);
- mmiowb();
+ if (__netdev_sent_queue(dev, skb->len, skb->xmit_more))
+ RTL_W8(tp, TxPoll, NPQ);
- if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
- /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
- * not miss a ring update when it notices a stopped queue.
- */
- smp_wmb();
- netif_stop_queue(dev);
+ if (unlikely(stop_queue)) {
/* Sync with rtl_tx:
* - publish queue status and cur_tx ring index (write barrier)
* - refresh dirty_tx ring index (read barrier).
@@ -6193,7 +6137,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
* can't.
*/
smp_mb();
- if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
+ if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
netif_wake_queue(dev);
}
@@ -6257,7 +6201,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
-static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
+ int budget)
{
unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0;
@@ -6285,7 +6230,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
if (status & LastFrag) {
pkts_compl++;
bytes_compl += tx_skb->skb->len;
- dev_consume_skb_any(tx_skb->skb);
+ napi_consume_skb(tx_skb->skb, budget);
tx_skb->skb = NULL;
}
dirty_tx++;
@@ -6310,7 +6255,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
*/
smp_mb();
if (netif_queue_stopped(dev) &&
- TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
+ rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
netif_wake_queue(dev);
}
/*
@@ -6460,8 +6405,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
{
struct rtl8169_private *tp = dev_instance;
u16 status = rtl_get_events(tp);
+ u16 irq_mask = RTL_R16(tp, IntrMask);
- if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow)))
+ if (status == 0xffff || !(status & irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@@ -6469,7 +6415,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
goto out;
}
- if (status & LinkChg)
+ if (status & LinkChg && tp->dev->phydev)
phy_mac_interrupt(tp->dev->phydev);
if (unlikely(status & RxFIFOOver &&
@@ -6528,13 +6474,11 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
work_done = rtl_rx(dev, tp, (u32) budget);
- rtl_tx(dev, tp);
+ rtl_tx(dev, tp, budget);
if (work_done < budget) {
napi_complete_done(napi, work_done);
-
- rtl_irq_enable_all(tp);
- mmiowb();
+ rtl_irq_enable(tp);
}
return work_done;
@@ -6584,7 +6528,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
phy_set_max_speed(phydev, SPEED_100);
/* Ensure to advertise everything, incl. pause */
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
phy_attached_info(phydev);
@@ -6609,7 +6553,7 @@ static void rtl8169_down(struct net_device *dev)
rtl8169_rx_missed(dev);
/* Give a racing hard_start_xmit a few cycles to complete. */
- synchronize_sched();
+ synchronize_rcu();
rtl8169_tx_clear(tp);
@@ -6824,8 +6768,7 @@ static void rtl8169_net_suspend(struct net_device *dev)
static int rtl8169_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
rtl8169_net_suspend(dev);
@@ -6855,8 +6798,7 @@ static void __rtl8169_resume(struct net_device *dev)
static int rtl8169_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
clk_prepare_enable(tp->clk);
@@ -6869,8 +6811,7 @@ static int rtl8169_resume(struct device *device)
static int rtl8169_runtime_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
if (!tp->TxDescArray)
@@ -6891,8 +6832,7 @@ static int rtl8169_runtime_suspend(struct device *device)
static int rtl8169_runtime_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rtl8169_private *tp = netdev_priv(dev);
rtl_rar_set(tp, dev->dev_addr);
@@ -6910,8 +6850,7 @@ static int rtl8169_runtime_resume(struct device *device)
static int rtl8169_runtime_idle(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
if (!netif_running(dev) || !netif_carrier_ok(dev))
pm_schedule_suspend(device, 10000);
@@ -7023,31 +6962,26 @@ static const struct net_device_ops rtl_netdev_ops = {
static const struct rtl_cfg_info {
void (*hw_start)(struct rtl8169_private *tp);
- u16 event_slow;
+ u16 irq_mask;
unsigned int has_gmii:1;
const struct rtl_coalesce_info *coalesce_info;
- u8 default_ver;
} rtl_cfg_infos [] = {
[RTL_CFG_0] = {
.hw_start = rtl_hw_start_8169,
- .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
+ .irq_mask = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
.has_gmii = 1,
.coalesce_info = rtl_coalesce_info_8169,
- .default_ver = RTL_GIGA_MAC_VER_01,
},
[RTL_CFG_1] = {
.hw_start = rtl_hw_start_8168,
- .event_slow = SYSErr | LinkChg | RxOverflow,
+ .irq_mask = LinkChg | RxOverflow,
.has_gmii = 1,
.coalesce_info = rtl_coalesce_info_8168_8136,
- .default_ver = RTL_GIGA_MAC_VER_11,
},
[RTL_CFG_2] = {
.hw_start = rtl_hw_start_8101,
- .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
- PCSTimeout,
+ .irq_mask = LinkChg | RxOverflow | RxFIFOOver,
.coalesce_info = rtl_coalesce_info_8168_8136,
- .default_ver = RTL_GIGA_MAC_VER_13,
}
};
@@ -7309,11 +7243,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mmio_addr = pcim_iomap_table(pdev)[region];
- if (!pci_is_pcie(pdev))
- dev_info(&pdev->dev, "not PCI Express\n");
-
/* Identify chip attached to board */
- rtl8169_get_mac_version(tp, cfg->default_ver);
+ rtl8169_get_mac_version(tp);
+ if (tp->mac_version == RTL_GIGA_MAC_NONE)
+ return -ENODEV;
if (rtl_tbi_enabled(tp)) {
dev_err(&pdev->dev, "TBI fiber mode not supported\n");
@@ -7351,8 +7284,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_init_mdio_ops(tp);
rtl_init_jumbo_ops(tp);
- rtl8169_print_mac_version(tp);
-
chipset = tp->mac_version;
rc = rtl_alloc_irq(tp);
@@ -7426,7 +7357,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->max_mtu = jumbo_max;
tp->hw_start = cfg->hw_start;
- tp->event_slow = cfg->event_slow;
+ tp->irq_mask = RTL_EVENT_NAPI | cfg->irq_mask;
tp->coalesce_info = cfg->coalesce_info;
tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
@@ -7450,9 +7381,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_mdio_unregister;
- netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n",
+ netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n",
rtl_chip_infos[chipset].name, dev->dev_addr,
- (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff),
+ (RTL_R32(tp, TxConfig) >> 20) & 0xfcf,
pci_irq_vector(pdev, 0));
if (jumbo_max > JUMBO_1K)
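
Much of the churn in the r8169 PCI ID table is the PCI_VDEVICE() shorthand, which supplies the vendor define prefix and wildcards the subsystem IDs, leaving driver_data as a trailing positional initializer. Roughly, per include/linux/pci.h:

    /* { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 } is about the same as: */
    {
        .vendor      = PCI_VENDOR_ID_REALTEK,
        .device      = 0x8129,
        .subvendor   = PCI_ANY_ID,
        .subdevice   = PCI_ANY_ID,
        .class       = 0,
        .class_mask  = 0,
        .driver_data = RTL_CFG_0,
    }

The mac_info table shrinks for a similar reason: the chip XID lives in the upper bits of TxConfig, so shifting the register right by 20 first allows 16-bit masks and values instead of full 32-bit register masks.
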
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 1c6e4df94f01..ac9195add811 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1032,7 +1032,6 @@ struct ravb_private {
phy_interface_t phy_interface;
int msg_enable;
int speed;
- int duplex;
int emac_irq;
enum ravb_chip_id chip_id;
int rx_irqs[NUM_RX_QUEUE];
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index defed0d0c51d..ffc1ada4e6da 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -82,13 +82,6 @@ static int ravb_config(struct net_device *ndev)
return error;
}
-static void ravb_set_duplex(struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
-
- ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0);
-}
-
static void ravb_set_rate(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -406,13 +399,11 @@ error:
/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
- struct ravb_private *priv = netdev_priv(ndev);
-
/* Receive frame limit set register */
ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
/* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */
- ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) |
+ ravb_write(ndev, ECMR_ZPF | ECMR_DM |
(ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
ECMR_TE | ECMR_RE, ECMR);
@@ -995,12 +986,6 @@ static void ravb_adjust_link(struct net_device *ndev)
ravb_rcv_snd_disable(ndev);
if (phydev->link) {
- if (phydev->duplex != priv->duplex) {
- new_state = true;
- priv->duplex = phydev->duplex;
- ravb_set_duplex(ndev);
- }
-
if (phydev->speed != priv->speed) {
new_state = true;
priv->speed = phydev->speed;
@@ -1015,7 +1000,6 @@ static void ravb_adjust_link(struct net_device *ndev)
new_state = true;
priv->link = 0;
priv->speed = 0;
- priv->duplex = -1;
}
/* Enable TX and RX right over here, if E-MAC change is ignored */
@@ -1045,7 +1029,6 @@ static int ravb_phy_init(struct net_device *ndev)
priv->link = 0;
priv->speed = 0;
- priv->duplex = -1;
/* Try connecting to PHY */
pn = of_parse_phandle(np, "phy-handle", 0);
@@ -1088,6 +1071,10 @@ static int ravb_phy_init(struct net_device *ndev)
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+ /* Half Duplex is not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+
phy_attached_info(phydev);
return 0;
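
With the duplex bookkeeping removed, the ravb MAC is configured for full duplex unconditionally, so the PHY must never negotiate a half-duplex mode; phy_remove_link_mode() clears one ETHTOOL_LINK_MODE_* bit from the PHY's supported set (and hence from what it advertises). Sketch of the idiom at PHY init time:

    /* after of_phy_connect()/phy_connect() succeeded: */
    phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
    phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
    /* 10 Mbit/s half could be pruned the same way if the MAC minds it */
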
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index beb06628f22d..6213827e3956 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1632,9 +1632,6 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
- if (netif_is_bridge_master(vlan->obj.orig_dev))
- return -EOPNOTSUPP;
-
if (!wops->port_obj_vlan_add)
return -EOPNOTSUPP;
@@ -2145,8 +2142,6 @@ static int rocker_port_obj_del(struct net_device *dev,
static const struct switchdev_ops rocker_port_switchdev_ops = {
.switchdev_port_attr_get = rocker_port_attr_get,
.switchdev_port_attr_set = rocker_port_attr_set,
- .switchdev_port_obj_add = rocker_port_obj_add,
- .switchdev_port_obj_del = rocker_port_obj_del,
};
struct rocker_fib_event_work {
@@ -2812,12 +2807,54 @@ static int rocker_switchdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
+static int
+rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
+ struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+ int err = -EOPNOTSUPP;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = rocker_port_obj_add(netdev, port_obj_info->obj,
+ port_obj_info->trans);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = rocker_port_obj_del(netdev, port_obj_info->obj);
+ break;
+ }
+
+ port_obj_info->handled = true;
+ return notifier_from_errno(err);
+}
+
+static int rocker_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ if (!rocker_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return rocker_switchdev_port_obj_event(event, dev, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
static struct notifier_block rocker_switchdev_notifier = {
.notifier_call = rocker_switchdev_event,
};
+static struct notifier_block rocker_switchdev_blocking_notifier = {
+ .notifier_call = rocker_switchdev_blocking_event,
+};
+
static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct notifier_block *nb;
struct rocker *rocker;
int err;
@@ -2933,6 +2970,13 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_register_switchdev_notifier;
}
+ nb = &rocker_switchdev_blocking_notifier;
+ err = register_switchdev_blocking_notifier(nb);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register switchdev blocking notifier\n");
+ goto err_register_switchdev_blocking_notifier;
+ }
+
rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
@@ -2940,6 +2984,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+err_register_switchdev_blocking_notifier:
+ unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
unregister_fib_notifier(&rocker->fib_nb);
err_register_fib_notifier:
@@ -2971,6 +3017,10 @@ err_pci_enable_device:
static void rocker_remove(struct pci_dev *pdev)
{
struct rocker *rocker = pci_get_drvdata(pdev);
+ struct notifier_block *nb;
+
+ nb = &rocker_switchdev_blocking_notifier;
+ unregister_switchdev_blocking_notifier(nb);
unregister_switchdev_notifier(&rocker_switchdev_notifier);
unregister_fib_notifier(&rocker->fib_nb);
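
Port object add/del moves off switchdev_ops and onto the new blocking notifier chain, whose callbacks run in process context and may sleep. The consumer pattern generalizes; a minimal sketch with hypothetical names:

    static int mydrv_blocking_event(struct notifier_block *unused,
                                    unsigned long event, void *ptr)
    {
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

        if (!mydrv_port_dev_check(dev))     /* not one of our ports */
            return NOTIFY_DONE;

        switch (event) {
        case SWITCHDEV_PORT_OBJ_ADD:
        case SWITCHDEV_PORT_OBJ_DEL:
            return mydrv_port_obj_event(event, dev, ptr);
        }
        return NOTIFY_DONE;
    }

    static struct notifier_block mydrv_blocking_nb = {
        .notifier_call = mydrv_blocking_event,
    };

    /* probe: register_switchdev_blocking_notifier(&mydrv_blocking_nb);
     * remove/unwind: unregister_switchdev_blocking_notifier(), in
     * reverse registration order, as the new error labels above do.
     */
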
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 7eeac3d6cfe8..b6a50058bb8d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6041,6 +6041,10 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
+ /* MUM and SUC firmware share the same partition type */
+ { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" },
+ { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" },
+ { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }
};
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
@@ -6091,6 +6095,9 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
part->common.mtd.flags = MTD_CAP_NORFLASH;
part->common.mtd.size = size;
part->common.mtd.erasesize = erase_size;
+ /* sfc_status is read-only */
+ if (!erase_size)
+ part->common.mtd.flags |= MTD_NO_ERASE;
return 0;
}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 98fe7e762e17..3643015a55cf 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3167,7 +3167,7 @@ struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
{
u32 hash = efx_filter_spec_hash(spec);
- WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
+ lockdep_assert_held(&efx->rps_hash_lock);
if (!efx->rps_hash_table)
return NULL;
return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
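
lockdep_assert_held() is the right tool here: spin_is_locked() cannot tell *who* holds the lock and evaluates to false on uniprocessor builds without spinlock debugging, so the old WARN_ON could both miss real bugs and fire spuriously. The new form documents the locking contract and is checked only when lockdep is enabled (the smsc911x.h hunk below makes the same substitution):

    static struct hlist_head *table_bucket(struct mytable *t, u32 key)
    {
        /* contract: caller holds t->lock; verified under lockdep */
        lockdep_assert_held(&t->lock);

        return &t->buckets[key % MYTABLE_SIZE];
    }
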
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 3143588ffd77..600d7b895cf2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -539,7 +539,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
/* We need rx buffers and interrupts. */
already_up = (efx->net_dev->flags & IFF_UP);
if (!already_up) {
- rc = dev_open(efx->net_dev);
+ rc = dev_open(efx->net_dev, NULL);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c
index 1ccdb7a82e2a..72cedec945c1 100644
--- a/drivers/net/ethernet/sfc/falcon/ethtool.c
+++ b/drivers/net/ethernet/sfc/falcon/ethtool.c
@@ -517,7 +517,7 @@ static void ef4_ethtool_self_test(struct net_device *net_dev,
/* We need rx buffers and interrupts. */
already_up = (efx->net_dev->flags & IFF_UP);
if (!already_up) {
- rc = dev_open(efx->net_dev);
+ rc = dev_open(efx->net_dev, NULL);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c3ad564ac4c0..22eb059086f7 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -553,13 +553,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
goto err;
- /* Update BQL */
- netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
-
efx_tx_maybe_stop_queue(tx_queue);
/* Pass off to hardware */
- if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
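
__netdev_tx_sent_queue() folds the BQL byte accounting and the doorbell decision into one call: it returns true when the hardware must be kicked now, either because xmit_more is false or because BQL has just stopped the queue (a stopped queue must flush, or the queued packets would wait indefinitely). A hedged sketch of a transmit tail built on it, names hypothetical:

    static netdev_tx_t mydev_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        struct mydev *md = netdev_priv(dev);
        unsigned int len = skb->len;    /* skb may be gone after posting */

        mydev_map_and_post(md, skb);    /* fill and publish descriptors */

        /* accounts 'len' to BQL; true means ring the doorbell now */
        if (__netdev_tx_sent_queue(md->txq, len, skb->xmit_more))
            mydev_kick_hw(md);

        return NETDEV_TX_OK;
    }

r8169 above uses the __netdev_sent_queue() wrapper, the same helper operating on the device's queue 0.
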
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index c2c50522b96d..808cf9816673 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1142,7 +1142,7 @@ static void sis190_down(struct net_device *dev)
if (!poll_locked)
poll_locked++;
- synchronize_sched();
+ synchronize_rcu();
} while (SIS_R32(IntrMask));
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 358820282ef0..79612060d0ba 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -27,7 +27,7 @@ config SMC9194
option if you have a DELL laptop with the docking station, or
another SMC9192/9194 based chipset. Say Y if you want it compiled
into the kernel, and read the file
- <file:Documentation/networking/smc9.txt>.
+ <file:Documentation/networking/device_drivers/smsc/smc9.txt>.
To compile this driver as a module, choose M here. The module
will be called smc9194.
@@ -43,7 +43,7 @@ config SMC91X
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
compiled into the kernel, and read the file
- <file:Documentation/networking/smc9.txt>.
+ <file:Documentation/networking/device_drivers/smsc/smc9.txt>.
This driver is also available as a module ( = code which can be
inserted in and removed from the running kernel whenever you want).
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 8d75508acd2b..51b2fc1a395f 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -67,7 +67,7 @@
#ifdef CONFIG_DEBUG_SPINLOCK
#define SMSC_ASSERT_MAC_LOCK(pdata) \
- WARN_ON_SMP(!spin_is_locked(&pdata->mac_lock))
+ lockdep_assert_held(&pdata->mac_lock)
#else
#define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0)
#endif /* CONFIG_DEBUG_SPINLOCK */
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index d9d0d03e4ce7..05a0948ad929 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -234,6 +234,9 @@
#define DESC_NUM 256
+#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define NETSEC_RX_BUF_SZ 1536
+
#define DESC_SZ sizeof(struct netsec_de)
#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
@@ -254,7 +257,6 @@ struct netsec_desc_ring {
dma_addr_t desc_dma;
struct netsec_desc *desc;
void *vaddr;
- u16 pkt_cnt;
u16 head, tail;
};
@@ -571,34 +573,10 @@ static const struct ethtool_ops netsec_ethtool_ops = {
/************* NETDEV_OPS FOLLOW *************/
-static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
- struct netsec_desc *desc)
-{
- struct sk_buff *skb;
-
- if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
- skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
- } else {
- desc->len = L1_CACHE_ALIGN(desc->len);
- skb = netdev_alloc_skb(priv->ndev, desc->len);
- }
- if (!skb)
- return NULL;
-
- desc->addr = skb->data;
- desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->dev, desc->dma_addr)) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
- return skb;
-}
static void netsec_set_rx_de(struct netsec_priv *priv,
struct netsec_desc_ring *dring, u16 idx,
- const struct netsec_desc *desc,
- struct sk_buff *skb)
+ const struct netsec_desc *desc)
{
struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
@@ -617,88 +595,28 @@ static void netsec_set_rx_de(struct netsec_priv *priv,
dring->desc[idx].dma_addr = desc->dma_addr;
dring->desc[idx].addr = desc->addr;
dring->desc[idx].len = desc->len;
- dring->desc[idx].skb = skb;
-}
-
-static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
- struct netsec_desc_ring *dring,
- u16 idx,
- struct netsec_rx_pkt_info *rxpi,
- struct netsec_desc *desc, u16 *len)
-{
- struct netsec_de de = {};
-
- memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
-
- *len = de.buf_len_info >> 16;
-
- rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
- rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
- rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
- NETSEC_RX_PKT_ERR_MASK;
- *desc = dring->desc[idx];
- return desc->skb;
-}
-
-static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
- struct netsec_rx_pkt_info *rxpi,
- struct netsec_desc *desc,
- u16 *len)
-{
- struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- struct sk_buff *tmp_skb, *skb = NULL;
- struct netsec_desc td;
- int tail;
-
- *rxpi = (struct netsec_rx_pkt_info){};
-
- td.len = priv->ndev->mtu + 22;
-
- tmp_skb = netsec_alloc_skb(priv, &td);
-
- tail = dring->tail;
-
- if (!tmp_skb) {
- netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
- dring->desc[tail].skb);
- } else {
- skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
- netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
- }
-
- /* move tail ahead */
- dring->tail = (dring->tail + 1) % DESC_NUM;
-
- return skb;
}
-static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
+static bool netsec_clean_tx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
unsigned int pkts, bytes;
-
- dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
-
- if (dring->pkt_cnt < budget)
- budget = dring->pkt_cnt;
+ struct netsec_de *entry;
+ int tail = dring->tail;
+ int cnt = 0;
pkts = 0;
bytes = 0;
+ entry = dring->vaddr + DESC_SZ * tail;
- while (pkts < budget) {
+ while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
+ cnt < DESC_NUM) {
struct netsec_desc *desc;
- struct netsec_de *entry;
- int tail, eop;
-
- tail = dring->tail;
-
- /* move tail ahead */
- dring->tail = (tail + 1) % DESC_NUM;
+ int eop;
desc = &dring->desc[tail];
- entry = dring->vaddr + DESC_SZ * tail;
-
eop = (entry->attr >> NETSEC_TX_LAST) & 1;
+ dma_rmb();
dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
DMA_TO_DEVICE);
@@ -707,33 +625,94 @@ static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
bytes += desc->skb->len;
dev_kfree_skb(desc->skb);
}
+ /* Clean up so that netsec_uninit_pkt_dring() won't free the
+ * skb again
+ */
*desc = (struct netsec_desc){};
+
+ /* entry->attr is not going to be accessed by the NIC until
+ * netsec_set_tx_de() is called. No need for a dma_wmb() here
+ */
+ entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
+ /* move tail ahead */
+ dring->tail = (tail + 1) % DESC_NUM;
+
+ tail = dring->tail;
+ entry = dring->vaddr + DESC_SZ * tail;
+ cnt++;
}
- dring->pkt_cnt -= budget;
- priv->ndev->stats.tx_packets += budget;
+ if (!cnt)
+ return false;
+
+ /* reading the register clears the irq */
+ netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
+
+ priv->ndev->stats.tx_packets += cnt;
priv->ndev->stats.tx_bytes += bytes;
- netdev_completed_queue(priv->ndev, budget, bytes);
+ netdev_completed_queue(priv->ndev, cnt, bytes);
- return budget;
+ return true;
}
-static int netsec_process_tx(struct netsec_priv *priv, int budget)
+static void netsec_process_tx(struct netsec_priv *priv)
{
struct net_device *ndev = priv->ndev;
- int new, done = 0;
+ bool cleaned;
- do {
- new = netsec_clean_tx_dring(priv, budget);
- done += new;
- budget -= new;
- } while (new);
+ cleaned = netsec_clean_tx_dring(priv);
- if (done && netif_queue_stopped(ndev))
+ if (cleaned && netif_queue_stopped(ndev)) {
+ /* Make sure we update the value; anyone stopping the queue
+ * after this will read the proper consumer idx
+ */
+ smp_wmb();
netif_wake_queue(ndev);
+ }
+}
- return done;
+static void *netsec_alloc_rx_data(struct netsec_priv *priv,
+ dma_addr_t *dma_handle, u16 *desc_len)
+{
+ size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size_t payload_len = NETSEC_RX_BUF_SZ;
+ dma_addr_t mapping;
+ void *buf;
+
+ total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
+
+ buf = napi_alloc_frag(total_len);
+ if (!buf)
+ return NULL;
+
+ mapping = dma_map_single(priv->dev, buf + NETSEC_SKB_PAD, payload_len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, mapping)))
+ goto err_out;
+
+ *dma_handle = mapping;
+ *desc_len = payload_len;
+
+ return buf;
+
+err_out:
+ skb_free_frag(buf);
+ return NULL;
+}
+
+static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ u16 idx = from;
+
+ while (num) {
+ netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
+ idx++;
+ if (idx >= DESC_NUM)
+ idx = 0;
+ num--;
+ }
}
static int netsec_process_rx(struct netsec_priv *priv, int budget)
@@ -741,14 +720,17 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
struct net_device *ndev = priv->ndev;
struct netsec_rx_pkt_info rx_info;
- int done = 0;
- struct netsec_desc desc;
struct sk_buff *skb;
- u16 len;
+ int done = 0;
while (done < budget) {
u16 idx = dring->tail;
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
+ struct netsec_desc *desc = &dring->desc[idx];
+ u16 pkt_len, desc_len;
+ dma_addr_t dma_handle;
+ void *buf_addr;
+ u32 truesize;
if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
/* reading the register clears the irq */
@@ -762,18 +744,59 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
*/
dma_rmb();
done++;
- skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
- if (unlikely(!skb) || rx_info.err_flag) {
+
+ pkt_len = de->buf_len_info >> 16;
+ rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
+ NETSEC_RX_PKT_ERR_MASK;
+ rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+ if (rx_info.err_flag) {
netif_err(priv, drv, priv->ndev,
- "%s: rx fail err(%d)\n",
- __func__, rx_info.err_code);
+ "%s: rx fail err(%d)\n", __func__,
+ rx_info.err_code);
ndev->stats.rx_dropped++;
+ dring->tail = (dring->tail + 1) % DESC_NUM;
+ /* reuse buffer page frag */
+ netsec_rx_fill(priv, idx, 1);
continue;
}
+ rx_info.rx_cksum_result =
+ (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
- dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
- DMA_FROM_DEVICE);
- skb_put(skb, len);
+ /* Allocate a fresh buffer and map it to the hardware.
+ * It will eventually replace the old buffer in the ring.
+ */
+ buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+ if (unlikely(!buf_addr))
+ break;
+
+ dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(desc->addr);
+
+ truesize = SKB_DATA_ALIGN(desc->len + NETSEC_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb = build_skb(desc->addr, truesize);
+ if (unlikely(!skb)) {
+ /* free the newly allocated buffer, we are not going to
+ * use it
+ */
+ dma_unmap_single(priv->dev, dma_handle, desc_len,
+ DMA_FROM_DEVICE);
+ skb_free_frag(buf_addr);
+ netif_err(priv, drv, priv->ndev,
+ "rx failed to build skb\n");
+ break;
+ }
+ dma_unmap_single_attrs(priv->dev, desc->dma_addr, desc->len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+
+ /* Update the descriptor with the new buffer we allocated */
+ desc->len = desc_len;
+ desc->dma_addr = dma_handle;
+ desc->addr = buf_addr;
+
+ skb_reserve(skb, NETSEC_SKB_PAD);
+ skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, priv->ndev);
if (priv->rx_cksum_offload_flag &&
@@ -782,8 +805,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += len;
+ ndev->stats.rx_bytes += pkt_len;
}
+
+ netsec_rx_fill(priv, idx, 1);
+ dring->tail = (dring->tail + 1) % DESC_NUM;
}
return done;
@@ -792,24 +818,17 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
struct netsec_priv *priv;
- int tx, rx, done, todo;
+ int rx, done, todo;
priv = container_of(napi, struct netsec_priv, napi);
+ netsec_process_tx(priv);
+
todo = budget;
do {
- if (!todo)
- break;
-
- tx = netsec_process_tx(priv, todo);
- todo -= tx;
-
- if (!todo)
- break;
-
rx = netsec_process_rx(priv, todo);
todo -= rx;
- } while (rx || tx);
+ } while (rx);
done = budget - todo;
@@ -861,6 +880,41 @@ static void netsec_set_tx_de(struct netsec_priv *priv,
dring->head = (dring->head + 1) % DESC_NUM;
}
+static int netsec_desc_used(struct netsec_desc_ring *dring)
+{
+ int used;
+
+ if (dring->head >= dring->tail)
+ used = dring->head - dring->tail;
+ else
+ used = dring->head + DESC_NUM - dring->tail;
+
+ return used;
+}
+
+static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
+
+ /* keep tail from touching the queue */
+ if (DESC_NUM - used < 2) {
+ netif_stop_queue(priv->ndev);
+
+ /* Make sure we read the updated value in case
+ * descriptors got freed
+ */
+ smp_rmb();
+
+ used = netsec_desc_used(dring);
+ if (DESC_NUM - used < 2)
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(priv->ndev);
+ }
+
+ return 0;
+}
+
static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -871,16 +925,10 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
u16 tso_seg_len = 0;
int filled;
- /* differentiate between full/emtpy ring */
- if (dring->head >= dring->tail)
- filled = dring->head - dring->tail;
- else
- filled = dring->head + DESC_NUM - dring->tail;
-
- if (DESC_NUM - filled < 2) { /* if less than 2 available */
- netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
- netif_stop_queue(priv->ndev);
- dma_wmb();
+ filled = netsec_desc_used(dring);
+ if (netsec_check_stop_tx(priv, filled)) {
+ net_warn_ratelimited("%s %s Tx queue full\n",
+ dev_name(priv->dev), ndev->name);
return NETDEV_TX_BUSY;
}
@@ -946,7 +994,10 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
DMA_TO_DEVICE);
- dev_kfree_skb(desc->skb);
+ if (id == NETSEC_RING_RX)
+ skb_free_frag(desc->addr);
+ else if (id == NETSEC_RING_TX)
+ dev_kfree_skb(desc->skb);
}
memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
@@ -954,7 +1005,6 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
dring->head = 0;
dring->tail = 0;
- dring->pkt_cnt = 0;
if (id == NETSEC_RING_TX)
netdev_reset_queue(priv->ndev);
@@ -977,47 +1027,64 @@ static void netsec_free_dring(struct netsec_priv *priv, int id)
static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
struct netsec_desc_ring *dring = &priv->desc_ring[id];
- int ret = 0;
+ int i;
dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
&dring->desc_dma, GFP_KERNEL);
- if (!dring->vaddr) {
- ret = -ENOMEM;
+ if (!dring->vaddr)
goto err;
- }
dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
- if (!dring->desc) {
- ret = -ENOMEM;
+ if (!dring->desc)
goto err;
+
+ if (id == NETSEC_RING_TX) {
+ for (i = 0; i < DESC_NUM; i++) {
+ struct netsec_de *de;
+
+ de = dring->vaddr + (DESC_SZ * i);
+ /* de->attr is not going to be accessed by the NIC
+ * until netsec_set_tx_de() is called.
+ * No need for a dma_wmb() here
+ */
+ de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
+ }
}
return 0;
err:
netsec_free_dring(priv, id);
- return ret;
+ return -ENOMEM;
}
static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- struct netsec_desc desc;
- struct sk_buff *skb;
- int n;
+ int i;
- desc.len = priv->ndev->mtu + 22;
+ for (i = 0; i < DESC_NUM; i++) {
+ struct netsec_desc *desc = &dring->desc[i];
+ dma_addr_t dma_handle;
+ void *buf;
+ u16 len;
- for (n = 0; n < DESC_NUM; n++) {
- skb = netsec_alloc_skb(priv, &desc);
- if (!skb) {
+ buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+ if (!buf) {
netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
- return -ENOMEM;
+ goto err_out;
}
- netsec_set_rx_de(priv, dring, n, &desc, skb);
+ desc->dma_addr = dma_handle;
+ desc->addr = buf;
+ desc->len = len;
}
+ netsec_rx_fill(priv, 0, DESC_NUM);
+
return 0;
+
+err_out:
+ return -ENOMEM;
}
static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
@@ -1377,6 +1444,8 @@ static int netsec_netdev_init(struct net_device *ndev)
int ret;
u16 data;
+ BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
+
ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 6732f5cbde08..bb6d5fb73035 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -185,8 +185,8 @@
NETIF_MSG_TX_ERR)
/* Parameter for descriptor */
-#define AVE_NR_TXDESC 32 /* Tx descriptor */
-#define AVE_NR_RXDESC 64 /* Rx descriptor */
+#define AVE_NR_TXDESC 64 /* Tx descriptor */
+#define AVE_NR_RXDESC 256 /* Rx descriptor */
#define AVE_DESC_OFS_CMDSTS 0
#define AVE_DESC_OFS_ADDRL 4
@@ -194,6 +194,7 @@
/* Parameter for ethernet frame */
#define AVE_MAX_ETHFRAME 1518
+#define AVE_FRAME_HEADROOM 2
/* Parameter for interrupt */
#define AVE_INTM_COUNT 20
@@ -261,6 +262,7 @@ struct ave_private {
struct regmap *regmap;
unsigned int pinmode_mask;
unsigned int pinmode_val;
+ u32 wolopts;
/* stats */
struct ave_stats stats_rx;
@@ -576,12 +578,13 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
skb = priv->rx.desc[entry].skbs;
if (!skb) {
- skb = netdev_alloc_skb_ip_align(ndev,
- AVE_MAX_ETHFRAME);
+ skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
if (!skb) {
netdev_err(ndev, "can't allocate skb for Rx\n");
return -ENOMEM;
}
+ skb->data += AVE_FRAME_HEADROOM;
+ skb->tail += AVE_FRAME_HEADROOM;
}
/* set disable to cmdsts */
@@ -594,12 +597,12 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
* - Rx buffer begins with 2 byte headroom, and data will be put from
* (buffer + 2).
* To satisfy this, specify the address to put back the buffer
- * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(),
- * and expand the map size by NET_IP_ALIGN.
+ * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size
+ * by AVE_FRAME_HEADROOM.
*/
ret = ave_dma_map(ndev, &priv->rx.desc[entry],
- skb->data - NET_IP_ALIGN,
- AVE_MAX_ETHFRAME + NET_IP_ALIGN,
+ skb->data - AVE_FRAME_HEADROOM,
+ AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM,
DMA_FROM_DEVICE, &paddr);
if (ret) {
netdev_err(ndev, "can't map skb for Rx\n");
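For orientation, the 2-byte headroom described in the comment above is what makes the IP header land on a 4-byte boundary; a standalone arithmetic sketch (assuming a 4-byte-aligned DMA buffer, not driver code):

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header length */
#define HEADROOM	2	/* AVE_FRAME_HEADROOM */

int main(void)
{
	unsigned long buf = 0x1000;	/* assume a 4-byte-aligned buffer */
	unsigned long ip_off = buf + HEADROOM + ETH_HLEN;

	/* 2 + 14 = 16, so the IP header is 4-byte aligned */
	printf("IP header offset %% 4 = %lu\n", ip_off % 4);
	return 0;
}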
@@ -1117,7 +1120,7 @@ static void ave_phy_adjust_link(struct net_device *ndev)
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (cap & FLOW_CTRL_TX)
txcr |= AVE_TXCR_FLOCTR;
@@ -1208,9 +1211,13 @@ static int ave_init(struct net_device *ndev)
priv->phydev = phydev;
- phy_ethtool_get_wol(phydev, &wol);
+ ave_ethtool_get_wol(ndev, &wol);
device_set_wakeup_capable(&ndev->dev, !!wol.supported);
+ /* set wol initial state disabled */
+ wol.wolopts = 0;
+ ave_ethtool_set_wol(ndev, &wol);
+
if (!phy_interface_is_rgmii(phydev))
phy_set_max_speed(phydev, SPEED_100);
@@ -1689,9 +1696,10 @@ static int ave_probe(struct platform_device *pdev)
pdev->name, pdev->id);
/* Register as a NAPI supported driver */
- netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
+ netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx,
+ NAPI_POLL_WEIGHT);
netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
- priv->tx.ndesc);
+ NAPI_POLL_WEIGHT);
platform_set_drvdata(pdev, ndev);
@@ -1734,6 +1742,58 @@ static int ave_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int ave_suspend(struct device *dev)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ave_private *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ if (netif_running(ndev)) {
+ ret = ave_stop(ndev);
+ netif_device_detach(ndev);
+ }
+
+ ave_ethtool_get_wol(ndev, &wol);
+ priv->wolopts = wol.wolopts;
+
+ return ret;
+}
+
+static int ave_resume(struct device *dev)
+{
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct ave_private *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ ave_global_reset(ndev);
+
+ ave_ethtool_get_wol(ndev, &wol);
+ wol.wolopts = priv->wolopts;
+ ave_ethtool_set_wol(ndev, &wol);
+
+ if (ndev->phydev) {
+ ret = phy_resume(ndev->phydev);
+ if (ret)
+ return ret;
+ }
+
+ if (netif_running(ndev)) {
+ ret = ave_open(ndev);
+ netif_device_attach(ndev);
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ave_pm_ops, ave_suspend, ave_resume);
+#define AVE_PM_OPS (&ave_pm_ops)
+#else
+#define AVE_PM_OPS NULL
+#endif
+
static int ave_pro4_get_pinmode(struct ave_private *priv,
phy_interface_t phy_mode, u32 arg)
{
@@ -1908,10 +1968,12 @@ static struct platform_driver ave_driver = {
.remove = ave_remove,
.driver = {
.name = "ave",
+ .pm = AVE_PM_OPS,
.of_match_table = of_ave_match,
},
};
module_platform_driver(ave_driver);
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 324049eebb9b..6209cc1fb305 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -75,6 +75,14 @@ config DWMAC_LPC18XX
---help---
Support for NXP LPC18xx/43xx DWMAC Ethernet.
+config DWMAC_MEDIATEK
+ tristate "MediaTek MT27xx GMAC support"
+ depends on OF && (ARCH_MEDIATEK || COMPILE_TEST)
+ help
+ Support for MediaTek GMAC Ethernet controller.
+
+ This selects support for the MT2712 SoC in the stmmac driver.
+
config DWMAC_MESON
tristate "Amlogic Meson dwmac support"
default ARCH_MESON
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 99967a80a8c8..bf09701d2623 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o
obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
+obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
new file mode 100644
index 000000000000..bf2562995fc8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+/* Peri Configuration register for mt2712 */
+#define PERI_ETH_PHY_INTF_SEL 0x418
+#define PHY_INTF_MII 0
+#define PHY_INTF_RGMII 1
+#define PHY_INTF_RMII 4
+#define RMII_CLK_SRC_RXC BIT(4)
+#define RMII_CLK_SRC_INTERNAL BIT(5)
+
+#define PERI_ETH_DLY 0x428
+#define ETH_DLY_GTXC_INV BIT(6)
+#define ETH_DLY_GTXC_ENABLE BIT(5)
+#define ETH_DLY_GTXC_STAGES GENMASK(4, 0)
+#define ETH_DLY_TXC_INV BIT(20)
+#define ETH_DLY_TXC_ENABLE BIT(19)
+#define ETH_DLY_TXC_STAGES GENMASK(18, 14)
+#define ETH_DLY_RXC_INV BIT(13)
+#define ETH_DLY_RXC_ENABLE BIT(12)
+#define ETH_DLY_RXC_STAGES GENMASK(11, 7)
+
+#define PERI_ETH_DLY_FINE 0x800
+#define ETH_RMII_DLY_TX_INV BIT(2)
+#define ETH_FINE_DLY_GTXC BIT(1)
+#define ETH_FINE_DLY_RXC BIT(0)
+
+struct mac_delay_struct {
+ u32 tx_delay;
+ u32 rx_delay;
+ bool tx_inv;
+ bool rx_inv;
+};
+
+struct mediatek_dwmac_plat_data {
+ const struct mediatek_dwmac_variant *variant;
+ struct mac_delay_struct mac_delay;
+ struct clk_bulk_data *clks;
+ struct device_node *np;
+ struct regmap *peri_regmap;
+ struct device *dev;
+ int phy_mode;
+ bool rmii_rxc;
+};
+
+struct mediatek_dwmac_variant {
+ int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
+ int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+
+ u32 dma_bit_mask;
+ u32 rx_delay_max;
+ u32 tx_delay_max;
+};
+
+/* list of clocks required for mac */
+static const char * const mt2712_dwmac_clk_l[] = {
+ "axi", "apb", "mac_main", "ptp_ref"
+};
+
+static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
+{
+ int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
+ u32 intf_val = 0;
+
+ /* select phy interface in top control domain */
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val |= PHY_INTF_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val |= (PHY_INTF_RMII | rmii_rxc);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ intf_val |= PHY_INTF_RGMII;
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+
+ regmap_write(plat->peri_regmap, PERI_ETH_PHY_INTF_SEL, intf_val);
+
+ return 0;
+}
+
+static void mt2712_delay_ps2stage(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ /* 550ps per stage for MII/RMII */
+ mac_delay->tx_delay /= 550;
+ mac_delay->rx_delay /= 550;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* 170ps per stage for RGMII */
+ mac_delay->tx_delay /= 170;
+ mac_delay->rx_delay /= 170;
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ break;
+ }
+}
+
+static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+ u32 delay_val = 0, fine_val = 0;
+
+ mt2712_delay_ps2stage(plat);
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ /* the rmii reference clock comes from the external phy, and the
+ * "rmii_rxc" property indicates which pin (TXC/RXC) the reference
+ * clock is connected to. Since the reference clock is a received
+ * signal, rx_delay/rx_inv describe its timing adjustment
+ */
+ if (plat->rmii_rxc) {
+ /* the rmii reference clock from outside is connected
+ * to RXC pin, the reference clock will be adjusted
+ * by RXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ } else {
+ /* the rmii reference clock from outside is connected
+ * to TXC pin, the reference clock will be adjusted
+ * by TXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);
+ }
+ /* tx_inv inverts the tx clock inside the mac relative to the
+ * reference clock from the external phy; this bit lives in the
+ * same register as the fine-tune bits
+ */
+ if (mac_delay->tx_inv)
+ fine_val = ETH_RMII_DLY_TX_INV;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ fine_val = ETH_FINE_DLY_GTXC | ETH_FINE_DLY_RXC;
+
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+ regmap_write(plat->peri_regmap, PERI_ETH_DLY, delay_val);
+ regmap_write(plat->peri_regmap, PERI_ETH_DLY_FINE, fine_val);
+
+ return 0;
+}
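A worked example of the picosecond-to-stage conversion performed by mt2712_delay_ps2stage() above, using an illustrative value and the RGMII figure of 170 ps per stage from the code:

#include <stdio.h>

int main(void)
{
	unsigned int tx_delay_ps = 2000;	/* e.g. from "mediatek,tx-delay-ps" */
	unsigned int stages = tx_delay_ps / 170;	/* RGMII: 170 ps/stage */

	/* integer division: 2000 / 170 = 11 stages (~1870 ps actual delay) */
	printf("%u ps -> %u stages\n", tx_delay_ps, stages);
	return 0;
}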
+
+static const struct mediatek_dwmac_variant mt2712_gmac_variant = {
+ .dwmac_set_phy_interface = mt2712_set_interface,
+ .dwmac_set_delay = mt2712_set_delay,
+ .clk_list = mt2712_dwmac_clk_l,
+ .num_clks = ARRAY_SIZE(mt2712_dwmac_clk_l),
+ .dma_bit_mask = 33,
+ .rx_delay_max = 17600,
+ .tx_delay_max = 17600,
+};
+
+static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+ u32 tx_delay_ps, rx_delay_ps;
+
+ plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
+ if (IS_ERR(plat->peri_regmap)) {
+ dev_err(plat->dev, "Failed to get pericfg syscon\n");
+ return PTR_ERR(plat->peri_regmap);
+ }
+
+ plat->phy_mode = of_get_phy_mode(plat->np);
+ if (plat->phy_mode < 0) {
+ dev_err(plat->dev, "failed to get phy-mode\n");
+ return -EINVAL;
+ }
+
+ if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
+ if (tx_delay_ps < plat->variant->tx_delay_max) {
+ mac_delay->tx_delay = tx_delay_ps;
+ } else {
+ dev_err(plat->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(plat->np, "mediatek,rx-delay-ps", &rx_delay_ps)) {
+ if (rx_delay_ps < plat->variant->rx_delay_max) {
+ mac_delay->rx_delay = rx_delay_ps;
+ } else {
+ dev_err(plat->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
+ return -EINVAL;
+ }
+ }
+
+ mac_delay->tx_inv = of_property_read_bool(plat->np, "mediatek,txc-inverse");
+ mac_delay->rx_inv = of_property_read_bool(plat->np, "mediatek,rxc-inverse");
+ plat->rmii_rxc = of_property_read_bool(plat->np, "mediatek,rmii-rxc");
+
+ return 0;
+}
+
+static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
+{
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+ int i, num = variant->num_clks;
+
+ plat->clks = devm_kcalloc(plat->dev, num, sizeof(*plat->clks), GFP_KERNEL);
+ if (!plat->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ plat->clks[i].id = variant->clk_list[i];
+
+ return devm_clk_bulk_get(plat->dev, num, plat->clks);
+}
+
+static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct mediatek_dwmac_plat_data *plat = priv;
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(plat->dev, DMA_BIT_MASK(variant->dma_bit_mask));
+ if (ret) {
+ dev_err(plat->dev, "No suitable DMA available, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = variant->dwmac_set_phy_interface(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = variant->dwmac_set_delay(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
+ if (ret) {
+ dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct mediatek_dwmac_plat_data *plat = priv;
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+
+ clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+}
+
+static int mediatek_dwmac_probe(struct platform_device *pdev)
+{
+ struct mediatek_dwmac_plat_data *priv_plat;
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ int ret;
+
+ priv_plat = devm_kzalloc(&pdev->dev, sizeof(*priv_plat), GFP_KERNEL);
+ if (!priv_plat)
+ return -ENOMEM;
+
+ priv_plat->variant = of_device_get_match_data(&pdev->dev);
+ if (!priv_plat->variant) {
+ dev_err(&pdev->dev, "Missing dwmac-mediatek variant\n");
+ return -EINVAL;
+ }
+
+ priv_plat->dev = &pdev->dev;
+ priv_plat->np = pdev->dev.of_node;
+
+ ret = mediatek_dwmac_config_dt(priv_plat);
+ if (ret)
+ return ret;
+
+ ret = mediatek_dwmac_clk_init(priv_plat);
+ if (ret)
+ return ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ plat_dat->interface = priv_plat->phy_mode;
+ /* clk_csr_i = 250-300MHz & MDC = clk_csr_i/124 */
+ plat_dat->clk_csr = 5;
+ plat_dat->has_gmac4 = 1;
+ plat_dat->has_gmac = 0;
+ plat_dat->pmt = 0;
+ plat_dat->maxmtu = ETH_DATA_LEN;
+ plat_dat->bsp_priv = priv_plat;
+ plat_dat->init = mediatek_dwmac_init;
+ plat_dat->exit = mediatek_dwmac_exit;
+ mediatek_dwmac_init(pdev, priv_plat);
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret) {
+ stmmac_remove_config_dt(pdev, plat_dat);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id mediatek_dwmac_match[] = {
+ { .compatible = "mediatek,mt2712-gmac",
+ .data = &mt2712_gmac_variant },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mediatek_dwmac_match);
+
+static struct platform_driver mediatek_dwmac_driver = {
+ .probe = mediatek_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "dwmac-mediatek",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = mediatek_dwmac_match,
+ },
+};
+module_platform_driver(mediatek_dwmac_driver);
+
+MODULE_AUTHOR("Biao Huang <biao.huang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek DWMAC specific glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 5710864fa809..d1f61c25d82b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -458,8 +458,10 @@ stmmac_get_pauseparam(struct net_device *netdev,
if (!adv_lp.pause)
return;
} else {
- if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
- !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ netdev->phydev->supported) ||
+ !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ netdev->phydev->supported))
return;
}
@@ -487,8 +489,10 @@ stmmac_set_pauseparam(struct net_device *netdev,
if (!adv_lp.pause)
return -EOPNOTSUPP;
} else {
- if (!(phy->supported & SUPPORTED_Pause) ||
- !(phy->supported & SUPPORTED_Asym_Pause))
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phy->supported) ||
+ !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phy->supported))
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 076a8be18d67..0e0a0789c2ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2550,12 +2550,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
netdev_warn(priv->dev, "PTP init failed\n");
}
-#ifdef CONFIG_DEBUG_FS
- ret = stmmac_init_fs(dev);
- if (ret < 0)
- netdev_warn(priv->dev, "%s: failed debugFS registration\n",
- __func__);
-#endif
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if (priv->use_riwt) {
@@ -2756,10 +2750,6 @@ static int stmmac_release(struct net_device *dev)
netif_carrier_off(dev);
-#ifdef CONFIG_DEBUG_FS
- stmmac_exit_fs(dev);
-#endif
-
stmmac_release_ptp(priv);
return 0;
@@ -3891,7 +3881,7 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
}
}
-static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
+static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -3899,6 +3889,9 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
+ if ((dev->flags & IFF_UP) == 0)
+ return 0;
+
for (queue = 0; queue < rx_count; queue++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
@@ -3933,23 +3926,9 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
-static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
-{
- return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
-}
-
-/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
-
-static const struct file_operations stmmac_rings_status_fops = {
- .owner = THIS_MODULE,
- .open = stmmac_sysfs_ring_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
+static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
struct net_device *dev = seq->private;
struct stmmac_priv *priv = netdev_priv(dev);
@@ -4012,19 +3991,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
return 0;
}
-
-static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
-{
- return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
-}
-
-static const struct file_operations stmmac_dma_cap_fops = {
- .owner = THIS_MODULE,
- .open = stmmac_sysfs_dma_cap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
static int stmmac_init_fs(struct net_device *dev)
{
@@ -4108,7 +4075,7 @@ static void stmmac_reset_subtask(struct stmmac_priv *priv)
set_bit(STMMAC_DOWN, &priv->state);
dev_close(priv->dev);
- dev_open(priv->dev);
+ dev_open(priv->dev, NULL);
clear_bit(STMMAC_DOWN, &priv->state);
clear_bit(STMMAC_RESETING, &priv->state);
rtnl_unlock();
@@ -4257,6 +4224,7 @@ int stmmac_dvr_probe(struct device *device,
priv->wq = create_singlethread_workqueue("stmmac_wq");
if (!priv->wq) {
dev_err(priv->device, "failed to create workqueue\n");
+ ret = -ENOMEM;
goto error_wq;
}
@@ -4397,6 +4365,13 @@ int stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
+#ifdef CONFIG_DEBUG_FS
+ ret = stmmac_init_fs(ndev);
+ if (ret < 0)
+ netdev_warn(priv->dev, "%s: failed debugFS registration\n",
+ __func__);
+#endif
+
return ret;
error_netdev_register:
@@ -4432,6 +4407,9 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
+#ifdef CONFIG_DEBUG_FS
+ stmmac_exit_fs(ndev);
+#endif
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 863fd602fd33..ff641cf30a4e 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2691,7 +2691,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
sbus_dp = op->dev.parent->of_node;
/* We can match PCI devices too, do not accept those here. */
- if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi"))
+ if (!of_node_name_eq(sbus_dp, "sbus") && !of_node_name_eq(sbus_dp, "sbi"))
return err;
if (is_qfe) {
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index f932923f7d56..bb126be1eb72 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -121,7 +121,8 @@ config TLAN
Devices currently supported by this driver are Compaq Netelligent,
Compaq NetFlex and Olicom cards. Please read the file
- <file:Documentation/networking/tlan.txt> for more details.
+ <file:Documentation/networking/device_drivers/ti/tlan.txt>
+ for more details.
To compile this driver as a module, choose M here. The module
will be called tlan.
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 9b8a30bf939b..810dfc7de1f9 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -991,7 +991,6 @@ static int cpmac_open(struct net_device *dev)
cpmac_hw_start(dev);
napi_enable(&priv->napi);
- dev->phydev->state = PHY_CHANGELINK;
phy_start(dev->phydev);
return 0;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 500f7ed8c58c..0e8f61a29479 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -283,7 +283,7 @@ struct cpsw_ss_regs {
#define CTRL_V2_TS_BITS \
(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
- TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)
#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
@@ -293,7 +293,7 @@ struct cpsw_ss_regs {
#define CTRL_V3_TS_BITS \
(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
- TS_LTYPE1_EN)
+ TS_LTYPE1_EN | VLAN_LTYPE1_EN)
#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
@@ -466,6 +466,8 @@ struct cpsw_priv {
bool mqprio_hw;
int fifo_bw[CPSW_TC_NUM];
int shp_cfg_speed;
+ int tx_ts_enabled;
+ int rx_ts_enabled;
u32 emac_port;
struct cpsw_common *cpsw;
};
@@ -565,26 +567,14 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
(func)(slave++, ##arg); \
} while (0)
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid);
+
static inline int cpsw_get_slave_port(u32 slave_num)
{
return slave_num + 1;
}
-static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr)
-{
- struct cpsw_common *cpsw = priv->cpsw;
-
- if (cpsw->data.dual_emac) {
- struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
-
- cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST,
- ALE_VLAN, slave->port_vlan, 0);
- return;
- }
-
- cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
-}
-
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -640,7 +630,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
- __dev_mc_unsync(ndev, NULL);
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -661,29 +651,148 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
}
}
-static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr)
+struct addr_sync_ctx {
+ struct net_device *ndev;
+ const u8 *addr; /* address to be synched */
+ int consumed; /* number of address instances */
+ int flush; /* flush flag */
+};
+
+/**
+ * cpsw_set_mc - add or delete a multicast entry in the ALE table
+ * @ndev: device to sync
+ * @addr: address to be added or deleted
+ * @vid: vlan id; if vid < 0, set/unset the address for the real device
+ * @add: add the address if the flag is set, remove it otherwise
+ */
+static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
+ int vid, int add)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int mask, flags, ret;
+
+ if (vid < 0) {
+ if (cpsw->data.dual_emac)
+ vid = cpsw->slaves[priv->emac_port].port_vlan;
+ else
+ vid = 0;
+ }
+
+ mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
+ flags = vid ? ALE_VLAN : 0;
+
+ if (add)
+ ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
+ else
+ ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+
+ return ret;
+}
+
+static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0, ret = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (found)
+ sync_ctx->consumed++;
+
+ if (sync_ctx->flush) {
+ if (!found)
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+ }
+
+ if (found)
+ ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
+
+ return ret;
+}
+
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+ int ret;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 0;
+
+ ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num && !ret)
+ ret = cpsw_set_mc(ndev, addr, -1, 1);
+
+ return ret;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 1;
+
+ vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed == num)
+ cpsw_set_mc(ndev, addr, -1, 0);
- cpsw_add_mcast(priv, addr);
return 0;
}
-static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr)
+static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int vid, flags;
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0;
- if (cpsw->data.dual_emac) {
- vid = cpsw->slaves[priv->emac_port].port_vlan;
- flags = ALE_VLAN;
- } else {
- vid = 0;
- flags = 0;
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
}
- cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+ if (!found)
+ return 0;
+
+ sync_ctx->consumed++;
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+}
+
+static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.consumed = 0;
+
+ vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
return 0;
}
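The helpers above hinge on one counting rule: the address is (un)programmed for the real device only when the VLAN walk accounted for fewer instances than the address list reported. A minimal standalone sketch of that rule, with made-up names:

#include <stdio.h>

/* mimics the sync_ctx.consumed < num test in cpsw_add_mc_addr() */
static void sync_addr(int consumed_by_vlans, int total_instances)
{
	if (consumed_by_vlans < total_instances)
		printf("program the address for the real device too\n");
	else
		printf("all instances belong to VLAN devices; skip\n");
}

int main(void)
{
	sync_addr(1, 2);	/* one VLAN user plus the real device */
	sync_addr(2, 2);	/* VLAN-only address */
	return 0;
}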
@@ -704,7 +813,9 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* Restore allmulti on vlans if necessary */
cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
- __dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr);
+ /* add/remove mcast address either for real netdev or for vlan */
+ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
+ cpsw_del_mc_addr);
}
static void cpsw_intr_enable(struct cpsw_common *cpsw)
@@ -796,6 +907,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct net_device *ndev = skb->dev;
int ret = 0, port;
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_priv *priv;
if (cpsw->data.dual_emac) {
port = CPDMA_RX_SOURCE_PORT(status);
@@ -830,7 +942,9 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb_put(skb, len);
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
- cpts_rx_timestamp(cpsw->cpts, skb);
+ priv = netdev_priv(ndev);
+ if (priv->rx_ts_enabled)
+ cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -1845,9 +1959,23 @@ static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave_write(slave, tx_prio_map, tx_prio_rg);
}
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+ struct cpsw_priv *priv = arg;
+
+ if (!vdev)
+ return 0;
+
+ cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
+ return 0;
+}
+
/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
+ /* restore vlan configurations */
+ vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
+
/* restore MQPRIO offload */
for_each_slave(priv, cpsw_mqprio_resume, priv);
@@ -1964,7 +2092,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
struct cpsw_common *cpsw = priv->cpsw;
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
- __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr);
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
netif_tx_stop_all_queues(priv->ndev);
netif_carrier_off(priv->ndev);
@@ -2003,7 +2131,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
+ priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
q_idx = skb_get_queue_mapping(skb);
@@ -2047,13 +2175,13 @@ fail:
#if IS_ENABLED(CONFIG_TI_CPTS)
-static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
+ struct cpsw_common *cpsw = priv->cpsw;
struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
u32 ts_en, seq_id;
- if (!cpts_is_tx_enabled(cpsw->cpts) &&
- !cpts_is_rx_enabled(cpsw->cpts)) {
+ if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -2061,10 +2189,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ts_en |= CPSW_V1_TS_TX_EN;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -2084,20 +2212,20 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
case CPSW_VERSION_2:
ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ctrl |= CTRL_V2_TX_TS_BITS;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ctrl |= CTRL_V2_RX_TS_BITS;
break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ctrl |= CTRL_V3_TX_TS_BITS;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ctrl |= CTRL_V3_RX_TS_BITS;
break;
}
@@ -2107,6 +2235,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
slave_write(slave, ctrl, CPSW2_CONTROL);
writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -2114,7 +2243,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
struct cpsw_priv *priv = netdev_priv(dev);
struct hwtstamp_config cfg;
struct cpsw_common *cpsw = priv->cpsw;
- struct cpts *cpts = cpsw->cpts;
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
@@ -2133,7 +2261,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts_rx_enable(cpts, 0);
+ priv->rx_ts_enabled = 0;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_NTP_ALL:
@@ -2141,7 +2269,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2153,18 +2281,18 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);
+ priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
switch (cpsw->version) {
case CPSW_VERSION_1:
- cpsw_hwtstamp_v1(cpsw);
+ cpsw_hwtstamp_v1(priv);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
@@ -2180,7 +2308,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_common *cpsw = ndev_to_cpsw(dev);
- struct cpts *cpts = cpsw->cpts;
+ struct cpsw_priv *priv = netdev_priv(dev);
struct hwtstamp_config cfg;
if (cpsw->version != CPSW_VERSION_1 &&
@@ -2189,10 +2317,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts_is_tx_enabled(cpts) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+ cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = priv->rx_ts_enabled;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -2415,6 +2541,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
HOST_PORT_NUM, ALE_VLAN, vid);
ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
0, ALE_VLAN, vid);
+ ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
err:
pm_runtime_put(cpsw->dev);
return ret;
@@ -3144,7 +3271,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
const __be32 *parp;
/* This is no slave child node, continue */
- if (strcmp(slave_node->name, "slave"))
+ if (!of_node_name_eq(slave_node, "slave"))
continue;
slave_data->phy_node = of_parse_phandle(slave_node,
@@ -3240,7 +3367,7 @@ static void cpsw_remove_dt(struct platform_device *pdev)
for_each_available_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = &data->slave_data[i];
- if (strcmp(slave_node->name, "slave"))
+ if (!of_node_name_eq(slave_node, "slave"))
continue;
if (of_phy_is_fixed_link(slave_node))
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index b96b93c686bf..054f78295d1d 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -86,6 +86,25 @@ static int cpts_purge_events(struct cpts *cpts)
return removed ? 0 : -1;
}
+static void cpts_purge_txq(struct cpts *cpts)
+{
+ struct cpts_skb_cb_data *skb_cb;
+ struct sk_buff *skb, *tmp;
+ int removed = 0;
+
+ skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+ skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+ if (time_after(jiffies, skb_cb->tmo)) {
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
+}
+
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
struct sk_buff *skb, *tmp;
@@ -119,9 +138,7 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
if (time_after(jiffies, skb_cb->tmo)) {
/* timeout any expired skbs over 1s */
- dev_dbg(cpts->dev,
- "expiring tx timestamp mtype %u seqid %04x\n",
- mtype, seqid);
+ dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
__skb_unlink(skb, &cpts->txq);
dev_consume_skb_any(skb);
}
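The expiry tests above rely on the kernel's wrap-safe time_after(); a standalone approximation (the real macro also type-checks its arguments) shows why the comparison survives a jiffies wrap:

#include <stdio.h>

#define time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long jiffies = 5;	/* pretend current time, post-wrap */
	unsigned long tmo = -3UL;	/* deadline set just before the wrap */

	/* signed difference keeps the ordering despite the numeric wrap */
	printf("expired: %d\n", time_after(jiffies, tmo));
	return 0;
}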
@@ -294,8 +311,11 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp)
spin_lock_irqsave(&cpts->lock, flags);
ts = ns_to_timespec64(timecounter_read(&cpts->tc));
- if (!skb_queue_empty(&cpts->txq))
- delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ if (!skb_queue_empty(&cpts->txq)) {
+ cpts_purge_txq(cpts);
+ if (!skb_queue_empty(&cpts->txq))
+ delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ }
spin_unlock_irqrestore(&cpts->lock, flags);
pr_debug("cpts overflow check at %lld.%09ld\n",
@@ -410,8 +430,6 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
u64 ns;
struct skb_shared_hwtstamps *ssh;
- if (!cpts->rx_enable)
- return;
ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
if (!ns)
return;
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 73d73faf0f38..d2c7decd59b6 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -136,26 +136,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
void cpts_release(struct cpts *cpts);
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
- cpts->rx_enable = enable;
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
- return !!cpts->rx_enable;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
- cpts->tx_enable = enable;
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
- return !!cpts->tx_enable;
-}
-
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
unsigned int class = ptp_classify_raw(skb);
@@ -197,24 +177,6 @@ static inline void cpts_unregister(struct cpts *cpts)
{
}
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
- return false;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
- return false;
-}
-
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
return false;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 9153db120352..840820402cd0 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1912,11 +1912,15 @@ static int davinci_emac_probe(struct platform_device *pdev)
ether_addr_copy(ndev->dev_addr, priv->mac_addr);
if (!is_valid_ether_addr(priv->mac_addr)) {
- /* Use random MAC if none passed */
- eth_hw_addr_random(ndev);
- memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
- dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
- priv->mac_addr);
+ /* Try nvmem if MAC wasn't passed over pdata or DT. */
+ rc = nvmem_get_mac_address(&pdev->dev, priv->mac_addr);
+ if (rc) {
+ /* Use random MAC if still none obtained. */
+ eth_hw_addr_random(ndev);
+ memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
+ dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+ priv->mac_addr);
+ }
}
ndev->netdev_ops = &emac_netdev_ops;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 0397ccb6597e..5174d318901e 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -763,6 +763,8 @@ struct gbe_priv {
int cpts_registered;
struct cpts *cpts;
+ int rx_ts_enabled;
+ int tx_ts_enabled;
};
struct gbe_intf {
@@ -2564,7 +2566,7 @@ static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
- !cpts_is_tx_enabled(gbe_dev->cpts))
+ !gbe_dev->tx_ts_enabled)
return 0;
/* If phy has the txtstamp api, assume it will do it.
@@ -2598,7 +2600,9 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
return 0;
}
- cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+ if (gbe_dev->rx_ts_enabled)
+ cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+
p_info->rxtstamp_complete = true;
return 0;
@@ -2614,10 +2618,8 @@ static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts_is_tx_enabled(cpts) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+ cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = gbe_dev->rx_ts_enabled;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -2628,8 +2630,8 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
struct gbe_slave *slave = gbe_intf->slave;
u32 ts_en, seq_id, ctl;
- if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
- !cpts_is_tx_enabled(gbe_dev->cpts)) {
+ if (!gbe_dev->rx_ts_enabled &&
+ !gbe_dev->tx_ts_enabled) {
writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
return;
}
@@ -2641,10 +2643,10 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
(slave->ts_ctl.uni ? TS_UNI_EN :
slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
- if (cpts_is_tx_enabled(gbe_dev->cpts))
+ if (gbe_dev->tx_ts_enabled)
ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
- if (cpts_is_rx_enabled(gbe_dev->cpts))
+ if (gbe_dev->rx_ts_enabled)
ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
writel(ts_en, GBE_REG_ADDR(slave, port_regs, ts_ctl));
@@ -2670,10 +2672,10 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
switch (cfg.tx_type) {
case HWTSTAMP_TX_OFF:
- cpts_tx_enable(cpts, 0);
+ gbe_dev->tx_ts_enabled = 0;
break;
case HWTSTAMP_TX_ON:
- cpts_tx_enable(cpts, 1);
+ gbe_dev->tx_ts_enabled = 1;
break;
default:
return -ERANGE;
@@ -2681,12 +2683,12 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts_rx_enable(cpts, 0);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2698,7 +2700,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
@@ -3621,7 +3623,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
return -EINVAL;
}
- if (!strcmp(node->name, "gbe")) {
+ if (of_node_name_eq(node, "gbe")) {
ret = get_gbe_resource_version(gbe_dev, node);
if (ret)
return ret;
@@ -3635,7 +3637,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
else
ret = -ENODEV;
- } else if (!strcmp(node->name, "xgbe")) {
+ } else if (of_node_name_eq(node, "xgbe")) {
ret = set_xgbe_ethss10_priv(gbe_dev, node);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 93d142867c2a..b4ab1a5f6cd0 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -69,7 +69,9 @@ MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
MODULE_LICENSE("GPL");
-/* Turn on debugging. See Documentation/networking/tlan.txt for details */
+/* Turn on debugging.
+ * See Documentation/networking/device_drivers/ti/tlan.txt for details
+ */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 6a71c2c0f17d..c50a9772f4af 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -607,9 +607,9 @@ static void tc_handle_link_change(struct net_device *dev)
static int tc_mii_probe(struct net_device *dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct tc35815_local *lp = netdev_priv(dev);
struct phy_device *phydev;
- u32 dropmask;
phydev = phy_find_first(lp->mii_bus);
if (!phydev) {
@@ -630,17 +630,22 @@ static int tc_mii_probe(struct net_device *dev)
/* mask with MAC supported features */
phy_set_max_speed(phydev, SPEED_100);
- dropmask = 0;
- if (options.speed == 10)
- dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
- else if (options.speed == 100)
- dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
- if (options.duplex == 1)
- dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
- else if (options.duplex == 2)
- dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
- phydev->supported &= ~dropmask;
- phydev->advertising = phydev->supported;
+ if (options.speed == 10) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ } else if (options.speed == 100) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
+ }
+ if (options.duplex == 1) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ } else if (options.duplex == 2) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ }
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
lp->link = 0;
lp->speed = 0;
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ef9538ee53d0..82412691ee66 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -3605,7 +3605,7 @@ static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
"tx_jumbo",
"rx_mac_control_frames",
"tx_mac_control_frames",
- "rx_frame_alignement_errors",
+ "rx_frame_alignment_errors",
"rx_long_ok",
"rx_long_err",
"tx_sqe_errors",
diff --git a/drivers/net/fjes/fjes_debugfs.c b/drivers/net/fjes/fjes_debugfs.c
index 30052ebd52bf..7fed88ea27a5 100644
--- a/drivers/net/fjes/fjes_debugfs.c
+++ b/drivers/net/fjes/fjes_debugfs.c
@@ -62,19 +62,7 @@ static int fjes_dbg_status_show(struct seq_file *m, void *v)
return 0;
}
-
-static int fjes_dbg_status_open(struct inode *inode, struct file *file)
-{
- return single_open(file, fjes_dbg_status_show, inode->i_private);
-}
-
-static const struct file_operations fjes_dbg_status_fops = {
- .owner = THIS_MODULE,
- .open = fjes_dbg_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(fjes_dbg_status);
void fjes_dbg_adapter_init(struct fjes_adapter *adapter)
{
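Both of the conversions above (here and in stmmac_main.c) lean on DEFINE_SHOW_ATTRIBUTE() to generate the boilerplate that was deleted, which is also why the show functions were renamed to end in _show. Roughly, the macro expands as paraphrased below (see include/linux/seq_file.h for the authoritative definition):

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
};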
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index a0cd1c41cf5f..58bbba8582b0 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -70,6 +70,7 @@ struct geneve_dev {
bool collect_md;
bool use_udp6_rx_checksums;
bool ttl_inherit;
+ enum ifla_geneve_df df;
};
struct geneve_sock {
@@ -387,6 +388,59 @@ drop:
return 0;
}
+/* Callback from net/ipv{4,6}/udp.c to check that we have a tunnel for errors */
+static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ struct genevehdr *geneveh;
+ struct geneve_sock *gs;
+ u8 zero_vni[3] = { 0 };
+ u8 *vni = zero_vni;
+
+ if (skb->len < GENEVE_BASE_HLEN)
+ return -EINVAL;
+
+ geneveh = geneve_hdr(skb);
+ if (geneveh->ver != GENEVE_VER)
+ return -EINVAL;
+
+ if (geneveh->proto_type != htons(ETH_P_TEB))
+ return -EINVAL;
+
+ gs = rcu_dereference_sk_user_data(sk);
+ if (!gs)
+ return -ENOENT;
+
+ if (geneve_get_sk_family(gs) == AF_INET) {
+ struct iphdr *iph = ip_hdr(skb);
+ __be32 addr4 = 0;
+
+ if (!gs->collect_md) {
+ vni = geneve_hdr(skb)->vni;
+ addr4 = iph->daddr;
+ }
+
+ return geneve_lookup(gs, addr4, vni) ? 0 : -ENOENT;
+ }
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (geneve_get_sk_family(gs) == AF_INET6) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct in6_addr addr6;
+
+ memset(&addr6, 0, sizeof(struct in6_addr));
+
+ if (!gs->collect_md) {
+ vni = geneve_hdr(skb)->vni;
+ addr6 = ip6h->daddr;
+ }
+
+ return geneve6_lookup(gs, addr6, vni) ? 0 : -ENOENT;
+ }
+#endif
+
+ return -EPFNOSUPPORT;
+}
+
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
__be16 port, bool ipv6_rx_csum)
{
@@ -544,6 +598,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
tunnel_cfg.gro_receive = geneve_gro_receive;
tunnel_cfg.gro_complete = geneve_gro_complete;
tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
+ tunnel_cfg.encap_err_lookup = geneve_udp_encap_err_lookup;
tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
list_add(&gs->list, &gn->sock_list);
@@ -823,8 +878,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct rtable *rt;
struct flowi4 fl4;
__u8 tos, ttl;
+ __be16 df = 0;
__be16 sport;
- __be16 df;
int err;
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
@@ -838,6 +893,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (geneve->collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
+
+ df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
if (geneve->ttl_inherit)
@@ -845,8 +902,22 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
else
ttl = key->ttl;
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+ if (geneve->df == GENEVE_DF_SET) {
+ df = htons(IP_DF);
+ } else if (geneve->df == GENEVE_DF_INHERIT) {
+ struct ethhdr *eth = eth_hdr(skb);
+
+ if (ntohs(eth->h_proto) == ETH_P_IPV6) {
+ df = htons(IP_DF);
+ } else if (ntohs(eth->h_proto) == ETH_P_IP) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->frag_off & htons(IP_DF))
+ df = htons(IP_DF);
+ }
+ }
}
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
if (unlikely(err))
@@ -1093,6 +1164,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
[IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 },
+ [IFLA_GENEVE_DF] = { .type = NLA_U8 },
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1128,6 +1200,16 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
+ if (data[IFLA_GENEVE_DF]) {
+ enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]);
+
+ if (df < 0 || df > GENEVE_DF_MAX) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF],
+ "Invalid DF attribute");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -1173,7 +1255,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
struct netlink_ext_ack *extack,
const struct ip_tunnel_info *info,
bool metadata, bool ipv6_rx_csum,
- bool ttl_inherit)
+ bool ttl_inherit, enum ifla_geneve_df df)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -1223,6 +1305,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
geneve->collect_md = metadata;
geneve->use_udp6_rx_checksums = ipv6_rx_csum;
geneve->ttl_inherit = ttl_inherit;
+ geneve->df = df;
err = register_netdevice(dev);
if (err)
@@ -1242,7 +1325,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack,
struct ip_tunnel_info *info, bool *metadata,
bool *use_udp6_rx_checksums, bool *ttl_inherit,
- bool changelink)
+ enum ifla_geneve_df *df, bool changelink)
{
int attrtype;
@@ -1330,6 +1413,9 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_GENEVE_TOS])
info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+ if (data[IFLA_GENEVE_DF])
+ *df = nla_get_u8(data[IFLA_GENEVE_DF]);
+
if (data[IFLA_GENEVE_LABEL]) {
info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
IPV6_FLOWLABEL_MASK;
@@ -1448,6 +1534,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ enum ifla_geneve_df df = GENEVE_DF_UNSET;
bool use_udp6_rx_checksums = false;
struct ip_tunnel_info info;
bool ttl_inherit = false;
@@ -1456,12 +1543,12 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
init_tnl_info(&info, GENEVE_UDP_PORT);
err = geneve_nl2info(tb, data, extack, &info, &metadata,
- &use_udp6_rx_checksums, &ttl_inherit, false);
+ &use_udp6_rx_checksums, &ttl_inherit, &df, false);
if (err)
return err;
err = geneve_configure(net, dev, extack, &info, metadata,
- use_udp6_rx_checksums, ttl_inherit);
+ use_udp6_rx_checksums, ttl_inherit, df);
if (err)
return err;
@@ -1524,6 +1611,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_info info;
bool metadata;
bool use_udp6_rx_checksums;
+ enum ifla_geneve_df df;
bool ttl_inherit;
int err;
@@ -1539,7 +1627,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
use_udp6_rx_checksums = geneve->use_udp6_rx_checksums;
ttl_inherit = geneve->ttl_inherit;
err = geneve_nl2info(tb, data, extack, &info, &metadata,
- &use_udp6_rx_checksums, &ttl_inherit, true);
+ &use_udp6_rx_checksums, &ttl_inherit, &df, true);
if (err)
return err;
@@ -1572,6 +1660,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
+ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_DF */
nla_total_size(sizeof(__be32)) + /* IFLA_GENEVE_LABEL */
nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */
@@ -1620,6 +1709,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->df))
+ goto nla_put_failure;
+
if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
goto nla_put_failure;
@@ -1666,12 +1758,13 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
memset(tb, 0, sizeof(tb));
dev = rtnl_create_link(net, name, name_assign_type,
- &geneve_link_ops, tb);
+ &geneve_link_ops, tb, NULL);
if (IS_ERR(dev))
return dev;
init_tnl_info(&info, dst_port);
- err = geneve_configure(net, dev, NULL, &info, true, true, false);
+ err = geneve_configure(net, dev, NULL, &info,
+ true, true, false, GENEVE_DF_UNSET);
if (err) {
free_netdev(dev);
return ERR_PTR(err);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 17e6dcd2eb42..28c749980359 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -120,7 +120,7 @@ struct sixpack {
struct timer_list tx_t;
struct timer_list resync_t;
refcount_t refcnt;
- struct semaphore dead_sem;
+ struct completion dead;
spinlock_t lock;
};
@@ -389,7 +389,7 @@ static struct sixpack *sp_get(struct tty_struct *tty)
static void sp_put(struct sixpack *sp)
{
if (refcount_dec_and_test(&sp->refcnt))
- up(&sp->dead_sem);
+ complete(&sp->dead);
}
/*
@@ -576,7 +576,7 @@ static int sixpack_open(struct tty_struct *tty)
spin_lock_init(&sp->lock);
refcount_set(&sp->refcnt, 1);
- sema_init(&sp->dead_sem, 0);
+ init_completion(&sp->dead);
/* !!! length of the buffers. MTU is IP MTU, not PACLEN! */
@@ -670,10 +670,10 @@ static void sixpack_close(struct tty_struct *tty)
* we have to wait for all existing users to finish.
*/
if (!refcount_dec_and_test(&sp->refcnt))
- down(&sp->dead_sem);
+ wait_for_completion(&sp->dead);
/* We must stop the queue to avoid potentially scribbling
- * on the free buffers. The sp->dead_sem is not sufficient
+ * on the free buffers. The sp->dead completion is not sufficient
* to protect us from sp->xbuff access.
*/
netif_stop_queue(sp->dev);
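Both hamradio conversions (6pack above, mkiss below) swap a one-shot semaphore for a struct completion, the idiomatic primitive for "last user wakes the waiter". Reduced to its skeleton (illustrative names):

    /* Teardown idiom used by both drivers: the closer drops its own
     * reference; if other users remain, it sleeps until the final put()
     * signals the completion.
     */
    struct obj {
            refcount_t refcnt;
            struct completion dead;
    };

    static void obj_put(struct obj *o)
    {
            if (refcount_dec_and_test(&o->refcnt))
                    complete(&o->dead);
    }

    static void obj_close(struct obj *o)
    {
            if (!refcount_dec_and_test(&o->refcnt))
                    wait_for_completion(&o->dead);
            /* no concurrent users past this point */
    }

Note that mkiss still uses atomic_t for its refcount, so only the sleeping primitive changes there.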
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 802233d41b25..4938cf4c184c 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -81,7 +81,7 @@ struct mkiss {
#define CRC_MODE_SMACK_TEST 4
atomic_t refcnt;
- struct semaphore dead_sem;
+ struct completion dead;
};
/*---------------------------------------------------------------------------*/
@@ -687,7 +687,7 @@ static struct mkiss *mkiss_get(struct tty_struct *tty)
static void mkiss_put(struct mkiss *ax)
{
if (atomic_dec_and_test(&ax->refcnt))
- up(&ax->dead_sem);
+ complete(&ax->dead);
}
static int crc_force = 0; /* Can be overridden with insmod */
@@ -715,7 +715,7 @@ static int mkiss_open(struct tty_struct *tty)
spin_lock_init(&ax->buflock);
atomic_set(&ax->refcnt, 1);
- sema_init(&ax->dead_sem, 0);
+ init_completion(&ax->dead);
ax->tty = tty;
tty->disc_data = ax;
@@ -795,7 +795,7 @@ static void mkiss_close(struct tty_struct *tty)
* we have to wait for all existing users to finish.
*/
if (!atomic_dec_and_test(&ax->refcnt))
- down(&ax->dead_sem);
+ wait_for_completion(&ax->dead);
/*
* Halt the transmit queue so that a new transmit cannot scribble
* on our buffers
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf36e7ff3191..91ed15ea5883 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -137,7 +137,7 @@ static int netvsc_open(struct net_device *net)
* slave as up. If open fails, then the slave will
* still be offline (and not used).
*/
- ret = dev_open(vf_netdev);
+ ret = dev_open(vf_netdev, NULL);
if (ret)
netdev_warn(net,
"unable to open slave: %s: %d\n",
@@ -605,9 +605,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
IEEE_8021Q_INFO);
vlan->value = 0;
- vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
- vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
- VLAN_PRIO_SHIFT;
+ vlan->vlanid = skb_vlan_tag_get_id(skb);
+ vlan->cfi = skb_vlan_tag_get_cfi(skb);
+ vlan->pri = skb_vlan_tag_get_prio(skb);
}
if (skb_is_gso(skb)) {
@@ -781,7 +781,8 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
}
if (vlan) {
- u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);
+ u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
+ (vlan->cfi ? VLAN_CFI_MASK : 0);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_tci);
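The netvsc changes stop open-coding the VLAN tag math and start carrying the CFI/DEI bit in both directions. For reference, the 802.1Q TCI layout that the skb_vlan_tag_get_* helpers decode, and the inverse used on receive above (the helper is illustrative):

    /* 802.1Q TCI: bits 15..13 PCP, bit 12 CFI/DEI, bits 11..0 VID.
     * skb_vlan_tag_get_prio()/_cfi()/_id() extract the three fields;
     * this rebuilds a TCI from them, as the receive path above does.
     */
    static inline u16 demo_build_vlan_tci(u16 vid, u8 pcp, bool cfi)
    {
            return (vid & VLAN_VID_MASK) |
                   ((u16)pcp << VLAN_PRIO_SHIFT) |
                   (cfi ? VLAN_CFI_MASK : 0);
    }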
@@ -1246,7 +1247,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
return -ENODEV;
if (vf_netdev) {
- err = dev_set_mac_address(vf_netdev, addr);
+ err = dev_set_mac_address(vf_netdev, addr, NULL);
if (err)
return err;
}
@@ -1257,7 +1258,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
} else if (vf_netdev) {
/* rollback change on VF */
memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
- dev_set_mac_address(vf_netdev, addr);
+ dev_set_mac_address(vf_netdev, addr, NULL);
}
return err;
@@ -1992,7 +1993,7 @@ static void __netvsc_vf_setup(struct net_device *ndev,
"unable to change mtu to %u\n", ndev->mtu);
/* set multicast etc flags on VF */
- dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
+ dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);
/* sync address list from ndev to VF */
netif_addr_lock_bh(ndev);
@@ -2001,7 +2002,7 @@ static void __netvsc_vf_setup(struct net_device *ndev,
netif_addr_unlock_bh(ndev);
if (netif_running(ndev)) {
- ret = dev_open(vf_netdev);
+ ret = dev_open(vf_netdev, NULL);
if (ret)
netdev_warn(vf_netdev,
"unable to open: %d\n", ret);
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 3d9e91579866..0253eb502153 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1632,18 +1632,7 @@ static int at86rf230_stats_show(struct seq_file *file, void *offset)
seq_printf(file, "INVALID:\t\t%8llu\n", lp->trac.invalid);
return 0;
}
-
-static int at86rf230_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, at86rf230_stats_show, inode->i_private);
-}
-
-static const struct file_operations at86rf230_stats_fops = {
- .open = at86rf230_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(at86rf230_stats);
static int at86rf230_debugfs_init(struct at86rf230_local *lp)
{
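DEFINE_SHOW_ATTRIBUTE(name), from include/linux/seq_file.h, generates exactly the boilerplate deleted above from a name##_show() function. Its expansion is approximately:

    /* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(at86rf230_stats). */
    static int at86rf230_stats_open(struct inode *inode, struct file *file)
    {
            return single_open(file, at86rf230_stats_show, inode->i_private);
    }

    static const struct file_operations at86rf230_stats_fops = {
            .owner   = THIS_MODULE,
            .open    = at86rf230_stats_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

so the conversion is behavior-preserving, and it also picks up the .owner field that the hand-rolled version had dropped.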
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 0ff5a403a8dc..b2ff903a9cb6 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -721,7 +721,7 @@ static void ca8210_mlme_reset_worker(struct work_struct *work)
static void ca8210_rx_done(struct cas_control *cas_ctl)
{
u8 *buf;
- u8 len;
+ unsigned int len;
struct work_priv_container *mlme_reset_wpc;
struct ca8210_priv *priv = cas_ctl->priv;
@@ -730,7 +730,7 @@ static void ca8210_rx_done(struct cas_control *cas_ctl)
if (len > CA8210_SPI_BUF_SIZE) {
dev_crit(
&priv->spi->dev,
- "Received packet len (%d) erroneously long\n",
+ "Received packet len (%u) erroneously long\n",
len
);
goto finish;
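The ca8210 type change is a correctness fix, not style: len is derived from a device-supplied length byte plus framing, so an 8-bit local can wrap before the CA8210_SPI_BUF_SIZE bounds check and let an oversized frame through. Sketched:

    /* With a u8 local, a wire length of 0xff plus 2 bytes of framing
     * wraps to 1 and passes the bounds check; unsigned int cannot wrap
     * here. (Illustrative; mirrors the reasoning behind the hunk above.)
     */
    static unsigned int frame_len(u8 wire_len)
    {
            return (unsigned int)wire_len + 2;
    }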
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 51b5198d5943..b6743f03dce0 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -492,7 +492,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
- if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+ if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
hwsim_edge_policy, NULL))
return -EINVAL;
@@ -542,7 +542,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
- if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX + 1,
+ if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
hwsim_edge_policy, NULL))
return -EINVAL;
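The two mac802154_hwsim hunks fix the same off-by-one: nla_parse_nested() takes the highest valid attribute type (maxattr), not the number of policy entries. Policy arrays are declared with MAX + 1 slots precisely so that index MAX is valid, and passing MAX + 1 made the parser index one entry past the table. The convention, sketched with hypothetical names:

    enum {
            DEMO_ATTR_UNSPEC,
            DEMO_ATTR_FOO,
            DEMO_ATTR_BAR,
            __DEMO_ATTR_MAX,
    };
    #define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

    static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
            [DEMO_ATTR_FOO] = { .type = NLA_U32 },
            [DEMO_ATTR_BAR] = { .type = NLA_U8 },
    };

    /* correct:    nla_parse_nested(tb, DEMO_ATTR_MAX, nest, demo_policy, NULL);
     * off-by-one: passing DEMO_ATTR_MAX + 1 reads demo_policy[DEMO_ATTR_MAX + 1],
     * one element past the end of the table.
     */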
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 4a949569ec4c..19bdde60680c 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -71,7 +71,8 @@ static void ipvlan_unregister_nf_hook(struct net *net)
ARRAY_SIZE(ipvl_nfops));
}
-static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
+static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
+ struct netlink_ext_ack *extack)
{
struct ipvl_dev *ipvlan;
struct net_device *mdev = port->dev;
@@ -84,10 +85,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
flags = ipvlan->dev->flags;
if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
err = dev_change_flags(ipvlan->dev,
- flags | IFF_NOARP);
+ flags | IFF_NOARP,
+ extack);
} else {
err = dev_change_flags(ipvlan->dev,
- flags & ~IFF_NOARP);
+ flags & ~IFF_NOARP,
+ extack);
}
if (unlikely(err))
goto fail;
@@ -116,9 +119,11 @@ fail:
flags = ipvlan->dev->flags;
if (port->mode == IPVLAN_MODE_L3 ||
port->mode == IPVLAN_MODE_L3S)
- dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
+ dev_change_flags(ipvlan->dev, flags | IFF_NOARP,
+ NULL);
else
- dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
+ dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP,
+ NULL);
}
return err;
@@ -498,7 +503,7 @@ static int ipvlan_nl_changelink(struct net_device *dev,
if (data[IFLA_IPVLAN_MODE]) {
u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
- err = ipvlan_set_port_mode(port, nmode);
+ err = ipvlan_set_port_mode(port, nmode, extack);
}
if (!err && data[IFLA_IPVLAN_FLAGS]) {
@@ -535,7 +540,7 @@ static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_IPVLAN_MODE]) {
u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
- if (mode < IPVLAN_MODE_L2 || mode >= IPVLAN_MODE_MAX)
+ if (mode >= IPVLAN_MODE_MAX)
return -EINVAL;
}
if (data[IFLA_IPVLAN_FLAGS]) {
@@ -672,7 +677,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
if (data && data[IFLA_IPVLAN_MODE])
mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
- err = ipvlan_set_port_mode(port, mode);
+ err = ipvlan_set_port_mode(port, mode, extack);
if (err)
goto unlink_netdev;
@@ -754,10 +759,13 @@ EXPORT_SYMBOL_GPL(ipvlan_link_register);
static int ipvlan_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
+ struct netdev_notifier_pre_changeaddr_info *prechaddr_info;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct ipvl_dev *ipvlan, *next;
struct ipvl_port *port;
LIST_HEAD(lst_kill);
+ int err;
if (!netif_is_ipvlan_port(dev))
return NOTIFY_DONE;
@@ -813,6 +821,17 @@ static int ipvlan_device_event(struct notifier_block *unused,
ipvlan_adjust_mtu(ipvlan, dev);
break;
+ case NETDEV_PRE_CHANGEADDR:
+ prechaddr_info = ptr;
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ err = dev_pre_changeaddr_notify(ipvlan->dev,
+ prechaddr_info->dev_addr,
+ extack);
+ if (err)
+ return notifier_from_errno(err);
+ }
+ break;
+
case NETDEV_CHANGEADDR:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
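Two independent threads run through the ipvlan changes: dev_change_flags() gains a struct netlink_ext_ack argument so a failed mode switch can report why (the rollback path passes NULL, since there is no requester left to inform), and the new NETDEV_PRE_CHANGEADDR case lets stacked devices veto a lower-device MAC change before it takes effect. The validation tweak is also sound on its own: with IPVLAN_MODE_L2 == 0 and mode unsigned, mode < IPVLAN_MODE_L2 was always false, so mode >= IPVLAN_MODE_MAX is the whole check. A hedged sketch of the veto pattern:

    /* Illustrative notifier fragment vetoing an address change, mirroring
     * the NETDEV_PRE_CHANGEADDR case above (demo_* names are hypothetical).
     */
    static int demo_device_event(struct notifier_block *nb,
                                 unsigned long event, void *ptr)
    {
            struct netdev_notifier_pre_changeaddr_info *info;

            switch (event) {
            case NETDEV_PRE_CHANGEADDR:
                    info = ptr;
                    if (!is_valid_ether_addr(info->dev_addr))
                            return notifier_from_errno(-EADDRNOTAVAIL);
                    break;
            }
            return NOTIFY_DONE;
    }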
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index fc8d5f1ee1ad..fc726ce4c164 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -608,7 +608,7 @@ static int macvlan_open(struct net_device *dev)
goto hash_add;
}
- err = -EBUSY;
+ err = -EADDRINUSE;
if (macvlan_addr_busy(vlan->port, dev->dev_addr))
goto out;
@@ -706,7 +706,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
} else {
/* Rehash and update the device filters */
if (macvlan_addr_busy(vlan->port, addr))
- return -EBUSY;
+ return -EADDRINUSE;
if (!macvlan_passthru(port)) {
err = dev_uc_add(lowerdev, addr);
@@ -744,9 +744,12 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
macvlan_set_addr_change(vlan->port);
- return dev_set_mac_address(vlan->lowerdev, addr);
+ return dev_set_mac_address(vlan->lowerdev, addr, NULL);
}
+ if (macvlan_addr_busy(vlan->port, addr->sa_data))
+ return -EADDRINUSE;
+
return macvlan_sync_address(dev, addr->sa_data);
}
@@ -1210,7 +1213,7 @@ static void macvlan_port_destroy(struct net_device *dev)
sa.sa_family = port->dev->type;
memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len);
- dev_set_mac_address(port->dev, &sa);
+ dev_set_mac_address(port->dev, &sa, NULL);
}
kfree(port);
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index e964d312f4ca..ed1166adaa2f 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -40,14 +40,14 @@ static int net_failover_open(struct net_device *dev)
primary_dev = rtnl_dereference(nfo_info->primary_dev);
if (primary_dev) {
- err = dev_open(primary_dev);
+ err = dev_open(primary_dev, NULL);
if (err)
goto err_primary_open;
}
standby_dev = rtnl_dereference(nfo_info->standby_dev);
if (standby_dev) {
- err = dev_open(standby_dev);
+ err = dev_open(standby_dev, NULL);
if (err)
goto err_standby_open;
}
@@ -517,7 +517,7 @@ static int net_failover_slave_register(struct net_device *slave_dev,
dev_hold(slave_dev);
if (netif_running(failover_dev)) {
- err = dev_open(slave_dev);
+ err = dev_open(slave_dev, NULL);
if (err && (err != -EBUSY)) {
netdev_err(failover_dev, "Opening slave %s failed err:%d\n",
slave_dev->name, err);
@@ -680,7 +680,7 @@ static int net_failover_slave_name_change(struct net_device *slave_dev,
/* We need to bring up the slave after the rename by udev in case
* open failed with EBUSY when it was registered.
*/
- dev_open(slave_dev);
+ dev_open(slave_dev, NULL);
return 0;
}
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index cb3518474f0e..172b271c8bd2 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -48,7 +48,7 @@ struct nsim_bpf_bound_map {
struct list_head l;
};
-static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
+static int nsim_bpf_string_show(struct seq_file *file, void *data)
{
const char **str = file->private;
@@ -57,19 +57,7 @@ static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
return 0;
}
-
-static int nsim_debugfs_bpf_string_open(struct inode *inode, struct file *f)
-{
- return single_open(f, nsim_debugfs_bpf_string_read, inode->i_private);
-}
-
-static const struct file_operations nsim_bpf_string_fops = {
- .owner = THIS_MODULE,
- .open = nsim_debugfs_bpf_string_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek
-};
+DEFINE_SHOW_ATTRIBUTE(nsim_bpf_string);
static int
nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
@@ -91,11 +79,6 @@ static int nsim_bpf_finalize(struct bpf_verifier_env *env)
return 0;
}
-static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = {
- .insn_hook = nsim_bpf_verify_insn,
- .finalize = nsim_bpf_finalize,
-};
-
static bool nsim_xdp_offload_active(struct netdevsim *ns)
{
return ns->xdp_hw.prog;
@@ -263,6 +246,24 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
return 0;
}
+static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
+{
+ struct netdevsim *ns = netdev_priv(prog->aux->offload->netdev);
+
+ if (!ns->bpf_bind_accept)
+ return -EOPNOTSUPP;
+
+ return nsim_bpf_create_prog(ns, prog);
+}
+
+static int nsim_bpf_translate(struct bpf_prog *prog)
+{
+ struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;
+
+ state->state = "xlated";
+ return 0;
+}
+
static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
{
struct nsim_bpf_bound_prog *state;
@@ -275,6 +276,14 @@ static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
kfree(state);
}
+static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
+ .insn_hook = nsim_bpf_verify_insn,
+ .finalize = nsim_bpf_finalize,
+ .prepare = nsim_bpf_verifier_prep,
+ .translate = nsim_bpf_translate,
+ .destroy = nsim_bpf_destroy_prog,
+};
+
static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
{
if (bpf->prog && bpf->prog->aux->offload) {
@@ -533,30 +542,11 @@ static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
struct netdevsim *ns = netdev_priv(dev);
- struct nsim_bpf_bound_prog *state;
int err;
ASSERT_RTNL();
switch (bpf->command) {
- case BPF_OFFLOAD_VERIFIER_PREP:
- if (!ns->bpf_bind_accept)
- return -EOPNOTSUPP;
-
- err = nsim_bpf_create_prog(ns, bpf->verifier.prog);
- if (err)
- return err;
-
- bpf->verifier.ops = &nsim_bpf_analyzer_ops;
- return 0;
- case BPF_OFFLOAD_TRANSLATE:
- state = bpf->offload.prog->aux->offload->dev_priv;
-
- state->state = "xlated";
- return 0;
- case BPF_OFFLOAD_DESTROY:
- nsim_bpf_destroy_prog(bpf->offload.prog);
- return 0;
case XDP_QUERY_PROG:
return xdp_attachment_query(&ns->xdp, bpf);
case XDP_QUERY_PROG_HW:
@@ -599,7 +589,7 @@ int nsim_bpf_init(struct netdevsim *ns)
if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
return -ENOMEM;
- ns->sdev->bpf_dev = bpf_offload_dev_create();
+ ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops);
err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
if (err)
return err;
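The netdevsim rework tracks a core BPF offload change: BPF_OFFLOAD_VERIFIER_PREP, BPF_OFFLOAD_TRANSLATE and BPF_OFFLOAD_DESTROY disappear as ndo_bpf commands and become .prepare, .translate and .destroy callbacks in struct bpf_prog_offload_ops, registered once per device. The driver-side shape after the change, sketched with the names from this patch:

    static const struct bpf_prog_offload_ops demo_bpf_dev_ops = {
            .insn_hook = nsim_bpf_verify_insn,   /* per-instruction verifier hook */
            .finalize  = nsim_bpf_finalize,
            .prepare   = nsim_bpf_verifier_prep, /* was BPF_OFFLOAD_VERIFIER_PREP */
            .translate = nsim_bpf_translate,     /* was BPF_OFFLOAD_TRANSLATE */
            .destroy   = nsim_bpf_destroy_prog,  /* was BPF_OFFLOAD_DESTROY */
    };

    /* registration (error handling elided for brevity): */
    bpf_dev = bpf_offload_dev_create(&demo_bpf_dev_ops);

The switch statement in nsim_bpf() shrinks accordingly, keeping only the XDP query and attach paths.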
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index 2dcf6cc269d0..76e11d889bb6 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -227,18 +227,19 @@ static const struct xfrmdev_ops nsim_xfrmdev_ops = {
bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
{
+ struct sec_path *sp = skb_sec_path(skb);
struct nsim_ipsec *ipsec = &ns->ipsec;
struct xfrm_state *xs;
struct nsim_sa *tsa;
u32 sa_idx;
/* do we even need to check this packet? */
- if (!skb->sp)
+ if (!sp)
return true;
- if (unlikely(!skb->sp->len)) {
+ if (unlikely(!sp->len)) {
netdev_err(ns->netdev, "no xfrm state len = %d\n",
- skb->sp->len);
+ sp->len);
return false;
}
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 6fe5dc9201d0..9d0504f3e3b2 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -66,7 +66,6 @@ static struct phy_driver am79c_driver[] = { {
.name = "AM79C874",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = am79c_config_init,
.ack_interrupt = am79c_ack_interrupt,
.config_intr = am79c_config_intr,
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
index 632472cab3bb..beb3309bb0f0 100644
--- a/drivers/net/phy/aquantia.c
+++ b/drivers/net/phy/aquantia.c
@@ -25,15 +25,10 @@
#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQR405 0x03a1b4b0
-#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
- SUPPORTED_1000baseT_Full | \
- SUPPORTED_100baseT_Full | \
- PHY_DEFAULT_FEATURES)
-
static int aquantia_config_aneg(struct phy_device *phydev)
{
- phydev->supported = PHY_AQUANTIA_FEATURES;
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->supported, phy_10gbit_features);
+ linkmode_copy(phydev->advertising, phydev->supported);
return 0;
}
@@ -116,7 +111,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQ1202",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -128,7 +122,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQ2104",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -140,7 +133,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR105",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -152,7 +144,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR106",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -164,7 +155,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR107",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -176,7 +166,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR405",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index e74a047a846e..f9432d053a22 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -379,7 +379,6 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -395,7 +394,6 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -410,7 +408,6 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index d95bffdec4c1..a88dd14a25c0 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -43,7 +43,7 @@ static int bcm63xx_config_init(struct phy_device *phydev)
int reg, err;
/* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */
- phydev->supported |= SUPPORTED_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
reg = phy_read(phydev, MII_BCM63XX_IR);
if (reg < 0)
@@ -69,7 +69,7 @@ static struct phy_driver bcm63xx_driver[] = {
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (1)",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
+ .flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
@@ -78,7 +78,7 @@ static struct phy_driver bcm63xx_driver[] = {
.phy_id = 0x002bdc00,
.phy_id_mask = 0xfffffc00,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
+ .flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b2b6307d64a4..712224cc442d 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -650,6 +650,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7268, "Broadcom BCM7268"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7271, "Broadcom BCM7271"),
@@ -670,6 +671,7 @@ static struct phy_driver bcm7xxx_driver[] = {
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM7250, 0xfffffff0, },
+ { PHY_ID_BCM7255, 0xfffffff0, },
{ PHY_ID_BCM7260, 0xfffffff0, },
{ PHY_ID_BCM7268, 0xfffffff0, },
{ PHY_ID_BCM7271, 0xfffffff0, },
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index f7ebdcff53e4..1b350183bffb 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -86,8 +86,12 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
static int bcm87xx_config_init(struct phy_device *phydev)
{
- phydev->supported = SUPPORTED_10000baseR_FEC;
- phydev->advertising = ADVERTISED_10000baseR_FEC;
+ linkmode_zero(phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ phydev->supported);
+ linkmode_zero(phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ phydev->advertising);
phydev->state = PHY_NOLINK;
phydev->autoneg = AUTONEG_DISABLE;
@@ -193,7 +197,6 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8706,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8706",
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
@@ -205,7 +208,6 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8727",
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 704537010453..aa73c5cc5f86 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -602,7 +602,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -611,7 +610,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -620,7 +618,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54210E",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -629,7 +626,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -638,7 +634,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54612E",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -647,7 +642,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -657,7 +651,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -666,7 +659,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -676,7 +668,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54810",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -686,7 +677,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm5482_config_init,
.read_status = bcm5482_read_status,
.ack_interrupt = bcm_phy_ack_intr,
@@ -696,7 +686,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -705,7 +694,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -714,7 +702,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -723,7 +710,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
@@ -732,7 +718,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
@@ -751,7 +736,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index c05af00bf4b6..fea61c81bda9 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -108,7 +108,6 @@ static struct phy_driver cis820x_driver[] = {
.name = "Cicada Cis8201",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &cis820x_config_init,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
@@ -117,7 +116,6 @@ static struct phy_driver cis820x_driver[] = {
.name = "Cicada Cis8204",
.phy_id_mask = 0x000fffc0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &cis820x_config_init,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 5ee99b3b428c..97162008f42b 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -150,7 +150,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9161E",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -160,7 +159,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9161B/C",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -170,7 +168,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -180,7 +177,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9131",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
} };
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index edd4d44a386d..18b41bc345ab 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1521,7 +1521,6 @@ static struct phy_driver dp83640_driver = {
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83640",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = dp83640_probe,
.remove = dp83640_remove,
.soft_reset = dp83640_soft_reset,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 6e8a2a4f3a6e..24c7f149f3e6 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -318,7 +318,6 @@ static struct phy_driver dp83822_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "TI DP83822",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dp83822_config_init,
.soft_reset = dp83822_phy_reset,
.get_wol = dp83822_get_wol,
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 6e8e42361fd5..a6b55909d1dc 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -108,7 +108,6 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
.phy_id_mask = 0xfffffff0, \
.name = _name, \
.features = PHY_BASIC_FEATURES, \
- .flags = PHY_HAS_INTERRUPT, \
\
.soft_reset = genphy_soft_reset, \
.config_init = _config_init, \
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index b3935778b19f..da6a67d47ce9 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -334,7 +334,6 @@ static struct phy_driver dp83867_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "TI DP83867",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 78cad134a79e..da13356999e5 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -346,7 +346,6 @@ static struct phy_driver dp83811_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "TI DP83TC811",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dp83811_config_init,
.config_aneg = dp83811_config_aneg,
.soft_reset = dp83811_phy_reset,
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 67b260877f30..72d43c88e6ff 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -25,6 +25,7 @@
#include <linux/gpio.h>
#include <linux/seqlock.h>
#include <linux/idr.h>
+#include <linux/netdevice.h>
#include "swphy.h"
@@ -38,6 +39,7 @@ struct fixed_phy {
struct phy_device *phydev;
seqcount_t seqcount;
struct fixed_phy_status status;
+ bool no_carrier;
int (*link_update)(struct net_device *, struct fixed_phy_status *);
struct list_head node;
int link_gpio;
@@ -48,9 +50,28 @@ static struct fixed_mdio_bus platform_fmb = {
.phys = LIST_HEAD_INIT(platform_fmb.phys),
};
+int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
+{
+ struct fixed_mdio_bus *fmb = &platform_fmb;
+ struct phy_device *phydev = dev->phydev;
+ struct fixed_phy *fp;
+
+ if (!phydev || !phydev->mdio.bus)
+ return -EINVAL;
+
+ list_for_each_entry(fp, &fmb->phys, node) {
+ if (fp->addr == phydev->mdio.addr) {
+ fp->no_carrier = !new_carrier;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(fixed_phy_change_carrier);
+
static void fixed_phy_update(struct fixed_phy *fp)
{
- if (gpio_is_valid(fp->link_gpio))
+ if (!fp->no_carrier && gpio_is_valid(fp->link_gpio))
fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio);
}
@@ -66,6 +87,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
do {
s = read_seqcount_begin(&fp->seqcount);
+ fp->status.link = !fp->no_carrier;
/* Issue callback if user registered it. */
if (fp->link_update) {
fp->link_update(fp->phydev->attached_dev,
@@ -223,14 +245,23 @@ struct phy_device *fixed_phy_register(unsigned int irq,
switch (status->speed) {
case SPEED_1000:
- phy->supported = PHY_1000BT_FEATURES;
- break;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phy->supported);
+ /* fall through */
case SPEED_100:
- phy->supported = PHY_100BT_FEATURES;
- break;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phy->supported);
+ /* fall through */
case SPEED_10:
default:
- phy->supported = PHY_10BT_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ phy->supported);
}
ret = phy_device_register(phy);
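Two separate improvements in the fixed_phy hunks: fixed_phy_change_carrier() gives drivers a way to toggle the emulated link state of a fixed PHY, and the speed switch is converted from exclusive cases to deliberate fall-throughs, so a SPEED_1000 fixed link now also reports the 100 and 10 Mb/s modes in phy->supported rather than only its top speed. A hedged sketch of how a driver might wire the new hook into its netdev ops:

    /* Illustrative: forward ndo_change_carrier to the fixed PHY.
     * demo_* names are hypothetical; fixed_phy_change_carrier() is the
     * export added above.
     */
    static int demo_change_carrier(struct net_device *dev, bool new_carrier)
    {
            return fixed_phy_change_carrier(dev, new_carrier);
    }

    static const struct net_device_ops demo_netdev_ops = {
            .ndo_change_carrier = demo_change_carrier,
    };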
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 791587a49215..7d5938b87660 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -25,6 +25,7 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/property.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -36,14 +37,34 @@ MODULE_LICENSE("GPL");
/* IP101A/G - IP1001 */
#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */
-#define IP1001_RXPHASE_SEL (1<<0) /* Add delay on RX_CLK */
-#define IP1001_TXPHASE_SEL (1<<1) /* Add delay on TX_CLK */
+#define IP1001_RXPHASE_SEL BIT(0) /* Add delay on RX_CLK */
+#define IP1001_TXPHASE_SEL BIT(1) /* Add delay on TX_CLK */
#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
-#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
+#define IP101A_G_APS_ON BIT(1) /* IP101A/G APS Mode bit */
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
-#define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */
-#define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED
+#define IP101A_G_IRQ_PIN_USED BIT(15) /* INTR pin used */
+#define IP101A_G_IRQ_ALL_MASK BIT(11) /* IRQ's inactive */
+#define IP101A_G_IRQ_SPEED_CHANGE BIT(2)
+#define IP101A_G_IRQ_DUPLEX_CHANGE BIT(1)
+#define IP101A_G_IRQ_LINK_CHANGE BIT(0)
+
+#define IP101G_DIGITAL_IO_SPEC_CTRL 0x1d
+#define IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32 BIT(2)
+
+/* The 32-pin IP101GR package can re-configure the mode of the RXER/INTR_32 pin
+ * (pin number 21). The hardware default is RXER (receive error) mode. But it
+ * can be configured to interrupt mode manually.
+ */
+enum ip101gr_sel_intr32 {
+ IP101GR_SEL_INTR32_KEEP,
+ IP101GR_SEL_INTR32_INTR,
+ IP101GR_SEL_INTR32_RXER,
+};
+
+struct ip101a_g_phy_priv {
+ enum ip101gr_sel_intr32 sel_intr32;
+};
static int ip175c_config_init(struct phy_device *phydev)
{
@@ -162,18 +183,92 @@ static int ip1001_config_init(struct phy_device *phydev)
return 0;
}
+static int ip175c_read_status(struct phy_device *phydev)
+{
+ if (phydev->mdio.addr == 4) /* WAN port */
+ genphy_read_status(phydev);
+ else
+ /* Don't need to read status for switch ports */
+ phydev->irq = PHY_IGNORE_INTERRUPT;
+
+ return 0;
+}
+
+static int ip175c_config_aneg(struct phy_device *phydev)
+{
+ if (phydev->mdio.addr == 4) /* WAN port */
+ genphy_config_aneg(phydev);
+
+ return 0;
+}
+
+static int ip101a_g_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct ip101a_g_phy_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Both functions (RX error and interrupt status) are sharing the same
+ * pin on the 32-pin IP101GR, so this is an exclusive choice.
+ */
+ if (device_property_read_bool(dev, "icplus,select-rx-error") &&
+ device_property_read_bool(dev, "icplus,select-interrupt")) {
+ dev_err(dev,
+ "RXER and INTR mode cannot be selected together\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_bool(dev, "icplus,select-rx-error"))
+ priv->sel_intr32 = IP101GR_SEL_INTR32_RXER;
+ else if (device_property_read_bool(dev, "icplus,select-interrupt"))
+ priv->sel_intr32 = IP101GR_SEL_INTR32_INTR;
+ else
+ priv->sel_intr32 = IP101GR_SEL_INTR32_KEEP;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static int ip101a_g_config_init(struct phy_device *phydev)
{
- int c;
+ struct ip101a_g_phy_priv *priv = phydev->priv;
+ int err, c;
c = ip1xx_reset(phydev);
if (c < 0)
return c;
- /* INTR pin used: speed/link/duplex will cause an interrupt */
- c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
- if (c < 0)
- return c;
+ /* configure the RXER/INTR_32 pin of the 32-pin IP101GR if needed: */
+ switch (priv->sel_intr32) {
+ case IP101GR_SEL_INTR32_RXER:
+ err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32, 0);
+ if (err < 0)
+ return err;
+ break;
+
+ case IP101GR_SEL_INTR32_INTR:
+ err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32);
+ if (err < 0)
+ return err;
+ break;
+
+ default:
+ /* Don't touch IP101G_DIGITAL_IO_SPEC_CTRL because it's not
+ * documented on IP101A and it's not clear whether this would
+ * cause problems.
+ * For the 32-pin IP101GR we simply keep the SEL_INTR32
+ * configuration as set by the bootloader when not configured
+ * to one of the special functions.
+ */
+ break;
+ }
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
@@ -182,23 +277,29 @@ static int ip101a_g_config_init(struct phy_device *phydev)
return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
}
-static int ip175c_read_status(struct phy_device *phydev)
+static int ip101a_g_config_intr(struct phy_device *phydev)
{
- if (phydev->mdio.addr == 4) /* WAN port */
- genphy_read_status(phydev);
+ u16 val;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ /* INTR pin used: Speed/link/duplex will cause an interrupt */
+ val = IP101A_G_IRQ_PIN_USED;
else
- /* Don't need to read status for switch ports */
- phydev->irq = PHY_IGNORE_INTERRUPT;
+ val = IP101A_G_IRQ_ALL_MASK;
- return 0;
+ return phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val);
}
-static int ip175c_config_aneg(struct phy_device *phydev)
+static int ip101a_g_did_interrupt(struct phy_device *phydev)
{
- if (phydev->mdio.addr == 4) /* WAN port */
- genphy_config_aneg(phydev);
+ int val = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
- return 0;
+ if (val < 0)
+ return 0;
+
+ return val & (IP101A_G_IRQ_SPEED_CHANGE |
+ IP101A_G_IRQ_DUPLEX_CHANGE |
+ IP101A_G_IRQ_LINK_CHANGE);
}
static int ip101a_g_ack_interrupt(struct phy_device *phydev)
@@ -234,7 +335,9 @@ static struct phy_driver icplus_driver[] = {
.name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
+ .probe = ip101a_g_probe,
+ .config_intr = ip101a_g_config_intr,
+ .did_interrupt = ip101a_g_did_interrupt,
.ack_interrupt = ip101a_g_ack_interrupt,
.config_init = &ip101a_g_config_init,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 7d936fb61c22..fc0f5024a29e 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -242,7 +242,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -255,7 +254,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.3",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -268,7 +266,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -281,7 +278,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.4",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -294,7 +290,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -306,7 +301,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -318,7 +312,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (xRX v1.1 integrated)",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -330,7 +323,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (xRX v1.1 integrated)",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -342,7 +334,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (xRX v1.2 integrated)",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -354,7 +345,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (xRX v1.2 integrated)",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index c14b254b2879..c8bb29ae1a2a 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -177,7 +177,7 @@ static int lxt973a2_read_status(struct phy_device *phydev)
*/
} while (lpa == adv && retry--);
- phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(lpa);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
lpa &= adv;
@@ -218,7 +218,7 @@ static int lxt973a2_read_status(struct phy_device *phydev)
phydev->speed = SPEED_10;
phydev->pause = phydev->asym_pause = 0;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
}
return 0;
@@ -257,7 +257,6 @@ static struct phy_driver lxt97x_driver[] = {
.name = "LXT970",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = lxt970_config_init,
.ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
@@ -266,7 +265,6 @@ static struct phy_driver lxt97x_driver[] = {
.name = "LXT971",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
}, {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index cbec296107bd..a9c7c7f41b0c 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -491,25 +491,26 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
}
/**
- * ethtool_adv_to_fiber_adv_t
- * @ethadv: the ethtool advertisement settings
+ * linkmode_adv_to_fiber_adv_t
+ * @advertise: the linkmode advertisement settings
*
- * A small helper function that translates ethtool advertisement
- * settings to phy autonegotiation advertisements for the
- * MII_ADV register for fiber link.
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the MII_ADV
+ * register for fiber link.
*/
-static inline u32 ethtool_adv_to_fiber_adv_t(u32 ethadv)
+static inline u32 linkmode_adv_to_fiber_adv_t(unsigned long *advertise)
{
u32 result = 0;
- if (ethadv & ADVERTISED_1000baseT_Half)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertise))
result |= ADVERTISE_FIBER_1000HALF;
- if (ethadv & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertise))
result |= ADVERTISE_FIBER_1000FULL;
- if ((ethadv & ADVERTISE_PAUSE_ASYM) && (ethadv & ADVERTISE_PAUSE_CAP))
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertise) &&
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
result |= LPA_PAUSE_ASYM_FIBER;
- else if (ethadv & ADVERTISE_PAUSE_CAP)
+ else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
result |= (ADVERTISE_PAUSE_FIBER
& (~ADVERTISE_PAUSE_ASYM_FIBER));
@@ -530,14 +531,13 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
int changed = 0;
int err;
int adv, oldadv;
- u32 advertise;
if (phydev->autoneg != AUTONEG_ENABLE)
return genphy_setup_forced(phydev);
/* Only allow advertising what this PHY supports */
- phydev->advertising &= phydev->supported;
- advertise = phydev->advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
/* Setup fiber advertisement */
adv = phy_read(phydev, MII_ADVERTISE);
@@ -547,7 +547,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
oldadv = adv;
adv &= ~(ADVERTISE_FIBER_1000HALF | ADVERTISE_FIBER_1000FULL
| LPA_PAUSE_FIBER);
- adv |= ethtool_adv_to_fiber_adv_t(advertise);
+ adv |= linkmode_adv_to_fiber_adv_t(phydev->advertising);
if (adv != oldadv) {
err = phy_write(phydev, MII_ADVERTISE, adv);
@@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- u32 pause;
/* Select page 18 */
err = marvell_set_page(phydev, 18);
@@ -878,9 +877,14 @@ static int m88e1510_config_init(struct phy_device *phydev)
* This means we can never be truly sure what was advertised,
* so disable Pause support.
*/
- pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phydev->supported &= ~pause;
- phydev->advertising &= ~pause;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
}
return m88e1318_config_init(phydev);
@@ -1043,22 +1047,21 @@ static int m88e1145_config_init(struct phy_device *phydev)
}
/**
- * fiber_lpa_to_ethtool_lpa_t
+ * fiber_lpa_mod_linkmode_lpa_t
+ * @advertising: the linkmode advertisement settings
* @lpa: value of the MII_LPA register for fiber link
*
- * A small helper function that translates MII_LPA
- * bits to ethtool LP advertisement settings.
+ * A small helper function that translates MII_LPA bits to linkmode LP
+ * advertisement settings. Other bits in advertising are left
+ * unchanged.
*/
-static u32 fiber_lpa_to_ethtool_lpa_t(u32 lpa)
+static void fiber_lpa_mod_linkmode_lpa_t(unsigned long *advertising, u32 lpa)
{
- u32 result = 0;
-
- if (lpa & LPA_FIBER_1000HALF)
- result |= ADVERTISED_1000baseT_Half;
- if (lpa & LPA_FIBER_1000FULL)
- result |= ADVERTISED_1000baseT_Full;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising, lpa & LPA_FIBER_1000HALF);
- return result;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising, lpa & LPA_FIBER_1000FULL);
}
/**
@@ -1134,9 +1137,8 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
if (!fiber) {
- phydev->lp_advertising =
- mii_stat1000_to_ethtool_lpa_t(lpagb) |
- mii_lpa_to_ethtool_lpa_t(lpa);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, lpagb);
if (phydev->duplex == DUPLEX_FULL) {
phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
@@ -1144,7 +1146,7 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
} else {
/* The fiber link is only 1000M capable */
- phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa);
+ fiber_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
if (phydev->duplex == DUPLEX_FULL) {
if (!(lpa & LPA_PAUSE_FIBER)) {
@@ -1183,7 +1185,7 @@ static int marvell_read_status_page_fixed(struct phy_device *phydev)
phydev->pause = 0;
phydev->asym_pause = 0;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
return 0;
}
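The marvell conversion is one slice of the phylib-wide migration of supported/advertising/lp_advertising from u32 SUPPORTED_/ADVERTISED_ masks to ETHTOOL_LINK_MODE_* bitmaps. The linkmode_* helpers (include/linux/linkmode.h) operate on those bitmaps; linkmode_mod_bit() in particular sets or clears a single bit based on a condition, which is why fiber_lpa_mod_linkmode_lpa_t() above can update the fiber modes while leaving unrelated bits untouched. A short illustrative fragment:

    /* Illustrative use of the bitmap helpers replacing u32 mask math;
     * demo_fill_fiber_lp() is a made-up name.
     */
    static void demo_fill_fiber_lp(struct phy_device *phydev, u32 lpa)
    {
            /* set or clear exactly one mode, other bits untouched: */
            linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                             phydev->lp_advertising,
                             lpa & LPA_FIBER_1000FULL);

            /* restrict advertising to what the PHY supports: */
            linkmode_and(phydev->advertising, phydev->advertising,
                         phydev->supported);
    }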
@@ -1235,7 +1237,8 @@ static int marvell_read_status(struct phy_device *phydev)
int err;
/* Check the fiber mode first */
- if (phydev->supported & SUPPORTED_FIBRE &&
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported) &&
phydev->interface != PHY_INTERFACE_MODE_SGMII) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
@@ -1278,7 +1281,8 @@ static int marvell_suspend(struct phy_device *phydev)
int err;
/* Suspend the fiber mode first */
- if (!(phydev->supported & SUPPORTED_FIBRE)) {
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1312,7 +1316,8 @@ static int marvell_resume(struct phy_device *phydev)
int err;
/* Resume the fiber mode first */
- if (!(phydev->supported & SUPPORTED_FIBRE)) {
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1463,7 +1468,8 @@ error:
static int marvell_get_sset_count(struct phy_device *phydev)
{
- if (phydev->supported & SUPPORTED_FIBRE)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported))
return ARRAY_SIZE(marvell_hw_stats);
else
return ARRAY_SIZE(marvell_hw_stats) - NB_FIBER_STATS;
@@ -2005,7 +2011,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1101",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1101_config_aneg,
@@ -2024,7 +2029,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1112",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2043,7 +2047,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1111",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2063,7 +2066,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1118",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1118_config_init,
.config_aneg = &m88e1118_config_aneg,
@@ -2082,7 +2084,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1121R",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = &m88e1121_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1121_config_aneg,
@@ -2103,7 +2104,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1318S",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1318_config_init,
.config_aneg = &m88e1318_config_aneg,
@@ -2126,7 +2126,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1145",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1145_config_init,
.config_aneg = &m88e1101_config_aneg,
@@ -2146,7 +2145,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1149R",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1149_config_init,
.config_aneg = &m88e1118_config_aneg,
@@ -2165,7 +2163,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1240",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2184,7 +2181,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1116R",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1116r_config_init,
.ack_interrupt = &marvell_ack_interrupt,
@@ -2202,7 +2198,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1510",
.features = PHY_GBIT_FIBRE_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = &m88e1510_probe,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2226,7 +2221,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1540",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = m88e1510_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2248,7 +2242,6 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2268,7 +2261,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E3016",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e3016_config_init,
.aneg_done = &marvell_aneg_done,
@@ -2289,7 +2281,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E6390",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = m88e6390_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
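Nearly every marvell.c hunk above is the same mechanical conversion: phydev->supported, advertising and lp_advertising stop being u32 flag words and become ETHTOOL_LINK_MODE_* bitmaps. A rough equivalence table for the idioms this patch uses, as I read linux/linkmode.h (SUPPORTED_FOO / ETHTOOL_LINK_MODE_FOO_BIT stand in for any concrete mode):

	/* u32 flag word           ->  linkmode bitmap
	 *
	 * a = 0;                  ->  linkmode_zero(a);
	 * a = b;                  ->  linkmode_copy(a, b);
	 * a &= b;                 ->  linkmode_and(a, a, b);
	 * a |= SUPPORTED_FOO;     ->  linkmode_set_bit(ETHTOOL_LINK_MODE_FOO_BIT, a);
	 * a &= ~SUPPORTED_FOO;    ->  linkmode_clear_bit(ETHTOOL_LINK_MODE_FOO_BIT, a);
	 * a & SUPPORTED_FOO       ->  linkmode_test_bit(ETHTOOL_LINK_MODE_FOO_BIT, a)
	 * a == b                  ->  linkmode_equal(a, b)
	 * a == 0                  ->  linkmode_empty(a)
	 */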
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 1c9d039eec63..82ab6ed3b74e 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -252,7 +252,6 @@ static int mv3310_resume(struct phy_device *phydev)
static int mv3310_config_init(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
- u32 mask;
int val;
/* Check that the PHY interface type is compatible */
@@ -336,13 +335,9 @@ static int mv3310_config_init(struct phy_device *phydev)
}
}
- if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
- phydev_warn(phydev,
- "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
-
- phydev->supported &= mask;
- phydev->advertising &= phydev->supported;
+ linkmode_copy(phydev->supported, supported);
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
return 0;
}
@@ -350,7 +345,7 @@ static int mv3310_config_init(struct phy_device *phydev)
static int mv3310_config_aneg(struct phy_device *phydev)
{
bool changed = false;
- u32 advertising;
+ u16 reg;
int ret;
/* We don't support manual MDI control */
@@ -364,31 +359,35 @@ static int mv3310_config_aneg(struct phy_device *phydev)
return genphy_c45_an_disable_aneg(phydev);
}
- phydev->advertising &= phydev->supported;
- advertising = phydev->advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_100BASE4 |
ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
- ethtool_adv_to_mii_adv_t(advertising));
+ linkmode_adv_to_mii_adv_t(phydev->advertising));
if (ret < 0)
return ret;
if (ret > 0)
changed = true;
+ reg = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
ret = mv3310_modify(phydev, MDIO_MMD_AN, MV_AN_CTRL1000,
- ADVERTISE_1000FULL | ADVERTISE_1000HALF,
- ethtool_adv_to_mii_ctrl1000_t(advertising));
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg);
if (ret < 0)
return ret;
if (ret > 0)
changed = true;
/* 10G control register */
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->advertising))
+ reg = MDIO_AN_10GBT_CTRL_ADV10G;
+ else
+ reg = 0;
+
ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV10G,
- advertising & ADVERTISED_10000baseT_Full ?
- MDIO_AN_10GBT_CTRL_ADV10G : 0);
+ MDIO_AN_10GBT_CTRL_ADV10G, reg);
if (ret < 0)
return ret;
if (ret > 0)
@@ -458,7 +457,7 @@ static int mv3310_read_status(struct phy_device *phydev)
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
phydev->link = 0;
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -491,7 +490,7 @@ static int mv3310_read_status(struct phy_device *phydev)
if (val < 0)
return val;
- phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val);
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
if (phydev->autoneg == AUTONEG_ENABLE)
phy_resolve_aneg_linkmode(phydev);
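A condensed sketch of how the reworked mv3310_config_aneg maps the advertising bitmap onto registers; the sketch_* wrappers are mine, the helpers are the ones used in the hunks above:

	#include <linux/mii.h>
	#include <linux/mdio.h>
	#include <linux/linkmode.h>

	static u16 sketch_mii_advertise(unsigned long *advertising)
	{
		/* pack 10/100M plus pause bits into the MII_ADVERTISE layout */
		return linkmode_adv_to_mii_adv_t(advertising);
	}

	static u16 sketch_10gbt_ctrl(const unsigned long *advertising)
	{
		/* only 10GBASE-T full duplex maps into MDIO_AN_10GBT_CTRL */
		return linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
					 advertising) ? MDIO_AN_10GBT_CTRL_ADV10G : 0;
	}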
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 0fbcedcdf6e2..ea9a0e339778 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/mdio-gpio.h>
#include <linux/mdio-bitbang.h>
#include <linux/mdio-gpio.h>
#include <linux/gpio/consumer.h>
@@ -112,6 +113,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
struct mdio_gpio_info *bitbang,
int bus_id)
{
+ struct mdio_gpio_platform_data *pdata = dev_get_platdata(dev);
struct mii_bus *new_bus;
bitbang->ctrl.ops = &mdio_gpio_ops;
@@ -128,6 +130,11 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
else
strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE);
+ if (pdata) {
+ new_bus->phy_mask = pdata->phy_mask;
+ new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask;
+ }
+
dev_set_drvdata(dev, new_bus);
return new_bus;
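The new platform-data hook lets board code constrain the bitbanged bus before it scans. A hypothetical board-file sketch (the "mdio-gpio" device name matches the driver; the mask values are illustrative only):

	#include <linux/bits.h>
	#include <linux/platform_device.h>
	#include <linux/platform_data/mdio-gpio.h>

	static struct mdio_gpio_platform_data board_mdio_pdata = {
		.phy_mask	    = (u32)~BIT(1),	/* scan only address 1 */
		.phy_ignore_ta_mask = 0,
	};

	static struct platform_device board_mdio_dev = {
		.name		   = "mdio-gpio",
		.id		   = 0,
		.dev.platform_data = &board_mdio_pdata,
	};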
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index ddc2c5ea3787..b03bcf2c388a 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -232,7 +232,7 @@ static struct phy_driver meson_gxl_phy[] = {
.phy_id_mask = 0xfffffff0,
.name = "Meson GXL Internal PHY",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT,
+ .flags = PHY_IS_INTERNAL,
.config_init = meson_gxl_config_init,
.aneg_done = genphy_aneg_done,
.read_status = meson_gxl_read_status,
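All the .flags = PHY_HAS_INTERRUPT deletions scattered through this patch work because the core now infers interrupt capability from the driver's callbacks instead of a flag. Roughly, per the phy_drv_supports_irq() check in phy_device.c (my paraphrase, worth verifying there):

	static bool sketch_drv_supports_irq(struct phy_driver *drv)
	{
		/* a driver that can ack and (un)mask its IRQ supports IRQs */
		return drv->config_intr && drv->ack_interrupt;
	}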
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9265dea79412..c33384710d26 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -311,17 +311,22 @@ static int kszphy_config_init(struct phy_device *phydev)
static int ksz8041_config_init(struct phy_device *phydev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
struct device_node *of_node = phydev->mdio.dev.of_node;
/* Limit supported and advertised modes in fiber mode */
if (of_property_read_bool(of_node, "micrel,fiber-mode")) {
phydev->dev_flags |= MICREL_PHY_FXEN;
- phydev->supported &= SUPPORTED_100baseT_Full |
- SUPPORTED_100baseT_Half;
- phydev->supported |= SUPPORTED_FIBRE;
- phydev->advertising &= ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half;
- phydev->advertising |= ADVERTISED_FIBRE;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported);
+ linkmode_and(phydev->advertising, phydev->advertising, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->advertising);
phydev->autoneg = AUTONEG_DISABLE;
}
@@ -918,7 +923,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KS8737",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ks8737_type,
.config_init = kszphy_config_init,
.ack_interrupt = kszphy_ack_interrupt,
@@ -930,7 +934,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8021 or KSZ8031",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -946,7 +949,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8031",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -962,7 +964,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = ksz8041_config_init,
@@ -979,7 +980,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041RNLI",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -995,7 +995,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8051_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -1011,7 +1010,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8001 or KS8721",
.phy_id_mask = 0x00fffffc,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -1027,7 +1025,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8081 or KSZ8091",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -1043,7 +1040,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
@@ -1054,7 +1050,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9021_config_init,
@@ -1072,7 +1067,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9031_config_init,
@@ -1089,7 +1083,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
@@ -1115,7 +1108,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ886X Switch",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -1124,7 +1116,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8795",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
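The ksz8041 fiber-mode rework above is the standard restrict-then-extend pattern: build a temporary mask, AND it into both supported and advertising, then re-add the port-type bit. A minimal standalone sketch (restrict_to_100m() is a hypothetical name):

	#include <linux/linkmode.h>
	#include <linux/phy.h>

	static void restrict_to_100m(struct phy_device *phydev)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);

		/* keep only the masked speeds, then re-add the port type */
		linkmode_and(phydev->supported, phydev->supported, mask);
		linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
	}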
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 04b12e34da58..7557bebd5d7f 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -346,7 +346,6 @@ static struct phy_driver microchip_phy_driver[] = {
.name = "Microchip LAN88xx",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = lan88xx_probe,
.remove = lan88xx_remove,
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index c600a8509d60..3d09b471632c 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -47,7 +47,6 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.name = "Microchip LAN87xx T1",
.features = PHY_BASIC_T1_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = genphy_config_init,
.config_aneg = genphy_config_aneg,
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 7cae17517744..3949fe299b18 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -853,6 +853,51 @@ static void vsc85xx_tr_write(struct phy_device *phydev, u16 addr, u32 val)
__phy_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr));
}
+static int vsc8531_pre_init_seq_set(struct phy_device *phydev)
+{
+ int rc;
+ const struct reg_val init_seq[] = {
+ {0x0f90, 0x00688980},
+ {0x0696, 0x00000003},
+ {0x07fa, 0x0050100f},
+ {0x1686, 0x00000004},
+ };
+ unsigned int i;
+ int oldpage;
+
+ rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_STANDARD,
+ MSCC_PHY_EXT_CNTL_STATUS, SMI_BROADCAST_WR_EN,
+ SMI_BROADCAST_WR_EN);
+ if (rc < 0)
+ return rc;
+ rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_TEST,
+ MSCC_PHY_TEST_PAGE_24, 0, 0x0400);
+ if (rc < 0)
+ return rc;
+ rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_TEST,
+ MSCC_PHY_TEST_PAGE_5, 0x0a00, 0x0e00);
+ if (rc < 0)
+ return rc;
+ rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_TEST,
+ MSCC_PHY_TEST_PAGE_8, 0x8000, 0x8000);
+ if (rc < 0)
+ return rc;
+
+ mutex_lock(&phydev->lock);
+ oldpage = phy_select_page(phydev, MSCC_PHY_PAGE_TR);
+ if (oldpage < 0)
+ goto out_unlock;
+
+ for (i = 0; i < ARRAY_SIZE(init_seq); i++)
+ vsc85xx_tr_write(phydev, init_seq[i].reg, init_seq[i].val);
+
+out_unlock:
+ oldpage = phy_restore_page(phydev, oldpage, oldpage);
+ mutex_unlock(&phydev->lock);
+
+ return oldpage;
+}
+
static int vsc85xx_eee_init_seq_set(struct phy_device *phydev)
{
const struct reg_val init_eee[] = {
@@ -1650,7 +1695,7 @@ err:
static int vsc85xx_config_init(struct phy_device *phydev)
{
- int rc, i;
+ int rc, i, phy_id;
struct vsc8531_private *vsc8531 = phydev->priv;
rc = vsc85xx_default_config(phydev);
@@ -1665,6 +1710,14 @@ static int vsc85xx_config_init(struct phy_device *phydev)
if (rc)
return rc;
+ phy_id = phydev->drv->phy_id & phydev->drv->phy_id_mask;
+ if (PHY_ID_VSC8531 == phy_id || PHY_ID_VSC8541 == phy_id ||
+ PHY_ID_VSC8530 == phy_id || PHY_ID_VSC8540 == phy_id) {
+ rc = vsc8531_pre_init_seq_set(phydev);
+ if (rc)
+ return rc;
+ }
+
rc = vsc85xx_eee_init_seq_set(phydev);
if (rc)
return rc;
@@ -1829,7 +1882,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi FE VSC8530",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1855,7 +1907,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi VSC8531",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1881,7 +1932,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi FE VSC8540 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1907,7 +1957,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi VSC8541 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1933,7 +1982,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi GE VSC8574 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1960,7 +2008,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi GE VSC8584 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
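vsc8531_pre_init_seq_set() above mixes the two paged-access idioms phylib offers. A generic sketch of both, with placeholder page/register numbers rather than MSCC constants:

	#include <linux/phy.h>

	static int sketch_paged_access(struct phy_device *phydev)
	{
		int rc, oldpage;

		/* one-shot read-modify-write of a register on another page */
		rc = phy_modify_paged(phydev, 0x2, 0x05, 0x0a00, 0x0e00);
		if (rc < 0)
			return rc;

		/* batched accesses: select the page once, restore at the end */
		oldpage = phy_select_page(phydev, 0x3);
		if (oldpage >= 0)
			__phy_write(phydev, 0x10, 0x1234);	/* example write */

		return phy_restore_page(phydev, oldpage, oldpage);
	}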
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 2b1e336961f9..139bed2c8ab4 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -134,7 +134,6 @@ static struct phy_driver dp83865_driver[] = { {
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83865",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = ns_config_init,
.ack_interrupt = ns_ack_interrupt,
.config_intr = ns_config_intr,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index d7636ff03bc7..03af927fa5ad 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -181,7 +181,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
if (val < 0)
return val;
- phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(val);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, val);
phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
@@ -191,7 +191,8 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
return val;
if (val & MDIO_AN_10GBT_STAT_LP10G)
- phydev->lp_advertising |= ADVERTISED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->lp_advertising);
return 0;
}
@@ -304,8 +305,11 @@ EXPORT_SYMBOL_GPL(gen10g_no_soft_reset);
int gen10g_config_init(struct phy_device *phydev)
{
/* Temporarily just say we support everything */
- phydev->supported = SUPPORTED_10000baseT_Full;
- phydev->advertising = SUPPORTED_10000baseT_Full;
+ linkmode_zero(phydev->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
return 0;
}
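The _mod_ suffix in the converters above is load-bearing: as I read linux/mii.h, the plain variants zero the destination bitmap before filling it, while the _mod_ variants touch only the bits owned by that register. Hence the decode order used here and in marvell.c:

	#include <linux/mii.h>

	static void sketch_lpa_decode(unsigned long *lp, u32 lpa, u32 lpagb)
	{
		/* zeroes *lp, then fills in the base-page LPA bits */
		mii_lpa_to_linkmode_lpa_t(lp, lpa);

		/* updates only the 1000BASE-T bits, preserving the rest */
		mii_stat1000_mod_linkmode_lpa_t(lp, lpagb);
	}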
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index c7da4cbb1103..20fbd5eb56fd 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -62,6 +62,124 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
* must be grouped by speed and sorted in descending match priority
* - iow, descending speed. */
static const struct phy_setting settings[] = {
+ /* 100G */
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ },
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ },
+ /* 56G */
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
+ },
+ /* 50G */
+ {
+ .speed = SPEED_50000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ },
+ {
+ .speed = SPEED_50000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ },
+ {
+ .speed = SPEED_50000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ },
+ /* 40G */
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ },
+ /* 25G */
+ {
+ .speed = SPEED_25000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ },
+ {
+ .speed = SPEED_25000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ },
+ {
+ .speed = SPEED_25000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ },
+
+ /* 20G */
+ {
+ .speed = SPEED_20000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ },
+ {
+ .speed = SPEED_20000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
+ },
+ /* 10G */
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ },
{
.speed = SPEED_10000,
.duplex = DUPLEX_FULL,
@@ -75,22 +193,51 @@ static const struct phy_setting settings[] = {
{
.speed = SPEED_10000,
.duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
.bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
},
+ /* 5G */
+ {
+ .speed = SPEED_5000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ },
+
+ /* 2.5G */
{
.speed = SPEED_2500,
.duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ .bit = ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
},
{
- .speed = SPEED_1000,
+ .speed = SPEED_2500,
.duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
},
+ /* 1G */
{
.speed = SPEED_1000,
.duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
},
{
.speed = SPEED_1000,
@@ -103,6 +250,12 @@ static const struct phy_setting settings[] = {
.bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
},
{
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ },
+ /* 100M */
+ {
.speed = SPEED_100,
.duplex = DUPLEX_FULL,
.bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
@@ -112,6 +265,7 @@ static const struct phy_setting settings[] = {
.duplex = DUPLEX_HALF,
.bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
},
+ /* 10M */
{
.speed = SPEED_10,
.duplex = DUPLEX_FULL,
@@ -129,7 +283,6 @@ static const struct phy_setting settings[] = {
* @speed: speed to match
* @duplex: duplex to match
* @mask: allowed link modes
- * @maxbit: bit size of link modes
* @exact: an exact match is required
*
* Search the settings array for a setting that matches the speed and
@@ -143,14 +296,14 @@ static const struct phy_setting settings[] = {
* they all fail, %NULL will be returned.
*/
const struct phy_setting *
-phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
- size_t maxbit, bool exact)
+phy_lookup_setting(int speed, int duplex, const unsigned long *mask, bool exact)
{
const struct phy_setting *p, *match = NULL, *last = NULL;
int i;
for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
- if (p->bit < maxbit && test_bit(p->bit, mask)) {
+ if (p->bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
+ test_bit(p->bit, mask)) {
last = p;
if (p->speed == speed && p->duplex == duplex) {
/* Exact match for speed and duplex */
@@ -175,13 +328,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
EXPORT_SYMBOL_GPL(phy_lookup_setting);
size_t phy_speeds(unsigned int *speeds, size_t size,
- unsigned long *mask, size_t maxbit)
+ unsigned long *mask)
{
size_t count;
int i;
for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++)
- if (settings[i].bit < maxbit &&
+ if (settings[i].bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
test_bit(settings[i].bit, mask) &&
(count == 0 || speeds[count - 1] != settings[i].speed))
speeds[count++] = settings[i].speed;
@@ -199,35 +352,53 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
*/
void phy_resolve_aneg_linkmode(struct phy_device *phydev)
{
- u32 common = phydev->lp_advertising & phydev->advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- if (common & ADVERTISED_10000baseT_Full) {
+ linkmode_and(common, phydev->lp_advertising, phydev->advertising);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, common)) {
phydev->speed = SPEED_10000;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_1000baseT_Full) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ common)) {
+ phydev->speed = SPEED_5000;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ common)) {
+ phydev->speed = SPEED_2500;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ common)) {
phydev->speed = SPEED_1000;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_1000baseT_Half) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ common)) {
phydev->speed = SPEED_1000;
phydev->duplex = DUPLEX_HALF;
- } else if (common & ADVERTISED_100baseT_Full) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ common)) {
phydev->speed = SPEED_100;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_100baseT_Half) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ common)) {
phydev->speed = SPEED_100;
phydev->duplex = DUPLEX_HALF;
- } else if (common & ADVERTISED_10baseT_Full) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ common)) {
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_10baseT_Half) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ common)) {
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_HALF;
}
if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause);
- phydev->asym_pause = !!(phydev->lp_advertising &
- ADVERTISED_Asym_Pause);
+ phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->lp_advertising);
+ phydev->asym_pause = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->lp_advertising);
}
}
EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
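With the maxbit parameter gone, callers hand phy_lookup_setting() a full linkmode mask. A usage sketch against the post-patch signature (an inexact lookup may fall back to a different supported setting, so the result is checked):

	#include <linux/phy.h>

	static void sketch_resolve(struct phy_device *phydev)
	{
		const struct phy_setting *s;

		s = phy_lookup_setting(SPEED_1000, DUPLEX_FULL,
				       phydev->supported, false);
		if (s)
			phydev_info(phydev, "resolved %d Mbps, %s duplex\n",
				    s->speed,
				    s->duplex == DUPLEX_FULL ? "full" : "half");
	}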
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1d73ac3309ce..d33e7b3caf03 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -46,11 +46,8 @@ static const char *phy_state_to_str(enum phy_state st)
{
switch (st) {
PHY_STATE_STR(DOWN)
- PHY_STATE_STR(STARTING)
PHY_STATE_STR(READY)
- PHY_STATE_STR(PENDING)
PHY_STATE_STR(UP)
- PHY_STATE_STR(AN)
PHY_STATE_STR(RUNNING)
PHY_STATE_STR(NOLINK)
PHY_STATE_STR(FORCING)
@@ -62,6 +59,17 @@ static const char *phy_state_to_str(enum phy_state st)
return NULL;
}
+static void phy_link_up(struct phy_device *phydev)
+{
+ phydev->phy_link_change(phydev, true, true);
+ phy_led_trigger_change_speed(phydev);
+}
+
+static void phy_link_down(struct phy_device *phydev, bool do_carrier)
+{
+ phydev->phy_link_change(phydev, false, do_carrier);
+ phy_led_trigger_change_speed(phydev);
+}
/**
* phy_print_status - Convenience function to print out the current phy status
@@ -105,9 +113,9 @@ static int phy_clear_interrupt(struct phy_device *phydev)
*
* Returns 0 on success or < 0 on error.
*/
-static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+static int phy_config_interrupt(struct phy_device *phydev, bool interrupts)
{
- phydev->interrupts = interrupts;
+ phydev->interrupts = interrupts ? 1 : 0;
if (phydev->drv->config_intr)
return phydev->drv->config_intr(phydev);
@@ -171,11 +179,9 @@ EXPORT_SYMBOL(phy_aneg_done);
* settings were found.
*/
static const struct phy_setting *
-phy_find_valid(int speed, int duplex, u32 supported)
+phy_find_valid(int speed, int duplex, unsigned long *supported)
{
- unsigned long mask = supported;
-
- return phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, false);
+ return phy_lookup_setting(speed, duplex, supported, false);
}
/**
@@ -192,9 +198,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int *speeds,
unsigned int size)
{
- unsigned long supported = phy->supported;
-
- return phy_speeds(speeds, size, &supported, BITS_PER_LONG);
+ return phy_speeds(speeds, size, phy->supported);
}
/**
@@ -206,11 +210,10 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
*
* Description: Returns true if there is a valid setting, false otherwise.
*/
-static inline bool phy_check_valid(int speed, int duplex, u32 features)
+static inline bool phy_check_valid(int speed, int duplex,
+ unsigned long *features)
{
- unsigned long mask = features;
-
- return !!phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, true);
+ return !!phy_lookup_setting(speed, duplex, features, true);
}
/**
@@ -224,13 +227,13 @@ static inline bool phy_check_valid(int speed, int duplex, u32 features)
static void phy_sanitize_settings(struct phy_device *phydev)
{
const struct phy_setting *setting;
- u32 features = phydev->supported;
/* Sanitize settings based on PHY capabilities */
- if ((features & SUPPORTED_Autoneg) == 0)
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported))
phydev->autoneg = AUTONEG_DISABLE;
- setting = phy_find_valid(phydev->speed, phydev->duplex, features);
+ setting = phy_find_valid(phydev->speed, phydev->duplex,
+ phydev->supported);
if (setting) {
phydev->speed = setting->speed;
phydev->duplex = setting->duplex;
@@ -256,13 +259,15 @@ static void phy_sanitize_settings(struct phy_device *phydev)
*/
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
u32 speed = ethtool_cmd_speed(cmd);
if (cmd->phy_address != phydev->mdio.addr)
return -EINVAL;
/* We make sure that we don't pass unsupported values in to the PHY */
- cmd->advertising &= phydev->supported;
+ ethtool_convert_legacy_u32_to_link_mode(advertising, cmd->advertising);
+ linkmode_and(advertising, advertising, phydev->supported);
/* Verify the settings we care about. */
if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
@@ -283,12 +288,14 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
phydev->speed = speed;
- phydev->advertising = cmd->advertising;
+ linkmode_copy(phydev->advertising, advertising);
if (AUTONEG_ENABLE == cmd->autoneg)
- phydev->advertising |= ADVERTISED_Autoneg;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
else
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
phydev->duplex = cmd->duplex;
@@ -304,25 +311,24 @@ EXPORT_SYMBOL(phy_ethtool_sset);
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
u8 autoneg = cmd->base.autoneg;
u8 duplex = cmd->base.duplex;
u32 speed = cmd->base.speed;
- u32 advertising;
if (cmd->base.phy_address != phydev->mdio.addr)
return -EINVAL;
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
+ linkmode_copy(advertising, cmd->link_modes.advertising);
/* We make sure that we don't pass unsupported values in to the PHY */
- advertising &= phydev->supported;
+ linkmode_and(advertising, advertising, phydev->supported);
/* Verify the settings we care about. */
if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (autoneg == AUTONEG_ENABLE && advertising == 0)
+ if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
return -EINVAL;
if (autoneg == AUTONEG_DISABLE &&
@@ -337,12 +343,14 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
phydev->speed = speed;
- phydev->advertising = advertising;
+ linkmode_copy(phydev->advertising, advertising);
if (autoneg == AUTONEG_ENABLE)
- phydev->advertising |= ADVERTISED_Autoneg;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
else
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
phydev->duplex = duplex;
@@ -358,14 +366,9 @@ EXPORT_SYMBOL(phy_ethtool_ksettings_set);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd)
{
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- phydev->supported);
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- phydev->advertising);
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
- phydev->lp_advertising);
+ linkmode_copy(cmd->link_modes.supported, phydev->supported);
+ linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
+ linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
cmd->base.speed = phydev->speed;
cmd->base.duplex = phydev->duplex;
@@ -434,7 +437,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
break;
case MII_ADVERTISE:
- phydev->advertising = mii_adv_to_ethtool_adv_t(val);
+ mii_adv_mod_linkmode_adv_t(phydev->advertising,
+ val);
change_autoneg = true;
break;
default:
@@ -467,6 +471,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_mii_ioctl);
+static void phy_queue_state_machine(struct phy_device *phydev,
+ unsigned int secs)
+{
+ mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+ secs * HZ);
+}
+
+static void phy_trigger_machine(struct phy_device *phydev)
+{
+ phy_queue_state_machine(phydev, 0);
+}
+
static int phy_config_aneg(struct phy_device *phydev)
{
if (phydev->drv->config_aneg)
@@ -482,6 +498,34 @@ static int phy_config_aneg(struct phy_device *phydev)
}
/**
+ * phy_check_link_status - check link status and set state accordingly
+ * @phydev: the phy_device struct
+ *
+ * Description: Check for link and whether autoneg was triggered / is running
+ * and set state accordingly
+ */
+static int phy_check_link_status(struct phy_device *phydev)
+{
+ int err;
+
+ WARN_ON(!mutex_is_locked(&phydev->lock));
+
+ err = phy_read_status(phydev);
+ if (err)
+ return err;
+
+ if (phydev->link && phydev->state != PHY_RUNNING) {
+ phydev->state = PHY_RUNNING;
+ phy_link_up(phydev);
+ } else if (!phydev->link && phydev->state != PHY_NOLINK) {
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev, true);
+ }
+
+ return 0;
+}
+
+/**
* phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
*
@@ -492,7 +536,6 @@ static int phy_config_aneg(struct phy_device *phydev)
*/
int phy_start_aneg(struct phy_device *phydev)
{
- bool trigger = 0;
int err;
if (!phydev->drv)
@@ -500,44 +543,33 @@ int phy_start_aneg(struct phy_device *phydev)
mutex_lock(&phydev->lock);
+ if (!__phy_is_started(phydev)) {
+ WARN(1, "called from state %s\n",
+ phy_state_to_str(phydev->state));
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
/* Invalidate LP advertising flags */
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
err = phy_config_aneg(phydev);
if (err < 0)
goto out_unlock;
- if (phydev->state != PHY_HALTED) {
- if (AUTONEG_ENABLE == phydev->autoneg) {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
- } else {
- phydev->state = PHY_FORCING;
- phydev->link_timeout = PHY_FORCE_TIMEOUT;
- }
- }
-
- /* Re-schedule a PHY state machine to check PHY status because
- * negotiation may already be done and aneg interrupt may not be
- * generated.
- */
- if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) {
- err = phy_aneg_done(phydev);
- if (err > 0) {
- trigger = true;
- err = 0;
- }
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ err = phy_check_link_status(phydev);
+ } else {
+ phydev->state = PHY_FORCING;
+ phydev->link_timeout = PHY_FORCE_TIMEOUT;
}
out_unlock:
mutex_unlock(&phydev->lock);
- if (trigger)
- phy_trigger_machine(phydev);
-
return err;
}
EXPORT_SYMBOL(phy_start_aneg);
@@ -573,20 +605,38 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
*/
int phy_speed_down(struct phy_device *phydev, bool sync)
{
- u32 adv = phydev->lp_advertising & phydev->supported;
- u32 adv_old = phydev->advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
int ret;
if (phydev->autoneg != AUTONEG_ENABLE)
return 0;
- if (adv & PHY_10BT_FEATURES)
- phydev->advertising &= ~(PHY_100BT_FEATURES |
- PHY_1000BT_FEATURES);
- else if (adv & PHY_100BT_FEATURES)
- phydev->advertising &= ~PHY_1000BT_FEATURES;
+ linkmode_copy(adv_old, phydev->advertising);
+ linkmode_copy(adv, phydev->lp_advertising);
+ linkmode_and(adv, adv, phydev->supported);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, adv) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, adv)) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->advertising);
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ adv) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ adv)) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->advertising);
+ }
- if (phydev->advertising == adv_old)
+ if (linkmode_equal(phydev->advertising, adv_old))
return 0;
ret = phy_config_aneg(phydev);
@@ -605,28 +655,36 @@ EXPORT_SYMBOL_GPL(phy_speed_down);
*/
int phy_speed_up(struct phy_device *phydev)
{
- u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES;
- u32 adv_old = phydev->advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(all_speeds) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(not_speeds);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds);
+
+ linkmode_copy(adv_old, phydev->advertising);
if (phydev->autoneg != AUTONEG_ENABLE)
return 0;
- phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, all_speeds);
+
+ linkmode_andnot(not_speeds, adv_old, all_speeds);
+ linkmode_copy(supported, phydev->supported);
+ linkmode_and(speeds, supported, all_speeds);
+ linkmode_or(phydev->advertising, not_speeds, speeds);
- if (phydev->advertising == adv_old)
+ if (linkmode_equal(phydev->advertising, adv_old))
return 0;
return phy_config_aneg(phydev);
}
EXPORT_SYMBOL_GPL(phy_speed_up);
-static void phy_queue_state_machine(struct phy_device *phydev,
- unsigned int secs)
-{
- mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
- secs * HZ);
-}
-
/**
* phy_start_machine - start PHY state machine tracking
* @phydev: the phy_device struct
@@ -644,20 +702,6 @@ void phy_start_machine(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(phy_start_machine);
/**
- * phy_trigger_machine - trigger the state machine to run
- *
- * @phydev: the phy_device struct
- *
- * Description: There has been a change in state which requires that the
- * state machine runs.
- */
-
-void phy_trigger_machine(struct phy_device *phydev)
-{
- phy_queue_state_machine(phydev, 0);
-}
-
-/**
* phy_stop_machine - stop the PHY state machine tracking
* @phydev: target phy_device struct
*
@@ -670,7 +714,7 @@ void phy_stop_machine(struct phy_device *phydev)
cancel_delayed_work_sync(&phydev->state_queue);
mutex_lock(&phydev->lock);
- if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
+ if (__phy_is_started(phydev))
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
}
@@ -686,6 +730,8 @@ void phy_stop_machine(struct phy_device *phydev)
*/
static void phy_error(struct phy_device *phydev)
{
+ WARN_ON(1);
+
mutex_lock(&phydev->lock);
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
@@ -711,30 +757,26 @@ static int phy_disable_interrupts(struct phy_device *phydev)
}
/**
- * phy_change - Called by the phy_interrupt to handle PHY changes
- * @phydev: phy_device struct that interrupted
+ * phy_interrupt - PHY interrupt handler
+ * @irq: interrupt line
+ * @phy_dat: phy_device pointer
+ *
+ * Description: Handle PHY interrupt
*/
-static irqreturn_t phy_change(struct phy_device *phydev)
+static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
- if (phy_interrupt_is_valid(phydev)) {
- if (phydev->drv->did_interrupt &&
- !phydev->drv->did_interrupt(phydev))
- return IRQ_NONE;
-
- if (phydev->state == PHY_HALTED)
- if (phy_disable_interrupts(phydev))
- goto phy_err;
- }
+ struct phy_device *phydev = phy_dat;
- mutex_lock(&phydev->lock);
- if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
- phydev->state = PHY_CHANGELINK;
- mutex_unlock(&phydev->lock);
+ if (!phy_is_started(phydev))
+ return IRQ_NONE; /* It can't be ours. */
+
+ if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
+ return IRQ_NONE;
/* reschedule state queue work to run as soon as possible */
phy_trigger_machine(phydev);
- if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
+ if (phy_clear_interrupt(phydev))
goto phy_err;
return IRQ_HANDLED;
@@ -744,36 +786,6 @@ phy_err:
}
/**
- * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
- * @work: work_struct that describes the work to be done
- */
-void phy_change_work(struct work_struct *work)
-{
- struct phy_device *phydev =
- container_of(work, struct phy_device, phy_queue);
-
- phy_change(phydev);
-}
-
-/**
- * phy_interrupt - PHY interrupt handler
- * @irq: interrupt line
- * @phy_dat: phy_device pointer
- *
- * Description: When a PHY interrupt occurs, the handler disables
- * interrupts, and uses phy_change to handle the interrupt.
- */
-static irqreturn_t phy_interrupt(int irq, void *phy_dat)
-{
- struct phy_device *phydev = phy_dat;
-
- if (PHY_HALTED == phydev->state)
- return IRQ_NONE; /* It can't be ours. */
-
- return phy_change(phydev);
-}
-
-/**
* phy_enable_interrupts - Enable the interrupts from the PHY side
* @phydev: target phy_device struct
*/
@@ -837,21 +849,24 @@ void phy_stop(struct phy_device *phydev)
{
mutex_lock(&phydev->lock);
- if (PHY_HALTED == phydev->state)
- goto out_unlock;
+ if (!__phy_is_started(phydev)) {
+ WARN(1, "called from state %s\n",
+ phy_state_to_str(phydev->state));
+ mutex_unlock(&phydev->lock);
+ return;
+ }
if (phy_interrupt_is_valid(phydev))
phy_disable_interrupts(phydev);
phydev->state = PHY_HALTED;
-out_unlock:
mutex_unlock(&phydev->lock);
phy_state_machine(&phydev->state_queue.work);
/* Cannot call flush_scheduled_work() here as desired because
- * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
+ * of rtnl_lock(), but PHY_HALTED shall guarantee irq handler
* will not reenable interrupts.
*/
}
@@ -874,9 +889,6 @@ void phy_start(struct phy_device *phydev)
mutex_lock(&phydev->lock);
switch (phydev->state) {
- case PHY_STARTING:
- phydev->state = PHY_PENDING;
- break;
case PHY_READY:
phydev->state = PHY_UP;
break;
@@ -902,18 +914,6 @@ void phy_start(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_start);
-static void phy_link_up(struct phy_device *phydev)
-{
- phydev->phy_link_change(phydev, true, true);
- phy_led_trigger_change_speed(phydev);
-}
-
-static void phy_link_down(struct phy_device *phydev, bool do_carrier)
-{
- phydev->phy_link_change(phydev, false, do_carrier);
- phy_led_trigger_change_speed(phydev);
-}
-
/**
* phy_state_machine - Handle the state machine
* @work: work_struct that describes the work to be done
@@ -936,63 +936,17 @@ void phy_state_machine(struct work_struct *work)
switch (phydev->state) {
case PHY_DOWN:
- case PHY_STARTING:
case PHY_READY:
- case PHY_PENDING:
break;
case PHY_UP:
needs_aneg = true;
- phydev->link_timeout = PHY_AN_TIMEOUT;
-
- break;
- case PHY_AN:
- err = phy_read_status(phydev);
- if (err < 0)
- break;
-
- /* If the link is down, give up on negotiation for now */
- if (!phydev->link) {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, true);
- break;
- }
-
- /* Check if negotiation is done. Break if there's an error */
- err = phy_aneg_done(phydev);
- if (err < 0)
- break;
-
- /* If AN is done, we're running */
- if (err > 0) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else if (0 == phydev->link_timeout--)
- needs_aneg = true;
break;
case PHY_NOLINK:
- if (!phy_polling_mode(phydev))
- break;
-
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- if (AUTONEG_ENABLE == phydev->autoneg) {
- err = phy_aneg_done(phydev);
- if (err < 0)
- break;
-
- if (!err) {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
- break;
- }
- }
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- }
+ case PHY_RUNNING:
+ case PHY_CHANGELINK:
+ case PHY_RESUMING:
+ err = phy_check_link_status(phydev);
break;
case PHY_FORCING:
err = genphy_update_link(phydev);
@@ -1008,32 +962,6 @@ void phy_state_machine(struct work_struct *work)
phy_link_down(phydev, false);
}
break;
- case PHY_RUNNING:
- if (!phy_polling_mode(phydev))
- break;
-
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (!phydev->link) {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, true);
- }
- break;
- case PHY_CHANGELINK:
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, true);
- }
- break;
case PHY_HALTED:
if (phydev->link) {
phydev->link = 0;
@@ -1041,30 +969,6 @@ void phy_state_machine(struct work_struct *work)
do_suspend = true;
}
break;
- case PHY_RESUMING:
- if (AUTONEG_ENABLE == phydev->autoneg) {
- err = phy_aneg_done(phydev);
- if (err < 0) {
- break;
- } else if (!err) {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
- break;
- }
- }
-
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, false);
- }
- break;
}
mutex_unlock(&phydev->lock);
@@ -1090,7 +994,7 @@ void phy_state_machine(struct work_struct *work)
* state machine would be pointless and possibly error prone when
* called from phy_disconnect() synchronously.
*/
- if (phy_polling_mode(phydev) && old_state != PHY_HALTED)
+ if (phy_polling_mode(phydev) && phy_is_started(phydev))
phy_queue_state_machine(phydev, PHY_STATE_TIME);
}
@@ -1104,10 +1008,34 @@ void phy_state_machine(struct work_struct *work)
void phy_mac_interrupt(struct phy_device *phydev)
{
/* Trigger a state machine change */
- queue_work(system_power_efficient_wq, &phydev->phy_queue);
+ phy_trigger_machine(phydev);
}
EXPORT_SYMBOL(phy_mac_interrupt);
+static void mmd_eee_adv_to_linkmode(unsigned long *advertising, u16 eee_adv)
+{
+ linkmode_zero(advertising);
+
+ if (eee_adv & MDIO_EEE_100TX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_1000T)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_10GT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_1000KX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_10GKX4)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_10GKR)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ advertising);
+}
+
/**
* phy_init_eee - init and check the EEE feature
* @phydev: target phy_device struct
@@ -1126,9 +1054,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
/* According to 802.3az, the EEE is supported only in full-duplex mode.
*/
if (phydev->duplex == DUPLEX_FULL) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
int eee_lp, eee_cap, eee_adv;
- u32 lp, cap, adv;
int status;
+ u32 cap;
/* Read phy status to properly get the right settings */
status = phy_read_status(phydev);
@@ -1155,9 +1086,11 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (eee_adv <= 0)
goto eee_exit_err;
- adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
- lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
- if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
+ mmd_eee_adv_to_linkmode(adv, eee_adv);
+ mmd_eee_adv_to_linkmode(lp, eee_lp);
+ linkmode_and(common, adv, lp);
+
+ if (!phy_check_valid(phydev->speed, phydev->duplex, common))
goto eee_exit_err;
if (clk_stop_enable) {
@@ -1221,6 +1154,7 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
if (val < 0)
return val;
data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ data->eee_enabled = !!data->advertised;
/* Get LP advertisement EEE */
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
@@ -1228,6 +1162,8 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
return val;
data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+ data->eee_active = !!(data->advertised & data->lp_advertised);
+
return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);
@@ -1241,7 +1177,7 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
*/
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
- int cap, old_adv, adv, ret;
+ int cap, old_adv, adv = 0, ret;
if (!phydev->drv)
return -EIO;
@@ -1255,10 +1191,12 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
if (old_adv < 0)
return old_adv;
- adv = ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
-
- /* Mask prohibited EEE modes */
- adv &= ~phydev->eee_broken_modes;
+ if (data->eee_enabled) {
+ adv = !data->advertised ? cap :
+ ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
+ /* Mask prohibited EEE modes */
+ adv &= ~phydev->eee_broken_modes;
+ }
if (old_adv != adv) {
ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
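phy_ethtool_set_eee() now keys off eee_enabled: disabled means advertise nothing, enabled with an empty advertised mask means advertise the full capability. A caller-side sketch of the new contract (assumptions noted in the comment):

	#include <linux/ethtool.h>
	#include <linux/phy.h>

	static int sketch_enable_eee(struct phy_device *phydev)
	{
		/* empty advertised mask + eee_enabled set: per the hunk
		 * above, the full capability (cap) is advertised minus
		 * eee_broken_modes; eee_enabled clear advertises nothing.
		 */
		struct ethtool_eee data = { .eee_enabled = 1 };

		return phy_ethtool_set_eee(phydev, &data);
	}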
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ab33d1777132..51990002d495 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -66,10 +66,12 @@ static const int phy_basic_ports_array[] = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_MII_BIT,
};
+EXPORT_SYMBOL_GPL(phy_basic_ports_array);
static const int phy_fibre_port_array[] = {
ETHTOOL_LINK_MODE_FIBRE_BIT,
};
+EXPORT_SYMBOL_GPL(phy_fibre_port_array);
static const int phy_all_ports_features_array[] = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
@@ -80,27 +82,32 @@ static const int phy_all_ports_features_array[] = {
ETHTOOL_LINK_MODE_BNC_BIT,
ETHTOOL_LINK_MODE_Backplane_BIT,
};
+EXPORT_SYMBOL_GPL(phy_all_ports_features_array);
-static const int phy_10_100_features_array[] = {
+const int phy_10_100_features_array[4] = {
ETHTOOL_LINK_MODE_10baseT_Half_BIT,
ETHTOOL_LINK_MODE_10baseT_Full_BIT,
ETHTOOL_LINK_MODE_100baseT_Half_BIT,
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_10_100_features_array);
-static const int phy_basic_t1_features_array[] = {
+const int phy_basic_t1_features_array[2] = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
-static const int phy_gbit_features_array[] = {
+const int phy_gbit_features_array[2] = {
ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_gbit_features_array);
-static const int phy_10gbit_features_array[] = {
+const int phy_10gbit_features_array[1] = {
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
@@ -308,11 +315,8 @@ static int mdio_bus_phy_restore(struct device *dev)
if (ret < 0)
return ret;
- /* The PHY needs to renegotiate. */
- phydev->link = 0;
- phydev->state = PHY_UP;
-
- phy_start_machine(phydev);
+ if (phydev->attached_dev && phydev->adjust_link)
+ phy_start_machine(phydev);
return 0;
}
@@ -587,7 +591,6 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
- INIT_WORK(&dev->phy_queue, phy_change_work);
/* Request the appropriate module unconditionally; don't
* bother trying to do so only if it isn't already loaded,
@@ -599,7 +602,21 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
* driver will get bored and give up as soon as it finds that
* there's no driver _already_ loaded.
*/
- request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
+ if (is_c45 && c45_ids) {
+ const int num_ids = ARRAY_SIZE(c45_ids->device_ids);
+ int i;
+
+ for (i = 1; i < num_ids; i++) {
+ if (!(c45_ids->devices_in_package & (1 << i)))
+ continue;
+
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
+ MDIO_ID_ARGS(c45_ids->device_ids[i]));
+ }
+ } else {
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
+ MDIO_ID_ARGS(phy_id));
+ }
device_initialize(&mdiodev->dev);
@@ -1442,8 +1459,13 @@ static int genphy_config_advert(struct phy_device *phydev)
int err, changed = 0;
/* Only allow advertising what this PHY supports */
- phydev->advertising &= phydev->supported;
- advertise = phydev->advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+ if (!ethtool_convert_link_mode_to_legacy_u32(&advertise,
+ phydev->advertising))
+ phydev_warn(phydev, "PHY advertising (%*pb) more modes than genphy supports, some modes not advertised.\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ phydev->advertising);
/* Setup standard advertisement */
adv = phy_read(phydev, MII_ADVERTISE);
@@ -1482,10 +1504,11 @@ static int genphy_config_advert(struct phy_device *phydev)
oldadv = adv;
adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- if (phydev->supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported))
adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
- }
if (adv != oldadv)
changed = 1;
@@ -1690,11 +1713,13 @@ int genphy_read_status(struct phy_device *phydev)
if (err)
return err;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
if (AUTONEG_ENABLE == phydev->autoneg) {
- if (phydev->supported & (SUPPORTED_1000baseT_Half
- | SUPPORTED_1000baseT_Full)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported)) {
lpagb = phy_read(phydev, MII_STAT1000);
if (lpagb < 0)
return lpagb;
@@ -1711,8 +1736,8 @@ int genphy_read_status(struct phy_device *phydev)
return -ENOLINK;
}
- phydev->lp_advertising =
- mii_stat1000_to_ethtool_lpa_t(lpagb);
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
+ lpagb);
common_adv_gb = lpagb & adv << 2;
}
@@ -1720,7 +1745,7 @@ int genphy_read_status(struct phy_device *phydev)
if (lpa < 0)
return lpa;
- phydev->lp_advertising |= mii_lpa_to_ethtool_lpa_t(lpa);
+ mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
adv = phy_read(phydev, MII_ADVERTISE);
if (adv < 0)
@@ -1801,11 +1826,13 @@ EXPORT_SYMBOL(genphy_soft_reset);
int genphy_config_init(struct phy_device *phydev)
{
int val;
- u32 features;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(features) = { 0, };
- features = (SUPPORTED_TP | SUPPORTED_MII
- | SUPPORTED_AUI | SUPPORTED_FIBRE |
- SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, features);
/* Do we support autonegotiation? */
val = phy_read(phydev, MII_BMSR);
@@ -1813,16 +1840,16 @@ int genphy_config_init(struct phy_device *phydev)
return val;
if (val & BMSR_ANEGCAPABLE)
- features |= SUPPORTED_Autoneg;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, features);
if (val & BMSR_100FULL)
- features |= SUPPORTED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, features);
if (val & BMSR_100HALF)
- features |= SUPPORTED_100baseT_Half;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, features);
if (val & BMSR_10FULL)
- features |= SUPPORTED_10baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, features);
if (val & BMSR_10HALF)
- features |= SUPPORTED_10baseT_Half;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, features);
if (val & BMSR_ESTATEN) {
val = phy_read(phydev, MII_ESTATUS);
@@ -1830,13 +1857,15 @@ int genphy_config_init(struct phy_device *phydev)
return val;
if (val & ESTATUS_1000_TFULL)
- features |= SUPPORTED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ features);
if (val & ESTATUS_1000_THALF)
- features |= SUPPORTED_1000baseT_Half;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ features);
}
- phydev->supported &= features;
- phydev->advertising &= features;
+ linkmode_and(phydev->supported, phydev->supported, features);
+ linkmode_and(phydev->advertising, phydev->advertising, features);
return 0;
}
@@ -1880,20 +1909,23 @@ EXPORT_SYMBOL(genphy_loopback);
static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
- PHY_10BT_FEATURES);
-
switch (max_speed) {
- default:
- return -ENOTSUPP;
- case SPEED_1000:
- phydev->supported |= PHY_1000BT_FEATURES;
+ case SPEED_10:
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phydev->supported);
/* fall through */
case SPEED_100:
- phydev->supported |= PHY_100BT_FEATURES;
- /* fall through */
- case SPEED_10:
- phydev->supported |= PHY_10BT_FEATURES;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported);
+ break;
+ case SPEED_1000:
+ break;
+ default:
+ return -ENOTSUPP;
}
return 0;
@@ -1907,7 +1939,7 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
if (err)
return err;
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
return 0;
}
@@ -1924,10 +1956,8 @@ EXPORT_SYMBOL(phy_set_max_speed);
*/
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode)
{
- WARN_ON(link_mode > 31);
-
- phydev->supported &= ~BIT(link_mode);
- phydev->advertising = phydev->supported;
+ linkmode_clear_bit(link_mode, phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_remove_link_mode);
@@ -1940,9 +1970,9 @@ EXPORT_SYMBOL(phy_remove_link_mode);
*/
void phy_support_sym_pause(struct phy_device *phydev)
{
- phydev->supported &= ~SUPPORTED_Asym_Pause;
- phydev->supported |= SUPPORTED_Pause;
- phydev->advertising = phydev->supported;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_support_sym_pause);
@@ -1954,8 +1984,9 @@ EXPORT_SYMBOL(phy_support_sym_pause);
*/
void phy_support_asym_pause(struct phy_device *phydev)
{
- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phydev->advertising = phydev->supported;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_support_asym_pause);
@@ -1973,12 +2004,13 @@ EXPORT_SYMBOL(phy_support_asym_pause);
void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
bool autoneg)
{
- phydev->supported &= ~SUPPORTED_Pause;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
if (rx && tx && autoneg)
- phydev->supported |= SUPPORTED_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_set_sym_pause);
@@ -1995,20 +2027,29 @@ EXPORT_SYMBOL(phy_set_sym_pause);
*/
void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx)
{
- u16 oldadv = phydev->advertising;
- u16 newadv = oldadv &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(oldadv);
- if (rx)
- newadv |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- if (tx)
- newadv ^= SUPPORTED_Asym_Pause;
+ linkmode_copy(oldadv, phydev->advertising);
- if (oldadv != newadv) {
- phydev->advertising = newadv;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
- if (phydev->autoneg)
- phy_start_aneg(phydev);
+ if (rx) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
}
+
+ if (tx)
+ linkmode_change_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+
+ if (!linkmode_equal(oldadv, phydev->advertising) &&
+ phydev->autoneg)
+ phy_start_aneg(phydev);
}
EXPORT_SYMBOL(phy_set_asym_pause);
@@ -2024,8 +2065,10 @@ EXPORT_SYMBOL(phy_set_asym_pause);
bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp)
{
- if (!(phydev->supported & SUPPORTED_Pause) ||
- (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported) ||
+ (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported) &&
pp->rx_pause != pp->tx_pause))
return false;
return true;
@@ -2074,6 +2117,11 @@ static void of_set_phy_eee_broken(struct phy_device *phydev)
phydev->eee_broken_modes = broken;
}
+static bool phy_drv_supports_irq(struct phy_driver *phydrv)
+{
+ return phydrv->config_intr && phydrv->ack_interrupt;
+}
+
/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
@@ -2087,7 +2135,6 @@ static int phy_probe(struct device *dev)
struct phy_device *phydev = to_phy_device(dev);
struct device_driver *drv = phydev->mdio.dev.driver;
struct phy_driver *phydrv = to_phy_driver(drv);
- u32 features;
int err = 0;
phydev->drv = phydrv;
@@ -2095,8 +2142,7 @@ static int phy_probe(struct device *dev)
/* Disable the interrupt if the PHY doesn't support it
* but the interrupt is still a valid one
*/
- if (!(phydrv->flags & PHY_HAS_INTERRUPT) &&
- phy_interrupt_is_valid(phydev))
+ if (!phy_drv_supports_irq(phydrv) && phy_interrupt_is_valid(phydev))
phydev->irq = PHY_POLL;
if (phydrv->flags & PHY_IS_INTERNAL)
@@ -2108,10 +2154,9 @@ static int phy_probe(struct device *dev)
* a controller will attach, and may modify one
* or both of these values
*/
- ethtool_convert_link_mode_to_legacy_u32(&features, phydrv->features);
- phydev->supported = features;
+ linkmode_copy(phydev->supported, phydrv->features);
of_set_phy_supported(phydev);
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
/* Get the EEE modes we want to prohibit. We will ask
* the PHY to stop advertising these modes later on
@@ -2131,14 +2176,22 @@ static int phy_probe(struct device *dev)
*/
if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) ||
test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) {
- phydev->supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features))
- phydev->supported |= SUPPORTED_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydrv->features))
- phydev->supported |= SUPPORTED_Asym_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
} else {
- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
}
/* Set the state to READY by default */
@@ -2197,6 +2250,14 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
new_driver->mdiodrv.driver.remove = phy_remove;
new_driver->mdiodrv.driver.owner = owner;
+ /* The following works around an issue where the PHY driver doesn't bind
+ * to the device, resulting in the genphy driver being used instead of
+ * the dedicated driver. The root cause of the issue isn't known yet
+ * and seems to be in the base driver core. Once this is fixed we may
+ * remove this workaround.
+ */
+ new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
+
retval = driver_register(&new_driver->mdiodrv.driver);
if (retval) {
pr_err("%s: Error %d in registering driver\n",
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 491efc1bf5c4..263385b75bba 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -67,7 +67,7 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
EXPORT_SYMBOL_GPL(phy_led_trigger_change_speed);
static void phy_led_trigger_format_name(struct phy_device *phy, char *buf,
- size_t size, char *suffix)
+ size_t size, const char *suffix)
{
snprintf(buf, size, PHY_ID_FMT ":%s",
phy->mdio.bus->id, phy->mdio.addr, suffix);
@@ -77,20 +77,9 @@ static int phy_led_trigger_register(struct phy_device *phy,
struct phy_led_trigger *plt,
unsigned int speed)
{
- char name_suffix[PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE];
-
plt->speed = speed;
-
- if (speed < SPEED_1000)
- snprintf(name_suffix, sizeof(name_suffix), "%dMbps", speed);
- else if (speed == SPEED_2500)
- snprintf(name_suffix, sizeof(name_suffix), "2.5Gbps");
- else
- snprintf(name_suffix, sizeof(name_suffix), "%dGbps",
- DIV_ROUND_CLOSEST(speed, 1000));
-
phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name),
- name_suffix);
+ phy_speed_to_str(speed));
plt->trigger.name = plt->name;
return led_trigger_register(&plt->trigger);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 9b8dd0d0ee42..e7becc7379d7 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -191,8 +191,7 @@ static int phylink_parse_fixedlink(struct phylink *pl,
phylink_validate(pl, pl->supported, &pl->link_config);
s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex,
- pl->supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS, true);
+ pl->supported, true);
linkmode_zero(pl->supported);
phylink_set(pl->supported, MII);
if (s) {
@@ -634,13 +633,11 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
{
struct phylink_link_state config;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
- u32 advertising;
int ret;
memset(&config, 0, sizeof(config));
- ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported);
- ethtool_convert_legacy_u32_to_link_mode(config.advertising,
- phy->advertising);
+ linkmode_copy(supported, phy->supported);
+ linkmode_copy(config.advertising, phy->advertising);
config.interface = pl->link_config.interface;
/*
@@ -673,15 +670,14 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
linkmode_copy(pl->link_config.advertising, config.advertising);
/* Restrict the phy advertisement according to the MAC support. */
- ethtool_convert_link_mode_to_legacy_u32(&advertising, config.advertising);
- phy->advertising = advertising;
+ linkmode_copy(phy->advertising, config.advertising);
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
netdev_dbg(pl->netdev,
- "phy: setting supported %*pb advertising 0x%08x\n",
+ "phy: setting supported %*pb advertising %*pb\n",
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
- phy->advertising);
+ __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
phy_start_machine(phy);
if (phy->irq > 0)
@@ -1088,8 +1084,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* duplex.
*/
s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
- pl->supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS, false);
+ pl->supported, false);
if (!s)
return -EINVAL;
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 889a4dce1648..cfe2313dbefd 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -116,7 +116,6 @@ static struct phy_driver qs6612_driver[] = { {
.name = "QS6612",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = qs6612_config_init,
.ack_interrupt = qs6612_ack_interrupt,
.config_intr = qs6612_config_intr,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 271e8adc39f1..c6010fb1aa0f 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -213,17 +213,13 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
static struct phy_driver realtek_drvs[] = {
{
- .phy_id = 0x00008201,
+ PHY_ID_MATCH_EXACT(0x00008201),
.name = "RTL8201CP Ethernet",
- .phy_id_mask = 0x0000ffff,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
}, {
- .phy_id = 0x001cc816,
+ PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = &rtl8201_ack_interrupt,
.config_intr = &rtl8201_config_intr,
.suspend = genphy_suspend,
@@ -231,19 +227,16 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- .phy_id = 0x001cc910,
+ PHY_ID_MATCH_EXACT(0x001cc910),
.name = "RTL8211 Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.config_aneg = rtl8211_config_aneg,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
}, {
- .phy_id = 0x001cc912,
+ PHY_ID_MATCH_EXACT(0x001cc912),
.name = "RTL8211B Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
.read_mmd = &genphy_read_mmd_unsupported,
@@ -251,39 +244,32 @@ static struct phy_driver realtek_drvs[] = {
.suspend = rtl8211b_suspend,
.resume = rtl8211b_resume,
}, {
- .phy_id = 0x001cc913,
+ PHY_ID_MATCH_EXACT(0x001cc913),
.name = "RTL8211C Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.config_init = rtl8211c_config_init,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
}, {
- .phy_id = 0x001cc914,
+ PHY_ID_MATCH_EXACT(0x001cc914),
.name = "RTL8211DN Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = rtl821x_ack_interrupt,
.config_intr = rtl8211e_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = 0x001cc915,
+ PHY_ID_MATCH_EXACT(0x001cc915),
.name = "RTL8211E Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211e_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = 0x001cc916,
+ PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &rtl8211f_config_init,
.ack_interrupt = &rtl8211f_ack_interrupt,
.config_intr = &rtl8211f_config_intr,
@@ -292,11 +278,9 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- .phy_id = 0x001cc961,
+ PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &rtl8366rb_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -305,15 +289,8 @@ static struct phy_driver realtek_drvs[] = {
module_phy_driver(realtek_drvs);
-static struct mdio_device_id __maybe_unused realtek_tbl[] = {
- { 0x001cc816, 0x001fffff },
- { 0x001cc910, 0x001fffff },
- { 0x001cc912, 0x001fffff },
- { 0x001cc913, 0x001fffff },
- { 0x001cc914, 0x001fffff },
- { 0x001cc915, 0x001fffff },
- { 0x001cc916, 0x001fffff },
- { 0x001cc961, 0x001fffff },
+static const struct mdio_device_id __maybe_unused realtek_tbl[] = {
+ { PHY_ID_MATCH_VENDOR(0x001cc800) },
{ }
};
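
The PHY_ID_MATCH_EXACT()/PHY_ID_MATCH_VENDOR() helpers used above set
.phy_id and .phy_id_mask together. A sketch of the idea behind the
<linux/phy.h> helpers; matching on the vendor portion of the ID lets a
single table entry replace the eight per-model rows removed from
realtek_tbl:

	#define PHY_ID_MATCH_EXACT(id)  .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
	#define PHY_ID_MATCH_MODEL(id)  .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
	#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)

	/* 0x001cc816 & GENMASK(31, 10) == 0x001cc800, so every 0x001cc8xx
	 * Realtek ID matches the single PHY_ID_MATCH_VENDOR(0x001cc800) row.
	 */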
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 83060fb349f4..ad9db652874d 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -162,7 +162,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
/* 1000Base-PX or 1000Base-BX10 */
if ((id->base.e_base_px || id->base.e_base_bx10) &&
br_min <= 1300 && br_max >= 1200)
- phylink_set(support, 1000baseX_Full);
+ phylink_set(modes, 1000baseX_Full);
/* For active or passive cables, select the link modes
* based on the bit rates and the cable compliance bytes.
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c328208388da..f9477ff55545 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -219,7 +219,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN83C185",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -239,7 +238,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8187",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -264,7 +262,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8700",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -290,7 +287,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN911x Internal PHY",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -309,7 +305,7 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8710/LAN8720",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_RST_AFTER_CLK_EN,
+ .flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
@@ -335,7 +331,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8740",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 2fe9a87b55b5..33d733684f5b 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -87,7 +87,6 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id_mask = 0xfffffff0,
.name = "STe101p",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
@@ -98,7 +97,6 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id_mask = 0xffffffff,
.name = "STe100p",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 55f48ee3595a..1e4fc42e4629 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -47,7 +47,7 @@ static int upd60620_read_status(struct phy_device *phydev)
return phy_state;
phydev->link = 0;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -70,8 +70,8 @@ static int upd60620_read_status(struct phy_device *phydev)
if (phy_state < 0)
return phy_state;
- phydev->lp_advertising
- = mii_lpa_to_ethtool_lpa_t(phy_state);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising,
+ phy_state);
if (phydev->duplex == DUPLEX_FULL) {
if (phy_state & LPA_PAUSE_CAP)
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index fbf9ad429593..0646af458f6a 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -70,7 +70,6 @@
#define PHY_ID_VSC8244 0x000fc6c0
#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8572 0x000704d0
-#define PHY_ID_VSC8574 0x000704a0
#define PHY_ID_VSC8601 0x00070420
#define PHY_ID_VSC7385 0x00070450
#define PHY_ID_VSC7388 0x00070480
@@ -303,7 +302,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
phydev->drv->phy_id == PHY_ID_VSC8244 ||
phydev->drv->phy_id == PHY_ID_VSC8514 ||
phydev->drv->phy_id == PHY_ID_VSC8572 ||
- phydev->drv->phy_id == PHY_ID_VSC8574 ||
phydev->drv->phy_id == PHY_ID_VSC8601) ?
MII_VSC8244_IMASK_MASK :
MII_VSC8221_IMASK_MASK);
@@ -399,7 +397,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8234",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -409,7 +406,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8244",
.phy_id_mask = 0x000fffc0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -419,7 +415,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8514",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -429,17 +424,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8572",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_init = &vsc824x_config_init,
- .config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
- .config_intr = &vsc82xx_config_intr,
-}, {
- .phy_id = PHY_ID_VSC8574,
- .name = "Vitesse VSC8574",
- .phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -449,7 +433,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8601",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8601_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -494,7 +477,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8662",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -505,7 +487,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8221",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8221_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -515,7 +496,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8211",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8221_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -528,7 +508,6 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8244, 0x000fffc0 },
{ PHY_ID_VSC8514, 0x000ffff0 },
{ PHY_ID_VSC8572, 0x000ffff0 },
- { PHY_ID_VSC8574, 0x000ffff0 },
{ PHY_ID_VSC7385, 0x000ffff0 },
{ PHY_ID_VSC7388, 0x000ffff0 },
{ PHY_ID_VSC7395, 0x000ffff0 },
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index bdc4d23627c5..b287bb811875 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -70,7 +70,7 @@ struct asyncppp {
struct tasklet_struct tsk;
refcount_t refcnt;
- struct semaphore dead_sem;
+ struct completion dead;
struct ppp_channel chan; /* interface to generic ppp layer */
unsigned char obuf[OBUFSIZE];
};
@@ -148,7 +148,7 @@ static struct asyncppp *ap_get(struct tty_struct *tty)
static void ap_put(struct asyncppp *ap)
{
if (refcount_dec_and_test(&ap->refcnt))
- up(&ap->dead_sem);
+ complete(&ap->dead);
}
/*
@@ -186,7 +186,7 @@ ppp_asynctty_open(struct tty_struct *tty)
tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
refcount_set(&ap->refcnt, 1);
- sema_init(&ap->dead_sem, 0);
+ init_completion(&ap->dead);
ap->chan.private = ap;
ap->chan.ops = &async_ops;
@@ -235,7 +235,7 @@ ppp_asynctty_close(struct tty_struct *tty)
* by the time it returns.
*/
if (!refcount_dec_and_test(&ap->refcnt))
- down(&ap->dead_sem);
+ wait_for_completion(&ap->dead);
tasklet_kill(&ap->tsk);
ppp_unregister_channel(&ap->chan);
@@ -770,7 +770,7 @@ process_input_packet(struct asyncppp *ap)
{
struct sk_buff *skb;
unsigned char *p;
- unsigned int len, fcs, proto;
+ unsigned int len, fcs;
skb = ap->rpkt;
if (ap->state & (SC_TOSS | SC_ESCAPE))
@@ -799,14 +799,14 @@ process_input_packet(struct asyncppp *ap)
goto err;
p = skb_pull(skb, 2);
}
- proto = p[0];
- if (proto & 1) {
- /* protocol is compressed */
- *(u8 *)skb_push(skb, 1) = 0;
- } else {
+
+	/* If the protocol field is not compressed, it can be an LCP packet */
+ if (!(p[0] & 0x01)) {
+ unsigned int proto;
+
if (skb->len < 2)
goto err;
- proto = (proto << 8) + p[1];
+ proto = (p[0] << 8) + p[1];
if (proto == PPP_LCP)
async_lcp_peek(ap, p, skb->len, 1);
}
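
The dead_sem to completion switch in ppp_async.c keeps the same
teardown protocol: close drops its reference and, if readers still hold
one, sleeps until the final put signals. A minimal sketch of the
pattern, with a simplified structure in place of struct asyncppp:

	struct obj {
		refcount_t refcnt;
		struct completion dead;
	};

	static void obj_put(struct obj *o)
	{
		if (refcount_dec_and_test(&o->refcnt))
			complete(&o->dead);	/* last reference is gone */
	}

	static void obj_close(struct obj *o)
	{
		/* Drop our reference; if it was not the last one, wait
		 * for whoever holds the final reference to complete().
		 */
		if (!refcount_dec_and_test(&o->refcnt))
			wait_for_completion(&o->dead);
		/* o is now exclusively ours to tear down */
	}

A completion fits this better than a semaphore: it is a one-shot
"state reached" signal rather than a counted resource.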
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 500bc0027c1b..c708400fff4a 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1965,6 +1965,46 @@ ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
ppp_recv_unlock(ppp);
}
+/**
+ * __ppp_decompress_proto - Decompress protocol field, slim version.
+ * @skb: Socket buffer where protocol field should be decompressed. It must have
+ * at least 1 byte of head room and 1 byte of linear data. First byte of
+ * data must be a protocol field byte.
+ *
+ * Decompress protocol field in PPP header if it's compressed, e.g. when
+ * Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data
+ * length are done in this function.
+ */
+static void __ppp_decompress_proto(struct sk_buff *skb)
+{
+ if (skb->data[0] & 0x01)
+ *(u8 *)skb_push(skb, 1) = 0x00;
+}
+
+/**
+ * ppp_decompress_proto - Check skb data room and decompress protocol field.
+ * @skb: Socket buffer where protocol field should be decompressed. First byte
+ * of data must be a protocol field byte.
+ *
+ * Decompress protocol field in PPP header if it's compressed, e.g. when
+ * Protocol-Field-Compression (PFC) was negotiated. This function also makes
+ * sure that the skb data room is sufficient for the Protocol field, before
+ * and after decompression.
+ *
+ * Return: true - decompressed successfully, false - not enough room in skb.
+ */
+static bool ppp_decompress_proto(struct sk_buff *skb)
+{
+ /* At least one byte should be present (if protocol is compressed) */
+ if (!pskb_may_pull(skb, 1))
+ return false;
+
+ __ppp_decompress_proto(skb);
+
+ /* Protocol field should occupy 2 bytes when not compressed */
+ return pskb_may_pull(skb, 2);
+}
+
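+/* Background for the two helpers above: PPP protocol numbers are
+ * assigned so that the first octet is even and the second octet odd
+ * (RFC 1661).  With Protocol-Field-Compression negotiated, a peer may
+ * omit the leading 0x00 of protocols below 0x100, e.g. for IPv4:
+ *
+ *   uncompressed:  0x00 0x21 <IP datagram>
+ *   compressed:         0x21 <IP datagram>
+ *
+ * An odd first byte therefore marks a compressed field, and pushing a
+ * 0x00 back restores the two-byte form that PPP_PROTO(skb) expects.
+ */
+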
void
ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
{
@@ -1977,7 +2017,7 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
}
read_lock_bh(&pch->upl);
- if (!pskb_may_pull(skb, 2)) {
+ if (!ppp_decompress_proto(skb)) {
kfree_skb(skb);
if (pch->ppp) {
++pch->ppp->dev->stats.rx_length_errors;
@@ -2074,6 +2114,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
goto err;
+ /* At this point the "Protocol" field MUST be decompressed, either in
+ * ppp_input(), ppp_decompress_frame() or in ppp_receive_mp_frame().
+ */
proto = PPP_PROTO(skb);
switch (proto) {
case PPP_VJC_COMP:
@@ -2245,6 +2288,9 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb)
skb_put(skb, len);
skb_pull(skb, 2); /* pull off the A/C bytes */
+ /* Don't call __ppp_decompress_proto() here, but instead rely on
+ * corresponding algo (mppe/bsd/deflate) to decompress it.
+ */
} else {
/* Uncompressed frame - pass to decompressor so it
can update its dictionary if necessary. */
@@ -2290,9 +2336,11 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
/*
* Do protocol ID decompression on the first fragment of each packet.
+ * We have to do that here, because ppp_receive_nonmp_frame() expects
+ * decompressed protocol field.
*/
- if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1))
- *(u8 *)skb_push(skb, 1) = 0;
+ if (PPP_MP_CB(skb)->BEbits & B)
+ __ppp_decompress_proto(skb);
/*
* Expand sequence number to 32 bits, making it as close
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 047f6c68a441..d02ba2494d93 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -709,11 +709,10 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
p = skb_pull(skb, 2);
}
- /* decompress protocol field if compressed */
- if (p[0] & 1) {
- /* protocol is compressed */
- *(u8 *)skb_push(skb, 1) = 0;
- } else if (skb->len < 2)
+ /* PPP packet length should be >= 2 bytes when protocol field is not
+ * compressed.
+ */
+ if (!(p[0] & 0x01) && skb->len < 2)
goto err;
/* queue the frame to be processed */
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 67ffe74747a1..8f09edd811e9 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -325,11 +325,6 @@ allow_packet:
skb_pull(skb, 2);
}
- if ((*skb->data) & 1) {
- /* protocol is compressed */
- *(u8 *)skb_push(skb, 1) = 0;
- }
-
skb->ip_summed = CHECKSUM_NONE;
skb_set_network_header(skb, skb->head-skb->data);
ppp_input(&po->chan, skb);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index e9f101c9bae2..bfbb39f93554 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -216,9 +216,9 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* it just reports sending a packet to the target
* (without actual packet transfer).
*/
- dev_kfree_skb_any(skb);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
}
}
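
The rionet hunk is a use-after-free fix: skb->len was read after
dev_kfree_skb_any() may already have freed the skb. The general shape
of the bug and of the fix:

	/* before: use after free */
	dev_kfree_skb_any(skb);
	ndev->stats.tx_bytes += skb->len;

	/* after: account first, release last */
	ndev->stats.tx_bytes += skb->len;
	dev_kfree_skb_any(skb);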
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index f03004f37eca..443b2694130c 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1113,7 +1113,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd,
rtnl_unlock();
return -ENOLINK;
}
- ret = dev_set_mac_address(tap->dev, &sa);
+ ret = dev_set_mac_address(tap->dev, &sa, NULL);
tap_put_tap_dev(tap);
rtnl_unlock();
return ret;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 364f514d56d8..afd9d25d1992 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -59,7 +59,7 @@ static int __set_port_dev_addr(struct net_device *port_dev,
memcpy(addr.__data, dev_addr, port_dev->addr_len);
addr.ss_family = port_dev->type;
- return dev_set_mac_address(port_dev, (struct sockaddr *)&addr);
+ return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
}
static int team_port_set_orig_dev_addr(struct team_port *port)
@@ -1212,7 +1212,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
goto err_port_enter;
}
- err = dev_open(port_dev);
+ err = dev_open(port_dev, extack);
if (err) {
netdev_dbg(dev, "Device %s opening failed\n",
portname);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e244f5d7512a..a4fdad475594 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -188,6 +188,11 @@ struct tun_file {
struct xdp_rxq_info xdp_rxq;
};
+struct tun_page {
+ struct page *page;
+ int count;
+};
+
struct tun_flow_entry {
struct hlist_node hash_link;
struct rcu_head rcu;
@@ -196,7 +201,7 @@ struct tun_flow_entry {
u32 rxhash;
u32 rps_rxhash;
int queue_index;
- unsigned long updated;
+ unsigned long updated ____cacheline_aligned_in_smp;
};
#define TUN_NUM_FLOW_ENTRIES 1024
@@ -524,18 +529,17 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
unsigned long delay = tun->ageing_time;
u16 queue_index = tfile->queue_index;
- if (!rxhash)
- return;
- else
- head = &tun->flows[tun_hashfn(rxhash)];
+ head = &tun->flows[tun_hashfn(rxhash)];
rcu_read_lock();
e = tun_flow_find(head, rxhash);
if (likely(e)) {
/* TODO: keep queueing to old queue until it's empty? */
- e->queue_index = queue_index;
- e->updated = jiffies;
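+			/* Store only on change, so the flow entry's
+			 * cacheline is not dirtied on every packet.
+			 */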
+ if (e->queue_index != queue_index)
+ e->queue_index = queue_index;
+ if (e->updated != jiffies)
+ e->updated = jiffies;
sock_rps_record_flow_hash(e->rps_rxhash);
} else {
spin_lock_bh(&tun->lock);
@@ -1249,6 +1253,21 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
+{
+ if (new_carrier) {
+ struct tun_struct *tun = netdev_priv(dev);
+
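+		/* Refuse to force carrier on while no queues are attached */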
+ if (!tun->numqueues)
+ return -EPERM;
+
+ netif_carrier_on(dev);
+ } else {
+ netif_carrier_off(dev);
+ }
+ return 0;
+}
+
static const struct net_device_ops tun_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
@@ -1258,6 +1277,7 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_select_queue = tun_select_queue,
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
+ .ndo_change_carrier = tun_net_change_carrier,
};
static void __tun_xdp_flush_tfile(struct tun_file *tfile)
@@ -1340,6 +1360,7 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_get_stats64 = tun_net_get_stats64,
.ndo_bpf = tun_xdp,
.ndo_xdp_xmit = tun_xdp_xmit,
+ .ndo_change_carrier = tun_net_change_carrier,
};
static void tun_flow_init(struct tun_struct *tun)
@@ -1473,23 +1494,22 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
skb->truesize += skb->data_len;
for (i = 1; i < it->nr_segs; i++) {
- struct page_frag *pfrag = &current->task_frag;
size_t fragsz = it->iov[i].iov_len;
+ struct page *page;
+ void *frag;
if (fragsz == 0 || fragsz > PAGE_SIZE) {
err = -EINVAL;
goto free;
}
-
- if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
+ frag = netdev_alloc_frag(fragsz);
+ if (!frag) {
err = -ENOMEM;
goto free;
}
-
- skb_fill_page_desc(skb, i - 1, pfrag->page,
- pfrag->offset, fragsz);
- page_ref_inc(pfrag->page);
- pfrag->offset += fragsz;
+ page = virt_to_head_page(frag);
+ skb_fill_page_desc(skb, i - 1, page,
+ frag - page_address(page), fragsz);
}
return skb;
@@ -2293,9 +2313,9 @@ static void tun_setup(struct net_device *dev)
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
- if (!data)
- return 0;
- return -EINVAL;
+ NL_SET_ERR_MSG(extack,
+ "tun/tap creation via rtnetlink is not supported.");
+ return -EOPNOTSUPP;
}
static size_t tun_get_size(const struct net_device *dev)
@@ -2381,10 +2401,18 @@ static void tun_sock_write_space(struct sock *sk)
kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
+static void tun_put_page(struct tun_page *tpage)
+{
+ if (tpage->page)
+ __page_frag_cache_drain(tpage->page, tpage->count);
+}
+
static int tun_xdp_one(struct tun_struct *tun,
struct tun_file *tfile,
- struct xdp_buff *xdp, int *flush)
+ struct xdp_buff *xdp, int *flush,
+ struct tun_page *tpage)
{
+ unsigned int datasize = xdp->data_end - xdp->data;
struct tun_xdp_hdr *hdr = xdp->data_hard_start;
struct virtio_net_hdr *gso = &hdr->gso;
struct tun_pcpu_stats *stats;
@@ -2394,6 +2422,7 @@ static int tun_xdp_one(struct tun_struct *tun,
int buflen = hdr->buflen;
int err = 0;
bool skb_xdp = false;
+ struct page *page;
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
@@ -2420,7 +2449,14 @@ static int tun_xdp_one(struct tun_struct *tun,
case XDP_PASS:
break;
default:
- put_page(virt_to_head_page(xdp->data));
+ page = virt_to_head_page(xdp->data);
+ if (tpage->page == page) {
+ ++tpage->count;
+ } else {
+ tun_put_page(tpage);
+ tpage->page = page;
+ tpage->count = 1;
+ }
return 0;
}
}
@@ -2452,18 +2488,21 @@ build:
goto out;
}
- if (!rcu_dereference(tun->steering_prog))
+ if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
+ !tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
skb_record_rx_queue(skb, tfile->queue_index);
netif_receive_skb(skb);
- stats = get_cpu_ptr(tun->pcpu_stats);
+ /* No need for get_cpu_ptr() here since this function is
+ * always called with bh disabled
+ */
+ stats = this_cpu_ptr(tun->pcpu_stats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += datasize;
u64_stats_update_end(&stats->syncp);
- put_cpu_ptr(stats);
if (rxhash)
tun_flow_update(tun, rxhash, tfile);
@@ -2484,15 +2523,18 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
return -EBADFD;
if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ struct tun_page tpage;
int n = ctl->num;
int flush = 0;
+ memset(&tpage, 0, sizeof(tpage));
+
local_bh_disable();
rcu_read_lock();
for (i = 0; i < n; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
- tun_xdp_one(tun, tfile, xdp, &flush);
+ tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
}
if (flush)
@@ -2501,6 +2543,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
rcu_read_unlock();
local_bh_enable();
+ tun_put_page(&tpage);
+
ret = total_len;
goto out;
}
@@ -2977,12 +3021,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
struct net *net = sock_net(&tfile->sk);
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
+ unsigned int ifindex, carrier;
struct ifreq ifr;
kuid_t owner;
kgid_t group;
int sndbuf;
int vnet_hdr_sz;
- unsigned int ifindex;
int le;
int ret;
bool do_notify = false;
@@ -3160,7 +3204,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
ifr.ifr_hwaddr.sa_data);
- ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
+ ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
break;
case TUNGETSNDBUF:
@@ -3266,6 +3310,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
break;
+ case TUNSETCARRIER:
+ ret = -EFAULT;
+ if (copy_from_user(&carrier, argp, sizeof(carrier)))
+ goto unlock;
+
+ ret = tun_net_change_carrier(tun->dev, (bool)carrier);
+ break;
+
default:
ret = -EINVAL;
break;
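
The tun_page changes batch page reference drops across one TUN_MSG_PTR
array: instead of a put_page() per dropped XDP frame, consecutive
frames sharing a head page are counted and released with a single
__page_frag_cache_drain() call. A sketch of the batching, assuming the
struct from the hunk above:

	struct tun_page {
		struct page *page;
		int count;
	};

	static void defer_page_put(struct tun_page *tpage, struct page *page)
	{
		if (tpage->page == page) {
			tpage->count++;		/* same head page: count it */
		} else {
			if (tpage->page)	/* flush the previous batch */
				__page_frag_cache_drain(tpage->page,
							tpage->count);
			tpage->page = page;
			tpage->count = 1;
		}
	}

After the loop over the frame array, tun_sendmsg() flushes whatever
remains via tun_put_page(), as the hunks above show.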
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 418b0904cecb..860352a525fb 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -613,4 +613,15 @@ config USB_NET_CH9200
To compile this driver as a module, choose M here: the
module will be called ch9200.
+config USB_NET_AQC111
+ tristate "Aquantia AQtion USB to 5/2.5GbE Controllers support"
+ depends on USB_USBNET
+ select CRC32
+ help
+ This option adds support for Aquantia AQtion USB
+ Ethernet adapters based on AQC111U/AQC112 chips.
+
+ This driver should work with at least the following devices:
+ * Aquantia AQtion USB to 5GbE
+
endif # USB_NET_DRIVERS
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 27307a4ab003..99fd12be2111 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_USB_VL600) += lg-vl600.o
obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
obj-$(CONFIG_USB_NET_CH9200) += ch9200.o
+obj-$(CONFIG_USB_NET_AQC111) += aqc111.o
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
new file mode 100644
index 000000000000..57f1c94fca0b
--- /dev/null
+++ b/drivers/net/usb/aqc111.c
@@ -0,0 +1,1459 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
+ * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
+ * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
+ * Copyright (C) 2002-2003 TiVo Inc.
+ * Copyright (C) 2017-2018 ASIX
+ * Copyright (C) 2018 Aquantia Corp.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/usb/cdc.h>
+#include <linux/usb/usbnet.h>
+#include <linux/linkmode.h>
+
+#include "aqc111.h"
+
+#define DRIVER_NAME "aqc111"
+
+static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ int ret;
+
+ ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, data, size);
+
+ if (unlikely(ret < 0))
+ netdev_warn(dev->net,
+ "Failed to read(0x%x) reg index 0x%04x: %d\n",
+ cmd, index, ret);
+
+ return ret;
+}
+
+static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ int ret;
+
+ ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, data, size);
+
+ if (unlikely(ret < 0))
+ netdev_warn(dev->net,
+ "Failed to read(0x%x) reg index 0x%04x: %d\n",
+ cmd, index, ret);
+
+ return ret;
+}
+
+static int aqc111_read16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 *data)
+{
+ int ret = 0;
+
+ ret = aqc111_read_cmd_nopm(dev, cmd, value, index, sizeof(*data), data);
+ le16_to_cpus(data);
+
+ return ret;
+}
+
+static int aqc111_read16_cmd(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 *data)
+{
+ int ret = 0;
+
+ ret = aqc111_read_cmd(dev, cmd, value, index, sizeof(*data), data);
+ le16_to_cpus(data);
+
+ return ret;
+}
+
+static int __aqc111_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype,
+ u16 value, u16 index, u16 size, const void *data)
+{
+ int err = -ENOMEM;
+ void *buf = NULL;
+
+ netdev_dbg(dev->net,
+ "%s cmd=%#x reqtype=%#x value=%#x index=%#x size=%d\n",
+ __func__, cmd, reqtype, value, index, size);
+
+ if (data) {
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ goto out;
+ }
+
+ err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ cmd, reqtype, value, index, buf, size,
+ (cmd == AQ_PHY_POWER) ? AQ_USB_PHY_SET_TIMEOUT :
+ AQ_USB_SET_TIMEOUT);
+
+ if (unlikely(err < 0))
+ netdev_warn(dev->net,
+ "Failed to write(0x%x) reg index 0x%04x: %d\n",
+ cmd, index, err);
+ kfree(buf);
+
+out:
+ return err;
+}
+
+static int aqc111_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ int ret;
+
+ ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, size, data);
+
+ return ret;
+}
+
+static int aqc111_write_cmd(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ int ret;
+
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return -ENODEV;
+
+ ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, size, data);
+
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
+}
+
+static int aqc111_write16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 *data)
+{
+ u16 tmp = *data;
+
+ cpu_to_le16s(&tmp);
+
+ return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp);
+}
+
+static int aqc111_write16_cmd(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 *data)
+{
+ u16 tmp = *data;
+
+ cpu_to_le16s(&tmp);
+
+ return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp);
+}
+
+static int aqc111_write32_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u32 *data)
+{
+ u32 tmp = *data;
+
+ cpu_to_le32s(&tmp);
+
+ return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp);
+}
+
+static int aqc111_write32_cmd(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u32 *data)
+{
+ u32 tmp = *data;
+
+ cpu_to_le32s(&tmp);
+
+ return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp);
+}
+
+static int aqc111_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data)
+{
+ return usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, data,
+ size);
+}
+
+static int aqc111_write16_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 *data)
+{
+ u16 tmp = *data;
+
+ cpu_to_le16s(&tmp);
+
+ return aqc111_write_cmd_async(dev, cmd, value, index,
+ sizeof(tmp), &tmp);
+}
+
+static void aqc111_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u",
+ aqc111_data->fw_ver.major,
+ aqc111_data->fw_ver.minor,
+ aqc111_data->fw_ver.rev);
+ info->eedump_len = 0x00;
+ info->regdump_len = 0x00;
+}
+
+static void aqc111_get_wol(struct net_device *net,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+
+ wolinfo->supported = WAKE_MAGIC;
+ wolinfo->wolopts = 0;
+
+ if (aqc111_data->wol_flags & AQ_WOL_FLAG_MP)
+ wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+static int aqc111_set_wol(struct net_device *net,
+ struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+
+ if (wolinfo->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ aqc111_data->wol_flags = 0;
+ if (wolinfo->wolopts & WAKE_MAGIC)
+ aqc111_data->wol_flags |= AQ_WOL_FLAG_MP;
+
+ return 0;
+}
+
+static void aqc111_speed_to_link_mode(u32 speed,
+ struct ethtool_link_ksettings *elk)
+{
+ switch (speed) {
+ case SPEED_5000:
+ ethtool_link_ksettings_add_link_mode(elk, advertising,
+ 5000baseT_Full);
+ break;
+ case SPEED_2500:
+ ethtool_link_ksettings_add_link_mode(elk, advertising,
+ 2500baseT_Full);
+ break;
+ case SPEED_1000:
+ ethtool_link_ksettings_add_link_mode(elk, advertising,
+ 1000baseT_Full);
+ break;
+ case SPEED_100:
+ ethtool_link_ksettings_add_link_mode(elk, advertising,
+ 100baseT_Full);
+ break;
+ }
+}
+
+static int aqc111_get_link_ksettings(struct net_device *net,
+ struct ethtool_link_ksettings *elk)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ enum usb_device_speed usb_speed = dev->udev->speed;
+ u32 speed = SPEED_UNKNOWN;
+
+ ethtool_link_ksettings_zero_link_mode(elk, supported);
+ ethtool_link_ksettings_add_link_mode(elk, supported,
+ 100baseT_Full);
+ ethtool_link_ksettings_add_link_mode(elk, supported,
+ 1000baseT_Full);
+ if (usb_speed == USB_SPEED_SUPER) {
+ ethtool_link_ksettings_add_link_mode(elk, supported,
+ 2500baseT_Full);
+ ethtool_link_ksettings_add_link_mode(elk, supported,
+ 5000baseT_Full);
+ }
+ ethtool_link_ksettings_add_link_mode(elk, supported, TP);
+ ethtool_link_ksettings_add_link_mode(elk, supported, Autoneg);
+
+ elk->base.port = PORT_TP;
+ elk->base.transceiver = XCVR_INTERNAL;
+
+	elk->base.mdio_support = 0x00; /* Not supported */
+
+ if (aqc111_data->autoneg)
+ linkmode_copy(elk->link_modes.advertising,
+ elk->link_modes.supported);
+ else
+ aqc111_speed_to_link_mode(aqc111_data->advertised_speed, elk);
+
+ elk->base.autoneg = aqc111_data->autoneg;
+
+ switch (aqc111_data->link_speed) {
+ case AQ_INT_SPEED_5G:
+ speed = SPEED_5000;
+ break;
+ case AQ_INT_SPEED_2_5G:
+ speed = SPEED_2500;
+ break;
+ case AQ_INT_SPEED_1G:
+ speed = SPEED_1000;
+ break;
+ case AQ_INT_SPEED_100M:
+ speed = SPEED_100;
+ break;
+ }
+ elk->base.duplex = DUPLEX_FULL;
+ elk->base.speed = speed;
+
+ return 0;
+}
+
+static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+
+ aqc111_data->phy_cfg &= ~AQ_ADV_MASK;
+ aqc111_data->phy_cfg |= AQ_PAUSE;
+ aqc111_data->phy_cfg |= AQ_ASYM_PAUSE;
+ aqc111_data->phy_cfg |= AQ_DOWNSHIFT;
+ aqc111_data->phy_cfg &= ~AQ_DSH_RETRIES_MASK;
+ aqc111_data->phy_cfg |= (3 << AQ_DSH_RETRIES_SHIFT) &
+ AQ_DSH_RETRIES_MASK;
+
+ if (autoneg == AUTONEG_ENABLE) {
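+		/* Advertise the requested speed and, via the fall-throughs
+		 * below, every lower supported speed as well.
+		 */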
+ switch (speed) {
+ case SPEED_5000:
+ aqc111_data->phy_cfg |= AQ_ADV_5G;
+ /* fall-through */
+ case SPEED_2500:
+ aqc111_data->phy_cfg |= AQ_ADV_2G5;
+ /* fall-through */
+ case SPEED_1000:
+ aqc111_data->phy_cfg |= AQ_ADV_1G;
+ /* fall-through */
+ case SPEED_100:
+ aqc111_data->phy_cfg |= AQ_ADV_100M;
+ /* fall-through */
+ }
+ } else {
+ switch (speed) {
+ case SPEED_5000:
+ aqc111_data->phy_cfg |= AQ_ADV_5G;
+ break;
+ case SPEED_2500:
+ aqc111_data->phy_cfg |= AQ_ADV_2G5;
+ break;
+ case SPEED_1000:
+ aqc111_data->phy_cfg |= AQ_ADV_1G;
+ break;
+ case SPEED_100:
+ aqc111_data->phy_cfg |= AQ_ADV_100M;
+ break;
+ }
+ }
+
+ aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg);
+}
+
+static int aqc111_set_link_ksettings(struct net_device *net,
+ const struct ethtool_link_ksettings *elk)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ enum usb_device_speed usb_speed = dev->udev->speed;
+ u8 autoneg = elk->base.autoneg;
+ u32 speed = elk->base.speed;
+
+ if (autoneg == AUTONEG_ENABLE) {
+ if (aqc111_data->autoneg != AUTONEG_ENABLE) {
+ aqc111_data->autoneg = AUTONEG_ENABLE;
+ aqc111_data->advertised_speed =
+ (usb_speed == USB_SPEED_SUPER) ?
+ SPEED_5000 : SPEED_1000;
+ aqc111_set_phy_speed(dev, aqc111_data->autoneg,
+ aqc111_data->advertised_speed);
+ }
+ } else {
+ if (speed != SPEED_100 &&
+ speed != SPEED_1000 &&
+ speed != SPEED_2500 &&
+ speed != SPEED_5000 &&
+ speed != SPEED_UNKNOWN)
+ return -EINVAL;
+
+ if (elk->base.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ if (usb_speed != USB_SPEED_SUPER && speed > SPEED_1000)
+ return -EINVAL;
+
+ aqc111_data->autoneg = AUTONEG_DISABLE;
+ if (speed != SPEED_UNKNOWN)
+ aqc111_data->advertised_speed = speed;
+
+ aqc111_set_phy_speed(dev, aqc111_data->autoneg,
+ aqc111_data->advertised_speed);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops aqc111_ethtool_ops = {
+ .get_drvinfo = aqc111_get_drvinfo,
+ .get_wol = aqc111_get_wol,
+ .set_wol = aqc111_set_wol,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = aqc111_get_link_ksettings,
+ .set_link_ksettings = aqc111_set_link_ksettings
+};
+
+static int aqc111_change_mtu(struct net_device *net, int new_mtu)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u16 reg16 = 0;
+ u8 buf[5];
+
+ net->mtu = new_mtu;
+ dev->hard_mtu = net->mtu + net->hard_header_len;
+
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ if (net->mtu > 1500)
+ reg16 |= SFR_MEDIUM_JUMBO_EN;
+ else
+ reg16 &= ~SFR_MEDIUM_JUMBO_EN;
+
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ if (dev->net->mtu > 12500 && dev->net->mtu <= 16334) {
+ memcpy(buf, &AQC111_BULKIN_SIZE[2], 5);
+ /* RX bulk configuration */
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL,
+ 5, 5, buf);
+ }
+
+	/* Set the high/low water levels */
+ if (dev->net->mtu <= 4500)
+ reg16 = 0x0810;
+ else if (dev->net->mtu <= 9500)
+ reg16 = 0x1020;
+ else if (dev->net->mtu <= 12500)
+ reg16 = 0x1420;
+ else if (dev->net->mtu <= 16334)
+ reg16 = 0x1A20;
+
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW,
+ 2, &reg16);
+
+ return 0;
+}
+
+static int aqc111_set_mac_addr(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ int ret = 0;
+
+ ret = eth_mac_addr(net, p);
+ if (ret < 0)
+ return ret;
+
+ /* Set the MAC address */
+ return aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN,
+ ETH_ALEN, net->dev_addr);
+}
+
+static int aqc111_vlan_rx_kill_vid(struct net_device *net,
+ __be16 proto, u16 vid)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 vlan_ctrl = 0;
+ u16 reg16 = 0;
+ u8 reg8 = 0;
+
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ vlan_ctrl = reg8;
+
+ /* Address */
+ reg8 = (vid / 16);
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8);
+ /* Data */
+ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
+ reg16 &= ~(1 << (vid % 16));
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
+ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_WE;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+
+ return 0;
+}
+
+static int aqc111_vlan_rx_add_vid(struct net_device *net, __be16 proto, u16 vid)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 vlan_ctrl = 0;
+ u16 reg16 = 0;
+ u8 reg8 = 0;
+
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ vlan_ctrl = reg8;
+
+ /* Address */
+ reg8 = (vid / 16);
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8);
+ /* Data */
+ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
+ reg16 |= (1 << (vid % 16));
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16);
+ reg8 = vlan_ctrl | SFR_VLAN_CONTROL_WE;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+
+ return 0;
+}
+
+static void aqc111_set_rx_mode(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ int mc_count = 0;
+
+ mc_count = netdev_mc_count(net);
+
+ aqc111_data->rxctl &= ~(SFR_RX_CTL_PRO | SFR_RX_CTL_AMALL |
+ SFR_RX_CTL_AM);
+
+ if (net->flags & IFF_PROMISC) {
+ aqc111_data->rxctl |= SFR_RX_CTL_PRO;
+ } else if ((net->flags & IFF_ALLMULTI) || mc_count > AQ_MAX_MCAST) {
+ aqc111_data->rxctl |= SFR_RX_CTL_AMALL;
+ } else if (!netdev_mc_empty(net)) {
+ u8 m_filter[AQ_MCAST_FILTER_SIZE] = { 0 };
+ struct netdev_hw_addr *ha = NULL;
+ u32 crc_bits = 0;
+
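+		/* Imperfect hash filter: the top 6 bits of the Ethernet CRC
+		 * pick one of 64 bits in the multicast filter array.
+		 */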
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ m_filter[crc_bits >> 3] |= BIT(crc_bits & 7);
+ }
+
+ aqc111_write_cmd_async(dev, AQ_ACCESS_MAC,
+ SFR_MULTI_FILTER_ARRY,
+ AQ_MCAST_FILTER_SIZE,
+ AQ_MCAST_FILTER_SIZE, m_filter);
+
+ aqc111_data->rxctl |= SFR_RX_CTL_AM;
+ }
+
+ aqc111_write16_cmd_async(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
+ 2, &aqc111_data->rxctl);
+}
+
+static int aqc111_set_features(struct net_device *net,
+ netdev_features_t features)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ netdev_features_t changed = net->features ^ features;
+ u16 reg16 = 0;
+ u8 reg8 = 0;
+
+ if (changed & NETIF_F_IP_CSUM) {
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8);
+ reg8 ^= SFR_TXCOE_TCP | SFR_TXCOE_UDP;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL,
+ 1, 1, &reg8);
+ }
+
+ if (changed & NETIF_F_IPV6_CSUM) {
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8);
+ reg8 ^= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL,
+ 1, 1, &reg8);
+ }
+
+ if (changed & NETIF_F_RXCSUM) {
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8);
+ if (features & NETIF_F_RXCSUM) {
+ aqc111_data->rx_checksum = 1;
+ reg8 &= ~(SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP |
+ SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6);
+ } else {
+ aqc111_data->rx_checksum = 0;
+ reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP |
+ SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6;
+ }
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL,
+ 1, 1, &reg8);
+ }
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ u16 i = 0;
+
+ for (i = 0; i < 256; i++) {
+ /* Address */
+ reg8 = i;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC,
+ SFR_VLAN_ID_ADDRESS,
+ 1, 1, &reg8);
+ /* Data */
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC,
+ SFR_VLAN_ID_DATA0,
+ 2, &reg16);
+ reg8 = SFR_VLAN_CONTROL_WE;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC,
+ SFR_VLAN_ID_CONTROL,
+ 1, 1, &reg8);
+ }
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL,
+ 1, 1, &reg8);
+ reg8 |= SFR_VLAN_CONTROL_VFE;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC,
+ SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ } else {
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL,
+ 1, 1, &reg8);
+ reg8 &= ~SFR_VLAN_CONTROL_VFE;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC,
+ SFR_VLAN_ID_CONTROL, 1, 1, &reg8);
+ }
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops aqc111_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_get_stats64 = usbnet_get_stats64,
+ .ndo_change_mtu = aqc111_change_mtu,
+ .ndo_set_mac_address = aqc111_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = aqc111_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = aqc111_vlan_rx_kill_vid,
+ .ndo_set_rx_mode = aqc111_set_rx_mode,
+ .ndo_set_features = aqc111_set_features,
+};
+
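+/* Read the permanent MAC address out of the adapter's flash */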
+static int aqc111_read_perm_mac(struct usbnet *dev)
+{
+ u8 buf[ETH_ALEN];
+ int ret;
+
+ ret = aqc111_read_cmd(dev, AQ_FLASH_PARAMETERS, 0, 0, ETH_ALEN, buf);
+ if (ret < 0)
+ goto out;
+
+ ether_addr_copy(dev->net->perm_addr, buf);
+
+ return 0;
+out:
+ return ret;
+}
+
+static void aqc111_read_fw_version(struct usbnet *dev,
+ struct aqc111_data *aqc111_data)
+{
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MAJOR,
+ 1, 1, &aqc111_data->fw_ver.major);
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MINOR,
+ 1, 1, &aqc111_data->fw_ver.minor);
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_REV,
+ 1, 1, &aqc111_data->fw_ver.rev);
+
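+ /* The top bit of the major version looks like a status flag rather
+ * than part of the number, so strip it.
+ */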
+ if (aqc111_data->fw_ver.major & 0x80)
+ aqc111_data->fw_ver.major &= ~0x80;
+}
+
+static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ enum usb_device_speed usb_speed = udev->speed;
+ struct aqc111_data *aqc111_data;
+ int ret;
+
+ /* Check that the vendor configuration (bConfigurationValue 1) is active */
+ if (udev->actconfig->desc.bConfigurationValue != 1) {
+ usb_driver_set_configuration(udev, 1);
+ return -ENODEV;
+ }
+
+ usb_reset_configuration(dev->udev);
+
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "usbnet_get_endpoints failed\n");
+ return ret;
+ }
+
+ aqc111_data = kzalloc(sizeof(*aqc111_data), GFP_KERNEL);
+ if (!aqc111_data)
+ return -ENOMEM;
+
+ /* store aqc111_data pointer in device data field */
+ dev->driver_priv = aqc111_data;
+
+ /* Init the MAC address */
+ ret = aqc111_read_perm_mac(dev);
+ if (ret)
+ goto out;
+
+ ether_addr_copy(dev->net->dev_addr, dev->net->perm_addr);
+
+ /* Set Rx urb size */
+ dev->rx_urb_size = URB_SIZE;
+
+ /* Set TX needed headroom & tailroom */
+ dev->net->needed_headroom += sizeof(u64);
+ dev->net->needed_tailroom += sizeof(u64);
+
+ dev->net->max_mtu = 16334;
+
+ dev->net->netdev_ops = &aqc111_netdev_ops;
+ dev->net->ethtool_ops = &aqc111_ethtool_ops;
+
+ if (usb_device_no_sg_constraint(dev->udev))
+ dev->can_dma_sg = 1;
+
+ dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE;
+ dev->net->features |= AQ_SUPPORT_FEATURE;
+ dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE;
+
+ netif_set_gso_max_size(dev->net, 65535);
+
+ aqc111_read_fw_version(dev, aqc111_data);
+ aqc111_data->autoneg = AUTONEG_ENABLE;
+ aqc111_data->advertised_speed = (usb_speed == USB_SPEED_SUPER) ?
+ SPEED_5000 : SPEED_1000;
+
+ return 0;
+
+out:
+ kfree(aqc111_data);
+ return ret;
+}
+
+static void aqc111_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u16 reg16;
+
+ /* Force bz */
+ reg16 = SFR_PHYPWR_RSTCTL_BZ;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
+ 2, &reg16);
+ reg16 = 0;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
+ 2, &reg16);
+
+ /* Power down ethernet PHY */
+ aqc111_data->phy_cfg &= ~AQ_ADV_MASK;
+ aqc111_data->phy_cfg |= AQ_LOW_POWER;
+ aqc111_data->phy_cfg &= ~AQ_PHY_POWER_EN;
+ aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
+
+ kfree(aqc111_data);
+}
+
+static void aqc111_status(struct usbnet *dev, struct urb *urb)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u64 *event_data = NULL;
+ int link = 0;
+
+ if (urb->actual_length < sizeof(*event_data))
+ return;
+
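+ /* The interrupt URB carries a little-endian 64-bit link status word */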
+ event_data = urb->transfer_buffer;
+ le64_to_cpus(event_data);
+
+ if (*event_data & AQ_LS_MASK)
+ link = 1;
+ else
+ link = 0;
+
+ aqc111_data->link_speed = (*event_data & AQ_SPEED_MASK) >>
+ AQ_SPEED_SHIFT;
+ aqc111_data->link = link;
+
+ if (netif_carrier_ok(dev->net) != link)
+ usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+}
+
+static void aqc111_configure_rx(struct usbnet *dev,
+ struct aqc111_data *aqc111_data)
+{
+ enum usb_device_speed usb_speed = dev->udev->speed;
+ u16 link_speed = 0, usb_host = 0;
+ u8 buf[5] = { 0 };
+ u8 queue_num = 0;
+ u16 reg16 = 0;
+ u8 reg8 = 0;
+
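+ /* Defaults for the 3-byte pause resend timer; the 100M case below
+ * overrides them.
+ */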
+ buf[0] = 0x00;
+ buf[1] = 0xF8;
+ buf[2] = 0x07;
+ switch (aqc111_data->link_speed) {
+ case AQ_INT_SPEED_5G:
+ link_speed = 5000;
+ reg8 = 0x05;
+ reg16 = 0x001F;
+ break;
+ case AQ_INT_SPEED_2_5G:
+ link_speed = 2500;
+ reg16 = 0x003F;
+ break;
+ case AQ_INT_SPEED_1G:
+ link_speed = 1000;
+ reg16 = 0x009F;
+ break;
+ case AQ_INT_SPEED_100M:
+ link_speed = 100;
+ queue_num = 1;
+ reg16 = 0x063F;
+ buf[1] = 0xFB;
+ buf[2] = 0x4;
+ break;
+ }
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_INTER_PACKET_GAP_0,
+ 1, 1, &reg8);
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TX_PAUSE_RESEND_T, 3, 3, buf);
+
+ switch (usb_speed) {
+ case USB_SPEED_SUPER:
+ usb_host = 3;
+ break;
+ case USB_SPEED_HIGH:
+ usb_host = 2;
+ break;
+ case USB_SPEED_FULL:
+ case USB_SPEED_LOW:
+ usb_host = 1;
+ queue_num = 0;
+ break;
+ default:
+ usb_host = 0;
+ break;
+ }
+
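+ /* Pick a bulk-in aggregation profile: 0 by default, 1 for 100M
+ * links, 2 for jumbo MTUs.
+ */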
+ if (dev->net->mtu > 12500 && dev->net->mtu <= 16334)
+ queue_num = 2; /* For Jumbo packet 16KB */
+
+ memcpy(buf, &AQC111_BULKIN_SIZE[queue_num], 5);
+ /* RX bulk configuration */
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, 5, 5, buf);
+
+ /* Set pause frame high/low water levels based on the MTU */
+ if (dev->net->mtu <= 4500)
+ reg16 = 0x0810;
+ else if (dev->net->mtu <= 9500)
+ reg16 = 0x1020;
+ else if (dev->net->mtu <= 12500)
+ reg16 = 0x1420;
+ else if (dev->net->mtu <= 16334)
+ reg16 = 0x1A20;
+
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW,
+ 2, &reg16);
+ netdev_info(dev->net, "Link Speed %d, USB %d\n", link_speed, usb_host);
+}
+
+static void aqc111_configure_csum_offload(struct usbnet *dev)
+{
+ u8 reg8 = 0;
+
+ if (dev->net->features & NETIF_F_RXCSUM) {
+ reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP |
+ SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6;
+ }
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8);
+
+ reg8 = 0;
+ if (dev->net->features & NETIF_F_IP_CSUM)
+ reg8 |= SFR_TXCOE_IP | SFR_TXCOE_TCP | SFR_TXCOE_UDP;
+
+ if (dev->net->features & NETIF_F_IPV6_CSUM)
+ reg8 |= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6;
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8);
+}
+
+static int aqc111_link_reset(struct usbnet *dev)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u16 reg16 = 0;
+ u8 reg8 = 0;
+
+ if (aqc111_data->link == 1) { /* Link up */
+ aqc111_configure_rx(dev, aqc111_data);
+
+ /* VLAN tag filter */
+ reg8 = SFR_VLAN_CONTROL_VSO;
+ if (dev->net->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ reg8 |= SFR_VLAN_CONTROL_VFE;
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL,
+ 1, 1, &reg8);
+
+ reg8 = 0x0;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL,
+ 1, 1, &reg8);
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMTX_DMA_CONTROL,
+ 1, 1, &reg8);
+
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ARC_CTRL, 1, 1, &reg8);
+
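+ /* Initial RX control value: the IPE and AB bits (by their names,
+ * IP checksum checking and broadcast accept).
+ */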
+ reg16 = SFR_RX_CTL_IPE | SFR_RX_CTL_AB;
+ aqc111_data->rxctl = reg16;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+
+ reg8 = SFR_RX_PATH_READY;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
+ 1, 1, &reg8);
+
+ reg8 = SFR_BULK_OUT_EFF_EN;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
+ 1, 1, &reg8);
+
+ reg16 = 0;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ reg16 = SFR_MEDIUM_XGMIIMODE | SFR_MEDIUM_FULL_DUPLEX;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ aqc111_configure_csum_offload(dev);
+
+ aqc111_set_rx_mode(dev->net);
+
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ if (dev->net->mtu > 1500)
+ reg16 |= SFR_MEDIUM_JUMBO_EN;
+
+ reg16 |= SFR_MEDIUM_RECEIVE_EN | SFR_MEDIUM_RXFLOW_CTRLEN |
+ SFR_MEDIUM_TXFLOW_CTRLEN;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ aqc111_data->rxctl |= SFR_RX_CTL_START;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
+ 2, &aqc111_data->rxctl);
+
+ netif_carrier_on(dev->net);
+ } else {
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ reg16 &= ~SFR_MEDIUM_RECEIVE_EN;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+
+ aqc111_data->rxctl &= ~SFR_RX_CTL_START;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
+ 2, &aqc111_data->rxctl);
+
+ reg8 = SFR_BULK_OUT_FLUSH_EN | SFR_BULK_OUT_EFF_EN;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
+ 1, 1, &reg8);
+ reg8 = SFR_BULK_OUT_EFF_EN;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
+ 1, 1, &reg8);
+
+ netif_carrier_off(dev->net);
+ }
+ return 0;
+}
+
+static int aqc111_reset(struct usbnet *dev)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u8 reg8 = 0;
+
+ dev->rx_urb_size = URB_SIZE;
+
+ if (usb_device_no_sg_constraint(dev->udev))
+ dev->can_dma_sg = 1;
+
+ dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE;
+ dev->net->features |= AQ_SUPPORT_FEATURE;
+ dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE;
+
+ /* Power up ethernet PHY */
+ aqc111_data->phy_cfg = AQ_PHY_POWER_EN;
+ aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
+
+ /* Set the MAC address */
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN,
+ ETH_ALEN, dev->net->dev_addr);
+
+ reg8 = 0xFF;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, 1, 1, &reg8);
+
+ reg8 = 0x0;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_SWP_CTRL, 1, 1, &reg8);
+
+ aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8);
+ reg8 &= ~(SFR_MONITOR_MODE_EPHYRW | SFR_MONITOR_MODE_RWLC |
+ SFR_MONITOR_MODE_RWMP | SFR_MONITOR_MODE_RWWF |
+ SFR_MONITOR_MODE_RW_FLAG);
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8);
+
+ netif_carrier_off(dev->net);
+
+ /* Phy advertise */
+ aqc111_set_phy_speed(dev, aqc111_data->autoneg,
+ aqc111_data->advertised_speed);
+
+ return 0;
+}
+
+static int aqc111_stop(struct usbnet *dev)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u16 reg16 = 0;
+
+ aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ reg16 &= ~SFR_MEDIUM_RECEIVE_EN;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ reg16 = 0;
+ aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+
+ /* Put the PHY into low-power mode */
+ aqc111_data->phy_cfg |= AQ_LOW_POWER;
+ aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
+
+ netif_carrier_off(dev->net);
+
+ return 0;
+}
+
+static void aqc111_rx_checksum(struct sk_buff *skb, u64 pkt_desc)
+{
+ u32 pkt_type = 0;
+
+ skb->ip_summed = CHECKSUM_NONE;
+ /* leave CHECKSUM_NONE if an L3/L4 checksum error bit is set */
+ if (pkt_desc & AQ_RX_PD_L4_ERR || pkt_desc & AQ_RX_PD_L3_ERR)
+ return;
+
+ pkt_type = pkt_desc & AQ_RX_PD_L4_TYPE_MASK;
+ /* It must be a TCP or UDP packet with a valid checksum */
+ if (pkt_type == AQ_RX_PD_L4_TCP || pkt_type == AQ_RX_PD_L4_UDP)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ struct sk_buff *new_skb = NULL;
+ u32 pkt_total_offset = 0;
+ u64 *pkt_desc_ptr = NULL;
+ u32 start_of_descs = 0;
+ u32 desc_offset = 0; /* RX Header Offset */
+ u16 pkt_count = 0;
+ u64 desc_hdr = 0;
+ u16 vlan_tag = 0;
+ u32 skb_len = 0;
+
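+ /* A bulk-in buffer ends with a 64-bit descriptor header; the
+ * per-packet descriptors sit immediately before it, and the packets
+ * themselves (each padded to 8 bytes) fill the buffer from the start.
+ */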
+ if (!skb)
+ goto err;
+
+ if (skb->len == 0)
+ goto err;
+
+ skb_len = skb->len;
+ /* RX Descriptor Header */
+ skb_trim(skb, skb->len - sizeof(desc_hdr));
+ desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb));
+
+ /* Parse the descriptor offset and packet count from the header */
+ desc_offset = (desc_hdr & AQ_RX_DH_DESC_OFFSET_MASK) >>
+ AQ_RX_DH_DESC_OFFSET_SHIFT;
+ pkt_count = desc_hdr & AQ_RX_DH_PKT_CNT_MASK;
+ start_of_descs = skb_len - ((pkt_count + 1) * sizeof(desc_hdr));
+
+ /* sanity-check the position of the descriptor array */
+ if (start_of_descs != desc_offset)
+ goto err;
+
+ /* sanity-check desc_offset from the header */
+ if (desc_offset >= skb_len)
+ goto err;
+
+ if (pkt_count == 0)
+ goto err;
+
+ /* Get the first RX packet descriptor */
+ pkt_desc_ptr = (u64 *)(skb->data + desc_offset);
+
+ while (pkt_count--) {
+ u64 pkt_desc = le64_to_cpup(pkt_desc_ptr);
+ u32 pkt_len_with_padd = 0;
+ u32 pkt_len = 0;
+
+ pkt_len = (u32)((pkt_desc & AQ_RX_PD_LEN_MASK) >>
+ AQ_RX_PD_LEN_SHIFT);
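+ /* packet lengths are padded up to an 8-byte boundary */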
+ pkt_len_with_padd = ((pkt_len + 7) & 0x7FFF8);
+
+ pkt_total_offset += pkt_len_with_padd;
+ if (pkt_total_offset > desc_offset ||
+ (pkt_count == 0 && pkt_total_offset != desc_offset)) {
+ goto err;
+ }
+
+ if (pkt_desc & AQ_RX_PD_DROP ||
+ !(pkt_desc & AQ_RX_PD_RX_OK) ||
+ pkt_len > (dev->hard_mtu + AQ_RX_HW_PAD)) {
+ skb_pull(skb, pkt_len_with_padd);
+ /* Next RX Packet Descriptor */
+ pkt_desc_ptr++;
+ continue;
+ }
+
+ /* Clone SKB */
+ new_skb = skb_clone(skb, GFP_ATOMIC);
+
+ if (!new_skb)
+ goto err;
+
+ new_skb->len = pkt_len;
+ skb_pull(new_skb, AQ_RX_HW_PAD);
+ skb_set_tail_pointer(new_skb, new_skb->len);
+
+ new_skb->truesize = SKB_TRUESIZE(new_skb->len);
+ if (aqc111_data->rx_checksum)
+ aqc111_rx_checksum(new_skb, pkt_desc);
+
+ if (pkt_desc & AQ_RX_PD_VLAN) {
+ vlan_tag = pkt_desc >> AQ_RX_PD_VLAN_SHIFT;
+ __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
+ vlan_tag & VLAN_VID_MASK);
+ }
+
+ usbnet_skb_return(dev, new_skb);
+ if (pkt_count == 0)
+ break;
+
+ skb_pull(skb, pkt_len_with_padd);
+
+ /* Next RX Packet Descriptor */
+ pkt_desc_ptr++;
+
+ new_skb = NULL;
+ }
+
+ return 1;
+
+err:
+ return 0;
+}
+
+static struct sk_buff *aqc111_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int frame_size = dev->maxpacket;
+ struct sk_buff *new_skb = NULL;
+ u64 *tx_desc_ptr = NULL;
+ int padding_size = 0;
+ int headroom = 0;
+ int tailroom = 0;
+ u64 tx_desc = 0;
+ u16 tci = 0;
+
+ /* Length of actual data */
+ tx_desc |= skb->len & AQ_TX_DESC_LEN_MASK;
+
+ /* TSO MSS */
+ tx_desc |= ((u64)(skb_shinfo(skb)->gso_size & AQ_TX_DESC_MSS_MASK)) <<
+ AQ_TX_DESC_MSS_SHIFT;
+
+ headroom = (skb->len + sizeof(tx_desc)) % 8;
+ if (headroom != 0)
+ padding_size = 8 - headroom;
+
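+ /* If the frame would end exactly on a USB packet boundary, add 8
+ * more pad bytes and flag them for the device to drop (presumably
+ * so the transfer terminates with a short packet).
+ */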
+ if (((skb->len + sizeof(tx_desc) + padding_size) % frame_size) == 0) {
+ padding_size += 8;
+ tx_desc |= AQ_TX_DESC_DROP_PADD;
+ }
+
+ /* VLAN tag */
+ if (vlan_get_tag(skb, &tci) >= 0) {
+ tx_desc |= AQ_TX_DESC_VLAN;
+ tx_desc |= ((u64)tci & AQ_TX_DESC_VLAN_MASK) <<
+ AQ_TX_DESC_VLAN_SHIFT;
+ }
+
+ if (!dev->can_dma_sg && (dev->net->features & NETIF_F_SG) &&
+ skb_linearize(skb))
+ return NULL;
+
+ headroom = skb_headroom(skb);
+ tailroom = skb_tailroom(skb);
+
+ if (!(headroom >= sizeof(tx_desc) && tailroom >= padding_size)) {
+ new_skb = skb_copy_expand(skb, sizeof(tx_desc),
+ padding_size, flags);
+ dev_kfree_skb_any(skb);
+ skb = new_skb;
+ if (!skb)
+ return NULL;
+ }
+ if (padding_size != 0)
+ skb_put_zero(skb, padding_size);
+ /* Copy TX header */
+ tx_desc_ptr = skb_push(skb, sizeof(tx_desc));
+ *tx_desc_ptr = cpu_to_le64(tx_desc);
+
+ usbnet_set_skb_tx_stats(skb, 1, 0);
+
+ return skb;
+}
+
+static const struct driver_info aqc111_info = {
+ .description = "Aquantia AQtion USB to 5GbE Controller",
+ .bind = aqc111_bind,
+ .unbind = aqc111_unbind,
+ .status = aqc111_status,
+ .link_reset = aqc111_link_reset,
+ .reset = aqc111_reset,
+ .stop = aqc111_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
+ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+ .rx_fixup = aqc111_rx_fixup,
+ .tx_fixup = aqc111_tx_fixup,
+};
+
+#define ASIX111_DESC \
+"ASIX USB 3.1 Gen1 to 5G Multi-Gigabit Ethernet Adapter"
+
+static const struct driver_info asix111_info = {
+ .description = ASIX111_DESC,
+ .bind = aqc111_bind,
+ .unbind = aqc111_unbind,
+ .status = aqc111_status,
+ .link_reset = aqc111_link_reset,
+ .reset = aqc111_reset,
+ .stop = aqc111_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
+ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+ .rx_fixup = aqc111_rx_fixup,
+ .tx_fixup = aqc111_tx_fixup,
+};
+
+#undef ASIX111_DESC
+
+#define ASIX112_DESC \
+"ASIX USB 3.1 Gen1 to 2.5G Multi-Gigabit Ethernet Adapter"
+
+static const struct driver_info asix112_info = {
+ .description = ASIX112_DESC,
+ .bind = aqc111_bind,
+ .unbind = aqc111_unbind,
+ .status = aqc111_status,
+ .link_reset = aqc111_link_reset,
+ .reset = aqc111_reset,
+ .stop = aqc111_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX |
+ FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+ .rx_fixup = aqc111_rx_fixup,
+ .tx_fixup = aqc111_tx_fixup,
+};
+
+#undef ASIX112_DESC
+
+static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u16 temp_rx_ctrl = 0x00;
+ u16 reg16;
+ u8 reg8;
+
+ usbnet_suspend(intf, message);
+
+ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+ temp_rx_ctrl = reg16;
+ /* Stop RX operations */
+ reg16 &= ~SFR_RX_CTL_START;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+ /* Force bz */
+ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
+ 2, &reg16);
+ reg16 |= SFR_PHYPWR_RSTCTL_BZ;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL,
+ 2, &reg16);
+
+ reg8 = SFR_BULK_OUT_EFF_EN;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL,
+ 1, 1, &reg8);
+
+ temp_rx_ctrl &= ~(SFR_RX_CTL_START | SFR_RX_CTL_RF_WAK |
+ SFR_RX_CTL_AP | SFR_RX_CTL_AM);
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
+ 2, &temp_rx_ctrl);
+
+ reg8 = 0x00;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
+ 1, 1, &reg8);
+
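+ /* With WoL requested, keep the receiver and RX path alive and hand
+ * the WoL configuration to the PHY; otherwise drop the PHY into low
+ * power and disable the RX path.
+ */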
+ if (aqc111_data->wol_flags) {
+ struct aqc111_wol_cfg wol_cfg;
+
+ memset(&wol_cfg, 0, sizeof(struct aqc111_wol_cfg));
+
+ aqc111_data->phy_cfg |= AQ_WOL;
+ ether_addr_copy(wol_cfg.hw_addr, dev->net->dev_addr);
+ wol_cfg.flags = aqc111_data->wol_flags;
+
+ temp_rx_ctrl |= (SFR_RX_CTL_AB | SFR_RX_CTL_START);
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL,
+ 2, &temp_rx_ctrl);
+ reg8 = 0x00;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK,
+ 1, 1, &reg8);
+ reg8 = SFR_BMRX_DMA_EN;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL,
+ 1, 1, &reg8);
+ reg8 = SFR_RX_PATH_READY;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
+ 1, 1, &reg8);
+ reg8 = 0x07;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL,
+ 1, 1, &reg8);
+ reg8 = 0x00;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_RX_BULKIN_QTIMR_LOW, 1, 1, &reg8);
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_RX_BULKIN_QTIMR_HIGH, 1, 1, &reg8);
+ reg8 = 0xFF;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QSIZE,
+ 1, 1, &reg8);
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QIFG,
+ 1, 1, &reg8);
+
+ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_MEDIUM_STATUS_MODE, 2, &reg16);
+ reg16 |= SFR_MEDIUM_RECEIVE_EN;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_MEDIUM_STATUS_MODE, 2, &reg16);
+
+ aqc111_write_cmd(dev, AQ_WOL_CFG, 0, 0,
+ WOL_CFG_SIZE, &wol_cfg);
+ aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
+ } else {
+ aqc111_data->phy_cfg |= AQ_LOW_POWER;
+ aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0,
+ &aqc111_data->phy_cfg);
+
+ /* Disable RX path */
+ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_MEDIUM_STATUS_MODE, 2, &reg16);
+ reg16 &= ~SFR_MEDIUM_RECEIVE_EN;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC,
+ SFR_MEDIUM_STATUS_MODE, 2, &reg16);
+ }
+
+ return 0;
+}
+
+static int aqc111_resume(struct usb_interface *intf)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+ struct aqc111_data *aqc111_data = dev->driver_priv;
+ u16 reg16;
+ u8 reg8;
+
+ netif_carrier_off(dev->net);
+
+ /* Power up ethernet PHY */
+ aqc111_data->phy_cfg |= AQ_PHY_POWER_EN;
+ aqc111_data->phy_cfg &= ~AQ_LOW_POWER;
+ aqc111_data->phy_cfg &= ~AQ_WOL;
+
+ reg8 = 0xFF;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK,
+ 1, 1, &reg8);
+ /* Restart RX: clear, then set the START bit in the RX control register */
+ reg16 = aqc111_data->rxctl;
+ reg16 &= ~SFR_RX_CTL_START;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+
+ reg16 |= SFR_RX_CTL_START;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16);
+
+ aqc111_set_phy_speed(dev, aqc111_data->autoneg,
+ aqc111_data->advertised_speed);
+
+ aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ reg16 |= SFR_MEDIUM_RECEIVE_EN;
+ aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+ 2, &reg16);
+ reg8 = SFR_RX_PATH_READY;
+ aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH,
+ 1, 1, &reg8);
+ reg8 = 0x0;
+ aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL, 1, 1, &reg8);
+
+ return usbnet_resume(intf);
+}
+
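+/* Each device is matched twice: once on its vendor-specific interface
+ * and once on its CDC Ether interface. The unbalanced braces here are
+ * deliberate; they are closed by the entries in the table below.
+ */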
+#define AQC111_USB_ETH_DEV(vid, pid, table) \
+ USB_DEVICE_INTERFACE_CLASS((vid), (pid), USB_CLASS_VENDOR_SPEC), \
+ .driver_info = (unsigned long)&(table) \
+}, \
+{ \
+ USB_DEVICE_AND_INTERFACE_INFO((vid), (pid), \
+ USB_CLASS_COMM, \
+ USB_CDC_SUBCLASS_ETHERNET, \
+ USB_CDC_PROTO_NONE), \
+ .driver_info = (unsigned long)&(table),
+
+static const struct usb_device_id products[] = {
+ {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)},
+ {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
+ {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
+ { },/* END */
+};
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver aq_driver = {
+ .name = "aqc111",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .suspend = aqc111_suspend,
+ .resume = aqc111_resume,
+ .disconnect = usbnet_disconnect,
+};
+
+module_usb_driver(aq_driver);
+
+MODULE_DESCRIPTION("Aquantia AQtion USB to 5/2.5GbE Controllers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/aqc111.h b/drivers/net/usb/aqc111.h
new file mode 100644
index 000000000000..4d68b3a6067c
--- /dev/null
+++ b/drivers/net/usb/aqc111.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Aquantia Corp. Aquantia AQtion USB to 5GbE Controller
+ * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
+ * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
+ * Copyright (C) 2002-2003 TiVo Inc.
+ * Copyright (C) 2017-2018 ASIX
+ * Copyright (C) 2018 Aquantia Corp.
+ */
+
+#ifndef __LINUX_USBNET_AQC111_H
+#define __LINUX_USBNET_AQC111_H
+
+#define URB_SIZE (1024 * 62)
+
+#define AQ_MCAST_FILTER_SIZE 8
+#define AQ_MAX_MCAST 64
+
+#define AQ_ACCESS_MAC 0x01
+#define AQ_FLASH_PARAMETERS 0x20
+#define AQ_PHY_POWER 0x31
+#define AQ_WOL_CFG 0x60
+#define AQ_PHY_OPS 0x61
+
+#define AQ_USB_PHY_SET_TIMEOUT 10000
+#define AQ_USB_SET_TIMEOUT 4000
+
+/* Features ********************************************/
+#define AQ_SUPPORT_FEATURE (NETIF_F_SG | NETIF_F_IP_CSUM |\
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |\
+ NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX |\
+ NETIF_F_HW_VLAN_CTAG_RX)
+
+#define AQ_SUPPORT_HW_FEATURE (NETIF_F_SG | NETIF_F_IP_CSUM |\
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |\
+ NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_FILTER)
+
+#define AQ_SUPPORT_VLAN_FEATURE (NETIF_F_SG | NETIF_F_IP_CSUM |\
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |\
+ NETIF_F_TSO)
+
+/* SFR Reg. ********************************************/
+
+#define SFR_GENERAL_STATUS 0x03
+#define SFR_CHIP_STATUS 0x05
+#define SFR_RX_CTL 0x0B
+ #define SFR_RX_CTL_TXPADCRC 0x0400
+ #define SFR_RX_CTL_IPE 0x0200
+ #define SFR_RX_CTL_DROPCRCERR 0x0100
+ #define SFR_RX_CTL_START 0x0080
+ #define SFR_RX_CTL_RF_WAK 0x0040
+ #define SFR_RX_CTL_AP 0x0020
+ #define SFR_RX_CTL_AM 0x0010
+ #define SFR_RX_CTL_AB 0x0008
+ #define SFR_RX_CTL_AMALL 0x0002
+ #define SFR_RX_CTL_PRO 0x0001
+ #define SFR_RX_CTL_STOP 0x0000
+#define SFR_INTER_PACKET_GAP_0 0x0D
+#define SFR_NODE_ID 0x10
+#define SFR_MULTI_FILTER_ARRY 0x16
+#define SFR_MEDIUM_STATUS_MODE 0x22
+ #define SFR_MEDIUM_XGMIIMODE 0x0001
+ #define SFR_MEDIUM_FULL_DUPLEX 0x0002
+ #define SFR_MEDIUM_RXFLOW_CTRLEN 0x0010
+ #define SFR_MEDIUM_TXFLOW_CTRLEN 0x0020
+ #define SFR_MEDIUM_JUMBO_EN 0x0040
+ #define SFR_MEDIUM_RECEIVE_EN 0x0100
+#define SFR_MONITOR_MODE 0x24
+ #define SFR_MONITOR_MODE_EPHYRW 0x01
+ #define SFR_MONITOR_MODE_RWLC 0x02
+ #define SFR_MONITOR_MODE_RWMP 0x04
+ #define SFR_MONITOR_MODE_RWWF 0x08
+ #define SFR_MONITOR_MODE_RW_FLAG 0x10
+ #define SFR_MONITOR_MODE_PMEPOL 0x20
+ #define SFR_MONITOR_MODE_PMETYPE 0x40
+#define SFR_PHYPWR_RSTCTL 0x26
+ #define SFR_PHYPWR_RSTCTL_BZ 0x0010
+ #define SFR_PHYPWR_RSTCTL_IPRL 0x0020
+#define SFR_VLAN_ID_ADDRESS 0x2A
+#define SFR_VLAN_ID_CONTROL 0x2B
+ #define SFR_VLAN_CONTROL_WE 0x0001
+ #define SFR_VLAN_CONTROL_RD 0x0002
+ #define SFR_VLAN_CONTROL_VSO 0x0010
+ #define SFR_VLAN_CONTROL_VFE 0x0020
+#define SFR_VLAN_ID_DATA0 0x2C
+#define SFR_VLAN_ID_DATA1 0x2D
+#define SFR_RX_BULKIN_QCTRL 0x2E
+ #define SFR_RX_BULKIN_QCTRL_TIME 0x01
+ #define SFR_RX_BULKIN_QCTRL_IFG 0x02
+ #define SFR_RX_BULKIN_QCTRL_SIZE 0x04
+#define SFR_RX_BULKIN_QTIMR_LOW 0x2F
+#define SFR_RX_BULKIN_QTIMR_HIGH 0x30
+#define SFR_RX_BULKIN_QSIZE 0x31
+#define SFR_RX_BULKIN_QIFG 0x32
+#define SFR_RXCOE_CTL 0x34
+ #define SFR_RXCOE_IP 0x01
+ #define SFR_RXCOE_TCP 0x02
+ #define SFR_RXCOE_UDP 0x04
+ #define SFR_RXCOE_ICMP 0x08
+ #define SFR_RXCOE_IGMP 0x10
+ #define SFR_RXCOE_TCPV6 0x20
+ #define SFR_RXCOE_UDPV6 0x40
+ #define SFR_RXCOE_ICMV6 0x80
+#define SFR_TXCOE_CTL 0x35
+ #define SFR_TXCOE_IP 0x01
+ #define SFR_TXCOE_TCP 0x02
+ #define SFR_TXCOE_UDP 0x04
+ #define SFR_TXCOE_ICMP 0x08
+ #define SFR_TXCOE_IGMP 0x10
+ #define SFR_TXCOE_TCPV6 0x20
+ #define SFR_TXCOE_UDPV6 0x40
+ #define SFR_TXCOE_ICMV6 0x80
+#define SFR_BM_INT_MASK 0x41
+#define SFR_BMRX_DMA_CONTROL 0x43
+ #define SFR_BMRX_DMA_EN 0x80
+#define SFR_BMTX_DMA_CONTROL 0x46
+#define SFR_PAUSE_WATERLVL_LOW 0x54
+#define SFR_PAUSE_WATERLVL_HIGH 0x55
+#define SFR_ARC_CTRL 0x9E
+#define SFR_SWP_CTRL 0xB1
+#define SFR_TX_PAUSE_RESEND_T 0xB2
+#define SFR_ETH_MAC_PATH 0xB7
+ #define SFR_RX_PATH_READY 0x01
+#define SFR_BULK_OUT_CTRL 0xB9
+ #define SFR_BULK_OUT_FLUSH_EN 0x01
+ #define SFR_BULK_OUT_EFF_EN 0x02
+
+#define AQ_FW_VER_MAJOR 0xDA
+#define AQ_FW_VER_MINOR 0xDB
+#define AQ_FW_VER_REV 0xDC
+
+/*PHY_OPS**********************************************************************/
+
+#define AQ_ADV_100M BIT(0)
+#define AQ_ADV_1G BIT(1)
+#define AQ_ADV_2G5 BIT(2)
+#define AQ_ADV_5G BIT(3)
+#define AQ_ADV_MASK 0x0F
+
+#define AQ_PAUSE BIT(16)
+#define AQ_ASYM_PAUSE BIT(17)
+#define AQ_LOW_POWER BIT(18)
+#define AQ_PHY_POWER_EN BIT(19)
+#define AQ_WOL BIT(20)
+#define AQ_DOWNSHIFT BIT(21)
+
+#define AQ_DSH_RETRIES_SHIFT 0x18
+#define AQ_DSH_RETRIES_MASK 0xF000000
+
+#define AQ_WOL_FLAG_MP 0x2
+
+/******************************************************************************/
+
+struct aqc111_wol_cfg {
+ u8 hw_addr[6];
+ u8 flags;
+ u8 rsvd[283];
+} __packed;
+
+#define WOL_CFG_SIZE sizeof(struct aqc111_wol_cfg)
+
+struct aqc111_data {
+ u16 rxctl;
+ u8 rx_checksum;
+ u8 link_speed;
+ u8 link;
+ u8 autoneg;
+ u32 advertised_speed;
+ struct {
+ u8 major;
+ u8 minor;
+ u8 rev;
+ } fw_ver;
+ u32 phy_cfg;
+ u8 wol_flags;
+};
+
+#define AQ_LS_MASK 0x8000
+#define AQ_SPEED_MASK 0x7F00
+#define AQ_SPEED_SHIFT 0x0008
+#define AQ_INT_SPEED_5G 0x000F
+#define AQ_INT_SPEED_2_5G 0x0010
+#define AQ_INT_SPEED_1G 0x0011
+#define AQ_INT_SPEED_100M 0x0013
+
+/* TX Descriptor */
+#define AQ_TX_DESC_LEN_MASK 0x1FFFFF
+#define AQ_TX_DESC_DROP_PADD BIT(28)
+#define AQ_TX_DESC_VLAN BIT(29)
+#define AQ_TX_DESC_MSS_MASK 0x7FFF
+#define AQ_TX_DESC_MSS_SHIFT 0x20
+#define AQ_TX_DESC_VLAN_MASK 0xFFFF
+#define AQ_TX_DESC_VLAN_SHIFT 0x30
+
+#define AQ_RX_HW_PAD 0x02
+
+/* RX Packet Descriptor */
+#define AQ_RX_PD_L4_ERR BIT(0)
+#define AQ_RX_PD_L3_ERR BIT(1)
+#define AQ_RX_PD_L4_TYPE_MASK 0x1C
+#define AQ_RX_PD_L4_UDP 0x04
+#define AQ_RX_PD_L4_TCP 0x10
+#define AQ_RX_PD_L3_TYPE_MASK 0x60
+#define AQ_RX_PD_L3_IP 0x20
+#define AQ_RX_PD_L3_IP6 0x40
+
+#define AQ_RX_PD_VLAN BIT(10)
+#define AQ_RX_PD_RX_OK BIT(11)
+#define AQ_RX_PD_DROP BIT(31)
+#define AQ_RX_PD_LEN_MASK 0x7FFF0000
+#define AQ_RX_PD_LEN_SHIFT 0x10
+#define AQ_RX_PD_VLAN_SHIFT 0x20
+
+/* RX Descriptor header */
+#define AQ_RX_DH_PKT_CNT_MASK 0x1FFF
+#define AQ_RX_DH_DESC_OFFSET_MASK 0xFFFFE000
+#define AQ_RX_DH_DESC_OFFSET_SHIFT 0x0D
+
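+/* Bulk-in aggregation profiles; the five bytes are written to
+ * SFR_RX_BULKIN_QCTRL through SFR_RX_BULKIN_QIFG in aqc111_configure_rx().
+ */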
+static struct {
+ unsigned char ctrl;
+ unsigned char timer_l;
+ unsigned char timer_h;
+ unsigned char size;
+ unsigned char ifg;
+} AQC111_BULKIN_SIZE[] = {
+ /* xHCI & EHCI & OHCI */
+ {7, 0x00, 0x01, 0x1E, 0xFF},/* 10G, 5G, 2.5G, 1G */
+ {7, 0xA0, 0x00, 0x14, 0x00},/* 100M */
+ /* Jumbo packet */
+ {7, 0x00, 0x01, 0x18, 0xFF},
+};
+
+#endif /* __LINUX_USBNET_AQC111_H */
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5c42cf81a08b..b3b3c05903a1 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -562,6 +562,8 @@ static const struct driver_info wwan_info = {
#define MICROSOFT_VENDOR_ID 0x045e
#define UBLOX_VENDOR_ID 0x1546
#define TPLINK_VENDOR_ID 0x2357
+#define AQUANTIA_VENDOR_ID 0x2eca
+#define ASIX_VENDOR_ID 0x0b95
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -821,6 +823,30 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101,
+ USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* ASIX USB 3.1 Gen1 to 5G Multi-Gigabit Ethernet Adapter (based on AQC111U) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(ASIX_VENDOR_ID, 0x2790, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
+/* ASIX USB 3.1 Gen1 to 2.5G Multi-Gigabit Ethernet Adapter (based on AQC112U) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(ASIX_VENDOR_ID, 0x2791, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 184c24baca15..d6916f787fce 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2807,6 +2807,12 @@ static int hso_get_config_data(struct usb_interface *interface)
return -EIO;
}
+ /* check if we have a valid interface */
+ if (if_num > 16) {
+ kfree(config_data);
+ return -EINVAL;
+ }
+
switch (config_data[if_num]) {
case 0x0:
result = 0;
@@ -2877,10 +2883,18 @@ static int hso_probe(struct usb_interface *interface,
/* Get the interface/port specification from either driver_info or from
* the device itself */
- if (id->driver_info)
+ if (id->driver_info) {
+ /* if_num is controlled by the device; driver_info is a 0-terminated
+ * array. Make sure the access is in bounds! */
+ for (i = 0; i <= if_num; ++i)
+ if (((u32 *)(id->driver_info))[i] == 0)
+ goto exit;
port_spec = ((u32 *)(id->driver_info))[if_num];
- else
+ } else {
port_spec = hso_get_config_data(interface);
+ if (port_spec < 0)
+ goto exit;
+ }
/* Check if we need to switch to alt interfaces prior to port
* configuration */
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 7275761a1177..3d8a70d3ea9b 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -140,7 +140,6 @@ struct ipheth_device {
struct usb_device *udev;
struct usb_interface *intf;
struct net_device *net;
- struct sk_buff *tx_skb;
struct urb *tx_urb;
struct urb *rx_urb;
unsigned char *tx_buf;
@@ -230,6 +229,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
+ case -EPROTO:
return;
case 0:
break;
@@ -281,7 +281,6 @@ static void ipheth_sndbulk_callback(struct urb *urb)
dev_err(&dev->intf->dev, "%s: urb status: %d\n",
__func__, status);
- dev_kfree_skb_irq(dev->tx_skb);
if (status == 0)
netif_wake_queue(dev->net);
else
@@ -423,7 +422,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
if (skb->len > IPHETH_BUF_SIZE) {
WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
dev->net->stats.tx_dropped++;
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -443,12 +442,11 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
__func__, retval);
dev->net->stats.tx_errors++;
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb_any(skb);
} else {
- dev->tx_skb = skb;
-
dev->net->stats.tx_packets++;
dev->net->stats.tx_bytes += skb->len;
+ dev_consume_skb_any(skb);
netif_stop_queue(net);
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index be1917be28f2..e96bc0c6140f 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
+#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -1586,18 +1587,17 @@ static int lan78xx_set_pause(struct net_device *net,
dev->fc_request_control |= FLOW_CTRL_TX;
if (ecmd.base.autoneg) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
u32 mii_adv;
- u32 advertising;
- ethtool_convert_link_mode_to_legacy_u32(
- &advertising, ecmd.link_modes.advertising);
-
- advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ecmd.link_modes.advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ecmd.link_modes.advertising);
mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
- advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
-
- ethtool_convert_legacy_u32_to_link_mode(
- ecmd.link_modes.advertising, advertising);
+ mii_adv_to_linkmode_adv_t(fc, mii_adv);
+ linkmode_or(ecmd.link_modes.advertising, fc,
+ ecmd.link_modes.advertising);
phy_ethtool_ksettings_set(phydev, &ecmd);
}
@@ -2095,6 +2095,7 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
int ret;
u32 mii_adv;
struct phy_device *phydev;
@@ -2158,9 +2159,13 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
/* support both flow controls */
dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
- phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
- phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
+ mii_adv_to_linkmode_adv_t(fc, mii_adv);
+ linkmode_or(phydev->advertising, fc, phydev->advertising);
if (phydev->mdio.dev.of_node) {
u32 reg;
@@ -2320,6 +2325,10 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
+ /* Added to support MAC address changes */
+ ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
+ ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
+
return 0;
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 72a55b6b4211..774e1ff01c9a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -151,17 +151,18 @@ static bool qmimux_has_slaves(struct usbnet *dev)
static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
- unsigned int len, offset = sizeof(struct qmimux_hdr);
+ unsigned int len, offset = 0;
struct qmimux_hdr *hdr;
struct net_device *net;
struct sk_buff *skbn;
+ u8 qmimux_hdr_sz = sizeof(*hdr);
- while (offset < skb->len) {
- hdr = (struct qmimux_hdr *)skb->data;
+ while (offset + qmimux_hdr_sz < skb->len) {
+ hdr = (struct qmimux_hdr *)(skb->data + offset);
len = be16_to_cpu(hdr->pkt_len);
/* drop the packet, bogus length */
- if (offset + len > skb->len)
+ if (offset + len + qmimux_hdr_sz > skb->len)
return 0;
/* control packet, we do not know what to do */
@@ -176,7 +177,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 0;
skbn->dev = net;
- switch (skb->data[offset] & 0xf0) {
+ switch (skb->data[offset + qmimux_hdr_sz] & 0xf0) {
case 0x40:
skbn->protocol = htons(ETH_P_IP);
break;
@@ -188,12 +189,12 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
goto skip;
}
- skb_put_data(skbn, skb->data + offset, len);
+ skb_put_data(skbn, skb->data + offset + qmimux_hdr_sz, len);
if (netif_rx(skbn) != NET_RX_SUCCESS)
return 0;
skip:
- offset += len + sizeof(struct qmimux_hdr);
+ offset += len + qmimux_hdr_sz;
}
return 1;
}
@@ -1117,6 +1118,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
{QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
+ {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
{QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
{QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
@@ -1229,6 +1231,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
@@ -1263,6 +1266,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
+ {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f1b5201cc320..60dd1ec1665f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -129,6 +129,7 @@
#define USB_UPS_CTRL 0xd800
#define USB_POWER_CUT 0xd80a
#define USB_MISC_0 0xd81a
+#define USB_MISC_1 0xd81f
#define USB_AFE_CTRL2 0xd824
#define USB_UPS_CFG 0xd842
#define USB_UPS_FLAGS 0xd848
@@ -555,6 +556,7 @@ enum spd_duplex {
/* MAC PASSTHRU */
#define AD_MASK 0xfee0
+#define BND_MASK 0x0004
#define EFUSE 0xcfdb
#define PASS_THRU_MASK 0x1
@@ -1150,7 +1152,7 @@ out1:
return ret;
}
-/* Devices containing RTL8153-AD can support a persistent
+/* Devices containing supported chips can support a persistent
* host system provided MAC address.
* Examples of this are Dell TB15 and Dell WD15 docks
*/
@@ -1165,13 +1167,23 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
/* test for -AD variant of RTL8153 */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
- if ((ocp_data & AD_MASK) != 0x1000)
- return -ENODEV;
-
- /* test for MAC address pass-through bit */
- ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
- if ((ocp_data & PASS_THRU_MASK) != 1)
- return -ENODEV;
+ if ((ocp_data & AD_MASK) == 0x1000) {
+ /* test for MAC address pass-through bit */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+ if ((ocp_data & PASS_THRU_MASK) != 1) {
+ netif_dbg(tp, probe, tp->netdev,
+ "No efuse for RTL8153-AD MAC pass through\n");
+ return -ENODEV;
+ }
+ } else {
+ /* test for RTL8153-BND */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1);
+ if ((ocp_data & BND_MASK) == 0) {
+ netif_dbg(tp, probe, tp->netdev,
+ "Invalid variant for MAC pass through\n");
+ return -ENODEV;
+ }
+ }
/* returns _AUXMAC_#AABBCCDDEEFF# */
status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
@@ -1217,9 +1229,8 @@ static int set_ethernet_addr(struct r8152 *tp)
if (tp->version == RTL_VER_01) {
ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
} else {
- /* if this is not an RTL8153-AD, no eFuse mac pass thru set,
- * or system doesn't provide valid _SB.AMAC this will be
- * be expected to non-zero
+ /* if device doesn't support MAC pass through this will
+ * be expected to be non-zero
*/
ret = vendor_mac_passthru_addr_read(tp, &sa);
if (ret < 0)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index f2d01cb6f958..e3d08626828e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -618,9 +618,7 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
return;
}
- memcpy(&intdata, urb->transfer_buffer, 4);
- le32_to_cpus(&intdata);
-
+ intdata = get_unaligned_le32(urb->transfer_buffer);
netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
if (intdata & INT_ENP_PHY_INT_)
@@ -1295,6 +1293,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->features |= NETIF_F_RXCSUM;
dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+ set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
smsc95xx_init_mac_address(dev);
@@ -1933,8 +1932,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
unsigned char *packet;
u16 size;
- memcpy(&header, skb->data, sizeof(header));
- le32_to_cpus(&header);
+ header = get_unaligned_le32(skb->data);
skb_pull(skb, 4 + NET_IP_ALIGN);
packet = skb->data;
@@ -2011,12 +2009,30 @@ static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
return (high_16 << 16) | low_16;
}
+/* The TX CSUM won't work if the checksum lies in the last 4 bytes of the
+ * transmission. This is fairly unlikely; it only seems to trigger with
+ * some short TCP ACK packets.
+ *
+ * Note, this calculation should probably check for the alignment of the
+ * data as well, but a straight check for csum being in the last four bytes
+ * of the packet should be ok for now.
+ */
+static bool smsc95xx_can_tx_checksum(struct sk_buff *skb)
+{
+ unsigned int len = skb->len - skb_checksum_start_offset(skb);
+
+ if (skb->len <= 45)
+ return false;
+ return skb->csum_offset < (len - (4 + 1));
+}
+
static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
struct sk_buff *skb, gfp_t flags)
{
bool csum = skb->ip_summed == CHECKSUM_PARTIAL;
int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD;
u32 tx_cmd_a, tx_cmd_b;
+ void *ptr;
/* We do not advertise SG, so skbs should be already linearized */
BUG_ON(skb_shinfo(skb)->nr_frags);
@@ -2030,8 +2046,11 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
return NULL;
}
+ tx_cmd_b = (u32)skb->len;
+ tx_cmd_a = tx_cmd_b | TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_;
+
if (csum) {
- if (skb->len <= 45) {
+ if (!smsc95xx_can_tx_checksum(skb)) {
/* workaround - hardware tx checksum does not work
* properly with extremely small packets */
long csstart = skb_checksum_start_offset(skb);
@@ -2043,24 +2062,18 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
csum = false;
} else {
u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
- skb_push(skb, 4);
- cpu_to_le32s(&csum_preamble);
- memcpy(skb->data, &csum_preamble, 4);
+ ptr = skb_push(skb, 4);
+ put_unaligned_le32(csum_preamble, ptr);
+
+ tx_cmd_a += 4;
+ tx_cmd_b += 4;
+ tx_cmd_b |= TX_CMD_B_CSUM_ENABLE;
}
}
- skb_push(skb, 4);
- tx_cmd_b = (u32)(skb->len - 4);
- if (csum)
- tx_cmd_b |= TX_CMD_B_CSUM_ENABLE;
- cpu_to_le32s(&tx_cmd_b);
- memcpy(skb->data, &tx_cmd_b, 4);
-
- skb_push(skb, 4);
- tx_cmd_a = (u32)(skb->len - 8) | TX_CMD_A_FIRST_SEG_ |
- TX_CMD_A_LAST_SEG_;
- cpu_to_le32s(&tx_cmd_a);
- memcpy(skb->data, &tx_cmd_a, 4);
+ ptr = skb_push(skb, 8);
+ put_unaligned_le32(tx_cmd_a, ptr);
+ put_unaligned_le32(tx_cmd_b, ptr+4);
return skb;
}
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 890fa5b905e2..f412ea1cef18 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1253,7 +1253,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
return PTR_ERR(net);
peer = rtnl_create_link(net, ifname, name_assign_type,
- &veth_link_ops, tbp);
+ &veth_link_ops, tbp, extack);
if (IS_ERR(peer)) {
put_net(net);
return PTR_ERR(peer);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index cecfd77c9f3c..023725086046 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -236,6 +236,7 @@ struct virtnet_info {
u32 speed;
unsigned long guest_offloads;
+ unsigned long guest_offloads_capable;
/* failover when STANDBY feature enabled */
struct failover *failover;
@@ -365,7 +366,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct receive_queue *rq,
struct page *page, unsigned int offset,
- unsigned int len, unsigned int truesize)
+ unsigned int len, unsigned int truesize,
+ bool hdr_valid)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -387,7 +389,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
- memcpy(hdr, p, hdr_len);
+ if (hdr_valid)
+ memcpy(hdr, p, hdr_len);
len -= hdr_len;
offset += hdr_padded_len;
@@ -739,7 +742,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct page *page = buf;
- struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+ struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
+ PAGE_SIZE, true);
stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
@@ -842,7 +846,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
rcu_read_unlock();
put_page(page);
head_skb = page_to_skb(vi, rq, xdp_page,
- offset, len, PAGE_SIZE);
+ offset, len,
+ PAGE_SIZE, false);
return head_skb;
}
break;
@@ -898,7 +903,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_skb;
}
- head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -2475,6 +2480,31 @@ static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
return 0;
}
+static int virtnet_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ u64 offloads;
+ int err;
+
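+ /* LRO is backed by the device's guest offloads: toggle between the
+ * full capable set and none.
+ */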
+ if ((dev->features ^ features) & NETIF_F_LRO) {
+ if (vi->xdp_queue_pairs)
+ return -EBUSY;
+
+ if (features & NETIF_F_LRO)
+ offloads = vi->guest_offloads_capable;
+ else
+ offloads = 0;
+
+ err = virtnet_set_guest_offloads(vi, offloads);
+ if (err)
+ return err;
+ vi->guest_offloads = offloads;
+ }
+
+ return 0;
+}
+
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -2489,6 +2519,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_xdp_xmit = virtnet_xdp_xmit,
.ndo_features_check = passthru_features_check,
.ndo_get_phys_port_name = virtnet_get_phys_port_name,
+ .ndo_set_features = virtnet_set_features,
};
static void virtnet_config_changed_work(struct work_struct *work)
@@ -2947,6 +2978,11 @@ static int virtnet_probe(struct virtio_device *vdev)
}
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
dev->features |= NETIF_F_RXCSUM;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
+ dev->features |= NETIF_F_LRO;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
+ dev->hw_features |= NETIF_F_LRO;
dev->vlan_features = dev->features;
@@ -3076,6 +3112,7 @@ static int virtnet_probe(struct virtio_device *vdev)
for (i = 0; i < ARRAY_SIZE(guest_offloads); i++)
if (virtio_has_feature(vi->vdev, guest_offloads[i]))
set_bit(guest_offloads[i], &vi->guest_offloads);
+ vi->guest_offloads_capable = vi->guest_offloads;
pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
dev->name, max_queue_pairs);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 69b7227c637e..95909e262ba4 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -747,7 +747,8 @@ static int vrf_rtable_create(struct net_device *dev)
/**************************** device handling ********************/
/* cycle interface to flush neighbor cache and move routes across tables */
-static void cycle_netdev(struct net_device *dev)
+static void cycle_netdev(struct net_device *dev,
+ struct netlink_ext_ack *extack)
{
unsigned int flags = dev->flags;
int ret;
@@ -755,9 +756,9 @@ static void cycle_netdev(struct net_device *dev)
if (!netif_running(dev))
return;
- ret = dev_change_flags(dev, flags & ~IFF_UP);
+ ret = dev_change_flags(dev, flags & ~IFF_UP, extack);
if (ret >= 0)
- ret = dev_change_flags(dev, flags);
+ ret = dev_change_flags(dev, flags, extack);
if (ret < 0) {
netdev_err(dev,
@@ -785,7 +786,7 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
if (ret < 0)
goto err;
- cycle_netdev(port_dev);
+ cycle_netdev(port_dev, extack);
return 0;
@@ -815,7 +816,7 @@ static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
netdev_upper_dev_unlink(port_dev, dev);
port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
- cycle_netdev(port_dev);
+ cycle_netdev(port_dev, NULL);
return 0;
}
@@ -981,24 +982,23 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
struct sk_buff *skb)
{
int orig_iif = skb->skb_iif;
- bool need_strict;
+ bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
+ bool is_ndisc = ipv6_ndisc_frame(skb);
- /* loopback traffic; do not push through packet taps again.
- * Reset pkt_type for upper layers to process skb
+ /* loopback, multicast & non-ND link-local traffic; do not push through
+ * packet taps again. Reset pkt_type for upper layers to process skb
*/
- if (skb->pkt_type == PACKET_LOOPBACK) {
+ if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
- skb->pkt_type = PACKET_HOST;
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ skb->pkt_type = PACKET_HOST;
goto out;
}
- /* if packet is NDISC or addressed to multicast or link-local
- * then keep the ingress interface
- */
- need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
- if (!ipv6_ndisc_frame(skb) && !need_strict) {
+ /* if packet is NDISC then keep the ingress interface */
+ if (!is_ndisc) {
vrf_rx_stats(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 297cdeaef479..5209ee9aac47 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -79,9 +79,11 @@ struct vxlan_fdb {
u8 eth_addr[ETH_ALEN];
u16 state; /* see ndm_state */
__be32 vni;
- u8 flags; /* see ndm_flags */
+ u16 flags; /* see ndm_flags and below */
};
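+/* Private fdb flag: the entry was added by the user and must not be
+ * taken over by an externally learned one (see vxlan_fdb_update()).
+ */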
+#define NTF_VXLAN_ADDED_BY_USER 0x100
+
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
@@ -186,7 +188,7 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
* and enabled unshareable flags.
*/
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
- __be16 port, u32 flags)
+ __be16 port, u32 flags, int ifindex)
{
struct vxlan_sock *vs;
@@ -195,7 +197,8 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
if (inet_sk(vs->sock->sk)->inet_sport == port &&
vxlan_get_sk_family(vs) == family &&
- vs->flags == flags)
+ vs->flags == flags &&
+ vs->sock->sk->sk_bound_dev_if == ifindex)
return vs;
}
return NULL;
@@ -235,7 +238,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex,
{
struct vxlan_sock *vs;
- vs = vxlan_find_sock(net, family, port, flags);
+ vs = vxlan_find_sock(net, family, port, flags, ifindex);
if (!vs)
return NULL;
@@ -355,6 +358,23 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
+static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan,
+ const struct vxlan_fdb *fdb,
+ const struct vxlan_rdst *rd,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ fdb_info->info.dev = vxlan->dev;
+ fdb_info->info.extack = NULL;
+ fdb_info->remote_ip = rd->remote_ip;
+ fdb_info->remote_port = rd->remote_port;
+ fdb_info->remote_vni = rd->remote_vni;
+ fdb_info->remote_ifindex = rd->remote_ifindex;
+ memcpy(fdb_info->eth_addr, fdb->eth_addr, ETH_ALEN);
+ fdb_info->vni = fdb->vni;
+ fdb_info->offloaded = rd->offloaded;
+ fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER;
+}
+
static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
struct vxlan_fdb *fdb,
struct vxlan_rdst *rd,
@@ -368,31 +388,25 @@ static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan,
notifier_type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE
: SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE;
-
- info = (struct switchdev_notifier_vxlan_fdb_info){
- .remote_ip = rd->remote_ip,
- .remote_port = rd->remote_port,
- .remote_vni = rd->remote_vni,
- .remote_ifindex = rd->remote_ifindex,
- .vni = fdb->vni,
- .offloaded = rd->offloaded,
- };
- memcpy(info.eth_addr, fdb->eth_addr, ETH_ALEN);
-
+ vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, &info);
call_switchdev_notifiers(notifier_type, vxlan->dev,
&info.info);
}
static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
- struct vxlan_rdst *rd, int type)
+ struct vxlan_rdst *rd, int type, bool swdev_notify)
{
- switch (type) {
- case RTM_NEWNEIGH:
- vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, true);
- break;
- case RTM_DELNEIGH:
- vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, false);
- break;
+ if (swdev_notify) {
+ switch (type) {
+ case RTM_NEWNEIGH:
+ vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
+ true);
+ break;
+ case RTM_DELNEIGH:
+ vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd,
+ false);
+ break;
+ }
}
__vxlan_fdb_notify(vxlan, fdb, rd, type);
@@ -409,7 +423,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
.remote_vni = cpu_to_be32(VXLAN_N_VID),
};
- vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
+ vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
}
static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -421,7 +435,7 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
memcpy(f.eth_addr, eth_addr, ETH_ALEN);
- vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
+ vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true);
}
/* Hash Ethernet address */
@@ -531,16 +545,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
}
rdst = first_remote_rcu(f);
-
- memset(fdb_info, 0, sizeof(*fdb_info));
- fdb_info->info.dev = dev;
- fdb_info->remote_ip = rdst->remote_ip;
- fdb_info->remote_port = rdst->remote_port;
- fdb_info->remote_vni = rdst->remote_vni;
- fdb_info->remote_ifindex = rdst->remote_ifindex;
- fdb_info->vni = vni;
- fdb_info->offloaded = rdst->offloaded;
- ether_addr_copy(fdb_info->eth_addr, mac);
+ vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, fdb_info);
out:
rcu_read_unlock();
@@ -548,6 +553,75 @@ out:
}
EXPORT_SYMBOL_GPL(vxlan_fdb_find_uc);
+static int vxlan_fdb_notify_one(struct notifier_block *nb,
+ const struct vxlan_dev *vxlan,
+ const struct vxlan_fdb *f,
+ const struct vxlan_rdst *rdst)
+{
+ struct switchdev_notifier_vxlan_fdb_info fdb_info;
+ int rc;
+
+ vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, &fdb_info);
+ rc = nb->notifier_call(nb, SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
+ &fdb_info);
+ return notifier_to_errno(rc);
+}
+
+int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
+ struct notifier_block *nb)
+{
+ struct vxlan_dev *vxlan;
+ struct vxlan_rdst *rdst;
+ struct vxlan_fdb *f;
+ unsigned int h;
+ int rc = 0;
+
+ if (!netif_is_vxlan(dev))
+ return -EINVAL;
+ vxlan = netdev_priv(dev);
+
+ spin_lock_bh(&vxlan->hash_lock);
+ for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) {
+ if (f->vni == vni) {
+ list_for_each_entry(rdst, &f->remotes, list) {
+ rc = vxlan_fdb_notify_one(nb, vxlan,
+ f, rdst);
+ if (rc)
+ goto out;
+ }
+ }
+ }
+ }
+
+out:
+ spin_unlock_bh(&vxlan->hash_lock);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(vxlan_fdb_replay);
+
+void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
+{
+ struct vxlan_dev *vxlan;
+ struct vxlan_rdst *rdst;
+ struct vxlan_fdb *f;
+ unsigned int h;
+
+ if (!netif_is_vxlan(dev))
+ return;
+ vxlan = netdev_priv(dev);
+
+ spin_lock_bh(&vxlan->hash_lock);
+ for (h = 0; h < FDB_HASH_SIZE; ++h) {
+ hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist)
+ if (f->vni == vni)
+ list_for_each_entry(rdst, &f->remotes, list)
+ rdst->offloaded = false;
+ }
+ spin_unlock_bh(&vxlan->hash_lock);
+}
+EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload);
+
/* Replace destination of unicast mac */
static int vxlan_fdb_replace(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port, __be32 vni,
@@ -568,6 +642,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
rd->remote_port = port;
rd->remote_vni = vni;
rd->remote_ifindex = ifindex;
+ rd->offloaded = false;
return 1;
}
@@ -700,7 +775,7 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
const u8 *mac, __u16 state,
- __be32 src_vni, __u8 ndm_flags)
+ __be32 src_vni, __u16 ndm_flags)
{
struct vxlan_fdb *f;
@@ -720,7 +795,7 @@ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __be16 port, __be32 src_vni,
- __be32 vni, __u32 ifindex, __u8 ndm_flags,
+ __be32 vni, __u32 ifindex, __u16 ndm_flags,
struct vxlan_fdb **fdb)
{
struct vxlan_rdst *rd = NULL;
@@ -756,9 +831,10 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __be32 src_vni, __be32 vni,
- __u32 ifindex, __u8 ndm_flags)
+ __u32 ifindex, __u16 ndm_flags,
+ bool swdev_notify)
{
- __u8 fdb_flags = (ndm_flags & ~NTF_USE);
+ __u16 fdb_flags = (ndm_flags & ~NTF_USE);
struct vxlan_rdst *rd = NULL;
struct vxlan_fdb *f;
int notify = 0;
@@ -771,16 +847,24 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
"lost race to create %pM\n", mac);
return -EEXIST;
}
- if (f->state != state) {
- f->state = state;
- f->updated = jiffies;
- notify = 1;
- }
- if (f->flags != fdb_flags) {
- f->flags = fdb_flags;
- f->updated = jiffies;
- notify = 1;
+
+ /* Do not allow an externally learned entry to take over an
+ * entry added by the user.
+ */
+ if (!(fdb_flags & NTF_EXT_LEARNED) ||
+ !(f->flags & NTF_VXLAN_ADDED_BY_USER)) {
+ if (f->state != state) {
+ f->state = state;
+ f->updated = jiffies;
+ notify = 1;
+ }
+ if (f->flags != fdb_flags) {
+ f->flags = fdb_flags;
+ f->updated = jiffies;
+ notify = 1;
+ }
}
+
if ((flags & NLM_F_REPLACE)) {
/* Only change unicasts */
if (!(is_multicast_ether_addr(f->eth_addr) ||
@@ -822,7 +906,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan,
if (notify) {
if (rd == NULL)
rd = first_remote_rtnl(f);
- vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
+ vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, swdev_notify);
}
return 0;
@@ -841,7 +925,7 @@ static void vxlan_fdb_free(struct rcu_head *head)
}
static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
- bool do_notify)
+ bool do_notify, bool swdev_notify)
{
struct vxlan_rdst *rd;
@@ -851,7 +935,8 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
--vxlan->addrcnt;
if (do_notify)
list_for_each_entry(rd, &f->remotes, list)
- vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH,
+ swdev_notify);
hlist_del_rcu(&f->hlist);
call_rcu(&f->rcu, vxlan_fdb_free);
@@ -866,10 +951,10 @@ static void vxlan_dst_free(struct rcu_head *head)
}
static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
- struct vxlan_rdst *rd)
+ struct vxlan_rdst *rd, bool swdev_notify)
{
list_del_rcu(&rd->list);
- vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
+ vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify);
call_rcu(&rd->rcu, vxlan_dst_free);
}
@@ -968,7 +1053,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
spin_lock_bh(&vxlan->hash_lock);
err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
- port, src_vni, vni, ifindex, ndm->ndm_flags);
+ port, src_vni, vni, ifindex,
+ ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER,
+ true);
spin_unlock_bh(&vxlan->hash_lock);
return err;
@@ -977,7 +1064,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
const unsigned char *addr, union vxlan_addr ip,
__be16 port, __be32 src_vni, __be32 vni,
- u32 ifindex, u16 vid)
+ u32 ifindex, bool swdev_notify)
{
struct vxlan_fdb *f;
struct vxlan_rdst *rd = NULL;
@@ -997,11 +1084,11 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
* otherwise destroy the fdb entry
*/
if (rd && !list_is_singular(&f->remotes)) {
- vxlan_fdb_dst_destroy(vxlan, f, rd);
+ vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify);
goto out;
}
- vxlan_fdb_destroy(vxlan, f, true);
+ vxlan_fdb_destroy(vxlan, f, true, swdev_notify);
out:
return 0;
@@ -1025,7 +1112,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
spin_lock_bh(&vxlan->hash_lock);
err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
- vid);
+ true);
spin_unlock_bh(&vxlan->hash_lock);
return err;
@@ -1066,6 +1153,39 @@ out:
return err;
}
+static int vxlan_fdb_get(struct sk_buff *skb,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u32 portid, u32 seq,
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_fdb *f;
+ __be32 vni;
+ int err;
+
+ if (tb[NDA_VNI])
+ vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI]));
+ else
+ vni = vxlan->default_dst.remote_vni;
+
+ rcu_read_lock();
+
+ f = __vxlan_find_mac(vxlan, addr, vni);
+ if (!f) {
+ NL_SET_ERR_MSG(extack, "Fdb entry not found");
+ err = -ENOENT;
+ goto errout;
+ }
+
+ err = vxlan_fdb_info(skb, vxlan, f, portid, seq,
+ RTM_NEWNEIGH, 0, first_remote_rcu(f));
+errout:
+ rcu_read_unlock();
+ return err;
+}
+
/* Watch incoming packets to learn mapping between Ethernet address
* and Tunnel endpoint.
* Return true if packet is bogus and should be dropped.
@@ -1103,7 +1223,7 @@ static bool vxlan_snoop(struct net_device *dev,
rdst->remote_ip = *src_ip;
f->updated = jiffies;
- vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
+ vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true);
} else {
/* learned new entry */
spin_lock(&vxlan->hash_lock);
@@ -1116,7 +1236,7 @@ static bool vxlan_snoop(struct net_device *dev,
vxlan->cfg.dst_port,
vni,
vxlan->default_dst.remote_vni,
- ifindex, NTF_SELF);
+ ifindex, NTF_SELF, true);
spin_unlock(&vxlan->hash_lock);
}
@@ -1552,6 +1672,34 @@ drop:
return 0;
}
+/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */
+static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ struct vxlan_dev *vxlan;
+ struct vxlan_sock *vs;
+ struct vxlanhdr *hdr;
+ __be32 vni;
+
+ if (skb->len < VXLAN_HLEN)
+ return -EINVAL;
+
+ hdr = vxlan_hdr(skb);
+
+ if (!(hdr->vx_flags & VXLAN_HF_VNI))
+ return -EINVAL;
+
+ vs = rcu_dereference_sk_user_data(sk);
+ if (!vs)
+ return -ENOENT;
+
+ vni = vxlan_vni(hdr->vx_vni);
+ vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
+ if (!vxlan)
+ return -ENOENT;
+
+ return 0;
+}
+
static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
@@ -2240,6 +2388,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct rtable *rt;
__be16 df = 0;
+ if (!ifindex)
+ ifindex = sock4->sock->sk->sk_bound_dev_if;
+
rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos,
dst->sin.sin_addr.s_addr,
&local_ip.sin.sin_addr.s_addr,
@@ -2250,13 +2401,24 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- /* Bypass encapsulation if the destination is local */
if (!info) {
+ /* Bypass encapsulation if the destination is local */
err = encap_bypass_if_local(skb, dev, vxlan, dst,
dst_port, ifindex, vni,
&rt->dst, rt->rt_flags);
if (err)
goto out_unlock;
+
+ if (vxlan->cfg.df == VXLAN_DF_SET) {
+ df = htons(IP_DF);
+ } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) {
+ struct ethhdr *eth = eth_hdr(skb);
+
+ if (ntohs(eth->h_proto) == ETH_P_IPV6 ||
+ (ntohs(eth->h_proto) == ETH_P_IP &&
+ old_iph->frag_off & htons(IP_DF)))
+ df = htons(IP_DF);
+ }
} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
df = htons(IP_DF);
}
@@ -2278,6 +2440,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
} else {
struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock);
+ if (!ifindex)
+ ifindex = sock6->sock->sk->sk_bound_dev_if;
+
ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos,
label, &dst->sin6.sin6_addr,
&local_ip.sin6.sin6_addr,
@@ -2461,7 +2626,7 @@ static void vxlan_cleanup(struct timer_list *t)
"garbage collect %pM\n",
f->eth_addr);
f->state = NUD_STALE;
- vxlan_fdb_destroy(vxlan, f, true);
+ vxlan_fdb_destroy(vxlan, f, true, true);
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
@@ -2512,7 +2677,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
spin_lock_bh(&vxlan->hash_lock);
f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f)
- vxlan_fdb_destroy(vxlan, f, true);
+ vxlan_fdb_destroy(vxlan, f, true, true);
spin_unlock_bh(&vxlan->hash_lock);
}
@@ -2566,7 +2731,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
continue;
/* the all_zeros_mac entry is deleted at vxlan_uninit */
if (!is_zero_ether_addr(f->eth_addr))
- vxlan_fdb_destroy(vxlan, f, true);
+ vxlan_fdb_destroy(vxlan, f, true, true);
}
}
spin_unlock_bh(&vxlan->hash_lock);
@@ -2674,6 +2839,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_fdb_add = vxlan_fdb_add,
.ndo_fdb_del = vxlan_fdb_delete,
.ndo_fdb_dump = vxlan_fdb_dump,
+ .ndo_fdb_get = vxlan_fdb_get,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
@@ -2809,6 +2975,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
[IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
[IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG },
+ [IFLA_VXLAN_DF] = { .type = NLA_U8 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -2865,6 +3032,16 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
+ if (data[IFLA_VXLAN_DF]) {
+ enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]);
+
+ if (df < 0 || df > VXLAN_DF_MAX) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_DF],
+ "Invalid DF attribute");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -2881,7 +3058,7 @@ static const struct ethtool_ops vxlan_ethtool_ops = {
};
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
- __be16 port, u32 flags)
+ __be16 port, u32 flags, int ifindex)
{
struct socket *sock;
struct udp_port_cfg udp_conf;
@@ -2899,6 +3076,7 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
}
udp_conf.local_udp_port = port;
+ udp_conf.bind_ifindex = ifindex;
/* Open UDP socket */
err = udp_sock_create(net, &udp_conf, &sock);
@@ -2910,7 +3088,8 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
/* Create new listen socket if needed */
static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
- __be16 port, u32 flags)
+ __be16 port, u32 flags,
+ int ifindex)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_sock *vs;
@@ -2925,7 +3104,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
for (h = 0; h < VNI_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vs->vni_list[h]);
- sock = vxlan_create_sock(net, ipv6, port, flags);
+ sock = vxlan_create_sock(net, ipv6, port, flags, ifindex);
if (IS_ERR(sock)) {
kfree(vs);
return ERR_CAST(sock);
@@ -2948,6 +3127,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
tunnel_cfg.sk_user_data = vs;
tunnel_cfg.encap_type = 1;
tunnel_cfg.encap_rcv = vxlan_rcv;
+ tunnel_cfg.encap_err_lookup = vxlan_err_lookup;
tunnel_cfg.encap_destroy = NULL;
tunnel_cfg.gro_receive = vxlan_gro_receive;
tunnel_cfg.gro_complete = vxlan_gro_complete;
@@ -2962,11 +3142,17 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
struct vxlan_sock *vs = NULL;
struct vxlan_dev_node *node;
+ int l3mdev_index = 0;
+
+ if (vxlan->cfg.remote_ifindex)
+ l3mdev_index = l3mdev_master_upper_ifindex_by_index(
+ vxlan->net, vxlan->cfg.remote_ifindex);
if (!vxlan->cfg.no_share) {
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
- vxlan->cfg.dst_port, vxlan->cfg.flags);
+ vxlan->cfg.dst_port, vxlan->cfg.flags,
+ l3mdev_index);
if (vs && !refcount_inc_not_zero(&vs->refcnt)) {
spin_unlock(&vn->sock_lock);
return -EBUSY;
@@ -2975,7 +3161,8 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
}
if (!vs)
vs = vxlan_socket_create(vxlan->net, ipv6,
- vxlan->cfg.dst_port, vxlan->cfg.flags);
+ vxlan->cfg.dst_port, vxlan->cfg.flags,
+ l3mdev_index);
if (IS_ERR(vs))
return PTR_ERR(vs);
#if IS_ENABLED(CONFIG_IPV6)
@@ -3258,6 +3445,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f = NULL;
+ bool unregister = false;
int err;
err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3283,22 +3471,29 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
err = register_netdevice(dev);
if (err)
goto errout;
+ unregister = true;
err = rtnl_configure_link(dev, NULL);
- if (err) {
- unregister_netdevice(dev);
+ if (err)
goto errout;
- }
/* notify default fdb entry */
if (f)
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH,
+ true);
list_add(&vxlan->next, &vn->vxlan_list);
return 0;
+
errout:
+ /* unregister_netdevice() destroys the default FDB entry with deletion
+ * notification. But the addition notification was not sent yet, so
+ * destroy the entry by hand here.
+ */
if (f)
- vxlan_fdb_destroy(vxlan, f, false);
+ vxlan_fdb_destroy(vxlan, f, false, false);
+ if (unregister)
+ unregister_netdevice(dev);
return err;
}
@@ -3386,11 +3581,8 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
conf->flags |= VXLAN_F_LEARN;
}
- if (data[IFLA_VXLAN_AGEING]) {
- if (changelink)
- return -EOPNOTSUPP;
+ if (data[IFLA_VXLAN_AGEING])
conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
- }
if (data[IFLA_VXLAN_PROXY]) {
if (changelink)
@@ -3509,6 +3701,9 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
conf->mtu = nla_get_u32(tb[IFLA_MTU]);
}
+ if (data[IFLA_VXLAN_DF])
+ conf->df = nla_get_u8(data[IFLA_VXLAN_DF]);
+
return 0;
}
@@ -3532,9 +3727,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
+ unsigned long old_age_interval;
struct vxlan_rdst old_dst;
struct vxlan_config conf;
- struct vxlan_fdb *f = NULL;
int err;
err = vxlan_nl2conf(tb, data,
@@ -3542,12 +3737,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
if (err)
return err;
+ old_age_interval = vxlan->cfg.age_interval;
memcpy(&old_dst, dst, sizeof(struct vxlan_rdst));
err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack);
if (err)
return err;
+ if (old_age_interval != vxlan->cfg.age_interval)
+ mod_timer(&vxlan->age_timer, jiffies);
+
/* handle default dst entry */
if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) {
spin_lock_bh(&vxlan->hash_lock);
@@ -3557,22 +3756,23 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
vxlan->cfg.dst_port,
old_dst.remote_vni,
old_dst.remote_vni,
- old_dst.remote_ifindex, 0);
+ old_dst.remote_ifindex,
+ true);
if (!vxlan_addr_any(&dst->remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ err = vxlan_fdb_update(vxlan, all_zeros_mac,
&dst->remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_APPEND | NLM_F_CREATE,
vxlan->cfg.dst_port,
dst->remote_vni,
dst->remote_vni,
dst->remote_ifindex,
- NTF_SELF, &f);
+ NTF_SELF, true);
if (err) {
spin_unlock_bh(&vxlan->hash_lock);
return err;
}
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
}
spin_unlock_bh(&vxlan->hash_lock);
}
@@ -3601,6 +3801,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_DF */
nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
@@ -3667,6 +3868,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
!!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
+ nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
!!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
@@ -3749,7 +3951,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
memset(&tb, 0, sizeof(tb));
dev = rtnl_create_link(net, name, name_assign_type,
- &vxlan_link_ops, tb);
+ &vxlan_link_ops, tb, NULL);
if (IS_ERR(dev))
return dev;
@@ -3844,18 +4046,89 @@ out:
spin_unlock_bh(&vxlan->hash_lock);
}
+static int
+vxlan_fdb_external_learn_add(struct net_device *dev,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ int err;
+
+ spin_lock_bh(&vxlan->hash_lock);
+ err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip,
+ NUD_REACHABLE,
+ NLM_F_CREATE | NLM_F_REPLACE,
+ fdb_info->remote_port,
+ fdb_info->vni,
+ fdb_info->remote_vni,
+ fdb_info->remote_ifindex,
+ NTF_USE | NTF_SELF | NTF_EXT_LEARNED,
+ false);
+ spin_unlock_bh(&vxlan->hash_lock);
+
+ return err;
+}
+
+static int
+vxlan_fdb_external_learn_del(struct net_device *dev,
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_fdb *f;
+ int err = 0;
+
+ spin_lock_bh(&vxlan->hash_lock);
+
+ f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni);
+ if (!f)
+ err = -ENOENT;
+ else if (f->flags & NTF_EXT_LEARNED)
+ err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr,
+ fdb_info->remote_ip,
+ fdb_info->remote_port,
+ fdb_info->vni,
+ fdb_info->remote_vni,
+ fdb_info->remote_ifindex,
+ false);
+
+ spin_unlock_bh(&vxlan->hash_lock);
+
+ return err;
+}
+
static int vxlan_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_vxlan_fdb_info *fdb_info;
+ int err = 0;
switch (event) {
case SWITCHDEV_VXLAN_FDB_OFFLOADED:
vxlan_fdb_offloaded_set(dev, ptr);
break;
+ case SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE:
+ fdb_info = ptr;
+ err = vxlan_fdb_external_learn_add(dev, fdb_info);
+ if (err) {
+ err = notifier_from_errno(err);
+ break;
+ }
+ fdb_info->offloaded = true;
+ vxlan_fdb_offloaded_set(dev, fdb_info);
+ break;
+ case SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE:
+ fdb_info = ptr;
+ err = vxlan_fdb_external_learn_del(dev, fdb_info);
+ if (err) {
+ err = notifier_from_errno(err);
+ break;
+ }
+ fdb_info->offloaded = false;
+ vxlan_fdb_offloaded_set(dev, fdb_info);
+ break;
}
- return 0;
+ return err;
}
static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = {
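The vxlan_fdb_replay() export above gives switchdev drivers a way to re-learn the full VXLAN FDB for one VNI: the core walks the hash table under hash_lock and emits one SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE call per (entry, remote) pair to the supplied notifier block. A minimal sketch of a consumer follows; the demo_* names and the hardware-programming step are placeholders, not part of this patch.

    #include <linux/notifier.h>
    #include <net/switchdev.h>
    #include <net/vxlan.h>

    static int demo_vxlan_fdb_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
    {
        struct switchdev_notifier_vxlan_fdb_info *fdb_info = ptr;

        if (event != SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE)
            return NOTIFY_DONE;

        /* Program fdb_info->eth_addr / remote_ip / vni into the ASIC here.
         * Returning notifier_from_errno(err) makes vxlan_fdb_replay()
         * stop the walk and propagate the error.
         */
        return NOTIFY_OK;
    }

    static struct notifier_block demo_nb = {
        .notifier_call = demo_vxlan_fdb_event,
    };

    /* Called e.g. when the VXLAN device joins an offloaded bridge. */
    static int demo_replay(struct net_device *vxlan_dev, __be32 vni)
    {
        return vxlan_fdb_replay(vxlan_dev, vni, &demo_nb);
    }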
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 4d6409605207..7a42336c8af8 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -391,6 +391,7 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
return -ENOMEM;
}
+ netdev_sent_queue(dev, skb->len);
spin_lock_irqsave(&priv->lock, flags);
/* Start from the next BD that should be filled */
@@ -447,6 +448,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
/* Start from the next BD that should be filled */
struct net_device *dev = priv->ndev;
+ unsigned int bytes_sent = 0;
+ int howmany = 0;
struct qe_bd *bd; /* BD pointer */
u16 bd_status;
int tx_restart = 0;
@@ -474,6 +477,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
skb = priv->tx_skbuff[priv->skb_dirtytx];
if (!skb)
break;
+ howmany++;
+ bytes_sent += skb->len;
dev->stats.tx_packets++;
memset(priv->tx_buffer +
(be32_to_cpu(bd->buf) - priv->dma_tx_addr),
@@ -501,6 +506,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
if (tx_restart)
hdlc_tx_restart(priv);
+ netdev_completed_queue(dev, howmany, bytes_sent);
return 0;
}
@@ -721,6 +727,7 @@ static int uhdlc_open(struct net_device *dev)
priv->hdlc_busy = 1;
netif_device_attach(priv->ndev);
napi_enable(&priv->napi);
+ netdev_reset_queue(dev);
netif_start_queue(dev);
hdlc_open(dev);
}
@@ -812,6 +819,7 @@ static int uhdlc_close(struct net_device *dev)
free_irq(priv->ut_info->uf_info.irq, priv);
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
priv->hdlc_busy = 0;
return 0;
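The fsl_ucc_hdlc hunks above wire the driver up to byte queue limits (BQL): each transmitted skb is accounted with netdev_sent_queue() before the hardware sees it, reclaimed descriptors are reported in bulk with netdev_completed_queue(), and the counters are cleared with netdev_reset_queue() whenever the queue is (re)started so stale state cannot stall it. The pairing invariant as a minimal sketch (demo_* names are placeholders):

    static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        /* ... map the skb and post it to the hardware ring ... */
        netdev_sent_queue(dev, skb->len);          /* account at enqueue */
        return NETDEV_TX_OK;
    }

    static void demo_tx_done(struct net_device *dev)
    {
        unsigned int pkts = 0, bytes = 0;

        /* ... for each reclaimed descriptor: pkts++; bytes += skb->len; ... */
        netdev_completed_queue(dev, pkts, bytes);  /* account at completion */
    }

    static int demo_open(struct net_device *dev)
    {
        netdev_reset_queue(dev);   /* drop counters from a previous run */
        netif_start_queue(dev);
        return 0;
    }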
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 166920ae23f8..8c456a66ac3b 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -114,4 +114,11 @@ config USB_NET_RNDIS_WLAN
If you choose to build a module, it'll be called rndis_wlan.
+config VIRT_WIFI
+ tristate "Wifi wrapper for ethernet drivers"
+ depends on CFG80211
+ ---help---
+ This option adds support for making Ethernet connections appear as
+ wifi connections through a special rtnetlink device.
+
endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 7fc96306712a..6cfe74515c95 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -27,3 +27,5 @@ obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o
obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
+
+obj-$(CONFIG_VIRT_WIFI) += virt_wifi.o
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index e1ad6b9166a6..a7fb5441ced4 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -47,8 +47,7 @@ config ATH10K_SNOC
select QCOM_QMI_HELPERS
---help---
This module adds support for integrated WCN3990 chip connected
- to system NOC(SNOC). Currently work in progress and will not
- fully work.
+ to the system NOC (SNOC).

config ATH10K_DEBUG
bool "Atheros ath10k debugging"
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index da607febfd82..399b501f3c3c 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -561,6 +561,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &wcn3990_ops,
.decap_align_bytes = 1,
.num_peers = TARGET_HL_10_TLV_NUM_PEERS,
+ .n_cipher_suites = 8,
.ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
.target_64bit = true,
@@ -594,6 +595,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_NO_PS] = "no-ps",
[ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
[ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
+ [ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL] = "single-chan-info-per-channel",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -2183,6 +2185,8 @@ static void ath10k_core_restart(struct work_struct *work)
if (ret)
ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
ret);
+
+ complete(&ar->driver_recovery);
}
static void ath10k_core_set_coverage_class_work(struct work_struct *work)
@@ -2418,6 +2422,28 @@ static int ath10k_core_reset_rx_filter(struct ath10k *ar)
return 0;
}
+static int ath10k_core_compat_services(struct ath10k *ar)
+{
+ struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+ /* All 10.x firmware versions support thermal throttling but don't
+ * advertise the support via service flags, so we have to hardcode
+ * it here.
+ */
+ switch (fw_file->wmi_op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ set_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
const struct ath10k_fw_components *fw)
{
@@ -2617,6 +2643,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
+ status = ath10k_core_compat_services(ar);
+ if (status) {
+ ath10k_err(ar, "compat services failed: %d\n", status);
+ goto err_hif_stop;
+ }
+
/* Some firmware revisions do not properly set up hardware rx filter
* registers.
*
@@ -3046,6 +3078,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
init_completion(&ar->target_suspend);
+ init_completion(&ar->driver_recovery);
init_completion(&ar->wow.wakeup_completed);
init_completion(&ar->install_key_done);
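The new ar->driver_recovery completion is initialized in ath10k_core_create() and signalled at the end of ath10k_core_restart(), so other code can block until a restart has finished. A hedged sketch of such a waiter; the timeout value and calling context are illustrative and not taken from this patch:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include "core.h"   /* struct ath10k */

    static int demo_wait_for_recovery(struct ath10k *ar)
    {
        unsigned long time_left;

        time_left = wait_for_completion_timeout(&ar->driver_recovery,
                                                msecs_to_jiffies(3000));
        return time_left ? 0 : -ETIMEDOUT;
    }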
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 042418097cf9..46e9c8c97a4d 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -474,6 +474,7 @@ struct ath10k_htt_data_stats {
u64 bw[ATH10K_COUNTER_TYPE_MAX][ATH10K_BW_NUM];
u64 nss[ATH10K_COUNTER_TYPE_MAX][ATH10K_NSS_NUM];
u64 gi[ATH10K_COUNTER_TYPE_MAX][ATH10K_GI_NUM];
+ u64 rate_table[ATH10K_COUNTER_TYPE_MAX][ATH10K_RATE_TABLE_NUM];
};
struct ath10k_htt_tx_stats {
@@ -493,6 +494,7 @@ struct ath10k_sta {
u32 smps;
u16 peer_id;
struct rate_info txrate;
+ struct ieee80211_tx_info tx_info;
struct work_struct update_wk;
u64 rx_duration;
@@ -760,6 +762,9 @@ enum ath10k_fw_features {
/* Firmware load is done externally, not by bmi */
ATH10K_FW_FEATURE_NON_BMI = 19,
+ /* Firmware sends only one chan_info event per channel */
+ ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL = 20,
+
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -960,6 +965,7 @@ struct ath10k {
} hif;
struct completion target_suspend;
+ struct completion driver_recovery;
const struct ath10k_hw_regs *regs;
const struct ath10k_hw_ce_regs *hw_ce_regs;
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index 4d28063052fe..eadae2f9206b 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -867,9 +867,105 @@ static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = {
},
};
+static const struct ath10k_mem_section ipq4019_soc_reg_range[] = {
+ {0x080000, 0x080004},
+ {0x080020, 0x080024},
+ {0x080028, 0x080050},
+ {0x0800d4, 0x0800ec},
+ {0x08010c, 0x080118},
+ {0x080284, 0x080290},
+ {0x0802a8, 0x0802b8},
+ {0x0802dc, 0x08030c},
+ {0x082000, 0x083fff}
+};
+
+static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x68000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0xC0000,
+ .len = 0x40000,
+ .name = "SRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x98000,
+ .len = 0x50000,
+ .name = "IRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x30000,
+ .len = 0x7000,
+ .name = "APB REG 1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x3f000,
+ .len = 0x3000,
+ .name = "APB REG 2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x43000,
+ .len = 0x3000,
+ .name = "WIFI REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x4A000,
+ .len = 0x5000,
+ .name = "CE REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x080000,
+ .len = 0x083fff - 0x080000,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = ipq4019_soc_reg_range,
+ .size = ARRAY_SIZE(ipq4019_soc_reg_range),
+ },
+ },
+};
+
static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_0_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -877,6 +973,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA6174_HW_1_1_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -884,6 +981,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA6174_HW_1_3_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -891,6 +989,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA6174_HW_2_1_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw21_mem_regions,
.size = ARRAY_SIZE(qca6174_hw21_mem_regions),
@@ -898,6 +997,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA6174_HW_3_0_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
@@ -905,6 +1005,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA6174_HW_3_2_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
@@ -912,6 +1013,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA9377_HW_1_1_DEV_VERSION,
+ .hw_rev = ATH10K_HW_QCA9377,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
@@ -919,6 +1021,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA988X_HW_2_0_VERSION,
+ .hw_rev = ATH10K_HW_QCA988X,
.region_table = {
.regions = qca988x_hw20_mem_regions,
.size = ARRAY_SIZE(qca988x_hw20_mem_regions),
@@ -926,6 +1029,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA9984_HW_1_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_QCA9984,
.region_table = {
.regions = qca9984_hw10_mem_regions,
.size = ARRAY_SIZE(qca9984_hw10_mem_regions),
@@ -933,6 +1037,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA9888_HW_2_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_QCA9888,
.region_table = {
.regions = qca9984_hw10_mem_regions,
.size = ARRAY_SIZE(qca9984_hw10_mem_regions),
@@ -940,12 +1045,20 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA99X0_HW_2_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_QCA99X0,
.region_table = {
.regions = qca99x0_hw20_mem_regions,
.size = ARRAY_SIZE(qca99x0_hw20_mem_regions),
},
},
-
+ {
+ .hw_id = QCA4019_HW_1_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_QCA4019,
+ .region_table = {
+ .regions = qca4019_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca4019_hw10_mem_regions),
+ },
+ },
};
static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
@@ -987,7 +1100,8 @@ const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k
return NULL;
for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
- if (ar->target_version == hw_mem_layouts[i].hw_id)
+ if (ar->target_version == hw_mem_layouts[i].hw_id &&
+ ar->hw_rev == hw_mem_layouts[i].hw_rev)
return &hw_mem_layouts[i];
}
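The coredump change adds hw_rev to the layout table and requires both fields to match, since a raw target-version value alone can be shared by different chip families. A standalone illustration with made-up IDs (plain C, compilable in userspace):

    #include <stdint.h>
    #include <stdio.h>

    struct layout { uint32_t hw_id; uint32_t hw_rev; const char *name; };

    static const struct layout layouts[] = {
        { 0x01000000, 1, "family-A dump layout" },
        { 0x01000000, 2, "family-B dump layout" },   /* same hw_id */
    };

    static const struct layout *find(uint32_t hw_id, uint32_t hw_rev)
    {
        for (size_t i = 0; i < sizeof(layouts) / sizeof(layouts[0]); i++)
            if (layouts[i].hw_id == hw_id && layouts[i].hw_rev == hw_rev)
                return &layouts[i];
        return NULL;
    }

    int main(void)
    {
        /* Matching hw_id alone would always pick family A here. */
        printf("%s\n", find(0x01000000, 2)->name);
        return 0;
    }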
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index 3baaf9d2cbcd..5dac653e1649 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -165,6 +165,7 @@ struct ath10k_mem_region {
*/
struct ath10k_hw_mem_layout {
u32 hw_id;
+ u32 hw_rev;
struct {
const struct ath10k_mem_region *regions;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 15964b374f68..02988fc378a1 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -2578,8 +2578,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar,
&fops_pktlog_filter);
- debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
- &fops_quiet_period);
+ if (test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
+ &fops_quiet_period);
debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar,
&fops_tpc_stats);
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index b09cdc699c69..4778a455d81a 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -71,7 +71,7 @@ void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
- if (!peer)
+ if (!peer || !peer->sta)
goto out;
arsta = (struct ath10k_sta *)peer->sta->drv_priv;
@@ -665,7 +665,7 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
"retry", "ampdu"};
const char *str[ATH10K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
int len = 0, i, j, k, retval = 0;
- const int size = 2 * 4096;
+ const int size = 16 * 4096;
char *buf;
buf = kzalloc(size, GFP_KERNEL);
@@ -719,6 +719,16 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
len += scnprintf(buf + len, size - len, "%llu ",
stats->legacy[j][i]);
len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " Rate table %s (1,2 ... Mbps)\n ",
+ str[j]);
+ for (i = 0; i < ATH10K_RATE_TABLE_NUM; i++) {
+ len += scnprintf(buf + len, size - len, "%llu ",
+ stats->rate_table[j][i]);
+ if (!((i + 1) % 8))
+ len +=
+ scnprintf(buf + len, size - len, "\n ");
+ }
}
}
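The dump loop above relies on the kernel's scnprintf() semantics: it returns the number of bytes actually written, so `len += scnprintf(buf + len, size - len, ...)` can never push len past size even when the buffer fills. Plain snprintf() returns the would-be length and breaks that pattern. A userspace model of the difference (the wrapper approximates the kernel function; it is not its implementation):

    #include <stdarg.h>
    #include <stdio.h>

    static int scnprintf_demo(char *buf, size_t size, const char *fmt, ...)
    {
        va_list ap;
        int n;

        if (size == 0)
            return 0;
        va_start(ap, fmt);
        n = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        if (n < 0)
            return 0;
        return (size_t)n >= size ? (int)(size - 1) : n;  /* bytes written */
    }

    int main(void)
    {
        char buf[16];
        int len = 0;

        for (int i = 0; i < 100; i++)
            len += scnprintf_demo(buf + len, sizeof(buf) - len, "%d ", i);
        printf("len=%d buf=\"%s\"\n", len, buf);   /* len is clamped at 15 */
        return 0;
    }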
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index ffec98f7be50..f42bac204ef8 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -469,6 +469,166 @@ static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
return msdu;
}
+static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
+ struct sk_buff *frag_list,
+ unsigned int frag_len)
+{
+ skb_shinfo(skb_head)->frag_list = frag_list;
+ skb_head->data_len = frag_len;
+ skb_head->len += skb_head->data_len;
+}
+
+static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
+ struct sk_buff *msdu,
+ struct htt_rx_in_ord_msdu_desc **msdu_desc)
+{
+ struct ath10k *ar = htt->ar;
+ u32 paddr;
+ struct sk_buff *frag_buf;
+ struct sk_buff *prev_frag_buf;
+ u8 last_frag;
+ struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
+ struct htt_rx_desc *rxd;
+ int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
+
+ rxd = (void *)msdu->data;
+ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+ skb_put(msdu, sizeof(struct htt_rx_desc));
+ skb_pull(msdu, sizeof(struct htt_rx_desc));
+ skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
+ amsdu_len -= msdu->len;
+
+ last_frag = ind_desc->reserved;
+ if (last_frag) {
+ if (amsdu_len) {
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
+ __le16_to_cpu(ind_desc->msdu_len),
+ amsdu_len);
+ }
+ return 0;
+ }
+
+ ind_desc++;
+ paddr = __le32_to_cpu(ind_desc->msdu_paddr);
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!frag_buf) {
+ ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
+ return -ENOENT;
+ }
+
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
+ ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
+
+ amsdu_len -= frag_buf->len;
+ prev_frag_buf = frag_buf;
+ last_frag = ind_desc->reserved;
+ while (!last_frag) {
+ ind_desc++;
+ paddr = __le32_to_cpu(ind_desc->msdu_paddr);
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!frag_buf) {
+ ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
+ paddr);
+ prev_frag_buf->next = NULL;
+ return -ENOENT;
+ }
+
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
+ last_frag = ind_desc->reserved;
+ amsdu_len -= frag_buf->len;
+
+ prev_frag_buf->next = frag_buf;
+ prev_frag_buf = frag_buf;
+ }
+
+ if (amsdu_len) {
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
+ __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
+ }
+
+ *msdu_desc = ind_desc;
+
+ prev_frag_buf->next = NULL;
+ return 0;
+}
+
+static int
+ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
+ struct sk_buff *msdu,
+ struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
+{
+ struct ath10k *ar = htt->ar;
+ u64 paddr;
+ struct sk_buff *frag_buf;
+ struct sk_buff *prev_frag_buf;
+ u8 last_frag;
+ struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
+ struct htt_rx_desc *rxd;
+ int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
+
+ rxd = (void *)msdu->data;
+ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+ skb_put(msdu, sizeof(struct htt_rx_desc));
+ skb_pull(msdu, sizeof(struct htt_rx_desc));
+ skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
+ amsdu_len -= msdu->len;
+
+ last_frag = ind_desc->reserved;
+ if (last_frag) {
+ if (amsdu_len) {
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
+ __le16_to_cpu(ind_desc->msdu_len),
+ amsdu_len);
+ }
+ return 0;
+ }
+
+ ind_desc++;
+ paddr = __le64_to_cpu(ind_desc->msdu_paddr);
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!frag_buf) {
+ ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
+ return -ENOENT;
+ }
+
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
+ ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
+
+ amsdu_len -= frag_buf->len;
+ prev_frag_buf = frag_buf;
+ last_frag = ind_desc->reserved;
+ while (!last_frag) {
+ ind_desc++;
+ paddr = __le64_to_cpu(ind_desc->msdu_paddr);
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!frag_buf) {
+ ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
+ paddr);
+ prev_frag_buf->next = NULL;
+ return -ENOENT;
+ }
+
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
+ last_frag = ind_desc->reserved;
+ amsdu_len -= frag_buf->len;
+
+ prev_frag_buf->next = frag_buf;
+ prev_frag_buf = frag_buf;
+ }
+
+ if (amsdu_len) {
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
+ __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
+ }
+
+ *msdu_desc = ind_desc;
+
+ prev_frag_buf->next = NULL;
+ return 0;
+}
+
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
struct htt_rx_in_ord_ind *ev,
struct sk_buff_head *list)
@@ -477,7 +637,7 @@ static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
struct htt_rx_desc *rxd;
struct sk_buff *msdu;
- int msdu_count;
+ int msdu_count, ret;
bool is_offload;
u32 paddr;
@@ -495,6 +655,18 @@ static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
return -ENOENT;
}
+ if (!is_offload && ar->monitor_arvif) {
+ ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
+ &msdu_desc);
+ if (ret) {
+ __skb_queue_purge(list);
+ return ret;
+ }
+ __skb_queue_tail(list, msdu);
+ msdu_desc++;
+ continue;
+ }
+
__skb_queue_tail(list, msdu);
if (!is_offload) {
@@ -527,7 +699,7 @@ static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
struct htt_rx_desc *rxd;
struct sk_buff *msdu;
- int msdu_count;
+ int msdu_count, ret;
bool is_offload;
u64 paddr;
@@ -544,6 +716,18 @@ static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
return -ENOENT;
}
+ if (!is_offload && ar->monitor_arvif) {
+ ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
+ &msdu_desc);
+ if (ret) {
+ __skb_queue_purge(list);
+ return ret;
+ }
+ __skb_queue_tail(list, msdu);
+ msdu_desc++;
+ continue;
+ }
+
__skb_queue_tail(list, msdu);
if (!is_offload) {
@@ -1159,7 +1343,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
enum htt_rx_mpdu_encrypt_type enctype,
- bool is_decrypted)
+ bool is_decrypted,
+ const u8 first_hdr[64])
{
struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
@@ -1167,6 +1352,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
size_t crypto_len;
bool is_first;
bool is_last;
+ bool msdu_limit_err;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+ u8 *qos;
rxd = (void *)msdu->data - sizeof(*rxd);
is_first = !!(rxd->msdu_end.common.info0 &
@@ -1184,16 +1372,45 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
* [FCS] <-- at end, needs to be trimmed
*/
+ /* Some hardware (QCA99x0 variants) limits the number of MSDUs it
+ * deaggregates from an A-MSDU, so that unwanted MSDU-deaggregation is
+ * avoided for error packets. If the limit is exceeded, the hw sends all
+ * remaining MSDUs as a single last MSDU with this msdu limit error set.
+ */
+ msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);
+
+ /* If an MSDU limit error occurred, don't warn: a partial raw MSDU
+ * without the first MSDU is expected in that case and is handled
+ * later here.
+ */
/* This probably shouldn't happen but warn just in case */
- if (WARN_ON_ONCE(!is_first))
+ if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
return;
/* This probably shouldn't happen but warn just in case */
- if (WARN_ON_ONCE(!(is_first && is_last)))
+ if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
return;
skb_trim(msdu, msdu->len - FCS_LEN);
+ /* Push the original 802.11 header */
+ if (unlikely(msdu_limit_err)) {
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+
+ if (crypto_len)
+ memcpy(skb_push(msdu, crypto_len),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ crypto_len);
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+ }
+
/* In most cases this will be true for sniffed frames. It makes sense
* to deliver them as-is without stripping the crypto param. This is
* necessary for software based decryption.
@@ -1467,7 +1684,7 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
switch (decap) {
case RX_MSDU_DECAP_RAW:
ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
- is_decrypted);
+ is_decrypted, first_hdr);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
@@ -2627,7 +2844,7 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb_any(skb);
}
-static inline int ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
+static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
{
static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
18, 24, 36, 48, 54};
@@ -2646,11 +2863,11 @@ static void
ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
struct ath10k_sta *arsta,
struct ath10k_per_peer_tx_stats *pstats,
- u8 legacy_rate_idx)
+ s8 legacy_rate_idx)
{
struct rate_info *txrate = &arsta->txrate;
struct ath10k_htt_tx_stats *tx_stats;
- int ht_idx, gi, mcs, bw, nss;
+ int idx, ht_idx, gi, mcs, bw, nss;
if (!arsta->tx_stats)
return;
@@ -2661,6 +2878,8 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
mcs = txrate->mcs;
bw = txrate->bw;
nss = txrate->nss;
+ idx = mcs * 8 + 8 * 10 * nss;
+ idx += bw * 2 + gi;
#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
@@ -2709,12 +2928,16 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).gi[0][gi] +=
pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
+ pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).bw[1][bw] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).nss[1][nss] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).gi[1][gi] +=
pstats->succ_pkts + pstats->retry_pkts;
+ STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
+ pstats->succ_pkts + pstats->retry_pkts;
} else {
tx_stats->ack_fails +=
ATH10K_HW_BA_FAIL(pstats->flags);
@@ -2743,6 +2966,15 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
+
+ if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
+ STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
+ STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
+ }
}
static void
@@ -2751,8 +2983,10 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
struct ath10k_per_peer_tx_stats *peer_stats)
{
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ieee80211_chanctx_conf *conf = NULL;
u8 rate = 0, sgi;
s8 rate_idx = 0;
+ bool skip_auto_rate;
struct rate_info txrate;
lockdep_assert_held(&ar->data_lock);
@@ -2762,6 +2996,13 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
sgi = ATH10K_HW_GI(peer_stats->flags);
+ skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
+
+ /* Firmware's rate control skips broadcast/management frames, frames
+ * for which the host has configured fixed rates, and some other
+ * special cases.
+ */
+ if (skip_auto_rate)
+ return;
if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
@@ -2776,7 +3017,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
}
memset(&arsta->txrate, 0, sizeof(arsta->txrate));
-
+ memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
@@ -2795,11 +3036,59 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
arsta->txrate.mcs = txrate.mcs;
}
- if (sgi)
- arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ switch (txrate.flags) {
+ case WMI_RATE_PREAMBLE_OFDM:
+ if (arsta->arvif && arsta->arvif->vif)
+ conf = rcu_dereference(arsta->arvif->vif->chanctx_conf);
+ if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
+ arsta->tx_info.status.rates[0].idx = rate_idx - 4;
+ break;
+ case WMI_RATE_PREAMBLE_CCK:
+ arsta->tx_info.status.rates[0].idx = rate_idx;
+ if (sgi)
+ arsta->tx_info.status.rates[0].flags |=
+ (IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
+ IEEE80211_TX_RC_SHORT_GI);
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ arsta->tx_info.status.rates[0].idx =
+ txrate.mcs + ((txrate.nss - 1) * 8);
+ if (sgi)
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
+ txrate.mcs, txrate.nss);
+ if (sgi)
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
+ break;
+ }
arsta->txrate.nss = txrate.nss;
arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ switch (arsta->txrate.bw) {
+ case RATE_INFO_BW_40:
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case RATE_INFO_BW_80:
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ }
+
+ if (peer_stats->succ_pkts) {
+ arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
+ arsta->tx_info.status.rates[0].count = 1;
+ ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
+ }
if (ath10k_debug_is_extd_tx_stats_enabled(ar))
ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
@@ -2832,7 +3121,7 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
rcu_read_lock();
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
- if (!peer) {
+ if (!peer || !peer->sta) {
ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
peer_id);
goto out;
@@ -2885,7 +3174,7 @@ static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
rcu_read_lock();
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
- if (!peer) {
+ if (!peer || !peer->sta) {
ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
peer_id);
goto out;
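The new rate_table index in ath10k_accumulate_per_peer_tx_stats() is computed as idx = mcs * 8 + 8 * 10 * nss + bw * 2 + gi. One consistent reading of that packing, assuming gi in {0,1}, bw in [0,3] and mcs in [0,9] (ranges inferred, not stated in the patch): gi is the innermost digit (2 slots), then bw (stride 2), then mcs (stride 8), then nss (stride 80). A worked example:

    #include <stdio.h>

    static int rate_table_idx(int nss, int mcs, int bw, int gi)
    {
        return mcs * 8 + 8 * 10 * nss + bw * 2 + gi;
    }

    int main(void)
    {
        /* nss=1, MCS 7, 40 MHz (bw=1), short GI: 56 + 80 + 2 + 1 = 139 */
        printf("%d\n", rate_table_idx(1, 7, 1, 1));
        return 0;
    }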
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index af8ae8117c62..61ecf931ba4d 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -1119,8 +1119,15 @@ static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
RX_MSDU_END_INFO1_L3_HDR_PAD);
}
+static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
+{
+ return !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
+}
+
const struct ath10k_hw_ops qca99x0_ops = {
.rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
};
const struct ath10k_hw_ops qca6174_ops = {
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 1b5da272d18c..e50a8dc5b093 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -624,6 +624,7 @@ struct ath10k_hw_ops {
int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
void (*set_coverage_class)(struct ath10k *ar, s16 value);
int (*enable_pll_clk)(struct ath10k *ar);
+ bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
};
extern const struct ath10k_hw_ops qca988x_ops;
@@ -642,6 +643,15 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
return 0;
}
+static inline bool
+ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw,
+ struct htt_rx_desc *rxd)
+{
+ if (hw->hw_ops->rx_desc_get_msdu_limit_error)
+ return hw->hw_ops->rx_desc_get_msdu_limit_error(rxd);
+ return false;
+}
+
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 7e49342bae38..e49b36752ba2 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -22,6 +22,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/acpi.h>
+#include <linux/of.h>
#include "hif.h"
#include "core.h"
@@ -4637,11 +4638,44 @@ static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
return ret;
}
+static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar,
+ struct wmi_bb_timing_cfg_arg *bb_timing)
+{
+ struct device_node *node;
+ const char *fem_name;
+ int ret;
+
+ node = ar->dev->of_node;
+ if (!node)
+ return -ENOENT;
+
+ ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name);
+ if (ret)
+ return -ENOENT;
+
+ /*
+ * If an external front-end module is used in the hardware, the default
+ * baseband timing parameters cannot be used since they were fine-tuned
+ * for the reference hardware, so choose different values suitable for
+ * that external FEM.
+ */
+ if (!strcmp("microsemi-lx5586", fem_name)) {
+ bb_timing->bb_tx_timing = 0x00;
+ bb_timing->bb_xpa_timing = 0x0101;
+ } else {
+ return -ENOENT;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
+ bb_timing->bb_tx_timing, bb_timing->bb_xpa_timing);
+ return 0;
+}
+
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
u32 param;
int ret = 0;
+ struct wmi_bb_timing_cfg_arg bb_timing = {0};
/*
* This makes sense only when restarting hw. It is harmless to call
@@ -4796,6 +4830,19 @@ static int ath10k_start(struct ieee80211_hw *hw)
clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
}
+ if (test_bit(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, ar->wmi.svc_map)) {
+ ret = __ath10k_fetch_bb_timing_dt(ar, &bb_timing);
+ if (!ret) {
+ ret = ath10k_wmi_pdev_bb_timing(ar, &bb_timing);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set bb timings: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+ }
+
ar->num_started_vdevs = 0;
ath10k_regd_update(ar);
@@ -5154,6 +5201,17 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err;
}
+ if (test_bit(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
+ ar->wmi.svc_map)) {
+ vdev_param = ar->wmi.vdev_param->disable_4addr_src_lrn;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ WMI_VDEV_DISABLE_4_ADDR_SRC_LRN);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to disable 4addr src lrn vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+ }
+
ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
spin_lock_bh(&ar->data_lock);
list_add(&arvif->list, &ar->arvifs);
@@ -5754,30 +5812,6 @@ static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
return data.num_tdls_stations;
}
-static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct ath10k_vif *arvif = (void *)vif->drv_priv;
- int *num_tdls_vifs = data;
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return;
-
- if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
- (*num_tdls_vifs)++;
-}
-
-static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
-{
- int num_tdls_vifs = 0;
-
- ieee80211_iterate_active_interfaces_atomic(hw,
- IEEE80211_IFACE_ITER_NORMAL,
- ath10k_mac_tdls_vifs_count_iter,
- &num_tdls_vifs);
- return num_tdls_vifs;
-}
-
static int ath10k_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
@@ -6285,7 +6319,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
*/
enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
u32 num_tdls_stations;
- u32 num_tdls_vifs;
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
@@ -6293,15 +6326,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ar->num_stations + 1, ar->max_num_stations,
ar->num_peers + 1, ar->max_num_peers);
- if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
- arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
- GFP_KERNEL);
- if (!arsta->tx_stats)
- goto exit;
- }
-
num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
- num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
if (sta->tdls) {
if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
@@ -6321,12 +6346,22 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
goto exit;
}
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
+ GFP_KERNEL);
+ if (!arsta->tx_stats) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ }
+
ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
sta->addr, peer_type);
if (ret) {
ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
ath10k_mac_dec_num_stations(arvif, sta);
+ kfree(arsta->tx_stats);
goto exit;
}
@@ -6339,6 +6374,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
ath10k_mac_dec_num_stations(arvif, sta);
+ kfree(arsta->tx_stats);
ret = -ENOENT;
goto exit;
}
@@ -6359,6 +6395,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_peer_delete(ar, arvif->vdev_id,
sta->addr);
ath10k_mac_dec_num_stations(arvif, sta);
+ kfree(arsta->tx_stats);
goto exit;
}
@@ -6370,6 +6407,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
sta->addr, arvif->vdev_id, ret);
ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
ath10k_mac_dec_num_stations(arvif, sta);
+ kfree(arsta->tx_stats);
if (num_tdls_stations != 0)
goto exit;
@@ -6385,9 +6423,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);
- if (ath10k_debug_is_extd_tx_stats_enabled(ar))
- kfree(arsta->tx_stats);
-
if (sta->tdls) {
ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
sta,
@@ -6427,6 +6462,11 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
}
spin_unlock_bh(&ar->data_lock);
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+ }
+
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
ath10k_mac_txq_unref(ar, sta->txq[i]);
@@ -8313,7 +8353,6 @@ static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
{
- struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev);
acpi_handle root_handle;
acpi_handle handle;
struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -8321,7 +8360,7 @@ static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
u32 alpha2_code;
char alpha2[3];
- root_handle = ACPI_HANDLE(&pdev->dev);
+ root_handle = ACPI_HANDLE(ar->dev);
if (!root_handle)
return -EOPNOTSUPP;
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 56cb1831dcdf..37b3bd629f48 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -543,7 +543,7 @@ static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
goto out;
if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
- ath10k_err(ar, "capablity req rejected: %d\n", resp->resp.error);
+ ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error);
ret = -EINVAL;
goto out;
}
@@ -623,7 +623,7 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
goto out;
}
- ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capablity request completed\n");
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n");
return 0;
out:
@@ -657,7 +657,7 @@ ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
wlfw_ind_register_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
- ath10k_err(ar, "failed to send indication registed request: %d\n", ret);
+ ath10k_err(ar, "failed to send indication registered request: %d\n", ret);
goto out;
}
@@ -931,9 +931,9 @@ static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
qmi->msa_mem_size = resource_size(&r);
qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
MEMREMAP_WT);
- if (!qmi->msa_pa) {
+ if (IS_ERR(qmi->msa_va)) {
dev_err(dev, "failed to map memory region: %pa\n", &r.start);
- return -EBUSY;
+ return PTR_ERR(qmi->msa_va);
}
} else {
qmi->msa_va = dmam_alloc_coherent(dev, msa_size,
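
The qmi.c hunk above also corrects the error check for devm_memremap(): that
function reports failure through an ERR_PTR-encoded return value, never NULL,
so testing msa_pa could not catch a failed mapping. A self-contained userspace
rendition of the ERR_PTR convention (fake_memremap is hypothetical; the three
helpers mirror the kernel's, reimplemented here for illustration):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
    return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *fake_memremap(int fail)
{
    static char region[64];

    return fail ? ERR_PTR(-ENOMEM) : region;
}

int main(void)
{
    void *va = fake_memremap(1);

    if (IS_ERR(va)) {                   /* the fixed check */
        printf("map failed: %ld\n", PTR_ERR(va));
        return 1;
    }
    printf("mapped at %p\n", va);
    return 0;
}
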
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index 310674de3cb8..dfbfe674e11e 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -572,6 +572,7 @@ struct rx_msdu_start {
#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0
#define RX_MSDU_END_INFO0_FIRST_MSDU BIT(14)
#define RX_MSDU_END_INFO0_LAST_MSDU BIT(15)
+#define RX_MSDU_END_INFO0_MSDU_LIMIT_ERR BIT(18)
#define RX_MSDU_END_INFO0_PRE_DELIM_ERR BIT(30)
#define RX_MSDU_END_INFO0_RESERVED_3B BIT(31)
@@ -676,6 +677,12 @@ struct rx_msdu_end {
* Indicates the last MSDU of the A-MSDU. MPDU end status is
* only valid when last_msdu is set.
*
+ *msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded; the remaining
+ * MSDUs are neither scattered nor decapsulated, and are received
+ * in RAW format as a single MSDU buffer.
+ *
*reserved_3a
* Reserved: HW should fill with zero. FW should ignore.
*
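
The rx_desc.h hunks only add a definition plus documentation, but the intended
use is a plain bit test on the MSDU-end info word: when BIT(18) is set, the
remaining MSDUs arrive undecapsulated as one raw buffer. A trivial
self-contained check of that test (msdu_limit_exceeded is a hypothetical
helper, not a driver function):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                           (1U << (n))
#define RX_MSDU_END_INFO0_MSDU_LIMIT_ERR BIT(18)

static int msdu_limit_exceeded(uint32_t info0)
{
    return !!(info0 & RX_MSDU_END_INFO0_MSDU_LIMIT_ERR);
}

int main(void)
{
    printf("%d\n", msdu_limit_exceeded(BIT(18) | BIT(14))); /* 1 */
    printf("%d\n", msdu_limit_exceeded(BIT(14)));           /* 0 */
    return 0;
}
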
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 8d3d9bca410f..54efe6be8f1d 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -46,14 +46,14 @@ static char *const ce_name[] = {
"WLAN_CE_11",
};
-static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
- {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
- {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
- {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
- {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
+static struct ath10k_vreg_info vreg_cfg[] = {
+ {NULL, "vdd-0.8-cx-mx", 800000, 850000, 0, 0, false},
+ {NULL, "vdd-1.8-xo", 1800000, 1850000, 0, 0, false},
+ {NULL, "vdd-1.3-rfa", 1300000, 1350000, 0, 0, false},
+ {NULL, "vdd-3.3-ch0", 3300000, 3350000, 0, 0, false},
};
-static struct ath10k_wcn3990_clk_info clk_cfg[] = {
+static struct ath10k_clk_info clk_cfg[] = {
{NULL, "cxo_ref_clk_pin", 0, false},
};
@@ -474,14 +474,14 @@ static struct service_to_pipe target_service_to_ce_map_wlan[] = {
},
};
-void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
+static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
iowrite32(value, ar_snoc->mem + offset);
}
-u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
+static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
u32 val;
@@ -918,7 +918,9 @@ static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
static void ath10k_snoc_hif_stop(struct ath10k *ar)
{
- ath10k_snoc_irq_disable(ar);
+ if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ ath10k_snoc_irq_disable(ar);
+
napi_synchronize(&ar->napi);
napi_disable(&ar->napi);
ath10k_snoc_buffer_cleanup(ar);
@@ -927,10 +929,14 @@ static void ath10k_snoc_hif_stop(struct ath10k *ar)
static int ath10k_snoc_hif_start(struct ath10k *ar)
{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
napi_enable(&ar->napi);
ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
+ clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
+
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
return 0;
@@ -994,7 +1000,8 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar)
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
{
- ath10k_qmi_wlan_disable(ar);
+ if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ ath10k_qmi_wlan_disable(ar);
}
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
@@ -1091,6 +1098,11 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
struct ath10k *ar = container_of(ctx, struct ath10k, napi);
int done = 0;
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
+ napi_complete(ctx);
+ return done;
+ }
+
ath10k_ce_per_engine_service_any(ar);
done = ath10k_htt_txrx_compl_task(ar, budget);
@@ -1187,17 +1199,29 @@ int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
struct ath10k_bus_params bus_params;
int ret;
+ if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
+ return 0;
+
switch (type) {
case ATH10K_QMI_EVENT_FW_READY_IND:
+ if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
+ queue_work(ar->workqueue, &ar->restart_work);
+ break;
+ }
+
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
bus_params.chip_id = ar_snoc->target_info.soc_version;
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
- ath10k_err(ar, "failed to register driver core: %d\n",
+ ath10k_err(ar, "Failed to register driver core: %d\n",
ret);
+ return ret;
}
+ set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
break;
case ATH10K_QMI_EVENT_FW_DOWN_IND:
+ set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
+ set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
break;
default:
ath10k_err(ar, "invalid fw indication: %llx\n", type);
@@ -1246,7 +1270,7 @@ static void ath10k_snoc_release_resource(struct ath10k *ar)
}
static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
- struct ath10k_wcn3990_vreg_info *vreg_info)
+ struct ath10k_vreg_info *vreg_info)
{
struct regulator *reg;
int ret = 0;
@@ -1284,7 +1308,7 @@ done:
}
static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
- struct ath10k_wcn3990_clk_info *clk_info)
+ struct ath10k_clk_info *clk_info)
{
struct clk *handle;
int ret = 0;
@@ -1311,10 +1335,80 @@ static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
return ret;
}
-static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
+static int __ath10k_snoc_vreg_on(struct ath10k *ar,
+ struct ath10k_vreg_info *vreg_info)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
+ vreg_info->name);
+
+ ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+ vreg_info->max_v);
+ if (ret) {
+ ath10k_err(ar,
+ "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
+ vreg_info->name, vreg_info->min_v, vreg_info->max_v);
+ return ret;
+ }
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg, vreg_info->load_ua);
+ if (ret < 0) {
+ ath10k_err(ar, "failed to set regulator %s load: %d\n",
+ vreg_info->name, vreg_info->load_ua);
+ goto err_set_load;
+ }
+ }
+
+ ret = regulator_enable(vreg_info->reg);
+ if (ret) {
+ ath10k_err(ar, "failed to enable regulator %s\n",
+ vreg_info->name);
+ goto err_enable;
+ }
+
+ if (vreg_info->settle_delay)
+ udelay(vreg_info->settle_delay);
+
+ return 0;
+
+err_enable:
+ regulator_set_load(vreg_info->reg, 0);
+err_set_load:
+ regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+
+ return ret;
+}
+
+static int __ath10k_snoc_vreg_off(struct ath10k *ar,
+ struct ath10k_vreg_info *vreg_info)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
+ vreg_info->name);
+
+ ret = regulator_disable(vreg_info->reg);
+ if (ret)
+ ath10k_err(ar, "failed to disable regulator %s\n",
+ vreg_info->name);
+
+ ret = regulator_set_load(vreg_info->reg, 0);
+ if (ret < 0)
+ ath10k_err(ar, "failed to set load %s\n", vreg_info->name);
+
+ ret = regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+ if (ret)
+ ath10k_err(ar, "failed to set voltage %s\n", vreg_info->name);
+
+ return ret;
+}
+
+static int ath10k_snoc_vreg_on(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_wcn3990_vreg_info *vreg_info;
+ struct ath10k_vreg_info *vreg_info;
int ret = 0;
int i;
@@ -1324,62 +1418,30 @@ static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
if (!vreg_info->reg)
continue;
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
- vreg_info->name);
-
- ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
- vreg_info->max_v);
- if (ret) {
- ath10k_err(ar,
- "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
- vreg_info->name, vreg_info->min_v, vreg_info->max_v);
- goto err_reg_config;
- }
-
- if (vreg_info->load_ua) {
- ret = regulator_set_load(vreg_info->reg,
- vreg_info->load_ua);
- if (ret < 0) {
- ath10k_err(ar,
- "failed to set regulator %s load: %d\n",
- vreg_info->name,
- vreg_info->load_ua);
- goto err_reg_config;
- }
- }
-
- ret = regulator_enable(vreg_info->reg);
- if (ret) {
- ath10k_err(ar, "failed to enable regulator %s\n",
- vreg_info->name);
+ ret = __ath10k_snoc_vreg_on(ar, vreg_info);
+ if (ret)
goto err_reg_config;
- }
-
- if (vreg_info->settle_delay)
- udelay(vreg_info->settle_delay);
}
return 0;
err_reg_config:
- for (; i >= 0; i--) {
+ for (i = i - 1; i >= 0; i--) {
vreg_info = &ar_snoc->vreg[i];
if (!vreg_info->reg)
continue;
- regulator_disable(vreg_info->reg);
- regulator_set_load(vreg_info->reg, 0);
- regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+ __ath10k_snoc_vreg_off(ar, vreg_info);
}
return ret;
}
-static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
+static int ath10k_snoc_vreg_off(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_wcn3990_vreg_info *vreg_info;
+ struct ath10k_vreg_info *vreg_info;
int ret = 0;
int i;
@@ -1389,33 +1451,16 @@ static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
if (!vreg_info->reg)
continue;
- ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
- vreg_info->name);
-
- ret = regulator_disable(vreg_info->reg);
- if (ret)
- ath10k_err(ar, "failed to disable regulator %s\n",
- vreg_info->name);
-
- ret = regulator_set_load(vreg_info->reg, 0);
- if (ret < 0)
- ath10k_err(ar, "failed to set load %s\n",
- vreg_info->name);
-
- ret = regulator_set_voltage(vreg_info->reg, 0,
- vreg_info->max_v);
- if (ret)
- ath10k_err(ar, "failed to set voltage %s\n",
- vreg_info->name);
+ ret = __ath10k_snoc_vreg_off(ar, vreg_info);
}
return ret;
}
-static int ath10k_wcn3990_clk_init(struct ath10k *ar)
+static int ath10k_snoc_clk_init(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_wcn3990_clk_info *clk_info;
+ struct ath10k_clk_info *clk_info;
int ret = 0;
int i;
@@ -1449,7 +1494,7 @@ static int ath10k_wcn3990_clk_init(struct ath10k *ar)
return 0;
err_clock_config:
- for (; i >= 0; i--) {
+ for (i = i - 1; i >= 0; i--) {
clk_info = &ar_snoc->clk[i];
if (!clk_info->handle)
@@ -1461,10 +1506,10 @@ err_clock_config:
return ret;
}
-static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
+static int ath10k_snoc_clk_deinit(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
- struct ath10k_wcn3990_clk_info *clk_info;
+ struct ath10k_clk_info *clk_info;
int i;
for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
@@ -1488,18 +1533,18 @@ static int ath10k_hw_power_on(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
- ret = ath10k_wcn3990_vreg_on(ar);
+ ret = ath10k_snoc_vreg_on(ar);
if (ret)
return ret;
- ret = ath10k_wcn3990_clk_init(ar);
+ ret = ath10k_snoc_clk_init(ar);
if (ret)
goto vreg_off;
return ret;
vreg_off:
- ath10k_wcn3990_vreg_off(ar);
+ ath10k_snoc_vreg_off(ar);
return ret;
}
@@ -1509,9 +1554,9 @@ static int ath10k_hw_power_off(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
- ath10k_wcn3990_clk_deinit(ar);
+ ath10k_snoc_clk_deinit(ar);
- ret = ath10k_wcn3990_vreg_off(ar);
+ ret = ath10k_snoc_vreg_off(ar);
return ret;
}
@@ -1609,7 +1654,6 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
}
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
- ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!");
return 0;
@@ -1628,8 +1672,17 @@ err_core_destroy:
static int ath10k_snoc_remove(struct platform_device *pdev)
{
struct ath10k *ar = platform_get_drvdata(pdev);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
+
+ reinit_completion(&ar->driver_recovery);
+
+ if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
+ wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);
+
+ set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);
+
ath10k_core_unregister(ar);
ath10k_hw_power_off(ar);
ath10k_snoc_free_irq(ar);
@@ -1641,12 +1694,12 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
}
static struct platform_driver ath10k_snoc_driver = {
- .probe = ath10k_snoc_probe,
- .remove = ath10k_snoc_remove,
- .driver = {
- .name = "ath10k_snoc",
- .of_match_table = ath10k_snoc_dt_match,
- },
+ .probe = ath10k_snoc_probe,
+ .remove = ath10k_snoc_remove,
+ .driver = {
+ .name = "ath10k_snoc",
+ .of_match_table = ath10k_snoc_dt_match,
+ },
};
module_platform_driver(ath10k_snoc_driver);
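
Besides renaming the wcn3990-specific helpers, the snoc.c hunks fix both
unwind loops: on a failure at index i, the rollback now starts at i - 1,
because entry i itself was never enabled and must not be disabled. A userspace
model of that enable-all/unwind-on-error shape (enable_one/disable_one are
stand-ins for the regulator and clock calls):

#include <stdio.h>

#define N 4

static int enable_one(int i, int fail_at)
{
    if (i == fail_at)
        return -1;
    printf("enable %d\n", i);
    return 0;
}

static void disable_one(int i)
{
    printf("disable %d\n", i);
}

static int enable_all(int fail_at)
{
    int i, ret = 0;

    for (i = 0; i < N; i++) {
        ret = enable_one(i, fail_at);
        if (ret)
            goto err;
    }
    return 0;

err:
    for (i = i - 1; i >= 0; i--)        /* skip the entry that failed */
        disable_one(i);
    return ret;
}

int main(void)
{
    enable_all(2);                      /* enables 0,1; disables 1,0 */
    return 0;
}
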
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index e1d2d6675556..2b2f23cf7c5d 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -53,7 +53,7 @@ struct ath10k_snoc_ce_irq {
u32 irq_line;
};
-struct ath10k_wcn3990_vreg_info {
+struct ath10k_vreg_info {
struct regulator *reg;
const char *name;
u32 min_v;
@@ -63,13 +63,19 @@ struct ath10k_wcn3990_vreg_info {
bool required;
};
-struct ath10k_wcn3990_clk_info {
+struct ath10k_clk_info {
struct clk *handle;
const char *name;
u32 freq;
bool required;
};
+enum ath10k_snoc_flags {
+ ATH10K_SNOC_FLAG_REGISTERED,
+ ATH10K_SNOC_FLAG_UNREGISTERING,
+ ATH10K_SNOC_FLAG_RECOVERY,
+};
+
struct ath10k_snoc {
struct platform_device *dev;
struct ath10k *ar;
@@ -81,9 +87,10 @@ struct ath10k_snoc {
struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
struct ath10k_ce ce;
struct timer_list rx_post_retry;
- struct ath10k_wcn3990_vreg_info *vreg;
- struct ath10k_wcn3990_clk_info *clk;
+ struct ath10k_vreg_info *vreg;
+ struct ath10k_clk_info *clk;
struct ath10k_qmi *qmi;
+ unsigned long int flags;
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
@@ -91,8 +98,6 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
return (struct ath10k_snoc *)ar->drv_priv;
}
-void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value);
-u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset);
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
#endif /* _SNOC_H_ */
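
The new ath10k_snoc_flags values are bit numbers within the flags word added
to struct ath10k_snoc; the snoc.c hunks drive them with the kernel's atomic
set_bit()/test_bit()/clear_bit(). A non-atomic userspace sketch of the same
bookkeeping across a recovery cycle (set_flag and friends are simplified
stand-ins for the atomic bitops):

#include <stdio.h>

enum snoc_flags {
    FLAG_REGISTERED,
    FLAG_UNREGISTERING,
    FLAG_RECOVERY,
};

static unsigned long flags;

static void set_flag(int bit)   { flags |= 1UL << bit; }
static int  test_flag(int bit)  { return !!(flags & (1UL << bit)); }
static void clear_flag(int bit) { flags &= ~(1UL << bit); }

int main(void)
{
    set_flag(FLAG_REGISTERED);          /* first FW_READY: core registered */
    set_flag(FLAG_RECOVERY);            /* FW_DOWN indication arrives */
    if (test_flag(FLAG_RECOVERY) && test_flag(FLAG_REGISTERED))
        printf("restart instead of re-registering\n");
    clear_flag(FLAG_RECOVERY);          /* hif_start completed */
    return 0;
}
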
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index aa8978a8d751..fe35edcd3ec8 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -140,6 +140,9 @@ void ath10k_thermal_set_throttling(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return;
+
if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
return;
@@ -165,6 +168,9 @@ int ath10k_thermal_register(struct ath10k *ar)
struct device *hwmon_dev;
int ret;
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return 0;
+
cdev = thermal_cooling_device_register("ath10k_thermal", ar,
&ath10k_thermal_ops);
@@ -216,6 +222,9 @@ err_cooling_destroy:
void ath10k_thermal_unregister(struct ath10k *ar)
{
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return;
+
sysfs_remove_link(&ar->dev->kobj, "cooling_device");
thermal_cooling_device_unregister(ar->thermal.cdev);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 7978a7783f90..04663076d27a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -219,6 +219,9 @@ struct wmi_ops {
struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
u32 param);
+ struct sk_buff *(*gen_bb_timing)
+ (struct ath10k *ar,
+ const struct wmi_bb_timing_cfg_arg *arg);
};
@@ -1576,4 +1579,21 @@ ath10k_wmi_report_radar_found(struct ath10k *ar,
ar->wmi.cmd->radar_found_cmdid);
}
+static inline int
+ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
+ const struct wmi_bb_timing_cfg_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_bb_timing)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_bb_timing(ar, arg);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->set_bb_timing_cmdid);
+}
#endif
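
ath10k_wmi_pdev_bb_timing() follows the usual wmi-ops dispatch shape: the
inline wrapper returns -EOPNOTSUPP when the firmware branch never wired up the
op, and otherwise sends the command the generator built. A compact userspace
model of that optional-op dispatch (struct wmi_ops_model and its members are
illustrative, not the driver's types):

#include <errno.h>
#include <stdio.h>

struct bb_timing_arg { unsigned int tx, xpa; };

struct wmi_ops_model {
    int (*gen_bb_timing)(const struct bb_timing_arg *arg);
};

static int gen_bb_timing_10_2_4(const struct bb_timing_arg *arg)
{
    printf("cmd: bb_tx_timing 0x%x bb_xpa_timing 0x%x\n", arg->tx, arg->xpa);
    return 0;
}

static int pdev_bb_timing(const struct wmi_ops_model *ops,
                          const struct bb_timing_arg *arg)
{
    if (!ops->gen_bb_timing)
        return -EOPNOTSUPP;             /* op absent for this fw branch */
    return ops->gen_bb_timing(arg);
}

int main(void)
{
    struct wmi_ops_model old_fw = { 0 };
    struct wmi_ops_model fw_10_2_4 = { .gen_bb_timing = gen_bb_timing_10_2_4 };
    struct bb_timing_arg arg = { .tx = 0x20, .xpa = 0x10 };

    printf("old fw: %d\n", pdev_bb_timing(&old_fw, &arg));    /* -EOPNOTSUPP */
    printf("10.2.4: %d\n", pdev_bb_timing(&fw_10_2_4, &arg)); /* 0 */
    return 0;
}
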
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index bab8b2527fb8..892bd8c30dd9 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -621,7 +621,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_mgmt_tx_compl(ar, skb);
break;
default:
- ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
break;
}
@@ -762,6 +762,9 @@ static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
arg->noise_floor = ev->noise_floor;
arg->rx_clear_count = ev->rx_clear_count;
arg->cycle_count = ev->cycle_count;
+ if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
+ ar->running_fw->fw_file.fw_features))
+ arg->mac_clk_mhz = ev->mac_clk_mhz;
kfree(tb);
return 0;
@@ -3452,7 +3455,6 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
struct wmi_tlv *tlv;
struct sk_buff *skb;
__le32 *channel_list;
- u16 tlv_len;
size_t len;
void *ptr;
u32 i;
@@ -3510,8 +3512,6 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
/* nlo_configured_parameters(nlo_list) */
cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
WMI_NLO_MAX_SSIDS));
- tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
- sizeof(struct nlo_configured_parameters);
tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index 92c25f51bf86..e07e9907e355 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1564,6 +1564,9 @@ wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
WMI_SERVICE_SPOOF_MAC_SUPPORT,
WMI_TLV_MAX_SERVICE);
+ SVCMAP(WMI_TLV_SERVICE_THERM_THROT,
+ WMI_SERVICE_THERM_THROT,
+ WMI_TLV_MAX_SERVICE);
}
#undef SVCMAP
@@ -1579,6 +1582,16 @@ struct ath10k_mgmt_tx_pkt_addr {
dma_addr_t paddr;
};
+struct chan_info_params {
+ u32 err_code;
+ u32 freq;
+ u32 cmd_flags;
+ u32 noise_floor;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 mac_clk_mhz;
+};
+
struct wmi_tlv_mgmt_tx_compl_ev {
__le32 desc_id;
__le32 status;
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 25e8fa789e8d..ba837403e266 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -539,6 +539,7 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
.pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
.radar_found_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_bb_timing_cmdid = WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID,
};
/* 10.4 WMI cmd track */
@@ -825,6 +826,7 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
@@ -900,6 +902,7 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -974,6 +977,7 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
@@ -1051,6 +1055,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
.inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
.dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
+ .disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
};
static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -2554,60 +2559,69 @@ static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
return 0;
}
-void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+/*
+ * Handle the channel info event for firmware which only sends one
+ * chan_info event per scanned channel.
+ */
+static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar,
+ struct chan_info_params *params)
{
- struct wmi_ch_info_ev_arg arg = {};
struct survey_info *survey;
- u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
- int idx, ret;
+ int idx;
- ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
- if (ret) {
- ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
+ if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n");
return;
}
- err_code = __le32_to_cpu(arg.err_code);
- freq = __le32_to_cpu(arg.freq);
- cmd_flags = __le32_to_cpu(arg.cmd_flags);
- noise_floor = __le32_to_cpu(arg.noise_floor);
- rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
- cycle_count = __le32_to_cpu(arg.cycle_count);
+ idx = freq_to_idx(ar, params->freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ params->freq, idx);
+ return;
+ }
- ath10k_dbg(ar, ATH10K_DBG_WMI,
- "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
- err_code, freq, cmd_flags, noise_floor, rx_clear_count,
- cycle_count);
+ survey = &ar->survey[idx];
- spin_lock_bh(&ar->data_lock);
+ if (!params->mac_clk_mhz)
+ return;
- switch (ar->scan.state) {
- case ATH10K_SCAN_IDLE:
- case ATH10K_SCAN_STARTING:
- ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
- goto exit;
- case ATH10K_SCAN_RUNNING:
- case ATH10K_SCAN_ABORTING:
- break;
- }
+ memset(survey, 0, sizeof(*survey));
- idx = freq_to_idx(ar, freq);
+ survey->noise = params->noise_floor;
+ survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000;
+ survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000;
+ survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+}
+
+/*
+ * Handle the channel info event for firmware which sends chan_info
+ * event in pairs (start and stop events) for every scanned channel.
+ */
+static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar,
+ struct chan_info_params *params)
+{
+ struct survey_info *survey;
+ int idx;
+
+ idx = freq_to_idx(ar, params->freq);
if (idx >= ARRAY_SIZE(ar->survey)) {
ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
- freq, idx);
- goto exit;
+ params->freq, idx);
+ return;
}
- if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
+ if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
if (ar->ch_info_can_report_survey) {
survey = &ar->survey[idx];
- survey->noise = noise_floor;
+ survey->noise = params->noise_floor;
survey->filled = SURVEY_INFO_NOISE_DBM;
ath10k_hw_fill_survey_time(ar,
survey,
- cycle_count,
- rx_clear_count,
+ params->cycle_count,
+ params->rx_clear_count,
ar->survey_last_cycle_count,
ar->survey_last_rx_clear_count);
}
@@ -2617,10 +2631,55 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
ar->ch_info_can_report_survey = true;
}
- if (!(cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
- ar->survey_last_rx_clear_count = rx_clear_count;
- ar->survey_last_cycle_count = cycle_count;
+ if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
+ ar->survey_last_rx_clear_count = params->rx_clear_count;
+ ar->survey_last_cycle_count = params->cycle_count;
}
+}
+
+void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct chan_info_params ch_info_param;
+ struct wmi_ch_info_ev_arg arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
+ return;
+ }
+
+ ch_info_param.err_code = __le32_to_cpu(arg.err_code);
+ ch_info_param.freq = __le32_to_cpu(arg.freq);
+ ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags);
+ ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor);
+ ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
+ ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count);
+ ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
+ ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags,
+ ch_info_param.noise_floor, ch_info_param.rx_clear_count,
+ ch_info_param.cycle_count);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
+ goto exit;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ break;
+ }
+
+ if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
+ ar->running_fw->fw_file.fw_features))
+ ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param);
+ else
+ ath10k_wmi_event_chan_info_paired(ar, &ch_info_param);
exit:
spin_unlock_bh(&ar->data_lock);
@@ -8785,6 +8844,27 @@ ath10k_wmi_barrier(struct ath10k *ar)
return 0;
}
+static struct sk_buff *
+ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k *ar,
+ const struct wmi_bb_timing_cfg_arg *arg)
+{
+ struct wmi_pdev_bb_timing_cfg_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data;
+ cmd->bb_tx_timing = __cpu_to_le32(arg->bb_tx_timing);
+ cmd->bb_xpa_timing = __cpu_to_le32(arg->bb_xpa_timing);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
+ arg->bb_tx_timing, arg->bb_xpa_timing);
+ return skb;
+}
+
static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
@@ -9058,6 +9138,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.gen_pdev_enable_adaptive_cca =
ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
.get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
+ .gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
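
In the unpaired path above, survey times come straight from free-running
counters: mac_clk_mhz is the MAC clock in ticks per microsecond, so
cycles / mac_clk_mhz yields microseconds and a further division by 1000 the
milliseconds that cfg80211 reports. A quick arithmetic check with made-up
counter values (the 44 MHz clock is just an example figure):

#include <stdio.h>

int main(void)
{
    unsigned int mac_clk_mhz = 44;            /* ticks per microsecond */
    unsigned long long cycle_count = 4400000; /* 100 ms worth of ticks */
    unsigned long long rx_clear_count = 880000;

    unsigned long long time_ms = cycle_count / mac_clk_mhz / 1000;
    unsigned long long busy_ms = rx_clear_count / mac_clk_mhz / 1000;

    printf("time %llu ms, busy %llu ms\n", time_ms, busy_ms); /* 100, 20 */
    return 0;
}
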
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index f7badd079051..2034ccc7cc72 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -205,6 +205,9 @@ enum wmi_service {
WMI_SERVICE_SPOOF_MAC_SUPPORT,
WMI_SERVICE_TX_DATA_ACK_RSSI,
WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+ WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
+ WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT,
+ WMI_SERVICE_THERM_THROT,
/* keep last */
WMI_SERVICE_MAX,
@@ -244,6 +247,9 @@ enum wmi_10x_service {
WMI_10X_SERVICE_PEER_STATS,
WMI_10X_SERVICE_RESET_CHIP,
WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ WMI_10X_SERVICE_VDEV_BCN_RATE_CONTROL,
+ WMI_10X_SERVICE_PER_PACKET_SW_ENCRYPT,
+ WMI_10X_SERVICE_BB_TIMING_CONFIG_SUPPORT,
};
enum wmi_main_service {
@@ -359,6 +365,9 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT,
WMI_10_4_SERVICE_VDEV_BCN_RATE_CONTROL,
WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+ WMI_10_4_SERVICE_HTT_ASSERT_TRIGGER_SUPPORT,
+ WMI_10_4_SERVICE_VDEV_FILTER_NEIGHBOR_RX_PACKETS,
+ WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
};
static inline char *wmi_service_name(int service_id)
@@ -568,6 +577,8 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_RESET_CHIP, len);
SVCMAP(WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len);
+ SVCMAP(WMI_10X_SERVICE_BB_TIMING_CONFIG_SUPPORT,
+ WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, len);
}
static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
@@ -786,6 +797,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_TX_DATA_ACK_RSSI, len);
SVCMAP(WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
+ WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, len);
}
#undef SVCMAP
@@ -986,6 +999,7 @@ struct wmi_cmd_map {
u32 pdev_wds_entry_list_cmdid;
u32 tdls_set_offchan_mode_cmdid;
u32 radar_found_cmdid;
+ u32 set_bb_timing_cmdid;
};
/*
@@ -1601,6 +1615,8 @@ enum wmi_10_2_cmd_id {
WMI_10_2_SET_LTEU_CONFIG_CMDID,
WMI_10_2_SET_CCA_PARAMS,
WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_10_2_FWTEST_CMDID,
+ WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID,
WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
};
@@ -4984,6 +5000,7 @@ enum wmi_rate_preamble {
(((preamble) << 6) | ((nss) << 4) | (rate))
#define ATH10K_HW_AMPDU(flags) ((flags) & 0x1)
#define ATH10K_HW_BA_FAIL(flags) (((flags) >> 1) & 0x3)
+#define ATH10K_FW_SKIPPED_RATE_CTRL(flags) (((flags) >> 6) & 0x1)
#define ATH10K_VHT_MCS_NUM 10
#define ATH10K_BW_NUM 4
@@ -4991,6 +5008,7 @@ enum wmi_rate_preamble {
#define ATH10K_LEGACY_NUM 12
#define ATH10K_GI_NUM 2
#define ATH10K_HT_MCS_NUM 32
+#define ATH10K_RATE_TABLE_NUM 320
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
@@ -5064,6 +5082,7 @@ struct wmi_vdev_param_map {
u32 bw_nss_ratemask;
u32 inc_tsf;
u32 dec_tsf;
+ u32 disable_4addr_src_lrn;
};
#define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -5403,8 +5422,20 @@ enum wmi_10_4_vdev_param {
WMI_10_4_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
WMI_10_4_VDEV_PARAM_DISABLE_DYN_BW_RTS,
WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
+ WMI_10_4_VDEV_PARAM_SELFGEN_FIXED_RATE,
+ WMI_10_4_VDEV_PARAM_AMPDU_SUBFRAME_SIZE_PER_AC,
+ WMI_10_4_VDEV_PARAM_NSS_VHT160,
+ WMI_10_4_VDEV_PARAM_NSS_VHT80_80,
+ WMI_10_4_VDEV_PARAM_AMSDU_SUBFRAME_SIZE_PER_AC,
+ WMI_10_4_VDEV_PARAM_DISABLE_CABQ,
+ WMI_10_4_VDEV_PARAM_SIFS_TRIGGER_RATE,
+ WMI_10_4_VDEV_PARAM_TX_POWER,
+ WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE,
+ WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
};
+#define WMI_VDEV_DISABLE_4_ADDR_SRC_LRN 1
+
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
@@ -6441,6 +6472,14 @@ struct wmi_chan_info_event {
__le32 noise_floor;
__le32 rx_clear_count;
__le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
+ __le32 my_bss_rx_cycle_count;
+ __le32 rx_11b_mode_data_duration;
+ __le32 tx_frame_cnt;
+ __le32 mac_clk_mhz;
+
} __packed;
struct wmi_10_4_chan_info_event {
@@ -6669,6 +6708,10 @@ struct wmi_ch_info_ev_arg {
__le32 chan_tx_pwr_range;
__le32 chan_tx_pwr_tp;
__le32 rx_frame_count;
+ __le32 my_bss_rx_cycle_count;
+ __le32 rx_11b_mode_data_duration;
+ __le32 tx_frame_cnt;
+ __le32 mac_clk_mhz;
};
/* From 10.4 firmware, not sure all have the same values. */
@@ -7140,6 +7183,23 @@ struct wmi_pdev_chan_info_req_cmd {
__le32 reserved;
} __packed;
+/* bb timing register configurations */
+struct wmi_bb_timing_cfg_arg {
+ /* Tx_end to pa off timing */
+ u32 bb_tx_timing;
+
+ /* Tx_end to external pa off timing */
+ u32 bb_xpa_timing;
+};
+
+struct wmi_pdev_bb_timing_cfg_cmd {
+ /* Tx_end to pa off timing */
+ __le32 bb_tx_timing;
+
+ /* Tx_end to external pa off timing */
+ __le32 bb_xpa_timing;
+} __packed;
+
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 51b26b305885..36d4245c308e 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -135,7 +135,7 @@ static void ath10k_wow_convert_8023_to_80211
&old_hdr_mask->h_proto,
sizeof(old_hdr_mask->h_proto));
- /* Caculate new pkt_offset */
+ /* Calculate new pkt_offset */
if (old->pkt_offset < ETH_ALEN)
new->pkt_offset = old->pkt_offset +
offsetof(struct ieee80211_hdr_3addr, addr1);
@@ -146,7 +146,7 @@ static void ath10k_wow_convert_8023_to_80211
else
new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
- /* Caculate new hdr end offset */
+ /* Calculate new hdr end offset */
if (total_len > ETH_HLEN)
hdr_80211_end_offset = hdr_len + rfc_len;
else if (total_len > offsetof(struct ethhdr, h_proto))
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index e121187f371f..5477a014e1fb 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -291,7 +291,7 @@ static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
}
if (!test_bit(WLAN_ENABLED, &vif->flags)) {
- ath6kl_err("wlan disabled\n");
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "wlan disabled\n");
return false;
}
@@ -939,7 +939,7 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
else
ssid_list[i].flag = ANY_SSID_FLAG;
- if (n_match_ssid == 0)
+ if (ar->wiphy->max_match_sets != 0 && n_match_ssid == 0)
ssid_list[i].flag |= MATCH_SSID_FLAG;
}
@@ -1093,7 +1093,7 @@ void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) {
for (i = 0; i < vif->scan_req->n_ssids; i++) {
ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
- i + 1, DISABLE_SSID_FLAG,
+ i, DISABLE_SSID_FLAG,
0, NULL);
}
}
@@ -1322,7 +1322,7 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
u8 key_usage;
- enum crypto_type key_type = NONE_CRYPT;
+ enum ath6kl_crypto_type key_type = NONE_CRYPT;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index 4f82e8632d37..d6e5234f67a1 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -67,7 +67,7 @@ struct ath6kl_llc_snap_hdr {
__be16 eth_type;
} __packed;
-enum crypto_type {
+enum ath6kl_crypto_type {
NONE_CRYPT = 0x01,
WEP_CRYPT = 0x02,
TKIP_CRYPT = 0x04,
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index cb59016c723b..5e7ea838a921 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -389,6 +389,7 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
if (!ik->valid || ik->key_type != WAPI_CRYPT)
break;
/* for WAPI, we need to set the delayed group key, continue: */
+ /* fall through */
case WPA_PSK_AUTH:
case WPA2_PSK_AUTH:
case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 777acc564ac9..9d7ac1ab2d02 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1849,9 +1849,9 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
enum network_type nw_type,
enum dot11_auth_mode dot11_auth_mode,
enum auth_mode auth_mode,
- enum crypto_type pairwise_crypto,
+ enum ath6kl_crypto_type pairwise_crypto,
u8 pairwise_crypto_len,
- enum crypto_type group_crypto,
+ enum ath6kl_crypto_type group_crypto,
u8 group_crypto_len, int ssid_len, u8 *ssid,
u8 *bssid, u16 channel, u32 ctrl_flags,
u8 nw_subtype)
@@ -2301,7 +2301,7 @@ int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout)
}
int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
- enum crypto_type key_type,
+ enum ath6kl_crypto_type key_type,
u8 key_usage, u8 key_len,
u8 *key_rsc, unsigned int key_rsc_len,
u8 *key_material,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index a60bb49fe920..784940ba4c90 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -2556,9 +2556,9 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
enum network_type nw_type,
enum dot11_auth_mode dot11_auth_mode,
enum auth_mode auth_mode,
- enum crypto_type pairwise_crypto,
+ enum ath6kl_crypto_type pairwise_crypto,
u8 pairwise_crypto_len,
- enum crypto_type group_crypto,
+ enum ath6kl_crypto_type group_crypto,
u8 group_crypto_len, int ssid_len, u8 *ssid,
u8 *bssid, u16 channel, u32 ctrl_flags,
u8 nw_subtype);
@@ -2610,7 +2610,7 @@ int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config);
int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx);
int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
- enum crypto_type key_type,
+ enum ath6kl_crypto_type key_type,
u8 key_usage, u8 key_len,
u8 *key_rsc, unsigned int key_rsc_len,
u8 *key_material,
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 1f3523019509..ceca23a851d5 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -116,7 +116,7 @@ config ATH9K_DFS_CERTIFIED
except increase code size.
config ATH9K_DYNACK
- bool "Atheros ath9k ACK timeout estimation algorithm (EXPERIMENTAL)"
+ bool "Atheros ath9k ACK timeout estimation algorithm"
depends on ATH9K
default n
---help---
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 11d6f975c87d..dae95402eb3a 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -586,7 +586,7 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
break;
}
- /* else: fall through */
+ /* fall through */
case 0x1:
case 0x2:
case 0x7:
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 713291881208..6f32b8d2ec7f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -119,7 +119,7 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
aModeRefSel = 2;
if (aModeRefSel)
break;
- /* else: fall through */
+ /* fall through */
case 1:
default:
aModeRefSel = 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 0fe9c8378249..9899661f9a60 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -1055,17 +1055,15 @@ void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u32 new_flags, to_set, to_clear;
+ u32 to_set, to_clear;
if (!mci->update_2g5g || (mci->bt_state == MCI_BT_SLEEP))
return;
if (mci->is_2g) {
- new_flags = MCI_2G_FLAGS;
to_clear = MCI_2G_FLAGS_CLEAR_MASK;
to_set = MCI_2G_FLAGS_SET_MASK;
} else {
- new_flags = MCI_5G_FLAGS;
to_clear = MCI_5G_FLAGS_CLEAR_MASK;
to_set = MCI_5G_FLAGS_SET_MASK;
}
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 21ba20981a80..0fca44e91a71 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -272,7 +272,7 @@ struct ath_node {
#endif
u8 key_idx[4];
- u32 ackto;
+ int ackto;
struct list_head list;
};
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
index 7334c9b09e82..f112fa5b2eac 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.c
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
@@ -29,9 +29,13 @@
* ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
*
*/
-static inline u32 ath_dynack_ewma(u32 old, u32 new)
+static inline int ath_dynack_ewma(int old, int new)
{
- return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV;
+ if (old > 0)
+ return (new * (EWMA_DIV - EWMA_LEVEL) +
+ old * EWMA_LEVEL) / EWMA_DIV;
+ else
+ return new;
}
/**
@@ -82,10 +86,10 @@ static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac)
*/
static void ath_dynack_compute_ackto(struct ath_hw *ah)
{
- struct ath_node *an;
- u32 to = 0;
- struct ath_dynack *da = &ah->dynack;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_dynack *da = &ah->dynack;
+ struct ath_node *an;
+ int to = 0;
list_for_each_entry(an, &da->nodes, list)
if (an->ackto > to)
@@ -144,7 +148,8 @@ static void ath_dynack_compute_to(struct ath_hw *ah)
an->ackto = ath_dynack_ewma(an->ackto,
ackto);
ath_dbg(ath9k_hw_common(ah), DYNACK,
- "%pM to %u\n", dst, an->ackto);
+ "%pM to %d [%u]\n", dst,
+ an->ackto, ackto);
if (time_is_before_jiffies(da->lto)) {
ath_dynack_compute_ackto(ah);
da->lto = jiffies + COMPUTE_TO;
@@ -166,18 +171,21 @@ static void ath_dynack_compute_to(struct ath_hw *ah)
* @ah: ath hw
* @skb: socket buffer
* @ts: tx status info
+ * @sta: station pointer
*
*/
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
- struct ath_tx_status *ts)
+ struct ath_tx_status *ts,
+ struct ieee80211_sta *sta)
{
- u8 ridx;
struct ieee80211_hdr *hdr;
struct ath_dynack *da = &ah->dynack;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ u32 dur = ts->duration;
+ u8 ridx;
- if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !da->enabled)
+ if (!da->enabled || (info->flags & IEEE80211_TX_CTL_NO_ACK))
return;
spin_lock_bh(&da->qlock);
@@ -187,11 +195,19 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
/* late ACK */
if (ts->ts_status & ATH9K_TXERR_XRETRY) {
if (ieee80211_is_assoc_req(hdr->frame_control) ||
- ieee80211_is_assoc_resp(hdr->frame_control)) {
+ ieee80211_is_assoc_resp(hdr->frame_control) ||
+ ieee80211_is_auth(hdr->frame_control)) {
ath_dbg(common, DYNACK, "late ack\n");
+
ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2);
ath9k_hw_set_ack_timeout(ah, LATEACK_TO);
ath9k_hw_set_cts_timeout(ah, LATEACK_TO);
+ if (sta) {
+ struct ath_node *an;
+
+ an = (struct ath_node *)sta->drv_priv;
+ an->ackto = -1;
+ }
da->lto = jiffies + LATEACK_DELAY;
}
@@ -202,14 +218,13 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
ridx = ts->ts_rateindex;
da->st_rbf.ts[da->st_rbf.t_rb].tstamp = ts->ts_tstamp;
- da->st_rbf.ts[da->st_rbf.t_rb].dur = ts->duration;
ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1);
ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2);
if (!(info->status.rates[ridx].flags & IEEE80211_TX_RC_MCS)) {
- u32 phy, sifs;
const struct ieee80211_rate *rate;
struct ieee80211_tx_rate *rates = info->status.rates;
+ u32 phy;
rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
if (info->band == NL80211_BAND_2GHZ &&
@@ -218,19 +233,18 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
else
phy = WLAN_RC_PHY_OFDM;
- sifs = ath_dynack_get_sifs(ah, phy);
- da->st_rbf.ts[da->st_rbf.t_rb].dur -= sifs;
+ dur -= ath_dynack_get_sifs(ah, phy);
}
-
- ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n",
- hdr->addr1, da->st_rbf.ts[da->st_rbf.t_rb].tstamp,
- da->st_rbf.ts[da->st_rbf.t_rb].dur, da->st_rbf.h_rb,
- (da->st_rbf.t_rb + 1) % ATH_DYN_BUF);
+ da->st_rbf.ts[da->st_rbf.t_rb].dur = dur;
INCR(da->st_rbf.t_rb, ATH_DYN_BUF);
if (da->st_rbf.t_rb == da->st_rbf.h_rb)
INCR(da->st_rbf.h_rb, ATH_DYN_BUF);
+ ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n",
+ hdr->addr1, ts->ts_tstamp, dur, da->st_rbf.h_rb,
+ da->st_rbf.t_rb);
+
ath_dynack_compute_to(ah);
spin_unlock_bh(&da->qlock);
@@ -251,20 +265,19 @@ void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb,
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (!ath_dynack_bssidmask(ah, hdr->addr1) || !da->enabled)
+ if (!da->enabled || !ath_dynack_bssidmask(ah, hdr->addr1))
return;
spin_lock_bh(&da->qlock);
da->ack_rbf.tstamp[da->ack_rbf.t_rb] = ts;
- ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n",
- da->ack_rbf.tstamp[da->ack_rbf.t_rb],
- da->ack_rbf.h_rb, (da->ack_rbf.t_rb + 1) % ATH_DYN_BUF);
-
INCR(da->ack_rbf.t_rb, ATH_DYN_BUF);
if (da->ack_rbf.t_rb == da->ack_rbf.h_rb)
INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
+ ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n",
+ ts, da->ack_rbf.h_rb, da->ack_rbf.t_rb);
+
ath_dynack_compute_to(ah);
spin_unlock_bh(&da->qlock);
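
The dynack.c rework makes ackto signed so a negative value can mean "no
estimate yet" (a late ACK now resets the station's ackto to -1), and the EWMA
seeds itself from the first sample instead of averaging it against zero. A
standalone model, assuming the EWMA_LEVEL/EWMA_DIV weights of 96/128 from
dynack.h:

#include <stdio.h>

#define EWMA_LEVEL 96
#define EWMA_DIV   128

static int ewma(int old, int new)
{
    if (old > 0)
        return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV;
    return new;                 /* first sample or post-reset: adopt it */
}

int main(void)
{
    int ackto = -1;             /* reset by a late ACK */

    ackto = ewma(ackto, 64);    /* -> 64, not 64*32/128 = 16 */
    ackto = ewma(ackto, 32);    /* -> (32*32 + 64*96)/128 = 56 */
    printf("ackto %d\n", ackto);
    return 0;
}
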
diff --git a/drivers/net/wireless/ath/ath9k/dynack.h b/drivers/net/wireless/ath/ath9k/dynack.h
index 6d7bef976742..cf60224d40df 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.h
+++ b/drivers/net/wireless/ath/ath9k/dynack.h
@@ -86,7 +86,8 @@ void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an);
void ath_dynack_init(struct ath_hw *ah);
void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts);
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
- struct ath_tx_status *ts);
+ struct ath_tx_status *ts,
+ struct ieee80211_sta *sta);
#else
static inline void ath_dynack_init(struct ath_hw *ah) {}
static inline void ath_dynack_node_init(struct ath_hw *ah,
@@ -97,7 +98,8 @@ static inline void ath_dynack_sample_ack_ts(struct ath_hw *ah,
struct sk_buff *skb, u32 ts) {}
static inline void ath_dynack_sample_tx_ts(struct ath_hw *ah,
struct sk_buff *skb,
- struct ath_tx_status *ts) {}
+ struct ath_tx_status *ts,
+ struct ieee80211_sta *sta) {}
#endif
#endif /* DYNACK_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index bb319f22761f..8581d917635a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -2279,6 +2279,7 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
case NL80211_IFTYPE_ADHOC:
REG_SET_BIT(ah, AR_TXCFG,
AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
+ /* fall through */
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 25b3fc82d4ac..f448d5716639 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -629,7 +629,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (bf == bf->bf_lastbf)
ath_dynack_sample_tx_ts(sc->sc_ah,
bf->bf_mpdu,
- ts);
+ ts, sta);
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
@@ -773,7 +773,8 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
memcpy(info->control.rates, bf->rates,
sizeof(info->control.rates));
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
- ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
+ ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts,
+ sta);
}
ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
} else
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 705063259c8f..f7c2f19e81c1 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -766,6 +766,7 @@ static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len)
goto drop;
}
+ /* fall through */
case AR9170_RX_STATUS_MPDU_MIDDLE:
/* These are just data + mac status */
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 8c75651ede6c..2407931440ed 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -830,10 +830,12 @@ static bool carl9170_tx_rts_check(struct ar9170 *ar,
case CARL9170_ERP_AUTO:
if (ampdu)
break;
+ /* fall through */
case CARL9170_ERP_MAC80211:
if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
break;
+ /* fall through */
case CARL9170_ERP_RTS:
if (likely(!multi))
@@ -854,6 +856,7 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
case CARL9170_ERP_MAC80211:
if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
break;
+ /* fall through */
case CARL9170_ERP_CTS:
return true;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index d18e81fae5f1..9b2f9f543952 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -51,6 +51,19 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
CHAN60G(4, 0),
};
+static void
+wil_memdup_ie(u8 **pdst, size_t *pdst_len, const u8 *src, size_t src_len)
+{
+ kfree(*pdst);
+ *pdst = NULL;
+ *pdst_len = 0;
+ if (src_len > 0) {
+ *pdst = kmemdup(src, src_len, GFP_KERNEL);
+ if (*pdst)
+ *pdst_len = src_len;
+ }
+}
+
static int wil_num_supported_channels(struct wil6210_priv *wil)
{
int num_channels = ARRAY_SIZE(wil_60ghz_channels);
@@ -1441,11 +1454,19 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
rc = wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len,
params->key, key_usage);
- if (!rc && !IS_ERR(cs))
+ if (!rc && !IS_ERR(cs)) {
+ /* update local storage used for AP recovery */
+ if (key_usage == WMI_KEY_USE_TX_GROUP && params->key &&
+ params->key_len <= WMI_MAX_KEY_LEN) {
+ vif->gtk_index = key_index;
+ memcpy(vif->gtk, params->key, params->key_len);
+ vif->gtk_len = params->key_len;
+ }
/* in FT set crypto will take place upon receiving
* WMI_RING_EN_EVENTID event
*/
wil_set_crypto_rx(key_index, key_usage, cs, params);
+ }
return rc;
}
@@ -1634,6 +1655,14 @@ static int _wil_cfg80211_set_ies(struct wil6210_vif *vif,
u16 len = 0, proberesp_len = 0;
u8 *ies = NULL, *proberesp;
+ /* update local storage used for AP recovery */
+ wil_memdup_ie(&vif->proberesp, &vif->proberesp_len, bcon->probe_resp,
+ bcon->probe_resp_len);
+ wil_memdup_ie(&vif->proberesp_ies, &vif->proberesp_ies_len,
+ bcon->proberesp_ies, bcon->proberesp_ies_len);
+ wil_memdup_ie(&vif->assocresp_ies, &vif->assocresp_ies_len,
+ bcon->assocresp_ies, bcon->assocresp_ies_len);
+
proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp,
bcon->probe_resp_len,
&proberesp_len);
@@ -1735,6 +1764,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
vif->channel = chan;
vif->hidden_ssid = hidden_ssid;
vif->pbss = pbss;
+ vif->bi = bi;
+ memcpy(vif->ssid, ssid, ssid_len);
+ vif->ssid_len = ssid_len;
netif_carrier_on(ndev);
if (!wil_has_other_active_ifaces(wil, ndev, false, true))
@@ -1761,11 +1793,64 @@ out:
return rc;
}
+void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
+{
+ int rc, i;
+ struct wiphy *wiphy = wil_to_wiphy(wil);
+
+ for (i = 0; i < wil->max_vifs; i++) {
+ struct wil6210_vif *vif = wil->vifs[i];
+ struct net_device *ndev;
+ struct cfg80211_beacon_data bcon = {};
+ struct key_params key_params = {};
+
+ if (!vif || vif->ssid_len == 0)
+ continue;
+
+ ndev = vif_to_ndev(vif);
+ bcon.proberesp_ies = vif->proberesp_ies;
+ bcon.assocresp_ies = vif->assocresp_ies;
+ bcon.probe_resp = vif->proberesp;
+ bcon.proberesp_ies_len = vif->proberesp_ies_len;
+ bcon.assocresp_ies_len = vif->assocresp_ies_len;
+ bcon.probe_resp_len = vif->proberesp_len;
+
+ wil_info(wil,
+ "AP (vif %d) recovery: privacy %d, bi %d, channel %d, hidden %d, pbss %d\n",
+ i, vif->privacy, vif->bi, vif->channel,
+ vif->hidden_ssid, vif->pbss);
+ wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
+ vif->ssid, vif->ssid_len, true);
+ rc = _wil_cfg80211_start_ap(wiphy, ndev,
+ vif->ssid, vif->ssid_len,
+ vif->privacy, vif->bi,
+ vif->channel, &bcon,
+ vif->hidden_ssid, vif->pbss);
+ if (rc) {
+ wil_err(wil, "vif %d recovery failed (%d)\n", i, rc);
+ continue;
+ }
+
+ if (!vif->privacy || vif->gtk_len == 0)
+ continue;
+
+ key_params.key = vif->gtk;
+ key_params.key_len = vif->gtk_len;
+ key_params.seq_len = IEEE80211_GCMP_PN_LEN;
+ rc = wil_cfg80211_add_key(wiphy, ndev, vif->gtk_index, false,
+ NULL, &key_params);
+ if (rc)
+ wil_err(wil, "vif %d recovery add key failed (%d)\n",
+ i, rc);
+ }
+}
+
static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_beacon_data *bcon)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct wireless_dev *wdev = ndev->ieee80211_ptr;
struct wil6210_vif *vif = ndev_to_vif(ndev);
int rc;
u32 privacy = 0;
@@ -1778,15 +1863,16 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
bcon->tail_len))
privacy = 1;
+ memcpy(vif->ssid, wdev->ssid, wdev->ssid_len);
+ vif->ssid_len = wdev->ssid_len;
+
/* in case privacy has changed, need to restart the AP */
if (vif->privacy != privacy) {
- struct wireless_dev *wdev = ndev->ieee80211_ptr;
-
wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
vif->privacy, privacy);
- rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
- wdev->ssid_len, privacy,
+ rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid,
+ vif->ssid_len, privacy,
wdev->beacon_interval,
vif->channel, bcon,
vif->hidden_ssid,
@@ -1876,6 +1962,12 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
wmi_pcp_stop(vif);
clear_bit(wil_vif_ft_roam, vif->status);
+ vif->ssid_len = 0;
+ wil_memdup_ie(&vif->proberesp, &vif->proberesp_len, NULL, 0);
+ wil_memdup_ie(&vif->proberesp_ies, &vif->proberesp_ies_len, NULL, 0);
+ wil_memdup_ie(&vif->assocresp_ies, &vif->assocresp_ies_len, NULL, 0);
+ memset(vif->gtk, 0, WMI_MAX_KEY_LEN);
+ vif->gtk_len = 0;
if (last)
__wil_down(wil);
@@ -1923,7 +2015,7 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
params->mac, params->reason_code, vif->mid);
mutex_lock(&wil->mutex);
- wil6210_disconnect(vif, params->mac, params->reason_code, false);
+ wil6210_disconnect(vif, params->mac, params->reason_code);
mutex_unlock(&wil->mutex);
return 0;
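
wil_memdup_ie(), introduced above for the AP-recovery storage, always releases
the previous copy first and records the new length only when the duplication
succeeded, so the pointer and length fields can never disagree; a zero length
doubles as the "clear" operation used in stop_ap. A userspace rendition of the
same helper (malloc/memcpy in place of kmemdup):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void memdup_ie(unsigned char **pdst, size_t *pdst_len,
                      const unsigned char *src, size_t src_len)
{
    free(*pdst);
    *pdst = NULL;
    *pdst_len = 0;
    if (src_len > 0) {
        *pdst = malloc(src_len);
        if (*pdst) {
            memcpy(*pdst, src, src_len);
            *pdst_len = src_len;        /* length set only on success */
        }
    }
}

int main(void)
{
    unsigned char *ies = NULL;
    size_t ies_len = 0;
    const unsigned char probe[] = { 0xdd, 0x05, 1, 2, 3, 4, 5 };

    memdup_ie(&ies, &ies_len, probe, sizeof(probe)); /* store a copy */
    printf("stored %zu bytes\n", ies_len);
    memdup_ie(&ies, &ies_len, NULL, 0);              /* clear on stop_ap */
    printf("cleared, len %zu\n", ies_len);
    return 0;
}
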
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index aa50813a0595..835c902b84c1 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -124,7 +124,7 @@ static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil,
seq_puts(s, "}\n");
}
-static int wil_ring_debugfs_show(struct seq_file *s, void *data)
+static int ring_show(struct seq_file *s, void *data)
{
uint i;
struct wil6210_priv *wil = s->private;
@@ -183,18 +183,7 @@ static int wil_ring_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_ring_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_ring_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_ring = {
- .open = wil_ring_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(ring);
static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
struct wil_status_ring *sring)
@@ -240,7 +229,7 @@ static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
seq_puts(s, "}\n");
}
-static int wil_srings_debugfs_show(struct seq_file *s, void *data)
+static int srings_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
int i = 0;
@@ -251,18 +240,7 @@ static int wil_srings_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_srings_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_srings_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_srings = {
- .open = wil_srings_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(srings);
static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
const char *prefix)
@@ -348,7 +326,7 @@ static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
wil_halp_unvote(wil);
}
-static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
+static int mbox_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
int ret;
@@ -366,18 +344,7 @@ static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_mbox_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_mbox_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_mbox = {
- .open = wil_mbox_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(mbox);
static int wil_debugfs_iomem_x32_set(void *data, u64 val)
{
@@ -624,7 +591,7 @@ static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil,
return 0;
}
-static int wil_memread_debugfs_show(struct seq_file *s, void *data)
+static int memread_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
void __iomem *a;
@@ -645,18 +612,7 @@ static int wil_memread_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_memread_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_memread_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_memread = {
- .open = wil_memread_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(memread);
static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -664,10 +620,10 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
enum { max_count = 4096 };
struct wil_blob_wrapper *wil_blob = file->private_data;
struct wil6210_priv *wil = wil_blob->wil;
- loff_t pos = *ppos;
+ loff_t aligned_pos, pos = *ppos;
size_t available = wil_blob->blob.size;
void *buf;
- size_t ret;
+ size_t unaligned_bytes, aligned_count, ret;
int rc;
if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
@@ -685,7 +641,12 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
if (count > max_count)
count = max_count;
- buf = kmalloc(count, GFP_KERNEL);
+ /* round pos down to a 4-byte boundary */
+ unaligned_bytes = pos % 4;
+ aligned_pos = pos - unaligned_bytes;
+ aligned_count = count + unaligned_bytes;
+
+ buf = kmalloc(aligned_count, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -696,9 +657,9 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
}
wil_memcpy_fromio_32(buf, (const void __iomem *)
- wil_blob->blob.data + pos, count);
+ wil_blob->blob.data + aligned_pos, aligned_count);
- ret = copy_to_user(user_buf, buf, count);
+ ret = copy_to_user(user_buf, buf + unaligned_bytes, count);
wil_pm_runtime_put(wil);
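wil_memcpy_fromio_32() transfers device memory in whole 32-bit words, so the read offset is rounded down to a word boundary and the length grown accordingly; the user still receives exactly the bytes requested. A minimal sketch of the arithmetic, with hypothetical values:

    loff_t pos = 6;                                 /* user-requested offset */
    size_t count = 10;                              /* user-requested length */
    size_t unaligned_bytes = pos % 4;               /* 2 */
    loff_t aligned_pos = pos - unaligned_bytes;     /* 4 */
    size_t aligned_count = count + unaligned_bytes; /* 12 */

    /* read aligned_count bytes from io memory at aligned_pos into buf,
     * then copy_to_user() from buf + unaligned_bytes so the caller
     * sees exactly bytes [pos, pos + count).
     */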
@@ -962,6 +923,8 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
int rc;
void *frame;
+ memset(&params, 0, sizeof(params));
+
if (!len)
return -EINVAL;
@@ -1053,7 +1016,7 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
}
/*---------Tx/Rx descriptor------------*/
-static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
+static int txdesc_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct wil_ring *ring;
@@ -1146,21 +1109,10 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_txdesc_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_txdesc_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_txdesc = {
- .open = wil_txdesc_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(txdesc);
/*---------Tx/Rx status message------------*/
-static int wil_status_msg_debugfs_show(struct seq_file *s, void *data)
+static int status_msg_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
int sring_idx = dbg_sring_index;
@@ -1202,19 +1154,7 @@ static int wil_status_msg_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_status_msg_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_status_msg_debugfs_show,
- inode->i_private);
-}
-
-static const struct file_operations fops_status_msg = {
- .open = wil_status_msg_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(status_msg);
static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh)
{
@@ -1232,7 +1172,7 @@ static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh)
return i;
}
-static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data)
+static int rx_buff_mgmt_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt;
@@ -1257,19 +1197,7 @@ static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_rx_buff_mgmt_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_rx_buff_mgmt_debugfs_show,
- inode->i_private);
-}
-
-static const struct file_operations fops_rx_buff_mgmt = {
- .open = wil_rx_buff_mgmt_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(rx_buff_mgmt);
/*---------beamforming------------*/
static char *wil_bfstatus_str(u32 status)
@@ -1299,7 +1227,7 @@ static bool is_all_zeros(void * const x_, size_t sz)
return true;
}
-static int wil_bf_debugfs_show(struct seq_file *s, void *data)
+static int bf_show(struct seq_file *s, void *data)
{
int rc;
int i;
@@ -1353,18 +1281,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data)
}
return 0;
}
-
-static int wil_bf_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_bf_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_bf = {
- .open = wil_bf_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(bf);
/*---------temp------------*/
static void print_temp(struct seq_file *s, const char *prefix, s32 t)
@@ -1381,7 +1298,7 @@ static void print_temp(struct seq_file *s, const char *prefix, s32 t)
}
}
-static int wil_temp_debugfs_show(struct seq_file *s, void *data)
+static int temp_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
s32 t_m, t_r;
@@ -1397,21 +1314,10 @@ static int wil_temp_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_temp_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_temp_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_temp = {
- .open = wil_temp_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(temp);
/*---------freq------------*/
-static int wil_freq_debugfs_show(struct seq_file *s, void *data)
+static int freq_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;
@@ -1421,21 +1327,10 @@ static int wil_freq_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_freq_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_freq_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_freq = {
- .open = wil_freq_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(freq);
/*---------link------------*/
-static int wil_link_debugfs_show(struct seq_file *s, void *data)
+static int link_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct station_info *sinfo;
@@ -1487,21 +1382,10 @@ out:
kfree(sinfo);
return rc;
}
-
-static int wil_link_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_link_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_link = {
- .open = wil_link_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(link);
/*---------info------------*/
-static int wil_info_debugfs_show(struct seq_file *s, void *data)
+static int info_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct net_device *ndev = wil->main_ndev;
@@ -1536,18 +1420,7 @@ static int wil_info_debugfs_show(struct seq_file *s, void *data)
#undef CHECK_QSTATE
return 0;
}
-
-static int wil_info_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_info_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_info = {
- .open = wil_info_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(info);
/*---------recovery------------*/
/* mode = [manual|auto]
@@ -1663,7 +1536,7 @@ has_keys:
seq_puts(s, "\n");
}
-static int wil_sta_debugfs_show(struct seq_file *s, void *data)
+static int sta_show(struct seq_file *s, void *data)
__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
{
struct wil6210_priv *wil = s->private;
@@ -1745,20 +1618,9 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(sta);
-static int wil_sta_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_sta_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_sta = {
- .open = wil_sta_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
-
-static int wil_mids_debugfs_show(struct seq_file *s, void *data)
+static int mids_show(struct seq_file *s, void *data)
{
struct wil6210_priv *wil = s->private;
struct wil6210_vif *vif;
@@ -1781,18 +1643,7 @@ static int wil_mids_debugfs_show(struct seq_file *s, void *data)
return 0;
}
-
-static int wil_mids_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, wil_mids_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations fops_mids = {
- .open = wil_mids_seq_open,
- .release = single_release,
- .read = seq_read,
- .llseek = seq_lseek,
-};
+DEFINE_SHOW_ATTRIBUTE(mids);
static int wil_tx_latency_debugfs_show(struct seq_file *s, void *data)
__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
@@ -2436,23 +2287,23 @@ static const struct {
umode_t mode;
const struct file_operations *fops;
} dbg_files[] = {
- {"mbox", 0444, &fops_mbox},
- {"rings", 0444, &fops_ring},
- {"stations", 0444, &fops_sta},
- {"mids", 0444, &fops_mids},
- {"desc", 0444, &fops_txdesc},
- {"bf", 0444, &fops_bf},
- {"mem_val", 0644, &fops_memread},
+ {"mbox", 0444, &mbox_fops},
+ {"rings", 0444, &ring_fops},
+ {"stations", 0444, &sta_fops},
+ {"mids", 0444, &mids_fops},
+ {"desc", 0444, &txdesc_fops},
+ {"bf", 0444, &bf_fops},
+ {"mem_val", 0644, &memread_fops},
{"rxon", 0244, &fops_rxon},
{"tx_mgmt", 0244, &fops_txmgmt},
{"wmi_send", 0244, &fops_wmi},
{"back", 0644, &fops_back},
{"pmccfg", 0644, &fops_pmccfg},
{"pmcdata", 0444, &fops_pmcdata},
- {"temp", 0444, &fops_temp},
- {"freq", 0444, &fops_freq},
- {"link", 0444, &fops_link},
- {"info", 0444, &fops_info},
+ {"temp", 0444, &temp_fops},
+ {"freq", 0444, &freq_fops},
+ {"link", 0444, &link_fops},
+ {"info", 0444, &info_fops},
{"recovery", 0644, &fops_recovery},
{"led_cfg", 0644, &fops_led_cfg},
{"led_blink_time", 0644, &fops_led_blink_time},
@@ -2460,9 +2311,9 @@ static const struct {
{"fw_version", 0444, &fops_fw_version},
{"suspend_stats", 0644, &fops_suspend_stats},
{"compressed_rx_status", 0644, &fops_compressed_rx_status},
- {"srings", 0444, &fops_srings},
- {"status_msg", 0444, &fops_status_msg},
- {"rx_buff_mgmt", 0444, &fops_rx_buff_mgmt},
+ {"srings", 0444, &srings_fops},
+ {"status_msg", 0444, &status_msg_fops},
+ {"rx_buff_mgmt", 0444, &rx_buff_mgmt_fops},
{"tx_latency", 0644, &fops_tx_latency},
{"link_stats", 0644, &fops_link_stats},
{"link_stats_global", 0644, &fops_link_stats_global},
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 398900a1c29e..5b7de00affe2 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -18,6 +18,7 @@
#include <linux/moduleparam.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
#include "wil6210.h"
#include "txrx.h"
@@ -80,7 +81,7 @@ static const struct kernel_param_ops mtu_max_ops = {
module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444);
MODULE_PARM_DESC(mtu_max, " Max MTU value.");
-static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
+static uint rx_ring_order;
static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;
@@ -214,8 +215,21 @@ static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
wil->txrx_ops.ring_fini_tx(wil, ring);
}
-static void wil_disconnect_cid(struct wil6210_vif *vif, int cid,
- u16 reason_code, bool from_event)
+static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
+{
+ int i;
+
+ for (i = 0; i < WIL6210_MAX_CID; i++) {
+ if (wil->sta[i].mid == mid &&
+ wil->sta[i].status == wil_sta_connected)
+ return true;
+ }
+
+ return false;
+}
+
+static void wil_disconnect_cid_complete(struct wil6210_vif *vif, int cid,
+ u16 reason_code)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
uint i;
@@ -226,24 +240,14 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
int min_ring_id = wil_get_min_tx_ring_id(wil);
might_sleep();
- wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n",
+ wil_dbg_misc(wil,
+ "disconnect_cid_complete: CID %d, MID %d, status %d\n",
cid, sta->mid, sta->status);
- /* inform upper/lower layers */
+ /* inform upper layers */
if (sta->status != wil_sta_unused) {
if (vif->mid != sta->mid) {
wil_err(wil, "STA MID mismatch with VIF MID(%d)\n",
vif->mid);
- /* let FW override sta->mid but be more strict with
- * user space requests
- */
- if (!from_event)
- return;
- }
- if (!from_event) {
- bool del_sta = (wdev->iftype == NL80211_IFTYPE_AP) ?
- disable_ap_sme : false;
- wmi_disconnect_sta(vif, sta->addr, reason_code,
- true, del_sta);
}
switch (wdev->iftype) {
@@ -283,36 +287,20 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
sta->stats.tx_latency_min_us = U32_MAX;
}
-static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
- if (wil->sta[i].mid == mid &&
- wil->sta[i].status == wil_sta_connected)
- return true;
- }
-
- return false;
-}
-
-static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
- u16 reason_code, bool from_event)
+static void _wil6210_disconnect_complete(struct wil6210_vif *vif,
+ const u8 *bssid, u16 reason_code)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int cid = -ENOENT;
struct net_device *ndev;
struct wireless_dev *wdev;
- if (unlikely(!vif))
- return;
-
ndev = vif_to_ndev(vif);
wdev = vif_to_wdev(vif);
might_sleep();
- wil_info(wil, "bssid=%pM, reason=%d, ev%s\n", bssid,
- reason_code, from_event ? "+" : "-");
+ wil_info(wil, "disconnect_complete: bssid=%pM, reason=%d\n",
+ bssid, reason_code);
/* Cases are:
* - disconnect single STA, still connected
@@ -327,14 +315,15 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
if (bssid && !is_broadcast_ether_addr(bssid) &&
!ether_addr_equal_unaligned(ndev->dev_addr, bssid)) {
cid = wil_find_cid(wil, vif->mid, bssid);
- wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
+ wil_dbg_misc(wil,
+ "Disconnect complete %pM, CID=%d, reason=%d\n",
bssid, cid, reason_code);
if (cid >= 0) /* disconnect 1 peer */
- wil_disconnect_cid(vif, cid, reason_code, from_event);
+ wil_disconnect_cid_complete(vif, cid, reason_code);
} else { /* all */
- wil_dbg_misc(wil, "Disconnect all\n");
+ wil_dbg_misc(wil, "Disconnect complete all\n");
for (cid = 0; cid < WIL6210_MAX_CID; cid++)
- wil_disconnect_cid(vif, cid, reason_code, from_event);
+ wil_disconnect_cid_complete(vif, cid, reason_code);
}
/* link state */
@@ -380,6 +369,82 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
}
}
+static int wil_disconnect_cid(struct wil6210_vif *vif, int cid,
+ u16 reason_code)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ struct wil_sta_info *sta = &wil->sta[cid];
+ bool del_sta = false;
+
+ might_sleep();
+ wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n",
+ cid, sta->mid, sta->status);
+
+ if (sta->status == wil_sta_unused)
+ return 0;
+
+ if (vif->mid != sta->mid) {
+ wil_err(wil, "STA MID mismatch with VIF MID(%d)\n", vif->mid);
+ return -EINVAL;
+ }
+
+ /* inform lower layers */
+ if (wdev->iftype == NL80211_IFTYPE_AP && disable_ap_sme)
+ del_sta = true;
+
+ /* disconnect by sending a disconnect/del_sta command and waiting
+ * synchronously for the WMI_DISCONNECT_EVENTID event
+ */
+ return wmi_disconnect_sta(vif, sta->addr, reason_code, del_sta);
+}
+
+static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
+ u16 reason_code)
+{
+ struct wil6210_priv *wil;
+ struct net_device *ndev;
+ int cid = -ENOENT;
+
+ if (unlikely(!vif))
+ return;
+
+ wil = vif_to_wil(vif);
+ ndev = vif_to_ndev(vif);
+
+ might_sleep();
+ wil_info(wil, "disconnect bssid=%pM, reason=%d\n", bssid, reason_code);
+
+ /* Cases are:
+ * - disconnect single STA, still connected
+ * - disconnect single STA, already disconnected
+ * - disconnect all
+ *
+ * For "disconnect all", there are 3 options:
+ * - bssid == NULL
+ * - bssid is broadcast address (ff:ff:ff:ff:ff:ff)
+ * - bssid is our MAC address
+ */
+ if (bssid && !is_broadcast_ether_addr(bssid) &&
+ !ether_addr_equal_unaligned(ndev->dev_addr, bssid)) {
+ cid = wil_find_cid(wil, vif->mid, bssid);
+ wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
+ bssid, cid, reason_code);
+ if (cid >= 0) /* disconnect 1 peer */
+ wil_disconnect_cid(vif, cid, reason_code);
+ } else { /* all */
+ wil_dbg_misc(wil, "Disconnect all\n");
+ for (cid = 0; cid < WIL6210_MAX_CID; cid++)
+ wil_disconnect_cid(vif, cid, reason_code);
+ }
+
+ /* call event handler manually after processing wmi_call,
+ * to avoid deadlock - disconnect event handler acquires
+ * wil->mutex while it is already held here
+ */
+ _wil6210_disconnect_complete(vif, bssid, reason_code);
+}
+
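The split above exists to break a lock-ordering cycle: the WMI disconnect event handler acquires wil->mutex, which callers of _wil6210_disconnect() already hold, so instead of blocking on the asynchronous event the completion logic is invoked inline once the synchronous wmi_call() returns. The shape of the pattern, with illustrative names rather than the driver's API:

    /* Caller already holds `lock`; the firmware's event handler would
     * also take `lock`, so never wait for it here.
     */
    static void do_disconnect(struct ctx *c)
    {
            lockdep_assert_held(&c->lock);

            send_disconnect_cmd_sync(c);   /* command acked synchronously */
            disconnect_complete(c);        /* same work the async event
                                            * handler would have done */
    }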
void wil_disconnect_worker(struct work_struct *work)
{
struct wil6210_vif *vif = container_of(work,
@@ -485,10 +550,11 @@ static void wil_fw_error_worker(struct work_struct *work)
if (wil_wait_for_recovery(wil) != 0)
return;
+ rtnl_lock();
mutex_lock(&wil->mutex);
/* Needs adaptation for multiple VIFs
* need to go over all VIFs and consider the appropriate
- * recovery.
+ * recovery because each one can have a different iftype.
*/
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
@@ -500,15 +566,24 @@ static void wil_fw_error_worker(struct work_struct *work)
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
- wil_info(wil, "No recovery for AP-like interface\n");
- /* recovery in these modes is done by upper layers */
+ if (no_fw_recovery) /* upper layers do recovery */
+ break;
+ /* silent recovery, upper layers will see disconnect */
+ __wil_down(wil);
+ __wil_up(wil);
+ mutex_unlock(&wil->mutex);
+ wil_cfg80211_ap_recovery(wil);
+ mutex_lock(&wil->mutex);
+ wil_info(wil, "... completed\n");
break;
default:
wil_err(wil, "No recovery - unknown interface type %d\n",
wdev->iftype);
break;
}
+
mutex_unlock(&wil->mutex);
+ rtnl_unlock();
}
static int wil_find_free_ring(struct wil6210_priv *wil)
@@ -694,20 +769,41 @@ void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
* @vif: virtual interface context
* @bssid: peer to disconnect, NULL to disconnect all
* @reason_code: Reason code for the Disassociation frame
- * @from_event: whether is invoked from FW event handler
*
- * Disconnect and release associated resources. If invoked not from the
- * FW event handler, issue WMI command(s) to trigger MAC disconnect.
+ * Disconnect and release associated resources. Issue WMI
+ * command(s) to trigger MAC disconnect. When the command is issued
+ * successfully, call wil6210_disconnect_complete() to handle
+ * the disconnect event synchronously.
*/
void wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
- u16 reason_code, bool from_event)
+ u16 reason_code)
+{
+ struct wil6210_priv *wil = vif_to_wil(vif);
+
+ wil_dbg_misc(wil, "disconnecting\n");
+
+ del_timer_sync(&vif->connect_timer);
+ _wil6210_disconnect(vif, bssid, reason_code);
+}
+
+/**
+ * wil6210_disconnect_complete - handle disconnect event
+ * @vif: virtual interface context
+ * @bssid: peer to disconnect, NULL to disconnect all
+ * @reason_code: Reason code for the Disassociation frame
+ *
+ * Release associated resources and indicate to the upper layers
+ * that the connection is terminated.
+ */
+void wil6210_disconnect_complete(struct wil6210_vif *vif, const u8 *bssid,
+ u16 reason_code)
{
struct wil6210_priv *wil = vif_to_wil(vif);
- wil_dbg_misc(wil, "disconnect\n");
+ wil_dbg_misc(wil, "got disconnect\n");
del_timer_sync(&vif->connect_timer);
- _wil6210_disconnect(vif, bssid, reason_code, from_event);
+ _wil6210_disconnect_complete(vif, bssid, reason_code);
}
void wil_priv_deinit(struct wil6210_priv *wil)
@@ -998,10 +1094,13 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
- /* Clear MAC link up */
- wil_s(wil, RGF_HP_CTRL, BIT(15));
- wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
- wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
+ if (wil->hw_version < HW_VER_TALYN) {
+ /* Clear MAC link up */
+ wil_s(wil, RGF_HP_CTRL, BIT(15));
+ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0,
+ BIT_HPAL_PERST_FROM_PAD);
+ wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
+ }
wil_halt_cpu(wil);
@@ -1398,8 +1497,15 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
wil6210_clear_irq(wil);
/* CAF_ICR - clear and mask */
/* it is W1C, clear by writing back same value */
- wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
- wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+ if (wil->hw_version < HW_VER_TALYN_MB) {
+ wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
+ wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
+ } else {
+ wil_s(wil,
+ RGF_CAF_ICR_TALYN_MB + offsetof(struct RGF_ICR, ICR), 0);
+ wil_w(wil, RGF_CAF_ICR_TALYN_MB +
+ offsetof(struct RGF_ICR, IMV), ~0);
+ }
/* clear PAL_UNIT_ICR (potential D0->D3 leftover)
* In Talyn-MB host cannot access this register due to
* access control, hence PAL_UNIT_ICR is cleared by the FW
@@ -1511,7 +1617,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (vif) {
cancel_work_sync(&vif->disconnect_worker);
wil6210_disconnect(vif, NULL,
- WLAN_REASON_DEAUTH_LEAVING, false);
+ WLAN_REASON_DEAUTH_LEAVING);
}
}
wil_bcast_fini_all(wil);
@@ -1681,7 +1787,12 @@ int __wil_up(struct wil6210_priv *wil)
return rc;
/* Rx RING. After MAC and beacon */
- rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order);
+ if (rx_ring_order == 0)
+ rx_ring_order = wil->hw_version < HW_VER_TALYN_MB ?
+ WIL_RX_RING_SIZE_ORDER_DEFAULT :
+ WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT;
+
+ rc = wil->txrx_ops.rx_init(wil, rx_ring_order);
if (rc)
return rc;
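rx_init() now takes a ring order rather than a descriptor count: the ring holds 1 << order entries, and with the rx_ring_order module parameter left at its new default of 0, the order is picked per hardware generation on first bring-up. Using the defaults from wil6210.h below, the effective sizes are, illustratively:

    /* order 10 -> 1024 RX descriptors on pre-Talyn-MB,
     * order 11 -> 2048 RX descriptors on Talyn-MB and later
     */
    uint order = (hw_version < HW_VER_TALYN_MB) ?
                 WIL_RX_RING_SIZE_ORDER_DEFAULT :        /* 10 */
                 WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT;   /* 11 */
    u16 size = 1 << order;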
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 7a78a06bd356..b4e0eb1585b9 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -345,8 +345,7 @@ wil_vif_alloc(struct wil6210_priv *wil, const char *name,
ndev->ieee80211_ptr = wdev;
ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GRO |
- NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_RXHASH;
+ NETIF_F_TSO | NETIF_F_TSO6;
ndev->features |= ndev->hw_features;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
@@ -513,7 +512,7 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid)
}
mutex_lock(&wil->mutex);
- wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
+ wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING);
mutex_unlock(&wil->mutex);
ndev = vif_to_ndev(vif);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index cc5f263cc965..3e1c831ab2fb 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -743,14 +743,6 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
stats = &wil->sta[cid].stats;
- if (ndev->features & NETIF_F_RXHASH)
- /* fake L4 to ensure it won't be re-calculated later
- * set hash to any non-zero value to activate rps
- * mechanism, core will be chosen according
- * to user-level rps configuration.
- */
- skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
-
skb_orphan(skb);
if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
@@ -880,7 +872,7 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil)
}
}
-static int wil_rx_init(struct wil6210_priv *wil, u16 size)
+static int wil_rx_init(struct wil6210_priv *wil, uint order)
{
struct wil_ring *vring = &wil->ring_rx;
int rc;
@@ -894,7 +886,7 @@ static int wil_rx_init(struct wil6210_priv *wil, u16 size)
wil_rx_buf_len_init(wil);
- vring->size = size;
+ vring->size = 1 << order;
vring->is_rx = true;
rc = wil_vring_alloc(wil, vring);
if (rc)
@@ -1403,6 +1395,8 @@ found:
wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
wil_set_da_for_vring(wil, skb2, i);
wil_tx_ring(wil, vif, v2, skb2);
+ /* a successful call to wil_tx_ring takes its own skb2 ref */
+ dev_kfree_skb_any(skb2);
} else {
wil_err(wil, "skb_copy failed\n");
}
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 2bbae75b9a84..05a8348bd7b9 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -160,7 +160,7 @@ static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
struct wil_ring *ring, u32 i)
{
struct device *dev = wil_to_dev(wil);
- unsigned int sz = ALIGN(wil->rx_buf_len, 4);
+ unsigned int sz = wil->rx_buf_len;
dma_addr_t pa;
u16 buff_id;
struct list_head *active = &wil->rx_buff_mgmt.active;
@@ -234,9 +234,10 @@ static int wil_rx_refill_edma(struct wil6210_priv *wil)
struct wil_ring *ring = &wil->ring_rx;
u32 next_head;
int rc = 0;
- u32 swtail = *ring->edma_rx_swtail.va;
+ ring->swtail = *ring->edma_rx_swtail.va;
- for (; next_head = wil_ring_next_head(ring), (next_head != swtail);
+ for (; next_head = wil_ring_next_head(ring),
+ (next_head != ring->swtail);
ring->swhead = next_head) {
rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
if (unlikely(rc)) {
@@ -264,43 +265,26 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
struct wil_ring *ring)
{
struct device *dev = wil_to_dev(wil);
- u32 next_tail;
- u32 swhead = (ring->swhead + 1) % ring->size;
+ struct list_head *active = &wil->rx_buff_mgmt.active;
dma_addr_t pa;
- u16 dmalen;
- for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead);
- ring->swtail = next_tail) {
- struct wil_rx_enhanced_desc dd, *d = &dd;
- struct wil_rx_enhanced_desc *_d =
- (struct wil_rx_enhanced_desc *)
- &ring->va[ring->swtail].rx.enhanced;
- struct sk_buff *skb;
- u16 buff_id;
+ while (!list_empty(active)) {
+ struct wil_rx_buff *rx_buff =
+ list_first_entry(active, struct wil_rx_buff, list);
+ struct sk_buff *skb = rx_buff->skb;
- *d = *_d;
-
- /* Extract the SKB from the rx_buff management array */
- buff_id = __le16_to_cpu(d->mac.buff_id);
- if (buff_id >= wil->rx_buff_mgmt.size) {
- wil_err(wil, "invalid buff_id %d\n", buff_id);
- continue;
- }
- skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
- wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
if (unlikely(!skb)) {
- wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+ wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id);
} else {
- pa = wil_rx_desc_get_addr_edma(&d->dma);
- dmalen = le16_to_cpu(d->dma.length);
- dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
-
+ rx_buff->skb = NULL;
+ memcpy(&pa, skb->cb, sizeof(pa));
+ dma_unmap_single(dev, pa, wil->rx_buf_len,
+ DMA_FROM_DEVICE);
kfree_skb(skb);
}
/* Move the buffer from the active to the free list */
- list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
- &wil->rx_buff_mgmt.free);
+ list_move(&rx_buff->list, &wil->rx_buff_mgmt.free);
}
}
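The rewritten cleanup no longer walks descriptor indices; it drains the buff-management active list directly, which also reclaims buffers that were never posted to a descriptor. This is the standard list-drain idiom (mgmt is an illustrative name):

    while (!list_empty(&mgmt->active)) {
            struct wil_rx_buff *b =
                    list_first_entry(&mgmt->active, struct wil_rx_buff, list);

            /* unmap and free b->skb here, then recycle the entry */
            list_move(&b->list, &mgmt->free);
    }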
@@ -357,8 +341,8 @@ static int wil_init_rx_sring(struct wil6210_priv *wil,
struct wil_status_ring *sring = &wil->srings[ring_id];
int rc;
- wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size,
- ring_id);
+ wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n",
+ status_ring_size, ring_id);
memset(&sring->rx_data, 0, sizeof(sring->rx_data));
@@ -602,20 +586,20 @@ static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
{
+ /* RX buffer size must be aligned to 4 bytes */
wil->rx_buf_len = rx_large_buf ?
WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
}
-static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
+static int wil_rx_init_edma(struct wil6210_priv *wil, uint desc_ring_order)
{
- u16 status_ring_size;
+ u16 status_ring_size, desc_ring_size = 1 << desc_ring_order;
struct wil_ring *ring = &wil->ring_rx;
int rc;
size_t elem_size = wil->use_compressed_rx_status ?
sizeof(struct wil_rx_status_compressed) :
sizeof(struct wil_rx_status_extended);
int i;
- u16 max_rx_pl_per_desc;
/* In SW reorder one must use extended status messages */
if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
@@ -623,7 +607,12 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
"compressed RX status cannot be used with SW reorder\n");
return -EINVAL;
}
-
+ if (wil->rx_status_ring_order <= desc_ring_order)
+ /* make sure sring is larger than desc ring */
+ wil->rx_status_ring_order = desc_ring_order + 1;
+ if (wil->rx_buff_id_count <= desc_ring_size)
+ /* make sure we will not run out of buff_ids */
+ wil->rx_buff_id_count = desc_ring_size + 512;
if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
@@ -636,8 +625,6 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
wil_rx_buf_len_init_edma(wil);
- max_rx_pl_per_desc = ALIGN(wil->rx_buf_len, 4);
-
/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;
@@ -645,7 +632,7 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
wil->num_rx_status_rings);
- rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc);
+ rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len);
if (rc)
return rc;
@@ -834,23 +821,24 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil,
wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
l2_rx_status);
/* Due to HW issue, KEY error will trigger a MIC error */
- if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) {
- wil_dbg_txrx(wil,
- "L2 MIC/KEY error, dropping packet\n");
+ if (l2_rx_status == WIL_RX_EDMA_ERROR_MIC) {
+ wil_err_ratelimited(wil,
+ "L2 MIC/KEY error, dropping packet\n");
stats->rx_mic_error++;
}
- if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) {
- wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n");
+ if (l2_rx_status == WIL_RX_EDMA_ERROR_KEY) {
+ wil_err_ratelimited(wil,
+ "L2 KEY error, dropping packet\n");
stats->rx_key_error++;
}
- if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) {
- wil_dbg_txrx(wil,
- "L2 REPLAY error, dropping packet\n");
+ if (l2_rx_status == WIL_RX_EDMA_ERROR_REPLAY) {
+ wil_err_ratelimited(wil,
+ "L2 REPLAY error, dropping packet\n");
stats->rx_replay++;
}
- if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) {
- wil_dbg_txrx(wil,
- "L2 AMSDU error, dropping packet\n");
+ if (l2_rx_status == WIL_RX_EDMA_ERROR_AMSDU) {
+ wil_err_ratelimited(wil,
+ "L2 AMSDU error, dropping packet\n");
stats->rx_amsdu_error++;
}
return -EFAULT;
@@ -881,7 +869,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
struct sk_buff *skb;
dma_addr_t pa;
struct wil_ring_rx_data *rxdata = &sring->rx_data;
- unsigned int sz = ALIGN(wil->rx_buf_len, 4);
+ unsigned int sz = wil->rx_buf_len;
struct wil_net_stats *stats = NULL;
u16 dmalen;
int cid;
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index a7fe9292fda3..343516a03a1e 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -23,9 +23,9 @@
#define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN)
#define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX)
/* RX sring order should be bigger than RX ring order */
-#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (11)
+#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (12)
#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12)
-#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536)
+#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (2600)
#define WIL_DEFAULT_RX_STATUS_RING_ID 0
#define WIL_RX_DESC_RING_ID 0
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index abb82018d3b4..0f3be3ffc6a2 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -81,6 +81,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
#define WIL_TX_Q_LEN_DEFAULT (4000)
#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
+#define WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT (11)
#define WIL_TX_RING_SIZE_ORDER_DEFAULT (12)
#define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7)
#define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */
@@ -319,6 +320,7 @@ struct RGF_ICR {
/* MAC timer, usec, for packet lifetime */
#define RGF_MAC_MTRL_COUNTER_0 (0x886aa8)
+#define RGF_CAF_ICR_TALYN_MB (0x8893d4) /* struct RGF_ICR */
#define RGF_CAF_ICR (0x88946c) /* struct RGF_ICR */
#define RGF_CAF_OSC_CONTROL (0x88afa4)
#define BIT_CAF_OSC_XTAL_EN BIT(0)
@@ -613,7 +615,7 @@ struct wil_txrx_ops {
int cid, int tid);
irqreturn_t (*irq_tx)(int irq, void *cookie);
/* RX ops */
- int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
+ int (*rx_init)(struct wil6210_priv *wil, uint ring_order);
void (*rx_fini)(struct wil6210_priv *wil);
int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid,
u8 tid, u8 token, u16 status, bool amsdu,
@@ -848,6 +850,14 @@ struct wil6210_vif {
u8 hidden_ssid; /* relevant in AP mode */
u32 ap_isolate; /* no intra-BSS communication */
bool pbss;
+ int bi;
+ u8 *proberesp, *proberesp_ies, *assocresp_ies;
+ size_t proberesp_len, proberesp_ies_len, assocresp_ies_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ u8 gtk_index;
+ u8 gtk[WMI_MAX_KEY_LEN];
+ size_t gtk_len;
int bcast_ring;
struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
int locally_generated_disc; /* relevant in STA mode */
@@ -1220,8 +1230,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
-int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
- u16 reason, bool full_disconnect, bool del_sta);
+int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
+ bool del_sta);
int wmi_addba(struct wil6210_priv *wil, u8 mid,
u8 ringid, u8 size, u16 timeout);
int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason);
@@ -1276,6 +1286,7 @@ int wmi_stop_discovery(struct wil6210_vif *vif);
int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params,
u64 *cookie);
+void wil_cfg80211_ap_recovery(struct wil6210_priv *wil);
int wil_cfg80211_iface_combinations_from_fw(
struct wil6210_priv *wil,
const struct wil_fw_record_concurrency *conc);
@@ -1306,7 +1317,9 @@ void wil_abort_scan(struct wil6210_vif *vif, bool sync);
void wil_abort_scan_all_vifs(struct wil6210_priv *wil, bool sync);
void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps);
void wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
- u16 reason_code, bool from_event);
+ u16 reason_code);
+void wil6210_disconnect_complete(struct wil6210_vif *vif, const u8 *bssid,
+ u16 reason_code);
void wil_probe_client_flush(struct wil6210_vif *vif);
void wil_probe_client_worker(struct work_struct *work);
void wil_disconnect_worker(struct work_struct *work);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 4859f0e43658..345f05969190 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1018,7 +1018,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
evt->cid, rc);
wmi_disconnect_sta(vif, wil->sta[evt->cid].addr,
- WLAN_REASON_UNSPECIFIED, false, false);
+ WLAN_REASON_UNSPECIFIED, false);
} else {
wil_info(wil, "successful connection to CID %d\n", evt->cid);
}
@@ -1112,7 +1112,24 @@ static void wmi_evt_disconnect(struct wil6210_vif *vif, int id,
}
mutex_lock(&wil->mutex);
- wil6210_disconnect(vif, evt->bssid, reason_code, true);
+ wil6210_disconnect_complete(vif, evt->bssid, reason_code);
+ if (disable_ap_sme) {
+ struct wireless_dev *wdev = vif_to_wdev(vif);
+ struct net_device *ndev = vif_to_ndev(vif);
+
+ /* disconnect event in disable_ap_sme mode means link loss */
+ switch (wdev->iftype) {
+ /* AP-like interface */
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ /* notify hostapd about link loss */
+ cfg80211_cqm_pktloss_notify(ndev, evt->bssid, 0,
+ GFP_KERNEL);
+ break;
+ default:
+ break;
+ }
+ }
mutex_unlock(&wil->mutex);
}
@@ -1637,7 +1654,7 @@ wmi_evt_auth_status(struct wil6210_vif *vif, int id, void *d, int len)
return;
fail:
- wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false);
+ wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
}
static void
@@ -1766,7 +1783,7 @@ wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len)
return;
fail:
- wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false);
+ wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
}
/**
@@ -1949,16 +1966,17 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len,
{
int rc;
unsigned long remain;
+ ulong flags;
mutex_lock(&wil->wmi_mutex);
- spin_lock(&wil->wmi_ev_lock);
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
wil->reply_id = reply_id;
wil->reply_mid = mid;
wil->reply_buf = reply;
wil->reply_size = reply_size;
reinit_completion(&wil->wmi_call);
- spin_unlock(&wil->wmi_ev_lock);
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
rc = __wmi_send(wil, cmdid, mid, buf, len);
if (rc)
@@ -1978,12 +1996,12 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len,
}
out:
- spin_lock(&wil->wmi_ev_lock);
+ spin_lock_irqsave(&wil->wmi_ev_lock, flags);
wil->reply_id = 0;
wil->reply_mid = U8_MAX;
wil->reply_buf = NULL;
wil->reply_size = 0;
- spin_unlock(&wil->wmi_ev_lock);
+ spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
mutex_unlock(&wil->wmi_mutex);
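The wmi_ev_lock conversion to the _irqsave variants is the standard hardening for a spinlock that may also be taken from interrupt (or interrupt-disabled) context: a plain spin_lock() holder interrupted on the same CPU by a path that takes the same lock would deadlock. The canonical form used throughout this hunk:

    unsigned long flags;

    spin_lock_irqsave(&wil->wmi_ev_lock, flags);
    /* ... update wil->reply_id / reply_buf / reply_size ... */
    spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);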
@@ -2560,12 +2578,11 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
return 0;
}
-int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
- u16 reason, bool full_disconnect, bool del_sta)
+int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
+ bool del_sta)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
- u16 reason_code;
struct wmi_disconnect_sta_cmd disc_sta_cmd = {
.disconnect_reason = cpu_to_le16(reason),
};
@@ -2598,21 +2615,8 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
wil_fw_error_recovery(wil);
return rc;
}
+ wil->sinfo_gen++;
- if (full_disconnect) {
- /* call event handler manually after processing wmi_call,
- * to avoid deadlock - disconnect event handler acquires
- * wil->mutex while it is already held here
- */
- reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
-
- wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
- reply.evt.bssid, reason_code,
- reply.evt.disconnect_reason);
-
- wil->sinfo_gen++;
- wil6210_disconnect(vif, reply.evt.bssid, reason_code, true);
- }
return 0;
}
@@ -3145,7 +3149,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
if (mid == MID_BROADCAST)
mid = 0;
- if (mid >= wil->max_vifs) {
+ if (mid >= ARRAY_SIZE(wil->vifs) || mid >= wil->max_vifs) {
wil_dbg_wmi(wil, "invalid mid %d, event skipped\n",
mid);
return;
diff --git a/drivers/net/wireless/broadcom/b43/Kconfig b/drivers/net/wireless/broadcom/b43/Kconfig
index fba856032ca5..3e4145747b20 100644
--- a/drivers/net/wireless/broadcom/b43/Kconfig
+++ b/drivers/net/wireless/broadcom/b43/Kconfig
@@ -4,6 +4,7 @@ config B43
select BCMA if B43_BCMA
select SSB if B43_SSB
select FW_LOADER
+ select CORDIC
---help---
b43 is a driver for the Broadcom 43xx series wireless devices.
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
index 85f2ca989565..98c4fa5b919c 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.c
+++ b/drivers/net/wireless/broadcom/b43/phy_common.c
@@ -604,50 +604,3 @@ void b43_phy_force_clock(struct b43_wldev *dev, bool force)
#endif
}
}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */
-struct b43_c32 b43_cordic(int theta)
-{
- static const u32 arctg[] = {
- 2949120, 1740967, 919879, 466945, 234379, 117304,
- 58666, 29335, 14668, 7334, 3667, 1833,
- 917, 458, 229, 115, 57, 29,
- };
- u8 i;
- s32 tmp;
- s8 signx = 1;
- u32 angle = 0;
- struct b43_c32 ret = { .i = 39797, .q = 0, };
-
- while (theta > (180 << 16))
- theta -= (360 << 16);
- while (theta < -(180 << 16))
- theta += (360 << 16);
-
- if (theta > (90 << 16)) {
- theta -= (180 << 16);
- signx = -1;
- } else if (theta < -(90 << 16)) {
- theta += (180 << 16);
- signx = -1;
- }
-
- for (i = 0; i <= 17; i++) {
- if (theta > angle) {
- tmp = ret.i - (ret.q >> i);
- ret.q += ret.i >> i;
- ret.i = tmp;
- angle += arctg[i];
- } else {
- tmp = ret.i + (ret.q >> i);
- ret.q -= ret.i >> i;
- ret.i = tmp;
- angle -= arctg[i];
- }
- }
-
- ret.i *= signx;
- ret.q *= signx;
-
- return ret;
-}
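With CONFIG_CORDIC now selected in Kconfig above, the driver-private CORDIC (and the b43_c32/CORDIC_CONVERT helpers removed from phy_common.h below) gives way to the shared kernel library. The replacement API from <linux/cordic.h>, in rough outline with illustrative variables:

    #include <linux/cordic.h>

    int theta = 30, max = 100;    /* angle in degrees; values illustrative */
    struct cordic_iq iq;
    s32 i_sample, q_sample;

    /* CORDIC_FIXED() shifts a degree angle into the library's fixed-point
     * form; CORDIC_FLOAT() scales a coordinate back, playing the role of
     * the old CORDIC_CONVERT().
     */
    iq = cordic_calc_iq(CORDIC_FIXED(theta));
    i_sample = CORDIC_FLOAT(iq.i * max);
    q_sample = CORDIC_FLOAT(iq.q * max);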
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.h b/drivers/net/wireless/broadcom/b43/phy_common.h
index 57a1ad8afa08..4213caca9117 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.h
+++ b/drivers/net/wireless/broadcom/b43/phy_common.h
@@ -7,13 +7,6 @@
struct b43_wldev;
-/* Complex number using 2 32-bit signed integers */
-struct b43_c32 { s32 i, q; };
-
-#define CORDIC_CONVERT(value) (((value) >= 0) ? \
- ((((value) >> 15) + 1) >> 1) : \
- -((((-(value)) >> 15) + 1) >> 1))
-
/* PHY register routing bits */
#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */
#define B43_PHYROUTE_BASE 0x0000 /* Base registers */
@@ -450,6 +443,4 @@ bool b43_is_40mhz(struct b43_wldev *dev);
void b43_phy_force_clock(struct b43_wldev *dev, bool force);
-struct b43_c32 b43_cordic(int theta);
-
#endif /* LINUX_B43_PHY_COMMON_H_ */
diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
index 6922cbb99a04..46408a560814 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
@@ -23,6 +23,7 @@
*/
+#include <linux/cordic.h>
#include <linux/slab.h>
#include "b43.h"
@@ -1780,9 +1781,9 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
u16 buf[64];
- int i, samples = 0, angle = 0;
+ int i, samples = 0, theta = 0;
int rotation = (((36 * freq) / 20) << 16) / 100;
- struct b43_c32 sample;
+ struct cordic_iq sample;
lpphy->tx_tone_freq = freq;
@@ -1798,10 +1799,10 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
}
for (i = 0; i < samples; i++) {
- sample = b43_cordic(angle);
- angle += rotation;
- buf[i] = CORDIC_CONVERT((sample.i * max) & 0xFF) << 8;
- buf[i] |= CORDIC_CONVERT((sample.q * max) & 0xFF);
+ sample = cordic_calc_iq(CORDIC_FIXED(theta));
+ theta += rotation;
+ buf[i] = CORDIC_FLOAT((sample.i * max) & 0xFF) << 8;
+ buf[i] |= CORDIC_FLOAT((sample.q * max) & 0xFF);
}
b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf);
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 44ab080d6518..77d7cd5563c4 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -23,6 +23,7 @@
*/
+#include <linux/cordic.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -1513,7 +1514,7 @@ static void b43_radio_init2055(struct b43_wldev *dev)
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
static int b43_nphy_load_samples(struct b43_wldev *dev,
- struct b43_c32 *samples, u16 len) {
+ struct cordic_iq *samples, u16 len) {
struct b43_phy_n *nphy = dev->phy.n;
u16 i;
u32 *data;
@@ -1544,7 +1545,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
{
int i;
u16 bw, len, rot, angle;
- struct b43_c32 *samples;
+ struct cordic_iq *samples;
bw = b43_is_40mhz(dev) ? 40 : 20;
len = bw << 3;
@@ -1561,7 +1562,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
len = bw << 1;
}
- samples = kcalloc(len, sizeof(struct b43_c32), GFP_KERNEL);
+ samples = kcalloc(len, sizeof(struct cordic_iq), GFP_KERNEL);
if (!samples) {
b43err(dev->wl, "allocation for samples generation failed\n");
return 0;
@@ -1570,10 +1571,10 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
angle = 0;
for (i = 0; i < len; i++) {
- samples[i] = b43_cordic(angle);
+ samples[i] = cordic_calc_iq(CORDIC_FIXED(angle));
angle += rot;
- samples[i].q = CORDIC_CONVERT(samples[i].q * max);
- samples[i].i = CORDIC_CONVERT(samples[i].i * max);
+ samples[i].q = CORDIC_FLOAT(samples[i].q * max);
+ samples[i].i = CORDIC_FLOAT(samples[i].i * max);
}
i = b43_nphy_load_samples(dev, samples, len);
@@ -5894,7 +5895,6 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
struct ieee80211_channel *channel = dev->wl->hw->conf.chandef.chan;
struct b43_ppr *ppr = &nphy->tx_pwr_max_ppr;
u8 max; /* qdBm */
- bool tx_pwr_state;
if (nphy->tx_pwr_last_recalc_freq == channel->center_freq &&
nphy->tx_pwr_last_recalc_limit == phy->desired_txpower)
@@ -5930,7 +5930,6 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
b43_ppr_apply_min(dev, ppr, INT_TO_Q52(8));
/* Apply */
- tx_pwr_state = nphy->txpwrctrl;
b43_mac_suspend(dev);
b43_nphy_tx_power_ctl_setup(dev);
if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) {
@@ -6043,7 +6042,6 @@ static int b43_phy_initn(struct b43_wldev *dev)
u8 tx_pwr_state;
struct nphy_txgains target;
u16 tmp;
- enum nl80211_band tmp2;
bool do_rssi_cal;
u16 clip[2];
@@ -6137,7 +6135,6 @@ static int b43_phy_initn(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4);
}
- tmp2 = b43_current_band(dev->wl);
if (b43_nphy_ipa(dev)) {
b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1);
b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index 1f5a9b948abf..22fd95a736a8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -54,3 +54,5 @@ brcmfmac-$(CONFIG_BRCM_TRACING) += \
tracepoint.o
brcmfmac-$(CONFIG_OF) += \
of.o
+brcmfmac-$(CONFIG_DMI) += \
+ dmi.o
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 3e37c8cf82c6..d64bf233b12c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -342,6 +342,37 @@ static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
return err;
}
+static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
+ struct mmc_command *mc, int sg_cnt, int req_sz,
+ int func_blk_sz, u32 *addr,
+ struct brcmf_sdio_dev *sdiodev,
+ struct sdio_func *func, int write)
+{
+ int ret;
+
+ md->sg_len = sg_cnt;
+ md->blocks = req_sz / func_blk_sz;
+ mc->arg |= (*addr & 0x1FFFF) << 9; /* address */
+ mc->arg |= md->blocks & 0x1FF; /* block count */
+ /* incrementing addr for function 1 */
+ if (func->num == 1)
+ *addr += req_sz;
+
+ mmc_set_data_timeout(md, func->card);
+ mmc_wait_for_req(func->card->host, mr);
+
+ ret = mc->error ? mc->error : md->error;
+ if (ret == -ENOMEDIUM) {
+ brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+ } else if (ret != 0) {
+ brcmf_err("CMD53 sg block %s failed %d\n",
+ write ? "write" : "read", ret);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
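Factoring submission into mmc_submit_one() lets the rewritten loop below flush a partially built scatterlist whenever either limit is reached (max_req_sz bytes or max_seg_cnt entries), reset its counters, and continue walking the packet list, with one final flush for the trailing partial chunk. Abridged shape of the new control flow:

    skb_queue_walk(target_list, pkt_next) {
            /* ... append sg entries for pkt_next, growing req_sz/sg_cnt ... */
            if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
                    ret = mmc_submit_one(/* ... */);    /* flush full chunk */
                    if (ret)
                            goto exit_queue_walk;
                    req_sz = 0;
                    sg_cnt = 0;
                    sgl = sdiodev->sgtable.sgl;         /* restart the table */
            }
    }
    if (sg_cnt)                                         /* trailing chunk */
            ret = mmc_submit_one(/* ... */);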
/**
* brcmf_sdiod_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
@@ -360,11 +391,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
struct sk_buff_head *pktlist)
{
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
- unsigned int max_req_sz, orig_offset, dst_offset;
- unsigned short max_seg_cnt, seg_sz;
+ unsigned int max_req_sz, src_offset, dst_offset;
unsigned char *pkt_data, *orig_data, *dst_data;
- struct sk_buff *pkt_next = NULL, *local_pkt_next;
struct sk_buff_head local_list, *target_list;
+ struct sk_buff *pkt_next = NULL, *src;
+ unsigned short max_seg_cnt;
struct mmc_request mmc_req;
struct mmc_command mmc_cmd;
struct mmc_data mmc_dat;
@@ -404,9 +435,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
max_req_sz = sdiodev->max_request_size;
max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
target_list->qlen);
- seg_sz = target_list->qlen;
- pkt_offset = 0;
- pkt_next = target_list->next;
memset(&mmc_req, 0, sizeof(struct mmc_request));
memset(&mmc_cmd, 0, sizeof(struct mmc_command));
@@ -425,12 +453,12 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
mmc_req.cmd = &mmc_cmd;
mmc_req.data = &mmc_dat;
- while (seg_sz) {
- req_sz = 0;
- sg_cnt = 0;
- sgl = sdiodev->sgtable.sgl;
- /* prep sg table */
- while (pkt_next != (struct sk_buff *)target_list) {
+ req_sz = 0;
+ sg_cnt = 0;
+ sgl = sdiodev->sgtable.sgl;
+ skb_queue_walk(target_list, pkt_next) {
+ pkt_offset = 0;
+ while (pkt_offset < pkt_next->len) {
pkt_data = pkt_next->data + pkt_offset;
sg_data_sz = pkt_next->len - pkt_offset;
if (sg_data_sz > sdiodev->max_segment_size)
@@ -439,72 +467,55 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
sg_data_sz = max_req_sz - req_sz;
sg_set_buf(sgl, pkt_data, sg_data_sz);
-
sg_cnt++;
+
sgl = sg_next(sgl);
req_sz += sg_data_sz;
pkt_offset += sg_data_sz;
- if (pkt_offset == pkt_next->len) {
- pkt_offset = 0;
- pkt_next = pkt_next->next;
+ if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
+ ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
+ sg_cnt, req_sz, func_blk_sz,
+ &addr, sdiodev, func, write);
+ if (ret)
+ goto exit_queue_walk;
+ req_sz = 0;
+ sg_cnt = 0;
+ sgl = sdiodev->sgtable.sgl;
}
-
- if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
- break;
- }
- seg_sz -= sg_cnt;
-
- if (req_sz % func_blk_sz != 0) {
- brcmf_err("sg request length %u is not %u aligned\n",
- req_sz, func_blk_sz);
- ret = -ENOTBLK;
- goto exit;
- }
-
- mmc_dat.sg_len = sg_cnt;
- mmc_dat.blocks = req_sz / func_blk_sz;
- mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
- mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
- /* incrementing addr for function 1 */
- if (func->num == 1)
- addr += req_sz;
-
- mmc_set_data_timeout(&mmc_dat, func->card);
- mmc_wait_for_req(func->card->host, &mmc_req);
-
- ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
- if (ret == -ENOMEDIUM) {
- brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
- break;
- } else if (ret != 0) {
- brcmf_err("CMD53 sg block %s failed %d\n",
- write ? "write" : "read", ret);
- ret = -EIO;
- break;
}
}
-
+ if (sg_cnt)
+ ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
+ sg_cnt, req_sz, func_blk_sz,
+ &addr, sdiodev, func, write);
+exit_queue_walk:
if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
- local_pkt_next = local_list.next;
- orig_offset = 0;
+ src = __skb_peek(&local_list);
+ src_offset = 0;
skb_queue_walk(pktlist, pkt_next) {
dst_offset = 0;
- do {
- req_sz = local_pkt_next->len - orig_offset;
- req_sz = min_t(uint, pkt_next->len - dst_offset,
- req_sz);
- orig_data = local_pkt_next->data + orig_offset;
+
+ /* This is safe because we must have enough SKB data
+ * in the local list to cover everything in pktlist.
+ */
+ while (1) {
+ req_sz = pkt_next->len - dst_offset;
+ if (req_sz > src->len - src_offset)
+ req_sz = src->len - src_offset;
+
+ orig_data = src->data + src_offset;
dst_data = pkt_next->data + dst_offset;
memcpy(dst_data, orig_data, req_sz);
- orig_offset += req_sz;
- dst_offset += req_sz;
- if (orig_offset == local_pkt_next->len) {
- orig_offset = 0;
- local_pkt_next = local_pkt_next->next;
+
+ src_offset += req_sz;
+ if (src_offset == src->len) {
+ src_offset = 0;
+ src = skb_peek_next(src, &local_list);
}
+ dst_offset += req_sz;
if (dst_offset == pkt_next->len)
break;
- } while (!skb_queue_empty(&local_list));
+ }
}
}
@@ -972,6 +983,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_43012),
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 7f0a5bade70a..35301237d435 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5196,10 +5196,17 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
.del_pmk = brcmf_cfg80211_del_pmk,
};
-struct cfg80211_ops *brcmf_cfg80211_get_ops(void)
+struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
{
- return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
+ struct cfg80211_ops *ops;
+
+ ops = kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
GFP_KERNEL);
+
+ if (ops && settings->roamoff)
+ ops->update_connect_params = NULL;
+
+ return ops;
}
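Threading the per-device settings into brcmf_cfg80211_get_ops() lets the driver clear the update_connect_params callback when firmware roaming is disabled (roamoff set), so cfg80211 can fail that request up front instead of calling into a driver that cannot honor it. A hedged sketch of the caller side (the surrounding attach code is abridged):

    struct cfg80211_ops *ops = brcmf_cfg80211_get_ops(settings);

    if (!ops)
            return -ENOMEM;
    /* with settings->roamoff, ops->update_connect_params is NULL and
     * cfg80211 rejects NL80211_CMD_UPDATE_CONNECT_PARAMS (typically
     * with -EOPNOTSUPP) before reaching the driver.
     */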
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
@@ -6309,6 +6316,16 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
.tx = 0xffff,
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
+ [NL80211_IFTYPE_AP] = {
+ .tx = 0xffff,
+ .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+ BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+ BIT(IEEE80211_STYPE_ACTION >> 4)
}
};
@@ -6639,6 +6656,12 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
brcmf_configure_arp_nd_offload(ifp, true);
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_FAKEFRAG, 1);
+ if (err) {
+ brcmf_err("failed to set frameburst mode\n");
+ goto default_conf_out;
+ }
+
cfg->dongle_up = true;
default_conf_out:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index a4aec0004e4f..9a6287f084a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -404,7 +404,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
s32 brcmf_cfg80211_up(struct net_device *ndev);
s32 brcmf_cfg80211_down(struct net_device *ndev);
-struct cfg80211_ops *brcmf_cfg80211_get_ops(void);
+struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings);
enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 927d62b3d41b..22534bf2a90c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -165,6 +165,7 @@ struct sbconfig {
#define SRCI_LSS_MASK 0x00f00000
#define SRCI_LSS_SHIFT 20
#define SRCI_SRNB_MASK 0xf0
+#define SRCI_SRNB_MASK_EXT 0x100
#define SRCI_SRNB_SHIFT 4
#define SRCI_SRBSZ_MASK 0xf
#define SRCI_SRBSZ_SHIFT 0
@@ -592,7 +593,13 @@ static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize,
if (lss != 0)
*ramsize += (1 << ((lss - 1) + SR_BSZ_BASE));
} else {
- nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ /* the SRAM bank count field is one bit wider for corerev >= 23 */
+ if (sr->pub.rev >= 23) {
+ nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT))
+ >> SRCI_SRNB_SHIFT;
+ } else {
+ nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT;
+ }
for (i = 0; i < nb; i++) {
retent = brcmf_chip_socram_banksize(sr, i, &banksize);
*ramsize += banksize;
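
For core revisions 23 and later the bank-count field gains a fifth bit via SRCI_SRNB_MASK_EXT. A small sketch of the extraction, reusing the mask and shift defines from this hunk:

#include <stdio.h>
#include <stdint.h>

#define SRCI_SRNB_MASK     0xf0
#define SRCI_SRNB_MASK_EXT 0x100
#define SRCI_SRNB_SHIFT    4

/* Bank count is 4 bits wide on old cores, 5 bits from corerev 23 on. */
static uint32_t srnb(uint32_t coreinfo, uint32_t corerev)
{
    uint32_t mask = SRCI_SRNB_MASK;

    if (corerev >= 23)
        mask |= SRCI_SRNB_MASK_EXT;
    return (coreinfo & mask) >> SRCI_SRNB_SHIFT;
}

int main(void)
{
    /* 0x1f0 decodes to 31 banks only once the extension bit counts */
    printf("%u %u\n", (unsigned)srnb(0x1f0, 28), (unsigned)srnb(0x1f0, 20));
    return 0;
}
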
@@ -779,7 +786,7 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
u32 *regbase, u32 *wrapbase)
{
u8 desc;
- u32 val;
+ u32 val, szdesc;
u8 mpnum = 0;
u8 stype, sztype, wraptype;
@@ -825,14 +832,15 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
/* next size descriptor can be skipped */
if (sztype == DMP_SLAVE_SIZE_DESC) {
- val = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+ szdesc = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
/* skip upper size descriptor if present */
- if (val & DMP_DESC_ADDRSIZE_GT32)
+ if (szdesc & DMP_DESC_ADDRSIZE_GT32)
brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
}
- /* only look for 4K register regions */
- if (sztype != DMP_SLAVE_SIZE_4K)
+ /* look for 4K or 8K register regions */
+ if (sztype != DMP_SLAVE_SIZE_4K &&
+ sztype != DMP_SLAVE_SIZE_8K)
continue;
stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;
@@ -889,7 +897,8 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
/* need core with ports */
if (nmw + nsw == 0 &&
- id != BCMA_CORE_PMU)
+ id != BCMA_CORE_PMU &&
+ id != BCMA_CORE_GCI)
continue;
/* try to obtain register address info */
@@ -1356,6 +1365,16 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
addr = CORE_CC_REG(base, sr_control1);
reg = chip->ops->read32(chip->ctx, addr);
return reg != 0;
+ case CY_CC_4373_CHIP_ID:
+ /* explicitly check SR engine enable bit */
+ addr = CORE_CC_REG(base, sr_control0);
+ reg = chip->ops->read32(chip->ctx, addr);
+ return (reg & CC_SR_CTL0_ENABLE_MASK) != 0;
+ case CY_CC_43012_CHIP_ID:
+ addr = CORE_CC_REG(pmu->base, retention_ctl);
+ reg = chip->ops->read32(chip->ctx, addr);
+ return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+ PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
default:
addr = CORE_CC_REG(pmu->base, pmucapabilities_ext);
reg = chip->ops->read32(chip->ctx, addr);
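
For the 43012, save/restore capability is derived from the PMU retention control register: the engine is usable only if neither disable bit is set. A hedged standalone sketch (the two mask bit positions are illustrative assumptions, not copied from the driver headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions for the two disable flags */
#define PMU_RCTL_MACPHY_DISABLE_MASK (1u << 26)
#define PMU_RCTL_LOGIC_DISABLE_MASK  (1u << 27)

static bool sr_capable_43012(uint32_t retention_ctl)
{
    /* capable only when both disable bits are clear */
    return (retention_ctl & (PMU_RCTL_MACPHY_DISABLE_MASK |
                             PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
}

int main(void)
{
    printf("%d %d\n", sr_capable_43012(0),
           sr_capable_43012(PMU_RCTL_LOGIC_DISABLE_MASK)); /* 1 0 */
    return 0;
}
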
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 94044a7a6021..1f1e95a15a17 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -214,7 +214,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
sizeof(ifp->mac_addr));
if (err < 0) {
- brcmf_err("Retreiving cur_etheraddr failed, %d\n", err);
+ brcmf_err("Retrieving cur_etheraddr failed, %d\n", err);
goto done;
}
memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN);
@@ -269,7 +269,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
strcpy(buf, "ver");
err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
if (err < 0) {
- brcmf_err("Retreiving version information failed, %d\n",
+ brcmf_err("Retrieving version information failed, %d\n",
err);
goto done;
}
@@ -448,7 +448,8 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
}
}
if (!found) {
- /* No platform data for this device, try OF (Open Firwmare) */
+ /* No platform data for this device, try OF and DMI data */
+ brcmf_dmi_probe(settings, chip, chiprev);
brcmf_of_probe(dev, bus_type, settings);
}
return settings;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
index a34642cb4d2f..4ce56be90b74 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
@@ -59,6 +59,7 @@ struct brcmf_mp_device {
bool iapp;
bool ignore_probe_fail;
struct brcmfmac_pd_cc *country_codes;
+ const char *board_type;
union {
struct brcmfmac_sdio_pd sdio;
} bus;
@@ -74,4 +75,11 @@ void brcmf_release_module_param(struct brcmf_mp_device *module_param);
/* Sets dongle media info (drv_version, mac address). */
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
+#ifdef CONFIG_DMI
+void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev);
+#else
+static inline void
+brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev) {}
+#endif
+
#endif /* BRCMFMAC_COMMON_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index b1f702faff4f..860a4372cb56 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1130,7 +1130,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings)
brcmf_dbg(TRACE, "Enter\n");
- ops = brcmf_cfg80211_get_ops();
+ ops = brcmf_cfg80211_get_ops(settings);
if (!ops)
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
new file mode 100644
index 000000000000..51d76ac45075
--- /dev/null
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2018 Hans de Goede <hdegoede@redhat.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+#include "core.h"
+#include "common.h"
+#include "brcm_hw_ids.h"
+
+/* The DMI data never changes so we can use a static buf for this */
+static char dmi_board_type[128];
+
+struct brcmf_dmi_data {
+ u32 chip;
+ u32 chiprev;
+ const char *board_type;
+};
+
+/* NOTE: Please keep all entries sorted alphabetically */
+
+static const struct brcmf_dmi_data gpd_win_pocket_data = {
+ BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
+};
+
+static const struct brcmf_dmi_data jumper_ezpad_mini3_data = {
+ BRCM_CC_43430_CHIP_ID, 0, "jumper-ezpad-mini3"
+};
+
+static const struct brcmf_dmi_data meegopad_t08_data = {
+ BRCM_CC_43340_CHIP_ID, 2, "meegopad-t08"
+};
+
+static const struct dmi_system_id dmi_platform_data[] = {
+ {
+ /* Match for the GPDwin which unfortunately uses somewhat
+ * generic dmi strings, which is why we test for 4 strings.
+ * Comparing against 23 other byt/cht boards, board_vendor
+ * and board_name are unique to the GPDwin, whereas only one
+ * other board has the same board_serial and 3 others have
+ * the same default product_name. Also the GPDwin is the
+ * only device to have both board_ and product_name not set.
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_MATCH(DMI_BOARD_NAME, "Default string"),
+ DMI_MATCH(DMI_BOARD_SERIAL, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ },
+ .driver_data = (void *)&gpd_win_pocket_data,
+ },
+ {
+ /* Jumper EZpad mini3 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
+ /* jumperx.T87.KFBNEEA02 with the version-nr dropped */
+ DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
+ },
+ .driver_data = (void *)&jumper_ezpad_mini3_data,
+ },
+ {
+ /* Meegopad T08 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+ DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
+ DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
+ },
+ .driver_data = (void *)&meegopad_t08_data,
+ },
+ {}
+};
+
+void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev)
+{
+ const struct dmi_system_id *match;
+ const struct brcmf_dmi_data *data;
+ const char *sys_vendor;
+ const char *product_name;
+
+ /* Some models have DMI strings which are too generic, e.g.
+ * "Default string"; we use a quirk table for these.
+ */
+ for (match = dmi_first_match(dmi_platform_data);
+ match;
+ match = dmi_first_match(match + 1)) {
+ data = match->driver_data;
+
+ if (data->chip == chip && data->chiprev == chiprev) {
+ settings->board_type = data->board_type;
+ return;
+ }
+ }
+
+ /* Not found in the quirk-table, use sys_vendor-product_name */
+ sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (sys_vendor && product_name) {
+ snprintf(dmi_board_type, sizeof(dmi_board_type), "%s-%s",
+ sys_vendor, product_name);
+ settings->board_type = dmi_board_type;
+ }
+}
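
When no quirk entry matches, the board type falls back to the concatenation of the DMI vendor and product strings. A minimal userspace sketch of that naming (the DMI strings here are hypothetical examples):

#include <stdio.h>

int main(void)
{
    char board_type[128];
    const char *sys_vendor = "LENOVO";   /* hypothetical DMI strings */
    const char *product_name = "80XF";

    /* same "%s-%s" composition as the snprintf() in brcmf_dmi_probe() */
    snprintf(board_type, sizeof(board_type), "%s-%s",
             sys_vendor, product_name);
    printf("board_type: %s\n", board_type); /* LENOVO-80XF */
    return 0;
}
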
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 9095b830ae4d..14b948917a1a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -14,6 +14,7 @@
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/efi.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
@@ -445,6 +446,75 @@ struct brcmf_fw {
static void brcmf_fw_request_done(const struct firmware *fw, void *ctx);
+#ifdef CONFIG_EFI
+/* In some cases the EFI-var stored nvram contains "ccode=ALL" or "ccode=XV"
+ * to specify "worldwide" compatible settings, but these two ccodes do not
+ * work properly. "ccode=ALL" causes channels 12 and 13 to not be available,
+ * "ccode=XV" causes all 5GHz channels to not be available. So we replace both
+ * with "ccode=X2" which allows channels 12+13 and 5GHz channels in
+ * no-Initiate-Radiation mode. This means that we will never send on these
+ * channels without first having received valid wifi traffic on the channel.
+ */
+static void brcmf_fw_fix_efi_nvram_ccode(char *data, unsigned long data_len)
+{
+ char *ccode;
+
+ ccode = strnstr((char *)data, "ccode=ALL", data_len);
+ if (!ccode)
+ ccode = strnstr((char *)data, "ccode=XV\r", data_len);
+ if (!ccode)
+ return;
+
+ ccode[6] = 'X';
+ ccode[7] = '2';
+ ccode[8] = '\r';
+}
+
+static u8 *brcmf_fw_nvram_from_efi(size_t *data_len_ret)
+{
+ const u16 name[] = { 'n', 'v', 'r', 'a', 'm', 0 };
+ struct efivar_entry *nvram_efivar;
+ unsigned long data_len = 0;
+ u8 *data = NULL;
+ int err;
+
+ nvram_efivar = kzalloc(sizeof(*nvram_efivar), GFP_KERNEL);
+ if (!nvram_efivar)
+ return NULL;
+
+ memcpy(&nvram_efivar->var.VariableName, name, sizeof(name));
+ nvram_efivar->var.VendorGuid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61,
+ 0xb5, 0x1f, 0x43, 0x26,
+ 0x81, 0x23, 0xd1, 0x13);
+
+ err = efivar_entry_size(nvram_efivar, &data_len);
+ if (err)
+ goto fail;
+
+ data = kmalloc(data_len, GFP_KERNEL);
+ if (!data)
+ goto fail;
+
+ err = efivar_entry_get(nvram_efivar, NULL, &data_len, data);
+ if (err)
+ goto fail;
+
+ brcmf_fw_fix_efi_nvram_ccode(data, data_len);
+ brcmf_info("Using nvram EFI variable\n");
+
+ kfree(nvram_efivar);
+ *data_len_ret = data_len;
+ return data;
+
+fail:
+ kfree(data);
+ kfree(nvram_efivar);
+ return NULL;
+}
+#else
+static inline u8 *brcmf_fw_nvram_from_efi(size_t *data_len) { return NULL; }
+#endif
+
static void brcmf_fw_free_request(struct brcmf_fw_request *req)
{
struct brcmf_fw_item *item;
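
The ccode fix-up rewrites the offending value in place, reusing the trailing bytes of "ALL"/"XV" so the blob length never changes. A standalone sketch of the same edit (plain strstr() stands in for the kernel's strnstr(), and the nvram content is a made-up example):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char nvram[] = "boardtype=0x0665\rccode=ALL\rregrev=0\r";
    char *ccode = strstr(nvram, "ccode=ALL");

    if (!ccode)
        ccode = strstr(nvram, "ccode=XV\r");
    if (ccode) {
        /* overwrite "ALL" with "X2\r", exactly as in the hunk above */
        ccode[6] = 'X';
        ccode[7] = '2';
        ccode[8] = '\r';
    }
    printf("%s\n", nvram); /* ...ccode=X2 ... */
    return 0;
}
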
@@ -463,11 +533,12 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
{
struct brcmf_fw *fwctx = ctx;
struct brcmf_fw_item *cur;
+ bool free_bcm47xx_nvram = false;
+ bool kfree_nvram = false;
u32 nvram_length = 0;
void *nvram = NULL;
u8 *data = NULL;
size_t data_len;
- bool raw_nvram;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
@@ -476,12 +547,13 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
if (fw && fw->data) {
data = (u8 *)fw->data;
data_len = fw->size;
- raw_nvram = false;
} else {
- data = bcm47xx_nvram_get_contents(&data_len);
- if (!data && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
+ if ((data = bcm47xx_nvram_get_contents(&data_len)))
+ free_bcm47xx_nvram = true;
+ else if ((data = brcmf_fw_nvram_from_efi(&data_len)))
+ kfree_nvram = true;
+ else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL))
goto fail;
- raw_nvram = true;
}
if (data)
@@ -489,8 +561,11 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
fwctx->req->domain_nr,
fwctx->req->bus_nr);
- if (raw_nvram)
+ if (free_bcm47xx_nvram)
bcm47xx_nvram_release_contents(data);
+ if (kfree_nvram)
+ kfree(data);
+
release_firmware(fw);
if (!nvram && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
goto fail;
@@ -504,90 +579,75 @@ fail:
return -ENOENT;
}
-static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async)
+static int brcmf_fw_complete_request(const struct firmware *fw,
+ struct brcmf_fw *fwctx)
{
- struct brcmf_fw_item *cur;
- const struct firmware *fw = NULL;
- int ret;
-
- cur = &fwctx->req->items[fwctx->curpos];
-
- brcmf_dbg(TRACE, "%srequest for %s\n", async ? "async " : "",
- cur->path);
-
- if (async)
- ret = request_firmware_nowait(THIS_MODULE, true, cur->path,
- fwctx->dev, GFP_KERNEL, fwctx,
- brcmf_fw_request_done);
- else
- ret = request_firmware(&fw, cur->path, fwctx->dev);
-
- if (ret < 0) {
- brcmf_fw_request_done(NULL, fwctx);
- } else if (!async && fw) {
- brcmf_dbg(TRACE, "firmware %s %sfound\n", cur->path,
- fw ? "" : "not ");
- if (cur->type == BRCMF_FW_TYPE_BINARY)
- cur->binary = fw;
- else if (cur->type == BRCMF_FW_TYPE_NVRAM)
- brcmf_fw_request_nvram_done(fw, fwctx);
- else
- release_firmware(fw);
-
- return -EAGAIN;
- }
- return 0;
-}
-
-static void brcmf_fw_request_done(const struct firmware *fw, void *ctx)
-{
- struct brcmf_fw *fwctx = ctx;
- struct brcmf_fw_item *cur;
+ struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
int ret = 0;
- cur = &fwctx->req->items[fwctx->curpos];
-
- brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path,
- fw ? "" : "not ");
-
- if (!fw)
- ret = -ENOENT;
+ brcmf_dbg(TRACE, "firmware %s %sfound\n", cur->path, fw ? "" : "not ");
switch (cur->type) {
case BRCMF_FW_TYPE_NVRAM:
ret = brcmf_fw_request_nvram_done(fw, fwctx);
break;
case BRCMF_FW_TYPE_BINARY:
- cur->binary = fw;
+ if (fw)
+ cur->binary = fw;
+ else
+ ret = -ENOENT;
break;
default:
/* something fishy here so bail out early */
brcmf_err("unknown fw type: %d\n", cur->type);
release_firmware(fw);
ret = -EINVAL;
- goto fail;
}
- if (ret < 0 && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
- goto fail;
+ return (cur->flags & BRCMF_FW_REQF_OPTIONAL) ? 0 : ret;
+}
- do {
- if (++fwctx->curpos == fwctx->req->n_items) {
- ret = 0;
- goto done;
- }
+static int brcmf_fw_request_firmware(const struct firmware **fw,
+ struct brcmf_fw *fwctx)
+{
+ struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
+ int ret;
- ret = brcmf_fw_request_next_item(fwctx, false);
- } while (ret == -EAGAIN);
+ /* nvram files are board-specific; try a board-specific path first */
+ if (cur->type == BRCMF_FW_TYPE_NVRAM && fwctx->req->board_type) {
+ char alt_path[BRCMF_FW_NAME_LEN];
- return;
+ strlcpy(alt_path, cur->path, BRCMF_FW_NAME_LEN);
+ /* strip .txt at the end */
+ alt_path[strlen(alt_path) - 4] = 0;
+ strlcat(alt_path, ".", BRCMF_FW_NAME_LEN);
+ strlcat(alt_path, fwctx->req->board_type, BRCMF_FW_NAME_LEN);
+ strlcat(alt_path, ".txt", BRCMF_FW_NAME_LEN);
-fail:
- brcmf_dbg(TRACE, "failed err=%d: dev=%s, fw=%s\n", ret,
- dev_name(fwctx->dev), cur->path);
- brcmf_fw_free_request(fwctx->req);
- fwctx->req = NULL;
-done:
+ ret = request_firmware(fw, alt_path, fwctx->dev);
+ if (ret == 0)
+ return ret;
+ }
+
+ return request_firmware(fw, cur->path, fwctx->dev);
+}
+
+static void brcmf_fw_request_done(const struct firmware *fw, void *ctx)
+{
+ struct brcmf_fw *fwctx = ctx;
+ int ret;
+
+ ret = brcmf_fw_complete_request(fw, fwctx);
+
+ while (ret == 0 && ++fwctx->curpos < fwctx->req->n_items) {
+ brcmf_fw_request_firmware(&fw, fwctx);
+ ret = brcmf_fw_complete_request(fw, fwctx);
+ }
+
+ if (ret) {
+ brcmf_fw_free_request(fwctx->req);
+ fwctx->req = NULL;
+ }
fwctx->done(fwctx->dev, ret, fwctx->req);
kfree(fwctx);
}
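
The board-specific lookup rewrites "<path>.txt" into "<path>.<board_type>.txt" before falling back to the generic name. A compact sketch of the string surgery (FW_NAME_LEN is an assumed buffer size standing in for the driver's BRCMF_FW_NAME_LEN):

#include <stdio.h>
#include <string.h>

#define FW_NAME_LEN 320 /* assumed size */

int main(void)
{
    char alt_path[FW_NAME_LEN];
    const char *path = "brcm/brcmfmac4356-pcie.txt";
    const char *board_type = "gpd-win-pocket";
    int stem = (int)(strlen(path) - 4); /* strip the ".txt" suffix */

    snprintf(alt_path, sizeof(alt_path), "%.*s.%s.txt",
             stem, path, board_type);
    printf("%s\n", alt_path); /* brcm/brcmfmac4356-pcie.gpd-win-pocket.txt */
    return 0;
}
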
@@ -611,7 +671,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
void (*fw_cb)(struct device *dev, int err,
struct brcmf_fw_request *req))
{
+ struct brcmf_fw_item *first = &req->items[0];
struct brcmf_fw *fwctx;
+ int ret;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
if (!fw_cb)
@@ -628,7 +690,12 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
fwctx->req = req;
fwctx->done = fw_cb;
- brcmf_fw_request_next_item(fwctx, true);
+ ret = request_firmware_nowait(THIS_MODULE, true, first->path,
+ fwctx->dev, GFP_KERNEL, fwctx,
+ brcmf_fw_request_done);
+ if (ret < 0)
+ brcmf_fw_request_done(NULL, fwctx);
+
return 0;
}
@@ -641,8 +708,9 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
struct brcmf_fw_request *fwreq;
char chipname[12];
const char *mp_path;
+ size_t mp_path_len;
u32 i, j;
- char end;
+ char end = '\0';
size_t reqsz;
for (i = 0; i < table_size; i++) {
@@ -667,7 +735,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
mapping_table[i].fw_base, chipname);
mp_path = brcmf_mp_global.firmware_path;
- end = mp_path[strlen(mp_path) - 1];
+ mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN);
+ if (mp_path_len)
+ end = mp_path[mp_path_len - 1];
+
fwreq->n_items = n_fwnames;
for (j = 0; j < n_fwnames; j++) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index 2893e56910f0..a0834be8864e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -70,6 +70,7 @@ struct brcmf_fw_request {
u16 domain_nr;
u16 bus_nr;
u32 n_items;
+ const char *board_type;
struct brcmf_fw_item items[0];
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index 63b1287e2e6d..b6b183b18413 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -80,6 +80,7 @@
#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
#define BRCMF_C_SET_ASSOC_PREFER 205
#define BRCMF_C_GET_VALID_CHANNELS 217
+#define BRCMF_C_SET_FAKEFRAG 219
#define BRCMF_C_GET_KEY_PRIMARY 235
#define BRCMF_C_SET_KEY_PRIMARY 236
#define BRCMF_C_SET_SCAN_PASSIVE_TIME 258
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index d5bb81e88762..39ac1bbb6cc0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -176,6 +176,8 @@
#define BRCMF_VHT_CAP_MCS_MAP_NSS_MAX 8
+#define BRCMF_HE_CAP_MCS_MAP_NSS_MAX 8
+
/* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each
* ioctl. It is relatively small because firmware has small maximum size input
* payload restriction for ioctls.
@@ -601,13 +603,37 @@ struct brcmf_sta_info_le {
__le32 rx_pkts_retried; /* # rx with retry bit set */
__le32 tx_rate_fallback; /* lowest fallback TX rate */
- /* Fields valid for ver >= 5 */
- struct {
- __le32 count; /* # rates in this set */
- u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
- u8 mcs[BRCMF_MCSSET_LEN]; /* supported mcs index bit map */
- __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
- } rateset_adv;
+ union {
+ struct {
+ struct {
+ __le32 count; /* # rates in this set */
+ u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
+ u8 mcs[BRCMF_MCSSET_LEN]; /* supported mcs index bit map */
+ __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
+ } rateset_adv;
+ } v5;
+
+ struct {
+ __le32 rx_dur_total; /* total user RX duration (estimated) */
+ __le16 chanspec; /* chanspec this sta is on */
+ __le16 pad_1;
+ struct {
+ __le16 version; /* version */
+ __le16 len; /* length */
+ __le32 count; /* # rates in this set */
+ u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */
+ u8 mcs[BRCMF_MCSSET_LEN]; /* supported mcs index bit map */
+ __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
+ __le16 he_mcs[BRCMF_HE_CAP_MCS_MAP_NSS_MAX]; /* supported he mcs index bit map per nss */
+ } rateset_adv; /* rateset along with mcs index bitmap */
+ __le16 wpauth; /* authentication type */
+ u8 algo; /* crypto algorithm */
+ u8 pad_2;
+ __le32 tx_rspec; /* Rate of last successful tx frame */
+ __le32 rx_rspec; /* Rate of last successful rx frame */
+ __le32 wnm_cap; /* wnm capabilities */
+ } v7;
+ };
};
struct brcmf_chanspec_list {
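
Callers are expected to select the union leg from the version field at the head of brcmf_sta_info_le (not visible in this hunk); v7 adds the he_mcs[] map on top of the v5 rateset. A trivial sketch of that dispatch, with a stand-in struct:

#include <stdint.h>
#include <stdio.h>

struct sta_info {
    uint16_t ver; /* stand-in for the leading version member */
};

int main(void)
{
    struct sta_info si = { .ver = 7 };

    if (si.ver >= 7)
        printf("decode v7 rateset_adv (includes he_mcs[])\n");
    else
        printf("decode v5 rateset_adv\n");
    return 0;
}
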
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index f3cbf78c8899..02759ebd207c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -511,6 +511,7 @@ struct brcmf_fws_info {
struct work_struct fws_dequeue_work;
u32 fifo_enqpkt[BRCMF_FWS_FIFO_COUNT];
int fifo_credit[BRCMF_FWS_FIFO_COUNT];
+ int init_fifo_credit[BRCMF_FWS_FIFO_COUNT];
int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1];
int deq_node_pos[BRCMF_FWS_FIFO_COUNT];
u32 fifo_credit_map;
@@ -1237,6 +1238,9 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
}
fws->fifo_credit[fifo] += credits;
+ if (fws->fifo_credit[fifo] > fws->init_fifo_credit[fifo])
+ fws->fifo_credit[fifo] = fws->init_fifo_credit[fifo];
+
}
static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
@@ -1451,9 +1455,10 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo,
static int
brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
- u32 genbit, u16 seq)
+ u32 genbit, u16 seq, u8 compcnt)
{
u32 fifo;
+ u8 cnt = 0;
int ret;
bool remove_from_hanger = true;
struct sk_buff *skb;
@@ -1464,60 +1469,71 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
brcmf_dbg(DATA, "flags %d\n", flags);
if (flags == BRCMF_FWS_TXSTATUS_DISCARD)
- fws->stats.txs_discard++;
+ fws->stats.txs_discard += compcnt;
else if (flags == BRCMF_FWS_TXSTATUS_CORE_SUPPRESS) {
- fws->stats.txs_supp_core++;
+ fws->stats.txs_supp_core += compcnt;
remove_from_hanger = false;
} else if (flags == BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS) {
- fws->stats.txs_supp_ps++;
+ fws->stats.txs_supp_ps += compcnt;
remove_from_hanger = false;
} else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED)
- fws->stats.txs_tossed++;
+ fws->stats.txs_tossed += compcnt;
else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)
- fws->stats.txs_host_tossed++;
+ fws->stats.txs_host_tossed += compcnt;
else
brcmf_err("unexpected txstatus\n");
- ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
- remove_from_hanger);
- if (ret != 0) {
- brcmf_err("no packet in hanger slot: hslot=%d\n", hslot);
- return ret;
- }
+ while (cnt < compcnt) {
+ ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
+ remove_from_hanger);
+ if (ret != 0) {
+ brcmf_err("no packet in hanger slot: hslot=%d\n",
+ hslot);
+ goto cont;
+ }
- skcb = brcmf_skbcb(skb);
- entry = skcb->mac;
- if (WARN_ON(!entry)) {
- brcmu_pkt_buf_free_skb(skb);
- return -EINVAL;
- }
- entry->transit_count--;
- if (entry->suppressed && entry->suppr_transit_count)
- entry->suppr_transit_count--;
+ skcb = brcmf_skbcb(skb);
+ entry = skcb->mac;
+ if (WARN_ON(!entry)) {
+ brcmu_pkt_buf_free_skb(skb);
+ goto cont;
+ }
+ entry->transit_count--;
+ if (entry->suppressed && entry->suppr_transit_count)
+ entry->suppr_transit_count--;
- brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name, flags,
- skcb->htod, seq);
+ brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name,
+ flags, skcb->htod, seq);
- /* pick up the implicit credit from this packet */
- fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
- if ((fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT) ||
- (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) ||
- (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)) {
- brcmf_fws_return_credits(fws, fifo, 1);
- brcmf_fws_schedule_deq(fws);
- }
- brcmf_fws_macdesc_return_req_credit(skb);
+ /* pick up the implicit credit from this packet */
+ fifo = brcmf_skb_htod_tag_get_field(skb, FIFO);
+ if (fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT ||
+ (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) ||
+ flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED) {
+ brcmf_fws_return_credits(fws, fifo, 1);
+ brcmf_fws_schedule_deq(fws);
+ }
+ brcmf_fws_macdesc_return_req_credit(skb);
- ret = brcmf_proto_hdrpull(fws->drvr, false, skb, &ifp);
- if (ret) {
- brcmu_pkt_buf_free_skb(skb);
- return -EINVAL;
+ ret = brcmf_proto_hdrpull(fws->drvr, false, skb, &ifp);
+ if (ret) {
+ brcmu_pkt_buf_free_skb(skb);
+ goto cont;
+ }
+ if (!remove_from_hanger)
+ ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb,
+ genbit, seq);
+ if (remove_from_hanger || ret)
+ brcmf_txfinalize(ifp, skb, true);
+
+cont:
+ hslot = (hslot + 1) & (BRCMF_FWS_TXSTAT_HSLOT_MASK >>
+ BRCMF_FWS_TXSTAT_HSLOT_SHIFT);
+ if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode))
+ seq = (seq + 1) & BRCMF_SKB_HTOD_SEQ_NR_MASK;
+
+ cnt++;
}
- if (!remove_from_hanger)
- ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb,
- genbit, seq);
- if (remove_from_hanger || ret)
- brcmf_txfinalize(ifp, skb, true);
return 0;
}
@@ -1543,7 +1559,8 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
return BRCMF_FWS_RET_OK_SCHEDULE;
}
-static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
+static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 type,
+ u8 *data)
{
__le32 status_le;
__le16 seq_le;
@@ -1552,23 +1569,31 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
u32 genbit;
u8 flags;
u16 seq;
+ u8 compcnt;
+ u8 compcnt_offset = BRCMF_FWS_TYPE_TXSTATUS_LEN;
- fws->stats.txs_indicate++;
memcpy(&status_le, data, sizeof(status_le));
status = le32_to_cpu(status_le);
flags = brcmf_txstatus_get_field(status, FLAGS);
hslot = brcmf_txstatus_get_field(status, HSLOT);
genbit = brcmf_txstatus_get_field(status, GENERATION);
if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) {
- memcpy(&seq_le, &data[BRCMF_FWS_TYPE_PKTTAG_LEN],
+ memcpy(&seq_le, &data[BRCMF_FWS_TYPE_TXSTATUS_LEN],
sizeof(seq_le));
seq = le16_to_cpu(seq_le);
+ compcnt_offset += BRCMF_FWS_TYPE_SEQ_LEN;
} else {
seq = 0;
}
+ if (type == BRCMF_FWS_TYPE_COMP_TXSTATUS)
+ compcnt = data[compcnt_offset];
+ else
+ compcnt = 1;
+ fws->stats.txs_indicate += compcnt;
+
brcmf_fws_lock(fws);
- brcmf_fws_txs_process(fws, flags, hslot, genbit, seq);
+ brcmf_fws_txs_process(fws, flags, hslot, genbit, seq, compcnt);
brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_NOSCHEDULE;
}
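
The completion count sits right after the fixed txstatus word, pushed back a further two bytes when sequence-number reuse is active. A sketch of the offset arithmetic (the 4-byte status and 2-byte sequence lengths are assumptions matching the TLV lengths used in fwsignal.c):

#include <stdint.h>
#include <stdio.h>

#define TYPE_TXSTATUS_LEN 4 /* assumed TLV payload lengths */
#define TYPE_SEQ_LEN      2

static uint8_t compcnt(const uint8_t *data, int is_comp, int reuseseq)
{
    uint8_t off = TYPE_TXSTATUS_LEN;

    if (reuseseq)
        off += TYPE_SEQ_LEN; /* skip the 16-bit sequence number */
    return is_comp ? data[off] : 1; /* plain txstatus completes one slot */
}

int main(void)
{
    const uint8_t tlv[] = { 0, 0, 0, 0, 0x34, 0x12, 5 };

    printf("%u %u\n", (unsigned)compcnt(tlv, 1, 1),
           (unsigned)compcnt(tlv, 0, 1)); /* 5 1 */
    return 0;
}
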
@@ -1595,19 +1620,21 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
brcmf_err("event payload too small (%d)\n", e->datalen);
return -EINVAL;
}
- if (fws->creditmap_received)
- return 0;
fws->creditmap_received = true;
brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
brcmf_fws_lock(fws);
for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) {
- if (*credits)
+ fws->fifo_credit[i] += credits[i] - fws->init_fifo_credit[i];
+ fws->init_fifo_credit[i] = credits[i];
+ if (fws->fifo_credit[i] > 0)
fws->fifo_credit_map |= 1 << i;
else
fws->fifo_credit_map &= ~(1 << i);
- fws->fifo_credit[i] = *credits++;
+ WARN_ONCE(fws->fifo_credit[i] < 0,
+ "fifo_credit[%d] is negative(%d)\n", i,
+ fws->fifo_credit[i]);
}
brcmf_fws_schedule_deq(fws);
brcmf_fws_unlock(fws);
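
Rather than overwriting the per-FIFO credits, the handler now applies the delta between the newly reported totals and the previous baseline, so credits already lent to in-flight packets stay accounted for. A small numeric sketch (all values hypothetical):

#include <stdio.h>

#define NFIFO 5

int main(void)
{
    int fifo_credit[NFIFO]      = { 2, 2, 2, 2, 1 }; /* current balance */
    int init_fifo_credit[NFIFO] = { 4, 4, 4, 4, 2 }; /* old baseline */
    int credits[NFIFO]          = { 6, 4, 4, 4, 2 }; /* new report */

    for (int i = 0; i < NFIFO; i++) {
        fifo_credit[i] += credits[i] - init_fifo_credit[i];
        init_fifo_credit[i] = credits[i];
    }
    printf("fifo 0 balance: %d\n", fifo_credit[0]); /* 2 + (6 - 4) = 4 */
    return 0;
}
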
@@ -1882,8 +1909,6 @@ void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
err = BRCMF_FWS_RET_OK_NOSCHEDULE;
switch (type) {
- case BRCMF_FWS_TYPE_COMP_TXSTATUS:
- break;
case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
rd = (struct brcmf_skb_reorder_data *)skb->cb;
rd->reorder = data;
@@ -1906,7 +1931,8 @@ void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
err = brcmf_fws_request_indicate(fws, type, data);
break;
case BRCMF_FWS_TYPE_TXSTATUS:
- brcmf_fws_txstatus_indicate(fws, data);
+ case BRCMF_FWS_TYPE_COMP_TXSTATUS:
+ brcmf_fws_txstatus_indicate(fws, type, data);
break;
case BRCMF_FWS_TYPE_FIFO_CREDITBACK:
err = brcmf_fws_fifocreditback_indicate(fws, data);
@@ -1995,7 +2021,7 @@ static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
fws->stats.rollback_failed++;
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED,
- hslot, 0, 0);
+ hslot, 0, 0, 1);
} else {
fws->stats.rollback_success++;
brcmf_fws_return_credits(fws, fifo, 1);
@@ -2013,7 +2039,7 @@ static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
}
for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) {
- if (fws->fifo_credit[lender_ac]) {
+ if (fws->fifo_credit[lender_ac] > 0) {
fws->credits_borrowed[lender_ac]++;
fws->fifo_credit[lender_ac]--;
if (fws->fifo_credit[lender_ac] == 0)
@@ -2210,8 +2236,9 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
}
continue;
}
- while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
- (fifo == BRCMF_FWS_FIFO_BCMC))) {
+ while ((fws->fifo_credit[fifo] > 0) ||
+ ((!fws->bcmc_credit_check) &&
+ (fifo == BRCMF_FWS_FIFO_BCMC))) {
skb = brcmf_fws_deq(fws, fifo);
if (!skb)
break;
@@ -2222,7 +2249,7 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
break;
}
if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
- (fws->fifo_credit[fifo] == 0) &&
+ (fws->fifo_credit[fifo] <= 0) &&
(!fws->bus_flow_blocked)) {
while (brcmf_fws_borrow_credit(fws) == 0) {
skb = brcmf_fws_deq(fws, fifo);
@@ -2455,7 +2482,8 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
}
brcmf_fws_lock(fws);
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
- brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0);
+ brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0,
+ 1);
brcmf_fws_unlock(fws);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index aee6e5937c41..84e3373289eb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -27,11 +27,20 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
struct brcmf_mp_device *settings)
{
struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
- struct device_node *np = dev->of_node;
+ struct device_node *root, *np = dev->of_node;
+ struct property *prop;
int irq;
u32 irqf;
u32 val;
+ /* Set board-type to the first string of the machine compatible prop */
+ root = of_find_node_by_path("/");
+ if (root) {
+ prop = of_find_property(root, "compatible", NULL);
+ settings->board_type = of_prop_next_string(prop, NULL);
+ of_node_put(root);
+ }
+
if (!np || bus_type != BRCMF_BUSTYPE_SDIO ||
!of_device_is_compatible(np, "brcm,bcm4329-fmac"))
return;
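
A devicetree "compatible" property is a list of NUL-separated strings, and of_prop_next_string(prop, NULL) hands back the first one. The effect can be sketched without the OF API at all (the compatible values are hypothetical):

#include <stdio.h>

int main(void)
{
    /* two strings packed the way DT stores them */
    const char compatible[] = "gpd,win-pocket\0brcm,generic-board";

    /* printing stops at the first NUL: only the first string is used */
    printf("board_type: %s\n", compatible); /* gpd,win-pocket */
    return 0;
}
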
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 5dea569d63ed..16d7dda965d8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -1785,6 +1785,7 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
+ fwreq->board_type = devinfo->settings->board_type;
/* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
fwreq->bus_nr = devinfo->pdev->bus->number;
@@ -2018,6 +2019,7 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = {
static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index b2e1ab5adb64..0cd5b8d970d7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -49,6 +49,11 @@
#define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500)
#define CTL_DONE_TIMEOUT msecs_to_jiffies(2500)
+/* watermark expressed in number of words */
+#define DEFAULT_F2_WATERMARK 0x8
+#define CY_4373_F2_WATERMARK 0x40
+#define CY_43012_F2_WATERMARK 0x60
+
#ifdef DEBUG
#define BRCMF_TRAP_INFO_SIZE 80
@@ -138,6 +143,8 @@ struct rte_console {
/* 1: isolate internal sdio signals, put external pads in tri-state; requires
* sdio bus power cycle to clear (rev 9) */
#define SBSDIO_DEVCTL_PADS_ISO 0x08
+/* 1: enable F2 Watermark */
+#define SBSDIO_DEVCTL_F2WM_ENAB 0x10
/* Force SD->SB reset mapping (rev 11) */
#define SBSDIO_DEVCTL_SB_RST_CTL 0x30
/* Determined by CoreControl bit */
@@ -618,6 +625,7 @@ BRCMF_FW_DEF(43455, "brcmfmac43455-sdio");
BRCMF_FW_DEF(4354, "brcmfmac4354-sdio");
BRCMF_FW_DEF(4356, "brcmfmac4356-sdio");
BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
+BRCMF_FW_DEF(43012, "brcmfmac43012-sdio");
static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
@@ -637,7 +645,8 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455),
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
- BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373)
+ BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
+ BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012)
};
static void pkt_align(struct sk_buff *p, int len, int align)
@@ -671,6 +680,14 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
/* 1st KSO write goes to AOS wake up core if device is asleep */
brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err);
+ /* The 43012 chip can go down immediately after the KSO bit is
+ * cleared, so further reads of the KSO register could fail. Bail
+ * out right after clearing the bit instead of polling it.
+ */
+ if (!on && bus->ci->chip == CY_CC_43012_CHIP_ID)
+ return err;
+
if (on) {
/* device WAKEUP through KSO:
* write bit 0 & read back until
@@ -2396,6 +2413,14 @@ static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
return ret;
}
+static bool brcmf_chip_is_ulp(struct brcmf_chip *ci)
+{
+ return ci->chip == CY_CC_43012_CHIP_ID;
+}
+
static void brcmf_sdio_bus_stop(struct device *dev)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -2403,7 +2428,7 @@ static void brcmf_sdio_bus_stop(struct device *dev)
struct brcmf_sdio *bus = sdiodev->bus;
struct brcmf_core *core = bus->sdio_core;
u32 local_hostintmask;
- u8 saveclk;
+ u8 saveclk, bpreq;
int err;
brcmf_dbg(TRACE, "Enter\n");
@@ -2430,9 +2455,14 @@ static void brcmf_sdio_bus_stop(struct device *dev)
/* Force backplane clocks to assure F2 interrupt propagates */
saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
&err);
- if (!err)
- brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ if (!err) {
+ bpreq = saveclk;
+ bpreq |= brcmf_chip_is_ulp(bus->ci) ?
+ SBSDIO_HT_AVAIL_REQ : SBSDIO_FORCE_HT;
+ brcmf_sdiod_writeb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ bpreq, &err);
+ }
if (err)
brcmf_err("Failed to force clock for F2: err %d\n",
err);
@@ -3322,20 +3352,49 @@ err:
return bcmerror;
}
+static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus)
+{
+ return bus->ci->chip == CY_CC_43012_CHIP_ID ||
+        bus->ci->chip == CY_CC_4373_CHIP_ID ||
+        bus->ci->chip == BRCM_CC_4339_CHIP_ID ||
+        bus->ci->chip == BRCM_CC_4345_CHIP_ID ||
+        bus->ci->chip == BRCM_CC_4354_CHIP_ID;
+}
+
static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
{
int err = 0;
u8 val;
+ u8 wakeupctrl;
+ u8 cardcap;
+ u8 chipclkcsr;
brcmf_dbg(TRACE, "Enter\n");
+ if (brcmf_chip_is_ulp(bus->ci)) {
+ wakeupctrl = SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT;
+ chipclkcsr = SBSDIO_HT_AVAIL_REQ;
+ } else {
+ wakeupctrl = SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
+ chipclkcsr = SBSDIO_FORCE_HT;
+ }
+
+ if (brcmf_sdio_aos_no_decode(bus)) {
+ cardcap = SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC;
+ } else {
+ cardcap = (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
+ SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT);
+ }
+
val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err);
if (err) {
brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n");
return;
}
-
- val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT;
+ val |= 1 << wakeupctrl;
brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n");
@@ -3344,8 +3403,7 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
/* Add CMD14 Support */
brcmf_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP,
- (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT |
- SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT),
+ cardcap,
&err);
if (err) {
brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n");
@@ -3353,7 +3411,7 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
}
brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR,
- SBSDIO_FORCE_HT, &err);
+ chipclkcsr, &err);
if (err) {
brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n");
return;
@@ -4045,7 +4103,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
const struct firmware *code;
void *nvram;
u32 nvram_len;
- u8 saveclk;
+ u8 saveclk, bpreq;
+ u8 devctl;
brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
@@ -4078,8 +4137,11 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
/* Force clocks on backplane to be sure F2 interrupt propagates */
saveclk = brcmf_sdiod_readb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR, &err);
if (!err) {
+ bpreq = saveclk;
+ bpreq |= brcmf_chip_is_ulp(bus->ci) ?
+ SBSDIO_HT_AVAIL_REQ : SBSDIO_FORCE_HT;
brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
+ bpreq, &err);
}
if (err) {
brcmf_err("Failed to force clock for F2: err %d\n", err);
@@ -4101,8 +4163,37 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask),
bus->hostintmask, NULL);
-
- brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, 8, &err);
+ switch (sdiod->func1->device) {
+ case SDIO_DEVICE_ID_CYPRESS_4373:
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
+ CY_4373_F2_WATERMARK);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
+ CY_4373_F2_WATERMARK, &err);
+ devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
+ &err);
+ devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+ brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
+ &err);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
+ CY_4373_F2_WATERMARK |
+ SBSDIO_MESBUSYCTRL_ENAB, &err);
+ break;
+ case SDIO_DEVICE_ID_CYPRESS_43012:
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
+ CY_43012_F2_WATERMARK);
+ brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
+ CY_43012_F2_WATERMARK, &err);
+ devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL,
+ &err);
+ devctl |= SBSDIO_DEVCTL_F2WM_ENAB;
+ brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl,
+ &err);
+ break;
+ default:
+ brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
+ DEFAULT_F2_WATERMARK, &err);
+ break;
+ }
} else {
/* Disable F2 again */
sdio_disable_func(sdiod->func2);
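
The watermark values are counted in 32-bit words, which is why the debug messages above print "0x%x*4 bytes". Converting the three settings used here:

#include <stdio.h>

#define DEFAULT_F2_WATERMARK  0x8
#define CY_4373_F2_WATERMARK  0x40
#define CY_43012_F2_WATERMARK 0x60

int main(void)
{
    /* words * 4 = bytes buffered before the device signals busy */
    printf("default: %d bytes\n", DEFAULT_F2_WATERMARK * 4);  /* 32 */
    printf("4373:    %d bytes\n", CY_4373_F2_WATERMARK * 4);  /* 256 */
    printf("43012:   %d bytes\n", CY_43012_F2_WATERMARK * 4); /* 384 */
    return 0;
}
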
@@ -4174,6 +4265,7 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus)
fwreq->items[BRCMF_SDIO_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
fwreq->items[BRCMF_SDIO_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
+ fwreq->board_type = bus->sdiodev->settings->board_type;
return fwreq;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 7faed831f07d..34b031154da9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -77,7 +77,7 @@
#define SBSDIO_GPIO_OUT 0x10006
/* gpio enable */
#define SBSDIO_GPIO_EN 0x10007
-/* rev < 7, watermark for sdio device */
+/* rev < 7, watermark for sdio device TX path */
#define SBSDIO_WATERMARK 0x10008
/* control busy signal generation */
#define SBSDIO_DEVICE_CTL 0x10009
@@ -104,6 +104,13 @@
#define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C
/* MesBusyCtl (rev 11) */
#define SBSDIO_FUNC1_MESBUSYCTRL 0x1001D
+/* Watermark for sdio device RX path */
+#define SBSDIO_MESBUSY_RXFIFO_WM_MASK 0x7F
+#define SBSDIO_MESBUSY_RXFIFO_WM_SHIFT 0
+/* Enable busy capability for MES access */
+#define SBSDIO_MESBUSYCTRL_ENAB 0x80
+#define SBSDIO_MESBUSYCTRL_ENAB_SHIFT 7
+
/* Sdio Core Rev 12 */
#define SBSDIO_FUNC1_WAKEUPCTRL 0x1001E
#define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK 0x1
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 81ff558046a8..6188275b17e5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -846,8 +846,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
status = brcms_c_aggregatable(wl->wlc, tid);
spin_unlock_bh(&wl->lock);
if (!status) {
- brcms_err(wl->wlc->hw->d11core,
- "START: tid %d is not agg\'able\n", tid);
+ brcms_dbg_ht(wl->wlc->hw->d11core,
+ "START: tid %d is not agg\'able\n", tid);
return -EINVAL;
}
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
index 4960f7d26804..e9e8337f386c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h
@@ -220,13 +220,6 @@ enum phy_cal_mode {
#define BB_MULT_MASK 0x0000ffff
#define BB_MULT_VALID_MASK 0x80000000
-#define CORDIC_AG 39797
-#define CORDIC_NI 18
-#define FIXED(X) ((s32)((X) << 16))
-
-#define FLOAT(X) \
- (((X) >= 0) ? ((((X) >> 15) + 1) >> 1) : -((((-(X)) >> 15) + 1) >> 1))
-
#define PHY_CHAIN_TX_DISABLE_TEMP 115
#define PHY_HYSTERESIS_DELTATEMP 5
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index 9fb0d9fbd939..e78a93a45741 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -3447,8 +3447,8 @@ wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
theta += rot;
- i_samp = (u16) (FLOAT(tone_samp.i * max_val) & 0x3ff);
- q_samp = (u16) (FLOAT(tone_samp.q * max_val) & 0x3ff);
+ i_samp = (u16)(CORDIC_FLOAT(tone_samp.i * max_val) & 0x3ff);
+ q_samp = (u16)(CORDIC_FLOAT(tone_samp.q * max_val) & 0x3ff);
data_buf[t] = (i_samp << 10) | q_samp;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index a57f2711f3c0..f4f5e9044152 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -23089,8 +23089,8 @@ wlc_phy_gen_load_samples_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,
theta += rot;
- tone_buf[t].q = (s32) FLOAT(tone_buf[t].q * max_val);
- tone_buf[t].i = (s32) FLOAT(tone_buf[t].i * max_val);
+ tone_buf[t].q = (s32)CORDIC_FLOAT(tone_buf[t].q * max_val);
+ tone_buf[t].i = (s32)CORDIC_FLOAT(tone_buf[t].i * max_val);
}
wlc_phy_loadsampletable_nphy(pi, tone_buf, num_samps);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
index eb5db94f5745..8ac34821f1c1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
@@ -128,7 +128,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
}
break;
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
@@ -140,7 +140,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
ch->band = BRCMU_CHAN_BAND_2G;
break;
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
}
@@ -167,7 +167,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
ch->sb = BRCMU_CHAN_SB_U;
ch->control_ch_num += CH_10MHZ_APART;
} else {
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
}
break;
case BRCMU_CHSPEC_D11AC_BW_80:
@@ -188,7 +188,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
ch->control_ch_num += CH_30MHZ_APART;
break;
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
break;
@@ -222,13 +222,13 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
ch->control_ch_num += CH_70MHZ_APART;
break;
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
break;
case BRCMU_CHSPEC_D11AC_BW_8080:
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
@@ -240,7 +240,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
ch->band = BRCMU_CHAN_BAND_2G;
break;
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
break;
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index 686f7a85a045..839980da9643 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -60,6 +60,7 @@
#define BRCM_CC_43664_CHIP_ID 43664
#define BRCM_CC_4371_CHIP_ID 0x4371
#define CY_CC_4373_CHIP_ID 0x4373
+#define CY_CC_43012_CHIP_ID 43012
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
@@ -74,6 +75,7 @@
/* PCIE Device IDs */
#define BRCM_PCIE_4350_DEVICE_ID 0x43a3
#define BRCM_PCIE_4354_DEVICE_ID 0x43df
+#define BRCM_PCIE_4354_RAW_DEVICE_ID 0x4354
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
index e1fd499930a0..de8225e6248b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h
@@ -269,6 +269,25 @@ struct chipcregs {
/* GSIO (spi/i2c) present, rev >= 37 */
#define CC_CAP2_GSIO 0x00000002
+/* sr_control0, rev >= 48 */
+#define CC_SR_CTL0_ENABLE_MASK BIT(0)
+#define CC_SR_CTL0_ENABLE_SHIFT 0
+#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT 1 /* sr_clk to sr_memory enable */
+#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT 2 /* Rising edge resource trigger 0 to
+ * sr_engine
+ */
+#define CC_SR_CTL0_MIN_DIV_SHIFT 6 /* Min division value for fast clk
+ * in sr_engine
+ */
+#define CC_SR_CTL0_EN_SBC_STBY_SHIFT 16
+#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18
+#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT 19
+#define CC_SR_CTL0_ALLOW_PIC_SHIFT 20 /* Allow pic to separate power
+ * domains
+ */
+#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT 25
+#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30
+
/* pmucapabilities */
#define PCAP_REV_MASK 0x000000ff
#define PCAP_RC_MASK 0x00001f00
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 04dd7a936593..3f5a14112c6b 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -1359,7 +1359,7 @@ static int micsetup(struct airo_info *ai) {
int i;
if (ai->tfm == NULL)
- ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+ ai->tfm = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(ai->tfm)) {
airo_print_err(ai->dev->name, "failed to load transform for AES");
@@ -5462,7 +5462,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
we have to add a spin lock... */
rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
- ptr += sprintf(ptr, "%pM %*s rssi = %d",
+ ptr += sprintf(ptr, "%pM %.*s rssi = %d",
BSSList_rid.bssid,
(int)BSSList_rid.ssidLen,
BSSList_rid.ssid,
diff --git a/drivers/net/wireless/intel/ipw2x00/Kconfig b/drivers/net/wireless/intel/ipw2x00/Kconfig
index d6ec44d7a391..562395517e6c 100644
--- a/drivers/net/wireless/intel/ipw2x00/Kconfig
+++ b/drivers/net/wireless/intel/ipw2x00/Kconfig
@@ -15,9 +15,9 @@ config IPW2100
A driver for the Intel PRO/Wireless 2100 Network
Connection 802.11b wireless network adapter.
- See <file:Documentation/networking/README.ipw2100> for information on
- the capabilities currently enabled in this driver and for tips
- for debugging issues and problems.
+ See <file:Documentation/networking/device_drivers/intel/ipw2100.txt>
+ for information on the capabilities currently enabled in this driver
+ and for tips for debugging issues and problems.
In order to use this driver, you will need a firmware image for it.
You can obtain the firmware from
@@ -77,8 +77,8 @@ config IPW2200
A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
Connection adapters.
- See <file:Documentation/networking/README.ipw2200> for
- information on the capabilities currently enabled in this
+ See <file:Documentation/networking/device_drivers/intel/ipw2200.txt>
+ for information on the capabilities currently enabled in this
driver and for tips for debugging issues and problems.
In order to use this driver, you will need a firmware image for it.
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 910db46db6a1..52e5ed2d3bc2 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -5603,12 +5603,8 @@ static void shim__set_security(struct net_device *dev,
if ((sec->flags & SEC_ACTIVE_KEY) &&
priv->ieee->sec.active_key != sec->active_key) {
- if (sec->active_key <= 3) {
- priv->ieee->sec.active_key = sec->active_key;
- priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
- } else
- priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
-
+ priv->ieee->sec.active_key = sec->active_key;
+ priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
priv->status |= STATUS_SECURITY_UPDATED;
}
@@ -8370,7 +8366,7 @@ static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) {
printk(KERN_WARNING DRV_NAME ": Firmware image not compatible "
"(detected version id of %u). "
- "See Documentation/networking/README.ipw2100\n",
+ "See Documentation/networking/device_drivers/intel/ipw2100.txt\n",
h->version);
return 1;
}
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index bbdca13c5a9f..fa400f92d7e2 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -10722,11 +10722,8 @@ static void shim__set_security(struct net_device *dev,
}
if (sec->flags & SEC_ACTIVE_KEY) {
- if (sec->active_key <= 3) {
- priv->ieee->sec.active_key = sec->active_key;
- priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
- } else
- priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
+ priv->ieee->sec.active_key = sec->active_key;
+ priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
priv->status |= STATUS_SECURITY_UPDATED;
} else
priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index e8983c6a2b7b..a697edd46e7f 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -781,7 +781,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
switch (scale_action) {
case -1:
- /* Decrese rate */
+ /* Decrease rate */
if (low != RATE_INVALID)
idx = low;
break;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 280cd8ae1696..6b4488a178a7 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -559,7 +559,7 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
break;
}
- /* fall through if TTAK OK */
+ /* fall through - if TTAK OK */
default:
if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 6514baf799fe..a2f86cbcc740 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -2695,6 +2695,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_BAD_KEY_TTAK)
break;
+ /* fall through */
case RX_RES_STATUS_SEC_TYPE_WEP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
@@ -2704,6 +2705,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
D_RX("Packet destroyed\n");
return -1;
}
+ /* fall through */
case RX_RES_STATUS_SEC_TYPE_CCMP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_DECRYPT_OK) {
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index e5a2fc738ac3..491ca3c8b43c 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -1,6 +1,6 @@
config IWLWIFI
tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
- depends on PCI && MAC80211 && HAS_IOMEM
+ depends on PCI && HAS_IOMEM
select FW_LOADER
---help---
Select to build the driver supporting the:
@@ -53,6 +53,7 @@ config IWLWIFI_LEDS
config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
+ depends on MAC80211
help
This is the driver that supports the DVM firmware. The list
of the devices that use this firmware is available here:
@@ -61,6 +62,7 @@ config IWLDVM
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
select WANT_DEV_COREDUMP
+ depends on MAC80211
help
This is the driver that supports the MVM firmware. The list
of the devices that use this firmware is available here:
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 04e376cc898c..ff41987a7e35 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -11,6 +11,7 @@ iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
+iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index 76b5ddb20248..1ff388b593ad 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -48,7 +48,7 @@
static const struct iwl_base_params iwl1000_base_params = {
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.pll_cfg = true,
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
.shadow_ram_support = false,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index e7e45846dd07..a6ec7ad39dcb 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -57,7 +57,7 @@
#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl2000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
@@ -71,7 +71,7 @@ static const struct iwl_base_params iwl2000_base_params = {
static const struct iwl_base_params iwl2030_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index da5d5f9b2573..7e65073834b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -56,14 +56,13 @@
#include "iwl-config.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 41
+#define IWL_22000_UCODE_API_MAX 43
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
/* NVM versions */
#define IWL_22000_NVM_VERSION 0x0a1d
-#define IWL_22000_TX_POWER_VERSION 0xffff /* meaningless */
/* Memory offsets and lengths */
#define IWL_22000_DCCM_OFFSET 0x800000 /* LMAC1 */
@@ -106,10 +105,8 @@
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
-#define NVM_HW_SECTION_NUM_FAMILY_22000 10
-
static const struct iwl_base_params iwl_22000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 512,
.max_tfd_queue_size = 256,
.shadow_ram_support = true,
@@ -121,7 +118,7 @@ static const struct iwl_base_params iwl_22000_base_params = {
};
static const struct iwl_base_params iwl_22560_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
.shadow_ram_support = true,
@@ -142,7 +139,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
.led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
+ .nvm_hw_section_num = 10, \
.non_shared_ant = ANT_B, \
.dccm_offset = IWL_22000_DCCM_OFFSET, \
.dccm_len = IWL_22000_DCCM_LEN, \
@@ -157,7 +154,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.mac_addr_from_csr = true, \
.ht_params = &iwl_22000_ht_params, \
.nvm_ver = IWL_22000_NVM_VERSION, \
- .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.use_tfh = true, \
.rf_id = true, \
@@ -323,7 +319,6 @@ MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index 30e62a7c9d52..fbb18d066cd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -66,7 +66,7 @@
#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl6000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
@@ -79,7 +79,7 @@ static const struct iwl_base_params iwl6000_base_params = {
};
static const struct iwl_base_params iwl6050_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
@@ -92,7 +92,7 @@ static const struct iwl_base_params iwl6050_base_params = {
};
static const struct iwl_base_params iwl6000_g2_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_2K,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index c973bfaa3414..289e3c398a12 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -80,17 +80,11 @@
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
-#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL3160_NVM_VERSION 0x709
-#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL3165_NVM_VERSION 0x709
-#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL3168_NVM_VERSION 0xd01
-#define IWL3168_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7265_NVM_VERSION 0x0a1d
-#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
#define IWL7265D_NVM_VERSION 0x0c11
-#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */
/* DCCM offsets and lengths */
#define IWL7000_DCCM_OFFSET 0x800000
@@ -113,10 +107,8 @@
#define IWL7265D_FW_PRE "iwlwifi-7265D-"
#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
-#define NVM_HW_SECTION_NUM_FAMILY_7000 0
-
static const struct iwl_base_params iwl7000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_16K,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
.shadow_ram_support = true,
@@ -159,7 +151,7 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.device_family = IWL_DEVICE_FAMILY_7000, \
.base_params = &iwl7000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \
+ .nvm_hw_section_num = 0, \
.non_shared_ant = ANT_A, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.dccm_offset = IWL7000_DCCM_OFFSET, \
@@ -191,7 +183,6 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
IWL_DEVICE_7000,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7260_NVM_VERSION,
- .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
.dccm_len = IWL7260_DCCM_LEN,
@@ -203,7 +194,6 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
IWL_DEVICE_7000,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7260_NVM_VERSION,
- .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.high_temp = true,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
@@ -217,7 +207,6 @@ const struct iwl_cfg iwl7260_2n_cfg = {
IWL_DEVICE_7000,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7260_NVM_VERSION,
- .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
.dccm_len = IWL7260_DCCM_LEN,
@@ -229,7 +218,6 @@ const struct iwl_cfg iwl7260_n_cfg = {
IWL_DEVICE_7000,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7260_NVM_VERSION,
- .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
.dccm_len = IWL7260_DCCM_LEN,
@@ -241,7 +229,6 @@ const struct iwl_cfg iwl3160_2ac_cfg = {
IWL_DEVICE_7000,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3160_NVM_VERSION,
- .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.dccm_len = IWL3160_DCCM_LEN,
};
@@ -252,7 +239,6 @@ const struct iwl_cfg iwl3160_2n_cfg = {
IWL_DEVICE_7000,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3160_NVM_VERSION,
- .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.dccm_len = IWL3160_DCCM_LEN,
};
@@ -263,7 +249,6 @@ const struct iwl_cfg iwl3160_n_cfg = {
IWL_DEVICE_7000,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3160_NVM_VERSION,
- .nvm_calib_ver = IWL3160_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.dccm_len = IWL3160_DCCM_LEN,
};
@@ -291,7 +276,6 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
IWL_DEVICE_7005D,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3165_NVM_VERSION,
- .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
@@ -302,7 +286,6 @@ const struct iwl_cfg iwl3168_2ac_cfg = {
IWL_DEVICE_3008,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3168_NVM_VERSION,
- .nvm_calib_ver = IWL3168_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
.nvm_type = IWL_NVM_SDP,
@@ -314,7 +297,6 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
IWL_DEVICE_7005,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
- .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
@@ -325,7 +307,6 @@ const struct iwl_cfg iwl7265_2n_cfg = {
IWL_DEVICE_7005,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
- .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
@@ -336,7 +317,6 @@ const struct iwl_cfg iwl7265_n_cfg = {
IWL_DEVICE_7005,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
- .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
@@ -347,7 +327,6 @@ const struct iwl_cfg iwl7265d_2ac_cfg = {
IWL_DEVICE_7005D,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265D_NVM_VERSION,
- .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
@@ -358,7 +337,6 @@ const struct iwl_cfg iwl7265d_2n_cfg = {
IWL_DEVICE_7005D,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265D_NVM_VERSION,
- .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
@@ -369,7 +347,6 @@ const struct iwl_cfg iwl7265d_n_cfg = {
IWL_DEVICE_7005D,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265D_NVM_VERSION,
- .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 348c40fcddcb..d7d17c1cceea 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -75,7 +75,6 @@
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
-#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
/* Memory offsets and lengths */
#define IWL8260_DCCM_OFFSET 0x800000
@@ -93,11 +92,10 @@
#define IWL8265_MODULE_FIRMWARE(api) \
IWL8265_FW_PRE __stringify(api) ".ucode"
-#define NVM_HW_SECTION_NUM_FAMILY_8000 10
#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
static const struct iwl_base_params iwl8000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
.shadow_ram_support = true,
@@ -139,7 +137,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.device_family = IWL_DEVICE_FAMILY_8000, \
.base_params = &iwl8000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \
+ .nvm_hw_section_num = 10, \
.features = NETIF_F_RXCSUM, \
.non_shared_ant = ANT_A, \
.dccm_offset = IWL8260_DCCM_OFFSET, \
@@ -177,7 +175,6 @@ const struct iwl_cfg iwl8260_2n_cfg = {
IWL_DEVICE_8260,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
- .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
};
const struct iwl_cfg iwl8260_2ac_cfg = {
@@ -186,7 +183,6 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
IWL_DEVICE_8260,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
- .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
@@ -196,7 +192,6 @@ const struct iwl_cfg iwl8265_2ac_cfg = {
IWL_DEVICE_8265,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
- .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.vht_mu_mimo_supported = true,
};
@@ -207,7 +202,6 @@ const struct iwl_cfg iwl8275_2ac_cfg = {
IWL_DEVICE_8265,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
- .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.vht_mu_mimo_supported = true,
};
@@ -218,7 +212,6 @@ const struct iwl_cfg iwl4165_2ac_cfg = {
IWL_DEVICE_8000,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
- .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index d55fd23cafe6..f2114137c13f 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -57,14 +57,13 @@
#include "fw/file.h"
/* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX 41
+#define IWL9000_UCODE_API_MAX 43
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 30
/* NVM versions */
#define IWL9000_NVM_VERSION 0x0a1d
-#define IWL9000_TX_POWER_VERSION 0xffff /* meaningless */
/* Memory offsets and lengths */
#define IWL9000_DCCM_OFFSET 0x800000
@@ -90,10 +89,8 @@
#define IWL9260B_MODULE_FIRMWARE(api) \
IWL9260B_FW_PRE __stringify(api) ".ucode"
-#define NVM_HW_SECTION_NUM_FAMILY_9000 10
-
static const struct iwl_base_params iwl9000_base_params = {
- .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
+ .eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
.shadow_ram_support = true,
@@ -137,7 +134,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.device_family = IWL_DEVICE_FAMILY_9000, \
.base_params = &iwl9000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
- .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_9000, \
+ .nvm_hw_section_num = 10, \
.non_shared_ant = ANT_B, \
.dccm_offset = IWL9000_DCCM_OFFSET, \
.dccm_len = IWL9000_DCCM_LEN, \
@@ -157,17 +154,17 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.min_umac_error_event_table = 0x800000, \
.csr = &iwl_csr_v1, \
.d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 92 * 1024
+ .d3_debug_data_length = 92 * 1024, \
+ .ht_params = &iwl9000_ht_params, \
+ .nvm_ver = IWL9000_NVM_VERSION, \
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+
const struct iwl_cfg iwl9160_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9160",
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl9260_2ac_cfg = {
@@ -175,10 +172,6 @@ const struct iwl_cfg iwl9260_2ac_cfg = {
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl9260_killer_2ac_cfg = {
@@ -186,10 +179,6 @@ const struct iwl_cfg iwl9260_killer_2ac_cfg = {
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl9270_2ac_cfg = {
@@ -197,10 +186,6 @@ const struct iwl_cfg iwl9270_2ac_cfg = {
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl9460_2ac_cfg = {
@@ -208,10 +193,6 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl9460_2ac_cfg_soc = {
@@ -220,10 +201,6 @@ const struct iwl_cfg iwl9460_2ac_cfg_soc = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};
@@ -234,10 +211,6 @@ const struct iwl_cfg iwl9461_2ac_cfg_soc = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};
@@ -248,10 +221,6 @@ const struct iwl_cfg iwl9462_2ac_cfg_soc = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};
@@ -261,10 +230,6 @@ const struct iwl_cfg iwl9560_2ac_cfg = {
.fw_name_pre = IWL9260A_FW_PRE,
.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
};
const struct iwl_cfg iwl9560_2ac_cfg_soc = {
@@ -273,10 +238,6 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};
@@ -287,10 +248,6 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};
@@ -301,10 +258,6 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
};
@@ -315,10 +268,6 @@ const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
@@ -330,10 +279,6 @@ const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
@@ -345,10 +290,6 @@ const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
@@ -360,10 +301,6 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
@@ -375,10 +312,6 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
@@ -390,10 +323,6 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
IWL_DEVICE_9000,
- .ht_params = &iwl9000_ht_params,
- .nvm_ver = IWL9000_NVM_VERSION,
- .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
.soc_latency = 5000,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
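
All of the 9000-series hunks above make the same move: the fields every config shared (ht_params, nvm_ver, max_ht_ampdu_exponent) migrate into the IWL_DEVICE_9000 macro, and nvm_calib_ver disappears along with the meaningless IWL9000_TX_POWER_VERSION. A small standalone sketch of the designated-initializer-in-macro pattern, with hypothetical names:

```c
#include <stdio.h>

struct cfg {
	const char *name;
	unsigned nvm_ver;
	int integrated;
};

/* Shared fields expand inside each initializer list; per-device
 * designators can still be added after the macro. */
#define DEVICE_COMMON \
	.nvm_ver = 0x0a1d

static const struct cfg cfg_a = {
	.name = "A",
	DEVICE_COMMON,
};

static const struct cfg cfg_b = {
	.name = "B",
	DEVICE_COMMON,
	.integrated = 1, /* device-specific extras remain per entry */
};

int main(void)
{
	printf("%s nvm=%#x integrated=%d\n",
	       cfg_b.name, cfg_b.nvm_ver, cfg_b.integrated);
	return 0;
}
```
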
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 1088ff036e13..c219bca5cff4 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1224,6 +1224,23 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
return 0;
}
+static int iwl_nvm_check_version(struct iwl_nvm_data *data,
+ struct iwl_trans *trans)
+{
+ if (data->nvm_version >= trans->cfg->nvm_ver ||
+ data->calib_version >= trans->cfg->nvm_calib_ver) {
+ IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+ data->nvm_version, data->calib_version);
+ return 0;
+ }
+
+ IWL_ERR(trans,
+ "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
+ data->nvm_version, trans->cfg->nvm_ver,
+ data->calib_version, trans->cfg->nvm_calib_ver);
+ return -EINVAL;
+}
+
static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
const struct iwl_cfg *cfg,
const struct iwl_fw *fw,
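
The version gate moves into the DVM op mode because only DVM devices still carry a meaningful nvm_calib_ver (the other families just deleted theirs above). Note the check is an OR: either a current NVM version or a current calibration version is enough. A standalone mock of the same logic (field values are hypothetical except 0x0a1d, which appears in the configs above):

```c
#include <stdio.h>

struct nvm_data { unsigned nvm_version, calib_version; };
struct cfg { unsigned nvm_ver, nvm_calib_ver; };

/* Accept the EEPROM if either version is new enough, as above. */
static int nvm_check_version(const struct nvm_data *d, const struct cfg *c)
{
	if (d->nvm_version >= c->nvm_ver ||
	    d->calib_version >= c->nvm_calib_ver)
		return 0;
	return -1; /* too old: the op mode refuses to start */
}

int main(void)
{
	struct cfg c = { .nvm_ver = 0x0a1d, .nvm_calib_ver = 0x255 };
	struct nvm_data old = { .nvm_version = 0x09ff, .calib_version = 0x200 };
	printf("old eeprom: %s\n",
	       nvm_check_version(&old, &c) ? "rejected" : "ok");
	return 0;
}
```
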
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
index 7f645b62804e..5e88fa2e6fb7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -127,17 +129,6 @@ struct iwl_phy_cfg_cmd {
struct iwl_calib_ctrl calib_control;
} __packed;
-#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1))
-#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3))
-#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5))
-#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7))
-#define PHY_CFG_TX_CHAIN_A BIT(8)
-#define PHY_CFG_TX_CHAIN_B BIT(9)
-#define PHY_CFG_TX_CHAIN_C BIT(10)
-#define PHY_CFG_RX_CHAIN_A BIT(12)
-#define PHY_CFG_RX_CHAIN_B BIT(13)
-#define PHY_CFG_RX_CHAIN_C BIT(14)
-
/*
* enum iwl_dc2dc_config_id - flag ids
*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index eff3249af48a..fdc54a5dc9de 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -105,6 +105,11 @@ enum iwl_data_path_subcmd_ids {
HE_AIR_SNIFFER_CONFIG_CMD = 0x13,
/**
+ * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
+ */
+ RX_NO_DATA_NOTIF = 0xF5,
+
+ /**
* @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
*/
TLC_MNG_UPDATE_NOTIF = 0xF7,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
new file mode 100644
index 000000000000..ab82b7a67967
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -0,0 +1,401 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_dbg_tlv_h__
+#define __iwl_fw_dbg_tlv_h__
+
+#include <linux/bitops.h>
+
+/**
+ * struct iwl_fw_ini_header - common header for all debug-group TLV structures
+ * @tlv_version: version info
+ * @apply_point: &enum iwl_fw_ini_apply_point
+ * @data: TLV data that follows
+ */
+struct iwl_fw_ini_header {
+ __le32 tlv_version;
+ __le32 apply_point;
+ u8 data[];
+} __packed; /* FW_INI_HEADER_TLV_S */
+
+/**
+ * struct iwl_fw_ini_allocation_tlv - (IWL_FW_INI_TLV_TYPE_BUFFER_ALLOCATION)
+ * buffer allocation TLV - for debug
+ *
+ * @header: header
+ * @allocation_id: &enum iwl_fw_ini_allocation_id - to bind allocation and hcmd
+ * if needed (DBGC1/DBGC2/SDFX/...)
+ * @buffer_location: type of iwl_fw_ini_buffer_location
+ * @size: size in bytes
+ * @max_fragments: the maximum allowed fragmentation in the desired memory
+ * allocation above
+ * @min_frag_size: the minimum allowed fragmentation size in bytes
+*/
+struct iwl_fw_ini_allocation_tlv {
+ struct iwl_fw_ini_header header;
+ __le32 allocation_id;
+ __le32 buffer_location;
+ __le32 size;
+ __le32 max_fragments;
+ __le32 min_frag_size;
+} __packed; /* FW_INI_BUFFER_ALLOCATION_TLV_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_hcmd - (IWL_FW_INI_TLV_TYPE_HCMD)
+ * Generic host-command pass-through TLV
+ *
+ * @id: the debug configuration command type, for instance 0xf6, 0xf5 or DHC
+ * @group: the desired cmd group
+ * @padding: all zeros for dword alignment
+ * @data: the full command (0xf6/0xf5) payload to be sent
+*/
+struct iwl_fw_ini_hcmd {
+ u8 id;
+ u8 group;
+ __le16 padding;
+ u8 data[0];
+} __packed; /* FW_INI_HCMD_S */
+
+/**
+ * struct iwl_fw_ini_hcmd_tlv
+ * @header: header
+ * @hcmd: a variable length host-command to be sent to apply the configuration.
+ */
+struct iwl_fw_ini_hcmd_tlv {
+ struct iwl_fw_ini_header header;
+ struct iwl_fw_ini_hcmd hcmd;
+} __packed; /* FW_INI_HCMD_TLV_S_VER_1 */
+
+/**
+ * struct iwl_fw_ini_debug_flow_tlv - (IWL_FW_INI_TLV_TYPE_DEBUG_FLOW)
+ *
+ * @header: header
+ * @debug_flow_cfg: &enum iwl_fw_ini_debug_flow
+ */
+struct iwl_fw_ini_debug_flow_tlv {
+ struct iwl_fw_ini_header header;
+ __le32 debug_flow_cfg;
+} __packed; /* FW_INI_DEBUG_FLOW_TLV_S_VER_1 */
+
+#define IWL_FW_INI_MAX_REGION_ID 20
+#define IWL_FW_INI_MAX_NAME 32
+/**
+ * struct iwl_fw_ini_region_cfg
+ * @region_id: ID of this dump configuration
+ * @region_type: &enum iwl_fw_ini_region_type
+ * @num_regions: number of addresses in the @start_addr array.
+ * @allocation_id: for IWL_FW_INI_REGION_DRAM_BUFFER, substitutes for @num_regions
+ * @name_len: name length
+ * @name: file name to use for this region
+ * @size: size of the data, in bytes (unused for IWL_FW_INI_REGION_DRAM_BUFFER)
+ * @start_addr: array of addresses. (unused for IWL_FW_INI_REGION_DRAM_BUFFER)
+ */
+struct iwl_fw_ini_region_cfg {
+ __le32 region_id;
+ __le32 region_type;
+ __le32 name_len;
+ u8 name[IWL_FW_INI_MAX_NAME];
+ union {
+ __le32 num_regions;
+ __le32 allocation_id;
+ };
+ __le32 size;
+ __le32 start_addr[];
+} __packed; /* FW_INI_REGION_CONFIG_S */
+
+/**
+ * struct iwl_fw_ini_region_tlv - (IWL_FW_INI_TLV_TYPE_REGION_CFG)
+ * DUMP regions TLV - defines the region sections and the IDs that triggers use
+ * @header: header
+ * @num_regions: how many region configurations follow
+ * @region_config: list of region configurations
+ */
+struct iwl_fw_ini_region_tlv {
+ struct iwl_fw_ini_header header;
+ __le32 num_regions;
+ struct iwl_fw_ini_region_cfg region_config[];
+} __packed; /* FW_INI_REGION_CFG_S */
+
+/**
+ * struct iwl_fw_ini_trigger - (IWL_FW_INI_TLV_TYPE_DUMP_CFG)
+ * Trigger configuration - binds a trigger ID to the region IDs it dumps
+ *
+ * @trigger_id: &enum iwl_fw_ini_trigger_id
+ * @ignore_default: override FW TLV with binary TLV
+ * @dump_delay: delay from trigger fire to dump, in usec
+ * @occurrences: maximum number of times the trigger may fire
+ * @ignore_consec: window, in usec, in which consecutive firings are ignored
+ * @force_restart: force FW restart
+ * @multi_dut: initiate debug dump data on several DUTs
+ * @trigger_data: generic data to be utilized per trigger
+ * @num_regions: number of dump regions defined for this trigger
+ * @data: region IDs
+ */
+struct iwl_fw_ini_trigger {
+ __le32 trigger_id;
+ __le32 ignore_default;
+ __le32 dump_delay;
+ __le32 occurrences;
+ __le32 ignore_consec;
+ __le32 force_restart;
+ __le32 multi_dut;
+ __le32 trigger_data;
+ __le32 num_regions;
+ __le32 data[];
+} __packed; /* FW_INI_TRIGGER_CONFIG_S */
+
+/**
+ * struct iwl_fw_ini_trigger_tlv - (IWL_FW_INI_TLV_TYPE_TRIGGERS_CFG)
+ * Triggers TLV - lists the trigger configurations
+ *
+ * @header: header
+ * @num_triggers: how many trigger configurations follow
+ * @trigger_config: list of trigger configurations
+ */
+struct iwl_fw_ini_trigger_tlv {
+ struct iwl_fw_ini_header header;
+ __le32 num_triggers;
+ struct iwl_fw_ini_trigger trigger_config[];
+} __packed; /* FW_INI_TRIGGER_CFG_S */
+
+/**
+ * enum iwl_fw_ini_trigger_id
+ * @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert
+ * @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang
+ * @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert
+ * @IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR: FW error notification
+ * @IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING: FW warning notification
+ * @IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO: FW info notification
+ * @IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG: FW debug notification
+ * @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger
+ * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity
+ * @IWL_FW_TRIGGER_ID_HOST_DID_INITIATED_EVENT: undefined
+ * @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency
+ * threshold was crossed
+ * @IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED: TX failed
+ * @IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER: Deauth initiated by host
+ * @IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST: stop GO request
+ * @IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST: start GO request
+ * @IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST: join P2P group request
+ * @IWL_FW_TRIGGER_ID_HOST_SCAN_START: scan started event
+ * @IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED: undefined
+ * @IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS: undefined
+ * @IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG: undefined
+ * @IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED: BAR frame was received
+ * @IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED: agg TX failed
+ * @IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED: EAPOL TX failed
+ * @IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED: suspicious TX response
+ * @IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT: received suspicious auth
+ * @IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE: roaming was completed
+ * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED: fast assoc failed
+ * @IWL_FW_TRIGGER_ID_HOST_D3_START: D3 start
+ * @IWL_FW_TRIGGER_ID_HOST_D3_END: D3 end
+ * @IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS: missed beacon events
+ * @IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS: P2P missed beacon events
+ * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES: undefined
+ * @IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED: undefined
+ * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED: authentication / association
+ * failed
+ * @IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE: scan complete event
+ * @IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT: scan abort complete
+ * @IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE: nic alive message was received
+ * @IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE: CSA was completed
+ * @IWL_FW_TRIGGER_ID_NUM: number of trigger IDs
+ */
+enum iwl_fw_ini_trigger_id {
+ /* Errors triggers */
+ IWL_FW_TRIGGER_ID_FW_ASSERT = 1,
+ IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 2,
+ IWL_FW_TRIGGER_ID_FW_HW_ERROR = 3,
+ /* Generic triggers */
+ IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR = 4,
+ IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING = 5,
+ IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO = 6,
+ IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG = 7,
+ /* User Trigger */
+ IWL_FW_TRIGGER_ID_USER_TRIGGER = 8,
+ /* Host triggers */
+ IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 9,
+ IWL_FW_TRIGGER_ID_HOST_DID_INITIATED_EVENT = 10,
+ IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 11,
+ IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 12,
+ IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 13,
+ IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 14,
+ IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 15,
+ IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 16,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_START = 17,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 18,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 19,
+ IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 20,
+ IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 21,
+ IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 22,
+ IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 23,
+ IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 24,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 25,
+ IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 26,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 27,
+ IWL_FW_TRIGGER_ID_HOST_D3_START = 28,
+ IWL_FW_TRIGGER_ID_HOST_D3_END = 29,
+ IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 30,
+ IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 31,
+ IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 32,
+ IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 33,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 34,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 35,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 36,
+ IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 37,
+ IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 38,
+ IWL_FW_TRIGGER_ID_NUM,
+}; /* FW_INI_TRIGGER_ID_E_VER_1 */
+
+/**
+ * enum iwl_fw_ini_apply_point
+ * @IWL_FW_INI_APPLY_INVALID: invalid
+ * @IWL_FW_INI_APPLY_EARLY: pre loading FW
+ * @IWL_FW_INI_APPLY_AFTER_ALIVE: first cmd from host after alive
+ * @IWL_FW_INI_APPLY_POST_INIT: last cmd in initialization sequence
+ * @IWL_FW_INI_APPLY_MISSED_BEACONS: missed beacons notification
+ * @IWL_FW_INI_APPLY_SCAN_COMPLETE: scan completed
+ * @IWL_FW_INI_APPLY_NUM: number of apply points
+*/
+enum iwl_fw_ini_apply_point {
+ IWL_FW_INI_APPLY_INVALID,
+ IWL_FW_INI_APPLY_EARLY,
+ IWL_FW_INI_APPLY_AFTER_ALIVE,
+ IWL_FW_INI_APPLY_POST_INIT,
+ IWL_FW_INI_APPLY_MISSED_BEACONS,
+ IWL_FW_INI_APPLY_SCAN_COMPLETE,
+ IWL_FW_INI_APPLY_NUM,
+}; /* FW_INI_APPLY_POINT_E_VER_1 */
+
+/**
+ * enum iwl_fw_ini_allocation_id
+ * @IWL_FW_INI_ALLOCATION_INVALID: invalid
+ * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
+ * @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
+ * @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
+ * @IWL_FW_INI_ALLOCATION_ID_SDFX: for SDFX module
+ * @IWL_FW_INI_ALLOCATION_ID_FW_DUMP: used for crash and runtime dumps
+ * @IWL_FW_INI_ALLOCATION_ID_USER_DEFINED: for future user scenarios
+*/
+enum iwl_fw_ini_allocation_id {
+ IWL_FW_INI_ALLOCATION_INVALID,
+ IWL_FW_INI_ALLOCATION_ID_DBGC1,
+ IWL_FW_INI_ALLOCATION_ID_DBGC2,
+ IWL_FW_INI_ALLOCATION_ID_DBGC3,
+ IWL_FW_INI_ALLOCATION_ID_SDFX,
+ IWL_FW_INI_ALLOCATION_ID_FW_DUMP,
+ IWL_FW_INI_ALLOCATION_ID_USER_DEFINED,
+}; /* FW_INI_ALLOCATION_ID_E_VER_1 */
+
+/**
+ * enum iwl_fw_ini_buffer_location
+ * @IWL_FW_INI_LOCATION_SRAM_INVALID: invalid
+ * @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location
+ * @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
+ */
+enum iwl_fw_ini_buffer_location {
+ IWL_FW_INI_LOCATION_SRAM_INVALID,
+ IWL_FW_INI_LOCATION_SRAM_PATH,
+ IWL_FW_INI_LOCATION_DRAM_PATH,
+}; /* FW_INI_BUFFER_LOCATION_E_VER_1 */
+
+/**
+ * enum iwl_fw_ini_debug_flow
+ * @IWL_FW_INI_DEBUG_INVALID: invalid
+ * @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined
+ * @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined
+ */
+enum iwl_fw_ini_debug_flow {
+ IWL_FW_INI_DEBUG_INVALID,
+ IWL_FW_INI_DEBUG_DBTR_FLOW,
+ IWL_FW_INI_DEBUG_TB2DTF_FLOW,
+}; /* FW_INI_DEBUG_FLOW_E_VER_1 */
+
+/**
+ * enum iwl_fw_ini_region_type
+ * @IWL_FW_INI_REGION_INVALID: invalid
+ * @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory
+ * @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC
+ * @IWL_FW_INI_REGION_PERIPHERY_PHY: periphery registers of PHY
+ * @IWL_FW_INI_REGION_PERIPHERY_AUX: periphery registers of AUX
+ * @IWL_FW_INI_REGION_DRAM_BUFFER: DRAM buffer
+ * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
+ * @IWL_FW_INI_REGION_INTERNAL_BUFFER: undefined
+ * @IWL_FW_INI_REGION_TXF: TX fifos
+ * @IWL_FW_INI_REGION_RXF: RX fifo
+ * @IWL_FW_INI_REGION_PAGING: paging memory
+ * @IWL_FW_INI_REGION_CSR: CSR registers
+ * @IWL_FW_INI_REGION_NUM: number of region types
+ */
+enum iwl_fw_ini_region_type {
+ IWL_FW_INI_REGION_INVALID,
+ IWL_FW_INI_REGION_DEVICE_MEMORY,
+ IWL_FW_INI_REGION_PERIPHERY_MAC,
+ IWL_FW_INI_REGION_PERIPHERY_PHY,
+ IWL_FW_INI_REGION_PERIPHERY_AUX,
+ IWL_FW_INI_REGION_DRAM_BUFFER,
+ IWL_FW_INI_REGION_DRAM_IMR,
+ IWL_FW_INI_REGION_INTERNAL_BUFFER,
+ IWL_FW_INI_REGION_TXF,
+ IWL_FW_INI_REGION_RXF,
+ IWL_FW_INI_REGION_PAGING,
+ IWL_FW_INI_REGION_CSR,
+ IWL_FW_INI_REGION_NUM
+}; /* FW_INI_REGION_TYPE_E_VER_1 */
+
+#endif
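
The new header only declares the wire format; the parser itself lands in iwl-dbg-tlv.c (added to the Makefile earlier). As a rough illustration of the layout, here is how a host-side tool might decode a single buffer-allocation TLV from a raw byte buffer; the offsets follow the structs above, while everything else (function names, sample bytes) is made up:

```c
#include <stdint.h>
#include <stdio.h>

/* Every field is a little-endian 32-bit word; convert explicitly so the
 * sketch is portable. */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* struct iwl_fw_ini_allocation_tlv: common header (tlv_version,
 * apply_point) followed by the allocation fields. */
static void dump_allocation_tlv(const uint8_t *buf)
{
	printf("ver=%u apply_point=%u alloc_id=%u location=%u size=%u\n",
	       get_le32(buf + 0),   /* header.tlv_version */
	       get_le32(buf + 4),   /* header.apply_point */
	       get_le32(buf + 8),   /* allocation_id */
	       get_le32(buf + 12),  /* buffer_location */
	       get_le32(buf + 16)); /* size, bytes */
}

int main(void)
{
	/* v1, IWL_FW_INI_APPLY_EARLY (1), DBGC1 (1), DRAM path (2), 64 KiB */
	uint8_t tlv[28] = {
		1, 0, 0, 0,  1, 0, 0, 0,  1, 0, 0, 0,
		2, 0, 0, 0,  0, 0, 1, 0,
	};
	dump_allocation_tlv(tlv);
	return 0;
}
```
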
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 1dd23f846fb9..7a3f7b7e6358 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -151,9 +151,9 @@ enum iwl_tsf_id {
* @beacon_time: beacon transmit time in system time
* @beacon_tsf: beacon transmit time in TSF
* @bi: beacon interval in TU
- * @bi_reciprocal: 2^32 / bi
+ * @reserved1: reserved
* @dtim_interval: dtim transmit time in TU
- * @dtim_reciprocal: 2^32 / dtim_interval
+ * @reserved2: reserved
* @mcast_qid: queue ID for multicast traffic.
* NOTE: obsolete from VER2 and on
* @beacon_template: beacon template ID
@@ -162,9 +162,9 @@ struct iwl_mac_data_ap {
__le32 beacon_time;
__le64 beacon_tsf;
__le32 bi;
- __le32 bi_reciprocal;
+ __le32 reserved1;
__le32 dtim_interval;
- __le32 dtim_reciprocal;
+ __le32 reserved2;
__le32 mcast_qid;
__le32 beacon_template;
} __packed; /* AP_MAC_DATA_API_S_VER_2 */
@@ -174,26 +174,34 @@ struct iwl_mac_data_ap {
* @beacon_time: beacon transmit time in system time
* @beacon_tsf: beacon transmit time in TSF
* @bi: beacon interval in TU
- * @bi_reciprocal: 2^32 / bi
+ * @reserved: reserved
* @beacon_template: beacon template ID
*/
struct iwl_mac_data_ibss {
__le32 beacon_time;
__le64 beacon_tsf;
__le32 bi;
- __le32 bi_reciprocal;
+ __le32 reserved;
__le32 beacon_template;
} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
/**
+ * enum iwl_mac_data_policy - policy of the data path for this MAC
+ * @TWT_SUPPORTED: TWT (target wake time) is supported
+ */
+enum iwl_mac_data_policy {
+ TWT_SUPPORTED = BIT(0),
+};
+
+/**
* struct iwl_mac_data_sta - configuration data for station MAC context
* @is_assoc: 1 for associated state, 0 otherwise
* @dtim_time: DTIM arrival time in system time
* @dtim_tsf: DTIM arrival time in TSF
* @bi: beacon interval in TU, applicable only when associated
- * @bi_reciprocal: 2^32 / bi , applicable only when associated
+ * @reserved1: reserved
* @dtim_interval: DTIM interval in TU, applicable only when associated
- * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated
+ * @data_policy: see &enum iwl_mac_data_policy
* @listen_interval: in beacon intervals, applicable only when associated
* @assoc_id: unique ID assigned by the AP during association
* @assoc_beacon_arrive_time: TSF of first beacon after association
@@ -203,13 +211,13 @@ struct iwl_mac_data_sta {
__le32 dtim_time;
__le64 dtim_tsf;
__le32 bi;
- __le32 bi_reciprocal;
+ __le32 reserved1;
__le32 dtim_interval;
- __le32 dtim_reciprocal;
+ __le32 data_policy;
__le32 listen_interval;
__le32 assoc_id;
__le32 assoc_beacon_arrive_time;
-} __packed; /* STA_MAC_DATA_API_S_VER_1 */
+} __packed; /* STA_MAC_DATA_API_S_VER_2 */
/**
* struct iwl_mac_data_go - configuration data for P2P GO MAC context
@@ -233,7 +241,7 @@ struct iwl_mac_data_go {
struct iwl_mac_data_p2p_sta {
struct iwl_mac_data_sta sta;
__le32 ctwin;
-} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */
+} __packed; /* P2P_STA_MAC_DATA_API_S_VER_2 */
/**
* struct iwl_mac_data_pibss - Pseudo IBSS config data
@@ -378,13 +386,6 @@ struct iwl_mac_ctx_cmd {
};
} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */
-static inline u32 iwl_mvm_reciprocal(u32 v)
-{
- if (!v)
- return 0;
- return 0xFFFFFFFF / v;
-}
-
#define IWL_NONQOS_SEQ_GET 0x1
#define IWL_NONQOS_SEQ_SET 0x2
struct iwl_nonqos_seq_query_cmd {
@@ -442,7 +443,7 @@ struct iwl_he_backoff_conf {
* Support for Nss x BW (or RU) matrix:
* (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
* Each entry contains 2 QAM thresholds for 8us and 16us:
- * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
* i.e. QAM_th1 < QAM_th2 such that if TX uses QAM_tx:
* QAM_tx < QAM_th1 --> PPE=0us
* QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
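
The reciprocal fields become reserved and iwl_mvm_reciprocal() goes away: current firmware no longer needs the host to pre-compute 2^32 / bi for its division-free beacon-interval math. For reference, a standalone check of the fixed-point trick the removed helper enabled, where x * (2^32 / v) >> 32 approximates x / v:

```c
#include <stdint.h>
#include <stdio.h>

/* The removed helper: floor((2^32 - 1) / v), 0 for v == 0. */
static uint32_t reciprocal(uint32_t v)
{
	return v ? 0xFFFFFFFFu / v : 0;
}

int main(void)
{
	uint32_t bi = 100; /* beacon interval in TU, hypothetical value */
	uint64_t x = 123456;
	uint64_t approx = (x * (uint64_t)reciprocal(bi)) >> 32;

	printf("exact %llu, via reciprocal %llu\n",
	       (unsigned long long)(x / bi), (unsigned long long)approx);
	return 0;
}
```
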
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 0537496b6eb1..0791a854fc8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -345,66 +345,98 @@ enum iwl_rx_mpdu_mac_info {
IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
};
-/*
- * enum iwl_rx_he_phy - HE PHY data
- */
-enum iwl_rx_he_phy {
- IWL_RX_HE_PHY_BEAM_CHNG = BIT(0),
- IWL_RX_HE_PHY_UPLINK = BIT(1),
- IWL_RX_HE_PHY_BSS_COLOR_MASK = 0xfc,
- IWL_RX_HE_PHY_SPATIAL_REUSE_MASK = 0xf00,
- IWL_RX_HE_PHY_SU_EXT_BW10 = BIT(12),
- IWL_RX_HE_PHY_TXOP_DUR_MASK = 0xfe000,
- IWL_RX_HE_PHY_LDPC_EXT_SYM = BIT(20),
- IWL_RX_HE_PHY_PRE_FEC_PAD_MASK = 0x600000,
- IWL_RX_HE_PHY_PE_DISAMBIG = BIT(23),
- IWL_RX_HE_PHY_DOPPLER = BIT(24),
+/* TSF overload low dword */
+enum iwl_rx_phy_data0 {
+ /* info type: HE any */
+ IWL_RX_PHY_DATA0_HE_BEAM_CHNG = 0x00000001,
+ IWL_RX_PHY_DATA0_HE_UPLINK = 0x00000002,
+ IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK = 0x000000fc,
+ IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK = 0x00000f00,
+ /* 1 bit reserved */
+ IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK = 0x000fe000,
+ IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM = 0x00100000,
+ IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK = 0x00600000,
+ IWL_RX_PHY_DATA0_HE_PE_DISAMBIG = 0x00800000,
+ IWL_RX_PHY_DATA0_HE_DOPPLER = 0x01000000,
/* 6 bits reserved */
- IWL_RX_HE_PHY_DELIM_EOF = BIT(31),
+ IWL_RX_PHY_DATA0_HE_DELIM_EOF = 0x80000000,
+};
+
+enum iwl_rx_phy_info_type {
+ IWL_RX_PHY_INFO_TYPE_NONE = 0,
+ IWL_RX_PHY_INFO_TYPE_CCK = 1,
+ IWL_RX_PHY_INFO_TYPE_OFDM_LGCY = 2,
+ IWL_RX_PHY_INFO_TYPE_HT = 3,
+ IWL_RX_PHY_INFO_TYPE_VHT_SU = 4,
+ IWL_RX_PHY_INFO_TYPE_VHT_MU = 5,
+ IWL_RX_PHY_INFO_TYPE_HE_SU = 6,
+ IWL_RX_PHY_INFO_TYPE_HE_MU = 7,
+ IWL_RX_PHY_INFO_TYPE_HE_TB = 8,
+ IWL_RX_PHY_INFO_TYPE_HE_MU_EXT = 9,
+ IWL_RX_PHY_INFO_TYPE_HE_TB_EXT = 10,
+};
- /* second dword - common data */
- IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL,
- IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8),
+/* TSF overload high dword */
+enum iwl_rx_phy_data1 {
+ /*
+ * check this first - if TSF overload is set,
+ * see &enum iwl_rx_phy_info_type
+ */
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK = 0xf0000000,
+
+ /* info type: HT/VHT/HE any */
+ IWL_RX_PHY_DATA1_LSIG_LEN_MASK = 0x0fff0000,
+
+ /* info type: HE MU/MU-EXT */
+ IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION = 0x00000001,
+ IWL_RX_PHY_DATA1_HE_MU_SIGB_SYM_OR_USER_NUM_MASK = 0x0000001e,
+
+ /* info type: HE any */
+ IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK = 0x000000e0,
+ IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80 = 0x00000100,
/* trigger encoded */
- IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL,
- IWL_RX_HE_PHY_INFO_TYPE_MASK = 0xf000000000000000ULL,
- IWL_RX_HE_PHY_INFO_TYPE_SU = 0x0, /* TSF low valid (first DW) */
- IWL_RX_HE_PHY_INFO_TYPE_MU = 0x1, /* TSF low/high valid (both DWs) */
- IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO = 0x2, /* same + SIGB-common0/1/2 valid */
- IWL_RX_HE_PHY_INFO_TYPE_TB = 0x3, /* TSF low/high valid (both DWs) */
-
- /* second dword - MU data */
- IWL_RX_HE_PHY_MU_SIGB_COMPRESSION = BIT_ULL(32 + 0),
- IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL,
- IWL_RX_HE_PHY_MU_SIGB_MCS_MASK = 0xf000000000000ULL,
- IWL_RX_HE_PHY_MU_SIGB_DCM = BIT_ULL(32 + 21),
- IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL,
-
- /* second dword - TB data */
- IWL_RX_HE_PHY_TB_PILOT_TYPE = BIT_ULL(32 + 0),
- IWL_RX_HE_PHY_TB_LOW_SS_MASK = 0xe00000000ULL
+ IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK = 0x0000fe00,
+
+ /* info type: HE TB/TX-EXT */
+ IWL_RX_PHY_DATA1_HE_TB_PILOT_TYPE = 0x00000001,
+ IWL_RX_PHY_DATA1_HE_TB_LOW_SS_MASK = 0x0000000e,
};
-enum iwl_rx_he_sigb_common0 {
+/* goes into Metadata DW 7 */
+enum iwl_rx_phy_data2 {
+ /* info type: HE MU-EXT */
/* the a1/a2/... is what the PHY/firmware calls the values */
- IWL_RX_HE_SIGB_COMMON0_CH1_RU0 = 0x000000ff, /* a1 */
- IWL_RX_HE_SIGB_COMMON0_CH1_RU2 = 0x0000ff00, /* a2 */
- IWL_RX_HE_SIGB_COMMON0_CH2_RU0 = 0x00ff0000, /* b1 */
- IWL_RX_HE_SIGB_COMMON0_CH2_RU2 = 0xff000000, /* b2 */
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0 = 0x000000ff, /* a1 */
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2 = 0x0000ff00, /* a2 */
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0 = 0x00ff0000, /* b1 */
+ IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2 = 0xff000000, /* b2 */
+
+ /* info type: HE TB-EXT */
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1 = 0x0000000f,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2 = 0x000000f0,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3 = 0x00000f00,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4 = 0x0000f000,
};
-enum iwl_rx_he_sigb_common1 {
- IWL_RX_HE_SIGB_COMMON1_CH1_RU1 = 0x000000ff, /* c1 */
- IWL_RX_HE_SIGB_COMMON1_CH1_RU3 = 0x0000ff00, /* c2 */
- IWL_RX_HE_SIGB_COMMON1_CH2_RU1 = 0x00ff0000, /* d1 */
- IWL_RX_HE_SIGB_COMMON1_CH2_RU3 = 0xff000000, /* d2 */
+/* goes into Metadata DW 8 */
+enum iwl_rx_phy_data3 {
+ /* info type: HE MU-EXT */
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3 = 0x0000ff00, /* c2 */
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1 = 0x00ff0000, /* d1 */
+ IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3 = 0xff000000, /* d2 */
};
-enum iwl_rx_he_sigb_common2 {
- IWL_RX_HE_SIGB_COMMON2_CH1_CTR_RU = 0x0001,
- IWL_RX_HE_SIGB_COMMON2_CH2_CTR_RU = 0x0002,
- IWL_RX_HE_SIGB_COMMON2_CH1_CRC_OK = 0x0004,
- IWL_RX_HE_SIGB_COMMON2_CH2_CRC_OK = 0x0008,
+/* goes into Metadata DW 4 high 16 bits */
+enum iwl_rx_phy_data4 {
+ /* info type: HE MU-EXT */
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU = 0x0001,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU = 0x0002,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK = 0x0004,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK = 0x0008,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK = 0x00f0,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM = 0x0100,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600,
};
/**
@@ -419,9 +451,9 @@ struct iwl_rx_mpdu_desc_v1 {
__le32 rss_hash;
/**
- * @sigb_common0: for HE sniffer, HE-SIG-B common part 0
+ * @phy_data2: depends on info type (see @phy_data1)
*/
- __le32 sigb_common0;
+ __le32 phy_data2;
};
/* DW8 - carries filter_match only when rpa_en == 1 */
@@ -432,9 +464,9 @@ struct iwl_rx_mpdu_desc_v1 {
__le32 filter_match;
/**
- * @sigb_common1: for HE sniffer, HE-SIG-B common part 1
+ * @phy_data3: depends on info type (see @phy_data1)
*/
- __le32 sigb_common1;
+ __le32 phy_data3;
};
/* DW9 */
@@ -472,12 +504,19 @@ struct iwl_rx_mpdu_desc_v1 {
* %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
*/
__le64 tsf_on_air_rise;
- /**
- * @he_phy_data:
- * HE PHY data, see &enum iwl_rx_he_phy, valid
- * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
- */
- __le64 he_phy_data;
+
+ struct {
+ /**
+ * @phy_data0: depends on info_type, see @phy_data1
+ */
+ __le32 phy_data0;
+ /**
+ * @phy_data1: valid only if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
+ * see &enum iwl_rx_phy_data1.
+ */
+ __le32 phy_data1;
+ };
};
} __packed;
@@ -493,9 +532,9 @@ struct iwl_rx_mpdu_desc_v3 {
__le32 filter_match;
/**
- * @sigb_common0: for HE sniffer, HE-SIG-B common part 0
+ * @phy_data2: depends on info type (see @phy_data1)
*/
- __le32 sigb_common0;
+ __le32 phy_data2;
};
/* DW8 - carries rss_hash only when rpa_en == 1 */
@@ -506,9 +545,9 @@ struct iwl_rx_mpdu_desc_v3 {
__le32 rss_hash;
/**
- * @sigb_common1: for HE sniffer, HE-SIG-B common part 1
+ * @phy_data3: depends on info type (see @phy_data1)
*/
- __le32 sigb_common1;
+ __le32 phy_data3;
};
/* DW9 */
/**
@@ -556,12 +595,19 @@ struct iwl_rx_mpdu_desc_v3 {
* %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
*/
__le64 tsf_on_air_rise;
- /**
- * @he_phy_data:
- * HE PHY data, see &enum iwl_rx_he_phy, valid
- * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
- */
- __le64 he_phy_data;
+
+ struct {
+ /**
+ * @phy_data0: depends on info_type, see @phy_data1
+ */
+ __le32 phy_data0;
+ /**
+ * @phy_data1: valid only if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
+ * see &enum iwl_rx_phy_data1.
+ */
+ __le32 phy_data1;
+ };
};
/* DW16 & DW17 */
/**
@@ -613,9 +659,9 @@ struct iwl_rx_mpdu_desc {
__le16 l3l4_flags;
/**
- * @sigb_common2: for HE sniffer, HE-SIG-B common part 2
+ * @phy_data4: depends on info type (see @phy_data1)
*/
- __le16 sigb_common2;
+ __le16 phy_data4;
};
/* DW5 */
/**
@@ -651,6 +697,55 @@ struct iwl_rx_mpdu_desc {
#define IWL_CD_STTS_WIFI_STATUS_POS 4
#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
+#define RX_NO_DATA_CHAIN_A_POS 0
+#define RX_NO_DATA_CHAIN_A_MSK (0xff << RX_NO_DATA_CHAIN_A_POS)
+#define RX_NO_DATA_CHAIN_B_POS 8
+#define RX_NO_DATA_CHAIN_B_MSK (0xff << RX_NO_DATA_CHAIN_B_POS)
+#define RX_NO_DATA_CHANNEL_POS 16
+#define RX_NO_DATA_CHANNEL_MSK (0xff << RX_NO_DATA_CHANNEL_POS)
+
+#define RX_NO_DATA_INFO_TYPE_POS 0
+#define RX_NO_DATA_INFO_TYPE_MSK (0xff << RX_NO_DATA_INFO_TYPE_POS)
+#define RX_NO_DATA_INFO_TYPE_NONE 0
+#define RX_NO_DATA_INFO_TYPE_RX_ERR 1
+#define RX_NO_DATA_INFO_TYPE_NDP 2
+#define RX_NO_DATA_INFO_TYPE_MU_UNMATCHED 3
+#define RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED 4
+
+#define RX_NO_DATA_INFO_ERR_POS 8
+#define RX_NO_DATA_INFO_ERR_MSK (0xff << RX_NO_DATA_INFO_ERR_POS)
+#define RX_NO_DATA_INFO_ERR_NONE 0
+#define RX_NO_DATA_INFO_ERR_BAD_PLCP 1
+#define RX_NO_DATA_INFO_ERR_UNSUPPORTED_RATE 2
+#define RX_NO_DATA_INFO_ERR_NO_DELIM 3
+#define RX_NO_DATA_INFO_ERR_BAD_MAC_HDR 4
+
+#define RX_NO_DATA_FRAME_TIME_POS 0
+#define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS)
+
+/**
+ * struct iwl_rx_no_data - RX no data descriptor
+ * @info: 7:0 frame type, 15:8 RX error type
+ * @rssi: 7:0 energy chain-A,
+ * 15:8 chain-B, measured at FINA time (FINA_ENERGY), 23:16 channel
+ * @on_air_rise_time: GP2 during on air rise
+ * @fr_time: frame time
+ * @rate: rate/mcs of frame
+ * @phy_info: &enum iwl_rx_phy_data0 and &enum iwl_rx_phy_info_type
+ * @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type.
+ * for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT
+ * for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT
+ */
+struct iwl_rx_no_data {
+ __le32 info;
+ __le32 rssi;
+ __le32 on_air_rise_time;
+ __le32 fr_time;
+ __le32 rate;
+ __le32 phy_info[2];
+ __le32 rx_vec[3];
+} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */
+
/**
* enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
* @IWL_CD_STTS_UNUSED: unused
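
The rename from HE-specific sigb_common fields to generic phy_dataN words puts the dispatch in phy_data1: its top nibble carries &enum iwl_rx_phy_info_type whenever the TSF-overload flag is set, and the meaning of phy_data0/2/3/4 follows from that type. A small sketch of the POS/MSK extraction idiom (the shift of 28 is implied by the mask's position; inputs are assumed already converted to host order):

```c
#include <stdint.h>
#include <stdio.h>

/* IWL_RX_PHY_DATA1_INFO_TYPE_MASK == 0xf0000000 -> shift by 28. */
static unsigned phy_info_type(uint32_t phy_data1)
{
	return (phy_data1 & 0xf0000000u) >> 28;
}

/* Same idiom with the RX_NO_DATA_CHANNEL_POS/MSK pair. */
static unsigned no_data_channel(uint32_t rssi_word)
{
	return (rssi_word & (0xffu << 16)) >> 16;
}

int main(void)
{
	uint32_t pd1 = 0x60001234u;  /* type 6: IWL_RX_PHY_INFO_TYPE_HE_SU */
	uint32_t rssi = 0x00240a0bu; /* hypothetical word: channel 36 */

	printf("info type %u, channel %u\n",
	       phy_info_type(pd1), no_data_channel(rssi));
	return 0;
}
```
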
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index c16757051f16..2a19b178c5e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -225,22 +225,18 @@ static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
*dump_data = iwl_fw_error_next_data(*dump_data);
}
-static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_error_dump_data **dump_data)
+static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **dump_data)
{
- struct iwl_fw_error_dump_fifo *fifo_hdr;
struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
- u32 *fifo_data;
- u32 fifo_len;
unsigned long flags;
- int i, j;
- IWL_DEBUG_INFO(fwrt, "WRT FIFO dump\n");
+ IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n");
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return;
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
/* Pull RXF1 */
iwl_fwrt_dump_rxf(fwrt, dump_data,
cfg->lmac[0].rxfifo1_size, 0, 0);
@@ -254,7 +250,25 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
LMAC2_PRPH_OFFSET, 2);
}
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+ iwl_trans_release_nic_access(fwrt->trans, &flags);
+}
+
+static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **dump_data)
+{
+ struct iwl_fw_error_dump_fifo *fifo_hdr;
+ struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
+ u32 *fifo_data;
+ u32 fifo_len;
+ unsigned long flags;
+ int i, j;
+
+ IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n");
+
+ if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
+ return;
+
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
/* Pull TXF data from LMAC1 */
for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
/* Mark the number of TXF we're pulling now */
@@ -279,7 +293,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
}
}
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
/* Pull UMAC internal TXF data from all TXFs */
@@ -591,20 +605,42 @@ static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
}
+static void iwl_fw_dump_named_mem(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **dump_data,
+ u32 len, u32 ofs, u8 *name, u8 name_len)
+{
+ struct iwl_fw_error_dump_named_mem *dump_mem;
+
+ if (!len)
+ return;
+
+ (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem));
+ dump_mem = (void *)(*dump_data)->data;
+ dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_NAMED_MEM);
+ dump_mem->offset = cpu_to_le32(ofs);
+ dump_mem->name_len = name_len;
+ memcpy(dump_mem->name, name, name_len);
+ iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+
+ IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
+}
+
#define ADD_LEN(len, item_len, const_len) \
do { size_t item = item_len; len += (!!item) * const_len + item; } \
while (0)
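ADD_LEN charges the constant per-item header only when the item is actually present; a quick worked example (illustrative values):

	u32 len = 0, hdr = sizeof(struct iwl_fw_error_dump_fifo);

	ADD_LEN(len, 0, hdr);	/* absent FIFO: len stays 0 */
	ADD_LEN(len, 512, hdr);	/* present FIFO: len += hdr + 512 */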
-static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
- struct iwl_fwrt_shared_mem_cfg *mem_cfg)
+static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_shared_mem_cfg *mem_cfg)
{
size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
sizeof(struct iwl_fw_error_dump_fifo);
u32 fifo_len = 0;
int i;
- if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)))
- goto dump_txf;
+ if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF))
+ return 0;
/* Count RXF2 size */
ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
@@ -613,8 +649,18 @@ static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
for (i = 0; i < mem_cfg->num_lmacs; i++)
ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
-dump_txf:
- if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)))
+ return fifo_len;
+}
+
+static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt,
+ struct iwl_fwrt_shared_mem_cfg *mem_cfg)
+{
+ size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ u32 fifo_len = 0;
+ int i;
+
+ if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF))
goto dump_internal_txf;
/* Count TXF sizes */
@@ -627,7 +673,7 @@ dump_txf:
}
dump_internal_txf:
- if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+ if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
goto out;
@@ -639,6 +685,32 @@ out:
return fifo_len;
}
+static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_error_dump_data **data)
+{
+ int i;
+
+ IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
+ for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
+ struct iwl_fw_error_dump_paging *paging;
+ struct page *pages =
+ fwrt->fw_paging_db[i].fw_paging_block;
+ dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
+ (*data)->len = cpu_to_le32(sizeof(*paging) +
+ PAGING_BLOCK_SIZE);
+ paging = (void *)(*data)->data;
+ paging->index = cpu_to_le32(i);
+ dma_sync_single_for_cpu(fwrt->trans->dev, addr,
+ PAGING_BLOCK_SIZE,
+ DMA_BIDIRECTIONAL);
+ memcpy(paging->data, page_address(pages),
+ PAGING_BLOCK_SIZE);
+ (*data) = iwl_fw_error_next_data(*data);
+ }
+}
+
static struct iwl_fw_error_dump_file *
_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dump_ptrs *fw_error_dump)
@@ -655,13 +727,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
0 : fwrt->trans->cfg->dccm2_len;
- bool monitor_dump_only = false;
int i;
- if (fwrt->dump.trig &&
- fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
- monitor_dump_only = true;
-
/* SRAM - include stack CCM if driver knows the values for it */
if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
const struct fw_img *img;
@@ -676,26 +743,27 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
/* reading RXF/TXF sizes */
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
- fifo_len = iwl_fw_fifo_len(fwrt, mem_cfg);
+ fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg);
+ fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
/* Make room for PRPH registers */
if (!fwrt->trans->cfg->gen2 &&
- fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
+ iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
prph_len += iwl_fw_get_prph_len(fwrt);
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
- fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
+ iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO))
file_len += sizeof(*dump_data) + sizeof(*dump_info);
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG))
file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
size_t hdr_len = sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_mem);
@@ -712,10 +780,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
}
/* Make room for fw's virtual image pages, if it exists */
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
- !fwrt->trans->cfg->gen2 &&
- fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
- fwrt->fw_paging_db[0].fw_paging_block)
+ if (iwl_fw_dbg_is_paging_enabled(fwrt))
file_len += fwrt->num_of_paging_blk *
(sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_paging) +
@@ -727,12 +792,12 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
}
/* If we only want a monitor dump, reset the file length */
- if (monitor_dump_only) {
+ if (fwrt->dump.monitor_only) {
file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
}
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
fwrt->dump.desc->len;
@@ -746,7 +811,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_info));
dump_info = (void *)dump_data->data;
@@ -763,11 +828,12 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
sizeof(dump_info->dev_human_readable) - 1);
strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
sizeof(dump_info->bus_human_readable) - 1);
+ dump_info->rt_status = cpu_to_le32(fwrt->dump.rt_status);
dump_data = iwl_fw_error_next_data(dump_data);
}
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) {
/* Dump shared memory configuration */
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
@@ -799,12 +865,13 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
/* We only dump the FIFOs if the FW is in error state */
if (fifo_len) {
- iwl_fw_dump_fifos(fwrt, &dump_data);
+ iwl_fw_dump_rxf(fwrt, &dump_data);
+ iwl_fw_dump_txf(fwrt, &dump_data);
if (radio_len)
iwl_read_radio_regs(fwrt, &dump_data);
}
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
@@ -817,10 +884,10 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
}
/* In case we only want monitor dump, skip to dumping transport data */
- if (monitor_dump_only)
+ if (fwrt->dump.monitor_only)
goto out;
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+ if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
fwrt->fw->dbg.mem_tlv;
@@ -865,30 +932,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
}
/* Dump fw's virtual image */
- if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
- !fwrt->trans->cfg->gen2 &&
- fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
- fwrt->fw_paging_db[0].fw_paging_block) {
- IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
- for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
- struct iwl_fw_error_dump_paging *paging;
- struct page *pages =
- fwrt->fw_paging_db[i].fw_paging_block;
- dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
-
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
- dump_data->len = cpu_to_le32(sizeof(*paging) +
- PAGING_BLOCK_SIZE);
- paging = (void *)dump_data->data;
- paging->index = cpu_to_le32(i);
- dma_sync_single_for_cpu(fwrt->trans->dev, addr,
- PAGING_BLOCK_SIZE,
- DMA_BIDIRECTIONAL);
- memcpy(paging->data, page_address(pages),
- PAGING_BLOCK_SIZE);
- dump_data = iwl_fw_error_next_data(dump_data);
- }
- }
+ if (iwl_fw_dbg_is_paging_enabled(fwrt))
+ iwl_dump_paging(fwrt, &dump_data);
if (prph_len) {
iwl_dump_prph(fwrt->trans, &dump_data,
@@ -906,12 +951,245 @@ out:
return dump_file;
}
+static void iwl_dump_prph_ini(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ struct iwl_fw_error_dump_prph *prph;
+ unsigned long flags;
+ u32 i, size = le32_to_cpu(reg->num_regions);
+
+ IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
+
+ if (!iwl_trans_grab_nic_access(trans, &flags))
+ return;
+
+ for (i = 0; i < size; i++) {
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
+ (*data)->len = cpu_to_le32(le32_to_cpu(reg->size) +
+ sizeof(*prph));
+ prph = (void *)(*data)->data;
+ prph->prph_start = reg->start_addr[i];
+ prph->data[0] = cpu_to_le32(iwl_read_prph_no_grab(trans,
+ le32_to_cpu(prph->prph_start)));
+ *data = iwl_fw_error_next_data(*data);
+ }
+ iwl_trans_release_nic_access(trans, &flags);
+}
+
+static void iwl_dump_csr_ini(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_data **data,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ int i, num = le32_to_cpu(reg->num_regions);
+ u32 size = le32_to_cpu(reg->size);
+
+ IWL_DEBUG_INFO(trans, "WRT CSR dump\n");
+
+ for (i = 0; i < num; i++) {
+ u32 add = le32_to_cpu(reg->start_addr[i]);
+ __le32 *val;
+ int j;
+
+ (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
+ (*data)->len = cpu_to_le32(size);
+ val = (void *)(*data)->data;
+
+ for (j = 0; j < size; j += 4)
+ *val++ = cpu_to_le32(iwl_trans_read32(trans, j + add));
+
+ *data = iwl_fw_error_next_data(*data);
+ }
+}
+
+static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_trigger *trigger)
+{
+ int i, num, size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data);
+
+ if (!trigger || !trigger->num_regions)
+ return 0;
+
+ num = le32_to_cpu(trigger->num_regions);
+ for (i = 0; i < num; i++) {
+ u32 reg_id = le32_to_cpu(trigger->data[i]);
+ struct iwl_fw_ini_region_cfg *reg;
+ enum iwl_fw_ini_region_type type;
+ u32 num_entries;
+
+ if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)))
+ continue;
+
+ reg = fwrt->dump.active_regs[reg_id].reg;
+ if (WARN(!reg, "Unassigned region %d\n", reg_id))
+ continue;
+
+ type = le32_to_cpu(reg->region_type);
+ num_entries = le32_to_cpu(reg->num_regions);
+
+ switch (type) {
+ case IWL_FW_INI_REGION_DEVICE_MEMORY:
+ size += hdr_len +
+ sizeof(struct iwl_fw_error_dump_named_mem) +
+ le32_to_cpu(reg->size);
+ break;
+ case IWL_FW_INI_REGION_PERIPHERY_MAC:
+ case IWL_FW_INI_REGION_PERIPHERY_PHY:
+ case IWL_FW_INI_REGION_PERIPHERY_AUX:
+ size += num_entries *
+ (hdr_len +
+ sizeof(struct iwl_fw_error_dump_prph) +
+ sizeof(u32));
+ break;
+ case IWL_FW_INI_REGION_TXF:
+ size += iwl_fw_txf_len(fwrt, &fwrt->smem_cfg);
+ break;
+ case IWL_FW_INI_REGION_RXF:
+ size += iwl_fw_rxf_len(fwrt, &fwrt->smem_cfg);
+ break;
+ case IWL_FW_INI_REGION_PAGING:
+ if (!iwl_fw_dbg_is_paging_enabled(fwrt))
+ break;
+ size += fwrt->num_of_paging_blk *
+ (hdr_len +
+ sizeof(struct iwl_fw_error_dump_paging) +
+ PAGING_BLOCK_SIZE);
+ break;
+ case IWL_FW_INI_REGION_CSR:
+ size += num_entries *
+ (hdr_len + le32_to_cpu(reg->size));
+ break;
+ case IWL_FW_INI_REGION_DRAM_BUFFER:
+ /* Transport takes care of DRAM dumping */
+ case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+ case IWL_FW_INI_REGION_DRAM_IMR:
+ /* Not yet defined */
+ default:
+ break;
+ }
+ }
+ return size;
+}
+
+static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_trigger *trigger,
+ struct iwl_fw_error_dump_data **data,
+ u32 *dump_mask)
+{
+ int i, num = le32_to_cpu(trigger->num_regions);
+
+ for (i = 0; i < num; i++) {
+ u32 reg_id = le32_to_cpu(trigger->data[i]);
+ enum iwl_fw_ini_region_type type;
+ struct iwl_fw_ini_region_cfg *reg;
+
+ if (reg_id >= ARRAY_SIZE(fwrt->dump.active_regs))
+ continue;
+
+ reg = fwrt->dump.active_regs[reg_id].reg;
+ /* Don't warn, get_trigger_len already warned */
+ if (!reg)
+ continue;
+
+ type = le32_to_cpu(reg->region_type);
+ switch (type) {
+ case IWL_FW_INI_REGION_DEVICE_MEMORY:
+ if (WARN_ON(le32_to_cpu(reg->num_regions) > 1))
+ continue;
+ iwl_fw_dump_named_mem(fwrt, data,
+ le32_to_cpu(reg->size),
+ le32_to_cpu(reg->start_addr[0]),
+ reg->name,
+ le32_to_cpu(reg->name_len));
+ break;
+ case IWL_FW_INI_REGION_PERIPHERY_MAC:
+ case IWL_FW_INI_REGION_PERIPHERY_PHY:
+ case IWL_FW_INI_REGION_PERIPHERY_AUX:
+ iwl_dump_prph_ini(fwrt->trans, data, reg);
+ break;
+ case IWL_FW_INI_REGION_DRAM_BUFFER:
+ *dump_mask |= IWL_FW_ERROR_DUMP_FW_MONITOR;
+ break;
+ case IWL_FW_INI_REGION_PAGING:
+ if (iwl_fw_dbg_is_paging_enabled(fwrt))
+ iwl_dump_paging(fwrt, data);
+ else
+ *dump_mask |= IWL_FW_ERROR_DUMP_PAGING;
+ break;
+ case IWL_FW_INI_REGION_TXF:
+ iwl_fw_dump_txf(fwrt, data);
+ break;
+ case IWL_FW_INI_REGION_RXF:
+ iwl_fw_dump_rxf(fwrt, data);
+ break;
+ case IWL_FW_INI_REGION_CSR:
+ iwl_dump_csr_ini(fwrt->trans, data, reg);
+ break;
+ case IWL_FW_INI_REGION_DRAM_IMR:
+ case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+ /* Not yet defined */
+ default:
+ break;
+ }
+ }
+}
+
+static struct iwl_fw_error_dump_file *
+_iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dump_ptrs *fw_error_dump,
+ u32 *dump_mask)
+{
+ int size, id = le32_to_cpu(fwrt->dump.desc->trig_desc.type);
+ struct iwl_fw_error_dump_data *dump_data;
+ struct iwl_fw_error_dump_file *dump_file;
+ struct iwl_fw_ini_trigger *trigger, *ext;
+
+ if (id == FW_DBG_TRIGGER_FW_ASSERT)
+ id = IWL_FW_TRIGGER_ID_FW_ASSERT;
+ else if (id == FW_DBG_TRIGGER_USER)
+ id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
+ else if (id < FW_DBG_TRIGGER_MAX)
+ return NULL;
+
+ if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
+ return NULL;
+
+ trigger = fwrt->dump.active_trigs[id].conf;
+ ext = fwrt->dump.active_trigs[id].conf_ext;
+
+ size = sizeof(*dump_file);
+ size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
+ size += iwl_fw_ini_get_trigger_len(fwrt, ext);
+
+ if (!size)
+ return NULL;
+
+ dump_file = vzalloc(size);
+ if (!dump_file)
+ return NULL;
+
+ fw_error_dump->fwrt_ptr = dump_file;
+
+ dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
+ dump_data = (void *)dump_file->data;
+ dump_file->file_len = cpu_to_le32(size);
+
+ *dump_mask = 0;
+ if (trigger)
+ iwl_fw_ini_dump_trigger(fwrt, trigger, &dump_data, dump_mask);
+ if (ext)
+ iwl_fw_ini_dump_trigger(fwrt, ext, &dump_data, dump_mask);
+
+ return dump_file;
+}
+
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
{
struct iwl_fw_dump_ptrs *fw_error_dump;
struct iwl_fw_error_dump_file *dump_file;
struct scatterlist *sg_dump_data;
u32 file_len;
+ u32 dump_mask = fwrt->fw->dbg.dump_mask;
IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
@@ -925,14 +1203,21 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (!fw_error_dump)
goto out;
- dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
+ if (fwrt->trans->ini_valid)
+ dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump,
+ &dump_mask);
+ else
+ dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
+
if (!dump_file) {
kfree(fw_error_dump);
goto out;
}
- fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
- fwrt->dump.trig);
+ if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only)
+ dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
+
+ fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
file_len = le32_to_cpu(dump_file->file_len);
fw_error_dump->fwrt_len = file_len;
if (fw_error_dump->trans_ptr) {
@@ -973,6 +1258,14 @@ const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
};
IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
+void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt)
+{
+ IWL_INFO(fwrt, "error dump due to fw assert\n");
+ fwrt->dump.desc = &iwl_dump_desc_assert;
+ iwl_fw_error_dump(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_assert_error_dump);
+
void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
{
struct iwl_fw_dump_desc *iwl_dump_desc_no_alive =
@@ -998,7 +1291,8 @@ void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
- const struct iwl_fw_dump_desc *desc, void *trigger,
+ const struct iwl_fw_dump_desc *desc,
+ bool monitor_only,
unsigned int delay)
{
/*
@@ -1028,7 +1322,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
le32_to_cpu(desc->trig_desc.type));
fwrt->dump.desc = desc;
- fwrt->dump.trig = trigger;
+ fwrt->dump.monitor_only = monitor_only;
schedule_delayed_work(&fwrt->dump.wk, delay);
@@ -1036,13 +1330,14 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
-int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_dbg_trigger trig,
- const char *str, size_t len,
- struct iwl_fw_dbg_trigger_tlv *trigger)
+int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_dbg_trigger trig,
+ const char *str, size_t len,
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_fw_dump_desc *desc;
unsigned int delay = 0;
+ bool monitor_only = false;
if (trigger) {
u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
@@ -1059,6 +1354,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
trigger->occurrences = cpu_to_le16(occurrences);
delay = le16_to_cpu(trigger->trig_dis_ms);
+ monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
}
desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
@@ -1070,7 +1366,48 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
desc->trig_desc.type = cpu_to_le32(trig);
memcpy(desc->trig_desc.data, str, len);
- return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay);
+ return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
+}
+IWL_EXPORT_SYMBOL(_iwl_fw_dbg_collect);
+
+int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
+ u32 id, const char *str, size_t len)
+{
+ struct iwl_fw_dump_desc *desc;
+ u32 occur, delay;
+
+ if (!fwrt->trans->ini_valid)
+ return _iwl_fw_dbg_collect(fwrt, id, str, len, NULL);
+
+ if (id == FW_DBG_TRIGGER_USER)
+ id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
+
+ if (WARN_ON(!fwrt->dump.active_trigs[id].active))
+ return -EINVAL;
+
+ delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->ignore_consec);
+ occur = le32_to_cpu(fwrt->dump.active_trigs[id].conf->occurrences);
+ if (!occur)
+ return 0;
+
+ if (le32_to_cpu(fwrt->dump.active_trigs[id].conf->force_restart)) {
+ IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id);
+ iwl_force_nmi(fwrt->trans);
+ return 0;
+ }
+
+ desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+ if (!desc)
+ return -ENOMEM;
+
+ occur--;
+ fwrt->dump.active_trigs[id].conf->occurrences = cpu_to_le32(occur);
+
+ desc->len = len;
+ desc->trig_desc.type = cpu_to_le32(id);
+ memcpy(desc->trig_desc.data, str, len);
+
+ return iwl_fw_dbg_collect_desc(fwrt, desc, true, delay);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
@@ -1081,6 +1418,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
int ret, len = 0;
char buf[64];
+ if (fwrt->trans->ini_valid)
+ return 0;
+
if (fmt) {
va_list ap;
@@ -1097,8 +1437,8 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
len = strlen(buf) + 1;
}
- ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
- trigger);
+ ret = _iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
+ trigger);
if (ret)
return ret;
@@ -1224,3 +1564,217 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
cfg->d3_debug_data_length);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
+
+static void
+iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_allocation_tlv *alloc)
+{
+ struct iwl_trans *trans = fwrt->trans;
+ struct iwl_continuous_record_cmd cont_rec = {};
+ struct iwl_buffer_allocation_cmd *cmd = (void *)&cont_rec.pad[0];
+ struct iwl_host_cmd hcmd = {
+ .id = LDBG_CONFIG_CMD,
+ .flags = CMD_ASYNC,
+ .data[0] = &cont_rec,
+ .len[0] = sizeof(cont_rec),
+ };
+ void *virtual_addr = NULL;
+ u32 size = le32_to_cpu(alloc->size);
+ dma_addr_t phys_addr;
+
+ cont_rec.record_mode.enable_recording = cpu_to_le16(BUFFER_ALLOCATION);
+
+ if (!trans->num_blocks &&
+ le32_to_cpu(alloc->buffer_location) !=
+ IWL_FW_INI_LOCATION_DRAM_PATH)
+ return;
+
+ virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size,
+ &phys_addr, GFP_KERNEL);
+
+ /* TODO: alloc fragments if needed */
+ if (!virtual_addr) {
+ IWL_ERR(fwrt, "Failed to allocate debug memory\n");
+ return;
+ }
+
+ if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon)))
+ return;
+
+ trans->fw_mon[trans->num_blocks].block = virtual_addr;
+ trans->fw_mon[trans->num_blocks].physical = phys_addr;
+ trans->fw_mon[trans->num_blocks].size = size;
+ trans->num_blocks++;
+
+ IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size);
+
+ /* First block is assigned via registers / context info */
+ if (trans->num_blocks == 1)
+ return;
+
+ cmd->num_frags = cpu_to_le32(1);
+ cmd->fragments[0].address = cpu_to_le64(phys_addr);
+ cmd->fragments[0].size = alloc->size;
+ cmd->allocation_id = alloc->allocation_id;
+ cmd->buffer_location = alloc->buffer_location;
+
+ iwl_trans_send_cmd(trans, &hcmd);
+}
+
+static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
+ struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_hcmd_tlv *hcmd_tlv = (void *)&tlv->data[0];
+ struct iwl_fw_ini_hcmd *data = &hcmd_tlv->hcmd;
+ u16 len = le32_to_cpu(tlv->length) - sizeof(*hcmd_tlv);
+
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(data->group, data->id),
+ .len = { len, },
+ .data = { data->data, },
+ };
+
+ iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+
+static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_tlv *tlv,
+ bool ext, enum iwl_fw_ini_apply_point pnt)
+{
+ void *iter = (void *)tlv->region_config;
+ int i, size = le32_to_cpu(tlv->num_regions);
+
+ for (i = 0; i < size; i++) {
+ struct iwl_fw_ini_region_cfg *reg = iter;
+ int id = le32_to_cpu(reg->region_id);
+ struct iwl_fw_ini_active_regs *active;
+
+ if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs),
+ "Invalid region id %d for apply point %d\n", id, pnt))
+ break;
+
+ active = &fwrt->dump.active_regs[id];
+
+ if (ext && active->apply_point == pnt)
+ IWL_WARN(fwrt->trans,
+ "External region TLV overrides FW default %x\n",
+ id);
+
+ IWL_DEBUG_FW(fwrt,
+ "%s: apply point %d, activating region ID %d\n",
+ __func__, pnt, id);
+
+ active->reg = reg;
+ active->apply_point = pnt;
+
+ if (le32_to_cpu(reg->region_type) !=
+ IWL_FW_INI_REGION_DRAM_BUFFER)
+ iter += le32_to_cpu(reg->num_regions) * sizeof(__le32);
+
+ iter += sizeof(*reg);
+ }
+}
+
+static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_trigger_tlv *tlv,
+ bool ext,
+ enum iwl_fw_ini_apply_point apply_point)
+{
+ int i, size = le32_to_cpu(tlv->num_triggers);
+ void *iter = (void *)tlv->trigger_config;
+
+ for (i = 0; i < size; i++) {
+ struct iwl_fw_ini_trigger *trig = iter;
+ struct iwl_fw_ini_active_triggers *active;
+ int id = le32_to_cpu(trig->trigger_id);
+ u32 num;
+
+ if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
+ break;
+
+ active = &fwrt->dump.active_trigs[id];
+
+ if (active->apply_point != apply_point) {
+ active->conf = NULL;
+ active->conf_ext = NULL;
+ }
+
+ num = le32_to_cpu(trig->num_regions);
+
+ if (ext && active->apply_point == apply_point) {
+ num += le32_to_cpu(active->conf->num_regions);
+ if (trig->ignore_default) {
+ active->conf_ext = active->conf;
+ active->conf = trig;
+ } else {
+ active->conf_ext = trig;
+ }
+ } else {
+ active->conf = trig;
+ }
+
+ /* Since zero means infinity - just set to -1 */
+ if (!le32_to_cpu(trig->occurrences))
+ trig->occurrences = cpu_to_le32(-1);
+ if (!le32_to_cpu(trig->ignore_consec))
+ trig->ignore_consec = cpu_to_le32(-1);
+
+ iter += sizeof(*trig) +
+ le32_to_cpu(trig->num_regions) * sizeof(__le32);
+
+ active->active = num;
+ active->apply_point = apply_point;
+ }
+}
+
+static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
+ struct iwl_apply_point_data *data,
+ enum iwl_fw_ini_apply_point pnt,
+ bool ext)
+{
+ void *iter = data->data;
+
+ while (iter && iter < data->data + data->size) {
+ struct iwl_ucode_tlv *tlv = iter;
+ void *ini_tlv = (void *)tlv->data;
+ u32 type = le32_to_cpu(tlv->type);
+
+ switch (type) {
+ case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
+ iwl_fw_dbg_buffer_allocation(fwrt, ini_tlv);
+ break;
+ case IWL_UCODE_TLV_TYPE_HCMD:
+ if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
+ IWL_ERR(fwrt,
+ "Invalid apply point %x for host command\n",
+ pnt);
+ goto next;
+ }
+ iwl_fw_dbg_send_hcmd(fwrt, tlv);
+ break;
+ case IWL_UCODE_TLV_TYPE_REGIONS:
+ iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt);
+ break;
+ case IWL_UCODE_TLV_TYPE_TRIGGERS:
+ iwl_fw_dbg_update_triggers(fwrt, ini_tlv, ext, pnt);
+ break;
+ case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
+ break;
+ default:
+ WARN_ONCE(1, "Invalid TLV %x for apply point\n", type);
+ break;
+ }
+next:
+ iter += sizeof(*tlv) + le32_to_cpu(tlv->length);
+ }
+}
+
+void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_apply_point apply_point)
+{
+ void *data = &fwrt->trans->apply_points[apply_point];
+
+ _iwl_fw_dbg_apply_point(fwrt, data, apply_point, false);
+
+ data = &fwrt->trans->apply_points_ext[apply_point];
+ _iwl_fw_dbg_apply_point(fwrt, data, apply_point, true);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
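The op-mode is expected to replay both the firmware-default and the external (.ini) TLV sets at each debug milestone; a minimal usage sketch, using IWL_FW_INI_APPLY_AFTER_ALIVE from the apply points handled above:

	/* sketch: replay debug TLVs once the firmware reports alive */
	iwl_fw_dbg_apply_point(fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);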
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 6f8d3256f7b0..6aabbdd72326 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -72,6 +72,7 @@
#include "file.h"
#include "error-dump.h"
#include "api/commands.h"
+#include "api/dbg-tlv.h"
/**
* struct iwl_fw_dump_desc - describes the dump
@@ -101,17 +102,19 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
if (fwrt->dump.desc != &iwl_dump_desc_assert)
kfree(fwrt->dump.desc);
fwrt->dump.desc = NULL;
- fwrt->dump.trig = NULL;
+ fwrt->dump.rt_status = 0;
}
void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
- void *trigger, unsigned int delay);
+ bool monitor_only, unsigned int delay);
+int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_dbg_trigger trig,
+ const char *str, size_t len,
+ struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_dbg_trigger trig,
- const char *str, size_t len,
- struct iwl_fw_dbg_trigger_tlv *trigger);
+ u32 id, const char *str, size_t len);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...) __printf(3, 4);
@@ -193,6 +196,9 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_dbg_trigger_tlv *trig;
+ if (fwrt->trans->ini_valid)
+ return NULL;
+
if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id))
return NULL;
@@ -210,6 +216,37 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
_iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \
})
+static inline bool
+_iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
+ const enum iwl_fw_dbg_trigger id)
+{
+ struct iwl_fw_ini_active_triggers *trig = &fwrt->dump.active_trigs[id];
+ u32 ms;
+
+ if (!fwrt->trans->ini_valid)
+ return false;
+
+ if (!trig || !trig->active)
+ return false;
+
+ ms = le32_to_cpu(trig->conf->ignore_consec);
+ if (ms)
+ ms /= USEC_PER_MSEC;
+
+ if (iwl_fw_dbg_no_trig_window(fwrt, id, ms)) {
+ IWL_WARN(fwrt, "Trigger %d fired in no-collect window\n", id);
+ return false;
+ }
+
+ return true;
+}
+
+#define iwl_fw_ini_trigger_on(fwrt, wdev, id) ({ \
+ BUILD_BUG_ON(!__builtin_constant_p(id)); \
+ BUILD_BUG_ON((id) >= IWL_FW_TRIGGER_ID_NUM); \
+ _iwl_fw_ini_trigger_on((fwrt), (id)); \
+})
+
static inline void
_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
struct wireless_dev *wdev,
@@ -263,6 +300,9 @@ _iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
udelay(100);
iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ trans->dbg_rec_on = false;
+#endif
}
static inline void
@@ -293,6 +333,14 @@ _iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
}
}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt)
+{
+ if (fwrt->fw->dbg.dest_tlv && fwrt->cur_fw_img == IWL_UCODE_REGULAR)
+ fwrt->trans->dbg_rec_on = true;
+}
+#endif
+
static inline void
iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params)
@@ -301,6 +349,9 @@ iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt,
_iwl_fw_dbg_restart_recording(fwrt->trans, params);
else
iwl_fw_dbg_start_stop_hcmd(fwrt, true);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_fw_set_dbg_rec_on(fwrt);
+#endif
}
static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
@@ -310,12 +361,25 @@ static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
void iwl_fw_error_dump_wk(struct work_struct *work);
+static inline bool iwl_fw_dbg_type_on(struct iwl_fw_runtime *fwrt, u32 type)
+{
+ return (fwrt->fw->dbg.dump_mask & BIT(type) || fwrt->trans->ini_valid);
+}
+
static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
{
return fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
fwrt->trans->cfg->d3_debug_data_length &&
- fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+ iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
+}
+
+static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt)
+{
+ return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) &&
+ !fwrt->trans->cfg->gen2 &&
+ fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
+ fwrt->fw_paging_db[0].fw_paging_block;
}
void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
@@ -366,6 +430,10 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
+void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt);
void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt);
+void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_apply_point apply_point);
+
#endif /* __iwl_fw_dbg_h__ */
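With the reworked prototypes above, callers no longer thread a trigger TLV pointer through iwl_fw_dbg_collect(); a hedged caller sketch for a user-initiated collection (the string is arbitrary):

	/* sketch: in INI mode, delay/occurrences come from the active
	 * trigger configuration rather than from the caller */
	iwl_fw_dbg_collect(fwrt, FW_DBG_TRIGGER_USER, "user trigger",
			   strlen("user trigger") + 1);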
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 6fede174c664..65faecf552cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -187,6 +187,8 @@ enum iwl_fw_error_dump_family {
* @fw_human_readable: human readable FW version
* @dev_human_readable: name of the device
* @bus_human_readable: name of the bus used
+ * @rt_status: the error_id/rt_status that triggered the latest dump;
+ * if the dump collection was not initiated by an assert, the value is 0
*/
struct iwl_fw_error_dump_info {
__le32 device_family;
@@ -194,6 +196,7 @@ struct iwl_fw_error_dump_info {
u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
u8 dev_human_readable[64];
u8 bus_human_readable[8];
+ __le32 rt_status;
} __packed;
/**
@@ -249,6 +252,7 @@ struct iwl_fw_error_dump_prph {
enum iwl_fw_error_dump_mem_type {
IWL_FW_ERROR_DUMP_MEM_SRAM,
IWL_FW_ERROR_DUMP_MEM_SMEM,
+ IWL_FW_ERROR_DUMP_MEM_NAMED_MEM = 10,
};
/**
@@ -264,6 +268,22 @@ struct iwl_fw_error_dump_mem {
};
/**
+ * struct iwl_fw_error_dump_named_mem - chunk of memory
+ * @type: &enum iwl_fw_error_dump_mem_type
+ * @offset: the offset from which the memory was read
+ * @name_len: name length
+ * @name: region name
+ * @data: the content of the memory
+ */
+struct iwl_fw_error_dump_named_mem {
+ __le32 type;
+ __le32 offset;
+ u8 name_len;
+ u8 name[32];
+ u8 data[];
+};
+
+/**
* struct iwl_fw_error_dump_rb - content of an Receive Buffer
* @index: the index of the Receive Buffer in the Rx queue
* @rxq: the RB's Rx queue
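A hedged sketch of how a kernel-side consumer might recognize the new named-memory chunk, given a struct iwl_fw_error_dump_data *dump_data obtained while walking the dump file (field layout exactly as defined above):

	struct iwl_fw_error_dump_named_mem *mem = (void *)dump_data->data;

	if (le32_to_cpu(mem->type) == IWL_FW_ERROR_DUMP_MEM_NAMED_MEM)
		pr_info("named region %.*s at offset 0x%x\n",
			mem->name_len, mem->name, le32_to_cpu(mem->offset));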
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 6005a41c53d1..81f557c0b58d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -91,6 +91,8 @@ struct iwl_ucode_header {
} u;
};
+#define IWL_UCODE_INI_TLV_GROUP BIT(24)
+
/*
* new TLV uCode file layout
*
@@ -141,6 +143,11 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
IWL_UCODE_TLV_FW_MEM_SEG = 51,
IWL_UCODE_TLV_IML = 52,
+ IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1,
+ IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2,
+ IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3,
+ IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP | 0x4,
+ IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP | 0x5,
/* TLVs 0x1000-0x2000 are for internal driver usage */
IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 54dbbd998abf..12333167ea23 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -65,6 +65,8 @@
#define __iwl_fw_img_h__
#include <linux/types.h>
+#include "api/dbg-tlv.h"
+
#include "file.h"
#include "error-dump.h"
@@ -221,6 +223,30 @@ struct iwl_fw_dbg {
};
/**
+ * struct iwl_fw_ini_active_triggers
+ * @active: is this trigger active
+ * @apply_point: last apply point that updated this trigger
+ * @conf: active trigger
+ * @conf_ext: second trigger, contains extra regions to dump
+ */
+struct iwl_fw_ini_active_triggers {
+ bool active;
+ enum iwl_fw_ini_apply_point apply_point;
+ struct iwl_fw_ini_trigger *conf;
+ struct iwl_fw_ini_trigger *conf_ext;
+};
+
+/**
+ * struct iwl_fw_ini_active_regs
+ * @reg: active region from TLV
+ * @apply_point: apply point where it became active
+ */
+struct iwl_fw_ini_active_regs {
+ struct iwl_fw_ini_region_cfg *reg;
+ enum iwl_fw_ini_apply_point apply_point;
+};
+
+/**
* struct iwl_fw - variables associated with the firmware
*
* @ucode_ver: ucode version from the ucode file
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 2b8b50a77990..4f7090f88cb0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -64,6 +64,7 @@
#include "iwl-trans.h"
#include "img.h"
#include "fw/api/debug.h"
+#include "fw/api/dbg-tlv.h"
#include "fw/api/paging.h"
#include "iwl-eeprom-parse.h"
@@ -131,14 +132,17 @@ struct iwl_fw_runtime {
/* debug */
struct {
const struct iwl_fw_dump_desc *desc;
- const struct iwl_fw_dbg_trigger_tlv *trig;
+ bool monitor_only;
struct delayed_work wk;
u8 conf;
/* ts of the beginning of a non-collect fw dbg data period */
- unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1];
+ unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM - 1];
u32 *d3_debug_data;
+ struct iwl_fw_ini_active_regs active_regs[IWL_FW_INI_MAX_REGION_ID];
+ struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
+ u32 rt_status;
} dump;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 5eb906a0d0d2..91861a9cbe57 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -265,11 +265,9 @@ struct iwl_tt_params {
#define EEPROM_REGULATORY_BAND_NO_HT40 0
/* lower blocks contain EEPROM image and calibration data */
-#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
-#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */
-#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */
-#define OTP_LOW_IMAGE_SIZE_FAMILY_9000 OTP_LOW_IMAGE_SIZE_FAMILY_8000
-#define OTP_LOW_IMAGE_SIZE_FAMILY_22000 OTP_LOW_IMAGE_SIZE_FAMILY_9000
+#define OTP_LOW_IMAGE_SIZE_2K (2 * 512 * sizeof(u16)) /* 2 KB */
+#define OTP_LOW_IMAGE_SIZE_16K (16 * 512 * sizeof(u16)) /* 16 KB */
+#define OTP_LOW_IMAGE_SIZE_32K (32 * 512 * sizeof(u16)) /* 32 KB */
struct iwl_eeprom_params {
const u8 regulatory_bands[7];
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
new file mode 100644
index 000000000000..43d815cb3ce9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -0,0 +1,231 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/firmware.h>
+#include "iwl-trans.h"
+#include "iwl-dbg-tlv.h"
+
+void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
+ bool ext)
+{
+ struct iwl_apply_point_data *data;
+ struct iwl_fw_ini_header *header = (void *)&tlv->data[0];
+ u32 apply_point = le32_to_cpu(header->apply_point);
+
+ int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv);
+
+ if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM,
+ "Invalid apply point id %d\n", apply_point))
+ return;
+
+ if (ext)
+ data = &trans->apply_points_ext[apply_point];
+ else
+ data = &trans->apply_points[apply_point];
+
+ /*
+ * Make sure we still have room to copy this TLV. Offset points to the
+ * location where the last copy ended.
+ */
+ if (WARN_ONCE(data->offset + copy_size > data->size,
+ "Not enough memory for apply point %d\n",
+ apply_point))
+ return;
+
+ memcpy(data->data + data->offset, (void *)tlv, copy_size);
+ data->offset += copy_size;
+}
+
+void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
+ bool ext)
+{
+ struct iwl_ucode_tlv *tlv;
+ u32 size[IWL_FW_INI_APPLY_NUM] = {0};
+ int i;
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len, tlv_type, apply;
+ struct iwl_fw_ini_header *hdr;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len)
+ return;
+
+ len -= ALIGN(tlv_len, 4);
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+
+ if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP))
+ continue;
+
+ hdr = (void *)&tlv->data[0];
+ apply = le32_to_cpu(hdr->apply_point);
+
+ IWL_DEBUG_FW(trans, "Read TLV %x, apply point %d\n",
+ le32_to_cpu(tlv->type), apply);
+
+ if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM))
+ continue;
+
+ size[apply] += sizeof(*tlv) + tlv_len;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(size); i++) {
+ void *mem;
+
+ if (!size[i])
+ continue;
+
+ mem = kzalloc(size[i], GFP_KERNEL);
+
+ if (!mem) {
+ IWL_ERR(trans, "No memory for apply point %d\n", i);
+ return;
+ }
+
+ if (ext) {
+ trans->apply_points_ext[i].data = mem;
+ trans->apply_points_ext[i].size = size[i];
+ } else {
+ trans->apply_points[i].data = mem;
+ trans->apply_points[i].size = size[i];
+ }
+
+ trans->ini_valid = true;
+ }
+}
+
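Note the asymmetry in the walk above: the file cursor advances by ALIGN(tlv_len, 4) because TLVs are padded to 4 bytes on disk, while size[apply] accumulates the unpadded length that iwl_fw_dbg_copy_tlv() later copies. Illustrative arithmetic for a 10-byte TLV body:

	size_t file_step = sizeof(struct iwl_ucode_tlv) + ALIGN(10, 4); /* header + 12 */
	size_t buf_need  = sizeof(struct iwl_ucode_tlv) + 10;           /* header + 10 */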
+void iwl_fw_dbg_free(struct iwl_trans *trans)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(trans->apply_points); i++) {
+ kfree(trans->apply_points[i].data);
+ trans->apply_points[i].size = 0;
+ trans->apply_points[i].offset = 0;
+
+ kfree(trans->apply_points_ext[i].data);
+ trans->apply_points_ext[i].size = 0;
+ trans->apply_points_ext[i].offset = 0;
+ }
+}
+
+static int iwl_parse_fw_dbg_tlv(struct iwl_trans *trans, const u8 *data,
+ size_t len)
+{
+ struct iwl_ucode_tlv *tlv;
+ enum iwl_ucode_tlv_type tlv_type;
+ u32 tlv_len;
+
+ while (len >= sizeof(*tlv)) {
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len) {
+ IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ return -EINVAL;
+ }
+ len -= ALIGN(tlv_len, 4);
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+
+ switch (tlv_type) {
+ case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
+ case IWL_UCODE_TLV_TYPE_HCMD:
+ case IWL_UCODE_TLV_TYPE_REGIONS:
+ case IWL_UCODE_TLV_TYPE_TRIGGERS:
+ case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
+ iwl_fw_dbg_copy_tlv(trans, tlv, true);
+ break;
+ default:
+ WARN_ONCE(1, "Invalid TLV %x\n", tlv_type);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+void iwl_load_fw_dbg_tlv(struct device *dev, struct iwl_trans *trans)
+{
+ const struct firmware *fw;
+ int res;
+
+ if (trans->external_ini_loaded || !iwlwifi_mod_params.enable_ini)
+ return;
+
+ res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev);
+ if (res)
+ return;
+
+ iwl_alloc_dbg_tlv(trans, fw->size, fw->data, true);
+ iwl_parse_fw_dbg_tlv(trans, fw->data, fw->size);
+
+ trans->external_ini_loaded = true;
+ release_firmware(fw);
+}
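A hedged sketch of the intended call site; the device pointer belongs to whichever bus layer probes the device (pdev here is hypothetical):

	/* sketch: at probe time, load an external iwl-dbg-tlv.ini if present */
	iwl_load_fw_dbg_tlv(&pdev->dev, trans);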
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
new file mode 100644
index 000000000000..222cd789e07a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -0,0 +1,87 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_dbg_tlv_h__
+#define __iwl_dbg_tlv_h__
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/**
+ * struct iwl_apply_point_data
+ * @data: start address of this apply point data
+ * @size: total size of the data
+ * @offset: current offset of the copied data
+ */
+struct iwl_apply_point_data {
+ void *data;
+ int size;
+ int offset;
+};
+
+struct iwl_trans;
+void iwl_load_fw_dbg_tlv(struct device *dev, struct iwl_trans *trans);
+void iwl_fw_dbg_free(struct iwl_trans *trans);
+void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
+ bool ext);
+void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
+ bool ext);
+
+#endif /* __iwl_dbg_tlv_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index ba41d23b4211..bf1be985f36b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -72,6 +72,7 @@
#include "iwl-op-mode.h"
#include "iwl-agn-hw.h"
#include "fw/img.h"
+#include "iwl-dbg-tlv.h"
#include "iwl-config.h"
#include "iwl-modparams.h"
@@ -645,6 +646,9 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
len -= sizeof(*ucode);
+ if (iwlwifi_mod_params.enable_ini)
+ iwl_alloc_dbg_tlv(drv->trans, len, data, false);
+
while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
tlv = (void *)data;
@@ -1086,6 +1090,14 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -ENOMEM;
break;
}
+ case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
+ case IWL_UCODE_TLV_TYPE_HCMD:
+ case IWL_UCODE_TLV_TYPE_REGIONS:
+ case IWL_UCODE_TLV_TYPE_TRIGGERS:
+ case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
+ if (iwlwifi_mod_params.enable_ini)
+ iwl_fw_dbg_copy_tlv(drv->trans, tlv, false);
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
@@ -1565,7 +1577,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
if (!drv->dbgfs_drv) {
IWL_ERR(drv, "failed to create debugfs directory\n");
ret = -ENOMEM;
- goto err_free_drv;
+ goto err_free_tlv;
}
/* Create transport layer debugfs dir */
@@ -1590,7 +1602,8 @@ err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
err_free_dbgfs:
debugfs_remove_recursive(drv->dbgfs_drv);
-err_free_drv:
+err_free_tlv:
+ iwl_fw_dbg_free(drv->trans);
#endif
kfree(drv);
err:
@@ -1616,9 +1629,13 @@ void iwl_drv_stop(struct iwl_drv *drv)
mutex_unlock(&iwlwifi_opmode_table_mtx);
#ifdef CONFIG_IWLWIFI_DEBUGFS
+ drv->trans->ops->debugfs_cleanup(drv->trans);
+
debugfs_remove_recursive(drv->dbgfs_drv);
#endif
+ iwl_fw_dbg_free(drv->trans);
+
kfree(drv);
}
@@ -1749,6 +1766,10 @@ MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
"disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
+module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, bool, 0644);
+MODULE_PARM_DESC(enable_ini,
+ "Enable debug INI TLV FW debug infrastructure (default: 0");
/*
* set bt_coex_active to true, uCode will do kill/defer
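The new knob defaults to off, so the INI TLV flow wired up above stays dormant unless the module is loaded with, e.g., "modprobe iwlwifi enable_ini=1" (or an equivalent modprobe.d entry).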
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index 4e3422a1c7bb..75940ac406b9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -927,22 +927,3 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
return NULL;
}
IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
-
-/* helper functions */
-int iwl_nvm_check_version(struct iwl_nvm_data *data,
- struct iwl_trans *trans)
-{
- if (data->nvm_version >= trans->cfg->nvm_ver ||
- data->calib_version >= trans->cfg->nvm_calib_ver) {
- IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
- data->nvm_version, data->calib_version);
- return 0;
- }
-
- IWL_ERR(trans,
- "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
- data->nvm_version, trans->cfg->nvm_ver,
- data->calib_version, trans->cfg->nvm_calib_ver);
- return -EINVAL;
-}
-IWL_EXPORT_SYMBOL(iwl_nvm_check_version);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
index d910bda087f7..2375d300a7cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -117,9 +119,6 @@ struct iwl_nvm_data *
iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
const u8 *eeprom, size_t eeprom_size);
-int iwl_nvm_check_version(struct iwl_nvm_data *data,
- struct iwl_trans *trans);
-
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
int n_channels, enum nl80211_band band);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 6fc8dac4aab7..73b1c46f1158 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -122,6 +122,7 @@ enum iwl_uapsd_disable {
* @fw_monitor: allow to use firmware monitor
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
+ * @enable_ini: enable new FW debug infrastructure (INI TLVs)
*/
struct iwl_mod_params {
int swcrypto;
@@ -148,6 +149,7 @@ struct iwl_mod_params {
*/
bool disable_11ax;
bool remove_when_gone;
+ bool enable_ini;
};
#endif /* #__iwl_modparams_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 96e101d79662..d9afedc3d1d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -465,101 +465,185 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
-static struct ieee80211_sband_iftype_data iwl_he_capa = {
- .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
- .he_cap = {
- .has_he = true,
- .he_cap_elem = {
- .mac_cap_info[0] =
- IEEE80211_HE_MAC_CAP0_HTC_HE |
- IEEE80211_HE_MAC_CAP0_TWT_REQ,
- .mac_cap_info[1] =
- IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
- IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
- .mac_cap_info[2] =
- IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
- IEEE80211_HE_MAC_CAP2_MU_CASCADING |
- IEEE80211_HE_MAC_CAP2_ACK_EN,
- .mac_cap_info[3] =
- IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
- .mac_cap_info[4] =
- IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU |
- IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39,
- .mac_cap_info[5] =
- IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 |
- IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 |
- IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
- .phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
- .phy_cap_info[1] =
- IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
- IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
- IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
- .phy_cap_info[2] =
- IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
- IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
- IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
- .phy_cap_info[3] =
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
- .phy_cap_info[4] =
- IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
- IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
- .phy_cap_info[5] =
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
- IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 |
- IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
- IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK,
- .phy_cap_info[6] =
- IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
- IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
- IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
- IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
- IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
- IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO |
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
- .phy_cap_info[7] =
- IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
- IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP7_MAX_NC_1,
- .phy_cap_info[8] =
- IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
- IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
- IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ,
- .phy_cap_info[9] =
- IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB,
+static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
+ {
+ .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE |
+ IEEE80211_HE_MAC_CAP0_TWT_REQ,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] =
+ IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU |
+ IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39,
+ .mac_cap_info[5] =
+ IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 |
+ IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 |
+ IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+ .phy_cap_info[3] =
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
+ .phy_cap_info[4] =
+ IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
+ .phy_cap_info[5] =
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 |
+ IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK,
+ .phy_cap_info[6] =
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
+ .phy_cap_info[7] =
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP7_MAX_NC_1,
+ .phy_cap_info[8] =
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ,
+ .phy_cap_info[9] =
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB,
+ },
+ /*
+ * Set default Tx/Rx HE MCS NSS Support field.
+ * Indicate support for up to 2 spatial streams and all
+ * MCS, without any special cases
+ */
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ /*
+ * Set default PPE thresholds, with PPET16 set to 0,
+ * PPET8 set to 7
+ */
+ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
},
- /*
- * Set default Tx/Rx HE MCS NSS Support field. Indicate support
- * for up to 2 spatial streams and all MCS, without any special
- * cases
- */
- .he_mcs_nss_supp = {
- .rx_mcs_80 = cpu_to_le16(0xfffa),
- .tx_mcs_80 = cpu_to_le16(0xfffa),
- .rx_mcs_160 = cpu_to_le16(0xfffa),
- .tx_mcs_160 = cpu_to_le16(0xfffa),
- .rx_mcs_80p80 = cpu_to_le16(0xffff),
- .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ {
+ .types_mask = BIT(NL80211_IFTYPE_AP),
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE |
+ IEEE80211_HE_MAC_CAP0_TWT_RES,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
+ .mac_cap_info[4] =
+ IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
+ .phy_cap_info[3] =
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
+ .phy_cap_info[4] =
+ IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
+ .phy_cap_info[5] =
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 |
+ IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK,
+ .phy_cap_info[6] =
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
+ .phy_cap_info[7] =
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP7_MAX_NC_1,
+ .phy_cap_info[8] =
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ,
+ .phy_cap_info[9] =
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB,
+ },
+ /*
+ * Set default Tx/Rx HE MCS NSS Support field.
+ * Indicate support for up to 2 spatial streams and all
+ * MCS, without any special cases
+ */
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xffff),
+ .tx_mcs_80p80 = cpu_to_le16(0xffff),
+ },
+ /*
+ * Set default PPE thresholds, with PPET16 set to 0,
+ * PPET8 set to 7
+ */
+ .ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
},
- /*
- * Set default PPE thresholds, with PPET16 set to 0, PPET8 set
- * to 7
- */
- .ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
},
};
@@ -568,20 +652,24 @@ static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
{
if (sband->band == NL80211_BAND_2GHZ ||
sband->band == NL80211_BAND_5GHZ)
- sband->iftype_data = &iwl_he_capa;
+ sband->iftype_data = iwl_he_capa;
else
return;
- sband->n_iftype_data = 1;
+ sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
if ((tx_chains & rx_chains) != ANT_AB) {
- iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
- ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
- iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
- ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
- iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[7] &=
- ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
+ int i;
+
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[1] &=
+ ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
+ iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[2] &=
+ ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
+ iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[7] &=
+ ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
+ }
}
}
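
The hunk above turns the single shared iwl_he_capa entry into a two-element array, so station and AP interfaces can advertise different HE MAC capabilities (TWT requester for the client, TWT responder plus BSR for the AP), and iwl_init_he_hw_capab() now publishes the whole array with ARRAY_SIZE(). A minimal userspace sketch of the per-iftype lookup a stack would perform against such an array; the types and helper below are simplified stand-ins, not the mac80211 API:

#include <stdio.h>

#define BIT(n) (1U << (n))

/* simplified stand-ins for the nl80211/mac80211 types */
enum iftype { IFTYPE_STATION = 2, IFTYPE_AP = 3 };

struct iftype_data {
    unsigned int types_mask;  /* bitmap of BIT(iftype) */
    const char *he_cap;       /* stands in for struct ieee80211_sta_he_cap */
};

static const struct iftype_data he_capa[] = {
    { .types_mask = BIT(IFTYPE_STATION), .he_cap = "STA caps: TWT requester" },
    { .types_mask = BIT(IFTYPE_AP),      .he_cap = "AP caps: TWT responder" },
};

/* return the entry whose mask covers this interface type, if any */
static const struct iftype_data *get_iftype_data(enum iftype t)
{
    for (size_t i = 0; i < sizeof(he_capa) / sizeof(he_capa[0]); i++)
        if (he_capa[i].types_mask & BIT(t))
            return &he_capa[i];
    return NULL;
}

int main(void)
{
    const struct iftype_data *d = get_iftype_data(IFTYPE_AP);

    printf("%s\n", d ? d->he_cap : "no HE caps");
    return 0;
}

Keeping one entry per types_mask leaves the capability tables declarative; the 1x1 midamble fixup in iwl_init_he_hw_capab() then only has to iterate the array.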
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 0f51c7bea8d0..9d89b7d7f9fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -8,6 +8,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -360,6 +362,12 @@
#define MON_BUFF_END_ADDR (0xa03c40)
#define MON_BUFF_WRPTR (0xa03c44)
#define MON_BUFF_CYCLE_CNT (0xa03c48)
+/* FW monitor family 8000 and on */
+#define MON_BUFF_BASE_ADDR_VER2 (0xa03c3c)
+#define MON_BUFF_END_ADDR_VER2 (0xa03c20)
+#define MON_BUFF_WRPTR_VER2 (0xa03c24)
+#define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28)
+#define MON_BUFF_SHIFT_VER2 (0x8)
#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
@@ -394,6 +402,7 @@ enum aux_misc_master1_en {
#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
#define RSA_ENABLE 0xA24B08
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
+#define PREG_PRPH_WPROT_0 0xA04CE0
#define SB_CPU_1_STATUS 0xA01E30
#define SB_CPU_2_STATUS 0xA01E34
#define UMAG_SB_CPU_1_STATUS 0xA038C0
@@ -420,4 +429,8 @@ enum {
#define UREG_CHICK (0xA05C00)
#define UREG_CHICK_MSI_ENABLE BIT(24)
#define UREG_CHICK_MSIX_ENABLE BIT(25)
+
+#define HPM_DEBUG 0xA03440
+#define PERSISTENCE_BIT BIT(12)
+#define PREG_WFPM_ACCESS BIT(12)
#endif /* __iwl_prph_h__ */
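
The new MON_BUFF_*_VER2 definitions relocate the family-8000 monitor-buffer registers and add MON_BUFF_SHIFT_VER2, which suggests the VER2 registers hold shifted addresses rather than raw byte pointers. A hedged sketch of converting such a register value; the shift-to-bytes rule is an assumption read off the constant, and iwl_read_prph_like() is a hypothetical stand-in for the driver's register accessor:

#include <stdio.h>
#include <stdint.h>

#define MON_BUFF_WRPTR_VER2  0xa03c24
#define MON_BUFF_SHIFT_VER2  0x8

/* hypothetical register read; the real driver goes through its prph accessors */
static uint32_t iwl_read_prph_like(uint32_t reg)
{
    (void)reg;
    return 0x1234;  /* pretend raw register value */
}

int main(void)
{
    uint32_t raw = iwl_read_prph_like(MON_BUFF_WRPTR_VER2);
    /* assumption: VER2 registers store the pointer >> MON_BUFF_SHIFT_VER2 */
    uint64_t byte_addr = (uint64_t)raw << MON_BUFF_SHIFT_VER2;

    printf("write pointer: 0x%llx\n", (unsigned long long)byte_addr);
    return 0;
}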
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 26b3c73051ca..a7009cd4232d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -73,6 +73,8 @@
#include "iwl-op-mode.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
+#include "fw/api/dbg-tlv.h"
+#include "iwl-dbg-tlv.h"
/**
* DOC: Transport layer - what is it ?
@@ -534,6 +536,8 @@ struct iwl_trans_rxq_dma_data {
* @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
* TX'ed commands and similar. The buffer will be vfree'd by the caller.
* Note that the transport must fill in the proper file headers.
+ * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
+ * of the trans debugfs
*/
struct iwl_trans_ops {
@@ -602,8 +606,8 @@ struct iwl_trans_ops {
void (*resume)(struct iwl_trans *trans);
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
- const struct iwl_fw_dbg_trigger_tlv
- *trigger);
+ u32 dump_mask);
+ void (*debugfs_cleanup)(struct iwl_trans *trans);
};
/**
@@ -679,7 +683,6 @@ enum iwl_plat_pm_mode {
* enter/exit (in msecs).
*/
#define IWL_TRANS_IDLE_TIMEOUT 2000
-#define IWL_MAX_DEBUG_ALLOCATIONS 1
/**
* struct iwl_dram_data
@@ -734,6 +737,7 @@ struct iwl_dram_data {
* @runtime_pm_mode: the runtime power management mode in use. This
* mode is set during the initialization phase and is not
* supposed to change during runtime.
+ * @dbg_rec_on: true iff there is a fw debug recording currently active
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -774,17 +778,23 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif
+ struct iwl_apply_point_data apply_points[IWL_FW_INI_APPLY_NUM];
+ struct iwl_apply_point_data apply_points_ext[IWL_FW_INI_APPLY_NUM];
+
+ bool external_ini_loaded;
+ bool ini_valid;
+
const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
- u32 dbg_dump_mask;
u8 dbg_n_dest_reg;
int num_blocks;
- struct iwl_dram_data fw_mon[IWL_MAX_DEBUG_ALLOCATIONS];
+ struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM];
enum iwl_plat_pm_mode system_pm_mode;
enum iwl_plat_pm_mode runtime_pm_mode;
bool suspending;
+ bool dbg_rec_on;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -897,12 +907,11 @@ static inline void iwl_trans_resume(struct iwl_trans *trans)
}
static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans,
- const struct iwl_fw_dbg_trigger_tlv *trigger)
+iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
{
if (!trans->ops->dump_data)
return NULL;
- return trans->ops->dump_data(trans, trigger);
+ return trans->ops->dump_data(trans, dump_mask);
}
static inline struct iwl_device_cmd *
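
Changing dump_data() to take a u32 dump_mask instead of a trigger TLV pointer makes region selection a plain bitmask contract between op-mode and transport. A compact sketch of that calling convention; the region bit names here are illustrative, the real ones live in the fw dump API:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

/* illustrative region bits; the driver defines its own elsewhere */
#define DUMP_CSR     BIT(0)
#define DUMP_FW_MON  BIT(1)
#define DUMP_MEM     BIT(2)

struct trans_ops {
    void (*dump_data)(uint32_t dump_mask);
};

static void pcie_dump_data(uint32_t dump_mask)
{
    if (dump_mask & DUMP_CSR)
        printf("dumping CSR registers\n");
    if (dump_mask & DUMP_FW_MON)
        printf("dumping firmware monitor\n");
    if (dump_mask & DUMP_MEM)
        printf("dumping SRAM\n");
}

int main(void)
{
    struct trans_ops ops = { .dump_data = pcie_dump_data };

    /* the caller picks regions with a mask instead of a trigger pointer */
    ops.dump_data(DUMP_CSR | DUMP_FW_MON);
    return 0;
}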
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 843f3b41b72e..01b5338201d6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1811,8 +1811,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
n_matches = 0;
}
- net_detect = kzalloc(sizeof(*net_detect) +
- (n_matches * sizeof(net_detect->matches[0])),
+ net_detect = kzalloc(struct_size(net_detect, matches, n_matches),
GFP_KERNEL);
if (!net_detect || !n_matches)
goto out_report_nd;
@@ -1827,8 +1826,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
n_channels += hweight8(fw_match->matching_channels[j]);
- match = kzalloc(sizeof(*match) +
- (n_channels * sizeof(*match->channels)),
+ match = kzalloc(struct_size(match, channels, n_channels),
GFP_KERNEL);
if (!match)
goto out_report_nd;
@@ -1956,7 +1954,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
iwl_mvm_dump_nic_error_log(mvm);
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- NULL, 0);
+ false, 0);
ret = 1;
goto err;
}
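
Both d3.c allocations move from open-coded sizeof(*p) + n * sizeof(elem) arithmetic to struct_size(), which computes the size of a struct ending in a flexible array member and, in the kernel, saturates rather than wrapping when n is attacker-controlled. A userspace approximation of what it evaluates to (without the kernel's checked-overflow arithmetic):

#include <stdio.h>
#include <stdlib.h>

struct match {
    int n_channels;
    unsigned int channels[];  /* flexible array member */
};

/* simplified struct_size(); the kernel version saturates on overflow */
#define struct_size_approx(p, member, n) \
    (sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
    size_t n = 4;
    struct match *m = calloc(1, struct_size_approx(m, channels, n));

    if (!m)
        return 1;
    m->n_channels = (int)n;
    printf("allocated %zu bytes\n", struct_size_approx(m, channels, n));
    free(m);
    return 0;
}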
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 1aa6c7e93088..33b0af24a537 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -1299,10 +1299,11 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
int len;
len = scnprintf(buf, sizeof(buf) - 1,
- "traffic=%d\ndbgfs=%d\nvcmd=%d\n",
+ "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n",
!!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC),
!!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
- !!(mvmvif->low_latency & LOW_LATENCY_VCMD));
+ !!(mvmvif->low_latency & LOW_LATENCY_VCMD),
+ !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE));
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
@@ -1440,15 +1441,6 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static const char * const chanwidths[] = {
- [NL80211_CHAN_WIDTH_20_NOHT] = "noht",
- [NL80211_CHAN_WIDTH_20] = "ht20",
- [NL80211_CHAN_WIDTH_40] = "ht40",
- [NL80211_CHAN_WIDTH_80] = "vht80",
- [NL80211_CHAN_WIDTH_80P80] = "vht80p80",
- [NL80211_CHAN_WIDTH_160] = "vht160",
-};
-
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
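
The low_latency debugfs read now reports the new vif_type cause alongside the existing ones, each flag collapsed to 0/1 with the double-negation idiom. A tiny standalone sketch of the same pattern, reusing the bit layout from the mvm.h enum further down; buffer size and values are arbitrary:

#include <stdio.h>

#define BIT(n)                (1U << (n))
#define LOW_LATENCY_TRAFFIC   BIT(0)
#define LOW_LATENCY_DEBUGFS   BIT(1)
#define LOW_LATENCY_VCMD      BIT(2)
#define LOW_LATENCY_VIF_TYPE  BIT(3)

int main(void)
{
    unsigned int low_latency = LOW_LATENCY_TRAFFIC | LOW_LATENCY_VIF_TYPE;
    char buf[64];

    /* !! collapses any set bit to exactly 0 or 1 for printing */
    int len = snprintf(buf, sizeof(buf),
                       "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n",
                       !!(low_latency & LOW_LATENCY_TRAFFIC),
                       !!(low_latency & LOW_LATENCY_DEBUGFS),
                       !!(low_latency & LOW_LATENCY_VCMD),
                       !!(low_latency & LOW_LATENCY_VIF_TYPE));

    fwrite(buf, 1, (size_t)len, stdout);
    return 0;
}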
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 3b6b3d8fb961..52c361a6124c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1284,7 +1284,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
return 0;
iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
- (count - 1), NULL);
+ (count - 1));
iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 2ba890445c35..0d6c313b6669 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -377,6 +377,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ iwl_fw_set_dbg_rec_on(&mvm->fwrt);
+#endif
clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);
return 0;
@@ -407,6 +410,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ iwl_fw_assert_error_dump(&mvm->fwrt);
goto error;
}
@@ -543,7 +547,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
if (mvm->nvm_file_name)
iwl_mvm_load_nvm_to_nic(mvm);
- WARN_ON(iwl_nvm_check_version(mvm->nvm_data, mvm->trans));
+ WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
+ "Too old NVM version (0x%0x, required = 0x%0x)",
+ mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);
/*
* abort after reading the nvm in case RF Kill is on, we will complete
@@ -881,6 +887,15 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
int ret, i, j;
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+ /*
+ * This command is not supported on earlier firmware versions.
+ * Unfortunately, we don't have a TLV API flag for it, so we
+ * rely on the major version, which is in the first byte of
+ * ucode_ver.
+ */
+ if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+ return 0;
+
ret = iwl_mvm_sar_get_wgds_table(mvm);
if (ret < 0) {
IWL_DEBUG_RADIO(mvm,
@@ -1014,10 +1029,14 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (ret)
return ret;
+ iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY);
+
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret)
return ret;
+ iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE);
+
return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
}
@@ -1036,6 +1055,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+ iwl_fw_assert_error_dump(&mvm->fwrt);
goto error;
}
@@ -1045,11 +1065,13 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
if (ret)
IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
- mvm->fwrt.dump.conf = FW_DBG_INVALID;
- /* if we have a destination, assume EARLY START */
- if (mvm->fw->dbg.dest_tlv)
- mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
- iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
+ if (!mvm->trans->ini_valid) {
+ mvm->fwrt.dump.conf = FW_DBG_INVALID;
+ /* if we have a destination, assume EARLY START */
+ if (mvm->fw->dbg.dest_tlv)
+ mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
+ iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
+ }
ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
if (ret)
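
The GEO_TX_POWER_LIMIT hunk has no TLV capability flag to test, so it gates the command on a byte pulled out of the packed ucode_ver word. A sketch of decomposing such a version word; the byte layout used here is an assumption modeled on the driver's IWL_UCODE_* macro style, not a statement of the real encoding:

#include <stdio.h>
#include <stdint.h>

/* assumed layout: major in the top byte, serial in the bottom byte */
#define UCODE_MAJOR(ver)   (((ver) >> 24) & 0xff)
#define UCODE_SERIAL(ver)  ((ver) & 0xff)

int main(void)
{
    uint32_t ucode_ver = 0x29000026;  /* made-up version word, major = 41 */

    if (UCODE_MAJOR(ucode_ver) < 41)
        printf("skipping GEO_TX_POWER_LIMIT (major %u too old)\n",
               UCODE_MAJOR(ucode_ver));
    else
        printf("sending GEO_TX_POWER_LIMIT\n");
    return 0;
}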
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 6486cfb33f40..7779951a9533 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -767,13 +767,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
}
ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
- ctxt_sta->bi_reciprocal =
- cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
vif->bss_conf.dtim_period);
- ctxt_sta->dtim_reciprocal =
- cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
- vif->bss_conf.dtim_period));
ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
@@ -782,8 +777,30 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
- !iwlwifi_mod_params.disable_11ax)
+ !iwlwifi_mod_params.disable_11ax) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u8 sta_id = mvmvif->ap_sta_id;
+
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+ if (sta_id != IWL_MVM_INVALID_STA) {
+ struct ieee80211_sta *sta;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /*
+ * TODO: we should check the ext cap IE but it is
+ * unclear why the spec requires two bits (one in HE
+ * cap IE, and one in the ext cap IE). In the meantime
+ * rely on the HE cap IE only.
+ */
+ if (sta && (sta->he_cap.he_cap_elem.mac_cap_info[0] &
+ IEEE80211_HE_MAC_CAP0_TWT_RES))
+ ctxt_sta->data_policy |=
+ cpu_to_le32(TWT_SUPPORTED);
+ }
+ }
+
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
@@ -832,8 +849,6 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
- cmd.ibss.bi_reciprocal =
- cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
/* TODO: Assumes that the beacon id == mac context id */
cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
@@ -965,11 +980,8 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
tx->tx_flags = cpu_to_le32(tx_flags);
if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
- mvm->mgmt_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
- mvm->mgmt_last_antenna_idx);
- }
+ IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION))
+ iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
tx->rate_n_flags =
cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
@@ -1182,14 +1194,12 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
}
+ if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax)
+ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+
ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
- ctxt_ap->bi_reciprocal =
- cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
vif->bss_conf.dtim_period);
- ctxt_ap->dtim_reciprocal =
- cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
- vif->bss_conf.dtim_period));
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_STA_TYPE))
@@ -1522,6 +1532,8 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_beacon_loss_iterator,
mb);
+
+ iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_MISSED_BEACONS);
}
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
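
The station MAC context now sets the TWT data policy only when the associated AP advertises the TWT-responder bit in its HE MAC capabilities, fetched under the mvm mutex with rcu_dereference_protected(). The capability test itself is a one-byte mask; a standalone sketch with simplified names (the example capability value is invented):

#include <stdio.h>
#include <stdint.h>

#define HE_MAC_CAP0_HTC_HE   0x01
#define HE_MAC_CAP0_TWT_REQ  0x02
#define HE_MAC_CAP0_TWT_RES  0x04

struct he_cap_elem {
    uint8_t mac_cap_info[6];
};

int main(void)
{
    /* pretend this came from the AP's association data */
    struct he_cap_elem ap_caps = {
        .mac_cap_info[0] = HE_MAC_CAP0_HTC_HE | HE_MAC_CAP0_TWT_RES,
    };

    if (ap_caps.mac_cap_info[0] & HE_MAC_CAP0_TWT_RES)
        printf("AP is a TWT responder: enable TWT data policy\n");
    return 0;
}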
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 00f831d88366..97dc464379d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -423,6 +423,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
if (iwl_mvm_has_tlc_offload(mvm)) {
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
@@ -813,6 +814,21 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
!ieee80211_is_bufferable_mmpdu(hdr->frame_control))
sta = NULL;
+ /* If there is no sta, and it's not offchannel - send through AP */
+ if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+ info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(info->control.vif);
+ u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
+
+ if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+ /* mac80211 holds rcu read lock */
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
+ if (IS_ERR_OR_NULL(sta))
+ goto drop;
+ }
+ }
+
if (sta) {
if (iwl_mvm_defer_tx(mvm, sta, skb))
return;
@@ -1113,6 +1129,8 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
}
ret = iwl_mvm_up(mvm);
+ iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_POST_INIT);
+
if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
/* Something went wrong - we need to finish some cleanup
* that normally iwl_mvm_mac_restart_complete() below
@@ -2005,7 +2023,13 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
- /* If PPE Thresholds exist, parse them into a FW-familiar format */
+ /*
+ * Initialize the PPE thresholds to "None" (7), as described in Table
+ * 9-262ac of 802.11ax/D3.0.
+ */
+ memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
+
+ /* If PPE Thresholds exist, parse them into a FW-familiar format. */
if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
u8 nss = (sta->he_cap.ppe_thres[0] &
@@ -2383,6 +2407,12 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;
+ if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
+ iwl_mvm_vif_set_low_latency(mvmvif, true,
+ LOW_LATENCY_VIF_TYPE);
+ iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id);
+ }
+
/* power updated needs to be done before quotas */
iwl_mvm_power_update_mac(mvm);
@@ -2445,6 +2475,12 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
mvmvif->ap_ibss_active = false;
mvm->ap_last_beacon_gp2 = 0;
+ if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
+ iwl_mvm_vif_set_low_latency(mvmvif, false,
+ LOW_LATENCY_VIF_TYPE);
+ iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id);
+ }
+
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
@@ -2945,6 +2981,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_AP) {
mvmvif->ap_assoc_sta_count++;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ if (vif->bss_conf.he_support &&
+ !iwlwifi_mod_params.disable_11ax)
+ iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id);
}
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
@@ -3355,7 +3394,7 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
resp = (void *)pkt->data;
IWL_DEBUG_TE(mvm,
- "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
+ "Aux ROC: Received response from ucode: status=%d uid=%d\n",
resp->status, resp->event_unique_id);
te_data->uid = le32_to_cpu(resp->event_unique_id);
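
The new TX hunk in mac80211.c routes frames that reach the driver without a station pointer through the AP station on a client interface, except for off-channel traffic. The routing decision is a small pure predicate; a sketch with placeholder types, mirroring the bounds check against the station count:

#include <stdio.h>
#include <stdbool.h>

#define STATION_COUNT  16
#define INVALID_STA    0xffu

struct tx_info {
    bool is_station_vif;
    bool is_offchannel;
    unsigned int ap_sta_id;  /* a one-time snapshot, as with READ_ONCE() */
};

/* returns the station id to send through, or INVALID_STA to keep sta NULL */
static unsigned int pick_tx_sta(const struct tx_info *info, bool have_sta)
{
    if (have_sta || !info->is_station_vif || info->is_offchannel)
        return INVALID_STA;
    if (info->ap_sta_id < STATION_COUNT)
        return info->ap_sta_id;
    return INVALID_STA;
}

int main(void)
{
    struct tx_info info = { .is_station_vif = true, .ap_sta_id = 3 };

    printf("tx via sta id %u\n", pick_tx_sta(&info, false));
    return 0;
}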
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 7ba5bc2ed1c4..1aa690e081ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -303,11 +303,13 @@ enum iwl_bt_force_ant_mode {
* @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
* @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
* @LOW_LATENCY_VCMD: low latency mode set from vendor command
+* @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap)
*/
enum iwl_mvm_low_latency_cause {
LOW_LATENCY_TRAFFIC = BIT(0),
LOW_LATENCY_DEBUGFS = BIT(1),
LOW_LATENCY_VCMD = BIT(2),
+ LOW_LATENCY_VIF_TYPE = BIT(3),
};
/**
@@ -844,7 +846,6 @@ struct iwl_mvm {
u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
- spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */
atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
@@ -1521,6 +1522,11 @@ static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
mvm->fw->valid_rx_ant;
}
+static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant)
+{
+ *ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant);
+}
+
static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
{
u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
@@ -1550,6 +1556,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
+void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
@@ -1846,6 +1854,8 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* get SystemLowLatencyMode - only needed for beacon threshold? */
bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band);
+void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency,
+ u16 mac_id);
/* get VMACLowLatencyMode */
static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
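
iwl_mvm_toggle_tx_ant() wraps the existing iwl_mvm_next_antenna() call so callers advance an antenna index in one line. A self-contained sketch of the round-robin, assuming next_antenna() picks the next set bit of the valid-antenna mask after the current index and wraps:

#include <stdio.h>

#define BIT(n)  (1U << (n))
#define ANT_A   BIT(0)
#define ANT_B   BIT(1)

/* assumed behaviour: next set bit in valid after idx, wrapping around */
static unsigned char next_antenna(unsigned int valid, unsigned char idx)
{
    for (unsigned int i = 1; i <= 8; i++) {
        unsigned char cand = (unsigned char)((idx + i) % 8);

        if (valid & BIT(cand))
            return cand;
    }
    return idx;
}

static void toggle_tx_ant(unsigned int valid, unsigned char *ant)
{
    *ant = next_antenna(valid, *ant);
}

int main(void)
{
    unsigned char ant = 0;

    for (int i = 0; i < 4; i++) {
        toggle_tx_ant(ANT_A | ANT_B, &ant);
        printf("tx antenna: %u\n", ant);  /* alternates 1, 0, 1, 0 */
    }
    return 0;
}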
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index af3fba10abc1..30c5127034a0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -676,7 +676,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->aux_roc_te_list);
INIT_LIST_HEAD(&mvm->async_handlers_list);
spin_lock_init(&mvm->time_event_lock);
- spin_lock_init(&mvm->queue_info_lock);
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@@ -770,7 +769,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
sizeof(trans->dbg_conf_tlv));
trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
- trans->dbg_dump_mask = mvm->fw->dbg.dump_mask;
trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;
@@ -846,6 +844,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_tof_init(mvm);
+ iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+
return op_mode;
out_unregister:
@@ -1073,6 +1073,8 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_queue_notif(mvm, rxb, 0);
else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
+ else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
+ iwl_mvm_rx_monitor_ndp(mvm, napi, rxb, 0);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
}
@@ -1110,11 +1112,7 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- unsigned long mq;
-
- spin_lock_bh(&mvm->queue_info_lock);
- mq = mvm->hw_queue_to_mac80211[hw_queue];
- spin_unlock_bh(&mvm->queue_info_lock);
+ unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
iwl_mvm_stop_mac_queues(mvm, mq);
}
@@ -1140,11 +1138,7 @@ void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
- unsigned long mq;
-
- spin_lock_bh(&mvm->queue_info_lock);
- mq = mvm->hw_queue_to_mac80211[hw_queue];
- spin_unlock_bh(&mvm->queue_info_lock);
+ unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
iwl_mvm_start_mac_queues(mvm, mq);
}
@@ -1242,7 +1236,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
*/
if (!mvm->fw_restart && fw_error) {
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- NULL, 0);
+ false, 0);
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
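
The RX dispatch in ops.c gains a branch for WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF), iwlwifi's scheme for extending the 8-bit command space with a group byte. A sketch of that encoding, assuming the conventional (group << 8) | opcode packing; the opcode value below is illustrative:

#include <stdio.h>
#include <stdint.h>

/* assumed packing: group in the high byte, opcode in the low byte */
#define WIDE_ID(grp, opcode)  ((uint16_t)(((grp) << 8) | (opcode)))

#define DATA_PATH_GROUP   0x5
#define RX_NO_DATA_NOTIF  0xf5  /* illustrative opcode value */

static void dispatch(uint16_t cmd)
{
    if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
        printf("-> rx_monitor_ndp handler\n");
    else
        printf("-> common handler\n");
}

int main(void)
{
    dispatch(WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF));
    dispatch(WIDE_ID(0x1, 0x2));
    return 0;
}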
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 7a98e1a1dc40..dabbc04853ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -98,8 +98,12 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
{
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u8 supp = 0;
+ if (he_cap && he_cap->has_he)
+ return 0;
+
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index ef624833cf1b..6653a238f32e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -593,31 +593,28 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
int hyst = vif->bss_conf.cqm_rssi_hyst;
u16 id = le32_to_cpu(data->mac_id);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 vif_id = mvmvif->id;
/* This doesn't need the MAC ID check since it's not taking the
* data copied into the "data" struct, but rather the data from
* the notification directly.
*/
- if (data->general) {
- u16 vif_id = mvmvif->id;
-
- if (iwl_mvm_is_cdb_supported(mvm)) {
- struct mvm_statistics_general_cdb *general =
- data->general;
-
- mvmvif->beacon_stats.num_beacons =
- le32_to_cpu(general->beacon_counter[vif_id]);
- mvmvif->beacon_stats.avg_signal =
- -general->beacon_average_energy[vif_id];
- } else {
- struct mvm_statistics_general_v8 *general =
- data->general;
-
- mvmvif->beacon_stats.num_beacons =
- le32_to_cpu(general->beacon_counter[vif_id]);
- mvmvif->beacon_stats.avg_signal =
- -general->beacon_average_energy[vif_id];
- }
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ struct mvm_statistics_general_cdb *general =
+ data->general;
+
+ mvmvif->beacon_stats.num_beacons =
+ le32_to_cpu(general->beacon_counter[vif_id]);
+ mvmvif->beacon_stats.avg_signal =
+ -general->beacon_average_energy[vif_id];
+ } else {
+ struct mvm_statistics_general_v8 *general =
+ data->general;
+
+ mvmvif->beacon_stats.num_beacons =
+ le32_to_cpu(general->beacon_counter[vif_id]);
+ mvmvif->beacon_stats.avg_signal =
+ -general->beacon_average_energy[vif_id];
}
if (mvmvif->id != id)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 26ac9402568d..7bd8676508f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -200,7 +200,8 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
+ if (!(rx_status->flag & RX_FLAG_NO_PSDU) &&
+ iwl_mvm_check_pn(mvm, skb, queue, sta)) {
kfree_skb(skb);
} else {
unsigned int radiotap_len = 0;
@@ -863,68 +864,66 @@ static void iwl_mvm_flip_address(u8 *addr)
ether_addr_copy(addr, mac_addr);
}
-static void iwl_mvm_decode_he_sigb(struct iwl_mvm *mvm,
- struct iwl_rx_mpdu_desc *desc,
- u32 rate_n_flags,
- struct ieee80211_radiotap_he_mu *he_mu)
-{
- u32 sigb0, sigb1;
- u16 sigb2;
-
- if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
- sigb0 = le32_to_cpu(desc->v3.sigb_common0);
- sigb1 = le32_to_cpu(desc->v3.sigb_common1);
- } else {
- sigb0 = le32_to_cpu(desc->v1.sigb_common0);
- sigb1 = le32_to_cpu(desc->v1.sigb_common1);
- }
+struct iwl_mvm_rx_phy_data {
+ enum iwl_rx_phy_info_type info_type;
+ __le32 d0, d1, d2, d3;
+ __le16 d4;
+};
- sigb2 = le16_to_cpu(desc->sigb_common2);
+static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
+ struct iwl_mvm_rx_phy_data *phy_data,
+ u32 rate_n_flags,
+ struct ieee80211_radiotap_he_mu *he_mu)
+{
+ u32 phy_data2 = le32_to_cpu(phy_data->d2);
+ u32 phy_data3 = le32_to_cpu(phy_data->d3);
+ u16 phy_data4 = le16_to_cpu(phy_data->d4);
- if (FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH1_CRC_OK, sigb2)) {
+ if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) {
he_mu->flags1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN);
he_mu->flags1 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH1_CTR_RU,
- sigb2),
+ le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU,
+ phy_data4),
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU);
- he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH1_RU0,
- sigb0);
- he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH1_RU1,
- sigb1);
- he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH1_RU2,
- sigb0);
- he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH1_RU3,
- sigb1);
+ he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0,
+ phy_data2);
+ he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1,
+ phy_data3);
+ he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2,
+ phy_data2);
+ he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3,
+ phy_data3);
}
- if (FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH2_CRC_OK, sigb2) &&
+ if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK, phy_data4) &&
(rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) {
he_mu->flags1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);
he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH2_CTR_RU,
- sigb2),
+ le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU,
+ phy_data4),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU);
- he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH2_RU0,
- sigb0);
- he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH2_RU1,
- sigb1);
- he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH2_RU2,
- sigb0);
- he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH2_RU3,
- sigb1);
+ he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0,
+ phy_data2);
+ he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1,
+ phy_data3);
+ he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2,
+ phy_data2);
+ he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3,
+ phy_data3);
}
}
static void
-iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags,
+iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
+ u32 rate_n_flags,
struct ieee80211_radiotap_he *he,
struct ieee80211_radiotap_he_mu *he_mu,
struct ieee80211_rx_status *rx_status)
@@ -937,7 +936,7 @@ iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags,
* happen though as management frames where we need
* the TSF/timers are not transmitted in HE-MU.
*/
- u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
u8 offs = 0;
rx_status->bw = RATE_INFO_BW_HE_RU;
@@ -976,7 +975,7 @@ iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags,
IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN);
- if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+ if (phy_data->d1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80))
he->data2 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
@@ -996,106 +995,122 @@ iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags,
}
static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
- struct iwl_rx_mpdu_desc *desc,
+ struct iwl_mvm_rx_phy_data *phy_data,
struct ieee80211_radiotap_he *he,
struct ieee80211_radiotap_he_mu *he_mu,
struct ieee80211_rx_status *rx_status,
- u64 he_phy_data, u32 rate_n_flags,
- int queue)
+ u32 rate_n_flags, int queue)
{
- u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
- bool sigb_data;
- u16 d1known = IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN;
- u16 d2known = IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
- IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN;
-
- he->data1 |= cpu_to_le16(d1known);
- he->data2 |= cpu_to_le16(d2known);
- he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
- he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_UPLINK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
- he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_LDPC_EXT_SYM,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
- he->data4 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SPATIAL_REUSE_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
- he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PRE_FEC_PAD_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
- he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PE_DISAMBIG,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
- he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_TXOP_DUR_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_DATA6_TXOP);
- he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_DOPPLER,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
-
- switch (he_type) {
- case RATE_MCS_HE_TYPE_MU:
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_NONE:
+ case IWL_RX_PHY_INFO_TYPE_CCK:
+ case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY:
+ case IWL_RX_PHY_INFO_TYPE_HT:
+ case IWL_RX_PHY_INFO_TYPE_VHT_SU:
+ case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+ return;
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4),
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4);
+ /* fall through */
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ /* HE common */
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN);
+ he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK),
+ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR);
+ if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB &&
+ phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) {
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_UPLINK),
+ IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+ }
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM),
+ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG);
+ he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK),
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK),
+ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_PE_DISAMBIG),
+ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG);
+ he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK),
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+ he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK),
+ IEEE80211_RADIOTAP_HE_DATA6_TXOP);
+ he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_DOPPLER),
+ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER);
+ break;
+ }
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
he_mu->flags1 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM,
- he_phy_data),
+ le16_encode_bits(le16_get_bits(phy_data->d4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM),
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
he_mu->flags1 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK,
- he_phy_data),
+ le16_encode_bits(le16_get_bits(phy_data->d4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK),
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
+ le16_encode_bits(le16_get_bits(phy_data->d4,
+ IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
+ iwl_mvm_decode_he_mu_ext(mvm, phy_data, rate_n_flags, he_mu);
+ /* fall through */
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+ le16_encode_bits(le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
-
- sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK,
- he_phy_data) ==
- IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO;
- if (sigb_data)
- iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu);
+ le16_encode_bits(le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION),
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
/* fall through */
- case RATE_MCS_HE_TYPE_TRIG:
- he->data2 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
- he->data5 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
- break;
- case RATE_MCS_HE_TYPE_SU:
- case RATE_MCS_HE_TYPE_EXT_SU:
- he->data1 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
- he->data3 |=
- le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BEAM_CHNG,
- he_phy_data),
- IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
- break;
- }
-
- switch (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data)) {
- case IWL_RX_HE_PHY_INFO_TYPE_MU:
- case IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO:
- case IWL_RX_HE_PHY_INFO_TYPE_TB:
- iwl_mvm_decode_he_phy_ru_alloc(he_phy_data, rate_n_flags,
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ iwl_mvm_decode_he_phy_ru_alloc(phy_data, rate_n_flags,
he, he_mu, rx_status);
break;
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN);
+ he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0,
+ IWL_RX_PHY_DATA0_HE_BEAM_CHNG),
+ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE);
+ break;
default:
/* nothing */
break;
@@ -1103,13 +1118,10 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
}
static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
- struct iwl_rx_mpdu_desc *desc,
+ struct iwl_mvm_rx_phy_data *phy_data,
u32 rate_n_flags, u16 phy_info, int queue)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- /* this is invalid e.g. because puncture type doesn't allow 0b11 */
-#define HE_PHY_DATA_INVAL ((u64)-1)
- u64 he_phy_data = HE_PHY_DATA_INVAL;
struct ieee80211_radiotap_he *he = NULL;
struct ieee80211_radiotap_he_mu *he_mu = NULL;
u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
@@ -1136,49 +1148,41 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
radiotap_len += sizeof(known);
rx_status->flag |= RX_FLAG_RADIOTAP_HE;
- if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
- if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
- else
- he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
-
- if (he_type == RATE_MCS_HE_TYPE_MU) {
- he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
- radiotap_len += sizeof(mu_known);
- rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
- }
+ if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU ||
+ phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) {
+ he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known));
+ radiotap_len += sizeof(mu_known);
+ rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
}
/* temporarily hide the radiotap data */
__skb_pull(skb, radiotap_len);
- if (he_phy_data != HE_PHY_DATA_INVAL &&
- he_type == RATE_MCS_HE_TYPE_SU) {
+ if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_SU) {
/* report the AMPDU-EOF bit on single frames */
if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
- if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, he_phy_data))
+ if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
}
}
- if (he_phy_data != HE_PHY_DATA_INVAL)
- iwl_mvm_decode_he_phy_data(mvm, desc, he, he_mu, rx_status,
- he_phy_data, rate_n_flags, queue);
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status,
+ rate_n_flags, queue);
/* update aggregation data for monitor sake on default queue */
- if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+ if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) &&
+ (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
/* toggle is switched whenever new aggregation starts */
if (toggle_bit != mvm->ampdu_toggle &&
- he_phy_data != HE_PHY_DATA_INVAL &&
(he_type == RATE_MCS_HE_TYPE_MU ||
he_type == RATE_MCS_HE_TYPE_SU)) {
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
- if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
- he_phy_data))
+ if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF))
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
}
}
@@ -1261,43 +1265,34 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
break;
}
- he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
-
- if (he_type == RATE_MCS_HE_TYPE_SU ||
- he_type == RATE_MCS_HE_TYPE_EXT_SU) {
- u16 val;
-
- /* LTF syms correspond to streams */
- he->data2 |=
- cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
- switch (rx_status->nss) {
- case 1:
- val = 0;
- break;
- case 2:
- val = 1;
- break;
- case 3:
- case 4:
- val = 2;
- break;
- case 5:
- case 6:
- val = 3;
- break;
- case 7:
- case 8:
- val = 4;
- break;
- default:
- WARN_ONCE(1, "invalid nss: %d\n",
- rx_status->nss);
- val = 0;
- }
+ he->data5 |= le16_encode_bits(ltf,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+}
- he->data5 |=
- le16_encode_bits(val,
- IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+static void iwl_mvm_decode_lsig(struct sk_buff *skb,
+ struct iwl_mvm_rx_phy_data *phy_data)
+{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_radiotap_lsig *lsig;
+
+ switch (phy_data->info_type) {
+ case IWL_RX_PHY_INFO_TYPE_HT:
+ case IWL_RX_PHY_INFO_TYPE_VHT_SU:
+ case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_SU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU:
+ case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_HE_TB:
+ lsig = skb_put(skb, sizeof(*lsig));
+ lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN);
+ lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->d1,
+ IWL_RX_PHY_DATA1_LSIG_LEN_MASK),
+ IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH);
+ rx_status->flag |= RX_FLAG_RADIOTAP_LSIG;
+ break;
+ default:
+ break;
}
}
@@ -1315,6 +1310,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct sk_buff *skb;
u8 crypt_len = 0, channel, energy_a, energy_b;
size_t desc_size;
+ struct iwl_mvm_rx_phy_data phy_data = {
+ .d4 = desc->phy_data4,
+ .info_type = IWL_RX_PHY_INFO_TYPE_NONE,
+ };
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
@@ -1326,6 +1325,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
energy_a = desc->v3.energy_a;
energy_b = desc->v3.energy_b;
desc_size = sizeof(*desc);
+
+ phy_data.d0 = desc->v3.phy_data0;
+ phy_data.d1 = desc->v3.phy_data1;
+ phy_data.d2 = desc->v3.phy_data2;
+ phy_data.d3 = desc->v3.phy_data3;
} else {
rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
channel = desc->v1.channel;
@@ -1333,8 +1337,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
energy_a = desc->v1.energy_a;
energy_b = desc->v1.energy_b;
desc_size = IWL_RX_DESC_SIZE_V1;
+
+ phy_data.d0 = desc->v1.phy_data0;
+ phy_data.d1 = desc->v1.phy_data1;
+ phy_data.d2 = desc->v1.phy_data2;
+ phy_data.d3 = desc->v1.phy_data3;
}
+ if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD)
+ phy_data.info_type =
+ le32_get_bits(phy_data.d1,
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
+
hdr = (void *)(pkt->data + desc_size);
/* Don't use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
@@ -1373,7 +1387,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
if (rate_n_flags & RATE_MCS_HE_MSK)
- iwl_mvm_rx_he(mvm, skb, desc, rate_n_flags, phy_info, queue);
+ iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
+ phy_info, queue);
+
+ iwl_mvm_decode_lsig(skb, &phy_data);
rx_status = IEEE80211_SKB_RXCB(skb);
@@ -1422,12 +1439,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
/* update aggregation data for monitor sake on default queue */
if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
- u64 he_phy_data;
-
- if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
- else
- he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status->ampdu_reference = mvm->ampdu_ref;
@@ -1596,6 +1607,129 @@ out:
rcu_read_unlock();
}
+void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+ struct ieee80211_rx_status *rx_status;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_rx_no_data *desc = (void *)pkt->data;
+ u32 rate_n_flags = le32_to_cpu(desc->rate);
+ u32 gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time);
+ u32 rssi = le32_to_cpu(desc->rssi);
+ u32 info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK;
+ u16 phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
+ struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb;
+ u8 channel, energy_a, energy_b;
+ struct iwl_mvm_rx_phy_data phy_data = {
+ .d0 = desc->phy_info[0],
+ .info_type = IWL_RX_PHY_INFO_TYPE_NONE,
+ };
+
+ if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+ return;
+
+ /* Currently only NDP type is supported */
+ if (info_type != RX_NO_DATA_INFO_TYPE_NDP)
+ return;
+
+ energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS;
+ energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
+ channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
+
+ phy_data.info_type =
+ le32_get_bits(desc->phy_info[1],
+ IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
+
+ /* Don't use dev_alloc_skb(), we'll have enough headroom once
+ * ieee80211_hdr pulled.
+ */
+ skb = alloc_skb(128, GFP_ATOMIC);
+ if (!skb) {
+ IWL_ERR(mvm, "alloc_skb failed\n");
+ return;
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+
+ /* 0-length PSDU */
+ rx_status->flag |= RX_FLAG_NO_PSDU;
+ /* currently this is the only type for which we get this notif */
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
+
+ /* This may be overridden by iwl_mvm_rx_he() to HE_RU */
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ case RATE_MCS_CHAN_WIDTH_20:
+ break;
+ case RATE_MCS_CHAN_WIDTH_40:
+ rx_status->bw = RATE_INFO_BW_40;
+ break;
+ case RATE_MCS_CHAN_WIDTH_80:
+ rx_status->bw = RATE_INFO_BW_80;
+ break;
+ case RATE_MCS_CHAN_WIDTH_160:
+ rx_status->bw = RATE_INFO_BW_160;
+ break;
+ }
+
+ if (rate_n_flags & RATE_MCS_HE_MSK)
+ iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
+ phy_info, queue);
+
+ iwl_mvm_decode_lsig(skb, &phy_data);
+
+ rx_status->device_timestamp = gp2_on_air_rise;
+ rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
+ rx_status->freq = ieee80211_channel_to_frequency(channel,
+ rx_status->band);
+ iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
+ energy_b);
+
+ rcu_read_lock();
+
+ if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+ rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
+ if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+ RATE_MCS_STBC_POS;
+ rx_status->nss =
+ ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+ RATE_VHT_MCS_NSS_POS) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+ if (rate_n_flags & RATE_MCS_BF_MSK)
+ rx_status->enc_flags |= RX_ENC_FLAG_BF;
+ } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) {
+ int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
+
+ if (WARN(rate < 0 || rate > 0xFF,
+ "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band)) {
+ kfree_skb(skb);
+ goto out;
+ }
+ rx_status->rate_idx = rate;
+ }
+
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+out:
+ rcu_read_unlock();
+}
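
A side note on the RSSI parsing in iwl_mvm_rx_monitor_ndp() above: the
firmware packs both chain energies and the channel number into the single
32-bit rssi word, and the driver recovers each field with a mask/shift
pair. A minimal sketch of that decode pattern; the field layout below is
illustrative only, the authoritative RX_NO_DATA_* masks live in the
firmware API headers:

	/* Illustrative layout: bits 0-7 chain A energy, bits 8-15
	 * chain B energy, bits 16-23 channel number.
	 */
	#define DEMO_CHAIN_A_MSK	0x000000ff
	#define DEMO_CHAIN_A_POS	0
	#define DEMO_CHAIN_B_MSK	0x0000ff00
	#define DEMO_CHAIN_B_POS	8
	#define DEMO_CHANNEL_MSK	0x00ff0000
	#define DEMO_CHANNEL_POS	16

	static void demo_decode_rssi(u32 rssi, u8 *energy_a, u8 *energy_b,
				     u8 *channel)
	{
		*energy_a = (rssi & DEMO_CHAIN_A_MSK) >> DEMO_CHAIN_A_POS;
		*energy_b = (rssi & DEMO_CHAIN_B_MSK) >> DEMO_CHAIN_B_POS;
		*channel  = (rssi & DEMO_CHANNEL_MSK) >> DEMO_CHANNEL_POS;
	}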
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index cfb784fea77b..86d598d5b68f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -205,9 +205,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
{
u32 tx_ant;
- mvm->scan_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
- mvm->scan_last_antenna_idx);
+ iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx);
tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
if (band == NL80211_BAND_2GHZ && !no_cck)
@@ -1895,6 +1893,8 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
mvm->last_ebs_successful = false;
mvm->scan_uid_status[uid] = 0;
+
+ iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_SCAN_COMPLETE);
}
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 1887d2b9f185..e28009832da0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -319,9 +319,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
- spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
- spin_unlock_bh(&mvm->queue_info_lock);
rcu_read_lock();
@@ -372,25 +370,17 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
return -EINVAL;
if (iwl_mvm_has_new_tx_api(mvm)) {
- spin_lock_bh(&mvm->queue_info_lock);
-
if (remove_mac_queue)
mvm->hw_queue_to_mac80211[queue] &=
~BIT(mac80211_queue);
- spin_unlock_bh(&mvm->queue_info_lock);
-
iwl_trans_txq_free(mvm->trans, queue);
return 0;
}
- spin_lock_bh(&mvm->queue_info_lock);
-
- if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
- spin_unlock_bh(&mvm->queue_info_lock);
+ if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
return 0;
- }
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
@@ -426,10 +416,8 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
mvm->hw_queue_to_mac80211[queue]);
/* If the queue is still enabled - nothing left to do in this func */
- if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
- spin_unlock_bh(&mvm->queue_info_lock);
+ if (cmd.action == SCD_CFG_ENABLE_QUEUE)
return 0;
- }
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tid = mvm->queue_info[queue].txq_tid;
@@ -448,8 +436,6 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
mvm->queue_info[queue].reserved = false;
- spin_unlock_bh(&mvm->queue_info_lock);
-
iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
@@ -474,10 +460,8 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
- spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- spin_unlock_bh(&mvm->queue_info_lock);
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
@@ -516,10 +500,8 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
- spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- spin_unlock_bh(&mvm->queue_info_lock);
rcu_read_lock();
@@ -545,6 +527,16 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
rcu_read_unlock();
+ /*
+ * The TX path may have been using this TXQ_ID from the tid_data,
+ * so make sure it's no longer running so that we can safely reuse
+ * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
+ * above, but nothing guarantees we've stopped using them. Thus,
+ * without this, we could get to iwl_mvm_disable_txq() and remove
+ * the queue while still sending frames to it.
+ */
+ synchronize_net();
+
return disable_agg_tids;
}
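
The synchronize_net() added above is the classic unpublish-then-wait
ordering: make the queue id invisible to new TX attempts first, wait for
every CPU already inside the TX path to leave it, and only then tear the
queue down. A hedged sketch of that ordering with hypothetical names (the
real code works on tid_data->txq_id and iwl_mvm_disable_txq()):

	/* Hypothetical example, not the driver's API. */
	static void demo_teardown_txq(struct demo_txq *q)
	{
		WRITE_ONCE(q->id, DEMO_INVALID_QUEUE);	/* 1. unpublish */
		synchronize_net();	/* 2. wait out in-flight TX users */
		demo_free_txq(q);	/* 3. safe: nobody can reach it now */
	}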
@@ -562,11 +554,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
- spin_lock_bh(&mvm->queue_info_lock);
txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
tid = mvm->queue_info[queue].txq_tid;
- spin_unlock_bh(&mvm->queue_info_lock);
same_sta = sta_id == new_sta_id;
@@ -610,7 +600,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
* by the inactivity checker.
*/
lockdep_assert_held(&mvm->mutex);
- lockdep_assert_held(&mvm->queue_info_lock);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
@@ -696,10 +685,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
* value 3 and VO with value 0, so to check if ac X is lower than ac Y
* we need to check if the numerical value of X is LARGER than that of Y.
*/
- spin_lock_bh(&mvm->queue_info_lock);
if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
- spin_unlock_bh(&mvm->queue_info_lock);
-
IWL_DEBUG_TX_QUEUES(mvm,
"No redirection needed on TXQ #%d\n",
queue);
@@ -711,7 +697,6 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.tid = mvm->queue_info[queue].txq_tid;
mq = mvm->hw_queue_to_mac80211[queue];
shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
- spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
queue, iwl_mvm_ac_to_tx_fifo[ac]);
@@ -737,9 +722,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
/* Update the TID "owner" of the queue */
- spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].txq_tid = tid;
- spin_unlock_bh(&mvm->queue_info_lock);
/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
@@ -748,9 +731,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
/* Update AC marking of the queue */
- spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].mac80211_ac = ac;
- spin_unlock_bh(&mvm->queue_info_lock);
/*
* Mark queue as shared in transport if shared
@@ -773,7 +754,7 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
{
int i;
- lockdep_assert_held(&mvm->queue_info_lock);
+ lockdep_assert_held(&mvm->mutex);
/* This should not be hit with new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
@@ -853,11 +834,8 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
{
bool enable_queue = true;
- spin_lock_bh(&mvm->queue_info_lock);
-
/* Make sure this TID isn't already enabled */
if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
- spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
queue, tid);
return false;
@@ -893,8 +871,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
queue, mvm->queue_info[queue].tid_bitmap,
mvm->hw_queue_to_mac80211[queue]);
- spin_unlock_bh(&mvm->queue_info_lock);
-
return enable_queue;
}
@@ -949,9 +925,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;
- spin_lock_bh(&mvm->queue_info_lock);
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- spin_unlock_bh(&mvm->queue_info_lock);
if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
return;
@@ -968,9 +942,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
return;
}
- spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].txq_tid = tid;
- spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
queue, tid);
}
@@ -992,10 +964,8 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
lockdep_assert_held(&mvm->mutex);
- spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
- spin_unlock_bh(&mvm->queue_info_lock);
/* Find TID for queue, and make sure it is the only one on the queue */
tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
@@ -1052,9 +1022,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
}
}
- spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
- spin_unlock_bh(&mvm->queue_info_lock);
}
/*
@@ -1073,7 +1041,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
int tid;
lockdep_assert_held(&mvmsta->lock);
- lockdep_assert_held(&mvm->queue_info_lock);
+ lockdep_assert_held(&mvm->mutex);
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return false;
@@ -1174,8 +1142,6 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
if (iwl_mvm_has_new_tx_api(mvm))
return -ENOSPC;
- spin_lock_bh(&mvm->queue_info_lock);
-
rcu_read_lock();
/* we skip the CMD queue below by starting at 1 */
@@ -1230,12 +1196,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- /* this isn't so nice, but works OK due to the way we loop */
- spin_unlock(&mvm->queue_info_lock);
-
- /* and we need this locking order */
- spin_lock(&mvmsta->lock);
- spin_lock(&mvm->queue_info_lock);
+ spin_lock_bh(&mvmsta->lock);
ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
inactive_tid_bitmap,
&unshare_queues,
@@ -1243,11 +1204,10 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
if (ret >= 0 && free_queue < 0)
free_queue = ret;
/* only unlock sta lock - we still need the queue info lock */
- spin_unlock(&mvmsta->lock);
+ spin_unlock_bh(&mvmsta->lock);
}
rcu_read_unlock();
- spin_unlock_bh(&mvm->queue_info_lock);
/* Reconfigure queues requiring reconfiguration */
for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
@@ -1294,10 +1254,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
spin_lock_bh(&mvmsta->lock);
tfd_queue_mask = mvmsta->tfd_queue_msk;
+ ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
spin_unlock_bh(&mvmsta->lock);
- spin_lock_bh(&mvm->queue_info_lock);
-
/*
* Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
* exists
@@ -1327,12 +1286,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) {
- spin_unlock_bh(&mvm->queue_info_lock);
-
/* try harder - perhaps kill an inactive queue */
queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
-
- spin_lock_bh(&mvm->queue_info_lock);
}
/* No free queue - we'll have to share */
@@ -1353,8 +1308,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (queue > 0 && !shared_queue)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
- spin_unlock_bh(&mvm->queue_info_lock);
-
/* This shouldn't happen - out of queues */
if (WARN_ON(queue <= 0)) {
IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
@@ -1388,13 +1341,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
}
}
- ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
ssn, &cfg, wdg_timeout);
- if (inc_ssn) {
- ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
- le16_add_cpu(&hdr->seq_ctrl, 0x10);
- }
/*
* Mark queue as shared in transport if shared
@@ -1411,8 +1359,10 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
* this ra/tid in our Tx path since we stop the Qdisc when we
* need to allocate a new TFD queue.
*/
- if (inc_ssn)
+ if (inc_ssn) {
mvmsta->tid_data[tid].seq_number += 0x10;
+ ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
+ }
mvmsta->tid_data[tid].txq_id = queue;
mvmsta->tfd_queue_msk |= BIT(queue);
queue_state = mvmsta->tid_data[tid].state;
@@ -1556,8 +1506,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
/* run the general cleanup/unsharing of queues */
iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
- spin_lock_bh(&mvm->queue_info_lock);
-
/* Make sure we have free resources for this STA */
if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
!mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
@@ -1569,19 +1517,15 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) {
- spin_unlock_bh(&mvm->queue_info_lock);
/* try again - this time kick out a queue if needed */
queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
if (queue < 0) {
IWL_ERR(mvm, "No available queues for new station\n");
return -ENOSPC;
}
- spin_lock_bh(&mvm->queue_info_lock);
}
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
- spin_unlock_bh(&mvm->queue_info_lock);
-
mvmsta->reserved_queue = queue;
IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
@@ -1822,6 +1766,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (iwl_mvm_has_tlc_offload(mvm))
iwl_mvm_rs_add_sta(mvm, mvm_sta);
+ iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
+
update_fw:
ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
if (ret)
@@ -2004,18 +1950,14 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* is still marked as IWL_MVM_QUEUE_RESERVED, and
* should be manually marked as free again
*/
- spin_lock_bh(&mvm->queue_info_lock);
status = &mvm->queue_info[reserved_txq].status;
if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
(*status != IWL_MVM_QUEUE_FREE),
"sta_id %d reserved txq %d status %d",
- sta_id, reserved_txq, *status)) {
- spin_unlock_bh(&mvm->queue_info_lock);
+ sta_id, reserved_txq, *status))
return -EINVAL;
- }
*status = IWL_MVM_QUEUE_FREE;
- spin_unlock_bh(&mvm->queue_info_lock);
}
if (vif->type == NL80211_IFTYPE_STATION &&
@@ -2873,8 +2815,6 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO;
}
- spin_lock(&mvm->queue_info_lock);
-
/*
* Note the possible cases:
* 1. An enabled TXQ - TXQ needs to become agg'ed
@@ -2889,7 +2829,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (txq_id < 0) {
ret = txq_id;
IWL_ERR(mvm, "Failed to allocate agg queue\n");
- goto release_locks;
+ goto out;
}
/* TXQ hasn't yet been enabled, so mark it only as reserved */
@@ -2900,11 +2840,9 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_DEBUG_TX_QUEUES(mvm,
"Can't start tid %d agg on shared queue!\n",
tid);
- goto release_locks;
+ goto out;
}
- spin_unlock(&mvm->queue_info_lock);
-
IWL_DEBUG_TX_QUEUES(mvm,
"AGG for tid %d will be on queue #%d\n",
tid, txq_id);
@@ -2935,10 +2873,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
ret = 0;
- goto out;
-release_locks:
- spin_unlock(&mvm->queue_info_lock);
out:
spin_unlock_bh(&mvmsta->lock);
@@ -3007,9 +2942,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
- spin_lock_bh(&mvm->queue_info_lock);
queue_status = mvm->queue_info[queue].status;
- spin_unlock_bh(&mvm->queue_info_lock);
/* Maybe there is no need to even alloc a queue... */
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
@@ -3055,9 +2988,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
/* No need to mark as reserved */
- spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
- spin_unlock_bh(&mvm->queue_info_lock);
out:
/*
@@ -3083,10 +3014,11 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
{
u16 txq_id = tid_data->txq_id;
+ lockdep_assert_held(&mvm->mutex);
+
if (iwl_mvm_has_new_tx_api(mvm))
return;
- spin_lock_bh(&mvm->queue_info_lock);
/*
* The TXQ is marked as reserved only if no traffic came through yet
* This means no traffic has been sent on this TID (agg'd or not), so
@@ -3098,8 +3030,6 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
}
-
- spin_unlock_bh(&mvm->queue_info_lock);
}
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index de1a0a2d8723..d52cd888f77d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -397,6 +397,9 @@ struct iwl_mvm_rxq_dup_data {
* @ptk_pn: per-queue PTK PN data structures
* @dup_data: per queue duplicate packet detection data
* @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
+ * @tx_ant: the index of the antenna to use for data tx to this station. Only
+ * used during connection establishment (e.g. for the 4-way handshake
+ * exchange).
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -439,6 +442,7 @@ struct iwl_mvm_sta {
u8 agg_tids;
u8 sleep_tx_count;
u8 avg_energy;
+ u8 tx_ant;
};
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index ec57682efe54..995fe2a6abbb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -302,13 +302,30 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
offload_assist));
}
+static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
+{
+ if (info->band == NL80211_BAND_2GHZ &&
+ !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
+ return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
+
+ if (sta && ieee80211_is_data(fc)) {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
+ }
+
+ return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+}
+
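
iwl_mvm_get_tx_ant() above encodes a three-level precedence: on 2.4 GHz
with the shared antenna owned by Bluetooth, force the non-shared antenna;
for data frames to a known station, use the per-station antenna toggled
during connection establishment; otherwise fall back to the rotating
management antenna. The BT-coex branch shifts a ready-made antenna mask,
while the other two branches pack a single index first:

	/* Sketch of the packing used above; assumes the
	 * BIT()/RATE_MCS_ANT_POS convention visible in the patch.
	 */
	static u32 demo_pack_ant_idx(u8 ant_idx)
	{
		return BIT(ant_idx) << RATE_MCS_ANT_POS;
	}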
static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta)
{
int rate_idx;
u8 rate_plcp;
- u32 rate_flags;
+ u32 rate_flags = 0;
/* HT rate doesn't make sense for a non data frame */
WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
@@ -332,13 +349,6 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
/* Get PLCP rate for tx_cmd->rate_n_flags */
rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
- if (info->band == NL80211_BAND_2GHZ &&
- !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
- rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
- else
- rate_flags =
- BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
-
/* Set CCK flag as needed */
if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
rate_flags |= RATE_MCS_CCK_MSK;
@@ -346,6 +356,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
return (u32)rate_plcp | rate_flags;
}
+static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta, __le16 fc)
+{
+ return iwl_mvm_get_tx_rate(mvm, info, sta) |
+ iwl_mvm_get_tx_ant(mvm, info, sta, fc);
+}
+
/*
* Sets the fields in the Tx cmd that are rate related
*/
@@ -373,20 +391,21 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
*/
if (ieee80211_is_data(fc) && sta) {
- tx_cmd->initial_rate_index = 0;
- tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
- return;
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
+ tx_cmd->initial_rate_index = 0;
+ tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+ return;
+ }
} else if (ieee80211_is_back_req(fc)) {
tx_cmd->tx_flags |=
cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
}
- mvm->mgmt_last_antenna_idx =
- iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
- mvm->mgmt_last_antenna_idx);
-
/* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+ tx_cmd->rate_n_flags =
+ cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
}
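
The net effect of this hunk: firmware rate scaling (TX_CMD_FLG_STA_RATE)
now applies only once the station is authorized, so frames sent during
the 4-way handshake keep a driver-selected rate and antenna. A condensed
restatement of the gate, a sketch using the mac80211 symbols from the
patch (the driver checks its cached mvmsta->sta_state):

	/* Sketch only; mirrors the condition above. */
	static bool demo_use_fw_rate_scaling(struct ieee80211_sta *sta,
					     __le16 fc,
					     enum ieee80211_sta_state state)
	{
		return sta && ieee80211_is_data(fc) &&
		       state >= IEEE80211_STA_AUTHORIZED;
	}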
static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
@@ -491,6 +510,8 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 offload_assist = 0;
u32 rate_n_flags = 0;
u16 flags = 0;
+ struct iwl_mvm_sta *mvmsta = sta ?
+ iwl_mvm_sta_from_mac80211(sta) : NULL;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
@@ -510,10 +531,16 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
if (!info->control.hw_key)
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
- /* For data packets rate info comes from the fw */
- if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
+ /*
+ * For data packets, rate info comes from the fw. Only
+ * set the rate/antenna during connection establishment.
+ */
+ if (sta && (!ieee80211_is_data(hdr->frame_control) ||
+ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) {
flags |= IWL_TX_FLAGS_CMD_RATE;
- rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
+ rate_n_flags =
+ iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
+ hdr->frame_control);
}
if (mvm->trans->cfg->device_family >=
@@ -681,22 +708,12 @@ out:
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_info info;
struct iwl_device_cmd *dev_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
__le16 fc = hdr->frame_control;
- int queue;
-
- /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
- * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
- * queue. STATION (HS2.0) uses the auxiliary context of the FW,
- * and hence needs to be sent on the aux queue
- */
- if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
- skb_info->control.vif->type == NL80211_IFTYPE_STATION)
- skb_info->hw_queue = mvm->aux_queue;
+ int queue = -1;
memcpy(&info, skb->cb, sizeof(info));
@@ -708,18 +725,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
info.hw_queue != info.control.vif->cab_queue)))
return -1;
- queue = info.hw_queue;
-
- /*
- * If the interface on which the frame is sent is the P2P_DEVICE
- * or an AP/GO interface use the broadcast station associated
- * with it; otherwise if the interface is a managed interface
- * use the AP station associated with it for multicast traffic
- * (this is not possible for unicast packets as a TLDS discovery
- * response are sent without a station entry); otherwise use the
- * AUX station.
- */
- sta_id = mvm->aux_sta.sta_id;
if (info.control.vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info.control.vif);
@@ -734,20 +739,28 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
hdr->frame_control);
- if (queue < 0)
- return -1;
- } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
- is_multicast_ether_addr(hdr->addr1)) {
- u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
- if (ap_sta_id != IWL_MVM_INVALID_STA)
- sta_id = ap_sta_id;
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
queue = mvm->snif_queue;
sta_id = mvm->snif_sta.sta_id;
+ } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
+ info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) {
+ /*
+ * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
+ * that can be used in 2 different types of vifs, P2P &
+ * STATION.
+ * P2P uses the offchannel queue.
+ * STATION (HS2.0) uses the auxiliary context of the FW,
+ * and hence needs to be sent on the aux queue.
+ */
+ sta_id = mvm->aux_sta.sta_id;
+ queue = mvm->aux_queue;
}
}
+ if (queue < 0)
+ return -1;
+
if (unlikely(ieee80211_is_probe_resp(fc)))
iwl_mvm_probe_resp_set_noa(mvm, skb);
@@ -1160,11 +1173,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
* If we have timed-out TIDs - schedule the worker that will
* reconfig the queues and update them
*
- * Note that the mvm->queue_info_lock isn't being taken here in
- * order to not serialize the TX flow. This isn't dangerous
- * because scheduling mvm->add_stream_wk can't ruin the state,
- * and if we DON'T schedule it due to some race condition then
- * next TX we get here we will.
+ * Note that no lock is taken here in order to not serialize
+ * the TX flow. This isn't dangerous because scheduling
+ * mvm->add_stream_wk can't ruin the state, and if we DON'T
+ * schedule it due to some race condition then next TX we get
+ * here we will.
*/
if (unlikely(mvm->queue_info[txq_id].status ==
IWL_MVM_QUEUE_SHARED &&
@@ -1451,7 +1464,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_get_agg_status(mvm, tx_resp);
u32 status = le16_to_cpu(agg_status->status);
u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
- struct iwl_mvm_sta *mvmsta;
struct sk_buff_head skbs;
u8 skb_freed = 0;
u8 lq_color;
@@ -1501,6 +1513,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
break;
}
+ if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
+ ieee80211_is_mgmt(hdr->frame_control))
+ iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
+
/*
* If we are freeing multiple frames, mark all the frames
* but the first one as acked, since they were acknowledged
@@ -1595,11 +1611,15 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
goto out;
if (!IS_ERR(sta)) {
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
iwl_mvm_tx_airtime(mvm, mvmsta,
le16_to_cpu(tx_resp->wireless_media_time));
+ if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
+ mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
+ iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);
+
if (sta->wme && tid != IWL_MGMT_TID) {
struct iwl_mvm_tid_data *tid_data =
&mvmsta->tid_data[tid];
@@ -1654,10 +1674,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
mvmsta->next_status_eosp = false;
ieee80211_sta_eosp(sta);
}
- } else {
- mvmsta = NULL;
}
-
out:
rcu_read_unlock();
}
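
Both call sites above react to a failed TX by toggling the antenna:
management frames rotate mgmt_last_antenna_idx, and pre-authorization
data frames rotate the per-station tx_ant. iwl_mvm_toggle_tx_ant() itself
is not part of this hunk; a plausible reading of its contract, based on
how it is called, is "advance to the next valid antenna index". A sketch
under that assumption only:

	/* Assumed semantics; the real helper lives in the mvm headers
	 * and may differ in detail.
	 */
	#define DEMO_MAX_ANT	3	/* illustrative antenna count */

	static void demo_toggle_tx_ant(u8 valid_ant_mask, u8 *idx)
	{
		u8 next = *idx;

		do {
			next = (next + 1) % DEMO_MAX_ANT;
		} while (!(valid_ant_mask & BIT(next)) && next != *idx);

		*idx = next;
	}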
@@ -1800,8 +1817,6 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
return;
}
- spin_lock_bh(&mvmsta->lock);
-
__skb_queue_head_init(&reclaimed_skbs);
/*
@@ -1811,6 +1826,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
*/
iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
+ spin_lock_bh(&mvmsta->lock);
+
tid_data->next_reclaimed = index;
iwl_mvm_check_ratid_empty(mvm, sta, tid);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 818e1180bbdd..d116c6ae18ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -285,6 +285,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}
+#define FW_SYSASSERT_CPU_MASK 0xf0000000
static const struct {
const char *name;
u8 num;
@@ -301,6 +302,9 @@ static const struct {
{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
{ "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
+ { "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
+ { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
@@ -312,7 +316,7 @@ static const char *desc_lookup(u32 num)
int i;
for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
- if (advanced_lookup[i].num == num)
+ if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
return advanced_lookup[i].name;
/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
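
With FW_SYSASSERT_CPU_MASK defined as 0xf0000000, the lookup now strips
the CPU identifier from the assert id before matching, so the same table
entry is found regardless of which CPU raised it. A worked example
against the 0x71 entry added above:

	u32 err_id = 0x20000071;	/* CPU nibble 0x2, code 0x71 */
	u32 code = err_id & ~FW_SYSASSERT_CPU_MASK;	/* -> 0x00000071 */
	/* desc_lookup(err_id) now returns "NMI_INTERRUPT_UMAC_FATAL";
	 * previously the CPU bits made it fall through to the
	 * ADVANCED_SYSASSERT default.
	 */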
@@ -536,6 +540,9 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+ if (table.valid)
+ mvm->fwrt.dump.rt_status = table.error_id;
+
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
@@ -618,13 +625,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;
- spin_lock_bh(&mvm->queue_info_lock);
if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
- "Trying to reconfig unallocated queue %d\n", queue)) {
- spin_unlock_bh(&mvm->queue_info_lock);
+ "Trying to reconfig unallocated queue %d\n", queue))
return -ENXIO;
- }
- spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
@@ -768,6 +771,29 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
return result;
}
+void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
+ bool low_latency, u16 mac_id)
+{
+ struct iwl_mac_low_latency_cmd cmd = {
+ .mac_id = cpu_to_le32(mac_id)
+ };
+
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
+ return;
+
+ if (low_latency) {
+ /* currently we don't care about the direction */
+ cmd.low_latency_rx = 1;
+ cmd.low_latency_tx = 1;
+ }
+
+ if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
+ MAC_CONF_GROUP, 0),
+ 0, sizeof(cmd), &cmd))
+ IWL_ERR(mvm, "Failed to send low latency command\n");
+}
+
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool low_latency,
enum iwl_mvm_low_latency_cause cause)
@@ -786,24 +812,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (low_latency == prev)
return 0;
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
- struct iwl_mac_low_latency_cmd cmd = {
- .mac_id = cpu_to_le32(mvmvif->id)
- };
-
- if (low_latency) {
- /* currently we don't care about the direction */
- cmd.low_latency_rx = 1;
- cmd.low_latency_tx = 1;
- }
- res = iwl_mvm_send_cmd_pdu(mvm,
- iwl_cmd_id(LOW_LATENCY_CMD,
- MAC_CONF_GROUP, 0),
- 0, sizeof(cmd), &cmd);
- if (res)
- IWL_ERR(mvm, "Failed to send low latency command\n");
- }
+ iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);
res = iwl_mvm_update_quotas(mvm, false, NULL);
if (res)
@@ -1372,6 +1381,7 @@ void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
{
int mac;
+ bool low_latency = false;
spin_lock_bh(&mvm->tcm.lock);
mvm->tcm.ts = jiffies;
@@ -1383,10 +1393,23 @@ void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
+
+ if (mvm->tcm.result.low_latency[mac])
+ low_latency = true;
}
/* The TCM data needs to be reset before "paused" flag changes */
smp_mb();
mvm->tcm.paused = false;
+
+ /*
+ * if the current load is not low or low latency is active, force
+ * re-evaluation to cover the case of no traffic.
+ */
+ if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW)
+ schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD);
+ else if (low_latency)
+ schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD);
+
spin_unlock_bh(&mvm->tcm.lock);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 05ed4fb88e0c..ceb3aa03d561 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -94,11 +94,14 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
cpu_to_le64(trans_pcie->rxq->bd_dma);
/* Configure debug, for integration */
- iwl_pcie_alloc_fw_monitor(trans, 0);
- prph_sc_ctrl->hwm_cfg.hwm_base_addr =
- cpu_to_le64(trans->fw_mon[0].physical);
- prph_sc_ctrl->hwm_cfg.hwm_size =
- cpu_to_le32(trans->fw_mon[0].size);
+ if (!trans->ini_valid)
+ iwl_pcie_alloc_fw_monitor(trans, 0);
+ if (trans->num_blocks) {
+ prph_sc_ctrl->hwm_cfg.hwm_base_addr =
+ cpu_to_le64(trans->fw_mon[0].physical);
+ prph_sc_ctrl->hwm_cfg.hwm_size =
+ cpu_to_le32(trans->fw_mon[0].size);
+ }
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 6f45a0303ddd..7f4aaa810ea1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -227,7 +227,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
iwl_enable_interrupts(trans);
/* Configure debug, if exists */
- if (trans->dbg_dest_tlv)
+ if (iwl_pcie_dbg_on(trans))
iwl_pcie_apply_destination(trans);
/* kick FW self load */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 9e015212c2c0..353581ccc01e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -513,6 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
+ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
@@ -832,7 +882,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
- {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index f9c4c64dee66..d6fc6ce73e0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -378,6 +378,23 @@ struct iwl_tso_hdr_page {
u8 *pos;
};
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/**
+ * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
+ * debugfs file
+ *
+ * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
+ * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
+ * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
+ * set the file can no longer be used.
+ */
+enum iwl_fw_mon_dbgfs_state {
+ IWL_FW_MON_DBGFS_STATE_CLOSED,
+ IWL_FW_MON_DBGFS_STATE_OPEN,
+ IWL_FW_MON_DBGFS_STATE_DISABLED,
+};
+#endif
+
/**
* enum iwl_shared_irq_flags - level of sharing for irq
* @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
@@ -415,6 +432,26 @@ struct iwl_self_init_dram {
};
/**
+ * struct cont_rec - continuous recording data structure
+ * @prev_wr_ptr: the last address that was read in monitor_data
+ * debugfs file
+ * @prev_wrap_cnt: the wrap count that was used during the last read in
+ * monitor_data debugfs file
+ * @state: the state of monitor_data debugfs file as described
+ * in &iwl_fw_mon_dbgfs_state enum
+ * @mutex: locked while reading from monitor_data debugfs file
+ */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+struct cont_rec {
+ u32 prev_wr_ptr;
+ u32 prev_wrap_cnt;
+ u8 state;
+ /* Used to sync monitor_data debugfs file with driver unload flow */
+ struct mutex mutex;
+};
+#endif
+
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
@@ -451,6 +488,9 @@ struct iwl_self_init_dram {
* @reg_lock: protect hw register access
* @mutex: to protect stop_device / start_fw / start_hw
* @cmd_in_flight: true when we have a host command in flight
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ * @fw_mon_data: fw continuous recording data
+#endif
* @msix_entries: array of MSI-X entries
* @msix_enabled: true if managed to enable MSI-X
* @shared_vec_mask: the type of causes the shared vector handles
@@ -538,6 +578,10 @@ struct iwl_trans_pcie {
bool cmd_hold_nic_awake;
bool ref_cmd_in_flight;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct cont_rec fw_mon_data;
+#endif
+
struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
bool msix_enabled;
u8 shared_vec_mask;
@@ -965,6 +1009,11 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
+static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
+{
+ return (trans->dbg_dest_tlv || trans->ini_valid);
+}
+
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 5bafb3f46eb8..f97aea5ffc44 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -71,6 +71,7 @@
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
+#include <linux/wait.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
@@ -923,6 +924,20 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
int i;
+ if (trans->ini_valid) {
+ if (!trans->num_blocks)
+ return;
+
+ iwl_write_prph(trans, MON_BUFF_BASE_ADDR_VER2,
+ trans->fw_mon[0].physical >>
+ MON_BUFF_SHIFT_VER2);
+ iwl_write_prph(trans, MON_BUFF_END_ADDR_VER2,
+ (trans->fw_mon[0].physical +
+ trans->fw_mon[0].size - 256) >>
+ MON_BUFF_SHIFT_VER2);
+ return;
+ }
+
IWL_INFO(trans, "Applying debug destination %s\n",
get_fw_dbg_mode_string(dest->monitor_mode));
@@ -1025,7 +1040,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
(trans->fw_mon[0].physical +
trans->fw_mon[0].size) >> 4);
}
- } else if (trans->dbg_dest_tlv) {
+ } else if (iwl_pcie_dbg_on(trans)) {
iwl_pcie_apply_destination(trans);
}
@@ -1046,7 +1061,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
IWL_DEBUG_FW(trans, "working with %s CPU\n",
image->is_dual_cpus ? "Dual" : "Single");
- if (trans->dbg_dest_tlv)
+ if (iwl_pcie_dbg_on(trans))
iwl_pcie_apply_destination(trans);
IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
@@ -1729,6 +1744,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 hpm;
int err;
lockdep_assert_held(&trans_pcie->mutex);
@@ -1739,6 +1755,17 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
return err;
}
+ hpm = iwl_trans_read_prph(trans, HPM_DEBUG);
+ if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
+ if (iwl_trans_read_prph(trans, PREG_PRPH_WPROT_0) &
+ PREG_WFPM_ACCESS) {
+ IWL_ERR(trans,
+ "Error, can not clear persistence bit\n");
+ return -EPERM;
+ }
+ iwl_trans_write_prph(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT);
+ }
+
iwl_trans_pcie_sw_reset(trans);
err = iwl_pcie_apm_init(trans);
@@ -2697,6 +2724,137 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
return count;
}
+static int iwl_dbgfs_monitor_data_open(struct inode *inode,
+ struct file *file)
+{
+ struct iwl_trans *trans = inode->i_private;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!trans->dbg_dest_tlv ||
+ trans->dbg_dest_tlv->monitor_mode != EXTERNAL_MODE) {
+ IWL_ERR(trans, "Debug destination is not set to DRAM\n");
+ return -ENOENT;
+ }
+
+ if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
+ return -EBUSY;
+
+ trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
+ return simple_open(inode, file);
+}
+
+static int iwl_dbgfs_monitor_data_release(struct inode *inode,
+ struct file *file)
+{
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
+
+ if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
+ trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
+ return 0;
+}
+
+static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
+ void *buf, ssize_t *size,
+ ssize_t *bytes_copied)
+{
+ int buf_size_left = count - *bytes_copied;
+
+ buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
+ if (*size > buf_size_left)
+ *size = buf_size_left;
+
+ *size -= copy_to_user(user_buf, buf, *size);
+ *bytes_copied += *size;
+
+ if (buf_size_left == *size)
+ return true;
+ return false;
+}
+
+static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_trans *trans = file->private_data;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ void *cpu_addr = (void *)trans->fw_mon[0].block, *curr_buf;
+ struct cont_rec *data = &trans_pcie->fw_mon_data;
+ u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
+ ssize_t size, bytes_copied = 0;
+ bool b_full;
+
+ if (trans->dbg_dest_tlv) {
+ write_ptr_addr =
+ le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+ wrap_cnt_addr = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+ } else {
+ write_ptr_addr = MON_BUFF_WRPTR;
+ wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
+ }
+
+ if (unlikely(!trans->dbg_rec_on))
+ return 0;
+
+ mutex_lock(&data->mutex);
+ if (data->state == IWL_FW_MON_DBGFS_STATE_DISABLED) {
+ mutex_unlock(&data->mutex);
+ return 0;
+ }
+
+ /* write_ptr position in bytes rather than DW */
+ write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
+ wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
+
+ if (data->prev_wrap_cnt == wrap_cnt) {
+ size = write_ptr - data->prev_wr_ptr;
+ curr_buf = cpu_addr + data->prev_wr_ptr;
+ b_full = iwl_write_to_user_buf(user_buf, count,
+ curr_buf, &size,
+ &bytes_copied);
+ data->prev_wr_ptr += size;
+
+ } else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
+ write_ptr < data->prev_wr_ptr) {
+ size = trans->fw_mon[0].size - data->prev_wr_ptr;
+ curr_buf = cpu_addr + data->prev_wr_ptr;
+ b_full = iwl_write_to_user_buf(user_buf, count,
+ curr_buf, &size,
+ &bytes_copied);
+ data->prev_wr_ptr += size;
+
+ if (!b_full) {
+ size = write_ptr;
+ b_full = iwl_write_to_user_buf(user_buf, count,
+ cpu_addr, &size,
+ &bytes_copied);
+ data->prev_wr_ptr = size;
+ data->prev_wrap_cnt++;
+ }
+ } else {
+ if (data->prev_wrap_cnt == wrap_cnt - 1 &&
+ write_ptr > data->prev_wr_ptr)
+ IWL_WARN(trans,
+ "write pointer passed previous write pointer, start copying from the beginning\n");
+ else if (!unlikely(data->prev_wrap_cnt == 0 &&
+ data->prev_wr_ptr == 0))
+ IWL_WARN(trans,
+ "monitor data is out of sync, start copying from the beginning\n");
+
+ size = write_ptr;
+ b_full = iwl_write_to_user_buf(user_buf, count,
+ cpu_addr, &size,
+ &bytes_copied);
+ data->prev_wr_ptr = size;
+ data->prev_wrap_cnt = wrap_cnt;
+ }
+
+ mutex_unlock(&data->mutex);
+
+ return bytes_copied;
+}
+
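
The read handler above treats the DRAM monitor as a ring buffer keyed by
the (wrap count, write pointer) pair kept from the previous read. A
condensed restatement of the three cases, with an injected copy callback
standing in for iwl_write_to_user_buf(); illustrative only, not the
driver's code:

	/* Mirrors the wrap handling above. */
	static size_t demo_mon_read(u32 prev_wrap, u32 prev_ptr,
				    u32 wrap, u32 ptr, u32 buf_size,
				    size_t (*copy)(u32 from, u32 to))
	{
		if (wrap == prev_wrap)			/* no wrap yet */
			return copy(prev_ptr, ptr);
		if (wrap == prev_wrap + 1 && ptr < prev_ptr) /* wrapped once */
			return copy(prev_ptr, buf_size) + copy(0, ptr);
		return copy(0, ptr);	/* fell out of sync: restart */
	}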
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
@@ -2704,6 +2862,12 @@ DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
+static const struct file_operations iwl_dbgfs_monitor_data_ops = {
+ .read = iwl_dbgfs_monitor_data_read,
+ .open = iwl_dbgfs_monitor_data_open,
+ .release = iwl_dbgfs_monitor_data_release,
+};
+
/* Create the debugfs files and directories */
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
@@ -2715,12 +2879,23 @@ int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(csr, dir, 0200);
DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
DEBUGFS_ADD_FILE(rfkill, dir, 0600);
+ DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
return 0;
err:
IWL_ERR(trans, "failed to create the trans debugfs entry\n");
return -ENOMEM;
}
+
+static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct cont_rec *data = &trans_pcie->fw_mon_data;
+
+ mutex_lock(&data->mutex);
+ data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
+ mutex_unlock(&data->mutex);
+}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
@@ -2854,6 +3029,34 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
return monitor_len;
}
+static void
+iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
+ struct iwl_fw_error_dump_fw_mon *fw_mon_data)
+{
+ u32 base, write_ptr, wrap_cnt;
+
+ /* If there was a dest TLV - use the values from there */
+ if (trans->ini_valid) {
+ base = MON_BUFF_BASE_ADDR_VER2;
+ write_ptr = MON_BUFF_WRPTR_VER2;
+ wrap_cnt = MON_BUFF_CYCLE_CNT_VER2;
+ } else if (trans->dbg_dest_tlv) {
+ write_ptr = le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+ wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+ base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+ } else {
+ base = MON_BUFF_BASE_ADDR;
+ write_ptr = MON_BUFF_WRPTR;
+ wrap_cnt = MON_BUFF_CYCLE_CNT;
+ }
+ fw_mon_data->fw_mon_wr_ptr =
+ cpu_to_le32(iwl_read_prph(trans, write_ptr));
+ fw_mon_data->fw_mon_cycle_cnt =
+ cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+ fw_mon_data->fw_mon_base_ptr =
+ cpu_to_le32(iwl_read_prph(trans, base));
+}
+
static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
struct iwl_fw_error_dump_data **data,
@@ -2863,30 +3066,14 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
if ((trans->num_blocks &&
trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
- trans->dbg_dest_tlv) {
+ (trans->dbg_dest_tlv && !trans->ini_valid) ||
+ (trans->ini_valid && trans->num_blocks)) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
- u32 base, write_ptr, wrap_cnt;
-
- /* If there was a dest TLV - use the values from there */
- if (trans->dbg_dest_tlv) {
- write_ptr =
- le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
- wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
- base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
- } else {
- base = MON_BUFF_BASE_ADDR;
- write_ptr = MON_BUFF_WRPTR;
- wrap_cnt = MON_BUFF_CYCLE_CNT;
- }
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
fw_mon_data = (void *)(*data)->data;
- fw_mon_data->fw_mon_wr_ptr =
- cpu_to_le32(iwl_read_prph(trans, write_ptr));
- fw_mon_data->fw_mon_cycle_cnt =
- cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
- fw_mon_data->fw_mon_base_ptr =
- cpu_to_le32(iwl_read_prph(trans, base));
+
+ iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
len += sizeof(**data) + sizeof(*fw_mon_data);
if (trans->num_blocks) {
@@ -2896,6 +3083,7 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
monitor_len = trans->fw_mon[0].size;
} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+ u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
/*
* Update pointers to reflect actual values after
* shifting
@@ -2978,7 +3166,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len)
static struct iwl_trans_dump_data
*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
- const struct iwl_fw_dbg_trigger_tlv *trigger)
+ u32 dump_mask)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
@@ -2990,7 +3178,10 @@ static struct iwl_trans_dump_data
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
!trans->cfg->mq_rx_supported &&
- trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
+ dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
+
+ if (!dump_mask)
+ return NULL;
/* transport dump header */
len = sizeof(*dump_data);
@@ -3002,11 +3193,7 @@ static struct iwl_trans_dump_data
/* FW monitor */
monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
- if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
- if (!(trans->dbg_dump_mask &
- BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
- return NULL;
-
+ if (dump_mask == BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) {
dump_data = vzalloc(len);
if (!dump_data)
return NULL;
@@ -3019,11 +3206,11 @@ static struct iwl_trans_dump_data
}
/* CSR registers */
- if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
len += sizeof(*data) + IWL_CSR_TO_DUMP;
/* FH registers */
- if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
if (trans->cfg->gen2)
len += sizeof(*data) +
(FH_MEM_UPPER_BOUND_GEN2 -
@@ -3048,8 +3235,7 @@ static struct iwl_trans_dump_data
}
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2 &&
- trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
+ if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
len += sizeof(*data) +
sizeof(struct iwl_fw_error_dump_paging) +
@@ -3062,7 +3248,7 @@ static struct iwl_trans_dump_data
len = 0;
data = (void *)dump_data->data;
- if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
u16 tfd_size = trans_pcie->tfd_size;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
@@ -3096,16 +3282,15 @@ static struct iwl_trans_dump_data
data = iwl_fw_error_next_data(data);
}
- if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
len += iwl_trans_pcie_dump_csr(trans, &data);
- if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
len += iwl_trans_pcie_fh_regs_dump(trans, &data);
if (dump_rbs)
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
/* Paged memory for gen2 HW */
- if (trans->cfg->gen2 &&
- trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
+ if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
struct iwl_fw_error_dump_paging *paging;
dma_addr_t addr =
@@ -3125,7 +3310,7 @@ static struct iwl_trans_dump_data
len += sizeof(*data) + sizeof(*paging) + page_len;
}
}
- if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
dump_data->len = len;
@@ -3202,6 +3387,9 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
+#endif
};
static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
@@ -3221,6 +3409,9 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.txq_free = iwl_trans_pcie_dyn_txq_free,
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
+#endif
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -3392,8 +3583,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
#if IS_ENABLED(CONFIG_IWLMVM)
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
- if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
- CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
+ if (cfg == &iwl22000_2ax_cfg_hr) {
+ if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
+ trans->cfg = &iwl22000_2ax_cfg_hr;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
+ trans->cfg = &iwl22000_2ax_cfg_jf;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
+ IWL_ERR(trans, "RF ID HRCDB is not supported\n");
+ ret = -EINVAL;
+ goto out_no_pci;
+ } else {
+ IWL_ERR(trans, "Unrecognized RF ID 0x%08x\n",
+ CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id));
+ ret = -EINVAL;
+ goto out_no_pci;
+ }
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
u32 hw_status;
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
@@ -3454,6 +3663,11 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
+ mutex_init(&trans_pcie->fw_mon_data.mutex);
+#endif
+
return trans;
out_free_ict:
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index e880f69eac26..156ca1b1f621 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -238,7 +238,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
{
#ifdef CONFIG_INET
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
@@ -583,18 +583,6 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
- struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen3->len);
- } else {
- struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen2->len);
- }
-
if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
@@ -632,6 +620,18 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
return -1;
}
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ } else {
+ struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
iwl_pcie_gen2_get_num_tbs(trans, tfd));
@@ -1228,8 +1228,7 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
txq->write_ptr = wr_ptr;
- iwl_write_direct32(trans, HBUS_TARG_WRPTR,
- (txq->write_ptr) | (qid << 16));
+
IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
iwl_free_resp(hcmd);
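
Two things happen in the tx-gen2.c hunks: the command length is now read after the TFD has been built, presumably so it reflects any adjustment made while building, and the payload is cast to the generation-specific layout before the little-endian length field is read. A standalone sketch of the latter, with made-up struct layouts rather than the real command formats:

/* Sketch: pick the payload length field by device generation. */
#include <stdint.h>

struct tx_cmd_v2 { uint16_t len; /* little-endian on the wire */ };
struct tx_cmd_v3 { uint16_t len; /* little-endian on the wire */ };

static inline uint16_t le16_to_cpu_sketch(uint16_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return (uint16_t)((v << 8) | (v >> 8));
#else
	return v;
#endif
}

static uint16_t cmd_len(const void *payload, int is_v3)
{
	if (is_v3)
		return le16_to_cpu_sketch(((const struct tx_cmd_v3 *)payload)->len);
	return le16_to_cpu_sketch(((const struct tx_cmd_v2 *)payload)->len);
}
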
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 87b7225fe289..ee990a7a5411 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1160,10 +1160,11 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
*/
iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
}
- spin_lock_bh(&txq->lock);
if (iwl_queue_space(trans, txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
+
+ spin_lock_bh(&txq->lock);
}
if (txq->read_ptr == txq->write_ptr) {
@@ -1245,11 +1246,11 @@ void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
(!iwl_queue_used(txq, idx))) {
- IWL_ERR(trans,
- "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, idx,
- trans->cfg->base_params->max_tfd_queue_size,
- txq->write_ptr, txq->read_ptr);
+ WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used),
+ "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, idx,
+ trans->cfg->base_params->max_tfd_queue_size,
+ txq->write_ptr, txq->read_ptr);
return;
}
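
The reclaim hunk above replaces an unconditional IWL_ERR() with WARN_ONCE() keyed on the queue still being marked used, so an out-of-range index seen during queue teardown is silent while a genuine inconsistency still produces one loud warning. Roughly what a warn-once wrapper does, as a userspace sketch (the kernel macro also emits a stack trace):

#include <stdbool.h>
#include <stdio.h>

/* Print only if cond holds, and at most once per call site. */
#define warn_once(cond, ...)					\
	({							\
		static bool __warned;				\
		bool __c = (cond);				\
		if (__c && !__warned) {				\
			__warned = true;			\
			fprintf(stderr, __VA_ARGS__);		\
		}						\
		__c;						\
	})
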
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 012930d35434..b0e7c0a0617e 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -690,7 +690,7 @@ static int prism2_open(struct net_device *dev)
/* Master radio interface is needed for all operations, so open
* it automatically when any virtual net_device is opened. */
local->master_dev_auto_open = 1;
- dev_open(local->dev);
+ dev_open(local->dev, NULL);
}
netif_device_attach(dev);
diff --git a/drivers/net/wireless/intersil/orinoco/mic.c b/drivers/net/wireless/intersil/orinoco/mic.c
index 08bc7822f820..709d9ab3e7bc 100644
--- a/drivers/net/wireless/intersil/orinoco/mic.c
+++ b/drivers/net/wireless/intersil/orinoco/mic.c
@@ -16,8 +16,7 @@
/********************************************************************/
int orinoco_mic_init(struct orinoco_private *priv)
{
- priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->tx_tfm_mic)) {
printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
"crypto API michael_mic\n");
@@ -25,8 +24,7 @@ int orinoco_mic_init(struct orinoco_private *priv)
return -ENOMEM;
}
- priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0, 0);
if (IS_ERR(priv->rx_tfm_mic)) {
printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
"crypto API michael_mic\n");
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 21bb68457cfe..40a8b941ad5c 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -908,6 +908,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
case EZUSB_CTX_REQ_SUBMITTED:
if (!ctx->in_rid)
break;
+ /* fall through */
default:
err("%s: Unexpected context state %d", __func__,
state);
diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c
index ce9d4db0d9ca..b0eb58a62c90 100644
--- a/drivers/net/wireless/intersil/prism54/isl_38xx.c
+++ b/drivers/net/wireless/intersil/prism54/isl_38xx.c
@@ -235,6 +235,7 @@ isl38xx_in_queue(isl38xx_control_block *cb, int queue)
/* send queues */
case ISL38XX_CB_TX_MGMTQ:
BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
+ /* fall through */
case ISL38XX_CB_TX_DATA_LQ:
case ISL38XX_CB_TX_DATA_HQ:
diff --git a/drivers/net/wireless/intersil/prism54/isl_ioctl.c b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
index 334717b0a2be..3ccf2a4b548c 100644
--- a/drivers/net/wireless/intersil/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
@@ -1691,6 +1691,7 @@ static int prism54_get_encodeext(struct net_device *ndev,
case DOT11_AUTH_BOTH:
case DOT11_AUTH_SK:
wrqu->encoding.flags |= IW_ENCODE_RESTRICTED;
+ /* fall through */
case DOT11_AUTH_OS:
default:
wrqu->encoding.flags |= IW_ENCODE_OPEN;
diff --git a/drivers/net/wireless/intersil/prism54/islpci_dev.c b/drivers/net/wireless/intersil/prism54/islpci_dev.c
index 325176d4d796..ad6d3a56ae06 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_dev.c
+++ b/drivers/net/wireless/intersil/prism54/islpci_dev.c
@@ -932,6 +932,7 @@ islpci_set_state(islpci_private *priv, islpci_state_t new_state)
switch (new_state) {
case PRV_STATE_OFF:
priv->state_off++;
+ /* fall through */
default:
priv->state = new_state;
break;
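
Several hunks in this series only add /* fall through */ markers. With -Wimplicit-fallthrough enabled, GCC treats a comment matching that pattern (or, in later kernels, the fallthrough; macro) as an assertion that the missing break is intentional. A minimal illustration:

/* Build with: gcc -Wimplicit-fallthrough -c example.c
 * Without the comment, the first case triggers a warning. */
void account(int state, int *off_count, int *cur_state)
{
	switch (state) {
	case 0:
		(*off_count)++;
		/* fall through */
	default:
		*cur_state = state;
		break;
	}
}
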
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index aa8058264d5b..3a4b8786f7ea 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -374,6 +374,20 @@ static const struct ieee80211_rate hwsim_rates[] = {
{ .bitrate = 540 }
};
+static const u32 hwsim_ciphers[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_CCMP_256,
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+};
+
#define OUI_QCA 0x001374
#define QCA_NL80211_SUBCMD_TEST 1
enum qca_nl80211_vendor_subcmds {
@@ -451,48 +465,6 @@ static const struct nl80211_vendor_cmd_info mac80211_hwsim_vendor_events[] = {
{ .vendor_id = OUI_QCA, .subcmd = 1 },
};
-static const struct ieee80211_iface_limit hwsim_if_limits[] = {
- { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
- { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO) },
- /* must be last, see hwsim_if_comb */
- { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }
-};
-
-static const struct ieee80211_iface_combination hwsim_if_comb[] = {
- {
- .limits = hwsim_if_limits,
- /* remove the last entry which is P2P_DEVICE */
- .n_limits = ARRAY_SIZE(hwsim_if_limits) - 1,
- .max_interfaces = 2048,
- .num_different_channels = 1,
- .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_160),
- },
-};
-
-static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
- {
- .limits = hwsim_if_limits,
- .n_limits = ARRAY_SIZE(hwsim_if_limits),
- .max_interfaces = 2048,
- .num_different_channels = 1,
- .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_160),
- },
-};
-
static spinlock_t hwsim_radio_lock;
static LIST_HEAD(hwsim_radios);
static struct rhashtable hwsim_radios_rht;
@@ -515,6 +487,10 @@ struct mac80211_hwsim_data {
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
struct ieee80211_iface_combination if_combination;
+ struct ieee80211_iface_limit if_limits[3];
+ int n_if_limits;
+
+ u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
struct mac_address addresses[2];
int channels, idx;
@@ -642,6 +618,8 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG },
[HWSIM_ATTR_FREQ] = { .type = NLA_U32 },
[HWSIM_ATTR_PERM_ADDR] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
+ [HWSIM_ATTR_IFTYPE_SUPPORT] = { .type = NLA_U32 },
+ [HWSIM_ATTR_CIPHER_SUPPORT] = { .type = NLA_BINARY },
};
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@@ -2414,6 +2392,9 @@ struct hwsim_new_radio_params {
const char *hwname;
bool no_vif;
const u8 *perm_addr;
+ u32 iftypes;
+ u32 *ciphers;
+ u8 n_ciphers;
};
static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb,
@@ -2630,6 +2611,27 @@ static void mac80211_hswim_he_capab(struct ieee80211_supported_band *sband)
sband->n_iftype_data = 1;
}
+#ifdef CONFIG_MAC80211_MESH
+#define HWSIM_MESH_BIT BIT(NL80211_IFTYPE_MESH_POINT)
+#else
+#define HWSIM_MESH_BIT 0
+#endif
+
+#define HWSIM_DEFAULT_IF_LIMIT \
+ (BIT(NL80211_IFTYPE_STATION) | \
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | \
+ BIT(NL80211_IFTYPE_AP) | \
+ BIT(NL80211_IFTYPE_P2P_GO) | \
+ HWSIM_MESH_BIT)
+
+#define HWSIM_IFTYPE_SUPPORT_MASK \
+ (BIT(NL80211_IFTYPE_STATION) | \
+ BIT(NL80211_IFTYPE_AP) | \
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | \
+ BIT(NL80211_IFTYPE_P2P_GO) | \
+ BIT(NL80211_IFTYPE_ADHOC) | \
+ BIT(NL80211_IFTYPE_MESH_POINT))
+
static int mac80211_hwsim_new_radio(struct genl_info *info,
struct hwsim_new_radio_params *param)
{
@@ -2641,6 +2643,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
struct net *net;
int idx;
+ int n_limits = 0;
if (WARN_ON(param->channels > 1 && !param->use_chanctx))
return -EINVAL;
@@ -2716,26 +2719,60 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
if (info)
data->portid = info->snd_portid;
+ /* set up interface limits, only for interface types we support */
+ if (param->iftypes & BIT(NL80211_IFTYPE_ADHOC)) {
+ data->if_limits[n_limits].max = 1;
+ data->if_limits[n_limits].types = BIT(NL80211_IFTYPE_ADHOC);
+ n_limits++;
+ }
+
+ if (param->iftypes & HWSIM_DEFAULT_IF_LIMIT) {
+ data->if_limits[n_limits].max = 2048;
+ /*
+ * For this case, we may only support a subset of
+ * HWSIM_DEFAULT_IF_LIMIT, therefore we only want to add the
+ * bits that both param->iftypes and HWSIM_DEFAULT_IF_LIMIT have.
+ */
+ data->if_limits[n_limits].types =
+ HWSIM_DEFAULT_IF_LIMIT & param->iftypes;
+ n_limits++;
+ }
+
+ if (param->iftypes & BIT(NL80211_IFTYPE_P2P_DEVICE)) {
+ data->if_limits[n_limits].max = 1;
+ data->if_limits[n_limits].types =
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
+ n_limits++;
+ }
+
if (data->use_chanctx) {
hw->wiphy->max_scan_ssids = 255;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
hw->wiphy->max_remain_on_channel_duration = 1000;
- hw->wiphy->iface_combinations = &data->if_combination;
- if (param->p2p_device)
- data->if_combination = hwsim_if_comb_p2p_dev[0];
- else
- data->if_combination = hwsim_if_comb[0];
- hw->wiphy->n_iface_combinations = 1;
- /* For channels > 1 DFS is not allowed */
data->if_combination.radar_detect_widths = 0;
data->if_combination.num_different_channels = data->channels;
- } else if (param->p2p_device) {
- hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev;
- hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(hwsim_if_comb_p2p_dev);
} else {
- hw->wiphy->iface_combinations = hwsim_if_comb;
- hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
+ data->if_combination.num_different_channels = 1;
+ data->if_combination.radar_detect_widths =
+ BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160);
+ }
+
+ data->if_combination.n_limits = n_limits;
+ data->if_combination.max_interfaces = 2048;
+ data->if_combination.limits = data->if_limits;
+
+ hw->wiphy->iface_combinations = &data->if_combination;
+ hw->wiphy->n_iface_combinations = 1;
+
+ if (param->ciphers) {
+ memcpy(data->ciphers, param->ciphers,
+ param->n_ciphers * sizeof(u32));
+ hw->wiphy->cipher_suites = data->ciphers;
+ hw->wiphy->n_cipher_suites = param->n_ciphers;
}
INIT_DELAYED_WORK(&data->roc_start, hw_roc_start);
@@ -2744,15 +2781,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hw->queues = 5;
hw->offchannel_tx_hw_queue = 4;
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
-
- if (param->p2p_device)
- hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, CHANCTX_STA_CSA);
@@ -2778,6 +2806,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ hw->wiphy->interface_modes = param->iftypes;
+
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
hw->sta_data_size = sizeof(struct hwsim_sta_priv);
@@ -2884,6 +2914,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ tasklet_hrtimer_init(&data->beacon_timer,
+ mac80211_hwsim_beacon,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
err = ieee80211_register_hw(hw);
if (err < 0) {
pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
@@ -2908,10 +2942,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
data->debugfs,
data, &hwsim_simulate_radar);
- tasklet_hrtimer_init(&data->beacon_timer,
- mac80211_hwsim_beacon,
- CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-
spin_lock_bh(&hwsim_radio_lock);
err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht,
hwsim_rht_params);
@@ -3293,6 +3323,29 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
return 0;
}
+/* ensure the requested ciphers are all listed in the 'hwsim_ciphers' array */
+static bool hwsim_known_ciphers(const u32 *ciphers, int n_ciphers)
+{
+ int i;
+
+ for (i = 0; i < n_ciphers; i++) {
+ int j;
+ int found = 0;
+
+ for (j = 0; j < ARRAY_SIZE(hwsim_ciphers); j++) {
+ if (ciphers[i] == hwsim_ciphers[j]) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return false;
+ }
+
+ return true;
+}
+
static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
{
struct hwsim_new_radio_params param = { 0 };
@@ -3321,15 +3374,6 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_NO_VIF])
param.no_vif = true;
- if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
- hwname = kasprintf(GFP_KERNEL, "%.*s",
- nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
- (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
- if (!hwname)
- return -ENOMEM;
- param.hwname = hwname;
- }
-
if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
param.use_chanctx = true;
else
@@ -3342,10 +3386,8 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
- if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
- kfree(hwname);
+ if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
return -EINVAL;
- }
idx = array_index_nospec(idx,
ARRAY_SIZE(hwsim_world_regdom_custom));
@@ -3358,14 +3400,72 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
GENL_SET_ERR_MSG(info, "MAC is not a valid source addr");
NL_SET_BAD_ATTR(info->extack,
info->attrs[HWSIM_ATTR_PERM_ADDR]);
- kfree(hwname);
return -EINVAL;
}
-
param.perm_addr = nla_data(info->attrs[HWSIM_ATTR_PERM_ADDR]);
}
+ if (info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT]) {
+ param.iftypes =
+ nla_get_u32(info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT]);
+
+ if (param.iftypes & ~HWSIM_IFTYPE_SUPPORT_MASK) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT],
+ "cannot support more iftypes than kernel");
+ return -EINVAL;
+ }
+ } else {
+ param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK;
+ }
+
+ /* ensure both the flag and iftype support are honored */
+ if (param.p2p_device ||
+ param.iftypes & BIT(NL80211_IFTYPE_P2P_DEVICE)) {
+ param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
+ param.p2p_device = true;
+ }
+
+ if (info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]) {
+ u32 len = nla_len(info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]);
+
+ param.ciphers =
+ nla_data(info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]);
+
+ if (len % sizeof(u32)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ info->attrs[HWSIM_ATTR_CIPHER_SUPPORT],
+ "bad cipher list length");
+ return -EINVAL;
+ }
+
+ param.n_ciphers = len / sizeof(u32);
+
+ if (param.n_ciphers > ARRAY_SIZE(hwsim_ciphers)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ info->attrs[HWSIM_ATTR_CIPHER_SUPPORT],
+ "too many ciphers specified");
+ return -EINVAL;
+ }
+
+ if (!hwsim_known_ciphers(param.ciphers, param.n_ciphers)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ info->attrs[HWSIM_ATTR_CIPHER_SUPPORT],
+ "unsupported ciphers specified");
+ return -EINVAL;
+ }
+ }
+
+ if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
+ hwname = kasprintf(GFP_KERNEL, "%.*s",
+ nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
+ (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
+ if (!hwname)
+ return -ENOMEM;
+ param.hwname = hwname;
+ }
+
ret = mac80211_hwsim_new_radio(info, &param);
kfree(hwname);
return ret;
@@ -3703,16 +3803,16 @@ static int __init init_mac80211_hwsim(void)
if (err)
goto out_unregister_pernet;
+ err = hwsim_init_netlink();
+ if (err)
+ goto out_unregister_driver;
+
hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
if (IS_ERR(hwsim_class)) {
err = PTR_ERR(hwsim_class);
- goto out_unregister_driver;
+ goto out_exit_netlink;
}
- err = hwsim_init_netlink();
- if (err < 0)
- goto out_unregister_driver;
-
for (i = 0; i < radios; i++) {
struct hwsim_new_radio_params param = { 0 };
@@ -3785,6 +3885,7 @@ static int __init init_mac80211_hwsim(void)
param.p2p_device = support_p2p_device;
param.use_chanctx = channels > 1;
+ param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK;
err = mac80211_hwsim_new_radio(NULL, &param);
if (err < 0)
@@ -3818,6 +3919,8 @@ out_free_mon:
free_netdev(hwsim_mon);
out_free_radios:
mac80211_hwsim_free();
+out_exit_netlink:
+ hwsim_exit_netlink();
out_unregister_driver:
platform_driver_unregister(&mac80211_hwsim_driver);
out_unregister_pernet:
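
The HWSIM_ATTR_CIPHER_SUPPORT handling above is the standard recipe for a variable-length u32-array netlink attribute: check that the byte length is a multiple of sizeof(u32), bound the element count, then validate each element against a whitelist. A condensed sketch with hypothetical names:

/* Sketch: validate a binary netlink attribute carrying an array of u32s.
 * Returns the element count, or -EINVAL on a malformed or unknown entry. */
#include <net/netlink.h>

static int parse_u32_array_attr(struct nlattr *attr, const u32 *allowed,
				int n_allowed, u32 *out, int max_out)
{
	int n = nla_len(attr) / sizeof(u32);
	const u32 *vals = nla_data(attr);
	int i, j;

	if (nla_len(attr) % sizeof(u32) || n > max_out)
		return -EINVAL;

	for (i = 0; i < n; i++) {
		for (j = 0; j < n_allowed; j++)
			if (vals[i] == allowed[j])
				break;
		if (j == n_allowed)
			return -EINVAL;
		out[i] = vals[i];
	}
	return n;
}
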
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 0fe3199f8c72..a1ef8457fad4 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -132,6 +132,8 @@ enum {
* @HWSIM_ATTR_TX_INFO_FLAGS: additional flags for corresponding
* rates of %HWSIM_ATTR_TX_INFO
* @HWSIM_ATTR_PERM_ADDR: permanent mac address of new radio
+ * @HWSIM_ATTR_IFTYPE_SUPPORT: u32 bitfield of supported interface types
+ * @HWSIM_ATTR_CIPHER_SUPPORT: u32 array of supported cipher types
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -160,6 +162,8 @@ enum {
HWSIM_ATTR_PAD,
HWSIM_ATTR_TX_INFO_FLAGS,
HWSIM_ATTR_PERM_ADDR,
+ HWSIM_ATTR_IFTYPE_SUPPORT,
+ HWSIM_ATTR_CIPHER_SUPPORT,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c
index 504d6e096476..7c3224b83ef7 100644
--- a/drivers/net/wireless/marvell/libertas/if_spi.c
+++ b/drivers/net/wireless/marvell/libertas/if_spi.c
@@ -796,15 +796,13 @@ static void if_spi_h2c(struct if_spi_card *card,
{
struct lbs_private *priv = card->priv;
int err = 0;
- u16 int_type, port_reg;
+ u16 port_reg;
switch (type) {
case MVMS_DAT:
- int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
port_reg = IF_SPI_DATA_RDWRPORT_REG;
break;
case MVMS_CMD:
- int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
port_reg = IF_SPI_CMD_RDWRPORT_REG;
break;
default:
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index e2addd8b878b..5d75c971004b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,11 +696,10 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
"Send delba to tid=%d, %pM\n",
tid, rx_reor_tbl_ptr->ta);
mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
- return;
+ goto exit;
}
}
+exit:
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 8e63d14c1e1c..5380fba652cc 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,8 +103,6 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
* There could be holes in the buffer, which are skipped by the function.
* Since the buffer is linear, the function uses rotation to simulate
* a circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -113,21 +111,25 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
{
int pkt_to_send, i;
void *rx_tmp_ptr;
+ unsigned long flags;
pkt_to_send = (start_win > tbl->start_win) ?
min((start_win - tbl->start_win), tbl->win_size) :
tbl->win_size;
for (i = 0; i < pkt_to_send; ++i) {
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
rx_tmp_ptr = NULL;
if (tbl->rx_reorder_ptr[i]) {
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
}
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
if (rx_tmp_ptr)
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* a circular buffer
@@ -138,6 +140,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
}
tbl->start_win = start_win;
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
/*
@@ -147,8 +150,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
* The start window is adjusted automatically when a hole is located.
* Since the buffer is linear, the function uses rotation to simulate
* a circular buffer.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -156,15 +157,22 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
{
int i, j, xchg;
void *rx_tmp_ptr;
+ unsigned long flags;
for (i = 0; i < tbl->win_size; ++i) {
- if (!tbl->rx_reorder_ptr[i])
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ if (!tbl->rx_reorder_ptr[i]) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
break;
+ }
rx_tmp_ptr = tbl->rx_reorder_ptr[i];
tbl->rx_reorder_ptr[i] = NULL;
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
}
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
/*
* We don't have a circular buffer, hence use rotation to simulate
* a circular buffer
@@ -177,6 +185,7 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
}
}
tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}
/*
@@ -184,8 +193,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
*
* The function stops the associated timer and dispatches all the
* pending packets in the Rx reorder table before deletion.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -211,7 +218,11 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
del_timer_sync(&tbl->timer_context.timer);
tbl->timer_context.timer_is_set = false;
+
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_del(&tbl->list);
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+
kfree(tbl->rx_reorder_ptr);
kfree(tbl);
@@ -224,17 +235,22 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
/*
* This function returns the pointer to an entry in Rx reordering
* table which matches the given TA/TID pair.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
*/
struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
struct mwifiex_rx_reorder_tbl *tbl;
+ unsigned long flags;
- list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
- if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
+ if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
return tbl;
+ }
+ }
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return NULL;
}
@@ -251,9 +267,14 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
return;
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
- list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
- if (!memcmp(tbl->ta, ta, ETH_ALEN))
+ list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
+ if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
mwifiex_del_rx_reorder_entry(priv, tbl);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ }
+ }
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
@@ -262,18 +283,24 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
/*
* This function finds the last sequence number used in the packets
* buffered in Rx reordering table.
- *
- * The caller must hold rx_reorder_tbl_lock spinlock.
*/
static int
mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
{
struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
+ struct mwifiex_private *priv = ctx->priv;
+ unsigned long flags;
int i;
- for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
- if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
+ if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+ flags);
return i;
+ }
+ }
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return -1;
}
@@ -291,22 +318,17 @@ mwifiex_flush_data(struct timer_list *t)
struct reorder_tmr_cnxt *ctx =
from_timer(ctx, t, timer);
int start_win, seq_num;
- unsigned long flags;
ctx->timer_is_set = false;
- spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
seq_num = mwifiex_11n_find_last_seq_num(ctx);
- if (seq_num < 0) {
- spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
+ if (seq_num < 0)
return;
- }
mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
start_win);
- spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
}
/*
@@ -333,14 +355,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
* If we get a TID, TA pair which is already present, dispatch all
* the packets and move the window size until the ssn
*/
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (tbl) {
mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return;
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* if !tbl then create one */
new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
if (!new_node)
@@ -551,20 +570,16 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
int prev_start_win, start_win, end_win, win_size;
u16 pkt_index;
bool init_window_shift = false;
- unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
if (pkt_type != PKT_TYPE_BAR)
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_11n_dispatch_pkt(priv, payload);
return ret;
}
@@ -651,8 +666,6 @@ done:
if (!tbl->timer_context.timer_is_set ||
prev_start_win != tbl->start_win)
mwifiex_11n_rxreorder_timer_restart(tbl);
-
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
@@ -681,18 +694,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
peer_mac, tid, initiator);
if (cleanup_rx_reorder_tbl) {
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
peer_mac);
if (!tbl) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
mwifiex_dbg(priv->adapter, EVENT,
"event: TID, TA not found in table\n");
return;
}
mwifiex_del_rx_reorder_entry(priv, tbl);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
} else {
ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
if (!ptx_tbl) {
@@ -726,7 +735,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
int tid, win_size;
struct mwifiex_rx_reorder_tbl *tbl;
uint16_t block_ack_param_set;
- unsigned long flags;
block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
@@ -740,20 +748,17 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d\n",
add_ba_rsp->peer_mac_addr, tid);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl)
mwifiex_del_rx_reorder_entry(priv, tbl);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return 0;
}
win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
>> BLOCKACKPARAM_WINSIZE_POS;
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
add_ba_rsp->peer_mac_addr);
if (tbl) {
@@ -764,7 +769,6 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
else
tbl->amsdu = false;
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_dbg(priv->adapter, CMD,
"cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -804,8 +808,11 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
list_for_each_entry_safe(del_tbl_ptr, tmp_node,
- &priv->rx_reorder_tbl_ptr, list)
+ &priv->rx_reorder_tbl_ptr, list) {
+ spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
+ spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
+ }
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
@@ -929,7 +936,6 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
int tlv_buf_left = len;
int ret;
u8 *tmp;
- unsigned long flags;
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
@@ -949,18 +955,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
tlv_bitmap_len);
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
rx_reor_tbl_ptr =
mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
tlv_rxba->mac);
if (!rx_reor_tbl_ptr) {
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
- flags);
mwifiex_dbg(priv->adapter, ERROR,
"Can not find rx_reorder_tbl!");
return;
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
for (i = 0; i < tlv_bitmap_len; i++) {
for (j = 0 ; j < 8; j++) {
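
The rx_reorder_tbl_lock rework above moves locking into the helpers and makes callers drop the lock around calls that take it again (or may sleep), using the _safe iterator so the cursor survives the entry's deletion. Reduced to its shape, as a sketch with invented types; note it assumes nothing else frees the iterator's next entry while the lock is dropped, which is the same assumption the series makes:

#include <linux/list.h>
#include <linux/spinlock.h>

struct entry { struct list_head list; /* payload */ };

static void flush_matching(struct list_head *head, spinlock_t *lock,
			   bool (*match)(struct entry *),
			   void (*delete_entry)(struct entry *))
{
	struct entry *e, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry_safe(e, tmp, head, list) {
		if (!match(e))
			continue;
		spin_unlock_irqrestore(lock, flags);
		delete_entry(e);	/* takes the lock itself, may sleep */
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}
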
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index adc88433faa8..1467af22e394 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1275,27 +1275,27 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
}
static void
-mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo,
+mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 rateinfo, u8 htinfo,
struct rate_info *rate)
{
struct mwifiex_adapter *adapter = priv->adapter;
if (adapter->is_hw_11ac_capable) {
/* bit[1-0]: 00=LG 01=HT 10=VHT */
- if (tx_htinfo & BIT(0)) {
+ if (htinfo & BIT(0)) {
/* HT */
- rate->mcs = priv->tx_rate;
+ rate->mcs = rateinfo;
rate->flags |= RATE_INFO_FLAGS_MCS;
}
- if (tx_htinfo & BIT(1)) {
+ if (htinfo & BIT(1)) {
/* VHT */
- rate->mcs = priv->tx_rate & 0x0F;
+ rate->mcs = rateinfo & 0x0F;
rate->flags |= RATE_INFO_FLAGS_VHT_MCS;
}
- if (tx_htinfo & (BIT(1) | BIT(0))) {
+ if (htinfo & (BIT(1) | BIT(0))) {
/* HT or VHT */
- switch (tx_htinfo & (BIT(3) | BIT(2))) {
+ switch (htinfo & (BIT(3) | BIT(2))) {
case 0:
rate->bw = RATE_INFO_BW_20;
break;
@@ -1310,29 +1310,51 @@ mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo,
break;
}
- if (tx_htinfo & BIT(4))
+ if (htinfo & BIT(4))
rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
- if ((priv->tx_rate >> 4) == 1)
+ if ((rateinfo >> 4) == 1)
rate->nss = 2;
else
rate->nss = 1;
}
} else {
/*
- * Bit 0 in tx_htinfo indicates that current Tx rate
- * is 11n rate. Valid MCS index values for us are 0 to 15.
+ * Bit 0 in htinfo indicates that current rate is 11n. Valid
+ * MCS index values for us are 0 to 15.
*/
- if ((tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
- rate->mcs = priv->tx_rate;
+ if ((htinfo & BIT(0)) && (rateinfo < 16)) {
+ rate->mcs = rateinfo;
rate->flags |= RATE_INFO_FLAGS_MCS;
rate->bw = RATE_INFO_BW_20;
- if (tx_htinfo & BIT(1))
+ if (htinfo & BIT(1))
rate->bw = RATE_INFO_BW_40;
- if (tx_htinfo & BIT(2))
+ if (htinfo & BIT(2))
rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
}
}
+
+ /* Decode legacy rates for non-HT. */
+ if (!(htinfo & (BIT(0) | BIT(1)))) {
+ /* Bitrates in multiples of 100kb/s. */
+ static const int legacy_rates[] = {
+ [0] = 10,
+ [1] = 20,
+ [2] = 55,
+ [3] = 110,
+ [4] = 60, /* MWIFIEX_RATE_INDEX_OFDM0 */
+ [5] = 60,
+ [6] = 90,
+ [7] = 120,
+ [8] = 180,
+ [9] = 240,
+ [10] = 360,
+ [11] = 480,
+ [12] = 540,
+ };
+ if (rateinfo < ARRAY_SIZE(legacy_rates))
+ rate->legacy = legacy_rates[rateinfo];
+ }
}
/*
@@ -1375,7 +1397,8 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
sinfo->tx_packets = node->stats.tx_packets;
sinfo->tx_failed = node->stats.tx_failed;
- mwifiex_parse_htinfo(priv, node->stats.last_tx_htinfo,
+ mwifiex_parse_htinfo(priv, priv->tx_rate,
+ node->stats.last_tx_htinfo,
&sinfo->txrate);
sinfo->txrate.legacy = node->stats.last_tx_rate * 5;
@@ -1401,7 +1424,8 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
&priv->dtim_period, true);
- mwifiex_parse_htinfo(priv, priv->tx_htinfo, &sinfo->txrate);
+ mwifiex_parse_htinfo(priv, priv->tx_rate, priv->tx_htinfo,
+ &sinfo->txrate);
sinfo->signal_avg = priv->bcn_rssi_avg;
sinfo->rx_bytes = priv->stats.rx_bytes;
@@ -1412,6 +1436,10 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
/* bit rate is in 500 kb/s units. Convert it to 100kb/s units */
sinfo->txrate.legacy = rate * 5;
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ mwifiex_parse_htinfo(priv, priv->rxpd_rate, priv->rxpd_htinfo,
+ &sinfo->rxrate);
+
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM);
sinfo->bss_param.flags = 0;
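
The legacy-rate decode added to mwifiex_parse_htinfo() is a bounded lookup in a table of bitrates expressed in 100 kb/s units, with designated initializers marking the CCK/OFDM split. The same shape as a standalone snippet:

#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Bitrates in multiples of 100 kb/s, indexed by firmware rate index. */
static const int legacy_rates[] = {
	[0] = 10, [1] = 20, [2] = 55, [3] = 110,	/* CCK */
	[4] = 60, [5] = 60, [6] = 90, [7] = 120,	/* OFDM */
	[8] = 180, [9] = 240, [10] = 360, [11] = 480, [12] = 540,
};

static int legacy_bitrate_100kbps(unsigned int rateinfo)
{
	return rateinfo < ARRAY_SIZE(legacy_rates) ? legacy_rates[rateinfo] : 0;
}
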
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index cce70252fd96..cbe4493b3266 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -273,15 +273,13 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
"total samples = %d\n",
atomic_read(&phist_data->num_samples));
- p += sprintf(p, "rx rates (in Mbps): 0=1M 1=2M");
- p += sprintf(p, "2=5.5M 3=11M 4=6M 5=9M 6=12M\n");
- p += sprintf(p, "7=18M 8=24M 9=36M 10=48M 11=54M");
- p += sprintf(p, "12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n");
+ p += sprintf(p,
+ "rx rates (in Mbps): 0=1M 1=2M 2=5.5M 3=11M 4=6M 5=9M 6=12M\n"
+ "7=18M 8=24M 9=36M 10=48M 11=54M 12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n");
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info)) {
- p += sprintf(p, "44-53=MCS0-9(VHT:BW20)");
- p += sprintf(p, "54-63=MCS0-9(VHT:BW40)");
- p += sprintf(p, "64-73=MCS0-9(VHT:BW80)\n\n");
+ p += sprintf(p,
+ "44-53=MCS0-9(VHT:BW20) 54-63=MCS0-9(VHT:BW40) 64-73=MCS0-9(VHT:BW80)\n\n");
} else {
p += sprintf(p, "\n");
}
@@ -310,7 +308,7 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
for (i = 0; i < MWIFIEX_MAX_NOISE_FLR; i++) {
value = atomic_read(&phist_data->noise_flr[i]);
if (value)
- p += sprintf(p, "noise_flr[-%02ddBm] = %d\n",
+ p += sprintf(p, "noise_flr[%02ddBm] = %d\n",
(int)(i-128), value);
}
for (i = 0; i < MWIFIEX_MAX_SIG_STRENGTH; i++) {
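
The one-character change in the noise-floor line fixes a double negative: i - 128 is already negative for every bucket below 0 dBm, so the old "-%02d" format printed strings like noise_flr[--118dBm]. A two-line demonstration, compilable on its own:

#include <stdio.h>

int main(void)
{
	int val = 10 - 128;			/* a typical bucket: -118 */

	printf("noise_flr[-%02ddBm]\n", val);	/* old: noise_flr[--118dBm] */
	printf("noise_flr[%02ddBm]\n", val);	/* new: noise_flr[-118dBm]  */
	return 0;
}
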
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 75cbd609d606..6845eb57b39a 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -363,6 +363,7 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
(const u8 *)hdr,
hdr->len + sizeof(struct ieee_types_header)))
break;
+ /* fall through */
default:
memcpy(gen_ie->ie_buffer + ie_len, hdr,
hdr->len + sizeof(struct ieee_types_header));
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 8e483b0bc3b1..935778ec9a1b 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1882,15 +1882,17 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
ETH_ALEN))
mwifiex_update_curr_bss_params(priv,
bss);
- cfg80211_put_bss(priv->wdev.wiphy, bss);
- }
- if ((chan->flags & IEEE80211_CHAN_RADAR) ||
- (chan->flags & IEEE80211_CHAN_NO_IR)) {
- mwifiex_dbg(adapter, INFO,
- "radar or passive channel %d\n",
- channel);
- mwifiex_save_hidden_ssid_channels(priv, bss);
+ if ((chan->flags & IEEE80211_CHAN_RADAR) ||
+ (chan->flags & IEEE80211_CHAN_NO_IR)) {
+ mwifiex_dbg(adapter, INFO,
+ "radar or passive channel %d\n",
+ channel);
+ mwifiex_save_hidden_ssid_channels(priv,
+ bss);
+ }
+
+ cfg80211_put_bss(priv->wdev.wiphy, bss);
}
}
} else {
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 00fcbda09349..fb28a5c7f441 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -152,14 +152,17 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
mwifiex_process_tdls_action_frame(priv, offset, rx_pkt_len);
}
- priv->rxpd_rate = local_rx_pd->rx_rate;
-
- priv->rxpd_htinfo = local_rx_pd->ht_info;
+ /* Only stash RX bitrate for unicast packets. */
+ if (likely(!is_multicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest))) {
+ priv->rxpd_rate = local_rx_pd->rx_rate;
+ priv->rxpd_htinfo = local_rx_pd->ht_info;
+ }
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
- adj_rx_rate = mwifiex_adjust_data_rate(priv, priv->rxpd_rate,
- priv->rxpd_htinfo);
+ adj_rx_rate = mwifiex_adjust_data_rate(priv,
+ local_rx_pd->rx_rate,
+ local_rx_pd->ht_info);
mwifiex_hist_data_add(priv, adj_rx_rate, local_rx_pd->snr,
local_rx_pd->nf);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index a83c5afc256a..5ce85d5727e4 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -421,15 +421,12 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
}
- spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
if (!priv->ap_11n_enabled ||
(!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
(le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
ret = mwifiex_handle_uap_rx_forward(priv, skb);
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
return ret;
}
- spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
/* Reorder and send to kernel */
pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 9b8d7488c545..1a45cb30f39f 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -14,7 +14,8 @@ CFLAGS_mt76x02_trace.o := -I$(src)
mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
- mt76x02_txrx.o mt76x02_trace.o
+ mt76x02_txrx.o mt76x02_trace.o mt76x02_debugfs.o \
+ mt76x02_dfs.o
mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index f7fbd7016403..e2ba26378575 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -157,17 +157,20 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
if (entry.schedule)
q->swq_queued--;
- if (entry.skb)
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ if (entry.skb) {
+ spin_unlock_bh(&q->lock);
dev->drv->tx_complete_skb(dev, q, &entry, flush);
+ spin_lock_bh(&q->lock);
+ }
if (entry.txwi) {
mt76_put_txwi(dev, entry.txwi);
- wake = true;
+ wake = !flush;
}
- q->tail = (q->tail + 1) % q->ndesc;
- q->queued--;
-
if (!flush && q->tail == last)
last = ioread32(&q->regs->dma_idx);
}
@@ -258,6 +261,7 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
return -ENOMEM;
}
+ skb->prev = skb->next = NULL;
dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
DMA_TO_DEVICE);
ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
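
The mt76_dma_tx_cleanup() change advances the ring's tail and queued count before running the completion callback, and drops the queue lock around tx_complete_skb() so the callback may take other locks (the new TX-status path does). Reduced to its shape, with hypothetical types:

/* Sketch: consume a DMA ring, running completions without the queue lock. */
#include <linux/spinlock.h>

struct ring { int tail, ndesc, queued; spinlock_t lock; };

static void ring_cleanup(struct ring *q, void *(*pop)(struct ring *),
			 void (*complete)(void *))
{
	void *item;

	spin_lock_bh(&q->lock);
	while (q->queued) {
		item = pop(q);			/* reads the slot at q->tail */
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;

		if (item) {
			spin_unlock_bh(&q->lock);
			complete(item);		/* may take other locks */
			spin_lock_bh(&q->lock);
		}
	}
	spin_unlock_bh(&q->lock);
}
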
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 7d219ff2d480..7b926dfa6b97 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -285,6 +285,7 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops)
spin_lock_init(&dev->cc_lock);
mutex_init(&dev->mutex);
init_waitqueue_head(&dev->tx_wait);
+ skb_queue_head_init(&dev->status_list);
return dev;
}
@@ -326,6 +327,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
ieee80211_hw_set(hw, TX_FRAG_LIST);
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -359,6 +361,7 @@ void mt76_unregister_device(struct mt76_dev *dev)
{
struct ieee80211_hw *hw = dev->hw;
+ mt76_tx_status_check(dev, NULL, true);
ieee80211_unregister_hw(hw);
mt76_tx_free(dev);
}
@@ -629,3 +632,80 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
+
+static int
+mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ int ret;
+ int i;
+
+ mutex_lock(&dev->mutex);
+
+ ret = dev->drv->sta_add(dev, vif, sta);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct mt76_txq *mtxq;
+
+ if (!sta->txq[i])
+ continue;
+
+ mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
+ mtxq->wcid = wcid;
+
+ mt76_txq_init(dev, sta->txq[i]);
+ }
+
+ rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
+
+out:
+ mutex_unlock(&dev->mutex);
+
+ return ret;
+}
+
+static void
+mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ int idx = wcid->idx;
+ int i;
+
+ rcu_assign_pointer(dev->wcid[idx], NULL);
+ synchronize_rcu();
+
+ mutex_lock(&dev->mutex);
+
+ if (dev->drv->sta_remove)
+ dev->drv->sta_remove(dev, vif, sta);
+
+ mt76_tx_status_check(dev, wcid, true);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ mt76_txq_remove(dev, sta->txq[i]);
+ mt76_wcid_free(dev->wcid_mask, idx);
+
+ mutex_unlock(&dev->mutex);
+}
+
+int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct mt76_dev *dev = hw->priv;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ return mt76_sta_add(dev, vif, sta);
+
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)
+ mt76_sta_remove(dev, vif, sta);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_sta_state);
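
mt76_sta_state() maps mac80211's station state machine onto a single add/remove pair: the NOTEXIST to NONE transition allocates, NONE to NOTEXIST tears down, and every transition in between is a no-op for these drivers. Drivers are expected to plug it straight into their ops table, along the lines of this sketch (the surrounding callbacks are elided):

#include <net/mac80211.h>

static const struct ieee80211_ops mt76_example_ops = {
	/* .tx, .start, .stop, .add_interface, ... */
	.sta_state = mt76_sta_state,
};
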
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 3bfa7f5e3513..5cd508a68609 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -135,9 +135,8 @@ struct mt76_queue {
};
struct mt76_mcu_ops {
- struct sk_buff *(*mcu_msg_alloc)(const void *data, int len);
- int (*mcu_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
- int cmd, bool wait_resp);
+ int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
+ int len, bool wait_resp);
int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
const struct mt76_reg_pair *rp, int len);
int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
@@ -195,6 +194,8 @@ struct mt76_wcid {
u8 tx_rate_nss;
s8 max_txpwr_adj;
bool sw_iv;
+
+ u8 packet_id;
};
struct mt76_txq {
@@ -233,6 +234,22 @@ struct mt76_rx_tid {
struct sk_buff *reorder_buf[];
};
+#define MT_TX_CB_DMA_DONE BIT(0)
+#define MT_TX_CB_TXS_DONE BIT(1)
+#define MT_TX_CB_TXS_FAILED BIT(2)
+
+#define MT_PACKET_ID_MASK GENMASK(7, 0)
+#define MT_PACKET_ID_NO_ACK MT_PACKET_ID_MASK
+
+#define MT_TX_STATUS_SKB_TIMEOUT HZ
+
+struct mt76_tx_cb {
+ unsigned long jiffies;
+ u8 wcid;
+ u8 pktid;
+ u8 flags;
+};
+
enum {
MT76_STATE_INITIALIZED,
MT76_STATE_RUNNING,
@@ -271,6 +288,12 @@ struct mt76_driver_ops {
void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool ps);
+
+ int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+ void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
};
struct mt76_channel_state {
@@ -400,6 +423,7 @@ struct mt76_dev {
const struct mt76_queue_ops *queue_ops;
wait_queue_head_t tx_wait;
+ struct sk_buff_head status_list;
unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
@@ -484,7 +508,6 @@ struct mt76_rx_status {
#define mt76_wr_rp(dev, ...) (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
-#define mt76_mcu_msg_alloc(dev, ...) (dev)->mt76.mcu_ops->mcu_msg_alloc(__VA_ARGS__)
#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
@@ -594,6 +617,13 @@ wcid_to_sta(struct mt76_wcid *wcid)
return container_of(ptr, struct ieee80211_sta, drv_priv);
}
+static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
+ sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
+ return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data);
+}
+
int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta);
@@ -625,6 +655,26 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key);
+void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __acquires(&dev->status_list.lock);
+void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __releases(&dev->status_list.lock);
+
+int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct sk_buff *skb);
+struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
+ struct mt76_wcid *wcid, int pktid,
+ struct sk_buff_head *list);
+void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+ struct sk_buff_head *list);
+void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
+void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ bool flush);
+int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state);
+
struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
/* internal */
@@ -668,8 +718,6 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
-u32 mt76u_rr(struct mt76_dev *dev, u32 addr);
-void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
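
struct mt76_tx_cb above lives inside the TX status' status_driver_data scratch area of skb->cb; the BUILD_BUG_ON in mt76_tx_skb_cb() turns any future growth of the struct into a compile-time error rather than silent corruption. The general pattern, standalone (cb_area stands in for skb->cb, and the 24-byte size is arbitrary):

#include <stddef.h>

#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct scratch { unsigned char cb_area[24]; };
struct my_cb { unsigned long jiffies; unsigned char wcid, pktid, flags; };

static inline struct my_cb *my_cb(struct scratch *s)
{
	/* Fails to compile if my_cb ever outgrows the scratch area. */
	BUILD_BUG_ON(sizeof(struct my_cb) > sizeof(s->cb_area));
	return (struct my_cb *)s->cb_area;
}
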
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
index 20672978dceb..aa22ba954716 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile
@@ -2,11 +2,9 @@ obj-$(CONFIG_MT76x0U) += mt76x0u.o
obj-$(CONFIG_MT76x0E) += mt76x0e.o
obj-$(CONFIG_MT76x0_COMMON) += mt76x0-common.o
-mt76x0-common-y := \
- init.o main.o trace.o eeprom.o phy.o \
- mac.o debugfs.o
+mt76x0-common-y := init.o main.o eeprom.o phy.o
+
mt76x0u-y := usb.o usb_mcu.o
mt76x0e-y := pci.o pci_mcu.o
# ccflags-y := -DDEBUG
-CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
deleted file mode 100644
index 3224e5b1a1e5..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/debugfs.h>
-
-#include "mt76x0.h"
-#include "eeprom.h"
-
-static int
-mt76x0_ampdu_stat_read(struct seq_file *file, void *data)
-{
- struct mt76x02_dev *dev = file->private;
- int i, j;
-
-#define stat_printf(grp, off, name) \
- seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off])
-
- stat_printf(rx_stat, 0, rx_crc_err);
- stat_printf(rx_stat, 1, rx_phy_err);
- stat_printf(rx_stat, 2, rx_false_cca);
- stat_printf(rx_stat, 3, rx_plcp_err);
- stat_printf(rx_stat, 4, rx_fifo_overflow);
- stat_printf(rx_stat, 5, rx_duplicate);
-
- stat_printf(tx_stat, 0, tx_fail_cnt);
- stat_printf(tx_stat, 1, tx_bcn_cnt);
- stat_printf(tx_stat, 2, tx_success);
- stat_printf(tx_stat, 3, tx_retransmit);
- stat_printf(tx_stat, 4, tx_zero_len);
- stat_printf(tx_stat, 5, tx_underflow);
-
- stat_printf(aggr_stat, 0, non_aggr_tx);
- stat_printf(aggr_stat, 1, aggr_tx);
-
- stat_printf(zero_len_del, 0, tx_zero_len_del);
- stat_printf(zero_len_del, 1, rx_zero_len_del);
-#undef stat_printf
-
- seq_puts(file, "Aggregations stats:\n");
- for (i = 0; i < 4; i++) {
- for (j = 0; j < 8; j++)
- seq_printf(file, "%08llx ",
- dev->stats.aggr_n[i * 8 + j]);
- seq_putc(file, '\n');
- }
-
- seq_printf(file, "recent average AMPDU len: %d\n",
- atomic_read(&dev->avg_ampdu_len));
-
- return 0;
-}
-
-static int
-mt76x0_ampdu_stat_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt76x0_ampdu_stat_read, inode->i_private);
-}
-
-static const struct file_operations fops_ampdu_stat = {
- .open = mt76x0_ampdu_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-void mt76x0_init_debugfs(struct mt76x02_dev *dev)
-{
- struct dentry *dir;
-
- dir = mt76_register_debugfs(&dev->mt76);
- if (!dir)
- return;
-
- debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat);
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index ab4fd6e0f23a..497e762978cc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -135,9 +135,6 @@ static s8 mt76x0_get_delta(struct mt76x02_dev *dev)
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
u8 val;
- if (mt76x0_tssi_enabled(dev))
- return 0;
-
if (chandef->width == NL80211_CHAN_WIDTH_80) {
val = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER) >> 8;
} else if (chandef->width == NL80211_CHAN_WIDTH_40) {
@@ -160,8 +157,8 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_2ghz = chan->band == NL80211_BAND_2GHZ;
struct mt76_rate_power *t = &dev->mt76.rate_power;
- s8 delta = mt76x0_get_delta(dev);
u16 val, addr;
+ s8 delta;
memset(t, 0, sizeof(*t));
@@ -211,6 +208,7 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
t->vht[7] = s6_to_s8(val);
t->vht[8] = s6_to_s8(val >> 8);
+ delta = mt76x0_tssi_enabled(dev) ? 0 : mt76x0_get_delta(dev);
mt76x02_add_rate_power_offset(t, delta);
}
@@ -233,6 +231,20 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
u16 data;
int i;
+ if (mt76x0_tssi_enabled(dev)) {
+ s8 target_power;
+
+ if (chan->band == NL80211_BAND_5GHZ)
+ data = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER);
+ else
+ data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
+ target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
+ info[0] = target_power + mt76x0_get_delta(dev);
+ info[1] = 0;
+
+ return;
+ }
+
for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
if (chan_map[i].chan <= chan->hw_value) {
offset = chan_map[i].offset;
@@ -340,8 +352,6 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev)
mt76x0_set_freq_offset(dev);
mt76x0_set_temp_offset(dev);
- dev->mt76.chainmask = 0x0101;
-
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index 4a9408801260..87b575fe1c74 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -16,7 +16,6 @@
#include "mt76x0.h"
#include "eeprom.h"
-#include "trace.h"
#include "mcu.h"
#include "initvals.h"
@@ -113,7 +112,7 @@ static int mt76x0_init_bbp(struct mt76x02_dev *dev)
{
int ret, i;
- ret = mt76x0_wait_bbp_ready(dev);
+ ret = mt76x0_phy_wait_bbp_ready(dev);
if (ret)
return ret;
@@ -134,80 +133,28 @@ static int mt76x0_init_bbp(struct mt76x02_dev *dev)
static void mt76x0_init_mac_registers(struct mt76x02_dev *dev)
{
- u32 reg;
-
RANDOM_WRITE(dev, common_mac_reg_table);
- mt76x02_set_beacon_offsets(dev);
-
/* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */
RANDOM_WRITE(dev, mt76x0_mac_reg_table);
/* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */
- reg = mt76_rr(dev, MT_MAC_SYS_CTRL);
- reg &= ~0x3;
- mt76_wr(dev, MT_MAC_SYS_CTRL, reg);
+ mt76_clear(dev, MT_MAC_SYS_CTRL, 0x3);
/* Set 0x141C[15:12]=0xF */
- reg = mt76_rr(dev, MT_EXT_CCA_CFG);
- reg |= 0x0000F000;
- mt76_wr(dev, MT_EXT_CCA_CFG, reg);
+ mt76_set(dev, MT_EXT_CCA_CFG, 0xf000);
mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
/*
- TxRing 9 is for Mgmt frame.
- TxRing 8 is for In-band command frame.
- WMM_RG0_TXQMA: This register setting is for FCE to define the rule of TxRing 9.
- WMM_RG1_TXQMA: This register setting is for FCE to define the rule of TxRing 8.
- */
- reg = mt76_rr(dev, MT_WMM_CTRL);
- reg &= ~0x000003FF;
- reg |= 0x00000201;
- mt76_wr(dev, MT_WMM_CTRL, reg);
-}
-
-static int mt76x0_init_wcid_mem(struct mt76x02_dev *dev)
-{
- u32 *vals;
- int i;
-
- vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL);
- if (!vals)
- return -ENOMEM;
-
- for (i = 0; i < MT76_N_WCIDS; i++) {
- vals[i * 2] = 0xffffffff;
- vals[i * 2 + 1] = 0x00ffffff;
- }
-
- mt76_wr_copy(dev, MT_WCID_ADDR_BASE, vals, MT76_N_WCIDS * 2);
- kfree(vals);
- return 0;
-}
-
-static void mt76x0_init_key_mem(struct mt76x02_dev *dev)
-{
- u32 vals[4] = {};
-
- mt76_wr_copy(dev, MT_SKEY_MODE_BASE_0, vals, ARRAY_SIZE(vals));
-}
-
-static int mt76x0_init_wcid_attr_mem(struct mt76x02_dev *dev)
-{
- u32 *vals;
- int i;
-
- vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL);
- if (!vals)
- return -ENOMEM;
-
- for (i = 0; i < MT76_N_WCIDS * 2; i++)
- vals[i] = 1;
-
- mt76_wr_copy(dev, MT_WCID_ATTR_BASE, vals, MT76_N_WCIDS * 2);
- kfree(vals);
- return 0;
+ * tx_ring 9 is for mgmt frame.
+ * tx_ring 8 is for in-band command frame.
+ * WMM_RG0_TXQMA: this register setting is for FCE to
+ * define the rule of tx_ring 9.
+ * WMM_RG1_TXQMA: this register setting is for FCE to
+ * define the rule of tx_ring 8.
+ */
+ mt76_rmw(dev, MT_WMM_CTRL, 0x3ff, 0x201);
}
static void mt76x0_reset_counters(struct mt76x02_dev *dev)
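
The mt76x0_init_mac_registers() hunk above swaps three open-coded read/modify/write sequences for the mt76_clear()/mt76_set()/mt76_rmw() accessors. A minimal user-space sketch of that helper family, with an array standing in for the register file (names shortened; only the mask/value pairs are taken from the hunk):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];	/* fake register file */

static uint32_t rr(unsigned int r)		{ return regs[r]; }
static void     wr(unsigned int r, uint32_t v)	{ regs[r] = v; }

static uint32_t rmw(unsigned int r, uint32_t mask, uint32_t val)
{
	uint32_t tmp = (rr(r) & ~mask) | val;

	wr(r, tmp);
	return tmp;
}

/* set/clear are rmw with one side fixed */
static uint32_t set(unsigned int r, uint32_t bits)	{ return rmw(r, 0, bits); }
static uint32_t clear(unsigned int r, uint32_t bits)	{ return rmw(r, bits, 0); }

int main(void)
{
	regs[0] = 0x12345;
	rmw(0, 0x3ff, 0x201);	/* same shape as the MT_WMM_CTRL update */
	clear(1, 0x3);		/* MAC_SYS_CTRL[1:0] = 0x0 */
	set(2, 0xf000);		/* EXT_CCA_CFG[15:12] = 0xF */
	printf("%08x %08x %08x\n",
	       (unsigned)regs[0], (unsigned)regs[1], (unsigned)regs[2]);
	return 0;
}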
@@ -270,7 +217,7 @@ EXPORT_SYMBOL_GPL(mt76x0_mac_stop);
int mt76x0_init_hardware(struct mt76x02_dev *dev)
{
- int ret;
+ int ret, i, k;
if (!mt76x02_wait_for_wpdma(&dev->mt76, 1000))
return -EIO;
@@ -280,7 +227,7 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
return -ETIMEDOUT;
mt76x0_reset_csr_bbp(dev);
- ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
+ ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1);
if (ret)
return ret;
@@ -295,20 +242,12 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
- ret = mt76x0_init_wcid_mem(dev);
- if (ret)
- return ret;
+ for (i = 0; i < 16; i++)
+ for (k = 0; k < 4; k++)
+ mt76x02_mac_shared_key_setup(dev, i, k, NULL);
- mt76x0_init_key_mem(dev);
-
- ret = mt76x0_init_wcid_attr_mem(dev);
- if (ret)
- return ret;
-
- mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
- MT_BEACON_TIME_CFG_SYNC_MODE |
- MT_BEACON_TIME_CFG_TBTT_EN |
- MT_BEACON_TIME_CFG_BEACON_TX));
+ for (i = 0; i < 256; i++)
+ mt76x02_mac_wcid_setup(dev, i, 0, NULL);
mt76x0_reset_counters(dev);
@@ -317,6 +256,7 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
return ret;
mt76x0_phy_init(dev);
+ mt76x02_init_beacon_config(dev);
return 0;
}
@@ -339,7 +279,6 @@ mt76x0_alloc_device(struct device *pdev,
dev = container_of(mdev, struct mt76x02_dev, mt76);
mutex_init(&dev->phy_mutex);
- atomic_set(&dev->avg_ampdu_len, 1);
return dev;
}
@@ -347,49 +286,21 @@ EXPORT_SYMBOL_GPL(mt76x0_alloc_device);
int mt76x0_register_device(struct mt76x02_dev *dev)
{
- struct mt76_dev *mdev = &dev->mt76;
- struct ieee80211_hw *hw = mdev->hw;
- struct wiphy *wiphy = hw->wiphy;
int ret;
- /* Reserve WCID 0 for mcast - thanks to this APs WCID will go to
- * entry no. 1 like it does in the vendor driver.
- */
- mdev->wcid_mask[0] |= 1;
-
- /* init fake wcid for monitor interfaces */
- mdev->global_wcid.idx = 0xff;
- mdev->global_wcid.hw_key_idx = -1;
-
- /* init antenna configuration */
- mdev->antenna_mask = 1;
-
- hw->queues = 4;
- hw->max_rates = 1;
- hw->max_report_rates = 7;
- hw->max_rate_tries = 1;
- hw->extra_tx_headroom = 2;
- if (mt76_is_usb(dev))
- hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
- MT_DMA_HDR_LEN;
-
- hw->sta_data_size = sizeof(struct mt76x02_sta);
- hw->vif_data_size = sizeof(struct mt76x02_vif);
-
- wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
-
- INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work);
+ mt76x02_init_device(dev);
+ mt76x02_config_mac_addr_list(dev);
- ret = mt76_register_device(mdev, true, mt76x02_rates,
+ ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));
if (ret)
return ret;
/* overwrite unsupported features */
- if (mdev->cap.has_5ghz)
+ if (dev->mt76.cap.has_5ghz)
mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband);
- mt76x0_init_debugfs(dev);
+ mt76x02_init_debugfs(dev);
return 0;
}
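
mt76x0_init_hardware() above also drops the hand-rolled WCID/key table fills (pattern buffer plus mt76_wr_copy()) in favour of per-entry mt76x02_mac_shared_key_setup()/mt76x02_mac_wcid_setup() calls. A toy sketch contrasting the two styles; everything here is a stand-in except the 0xffffffff/0x00ffffff "no station" pattern and the 256-entry loop bound, which come from the diff:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define N_WCIDS 256

static uint32_t wcid_mem[N_WCIDS * 2];	/* fake WCID address memory */

/* old style: precompute the whole table, then one bulk copy */
static void init_bulk(void)
{
	uint32_t vals[N_WCIDS * 2];
	int i;

	for (i = 0; i < N_WCIDS; i++) {
		vals[i * 2] = 0xffffffff;	/* invalid MAC, low word */
		vals[i * 2 + 1] = 0x00ffffff;	/* invalid MAC, high word */
	}
	memcpy(wcid_mem, vals, sizeof(vals));
}

/* new style: one setup helper per entry, shared with the mt76x02 core */
static void wcid_setup(int idx, const uint8_t *mac)
{
	wcid_mem[idx * 2] = mac ? 0x0 : 0xffffffff;
	wcid_mem[idx * 2 + 1] = mac ? 0x0 : 0x00ffffff;
}

int main(void)
{
	int i;

	init_bulk();
	for (i = 0; i < N_WCIDS; i++)
		wcid_setup(i, NULL);	/* NULL == no station */
	printf("%08x %08x\n", (unsigned)wcid_mem[0], (unsigned)wcid_mem[1]);
	return 0;
}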
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
index 236dce6860b4..a1657922758e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -37,14 +37,14 @@ static const struct mt76_reg_pair common_mac_reg_table[] = {
{ MT_PBF_RX_MAX_PCNT, 0x0000fe9f },
{ MT_TX_RETRY_CFG, 0x47d01f0f },
{ MT_AUTO_RSP_CFG, 0x00000013 },
- { MT_CCK_PROT_CFG, 0x05740003 },
- { MT_OFDM_PROT_CFG, 0x05740003 },
+ { MT_CCK_PROT_CFG, 0x07f40003 },
+ { MT_OFDM_PROT_CFG, 0x07f42004 },
{ MT_PBF_CFG, 0x00f40006 },
{ MT_WPDMA_GLO_CFG, 0x00000030 },
- { MT_GF20_PROT_CFG, 0x01744004 },
- { MT_GF40_PROT_CFG, 0x03f44084 },
- { MT_MM20_PROT_CFG, 0x01744004 },
- { MT_MM40_PROT_CFG, 0x03f54084 },
+ { MT_GF20_PROT_CFG, 0x01742004 },
+ { MT_GF40_PROT_CFG, 0x03f42084 },
+ { MT_MM20_PROT_CFG, 0x01742004 },
+ { MT_MM40_PROT_CFG, 0x03f42084 },
{ MT_TXOP_CTRL_CFG, 0x0000583f },
{ MT_TX_RTS_CFG, 0x00092b20 },
{ MT_EXP_ACK_TIME, 0x002400ca },
@@ -85,6 +85,9 @@ static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
{ MT_HT_CTRL_CFG, 0x000001FF },
{ MT_TXOP_HLDR_ET, 0x00000000 },
{ MT_PN_PAD_MODE, 0x00000003 },
+ { MT_TX_PROT_CFG6, 0xe3f42004 },
+ { MT_TX_PROT_CFG7, 0xe3f42084 },
+ { MT_TX_PROT_CFG8, 0xe3f42104 },
};
static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
index 95d43efc1f3d..56c6fa73daf5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h
@@ -16,757 +16,626 @@
#ifndef __MT76X0U_PHY_INITVALS_H
#define __MT76X0U_PHY_INITVALS_H
-#define RF_REG_PAIR(bank, reg, value) \
- { (bank) << 16 | (reg), value }
-
-
static const struct mt76_reg_pair mt76x0_rf_central_tab[] = {
-/*
- Bank 0 - For central blocks: BG, PLL, XTAL, LO, ADC/DAC
-*/
- { MT_RF(0, 1), 0x01},
- { MT_RF(0, 2), 0x11},
-
- /*
- R3 ~ R7: VCO Cal.
- */
- { MT_RF(0, 3), 0x73}, /* VCO Freq Cal - No Bypass, VCO Amp Cal - No Bypass */
- { MT_RF(0, 4), 0x30}, /* R4 b<7>=1, VCO cal */
- { MT_RF(0, 5), 0x00},
- { MT_RF(0, 6), 0x41}, /* Set the open loop amplitude to middle since bypassing amplitude calibration */
- { MT_RF(0, 7), 0x00},
-
- /*
- XO
- */
- { MT_RF(0, 8), 0x00},
- { MT_RF(0, 9), 0x00},
- { MT_RF(0, 10), 0x0C},
- { MT_RF(0, 11), 0x00},
- { MT_RF(0, 12), 0x00},
-
- /*
- BG
- */
- { MT_RF(0, 13), 0x00},
- { MT_RF(0, 14), 0x00},
- { MT_RF(0, 15), 0x00},
-
- /*
- LDO
- */
- { MT_RF(0, 19), 0x20},
- /*
- XO
- */
- { MT_RF(0, 20), 0x22},
- { MT_RF(0, 21), 0x12},
- { MT_RF(0, 23), 0x00},
- { MT_RF(0, 24), 0x33}, /* See band selection for R24<1:0> */
- { MT_RF(0, 25), 0x00},
-
- /*
- PLL, See Freq Selection
- */
- { MT_RF(0, 26), 0x00},
- { MT_RF(0, 27), 0x00},
- { MT_RF(0, 28), 0x00},
- { MT_RF(0, 29), 0x00},
- { MT_RF(0, 30), 0x00},
- { MT_RF(0, 31), 0x00},
- { MT_RF(0, 32), 0x00},
- { MT_RF(0, 33), 0x00},
- { MT_RF(0, 34), 0x00},
- { MT_RF(0, 35), 0x00},
- { MT_RF(0, 36), 0x00},
- { MT_RF(0, 37), 0x00},
-
- /*
- LO Buffer
- */
- { MT_RF(0, 38), 0x2F},
-
- /*
- Test Ports
- */
- { MT_RF(0, 64), 0x00},
- { MT_RF(0, 65), 0x80},
- { MT_RF(0, 66), 0x01},
- { MT_RF(0, 67), 0x04},
-
- /*
- ADC/DAC
- */
- { MT_RF(0, 68), 0x00},
- { MT_RF(0, 69), 0x08},
- { MT_RF(0, 70), 0x08},
- { MT_RF(0, 71), 0x40},
- { MT_RF(0, 72), 0xD0},
- { MT_RF(0, 73), 0x93},
+ { MT_RF(0, 1), 0x01 },
+ { MT_RF(0, 2), 0x11 },
+ /* R3 ~ R7: VCO Cal */
+ { MT_RF(0, 3), 0x73 }, /* VCO Freq Cal */
+ { MT_RF(0, 4), 0x30 }, /* R4 b<7>=1, VCO cal */
+ { MT_RF(0, 5), 0x00 },
+ { MT_RF(0, 6), 0x41 },
+ { MT_RF(0, 7), 0x00 },
+ { MT_RF(0, 8), 0x00 },
+ { MT_RF(0, 9), 0x00 },
+ { MT_RF(0, 10), 0x0C },
+ { MT_RF(0, 11), 0x00 },
+ { MT_RF(0, 12), 0x00 },
+ /* BG */
+ { MT_RF(0, 13), 0x00 },
+ { MT_RF(0, 14), 0x00 },
+ { MT_RF(0, 15), 0x00 },
+ /* LDO */
+ { MT_RF(0, 19), 0x20 },
+ { MT_RF(0, 20), 0x22 },
+ { MT_RF(0, 21), 0x12 },
+ { MT_RF(0, 23), 0x00 },
+ { MT_RF(0, 24), 0x33 },
+ { MT_RF(0, 25), 0x00 },
+ /* PLL */
+ { MT_RF(0, 26), 0x00 },
+ { MT_RF(0, 27), 0x00 },
+ { MT_RF(0, 28), 0x00 },
+ { MT_RF(0, 29), 0x00 },
+ { MT_RF(0, 30), 0x00 },
+ { MT_RF(0, 31), 0x00 },
+ { MT_RF(0, 32), 0x00 },
+ { MT_RF(0, 33), 0x00 },
+ { MT_RF(0, 34), 0x00 },
+ { MT_RF(0, 35), 0x00 },
+ { MT_RF(0, 36), 0x00 },
+ { MT_RF(0, 37), 0x00 },
+ /* LO Buffer */
+ { MT_RF(0, 38), 0x2F },
+ /* Test Ports */
+ { MT_RF(0, 64), 0x00 },
+ { MT_RF(0, 65), 0x80 },
+ { MT_RF(0, 66), 0x01 },
+ { MT_RF(0, 67), 0x04 },
+ /* ADC-DAC */
+ { MT_RF(0, 68), 0x00 },
+ { MT_RF(0, 69), 0x08 },
+ { MT_RF(0, 70), 0x08 },
+ { MT_RF(0, 71), 0x40 },
+ { MT_RF(0, 72), 0xD0 },
+ { MT_RF(0, 73), 0x93 },
};
static const struct mt76_reg_pair mt76x0_rf_2g_channel_0_tab[] = {
-/*
- Bank 5 - Channel 0 2G RF registers
-*/
- /*
- RX logic operation
- */
- /* RF_R00 Change in SelectBand6590 */
-
- { MT_RF(5, 2), 0x0C}, /* 5G+2G (MT7610U) */
- { MT_RF(5, 3), 0x00},
-
- /*
- TX logic operation
- */
- { MT_RF(5, 4), 0x00},
- { MT_RF(5, 5), 0x84},
- { MT_RF(5, 6), 0x02},
-
- /*
- LDO
- */
- { MT_RF(5, 7), 0x00},
- { MT_RF(5, 8), 0x00},
- { MT_RF(5, 9), 0x00},
-
- /*
- RX
- */
- { MT_RF(5, 10), 0x51},
- { MT_RF(5, 11), 0x22},
- { MT_RF(5, 12), 0x22},
- { MT_RF(5, 13), 0x0F},
- { MT_RF(5, 14), 0x47}, /* Increase mixer current for more gain */
- { MT_RF(5, 15), 0x25},
- { MT_RF(5, 16), 0xC7}, /* Tune LNA2 tank */
- { MT_RF(5, 17), 0x00},
- { MT_RF(5, 18), 0x00},
- { MT_RF(5, 19), 0x30}, /* Improve max Pin */
- { MT_RF(5, 20), 0x33},
- { MT_RF(5, 21), 0x02},
- { MT_RF(5, 22), 0x32}, /* Tune LNA1 tank */
- { MT_RF(5, 23), 0x00},
- { MT_RF(5, 24), 0x25},
- { MT_RF(5, 26), 0x00},
- { MT_RF(5, 27), 0x12},
- { MT_RF(5, 28), 0x0F},
- { MT_RF(5, 29), 0x00},
-
- /*
- LOGEN
- */
- { MT_RF(5, 30), 0x51}, /* Tune LOGEN tank */
- { MT_RF(5, 31), 0x35},
- { MT_RF(5, 32), 0x31},
- { MT_RF(5, 33), 0x31},
- { MT_RF(5, 34), 0x34},
- { MT_RF(5, 35), 0x03},
- { MT_RF(5, 36), 0x00},
-
- /*
- TX
- */
- { MT_RF(5, 37), 0xDD}, /* Improve 3.2GHz spur */
- { MT_RF(5, 38), 0xB3},
- { MT_RF(5, 39), 0x33},
- { MT_RF(5, 40), 0xB1},
- { MT_RF(5, 41), 0x71},
- { MT_RF(5, 42), 0xF2},
- { MT_RF(5, 43), 0x47},
- { MT_RF(5, 44), 0x77},
- { MT_RF(5, 45), 0x0E},
- { MT_RF(5, 46), 0x10},
- { MT_RF(5, 47), 0x00},
- { MT_RF(5, 48), 0x53},
- { MT_RF(5, 49), 0x03},
- { MT_RF(5, 50), 0xEF},
- { MT_RF(5, 51), 0xC7},
- { MT_RF(5, 52), 0x62},
- { MT_RF(5, 53), 0x62},
- { MT_RF(5, 54), 0x00},
- { MT_RF(5, 55), 0x00},
- { MT_RF(5, 56), 0x0F},
- { MT_RF(5, 57), 0x0F},
- { MT_RF(5, 58), 0x16},
- { MT_RF(5, 59), 0x16},
- { MT_RF(5, 60), 0x10},
- { MT_RF(5, 61), 0x10},
- { MT_RF(5, 62), 0xD0},
- { MT_RF(5, 63), 0x6C},
- { MT_RF(5, 64), 0x58},
- { MT_RF(5, 65), 0x58},
- { MT_RF(5, 66), 0xF2},
- { MT_RF(5, 67), 0xE8},
- { MT_RF(5, 68), 0xF0},
- { MT_RF(5, 69), 0xF0},
- { MT_RF(5, 127), 0x04},
+ /* RX logic operation */
+ { MT_RF(5, 2), 0x0C }, /* 5G+2G */
+ { MT_RF(5, 3), 0x00 },
+ /* TX logic operation */
+ { MT_RF(5, 4), 0x00 },
+ { MT_RF(5, 5), 0x84 },
+ { MT_RF(5, 6), 0x02 },
+ /* LDO */
+ { MT_RF(5, 7), 0x00 },
+ { MT_RF(5, 8), 0x00 },
+ { MT_RF(5, 9), 0x00 },
+ /* RX */
+ { MT_RF(5, 10), 0x51 },
+ { MT_RF(5, 11), 0x22 },
+ { MT_RF(5, 12), 0x22 },
+ { MT_RF(5, 13), 0x0F },
+ { MT_RF(5, 14), 0x47 },
+ { MT_RF(5, 15), 0x25 },
+ { MT_RF(5, 16), 0xC7 },
+ { MT_RF(5, 17), 0x00 },
+ { MT_RF(5, 18), 0x00 },
+ { MT_RF(5, 19), 0x30 },
+ { MT_RF(5, 20), 0x33 },
+ { MT_RF(5, 21), 0x02 },
+ { MT_RF(5, 22), 0x32 },
+ { MT_RF(5, 23), 0x00 },
+ { MT_RF(5, 24), 0x25 },
+ { MT_RF(5, 26), 0x00 },
+ { MT_RF(5, 27), 0x12 },
+ { MT_RF(5, 28), 0x0F },
+ { MT_RF(5, 29), 0x00 },
+ /* LOGEN */
+ { MT_RF(5, 30), 0x51 },
+ { MT_RF(5, 31), 0x35 },
+ { MT_RF(5, 32), 0x31 },
+ { MT_RF(5, 33), 0x31 },
+ { MT_RF(5, 34), 0x34 },
+ { MT_RF(5, 35), 0x03 },
+ { MT_RF(5, 36), 0x00 },
+ /* TX */
+ { MT_RF(5, 37), 0xDD },
+ { MT_RF(5, 38), 0xB3 },
+ { MT_RF(5, 39), 0x33 },
+ { MT_RF(5, 40), 0xB1 },
+ { MT_RF(5, 41), 0x71 },
+ { MT_RF(5, 42), 0xF2 },
+ { MT_RF(5, 43), 0x47 },
+ { MT_RF(5, 44), 0x77 },
+ { MT_RF(5, 45), 0x0E },
+ { MT_RF(5, 46), 0x10 },
+ { MT_RF(5, 47), 0x00 },
+ { MT_RF(5, 48), 0x53 },
+ { MT_RF(5, 49), 0x03 },
+ { MT_RF(5, 50), 0xEF },
+ { MT_RF(5, 51), 0xC7 },
+ { MT_RF(5, 52), 0x62 },
+ { MT_RF(5, 53), 0x62 },
+ { MT_RF(5, 54), 0x00 },
+ { MT_RF(5, 55), 0x00 },
+ { MT_RF(5, 56), 0x0F },
+ { MT_RF(5, 57), 0x0F },
+ { MT_RF(5, 58), 0x16 },
+ { MT_RF(5, 59), 0x16 },
+ { MT_RF(5, 60), 0x10 },
+ { MT_RF(5, 61), 0x10 },
+ { MT_RF(5, 62), 0xD0 },
+ { MT_RF(5, 63), 0x6C },
+ { MT_RF(5, 64), 0x58 },
+ { MT_RF(5, 65), 0x58 },
+ { MT_RF(5, 66), 0xF2 },
+ { MT_RF(5, 67), 0xE8 },
+ { MT_RF(5, 68), 0xF0 },
+ { MT_RF(5, 69), 0xF0 },
+ { MT_RF(5, 127), 0x04 },
};
static const struct mt76_reg_pair mt76x0_rf_5g_channel_0_tab[] = {
-/*
- Bank 6 - Channel 0 5G RF registers
-*/
- /*
- RX logic operation
- */
- /* RF_R00 Change in SelectBandmt76x0 */
-
- { MT_RF(6, 2), 0x0C},
- { MT_RF(6, 3), 0x00},
-
- /*
- TX logic operation
- */
- { MT_RF(6, 4), 0x00},
- { MT_RF(6, 5), 0x84},
- { MT_RF(6, 6), 0x02},
-
- /*
- LDO
- */
- { MT_RF(6, 7), 0x00},
- { MT_RF(6, 8), 0x00},
- { MT_RF(6, 9), 0x00},
-
- /*
- RX
- */
- { MT_RF(6, 10), 0x00},
- { MT_RF(6, 11), 0x01},
-
- { MT_RF(6, 13), 0x23},
- { MT_RF(6, 14), 0x00},
- { MT_RF(6, 15), 0x04},
- { MT_RF(6, 16), 0x22},
-
- { MT_RF(6, 18), 0x08},
- { MT_RF(6, 19), 0x00},
- { MT_RF(6, 20), 0x00},
- { MT_RF(6, 21), 0x00},
- { MT_RF(6, 22), 0xFB},
-
- /*
- LOGEN5G
- */
- { MT_RF(6, 25), 0x76},
- { MT_RF(6, 26), 0x24},
- { MT_RF(6, 27), 0x04},
- { MT_RF(6, 28), 0x00},
- { MT_RF(6, 29), 0x00},
-
- /*
- TX
- */
- { MT_RF(6, 37), 0xBB},
- { MT_RF(6, 38), 0xB3},
-
- { MT_RF(6, 40), 0x33},
- { MT_RF(6, 41), 0x33},
-
- { MT_RF(6, 43), 0x03},
- { MT_RF(6, 44), 0xB3},
-
- { MT_RF(6, 46), 0x17},
- { MT_RF(6, 47), 0x0E},
- { MT_RF(6, 48), 0x10},
- { MT_RF(6, 49), 0x07},
-
- { MT_RF(6, 62), 0x00},
- { MT_RF(6, 63), 0x00},
- { MT_RF(6, 64), 0xF1},
- { MT_RF(6, 65), 0x0F},
+ /* RX logic operation */
+ { MT_RF(6, 2), 0x0C },
+ { MT_RF(6, 3), 0x00 },
+ /* TX logic operation */
+ { MT_RF(6, 4), 0x00 },
+ { MT_RF(6, 5), 0x84 },
+ { MT_RF(6, 6), 0x02 },
+ /* LDO */
+ { MT_RF(6, 7), 0x00 },
+ { MT_RF(6, 8), 0x00 },
+ { MT_RF(6, 9), 0x00 },
+ /* RX */
+ { MT_RF(6, 10), 0x00 },
+ { MT_RF(6, 11), 0x01 },
+ { MT_RF(6, 13), 0x23 },
+ { MT_RF(6, 14), 0x00 },
+ { MT_RF(6, 15), 0x04 },
+ { MT_RF(6, 16), 0x22 },
+ { MT_RF(6, 18), 0x08 },
+ { MT_RF(6, 19), 0x00 },
+ { MT_RF(6, 20), 0x00 },
+ { MT_RF(6, 21), 0x00 },
+ { MT_RF(6, 22), 0xFB },
+ /* LOGEN5G */
+ { MT_RF(6, 25), 0x76 },
+ { MT_RF(6, 26), 0x24 },
+ { MT_RF(6, 27), 0x04 },
+ { MT_RF(6, 28), 0x00 },
+ { MT_RF(6, 29), 0x00 },
+ /* TX */
+ { MT_RF(6, 37), 0xBB },
+ { MT_RF(6, 38), 0xB3 },
+ { MT_RF(6, 40), 0x33 },
+ { MT_RF(6, 41), 0x33 },
+ { MT_RF(6, 43), 0x03 },
+ { MT_RF(6, 44), 0xB3 },
+ { MT_RF(6, 46), 0x17 },
+ { MT_RF(6, 47), 0x0E },
+ { MT_RF(6, 48), 0x10 },
+ { MT_RF(6, 49), 0x07 },
+ { MT_RF(6, 62), 0x00 },
+ { MT_RF(6, 63), 0x00 },
+ { MT_RF(6, 64), 0xF1 },
+ { MT_RF(6, 65), 0x0F },
};
static const struct mt76_reg_pair mt76x0_rf_vga_channel_0_tab[] = {
-/*
- Bank 7 - Channel 0 VGA RF registers
-*/
/* E3 CR */
- { MT_RF(7, 0), 0x47}, /* Allow BBP/MAC to do calibration */
- { MT_RF(7, 1), 0x00},
- { MT_RF(7, 2), 0x00},
- { MT_RF(7, 3), 0x00},
- { MT_RF(7, 4), 0x00},
-
- { MT_RF(7, 10), 0x13},
- { MT_RF(7, 11), 0x0F},
- { MT_RF(7, 12), 0x13}, /* For dcoc */
- { MT_RF(7, 13), 0x13}, /* For dcoc */
- { MT_RF(7, 14), 0x13}, /* For dcoc */
- { MT_RF(7, 15), 0x20}, /* For dcoc */
- { MT_RF(7, 16), 0x22}, /* For dcoc */
-
- { MT_RF(7, 17), 0x7C},
-
- { MT_RF(7, 18), 0x00},
- { MT_RF(7, 19), 0x00},
- { MT_RF(7, 20), 0x00},
- { MT_RF(7, 21), 0xF1},
- { MT_RF(7, 22), 0x11},
- { MT_RF(7, 23), 0xC2},
- { MT_RF(7, 24), 0x41},
- { MT_RF(7, 25), 0x20},
- { MT_RF(7, 26), 0x40},
- { MT_RF(7, 27), 0xD7},
- { MT_RF(7, 28), 0xA2},
- { MT_RF(7, 29), 0x60},
- { MT_RF(7, 30), 0x49},
- { MT_RF(7, 31), 0x20},
- { MT_RF(7, 32), 0x44},
- { MT_RF(7, 33), 0xC1},
- { MT_RF(7, 34), 0x60},
- { MT_RF(7, 35), 0xC0},
-
- { MT_RF(7, 61), 0x01},
-
- { MT_RF(7, 72), 0x3C},
- { MT_RF(7, 73), 0x34},
- { MT_RF(7, 74), 0x00},
+ { MT_RF(7, 0), 0x47 },
+ { MT_RF(7, 1), 0x00 },
+ { MT_RF(7, 2), 0x00 },
+ { MT_RF(7, 3), 0x00 },
+ { MT_RF(7, 4), 0x00 },
+ { MT_RF(7, 10), 0x13 },
+ { MT_RF(7, 11), 0x0F },
+ { MT_RF(7, 12), 0x13 },
+ { MT_RF(7, 13), 0x13 },
+ { MT_RF(7, 14), 0x13 },
+ { MT_RF(7, 15), 0x20 },
+ { MT_RF(7, 16), 0x22 },
+ { MT_RF(7, 17), 0x7C },
+ { MT_RF(7, 18), 0x00 },
+ { MT_RF(7, 19), 0x00 },
+ { MT_RF(7, 20), 0x00 },
+ { MT_RF(7, 21), 0xF1 },
+ { MT_RF(7, 22), 0x11 },
+ { MT_RF(7, 23), 0xC2 },
+ { MT_RF(7, 24), 0x41 },
+ { MT_RF(7, 25), 0x20 },
+ { MT_RF(7, 26), 0x40 },
+ { MT_RF(7, 27), 0xD7 },
+ { MT_RF(7, 28), 0xA2 },
+ { MT_RF(7, 29), 0x60 },
+ { MT_RF(7, 30), 0x49 },
+ { MT_RF(7, 31), 0x20 },
+ { MT_RF(7, 32), 0x44 },
+ { MT_RF(7, 33), 0xC1 },
+ { MT_RF(7, 34), 0x60 },
+ { MT_RF(7, 35), 0xC0 },
+ { MT_RF(7, 61), 0x01 },
+ { MT_RF(7, 72), 0x3C },
+ { MT_RF(7, 73), 0x34 },
+ { MT_RF(7, 74), 0x00 },
};
static const struct mt76x0_rf_switch_item mt76x0_rf_bw_switch_tab[] = {
- /* Bank, Register, Bw/Band, Value */
- { MT_RF(0, 17), RF_G_BAND | RF_BW_20, 0x00},
- { MT_RF(0, 17), RF_G_BAND | RF_BW_40, 0x00},
- { MT_RF(0, 17), RF_A_BAND | RF_BW_20, 0x00},
- { MT_RF(0, 17), RF_A_BAND | RF_BW_40, 0x00},
- { MT_RF(0, 17), RF_A_BAND | RF_BW_80, 0x00},
-
- /* TODO: need to check B7.R6 & B7.R7 setting for 2.4G again @20121112 */
- { MT_RF(7, 6), RF_G_BAND | RF_BW_20, 0x40},
- { MT_RF(7, 6), RF_G_BAND | RF_BW_40, 0x1C},
- { MT_RF(7, 6), RF_A_BAND | RF_BW_20, 0x40},
- { MT_RF(7, 6), RF_A_BAND | RF_BW_40, 0x20},
- { MT_RF(7, 6), RF_A_BAND | RF_BW_80, 0x10},
-
- { MT_RF(7, 7), RF_G_BAND | RF_BW_20, 0x40},
- { MT_RF(7, 7), RF_G_BAND | RF_BW_40, 0x20},
- { MT_RF(7, 7), RF_A_BAND | RF_BW_20, 0x40},
- { MT_RF(7, 7), RF_A_BAND | RF_BW_40, 0x20},
- { MT_RF(7, 7), RF_A_BAND | RF_BW_80, 0x10},
-
- { MT_RF(7, 8), RF_G_BAND | RF_BW_20, 0x03},
- { MT_RF(7, 8), RF_G_BAND | RF_BW_40, 0x01},
- { MT_RF(7, 8), RF_A_BAND | RF_BW_20, 0x03},
- { MT_RF(7, 8), RF_A_BAND | RF_BW_40, 0x01},
- { MT_RF(7, 8), RF_A_BAND | RF_BW_80, 0x00},
-
- /* TODO: need to check B7.R58 & B7.R59 setting for 2.4G again @20121112 */
- { MT_RF(7, 58), RF_G_BAND | RF_BW_20, 0x40},
- { MT_RF(7, 58), RF_G_BAND | RF_BW_40, 0x40},
- { MT_RF(7, 58), RF_A_BAND | RF_BW_20, 0x40},
- { MT_RF(7, 58), RF_A_BAND | RF_BW_40, 0x40},
- { MT_RF(7, 58), RF_A_BAND | RF_BW_80, 0x10},
-
- { MT_RF(7, 59), RF_G_BAND | RF_BW_20, 0x40},
- { MT_RF(7, 59), RF_G_BAND | RF_BW_40, 0x40},
- { MT_RF(7, 59), RF_A_BAND | RF_BW_20, 0x40},
- { MT_RF(7, 59), RF_A_BAND | RF_BW_40, 0x40},
- { MT_RF(7, 59), RF_A_BAND | RF_BW_80, 0x10},
-
- { MT_RF(7, 60), RF_G_BAND | RF_BW_20, 0xAA},
- { MT_RF(7, 60), RF_G_BAND | RF_BW_40, 0xAA},
- { MT_RF(7, 60), RF_A_BAND | RF_BW_20, 0xAA},
- { MT_RF(7, 60), RF_A_BAND | RF_BW_40, 0xAA},
- { MT_RF(7, 60), RF_A_BAND | RF_BW_80, 0xAA},
-
- { MT_RF(7, 76), RF_BW_20, 0x40},
- { MT_RF(7, 76), RF_BW_40, 0x40},
- { MT_RF(7, 76), RF_BW_80, 0x10},
-
- { MT_RF(7, 77), RF_BW_20, 0x40},
- { MT_RF(7, 77), RF_BW_40, 0x40},
- { MT_RF(7, 77), RF_BW_80, 0x10},
+ /* bank, reg, bw/band, value */
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_20, 0x00 },
+ { MT_RF(0, 17), RF_G_BAND | RF_BW_40, 0x00 },
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_20, 0x00 },
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_40, 0x00 },
+ { MT_RF(0, 17), RF_A_BAND | RF_BW_80, 0x00 },
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 6), RF_G_BAND | RF_BW_40, 0x1C },
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_40, 0x20 },
+ { MT_RF(7, 6), RF_A_BAND | RF_BW_80, 0x10 },
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 7), RF_G_BAND | RF_BW_40, 0x20 },
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_40, 0x20 },
+ { MT_RF(7, 7), RF_A_BAND | RF_BW_80, 0x10 },
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_20, 0x03 },
+ { MT_RF(7, 8), RF_G_BAND | RF_BW_40, 0x01 },
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_20, 0x03 },
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_40, 0x01 },
+ { MT_RF(7, 8), RF_A_BAND | RF_BW_80, 0x00 },
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_40, 0x40 },
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_40, 0x40 },
+ { MT_RF(7, 58), RF_A_BAND | RF_BW_80, 0x10 },
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 59), RF_G_BAND | RF_BW_40, 0x40 },
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_20, 0x40 },
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_40, 0x40 },
+ { MT_RF(7, 59), RF_A_BAND | RF_BW_80, 0x10 },
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_20, 0xAA },
+ { MT_RF(7, 60), RF_G_BAND | RF_BW_40, 0xAA },
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_20, 0xAA },
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_40, 0xAA },
+ { MT_RF(7, 60), RF_A_BAND | RF_BW_80, 0xAA },
+ { MT_RF(7, 76), RF_BW_20, 0x40 },
+ { MT_RF(7, 76), RF_BW_40, 0x40 },
+ { MT_RF(7, 76), RF_BW_80, 0x10 },
+ { MT_RF(7, 77), RF_BW_20, 0x40 },
+ { MT_RF(7, 77), RF_BW_40, 0x40 },
+ { MT_RF(7, 77), RF_BW_80, 0x10 },
};
static const struct mt76x0_rf_switch_item mt76x0_rf_band_switch_tab[] = {
- /* Bank, Register, Bw/Band, Value */
- { MT_RF(0, 16), RF_G_BAND, 0x20},
- { MT_RF(0, 16), RF_A_BAND, 0x20},
-
- { MT_RF(0, 18), RF_G_BAND, 0x00},
- { MT_RF(0, 18), RF_A_BAND, 0x00},
-
- { MT_RF(0, 39), RF_G_BAND, 0x36},
- { MT_RF(0, 39), RF_A_BAND_LB, 0x34},
- { MT_RF(0, 39), RF_A_BAND_MB, 0x33},
- { MT_RF(0, 39), RF_A_BAND_HB, 0x31},
- { MT_RF(0, 39), RF_A_BAND_11J, 0x36},
-
- { MT_RF(6, 12), RF_A_BAND_LB, 0x44},
- { MT_RF(6, 12), RF_A_BAND_MB, 0x44},
- { MT_RF(6, 12), RF_A_BAND_HB, 0x55},
- { MT_RF(6, 12), RF_A_BAND_11J, 0x44},
-
- { MT_RF(6, 17), RF_A_BAND_LB, 0x02},
- { MT_RF(6, 17), RF_A_BAND_MB, 0x00},
- { MT_RF(6, 17), RF_A_BAND_HB, 0x00},
- { MT_RF(6, 17), RF_A_BAND_11J, 0x05},
-
- { MT_RF(6, 24), RF_A_BAND_LB, 0xA1},
- { MT_RF(6, 24), RF_A_BAND_MB, 0x41},
- { MT_RF(6, 24), RF_A_BAND_HB, 0x21},
- { MT_RF(6, 24), RF_A_BAND_11J, 0xE1},
-
- { MT_RF(6, 39), RF_A_BAND_LB, 0x36},
- { MT_RF(6, 39), RF_A_BAND_MB, 0x34},
- { MT_RF(6, 39), RF_A_BAND_HB, 0x32},
- { MT_RF(6, 39), RF_A_BAND_11J, 0x37},
-
- { MT_RF(6, 42), RF_A_BAND_LB, 0xFB},
- { MT_RF(6, 42), RF_A_BAND_MB, 0xF3},
- { MT_RF(6, 42), RF_A_BAND_HB, 0xEB},
- { MT_RF(6, 42), RF_A_BAND_11J, 0xEB},
-
- /* Move R6-R45, R50~R59 to mt76x0_RF_INT_PA_5G_Channel_0_RegTb/mt76x0_RF_EXT_PA_5G_Channel_0_RegTb */
-
- { MT_RF(6, 127), RF_G_BAND, 0x84},
- { MT_RF(6, 127), RF_A_BAND, 0x04},
-
- { MT_RF(7, 5), RF_G_BAND, 0x40},
- { MT_RF(7, 5), RF_A_BAND, 0x00},
-
- { MT_RF(7, 9), RF_G_BAND, 0x00},
- { MT_RF(7, 9), RF_A_BAND, 0x00},
-
- { MT_RF(7, 70), RF_G_BAND, 0x00},
- { MT_RF(7, 70), RF_A_BAND, 0x6D},
-
- { MT_RF(7, 71), RF_G_BAND, 0x00},
- { MT_RF(7, 71), RF_A_BAND, 0xB0},
-
- { MT_RF(7, 78), RF_G_BAND, 0x00},
- { MT_RF(7, 78), RF_A_BAND, 0x55},
-
- { MT_RF(7, 79), RF_G_BAND, 0x00},
- { MT_RF(7, 79), RF_A_BAND, 0x55},
+ /* bank, reg, bw/band, value */
+ { MT_RF(0, 16), RF_G_BAND, 0x20 },
+ { MT_RF(0, 16), RF_A_BAND, 0x20 },
+ { MT_RF(0, 18), RF_G_BAND, 0x00 },
+ { MT_RF(0, 18), RF_A_BAND, 0x00 },
+ { MT_RF(0, 39), RF_G_BAND, 0x36 },
+ { MT_RF(0, 39), RF_A_BAND_LB, 0x34 },
+ { MT_RF(0, 39), RF_A_BAND_MB, 0x33 },
+ { MT_RF(0, 39), RF_A_BAND_HB, 0x31 },
+ { MT_RF(0, 39), RF_A_BAND_11J, 0x36 },
+ { MT_RF(6, 12), RF_A_BAND_LB, 0x44 },
+ { MT_RF(6, 12), RF_A_BAND_MB, 0x44 },
+ { MT_RF(6, 12), RF_A_BAND_HB, 0x55 },
+ { MT_RF(6, 12), RF_A_BAND_11J, 0x44 },
+ { MT_RF(6, 17), RF_A_BAND_LB, 0x02 },
+ { MT_RF(6, 17), RF_A_BAND_MB, 0x00 },
+ { MT_RF(6, 17), RF_A_BAND_HB, 0x00 },
+ { MT_RF(6, 17), RF_A_BAND_11J, 0x05 },
+ { MT_RF(6, 24), RF_A_BAND_LB, 0xA1 },
+ { MT_RF(6, 24), RF_A_BAND_MB, 0x41 },
+ { MT_RF(6, 24), RF_A_BAND_HB, 0x21 },
+ { MT_RF(6, 24), RF_A_BAND_11J, 0xE1 },
+ { MT_RF(6, 39), RF_A_BAND_LB, 0x36 },
+ { MT_RF(6, 39), RF_A_BAND_MB, 0x34 },
+ { MT_RF(6, 39), RF_A_BAND_HB, 0x32 },
+ { MT_RF(6, 39), RF_A_BAND_11J, 0x37 },
+ { MT_RF(6, 42), RF_A_BAND_LB, 0xFB },
+ { MT_RF(6, 42), RF_A_BAND_MB, 0xF3 },
+ { MT_RF(6, 42), RF_A_BAND_HB, 0xEB },
+ { MT_RF(6, 42), RF_A_BAND_11J, 0xEB },
+ { MT_RF(6, 127), RF_G_BAND, 0x84 },
+ { MT_RF(6, 127), RF_A_BAND, 0x04 },
+ { MT_RF(7, 5), RF_G_BAND, 0x40 },
+ { MT_RF(7, 5), RF_A_BAND, 0x00 },
+ { MT_RF(7, 9), RF_G_BAND, 0x00 },
+ { MT_RF(7, 9), RF_A_BAND, 0x00 },
+ { MT_RF(7, 70), RF_G_BAND, 0x00 },
+ { MT_RF(7, 70), RF_A_BAND, 0x6D },
+ { MT_RF(7, 71), RF_G_BAND, 0x00 },
+ { MT_RF(7, 71), RF_A_BAND, 0xB0 },
+ { MT_RF(7, 78), RF_G_BAND, 0x00 },
+ { MT_RF(7, 78), RF_A_BAND, 0x55 },
+ { MT_RF(7, 79), RF_G_BAND, 0x00 },
+ { MT_RF(7, 79), RF_A_BAND, 0x55 },
};
static const struct mt76x0_freq_item mt76x0_frequency_plan[] = {
- {1, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2412 */
- {2, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1}, /* Freq 2417 */
- {3, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2422 */
- {4, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2427 */
- {5, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2432 */
- {6, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2437 */
- {7, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2442 */
- {8, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1}, /* Freq 2447 */
- {9, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2452 */
- {10, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0}, /* Freq 2457 */
- {11, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2462 */
- {12, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2467 */
- {13, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2472 */
- {14, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2484 */
-
- {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 4915 */
- {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4920 */
- {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4925 */
- {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4935 */
- {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4940 */
- {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4945 */
- {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4960 */
- {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4980 */
-
- {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5180 */
- {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5185 */
- {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5190 */
- {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5195 */
- {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5200 */
- {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5205 */
- {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5210 */
- {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5215 */
- {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5220 */
- {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5225 */
- {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5230 */
- {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5235 */
- {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5240 */
- {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5245 */
- {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5250 */
- {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5255 */
- {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5260 */
- {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5265 */
- {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5270 */
- {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5275 */
- {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5280 */
- {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5285 */
- {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5290 */
- {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5295 */
- {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5300 */
- {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5305 */
- {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5310 */
- {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5315 */
- {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5320 */
-
- {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5500 */
- {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5505 */
- {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5510 */
- {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5515 */
- {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5520 */
- {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5525 */
- {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5530 */
- {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5535 */
- {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5540 */
- {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5545 */
- {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5550 */
- {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5555 */
- {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5560 */
- {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5565 */
- {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5570 */
- {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5575 */
- {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5580 */
- {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5585 */
- {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5590 */
- {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5595 */
- {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5600 */
- {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5605 */
- {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5610 */
- {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5615 */
- {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5620 */
- {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5625 */
- {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5630 */
- {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5635 */
- {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5640 */
- {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5645 */
- {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5650 */
- {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5655 */
- {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5660 */
- {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5665 */
- {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5670 */
- {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5675 */
- {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5680 */
-
- {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5685 */
- {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5690 */
- {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5695 */
- {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5700 */
- {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5705 */
- {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5710 */
- {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5715 */
- {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5720 */
- {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5725 */
- {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5730 */
- {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5735 */
- {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5740 */
- {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5745 */
- {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5750 */
- {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5755 */
- {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5760 */
- {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5765 */
- {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5770 */
- {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5775 */
- {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5780 */
- {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5785 */
- {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5790 */
- {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5795 */
- {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5800 */
- {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5805 */
- {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5810 */
- {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5815 */
- {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5820 */
- {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5825 */
- {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5830 */
- {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5835 */
- {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5840 */
- {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5845 */
- {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5850 */
- {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5855 */
- {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5860 */
- {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5865 */
+ { 1, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2412 */
+ { 2, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1 }, /* Freq 2417 */
+ { 3, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0 }, /* Freq 2422 */
+ { 4, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0 }, /* Freq 2427 */
+ { 5, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1 }, /* Freq 2432 */
+ { 6, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1 }, /* Freq 2437 */
+ { 7, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2442 */
+ { 8, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1 }, /* Freq 2447 */
+ { 9, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2452 */
+ { 10, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0 }, /* Freq 2457 */
+ { 11, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1 }, /* Freq 2462 */
+ { 12, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1 }, /* Freq 2467 */
+ { 13, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 2472 */
+ { 14, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 2484 */
+ { 183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 4915 */
+ { 184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4920 */
+ { 185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4925 */
+ { 187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4935 */
+ { 188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4940 */
+ { 189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4945 */
+ { 192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4960 */
+ { 196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4980 */
+ { 36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5180 */
+ { 37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5185 */
+ { 38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5190 */
+ { 39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5195 */
+ { 40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5200 */
+ { 41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5205 */
+ { 42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5210 */
+ { 43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5215 */
+ { 44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5220 */
+ { 45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5225 */
+ { 46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5230 */
+ { 47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5235 */
+ { 48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5240 */
+ { 49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5245 */
+ { 50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5250 */
+ { 51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5255 */
+ { 52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5260 */
+ { 53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5265 */
+ { 54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5270 */
+ { 55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5275 */
+ { 56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5280 */
+ { 57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5285 */
+ { 58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5290 */
+ { 59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5295 */
+ { 60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5300 */
+ { 61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5305 */
+ { 62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5310 */
+ { 63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5315 */
+ { 64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5320 */
+ { 100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5500 */
+ { 101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5505 */
+ { 102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5510 */
+ { 103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5515 */
+ { 104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5520 */
+ { 105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5525 */
+ { 106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5530 */
+ { 107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5535 */
+ { 108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5540 */
+ { 109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5545 */
+ { 110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5550 */
+ { 111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5555 */
+ { 112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5560 */
+ { 113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5565 */
+ { 114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5570 */
+ { 115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5575 */
+ { 116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5580 */
+ { 117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5585 */
+ { 118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5590 */
+ { 119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5595 */
+ { 120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5600 */
+ { 121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5605 */
+ { 122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5610 */
+ { 123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5615 */
+ { 124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5620 */
+ { 125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5625 */
+ { 126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5630 */
+ { 127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5635 */
+ { 128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5640 */
+ { 129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5645 */
+ { 130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5650 */
+ { 131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5655 */
+ { 132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5660 */
+ { 133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5665 */
+ { 134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5670 */
+ { 135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5675 */
+ { 136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5680 */
+ { 137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5685 */
+ { 138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5690 */
+ { 139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5695 */
+ { 140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5700 */
+ { 141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5705 */
+ { 142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5710 */
+ { 143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5715 */
+ { 144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5720 */
+ { 145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5725 */
+ { 146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5730 */
+ { 147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5735 */
+ { 148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5740 */
+ { 149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5745 */
+ { 150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5750 */
+ { 151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5755 */
+ { 152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5760 */
+ { 153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5765 */
+ { 154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5770 */
+ { 155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5775 */
+ { 156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5780 */
+ { 157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5785 */
+ { 158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5790 */
+ { 159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5795 */
+ { 160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5800 */
+ { 161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5805 */
+ { 162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5810 */
+ { 163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5815 */
+ { 164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5820 */
+ { 165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5825 */
+ { 166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5830 */
+ { 167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5835 */
+ { 168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5840 */
+ { 169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5845 */
+ { 170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5850 */
+ { 171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5855 */
+ { 172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5860 */
+ { 173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5865 */
};
static const struct mt76x0_freq_item mt76x0_sdm_frequency_plan[] = {
- {1, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2412 */
- {2, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3}, /* Freq 2417 */
- {3, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3}, /* Freq 2422 */
- {4, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3}, /* Freq 2427 */
- {5, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3}, /* Freq 2432 */
- {6, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3}, /* Freq 2437 */
- {7, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3}, /* Freq 2442 */
- {8, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3}, /* Freq 2447 */
- {9, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3}, /* Freq 2452 */
- {10, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3}, /* Freq 2457 */
- {11, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2222, 0x3}, /* Freq 2462 */
- {12, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x7777, 0x3}, /* Freq 2467 */
- {13, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2472 */
- {14, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3}, /* Freq 2484 */
-
- {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 4915 */
- {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x0, 0x3}, /* Freq 4920 */
- {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2AAA, 0x3}, /* Freq 4925 */
- {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x8000, 0x3}, /* Freq 4935 */
- {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 4940 */
- {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 4945 */
- {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 4960 */
- {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 4980 */
-
- {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 5180 */
- {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 5185 */
- {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5190 */
- {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5195 */
- {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5200 */
- {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5205 */
- {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5210 */
- {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5215 */
- {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5220 */
- {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5225 */
- {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5230 */
- {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5235 */
- {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5240 */
- {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5245 */
- {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5250 */
- {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5255 */
- {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5260 */
- {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5265 */
- {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5270 */
- {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5275 */
- {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5280 */
- {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5285 */
- {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5290 */
- {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5295 */
- {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5300 */
- {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5305 */
- {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5310 */
- {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5315 */
- {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5320 */
-
- {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5500 */
- {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5505 */
- {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5510 */
- {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5515 */
- {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5520 */
- {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5525 */
- {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5530 */
- {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5535 */
- {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5540 */
- {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5545 */
- {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5550 */
- {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5555 */
- {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5560 */
- {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5565 */
- {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5570 */
- {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5575 */
- {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5580 */
- {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5585 */
- {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5590 */
- {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5595 */
- {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5600 */
- {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5605 */
- {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5610 */
- {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5615 */
- {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5620 */
- {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5625 */
- {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5630 */
- {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5635 */
- {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5640 */
- {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5645 */
- {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5650 */
- {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5655 */
- {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5660 */
- {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5665 */
- {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5670 */
- {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5675 */
- {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5680 */
-
- {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5685 */
- {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5690 */
- {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5695 */
- {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5700 */
- {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5705 */
- {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5710 */
- {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5715 */
- {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5720 */
- {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5725 */
- {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5730 */
- {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5735 */
- {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5740 */
- {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5745 */
- {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5750 */
- {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5755 */
- {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5760 */
- {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5765 */
- {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5770 */
- {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5775 */
- {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5780 */
- {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5785 */
- {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5790 */
- {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5795 */
- {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5800 */
- {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5805 */
- {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5810 */
- {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5815 */
- {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5820 */
- {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5825 */
- {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5830 */
- {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5835 */
- {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5840 */
- {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5845 */
- {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5850 */
- {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5855 */
- {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5860 */
- {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5865 */
+ { 1, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x0CCCC, 0x3 }, /* Freq 2412 */
+ { 2, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3 }, /* Freq 2417 */
+ { 3, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3 }, /* Freq 2422 */
+ { 4, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3 }, /* Freq 2427 */
+ { 5, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3 }, /* Freq 2432 */
+ { 6, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3 }, /* Freq 2437 */
+ { 7, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3 }, /* Freq 2442 */
+ { 8, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3 }, /* Freq 2447 */
+ { 9, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3 }, /* Freq 2452 */
+ { 10, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3 }, /* Freq 2457 */
+ { 11, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x02222, 0x3 }, /* Freq 2462 */
+ { 12, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x07777, 0x3 }, /* Freq 2467 */
+ { 13, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0CCCC, 0x3 }, /* Freq 2472 */
+ { 14, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3 }, /* Freq 2484 */
+ { 183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 4915 */
+ { 184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 4920 */
+ { 185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 4925 */
+ { 187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 4935 */
+ { 188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 4940 */
+ { 189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 4945 */
+ { 192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 4960 */
+ { 196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 4980 */
+ { 36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5180 */
+ { 37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5185 */
+ { 38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5190 */
+ { 39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5195 */
+ { 40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5200 */
+ { 41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5205 */
+ { 42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5210 */
+ { 43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5215 */
+ { 44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5220 */
+ { 45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5225 */
+ { 46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5230 */
+ { 47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5235 */
+ { 48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5240 */
+ { 49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5245 */
+ { 50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5250 */
+ { 51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5255 */
+ { 52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5260 */
+ { 53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5265 */
+ { 54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5270 */
+ { 55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5275 */
+ { 56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5280 */
+ { 57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5285 */
+ { 58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5290 */
+ { 59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5295 */
+ { 60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5300 */
+ { 61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5305 */
+ { 62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5310 */
+ { 63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5315 */
+ { 64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5320 */
+ { 100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5500 */
+ { 101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5505 */
+ { 102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5510 */
+ { 103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5515 */
+ { 104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5520 */
+ { 105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5525 */
+ { 106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5530 */
+ { 107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5535 */
+ { 108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5540 */
+ { 109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5545 */
+ { 110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5550 */
+ { 111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5555 */
+ { 112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5560 */
+ { 113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5565 */
+ { 114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5570 */
+ { 115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5575 */
+ { 116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5580 */
+ { 117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5585 */
+ { 118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5590 */
+ { 119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5595 */
+ { 120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5600 */
+ { 121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5605 */
+ { 122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5610 */
+ { 123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5615 */
+ { 124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5620 */
+ { 125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5625 */
+ { 126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5630 */
+ { 127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5635 */
+ { 128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5640 */
+ { 129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5645 */
+ { 130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5650 */
+ { 131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5655 */
+ { 132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5660 */
+ { 133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5665 */
+ { 134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5670 */
+ { 135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5675 */
+ { 136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5680 */
+ { 137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5685 */
+ { 138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5690 */
+ { 139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5695 */
+ { 140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5700 */
+ { 141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5705 */
+ { 142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5710 */
+ { 143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5715 */
+ { 144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5720 */
+ { 145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5725 */
+ { 146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5730 */
+ { 147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5735 */
+ { 148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5740 */
+ { 149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5745 */
+ { 150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5750 */
+ { 151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5755 */
+ { 152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5760 */
+ { 153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5765 */
+ { 154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5770 */
+ { 155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5775 */
+ { 156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5780 */
+ { 157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5785 */
+ { 158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5790 */
+ { 159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5795 */
+ { 160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5800 */
+ { 161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5805 */
+ { 162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5810 */
+ { 163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5815 */
+ { 164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5820 */
+ { 165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5825 */
+ { 166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5830 */
+ { 167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5835 */
+ { 168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5840 */
+ { 169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5845 */
+ { 170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5850 */
+ { 171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5855 */
+ { 172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5860 */
+ { 173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5865 */
};
static const u8 mt76x0_sdm_channel[] = {
- 183, 185, 43, 45, 54, 55, 57, 58, 102, 103, 105, 106, 115, 117, 126, 127, 129, 130, 139, 141, 150, 151, 153, 154, 163, 165
+ 183, 185, 43, 45,
+ 54, 55, 57, 58,
+ 102, 103, 105, 106,
+ 115, 117, 126, 127,
+ 129, 130, 139, 141,
+ 150, 151, 153, 154,
+ 163, 165
};
static const struct mt76x0_rf_switch_item mt76x0_rf_ext_pa_tab[] = {
- { MT_RF(6, 45), RF_A_BAND_LB, 0x63},
- { MT_RF(6, 45), RF_A_BAND_MB, 0x43},
- { MT_RF(6, 45), RF_A_BAND_HB, 0x33},
- { MT_RF(6, 45), RF_A_BAND_11J, 0x73},
-
- { MT_RF(6, 50), RF_A_BAND_LB, 0x02},
- { MT_RF(6, 50), RF_A_BAND_MB, 0x02},
- { MT_RF(6, 50), RF_A_BAND_HB, 0x02},
- { MT_RF(6, 50), RF_A_BAND_11J, 0x02},
-
- { MT_RF(6, 51), RF_A_BAND_LB, 0x02},
- { MT_RF(6, 51), RF_A_BAND_MB, 0x02},
- { MT_RF(6, 51), RF_A_BAND_HB, 0x02},
- { MT_RF(6, 51), RF_A_BAND_11J, 0x02},
-
- { MT_RF(6, 52), RF_A_BAND_LB, 0x08},
- { MT_RF(6, 52), RF_A_BAND_MB, 0x08},
- { MT_RF(6, 52), RF_A_BAND_HB, 0x08},
- { MT_RF(6, 52), RF_A_BAND_11J, 0x08},
-
- { MT_RF(6, 53), RF_A_BAND_LB, 0x08},
- { MT_RF(6, 53), RF_A_BAND_MB, 0x08},
- { MT_RF(6, 53), RF_A_BAND_HB, 0x08},
- { MT_RF(6, 53), RF_A_BAND_11J, 0x08},
-
- { MT_RF(6, 54), RF_A_BAND_LB, 0x0A},
- { MT_RF(6, 54), RF_A_BAND_MB, 0x0A},
- { MT_RF(6, 54), RF_A_BAND_HB, 0x0A},
- { MT_RF(6, 54), RF_A_BAND_11J, 0x0A},
-
- { MT_RF(6, 55), RF_A_BAND_LB, 0x0A},
- { MT_RF(6, 55), RF_A_BAND_MB, 0x0A},
- { MT_RF(6, 55), RF_A_BAND_HB, 0x0A},
- { MT_RF(6, 55), RF_A_BAND_11J, 0x0A},
-
- { MT_RF(6, 56), RF_A_BAND_LB, 0x05},
- { MT_RF(6, 56), RF_A_BAND_MB, 0x05},
- { MT_RF(6, 56), RF_A_BAND_HB, 0x05},
- { MT_RF(6, 56), RF_A_BAND_11J, 0x05},
-
- { MT_RF(6, 57), RF_A_BAND_LB, 0x05},
- { MT_RF(6, 57), RF_A_BAND_MB, 0x05},
- { MT_RF(6, 57), RF_A_BAND_HB, 0x05},
- { MT_RF(6, 57), RF_A_BAND_11J, 0x05},
-
- { MT_RF(6, 58), RF_A_BAND_LB, 0x05},
- { MT_RF(6, 58), RF_A_BAND_MB, 0x03},
- { MT_RF(6, 58), RF_A_BAND_HB, 0x02},
- { MT_RF(6, 58), RF_A_BAND_11J, 0x07},
-
- { MT_RF(6, 59), RF_A_BAND_LB, 0x05},
- { MT_RF(6, 59), RF_A_BAND_MB, 0x03},
- { MT_RF(6, 59), RF_A_BAND_HB, 0x02},
- { MT_RF(6, 59), RF_A_BAND_11J, 0x07},
+ { MT_RF(6, 45), RF_A_BAND_LB, 0x63 },
+ { MT_RF(6, 45), RF_A_BAND_MB, 0x43 },
+ { MT_RF(6, 45), RF_A_BAND_HB, 0x33 },
+ { MT_RF(6, 45), RF_A_BAND_11J, 0x73 },
+ { MT_RF(6, 50), RF_A_BAND_LB, 0x02 },
+ { MT_RF(6, 50), RF_A_BAND_MB, 0x02 },
+ { MT_RF(6, 50), RF_A_BAND_HB, 0x02 },
+ { MT_RF(6, 50), RF_A_BAND_11J, 0x02 },
+ { MT_RF(6, 51), RF_A_BAND_LB, 0x02 },
+ { MT_RF(6, 51), RF_A_BAND_MB, 0x02 },
+ { MT_RF(6, 51), RF_A_BAND_HB, 0x02 },
+ { MT_RF(6, 51), RF_A_BAND_11J, 0x02 },
+ { MT_RF(6, 52), RF_A_BAND_LB, 0x08 },
+ { MT_RF(6, 52), RF_A_BAND_MB, 0x08 },
+ { MT_RF(6, 52), RF_A_BAND_HB, 0x08 },
+ { MT_RF(6, 52), RF_A_BAND_11J, 0x08 },
+ { MT_RF(6, 53), RF_A_BAND_LB, 0x08 },
+ { MT_RF(6, 53), RF_A_BAND_MB, 0x08 },
+ { MT_RF(6, 53), RF_A_BAND_HB, 0x08 },
+ { MT_RF(6, 53), RF_A_BAND_11J, 0x08 },
+ { MT_RF(6, 54), RF_A_BAND_LB, 0x0A },
+ { MT_RF(6, 54), RF_A_BAND_MB, 0x0A },
+ { MT_RF(6, 54), RF_A_BAND_HB, 0x0A },
+ { MT_RF(6, 54), RF_A_BAND_11J, 0x0A },
+ { MT_RF(6, 55), RF_A_BAND_LB, 0x0A },
+ { MT_RF(6, 55), RF_A_BAND_MB, 0x0A },
+ { MT_RF(6, 55), RF_A_BAND_HB, 0x0A },
+ { MT_RF(6, 55), RF_A_BAND_11J, 0x0A },
+ { MT_RF(6, 56), RF_A_BAND_LB, 0x05 },
+ { MT_RF(6, 56), RF_A_BAND_MB, 0x05 },
+ { MT_RF(6, 56), RF_A_BAND_HB, 0x05 },
+ { MT_RF(6, 56), RF_A_BAND_11J, 0x05 },
+ { MT_RF(6, 57), RF_A_BAND_LB, 0x05 },
+ { MT_RF(6, 57), RF_A_BAND_MB, 0x05 },
+ { MT_RF(6, 57), RF_A_BAND_HB, 0x05 },
+ { MT_RF(6, 57), RF_A_BAND_11J, 0x05 },
+ { MT_RF(6, 58), RF_A_BAND_LB, 0x05 },
+ { MT_RF(6, 58), RF_A_BAND_MB, 0x03 },
+ { MT_RF(6, 58), RF_A_BAND_HB, 0x02 },
+ { MT_RF(6, 58), RF_A_BAND_11J, 0x07 },
+ { MT_RF(6, 59), RF_A_BAND_LB, 0x05 },
+ { MT_RF(6, 59), RF_A_BAND_MB, 0x03 },
+ { MT_RF(6, 59), RF_A_BAND_HB, 0x02 },
+ { MT_RF(6, 59), RF_A_BAND_11J, 0x07 },
};
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
deleted file mode 100644
index 7a422c590211..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/etherdevice.h>
-
-#include "mt76x0.h"
-#include "trace.h"
-
-void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
- int ht_mode)
-{
- int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION;
- bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
- u32 prot[6];
- bool ht_rts[4] = {};
- int i;
-
- prot[0] = MT_PROT_NAV_SHORT |
- MT_PROT_TXOP_ALLOW_ALL |
- MT_PROT_RTS_THR_EN;
- prot[1] = prot[0];
- if (legacy_prot)
- prot[1] |= MT_PROT_CTRL_CTS2SELF;
-
- prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;
- prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;
-
- if (legacy_prot) {
- prot[2] |= MT_PROT_RATE_CCK_11;
- prot[3] |= MT_PROT_RATE_CCK_11;
- prot[4] |= MT_PROT_RATE_CCK_11;
- prot[5] |= MT_PROT_RATE_CCK_11;
- } else {
- prot[2] |= MT_PROT_RATE_OFDM_24;
- prot[3] |= MT_PROT_RATE_DUP_OFDM_24;
- prot[4] |= MT_PROT_RATE_OFDM_24;
- prot[5] |= MT_PROT_RATE_DUP_OFDM_24;
- }
-
- switch (mode) {
- case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
- break;
-
- case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
- ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
- break;
-
- case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
- ht_rts[1] = ht_rts[3] = true;
- break;
-
- case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
- ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true;
- break;
- }
-
- if (non_gf)
- ht_rts[2] = ht_rts[3] = true;
-
- for (i = 0; i < 4; i++)
- if (ht_rts[i])
- prot[i + 2] |= MT_PROT_CTRL_RTS_CTS;
-
- for (i = 0; i < 6; i++)
- mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]);
-}
-
-void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb)
-{
- if (short_preamb)
- mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
- else
- mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
-}
-
-void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval)
-{
- u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG);
-
- val &= ~(MT_BEACON_TIME_CFG_TIMER_EN |
- MT_BEACON_TIME_CFG_SYNC_MODE |
- MT_BEACON_TIME_CFG_TBTT_EN);
-
- if (!enable) {
- mt76_wr(dev, MT_BEACON_TIME_CFG, val);
- return;
- }
-
- val &= ~MT_BEACON_TIME_CFG_INTVAL;
- val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) |
- MT_BEACON_TIME_CFG_TIMER_EN |
- MT_BEACON_TIME_CFG_SYNC_MODE |
- MT_BEACON_TIME_CFG_TBTT_EN;
-}
-
-static void mt76x0_check_mac_err(struct mt76x02_dev *dev)
-{
- u32 val = mt76_rr(dev, 0x10f4);
-
- if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
- return;
-
- dev_err(dev->mt76.dev, "Error: MAC specific condition occurred\n");
-
- mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
- udelay(10);
- mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
-}
-void mt76x0_mac_work(struct work_struct *work)
-{
- struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
- mac_work.work);
- struct {
- u32 addr_base;
- u32 span;
- u64 *stat_base;
- } spans[] = {
- { MT_RX_STAT_0, 3, dev->stats.rx_stat },
- { MT_TX_STA_0, 3, dev->stats.tx_stat },
- { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat },
- { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del },
- { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] },
- { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] },
- };
- u32 sum, n;
- int i, j, k;
-
- /* Note: using MCU_RANDOM_READ is actually slower than reading all the
- * registers by hand. MCU takes ca. 20ms to complete a read of 24
- * registers while reading them one by one will take roughly
- * 24*200us =~ 5ms.
- */
-
- k = 0;
- n = 0;
- sum = 0;
- for (i = 0; i < ARRAY_SIZE(spans); i++)
- for (j = 0; j < spans[i].span; j++) {
- u32 val = mt76_rr(dev, spans[i].addr_base + j * 4);
-
- spans[i].stat_base[j * 2] += val & 0xffff;
- spans[i].stat_base[j * 2 + 1] += val >> 16;
-
- /* Calculate average AMPDU length */
- if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 &&
- spans[i].addr_base != MT_TX_AGG_CNT_BASE1)
- continue;
-
- n += (val >> 16) + (val & 0xffff);
- sum += (val & 0xffff) * (1 + k * 2) +
- (val >> 16) * (2 + k * 2);
- k++;
- }
-
- atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1);
-
- mt76x0_check_mac_err(dev);
-
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ);
-}
-
-void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev)
-{
- struct ieee80211_sta *sta;
- struct mt76_wcid *wcid;
- void *msta;
- u8 min_factor = 3;
- int i;
-
- rcu_read_lock();
- for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
- wcid = rcu_dereference(dev->mt76.wcid[i]);
- if (!wcid)
- continue;
-
- msta = container_of(wcid, struct mt76x02_sta, wcid);
- sta = container_of(msta, struct ieee80211_sta, drv_priv);
-
- min_factor = min(min_factor, sta->ht_cap.ampdu_factor);
- }
- rcu_read_unlock();
-
- mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff |
- FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor));
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index 9273d2d2764a..a803a9b6a4c5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -22,9 +22,23 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
int ret;
cancel_delayed_work_sync(&dev->cal_work);
+ if (mt76_is_mmio(dev)) {
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->dfs_pd.dfs_tasklet);
+ }
mt76_set_channel(&dev->mt76);
ret = mt76x0_phy_set_channel(dev, chandef);
+
+ /* channel cycle counters read-and-clear */
+ mt76_rr(dev, MT_CH_IDLE);
+ mt76_rr(dev, MT_CH_BUSY);
+
+ if (mt76_is_mmio(dev)) {
+ mt76x02_dfs_init_params(dev);
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+ tasklet_enable(&dev->dfs_pd.dfs_tasklet);
+ }
mt76_txq_schedule_all(&dev->mt76);
return ret;
@@ -64,89 +78,3 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
return ret;
}
EXPORT_SYMBOL_GPL(mt76x0_config);
-
-static void
-mt76x0_addr_wr(struct mt76x02_dev *dev, const u32 offset, const u8 *addr)
-{
- mt76_wr(dev, offset, get_unaligned_le32(addr));
- mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8);
-}
-
-void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- mutex_lock(&dev->mt76.mutex);
-
- if (changed & BSS_CHANGED_BSSID) {
- mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid);
-
- /* Note: this is a hack because beacon_int is not changed
- * on leave, nor is any more appropriate event generated.
- * rt2x00 doesn't seem to be bothered though.
- */
- if (is_zero_ether_addr(info->bssid))
- mt76x0_mac_config_tsf(dev, false, 0);
- }
-
- if (changed & BSS_CHANGED_BASIC_RATES) {
- mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates);
- mt76_wr(dev, MT_VHT_HT_FBK_CFG0, 0x65432100);
- mt76_wr(dev, MT_VHT_HT_FBK_CFG1, 0xedcba980);
- mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988);
- mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100);
- }
-
- if (changed & BSS_CHANGED_BEACON_INT)
- mt76x0_mac_config_tsf(dev, true, info->beacon_int);
-
- if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT)
- mt76x0_mac_set_protection(dev, info->use_cts_prot,
- info->ht_operation_mode);
-
- if (changed & BSS_CHANGED_ERP_PREAMBLE)
- mt76x0_mac_set_short_preamble(dev, info->use_short_preamble);
-
- if (changed & BSS_CHANGED_ERP_SLOT) {
- int slottime = info->use_short_slot ? 9 : 20;
-
- mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
- MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
- }
-
- if (changed & BSS_CHANGED_ASSOC)
- mt76x0_phy_recalibrate_after_assoc(dev);
-
- mutex_unlock(&dev->mt76.mutex);
-}
-EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed);
-
-void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- const u8 *mac_addr)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- set_bit(MT76_SCANNING, &dev->mt76.state);
-}
-EXPORT_SYMBOL_GPL(mt76x0_sw_scan);
-
-void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- clear_bit(MT76_SCANNING, &dev->mt76.state);
-}
-EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete);
-
-int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt76x0_set_rts_threshold);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
index 2187bafaf2e9..46629f61673b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h
@@ -28,18 +28,26 @@
#include "../mt76x02.h"
#include "eeprom.h"
-#define MT_CALIBRATE_INTERVAL (4 * HZ)
+#define MT7610E_FIRMWARE "mediatek/mt7610e.bin"
+#define MT7650E_FIRMWARE "mediatek/mt7650e.bin"
+
+#define MT7610U_FIRMWARE "mediatek/mt7610u.bin"
#define MT_USB_AGGR_SIZE_LIMIT 21 /* * 1024B */
#define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */
static inline bool is_mt7610e(struct mt76x02_dev *dev)
{
- /* TODO */
- return false;
+ if (!mt76_is_mmio(dev))
+ return false;
+
+ return mt76_chip(&dev->mt76) == 0x7610;
}
-void mt76x0_init_debugfs(struct mt76x02_dev *dev);
+static inline bool is_mt7630(struct mt76x02_dev *dev)
+{
+ return mt76_chip(&dev->mt76) == 0x7630;
+}
/* Init */
struct mt76x02_dev *
@@ -54,30 +62,12 @@ int mt76x0_mac_start(struct mt76x02_dev *dev);
void mt76x0_mac_stop(struct mt76x02_dev *dev);
int mt76x0_config(struct ieee80211_hw *hw, u32 changed);
-void mt76x0_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed);
-void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- const u8 *mac_addr);
-void mt76x0_sw_scan_complete(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
/* PHY */
void mt76x0_phy_init(struct mt76x02_dev *dev);
-int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev);
+int mt76x0_phy_wait_bbp_ready(struct mt76x02_dev *dev);
int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef);
-void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev);
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev);
void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on);
-
-/* MAC */
-void mt76x0_mac_work(struct work_struct *work);
-void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot,
- int ht_mode);
-void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb);
-void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval);
-void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev);
-
#endif
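With the TODO replaced by a real MMIO/chip-ID check, per-chip quirks can be keyed off these predicates. A minimal usage sketch (helper name hypothetical; the MT_CMB_CTRL bits are taken from the mt76x0_phy_ant_select() rewrite later in this patch):

static void mt76x0_apply_mt7630_quirks(struct mt76x02_dev *dev)
{
	if (!is_mt7630(dev))
		return;

	/* the MT7630 combo variant needs extra combiner-control bits */
	mt76_set(dev, MT_CMB_CTRL, BIT(14) | BIT(11));
}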
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 522c86059bcb..d895b6f3dc44 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -68,6 +68,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
mutex_unlock(&dev->mt76.mutex);
}
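+/* Placeholder callbacks: bodies intentionally empty for now, presumably
+ * because the hardware drains the TX queues and keeps the beacon TIM
+ * current on its own; wiring them up lets mac80211 use the hooks.
+ */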
+static void
+mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+}
+
+static int
+mt76x0e_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set)
+{
+ return 0;
+}
+
static const struct ieee80211_ops mt76x0e_ops = {
.tx = mt76x02_tx,
.start = mt76x0e_start,
@@ -76,15 +89,22 @@ static const struct ieee80211_ops mt76x0e_ops = {
.remove_interface = mt76x02_remove_interface,
.config = mt76x0_config,
.configure_filter = mt76x02_configure_filter,
- .sta_add = mt76x02_sta_add,
- .sta_remove = mt76x02_sta_remove,
+ .bss_info_changed = mt76x02_bss_info_changed,
+ .sta_state = mt76_sta_state,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
- .sw_scan_start = mt76x0_sw_scan,
- .sw_scan_complete = mt76x0_sw_scan_complete,
+ .sw_scan_start = mt76x02_sw_scan,
+ .sw_scan_complete = mt76x02_sw_scan_complete,
.ampdu_action = mt76x02_ampdu_action,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.wake_tx_queue = mt76_wake_tx_queue,
+ .get_survey = mt76_get_survey,
+ .get_txpower = mt76x02_get_txpower,
+ .flush = mt76x0e_flush,
+ .set_tim = mt76x0e_set_tim,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .set_coverage_class = mt76x02_set_coverage_class,
+ .set_rts_threshold = mt76x02_set_rts_threshold,
};
static int mt76x0e_register_device(struct mt76x02_dev *dev)
@@ -135,10 +155,14 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
+ .update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
.rx_poll_complete = mt76x02_rx_poll_complete,
+ .sta_ps = mt76x02_sta_ps,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
};
struct mt76x02_dev *dev;
int ret;
@@ -185,6 +209,7 @@ error:
static void mt76x0e_cleanup(struct mt76x02_dev *dev)
{
clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+ tasklet_disable(&dev->pre_tbtt_tasklet);
mt76x0_chip_onoff(dev, false, false);
mt76x0e_stop_hw(dev);
mt76x02_dma_cleanup(dev);
@@ -209,6 +234,8 @@ static const struct pci_device_id mt76x0e_device_table[] = {
};
MODULE_DEVICE_TABLE(pci, mt76x0e_device_table);
+MODULE_FIRMWARE(MT7610E_FIRMWARE);
+MODULE_FIRMWARE(MT7650E_FIRMWARE);
MODULE_LICENSE("Dual BSD/GPL");
static struct pci_driver mt76x0e_driver = {
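Station handling moves from per-driver mac80211 callbacks into mt76_driver_ops, with the common mt76_sta_state() doing the dispatch. A hedged sketch of that dispatch (exact body assumed, not taken from this patch):

int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_dev *dev = hw->priv;

	/* NOTEXIST -> NONE: allocate the station via the driver hook */
	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return dev->drv->sta_add(dev, vif, sta);

	/* NONE -> NOTEXIST: tear the station down again */
	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		dev->drv->sta_remove(dev, vif, sta);

	return 0;
}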
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
index 569861289aa5..490c1869f2c4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c
@@ -19,9 +19,6 @@
#include "mt76x0.h"
#include "mcu.h"
-#define MT7610E_FIRMWARE "mediatek/mt7610e.bin"
-#define MT7650E_FIRMWARE "mediatek/mt7650e.bin"
-
#define MT_MCU_IVB_ADDR (MT_MCU_ILM_ADDR + 0x54000 - MT_MCU_IVB_SIZE)
static int mt76x0e_load_firmware(struct mt76x02_dev *dev)
@@ -130,7 +127,6 @@ out:
int mt76x0e_mcu_init(struct mt76x02_dev *dev)
{
static const struct mt76_mcu_ops mt76x0e_mcu_ops = {
- .mcu_msg_alloc = mt76x02_mcu_msg_alloc,
.mcu_send_msg = mt76x02_mcu_msg_send,
};
int err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index cf024950e0ed..1eb1a802ed20 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -20,7 +20,6 @@
#include "mt76x0.h"
#include "mcu.h"
#include "eeprom.h"
-#include "trace.h"
#include "phy.h"
#include "initvals.h"
#include "initvals_phy.h"
@@ -49,12 +48,12 @@ mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value)
}
mt76_wr(dev, MT_RF_CSR_CFG,
- FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
- FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
- FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
- MT_RF_CSR_CFG_WR |
- MT_RF_CSR_CFG_KICK);
- trace_mt76x0_rf_write(&dev->mt76, bank, offset, value);
+ FIELD_PREP(MT_RF_CSR_CFG_DATA, value) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_WR |
+ MT_RF_CSR_CFG_KICK);
+
out:
mutex_unlock(&dev->phy_mutex);
@@ -86,19 +85,18 @@ static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset)
goto out;
mt76_wr(dev, MT_RF_CSR_CFG,
- FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
- FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
- MT_RF_CSR_CFG_KICK);
+ FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) |
+ FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) |
+ MT_RF_CSR_CFG_KICK);
if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100))
goto out;
val = mt76_rr(dev, MT_RF_CSR_CFG);
if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg &&
- FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) {
+ FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank)
ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val);
- trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret);
- }
+
out:
mutex_unlock(&dev->phy_mutex);
@@ -110,7 +108,7 @@ out:
}
static int
-rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
+mt76x0_rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
{
if (mt76_is_usb(dev)) {
struct mt76_reg_pair pair = {
@@ -126,8 +124,7 @@ rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val)
}
}
-static int
-rf_rr(struct mt76x02_dev *dev, u32 offset)
+static int mt76x0_rf_rr(struct mt76x02_dev *dev, u32 offset)
{
int ret;
u32 val;
@@ -149,38 +146,36 @@ rf_rr(struct mt76x02_dev *dev, u32 offset)
}
static int
-rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val)
+mt76x0_rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val)
{
int ret;
- ret = rf_rr(dev, offset);
+ ret = mt76x0_rf_rr(dev, offset);
if (ret < 0)
return ret;
+
val |= ret & ~mask;
- ret = rf_wr(dev, offset, val);
- if (ret)
- return ret;
- return val;
+ ret = mt76x0_rf_wr(dev, offset, val);
+ return ret ? ret : val;
}
static int
-rf_set(struct mt76x02_dev *dev, u32 offset, u8 val)
+mt76x0_rf_set(struct mt76x02_dev *dev, u32 offset, u8 val)
{
- return rf_rmw(dev, offset, 0, val);
+ return mt76x0_rf_rmw(dev, offset, 0, val);
}
-#if 0
static int
-rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask)
+mt76x0_rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask)
{
- return rf_rmw(dev, offset, mask, 0);
+ return mt76x0_rf_rmw(dev, offset, mask, 0);
}
-#endif
static void
-mt76x0_rf_csr_wr_rp(struct mt76x02_dev *dev, const struct mt76_reg_pair *data,
- int n)
+mt76x0_phy_rf_csr_wr_rp(struct mt76x02_dev *dev,
+ const struct mt76_reg_pair *data,
+ int n)
{
while (n-- > 0) {
mt76x0_rf_csr_wr(dev, data->reg, data->value);
@@ -190,12 +185,12 @@ mt76x0_rf_csr_wr_rp(struct mt76x02_dev *dev, const struct mt76_reg_pair *data,
#define RF_RANDOM_WRITE(dev, tab) do { \
if (mt76_is_mmio(dev)) \
- mt76x0_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \
+ mt76x0_phy_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \
else \
mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\
} while (0)
-int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
+int mt76x0_phy_wait_bbp_ready(struct mt76x02_dev *dev)
{
int i = 20;
u32 val;
@@ -215,62 +210,6 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev)
return 0;
}
-static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel)
-{
- u8 val;
-
- val = rf_rr(dev, MT_RF(0, 4));
- if ((val & 0x70) != 0x30)
- return;
-
- /*
- * Calibration Mode - Open loop, closed loop, and amplitude:
- * B0.R06.[0]: 1
- * B0.R06.[3:1] bp_close_code: 100
- * B0.R05.[7:0] bp_open_code: 0x0
- * B0.R04.[2:0] cal_bits: 000
- * B0.R03.[2:0] startup_time: 011
- * B0.R03.[6:4] settle_time:
- * 80MHz channel: 110
- * 40MHz channel: 101
- * 20MHz channel: 100
- */
- val = rf_rr(dev, MT_RF(0, 6));
- val &= ~0xf;
- val |= 0x09;
- rf_wr(dev, MT_RF(0, 6), val);
-
- val = rf_rr(dev, MT_RF(0, 5));
- if (val != 0)
- rf_wr(dev, MT_RF(0, 5), 0x0);
-
- val = rf_rr(dev, MT_RF(0, 4));
- val &= ~0x07;
- rf_wr(dev, MT_RF(0, 4), val);
-
- val = rf_rr(dev, MT_RF(0, 3));
- val &= ~0x77;
- if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) {
- val |= 0x63;
- } else if (channel == 3 || channel == 4 || channel == 10) {
- val |= 0x53;
- } else if (channel == 2 || channel == 5 || channel == 6 ||
- channel == 8 || channel == 11 || channel == 12) {
- val |= 0x43;
- } else {
- WARN(1, "Unknown channel %u\n", channel);
- return;
- }
- rf_wr(dev, MT_RF(0, 3), val);
-
- /* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */
- val = rf_rr(dev, MT_RF(0, 4));
- val = ((val & ~(0x80)) | 0x80);
- rf_wr(dev, MT_RF(0, 4), val);
-
- msleep(2);
-}
-
static void
mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
{
@@ -278,8 +217,8 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
case NL80211_BAND_2GHZ:
RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
- rf_wr(dev, MT_RF(5, 0), 0x45);
- rf_wr(dev, MT_RF(6, 0), 0x44);
+ mt76x0_rf_wr(dev, MT_RF(5, 0), 0x45);
+ mt76x0_rf_wr(dev, MT_RF(6, 0), 0x44);
mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007);
mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002);
@@ -287,8 +226,8 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
case NL80211_BAND_5GHZ:
RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
- rf_wr(dev, MT_RF(5, 0), 0x44);
- rf_wr(dev, MT_RF(6, 0), 0x45);
+ mt76x0_rf_wr(dev, MT_RF(5, 0), 0x44);
+ mt76x0_rf_wr(dev, MT_RF(6, 0), 0x45);
mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005);
mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102);
@@ -301,18 +240,17 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band)
static void
mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band)
{
+ const struct mt76x0_freq_item *freq_item;
u16 rf_band = rf_bw_band & 0xff00;
u16 rf_bw = rf_bw_band & 0x00ff;
enum nl80211_band band;
+ bool b_sdm = false;
u32 mac_reg;
- u8 rf_val;
int i;
- bool bSDM = false;
- const struct mt76x0_freq_item *freq_item;
for (i = 0; i < ARRAY_SIZE(mt76x0_sdm_channel); i++) {
if (channel == mt76x0_sdm_channel[i]) {
- bSDM = true;
+ b_sdm = true;
break;
}
}
@@ -321,108 +259,84 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban
if (channel == mt76x0_frequency_plan[i].channel) {
rf_band = mt76x0_frequency_plan[i].band;
- if (bSDM)
+ if (b_sdm)
freq_item = &(mt76x0_sdm_frequency_plan[i]);
else
freq_item = &(mt76x0_frequency_plan[i]);
- rf_wr(dev, MT_RF(0, 37), freq_item->pllR37);
- rf_wr(dev, MT_RF(0, 36), freq_item->pllR36);
- rf_wr(dev, MT_RF(0, 35), freq_item->pllR35);
- rf_wr(dev, MT_RF(0, 34), freq_item->pllR34);
- rf_wr(dev, MT_RF(0, 33), freq_item->pllR33);
+ mt76x0_rf_wr(dev, MT_RF(0, 37), freq_item->pllR37);
+ mt76x0_rf_wr(dev, MT_RF(0, 36), freq_item->pllR36);
+ mt76x0_rf_wr(dev, MT_RF(0, 35), freq_item->pllR35);
+ mt76x0_rf_wr(dev, MT_RF(0, 34), freq_item->pllR34);
+ mt76x0_rf_wr(dev, MT_RF(0, 33), freq_item->pllR33);
- rf_val = rf_rr(dev, MT_RF(0, 32));
- rf_val &= ~0xE0;
- rf_val |= freq_item->pllR32_b7b5;
- rf_wr(dev, MT_RF(0, 32), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 32), 0xe0,
+ freq_item->pllR32_b7b5);
 			/* R32<4:0> pll_den: (Denominator - 8) */
- rf_val = rf_rr(dev, MT_RF(0, 32));
- rf_val &= ~0x1F;
- rf_val |= freq_item->pllR32_b4b0;
- rf_wr(dev, MT_RF(0, 32), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 32), MT_RF_PLL_DEN_MASK,
+ freq_item->pllR32_b4b0);
/* R31<7:5> */
- rf_val = rf_rr(dev, MT_RF(0, 31));
- rf_val &= ~0xE0;
- rf_val |= freq_item->pllR31_b7b5;
- rf_wr(dev, MT_RF(0, 31), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 31), 0xe0,
+ freq_item->pllR31_b7b5);
/* R31<4:0> pll_k(Nominator) */
- rf_val = rf_rr(dev, MT_RF(0, 31));
- rf_val &= ~0x1F;
- rf_val |= freq_item->pllR31_b4b0;
- rf_wr(dev, MT_RF(0, 31), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 31), MT_RF_PLL_K_MASK,
+ freq_item->pllR31_b4b0);
/* R30<7> sdm_reset_n */
- rf_val = rf_rr(dev, MT_RF(0, 30));
- rf_val &= ~0x80;
- if (bSDM) {
- rf_wr(dev, MT_RF(0, 30), rf_val);
- rf_val |= 0x80;
- rf_wr(dev, MT_RF(0, 30), rf_val);
+ if (b_sdm) {
+ mt76x0_rf_clear(dev, MT_RF(0, 30),
+ MT_RF_SDM_RESET_MASK);
+ mt76x0_rf_set(dev, MT_RF(0, 30),
+ MT_RF_SDM_RESET_MASK);
} else {
- rf_val |= freq_item->pllR30_b7;
- rf_wr(dev, MT_RF(0, 30), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 30),
+ MT_RF_SDM_RESET_MASK,
+ freq_item->pllR30_b7);
}
/* R30<6:2> sdmmash_prbs,sin */
- rf_val = rf_rr(dev, MT_RF(0, 30));
- rf_val &= ~0x7C;
- rf_val |= freq_item->pllR30_b6b2;
- rf_wr(dev, MT_RF(0, 30), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 30),
+ MT_RF_SDM_MASH_PRBS_MASK,
+ freq_item->pllR30_b6b2);
/* R30<1> sdm_bp */
- rf_val = rf_rr(dev, MT_RF(0, 30));
- rf_val &= ~0x02;
- rf_val |= (freq_item->pllR30_b1 << 1);
- rf_wr(dev, MT_RF(0, 30), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 30), MT_RF_SDM_BP_MASK,
+ freq_item->pllR30_b1 << 1);
/* R30<0> R29<7:0> (hex) pll_n */
- rf_val = freq_item->pll_n & 0x00FF;
- rf_wr(dev, MT_RF(0, 29), rf_val);
+ mt76x0_rf_wr(dev, MT_RF(0, 29),
+ freq_item->pll_n & 0xff);
- rf_val = rf_rr(dev, MT_RF(0, 30));
- rf_val &= ~0x1;
- rf_val |= ((freq_item->pll_n >> 8) & 0x0001);
- rf_wr(dev, MT_RF(0, 30), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 30), 0x1,
+ (freq_item->pll_n >> 8) & 0x1);
/* R28<7:6> isi_iso */
- rf_val = rf_rr(dev, MT_RF(0, 28));
- rf_val &= ~0xC0;
- rf_val |= freq_item->pllR28_b7b6;
- rf_wr(dev, MT_RF(0, 28), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_ISI_ISO_MASK,
+ freq_item->pllR28_b7b6);
/* R28<5:4> pfd_dly */
- rf_val = rf_rr(dev, MT_RF(0, 28));
- rf_val &= ~0x30;
- rf_val |= freq_item->pllR28_b5b4;
- rf_wr(dev, MT_RF(0, 28), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_PFD_DLY_MASK,
+ freq_item->pllR28_b5b4);
/* R28<3:2> clksel option */
- rf_val = rf_rr(dev, MT_RF(0, 28));
- rf_val &= ~0x0C;
- rf_val |= freq_item->pllR28_b3b2;
- rf_wr(dev, MT_RF(0, 28), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_CLK_SEL_MASK,
+ freq_item->pllR28_b3b2);
/* R28<1:0> R27<7:0> R26<7:0> (hex) sdm_k */
- rf_val = freq_item->pll_sdm_k & 0x000000FF;
- rf_wr(dev, MT_RF(0, 26), rf_val);
-
- rf_val = ((freq_item->pll_sdm_k >> 8) & 0x000000FF);
- rf_wr(dev, MT_RF(0, 27), rf_val);
+ mt76x0_rf_wr(dev, MT_RF(0, 26),
+ freq_item->pll_sdm_k & 0xff);
+ mt76x0_rf_wr(dev, MT_RF(0, 27),
+ (freq_item->pll_sdm_k >> 8) & 0xff);
- rf_val = rf_rr(dev, MT_RF(0, 28));
- rf_val &= ~0x3;
- rf_val |= ((freq_item->pll_sdm_k >> 16) & 0x0003);
- rf_wr(dev, MT_RF(0, 28), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 28), 0x3,
+ (freq_item->pll_sdm_k >> 16) & 0x3);
/* R24<1:0> xo_div */
- rf_val = rf_rr(dev, MT_RF(0, 24));
- rf_val &= ~0x3;
- rf_val |= freq_item->pllR24_b1b0;
- rf_wr(dev, MT_RF(0, 24), rf_val);
+ mt76x0_rf_rmw(dev, MT_RF(0, 24), MT_RF_XO_DIV_MASK,
+ freq_item->pllR24_b1b0);
break;
}
@@ -430,25 +344,26 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban
for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) {
if (rf_bw == mt76x0_rf_bw_switch_tab[i].bw_band) {
- rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
- mt76x0_rf_bw_switch_tab[i].value);
+ mt76x0_rf_wr(dev,
+ mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
} else if ((rf_bw == (mt76x0_rf_bw_switch_tab[i].bw_band & 0xFF)) &&
(rf_band & mt76x0_rf_bw_switch_tab[i].bw_band)) {
- rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
- mt76x0_rf_bw_switch_tab[i].value);
+ mt76x0_rf_wr(dev,
+ mt76x0_rf_bw_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_bw_switch_tab[i].value);
}
}
for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
if (mt76x0_rf_band_switch_tab[i].bw_band & rf_band) {
- rf_wr(dev, mt76x0_rf_band_switch_tab[i].rf_bank_reg,
- mt76x0_rf_band_switch_tab[i].value);
+ mt76x0_rf_wr(dev,
+ mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
}
}
- mac_reg = mt76_rr(dev, MT_RF_MISC);
- mac_reg &= ~0xC; /* Clear 0x518[3:2] */
- mt76_wr(dev, MT_RF_MISC, mac_reg);
+ mt76_clear(dev, MT_RF_MISC, 0xc);
band = (rf_band & RF_G_BAND) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
if (mt76x02_ext_pa_enabled(dev, band)) {
@@ -457,21 +372,17 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban
[2]1'b1: enable external A band PA, 1'b0: disable external A band PA
[3]1'b1: enable external G band PA, 1'b0: disable external G band PA
*/
- if (rf_band & RF_A_BAND) {
- mac_reg = mt76_rr(dev, MT_RF_MISC);
- mac_reg |= 0x4;
- mt76_wr(dev, MT_RF_MISC, mac_reg);
- } else {
- mac_reg = mt76_rr(dev, MT_RF_MISC);
- mac_reg |= 0x8;
- mt76_wr(dev, MT_RF_MISC, mac_reg);
- }
+ if (rf_band & RF_A_BAND)
+ mt76_set(dev, MT_RF_MISC, BIT(2));
+ else
+ mt76_set(dev, MT_RF_MISC, BIT(3));
/* External PA */
for (i = 0; i < ARRAY_SIZE(mt76x0_rf_ext_pa_tab); i++)
if (mt76x0_rf_ext_pa_tab[i].bw_band & rf_band)
- rf_wr(dev, mt76x0_rf_ext_pa_tab[i].rf_bank_reg,
- mt76x0_rf_ext_pa_tab[i].value);
+ mt76x0_rf_wr(dev,
+ mt76x0_rf_ext_pa_tab[i].rf_bank_reg,
+ mt76x0_rf_ext_pa_tab[i].value);
}
if (rf_band & RF_G_BAND) {
@@ -516,27 +427,53 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
}
}
-static void mt76x0_ant_select(struct mt76x02_dev *dev)
+static void mt76x0_phy_ant_select(struct mt76x02_dev *dev)
{
- struct ieee80211_channel *chan = dev->mt76.chandef.chan;
-
- /* single antenna mode */
- if (chan->band == NL80211_BAND_2GHZ) {
- mt76_rmw(dev, MT_COEXCFG3,
- BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1));
- mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6));
+ u16 ee_ant = mt76x02_eeprom_get(dev, MT_EE_ANTENNA);
+ u16 nic_conf2 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
+ u32 wlan, coex3, cmb;
+ bool ant_div;
+
+ wlan = mt76_rr(dev, MT_WLAN_FUN_CTRL);
+ cmb = mt76_rr(dev, MT_CMB_CTRL);
+ coex3 = mt76_rr(dev, MT_COEXCFG3);
+
+ cmb &= ~(BIT(14) | BIT(12));
+ wlan &= ~(BIT(6) | BIT(5));
+ coex3 &= ~GENMASK(5, 2);
+
+ if (ee_ant & MT_EE_ANTENNA_DUAL) {
+ /* dual antenna mode */
+ ant_div = !(nic_conf2 & MT_EE_NIC_CONF_2_ANT_OPT) &&
+ (nic_conf2 & MT_EE_NIC_CONF_2_ANT_DIV);
+ if (ant_div)
+ cmb |= BIT(12);
+ else
+ coex3 |= BIT(4);
+ coex3 |= BIT(3);
+ if (dev->mt76.cap.has_2ghz)
+ wlan |= BIT(6);
} else {
- mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(2),
- BIT(4) | BIT(3));
- mt76_clear(dev, MT_WLAN_FUN_CTRL,
- BIT(6) | BIT(5));
+		/* single antenna mode */
+ if (dev->mt76.cap.has_5ghz) {
+ coex3 |= BIT(3) | BIT(4);
+ } else {
+ wlan |= BIT(6);
+ coex3 |= BIT(1);
+ }
}
- mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12));
+
+ if (is_mt7630(dev))
+ cmb |= BIT(14) | BIT(11);
+
+ mt76_wr(dev, MT_WLAN_FUN_CTRL, wlan);
+ mt76_wr(dev, MT_CMB_CTRL, cmb);
mt76_clear(dev, MT_COEXCFG0, BIT(2));
+ mt76_wr(dev, MT_COEXCFG3, coex3);
}
static void
-mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
+mt76x0_phy_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
{
enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4};
int bw;
@@ -563,7 +500,346 @@ mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width)
 		return;
}
- mt76x02_mcu_function_select(dev, BW_SETTING, bw, false);
+ mt76x02_mcu_function_select(dev, BW_SETTING, bw);
+}
+
+static void mt76x0_phy_tssi_dc_calibrate(struct mt76x02_dev *dev)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ u32 val;
+
+ if (chan->band == NL80211_BAND_5GHZ)
+ mt76x0_rf_clear(dev, MT_RF(0, 67), 0xf);
+
+ /* bypass ADDA control */
+ mt76_wr(dev, MT_RF_SETTING_0, 0x60002237);
+ mt76_wr(dev, MT_RF_BYPASS_0, 0xffffffff);
+
+ /* bbp sw reset */
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ usleep_range(500, 1000);
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+
+ val = (chan->band == NL80211_BAND_5GHZ) ? 0x80055 : 0x80050;
+ mt76_wr(dev, MT_BBP(CORE, 34), val);
+
+ /* enable TX with DAC0 input */
+ mt76_wr(dev, MT_BBP(TXBE, 6), BIT(31));
+
+ mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200);
+ dev->cal.tssi_dc = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+ /* stop bypass ADDA */
+ mt76_wr(dev, MT_RF_BYPASS_0, 0);
+ /* stop TX */
+ mt76_wr(dev, MT_BBP(TXBE, 6), 0);
+ /* bbp sw reset */
+ mt76_set(dev, MT_BBP(CORE, 4), BIT(0));
+ usleep_range(500, 1000);
+ mt76_clear(dev, MT_BBP(CORE, 4), BIT(0));
+
+ if (chan->band == NL80211_BAND_5GHZ)
+ mt76x0_rf_rmw(dev, MT_RF(0, 67), 0xf, 0x4);
+}
+
+static int
+mt76x0_phy_tssi_adc_calibrate(struct mt76x02_dev *dev, s16 *ltssi,
+ u8 *info)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ u32 val;
+
+ val = (chan->band == NL80211_BAND_5GHZ) ? 0x80055 : 0x80050;
+ mt76_wr(dev, MT_BBP(CORE, 34), val);
+
+ if (!mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200)) {
+ mt76_clear(dev, MT_BBP(CORE, 34), BIT(4));
+ return -ETIMEDOUT;
+ }
+
+ *ltssi = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+ if (chan->band == NL80211_BAND_5GHZ)
+ *ltssi += 128;
+
+ /* set packet info#1 mode */
+ mt76_wr(dev, MT_BBP(CORE, 34), 0x80041);
+ info[0] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+ /* set packet info#2 mode */
+ mt76_wr(dev, MT_BBP(CORE, 34), 0x80042);
+ info[1] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+ /* set packet info#3 mode */
+ mt76_wr(dev, MT_BBP(CORE, 34), 0x80043);
+ info[2] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff;
+
+ return 0;
+}
+
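+/* MT_RF_PA_MODE_CFG0/1 pack one 2-bit PA mode per rate index; pull out
+ * the field that corresponds to tx_rate.
+ */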
+static u8 mt76x0_phy_get_rf_pa_mode(struct mt76x02_dev *dev,
+ int index, u8 tx_rate)
+{
+ u32 val, reg;
+
+ reg = (index == 1) ? MT_RF_PA_MODE_CFG1 : MT_RF_PA_MODE_CFG0;
+ val = mt76_rr(dev, reg);
+ return (val & (3 << (tx_rate * 2))) >> (tx_rate * 2);
+}
+
+static int
+mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
+ u8 *info, s8 *target_power,
+ s8 *target_pa_power)
+{
+ u8 tx_rate, cur_power;
+
+ cur_power = mt76_rr(dev, MT_TX_ALC_CFG_0) & MT_TX_ALC_CFG_0_CH_INIT_0;
+ switch (tx_mode) {
+ case 0:
+ /* cck rates */
+ tx_rate = (info[0] & 0x60) >> 5;
+ if (tx_rate > 3)
+ return -EINVAL;
+
+ *target_power = cur_power + dev->mt76.rate_power.cck[tx_rate];
+ *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, tx_rate);
+ break;
+ case 1: {
+ u8 index;
+
+ /* ofdm rates */
+ tx_rate = (info[0] & 0xf0) >> 4;
+ switch (tx_rate) {
+ case 0xb:
+ index = 0;
+ break;
+ case 0xf:
+ index = 1;
+ break;
+ case 0xa:
+ index = 2;
+ break;
+ case 0xe:
+ index = 3;
+ break;
+ case 0x9:
+ index = 4;
+ break;
+ case 0xd:
+ index = 5;
+ break;
+ case 0x8:
+ index = 6;
+ break;
+ case 0xc:
+ index = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *target_power = cur_power + dev->mt76.rate_power.ofdm[index];
+ *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, index + 4);
+ break;
+ }
+ case 4:
+ /* vht rates */
+ tx_rate = info[1] & 0xf;
+ if (tx_rate > 9)
+ return -EINVAL;
+
+ *target_power = cur_power + dev->mt76.rate_power.vht[tx_rate];
+ *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
+ break;
+ default:
+ /* ht rates */
+ tx_rate = info[1] & 0x7f;
+ if (tx_rate > 9)
+ return -EINVAL;
+
+ *target_power = cur_power + dev->mt76.rate_power.ht[tx_rate];
+ *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
+ break;
+ }
+
+ return 0;
+}
+
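+/* Fixed-point dB conversion; sketch of the math as I read the constants:
+ * normalize val = m * 2^exp with m in [2^15, 2^16), approximate
+ * data ~ 2^15 * log2(m / 2^15) piecewise-linearly, then scale by
+ * 4 + 2 + 1/64 + 1/128 ~ 20 * log10(2), so the result is roughly
+ * 20 * log10(val) in 1/32 dB units.
+ */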
+static s16 mt76x0_phy_lin2db(u16 val)
+{
+ u32 mantissa = val << 4;
+ int ret, data;
+ s16 exp = -4;
+
+ while (mantissa < BIT(15)) {
+ mantissa <<= 1;
+ if (--exp < -20)
+ return -10000;
+ }
+ while (mantissa > 0xffff) {
+ mantissa >>= 1;
+ if (++exp > 20)
+ return -10000;
+ }
+
+ /* s(15,0) */
+ if (mantissa <= 47104)
+ data = mantissa + (mantissa >> 3) + (mantissa >> 4) - 38400;
+ else
+ data = mantissa - (mantissa >> 3) - (mantissa >> 6) - 23040;
+ data = max_t(int, 0, data);
+
+ ret = ((15 + exp) << 15) + data;
+ ret = (ret << 2) + (ret << 1) + (ret >> 6) + (ret >> 7);
+ return ret >> 10;
+}
+
+static int
+mt76x0_phy_get_delta_power(struct mt76x02_dev *dev, u8 tx_mode,
+ s8 target_power, s8 target_pa_power,
+ s16 ltssi)
+{
+ struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ int tssi_target = target_power << 12, tssi_slope;
+ int tssi_offset, tssi_db, ret;
+ u32 data;
+ u16 val;
+
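+	/* tssi_target accumulates in 1/8192 dB units (hence the "x * 8192"
+	 * constants below); target_power looks to be in 0.5 dB steps, so
+	 * << 12 brings it onto the same scale.
+	 */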
+ if (chan->band == NL80211_BAND_5GHZ) {
+ u8 bound[7];
+ int i, err;
+
+ err = mt76x02_eeprom_copy(dev, MT_EE_TSSI_BOUND1, bound,
+ sizeof(bound));
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(bound); i++) {
+ if (chan->hw_value <= bound[i] || !bound[i])
+ break;
+ }
+ val = mt76x02_eeprom_get(dev, MT_EE_TSSI_SLOPE_5G + i * 2);
+
+ tssi_offset = val >> 8;
+ if ((tssi_offset >= 64 && tssi_offset <= 127) ||
+ (tssi_offset & BIT(7)))
+ tssi_offset -= BIT(8);
+ } else {
+ val = mt76x02_eeprom_get(dev, MT_EE_TSSI_SLOPE_2G);
+
+ tssi_offset = val >> 8;
+ if (tssi_offset & BIT(7))
+ tssi_offset -= BIT(8);
+ }
+ tssi_slope = val & 0xff;
+
+ switch (target_pa_power) {
+ case 1:
+ if (chan->band == NL80211_BAND_2GHZ)
+ tssi_target += 29491; /* 3.6 * 8192 */
+ /* fall through */
+ case 0:
+ break;
+ default:
+ tssi_target += 4424; /* 0.54 * 8192 */
+ break;
+ }
+
+ if (!tx_mode) {
+ data = mt76_rr(dev, MT_BBP(CORE, 1));
+ if (is_mt7630(dev) && mt76_is_mmio(dev)) {
+ int offset;
+
+ /* 2.3 * 8192 or 1.5 * 8192 */
+ offset = (data & BIT(5)) ? 18841 : 12288;
+ tssi_target += offset;
+ } else if (data & BIT(5)) {
+ /* 0.8 * 8192 */
+ tssi_target += 6554;
+ }
+ }
+
+ data = mt76_rr(dev, MT_BBP(TXBE, 4));
+ switch (data & 0x3) {
+ case 1:
+ tssi_target -= 49152; /* -6db * 8192 */
+ break;
+ case 2:
+ tssi_target -= 98304; /* -12db * 8192 */
+ break;
+ case 3:
+ tssi_target += 49152; /* 6db * 8192 */
+ break;
+ default:
+ break;
+ }
+
+ tssi_db = mt76x0_phy_lin2db(ltssi - dev->cal.tssi_dc) * tssi_slope;
+ if (chan->band == NL80211_BAND_5GHZ) {
+ tssi_db += ((tssi_offset - 50) << 10); /* offset s4.3 */
+ tssi_target -= tssi_db;
+ if (ltssi > 254 && tssi_target > 0) {
+ /* upper saturate */
+ tssi_target = 0;
+ }
+ } else {
+ tssi_db += (tssi_offset << 9); /* offset s3.4 */
+ tssi_target -= tssi_db;
+ /* upper-lower saturate */
+ if ((ltssi > 126 && tssi_target > 0) ||
+ ((ltssi - dev->cal.tssi_dc) < 1 && tssi_target < 0)) {
+ tssi_target = 0;
+ }
+ }
+
+ if ((dev->cal.tssi_target ^ tssi_target) < 0 &&
+ dev->cal.tssi_target > -4096 && dev->cal.tssi_target < 4096 &&
+ tssi_target > -4096 && tssi_target < 4096) {
+ if ((tssi_target < 0 &&
+ tssi_target + dev->cal.tssi_target > 0) ||
+ (tssi_target > 0 &&
+ tssi_target + dev->cal.tssi_target <= 0))
+ tssi_target = 0;
+ else
+ dev->cal.tssi_target = tssi_target;
+ } else {
+ dev->cal.tssi_target = tssi_target;
+ }
+
+	/* round the compensation value to the nearest compensation code */
+ if (tssi_target > 0)
+ tssi_target += 2048;
+ else
+ tssi_target -= 2048;
+ tssi_target >>= 12;
+
+ ret = mt76_get_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP);
+ if (ret & BIT(5))
+ ret -= BIT(6);
+ ret += tssi_target;
+
+ ret = min_t(int, 31, ret);
+ return max_t(int, -32, ret);
+}
+
+static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev)
+{
+ s8 target_power, target_pa_power;
+ u8 tssi_info[3], tx_mode;
+ s16 ltssi;
+ s8 val;
+
+ if (mt76x0_phy_tssi_adc_calibrate(dev, &ltssi, tssi_info) < 0)
+ return;
+
+ tx_mode = tssi_info[0] & 0x7;
+ if (mt76x0_phy_get_target_power(dev, tx_mode, tssi_info,
+ &target_power, &target_pa_power) < 0)
+ return;
+
+ val = mt76x0_phy_get_delta_power(dev, tx_mode, target_power,
+ target_pa_power, ltssi);
+ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, val);
}
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
@@ -571,8 +847,8 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
struct mt76_rate_power *t = &dev->mt76.rate_power;
u8 info[2];
- mt76x0_get_power_info(dev, info);
mt76x0_get_tx_power_per_rate(dev);
+ mt76x0_get_power_info(dev, info);
mt76x02_add_rate_power_offset(t, info[0]);
mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
@@ -585,14 +861,25 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
+ int is_5ghz = (chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
u32 val, tx_alc, reg_val;
+ if (is_mt7630(dev))
+ return;
+
if (power_on) {
- mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
- mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value,
- false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value);
usleep_range(10, 20);
- /* XXX: tssi */
+
+ if (mt76x0_tssi_enabled(dev)) {
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+ mt76x0_phy_tssi_dc_calibrate(dev);
+ mt76_wr(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX |
+ MT_MAC_SYS_CTRL_ENABLE_RX);
+ }
}
tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
@@ -602,7 +889,7 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
- if (chan->band == NL80211_BAND_5GHZ) {
+ if (is_5ghz) {
if (chan->hw_value < 100)
val = 0x701;
else if (chan->hw_value < 140)
@@ -613,14 +900,14 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)
val = 0x600;
}
- mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val);
msleep(350);
- mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz);
usleep_range(15000, 20000);
mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
- mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1);
}
EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate);
@@ -684,7 +971,7 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
}
if (mt76_is_usb(dev)) {
- mt76x0_bbp_set_bw(dev, chandef->width);
+ mt76x0_phy_bbp_set_bw(dev, chandef->width);
} else {
if (chandef->width == NL80211_CHAN_WIDTH_80 ||
chandef->width == NL80211_CHAN_WIDTH_40)
@@ -696,7 +983,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
mt76x02_phy_set_bw(dev, chandef->width, ch_group_index);
mt76x02_phy_set_band(dev, chandef->chan->band,
ch_group_index & 1);
- mt76x0_ant_select(dev);
mt76_rmw(dev, MT_EXT_CCA_CFG,
(MT_EXT_CCA_CFG_CCA0 |
@@ -710,29 +996,21 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band);
/* set Japan Tx filter at channel 14 */
- val = mt76_rr(dev, MT_BBP(CORE, 1));
if (channel == 14)
- val |= 0x20;
+ mt76_set(dev, MT_BBP(CORE, 1), 0x20);
else
- val &= ~0x20;
- mt76_wr(dev, MT_BBP(CORE, 1), val);
+ mt76_clear(dev, MT_BBP(CORE, 1), 0x20);
mt76x0_read_rx_gain(dev);
mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band);
- mt76x02_init_agc_gain(dev);
-
- if (mt76_is_usb(dev)) {
- mt76x0_vco_cal(dev, channel);
- } else {
- /* enable vco */
- rf_set(dev, MT_RF(0, 4), BIT(7));
- }
+ /* enable vco */
+ mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7));
if (scan)
return 0;
- if (mt76_is_mmio(dev))
- mt76x0_phy_calibrate(dev, false);
+ mt76x02_init_agc_gain(dev);
+ mt76x0_phy_calibrate(dev, false);
mt76x0_phy_set_txpower(dev);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
@@ -741,55 +1019,21 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev,
return 0;
}
-void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev)
-{
- u32 tx_alc, reg_val;
- u8 channel = dev->mt76.chandef.chan->hw_value;
- int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 1 : 0;
-
- mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
-
- mt76x0_vco_cal(dev, channel);
-
- tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0);
- mt76_wr(dev, MT_TX_ALC_CFG_0, 0);
- usleep_range(500, 700);
-
- reg_val = mt76_rr(dev, MT_BBP(IBI, 9));
- mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e);
-
- mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false);
-
- mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false);
- mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false);
- mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
- mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false);
- mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false);
- mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false);
-
- mt76_wr(dev, MT_BBP(IBI, 9), reg_val);
- mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc);
- msleep(100);
-
- mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false);
-}
-
-static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
+static void mt76x0_phy_temp_sensor(struct mt76x02_dev *dev)
{
u8 rf_b7_73, rf_b0_66, rf_b0_67;
s8 val;
- rf_b7_73 = rf_rr(dev, MT_RF(7, 73));
- rf_b0_66 = rf_rr(dev, MT_RF(0, 66));
- rf_b0_67 = rf_rr(dev, MT_RF(0, 67));
+ rf_b7_73 = mt76x0_rf_rr(dev, MT_RF(7, 73));
+ rf_b0_66 = mt76x0_rf_rr(dev, MT_RF(0, 66));
+ rf_b0_67 = mt76x0_rf_rr(dev, MT_RF(0, 67));
- rf_wr(dev, MT_RF(7, 73), 0x02);
- rf_wr(dev, MT_RF(0, 66), 0x23);
- rf_wr(dev, MT_RF(0, 67), 0x01);
+ mt76x0_rf_wr(dev, MT_RF(7, 73), 0x02);
+ mt76x0_rf_wr(dev, MT_RF(0, 66), 0x23);
+ mt76x0_rf_wr(dev, MT_RF(0, 67), 0x01);
mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055);
-
- if (!mt76_poll(dev, MT_BBP(CORE, 34), BIT(4), 0, 2000)) {
+ if (!mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200)) {
mt76_clear(dev, MT_BBP(CORE, 34), BIT(4));
goto done;
}
@@ -799,8 +1043,7 @@ static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
if (abs(val - dev->cal.temp_vco) > 20) {
mt76x02_mcu_calibrate(dev, MCU_CAL_VCO,
- dev->mt76.chandef.chan->hw_value,
- false);
+ dev->mt76.chandef.chan->hw_value);
dev->cal.temp_vco = val;
}
if (abs(val - dev->cal.temp) > 30) {
@@ -809,18 +1052,20 @@ static void mt76x0_temp_sensor(struct mt76x02_dev *dev)
}
done:
- rf_wr(dev, MT_RF(7, 73), rf_b7_73);
- rf_wr(dev, MT_RF(0, 66), rf_b0_66);
- rf_wr(dev, MT_RF(0, 67), rf_b0_67);
+ mt76x0_rf_wr(dev, MT_RF(7, 73), rf_b7_73);
+ mt76x0_rf_wr(dev, MT_RF(0, 66), rf_b0_66);
+ mt76x0_rf_wr(dev, MT_RF(0, 67), rf_b0_67);
}
static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev)
{
u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
- u32 val = 0x122c << 16 | 0xf2;
- mt76_wr(dev, MT_BBP(AGC, 8),
- val | FIELD_PREP(MT_BBP_AGC_GAIN, gain));
+ mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, gain);
+
+ if ((dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR) &&
+ !is_mt7630(dev))
+ mt76x02_phy_dfs_adjust_agc(dev);
}
static void
@@ -835,7 +1080,8 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev)
low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
(dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
- gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
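+	/* low_gain < 0 presumably marks "not initialized yet" (e.g. right
+	 * after a channel switch), forcing a full gain reprogram here.
+	 */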
+ gain_change = dev->cal.low_gain < 0 ||
+ (dev->cal.low_gain & 2) ^ (low_gain & 2);
dev->cal.low_gain = low_gain;
if (!gain_change) {
@@ -860,20 +1106,65 @@ static void mt76x0_phy_calibration_work(struct work_struct *work)
cal_work.work);
mt76x0_phy_update_channel_gain(dev);
- if (!mt76x0_tssi_enabled(dev))
- mt76x0_temp_sensor(dev);
+ if (mt76x0_tssi_enabled(dev))
+ mt76x0_phy_tssi_calibrate(dev);
+ else
+ mt76x0_phy_temp_sensor(dev);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
- MT_CALIBRATE_INTERVAL);
+ 4 * MT_CALIBRATE_INTERVAL);
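+	/* MT_CALIBRATE_INTERVAL shrinks from 4 * HZ to HZ in mt76x02.h
+	 * below, so the effective period here stays at 4 seconds.
+	 */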
}
-static void mt76x0_rf_init(struct mt76x02_dev *dev)
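+/* Write an initvals table, patching the few RF registers whose value
+ * differs by chip (MT7630/MT7610E) or bus (MMIO vs USB).
+ */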
+static void mt76x0_rf_patch_reg_array(struct mt76x02_dev *dev,
+ const struct mt76_reg_pair *rp, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ u32 reg = rp[i].reg;
+ u8 val = rp[i].value;
+
+ switch (reg) {
+ case MT_RF(0, 3):
+ if (mt76_is_mmio(dev)) {
+ if (is_mt7630(dev))
+ val = 0x70;
+ else
+ val = 0x63;
+ } else {
+ val = 0x73;
+ }
+ break;
+ case MT_RF(0, 21):
+ if (is_mt7610e(dev))
+ val = 0x10;
+ else
+ val = 0x12;
+ break;
+ case MT_RF(5, 2):
+ if (is_mt7630(dev))
+ val = 0x1d;
+ else if (is_mt7610e(dev))
+ val = 0x00;
+ else
+ val = 0x0c;
+ break;
+ default:
+ break;
+ }
+ mt76x0_rf_wr(dev, reg, val);
+ }
+}
+
+static void mt76x0_phy_rf_init(struct mt76x02_dev *dev)
{
int i;
u8 val;
- RF_RANDOM_WRITE(dev, mt76x0_rf_central_tab);
- RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab);
+ mt76x0_rf_patch_reg_array(dev, mt76x0_rf_central_tab,
+ ARRAY_SIZE(mt76x0_rf_central_tab));
+ mt76x0_rf_patch_reg_array(dev, mt76x0_rf_2g_channel_0_tab,
+ ARRAY_SIZE(mt76x0_rf_2g_channel_0_tab));
RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab);
RF_RANDOM_WRITE(dev, mt76x0_rf_vga_channel_0_tab);
@@ -881,16 +1172,16 @@ static void mt76x0_rf_init(struct mt76x02_dev *dev)
const struct mt76x0_rf_switch_item *item = &mt76x0_rf_bw_switch_tab[i];
if (item->bw_band == RF_BW_20)
- rf_wr(dev, item->rf_bank_reg, item->value);
+ mt76x0_rf_wr(dev, item->rf_bank_reg, item->value);
else if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20))
- rf_wr(dev, item->rf_bank_reg, item->value);
+ mt76x0_rf_wr(dev, item->rf_bank_reg, item->value);
}
for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) {
if (mt76x0_rf_band_switch_tab[i].bw_band & RF_G_BAND) {
- rf_wr(dev,
- mt76x0_rf_band_switch_tab[i].rf_bank_reg,
- mt76x0_rf_band_switch_tab[i].value);
+ mt76x0_rf_wr(dev,
+ mt76x0_rf_band_switch_tab[i].rf_bank_reg,
+ mt76x0_rf_band_switch_tab[i].value);
}
}
@@ -899,32 +1190,29 @@ static void mt76x0_rf_init(struct mt76x02_dev *dev)
E1: B0.R22<6:0>: xo_cxo<6:0>
E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1>
*/
- rf_wr(dev, MT_RF(0, 22),
- min_t(u8, dev->cal.rx.freq_offset, 0xbf));
- val = rf_rr(dev, MT_RF(0, 22));
-
- /*
- Reset the DAC (Set B0.R73<7>=1, then set B0.R73<7>=0, and then set B0.R73<7>) during power up.
+ mt76x0_rf_wr(dev, MT_RF(0, 22),
+ min_t(u8, dev->cal.rx.freq_offset, 0xbf));
+ val = mt76x0_rf_rr(dev, MT_RF(0, 22));
+
+	/* DAC reset procedure during power-up:
+ * - set B0.R73<7>
+ * - clear B0.R73<7>
+ * - set B0.R73<7>
*/
- val = rf_rr(dev, MT_RF(0, 73));
- val |= 0x80;
- rf_wr(dev, MT_RF(0, 73), val);
- val &= ~0x80;
- rf_wr(dev, MT_RF(0, 73), val);
- val |= 0x80;
- rf_wr(dev, MT_RF(0, 73), val);
+ mt76x0_rf_set(dev, MT_RF(0, 73), BIT(7));
+ mt76x0_rf_clear(dev, MT_RF(0, 73), BIT(7));
+ mt76x0_rf_set(dev, MT_RF(0, 73), BIT(7));
- /*
- vcocal_en (initiate VCO calibration (reset after completion)) - It should be at the end of RF configuration.
- */
- rf_set(dev, MT_RF(0, 4), 0x80);
+	/* vcocal_en: initiate VCO calibration (reset after completion) */
+ mt76x0_rf_set(dev, MT_RF(0, 4), 0x80);
}
void mt76x0_phy_init(struct mt76x02_dev *dev)
{
INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work);
- mt76x0_rf_init(dev);
+ mt76x0_phy_ant_select(dev);
+ mt76x0_phy_rf_init(dev);
mt76x02_phy_set_rxpath(dev);
mt76x02_phy_set_txdac(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
index 2880a43c3cb0..9889132b768a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h
@@ -30,6 +30,23 @@
#define MT_RF_BANK(offset) (offset >> 16)
#define MT_RF_REG(offset) (offset & 0xff)
+#define MT_RF_VCO_BP_CLOSE_LOOP BIT(3)
+#define MT_RF_VCO_BP_CLOSE_LOOP_MASK GENMASK(3, 0)
+#define MT_RF_VCO_CAL_MASK GENMASK(2, 0)
+#define MT_RF_START_TIME 0x3
+#define MT_RF_START_TIME_MASK GENMASK(2, 0)
+#define MT_RF_SETTLE_TIME_MASK GENMASK(6, 4)
+
+#define MT_RF_PLL_DEN_MASK GENMASK(4, 0)
+#define MT_RF_PLL_K_MASK GENMASK(4, 0)
+#define MT_RF_SDM_RESET_MASK BIT(7)
+#define MT_RF_SDM_MASH_PRBS_MASK GENMASK(6, 2)
+#define MT_RF_SDM_BP_MASK BIT(1)
+#define MT_RF_ISI_ISO_MASK GENMASK(7, 6)
+#define MT_RF_PFD_DLY_MASK GENMASK(5, 4)
+#define MT_RF_CLK_SEL_MASK GENMASK(3, 2)
+#define MT_RF_XO_DIV_MASK GENMASK(1, 0)
+
struct mt76x0_bbp_switch_item {
u16 bw_band;
struct mt76_reg_pair reg_pair;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
deleted file mode 100644
index 8abdd3cd546d..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-
-#ifndef __CHECKER__
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
-#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
deleted file mode 100644
index 75d1d6738c34..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h
+++ /dev/null
@@ -1,312 +0,0 @@
-/*
- * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#if !defined(__MT76X0U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define __MT76X0U_TRACE_H
-
-#include <linux/tracepoint.h>
-#include "mt76x0.h"
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM mt76x0
-
-#define MAXNAME 32
-#define DEV_ENTRY __array(char, wiphy_name, 32)
-#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \
- wiphy_name(dev->hw->wiphy), MAXNAME)
-#define DEV_PR_FMT "%s "
-#define DEV_PR_ARG __entry->wiphy_name
-
-#define REG_ENTRY __field(u32, reg) __field(u32, val)
-#define REG_ASSIGN __entry->reg = reg; __entry->val = val
-#define REG_PR_FMT "%04x=%08x"
-#define REG_PR_ARG __entry->reg, __entry->val
-
-DECLARE_EVENT_CLASS(dev_reg_evt,
- TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
- TP_ARGS(dev, reg, val),
- TP_STRUCT__entry(
- DEV_ENTRY
- REG_ENTRY
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- REG_ASSIGN;
- ),
- TP_printk(
- DEV_PR_FMT REG_PR_FMT,
- DEV_PR_ARG, REG_PR_ARG
- )
-);
-
-DEFINE_EVENT(dev_reg_evt, mt76x0_reg_read,
- TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
- TP_ARGS(dev, reg, val)
-);
-
-DEFINE_EVENT(dev_reg_evt, mt76x0_reg_write,
- TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val),
- TP_ARGS(dev, reg, val)
-);
-
-TRACE_EVENT(mt76x0_submit_urb,
- TP_PROTO(struct mt76_dev *dev, struct urb *u),
- TP_ARGS(dev, u),
- TP_STRUCT__entry(
- DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->pipe = u->pipe;
- __entry->len = u->transfer_buffer_length;
- ),
- TP_printk(DEV_PR_FMT "p:%08x len:%u",
- DEV_PR_ARG, __entry->pipe, __entry->len)
-);
-
-#define trace_mt76x0_submit_urb_sync(__dev, __pipe, __len) ({ \
- struct urb u; \
- u.pipe = __pipe; \
- u.transfer_buffer_length = __len; \
- trace_mt76x0_submit_urb(__dev, &u); \
-})
-
-TRACE_EVENT(mt76x0_mcu_msg_send,
- TP_PROTO(struct mt76_dev *dev,
- struct sk_buff *skb, u32 csum, bool resp),
- TP_ARGS(dev, skb, csum, resp),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, info)
- __field(u32, csum)
- __field(bool, resp)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->info = *(u32 *)skb->data;
- __entry->csum = csum;
- __entry->resp = resp;
- ),
- TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d",
- DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp)
-);
-
-TRACE_EVENT(mt76x0_vend_req,
- TP_PROTO(struct mt76_dev *dev, unsigned pipe, u8 req, u8 req_type,
- u16 val, u16 offset, void *buf, size_t buflen, int ret),
- TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(unsigned, pipe) __field(u8, req) __field(u8, req_type)
- __field(u16, val) __field(u16, offset) __field(void*, buf)
- __field(int, buflen) __field(int, ret)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->pipe = pipe;
- __entry->req = req;
- __entry->req_type = req_type;
- __entry->val = val;
- __entry->offset = offset;
- __entry->buf = buf;
- __entry->buflen = buflen;
- __entry->ret = ret;
- ),
- TP_printk(DEV_PR_FMT
- "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d",
- DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req,
- __entry->req_type, __entry->val, __entry->offset,
- !!__entry->buf, __entry->buflen)
-);
-
-DECLARE_EVENT_CLASS(dev_rf_reg_evt,
- TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
- TP_ARGS(dev, bank, reg, val),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u8, bank)
- __field(u8, reg)
- __field(u8, val)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- REG_ASSIGN;
- __entry->bank = bank;
- ),
- TP_printk(
- DEV_PR_FMT "%02hhx:%02hhx=%02hhx",
- DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val
- )
-);
-
-DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_read,
- TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
- TP_ARGS(dev, bank, reg, val)
-);
-
-DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_write,
- TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val),
- TP_ARGS(dev, bank, reg, val)
-);
-
-DECLARE_EVENT_CLASS(dev_simple_evt,
- TP_PROTO(struct mt76_dev *dev, u8 val),
- TP_ARGS(dev, val),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u8, val)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->val = val;
- ),
- TP_printk(
- DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val
- )
-);
-
-TRACE_EVENT(mt76x0_rx,
- TP_PROTO(struct mt76_dev *dev, struct mt76x02_rxwi *rxwi, u32 f),
- TP_ARGS(dev, rxwi, f),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field_struct(struct mt76x02_rxwi, rxwi)
- __field(u32, fce_info)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->rxwi = *rxwi;
- __entry->fce_info = f;
- ),
- TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x", DEV_PR_ARG,
- le32_to_cpu(__entry->rxwi.rxinfo),
- le32_to_cpu(__entry->rxwi.ctl))
-);
-
-TRACE_EVENT(mt76x0_tx,
- TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb,
- struct mt76x02_sta *sta, struct mt76x02_txwi *h),
- TP_ARGS(dev, skb, sta, h),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field_struct(struct mt76x02_txwi, h)
- __field(struct sk_buff *, skb)
- __field(struct mt76x02_sta *, sta)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->h = *h;
- __entry->skb = skb;
- __entry->sta = sta;
- ),
- TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate:%04hx "
- "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG,
- __entry->skb, __entry->sta,
- le16_to_cpu(__entry->h.flags),
- le16_to_cpu(__entry->h.rate),
- __entry->h.ack_ctl, __entry->h.wcid,
- le16_to_cpu(__entry->h.len_ctl))
-);
-
-TRACE_EVENT(mt76x0_tx_dma_done,
- TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb),
- TP_ARGS(dev, skb),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(struct sk_buff *, skb)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->skb = skb;
- ),
- TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb)
-);
-
-TRACE_EVENT(mt76x0_tx_status_cleaned,
- TP_PROTO(struct mt76_dev *dev, int cleaned),
- TP_ARGS(dev, cleaned),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(int, cleaned)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->cleaned = cleaned;
- ),
- TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned)
-);
-
-TRACE_EVENT(mt76x0_tx_status,
- TP_PROTO(struct mt76_dev *dev, u32 stat1, u32 stat2),
- TP_ARGS(dev, stat1, stat2),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u32, stat1) __field(u32, stat2)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->stat1 = stat1;
- __entry->stat2 = stat2;
- ),
- TP_printk(DEV_PR_FMT "%08x %08x",
- DEV_PR_ARG, __entry->stat1, __entry->stat2)
-);
-
-TRACE_EVENT(mt76x0_rx_dma_aggr,
- TP_PROTO(struct mt76_dev *dev, int cnt, bool paged),
- TP_ARGS(dev, cnt, paged),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u8, cnt)
- __field(bool, paged)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->cnt = cnt;
- __entry->paged = paged;
- ),
- TP_printk(DEV_PR_FMT "cnt:%d paged:%d",
- DEV_PR_ARG, __entry->cnt, __entry->paged)
-);
-
-DEFINE_EVENT(dev_simple_evt, mt76x0_set_key,
- TP_PROTO(struct mt76_dev *dev, u8 val),
- TP_ARGS(dev, val)
-);
-
-TRACE_EVENT(mt76x0_set_shared_key,
- TP_PROTO(struct mt76_dev *dev, u8 vid, u8 key),
- TP_ARGS(dev, vid, key),
- TP_STRUCT__entry(
- DEV_ENTRY
- __field(u8, vid)
- __field(u8, key)
- ),
- TP_fast_assign(
- DEV_ASSIGN;
- __entry->vid = vid;
- __entry->key = key;
- ),
- TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx",
- DEV_PR_ARG, __entry->vid, __entry->key)
-);
-
-#endif
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
-
-#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index a7fd36c2f633..0e6b43bb4678 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -17,7 +17,6 @@
#include "mt76x0.h"
#include "mcu.h"
-#include "trace.h"
#include "../mt76x02_usb.h"
static struct usb_device_id mt76x0_device_table[] = {
@@ -117,6 +116,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
if (ret)
goto out;
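+	/* run the full power-on calibration (R/VCO plus TSSI DC offset)
+	 * before scheduling the periodic calibration work below
+	 */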
+ mt76x0_phy_calibrate(dev, true);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
MT_CALIBRATE_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
@@ -145,17 +145,17 @@ static const struct ieee80211_ops mt76x0u_ops = {
.remove_interface = mt76x02_remove_interface,
.config = mt76x0_config,
.configure_filter = mt76x02_configure_filter,
- .bss_info_changed = mt76x0_bss_info_changed,
- .sta_add = mt76x02_sta_add,
- .sta_remove = mt76x02_sta_remove,
+ .bss_info_changed = mt76x02_bss_info_changed,
+ .sta_state = mt76_sta_state,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
- .sw_scan_start = mt76x0_sw_scan,
- .sw_scan_complete = mt76x0_sw_scan_complete,
+ .sw_scan_start = mt76x02_sw_scan,
+ .sw_scan_complete = mt76x02_sw_scan_complete,
.ampdu_action = mt76x02_ampdu_action,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
- .set_rts_threshold = mt76x0_set_rts_threshold,
+ .set_rts_threshold = mt76x02_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
+ .get_txpower = mt76x02_get_txpower,
};
static int mt76x0u_register_device(struct mt76x02_dev *dev)
@@ -218,6 +218,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
.rx_skb = mt76x02_queue_rx_skb,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
};
struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
struct mt76x02_dev *dev;
@@ -337,6 +339,8 @@ err:
}
MODULE_DEVICE_TABLE(usb, mt76x0_device_table);
+MODULE_FIRMWARE(MT7610E_FIRMWARE);
+MODULE_FIRMWARE(MT7610U_FIRMWARE);
MODULE_LICENSE("GPL");
static struct usb_driver mt76x0_driver = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
index a9f14d5149d1..9d7585029df9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -22,7 +22,6 @@
#define MCU_FW_URB_MAX_PAYLOAD 0x38f8
#define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12)
-#define MT7610U_FIRMWARE "mediatek/mt7610u.bin"
static int
mt76x0u_upload_firmware(struct mt76x02_dev *dev,
@@ -75,6 +74,24 @@ out:
return err;
}
+static int mt76x0_get_firmware(struct mt76x02_dev *dev,
+ const struct firmware **fw)
+{
+ int err;
+
+ /* try to load mt7610e fw if available
+	 * otherwise fall back to the mt7610u one
+ */
+ err = firmware_request_nowarn(fw, MT7610E_FIRMWARE, dev->mt76.dev);
+ if (err) {
+ dev_info(dev->mt76.dev, "%s not found, switching to %s",
+ MT7610E_FIRMWARE, MT7610U_FIRMWARE);
+ return request_firmware(fw, MT7610U_FIRMWARE,
+ dev->mt76.dev);
+ }
+ return 0;
+}
+
static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
{
const struct firmware *fw;
@@ -88,7 +105,7 @@ static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
if (mt76x0_firmware_running(dev))
return 0;
- ret = request_firmware(&fw, MT7610U_FIRMWARE, dev->mt76.dev);
+ ret = mt76x0_get_firmware(dev, &fw);
if (ret)
return ret;
@@ -171,5 +188,3 @@ int mt76x0u_mcu_init(struct mt76x02_dev *dev)
return 0;
}
-
-MODULE_FIRMWARE(MT7610U_FIRMWARE);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 7806963b1905..6782665049dd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -26,13 +26,7 @@
#include "mt76x02_dfs.h"
#include "mt76x02_dma.h"
-struct mt76x02_mac_stats {
- u64 rx_stat[6];
- u64 tx_stat[6];
- u64 aggr_stat[2];
- u64 aggr_n[32];
- u64 zero_len_del[2];
-};
+#define MT_CALIBRATE_INTERVAL HZ
#define MT_MAX_CHAINS 2
struct mt76x02_rx_freq_cal {
@@ -63,6 +57,10 @@ struct mt76x02_calibration {
bool tssi_comp_pending;
bool dpd_cal_done;
bool channel_cal_done;
+ bool gain_init_done;
+
+ int tssi_target;
+ s8 tssi_dc;
};
struct mt76x02_dev {
@@ -82,8 +80,6 @@ struct mt76x02_dev {
struct delayed_work cal_work;
struct delayed_work mac_work;
- struct mt76x02_mac_stats stats;
- atomic_t avg_ampdu_len;
u32 aggr_stats[32];
struct sk_buff *beacons[8];
@@ -109,14 +105,16 @@ struct mt76x02_dev {
extern struct ieee80211_rate mt76x02_rates[12];
+void mt76x02_init_device(struct mt76x02_dev *dev);
void mt76x02_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast);
-int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev);
void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
unsigned int idx);
int mt76x02_add_interface(struct ieee80211_hw *hw,
@@ -139,9 +137,12 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
s8 max_txpwr_adj);
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
+void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
+void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
+ s16 coverage_class);
+int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
int mt76x02_insert_hdr_pad(struct sk_buff *skb);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
-void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
@@ -153,12 +154,24 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info);
+void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac);
+void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int mt76x02_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int *dbm);
+void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
+void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed);
extern const u16 mt76x02_beacon_offsets[16];
-void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev);
+void mt76x02_init_beacon_config(struct mt76x02_dev *dev);
void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
void mt76x02_mac_start(struct mt76x02_dev *dev);
+void mt76x02_init_debugfs(struct mt76x02_dev *dev);
+
static inline bool is_mt76x2(struct mt76x02_dev *dev)
{
return mt76_chip(&dev->mt76) == 0x7612 ||
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index e8f8ccc0a5ed..a9d52ba1e270 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -15,10 +15,10 @@
*/
#include <linux/debugfs.h>
-#include "mt76x2.h"
+#include "mt76x02.h"
static int
-mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
+mt76x02_ampdu_stat_read(struct seq_file *file, void *data)
{
struct mt76x02_dev *dev = file->private;
int i, j;
@@ -42,9 +42,9 @@ mt76x2_ampdu_stat_read(struct seq_file *file, void *data)
}
static int
-mt76x2_ampdu_stat_open(struct inode *inode, struct file *f)
+mt76x02_ampdu_stat_open(struct inode *inode, struct file *f)
{
- return single_open(f, mt76x2_ampdu_stat_read, inode->i_private);
+ return single_open(f, mt76x02_ampdu_stat_read, inode->i_private);
}
static int read_txpower(struct seq_file *file, void *data)
@@ -59,14 +59,14 @@ static int read_txpower(struct seq_file *file, void *data)
}
static const struct file_operations fops_ampdu_stat = {
- .open = mt76x2_ampdu_stat_open,
+ .open = mt76x02_ampdu_stat_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int
-mt76x2_dfs_stat_read(struct seq_file *file, void *data)
+mt76x02_dfs_stat_read(struct seq_file *file, void *data)
{
struct mt76x02_dev *dev = file->private;
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@@ -92,13 +92,13 @@ mt76x2_dfs_stat_read(struct seq_file *file, void *data)
}
static int
-mt76x2_dfs_stat_open(struct inode *inode, struct file *f)
+mt76x02_dfs_stat_open(struct inode *inode, struct file *f)
{
- return single_open(f, mt76x2_dfs_stat_read, inode->i_private);
+ return single_open(f, mt76x02_dfs_stat_read, inode->i_private);
}
static const struct file_operations fops_dfs_stat = {
- .open = mt76x2_dfs_stat_open,
+ .open = mt76x02_dfs_stat_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
@@ -116,7 +116,7 @@ static int read_agc(struct seq_file *file, void *data)
return 0;
}
-void mt76x2_init_debugfs(struct mt76x02_dev *dev)
+void mt76x02_init_debugfs(struct mt76x02_dev *dev)
{
struct dentry *dir;
@@ -134,4 +134,4 @@ void mt76x2_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc);
}
-EXPORT_SYMBOL_GPL(mt76x2_init_debugfs);
+EXPORT_SYMBOL_GPL(mt76x02_init_debugfs);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index b56febae8945..054609c634a2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -14,7 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include "mt76x2.h"
+#include "mt76x02.h"
#define RADAR_SPEC(m, len, el, eh, wl, wh, \
w_tolerance, tl, th, t_tolerance, \
@@ -151,8 +151,7 @@ static const struct mt76x02_radar_specs jp_w53_radar_specs[] = {
};
static void
-mt76x2_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev,
- u8 enable)
+mt76x02_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev, u8 enable)
{
u32 data;
@@ -160,8 +159,8 @@ mt76x2_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev,
mt76_wr(dev, MT_BBP(DFS, 36), data);
}
-static void mt76x2_dfs_seq_pool_put(struct mt76x02_dev *dev,
- struct mt76x02_dfs_sequence *seq)
+static void mt76x02_dfs_seq_pool_put(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_sequence *seq)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@@ -172,7 +171,7 @@ static void mt76x2_dfs_seq_pool_put(struct mt76x02_dev *dev,
}
static struct mt76x02_dfs_sequence *
-mt76x2_dfs_seq_pool_get(struct mt76x02_dev *dev)
+mt76x02_dfs_seq_pool_get(struct mt76x02_dev *dev)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sequence *seq;
@@ -192,7 +191,7 @@ mt76x2_dfs_seq_pool_get(struct mt76x02_dev *dev)
return seq;
}
-static int mt76x2_dfs_get_multiple(int val, int frac, int margin)
+static int mt76x02_dfs_get_multiple(int val, int frac, int margin)
{
int remainder, factor;
@@ -214,7 +213,7 @@ static int mt76x2_dfs_get_multiple(int val, int frac, int margin)
return factor;
}
-static void mt76x2_dfs_detector_reset(struct mt76x02_dev *dev)
+static void mt76x02_dfs_detector_reset(struct mt76x02_dev *dev)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sequence *seq, *tmp_seq;
@@ -231,11 +230,11 @@ static void mt76x2_dfs_detector_reset(struct mt76x02_dev *dev)
list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
list_del_init(&seq->head);
- mt76x2_dfs_seq_pool_put(dev, seq);
+ mt76x02_dfs_seq_pool_put(dev, seq);
}
}
-static bool mt76x2_dfs_check_chirp(struct mt76x02_dev *dev)
+static bool mt76x02_dfs_check_chirp(struct mt76x02_dev *dev)
{
bool ret = false;
u32 current_ts, delta_ts;
@@ -256,8 +255,8 @@ static bool mt76x2_dfs_check_chirp(struct mt76x02_dev *dev)
return ret;
}
-static void mt76x2_dfs_get_hw_pulse(struct mt76x02_dev *dev,
- struct mt76x02_dfs_hw_pulse *pulse)
+static void mt76x02_dfs_get_hw_pulse(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_hw_pulse *pulse)
{
u32 data;
@@ -276,8 +275,8 @@ static void mt76x2_dfs_get_hw_pulse(struct mt76x02_dev *dev,
pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22));
}
-static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev,
- struct mt76x02_dfs_hw_pulse *pulse)
+static bool mt76x02_dfs_check_hw_pulse(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_hw_pulse *pulse)
{
bool ret = false;
@@ -290,7 +289,7 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev,
break;
if (pulse->engine == 3) {
- ret = mt76x2_dfs_check_chirp(dev);
+ ret = mt76x02_dfs_check_chirp(dev);
break;
}
@@ -334,7 +333,7 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev,
break;
if (pulse->engine == 3) {
- ret = mt76x2_dfs_check_chirp(dev);
+ ret = mt76x02_dfs_check_chirp(dev);
break;
}
@@ -371,8 +370,8 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev,
return ret;
}
-static bool mt76x2_dfs_fetch_event(struct mt76x02_dev *dev,
- struct mt76x02_dfs_event *event)
+static bool mt76x02_dfs_fetch_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
u32 data;
@@ -398,8 +397,8 @@ static bool mt76x2_dfs_fetch_event(struct mt76x02_dev *dev,
return true;
}
-static bool mt76x2_dfs_check_event(struct mt76x02_dev *dev,
- struct mt76x02_dfs_event *event)
+static bool mt76x02_dfs_check_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
if (event->engine == 2) {
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@@ -417,8 +416,8 @@ static bool mt76x2_dfs_check_event(struct mt76x02_dev *dev,
return true;
}
-static void mt76x2_dfs_queue_event(struct mt76x02_dev *dev,
- struct mt76x02_dfs_event *event)
+static void mt76x02_dfs_queue_event(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_event_rb *event_buff;
@@ -435,9 +434,9 @@ static void mt76x2_dfs_queue_event(struct mt76x02_dev *dev,
MT_DFS_EVENT_BUFLEN);
}
-static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev,
- struct mt76x02_dfs_event *event,
- u16 cur_len)
+static int mt76x02_dfs_create_sequence(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event,
+ u16 cur_len)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sw_detector_params *sw_params;
@@ -497,7 +496,7 @@ static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev,
while (j != end) {
cur_event = &event_rb->data[j];
cur_pri = event->ts - cur_event->ts;
- factor = mt76x2_dfs_get_multiple(cur_pri, seq.pri,
+ factor = mt76x02_dfs_get_multiple(cur_pri, seq.pri,
sw_params->pri_margin);
if (factor > 0) {
seq.first_ts = cur_event->ts;
@@ -509,7 +508,7 @@ static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev,
if (seq.count <= cur_len)
goto next;
- seq_p = mt76x2_dfs_seq_pool_get(dev);
+ seq_p = mt76x02_dfs_seq_pool_get(dev);
if (!seq_p)
return -ENOMEM;
@@ -522,8 +521,8 @@ next:
return 0;
}
-static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
- struct mt76x02_dfs_event *event)
+static u16 mt76x02_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
+ struct mt76x02_dfs_event *event)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sw_detector_params *sw_params;
@@ -535,7 +534,7 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) {
list_del_init(&seq->head);
- mt76x2_dfs_seq_pool_put(dev, seq);
+ mt76x02_dfs_seq_pool_put(dev, seq);
continue;
}
@@ -543,8 +542,8 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
continue;
pri = event->ts - seq->last_ts;
- factor = mt76x2_dfs_get_multiple(pri, seq->pri,
- sw_params->pri_margin);
+ factor = mt76x02_dfs_get_multiple(pri, seq->pri,
+ sw_params->pri_margin);
if (factor > 0) {
seq->last_ts = event->ts;
seq->count++;
@@ -554,7 +553,7 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
return max_seq_len;
}
-static bool mt76x2_dfs_check_detection(struct mt76x02_dev *dev)
+static bool mt76x02_dfs_check_detection(struct mt76x02_dev *dev)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sequence *seq;
@@ -571,34 +570,34 @@ static bool mt76x2_dfs_check_detection(struct mt76x02_dev *dev)
return false;
}
-static void mt76x2_dfs_add_events(struct mt76x02_dev *dev)
+static void mt76x02_dfs_add_events(struct mt76x02_dev *dev)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_event event;
int i, seq_len;
/* disable debug mode */
- mt76x2_dfs_set_capture_mode_ctrl(dev, false);
+ mt76x02_dfs_set_capture_mode_ctrl(dev, false);
for (i = 0; i < MT_DFS_EVENT_LOOP; i++) {
- if (!mt76x2_dfs_fetch_event(dev, &event))
+ if (!mt76x02_dfs_fetch_event(dev, &event))
break;
if (dfs_pd->last_event_ts > event.ts)
- mt76x2_dfs_detector_reset(dev);
+ mt76x02_dfs_detector_reset(dev);
dfs_pd->last_event_ts = event.ts;
- if (!mt76x2_dfs_check_event(dev, &event))
+ if (!mt76x02_dfs_check_event(dev, &event))
continue;
- seq_len = mt76x2_dfs_add_event_to_sequence(dev, &event);
- mt76x2_dfs_create_sequence(dev, &event, seq_len);
+ seq_len = mt76x02_dfs_add_event_to_sequence(dev, &event);
+ mt76x02_dfs_create_sequence(dev, &event, seq_len);
- mt76x2_dfs_queue_event(dev, &event);
+ mt76x02_dfs_queue_event(dev, &event);
}
- mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+ mt76x02_dfs_set_capture_mode_ctrl(dev, true);
}
-static void mt76x2_dfs_check_event_window(struct mt76x02_dev *dev)
+static void mt76x02_dfs_check_event_window(struct mt76x02_dev *dev)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_event_rb *event_buff;
@@ -621,7 +620,7 @@ static void mt76x2_dfs_check_event_window(struct mt76x02_dev *dev)
}
}
-static void mt76x2_dfs_tasklet(unsigned long arg)
+static void mt76x02_dfs_tasklet(unsigned long arg)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@@ -637,16 +636,16 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
dfs_pd->last_sw_check = jiffies;
- mt76x2_dfs_add_events(dev);
- radar_detected = mt76x2_dfs_check_detection(dev);
+ mt76x02_dfs_add_events(dev);
+ radar_detected = mt76x02_dfs_check_detection(dev);
if (radar_detected) {
/* sw detector rx radar pattern */
ieee80211_radar_detected(dev->mt76.hw);
- mt76x2_dfs_detector_reset(dev);
+ mt76x02_dfs_detector_reset(dev);
return;
}
- mt76x2_dfs_check_event_window(dev);
+ mt76x02_dfs_check_event_window(dev);
}
engine_mask = mt76_rr(dev, MT_BBP(DFS, 1));
@@ -660,9 +659,9 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
continue;
pulse.engine = i;
- mt76x2_dfs_get_hw_pulse(dev, &pulse);
+ mt76x02_dfs_get_hw_pulse(dev, &pulse);
- if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) {
+ if (!mt76x02_dfs_check_hw_pulse(dev, &pulse)) {
dfs_pd->stats[i].hw_pulse_discarded++;
continue;
}
@@ -670,7 +669,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg)
/* hw detector rx radar pattern */
dfs_pd->stats[i].hw_pattern++;
ieee80211_radar_detected(dev->mt76.hw);
- mt76x2_dfs_detector_reset(dev);
+ mt76x02_dfs_detector_reset(dev);
return;
}
@@ -682,7 +681,7 @@ out:
mt76x02_irq_enable(dev, MT_INT_GPTIMER);
}
-static void mt76x2_dfs_init_sw_detector(struct mt76x02_dev *dev)
+static void mt76x02_dfs_init_sw_detector(struct mt76x02_dev *dev)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@@ -708,7 +707,7 @@ static void mt76x2_dfs_init_sw_detector(struct mt76x02_dev *dev)
}
}
-static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev)
+static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
{
const struct mt76x02_radar_specs *radar_specs;
u8 i, shift;
@@ -800,10 +799,10 @@ static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev)
/* enable detection */
mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
- mt76_wr(dev, 0x212c, 0x0c350001);
+ mt76_wr(dev, MT_BBP(IBI, 11), 0x0c350001);
}
-void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev)
+void mt76x02_phy_dfs_adjust_agc(struct mt76x02_dev *dev)
{
u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31;
@@ -821,19 +820,27 @@ void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev)
dfs_r31 = (dfs_r31 << 16) | 0x00000307;
mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31);
- mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
+ if (is_mt76x2(dev)) {
+ mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071);
+ } else {
+ /* disable hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 0), 0);
+ /* enable hw detector */
+ mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
+ }
}
+EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc);
-void mt76x2_dfs_init_params(struct mt76x02_dev *dev)
+void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
{
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
dev->dfs_pd.region != NL80211_DFS_UNSET) {
- mt76x2_dfs_init_sw_detector(dev);
- mt76x2_dfs_set_bbp_params(dev);
+ mt76x02_dfs_init_sw_detector(dev);
+ mt76x02_dfs_set_bbp_params(dev);
/* enable debug mode */
- mt76x2_dfs_set_capture_mode_ctrl(dev, true);
+ mt76x02_dfs_set_capture_mode_ctrl(dev, true);
mt76x02_irq_enable(dev, MT_INT_GPTIMER);
mt76_rmw_field(dev, MT_INT_TIMER_EN,
@@ -843,15 +850,20 @@ void mt76x2_dfs_init_params(struct mt76x02_dev *dev)
mt76_wr(dev, MT_BBP(DFS, 0), 0);
/* clear detector status */
mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
- mt76_wr(dev, 0x212c, 0);
+ if (mt76_chip(&dev->mt76) == 0x7610 ||
+ mt76_chip(&dev->mt76) == 0x7630)
+ mt76_wr(dev, MT_BBP(IBI, 11), 0xfde8081);
+ else
+ mt76_wr(dev, MT_BBP(IBI, 11), 0);
mt76x02_irq_disable(dev, MT_INT_GPTIMER);
mt76_rmw_field(dev, MT_INT_TIMER_EN,
MT_INT_TIMER_EN_GP_TIMER_EN, 0);
}
}
+EXPORT_SYMBOL_GPL(mt76x02_dfs_init_params);
-void mt76x2_dfs_init_detector(struct mt76x02_dev *dev)
+void mt76x02_dfs_init_detector(struct mt76x02_dev *dev)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@@ -859,20 +871,29 @@ void mt76x2_dfs_init_detector(struct mt76x02_dev *dev)
INIT_LIST_HEAD(&dfs_pd->seq_pool);
dfs_pd->region = NL80211_DFS_UNSET;
dfs_pd->last_sw_check = jiffies;
- tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet,
+ tasklet_init(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet,
(unsigned long)dev);
}
-void mt76x2_dfs_set_domain(struct mt76x02_dev *dev,
- enum nl80211_dfs_regions region)
+static void
+mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
+ enum nl80211_dfs_regions region)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
if (dfs_pd->region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet);
dfs_pd->region = region;
- mt76x2_dfs_init_params(dev);
+ mt76x02_dfs_init_params(dev);
tasklet_enable(&dfs_pd->dfs_tasklet);
}
}
+void mt76x02_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt76x02_dev *dev = hw->priv;
+
+ mt76x02_dfs_set_domain(dev, request->dfs_region);
+}
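
The software detector above reduces radar pattern matching to one primitive: is a new pulse interval an integer multiple of a sequence's PRI, within a jitter margin? A minimal sketch of that test follows; the body of mt76x02_dfs_get_multiple() is elided from the hunk above, so this is an illustration of the idea rather than a verbatim copy:

	/* Return N when val is within +/- margin of N * frac, -1 when it
	 * is not close to any multiple; callers treat factor > 0 as a hit.
	 * frac == 0 degenerates to a trivial match.
	 */
	static int dfs_get_multiple(int val, int frac, int margin)
	{
		int remainder, factor;

		if (!frac || abs(val - frac) <= margin)
			return 1;

		factor = val / frac;
		remainder = val % frac;
		if (remainder > margin) {
			if ((frac - remainder) <= margin)
				factor++;	/* just below the next multiple */
			else
				factor = -1;	/* between multiples: no match */
		}
		return factor;
	}
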
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
index 7e177c934592..70b394e17340 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h
@@ -137,4 +137,9 @@ struct mt76x02_dfs_pattern_detector {
struct tasklet_struct dfs_tasklet;
};
+void mt76x02_dfs_init_params(struct mt76x02_dev *dev);
+void mt76x02_dfs_init_detector(struct mt76x02_dev *dev);
+void mt76x02_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request);
+void mt76x02_phy_dfs_adjust_agc(struct mt76x02_dev *dev);
#endif /* __MT76x02_DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
index 9390de2a323e..07f0496d828a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -53,6 +53,18 @@ mt76x02_efuse_read(struct mt76x02_dev *dev, u16 addr, u8 *data,
return 0;
}
+int mt76x02_eeprom_copy(struct mt76x02_dev *dev,
+ enum mt76x02_eeprom_field field,
+ void *dest, int len)
+{
+ if (field + len > dev->mt76.eeprom.size)
+ return -1;
+
+ memcpy(dest, dev->mt76.eeprom.data + field, len);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_eeprom_copy);
+
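mt76x02_eeprom_copy() gives chip-specific code a bounds-checked block read from the cached EEPROM image. A typical (hypothetical) caller pulling the MAC address field:

	u8 addr[ETH_ALEN];

	/* copies 6 bytes at MT_EE_MAC_ADDR; fails if the image is short */
	if (mt76x02_eeprom_copy(dev, MT_EE_MAC_ADDR, addr, sizeof(addr)) < 0)
		dev_err(dev->mt76.dev, "EEPROM image truncated\n");
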
int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf,
int len, enum mt76x02_eeprom_modes mode)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
index b3ec74835d10..e3442bc4e0a4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h
@@ -25,6 +25,7 @@ enum mt76x02_eeprom_field {
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
MT_EE_PCI_ID = 0x00A,
+ MT_EE_ANTENNA = 0x022,
MT_EE_NIC_CONF_0 = 0x034,
MT_EE_NIC_CONF_1 = 0x036,
MT_EE_COUNTRY_REGION_5GHZ = 0x038,
@@ -55,6 +56,7 @@ enum mt76x02_eeprom_field {
#define MT_TX_POWER_GROUP_SIZE_5G 5
#define MT_TX_POWER_GROUPS_5G 6
MT_EE_TX_POWER_0_START_5G = 0x062,
+ MT_EE_TSSI_SLOPE_2G = 0x06e,
MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074,
MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076,
@@ -85,6 +87,7 @@ enum mt76x02_eeprom_field {
MT_EE_TSSI_BOUND5 = 0x0dc,
MT_EE_TX_POWER_BYRATE_BASE = 0x0de,
+ MT_EE_TSSI_SLOPE_5G = 0x0f0,
MT_EE_RF_TEMP_COMP_SLOPE_5G = 0x0f2,
MT_EE_RF_TEMP_COMP_SLOPE_2G = 0x0f4,
@@ -104,6 +107,8 @@ enum mt76x02_eeprom_field {
__MT_EE_MAX
};
+#define MT_EE_ANTENNA_DUAL BIT(15)
+
#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
#define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8)
@@ -118,12 +123,9 @@ enum mt76x02_eeprom_field {
#define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3)
#define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13)
-#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0)
-#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4)
-#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8)
+#define MT_EE_NIC_CONF_2_ANT_OPT BIT(3)
+#define MT_EE_NIC_CONF_2_ANT_DIV BIT(4)
#define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9)
-#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11)
-#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13)
#define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \
MT_EE_USAGE_MAP_START + 1)
@@ -188,5 +190,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan);
void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev);
+int mt76x02_eeprom_copy(struct mt76x02_dev *dev,
+ enum mt76x02_eeprom_field field,
+ void *dest, int len);
#endif /* __MT76x02_EEPROM_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 10578e4cb269..c08bf371e527 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -18,7 +18,7 @@
#include "mt76x02.h"
#include "mt76x02_trace.h"
-enum mt76x02_cipher_type
+static enum mt76x02_cipher_type
mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
memset(key_data, 0, 32);
@@ -43,7 +43,6 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
return MT_CIPHER_NONE;
}
}
-EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info);
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key)
@@ -95,7 +94,6 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
return 0;
}
-EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key);
void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
u8 vif_idx, u8 *mac)
@@ -108,9 +106,6 @@ void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx,
mt76_wr(dev, MT_WCID_ATTR(idx), attr);
- mt76_wr(dev, MT_WCID_TX_RATE(idx), 0);
- mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0);
-
if (idx >= 128)
return;
@@ -130,31 +125,6 @@ void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
if ((val & bit) != (bit * drop))
mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}
-EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop);
-
-void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq)
-{
- struct mt76_txq *mtxq;
-
- if (!txq)
- return;
-
- mtxq = (struct mt76_txq *) txq->drv_priv;
- if (txq->sta) {
- struct mt76x02_sta *sta;
-
- sta = (struct mt76x02_sta *) txq->sta->drv_priv;
- mtxq->wcid = &sta->wcid;
- } else {
- struct mt76x02_vif *mvif;
-
- mvif = (struct mt76x02_vif *) txq->vif->drv_priv;
- mtxq->wcid = &mvif->group_wcid;
- }
-
- mt76_txq_init(&dev->mt76, txq);
-}
-EXPORT_SYMBOL_GPL(mt76x02_txq_init);
static __le16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
@@ -216,6 +186,14 @@ void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
spin_unlock_bh(&dev->mt76.lock);
}
+void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
+{
+ if (enable)
+ mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+ else
+ mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT);
+}
+
bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat)
{
@@ -237,9 +215,10 @@ bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev,
stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2);
stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2);
+ trace_mac_txstat_fetch(dev, stat);
+
return true;
}
-EXPORT_SYMBOL_GPL(mt76x02_mac_load_tx_status);
static int
mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
@@ -319,8 +298,6 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
else
txwi->wcid = 0xff;
- txwi->pktid = 1;
-
if (wcid && wcid->sw_iv && key) {
u64 pn = atomic64_inc_return(&key->tx_pn);
ccmp_pn[0] = pn;
@@ -366,8 +343,6 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ;
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
- txwi->pktid |= MT_TXWI_PKTID_PROBE;
if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) {
u8 ba_size = IEEE80211_MIN_AMPDU_BUF;
@@ -420,9 +395,6 @@ mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev,
info->status.ampdu_len = n_frames;
info->status.ampdu_ack_len = st->success ? n_frames : 0;
- if (st->pktid & MT_TXWI_PKTID_PROBE)
- info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
-
if (st->aggr)
info->flags |= IEEE80211_TX_CTL_AMPDU |
IEEE80211_TX_STAT_AMPDU;
@@ -437,23 +409,40 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat, u8 *update)
{
struct ieee80211_tx_info info = {};
- struct ieee80211_sta *sta = NULL;
+ struct ieee80211_tx_status status = {
+ .info = &info
+ };
struct mt76_wcid *wcid = NULL;
struct mt76x02_sta *msta = NULL;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct sk_buff_head list;
+
+ if (stat->pktid == MT_PACKET_ID_NO_ACK)
+ return;
rcu_read_lock();
+ mt76_tx_status_lock(mdev, &list);
+
if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
- if (wcid) {
+ if (wcid && wcid->sta) {
void *priv;
priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
- sta = container_of(priv, struct ieee80211_sta,
- drv_priv);
+ status.sta = container_of(priv, struct ieee80211_sta,
+ drv_priv);
+ }
+
+ if (wcid) {
+ if (stat->pktid)
+ status.skb = mt76_tx_status_skb_get(mdev, wcid,
+ stat->pktid, &list);
+ if (status.skb)
+ status.info = IEEE80211_SKB_CB(status.skb);
}
- if (msta && stat->aggr) {
+ if (msta && stat->aggr && !status.skb) {
u32 stat_val, stat_cache;
stat_val = stat->rate;
@@ -467,25 +456,28 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
goto out;
}
- mt76x02_mac_fill_tx_status(dev, &info, &msta->status,
+ mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
msta->n_frames);
msta->status = *stat;
msta->n_frames = 1;
*update = 0;
} else {
- mt76x02_mac_fill_tx_status(dev, &info, stat, 1);
+ mt76x02_mac_fill_tx_status(dev, status.info, stat, 1);
*update = 1;
}
- ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info);
+ if (status.skb)
+ mt76_tx_status_skb_done(mdev, status.skb, &list);
+ else
+ ieee80211_tx_status_ext(mt76_hw(dev), &status);
out:
+ mt76_tx_status_unlock(mdev, &list);
rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(mt76x02_send_tx_status);
-int
+static int
mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
{
u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate);
@@ -551,7 +543,6 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate)
return 0;
}
-EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate);
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr)
{
@@ -695,8 +686,6 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
if (!ret)
break;
- trace_mac_txstat_fetch(dev, &stat);
-
if (!irq) {
mt76x02_send_tx_status(dev, &stat, &update);
continue;
@@ -705,33 +694,230 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
kfifo_put(&dev->txstatus_fifo, stat);
}
}
-EXPORT_SYMBOL_GPL(mt76x02_mac_poll_tx_status);
-static void
-mt76x02_mac_queue_txdone(struct mt76x02_dev *dev, struct sk_buff *skb,
- void *txwi_ptr)
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
+ struct mt76_queue_entry *e, bool flush)
{
- struct mt76x02_tx_info *txi = mt76x02_skb_tx_info(skb);
- struct mt76x02_txwi *txwi = txwi_ptr;
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76x02_txwi *txwi;
+
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
mt76x02_mac_poll_tx_status(dev, false);
- txi->tries = 0;
- txi->jiffies = jiffies;
- txi->wcid = txwi->wcid;
- txi->pktid = txwi->pktid;
+ txwi = (struct mt76x02_txwi *) &e->txwi->txwi;
trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
- mt76x02_tx_complete(&dev->mt76, skb);
+
+ mt76_tx_complete_skb(mdev, e->skb);
}
+EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
+void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val)
+{
+ u32 data = 0;
+
+ if (val != ~0)
+ data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
+ MT_PROT_CFG_RTS_THRESH;
+
+ mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
+
+ mt76_rmw(dev, MT_CCK_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_OFDM_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_MM20_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_MM40_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_GF20_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_GF40_PROT_CFG,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG6,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG7,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+ mt76_rmw(dev, MT_TX_PROT_CFG8,
+ MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
+}
+
+void mt76x02_update_channel(struct mt76_dev *mdev)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76_channel_state *state;
+ u32 active, busy;
+
+ state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
+
+ busy = mt76_rr(dev, MT_CH_BUSY);
+ active = busy + mt76_rr(dev, MT_CH_IDLE);
+
+ spin_lock_bh(&dev->mt76.cc_lock);
+ state->cc_busy += busy;
+ state->cc_active += active;
+ spin_unlock_bh(&dev->mt76.cc_lock);
+}
+EXPORT_SYMBOL_GPL(mt76x02_update_channel);
+
+static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
+{
+ u32 val = mt76_rr(dev, 0x10f4);
+
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+
+ dev_err(dev->mt76.dev, "mac specific condition occurred\n");
+
+ mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
+ udelay(10);
+ mt76_clear(dev, MT_MAC_SYS_CTRL,
+ MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
+}
+
+void mt76x02_mac_work(struct work_struct *work)
+{
+ struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
+ mac_work.work);
+ int i, idx;
+
+ mt76x02_update_channel(&dev->mt76);
+ for (i = 0, idx = 0; i < 16; i++) {
+ u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
- if (e->txwi)
- mt76x02_mac_queue_txdone(dev, e->skb, &e->txwi->txwi);
+ dev->aggr_stats[idx++] += val & 0xffff;
+ dev->aggr_stats[idx++] += val >> 16;
+ }
+
+ /* XXX: check beacon stuck for ap mode */
+ if (!dev->beacon_mask)
+ mt76x02_check_mac_err(dev);
+
+ mt76_tx_status_check(&dev->mt76, NULL, false);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
+}
+
+void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
+{
+ idx &= 7;
+ mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
+ mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
+ get_unaligned_le16(addr + 4));
+}
+
+static int
+mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
+{
+ int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
+ struct mt76x02_txwi txwi;
+
+ if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
+ return -ENOSPC;
+
+ mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
+
+ mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+ offset += sizeof(txwi);
+
+ mt76_wr_copy(dev, offset, skb->data, skb->len);
+ return 0;
+}
+
+static int
+__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
+ struct sk_buff *skb)
+{
+ int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
+ int beacon_addr = mt76x02_beacon_offsets[bcn_idx];
+ int ret = 0;
+ int i;
+
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+
+ if (skb) {
+ ret = mt76x02_write_beacon(dev, beacon_addr, skb);
+ if (!ret)
+ dev->beacon_data_mask |= BIT(bcn_idx);
+ } else {
+ dev->beacon_data_mask &= ~BIT(bcn_idx);
+ for (i = 0; i < beacon_len; i += 4)
+ mt76_wr(dev, beacon_addr + i, 0);
+ }
+
+ mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+
+ return ret;
+}
+
+int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
+ struct sk_buff *skb)
+{
+ bool force_update = false;
+ int bcn_idx = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
+ if (vif_idx == i) {
+ force_update = !!dev->beacons[i] ^ !!skb;
+
+ if (dev->beacons[i])
+ dev_kfree_skb(dev->beacons[i]);
+
+ dev->beacons[i] = skb;
+ __mt76x02_mac_set_beacon(dev, bcn_idx, skb);
+ } else if (force_update && dev->beacons[i]) {
+ __mt76x02_mac_set_beacon(dev, bcn_idx,
+ dev->beacons[i]);
+ }
+
+ bcn_idx += !!dev->beacons[i];
+ }
+
+ for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
+ if (!(dev->beacon_data_mask & BIT(i)))
+ break;
+
+ __mt76x02_mac_set_beacon(dev, i, NULL);
+ }
+
+ mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
+ bcn_idx - 1);
+ return 0;
+}
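	/*
	 * Note: beacon templates of active interfaces are packed into
	 * consecutive hardware slots by the loop above, and
	 * MT_BCN_BYPASS_MASK parks each slot while it is rewritten so the
	 * MAC never transmits a half-updated template.
	 */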
+
+void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
+ u8 vif_idx, bool val)
+{
+ u8 old_mask = dev->beacon_mask;
+ bool en;
+ u32 reg;
+
+ if (val) {
+ dev->beacon_mask |= BIT(vif_idx);
+ } else {
+ dev->beacon_mask &= ~BIT(vif_idx);
+ mt76x02_mac_set_beacon(dev, vif_idx, NULL);
+ }
+
+ if (!!old_mask == !!dev->beacon_mask)
+ return;
+
+ en = dev->beacon_mask;
+
+ mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
+ reg = MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_TIMER_EN;
+ mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
+
+ if (en)
+ mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
else
- dev_kfree_skb_any(e->skb);
+ mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}
-EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
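
The tx-status rework in this file replaces the old per-skb driver area with a small packet id: mt76_tx_status_skb_add() stashes the skb under an id, the id travels in the TXWI, and the hardware status FIFO hands it back. Condensed from the hunks above:

	/* tx side */
	pid = mt76_tx_status_skb_add(mdev, wcid, skb);
	txwi->pktid = pid;

	/* status side: ids other than MT_PACKET_ID_NO_ACK may resolve
	 * back to a queued skb; otherwise report rate info only via
	 * ieee80211_tx_status_ext()
	 */
	if (stat->pktid != MT_PACKET_ID_NO_ACK) {
		status.skb = mt76_tx_status_skb_get(mdev, wcid,
						    stat->pktid, &list);
		if (status.skb)
			status.info = IEEE80211_SKB_CB(status.skb);
	}
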
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index d99c18743969..4e597004c445 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -37,18 +37,8 @@ struct mt76x02_tx_status {
#define MT_MAX_VIFS 8
struct mt76x02_vif {
+ struct mt76_wcid group_wcid; /* must be first */
u8 idx;
-
- struct mt76_wcid group_wcid;
-};
-
-struct mt76x02_tx_info {
- unsigned long jiffies;
- u8 tries;
-
- u8 wcid;
- u8 pktid;
- u8 retry;
};
DECLARE_EWMA(signal, 10, 8);
@@ -153,8 +143,6 @@ enum mt76x2_phy_bandwidth {
#define MT_TXWI_ACK_CTL_NSEQ BIT(1)
#define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2)
-#define MT_TXWI_PKTID_PROBE BIT(7)
-
struct mt76x02_txwi {
__le16 flags;
__le16 rate;
@@ -190,18 +178,7 @@ static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev)
return false;
}
-static inline struct mt76x02_tx_info *
-mt76x02_skb_tx_info(struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- return (void *)info->status.status_driver_data;
-}
-
-void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq);
-enum mt76x02_cipher_type
-mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data);
-
+void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable);
int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
u8 key_idx, struct ieee80211_key_conf *key);
int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
@@ -217,8 +194,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
struct mt76x02_tx_status *stat, u8 *update);
int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
void *rxi);
-int
-mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate);
+void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val);
void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr);
void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
@@ -226,4 +202,12 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush);
+void mt76x02_update_channel(struct mt76_dev *mdev);
+void mt76x02_mac_work(struct work_struct *work);
+
+void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
+int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
+ struct sk_buff *skb);
+void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
+ bool val);
#endif
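
The "must be first" constraint on group_wcid exists, presumably, so that generic mt76 code can view the interface's driver-private area as a plain struct mt76_wcid, the same layout trick mt76x02_sta_remove() applies to sta->drv_priv above:

	/* valid only while the wcid stays the first member */
	struct mt76_wcid *wcid = (struct mt76_wcid *)vif->drv_priv;
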
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
index 1b853bb723fb..b7f4edb729e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
@@ -21,7 +21,7 @@
#include "mt76x02_mcu.h"
-struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
+static struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
{
struct sk_buff *skb;
@@ -32,7 +32,6 @@ struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len)
return skb;
}
-EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc);
static struct sk_buff *
mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires)
@@ -80,16 +79,18 @@ mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid,
return 0;
}
-int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
- int cmd, bool wait_resp)
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
unsigned long expires = jiffies + HZ;
+ struct sk_buff *skb;
int ret;
u8 seq;
+ skb = mt76x02_mcu_msg_alloc(data, len);
if (!skb)
- return -EINVAL;
+ return -ENOMEM;
mutex_lock(&mdev->mmio.mcu.mutex);
@@ -131,11 +132,9 @@ out:
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send);
-int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
- enum mcu_function func,
- u32 val, bool wait_resp)
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func,
+ u32 val)
{
- struct sk_buff *skb;
struct {
__le32 id;
__le32 value;
@@ -143,16 +142,17 @@ int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
.id = cpu_to_le32(func),
.value = cpu_to_le32(val),
};
+ bool wait = false;
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, wait_resp);
+ if (func != Q_SELECT)
+ wait = true;
+
+ return mt76_mcu_send_msg(dev, CMD_FUN_SET_OP, &msg, sizeof(msg), wait);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select);
-int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
- bool wait_resp)
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on)
{
- struct sk_buff *skb;
struct {
__le32 mode;
__le32 level;
@@ -161,15 +161,12 @@ int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
.level = cpu_to_le32(0),
};
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, wait_resp);
+ return mt76_mcu_send_msg(dev, CMD_POWER_SAVING_OP, &msg, sizeof(msg), false);
}
EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state);
-int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
- u32 param, bool wait)
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param)
{
- struct sk_buff *skb;
struct {
__le32 id;
__le32 value;
@@ -177,17 +174,18 @@ int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
.id = cpu_to_le32(type),
.value = cpu_to_le32(param),
};
+ bool is_mt76x2e = mt76_is_mmio(dev) && is_mt76x2(dev);
int ret;
- if (wait)
+ if (is_mt76x2e)
mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- ret = mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
+ ret = mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
+ true);
if (ret)
return ret;
- if (wait &&
+ if (is_mt76x2e &&
WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0,
BIT(31), BIT(31), 100)))
return -ETIMEDOUT;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
index 2d8fd2514570..7e4004120102 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h
@@ -97,16 +97,12 @@ struct mt76x02_patch_header {
};
int mt76x02_mcu_cleanup(struct mt76x02_dev *dev);
-int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type,
- u32 param, bool wait);
-struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len);
-int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb,
- int cmd, bool wait_resp);
-int mt76x02_mcu_function_select(struct mt76x02_dev *dev,
- enum mcu_function func,
- u32 val, bool wait_resp);
-int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on,
- bool wait_resp);
+int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param);
+int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp);
+int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func,
+ u32 val);
+int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on);
void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev,
const struct mt76x02_fw_header *h);
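
For PCIe mt76x2 parts, mt76x02_mcu_calibrate() now wraps the command in a completion handshake on what appears to be a done bit in MT_MCU_COM_REG0. Restated from the hunk above:

	/* clear the bit, fire the command, wait for firmware to set it */
	mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0);
	ret = mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
				true);
	if (!ret && !mt76_poll_msec(dev, MT_MCU_COM_REG0, BIT(31), BIT(31),
				    100))
		ret = -ETIMEDOUT;
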
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 39f092034240..66315410aebe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -21,6 +21,130 @@
#include "mt76x02.h"
#include "mt76x02_trace.h"
+struct beacon_bc_data {
+ struct mt76x02_dev *dev;
+ struct sk_buff_head q;
+ struct sk_buff *tail[8];
+};
+
+static void
+mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct sk_buff *skb = NULL;
+
+ if (!(dev->beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ mt76x02_mac_set_beacon(dev, mvif->idx, skb);
+}
+
+static void
+mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct beacon_bc_data *data = priv;
+ struct mt76x02_dev *dev = data->dev;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+
+ if (!(dev->beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ mt76_skb_set_moredata(skb, true);
+ __skb_queue_tail(&data->q, skb);
+ data->tail[mvif->idx] = skb;
+}
+
+static void
+mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
+{
+ u32 timer_val = dev->beacon_int << 4;
+
+ dev->tbtt_count++;
+
+ /*
+ * Beacon timer drifts by 1us every tick; the timer is configured
+ * in 1/16 TU (64us) units.
+ */
+ if (dev->tbtt_count < 62)
+ return;
+
+ if (dev->tbtt_count >= 64) {
+ dev->tbtt_count = 0;
+ return;
+ }
+
+ /*
+ * The updated beacon interval takes effect after two TBTTs, because
+ * at this point the original interval has already been loaded into
+ * the next TBTT_TIMER value
+ */
+ if (dev->tbtt_count == 62)
+ timer_val -= 1;
+
+ mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_INTVAL, timer_val);
+}
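	/*
	 * Worked out: drift is 1us per beacon and the interval register
	 * counts 1/16 TU = 64us units, so 64 beacons accumulate exactly
	 * one unit of error (64 x 1us). Shaving one unit off a single
	 * interval every 64 TBTTs therefore cancels the drift.
	 */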
+
+static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
+{
+ struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
+ struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
+ struct beacon_bc_data data = {};
+ struct sk_buff *skb;
+ int i, nframes;
+
+ mt76x02_resync_beacon_timer(dev);
+
+ data.dev = dev;
+ __skb_queue_head_init(&data.q);
+
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x02_update_beacon_iter, dev);
+
+ do {
+ nframes = skb_queue_len(&data.q);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x02_add_buffered_bc, &data);
+ } while (nframes != skb_queue_len(&data.q));
+
+ if (!nframes)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
+ if (!data.tail[i])
+ continue;
+
+ mt76_skb_set_moredata(data.tail[i], false);
+ }
+
+ spin_lock_bh(&q->lock);
+ while ((skb = __skb_dequeue(&data.q)) != NULL) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+
+ mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
+ NULL);
+ }
+ spin_unlock_bh(&q->lock);
+}
+
static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
int idx, int n_desc)
@@ -98,6 +222,9 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
return -ENOMEM;
tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev);
+ tasklet_init(&dev->pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
+ (unsigned long)dev);
+
kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
mt76_dma_attach(&dev->mt76);
@@ -225,7 +352,6 @@ static void mt76x02_dma_enable(struct mt76x02_dev *dev)
mt76_clear(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}
-EXPORT_SYMBOL_GPL(mt76x02_dma_enable);
void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index 0f1d7b5c9f68..977a8e7e26df 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -254,5 +254,6 @@ void mt76x02_init_agc_gain(struct mt76x02_dev *dev)
memcpy(dev->cal.agc_gain_cur, dev->cal.agc_gain_init,
sizeof(dev->cal.agc_gain_cur));
dev->cal.low_gain = -1;
+ dev->cal.gain_init_done = true;
}
EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index d3de08872d6e..4598cb2cc3ff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -22,6 +22,7 @@
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct mt76x02_dev *dev = hw->priv;
struct ieee80211_vif *vif = info->control.vif;
@@ -33,7 +34,8 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
msta = (struct mt76x02_sta *)control->sta->drv_priv;
wcid = &msta->wcid;
/* sw encrypted frames */
- if (!info->control.hw_key && wcid->hw_key_idx != 0xff)
+ if (!info->control.hw_key && wcid->hw_key_idx != 0xff &&
+ ieee80211_has_protected(hdr->frame_control))
control->sta = NULL;
}
@@ -110,7 +112,6 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
return max_txpwr;
}
-EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj);
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
{
@@ -125,7 +126,6 @@ s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj)
else
return (txpwr < -16) ? 8 : (txpwr + 32) / 2;
}
-EXPORT_SYMBOL_GPL(mt76x02_tx_get_txpwr_adj);
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
{
@@ -140,21 +140,6 @@ void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr)
}
EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto);
-void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- ieee80211_free_txskb(dev->hw, skb);
- } else {
- ieee80211_tx_info_clear_status(info);
- info->status.rates[0].idx = -1;
- info->flags |= IEEE80211_TX_STAT_ACK;
- ieee80211_tx_status(dev->hw, skb);
- }
-}
-EXPORT_SYMBOL_GPL(mt76x02_tx_complete);
-
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
@@ -169,14 +154,15 @@ bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
}
EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
-int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
+int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
u32 *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76x02_txwi *txwi = txwi_ptr;
int qsel = MT_QSEL_EDCA;
+ int pid;
int ret;
if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
@@ -184,11 +170,14 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
+ pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+ txwi->pktid = pid;
+
ret = mt76x02_insert_hdr_pad(skb);
if (ret < 0)
return ret;
- if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+ if (pid && pid != MT_PACKET_ID_NO_ACK)
qsel = MT_QSEL_MGMT;
*tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index dc2226c722dd..81970cf777c0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -30,7 +30,7 @@ void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
struct mt76_queue_entry *e, bool flush)
{
mt76x02u_remove_dma_hdr(e->skb);
- mt76x02_tx_complete(mdev, e->skb);
+ mt76_tx_complete_skb(mdev, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
@@ -67,27 +67,6 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
return 0;
}
-static int
-mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep)
-{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- enum mt76_qsel qsel;
- u32 flags;
-
- if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
- ep == MT_EP_OUT_HCCA)
- qsel = MT_QSEL_MGMT;
- else
- qsel = MT_QSEL_EDCA;
-
- flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
- MT_TXD_INFO_80211;
- if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
- flags |= MT_TXD_INFO_WIV;
-
- return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
-}
-
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct sk_buff *skb, struct mt76_queue *q,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
@@ -95,13 +74,30 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_txwi *txwi;
+ enum mt76_qsel qsel;
int len = skb->len;
+ u32 flags;
+ int pid;
mt76x02_insert_hdr_pad(skb);
txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
- return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx));
+ pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+ txwi->pktid = pid;
+
+ if ((pid && pid != MT_PACKET_ID_NO_ACK) ||
+ q2ep(q->hw_idx) == MT_EP_OUT_HCCA)
+ qsel = MT_QSEL_MGMT;
+ else
+ qsel = MT_QSEL_EDCA;
+
+ flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
+ if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
+ flags |= MT_TXD_INFO_WIV;
+
+ return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
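
Queue selection on the USB path now follows the same rule as the DMA path, modulo the HCCA endpoint special case above: frames carrying a live packet id go out on the management queue, presumably so the status FIFO reports them individually; everything else stays on EDCA:

	qsel = (pid && pid != MT_PACKET_ID_NO_ACK) ? MT_QSEL_MGMT
						   : MT_QSEL_EDCA;
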
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
index da299b8a1334..6db789f90269 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
@@ -129,9 +129,6 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
u8 seq = 0;
u32 info;
- if (!skb)
- return -EINVAL;
-
if (test_bit(MT76_REMOVED, &dev->state))
return 0;
@@ -162,12 +159,17 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
}
static int
-mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
- int cmd, bool wait_resp)
+mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
+ int len, bool wait_resp)
{
struct mt76_usb *usb = &dev->usb;
+ struct sk_buff *skb;
int err;
+ skb = mt76x02u_mcu_msg_alloc(data, len);
+ if (!skb)
+ return -ENOMEM;
+
mutex_lock(&usb->mcu.mutex);
err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp);
mutex_unlock(&usb->mcu.mutex);
@@ -186,6 +188,7 @@ mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
{
const int CMD_RANDOM_WRITE = 12;
const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8;
+ struct mt76_usb *usb = &dev->usb;
struct sk_buff *skb;
int cnt, i, ret;
@@ -204,7 +207,9 @@ mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base,
skb_put_le32(skb, data[i].value);
}
- ret = mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+ mutex_lock(&usb->mcu.mutex);
+ ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n);
+ mutex_unlock(&usb->mcu.mutex);
if (ret)
return ret;
@@ -345,7 +350,6 @@ EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data);
void mt76x02u_init_mcu(struct mt76_dev *dev)
{
static const struct mt76_mcu_ops mt76x02u_mcu_ops = {
- .mcu_msg_alloc = mt76x02u_mcu_msg_alloc,
.mcu_send_msg = mt76x02u_mcu_send_msg,
.mcu_wr_rp = mt76x02u_mcu_wr_rp,
.mcu_rd_rp = mt76x02u_mcu_rd_rp,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index ca05332f81fc..38bd466cff16 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -47,6 +47,92 @@ struct ieee80211_rate mt76x02_rates[] = {
};
EXPORT_SYMBOL_GPL(mt76x02_rates);
+static const struct ieee80211_iface_limit mt76x02_if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_STATION) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP)
+ },
+};
+
+static const struct ieee80211_iface_combination mt76x02_if_comb[] = {
+ {
+ .limits = mt76x02_if_limits,
+ .n_limits = ARRAY_SIZE(mt76x02_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+ }
+};
+
+void mt76x02_init_device(struct mt76x02_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+
+ INIT_DELAYED_WORK(&dev->mac_work, mt76x02_mac_work);
+
+ hw->queues = 4;
+ hw->max_rates = 1;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 1;
+ hw->extra_tx_headroom = 2;
+
+ if (mt76_is_usb(dev)) {
+ hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
+ MT_DMA_HDR_LEN;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ } else {
+ mt76x02_dfs_init_detector(dev);
+
+ wiphy->reg_notifier = mt76x02_regd_notifier;
+ wiphy->iface_combinations = mt76x02_if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
+ wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_ADHOC);
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ }
+
+ hw->sta_data_size = sizeof(struct mt76x02_sta);
+ hw->vif_data_size = sizeof(struct mt76x02_vif);
+
+ ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ dev->mt76.global_wcid.idx = 255;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ dev->slottime = 9;
+
+ if (is_mt76x2(dev)) {
+ dev->mt76.sband_2g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.sband_5g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.chainmask = 0x202;
+ dev->mt76.antenna_mask = 3;
+ } else {
+ dev->mt76.chainmask = 0x101;
+ dev->mt76.antenna_mask = 1;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_device);
+
void mt76x02_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast)
@@ -81,23 +167,17 @@ void mt76x02_configure_filter(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(mt76x02_configure_filter);
-int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct mt76x02_dev *dev = hw->priv;
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- int ret = 0;
int idx = 0;
- int i;
-
- mutex_lock(&dev->mt76.mutex);
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
- if (idx < 0) {
- ret = -ENOSPC;
- goto out;
- }
+ if (idx < 0)
+ return -ENOSPC;
msta->vif = mvif;
msta->wcid.sta = 1;
@@ -105,41 +185,25 @@ int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
msta->wcid.hw_key_idx = -1;
mt76x02_mac_wcid_setup(dev, idx, mvif->idx, sta->addr);
mt76x02_mac_wcid_set_drop(dev, idx, false);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76x02_txq_init(dev, sta->txq[i]);
if (vif->type == NL80211_IFTYPE_AP)
set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);
ewma_signal_init(&msta->rssi);
- rcu_assign_pointer(dev->mt76.wcid[idx], &msta->wcid);
-
-out:
- mutex_unlock(&dev->mt76.mutex);
-
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_sta_add);
-int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
- struct mt76x02_dev *dev = hw->priv;
- struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
- int idx = msta->wcid.idx;
- int i;
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ int idx = wcid->idx;
- mutex_lock(&dev->mt76.mutex);
- rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76_txq_remove(&dev->mt76, sta->txq[i]);
mt76x02_mac_wcid_set_drop(dev, idx, true);
- mt76_wcid_free(dev->mt76.wcid_mask, idx);
mt76x02_mac_wcid_setup(dev, idx, 0, NULL);
- mutex_unlock(&dev->mt76.mutex);
-
- return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_sta_remove);
@@ -147,11 +211,15 @@ void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
unsigned int idx)
{
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct mt76_txq *mtxq;
mvif->idx = idx;
mvif->group_wcid.idx = MT_VIF_WCID(idx);
mvif->group_wcid.hw_key_idx = -1;
- mt76x02_txq_init(dev, vif->txq);
+ mtxq = (struct mt76_txq *) vif->txq->drv_priv;
+ mtxq->wcid = &mvif->group_wcid;
+
+ mt76_txq_init(&dev->mt76, vif->txq);
}
EXPORT_SYMBOL_GPL(mt76x02_vif_init);
@@ -357,6 +425,51 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76x02_conf_tx);
+void mt76x02_set_tx_ackto(struct mt76x02_dev *dev)
+{
+ u8 ackto, sifs, slottime = dev->slottime;
+
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ slottime += 3 * dev->coverage_class;
+ mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
+ MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
+
+ sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
+ MT_XIFS_TIME_CFG_OFDM_SIFS);
+
+ ackto = slottime + sifs;
+ mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
+ MT_TX_TIMEOUT_CFG_ACKTO, ackto);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_tx_ackto);
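
The timeout above is pure arithmetic: IEEE 802.11-2007 17.3.8.6 adds 3 us of propagation allowance per coverage-class unit to the slot time, and the ACK timeout is slot time plus SIFS. A worked sketch with assumed example values (short slot of 9 us, OFDM SIFS of 16 us):

#include <stdint.h>

/* Illustration only; the values are assumed examples, not read from
 * hardware registers as in the driver. */
static uint8_t example_ackto(uint8_t slottime, uint8_t coverage_class,
			     uint8_t sifs)
{
	/* 3 us of extra propagation allowance per coverage class unit */
	slottime += 3 * coverage_class;

	return slottime + sifs; /* e.g. 9 + 3 * 2 + 16 = 31 us */
}
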
+
+void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
+ s16 coverage_class)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ dev->coverage_class = coverage_class;
+ mt76x02_set_tx_ackto(dev);
+ mutex_unlock(&dev->mt76.mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_coverage_class);
+
+int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ if (val != ~0 && val > 0xffff)
+ return -EINVAL;
+
+ mutex_lock(&dev->mt76.mutex);
+ mt76x02_mac_set_tx_protection(dev, val);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_set_rts_threshold);
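
~0 is the mac80211 sentinel for "RTS/CTS disabled"; the 0xffff bound presumably reflects the width of the hardware threshold field that mt76x02_mac_set_tx_protection programs. A condensed sketch of the check:

#include <stdint.h>

static int example_validate_rts(uint32_t val)
{
	/* ~0 disables RTS/CTS; any real threshold must fit 16 bits */
	if (val != ~0u && val > 0xffff)
		return -1; /* the driver returns -EINVAL here */
	return 0;
}
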
+
void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -405,6 +518,64 @@ void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len)
}
EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad);
+void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ if (mt76_is_mmio(dev))
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+EXPORT_SYMBOL_GPL(mt76x02_sw_scan);
+
+void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76x02_dev *dev = hw->priv;
+
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+ if (mt76_is_mmio(dev))
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+
+ if (dev->cal.gain_init_done) {
+ /* Restore AGC gain and resume calibration after scanning. */
+ dev->cal.low_gain = -1;
+ ieee80211_queue_delayed_work(hw, &dev->cal_work, 0);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_sw_scan_complete);
+
+int mt76x02_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int *dbm)
+{
+ struct mt76x02_dev *dev = hw->priv;
+ u8 nstreams = dev->mt76.chainmask & 0xf;
+
+ *dbm = dev->mt76.txpower_cur / 2;
+
+ /* convert from per-chain power to combined
+ * output on 2x2 devices
+ */
+ if (nstreams > 1)
+ *dbm += 3;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_get_txpower);
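
The +3 dB step is 10*log10(2) rounded: two chains at equal power radiate twice the combined output of one. A quick numeric check, with an assumed per-chain value (txpower_cur is kept in 0.5 dB units, hence the division by two):

#include <math.h>
#include <stdio.h>

int main(void)
{
	int txpower_half_db = 32;	/* assumed per-chain value, 0.5 dB units */
	int nstreams = 2;
	int dbm = txpower_half_db / 2;	/* 16 dBm per chain */

	if (nstreams > 1)
		dbm += (int)lround(10.0 * log10(nstreams)); /* +3 dB */

	printf("combined output: %d dBm\n", dbm); /* 19 dBm */
	return 0;
}
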
+
+void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
+ bool ps)
+{
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv;
+ int idx = msta->wcid.idx;
+
+ mt76_stop_tx_queues(&dev->mt76, sta, true);
+ mt76x02_mac_wcid_set_drop(dev, idx, ps);
+}
+EXPORT_SYMBOL_GPL(mt76x02_sta_ps);
+
const u16 mt76x02_beacon_offsets[16] = {
	/* 1024 bytes per beacon */
0xc000,
@@ -425,9 +596,8 @@ const u16 mt76x02_beacon_offsets[16] = {
0xc000,
0xc000,
};
-EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets);
-void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
+static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
{
u16 val, base = MT_BEACON_BASE;
u32 regs[4] = {};
@@ -441,6 +611,98 @@ void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
for (i = 0; i < 4; i++)
mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
}
-EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets);
+
+void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
+{
+ static const u8 null_addr[ETH_ALEN] = {};
+ int i;
+
+ mt76_wr(dev, MT_MAC_BSSID_DW0,
+ get_unaligned_le32(dev->mt76.macaddr));
+ mt76_wr(dev, MT_MAC_BSSID_DW1,
+ get_unaligned_le16(dev->mt76.macaddr + 4) |
+ FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
+ MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
+
+ /* Fire a pre-TBTT interrupt 8 ms before TBTT */
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
+ 8 << 4);
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+ MT_DFS_GP_INTERVAL);
+ mt76_wr(dev, MT_INT_TIMER_EN, 0);
+
+ mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+
+ for (i = 0; i < 8; i++) {
+ mt76x02_mac_set_bssid(dev, i, null_addr);
+ mt76x02_mac_set_beacon(dev, i, NULL);
+ }
+ mt76x02_set_beacon_offsets(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
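
The << 4 shifts suggest the beacon hardware counts in 1/16 TU ticks (1 TU = 1024 us, so 64 us per tick); under that assumption the 8 << 4 programmed above arms the pre-TBTT interrupt 8.192 ms early, matching the 8 ms comment. A units check:

#include <stdio.h>

int main(void)
{
	int tick_us = 1024 / 16;	/* assumed tick: 1/16 TU = 64 us */
	int pre_tbtt = 8 << 4;		/* value written to the timer field */

	printf("pre-TBTT lead: %d us\n", pre_tbtt * tick_us); /* 8192 us */
	return 0;
}
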
+
+void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct mt76x02_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (changed & BSS_CHANGED_BSSID)
+ mt76x02_mac_set_bssid(dev, mvif->idx, info->bssid);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ tasklet_disable(&dev->pre_tbtt_tasklet);
+ mt76x02_mac_set_beacon_enable(dev, mvif->idx,
+ info->enable_beacon);
+ tasklet_enable(&dev->pre_tbtt_tasklet);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_INTVAL,
+ info->beacon_int << 4);
+ dev->beacon_int = info->beacon_int;
+ dev->tbtt_count = 0;
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE)
+ mt76x02_mac_set_short_preamble(dev, info->use_short_preamble);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ dev->slottime = slottime;
+ mt76x02_set_tx_ackto(dev);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+EXPORT_SYMBOL_GPL(mt76x02_bss_info_changed);
+
+void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
+ u8 *addr = dev->macaddr_list[i].addr;
+
+ memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
+
+ if (!i)
+ continue;
+
+ addr[0] |= BIT(1);
+ addr[0] ^= ((i - 1) << 2);
+ }
+ wiphy->addresses = dev->macaddr_list;
+ wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
+}
+EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list);
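
The loop derives one extra address per multi-BSS slot from the base MAC: set the locally-administered bit (bit 1 of the first octet) and XOR in a per-slot value shifted past the unicast and local bits. A standalone sketch, assuming an example base address and the 8-entry list size:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* assumed example base MAC; the driver reads its own from EEPROM */
	const unsigned char base[6] = { 0x00, 0x0c, 0x43, 0x26, 0x60, 0x00 };
	unsigned char addr[6];
	int i;

	for (i = 0; i < 8; i++) {
		memcpy(addr, base, sizeof(addr));
		if (i) {
			addr[0] |= 1 << 1;		/* locally administered */
			addr[0] ^= (i - 1) << 2;	/* unique per BSS slot */
		}
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	}
	return 0;
}
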
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
index b71bb1049170..9297b850bbba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile
@@ -3,11 +3,11 @@ obj-$(CONFIG_MT76x2E) += mt76x2e.o
obj-$(CONFIG_MT76x2U) += mt76x2u.o
mt76x2-common-y := \
- eeprom.o mac.o init.o phy.o debugfs.o mcu.o
+ eeprom.o mac.o init.o phy.o mcu.o
mt76x2e-y := \
- pci.o pci_main.o pci_init.o pci_tx.o \
- pci_mac.o pci_mcu.o pci_phy.o pci_dfs.o
+ pci.o pci_main.o pci_init.o pci_mcu.o \
+ pci_phy.o
mt76x2u-y := \
usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h
deleted file mode 100644
index 3cb9d1864286..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef __DFS_H
-#define __DFS_H
-
-void mt76x2_dfs_init_params(struct mt76x02_dev *dev);
-void mt76x2_dfs_init_detector(struct mt76x02_dev *dev);
-void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev);
-void mt76x2_dfs_set_domain(struct mt76x02_dev *dev,
- enum nl80211_dfs_regions region);
-
-#endif /* __DFS_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index f39b622d03f4..6f6998561d9d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -22,17 +22,6 @@
#define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1
static int
-mt76x2_eeprom_copy(struct mt76x02_dev *dev, enum mt76x02_eeprom_field field,
- void *dest, int len)
-{
- if (field + len > dev->mt76.eeprom.size)
- return -1;
-
- memcpy(dest, dev->mt76.eeprom.data + field, len);
- return 0;
-}
-
-static int
mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev)
{
void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR;
@@ -378,7 +367,7 @@ mt76x2_get_power_info_2g(struct mt76x02_dev *dev,
else
delta_idx = 5;
- mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+ mt76x02_eeprom_copy(dev, offset, data, sizeof(data));
t->chain[chain].tssi_slope = data[0];
t->chain[chain].tssi_offset = data[1];
@@ -429,7 +418,7 @@ mt76x2_get_power_info_5g(struct mt76x02_dev *dev,
else
delta_idx = 4;
- mt76x2_eeprom_copy(dev, offset, data, sizeof(data));
+ mt76x02_eeprom_copy(dev, offset, data, sizeof(data));
t->chain[chain].tssi_slope = data[0];
t->chain[chain].tssi_offset = data[1];
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index 3c73fdeaf30f..54a9b5fac787 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -158,38 +158,6 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76_write_mac_initvals);
-void mt76x2_init_device(struct mt76x02_dev *dev)
-{
- struct ieee80211_hw *hw = mt76_hw(dev);
-
- hw->queues = 4;
- hw->max_rates = 1;
- hw->max_report_rates = 7;
- hw->max_rate_tries = 1;
- hw->extra_tx_headroom = 2;
- if (mt76_is_usb(dev))
- hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) +
- MT_DMA_HDR_LEN;
-
- hw->sta_data_size = sizeof(struct mt76x02_sta);
- hw->vif_data_size = sizeof(struct mt76x02_vif);
-
- ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
- ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
-
- dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
- dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
-
- dev->mt76.chainmask = 0x202;
- dev->mt76.global_wcid.idx = 255;
- dev->mt76.global_wcid.hw_key_idx = -1;
- dev->slottime = 9;
-
- /* init antenna configuration */
- dev->mt76.antenna_mask = 3;
-}
-EXPORT_SYMBOL_GPL(mt76x2_init_device);
-
void mt76x2_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
index a31bd49ae6cb..4c8e20bce920 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
@@ -26,12 +26,5 @@ struct mt76x02_vif;
int mt76x2_mac_start(struct mt76x02_dev *dev);
void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force);
void mt76x2_mac_resume(struct mt76x02_dev *dev);
-void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr);
-
-int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
- struct sk_buff *skb);
-void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, bool val);
-
-void mt76x2_mac_work(struct work_struct *work);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
index 88bd62cfbdf9..cd3e082f486c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c
@@ -26,7 +26,6 @@
int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
u8 bw_index, bool scan)
{
- struct sk_buff *skb;
struct {
u8 idx;
u8 scan;
@@ -45,21 +44,19 @@ int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw,
};
/* first set the channel without the extension channel info */
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
+ mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg), true);
usleep_range(5000, 10000);
msg.ext_chan = 0xe0 + bw_index;
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true);
+ return mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg),
+ true);
}
EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel);
int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
u8 channel)
{
- struct sk_buff *skb;
struct {
u8 cr_mode;
u8 temp;
@@ -80,15 +77,13 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
msg.cfg = cpu_to_le32(val);
	/* load the channel register (CR) values via the MCU */
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_LOAD_CR, true);
+ return mt76_mcu_send_msg(dev, CMD_LOAD_CR, &msg, sizeof(msg), true);
}
EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr);
int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
bool force)
{
- struct sk_buff *skb;
struct {
__le32 channel;
__le32 gain_val;
@@ -100,15 +95,14 @@ int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain,
if (force)
msg.channel |= cpu_to_le32(BIT(31));
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_INIT_GAIN_OP, true);
+ return mt76_mcu_send_msg(dev, CMD_INIT_GAIN_OP, &msg, sizeof(msg),
+ true);
}
EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain);
int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
struct mt76x2_tssi_comp *tssi_data)
{
- struct sk_buff *skb;
struct {
__le32 id;
struct mt76x2_tssi_comp data;
@@ -117,7 +111,7 @@ int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev,
.data = *tssi_data,
};
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true);
+ return mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg),
+ true);
}
EXPORT_SYMBOL_GPL(mt76x2_mcu_tssi_comp);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index ab93125f46de..b259e4b50f1e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -31,14 +31,8 @@
#define MT7662_ROM_PATCH "mt7662_rom_patch.bin"
#define MT7662_EEPROM_SIZE 512
-#define MT7662U_FIRMWARE "mediatek/mt7662u.bin"
-#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin"
-
-#define MT_CALIBRATE_INTERVAL HZ
-
#include "../mt76x02.h"
#include "mac.h"
-#include "dfs.h"
static inline bool is_mt7612(struct mt76x02_dev *dev)
{
@@ -57,15 +51,12 @@ extern const struct ieee80211_ops mt76x2_ops;
struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev);
int mt76x2_register_device(struct mt76x02_dev *dev);
-void mt76x2_init_debugfs(struct mt76x02_dev *dev);
-void mt76x2_init_device(struct mt76x02_dev *dev);
void mt76x2_phy_power_on(struct mt76x02_dev *dev);
int mt76x2_init_hardware(struct mt76x02_dev *dev);
void mt76x2_stop_hardware(struct mt76x02_dev *dev);
int mt76x2_eeprom_init(struct mt76x02_dev *dev);
int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel);
-void mt76x2_set_tx_ackto(struct mt76x02_dev *dev);
void mt76x2_phy_set_antenna(struct mt76x02_dev *dev);
int mt76x2_phy_start(struct mt76x02_dev *dev);
@@ -82,24 +73,17 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
void mt76x2_cleanup(struct mt76x02_dev *dev);
-void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val);
-
-void mt76x2_pre_tbtt_tasklet(unsigned long arg);
-
-void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
-
-void mt76x2_update_channel(struct mt76_dev *mdev);
-
void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
void mt76x2_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_supported_band *sband);
void mt76_write_mac_initvals(struct mt76x02_dev *dev);
-void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait);
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev);
void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev,
enum nl80211_band band);
void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
enum nl80211_band band, u8 bw);
void mt76x2_apply_gain_adj(struct mt76x02_dev *dev);
+void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
index 6e932b5010ef..0b0075411b34 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
@@ -43,11 +43,8 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev);
int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef);
void mt76x2u_phy_calibrate(struct work_struct *work);
-void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev);
void mt76x2u_mcu_complete_urb(struct urb *urb);
-int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap,
- bool ext, int rssi, u32 false_cca);
int mt76x2u_mcu_init(struct mt76x02_dev *dev);
int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index fd125722d1fb..7f4ea2d00f42 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -79,7 +79,6 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
{
- static const u8 null_addr[ETH_ALEN] = {};
const u8 *macaddr = dev->mt76.macaddr;
u32 val;
int i, k;
@@ -123,27 +122,18 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr));
mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4));
- mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr));
- mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) |
- FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */
- MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT);
-
- /* Fire a pre-TBTT interrupt 8 ms before TBTT */
- mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
- 8 << 4);
- mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
- MT_DFS_GP_INTERVAL);
- mt76_wr(dev, MT_INT_TIMER_EN, 0);
-
- mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+ mt76x02_init_beacon_config(dev);
if (!hard)
return 0;
for (i = 0; i < 256 / 32; i++)
mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0);
- for (i = 0; i < 256; i++)
+ for (i = 0; i < 256; i++) {
mt76x02_mac_wcid_setup(dev, i, 0, NULL);
+ mt76_wr(dev, MT_WCID_TX_RATE(i), 0);
+ mt76_wr(dev, MT_WCID_TX_RATE(i) + 4, 0);
+ }
for (i = 0; i < MT_MAX_VIFS; i++)
mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL);
@@ -152,11 +142,6 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
for (k = 0; k < 4; k++)
mt76x02_mac_shared_key_setup(dev, i, k, NULL);
- for (i = 0; i < 8; i++) {
- mt76x2_mac_set_bssid(dev, i, null_addr);
- mt76x2_mac_set_beacon(dev, i, NULL);
- }
-
for (i = 0; i < 16; i++)
mt76_rr(dev, MT_TX_STAT_FIFO);
@@ -168,9 +153,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
MT_CH_TIME_CFG_EIFS_AS_BUSY |
FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1));
- mt76x02_set_beacon_offsets(dev);
-
- mt76x2_set_tx_ackto(dev);
+ mt76x02_set_tx_ackto(dev);
return 0;
}
@@ -277,30 +260,10 @@ mt76x2_power_on(struct mt76x02_dev *dev)
mt76x2_power_on_rf(dev, 1);
}
-void mt76x2_set_tx_ackto(struct mt76x02_dev *dev)
-{
- u8 ackto, sifs, slottime = dev->slottime;
-
- /* As defined by IEEE 802.11-2007 17.3.8.6 */
- slottime += 3 * dev->coverage_class;
- mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG,
- MT_BKOFF_SLOT_CFG_SLOTTIME, slottime);
-
- sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG,
- MT_XIFS_TIME_CFG_OFDM_SIFS);
-
- ackto = slottime + sifs;
- mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG,
- MT_TX_TIMEOUT_CFG_ACKTO, ackto);
-}
-
int mt76x2_init_hardware(struct mt76x02_dev *dev)
{
int ret;
- tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet,
- (unsigned long) dev);
-
mt76x02_dma_disable(dev);
mt76x2_reset_wlan(dev, true);
mt76x2_power_on(dev);
@@ -337,7 +300,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
cancel_delayed_work_sync(&dev->mac_work);
- mt76x02_mcu_set_radio_state(dev, false, true);
+ mt76x02_mcu_set_radio_state(dev, false);
mt76x2_mac_stop(dev, false);
}
@@ -354,12 +317,14 @@ struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
- .update_survey = mt76x2_update_channel,
+ .update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
.rx_skb = mt76x02_queue_rx_skb,
.rx_poll_complete = mt76x02_rx_poll_complete,
- .sta_ps = mt76x2_sta_ps,
+ .sta_ps = mt76x02_sta_ps,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
};
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
@@ -375,43 +340,6 @@ struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev)
return dev;
}
-static void mt76x2_regd_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
-{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct mt76x02_dev *dev = hw->priv;
-
- mt76x2_dfs_set_domain(dev, request->dfs_region);
-}
-
-static const struct ieee80211_iface_limit if_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_ADHOC)
- }, {
- .max = 8,
- .types = BIT(NL80211_IFTYPE_STATION) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_AP)
- },
-};
-
-static const struct ieee80211_iface_combination if_comb[] = {
- {
- .limits = if_limits,
- .n_limits = ARRAY_SIZE(if_limits),
- .max_interfaces = 8,
- .num_different_channels = 1,
- .beacon_int_infra_match = true,
- .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80),
- }
-};
-
static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on,
u8 delay_off)
{
@@ -462,49 +390,17 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev,
int mt76x2_register_device(struct mt76x02_dev *dev)
{
- struct ieee80211_hw *hw = mt76_hw(dev);
- struct wiphy *wiphy = hw->wiphy;
- int i, ret;
+ int ret;
INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate);
- INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work);
- mt76x2_init_device(dev);
+ mt76x02_init_device(dev);
ret = mt76x2_init_hardware(dev);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) {
- u8 *addr = dev->macaddr_list[i].addr;
-
- memcpy(addr, dev->mt76.macaddr, ETH_ALEN);
-
- if (!i)
- continue;
-
- addr[0] |= BIT(1);
- addr[0] ^= ((i - 1) << 2);
- }
- wiphy->addresses = dev->macaddr_list;
- wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list);
-
- wiphy->iface_combinations = if_comb;
- wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
-
- wiphy->reg_notifier = mt76x2_regd_notifier;
-
- wiphy->interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
-#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT) |
-#endif
- BIT(NL80211_IFTYPE_ADHOC);
-
- wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
-
- mt76x2_dfs_init_detector(dev);
+ mt76x02_config_mac_addr_list(dev);
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
@@ -517,7 +413,7 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
if (ret)
goto fail;
- mt76x2_init_debugfs(dev);
+ mt76x02_init_debugfs(dev);
mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
deleted file mode 100644
index 4b331ed14bb2..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include "mt76x2.h"
-#include "mcu.h"
-#include "eeprom.h"
-
-void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
-{
- idx &= 7;
- mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr));
- mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
- get_unaligned_le16(addr + 4));
-}
-
-static int
-mt76_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
-{
- int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
- struct mt76x02_txwi txwi;
-
- if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
- return -ENOSPC;
-
- mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
-
- mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
- offset += sizeof(txwi);
-
- mt76_wr_copy(dev, offset, skb->data, skb->len);
- return 0;
-}
-
-static int
-__mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx, struct sk_buff *skb)
-{
- int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
- int beacon_addr = mt76x02_beacon_offsets[bcn_idx];
- int ret = 0;
- int i;
-
- /* Prevent corrupt transmissions during update */
- mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
-
- if (skb) {
- ret = mt76_write_beacon(dev, beacon_addr, skb);
- if (!ret)
- dev->beacon_data_mask |= BIT(bcn_idx);
- } else {
- dev->beacon_data_mask &= ~BIT(bcn_idx);
- for (i = 0; i < beacon_len; i += 4)
- mt76_wr(dev, beacon_addr + i, 0);
- }
-
- mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
-
- return ret;
-}
-
-int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
- struct sk_buff *skb)
-{
- bool force_update = false;
- int bcn_idx = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
- if (vif_idx == i) {
- force_update = !!dev->beacons[i] ^ !!skb;
-
- if (dev->beacons[i])
- dev_kfree_skb(dev->beacons[i]);
-
- dev->beacons[i] = skb;
- __mt76x2_mac_set_beacon(dev, bcn_idx, skb);
- } else if (force_update && dev->beacons[i]) {
- __mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]);
- }
-
- bcn_idx += !!dev->beacons[i];
- }
-
- for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
- if (!(dev->beacon_data_mask & BIT(i)))
- break;
-
- __mt76x2_mac_set_beacon(dev, i, NULL);
- }
-
- mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
- bcn_idx - 1);
- return 0;
-}
-
-void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev,
- u8 vif_idx, bool val)
-{
- u8 old_mask = dev->beacon_mask;
- bool en;
- u32 reg;
-
- if (val) {
- dev->beacon_mask |= BIT(vif_idx);
- } else {
- dev->beacon_mask &= ~BIT(vif_idx);
- mt76x2_mac_set_beacon(dev, vif_idx, NULL);
- }
-
- if (!!old_mask == !!dev->beacon_mask)
- return;
-
- en = dev->beacon_mask;
-
- mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
- reg = MT_BEACON_TIME_CFG_BEACON_TX |
- MT_BEACON_TIME_CFG_TBTT_EN |
- MT_BEACON_TIME_CFG_TIMER_EN;
- mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
-
- if (en)
- mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
- else
- mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
-}
-
-void mt76x2_update_channel(struct mt76_dev *mdev)
-{
- struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
- struct mt76_channel_state *state;
- u32 active, busy;
-
- state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan);
-
- busy = mt76_rr(dev, MT_CH_BUSY);
- active = busy + mt76_rr(dev, MT_CH_IDLE);
-
- spin_lock_bh(&dev->mt76.cc_lock);
- state->cc_busy += busy;
- state->cc_active += active;
- spin_unlock_bh(&dev->mt76.cc_lock);
-}
-
-void mt76x2_mac_work(struct work_struct *work)
-{
- struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
- mac_work.work);
- int i, idx;
-
- mt76x2_update_channel(&dev->mt76);
- for (i = 0, idx = 0; i < 16; i++) {
- u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i));
-
- dev->aggr_stats[idx++] += val & 0xffff;
- dev->aggr_stats[idx++] += val >> 16;
- }
-
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
- MT_CALIBRATE_INTERVAL);
-}
-
-void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val)
-{
- u32 data = 0;
-
- if (val != ~0)
- data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
- MT_PROT_CFG_RTS_THRESH;
-
- mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);
-
- mt76_rmw(dev, MT_CCK_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_OFDM_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_MM20_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_MM40_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_GF20_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_GF40_PROT_CFG,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_TX_PROT_CFG6,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_TX_PROT_CFG7,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
- mt76_rmw(dev, MT_TX_PROT_CFG8,
- MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 3f001bd6806c..b54a32397486 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -74,7 +74,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76_rr(dev, MT_CH_IDLE);
mt76_rr(dev, MT_CH_BUSY);
- mt76x2_dfs_init_params(dev);
+ mt76x02_dfs_init_params(dev);
mt76x2_mac_resume(dev);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
@@ -128,103 +128,12 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
}
static void
-mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
-{
- struct mt76x02_dev *dev = hw->priv;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
-
- mutex_lock(&dev->mt76.mutex);
-
- if (changed & BSS_CHANGED_BSSID)
- mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid);
-
- if (changed & BSS_CHANGED_BEACON_INT) {
- mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
- MT_BEACON_TIME_CFG_INTVAL,
- info->beacon_int << 4);
- dev->beacon_int = info->beacon_int;
- dev->tbtt_count = 0;
- }
-
- if (changed & BSS_CHANGED_BEACON_ENABLED) {
- tasklet_disable(&dev->pre_tbtt_tasklet);
- mt76x2_mac_set_beacon_enable(dev, mvif->idx,
- info->enable_beacon);
- tasklet_enable(&dev->pre_tbtt_tasklet);
- }
-
- if (changed & BSS_CHANGED_ERP_SLOT) {
- int slottime = info->use_short_slot ? 9 : 20;
-
- dev->slottime = slottime;
- mt76x2_set_tx_ackto(dev);
- }
-
- mutex_unlock(&dev->mt76.mutex);
-}
-
-void
-mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
-{
- struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv;
- struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
- int idx = msta->wcid.idx;
-
- mt76_stop_tx_queues(&dev->mt76, sta, true);
- mt76x02_mac_wcid_set_drop(dev, idx, ps);
-}
-
-static void
-mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- const u8 *mac)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- tasklet_disable(&dev->pre_tbtt_tasklet);
- set_bit(MT76_SCANNING, &dev->mt76.state);
-}
-
-static void
-mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- clear_bit(MT76_SCANNING, &dev->mt76.state);
- tasklet_enable(&dev->pre_tbtt_tasklet);
-}
-
-static void
mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
}
static int
-mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- *dbm = dev->mt76.txpower_cur / 2;
-
- /* convert from per-chain power to combined output on 2x2 devices */
- *dbm += 3;
-
- return 0;
-}
-
-static void mt76x2_set_coverage_class(struct ieee80211_hw *hw,
- s16 coverage_class)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- mutex_lock(&dev->mt76.mutex);
- dev->coverage_class = coverage_class;
- mt76x2_set_tx_ackto(dev);
- mutex_unlock(&dev->mt76.mutex);
-}
-
-static int
mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
return 0;
@@ -264,21 +173,6 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant,
return 0;
}
-static int
-mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- if (val != ~0 && val > 0xffff)
- return -EINVAL;
-
- mutex_lock(&dev->mt76.mutex);
- mt76x2_mac_set_tx_protection(dev, val);
- mutex_unlock(&dev->mt76.mutex);
-
- return 0;
-}
-
const struct ieee80211_ops mt76x2_ops = {
.tx = mt76x02_tx,
.start = mt76x2_start,
@@ -287,24 +181,23 @@ const struct ieee80211_ops mt76x2_ops = {
.remove_interface = mt76x02_remove_interface,
.config = mt76x2_config,
.configure_filter = mt76x02_configure_filter,
- .bss_info_changed = mt76x2_bss_info_changed,
- .sta_add = mt76x02_sta_add,
- .sta_remove = mt76x02_sta_remove,
+ .bss_info_changed = mt76x02_bss_info_changed,
+ .sta_state = mt76_sta_state,
.set_key = mt76x02_set_key,
.conf_tx = mt76x02_conf_tx,
- .sw_scan_start = mt76x2_sw_scan,
- .sw_scan_complete = mt76x2_sw_scan_complete,
+ .sw_scan_start = mt76x02_sw_scan,
+ .sw_scan_complete = mt76x02_sw_scan_complete,
.flush = mt76x2_flush,
.ampdu_action = mt76x02_ampdu_action,
- .get_txpower = mt76x2_get_txpower,
+ .get_txpower = mt76x02_get_txpower,
.wake_tx_queue = mt76_wake_tx_queue,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.release_buffered_frames = mt76_release_buffered_frames,
- .set_coverage_class = mt76x2_set_coverage_class,
+ .set_coverage_class = mt76x02_set_coverage_class,
.get_survey = mt76_get_survey,
.set_tim = mt76x2_set_tim,
.set_antenna = mt76x2_set_antenna,
.get_antenna = mt76x2_get_antenna,
- .set_rts_threshold = mt76x2_set_rts_threshold,
+ .set_rts_threshold = mt76x02_set_rts_threshold,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
index d8fa9ba56437..03e24ae7f66c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
@@ -168,7 +168,6 @@ error:
int mt76x2_mcu_init(struct mt76x02_dev *dev)
{
static const struct mt76_mcu_ops mt76x2_mcu_ops = {
- .mcu_msg_alloc = mt76x02_mcu_msg_alloc,
.mcu_send_msg = mt76x02_mcu_msg_send,
};
int ret;
@@ -183,6 +182,6 @@ int mt76x2_mcu_init(struct mt76x02_dev *dev)
if (ret)
return ret;
- mt76x02_mcu_function_select(dev, Q_SELECT, 1, true);
+ mt76x02_mcu_function_select(dev, Q_SELECT, 1);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
index 5bda44540225..da7cd40f56ff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
@@ -38,7 +38,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev)
if (mt76x02_ext_pa_enabled(dev, chan->band))
flag |= BIT(8);
- mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
dev->cal.tssi_cal_done = true;
return true;
}
@@ -62,13 +62,13 @@ mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
mt76x2_mac_stop(dev, false);
if (is_5ghz)
- mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0);
- mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, true);
- mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, true);
- mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, true);
- mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, true);
- mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
if (!mac_stopped)
mt76x2_mac_resume(dev);
@@ -124,96 +124,6 @@ void mt76x2_phy_set_antenna(struct mt76x02_dev *dev)
mt76_wr(dev, MT_BBP(AGC, 0), val);
}
-static void
-mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
-{
- u32 val;
- u8 gain_val[2];
-
- gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
- gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
-
- if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
- val = 0x1e42 << 16;
- else
- val = 0x1836 << 16;
-
- val |= 0xf8;
-
- mt76_wr(dev, MT_BBP(AGC, 8),
- val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
- mt76_wr(dev, MT_BBP(AGC, 9),
- val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
-
- if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
- mt76x2_dfs_adjust_agc(dev);
-}
-
-static void
-mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
-{
- u8 *gain = dev->cal.agc_gain_init;
- u8 low_gain_delta, gain_delta;
- bool gain_change;
- int low_gain;
- u32 val;
-
- dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
-
- low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
- (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
-
- gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2);
- dev->cal.low_gain = low_gain;
-
- if (!gain_change) {
- if (mt76x02_phy_adjust_vga_gain(dev))
- mt76x2_phy_set_gain_val(dev);
- return;
- }
-
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
- mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
- val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
- if (low_gain == 2)
- val |= 0x3;
- else
- val |= 0x5;
- mt76_wr(dev, MT_BBP(AGC, 26), val);
- } else {
- mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
- }
-
- if (mt76x2_has_ext_lna(dev))
- low_gain_delta = 10;
- else
- low_gain_delta = 14;
-
- if (low_gain == 2) {
- mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
- mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
- mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
- gain_delta = low_gain_delta;
- dev->cal.agc_gain_adjust = 0;
- } else {
- mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
- if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
- mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
- else
- mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
- mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
- gain_delta = 0;
- dev->cal.agc_gain_adjust = low_gain_delta;
- }
-
- dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
- dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
- mt76x2_phy_set_gain_val(dev);
-
- /* clear false CCA counters */
- mt76_rr(dev, MT_RX_STAT_1);
-}
-
int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
struct cfg80211_chan_def *chandef)
{
@@ -313,14 +223,14 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev,
u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
if (val != 0xff)
- mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
}
- mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
/* Rx LPF calibration */
if (!dev->cal.init_cal_done)
- mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, true);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0);
dev->cal.init_cal_done = true;
@@ -384,7 +294,7 @@ void mt76x2_phy_calibrate(struct work_struct *work)
dev = container_of(work, struct mt76x02_dev, cal_work.work);
mt76x2_phy_channel_calibrate(dev, false);
- mt76x2_phy_tssi_compensate(dev, true);
+ mt76x2_phy_tssi_compensate(dev);
mt76x2_phy_temp_compensate(dev);
mt76x2_phy_update_channel_gain(dev);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
@@ -395,7 +305,7 @@ int mt76x2_phy_start(struct mt76x02_dev *dev)
{
int ret;
- ret = mt76x02_mcu_set_radio_state(dev, true, true);
+ ret = mt76x02_mcu_set_radio_state(dev, true);
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c
deleted file mode 100644
index 3a2ec86d3e88..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "mt76x2.h"
-
-struct beacon_bc_data {
- struct mt76x02_dev *dev;
- struct sk_buff_head q;
- struct sk_buff *tail[8];
-};
-
-static void
-mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct mt76x02_dev *dev = (struct mt76x02_dev *) priv;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
- struct sk_buff *skb = NULL;
-
- if (!(dev->beacon_mask & BIT(mvif->idx)))
- return;
-
- skb = ieee80211_beacon_get(mt76_hw(dev), vif);
- if (!skb)
- return;
-
- mt76x2_mac_set_beacon(dev, mvif->idx, skb);
-}
-
-static void
-mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct beacon_bc_data *data = priv;
- struct mt76x02_dev *dev = data->dev;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
- struct ieee80211_tx_info *info;
- struct sk_buff *skb;
-
- if (!(dev->beacon_mask & BIT(mvif->idx)))
- return;
-
- skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
- if (!skb)
- return;
-
- info = IEEE80211_SKB_CB(skb);
- info->control.vif = vif;
- info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
- mt76_skb_set_moredata(skb, true);
- __skb_queue_tail(&data->q, skb);
- data->tail[mvif->idx] = skb;
-}
-
-static void
-mt76x2_resync_beacon_timer(struct mt76x02_dev *dev)
-{
- u32 timer_val = dev->beacon_int << 4;
-
- dev->tbtt_count++;
-
- /*
- * The beacon timer drifts by 1us every tick; the timer is configured
- * in 1/16 TU (64us) units.
- */
- if (dev->tbtt_count < 62)
- return;
-
- if (dev->tbtt_count >= 64) {
- dev->tbtt_count = 0;
- return;
- }
-
- /*
- * The updated beacon interval takes effect after two TBTTs, because
- * at this point the original interval has already been loaded into
- * the next TBTT_TIMER value.
- */
- if (dev->tbtt_count == 62)
- timer_val -= 1;
-
- mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
- MT_BEACON_TIME_CFG_INTVAL, timer_val);
-}
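
For the drift compensation removed above: at 1 us of drift per beacon interval and a 64 us (1/16 TU) timer unit, the error accumulates to exactly one timer unit after 64 TBTTs, which is why the counter shortens one interval by a single unit around every 64th beacon. A back-of-the-envelope check:

#include <stdio.h>

int main(void)
{
	int drift_per_tbtt_us = 1;	/* stated drift per beacon interval */
	int timer_unit_us = 64;		/* 1/16 TU */

	/* TBTTs needed before the drift equals one timer unit */
	printf("resync every %d TBTTs\n", timer_unit_us / drift_per_tbtt_us);
	return 0;
}
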
-
-void mt76x2_pre_tbtt_tasklet(unsigned long arg)
-{
- struct mt76x02_dev *dev = (struct mt76x02_dev *) arg;
- struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
- struct beacon_bc_data data = {};
- struct sk_buff *skb;
- int i, nframes;
-
- mt76x2_resync_beacon_timer(dev);
-
- data.dev = dev;
- __skb_queue_head_init(&data.q);
-
- ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt76x2_update_beacon_iter, dev);
-
- do {
- nframes = skb_queue_len(&data.q);
- ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt76x2_add_buffered_bc, &data);
- } while (nframes != skb_queue_len(&data.q));
-
- if (!nframes)
- return;
-
- for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
- if (!data.tail[i])
- continue;
-
- mt76_skb_set_moredata(data.tail[i], false);
- }
-
- spin_lock_bh(&q->lock);
- while ((skb = __skb_dequeue(&data.q)) != NULL) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_vif *vif = info->control.vif;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv;
-
- mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
- NULL);
- }
- spin_unlock_bh(&q->lock);
-}
-
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index e9fff5b7f125..c9634a774705 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -210,7 +210,7 @@ void mt76x2_configure_tx_delay(struct mt76x02_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay);
-void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
+void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
struct mt76x2_tx_power_info txp;
@@ -245,8 +245,99 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait)
return;
usleep_range(10000, 20000);
- mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value, wait);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value);
dev->cal.dpd_cal_done = true;
}
}
EXPORT_SYMBOL_GPL(mt76x2_phy_tssi_compensate);
+
+static void
+mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
+{
+ u32 val;
+ u8 gain_val[2];
+
+ gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
+ gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
+
+ if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+ val = 0x1e42 << 16;
+ else
+ val = 0x1836 << 16;
+
+ val |= 0xf8;
+
+ mt76_wr(dev, MT_BBP(AGC, 8),
+ val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0]));
+ mt76_wr(dev, MT_BBP(AGC, 9),
+ val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1]));
+
+ if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR)
+ mt76x02_phy_dfs_adjust_agc(dev);
+}
+
+void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
+{
+ u8 *gain = dev->cal.agc_gain_init;
+ u8 low_gain_delta, gain_delta;
+ bool gain_change;
+ int low_gain;
+ u32 val;
+
+ dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
+
+ low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) +
+ (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev));
+
+ gain_change = dev->cal.low_gain < 0 ||
+ (dev->cal.low_gain & 2) ^ (low_gain & 2);
+ dev->cal.low_gain = low_gain;
+
+ if (!gain_change) {
+ if (mt76x02_phy_adjust_vga_gain(dev))
+ mt76x2_phy_set_gain_val(dev);
+ return;
+ }
+
+ if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) {
+ mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211);
+ val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf;
+ if (low_gain == 2)
+ val |= 0x3;
+ else
+ val |= 0x5;
+ mt76_wr(dev, MT_BBP(AGC, 26), val);
+ } else {
+ mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423);
+ }
+
+ if (mt76x2_has_ext_lna(dev))
+ low_gain_delta = 10;
+ else
+ low_gain_delta = 14;
+
+ if (low_gain == 2) {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
+ mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
+ mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808);
+ gain_delta = low_gain_delta;
+ dev->cal.agc_gain_adjust = 0;
+ } else {
+ mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
+ if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+ mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
+ else
+ mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
+ mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
+ gain_delta = 0;
+ dev->cal.agc_gain_adjust = low_gain_delta;
+ }
+
+ dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
+ dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
+ mt76x2_phy_set_gain_val(dev);
+
+ /* clear false CCA counters */
+ mt76_rr(dev, MT_RX_STAT_1);
+}
+EXPORT_SYMBOL_GPL(mt76x2_phy_update_channel_gain);
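
low_gain buckets the average RSSI into 0, 1 or 2 by counting thresholds exceeded; a full gain reprogram happens only when bit 1 of that bucket flips, or on the first pass after a scan (low_gain is reset to -1 in sw_scan_complete). A compact sketch of that decision, with the threshold inputs as assumptions:

#include <stdbool.h>

static int example_low_gain(int avg_rssi, int gain_thresh, int low_gain_thresh)
{
	/* counts how many thresholds the RSSI exceeds: 0, 1 or 2 */
	return (avg_rssi > gain_thresh) + (avg_rssi > low_gain_thresh);
}

static bool example_needs_full_recalc(int prev, int cur)
{
	/* first run after reset (prev < 0), or the "high gain" bit flipped */
	return prev < 0 || ((prev & 2) ^ (cur & 2));
}
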
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index 57baf8d1c830..4d1788eb3812 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -131,8 +131,8 @@ err:
}
MODULE_DEVICE_TABLE(usb, mt76x2u_device_table);
-MODULE_FIRMWARE(MT7662U_FIRMWARE);
-MODULE_FIRMWARE(MT7662U_ROM_PATCH);
+MODULE_FIRMWARE(MT7662_FIRMWARE);
+MODULE_FIRMWARE(MT7662_ROM_PATCH);
static struct usb_driver mt76x2u_driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 13cce2937573..0be3784f44fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -141,6 +141,8 @@ struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev)
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
.rx_skb = mt76x02_queue_rx_skb,
+ .sta_add = mt76x02_sta_add,
+ .sta_remove = mt76x02_sta_remove,
};
struct mt76x02_dev *dev;
struct mt76_dev *mdev;
@@ -156,21 +158,9 @@ struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev)
return dev;
}
-static void mt76x2u_init_beacon_offsets(struct mt76x02_dev *dev)
-{
- mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800);
- mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820);
- mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840);
- mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860);
-}
-
int mt76x2u_init_hardware(struct mt76x02_dev *dev)
{
- const struct mt76_wcid_addr addr = {
- .macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
- .ba_mask = 0,
- };
- int i, err;
+ int i, k, err;
mt76x2_reset_wlan(dev, true);
mt76x2u_power_on(dev);
@@ -191,9 +181,6 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
if (!mt76x02_wait_for_mac(&dev->mt76))
return -ETIMEDOUT;
- mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0);
- mt76_wr(dev, MT_TSO_CTRL, 0);
-
mt76x2u_init_dma(dev);
err = mt76x2u_mcu_init(dev);
@@ -207,21 +194,18 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR);
dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG);
- mt76x2u_init_beacon_offsets(dev);
-
if (!mt76x02_wait_for_txrx_idle(&dev->mt76))
return -ETIMEDOUT;
/* reset wcid table */
- for (i = 0; i < 254; i++)
- mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr,
- sizeof(struct mt76_wcid_addr));
+ for (i = 0; i < 256; i++)
+ mt76x02_mac_wcid_setup(dev, i, 0, NULL);
/* reset shared key table and pairwise key table */
- for (i = 0; i < 4; i++)
- mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0);
- for (i = 0; i < 256; i++)
- mt76_wr(dev, MT_WCID_ATTR(i), 1);
+ for (i = 0; i < 16; i++) {
+ for (k = 0; k < 4; k++)
+ mt76x02_mac_shared_key_setup(dev, i, k, NULL);
+ }
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_TIMER_EN |
@@ -245,11 +229,10 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
int mt76x2u_register_device(struct mt76x02_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
- struct wiphy *wiphy = hw->wiphy;
int err;
INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate);
- mt76x2_init_device(dev);
+ mt76x02_init_device(dev);
err = mt76x2u_init_eeprom(dev);
if (err < 0)
@@ -267,8 +250,6 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
if (err < 0)
goto fail;
- wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
-
err = mt76_register_device(&dev->mt76, true, mt76x02_rates,
ARRAY_SIZE(mt76x02_rates));
if (err)
@@ -282,7 +263,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
- mt76x2_init_debugfs(dev);
+ mt76x02_init_debugfs(dev);
mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband);
mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband);
@@ -297,12 +278,13 @@ void mt76x2u_stop_hw(struct mt76x02_dev *dev)
{
mt76u_stop_stat_wk(&dev->mt76);
cancel_delayed_work_sync(&dev->cal_work);
+ cancel_delayed_work_sync(&dev->mac_work);
mt76x2u_mac_stop(dev);
}
void mt76x2u_cleanup(struct mt76x02_dev *dev)
{
- mt76x02_mcu_set_radio_state(dev, false, false);
+ mt76x02_mcu_set_radio_state(dev, false);
mt76x2u_stop_hw(dev);
mt76u_queues_deinit(&dev->mt76);
mt76u_mcu_deinit(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 1971a1b00038..2b48cc51a30d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -27,6 +27,8 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
if (ret)
goto out;
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
out:
@@ -48,11 +50,12 @@ static int mt76x2u_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt76x02_dev *dev = hw->priv;
+ unsigned int idx = 8;
if (!ether_addr_equal(dev->mt76.macaddr, vif->addr))
mt76x02_mac_setaddr(dev, vif->addr);
- mt76x02_vif_init(dev, vif, 0);
+ mt76x02_vif_init(dev, vif, idx);
return 0;
}
@@ -81,29 +84,6 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
return err;
}
-static void
-mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info, u32 changed)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- mutex_lock(&dev->mt76.mutex);
-
- if (changed & BSS_CHANGED_ASSOC) {
- mt76x2u_phy_channel_calibrate(dev);
- mt76x2_apply_gain_adj(dev);
- }
-
- if (changed & BSS_CHANGED_BSSID) {
- mt76_wr(dev, MT_MAC_BSSID_DW0,
- get_unaligned_le32(info->bssid));
- mt76_wr(dev, MT_MAC_BSSID_DW1,
- get_unaligned_le16(info->bssid + 4));
- }
-
- mutex_unlock(&dev->mt76.mutex);
-}
-
static int
mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
{
@@ -141,39 +121,22 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
return err;
}
-static void
-mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- const u8 *mac)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- set_bit(MT76_SCANNING, &dev->mt76.state);
-}
-
-static void
-mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- clear_bit(MT76_SCANNING, &dev->mt76.state);
-}
-
const struct ieee80211_ops mt76x2u_ops = {
.tx = mt76x02_tx,
.start = mt76x2u_start,
.stop = mt76x2u_stop,
.add_interface = mt76x2u_add_interface,
.remove_interface = mt76x02_remove_interface,
- .sta_add = mt76x02_sta_add,
- .sta_remove = mt76x02_sta_remove,
+ .sta_state = mt76_sta_state,
.set_key = mt76x02_set_key,
.ampdu_action = mt76x02_ampdu_action,
.config = mt76x2u_config,
.wake_tx_queue = mt76_wake_tx_queue,
- .bss_info_changed = mt76x2u_bss_info_changed,
+ .bss_info_changed = mt76x02_bss_info_changed,
.configure_filter = mt76x02_configure_filter,
.conf_tx = mt76x02_conf_tx,
- .sw_scan_start = mt76x2u_sw_scan,
- .sw_scan_complete = mt76x2u_sw_scan_complete,
+ .sw_scan_start = mt76x02_sw_scan,
+ .sw_scan_complete = mt76x02_sw_scan_complete,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
+ .get_txpower = mt76x02_get_txpower,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
index 3f1e558e5e6d..45a95ee3a415 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c
@@ -29,30 +29,6 @@
#define MT76U_MCU_DLM_OFFSET 0x110000
#define MT76U_MCU_ROM_PATCH_OFFSET 0x90000
-int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap,
- bool ext, int rssi, u32 false_cca)
-{
- struct {
- __le32 channel;
- __le32 rssi_val;
- __le32 false_cca_val;
- } __packed __aligned(4) msg = {
- .rssi_val = cpu_to_le32(rssi),
- .false_cca_val = cpu_to_le32(false_cca),
- };
- struct sk_buff *skb;
- u32 val = channel;
-
- if (ap)
- val |= BIT(31);
- if (ext)
- val |= BIT(30);
- msg.channel = cpu_to_le32(val);
-
- skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg));
- return mt76_mcu_send_msg(dev, skb, CMD_DYNC_VGA_OP, true);
-}
-
static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev)
{
mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE,
@@ -117,7 +93,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev)
return 0;
}
- err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev);
+ err = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev);
if (err < 0)
return err;
@@ -183,7 +159,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev)
int err, len, ilm_len, dlm_len;
const struct firmware *fw;
- err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev);
+ err = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev);
if (err < 0)
return err;
@@ -282,9 +258,9 @@ int mt76x2u_mcu_init(struct mt76x02_dev *dev)
{
int err;
- err = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false);
+ err = mt76x02_mcu_function_select(dev, Q_SELECT, 1);
if (err < 0)
return err;
- return mt76x02_mcu_set_radio_state(dev, true, false);
+ return mt76x02_mcu_set_radio_state(dev, true);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
index ca96ba60510e..11d414d86c68 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
@@ -18,63 +18,35 @@
#include "eeprom.h"
#include "../mt76x02_phy.h"
-void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev)
+static void
+mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
{
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
bool is_5ghz = chan->band == NL80211_BAND_5GHZ;
+ if (dev->cal.channel_cal_done)
+ return;
+
if (mt76x2_channel_silent(dev))
return;
- mt76x2u_mac_stop(dev);
+ if (!mac_stopped)
+ mt76x2u_mac_stop(dev);
if (is_5ghz)
- mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, false);
-
- mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, false);
- mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false);
- mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, false);
- mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, false);
-
- mt76x2u_mac_resume(dev);
-}
+ mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0);
-static void
-mt76x2u_phy_update_channel_gain(struct mt76x02_dev *dev)
-{
- u8 channel = dev->mt76.chandef.chan->hw_value;
- int freq, freq1;
- u32 false_cca;
-
- freq = dev->mt76.chandef.chan->center_freq;
- freq1 = dev->mt76.chandef.center_freq1;
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0);
- switch (dev->mt76.chandef.width) {
- case NL80211_CHAN_WIDTH_80: {
- int ch_group_index;
+ if (!mac_stopped)
+ mt76x2u_mac_resume(dev);
+ mt76x2_apply_gain_adj(dev);
- ch_group_index = (freq - freq1 + 30) / 20;
- if (WARN_ON(ch_group_index < 0 || ch_group_index > 3))
- ch_group_index = 0;
- channel += 6 - ch_group_index * 4;
- break;
- }
- case NL80211_CHAN_WIDTH_40:
- if (freq1 > freq)
- channel += 2;
- else
- channel -= 2;
- break;
- default:
- break;
- }
-
- dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev);
- false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS,
- mt76_rr(dev, MT_RX_STAT_1));
-
- mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false,
- dev->cal.avg_rssi_all, false_cca);
+ dev->cal.channel_cal_done = true;
}
void mt76x2u_phy_calibrate(struct work_struct *work)
@@ -82,8 +54,9 @@ void mt76x2u_phy_calibrate(struct work_struct *work)
struct mt76x02_dev *dev;
dev = container_of(work, struct mt76x02_dev, cal_work.work);
- mt76x2_phy_tssi_compensate(dev, false);
- mt76x2u_phy_update_channel_gain(dev);
+ mt76x2u_phy_channel_calibrate(dev, false);
+ mt76x2_phy_tssi_compensate(dev);
+ mt76x2_phy_update_channel_gain(dev);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);
@@ -180,14 +153,14 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT);
if (val != 0xff)
- mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0);
}
- mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel);
/* Rx LPF calibration */
if (!dev->cal.init_cal_done)
- mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0);
dev->cal.init_cal_done = true;
mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2);
@@ -202,6 +175,9 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
if (scan)
return 0;
+ mt76x2u_phy_channel_calibrate(dev, true);
+ mt76x02_init_agc_gain(dev);
+
if (mt76x2_tssi_enabled(dev)) {
/* init default values for temp compensation */
mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP,
@@ -219,7 +195,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev,
flag |= BIT(0);
if (mt76x02_ext_pa_enabled(dev, chan->band))
flag |= BIT(8);
- mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, false);
+ mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag);
dev->cal.tssi_cal_done = true;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 7cbce03aa65b..7b711058807d 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -104,6 +104,157 @@ mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
}
void
+mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __acquires(&dev->status_list.lock)
+{
+ __skb_queue_head_init(list);
+ spin_lock_bh(&dev->status_list.lock);
+ __acquire(&dev->status_list.lock);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
+
+void
+mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
+ __releases(&dev->status_list.lock)
+{
+ struct sk_buff *skb;
+
+ spin_unlock_bh(&dev->status_list.lock);
+ __release(&dev->status_list.lock);
+
+ while ((skb = __skb_dequeue(list)) != NULL)
+ ieee80211_tx_status(dev->hw, skb);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
+
+static void
+__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
+ struct sk_buff_head *list)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+ u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;
+
+ flags |= cb->flags;
+ cb->flags = flags;
+
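+ /* hold the skb until both the DMA completion and the tx status report have been flagged */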
+ if ((flags & done) != done)
+ return;
+
+ __skb_unlink(skb, &dev->status_list);
+
+ /* Tx status can be unreliable. If it fails, mark the frame as ACKed */
+ if (flags & MT_TX_CB_TXS_FAILED) {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ }
+
+ __skb_queue_tail(list, skb);
+}
+
+void
+mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
+ struct sk_buff_head *list)
+{
+ __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
+
+int
+mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+ int pid;
+
+ if (!wcid)
+ return 0;
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ return MT_PACKET_ID_NO_ACK;
+
+ if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
+ IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
+ return 0;
+
+ spin_lock_bh(&dev->status_list.lock);
+
+ memset(cb, 0, sizeof(*cb));
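+ /* advance to the next packet id, skipping 0 and the reserved NO_ACK value */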
+ wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
+ if (!wcid->packet_id || wcid->packet_id == MT_PACKET_ID_NO_ACK)
+ wcid->packet_id = 1;
+
+ pid = wcid->packet_id;
+ cb->wcid = wcid->idx;
+ cb->pktid = pid;
+ cb->jiffies = jiffies;
+
+ __skb_queue_tail(&dev->status_list, skb);
+ spin_unlock_bh(&dev->status_list.lock);
+
+ return pid;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
+
+struct sk_buff *
+mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
+ struct sk_buff_head *list)
+{
+ struct sk_buff *skb, *tmp;
+
+ if (pktid == MT_PACKET_ID_NO_ACK)
+ return NULL;
+
+ skb_queue_walk_safe(&dev->status_list, skb, tmp) {
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+
+ if (wcid && cb->wcid != wcid->idx)
+ continue;
+
+ if (cb->pktid == pktid)
+ return skb;
+
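+ /* pktid 0 reaps only timed-out entries; any other mismatch falls through and is completed as failed */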
+ if (!pktid &&
+ !time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT))
+ continue;
+
+ __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
+ MT_TX_CB_TXS_DONE, list);
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
+
+void
+mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
+{
+ struct sk_buff_head list;
+
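+ /* pktid -1 flushes every pending entry, pktid 0 completes only the timed-out ones */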
+ mt76_tx_status_lock(dev, &list);
+ mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
+ mt76_tx_status_unlock(dev, &list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_status_check);
+
+void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
+{
+ struct sk_buff_head list;
+
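+ /* prev/next are cleared before tx and only set again if the skb was queued on status_list */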
+ if (!skb->prev) {
+ ieee80211_free_txskb(dev->hw, skb);
+ return;
+ }
+
+ mt76_tx_status_lock(dev, &list);
+ __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
+ mt76_tx_status_unlock(dev, &list);
+}
+EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
+
+void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
{
@@ -400,7 +551,12 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
struct ieee80211_txq *txq = sta->txq[i];
- struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ continue;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
spin_lock_bh(&mtxq->hwq->lock);
mtxq->send_bar = mtxq->aggr && send_bar;
@@ -439,7 +595,7 @@ void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
spin_lock_bh(&hwq->lock);
if (!list_empty(&mtxq->list))
- list_del(&mtxq->list);
+ list_del_init(&mtxq->list);
spin_unlock_bh(&hwq->lock);
while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 5f0faf07c346..b061263453d4 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -100,7 +100,7 @@ static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
return data;
}
-u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
+static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
u32 ret;
@@ -110,7 +110,6 @@ u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
return ret;
}
-EXPORT_SYMBOL_GPL(mt76u_rr);
/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
@@ -136,13 +135,12 @@ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
trace_usb_reg_wr(dev, addr, val);
}
-void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
mutex_lock(&dev->usb.usb_ctrl_mtx);
__mt76u_wr(dev, addr, val);
mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
-EXPORT_SYMBOL_GPL(mt76u_wr);
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
u32 mask, u32 val)
@@ -356,6 +354,7 @@ int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
complete_fn, context);
+ trace_submit_urb(dev, buf->urb);
return usb_submit_urb(buf->urb, gfp);
}
@@ -442,6 +441,8 @@ static void mt76u_complete_rx(struct urb *urb)
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
unsigned long flags;
+ trace_rx_urb(dev, urb);
+
switch (urb->status) {
case -ECONNRESET:
case -ESHUTDOWN:
@@ -699,6 +700,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
if (q->queued == q->ndesc)
return -ENOSPC;
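+ /* clear list pointers so completion can detect whether the skb was added to status_list */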
+ skb->prev = skb->next = NULL;
err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
if (err < 0)
return err;
@@ -728,6 +730,8 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
while (q->first != q->tail) {
buf = &q->entry[q->first].ubuf;
+
+ trace_submit_urb(dev, buf->urb);
err = usb_submit_urb(buf->urb, GFP_ATOMIC);
if (err < 0) {
if (err == -ENODEV)
diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h
index 52db7012304a..b56c32343eb1 100644
--- a/drivers/net/wireless/mediatek/mt76/usb_trace.h
+++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h
@@ -26,12 +26,12 @@
#define MAXNAME 32
#define DEV_ENTRY __array(char, wiphy_name, 32)
#define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME)
-#define DEV_PR_FMT "%s"
+#define DEV_PR_FMT "%s "
#define DEV_PR_ARG __entry->wiphy_name
#define REG_ENTRY __field(u32, reg) __field(u32, val)
#define REG_ASSIGN __entry->reg = reg; __entry->val = val
-#define REG_PR_FMT " %04x=%08x"
+#define REG_PR_FMT "reg:0x%04x=0x%08x"
#define REG_PR_ARG __entry->reg, __entry->val
DECLARE_EVENT_CLASS(dev_reg_evt,
@@ -61,6 +61,31 @@ DEFINE_EVENT(dev_reg_evt, usb_reg_wr,
TP_ARGS(dev, reg, val)
);
+DECLARE_EVENT_CLASS(urb_transfer,
+ TP_PROTO(struct mt76_dev *dev, struct urb *u),
+ TP_ARGS(dev, u),
+ TP_STRUCT__entry(
+ DEV_ENTRY __field(unsigned, pipe) __field(u32, len)
+ ),
+ TP_fast_assign(
+ DEV_ASSIGN;
+ __entry->pipe = u->pipe;
+ __entry->len = u->transfer_buffer_length;
+ ),
+ TP_printk(DEV_PR_FMT "p:%08x len:%u",
+ DEV_PR_ARG, __entry->pipe, __entry->len)
+);
+
+DEFINE_EVENT(urb_transfer, submit_urb,
+ TP_PROTO(struct mt76_dev *dev, struct urb *u),
+ TP_ARGS(dev, u)
+);
+
+DEFINE_EVENT(urb_transfer, rx_urb,
+ TP_PROTO(struct mt76_dev *dev, struct urb *u),
+ TP_ARGS(dev, u)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
index b8c12a5f16b4..6cf5202c3666 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Kconfig
+++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
@@ -1,11 +1,11 @@
config QTNFMAC
tristate
- depends on QTNFMAC_PEARL_PCIE
- default m if QTNFMAC_PEARL_PCIE=m
- default y if QTNFMAC_PEARL_PCIE=y
+ depends on QTNFMAC_PCIE
+ default m if QTNFMAC_PCIE=m
+ default y if QTNFMAC_PCIE=y
-config QTNFMAC_PEARL_PCIE
- tristate "Quantenna QSR10g PCIe support"
+config QTNFMAC_PCIE
+ tristate "Quantenna QSR1000/QSR2000/QSR10g PCIe support"
default n
depends on PCI && CFG80211
select QTNFMAC
@@ -13,7 +13,8 @@ config QTNFMAC_PEARL_PCIE
select CRC32
help
This option adds support for wireless adapters based on Quantenna
- 802.11ac QSR10g (aka Pearl) FullMAC chipset running over PCIe.
+ 802.11ac QSR10g (aka Pearl) and QSR1000/QSR2000 (aka Topaz)
+ FullMAC chipsets running over PCIe.
If you choose to build it as a module, two modules will be built:
- qtnfmac.ko and qtnfmac_pearl_pcie.ko.
+ qtnfmac.ko and qtnfmac_pcie.ko.
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Makefile b/drivers/net/wireless/quantenna/qtnfmac/Makefile
index 17cd7adb4109..40dffbd2ea47 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Makefile
+++ b/drivers/net/wireless/quantenna/qtnfmac/Makefile
@@ -19,11 +19,12 @@ qtnfmac-objs += \
#
-obj-$(CONFIG_QTNFMAC_PEARL_PCIE) += qtnfmac_pearl_pcie.o
+obj-$(CONFIG_QTNFMAC_PCIE) += qtnfmac_pcie.o
-qtnfmac_pearl_pcie-objs += \
+qtnfmac_pcie-objs += \
shm_ipc.o \
pcie/pcie.o \
- pcie/pearl_pcie.o
+ pcie/pearl_pcie.o \
+ pcie/topaz_pcie.o
-qtnfmac_pearl_pcie-$(CONFIG_DEBUG_FS) += debug.o
+qtnfmac_pcie-$(CONFIG_DEBUG_FS) += debug.o
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index bfdc1ad30c13..659e7649fe22 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -84,7 +84,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
size_t *var_resp_size)
{
struct qlink_cmd *cmd;
- const struct qlink_resp *resp;
+ struct qlink_resp *resp = NULL;
struct sk_buff *resp_skb = NULL;
u16 cmd_id;
u8 mac_id;
@@ -113,7 +113,12 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
if (ret)
goto out;
- resp = (const struct qlink_resp *)resp_skb->data;
+ if (WARN_ON(!resp_skb || !resp_skb->data)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ resp = (struct qlink_resp *)resp_skb->data;
ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id,
const_resp_size);
if (ret)
@@ -686,7 +691,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
struct sk_buff *cmd_skb, *resp_skb = NULL;
struct qlink_cmd_get_sta_info *cmd;
const struct qlink_resp_get_sta_info *resp;
- size_t var_resp_len;
+ size_t var_resp_len = 0;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
@@ -1650,7 +1655,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
const struct qlink_resp_get_mac_info *resp;
- size_t var_data_len;
+ size_t var_data_len = 0;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
@@ -1680,8 +1685,8 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
const struct qlink_resp_get_hw_info *resp;
+ size_t info_len = 0;
int ret = 0;
- size_t info_len;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
QLINK_CMD_GET_HW_INFO,
@@ -1709,9 +1714,9 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
struct ieee80211_supported_band *band)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
- size_t info_len;
struct qlink_cmd_band_info_get *cmd;
struct qlink_resp_band_info_get *resp;
+ size_t info_len = 0;
int ret = 0;
u8 qband;
@@ -1764,8 +1769,8 @@ out:
int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb, *resp_skb = NULL;
- size_t response_size;
struct qlink_resp_phy_params *resp;
+ size_t response_size = 0;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
@@ -2431,7 +2436,7 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
struct sk_buff *cmd_skb, *resp_skb = NULL;
struct qlink_cmd_get_chan_stats *cmd;
struct qlink_resp_get_chan_stats *resp;
- size_t var_data_len;
+ size_t var_data_len = 0;
int ret = 0;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index 16795dbe475b..c3a32effa6f0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2018 Quantenna Communications, Inc. All rights reserved. */
+#include <linux/module.h>
#include <linux/printk.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
@@ -15,14 +16,37 @@
#include "shm_ipc.h"
#include "core.h"
#include "debug.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) "qtnf_pcie: %s: " fmt, __func__
+#include "util.h"
+#include "qtn_hw_ids.h"
#define QTN_SYSCTL_BAR 0
#define QTN_SHMEM_BAR 2
#define QTN_DMA_BAR 3
+#define QTN_PCIE_MAX_FW_BUFSZ (1 * 1024 * 1024)
+
+static bool use_msi = true;
+module_param(use_msi, bool, 0644);
+MODULE_PARM_DESC(use_msi, "set to 0 to use legacy interrupt");
+
+static unsigned int tx_bd_size_param;
+module_param(tx_bd_size_param, uint, 0644);
+MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptor queue size");
+
+static unsigned int rx_bd_size_param = 256;
+module_param(rx_bd_size_param, uint, 0644);
+MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptor queue size");
+
+static u8 flashboot = 1;
+module_param(flashboot, byte, 0644);
+MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");
+
+static unsigned int fw_blksize_param = QTN_PCIE_MAX_FW_BUFSZ;
+module_param(fw_blksize_param, uint, 0644);
+MODULE_PARM_DESC(fw_blksize_param, "firmware loading block size in bytes");
+
+#define DRV_NAME "qtnfmac_pcie"
+
int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
@@ -58,7 +82,7 @@ int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv)
return 0;
}
-void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus)
+static void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus)
{
struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
struct pci_dev *pdev = priv->pdev;
@@ -72,7 +96,7 @@ static int qtnf_dbg_mps_show(struct seq_file *s, void *data)
struct qtnf_bus *bus = dev_get_drvdata(s->private);
struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
- seq_printf(s, "%d\n", priv->mps);
+ seq_printf(s, "%d\n", pcie_get_mps(priv->pdev));
return 0;
}
@@ -104,8 +128,7 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
return 0;
}
-void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
- const char *drv_name)
+void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success)
{
struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
struct pci_dev *pdev = priv->pdev;
@@ -122,7 +145,7 @@ void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
}
if (boot_success) {
- qtnf_debugfs_init(bus, drv_name);
+ qtnf_debugfs_init(bus, DRV_NAME);
qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
@@ -133,9 +156,8 @@ void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
put_device(&pdev->dev);
}
-static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
+static void qtnf_tune_pcie_mps(struct pci_dev *pdev)
{
- struct pci_dev *pdev = priv->pdev;
struct pci_dev *parent;
int mps_p, mps_o, mps_m, mps;
int ret;
@@ -163,12 +185,10 @@ static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
if (ret) {
pr_err("failed to set mps to %d, keep using current %d\n",
mps, mps_o);
- priv->mps = mps_o;
return;
}
pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m);
- priv->mps = mps;
}
static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi)
@@ -194,20 +214,20 @@ static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi)
}
}
-static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
+static void __iomem *qtnf_map_bar(struct pci_dev *pdev, u8 index)
{
void __iomem *vaddr;
dma_addr_t busaddr;
size_t len;
int ret;
- ret = pcim_iomap_regions(priv->pdev, 1 << index, "qtnfmac_pcie");
+ ret = pcim_iomap_regions(pdev, 1 << index, "qtnfmac_pcie");
if (ret)
return IOMEM_ERR_PTR(ret);
- busaddr = pci_resource_start(priv->pdev, index);
- len = pci_resource_len(priv->pdev, index);
- vaddr = pcim_iomap_table(priv->pdev)[index];
+ busaddr = pci_resource_start(pdev, index);
+ len = pci_resource_len(pdev, index);
+ vaddr = pcim_iomap_table(pdev)[index];
if (!vaddr)
return IOMEM_ERR_PTR(-ENOMEM);
@@ -217,31 +237,6 @@ static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
return vaddr;
}
-static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
-{
- int ret = -ENOMEM;
-
- priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
- if (IS_ERR(priv->sysctl_bar)) {
- pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
- return ret;
- }
-
- priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR);
- if (IS_ERR(priv->dmareg_bar)) {
- pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
- return ret;
- }
-
- priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR);
- if (IS_ERR(priv->epmem_bar)) {
- pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
- return ret;
- }
-
- return 0;
-}
-
static void qtnf_pcie_control_rx_callback(void *arg, const u8 __iomem *buf,
size_t len)
{
@@ -282,27 +277,83 @@ void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv,
ipc_int, &rx_callback);
}
-int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size,
- const struct qtnf_bus_ops *bus_ops, u64 dma_mask,
- bool use_msi)
+static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct qtnf_pcie_bus_priv *pcie_priv;
struct qtnf_bus *bus;
+ void __iomem *sysctl_bar;
+ void __iomem *epmem_bar;
+ void __iomem *dmareg_bar;
+ unsigned int chipid;
int ret;
- bus = devm_kzalloc(&pdev->dev,
- sizeof(*bus) + priv_size, GFP_KERNEL);
+ if (!pci_is_pcie(pdev)) {
+ pr_err("device %s is not PCI Express\n", pci_name(pdev));
+ return -EIO;
+ }
+
+ qtnf_tune_pcie_mps(pdev);
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ pr_err("failed to init PCI device %x\n", pdev->device);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ sysctl_bar = qtnf_map_bar(pdev, QTN_SYSCTL_BAR);
+ if (IS_ERR(sysctl_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
+ return PTR_ERR(sysctl_bar);
+ }
+
+ dmareg_bar = qtnf_map_bar(pdev, QTN_DMA_BAR);
+ if (IS_ERR(dmareg_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
+ return PTR_ERR(dmareg_bar);
+ }
+
+ epmem_bar = qtnf_map_bar(pdev, QTN_SHMEM_BAR);
+ if (IS_ERR(epmem_bar)) {
+ pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
+ return PTR_ERR(epmem_bar);
+ }
+
+ chipid = qtnf_chip_id_get(sysctl_bar);
+
+ pr_info("identified device: %s\n", qtnf_chipid_to_string(chipid));
+
+ switch (chipid) {
+ case QTN_CHIP_ID_PEARL:
+ case QTN_CHIP_ID_PEARL_B:
+ case QTN_CHIP_ID_PEARL_C:
+ bus = qtnf_pcie_pearl_alloc(pdev);
+ break;
+ case QTN_CHIP_ID_TOPAZ:
+ bus = qtnf_pcie_topaz_alloc(pdev);
+ break;
+ default:
+ pr_err("unsupported chip ID 0x%x\n", chipid);
+ return -ENOTSUPP;
+ }
+
if (!bus)
return -ENOMEM;
pcie_priv = get_bus_priv(bus);
-
pci_set_drvdata(pdev, bus);
- bus->bus_ops = bus_ops;
bus->dev = &pdev->dev;
bus->fw_state = QTNF_FW_STATE_RESET;
pcie_priv->pdev = pdev;
pcie_priv->tx_stopped = 0;
+ pcie_priv->rx_bd_num = rx_bd_size_param;
+ pcie_priv->flashboot = flashboot;
+
+ if (fw_blksize_param > QTN_PCIE_MAX_FW_BUFSZ)
+ pcie_priv->fw_blksize = QTN_PCIE_MAX_FW_BUFSZ;
+ else
+ pcie_priv->fw_blksize = fw_blksize_param;
mutex_init(&bus->bus_lock);
spin_lock_init(&pcie_priv->tx_lock);
@@ -317,53 +368,35 @@ int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size,
pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
if (!pcie_priv->workqueue) {
pr_err("failed to alloc bus workqueue\n");
- ret = -ENODEV;
- goto err_init;
- }
-
- init_dummy_netdev(&bus->mux_dev);
-
- if (!pci_is_pcie(pdev)) {
- pr_err("device %s is not PCI Express\n", pci_name(pdev));
- ret = -EIO;
- goto err_base;
- }
-
- qtnf_tune_pcie_mps(pcie_priv);
-
- ret = pcim_enable_device(pdev);
- if (ret) {
- pr_err("failed to init PCI device %x\n", pdev->device);
- goto err_base;
- } else {
- pr_debug("successful init of PCI device %x\n", pdev->device);
+ return -ENODEV;
}
- ret = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ pcie_priv->dma_mask_get_cb());
if (ret) {
- pr_err("PCIE DMA coherent mask init failed\n");
- goto err_base;
+ pr_err("PCIE DMA coherent mask init failed 0x%llx\n",
+ pcie_priv->dma_mask_get_cb());
+ goto error;
}
- pci_set_master(pdev);
+ init_dummy_netdev(&bus->mux_dev);
qtnf_pcie_init_irq(pcie_priv, use_msi);
-
- ret = qtnf_pcie_init_memory(pcie_priv);
- if (ret < 0) {
- pr_err("PCIE memory init failed\n");
- goto err_base;
- }
-
+ pcie_priv->sysctl_bar = sysctl_bar;
+ pcie_priv->dmareg_bar = dmareg_bar;
+ pcie_priv->epmem_bar = epmem_bar;
pci_save_state(pdev);
+ ret = pcie_priv->probe_cb(bus, tx_bd_size_param);
+ if (ret)
+ goto error;
+
+ qtnf_pcie_bringup_fw_async(bus);
return 0;
-err_base:
+error:
flush_workqueue(pcie_priv->workqueue);
destroy_workqueue(pcie_priv->workqueue);
-err_init:
pci_set_drvdata(pdev, NULL);
-
return ret;
}
@@ -373,8 +406,17 @@ static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv)
qtnf_shm_ipc_free(&priv->shm_ipc_ep_out);
}
-void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv)
+static void qtnf_pcie_remove(struct pci_dev *dev)
{
+ struct qtnf_pcie_bus_priv *priv;
+ struct qtnf_bus *bus;
+
+ bus = pci_get_drvdata(dev);
+ if (!bus)
+ return;
+
+ priv = get_bus_priv(bus);
+
cancel_work_sync(&bus->fw_work);
if (bus->fw_state == QTNF_FW_STATE_ACTIVE ||
@@ -388,5 +430,77 @@ void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv)
qtnf_pcie_free_shm_ipc(priv);
qtnf_debugfs_remove(bus);
+ priv->remove_cb(bus);
pci_set_drvdata(priv->pdev, NULL);
}
+
+#ifdef CONFIG_PM_SLEEP
+static int qtnf_pcie_suspend(struct device *dev)
+{
+ struct qtnf_pcie_bus_priv *priv;
+ struct qtnf_bus *bus;
+
+ bus = pci_get_drvdata(to_pci_dev(dev));
+ if (!bus)
+ return -EFAULT;
+
+ priv = get_bus_priv(bus);
+ return priv->suspend_cb(bus);
+}
+
+static int qtnf_pcie_resume(struct device *dev)
+{
+ struct qtnf_pcie_bus_priv *priv;
+ struct qtnf_bus *bus;
+
+ bus = pci_get_drvdata(to_pci_dev(dev));
+ if (!bus)
+ return -EFAULT;
+
+ priv = get_bus_priv(bus);
+ return priv->resume_cb(bus);
+}
+
+/* Power Management Hooks */
+static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend,
+ qtnf_pcie_resume);
+#endif
+
+static const struct pci_device_id qtnf_pcie_devid_table[] = {
+ {
+ PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QSR,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ },
+ { },
+};
+
+MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
+
+static struct pci_driver qtnf_pcie_drv_data = {
+ .name = DRV_NAME,
+ .id_table = qtnf_pcie_devid_table,
+ .probe = qtnf_pcie_probe,
+ .remove = qtnf_pcie_remove,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &qtnf_pcie_pm_ops,
+ },
+#endif
+};
+
+static int __init qtnf_pcie_register(void)
+{
+ return pci_register_driver(&qtnf_pcie_drv_data);
+}
+
+static void __exit qtnf_pcie_exit(void)
+{
+ pci_unregister_driver(&qtnf_pcie_drv_data);
+}
+
+module_init(qtnf_pcie_register);
+module_exit(qtnf_pcie_exit);
+
+MODULE_AUTHOR("Quantenna Communications");
+MODULE_DESCRIPTION("Quantenna PCIe bus driver for 802.11 wireless LAN.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
index 5c70fb4c0f92..bbc074e1f34d 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
@@ -23,9 +23,14 @@
struct qtnf_pcie_bus_priv {
struct pci_dev *pdev;
+ int (*probe_cb)(struct qtnf_bus *bus, unsigned int tx_bd_size);
+ void (*remove_cb)(struct qtnf_bus *bus);
+ int (*suspend_cb)(struct qtnf_bus *bus);
+ int (*resume_cb)(struct qtnf_bus *bus);
+ u64 (*dma_mask_get_cb)(void);
+
spinlock_t tx_reclaim_lock;
spinlock_t tx_lock;
- int mps;
struct workqueue_struct *workqueue;
struct tasklet_struct reclaim_tq;
@@ -43,6 +48,8 @@ struct qtnf_pcie_bus_priv {
struct sk_buff **tx_skb;
struct sk_buff **rx_skb;
+ unsigned int fw_blksize;
+
u32 rx_bd_w_index;
u32 rx_bd_r_index;
@@ -58,21 +65,18 @@ struct qtnf_pcie_bus_priv {
u8 msi_enabled;
u8 tx_stopped;
+ bool flashboot;
};
int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb);
int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv);
-void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus);
-void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success,
- const char *drv_name);
+void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success);
void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv,
struct qtnf_shm_ipc_region __iomem *ipc_tx_reg,
struct qtnf_shm_ipc_region __iomem *ipc_rx_reg,
const struct qtnf_shm_ipc_int *ipc_int);
-int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size,
- const struct qtnf_bus_ops *bus_ops, u64 dma_mask,
- bool use_msi);
-void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv);
+struct qtnf_bus *qtnf_pcie_pearl_alloc(struct pci_dev *pdev);
+struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev);
static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 95c7b95c6f8a..1f5facbb8905 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -2,7 +2,6 @@
/* Copyright (c) 2018 Quantenna Communications */
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
@@ -24,23 +23,7 @@
#include "shm_ipc.h"
#include "debug.h"
-static bool use_msi = true;
-module_param(use_msi, bool, 0644);
-MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt");
-
-static unsigned int tx_bd_size_param = 32;
-module_param(tx_bd_size_param, uint, 0644);
-MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two");
-
-static unsigned int rx_bd_size_param = 256;
-module_param(rx_bd_size_param, uint, 0644);
-MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two");
-
-static u8 flashboot = 1;
-module_param(flashboot, byte, 0644);
-MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");
-
-#define DRV_NAME "qtnfmac_pearl_pcie"
+#define PEARL_TX_BD_SIZE_DEFAULT 32
struct qtnf_pearl_bda {
__le16 bda_len;
@@ -415,30 +398,28 @@ static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps)
return 0;
}
-static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps)
+static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps,
+ unsigned int tx_bd_size)
{
struct qtnf_pcie_bus_priv *priv = &ps->base;
int ret;
u32 val;
- priv->tx_bd_num = tx_bd_size_param;
- priv->rx_bd_num = rx_bd_size_param;
- priv->rx_bd_w_index = 0;
- priv->rx_bd_r_index = 0;
+ if (tx_bd_size == 0)
+ tx_bd_size = PEARL_TX_BD_SIZE_DEFAULT;
- if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) {
- pr_err("tx_bd_size_param %u is not power of two\n",
- priv->tx_bd_num);
- return -EINVAL;
- }
+ val = tx_bd_size * sizeof(struct qtnf_pearl_tx_bd);
- val = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd);
- if (val > PCIE_HHBM_MAX_SIZE) {
- pr_err("tx_bd_size_param %u is too large\n",
- priv->tx_bd_num);
- return -EINVAL;
+ if (!is_power_of_2(tx_bd_size) || val > PCIE_HHBM_MAX_SIZE) {
+ pr_warn("bad tx_bd_size value %u\n", tx_bd_size);
+ priv->tx_bd_num = PEARL_TX_BD_SIZE_DEFAULT;
+ } else {
+ priv->tx_bd_num = tx_bd_size;
}
+ priv->rx_bd_w_index = 0;
+ priv->rx_bd_r_index = 0;
+
if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
pr_err("rx_bd_size_param %u is not power of two\n",
priv->rx_bd_num);
@@ -1006,7 +987,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
const char *fwname = QTN_PCI_PEARL_FW_NAME;
bool fw_boot_success = false;
- if (flashboot) {
+ if (ps->base.flashboot) {
state |= QTN_RC_FW_FLASHBOOT;
} else {
ret = request_firmware(&fw, fwname, &pdev->dev);
@@ -1022,7 +1003,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
QTN_FW_DL_TIMEOUT_MS)) {
pr_err("card is not ready\n");
- if (!flashboot)
+ if (!ps->base.flashboot)
release_firmware(fw);
goto fw_load_exit;
@@ -1030,7 +1011,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
qtnf_clear_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY);
- if (flashboot) {
+ if (ps->base.flashboot) {
pr_info("booting firmware from flash\n");
} else {
@@ -1061,7 +1042,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
fw_boot_success = true;
fw_load_exit:
- qtnf_pcie_fw_boot_done(bus, fw_boot_success, DRV_NAME);
+ qtnf_pcie_fw_boot_done(bus, fw_boot_success);
if (fw_boot_success) {
qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
@@ -1077,74 +1058,34 @@ static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
qtnf_en_txdone_irq(ps);
}
-static int qtnf_pearl_check_chip_id(struct qtnf_pcie_pearl_state *ps)
+static u64 qtnf_pearl_dma_mask_get(void)
{
- unsigned int chipid;
-
- chipid = qtnf_chip_id_get(ps->base.sysctl_bar);
-
- switch (chipid) {
- case QTN_CHIP_ID_PEARL:
- case QTN_CHIP_ID_PEARL_B:
- case QTN_CHIP_ID_PEARL_C:
- pr_info("chip ID is 0x%x\n", chipid);
- break;
- default:
- pr_err("incorrect chip ID 0x%x\n", chipid);
- return -ENODEV;
- }
-
- return 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ return DMA_BIT_MASK(64);
+#else
+ return DMA_BIT_MASK(32);
+#endif
}
-static int qtnf_pcie_pearl_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size)
{
struct qtnf_shm_ipc_int ipc_int;
- struct qtnf_pcie_pearl_state *ps;
- struct qtnf_bus *bus;
+ struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
+ struct pci_dev *pdev = ps->base.pdev;
int ret;
- u64 dma_mask;
-
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- dma_mask = DMA_BIT_MASK(64);
-#else
- dma_mask = DMA_BIT_MASK(32);
-#endif
-
- ret = qtnf_pcie_probe(pdev, sizeof(*ps), &qtnf_pcie_pearl_bus_ops,
- dma_mask, use_msi);
- if (ret)
- return ret;
-
- bus = pci_get_drvdata(pdev);
- ps = get_bus_priv(bus);
+ bus->bus_ops = &qtnf_pcie_pearl_bus_ops;
spin_lock_init(&ps->irq_lock);
-
- tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn,
- (unsigned long)ps);
- netif_napi_add(&bus->mux_dev, &bus->mux_napi,
- qtnf_pcie_pearl_rx_poll, 10);
INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler);
ps->pcie_reg_base = ps->base.dmareg_bar;
ps->bda = ps->base.epmem_bar;
writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled);
- ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
- ipc_int.arg = ps;
- qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1,
- &ps->bda->bda_shm_reg2, &ipc_int);
-
- ret = qtnf_pearl_check_chip_id(ps);
- if (ret)
- goto error;
-
- ret = qtnf_pcie_pearl_init_xfer(ps);
+ ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size);
if (ret) {
pr_err("PCIE xfer init failed\n");
- goto error;
+ return ret;
}
/* init default irq settings */
@@ -1155,95 +1096,63 @@ static int qtnf_pcie_pearl_probe(struct pci_dev *pdev,
ret = devm_request_irq(&pdev->dev, pdev->irq,
&qtnf_pcie_pearl_interrupt, 0,
- "qtnf_pcie_irq", (void *)bus);
+ "qtnf_pearl_irq", (void *)bus);
if (ret) {
pr_err("failed to request pcie irq %d\n", pdev->irq);
- goto err_xfer;
+ qtnf_pearl_free_xfer_buffers(ps);
+ return ret;
}
- qtnf_pcie_bringup_fw_async(bus);
-
- return 0;
+ tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn,
+ (unsigned long)ps);
+ netif_napi_add(&bus->mux_dev, &bus->mux_napi,
+ qtnf_pcie_pearl_rx_poll, 10);
-err_xfer:
- qtnf_pearl_free_xfer_buffers(ps);
-error:
- qtnf_pcie_remove(bus, &ps->base);
+ ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
+ ipc_int.arg = ps;
+ qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1,
+ &ps->bda->bda_shm_reg2, &ipc_int);
- return ret;
+ return 0;
}
-static void qtnf_pcie_pearl_remove(struct pci_dev *pdev)
+static void qtnf_pcie_pearl_remove(struct qtnf_bus *bus)
{
- struct qtnf_pcie_pearl_state *ps;
- struct qtnf_bus *bus;
-
- bus = pci_get_drvdata(pdev);
- if (!bus)
- return;
-
- ps = get_bus_priv(bus);
+ struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus);
- qtnf_pcie_remove(bus, &ps->base);
qtnf_pearl_reset_ep(ps);
qtnf_pearl_free_xfer_buffers(ps);
}
#ifdef CONFIG_PM_SLEEP
-static int qtnf_pcie_pearl_suspend(struct device *dev)
+static int qtnf_pcie_pearl_suspend(struct qtnf_bus *bus)
{
return -EOPNOTSUPP;
}
-static int qtnf_pcie_pearl_resume(struct device *dev)
+static int qtnf_pcie_pearl_resume(struct qtnf_bus *bus)
{
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-
-#ifdef CONFIG_PM_SLEEP
-/* Power Management Hooks */
-static SIMPLE_DEV_PM_OPS(qtnf_pcie_pearl_pm_ops, qtnf_pcie_pearl_suspend,
- qtnf_pcie_pearl_resume);
#endif
-static const struct pci_device_id qtnf_pcie_devid_table[] = {
- {
- PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- },
- { },
-};
+struct qtnf_bus *qtnf_pcie_pearl_alloc(struct pci_dev *pdev)
+{
+ struct qtnf_bus *bus;
+ struct qtnf_pcie_pearl_state *ps;
-MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ps), GFP_KERNEL);
+ if (!bus)
+ return NULL;
-static struct pci_driver qtnf_pcie_pearl_drv_data = {
- .name = DRV_NAME,
- .id_table = qtnf_pcie_devid_table,
- .probe = qtnf_pcie_pearl_probe,
- .remove = qtnf_pcie_pearl_remove,
+ ps = get_bus_priv(bus);
+ ps->base.probe_cb = qtnf_pcie_pearl_probe;
+ ps->base.remove_cb = qtnf_pcie_pearl_remove;
+ ps->base.dma_mask_get_cb = qtnf_pearl_dma_mask_get;
#ifdef CONFIG_PM_SLEEP
- .driver = {
- .pm = &qtnf_pcie_pearl_pm_ops,
- },
+ ps->base.resume_cb = qtnf_pcie_pearl_resume;
+ ps->base.suspend_cb = qtnf_pcie_pearl_suspend;
#endif
-};
-
-static int __init qtnf_pcie_pearl_register(void)
-{
- pr_info("register Quantenna QSR10g FullMAC PCIE driver\n");
- return pci_register_driver(&qtnf_pcie_pearl_drv_data);
-}
-static void __exit qtnf_pcie_pearl_exit(void)
-{
- pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
- pci_unregister_driver(&qtnf_pcie_pearl_drv_data);
+ return bus;
}
-
-module_init(qtnf_pcie_pearl_register);
-module_exit(qtnf_pcie_pearl_exit);
-
-MODULE_AUTHOR("Quantenna Communications");
-MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
new file mode 100644
index 000000000000..598edb814421
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -0,0 +1,1219 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018 Quantenna Communications */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/crc32.h>
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/circ_buf.h>
+
+#include "pcie_priv.h"
+#include "topaz_pcie_regs.h"
+#include "topaz_pcie_ipc.h"
+#include "qtn_hw_ids.h"
+#include "core.h"
+#include "bus.h"
+#include "shm_ipc.h"
+#include "debug.h"
+
+#define TOPAZ_TX_BD_SIZE_DEFAULT 128
+
+struct qtnf_topaz_tx_bd {
+ __le32 addr;
+ __le32 info;
+} __packed;
+
+struct qtnf_topaz_rx_bd {
+ __le32 addr;
+ __le32 info;
+} __packed;
+
+struct qtnf_extra_bd_params {
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ __le32 param4;
+} __packed;
+
+#define QTNF_BD_PARAM_OFFSET(n) offsetof(struct qtnf_extra_bd_params, param##n)
+
+struct vmac_pkt_info {
+ __le32 addr;
+ __le32 info;
+};
+
+struct qtnf_topaz_bda {
+ __le16 bda_len;
+ __le16 bda_version;
+ __le32 bda_bootstate;
+ __le32 bda_dma_mask;
+ __le32 bda_dma_offset;
+ __le32 bda_flags;
+ __le32 bda_img;
+ __le32 bda_img_size;
+ __le32 bda_ep2h_irqstatus;
+ __le32 bda_h2ep_irqstatus;
+ __le32 bda_msi_addr;
+ u8 reserved1[56];
+ __le32 bda_flashsz;
+ u8 bda_boardname[PCIE_BDA_NAMELEN];
+ __le32 bda_pci_pre_status;
+ __le32 bda_pci_endian;
+ __le32 bda_pci_post_status;
+ __le32 bda_h2ep_txd_budget;
+ __le32 bda_ep2h_txd_budget;
+ __le32 bda_rc_rx_bd_base;
+ __le32 bda_rc_rx_bd_num;
+ __le32 bda_rc_tx_bd_base;
+ __le32 bda_rc_tx_bd_num;
+ u8 bda_ep_link_state;
+ u8 bda_rc_link_state;
+ u8 bda_rc_msi_enabled;
+ u8 reserved2;
+ __le32 bda_ep_next_pkt;
+ struct vmac_pkt_info request[QTN_PCIE_RC_TX_QUEUE_LEN];
+ struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096);
+ struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096);
+} __packed;
+
+struct qtnf_pcie_topaz_state {
+ struct qtnf_pcie_bus_priv base;
+ struct qtnf_topaz_bda __iomem *bda;
+
+ dma_addr_t dma_msi_dummy;
+ u32 dma_msi_imwr;
+
+ struct qtnf_topaz_tx_bd *tx_bd_vbase;
+ struct qtnf_topaz_rx_bd *rx_bd_vbase;
+
+ __le32 __iomem *ep_next_rx_pkt;
+ __le32 __iomem *txqueue_wake;
+ __le32 __iomem *ep_pmstate;
+
+ unsigned long rx_pkt_count;
+};
+
+static void qtnf_deassert_intx(struct qtnf_pcie_topaz_state *ts)
+{
+ void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
+ u32 cfg;
+
+ cfg = readl(reg);
+ cfg &= ~TOPAZ_ASSERT_INTX;
+ qtnf_non_posted_write(cfg, reg);
+}
+
+static inline int qtnf_topaz_intx_asserted(struct qtnf_pcie_topaz_state *ts)
+{
+ void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET;
+ u32 cfg = readl(reg);
+
+ return !!(cfg & TOPAZ_ASSERT_INTX);
+}
+
+static void qtnf_topaz_reset_ep(struct qtnf_pcie_topaz_state *ts)
+{
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RST_EP_IRQ),
+ TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
+ msleep(QTN_EP_RESET_WAIT_MS);
+ pci_restore_state(ts->base.pdev);
+}
+
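+ /* rx-done interrupts arrive as DMA writes to the IMWR address; they are masked by retargeting that write to a dummy scratch word */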
+static void setup_rx_irqs(struct qtnf_pcie_topaz_state *ts)
+{
+ void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);
+
+ ts->dma_msi_imwr = readl(reg);
+}
+
+static void enable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
+{
+ void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);
+
+ qtnf_non_posted_write(ts->dma_msi_imwr, reg);
+}
+
+static void disable_rx_irqs(struct qtnf_pcie_topaz_state *ts)
+{
+ void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar);
+
+ qtnf_non_posted_write(QTN_HOST_LO32(ts->dma_msi_dummy), reg);
+}
+
+static void qtnf_topaz_ipc_gen_ep_int(void *arg)
+{
+ struct qtnf_pcie_topaz_state *ts = arg;
+
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_CTRL_IRQ),
+ TOPAZ_CTL_M2L_INT(ts->base.sysctl_bar));
+}
+
+static int qtnf_is_state(__le32 __iomem *reg, u32 state)
+{
+ u32 s = readl(reg);
+
+ return (s == state);
+}
+
+static void qtnf_set_state(__le32 __iomem *reg, u32 state)
+{
+ qtnf_non_posted_write(state, reg);
+}
+
+static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms)
+{
+ u32 timeout = 0;
+
+ while (qtnf_is_state(reg, state) == 0) {
+ usleep_range(1000, 1200);
+ if (++timeout > delay_in_ms)
+ return -1;
+ }
+
+ return 0;
+}
+
+static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts,
+ struct qtnf_topaz_bda __iomem *bda)
+{
+ struct qtnf_extra_bd_params __iomem *extra_params;
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ dma_addr_t paddr;
+ void *vaddr;
+ int len;
+ int i;
+
+ /* bd table */
+
+ len = priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd) +
+ priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd) +
+ sizeof(struct qtnf_extra_bd_params);
+
+ vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+
+ memset(vaddr, 0, len);
+
+ /* tx bd */
+
+ ts->tx_bd_vbase = vaddr;
+ qtnf_non_posted_write(paddr, &bda->bda_rc_tx_bd_base);
+
+ for (i = 0; i < priv->tx_bd_num; i++)
+ ts->tx_bd_vbase[i].info |= cpu_to_le32(QTN_BD_EMPTY);
+
+ pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ priv->tx_bd_r_index = 0;
+ priv->tx_bd_w_index = 0;
+
+ /* rx bd */
+
+ vaddr = ((struct qtnf_topaz_tx_bd *)vaddr) + priv->tx_bd_num;
+ paddr += priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd);
+
+ ts->rx_bd_vbase = vaddr;
+ qtnf_non_posted_write(paddr, &bda->bda_rc_rx_bd_base);
+
+ pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);
+
+ /* extra shared params */
+
+ vaddr = ((struct qtnf_topaz_rx_bd *)vaddr) + priv->rx_bd_num;
+ paddr += priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd);
+
+ extra_params = (struct qtnf_extra_bd_params __iomem *)vaddr;
+
+ ts->ep_next_rx_pkt = &extra_params->param1;
+ qtnf_non_posted_write(paddr + QTNF_BD_PARAM_OFFSET(1),
+ &bda->bda_ep_next_pkt);
+ ts->txqueue_wake = &extra_params->param2;
+ ts->ep_pmstate = &extra_params->param3;
+ ts->dma_msi_dummy = paddr + QTNF_BD_PARAM_OFFSET(4);
+
+ return 0;
+}
+
+static int
+topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap)
+{
+ struct qtnf_topaz_rx_bd *rxbd = &ts->rx_bd_vbase[index];
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ ts->base.rx_skb[index] = NULL;
+ return -ENOMEM;
+ }
+
+ ts->base.rx_skb[index] = skb;
+
+ paddr = pci_map_single(ts->base.pdev, skb->data,
+ SKB_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(ts->base.pdev, paddr)) {
+ pr_err("skb mapping error: %pad\n", &paddr);
+ return -ENOMEM;
+ }
+
+ rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr));
+ rxbd->info = cpu_to_le32(QTN_BD_EMPTY | wrap);
+
+ ts->base.rx_bd_w_index = index;
+
+ return 0;
+}
+
+static int topaz_alloc_rx_buffers(struct qtnf_pcie_topaz_state *ts)
+{
+ u16 i;
+ int ret = 0;
+
+ memset(ts->rx_bd_vbase, 0x0,
+ ts->base.rx_bd_num * sizeof(struct qtnf_topaz_rx_bd));
+
+ for (i = 0; i < ts->base.rx_bd_num; i++) {
+ ret = topaz_skb2rbd_attach(ts, i, 0);
+ if (ret)
+ break;
+ }
+
+ ts->rx_bd_vbase[ts->base.rx_bd_num - 1].info |=
+ cpu_to_le32(QTN_BD_WRAP);
+
+ return ret;
+}
+
+/* all rx/tx activity should have ceased before calling this function */
+static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts)
+{
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ struct qtnf_topaz_rx_bd *rxbd;
+ struct qtnf_topaz_tx_bd *txbd;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int i;
+
+ /* free rx buffers */
+ for (i = 0; i < priv->rx_bd_num; i++) {
+ if (priv->rx_skb && priv->rx_skb[i]) {
+ rxbd = &ts->rx_bd_vbase[i];
+ skb = priv->rx_skb[i];
+ paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
+ pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ priv->rx_skb[i] = NULL;
+ rxbd->addr = 0;
+ rxbd->info = 0;
+ }
+ }
+
+ /* free tx buffers */
+ for (i = 0; i < priv->tx_bd_num; i++) {
+ if (priv->tx_skb && priv->tx_skb[i]) {
+ txbd = &ts->tx_bd_vbase[i];
+ skb = priv->tx_skb[i];
+ paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
+ pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[i] = NULL;
+ txbd->addr = 0;
+ txbd->info = 0;
+ }
+ }
+}
+
+static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts,
+ unsigned int tx_bd_size)
+{
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ int ret;
+
+ if (tx_bd_size == 0)
+ tx_bd_size = TOPAZ_TX_BD_SIZE_DEFAULT;
+
+ /* check TX BD queue max length according to struct qtnf_topaz_bda */
+ if (tx_bd_size > QTN_PCIE_RC_TX_QUEUE_LEN) {
+ pr_warn("TX BD queue cannot exceed %d\n",
+ QTN_PCIE_RC_TX_QUEUE_LEN);
+ tx_bd_size = QTN_PCIE_RC_TX_QUEUE_LEN;
+ }
+
+ priv->tx_bd_num = tx_bd_size;
+ qtnf_non_posted_write(priv->tx_bd_num, &bda->bda_rc_tx_bd_num);
+ qtnf_non_posted_write(priv->rx_bd_num, &bda->bda_rc_rx_bd_num);
+
+ priv->rx_bd_w_index = 0;
+ priv->rx_bd_r_index = 0;
+
+ ret = qtnf_pcie_alloc_skb_array(priv);
+ if (ret) {
+ pr_err("failed to allocate skb array\n");
+ return ret;
+ }
+
+ ret = topaz_alloc_bd_table(ts, bda);
+ if (ret) {
+ pr_err("failed to allocate bd table\n");
+ return ret;
+ }
+
+ ret = topaz_alloc_rx_buffers(ts);
+ if (ret) {
+ pr_err("failed to allocate rx buffers\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts)
+{
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ struct qtnf_topaz_tx_bd *txbd;
+ struct sk_buff *skb;
+ unsigned long flags;
+ dma_addr_t paddr;
+ u32 tx_done_index;
+ int count = 0;
+ int i;
+
+ spin_lock_irqsave(&priv->tx_reclaim_lock, flags);
+
+ tx_done_index = readl(ts->ep_next_rx_pkt);
+ i = priv->tx_bd_r_index;
+
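+ /* if the host has queued descriptors the card has not consumed yet, ring it again */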
+ if (CIRC_CNT(priv->tx_bd_w_index, tx_done_index, priv->tx_bd_num))
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
+ TOPAZ_LH_IPC4_INT(priv->sysctl_bar));
+
+ while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
+ skb = priv->tx_skb[i];
+
+ if (likely(skb)) {
+ txbd = &ts->tx_bd_vbase[i];
+ paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr));
+ pci_unmap_single(priv->pdev, paddr, skb->len,
+ PCI_DMA_TODEVICE);
+
+ if (skb->dev) {
+ qtnf_update_tx_stats(skb->dev, skb);
+ if (unlikely(priv->tx_stopped)) {
+ qtnf_wake_all_queues(skb->dev);
+ priv->tx_stopped = 0;
+ }
+ }
+
+ dev_kfree_skb_any(skb);
+ }
+
+ priv->tx_skb[i] = NULL;
+ count++;
+
+ if (++i >= priv->tx_bd_num)
+ i = 0;
+ }
+
+ priv->tx_reclaim_done += count;
+ priv->tx_reclaim_req++;
+ priv->tx_bd_r_index = i;
+
+ spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
+}
+
+static void qtnf_try_stop_xmit(struct qtnf_bus *bus, struct net_device *ndev)
+{
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+
+ if (ndev) {
+ netif_tx_stop_all_queues(ndev);
+ ts->base.tx_stopped = 1;
+ }
+
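+ /* clear the shared wake flag; qtnf_try_wake_xmit() polls it to learn when the card has freed queue space */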
+ writel(0x0, ts->txqueue_wake);
+
+ /* sync up tx queue status before generating interrupt */
+ dma_wmb();
+
+ /* send irq to card: tx stopped */
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
+ TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
+
+ /* schedule reclaim attempt */
+ tasklet_hi_schedule(&ts->base.reclaim_tq);
+}
+
+static void qtnf_try_wake_xmit(struct qtnf_bus *bus, struct net_device *ndev)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+ int ready;
+
+ ready = readl(ts->txqueue_wake);
+ if (ready) {
+ netif_wake_queue(ndev);
+ } else {
+ /* re-send irq to card: tx stopped */
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ),
+ TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
+ }
+}
+
+static int qtnf_tx_queue_ready(struct qtnf_pcie_topaz_state *ts)
+{
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+
+ if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num)) {
+ qtnf_topaz_data_tx_reclaim(ts);
+
+ if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num)) {
+ priv->tx_full_count++;
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
+{
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ struct qtnf_topaz_tx_bd *txbd;
+ dma_addr_t skb_paddr;
+ unsigned long flags;
+ int ret = 0;
+ int len;
+ int i;
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ if (!qtnf_tx_queue_ready(ts)) {
+ qtnf_try_stop_xmit(bus, skb->dev);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ i = priv->tx_bd_w_index;
+ priv->tx_skb[i] = skb;
+ len = skb->len;
+
+ skb_paddr = pci_map_single(priv->pdev, skb->data,
+ skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
+ ret = -ENOMEM;
+ goto tx_done;
+ }
+
+ txbd = &ts->tx_bd_vbase[i];
+ txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
+
+ writel(QTN_HOST_LO32(skb_paddr), &bda->request[i].addr);
+ writel(len | QTN_PCIE_TX_VALID_PKT, &bda->request[i].info);
+
+ /* sync up descriptor updates before generating interrupt */
+ dma_wmb();
+
+ /* generate irq to card: tx done */
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ),
+ TOPAZ_LH_IPC4_INT(priv->sysctl_bar));
+
+ if (++i >= priv->tx_bd_num)
+ i = 0;
+
+ priv->tx_bd_w_index = i;
+
+tx_done:
+ if (ret) {
+ if (skb->dev)
+ skb->dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+
+ priv->tx_done_count++;
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ qtnf_topaz_data_tx_reclaim(ts);
+
+ return NETDEV_TX_OK;
+}
+
+static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data)
+{
+ struct qtnf_bus *bus = (struct qtnf_bus *)data;
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+
+ if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts))
+ return IRQ_NONE;
+
+ priv->pcie_irq_count++;
+
+ qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
+ qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);
+
+ if (napi_schedule_prep(&bus->mux_napi)) {
+ disable_rx_irqs(ts);
+ __napi_schedule(&bus->mux_napi);
+ }
+
+ tasklet_hi_schedule(&priv->reclaim_tq);
+
+ if (!priv->msi_enabled)
+ qtnf_deassert_intx(ts);
+
+ return IRQ_HANDLED;
+}
+
+static int qtnf_rx_data_ready(struct qtnf_pcie_topaz_state *ts)
+{
+ u16 index = ts->base.rx_bd_r_index;
+ struct qtnf_topaz_rx_bd *rxbd;
+ u32 descw;
+
+ rxbd = &ts->rx_bd_vbase[index];
+ descw = le32_to_cpu(rxbd->info);
+
+ if (descw & QTN_BD_EMPTY)
+ return 0;
+
+ return 1;
+}
+
+static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ struct net_device *ndev = NULL;
+ struct sk_buff *skb = NULL;
+ int processed = 0;
+ struct qtnf_topaz_rx_bd *rxbd;
+ dma_addr_t skb_paddr;
+ int consume;
+ u32 descw;
+ u32 poffset;
+ u32 psize;
+ u16 r_idx;
+ u16 w_idx;
+ int ret;
+
+ while (processed < budget) {
+ if (!qtnf_rx_data_ready(ts))
+ goto rx_out;
+
+ r_idx = priv->rx_bd_r_index;
+ rxbd = &ts->rx_bd_vbase[r_idx];
+ descw = le32_to_cpu(rxbd->info);
+
+ skb = priv->rx_skb[r_idx];
+ poffset = QTN_GET_OFFSET(descw);
+ psize = QTN_GET_LEN(descw);
+ consume = 1;
+
+ if (descw & QTN_BD_EMPTY) {
+ pr_warn("skip invalid rxbd[%d]\n", r_idx);
+ consume = 0;
+ }
+
+ if (!skb) {
+ pr_warn("skip missing rx_skb[%d]\n", r_idx);
+ consume = 0;
+ }
+
+ if (skb && (skb_tailroom(skb) < psize)) {
+ pr_err("skip packet with invalid length: %u > %u\n",
+ psize, skb_tailroom(skb));
+ consume = 0;
+ }
+
+ if (skb) {
+ skb_paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr));
+ pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ if (consume) {
+ skb_reserve(skb, poffset);
+ skb_put(skb, psize);
+ ndev = qtnf_classify_skb(bus, skb);
+ if (likely(ndev)) {
+ qtnf_update_rx_stats(ndev, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ } else {
+ pr_debug("drop untagged skb\n");
+ bus->mux_dev.stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ if (skb) {
+ bus->mux_dev.stats.rx_dropped++;
+ dev_kfree_skb_any(skb);
+ }
+ }
+
+ /* notify the card about received packets once every few packets */
+ if (((++ts->rx_pkt_count) & RX_DONE_INTR_MSK) == 0)
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RX_DONE_IRQ),
+ TOPAZ_LH_IPC4_INT(priv->sysctl_bar));
+
+ priv->rx_skb[r_idx] = NULL;
+ if (++r_idx >= priv->rx_bd_num)
+ r_idx = 0;
+
+ priv->rx_bd_r_index = r_idx;
+
+ /* replace the processed buffer with a new one */
+ w_idx = priv->rx_bd_w_index;
+ while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
+ priv->rx_bd_num) > 0) {
+ if (++w_idx >= priv->rx_bd_num)
+ w_idx = 0;
+
+ ret = topaz_skb2rbd_attach(ts, w_idx,
+ descw & QTN_BD_WRAP);
+ if (ret) {
+ pr_err("failed to allocate new rx_skb[%d]\n",
+ w_idx);
+ break;
+ }
+ }
+
+ processed++;
+ }
+
+rx_out:
+ if (processed < budget) {
+ napi_complete(napi);
+ enable_rx_irqs(ts);
+ }
+
+ return processed;
+}
+
+static void
+qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+
+ qtnf_try_wake_xmit(bus, ndev);
+ tasklet_hi_schedule(&ts->base.reclaim_tq);
+}
+
+static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+
+ napi_enable(&bus->mux_napi);
+ enable_rx_irqs(ts);
+}
+
+static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+
+ disable_rx_irqs(ts);
+ napi_disable(&bus->mux_napi);
+}
+
+static const struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = {
+ /* control path methods */
+ .control_tx = qtnf_pcie_control_tx,
+
+ /* data path methods */
+ .data_tx = qtnf_pcie_data_tx,
+ .data_tx_timeout = qtnf_pcie_data_tx_timeout,
+ .data_rx_start = qtnf_pcie_data_rx_start,
+ .data_rx_stop = qtnf_pcie_data_rx_stop,
+};
+
+static int qtnf_dbg_irq_stats(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+
+ seq_printf(s, "pcie_irq_count(%u)\n", ts->base.pcie_irq_count);
+
+ return 0;
+}
+
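+/* debugfs: dump TX/RX ring indices and the queue lengths derived from them. */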
+static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
+{
+ struct qtnf_bus *bus = dev_get_drvdata(s->private);
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+ struct qtnf_pcie_bus_priv *priv = &ts->base;
+ u32 tx_done_index = readl(ts->ep_next_rx_pkt);
+
+ seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count);
+ seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
+ seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
+ seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
+
+ seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
+ seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
+ seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index);
+
+ seq_printf(s, "tx host queue len(%u)\n",
+ CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index,
+ priv->tx_bd_num));
+ seq_printf(s, "tx reclaim queue len(%u)\n",
+ CIRC_CNT(tx_done_index, priv->tx_bd_r_index,
+ priv->tx_bd_num));
+ seq_printf(s, "tx card queue len(%u)\n",
+ CIRC_CNT(priv->tx_bd_w_index, tx_done_index,
+ priv->tx_bd_num));
+
+ seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index);
+ seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index);
+ seq_printf(s, "rx alloc queue len(%u)\n",
+ CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
+ priv->rx_bd_num));
+
+ return 0;
+}
+
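+/* Clear the advertised DMA offset if the card flagged it as invalid. */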
+static void qtnf_reset_dma_offset(struct qtnf_pcie_topaz_state *ts)
+{
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ u32 offset = readl(&bda->bda_dma_offset);
+
+ if ((offset & PCIE_DMA_OFFSET_ERROR_MASK) != PCIE_DMA_OFFSET_ERROR)
+ return;
+
+ writel(0x0, &bda->bda_dma_offset);
+}
+
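+/* Endianness handshake: write a known pattern for the card to examine,
+ * wait for the card to acknowledge via the post status word, then check
+ * that the card reported the expected (little-endian) result.
+ */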
+static int qtnf_pcie_endian_detect(struct qtnf_pcie_topaz_state *ts)
+{
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ u32 timeout = 0;
+ u32 endian;
+ int ret = 0;
+
+ writel(QTN_PCI_ENDIAN_DETECT_DATA, &bda->bda_pci_endian);
+
+ /* flush endian modifications before status update */
+ dma_wmb();
+
+ writel(QTN_PCI_ENDIAN_VALID_STATUS, &bda->bda_pci_pre_status);
+
+ while (readl(&bda->bda_pci_post_status) !=
+ QTN_PCI_ENDIAN_VALID_STATUS) {
+ usleep_range(1000, 1200);
+ if (++timeout > QTN_FW_DL_TIMEOUT_MS) {
+ pr_err("card endianness detection timed out\n");
+ ret = -ETIMEDOUT;
+ goto endian_out;
+ }
+ }
+
+ /* do not read before status is updated */
+ dma_rmb();
+
+ endian = readl(&bda->bda_pci_endian);
+ WARN(endian != QTN_PCI_LITTLE_ENDIAN,
+ "%s: unexpected card endianness", __func__);
+
+endian_out:
+ writel(0, &bda->bda_pci_pre_status);
+ writel(0, &bda->bda_pci_post_status);
+ writel(0, &bda->bda_pci_endian);
+
+ return ret;
+}
+
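+/* Early EP init: endianness handshake, MSI and boot mode flags, then
+ * wait for the card to report it is ready to boot.
+ */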
+static int qtnf_pre_init_ep(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ u32 flags;
+ int ret;
+
+ ret = qtnf_pcie_endian_detect(ts);
+ if (ret < 0) {
+ pr_err("failed to detect card endianness\n");
+ return ret;
+ }
+
+ writeb(ts->base.msi_enabled, &ts->bda->bda_rc_msi_enabled);
+ qtnf_reset_dma_offset(ts);
+
+ /* notify card about driver type and boot mode */
+ flags = readl(&bda->bda_flags) | QTN_BDA_HOST_QLINK_DRV;
+
+ if (ts->base.flashboot)
+ flags |= QTN_BDA_FLASH_BOOT;
+ else
+ flags &= ~QTN_BDA_FLASH_BOOT;
+
+ writel(flags, &bda->bda_flags);
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_RDY);
+ if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_RDY,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("card is not ready to boot...\n");
+ return -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
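+/* Post-FW init: set up RX interrupts, wait for the QLink handshake to
+ * complete, then enable the PCIe interrupt line.
+ */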
+static int qtnf_post_init_ep(struct qtnf_pcie_topaz_state *ts)
+{
+ struct pci_dev *pdev = ts->base.pdev;
+
+ setup_rx_irqs(ts);
+ disable_rx_irqs(ts);
+
+ if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_QLINK_DONE,
+ QTN_FW_QLINK_TIMEOUT_MS))
+ return -ETIMEDOUT;
+
+ enable_irq(pdev->irq);
+ return 0;
+}
+
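+/* Upload a firmware image to the card through a DMA-coherent bounce
+ * buffer, one block at a time, handshaking the bootstate after every
+ * block; a zero-sized block marks the end of the transfer.
+ */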
+static int
+qtnf_ep_fw_load(struct qtnf_pcie_topaz_state *ts, const u8 *fw, u32 fw_size)
+{
+ struct qtnf_topaz_bda __iomem *bda = ts->bda;
+ struct pci_dev *pdev = ts->base.pdev;
+ u32 remaining = fw_size;
+ u8 *curr = (u8 *)fw;
+ u32 blksize;
+ u32 nblocks;
+ u32 offset;
+ u32 count;
+ u32 size;
+ dma_addr_t paddr;
+ void *data;
+ int ret = 0;
+
+ pr_debug("FW upload started: fw_addr = 0x%p, size=%d\n", fw, fw_size);
+
+ blksize = ts->base.fw_blksize;
+
+ if (blksize < PAGE_SIZE)
+ blksize = PAGE_SIZE;
+
+ while (blksize >= PAGE_SIZE) {
+ pr_debug("allocating %u bytes to upload FW\n", blksize);
+ data = dma_alloc_coherent(&pdev->dev, blksize,
+ &paddr, GFP_KERNEL);
+ if (data)
+ break;
+ blksize /= 2;
+ }
+
+ if (!data) {
+ pr_err("failed to allocate DMA buffer for FW upload\n");
+ ret = -ENOMEM;
+ goto fw_load_out;
+ }
+
+ nblocks = NBLOCKS(fw_size, blksize);
+ offset = readl(&bda->bda_dma_offset);
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_LOAD);
+ if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_EP_RDY,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("card is not ready to download FW\n");
+ ret = -ETIMEDOUT;
+ goto fw_load_map;
+ }
+
+ for (count = 0; count < nblocks; count++) {
+ size = (remaining > blksize) ? blksize : remaining;
+
+ memcpy(data, curr, size);
+ qtnf_non_posted_write(paddr + offset, &bda->bda_img);
+ qtnf_non_posted_write(size, &bda->bda_img_size);
+
+ pr_debug("chunk[%u] VA[0x%p] PA[%pad] sz[%u]\n",
+ count, (void *)curr, &paddr, size);
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
+ if (qtnf_poll_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_BLOCK_DONE,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("confirmation for block #%d timed out\n", count);
+ ret = -ETIMEDOUT;
+ goto fw_load_map;
+ }
+
+ remaining = (remaining < size) ? remaining : (remaining - size);
+ curr += size;
+ }
+
+ /* upload completion mark: zero-sized block */
+ qtnf_non_posted_write(0, &bda->bda_img);
+ qtnf_non_posted_write(0, &bda->bda_img_size);
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY);
+ if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_DONE,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("confirmation for the last block timed out\n");
+ ret = -ETIMEDOUT;
+ goto fw_load_map;
+ }
+
+ /* RC is done */
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_END);
+ if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_LOAD_DONE,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("confirmation for FW upload completion timed out\n");
+ ret = -ETIMEDOUT;
+ goto fw_load_map;
+ }
+
+ pr_debug("FW upload completed: totally sent %d blocks\n", count);
+
+fw_load_map:
+ dma_free_coherent(&pdev->dev, blksize, data, paddr);
+
+fw_load_out:
+ return ret;
+}
+
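+/* Wait for the card to accept a download, then fetch the image via
+ * request_firmware() and stream it to the card.
+ */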
+static int qtnf_topaz_fw_upload(struct qtnf_pcie_topaz_state *ts,
+ const char *fwname)
+{
+ const struct firmware *fw;
+ struct pci_dev *pdev = ts->base.pdev;
+ int ret;
+
+ if (qtnf_poll_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_LOAD_RDY,
+ QTN_FW_DL_TIMEOUT_MS)) {
+ pr_err("%s: card is not ready\n", fwname);
+ return -ETIMEDOUT;
+ }
+
+ pr_info("starting firmware upload: %s\n", fwname);
+
+ ret = request_firmware(&fw, fwname, &pdev->dev);
+ if (ret < 0) {
+ pr_err("%s: request_firmware error %d\n", fwname, ret);
+ return ret;
+ }
+
+ ret = qtnf_ep_fw_load(ts, fw->data, fw->size);
+ release_firmware(fw);
+
+ if (ret)
+ pr_err("%s: FW upload error\n", fwname);
+
+ return ret;
+}
+
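+/* Deferred FW bringup: optionally upload the bootloader first, then
+ * either boot from flash or upload and start the firmware image, and
+ * finally complete endpoint init and register debugfs entries.
+ */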
+static void qtnf_topaz_fw_work_handler(struct work_struct *work)
+{
+ struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
+ struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
+ int ret;
+ int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT;
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT);
+
+ if (bootloader_needed) {
+ ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_BOOTLD_NAME);
+ if (ret)
+ goto fw_load_exit;
+
+ ret = qtnf_pre_init_ep(bus);
+ if (ret)
+ goto fw_load_exit;
+
+ qtnf_set_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_TARGET_BOOT);
+ }
+
+ if (ts->base.flashboot) {
+ pr_info("booting firmware from flash\n");
+
+ ret = qtnf_poll_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_FLASH_BOOT,
+ QTN_FW_DL_TIMEOUT_MS);
+ if (ret)
+ goto fw_load_exit;
+ } else {
+ ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_FW_NAME);
+ if (ret)
+ goto fw_load_exit;
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_START);
+ ret = qtnf_poll_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_CONFIG,
+ QTN_FW_QLINK_TIMEOUT_MS);
+ if (ret) {
+ pr_err("FW bringup timed out\n");
+ goto fw_load_exit;
+ }
+
+ qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_RUN);
+ ret = qtnf_poll_state(&ts->bda->bda_bootstate,
+ QTN_BDA_FW_RUNNING,
+ QTN_FW_QLINK_TIMEOUT_MS);
+ if (ret) {
+ pr_err("card bringup timed out\n");
+ goto fw_load_exit;
+ }
+ }
+
+ pr_info("firmware is up and running\n");
+
+ ret = qtnf_post_init_ep(ts);
+ if (ret)
+ pr_err("FW runtime failure\n");
+
+fw_load_exit:
+ qtnf_pcie_fw_boot_done(bus, ret ? false : true);
+
+ if (ret == 0) {
+ qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
+ qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
+ }
+}
+
+static void qtnf_reclaim_tasklet_fn(unsigned long data)
+{
+ struct qtnf_pcie_topaz_state *ts = (void *)data;
+
+ qtnf_topaz_data_tx_reclaim(ts);
+}
+
+static u64 qtnf_topaz_dma_mask_get(void)
+{
+ return DMA_BIT_MASK(32);
+}
+
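+/* Bus probe: request the PCIe IRQ, run early card init, set up DMA
+ * descriptor rings, the reclaim tasklet, NAPI and shared-memory IPC.
+ */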
+static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, unsigned int tx_bd_num)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+ struct pci_dev *pdev = ts->base.pdev;
+ struct qtnf_shm_ipc_int ipc_int;
+ unsigned long irqflags;
+ int ret;
+
+ bus->bus_ops = &qtnf_pcie_topaz_bus_ops;
+ INIT_WORK(&bus->fw_work, qtnf_topaz_fw_work_handler);
+ ts->bda = ts->base.epmem_bar;
+
+ /* assign host msi irq before card init */
+ if (ts->base.msi_enabled)
+ irqflags = IRQF_NOBALANCING;
+ else
+ irqflags = IRQF_NOBALANCING | IRQF_SHARED;
+
+ ret = devm_request_irq(&pdev->dev, pdev->irq,
+ &qtnf_pcie_topaz_interrupt,
+ irqflags, "qtnf_topaz_irq", (void *)bus);
+ if (ret) {
+ pr_err("failed to request pcie irq %d\n", pdev->irq);
+ return ret;
+ }
+
+ disable_irq(pdev->irq);
+
+ ret = qtnf_pre_init_ep(bus);
+ if (ret) {
+ pr_err("failed to init card\n");
+ return ret;
+ }
+
+ ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num);
+ if (ret) {
+ pr_err("PCIE xfer init failed\n");
+ return ret;
+ }
+
+ tasklet_init(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn,
+ (unsigned long)ts);
+ netif_napi_add(&bus->mux_dev, &bus->mux_napi,
+ qtnf_topaz_rx_poll, 10);
+
+ ipc_int.fn = qtnf_topaz_ipc_gen_ep_int;
+ ipc_int.arg = ts;
+ qtnf_pcie_init_shm_ipc(&ts->base, &ts->bda->bda_shm_reg1,
+ &ts->bda->bda_shm_reg2, &ipc_int);
+
+ return 0;
+}
+
+static void qtnf_pcie_topaz_remove(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+
+ qtnf_topaz_reset_ep(ts);
+ qtnf_topaz_free_xfer_buffers(ts);
+}
+
+#ifdef CONFIG_PM_SLEEP
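+/* PM callbacks notify the EP about host power transitions by writing the
+ * target D-state and ringing the PM doorbell interrupt.
+ */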
+static int qtnf_pcie_topaz_suspend(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+ struct pci_dev *pdev = ts->base.pdev;
+
+ writel((u32 __force)PCI_D3hot, ts->ep_pmstate);
+ dma_wmb();
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
+ TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int qtnf_pcie_topaz_resume(struct qtnf_bus *bus)
+{
+ struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus);
+ struct pci_dev *pdev = ts->base.pdev;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ writel((u32 __force)PCI_D0, ts->ep_pmstate);
+ dma_wmb();
+ writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ),
+ TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar));
+
+ return 0;
+}
+#endif
+
+struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev)
+{
+ struct qtnf_bus *bus;
+ struct qtnf_pcie_topaz_state *ts;
+
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ts), GFP_KERNEL);
+ if (!bus)
+ return NULL;
+
+ ts = get_bus_priv(bus);
+ ts->base.probe_cb = qtnf_pcie_topaz_probe;
+ ts->base.remove_cb = qtnf_pcie_topaz_remove;
+ ts->base.dma_mask_get_cb = qtnf_topaz_dma_mask_get;
+#ifdef CONFIG_PM_SLEEP
+ ts->base.resume_cb = qtnf_pcie_topaz_resume;
+ ts->base.suspend_cb = qtnf_pcie_topaz_suspend;
+#endif
+
+ return bus;
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h
new file mode 100644
index 000000000000..eb30e9d08de2
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018 Quantenna Communications */
+
+#ifndef _QTN_FMAC_PCIE_IPC_H_
+#define _QTN_FMAC_PCIE_IPC_H_
+
+#include <linux/types.h>
+
+#include "shm_ipc_defs.h"
+
+/* EP/RC status and flags */
+#define QTN_BDA_PCIE_INIT 0x01
+#define QTN_BDA_PCIE_RDY 0x02
+#define QTN_BDA_FW_LOAD_RDY 0x03
+#define QTN_BDA_FW_LOAD_DONE 0x04
+#define QTN_BDA_FW_START 0x05
+#define QTN_BDA_FW_RUN 0x06
+#define QTN_BDA_FW_HOST_RDY 0x07
+#define QTN_BDA_FW_TARGET_RDY 0x11
+#define QTN_BDA_FW_TARGET_BOOT 0x12
+#define QTN_BDA_FW_FLASH_BOOT 0x13
+#define QTN_BDA_FW_QLINK_DONE 0x14
+#define QTN_BDA_FW_HOST_LOAD 0x08
+#define QTN_BDA_FW_BLOCK_DONE 0x09
+#define QTN_BDA_FW_BLOCK_RDY 0x0A
+#define QTN_BDA_FW_EP_RDY 0x0B
+#define QTN_BDA_FW_BLOCK_END 0x0C
+#define QTN_BDA_FW_CONFIG 0x0D
+#define QTN_BDA_FW_RUNNING 0x0E
+#define QTN_BDA_PCIE_FAIL 0x82
+#define QTN_BDA_FW_LOAD_FAIL 0x85
+
+#define QTN_BDA_RCMODE BIT(1)
+#define QTN_BDA_MSI BIT(2)
+#define QTN_BDA_HOST_CALCMD BIT(3)
+#define QTN_BDA_FLASH_PRESENT BIT(4)
+#define QTN_BDA_FLASH_BOOT BIT(5)
+#define QTN_BDA_XMIT_UBOOT BIT(6)
+#define QTN_BDA_HOST_QLINK_DRV BIT(7)
+#define QTN_BDA_TARGET_FBOOT_ERR BIT(8)
+#define QTN_BDA_TARGET_FWLOAD_ERR BIT(9)
+#define QTN_BDA_HOST_NOFW_ERR BIT(12)
+#define QTN_BDA_HOST_MEMALLOC_ERR BIT(13)
+#define QTN_BDA_HOST_MEMMAP_ERR BIT(14)
+#define QTN_BDA_VER(x) (((x) >> 4) & 0xFF)
+#define QTN_BDA_ERROR_MASK 0xFF00
+
+/* registers and shmem address macros */
+#if BITS_PER_LONG == 64
+#define QTN_HOST_HI32(a) ((u32)(((u64)(a)) >> 32))
+#define QTN_HOST_LO32(a) ((u32)(((u64)(a)) & 0xffffffffUL))
+#define QTN_HOST_ADDR(h, l) ((((u64)(h)) << 32) | ((u64)(l)))
+#elif BITS_PER_LONG == 32
+#define QTN_HOST_HI32(a) 0
+#define QTN_HOST_LO32(a) ((u32)(((u32)(a)) & 0xffffffffUL))
+#define QTN_HOST_ADDR(h, l) ((u32)(l))
+#else
+#error Unexpected BITS_PER_LONG value
+#endif
+
+#define QTN_PCIE_BDA_VERSION 0x1001
+
+#define PCIE_BDA_NAMELEN 32
+
+#define QTN_PCIE_RC_TX_QUEUE_LEN 256
+#define QTN_PCIE_TX_VALID_PKT 0x80000000
+#define QTN_PCIE_PKT_LEN_MASK 0xffff
+
+#define QTN_BD_EMPTY ((uint32_t)0x00000001)
+#define QTN_BD_WRAP ((uint32_t)0x00000002)
+#define QTN_BD_MASK_LEN ((uint32_t)0xFFFF0000)
+#define QTN_BD_MASK_OFFSET ((uint32_t)0x0000FF00)
+
+#define QTN_GET_LEN(x) (((x) >> 16) & 0xFFFF)
+#define QTN_GET_OFFSET(x) (((x) >> 8) & 0xFF)
+#define QTN_SET_LEN(len) (((len) & 0xFFFF) << 16)
+#define QTN_SET_OFFSET(of) (((of) & 0xFF) << 8)
+
+#define RX_DONE_INTR_MSK ((0x1 << 6) - 1)
+
+#define PCIE_DMA_OFFSET_ERROR 0xFFFF
+#define PCIE_DMA_OFFSET_ERROR_MASK 0xFFFF
+
+#define QTN_PCI_ENDIAN_DETECT_DATA 0x12345678
+#define QTN_PCI_ENDIAN_REVERSE_DATA 0x78563412
+#define QTN_PCI_ENDIAN_VALID_STATUS 0x3c3c3c3c
+#define QTN_PCI_ENDIAN_INVALID_STATUS 0
+#define QTN_PCI_LITTLE_ENDIAN 0
+#define QTN_PCI_BIG_ENDIAN 0xffffffff
+
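+/* Number of blksize-sized blocks needed to cover size bytes (rounds up). */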
+#define NBLOCKS(size, blksize) \
+ ((size) / (blksize) + (((size) % (blksize) > 0) ? 1 : 0))
+
+#endif /* _QTN_FMAC_PCIE_IPC_H_ */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h
new file mode 100644
index 000000000000..4782e1ed3c2c
--- /dev/null
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018 Quantenna Communications */
+
+#ifndef __TOPAZ_PCIE_H
+#define __TOPAZ_PCIE_H
+
+/* Topaz PCIe DMA registers */
+#define PCIE_DMA_WR_INTR_STATUS(base) ((base) + 0x9bc)
+#define PCIE_DMA_WR_INTR_MASK(base) ((base) + 0x9c4)
+#define PCIE_DMA_WR_INTR_CLR(base) ((base) + 0x9c8)
+#define PCIE_DMA_WR_ERR_STATUS(base) ((base) + 0x9cc)
+#define PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(base) ((base) + 0x9D0)
+#define PCIE_DMA_WR_DONE_IMWR_ADDR_HIGH(base) ((base) + 0x9d4)
+
+#define PCIE_DMA_RD_INTR_STATUS(base) ((base) + 0x310)
+#define PCIE_DMA_RD_INTR_MASK(base) ((base) + 0x319)
+#define PCIE_DMA_RD_INTR_CLR(base) ((base) + 0x31c)
+#define PCIE_DMA_RD_ERR_STATUS_LOW(base) ((base) + 0x324)
+#define PCIE_DMA_RD_ERR_STATUS_HIGH(base) ((base) + 0x328)
+#define PCIE_DMA_RD_DONE_IMWR_ADDR_LOW(base) ((base) + 0x33c)
+#define PCIE_DMA_RD_DONE_IMWR_ADDR_HIGH(base) ((base) + 0x340)
+
+/* Topaz LHost IPC4 interrupt */
+#define TOPAZ_LH_IPC4_INT(base) ((base) + 0x13C)
+#define TOPAZ_LH_IPC4_INT_MASK(base) ((base) + 0x140)
+
+#define TOPAZ_RC_TX_DONE_IRQ (0)
+#define TOPAZ_RC_RST_EP_IRQ (1)
+#define TOPAZ_RC_TX_STOP_IRQ (2)
+#define TOPAZ_RC_RX_DONE_IRQ (3)
+#define TOPAZ_RC_PM_EP_IRQ (4)
+
+/* Topaz LHost M2L interrupt */
+#define TOPAZ_CTL_M2L_INT(base) ((base) + 0x2C)
+#define TOPAZ_CTL_M2L_INT_MASK(base) ((base) + 0x30)
+
+#define TOPAZ_RC_CTRL_IRQ (6)
+
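+/* Build an IPC doorbell word with the IRQ bit set in both half-words. */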
+#define TOPAZ_IPC_IRQ_WORD(irq) (BIT(irq) | BIT(irq + 16))
+
+/* PCIe legacy INTx */
+#define TOPAZ_PCIE_CFG0_OFFSET (0x6C)
+#define TOPAZ_ASSERT_INTX BIT(9)
+
+#endif /* __TOPAZ_PCIE_H */
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
index 1fe798a9a667..40295a511224 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h
@@ -23,7 +23,7 @@
/* PCIE Device IDs */
-#define PCIE_DEVICE_ID_QTN_PEARL (0x0008)
+#define PCIE_DEVICE_ID_QSR (0x0008)
#define QTN_REG_SYS_CTRL_CSR 0x14
#define QTN_CHIP_ID_MASK 0xF0
@@ -35,6 +35,8 @@
/* FW names */
#define QTN_PCI_PEARL_FW_NAME "qtn/fmac_qsr10g.img"
+#define QTN_PCI_TOPAZ_FW_NAME "qtn/fmac_qsr1000.img"
+#define QTN_PCI_TOPAZ_BOOTLD_NAME "qtn/uboot_qsr1000.img"
static inline unsigned int qtnf_chip_id_get(const void __iomem *regs_base)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c
index e745733ba417..3bc96b264769 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.c
@@ -15,6 +15,7 @@
*/
#include "util.h"
+#include "qtn_hw_ids.h"
void qtnf_sta_list_init(struct qtnf_sta_list *list)
{
@@ -116,3 +117,20 @@ void qtnf_sta_list_free(struct qtnf_sta_list *list)
INIT_LIST_HEAD(&list->head);
}
+
+const char *qtnf_chipid_to_string(unsigned long chip_id)
+{
+ switch (chip_id) {
+ case QTN_CHIP_ID_TOPAZ:
+ return "Topaz";
+ case QTN_CHIP_ID_PEARL:
+ return "Pearl revA";
+ case QTN_CHIP_ID_PEARL_B:
+ return "Pearl revB";
+ case QTN_CHIP_ID_PEARL_C:
+ return "Pearl revC";
+ default:
+ return "unknown";
+ }
+}
+EXPORT_SYMBOL_GPL(qtnf_chipid_to_string);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h
index 0d4d92b11540..b8744baac332 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/util.h
@@ -20,6 +20,8 @@
#include <linux/kernel.h>
#include "core.h"
+const char *qtnf_chipid_to_string(unsigned long chip_id);
+
void qtnf_sta_list_init(struct qtnf_sta_list *list);
struct qtnf_sta_node *qtnf_sta_list_lookup(struct qtnf_sta_list *list,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index 0bc8b0249c57..49a732798395 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1302,7 +1302,7 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
break;
case 2: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Don't break, this is a failed frame! */
+ /* Fall through - this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index 1ff5434798ec..e8e7bfe1ba9b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -1430,7 +1430,7 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
break;
case 2: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Don't break, this is a failed frame! */
+ /* Fall through - this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 9e7b8933d30c..0e95555aec62 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -2482,6 +2482,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.tx_chain_num) {
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ /* fall through */
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
break;
@@ -2490,6 +2491,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.rx_chain_num) {
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ /* fall through */
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
break;
@@ -9457,8 +9459,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
switch (rx_chains) {
case 3:
spec->ht.mcs.rx_mask[2] = 0xff;
+ /* fall through */
case 2:
spec->ht.mcs.rx_mask[1] = 0xff;
+ /* fall through */
case 1:
spec->ht.mcs.rx_mask[0] = 0xff;
spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index cb0e1196f2c2..4c5de8fc8f12 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -2226,7 +2226,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
case 6: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Don't break, this is a failed frame! */
+ /* Fall through - this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 08c607c031bc..33ad87528d9a 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -889,8 +889,10 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev,
switch (ccsindex = get_free_tx_ccs(local)) {
case ECCSBUSY:
pr_debug("ray_hw_xmit tx_ccs table busy\n");
+ /* fall through */
case ECCSFULL:
pr_debug("ray_hw_xmit No free tx ccs\n");
+ /* fall through */
case ECARDGONE:
netif_stop_queue(dev);
return XMIT_NO_CCS;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index cec37787ecf8..1a2ea8b47714 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -444,12 +444,13 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev)
skb_queue_tail(&priv->rx_queue, skb);
usb_anchor_urb(entry, &priv->anchored);
ret = usb_submit_urb(entry, GFP_KERNEL);
- usb_put_urb(entry);
if (ret) {
skb_unlink(skb, &priv->rx_queue);
usb_unanchor_urb(entry);
+ usb_put_urb(entry);
goto err;
}
+ usb_put_urb(entry);
}
return ret;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 56040b181cf5..2bd43057dda3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1153,6 +1153,7 @@ void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw)
switch (hw->conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
ht = false;
+ /* fall through */
case NL80211_CHAN_WIDTH_20:
opmode |= BW_OPMODE_20MHZ;
rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
@@ -1280,6 +1281,7 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
switch (hw->conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
ht = false;
+ /* fall through */
case NL80211_CHAN_WIDTH_20:
rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_20;
subchannel = 0;
@@ -1748,9 +1750,11 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
case 3:
priv->ep_tx_low_queue = 1;
priv->ep_tx_count++;
+ /* fall through */
case 2:
priv->ep_tx_normal_queue = 1;
priv->ep_tx_count++;
+ /* fall through */
case 1:
priv->ep_tx_high_queue = 1;
priv->ep_tx_count++;
@@ -5688,6 +5692,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case WLAN_CIPHER_SUITE_TKIP:
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ break;
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index f4122c8fdd97..ef9b502ce576 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -2289,6 +2289,7 @@ void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
if (rtl_c2h_fast_cmd(hw, skb)) {
rtl_c2h_content_parsing(hw, skb);
+ kfree_skb(skb);
return;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 6fbf8845a2ab..d748aab66aa2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -292,11 +292,9 @@ bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code,
static void halbtc_leave_lps(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv;
- struct rtl_ps_ctl *ppsc;
bool ap_enable = false;
rtlpriv = btcoexist->adapter;
- ppsc = rtl_psc(rtlpriv);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
&ap_enable);
@@ -315,11 +313,9 @@ static void halbtc_leave_lps(struct btc_coexist *btcoexist)
static void halbtc_enter_lps(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv;
- struct rtl_ps_ctl *ppsc;
bool ap_enable = false;
rtlpriv = btcoexist->adapter;
- ppsc = rtl_psc(rtlpriv);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE,
&ap_enable);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index 4c1f8b08fc10..14dcb0816bc0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -29,7 +29,6 @@
#include "../stats.h"
#include "reg.h"
#include "def.h"
-#include "phy.h"
#include "trx.h"
#include "led.h"
#include "dm.h"
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 85cedd083d2b..75bfa9dfef4a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -173,7 +173,7 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
rtl_read_byte(rtlpriv, FW_MAC1_READY));
}
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08ul\n",
+ "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
rtl_read_dword(rtlpriv, REG_MCUFWDL));
return -1;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 5cf29f5a4b54..3f3327878b51 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -509,13 +509,10 @@ bool rtl8723e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
int i;
bool rtstatus = true;
u32 *radioa_array_table;
- u32 *radiob_array_table;
- u16 radioa_arraylen, radiob_arraylen;
+ u16 radioa_arraylen;
radioa_arraylen = RTL8723ERADIOA_1TARRAYLENGTH;
radioa_array_table = RTL8723E_RADIOA_1TARRAY;
- radiob_arraylen = RTL8723E_RADIOB_1TARRAYLENGTH;
- radiob_array_table = RTL8723E_RADIOB_1TARRAY;
rtstatus = true;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
index 61e86045f15c..1bbee0bfac23 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c
@@ -475,10 +475,6 @@ u32 RTL8723E_RADIOA_1TARRAY[RTL8723ERADIOA_1TARRAYLENGTH] = {
0x000, 0x00030159,
};
-u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH] = {
- 0x0,
-};
-
u32 RTL8723EMAC_ARRAY[RTL8723E_MACARRAYLENGTH] = {
0x420, 0x00000080,
0x423, 0x00000000,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
index 57a548ceba7d..a044f3c456fa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h
@@ -36,8 +36,6 @@ extern u32 RTL8723EPHY_REG_1TARRAY[RTL8723E_PHY_REG_1TARRAY_LENGTH];
extern u32 RTL8723EPHY_REG_ARRAY_PG[RTL8723E_PHY_REG_ARRAY_PGLENGTH];
#define RTL8723ERADIOA_1TARRAYLENGTH 282
extern u32 RTL8723E_RADIOA_1TARRAY[RTL8723ERADIOA_1TARRAYLENGTH];
-#define RTL8723E_RADIOB_1TARRAYLENGTH 1
-extern u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH];
#define RTL8723E_MACARRAYLENGTH 172
extern u32 RTL8723EMAC_ARRAY[RTL8723E_MACARRAYLENGTH];
#define RTL8723E_AGCTAB_1TARRAYLENGTH 320
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 176deb2b5386..a75451c246fd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -394,6 +394,7 @@ static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
break;
}
+ /* fall through */
case 0:
case 2:
default:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index d7960dd5bf1a..0f2b7c619918 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -29,7 +29,6 @@
#include "../stats.h"
#include "reg.h"
#include "def.h"
-#include "phy.h"
#include "trx.h"
#include "led.h"
#include "dm.h"
@@ -307,14 +306,12 @@ static void translate_rx_signal_stuff(struct ieee80211_hw *hw,
u8 *praddr;
u8 *psaddr;
__le16 fc;
- u16 type;
bool packet_matchbssid, packet_toself, packet_beacon;
tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
hdr = (struct ieee80211_hdr *)tmp_buf;
fc = hdr->frame_control;
- type = WLAN_FC_GET_TYPE(hdr->frame_control);
praddr = hdr->addr1;
psaddr = ieee80211_get_SA(hdr);
ether_addr_copy(pstatus->psaddr, psaddr);
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 612c211e21a1..449f6d23c5e3 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -210,7 +210,7 @@ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter)
}
/* This tells SDIO FIFO when to start read to host */
- rsi_dbg(INIT_ZONE, "%s: Initialzing SDIO read start level\n", __func__);
+ rsi_dbg(INIT_ZONE, "%s: Initializing SDIO read start level\n", __func__);
byte = 0x24;
status = rsi_sdio_write_register(adapter,
@@ -223,7 +223,7 @@ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter)
return -1;
}
- rsi_dbg(INIT_ZONE, "%s: Initialzing FIFO ctrl registers\n", __func__);
+ rsi_dbg(INIT_ZONE, "%s: Initializing FIFO ctrl registers\n", __func__);
byte = (128 - 32);
status = rsi_sdio_write_register(adapter,
diff --git a/drivers/net/wireless/st/cw1200/debug.c b/drivers/net/wireless/st/cw1200/debug.c
index 295cb1a29f25..2231ba08bc1f 100644
--- a/drivers/net/wireless/st/cw1200/debug.c
+++ b/drivers/net/wireless/st/cw1200/debug.c
@@ -289,19 +289,7 @@ static int cw1200_status_show(struct seq_file *seq, void *v)
return 0;
}
-static int cw1200_status_open(struct inode *inode, struct file *file)
-{
- return single_open(file, &cw1200_status_show,
- inode->i_private);
-}
-
-static const struct file_operations fops_status = {
- .open = cw1200_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(cw1200_status);
static int cw1200_counters_show(struct seq_file *seq, void *v)
{
@@ -345,19 +333,7 @@ static int cw1200_counters_show(struct seq_file *seq, void *v)
return 0;
}
-static int cw1200_counters_open(struct inode *inode, struct file *file)
-{
- return single_open(file, &cw1200_counters_show,
- inode->i_private);
-}
-
-static const struct file_operations fops_counters = {
- .open = cw1200_counters_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(cw1200_counters);
static ssize_t cw1200_wsm_dumps(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
@@ -399,11 +375,11 @@ int cw1200_debug_init(struct cw1200_common *priv)
goto err;
if (!debugfs_create_file("status", 0400, d->debugfs_phy,
- priv, &fops_status))
+ priv, &cw1200_status_fops))
goto err;
if (!debugfs_create_file("counters", 0400, d->debugfs_phy,
- priv, &fops_counters))
+ priv, &cw1200_counters_fops))
goto err;
if (!debugfs_create_file("wsm_dumps", 0200, d->debugfs_phy,
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index 67213f11acbd..0a9eac93dd01 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -78,6 +78,10 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
return -EINVAL;
+ /* will be unlocked in cw1200_scan_work() */
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+
frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
req->ie_len);
if (!frame.skb)
@@ -86,19 +90,15 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
if (req->ie_len)
skb_put_data(frame.skb, req->ie, req->ie_len);
- /* will be unlocked in cw1200_scan_work() */
- down(&priv->scan.lock);
- mutex_lock(&priv->conf_mutex);
-
ret = wsm_set_template_frame(priv, &frame);
if (!ret) {
/* Host want to be the probe responder. */
ret = wsm_set_probe_responder(priv, true);
}
if (ret) {
+ dev_kfree_skb(frame.skb);
mutex_unlock(&priv->conf_mutex);
up(&priv->scan.lock);
- dev_kfree_skb(frame.skb);
return ret;
}
@@ -120,10 +120,9 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
++priv->scan.n_ssids;
}
- mutex_unlock(&priv->conf_mutex);
-
if (frame.skb)
dev_kfree_skb(frame.skb);
+ mutex_unlock(&priv->conf_mutex);
queue_work(priv->workqueue, &priv->scan.work);
return 0;
}
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 38678e9a0562..8dae92a79fe1 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -1123,7 +1123,7 @@ int cw1200_setup_mac(struct cw1200_common *priv)
*
* NOTE2: RSSI based reports have been switched to RCPI, since
* FW has a bug and RSSI reported values are not stable,
- * what can leads to signal level oscilations in user-end applications
+ * which can lead to signal level oscillations in user-end applications
*/
struct wsm_rcpi_rssi_threshold threshold = {
.rssiRcpiMode = WSM_RCPI_RSSI_THRESHOLD_ENABLE |
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index dbe78d8491ef..7f34ec077ee5 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -70,7 +70,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
out:
mutex_unlock(&wl->mutex);
- return 0;
+ return ret;
}
static int
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
new file mode 100644
index 000000000000..64b218699656
--- /dev/null
+++ b/drivers/net/wireless/virt_wifi.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0
+/* drivers/net/wireless/virt_wifi.c
+ *
+ * A fake implementation of cfg80211_ops that can be tacked on to an ethernet
+ * net_device to make it appear as a wireless connection.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * Author: schuffelen@google.com
+ */
+
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+
+static struct wiphy *common_wiphy;
+
+struct virt_wifi_wiphy_priv {
+ struct delayed_work scan_result;
+ struct cfg80211_scan_request *scan_request;
+ bool being_deleted;
+};
+
+static struct ieee80211_channel channel_2ghz = {
+ .band = NL80211_BAND_2GHZ,
+ .center_freq = 2432,
+ .hw_value = 2432,
+ .max_power = 20,
+};
+
+static struct ieee80211_rate bitrates_2ghz[] = {
+ { .bitrate = 10 },
+ { .bitrate = 20 },
+ { .bitrate = 55 },
+ { .bitrate = 110 },
+ { .bitrate = 60 },
+ { .bitrate = 120 },
+ { .bitrate = 240 },
+};
+
+static struct ieee80211_supported_band band_2ghz = {
+ .channels = &channel_2ghz,
+ .bitrates = bitrates_2ghz,
+ .band = NL80211_BAND_2GHZ,
+ .n_channels = 1,
+ .n_bitrates = ARRAY_SIZE(bitrates_2ghz),
+ .ht_cap = {
+ .ht_supported = true,
+ .cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40,
+ .ampdu_factor = 0x3,
+ .ampdu_density = 0x6,
+ .mcs = {
+ .rx_mask = {0xff, 0xff},
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+ },
+};
+
+static struct ieee80211_channel channel_5ghz = {
+ .band = NL80211_BAND_5GHZ,
+ .center_freq = 5240,
+ .hw_value = 5240,
+ .max_power = 20,
+};
+
+static struct ieee80211_rate bitrates_5ghz[] = {
+ { .bitrate = 60 },
+ { .bitrate = 120 },
+ { .bitrate = 240 },
+};
+
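+/* Advertise VHT MCS 0-9 for all eight spatial streams in each direction. */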
+#define RX_MCS_MAP (IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 14)
+
+#define TX_MCS_MAP (IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | \
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 14)
+
+static struct ieee80211_supported_band band_5ghz = {
+ .channels = &channel_5ghz,
+ .bitrates = bitrates_5ghz,
+ .band = NL80211_BAND_5GHZ,
+ .n_channels = 1,
+ .n_bitrates = ARRAY_SIZE(bitrates_5ghz),
+ .ht_cap = {
+ .ht_supported = true,
+ .cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40,
+ .ampdu_factor = 0x3,
+ .ampdu_density = 0x6,
+ .mcs = {
+ .rx_mask = {0xff, 0xff},
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+ },
+ .vht_cap = {
+ .vht_supported = true,
+ .cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+ IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_TXSTBC |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_RXSTBC_2 |
+ IEEE80211_VHT_CAP_RXSTBC_3 |
+ IEEE80211_VHT_CAP_RXSTBC_4 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+ .vht_mcs = {
+ .rx_mcs_map = cpu_to_le16(RX_MCS_MAP),
+ .tx_mcs_map = cpu_to_le16(TX_MCS_MAP),
+ }
+ },
+};
+
+/* Assigned at module init. Guaranteed locally-administered and unicast. */
+static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {};
+
+/* Called with the rtnl lock held. */
+static int virt_wifi_scan(struct wiphy *wiphy,
+ struct cfg80211_scan_request *request)
+{
+ struct virt_wifi_wiphy_priv *priv = wiphy_priv(wiphy);
+
+ wiphy_debug(wiphy, "scan\n");
+
+ if (priv->scan_request || priv->being_deleted)
+ return -EBUSY;
+
+ priv->scan_request = request;
+ schedule_delayed_work(&priv->scan_result, HZ * 2);
+
+ return 0;
+}
+
+/* Acquires and releases the rdev BSS lock. */
+static void virt_wifi_scan_result(struct work_struct *work)
+{
+ struct {
+ u8 tag;
+ u8 len;
+ u8 ssid[8];
+ } __packed ssid = {
+ .tag = WLAN_EID_SSID, .len = 8, .ssid = "VirtWifi",
+ };
+ struct cfg80211_bss *informed_bss;
+ struct virt_wifi_wiphy_priv *priv =
+ container_of(work, struct virt_wifi_wiphy_priv,
+ scan_result.work);
+ struct wiphy *wiphy = priv_to_wiphy(priv);
+ struct cfg80211_scan_info scan_info = { .aborted = false };
+
+ informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
+ CFG80211_BSS_FTYPE_PRESP,
+ fake_router_bssid,
+ ktime_get_boot_ns(),
+ WLAN_CAPABILITY_ESS, 0,
+ (void *)&ssid, sizeof(ssid),
+ DBM_TO_MBM(-50), GFP_KERNEL);
+ cfg80211_put_bss(wiphy, informed_bss);
+
+ /* Schedules work which acquires and releases the rtnl lock. */
+ cfg80211_scan_done(priv->scan_request, &scan_info);
+ priv->scan_request = NULL;
+}
+
+/* May acquire and release the rdev BSS lock. */
+static void virt_wifi_cancel_scan(struct wiphy *wiphy)
+{
+ struct virt_wifi_wiphy_priv *priv = wiphy_priv(wiphy);
+
+ cancel_delayed_work_sync(&priv->scan_result);
+ /* Clean up dangling callbacks if necessary. */
+ if (priv->scan_request) {
+ struct cfg80211_scan_info scan_info = { .aborted = true };
+ /* Schedules work which acquires and releases the rtnl lock. */
+ cfg80211_scan_done(priv->scan_request, &scan_info);
+ priv->scan_request = NULL;
+ }
+}
+
+struct virt_wifi_netdev_priv {
+ struct delayed_work connect;
+ struct net_device *lowerdev;
+ struct net_device *upperdev;
+ u32 tx_packets;
+ u32 tx_failed;
+ u8 connect_requested_bss[ETH_ALEN];
+ bool is_up;
+ bool is_connected;
+ bool being_deleted;
+};
+
+/* Called with the rtnl lock held. */
+static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_connect_params *sme)
+{
+ struct virt_wifi_netdev_priv *priv = netdev_priv(netdev);
+ bool could_schedule;
+
+ if (priv->being_deleted || !priv->is_up)
+ return -EBUSY;
+
+ could_schedule = schedule_delayed_work(&priv->connect, HZ * 2);
+ if (!could_schedule)
+ return -EBUSY;
+
+ if (sme->bssid)
+ ether_addr_copy(priv->connect_requested_bss, sme->bssid);
+ else
+ eth_zero_addr(priv->connect_requested_bss);
+
+ wiphy_debug(wiphy, "connect\n");
+
+ return 0;
+}
+
+/* Acquires and releases the rdev event lock. */
+static void virt_wifi_connect_complete(struct work_struct *work)
+{
+ struct virt_wifi_netdev_priv *priv =
+ container_of(work, struct virt_wifi_netdev_priv, connect.work);
+ u8 *requested_bss = priv->connect_requested_bss;
+ bool has_addr = !is_zero_ether_addr(requested_bss);
+ bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid);
+ u16 status = WLAN_STATUS_SUCCESS;
+
+ if (!priv->is_up || (has_addr && !right_addr))
+ status = WLAN_STATUS_UNSPECIFIED_FAILURE;
+ else
+ priv->is_connected = true;
+
+ /* Schedules an event that acquires the rtnl lock. */
+ cfg80211_connect_result(priv->upperdev, requested_bss, NULL, 0, NULL, 0,
+ status, GFP_KERNEL);
+ netif_carrier_on(priv->upperdev);
+}
+
+/* May acquire and release the rdev event lock. */
+static void virt_wifi_cancel_connect(struct net_device *netdev)
+{
+ struct virt_wifi_netdev_priv *priv = netdev_priv(netdev);
+
+ /* If there is work pending, clean up dangling callbacks. */
+ if (cancel_delayed_work_sync(&priv->connect)) {
+ /* Schedules an event that acquires the rtnl lock. */
+ cfg80211_connect_result(priv->upperdev,
+ priv->connect_requested_bss, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ }
+}
+
+/* Called with the rtnl lock held. Acquires the rdev event lock. */
+static int virt_wifi_disconnect(struct wiphy *wiphy, struct net_device *netdev,
+ u16 reason_code)
+{
+ struct virt_wifi_netdev_priv *priv = netdev_priv(netdev);
+
+ if (priv->being_deleted)
+ return -EBUSY;
+
+ wiphy_debug(wiphy, "disconnect\n");
+ virt_wifi_cancel_connect(netdev);
+
+ cfg80211_disconnected(netdev, reason_code, NULL, 0, true, GFP_KERNEL);
+ priv->is_connected = false;
+ netif_carrier_off(netdev);
+
+ return 0;
+}
+
+/* Called with the rtnl lock held. */
+static int virt_wifi_get_station(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *mac, struct station_info *sinfo)
+{
+ struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
+
+ wiphy_debug(wiphy, "get_station\n");
+
+ if (!priv->is_connected || !ether_addr_equal(mac, fake_router_bssid))
+ return -ENOENT;
+
+ sinfo->filled = BIT_ULL(NL80211_STA_INFO_TX_PACKETS) |
+ BIT_ULL(NL80211_STA_INFO_TX_FAILED) |
+ BIT_ULL(NL80211_STA_INFO_SIGNAL) |
+ BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ sinfo->tx_packets = priv->tx_packets;
+ sinfo->tx_failed = priv->tx_failed;
+ /* For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_ */
+ sinfo->signal = -50;
+ sinfo->txrate = (struct rate_info) {
+ .legacy = 10, /* units are 100kbit/s */
+ };
+ return 0;
+}
+
+/* Called with the rtnl lock held. */
+static int virt_wifi_dump_station(struct wiphy *wiphy, struct net_device *dev,
+ int idx, u8 *mac, struct station_info *sinfo)
+{
+ struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
+
+ wiphy_debug(wiphy, "dump_station\n");
+
+ if (idx != 0 || !priv->is_connected)
+ return -ENOENT;
+
+ ether_addr_copy(mac, fake_router_bssid);
+ return virt_wifi_get_station(wiphy, dev, fake_router_bssid, sinfo);
+}
+
+static const struct cfg80211_ops virt_wifi_cfg80211_ops = {
+ .scan = virt_wifi_scan,
+
+ .connect = virt_wifi_connect,
+ .disconnect = virt_wifi_disconnect,
+
+ .get_station = virt_wifi_get_station,
+ .dump_station = virt_wifi_dump_station,
+};
+
+/* Acquires and releases the rtnl lock. */
+static struct wiphy *virt_wifi_make_wiphy(void)
+{
+ struct wiphy *wiphy;
+ struct virt_wifi_wiphy_priv *priv;
+ int err;
+
+ wiphy = wiphy_new(&virt_wifi_cfg80211_ops, sizeof(*priv));
+
+ if (!wiphy)
+ return NULL;
+
+ wiphy->max_scan_ssids = 4;
+ wiphy->max_scan_ie_len = 1000;
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->bands[NL80211_BAND_2GHZ] = &band_2ghz;
+ wiphy->bands[NL80211_BAND_5GHZ] = &band_5ghz;
+ wiphy->bands[NL80211_BAND_60GHZ] = NULL;
+
+ wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+
+ priv = wiphy_priv(wiphy);
+ priv->being_deleted = false;
+ priv->scan_request = NULL;
+ INIT_DELAYED_WORK(&priv->scan_result, virt_wifi_scan_result);
+
+ err = wiphy_register(wiphy);
+ if (err < 0) {
+ wiphy_free(wiphy);
+ return NULL;
+ }
+
+ return wiphy;
+}
+
+/* Acquires and releases the rtnl lock. */
+static void virt_wifi_destroy_wiphy(struct wiphy *wiphy)
+{
+ struct virt_wifi_wiphy_priv *priv;
+
+ WARN(!wiphy, "%s called with null wiphy", __func__);
+ if (!wiphy)
+ return;
+
+ priv = wiphy_priv(wiphy);
+ priv->being_deleted = true;
+ virt_wifi_cancel_scan(wiphy);
+
+ if (wiphy->registered)
+ wiphy_unregister(wiphy);
+ wiphy_free(wiphy);
+}
+
+/* Enters and exits a RCU-bh critical section. */
+static netdev_tx_t virt_wifi_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
+
+ priv->tx_packets++;
+ if (!priv->is_connected) {
+ priv->tx_failed++;
+ return NET_XMIT_DROP;
+ }
+
+ skb->dev = priv->lowerdev;
+ return dev_queue_xmit(skb);
+}
+
+/* Called with rtnl lock held. */
+static int virt_wifi_net_device_open(struct net_device *dev)
+{
+ struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
+
+ priv->is_up = true;
+ return 0;
+}
+
+/* Called with rtnl lock held. */
+static int virt_wifi_net_device_stop(struct net_device *dev)
+{
+ struct virt_wifi_netdev_priv *n_priv = netdev_priv(dev);
+
+ n_priv->is_up = false;
+
+ if (!dev->ieee80211_ptr)
+ return 0;
+
+ virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy);
+ virt_wifi_cancel_connect(dev);
+ netif_carrier_off(dev);
+
+ return 0;
+}
+
+static const struct net_device_ops virt_wifi_ops = {
+ .ndo_start_xmit = virt_wifi_start_xmit,
+ .ndo_open = virt_wifi_net_device_open,
+ .ndo_stop = virt_wifi_net_device_stop,
+};
+
+/* Invoked as part of rtnl lock release. */
+static void virt_wifi_net_device_destructor(struct net_device *dev)
+{
+ /* Delayed past dellink to allow nl80211 to react to the device being
+ * deleted.
+ */
+ kfree(dev->ieee80211_ptr);
+ dev->ieee80211_ptr = NULL;
+ free_netdev(dev);
+}
+
+/* No lock interaction. */
+static void virt_wifi_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ dev->netdev_ops = &virt_wifi_ops;
+ dev->priv_destructor = virt_wifi_net_device_destructor;
+}
+
+/* Called in a RCU read critical section from netif_receive_skb */
+static rx_handler_result_t virt_wifi_rx_handler(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct virt_wifi_netdev_priv *priv =
+ rcu_dereference(skb->dev->rx_handler_data);
+
+ if (!priv->is_connected)
+ return RX_HANDLER_PASS;
+
+ /* GFP_ATOMIC because this is a packet interrupt handler. */
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb) {
+ dev_err(&priv->upperdev->dev, "can't skb_share_check\n");
+ return RX_HANDLER_CONSUMED;
+ }
+
+ *pskb = skb;
+ skb->dev = priv->upperdev;
+ skb->pkt_type = PACKET_HOST;
+ return RX_HANDLER_ANOTHER;
+}
+
+/* Called with rtnl lock held. */
+static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+
+ netif_carrier_off(dev);
+
+ priv->upperdev = dev;
+ priv->lowerdev = __dev_get_by_index(src_net,
+ nla_get_u32(tb[IFLA_LINK]));
+
+ if (!priv->lowerdev)
+ return -ENODEV;
+ if (!tb[IFLA_MTU])
+ dev->mtu = priv->lowerdev->mtu;
+ else if (dev->mtu > priv->lowerdev->mtu)
+ return -EINVAL;
+
+ err = netdev_rx_handler_register(priv->lowerdev, virt_wifi_rx_handler,
+ priv);
+ if (err) {
+ dev_err(&priv->lowerdev->dev,
+ "can't netdev_rx_handler_register: %d\n", err);
+ return err;
+ }
+
+ eth_hw_addr_inherit(dev, priv->lowerdev);
+ netif_stacked_transfer_operstate(priv->lowerdev, dev);
+
+ SET_NETDEV_DEV(dev, &priv->lowerdev->dev);
+ dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL);
+
+ if (!dev->ieee80211_ptr) {
+ err = -ENOMEM;
+ goto remove_handler;
+ }
+
+ dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
+ dev->ieee80211_ptr->wiphy = common_wiphy;
+
+ err = register_netdevice(dev);
+ if (err) {
+ dev_err(&priv->lowerdev->dev, "can't register_netdevice: %d\n",
+ err);
+ goto free_wireless_dev;
+ }
+
+ err = netdev_upper_dev_link(priv->lowerdev, dev, extack);
+ if (err) {
+ dev_err(&priv->lowerdev->dev, "can't netdev_upper_dev_link: %d\n",
+ err);
+ goto unregister_netdev;
+ }
+
+ priv->being_deleted = false;
+ priv->is_connected = false;
+ priv->is_up = false;
+ INIT_DELAYED_WORK(&priv->connect, virt_wifi_connect_complete);
+
+ return 0;
+unregister_netdev:
+ unregister_netdevice(dev);
+free_wireless_dev:
+ kfree(dev->ieee80211_ptr);
+ dev->ieee80211_ptr = NULL;
+remove_handler:
+ netdev_rx_handler_unregister(priv->lowerdev);
+
+ return err;
+}
+
+/* Called with rtnl lock held. */
+static void virt_wifi_dellink(struct net_device *dev,
+ struct list_head *head)
+{
+ struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
+
+ if (dev->ieee80211_ptr)
+ virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy);
+
+ priv->being_deleted = true;
+ virt_wifi_cancel_connect(dev);
+ netif_carrier_off(dev);
+
+ netdev_rx_handler_unregister(priv->lowerdev);
+ netdev_upper_dev_unlink(priv->lowerdev, dev);
+
+ unregister_netdevice_queue(dev, head);
+
+ /* Deleting the wiphy is handled in the module destructor. */
+}
+
+static struct rtnl_link_ops virt_wifi_link_ops = {
+ .kind = "virt_wifi",
+ .setup = virt_wifi_setup,
+ .newlink = virt_wifi_newlink,
+ .dellink = virt_wifi_dellink,
+ .priv_size = sizeof(struct virt_wifi_netdev_priv),
+};
+
+/* Acquires and releases the rtnl lock. */
+static int __init virt_wifi_init_module(void)
+{
+ int err;
+
+ /* Guaranteed to be locally-administered and not multicast. */
+ eth_random_addr(fake_router_bssid);
+
+ common_wiphy = virt_wifi_make_wiphy();
+ if (!common_wiphy)
+ return -ENOMEM;
+
+ err = rtnl_link_register(&virt_wifi_link_ops);
+ if (err)
+ virt_wifi_destroy_wiphy(common_wiphy);
+
+ return err;
+}
+
+/* Acquires and releases the rtnl lock. */
+static void __exit virt_wifi_cleanup_module(void)
+{
+ /* Will delete any devices that depend on the wiphy. */
+ rtnl_link_unregister(&virt_wifi_link_ops);
+ virt_wifi_destroy_wiphy(common_wiphy);
+}
+
+module_init(virt_wifi_init_module);
+module_exit(virt_wifi_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Cody Schuffelen <schuffelen@google.com>");
+MODULE_DESCRIPTION("Driver for a wireless wrapper of ethernet devices");
+MODULE_ALIAS_RTNL_LINK("virt_wifi");
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 253403899fe9..22c70f1f568c 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -969,6 +969,7 @@ static int zd1201_set_mode(struct net_device *dev,
*/
zd1201_join(zd, "\0-*#\0", 5);
/* Put port in pIBSS */
+ /* Fall through */
case 8: /* No pseudo-IBSS in wireless extensions (yet) */
porttype = ZD1201_PORTTYPE_PSEUDOIBSS;
break;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index fe1d52247bbe..2625740bdc4a 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -186,7 +186,7 @@ static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
.write = xenvif_write_io_ring,
};
-static int xenvif_read_ctrl(struct seq_file *m, void *v)
+static int xenvif_ctrl_show(struct seq_file *m, void *v)
{
struct xenvif *vif = m->private;
@@ -194,19 +194,7 @@ static int xenvif_read_ctrl(struct seq_file *m, void *v)
return 0;
}
-
-static int xenvif_ctrl_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, xenvif_read_ctrl, inode->i_private);
-}
-
-static const struct file_operations xenvif_dbg_ctrl_ops_fops = {
- .owner = THIS_MODULE,
- .open = xenvif_ctrl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);
static void xenvif_debugfs_addif(struct xenvif *vif)
{
@@ -238,7 +226,7 @@ static void xenvif_debugfs_addif(struct xenvif *vif)
0400,
vif->xenvif_dbg_root,
vif,
- &xenvif_dbg_ctrl_ops_fops);
+ &xenvif_ctrl_fops);
if (IS_ERR_OR_NULL(pfile))
pr_warn("Creation of ctrl file returned %ld!\n",
PTR_ERR(pfile));
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f17f602e6171..c914c24f880b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -337,8 +337,6 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
return;
}
- wmb(); /* barrier so backend seens requests */
-
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
if (notify)
notify_remote_via_irq(queue->rx_irq);
@@ -905,7 +903,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
- BUG_ON(pull_to <= skb_headlen(skb));
+ BUG_ON(pull_to < skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 182258f64417..d0c621b32f72 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -111,6 +111,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping, resource_size_t *overlap);
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+ resource_size_t size);
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
struct nd_label_id *label_id);
int alias_dpa_busy(struct device *dev, void *data);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 24c64090169e..6f22272e8d80 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -649,14 +649,47 @@ static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
ALIGN_DOWN(phys, nd_pfn->align));
}
+/*
+ * Check if pmem collides with 'System RAM', or with other regions, when
+ * section aligned, and trim it accordingly.
+ */
+static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
+{
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
+ const resource_size_t start = nsio->res.start;
+ const resource_size_t end = start + resource_size(&nsio->res);
+ resource_size_t adjust, size;
+
+ *start_pad = 0;
+ *end_trunc = 0;
+
+ adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
+ size = resource_size(&nsio->res) + adjust;
+ if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
+ IORES_DESC_NONE) == REGION_MIXED
+ || nd_region_conflict(nd_region, start - adjust, size))
+ *start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+
+ /* Now check that end of the range does not collide. */
+ adjust = PHYS_SECTION_ALIGN_UP(end) - end;
+ size = resource_size(&nsio->res) + adjust;
+ if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+ IORES_DESC_NONE) == REGION_MIXED
+ || !IS_ALIGNED(end, nd_pfn->align)
+ || nd_region_conflict(nd_region, start, size + adjust))
+ *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
+}
+
static int nd_pfn_init(struct nd_pfn *nd_pfn)
{
u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
struct nd_namespace_common *ndns = nd_pfn->ndns;
- u32 start_pad = 0, end_trunc = 0;
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
resource_size_t start, size;
- struct nd_namespace_io *nsio;
struct nd_region *nd_region;
+ u32 start_pad, end_trunc;
struct nd_pfn_sb *pfn_sb;
unsigned long npfns;
phys_addr_t offset;
@@ -688,30 +721,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
memset(pfn_sb, 0, sizeof(*pfn_sb));
- /*
- * Check if pmem collides with 'System RAM' when section aligned and
- * trim it accordingly
- */
- nsio = to_nd_namespace_io(&ndns->dev);
- start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
- size = resource_size(&nsio->res);
- if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED) {
- start = nsio->res.start;
- start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
- }
-
- start = nsio->res.start;
- size = PHYS_SECTION_ALIGN_UP(start + size) - start;
- if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED
- || !IS_ALIGNED(start + resource_size(&nsio->res),
- nd_pfn->align)) {
- size = resource_size(&nsio->res);
- end_trunc = start + size - phys_pmem_align_down(nd_pfn,
- start + size);
- }
-
+ trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
if (start_pad + end_trunc)
dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
dev_name(&ndns->dev), start_pad + end_trunc);
@@ -722,7 +732,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
* implementation will limit the pfns advertised through
* ->direct_access() to those that are included in the memmap.
*/
- start += start_pad;
+ start = nsio->res.start + start_pad;
size = resource_size(&nsio->res);
npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
/ PAGE_SIZE);
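/*
 * Worked example of the padding math in trim_pfn_device() above; values and
 * the SECTION_SIZE constant are illustrative (128MB sections, as on x86_64),
 * not the kernel's PHYS_SECTION_ALIGN_* helpers themselves.
 */
#define SECTION_SIZE	(128ULL << 20)

static inline u64 section_align_up(u64 x)
{
	return (x + SECTION_SIZE - 1) & ~(SECTION_SIZE - 1);
}

/*
 * E.g. a namespace starting at 0x14a000000 whose 128MB section is shared
 * with System RAM gets:
 *   start_pad = section_align_up(0x14a000000) - 0x14a000000
 *             = 0x150000000 - 0x14a000000 = 0x6000000 (96MB).
 */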
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 0e39e3d1846f..f7019294740c 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -393,7 +393,7 @@ static int pmem_attach_disk(struct device *dev,
return -EBUSY;
}
- q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL);
+ q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
if (!q)
return -ENOMEM;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 174a418cb171..e7377f1028ef 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1184,6 +1184,47 @@ int nvdimm_has_cache(struct nd_region *nd_region)
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);
+struct conflict_context {
+ struct nd_region *nd_region;
+ resource_size_t start, size;
+};
+
+static int region_conflict(struct device *dev, void *data)
+{
+ struct nd_region *nd_region;
+ struct conflict_context *ctx = data;
+ resource_size_t res_end, region_end, region_start;
+
+ if (!is_memory(dev))
+ return 0;
+
+ nd_region = to_nd_region(dev);
+ if (nd_region == ctx->nd_region)
+ return 0;
+
+ res_end = ctx->start + ctx->size;
+ region_start = nd_region->ndr_start;
+ region_end = region_start + nd_region->ndr_size;
+ if (ctx->start >= region_start && ctx->start < region_end)
+ return -EBUSY;
+ if (res_end > region_start && res_end <= region_end)
+ return -EBUSY;
+ return 0;
+}
+
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+ resource_size_t size)
+{
+ struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+ struct conflict_context ctx = {
+ .nd_region = nd_region,
+ .start = start,
+ .size = size,
+ };
+
+ return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
+}
+
void __exit nd_region_devs_exit(void)
{
ida_destroy(&region_ida);
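/*
 * The conflict rule used by region_conflict() above, restated as a
 * self-contained sketch: a candidate range [start, end) is busy when either
 * endpoint lands inside a sibling region [r_start, r_end).
 */
static inline bool ranges_conflict(resource_size_t start, resource_size_t end,
				   resource_size_t r_start,
				   resource_size_t r_end)
{
	return (start >= r_start && start < r_end) ||
	       (end > r_start && end <= r_end);
}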
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 88a8b5916624..0f345e207675 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -57,3 +57,18 @@ config NVME_FC
from https://github.com/linux-nvme/nvme-cli.
If unsure, say N.
+
+config NVME_TCP
+ tristate "NVM Express over Fabrics TCP host driver"
+ depends on INET
+ depends on BLK_DEV_NVME
+ select NVME_FABRICS
+ help
+ This provides support for the NVMe over Fabrics protocol using
+ the TCP transport. This allows you to use remote block devices
+ exported using the NVMe protocol set.
+
+ To configure an NVMe over Fabrics controller, use the nvme-cli tool
+ from https://github.com/linux-nvme/nvme-cli.
+
+ If unsure, say N.
diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile
index aea459c65ae1..8a4b671c5f0c 100644
--- a/drivers/nvme/host/Makefile
+++ b/drivers/nvme/host/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
obj-$(CONFIG_NVME_FC) += nvme-fc.o
+obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
nvme-core-y := core.o
nvme-core-$(CONFIG_TRACING) += trace.o
@@ -21,3 +22,5 @@ nvme-fabrics-y += fabrics.o
nvme-rdma-y += rdma.o
nvme-fc-y += fc.o
+
+nvme-tcp-y += tcp.o
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 559d567693b8..08f2c92602f4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -97,7 +97,6 @@ static dev_t nvme_chr_devt;
static struct class *nvme_class;
static struct class *nvme_subsys_class;
-static void nvme_ns_remove(struct nvme_ns *ns);
static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
@@ -245,12 +244,31 @@ static inline bool nvme_req_needs_retry(struct request *req)
return true;
}
+static void nvme_retry_req(struct request *req)
+{
+ struct nvme_ns *ns = req->q->queuedata;
+ unsigned long delay = 0;
+ u16 crd;
+
+ /* The mask and shift result must be <= 3 */
+ crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11;
+ if (ns && crd)
+ delay = ns->ctrl->crdt[crd - 1] * 100;
+
+ nvme_req(req)->retries++;
+ blk_mq_requeue_request(req, false);
+ blk_mq_delay_kick_requeue_list(req->q, delay);
+}
+
void nvme_complete_rq(struct request *req)
{
blk_status_t status = nvme_error_status(req);
trace_nvme_complete_rq(req);
+ if (nvme_req(req)->ctrl->kas)
+ nvme_req(req)->ctrl->comp_seen = true;
+
if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
if ((req->cmd_flags & REQ_NVME_MPATH) &&
blk_path_error(status)) {
@@ -259,8 +277,7 @@ void nvme_complete_rq(struct request *req)
}
if (!blk_queue_dying(req->q)) {
- nvme_req(req)->retries++;
- blk_mq_requeue_request(req, true);
+ nvme_retry_req(req);
return;
}
}
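/*
 * Worked example of the CRD decode in nvme_retry_req() above (assuming
 * NVME_SC_CRD == 0x1800, i.e. status bits 12:11): a status of 0x0802 yields
 * crd = (0x0802 & 0x1800) >> 11 = 1, so the requeue is delayed by
 * ctrl->crdt[0] * 100 milliseconds, matching the spec's 100ms CRDT units.
 */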
@@ -268,14 +285,14 @@ void nvme_complete_rq(struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
-void nvme_cancel_request(struct request *req, void *data, bool reserved)
+bool nvme_cancel_request(struct request *req, void *data, bool reserved)
{
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
"Cancelling I/O %d", req->tag);
nvme_req(req)->status = NVME_SC_ABORT_REQ;
blk_mq_complete_request(req);
-
+ return true;
}
EXPORT_SYMBOL_GPL(nvme_cancel_request);
@@ -536,7 +553,6 @@ static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
static inline void nvme_setup_flush(struct nvme_ns *ns,
struct nvme_command *cmnd)
{
- memset(cmnd, 0, sizeof(*cmnd));
cmnd->common.opcode = nvme_cmd_flush;
cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
@@ -548,9 +564,19 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
struct nvme_dsm_range *range;
struct bio *bio;
- range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
- if (!range)
- return BLK_STS_RESOURCE;
+ range = kmalloc_array(segments, sizeof(*range),
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!range) {
+ /*
+ * If we fail to allocate our range, fall back to the controller
+ * discard page. If that's also busy, it's safe to return
+ * busy, as we know we can make progress once that's freed.
+ */
+ if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
+ return BLK_STS_RESOURCE;
+
+ range = page_address(ns->ctrl->discard_page);
+ }
__rq_for_each_bio(bio, req) {
u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
@@ -565,11 +591,13 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
}
if (WARN_ON_ONCE(n != segments)) {
- kfree(range);
+ if (virt_to_page(range) == ns->ctrl->discard_page)
+ clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+ else
+ kfree(range);
return BLK_STS_IOERR;
}
- memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->dsm.nr = cpu_to_le32(segments - 1);
@@ -598,7 +626,6 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
if (req->cmd_flags & REQ_RAHEAD)
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
- memset(cmnd, 0, sizeof(*cmnd));
cmnd->rw.opcode = (rq_data_dir(req) ? nvme_cmd_write : nvme_cmd_read);
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
@@ -650,8 +677,13 @@ void nvme_cleanup_cmd(struct request *req)
blk_rq_bytes(req) >> ns->lba_shift);
}
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
- kfree(page_address(req->special_vec.bv_page) +
- req->special_vec.bv_offset);
+ struct nvme_ns *ns = req->rq_disk->private_data;
+ struct page *page = req->special_vec.bv_page;
+
+ if (page == ns->ctrl->discard_page)
+ clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+ else
+ kfree(page_address(page) + req->special_vec.bv_offset);
}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
@@ -663,6 +695,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
nvme_clear_nvme_request(req);
+ memset(cmd, 0, sizeof(*cmd));
switch (req_op(req)) {
case REQ_OP_DRV_IN:
case REQ_OP_DRV_OUT:
@@ -691,6 +724,31 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
}
EXPORT_SYMBOL_GPL(nvme_setup_cmd);
+static void nvme_end_sync_rq(struct request *rq, blk_status_t error)
+{
+ struct completion *waiting = rq->end_io_data;
+
+ rq->end_io_data = NULL;
+ complete(waiting);
+}
+
+static void nvme_execute_rq_polled(struct request_queue *q,
+ struct gendisk *bd_disk, struct request *rq, int at_head)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+
+ WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
+
+ rq->cmd_flags |= REQ_HIPRI;
+ rq->end_io_data = &wait;
+ blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
+
+ while (!completion_done(&wait)) {
+ blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
+ cond_resched();
+ }
+}
+
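/*
 * Hedged usage sketch for the polled path: a caller passing poll=true (as
 * the fabrics I/O queue connect later in this series does) spins on
 * blk_poll() instead of sleeping on an interrupt-driven completion.
 */
__nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res, data, sizeof(*data),
		       0, qid, 1, BLK_MQ_REQ_RESERVED, true /* poll */);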
/*
* Returns 0 on success. If the result is negative, it's a Linux error code;
* if the result is positive, it's an NVM Express status code
@@ -698,7 +756,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags)
+ blk_mq_req_flags_t flags, bool poll)
{
struct request *req;
int ret;
@@ -715,7 +773,10 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
goto out;
}
- blk_execute_rq(req->q, NULL, req, at_head);
+ if (poll)
+ nvme_execute_rq_polled(req->q, NULL, req, at_head);
+ else
+ blk_execute_rq(req->q, NULL, req, at_head);
if (result)
*result = nvme_req(req)->result;
if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
@@ -732,7 +793,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
void *buffer, unsigned bufflen)
{
return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
@@ -831,6 +892,8 @@ static int nvme_submit_user_cmd(struct request_queue *q,
static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
{
struct nvme_ctrl *ctrl = rq->end_io_data;
+ unsigned long flags;
+ bool startka = false;
blk_mq_free_request(rq);
@@ -841,7 +904,14 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
return;
}
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ ctrl->comp_seen = false;
+ spin_lock_irqsave(&ctrl->lock, flags);
+ if (ctrl->state == NVME_CTRL_LIVE ||
+ ctrl->state == NVME_CTRL_CONNECTING)
+ startka = true;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
+ if (startka)
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
static int nvme_keep_alive(struct nvme_ctrl *ctrl)
@@ -865,6 +935,15 @@ static void nvme_keep_alive_work(struct work_struct *work)
{
struct nvme_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvme_ctrl, ka_work);
+ bool comp_seen = ctrl->comp_seen;
+
+ if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
+ dev_dbg(ctrl->device,
+ "reschedule traffic based keep-alive timer\n");
+ ctrl->comp_seen = false;
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ return;
+ }
if (nvme_keep_alive(ctrl)) {
/* allocation failure, reset the controller */
@@ -1033,7 +1112,7 @@ static int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword
c.features.dword11 = cpu_to_le32(dword11);
ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
- buffer, buflen, 0, NVME_QID_ANY, 0, 0);
+ buffer, buflen, 0, NVME_QID_ANY, 0, 0, false);
if (ret >= 0 && result)
*result = le32_to_cpu(res.u32);
return ret;
@@ -1232,12 +1311,12 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
c.common.nsid = cpu_to_le32(cmd.nsid);
c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
- c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
- c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
- c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
- c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
- c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
- c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
+ c.common.cdw10 = cpu_to_le32(cmd.cdw10);
+ c.common.cdw11 = cpu_to_le32(cmd.cdw11);
+ c.common.cdw12 = cpu_to_le32(cmd.cdw12);
+ c.common.cdw13 = cpu_to_le32(cmd.cdw13);
+ c.common.cdw14 = cpu_to_le32(cmd.cdw14);
+ c.common.cdw15 = cpu_to_le32(cmd.cdw15);
if (cmd.timeout_ms)
timeout = msecs_to_jiffies(cmd.timeout_ms);
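/*
 * The flattening above assumes the matching layout change in
 * struct nvme_common_command: the old "__le32 cdw10[6]" array becomes six
 * individually named dwords, so cdw10[3] is now simply cdw13. Fragment shown
 * for illustration only:
 */
struct nvme_common_command_fragment {
	__le32	cdw10;
	__le32	cdw11;
	__le32	cdw12;
	__le32	cdw13;
	__le32	cdw14;
	__le32	cdw15;
};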
@@ -1516,8 +1595,6 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->noiob)
nvme_set_chunk_size(ns);
nvme_update_disk_info(disk, ns, id);
- if (ns->ndev)
- nvme_nvm_update_nvm_info(ns);
#ifdef CONFIG_NVME_MULTIPATH
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
@@ -1600,7 +1677,7 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
memset(&c, 0, sizeof(c));
c.common.opcode = op;
c.common.nsid = cpu_to_le32(ns->head->ns_id);
- c.common.cdw10[0] = cpu_to_le32(cdw10);
+ c.common.cdw10 = cpu_to_le32(cdw10);
ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
nvme_put_ns_from_disk(head, srcu_idx);
@@ -1674,11 +1751,11 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
else
cmd.common.opcode = nvme_admin_security_recv;
cmd.common.nsid = 0;
- cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
- cmd.common.cdw10[1] = cpu_to_le32(len);
+ cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
+ cmd.common.cdw11 = cpu_to_le32(len);
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
- ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
+ ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0, false);
}
EXPORT_SYMBOL_GPL(nvme_sec_submit);
#endif /* CONFIG_BLK_SED_OPAL */
@@ -1873,6 +1950,26 @@ static int nvme_configure_timestamp(struct nvme_ctrl *ctrl)
return ret;
}
+static int nvme_configure_acre(struct nvme_ctrl *ctrl)
+{
+ struct nvme_feat_host_behavior *host;
+ int ret;
+
+ /* Don't bother enabling the feature if retry delay is not reported */
+ if (!ctrl->crdt[0])
+ return 0;
+
+ host = kzalloc(sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return 0;
+
+ host->acre = NVME_ENABLE_ACRE;
+ ret = nvme_set_features(ctrl, NVME_FEAT_HOST_BEHAVIOR, 0,
+ host, sizeof(*host), NULL);
+ kfree(host);
+ return ret;
+}
+
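/*
 * Sketch of what nvme_configure_acre() above sends: the Host Behavior
 * Support feature (NVME_FEAT_HOST_BEHAVIOR, 0x16 in NVMe 1.4) carries a
 * single 'acre' byte, and writing NVME_ENABLE_ACRE (1) opts the host into
 * Advanced Command Retry using the controller-reported delay times.
 */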
static int nvme_configure_apst(struct nvme_ctrl *ctrl)
{
/*
@@ -2394,6 +2491,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->quirks &= ~NVME_QUIRK_NO_DEEPEST_PS;
}
+ ctrl->crdt[0] = le16_to_cpu(id->crdt1);
+ ctrl->crdt[1] = le16_to_cpu(id->crdt2);
+ ctrl->crdt[2] = le16_to_cpu(id->crdt3);
+
ctrl->oacs = le16_to_cpu(id->oacs);
ctrl->oncs = le16_to_cpup(&id->oncs);
ctrl->oaes = le32_to_cpu(id->oaes);
@@ -2411,6 +2512,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->sgls = le32_to_cpu(id->sgls);
ctrl->kas = le16_to_cpu(id->kas);
ctrl->max_namespaces = le32_to_cpu(id->mnan);
+ ctrl->ctratt = le32_to_cpu(id->ctratt);
if (id->rtd3e) {
/* us -> s */
@@ -2493,6 +2595,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ret < 0)
return ret;
+ ret = nvme_configure_acre(ctrl);
+ if (ret < 0)
+ return ret;
+
ctrl->identified = true;
return 0;
@@ -2768,6 +2874,7 @@ static ssize_t field##_show(struct device *dev, \
static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
nvme_show_int_function(cntlid);
+nvme_show_int_function(numa_node);
static ssize_t nvme_sysfs_delete(struct device *dev,
struct device_attribute *attr, const char *buf,
@@ -2847,6 +2954,7 @@ static struct attribute *nvme_dev_attrs[] = {
&dev_attr_subsysnqn.attr,
&dev_attr_address.attr,
&dev_attr_state.attr,
+ &dev_attr_numa_node.attr,
NULL
};
@@ -3057,7 +3165,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
struct gendisk *disk;
struct nvme_id_ns *id;
char disk_name[DISK_NAME_LEN];
- int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT;
+ int node = ctrl->numa_node, flags = GENHD_FL_EXT_DEVT;
ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
if (!ns)
@@ -3092,13 +3200,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_setup_streams_ns(ctrl, ns);
nvme_set_disk_name(disk_name, ns, ctrl, &flags);
- if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
- if (nvme_nvm_register(ns, disk_name, node)) {
- dev_warn(ctrl->device, "LightNVM init failure\n");
- goto out_unlink_ns;
- }
- }
-
disk = alloc_disk_node(0, node);
if (!disk)
goto out_unlink_ns;
@@ -3112,6 +3213,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
__nvme_revalidate_disk(disk, id);
+ if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
+ if (nvme_nvm_register(ns, disk_name, node)) {
+ dev_warn(ctrl->device, "LightNVM init failure\n");
+ goto out_put_disk;
+ }
+ }
+
down_write(&ctrl->namespaces_rwsem);
list_add_tail(&ns->list, &ctrl->namespaces);
up_write(&ctrl->namespaces_rwsem);
@@ -3125,6 +3233,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
kfree(id);
return;
+ out_put_disk:
+ put_disk(ns->disk);
out_unlink_ns:
mutex_lock(&ctrl->subsys->lock);
list_del_rcu(&ns->siblings);
@@ -3314,6 +3424,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
struct nvme_ns *ns, *next;
LIST_HEAD(ns_list);
+ /* prevent racing with ns scanning */
+ flush_work(&ctrl->scan_work);
+
/*
* The dead states indicates the controller was not gracefully
* disconnected. In that case, we won't be able to flush any data while
@@ -3476,7 +3589,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
nvme_mpath_stop(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
- flush_work(&ctrl->scan_work);
cancel_work_sync(&ctrl->fw_act_work);
if (ctrl->ops->stop_ctrl)
ctrl->ops->stop_ctrl(ctrl);
@@ -3512,6 +3624,7 @@ static void nvme_free_ctrl(struct device *dev)
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
kfree(ctrl->effects);
nvme_mpath_uninit(ctrl);
+ __free_page(ctrl->discard_page);
if (subsys) {
mutex_lock(&subsys->lock);
@@ -3552,6 +3665,14 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
+ BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
+ PAGE_SIZE);
+ ctrl->discard_page = alloc_page(GFP_KERNEL);
+ if (!ctrl->discard_page) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
if (ret < 0)
goto out;
@@ -3585,10 +3706,12 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
return 0;
out_free_name:
- kfree_const(dev->kobj.name);
+ kfree_const(ctrl->device->kobj.name);
out_release_instance:
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
+ if (ctrl->discard_page)
+ __free_page(ctrl->discard_page);
return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
@@ -3607,7 +3730,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
/* Forcibly unquiesce queues to avoid blocking dispatch */
- if (ctrl->admin_q)
+ if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
blk_mq_unquiesce_queue(ctrl->admin_q);
list_for_each_entry(ns, &ctrl->namespaces, list)
@@ -3736,7 +3859,7 @@ out:
return result;
}
-void nvme_core_exit(void)
+void __exit nvme_core_exit(void)
{
ida_destroy(&nvme_subsystems_ida);
class_destroy(nvme_subsys_class);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index bd0969db6225..b2ab213f43de 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -159,7 +159,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0, 0, false);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -206,7 +206,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
cmd.prop_get.offset = cpu_to_le32(off);
ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res, NULL, 0, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0, 0, false);
if (ret >= 0)
*val = le64_to_cpu(res.u64);
@@ -252,7 +252,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
cmd.prop_set.value = cpu_to_le64(val);
ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, NULL, 0, 0,
- NVME_QID_ANY, 0, 0);
+ NVME_QID_ANY, 0, 0, false);
if (unlikely(ret))
dev_err(ctrl->device,
"Property Set error: %d, offset %#x\n",
@@ -392,6 +392,9 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
cmd.connect.kato = ctrl->opts->discovery_nqn ? 0 :
cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000);
+ if (ctrl->opts->disable_sqflow)
+ cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -403,7 +406,7 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
ret = __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, &res,
data, sizeof(*data), 0, NVME_QID_ANY, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, false);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
@@ -438,7 +441,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_admin_queue);
* > 0: NVMe error status code
* < 0: Linux errno error code
*/
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll)
{
struct nvme_command cmd;
struct nvmf_connect_data *data;
@@ -451,6 +454,9 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
cmd.connect.qid = cpu_to_le16(qid);
cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+ if (ctrl->opts->disable_sqflow)
+ cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -462,7 +468,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
data, sizeof(*data), 0, qid, 1,
- BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+ BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT, poll);
if (ret) {
nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
&cmd, data);
@@ -607,6 +613,11 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_HOST_TRADDR, "host_traddr=%s" },
{ NVMF_OPT_HOST_ID, "hostid=%s" },
{ NVMF_OPT_DUP_CONNECT, "duplicate_connect" },
+ { NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" },
+ { NVMF_OPT_HDR_DIGEST, "hdr_digest" },
+ { NVMF_OPT_DATA_DIGEST, "data_digest" },
+ { NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
+ { NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{ NVMF_OPT_ERR, NULL }
};
@@ -626,6 +637,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
opts->kato = NVME_DEFAULT_KATO;
opts->duplicate_connect = false;
+ opts->hdr_digest = false;
+ opts->data_digest = false;
options = o = kstrdup(buf, GFP_KERNEL);
if (!options)
@@ -817,6 +830,39 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
case NVMF_OPT_DUP_CONNECT:
opts->duplicate_connect = true;
break;
+ case NVMF_OPT_DISABLE_SQFLOW:
+ opts->disable_sqflow = true;
+ break;
+ case NVMF_OPT_HDR_DIGEST:
+ opts->hdr_digest = true;
+ break;
+ case NVMF_OPT_DATA_DIGEST:
+ opts->data_digest = true;
+ break;
+ case NVMF_OPT_NR_WRITE_QUEUES:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid nr_write_queues %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_write_queues = token;
+ break;
+ case NVMF_OPT_NR_POLL_QUEUES:
+ if (match_int(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token <= 0) {
+ pr_err("Invalid nr_poll_queues %d\n", token);
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->nr_poll_queues = token;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
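/*
 * Hedged example of the new options on an nvme-cli connect string (exact
 * support depends on the transport; addresses and values are illustrative):
 *
 *   transport=tcp,traddr=192.168.0.9,nqn=nqn.2014-08.org.example:disk1,
 *   hdr_digest,data_digest,nr_write_queues=4,nr_poll_queues=2
 */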
@@ -933,7 +979,8 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_REQUIRED_OPTS (NVMF_OPT_TRANSPORT | NVMF_OPT_NQN)
#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
- NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT)
+ NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
+ NVMF_OPT_DISABLE_SQFLOW)
static struct nvme_ctrl *
nvmf_create_ctrl(struct device *dev, const char *buf, size_t count)
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 6ea6275f332a..478343b73e38 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -58,6 +58,11 @@ enum {
NVMF_OPT_CTRL_LOSS_TMO = 1 << 11,
NVMF_OPT_HOST_ID = 1 << 12,
NVMF_OPT_DUP_CONNECT = 1 << 13,
+ NVMF_OPT_DISABLE_SQFLOW = 1 << 14,
+ NVMF_OPT_HDR_DIGEST = 1 << 15,
+ NVMF_OPT_DATA_DIGEST = 1 << 16,
+ NVMF_OPT_NR_WRITE_QUEUES = 1 << 17,
+ NVMF_OPT_NR_POLL_QUEUES = 1 << 18,
};
/**
@@ -85,6 +90,11 @@ enum {
* @max_reconnects: maximum number of allowed reconnect attempts before removing
* the controller, (-1) means reconnect forever, zero means remove
* immediately;
+ * @disable_sqflow: disable controller sq flow control
+ * @hdr_digest: generate/verify header digest (TCP)
+ * @data_digest: generate/verify data digest (TCP)
+ * @nr_write_queues: number of queues for write I/O
+ * @nr_poll_queues: number of queues for polling I/O
*/
struct nvmf_ctrl_options {
unsigned mask;
@@ -101,6 +111,11 @@ struct nvmf_ctrl_options {
unsigned int kato;
struct nvmf_host *host;
int max_reconnects;
+ bool disable_sqflow;
+ bool hdr_digest;
+ bool data_digest;
+ unsigned int nr_write_queues;
+ unsigned int nr_poll_queues;
};
/*
@@ -156,7 +171,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl);
-int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid);
+int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid, bool poll);
int nvmf_register_transport(struct nvmf_transport_ops *ops);
void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 54032c466636..89accc76d71c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1752,12 +1752,12 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
int res;
- nvme_req(rq)->ctrl = &ctrl->ctrl;
res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
if (res)
return res;
op->op.fcp_req.first_sgl = &op->sgl[0];
op->op.fcp_req.private = &op->priv[0];
+ nvme_req(rq)->ctrl = &ctrl->ctrl;
return res;
}
@@ -1975,7 +1975,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
(qsize / 5));
if (ret)
break;
- ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
if (ret)
break;
@@ -2326,38 +2326,6 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}
-static struct blk_mq_tags *
-nvme_fc_tagset(struct nvme_fc_queue *queue)
-{
- if (queue->qnum == 0)
- return queue->ctrl->admin_tag_set.tags[queue->qnum];
-
- return queue->ctrl->tag_set.tags[queue->qnum - 1];
-}
-
-static int
-nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
-
-{
- struct nvme_fc_queue *queue = hctx->driver_data;
- struct nvme_fc_ctrl *ctrl = queue->ctrl;
- struct request *req;
- struct nvme_fc_fcp_op *op;
-
- req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
- if (!req)
- return 0;
-
- op = blk_mq_rq_to_pdu(req);
-
- if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
- (ctrl->lport->ops->poll_queue))
- ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
- queue->lldd_handle);
-
- return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
-}
-
static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
@@ -2410,7 +2378,7 @@ nvme_fc_complete_rq(struct request *rq)
* status. The done path will return the io request back to the block
* layer with an error status.
*/
-static void
+static bool
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
struct nvme_ctrl *nctrl = data;
@@ -2418,6 +2386,7 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
__nvme_fc_abort_op(ctrl, op);
+ return true;
}
@@ -2427,7 +2396,6 @@ static const struct blk_mq_ops nvme_fc_mq_ops = {
.init_request = nvme_fc_init_request,
.exit_request = nvme_fc_exit_request,
.init_hctx = nvme_fc_init_hctx,
- .poll = nvme_fc_poll,
.timeout = nvme_fc_timeout,
};
@@ -2457,7 +2425,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
ctrl->tag_set.ops = &nvme_fc_mq_ops;
ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
ctrl->tag_set.reserved_tags = 1; /* fabric connect */
- ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ctrl->tag_set.cmd_size =
struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
@@ -3050,6 +3018,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->ctrl.opts = opts;
ctrl->ctrl.nr_reconnects = 0;
+ ctrl->ctrl.numa_node = dev_to_node(lport->dev);
INIT_LIST_HEAD(&ctrl->ctrl_list);
ctrl->lport = lport;
ctrl->rport = rport;
@@ -3090,7 +3059,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
- ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
ctrl->admin_tag_set.cmd_size =
struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
ctrl->lport->ops->fcprqst_priv_sz);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index a4f3b263cd6c..b759c25c89c8 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -577,7 +577,8 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
struct ppa_addr ppa;
size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
size_t log_pos, offset, len;
- int ret, i, max_len;
+ int i, max_len;
+ int ret = 0;
/*
* limit requests to maximum 256K to avoid issuing arbitrary large
@@ -731,11 +732,12 @@ static int nvme_nvm_submit_io_sync(struct nvm_dev *dev, struct nvm_rq *rqd)
return ret;
}
-static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name)
+static void *nvme_nvm_create_dma_pool(struct nvm_dev *nvmdev, char *name,
+ int size)
{
struct nvme_ns *ns = nvmdev->q->queuedata;
- return dma_pool_create(name, ns->ctrl->dev, PAGE_SIZE, PAGE_SIZE, 0);
+ return dma_pool_create(name, ns->ctrl->dev, size, PAGE_SIZE, 0);
}
static void nvme_nvm_destroy_dma_pool(void *pool)
@@ -935,9 +937,9 @@ static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
/* cdw11-12 */
c.ph_rw.length = cpu_to_le16(vcmd.nppas);
c.ph_rw.control = cpu_to_le16(vcmd.control);
- c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
- c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
- c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);
+ c.common.cdw13 = cpu_to_le32(vcmd.cdw13);
+ c.common.cdw14 = cpu_to_le32(vcmd.cdw14);
+ c.common.cdw15 = cpu_to_le32(vcmd.cdw15);
if (vcmd.timeout_ms)
timeout = msecs_to_jiffies(vcmd.timeout_ms);
@@ -972,22 +974,11 @@ int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg)
}
}
-void nvme_nvm_update_nvm_info(struct nvme_ns *ns)
-{
- struct nvm_dev *ndev = ns->ndev;
- struct nvm_geo *geo = &ndev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12)
- return;
-
- geo->csecs = 1 << ns->lba_shift;
- geo->sos = ns->ms;
-}
-
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
{
struct request_queue *q = ns->queue;
struct nvm_dev *dev;
+ struct nvm_geo *geo;
_nvme_nvm_check_size();
@@ -995,6 +986,12 @@ int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
if (!dev)
return -ENOMEM;
+ /* Note that csecs and sos will be overridden if it is a 1.2 drive. */
+ geo = &dev->geo;
+ geo->csecs = 1 << ns->lba_shift;
+ geo->sos = ns->ms;
+ geo->ext = ns->ext;
+
dev->q = q;
memcpy(dev->name, disk_name, DISK_NAME_LEN);
dev->ops = &nvme_nvm_dev_ops;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 9901afd804ce..183ec17ba067 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -141,7 +141,7 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
test_bit(NVME_NS_ANA_PENDING, &ns->flags))
continue;
- distance = node_distance(node, dev_to_node(ns->ctrl->dev));
+ distance = node_distance(node, ns->ctrl->numa_node);
switch (ns->ana_state) {
case NVME_ANA_OPTIMIZED:
@@ -220,21 +220,6 @@ static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
return ret;
}
-static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
-{
- struct nvme_ns_head *head = q->queuedata;
- struct nvme_ns *ns;
- bool found = false;
- int srcu_idx;
-
- srcu_idx = srcu_read_lock(&head->srcu);
- ns = srcu_dereference(head->current_path[numa_node_id()], &head->srcu);
- if (likely(ns && nvme_path_is_optimized(ns)))
- found = ns->queue->poll_fn(q, qc);
- srcu_read_unlock(&head->srcu, srcu_idx);
- return found;
-}
-
static void nvme_requeue_work(struct work_struct *work)
{
struct nvme_ns_head *head =
@@ -276,12 +261,11 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
return 0;
- q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
+ q = blk_alloc_queue_node(GFP_KERNEL, ctrl->numa_node);
if (!q)
goto out;
q->queuedata = head;
blk_queue_make_request(q, nvme_ns_head_make_request);
- q->poll_fn = nvme_ns_head_poll;
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* set to a default value for 512 until disk is validated */
blk_queue_logical_block_size(q, 512);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cee79cb388af..2b36ac922596 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -145,6 +145,7 @@ enum nvme_ctrl_state {
};
struct nvme_ctrl {
+ bool comp_seen;
enum nvme_ctrl_state state;
bool identified;
spinlock_t lock;
@@ -153,6 +154,7 @@ struct nvme_ctrl {
struct request_queue *connect_q;
struct device *dev;
int instance;
+ int numa_node;
struct blk_mq_tag_set *tagset;
struct blk_mq_tag_set *admin_tagset;
struct list_head namespaces;
@@ -179,6 +181,7 @@ struct nvme_ctrl {
u32 page_size;
u32 max_hw_sectors;
u32 max_segments;
+ u16 crdt[3];
u16 oncs;
u16 oacs;
u16 nssa;
@@ -193,6 +196,7 @@ struct nvme_ctrl {
u8 apsta;
u32 oaes;
u32 aen_result;
+ u32 ctratt;
unsigned int shutdown_timeout;
unsigned int kato;
bool subsystem;
@@ -237,6 +241,9 @@ struct nvme_ctrl {
u16 maxcmd;
int nr_reconnects;
struct nvmf_ctrl_options *opts;
+
+ struct page *discard_page;
+ unsigned long discard_page_busy;
};
struct nvme_subsystem {
@@ -364,15 +371,6 @@ static inline void nvme_fault_inject_fini(struct nvme_ns *ns) {}
static inline void nvme_should_fail(struct request *req) {}
#endif
-static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
-{
- u32 val = 0;
-
- if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
- return false;
- return val & NVME_CSTS_RDY;
-}
-
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
if (!ctrl->subsystem)
@@ -408,7 +406,7 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
}
void nvme_complete_rq(struct request *req);
-void nvme_cancel_request(struct request *req, void *data, bool reserved);
+bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
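/*
 * Note on the bool return adopted above: blk-mq's busy-tag iterators were
 * changed so the callback reports whether iteration should continue;
 * nvme_cancel_request() returns true to keep walking the tag set.
 */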
@@ -449,7 +447,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
union nvme_result *result, void *buffer, unsigned bufflen,
unsigned timeout, int qid, int at_head,
- blk_mq_req_flags_t flags);
+ blk_mq_req_flags_t flags, bool poll);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
@@ -531,6 +529,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
struct nvme_id_ctrl *id)
{
+ if (ctrl->subsys->cmic & (1 << 3))
+ dev_warn(ctrl->device,
+"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
@@ -542,13 +543,11 @@ static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
-void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
-static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
int node)
{
@@ -569,6 +568,6 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
}
int __init nvme_core_init(void);
-void nvme_core_exit(void);
+void __exit nvme_core_exit(void);
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c33bb201b884..5a0bf6a24d50 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -32,6 +32,7 @@
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>
+#include "trace.h"
#include "nvme.h"
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
@@ -74,6 +75,22 @@ static int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2");
+static int queue_count_set(const char *val, const struct kernel_param *kp);
+static const struct kernel_param_ops queue_count_ops = {
+ .set = queue_count_set,
+ .get = param_get_int,
+};
+
+static int write_queues;
+module_param_cb(write_queues, &queue_count_ops, &write_queues, 0644);
+MODULE_PARM_DESC(write_queues,
+ "Number of queues to use for writes. If not set, reads and writes "
+ "will share a queue set.");
+
+static int poll_queues = 0;
+module_param_cb(poll_queues, &queue_count_ops, &poll_queues, 0644);
+MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");
+
struct nvme_dev;
struct nvme_queue;
@@ -92,6 +109,7 @@ struct nvme_dev {
struct dma_pool *prp_small_pool;
unsigned online_queues;
unsigned max_qid;
+ unsigned io_queues[HCTX_MAX_TYPES];
unsigned int num_vecs;
int q_depth;
u32 db_stride;
@@ -105,7 +123,6 @@ struct nvme_dev {
u32 cmbsz;
u32 cmbloc;
struct nvme_ctrl ctrl;
- struct completion ioq_wait;
mempool_t *iod_mempool;
@@ -134,6 +151,17 @@ static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
return param_set_int(val, kp);
}
+static int queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ int n = 0, ret;
+
+ ret = kstrtoint(val, 10, &n);
+ if (ret)
+ return ret;
+ if (n > num_possible_cpus())
+ n = num_possible_cpus();
+
+ return param_set_int(val, kp);
+}
+
static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
return qid * 2 * stride;
@@ -158,8 +186,8 @@ struct nvme_queue {
struct nvme_dev *dev;
spinlock_t sq_lock;
struct nvme_command *sq_cmds;
- bool sq_cmds_is_io;
- spinlock_t cq_lock ____cacheline_aligned_in_smp;
+ /* only used for poll queues: */
+ spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
volatile struct nvme_completion *cqes;
struct blk_mq_tags **tags;
dma_addr_t sq_dma_addr;
@@ -168,14 +196,20 @@ struct nvme_queue {
u16 q_depth;
s16 cq_vector;
u16 sq_tail;
+ u16 last_sq_tail;
u16 cq_head;
u16 last_cq_head;
u16 qid;
u8 cq_phase;
+ unsigned long flags;
+#define NVMEQ_ENABLED 0
+#define NVMEQ_SQ_CMB 1
+#define NVMEQ_DELETE_ERROR 2
u32 *dbbuf_sq_db;
u32 *dbbuf_cq_db;
u32 *dbbuf_sq_ei;
u32 *dbbuf_cq_ei;
+ struct completion delete_done;
};
/*
@@ -218,9 +252,20 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64);
}
+static unsigned int max_io_queues(void)
+{
+ return num_possible_cpus() + write_queues + poll_queues;
+}
+
+static unsigned int max_queue_count(void)
+{
+ /* IO queues + admin queue */
+ return 1 + max_io_queues();
+}
+
static inline unsigned int nvme_dbbuf_size(u32 stride)
{
- return ((num_possible_cpus() + 1) * 8 * stride);
+ return (max_queue_count() * 8 * stride);
}
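/*
 * Worked example (illustrative): a 16-CPU system booted with write_queues=4
 * and poll_queues=2 gets max_io_queues() = 16 + 4 + 2 = 22 and
 * max_queue_count() = 23 (the extra one is the admin queue), which in turn
 * sizes the doorbell buffer via nvme_dbbuf_size().
 */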
static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
@@ -431,30 +476,90 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
return 0;
}
+static int queue_irq_offset(struct nvme_dev *dev)
+{
+ /* if we have more than 1 vec, admin queue offsets us by 1 */
+ if (dev->num_vecs > 1)
+ return 1;
+
+ return 0;
+}
+
static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_dev *dev = set->driver_data;
+ int i, qoff, offset;
+
+ offset = queue_irq_offset(dev);
+ for (i = 0, qoff = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ map->nr_queues = dev->io_queues[i];
+ if (!map->nr_queues) {
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
- return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev),
- dev->num_vecs > 1 ? 1 /* admin queue */ : 0);
+ /*
+ * The poll queues don't have an IRQ (and hence no IRQ
+ * affinity), so use the regular blk-mq cpu mapping
+ */
+ map->queue_offset = qoff;
+ if (i != HCTX_TYPE_POLL)
+ blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
+ else
+ blk_mq_map_queues(map);
+ qoff += map->nr_queues;
+ offset += map->nr_queues;
+ }
+
+ return 0;
+}
+
+/*
+ * Write sq tail if we are asked to, or if the next command would wrap.
+ */
+static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
+{
+ if (!write_sq) {
+ u16 next_tail = nvmeq->sq_tail + 1;
+
+ if (next_tail == nvmeq->q_depth)
+ next_tail = 0;
+ if (next_tail != nvmeq->last_sq_tail)
+ return;
+ }
+
+ if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
+ nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+ nvmeq->last_sq_tail = nvmeq->sq_tail;
}
/**
* nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
* @nvmeq: The queue to use
* @cmd: The command to send
+ * @write_sq: whether to write to the SQ doorbell
*/
-static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+ bool write_sq)
{
spin_lock(&nvmeq->sq_lock);
-
memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
-
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
- if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
- nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
- writel(nvmeq->sq_tail, nvmeq->q_db);
+ nvme_write_sq_db(nvmeq, write_sq);
+ spin_unlock(&nvmeq->sq_lock);
+}
+
+static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
+{
+ struct nvme_queue *nvmeq = hctx->driver_data;
+
+ spin_lock(&nvmeq->sq_lock);
+ if (nvmeq->sq_tail != nvmeq->last_sq_tail)
+ nvme_write_sq_db(nvmeq, true);
spin_unlock(&nvmeq->sq_lock);
}
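/*
 * Doorbell batching sketch, grounded in nvme_write_sq_db() and
 * nvme_commit_rqs() above: with bd->last == false the tail only advances in
 * memory, and the MMIO write is deferred until the queue would wrap or
 * blk-mq flushes the plug via .commit_rqs(). For a three-request plug on an
 * idle queue:
 *
 *   submit #1 (last=false) -> sq_tail=1, no doorbell
 *   submit #2 (last=false) -> sq_tail=2, no doorbell
 *   submit #3 (last=true)  -> sq_tail=3, writel(3, q_db)
 */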
@@ -822,7 +927,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
* We should not need to do this, but we're still using this to
* ensure we can drain requests on a dying queue.
*/
- if (unlikely(nvmeq->cq_vector < 0))
+ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
return BLK_STS_IOERR;
ret = nvme_setup_cmd(ns, req, &cmnd);
@@ -840,7 +945,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
blk_mq_start_request(req);
- nvme_submit_cmd(nvmeq, &cmnd);
+ nvme_submit_cmd(nvmeq, &cmnd, bd->last);
return BLK_STS_OK;
out_cleanup_iod:
nvme_free_iod(dev, req);
@@ -899,6 +1004,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
}
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe->command_id);
+ trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
nvme_end_request(req, cqe->status, cqe->result);
}
@@ -919,15 +1025,15 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
}
}
-static inline bool nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
- u16 *end, int tag)
+static inline int nvme_process_cq(struct nvme_queue *nvmeq, u16 *start,
+ u16 *end, unsigned int tag)
{
- bool found = false;
+ int found = 0;
*start = nvmeq->cq_head;
- while (!found && nvme_cqe_pending(nvmeq)) {
- if (nvmeq->cqes[nvmeq->cq_head].command_id == tag)
- found = true;
+ while (nvme_cqe_pending(nvmeq)) {
+ if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
+ found++;
nvme_update_cq_head(nvmeq);
}
*end = nvmeq->cq_head;
@@ -943,12 +1049,16 @@ static irqreturn_t nvme_irq(int irq, void *data)
irqreturn_t ret = IRQ_NONE;
u16 start, end;
- spin_lock(&nvmeq->cq_lock);
+ /*
+ * The rmb/wmb pair ensures we see all updates from a previous run of
+ * the irq handler, even if that was on another CPU.
+ */
+ rmb();
if (nvmeq->cq_head != nvmeq->last_cq_head)
ret = IRQ_HANDLED;
nvme_process_cq(nvmeq, &start, &end, -1);
nvmeq->last_cq_head = nvmeq->cq_head;
- spin_unlock(&nvmeq->cq_lock);
+ wmb();
if (start != end) {
nvme_complete_cqes(nvmeq, start, end);
@@ -966,27 +1076,50 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
return IRQ_NONE;
}
-static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
+/*
+ * Poll for completions any queue, including those not dedicated to polling.
+ * Can be called from any context.
+ */
+static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
{
+ struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
u16 start, end;
- bool found;
+ int found;
- if (!nvme_cqe_pending(nvmeq))
- return 0;
-
- spin_lock_irq(&nvmeq->cq_lock);
- found = nvme_process_cq(nvmeq, &start, &end, tag);
- spin_unlock_irq(&nvmeq->cq_lock);
+ /*
+ * For a poll queue we need to protect against the polling thread
+ * using the CQ lock. For normal interrupt driven threads we have
+ * to disable the interrupt to avoid racing with it.
+ */
+ if (nvmeq->cq_vector == -1) {
+ spin_lock(&nvmeq->cq_poll_lock);
+ found = nvme_process_cq(nvmeq, &start, &end, tag);
+ spin_unlock(&nvmeq->cq_poll_lock);
+ } else {
+ disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ found = nvme_process_cq(nvmeq, &start, &end, tag);
+ enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ }
nvme_complete_cqes(nvmeq, start, end);
return found;
}
-static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+static int nvme_poll(struct blk_mq_hw_ctx *hctx)
{
struct nvme_queue *nvmeq = hctx->driver_data;
+ u16 start, end;
+ bool found;
+
+ if (!nvme_cqe_pending(nvmeq))
+ return 0;
+
+ spin_lock(&nvmeq->cq_poll_lock);
+ found = nvme_process_cq(nvmeq, &start, &end, -1);
+ spin_unlock(&nvmeq->cq_poll_lock);
- return __nvme_poll(nvmeq, tag);
+ nvme_complete_cqes(nvmeq, start, end);
+ return found;
}
static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
@@ -998,7 +1131,7 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
memset(&c, 0, sizeof(c));
c.common.opcode = nvme_admin_async_event;
c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
- nvme_submit_cmd(nvmeq, &c);
+ nvme_submit_cmd(nvmeq, &c, true);
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1016,7 +1149,10 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
struct nvme_queue *nvmeq, s16 vector)
{
struct nvme_command c;
- int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
+ int flags = NVME_QUEUE_PHYS_CONTIG;
+
+ if (vector != -1)
+ flags |= NVME_CQ_IRQ_ENABLED;
/*
* Note: we (ab)use the fact that the prp fields survive if no data
@@ -1028,7 +1164,10 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
c.create_cq.cqid = cpu_to_le16(qid);
c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
c.create_cq.cq_flags = cpu_to_le16(flags);
- c.create_cq.irq_vector = cpu_to_le16(vector);
+ if (vector != -1)
+ c.create_cq.irq_vector = cpu_to_le16(vector);
+ else
+ c.create_cq.irq_vector = 0;
return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
@@ -1157,7 +1296,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
/*
* Did we miss an interrupt?
*/
- if (__nvme_poll(nvmeq, req->tag)) {
+ if (nvme_poll_irqdisable(nvmeq, req->tag)) {
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, completion polled\n",
req->tag, nvmeq->qid);
@@ -1237,17 +1376,15 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+ if (!nvmeq->sq_cmds)
+ return;
- if (nvmeq->sq_cmds) {
- if (nvmeq->sq_cmds_is_io)
- pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev),
- nvmeq->sq_cmds,
- SQ_SIZE(nvmeq->q_depth));
- else
- dma_free_coherent(nvmeq->q_dmadev,
- SQ_SIZE(nvmeq->q_depth),
- nvmeq->sq_cmds,
- nvmeq->sq_dma_addr);
+ if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
+ pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev),
+ nvmeq->sq_cmds, SQ_SIZE(nvmeq->q_depth));
+ } else {
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
}
}
@@ -1267,47 +1404,32 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
*/
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
- int vector;
-
- spin_lock_irq(&nvmeq->cq_lock);
- if (nvmeq->cq_vector == -1) {
- spin_unlock_irq(&nvmeq->cq_lock);
+ if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
return 1;
- }
- vector = nvmeq->cq_vector;
- nvmeq->dev->online_queues--;
- nvmeq->cq_vector = -1;
- spin_unlock_irq(&nvmeq->cq_lock);
- /*
- * Ensure that nvme_queue_rq() sees it ->cq_vector == -1 without
- * having to grab the lock.
- */
+ /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */
mb();
+ nvmeq->dev->online_queues--;
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
-
- pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);
-
+ if (nvmeq->cq_vector == -1)
+ return 0;
+ pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
+ nvmeq->cq_vector = -1;
return 0;
}
static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
{
struct nvme_queue *nvmeq = &dev->queues[0];
- u16 start, end;
if (shutdown)
nvme_shutdown_ctrl(&dev->ctrl);
else
nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
- spin_lock_irq(&nvmeq->cq_lock);
- nvme_process_cq(nvmeq, &start, &end, -1);
- spin_unlock_irq(&nvmeq->cq_lock);
-
- nvme_complete_cqes(nvmeq, start, end);
+ nvme_poll_irqdisable(nvmeq, -1);
}
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
@@ -1343,15 +1465,14 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
nvmeq->sq_cmds);
- nvmeq->sq_cmds_is_io = true;
- }
-
- if (!nvmeq->sq_cmds) {
- nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
- &nvmeq->sq_dma_addr, GFP_KERNEL);
- nvmeq->sq_cmds_is_io = false;
+ if (nvmeq->sq_dma_addr) {
+ set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
+ return 0;
+ }
}
+ nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
if (!nvmeq->sq_cmds)
return -ENOMEM;
return 0;
@@ -1375,7 +1496,7 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
nvmeq->q_dmadev = dev->dev;
nvmeq->dev = dev;
spin_lock_init(&nvmeq->sq_lock);
- spin_lock_init(&nvmeq->cq_lock);
+ spin_lock_init(&nvmeq->cq_poll_lock);
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
@@ -1411,28 +1532,34 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
{
struct nvme_dev *dev = nvmeq->dev;
- spin_lock_irq(&nvmeq->cq_lock);
nvmeq->sq_tail = 0;
+ nvmeq->last_sq_tail = 0;
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
nvme_dbbuf_init(dev, nvmeq, qid);
dev->online_queues++;
- spin_unlock_irq(&nvmeq->cq_lock);
+ wmb(); /* ensure the first interrupt sees the initialization */
}
-static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
+static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
{
struct nvme_dev *dev = nvmeq->dev;
int result;
s16 vector;
+ clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
+
/*
* A queue's vector matches the queue identifier unless the controller
* has only one vector available.
*/
- vector = dev->num_vecs == 1 ? 0 : qid;
+ if (!polled)
+ vector = dev->num_vecs == 1 ? 0 : qid;
+ else
+ vector = -1;
+
result = adapter_alloc_cq(dev, qid, nvmeq, vector);
if (result)
return result;
@@ -1443,17 +1570,16 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
else if (result)
goto release_cq;
- /*
- * Set cq_vector after alloc cq/sq, otherwise nvme_suspend_queue will
- * invoke free_irq for it and cause a 'Trying to free already-free IRQ
- * xxx' warning if the create CQ/SQ command times out.
- */
nvmeq->cq_vector = vector;
nvme_init_queue(nvmeq, qid);
- result = queue_request_irq(nvmeq);
- if (result < 0)
- goto release_sq;
+ if (vector != -1) {
+ result = queue_request_irq(nvmeq);
+ if (result < 0)
+ goto release_sq;
+ }
+
+ set_bit(NVMEQ_ENABLED, &nvmeq->flags);
return result;
release_sq:
@@ -1477,6 +1603,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
static const struct blk_mq_ops nvme_mq_ops = {
.queue_rq = nvme_queue_rq,
.complete = nvme_pci_complete_rq,
+ .commit_rqs = nvme_commit_rqs,
.init_hctx = nvme_init_hctx,
.init_request = nvme_init_request,
.map_queues = nvme_pci_map_queues,
@@ -1602,12 +1729,13 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
return result;
}
+ set_bit(NVMEQ_ENABLED, &nvmeq->flags);
return result;
}
static int nvme_create_io_queues(struct nvme_dev *dev)
{
- unsigned i, max;
+ unsigned i, max, rw_queues;
int ret = 0;
for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) {
@@ -1618,8 +1746,17 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
}
max = min(dev->max_qid, dev->ctrl.queue_count - 1);
+ if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
+ rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
+ dev->io_queues[HCTX_TYPE_READ];
+ } else {
+ rw_queues = max;
+ }
+
for (i = dev->online_queues; i <= max; i++) {
- ret = nvme_create_queue(&dev->queues[i], i);
+ bool polled = i > rw_queues;
+
+ ret = nvme_create_queue(&dev->queues[i], i, polled);
if (ret)
break;
}
@@ -1891,6 +2028,110 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
return ret;
}
+static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
+{
+ unsigned int this_w_queues = write_queues;
+
+ /*
+ * Setup read/write queue split
+ */
+ if (irq_queues == 1) {
+ dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
+ dev->io_queues[HCTX_TYPE_READ] = 0;
+ return;
+ }
+
+ /*
+ * If 'write_queues' is set, ensure it leaves room for at least
+ * one read queue
+ */
+ if (this_w_queues >= irq_queues)
+ this_w_queues = irq_queues - 1;
+
+ /*
+ * If 'write_queues' is set to zero, reads and writes will share
+ * a queue set.
+ */
+ if (!this_w_queues) {
+ dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues;
+ dev->io_queues[HCTX_TYPE_READ] = 0;
+ } else {
+ dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
+ dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues;
+ }
+}
+
+static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ int irq_sets[2];
+ struct irq_affinity affd = {
+ .pre_vectors = 1,
+ .nr_sets = ARRAY_SIZE(irq_sets),
+ .sets = irq_sets,
+ };
+ int result = 0;
+ unsigned int irq_queues, this_p_queues;
+
+ /*
+ * Poll queues don't need interrupts, but we need at least one IO
+ * queue left over for non-polled IO.
+ */
+ this_p_queues = poll_queues;
+ if (this_p_queues >= nr_io_queues) {
+ this_p_queues = nr_io_queues - 1;
+ irq_queues = 1;
+ } else {
+ irq_queues = nr_io_queues - this_p_queues;
+ }
+ dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
+
+ /*
+ * For irq sets, we have to ask for minvec == maxvec. This passes
+ * any reduction back to us, so we can adjust our queue counts and
+ * IRQ vector needs.
+ */
+ do {
+ nvme_calc_io_queues(dev, irq_queues);
+ irq_sets[0] = dev->io_queues[HCTX_TYPE_DEFAULT];
+ irq_sets[1] = dev->io_queues[HCTX_TYPE_READ];
+ if (!irq_sets[1])
+ affd.nr_sets = 1;
+
+		/*
+		 * Add one vector for the admin queue, unless an earlier
+		 * attempt failed and we're down to asking for just 1 + 1
+		 * queues; then ask for a single vector and share it between
+		 * the lone IO queue and the admin queue.
+		 */
+ if (result >= 0 && irq_queues > 1)
+ irq_queues = irq_sets[0] + irq_sets[1] + 1;
+
+ result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
+ irq_queues,
+ PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+
+ /*
+ * Need to reduce our vec counts. If we get ENOSPC, the
+	 * platform should support multiple vecs, we just need
+ * to decrease our ask. If we get EINVAL, the platform
+ * likely does not. Back down to ask for just one vector.
+ */
+ if (result == -ENOSPC) {
+ irq_queues--;
+ if (!irq_queues)
+ return result;
+ continue;
+ } else if (result == -EINVAL) {
+ irq_queues = 1;
+ continue;
+ } else if (result <= 0)
+ return -EIO;
+ break;
+ } while (1);
+
+ return result;
+}
+
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
struct nvme_queue *adminq = &dev->queues[0];
@@ -1898,17 +2139,15 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
int result, nr_io_queues;
unsigned long size;
- struct irq_affinity affd = {
- .pre_vectors = 1
- };
-
- nr_io_queues = num_possible_cpus();
+ nr_io_queues = max_io_queues();
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
return result;
if (nr_io_queues == 0)
return 0;
+
+ clear_bit(NVMEQ_ENABLED, &adminq->flags);
if (dev->cmb_use_sqes) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
@@ -1937,12 +2176,19 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
* setting up the full range we need.
*/
pci_free_irq_vectors(pdev);
- result = pci_alloc_irq_vectors_affinity(pdev, 1, nr_io_queues + 1,
- PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
+
+ result = nvme_setup_irqs(dev, nr_io_queues);
if (result <= 0)
return -EIO;
+
dev->num_vecs = result;
- dev->max_qid = max(result - 1, 1);
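+	/* one vector is used by the admin queue; poll queues need none at all */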
+ result = max(result - 1, 1);
+ dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
+
+ dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n",
+ dev->io_queues[HCTX_TYPE_DEFAULT],
+ dev->io_queues[HCTX_TYPE_READ],
+ dev->io_queues[HCTX_TYPE_POLL]);
/*
* Should investigate if there's a performance win from allocating
@@ -1956,6 +2202,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
adminq->cq_vector = -1;
return result;
}
+ set_bit(NVMEQ_ENABLED, &adminq->flags);
return nvme_create_io_queues(dev);
}
@@ -1964,23 +2211,15 @@ static void nvme_del_queue_end(struct request *req, blk_status_t error)
struct nvme_queue *nvmeq = req->end_io_data;
blk_mq_free_request(req);
- complete(&nvmeq->dev->ioq_wait);
+ complete(&nvmeq->delete_done);
}
static void nvme_del_cq_end(struct request *req, blk_status_t error)
{
struct nvme_queue *nvmeq = req->end_io_data;
- u16 start, end;
-
- if (!error) {
- unsigned long flags;
- spin_lock_irqsave(&nvmeq->cq_lock, flags);
- nvme_process_cq(nvmeq, &start, &end, -1);
- spin_unlock_irqrestore(&nvmeq->cq_lock, flags);
-
- nvme_complete_cqes(nvmeq, start, end);
- }
+ if (error)
+ set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
nvme_del_queue_end(req, error);
}
@@ -2002,37 +2241,44 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
req->timeout = ADMIN_TIMEOUT;
req->end_io_data = nvmeq;
+ init_completion(&nvmeq->delete_done);
blk_execute_rq_nowait(q, NULL, req, false,
opcode == nvme_admin_delete_cq ?
nvme_del_cq_end : nvme_del_queue_end);
return 0;
}
-static void nvme_disable_io_queues(struct nvme_dev *dev)
+static bool nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
{
- int pass, queues = dev->online_queues - 1;
+ int nr_queues = dev->online_queues - 1, sent = 0;
unsigned long timeout;
- u8 opcode = nvme_admin_delete_sq;
-
- for (pass = 0; pass < 2; pass++) {
- int sent = 0, i = queues;
- reinit_completion(&dev->ioq_wait);
retry:
- timeout = ADMIN_TIMEOUT;
- for (; i > 0; i--, sent++)
- if (nvme_delete_queue(&dev->queues[i], opcode))
- break;
-
- while (sent--) {
- timeout = wait_for_completion_io_timeout(&dev->ioq_wait, timeout);
- if (timeout == 0)
- return;
- if (i)
- goto retry;
- }
- opcode = nvme_admin_delete_cq;
+ timeout = ADMIN_TIMEOUT;
+ while (nr_queues > 0) {
+ if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
+ break;
+ nr_queues--;
+ sent++;
}
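+	/* now reap completions for the deletions we managed to post */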
+ while (sent) {
+ struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];
+
+ timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
+ timeout);
+ if (timeout == 0)
+ return false;
+
+ /* handle any remaining CQEs */
+ if (opcode == nvme_admin_delete_cq &&
+ !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags))
+ nvme_poll_irqdisable(nvmeq, -1);
+
+ sent--;
+ if (nr_queues)
+ goto retry;
+ }
+ return true;
}
/*
@@ -2045,6 +2291,10 @@ static int nvme_dev_add(struct nvme_dev *dev)
if (!dev->ctrl.tagset) {
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
+		dev->tagset.nr_maps = 2; /* default + read */
+		if (dev->io_queues[HCTX_TYPE_POLL])
+			dev->tagset.nr_maps++;
dev->tagset.timeout = NVME_IO_TIMEOUT;
dev->tagset.numa_node = dev_to_node(dev->dev);
dev->tagset.queue_depth =
@@ -2187,7 +2437,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_stop_queues(&dev->ctrl);
if (!dead && dev->ctrl.queue_count > 0) {
- nvme_disable_io_queues(dev);
+ if (nvme_disable_io_queues(dev, nvme_admin_delete_sq))
+ nvme_disable_io_queues(dev, nvme_admin_delete_cq);
nvme_disable_admin_queue(dev, shutdown);
}
for (i = dev->ctrl.queue_count - 1; i >= 0; i--)
@@ -2491,8 +2742,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!dev)
return -ENOMEM;
- dev->queues = kcalloc_node(num_possible_cpus() + 1,
- sizeof(struct nvme_queue), GFP_KERNEL, node);
+ dev->queues = kcalloc_node(max_queue_count(), sizeof(struct nvme_queue),
+ GFP_KERNEL, node);
if (!dev->queues)
goto free;
@@ -2506,7 +2757,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
mutex_init(&dev->shutdown_lock);
- init_completion(&dev->ioq_wait);
result = nvme_setup_prp_pools(dev);
if (result)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d181cafedc58..0a2fd2949ad7 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -162,6 +162,13 @@ static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
return queue - queue->ctrl->queues;
}
+static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
+{
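+	/* queues past the default and write sets are dedicated poll queues */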
+ return nvme_rdma_queue_idx(queue) >
+ queue->ctrl->ctrl.opts->nr_io_queues +
+ queue->ctrl->ctrl.opts->nr_write_queues;
+}
+
static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
{
return queue->cmnd_capsule_len - sizeof(struct nvme_command);
@@ -184,6 +191,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
if (ib_dma_mapping_error(ibdev, qe->dma)) {
kfree(qe->data);
+ qe->data = NULL;
return -ENOMEM;
}
@@ -439,6 +447,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
const int send_wr_factor = 3; /* MR, SEND, INV */
const int cq_factor = send_wr_factor + 1; /* + RECV */
int comp_vector, idx = nvme_rdma_queue_idx(queue);
+ enum ib_poll_context poll_ctx;
int ret;
queue->device = nvme_rdma_find_get_device(queue->cm_id);
@@ -455,10 +464,16 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
*/
comp_vector = idx == 0 ? idx : idx - 1;
+ /* Polling queues need direct cq polling context */
+ if (nvme_rdma_poll_queue(queue))
+ poll_ctx = IB_POLL_DIRECT;
+ else
+ poll_ctx = IB_POLL_SOFTIRQ;
+
/* +1 for ib_stop_cq */
queue->ib_cq = ib_alloc_cq(ibdev, queue,
cq_factor * queue->queue_size + 1,
- comp_vector, IB_POLL_SOFTIRQ);
+ comp_vector, poll_ctx);
if (IS_ERR(queue->ib_cq)) {
ret = PTR_ERR(queue->ib_cq);
goto out_put_dev;
@@ -594,15 +609,17 @@ static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
{
+ struct nvme_rdma_queue *queue = &ctrl->queues[idx];
+ bool poll = nvme_rdma_poll_queue(queue);
int ret;
if (idx)
- ret = nvmf_connect_io_queue(&ctrl->ctrl, idx);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll);
else
ret = nvmf_connect_admin_queue(&ctrl->ctrl);
if (!ret)
- set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[idx].flags);
+ set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
else
dev_info(ctrl->ctrl.device,
"failed to connect queue: %d ret=%d\n", idx, ret);
@@ -644,6 +661,9 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
nr_io_queues = min_t(unsigned int, nr_io_queues,
ibdev->num_comp_vectors);
+ nr_io_queues += min(opts->nr_write_queues, num_online_cpus());
+ nr_io_queues += min(opts->nr_poll_queues, num_online_cpus());
+
ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret)
return ret;
@@ -693,7 +713,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set->ops = &nvme_rdma_admin_mq_ops;
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
set->reserved_tags = 2; /* connect + keep-alive */
- set->numa_node = NUMA_NO_NODE;
+ set->numa_node = nctrl->numa_node;
set->cmd_size = sizeof(struct nvme_rdma_request) +
SG_CHUNK_SIZE * sizeof(struct scatterlist);
set->driver_data = ctrl;
@@ -706,13 +726,14 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set->ops = &nvme_rdma_mq_ops;
set->queue_depth = nctrl->sqsize + 1;
set->reserved_tags = 1; /* fabric connect */
- set->numa_node = NUMA_NO_NODE;
+ set->numa_node = nctrl->numa_node;
set->flags = BLK_MQ_F_SHOULD_MERGE;
set->cmd_size = sizeof(struct nvme_rdma_request) +
SG_CHUNK_SIZE * sizeof(struct scatterlist);
set->driver_data = ctrl;
set->nr_hw_queues = nctrl->queue_count - 1;
set->timeout = NVME_IO_TIMEOUT;
+ set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
}
ret = blk_mq_alloc_tag_set(set);
@@ -762,6 +783,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
return error;
ctrl->device = ctrl->queues[0].device;
+ ctrl->ctrl.numa_node = dev_to_node(ctrl->device->dev->dma_device);
ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
@@ -823,6 +845,7 @@ out_free_tagset:
out_free_async_qe:
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
+ ctrl->async_event_sqe.data = NULL;
out_free_queue:
nvme_rdma_free_queue(&ctrl->queues[0]);
return error;
@@ -1409,12 +1432,11 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
WARN_ON_ONCE(ret);
}
-static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
- struct nvme_completion *cqe, struct ib_wc *wc, int tag)
+static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
+ struct nvme_completion *cqe, struct ib_wc *wc)
{
struct request *rq;
struct nvme_rdma_request *req;
- int ret = 0;
rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
if (!rq) {
@@ -1422,7 +1444,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
"tag 0x%x on QP %#x not found\n",
cqe->command_id, queue->qp->qp_num);
nvme_rdma_error_recovery(queue->ctrl);
- return ret;
+ return;
}
req = blk_mq_rq_to_pdu(rq);
@@ -1437,6 +1459,8 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
nvme_rdma_error_recovery(queue->ctrl);
}
} else if (req->mr) {
+ int ret;
+
ret = nvme_rdma_inv_rkey(queue, req);
if (unlikely(ret < 0)) {
dev_err(queue->ctrl->ctrl.device,
@@ -1445,19 +1469,14 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
nvme_rdma_error_recovery(queue->ctrl);
}
/* the local invalidation completion will end the request */
- return 0;
+ return;
}
- if (refcount_dec_and_test(&req->ref)) {
- if (rq->tag == tag)
- ret = 1;
+ if (refcount_dec_and_test(&req->ref))
nvme_end_request(rq, req->status, req->result);
- }
-
- return ret;
}
-static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
+static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvme_rdma_qe *qe =
container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
@@ -1465,11 +1484,10 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
struct ib_device *ibdev = queue->device->dev;
struct nvme_completion *cqe = qe->data;
const size_t len = sizeof(struct nvme_completion);
- int ret = 0;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
nvme_rdma_wr_error(cq, wc, "RECV");
- return 0;
+ return;
}
ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
@@ -1484,16 +1502,10 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
&cqe->result);
else
- ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
+ nvme_rdma_process_nvme_rsp(queue, cqe, wc);
ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
nvme_rdma_post_recv(queue, qe);
- return ret;
-}
-
-static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
-{
- __nvme_rdma_recv_done(cq, wc, -1);
}
static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
@@ -1747,25 +1759,11 @@ err:
return BLK_STS_IOERR;
}
-static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
+static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
{
struct nvme_rdma_queue *queue = hctx->driver_data;
- struct ib_cq *cq = queue->ib_cq;
- struct ib_wc wc;
- int found = 0;
-
- while (ib_poll_cq(cq, 1, &wc) > 0) {
- struct ib_cqe *cqe = wc.wr_cqe;
-
- if (cqe) {
- if (cqe->done == nvme_rdma_recv_done)
- found |= __nvme_rdma_recv_done(cq, &wc, tag);
- else
- cqe->done(cq, &wc);
- }
- }
- return found;
+ return ib_process_cq_direct(queue->ib_cq, -1);
}
static void nvme_rdma_complete_rq(struct request *rq)
@@ -1780,7 +1778,36 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_rdma_ctrl *ctrl = set->driver_data;
- return blk_mq_rdma_map_queues(set, ctrl->device->dev, 0);
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
+ if (ctrl->ctrl.opts->nr_write_queues) {
+ /* separate read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ ctrl->ctrl.opts->nr_write_queues;
+ set->map[HCTX_TYPE_READ].queue_offset =
+ ctrl->ctrl.opts->nr_write_queues;
+ } else {
+ /* mixed read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ ctrl->ctrl.opts->nr_io_queues;
+ set->map[HCTX_TYPE_READ].queue_offset = 0;
+ }
+ blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
+ ctrl->device->dev, 0);
+ blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
+ ctrl->device->dev, 0);
+
+ if (ctrl->ctrl.opts->nr_poll_queues) {
+ set->map[HCTX_TYPE_POLL].nr_queues =
+ ctrl->ctrl.opts->nr_poll_queues;
+ set->map[HCTX_TYPE_POLL].queue_offset =
+ ctrl->ctrl.opts->nr_io_queues;
+ if (ctrl->ctrl.opts->nr_write_queues)
+ set->map[HCTX_TYPE_POLL].queue_offset +=
+ ctrl->ctrl.opts->nr_write_queues;
+ blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
+ }
+ return 0;
}
static const struct blk_mq_ops nvme_rdma_mq_ops = {
@@ -1789,9 +1816,9 @@ static const struct blk_mq_ops nvme_rdma_mq_ops = {
.init_request = nvme_rdma_init_request,
.exit_request = nvme_rdma_exit_request,
.init_hctx = nvme_rdma_init_hctx,
- .poll = nvme_rdma_poll,
.timeout = nvme_rdma_timeout,
.map_queues = nvme_rdma_map_queues,
+ .poll = nvme_rdma_poll,
};
static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
@@ -1936,7 +1963,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
- ctrl->ctrl.queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
+ ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
+ opts->nr_poll_queues + 1;
ctrl->ctrl.sqsize = opts->queue_size - 1;
ctrl->ctrl.kato = opts->kato;
@@ -1987,7 +2015,8 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
.module = THIS_MODULE,
.required_opts = NVMF_OPT_TRADDR,
.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
- NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
+ NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
+ NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES,
.create_ctrl = nvme_rdma_create_ctrl,
};
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
new file mode 100644
index 000000000000..de174912445e
--- /dev/null
+++ b/drivers/nvme/host/tcp.c
@@ -0,0 +1,2278 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics TCP host.
+ * Copyright (c) 2018 Lightbits Labs. All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/nvme-tcp.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <linux/blk-mq.h>
+#include <crypto/hash.h>
+
+#include "nvme.h"
+#include "fabrics.h"
+
+struct nvme_tcp_queue;
+
+enum nvme_tcp_send_state {
+ NVME_TCP_SEND_CMD_PDU = 0,
+ NVME_TCP_SEND_H2C_PDU,
+ NVME_TCP_SEND_DATA,
+ NVME_TCP_SEND_DDGST,
+};
+
+struct nvme_tcp_request {
+ struct nvme_request req;
+ void *pdu;
+ struct nvme_tcp_queue *queue;
+ u32 data_len;
+ u32 pdu_len;
+ u32 pdu_sent;
+ u16 ttag;
+ struct list_head entry;
+ __le32 ddgst;
+
+ struct bio *curr_bio;
+ struct iov_iter iter;
+
+ /* send state */
+ size_t offset;
+ size_t data_sent;
+ enum nvme_tcp_send_state state;
+};
+
+enum nvme_tcp_queue_flags {
+ NVME_TCP_Q_ALLOCATED = 0,
+ NVME_TCP_Q_LIVE = 1,
+};
+
+enum nvme_tcp_recv_state {
+ NVME_TCP_RECV_PDU = 0,
+ NVME_TCP_RECV_DATA,
+ NVME_TCP_RECV_DDGST,
+};
+
+struct nvme_tcp_ctrl;
+struct nvme_tcp_queue {
+ struct socket *sock;
+ struct work_struct io_work;
+ int io_cpu;
+
+ spinlock_t lock;
+ struct list_head send_list;
+
+ /* recv state */
+ void *pdu;
+ int pdu_remaining;
+ int pdu_offset;
+ size_t data_remaining;
+ size_t ddgst_remaining;
+
+ /* send state */
+ struct nvme_tcp_request *request;
+
+ int queue_size;
+ size_t cmnd_capsule_len;
+ struct nvme_tcp_ctrl *ctrl;
+ unsigned long flags;
+ bool rd_enabled;
+
+ bool hdr_digest;
+ bool data_digest;
+ struct ahash_request *rcv_hash;
+ struct ahash_request *snd_hash;
+ __le32 exp_ddgst;
+ __le32 recv_ddgst;
+
+ struct page_frag_cache pf_cache;
+
+ void (*state_change)(struct sock *);
+ void (*data_ready)(struct sock *);
+ void (*write_space)(struct sock *);
+};
+
+struct nvme_tcp_ctrl {
+ /* read only in the hot path */
+ struct nvme_tcp_queue *queues;
+ struct blk_mq_tag_set tag_set;
+
+ /* other member variables */
+ struct list_head list;
+ struct blk_mq_tag_set admin_tag_set;
+ struct sockaddr_storage addr;
+ struct sockaddr_storage src_addr;
+ struct nvme_ctrl ctrl;
+
+ struct work_struct err_work;
+ struct delayed_work connect_work;
+ struct nvme_tcp_request async_req;
+};
+
+static LIST_HEAD(nvme_tcp_ctrl_list);
+static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
+static struct workqueue_struct *nvme_tcp_wq;
+static struct blk_mq_ops nvme_tcp_mq_ops;
+static struct blk_mq_ops nvme_tcp_admin_mq_ops;
+
+static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
+{
+ return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
+}
+
+static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
+{
+ return queue - queue->ctrl->queues;
+}
+
+static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
+{
+ u32 queue_idx = nvme_tcp_queue_id(queue);
+
+ if (queue_idx == 0)
+ return queue->ctrl->admin_tag_set.tags[queue_idx];
+ return queue->ctrl->tag_set.tags[queue_idx - 1];
+}
+
+static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
+{
+ return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
+}
+
+static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
+{
+ return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
+}
+
+static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
+{
+ return queue->cmnd_capsule_len - sizeof(struct nvme_command);
+}
+
+static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
+{
+ return req == &req->queue->ctrl->async_req;
+}
+
+static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
+{
+ struct request *rq;
+ unsigned int bytes;
+
+ if (unlikely(nvme_tcp_async_req(req)))
+ return false; /* async events don't have a request */
+
+ rq = blk_mq_rq_from_pdu(req);
+ bytes = blk_rq_payload_bytes(rq);
+
+ return rq_data_dir(rq) == WRITE && bytes &&
+ bytes <= nvme_tcp_inline_data_size(req->queue);
+}
+
+static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
+{
+ return req->iter.bvec->bv_page;
+}
+
+static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
+{
+ return req->iter.bvec->bv_offset + req->iter.iov_offset;
+}
+
+static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
+{
+ return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
+ req->pdu_len - req->pdu_sent);
+}
+
+static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
+{
+ return req->iter.iov_offset;
+}
+
+static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
+{
+ return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
+ req->pdu_len - req->pdu_sent : 0;
+}
+
+static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
+ int len)
+{
+ return nvme_tcp_pdu_data_left(req) <= len;
+}
+
+static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
+ unsigned int dir)
+{
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ struct bio_vec *vec;
+ unsigned int size;
+ int nsegs;
+ size_t offset;
+
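+	/* requests with a driver-built payload (e.g. discard) use one special vector */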
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ vec = &rq->special_vec;
+ nsegs = 1;
+ size = blk_rq_payload_bytes(rq);
+ offset = 0;
+ } else {
+ struct bio *bio = req->curr_bio;
+
+ vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ nsegs = bio_segments(bio);
+ size = bio->bi_iter.bi_size;
+ offset = bio->bi_iter.bi_bvec_done;
+ }
+
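+	/* iov_iter_bvec() starts at offset 0, so advance into the first bvec by hand */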
+ iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
+ req->iter.iov_offset = offset;
+}
+
+static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
+ int len)
+{
+ req->data_sent += len;
+ req->pdu_sent += len;
+ iov_iter_advance(&req->iter, len);
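+	/* this bio is done but more payload remains: move to the next bio */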
+ if (!iov_iter_count(&req->iter) &&
+ req->data_sent < req->data_len) {
+ req->curr_bio = req->curr_bio->bi_next;
+ nvme_tcp_init_iter(req, WRITE);
+ }
+}
+
+static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
+{
+ struct nvme_tcp_queue *queue = req->queue;
+
+ spin_lock(&queue->lock);
+ list_add_tail(&req->entry, &queue->send_list);
+ spin_unlock(&queue->lock);
+
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+}
+
+static inline struct nvme_tcp_request *
+nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_request *req;
+
+ spin_lock(&queue->lock);
+ req = list_first_entry_or_null(&queue->send_list,
+ struct nvme_tcp_request, entry);
+ if (req)
+ list_del(&req->entry);
+ spin_unlock(&queue->lock);
+
+ return req;
+}
+
+static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
+ __le32 *dgst)
+{
+ ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
+ crypto_ahash_final(hash);
+}
+
+static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
+ struct page *page, off_t off, size_t len)
+{
+ struct scatterlist sg;
+
+ sg_init_marker(&sg, 1);
+ sg_set_page(&sg, page, len, off);
+ ahash_request_set_crypt(hash, &sg, NULL, len);
+ crypto_ahash_update(hash);
+}
+
+static inline void nvme_tcp_hdgst(struct ahash_request *hash,
+ void *pdu, size_t len)
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, pdu, len);
+ ahash_request_set_crypt(hash, &sg, pdu + len, len);
+ crypto_ahash_digest(hash);
+}
+
+static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
+ void *pdu, size_t pdu_len)
+{
+ struct nvme_tcp_hdr *hdr = pdu;
+ __le32 recv_digest;
+ __le32 exp_digest;
+
+ if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d: header digest flag is cleared\n",
+ nvme_tcp_queue_id(queue));
+ return -EPROTO;
+ }
+
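+	/*
+	 * Save the received digest, then recompute it in place over the
+	 * header: nvme_tcp_hdgst() writes its result right after hlen bytes.
+	 */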
+ recv_digest = *(__le32 *)(pdu + hdr->hlen);
+ nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
+ exp_digest = *(__le32 *)(pdu + hdr->hlen);
+ if (recv_digest != exp_digest) {
+ dev_err(queue->ctrl->ctrl.device,
+ "header digest error: recv %#x expected %#x\n",
+ le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
+{
+ struct nvme_tcp_hdr *hdr = pdu;
+ u8 digest_len = nvme_tcp_hdgst_len(queue);
+ u32 len;
+
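+	/* payload length = PDU length minus the header and any header digest */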
+ len = le32_to_cpu(hdr->plen) - hdr->hlen -
+ ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
+
+ if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d: data digest flag is cleared\n",
+ nvme_tcp_queue_id(queue));
+ return -EPROTO;
+ }
+ crypto_ahash_init(queue->rcv_hash);
+
+ return 0;
+}
+
+static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
+ struct request *rq, unsigned int hctx_idx)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+
+ page_frag_free(req->pdu);
+}
+
+static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
+ struct request *rq, unsigned int hctx_idx,
+ unsigned int numa_node)
+{
+ struct nvme_tcp_ctrl *ctrl = set->driver_data;
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
+ struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+
+ req->pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!req->pdu)
+ return -ENOMEM;
+
+ req->queue = queue;
+ nvme_req(rq)->ctrl = &ctrl->ctrl;
+
+ return 0;
+}
+
+static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_tcp_ctrl *ctrl = data;
+ struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+ unsigned int hctx_idx)
+{
+ struct nvme_tcp_ctrl *ctrl = data;
+ struct nvme_tcp_queue *queue = &ctrl->queues[0];
+
+ hctx->driver_data = queue;
+ return 0;
+}
+
+static enum nvme_tcp_recv_state
+nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
+{
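+	/* a partial PDU header takes precedence, then a pending data digest */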
+ return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
+ (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
+ NVME_TCP_RECV_DATA;
+}
+
+static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
+{
+ queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
+ nvme_tcp_hdgst_len(queue);
+ queue->pdu_offset = 0;
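+	/* the payload length is unknown until a c2h data PDU announces it */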
+ queue->data_remaining = -1;
+ queue->ddgst_remaining = 0;
+}
+
+static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
+{
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+ return;
+
+ queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
+}
+
+static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
+ struct nvme_completion *cqe)
+{
+ struct request *rq;
+
+ rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d tag 0x%x not found\n",
+ nvme_tcp_queue_id(queue), cqe->command_id);
+ nvme_tcp_error_recovery(&queue->ctrl->ctrl);
+ return -EINVAL;
+ }
+
+ nvme_end_request(rq, cqe->status, cqe->result);
+
+ return 0;
+}
+
+static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
+ struct nvme_tcp_data_pdu *pdu)
+{
+ struct request *rq;
+
+ rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d tag %#x not found\n",
+ nvme_tcp_queue_id(queue), pdu->command_id);
+ return -ENOENT;
+ }
+
+ if (!blk_rq_payload_bytes(rq)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d tag %#x unexpected data\n",
+ nvme_tcp_queue_id(queue), rq->tag);
+ return -EIO;
+ }
+
+ queue->data_remaining = le32_to_cpu(pdu->data_length);
+
+ return 0;
+}
+
+static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
+ struct nvme_tcp_rsp_pdu *pdu)
+{
+ struct nvme_completion *cqe = &pdu->cqe;
+ int ret = 0;
+
+ /*
+ * AEN requests are special as they don't time out and can
+ * survive any kind of queue freeze and often don't respond to
+ * aborts. We don't even bother to allocate a struct request
+ * for them but rather special case them here.
+ */
+ if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
+ cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+ nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+ &cqe->result);
+ else
+ ret = nvme_tcp_process_nvme_cqe(queue, cqe);
+
+ return ret;
+}
+
+static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
+ struct nvme_tcp_r2t_pdu *pdu)
+{
+ struct nvme_tcp_data_pdu *data = req->pdu;
+ struct nvme_tcp_queue *queue = req->queue;
+ struct request *rq = blk_mq_rq_from_pdu(req);
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+ u8 ddgst = nvme_tcp_ddgst_len(queue);
+
+ req->pdu_len = le32_to_cpu(pdu->r2t_length);
+ req->pdu_sent = 0;
+
+ if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d r2t len %u exceeded data len %u (%zu sent)\n",
+ rq->tag, req->pdu_len, req->data_len,
+ req->data_sent);
+ return -EPROTO;
+ }
+
+ if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d unexpected r2t offset %u (expected %zu)\n",
+ rq->tag, le32_to_cpu(pdu->r2t_offset),
+ req->data_sent);
+ return -EPROTO;
+ }
+
+ memset(data, 0, sizeof(*data));
+ data->hdr.type = nvme_tcp_h2c_data;
+ data->hdr.flags = NVME_TCP_F_DATA_LAST;
+ if (queue->hdr_digest)
+ data->hdr.flags |= NVME_TCP_F_HDGST;
+ if (queue->data_digest)
+ data->hdr.flags |= NVME_TCP_F_DDGST;
+ data->hdr.hlen = sizeof(*data);
+ data->hdr.pdo = data->hdr.hlen + hdgst;
+ data->hdr.plen =
+ cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
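+	/* echo the target's transfer tag so it can correlate the data */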
+ data->ttag = pdu->ttag;
+ data->command_id = rq->tag;
+ data->data_offset = cpu_to_le32(req->data_sent);
+ data->data_length = cpu_to_le32(req->pdu_len);
+ return 0;
+}
+
+static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
+ struct nvme_tcp_r2t_pdu *pdu)
+{
+ struct nvme_tcp_request *req;
+ struct request *rq;
+ int ret;
+
+ rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d tag %#x not found\n",
+ nvme_tcp_queue_id(queue), pdu->command_id);
+ return -ENOENT;
+ }
+ req = blk_mq_rq_to_pdu(rq);
+
+ ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
+ if (unlikely(ret))
+ return ret;
+
+ req->state = NVME_TCP_SEND_H2C_PDU;
+ req->offset = 0;
+
+ nvme_tcp_queue_request(req);
+
+ return 0;
+}
+
+static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
+ unsigned int *offset, size_t *len)
+{
+ struct nvme_tcp_hdr *hdr;
+ char *pdu = queue->pdu;
+ size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
+ int ret;
+
+ ret = skb_copy_bits(skb, *offset,
+ &pdu[queue->pdu_offset], rcv_len);
+ if (unlikely(ret))
+ return ret;
+
+ queue->pdu_remaining -= rcv_len;
+ queue->pdu_offset += rcv_len;
+ *offset += rcv_len;
+ *len -= rcv_len;
+ if (queue->pdu_remaining)
+ return 0;
+
+ hdr = queue->pdu;
+ if (queue->hdr_digest) {
+ ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (queue->data_digest) {
+ ret = nvme_tcp_check_ddgst(queue, queue->pdu);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ switch (hdr->type) {
+ case nvme_tcp_c2h_data:
+ ret = nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
+ break;
+ case nvme_tcp_rsp:
+ nvme_tcp_init_recv_ctx(queue);
+ ret = nvme_tcp_handle_comp(queue, (void *)queue->pdu);
+ break;
+ case nvme_tcp_r2t:
+ nvme_tcp_init_recv_ctx(queue);
+ ret = nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
+ break;
+ default:
+ dev_err(queue->ctrl->ctrl.device,
+ "unsupported pdu type (%d)\n", hdr->type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
+ unsigned int *offset, size_t *len)
+{
+ struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
+ struct nvme_tcp_request *req;
+ struct request *rq;
+
+ rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
+ if (!rq) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d tag %#x not found\n",
+ nvme_tcp_queue_id(queue), pdu->command_id);
+ return -ENOENT;
+ }
+ req = blk_mq_rq_to_pdu(rq);
+
+ while (true) {
+ int recv_len, ret;
+
+ recv_len = min_t(size_t, *len, queue->data_remaining);
+ if (!recv_len)
+ break;
+
+ if (!iov_iter_count(&req->iter)) {
+ req->curr_bio = req->curr_bio->bi_next;
+
+			/*
+			 * If we don't have any bios left it means that the
+			 * controller sent more data than we requested,
+			 * hence error
+			 */
+ if (!req->curr_bio) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d no space in request %#x",
+ nvme_tcp_queue_id(queue), rq->tag);
+ nvme_tcp_init_recv_ctx(queue);
+ return -EIO;
+ }
+ nvme_tcp_init_iter(req, READ);
+ }
+
+ /* we can read only from what is left in this bio */
+ recv_len = min_t(size_t, recv_len,
+ iov_iter_count(&req->iter));
+
+ if (queue->data_digest)
+ ret = skb_copy_and_hash_datagram_iter(skb, *offset,
+ &req->iter, recv_len, queue->rcv_hash);
+ else
+ ret = skb_copy_datagram_iter(skb, *offset,
+ &req->iter, recv_len);
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "queue %d failed to copy request %#x data",
+ nvme_tcp_queue_id(queue), rq->tag);
+ return ret;
+ }
+
+ *len -= recv_len;
+ *offset += recv_len;
+ queue->data_remaining -= recv_len;
+ }
+
+ if (!queue->data_remaining) {
+ if (queue->data_digest) {
+ nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
+ queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
+ } else {
+ nvme_tcp_init_recv_ctx(queue);
+ }
+ }
+
+ return 0;
+}
+
+static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
+ struct sk_buff *skb, unsigned int *offset, size_t *len)
+{
+ char *ddgst = (char *)&queue->recv_ddgst;
+ size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
+ off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
+ int ret;
+
+ ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
+ if (unlikely(ret))
+ return ret;
+
+ queue->ddgst_remaining -= recv_len;
+ *offset += recv_len;
+ *len -= recv_len;
+ if (queue->ddgst_remaining)
+ return 0;
+
+ if (queue->recv_ddgst != queue->exp_ddgst) {
+ dev_err(queue->ctrl->ctrl.device,
+ "data digest error: recv %#x expected %#x\n",
+ le32_to_cpu(queue->recv_ddgst),
+ le32_to_cpu(queue->exp_ddgst));
+ return -EIO;
+ }
+
+ nvme_tcp_init_recv_ctx(queue);
+ return 0;
+}
+
+static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
+ unsigned int offset, size_t len)
+{
+ struct nvme_tcp_queue *queue = desc->arg.data;
+ size_t consumed = len;
+ int result;
+
+ while (len) {
+ switch (nvme_tcp_recv_state(queue)) {
+ case NVME_TCP_RECV_PDU:
+ result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
+ break;
+ case NVME_TCP_RECV_DATA:
+ result = nvme_tcp_recv_data(queue, skb, &offset, &len);
+ break;
+ case NVME_TCP_RECV_DDGST:
+ result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
+ break;
+ default:
+ result = -EFAULT;
+ }
+ if (result) {
+ dev_err(queue->ctrl->ctrl.device,
+ "receive failed: %d\n", result);
+ queue->rd_enabled = false;
+ nvme_tcp_error_recovery(&queue->ctrl->ctrl);
+ return result;
+ }
+ }
+
+ return consumed;
+}
+
+static void nvme_tcp_data_ready(struct sock *sk)
+{
+ struct nvme_tcp_queue *queue;
+
+ read_lock(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (likely(queue && queue->rd_enabled))
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void nvme_tcp_write_space(struct sock *sk)
+{
+ struct nvme_tcp_queue *queue;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (likely(queue && sk_stream_is_writeable(sk))) {
+ clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+ }
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void nvme_tcp_state_change(struct sock *sk)
+{
+ struct nvme_tcp_queue *queue;
+
+ read_lock(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (!queue)
+ goto done;
+
+ switch (sk->sk_state) {
+ case TCP_CLOSE:
+ case TCP_CLOSE_WAIT:
+ case TCP_LAST_ACK:
+ case TCP_FIN_WAIT1:
+ case TCP_FIN_WAIT2:
+		nvme_tcp_error_recovery(&queue->ctrl->ctrl);
+ break;
+ default:
+ dev_info(queue->ctrl->ctrl.device,
+ "queue %d socket state %d\n",
+ nvme_tcp_queue_id(queue), sk->sk_state);
+ }
+
+ queue->state_change(sk);
+done:
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
+{
+ queue->request = NULL;
+}
+
+static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
+{
+ union nvme_result res = {};
+
+ nvme_end_request(blk_mq_rq_from_pdu(req),
+ cpu_to_le16(NVME_SC_DATA_XFER_ERROR), res);
+}
+
+static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
+{
+ struct nvme_tcp_queue *queue = req->queue;
+
+ while (true) {
+ struct page *page = nvme_tcp_req_cur_page(req);
+ size_t offset = nvme_tcp_req_cur_offset(req);
+ size_t len = nvme_tcp_req_cur_length(req);
+ bool last = nvme_tcp_pdu_last_send(req, len);
+ int ret, flags = MSG_DONTWAIT;
+
+ if (last && !queue->data_digest)
+ flags |= MSG_EOR;
+ else
+ flags |= MSG_MORE;
+
+ ret = kernel_sendpage(queue->sock, page, offset, len, flags);
+ if (ret <= 0)
+ return ret;
+
+ nvme_tcp_advance_req(req, ret);
+ if (queue->data_digest)
+ nvme_tcp_ddgst_update(queue->snd_hash, page,
+ offset, ret);
+
+		/* fully successful last write */
+ if (last && ret == len) {
+ if (queue->data_digest) {
+ nvme_tcp_ddgst_final(queue->snd_hash,
+ &req->ddgst);
+ req->state = NVME_TCP_SEND_DDGST;
+ req->offset = 0;
+ } else {
+ nvme_tcp_done_send_req(queue);
+ }
+ return 1;
+ }
+ }
+ return -EAGAIN;
+}
+
+static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
+{
+ struct nvme_tcp_queue *queue = req->queue;
+ struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ bool inline_data = nvme_tcp_has_inline_data(req);
+ int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+ int len = sizeof(*pdu) + hdgst - req->offset;
+ int ret;
+
+ if (queue->hdr_digest && !req->offset)
+ nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+
+ ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
+ offset_in_page(pdu) + req->offset, len, flags);
+ if (unlikely(ret <= 0))
+ return ret;
+
+ len -= ret;
+ if (!len) {
+ if (inline_data) {
+ req->state = NVME_TCP_SEND_DATA;
+ if (queue->data_digest)
+ crypto_ahash_init(queue->snd_hash);
+ nvme_tcp_init_iter(req, WRITE);
+ } else {
+ nvme_tcp_done_send_req(queue);
+ }
+ return 1;
+ }
+ req->offset += ret;
+
+ return -EAGAIN;
+}
+
+static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
+{
+ struct nvme_tcp_queue *queue = req->queue;
+ struct nvme_tcp_data_pdu *pdu = req->pdu;
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+ int len = sizeof(*pdu) - req->offset + hdgst;
+ int ret;
+
+ if (queue->hdr_digest && !req->offset)
+ nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+
+ ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
+ offset_in_page(pdu) + req->offset, len,
+ MSG_DONTWAIT | MSG_MORE);
+ if (unlikely(ret <= 0))
+ return ret;
+
+ len -= ret;
+ if (!len) {
+ req->state = NVME_TCP_SEND_DATA;
+ if (queue->data_digest)
+ crypto_ahash_init(queue->snd_hash);
+ if (!req->data_sent)
+ nvme_tcp_init_iter(req, WRITE);
+ return 1;
+ }
+ req->offset += ret;
+
+ return -EAGAIN;
+}
+
+static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
+{
+ struct nvme_tcp_queue *queue = req->queue;
+ int ret;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
+ struct kvec iov = {
+ .iov_base = &req->ddgst + req->offset,
+ .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
+ };
+
+ ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
+ if (unlikely(ret <= 0))
+ return ret;
+
+ if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
+ nvme_tcp_done_send_req(queue);
+ return 1;
+ }
+
+ req->offset += ret;
+ return -EAGAIN;
+}
+
+static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_request *req;
+ int ret = 1;
+
+ if (!queue->request) {
+ queue->request = nvme_tcp_fetch_request(queue);
+ if (!queue->request)
+ return 0;
+ }
+ req = queue->request;
+
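+	/* walk the send state machine: cmd PDU, optional h2c data PDU, data, digest */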
+ if (req->state == NVME_TCP_SEND_CMD_PDU) {
+ ret = nvme_tcp_try_send_cmd_pdu(req);
+ if (ret <= 0)
+ goto done;
+ if (!nvme_tcp_has_inline_data(req))
+ return ret;
+ }
+
+ if (req->state == NVME_TCP_SEND_H2C_PDU) {
+ ret = nvme_tcp_try_send_data_pdu(req);
+ if (ret <= 0)
+ goto done;
+ }
+
+ if (req->state == NVME_TCP_SEND_DATA) {
+ ret = nvme_tcp_try_send_data(req);
+ if (ret <= 0)
+ goto done;
+ }
+
+ if (req->state == NVME_TCP_SEND_DDGST)
+ ret = nvme_tcp_try_send_ddgst(req);
+done:
+ if (ret == -EAGAIN)
+ ret = 0;
+ return ret;
+}
+
+static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
+{
+ struct sock *sk = queue->sock->sk;
+ read_descriptor_t rd_desc;
+ int consumed;
+
+ rd_desc.arg.data = queue;
+ rd_desc.count = 1;
+ lock_sock(sk);
+ consumed = tcp_read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
+ release_sock(sk);
+ return consumed;
+}
+
+static void nvme_tcp_io_work(struct work_struct *w)
+{
+ struct nvme_tcp_queue *queue =
+ container_of(w, struct nvme_tcp_queue, io_work);
+ unsigned long start = jiffies + msecs_to_jiffies(1);
+
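+	/* bound each pass to roughly 1ms of work, then defer back to the workqueue */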
+ do {
+ bool pending = false;
+ int result;
+
+ result = nvme_tcp_try_send(queue);
+ if (result > 0) {
+ pending = true;
+ } else if (unlikely(result < 0)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to send request %d\n", result);
+ if (result != -EPIPE)
+ nvme_tcp_fail_request(queue->request);
+ nvme_tcp_done_send_req(queue);
+ return;
+ }
+
+ result = nvme_tcp_try_recv(queue);
+ if (result > 0)
+ pending = true;
+
+ if (!pending)
+ return;
+
+	} while (!time_after(jiffies, start)); /* quota is exhausted */
+
+ queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
+}
+
+static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
+
+ ahash_request_free(queue->rcv_hash);
+ ahash_request_free(queue->snd_hash);
+ crypto_free_ahash(tfm);
+}
+
+static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
+{
+ struct crypto_ahash *tfm;
+
+ tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!queue->snd_hash)
+ goto free_tfm;
+ ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
+
+ queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!queue->rcv_hash)
+ goto free_snd_hash;
+ ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
+
+ return 0;
+free_snd_hash:
+ ahash_request_free(queue->snd_hash);
+free_tfm:
+ crypto_free_ahash(tfm);
+ return -ENOMEM;
+}
+
+static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
+{
+ struct nvme_tcp_request *async = &ctrl->async_req;
+
+ page_frag_free(async->pdu);
+}
+
+static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
+{
+ struct nvme_tcp_queue *queue = &ctrl->queues[0];
+ struct nvme_tcp_request *async = &ctrl->async_req;
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+
+ async->pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!async->pdu)
+ return -ENOMEM;
+
+ async->queue = &ctrl->queues[0];
+ return 0;
+}
+
+static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+
+ if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
+ return;
+
+ if (queue->hdr_digest || queue->data_digest)
+ nvme_tcp_free_crypto(queue);
+
+ sock_release(queue->sock);
+ kfree(queue->pdu);
+}
+
+static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
+{
+ struct nvme_tcp_icreq_pdu *icreq;
+ struct nvme_tcp_icresp_pdu *icresp;
+ struct msghdr msg = {};
+ struct kvec iov;
+ bool ctrl_hdgst, ctrl_ddgst;
+ int ret;
+
+ icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
+ if (!icreq)
+ return -ENOMEM;
+
+ icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
+ if (!icresp) {
+ ret = -ENOMEM;
+ goto free_icreq;
+ }
+
+ icreq->hdr.type = nvme_tcp_icreq;
+ icreq->hdr.hlen = sizeof(*icreq);
+ icreq->hdr.pdo = 0;
+ icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
+ icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
+ icreq->maxr2t = 0; /* single inflight r2t supported */
+ icreq->hpda = 0; /* no alignment constraint */
+ if (queue->hdr_digest)
+ icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
+ if (queue->data_digest)
+ icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
+
+ iov.iov_base = icreq;
+ iov.iov_len = sizeof(*icreq);
+ ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
+ if (ret < 0)
+ goto free_icresp;
+
+ memset(&msg, 0, sizeof(msg));
+ iov.iov_base = icresp;
+ iov.iov_len = sizeof(*icresp);
+ ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ iov.iov_len, msg.msg_flags);
+ if (ret < 0)
+ goto free_icresp;
+
+ ret = -EINVAL;
+ if (icresp->hdr.type != nvme_tcp_icresp) {
+ pr_err("queue %d: bad type returned %d\n",
+ nvme_tcp_queue_id(queue), icresp->hdr.type);
+ goto free_icresp;
+ }
+
+ if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
+ pr_err("queue %d: bad pdu length returned %d\n",
+ nvme_tcp_queue_id(queue), icresp->hdr.plen);
+ goto free_icresp;
+ }
+
+ if (icresp->pfv != NVME_TCP_PFV_1_0) {
+ pr_err("queue %d: bad pfv returned %d\n",
+ nvme_tcp_queue_id(queue), icresp->pfv);
+ goto free_icresp;
+ }
+
+ ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
+ if ((queue->data_digest && !ctrl_ddgst) ||
+ (!queue->data_digest && ctrl_ddgst)) {
+ pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
+ nvme_tcp_queue_id(queue),
+ queue->data_digest ? "enabled" : "disabled",
+ ctrl_ddgst ? "enabled" : "disabled");
+ goto free_icresp;
+ }
+
+ ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
+ if ((queue->hdr_digest && !ctrl_hdgst) ||
+ (!queue->hdr_digest && ctrl_hdgst)) {
+ pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
+ nvme_tcp_queue_id(queue),
+ queue->hdr_digest ? "enabled" : "disabled",
+ ctrl_hdgst ? "enabled" : "disabled");
+ goto free_icresp;
+ }
+
+ if (icresp->cpda != 0) {
+ pr_err("queue %d: unsupported cpda returned %d\n",
+ nvme_tcp_queue_id(queue), icresp->cpda);
+ goto free_icresp;
+ }
+
+ ret = 0;
+free_icresp:
+ kfree(icresp);
+free_icreq:
+ kfree(icreq);
+ return ret;
+}
+
+static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
+ int qid, size_t queue_size)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+ struct linger sol = { .l_onoff = 1, .l_linger = 0 };
+ int ret, opt, rcv_pdu_size, n;
+
+ queue->ctrl = ctrl;
+ INIT_LIST_HEAD(&queue->send_list);
+ spin_lock_init(&queue->lock);
+ INIT_WORK(&queue->io_work, nvme_tcp_io_work);
+ queue->queue_size = queue_size;
+
+ if (qid > 0)
+ queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
+ else
+ queue->cmnd_capsule_len = sizeof(struct nvme_command) +
+ NVME_TCP_ADMIN_CCSZ;
+
+ ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
+ IPPROTO_TCP, &queue->sock);
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "failed to create socket: %d\n", ret);
+ return ret;
+ }
+
+ /* Single syn retry */
+ opt = 1;
+ ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
+ (char *)&opt, sizeof(opt));
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "failed to set TCP_SYNCNT sock opt %d\n", ret);
+ goto err_sock;
+ }
+
+ /* Set TCP no delay */
+ opt = 1;
+ ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
+ TCP_NODELAY, (char *)&opt, sizeof(opt));
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "failed to set TCP_NODELAY sock opt %d\n", ret);
+ goto err_sock;
+ }
+
+ /*
+ * Cleanup whatever is sitting in the TCP transmit queue on socket
+ * close. This is done to prevent stale data from being sent should
+ * the network connection be restored before TCP times out.
+ */
+ ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
+ (char *)&sol, sizeof(sol));
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "failed to set SO_LINGER sock opt %d\n", ret);
+ goto err_sock;
+ }
+
+ queue->sock->sk->sk_allocation = GFP_ATOMIC;
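+	/* spread IO queues round-robin over online CPUs; the admin queue gets CPU 0 */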
+ if (!qid)
+ n = 0;
+ else
+ n = (qid - 1) % num_online_cpus();
+ queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ queue->request = NULL;
+ queue->data_remaining = 0;
+ queue->ddgst_remaining = 0;
+ queue->pdu_remaining = 0;
+ queue->pdu_offset = 0;
+ sk_set_memalloc(queue->sock->sk);
+
+ if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
+ ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
+ sizeof(ctrl->src_addr));
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "failed to bind queue %d socket %d\n",
+ qid, ret);
+ goto err_sock;
+ }
+ }
+
+ queue->hdr_digest = nctrl->opts->hdr_digest;
+ queue->data_digest = nctrl->opts->data_digest;
+ if (queue->hdr_digest || queue->data_digest) {
+ ret = nvme_tcp_alloc_crypto(queue);
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "failed to allocate queue %d crypto\n", qid);
+ goto err_sock;
+ }
+ }
+
+ rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
+ nvme_tcp_hdgst_len(queue);
+ queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
+ if (!queue->pdu) {
+ ret = -ENOMEM;
+ goto err_crypto;
+ }
+
+ dev_dbg(ctrl->ctrl.device, "connecting queue %d\n",
+ nvme_tcp_queue_id(queue));
+
+ ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
+ sizeof(ctrl->addr), 0);
+ if (ret) {
+ dev_err(ctrl->ctrl.device,
+ "failed to connect socket: %d\n", ret);
+ goto err_rcv_pdu;
+ }
+
+ ret = nvme_tcp_init_connection(queue);
+ if (ret)
+ goto err_init_connect;
+
+ queue->rd_enabled = true;
+ set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
+ nvme_tcp_init_recv_ctx(queue);
+
+ write_lock_bh(&queue->sock->sk->sk_callback_lock);
+ queue->sock->sk->sk_user_data = queue;
+ queue->state_change = queue->sock->sk->sk_state_change;
+ queue->data_ready = queue->sock->sk->sk_data_ready;
+ queue->write_space = queue->sock->sk->sk_write_space;
+ queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
+ queue->sock->sk->sk_state_change = nvme_tcp_state_change;
+ queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+ write_unlock_bh(&queue->sock->sk->sk_callback_lock);
+
+ return 0;
+
+err_init_connect:
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+err_rcv_pdu:
+ kfree(queue->pdu);
+err_crypto:
+ if (queue->hdr_digest || queue->data_digest)
+ nvme_tcp_free_crypto(queue);
+err_sock:
+ sock_release(queue->sock);
+ queue->sock = NULL;
+ return ret;
+}
+
+static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
+{
+ struct socket *sock = queue->sock;
+
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_user_data = NULL;
+ sock->sk->sk_data_ready = queue->data_ready;
+ sock->sk->sk_state_change = queue->state_change;
+ sock->sk->sk_write_space = queue->write_space;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+}
+
+static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
+{
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ nvme_tcp_restore_sock_calls(queue);
+ cancel_work_sync(&queue->io_work);
+}
+
+static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+
+ if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ return;
+
+ __nvme_tcp_stop_queue(queue);
+}
+
+static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ int ret;
+
+ if (idx)
+ ret = nvmf_connect_io_queue(nctrl, idx, false);
+ else
+ ret = nvmf_connect_admin_queue(nctrl);
+
+ if (!ret) {
+ set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
+ } else {
+ __nvme_tcp_stop_queue(&ctrl->queues[idx]);
+ dev_err(nctrl->device,
+ "failed to connect queue: %d ret=%d\n", idx, ret);
+ }
+ return ret;
+}
+
+static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
+ bool admin)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct blk_mq_tag_set *set;
+ int ret;
+
+ if (admin) {
+ set = &ctrl->admin_tag_set;
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_tcp_admin_mq_ops;
+ set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
+ set->reserved_tags = 2; /* connect + keep-alive */
+ set->numa_node = NUMA_NO_NODE;
+ set->cmd_size = sizeof(struct nvme_tcp_request);
+ set->driver_data = ctrl;
+ set->nr_hw_queues = 1;
+ set->timeout = ADMIN_TIMEOUT;
+ } else {
+ set = &ctrl->tag_set;
+ memset(set, 0, sizeof(*set));
+ set->ops = &nvme_tcp_mq_ops;
+ set->queue_depth = nctrl->sqsize + 1;
+ set->reserved_tags = 1; /* fabric connect */
+ set->numa_node = NUMA_NO_NODE;
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ set->cmd_size = sizeof(struct nvme_tcp_request);
+ set->driver_data = ctrl;
+ set->nr_hw_queues = nctrl->queue_count - 1;
+ set->timeout = NVME_IO_TIMEOUT;
+ set->nr_maps = 2 /* default + read */;
+ }
+
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return set;
+}
+
+static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
+{
+ if (to_tcp_ctrl(ctrl)->async_req.pdu) {
+ nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
+ to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
+ }
+
+ nvme_tcp_free_queue(ctrl, 0);
+}
+
+static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_tcp_free_queue(ctrl, i);
+}
+
+static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
+{
+ int i;
+
+ for (i = 1; i < ctrl->queue_count; i++)
+ nvme_tcp_stop_queue(ctrl, i);
+}
+
+static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
+{
+ int i, ret = 0;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvme_tcp_start_queue(ctrl, i);
+ if (ret)
+ goto out_stop_queues;
+ }
+
+ return 0;
+
+out_stop_queues:
+ for (i--; i >= 1; i--)
+ nvme_tcp_stop_queue(ctrl, i);
+ return ret;
+}
+
+static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
+{
+ int ret;
+
+ ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
+ if (ret)
+ return ret;
+
+ ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
+ if (ret)
+ goto out_free_queue;
+
+ return 0;
+
+out_free_queue:
+ nvme_tcp_free_queue(ctrl, 0);
+ return ret;
+}
+
+static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
+{
+ int i, ret;
+
+ for (i = 1; i < ctrl->queue_count; i++) {
+ ret = nvme_tcp_alloc_queue(ctrl, i,
+ ctrl->sqsize + 1);
+ if (ret)
+ goto out_free_queues;
+ }
+
+ return 0;
+
+out_free_queues:
+ for (i--; i >= 1; i--)
+ nvme_tcp_free_queue(ctrl, i);
+
+ return ret;
+}
+
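+/*
+ * The default and write queue counts are each capped by the number of
+ * online CPUs; the controller may still grant fewer via Set Features
+ * (nvme_set_queue_count() in the caller).
+ */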
+static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
+{
+ unsigned int nr_io_queues;
+
+ nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
+ nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
+
+ return nr_io_queues;
+}
+
+static int nvme_alloc_io_queues(struct nvme_ctrl *ctrl)
+{
+ unsigned int nr_io_queues;
+ int ret;
+
+ nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
+ ret = nvme_set_queue_count(ctrl, &nr_io_queues);
+ if (ret)
+ return ret;
+
+ ctrl->queue_count = nr_io_queues + 1;
+ if (ctrl->queue_count < 2)
+ return 0;
+
+ dev_info(ctrl->device,
+ "creating %d I/O queues.\n", nr_io_queues);
+
+ return nvme_tcp_alloc_io_queues(ctrl);
+}
+
+static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
+{
+ nvme_tcp_stop_io_queues(ctrl);
+ if (remove) {
+ if (ctrl->ops->flags & NVME_F_FABRICS)
+ blk_cleanup_queue(ctrl->connect_q);
+ blk_mq_free_tag_set(ctrl->tagset);
+ }
+ nvme_tcp_free_io_queues(ctrl);
+}
+
+static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
+{
+ int ret;
+
+ ret = nvme_alloc_io_queues(ctrl);
+ if (ret)
+ return ret;
+
+ if (new) {
+ ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
+ if (IS_ERR(ctrl->tagset)) {
+ ret = PTR_ERR(ctrl->tagset);
+ goto out_free_io_queues;
+ }
+
+ if (ctrl->ops->flags & NVME_F_FABRICS) {
+ ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
+ if (IS_ERR(ctrl->connect_q)) {
+ ret = PTR_ERR(ctrl->connect_q);
+ goto out_free_tag_set;
+ }
+ }
+ } else {
+ blk_mq_update_nr_hw_queues(ctrl->tagset,
+ ctrl->queue_count - 1);
+ }
+
+ ret = nvme_tcp_start_io_queues(ctrl);
+ if (ret)
+ goto out_cleanup_connect_q;
+
+ return 0;
+
+out_cleanup_connect_q:
+ if (new && (ctrl->ops->flags & NVME_F_FABRICS))
+ blk_cleanup_queue(ctrl->connect_q);
+out_free_tag_set:
+ if (new)
+ blk_mq_free_tag_set(ctrl->tagset);
+out_free_io_queues:
+ nvme_tcp_free_io_queues(ctrl);
+ return ret;
+}
+
+static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
+{
+ nvme_tcp_stop_queue(ctrl, 0);
+ if (remove) {
+ free_opal_dev(ctrl->opal_dev);
+ blk_cleanup_queue(ctrl->admin_q);
+ blk_mq_free_tag_set(ctrl->admin_tagset);
+ }
+ nvme_tcp_free_admin_queue(ctrl);
+}
+
+static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
+{
+ int error;
+
+ error = nvme_tcp_alloc_admin_queue(ctrl);
+ if (error)
+ return error;
+
+ if (new) {
+ ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
+ if (IS_ERR(ctrl->admin_tagset)) {
+ error = PTR_ERR(ctrl->admin_tagset);
+ goto out_free_queue;
+ }
+
+ ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
+ if (IS_ERR(ctrl->admin_q)) {
+ error = PTR_ERR(ctrl->admin_q);
+ goto out_free_tagset;
+ }
+ }
+
+ error = nvme_tcp_start_queue(ctrl, 0);
+ if (error)
+ goto out_cleanup_queue;
+
+ error = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
+ if (error) {
+ dev_err(ctrl->device,
+ "prop_get NVME_REG_CAP failed\n");
+ goto out_stop_queue;
+ }
+
+ ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
+
+ error = nvme_enable_ctrl(ctrl, ctrl->cap);
+ if (error)
+ goto out_stop_queue;
+
+ error = nvme_init_identify(ctrl);
+ if (error)
+ goto out_stop_queue;
+
+ return 0;
+
+out_stop_queue:
+ nvme_tcp_stop_queue(ctrl, 0);
+out_cleanup_queue:
+ if (new)
+ blk_cleanup_queue(ctrl->admin_q);
+out_free_tagset:
+ if (new)
+ blk_mq_free_tag_set(ctrl->admin_tagset);
+out_free_queue:
+ nvme_tcp_free_admin_queue(ctrl);
+ return error;
+}
+
+static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
+ bool remove)
+{
+ blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_tcp_stop_queue(ctrl, 0);
+ blk_mq_tagset_busy_iter(ctrl->admin_tagset, nvme_cancel_request, ctrl);
+ blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_tcp_destroy_admin_queue(ctrl, remove);
+}
+
+static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
+ bool remove)
+{
+ if (ctrl->queue_count <= 1)
+ return;
+ nvme_stop_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
+ blk_mq_tagset_busy_iter(ctrl->tagset, nvme_cancel_request, ctrl);
+ if (remove)
+ nvme_start_queues(ctrl);
+ nvme_tcp_destroy_io_queues(ctrl, remove);
+}
+
+static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
+{
+ /* If we are resetting/deleting then do nothing */
+ if (ctrl->state != NVME_CTRL_CONNECTING) {
+ WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
+ ctrl->state == NVME_CTRL_LIVE);
+ return;
+ }
+
+ if (nvmf_should_reconnect(ctrl)) {
+ dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
+ ctrl->opts->reconnect_delay);
+ queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
+ ctrl->opts->reconnect_delay * HZ);
+ } else {
+ dev_info(ctrl->device, "Removing controller...\n");
+ nvme_delete_ctrl(ctrl);
+ }
+}
+
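+/*
+ * Bring up (or re-establish) the controller: admin queue first, then
+ * clamp queue sizing against what the controller reported, then the I/O
+ * queues, and finally transition to LIVE.
+ */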
+static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
+{
+ struct nvmf_ctrl_options *opts = ctrl->opts;
+ int ret = -EINVAL;
+
+ ret = nvme_tcp_configure_admin_queue(ctrl, new);
+ if (ret)
+ return ret;
+
+ if (ctrl->icdoff) {
+ dev_err(ctrl->device, "icdoff is not supported!\n");
+ ret = -EINVAL;
+ goto destroy_admin;
+ }
+
+ if (opts->queue_size > ctrl->sqsize + 1)
+ dev_warn(ctrl->device,
+ "queue_size %zu > ctrl sqsize %u, clamping down\n",
+ opts->queue_size, ctrl->sqsize + 1);
+
+ if (ctrl->sqsize + 1 > ctrl->maxcmd) {
+ dev_warn(ctrl->device,
+ "sqsize %u > ctrl maxcmd %u, clamping down\n",
+ ctrl->sqsize + 1, ctrl->maxcmd);
+ ctrl->sqsize = ctrl->maxcmd - 1;
+ }
+
+ if (ctrl->queue_count > 1) {
+ ret = nvme_tcp_configure_io_queues(ctrl, new);
+ if (ret)
+ goto destroy_admin;
+ }
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
+ /* state change failure is ok if we're in DELETING state */
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ ret = -EINVAL;
+ goto destroy_io;
+ }
+
+ nvme_start_ctrl(ctrl);
+ return 0;
+
+destroy_io:
+ if (ctrl->queue_count > 1)
+ nvme_tcp_destroy_io_queues(ctrl, new);
+destroy_admin:
+ nvme_tcp_stop_queue(ctrl, 0);
+ nvme_tcp_destroy_admin_queue(ctrl, new);
+ return ret;
+}
+
+static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
+{
+ struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
+ struct nvme_tcp_ctrl, connect_work);
+ struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+
+ ++ctrl->nr_reconnects;
+
+ if (nvme_tcp_setup_ctrl(ctrl, false))
+ goto requeue;
+
+ dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
+ ctrl->nr_reconnects);
+
+ ctrl->nr_reconnects = 0;
+
+ return;
+
+requeue:
+ dev_info(ctrl->device, "Failed reconnect attempt %d\n",
+ ctrl->nr_reconnects);
+ nvme_tcp_reconnect_or_remove(ctrl);
+}
+
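+/*
+ * Error recovery: stop keep-alive, tear down all queues while failing
+ * pending requests fast, then either schedule a reconnect or delete the
+ * controller depending on its state.
+ */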
+static void nvme_tcp_error_recovery_work(struct work_struct *work)
+{
+ struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
+ struct nvme_tcp_ctrl, err_work);
+ struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
+
+ nvme_stop_keep_alive(ctrl);
+ nvme_tcp_teardown_io_queues(ctrl, false);
+ /* unquiesce to fast-fail pending requests */
+ nvme_start_queues(ctrl);
+ nvme_tcp_teardown_admin_queue(ctrl, false);
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
+ /* state change failure is ok if we're in DELETING state */
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ return;
+ }
+
+ nvme_tcp_reconnect_or_remove(ctrl);
+}
+
+static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
+{
+ nvme_tcp_teardown_io_queues(ctrl, shutdown);
+ if (shutdown)
+ nvme_shutdown_ctrl(ctrl);
+ else
+ nvme_disable_ctrl(ctrl, ctrl->cap);
+ nvme_tcp_teardown_admin_queue(ctrl, shutdown);
+}
+
+static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
+{
+ nvme_tcp_teardown_ctrl(ctrl, true);
+}
+
+static void nvme_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_ctrl *ctrl =
+ container_of(work, struct nvme_ctrl, reset_work);
+
+ nvme_stop_ctrl(ctrl);
+ nvme_tcp_teardown_ctrl(ctrl, false);
+
+ if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
+ /* state change failure is ok if we're in DELETING state */
+ WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
+ return;
+ }
+
+ if (nvme_tcp_setup_ctrl(ctrl, false))
+ goto out_fail;
+
+ return;
+
+out_fail:
+ ++ctrl->nr_reconnects;
+ nvme_tcp_reconnect_or_remove(ctrl);
+}
+
+static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
+{
+ cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
+ cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
+}
+
+static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+
+ if (list_empty(&ctrl->list))
+ goto free_ctrl;
+
+ mutex_lock(&nvme_tcp_ctrl_mutex);
+ list_del(&ctrl->list);
+ mutex_unlock(&nvme_tcp_ctrl_mutex);
+
+ nvmf_free_options(nctrl->opts);
+free_ctrl:
+ kfree(ctrl->queues);
+ kfree(ctrl);
+}
+
+static void nvme_tcp_set_sg_null(struct nvme_command *c)
+{
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+
+ sg->addr = 0;
+ sg->length = 0;
+ sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
+ NVME_SGL_FMT_TRANSPORT_A;
+}
+
+static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
+ struct nvme_command *c, u32 data_len)
+{
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+
+ sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
+ sg->length = cpu_to_le32(data_len);
+ sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
+}
+
+static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
+ u32 data_len)
+{
+ struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
+
+ sg->addr = 0;
+ sg->length = cpu_to_le32(data_len);
+ sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
+ NVME_SGL_FMT_TRANSPORT_A;
+}
+
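+/*
+ * Async event commands use a dedicated pre-allocated request outside the
+ * blk-mq tagset; its command_id (NVME_AQ_BLK_MQ_DEPTH) is a value the
+ * admin tagset never hands out, so completions cannot collide with a tag.
+ */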
+static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
+ struct nvme_tcp_queue *queue = &ctrl->queues[0];
+ struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
+ struct nvme_command *cmd = &pdu->cmd;
+ u8 hdgst = nvme_tcp_hdgst_len(queue);
+
+ memset(pdu, 0, sizeof(*pdu));
+ pdu->hdr.type = nvme_tcp_cmd;
+ if (queue->hdr_digest)
+ pdu->hdr.flags |= NVME_TCP_F_HDGST;
+ pdu->hdr.hlen = sizeof(*pdu);
+ pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
+
+ cmd->common.opcode = nvme_admin_async_event;
+ cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
+ cmd->common.flags |= NVME_CMD_SGL_METABUF;
+ nvme_tcp_set_sg_null(cmd);
+
+ ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
+ ctrl->async_req.offset = 0;
+ ctrl->async_req.curr_bio = NULL;
+ ctrl->async_req.data_len = 0;
+
+ nvme_tcp_queue_request(&ctrl->async_req);
+}
+
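+/*
+ * Request timeout: if the controller is not LIVE, complete the request
+ * with an abort status; otherwise trigger error recovery and re-arm the
+ * timer so recovery gets a chance to complete it.
+ */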
+static enum blk_eh_timer_return
+nvme_tcp_timeout(struct request *rq, bool reserved)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
+ struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+
+ dev_dbg(ctrl->ctrl.device,
+ "queue %d: timeout request %#x type %d\n",
+ nvme_tcp_queue_id(req->queue), rq->tag,
+ pdu->hdr.type);
+
+ if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+ union nvme_result res = {};
+
+ nvme_req(rq)->flags |= NVME_REQ_CANCELLED;
+ nvme_end_request(rq, cpu_to_le16(NVME_SC_ABORT_REQ), res);
+ return BLK_EH_DONE;
+ }
+
+ /* queue error recovery */
+ nvme_tcp_error_recovery(&ctrl->ctrl);
+
+ return BLK_EH_RESET_TIMER;
+}
+
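+/*
+ * Writes small enough for in-capsule data are described by an offset SGL
+ * and sent inline with the command PDU; everything else uses a transport
+ * SGL and moves in separate data PDUs.
+ */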
+static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
+ struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ struct nvme_command *c = &pdu->cmd;
+
+ c->common.flags |= NVME_CMD_SGL_METABUF;
+
+ if (rq_data_dir(rq) == WRITE && req->data_len &&
+ req->data_len <= nvme_tcp_inline_data_size(queue))
+ nvme_tcp_set_sg_inline(queue, c, req->data_len);
+ else
+ nvme_tcp_set_sg_host_data(c, req->data_len);
+
+ return 0;
+}
+
+static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
+ struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ struct nvme_tcp_queue *queue = req->queue;
+ u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
+ blk_status_t ret;
+
+ ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
+ if (ret)
+ return ret;
+
+ req->state = NVME_TCP_SEND_CMD_PDU;
+ req->offset = 0;
+ req->data_sent = 0;
+ req->pdu_len = 0;
+ req->pdu_sent = 0;
+ req->data_len = blk_rq_payload_bytes(rq);
+ req->curr_bio = rq->bio;
+
+ if (rq_data_dir(rq) == WRITE &&
+ req->data_len <= nvme_tcp_inline_data_size(queue))
+ req->pdu_len = req->data_len;
+ else if (req->curr_bio)
+ nvme_tcp_init_iter(req, READ);
+
+ pdu->hdr.type = nvme_tcp_cmd;
+ pdu->hdr.flags = 0;
+ if (queue->hdr_digest)
+ pdu->hdr.flags |= NVME_TCP_F_HDGST;
+ if (queue->data_digest && req->pdu_len) {
+ pdu->hdr.flags |= NVME_TCP_F_DDGST;
+ ddgst = nvme_tcp_ddgst_len(queue);
+ }
+ pdu->hdr.hlen = sizeof(*pdu);
+ pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
+ pdu->hdr.plen =
+ cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
+
+ ret = nvme_tcp_map_data(queue, rq);
+ if (unlikely(ret)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Failed to map data (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
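+/*
+ * queue_rq: fail commands early while the queue is not live (fabrics
+ * connect still outstanding), set up the command PDU, then queue the
+ * request for transmission by the queue's io_work.
+ */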
+static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_tcp_queue *queue = hctx->driver_data;
+ struct request *rq = bd->rq;
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
+ blk_status_t ret;
+
+ if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
+ return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
+
+ ret = nvme_tcp_setup_cmd_pdu(ns, rq);
+ if (unlikely(ret))
+ return ret;
+
+ blk_mq_start_request(rq);
+
+ nvme_tcp_queue_request(req);
+
+ return BLK_STS_OK;
+}
+
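+/*
+ * When nr_write_queues is set, writes (the default map) and reads get
+ * disjoint hardware queue ranges, with the read queues offset behind the
+ * write queues; otherwise both maps share the same queues from offset 0.
+ */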
+static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nvme_tcp_ctrl *ctrl = set->driver_data;
+
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
+ if (ctrl->ctrl.opts->nr_write_queues) {
+ /* separate read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ ctrl->ctrl.opts->nr_write_queues;
+ set->map[HCTX_TYPE_READ].queue_offset =
+ ctrl->ctrl.opts->nr_write_queues;
+ } else {
+ /* mixed read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ ctrl->ctrl.opts->nr_io_queues;
+ set->map[HCTX_TYPE_READ].queue_offset = 0;
+ }
+ blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
+ return 0;
+}
+
+static struct blk_mq_ops nvme_tcp_mq_ops = {
+ .queue_rq = nvme_tcp_queue_rq,
+ .complete = nvme_complete_rq,
+ .init_request = nvme_tcp_init_request,
+ .exit_request = nvme_tcp_exit_request,
+ .init_hctx = nvme_tcp_init_hctx,
+ .timeout = nvme_tcp_timeout,
+ .map_queues = nvme_tcp_map_queues,
+};
+
+static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
+ .queue_rq = nvme_tcp_queue_rq,
+ .complete = nvme_complete_rq,
+ .init_request = nvme_tcp_init_request,
+ .exit_request = nvme_tcp_exit_request,
+ .init_hctx = nvme_tcp_init_admin_hctx,
+ .timeout = nvme_tcp_timeout,
+};
+
+static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
+ .name = "tcp",
+ .module = THIS_MODULE,
+ .flags = NVME_F_FABRICS,
+ .reg_read32 = nvmf_reg_read32,
+ .reg_read64 = nvmf_reg_read64,
+ .reg_write32 = nvmf_reg_write32,
+ .free_ctrl = nvme_tcp_free_ctrl,
+ .submit_async_event = nvme_tcp_submit_async_event,
+ .delete_ctrl = nvme_tcp_delete_ctrl,
+ .get_address = nvmf_get_address,
+ .stop_ctrl = nvme_tcp_stop_ctrl,
+};
+
+static bool
+nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
+{
+ struct nvme_tcp_ctrl *ctrl;
+ bool found = false;
+
+ mutex_lock(&nvme_tcp_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
+ found = nvmf_ip_options_match(&ctrl->ctrl, opts);
+ if (found)
+ break;
+ }
+ mutex_unlock(&nvme_tcp_ctrl_mutex);
+
+ return found;
+}
+
+static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
+ struct nvmf_ctrl_options *opts)
+{
+ struct nvme_tcp_ctrl *ctrl;
+ int ret;
+
+ ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ctrl->list);
+ ctrl->ctrl.opts = opts;
+ ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 1;
+ ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.kato = opts->kato;
+
+ INIT_DELAYED_WORK(&ctrl->connect_work,
+ nvme_tcp_reconnect_ctrl_work);
+ INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
+ INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
+
+ if (!(opts->mask & NVMF_OPT_TRSVCID)) {
+ opts->trsvcid =
+ kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
+ if (!opts->trsvcid) {
+ ret = -ENOMEM;
+ goto out_free_ctrl;
+ }
+ opts->mask |= NVMF_OPT_TRSVCID;
+ }
+
+ ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
+ opts->traddr, opts->trsvcid, &ctrl->addr);
+ if (ret) {
+ pr_err("malformed address passed: %s:%s\n",
+ opts->traddr, opts->trsvcid);
+ goto out_free_ctrl;
+ }
+
+ if (opts->mask & NVMF_OPT_HOST_TRADDR) {
+ ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
+ opts->host_traddr, NULL, &ctrl->src_addr);
+ if (ret) {
+ pr_err("malformed src address passed: %s\n",
+ opts->host_traddr);
+ goto out_free_ctrl;
+ }
+ }
+
+ if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
+ ret = -EALREADY;
+ goto out_free_ctrl;
+ }
+
+ ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
+ GFP_KERNEL);
+ if (!ctrl->queues) {
+ ret = -ENOMEM;
+ goto out_free_ctrl;
+ }
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
+ if (ret)
+ goto out_kfree_queues;
+
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ WARN_ON_ONCE(1);
+ ret = -EINTR;
+ goto out_uninit_ctrl;
+ }
+
+ ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
+ if (ret)
+ goto out_uninit_ctrl;
+
+ dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
+ ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+
+ nvme_get_ctrl(&ctrl->ctrl);
+
+ mutex_lock(&nvme_tcp_ctrl_mutex);
+ list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
+ mutex_unlock(&nvme_tcp_ctrl_mutex);
+
+ return &ctrl->ctrl;
+
+out_uninit_ctrl:
+ nvme_uninit_ctrl(&ctrl->ctrl);
+ nvme_put_ctrl(&ctrl->ctrl);
+ if (ret > 0)
+ ret = -EIO;
+ return ERR_PTR(ret);
+out_kfree_queues:
+ kfree(ctrl->queues);
+out_free_ctrl:
+ kfree(ctrl);
+ return ERR_PTR(ret);
+}
+
+static struct nvmf_transport_ops nvme_tcp_transport = {
+ .name = "tcp",
+ .module = THIS_MODULE,
+ .required_opts = NVMF_OPT_TRADDR,
+ .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
+ NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
+ NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
+ NVMF_OPT_NR_WRITE_QUEUES,
+ .create_ctrl = nvme_tcp_create_ctrl,
+};
+
+static int __init nvme_tcp_init_module(void)
+{
+ nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!nvme_tcp_wq)
+ return -ENOMEM;
+
+ nvmf_register_transport(&nvme_tcp_transport);
+ return 0;
+}
+
+static void __exit nvme_tcp_cleanup_module(void)
+{
+ struct nvme_tcp_ctrl *ctrl;
+
+ nvmf_unregister_transport(&nvme_tcp_transport);
+
+ mutex_lock(&nvme_tcp_ctrl_mutex);
+ list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
+ nvme_delete_ctrl(&ctrl->ctrl);
+ mutex_unlock(&nvme_tcp_ctrl_mutex);
+ flush_workqueue(nvme_delete_wq);
+
+ destroy_workqueue(nvme_tcp_wq);
+}
+
+module_init(nvme_tcp_init_module);
+module_exit(nvme_tcp_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 25b0e310f4a8..5566dda3237a 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -139,3 +139,6 @@ const char *nvme_trace_disk_name(struct trace_seq *p, char *name)
return ret;
}
+EXPORT_SYMBOL_GPL(nvme_trace_disk_name);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(nvme_sq);
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 196d5bd56718..3564120aa7b3 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -115,8 +115,8 @@ TRACE_EVENT(nvme_setup_cmd,
__entry->nsid = le32_to_cpu(cmd->common.nsid);
__entry->metadata = le64_to_cpu(cmd->common.metadata);
__assign_disk_name(__entry->disk, req->rq_disk);
- memcpy(__entry->cdw10, cmd->common.cdw10,
- sizeof(__entry->cdw10));
+ memcpy(__entry->cdw10, &cmd->common.cdw10,
+ 6 * sizeof(__entry->cdw10));
),
TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
__entry->ctrl_id, __print_disk_name(__entry->disk),
@@ -184,6 +184,29 @@ TRACE_EVENT(nvme_async_event,
#undef aer_name
+TRACE_EVENT(nvme_sq,
+ TP_PROTO(struct request *req, __le16 sq_head, int sq_tail),
+ TP_ARGS(req, sq_head, sq_tail),
+ TP_STRUCT__entry(
+ __field(int, ctrl_id)
+ __array(char, disk, DISK_NAME_LEN)
+ __field(int, qid)
+ __field(u16, sq_head)
+ __field(u16, sq_tail)
+ ),
+ TP_fast_assign(
+ __entry->ctrl_id = nvme_req(req)->ctrl->instance;
+ __assign_disk_name(__entry->disk, req->rq_disk);
+ __entry->qid = nvme_req_qid(req);
+ __entry->sq_head = le16_to_cpu(sq_head);
+ __entry->sq_tail = sq_tail;
+ ),
+ TP_printk("nvme%d: %sqid=%d, head=%u, tail=%u",
+ __entry->ctrl_id, __print_disk_name(__entry->disk),
+ __entry->qid, __entry->sq_head, __entry->sq_tail
+ )
+);
+
#endif /* _TRACE_NVME_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 3c7b61ddb0d1..d94f25cde019 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -60,3 +60,13 @@ config NVME_TARGET_FCLOOP
to test NVMe-FC transport interfaces.
If unsure, say N.
+
+config NVME_TARGET_TCP
+ tristate "NVMe over Fabrics TCP target support"
+ depends on INET
+ depends on NVME_TARGET
+ help
+ This enables the NVMe TCP target support, which allows exporting NVMe
+ devices over TCP.
+
+ If unsure, say N.
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile
index 8118c93391c6..8c3ad0fb6860 100644
--- a/drivers/nvme/target/Makefile
+++ b/drivers/nvme/target/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
+obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
discovery.o io-cmd-file.o io-cmd-bdev.o
@@ -12,3 +13,4 @@ nvme-loop-y += loop.o
nvmet-rdma-y += rdma.o
nvmet-fc-y += fc.o
nvme-fcloop-y += fcloop.o
+nvmet-tcp-y += tcp.o
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 1179f6314323..11baeb14c388 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -19,19 +19,6 @@
#include <asm/unaligned.h>
#include "nvmet.h"
-/*
- * This helper allows us to clear the AEN based on the RAE bit,
- * Please use this helper when processing the log pages which are
- * associated with the AEN.
- */
-static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit)
-{
- int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & 1 << 15;
-
- if (!rae)
- clear_bit(aen_bit, &req->sq->ctrl->aen_masked);
-}
-
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
u32 len = le16_to_cpu(cmd->get_log_page.numdu);
@@ -50,6 +37,34 @@ static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}
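+/*
+ * Error log page: walk the controller's circular error-slot buffer from
+ * the most recent entry backwards, so the host sees the newest errors
+ * first.
+ */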
+static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ u16 status = NVME_SC_SUCCESS;
+ unsigned long flags;
+ off_t offset = 0;
+ u64 slot;
+ u64 i;
+
+ spin_lock_irqsave(&ctrl->error_lock, flags);
+ slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
+
+ for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
+ status = nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
+ sizeof(struct nvme_error_slot));
+ if (status)
+ break;
+
+ if (slot == 0)
+ slot = NVMET_ERROR_LOG_SLOTS - 1;
+ else
+ slot--;
+ offset += sizeof(struct nvme_error_slot);
+ }
+ spin_unlock_irqrestore(&ctrl->error_lock, flags);
+ nvmet_req_complete(req, status);
+}
+
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
struct nvme_smart_log *slog)
{
@@ -60,6 +75,7 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
if (!ns) {
pr_err("Could not find namespace id : %d\n",
le32_to_cpu(req->cmd->get_log_page.nsid));
+ req->error_loc = offsetof(struct nvme_rw_command, nsid);
return NVME_SC_INVALID_NS;
}
@@ -119,6 +135,7 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
struct nvme_smart_log *log;
u16 status = NVME_SC_INTERNAL;
+ unsigned long flags;
if (req->data_len != sizeof(*log))
goto out;
@@ -134,6 +151,11 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
if (status)
goto out_free_log;
+ spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
+ put_unaligned_le64(req->sq->ctrl->err_counter,
+ &log->num_err_log_entries);
+ spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
+
status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
kfree(log);
@@ -189,7 +211,7 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
if (!status)
status = nvmet_zero_sgl(req, len, req->data_len - len);
ctrl->nr_changed_ns = 0;
- nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR);
+ nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
mutex_unlock(&ctrl->lock);
out:
nvmet_req_complete(req, status);
@@ -252,7 +274,7 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
hdr.ngrps = cpu_to_le16(ngrps);
- nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE);
+ nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
up_read(&nvmet_ana_sem);
kfree(desc);
@@ -304,7 +326,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
/* XXX: figure out what to do about RTD3R/RTD3 */
id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
- id->ctratt = cpu_to_le32(1 << 0);
+ id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT |
+ NVME_CTRL_ATTR_TBKAS);
id->oacs = 0;
@@ -392,6 +415,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
u16 status = 0;
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
goto out;
}
@@ -512,6 +536,7 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
if (!ns) {
+ req->error_loc = offsetof(struct nvme_identify, nsid);
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
goto out;
}
@@ -569,13 +594,15 @@ static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
- u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
+ u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
- if (unlikely(!req->ns))
+ if (unlikely(!req->ns)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
return status;
+ }
mutex_lock(&subsys->lock);
switch (write_protect) {
@@ -599,11 +626,36 @@ static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
return status;
}
+u16 nvmet_set_feat_kato(struct nvmet_req *req)
+{
+ u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
+
+ req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
+
+ nvmet_set_result(req, req->sq->ctrl->kato);
+
+ return 0;
+}
+
+u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
+{
+ u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
+
+ if (val32 & ~mask) {
+ req->error_loc = offsetof(struct nvme_common_command, cdw11);
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+
+ WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
+ nvmet_set_result(req, val32);
+
+ return 0;
+}
+
static void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
- u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
- u32 val32;
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
switch (cdw10 & 0xff) {
@@ -612,19 +664,10 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
case NVME_FEAT_KATO:
- val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
- req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
- nvmet_set_result(req, req->sq->ctrl->kato);
+ status = nvmet_set_feat_kato(req);
break;
case NVME_FEAT_ASYNC_EVENT:
- val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
- if (val32 & ~NVMET_AEN_CFG_ALL) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- break;
- }
-
- WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
- nvmet_set_result(req, val32);
+ status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
break;
case NVME_FEAT_HOST_ID:
status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
@@ -633,6 +676,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
status = nvmet_set_feat_write_protect(req);
break;
default:
+ req->error_loc = offsetof(struct nvme_common_command, cdw10);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
}
@@ -646,9 +690,10 @@ static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
u32 result;
req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
- if (!req->ns)
+ if (!req->ns) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
return NVME_SC_INVALID_NS | NVME_SC_DNR;
-
+ }
mutex_lock(&subsys->lock);
if (req->ns->readonly == true)
result = NVME_NS_WRITE_PROTECT;
@@ -660,10 +705,20 @@ static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
return 0;
}
+void nvmet_get_feat_kato(struct nvmet_req *req)
+{
+ nvmet_set_result(req, req->sq->ctrl->kato * 1000);
+}
+
+void nvmet_get_feat_async_event(struct nvmet_req *req)
+{
+ nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
+}
+
static void nvmet_execute_get_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
- u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0;
switch (cdw10 & 0xff) {
@@ -689,7 +744,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
break;
#endif
case NVME_FEAT_ASYNC_EVENT:
- nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
+ nvmet_get_feat_async_event(req);
break;
case NVME_FEAT_VOLATILE_WC:
nvmet_set_result(req, 1);
@@ -699,11 +754,13 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
break;
case NVME_FEAT_KATO:
- nvmet_set_result(req, req->sq->ctrl->kato * 1000);
+ nvmet_get_feat_kato(req);
break;
case NVME_FEAT_HOST_ID:
/* need 128-bit host identifier flag */
- if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
+ if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
+ req->error_loc =
+ offsetof(struct nvme_common_command, cdw11);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
}
@@ -715,6 +772,8 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
status = nvmet_get_feat_write_protect(req);
break;
default:
+ req->error_loc =
+ offsetof(struct nvme_common_command, cdw10);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
break;
}
@@ -722,7 +781,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
nvmet_req_complete(req, status);
}
-static void nvmet_execute_async_event(struct nvmet_req *req)
+void nvmet_execute_async_event(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -738,7 +797,7 @@ static void nvmet_execute_async_event(struct nvmet_req *req)
schedule_work(&ctrl->async_event_work);
}
-static void nvmet_execute_keep_alive(struct nvmet_req *req)
+void nvmet_execute_keep_alive(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -764,13 +823,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
switch (cmd->get_log_page.lid) {
case NVME_LOG_ERROR:
- /*
- * We currently never set the More bit in the status
- * field, so all error log entries are invalid and can
- * be zeroed out. This is called a minum viable
- * implementation (TM) of this mandatory log page.
- */
- req->execute = nvmet_execute_get_log_page_noop;
+ req->execute = nvmet_execute_get_log_page_error;
return 0;
case NVME_LOG_SMART:
req->execute = nvmet_execute_get_log_page_smart;
@@ -836,5 +889,6 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
req->sq->qid);
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d895579b6c5d..618bbd006544 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -25,12 +25,16 @@
static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;
+static LIST_HEAD(nvmet_ports_list);
+struct list_head *nvmet_ports = &nvmet_ports_list;
+
static const struct nvmet_transport_name {
u8 type;
const char *name;
} nvmet_transport_names[] = {
{ NVMF_TRTYPE_RDMA, "rdma" },
{ NVMF_TRTYPE_FC, "fc" },
+ { NVMF_TRTYPE_TCP, "tcp" },
{ NVMF_TRTYPE_LOOP, "loop" },
};
@@ -150,7 +154,8 @@ CONFIGFS_ATTR(nvmet_, addr_traddr);
static ssize_t nvmet_addr_treq_show(struct config_item *item,
char *page)
{
- switch (to_nvmet_port(item)->disc_addr.treq) {
+ switch (to_nvmet_port(item)->disc_addr.treq &
+ NVME_TREQ_SECURE_CHANNEL_MASK) {
case NVMF_TREQ_NOT_SPECIFIED:
return sprintf(page, "not specified\n");
case NVMF_TREQ_REQUIRED:
@@ -166,6 +171,7 @@ static ssize_t nvmet_addr_treq_store(struct config_item *item,
const char *page, size_t count)
{
struct nvmet_port *port = to_nvmet_port(item);
+ u8 treq = port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK;
if (port->enabled) {
pr_err("Cannot modify address while enabled\n");
@@ -174,15 +180,16 @@ static ssize_t nvmet_addr_treq_store(struct config_item *item,
}
if (sysfs_streq(page, "not specified")) {
- port->disc_addr.treq = NVMF_TREQ_NOT_SPECIFIED;
+ treq |= NVMF_TREQ_NOT_SPECIFIED;
} else if (sysfs_streq(page, "required")) {
- port->disc_addr.treq = NVMF_TREQ_REQUIRED;
+ treq |= NVMF_TREQ_REQUIRED;
} else if (sysfs_streq(page, "not required")) {
- port->disc_addr.treq = NVMF_TREQ_NOT_REQUIRED;
+ treq |= NVMF_TREQ_NOT_REQUIRED;
} else {
pr_err("Invalid value '%s' for treq\n", page);
return -EINVAL;
}
+ port->disc_addr.treq = treq;
return count;
}
@@ -646,7 +653,8 @@ static int nvmet_port_subsys_allow_link(struct config_item *parent,
}
list_add_tail(&link->entry, &port->subsystems);
- nvmet_genctr++;
+ nvmet_port_disc_changed(port, subsys);
+
up_write(&nvmet_config_sem);
return 0;
@@ -673,7 +681,8 @@ static void nvmet_port_subsys_drop_link(struct config_item *parent,
found:
list_del(&p->entry);
- nvmet_genctr++;
+ nvmet_port_disc_changed(port, subsys);
+
if (list_empty(&port->subsystems))
nvmet_disable_port(port);
up_write(&nvmet_config_sem);
@@ -722,7 +731,8 @@ static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
goto out_free_link;
}
list_add_tail(&link->entry, &subsys->hosts);
- nvmet_genctr++;
+ nvmet_subsys_disc_changed(subsys, host);
+
up_write(&nvmet_config_sem);
return 0;
out_free_link:
@@ -748,7 +758,8 @@ static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
found:
list_del(&p->entry);
- nvmet_genctr++;
+ nvmet_subsys_disc_changed(subsys, host);
+
up_write(&nvmet_config_sem);
kfree(p);
}
@@ -787,7 +798,11 @@ static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
goto out_unlock;
}
- subsys->allow_any_host = allow_any_host;
+ if (subsys->allow_any_host != allow_any_host) {
+ subsys->allow_any_host = allow_any_host;
+ nvmet_subsys_disc_changed(subsys, NULL);
+ }
+
out_unlock:
up_write(&nvmet_config_sem);
return ret ? ret : count;
@@ -936,7 +951,7 @@ static ssize_t nvmet_referral_enable_store(struct config_item *item,
if (enable)
nvmet_referral_enable(parent, port);
else
- nvmet_referral_disable(port);
+ nvmet_referral_disable(parent, port);
return count;
inval:
@@ -962,9 +977,10 @@ static struct configfs_attribute *nvmet_referral_attrs[] = {
static void nvmet_referral_release(struct config_item *item)
{
+ struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
struct nvmet_port *port = to_nvmet_port(item);
- nvmet_referral_disable(port);
+ nvmet_referral_disable(parent, port);
kfree(port);
}
@@ -1137,6 +1153,8 @@ static void nvmet_port_release(struct config_item *item)
{
struct nvmet_port *port = to_nvmet_port(item);
+ list_del(&port->global_entry);
+
kfree(port->ana_state);
kfree(port);
}
@@ -1189,12 +1207,15 @@ static struct config_group *nvmet_ports_make(struct config_group *group,
port->ana_state[i] = NVME_ANA_INACCESSIBLE;
}
+ list_add(&port->global_entry, &nvmet_ports_list);
+
INIT_LIST_HEAD(&port->entry);
INIT_LIST_HEAD(&port->subsystems);
INIT_LIST_HEAD(&port->referrals);
port->inline_data_size = -1; /* < 0 == let the transport choose */
port->disc_addr.portid = cpu_to_le16(portid);
+ port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
config_group_init_type_name(&port->group, name, &nvmet_port_type);
config_group_init_type_name(&port->subsys_group,
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a5f9bbce863f..88d260f31835 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -45,28 +45,72 @@ u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);
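+/*
+ * Map a kernel errno from the backend into an NVMe status code, recording
+ * which command field the failure is attributed to for the error log.
+ */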
+inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
+{
+ u16 status;
+
+ switch (errno) {
+ case -ENOSPC:
+ req->error_loc = offsetof(struct nvme_rw_command, length);
+ status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+ break;
+ case -EREMOTEIO:
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ break;
+ case -EOPNOTSUPP:
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_dsm:
+ case nvme_cmd_write_zeroes:
+ status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+ break;
+ default:
+ status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ break;
+ case -ENODATA:
+ req->error_loc = offsetof(struct nvme_rw_command, nsid);
+ status = NVME_SC_ACCESS_DENIED;
+ break;
+ case -EIO:
+ /* FALLTHRU */
+ default:
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ }
+
+ return status;
+}
+
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
const char *subsysnqn);
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
size_t len)
{
- if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+ if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ }
return 0;
}
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
- if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
+ if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ }
return 0;
}
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
- if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
+ if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
+ }
return 0;
}
@@ -130,7 +174,7 @@ static void nvmet_async_event_work(struct work_struct *work)
}
}
-static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
u8 event_info, u8 log_page)
{
struct nvmet_async_event *aen;
@@ -150,13 +194,6 @@ static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
schedule_work(&ctrl->async_event_work);
}
-static bool nvmet_aen_disabled(struct nvmet_ctrl *ctrl, u32 aen)
-{
- if (!(READ_ONCE(ctrl->aen_enabled) & aen))
- return true;
- return test_and_set_bit(aen, &ctrl->aen_masked);
-}
-
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
u32 i;
@@ -187,7 +224,7 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
- if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_NS_ATTR))
+ if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
continue;
nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
NVME_AER_NOTICE_NS_CHANGED,
@@ -204,7 +241,7 @@ void nvmet_send_ana_event(struct nvmet_subsys *subsys,
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
if (port && ctrl->port != port)
continue;
- if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_ANA_CHANGE))
+ if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
continue;
nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
@@ -299,6 +336,15 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
{
struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
struct nvmet_ctrl, ka_work);
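+ /*
+ * Traffic-based keep-alive (TBKAS): any command seen since the last
+ * timer expiry counts as host liveness, so re-arm the timer instead of
+ * expiring the controller.
+ */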
+ bool cmd_seen = ctrl->cmd_seen;
+
+ ctrl->cmd_seen = false;
+ if (cmd_seen) {
+ pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
+ ctrl->cntlid);
+ schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ return;
+ }
pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
ctrl->cntlid, ctrl->kato);
@@ -595,26 +641,58 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
return ns;
}
-static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
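+/*
+ * Advance sqhd with a cmpxchg loop so concurrent completions on the same
+ * SQ each consume exactly one slot without holding a lock.
+ */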
+static void nvmet_update_sq_head(struct nvmet_req *req)
{
- u32 old_sqhd, new_sqhd;
- u16 sqhd;
-
- if (status)
- nvmet_set_status(req, status);
-
if (req->sq->size) {
+ u32 old_sqhd, new_sqhd;
+
do {
old_sqhd = req->sq->sqhd;
new_sqhd = (old_sqhd + 1) % req->sq->size;
} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
old_sqhd);
}
- sqhd = req->sq->sqhd & 0x0000FFFF;
- req->rsp->sq_head = cpu_to_le16(sqhd);
+ req->rsp->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
+}
+
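+/*
+ * Record a failed command in the error log ring and set the More bit
+ * (bit 14 of the 16-bit status field) so the host knows an error log
+ * entry is available.
+ */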
+static void nvmet_set_error(struct nvmet_req *req, u16 status)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvme_error_slot *new_error_slot;
+ unsigned long flags;
+
+ req->rsp->status = cpu_to_le16(status << 1);
+
+ if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
+ return;
+
+ spin_lock_irqsave(&ctrl->error_lock, flags);
+ ctrl->err_counter++;
+ new_error_slot =
+ &ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
+
+ new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
+ new_error_slot->sqid = cpu_to_le16(req->sq->qid);
+ new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
+ new_error_slot->status_field = cpu_to_le16(status << 1);
+ new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
+ new_error_slot->lba = cpu_to_le64(req->error_slba);
+ new_error_slot->nsid = req->cmd->common.nsid;
+ spin_unlock_irqrestore(&ctrl->error_lock, flags);
+
+ /* set the more bit for this request */
+ req->rsp->status |= cpu_to_le16(1 << 14);
+}
+
+static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
+{
+ if (!req->sq->sqhd_disabled)
+ nvmet_update_sq_head(req);
req->rsp->sq_id = cpu_to_le16(req->sq->qid);
req->rsp->command_id = req->cmd->common.command_id;
+ if (unlikely(status))
+ nvmet_set_error(req, status);
if (req->ns)
nvmet_put_namespace(req->ns);
req->ops->queue_response(req);
@@ -735,14 +813,20 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
return ret;
req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
- if (unlikely(!req->ns))
+ if (unlikely(!req->ns)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
return NVME_SC_INVALID_NS | NVME_SC_DNR;
+ }
ret = nvmet_check_ana_state(req->port, req->ns);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
return ret;
+ }
ret = nvmet_io_cmd_check_access(req);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ req->error_loc = offsetof(struct nvme_common_command, nsid);
return ret;
+ }
if (req->ns->file)
return nvmet_file_parse_io_cmd(req);
@@ -763,10 +847,14 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sg_cnt = 0;
req->transfer_len = 0;
req->rsp->status = 0;
+ req->rsp->sq_head = 0;
req->ns = NULL;
+ req->error_loc = NVMET_NO_ERROR_LOC;
+ req->error_slba = 0;
/* no support for fused commands yet */
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
+ req->error_loc = offsetof(struct nvme_common_command, flags);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto fail;
}
@@ -777,6 +865,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
* byte aligned.
*/
if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
+ req->error_loc = offsetof(struct nvme_common_command, flags);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
goto fail;
}
@@ -801,6 +890,9 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
goto fail;
}
+ if (sq->ctrl)
+ sq->ctrl->cmd_seen = true;
+
return true;
fail:
@@ -819,9 +911,10 @@ EXPORT_SYMBOL_GPL(nvmet_req_uninit);
void nvmet_req_execute(struct nvmet_req *req)
{
- if (unlikely(req->data_len != req->transfer_len))
+ if (unlikely(req->data_len != req->transfer_len)) {
+ req->error_loc = offsetof(struct nvme_common_command, dptr);
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
- else
+ } else
req->execute(req);
}
EXPORT_SYMBOL_GPL(nvmet_req_execute);
@@ -1027,14 +1120,18 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
return 0;
}
-static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
- const char *hostnqn)
+bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
struct nvmet_host_link *p;
+ lockdep_assert_held(&nvmet_config_sem);
+
if (subsys->allow_any_host)
return true;
+ if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
+ return true;
+
list_for_each_entry(p, &subsys->hosts, entry) {
if (!strcmp(nvmet_host_name(p->host), hostnqn))
return true;
@@ -1043,30 +1140,6 @@ static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
return false;
}
-static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
- const char *hostnqn)
-{
- struct nvmet_subsys_link *s;
-
- list_for_each_entry(s, &req->port->subsystems, entry) {
- if (__nvmet_host_allowed(s->subsys, hostnqn))
- return true;
- }
-
- return false;
-}
-
-bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
- const char *hostnqn)
-{
- lockdep_assert_held(&nvmet_config_sem);
-
- if (subsys->type == NVME_NQN_DISC)
- return nvmet_host_discovery_allowed(req, hostnqn);
- else
- return __nvmet_host_allowed(subsys, hostnqn);
-}
-
/*
* Note: ctrl->subsys->lock should be held when calling this function
*/
@@ -1117,7 +1190,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
down_read(&nvmet_config_sem);
- if (!nvmet_host_allowed(req, subsys, hostnqn)) {
+ if (!nvmet_host_allowed(subsys, hostnqn)) {
pr_info("connect by host %s for subsystem %s not allowed\n",
hostnqn, subsysnqn);
req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
@@ -1175,31 +1248,20 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
ctrl->cntlid = ret;
ctrl->ops = req->ops;
- if (ctrl->subsys->type == NVME_NQN_DISC) {
- /* Don't accept keep-alive timeout for discovery controllers */
- if (kato) {
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- goto out_remove_ida;
- }
- /*
- * Discovery controllers use some arbitrary high value in order
- * to cleanup stale discovery sessions
- *
- * From the latest base diff RC:
- * "The Keep Alive command is not supported by
- * Discovery controllers. A transport may specify a
- * fixed Discovery controller activity timeout value
- * (e.g., 2 minutes). If no commands are received
- * by a Discovery controller within that time
- * period, the controller may perform the
- * actions for Keep Alive Timer expiration".
- */
- ctrl->kato = NVMET_DISC_KATO;
- } else {
- /* keep-alive timeout in seconds */
- ctrl->kato = DIV_ROUND_UP(kato, 1000);
- }
+ /*
+ * Discovery controllers may use some arbitrary high value
+ * in order to clean up stale discovery sessions
+ */
+ if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
+ kato = NVMET_DISC_KATO_MS;
+
+ /* keep-alive timeout in seconds */
+ ctrl->kato = DIV_ROUND_UP(kato, 1000);
+
+ ctrl->err_counter = 0;
+ spin_lock_init(&ctrl->error_lock);
+
nvmet_start_keep_alive_timer(ctrl);
mutex_lock(&subsys->lock);
@@ -1210,8 +1272,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
*ctrlp = ctrl;
return 0;
-out_remove_ida:
- ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
kfree(ctrl->sqs);
out_free_cqs:
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index bc0aa0bf1543..d2cb71a0b419 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -18,7 +18,65 @@
struct nvmet_subsys *nvmet_disc_subsys;
-u64 nvmet_genctr;
+static u64 nvmet_genctr;
+
+static void __nvmet_disc_changed(struct nvmet_port *port,
+ struct nvmet_ctrl *ctrl)
+{
+ if (ctrl->port != port)
+ return;
+
+ if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
+ return;
+
+ nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
+ NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
+}
+
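+/*
+ * A port's configuration changed: bump the discovery generation counter
+ * and send a discovery-change AEN to every discovery controller that is
+ * allowed to see the affected subsystem.
+ */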
+void nvmet_port_disc_changed(struct nvmet_port *port,
+ struct nvmet_subsys *subsys)
+{
+ struct nvmet_ctrl *ctrl;
+
+ nvmet_genctr++;
+
+ list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
+ if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
+ continue;
+
+ __nvmet_disc_changed(port, ctrl);
+ }
+}
+
+static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
+ struct nvmet_subsys *subsys,
+ struct nvmet_host *host)
+{
+ struct nvmet_ctrl *ctrl;
+
+ list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
+ if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
+ continue;
+
+ __nvmet_disc_changed(port, ctrl);
+ }
+}
+
+void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
+ struct nvmet_host *host)
+{
+ struct nvmet_port *port;
+ struct nvmet_subsys_link *s;
+
+ nvmet_genctr++;
+
+ list_for_each_entry(port, nvmet_ports, global_entry)
+ list_for_each_entry(s, &port->subsystems, entry) {
+ if (s->subsys != subsys)
+ continue;
+ __nvmet_subsys_disc_changed(port, subsys, host);
+ }
+}
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
@@ -26,18 +84,18 @@ void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
if (list_empty(&port->entry)) {
list_add_tail(&port->entry, &parent->referrals);
port->enabled = true;
- nvmet_genctr++;
+ nvmet_port_disc_changed(parent, NULL);
}
up_write(&nvmet_config_sem);
}
-void nvmet_referral_disable(struct nvmet_port *port)
+void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
down_write(&nvmet_config_sem);
if (!list_empty(&port->entry)) {
port->enabled = false;
list_del_init(&port->entry);
- nvmet_genctr++;
+ nvmet_port_disc_changed(parent, NULL);
}
up_write(&nvmet_config_sem);
}
@@ -107,7 +165,7 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
down_read(&nvmet_config_sem);
list_for_each_entry(p, &req->port->subsystems, entry) {
- if (!nvmet_host_allowed(req, p->subsys, ctrl->hostnqn))
+ if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
continue;
if (residual_len >= entry_size) {
char traddr[NVMF_TRADDR_SIZE];
@@ -136,6 +194,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
hdr->numrec = cpu_to_le64(numrec);
hdr->recfmt = cpu_to_le16(0);
+ nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);
+
up_read(&nvmet_config_sem);
status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
@@ -174,6 +234,8 @@ static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
+ id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);
+
strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -183,6 +245,51 @@ out:
nvmet_req_complete(req, status);
}
+static void nvmet_execute_disc_set_features(struct nvmet_req *req)
+{
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ u16 stat;
+
+ switch (cdw10 & 0xff) {
+ case NVME_FEAT_KATO:
+ stat = nvmet_set_feat_kato(req);
+ break;
+ case NVME_FEAT_ASYNC_EVENT:
+ stat = nvmet_set_feat_async_event(req,
+ NVMET_DISC_AEN_CFG_OPTIONAL);
+ break;
+ default:
+ req->error_loc =
+ offsetof(struct nvme_common_command, cdw10);
+ stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ nvmet_req_complete(req, stat);
+}
+
+static void nvmet_execute_disc_get_features(struct nvmet_req *req)
+{
+ u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
+ u16 stat = 0;
+
+ switch (cdw10 & 0xff) {
+ case NVME_FEAT_KATO:
+ nvmet_get_feat_kato(req);
+ break;
+ case NVME_FEAT_ASYNC_EVENT:
+ nvmet_get_feat_async_event(req);
+ break;
+ default:
+ req->error_loc =
+ offsetof(struct nvme_common_command, cdw10);
+ stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ break;
+ }
+
+ nvmet_req_complete(req, stat);
+}
+
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
struct nvme_command *cmd = req->cmd;
@@ -190,10 +297,28 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
pr_err("got cmd %d while not ready\n",
cmd->common.opcode);
+ req->error_loc =
+ offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
switch (cmd->common.opcode) {
+ case nvme_admin_set_features:
+ req->execute = nvmet_execute_disc_set_features;
+ req->data_len = 0;
+ return 0;
+ case nvme_admin_get_features:
+ req->execute = nvmet_execute_disc_get_features;
+ req->data_len = 0;
+ return 0;
+ case nvme_admin_async_event:
+ req->execute = nvmet_execute_async_event;
+ req->data_len = 0;
+ return 0;
+ case nvme_admin_keep_alive:
+ req->execute = nvmet_execute_keep_alive;
+ req->data_len = 0;
+ return 0;
case nvme_admin_get_log_page:
req->data_len = nvmet_get_log_page_len(cmd);
@@ -204,6 +329,8 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
default:
pr_err("unsupported get_log_page lid %d\n",
cmd->get_log_page.lid);
+ req->error_loc =
+ offsetof(struct nvme_get_log_page_command, lid);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
case nvme_admin_identify:
@@ -216,10 +343,12 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
default:
pr_err("unsupported identify cns %d\n",
cmd->identify.cns);
+ req->error_loc = offsetof(struct nvme_identify, cns);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
default:
pr_err("unhandled cmd %d\n", cmd->common.opcode);
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index d84ae004cb85..6cf1fd9eb32e 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -17,23 +17,26 @@
static void nvmet_execute_prop_set(struct nvmet_req *req)
{
+ u64 val = le64_to_cpu(req->cmd->prop_set.value);
u16 status = 0;
- if (!(req->cmd->prop_set.attrib & 1)) {
- u64 val = le64_to_cpu(req->cmd->prop_set.value);
-
- switch (le32_to_cpu(req->cmd->prop_set.offset)) {
- case NVME_REG_CC:
- nvmet_update_cc(req->sq->ctrl, val);
- break;
- default:
- status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
- break;
- }
- } else {
+ if (req->cmd->prop_set.attrib & 1) {
+ req->error_loc =
+ offsetof(struct nvmf_property_set_command, attrib);
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
}
+ switch (le32_to_cpu(req->cmd->prop_set.offset)) {
+ case NVME_REG_CC:
+ nvmet_update_cc(req->sq->ctrl, val);
+ break;
+ default:
+ req->error_loc =
+ offsetof(struct nvmf_property_set_command, offset);
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
+out:
nvmet_req_complete(req, status);
}
@@ -69,6 +72,14 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
}
}
+ if (status && req->cmd->prop_get.attrib & 1) {
+ req->error_loc =
+ offsetof(struct nvmf_property_get_command, offset);
+ } else {
+ req->error_loc =
+ offsetof(struct nvmf_property_get_command, attrib);
+ }
+
req->rsp->result.u64 = cpu_to_le64(val);
nvmet_req_complete(req, status);
}
@@ -89,6 +100,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
default:
pr_err("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
+ req->error_loc = offsetof(struct nvmf_common_command, fctype);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
@@ -105,16 +117,34 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
if (old) {
pr_warn("queue already connected!\n");
+ req->error_loc = offsetof(struct nvmf_connect_command, opcode);
return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
}
if (!sqsize) {
pr_warn("queue size zero!\n");
+ req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
}
/* note: convert queue size from 0's-based value to 1's-based value */
nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
+
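+	/*
+	 * The host asked to disable SQ flow control: latch it on the SQ
+	 * and report SQHD as 0xffff so completions carry no meaningful
+	 * head pointer updates.
+	 */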
+ if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
+ req->sq->sqhd_disabled = true;
+ req->rsp->sq_head = cpu_to_le16(0xffff);
+ }
+
+ if (ctrl->ops->install_queue) {
+ u16 ret = ctrl->ops->install_queue(req->sq);
+
+ if (ret) {
+ pr_err("failed to install queue %d cntlid %d ret %x\n",
+ qid, ctrl->cntlid, ret);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -141,6 +171,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
+ req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
goto out;
}
@@ -155,8 +186,13 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
le32_to_cpu(c->kato), &ctrl);
- if (status)
+ if (status) {
+ if (status == (NVME_SC_INVALID_FIELD | NVME_SC_DNR))
+ req->error_loc =
+ offsetof(struct nvme_common_command, opcode);
goto out;
+ }
+
uuid_copy(&ctrl->hostid, &d->hostid);
status = nvmet_install_queue(ctrl, req);
@@ -243,11 +279,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
if (cmd->common.opcode != nvme_fabrics_command) {
pr_err("invalid command 0x%x on unconnected queue.\n",
cmd->fabrics.opcode);
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
pr_err("invalid capsule type 0x%x on unconnected queue.\n",
cmd->fabrics.fctype);
+ req->error_loc = offsetof(struct nvmf_common_command, fctype);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 409081a03b24..f98f5c5bea26 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -86,8 +86,6 @@ struct nvmet_fc_fcp_iod {
spinlock_t flock;
struct nvmet_req req;
- struct work_struct work;
- struct work_struct done_work;
struct work_struct defer_work;
struct nvmet_fc_tgtport *tgtport;
@@ -134,7 +132,6 @@ struct nvmet_fc_tgt_queue {
u16 sqsize;
u16 ersp_ratio;
__le16 sqhd;
- int cpu;
atomic_t connected;
atomic_t sqtail;
atomic_t zrspcnt;
@@ -232,8 +229,6 @@ static LIST_HEAD(nvmet_fc_portentry_list);
static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
-static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
-static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
@@ -438,8 +433,6 @@ nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
int i;
for (i = 0; i < queue->sqsize; fod++, i++) {
- INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
- INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
fod->tgtport = tgtport;
fod->queue = queue;
@@ -517,10 +510,7 @@ nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
fcpreq->hwqid = queue->qid ?
((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
- if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
- queue_work_on(queue->cpu, queue->work_q, &fod->work);
- else
- nvmet_fc_handle_fcp_rqst(tgtport, fod);
+ nvmet_fc_handle_fcp_rqst(tgtport, fod);
}
static void
@@ -599,30 +589,6 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
queue_work(queue->work_q, &fod->defer_work);
}
-static int
-nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
-{
- int cpu, idx, cnt;
-
- if (tgtport->ops->max_hw_queues == 1)
- return WORK_CPU_UNBOUND;
-
- /* Simple cpu selection based on qid modulo active cpu count */
- idx = !qid ? 0 : (qid - 1) % num_active_cpus();
-
- /* find the n'th active cpu */
- for (cpu = 0, cnt = 0; ; ) {
- if (cpu_active(cpu)) {
- if (cnt == idx)
- break;
- cnt++;
- }
- cpu = (cpu + 1) % num_possible_cpus();
- }
-
- return cpu;
-}
-
static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
u16 qid, u16 sqsize)
@@ -653,7 +619,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
queue->qid = qid;
queue->sqsize = sqsize;
queue->assoc = assoc;
- queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
INIT_LIST_HEAD(&queue->fod_list);
INIT_LIST_HEAD(&queue->avail_defer_list);
INIT_LIST_HEAD(&queue->pending_cmd_list);
@@ -2146,25 +2111,11 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
}
static void
-nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
-{
- struct nvmet_fc_fcp_iod *fod =
- container_of(work, struct nvmet_fc_fcp_iod, done_work);
-
- nvmet_fc_fod_op_done(fod);
-}
-
-static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
{
struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
- struct nvmet_fc_tgt_queue *queue = fod->queue;
- if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
- /* context switch so completion is not in ISR context */
- queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
- else
- nvmet_fc_fod_op_done(fod);
+ nvmet_fc_fod_op_done(fod);
}
/*
@@ -2332,19 +2283,6 @@ transport_error:
nvmet_fc_abort_op(tgtport, fod);
}
-/*
- * Actual processing routine for received FC-NVME LS Requests from the LLD
- */
-static void
-nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
-{
- struct nvmet_fc_fcp_iod *fod =
- container_of(work, struct nvmet_fc_fcp_iod, work);
- struct nvmet_fc_tgtport *tgtport = fod->tgtport;
-
- nvmet_fc_handle_fcp_rqst(tgtport, fod);
-}
-
/**
* nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
* upon the reception of a NVME FCP CMD IU.
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index c1ec3475a140..b6d030d3259f 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -44,13 +44,69 @@ void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
}
}
+static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
+{
+ u16 status = NVME_SC_SUCCESS;
+
+ if (likely(blk_sts == BLK_STS_OK))
+ return status;
+ /*
+ * Right now there exists an M : 1 mapping from block layer errors
+ * to NVMe status codes (see nvme_error_status()). For consistency,
+ * when we reverse map, we use the most appropriate NVMe status code
+ * from the group of NVMe status codes used in nvme_error_status().
+ */
+ switch (blk_sts) {
+ case BLK_STS_NOSPC:
+ status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
+ req->error_loc = offsetof(struct nvme_rw_command, length);
+ break;
+ case BLK_STS_TARGET:
+ status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ req->error_loc = offsetof(struct nvme_rw_command, slba);
+ break;
+ case BLK_STS_NOTSUPP:
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_dsm:
+ case nvme_cmd_write_zeroes:
+ status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
+ break;
+ default:
+ status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
+ }
+ break;
+ case BLK_STS_MEDIUM:
+ status = NVME_SC_ACCESS_DENIED;
+ req->error_loc = offsetof(struct nvme_rw_command, nsid);
+ break;
+ case BLK_STS_IOERR:
+ /* fallthru */
+ default:
+ status = NVME_SC_INTERNAL | NVME_SC_DNR;
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
+ }
+
+ switch (req->cmd->common.opcode) {
+ case nvme_cmd_read:
+ case nvme_cmd_write:
+ req->error_slba = le64_to_cpu(req->cmd->rw.slba);
+ break;
+ case nvme_cmd_write_zeroes:
+ req->error_slba =
+ le64_to_cpu(req->cmd->write_zeroes.slba);
+ break;
+ default:
+ req->error_slba = 0;
+ }
+ return status;
+}
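+
+/*
+ * Illustrative only (not part of this change): assuming the block
+ * layer's errno_to_blk_status() maps -ENOSPC to BLK_STS_NOSPC (as its
+ * blk_errors table does), a discard that ran out of space would flow
+ * back through the reverse mapping above as:
+ *
+ *	u16 status = blk_to_nvme_status(req,
+ *			errno_to_blk_status(-ENOSPC));
+ *	// status == (NVME_SC_CAP_EXCEEDED | NVME_SC_DNR), and
+ *	// req->error_loc points at the rw command's length field.
+ */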
+
static void nvmet_bio_done(struct bio *bio)
{
struct nvmet_req *req = bio->bi_private;
- nvmet_req_complete(req,
- bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
-
+ nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
if (bio != &req->b.inline_bio)
bio_put(bio);
}
@@ -61,7 +117,6 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
struct bio *bio;
struct scatterlist *sg;
sector_t sector;
- blk_qc_t cookie;
int op, op_flags = 0, i;
if (!req->sg_cnt) {
@@ -114,9 +169,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
sg_cnt--;
}
- cookie = submit_bio(bio);
-
- blk_poll(bdev_get_queue(req->ns->bdev), cookie);
+ submit_bio(bio);
}
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
@@ -139,18 +192,21 @@ u16 nvmet_bdev_flush(struct nvmet_req *req)
return 0;
}
-static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns,
+static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
struct nvme_dsm_range *range, struct bio **bio)
{
+ struct nvmet_ns *ns = req->ns;
int ret;
ret = __blkdev_issue_discard(ns->bdev,
le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
GFP_KERNEL, 0, bio);
- if (ret && ret != -EOPNOTSUPP)
- return NVME_SC_INTERNAL | NVME_SC_DNR;
- return 0;
+
+ if (ret)
+ req->error_slba = le64_to_cpu(range->slba);
+
+ return blk_to_nvme_status(req, errno_to_blk_status(ret));
}
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
@@ -166,7 +222,7 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
if (status)
break;
- status = nvmet_bdev_discard_range(req->ns, &range, &bio);
+ status = nvmet_bdev_discard_range(req, &range, &bio);
if (status)
break;
}
@@ -207,16 +263,16 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
u16 status = NVME_SC_SUCCESS;
sector_t sector;
sector_t nr_sector;
+ int ret;
sector = le64_to_cpu(write_zeroes->slba) <<
(req->ns->blksize_shift - 9);
nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
(req->ns->blksize_shift - 9));
- if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
- GFP_KERNEL, &bio, 0))
- status = NVME_SC_INTERNAL | NVME_SC_DNR;
-
+ ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
+ GFP_KERNEL, &bio, 0);
+ status = blk_to_nvme_status(req, errno_to_blk_status(ret));
if (bio) {
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
@@ -251,6 +307,7 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
default:
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
req->sq->qid);
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 01feebec29ea..517522305e5c 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -83,17 +83,16 @@ static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
}
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
- unsigned long nr_segs, size_t count)
+ unsigned long nr_segs, size_t count, int ki_flags)
{
struct kiocb *iocb = &req->f.iocb;
ssize_t (*call_iter)(struct kiocb *iocb, struct iov_iter *iter);
struct iov_iter iter;
- int ki_flags = 0, rw;
- ssize_t ret;
+ int rw;
if (req->cmd->rw.opcode == nvme_cmd_write) {
if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
- ki_flags = IOCB_DSYNC;
+ ki_flags |= IOCB_DSYNC;
call_iter = req->ns->file->f_op->write_iter;
rw = WRITE;
} else {
@@ -107,17 +106,13 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
iocb->ki_filp = req->ns->file;
iocb->ki_flags = ki_flags | iocb_flags(req->ns->file);
- ret = call_iter(iocb, &iter);
-
- if (ret != -EIOCBQUEUED && iocb->ki_complete)
- iocb->ki_complete(iocb, ret, 0);
-
- return ret;
+ return call_iter(iocb, &iter);
}
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
{
struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
+ u16 status = NVME_SC_SUCCESS;
if (req->f.bvec != req->inline_bvec) {
if (likely(req->f.mpool_alloc == false))
@@ -126,11 +121,12 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
mempool_free(req->f.bvec, req->ns->bvec_pool);
}
- nvmet_req_complete(req, ret != req->data_len ?
- NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+ if (unlikely(ret != req->data_len))
+ status = errno_to_nvme_status(req, ret);
+ nvmet_req_complete(req, status);
}
-static void nvmet_file_execute_rw(struct nvmet_req *req)
+static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
{
ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
struct sg_page_iter sg_pg_iter;
@@ -140,30 +136,14 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
ssize_t ret = 0;
loff_t pos;
- if (!req->sg_cnt || !nr_bvec) {
- nvmet_req_complete(req, 0);
- return;
- }
+
+ if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
+ is_sync = true;
pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
if (unlikely(pos + req->data_len > req->ns->size)) {
- nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
- return;
- }
-
- if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
- req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
- GFP_KERNEL);
- else
- req->f.bvec = req->inline_bvec;
-
- req->f.mpool_alloc = false;
- if (unlikely(!req->f.bvec)) {
- /* fallback under memory pressure */
- req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
- req->f.mpool_alloc = true;
- if (nr_bvec > NVMET_MAX_MPOOL_BVEC)
- is_sync = true;
+ nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
+ return true;
}
memset(&req->f.iocb, 0, sizeof(struct kiocb));
@@ -177,9 +157,10 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
if (unlikely(is_sync) &&
(nr_bvec - 1 == 0 || bv_cnt == NVMET_MAX_MPOOL_BVEC)) {
- ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len);
+ ret = nvmet_file_submit_bvec(req, pos, bv_cnt, len, 0);
if (ret < 0)
- goto out;
+ goto complete;
+
pos += len;
bv_cnt = 0;
len = 0;
@@ -187,35 +168,95 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
nr_bvec--;
}
- if (WARN_ON_ONCE(total_len != req->data_len))
+ if (WARN_ON_ONCE(total_len != req->data_len)) {
ret = -EIO;
-out:
- if (unlikely(is_sync || ret)) {
- nvmet_file_io_done(&req->f.iocb, ret < 0 ? ret : total_len, 0);
- return;
+ goto complete;
+ }
+
+ if (unlikely(is_sync)) {
+ ret = total_len;
+ goto complete;
}
- req->f.iocb.ki_complete = nvmet_file_io_done;
- nvmet_file_submit_bvec(req, pos, bv_cnt, total_len);
+
+ /*
+ * A NULL ki_complete asks for synchronous execution, which we want
+ * for the IOCB_NOWAIT case.
+ */
+ if (!(ki_flags & IOCB_NOWAIT))
+ req->f.iocb.ki_complete = nvmet_file_io_done;
+
+ ret = nvmet_file_submit_bvec(req, pos, bv_cnt, total_len, ki_flags);
+
+ switch (ret) {
+ case -EIOCBQUEUED:
+ return true;
+ case -EAGAIN:
+ if (WARN_ON_ONCE(!(ki_flags & IOCB_NOWAIT)))
+ goto complete;
+ return false;
+ case -EOPNOTSUPP:
+ /*
+ * For file systems returning error -EOPNOTSUPP, handle
+ * IOCB_NOWAIT error case separately and retry without
+ * IOCB_NOWAIT.
+ */
+ if ((ki_flags & IOCB_NOWAIT))
+ return false;
+ break;
+ }
+
+complete:
+ nvmet_file_io_done(&req->f.iocb, ret, 0);
+ return true;
}
static void nvmet_file_buffered_io_work(struct work_struct *w)
{
struct nvmet_req *req = container_of(w, struct nvmet_req, f.work);
- nvmet_file_execute_rw(req);
+ nvmet_file_execute_io(req, 0);
}
-static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req)
+static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
{
INIT_WORK(&req->f.work, nvmet_file_buffered_io_work);
queue_work(buffered_io_wq, &req->f.work);
}
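+
+/*
+ * Read/write entry point: pick the bvec backing (inline array,
+ * kmalloc'ed array, or mempool under memory pressure), then for
+ * buffered-io namespaces first attempt an IOCB_NOWAIT submission in
+ * the caller's context and fall back to the buffered_io_wq only when
+ * that would block.
+ */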
+static void nvmet_file_execute_rw(struct nvmet_req *req)
+{
+ ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+
+ if (!req->sg_cnt || !nr_bvec) {
+ nvmet_req_complete(req, 0);
+ return;
+ }
+
+ if (nr_bvec > NVMET_MAX_INLINE_BIOVEC)
+ req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
+ GFP_KERNEL);
+ else
+ req->f.bvec = req->inline_bvec;
+
+ if (unlikely(!req->f.bvec)) {
+ /* fallback under memory pressure */
+ req->f.bvec = mempool_alloc(req->ns->bvec_pool, GFP_KERNEL);
+ req->f.mpool_alloc = true;
+ } else
+ req->f.mpool_alloc = false;
+
+ if (req->ns->buffered_io) {
+ if (likely(!req->f.mpool_alloc) &&
+ nvmet_file_execute_io(req, IOCB_NOWAIT))
+ return;
+ nvmet_file_submit_buffered_io(req);
+ } else
+ nvmet_file_execute_io(req, 0);
+}
+
u16 nvmet_file_flush(struct nvmet_req *req)
{
- if (vfs_fsync(req->ns->file, 1) < 0)
- return NVME_SC_INTERNAL | NVME_SC_DNR;
- return 0;
+ return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
}
static void nvmet_file_flush_work(struct work_struct *w)
@@ -236,30 +277,34 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
struct nvme_dsm_range range;
loff_t offset, len;
- u16 ret;
+ u16 status = 0;
+ int ret;
int i;
for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
- ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
+ status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
sizeof(range));
- if (ret)
+ if (status)
break;
offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
len = le32_to_cpu(range.nlb);
len <<= req->ns->blksize_shift;
if (offset + len > req->ns->size) {
- ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
+ req->error_slba = le64_to_cpu(range.slba);
+ status = errno_to_nvme_status(req, -ENOSPC);
break;
}
- if (vfs_fallocate(req->ns->file, mode, offset, len)) {
- ret = NVME_SC_INTERNAL | NVME_SC_DNR;
+ ret = vfs_fallocate(req->ns->file, mode, offset, len);
+ if (ret) {
+ req->error_slba = le64_to_cpu(range.slba);
+ status = errno_to_nvme_status(req, ret);
break;
}
}
- nvmet_req_complete(req, ret);
+ nvmet_req_complete(req, status);
}
static void nvmet_file_dsm_work(struct work_struct *w)
@@ -299,12 +344,12 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
req->ns->blksize_shift);
if (unlikely(offset + len > req->ns->size)) {
- nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
+ nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
return;
}
ret = vfs_fallocate(req->ns->file, mode, offset, len);
- nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
+ nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
}
static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
@@ -320,10 +365,7 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
switch (cmd->common.opcode) {
case nvme_cmd_read:
case nvme_cmd_write:
- if (req->ns->buffered_io)
- req->execute = nvmet_file_execute_rw_buffered_io;
- else
- req->execute = nvmet_file_execute_rw;
+ req->execute = nvmet_file_execute_rw;
req->data_len = nvmet_rw_len(req);
return 0;
case nvme_cmd_flush:
@@ -342,6 +384,7 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
default:
pr_err("unhandled cmd for file ns %d on qid %d\n",
cmd->common.opcode, req->sq->qid);
+ req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 9908082b32c4..4aac1b4a8112 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -345,7 +345,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
int i, ret;
for (i = 1; i < ctrl->ctrl.queue_count; i++) {
- ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
+ ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
if (ret)
return ret;
set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index c2b4d9ee6391..3e4719fdba85 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -30,12 +30,15 @@
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
+#define NVMET_NO_ERROR_LOC ((u16)-1)
/*
* Supported optional AENs:
*/
#define NVMET_AEN_CFG_OPTIONAL \
(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
+#define NVMET_DISC_AEN_CFG_OPTIONAL \
+ (NVME_AEN_CFG_DISC_CHANGE)
/*
* Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
@@ -104,6 +107,7 @@ struct nvmet_sq {
u16 qid;
u16 size;
u32 sqhd;
+ bool sqhd_disabled;
struct completion free_done;
struct completion confirm_done;
};
@@ -137,6 +141,7 @@ struct nvmet_port {
struct list_head subsystems;
struct config_group referrals_group;
struct list_head referrals;
+ struct list_head global_entry;
struct config_group ana_groups_group;
struct nvmet_ana_group ana_default_group;
enum nvme_ana_state *ana_state;
@@ -163,6 +168,8 @@ struct nvmet_ctrl {
struct nvmet_cq **cqs;
struct nvmet_sq **sqs;
+ bool cmd_seen;
+
struct mutex lock;
u64 cap;
u32 cc;
@@ -194,8 +201,12 @@ struct nvmet_ctrl {
char subsysnqn[NVMF_NQN_FIELD_LEN];
char hostnqn[NVMF_NQN_FIELD_LEN];
- struct device *p2p_client;
- struct radix_tree_root p2p_ns_map;
+ struct device *p2p_client;
+ struct radix_tree_root p2p_ns_map;
+
+ spinlock_t error_lock;
+ u64 err_counter;
+ struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
};
struct nvmet_subsys {
@@ -273,6 +284,7 @@ struct nvmet_fabrics_ops {
void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
void (*disc_traddr)(struct nvmet_req *req,
struct nvmet_port *port, char *traddr);
+ u16 (*install_queue)(struct nvmet_sq *nvme_sq);
};
#define NVMET_MAX_INLINE_BIOVEC 8
@@ -308,17 +320,14 @@ struct nvmet_req {
void (*execute)(struct nvmet_req *req);
const struct nvmet_fabrics_ops *ops;
- struct pci_dev *p2p_dev;
- struct device *p2p_client;
+ struct pci_dev *p2p_dev;
+ struct device *p2p_client;
+ u16 error_loc;
+ u64 error_slba;
};
extern struct workqueue_struct *buffered_io_wq;
-static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
-{
- req->rsp->status = cpu_to_le16(status << 1);
-}
-
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
req->rsp->result.u32 = cpu_to_le32(result);
@@ -340,6 +349,27 @@ struct nvmet_async_event {
u8 log_page;
};
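+
+/*
+ * Bit 15 of Get Log Page cdw10 is RAE (Retain Asynchronous Event): if
+ * the host set it, reading the log page must not rearm the AEN, so the
+ * mask bit is left alone.
+ */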
+static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
+{
+ int rae = le32_to_cpu(req->cmd->common.cdw10) & 1 << 15;
+
+ if (!rae)
+ clear_bit(bn, &req->sq->ctrl->aen_masked);
+}
+
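+/*
+ * An AEN is reported at most once until the host consumes it: disabled
+ * here means either the host never enabled the event, or
+ * test_and_set_bit() found a notification already outstanding.
+ */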
+static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
+{
+ if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
+ return true;
+ return test_and_set_bit(bn, &ctrl->aen_masked);
+}
+
+void nvmet_get_feat_kato(struct nvmet_req *req);
+void nvmet_get_feat_async_event(struct nvmet_req *req);
+u16 nvmet_set_feat_kato(struct nvmet_req *req);
+u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
+void nvmet_execute_async_event(struct nvmet_req *req);
+
u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
@@ -355,6 +385,8 @@ void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgl(struct nvmet_req *req);
void nvmet_req_free_sgl(struct nvmet_req *req);
+void nvmet_execute_keep_alive(struct nvmet_req *req);
+
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
@@ -395,7 +427,7 @@ int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
-void nvmet_referral_disable(struct nvmet_port *port);
+void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
size_t len);
@@ -405,6 +437,14 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+extern struct list_head *nvmet_ports;
+void nvmet_port_disc_changed(struct nvmet_port *port,
+ struct nvmet_subsys *subsys);
+void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
+ struct nvmet_host *host);
+void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
+ u8 event_info, u8 log_page);
+
#define NVMET_QUEUE_SIZE 1024
#define NVMET_NR_QUEUES 128
#define NVMET_MAX_CMD NVMET_QUEUE_SIZE
@@ -425,7 +465,7 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd);
#define NVMET_DEFAULT_ANA_GRPID 1
#define NVMET_KAS 10
-#define NVMET_DISC_KATO 120
+#define NVMET_DISC_KATO_MS 120000
int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);
@@ -434,15 +474,13 @@ int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);
extern struct nvmet_subsys *nvmet_disc_subsys;
-extern u64 nvmet_genctr;
extern struct rw_semaphore nvmet_config_sem;
extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;
-bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
- const char *hostnqn);
+bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);
int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
@@ -457,4 +495,6 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req)
return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
req->ns->blksize_shift;
}
+
+u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
#endif /* _NVMET_H */
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3f7971d3706d..a8d23eb80192 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -196,7 +196,7 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
unsigned long flags;
- if (rsp->allocated) {
+ if (unlikely(rsp->allocated)) {
kfree(rsp);
return;
}
@@ -529,6 +529,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_rsp *rsp =
container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+ struct nvmet_rdma_queue *queue = cq->cq_context;
nvmet_rdma_release_rsp(rsp);
@@ -536,7 +537,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
wc->status != IB_WC_WR_FLUSH_ERR)) {
pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
- nvmet_rdma_error_comp(rsp->queue);
+ nvmet_rdma_error_comp(queue);
}
}
@@ -629,8 +630,11 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
u64 off = le64_to_cpu(sgl->addr);
u32 len = le32_to_cpu(sgl->length);
- if (!nvme_is_write(rsp->req.cmd))
+ if (!nvme_is_write(rsp->req.cmd)) {
+ rsp->req.error_loc =
+ offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ }
if (off + len > rsp->queue->dev->inline_data_size) {
pr_err("invalid inline data offset!\n");
@@ -695,6 +699,8 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
return nvmet_rdma_map_sgl_inline(rsp);
default:
pr_err("invalid SGL subtype: %#x\n", sgl->type);
+ rsp->req.error_loc =
+ offsetof(struct nvme_common_command, dptr);
return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
}
case NVME_KEY_SGL_FMT_DATA_DESC:
@@ -705,10 +711,13 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
default:
pr_err("invalid SGL subtype: %#x\n", sgl->type);
+ rsp->req.error_loc =
+ offsetof(struct nvme_common_command, dptr);
return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
}
default:
pr_err("invalid SGL type: %#x\n", sgl->type);
+ rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
}
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
new file mode 100644
index 000000000000..44b37b202e39
--- /dev/null
+++ b/drivers/nvme/target/tcp.c
@@ -0,0 +1,1737 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * NVMe over Fabrics TCP target.
+ * Copyright (c) 2018 Lightbits Labs. All rights reserved.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/nvme-tcp.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <linux/inet.h>
+#include <linux/llist.h>
+#include <crypto/hash.h>
+
+#include "nvmet.h"
+
+#define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
+
+#define NVMET_TCP_RECV_BUDGET 8
+#define NVMET_TCP_SEND_BUDGET 8
+#define NVMET_TCP_IO_WORK_BUDGET 64
+
+enum nvmet_tcp_send_state {
+ NVMET_TCP_SEND_DATA_PDU,
+ NVMET_TCP_SEND_DATA,
+ NVMET_TCP_SEND_R2T,
+ NVMET_TCP_SEND_DDGST,
+ NVMET_TCP_SEND_RESPONSE
+};
+
+enum nvmet_tcp_recv_state {
+ NVMET_TCP_RECV_PDU,
+ NVMET_TCP_RECV_DATA,
+ NVMET_TCP_RECV_DDGST,
+ NVMET_TCP_RECV_ERR,
+};
+
+enum {
+ NVMET_TCP_F_INIT_FAILED = (1 << 0),
+};
+
+struct nvmet_tcp_cmd {
+ struct nvmet_tcp_queue *queue;
+ struct nvmet_req req;
+
+ struct nvme_tcp_cmd_pdu *cmd_pdu;
+ struct nvme_tcp_rsp_pdu *rsp_pdu;
+ struct nvme_tcp_data_pdu *data_pdu;
+ struct nvme_tcp_r2t_pdu *r2t_pdu;
+
+ u32 rbytes_done;
+ u32 wbytes_done;
+
+ u32 pdu_len;
+ u32 pdu_recv;
+ int sg_idx;
+ int nr_mapped;
+ struct msghdr recv_msg;
+ struct kvec *iov;
+ u32 flags;
+
+ struct list_head entry;
+ struct llist_node lentry;
+
+ /* send state */
+ u32 offset;
+ struct scatterlist *cur_sg;
+ enum nvmet_tcp_send_state state;
+
+ __le32 exp_ddgst;
+ __le32 recv_ddgst;
+};
+
+enum nvmet_tcp_queue_state {
+ NVMET_TCP_Q_CONNECTING,
+ NVMET_TCP_Q_LIVE,
+ NVMET_TCP_Q_DISCONNECTING,
+};
+
+struct nvmet_tcp_queue {
+ struct socket *sock;
+ struct nvmet_tcp_port *port;
+ struct work_struct io_work;
+ int cpu;
+ struct nvmet_cq nvme_cq;
+ struct nvmet_sq nvme_sq;
+
+ /* send state */
+ struct nvmet_tcp_cmd *cmds;
+ unsigned int nr_cmds;
+ struct list_head free_list;
+ struct llist_head resp_list;
+ struct list_head resp_send_list;
+ int send_list_len;
+ struct nvmet_tcp_cmd *snd_cmd;
+
+ /* recv state */
+ int offset;
+ int left;
+ enum nvmet_tcp_recv_state rcv_state;
+ struct nvmet_tcp_cmd *cmd;
+ union nvme_tcp_pdu pdu;
+
+ /* digest state */
+ bool hdr_digest;
+ bool data_digest;
+ struct ahash_request *snd_hash;
+ struct ahash_request *rcv_hash;
+
+ spinlock_t state_lock;
+ enum nvmet_tcp_queue_state state;
+
+ struct sockaddr_storage sockaddr;
+ struct sockaddr_storage sockaddr_peer;
+ struct work_struct release_work;
+
+ int idx;
+ struct list_head queue_list;
+
+ struct nvmet_tcp_cmd connect;
+
+ struct page_frag_cache pf_cache;
+
+ void (*data_ready)(struct sock *);
+ void (*state_change)(struct sock *);
+ void (*write_space)(struct sock *);
+};
+
+struct nvmet_tcp_port {
+ struct socket *sock;
+ struct work_struct accept_work;
+ struct nvmet_port *nport;
+ struct sockaddr_storage addr;
+ int last_cpu;
+ void (*data_ready)(struct sock *);
+};
+
+static DEFINE_IDA(nvmet_tcp_queue_ida);
+static LIST_HEAD(nvmet_tcp_queue_list);
+static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
+
+static struct workqueue_struct *nvmet_tcp_wq;
+static struct nvmet_fabrics_ops nvmet_tcp_ops;
+static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
+static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
+
+static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
+ struct nvmet_tcp_cmd *cmd)
+{
+ return cmd - queue->cmds;
+}
+
+static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
+{
+ return nvme_is_write(cmd->req.cmd) &&
+ cmd->rbytes_done < cmd->req.transfer_len;
+}
+
+static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
+{
+ return nvmet_tcp_has_data_in(cmd) && !cmd->req.rsp->status;
+}
+
+static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
+{
+ return !nvme_is_write(cmd->req.cmd) &&
+ cmd->req.transfer_len > 0 &&
+ !cmd->req.rsp->status;
+}
+
+static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
+{
+ return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
+ !cmd->rbytes_done;
+}
+
+static inline struct nvmet_tcp_cmd *
+nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd;
+
+ cmd = list_first_entry_or_null(&queue->free_list,
+ struct nvmet_tcp_cmd, entry);
+ if (!cmd)
+ return NULL;
+ list_del_init(&cmd->entry);
+
+ cmd->rbytes_done = cmd->wbytes_done = 0;
+ cmd->pdu_len = 0;
+ cmd->pdu_recv = 0;
+ cmd->iov = NULL;
+ cmd->flags = 0;
+ return cmd;
+}
+
+static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
+{
+ if (unlikely(cmd == &cmd->queue->connect))
+ return;
+
+ list_add_tail(&cmd->entry, &cmd->queue->free_list);
+}
+
+static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
+{
+ return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
+}
+
+static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
+{
+ return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
+}
+
+static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
+ void *pdu, size_t len)
+{
+ struct scatterlist sg;
+
+ sg_init_one(&sg, pdu, len);
+ ahash_request_set_crypt(hash, &sg, pdu + len, len);
+ crypto_ahash_digest(hash);
+}
+
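+/*
+ * Verify a received header digest by recomputing it in place: the
+ * CRC32C that came off the wire is saved, nvmet_tcp_hdgst() then
+ * overwrites the digest slot at pdu + hdr->hlen with the locally
+ * computed value, and the two are compared.
+ */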
+static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
+ void *pdu, size_t len)
+{
+ struct nvme_tcp_hdr *hdr = pdu;
+ __le32 recv_digest;
+ __le32 exp_digest;
+
+ if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
+ pr_err("queue %d: header digest enabled but no header digest\n",
+ queue->idx);
+ return -EPROTO;
+ }
+
+ recv_digest = *(__le32 *)(pdu + hdr->hlen);
+ nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
+ exp_digest = *(__le32 *)(pdu + hdr->hlen);
+ if (recv_digest != exp_digest) {
+ pr_err("queue %d: header digest error: recv %#x expected %#x\n",
+ queue->idx, le32_to_cpu(recv_digest),
+ le32_to_cpu(exp_digest));
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
+{
+ struct nvme_tcp_hdr *hdr = pdu;
+ u8 digest_len = nvmet_tcp_hdgst_len(queue);
+ u32 len;
+
+ len = le32_to_cpu(hdr->plen) - hdr->hlen -
+ (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
+
+ if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
+ pr_err("queue %d: data digest flag is cleared\n", queue->idx);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+{
+ struct scatterlist *sg;
+ int i;
+
+ sg = &cmd->req.sg[cmd->sg_idx];
+
+ for (i = 0; i < cmd->nr_mapped; i++)
+ kunmap(sg_page(&sg[i]));
+}
+
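+/*
+ * Build a kvec array over the kmap'ed scatterlist pages backing the
+ * current PDU's payload, starting at the offset already received
+ * (cmd->rbytes_done), and point recv_msg's iterator at it so that
+ * sock_recvmsg() copies payload bytes straight into the command's SGL.
+ */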
+static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+{
+ struct kvec *iov = cmd->iov;
+ struct scatterlist *sg;
+ u32 length, offset, sg_offset;
+
+ length = cmd->pdu_len;
+ cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
+ offset = cmd->rbytes_done;
+ cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
+ sg_offset = offset % PAGE_SIZE;
+ sg = &cmd->req.sg[cmd->sg_idx];
+
+ while (length) {
+ u32 iov_len = min_t(u32, length, sg->length - sg_offset);
+
+ iov->iov_base = kmap(sg_page(sg)) + sg->offset + sg_offset;
+ iov->iov_len = iov_len;
+
+ length -= iov_len;
+ sg = sg_next(sg);
+ iov++;
+ }
+
+ iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
+ cmd->nr_mapped, cmd->pdu_len);
+}
+
+static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
+{
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
+ if (queue->nvme_sq.ctrl)
+ nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
+ else
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+}
+
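+/*
+ * Set up the command's scatterlist from its SGL descriptor: an offset
+ * data block SGL within the port's inline data limit means in-capsule
+ * data, so pdu_len is set and the payload is consumed from the command
+ * PDU itself; commands that still expect host data also get a kvec
+ * array for the receive path.
+ */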
+static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
+ u32 len = le32_to_cpu(sgl->length);
+
+ if (!cmd->req.data_len)
+ return 0;
+
+ if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
+ NVME_SGL_FMT_OFFSET)) {
+ if (!nvme_is_write(cmd->req.cmd))
+ return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+
+ if (len > cmd->req.port->inline_data_size)
+ return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
+ cmd->pdu_len = len;
+ }
+ cmd->req.transfer_len += len;
+
+ cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
+ if (!cmd->req.sg)
+ return NVME_SC_INTERNAL;
+ cmd->cur_sg = cmd->req.sg;
+
+ if (nvmet_tcp_has_data_in(cmd)) {
+ cmd->iov = kmalloc_array(cmd->req.sg_cnt,
+ sizeof(*cmd->iov), GFP_KERNEL);
+ if (!cmd->iov)
+ goto err;
+ }
+
+ return 0;
+err:
+ sgl_free(cmd->req.sg);
+ return NVME_SC_INTERNAL;
+}
+
+static void nvmet_tcp_ddgst(struct ahash_request *hash,
+ struct nvmet_tcp_cmd *cmd)
+{
+ ahash_request_set_crypt(hash, cmd->req.sg,
+ (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
+ crypto_ahash_digest(hash);
+}
+
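+/*
+ * Prepare a C2HData PDU that carries the whole transfer in one shot
+ * (NVME_TCP_F_DATA_LAST); the data digest, when enabled, is computed
+ * up front, and the send state machine then streams header, payload
+ * and DDGST.
+ */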
+static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+ u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
+
+ cmd->offset = 0;
+ cmd->state = NVMET_TCP_SEND_DATA_PDU;
+
+ pdu->hdr.type = nvme_tcp_c2h_data;
+ pdu->hdr.flags = NVME_TCP_F_DATA_LAST;
+ pdu->hdr.hlen = sizeof(*pdu);
+ pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
+ pdu->hdr.plen =
+ cpu_to_le32(pdu->hdr.hlen + hdgst +
+ cmd->req.transfer_len + ddgst);
+ pdu->command_id = cmd->req.rsp->command_id;
+ pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
+ pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
+
+ if (queue->data_digest) {
+ pdu->hdr.flags |= NVME_TCP_F_DDGST;
+ nvmet_tcp_ddgst(queue->snd_hash, cmd);
+ }
+
+ if (cmd->queue->hdr_digest) {
+ pdu->hdr.flags |= NVME_TCP_F_HDGST;
+ nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+ }
+}
+
+static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+
+ cmd->offset = 0;
+ cmd->state = NVMET_TCP_SEND_R2T;
+
+ pdu->hdr.type = nvme_tcp_r2t;
+ pdu->hdr.flags = 0;
+ pdu->hdr.hlen = sizeof(*pdu);
+ pdu->hdr.pdo = 0;
+ pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
+
+ pdu->command_id = cmd->req.cmd->common.command_id;
+ pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
+ pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
+ pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
+ if (cmd->queue->hdr_digest) {
+ pdu->hdr.flags |= NVME_TCP_F_HDGST;
+ nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+ }
+}
+
+static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+
+ cmd->offset = 0;
+ cmd->state = NVMET_TCP_SEND_RESPONSE;
+
+ pdu->hdr.type = nvme_tcp_rsp;
+ pdu->hdr.flags = 0;
+ pdu->hdr.hlen = sizeof(*pdu);
+ pdu->hdr.pdo = 0;
+ pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
+ if (cmd->queue->hdr_digest) {
+ pdu->hdr.flags |= NVME_TCP_F_HDGST;
+ nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
+ }
+}
+
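+/*
+ * Completions may queue responses from any context onto the lockless
+ * resp_list llist; io_work splices them over to the ordered
+ * resp_send_list it actually transmits from, so producer and consumer
+ * never need a shared lock.
+ */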
+static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
+{
+ struct llist_node *node;
+
+ node = llist_del_all(&queue->resp_list);
+ if (!node)
+ return;
+
+ while (node) {
+ struct nvmet_tcp_cmd *cmd = llist_entry(node,
+ struct nvmet_tcp_cmd, lentry);
+
+ list_add(&cmd->entry, &queue->resp_send_list);
+ node = node->next;
+ queue->send_list_len++;
+ }
+}
+
+static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
+{
+ queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
+ struct nvmet_tcp_cmd, entry);
+ if (!queue->snd_cmd) {
+ nvmet_tcp_process_resp_list(queue);
+ queue->snd_cmd =
+ list_first_entry_or_null(&queue->resp_send_list,
+ struct nvmet_tcp_cmd, entry);
+ if (unlikely(!queue->snd_cmd))
+ return NULL;
+ }
+
+ list_del_init(&queue->snd_cmd->entry);
+ queue->send_list_len--;
+
+ if (nvmet_tcp_need_data_out(queue->snd_cmd))
+ nvmet_setup_c2h_data_pdu(queue->snd_cmd);
+ else if (nvmet_tcp_need_data_in(queue->snd_cmd))
+ nvmet_setup_r2t_pdu(queue->snd_cmd);
+ else
+ nvmet_setup_response_pdu(queue->snd_cmd);
+
+ return queue->snd_cmd;
+}
+
+static void nvmet_tcp_queue_response(struct nvmet_req *req)
+{
+ struct nvmet_tcp_cmd *cmd =
+ container_of(req, struct nvmet_tcp_cmd, req);
+ struct nvmet_tcp_queue *queue = cmd->queue;
+
+ llist_add(&cmd->lentry, &queue->resp_list);
+ queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
+}
+
+static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
+{
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+ int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
+ int ret;
+
+ ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
+ offset_in_page(cmd->data_pdu) + cmd->offset,
+ left, MSG_DONTWAIT | MSG_MORE);
+ if (ret <= 0)
+ return ret;
+
+ cmd->offset += ret;
+ left -= ret;
+
+ if (left)
+ return -EAGAIN;
+
+ cmd->state = NVMET_TCP_SEND_DATA;
+ cmd->offset = 0;
+ return 1;
+}
+
+static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ int ret;
+
+ while (cmd->cur_sg) {
+ struct page *page = sg_page(cmd->cur_sg);
+ u32 left = cmd->cur_sg->length - cmd->offset;
+
+ ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
+ left, MSG_DONTWAIT | MSG_MORE);
+ if (ret <= 0)
+ return ret;
+
+ cmd->offset += ret;
+ cmd->wbytes_done += ret;
+
+ /* Done with sg? */
+ if (cmd->offset == cmd->cur_sg->length) {
+ cmd->cur_sg = sg_next(cmd->cur_sg);
+ cmd->offset = 0;
+ }
+ }
+
+ if (queue->data_digest) {
+ cmd->state = NVMET_TCP_SEND_DDGST;
+ cmd->offset = 0;
+ } else {
+ nvmet_setup_response_pdu(cmd);
+ }
+ return 1;
+}
+
+static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
+ bool last_in_batch)
+{
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+ int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
+ int flags = MSG_DONTWAIT;
+ int ret;
+
+ if (!last_in_batch && cmd->queue->send_list_len)
+ flags |= MSG_MORE;
+ else
+ flags |= MSG_EOR;
+
+ ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
+ offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
+ if (ret <= 0)
+ return ret;
+ cmd->offset += ret;
+ left -= ret;
+
+ if (left)
+ return -EAGAIN;
+
+ kfree(cmd->iov);
+ sgl_free(cmd->req.sg);
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ return 1;
+}
+
+static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
+{
+ u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
+ int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
+ int flags = MSG_DONTWAIT;
+ int ret;
+
+ if (!last_in_batch && cmd->queue->send_list_len)
+ flags |= MSG_MORE;
+ else
+ flags |= MSG_EOR;
+
+ ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
+ offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
+ if (ret <= 0)
+ return ret;
+ cmd->offset += ret;
+ left -= ret;
+
+ if (left)
+ return -EAGAIN;
+
+ cmd->queue->snd_cmd = NULL;
+ return 1;
+}
+
+static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvmet_tcp_queue *queue = cmd->queue;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+ struct kvec iov = {
+ .iov_base = &cmd->exp_ddgst + cmd->offset,
+ .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
+ };
+ int ret;
+
+ ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
+ if (unlikely(ret <= 0))
+ return ret;
+
+ cmd->offset += ret;
+ nvmet_setup_response_pdu(cmd);
+ return 1;
+}
+
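+/*
+ * One step of the send state machine: each state advances to the next
+ * on full progress, while a blocked or partial send surfaces as 0
+ * (with -EAGAIN absorbed) and resumes from the same state on the next
+ * io_work pass.
+ */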
+static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
+ bool last_in_batch)
+{
+ struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
+ int ret = 0;
+
+ if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
+ cmd = nvmet_tcp_fetch_cmd(queue);
+ if (unlikely(!cmd))
+ return 0;
+ }
+
+ if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
+ ret = nvmet_try_send_data_pdu(cmd);
+ if (ret <= 0)
+ goto done_send;
+ }
+
+ if (cmd->state == NVMET_TCP_SEND_DATA) {
+ ret = nvmet_try_send_data(cmd);
+ if (ret <= 0)
+ goto done_send;
+ }
+
+ if (cmd->state == NVMET_TCP_SEND_DDGST) {
+ ret = nvmet_try_send_ddgst(cmd);
+ if (ret <= 0)
+ goto done_send;
+ }
+
+ if (cmd->state == NVMET_TCP_SEND_R2T) {
+ ret = nvmet_try_send_r2t(cmd, last_in_batch);
+ if (ret <= 0)
+ goto done_send;
+ }
+
+ if (cmd->state == NVMET_TCP_SEND_RESPONSE)
+ ret = nvmet_try_send_response(cmd, last_in_batch);
+
+done_send:
+ if (ret < 0) {
+ if (ret == -EAGAIN)
+ return 0;
+ return ret;
+ }
+
+ return 1;
+}
+
+static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
+ int budget, int *sends)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < budget; i++) {
+ ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
+ if (ret <= 0)
+ break;
+ (*sends)++;
+ }
+
+ return ret;
+}
+
+static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
+{
+ queue->offset = 0;
+ queue->left = sizeof(struct nvme_tcp_hdr);
+ queue->cmd = NULL;
+ queue->rcv_state = NVMET_TCP_RECV_PDU;
+}
+
+static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
+
+ ahash_request_free(queue->rcv_hash);
+ ahash_request_free(queue->snd_hash);
+ crypto_free_ahash(tfm);
+}
+
+static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
+{
+ struct crypto_ahash *tfm;
+
+ tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!queue->snd_hash)
+ goto free_tfm;
+ ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
+
+ queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!queue->rcv_hash)
+ goto free_snd_hash;
+ ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
+
+ return 0;
+free_snd_hash:
+ ahash_request_free(queue->snd_hash);
+free_tfm:
+ crypto_free_ahash(tfm);
+ return -ENOMEM;
+}
+
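+/*
+ * Handle the connection-establishing ICReq: validate plen, pfv, hpda
+ * and maxr2t, latch the negotiated digest settings, answer with an
+ * ICResp, and only then mark the queue LIVE and arm PDU reception.
+ */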
+static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
+{
+ struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
+ struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
+ struct msghdr msg = {};
+ struct kvec iov;
+ int ret;
+
+ if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
+ pr_err("bad nvme-tcp pdu length (%d)\n",
+ le32_to_cpu(icreq->hdr.plen));
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+
+ if (icreq->pfv != NVME_TCP_PFV_1_0) {
+ pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
+ return -EPROTO;
+ }
+
+ if (icreq->hpda != 0) {
+ pr_err("queue %d: unsupported hpda %d\n", queue->idx,
+ icreq->hpda);
+ return -EPROTO;
+ }
+
+ if (icreq->maxr2t != 0) {
+ pr_err("queue %d: unsupported maxr2t %d\n", queue->idx,
+ le32_to_cpu(icreq->maxr2t) + 1);
+ return -EPROTO;
+ }
+
+ queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
+ queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
+ if (queue->hdr_digest || queue->data_digest) {
+ ret = nvmet_tcp_alloc_crypto(queue);
+ if (ret)
+ return ret;
+ }
+
+ memset(icresp, 0, sizeof(*icresp));
+ icresp->hdr.type = nvme_tcp_icresp;
+ icresp->hdr.hlen = sizeof(*icresp);
+ icresp->hdr.pdo = 0;
+ icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
+ icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
+ icresp->maxdata = cpu_to_le32(0xffff); /* FIXME: support r2t */
+ icresp->cpda = 0;
+ if (queue->hdr_digest)
+ icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
+ if (queue->data_digest)
+ icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
+
+ iov.iov_base = icresp;
+ iov.iov_len = sizeof(*icresp);
+ ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
+ if (ret < 0)
+ goto free_crypto;
+
+ queue->state = NVMET_TCP_Q_LIVE;
+ nvmet_prepare_receive_pdu(queue);
+ return 0;
+free_crypto:
+ if (queue->hdr_digest || queue->data_digest)
+ nvmet_tcp_free_crypto(queue);
+ return ret;
+}
+
+static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
+ struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
+{
+ int ret;
+
+ /* recover the expected data transfer length */
+ req->data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
+
+ if (!nvme_is_write(cmd->req.cmd) ||
+ req->data_len > cmd->req.port->inline_data_size) {
+ nvmet_prepare_receive_pdu(queue);
+ return;
+ }
+
+ ret = nvmet_tcp_map_data(cmd);
+ if (unlikely(ret)) {
+ pr_err("queue %d: failed to map data\n", queue->idx);
+ nvmet_tcp_fatal_error(queue);
+ return;
+ }
+
+ queue->rcv_state = NVMET_TCP_RECV_DATA;
+ nvmet_tcp_map_pdu_iovec(cmd);
+ cmd->flags |= NVMET_TCP_F_INIT_FAILED;
+}
+
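+/*
+ * An H2CData PDU answers an earlier R2T: its ttag indexes directly
+ * into queue->cmds, and its data_offset must match how many bytes we
+ * have already received for that command.
+ */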
+static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
+{
+ struct nvme_tcp_data_pdu *data = &queue->pdu.data;
+ struct nvmet_tcp_cmd *cmd;
+
+ cmd = &queue->cmds[data->ttag];
+
+ if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
+ pr_err("ttag %u unexpected data offset %u (expected %u)\n",
+ data->ttag, le32_to_cpu(data->data_offset),
+ cmd->rbytes_done);
+ /* FIXME: use path and transport errors */
+ nvmet_req_complete(&cmd->req,
+ NVME_SC_INVALID_FIELD | NVME_SC_DNR);
+ return -EPROTO;
+ }
+
+ cmd->pdu_len = le32_to_cpu(data->data_length);
+ cmd->pdu_recv = 0;
+ nvmet_tcp_map_pdu_iovec(cmd);
+ queue->cmd = cmd;
+ queue->rcv_state = NVMET_TCP_RECV_DATA;
+
+ return 0;
+}
+
+static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
+{
+ struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
+ struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
+ struct nvmet_req *req;
+ int ret;
+
+ if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
+ if (hdr->type != nvme_tcp_icreq) {
+ pr_err("unexpected pdu type (%d) before icreq\n",
+ hdr->type);
+ nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
+ }
+ return nvmet_tcp_handle_icreq(queue);
+ }
+
+ if (hdr->type == nvme_tcp_h2c_data) {
+ ret = nvmet_tcp_handle_h2c_data_pdu(queue);
+ if (unlikely(ret))
+ return ret;
+ return 0;
+ }
+
+ queue->cmd = nvmet_tcp_get_cmd(queue);
+ if (unlikely(!queue->cmd)) {
+ /* This should never happen */
+ pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
+ queue->idx, queue->nr_cmds, queue->send_list_len,
+ nvme_cmd->common.opcode);
+ nvmet_tcp_fatal_error(queue);
+ return -ENOMEM;
+ }
+
+ req = &queue->cmd->req;
+ memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
+
+ if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
+ &queue->nvme_sq, &nvmet_tcp_ops))) {
+ pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
+ req->cmd, req->cmd->common.command_id,
+ req->cmd->common.opcode,
+ le32_to_cpu(req->cmd->common.dptr.sgl.length));
+
+ nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
+ return -EAGAIN;
+ }
+
+ ret = nvmet_tcp_map_data(queue->cmd);
+ if (unlikely(ret)) {
+ pr_err("queue %d: failed to map data\n", queue->idx);
+ if (nvmet_tcp_has_inline_data(queue->cmd))
+ nvmet_tcp_fatal_error(queue);
+ else
+ nvmet_req_complete(req, ret);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (nvmet_tcp_need_data_in(queue->cmd)) {
+ if (nvmet_tcp_has_inline_data(queue->cmd)) {
+ queue->rcv_state = NVMET_TCP_RECV_DATA;
+ nvmet_tcp_map_pdu_iovec(queue->cmd);
+ return 0;
+ }
+ /* send back R2T */
+ nvmet_tcp_queue_response(&queue->cmd->req);
+ goto out;
+ }
+
+ nvmet_req_execute(&queue->cmd->req);
+out:
+ nvmet_prepare_receive_pdu(queue);
+ return ret;
+}
+
+static const u8 nvme_tcp_pdu_sizes[] = {
+ [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
+ [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
+ [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
+};
+
+static inline u8 nvmet_tcp_pdu_size(u8 type)
+{
+ size_t idx = type;
+
+ return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
+ nvme_tcp_pdu_sizes[idx]) ?
+ nvme_tcp_pdu_sizes[idx] : 0;
+}
+
+static inline bool nvmet_tcp_pdu_valid(u8 type)
+{
+ switch (type) {
+ case nvme_tcp_icreq:
+ case nvme_tcp_cmd:
+ case nvme_tcp_h2c_data:
+ return true;
+ }
+
+ return false;
+}
+
+static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
+{
+ struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
+ int len;
+ struct kvec iov;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+
+recv:
+ iov.iov_base = (void *)&queue->pdu + queue->offset;
+ iov.iov_len = queue->left;
+ len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ iov.iov_len, msg.msg_flags);
+ if (unlikely(len < 0))
+ return len;
+
+ queue->offset += len;
+ queue->left -= len;
+ if (queue->left)
+ return -EAGAIN;
+
+ if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
+ u8 hdgst = nvmet_tcp_hdgst_len(queue);
+
+ if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
+ pr_err("unexpected pdu type %d\n", hdr->type);
+ nvmet_tcp_fatal_error(queue);
+ return -EIO;
+ }
+
+ if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
+ pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
+ return -EIO;
+ }
+
+ queue->left = hdr->hlen - queue->offset + hdgst;
+ goto recv;
+ }
+
+ if (queue->hdr_digest &&
+ nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
+ nvmet_tcp_fatal_error(queue); /* fatal */
+ return -EPROTO;
+ }
+
+ if (queue->data_digest &&
+ nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
+ nvmet_tcp_fatal_error(queue); /* fatal */
+ return -EPROTO;
+ }
+
+ return nvmet_tcp_done_recv_pdu(queue);
+}
+
+static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
+{
+ struct nvmet_tcp_queue *queue = cmd->queue;
+
+ nvmet_tcp_ddgst(queue->rcv_hash, cmd);
+ queue->offset = 0;
+ queue->left = NVME_TCP_DIGEST_LENGTH;
+ queue->rcv_state = NVMET_TCP_RECV_DDGST;
+}
+
+static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd = queue->cmd;
+ int ret;
+
+ while (msg_data_left(&cmd->recv_msg)) {
+ ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
+ cmd->recv_msg.msg_flags);
+ if (ret <= 0)
+ return ret;
+
+ cmd->pdu_recv += ret;
+ cmd->rbytes_done += ret;
+ }
+
+ nvmet_tcp_unmap_pdu_iovec(cmd);
+
+ if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
+ cmd->rbytes_done == cmd->req.transfer_len) {
+ if (queue->data_digest) {
+ nvmet_tcp_prep_recv_ddgst(cmd);
+ return 0;
+ }
+ nvmet_req_execute(&cmd->req);
+ }
+
+ nvmet_prepare_receive_pdu(queue);
+ return 0;
+}
+
+static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd = queue->cmd;
+ int ret;
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+ struct kvec iov = {
+ .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
+ .iov_len = queue->left
+ };
+
+ ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
+ iov.iov_len, msg.msg_flags);
+ if (unlikely(ret < 0))
+ return ret;
+
+ queue->offset += ret;
+ queue->left -= ret;
+ if (queue->left)
+ return -EAGAIN;
+
+ if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
+ pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
+ queue->idx, cmd->req.cmd->common.command_id,
+ queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
+ le32_to_cpu(cmd->exp_ddgst));
+ nvmet_tcp_finish_cmd(cmd);
+ nvmet_tcp_fatal_error(queue);
+ ret = -EPROTO;
+ goto out;
+ }
+
+ if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
+ cmd->rbytes_done == cmd->req.transfer_len)
+ nvmet_req_execute(&cmd->req);
+ ret = 0;
+out:
+ nvmet_prepare_receive_pdu(queue);
+ return ret;
+}
+
+static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
+{
+ int result;
+
+ if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
+ return 0;
+
+ if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
+ result = nvmet_tcp_try_recv_pdu(queue);
+ if (result != 0)
+ goto done_recv;
+ }
+
+ if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
+ result = nvmet_tcp_try_recv_data(queue);
+ if (result != 0)
+ goto done_recv;
+ }
+
+ if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
+ result = nvmet_tcp_try_recv_ddgst(queue);
+ if (result != 0)
+ goto done_recv;
+ }
+
+done_recv:
+ if (result < 0) {
+ if (result == -EAGAIN)
+ return 0;
+ return result;
+ }
+ return 1;
+}
+
+static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
+ int budget, int *recvs)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < budget; i++) {
+ ret = nvmet_tcp_try_recv_one(queue);
+ if (ret <= 0)
+ break;
+ (*recvs)++;
+ }
+
+ return ret;
+}
+
+static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
+{
+ spin_lock(&queue->state_lock);
+ if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
+ queue->state = NVMET_TCP_Q_DISCONNECTING;
+ schedule_work(&queue->release_work);
+ }
+ spin_unlock(&queue->state_lock);
+}
+
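+/*
+ * Per-queue I/O work: alternate bounded receive and send passes and
+ * keep looping only while either direction makes progress; once the
+ * overall budget is spent, requeue instead of monopolising the
+ * workqueue.
+ */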
+static void nvmet_tcp_io_work(struct work_struct *w)
+{
+ struct nvmet_tcp_queue *queue =
+ container_of(w, struct nvmet_tcp_queue, io_work);
+ bool pending;
+ int ret, ops = 0;
+
+ do {
+ pending = false;
+
+ ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
+ if (ret > 0) {
+ pending = true;
+ } else if (ret < 0) {
+ if (ret == -EPIPE || ret == -ECONNRESET)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ else
+ nvmet_tcp_fatal_error(queue);
+ return;
+ }
+
+ ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
+ if (ret > 0) {
+ /* transmitted message/data */
+ pending = true;
+ } else if (ret < 0) {
+ if (ret == -EPIPE || ret == -ECONNRESET)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ else
+ nvmet_tcp_fatal_error(queue);
+ return;
+ }
+
+ } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
+
+ /*
+ * We exhausted our budget, requeue ourselves
+ */
+ if (pending)
+ queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+}
+
+static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
+ struct nvmet_tcp_cmd *c)
+{
+ u8 hdgst = nvmet_tcp_hdgst_len(queue);
+
+ c->queue = queue;
+ c->req.port = queue->port->nport;
+
+ c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
+ if (!c->cmd_pdu)
+ return -ENOMEM;
+ c->req.cmd = &c->cmd_pdu->cmd;
+
+ c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
+ if (!c->rsp_pdu)
+ goto out_free_cmd;
+ c->req.rsp = &c->rsp_pdu->cqe;
+
+ c->data_pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
+ if (!c->data_pdu)
+ goto out_free_rsp;
+
+ c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
+ sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
+ if (!c->r2t_pdu)
+ goto out_free_data;
+
+ c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
+
+ list_add_tail(&c->entry, &queue->free_list);
+
+ return 0;
+out_free_data:
+ page_frag_free(c->data_pdu);
+out_free_rsp:
+ page_frag_free(c->rsp_pdu);
+out_free_cmd:
+ page_frag_free(c->cmd_pdu);
+ return -ENOMEM;
+}
+
+static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
+{
+ page_frag_free(c->r2t_pdu);
+ page_frag_free(c->data_pdu);
+ page_frag_free(c->rsp_pdu);
+ page_frag_free(c->cmd_pdu);
+}
+
+static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmds;
+ int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
+
+ cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
+ if (!cmds)
+ goto out;
+
+ for (i = 0; i < nr_cmds; i++) {
+ ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
+ if (ret)
+ goto out_free;
+ }
+
+ queue->cmds = cmds;
+
+ return 0;
+out_free:
+ while (--i >= 0)
+ nvmet_tcp_free_cmd(cmds + i);
+ kfree(cmds);
+out:
+ return ret;
+}
+
+static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmds = queue->cmds;
+ int i;
+
+ for (i = 0; i < queue->nr_cmds; i++)
+ nvmet_tcp_free_cmd(cmds + i);
+
+ nvmet_tcp_free_cmd(&queue->connect);
+ kfree(cmds);
+}
+
+static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
+{
+ struct socket *sock = queue->sock;
+
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_data_ready = queue->data_ready;
+ sock->sk->sk_state_change = queue->state_change;
+ sock->sk->sk_write_space = queue->write_space;
+ sock->sk->sk_user_data = NULL;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+}
+
+static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
+{
+ nvmet_req_uninit(&cmd->req);
+ nvmet_tcp_unmap_pdu_iovec(cmd);
+ sgl_free(cmd->req.sg);
+}
+
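+/* Tear down any commands still waiting for host data when a queue dies. */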
+static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
+{
+ struct nvmet_tcp_cmd *cmd = queue->cmds;
+ int i;
+
+ for (i = 0; i < queue->nr_cmds; i++, cmd++) {
+ if (nvmet_tcp_need_data_in(cmd))
+ nvmet_tcp_finish_cmd(cmd);
+ }
+
+ if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
+ /* failed in connect */
+ nvmet_tcp_finish_cmd(&queue->connect);
+ }
+}
+
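+/*
+ * Final queue teardown, run from the release work: unlink the queue,
+ * restore the socket callbacks, drain the I/O work and free all
+ * per-queue resources.
+ */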
+static void nvmet_tcp_release_queue_work(struct work_struct *w)
+{
+ struct nvmet_tcp_queue *queue =
+ container_of(w, struct nvmet_tcp_queue, release_work);
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_del_init(&queue->queue_list);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+
+ nvmet_tcp_restore_socket_callbacks(queue);
+ flush_work(&queue->io_work);
+
+ nvmet_tcp_uninit_data_in_cmds(queue);
+ nvmet_sq_destroy(&queue->nvme_sq);
+ cancel_work_sync(&queue->io_work);
+ sock_release(queue->sock);
+ nvmet_tcp_free_cmds(queue);
+ if (queue->hdr_digest || queue->data_digest)
+ nvmet_tcp_free_crypto(queue);
+ ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
+
+ kfree(queue);
+}
+
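+/* Socket callback: data has arrived, kick the queue's I/O work. */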
+static void nvmet_tcp_data_ready(struct sock *sk)
+{
+ struct nvmet_tcp_queue *queue;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (likely(queue))
+ queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void nvmet_tcp_write_space(struct sock *sk)
+{
+ struct nvmet_tcp_queue *queue;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (unlikely(!queue))
+ goto out;
+
+ if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
+ queue->write_space(sk);
+ goto out;
+ }
+
+ if (sk_stream_is_writeable(sk)) {
+ clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+ }
+out:
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static void nvmet_tcp_state_change(struct sock *sk)
+{
+ struct nvmet_tcp_queue *queue;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ queue = sk->sk_user_data;
+ if (!queue)
+ goto done;
+
+ switch (sk->sk_state) {
+ case TCP_FIN_WAIT1:
+ case TCP_CLOSE_WAIT:
+ case TCP_CLOSE:
+ sk->sk_user_data = NULL;
+ nvmet_tcp_schedule_release_queue(queue);
+ break;
+ default:
+ pr_warn("queue %d unhandled state %d\n",
+ queue->idx, sk->sk_state);
+ }
+done:
+ write_unlock_bh(&sk->sk_callback_lock);
+}
+
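+/*
+ * Take over the socket callbacks, saving the originals so that
+ * nvmet_tcp_restore_socket_callbacks() can put them back on teardown.
+ */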
+static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
+{
+ struct socket *sock = queue->sock;
+ struct linger sol = { .l_onoff = 1, .l_linger = 0 };
+ int ret;
+
+ ret = kernel_getsockname(sock,
+ (struct sockaddr *)&queue->sockaddr);
+ if (ret < 0)
+ return ret;
+
+ ret = kernel_getpeername(sock,
+ (struct sockaddr *)&queue->sockaddr_peer);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Clean up whatever is sitting in the TCP transmit queue on socket
+ * close. This is done to prevent stale data from being sent should
+ * the network connection be restored before TCP times out.
+ */
+ ret = kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER,
+ (char *)&sol, sizeof(sol));
+ if (ret)
+ return ret;
+
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_user_data = queue;
+ queue->data_ready = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+ queue->state_change = sock->sk->sk_state_change;
+ sock->sk->sk_state_change = nvmet_tcp_state_change;
+ queue->write_space = sock->sk->sk_write_space;
+ sock->sk->sk_write_space = nvmet_tcp_write_space;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+
+ return 0;
+}
+
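+/* Set up a queue for a freshly accepted socket and kick off its I/O work. */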
+static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
+ struct socket *newsock)
+{
+ struct nvmet_tcp_queue *queue;
+ int ret;
+
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return -ENOMEM;
+
+ INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
+ INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
+ queue->sock = newsock;
+ queue->port = port;
+ queue->nr_cmds = 0;
+ spin_lock_init(&queue->state_lock);
+ queue->state = NVMET_TCP_Q_CONNECTING;
+ INIT_LIST_HEAD(&queue->free_list);
+ init_llist_head(&queue->resp_list);
+ INIT_LIST_HEAD(&queue->resp_send_list);
+
+ queue->idx = ida_simple_get(&nvmet_tcp_queue_ida, 0, 0, GFP_KERNEL);
+ if (queue->idx < 0) {
+ ret = queue->idx;
+ goto out_free_queue;
+ }
+
+ ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
+ if (ret)
+ goto out_ida_remove;
+
+ ret = nvmet_sq_init(&queue->nvme_sq);
+ if (ret)
+ goto out_free_connect;
+
+ port->last_cpu = cpumask_next_wrap(port->last_cpu,
+ cpu_online_mask, -1, false);
+ queue->cpu = port->last_cpu;
+ nvmet_prepare_receive_pdu(queue);
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+
+ ret = nvmet_tcp_set_queue_sock(queue);
+ if (ret)
+ goto out_destroy_sq;
+
+ queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+
+ return 0;
+out_destroy_sq:
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_del_init(&queue->queue_list);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+ nvmet_sq_destroy(&queue->nvme_sq);
+out_free_connect:
+ nvmet_tcp_free_cmd(&queue->connect);
+out_ida_remove:
+ ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
+out_free_queue:
+ kfree(queue);
+ return ret;
+}
+
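+/*
+ * Accept loop for the listening socket: pull connections off the backlog
+ * until -EAGAIN and spin up a queue for each of them.
+ */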
+static void nvmet_tcp_accept_work(struct work_struct *w)
+{
+ struct nvmet_tcp_port *port =
+ container_of(w, struct nvmet_tcp_port, accept_work);
+ struct socket *newsock;
+ int ret;
+
+ while (true) {
+ ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ pr_warn("failed to accept err=%d\n", ret);
+ return;
+ }
+ ret = nvmet_tcp_alloc_queue(port, newsock);
+ if (ret) {
+ pr_err("failed to allocate queue\n");
+ sock_release(newsock);
+ }
+ }
+}
+
+static void nvmet_tcp_listen_data_ready(struct sock *sk)
+{
+ struct nvmet_tcp_port *port;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ port = sk->sk_user_data;
+ if (!port)
+ goto out;
+
+ if (sk->sk_state == TCP_LISTEN)
+ schedule_work(&port->accept_work);
+out:
+ read_unlock_bh(&sk->sk_callback_lock);
+}
+
+static int nvmet_tcp_add_port(struct nvmet_port *nport)
+{
+ struct nvmet_tcp_port *port;
+ __kernel_sa_family_t af;
+ int opt, ret;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ switch (nport->disc_addr.adrfam) {
+ case NVMF_ADDR_FAMILY_IP4:
+ af = AF_INET;
+ break;
+ case NVMF_ADDR_FAMILY_IP6:
+ af = AF_INET6;
+ break;
+ default:
+ pr_err("address family %d not supported\n",
+ nport->disc_addr.adrfam);
+ ret = -EINVAL;
+ goto err_port;
+ }
+
+ ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
+ nport->disc_addr.trsvcid, &port->addr);
+ if (ret) {
+ pr_err("malformed ip/port passed: %s:%s\n",
+ nport->disc_addr.traddr, nport->disc_addr.trsvcid);
+ goto err_port;
+ }
+
+ port->nport = nport;
+ port->last_cpu = -1;
+ INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
+ if (port->nport->inline_data_size < 0)
+ port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
+
+ ret = sock_create(port->addr.ss_family, SOCK_STREAM,
+ IPPROTO_TCP, &port->sock);
+ if (ret) {
+ pr_err("failed to create a socket\n");
+ goto err_port;
+ }
+
+ port->sock->sk->sk_user_data = port;
+ port->data_ready = port->sock->sk->sk_data_ready;
+ port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
+
+ opt = 1;
+ ret = kernel_setsockopt(port->sock, IPPROTO_TCP,
+ TCP_NODELAY, (char *)&opt, sizeof(opt));
+ if (ret) {
+ pr_err("failed to set TCP_NODELAY sock opt %d\n", ret);
+ goto err_sock;
+ }
+
+ ret = kernel_setsockopt(port->sock, SOL_SOCKET, SO_REUSEADDR,
+ (char *)&opt, sizeof(opt));
+ if (ret) {
+ pr_err("failed to set SO_REUSEADDR sock opt %d\n", ret);
+ goto err_sock;
+ }
+
+ ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
+ sizeof(port->addr));
+ if (ret) {
+ pr_err("failed to bind port socket %d\n", ret);
+ goto err_sock;
+ }
+
+ ret = kernel_listen(port->sock, 128);
+ if (ret) {
+ pr_err("failed to listen %d on port sock\n", ret);
+ goto err_sock;
+ }
+
+ nport->priv = port;
+ pr_info("enabling port %d (%pISpc)\n",
+ le16_to_cpu(nport->disc_addr.portid), &port->addr);
+
+ return 0;
+
+err_sock:
+ sock_release(port->sock);
+err_port:
+ kfree(port);
+ return ret;
+}
+
+static void nvmet_tcp_remove_port(struct nvmet_port *nport)
+{
+ struct nvmet_tcp_port *port = nport->priv;
+
+ write_lock_bh(&port->sock->sk->sk_callback_lock);
+ port->sock->sk->sk_data_ready = port->data_ready;
+ port->sock->sk->sk_user_data = NULL;
+ write_unlock_bh(&port->sock->sk->sk_callback_lock);
+ cancel_work_sync(&port->accept_work);
+
+ sock_release(port->sock);
+ kfree(port);
+}
+
+static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
+{
+ struct nvmet_tcp_queue *queue;
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+ if (queue->nvme_sq.ctrl == ctrl)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+}
+
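+/*
+ * Called once the host has created a queue: for the admin queue, let any
+ * in-flight controller teardown complete first, then size the command
+ * array at twice the SQ depth.
+ */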
+static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
+{
+ struct nvmet_tcp_queue *queue =
+ container_of(sq, struct nvmet_tcp_queue, nvme_sq);
+
+ if (sq->qid == 0) {
+ /* Let inflight controller teardown complete */
+ flush_scheduled_work();
+ }
+
+ queue->nr_cmds = sq->size * 2;
+ if (nvmet_tcp_alloc_cmds(queue))
+ return NVME_SC_INTERNAL;
+ return 0;
+}
+
+static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
+ struct nvmet_port *nport, char *traddr)
+{
+ struct nvmet_tcp_port *port = nport->priv;
+
+ if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
+ struct nvmet_tcp_cmd *cmd =
+ container_of(req, struct nvmet_tcp_cmd, req);
+ struct nvmet_tcp_queue *queue = cmd->queue;
+
+ sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
+ } else {
+ memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
+ }
+}
+
+static struct nvmet_fabrics_ops nvmet_tcp_ops = {
+ .owner = THIS_MODULE,
+ .type = NVMF_TRTYPE_TCP,
+ .msdbd = 1,
+ .has_keyed_sgls = 0,
+ .add_port = nvmet_tcp_add_port,
+ .remove_port = nvmet_tcp_remove_port,
+ .queue_response = nvmet_tcp_queue_response,
+ .delete_ctrl = nvmet_tcp_delete_ctrl,
+ .install_queue = nvmet_tcp_install_queue,
+ .disc_traddr = nvmet_tcp_disc_port_addr,
+};
+
+static int __init nvmet_tcp_init(void)
+{
+ int ret;
+
+ nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq", WQ_HIGHPRI, 0);
+ if (!nvmet_tcp_wq)
+ return -ENOMEM;
+
+ ret = nvmet_register_transport(&nvmet_tcp_ops);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ destroy_workqueue(nvmet_tcp_wq);
+ return ret;
+}
+
+static void __exit nvmet_tcp_exit(void)
+{
+ struct nvmet_tcp_queue *queue;
+
+ nvmet_unregister_transport(&nvmet_tcp_ops);
+
+ flush_scheduled_work();
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+ flush_scheduled_work();
+
+ destroy_workqueue(nvmet_tcp_wq);
+}
+
+module_init(nvmet_tcp_init);
+module_exit(nvmet_tcp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index 53189d4022a6..810ab0fbcccb 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -81,42 +81,3 @@ const void *of_get_mac_address(struct device_node *np)
return of_get_mac_addr(np, "address");
}
EXPORT_SYMBOL(of_get_mac_address);
-
-/**
- * Obtain the MAC address from an nvmem provider named 'mac-address' through
- * device tree.
- * On success, copies the new address into memory pointed to by addr and
- * returns 0. Returns a negative error code otherwise.
- * @np: Device tree node containing the nvmem-cells phandle
- * @addr: Pointer to receive the MAC address using ether_addr_copy()
- */
-int of_get_nvmem_mac_address(struct device_node *np, void *addr)
-{
- struct nvmem_cell *cell;
- const void *mac;
- size_t len;
- int ret;
-
- cell = of_nvmem_cell_get(np, "mac-address");
- if (IS_ERR(cell))
- return PTR_ERR(cell);
-
- mac = nvmem_cell_read(cell, &len);
-
- nvmem_cell_put(cell);
-
- if (IS_ERR(mac))
- return PTR_ERR(mac);
-
- if (len < ETH_ALEN || !is_valid_ether_addr(mac)) {
- ret = -EINVAL;
- } else {
- ether_addr_copy(addr, mac);
- ret = 0;
- }
-
- kfree(mac);
-
- return ret;
-}
-EXPORT_SYMBOL(of_get_nvmem_mac_address);
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 013e65de074a..c1633041621d 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -32,24 +32,7 @@ unsigned int of_pdt_unique_id __initdata;
static char * __init of_pdt_build_full_name(struct device_node *dp)
{
- int len, ourlen, plen;
- char *n;
-
- dp->path_component_name = build_path_component(dp);
-
- plen = strlen(dp->parent->full_name);
- ourlen = strlen(dp->path_component_name);
- len = ourlen + plen + 2;
-
- n = prom_early_alloc(len);
- strcpy(n, dp->parent->full_name);
- if (!of_node_is_root(dp->parent)) {
- strcpy(n + plen, "/");
- plen++;
- }
- strcpy(n + plen, dp->path_component_name);
-
- return n;
+ return build_path_component(dp);
}
#else /* CONFIG_SPARC */
@@ -60,23 +43,21 @@ static inline void irq_trans_init(struct device_node *dp) { }
static char * __init of_pdt_build_full_name(struct device_node *dp)
{
static int failsafe_id = 0; /* for generating unique names on failure */
+ const char *name;
+ char path[256];
char *buf;
int len;
- if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
- goto failsafe;
-
- buf = prom_early_alloc(len + 1);
- if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
- goto failsafe;
- return buf;
+ if (!of_pdt_prom_ops->pkg2path(dp->phandle, path, sizeof(path), &len)) {
+ name = kbasename(path);
+ buf = prom_early_alloc(strlen(name) + 1);
+ strcpy(buf, name);
+ return buf;
+ }
- failsafe:
- buf = prom_early_alloc(strlen(dp->parent->full_name) +
- strlen(dp->name) + 16);
- sprintf(buf, "%s/%s@unknown%i",
- of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
- dp->name, failsafe_id++);
+ name = of_get_property(dp, "name", &len);
+ buf = prom_early_alloc(len + 16);
+ sprintf(buf, "%s@unknown%i", name, failsafe_id++);
pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
return buf;
}
@@ -181,6 +162,8 @@ static struct device_node * __init of_pdt_create_node(phandle node,
dp->properties = of_pdt_build_prop_list(node);
+ dp->full_name = of_pdt_build_full_name(dp);
+
irq_trans_init(dp);
return dp;
@@ -204,8 +187,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
ret = dp;
prev_sibling = dp;
- dp->full_name = of_pdt_build_full_name(dp);
-
dp->child = of_pdt_build_tree(dp, of_pdt_prom_ops->getchild(node));
if (of_pdt_build_more)
@@ -228,9 +209,6 @@ void __init of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops)
of_pdt_prom_ops = ops;
of_root = of_pdt_create_node(root_node, NULL);
-#if defined(CONFIG_SPARC)
- of_root->path_component_name = "";
-#endif
of_root->full_name = "/";
of_root->child = of_pdt_build_tree(of_root,
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 2c2df4e4fc14..e5507add8f04 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -196,12 +196,12 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
if (IS_ERR(opp_table))
return 0;
- count = opp_table->regulator_count;
-
/* Regulator may not be required for the device */
- if (!count)
+ if (!opp_table->regulators)
goto put_opp_table;
+ count = opp_table->regulator_count;
+
uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
if (!uV)
goto put_opp_table;
@@ -548,44 +548,6 @@ _generic_set_opp_clk_only(struct device *dev, struct clk *clk,
return ret;
}
-static inline int
-_generic_set_opp_domain(struct device *dev, struct clk *clk,
- unsigned long old_freq, unsigned long freq,
- unsigned int old_pstate, unsigned int new_pstate)
-{
- int ret;
-
- /* Scaling up? Scale domain performance state before frequency */
- if (freq > old_freq) {
- ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
- if (ret)
- return ret;
- }
-
- ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
- if (ret)
- goto restore_domain_state;
-
- /* Scaling down? Scale domain performance state after frequency */
- if (freq < old_freq) {
- ret = dev_pm_genpd_set_performance_state(dev, new_pstate);
- if (ret)
- goto restore_freq;
- }
-
- return 0;
-
-restore_freq:
- if (_generic_set_opp_clk_only(dev, clk, freq, old_freq))
- dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
- __func__, old_freq);
-restore_domain_state:
- if (freq > old_freq)
- dev_pm_genpd_set_performance_state(dev, old_pstate);
-
- return ret;
-}
-
static int _generic_set_opp_regulator(const struct opp_table *opp_table,
struct device *dev,
unsigned long old_freq,
@@ -635,6 +597,84 @@ restore_voltage:
return ret;
}
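+/*
+ * Hand the OPP transition to the platform's custom set_opp() callback,
+ * packaging the old and new supply state into the preallocated
+ * dev_pm_set_opp_data.
+ */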
+static int _set_opp_custom(const struct opp_table *opp_table,
+ struct device *dev, unsigned long old_freq,
+ unsigned long freq,
+ struct dev_pm_opp_supply *old_supply,
+ struct dev_pm_opp_supply *new_supply)
+{
+ struct dev_pm_set_opp_data *data;
+ int size;
+
+ data = opp_table->set_opp_data;
+ data->regulators = opp_table->regulators;
+ data->regulator_count = opp_table->regulator_count;
+ data->clk = opp_table->clk;
+ data->dev = dev;
+
+ data->old_opp.rate = old_freq;
+ size = sizeof(*old_supply) * opp_table->regulator_count;
+ if (!old_supply)
+ memset(data->old_opp.supplies, 0, size);
+ else
+ memcpy(data->old_opp.supplies, old_supply, size);
+
+ data->new_opp.rate = freq;
+ memcpy(data->new_opp.supplies, new_supply, size);
+
+ return opp_table->set_opp(data);
+}
+
+/* This is only called for PM domains for now */
+static int _set_required_opps(struct device *dev,
+ struct opp_table *opp_table,
+ struct dev_pm_opp *opp)
+{
+ struct opp_table **required_opp_tables = opp_table->required_opp_tables;
+ struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
+ unsigned int pstate;
+ int i, ret = 0;
+
+ if (!required_opp_tables)
+ return 0;
+
+ /* Single genpd case */
+ if (!genpd_virt_devs) {
+ pstate = opp->required_opps[0]->pstate;
+ ret = dev_pm_genpd_set_performance_state(dev, pstate);
+ if (ret) {
+ dev_err(dev, "Failed to set performance state of %s: %d (%d)\n",
+ dev_name(dev), pstate, ret);
+ }
+ return ret;
+ }
+
+ /* Multiple genpd case */
+
+ /*
+ * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
+ * after it is freed from another thread.
+ */
+ mutex_lock(&opp_table->genpd_virt_dev_lock);
+
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ pstate = opp->required_opps[i]->pstate;
+
+ if (!genpd_virt_devs[i])
+ continue;
+
+ ret = dev_pm_genpd_set_performance_state(genpd_virt_devs[i], pstate);
+ if (ret) {
+ dev_err(dev, "Failed to set performance rate of %s: %d (%d)\n",
+ dev_name(genpd_virt_devs[i]), pstate, ret);
+ break;
+ }
+ }
+ mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+ return ret;
+}
+
/**
* dev_pm_opp_set_rate() - Configure new OPP based on frequency
* @dev: device for which we do this operation
@@ -649,7 +689,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
unsigned long freq, old_freq;
struct dev_pm_opp *old_opp, *opp;
struct clk *clk;
- int ret, size;
+ int ret;
if (unlikely(!target_freq)) {
dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
@@ -702,44 +742,34 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__,
old_freq, freq);
- /* Only frequency scaling */
- if (!opp_table->regulators) {
- /*
- * We don't support devices with both regulator and
- * domain performance-state for now.
- */
- if (opp_table->genpd_performance_state)
- ret = _generic_set_opp_domain(dev, clk, old_freq, freq,
- IS_ERR(old_opp) ? 0 : old_opp->pstate,
- opp->pstate);
- else
- ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
- } else if (!opp_table->set_opp) {
+ /* Scaling up? Configure required OPPs before frequency */
+ if (freq > old_freq) {
+ ret = _set_required_opps(dev, opp_table, opp);
+ if (ret)
+ goto put_opp;
+ }
+
+ if (opp_table->set_opp) {
+ ret = _set_opp_custom(opp_table, dev, old_freq, freq,
+ IS_ERR(old_opp) ? NULL : old_opp->supplies,
+ opp->supplies);
+ } else if (opp_table->regulators) {
ret = _generic_set_opp_regulator(opp_table, dev, old_freq, freq,
IS_ERR(old_opp) ? NULL : old_opp->supplies,
opp->supplies);
} else {
- struct dev_pm_set_opp_data *data;
-
- data = opp_table->set_opp_data;
- data->regulators = opp_table->regulators;
- data->regulator_count = opp_table->regulator_count;
- data->clk = clk;
- data->dev = dev;
-
- data->old_opp.rate = old_freq;
- size = sizeof(*opp->supplies) * opp_table->regulator_count;
- if (IS_ERR(old_opp))
- memset(data->old_opp.supplies, 0, size);
- else
- memcpy(data->old_opp.supplies, old_opp->supplies, size);
-
- data->new_opp.rate = freq;
- memcpy(data->new_opp.supplies, opp->supplies, size);
+ /* Only frequency scaling */
+ ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq);
+ }
- ret = opp_table->set_opp(data);
+ /* Scaling down? Configure required OPPs after frequency */
+ if (!ret && freq < old_freq) {
+ ret = _set_required_opps(dev, opp_table, opp);
+ if (ret)
+ dev_err(dev, "Failed to set required opps: %d\n", ret);
}
+put_opp:
dev_pm_opp_put(opp);
put_old_opp:
if (!IS_ERR(old_opp))
@@ -810,8 +840,12 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
return NULL;
mutex_init(&opp_table->lock);
+ mutex_init(&opp_table->genpd_virt_dev_lock);
INIT_LIST_HEAD(&opp_table->dev_list);
+ /* Mark regulator count uninitialized */
+ opp_table->regulator_count = -1;
+
opp_dev = _add_opp_dev(dev, opp_table);
if (!opp_dev) {
kfree(opp_table);
@@ -888,6 +922,8 @@ static void _opp_table_kref_release(struct kref *kref)
struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
struct opp_device *opp_dev, *temp;
+ _of_clear_opp_table(opp_table);
+
/* Release clk */
if (!IS_ERR(opp_table->clk))
clk_put(opp_table->clk);
@@ -905,6 +941,7 @@ static void _opp_table_kref_release(struct kref *kref)
_remove_opp_dev(opp_dev, opp_table);
}
+ mutex_destroy(&opp_table->genpd_virt_dev_lock);
mutex_destroy(&opp_table->lock);
list_del(&opp_table->node);
kfree(opp_table);
@@ -961,6 +998,7 @@ static void _opp_kref_release(struct kref *kref)
* frequency/voltage list.
*/
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp);
+ _of_opp_free_required_opps(opp_table, opp);
opp_debug_remove_one(opp);
list_del(&opp->node);
kfree(opp);
@@ -1028,7 +1066,7 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *table)
int count, supply_size;
/* Allocate space for at least one supply */
- count = table->regulator_count ? table->regulator_count : 1;
+ count = table->regulator_count > 0 ? table->regulator_count : 1;
supply_size = sizeof(*opp->supplies) * count;
/* allocate new OPP node and supplies structures */
@@ -1049,6 +1087,9 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
struct regulator *reg;
int i;
+ if (!opp_table->regulators)
+ return true;
+
for (i = 0; i < opp_table->regulator_count; i++) {
reg = opp_table->regulators[i];
@@ -1333,7 +1374,7 @@ static int _allocate_set_opp_data(struct opp_table *opp_table)
struct dev_pm_set_opp_data *data;
int len, count = opp_table->regulator_count;
- if (WARN_ON(!count))
+ if (WARN_ON(!opp_table->regulators))
return -EINVAL;
/* space for set_opp_data */
@@ -1430,7 +1471,7 @@ free_regulators:
kfree(opp_table->regulators);
opp_table->regulators = NULL;
- opp_table->regulator_count = 0;
+ opp_table->regulator_count = -1;
err:
dev_pm_opp_put_opp_table(opp_table);
@@ -1459,7 +1500,7 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
kfree(opp_table->regulators);
opp_table->regulators = NULL;
- opp_table->regulator_count = 0;
+ opp_table->regulator_count = -1;
put_opp_table:
dev_pm_opp_put_opp_table(opp_table);
@@ -1587,6 +1628,155 @@ void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table)
EXPORT_SYMBOL_GPL(dev_pm_opp_unregister_set_opp_helper);
/**
+ * dev_pm_opp_set_genpd_virt_dev - Set virtual genpd device for an index
+ * @dev: Consumer device for which the genpd device is getting set.
+ * @virt_dev: virtual genpd device.
+ * @index: index.
+ *
+ * Multiple generic power domains for a device are supported with the help of
+ * virtual genpd devices, one created for each consumer-device/genpd pair.
+ * These are the device structures which are attached to the power domain and
+ * are required by the OPP core to set the performance state of the genpd.
+ *
+ * This helper will normally be called by the consumer driver of the device
+ * "dev", as only it knows the details of the genpd devices.
+ *
+ * This helper needs to be called once for each of those virtual devices, but
+ * only if multiple domains are available for a device. Otherwise the original
+ * device structure will be used instead by the OPP core.
+ */
+struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev,
+ struct device *virt_dev,
+ int index)
+{
+ struct opp_table *opp_table;
+
+ opp_table = dev_pm_opp_get_opp_table(dev);
+ if (!opp_table)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&opp_table->genpd_virt_dev_lock);
+
+ if (unlikely(!opp_table->genpd_virt_devs ||
+ index >= opp_table->required_opp_count ||
+ opp_table->genpd_virt_devs[index])) {
+
+ dev_err(dev, "Invalid request to set required device\n");
+ mutex_unlock(&opp_table->genpd_virt_dev_lock);
+ dev_pm_opp_put_opp_table(opp_table);
+
+ return ERR_PTR(-EINVAL);
+ }
+
+ opp_table->genpd_virt_devs[index] = virt_dev;
+ mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+ return opp_table;
+}
+
+/**
+ * dev_pm_opp_put_genpd_virt_dev() - Release resources reserved for the genpd device.
+ * @opp_table: OPP table returned by dev_pm_opp_set_genpd_virt_dev().
+ * @virt_dev: virtual genpd device.
+ *
+ * This releases the resource previously acquired with a call to
+ * dev_pm_opp_set_genpd_virt_dev(). The consumer driver shall call this helper
+ * if it no longer wants the OPP core to update the performance state of the
+ * power domain.
+ */
+void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table,
+ struct device *virt_dev)
+{
+ int i;
+
+ /*
+ * Acquire genpd_virt_dev_lock to make sure virt_dev isn't getting
+ * used in parallel.
+ */
+ mutex_lock(&opp_table->genpd_virt_dev_lock);
+
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ if (opp_table->genpd_virt_devs[i] != virt_dev)
+ continue;
+
+ opp_table->genpd_virt_devs[i] = NULL;
+ dev_pm_opp_put_opp_table(opp_table);
+
+ /* Drop the vote */
+ dev_pm_genpd_set_performance_state(virt_dev, 0);
+ break;
+ }
+
+ mutex_unlock(&opp_table->genpd_virt_dev_lock);
+
+ if (unlikely(i == opp_table->required_opp_count))
+ dev_err(virt_dev, "Failed to find required device entry\n");
+}
+
+/**
+ * dev_pm_opp_xlate_performance_state() - Find required OPP's pstate for src_table.
+ * @src_table: OPP table which has dst_table as one of its required OPP table.
+ * @dst_table: Required OPP table of the src_table.
+ * @pstate: Current performance state of the src_table.
+ *
+ * This returns the pstate of the OPP (present in @dst_table) pointed out by the
+ * "required-opps" property of the OPP (present in @src_table) which has
+ * performance state set to @pstate.
+ *
+ * Return: Zero or positive performance state on success, otherwise a
+ * negative value on error.
+ */
+int dev_pm_opp_xlate_performance_state(struct opp_table *src_table,
+ struct opp_table *dst_table,
+ unsigned int pstate)
+{
+ struct dev_pm_opp *opp;
+ int dest_pstate = -EINVAL;
+ int i;
+
+ if (!pstate)
+ return 0;
+
+ /*
+ * Normally the src_table will have the "required-opps" property set to
+ * point to one of the OPPs in the dst_table, but in some cases the
+ * genpd and its master have a one-to-one mapping of performance states
+ * and so none of them have the "required-opps" property set. Return the
+ * pstate of the src_table as it is in such cases.
+ */
+ if (!src_table->required_opp_count)
+ return pstate;
+
+ for (i = 0; i < src_table->required_opp_count; i++) {
+ if (src_table->required_opp_tables[i]->np == dst_table->np)
+ break;
+ }
+
+ if (unlikely(i == src_table->required_opp_count)) {
+ pr_err("%s: Couldn't find matching OPP table (%p: %p)\n",
+ __func__, src_table, dst_table);
+ return -EINVAL;
+ }
+
+ mutex_lock(&src_table->lock);
+
+ list_for_each_entry(opp, &src_table->opp_list, node) {
+ if (opp->pstate == pstate) {
+ dest_pstate = opp->required_opps[i]->pstate;
+ goto unlock;
+ }
+ }
+
+ pr_err("%s: Couldn't find matching OPP (%p: %p)\n", __func__, src_table,
+ dst_table);
+
+unlock:
+ mutex_unlock(&src_table->lock);
+
+ return dest_pstate;
+}
+
+/**
* dev_pm_opp_add() - Add an OPP table from a table definitions
* @dev: device for which we do this operation
* @freq: Frequency in Hz for this OPP
@@ -1612,6 +1802,9 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
if (!opp_table)
return -ENOMEM;
+ /* Fix regulator count for dynamic OPPs */
+ opp_table->regulator_count = 1;
+
ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
if (ret)
dev_pm_opp_put_opp_table(opp_table);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 5a4b47958073..06f0f632ec47 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -73,6 +73,167 @@ struct opp_table *_managed_opp(struct device *dev, int index)
return managed_table;
}
+/* The caller must call dev_pm_opp_put() after the OPP is used */
+static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
+ struct device_node *opp_np)
+{
+ struct dev_pm_opp *opp;
+
+ lockdep_assert_held(&opp_table_lock);
+
+ mutex_lock(&opp_table->lock);
+
+ list_for_each_entry(opp, &opp_table->opp_list, node) {
+ if (opp->np == opp_np) {
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+ return opp;
+ }
+ }
+
+ mutex_unlock(&opp_table->lock);
+
+ return NULL;
+}
+
+static struct device_node *of_parse_required_opp(struct device_node *np,
+ int index)
+{
+ struct device_node *required_np;
+
+ required_np = of_parse_phandle(np, "required-opps", index);
+ if (unlikely(!required_np)) {
+ pr_err("%s: Unable to parse required-opps: %pOF, index: %d\n",
+ __func__, np, index);
+ }
+
+ return required_np;
+}
+
+/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
+static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
+{
+ struct opp_table *opp_table;
+ struct device_node *opp_table_np;
+
+ lockdep_assert_held(&opp_table_lock);
+
+ opp_table_np = of_get_parent(opp_np);
+ if (!opp_table_np)
+ goto err;
+
+ /* It is safe to put the node now as all we need is its address */
+ of_node_put(opp_table_np);
+
+ list_for_each_entry(opp_table, &opp_tables, node) {
+ if (opp_table_np == opp_table->np) {
+ _get_opp_table_kref(opp_table);
+ return opp_table;
+ }
+ }
+
+err:
+ return ERR_PTR(-ENODEV);
+}
+
+/* Free resources previously acquired by _opp_table_alloc_required_tables() */
+static void _opp_table_free_required_tables(struct opp_table *opp_table)
+{
+ struct opp_table **required_opp_tables = opp_table->required_opp_tables;
+ struct device **genpd_virt_devs = opp_table->genpd_virt_devs;
+ int i;
+
+ if (!required_opp_tables)
+ return;
+
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ if (IS_ERR_OR_NULL(required_opp_tables[i]))
+ break;
+
+ dev_pm_opp_put_opp_table(required_opp_tables[i]);
+ }
+
+ kfree(required_opp_tables);
+ kfree(genpd_virt_devs);
+
+ opp_table->required_opp_count = 0;
+ opp_table->genpd_virt_devs = NULL;
+ opp_table->required_opp_tables = NULL;
+}
+
+/*
+ * Populate all devices and opp tables which are part of the "required-opps" list.
+ * Checking only the first OPP node should be enough.
+ */
+static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
+ struct device *dev,
+ struct device_node *opp_np)
+{
+ struct opp_table **required_opp_tables;
+ struct device **genpd_virt_devs = NULL;
+ struct device_node *required_np, *np;
+ int count, i;
+
+ /* Traversing the first OPP node is all we need */
+ np = of_get_next_available_child(opp_np, NULL);
+ if (!np) {
+ dev_err(dev, "Empty OPP table\n");
+ return;
+ }
+
+ count = of_count_phandle_with_args(np, "required-opps", NULL);
+ if (count <= 0)
+ goto put_np;
+
+ if (count > 1) {
+ genpd_virt_devs = kcalloc(count, sizeof(*genpd_virt_devs),
+ GFP_KERNEL);
+ if (!genpd_virt_devs)
+ goto put_np;
+ }
+
+ required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
+ GFP_KERNEL);
+ if (!required_opp_tables) {
+ kfree(genpd_virt_devs);
+ goto put_np;
+ }
+
+ opp_table->genpd_virt_devs = genpd_virt_devs;
+ opp_table->required_opp_tables = required_opp_tables;
+ opp_table->required_opp_count = count;
+
+ for (i = 0; i < count; i++) {
+ required_np = of_parse_required_opp(np, i);
+ if (!required_np)
+ goto free_required_tables;
+
+ required_opp_tables[i] = _find_table_of_opp_np(required_np);
+ of_node_put(required_np);
+
+ if (IS_ERR(required_opp_tables[i]))
+ goto free_required_tables;
+
+ /*
+ * We only support genpd's OPPs in the "required-opps" for now,
+ * as we don't know enough about other cases. Error out if the
+ * required OPP doesn't belong to a genpd.
+ */
+ if (!required_opp_tables[i]->is_genpd) {
+ dev_err(dev, "required-opp doesn't belong to genpd: %pOF\n",
+ required_np);
+ goto free_required_tables;
+ }
+ }
+
+ goto put_np;
+
+free_required_tables:
+ _opp_table_free_required_tables(opp_table);
+put_np:
+ of_node_put(np);
+}
+
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
int index)
{
@@ -92,6 +253,9 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
of_property_read_u32(np, "voltage-tolerance",
&opp_table->voltage_tolerance_v1);
+ if (of_find_property(np, "#power-domain-cells", NULL))
+ opp_table->is_genpd = true;
+
/* Get OPP table node */
opp_np = _opp_of_get_opp_desc_node(np, index);
of_node_put(np);
@@ -106,9 +270,86 @@ void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
opp_table->np = opp_np;
+ _opp_table_alloc_required_tables(opp_table, dev, opp_np);
of_node_put(opp_np);
}
+void _of_clear_opp_table(struct opp_table *opp_table)
+{
+ _opp_table_free_required_tables(opp_table);
+}
+
+/*
+ * Release all resources previously acquired with a call to
+ * _of_opp_alloc_required_opps().
+ */
+void _of_opp_free_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp)
+{
+ struct dev_pm_opp **required_opps = opp->required_opps;
+ int i;
+
+ if (!required_opps)
+ return;
+
+ for (i = 0; i < opp_table->required_opp_count; i++) {
+ if (!required_opps[i])
+ break;
+
+ /* Put the reference back */
+ dev_pm_opp_put(required_opps[i]);
+ }
+
+ kfree(required_opps);
+ opp->required_opps = NULL;
+}
+
+/* Populate all required OPPs which are part of "required-opps" list */
+static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp)
+{
+ struct dev_pm_opp **required_opps;
+ struct opp_table *required_table;
+ struct device_node *np;
+ int i, ret, count = opp_table->required_opp_count;
+
+ if (!count)
+ return 0;
+
+ required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL);
+ if (!required_opps)
+ return -ENOMEM;
+
+ opp->required_opps = required_opps;
+
+ for (i = 0; i < count; i++) {
+ required_table = opp_table->required_opp_tables[i];
+
+ np = of_parse_required_opp(opp->np, i);
+ if (unlikely(!np)) {
+ ret = -ENODEV;
+ goto free_required_opps;
+ }
+
+ required_opps[i] = _find_opp_of_np(required_table, np);
+ of_node_put(np);
+
+ if (!required_opps[i]) {
+ pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
+ __func__, opp->np, i);
+ ret = -ENODEV;
+ goto free_required_opps;
+ }
+ }
+
+ return 0;
+
+free_required_opps:
+ _of_opp_free_required_opps(opp_table, opp);
+
+ return ret;
+}
+
static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
struct device_node *np)
{
@@ -150,12 +391,10 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
struct opp_table *opp_table)
{
u32 *microvolt, *microamp = NULL;
- int supplies, vcount, icount, ret, i, j;
+ int supplies = opp_table->regulator_count, vcount, icount, ret, i, j;
struct property *prop = NULL;
char name[NAME_MAX];
- supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;
-
/* Search for "opp-microvolt-<name>" */
if (opp_table->prop_name) {
snprintf(name, sizeof(name), "opp-microvolt-%s",
@@ -170,7 +409,13 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
/* Missing property isn't a problem, but an invalid entry is */
if (!prop) {
- if (!opp_table->regulator_count)
+ if (unlikely(supplies == -1)) {
+ /* Initialize regulator_count */
+ opp_table->regulator_count = 0;
+ return 0;
+ }
+
+ if (!supplies)
return 0;
dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
@@ -179,6 +424,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
}
}
+ if (unlikely(supplies == -1)) {
+ /* Initialize regulator_count */
+ supplies = opp_table->regulator_count = 1;
+ } else if (unlikely(!supplies)) {
+ dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
+ return -EINVAL;
+ }
+
vcount = of_property_count_u32_elems(opp->np, name);
if (vcount < 0) {
dev_err(dev, "%s: Invalid %s property (%d)\n",
@@ -326,8 +579,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
ret = of_property_read_u64(np, "opp-hz", &rate);
if (ret < 0) {
/* "opp-hz" is optional for devices like power domains. */
- if (!of_find_property(dev->of_node, "#power-domain-cells",
- NULL)) {
+ if (!opp_table->is_genpd) {
dev_err(dev, "%s: opp-hz not found\n", __func__);
goto free_opp;
}
@@ -354,21 +606,26 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
new_opp->dynamic = false;
new_opp->available = true;
+ ret = _of_opp_alloc_required_opps(opp_table, new_opp);
+ if (ret)
+ goto free_opp;
+
if (!of_property_read_u32(np, "clock-latency-ns", &val))
new_opp->clock_latency_ns = val;
- new_opp->pstate = of_genpd_opp_to_performance_state(dev, np);
-
ret = opp_parse_supplies(new_opp, dev, opp_table);
if (ret)
- goto free_opp;
+ goto free_required_opps;
+
+ if (opp_table->is_genpd)
+ new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
if (ret) {
/* Don't return error for duplicate OPPs */
if (ret == -EBUSY)
ret = 0;
- goto free_opp;
+ goto free_required_opps;
}
/* OPP to select on device suspend */
@@ -398,6 +655,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
return new_opp;
+free_required_opps:
+ _of_opp_free_required_opps(opp_table, new_opp);
free_opp:
_opp_free(new_opp);
@@ -579,10 +838,8 @@ int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
*/
count = of_count_phandle_with_args(dev->of_node,
"operating-points-v2", NULL);
- if (count != 1)
- return -ENODEV;
-
- index = 0;
+ if (count == 1)
+ index = 0;
}
opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
@@ -729,58 +986,48 @@ put_cpu_node:
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
/**
- * of_dev_pm_opp_find_required_opp() - Search for required OPP.
- * @dev: The device whose OPP node is referenced by the 'np' DT node.
+ * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
* @np: Node that contains the "required-opps" property.
+ * @index: Index of the phandle to parse.
*
- * Returns the OPP of the device 'dev', whose phandle is present in the "np"
- * node. Although the "required-opps" property supports having multiple
- * phandles, this helper routine only parses the very first phandle in the list.
- *
- * Return: Matching opp, else returns ERR_PTR in case of error and should be
- * handled using IS_ERR.
+ * Returns the performance state of the OPP pointed out by the "required-opps"
+ * property at @index in @np.
*
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
+ * Return: Zero or positive performance state on success, otherwise a
+ * negative value on error.
*/
-struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev,
- struct device_node *np)
+int of_get_required_opp_performance_state(struct device_node *np, int index)
{
- struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+ struct dev_pm_opp *opp;
struct device_node *required_np;
struct opp_table *opp_table;
+ int pstate = -EINVAL;
- opp_table = _find_opp_table(dev);
- if (IS_ERR(opp_table))
- return ERR_CAST(opp_table);
+ required_np = of_parse_required_opp(np, index);
+ if (!required_np)
+ return -EINVAL;
- required_np = of_parse_phandle(np, "required-opps", 0);
- if (unlikely(!required_np)) {
- dev_err(dev, "Unable to parse required-opps\n");
- goto put_opp_table;
+ opp_table = _find_table_of_opp_np(required_np);
+ if (IS_ERR(opp_table)) {
+ pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
+ __func__, np, PTR_ERR(opp_table));
+ goto put_required_np;
}
- mutex_lock(&opp_table->lock);
-
- list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
- if (temp_opp->available && temp_opp->np == required_np) {
- opp = temp_opp;
-
- /* Increment the reference count of OPP */
- dev_pm_opp_get(opp);
- break;
- }
+ opp = _find_opp_of_np(opp_table, required_np);
+ if (opp) {
+ pstate = opp->pstate;
+ dev_pm_opp_put(opp);
}
- mutex_unlock(&opp_table->lock);
+ dev_pm_opp_put_opp_table(opp_table);
+put_required_np:
of_node_put(required_np);
-put_opp_table:
- dev_pm_opp_put_opp_table(opp_table);
- return opp;
+ return pstate;
}
-EXPORT_SYMBOL_GPL(of_dev_pm_opp_find_required_opp);
+EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
/**
* dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index 9c6544b4f4f9..e24d81497375 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -63,6 +63,7 @@ extern struct list_head opp_tables;
* @supplies: Power supplies voltage/current values
* @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
* frequency from any other OPP's frequency.
+ * @required_opps: List of OPPs that are required by this OPP.
* @opp_table: points back to the opp_table struct this opp belongs to
* @np: OPP's device node.
* @dentry: debugfs dentry pointer (per opp)
@@ -84,6 +85,7 @@ struct dev_pm_opp {
unsigned long clock_latency_ns;
+ struct dev_pm_opp **required_opps;
struct opp_table *opp_table;
struct device_node *np;
@@ -133,13 +135,21 @@ enum opp_table_access {
* @parsed_static_opps: True if OPPs are initialized from DT.
* @shared_opp: OPP is shared between multiple devices.
* @suspend_opp: Pointer to OPP to be used during device suspend.
+ * @genpd_virt_dev_lock: Mutex protecting the genpd virtual device pointers.
+ * @genpd_virt_devs: List of virtual devices for multiple genpd support.
+ * @required_opp_tables: List of device OPP tables that are required by OPPs in
+ * this table.
+ * @required_opp_count: Number of entries in @required_opp_tables.
* @supported_hw: Array of version number to support.
* @supported_hw_count: Number of elements in supported_hw array.
* @prop_name: A name to postfix to many DT properties, while parsing them.
* @clk: Device's clock handle
* @regulators: Supply regulators
- * @regulator_count: Number of power supply regulators
+ * @regulator_count: Number of power supply regulators. Its value can be -1
+ * (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
+ * property).
* @genpd_performance_state: Device's power domain support performance state.
+ * @is_genpd: Marks if the OPP table belongs to a genpd.
* @set_opp: Platform specific set_opp callback
* @set_opp_data: Data to be passed to set_opp callback
* @dentry: debugfs dentry pointer of the real device directory (not links).
@@ -171,13 +181,19 @@ struct opp_table {
enum opp_table_access shared_opp;
struct dev_pm_opp *suspend_opp;
+ struct mutex genpd_virt_dev_lock;
+ struct device **genpd_virt_devs;
+ struct opp_table **required_opp_tables;
+ unsigned int required_opp_count;
+
unsigned int *supported_hw;
unsigned int supported_hw_count;
const char *prop_name;
struct clk *clk;
struct regulator **regulators;
- unsigned int regulator_count;
+ int regulator_count;
bool genpd_performance_state;
+ bool is_genpd;
int (*set_opp)(struct dev_pm_set_opp_data *data);
struct dev_pm_set_opp_data *set_opp_data;
@@ -206,10 +222,16 @@ void _put_opp_list_kref(struct opp_table *opp_table);
#ifdef CONFIG_OF
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index);
+void _of_clear_opp_table(struct opp_table *opp_table);
struct opp_table *_managed_opp(struct device *dev, int index);
+void _of_opp_free_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp);
#else
static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev, int index) {}
+static inline void _of_clear_opp_table(struct opp_table *opp_table) {}
static inline struct opp_table *_managed_opp(struct device *dev, int index) { return NULL; }
+static inline void _of_opp_free_required_opps(struct opp_table *opp_table,
+ struct dev_pm_opp *opp) {}
#endif
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c
index 3f4fb4dbbe33..1c69c404df11 100644
--- a/drivers/opp/ti-opp-supply.c
+++ b/drivers/opp/ti-opp-supply.c
@@ -417,7 +417,6 @@ static struct platform_driver ti_opp_supply_driver = {
.probe = ti_opp_supply_probe,
.driver = {
.name = "ti_opp_supply",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ti_opp_supply_of_match),
},
};
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index f2bda77a2df1..657d642fcc67 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -11,6 +11,7 @@ ifdef CONFIG_PCI
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
obj-$(CONFIG_OF) += of.o
+obj-$(CONFIG_ACPI) += pci-acpi.o
endif
obj-$(CONFIG_PCI_QUIRKS) += quirks.o
@@ -20,7 +21,6 @@ obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
obj-$(CONFIG_PCI_BRIDGE_EMUL) += pci-bridge-emul.o
-obj-$(CONFIG_ACPI) += pci-acpi.o
obj-$(CONFIG_PCI_LABEL) += pci-label.o
obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
obj-$(CONFIG_PCI_SYSCALL) += syscall.o
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 2cbef2d7c207..88af6bff945f 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -81,8 +81,6 @@ struct imx6_pcie {
#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29)
-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4)
#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
@@ -711,12 +709,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
return 0;
}
-static int imx6_pcie_link_up(struct dw_pcie *pci)
-{
- return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
- PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
-}
-
static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
.host_init = imx6_pcie_host_init,
};
@@ -749,7 +741,7 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
}
static const struct dw_pcie_ops dw_pcie_ops = {
- .link_up = imx6_pcie_link_up,
+ /* No special ops needed, but pcie-designware still expects this struct */
};
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 3724d3ef7008..7aa9a82b7ebd 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -88,7 +88,7 @@ static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
int i;
for (i = 0; i < PCIE_IATU_NUM; i++)
- dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i);
+ dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
}
static int ls1021_pcie_link_up(struct dw_pcie *pci)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 1e7b02221eac..de8635af4cde 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -440,7 +440,6 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
tbl_offset = dw_pcie_readl_dbi(pci, reg);
bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
tbl_offset &= PCI_MSIX_TABLE_OFFSET;
- tbl_offset >>= 3;
reg = PCI_BASE_ADDRESS_0 + (4 * bir);
bar_addr_upper = 0;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index af24ed50a245..7a1c8a09efa5 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -534,14 +534,13 @@ error_attrs:
static struct msi_desc *
msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd)
{
- struct cpumask *masks = NULL;
+ struct irq_affinity_desc *masks = NULL;
struct msi_desc *entry;
u16 control;
if (affd)
masks = irq_create_affinity_masks(nvec, affd);
-
/* MSI Entry Initialization */
entry = alloc_msi_entry(&dev->dev, nvec, masks);
if (!entry)
@@ -672,7 +671,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
struct msix_entry *entries, int nvec,
const struct irq_affinity *affd)
{
- struct cpumask *curmsk, *masks = NULL;
+ struct irq_affinity_desc *curmsk, *masks = NULL;
struct msi_desc *entry;
int ret, i;
@@ -1036,6 +1035,13 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (maxvec < minvec)
return -ERANGE;
+ /*
+ * If the caller is passing in sets, we can't support a range of
+ * vectors. The caller needs to handle that.
+ */
+ if (affd && affd->nr_sets && minvec != maxvec)
+ return -EINVAL;
+
if (WARN_ON_ONCE(dev->msi_enabled))
return -EINVAL;
@@ -1087,6 +1093,13 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
if (maxvec < minvec)
return -ERANGE;
+ /*
+ * If the caller is passing in sets, we can't support a range of
+ * vectors. The caller needs to handle that.
+ */
+ if (affd && affd->nr_sets && minvec != maxvec)
+ return -EINVAL;
+
if (WARN_ON_ONCE(dev->msix_enabled))
return -EINVAL;
@@ -1250,7 +1263,7 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
for_each_pci_msi_entry(entry, dev) {
if (i == nr)
- return entry->affinity;
+ return &entry->affinity->mask;
i++;
}
WARN_ON_ONCE(1);
@@ -1262,7 +1275,7 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
nr >= entry->nvec_used))
return NULL;
- return &entry->affinity[nr];
+ return &entry->affinity[nr].mask;
} else {
return cpu_possible_mask;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index d068f11d08a7..c9d8e3c837de 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -5556,9 +5556,13 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
u32 lnkcap2, lnkcap;
/*
- * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
- * Speeds Vector in Link Capabilities 2 when supported, falling
- * back to Max Link Speed in Link Capabilities otherwise.
+ * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The
+ * implementation note there recommends using the Supported Link
+ * Speeds Vector in Link Capabilities 2 when supported.
+ *
+ * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
+ * should use the Supported Link Speeds field in Link Capabilities,
+ * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
*/
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
if (lnkcap2) { /* PCIe r3.0-compliant */
@@ -5574,16 +5578,10 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
}
pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
- if (lnkcap) {
- if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
- return PCIE_SPEED_16_0GT;
- else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
- return PCIE_SPEED_8_0GT;
- else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
- return PCIE_SPEED_5_0GT;
- else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
- return PCIE_SPEED_2_5GT;
- }
+ if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
+ return PCIE_SPEED_5_0GT;
+ else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
+ return PCIE_SPEED_2_5GT;
return PCI_SPEED_UNKNOWN;
}
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index a90a9194ac4a..fed29de783e0 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -1064,7 +1064,7 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
.regs = aer_regs,
};
- if (kfifo_in_spinlocked(&aer_recover_ring, &entry, sizeof(entry),
+ if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1,
&aer_recover_ring_lock))
schedule_work(&aer_recover_work);
else
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index dcb29cb76dc6..f78860ce884b 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -895,7 +895,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
struct pcie_link_state *link;
int blacklist = !!pcie_aspm_sanity_check(pdev);
- if (!aspm_support_enabled || aspm_disabled)
+ if (!aspm_support_enabled)
return;
if (pdev->link_state)
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 08ebaf7cca8b..af9bc178495d 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -87,6 +87,15 @@ config QCOM_L3_PMU
Adds the L3 cache PMU into the perf events subsystem for
monitoring L3 cache events.
+config THUNDERX2_PMU
+ tristate "Cavium ThunderX2 SoC PMU UNCORE"
+ depends on ARCH_THUNDER2 && ARM64 && ACPI && NUMA
+ default m
+ help
+ Provides support for ThunderX2 UNCORE events.
+ The SoC has PMU support in its L3 cache controller (L3C) and
+ in the DDR4 Memory Controller (DMC).
+
config XGENE_PMU
depends on ARCH_XGENE
bool "APM X-Gene SoC PMU"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index b3902bd37d53..909f27fd9db3 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
obj-$(CONFIG_HISI_PMU) += hisilicon/
obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
+obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 54ec278d2fc4..8e46a9dad2fa 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -927,6 +927,11 @@ static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
idx = atomic_inc_return(&pmu_idx);
name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
+ if (!name) {
+ dev_err(dev, "failed to allocate name for pmu %d\n", idx);
+ return -ENOMEM;
+ }
+
return perf_pmu_register(&spe_pmu->pmu, name, -1);
}
@@ -1169,6 +1174,7 @@ static const struct of_device_id arm_spe_pmu_of_match[] = {
{ .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
{ /* Sentinel */ },
};
+MODULE_DEVICE_TABLE(of, arm_spe_pmu_of_match);
static int arm_spe_pmu_device_dt_probe(struct platform_device *pdev)
{
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
new file mode 100644
index 000000000000..c9a1701d3e54
--- /dev/null
+++ b/drivers/perf/thunderx2_pmu.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CAVIUM THUNDERX2 SoC PMU UNCORE
+ * Copyright (C) 2018 Cavium Inc.
+ * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpuhotplug.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+/* Each ThunderX2 (TX2) socket has an L3C and a DMC UNCORE PMU device.
+ * Each UNCORE PMU device consists of 4 independent programmable counters.
+ * Counters are 32 bit and do not support an overflow interrupt;
+ * they need to be sampled before overflow (i.e., at every 2 seconds).
+ */
+
+#define TX2_PMU_MAX_COUNTERS 4
+#define TX2_PMU_DMC_CHANNELS 8
+#define TX2_PMU_L3_TILES 16
+
+#define TX2_PMU_HRTIMER_INTERVAL (2 * NSEC_PER_SEC)
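+/* Illustrative bound: a 32 bit counter incrementing at ~2 GHz wraps in
+ * about 2.1 s, so a 2 s sampling period sees at most one wrap per sample.
+ */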
+#define GET_EVENTID(ev) ((ev->hw.config) & 0x1f)
+#define GET_COUNTERID(ev) ((ev->hw.idx) & 0x3)
+ /* 1 byte per counter (4 counters).
+  * The event id is encoded in bits [5:1] of each counter's byte.
+  */
+#define DMC_EVENT_CFG(idx, val) ((val) << (((idx) * 8) + 1))
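+/* Worked example (illustrative): for counter idx 2 the event byte is
+ * bits [23:16], so DMC_EVENT_CFG(2, 0x1f) masks bits [21:17] and
+ * DMC_EVENT_CFG(2, DMC_EVENT_READ_TXNS) programs 0xF << 17.
+ */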
+
+#define L3C_COUNTER_CTL 0xA8
+#define L3C_COUNTER_DATA 0xAC
+#define DMC_COUNTER_CTL 0x234
+#define DMC_COUNTER_DATA 0x240
+
+/* L3C event IDs */
+#define L3_EVENT_READ_REQ 0xD
+#define L3_EVENT_WRITEBACK_REQ 0xE
+#define L3_EVENT_INV_N_WRITE_REQ 0xF
+#define L3_EVENT_INV_REQ 0x10
+#define L3_EVENT_EVICT_REQ 0x13
+#define L3_EVENT_INV_N_WRITE_HIT 0x14
+#define L3_EVENT_INV_HIT 0x15
+#define L3_EVENT_READ_HIT 0x17
+#define L3_EVENT_MAX 0x18
+
+/* DMC event IDs */
+#define DMC_EVENT_COUNT_CYCLES 0x1
+#define DMC_EVENT_WRITE_TXNS 0xB
+#define DMC_EVENT_DATA_TRANSFERS 0xD
+#define DMC_EVENT_READ_TXNS 0xF
+#define DMC_EVENT_MAX 0x10
+
+enum tx2_uncore_type {
+ PMU_TYPE_L3C,
+ PMU_TYPE_DMC,
+ PMU_TYPE_INVALID,
+};
+
+/*
+ * The PMU on each socket has 2 uncore devices (DMC and L3C);
+ * each device has 4 counters.
+ */
+struct tx2_uncore_pmu {
+ struct hlist_node hpnode;
+ struct list_head entry;
+ struct pmu pmu;
+ char *name;
+ int node;
+ int cpu;
+ u32 max_counters;
+ u32 prorate_factor;
+ u32 max_events;
+ u64 hrtimer_interval;
+ void __iomem *base;
+ DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
+ struct perf_event *events[TX2_PMU_MAX_COUNTERS];
+ struct device *dev;
+ struct hrtimer hrtimer;
+ const struct attribute_group **attr_groups;
+ enum tx2_uncore_type type;
+ void (*init_cntr_base)(struct perf_event *event,
+ struct tx2_uncore_pmu *tx2_pmu);
+ void (*stop_event)(struct perf_event *event);
+ void (*start_event)(struct perf_event *event, int flags);
+};
+
+static LIST_HEAD(tx2_pmus);
+
+static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
+{
+ return container_of(pmu, struct tx2_uncore_pmu, pmu);
+}
+
+PMU_FORMAT_ATTR(event, "config:0-4");
+
+static struct attribute *l3c_pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute *dmc_pmu_format_attrs[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static const struct attribute_group l3c_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = l3c_pmu_format_attrs,
+};
+
+static const struct attribute_group dmc_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = dmc_pmu_format_attrs,
+};
+
+/*
+ * sysfs event attributes
+ */
+static ssize_t tx2_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct dev_ext_attribute *eattr;
+
+ eattr = container_of(attr, struct dev_ext_attribute, attr);
+ return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var);
+}
+
+#define TX2_EVENT_ATTR(name, config) \
+ PMU_EVENT_ATTR(name, tx2_pmu_event_attr_##name, \
+ config, tx2_pmu_event_show)
+
+TX2_EVENT_ATTR(read_request, L3_EVENT_READ_REQ);
+TX2_EVENT_ATTR(writeback_request, L3_EVENT_WRITEBACK_REQ);
+TX2_EVENT_ATTR(inv_nwrite_request, L3_EVENT_INV_N_WRITE_REQ);
+TX2_EVENT_ATTR(inv_request, L3_EVENT_INV_REQ);
+TX2_EVENT_ATTR(evict_request, L3_EVENT_EVICT_REQ);
+TX2_EVENT_ATTR(inv_nwrite_hit, L3_EVENT_INV_N_WRITE_HIT);
+TX2_EVENT_ATTR(inv_hit, L3_EVENT_INV_HIT);
+TX2_EVENT_ATTR(read_hit, L3_EVENT_READ_HIT);
+
+static struct attribute *l3c_pmu_events_attrs[] = {
+ &tx2_pmu_event_attr_read_request.attr.attr,
+ &tx2_pmu_event_attr_writeback_request.attr.attr,
+ &tx2_pmu_event_attr_inv_nwrite_request.attr.attr,
+ &tx2_pmu_event_attr_inv_request.attr.attr,
+ &tx2_pmu_event_attr_evict_request.attr.attr,
+ &tx2_pmu_event_attr_inv_nwrite_hit.attr.attr,
+ &tx2_pmu_event_attr_inv_hit.attr.attr,
+ &tx2_pmu_event_attr_read_hit.attr.attr,
+ NULL,
+};
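+
+/* Usage example (illustrative): these names show up under
+ * /sys/bus/event_source/devices/uncore_l3c_<node>/events/, e.g.:
+ *	perf stat -a -e uncore_l3c_0/read_hit/ -- sleep 1
+ */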
+
+TX2_EVENT_ATTR(cnt_cycles, DMC_EVENT_COUNT_CYCLES);
+TX2_EVENT_ATTR(write_txns, DMC_EVENT_WRITE_TXNS);
+TX2_EVENT_ATTR(data_transfers, DMC_EVENT_DATA_TRANSFERS);
+TX2_EVENT_ATTR(read_txns, DMC_EVENT_READ_TXNS);
+
+static struct attribute *dmc_pmu_events_attrs[] = {
+ &tx2_pmu_event_attr_cnt_cycles.attr.attr,
+ &tx2_pmu_event_attr_write_txns.attr.attr,
+ &tx2_pmu_event_attr_data_transfers.attr.attr,
+ &tx2_pmu_event_attr_read_txns.attr.attr,
+ NULL,
+};
+
+static const struct attribute_group l3c_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = l3c_pmu_events_attrs,
+};
+
+static const struct attribute_group dmc_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = dmc_pmu_events_attrs,
+};
+
+/*
+ * sysfs cpumask attributes
+ */
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tx2_uncore_pmu *tx2_pmu;
+
+ tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
+ return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *tx2_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group pmu_cpumask_attr_group = {
+ .attrs = tx2_pmu_cpumask_attrs,
+};
+
+/*
+ * Per PMU device attribute groups
+ */
+static const struct attribute_group *l3c_pmu_attr_groups[] = {
+ &l3c_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &l3c_pmu_events_attr_group,
+ NULL
+};
+
+static const struct attribute_group *dmc_pmu_attr_groups[] = {
+ &dmc_pmu_format_attr_group,
+ &pmu_cpumask_attr_group,
+ &dmc_pmu_events_attr_group,
+ NULL
+};
+
+static inline u32 reg_readl(unsigned long addr)
+{
+ return readl((void __iomem *)addr);
+}
+
+static inline void reg_writel(u32 val, unsigned long addr)
+{
+ writel(val, (void __iomem *)addr);
+}
+
+static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
+{
+ int counter;
+
+ counter = find_first_zero_bit(tx2_pmu->active_counters,
+ tx2_pmu->max_counters);
+ if (counter == tx2_pmu->max_counters)
+ return -ENOSPC;
+
+ set_bit(counter, tx2_pmu->active_counters);
+ return counter;
+}
+
+static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
+{
+ clear_bit(counter, tx2_pmu->active_counters);
+}
+
+static void init_cntr_base_l3c(struct perf_event *event,
+ struct tx2_uncore_pmu *tx2_pmu)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* counter ctrl/data reg offset at 8 */
+ hwc->config_base = (unsigned long)tx2_pmu->base
+ + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event));
+ hwc->event_base = (unsigned long)tx2_pmu->base
+ + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event));
+}
+
+static void init_cntr_base_dmc(struct perf_event *event,
+ struct tx2_uncore_pmu *tx2_pmu)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ hwc->config_base = (unsigned long)tx2_pmu->base
+ + DMC_COUNTER_CTL;
+ /* counter data reg offset at 0xc */
+ hwc->event_base = (unsigned long)tx2_pmu->base
+ + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event));
+}
+
+static void uncore_start_event_l3c(struct perf_event *event, int flags)
+{
+ u32 val;
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* event id encoded in bits [07:03] */
+ val = GET_EVENTID(event) << 3;
+ reg_writel(val, hwc->config_base);
+ local64_set(&hwc->prev_count, 0);
+ reg_writel(0, hwc->event_base);
+}
+
+static inline void uncore_stop_event_l3c(struct perf_event *event)
+{
+ reg_writel(0, event->hw.config_base);
+}
+
+static void uncore_start_event_dmc(struct perf_event *event, int flags)
+{
+ u32 val;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = GET_COUNTERID(event);
+ int event_id = GET_EVENTID(event);
+
+	/* Enable and start the counters.
+	 * 8 bits per counter; bits [5:1] of a counter's byte set the
+	 * event type.
+	 */
+ val = reg_readl(hwc->config_base);
+ val &= ~DMC_EVENT_CFG(idx, 0x1f);
+ val |= DMC_EVENT_CFG(idx, event_id);
+ reg_writel(val, hwc->config_base);
+ local64_set(&hwc->prev_count, 0);
+ reg_writel(0, hwc->event_base);
+}
+
+static void uncore_stop_event_dmc(struct perf_event *event)
+{
+ u32 val;
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = GET_COUNTERID(event);
+
+	/* clear the event type (bits [5:1]) to stop the counter */
+ val = reg_readl(hwc->config_base);
+ val &= ~DMC_EVENT_CFG(idx, 0x1f);
+ reg_writel(val, hwc->config_base);
+}
+
+static void tx2_uncore_event_update(struct perf_event *event)
+{
+ s64 prev, delta, new = 0;
+ struct hw_perf_event *hwc = &event->hw;
+ struct tx2_uncore_pmu *tx2_pmu;
+ enum tx2_uncore_type type;
+ u32 prorate_factor;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ type = tx2_pmu->type;
+ prorate_factor = tx2_pmu->prorate_factor;
+
+ new = reg_readl(hwc->event_base);
+ prev = local64_xchg(&hwc->prev_count, new);
+
+ /* handles rollover of 32 bit counter */
+ delta = (u32)(((1UL << 32) - prev) + new);
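+	/* e.g. (illustrative): prev = 0xfffffff0, new = 0x10 yields
+	 * delta = 0x20; the u32 cast also makes the non-wrapped case
+	 * (new >= prev) come out right.
+	 */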
+
+ /* DMC event data_transfers granularity is 16 Bytes, convert it to 64 */
+ if (type == PMU_TYPE_DMC &&
+ GET_EVENTID(event) == DMC_EVENT_DATA_TRANSFERS)
+ delta = delta/4;
+
+	/* L3C and DMC have 16 and 8 interleave channels respectively.
+	 * The sampled value is for channel 0, so it is multiplied by
+	 * prorate_factor to get the count for the whole device.
+	 */
+ local64_add(delta * prorate_factor, &event->count);
+}
+
+static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
+{
+ int i = 0;
+ struct acpi_tx2_pmu_device {
+ __u8 id[ACPI_ID_LEN];
+ enum tx2_uncore_type type;
+ } devices[] = {
+ {"CAV901D", PMU_TYPE_L3C},
+ {"CAV901F", PMU_TYPE_DMC},
+ {"", PMU_TYPE_INVALID}
+ };
+
+ while (devices[i].type != PMU_TYPE_INVALID) {
+ if (!strcmp(acpi_device_hid(adev), devices[i].id))
+ break;
+ i++;
+ }
+
+ return devices[i].type;
+}
+
+static bool tx2_uncore_validate_event(struct pmu *pmu,
+ struct perf_event *event, int *counters)
+{
+ if (is_software_event(event))
+ return true;
+ /* Reject groups spanning multiple HW PMUs. */
+ if (event->pmu != pmu)
+ return false;
+
+ *counters = *counters + 1;
+ return true;
+}
+
+/*
+ * Make sure the group of events can be scheduled at once
+ * on the PMU.
+ */
+static bool tx2_uncore_validate_event_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ int counters = 0;
+
+ if (event->group_leader == event)
+ return true;
+
+ if (!tx2_uncore_validate_event(event->pmu, leader, &counters))
+ return false;
+
+ for_each_sibling_event(sibling, leader) {
+ if (!tx2_uncore_validate_event(event->pmu, sibling, &counters))
+ return false;
+ }
+
+ if (!tx2_uncore_validate_event(event->pmu, event, &counters))
+ return false;
+
+ /*
+ * If the group requires more counters than the HW has,
+ * it cannot ever be scheduled.
+ */
+ return counters <= TX2_PMU_MAX_COUNTERS;
+}
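+
+/* e.g. (illustrative): a group of five hardware events can never be
+ * scheduled on this PMU, since each uncore device has only four counters.
+ */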
+
+
+static int tx2_uncore_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct tx2_uncore_pmu *tx2_pmu;
+
+	/* Test the event attr type for PMU enumeration */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /*
+ * SOC PMU counters are shared across all cores.
+ * Therefore, it does not support per-process mode.
+ * Also, it does not support event sampling mode.
+ */
+ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+ return -EINVAL;
+
+ /* We have no filtering of any kind */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ if (tx2_pmu->cpu >= nr_cpu_ids)
+ return -EINVAL;
+ event->cpu = tx2_pmu->cpu;
+
+ if (event->attr.config >= tx2_pmu->max_events)
+ return -EINVAL;
+
+ /* store event id */
+ hwc->config = event->attr.config;
+
+ /* Validate the group */
+ if (!tx2_uncore_validate_event_group(event))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void tx2_uncore_event_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct tx2_uncore_pmu *tx2_pmu;
+
+ hwc->state = 0;
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+
+ tx2_pmu->start_event(event, flags);
+ perf_event_update_userpage(event);
+
+ /* Start timer for first event */
+ if (bitmap_weight(tx2_pmu->active_counters,
+ tx2_pmu->max_counters) == 1) {
+ hrtimer_start(&tx2_pmu->hrtimer,
+ ns_to_ktime(tx2_pmu->hrtimer_interval),
+ HRTIMER_MODE_REL_PINNED);
+ }
+}
+
+static void tx2_uncore_event_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct tx2_uncore_pmu *tx2_pmu;
+
+ if (hwc->state & PERF_HES_UPTODATE)
+ return;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ tx2_pmu->stop_event(event);
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+ if (flags & PERF_EF_UPDATE) {
+ tx2_uncore_event_update(event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+}
+
+static int tx2_uncore_event_add(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct tx2_uncore_pmu *tx2_pmu;
+
+ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+
+ /* Allocate a free counter */
+ hwc->idx = alloc_counter(tx2_pmu);
+ if (hwc->idx < 0)
+ return -EAGAIN;
+
+ tx2_pmu->events[hwc->idx] = event;
+ /* set counter control and data registers base address */
+ tx2_pmu->init_cntr_base(event, tx2_pmu);
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (flags & PERF_EF_START)
+ tx2_uncore_event_start(event, flags);
+
+ return 0;
+}
+
+static void tx2_uncore_event_del(struct perf_event *event, int flags)
+{
+ struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+
+ tx2_uncore_event_stop(event, PERF_EF_UPDATE);
+
+ /* clear the assigned counter */
+ free_counter(tx2_pmu, GET_COUNTERID(event));
+
+ perf_event_update_userpage(event);
+ tx2_pmu->events[hwc->idx] = NULL;
+ hwc->idx = -1;
+}
+
+static void tx2_uncore_event_read(struct perf_event *event)
+{
+ tx2_uncore_event_update(event);
+}
+
+static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer)
+{
+ struct tx2_uncore_pmu *tx2_pmu;
+ int max_counters, idx;
+
+ tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
+ max_counters = tx2_pmu->max_counters;
+
+ if (bitmap_empty(tx2_pmu->active_counters, max_counters))
+ return HRTIMER_NORESTART;
+
+ for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
+ struct perf_event *event = tx2_pmu->events[idx];
+
+ tx2_uncore_event_update(event);
+ }
+ hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
+ return HRTIMER_RESTART;
+}
+
+static int tx2_uncore_pmu_register(struct tx2_uncore_pmu *tx2_pmu)
+{
+ struct device *dev = tx2_pmu->dev;
+ char *name = tx2_pmu->name;
+
+ /* Perf event registration */
+ tx2_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
+ .attr_groups = tx2_pmu->attr_groups,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = tx2_uncore_event_init,
+ .add = tx2_uncore_event_add,
+ .del = tx2_uncore_event_del,
+ .start = tx2_uncore_event_start,
+ .stop = tx2_uncore_event_stop,
+ .read = tx2_uncore_event_read,
+ };
+
+ tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
+ "%s", name);
+
+ return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
+}
+
+static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
+{
+ int ret, cpu;
+
+ cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
+ cpu_online_mask);
+
+ tx2_pmu->cpu = cpu;
+ hrtimer_init(&tx2_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx2_pmu->hrtimer.function = tx2_hrtimer_callback;
+
+ ret = tx2_uncore_pmu_register(tx2_pmu);
+ if (ret) {
+ dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
+ tx2_pmu->name);
+ return -ENODEV;
+ }
+
+ /* register hotplug callback for the pmu */
+ ret = cpuhp_state_add_instance(
+ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+ &tx2_pmu->hpnode);
+ if (ret) {
+ dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
+ return ret;
+ }
+
+ /* Add to list */
+ list_add(&tx2_pmu->entry, &tx2_pmus);
+
+ dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
+ tx2_pmu->pmu.name);
+ return ret;
+}
+
+static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
+ acpi_handle handle, struct acpi_device *adev, u32 type)
+{
+ struct tx2_uncore_pmu *tx2_pmu;
+ void __iomem *base;
+ struct resource res;
+ struct resource_entry *rentry;
+ struct list_head list;
+ int ret;
+
+ INIT_LIST_HEAD(&list);
+ ret = acpi_dev_get_resources(adev, &list, NULL, NULL);
+ if (ret <= 0) {
+ dev_err(dev, "failed to parse _CRS method, error %d\n", ret);
+ return NULL;
+ }
+
+ list_for_each_entry(rentry, &list, node) {
+ if (resource_type(rentry->res) == IORESOURCE_MEM) {
+ res = *rentry->res;
+ break;
+ }
+ }
+
+ if (!rentry->res)
+ return NULL;
+
+ acpi_dev_free_resource_list(&list);
+ base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(base)) {
+ dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+ return NULL;
+ }
+
+ tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
+ if (!tx2_pmu)
+ return NULL;
+
+ tx2_pmu->dev = dev;
+ tx2_pmu->type = type;
+ tx2_pmu->base = base;
+ tx2_pmu->node = dev_to_node(dev);
+ INIT_LIST_HEAD(&tx2_pmu->entry);
+
+ switch (tx2_pmu->type) {
+ case PMU_TYPE_L3C:
+ tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
+ tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
+ tx2_pmu->max_events = L3_EVENT_MAX;
+ tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
+ tx2_pmu->attr_groups = l3c_pmu_attr_groups;
+ tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
+ "uncore_l3c_%d", tx2_pmu->node);
+ tx2_pmu->init_cntr_base = init_cntr_base_l3c;
+ tx2_pmu->start_event = uncore_start_event_l3c;
+ tx2_pmu->stop_event = uncore_stop_event_l3c;
+ break;
+ case PMU_TYPE_DMC:
+ tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
+ tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
+ tx2_pmu->max_events = DMC_EVENT_MAX;
+ tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
+ tx2_pmu->attr_groups = dmc_pmu_attr_groups;
+ tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
+ "uncore_dmc_%d", tx2_pmu->node);
+ tx2_pmu->init_cntr_base = init_cntr_base_dmc;
+ tx2_pmu->start_event = uncore_start_event_dmc;
+ tx2_pmu->stop_event = uncore_stop_event_dmc;
+ break;
+ case PMU_TYPE_INVALID:
+ devm_kfree(dev, tx2_pmu);
+ return NULL;
+ }
+
+ return tx2_pmu;
+}
+
+static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct tx2_uncore_pmu *tx2_pmu;
+ struct acpi_device *adev;
+ enum tx2_uncore_type type;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ type = get_tx2_pmu_type(adev);
+ if (type == PMU_TYPE_INVALID)
+ return AE_OK;
+
+ tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
+ handle, adev, type);
+
+ if (!tx2_pmu)
+ return AE_ERROR;
+
+ if (tx2_uncore_pmu_add_dev(tx2_pmu)) {
+ /* Can't add the PMU device, abort */
+ return AE_ERROR;
+ }
+ return AE_OK;
+}
+
+static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
+ struct hlist_node *hpnode)
+{
+ struct tx2_uncore_pmu *tx2_pmu;
+
+ tx2_pmu = hlist_entry_safe(hpnode,
+ struct tx2_uncore_pmu, hpnode);
+
+	/* Pick this CPU if there is no CPU/PMU association yet and
+	 * both are from the same node.
+	 */
+ if ((tx2_pmu->cpu >= nr_cpu_ids) &&
+ (tx2_pmu->node == cpu_to_node(cpu)))
+ tx2_pmu->cpu = cpu;
+
+ return 0;
+}
+
+static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
+ struct hlist_node *hpnode)
+{
+ int new_cpu;
+ struct tx2_uncore_pmu *tx2_pmu;
+ struct cpumask cpu_online_mask_temp;
+
+ tx2_pmu = hlist_entry_safe(hpnode,
+ struct tx2_uncore_pmu, hpnode);
+
+ if (cpu != tx2_pmu->cpu)
+ return 0;
+
+ hrtimer_cancel(&tx2_pmu->hrtimer);
+ cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
+ cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
+ new_cpu = cpumask_any_and(
+ cpumask_of_node(tx2_pmu->node),
+ &cpu_online_mask_temp);
+
+ tx2_pmu->cpu = new_cpu;
+ if (new_cpu >= nr_cpu_ids)
+ return 0;
+ perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
+
+ return 0;
+}
+
+static const struct acpi_device_id tx2_uncore_acpi_match[] = {
+ {"CAV901C", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, tx2_uncore_acpi_match);
+
+static int tx2_uncore_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ acpi_handle handle;
+ acpi_status status;
+
+ set_dev_node(dev, acpi_get_node(ACPI_HANDLE(dev)));
+
+ if (!has_acpi_companion(dev))
+ return -ENODEV;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -EINVAL;
+
+ /* Walk through the tree for all PMU UNCORE devices */
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ tx2_uncore_pmu_add,
+ NULL, dev, NULL);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "failed to probe PMU devices\n");
+ return_ACPI_STATUS(status);
+ }
+
+ dev_info(dev, "node%d: pmu uncore registered\n", dev_to_node(dev));
+ return 0;
+}
+
+static int tx2_uncore_remove(struct platform_device *pdev)
+{
+ struct tx2_uncore_pmu *tx2_pmu, *temp;
+ struct device *dev = &pdev->dev;
+
+ if (!list_empty(&tx2_pmus)) {
+ list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
+ if (tx2_pmu->node == dev_to_node(dev)) {
+ cpuhp_state_remove_instance_nocalls(
+ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+ &tx2_pmu->hpnode);
+ perf_pmu_unregister(&tx2_pmu->pmu);
+ list_del(&tx2_pmu->entry);
+ }
+ }
+ }
+ return 0;
+}
+
+static struct platform_driver tx2_uncore_driver = {
+ .driver = {
+ .name = "tx2-uncore-pmu",
+ .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
+ },
+ .probe = tx2_uncore_probe,
+ .remove = tx2_uncore_remove,
+};
+
+static int __init tx2_uncore_driver_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+ "perf/tx2/uncore:online",
+ tx2_uncore_pmu_online_cpu,
+ tx2_uncore_pmu_offline_cpu);
+ if (ret) {
+ pr_err("TX2 PMU: setup hotplug failed(%d)\n", ret);
+ return ret;
+ }
+ ret = platform_driver_register(&tx2_uncore_driver);
+ if (ret)
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
+
+ return ret;
+}
+module_init(tx2_uncore_driver_init);
+
+static void __exit tx2_uncore_driver_exit(void)
+{
+ platform_driver_unregister(&tx2_uncore_driver);
+ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
+}
+module_exit(tx2_uncore_driver_exit);
+
+MODULE_DESCRIPTION("ThunderX2 UNCORE PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ganapatrao Kulkarni <gkulkarni@cavium.com>");
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 0e31f1392a53..0dc9ff0f8894 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -21,6 +21,7 @@
#include <linux/acpi.h>
#include <linux/clk.h>
+#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -130,12 +131,14 @@ struct xgene_pmu_ops {
struct xgene_pmu {
struct device *dev;
+ struct hlist_node node;
int version;
void __iomem *pcppmu_csr;
u32 mcb_active_mask;
u32 mc_active_mask;
u32 l3c_active_mask;
cpumask_t cpu;
+ int irq;
raw_spinlock_t lock;
const struct xgene_pmu_ops *ops;
struct list_head l3cpmus;
@@ -1806,6 +1809,53 @@ static const struct acpi_device_id xgene_pmu_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match);
#endif
+static int xgene_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
+ node);
+
+ if (cpumask_empty(&xgene_pmu->cpu))
+ cpumask_set_cpu(cpu, &xgene_pmu->cpu);
+
+	/* The overflow interrupt should also be handled by the same CPU */
+ WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));
+
+ return 0;
+}
+
+static int xgene_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
+{
+ struct xgene_pmu *xgene_pmu = hlist_entry_safe(node, struct xgene_pmu,
+ node);
+ struct xgene_pmu_dev_ctx *ctx;
+ unsigned int target;
+
+ if (!cpumask_test_and_clear_cpu(cpu, &xgene_pmu->cpu))
+ return 0;
+ target = cpumask_any_but(cpu_online_mask, cpu);
+ if (target >= nr_cpu_ids)
+ return 0;
+
+ list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) {
+ perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
+ }
+ list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) {
+ perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
+ }
+ list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) {
+ perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
+ }
+ list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) {
+ perf_pmu_migrate_context(&ctx->pmu_dev->pmu, cpu, target);
+ }
+
+ cpumask_set_cpu(target, &xgene_pmu->cpu);
+	/* The overflow interrupt should also be handled by the same CPU */
+ WARN_ON(irq_set_affinity(xgene_pmu->irq, &xgene_pmu->cpu));
+
+ return 0;
+}
+
static int xgene_pmu_probe(struct platform_device *pdev)
{
const struct xgene_pmu_data *dev_data;
@@ -1815,6 +1865,14 @@ static int xgene_pmu_probe(struct platform_device *pdev)
int irq, rc;
int version;
+ /* Install a hook to update the reader CPU in case it goes offline */
+ rc = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
+ "CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE",
+ xgene_pmu_online_cpu,
+ xgene_pmu_offline_cpu);
+ if (rc)
+ return rc;
+
xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL);
if (!xgene_pmu)
return -ENOMEM;
@@ -1865,6 +1923,7 @@ static int xgene_pmu_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "No IRQ resource\n");
return -EINVAL;
}
+
rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr,
IRQF_NOBALANCING | IRQF_NO_THREAD,
dev_name(&pdev->dev), xgene_pmu);
@@ -1873,6 +1932,8 @@ static int xgene_pmu_probe(struct platform_device *pdev)
return rc;
}
+ xgene_pmu->irq = irq;
+
raw_spin_lock_init(&xgene_pmu->lock);
/* Check for active MCBs and MCUs */
@@ -1883,13 +1944,11 @@ static int xgene_pmu_probe(struct platform_device *pdev)
xgene_pmu->mc_active_mask = 0x1;
}
- /* Pick one core to use for cpumask attributes */
- cpumask_set_cpu(smp_processor_id(), &xgene_pmu->cpu);
-
- /* Make sure that the overflow interrupt is handled by this CPU */
- rc = irq_set_affinity(irq, &xgene_pmu->cpu);
+ /* Add this instance to the list used by the hotplug callback */
+ rc = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
+ &xgene_pmu->node);
if (rc) {
- dev_err(&pdev->dev, "Failed to set interrupt affinity!\n");
+ dev_err(&pdev->dev, "Error %d registering hotplug", rc);
return rc;
}
@@ -1897,13 +1956,18 @@ static int xgene_pmu_probe(struct platform_device *pdev)
rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev);
if (rc) {
dev_err(&pdev->dev, "No PMU perf devices found!\n");
- return rc;
+ goto out_unregister;
}
/* Enable interrupt */
xgene_pmu->ops->unmask_int(xgene_pmu);
return 0;
+
+out_unregister:
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
+ &xgene_pmu->node);
+ return rc;
}
static void
@@ -1924,6 +1988,8 @@ static int xgene_pmu_remove(struct platform_device *pdev)
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus);
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus);
xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus);
+ cpuhp_state_remove_instance(CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
+ &xgene_pmu->node);
return 0;
}
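
The xgene conversion above is the standard cpuhp multi-instance pattern: one
dynamic state with shared online/offline callbacks, plus one hlist node per
device instance. A minimal skeleton of that shape (names are illustrative,
not the xgene code):

	struct my_pmu {
		struct hlist_node node;	/* links the instance into the state */
		cpumask_t cpu;		/* CPU currently servicing this PMU */
	};

	static int my_online(unsigned int cpu, struct hlist_node *node)
	{
		struct my_pmu *p = hlist_entry_safe(node, struct my_pmu, node);

		if (cpumask_empty(&p->cpu))	/* adopt the first online CPU */
			cpumask_set_cpu(cpu, &p->cpu);
		return 0;
	}

The state is registered once with cpuhp_setup_state_multi(), and each device
then attaches itself with cpuhp_state_add_instance(), exactly as the probe
and remove paths above do; the offline callback migrates events away, as
xgene_pmu_offline_cpu() shows.
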
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index 9ce531194f8a..6d4b44b569bc 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -231,6 +231,7 @@ static const struct qusb2_phy_cfg sdm845_phy_cfg = {
.mask_core_ready = CORE_READY_STATUS,
.has_pll_override = true,
.autoresume_en = BIT(0),
+ .update_tune1_with_efuse = true,
};
static const char * const qusb2_phy_vreg_names[] = {
@@ -402,10 +403,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
/*
* Read efuse register having TUNE2/1 parameter's high nibble.
- * If efuse register shows value as 0x0, or if we fail to find
- * a valid efuse register settings, then use default value
- * as 0xB for high nibble that we have already set while
- * configuring phy.
+ * If efuse register shows value as 0x0 (indicating value is not
+ * fused), or if we fail to find a valid efuse register setting,
+ * then use default value for high nibble that we have already
+ * set while configuring the phy.
*/
val = nvmem_cell_read(qphy->cell, NULL);
if (IS_ERR(val) || !val[0]) {
@@ -415,12 +416,13 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
/* Fused TUNE1/2 value is the higher nibble only */
if (cfg->update_tune1_with_efuse)
- qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
- val[0] << 0x4);
+ qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
+ val[0] << HSTX_TRIM_SHIFT,
+ HSTX_TRIM_MASK);
else
- qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
- val[0] << 0x4);
-
+ qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
+ val[0] << HSTX_TRIM_SHIFT,
+ HSTX_TRIM_MASK);
}
static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode)
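
The switch from qusb2_setbits() to qusb2_write_mask() above matters because
a set-bits helper can only OR bits in; it cannot clear the default HSTX_TRIM
nibble before installing the fused value. A masked update is a
read-modify-write, roughly like this sketch (the driver's actual helper may
differ in detail):

	/* illustrative read-modify-write of a masked register field */
	static void write_mask(void __iomem *base, u32 offset, u32 val, u32 mask)
	{
		u32 reg = readl(base + offset);

		reg &= ~mask;		/* clear the field, defaults included */
		reg |= val & mask;	/* install the new (e.g. fused) value */
		writel(reg, base + offset);
	}
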
diff --git a/drivers/phy/socionext/Kconfig b/drivers/phy/socionext/Kconfig
index 467e8147972b..9c85231a6dbc 100644
--- a/drivers/phy/socionext/Kconfig
+++ b/drivers/phy/socionext/Kconfig
@@ -26,7 +26,8 @@ config PHY_UNIPHIER_USB3
config PHY_UNIPHIER_PCIE
tristate "Uniphier PHY driver for PCIe controller"
- depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
default PCIE_UNIPHIER
select GENERIC_PHY
help
diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
index 53d449076dee..ea87d739f534 100644
--- a/drivers/pinctrl/meson/pinctrl-meson.c
+++ b/drivers/pinctrl/meson/pinctrl-meson.c
@@ -191,7 +191,8 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
case PIN_CONFIG_BIAS_DISABLE:
dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
- meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
+ meson_calc_reg_and_bit(bank, pin, REG_PULLEN, &reg,
+ &bit);
ret = regmap_update_bits(pc->reg_pullen, reg,
BIT(bit), 0);
if (ret)
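
The meson fix above matches the offset calculation to the regmap it is
applied to: the write goes through pc->reg_pullen, so the reg/bit pair must
be computed in REG_PULLEN space; computing it in REG_PULL space pointed the
bias-disable write at the wrong register.
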
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
index 6838b38555a1..1bfb0ae6b387 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm660.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -33,7 +33,7 @@ enum {
}
-#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+#define PINGROUP(id, _tile, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
{ \
.name = "gpio" #id, \
.pins = gpio##id##_pins, \
@@ -51,11 +51,12 @@ enum {
msm_mux_##f9 \
}, \
.nfuncs = 10, \
- .ctl_reg = base + REG_SIZE * id, \
- .io_reg = base + 0x4 + REG_SIZE * id, \
- .intr_cfg_reg = base + 0x8 + REG_SIZE * id, \
- .intr_status_reg = base + 0xc + REG_SIZE * id, \
- .intr_target_reg = base + 0x8 + REG_SIZE * id, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .tile = _tile, \
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
@@ -82,6 +83,7 @@ enum {
.intr_cfg_reg = 0, \
.intr_status_reg = 0, \
.intr_target_reg = 0, \
+ .tile = NORTH, \
.mux_bit = -1, \
.pull_bit = pull, \
.drv_bit = drv, \
@@ -1397,13 +1399,13 @@ static const struct msm_pingroup sdm660_groups[] = {
PINGROUP(111, SOUTH, _, _, _, _, _, _, _, _, _),
PINGROUP(112, SOUTH, _, _, _, _, _, _, _, _, _),
PINGROUP(113, SOUTH, _, _, _, _, _, _, _, _, _),
- SDC_QDSD_PINGROUP(sdc1_clk, 0x99a000, 13, 6),
- SDC_QDSD_PINGROUP(sdc1_cmd, 0x99a000, 11, 3),
- SDC_QDSD_PINGROUP(sdc1_data, 0x99a000, 9, 0),
- SDC_QDSD_PINGROUP(sdc2_clk, 0x99b000, 14, 6),
- SDC_QDSD_PINGROUP(sdc2_cmd, 0x99b000, 11, 3),
- SDC_QDSD_PINGROUP(sdc2_data, 0x99b000, 9, 0),
- SDC_QDSD_PINGROUP(sdc1_rclk, 0x99a000, 15, 0),
+ SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+ SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc2_clk, 0x9b000, 14, 6),
+ SDC_QDSD_PINGROUP(sdc2_cmd, 0x9b000, 11, 3),
+ SDC_QDSD_PINGROUP(sdc2_data, 0x9b000, 9, 0),
+ SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
};
static const struct msm_pinctrl_soc_data sdm660_pinctrl = {
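
In the sdm660 hunks above the register fields become offsets within a tile
rather than absolute addresses: the per-group base (previously folded into
ctl_reg and friends) moves into the new .tile field, and the SDC groups drop
the same 0x900000 tile base, e.g. 0x99a000 - 0x900000 = 0x9a000.
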
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 6624499eae72..4ada80317a3b 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PH_EINT11 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */
};
static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
index f66521c7f846..42efcb850722 100644
--- a/drivers/platform/mips/cpu_hwmon.c
+++ b/drivers/platform/mips/cpu_hwmon.c
@@ -25,9 +25,10 @@ int loongson3_cpu_temp(int cpu)
case PRID_REV_LOONGSON3A_R1:
reg = (reg >> 8) & 0xff;
break;
- case PRID_REV_LOONGSON3A_R2:
case PRID_REV_LOONGSON3B_R1:
case PRID_REV_LOONGSON3B_R2:
+ case PRID_REV_LOONGSON3A_R2_0:
+ case PRID_REV_LOONGSON3A_R2_1:
reg = ((reg >> 8) & 0xff) - 100;
break;
case PRID_REV_LOONGSON3A_R3_0:
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 54f6a40c75c6..45ef4d22f14c 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -177,6 +177,8 @@ config DELL_LAPTOP
select POWER_SUPPLY
select LEDS_CLASS
select NEW_LEDS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
---help---
This driver adds support for rfkill and backlight control to Dell
laptops (except for some models covered by the Compal driver).
@@ -493,6 +495,8 @@ config THINKPAD_ACPI
select NVRAM
select NEW_LEDS
select LEDS_CLASS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
---help---
This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
support for Fn-Fx key combinations, Bluetooth control, video
@@ -1288,6 +1292,23 @@ config INTEL_ATOMISP2_PM
To compile this driver as a module, choose M here: the module
will be called intel_atomisp2_pm.
+config HUAWEI_WMI
+ tristate "Huawei WMI hotkeys driver"
+ depends on ACPI_WMI
+ depends on INPUT
+ select INPUT_SPARSEKMAP
+ select LEDS_CLASS
+ select LEDS_TRIGGERS
+ select LEDS_TRIGGER_AUDIO
+ select NEW_LEDS
+ help
+ This driver provides support for Huawei WMI hotkeys.
+	  It enables the missing keys and adds support for the micmute
+	  LED found on some of these laptops.
+
+ To compile this driver as a module, choose M here: the module
+ will be called huawei-wmi.
+
endif # X86_PLATFORM_DEVICES
config PMC_ATOM
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 39ae94135406..d841c550e3cc 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_ACERHDF) += acerhdf.o
obj-$(CONFIG_HP_ACCEL) += hp_accel.o
obj-$(CONFIG_HP_WIRELESS) += hp-wireless.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
+obj-$(CONFIG_HUAWEI_WMI) += huawei-wmi.o
obj-$(CONFIG_AMILO_RFKILL) += amilo-rfkill.o
obj-$(CONFIG_GPD_POCKET_FAN) += gpd-pocket-fan.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index db2af09067db..b6f2ff95c3ed 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -442,8 +442,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
- { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
- { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
+ { KE_KEY, 0x35, { KEY_SCREENLOCK } },
{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{ KE_KEY, 0x41, { KEY_NEXTSONG } },
{ KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index c285a16675ee..37b5de541270 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2131,7 +2131,8 @@ static int asus_wmi_add(struct platform_device *pdev)
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
goto fail_backlight;
- }
+ } else
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
status = wmi_install_notify_handler(asus->driver->event_guid,
asus_wmi_notify, asus);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 06978c14c83b..95e6ca116e00 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -29,7 +29,6 @@
#include <linux/mm.h>
#include <linux/i8042.h>
#include <linux/debugfs.h>
-#include <linux/dell-led.h>
#include <linux/seq_file.h>
#include <acpi/video.h>
#include "dell-rbtn.h"
@@ -1565,8 +1564,10 @@ static ssize_t kbd_led_timeout_store(struct device *dev,
switch (unit) {
case KBD_TIMEOUT_DAYS:
value *= 24;
+ /* fall through */
case KBD_TIMEOUT_HOURS:
value *= 60;
+ /* fall through */
case KBD_TIMEOUT_MINUTES:
value *= 60;
unit = KBD_TIMEOUT_SECONDS;
@@ -2109,17 +2110,17 @@ static struct notifier_block dell_laptop_notifier = {
.notifier_call = dell_laptop_notifier_call,
};
-int dell_micmute_led_set(int state)
+static int micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
{
struct calling_interface_buffer buffer;
struct calling_interface_token *token;
+ int state = brightness != LED_OFF;
if (state == 0)
token = dell_smbios_find_token(GLOBAL_MIC_MUTE_DISABLE);
- else if (state == 1)
- token = dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE);
else
- return -EINVAL;
+ token = dell_smbios_find_token(GLOBAL_MIC_MUTE_ENABLE);
if (!token)
return -ENODEV;
@@ -2127,9 +2128,15 @@ int dell_micmute_led_set(int state)
dell_fill_request(&buffer, token->location, token->value, 0, 0);
dell_send_request(&buffer, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD);
- return state;
+ return 0;
}
-EXPORT_SYMBOL_GPL(dell_micmute_led_set);
+
+static struct led_classdev micmute_led_cdev = {
+ .name = "platform::micmute",
+ .max_brightness = 1,
+ .brightness_set_blocking = micmute_led_set,
+ .default_trigger = "audio-micmute",
+};
static int __init dell_init(void)
{
@@ -2175,6 +2182,11 @@ static int __init dell_init(void)
dell_laptop_register_notifier(&dell_laptop_notifier);
+ micmute_led_cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ ret = led_classdev_register(&platform_device->dev, &micmute_led_cdev);
+ if (ret < 0)
+ goto fail_led;
+
if (acpi_video_get_backlight_type() != acpi_backlight_vendor)
return 0;
@@ -2220,6 +2232,8 @@ static int __init dell_init(void)
fail_get_brightness:
backlight_device_unregister(dell_backlight_device);
fail_backlight:
+ led_classdev_unregister(&micmute_led_cdev);
+fail_led:
dell_cleanup_rfkill();
fail_rfkill:
platform_device_del(platform_device);
@@ -2239,6 +2253,7 @@ static void __exit dell_exit(void)
touchpad_led_exit();
kbd_led_exit();
backlight_device_unregister(dell_backlight_device);
+ led_classdev_unregister(&micmute_led_cdev);
dell_cleanup_rfkill();
if (platform_device) {
platform_device_unregister(platform_device);
diff --git a/drivers/platform/x86/huawei-wmi.c b/drivers/platform/x86/huawei-wmi.c
new file mode 100644
index 000000000000..59872f87b741
--- /dev/null
+++ b/drivers/platform/x86/huawei-wmi.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Huawei WMI hotkeys
+ *
+ * Copyright (C) 2018 Ayman Bagabas <ayman.bagabas@gmail.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/wmi.h>
+
+/*
+ * Huawei WMI GUIDs
+ */
+#define WMI0_EVENT_GUID "59142400-C6A3-40fa-BADB-8A2652834100"
+#define AMW0_EVENT_GUID "ABBC0F5C-8EA1-11D1-A000-C90629100000"
+
+#define WMI0_EXPENSIVE_GUID "39142400-C6A3-40fa-BADB-8A2652834100"
+
+struct huawei_wmi_priv {
+ struct input_dev *idev;
+ struct led_classdev cdev;
+ acpi_handle handle;
+ char *acpi_method;
+};
+
+static const struct key_entry huawei_wmi_keymap[] = {
+ { KE_KEY, 0x281, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, 0x282, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x284, { KEY_MUTE } },
+ { KE_KEY, 0x285, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x286, { KEY_VOLUMEUP } },
+ { KE_KEY, 0x287, { KEY_MICMUTE } },
+ { KE_KEY, 0x289, { KEY_WLAN } },
+ // Huawei |M| key
+ { KE_KEY, 0x28a, { KEY_CONFIG } },
+ // Keyboard backlight
+ { KE_IGNORE, 0x293, { KEY_KBDILLUMTOGGLE } },
+ { KE_IGNORE, 0x294, { KEY_KBDILLUMUP } },
+ { KE_IGNORE, 0x295, { KEY_KBDILLUMUP } },
+ { KE_END, 0 }
+};
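+
+/* The KE_IGNORE entries are swallowed on purpose (the keyboard backlight
+ * appears to be handled in firmware); mapping them avoids the "Unknown key"
+ * message in huawei_wmi_process_key() below.
+ */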
+
+static int huawei_wmi_micmute_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct huawei_wmi_priv *priv = dev_get_drvdata(led_cdev->dev->parent);
+ acpi_status status;
+ union acpi_object args[3];
+ struct acpi_object_list arg_list = {
+ .pointer = args,
+ .count = ARRAY_SIZE(args),
+ };
+
+ args[0].type = args[1].type = args[2].type = ACPI_TYPE_INTEGER;
+ args[1].integer.value = 0x04;
+
+ if (strcmp(priv->acpi_method, "SPIN") == 0) {
+ args[0].integer.value = 0;
+ args[2].integer.value = brightness ? 1 : 0;
+ } else if (strcmp(priv->acpi_method, "WPIN") == 0) {
+ args[0].integer.value = 1;
+ args[2].integer.value = brightness ? 0 : 1;
+ } else {
+ return -EINVAL;
+ }
+
+ status = acpi_evaluate_object(priv->handle, priv->acpi_method, &arg_list, NULL);
+ if (ACPI_FAILURE(status))
+ return -ENXIO;
+
+ return 0;
+}
+
+static int huawei_wmi_leds_setup(struct wmi_device *wdev)
+{
+ struct huawei_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+
+ priv->handle = ec_get_handle();
+ if (!priv->handle)
+ return 0;
+
+ if (acpi_has_method(priv->handle, "SPIN"))
+ priv->acpi_method = "SPIN";
+ else if (acpi_has_method(priv->handle, "WPIN"))
+ priv->acpi_method = "WPIN";
+ else
+ return 0;
+
+ priv->cdev.name = "platform::micmute";
+ priv->cdev.max_brightness = 1;
+ priv->cdev.brightness_set_blocking = huawei_wmi_micmute_led_set;
+ priv->cdev.default_trigger = "audio-micmute";
+ priv->cdev.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
+ priv->cdev.dev = &wdev->dev;
+ priv->cdev.flags = LED_CORE_SUSPENDRESUME;
+
+ return devm_led_classdev_register(&wdev->dev, &priv->cdev);
+}
+
+static void huawei_wmi_process_key(struct wmi_device *wdev, int code)
+{
+ struct huawei_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ const struct key_entry *key;
+
+ /*
+ * WMI0 uses code 0x80 to indicate a hotkey event.
+ * The actual key is fetched from the method WQ00
+ * using WMI0_EXPENSIVE_GUID.
+ */
+ if (code == 0x80) {
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ status = wmi_query_block(WMI0_EXPENSIVE_GUID, 0, &response);
+ if (ACPI_FAILURE(status))
+ return;
+
+ obj = (union acpi_object *)response.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ code = obj->integer.value;
+
+ kfree(response.pointer);
+ }
+
+ key = sparse_keymap_entry_from_scancode(priv->idev, code);
+ if (!key) {
+ dev_info(&wdev->dev, "Unknown key pressed, code: 0x%04x\n", code);
+ return;
+ }
+
+ sparse_keymap_report_entry(priv->idev, key, 1, true);
+}
+
+static void huawei_wmi_notify(struct wmi_device *wdev,
+ union acpi_object *obj)
+{
+ if (obj->type == ACPI_TYPE_INTEGER)
+ huawei_wmi_process_key(wdev, obj->integer.value);
+ else
+ dev_info(&wdev->dev, "Bad response type %d\n", obj->type);
+}
+
+static int huawei_wmi_input_setup(struct wmi_device *wdev)
+{
+ struct huawei_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
+ int err;
+
+ priv->idev = devm_input_allocate_device(&wdev->dev);
+ if (!priv->idev)
+ return -ENOMEM;
+
+ priv->idev->name = "Huawei WMI hotkeys";
+ priv->idev->phys = "wmi/input0";
+ priv->idev->id.bustype = BUS_HOST;
+ priv->idev->dev.parent = &wdev->dev;
+
+ err = sparse_keymap_setup(priv->idev, huawei_wmi_keymap, NULL);
+ if (err)
+ return err;
+
+ return input_register_device(priv->idev);
+}
+
+static int huawei_wmi_probe(struct wmi_device *wdev)
+{
+ struct huawei_wmi_priv *priv;
+ int err;
+
+ priv = devm_kzalloc(&wdev->dev, sizeof(struct huawei_wmi_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(&wdev->dev, priv);
+
+ err = huawei_wmi_input_setup(wdev);
+ if (err)
+ return err;
+
+ return huawei_wmi_leds_setup(wdev);
+}
+
+static const struct wmi_device_id huawei_wmi_id_table[] = {
+ { .guid_string = WMI0_EVENT_GUID },
+ { .guid_string = AMW0_EVENT_GUID },
+ { }
+};
+
+static struct wmi_driver huawei_wmi_driver = {
+ .driver = {
+ .name = "huawei-wmi",
+ },
+ .id_table = huawei_wmi_id_table,
+ .probe = huawei_wmi_probe,
+ .notify = huawei_wmi_notify,
+};
+
+module_wmi_driver(huawei_wmi_driver);
+
+MODULE_ALIAS("wmi:"WMI0_EVENT_GUID);
+MODULE_ALIAS("wmi:"AMW0_EVENT_GUID);
+MODULE_AUTHOR("Ayman Bagabas <ayman.bagabas@gmail.com>");
+MODULE_DESCRIPTION("Huawei WMI hotkeys");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index 5456581b473c..3d893e0ac250 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -7,15 +7,23 @@
*/
#include <linux/acpi.h>
+#include <linux/bits.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define IRQ_RESOURCE_TYPE GENMASK(1, 0)
+#define IRQ_RESOURCE_NONE 0
+#define IRQ_RESOURCE_GPIO 1
+#define IRQ_RESOURCE_APIC 2
struct i2c_inst_data {
const char *type;
- int gpio_irq_idx;
+ unsigned int flags;
+ int irq_idx;
};
struct i2c_multi_inst_data {
@@ -23,6 +31,31 @@ struct i2c_multi_inst_data {
struct i2c_client *clients[0];
};
+static int i2c_multi_inst_count(struct acpi_resource *ares, void *data)
+{
+ struct acpi_resource_i2c_serialbus *sb;
+ int *count = data;
+
+ if (i2c_acpi_get_i2c_resource(ares, &sb))
+ *count = *count + 1;
+
+ return 1;
+}
+
+static int i2c_multi_inst_count_resources(struct acpi_device *adev)
+{
+ LIST_HEAD(r);
+ int count = 0;
+ int ret;
+
+ ret = acpi_dev_get_resources(adev, &r, i2c_multi_inst_count, &count);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&r);
+ return count;
+}
+
static int i2c_multi_inst_probe(struct platform_device *pdev)
{
struct i2c_multi_inst_data *multi;
@@ -44,40 +77,59 @@ static int i2c_multi_inst_probe(struct platform_device *pdev)
adev = ACPI_COMPANION(dev);
/* Count number of clients to instantiate */
- for (i = 0; inst_data[i].type; i++) {}
+ ret = i2c_multi_inst_count_resources(adev);
+ if (ret < 0)
+ return ret;
multi = devm_kmalloc(dev,
- offsetof(struct i2c_multi_inst_data, clients[i]),
+ offsetof(struct i2c_multi_inst_data, clients[ret]),
GFP_KERNEL);
if (!multi)
return -ENOMEM;
- multi->num_clients = i;
+ multi->num_clients = ret;
- for (i = 0; i < multi->num_clients; i++) {
+ for (i = 0; i < multi->num_clients && inst_data[i].type; i++) {
memset(&board_info, 0, sizeof(board_info));
strlcpy(board_info.type, inst_data[i].type, I2C_NAME_SIZE);
- snprintf(name, sizeof(name), "%s-%s", match->id,
- inst_data[i].type);
+ snprintf(name, sizeof(name), "%s-%s.%d", match->id,
+ inst_data[i].type, i);
board_info.dev_name = name;
- board_info.irq = 0;
- if (inst_data[i].gpio_irq_idx != -1) {
- ret = acpi_dev_gpio_irq_get(adev,
- inst_data[i].gpio_irq_idx);
+ switch (inst_data[i].flags & IRQ_RESOURCE_TYPE) {
+ case IRQ_RESOURCE_GPIO:
+ ret = acpi_dev_gpio_irq_get(adev, inst_data[i].irq_idx);
if (ret < 0) {
dev_err(dev, "Error requesting irq at index %d: %d\n",
- inst_data[i].gpio_irq_idx, ret);
+ inst_data[i].irq_idx, ret);
goto error;
}
board_info.irq = ret;
+ break;
+ case IRQ_RESOURCE_APIC:
+ ret = platform_get_irq(pdev, inst_data[i].irq_idx);
+ if (ret < 0) {
+ dev_dbg(dev, "Error requesting irq at index %d: %d\n",
+ inst_data[i].irq_idx, ret);
+ }
+ board_info.irq = ret;
+ break;
+ default:
+ board_info.irq = 0;
+ break;
}
multi->clients[i] = i2c_acpi_new_device(dev, i, &board_info);
- if (!multi->clients[i]) {
- dev_err(dev, "Error creating i2c-client, idx %d\n", i);
- ret = -ENODEV;
+ if (IS_ERR(multi->clients[i])) {
+ ret = PTR_ERR(multi->clients[i]);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Error creating i2c-client, idx %d\n", i);
goto error;
}
}
+ if (i < multi->num_clients) {
+ dev_err(dev, "Error finding driver, idx %d\n", i);
+ ret = -ENODEV;
+ goto error;
+ }
platform_set_drvdata(pdev, multi);
return 0;
@@ -101,9 +153,17 @@ static int i2c_multi_inst_remove(struct platform_device *pdev)
}
static const struct i2c_inst_data bsg1160_data[] = {
- { "bmc150_accel", 0 },
- { "bmc150_magn", -1 },
- { "bmg160", -1 },
+ { "bmc150_accel", IRQ_RESOURCE_GPIO, 0 },
+ { "bmc150_magn" },
+ { "bmg160" },
+ {}
+};
+
+static const struct i2c_inst_data int3515_data[] = {
+ { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 3 },
{}
};
@@ -113,6 +173,7 @@ static const struct i2c_inst_data bsg1160_data[] = {
*/
static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
{ "BSG1160", (unsigned long)bsg1160_data },
+ { "INT3515", (unsigned long)int3515_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
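
In the i2c-multi-instantiate rework above, the IRQ resource type rides in
the low two bits of the flags word (IRQ_RESOURCE_TYPE is GENMASK(1, 0),
i.e. 0x3). How an entry is consumed, in compact form (sketch only; the real
logic is the switch in the probe loop above, and pick_irq is not a real
helper):

	static int pick_irq(struct platform_device *pdev,
			    const struct i2c_inst_data *d)
	{
		switch (d->flags & IRQ_RESOURCE_TYPE) {
		case IRQ_RESOURCE_GPIO:	/* ACPI GpioInt at d->irq_idx */
			return acpi_dev_gpio_irq_get(ACPI_COMPANION(&pdev->dev),
						     d->irq_idx);
		case IRQ_RESOURCE_APIC:	/* plain IRQ resource */
			return platform_get_irq(pdev, d->irq_idx);
		default:
			return 0;	/* no IRQ wired up */
		}
	}
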
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index b6489cba2985..1589dffab9fa 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1189,6 +1189,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
},
},
{
+ .ident = "Lenovo Yoga 2 13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Yoga 2 13"),
+ },
+ },
+ {
.ident = "Lenovo Yoga 3 1170 / 1470",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c
index 9371603a0ac9..b0f421fea2a5 100644
--- a/drivers/platform/x86/intel_atomisp2_pm.c
+++ b/drivers/platform/x86/intel_atomisp2_pm.c
@@ -33,46 +33,45 @@
#define ISPSSPM0_IUNIT_POWER_ON 0x0
#define ISPSSPM0_IUNIT_POWER_OFF 0x3
-static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
+static int isp_set_power(struct pci_dev *dev, bool enable)
{
unsigned long timeout;
- u32 val;
-
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, 0);
-
- /*
- * MRFLD IUNIT DPHY is located in an always-power-on island
- * MRFLD HW design need all CSI ports are disabled before
- * powering down the IUNIT.
- */
- pci_read_config_dword(dev, PCI_CSI_CONTROL, &val);
- val |= PCI_CSI_CONTROL_PORTS_OFF_MASK;
- pci_write_config_dword(dev, PCI_CSI_CONTROL, val);
+ u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON :
+ ISPSSPM0_IUNIT_POWER_OFF;
- /* Write 0x3 to ISPSSPM0 bit[1:0] to power off the IUNIT */
+ /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0,
- ISPSSPM0_IUNIT_POWER_OFF, ISPSSPM0_ISPSSC_MASK);
+ val, ISPSSPM0_ISPSSC_MASK);
/*
* There should be no IUNIT access while power-down is
	 * in progress. HW sighting: 4567865.
* Wait up to 50 ms for the IUNIT to shut down.
+ * And we do the same for power on.
*/
timeout = jiffies + msecs_to_jiffies(50);
while (1) {
- /* Wait until ISPSSPM0 bit[25:24] shows 0x3 */
- iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &val);
- val = (val & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
- if (val == ISPSSPM0_IUNIT_POWER_OFF)
+ u32 tmp;
+
+ /* Wait until ISPSSPM0 bit[25:24] shows the right value */
+ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp);
+ tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET;
+ if (tmp == val)
break;
if (time_after(jiffies, timeout)) {
- dev_err(&dev->dev, "IUNIT power-off timeout.\n");
+ dev_err(&dev->dev, "IUNIT power-%s timeout.\n",
+ enable ? "on" : "off");
return -EBUSY;
}
usleep_range(1000, 2000);
}
+ return 0;
+}
+
+static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
pm_runtime_allow(&dev->dev);
pm_runtime_put_sync_suspend(&dev->dev);
@@ -87,11 +86,40 @@ static void isp_remove(struct pci_dev *dev)
static int isp_pci_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u32 val;
+
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, 0);
+
+ /*
+	 * MRFLD IUNIT DPHY is located in an always-power-on island.
+	 * The MRFLD HW design needs all CSI ports disabled before
+	 * powering down the IUNIT.
+ */
+ pci_read_config_dword(pdev, PCI_CSI_CONTROL, &val);
+ val |= PCI_CSI_CONTROL_PORTS_OFF_MASK;
+ pci_write_config_dword(pdev, PCI_CSI_CONTROL, val);
+
+ /*
+ * We lose config space access when punit power gates
+ * the ISP. Can't use pci_set_power_state() because
+ * pmcsr won't actually change when we write to it.
+ */
+ pci_save_state(pdev);
+ pdev->current_state = PCI_D3cold;
+ isp_set_power(pdev, false);
+
return 0;
}
static int isp_pci_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ isp_set_power(pdev, true);
+ pdev->current_state = PCI_D0;
+ pci_restore_state(pdev);
+
return 0;
}
@@ -99,6 +127,7 @@ static UNIVERSAL_DEV_PM_OPS(isp_pm_ops, isp_pci_suspend,
isp_pci_resume, NULL);
static const struct pci_device_id isp_id_table[] = {
+ { PCI_VDEVICE(INTEL, 0x0f38), },
{ PCI_VDEVICE(INTEL, 0x22b8), },
{ 0, }
};
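
isp_set_power() above is a classic bounded poll: request a state, then read
back a status field until it matches or a deadline passes. The same shape in
miniature (sketch; the callback type and names are illustrative):

	/* wait up to @ms milliseconds for read_state() to return @want */
	static int poll_state(u32 (*read_state)(void), u32 want, unsigned int ms)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(ms);

		for (;;) {
			if (read_state() == want)
				return 0;
			if (time_after(jiffies, timeout))
				return -EBUSY;
			usleep_range(1000, 2000);	/* don't hammer the bus */
		}
	}
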
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index 464fe93657b5..616b8853a91f 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -168,8 +168,8 @@ static int cht_int33fe_probe(struct platform_device *pdev)
board_info.dev_name = "max17047";
board_info.properties = max17047_props;
data->max17047 = i2c_acpi_new_device(dev, 1, &board_info);
- if (!data->max17047)
- return -EPROBE_DEFER; /* Wait for i2c-adapter to load */
+ if (IS_ERR(data->max17047))
+ return PTR_ERR(data->max17047);
}
data->connections[0].endpoint[0] = "port0";
@@ -194,16 +194,20 @@ static int cht_int33fe_probe(struct platform_device *pdev)
board_info.irq = fusb302_irq;
data->fusb302 = i2c_acpi_new_device(dev, 2, &board_info);
- if (!data->fusb302)
+ if (IS_ERR(data->fusb302)) {
+ ret = PTR_ERR(data->fusb302);
goto out_unregister_max17047;
+ }
memset(&board_info, 0, sizeof(board_info));
board_info.dev_name = "pi3usb30532";
strlcpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE);
data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info);
- if (!data->pi3usb30532)
+ if (IS_ERR(data->pi3usb30532)) {
+ ret = PTR_ERR(data->pi3usb30532);
goto out_unregister_fusb302;
+ }
platform_set_drvdata(pdev, data);
@@ -213,12 +217,11 @@ out_unregister_fusb302:
i2c_unregister_device(data->fusb302);
out_unregister_max17047:
- if (data->max17047)
- i2c_unregister_device(data->max17047);
+ i2c_unregister_device(data->max17047);
device_connections_remove(data->connections);
- return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
+ return ret;
}
static int cht_int33fe_remove(struct platform_device *pdev)
@@ -227,8 +230,7 @@ static int cht_int33fe_remove(struct platform_device *pdev)
i2c_unregister_device(data->pi3usb30532);
i2c_unregister_device(data->fusb302);
- if (data->max17047)
- i2c_unregister_device(data->max17047);
+ i2c_unregister_device(data->max17047);
device_connections_remove(data->connections);
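
The cht_int33fe changes track an API change: i2c_acpi_new_device() now
returns ERR_PTR-encoded errors instead of NULL, so callers can propagate the
real reason (including -EPROBE_DEFER) instead of hard-coding one. The caller
shape is always the same (sketch):

	client = i2c_acpi_new_device(dev, 1, &board_info);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* may be -EPROBE_DEFER; pass it up */

The dropped "if (data->max17047)" guards rely on i2c_unregister_device()
being safe to call with a NULL/ERR_PTR argument.
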
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 225638a1b09e..bffe548187ee 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -1210,13 +1210,7 @@ static void ips_debugfs_cleanup(struct ips_driver *ips) { return; }
/* Expose current state and limits in debugfs if possible */
-struct ips_debugfs_node {
- struct ips_driver *ips;
- char *name;
- int (*show)(struct seq_file *m, void *data);
-};
-
-static int show_cpu_temp(struct seq_file *m, void *data)
+static int cpu_temp_show(struct seq_file *m, void *data)
{
struct ips_driver *ips = m->private;
@@ -1225,8 +1219,9 @@ static int show_cpu_temp(struct seq_file *m, void *data)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(cpu_temp);
-static int show_cpu_power(struct seq_file *m, void *data)
+static int cpu_power_show(struct seq_file *m, void *data)
{
struct ips_driver *ips = m->private;
@@ -1234,8 +1229,9 @@ static int show_cpu_power(struct seq_file *m, void *data)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(cpu_power);
-static int show_cpu_clamp(struct seq_file *m, void *data)
+static int cpu_clamp_show(struct seq_file *m, void *data)
{
u64 turbo_override;
int tdp, tdc;
@@ -1255,8 +1251,9 @@ static int show_cpu_clamp(struct seq_file *m, void *data)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(cpu_clamp);
-static int show_mch_temp(struct seq_file *m, void *data)
+static int mch_temp_show(struct seq_file *m, void *data)
{
struct ips_driver *ips = m->private;
@@ -1265,8 +1262,9 @@ static int show_mch_temp(struct seq_file *m, void *data)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(mch_temp);
-static int show_mch_power(struct seq_file *m, void *data)
+static int mch_power_show(struct seq_file *m, void *data)
{
struct ips_driver *ips = m->private;
@@ -1274,68 +1272,22 @@ static int show_mch_power(struct seq_file *m, void *data)
return 0;
}
-
-static struct ips_debugfs_node ips_debug_files[] = {
- { NULL, "cpu_temp", show_cpu_temp },
- { NULL, "cpu_power", show_cpu_power },
- { NULL, "cpu_clamp", show_cpu_clamp },
- { NULL, "mch_temp", show_mch_temp },
- { NULL, "mch_power", show_mch_power },
-};
-
-static int ips_debugfs_open(struct inode *inode, struct file *file)
-{
- struct ips_debugfs_node *node = inode->i_private;
-
- return single_open(file, node->show, node->ips);
-}
-
-static const struct file_operations ips_debugfs_ops = {
- .owner = THIS_MODULE,
- .open = ips_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mch_power);
static void ips_debugfs_cleanup(struct ips_driver *ips)
{
- if (ips->debug_root)
- debugfs_remove_recursive(ips->debug_root);
- return;
+ debugfs_remove_recursive(ips->debug_root);
}
static void ips_debugfs_init(struct ips_driver *ips)
{
- int i;
-
ips->debug_root = debugfs_create_dir("ips", NULL);
- if (!ips->debug_root) {
- dev_err(ips->dev, "failed to create debugfs entries: %ld\n",
- PTR_ERR(ips->debug_root));
- return;
- }
- for (i = 0; i < ARRAY_SIZE(ips_debug_files); i++) {
- struct dentry *ent;
- struct ips_debugfs_node *node = &ips_debug_files[i];
-
- node->ips = ips;
- ent = debugfs_create_file(node->name, S_IFREG | S_IRUGO,
- ips->debug_root, node,
- &ips_debugfs_ops);
- if (!ent) {
- dev_err(ips->dev, "failed to create debug file: %ld\n",
- PTR_ERR(ent));
- goto err_cleanup;
- }
- }
-
- return;
-
-err_cleanup:
- ips_debugfs_cleanup(ips);
- return;
+ debugfs_create_file("cpu_temp", 0444, ips->debug_root, ips, &cpu_temp_fops);
+ debugfs_create_file("cpu_power", 0444, ips->debug_root, ips, &cpu_power_fops);
+ debugfs_create_file("cpu_clamp", 0444, ips->debug_root, ips, &cpu_clamp_fops);
+ debugfs_create_file("mch_temp", 0444, ips->debug_root, ips, &mch_temp_fops);
+ debugfs_create_file("mch_power", 0444, ips->debug_root, ips, &mch_power_fops);
}
#endif /* CONFIG_DEBUG_FS */
@@ -1646,9 +1598,6 @@ static void ips_remove(struct pci_dev *dev)
struct ips_driver *ips = pci_get_drvdata(dev);
u64 turbo_override;
- if (!ips)
- return;
-
ips_debugfs_cleanup(ips);
/* Release i915 driver */
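
All of the removed open/read/llseek/release boilerplate in this file (and in the pmc_core and telemetry conversions below) is generated by DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h>. Given a foo_show() function, the macro expands to roughly:

    static int foo_open(struct inode *inode, struct file *file)
    {
    	return single_open(file, foo_show, inode->i_private);
    }

    static const struct file_operations foo_fops = {
    	.owner   = THIS_MODULE,
    	.open    = foo_open,
    	.read    = seq_read,
    	.llseek  = seq_lseek,
    	.release = single_release,
    };

That is why the show functions are renamed to the *_show pattern and the debugfs_create_file() callers switch to the generated *_fops, with the private data now arriving through inode->i_private.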
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 6b31d410cb09..22dbf115782e 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -101,10 +102,35 @@ static const struct pmc_bit_map spt_pfear_map[] = {
{},
};
+static const struct pmc_bit_map spt_ltr_show_map[] = {
+ {"SOUTHPORT_A", SPT_PMC_LTR_SPA},
+ {"SOUTHPORT_B", SPT_PMC_LTR_SPB},
+ {"SATA", SPT_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", SPT_PMC_LTR_GBE},
+ {"XHCI", SPT_PMC_LTR_XHCI},
+ {"ME", SPT_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"EVA", SPT_PMC_LTR_EVA},
+ {"SOUTHPORT_C", SPT_PMC_LTR_SPC},
+ {"HD_AUDIO", SPT_PMC_LTR_AZ},
+ {"LPSS", SPT_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", SPT_PMC_LTR_SPD},
+ {"SOUTHPORT_E", SPT_PMC_LTR_SPE},
+ {"CAMERA", SPT_PMC_LTR_CAM},
+ {"ESPI", SPT_PMC_LTR_ESPI},
+ {"SCC", SPT_PMC_LTR_SCC},
+ {"ISH", SPT_PMC_LTR_ISH},
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", SPT_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", SPT_PMC_LTR_CUR_ASLT},
+ {}
+};
+
static const struct pmc_reg_map spt_reg_map = {
.pfear_sts = spt_pfear_map,
.mphy_sts = spt_mphy_map,
.pll_sts = spt_pll_map,
+ .ltr_show_sts = spt_ltr_show_map,
.slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
.ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
.regmap_length = SPT_PMC_MMIO_REG_LEN,
@@ -112,6 +138,7 @@ static const struct pmc_reg_map spt_reg_map = {
.ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
};
/* Cannonlake: PGD PFET Enable Ack Status Register(s) bitmap */
@@ -243,10 +270,38 @@ static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
NULL,
};
+static const struct pmc_bit_map cnp_ltr_show_map[] = {
+ {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
+ {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
+ {"SATA", CNP_PMC_LTR_SATA},
+ {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
+ {"XHCI", CNP_PMC_LTR_XHCI},
+ {"ME", CNP_PMC_LTR_ME},
+ /* EVA is Enterprise Value Add, doesn't really exist on PCH */
+ {"EVA", CNP_PMC_LTR_EVA},
+ {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
+ {"HD_AUDIO", CNP_PMC_LTR_AZ},
+ {"CNV", CNP_PMC_LTR_CNV},
+ {"LPSS", CNP_PMC_LTR_LPSS},
+ {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
+ {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
+ {"CAMERA", CNP_PMC_LTR_CAM},
+ {"ESPI", CNP_PMC_LTR_ESPI},
+ {"SCC", CNP_PMC_LTR_SCC},
+ {"ISH", CNP_PMC_LTR_ISH},
+ {"UFSX2", CNP_PMC_LTR_UFSX2},
+ {"EMMC", CNP_PMC_LTR_EMMC},
+ /* Below two cannot be used for LTR_IGNORE */
+ {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
+ {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
+ {}
+};
+
static const struct pmc_reg_map cnp_reg_map = {
.pfear_sts = cnp_pfear_map,
.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
.slps0_dbg_maps = cnp_slps0_dbg_maps,
+ .ltr_show_sts = cnp_ltr_show_map,
.slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
.regmap_length = CNP_PMC_MMIO_REG_LEN,
@@ -254,6 +309,7 @@ static const struct pmc_reg_map cnp_reg_map = {
.ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
+ .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
};
static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
@@ -311,7 +367,7 @@ static void pmc_core_display_map(struct seq_file *s, int index,
pf_map[index].bit_mask & pf_reg ? "Off" : "On");
}
-static int pmc_core_ppfear_sts_show(struct seq_file *s, void *unused)
+static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
@@ -329,18 +385,7 @@ static int pmc_core_ppfear_sts_show(struct seq_file *s, void *unused)
return 0;
}
-
-static int pmc_core_ppfear_sts_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pmc_core_ppfear_sts_show, inode->i_private);
-}
-
-static const struct file_operations pmc_core_ppfear_ops = {
- .open = pmc_core_ppfear_sts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
/* This function should return link status, 0 means ready */
static int pmc_core_mtpmc_link_status(void)
@@ -372,7 +417,7 @@ static int pmc_core_send_msg(u32 *addr_xram)
return 0;
}
-static int pmc_core_mphy_pg_sts_show(struct seq_file *s, void *unused)
+static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
@@ -424,18 +469,7 @@ out_unlock:
mutex_unlock(&pmcdev->lock);
return err;
}
-
-static int pmc_core_mphy_pg_sts_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pmc_core_mphy_pg_sts_show, inode->i_private);
-}
-
-static const struct file_operations pmc_core_mphy_pg_ops = {
- .open = pmc_core_mphy_pg_sts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);
static int pmc_core_pll_show(struct seq_file *s, void *unused)
{
@@ -471,18 +505,7 @@ out_unlock:
mutex_unlock(&pmcdev->lock);
return err;
}
-
-static int pmc_core_pll_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pmc_core_pll_show, inode->i_private);
-}
-
-static const struct file_operations pmc_core_pll_ops = {
- .open = pmc_core_pll_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
*userbuf, size_t count, loff_t *ppos)
@@ -500,7 +523,7 @@ static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user
goto out_unlock;
}
- if (val > NUM_IP_IGN_ALLOWED) {
+ if (val > map->ltr_ignore_max) {
err = -EINVAL;
goto out_unlock;
}
@@ -583,6 +606,77 @@ static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);
+static u32 convert_ltr_scale(u32 val)
+{
+ /*
+ * As per the PCIe specification's supporting document,
+ * ECN_LatencyTolnReporting_14Aug08.pdf, the Latency
+ * Tolerance Reporting data payload is encoded in
+ * 3-bit scale and 10-bit value fields. Values are
+ * multiplied by the indicated scale to yield an absolute time
+ * value, expressible in a range from 1 nanosecond to
+ * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
+ *
+ * The scale encoding is as follows:
+ *
+ * ----------------------------------------------
+ * |scale factor | Multiplier (ns) |
+ * ----------------------------------------------
+ * | 0 | 1 |
+ * | 1 | 32 |
+ * | 2 | 1024 |
+ * | 3 | 32768 |
+ * | 4 | 1048576 |
+ * | 5 | 33554432 |
+ * | 6 | Invalid |
+ * | 7 | Invalid |
+ * ----------------------------------------------
+ */
+ if (val > 5) {
+ pr_warn("Invalid LTR scale factor.\n");
+ return 0;
+ }
+
+ return 1U << (5 * val);
+}
+
+static int pmc_core_ltr_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts;
+ u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
+ u32 ltr_raw_data, scale, val;
+ u16 snoop_ltr, nonsnoop_ltr;
+ int index;
+
+ for (index = 0; map[index].name ; index++) {
+ decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
+ ltr_raw_data = pmc_core_reg_read(pmcdev,
+ map[index].bit_mask);
+ snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
+ nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;
+
+ if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
+ scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
+ val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
+ decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
+ }
+
+ if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
+ scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
+ val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
+ decoded_snoop_ltr = val * convert_ltr_scale(scale);
+ }
+
+ seq_printf(s, "%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
+ map[index].name, ltr_raw_data,
+ decoded_non_snoop_ltr,
+ decoded_snoop_ltr);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
+
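
A worked decode for a hypothetical snoop half-word of 0x8c64: BIT(15) is set, so a snoop requirement is live; FIELD_GET(LTR_DECODED_SCALE, 0x8c64) = 3 and FIELD_GET(LTR_DECODED_VAL, 0x8c64) = 100; convert_ltr_scale(3) = 1U << 15 = 32768 ns per step, giving a reported tolerance of 100 * 32768 = 3,276,800 ns.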
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
debugfs_remove_recursive(pmcdev->dbgfs_dir);
@@ -602,19 +696,21 @@ static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
&pmc_core_dev_state);
debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev,
- &pmc_core_ppfear_ops);
+ &pmc_core_ppfear_fops);
debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
&pmc_core_ltr_ignore_ops);
+ debugfs_create_file("ltr_show", 0644, dir, pmcdev, &pmc_core_ltr_fops);
+
if (pmcdev->map->pll_sts)
debugfs_create_file("pll_status", 0444, dir, pmcdev,
- &pmc_core_pll_ops);
+ &pmc_core_pll_fops);
if (pmcdev->map->mphy_sts)
debugfs_create_file("mphy_core_lanes_power_gating_status",
0444, dir, pmcdev,
- &pmc_core_mphy_pg_ops);
+ &pmc_core_mphy_pg_fops);
if (pmcdev->map->slps0_dbg_maps) {
debugfs_create_file("slp_s0_debug_status", 0444,
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
index be045348ad86..89554cba5758 100644
--- a/drivers/platform/x86/intel_pmc_core.h
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -12,6 +12,8 @@
#ifndef PMC_CORE_H
#define PMC_CORE_H
+#include <linux/bits.h>
+
#define PMC_BASE_ADDR_DEFAULT 0xFE000000
/* Sunrise Point Power Management Controller PCI Device ID */
@@ -35,7 +37,26 @@
#define SPT_PMC_READ_DISABLE_BIT 0x16
#define SPT_PMC_MSG_FULL_STS_BIT 0x18
#define NUM_RETRIES 100
-#define NUM_IP_IGN_ALLOWED 17
+#define SPT_NUM_IP_IGN_ALLOWED 17
+
+#define SPT_PMC_LTR_CUR_PLT 0x350
+#define SPT_PMC_LTR_CUR_ASLT 0x354
+#define SPT_PMC_LTR_SPA 0x360
+#define SPT_PMC_LTR_SPB 0x364
+#define SPT_PMC_LTR_SATA 0x368
+#define SPT_PMC_LTR_GBE 0x36C
+#define SPT_PMC_LTR_XHCI 0x370
+#define SPT_PMC_LTR_ME 0x378
+#define SPT_PMC_LTR_EVA 0x37C
+#define SPT_PMC_LTR_SPC 0x380
+#define SPT_PMC_LTR_AZ 0x384
+#define SPT_PMC_LTR_LPSS 0x38C
+#define SPT_PMC_LTR_CAM 0x390
+#define SPT_PMC_LTR_SPD 0x394
+#define SPT_PMC_LTR_SPE 0x398
+#define SPT_PMC_LTR_ESPI 0x39C
+#define SPT_PMC_LTR_SCC 0x3A0
+#define SPT_PMC_LTR_ISH 0x3A4
/* Sunrise Point: PGD PFET Enable Ack Status Registers */
enum ppfear_regs {
@@ -115,18 +136,46 @@ enum ppfear_regs {
#define SPT_PMC_BIT_MPHY_CMN_LANE3 BIT(3)
/* Cannonlake Power Management Controller register offsets */
-#define CNP_PMC_SLP_S0_RES_COUNTER_OFFSET 0x193C
-#define CNP_PMC_LTR_IGNORE_OFFSET 0x1B0C
-#define CNP_PMC_PM_CFG_OFFSET 0x1818
#define CNP_PMC_SLPS0_DBG_OFFSET 0x10B4
+#define CNP_PMC_PM_CFG_OFFSET 0x1818
+#define CNP_PMC_SLP_S0_RES_COUNTER_OFFSET 0x193C
+#define CNP_PMC_LTR_IGNORE_OFFSET 0x1B0C
/* Cannonlake: PGD PFET Enable Ack Status Register(s) start */
-#define CNP_PMC_HOST_PPFEAR0A 0x1D90
+#define CNP_PMC_HOST_PPFEAR0A 0x1D90
-#define CNP_PMC_MMIO_REG_LEN 0x2000
-#define CNP_PPFEAR_NUM_ENTRIES 8
-#define CNP_PMC_READ_DISABLE_BIT 22
#define CNP_PMC_LATCH_SLPS0_EVENTS BIT(31)
+#define CNP_PMC_MMIO_REG_LEN 0x2000
+#define CNP_PPFEAR_NUM_ENTRIES 8
+#define CNP_PMC_READ_DISABLE_BIT 22
+#define CNP_NUM_IP_IGN_ALLOWED 19
+#define CNP_PMC_LTR_CUR_PLT 0x1B50
+#define CNP_PMC_LTR_CUR_ASLT 0x1B54
+#define CNP_PMC_LTR_SPA 0x1B60
+#define CNP_PMC_LTR_SPB 0x1B64
+#define CNP_PMC_LTR_SATA 0x1B68
+#define CNP_PMC_LTR_GBE 0x1B6C
+#define CNP_PMC_LTR_XHCI 0x1B70
+#define CNP_PMC_LTR_ME 0x1B78
+#define CNP_PMC_LTR_EVA 0x1B7C
+#define CNP_PMC_LTR_SPC 0x1B80
+#define CNP_PMC_LTR_AZ 0x1B84
+#define CNP_PMC_LTR_LPSS 0x1B8C
+#define CNP_PMC_LTR_CAM 0x1B90
+#define CNP_PMC_LTR_SPD 0x1B94
+#define CNP_PMC_LTR_SPE 0x1B98
+#define CNP_PMC_LTR_ESPI 0x1B9C
+#define CNP_PMC_LTR_SCC 0x1BA0
+#define CNP_PMC_LTR_ISH 0x1BA4
+#define CNP_PMC_LTR_CNV 0x1BF0
+#define CNP_PMC_LTR_EMMC 0x1BF4
+#define CNP_PMC_LTR_UFSX2 0x1BF8
+
+#define LTR_DECODED_VAL GENMASK(9, 0)
+#define LTR_DECODED_SCALE GENMASK(12, 10)
+#define LTR_REQ_SNOOP BIT(15)
+#define LTR_REQ_NONSNOOP BIT(31)
+
struct pmc_bit_map {
const char *name;
u32 bit_mask;
@@ -139,6 +188,7 @@ struct pmc_bit_map {
* @mphy_sts: Maps name of MPHY lane to MPHY status lane status bit
* @pll_sts: Maps name of PLL to corresponding bit status
* @slps0_dbg_maps: Array of SLP_S0_DBG* registers containing debug info
+ * @ltr_show_sts: Maps PCH IP Names to their MMIO register offsets
* @slp_s0_offset: PWRMBASE offset to read SLP_S0 residency
* @ltr_ignore_offset: PWRMBASE offset to read/write LTR ignore bit
* @regmap_length: Length of memory to map from PWRMBASE address to access
@@ -157,6 +207,7 @@ struct pmc_reg_map {
const struct pmc_bit_map *mphy_sts;
const struct pmc_bit_map *pll_sts;
const struct pmc_bit_map **slps0_dbg_maps;
+ const struct pmc_bit_map *ltr_show_sts;
const u32 slp_s0_offset;
const u32 ltr_ignore_offset;
const int regmap_length;
@@ -165,6 +216,7 @@ struct pmc_reg_map {
const u32 pm_cfg_offset;
const int pm_read_disable_bit;
const u32 slps0_dbg_offset;
+ const u32 ltr_ignore_max;
};
/**
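
The new constants lean on <linux/bits.h> and <linux/bitfield.h>: GENMASK(h, l) builds a mask with bits h..l set, and FIELD_GET()/FIELD_PREP() do the shifting implied by the mask, so no hand-written shift constants are needed. A minimal sketch using the masks defined above:

    u16 half  = 0x8c64;
    u32 val   = FIELD_GET(LTR_DECODED_VAL, half);	/* GENMASK(9, 0) = 0x03ff -> 100 */
    u32 scale = FIELD_GET(LTR_DECODED_SCALE, half);	/* GENMASK(12, 10) = 0x1c00 -> 3 */
    u16 again = FIELD_PREP(LTR_DECODED_SCALE, scale) |
    	    FIELD_PREP(LTR_DECODED_VAL, val);	/* 0x0c64, requirement bit dropped */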
diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c
index 40bce560eb30..98ba9185a27b 100644
--- a/drivers/platform/x86/intel_telemetry_debugfs.c
+++ b/drivers/platform/x86/intel_telemetry_debugfs.c
@@ -466,17 +466,7 @@ static int telem_pss_states_show(struct seq_file *s, void *unused)
return 0;
}
-static int telem_pss_state_open(struct inode *inode, struct file *file)
-{
- return single_open(file, telem_pss_states_show, inode->i_private);
-}
-
-static const struct file_operations telem_pss_ops = {
- .open = telem_pss_state_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(telem_pss_states);
static int telem_ioss_states_show(struct seq_file *s, void *unused)
{
@@ -505,17 +495,7 @@ static int telem_ioss_states_show(struct seq_file *s, void *unused)
return 0;
}
-static int telem_ioss_state_open(struct inode *inode, struct file *file)
-{
- return single_open(file, telem_ioss_states_show, inode->i_private);
-}
-
-static const struct file_operations telem_ioss_ops = {
- .open = telem_ioss_state_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(telem_ioss_states);
static int telem_soc_states_show(struct seq_file *s, void *unused)
{
@@ -664,17 +644,7 @@ static int telem_soc_states_show(struct seq_file *s, void *unused)
return 0;
}
-static int telem_soc_state_open(struct inode *inode, struct file *file)
-{
- return single_open(file, telem_soc_states_show, inode->i_private);
-}
-
-static const struct file_operations telem_socstate_ops = {
- .open = telem_soc_state_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(telem_soc_states);
static int telem_s0ix_res_get(void *data, u64 *val)
{
@@ -960,7 +930,7 @@ static int __init telemetry_debugfs_init(void)
f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO,
debugfs_conf->telemetry_dbg_dir, NULL,
- &telem_pss_ops);
+ &telem_pss_states_fops);
if (!f) {
pr_err("pss_sample_info debugfs register failed\n");
goto out;
@@ -968,7 +938,7 @@ static int __init telemetry_debugfs_init(void)
f = debugfs_create_file("ioss_info", S_IFREG | S_IRUGO,
debugfs_conf->telemetry_dbg_dir, NULL,
- &telem_ioss_ops);
+ &telem_ioss_states_fops);
if (!f) {
pr_err("ioss_sample_info debugfs register failed\n");
goto out;
@@ -976,7 +946,7 @@ static int __init telemetry_debugfs_init(void)
f = debugfs_create_file("soc_states", S_IFREG | S_IRUGO,
debugfs_conf->telemetry_dbg_dir,
- NULL, &telem_socstate_ops);
+ NULL, &telem_soc_states_fops);
if (!f) {
pr_err("ioss_sample_info debugfs register failed\n");
goto out;
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index c2c3a1a19879..df3fcd36776a 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/*
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com>
+ * Mellanox platform driver
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+ * Copyright (C) 2016-2018 Mellanox Technologies
+ * Copyright (C) 2016-2018 Vadim Pasternak <vadimp@mellanox.com>
*/
#include <linux/device.h>
@@ -49,7 +24,10 @@
#define MLXPLAT_CPLD_LPC_REG_BASE_ADRR 0x2500
#define MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET 0x00
#define MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET 0x01
+#define MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET 0x02
#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
+#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET 0x1e
+#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET 0x1f
#define MLXPLAT_CPLD_LPC_REG_LED1_OFFSET 0x20
#define MLXPLAT_CPLD_LPC_REG_LED2_OFFSET 0x21
#define MLXPLAT_CPLD_LPC_REG_LED3_OFFSET 0x22
@@ -83,12 +61,12 @@
#define MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET 0xe7
#define MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET 0xe8
#define MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET 0xe9
-#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xea
-#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xeb
-#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xec
-#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xed
-#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xee
-#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xef
+#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xeb
+#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xec
+#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xed
+#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xee
+#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xef
+#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xf0
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
@@ -1101,6 +1079,118 @@ static struct mlxreg_core_platform_data mlxplat_msn21xx_regs_io_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_msn21xx_regs_io_data),
};
+/* Platform register access for next generation systems families data */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld3_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_long_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_short_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_from_comex",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_asic_thermal",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_voltmon_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_system",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "psu1_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "psu2_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "jtag_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "asic_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = {
+ .data = mlxplat_mlxcpld_default_ng_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_regs_io_data),
+};
+
/* Platform FAN default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
{
@@ -1208,7 +1298,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
switch (reg) {
case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
@@ -1258,7 +1351,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
switch (reg) {
case MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
@@ -1421,7 +1517,7 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
mlxplat_hotplug = &mlxplat_mlxcpld_msn201x_data;
mlxplat_hotplug->deferred_nr =
mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
- mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_led = &mlxplat_msn21xx_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
return 1;
@@ -1439,7 +1535,8 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
mlxplat_hotplug = &mlxplat_mlxcpld_default_ng_data;
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
- mlxplat_led = &mlxplat_msn21xx_led_data;
+ mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
mlxplat_fan = &mlxplat_default_fan_data;
return 1;
@@ -1499,21 +1596,21 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
- DMI_MATCH(DMI_PRODUCT_NAME, "QMB7"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MQM87"),
},
},
{
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
- DMI_MATCH(DMI_PRODUCT_NAME, "SN37"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN37"),
},
},
{
.callback = mlxplat_dmi_qmb7xx_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
- DMI_MATCH(DMI_PRODUCT_NAME, "SN34"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MSN34"),
},
},
{
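
A note on the inverted-mask convention used throughout the table above: .mask = GENMASK(7, 0) & ~BIT(n) clears exactly the bit that backs the attribute, e.g. GENMASK(7, 0) & ~BIT(2) = 0xff & ~0x04 = 0xfb for reset_aux_pwr_or_ref, so the mlxreg-io layer can tell which single register bit the sysfs file exposes while the remaining bits stay masked off.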
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index fde08a997557..726341f2b638 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -81,7 +81,6 @@
#include <linux/acpi.h>
#include <linux/pci_ids.h>
#include <linux/power_supply.h>
-#include <linux/thinkpad_acpi.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/initval.h>
@@ -478,6 +477,12 @@ do { \
.ec = TPACPI_MATCH_ANY, \
.quirks = (__quirk) }
+#define TPACPI_QEC_IBM(__id1, __id2, __quirk) \
+ { .vendor = PCI_VENDOR_ID_IBM, \
+ .bios = TPACPI_MATCH_ANY, \
+ .ec = TPID(__id1, __id2), \
+ .quirks = (__quirk) }
+
#define TPACPI_QEC_LNV(__id1, __id2, __quirk) \
{ .vendor = PCI_VENDOR_ID_LENOVO, \
.bios = TPACPI_MATCH_ANY, \
@@ -3457,7 +3462,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN, KEY_UNKNOWN,
KEY_UNKNOWN,
- KEY_FAVORITES, /* Favorite app, 0x311 */
+ KEY_BOOKMARKS, /* Favorite app, 0x311 */
KEY_RESERVED, /* Clipping tool */
KEY_CALC, /* Calculator (above numpad, P52) */
KEY_BLUETOOTH, /* Bluetooth */
@@ -5973,9 +5978,6 @@ static const struct tpacpi_quirk led_useful_qtable[] __initconst = {
},
};
-#undef TPACPI_LEDQ_IBM
-#undef TPACPI_LEDQ_LNV
-
static enum led_access_mode __init led_init_detect_mode(void)
{
acpi_status status;
@@ -8710,40 +8712,18 @@ static const struct attribute_group fan_attr_group = {
.attrs = fan_attributes,
};
-#define TPACPI_FAN_Q1 0x0001 /* Unitialized HFSP */
+#define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */
#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
-#define TPACPI_FAN_QI(__id1, __id2, __quirks) \
- { .vendor = PCI_VENDOR_ID_IBM, \
- .bios = TPACPI_MATCH_ANY, \
- .ec = TPID(__id1, __id2), \
- .quirks = __quirks }
-
-#define TPACPI_FAN_QL(__id1, __id2, __quirks) \
- { .vendor = PCI_VENDOR_ID_LENOVO, \
- .bios = TPACPI_MATCH_ANY, \
- .ec = TPID(__id1, __id2), \
- .quirks = __quirks }
-
-#define TPACPI_FAN_QB(__id1, __id2, __quirks) \
- { .vendor = PCI_VENDOR_ID_LENOVO, \
- .bios = TPID(__id1, __id2), \
- .ec = TPACPI_MATCH_ANY, \
- .quirks = __quirks }
-
static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
- TPACPI_FAN_QI('1', 'Y', TPACPI_FAN_Q1),
- TPACPI_FAN_QI('7', '8', TPACPI_FAN_Q1),
- TPACPI_FAN_QI('7', '6', TPACPI_FAN_Q1),
- TPACPI_FAN_QI('7', '0', TPACPI_FAN_Q1),
- TPACPI_FAN_QL('7', 'M', TPACPI_FAN_2FAN),
- TPACPI_FAN_QB('N', '1', TPACPI_FAN_2FAN),
+ TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
+ TPACPI_QEC_IBM('7', '8', TPACPI_FAN_Q1),
+ TPACPI_QEC_IBM('7', '6', TPACPI_FAN_Q1),
+ TPACPI_QEC_IBM('7', '0', TPACPI_FAN_Q1),
+ TPACPI_QEC_LNV('7', 'M', TPACPI_FAN_2FAN),
+ TPACPI_Q_LNV('N', '1', TPACPI_FAN_2FAN),
};
-#undef TPACPI_FAN_QL
-#undef TPACPI_FAN_QI
-#undef TPACPI_FAN_QB
-
static int __init fan_init(struct ibm_init_struct *iibm)
{
int rc;
@@ -9150,6 +9130,7 @@ static struct ibm_struct fan_driver_data = {
* Mute LED subdriver
*/
+#define TPACPI_LED_MAX 2
struct tp_led_table {
acpi_string name;
@@ -9158,13 +9139,13 @@ struct tp_led_table {
int state;
};
-static struct tp_led_table led_tables[] = {
- [TPACPI_LED_MUTE] = {
+static struct tp_led_table led_tables[TPACPI_LED_MAX] = {
+ [LED_AUDIO_MUTE] = {
.name = "SSMS",
.on_value = 1,
.off_value = 0,
},
- [TPACPI_LED_MICMUTE] = {
+ [LED_AUDIO_MICMUTE] = {
.name = "MMTS",
.on_value = 2,
.off_value = 0,
@@ -9189,31 +9170,64 @@ static int mute_led_on_off(struct tp_led_table *t, bool state)
return state;
}
-int tpacpi_led_set(int whichled, bool on)
+static int tpacpi_led_set(int whichled, bool on)
{
struct tp_led_table *t;
- if (whichled < 0 || whichled >= TPACPI_LED_MAX)
- return -EINVAL;
-
t = &led_tables[whichled];
if (t->state < 0 || t->state == on)
return t->state;
return mute_led_on_off(t, on);
}
-EXPORT_SYMBOL_GPL(tpacpi_led_set);
+
+static int tpacpi_led_mute_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ return tpacpi_led_set(LED_AUDIO_MUTE, brightness != LED_OFF);
+}
+
+static int tpacpi_led_micmute_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ return tpacpi_led_set(LED_AUDIO_MICMUTE, brightness != LED_OFF);
+}
+
+static struct led_classdev mute_led_cdev[TPACPI_LED_MAX] = {
+ [LED_AUDIO_MUTE] = {
+ .name = "platform::mute",
+ .max_brightness = 1,
+ .brightness_set_blocking = tpacpi_led_mute_set,
+ .default_trigger = "audio-mute",
+ },
+ [LED_AUDIO_MICMUTE] = {
+ .name = "platform::micmute",
+ .max_brightness = 1,
+ .brightness_set_blocking = tpacpi_led_micmute_set,
+ .default_trigger = "audio-micmute",
+ },
+};
static int mute_led_init(struct ibm_init_struct *iibm)
{
acpi_handle temp;
- int i;
+ int i, err;
for (i = 0; i < TPACPI_LED_MAX; i++) {
struct tp_led_table *t = &led_tables[i];
- if (ACPI_SUCCESS(acpi_get_handle(hkey_handle, t->name, &temp)))
- mute_led_on_off(t, false);
- else
+ if (ACPI_FAILURE(acpi_get_handle(hkey_handle, t->name, &temp))) {
t->state = -ENODEV;
+ continue;
+ }
+
+ mute_led_cdev[i].brightness = ledtrig_audio_get(i);
+ err = led_classdev_register(&tpacpi_pdev->dev, &mute_led_cdev[i]);
+ if (err < 0) {
+ while (i--) {
+ if (led_tables[i].state >= 0)
+ led_classdev_unregister(&mute_led_cdev[i]);
+ }
+ return err;
+ }
}
return 0;
}
@@ -9222,8 +9236,12 @@ static void mute_led_exit(void)
{
int i;
- for (i = 0; i < TPACPI_LED_MAX; i++)
- tpacpi_led_set(i, false);
+ for (i = 0; i < TPACPI_LED_MAX; i++) {
+ if (led_tables[i].state >= 0) {
+ led_classdev_unregister(&mute_led_cdev[i]);
+ tpacpi_led_set(i, false);
+ }
+ }
}
static void mute_led_resume(void)
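
mute_led_init() above uses the standard reverse-unwind idiom for partial failure: when the i-th registration fails, everything already registered is torn down in reverse order before the error is returned. The idiom in isolation, detached from the ThinkPad specifics:

    static int register_leds(struct device *dev, struct led_classdev *cdev, int n)
    {
    	int i, err;

    	for (i = 0; i < n; i++) {
    		err = led_classdev_register(dev, &cdev[i]);
    		if (err) {
    			while (i--)	/* undo in reverse order */
    				led_classdev_unregister(&cdev[i]);
    			return err;
    		}
    	}
    	return 0;
    }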
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 5f2d7ea912b5..8c5d47c0aea6 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -615,6 +615,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Mediacom Flexbook Edge 11 (same hw as TS Primebook C11) */
+ .driver_data = (void *)&trekstor_primebook_c11_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
+ },
+ },
+ {
/* Onda oBook 20 Plus */
.driver_data = (void *)&onda_obook_20_plus_data,
.matches = {
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
index 1360a7fa542c..c96c01e09740 100644
--- a/drivers/power/avs/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* OMAP SmartReflex Voltage Control
*
@@ -11,10 +12,6 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Lesly A M <x0080970@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/module.h>
@@ -37,7 +34,6 @@
static LIST_HEAD(sr_list);
static struct omap_sr_class_data *sr_class;
-static struct omap_sr_pmic_data *sr_pmic_data;
static struct dentry *sr_dbg_dir;
static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value)
@@ -780,25 +776,6 @@ void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
sr_class->disable(sr, 1);
}
-/**
- * omap_sr_register_pmic() - API to register pmic specific info.
- * @pmic_data: The structure containing pmic specific data.
- *
- * This API is to be called from the PMIC specific code to register with
- * smartreflex driver pmic specific info. Currently the only info required
- * is the smartreflex init on the PMIC side.
- */
-void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data)
-{
- if (!pmic_data) {
- pr_warn("%s: Trying to register NULL PMIC data structure with smartreflex\n",
- __func__);
- return;
- }
-
- sr_pmic_data = pmic_data;
-}
-
/* PM Debug FS entries to enable and disable smartreflex. */
static int omap_sr_autocomp_show(void *data, u64 *val)
{
@@ -1010,8 +987,7 @@ static int omap_sr_remove(struct platform_device *pdev)
if (sr_info->autocomp_active)
sr_stop_vddautocomp(sr_info);
- if (sr_info->dbg_dir)
- debugfs_remove_recursive(sr_info->dbg_dir);
+ debugfs_remove_recursive(sr_info->dbg_dir);
pm_runtime_disable(&pdev->dev);
list_del(&sr_info->node);
@@ -1065,17 +1041,6 @@ static int __init sr_init(void)
{
int ret = 0;
- /*
- * sr_init is a late init. If by then a pmic specific API is not
- * registered either there is no need for anything to be done on
- * the PMIC side or somebody has forgotten to register a PMIC
- * handler. Warn for the second condition.
- */
- if (sr_pmic_data && sr_pmic_data->sr_pmic_init)
- sr_pmic_data->sr_pmic_init();
- else
- pr_warn("%s: No PMIC hook to init smartreflex\n", __func__);
-
ret = platform_driver_register(&smartreflex_driver);
if (ret) {
pr_err("%s: platform driver register failed for SR\n",
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 2012551d93e0..797fab33bb98 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -121,18 +121,20 @@ int ptp_open(struct posix_clock *pc, fmode_t fmode)
long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
{
- struct ptp_clock_caps caps;
- struct ptp_clock_request req;
- struct ptp_sys_offset *sysoff = NULL;
- struct ptp_sys_offset_precise precise_offset;
- struct ptp_pin_desc pd;
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct ptp_sys_offset_extended *extoff = NULL;
+ struct ptp_sys_offset_precise precise_offset;
+ struct system_device_crosststamp xtstamp;
struct ptp_clock_info *ops = ptp->info;
+ struct ptp_sys_offset *sysoff = NULL;
+ struct ptp_system_timestamp sts;
+ struct ptp_clock_request req;
+ struct ptp_clock_caps caps;
struct ptp_clock_time *pct;
+ unsigned int i, pin_index;
+ struct ptp_pin_desc pd;
struct timespec64 ts;
- struct system_device_crosststamp xtstamp;
int enable, err = 0;
- unsigned int i, pin_index;
switch (cmd) {
@@ -211,6 +213,36 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EFAULT;
break;
+ case PTP_SYS_OFFSET_EXTENDED:
+ if (!ptp->info->gettimex64) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+ extoff = memdup_user((void __user *)arg, sizeof(*extoff));
+ if (IS_ERR(extoff)) {
+ err = PTR_ERR(extoff);
+ extoff = NULL;
+ break;
+ }
+ if (extoff->n_samples > PTP_MAX_SAMPLES) {
+ err = -EINVAL;
+ break;
+ }
+ for (i = 0; i < extoff->n_samples; i++) {
+ err = ptp->info->gettimex64(ptp->info, &ts, &sts);
+ if (err)
+ goto out;
+ extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
+ extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
+ extoff->ts[i][1].sec = ts.tv_sec;
+ extoff->ts[i][1].nsec = ts.tv_nsec;
+ extoff->ts[i][2].sec = sts.post_ts.tv_sec;
+ extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
+ }
+ if (copy_to_user((void __user *)arg, extoff, sizeof(*extoff)))
+ err = -EFAULT;
+ break;
+
case PTP_SYS_OFFSET:
sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
if (IS_ERR(sysoff)) {
@@ -228,7 +260,12 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
- ptp->info->gettime64(ptp->info, &ts);
+ if (ops->gettimex64)
+ err = ops->gettimex64(ops, &ts, NULL);
+ else
+ err = ops->gettime64(ops, &ts);
+ if (err)
+ goto out;
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
@@ -281,6 +318,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
}
+out:
+ kfree(extoff);
kfree(sysoff);
return err;
}
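
Each PTP_SYS_OFFSET_EXTENDED sample is a triple: a system timestamp taken just before the PHC read (ts[i][0]), the PHC time itself (ts[i][1]), and a system timestamp taken just after (ts[i][2]). A sketch of how userspace might turn one sample into an offset estimate (estimate_offset() is illustrative, not part of the UAPI):

    static long long pct_ns(struct ptp_clock_time t)
    {
    	return t.sec * 1000000000LL + t.nsec;
    }

    /* PHC-to-system offset, taking the midpoint of the two system reads */
    static long long estimate_offset(const struct ptp_sys_offset_extended *e, int i)
    {
    	long long pre  = pct_ns(e->ts[i][0]);
    	long long phc  = pct_ns(e->ts[i][1]);
    	long long post = pct_ns(e->ts[i][2]);

    	return phc - (pre + post) / 2;	/* uncertainty ~ (post - pre) / 2 */
    }

The tighter pre/post bracketing enabled by gettimex64() is the point of the new ioctl: drivers timestamp immediately around the hardware register read instead of around the whole gettime64() call.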
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 5419a89d300e..8a81eecc0ecd 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -117,7 +117,10 @@ static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp)
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
int err;
- err = ptp->info->gettime64(ptp->info, tp);
+ if (ptp->info->gettimex64)
+ err = ptp->info->gettimex64(ptp->info, tp, NULL);
+ else
+ err = ptp->info->gettime64(ptp->info, tp);
return err;
}
@@ -249,8 +252,10 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
ptp, ptp->pin_attr_groups,
"ptp%d", ptp->index);
- if (IS_ERR(ptp->dev))
+ if (IS_ERR(ptp->dev)) {
+ err = PTR_ERR(ptp->dev);
goto no_device;
+ }
/* Register a new PPS source. */
if (info->pps) {
@@ -261,6 +266,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
pps.owner = info->owner;
ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
if (!ptp->pps_source) {
+ err = -EINVAL;
pr_err("failed to register pps source\n");
goto no_pps;
}
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 27e5dd47a01f..a8f47df0655a 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -88,7 +88,9 @@ config PWM_BCM_IPROC
config PWM_BCM_KONA
tristate "Kona PWM support"
- depends on ARCH_BCM_MOBILE
+ depends on ARCH_BCM_MOBILE || ARCH_BCM_CYGNUS || COMPILE_TEST
+ depends on HAVE_CLK && HAS_IOMEM
+ default ARCH_BCM_MOBILE || ARCH_BCM_CYGNUS
help
Generic PWM framework driver for Broadcom Kona PWM block.
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index db001cba937f..5652f461d994 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -1,9 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2014 Bart Tanghe <bart.tanghe@thomasmore.be>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2.
*/
#include <linux/clk.h>
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index 26ec24e457b1..924d39a797cf 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -1,12 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cirrus Logic CLPS711X PWM driver
- *
- * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * Author: Alexander Shiyan <shc_work@mail.ru>
*/
#include <linux/clk.h>
@@ -48,7 +43,7 @@ static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v)
static unsigned int clps711x_get_duty(struct pwm_device *pwm, unsigned int v)
{
/* Duty cycle 0..15 max */
- return DIV_ROUND_CLOSEST(v * 0xf, pwm_get_period(pwm));
+ return DIV_ROUND_CLOSEST(v * 0xf, pwm->args.period);
}
static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -71,7 +66,7 @@ static int clps711x_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
struct clps711x_chip *priv = to_clps711x_chip(chip);
unsigned int duty;
- if (period_ns != pwm_get_period(pwm))
+ if (period_ns != pwm->args.period)
return -EINVAL;
duty = clps711x_get_duty(pwm, duty_ns);
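
The 0..15 hardware duty field is plain rounding against the fixed period. With an (illustrative) period of 4000 ns and duty_ns = 2000, DIV_ROUND_CLOSEST(2000 * 0xf, 4000) = DIV_ROUND_CLOSEST(30000, 4000) = 8, i.e. 7.5 rounded to the nearest step; the real period always comes from pwm->args.period, which is also why config rejects any other period_ns.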
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 1d5242c9cde0..55a3a363d5be 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -5,17 +5,19 @@
* Derived from pxa PWM driver by eric miao <eric.miao@marvell.com>
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/err.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/io.h>
-#include <linux/pwm.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/slab.h>
/* i.MX1 and i.MX21 share the same PWM function block: */
@@ -23,7 +25,7 @@
#define MX1_PWMS 0x04 /* PWM Sample Register */
#define MX1_PWMP 0x08 /* PWM Period Register */
-#define MX1_PWMC_EN (1 << 4)
+#define MX1_PWMC_EN BIT(4)
/* i.MX27, i.MX31, i.MX35 share the same PWM function block: */
@@ -31,22 +33,62 @@
#define MX3_PWMSR 0x04 /* PWM Status Register */
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
#define MX3_PWMPR 0x10 /* PWM Period Register */
-#define MX3_PWMCR_PRESCALER(x) ((((x) - 1) & 0xFFF) << 4)
-#define MX3_PWMCR_STOPEN (1 << 25)
-#define MX3_PWMCR_DOZEEN (1 << 24)
-#define MX3_PWMCR_WAITEN (1 << 23)
-#define MX3_PWMCR_DBGEN (1 << 22)
-#define MX3_PWMCR_POUTC (1 << 18)
-#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
-#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
-#define MX3_PWMCR_SWR (1 << 3)
-#define MX3_PWMCR_EN (1 << 0)
-#define MX3_PWMSR_FIFOAV_4WORDS 0x4
-#define MX3_PWMSR_FIFOAV_MASK 0x7
+
+#define MX3_PWMCR_FWM GENMASK(27, 26)
+#define MX3_PWMCR_STOPEN BIT(25)
+#define MX3_PWMCR_DOZEN BIT(24)
+#define MX3_PWMCR_WAITEN BIT(23)
+#define MX3_PWMCR_DBGEN BIT(22)
+#define MX3_PWMCR_BCTR BIT(21)
+#define MX3_PWMCR_HCTR BIT(20)
+
+#define MX3_PWMCR_POUTC GENMASK(19, 18)
+#define MX3_PWMCR_POUTC_NORMAL 0
+#define MX3_PWMCR_POUTC_INVERTED 1
+#define MX3_PWMCR_POUTC_OFF 2
+
+#define MX3_PWMCR_CLKSRC GENMASK(17, 16)
+#define MX3_PWMCR_CLKSRC_OFF 0
+#define MX3_PWMCR_CLKSRC_IPG 1
+#define MX3_PWMCR_CLKSRC_IPG_HIGH 2
+#define MX3_PWMCR_CLKSRC_IPG_32K 3
+
+#define MX3_PWMCR_PRESCALER GENMASK(15, 4)
+
+#define MX3_PWMCR_SWR BIT(3)
+
+#define MX3_PWMCR_REPEAT GENMASK(2, 1)
+#define MX3_PWMCR_REPEAT_1X 0
+#define MX3_PWMCR_REPEAT_2X 1
+#define MX3_PWMCR_REPEAT_4X 2
+#define MX3_PWMCR_REPEAT_8X 3
+
+#define MX3_PWMCR_EN BIT(0)
+
+#define MX3_PWMSR_FWE BIT(6)
+#define MX3_PWMSR_CMP BIT(5)
+#define MX3_PWMSR_ROV BIT(4)
+#define MX3_PWMSR_FE BIT(3)
+
+#define MX3_PWMSR_FIFOAV GENMASK(2, 0)
+#define MX3_PWMSR_FIFOAV_EMPTY 0
+#define MX3_PWMSR_FIFOAV_1WORD 1
+#define MX3_PWMSR_FIFOAV_2WORDS 2
+#define MX3_PWMSR_FIFOAV_3WORDS 3
+#define MX3_PWMSR_FIFOAV_4WORDS 4
+
+#define MX3_PWMCR_PRESCALER_SET(x) FIELD_PREP(MX3_PWMCR_PRESCALER, (x) - 1)
+#define MX3_PWMCR_PRESCALER_GET(x) (FIELD_GET(MX3_PWMCR_PRESCALER, \
+ (x)) + 1)
#define MX3_PWM_SWR_LOOP 5
+/* PWMPR register value of 0xffff has the same effect as 0xfffe */
+#define MX3_PWMPR_MAX 0xfffe
+
struct imx_chip {
+ struct clk *clk_ipg;
+
struct clk *clk_per;
void __iomem *mmio_base;
@@ -56,6 +98,87 @@ struct imx_chip {
#define to_imx_chip(chip) container_of(chip, struct imx_chip, chip)
+static int imx_pwm_clk_prepare_enable(struct pwm_chip *chip)
+{
+ struct imx_chip *imx = to_imx_chip(chip);
+ int ret;
+
+ ret = clk_prepare_enable(imx->clk_ipg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(imx->clk_per);
+ if (ret) {
+ clk_disable_unprepare(imx->clk_ipg);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void imx_pwm_clk_disable_unprepare(struct pwm_chip *chip)
+{
+ struct imx_chip *imx = to_imx_chip(chip);
+
+ clk_disable_unprepare(imx->clk_per);
+ clk_disable_unprepare(imx->clk_ipg);
+}
+
+static void imx_pwm_get_state(struct pwm_chip *chip,
+ struct pwm_device *pwm, struct pwm_state *state)
+{
+ struct imx_chip *imx = to_imx_chip(chip);
+ u32 period, prescaler, pwm_clk, val;
+ u64 tmp;
+ int ret;
+
+ ret = imx_pwm_clk_prepare_enable(chip);
+ if (ret < 0)
+ return;
+
+ val = readl(imx->mmio_base + MX3_PWMCR);
+
+ if (val & MX3_PWMCR_EN) {
+ state->enabled = true;
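+ /*
+ * Take an extra clock reference: the PWM is running, so the
+ * clocks must stay enabled after the balancing disable below.
+ */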
+ ret = imx_pwm_clk_prepare_enable(chip);
+ if (ret)
+ return;
+ } else {
+ state->enabled = false;
+ }
+
+ switch (FIELD_GET(MX3_PWMCR_POUTC, val)) {
+ case MX3_PWMCR_POUTC_NORMAL:
+ state->polarity = PWM_POLARITY_NORMAL;
+ break;
+ case MX3_PWMCR_POUTC_INVERTED:
+ state->polarity = PWM_POLARITY_INVERSED;
+ break;
+ default:
+ dev_warn(chip->dev, "can't set polarity, output disconnected\n");
+ }
+
+ prescaler = MX3_PWMCR_PRESCALER_GET(val);
+ pwm_clk = clk_get_rate(imx->clk_per);
+ pwm_clk = DIV_ROUND_CLOSEST_ULL(pwm_clk, prescaler);
+ val = readl(imx->mmio_base + MX3_PWMPR);
+ period = val >= MX3_PWMPR_MAX ? MX3_PWMPR_MAX : val;
+
+ /* PWMOUT (Hz) = PWMCLK / (PWMPR + 2) */
+ tmp = NSEC_PER_SEC * (u64)(period + 2);
+ state->period = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
+
+ /* PWMSAR can be read only if PWM is enabled */
+ if (state->enabled) {
+ val = readl(imx->mmio_base + MX3_PWMSAR);
+ tmp = NSEC_PER_SEC * (u64)(val);
+ state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
+ } else {
+ state->duty_cycle = 0;
+ }
+
+ imx_pwm_clk_disable_unprepare(chip);
+}
+
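
imx_pwm_get_state() above reverses the hardware formula PWMOUT (Hz) = PWMCLK / (PWMPR + 2). With illustrative numbers: clk_per at 66 MHz and prescaler 1 gives pwm_clk = 66,000,000 Hz; a PWMPR of 659 reads back as period = 10^9 * (659 + 2) / 66,000,000 ≈ 10,015 ns, and a PWMSAR of 330 as duty_cycle = 10^9 * 330 / 66,000,000 = 5,000 ns.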
static int imx_pwm_config_v1(struct pwm_chip *chip,
struct pwm_device *pwm, int duty_ns, int period_ns)
{
@@ -91,7 +214,7 @@ static int imx_pwm_enable_v1(struct pwm_chip *chip, struct pwm_device *pwm)
u32 val;
int ret;
- ret = clk_prepare_enable(imx->clk_per);
+ ret = imx_pwm_clk_prepare_enable(chip);
if (ret < 0)
return ret;
@@ -111,7 +234,7 @@ static void imx_pwm_disable_v1(struct pwm_chip *chip, struct pwm_device *pwm)
val &= ~MX1_PWMC_EN;
writel(val, imx->mmio_base + MX1_PWMC);
- clk_disable_unprepare(imx->clk_per);
+ imx_pwm_clk_disable_unprepare(chip);
}
static void imx_pwm_sw_reset(struct pwm_chip *chip)
@@ -142,14 +265,14 @@ static void imx_pwm_wait_fifo_slot(struct pwm_chip *chip,
u32 sr;
sr = readl(imx->mmio_base + MX3_PWMSR);
- fifoav = sr & MX3_PWMSR_FIFOAV_MASK;
+ fifoav = FIELD_GET(MX3_PWMSR_FIFOAV, sr);
if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) {
period_ms = DIV_ROUND_UP(pwm_get_period(pwm),
NSEC_PER_MSEC);
msleep(period_ms);
sr = readl(imx->mmio_base + MX3_PWMSR);
- if (fifoav == (sr & MX3_PWMSR_FIFOAV_MASK))
+ if (fifoav == FIELD_GET(MX3_PWMSR_FIFOAV, sr))
dev_warn(dev, "there is no free FIFO slot\n");
}
}
@@ -197,7 +320,7 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm,
if (cstate.enabled) {
imx_pwm_wait_fifo_slot(chip, pwm);
} else {
- ret = clk_prepare_enable(imx->clk_per);
+ ret = imx_pwm_clk_prepare_enable(chip);
if (ret)
return ret;
@@ -207,19 +330,20 @@ static int imx_pwm_apply_v2(struct pwm_chip *chip, struct pwm_device *pwm,
writel(duty_cycles, imx->mmio_base + MX3_PWMSAR);
writel(period_cycles, imx->mmio_base + MX3_PWMPR);
- cr = MX3_PWMCR_PRESCALER(prescale) |
- MX3_PWMCR_STOPEN | MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
- MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH |
- MX3_PWMCR_EN;
+ cr = MX3_PWMCR_PRESCALER_SET(prescale) |
+ MX3_PWMCR_STOPEN | MX3_PWMCR_DOZEN | MX3_PWMCR_WAITEN |
+ FIELD_PREP(MX3_PWMCR_CLKSRC, MX3_PWMCR_CLKSRC_IPG_HIGH) |
+ MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
if (state->polarity == PWM_POLARITY_INVERSED)
- cr |= MX3_PWMCR_POUTC;
+ cr |= FIELD_PREP(MX3_PWMCR_POUTC,
+ MX3_PWMCR_POUTC_INVERTED);
writel(cr, imx->mmio_base + MX3_PWMCR);
} else if (cstate.enabled) {
writel(0, imx->mmio_base + MX3_PWMCR);
- clk_disable_unprepare(imx->clk_per);
+ imx_pwm_clk_disable_unprepare(chip);
}
return 0;
@@ -234,6 +358,7 @@ static const struct pwm_ops imx_pwm_ops_v1 = {
static const struct pwm_ops imx_pwm_ops_v2 = {
.apply = imx_pwm_apply_v2,
+ .get_state = imx_pwm_get_state,
.owner = THIS_MODULE,
};
@@ -276,6 +401,13 @@ static int imx_pwm_probe(struct platform_device *pdev)
if (imx == NULL)
return -ENOMEM;
+ imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(imx->clk_ipg)) {
+ dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
+ PTR_ERR(imx->clk_ipg));
+ return PTR_ERR(imx->clk_ipg);
+ }
+
imx->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(imx->clk_per)) {
dev_err(&pdev->dev, "getting per clock failed with %ld\n",
@@ -315,6 +447,8 @@ static int imx_pwm_remove(struct platform_device *pdev)
if (imx == NULL)
return -ENODEV;
+ imx_pwm_clk_disable_unprepare(&imx->chip);
+
return pwmchip_remove(&imx->chip);
}
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index d7f5f7de030d..475918d9f543 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -296,7 +296,6 @@ static int lpc18xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
set_bit(event, &lpc18xx_pwm->event_map);
lpc18xx_data->duty_event = event;
- lpc18xx_pwm_config_duty(chip, pwm, pwm_get_duty_cycle(pwm));
return 0;
}
@@ -306,8 +305,6 @@ static void lpc18xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
- pwm_disable(pwm);
- pwm_set_duty_cycle(pwm, 0);
clear_bit(lpc18xx_data->duty_event, &lpc18xx_pwm->event_map);
}
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index fd86446e499b..28f55248eb90 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -328,7 +328,7 @@ static int pm8607_regulator_dt_init(struct platform_device *pdev,
return -ENODEV;
}
for_each_child_of_node(nproot, np) {
- if (!of_node_cmp(np->name, info->desc.name)) {
+ if (of_node_name_eq(np, info->desc.name)) {
config->init_data =
of_get_regulator_init_data(&pdev->dev, np,
&info->desc);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 926cee0d0b5f..ee60a222f5eb 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -567,6 +567,16 @@ config REGULATOR_MC13892
Say y here to support the regulators found on the Freescale MC13892
PMIC.
+config REGULATOR_MCP16502
+ tristate "Microchip MCP16502 PMIC"
+ depends on I2C && OF
+ select REGMAP_I2C
+ help
+ Say y here to support the MCP16502 PMIC. This driver supports
+ basic operations (get/set voltage, get/set operating mode)
+ through the regulator interface. In addition, it enables
+ suspend-to-RAM/standby transitions.
+
config REGULATOR_MT6311
tristate "MediaTek MT6311 PMIC"
depends on I2C
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 72488ef11b8a..b12e1c9b2118 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -74,6 +74,7 @@ obj-$(CONFIG_REGULATOR_MAX77802) += max77802-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
+obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o
obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o
obj-$(CONFIG_REGULATOR_MT6380) += mt6380-regulator.o
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
index 43fda8b4455a..603db77723b6 100644
--- a/drivers/regulator/act8945a-regulator.c
+++ b/drivers/regulator/act8945a-regulator.c
@@ -15,31 +15,41 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <dt-bindings/regulator/active-semi,8945a-regulator.h>
/**
* ACT8945A Global Register Map.
*/
#define ACT8945A_SYS_MODE 0x00
#define ACT8945A_SYS_CTRL 0x01
+#define ACT8945A_SYS_UNLK_REGS 0x0b
#define ACT8945A_DCDC1_VSET1 0x20
#define ACT8945A_DCDC1_VSET2 0x21
#define ACT8945A_DCDC1_CTRL 0x22
+#define ACT8945A_DCDC1_SUS 0x24
#define ACT8945A_DCDC2_VSET1 0x30
#define ACT8945A_DCDC2_VSET2 0x31
#define ACT8945A_DCDC2_CTRL 0x32
+#define ACT8945A_DCDC2_SUS 0x34
#define ACT8945A_DCDC3_VSET1 0x40
#define ACT8945A_DCDC3_VSET2 0x41
#define ACT8945A_DCDC3_CTRL 0x42
+#define ACT8945A_DCDC3_SUS 0x44
#define ACT8945A_LDO1_VSET 0x50
#define ACT8945A_LDO1_CTRL 0x51
+#define ACT8945A_LDO1_SUS 0x52
#define ACT8945A_LDO2_VSET 0x54
#define ACT8945A_LDO2_CTRL 0x55
+#define ACT8945A_LDO2_SUS 0x56
#define ACT8945A_LDO3_VSET 0x60
#define ACT8945A_LDO3_CTRL 0x61
+#define ACT8945A_LDO3_SUS 0x62
#define ACT8945A_LDO4_VSET 0x64
#define ACT8945A_LDO4_CTRL 0x65
+#define ACT8945A_LDO4_SUS 0x66
/**
* Field Definitions.
@@ -60,7 +70,12 @@ enum {
ACT8945A_ID_LDO2,
ACT8945A_ID_LDO3,
ACT8945A_ID_LDO4,
- ACT8945A_REG_NUM,
+ ACT8945A_ID_MAX,
+};
+
+struct act8945a_pmic {
+ struct regmap *regmap;
+ u32 op_mode[ACT8945A_ID_MAX];
};
static const struct regulator_linear_range act8945a_voltage_ranges[] = {
@@ -69,6 +84,143 @@ static const struct regulator_linear_range act8945a_voltage_ranges[] = {
REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
};
+static int act8945a_set_suspend_state(struct regulator_dev *rdev, bool enable)
+{
+ struct regmap *regmap = rdev->regmap;
+ int id = rdev->desc->id, reg, val;
+
+ switch (id) {
+ case ACT8945A_ID_DCDC1:
+ reg = ACT8945A_DCDC1_SUS;
+ val = 0xa8;
+ break;
+ case ACT8945A_ID_DCDC2:
+ reg = ACT8945A_DCDC2_SUS;
+ val = 0xa8;
+ break;
+ case ACT8945A_ID_DCDC3:
+ reg = ACT8945A_DCDC3_SUS;
+ val = 0xa8;
+ break;
+ case ACT8945A_ID_LDO1:
+ reg = ACT8945A_LDO1_SUS;
+ val = 0xe8;
+ break;
+ case ACT8945A_ID_LDO2:
+ reg = ACT8945A_LDO2_SUS;
+ val = 0xe8;
+ break;
+ case ACT8945A_ID_LDO3:
+ reg = ACT8945A_LDO3_SUS;
+ val = 0xe8;
+ break;
+ case ACT8945A_ID_LDO4:
+ reg = ACT8945A_LDO4_SUS;
+ val = 0xe8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (enable)
+ val |= BIT(4);
+
+ /*
+ * Ask the PMIC to enable/disable this output when entering hibernate
+ * mode.
+ */
+ return regmap_write(regmap, reg, val);
+}
+
+static int act8945a_set_suspend_enable(struct regulator_dev *rdev)
+{
+ return act8945a_set_suspend_state(rdev, true);
+}
+
+static int act8945a_set_suspend_disable(struct regulator_dev *rdev)
+{
+ return act8945a_set_suspend_state(rdev, false);
+}
+
+static unsigned int act8945a_of_map_mode(unsigned int mode)
+{
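+	/*
+	 * Translate the mode values from the act8945a devicetree binding
+	 * header into the regulator core's REGULATOR_MODE_* constants.
+	 */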
+ switch (mode) {
+ case ACT8945A_REGULATOR_MODE_FIXED:
+ case ACT8945A_REGULATOR_MODE_NORMAL:
+ return REGULATOR_MODE_NORMAL;
+ case ACT8945A_REGULATOR_MODE_LOWPOWER:
+ return REGULATOR_MODE_STANDBY;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev);
+ struct regmap *regmap = rdev->regmap;
+ int id = rdev->desc->id;
+ int reg, ret, val = 0;
+
+ switch (id) {
+ case ACT8945A_ID_DCDC1:
+ reg = ACT8945A_DCDC1_CTRL;
+ break;
+ case ACT8945A_ID_DCDC2:
+ reg = ACT8945A_DCDC2_CTRL;
+ break;
+ case ACT8945A_ID_DCDC3:
+ reg = ACT8945A_DCDC3_CTRL;
+ break;
+ case ACT8945A_ID_LDO1:
+ reg = ACT8945A_LDO1_SUS;
+ break;
+ case ACT8945A_ID_LDO2:
+ reg = ACT8945A_LDO2_SUS;
+ break;
+ case ACT8945A_ID_LDO3:
+ reg = ACT8945A_LDO3_SUS;
+ break;
+ case ACT8945A_ID_LDO4:
+ reg = ACT8945A_LDO4_SUS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
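+	/*
+	 * BIT(5) of the register selected above has opposite meanings for
+	 * the bucks and the LDOs: it selects the normal mode for DCDC1-3
+	 * but the low-power mode for the LDOs, hence the inverted tests
+	 * below.
+	 */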
+ switch (mode) {
+ case REGULATOR_MODE_STANDBY:
+ if (rdev->desc->id > ACT8945A_ID_DCDC3)
+ val = BIT(5);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ if (rdev->desc->id <= ACT8945A_ID_DCDC3)
+ val = BIT(5);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(regmap, reg, BIT(5), val);
+ if (ret)
+ return ret;
+
+ act8945a->op_mode[id] = mode;
+
+ return 0;
+}
+
+static unsigned int act8945a_get_mode(struct regulator_dev *rdev)
+{
+ struct act8945a_pmic *act8945a = rdev_get_drvdata(rdev);
+ int id = rdev->desc->id;
+
+ if (id < ACT8945A_ID_DCDC1 || id >= ACT8945A_ID_MAX)
+ return -EINVAL;
+
+ return act8945a->op_mode[id];
+}
+
static const struct regulator_ops act8945a_ops = {
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
@@ -76,7 +228,11 @@ static const struct regulator_ops act8945a_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
+ .set_mode = act8945a_set_mode,
+ .get_mode = act8945a_get_mode,
.is_enabled = regulator_is_enabled_regmap,
+ .set_suspend_enable = act8945a_set_suspend_enable,
+ .set_suspend_disable = act8945a_set_suspend_disable,
};
#define ACT89xx_REG(_name, _family, _id, _vsel_reg, _supply) \
@@ -84,6 +240,7 @@ static const struct regulator_ops act8945a_ops = {
.name = _name, \
.supply_name = _supply, \
.of_match = of_match_ptr("REG_"#_id), \
+ .of_map_mode = act8945a_of_map_mode, \
.regulators_node = of_match_ptr("regulators"), \
.id = _family##_ID_##_id, \
.type = REGULATOR_VOLTAGE, \
@@ -122,10 +279,22 @@ static int act8945a_pmic_probe(struct platform_device *pdev)
{
struct regulator_config config = { };
const struct regulator_desc *regulators;
+ struct act8945a_pmic *act8945a;
struct regulator_dev *rdev;
int i, num_regulators;
bool voltage_select;
+ act8945a = devm_kzalloc(&pdev->dev, sizeof(*act8945a), GFP_KERNEL);
+ if (!act8945a)
+ return -ENOMEM;
+
+ act8945a->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!act8945a->regmap) {
+ dev_err(&pdev->dev,
+ "could not retrieve regmap from parent device\n");
+ return -EINVAL;
+ }
+
voltage_select = of_property_read_bool(pdev->dev.parent->of_node,
"active-semi,vsel-high");
@@ -139,8 +308,10 @@ static int act8945a_pmic_probe(struct platform_device *pdev)
config.dev = &pdev->dev;
config.dev->of_node = pdev->dev.parent->of_node;
+ config.driver_data = act8945a;
for (i = 0; i < num_regulators; i++) {
- rdev = devm_regulator_register(&pdev->dev, &regulators[i], &config);
+ rdev = devm_regulator_register(&pdev->dev, &regulators[i],
+ &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev,
"failed to register %s regulator\n",
@@ -149,14 +320,42 @@ static int act8945a_pmic_probe(struct platform_device *pdev)
}
}
- return 0;
+ platform_set_drvdata(pdev, act8945a);
+
+ /* Unlock expert registers. */
+ return regmap_write(act8945a->regmap, ACT8945A_SYS_UNLK_REGS, 0xef);
+}
+
+static int __maybe_unused act8945a_suspend(struct device *pdev)
+{
+ struct act8945a_pmic *act8945a = dev_get_drvdata(pdev);
+
+ /*
+ * Ask the PMIC to enter the suspend mode on the next PWRHLD
+ * transition.
+ */
+ return regmap_write(act8945a->regmap, ACT8945A_SYS_CTRL, 0x42);
+}
+
+static SIMPLE_DEV_PM_OPS(act8945a_pm, act8945a_suspend, NULL);
+
+static void act8945a_pmic_shutdown(struct platform_device *pdev)
+{
+ struct act8945a_pmic *act8945a = platform_get_drvdata(pdev);
+
+ /*
+ * Ask the PMIC to shutdown everything on the next PWRHLD transition.
+ */
+ regmap_write(act8945a->regmap, ACT8945A_SYS_CTRL, 0x0);
}
static struct platform_driver act8945a_pmic_driver = {
.driver = {
.name = "act8945a-regulator",
+ .pm = &act8945a_pm,
},
.probe = act8945a_pmic_probe,
+ .shutdown = act8945a_pmic_shutdown,
};
module_platform_driver(act8945a_pmic_driver);
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
index 9a72eae4926d..b9a93049e41e 100644
--- a/drivers/regulator/arizona-ldo1.c
+++ b/drivers/regulator/arizona-ldo1.c
@@ -283,9 +283,6 @@ static int arizona_ldo1_common_init(struct platform_device *pdev,
of_node_put(config.of_node);
if (IS_ERR(ldo1->regulator)) {
- if (config.ena_gpiod)
- gpiod_put(config.ena_gpiod);
-
ret = PTR_ERR(ldo1->regulator);
dev_err(&pdev->dev, "Failed to register LDO1 supply: %d\n",
ret);
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index 565a71343a8e..f7fe218bb3e4 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* AS3711 PMIC regulator driver, using DCDC Step Down and LDO supplies
*
* Copyright (C) 2012 Renesas Electronics Corporation
* Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License as
- * published by the Free Software Foundation
*/
#include <linux/err.h>
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index a3734039a86a..48af859fd053 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -13,31 +13,262 @@
* GNU General Public License for more details.
*/
+#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
+#include <linux/mfd/axp20x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/mfd/axp20x.h>
#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
+#define AXP20X_GPIO0_FUNC_MASK GENMASK(3, 0)
+#define AXP20X_GPIO1_FUNC_MASK GENMASK(3, 0)
+
#define AXP20X_IO_ENABLED 0x03
#define AXP20X_IO_DISABLED 0x07
+#define AXP20X_WORKMODE_DCDC2_MASK BIT_MASK(2)
+#define AXP20X_WORKMODE_DCDC3_MASK BIT_MASK(1)
+
+#define AXP20X_FREQ_DCDC_MASK GENMASK(3, 0)
+
+#define AXP20X_VBUS_IPSOUT_MGMT_MASK BIT_MASK(2)
+
+#define AXP20X_DCDC2_V_OUT_MASK GENMASK(5, 0)
+#define AXP20X_DCDC3_V_OUT_MASK	GENMASK(6, 0)
+#define AXP20X_LDO24_V_OUT_MASK GENMASK(7, 4)
+#define AXP20X_LDO3_V_OUT_MASK	GENMASK(6, 0)
+#define AXP20X_LDO4_V_OUT_MASK	GENMASK(3, 0)
+#define AXP20X_LDO5_V_OUT_MASK GENMASK(7, 4)
+
+#define AXP20X_PWR_OUT_EXTEN_MASK BIT_MASK(0)
+#define AXP20X_PWR_OUT_DCDC3_MASK BIT_MASK(1)
+#define AXP20X_PWR_OUT_LDO2_MASK BIT_MASK(2)
+#define AXP20X_PWR_OUT_LDO4_MASK BIT_MASK(3)
+#define AXP20X_PWR_OUT_DCDC2_MASK BIT_MASK(4)
+#define AXP20X_PWR_OUT_LDO3_MASK BIT_MASK(6)
+
+#define AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE_MASK BIT_MASK(0)
+#define AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE(x) \
+ ((x) << 0)
+#define AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE_MASK BIT_MASK(1)
+#define AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(x) \
+ ((x) << 1)
+#define AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN_MASK BIT_MASK(2)
+#define AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN BIT(2)
+#define AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN_MASK BIT_MASK(3)
+#define AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN BIT(3)
+
+#define AXP20X_LDO4_V_OUT_1250mV_START 0x0
+#define AXP20X_LDO4_V_OUT_1250mV_STEPS 0
+#define AXP20X_LDO4_V_OUT_1250mV_END \
+ (AXP20X_LDO4_V_OUT_1250mV_START + AXP20X_LDO4_V_OUT_1250mV_STEPS)
+#define AXP20X_LDO4_V_OUT_1300mV_START 0x1
+#define AXP20X_LDO4_V_OUT_1300mV_STEPS 7
+#define AXP20X_LDO4_V_OUT_1300mV_END \
+ (AXP20X_LDO4_V_OUT_1300mV_START + AXP20X_LDO4_V_OUT_1300mV_STEPS)
+#define AXP20X_LDO4_V_OUT_2500mV_START 0x9
+#define AXP20X_LDO4_V_OUT_2500mV_STEPS 0
+#define AXP20X_LDO4_V_OUT_2500mV_END \
+ (AXP20X_LDO4_V_OUT_2500mV_START + AXP20X_LDO4_V_OUT_2500mV_STEPS)
+#define AXP20X_LDO4_V_OUT_2700mV_START 0xa
+#define AXP20X_LDO4_V_OUT_2700mV_STEPS 1
+#define AXP20X_LDO4_V_OUT_2700mV_END \
+ (AXP20X_LDO4_V_OUT_2700mV_START + AXP20X_LDO4_V_OUT_2700mV_STEPS)
+#define AXP20X_LDO4_V_OUT_3000mV_START 0xc
+#define AXP20X_LDO4_V_OUT_3000mV_STEPS 3
+#define AXP20X_LDO4_V_OUT_3000mV_END \
+ (AXP20X_LDO4_V_OUT_3000mV_START + AXP20X_LDO4_V_OUT_3000mV_STEPS)
+#define AXP20X_LDO4_V_OUT_NUM_VOLTAGES 16
+
#define AXP22X_IO_ENABLED 0x03
#define AXP22X_IO_DISABLED 0x04
-#define AXP20X_WORKMODE_DCDC2_MASK BIT(2)
-#define AXP20X_WORKMODE_DCDC3_MASK BIT(1)
-#define AXP22X_WORKMODE_DCDCX_MASK(x) BIT(x)
-
-#define AXP20X_FREQ_DCDC_MASK 0x0f
+#define AXP22X_WORKMODE_DCDCX_MASK(x) BIT_MASK(x)
#define AXP22X_MISC_N_VBUSEN_FUNC BIT(4)
+#define AXP22X_DCDC1_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_DCDC2_V_OUT_MASK GENMASK(5, 0)
+#define AXP22X_DCDC3_V_OUT_MASK GENMASK(5, 0)
+#define AXP22X_DCDC4_V_OUT_MASK GENMASK(5, 0)
+#define AXP22X_DCDC5_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_DC5LDO_V_OUT_MASK GENMASK(2, 0)
+#define AXP22X_ALDO1_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_ALDO2_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_ALDO3_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_DLDO1_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_DLDO2_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_DLDO3_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_DLDO4_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_ELDO1_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_ELDO2_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_ELDO3_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_LDO_IO0_V_OUT_MASK GENMASK(4, 0)
+#define AXP22X_LDO_IO1_V_OUT_MASK GENMASK(4, 0)
+
+#define AXP22X_PWR_OUT_DC5LDO_MASK BIT_MASK(0)
+#define AXP22X_PWR_OUT_DCDC1_MASK BIT_MASK(1)
+#define AXP22X_PWR_OUT_DCDC2_MASK BIT_MASK(2)
+#define AXP22X_PWR_OUT_DCDC3_MASK BIT_MASK(3)
+#define AXP22X_PWR_OUT_DCDC4_MASK BIT_MASK(4)
+#define AXP22X_PWR_OUT_DCDC5_MASK BIT_MASK(5)
+#define AXP22X_PWR_OUT_ALDO1_MASK BIT_MASK(6)
+#define AXP22X_PWR_OUT_ALDO2_MASK BIT_MASK(7)
+
+#define AXP22X_PWR_OUT_SW_MASK BIT_MASK(6)
+#define AXP22X_PWR_OUT_DC1SW_MASK BIT_MASK(7)
+
+#define AXP22X_PWR_OUT_ELDO1_MASK BIT_MASK(0)
+#define AXP22X_PWR_OUT_ELDO2_MASK BIT_MASK(1)
+#define AXP22X_PWR_OUT_ELDO3_MASK BIT_MASK(2)
+#define AXP22X_PWR_OUT_DLDO1_MASK BIT_MASK(3)
+#define AXP22X_PWR_OUT_DLDO2_MASK BIT_MASK(4)
+#define AXP22X_PWR_OUT_DLDO3_MASK BIT_MASK(5)
+#define AXP22X_PWR_OUT_DLDO4_MASK BIT_MASK(6)
+#define AXP22X_PWR_OUT_ALDO3_MASK BIT_MASK(7)
+
+#define AXP803_PWR_OUT_DCDC1_MASK BIT_MASK(0)
+#define AXP803_PWR_OUT_DCDC2_MASK BIT_MASK(1)
+#define AXP803_PWR_OUT_DCDC3_MASK BIT_MASK(2)
+#define AXP803_PWR_OUT_DCDC4_MASK BIT_MASK(3)
+#define AXP803_PWR_OUT_DCDC5_MASK BIT_MASK(4)
+#define AXP803_PWR_OUT_DCDC6_MASK BIT_MASK(5)
+
+#define AXP803_PWR_OUT_FLDO1_MASK BIT_MASK(2)
+#define AXP803_PWR_OUT_FLDO2_MASK BIT_MASK(3)
+
+#define AXP803_DCDC1_V_OUT_MASK GENMASK(4, 0)
+#define AXP803_DCDC2_V_OUT_MASK GENMASK(6, 0)
+#define AXP803_DCDC3_V_OUT_MASK GENMASK(6, 0)
+#define AXP803_DCDC4_V_OUT_MASK GENMASK(6, 0)
+#define AXP803_DCDC5_V_OUT_MASK GENMASK(6, 0)
+#define AXP803_DCDC6_V_OUT_MASK GENMASK(6, 0)
+
+#define AXP803_FLDO1_V_OUT_MASK GENMASK(3, 0)
+#define AXP803_FLDO2_V_OUT_MASK GENMASK(3, 0)
+
+#define AXP803_DCDC23_POLYPHASE_DUAL BIT(6)
+#define AXP803_DCDC56_POLYPHASE_DUAL BIT(5)
+
+#define AXP803_DCDC234_500mV_START 0x00
+#define AXP803_DCDC234_500mV_STEPS 70
+#define AXP803_DCDC234_500mV_END \
+ (AXP803_DCDC234_500mV_START + AXP803_DCDC234_500mV_STEPS)
+#define AXP803_DCDC234_1220mV_START 0x47
+#define AXP803_DCDC234_1220mV_STEPS 4
+#define AXP803_DCDC234_1220mV_END \
+ (AXP803_DCDC234_1220mV_START + AXP803_DCDC234_1220mV_STEPS)
+#define AXP803_DCDC234_NUM_VOLTAGES 76
+
+#define AXP803_DCDC5_800mV_START 0x00
+#define AXP803_DCDC5_800mV_STEPS 32
+#define AXP803_DCDC5_800mV_END \
+ (AXP803_DCDC5_800mV_START + AXP803_DCDC5_800mV_STEPS)
+#define AXP803_DCDC5_1140mV_START 0x21
+#define AXP803_DCDC5_1140mV_STEPS 35
+#define AXP803_DCDC5_1140mV_END \
+ (AXP803_DCDC5_1140mV_START + AXP803_DCDC5_1140mV_STEPS)
+#define AXP803_DCDC5_NUM_VOLTAGES 68
+
+#define AXP803_DCDC6_600mV_START 0x00
+#define AXP803_DCDC6_600mV_STEPS 50
+#define AXP803_DCDC6_600mV_END \
+ (AXP803_DCDC6_600mV_START + AXP803_DCDC6_600mV_STEPS)
+#define AXP803_DCDC6_1120mV_START 0x33
+#define AXP803_DCDC6_1120mV_STEPS 20
+#define AXP803_DCDC6_1120mV_END \
+ (AXP803_DCDC6_1120mV_START + AXP803_DCDC6_1120mV_STEPS)
+#define AXP803_DCDC6_NUM_VOLTAGES 72
+
+#define AXP803_DLDO2_700mV_START 0x00
+#define AXP803_DLDO2_700mV_STEPS 26
+#define AXP803_DLDO2_700mV_END \
+ (AXP803_DLDO2_700mV_START + AXP803_DLDO2_700mV_STEPS)
+#define AXP803_DLDO2_3400mV_START 0x1b
+#define AXP803_DLDO2_3400mV_STEPS 4
+#define AXP803_DLDO2_3400mV_END \
+ (AXP803_DLDO2_3400mV_START + AXP803_DLDO2_3400mV_STEPS)
+#define AXP803_DLDO2_NUM_VOLTAGES 32
+
+#define AXP806_DCDCA_V_CTRL_MASK GENMASK(6, 0)
+#define AXP806_DCDCB_V_CTRL_MASK GENMASK(4, 0)
+#define AXP806_DCDCC_V_CTRL_MASK GENMASK(6, 0)
+#define AXP806_DCDCD_V_CTRL_MASK GENMASK(5, 0)
+#define AXP806_DCDCE_V_CTRL_MASK GENMASK(4, 0)
+#define AXP806_ALDO1_V_CTRL_MASK GENMASK(4, 0)
+#define AXP806_ALDO2_V_CTRL_MASK GENMASK(4, 0)
+#define AXP806_ALDO3_V_CTRL_MASK GENMASK(4, 0)
+#define AXP806_BLDO1_V_CTRL_MASK GENMASK(3, 0)
+#define AXP806_BLDO2_V_CTRL_MASK GENMASK(3, 0)
+#define AXP806_BLDO3_V_CTRL_MASK GENMASK(3, 0)
+#define AXP806_BLDO4_V_CTRL_MASK GENMASK(3, 0)
+#define AXP806_CLDO1_V_CTRL_MASK GENMASK(4, 0)
+#define AXP806_CLDO2_V_CTRL_MASK GENMASK(4, 0)
+#define AXP806_CLDO3_V_CTRL_MASK GENMASK(4, 0)
+
+#define AXP806_PWR_OUT_DCDCA_MASK BIT_MASK(0)
+#define AXP806_PWR_OUT_DCDCB_MASK BIT_MASK(1)
+#define AXP806_PWR_OUT_DCDCC_MASK BIT_MASK(2)
+#define AXP806_PWR_OUT_DCDCD_MASK BIT_MASK(3)
+#define AXP806_PWR_OUT_DCDCE_MASK BIT_MASK(4)
+#define AXP806_PWR_OUT_ALDO1_MASK BIT_MASK(5)
+#define AXP806_PWR_OUT_ALDO2_MASK BIT_MASK(6)
+#define AXP806_PWR_OUT_ALDO3_MASK BIT_MASK(7)
+#define AXP806_PWR_OUT_BLDO1_MASK BIT_MASK(0)
+#define AXP806_PWR_OUT_BLDO2_MASK BIT_MASK(1)
+#define AXP806_PWR_OUT_BLDO3_MASK BIT_MASK(2)
+#define AXP806_PWR_OUT_BLDO4_MASK BIT_MASK(3)
+#define AXP806_PWR_OUT_CLDO1_MASK BIT_MASK(4)
+#define AXP806_PWR_OUT_CLDO2_MASK BIT_MASK(5)
+#define AXP806_PWR_OUT_CLDO3_MASK BIT_MASK(6)
+#define AXP806_PWR_OUT_SW_MASK BIT_MASK(7)
+
+#define AXP806_DCDCAB_POLYPHASE_DUAL 0x40
+#define AXP806_DCDCABC_POLYPHASE_TRI 0x80
+#define AXP806_DCDCABC_POLYPHASE_MASK GENMASK(7, 6)
+
+#define AXP806_DCDCDE_POLYPHASE_DUAL BIT(5)
+
+#define AXP806_DCDCA_600mV_START 0x00
+#define AXP806_DCDCA_600mV_STEPS 50
+#define AXP806_DCDCA_600mV_END \
+ (AXP806_DCDCA_600mV_START + AXP806_DCDCA_600mV_STEPS)
+#define AXP806_DCDCA_1120mV_START 0x33
+#define AXP806_DCDCA_1120mV_STEPS 20
+#define AXP806_DCDCA_1120mV_END \
+ (AXP806_DCDCA_1120mV_START + AXP806_DCDCA_1120mV_STEPS)
+#define AXP806_DCDCA_NUM_VOLTAGES 72
+
+#define AXP806_DCDCD_600mV_START 0x00
+#define AXP806_DCDCD_600mV_STEPS 45
+#define AXP806_DCDCD_600mV_END \
+ (AXP806_DCDCD_600mV_START + AXP806_DCDCD_600mV_STEPS)
+#define AXP806_DCDCD_1600mV_START 0x2e
+#define AXP806_DCDCD_1600mV_STEPS 17
+#define AXP806_DCDCD_1600mV_END \
+ (AXP806_DCDCD_1600mV_START + AXP806_DCDCD_1600mV_STEPS)
+#define AXP806_DCDCD_NUM_VOLTAGES 64
+
+#define AXP809_DCDC4_600mV_START 0x00
+#define AXP809_DCDC4_600mV_STEPS 47
+#define AXP809_DCDC4_600mV_END \
+ (AXP809_DCDC4_600mV_START + AXP809_DCDC4_600mV_STEPS)
+#define AXP809_DCDC4_1800mV_START 0x30
+#define AXP809_DCDC4_1800mV_STEPS 8
+#define AXP809_DCDC4_1800mV_END \
+ (AXP809_DCDC4_1800mV_START + AXP809_DCDC4_1800mV_STEPS)
+#define AXP809_DCDC4_NUM_VOLTAGES 57
+
+#define AXP813_DCDC7_V_OUT_MASK GENMASK(6, 0)
+
+#define AXP813_PWR_OUT_DCDC7_MASK BIT_MASK(6)
+
#define AXP_DESC_IO(_family, _id, _match, _supply, _min, _max, _step, _vreg, \
_vmask, _ereg, _emask, _enable_val, _disable_val) \
[_family##_##_id] = { \
@@ -128,6 +359,133 @@
.ops = &axp20x_ops_range, \
}
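+/* Supported slew rates in uV/us, fastest first; the array index is the
+ * value programmed into the rate field.
+ */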
+static const int axp209_dcdc2_ldo3_slew_rates[] = {
+ 1600,
+ 800,
+};
+
+static int axp20x_set_ramp_delay(struct regulator_dev *rdev, int ramp)
+{
+ struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
+ const struct regulator_desc *desc = rdev->desc;
+ u8 reg, mask, enable, cfg = 0xff;
+ const int *slew_rates;
+ int rate_count = 0;
+
+ if (!rdev)
+ return -EINVAL;
+
+ switch (axp20x->variant) {
+ case AXP209_ID:
+ if (desc->id == AXP20X_DCDC2) {
+ slew_rates = axp209_dcdc2_ldo3_slew_rates;
+ rate_count = ARRAY_SIZE(axp209_dcdc2_ldo3_slew_rates);
+ reg = AXP20X_DCDC2_LDO3_V_RAMP;
+ mask = AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_RATE_MASK |
+ AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN_MASK;
+ enable = (ramp > 0) ?
+ AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN :
+ !AXP20X_DCDC2_LDO3_V_RAMP_DCDC2_EN;
+ break;
+ }
+
+ if (desc->id == AXP20X_LDO3) {
+ slew_rates = axp209_dcdc2_ldo3_slew_rates;
+ rate_count = ARRAY_SIZE(axp209_dcdc2_ldo3_slew_rates);
+ reg = AXP20X_DCDC2_LDO3_V_RAMP;
+ mask = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE_MASK |
+ AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN_MASK;
+ enable = (ramp > 0) ?
+ AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN :
+ !AXP20X_DCDC2_LDO3_V_RAMP_LDO3_EN;
+ break;
+ }
+
+ if (rate_count > 0)
+ break;
+
+ /* fall through */
+ default:
+ /* Not supported for this regulator */
+ return -ENOTSUPP;
+ }
+
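+	/*
+	 * cfg was initialised to 0xff as a sentinel: if it is still 0xff
+	 * after scanning the slew rate table, no supported rate satisfies
+	 * the requested ramp.
+	 */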
+ if (ramp == 0) {
+ cfg = enable;
+ } else {
+ int i;
+
+ for (i = 0; i < rate_count; i++) {
+ if (ramp <= slew_rates[i])
+ cfg = AXP20X_DCDC2_LDO3_V_RAMP_LDO3_RATE(i);
+ else
+ break;
+ }
+
+ if (cfg == 0xff) {
+ dev_err(axp20x->dev, "unsupported ramp value %d", ramp);
+ return -EINVAL;
+ }
+
+ cfg |= enable;
+ }
+
+ return regmap_update_bits(axp20x->regmap, reg, mask, cfg);
+}
+
+static int axp20x_regulator_enable_regmap(struct regulator_dev *rdev)
+{
+ struct axp20x_dev *axp20x = rdev_get_drvdata(rdev);
+ const struct regulator_desc *desc = rdev->desc;
+
+ if (!rdev)
+ return -EINVAL;
+
+ switch (axp20x->variant) {
+ case AXP209_ID:
+ if ((desc->id == AXP20X_LDO3) &&
+ rdev->constraints && rdev->constraints->soft_start) {
+ int v_out;
+ int ret;
+
+ /*
+ * On some boards, the LDO3 can be overloaded when
+			 * turning on, causing the entire PMIC to shut down
+ * without warning. Turning it on at the minimal voltage
+ * and then setting the voltage to the requested value
+ * works reliably.
+ */
+ if (regulator_is_enabled_regmap(rdev))
+ break;
+
+ v_out = regulator_get_voltage_sel_regmap(rdev);
+ if (v_out < 0)
+ return v_out;
+
+ if (v_out == 0)
+ break;
+
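+			/* Drop to selector 0, the lowest voltage, first. */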
+ ret = regulator_set_voltage_sel_regmap(rdev, 0x00);
+ /*
+ * A small pause is needed between
+ * setting the voltage and enabling the LDO to give the
+ * internal state machine time to process the request.
+ */
+ usleep_range(1000, 5000);
+ ret |= regulator_enable_regmap(rdev);
+ ret |= regulator_set_voltage_sel_regmap(rdev, v_out);
+
+ return ret;
+ }
+ break;
+ default:
+ /* No quirks */
+ break;
+ }
+
+ return regulator_enable_regmap(rdev);
+}
+
static const struct regulator_ops axp20x_ops_fixed = {
.list_voltage = regulator_list_voltage_linear,
};
@@ -145,9 +503,10 @@ static const struct regulator_ops axp20x_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
- .enable = regulator_enable_regmap,
+ .enable = axp20x_regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
+ .set_ramp_delay = axp20x_set_ramp_delay,
};
static const struct regulator_ops axp20x_ops_sw = {
@@ -157,77 +516,116 @@ static const struct regulator_ops axp20x_ops_sw = {
};
static const struct regulator_linear_range axp20x_ldo4_ranges[] = {
- REGULATOR_LINEAR_RANGE(1250000, 0x0, 0x0, 0),
- REGULATOR_LINEAR_RANGE(1300000, 0x1, 0x8, 100000),
- REGULATOR_LINEAR_RANGE(2500000, 0x9, 0x9, 0),
- REGULATOR_LINEAR_RANGE(2700000, 0xa, 0xb, 100000),
- REGULATOR_LINEAR_RANGE(3000000, 0xc, 0xf, 100000),
+ REGULATOR_LINEAR_RANGE(1250000,
+ AXP20X_LDO4_V_OUT_1250mV_START,
+ AXP20X_LDO4_V_OUT_1250mV_END,
+ 0),
+ REGULATOR_LINEAR_RANGE(1300000,
+ AXP20X_LDO4_V_OUT_1300mV_START,
+ AXP20X_LDO4_V_OUT_1300mV_END,
+ 100000),
+ REGULATOR_LINEAR_RANGE(2500000,
+ AXP20X_LDO4_V_OUT_2500mV_START,
+ AXP20X_LDO4_V_OUT_2500mV_END,
+ 0),
+ REGULATOR_LINEAR_RANGE(2700000,
+ AXP20X_LDO4_V_OUT_2700mV_START,
+ AXP20X_LDO4_V_OUT_2700mV_END,
+ 100000),
+ REGULATOR_LINEAR_RANGE(3000000,
+ AXP20X_LDO4_V_OUT_3000mV_START,
+ AXP20X_LDO4_V_OUT_3000mV_END,
+ 100000),
};
static const struct regulator_desc axp20x_regulators[] = {
AXP_DESC(AXP20X, DCDC2, "dcdc2", "vin2", 700, 2275, 25,
- AXP20X_DCDC2_V_OUT, 0x3f, AXP20X_PWR_OUT_CTRL, 0x10),
+ AXP20X_DCDC2_V_OUT, AXP20X_DCDC2_V_OUT_MASK,
+ AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_DCDC2_MASK),
AXP_DESC(AXP20X, DCDC3, "dcdc3", "vin3", 700, 3500, 25,
- AXP20X_DCDC3_V_OUT, 0x7f, AXP20X_PWR_OUT_CTRL, 0x02),
+ AXP20X_DCDC3_V_OUT, AXP20X_DCDC3_V_OUT_MASK,
+ AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_DCDC3_MASK),
AXP_DESC_FIXED(AXP20X, LDO1, "ldo1", "acin", 1300),
AXP_DESC(AXP20X, LDO2, "ldo2", "ldo24in", 1800, 3300, 100,
- AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04),
+ AXP20X_LDO24_V_OUT, AXP20X_LDO24_V_OUT_MASK,
+ AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO2_MASK),
AXP_DESC(AXP20X, LDO3, "ldo3", "ldo3in", 700, 3500, 25,
- AXP20X_LDO3_V_OUT, 0x7f, AXP20X_PWR_OUT_CTRL, 0x40),
- AXP_DESC_RANGES(AXP20X, LDO4, "ldo4", "ldo24in", axp20x_ldo4_ranges,
- 16, AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL,
- 0x08),
+ AXP20X_LDO3_V_OUT, AXP20X_LDO3_V_OUT_MASK,
+ AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO3_MASK),
+ AXP_DESC_RANGES(AXP20X, LDO4, "ldo4", "ldo24in",
+ axp20x_ldo4_ranges, AXP20X_LDO4_V_OUT_NUM_VOLTAGES,
+			AXP20X_LDO24_V_OUT, AXP20X_LDO4_V_OUT_MASK,
+ AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO4_MASK),
AXP_DESC_IO(AXP20X, LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
- AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07,
+ AXP20X_LDO5_V_OUT, AXP20X_LDO5_V_OUT_MASK,
+ AXP20X_GPIO0_CTRL, AXP20X_GPIO0_FUNC_MASK,
AXP20X_IO_ENABLED, AXP20X_IO_DISABLED),
};
static const struct regulator_desc axp22x_regulators[] = {
AXP_DESC(AXP22X, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
- AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(1)),
+ AXP22X_DCDC1_V_OUT, AXP22X_DCDC1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC1_MASK),
AXP_DESC(AXP22X, DCDC2, "dcdc2", "vin2", 600, 1540, 20,
- AXP22X_DCDC2_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(2)),
+ AXP22X_DCDC2_V_OUT, AXP22X_DCDC2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC2_MASK),
AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
- AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
+ AXP22X_DCDC3_V_OUT, AXP22X_DCDC3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC3_MASK),
AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
- AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
+	 AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC4_MASK),
AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
- AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
+ AXP22X_DCDC5_V_OUT, AXP22X_DCDC5_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC5_MASK),
/* secondary switchable output of DCDC1 */
- AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", NULL, AXP22X_PWR_OUT_CTRL2,
- BIT(7)),
+ AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", NULL,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DC1SW_MASK),
/* LDO regulator internally chained to DCDC5 */
AXP_DESC(AXP22X, DC5LDO, "dc5ldo", NULL, 700, 1400, 100,
- AXP22X_DC5LDO_V_OUT, 0x7, AXP22X_PWR_OUT_CTRL1, BIT(0)),
+ AXP22X_DC5LDO_V_OUT, AXP22X_DC5LDO_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DC5LDO_MASK),
AXP_DESC(AXP22X, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
- AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(6)),
+ AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP22X, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
- AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(7)),
+ AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP22X, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
- AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(7)),
+ AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL3, AXP22X_PWR_OUT_ALDO3_MASK),
AXP_DESC(AXP22X, DLDO1, "dldo1", "dldoin", 700, 3300, 100,
- AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(3)),
+ AXP22X_DLDO1_V_OUT, AXP22X_DLDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
AXP_DESC(AXP22X, DLDO2, "dldo2", "dldoin", 700, 3300, 100,
- AXP22X_DLDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(4)),
+	 AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP22X, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
- AXP22X_DLDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
+ AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO3_MASK),
AXP_DESC(AXP22X, DLDO4, "dldo4", "dldoin", 700, 3300, 100,
- AXP22X_DLDO4_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(6)),
+ AXP22X_DLDO4_V_OUT, AXP22X_DLDO4_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO4_MASK),
AXP_DESC(AXP22X, ELDO1, "eldo1", "eldoin", 700, 3300, 100,
- AXP22X_ELDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(0)),
+ AXP22X_ELDO1_V_OUT, AXP22X_ELDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
AXP_DESC(AXP22X, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
- AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+ AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
+	 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
- AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+ AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
/* Note the datasheet only guarantees reliable operation up to
* 3.3V, this needs to be enforced via dts provided constraints */
AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3800, 100,
- AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+ AXP22X_LDO_IO0_V_OUT, AXP22X_LDO_IO0_V_OUT_MASK,
+ AXP20X_GPIO0_CTRL, AXP20X_GPIO0_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
/* Note the datasheet only guarantees reliable operation up to
* 3.3V, this needs to be enforced via dts provided constraints */
AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3800, 100,
- AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+ AXP22X_LDO_IO1_V_OUT, AXP22X_LDO_IO1_V_OUT_MASK,
+ AXP20X_GPIO1_CTRL, AXP20X_GPIO1_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
};
@@ -240,240 +638,354 @@ static const struct regulator_desc axp22x_drivevbus_regulator = {
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
.enable_reg = AXP20X_VBUS_IPSOUT_MGMT,
- .enable_mask = BIT(2),
+ .enable_mask = AXP20X_VBUS_IPSOUT_MGMT_MASK,
.ops = &axp20x_ops_sw,
};
/* DCDC ranges shared with AXP813 */
static const struct regulator_linear_range axp803_dcdc234_ranges[] = {
- REGULATOR_LINEAR_RANGE(500000, 0x0, 0x46, 10000),
- REGULATOR_LINEAR_RANGE(1220000, 0x47, 0x4b, 20000),
+ REGULATOR_LINEAR_RANGE(500000,
+ AXP803_DCDC234_500mV_START,
+ AXP803_DCDC234_500mV_END,
+ 10000),
+ REGULATOR_LINEAR_RANGE(1220000,
+ AXP803_DCDC234_1220mV_START,
+ AXP803_DCDC234_1220mV_END,
+ 20000),
};
static const struct regulator_linear_range axp803_dcdc5_ranges[] = {
- REGULATOR_LINEAR_RANGE(800000, 0x0, 0x20, 10000),
- REGULATOR_LINEAR_RANGE(1140000, 0x21, 0x44, 20000),
+ REGULATOR_LINEAR_RANGE(800000,
+ AXP803_DCDC5_800mV_START,
+ AXP803_DCDC5_800mV_END,
+ 10000),
+ REGULATOR_LINEAR_RANGE(1140000,
+ AXP803_DCDC5_1140mV_START,
+ AXP803_DCDC5_1140mV_END,
+ 20000),
};
static const struct regulator_linear_range axp803_dcdc6_ranges[] = {
- REGULATOR_LINEAR_RANGE(600000, 0x0, 0x32, 10000),
- REGULATOR_LINEAR_RANGE(1120000, 0x33, 0x47, 20000),
+ REGULATOR_LINEAR_RANGE(600000,
+ AXP803_DCDC6_600mV_START,
+ AXP803_DCDC6_600mV_END,
+ 10000),
+ REGULATOR_LINEAR_RANGE(1120000,
+ AXP803_DCDC6_1120mV_START,
+ AXP803_DCDC6_1120mV_END,
+ 20000),
};
-/* AXP806's CLDO2 and AXP809's DLDO1 shares the same range */
+/* AXP806's CLDO2 and AXP809's DLDO1 share the same range */
static const struct regulator_linear_range axp803_dldo2_ranges[] = {
- REGULATOR_LINEAR_RANGE(700000, 0x0, 0x1a, 100000),
- REGULATOR_LINEAR_RANGE(3400000, 0x1b, 0x1f, 200000),
+ REGULATOR_LINEAR_RANGE(700000,
+ AXP803_DLDO2_700mV_START,
+ AXP803_DLDO2_700mV_END,
+ 100000),
+ REGULATOR_LINEAR_RANGE(3400000,
+ AXP803_DLDO2_3400mV_START,
+ AXP803_DLDO2_3400mV_END,
+ 200000),
};
static const struct regulator_desc axp803_regulators[] = {
AXP_DESC(AXP803, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
- AXP803_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(0)),
- AXP_DESC_RANGES(AXP803, DCDC2, "dcdc2", "vin2", axp803_dcdc234_ranges,
- 76, AXP803_DCDC2_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
- BIT(1)),
- AXP_DESC_RANGES(AXP803, DCDC3, "dcdc3", "vin3", axp803_dcdc234_ranges,
- 76, AXP803_DCDC3_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
- BIT(2)),
- AXP_DESC_RANGES(AXP803, DCDC4, "dcdc4", "vin4", axp803_dcdc234_ranges,
- 76, AXP803_DCDC4_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
- BIT(3)),
- AXP_DESC_RANGES(AXP803, DCDC5, "dcdc5", "vin5", axp803_dcdc5_ranges,
- 68, AXP803_DCDC5_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
- BIT(4)),
- AXP_DESC_RANGES(AXP803, DCDC6, "dcdc6", "vin6", axp803_dcdc6_ranges,
- 72, AXP803_DCDC6_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
- BIT(5)),
+ AXP803_DCDC1_V_OUT, AXP803_DCDC1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC1_MASK),
+ AXP_DESC_RANGES(AXP803, DCDC2, "dcdc2", "vin2",
+ axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
+ AXP803_DCDC2_V_OUT, AXP803_DCDC2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC2_MASK),
+ AXP_DESC_RANGES(AXP803, DCDC3, "dcdc3", "vin3",
+ axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
+ AXP803_DCDC3_V_OUT, AXP803_DCDC3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC3_MASK),
+ AXP_DESC_RANGES(AXP803, DCDC4, "dcdc4", "vin4",
+ axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
+ AXP803_DCDC4_V_OUT, AXP803_DCDC4_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC4_MASK),
+ AXP_DESC_RANGES(AXP803, DCDC5, "dcdc5", "vin5",
+ axp803_dcdc5_ranges, AXP803_DCDC5_NUM_VOLTAGES,
+ AXP803_DCDC5_V_OUT, AXP803_DCDC5_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC5_MASK),
+ AXP_DESC_RANGES(AXP803, DCDC6, "dcdc6", "vin6",
+ axp803_dcdc6_ranges, AXP803_DCDC6_NUM_VOLTAGES,
+ AXP803_DCDC6_V_OUT, AXP803_DCDC6_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC6_MASK),
/* secondary switchable output of DCDC1 */
- AXP_DESC_SW(AXP803, DC1SW, "dc1sw", NULL, AXP22X_PWR_OUT_CTRL2,
- BIT(7)),
+ AXP_DESC_SW(AXP803, DC1SW, "dc1sw", NULL,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DC1SW_MASK),
AXP_DESC(AXP803, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
- AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(5)),
+ AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP803, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
- AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(6)),
+	 AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP803, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
- AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(7)),
+ AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO3_MASK),
AXP_DESC(AXP803, DLDO1, "dldo1", "dldoin", 700, 3300, 100,
- AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(3)),
- AXP_DESC_RANGES(AXP803, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges,
- 32, AXP22X_DLDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2,
- BIT(4)),
+ AXP22X_DLDO1_V_OUT, AXP22X_DLDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
+ AXP_DESC_RANGES(AXP803, DLDO2, "dldo2", "dldoin",
+ axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
+			AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP803, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
- AXP22X_DLDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
+ AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO3_MASK),
AXP_DESC(AXP803, DLDO4, "dldo4", "dldoin", 700, 3300, 100,
- AXP22X_DLDO4_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(6)),
+ AXP22X_DLDO4_V_OUT, AXP22X_DLDO4_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO4_MASK),
AXP_DESC(AXP803, ELDO1, "eldo1", "eldoin", 700, 1900, 50,
- AXP22X_ELDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(0)),
+ AXP22X_ELDO1_V_OUT, AXP22X_ELDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
AXP_DESC(AXP803, ELDO2, "eldo2", "eldoin", 700, 1900, 50,
- AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+ AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP803, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
- AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+	 AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
AXP_DESC(AXP803, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
- AXP803_FLDO1_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(2)),
+ AXP803_FLDO1_V_OUT, AXP803_FLDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL3, AXP803_PWR_OUT_FLDO1_MASK),
AXP_DESC(AXP803, FLDO2, "fldo2", "fldoin", 700, 1450, 50,
- AXP803_FLDO2_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(3)),
+ AXP803_FLDO2_V_OUT, AXP803_FLDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL3, AXP803_PWR_OUT_FLDO2_MASK),
AXP_DESC_IO(AXP803, LDO_IO0, "ldo-io0", "ips", 700, 3300, 100,
- AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+ AXP22X_LDO_IO0_V_OUT, AXP22X_LDO_IO0_V_OUT_MASK,
+ AXP20X_GPIO0_CTRL, AXP20X_GPIO0_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_IO(AXP803, LDO_IO1, "ldo-io1", "ips", 700, 3300, 100,
- AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+ AXP22X_LDO_IO1_V_OUT, AXP22X_LDO_IO1_V_OUT_MASK,
+ AXP20X_GPIO1_CTRL, AXP20X_GPIO1_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_FIXED(AXP803, RTC_LDO, "rtc-ldo", "ips", 3000),
};
static const struct regulator_linear_range axp806_dcdca_ranges[] = {
- REGULATOR_LINEAR_RANGE(600000, 0x0, 0x32, 10000),
- REGULATOR_LINEAR_RANGE(1120000, 0x33, 0x47, 20000),
+ REGULATOR_LINEAR_RANGE(600000,
+ AXP806_DCDCA_600mV_START,
+ AXP806_DCDCA_600mV_END,
+ 10000),
+ REGULATOR_LINEAR_RANGE(1120000,
+ AXP806_DCDCA_1120mV_START,
+ AXP806_DCDCA_1120mV_END,
+ 20000),
};
static const struct regulator_linear_range axp806_dcdcd_ranges[] = {
- REGULATOR_LINEAR_RANGE(600000, 0x0, 0x2d, 20000),
- REGULATOR_LINEAR_RANGE(1600000, 0x2e, 0x3f, 100000),
+ REGULATOR_LINEAR_RANGE(600000,
+ AXP806_DCDCD_600mV_START,
+ AXP806_DCDCD_600mV_END,
+ 20000),
+ REGULATOR_LINEAR_RANGE(1600000,
+			       AXP806_DCDCD_1600mV_START,
+			       AXP806_DCDCD_1600mV_END,
+ 100000),
};
static const struct regulator_desc axp806_regulators[] = {
- AXP_DESC_RANGES(AXP806, DCDCA, "dcdca", "vina", axp806_dcdca_ranges,
- 72, AXP806_DCDCA_V_CTRL, 0x7f, AXP806_PWR_OUT_CTRL1,
- BIT(0)),
+ AXP_DESC_RANGES(AXP806, DCDCA, "dcdca", "vina",
+ axp806_dcdca_ranges, AXP806_DCDCA_NUM_VOLTAGES,
+ AXP806_DCDCA_V_CTRL, AXP806_DCDCA_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCA_MASK),
AXP_DESC(AXP806, DCDCB, "dcdcb", "vinb", 1000, 2550, 50,
- AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(1)),
- AXP_DESC_RANGES(AXP806, DCDCC, "dcdcc", "vinc", axp806_dcdca_ranges,
- 72, AXP806_DCDCC_V_CTRL, 0x7f, AXP806_PWR_OUT_CTRL1,
- BIT(2)),
- AXP_DESC_RANGES(AXP806, DCDCD, "dcdcd", "vind", axp806_dcdcd_ranges,
- 64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
- BIT(3)),
+	 AXP806_DCDCB_V_CTRL, AXP806_DCDCB_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCB_MASK),
+ AXP_DESC_RANGES(AXP806, DCDCC, "dcdcc", "vinc",
+ axp806_dcdca_ranges, AXP806_DCDCA_NUM_VOLTAGES,
+ AXP806_DCDCC_V_CTRL, AXP806_DCDCC_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCC_MASK),
+ AXP_DESC_RANGES(AXP806, DCDCD, "dcdcd", "vind",
+ axp806_dcdcd_ranges, AXP806_DCDCD_NUM_VOLTAGES,
+ AXP806_DCDCD_V_CTRL, AXP806_DCDCD_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCD_MASK),
AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
- AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
+ AXP806_DCDCE_V_CTRL, AXP806_DCDCE_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_DCDCE_MASK),
AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
- AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
+ AXP806_ALDO1_V_CTRL, AXP806_ALDO1_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
- AXP806_ALDO2_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(6)),
+ AXP806_ALDO2_V_CTRL, AXP806_ALDO2_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP806, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
- AXP806_ALDO3_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(7)),
+ AXP806_ALDO3_V_CTRL, AXP806_ALDO3_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL1, AXP806_PWR_OUT_ALDO3_MASK),
AXP_DESC(AXP806, BLDO1, "bldo1", "bldoin", 700, 1900, 100,
- AXP806_BLDO1_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(0)),
+ AXP806_BLDO1_V_CTRL, AXP806_BLDO1_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO1_MASK),
AXP_DESC(AXP806, BLDO2, "bldo2", "bldoin", 700, 1900, 100,
- AXP806_BLDO2_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(1)),
+	 AXP806_BLDO2_V_CTRL, AXP806_BLDO2_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO2_MASK),
AXP_DESC(AXP806, BLDO3, "bldo3", "bldoin", 700, 1900, 100,
- AXP806_BLDO3_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(2)),
+ AXP806_BLDO3_V_CTRL, AXP806_BLDO3_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO3_MASK),
AXP_DESC(AXP806, BLDO4, "bldo4", "bldoin", 700, 1900, 100,
- AXP806_BLDO4_V_CTRL, 0x0f, AXP806_PWR_OUT_CTRL2, BIT(3)),
+ AXP806_BLDO4_V_CTRL, AXP806_BLDO4_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_BLDO4_MASK),
AXP_DESC(AXP806, CLDO1, "cldo1", "cldoin", 700, 3300, 100,
- AXP806_CLDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL2, BIT(4)),
- AXP_DESC_RANGES(AXP806, CLDO2, "cldo2", "cldoin", axp803_dldo2_ranges,
- 32, AXP806_CLDO2_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL2,
- BIT(5)),
+ AXP806_CLDO1_V_CTRL, AXP806_CLDO1_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_CLDO1_MASK),
+ AXP_DESC_RANGES(AXP806, CLDO2, "cldo2", "cldoin",
+ axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
+ AXP806_CLDO2_V_CTRL, AXP806_CLDO2_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_CLDO2_MASK),
AXP_DESC(AXP806, CLDO3, "cldo3", "cldoin", 700, 3300, 100,
- AXP806_CLDO3_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL2, BIT(6)),
- AXP_DESC_SW(AXP806, SW, "sw", "swin", AXP806_PWR_OUT_CTRL2, BIT(7)),
+ AXP806_CLDO3_V_CTRL, AXP806_CLDO3_V_CTRL_MASK,
+ AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_CLDO3_MASK),
+ AXP_DESC_SW(AXP806, SW, "sw", "swin",
+ AXP806_PWR_OUT_CTRL2, AXP806_PWR_OUT_SW_MASK),
};
static const struct regulator_linear_range axp809_dcdc4_ranges[] = {
- REGULATOR_LINEAR_RANGE(600000, 0x0, 0x2f, 20000),
- REGULATOR_LINEAR_RANGE(1800000, 0x30, 0x38, 100000),
+ REGULATOR_LINEAR_RANGE(600000,
+ AXP809_DCDC4_600mV_START,
+ AXP809_DCDC4_600mV_END,
+ 20000),
+ REGULATOR_LINEAR_RANGE(1800000,
+ AXP809_DCDC4_1800mV_START,
+ AXP809_DCDC4_1800mV_END,
+ 100000),
};
static const struct regulator_desc axp809_regulators[] = {
AXP_DESC(AXP809, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
- AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(1)),
+ AXP22X_DCDC1_V_OUT, AXP22X_DCDC1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC1_MASK),
AXP_DESC(AXP809, DCDC2, "dcdc2", "vin2", 600, 1540, 20,
- AXP22X_DCDC2_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(2)),
+ AXP22X_DCDC2_V_OUT, AXP22X_DCDC2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC2_MASK),
AXP_DESC(AXP809, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
- AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
- AXP_DESC_RANGES(AXP809, DCDC4, "dcdc4", "vin4", axp809_dcdc4_ranges,
- 57, AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1,
- BIT(4)),
+ AXP22X_DCDC3_V_OUT, AXP22X_DCDC3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC3_MASK),
+ AXP_DESC_RANGES(AXP809, DCDC4, "dcdc4", "vin4",
+ axp809_dcdc4_ranges, AXP809_DCDC4_NUM_VOLTAGES,
+ AXP22X_DCDC4_V_OUT, AXP22X_DCDC4_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC4_MASK),
AXP_DESC(AXP809, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
- AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
+ AXP22X_DCDC5_V_OUT, AXP22X_DCDC5_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DCDC5_MASK),
/* secondary switchable output of DCDC1 */
- AXP_DESC_SW(AXP809, DC1SW, "dc1sw", NULL, AXP22X_PWR_OUT_CTRL2,
- BIT(7)),
+ AXP_DESC_SW(AXP809, DC1SW, "dc1sw", NULL,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DC1SW_MASK),
/* LDO regulator internally chained to DCDC5 */
AXP_DESC(AXP809, DC5LDO, "dc5ldo", NULL, 700, 1400, 100,
- AXP22X_DC5LDO_V_OUT, 0x7, AXP22X_PWR_OUT_CTRL1, BIT(0)),
+ AXP22X_DC5LDO_V_OUT, AXP22X_DC5LDO_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_DC5LDO_MASK),
AXP_DESC(AXP809, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
- AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(6)),
+ AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP809, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
- AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(7)),
+ AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP22X_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP809, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
- AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
- AXP_DESC_RANGES(AXP809, DLDO1, "dldo1", "dldoin", axp803_dldo2_ranges,
- 32, AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2,
- BIT(3)),
+ AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
+	 AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO3_MASK),
+ AXP_DESC_RANGES(AXP809, DLDO1, "dldo1", "dldoin",
+ axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
+ AXP22X_DLDO1_V_OUT, AXP22X_DLDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
AXP_DESC(AXP809, DLDO2, "dldo2", "dldoin", 700, 3300, 100,
- AXP22X_DLDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(4)),
+ AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP809, ELDO1, "eldo1", "eldoin", 700, 3300, 100,
- AXP22X_ELDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(0)),
+ AXP22X_ELDO1_V_OUT, AXP22X_ELDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
AXP_DESC(AXP809, ELDO2, "eldo2", "eldoin", 700, 3300, 100,
- AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+ AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP809, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
- AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+ AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
/*
* Note the datasheet only guarantees reliable operation up to
* 3.3V, this needs to be enforced via dts provided constraints
*/
AXP_DESC_IO(AXP809, LDO_IO0, "ldo_io0", "ips", 700, 3800, 100,
- AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+ AXP22X_LDO_IO0_V_OUT, AXP22X_LDO_IO0_V_OUT_MASK,
+ AXP20X_GPIO0_CTRL, AXP20X_GPIO0_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
/*
* Note the datasheet only guarantees reliable operation up to
* 3.3V, this needs to be enforced via dts provided constraints
*/
AXP_DESC_IO(AXP809, LDO_IO1, "ldo_io1", "ips", 700, 3800, 100,
- AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+ AXP22X_LDO_IO1_V_OUT, AXP22X_LDO_IO1_V_OUT_MASK,
+ AXP20X_GPIO1_CTRL, AXP20X_GPIO1_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_FIXED(AXP809, RTC_LDO, "rtc_ldo", "ips", 1800),
- AXP_DESC_SW(AXP809, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(6)),
+ AXP_DESC_SW(AXP809, SW, "sw", "swin",
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_SW_MASK),
};
static const struct regulator_desc axp813_regulators[] = {
AXP_DESC(AXP813, DCDC1, "dcdc1", "vin1", 1600, 3400, 100,
- AXP803_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(0)),
- AXP_DESC_RANGES(AXP813, DCDC2, "dcdc2", "vin2", axp803_dcdc234_ranges,
- 76, AXP803_DCDC2_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
- BIT(1)),
- AXP_DESC_RANGES(AXP813, DCDC3, "dcdc3", "vin3", axp803_dcdc234_ranges,
- 76, AXP803_DCDC3_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
- BIT(2)),
- AXP_DESC_RANGES(AXP813, DCDC4, "dcdc4", "vin4", axp803_dcdc234_ranges,
- 76, AXP803_DCDC4_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
- BIT(3)),
- AXP_DESC_RANGES(AXP813, DCDC5, "dcdc5", "vin5", axp803_dcdc5_ranges,
- 68, AXP803_DCDC5_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
- BIT(4)),
- AXP_DESC_RANGES(AXP813, DCDC6, "dcdc6", "vin6", axp803_dcdc6_ranges,
- 72, AXP803_DCDC6_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
- BIT(5)),
- AXP_DESC_RANGES(AXP813, DCDC7, "dcdc7", "vin7", axp803_dcdc6_ranges,
- 72, AXP813_DCDC7_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1,
- BIT(6)),
+ AXP803_DCDC1_V_OUT, AXP803_DCDC1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC1_MASK),
+ AXP_DESC_RANGES(AXP813, DCDC2, "dcdc2", "vin2",
+ axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
+ AXP803_DCDC2_V_OUT, AXP803_DCDC2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC2_MASK),
+ AXP_DESC_RANGES(AXP813, DCDC3, "dcdc3", "vin3",
+ axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
+ AXP803_DCDC3_V_OUT, AXP803_DCDC3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC3_MASK),
+ AXP_DESC_RANGES(AXP813, DCDC4, "dcdc4", "vin4",
+ axp803_dcdc234_ranges, AXP803_DCDC234_NUM_VOLTAGES,
+ AXP803_DCDC4_V_OUT, AXP803_DCDC4_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC4_MASK),
+ AXP_DESC_RANGES(AXP813, DCDC5, "dcdc5", "vin5",
+ axp803_dcdc5_ranges, AXP803_DCDC5_NUM_VOLTAGES,
+ AXP803_DCDC5_V_OUT, AXP803_DCDC5_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC5_MASK),
+ AXP_DESC_RANGES(AXP813, DCDC6, "dcdc6", "vin6",
+ axp803_dcdc6_ranges, AXP803_DCDC6_NUM_VOLTAGES,
+ AXP803_DCDC6_V_OUT, AXP803_DCDC6_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP803_PWR_OUT_DCDC6_MASK),
+ AXP_DESC_RANGES(AXP813, DCDC7, "dcdc7", "vin7",
+ axp803_dcdc6_ranges, AXP803_DCDC6_NUM_VOLTAGES,
+ AXP813_DCDC7_V_OUT, AXP813_DCDC7_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL1, AXP813_PWR_OUT_DCDC7_MASK),
AXP_DESC(AXP813, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
- AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(5)),
+ AXP22X_ALDO1_V_OUT, AXP22X_ALDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO1_MASK),
AXP_DESC(AXP813, ALDO2, "aldo2", "aldoin", 700, 3300, 100,
- AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(6)),
+	 AXP22X_ALDO2_V_OUT, AXP22X_ALDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO2_MASK),
AXP_DESC(AXP813, ALDO3, "aldo3", "aldoin", 700, 3300, 100,
- AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(7)),
+ AXP22X_ALDO3_V_OUT, AXP22X_ALDO3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL3, AXP806_PWR_OUT_ALDO3_MASK),
AXP_DESC(AXP813, DLDO1, "dldo1", "dldoin", 700, 3300, 100,
- AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(3)),
- AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges,
- 32, AXP22X_DLDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2,
- BIT(4)),
+ AXP22X_DLDO1_V_OUT, AXP22X_DLDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
+ AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin",
+ axp803_dldo2_ranges, AXP803_DLDO2_NUM_VOLTAGES,
+			AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
AXP_DESC(AXP813, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
- AXP22X_DLDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)),
+ AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO3_MASK),
AXP_DESC(AXP813, DLDO4, "dldo4", "dldoin", 700, 3300, 100,
- AXP22X_DLDO4_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(6)),
+ AXP22X_DLDO4_V_OUT, AXP22X_DLDO4_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO4_MASK),
AXP_DESC(AXP813, ELDO1, "eldo1", "eldoin", 700, 1900, 50,
- AXP22X_ELDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(0)),
+ AXP22X_ELDO1_V_OUT, AXP22X_ELDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO1_MASK),
AXP_DESC(AXP813, ELDO2, "eldo2", "eldoin", 700, 1900, 50,
- AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
+ AXP22X_ELDO2_V_OUT, AXP22X_ELDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO2_MASK),
AXP_DESC(AXP813, ELDO3, "eldo3", "eldoin", 700, 1900, 50,
- AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
+	 AXP22X_ELDO3_V_OUT, AXP22X_ELDO3_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_ELDO3_MASK),
/* to do / check ... */
AXP_DESC(AXP813, FLDO1, "fldo1", "fldoin", 700, 1450, 50,
- AXP803_FLDO1_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(2)),
+ AXP803_FLDO1_V_OUT, AXP803_FLDO1_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL3, AXP803_PWR_OUT_FLDO1_MASK),
AXP_DESC(AXP813, FLDO2, "fldo2", "fldoin", 700, 1450, 50,
- AXP803_FLDO2_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(3)),
+ AXP803_FLDO2_V_OUT, AXP803_FLDO2_V_OUT_MASK,
+ AXP22X_PWR_OUT_CTRL3, AXP803_PWR_OUT_FLDO2_MASK),
/*
* TODO: FLDO3 = {DCDC5, FLDOIN} / 2
*
@@ -482,12 +994,15 @@ static const struct regulator_desc axp813_regulators[] = {
*/
AXP_DESC_FIXED(AXP813, RTC_LDO, "rtc-ldo", "ips", 1800),
AXP_DESC_IO(AXP813, LDO_IO0, "ldo-io0", "ips", 700, 3300, 100,
- AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
+ AXP22X_LDO_IO0_V_OUT, AXP22X_LDO_IO0_V_OUT_MASK,
+ AXP20X_GPIO0_CTRL, AXP20X_GPIO0_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
AXP_DESC_IO(AXP813, LDO_IO1, "ldo-io1", "ips", 700, 3300, 100,
- AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
+ AXP22X_LDO_IO1_V_OUT, AXP22X_LDO_IO1_V_OUT_MASK,
+ AXP20X_GPIO1_CTRL, AXP20X_GPIO1_FUNC_MASK,
AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
- AXP_DESC_SW(AXP813, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(7)),
+ AXP_DESC_SW(AXP813, SW, "sw", "swin",
+ AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DC1SW_MASK),
};
static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
@@ -663,9 +1178,9 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
switch (id) {
case AXP803_DCDC3:
- return !!(reg & BIT(6));
+ return !!(reg & AXP803_DCDC23_POLYPHASE_DUAL);
case AXP803_DCDC6:
- return !!(reg & BIT(5));
+ return !!(reg & AXP803_DCDC56_POLYPHASE_DUAL);
}
break;
@@ -674,12 +1189,15 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
switch (id) {
case AXP806_DCDCB:
- return (((reg & GENMASK(7, 6)) == BIT(6)) ||
- ((reg & GENMASK(7, 6)) == BIT(7)));
+ return (((reg & AXP806_DCDCABC_POLYPHASE_MASK) ==
+ AXP806_DCDCAB_POLYPHASE_DUAL) ||
+ ((reg & AXP806_DCDCABC_POLYPHASE_MASK) ==
+ AXP806_DCDCABC_POLYPHASE_TRI));
case AXP806_DCDCC:
- return ((reg & GENMASK(7, 6)) == BIT(7));
+ return ((reg & AXP806_DCDCABC_POLYPHASE_MASK) ==
+ AXP806_DCDCABC_POLYPHASE_TRI);
case AXP806_DCDCE:
- return !!(reg & BIT(5));
+ return !!(reg & AXP806_DCDCDE_POLYPHASE_DUAL);
}
break;
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index 3a47e0372e77..b8dcdc21dc22 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/mfd/rohm-bd718x7.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -130,6 +131,7 @@ static struct regulator_ops bd718xx_buck_regulator_nolinear_ops = {
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = bd718xx_set_voltage_sel_restricted,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_time_sel = regulator_set_voltage_time_sel,
@@ -1007,7 +1009,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
};
struct bd718xx_pmic_inits {
- const struct bd718xx_regulator_data (*r_datas)[];
+ const struct bd718xx_regulator_data *r_datas;
unsigned int r_amount;
};
@@ -1017,11 +1019,11 @@ static int bd718xx_probe(struct platform_device *pdev)
struct regulator_config config = { 0 };
struct bd718xx_pmic_inits pmic_regulators[] = {
[BD718XX_TYPE_BD71837] = {
- .r_datas = &bd71837_regulators,
+ .r_datas = bd71837_regulators,
.r_amount = ARRAY_SIZE(bd71837_regulators),
},
[BD718XX_TYPE_BD71847] = {
- .r_datas = &bd71847_regulators,
+ .r_datas = bd71847_regulators,
.r_amount = ARRAY_SIZE(bd71847_regulators),
},
};
@@ -1053,13 +1055,36 @@ static int bd718xx_probe(struct platform_device *pdev)
BD718XX_REG_REGLOCK);
}
+	/* At the poweroff transition the PMIC HW disables the EN bit for
+	 * the regulators but leaves the SEL bit untouched. So if the state
+	 * transition from POWEROFF is done to SNVS, all power rails
+	 * controlled by SW (those with the SEL bit set) stay disabled
+	 * because EN is cleared. This may result in a boot failure if any
+	 * crucial systems are powered by these rails.
+	 *
+	 * Change the next stage from poweroff to READY instead of SNVS for
+	 * all reset types, because OTP loading at READY will clear the SEL
+	 * bit, allowing the HW defaults for the power rails to be used.
+ */
+ err = regmap_update_bits(mfd->regmap, BD718XX_REG_TRANS_COND1,
+ BD718XX_ON_REQ_POWEROFF_MASK |
+ BD718XX_SWRESET_POWEROFF_MASK |
+ BD718XX_WDOG_POWEROFF_MASK |
+ BD718XX_KEY_L_POWEROFF_MASK,
+ BD718XX_POWOFF_TO_RDY);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to change reset target\n");
+ goto err;
+ } else {
+		dev_dbg(&pdev->dev, "Changed all resets from SNVS to READY\n");
+ }
+
for (i = 0; i < pmic_regulators[mfd->chip_type].r_amount; i++) {
const struct regulator_desc *desc;
struct regulator_dev *rdev;
const struct bd718xx_regulator_data *r;
- r = &(*pmic_regulators[mfd->chip_type].r_datas)[i];
+ r = &pmic_regulators[mfd->chip_type].r_datas[i];
desc = &r->desc;
config.dev = pdev->dev.parent;
diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
index 274c5ed7cd73..e12dd1f750f3 100644
--- a/drivers/regulator/bd9571mwv-regulator.c
+++ b/drivers/regulator/bd9571mwv-regulator.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ROHM BD9571MWV-M regulator driver
*
* Copyright (C) 2017 Marek Vasut <marek.vasut+renesas@gmail.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether expressed or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License version 2 for more details.
- *
* Based on the TPS65086 driver
*
* NOTE: VD09 is missing
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 2c66b528aede..b9d7b45c7295 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -50,6 +50,8 @@
#define rdev_dbg(rdev, fmt, ...) \
pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+static DEFINE_WW_CLASS(regulator_ww_class);
+static DEFINE_MUTEX(regulator_nesting_mutex);
static DEFINE_MUTEX(regulator_list_mutex);
static LIST_HEAD(regulator_map_list);
static LIST_HEAD(regulator_ena_gpio_list);
@@ -97,7 +99,7 @@ struct regulator_supply_alias {
};
static int _regulator_is_enabled(struct regulator_dev *rdev);
-static int _regulator_disable(struct regulator_dev *rdev);
+static int _regulator_disable(struct regulator *regulator);
static int _regulator_get_voltage(struct regulator_dev *rdev);
static int _regulator_get_current_limit(struct regulator_dev *rdev);
static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
@@ -105,6 +107,11 @@ static int _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data);
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV);
+static int regulator_balance_voltage(struct regulator_dev *rdev,
+ suspend_state_t state);
+static int regulator_set_voltage_rdev(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ suspend_state_t state);
static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name);
@@ -149,7 +156,7 @@ static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev)
/**
* regulator_lock_nested - lock a single regulator
* @rdev: regulator source
- * @subclass: mutex subclass used for lockdep
+ * @ww_ctx: w/w mutex acquire context
*
* This function can be called many times by one task on
* a single regulator and its mutex will be locked only
@@ -157,25 +164,54 @@ static inline struct regulator_dev *rdev_get_supply(struct regulator_dev *rdev)
* than the one, which initially locked the mutex, it will
* wait on mutex.
*/
-static void regulator_lock_nested(struct regulator_dev *rdev,
- unsigned int subclass)
+static inline int regulator_lock_nested(struct regulator_dev *rdev,
+ struct ww_acquire_ctx *ww_ctx)
{
- if (!mutex_trylock(&rdev->mutex)) {
- if (rdev->mutex_owner == current) {
+ bool lock = false;
+ int ret = 0;
+
+ mutex_lock(&regulator_nesting_mutex);
+
+ if (ww_ctx || !ww_mutex_trylock(&rdev->mutex)) {
+ if (rdev->mutex_owner == current)
rdev->ref_cnt++;
- return;
+ else
+ lock = true;
+
+ if (lock) {
+ mutex_unlock(&regulator_nesting_mutex);
+ ret = ww_mutex_lock(&rdev->mutex, ww_ctx);
+ mutex_lock(&regulator_nesting_mutex);
}
- mutex_lock_nested(&rdev->mutex, subclass);
+ } else {
+ lock = true;
+ }
+
+ if (lock && ret != -EDEADLK) {
+ rdev->ref_cnt++;
+ rdev->mutex_owner = current;
}
- rdev->ref_cnt = 1;
- rdev->mutex_owner = current;
+ mutex_unlock(&regulator_nesting_mutex);
+
+ return ret;
}
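/*
 * A minimal sketch of the recursive locking semantics above (the call
 * sites are hypothetical): the owning task may take the lock again and
 * only ref_cnt grows; the ww_mutex itself is only released once the
 * count drops back to zero.
 *
 *	regulator_lock(rdev);		ref_cnt = 1, ww_mutex acquired
 *	regulator_lock(rdev);		same owner: ref_cnt = 2, no blocking
 *	regulator_unlock(rdev);		ref_cnt = 1, mutex still held
 *	regulator_unlock(rdev);		ref_cnt = 0, ww_mutex_unlock()
 */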
-static inline void regulator_lock(struct regulator_dev *rdev)
+/**
+ * regulator_lock - lock a single regulator
+ * @rdev: regulator source
+ *
+ * This function can be called many times by one task on
+ * a single regulator and its mutex will be locked only
+ * once. If a task calling this function is not the one
+ * that initially locked the mutex, it will wait on the mutex.
+ */
+void regulator_lock(struct regulator_dev *rdev)
{
- regulator_lock_nested(rdev, 0);
+ regulator_lock_nested(rdev, NULL);
}
+EXPORT_SYMBOL_GPL(regulator_lock);
/**
* regulator_unlock - unlock a single regulator
@@ -184,47 +220,191 @@ static inline void regulator_lock(struct regulator_dev *rdev)
* This function unlocks the mutex when the
* reference counter reaches 0.
*/
-static void regulator_unlock(struct regulator_dev *rdev)
+void regulator_unlock(struct regulator_dev *rdev)
+{
+ mutex_lock(&regulator_nesting_mutex);
+
+ if (--rdev->ref_cnt == 0) {
+ rdev->mutex_owner = NULL;
+ ww_mutex_unlock(&rdev->mutex);
+ }
+
+ WARN_ON_ONCE(rdev->ref_cnt < 0);
+
+ mutex_unlock(&regulator_nesting_mutex);
+}
+EXPORT_SYMBOL_GPL(regulator_unlock);
+
+static bool regulator_supply_is_couple(struct regulator_dev *rdev)
+{
+ struct regulator_dev *c_rdev;
+ int i;
+
+ for (i = 1; i < rdev->coupling_desc.n_coupled; i++) {
+ c_rdev = rdev->coupling_desc.coupled_rdevs[i];
+
+ if (rdev->supply->rdev == c_rdev)
+ return true;
+ }
+
+ return false;
+}
+
+static void regulator_unlock_recursive(struct regulator_dev *rdev,
+ unsigned int n_coupled)
{
- if (rdev->ref_cnt != 0) {
- rdev->ref_cnt--;
+ struct regulator_dev *c_rdev;
+ int i;
+
+ for (i = n_coupled; i > 0; i--) {
+ c_rdev = rdev->coupling_desc.coupled_rdevs[i - 1];
+
+ if (!c_rdev)
+ continue;
+
+ if (c_rdev->supply && !regulator_supply_is_couple(c_rdev))
+ regulator_unlock_recursive(
+ c_rdev->supply->rdev,
+ c_rdev->coupling_desc.n_coupled);
- if (!rdev->ref_cnt) {
- rdev->mutex_owner = NULL;
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(c_rdev);
+ }
+}
+
+static int regulator_lock_recursive(struct regulator_dev *rdev,
+ struct regulator_dev **new_contended_rdev,
+ struct regulator_dev **old_contended_rdev,
+ struct ww_acquire_ctx *ww_ctx)
+{
+ struct regulator_dev *c_rdev;
+ int i, err;
+
+ for (i = 0; i < rdev->coupling_desc.n_coupled; i++) {
+ c_rdev = rdev->coupling_desc.coupled_rdevs[i];
+
+ if (!c_rdev)
+ continue;
+
+ if (c_rdev != *old_contended_rdev) {
+ err = regulator_lock_nested(c_rdev, ww_ctx);
+ if (err) {
+ if (err == -EDEADLK) {
+ *new_contended_rdev = c_rdev;
+ goto err_unlock;
+ }
+
+ /* shouldn't happen */
+ WARN_ON_ONCE(err != -EALREADY);
+ }
+ } else {
+ *old_contended_rdev = NULL;
+ }
+
+ if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) {
+ err = regulator_lock_recursive(c_rdev->supply->rdev,
+ new_contended_rdev,
+ old_contended_rdev,
+ ww_ctx);
+ if (err) {
+ regulator_unlock(c_rdev);
+ goto err_unlock;
+ }
}
}
+
+ return 0;
+
+err_unlock:
+ regulator_unlock_recursive(rdev, i);
+
+ return err;
}
/**
- * regulator_lock_supply - lock a regulator and its supplies
- * @rdev: regulator source
+ * regulator_unlock_dependent - unlock regulator's suppliers and coupled
+ * regulators
+ * @rdev: regulator source
+ * @ww_ctx: w/w mutex acquire context
+ *
+ * Unlock all regulators related to rdev by coupling or supplying.
*/
-static void regulator_lock_supply(struct regulator_dev *rdev)
+static void regulator_unlock_dependent(struct regulator_dev *rdev,
+ struct ww_acquire_ctx *ww_ctx)
{
- int i;
-
- for (i = 0; rdev; rdev = rdev_get_supply(rdev), i++)
- regulator_lock_nested(rdev, i);
+ regulator_unlock_recursive(rdev, rdev->coupling_desc.n_coupled);
+ ww_acquire_fini(ww_ctx);
}
/**
- * regulator_unlock_supply - unlock a regulator and its supplies
- * @rdev: regulator source
+ * regulator_lock_dependent - lock regulator's suppliers and coupled regulators
+ * @rdev: regulator source
+ * @ww_ctx: w/w mutex acquire context
+ *
+ * This function is a wrapper around regulator_lock_recursive(), which locks
+ * all regulators related to rdev by coupling or supplying.
*/
-static void regulator_unlock_supply(struct regulator_dev *rdev)
+static void regulator_lock_dependent(struct regulator_dev *rdev,
+ struct ww_acquire_ctx *ww_ctx)
{
- struct regulator *supply;
+ struct regulator_dev *new_contended_rdev = NULL;
+ struct regulator_dev *old_contended_rdev = NULL;
+ int err;
+
+ mutex_lock(&regulator_list_mutex);
+
+ ww_acquire_init(ww_ctx, &regulator_ww_class);
+
+ do {
+ if (new_contended_rdev) {
+ ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
+ old_contended_rdev = new_contended_rdev;
+ old_contended_rdev->ref_cnt++;
+ }
+
+ err = regulator_lock_recursive(rdev,
+ &new_contended_rdev,
+ &old_contended_rdev,
+ ww_ctx);
- while (1) {
- regulator_unlock(rdev);
- supply = rdev->supply;
+ if (old_contended_rdev)
+ regulator_unlock(old_contended_rdev);
- if (!rdev->supply)
- return;
+ } while (err == -EDEADLK);
- rdev = supply->rdev;
+ ww_acquire_done(ww_ctx);
+
+ mutex_unlock(&regulator_list_mutex);
+}
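/*
 * The retry loop above is the standard wait/wound pattern: when any lock
 * attempt returns -EDEADLK, every mutex taken so far is dropped, the
 * contended one is taken with ww_mutex_lock_slow(), and the whole set is
 * re-acquired. A generic sketch of that pattern, assuming hypothetical
 * lock_all()/unlock_all() helpers over the same lock set (the real code
 * additionally remembers the contended lock so it is not taken twice):
 *
 *	ww_acquire_init(&ctx, &regulator_ww_class);
 *	while (lock_all(&ctx) == -EDEADLK) {
 *		unlock_all();
 *		ww_mutex_lock_slow(&contended->mutex, &ctx);
 *	}
 *	ww_acquire_done(&ctx);
 */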
+
+/**
+ * of_get_child_regulator - get a child regulator device node
+ * based on supply name
+ * @parent: Parent device node
+ * @prop_name: Combination of regulator supply name and "-supply"
+ *
+ * Traverse all child nodes.
+ * Extract the child regulator device node corresponding to the supply name.
+ * Returns the device node corresponding to the regulator if found, else
+ * returns NULL.
+ */
+static struct device_node *of_get_child_regulator(struct device_node *parent,
+ const char *prop_name)
+{
+ struct device_node *regnode = NULL;
+ struct device_node *child = NULL;
+
+ for_each_child_of_node(parent, child) {
+ regnode = of_parse_phandle(child, prop_name, 0);
+
+ if (!regnode) {
+ regnode = of_get_child_regulator(child, prop_name);
+ if (regnode)
+ return regnode;
+ } else {
+ return regnode;
+ }
}
+ return NULL;
}
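/*
 * A hypothetical device tree fragment that of_get_child_regulator() can
 * resolve: the "<name>-supply" phandle sits in a child node of
 * dev->of_node rather than in the consumer node itself.
 *
 *	consumer {
 *		codec {
 *			vdd-supply = <&ldo1>;
 *		};
 *	};
 */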
/**
@@ -247,6 +427,10 @@ static struct device_node *of_get_regulator(struct device *dev, const char *supp
regnode = of_parse_phandle(dev->of_node, prop_name, 0);
if (!regnode) {
+ regnode = of_get_child_regulator(dev->of_node, prop_name);
+ if (regnode)
+ return regnode;
+
dev_dbg(dev, "Looking up %s property in node %pOF failed\n",
prop_name, dev->of_node);
return NULL;
@@ -582,8 +766,10 @@ static ssize_t regulator_total_uA_show(struct device *dev,
int uA = 0;
regulator_lock(rdev);
- list_for_each_entry(regulator, &rdev->consumer_list, list)
- uA += regulator->uA_load;
+ list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ if (regulator->enable_count)
+ uA += regulator->uA_load;
+ }
regulator_unlock(rdev);
return sprintf(buf, "%d\n", uA);
}
@@ -738,7 +924,7 @@ static int drms_uA_update(struct regulator_dev *rdev)
int current_uA = 0, output_uV, input_uV, err;
unsigned int mode;
- lockdep_assert_held_once(&rdev->mutex);
+ lockdep_assert_held_once(&rdev->mutex.base);
/*
* first check to see if we can set modes at all, otherwise just
@@ -756,8 +942,10 @@ static int drms_uA_update(struct regulator_dev *rdev)
return -EINVAL;
/* calc total requested load */
- list_for_each_entry(sibling, &rdev->consumer_list, list)
- current_uA += sibling->uA_load;
+ list_for_each_entry(sibling, &rdev->consumer_list, list) {
+ if (sibling->enable_count)
+ current_uA += sibling->uA_load;
+ }
current_uA += rdev->constraints->system_load;
@@ -1156,17 +1344,12 @@ static int set_machine_constraints(struct regulator_dev *rdev,
rdev_err(rdev, "failed to set initial mode: %d\n", ret);
return ret;
}
- }
-
- /* If the constraints say the regulator should be on at this point
- * and we have control then make sure it is enabled.
- */
- if (rdev->constraints->always_on || rdev->constraints->boot_on) {
- ret = _regulator_do_enable(rdev);
- if (ret < 0 && ret != -EINVAL) {
- rdev_err(rdev, "failed to enable\n");
- return ret;
- }
+ } else if (rdev->constraints->system_load) {
+ /*
+ * We'll only apply the initial system load if an
+ * initial mode wasn't specified.
+ */
+ drms_uA_update(rdev);
}
if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
@@ -1214,6 +1397,27 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
+ /* If the constraints say the regulator should be on at this point
+ * and we have control then make sure it is enabled.
+ */
+ if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+ if (rdev->supply) {
+ ret = regulator_enable(rdev->supply);
+ if (ret < 0) {
+ _regulator_put(rdev->supply);
+ rdev->supply = NULL;
+ return ret;
+ }
+ }
+
+ ret = _regulator_do_enable(rdev);
+ if (ret < 0 && ret != -EINVAL) {
+ rdev_err(rdev, "failed to enable\n");
+ return ret;
+ }
+ rdev->use_count++;
+ }
+
print_constraints(rdev);
return 0;
}
@@ -1628,8 +1832,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
return ret;
}
- /* Cascade always-on state to supply */
- if (_regulator_is_enabled(rdev)) {
+ /*
+ * In set_machine_constraints() we may have turned this regulator on
+ * but we couldn't propagate to the supply if it hadn't been resolved
+ * yet. Do it now.
+ */
+ if (rdev->use_count) {
ret = regulator_enable(rdev->supply);
if (ret < 0) {
_regulator_put(rdev->supply);
@@ -1713,6 +1921,16 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
return regulator;
}
+ mutex_lock(&regulator_list_mutex);
+ ret = (rdev->coupling_desc.n_resolved != rdev->coupling_desc.n_coupled);
+ mutex_unlock(&regulator_list_mutex);
+
+ if (ret != 0) {
+ regulator = ERR_PTR(-EPROBE_DEFER);
+ put_device(&rdev->dev);
+ return regulator;
+ }
+
ret = regulator_resolve_supply(rdev);
if (ret < 0) {
regulator = ERR_PTR(ret);
@@ -1832,6 +2050,9 @@ static void _regulator_put(struct regulator *regulator)
lockdep_assert_held_once(&regulator_list_mutex);
+ /* Docs say you must disable before calling regulator_put() */
+ WARN_ON(regulator->enable_count);
+
rdev = regulator->rdev;
debugfs_remove_recursive(regulator->debugfs);
@@ -2225,34 +2446,109 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
return 0;
}
+/**
+ * _regulator_handle_consumer_enable - handle that a consumer enabled
+ * @regulator: regulator source
+ *
+ * Some things on a regulator consumer (like the contribution towards total
+ * load on the regulator) only have an effect when the consumer wants the
+ * regulator enabled. This is explained by example with two consumers of the
+ * same regulator:
+ *   consumer A: set_load(100); => total load = 0
+ *   consumer A: regulator_enable(); => total load = 100
+ *   consumer B: set_load(1000); => total load = 100
+ *   consumer B: regulator_enable(); => total load = 1100
+ *   consumer A: regulator_disable(); => total load = 1000
+ *
+ * This function (together with _regulator_handle_consumer_disable) is
+ * responsible for keeping track of the refcount for a given regulator consumer
+ * and applying / unapplying these things.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int _regulator_handle_consumer_enable(struct regulator *regulator)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+
+ lockdep_assert_held_once(&rdev->mutex.base);
+
+ regulator->enable_count++;
+ if (regulator->uA_load && regulator->enable_count == 1)
+ return drms_uA_update(rdev);
+
+ return 0;
+}
+
+/**
+ * _regulator_handle_consumer_disable - handle that a consumer disabled
+ * @regulator: regulator source
+ *
+ * The opposite of _regulator_handle_consumer_enable().
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+static int _regulator_handle_consumer_disable(struct regulator *regulator)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+
+ lockdep_assert_held_once(&rdev->mutex.base);
+
+ if (!regulator->enable_count) {
+ rdev_err(rdev, "Underflow of regulator enable count\n");
+ return -EINVAL;
+ }
+
+ regulator->enable_count--;
+ if (regulator->uA_load && regulator->enable_count == 0)
+ return drms_uA_update(rdev);
+
+ return 0;
+}
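/*
 * A consumer-side sketch of the accounting implemented by the two helpers
 * above (the device and the supply name "vdd" are hypothetical): a load
 * set via regulator_set_load() is only summed into DRMS while this
 * consumer holds an enable.
 *
 *	reg = devm_regulator_get(dev, "vdd");
 *	regulator_set_load(reg, 15000);		uA_load recorded, sum unchanged
 *	regulator_enable(reg);			15000 uA now counted
 *	regulator_disable(reg);			15000 uA dropped from the sum
 */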
+
/* locks held by regulator_enable() */
-static int _regulator_enable(struct regulator_dev *rdev)
+static int _regulator_enable(struct regulator *regulator)
{
+ struct regulator_dev *rdev = regulator->rdev;
int ret;
- lockdep_assert_held_once(&rdev->mutex);
+ lockdep_assert_held_once(&rdev->mutex.base);
- /* check voltage and requested load before enabling */
- if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
- drms_uA_update(rdev);
+ if (rdev->use_count == 0 && rdev->supply) {
+ ret = _regulator_enable(rdev->supply);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* balance only if there are regulators coupled */
+ if (rdev->coupling_desc.n_coupled > 1) {
+ ret = regulator_balance_voltage(rdev, PM_SUSPEND_ON);
+ if (ret < 0)
+ goto err_disable_supply;
+ }
+
+ ret = _regulator_handle_consumer_enable(regulator);
+ if (ret < 0)
+ goto err_disable_supply;
if (rdev->use_count == 0) {
		/* The regulator may be on if it's not switchable or was left on */
ret = _regulator_is_enabled(rdev);
if (ret == -EINVAL || ret == 0) {
if (!regulator_ops_is_valid(rdev,
- REGULATOR_CHANGE_STATUS))
- return -EPERM;
+ REGULATOR_CHANGE_STATUS)) {
+ ret = -EPERM;
+ goto err_consumer_disable;
+ }
ret = _regulator_do_enable(rdev);
if (ret < 0)
- return ret;
+ goto err_consumer_disable;
_notifier_call_chain(rdev, REGULATOR_EVENT_ENABLE,
NULL);
} else if (ret < 0) {
rdev_err(rdev, "is_enabled() failed: %d\n", ret);
- return ret;
+ goto err_consumer_disable;
}
/* Fallthrough on positive return values - already enabled */
}
@@ -2260,6 +2556,15 @@ static int _regulator_enable(struct regulator_dev *rdev)
rdev->use_count++;
return 0;
+
+err_consumer_disable:
+ _regulator_handle_consumer_disable(regulator);
+
+err_disable_supply:
+ if (rdev->use_count == 0 && rdev->supply)
+ _regulator_disable(rdev->supply);
+
+ return ret;
}
/**
@@ -2276,23 +2581,12 @@ static int _regulator_enable(struct regulator_dev *rdev)
int regulator_enable(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
- int ret = 0;
-
- if (regulator->always_on)
- return 0;
-
- if (rdev->supply) {
- ret = regulator_enable(rdev->supply);
- if (ret != 0)
- return ret;
- }
-
- mutex_lock(&rdev->mutex);
- ret = _regulator_enable(rdev);
- mutex_unlock(&rdev->mutex);
+ struct ww_acquire_ctx ww_ctx;
+ int ret;
- if (ret != 0 && rdev->supply)
- regulator_disable(rdev->supply);
+ regulator_lock_dependent(rdev, &ww_ctx);
+ ret = _regulator_enable(regulator);
+ regulator_unlock_dependent(rdev, &ww_ctx);
return ret;
}
@@ -2330,11 +2624,12 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
}
/* locks held by regulator_disable() */
-static int _regulator_disable(struct regulator_dev *rdev)
+static int _regulator_disable(struct regulator *regulator)
{
+ struct regulator_dev *rdev = regulator->rdev;
int ret = 0;
- lockdep_assert_held_once(&rdev->mutex);
+ lockdep_assert_held_once(&rdev->mutex.base);
if (WARN(rdev->use_count <= 0,
"unbalanced disables for %s\n", rdev_get_name(rdev)))
@@ -2366,12 +2661,18 @@ static int _regulator_disable(struct regulator_dev *rdev)
rdev->use_count = 0;
} else if (rdev->use_count > 1) {
- if (regulator_ops_is_valid(rdev, REGULATOR_CHANGE_DRMS))
- drms_uA_update(rdev);
-
rdev->use_count--;
}
+ if (ret == 0)
+ ret = _regulator_handle_consumer_disable(regulator);
+
+ if (ret == 0 && rdev->coupling_desc.n_coupled > 1)
+ ret = regulator_balance_voltage(rdev, PM_SUSPEND_ON);
+
+ if (ret == 0 && rdev->use_count == 0 && rdev->supply)
+ ret = _regulator_disable(rdev->supply);
+
return ret;
}
@@ -2390,17 +2691,12 @@ static int _regulator_disable(struct regulator_dev *rdev)
int regulator_disable(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
- int ret = 0;
-
- if (regulator->always_on)
- return 0;
-
- mutex_lock(&rdev->mutex);
- ret = _regulator_disable(rdev);
- mutex_unlock(&rdev->mutex);
+ struct ww_acquire_ctx ww_ctx;
+ int ret;
- if (ret == 0 && rdev->supply)
- regulator_disable(rdev->supply);
+ regulator_lock_dependent(rdev, &ww_ctx);
+ ret = _regulator_disable(regulator);
+ regulator_unlock_dependent(rdev, &ww_ctx);
return ret;
}
@@ -2411,7 +2707,7 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
{
int ret = 0;
- lockdep_assert_held_once(&rdev->mutex);
+ lockdep_assert_held_once(&rdev->mutex.base);
ret = _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
REGULATOR_EVENT_PRE_DISABLE, NULL);
@@ -2444,16 +2740,25 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
int regulator_force_disable(struct regulator *regulator)
{
struct regulator_dev *rdev = regulator->rdev;
+ struct ww_acquire_ctx ww_ctx;
int ret;
- mutex_lock(&rdev->mutex);
- regulator->uA_load = 0;
+ regulator_lock_dependent(rdev, &ww_ctx);
+
ret = _regulator_force_disable(regulator->rdev);
- mutex_unlock(&rdev->mutex);
- if (rdev->supply)
- while (rdev->open_count--)
- regulator_disable(rdev->supply);
+ if (rdev->coupling_desc.n_coupled > 1)
+ regulator_balance_voltage(rdev, PM_SUSPEND_ON);
+
+ if (regulator->uA_load) {
+ regulator->uA_load = 0;
+ ret = drms_uA_update(rdev);
+ }
+
+ if (rdev->use_count != 0 && rdev->supply)
+ _regulator_disable(rdev->supply);
+
+ regulator_unlock_dependent(rdev, &ww_ctx);
return ret;
}
@@ -2463,14 +2768,12 @@ static void regulator_disable_work(struct work_struct *work)
{
struct regulator_dev *rdev = container_of(work, struct regulator_dev,
disable_work.work);
+ struct ww_acquire_ctx ww_ctx;
int count, i, ret;
+ struct regulator *regulator;
+ int total_count = 0;
- regulator_lock(rdev);
-
- BUG_ON(!rdev->deferred_disables);
-
- count = rdev->deferred_disables;
- rdev->deferred_disables = 0;
+ regulator_lock_dependent(rdev, &ww_ctx);
/*
* Workqueue functions queue the new work instance while the previous
@@ -2480,23 +2783,27 @@ static void regulator_disable_work(struct work_struct *work)
*/
cancel_delayed_work(&rdev->disable_work);
- for (i = 0; i < count; i++) {
- ret = _regulator_disable(rdev);
- if (ret != 0)
- rdev_err(rdev, "Deferred disable failed: %d\n", ret);
- }
+ list_for_each_entry(regulator, &rdev->consumer_list, list) {
+ count = regulator->deferred_disables;
- regulator_unlock(rdev);
+ if (!count)
+ continue;
+
+ total_count += count;
+ regulator->deferred_disables = 0;
- if (rdev->supply) {
for (i = 0; i < count; i++) {
- ret = regulator_disable(rdev->supply);
- if (ret != 0) {
- rdev_err(rdev,
- "Supply disable failed: %d\n", ret);
- }
+ ret = _regulator_disable(regulator);
+ if (ret != 0)
+ rdev_err(rdev, "Deferred disable failed: %d\n", ret);
}
}
+ WARN_ON(!total_count);
+
+ if (rdev->coupling_desc.n_coupled > 1)
+ regulator_balance_voltage(rdev, PM_SUSPEND_ON);
+
+ regulator_unlock_dependent(rdev, &ww_ctx);
}
/**
@@ -2515,14 +2822,11 @@ int regulator_disable_deferred(struct regulator *regulator, int ms)
{
struct regulator_dev *rdev = regulator->rdev;
- if (regulator->always_on)
- return 0;
-
if (!ms)
return regulator_disable(regulator);
regulator_lock(rdev);
- rdev->deferred_disables++;
+ regulator->deferred_disables++;
mod_delayed_work(system_power_efficient_wq, &rdev->disable_work,
msecs_to_jiffies(ms));
regulator_unlock(rdev);
@@ -2597,9 +2901,9 @@ int regulator_is_enabled(struct regulator *regulator)
if (regulator->always_on)
return 1;
- mutex_lock(&regulator->rdev->mutex);
+ regulator_lock(regulator->rdev);
ret = _regulator_is_enabled(regulator->rdev);
- mutex_unlock(&regulator->rdev->mutex);
+ regulator_unlock(regulator->rdev);
return ret;
}
@@ -3013,8 +3317,6 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
int ret = 0;
int old_min_uV, old_max_uV;
int current_uV;
- int best_supply_uV = 0;
- int supply_change_uV = 0;
/* If we're setting the same range as last time the change
* should be a noop (some cpufreq implementations use the same
@@ -3054,10 +3356,27 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
voltage->min_uV = min_uV;
voltage->max_uV = max_uV;
- ret = regulator_check_consumers(rdev, &min_uV, &max_uV, state);
+	/* for non-coupled regulators this will just set the voltage */
+ ret = regulator_balance_voltage(rdev, state);
if (ret < 0)
goto out2;
+out:
+ return 0;
+out2:
+ voltage->min_uV = old_min_uV;
+ voltage->max_uV = old_max_uV;
+
+ return ret;
+}
+
+static int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV,
+ int max_uV, suspend_state_t state)
+{
+ int best_supply_uV = 0;
+ int supply_change_uV = 0;
+ int ret;
+
if (rdev->supply &&
regulator_ops_is_valid(rdev->supply->rdev,
REGULATOR_CHANGE_VOLTAGE) &&
@@ -3069,13 +3388,13 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
selector = regulator_map_voltage(rdev, min_uV, max_uV);
if (selector < 0) {
ret = selector;
- goto out2;
+ goto out;
}
best_supply_uV = _regulator_list_voltage(rdev, selector, 0);
if (best_supply_uV < 0) {
ret = best_supply_uV;
- goto out2;
+ goto out;
}
best_supply_uV += rdev->desc->min_dropout_uV;
@@ -3083,7 +3402,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
current_supply_uV = _regulator_get_voltage(rdev->supply->rdev);
if (current_supply_uV < 0) {
ret = current_supply_uV;
- goto out2;
+ goto out;
}
supply_change_uV = best_supply_uV - current_supply_uV;
@@ -3095,7 +3414,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
if (ret) {
dev_err(&rdev->dev, "Failed to increase supply voltage: %d\n",
ret);
- goto out2;
+ goto out;
}
}
@@ -3105,7 +3424,7 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
ret = _regulator_do_set_suspend_voltage(rdev, min_uV,
max_uV, state);
if (ret < 0)
- goto out2;
+ goto out;
if (supply_change_uV < 0) {
ret = regulator_set_voltage_unlocked(rdev->supply,
@@ -3119,10 +3438,273 @@ static int regulator_set_voltage_unlocked(struct regulator *regulator,
out:
return ret;
-out2:
- voltage->min_uV = old_min_uV;
- voltage->max_uV = old_max_uV;
+}
+
+static int regulator_limit_voltage_step(struct regulator_dev *rdev,
+ int *current_uV, int *min_uV)
+{
+ struct regulation_constraints *constraints = rdev->constraints;
+
+ /* Limit voltage change only if necessary */
+ if (!constraints->max_uV_step || !_regulator_is_enabled(rdev))
+ return 1;
+
+ if (*current_uV < 0) {
+ *current_uV = _regulator_get_voltage(rdev);
+
+ if (*current_uV < 0)
+ return *current_uV;
+ }
+
+ if (abs(*current_uV - *min_uV) <= constraints->max_uV_step)
+ return 1;
+
+ /* Clamp target voltage within the given step */
+ if (*current_uV < *min_uV)
+ *min_uV = min(*current_uV + constraints->max_uV_step,
+ *min_uV);
+ else
+ *min_uV = max(*current_uV - constraints->max_uV_step,
+ *min_uV);
+
+ return 0;
+}
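/*
 * A worked example of the clamping above (all values hypothetical): with
 * max_uV_step = 100000 and the regulator currently at 1000000 uV, a
 * request for 1500000 uV is clamped to
 * min(1000000 + 100000, 1500000) = 1100000 uV and 0 is returned, so the
 * caller knows further steps are needed before the target is reached.
 */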
+
+static int regulator_get_optimal_voltage(struct regulator_dev *rdev,
+ int *current_uV,
+ int *min_uV, int *max_uV,
+ suspend_state_t state,
+ int n_coupled)
+{
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ struct regulator_dev **c_rdevs = c_desc->coupled_rdevs;
+ struct regulation_constraints *constraints = rdev->constraints;
+ int max_spread = constraints->max_spread;
+ int desired_min_uV = 0, desired_max_uV = INT_MAX;
+ int max_current_uV = 0, min_current_uV = INT_MAX;
+ int highest_min_uV = 0, target_uV, possible_uV;
+ int i, ret;
+ bool done;
+
+ *current_uV = -1;
+
+ /*
+ * If there are no coupled regulators, simply set the voltage
+ * demanded by consumers.
+ */
+ if (n_coupled == 1) {
+ /*
+ * If consumers don't provide any demands, set voltage
+ * to min_uV
+ */
+ desired_min_uV = constraints->min_uV;
+ desired_max_uV = constraints->max_uV;
+
+ ret = regulator_check_consumers(rdev,
+ &desired_min_uV,
+ &desired_max_uV, state);
+ if (ret < 0)
+ return ret;
+
+ possible_uV = desired_min_uV;
+ done = true;
+
+ goto finish;
+ }
+
+ /* Find highest min desired voltage */
+ for (i = 0; i < n_coupled; i++) {
+ int tmp_min = 0;
+ int tmp_max = INT_MAX;
+
+ lockdep_assert_held_once(&c_rdevs[i]->mutex.base);
+
+ ret = regulator_check_consumers(c_rdevs[i],
+ &tmp_min,
+ &tmp_max, state);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_check_voltage(c_rdevs[i], &tmp_min, &tmp_max);
+ if (ret < 0)
+ return ret;
+
+ highest_min_uV = max(highest_min_uV, tmp_min);
+
+ if (i == 0) {
+ desired_min_uV = tmp_min;
+ desired_max_uV = tmp_max;
+ }
+ }
+
+ /*
+	 * Let target_uV be equal to the desired voltage if possible.
+	 * If not, set it to the minimum voltage allowed by the other
+	 * coupled regulators.
+ */
+ target_uV = max(desired_min_uV, highest_min_uV - max_spread);
+
+ /*
+	 * Find the min and max voltages that don't currently violate
+	 * max_spread.
+ */
+ for (i = 1; i < n_coupled; i++) {
+ int tmp_act;
+
+ if (!_regulator_is_enabled(c_rdevs[i]))
+ continue;
+
+ tmp_act = _regulator_get_voltage(c_rdevs[i]);
+ if (tmp_act < 0)
+ return tmp_act;
+
+ min_current_uV = min(tmp_act, min_current_uV);
+ max_current_uV = max(tmp_act, max_current_uV);
+ }
+
+ /* There aren't any other regulators enabled */
+ if (max_current_uV == 0) {
+ possible_uV = target_uV;
+ } else {
+ /*
+		 * Correct the target voltage so that it doesn't
+		 * violate max_spread.
+ */
+ possible_uV = max(target_uV, max_current_uV - max_spread);
+ possible_uV = min(possible_uV, min_current_uV + max_spread);
+ }
+
+ if (possible_uV > desired_max_uV)
+ return -EINVAL;
+
+ done = (possible_uV == target_uV);
+ desired_min_uV = possible_uV;
+
+finish:
+ /* Apply max_uV_step constraint if necessary */
+ if (state == PM_SUSPEND_ON) {
+ ret = regulator_limit_voltage_step(rdev, current_uV,
+ &desired_min_uV);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0)
+ done = false;
+ }
+	/* Set current_uV if it wasn't set earlier and if necessary */
+ if (n_coupled > 1 && *current_uV == -1) {
+
+ if (_regulator_is_enabled(rdev)) {
+ ret = _regulator_get_voltage(rdev);
+ if (ret < 0)
+ return ret;
+
+ *current_uV = ret;
+ } else {
+ *current_uV = desired_min_uV;
+ }
+ }
+
+ *min_uV = desired_min_uV;
+ *max_uV = desired_max_uV;
+
+ return done;
+}
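/*
 * A worked example of the spread handling (values hypothetical): two
 * coupled regulators with max_spread = 300000 uV, where this regulator
 * wants 1200000 uV while its enabled partner sits at 800000 uV. Then
 * target_uV = 1200000, but clamping against the partner yields
 * possible_uV = min(max(1200000, 800000 - 300000), 800000 + 300000) =
 * 1100000 uV, so done = false: the partner has to move up before this
 * regulator can reach the desired voltage.
 */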
+
+static int regulator_balance_voltage(struct regulator_dev *rdev,
+ suspend_state_t state)
+{
+ struct regulator_dev **c_rdevs;
+ struct regulator_dev *best_rdev;
+ struct coupling_desc *c_desc = &rdev->coupling_desc;
+ int i, ret, n_coupled, best_min_uV, best_max_uV, best_c_rdev;
+ bool best_c_rdev_done, c_rdev_done[MAX_COUPLED];
+ unsigned int delta, best_delta;
+
+ c_rdevs = c_desc->coupled_rdevs;
+ n_coupled = c_desc->n_coupled;
+
+ /*
+ * If system is in a state other than PM_SUSPEND_ON, don't check
+ * other coupled regulators.
+ */
+ if (state != PM_SUSPEND_ON)
+ n_coupled = 1;
+
+ if (c_desc->n_resolved < n_coupled) {
+ rdev_err(rdev, "Not all coupled regulators registered\n");
+ return -EPERM;
+ }
+
+ for (i = 0; i < n_coupled; i++)
+ c_rdev_done[i] = false;
+
+ /*
+ * Find the best possible voltage change on each loop. Leave the loop
+ * if there isn't any possible change.
+ */
+ do {
+ best_c_rdev_done = false;
+ best_delta = 0;
+ best_min_uV = 0;
+ best_max_uV = 0;
+ best_c_rdev = 0;
+ best_rdev = NULL;
+
+ /*
+ * Find highest difference between optimal voltage
+ * and current voltage.
+ */
+ for (i = 0; i < n_coupled; i++) {
+ /*
+			 * optimal_uV is the best voltage that can be set for
+			 * the i-th regulator at the moment without violating
+			 * the max_spread constraint, in order to balance
+			 * the coupled voltages.
+ */
+ int optimal_uV = 0, optimal_max_uV = 0, current_uV = 0;
+
+ if (c_rdev_done[i])
+ continue;
+
+ ret = regulator_get_optimal_voltage(c_rdevs[i],
+ &current_uV,
+ &optimal_uV,
+ &optimal_max_uV,
+ state, n_coupled);
+ if (ret < 0)
+ goto out;
+
+ delta = abs(optimal_uV - current_uV);
+
+ if (delta && best_delta <= delta) {
+ best_c_rdev_done = ret;
+ best_delta = delta;
+ best_rdev = c_rdevs[i];
+ best_min_uV = optimal_uV;
+ best_max_uV = optimal_max_uV;
+ best_c_rdev = i;
+ }
+ }
+
+ /* Nothing to change, return successfully */
+ if (!best_rdev) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = regulator_set_voltage_rdev(best_rdev, best_min_uV,
+ best_max_uV, state);
+
+ if (ret < 0)
+ goto out;
+
+ c_rdev_done[best_c_rdev] = best_c_rdev_done;
+
+ } while (n_coupled > 1);
+
+out:
return ret;
}
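/*
 * Continuing the worked example above: each pass of the loop picks the
 * regulator with the largest |optimal_uV - current_uV| delta and steps
 * it, so the pair ratchets towards the desired voltages in moves bounded
 * by max_uV_step and max_spread, until no candidate reports a non-zero
 * delta and the loop exits successfully.
 */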
@@ -3146,14 +3728,15 @@ out2:
*/
int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
{
- int ret = 0;
+ struct ww_acquire_ctx ww_ctx;
+ int ret;
- regulator_lock_supply(regulator->rdev);
+ regulator_lock_dependent(regulator->rdev, &ww_ctx);
ret = regulator_set_voltage_unlocked(regulator, min_uV, max_uV,
PM_SUSPEND_ON);
- regulator_unlock_supply(regulator->rdev);
+ regulator_unlock_dependent(regulator->rdev, &ww_ctx);
return ret;
}
@@ -3225,18 +3808,19 @@ static int _regulator_set_suspend_voltage(struct regulator *regulator,
int regulator_set_suspend_voltage(struct regulator *regulator, int min_uV,
int max_uV, suspend_state_t state)
{
- int ret = 0;
+ struct ww_acquire_ctx ww_ctx;
+ int ret;
/* PM_SUSPEND_ON is handled by regulator_set_voltage() */
if (regulator_check_states(state) || state == PM_SUSPEND_ON)
return -EINVAL;
- regulator_lock_supply(regulator->rdev);
+ regulator_lock_dependent(regulator->rdev, &ww_ctx);
ret = _regulator_set_suspend_voltage(regulator, min_uV,
max_uV, state);
- regulator_unlock_supply(regulator->rdev);
+ regulator_unlock_dependent(regulator->rdev, &ww_ctx);
return ret;
}
@@ -3426,13 +4010,12 @@ static int _regulator_get_voltage(struct regulator_dev *rdev)
*/
int regulator_get_voltage(struct regulator *regulator)
{
+ struct ww_acquire_ctx ww_ctx;
int ret;
- regulator_lock_supply(regulator->rdev);
-
+ regulator_lock_dependent(regulator->rdev, &ww_ctx);
ret = _regulator_get_voltage(regulator->rdev);
-
- regulator_unlock_supply(regulator->rdev);
+ regulator_unlock_dependent(regulator->rdev, &ww_ctx);
return ret;
}
@@ -3650,16 +4233,30 @@ EXPORT_SYMBOL_GPL(regulator_get_error_flags);
* DRMS will sum the total requested load on the regulator and change
* to the most efficient operating mode if platform constraints allow.
*
+ * NOTE: when a regulator consumer requests to have a regulator
+ * disabled, any load that consumer requested no longer counts
+ * toward the total requested load. If the regulator is re-enabled
+ * then the previously requested load will start counting again.
+ *
+ * If a regulator is an always-on regulator then an individual consumer's
+ * load will still be removed if that consumer is fully disabled.
+ *
* On error a negative errno is returned.
*/
int regulator_set_load(struct regulator *regulator, int uA_load)
{
struct regulator_dev *rdev = regulator->rdev;
- int ret;
+ int old_uA_load;
+ int ret = 0;
regulator_lock(rdev);
+ old_uA_load = regulator->uA_load;
regulator->uA_load = uA_load;
- ret = drms_uA_update(rdev);
+ if (regulator->enable_count && old_uA_load != uA_load) {
+ ret = drms_uA_update(rdev);
+ if (ret < 0)
+ regulator->uA_load = old_uA_load;
+ }
regulator_unlock(rdev);
return ret;
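/*
 * A brief consumer sketch of the semantics above (the regulator handle
 * is hypothetical): while the consumer is disabled the call merely
 * records the value; once enabled, only an actual change triggers a
 * DRMS re-evaluation, and on failure the old load is restored.
 *
 *	regulator_set_load(reg, 25000);		recorded, no drms_uA_update()
 *	regulator_enable(reg);			load now counted
 *	regulator_set_load(reg, 5000);		changed while enabled: mode
 *						re-evaluated via drms_uA_update()
 */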
@@ -3830,11 +4427,8 @@ int regulator_bulk_enable(int num_consumers,
int ret = 0;
for (i = 0; i < num_consumers; i++) {
- if (consumers[i].consumer->always_on)
- consumers[i].ret = 0;
- else
- async_schedule_domain(regulator_bulk_enable_async,
- &consumers[i], &async_domain);
+ async_schedule_domain(regulator_bulk_enable_async,
+ &consumers[i], &async_domain);
}
async_synchronize_full_domain(&async_domain);
@@ -3968,7 +4562,7 @@ EXPORT_SYMBOL_GPL(regulator_bulk_free);
int regulator_notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
- lockdep_assert_held_once(&rdev->mutex);
+ lockdep_assert_held_once(&rdev->mutex.base);
_notifier_call_chain(rdev, event, data);
return NOTIFY_DONE;
@@ -4070,10 +4664,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_bypass.attr)
return ops->get_bypass ? mode : 0;
- /* some attributes are type-specific */
- if (attr == &dev_attr_requested_microamps.attr)
- return rdev->desc->type == REGULATOR_CURRENT ? mode : 0;
-
/* constraints need specific supporting methods */
if (attr == &dev_attr_min_microvolts.attr ||
attr == &dev_attr_max_microvolts.attr)
@@ -4157,7 +4747,7 @@ static int regulator_register_resolve_supply(struct device *dev, void *data)
return 0;
}
-static int regulator_fill_coupling_array(struct regulator_dev *rdev)
+static void regulator_resolve_coupling(struct regulator_dev *rdev)
{
struct coupling_desc *c_desc = &rdev->coupling_desc;
int n_coupled = c_desc->n_coupled;
@@ -4171,33 +4761,58 @@ static int regulator_fill_coupling_array(struct regulator_dev *rdev)
c_rdev = of_parse_coupled_regulator(rdev, i - 1);
- if (c_rdev) {
- c_desc->coupled_rdevs[i] = c_rdev;
- c_desc->n_resolved++;
- }
- }
+ if (!c_rdev)
+ continue;
- if (rdev->coupling_desc.n_resolved < n_coupled)
- return -1;
- else
- return 0;
+ regulator_lock(c_rdev);
+
+ c_desc->coupled_rdevs[i] = c_rdev;
+ c_desc->n_resolved++;
+
+ regulator_unlock(c_rdev);
+
+ regulator_resolve_coupling(c_rdev);
+ }
}
-static int regulator_register_fill_coupling_array(struct device *dev,
- void *data)
+static void regulator_remove_coupling(struct regulator_dev *rdev)
{
- struct regulator_dev *rdev = dev_to_rdev(dev);
+ struct coupling_desc *__c_desc, *c_desc = &rdev->coupling_desc;
+ struct regulator_dev *__c_rdev, *c_rdev;
+ unsigned int __n_coupled, n_coupled;
+ int i, k;
- if (!IS_ENABLED(CONFIG_OF))
- return 0;
+ n_coupled = c_desc->n_coupled;
- if (regulator_fill_coupling_array(rdev))
- rdev_dbg(rdev, "unable to resolve coupling\n");
+ for (i = 1; i < n_coupled; i++) {
+ c_rdev = c_desc->coupled_rdevs[i];
- return 0;
+ if (!c_rdev)
+ continue;
+
+ regulator_lock(c_rdev);
+
+ __c_desc = &c_rdev->coupling_desc;
+ __n_coupled = __c_desc->n_coupled;
+
+ for (k = 1; k < __n_coupled; k++) {
+ __c_rdev = __c_desc->coupled_rdevs[k];
+
+ if (__c_rdev == rdev) {
+ __c_desc->coupled_rdevs[k] = NULL;
+ __c_desc->n_resolved--;
+ break;
+ }
+ }
+
+ regulator_unlock(c_rdev);
+
+ c_desc->coupled_rdevs[i] = NULL;
+ c_desc->n_resolved--;
+ }
}
-static int regulator_resolve_coupling(struct regulator_dev *rdev)
+static int regulator_init_coupling(struct regulator_dev *rdev)
{
int n_phandles;
@@ -4237,13 +4852,6 @@ static int regulator_resolve_coupling(struct regulator_dev *rdev)
if (!of_check_coupling_data(rdev))
return -EPERM;
- /*
- * After everything has been checked, try to fill rdevs array
- * with pointers to regulators parsed from device tree. If some
- * regulators are not registered yet, retry in late init call
- */
- regulator_fill_coupling_array(rdev);
-
return 0;
}
@@ -4265,21 +4873,33 @@ regulator_register(const struct regulator_desc *regulator_desc,
struct regulator_config *config = NULL;
static atomic_t regulator_no = ATOMIC_INIT(-1);
struct regulator_dev *rdev;
+ bool dangling_cfg_gpiod = false;
+ bool dangling_of_gpiod = false;
struct device *dev;
int ret, i;
- if (regulator_desc == NULL || cfg == NULL)
+ if (cfg == NULL)
return ERR_PTR(-EINVAL);
+ if (cfg->ena_gpiod)
+ dangling_cfg_gpiod = true;
+ if (regulator_desc == NULL) {
+ ret = -EINVAL;
+ goto rinse;
+ }
dev = cfg->dev;
WARN_ON(!dev);
- if (regulator_desc->name == NULL || regulator_desc->ops == NULL)
- return ERR_PTR(-EINVAL);
+ if (regulator_desc->name == NULL || regulator_desc->ops == NULL) {
+ ret = -EINVAL;
+ goto rinse;
+ }
if (regulator_desc->type != REGULATOR_VOLTAGE &&
- regulator_desc->type != REGULATOR_CURRENT)
- return ERR_PTR(-EINVAL);
+ regulator_desc->type != REGULATOR_CURRENT) {
+ ret = -EINVAL;
+ goto rinse;
+ }
/* Only one of each should be implemented */
WARN_ON(regulator_desc->ops->get_voltage &&
@@ -4290,16 +4910,20 @@ regulator_register(const struct regulator_desc *regulator_desc,
/* If we're using selectors we must implement list_voltage. */
if (regulator_desc->ops->get_voltage_sel &&
!regulator_desc->ops->list_voltage) {
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto rinse;
}
if (regulator_desc->ops->set_voltage_sel &&
!regulator_desc->ops->list_voltage) {
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto rinse;
}
rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
- if (rdev == NULL)
- return ERR_PTR(-ENOMEM);
+ if (rdev == NULL) {
+ ret = -ENOMEM;
+ goto rinse;
+ }
/*
* Duplicate the config so the driver could override it after
@@ -4308,17 +4932,28 @@ regulator_register(const struct regulator_desc *regulator_desc,
config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
if (config == NULL) {
kfree(rdev);
- return ERR_PTR(-ENOMEM);
+ ret = -ENOMEM;
+ goto rinse;
}
init_data = regulator_of_get_init_data(dev, regulator_desc, config,
&rdev->dev.of_node);
+ /*
+	 * We need to keep track of any GPIO descriptor coming from the
+	 * device tree until we have handed it over to the core. If the
+	 * config that was passed in to this function DOES NOT contain
+	 * a descriptor, and the config after this call DOES contain
+	 * a descriptor, we definitely got one from parsing the device
+	 * tree.
+ */
+ if (!cfg->ena_gpiod && config->ena_gpiod)
+ dangling_of_gpiod = true;
if (!init_data) {
init_data = config->init_data;
rdev->dev.of_node = of_node_get(config->of_node);
}
- mutex_init(&rdev->mutex);
+ ww_mutex_init(&rdev->mutex, &regulator_ww_class);
rdev->reg_data = config->driver_data;
rdev->owner = regulator_desc->owner;
rdev->desc = regulator_desc;
@@ -4351,6 +4986,9 @@ regulator_register(const struct regulator_desc *regulator_desc,
config->ena_gpio, ret);
goto clean;
}
+ /* The regulator core took over the GPIO descriptor */
+ dangling_cfg_gpiod = false;
+ dangling_of_gpiod = false;
}
/* register with sysfs */
@@ -4380,11 +5018,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
if (ret < 0)
goto wash;
- mutex_lock(&regulator_list_mutex);
- ret = regulator_resolve_coupling(rdev);
- mutex_unlock(&regulator_list_mutex);
-
- if (ret != 0)
+ ret = regulator_init_coupling(rdev);
+ if (ret < 0)
goto wash;
/* add consumers devices */
@@ -4418,6 +5053,11 @@ regulator_register(const struct regulator_desc *regulator_desc,
rdev_init_debugfs(rdev);
+ /* try to resolve regulators coupling since a new one was registered */
+ mutex_lock(&regulator_list_mutex);
+ regulator_resolve_coupling(rdev);
+ mutex_unlock(&regulator_list_mutex);
+
/* try to resolve regulators supply since a new one was registered */
class_for_each_device(&regulator_class, NULL, NULL,
regulator_register_resolve_supply);
@@ -4434,8 +5074,13 @@ wash:
regulator_ena_gpio_free(rdev);
mutex_unlock(&regulator_list_mutex);
clean:
+ if (dangling_of_gpiod)
+ gpiod_put(config->ena_gpiod);
kfree(rdev);
kfree(config);
+rinse:
+ if (dangling_cfg_gpiod)
+ gpiod_put(cfg->ena_gpiod);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(regulator_register);
@@ -4456,15 +5101,19 @@ void regulator_unregister(struct regulator_dev *rdev)
regulator_disable(rdev->supply);
regulator_put(rdev->supply);
}
+
mutex_lock(&regulator_list_mutex);
+
debugfs_remove_recursive(rdev->debugfs);
flush_work(&rdev->disable_work.work);
WARN_ON(rdev->open_count);
+ regulator_remove_coupling(rdev);
unset_regulator_supplies(rdev);
list_del(&rdev->list);
regulator_ena_gpio_free(rdev);
- mutex_unlock(&regulator_list_mutex);
device_unregister(&rdev->dev);
+
+ mutex_unlock(&regulator_list_mutex);
}
EXPORT_SYMBOL_GPL(regulator_unregister);
@@ -4621,23 +5270,8 @@ static int supply_map_show(struct seq_file *sf, void *data)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(supply_map);
-static int supply_map_open(struct inode *inode, struct file *file)
-{
- return single_open(file, supply_map_show, inode->i_private);
-}
-#endif
-
-static const struct file_operations supply_map_fops = {
-#ifdef CONFIG_DEBUG_FS
- .open = supply_map_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-#endif
-};
-
-#ifdef CONFIG_DEBUG_FS
struct summary_data {
struct seq_file *s;
struct regulator_dev *parent;
@@ -4672,8 +5306,6 @@ static void regulator_summary_show_subtree(struct seq_file *s,
if (!rdev)
return;
- regulator_lock_nested(rdev, level);
-
opmode = _regulator_get_mode_unlocked(rdev);
seq_printf(s, "%*s%-*s %3d %4d %6d %7s ",
level * 3 + 1, "",
@@ -4712,8 +5344,11 @@ static void regulator_summary_show_subtree(struct seq_file *s,
switch (rdev->desc->type) {
case REGULATOR_VOLTAGE:
- seq_printf(s, "%37dmA %5dmV %5dmV",
+ seq_printf(s, "%3d %33dmA%c%5dmV %5dmV",
+ consumer->enable_count,
consumer->uA_load / 1000,
+ consumer->uA_load && !consumer->enable_count ?
+ '*' : ' ',
consumer->voltage[PM_SUSPEND_ON].min_uV / 1000,
consumer->voltage[PM_SUSPEND_ON].max_uV / 1000);
break;
@@ -4730,8 +5365,105 @@ static void regulator_summary_show_subtree(struct seq_file *s,
class_for_each_device(&regulator_class, NULL, &summary_data,
regulator_summary_show_children);
+}
+
+struct summary_lock_data {
+ struct ww_acquire_ctx *ww_ctx;
+ struct regulator_dev **new_contended_rdev;
+ struct regulator_dev **old_contended_rdev;
+};
+
+static int regulator_summary_lock_one(struct device *dev, void *data)
+{
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+ struct summary_lock_data *lock_data = data;
+ int ret = 0;
+
+ if (rdev != *lock_data->old_contended_rdev) {
+ ret = regulator_lock_nested(rdev, lock_data->ww_ctx);
+
+ if (ret == -EDEADLK)
+ *lock_data->new_contended_rdev = rdev;
+ else
+ WARN_ON_ONCE(ret);
+ } else {
+ *lock_data->old_contended_rdev = NULL;
+ }
+
+ return ret;
+}
+
+static int regulator_summary_unlock_one(struct device *dev, void *data)
+{
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+ struct summary_lock_data *lock_data = data;
+
+ if (lock_data) {
+ if (rdev == *lock_data->new_contended_rdev)
+ return -EDEADLK;
+ }
regulator_unlock(rdev);
+
+ return 0;
+}
+
+static int regulator_summary_lock_all(struct ww_acquire_ctx *ww_ctx,
+ struct regulator_dev **new_contended_rdev,
+ struct regulator_dev **old_contended_rdev)
+{
+ struct summary_lock_data lock_data;
+ int ret;
+
+ lock_data.ww_ctx = ww_ctx;
+ lock_data.new_contended_rdev = new_contended_rdev;
+ lock_data.old_contended_rdev = old_contended_rdev;
+
+ ret = class_for_each_device(&regulator_class, NULL, &lock_data,
+ regulator_summary_lock_one);
+ if (ret)
+ class_for_each_device(&regulator_class, NULL, &lock_data,
+ regulator_summary_unlock_one);
+
+ return ret;
+}
+
+static void regulator_summary_lock(struct ww_acquire_ctx *ww_ctx)
+{
+ struct regulator_dev *new_contended_rdev = NULL;
+ struct regulator_dev *old_contended_rdev = NULL;
+ int err;
+
+ mutex_lock(&regulator_list_mutex);
+
+ ww_acquire_init(ww_ctx, &regulator_ww_class);
+
+ do {
+ if (new_contended_rdev) {
+ ww_mutex_lock_slow(&new_contended_rdev->mutex, ww_ctx);
+ old_contended_rdev = new_contended_rdev;
+ old_contended_rdev->ref_cnt++;
+ }
+
+ err = regulator_summary_lock_all(ww_ctx,
+ &new_contended_rdev,
+ &old_contended_rdev);
+
+ if (old_contended_rdev)
+ regulator_unlock(old_contended_rdev);
+
+ } while (err == -EDEADLK);
+
+ ww_acquire_done(ww_ctx);
+}
+
+static void regulator_summary_unlock(struct ww_acquire_ctx *ww_ctx)
+{
+ class_for_each_device(&regulator_class, NULL, NULL,
+ regulator_summary_unlock_one);
+ ww_acquire_fini(ww_ctx);
+
+ mutex_unlock(&regulator_list_mutex);
}
static int regulator_summary_show_roots(struct device *dev, void *data)
@@ -4747,29 +5479,22 @@ static int regulator_summary_show_roots(struct device *dev, void *data)
static int regulator_summary_show(struct seq_file *s, void *data)
{
+ struct ww_acquire_ctx ww_ctx;
+
seq_puts(s, " regulator use open bypass opmode voltage current min max\n");
seq_puts(s, "---------------------------------------------------------------------------------------\n");
+ regulator_summary_lock(&ww_ctx);
+
class_for_each_device(&regulator_class, NULL, s,
regulator_summary_show_roots);
- return 0;
-}
+ regulator_summary_unlock(&ww_ctx);
-static int regulator_summary_open(struct inode *inode, struct file *file)
-{
- return single_open(file, regulator_summary_show, inode->i_private);
+ return 0;
}
-#endif
-
-static const struct file_operations regulator_summary_fops = {
-#ifdef CONFIG_DEBUG_FS
- .open = regulator_summary_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-#endif
-};
+DEFINE_SHOW_ATTRIBUTE(regulator_summary);
+#endif /* CONFIG_DEBUG_FS */
static int __init regulator_init(void)
{
@@ -4781,12 +5506,13 @@ static int __init regulator_init(void)
if (!debugfs_root)
pr_warn("regulator: Failed to create debugfs directory\n");
+#ifdef CONFIG_DEBUG_FS
debugfs_create_file("supply_map", 0444, debugfs_root, NULL,
&supply_map_fops);
debugfs_create_file("regulator_summary", 0444, debugfs_root,
NULL, &regulator_summary_fops);
-
+#endif
regulator_dummy_init();
return ret;
@@ -4873,9 +5599,6 @@ static int __init regulator_init_complete(void)
class_for_each_device(&regulator_class, NULL, NULL,
regulator_late_cleanup);
- class_for_each_device(&regulator_class, NULL, NULL,
- regulator_register_fill_coupling_array);
-
return 0;
}
late_initcall_sync(regulator_init_complete);
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 37e4025203e3..207cb3859dcc 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -435,7 +435,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
return -ENODEV;
for_each_child_of_node(nproot, np) {
- if (!of_node_cmp(np->name,
+ if (of_node_name_eq(np,
regulator->info->reg_desc.name)) {
config.init_data = of_get_regulator_init_data(
&pdev->dev, np,
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
index d0496d6b0934..84dba64ed11e 100644
--- a/drivers/regulator/da9210-regulator.c
+++ b/drivers/regulator/da9210-regulator.c
@@ -131,7 +131,7 @@ static irqreturn_t da9210_irq_handler(int irq, void *data)
if (error < 0)
goto error_i2c;
- mutex_lock(&chip->rdev->mutex);
+ regulator_lock(chip->rdev);
if (val & DA9210_E_OVCURR) {
regulator_notifier_call_chain(chip->rdev,
@@ -157,7 +157,7 @@ static irqreturn_t da9210_irq_handler(int irq, void *data)
handled |= DA9210_E_VMAX;
}
- mutex_unlock(&chip->rdev->mutex);
+ regulator_unlock(chip->rdev);
if (handled) {
/* Clear handled events */
diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c
index 8f68c7a05d27..109ee12d4362 100644
--- a/drivers/regulator/da9211-regulator.c
+++ b/drivers/regulator/da9211-regulator.c
@@ -389,6 +389,12 @@ static int da9211_regulator_init(struct da9211 *chip)
else
config.ena_gpiod = NULL;
+ /*
+ * Hand the GPIO descriptor management over to the regulator
+ * core, remove it from GPIO devres management.
+ */
+ if (config.ena_gpiod)
+ devm_gpiod_unhinge(chip->dev, config.ena_gpiod);
chip->rdev[i] = devm_regulator_register(chip->dev,
&da9211_regulators[i], &config);
if (IS_ERR(chip->rdev[i])) {
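/*
 * Two equivalent hand-over idioms appear across these drivers: fetch the
 * descriptor without devres in the first place (as fixed.c does below),
 * or fetch it with devm_gpiod_get*() and detach it with
 * devm_gpiod_unhinge() before registering, as sketched here with a
 * hypothetical "enable" con_id:
 *
 *	gpiod = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
 *	if (IS_ERR(gpiod))
 *		return PTR_ERR(gpiod);
 *	config.ena_gpiod = gpiod;
 *	if (config.ena_gpiod)
 *		devm_gpiod_unhinge(dev, config.ena_gpiod);
 *	rdev = devm_regulator_register(dev, &desc, &config);
 */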
diff --git a/drivers/regulator/dbx500-prcmu.c b/drivers/regulator/dbx500-prcmu.c
index 8976141c1438..308e3ff0a1bd 100644
--- a/drivers/regulator/dbx500-prcmu.c
+++ b/drivers/regulator/dbx500-prcmu.c
@@ -75,7 +75,7 @@ static struct ux500_regulator_debug {
u8 *state_after_suspend;
} rdebug;
-static int ux500_regulator_power_state_cnt_print(struct seq_file *s, void *p)
+static int ux500_regulator_power_state_cnt_show(struct seq_file *s, void *p)
{
/* print power state count */
seq_printf(s, "ux500-regulator power state count: %i\n",
@@ -83,23 +83,9 @@ static int ux500_regulator_power_state_cnt_print(struct seq_file *s, void *p)
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(ux500_regulator_power_state_cnt);
-static int ux500_regulator_power_state_cnt_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, ux500_regulator_power_state_cnt_print,
- inode->i_private);
-}
-
-static const struct file_operations ux500_regulator_power_state_cnt_fops = {
- .open = ux500_regulator_power_state_cnt_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
-
-static int ux500_regulator_status_print(struct seq_file *s, void *p)
+static int ux500_regulator_status_show(struct seq_file *s, void *p)
{
int i;
@@ -122,20 +108,7 @@ static int ux500_regulator_status_print(struct seq_file *s, void *p)
return 0;
}
-
-static int ux500_regulator_status_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ux500_regulator_status_print,
- inode->i_private);
-}
-
-static const struct file_operations ux500_regulator_status_fops = {
- .open = ux500_regulator_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .owner = THIS_MODULE,
-};
+DEFINE_SHOW_ATTRIBUTE(ux500_regulator_status);
int __attribute__((weak)) dbx500_regulator_testcase(
struct dbx500_regulator_info *regulator_info,
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index ccc29038f19a..9abdb9130766 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -183,7 +183,11 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
*/
gflags |= GPIOD_FLAGS_BIT_NONEXCLUSIVE;
- cfg.ena_gpiod = devm_gpiod_get_optional(&pdev->dev, NULL, gflags);
+ /*
+ * Do not use devm* here: the regulator core takes over the
+ * lifecycle management of the GPIO descriptor.
+ */
+ cfg.ena_gpiod = gpiod_get_optional(&pdev->dev, NULL, gflags);
if (IS_ERR(cfg.ena_gpiod))
return PTR_ERR(cfg.ena_gpiod);
diff --git a/drivers/regulator/internal.h b/drivers/regulator/internal.h
index 943926a156f2..6017f15c5d75 100644
--- a/drivers/regulator/internal.h
+++ b/drivers/regulator/internal.h
@@ -42,6 +42,8 @@ struct regulator {
unsigned int always_on:1;
unsigned int bypass:1;
int uA_load;
+ unsigned int enable_count;
+ unsigned int deferred_disables;
struct regulator_voltage voltage[REGULATOR_STATES_NUM];
const char *supply_name;
struct device_attribute dev_attr;
diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
index bbedb08d257b..8c0e8419c43f 100644
--- a/drivers/regulator/lm363x-regulator.c
+++ b/drivers/regulator/lm363x-regulator.c
@@ -224,13 +224,15 @@ static struct gpio_desc *lm363x_regulator_of_get_enable_gpio(struct device *dev,
/*
* Check LCM_EN1/2_GPIO is configured.
* Those pins are used for enabling VPOS/VNEG LDOs.
+ * Do not use devm* here: the regulator core takes over the
+ * lifecycle management of the GPIO descriptor.
*/
switch (id) {
case LM3632_LDO_POS:
- return devm_gpiod_get_index_optional(dev, "enable", 0,
+ return gpiod_get_index_optional(dev, "enable", 0,
GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
case LM3632_LDO_NEG:
- return devm_gpiod_get_index_optional(dev, "enable", 1,
+ return gpiod_get_index_optional(dev, "enable", 1,
GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
default:
return NULL;
@@ -263,6 +265,8 @@ static int lm363x_regulator_probe(struct platform_device *pdev)
LM3632_EXT_EN_MASK,
LM3632_EXT_EN_MASK);
if (ret) {
+ if (gpiod)
+ gpiod_put(gpiod);
dev_err(dev, "External pin err: %d\n", ret);
return ret;
}
diff --git a/drivers/regulator/lochnagar-regulator.c b/drivers/regulator/lochnagar-regulator.c
index 2b5073b9ff86..5a89e6d4b9a6 100644
--- a/drivers/regulator/lochnagar-regulator.c
+++ b/drivers/regulator/lochnagar-regulator.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
@@ -20,6 +21,8 @@
#include <linux/regulator/of_regulator.h>
#include <linux/mfd/lochnagar.h>
+#include <linux/mfd/lochnagar1_regs.h>
+#include <linux/mfd/lochnagar2_regs.h>
static const struct regulator_ops lochnagar_micvdd_ops = {
.enable = regulator_enable_regmap,
@@ -212,28 +215,52 @@ static const struct regulator_desc lochnagar_regulators[] = {
},
};
+static const struct of_device_id lochnagar_of_match[] = {
+ {
+ .compatible = "cirrus,lochnagar2-micvdd",
+ .data = &lochnagar_regulators[LOCHNAGAR_MICVDD],
+ },
+ {
+ .compatible = "cirrus,lochnagar2-mic1vdd",
+ .data = &lochnagar_regulators[LOCHNAGAR_MIC1VDD],
+ },
+ {
+ .compatible = "cirrus,lochnagar2-mic2vdd",
+ .data = &lochnagar_regulators[LOCHNAGAR_MIC1VDD],
+ },
+ {
+ .compatible = "cirrus,lochnagar2-vddcore",
+ .data = &lochnagar_regulators[LOCHNAGAR_VDDCORE],
+ },
+ {},
+};
+
static int lochnagar_regulator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct lochnagar *lochnagar = dev_get_drvdata(dev->parent);
struct regulator_config config = { };
+ const struct of_device_id *of_id;
+ const struct regulator_desc *desc;
struct regulator_dev *rdev;
- int ret, i;
+ int ret;
- config.dev = lochnagar->dev;
+ config.dev = dev;
config.regmap = lochnagar->regmap;
config.driver_data = lochnagar;
- for (i = 0; i < ARRAY_SIZE(lochnagar_regulators); i++) {
- const struct regulator_desc *desc = &lochnagar_regulators[i];
+ of_id = of_match_device(lochnagar_of_match, dev);
+ if (!of_id)
+ return -EINVAL;
- rdev = devm_regulator_register(dev, desc, &config);
- if (IS_ERR(rdev)) {
- ret = PTR_ERR(rdev);
- dev_err(dev, "Failed to register %s regulator: %d\n",
- desc->name, ret);
- return ret;
- }
+ desc = of_id->data;
+
+ rdev = devm_regulator_register(dev, desc, &config);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(dev, "Failed to register %s regulator: %d\n",
+ desc->name, ret);
+ return ret;
}
return 0;
@@ -242,6 +269,7 @@ static int lochnagar_regulator_probe(struct platform_device *pdev)
static struct platform_driver lochnagar_regulator_driver = {
.driver = {
.name = "lochnagar-regulator",
+ .of_match_table = of_match_ptr(lochnagar_of_match),
},
.probe = lochnagar_regulator_probe,
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
index 553b4790050f..2ee22e7ea675 100644
--- a/drivers/regulator/lp8788-ldo.c
+++ b/drivers/regulator/lp8788-ldo.c
@@ -501,8 +501,12 @@ static int lp8788_config_ldo_enable_mode(struct platform_device *pdev,
return 0;
}
- /* FIXME: check default mode for GPIO here: high or low? */
- ldo->ena_gpiod = devm_gpiod_get_index_optional(&pdev->dev,
+ /*
+ * Do not use devm* here: the regulator core takes over the
+ * lifecycle management of the GPIO descriptor.
+ * FIXME: check default mode for GPIO here: high or low?
+ */
+ ldo->ena_gpiod = gpiod_get_index_optional(&pdev->dev,
"enable",
enable_id,
GPIOD_OUT_HIGH |
diff --git a/drivers/regulator/max77686-regulator.c b/drivers/regulator/max77686-regulator.c
index bee060937f56..8020eb57374a 100644
--- a/drivers/regulator/max77686-regulator.c
+++ b/drivers/regulator/max77686-regulator.c
@@ -11,8 +11,7 @@
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
@@ -76,6 +75,7 @@ enum max77686_ramp_rate {
};
struct max77686_data {
+ struct device *dev;
DECLARE_BITMAP(gpio_enabled, MAX77686_REGULATORS);
/* Array indexed by regulator id */
@@ -250,26 +250,34 @@ static int max77686_of_parse_cb(struct device_node *np,
struct regulator_config *config)
{
struct max77686_data *max77686 = config->driver_data;
+ int ret;
switch (desc->id) {
case MAX77686_BUCK8:
case MAX77686_BUCK9:
case MAX77686_LDO20 ... MAX77686_LDO22:
- config->ena_gpio = of_get_named_gpio(np,
- "maxim,ena-gpios", 0);
- config->ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
- config->ena_gpio_initialized = true;
+ config->ena_gpiod = gpiod_get_from_of_node(np,
+ "maxim,ena",
+ 0,
+ GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "max77686-regulator");
+ if (IS_ERR(config->ena_gpiod))
+ config->ena_gpiod = NULL;
break;
default:
return 0;
}
- if (gpio_is_valid(config->ena_gpio)) {
+ if (config->ena_gpiod) {
set_bit(desc->id, max77686->gpio_enabled);
- return regmap_update_bits(config->regmap, desc->enable_reg,
- desc->enable_mask,
- MAX77686_GPIO_CONTROL);
+ ret = regmap_update_bits(config->regmap, desc->enable_reg,
+ desc->enable_mask,
+ MAX77686_GPIO_CONTROL);
+ if (ret) {
+ gpiod_put(config->ena_gpiod);
+ config->ena_gpiod = NULL;
+ }
}
return 0;
@@ -507,6 +515,7 @@ static int max77686_pmic_probe(struct platform_device *pdev)
if (!max77686)
return -ENOMEM;
+ max77686->dev = &pdev->dev;
config.dev = iodev->dev;
config.regmap = iodev->regmap;
config.driver_data = max77686;
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 6c39fff73b8a..cf2a2912cb1b 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -231,9 +231,13 @@ static int max8952_pmic_probe(struct i2c_client *client,
else
gflags = GPIOD_OUT_LOW;
gflags |= GPIOD_FLAGS_BIT_NONEXCLUSIVE;
- gpiod = devm_gpiod_get_optional(&client->dev,
- "max8952,en",
- gflags);
+ /*
+ * Do not use devm* here: the regulator core takes over the
+ * lifecycle management of the GPIO descriptor.
+ */
+ gpiod = gpiod_get_optional(&client->dev,
+ "max8952,en",
+ gflags);
if (IS_ERR(gpiod))
return PTR_ERR(gpiod);
if (gpiod)
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index e7a58b509032..9aee1444181d 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -808,7 +808,13 @@ static int max8973_probe(struct i2c_client *client,
config.of_node = client->dev.of_node;
config.regmap = max->regmap;
- /* Register the regulators */
+ /*
+ * Register the regulators
+ * Turn the GPIO descriptor over to the regulator core for
+ * lifecycle management if we pass an ena_gpiod.
+ */
+ if (config.ena_gpiod)
+ devm_gpiod_unhinge(&client->dev, config.ena_gpiod);
rdev = devm_regulator_register(&client->dev, &max->desc, &config);
if (IS_ERR(rdev)) {
ret = PTR_ERR(rdev);
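devm_gpiod_unhinge() removes a devres-managed descriptor from devres without releasing it. The order used above is the safe one; sketched generically, assuming the descriptor came from an earlier devm_gpiod_get_optional():

	if (config.ena_gpiod)
		devm_gpiod_unhinge(dev, config.ena_gpiod);

	/* the regulator core now owns the descriptor; without the unhinge
	 * it would be released twice on driver unbind */
	rdev = devm_regulator_register(dev, &desc, &config);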
diff --git a/drivers/regulator/max8997-regulator.c b/drivers/regulator/max8997-regulator.c
index 3bf5ddfaaea8..4d2487279a0a 100644
--- a/drivers/regulator/max8997-regulator.c
+++ b/drivers/regulator/max8997-regulator.c
@@ -925,7 +925,7 @@ static int max8997_pmic_dt_parse_pdata(struct platform_device *pdev,
pdata->regulators = rdata;
for_each_child_of_node(regulators_np, reg_np) {
for (i = 0; i < ARRAY_SIZE(regulators); i++)
- if (!of_node_cmp(reg_np->name, regulators[i].name))
+ if (of_node_name_eq(reg_np, regulators[i].name))
break;
if (i == ARRAY_SIZE(regulators)) {
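of_node_name_eq() is the better comparison here for two reasons: it is safe on a NULL node, and it compares only the node-name part up to any unit address, unlike the raw of_node_cmp(reg_np->name, ...) it replaces. A short sketch:

	/* matches child nodes named "ldo1" as well as "ldo1@10" */
	for_each_child_of_node(regulators_np, reg_np)
		if (of_node_name_eq(reg_np, "ldo1"))
			break;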
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 65eb1e0350cf..2243138d8a58 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -186,7 +186,7 @@ struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
for (i = 0; i < num_regulators; i++) {
if (!regulators[i].desc.name)
continue;
- if (!of_node_cmp(child->name,
+ if (of_node_name_eq(child,
regulators[i].desc.name)) {
p->id = i;
p->init_data = of_get_regulator_init_data(
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
new file mode 100644
index 000000000000..3479ae009b0b
--- /dev/null
+++ b/drivers/regulator/mcp16502.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// MCP16502 PMIC driver
+//
+// Copyright (C) 2018 Microchip Technology Inc. and its subsidiaries
+//
+// Author: Andrei Stefanescu <andrei.stefanescu@microchip.com>
+//
+// Inspired by tps65086-regulator.c
+
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/suspend.h>
+
+#define VDD_LOW_SEL 0x0D
+#define VDD_HIGH_SEL 0x3F
+
+#define MCP16502_FLT BIT(7)
+#define MCP16502_ENS BIT(0)
+
+/*
+ * The PMIC has four sets of registers corresponding to four power modes:
+ * Performance, Active, Low-power, Hibernate.
+ *
+ * Registers:
+ * Each regulator has a register for each power mode. To access a register
+ * for a specific regulator and mode, BASE_* and OFFSET_* need to be added.
+ *
+ * Operating modes:
+ * In order for the PMIC to transition between operating modes it has to be
+ * controlled via GPIO lines called LPM and HPM.
+ *
+ * The registers are fully configurable such that you can put all regulators in
+ * a low-power state while the PMIC is in Active mode. They are supposed to be
+ * configured at startup and then simply transition to/from a global low-power
+ * state by setting the GPIO lpm pin high/low.
+ *
+ * This driver keeps the PMIC in Active mode; a low-power state is set for
+ * the regulators individually by switching their operating mode between
+ * FPWM and Auto PFM.
+ *
+ * The PMIC's Low-power and Hibernate modes are used during standby/suspend.
+ * To enter standby/suspend the PMIC will go to Low-power mode. From there, it
+ * will transition to Hibernate when the PWRHLD line is set to low by the MPU.
+ */
+
+/*
+ * These macros are useful for iterating over all regulators and accessing
+ * their registers in a generic way, or for accessing a regulator device by
+ * its id.
+ */
+#define MCP16502_BASE(i) (((i) + 1) << 4)
+#define MCP16502_STAT_BASE(i) ((i) + 5)
+
+#define MCP16502_OFFSET_MODE_A 0
+#define MCP16502_OFFSET_MODE_LPM 1
+#define MCP16502_OFFSET_MODE_HIB 2
+
+#define MCP16502_OPMODE_ACTIVE REGULATOR_MODE_NORMAL
+#define MCP16502_OPMODE_LPM REGULATOR_MODE_IDLE
+#define MCP16502_OPMODE_HIB REGULATOR_MODE_STANDBY
+
+#define MCP16502_MODE_AUTO_PFM 0
+#define MCP16502_MODE_FPWM BIT(6)
+
+#define MCP16502_VSEL 0x3F
+#define MCP16502_EN BIT(7)
+#define MCP16502_MODE BIT(6)
+
+#define MCP16502_MIN_REG 0x0
+#define MCP16502_MAX_REG 0x65
+
+static unsigned int mcp16502_of_map_mode(unsigned int mode)
+{
+ if (mode == REGULATOR_MODE_NORMAL || mode == REGULATOR_MODE_IDLE)
+ return mode;
+
+ return REGULATOR_MODE_INVALID;
+}
+
+#define MCP16502_REGULATOR(_name, _id, _ranges, _ops) \
+ [_id] = { \
+ .name = _name, \
+ .regulators_node = of_match_ptr("regulators"), \
+ .id = _id, \
+ .ops = &(_ops), \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .n_voltages = MCP16502_VSEL + 1, \
+ .linear_ranges = _ranges, \
+ .n_linear_ranges = ARRAY_SIZE(_ranges), \
+ .of_match = of_match_ptr(_name), \
+ .of_map_mode = mcp16502_of_map_mode, \
+ .vsel_reg = (((_id) + 1) << 4), \
+ .vsel_mask = MCP16502_VSEL, \
+ .enable_reg = (((_id) + 1) << 4), \
+ .enable_mask = MCP16502_EN, \
+ }
+
+enum {
+ BUCK1 = 0,
+ BUCK2,
+ BUCK3,
+ BUCK4,
+ LDO1,
+ LDO2,
+ NUM_REGULATORS
+};
+
+/*
+ * struct mcp16502 - PMIC representation
+ * @rdev: the regulators belonging to this chip
+ * @rmap: regmap to be used for I2C communication
+ * @lpm: LPM GPIO descriptor
+ */
+struct mcp16502 {
+ struct regulator_dev *rdev[NUM_REGULATORS];
+ struct regmap *rmap;
+ struct gpio_desc *lpm;
+};
+
+/*
+ * mcp16502_gpio_set_mode() - drive the LPM GPIO to match the operating mode
+ *
+ * Used to prepare transitioning into hibernate or resuming from it.
+ */
+static void mcp16502_gpio_set_mode(struct mcp16502 *mcp, int mode)
+{
+ switch (mode) {
+ case MCP16502_OPMODE_ACTIVE:
+ gpiod_set_value(mcp->lpm, 0);
+ break;
+ case MCP16502_OPMODE_LPM:
+ case MCP16502_OPMODE_HIB:
+ gpiod_set_value(mcp->lpm, 1);
+ break;
+ default:
+ pr_err("%s: %d invalid\n", __func__, mode);
+ }
+}
+
+/*
+ * mcp16502_get_reg() - get the PMIC's configuration register for opmode
+ *
+ * @rdev: the regulator whose register we are searching
+ * @opmode: the PMIC's operating mode: Active, Low-power or Hibernate
+ */
+static int mcp16502_get_reg(struct regulator_dev *rdev, int opmode)
+{
+ int reg = MCP16502_BASE(rdev_get_id(rdev));
+
+ switch (opmode) {
+ case MCP16502_OPMODE_ACTIVE:
+ return reg + MCP16502_OFFSET_MODE_A;
+ case MCP16502_OPMODE_LPM:
+ return reg + MCP16502_OFFSET_MODE_LPM;
+ case MCP16502_OPMODE_HIB:
+ return reg + MCP16502_OFFSET_MODE_HIB;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * mcp16502_get_mode() - return the current operating mode of a regulator
+ *
+ * Note: all functions that are not part of entering/exiting standby/suspend
+ * use the Active mode registers.
+ *
+ * Note: this is different from the PMIC's operating mode; it is the
+ * MODE bit from the regulator's register.
+ */
+static unsigned int mcp16502_get_mode(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret, reg;
+ struct mcp16502 *mcp = rdev_get_drvdata(rdev);
+
+ reg = mcp16502_get_reg(rdev, MCP16502_OPMODE_ACTIVE);
+ if (reg < 0)
+ return reg;
+
+ ret = regmap_read(mcp->rmap, reg, &val);
+ if (ret)
+ return ret;
+
+ switch (val & MCP16502_MODE) {
+ case MCP16502_MODE_FPWM:
+ return REGULATOR_MODE_NORMAL;
+ case MCP16502_MODE_AUTO_PFM:
+ return REGULATOR_MODE_IDLE;
+ default:
+ return REGULATOR_MODE_INVALID;
+ }
+}
+
+/*
+ * _mcp16502_set_mode() - helper for set_mode and set_suspend_mode
+ *
+ * @rdev: the regulator for which we are setting the mode
+ * @mode: the regulator's mode (the one from MODE bit)
+ * @op_mode: the PMIC's operating mode: Active/Low-power/Hibernate
+ */
+static int _mcp16502_set_mode(struct regulator_dev *rdev, unsigned int mode,
+ unsigned int op_mode)
+{
+ int val;
+ int reg;
+ struct mcp16502 *mcp = rdev_get_drvdata(rdev);
+
+ reg = mcp16502_get_reg(rdev, op_mode);
+ if (reg < 0)
+ return reg;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = MCP16502_MODE_FPWM;
+ break;
+ case REGULATOR_MODE_IDLE:
+ val = MCP16502_MODE_AUTO_PFM;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = regmap_update_bits(mcp->rmap, reg, MCP16502_MODE, val);
+ return reg;
+}
+
+/*
+ * mcp16502_set_mode() - regulator_ops set_mode
+ */
+static int mcp16502_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ return _mcp16502_set_mode(rdev, mode, MCP16502_OPMODE_ACTIVE);
+}
+
+/*
+ * mcp16502_get_status() - regulator_ops get_status
+ */
+static int mcp16502_get_status(struct regulator_dev *rdev)
+{
+ int ret;
+ unsigned int val;
+ struct mcp16502 *mcp = rdev_get_drvdata(rdev);
+
+ ret = regmap_read(mcp->rmap, MCP16502_STAT_BASE(rdev_get_id(rdev)),
+ &val);
+ if (ret)
+ return ret;
+
+ if (val & MCP16502_FLT)
+ return REGULATOR_STATUS_ERROR;
+ else if (val & MCP16502_ENS)
+ return REGULATOR_STATUS_ON;
+ else if (!(val & MCP16502_ENS))
+ return REGULATOR_STATUS_OFF;
+
+ return REGULATOR_STATUS_UNDEFINED;
+}
+
+#ifdef CONFIG_SUSPEND
+/*
+ * mcp16502_suspend_get_target_reg() - get the register for the targeted
+ * suspend PMIC mode
+ */
+static int mcp16502_suspend_get_target_reg(struct regulator_dev *rdev)
+{
+ switch (pm_suspend_target_state) {
+ case PM_SUSPEND_STANDBY:
+ return mcp16502_get_reg(rdev, MCP16502_OPMODE_LPM);
+ case PM_SUSPEND_ON:
+ case PM_SUSPEND_MEM:
+ return mcp16502_get_reg(rdev, MCP16502_OPMODE_HIB);
+ default:
+ dev_err(&rdev->dev, "invalid suspend target: %d\n",
+ pm_suspend_target_state);
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * mcp16502_set_suspend_voltage() - regulator_ops set_suspend_voltage
+ */
+static int mcp16502_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+ struct mcp16502 *mcp = rdev_get_drvdata(rdev);
+ int sel = regulator_map_voltage_linear_range(rdev, uV, uV);
+ int reg = mcp16502_suspend_get_target_reg(rdev);
+
+ if (sel < 0)
+ return sel;
+
+ if (reg < 0)
+ return reg;
+
+ return regmap_update_bits(mcp->rmap, reg, MCP16502_VSEL, sel);
+}
+
+/*
+ * mcp16502_set_suspend_mode() - regulator_ops set_suspend_mode
+ */
+static int mcp16502_set_suspend_mode(struct regulator_dev *rdev,
+ unsigned int mode)
+{
+ switch (pm_suspend_target_state) {
+ case PM_SUSPEND_STANDBY:
+ return _mcp16502_set_mode(rdev, mode, MCP16502_OPMODE_LPM);
+ case PM_SUSPEND_ON:
+ case PM_SUSPEND_MEM:
+ return _mcp16502_set_mode(rdev, mode, MCP16502_OPMODE_HIB);
+ default:
+ dev_err(&rdev->dev, "invalid suspend target: %d\n",
+ pm_suspend_target_state);
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * mcp16502_set_suspend_enable() - regulator_ops set_suspend_enable
+ */
+static int mcp16502_set_suspend_enable(struct regulator_dev *rdev)
+{
+ struct mcp16502 *mcp = rdev_get_drvdata(rdev);
+ int reg = mcp16502_suspend_get_target_reg(rdev);
+
+ if (reg < 0)
+ return reg;
+
+ return regmap_update_bits(mcp->rmap, reg, MCP16502_EN, MCP16502_EN);
+}
+
+/*
+ * mcp16502_set_suspend_disable() - regulator_ops set_suspend_disable
+ */
+static int mcp16502_set_suspend_disable(struct regulator_dev *rdev)
+{
+ struct mcp16502 *mcp = rdev_get_drvdata(rdev);
+ int reg = mcp16502_suspend_get_target_reg(rdev);
+
+ if (reg < 0)
+ return reg;
+
+ return regmap_update_bits(mcp->rmap, reg, MCP16502_EN, 0);
+}
+#endif /* CONFIG_SUSPEND */
+
+static const struct regulator_ops mcp16502_buck_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mcp16502_get_status,
+
+ .set_mode = mcp16502_set_mode,
+ .get_mode = mcp16502_get_mode,
+
+#ifdef CONFIG_SUSPEND
+ .set_suspend_voltage = mcp16502_set_suspend_voltage,
+ .set_suspend_mode = mcp16502_set_suspend_mode,
+ .set_suspend_enable = mcp16502_set_suspend_enable,
+ .set_suspend_disable = mcp16502_set_suspend_disable,
+#endif /* CONFIG_SUSPEND */
+};
+
+/*
+ * LDOs cannot change operating modes.
+ */
+static const struct regulator_ops mcp16502_ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mcp16502_get_status,
+
+#ifdef CONFIG_SUSPEND
+ .set_suspend_voltage = mcp16502_set_suspend_voltage,
+ .set_suspend_enable = mcp16502_set_suspend_enable,
+ .set_suspend_disable = mcp16502_set_suspend_disable,
+#endif /* CONFIG_SUSPEND */
+};
+
+static const struct of_device_id mcp16502_ids[] = {
+ { .compatible = "microchip,mcp16502", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mcp16502_ids);
+
+static const struct regulator_linear_range b1l12_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1200000, VDD_LOW_SEL, VDD_HIGH_SEL, 50000),
+};
+
+static const struct regulator_linear_range b234_ranges[] = {
+ REGULATOR_LINEAR_RANGE(600000, VDD_LOW_SEL, VDD_HIGH_SEL, 25000),
+};
+
+static const struct regulator_desc mcp16502_desc[] = {
+ /* MCP16502_REGULATOR(_name, _id, ranges, regulator_ops) */
+ MCP16502_REGULATOR("VDD_IO", BUCK1, b1l12_ranges, mcp16502_buck_ops),
+ MCP16502_REGULATOR("VDD_DDR", BUCK2, b234_ranges, mcp16502_buck_ops),
+ MCP16502_REGULATOR("VDD_CORE", BUCK3, b234_ranges, mcp16502_buck_ops),
+ MCP16502_REGULATOR("VDD_OTHER", BUCK4, b234_ranges, mcp16502_buck_ops),
+ MCP16502_REGULATOR("LDO1", LDO1, b1l12_ranges, mcp16502_ldo_ops),
+ MCP16502_REGULATOR("LDO2", LDO2, b1l12_ranges, mcp16502_ldo_ops)
+};
+
+static const struct regmap_range mcp16502_ranges[] = {
+ regmap_reg_range(MCP16502_MIN_REG, MCP16502_MAX_REG)
+};
+
+static const struct regmap_access_table mcp16502_yes_reg_table = {
+ .yes_ranges = mcp16502_ranges,
+ .n_yes_ranges = ARRAY_SIZE(mcp16502_ranges),
+};
+
+static const struct regmap_config mcp16502_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MCP16502_MAX_REG,
+ .cache_type = REGCACHE_NONE,
+ .rd_table = &mcp16502_yes_reg_table,
+ .wr_table = &mcp16502_yes_reg_table,
+};
+
+/*
+ * setup_regulators() - initialize all regulators
+ */
+static int setup_regulators(struct mcp16502 *mcp, struct device *dev,
+ struct regulator_config config)
+{
+ int i;
+
+ for (i = 0; i < NUM_REGULATORS; i++) {
+ mcp->rdev[i] = devm_regulator_register(dev,
+ &mcp16502_desc[i],
+ &config);
+ if (IS_ERR(mcp->rdev[i])) {
+ dev_err(dev,
+ "failed to register %s regulator %ld\n",
+ mcp16502_desc[i].name, PTR_ERR(mcp->rdev[i]));
+ return PTR_ERR(mcp->rdev[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int mcp16502_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regulator_config config = { };
+ struct device *dev;
+ struct mcp16502 *mcp;
+ int ret = 0;
+
+ dev = &client->dev;
+ config.dev = dev;
+
+ mcp = devm_kzalloc(dev, sizeof(*mcp), GFP_KERNEL);
+ if (!mcp)
+ return -ENOMEM;
+
+ mcp->rmap = devm_regmap_init_i2c(client, &mcp16502_regmap_config);
+ if (IS_ERR(mcp->rmap)) {
+ ret = PTR_ERR(mcp->rmap);
+ dev_err(dev, "regmap init failed: %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(client, mcp);
+ config.regmap = mcp->rmap;
+ config.driver_data = mcp;
+
+ mcp->lpm = devm_gpiod_get(dev, "lpm", GPIOD_OUT_LOW);
+ if (IS_ERR(mcp->lpm)) {
+ dev_err(dev, "failed to get lpm pin: %ld\n", PTR_ERR(mcp->lpm));
+ return PTR_ERR(mcp->lpm);
+ }
+
+ ret = setup_regulators(mcp, dev, config);
+ if (ret != 0)
+ return ret;
+
+ mcp16502_gpio_set_mode(mcp, MCP16502_OPMODE_ACTIVE);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mcp16502_suspend_noirq(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mcp16502 *mcp = i2c_get_clientdata(client);
+
+ mcp16502_gpio_set_mode(mcp, MCP16502_OPMODE_LPM);
+
+ return 0;
+}
+
+static int mcp16502_resume_noirq(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct mcp16502 *mcp = i2c_get_clientdata(client);
+
+ mcp16502_gpio_set_mode(mcp, MCP16502_OPMODE_ACTIVE);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops mcp16502_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mcp16502_suspend_noirq,
+ mcp16502_resume_noirq)
+};
+#endif
+static const struct i2c_device_id mcp16502_i2c_id[] = {
+ { "mcp16502", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mcp16502_i2c_id);
+
+static struct i2c_driver mcp16502_drv = {
+ .probe = mcp16502_probe,
+ .driver = {
+ .name = "mcp16502-regulator",
+ .of_match_table = of_match_ptr(mcp16502_ids),
+#ifdef CONFIG_PM
+ .pm = &mcp16502_pm_ops,
+#endif
+ },
+ .id_table = mcp16502_i2c_id,
+};
+
+module_i2c_driver(mcp16502_drv);
+
+MODULE_VERSION("1.0");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MCP16502 PMIC driver");
+MODULE_AUTHOR("Andrei Stefanescu andrei.stefanescu@microchip.com");
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index c4223b3e0dff..ffa5fc3724e4 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -20,6 +20,7 @@
#include "internal.h"
static const char *const regulator_states[PM_SUSPEND_MAX + 1] = {
+ [PM_SUSPEND_STANDBY] = "regulator-state-standby",
[PM_SUSPEND_MEM] = "regulator-state-mem",
[PM_SUSPEND_MAX] = "regulator-state-disk",
};
@@ -170,6 +171,10 @@ static void of_get_regulation_constraints(struct device_node *np,
&pval))
constraints->max_spread = pval;
+ if (!of_property_read_u32(np, "regulator-max-step-microvolt",
+ &pval))
+ constraints->max_uV_step = pval;
+
constraints->over_current_protection = of_property_read_bool(np,
"regulator-over-current-protection");
@@ -181,9 +186,11 @@ static void of_get_regulation_constraints(struct device_node *np,
case PM_SUSPEND_MAX:
suspend_state = &constraints->state_disk;
break;
+ case PM_SUSPEND_STANDBY:
+ suspend_state = &constraints->state_standby;
+ break;
case PM_SUSPEND_ON:
case PM_SUSPEND_TO_IDLE:
- case PM_SUSPEND_STANDBY:
default:
continue;
}
@@ -364,24 +371,25 @@ int of_regulator_match(struct device *dev, struct device_node *node,
}
EXPORT_SYMBOL_GPL(of_regulator_match);
-struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
- const struct regulator_desc *desc,
- struct regulator_config *config,
- struct device_node **node)
+struct device_node *regulator_of_get_init_node(struct device *dev,
+ const struct regulator_desc *desc)
{
struct device_node *search, *child;
- struct regulator_init_data *init_data = NULL;
const char *name;
if (!dev->of_node || !desc->of_match)
return NULL;
- if (desc->regulators_node)
+ if (desc->regulators_node) {
search = of_get_child_by_name(dev->of_node,
desc->regulators_node);
- else
+ } else {
search = of_node_get(dev->of_node);
+ if (!strcmp(desc->of_match, search->name))
+ return search;
+ }
+
if (!search) {
dev_dbg(dev, "Failed to find regulator container node '%s'\n",
desc->regulators_node);
@@ -393,35 +401,48 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
if (!name)
name = child->name;
- if (strcmp(desc->of_match, name))
- continue;
+ if (!strcmp(desc->of_match, name))
+ return of_node_get(child);
+ }
- init_data = of_get_regulator_init_data(dev, child, desc);
- if (!init_data) {
- dev_err(dev,
- "failed to parse DT for regulator %pOFn\n",
- child);
- break;
- }
+ of_node_put(search);
- if (desc->of_parse_cb) {
- if (desc->of_parse_cb(child, desc, config)) {
- dev_err(dev,
- "driver callback failed to parse DT for regulator %pOFn\n",
- child);
- init_data = NULL;
- break;
- }
- }
+ return NULL;
+}
+
+struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
+ const struct regulator_desc *desc,
+ struct regulator_config *config,
+ struct device_node **node)
+{
+ struct device_node *child;
+ struct regulator_init_data *init_data = NULL;
- of_node_get(child);
- *node = child;
- break;
+ child = regulator_of_get_init_node(dev, desc);
+ if (!child)
+ return NULL;
+
+ init_data = of_get_regulator_init_data(dev, child, desc);
+ if (!init_data) {
+ dev_err(dev, "failed to parse DT for regulator %pOFn\n", child);
+ goto error;
}
- of_node_put(search);
+ if (desc->of_parse_cb && desc->of_parse_cb(child, desc, config)) {
+ dev_err(dev,
+ "driver callback failed to parse DT for regulator %pOFn\n",
+ child);
+ goto error;
+ }
+
+ *node = child;
return init_data;
+
+error:
+ of_node_put(child);
+
+ return NULL;
}
static int of_node_match(struct device *dev, const void *data)
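The lookup split out above is driven by two regulator_desc fields; a hypothetical descriptor makes the contract visible:

	static const struct regulator_desc foo_ldo_desc = {
		.name            = "foo-ldo1",
		/* node name searched for ... */
		.of_match        = of_match_ptr("ldo1"),
		/* ... under this child of the device node; when left unset,
		 * the device node itself can match by name */
		.regulators_node = of_match_ptr("regulators"),
		/* ops, voltage ranges etc. omitted */
	};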
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index bb5ab7d78895..c2cc392a27d4 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -443,13 +443,16 @@ static int palmas_ldo_write(struct palmas *palmas, unsigned int reg,
static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
{
int id = rdev_get_id(dev);
+ int ret;
struct palmas_pmic *pmic = rdev_get_drvdata(dev);
struct palmas_pmic_driver_data *ddata = pmic->palmas->pmic_ddata;
struct palmas_regs_info *rinfo = &ddata->palmas_regs_info[id];
unsigned int reg;
bool rail_enable = true;
- palmas_smps_read(pmic->palmas, rinfo->ctrl_addr, &reg);
+ ret = palmas_smps_read(pmic->palmas, rinfo->ctrl_addr, &reg);
+ if (ret)
+ return ret;
reg &= ~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index dd41a9bb3f5c..df5df1c495ad 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -370,6 +370,7 @@ static struct pfuze_regulator pfuze100_regulators[] = {
PFUZE100_VGEN_REG(PFUZE100, VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
PFUZE100_VGEN_REG(PFUZE100, VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
PFUZE100_VGEN_REG(PFUZE100, VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+ PFUZE100_COIN_REG(PFUZE100, COIN, PFUZE100_COINVOL, 0x7, pfuze100_coin),
};
static struct pfuze_regulator pfuze200_regulators[] = {
@@ -436,6 +437,7 @@ static struct of_regulator_match pfuze100_matches[] = {
{ .name = "vgen4", },
{ .name = "vgen5", },
{ .name = "vgen6", },
+ { .name = "coin", },
};
/* PFUZE200 */
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 39ccf53fdeb3..b2c2d01d1637 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -410,7 +410,7 @@ static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg, struct device *dev,
vreg->dev = dev;
for (rpmh_data = pmic_rpmh_data; rpmh_data->name; rpmh_data++)
- if (!strcmp(rpmh_data->name, node->name))
+ if (of_node_name_eq(node, rpmh_data->name))
break;
if (!rpmh_data->name) {
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 5bb6f4ca48db..ee4a23ab0663 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -5,7 +5,7 @@
#include <linux/bug.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -14,7 +14,6 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
-#include <linux/of_gpio.h>
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps13.h>
@@ -44,7 +43,7 @@ struct s2mps11_info {
* Array (size: number of regulators) with GPIO-s for external
* sleep control.
*/
- int *ext_control_gpio;
+ struct gpio_desc **ext_control_gpiod;
};
static int get_ramp_delay(int ramp_delay)
@@ -511,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
case S2MPS14X:
if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
val = S2MPS14_ENABLE_SUSPEND;
- else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
+ else if (s2mps11->ext_control_gpiod[rdev_get_id(rdev)])
val = S2MPS14_ENABLE_EXT_CONTROL;
else
val = rdev->desc->enable_mask;
@@ -805,7 +804,7 @@ static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev,
struct of_regulator_match *rdata, struct s2mps11_info *s2mps11)
{
- int *gpio = s2mps11->ext_control_gpio;
+ struct gpio_desc **gpio = s2mps11->ext_control_gpiod;
unsigned int i;
unsigned int valid_regulators[3] = { S2MPS14_LDO10, S2MPS14_LDO11,
S2MPS14_LDO12 };
@@ -816,11 +815,20 @@ static void s2mps14_pmic_dt_parse_ext_control_gpio(struct platform_device *pdev,
if (!rdata[reg].init_data || !rdata[reg].of_node)
continue;
- gpio[reg] = of_get_named_gpio(rdata[reg].of_node,
- "samsung,ext-control-gpios", 0);
- if (gpio_is_valid(gpio[reg]))
- dev_dbg(&pdev->dev, "Using GPIO %d for ext-control over %d/%s\n",
- gpio[reg], reg, rdata[reg].name);
+ gpio[reg] = devm_gpiod_get_from_of_node(&pdev->dev,
+ rdata[reg].of_node,
+ "samsung,ext-control-gpios",
+ 0,
+ GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "s2mps11-regulator");
+ if (IS_ERR(gpio[reg])) {
+ dev_err(&pdev->dev, "Failed to get control GPIO for %d/%s\n",
+ reg, rdata[reg].name);
+ continue;
+ }
+ if (gpio[reg])
+ dev_dbg(&pdev->dev, "Using GPIO for ext-control over %d/%s\n",
+ reg, rdata[reg].name);
}
}
@@ -1126,17 +1134,10 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
return -EINVAL;
}
- s2mps11->ext_control_gpio = devm_kmalloc_array(&pdev->dev,
- rdev_num, sizeof(*s2mps11->ext_control_gpio),
- GFP_KERNEL);
- if (!s2mps11->ext_control_gpio)
+ s2mps11->ext_control_gpiod = devm_kcalloc(&pdev->dev, rdev_num,
+ sizeof(*s2mps11->ext_control_gpiod), GFP_KERNEL);
+ if (!s2mps11->ext_control_gpiod)
return -ENOMEM;
- /*
- * 0 is a valid GPIO so initialize all GPIO-s to negative value
- * to indicate that external control won't be used for this regulator.
- */
- for (i = 0; i < rdev_num; i++)
- s2mps11->ext_control_gpio[i] = -EINVAL;
if (!iodev->dev->of_node) {
if (iodev->pdata) {
@@ -1166,8 +1167,6 @@ common_reg:
config.dev = &pdev->dev;
config.regmap = iodev->regmap_pmic;
config.driver_data = s2mps11;
- config.ena_gpio_flags = GPIOF_OUT_INIT_HIGH;
- config.ena_gpio_initialized = true;
for (i = 0; i < rdev_num; i++) {
struct regulator_dev *regulator;
@@ -1178,8 +1177,13 @@ common_reg:
config.init_data = rdata[i].init_data;
config.of_node = rdata[i].of_node;
}
- config.ena_gpio = s2mps11->ext_control_gpio[i];
-
+ config.ena_gpiod = s2mps11->ext_control_gpiod[i];
+ /*
+ * Hand the GPIO descriptor management over to the regulator
+ * core, remove it from devres management.
+ */
+ if (config.ena_gpiod)
+ devm_gpiod_unhinge(&pdev->dev, config.ena_gpiod);
regulator = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
if (IS_ERR(regulator)) {
@@ -1189,7 +1193,7 @@ common_reg:
goto out;
}
- if (gpio_is_valid(s2mps11->ext_control_gpio[i])) {
+ if (s2mps11->ext_control_gpiod[i]) {
ret = s2mps14_pmic_enable_ext_control(s2mps11,
regulator);
if (ret < 0) {
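The descriptor lookup above yields three distinct outcomes that the rest of the driver depends on; in sketch form (assuming, as this hunk does, that an absent property produces NULL rather than an error pointer):

	if (IS_ERR(gpio[reg])) {
		/* property present but unusable: warn and skip */
	} else if (!gpio[reg]) {
		/* property absent: regulator stays register-controlled */
	} else {
		/* valid descriptor: switch to external (GPIO) control */
	}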
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 219b9afda0cb..b581f01f3395 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -561,7 +561,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
pdata->opmode = rmode;
for_each_child_of_node(regulators_np, reg_np) {
for (i = 0; i < ARRAY_SIZE(regulators); i++)
- if (!of_node_cmp(reg_np->name, regulators[i].name))
+ if (of_node_name_eq(reg_np, regulators[i].name))
break;
if (i == ARRAY_SIZE(regulators)) {
@@ -956,10 +956,17 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
config.regmap = iodev->regmap_pmic;
config.of_node = pdata->regulators[i].reg_node;
config.ena_gpiod = NULL;
- if (pdata->regulators[i].ext_control_gpiod)
+ if (pdata->regulators[i].ext_control_gpiod) {
+ /* Assigns config.ena_gpiod */
s5m8767_regulator_config_ext_control(s5m8767,
&pdata->regulators[i], &config);
+ /*
+ * Hand the GPIO descriptor management over to the
+ * regulator core, remove it from devres management.
+ */
+ devm_gpiod_unhinge(s5m8767->dev, config.ena_gpiod);
+ }
rdev = devm_regulator_register(&pdev->dev, &regulators[id],
&config);
if (IS_ERR(rdev)) {
diff --git a/drivers/regulator/stpmic1_regulator.c b/drivers/regulator/stpmic1_regulator.c
index e15634edb8ce..16ba0297f709 100644
--- a/drivers/regulator/stpmic1_regulator.c
+++ b/drivers/regulator/stpmic1_regulator.c
@@ -489,14 +489,14 @@ static irqreturn_t stpmic1_curlim_irq_handler(int irq, void *data)
{
struct regulator_dev *rdev = (struct regulator_dev *)data;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
/* Send an overcurrent notification */
regulator_notifier_call_chain(rdev,
REGULATOR_EVENT_OVER_CURRENT,
NULL);
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return IRQ_HANDLED;
}
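regulator_lock()/regulator_unlock() replace direct rdev->mutex manipulation so the core keeps control of its own locking scheme. The resulting handler shape, sketched with a hypothetical under-voltage IRQ:

	static irqreturn_t foo_uv_irq(int irq, void *data)
	{
		struct regulator_dev *rdev = data;

		regulator_lock(rdev);
		regulator_notifier_call_chain(rdev,
					      REGULATOR_EVENT_UNDER_VOLTAGE,
					      NULL);
		regulator_unlock(rdev);
		return IRQ_HANDLED;
	}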
diff --git a/drivers/regulator/tps65090-regulator.c b/drivers/regulator/tps65090-regulator.c
index db714d5edafc..0614551796a1 100644
--- a/drivers/regulator/tps65090-regulator.c
+++ b/drivers/regulator/tps65090-regulator.c
@@ -480,6 +480,12 @@ static int tps65090_regulator_probe(struct platform_device *pdev)
else
config.of_node = NULL;
+ /*
+ * Hand the GPIO descriptor management over to the regulator
+ * core, remove it from devres management.
+ */
+ if (config.ena_gpiod)
+ devm_gpiod_unhinge(&pdev->dev, config.ena_gpiod);
rdev = devm_regulator_register(&pdev->dev, ri->desc, &config);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 02ccdaa226a7..5ebb6ee73f07 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -1102,8 +1102,10 @@ static int tps65910_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmic);
/* Give control of all register to control port */
- tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
+ err = tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
DEVCTRL_SR_CTL_I2C_SEL_MASK);
+ if (err < 0)
+ return err;
switch (tps65910_chip_id(tps65910)) {
case TPS65910:
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 8ad11b074b49..a1c7dfee5c37 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1153,7 +1153,7 @@ static irqreturn_t pmic_uv_handler(int irq, void *data)
{
struct regulator_dev *rdev = (struct regulator_dev *)data;
- mutex_lock(&rdev->mutex);
+ regulator_lock(rdev);
if (irq == WM8350_IRQ_CS1 || irq == WM8350_IRQ_CS2)
regulator_notifier_call_chain(rdev,
REGULATOR_EVENT_REGULATION_OUT,
@@ -1162,7 +1162,7 @@ static irqreturn_t pmic_uv_handler(int irq, void *data)
regulator_notifier_call_chain(rdev,
REGULATOR_EVENT_UNDER_VOLTAGE,
NULL);
- mutex_unlock(&rdev->mutex);
+ regulator_unlock(rdev);
return IRQ_HANDLED;
}
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 7a4ce6df4f22..38928cdcb6e6 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -19,7 +19,7 @@
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/mfd/wm8994/core.h>
@@ -129,6 +129,7 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
int id = pdev->id % ARRAY_SIZE(pdata->ldo);
struct regulator_config config = { };
struct wm8994_ldo *ldo;
+ struct gpio_desc *gpiod;
int ret;
dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
@@ -145,12 +146,18 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
config.driver_data = ldo;
config.regmap = wm8994->regmap;
config.init_data = &ldo->init_data;
- if (pdata) {
- config.ena_gpio = pdata->ldo[id].enable;
- } else if (wm8994->dev->of_node) {
- config.ena_gpio = wm8994->pdata.ldo[id].enable;
- config.ena_gpio_initialized = true;
- }
+
+ /*
+ * Look up the LDO enable GPIO from the parent device node. We don't
+ * use devm because the regulator core will free the GPIO.
+ */
+ gpiod = gpiod_get_optional(pdev->dev.parent,
+ id ? "wlf,ldo2ena" : "wlf,ldo1ena",
+ GPIOD_OUT_LOW |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+ if (IS_ERR(gpiod))
+ return PTR_ERR(gpiod);
+ config.ena_gpiod = gpiod;
/* Use default constraints if none set up */
if (!pdata || !pdata->ldo[id].init_data || wm8994->dev->of_node) {
@@ -159,12 +166,17 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
ldo->init_data = wm8994_ldo_default[id];
ldo->init_data.consumer_supplies = &ldo->supply;
- if (!config.ena_gpio)
+ if (!gpiod)
ldo->init_data.constraints.valid_ops_mask = 0;
} else {
ldo->init_data = *pdata->ldo[id].init_data;
}
+ /*
+ * At this point the GPIO descriptor is handed over to the
+ * regulator core and we need not worry about it on the
+ * error path.
+ */
ldo->regulator = devm_regulator_register(&pdev->dev,
&wm8994_ldo_desc[id],
&config);
@@ -172,15 +184,12 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
id + 1, ret);
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, ldo);
return 0;
-
-err:
- return ret;
}
static struct platform_driver wm8994_ldo_driver = {
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index de21f620b882..183fc42a510a 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -214,6 +214,16 @@ static u64 rproc_virtio_get_features(struct virtio_device *vdev)
return rsc->dfeatures;
}
+static void rproc_transport_features(struct virtio_device *vdev)
+{
+ /*
+ * Packed ring isn't enabled on remoteproc for now,
+ * because remoteproc uses vring_new_virtqueue() which
+ * creates virtio rings on preallocated memory.
+ */
+ __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
+}
+
static int rproc_virtio_finalize_features(struct virtio_device *vdev)
{
struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
@@ -224,6 +234,9 @@ static int rproc_virtio_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features */
vring_transport_features(vdev);
+ /* Give virtio_rproc a chance to accept features. */
+ rproc_transport_features(vdev);
+
/* Make sure we don't have any features > 32 bits! */
BUG_ON((u32)vdev->features != vdev->features);
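finalize_features() gives each layer a chance to veto feature bits before they are committed; the new helper follows the same shape as vring_transport_features(). A transport masking out a bit it cannot honour looks like:

	static void foo_transport_features(struct virtio_device *vdev)
	{
		/* clear before device and driver settle on the final set */
		__virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
	}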
diff --git a/drivers/rtc/rtc-hid-sensor-time.c b/drivers/rtc/rtc-hid-sensor-time.c
index 2751dba850c6..3e1abb455472 100644
--- a/drivers/rtc/rtc-hid-sensor-time.c
+++ b/drivers/rtc/rtc-hid-sensor-time.c
@@ -213,7 +213,7 @@ static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm)
/* get a report with all values through requesting one value */
sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev,
HID_USAGE_SENSOR_TIME, hid_time_addresses[0],
- time_state->info[0].report_id, SENSOR_HUB_SYNC);
+ time_state->info[0].report_id, SENSOR_HUB_SYNC, false);
/* wait for all values (event) */
ret = wait_for_completion_killable_timeout(
&time_state->comp_last_time, HZ*6);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 2016e0ed5865..8e26001dc11c 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -412,6 +412,7 @@ static int dasd_ioctl_information(struct dasd_block *block,
struct ccw_dev_id dev_id;
struct dasd_device *base;
struct ccw_device *cdev;
+ struct list_head *l;
unsigned long flags;
int rc;
@@ -462,23 +463,10 @@ static int dasd_ioctl_information(struct dasd_block *block,
memcpy(dasd_info->type, base->discipline->name, 4);
- if (block->request_queue->request_fn) {
- struct list_head *l;
-#ifdef DASD_EXTENDED_PROFILING
- {
- struct list_head *l;
- spin_lock_irqsave(&block->lock, flags);
- list_for_each(l, &block->request_queue->queue_head)
- dasd_info->req_queue_len++;
- spin_unlock_irqrestore(&block->lock, flags);
- }
-#endif /* DASD_EXTENDED_PROFILING */
- spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
- list_for_each(l, &base->ccw_queue)
- dasd_info->chanq_len++;
- spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
- flags);
- }
+ spin_lock_irqsave(&block->queue_lock, flags);
+ list_for_each(l, &base->ccw_queue)
+ dasd_info->chanq_len++;
+ spin_unlock_irqrestore(&block->queue_lock, flags);
rc = 0;
if (copy_to_user(argp, dasd_info,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index fd77e46eb3b2..70a006ba4d05 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -387,8 +387,10 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
* orb specified one of the unsupported formats, we defer
* checking for IDAWs in unsupported formats to here.
*/
- if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+ if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
+ kfree(p);
return -EOPNOTSUPP;
+ }
if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
break;
@@ -528,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
if (ret < 0)
- goto out_init;
+ goto out_unpin;
/* Translate this direct ccw to a idal ccw. */
idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index f47d16b5810b..a10cec0e86eb 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -22,7 +22,7 @@
#include "vfio_ccw_private.h"
struct workqueue_struct *vfio_ccw_work_q;
-struct kmem_cache *vfio_ccw_io_region;
+static struct kmem_cache *vfio_ccw_io_region;
/*
* Helpers
@@ -134,14 +134,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (ret)
goto out_free;
- ret = vfio_ccw_mdev_reg(sch);
- if (ret)
- goto out_disable;
-
INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
atomic_set(&private->avail, 1);
private->state = VFIO_CCW_STATE_STANDBY;
+ ret = vfio_ccw_mdev_reg(sch);
+ if (ret)
+ goto out_disable;
+
return 0;
out_disable:
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 048665e4f13d..9f5a201c4c87 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -775,6 +775,8 @@ static int ap_device_probe(struct device *dev)
drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres)
return -ENODEV;
+ /* (re-)init queue's state machine */
+ ap_queue_reinit_state(to_ap_queue(dev));
}
/* Add queue/card to list of active queues/cards */
@@ -807,6 +809,8 @@ static int ap_device_remove(struct device *dev)
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = ap_dev->drv;
+ if (is_queue_dev(dev))
+ ap_queue_remove(to_ap_queue(dev));
if (ap_drv->remove)
ap_drv->remove(ap_dev);
@@ -1444,10 +1448,6 @@ static void ap_scan_bus(struct work_struct *unused)
aq->ap_dev.device.parent = &ac->ap_dev.device;
dev_set_name(&aq->ap_dev.device,
"%02x.%04x", id, dom);
- /* Start with a device reset */
- spin_lock_bh(&aq->lock);
- ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
- spin_unlock_bh(&aq->lock);
/* Register device */
rc = device_register(&aq->ap_dev.device);
if (rc) {
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 3eed1b36c876..bfc66e4a9de1 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -254,6 +254,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
void ap_queue_remove(struct ap_queue *aq);
void ap_queue_suspend(struct ap_device *ap_dev);
void ap_queue_resume(struct ap_device *ap_dev);
+void ap_queue_reinit_state(struct ap_queue *aq);
struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
int comp_device_type, unsigned int functions);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 66f7334bcb03..0aa4b3ccc948 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -718,5 +718,20 @@ void ap_queue_remove(struct ap_queue *aq)
{
ap_flush_queue(aq);
del_timer_sync(&aq->timeout);
+
+ /* reset with zero, also clears irq registration */
+ spin_lock_bh(&aq->lock);
+ ap_zapq(aq->qid);
+ aq->state = AP_STATE_BORKED;
+ spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_remove);
+
+void ap_queue_reinit_state(struct ap_queue *aq)
+{
+ spin_lock_bh(&aq->lock);
+ aq->state = AP_STATE_RESET_START;
+ ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_reinit_state);
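Together with the ap_bus.c hunks above, this moves the queue reset from bus scan to driver binding; the probe-side call added earlier is simply:

	/* in ap_device_probe(), before the queue joins the active list */
	if (is_queue_dev(dev))
		ap_queue_reinit_state(to_ap_queue(dev));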
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 146f54f5cbb8..c50f3e86cc74 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -196,7 +196,6 @@ static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq = aq->private;
- ap_queue_remove(aq);
if (zq)
zcrypt_queue_unregister(zq);
}
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 546f67676734..35c7c6672713 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -251,7 +251,6 @@ static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq = aq->private;
- ap_queue_remove(aq);
if (zq)
zcrypt_queue_unregister(zq);
}
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index f9d4c6c7521d..582ffa7e0f18 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -275,7 +275,6 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
struct zcrypt_queue *zq = aq->private;
- ap_queue_remove(aq);
if (zq)
zcrypt_queue_unregister(zq);
}
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 04e294d1d16d..0ee026947f20 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -314,7 +314,7 @@ struct qeth_hdr_layer3 {
__u16 frame_offset;
union {
/* TX: */
- u8 ipv6_addr[16];
+ struct in6_addr ipv6_addr;
struct ipv4 {
u8 res[12];
u32 addr;
@@ -665,7 +665,6 @@ struct qeth_card_blkt {
#define QETH_BROADCAST_WITH_ECHO 0x01
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
-#define QETH_LAYER2_MAC_READ 0x01
#define QETH_LAYER2_MAC_REGISTERED 0x02
struct qeth_card_info {
unsigned short unit_addr2;
@@ -775,7 +774,6 @@ struct qeth_switch_info {
#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
struct qeth_card {
- struct list_head list;
enum qeth_card_states state;
spinlock_t lock;
struct ccwgroup_device *gdev;
@@ -827,11 +825,6 @@ struct qeth_card {
struct work_struct close_dev_work;
};
-struct qeth_card_list_struct {
- struct list_head list;
- rwlock_t rwlock;
-};
-
struct qeth_trap_id {
__u16 lparnr;
char vmname[8];
@@ -978,11 +971,11 @@ int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
void qeth_core_free_discipline(struct qeth_card *);
/* exports for qeth discipline device drivers */
-extern struct qeth_card_list_struct qeth_core_card_list;
extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
struct net_device *qeth_clone_netdev(struct net_device *orig);
+struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_recovery_task(struct qeth_card *);
void qeth_clear_recovery_task(struct qeth_card *);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
@@ -1025,9 +1018,6 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
void *reply_param);
unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
-int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
- struct qeth_hdr *hdr, unsigned int offset,
- unsigned int hd_len);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
unsigned int offset, unsigned int hd_len,
@@ -1058,11 +1048,6 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
int qeth_vm_request_mac(struct qeth_card *card);
-int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr **hdr, unsigned int hdr_len,
- unsigned int proto_len, unsigned int *elements);
-void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
- struct sk_buff *skb, unsigned int proto_len);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
void (*fill_header)(struct qeth_card *card, struct qeth_hdr *hdr,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 4bce5ae65a55..e63e03143ca7 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -54,8 +54,6 @@ struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
};
EXPORT_SYMBOL_GPL(qeth_dbf);
-struct qeth_card_list_struct qeth_core_card_list;
-EXPORT_SYMBOL_GPL(qeth_core_card_list);
struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
static struct kmem_cache *qeth_qdio_outbuf_cache;
@@ -2837,6 +2835,17 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
cmd->hdr.prot_version = prot;
}
+void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
+{
+ u8 prot_type = qeth_mpc_select_prot_type(card);
+
+ memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
+ memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
+ memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
+ &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
+}
+EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot)
{
@@ -2844,6 +2853,7 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
iob = qeth_get_buffer(&card->write);
if (iob) {
+ qeth_prepare_ipa_cmd(card, iob);
qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot);
} else {
dev_warn(&card->gdev->dev,
@@ -2856,17 +2866,6 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer);
-void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob)
-{
- u8 prot_type = qeth_mpc_select_prot_type(card);
-
- memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
- memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
- memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
- &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
-}
-EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
-
/**
* qeth_send_ipa_cmd() - send an IPA command
*
@@ -2881,7 +2880,6 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int rc;
QETH_CARD_TEXT(card, 4, "sendipa");
- qeth_prepare_ipa_cmd(card, iob);
rc = qeth_send_control_data(card, IPA_CMD_LENGTH,
iob, reply_cb, reply_param);
if (rc == -ETIME) {
@@ -3777,9 +3775,9 @@ EXPORT_SYMBOL_GPL(qeth_count_elements);
* The number of needed buffer elements is returned in @elements.
* Error to create the hdr is indicated by returning with < 0.
*/
-int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
- struct qeth_hdr **hdr, unsigned int hdr_len,
- unsigned int proto_len, unsigned int *elements)
+static int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb,
+ struct qeth_hdr **hdr, unsigned int hdr_len,
+ unsigned int proto_len, unsigned int *elements)
{
const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
const unsigned int contiguous = proto_len ? proto_len : 1;
@@ -3849,7 +3847,6 @@ check_layout:
skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
return 0;
}
-EXPORT_SYMBOL_GPL(qeth_add_hw_header);
static void __qeth_fill_buffer(struct sk_buff *skb,
struct qeth_qdio_out_buffer *buf,
@@ -3972,9 +3969,9 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
return flush_cnt;
}
-int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
- struct qeth_hdr *hdr, unsigned int offset,
- unsigned int hd_len)
+static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
+ struct sk_buff *skb, struct qeth_hdr *hdr,
+ unsigned int offset, unsigned int hd_len)
{
int index = queue->next_buf_to_fill;
struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
@@ -3990,7 +3987,6 @@ int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb,
qeth_flush_buffers(queue, index, 1);
return 0;
}
-EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast);
int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
struct sk_buff *skb, struct qeth_hdr *hdr,
@@ -4082,8 +4078,9 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
-void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
- struct sk_buff *skb, unsigned int proto_len)
+static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
+ unsigned int payload_len, struct sk_buff *skb,
+ unsigned int proto_len)
{
struct qeth_hdr_ext_tso *ext = &hdr->ext;
@@ -4096,7 +4093,6 @@ void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, unsigned int payload_len,
ext->mss = skb_shinfo(skb)->gso_size;
ext->dg_hdr_len = proto_len;
}
-EXPORT_SYMBOL_GPL(qeth_fill_tso_ext);
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type,
@@ -4119,7 +4115,7 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
} else {
hw_hdr_len = sizeof(struct qeth_hdr);
- proto_len = IS_IQD(card) ? ETH_HLEN : 0;
+ proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
}
rc = skb_cow_head(skb, hw_hdr_len);
@@ -4235,16 +4231,18 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_ipacmd_setadpparms *adp_cmd;
QETH_CARD_TEXT(card, 4, "chgmaccb");
if (qeth_setadpparms_inspect_rc(cmd))
return 0;
- if (IS_LAYER3(card) || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) {
- ether_addr_copy(card->dev->dev_addr,
- cmd->data.setadapterparms.data.change_addr.addr);
- card->info.mac_bits |= QETH_LAYER2_MAC_READ;
- }
+ adp_cmd = &cmd->data.setadapterparms;
+ if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
+ !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
+ return 0;
+
+ ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
return 0;
}
@@ -4499,9 +4497,6 @@ static int qeth_send_ipa_snmp_cmd(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "sendsnmp");
- memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
- memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
- &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
/* adjust PDU length fields in IPA_PDU_HEADER */
s1 = (u32) IPA_PDU_HEADER_SIZE + len;
s2 = (u32) len;
@@ -4518,8 +4513,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
struct qeth_arp_query_info *qinfo;
- struct qeth_snmp_cmd *snmp;
unsigned char *data;
+ void *snmp_data;
__u16 data_len;
QETH_CARD_TEXT(card, 3, "snpcmdcb");
@@ -4527,7 +4522,6 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
cmd = (struct qeth_ipa_cmd *) sdata;
data = (unsigned char *)((char *)cmd - reply->offset);
qinfo = (struct qeth_arp_query_info *) reply->param;
- snmp = &cmd->data.setadapterparms.data.snmp;
if (cmd->hdr.return_code) {
QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
@@ -4540,10 +4534,15 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
return 0;
}
data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
- if (cmd->data.setadapterparms.hdr.seq_no == 1)
- data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
- else
- data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
+ if (cmd->data.setadapterparms.hdr.seq_no == 1) {
+ snmp_data = &cmd->data.setadapterparms.data.snmp;
+ data_len -= offsetof(struct qeth_ipa_cmd,
+ data.setadapterparms.data.snmp);
+ } else {
+ snmp_data = &cmd->data.setadapterparms.data.snmp.request;
+ data_len -= offsetof(struct qeth_ipa_cmd,
+ data.setadapterparms.data.snmp.request);
+ }
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
@@ -4556,16 +4555,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
QETH_CARD_TEXT_(card, 4, "sseqn%i",
cmd->data.setadapterparms.hdr.seq_no);
/*copy entries to user buffer*/
- if (cmd->data.setadapterparms.hdr.seq_no == 1) {
- memcpy(qinfo->udata + qinfo->udata_offset,
- (char *)snmp,
- data_len + offsetof(struct qeth_snmp_cmd, data));
- qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
- } else {
- memcpy(qinfo->udata + qinfo->udata_offset,
- (char *)&snmp->request, data_len);
- }
+ memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
qinfo->udata_offset += data_len;
+
/* check if all replies received ... */
QETH_CARD_TEXT_(card, 4, "srtot%i",
cmd->data.setadapterparms.hdr.used_total);
@@ -5480,34 +5472,11 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
-static int qeth_send_setassparms(struct qeth_card *card,
- struct qeth_cmd_buffer *iob, u16 len,
- long data, int (*reply_cb)(struct qeth_card *,
- struct qeth_reply *,
- unsigned long),
- void *reply_param)
-{
- int rc;
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 4, "sendassp");
-
- cmd = __ipa_cmd(iob);
- if (len <= sizeof(__u32))
- cmd->data.setassparms.data.flags_32bit = (__u32) data;
- else /* (len > sizeof(__u32)) */
- memcpy(&cmd->data.setassparms.data, (void *) data, len);
-
- rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
- return rc;
-}
-
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
u16 cmd_code, long data,
enum qeth_prot_versions prot)
{
- int rc;
int length = 0;
struct qeth_cmd_buffer *iob;
@@ -5517,9 +5486,9 @@ int qeth_send_simple_setassparms_prot(struct qeth_card *card,
iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
if (!iob)
return -ENOMEM;
- rc = qeth_send_setassparms(card, iob, length, data,
- qeth_setassparms_cb, NULL);
- return rc;
+
+ __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
+ return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
@@ -5806,9 +5775,6 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
}
- write_lock_irq(&qeth_core_card_list.rwlock);
- list_add_tail(&card->list, &qeth_core_card_list.list);
- write_unlock_irq(&qeth_core_card_list.rwlock);
return 0;
err_disc:
@@ -5833,9 +5799,6 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
qeth_core_free_discipline(card);
}
- write_lock_irq(&qeth_core_card_list.rwlock);
- list_del(&card->list);
- write_unlock_irq(&qeth_core_card_list.rwlock);
free_netdev(card->dev);
qeth_core_free_card(card);
put_device(&gdev->dev);
@@ -5950,6 +5913,21 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
.restore = qeth_core_restore,
};
+struct qeth_card *qeth_get_card_by_busid(char *bus_id)
+{
+ struct ccwgroup_device *gdev;
+ struct qeth_card *card;
+
+ gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
+ if (!gdev)
+ return NULL;
+
+ card = dev_get_drvdata(&gdev->dev);
+ put_device(&gdev->dev);
+ return card;
+}
+EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
+
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct qeth_card *card = dev->ml_priv;
@@ -6381,16 +6359,16 @@ static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
enum qeth_prot_versions prot)
{
struct qeth_cmd_buffer *iob;
- int rc = -ENOMEM;
QETH_CARD_TEXT(card, 4, "chkdocmd");
iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
sizeof(__u32), prot);
- if (iob)
- rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
- qeth_ipa_checksum_run_cmd_cb,
- chksum_cb);
- return rc;
+ if (!iob)
+ return -ENOMEM;
+
+ __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (__u32) data;
+ return qeth_send_ipa_cmd(card, iob, qeth_ipa_checksum_run_cmd_cb,
+ chksum_cb);
}
static int qeth_send_checksum_on(struct qeth_card *card, int cstype,
@@ -6488,8 +6466,7 @@ static int qeth_set_tso_on(struct qeth_card *card,
if (!iob)
return -ENOMEM;
- rc = qeth_send_setassparms(card, iob, 0, 0 /* unused */,
- qeth_start_tso_cb, &tso_data);
+ rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
if (rc)
return rc;
@@ -6506,10 +6483,9 @@ static int qeth_set_tso_on(struct qeth_card *card,
}
/* enable TSO capability */
- caps.supported = 0;
- caps.enabled = QETH_IPA_LARGE_SEND_TCP;
- rc = qeth_send_setassparms(card, iob, sizeof(caps), (long) &caps,
- qeth_setassparms_get_caps_cb, &caps);
+ __ipa_cmd(iob)->data.setassparms.data.caps.enabled =
+ QETH_IPA_LARGE_SEND_TCP;
+ rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
if (rc) {
qeth_set_tso_off(card, prot);
return rc;
@@ -6688,8 +6664,6 @@ static int __init qeth_core_init(void)
int rc;
pr_info("loading core functions\n");
- INIT_LIST_HEAD(&qeth_core_card_list.list);
- rwlock_init(&qeth_core_card_list.rwlock);
qeth_wq = create_singlethread_workqueue("qeth_wq");
if (!qeth_wq) {
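The qeth_core_main.c hunks above fold qeth_send_setassparms() into its callers: the payload is now written in place through __ipa_cmd() and the buffer goes out via the common qeth_send_ipa_cmd() path. A minimal sketch of the resulting calling convention, using only helpers visible in this diff (the function name itself is illustrative):

#include "qeth_core.h"	/* qeth-internal types and helpers, as above */

/* Sketch of the consolidated setassparms pattern introduced above. */
static int example_set_assist_flag(struct qeth_card *card,
				   enum qeth_ipa_funcs func, u16 cmd_code,
				   u32 flags, enum qeth_prot_versions prot)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_get_setassparms_cmd(card, func, cmd_code,
				       sizeof(__u32), prot);
	if (!iob)
		return -ENOMEM;

	/* Fill the command payload directly in the buffer ... */
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = flags;
	/* ... and hand it to the common IPA send path. */
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}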
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index e891c0b52f4c..16fc51ad0514 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -144,7 +144,6 @@ unsigned char IPA_PDU_HEADER[] = {
sizeof(struct qeth_ipa_cmd) % 256,
0x00, 0x00, 0x00, 0x40,
};
-EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
struct ipa_rc_msg {
enum qeth_ipa_return_codes rc;
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 3e54be201b27..1ab321926f64 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -80,7 +80,9 @@ enum qeth_card_types {
};
#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD)
+#define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD)
#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
+#define IS_VM_NIC(card) ((card)->info.guestlan)
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
/* only the first two bytes are looked at in qeth_get_cardname_short */
@@ -529,17 +531,20 @@ struct qeth_query_switch_attributes {
__u8 reserved3[8];
};
+#define QETH_SETADP_FLAGS_VIRTUAL_MAC 0x80 /* for CHANGE_ADDR_READ_MAC */
+
struct qeth_ipacmd_setadpparms_hdr {
- __u32 supp_hw_cmds;
- __u32 reserved1;
- __u16 cmdlength;
- __u16 reserved2;
- __u32 command_code;
- __u16 return_code;
- __u8 used_total;
- __u8 seq_no;
- __u32 reserved3;
-} __attribute__ ((packed));
+ u32 supp_hw_cmds;
+ u32 reserved1;
+ u16 cmdlength;
+ u16 reserved2;
+ u32 command_code;
+ u16 return_code;
+ u8 used_total;
+ u8 seq_no;
+ u8 flags;
+ u8 reserved3[3];
+};
struct qeth_ipacmd_setadpparms {
struct qeth_ipacmd_setadpparms_hdr hdr;
@@ -828,10 +833,9 @@ enum qeth_ipa_arp_return_codes {
extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
-#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
- sizeof(struct qeth_ipacmd_setassparms_hdr))
-#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \
- QETH_SETASS_BASE_LEN)
+#define QETH_SETASS_BASE_LEN (IPA_PDU_HEADER_SIZE + \
+ sizeof(struct qeth_ipacmd_hdr) + \
+ sizeof(struct qeth_ipacmd_setassparms_hdr))
#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
sizeof(struct qeth_ipacmd_setadpparms_hdr))
#define QETH_SNMP_SETADP_CMDLENGTH 16
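The setadpparms header is also rewritten without the packed attribute and gains a flags byte, with QETH_SETADP_FLAGS_VIRTUAL_MAC as its first user. A hypothetical reply-side check, assuming a callback that receives the parsed command (sketch only):

#include "qeth_core_mpc.h"	/* struct and flag as defined above */

/* Sketch: testing the new flags byte in a CHANGE_ADDR_READ_MAC reply. */
static void example_read_mac_flags(const struct qeth_ipacmd_setadpparms *adp)
{
	if (adp->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)
		pr_debug("qeth: adapter reported a virtual MAC\n");
}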
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 2914a1a69f83..f108d4b44605 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -36,28 +36,6 @@ static void qeth_l2_vnicc_init(struct qeth_card *card);
static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
u32 *timeout);
-static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
-{
- struct qeth_card *card;
- struct net_device *ndev;
- __u16 temp_dev_no;
- unsigned long flags;
- struct ccw_dev_id read_devid;
-
- ndev = NULL;
- memcpy(&temp_dev_no, read_dev_no, 2);
- read_lock_irqsave(&qeth_core_card_list.rwlock, flags);
- list_for_each_entry(card, &qeth_core_card_list.list, list) {
- ccw_device_get_id(CARD_RDEV(card), &read_devid);
- if (read_devid.devno == temp_dev_no) {
- ndev = card->dev;
- break;
- }
- }
- read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
- return ndev;
-}
-
static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode)
{
int rc;
@@ -461,12 +439,9 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
/* fall back to alternative mechanism: */
}
- if (card->info.type == QETH_CARD_TYPE_IQD ||
- card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSX ||
- card->info.guestlan) {
+ if (!IS_OSN(card)) {
rc = qeth_setadpparms_change_macaddr(card);
- if (!rc)
+ if (!rc && is_valid_ether_addr(card->dev->dev_addr))
goto out;
QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
CARD_DEVID(card), rc);
@@ -917,7 +892,8 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1));
}
- qeth_l2_request_initial_mac(card);
+ if (!is_valid_ether_addr(card->dev->dev_addr))
+ qeth_l2_request_initial_mac(card);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
if (!rc && carrier_ok)
@@ -1031,7 +1007,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
qeth_l2_set_rx_mode(card->dev);
} else {
rtnl_lock();
- dev_open(card->dev);
+ dev_open(card->dev, NULL);
rtnl_unlock();
}
}
@@ -1288,13 +1264,16 @@ int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev,
int (*data_cb)(struct sk_buff *))
{
struct qeth_card *card;
+ char bus_id[16];
+ u16 devno;
- *dev = qeth_l2_netdev_by_devno(read_dev_no);
- if (*dev == NULL)
- return -ENODEV;
- card = (*dev)->ml_priv;
- if (!card)
+ memcpy(&devno, read_dev_no, 2);
+ sprintf(bus_id, "0.0.%04x", devno);
+ card = qeth_get_card_by_busid(bus_id);
+ if (!card || !IS_OSN(card))
return -ENODEV;
+ *dev = card->dev;
+
QETH_CARD_TEXT(card, 2, "osnreg");
if ((assist_cb == NULL) || (data_cb == NULL))
return -EINVAL;
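qeth_osn_register() now derives the ccwgroup bus id from the two-byte device number and resolves the card through qeth_get_card_by_busid(). The "0.0.%04x" formatting in isolation, as a self-contained C snippet (the device number is made up):

#include <stdio.h>

int main(void)
{
	unsigned short devno = 0xf5a2;	/* made-up device number */
	char bus_id[16];

	/* Same format string as qeth_osn_register() above. */
	snprintf(bus_id, sizeof(bus_id), "0.0.%04x", devno);
	printf("%s\n", bus_id);		/* prints 0.0.f5a2 */
	return 0;
}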
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index f08b745c2007..42a7cdc59b76 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -949,9 +949,6 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
if (cmd->hdr.return_code == 0)
ether_addr_copy(card->dev->dev_addr,
cmd->data.create_destroy_addr.unique_id);
- else
- eth_random_addr(card->dev->dev_addr);
-
return 0;
}
@@ -1685,21 +1682,6 @@ out_error:
return 0;
}
-static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card,
- struct qeth_cmd_buffer *iob, int len,
- int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
- unsigned long),
- void *reply_param)
-{
- QETH_CARD_TEXT(card, 4, "sendarp");
-
- memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
- memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
- &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
- return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob,
- reply_cb, reply_param);
-}
-
static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
enum qeth_prot_versions prot,
struct qeth_arp_query_info *qinfo)
@@ -1719,11 +1701,9 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
return -ENOMEM;
cmd = __ipa_cmd(iob);
cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
- cmd->data.setassparms.data.query_arp.reply_bits = 0;
- cmd->data.setassparms.data.query_arp.no_entries = 0;
- rc = qeth_l3_send_ipa_arp_cmd(card, iob,
- QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
- qeth_l3_arp_query_cb, (void *)qinfo);
+ rc = qeth_send_control_data(card,
+ QETH_SETASS_BASE_LEN + QETH_ARP_CMD_LEN,
+ iob, qeth_l3_arp_query_cb, qinfo);
if (rc)
QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
CARD_DEVID(card), rc);
@@ -1929,22 +1909,6 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
}
}
-static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb,
- unsigned int data_len)
-{
- char daddr[16];
-
- hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
- hdr->hdr.l3.length = data_len;
- hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
-
- memset(daddr, 0, sizeof(daddr));
- daddr[0] = 0xfe;
- daddr[1] = 0x80;
- memcpy(&daddr[8], iucv_trans_hdr(skb)->destUserID, 8);
- memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16);
-}
-
static u8 qeth_l3_cast_type_to_flag(int cast_type)
{
if (cast_type == RTN_MULTICAST)
@@ -1960,6 +1924,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
struct sk_buff *skb, int ipv, int cast_type,
unsigned int data_len)
{
+ struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
hdr->hdr.l3.length = data_len;
@@ -1968,6 +1933,15 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO;
} else {
hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+
+ if (skb->protocol == htons(ETH_P_AF_IUCV)) {
+ l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+ l3_hdr->next_hop.ipv6_addr.s6_addr16[0] = htons(0xfe80);
+ memcpy(&l3_hdr->next_hop.ipv6_addr.s6_addr32[2],
+ iucv_trans_hdr(skb)->destUserID, 8);
+ return;
+ }
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv);
/* some HW requires combined L3+L4 csum offload: */
@@ -2012,13 +1986,11 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
} else {
/* IPv6 */
const struct rt6_info *rt = skb_rt6_info(skb);
- const struct in6_addr *next_hop;
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
- next_hop = &rt->rt6i_gateway;
+ l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
else
- next_hop = &ipv6_hdr(skb)->daddr;
- memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16);
+ l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
if (card->info.type != QETH_CARD_TYPE_IQD)
@@ -2044,84 +2016,25 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
- unsigned int hw_hdr_len, proto_len, frame_len, elements;
unsigned char eth_hdr[ETH_HLEN];
- bool is_tso = skb_is_gso(skb);
- unsigned int data_offset = 0;
- struct qeth_hdr *hdr = NULL;
- unsigned int hd_len = 0;
- int push_len, rc;
- bool is_sg;
-
- if (is_tso) {
- hw_hdr_len = sizeof(struct qeth_hdr_tso);
- proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb) -
- ETH_HLEN;
- } else {
- hw_hdr_len = sizeof(struct qeth_hdr);
- proto_len = 0;
- }
+ unsigned int hw_hdr_len;
+ int rc;
/* re-use the L2 header area for the HW header: */
+ hw_hdr_len = skb_is_gso(skb) ? sizeof(struct qeth_hdr_tso) :
+ sizeof(struct qeth_hdr);
rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
if (rc)
return rc;
skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
skb_pull(skb, ETH_HLEN);
- frame_len = skb->len;
qeth_l3_fixup_headers(skb);
- push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len,
- &elements);
- if (push_len < 0)
- return push_len;
- if (is_tso || !push_len) {
- /* HW header needs its own buffer element. */
- hd_len = hw_hdr_len + proto_len;
- data_offset = push_len + proto_len;
- }
- memset(hdr, 0, hw_hdr_len);
-
- if (skb->protocol == htons(ETH_P_AF_IUCV)) {
- qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len);
- } else {
- qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len);
- if (is_tso)
- qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
- frame_len - proto_len, skb,
- proto_len);
- }
-
- is_sg = skb_is_nonlinear(skb);
- if (IS_IQD(card)) {
- rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
- hd_len);
- } else {
- /* TODO: drop skb_orphan() once TX completion is fast enough */
- skb_orphan(skb);
- rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
- hd_len, elements);
- }
-
- if (!rc) {
- if (card->options.performance_stats) {
- card->perf_stats.buf_elements_sent += elements;
- if (is_sg)
- card->perf_stats.sg_skbs_sent++;
- if (is_tso) {
- card->perf_stats.large_send_bytes += frame_len;
- card->perf_stats.large_send_cnt++;
- }
- }
- } else {
- if (!push_len)
- kmem_cache_free(qeth_core_header_cache, hdr);
- if (rc == -EBUSY) {
- /* roll back to ETH header */
- skb_pull(skb, push_len);
- skb_push(skb, ETH_HLEN);
- skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
- }
+ rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
+ if (rc == -EBUSY) {
+ /* roll back to ETH header */
+ skb_push(skb, ETH_HLEN);
+ skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
}
return rc;
}
@@ -2366,9 +2279,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
rc = qeth_l3_iqd_read_initial_mac(card);
if (rc)
goto out;
-
- if (card->options.hsuid[0])
- memcpy(card->dev->perm_addr, card->options.hsuid, 9);
} else
return -ENODEV;
@@ -2507,7 +2417,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
__qeth_l3_open(card->dev);
qeth_l3_set_rx_mode(card->dev);
} else {
- dev_open(card->dev);
+ dev_open(card->dev, NULL);
}
rtnl_unlock();
}
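Worth noting in qeth_l3_fill_header(): the AF_IUCV next hop is now composed directly as an IPv6 link-local address instead of going through a temporary char buffer. The layout, extracted as a sketch (hypothetical helper; assumes an 8-byte destination user id as in iucv_trans_hdr()):

#include <linux/in6.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* Sketch: build fe80::<8-byte IUCV user id>, as in the hunk above. */
static void example_iucv_next_hop(struct in6_addr *nh, const u8 *dest_user_id)
{
	memset(nh, 0, sizeof(*nh));
	nh->s6_addr16[0] = htons(0xfe80);		/* link-local prefix */
	memcpy(&nh->s6_addr32[2], dest_user_id, 8);	/* interface id */
}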
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 97b6f197f007..fc9dbad476c0 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -56,6 +56,7 @@ struct virtio_ccw_device {
unsigned int revision; /* Transport revision */
wait_queue_head_t wait_q;
spinlock_t lock;
+ struct mutex io_lock; /* Serializes I/O requests */
struct list_head virtqueues;
unsigned long indicators;
unsigned long indicators2;
@@ -296,6 +297,7 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
unsigned long flags;
int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
+ mutex_lock(&vcdev->io_lock);
do {
spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
@@ -308,7 +310,9 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
cpu_relax();
} while (ret == -EBUSY);
wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
- return ret ? ret : vcdev->err;
+ ret = ret ? ret : vcdev->err;
+ mutex_unlock(&vcdev->io_lock);
+ return ret;
}
static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
@@ -765,6 +769,17 @@ out_free:
return rc;
}
+static void ccw_transport_features(struct virtio_device *vdev)
+{
+ /*
+ * Packed ring isn't enabled on virtio_ccw for now,
+ * because virtio_ccw uses some legacy accessors,
+ * e.g. virtqueue_get_avail() and virtqueue_get_used(),
+ * which aren't available with packed rings yet.
+ */
+ __virtio_clear_bit(vdev, VIRTIO_F_RING_PACKED);
+}
+
static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
@@ -791,6 +806,9 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
+ /* Give virtio_ccw a chance to accept features. */
+ ccw_transport_features(vdev);
+
features->index = 0;
features->features = cpu_to_le32((u32)vdev->features);
/* Write the first half of the feature bits to the host. */
@@ -828,6 +846,7 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
int ret;
struct ccw1 *ccw;
void *config_area;
+ unsigned long flags;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
if (!ccw)
@@ -846,11 +865,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
if (ret)
goto out_free;
+ spin_lock_irqsave(&vcdev->lock, flags);
memcpy(vcdev->config, config_area, offset + len);
- if (buf)
- memcpy(buf, &vcdev->config[offset], len);
if (vcdev->config_ready < offset + len)
vcdev->config_ready = offset + len;
+ spin_unlock_irqrestore(&vcdev->lock, flags);
+ if (buf)
+ memcpy(buf, config_area + offset, len);
out_free:
kfree(config_area);
@@ -864,6 +885,7 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct ccw1 *ccw;
void *config_area;
+ unsigned long flags;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
if (!ccw)
@@ -876,9 +898,11 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
/* Make sure we don't overwrite fields. */
if (vcdev->config_ready < offset)
virtio_ccw_get_config(vdev, 0, NULL, offset);
+ spin_lock_irqsave(&vcdev->lock, flags);
memcpy(&vcdev->config[offset], buf, len);
/* Write the config area to the host. */
memcpy(config_area, vcdev->config, sizeof(vcdev->config));
+ spin_unlock_irqrestore(&vcdev->lock, flags);
ccw->cmd_code = CCW_CMD_WRITE_CONF;
ccw->flags = 0;
ccw->count = offset + len;
@@ -1247,6 +1271,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
init_waitqueue_head(&vcdev->wait_q);
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
+ mutex_init(&vcdev->io_lock);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, vcdev);
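Two separate protections are added to virtio_ccw here: io_lock serializes whole channel programs (the path may sleep, hence a mutex), while the existing vcdev->lock spinlock now also covers the cached config space. The config-cache side, condensed into a sketch (types as defined in virtio_ccw.c above):

/* Sketch of the config-cache locking introduced above. */
static void example_cache_config(struct virtio_ccw_device *vcdev,
				 const void *config_area, unsigned int len)
{
	unsigned long flags;

	spin_lock_irqsave(&vcdev->lock, flags);
	memcpy(vcdev->config, config_area, len);
	if (vcdev->config_ready < len)
		vcdev->config_ready = len;
	spin_unlock_irqrestore(&vcdev->lock, flags);
	/* Copies to the caller's buffer happen outside the lock. */
}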
diff --git a/drivers/sbus/char/bbc_envctrl.c b/drivers/sbus/char/bbc_envctrl.c
index fb5bcf6dddc1..4f2dd21e44a0 100644
--- a/drivers/sbus/char/bbc_envctrl.c
+++ b/drivers/sbus/char/bbc_envctrl.c
@@ -571,9 +571,9 @@ int bbc_envctrl_init(struct bbc_i2c_bus *bp)
int devidx = 0;
while ((op = bbc_i2c_getdev(bp, devidx++)) != NULL) {
- if (!strcmp(op->dev.of_node->name, "temperature"))
+ if (of_node_name_eq(op->dev.of_node, "temperature"))
attach_one_temp(bp, op, temp_index++);
- if (!strcmp(op->dev.of_node->name, "fan-control"))
+ if (of_node_name_eq(op->dev.of_node, "fan-control"))
attach_one_fan(bp, op, fan_index++);
}
if (temp_index != 0 && fan_index != 0) {
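This and the following sbus/char conversions replace raw strcmp() on device_node->name with of_node_name_eq(), which tolerates a NULL node and ignores any unit address after '@'. The pattern in isolation (the node name is just an example):

#include <linux/of.h>

/* Before: !strcmp(np->name, "temperature") — not NULL-safe.
 * After:  of_node_name_eq(np, "temperature") — NULL-safe, and compares the
 * node name only up to any '@<unit-address>' suffix. */
static bool example_is_temperature(const struct device_node *np)
{
	return of_node_name_eq(np, "temperature");
}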
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 5c8ed7350a04..a36e4cf1841d 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -220,6 +220,7 @@ static int d7s_probe(struct platform_device *op)
dev_set_drvdata(&op->dev, p);
d7s_device = p;
err = 0;
+ of_node_put(opts);
out:
return err;
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 56e962a01493..1a6e7224017c 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -906,12 +906,14 @@ static void envctrl_init_i2c_child(struct device_node *dp,
int len;
root_node = of_find_node_by_path("/");
- if (!strcmp(root_node->name, "SUNW,UltraSPARC-IIi-cEngine")) {
+ if (of_node_name_eq(root_node, "SUNW,UltraSPARC-IIi-cEngine")) {
for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) {
pchild->mon_type[len] = ENVCTRL_NOMON;
}
+ of_node_put(root_node);
return;
}
+ of_node_put(root_node);
}
/* Get the monitor channels. */
@@ -1037,10 +1039,10 @@ static int envctrl_probe(struct platform_device *op)
index = 0;
dp = op->dev.of_node->child;
while (dp) {
- if (!strcmp(dp->name, "gpio")) {
+ if (of_node_name_eq(dp, "gpio")) {
i2c_childlist[index].i2ctype = I2C_GPIO;
envctrl_init_i2c_child(dp, &(i2c_childlist[index++]));
- } else if (!strcmp(dp->name, "adc")) {
+ } else if (of_node_name_eq(dp, "adc")) {
i2c_childlist[index].i2ctype = I2C_ADC;
envctrl_init_i2c_child(dp, &(i2c_childlist[index++]));
}
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c
index a610b8d3d11f..515dc1ff1e33 100644
--- a/drivers/sbus/char/flash.c
+++ b/drivers/sbus/char/flash.c
@@ -165,9 +165,9 @@ static int flash_probe(struct platform_device *op)
parent = dp->parent;
- if (strcmp(parent->name, "sbus") &&
- strcmp(parent->name, "sbi") &&
- strcmp(parent->name, "ebus"))
+ if (!of_node_name_eq(parent, "sbus") &&
+ !of_node_name_eq(parent, "sbi") &&
+ !of_node_name_eq(parent, "ebus"))
return -ENODEV;
flash.read_base = op->resource[0].start;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 640cd1b31a18..f38882f6f37d 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -50,18 +50,6 @@ config SCSI_NETLINK
default n
depends on NET
-config SCSI_MQ_DEFAULT
- bool "SCSI: use blk-mq I/O path by default"
- default y
- depends on SCSI
- ---help---
- This option enables the blk-mq based I/O path for SCSI devices by
- default. With this option the scsi_mod.use_blk_mq module/boot
- option defaults to Y, without it to N, but it can still be
- overridden either way.
-
- If unsure say Y.
-
config SCSI_PROC_FS
bool "legacy /proc/scsi/ support"
depends on SCSI && PROC_FS
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index cd160f2ec75d..bcd30e2374f1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2364,7 +2364,7 @@ static int _bnx2fc_create(struct net_device *netdev,
if (!interface) {
printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
rc = -ENOMEM;
- goto ifput_err;
+ goto netdev_err;
}
if (is_vlan_dev(netdev)) {
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e9e669a6c2bc..6bad2689edd4 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1906,7 +1906,6 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
struct iscsi_task *task;
struct scsi_cmnd *sc;
int rc = 0;
- int cpu;
spin_lock(&session->back_lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
@@ -1917,14 +1916,9 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
}
sc = task->sc;
- if (!blk_rq_cpu_valid(sc->request))
- cpu = smp_processor_id();
- else
- cpu = sc->request->cpu;
-
spin_unlock(&session->back_lock);
- p = &per_cpu(bnx2i_percpu, cpu);
+ p = &per_cpu(bnx2i_percpu, blk_mq_rq_cpu(sc->request));
spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) {
rc = -EINVAL;
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 8c15b7acb4b7..a95debbea0e4 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1780,16 +1780,10 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
int nsge = 0;
int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
int retval;
- int cpu;
struct csio_scsi_qset *sqset;
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
- if (!blk_rq_cpu_valid(cmnd->request))
- cpu = smp_processor_id();
- else
- cpu = cmnd->request->cpu;
-
- sqset = &hw->sqset[ln->portid][cpu];
+ sqset = &hw->sqset[ln->portid][blk_mq_rq_cpu(cmnd->request)];
nr = fc_remote_port_chkready(rport);
if (nr) {
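With the legacy request path gone, every request carries a valid blk-mq context, so the "is request->cpu valid" dance in bnx2i and csiostor collapses into blk_mq_rq_cpu(). A sketch reusing the csiostor types from the hunk above:

#include <linux/blk-mq.h>

/* blk_mq_rq_cpu() reports the CPU the request was allocated on; under
 * blk-mq it is always valid, so no smp_processor_id() fallback is needed. */
static struct csio_scsi_qset *example_pick_qset(struct csio_hw *hw,
						u8 portid, struct request *rq)
{
	return &hw->sqset[portid][blk_mq_rq_cpu(rq)];
}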
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 064ef5735182..907dd8792a0a 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1767,8 +1767,7 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->mtu = dst_mtu(csk->dst);
cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
csk->tx_chan = cxgb4_port_chan(ndev);
- csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
- cxgb4_port_viid(ndev));
+ csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
step = lldi->ntxq / lldi->nchan;
csk->txq_idx = cxgb4_port_idx(ndev) * step;
step = lldi->nrxq / lldi->nchan;
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 6637116529aa..abdc9eac4173 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3088,12 +3088,6 @@ static ssize_t hwq_mode_store(struct device *dev,
return -EINVAL;
}
- if ((mode == HWQ_MODE_TAG) && !shost_use_blk_mq(shost)) {
- dev_info(cfgdev, "SCSI-MQ is not enabled, use a different "
- "HWQ steering mode.\n");
- return -EINVAL;
- }
-
afu->hwq_mode = mode;
return count;
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 12dc7100bb4c..d7ac498ba35a 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1071,28 +1071,29 @@ static void alua_check(struct scsi_device *sdev, bool force)
* Fail I/O to all paths not in state
* active/optimized or active/non-optimized.
*/
-static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
+static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
struct alua_dh_data *h = sdev->handler_data;
struct alua_port_group *pg;
unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
- int ret = BLKPREP_OK;
rcu_read_lock();
pg = rcu_dereference(h->pg);
if (pg)
state = pg->state;
rcu_read_unlock();
- if (state == SCSI_ACCESS_STATE_TRANSITIONING)
- ret = BLKPREP_DEFER;
- else if (state != SCSI_ACCESS_STATE_OPTIMAL &&
- state != SCSI_ACCESS_STATE_ACTIVE &&
- state != SCSI_ACCESS_STATE_LBA) {
- ret = BLKPREP_KILL;
+
+ switch (state) {
+ case SCSI_ACCESS_STATE_OPTIMAL:
+ case SCSI_ACCESS_STATE_ACTIVE:
+ case SCSI_ACCESS_STATE_LBA:
+ return BLK_STS_OK;
+ case SCSI_ACCESS_STATE_TRANSITIONING:
+ return BLK_STS_RESOURCE;
+ default:
req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
}
- return ret;
-
}
static void alua_rescan(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 95c47909a58f..bea8e13febb6 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -341,17 +341,17 @@ static int clariion_check_sense(struct scsi_device *sdev,
return SCSI_RETURN_NOT_HANDLED;
}
-static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
+static blk_status_t clariion_prep_fn(struct scsi_device *sdev,
+ struct request *req)
{
struct clariion_dh_data *h = sdev->handler_data;
- int ret = BLKPREP_OK;
if (h->lun_state != CLARIION_LUN_OWNED) {
- ret = BLKPREP_KILL;
req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
}
- return ret;
+ return BLK_STS_OK;
}
static int clariion_std_inquiry(struct scsi_device *sdev,
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index e65a0ebb4b54..80129b033855 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -172,17 +172,16 @@ retry:
return rc;
}
-static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
+static blk_status_t hp_sw_prep_fn(struct scsi_device *sdev, struct request *req)
{
struct hp_sw_dh_data *h = sdev->handler_data;
- int ret = BLKPREP_OK;
if (h->path_state != HP_SW_PATH_ACTIVE) {
- ret = BLKPREP_KILL;
req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
}
- return ret;
+ return BLK_STS_OK;
}
/*
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index d27fabae8ddd..65f1fe343c64 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -642,17 +642,16 @@ done:
return 0;
}
-static int rdac_prep_fn(struct scsi_device *sdev, struct request *req)
+static blk_status_t rdac_prep_fn(struct scsi_device *sdev, struct request *req)
{
struct rdac_dh_data *h = sdev->handler_data;
- int ret = BLKPREP_OK;
if (h->state != RDAC_STATE_ACTIVE) {
- ret = BLKPREP_KILL;
req->rq_flags |= RQF_QUIET;
+ return BLK_STS_IOERR;
}
- return ret;
+ return BLK_STS_OK;
}
static int rdac_check_sense(struct scsi_device *sdev,
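All four device-handler prep_fn conversions (alua, emc, hp_sw, rdac) apply the same mapping from the removed BLKPREP_* values: BLKPREP_OK becomes BLK_STS_OK, BLKPREP_DEFER becomes BLK_STS_RESOURCE, and BLKPREP_KILL becomes BLK_STS_IOERR, with RQF_QUIET still set on the fatal path. The common shape, with a hypothetical readiness predicate:

#include <scsi/scsi_device.h>
#include <linux/blkdev.h>

/* Sketch of the converted prep_fn shape; path_is_ready() is made up. */
static blk_status_t example_prep_fn(struct scsi_device *sdev,
				    struct request *req)
{
	struct example_dh_data *h = sdev->handler_data;	/* hypothetical type */

	if (!path_is_ready(h)) {			/* hypothetical check */
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}
	return BLK_STS_OK;
}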
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 96acfcecd540..cafbcfb85bfa 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2274,7 +2274,7 @@ fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
return SCSI_NO_TAG;
sc->tag = sc->request->tag = dummy->tag;
- sc->request->special = sc;
+ sc->host_scribble = (unsigned char *)dummy;
return dummy->tag;
}
@@ -2286,7 +2286,7 @@ fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
static inline void
fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
- struct request *dummy = sc->request->special;
+ struct request *dummy = (struct request *)sc->host_scribble;
blk_mq_free_request(dummy);
}
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index ea4b0bb0c1cd..cc71136ba300 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -222,18 +222,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (error)
goto fail;
- if (shost_use_blk_mq(shost)) {
- error = scsi_mq_setup_tags(shost);
- if (error)
- goto fail;
- } else {
- shost->bqt = blk_init_tags(shost->can_queue,
- shost->hostt->tag_alloc_policy);
- if (!shost->bqt) {
- error = -ENOMEM;
- goto fail;
- }
- }
+ error = scsi_mq_setup_tags(shost);
+ if (error)
+ goto fail;
if (!shost->shost_gendev.parent)
shost->shost_gendev.parent = dev ? dev : &platform_bus;
@@ -309,8 +300,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
- if (shost_use_blk_mq(shost))
- scsi_mq_destroy_tags(shost);
+ scsi_mq_destroy_tags(shost);
fail:
return error;
}
@@ -344,13 +334,8 @@ static void scsi_host_dev_release(struct device *dev)
kfree(dev_name(&shost->shost_dev));
}
- if (shost_use_blk_mq(shost)) {
- if (shost->tag_set.tags)
- scsi_mq_destroy_tags(shost);
- } else {
- if (shost->bqt)
- blk_free_tags(shost->bqt);
- }
+ if (shost->tag_set.tags)
+ scsi_mq_destroy_tags(shost);
kfree(shost->shost_data);
@@ -472,8 +457,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
else
shost->dma_boundary = 0xffffffff;
- shost->use_blk_mq = scsi_use_blk_mq || shost->hostt->force_blk_mq;
-
device_initialize(&shost->shost_gendev);
dev_set_name(&shost->shost_gendev, "host%d", shost->host_no);
shost->shost_gendev.bus = &scsi_bus_type;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 93c66ebad907..f78d2e5c1471 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2416,8 +2416,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
failed:
ISCSI_DBG_EH(session,
"failing session reset: Could not log back into "
- "%s, %s [age %d]\n", session->targetname,
- conn->persistent_address, session->age);
+ "%s [age %d]\n", session->targetname,
+ session->age);
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return FAILED;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 4f6cdf53e913..c90b278cc28c 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -601,12 +601,7 @@ void sas_ata_task_abort(struct sas_task *task)
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
- struct request_queue *q = qc->scsicmd->device->request_queue;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(qc->scsicmd->request);
- spin_unlock_irqrestore(q->queue_lock, flags);
return;
}
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 33229348dcb6..af085432c5fe 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -930,16 +930,10 @@ void sas_task_abort(struct sas_task *task)
return;
}
- if (dev_is_sata(task->dev)) {
+ if (dev_is_sata(task->dev))
sas_ata_task_abort(task);
- } else {
- struct request_queue *q = sc->device->request_queue;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
+ else
blk_abort_request(sc->request);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
}
void sas_target_destroy(struct scsi_target *starget)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 20fa6785a0e2..68d62d55a3a5 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -167,7 +167,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
sizeof(phba->wwpn));
}
- phba->sli3_options = 0x0;
+ /*
+ * Clear all option bits except LPFC_SLI3_BG_ENABLED,
+ * which was already set in lpfc_get_cfgparam()
+ */
+ phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
/* Setup and issue mailbox READ REV command */
lpfc_read_rev(phba, pmb);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 4fa6703a9ec9..baed2b891efb 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3914,7 +3914,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,
uint32_t tag;
uint16_t hwq;
- if (cmnd && shost_use_blk_mq(cmnd->device->host)) {
+ if (cmnd) {
tag = blk_mq_unique_tag(cmnd->request);
hwq = blk_mq_unique_tag_to_hwq(tag);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 783a1540cfbe..b9e5cd79931a 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4965,7 +4965,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
LPFC_SLI3_HBQ_ENABLED |
LPFC_SLI3_CRP_ENABLED |
- LPFC_SLI3_BG_ENABLED |
LPFC_SLI3_DSS_ENABLED);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index e19fa883376f..60cf7c5eb880 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -506,11 +506,11 @@ static void osd_request_async_done(struct request *req, blk_status_t error)
_set_error_resid(or, req, error);
if (req->next_rq) {
- __blk_put_request(req->q, req->next_rq);
+ blk_put_request(req->next_rq);
req->next_rq = NULL;
}
- __blk_put_request(req->q, req);
+ blk_put_request(req);
or->request = NULL;
or->in.req = NULL;
or->out.req = NULL;
diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c
index 7a1a1edde35d..664c1238a87f 100644
--- a/drivers/scsi/osst.c
+++ b/drivers/scsi/osst.c
@@ -341,7 +341,7 @@ static void osst_end_async(struct request *req, blk_status_t status)
blk_rq_unmap_user(SRpnt->bio);
}
- __blk_put_request(req->q, req);
+ blk_put_request(req);
}
/* osst_request memory management */
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 105b0e4d7818..311eb22068e1 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -644,8 +644,7 @@ static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
qedi->max_sqes = QEDI_SQ_SIZE;
- if (shost_use_blk_mq(shost))
- shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
+ shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);
pci_set_drvdata(pdev, qedi);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 7e78e7eff783..fccc733145fc 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -272,17 +272,6 @@ static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
schedule_work(&priv->abort_work);
}
-static void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
-{
- struct qla_qpair *qpair = hw_queue_handle;
- unsigned long flags;
- struct scsi_qla_host *vha = lport->private;
-
- spin_lock_irqsave(&qpair->qp_lock, flags);
- qla24xx_process_response_queue(vha, qpair->rsp);
- spin_unlock_irqrestore(&qpair->qp_lock, flags);
-}
-
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
unsigned long flags;
@@ -578,7 +567,6 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
- .poll_queue = qla_nvme_poll,
.max_hw_queues = 8,
.max_sgl_segments = 128,
.max_dif_sgl_segments = 64,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index b658b9a5eb1e..f92196ec5489 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -857,13 +857,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
if (ha->mqenable) {
- if (shost_use_blk_mq(vha->host)) {
- tag = blk_mq_unique_tag(cmd->request);
- hwq = blk_mq_unique_tag_to_hwq(tag);
- qpair = ha->queue_pair_map[hwq];
- } else if (vha->vp_idx && vha->qpair) {
- qpair = vha->qpair;
- }
+ tag = blk_mq_unique_tag(cmd->request);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ qpair = ha->queue_pair_map[hwq];
if (qpair)
return qla2xxx_mqueuecommand(host, cmd, qpair);
@@ -1464,7 +1460,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
goto eh_reset_failed;
}
err = 2;
- if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
+ if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1)
!= QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800c,
"do_reset failed for cmd=%p.\n", cmd);
@@ -3159,7 +3155,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto probe_failed;
}
- if (ha->mqenable && shost_use_blk_mq(host)) {
+ if (ha->mqenable) {
/* number of hardware queues supported by blk/scsi-mq */
host->nr_hw_queues = ha->max_qpairs;
@@ -3271,25 +3267,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
if (ha->mqenable) {
- bool mq = false;
bool startit = false;
- if (QLA_TGT_MODE_ENABLED()) {
- mq = true;
+ if (QLA_TGT_MODE_ENABLED())
startit = false;
- }
- if ((ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED) &&
- shost_use_blk_mq(host)) {
- mq = true;
+ if (ql2x_ini_mode == QLA2XXX_INI_MODE_ENABLED)
startit = true;
- }
- if (mq) {
- /* Create start of day qpairs for Block MQ */
- for (i = 0; i < ha->max_qpairs; i++)
- qla2xxx_create_qpair(base_vha, 5, 0, startit);
- }
+ /* Create start of day qpairs for Block MQ */
+ for (i = 0; i < ha->max_qpairs; i++)
+ qla2xxx_create_qpair(base_vha, 5, 0, startit);
}
if (ha->flags.running_gold_fw)
@@ -4886,10 +4874,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
+ if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
fcport->fc4_type = FC4_TYPE_FCP_SCSI;
- if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
+ if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
fcport->fc4_type = FC4_TYPE_OTHER;
fcport->fc4f_nvme = FC4_TYPE_NVME;
}
@@ -6952,11 +6940,12 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
{
int rc;
scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
if (USER_CTRL_IRQ(vha->hw))
- rc = blk_mq_map_queues(&shost->tag_set);
+ rc = blk_mq_map_queues(qmap);
else
- rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0);
+ rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0);
return rc;
}
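The map_queues rework reflects a block-layer interface change: blk_mq_map_queues() and blk_mq_pci_map_queues() now take a struct blk_mq_queue_map instead of the whole tag set, so hosts index into shost->tag_set.map[]. The shape of the converted callback, abstracted from qla2xxx_map_queues() above (the predicate argument is illustrative):

#include <scsi/scsi_host.h>
#include <linux/blk-mq-pci.h>

/* Sketch: map hardware queues, preferring PCI/MSI-X affinity when usable. */
static int example_map_queues(struct Scsi_Host *shost, struct pci_dev *pdev,
			      bool user_controls_irq)
{
	struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];

	if (user_controls_irq)
		return blk_mq_map_queues(qmap);	/* plain software mapping */
	return blk_mq_pci_map_queues(qmap, pdev, 0);
}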
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index fc1356d101b0..7675ff0ca2ea 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -780,11 +780,8 @@ MODULE_LICENSE("GPL");
module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
-#ifdef CONFIG_SCSI_MQ_DEFAULT
+/* This should go away in the future; it no longer does anything. */
bool scsi_use_blk_mq = true;
-#else
-bool scsi_use_blk_mq = false;
-#endif
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
static int __init init_scsi(void)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 60bcc6df97a9..4740f1e9dd17 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5881,8 +5881,7 @@ static int sdebug_driver_probe(struct device *dev)
}
/* Decide whether to tell scsi subsystem that we want mq */
/* Following should give the same answer for each host */
- if (shost_use_blk_mq(hpnt))
- hpnt->nr_hw_queues = submit_queues;
+ hpnt->nr_hw_queues = submit_queues;
sdbg_host->shost = hpnt;
*((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index c736d61b1648..16eef068e9e9 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -297,19 +297,19 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
if (rtn == BLK_EH_DONE) {
/*
- * For blk-mq, we must set the request state to complete now
- * before sending the request to the scsi error handler. This
- * will prevent a use-after-free in the event the LLD manages
- * to complete the request before the error handler finishes
- * processing this timed out request.
+ * Set the command to complete first in order to prevent a real
+ * completion from releasing the command while error handling
+ * is using it. If the command was already completed, then the
+ * lower level driver beat the timeout handler, and it is safe
+ * to return without escalating error recovery.
*
- * If the request was already completed, then the LLD beat the
- * time out handler from transferring the request to the scsi
- * error handler. In that case we can return immediately as no
- * further action is required.
+ * If timeout handling lost the race to a real completion, the
+ * block layer may ignore that due to a fake timeout injection,
+ * so return RESET_TIMER to allow error handling another shot
+ * at this command.
*/
- if (req->q->mq_ops && !blk_mq_mark_complete(req))
- return rtn;
+ if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state))
+ return BLK_EH_RESET_TIMER;
if (scsi_abort_command(scmd) != SUCCESS) {
set_host_byte(scmd, DID_TIME_OUT);
scsi_eh_scmd_add(scmd);
@@ -1932,7 +1932,7 @@ maybe_retry:
static void eh_lock_door_done(struct request *req, blk_status_t status)
{
- __blk_put_request(req->q, req);
+ blk_put_request(req);
}
/**
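The timeout rework in scsi_error.c hinges on an atomic claim: whichever of the real completion and the timeout handler sets SCMD_STATE_COMPLETE first owns the command, and the loser backs off instead of touching a freed request. The claim itself reduces to the following (sketch; SCMD_STATE_COMPLETE is introduced by this series):

#include <linux/bitops.h>
#include <scsi/scsi_cmnd.h>

/* Sketch: true means the timeout handler won the race and may escalate
 * to error handling; false means a real completion got there first. */
static bool example_claim_for_eh(struct scsi_cmnd *scmd)
{
	return !test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state);
}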
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fa6e0c3b3aa6..0dbf25512778 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -168,8 +168,6 @@ static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
{
struct scsi_device *device = cmd->device;
- struct request_queue *q = device->request_queue;
- unsigned long flags;
SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
"Inserting command %p into mlqueue\n", cmd));
@@ -190,26 +188,20 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy)
* before blk_cleanup_queue() finishes.
*/
cmd->result = 0;
- if (q->mq_ops) {
- /*
- * Before a SCSI command is dispatched,
- * get_device(&sdev->sdev_gendev) is called and the host,
- * target and device busy counters are increased. Since
- * requeuing a request causes these actions to be repeated and
- * since scsi_device_unbusy() has already been called,
- * put_device(&device->sdev_gendev) must still be called. Call
- * put_device() after blk_mq_requeue_request() to avoid that
- * removal of the SCSI device can start before requeueing has
- * happened.
- */
- blk_mq_requeue_request(cmd->request, true);
- put_device(&device->sdev_gendev);
- return;
- }
- spin_lock_irqsave(q->queue_lock, flags);
- blk_requeue_request(q, cmd->request);
- kblockd_schedule_work(&device->requeue_work);
- spin_unlock_irqrestore(q->queue_lock, flags);
+
+ /*
+ * Before a SCSI command is dispatched,
+ * get_device(&sdev->sdev_gendev) is called and the host,
+ * target and device busy counters are increased. Since
+ * requeuing a request causes these actions to be repeated and
+ * since scsi_device_unbusy() has already been called,
+ * put_device(&device->sdev_gendev) must still be called. Call
+ * put_device() after blk_mq_requeue_request() to avoid that
+ * removal of the SCSI device can start before requeueing has
+ * happened.
+ */
+ blk_mq_requeue_request(cmd->request, true);
+ put_device(&device->sdev_gendev);
}
/*
@@ -370,10 +362,7 @@ void scsi_device_unbusy(struct scsi_device *sdev)
static void scsi_kick_queue(struct request_queue *q)
{
- if (q->mq_ops)
- blk_mq_run_hw_queues(q, false);
- else
- blk_run_queue(q);
+ blk_mq_run_hw_queues(q, false);
}
/*
@@ -534,10 +523,7 @@ static void scsi_run_queue(struct request_queue *q)
if (!list_empty(&sdev->host->starved_list))
scsi_starved_list_run(sdev->host);
- if (q->mq_ops)
- blk_mq_run_hw_queues(q, false);
- else
- blk_run_queue(q);
+ blk_mq_run_hw_queues(q, false);
}
void scsi_requeue_run_queue(struct work_struct *work)
@@ -550,42 +536,6 @@ void scsi_requeue_run_queue(struct work_struct *work)
scsi_run_queue(q);
}
-/*
- * Function: scsi_requeue_command()
- *
- * Purpose: Handle post-processing of completed commands.
- *
- * Arguments: q - queue to operate on
- * cmd - command that may need to be requeued.
- *
- * Returns: Nothing
- *
- * Notes: After command completion, there may be blocks left
- * over which weren't finished by the previous command
- * this can be for a number of reasons - the main one is
- * I/O errors in the middle of the request, in which case
- * we need to request the blocks that come after the bad
- * sector.
- * Notes: Upon return, cmd is a stale pointer.
- */
-static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
-{
- struct scsi_device *sdev = cmd->device;
- struct request *req = cmd->request;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_unprep_request(req);
- req->special = NULL;
- scsi_put_command(cmd);
- blk_requeue_request(q, req);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- scsi_run_queue(q);
-
- put_device(&sdev->sdev_gendev);
-}
-
void scsi_run_host_queues(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
@@ -626,42 +576,6 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
scsi_del_cmd_from_list(cmd);
}
-/*
- * Function: scsi_release_buffers()
- *
- * Purpose: Free resources allocate for a scsi_command.
- *
- * Arguments: cmd - command that we are bailing.
- *
- * Lock status: Assumed that no lock is held upon entry.
- *
- * Returns: Nothing
- *
- * Notes: In the event that an upper level driver rejects a
- * command, we must release resources allocated during
- * the __init_io() function. Primarily this would involve
- * the scatter-gather table.
- */
-static void scsi_release_buffers(struct scsi_cmnd *cmd)
-{
- if (cmd->sdb.table.nents)
- sg_free_table_chained(&cmd->sdb.table, false);
-
- memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-
- if (scsi_prot_sg_count(cmd))
- sg_free_table_chained(&cmd->prot_sdb->table, false);
-}
-
-static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
-{
- struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
-
- sg_free_table_chained(&bidi_sdb->table, false);
- kmem_cache_free(scsi_sdb_cache, bidi_sdb);
- cmd->request->next_rq->special = NULL;
-}
-
/* Returns false when no more bytes to process, true if there are more */
static bool scsi_end_request(struct request *req, blk_status_t error,
unsigned int bytes, unsigned int bidi_bytes)
@@ -687,46 +601,30 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
destroy_rcu_head(&cmd->rcu);
}
- if (req->mq_ctx) {
- /*
- * In the MQ case the command gets freed by __blk_mq_end_request,
- * so we have to do all cleanup that depends on it earlier.
- *
- * We also can't kick the queues from irq context, so we
- * will have to defer it to a workqueue.
- */
- scsi_mq_uninit_cmd(cmd);
-
- /*
- * queue is still alive, so grab the ref for preventing it
- * from being cleaned up during running queue.
- */
- percpu_ref_get(&q->q_usage_counter);
-
- __blk_mq_end_request(req, error);
-
- if (scsi_target(sdev)->single_lun ||
- !list_empty(&sdev->host->starved_list))
- kblockd_schedule_work(&sdev->requeue_work);
- else
- blk_mq_run_hw_queues(q, true);
-
- percpu_ref_put(&q->q_usage_counter);
- } else {
- unsigned long flags;
+ /*
+ * In the MQ case the command gets freed by __blk_mq_end_request,
+ * so we have to do all cleanup that depends on it earlier.
+ *
+ * We also can't kick the queues from irq context, so we
+ * will have to defer it to a workqueue.
+ */
+ scsi_mq_uninit_cmd(cmd);
- if (bidi_bytes)
- scsi_release_bidi_buffers(cmd);
- scsi_release_buffers(cmd);
- scsi_put_command(cmd);
+ /*
+ * queue is still alive, so grab the ref for preventing it
+ * from being cleaned up during running queue.
+ */
+ percpu_ref_get(&q->q_usage_counter);
- spin_lock_irqsave(q->queue_lock, flags);
- blk_finish_request(req, error);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ __blk_mq_end_request(req, error);
- scsi_run_queue(q);
- }
+ if (scsi_target(sdev)->single_lun ||
+ !list_empty(&sdev->host->starved_list))
+ kblockd_schedule_work(&sdev->requeue_work);
+ else
+ blk_mq_run_hw_queues(q, true);
+ percpu_ref_put(&q->q_usage_counter);
put_device(&sdev->sdev_gendev);
return false;
}
@@ -774,13 +672,7 @@ static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
struct request_queue *q)
{
/* A new command will be prepared and issued. */
- if (q->mq_ops) {
- scsi_mq_requeue_cmd(cmd);
- } else {
- /* Unprep request and put it back at head of the queue. */
- scsi_release_buffers(cmd);
- scsi_requeue_command(q, cmd);
- }
+ scsi_mq_requeue_cmd(cmd);
}
/* Helper for scsi_io_completion() when special action required. */
@@ -1120,7 +1012,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_io_completion_action(cmd, result);
}
-static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
+static blk_status_t scsi_init_sgtable(struct request *req,
+ struct scsi_data_buffer *sdb)
{
int count;
@@ -1129,7 +1022,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
*/
if (unlikely(sg_alloc_table_chained(&sdb->table,
blk_rq_nr_phys_segments(req), sdb->table.sgl)))
- return BLKPREP_DEFER;
+ return BLK_STS_RESOURCE;
/*
* Next, walk the list, and fill in the addresses and sizes of
@@ -1139,7 +1032,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
BUG_ON(count > sdb->table.nents);
sdb->table.nents = count;
sdb->length = blk_rq_payload_bytes(req);
- return BLKPREP_OK;
+ return BLK_STS_OK;
}
/*
@@ -1149,62 +1042,48 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
*
* Arguments: cmd - Command descriptor we wish to initialize
*
- * Returns: 0 on success
- * BLKPREP_DEFER if the failure is retryable
- * BLKPREP_KILL if the failure is fatal
+ * Returns: BLK_STS_OK on success
+ * BLK_STS_RESOURCE if the failure is retryable
+ * BLK_STS_IOERR if the failure is fatal
*/
-int scsi_init_io(struct scsi_cmnd *cmd)
+blk_status_t scsi_init_io(struct scsi_cmnd *cmd)
{
- struct scsi_device *sdev = cmd->device;
struct request *rq = cmd->request;
- bool is_mq = (rq->mq_ctx != NULL);
- int error = BLKPREP_KILL;
+ blk_status_t ret;
if (WARN_ON_ONCE(!blk_rq_nr_phys_segments(rq)))
- goto err_exit;
+ return BLK_STS_IOERR;
- error = scsi_init_sgtable(rq, &cmd->sdb);
- if (error)
- goto err_exit;
+ ret = scsi_init_sgtable(rq, &cmd->sdb);
+ if (ret)
+ return ret;
if (blk_bidi_rq(rq)) {
- if (!rq->q->mq_ops) {
- struct scsi_data_buffer *bidi_sdb =
- kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
- if (!bidi_sdb) {
- error = BLKPREP_DEFER;
- goto err_exit;
- }
-
- rq->next_rq->special = bidi_sdb;
- }
-
- error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
- if (error)
- goto err_exit;
+ ret = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
+ if (ret)
+ goto out_free_sgtables;
}
if (blk_integrity_rq(rq)) {
struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
int ivecs, count;
- if (prot_sdb == NULL) {
+ if (WARN_ON_ONCE(!prot_sdb)) {
/*
* This can happen if someone (e.g. multipath)
* queues a command to a device on an adapter
* that does not support DIX.
*/
- WARN_ON_ONCE(1);
- error = BLKPREP_KILL;
- goto err_exit;
+ ret = BLK_STS_IOERR;
+ goto out_free_sgtables;
}
ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
prot_sdb->table.sgl)) {
- error = BLKPREP_DEFER;
- goto err_exit;
+ ret = BLK_STS_RESOURCE;
+ goto out_free_sgtables;
}
count = blk_rq_map_integrity_sg(rq->q, rq->bio,
@@ -1216,17 +1095,10 @@ int scsi_init_io(struct scsi_cmnd *cmd)
cmd->prot_sdb->table.nents = count;
}
- return BLKPREP_OK;
-err_exit:
- if (is_mq) {
- scsi_mq_free_sgtables(cmd);
- } else {
- scsi_release_buffers(cmd);
- cmd->request->special = NULL;
- scsi_put_command(cmd);
- put_device(&sdev->sdev_gendev);
- }
- return error;
+ return BLK_STS_OK;
+out_free_sgtables:
+ scsi_mq_free_sgtables(cmd);
+ return ret;
}
EXPORT_SYMBOL(scsi_init_io);
@@ -1312,7 +1184,8 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
scsi_add_cmd_to_list(cmd);
}
-static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
+static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev,
+ struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
@@ -1323,8 +1196,8 @@ static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
* submit a request without an attached bio.
*/
if (req->bio) {
- int ret = scsi_init_io(cmd);
- if (unlikely(ret))
+ blk_status_t ret = scsi_init_io(cmd);
+ if (unlikely(ret != BLK_STS_OK))
return ret;
} else {
BUG_ON(blk_rq_bytes(req));
@@ -1336,20 +1209,21 @@ static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
cmd->cmnd = scsi_req(req)->cmd;
cmd->transfersize = blk_rq_bytes(req);
cmd->allowed = scsi_req(req)->retries;
- return BLKPREP_OK;
+ return BLK_STS_OK;
}
/*
* Setup a normal block command. These are simple request from filesystems
* that still need to be translated to SCSI CDBs from the ULD.
*/
-static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
+static blk_status_t scsi_setup_fs_cmnd(struct scsi_device *sdev,
+ struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
- int ret = sdev->handler->prep_fn(sdev, req);
- if (ret != BLKPREP_OK)
+ blk_status_t ret = sdev->handler->prep_fn(sdev, req);
+ if (ret != BLK_STS_OK)
return ret;
}
@@ -1358,7 +1232,8 @@ static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
return scsi_cmd_to_driver(cmd)->init_command(cmd);
}
-static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
+static blk_status_t scsi_setup_cmnd(struct scsi_device *sdev,
+ struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
@@ -1375,129 +1250,48 @@ static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
return scsi_setup_fs_cmnd(sdev, req);
}
-static int
+static blk_status_t
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
- int ret = BLKPREP_OK;
-
- /*
- * If the device is not in running state we will reject some
- * or all commands.
- */
- if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
- switch (sdev->sdev_state) {
- case SDEV_OFFLINE:
- case SDEV_TRANSPORT_OFFLINE:
- /*
- * If the device is offline we refuse to process any
- * commands. The device must be brought online
- * before trying any recovery commands.
- */
- sdev_printk(KERN_ERR, sdev,
- "rejecting I/O to offline device\n");
- ret = BLKPREP_KILL;
- break;
- case SDEV_DEL:
- /*
- * If the device is fully deleted, we refuse to
- * process any commands as well.
- */
- sdev_printk(KERN_ERR, sdev,
- "rejecting I/O to dead device\n");
- ret = BLKPREP_KILL;
- break;
- case SDEV_BLOCK:
- case SDEV_CREATED_BLOCK:
- ret = BLKPREP_DEFER;
- break;
- case SDEV_QUIESCE:
- /*
- * If the devices is blocked we defer normal commands.
- */
- if (req && !(req->rq_flags & RQF_PREEMPT))
- ret = BLKPREP_DEFER;
- break;
- default:
- /*
- * For any other not fully online state we only allow
- * special commands. In particular any user initiated
- * command is not allowed.
- */
- if (req && !(req->rq_flags & RQF_PREEMPT))
- ret = BLKPREP_KILL;
- break;
- }
- }
- return ret;
-}
-
-static int
-scsi_prep_return(struct request_queue *q, struct request *req, int ret)
-{
- struct scsi_device *sdev = q->queuedata;
-
- switch (ret) {
- case BLKPREP_KILL:
- case BLKPREP_INVALID:
- scsi_req(req)->result = DID_NO_CONNECT << 16;
- /* release the command and kill it */
- if (req->special) {
- struct scsi_cmnd *cmd = req->special;
- scsi_release_buffers(cmd);
- scsi_put_command(cmd);
- put_device(&sdev->sdev_gendev);
- req->special = NULL;
- }
- break;
- case BLKPREP_DEFER:
+ switch (sdev->sdev_state) {
+ case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
/*
- * If we defer, the blk_peek_request() returns NULL, but the
- * queue must be restarted, so we schedule a callback to happen
- * shortly.
+ * If the device is offline we refuse to process any
+ * commands. The device must be brought online
+ * before trying any recovery commands.
*/
- if (atomic_read(&sdev->device_busy) == 0)
- blk_delay_queue(q, SCSI_QUEUE_DELAY);
- break;
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to offline device\n");
+ return BLK_STS_IOERR;
+ case SDEV_DEL:
+ /*
+ * If the device is fully deleted, we refuse to
+ * process any commands as well.
+ */
+ sdev_printk(KERN_ERR, sdev,
+ "rejecting I/O to dead device\n");
+ return BLK_STS_IOERR;
+ case SDEV_BLOCK:
+ case SDEV_CREATED_BLOCK:
+ return BLK_STS_RESOURCE;
+ case SDEV_QUIESCE:
+ /*
+ * If the device is blocked we defer normal commands.
+ */
+ if (req && !(req->rq_flags & RQF_PREEMPT))
+ return BLK_STS_RESOURCE;
+ return BLK_STS_OK;
default:
- req->rq_flags |= RQF_DONTPREP;
- }
-
- return ret;
-}
-
-static int scsi_prep_fn(struct request_queue *q, struct request *req)
-{
- struct scsi_device *sdev = q->queuedata;
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
- int ret;
-
- ret = scsi_prep_state_check(sdev, req);
- if (ret != BLKPREP_OK)
- goto out;
-
- if (!req->special) {
- /* Bail if we can't get a reference to the device */
- if (unlikely(!get_device(&sdev->sdev_gendev))) {
- ret = BLKPREP_DEFER;
- goto out;
- }
-
- scsi_init_command(sdev, cmd);
- req->special = cmd;
+ /*
+ * For any other state that is not fully online we only
+ * allow special commands. In particular, any user-initiated
+ * command is not allowed.
+ */
+ if (req && !(req->rq_flags & RQF_PREEMPT))
+ return BLK_STS_IOERR;
+ return BLK_STS_OK;
}
-
- cmd->tag = req->tag;
- cmd->request = req;
- cmd->prot_op = SCSI_PROT_NORMAL;
-
- ret = scsi_setup_cmnd(sdev, req);
-out:
- return scsi_prep_return(q, req, ret);
-}
-
-static void scsi_unprep_fn(struct request_queue *q, struct request *req)
-{
- scsi_uninit_cmd(blk_mq_rq_to_pdu(req));
}
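
The series as a whole retires the legacy BLKPREP_* prep codes in favour of blk_status_t; the mapping, summarized from the hunks in this patch (an inferred sketch, not code from the patch itself):

	/*
	 * BLKPREP_OK      -> BLK_STS_OK        (dispatch the command)
	 * BLKPREP_DEFER   -> BLK_STS_RESOURCE  (out of resources, requeue)
	 * BLKPREP_KILL    -> BLK_STS_IOERR     (fail the request)
	 * BLKPREP_INVALID -> BLK_STS_TARGET    (not supported by the target)
	 *
	 * One exception: the WARN_ON_ONCE default case in sd_init_command()
	 * maps the old BLKPREP_KILL to BLK_STS_NOTSUPP instead.
	 */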
/*
@@ -1519,14 +1313,8 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,
/*
* unblock after device_blocked iterates to zero
*/
- if (atomic_dec_return(&sdev->device_blocked) > 0) {
- /*
- * For the MQ case we take care of this in the caller.
- */
- if (!q->mq_ops)
- blk_delay_queue(q, SCSI_QUEUE_DELAY);
+ if (atomic_dec_return(&sdev->device_blocked) > 0)
goto out_dec;
- }
SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
"unblocking device at zero depth\n"));
}
@@ -1661,13 +1449,13 @@ out_dec:
* needs to return 'not busy'. Otherwise, request stacking drivers
* may hold requests forever.
*/
-static int scsi_lld_busy(struct request_queue *q)
+static bool scsi_mq_lld_busy(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
if (blk_queue_dying(q))
- return 0;
+ return false;
shost = sdev->host;
@@ -1678,43 +1466,9 @@ static int scsi_lld_busy(struct request_queue *q)
* in SCSI layer.
*/
if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
- return 1;
-
- return 0;
-}
-
-/*
- * Kill a request for a dead device
- */
-static void scsi_kill_request(struct request *req, struct request_queue *q)
-{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
- struct scsi_device *sdev;
- struct scsi_target *starget;
- struct Scsi_Host *shost;
-
- blk_start_request(req);
-
- scmd_printk(KERN_INFO, cmd, "killing request\n");
-
- sdev = cmd->device;
- starget = scsi_target(sdev);
- shost = sdev->host;
- scsi_init_cmd_errh(cmd);
- cmd->result = DID_NO_CONNECT << 16;
- atomic_inc(&cmd->device->iorequest_cnt);
-
- /*
- * SCSI request completion path will do scsi_device_unbusy(),
- * bump busy counts. To bump the counters, we need to dance
- * with the locks as normal issue path does.
- */
- atomic_inc(&sdev->device_busy);
- atomic_inc(&shost->host_busy);
- if (starget->can_queue > 0)
- atomic_inc(&starget->target_busy);
+ return true;
- blk_complete_request(req);
+ return false;
}
static void scsi_softirq_done(struct request *rq)
@@ -1837,170 +1591,6 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
return 0;
}
-/**
- * scsi_done - Invoke completion on finished SCSI command.
- * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
- * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
- *
- * Description: This function is the mid-level's (SCSI Core) interrupt routine,
- * which regains ownership of the SCSI command (de facto) from a LLDD, and
- * calls blk_complete_request() for further processing.
- *
- * This function is interrupt context safe.
- */
-static void scsi_done(struct scsi_cmnd *cmd)
-{
- trace_scsi_dispatch_cmd_done(cmd);
- blk_complete_request(cmd->request);
-}
-
-/*
- * Function: scsi_request_fn()
- *
- * Purpose: Main strategy routine for SCSI.
- *
- * Arguments: q - Pointer to actual queue.
- *
- * Returns: Nothing
- *
- * Lock status: request queue lock assumed to be held when called.
- *
- * Note: See sd_zbc.c sd_zbc_write_lock_zone() for write order
- * protection for ZBC disks.
- */
-static void scsi_request_fn(struct request_queue *q)
- __releases(q->queue_lock)
- __acquires(q->queue_lock)
-{
- struct scsi_device *sdev = q->queuedata;
- struct Scsi_Host *shost;
- struct scsi_cmnd *cmd;
- struct request *req;
-
- /*
- * To start with, we keep looping until the queue is empty, or until
- * the host is no longer able to accept any more requests.
- */
- shost = sdev->host;
- for (;;) {
- int rtn;
- /*
- * get next queueable request. We do this early to make sure
- * that the request is fully prepared even if we cannot
- * accept it.
- */
- req = blk_peek_request(q);
- if (!req)
- break;
-
- if (unlikely(!scsi_device_online(sdev))) {
- sdev_printk(KERN_ERR, sdev,
- "rejecting I/O to offline device\n");
- scsi_kill_request(req, q);
- continue;
- }
-
- if (!scsi_dev_queue_ready(q, sdev))
- break;
-
- /*
- * Remove the request from the request list.
- */
- if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
- blk_start_request(req);
-
- spin_unlock_irq(q->queue_lock);
- cmd = blk_mq_rq_to_pdu(req);
- if (cmd != req->special) {
- printk(KERN_CRIT "impossible request in %s.\n"
- "please mail a stack trace to "
- "linux-scsi@vger.kernel.org\n",
- __func__);
- blk_dump_rq_flags(req, "foo");
- BUG();
- }
-
- /*
- * We hit this when the driver is using a host wide
- * tag map. For device level tag maps the queue_depth check
- * in the device ready fn would prevent us from trying
- * to allocate a tag. Since the map is a shared host resource
- * we add the dev to the starved list so it eventually gets
- * a run when a tag is freed.
- */
- if (blk_queue_tagged(q) && !(req->rq_flags & RQF_QUEUED)) {
- spin_lock_irq(shost->host_lock);
- if (list_empty(&sdev->starved_entry))
- list_add_tail(&sdev->starved_entry,
- &shost->starved_list);
- spin_unlock_irq(shost->host_lock);
- goto not_ready;
- }
-
- if (!scsi_target_queue_ready(shost, sdev))
- goto not_ready;
-
- if (!scsi_host_queue_ready(q, shost, sdev))
- goto host_not_ready;
-
- if (sdev->simple_tags)
- cmd->flags |= SCMD_TAGGED;
- else
- cmd->flags &= ~SCMD_TAGGED;
-
- /*
- * Finally, initialize any error handling parameters, and set up
- * the timers for timeouts.
- */
- scsi_init_cmd_errh(cmd);
-
- /*
- * Dispatch the command to the low-level driver.
- */
- cmd->scsi_done = scsi_done;
- rtn = scsi_dispatch_cmd(cmd);
- if (rtn) {
- scsi_queue_insert(cmd, rtn);
- spin_lock_irq(q->queue_lock);
- goto out_delay;
- }
- spin_lock_irq(q->queue_lock);
- }
-
- return;
-
- host_not_ready:
- if (scsi_target(sdev)->can_queue > 0)
- atomic_dec(&scsi_target(sdev)->target_busy);
- not_ready:
- /*
- * lock q, handle tag, requeue req, and decrement device_busy. We
- * must return with queue_lock held.
- *
- * Decrementing device_busy without checking it is OK, as all such
- * cases (host limits or settings) should run the queue at some
- * later time.
- */
- spin_lock_irq(q->queue_lock);
- blk_requeue_request(q, req);
- atomic_dec(&sdev->device_busy);
-out_delay:
- if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
- blk_delay_queue(q, SCSI_QUEUE_DELAY);
-}
-
-static inline blk_status_t prep_to_mq(int ret)
-{
- switch (ret) {
- case BLKPREP_OK:
- return BLK_STS_OK;
- case BLKPREP_DEFER:
- return BLK_STS_RESOURCE;
- default:
- return BLK_STS_IOERR;
- }
-}
-
/* Size in bytes of the sg-list stored in the scsi-mq command-private data. */
static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
{
@@ -2008,7 +1598,7 @@ static unsigned int scsi_mq_sgl_size(struct Scsi_Host *shost)
sizeof(struct scatterlist);
}
-static int scsi_mq_prep_fn(struct request *req)
+static blk_status_t scsi_mq_prep_fn(struct request *req)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = req->q->queuedata;
@@ -2052,8 +1642,18 @@ static int scsi_mq_prep_fn(struct request *req)
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
+ if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state)))
+ return;
trace_scsi_dispatch_cmd_done(cmd);
- blk_mq_complete_request(cmd->request);
+
+ /*
+ * If the block layer didn't complete the request due to a timeout
+ * injection, scsi must clear its internal completed state so that the
+ * timeout handler will see it needs to escalate its own error
+ * recovery.
+ */
+ if (unlikely(!blk_mq_complete_request(cmd->request)))
+ clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
}
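
Taken together with the clear_bit() added to scsi_queue_rq() below, the completion protocol looks roughly like this (a simplified sketch built around the SCMD_STATE_COMPLETE bit this patch introduces):

	/* submission side (scsi_queue_rq): start with an unclaimed command */
	clear_bit(SCMD_STATE_COMPLETE, &cmd->state);

	/* completion side (scsi_mq_done): the first claimant wins */
	if (test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state))
		return;		/* someone else already completed it */
	if (!blk_mq_complete_request(cmd->request))
		/* block layer refused (a timeout owns the request): drop
		 * the claim so the timeout handler escalates recovery */
		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);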
static void scsi_mq_put_budget(struct blk_mq_hw_ctx *hctx)
@@ -2096,9 +1696,15 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_status_t ret;
int reason;
- ret = prep_to_mq(scsi_prep_state_check(sdev, req));
- if (ret != BLK_STS_OK)
- goto out_put_budget;
+ /*
+ * If the device is not in running state we will reject some or all
+ * commands.
+ */
+ if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
+ ret = scsi_prep_state_check(sdev, req);
+ if (ret != BLK_STS_OK)
+ goto out_put_budget;
+ }
ret = BLK_STS_RESOURCE;
if (!scsi_target_queue_ready(shost, sdev))
@@ -2106,8 +1712,9 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!scsi_host_queue_ready(q, shost, sdev))
goto out_dec_target_busy;
+ clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
if (!(req->rq_flags & RQF_DONTPREP)) {
- ret = prep_to_mq(scsi_mq_prep_fn(req));
+ ret = scsi_mq_prep_fn(req);
if (ret != BLK_STS_OK)
goto out_dec_host_busy;
req->rq_flags |= RQF_DONTPREP;
@@ -2208,7 +1815,7 @@ static int scsi_map_queues(struct blk_mq_tag_set *set)
if (shost->hostt->map_queues)
return shost->hostt->map_queues(shost);
- return blk_mq_map_queues(set);
+ return blk_mq_map_queues(&set->map[0]);
}
void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
@@ -2251,77 +1858,6 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
}
EXPORT_SYMBOL_GPL(__scsi_init_queue);
-static int scsi_old_init_rq(struct request_queue *q, struct request *rq,
- gfp_t gfp)
-{
- struct Scsi_Host *shost = q->rq_alloc_data;
- const bool unchecked_isa_dma = shost->unchecked_isa_dma;
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
-
- memset(cmd, 0, sizeof(*cmd));
-
- if (unchecked_isa_dma)
- cmd->flags |= SCMD_UNCHECKED_ISA_DMA;
- cmd->sense_buffer = scsi_alloc_sense_buffer(unchecked_isa_dma, gfp,
- NUMA_NO_NODE);
- if (!cmd->sense_buffer)
- goto fail;
- cmd->req.sense = cmd->sense_buffer;
-
- if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {
- cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp);
- if (!cmd->prot_sdb)
- goto fail_free_sense;
- }
-
- return 0;
-
-fail_free_sense:
- scsi_free_sense_buffer(unchecked_isa_dma, cmd->sense_buffer);
-fail:
- return -ENOMEM;
-}
-
-static void scsi_old_exit_rq(struct request_queue *q, struct request *rq)
-{
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
-
- if (cmd->prot_sdb)
- kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb);
- scsi_free_sense_buffer(cmd->flags & SCMD_UNCHECKED_ISA_DMA,
- cmd->sense_buffer);
-}
-
-struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
-{
- struct Scsi_Host *shost = sdev->host;
- struct request_queue *q;
-
- q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
- if (!q)
- return NULL;
- q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
- q->rq_alloc_data = shost;
- q->request_fn = scsi_request_fn;
- q->init_rq_fn = scsi_old_init_rq;
- q->exit_rq_fn = scsi_old_exit_rq;
- q->initialize_rq_fn = scsi_initialize_rq;
-
- if (blk_init_allocated_queue(q) < 0) {
- blk_cleanup_queue(q);
- return NULL;
- }
-
- __scsi_init_queue(shost, q);
- blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
- blk_queue_prep_rq(q, scsi_prep_fn);
- blk_queue_unprep_rq(q, scsi_unprep_fn);
- blk_queue_softirq_done(q, scsi_softirq_done);
- blk_queue_rq_timed_out(q, scsi_times_out);
- blk_queue_lld_busy(q, scsi_lld_busy);
- return q;
-}
-
static const struct blk_mq_ops scsi_mq_ops = {
.get_budget = scsi_mq_get_budget,
.put_budget = scsi_mq_put_budget,
@@ -2334,6 +1870,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
.initialize_rq_fn = scsi_initialize_rq,
+ .busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
};
@@ -2388,10 +1925,7 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
{
struct scsi_device *sdev = NULL;
- if (q->mq_ops) {
- if (q->mq_ops == &scsi_mq_ops)
- sdev = q->queuedata;
- } else if (q->request_fn == scsi_request_fn)
+ if (q->mq_ops == &scsi_mq_ops)
sdev = q->queuedata;
if (!sdev || !get_device(&sdev->sdev_gendev))
sdev = NULL;
@@ -2995,39 +2529,6 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
- * @sdev: SCSI device to count the number of scsi_request_fn() callers for.
- */
-static int scsi_request_fn_active(struct scsi_device *sdev)
-{
- struct request_queue *q = sdev->request_queue;
- int request_fn_active;
-
- WARN_ON_ONCE(sdev->host->use_blk_mq);
-
- spin_lock_irq(q->queue_lock);
- request_fn_active = q->request_fn_active;
- spin_unlock_irq(q->queue_lock);
-
- return request_fn_active;
-}
-
-/**
- * scsi_wait_for_queuecommand() - wait for ongoing queuecommand() calls
- * @sdev: SCSI device pointer.
- *
- * Wait until the ongoing shost->hostt->queuecommand() calls that are
- * invoked from scsi_request_fn() have finished.
- */
-static void scsi_wait_for_queuecommand(struct scsi_device *sdev)
-{
- WARN_ON_ONCE(sdev->host->use_blk_mq);
-
- while (scsi_request_fn_active(sdev))
- msleep(20);
-}
-
-/**
* scsi_device_quiesce - Block user issued commands.
* @sdev: scsi device to quiesce.
*
@@ -3150,7 +2651,6 @@ EXPORT_SYMBOL(scsi_target_resume);
int scsi_internal_device_block_nowait(struct scsi_device *sdev)
{
struct request_queue *q = sdev->request_queue;
- unsigned long flags;
int err = 0;
err = scsi_device_set_state(sdev, SDEV_BLOCK);
@@ -3166,14 +2666,7 @@ int scsi_internal_device_block_nowait(struct scsi_device *sdev)
* block layer from calling the midlayer with this device's
* request queue.
*/
- if (q->mq_ops) {
- blk_mq_quiesce_queue_nowait(q);
- } else {
- spin_lock_irqsave(q->queue_lock, flags);
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
-
+ blk_mq_quiesce_queue_nowait(q);
return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
@@ -3204,12 +2697,8 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
mutex_lock(&sdev->state_mutex);
err = scsi_internal_device_block_nowait(sdev);
- if (err == 0) {
- if (q->mq_ops)
- blk_mq_quiesce_queue(q);
- else
- scsi_wait_for_queuecommand(sdev);
- }
+ if (err == 0)
+ blk_mq_quiesce_queue(q);
mutex_unlock(&sdev->state_mutex);
return err;
@@ -3218,15 +2707,8 @@ static int scsi_internal_device_block(struct scsi_device *sdev)
void scsi_start_queue(struct scsi_device *sdev)
{
struct request_queue *q = sdev->request_queue;
- unsigned long flags;
- if (q->mq_ops) {
- blk_mq_unquiesce_queue(q);
- } else {
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
+ blk_mq_unquiesce_queue(q);
}
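
With the legacy path gone, blocking and restarting a device reduces to the blk-mq quiesce primitives; an illustrative pairing of the three hunks above:

	blk_mq_quiesce_queue_nowait(q);	/* _block_nowait(): stop new dispatch */
	blk_mq_quiesce_queue(q);	/* _block(): also wait for ->queue_rq() */
	blk_mq_unquiesce_queue(q);	/* scsi_start_queue(): resume dispatch */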
/**
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 99f1db5e467e..5f21547b2ad2 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -92,7 +92,6 @@ extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
-extern struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev);
extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 78ca63dfba4a..dd0d516f65e2 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -266,10 +266,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
*/
sdev->borken = 1;
- if (shost_use_blk_mq(shost))
- sdev->request_queue = scsi_mq_alloc_queue(sdev);
- else
- sdev->request_queue = scsi_old_alloc_queue(sdev);
+ sdev->request_queue = scsi_mq_alloc_queue(sdev);
if (!sdev->request_queue) {
/* release fn is set up in scsi_sysfs_device_initialise, so
* have to free and put manually here */
@@ -280,11 +277,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
sdev->request_queue->queuedata = sdev;
- if (!shost_use_blk_mq(sdev->host)) {
- blk_queue_init_tags(sdev->request_queue,
- sdev->host->cmd_per_lun, shost->bqt,
- shost->hostt->tag_alloc_policy);
- }
scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun ?
sdev->host->cmd_per_lun : 1);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 3aee9464a7bf..6a9040faed00 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -367,7 +367,6 @@ store_shost_eh_deadline(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline);
-shost_rd_attr(use_blk_mq, "%d\n");
shost_rd_attr(unique_id, "%u\n");
shost_rd_attr(cmd_per_lun, "%hd\n");
shost_rd_attr(can_queue, "%hd\n");
@@ -386,6 +385,13 @@ show_host_busy(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL);
+static ssize_t
+show_use_blk_mq(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "1\n");
+}
+static DEVICE_ATTR(use_blk_mq, S_IRUGO, show_use_blk_mq, NULL);
+
static struct attribute *scsi_sysfs_shost_attrs[] = {
&dev_attr_use_blk_mq.attr,
&dev_attr_unique_id.attr,
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 381668fa135d..d7035270d274 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3592,7 +3592,7 @@ fc_bsg_job_timeout(struct request *req)
/* the blk_end_sync_io() doesn't check the error */
if (inflight)
- __blk_complete_request(req);
+ blk_mq_end_request(req, BLK_STS_IOERR);
return BLK_EH_DONE;
}
@@ -3684,14 +3684,9 @@ static void
fc_bsg_goose_queue(struct fc_rport *rport)
{
struct request_queue *q = rport->rqst_q;
- unsigned long flags;
-
- if (!q)
- return;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_run_queue_async(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ if (q)
+ blk_mq_run_hw_queues(q, true);
}
/**
@@ -3759,6 +3754,37 @@ static int fc_bsg_dispatch(struct bsg_job *job)
return fc_bsg_host_dispatch(shost, job);
}
+static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport)
+{
+ if (rport->port_state == FC_PORTSTATE_BLOCKED &&
+ !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
+ return BLK_STS_RESOURCE;
+
+ if (rport->port_state != FC_PORTSTATE_ONLINE)
+ return BLK_STS_IOERR;
+
+ return BLK_STS_OK;
+}
+
+static int fc_bsg_dispatch_prep(struct bsg_job *job)
+{
+ struct fc_rport *rport = fc_bsg_to_rport(job);
+ blk_status_t ret;
+
+ ret = fc_bsg_rport_prep(rport);
+ switch (ret) {
+ case BLK_STS_OK:
+ break;
+ case BLK_STS_RESOURCE:
+ return -EAGAIN;
+ default:
+ return -EIO;
+ }
+
+ return fc_bsg_dispatch(job);
+}
+
/**
* fc_bsg_hostadd - Create and add the bsg hooks so we can receive requests
* @shost: shost for fc_host
@@ -3780,7 +3806,8 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
snprintf(bsg_name, sizeof(bsg_name),
"fc_host%d", shost->host_no);
- q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, i->f->dd_bsg_size);
+ q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout,
+ i->f->dd_bsg_size);
if (IS_ERR(q)) {
dev_err(dev,
"fc_host%d: bsg interface failed to initialize - setup queue\n",
@@ -3788,26 +3815,11 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host)
return PTR_ERR(q);
}
__scsi_init_queue(shost, q);
- blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT);
fc_host->rqst_q = q;
return 0;
}
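
bsg_setup_queue() now takes the timeout handler as a parameter instead of requiring a separate blk_queue_rq_timed_out() call; a hedged sketch of the new calling convention (my_dispatch and my_timeout are hypothetical callbacks):

	q = bsg_setup_queue(dev, "my_bsg", my_dispatch, my_timeout, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);
	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

Passing NULL for the timeout handler keeps the default behaviour, as the iscsi and sas conversions below do.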
-static int fc_bsg_rport_prep(struct request_queue *q, struct request *req)
-{
- struct fc_rport *rport = dev_to_rport(q->queuedata);
-
- if (rport->port_state == FC_PORTSTATE_BLOCKED &&
- !(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
- return BLKPREP_DEFER;
-
- if (rport->port_state != FC_PORTSTATE_ONLINE)
- return BLKPREP_KILL;
-
- return BLKPREP_OK;
-}
-
/**
* fc_bsg_rportadd - Create and add the bsg hooks so we can receive requests
* @shost: shost that rport is attached to
@@ -3825,15 +3837,13 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
if (!i->f->bsg_request)
return -ENOTSUPP;
- q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch,
- i->f->dd_bsg_size);
+ q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep,
+ fc_bsg_job_timeout, i->f->dd_bsg_size);
if (IS_ERR(q)) {
dev_err(dev, "failed to setup bsg queue\n");
return PTR_ERR(q);
}
__scsi_init_queue(shost, q);
- blk_queue_prep_rq(q, fc_bsg_rport_prep);
- blk_queue_rq_timed_out(q, fc_bsg_job_timeout);
blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
rport->rqst_q = q;
return 0;
@@ -3852,10 +3862,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport)
static void
fc_bsg_remove(struct request_queue *q)
{
- if (q) {
- bsg_unregister_queue(q);
- blk_cleanup_queue(q);
- }
+ bsg_remove_queue(q);
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 6fd2fe210fc3..ff123023e5a5 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1542,7 +1542,7 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
return -ENOTSUPP;
snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
- q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, 0);
+ q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, NULL, 0);
if (IS_ERR(q)) {
shost_printk(KERN_ERR, shost, "bsg interface failed to "
"initialize - no request queue\n");
@@ -1576,10 +1576,7 @@ static int iscsi_remove_host(struct transport_container *tc,
struct Scsi_Host *shost = dev_to_shost(dev);
struct iscsi_cls_host *ihost = shost->shost_data;
- if (ihost->bsg_q) {
- bsg_unregister_queue(ihost->bsg_q);
- blk_cleanup_queue(ihost->bsg_q);
- }
+ bsg_remove_queue(ihost->bsg_q);
return 0;
}
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 0a165b2b3e81..692b46937e52 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -198,7 +198,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
if (rphy) {
q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev),
- sas_smp_dispatch, 0);
+ sas_smp_dispatch, NULL, 0);
if (IS_ERR(q))
return PTR_ERR(q);
rphy->q = q;
@@ -207,7 +207,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
snprintf(name, sizeof(name), "sas_host%d", shost->host_no);
q = bsg_setup_queue(&shost->shost_gendev, name,
- sas_smp_dispatch, 0);
+ sas_smp_dispatch, NULL, 0);
if (IS_ERR(q))
return PTR_ERR(q);
to_sas_host_attrs(shost)->q = q;
@@ -246,11 +246,7 @@ static int sas_host_remove(struct transport_container *tc, struct device *dev,
struct Scsi_Host *shost = dev_to_shost(dev);
struct request_queue *q = to_sas_host_attrs(shost)->q;
- if (q) {
- bsg_unregister_queue(q);
- blk_cleanup_queue(q);
- }
-
+ bsg_remove_queue(q);
return 0;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3bb2b3351e35..a1a44f52e0e8 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -114,7 +114,7 @@ static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
-static int sd_init_command(struct scsi_cmnd *SCpnt);
+static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
static int sd_done(struct scsi_cmnd *);
static void sd_eh_reset(struct scsi_cmnd *);
@@ -133,6 +133,7 @@ static DEFINE_MUTEX(sd_ref_mutex);
static struct kmem_cache *sd_cdb_cache;
static mempool_t *sd_cdb_pool;
+static mempool_t *sd_page_pool;
static const char *sd_cache_types[] = {
"write through", "none", "write back",
@@ -750,7 +751,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}
-static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
+static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
@@ -759,9 +760,10 @@ static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
unsigned int data_len = 24;
char *buf;
- rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
if (!rq->special_vec.bv_page)
- return BLKPREP_DEFER;
+ return BLK_STS_RESOURCE;
+ clear_highpage(rq->special_vec.bv_page);
rq->special_vec.bv_offset = 0;
rq->special_vec.bv_len = data_len;
rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -784,7 +786,8 @@ static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
return scsi_init_io(cmd);
}
-static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap)
+static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
+ bool unmap)
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
@@ -792,9 +795,10 @@ static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap)
u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
u32 data_len = sdp->sector_size;
- rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
if (!rq->special_vec.bv_page)
- return BLKPREP_DEFER;
+ return BLK_STS_RESOURCE;
+ clear_highpage(rq->special_vec.bv_page);
rq->special_vec.bv_offset = 0;
rq->special_vec.bv_len = data_len;
rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -814,7 +818,8 @@ static int sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd, bool unmap)
return scsi_init_io(cmd);
}
-static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap)
+static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
+ bool unmap)
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
@@ -822,9 +827,10 @@ static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap)
u32 nr_sectors = blk_rq_sectors(rq) >> (ilog2(sdp->sector_size) - 9);
u32 data_len = sdp->sector_size;
- rq->special_vec.bv_page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
+ rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
if (!rq->special_vec.bv_page)
- return BLKPREP_DEFER;
+ return BLK_STS_RESOURCE;
+ clear_highpage(rq->special_vec.bv_page);
rq->special_vec.bv_offset = 0;
rq->special_vec.bv_len = data_len;
rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
@@ -844,7 +850,7 @@ static int sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd, bool unmap)
return scsi_init_io(cmd);
}
-static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
+static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
struct scsi_device *sdp = cmd->device;
@@ -862,7 +868,7 @@ static int sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
}
if (sdp->no_write_same)
- return BLKPREP_INVALID;
+ return BLK_STS_TARGET;
if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff)
return sd_setup_write_same16_cmnd(cmd, false);
@@ -939,7 +945,7 @@ out:
* Will set up either WRITE SAME(10) or WRITE SAME(16) depending on
* the preference indicated by the target device.
**/
-static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
+static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
struct scsi_device *sdp = cmd->device;
@@ -948,10 +954,10 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
sector_t sector = blk_rq_pos(rq);
unsigned int nr_sectors = blk_rq_sectors(rq);
unsigned int nr_bytes = blk_rq_bytes(rq);
- int ret;
+ blk_status_t ret;
if (sdkp->device->no_write_same)
- return BLKPREP_INVALID;
+ return BLK_STS_TARGET;
BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
@@ -992,7 +998,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
return ret;
}
-static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
+static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
@@ -1005,10 +1011,10 @@ static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
cmd->allowed = SD_MAX_RETRIES;
rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
- return BLKPREP_OK;
+ return BLK_STS_OK;
}
-static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
+static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
{
struct request *rq = SCpnt->request;
struct scsi_device *sdp = SCpnt->device;
@@ -1018,18 +1024,14 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
sector_t threshold;
unsigned int this_count = blk_rq_sectors(rq);
unsigned int dif, dix;
- int ret;
unsigned char protect;
+ blk_status_t ret;
ret = scsi_init_io(SCpnt);
- if (ret != BLKPREP_OK)
+ if (ret != BLK_STS_OK)
return ret;
WARN_ON_ONCE(SCpnt != rq->special);
- /* from here on until we're complete, any goto out
- * is used for a killable error condition */
- ret = BLKPREP_KILL;
-
SCSI_LOG_HLQUEUE(1,
scmd_printk(KERN_INFO, SCpnt,
"%s: block=%llu, count=%d\n",
@@ -1042,7 +1044,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
blk_rq_sectors(rq)));
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
"Retry with 0x%p\n", SCpnt));
- goto out;
+ return BLK_STS_IOERR;
}
if (sdp->changed) {
@@ -1051,7 +1053,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
* the changed bit has been reset
*/
/* printk("SCSI disk has been changed or is not present. Prohibiting further I/O.\n"); */
- goto out;
+ return BLK_STS_IOERR;
}
/*
@@ -1089,31 +1091,28 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
- goto out;
- } else {
- block = block >> 1;
- this_count = this_count >> 1;
+ return BLK_STS_IOERR;
}
+ block = block >> 1;
+ this_count = this_count >> 1;
}
if (sdp->sector_size == 2048) {
if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
- goto out;
- } else {
- block = block >> 2;
- this_count = this_count >> 2;
+ return BLK_STS_IOERR;
}
+ block = block >> 2;
+ this_count = this_count >> 2;
}
if (sdp->sector_size == 4096) {
if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
scmd_printk(KERN_ERR, SCpnt,
"Bad block number requested\n");
- goto out;
- } else {
- block = block >> 3;
- this_count = this_count >> 3;
+ return BLK_STS_IOERR;
}
+ block = block >> 3;
+ this_count = this_count >> 3;
}
if (rq_data_dir(rq) == WRITE) {
SCpnt->cmnd[0] = WRITE_6;
@@ -1125,7 +1124,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
SCpnt->cmnd[0] = READ_6;
} else {
scmd_printk(KERN_ERR, SCpnt, "Unknown command %d\n", req_op(rq));
- goto out;
+ return BLK_STS_IOERR;
}
SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
@@ -1145,10 +1144,8 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
- if (unlikely(SCpnt->cmnd == NULL)) {
- ret = BLKPREP_DEFER;
- goto out;
- }
+ if (unlikely(!SCpnt->cmnd))
+ return BLK_STS_RESOURCE;
SCpnt->cmd_len = SD_EXT_CDB_SIZE;
memset(SCpnt->cmnd, 0, SCpnt->cmd_len);
@@ -1216,7 +1213,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
*/
scmd_printk(KERN_ERR, SCpnt,
"FUA write on READ/WRITE(6) drive\n");
- goto out;
+ return BLK_STS_IOERR;
}
SCpnt->cmnd[1] |= (unsigned char) ((block >> 16) & 0x1f);
@@ -1240,12 +1237,10 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
* This indicates that the command is ready from our end to be
* queued.
*/
- ret = BLKPREP_OK;
- out:
- return ret;
+ return BLK_STS_OK;
}
-static int sd_init_command(struct scsi_cmnd *cmd)
+static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
@@ -1261,7 +1256,7 @@ static int sd_init_command(struct scsi_cmnd *cmd)
case SD_LBP_ZERO:
return sd_setup_write_same10_cmnd(cmd, false);
default:
- return BLKPREP_INVALID;
+ return BLK_STS_TARGET;
}
case REQ_OP_WRITE_ZEROES:
return sd_setup_write_zeroes_cmnd(cmd);
@@ -1276,7 +1271,7 @@ static int sd_init_command(struct scsi_cmnd *cmd)
return sd_zbc_setup_reset_cmnd(cmd);
default:
WARN_ON_ONCE(1);
- return BLKPREP_KILL;
+ return BLK_STS_NOTSUPP;
}
}
@@ -1286,7 +1281,7 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt)
u8 *cmnd;
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
- __free_page(rq->special_vec.bv_page);
+ mempool_free(rq->special_vec.bv_page, sd_page_pool);
if (SCpnt->cmnd != scsi_req(rq)->cmd) {
cmnd = SCpnt->cmnd;
@@ -3623,6 +3618,13 @@ static int __init init_sd(void)
goto err_out_cache;
}
+ sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
+ if (!sd_page_pool) {
+ printk(KERN_ERR "sd: can't init discard page pool\n");
+ err = -ENOMEM;
+ goto err_out_ppool;
+ }
+
err = scsi_register_driver(&sd_template.gendrv);
if (err)
goto err_out_driver;
@@ -3630,6 +3632,9 @@ static int __init init_sd(void)
return 0;
err_out_driver:
+ mempool_destroy(sd_page_pool);
+
+err_out_ppool:
mempool_destroy(sd_cdb_pool);
err_out_cache:
@@ -3656,6 +3661,7 @@ static void __exit exit_sd(void)
scsi_unregister_driver(&sd_template.gendrv);
mempool_destroy(sd_cdb_pool);
+ mempool_destroy(sd_page_pool);
kmem_cache_destroy(sd_cdb_cache);
class_unregister(&sd_disk_class);
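
The special-payload page for UNMAP/WRITE SAME now comes from a mempool rather than alloc_page(), so the discard path can make forward progress under memory pressure; a minimal sketch of the lifecycle stitched together from the hunks above:

	struct page *page = mempool_alloc(sd_page_pool, GFP_ATOMIC);

	if (!page)
		return BLK_STS_RESOURCE;	/* requeued and retried later */
	clear_highpage(page);	/* pool pages are recycled, not pre-zeroed */
	/* ... build the UNMAP/WRITE SAME payload in the page ... */
	mempool_free(page, sd_page_pool);	/* from sd_uninit_command() */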
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 1d63f3a23ffb..7f43e6839bce 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -271,7 +271,7 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
+extern blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr);
extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
@@ -288,9 +288,9 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
-static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
+static inline blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
- return BLKPREP_INVALID;
+ return BLK_STS_TARGET;
}
static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index e06c48c866e4..83365b29a4d8 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -185,7 +185,7 @@ static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
*
* Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
*/
-int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
+blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
@@ -194,14 +194,14 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
if (!sd_is_zoned(sdkp))
/* Not a zoned device */
- return BLKPREP_KILL;
+ return BLK_STS_IOERR;
if (sdkp->device->changed)
- return BLKPREP_KILL;
+ return BLK_STS_IOERR;
if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
/* Unaligned request */
- return BLKPREP_KILL;
+ return BLK_STS_IOERR;
cmd->cmd_len = 16;
memset(cmd->cmnd, 0, cmd->cmd_len);
@@ -214,7 +214,7 @@ int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
cmd->transfersize = 0;
cmd->allowed = 0;
- return BLKPREP_OK;
+ return BLK_STS_OK;
}
/**
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index c6ad00703c5b..4e27460ec926 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1390,7 +1390,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
*/
srp->rq = NULL;
scsi_req_free_cmd(scsi_req(rq));
- __blk_put_request(rq->q, rq);
+ blk_put_request(rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (unlikely(srp->orphan)) {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index a25a07a0b7f0..bac084260d80 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -5319,7 +5319,8 @@ static int pqi_map_queues(struct Scsi_Host *shost)
{
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
- return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
+ return blk_mq_pci_map_queues(&shost->tag_set.map[0],
+ ctrl_info->pci_dev, 0);
}
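
The map_queues conversion is the same across pqi, virtio_scsi and the SCSI core: the mapping helpers now take a single struct blk_mq_queue_map rather than the whole tag set (illustrative before/after):

	/* before */ blk_mq_pci_map_queues(&shost->tag_set, pdev, 0);
	/* after  */ blk_mq_pci_map_queues(&shost->tag_set.map[0], pdev, 0);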
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 54dd70ae9731..38ddbbfe5f3c 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -80,7 +80,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
static DEFINE_MUTEX(sr_mutex);
static int sr_probe(struct device *);
static int sr_remove(struct device *);
-static int sr_init_command(struct scsi_cmnd *SCpnt);
+static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt);
static int sr_done(struct scsi_cmnd *);
static int sr_runtime_suspend(struct device *dev);
@@ -384,22 +384,22 @@ static int sr_done(struct scsi_cmnd *SCpnt)
return good_bytes;
}
-static int sr_init_command(struct scsi_cmnd *SCpnt)
+static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
{
int block = 0, this_count, s_size;
struct scsi_cd *cd;
struct request *rq = SCpnt->request;
- int ret;
+ blk_status_t ret;
ret = scsi_init_io(SCpnt);
- if (ret != BLKPREP_OK)
+ if (ret != BLK_STS_OK)
goto out;
WARN_ON_ONCE(SCpnt != rq->special);
cd = scsi_cd(rq->rq_disk);
/* from here on until we're complete, any goto out
* is used for a killable error condition */
- ret = BLKPREP_KILL;
+ ret = BLK_STS_IOERR;
SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
"Doing sr request, block = %d\n", block));
@@ -516,7 +516,7 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
* This indicates that the command is ready from our end to be
* queued.
*/
- ret = BLKPREP_OK;
+ ret = BLK_STS_OK;
out:
return ret;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 307df2fa39a3..7ff22d3f03e3 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -530,7 +530,7 @@ static void st_scsi_execute_end(struct request *req, blk_status_t status)
complete(SRpnt->waiting);
blk_rq_unmap_user(tmp);
- __blk_put_request(req->q, req);
+ blk_put_request(req);
}
static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index f03dc03a42c3..8f88348ebe42 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -446,7 +446,6 @@ struct storvsc_device {
bool destroy;
bool drain_notify;
- bool open_sub_channel;
atomic_t num_outstanding_req;
struct Scsi_Host *host;
@@ -636,33 +635,38 @@ get_in_err:
static void handle_sc_creation(struct vmbus_channel *new_sc)
{
struct hv_device *device = new_sc->primary_channel->device_obj;
+ struct device *dev = &device->device;
struct storvsc_device *stor_device;
struct vmstorage_channel_properties props;
+ int ret;
stor_device = get_out_stor_device(device);
if (!stor_device)
return;
- if (stor_device->open_sub_channel == false)
- return;
-
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
- vmbus_open(new_sc,
- storvsc_ringbuffer_size,
- storvsc_ringbuffer_size,
- (void *)&props,
- sizeof(struct vmstorage_channel_properties),
- storvsc_on_channel_callback, new_sc);
+ ret = vmbus_open(new_sc,
+ storvsc_ringbuffer_size,
+ storvsc_ringbuffer_size,
+ (void *)&props,
+ sizeof(struct vmstorage_channel_properties),
+ storvsc_on_channel_callback, new_sc);
- if (new_sc->state == CHANNEL_OPENED_STATE) {
- stor_device->stor_chns[new_sc->target_cpu] = new_sc;
- cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
+ /* In case vmbus_open() fails, we don't use the sub-channel. */
+ if (ret != 0) {
+ dev_err(dev, "Failed to open sub-channel: err=%d\n", ret);
+ return;
}
+
+ /* Add the sub-channel to the array of available channels. */
+ stor_device->stor_chns[new_sc->target_cpu] = new_sc;
+ cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
}
static void handle_multichannel_storage(struct hv_device *device, int max_chns)
{
+ struct device *dev = &device->device;
struct storvsc_device *stor_device;
int num_cpus = num_online_cpus();
int num_sc;
@@ -679,22 +683,12 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
request = &stor_device->init_request;
vstor_packet = &request->vstor_packet;
- stor_device->open_sub_channel = true;
/*
* Establish a handler for dealing with subchannels.
*/
vmbus_set_sc_create_callback(device->channel, handle_sc_creation);
/*
- * Check to see if sub-channels have already been created. This
- * can happen when this driver is re-loaded after unloading.
- */
-
- if (vmbus_are_subchannels_present(device->channel))
- return;
-
- stor_device->open_sub_channel = false;
- /*
* Request the host to create sub-channels.
*/
memset(request, 0, sizeof(struct storvsc_cmd_request));
@@ -710,23 +704,29 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
+ if (ret != 0) {
+ dev_err(dev, "Failed to create sub-channel: err=%d\n", ret);
return;
+ }
t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
- if (t == 0)
+ if (t == 0) {
+ dev_err(dev, "Failed to create sub-channel: timed out\n");
return;
+ }
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
- vstor_packet->status != 0)
+ vstor_packet->status != 0) {
+ dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n",
+ vstor_packet->operation, vstor_packet->status);
return;
+ }
/*
- * Now that we created the sub-channels, invoke the check; this
- * may trigger the callback.
+ * Nothing more needs to be done here: vmbus_process_offer()
+ * invokes channel->sc_creation_callback, which will open and use
+ * the sub-channel(s).
*/
- stor_device->open_sub_channel = true;
- vmbus_are_subchannels_present(device->channel);
}
static void cache_wwn(struct storvsc_device *stor_device,
@@ -1794,7 +1794,6 @@ static int storvsc_probe(struct hv_device *device,
}
stor_device->destroy = false;
- stor_device->open_sub_channel = false;
init_waitqueue_head(&stor_device->waiting_to_drain);
stor_device->device = device;
stor_device->host = host;
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index e5f8e54bf644..775bb4e5e36e 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -157,7 +157,7 @@ void ufs_bsg_remove(struct ufs_hba *hba)
if (!hba->bsg_queue)
return;
- bsg_unregister_queue(hba->bsg_queue);
+ bsg_remove_queue(hba->bsg_queue);
device_del(bsg_dev);
put_device(bsg_dev);
@@ -193,7 +193,7 @@ int ufs_bsg_probe(struct ufs_hba *hba)
if (ret)
goto out;
- q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, 0);
+ q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto out;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 1c72db94270e..c3c95b314286 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -719,8 +719,9 @@ static void virtscsi_target_destroy(struct scsi_target *starget)
static int virtscsi_map_queues(struct Scsi_Host *shost)
{
struct virtio_scsi *vscsi = shost_priv(shost);
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
- return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
+ return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
}
/*
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 6e491023fdd8..0d6b2a88fc8e 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1202,8 +1202,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
{
- pvscsi_shutdown_intr(adapter);
-
if (adapter->workqueue)
destroy_workqueue(adapter->workqueue);
@@ -1534,6 +1532,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_reset_adapter:
ll_adapter_reset(adapter);
out_release_resources:
+ pvscsi_shutdown_intr(adapter);
pvscsi_release_resources(adapter);
scsi_host_put(host);
out_disable_device:
@@ -1542,6 +1541,7 @@ out_disable_device:
return error;
out_release_resources_and_disable:
+ pvscsi_shutdown_intr(adapter);
pvscsi_release_resources(adapter);
goto out_disable_device;
}
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 321a92613a7e..ec0837ff039a 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -601,3 +601,71 @@ struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
+
+/**
+ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
+ * @d: the given DPIO object.
+ * @fqid: the id of frame queue to be queried.
+ * @fcnt: the queried frame count.
+ * @bcnt: the queried byte count.
+ *
+ * Knowing the FQ count at run-time can be useful in debugging situations.
+ * The instantaneous frame and byte counts are returned.
+ *
+ * Return 0 for a successful query, or a negative error code if the query fails.
+ */
+int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
+ u32 *fcnt, u32 *bcnt)
+{
+ struct qbman_fq_query_np_rslt state;
+ struct qbman_swp *swp;
+ unsigned long irqflags;
+ int ret;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ swp = d->swp;
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ ret = qbman_fq_query_state(swp, fqid, &state);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+ if (ret)
+ return ret;
+ *fcnt = qbman_fq_state_frame_count(&state);
+ *bcnt = qbman_fq_state_byte_count(&state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);
+
+/**
+ * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
+ * buffer pool.
+ * @d: the given DPIO object.
+ * @bpid: the index of buffer pool to be queried.
+ * @num: the queried number of buffers in the buffer pool.
+ *
+ * Return 0 for a successful query, or a negative error code if the query fails.
+ */
+int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
+{
+ struct qbman_bp_query_rslt state;
+ struct qbman_swp *swp;
+ unsigned long irqflags;
+ int ret;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ swp = d->swp;
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ ret = qbman_bp_query(swp, bpid, &state);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+ if (ret)
+ return ret;
+ *num = qbman_bp_info_num_free_bufs(&state);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
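
A hedged usage sketch for the two new query helpers (it assumes that, as with other dpaa2_io_service_* calls, passing NULL lets service_select() pick any available DPIO; fqid and bpid are caller-supplied):

	u32 fcnt, bcnt, num;

	if (!dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt))
		pr_debug("fqid %u: %u frames, %u bytes pending\n",
			 fqid, fcnt, bcnt);
	if (!dpaa2_io_query_bp_count(NULL, bpid, &num))
		pr_debug("bpid %u: %u free buffers\n", bpid, num);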
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index cf1d448ea468..0bddb85c0ae5 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -1003,3 +1003,99 @@ int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
return 0;
}
+
+#define QBMAN_RESPONSE_VERB_MASK 0x7f
+#define QBMAN_FQ_QUERY_NP 0x45
+#define QBMAN_BP_QUERY 0x32
+
+struct qbman_fq_query_desc {
+ u8 verb;
+ u8 reserved[3];
+ __le32 fqid;
+ u8 reserved2[56];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
+ struct qbman_fq_query_np_rslt *r)
+{
+ struct qbman_fq_query_desc *p;
+ void *resp;
+
+ p = (struct qbman_fq_query_desc *)qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ /* FQID is a 24 bit value */
+ p->fqid = cpu_to_le32(fqid & 0x00FFFFFF);
+ resp = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP);
+ if (!resp) {
+ pr_err("qbman: Query FQID %d NP fields failed, no response\n",
+ fqid);
+ return -EIO;
+ }
+ *r = *(struct qbman_fq_query_np_rslt *)resp;
+ /* Decode the outcome */
+ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_FQ_QUERY_NP);
+
+ /* Determine success or failure */
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n",
+ p->fqid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return (le32_to_cpu(r->frm_cnt) & 0x00FFFFFF);
+}
+
+u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r)
+{
+ return le32_to_cpu(r->byte_cnt);
+}
+
+struct qbman_bp_query_desc {
+ u8 verb;
+ u8 reserved;
+ __le16 bpid;
+ u8 reserved2[60];
+};
+
+int qbman_bp_query(struct qbman_swp *s, u16 bpid,
+ struct qbman_bp_query_rslt *r)
+{
+ struct qbman_bp_query_desc *p;
+ void *resp;
+
+ p = (struct qbman_bp_query_desc *)qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->bpid = cpu_to_le16(bpid);
+ resp = qbman_swp_mc_complete(s, p, QBMAN_BP_QUERY);
+ if (!resp) {
+ pr_err("qbman: Query BPID %d fields failed, no response\n",
+ bpid);
+ return -EIO;
+ }
+ *r = *(struct qbman_bp_query_rslt *)resp;
+ /* Decode the outcome */
+ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_BP_QUERY);
+
+ /* Determine success or failure */
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
+ pr_err("Query fields of BPID 0x%x failed, code=0x%02x\n",
+ bpid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
+{
+ return le32_to_cpu(a->fill);
+}
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index 89d1dd9969b6..fa35fc1afeaa 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -441,4 +441,62 @@ static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
return cmd;
}
+/* Query APIs */
+struct qbman_fq_query_np_rslt {
+ u8 verb;
+ u8 rslt;
+ u8 st1;
+ u8 st2;
+ u8 reserved[2];
+ __le16 od1_sfdr;
+ __le16 od2_sfdr;
+ __le16 od3_sfdr;
+ __le16 ra1_sfdr;
+ __le16 ra2_sfdr;
+ __le32 pfdr_hptr;
+ __le32 pfdr_tptr;
+ __le32 frm_cnt;
+ __le32 byte_cnt;
+ __le16 ics_surp;
+ u8 is;
+ u8 reserved2[29];
+};
+
+int qbman_fq_query_state(struct qbman_swp *s, u32 fqid,
+ struct qbman_fq_query_np_rslt *r);
+u32 qbman_fq_state_frame_count(const struct qbman_fq_query_np_rslt *r);
+u32 qbman_fq_state_byte_count(const struct qbman_fq_query_np_rslt *r);
+
+struct qbman_bp_query_rslt {
+ u8 verb;
+ u8 rslt;
+ u8 reserved[4];
+ u8 bdi;
+ u8 state;
+ __le32 fill;
+ __le32 hdotr;
+ __le16 swdet;
+ __le16 swdxt;
+ __le16 hwdet;
+ __le16 hwdxt;
+ __le16 swset;
+ __le16 swsxt;
+ __le16 vbpid;
+ __le16 icid;
+ __le64 bpscn_addr;
+ __le64 bpscn_ctx;
+ __le16 hw_targ;
+ u8 dbe;
+ u8 reserved2;
+ u8 sdcnt;
+ u8 hdcnt;
+ u8 sscnt;
+ u8 reserved3[9];
+};
+
+int qbman_bp_query(struct qbman_swp *s, u16 bpid,
+ struct qbman_bp_query_rslt *r);
+
+u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);
+
#endif /* __FSL_QBMAN_PORTAL_H */
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 5ce24718c2fd..52c153cd795a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -36,6 +36,8 @@
#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
#define QMAN_POLL_LIMIT 32
#define QMAN_PIRQ_DQRR_ITHRESH 12
+#define QMAN_DQRR_IT_MAX 15
+#define QMAN_ITP_MAX 0xFFF
#define QMAN_PIRQ_MR_ITHRESH 4
#define QMAN_PIRQ_IPERIOD 100
@@ -727,9 +729,15 @@ static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
qm_out(portal, QM_REG_DQRR_VDQCR, vdqcr);
}
-static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+static inline int qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
{
+ if (ithresh > QMAN_DQRR_IT_MAX)
+ return -EINVAL;
+
qm_out(portal, QM_REG_DQRR_ITR, ithresh);
+
+ return 0;
}
/* --- MR API --- */
@@ -1012,20 +1020,27 @@ static inline void put_affine_portal(void)
static struct workqueue_struct *qm_portal_wq;
-void qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
+int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh)
{
+ int res;
+
if (!portal)
- return;
+ return -EINVAL;
+
+ res = qm_dqrr_set_ithresh(&portal->p, ithresh);
+ if (res)
+ return res;
- qm_dqrr_set_ithresh(&portal->p, ithresh);
portal->p.dqrr.ithresh = ithresh;
+
+ return 0;
}
EXPORT_SYMBOL(qman_dqrr_set_ithresh);
void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh)
{
if (portal && ithresh)
- *ithresh = portal->p.dqrr.ithresh;
+ *ithresh = qm_in(&portal->p, QM_REG_DQRR_ITR);
}
EXPORT_SYMBOL(qman_dqrr_get_ithresh);
@@ -1036,10 +1051,14 @@ void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod)
}
EXPORT_SYMBOL(qman_portal_get_iperiod);
-void qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
+int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod)
{
- if (portal)
- qm_out(&portal->p, QM_REG_ITPR, iperiod);
+ if (!portal || iperiod > QMAN_ITP_MAX)
+ return -EINVAL;
+
+ qm_out(&portal->p, QM_REG_ITPR, iperiod);
+
+ return 0;
}
EXPORT_SYMBOL(qman_portal_set_iperiod);
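
Both setters now validate their arguments against the hardware limits defined above and return -EINVAL on overflow, so callers should check the result (an illustrative sketch; portal, ithresh and iperiod are caller state):

	/* ithresh <= QMAN_DQRR_IT_MAX (15), iperiod <= QMAN_ITP_MAX (0xFFF) */
	if (qman_dqrr_set_ithresh(portal, ithresh) ||
	    qman_portal_set_iperiod(portal, iperiod))
		pr_warn("qman: interrupt coalescing parameters rejected\n");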
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 1fa840e3d930..b30af18516d6 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -600,7 +600,6 @@ int tegra_powergate_sequence_power_up(unsigned int id, struct clk *clk,
}
EXPORT_SYMBOL(tegra_powergate_sequence_power_up);
-#ifdef CONFIG_SMP
/**
* tegra_get_cpu_powergate_id() - convert from CPU ID to partition ID
* @cpuid: CPU partition ID
@@ -660,7 +659,6 @@ int tegra_pmc_cpu_remove_clamping(unsigned int cpuid)
return tegra_powergate_remove_clamping(id);
}
-#endif /* CONFIG_SMP */
static int tegra_pmc_restart_notify(struct notifier_block *this,
unsigned long action, void *data)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 7d3a5c94727e..9f89cb134549 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -91,6 +91,15 @@ config SPI_AT91_USART
This selects a driver for the AT91 USART Controller as SPI Master,
present on AT91 and SAMA5 SoC series.
+config SPI_ATMEL_QUADSPI
+ tristate "Atmel Quad SPI Controller"
+ depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
+ depends on OF && HAS_IOMEM
+ help
+ This enables support for the Quad SPI controller in master mode.
+ This driver does not support generic SPI. The implementation
+ only supports the spi-mem interface.
+
config SPI_AU1550
tristate "Au1550/Au1200/Au1300 SPI Controller"
depends on MIPS_ALCHEMY
@@ -397,6 +406,13 @@ config SPI_MT65XX
say Y or M here. If you are not sure, say N.
SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
+config SPI_NPCM_PSPI
+ tristate "Nuvoton NPCM PSPI Controller"
+ depends on ARCH_NPCM || COMPILE_TEST
+ help
+ This driver provides support for the Nuvoton NPCM BMC
+ Peripheral SPI controller in master mode.
+
config SPI_NUC900
tristate "Nuvoton NUC900 series SPI"
depends on ARCH_W90X900
@@ -435,7 +451,7 @@ config SPI_OMAP_UWIRE
config SPI_OMAP24XX
tristate "McSPI driver for OMAP"
- depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
select SG_SPLIT
help
SPI master controller for OMAP24XX and later Multichannel SPI
@@ -684,6 +700,12 @@ config SPI_SUN6I
help
This enables using the SPI controller on the Allwinner A31 SoCs.
+config SPI_MXIC
+ tristate "Macronix MX25F0A SPI controller"
+ depends on SPI_MASTER
+ help
+ This selects the Macronix MX25F0A SPI controller driver.
+
config SPI_MXS
tristate "Freescale MXS SPI controller"
depends on ARCH_MXS
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 3575205c5c27..f29627040dfb 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
+obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
obj-$(CONFIG_SPI_AT91_USART) += spi-at91-usart.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
@@ -58,7 +59,9 @@ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
+obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
+obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 820048726b4f..ddc712410812 100644
--- a/drivers/mtd/spi-nor/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -2,8 +2,10 @@
* Driver for Atmel QSPI Controller
*
* Copyright (C) 2015 Atmel Corporation
+ * Copyright (C) 2018 Cryptera A/S
*
* Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+ * Author: Piotr Bugalski <bugalski.piotr@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -27,14 +29,10 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/spi-nor.h>
-#include <linux/platform_data/atmel.h>
#include <linux/of.h>
#include <linux/io.h>
-#include <linux/gpio/consumer.h>
+#include <linux/spi/spi-mem.h>
/* QSPI register offsets */
#define QSPI_CR 0x0000 /* Control Register */
@@ -67,7 +65,7 @@
#define QSPI_CR_LASTXFER BIT(24)
/* Bitfields in QSPI_MR (Mode Register) */
-#define QSPI_MR_SSM BIT(0)
+#define QSPI_MR_SMM BIT(0)
#define QSPI_MR_LLB BIT(1)
#define QSPI_MR_WDRBT BIT(2)
#define QSPI_MR_SMRM BIT(3)
@@ -157,33 +155,24 @@ struct atmel_qspi {
struct clk *clk;
struct platform_device *pdev;
u32 pending;
-
- struct spi_nor nor;
- u32 clk_rate;
struct completion cmd_completion;
};
-struct atmel_qspi_command {
- union {
- struct {
- u32 instruction:1;
- u32 address:3;
- u32 mode:1;
- u32 dummy:1;
- u32 data:1;
- u32 reserved:25;
- } bits;
- u32 word;
- } enable;
- u8 instruction;
- u8 mode;
- u8 num_mode_cycles;
- u8 num_dummy_cycles;
- u32 address;
-
- size_t buf_len;
- const void *tx_buf;
- void *rx_buf;
+struct qspi_mode {
+ u8 cmd_buswidth;
+ u8 addr_buswidth;
+ u8 data_buswidth;
+ u32 config;
+};
+
+static const struct qspi_mode sama5d2_qspi_modes[] = {
+ { 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
+ { 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
+ { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
+ { 1, 2, 2, QSPI_IFR_WIDTH_DUAL_IO },
+ { 1, 4, 4, QSPI_IFR_WIDTH_QUAD_IO },
+ { 2, 2, 2, QSPI_IFR_WIDTH_DUAL_CMD },
+ { 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};
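
The table above is the whole protocol-negotiation surface of the new driver: each row maps a (command, address, data) buswidth tuple onto one QSPI_IFR width setting. A spi-mem client describes the same tuple in a struct spi_mem_op. As a minimal sketch (the opcode, function name and wrapper are illustrative, not part of this patch), a 1-1-4 quad-output fast read matching the { 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT } row could be issued as:

#include <linux/spi/spi-mem.h>

/* Illustrative spi-mem client op: 1-1-4 quad output fast read (0x6b). */
static int example_quad_read(struct spi_mem *mem, u32 addr, void *buf,
			     size_t len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),	     /* cmd on 1 wire  */
			   SPI_MEM_OP_ADDR(3, addr, 1),	     /* 24-bit address */
			   SPI_MEM_OP_DUMMY(1, 1),	     /* 8 dummy cycles */
			   SPI_MEM_OP_DATA_IN(len, buf, 4)); /* data, 4 wires  */

	return spi_mem_exec_op(mem, &op);
}
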
/* Register access functions */
@@ -197,246 +186,140 @@ static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value)
writel_relaxed(value, aq->regs + reg);
}
-static int atmel_qspi_run_transfer(struct atmel_qspi *aq,
- const struct atmel_qspi_command *cmd)
+static inline bool is_compatible(const struct spi_mem_op *op,
+ const struct qspi_mode *mode)
{
- void __iomem *ahb_mem;
-
- /* Then fallback to a PIO transfer (memcpy() DOES NOT work!) */
- ahb_mem = aq->mem;
- if (cmd->enable.bits.address)
- ahb_mem += cmd->address;
- if (cmd->tx_buf)
- _memcpy_toio(ahb_mem, cmd->tx_buf, cmd->buf_len);
- else
- _memcpy_fromio(cmd->rx_buf, ahb_mem, cmd->buf_len);
-
- return 0;
-}
+ if (op->cmd.buswidth != mode->cmd_buswidth)
+ return false;
-#ifdef DEBUG
-static void atmel_qspi_debug_command(struct atmel_qspi *aq,
- const struct atmel_qspi_command *cmd,
- u32 ifr)
-{
- u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
- size_t len = 0;
- int i;
+ if (op->addr.nbytes && op->addr.buswidth != mode->addr_buswidth)
+ return false;
- if (cmd->enable.bits.instruction)
- cmd_buf[len++] = cmd->instruction;
+ if (op->data.nbytes && op->data.buswidth != mode->data_buswidth)
+ return false;
- for (i = cmd->enable.bits.address-1; i >= 0; --i)
- cmd_buf[len++] = (cmd->address >> (i << 3)) & 0xff;
+ return true;
+}
- if (cmd->enable.bits.mode)
- cmd_buf[len++] = cmd->mode;
+static int find_mode(const struct spi_mem_op *op)
+{
+ u32 i;
- if (cmd->enable.bits.dummy) {
- int num = cmd->num_dummy_cycles;
+ for (i = 0; i < ARRAY_SIZE(sama5d2_qspi_modes); i++)
+ if (is_compatible(op, &sama5d2_qspi_modes[i]))
+ return i;
- switch (ifr & QSPI_IFR_WIDTH_MASK) {
- case QSPI_IFR_WIDTH_SINGLE_BIT_SPI:
- case QSPI_IFR_WIDTH_DUAL_OUTPUT:
- case QSPI_IFR_WIDTH_QUAD_OUTPUT:
- num >>= 3;
- break;
- case QSPI_IFR_WIDTH_DUAL_IO:
- case QSPI_IFR_WIDTH_DUAL_CMD:
- num >>= 2;
- break;
- case QSPI_IFR_WIDTH_QUAD_IO:
- case QSPI_IFR_WIDTH_QUAD_CMD:
- num >>= 1;
- break;
- default:
- return;
- }
+ return -1;
+}
- for (i = 0; i < num; ++i)
- cmd_buf[len++] = 0;
- }
+static bool atmel_qspi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (find_mode(op) < 0)
+ return false;
- /* Dump the SPI command */
- print_hex_dump(KERN_DEBUG, "qspi cmd: ", DUMP_PREFIX_NONE,
- 32, 1, cmd_buf, len, false);
+ /* special case not supported by hardware */
+ if (op->addr.nbytes == 2 && op->cmd.buswidth != op->addr.buswidth &&
+ op->dummy.nbytes == 0)
+ return false;
-#ifdef VERBOSE_DEBUG
- /* If verbose debug is enabled, also dump the TX data */
- if (cmd->enable.bits.data && cmd->tx_buf)
- print_hex_dump(KERN_DEBUG, "qspi tx : ", DUMP_PREFIX_NONE,
- 32, 1, cmd->tx_buf, cmd->buf_len, false);
-#endif
+ return true;
}
-#else
-#define atmel_qspi_debug_command(aq, cmd, ifr)
-#endif
-static int atmel_qspi_run_command(struct atmel_qspi *aq,
- const struct atmel_qspi_command *cmd,
- u32 ifr_tfrtyp, enum spi_nor_protocol proto)
+static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
+ int mode;
+ u32 dummy_cycles = 0;
u32 iar, icr, ifr, sr;
int err = 0;
iar = 0;
- icr = 0;
- ifr = ifr_tfrtyp;
+ icr = QSPI_ICR_INST(op->cmd.opcode);
+ ifr = QSPI_IFR_INSTEN;
- /* Set the SPI protocol */
- switch (proto) {
- case SNOR_PROTO_1_1_1:
- ifr |= QSPI_IFR_WIDTH_SINGLE_BIT_SPI;
- break;
+ qspi_writel(aq, QSPI_MR, QSPI_MR_SMM);
- case SNOR_PROTO_1_1_2:
- ifr |= QSPI_IFR_WIDTH_DUAL_OUTPUT;
- break;
+ mode = find_mode(op);
+ if (mode < 0)
+ return -ENOTSUPP;
- case SNOR_PROTO_1_1_4:
- ifr |= QSPI_IFR_WIDTH_QUAD_OUTPUT;
- break;
+ ifr |= sama5d2_qspi_modes[mode].config;
- case SNOR_PROTO_1_2_2:
- ifr |= QSPI_IFR_WIDTH_DUAL_IO;
- break;
+ if (op->dummy.buswidth && op->dummy.nbytes)
+ dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
- case SNOR_PROTO_1_4_4:
- ifr |= QSPI_IFR_WIDTH_QUAD_IO;
- break;
-
- case SNOR_PROTO_2_2_2:
- ifr |= QSPI_IFR_WIDTH_DUAL_CMD;
- break;
-
- case SNOR_PROTO_4_4_4:
- ifr |= QSPI_IFR_WIDTH_QUAD_CMD;
- break;
-
- default:
- return -EINVAL;
- }
-
- /* Compute instruction parameters */
- if (cmd->enable.bits.instruction) {
- icr |= QSPI_ICR_INST(cmd->instruction);
- ifr |= QSPI_IFR_INSTEN;
- }
-
- /* Compute address parameters */
- switch (cmd->enable.bits.address) {
- case 4:
- ifr |= QSPI_IFR_ADDRL;
- /* fall through to the 24bit (3 byte) address case. */
- case 3:
- iar = (cmd->enable.bits.data) ? 0 : cmd->address;
- ifr |= QSPI_IFR_ADDREN;
- break;
- case 0:
- break;
- default:
- return -EINVAL;
- }
-
- /* Compute option parameters */
- if (cmd->enable.bits.mode && cmd->num_mode_cycles) {
- u32 mode_cycle_bits, mode_bits;
-
- icr |= QSPI_ICR_OPT(cmd->mode);
- ifr |= QSPI_IFR_OPTEN;
-
- switch (ifr & QSPI_IFR_WIDTH_MASK) {
- case QSPI_IFR_WIDTH_SINGLE_BIT_SPI:
- case QSPI_IFR_WIDTH_DUAL_OUTPUT:
- case QSPI_IFR_WIDTH_QUAD_OUTPUT:
- mode_cycle_bits = 1;
- break;
- case QSPI_IFR_WIDTH_DUAL_IO:
- case QSPI_IFR_WIDTH_DUAL_CMD:
- mode_cycle_bits = 2;
+ if (op->addr.buswidth) {
+ switch (op->addr.nbytes) {
+ case 0:
break;
- case QSPI_IFR_WIDTH_QUAD_IO:
- case QSPI_IFR_WIDTH_QUAD_CMD:
- mode_cycle_bits = 4;
- break;
- default:
- return -EINVAL;
- }
-
- mode_bits = cmd->num_mode_cycles * mode_cycle_bits;
- switch (mode_bits) {
case 1:
- ifr |= QSPI_IFR_OPTL_1BIT;
+ ifr |= QSPI_IFR_OPTEN | QSPI_IFR_OPTL_8BIT;
+ icr |= QSPI_ICR_OPT(op->addr.val & 0xff);
break;
-
case 2:
- ifr |= QSPI_IFR_OPTL_2BIT;
+ if (dummy_cycles < 8 / op->addr.buswidth) {
+ ifr &= ~QSPI_IFR_INSTEN;
+ ifr |= QSPI_IFR_ADDREN;
+ iar = (op->cmd.opcode << 16) |
+ (op->addr.val & 0xffff);
+ } else {
+ ifr |= QSPI_IFR_ADDREN;
+ iar = (op->addr.val << 8) & 0xffffff;
+ dummy_cycles -= 8 / op->addr.buswidth;
+ }
break;
-
- case 4:
- ifr |= QSPI_IFR_OPTL_4BIT;
+ case 3:
+ ifr |= QSPI_IFR_ADDREN;
+ iar = op->addr.val & 0xffffff;
break;
-
- case 8:
- ifr |= QSPI_IFR_OPTL_8BIT;
+ case 4:
+ ifr |= QSPI_IFR_ADDREN | QSPI_IFR_ADDRL;
+ iar = op->addr.val & 0x7ffffff;
break;
-
default:
- return -EINVAL;
+ return -ENOTSUPP;
}
}
/* Set number of dummy cycles */
- if (cmd->enable.bits.dummy)
- ifr |= QSPI_IFR_NBDUM(cmd->num_dummy_cycles);
+ if (dummy_cycles)
+ ifr |= QSPI_IFR_NBDUM(dummy_cycles);
/* Set data enable */
- if (cmd->enable.bits.data) {
+ if (op->data.nbytes)
ifr |= QSPI_IFR_DATAEN;
- /* Special case for Continuous Read Mode */
- if (!cmd->tx_buf && !cmd->rx_buf)
- ifr |= QSPI_IFR_CRM;
- }
+ if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
+ ifr |= QSPI_IFR_TFRTYP_TRSFR_READ;
+ else
+ ifr |= QSPI_IFR_TFRTYP_TRSFR_WRITE;
/* Clear pending interrupts */
(void)qspi_readl(aq, QSPI_SR);
/* Set QSPI Instruction Frame registers */
- atmel_qspi_debug_command(aq, cmd, ifr);
qspi_writel(aq, QSPI_IAR, iar);
qspi_writel(aq, QSPI_ICR, icr);
qspi_writel(aq, QSPI_IFR, ifr);
/* Skip to the final steps if there is no data */
- if (!cmd->enable.bits.data)
- goto no_data;
-
- /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
- (void)qspi_readl(aq, QSPI_IFR);
-
- /* Stop here for continuous read */
- if (!cmd->tx_buf && !cmd->rx_buf)
- return 0;
- /* Send/Receive data */
- err = atmel_qspi_run_transfer(aq, cmd);
-
- /* Release the chip-select */
- qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER);
-
- if (err)
- return err;
+ if (op->data.nbytes) {
+ /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
+ (void)qspi_readl(aq, QSPI_IFR);
+
+ /* Send/Receive data */
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ _memcpy_fromio(op->data.buf.in,
+ aq->mem + iar, op->data.nbytes);
+ else
+ _memcpy_toio(aq->mem + iar,
+ op->data.buf.out, op->data.nbytes);
+
+ /* Release the chip-select */
+ qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER);
+ }
-#if defined(DEBUG) && defined(VERBOSE_DEBUG)
- /*
- * If verbose debug is enabled, also dump the RX data in addition to
- * the SPI command previously dumped by atmel_qspi_debug_command()
- */
- if (cmd->rx_buf)
- print_hex_dump(KERN_DEBUG, "qspi rx : ", DUMP_PREFIX_NONE,
- 32, 1, cmd->rx_buf, cmd->buf_len, false);
-#endif
-no_data:
/* Poll INSTRuction End status */
sr = qspi_readl(aq, QSPI_SR);
if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
@@ -454,129 +337,50 @@ no_data:
return err;
}
-static int atmel_qspi_read_reg(struct spi_nor *nor, u8 opcode,
- u8 *buf, int len)
-{
- struct atmel_qspi *aq = nor->priv;
- struct atmel_qspi_command cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.enable.bits.instruction = 1;
- cmd.enable.bits.data = 1;
- cmd.instruction = opcode;
- cmd.rx_buf = buf;
- cmd.buf_len = len;
- return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ,
- nor->reg_proto);
-}
-
-static int atmel_qspi_write_reg(struct spi_nor *nor, u8 opcode,
- u8 *buf, int len)
-{
- struct atmel_qspi *aq = nor->priv;
- struct atmel_qspi_command cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.enable.bits.instruction = 1;
- cmd.enable.bits.data = (buf != NULL && len > 0);
- cmd.instruction = opcode;
- cmd.tx_buf = buf;
- cmd.buf_len = len;
- return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
- nor->reg_proto);
-}
-
-static ssize_t atmel_qspi_write(struct spi_nor *nor, loff_t to, size_t len,
- const u_char *write_buf)
+static const char *atmel_qspi_get_name(struct spi_mem *spimem)
{
- struct atmel_qspi *aq = nor->priv;
- struct atmel_qspi_command cmd;
- ssize_t ret;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.enable.bits.instruction = 1;
- cmd.enable.bits.address = nor->addr_width;
- cmd.enable.bits.data = 1;
- cmd.instruction = nor->program_opcode;
- cmd.address = (u32)to;
- cmd.tx_buf = write_buf;
- cmd.buf_len = len;
- ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM,
- nor->write_proto);
- return (ret < 0) ? ret : len;
+ return dev_name(spimem->spi->dev.parent);
}
-static int atmel_qspi_erase(struct spi_nor *nor, loff_t offs)
-{
- struct atmel_qspi *aq = nor->priv;
- struct atmel_qspi_command cmd;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.enable.bits.instruction = 1;
- cmd.enable.bits.address = nor->addr_width;
- cmd.instruction = nor->erase_opcode;
- cmd.address = (u32)offs;
- return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE,
- nor->reg_proto);
-}
-
-static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len,
- u_char *read_buf)
-{
- struct atmel_qspi *aq = nor->priv;
- struct atmel_qspi_command cmd;
- u8 num_mode_cycles, num_dummy_cycles;
- ssize_t ret;
-
- if (nor->read_dummy >= 2) {
- num_mode_cycles = 2;
- num_dummy_cycles = nor->read_dummy - 2;
- } else {
- num_mode_cycles = nor->read_dummy;
- num_dummy_cycles = 0;
- }
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.enable.bits.instruction = 1;
- cmd.enable.bits.address = nor->addr_width;
- cmd.enable.bits.mode = (num_mode_cycles > 0);
- cmd.enable.bits.dummy = (num_dummy_cycles > 0);
- cmd.enable.bits.data = 1;
- cmd.instruction = nor->read_opcode;
- cmd.address = (u32)from;
- cmd.mode = 0xff; /* This value prevents from entering the 0-4-4 mode */
- cmd.num_mode_cycles = num_mode_cycles;
- cmd.num_dummy_cycles = num_dummy_cycles;
- cmd.rx_buf = read_buf;
- cmd.buf_len = len;
- ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ_MEM,
- nor->read_proto);
- return (ret < 0) ? ret : len;
-}
+static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
+ .supports_op = atmel_qspi_supports_op,
+ .exec_op = atmel_qspi_exec_op,
+ .get_name = atmel_qspi_get_name
+};
-static int atmel_qspi_init(struct atmel_qspi *aq)
+static int atmel_qspi_setup(struct spi_device *spi)
{
+ struct spi_controller *ctrl = spi->master;
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
unsigned long src_rate;
- u32 mr, scr, scbr;
+ u32 scr, scbr;
- /* Reset the QSPI controller */
- qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST);
+ if (ctrl->busy)
+ return -EBUSY;
- /* Set the QSPI controller in Serial Memory Mode */
- mr = QSPI_MR_NBBITS(8) | QSPI_MR_SSM;
- qspi_writel(aq, QSPI_MR, mr);
+ if (!spi->max_speed_hz)
+ return -EINVAL;
src_rate = clk_get_rate(aq->clk);
if (!src_rate)
return -EINVAL;
/* Compute the QSPI baudrate */
- scbr = DIV_ROUND_UP(src_rate, aq->clk_rate);
+ scbr = DIV_ROUND_UP(src_rate, spi->max_speed_hz);
if (scbr > 0)
scbr--;
+
scr = QSPI_SCR_SCBR(scbr);
qspi_writel(aq, QSPI_SCR, scr);
+ return 0;
+}
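
With the move to spi-mem, the clock rate now comes from the spi_device instead of a "spi-max-frequency" property parsed by the driver itself. The divider math is f_spck = f_periph / (SCBR + 1); DIV_ROUND_UP() followed by the decrement picks the smallest divider whose output does not exceed the requested rate. A worked example with hypothetical rates:

/* Hypothetical rates, assuming f_spck = f_periph / (SCBR + 1): */
unsigned long src_rate = 132000000;			/* peripheral clock   */
u32 max_speed_hz = 50000000;				/* requested SCK rate */
u32 scbr = DIV_ROUND_UP(src_rate, max_speed_hz) - 1;	/* = 2                */
unsigned long actual = src_rate / (scbr + 1);		/* = 44 MHz <= 50 MHz */
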
+
+static int atmel_qspi_init(struct atmel_qspi *aq)
+{
+ /* Reset the QSPI controller */
+ qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST);
+
/* Enable the QSPI controller */
qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN);
@@ -604,38 +408,25 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
static int atmel_qspi_probe(struct platform_device *pdev)
{
- const struct spi_nor_hwcaps hwcaps = {
- .mask = SNOR_HWCAPS_READ |
- SNOR_HWCAPS_READ_FAST |
- SNOR_HWCAPS_READ_1_1_2 |
- SNOR_HWCAPS_READ_1_2_2 |
- SNOR_HWCAPS_READ_2_2_2 |
- SNOR_HWCAPS_READ_1_1_4 |
- SNOR_HWCAPS_READ_1_4_4 |
- SNOR_HWCAPS_READ_4_4_4 |
- SNOR_HWCAPS_PP |
- SNOR_HWCAPS_PP_1_1_4 |
- SNOR_HWCAPS_PP_1_4_4 |
- SNOR_HWCAPS_PP_4_4_4,
- };
- struct device_node *child, *np = pdev->dev.of_node;
+ struct spi_controller *ctrl;
struct atmel_qspi *aq;
struct resource *res;
- struct spi_nor *nor;
- struct mtd_info *mtd;
int irq, err = 0;
- if (of_get_child_count(np) != 1)
- return -ENODEV;
- child = of_get_next_child(np, NULL);
+ ctrl = spi_alloc_master(&pdev->dev, sizeof(*aq));
+ if (!ctrl)
+ return -ENOMEM;
- aq = devm_kzalloc(&pdev->dev, sizeof(*aq), GFP_KERNEL);
- if (!aq) {
- err = -ENOMEM;
- goto exit;
- }
+ ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
+ ctrl->setup = atmel_qspi_setup;
+ ctrl->bus_num = -1;
+ ctrl->mem_ops = &atmel_qspi_mem_ops;
+ ctrl->num_chipselect = 1;
+ ctrl->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, ctrl);
+
+ aq = spi_controller_get_devdata(ctrl);
- platform_set_drvdata(pdev, aq);
init_completion(&aq->cmd_completion);
aq->pdev = pdev;
@@ -684,54 +475,30 @@ static int atmel_qspi_probe(struct platform_device *pdev)
if (err)
goto disable_clk;
- /* Setup the spi-nor */
- nor = &aq->nor;
- mtd = &nor->mtd;
-
- nor->dev = &pdev->dev;
- spi_nor_set_flash_node(nor, child);
- nor->priv = aq;
- mtd->priv = nor;
-
- nor->read_reg = atmel_qspi_read_reg;
- nor->write_reg = atmel_qspi_write_reg;
- nor->read = atmel_qspi_read;
- nor->write = atmel_qspi_write;
- nor->erase = atmel_qspi_erase;
-
- err = of_property_read_u32(child, "spi-max-frequency", &aq->clk_rate);
- if (err < 0)
- goto disable_clk;
-
err = atmel_qspi_init(aq);
if (err)
goto disable_clk;
- err = spi_nor_scan(nor, NULL, &hwcaps);
+ err = spi_register_controller(ctrl);
if (err)
goto disable_clk;
- err = mtd_device_register(mtd, NULL, 0);
- if (err)
- goto disable_clk;
-
- of_node_put(child);
-
return 0;
disable_clk:
clk_disable_unprepare(aq->clk);
exit:
- of_node_put(child);
+ spi_controller_put(ctrl);
return err;
}
static int atmel_qspi_remove(struct platform_device *pdev)
{
- struct atmel_qspi *aq = platform_get_drvdata(pdev);
+ struct spi_controller *ctrl = platform_get_drvdata(pdev);
+ struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
- mtd_device_unregister(&aq->nor.mtd);
+ spi_unregister_controller(ctrl);
qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS);
clk_disable_unprepare(aq->clk);
return 0;
@@ -777,5 +544,6 @@ static struct platform_driver atmel_qspi_driver = {
module_platform_driver(atmel_qspi_driver);
MODULE_AUTHOR("Cyrille Pitchen <cyrille.pitchen@atmel.com>");
+MODULE_AUTHOR("Piotr Bugalski <bugalski.piotr@gmail.com");
MODULE_DESCRIPTION("Atmel QSPI Controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index a924657642fa..a694d702e574 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -12,7 +12,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@@ -399,6 +401,59 @@ at91_usart_spi_probe_fail:
return ret;
}
+__maybe_unused static int at91_usart_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_controller *ctlr = dev_get_drvdata(dev);
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ clk_disable_unprepare(aus->clk);
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+__maybe_unused static int at91_usart_spi_runtime_resume(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+
+ pinctrl_pm_select_default_state(dev);
+
+ return clk_prepare_enable(aus->clk);
+}
+
+__maybe_unused static int at91_usart_spi_suspend(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_controller_suspend(ctrl);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ at91_usart_spi_runtime_suspend(dev);
+
+ return 0;
+}
+
+__maybe_unused static int at91_usart_spi_resume(struct device *dev)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+ int ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = at91_usart_spi_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ at91_usart_spi_init(aus);
+
+ return spi_controller_resume(ctrl);
+}
+
static int at91_usart_spi_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = platform_get_drvdata(pdev);
@@ -409,6 +464,12 @@ static int at91_usart_spi_remove(struct platform_device *pdev)
return 0;
}
+static const struct dev_pm_ops at91_usart_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(at91_usart_spi_suspend, at91_usart_spi_resume)
+ SET_RUNTIME_PM_OPS(at91_usart_spi_runtime_suspend,
+ at91_usart_spi_runtime_resume, NULL)
+};
+
static const struct of_device_id at91_usart_spi_dt_ids[] = {
{ .compatible = "microchip,at91sam9g45-usart-spi"},
{ /* sentinel */}
@@ -419,6 +480,7 @@ MODULE_DEVICE_TABLE(of, at91_usart_spi_dt_ids);
static struct platform_driver at91_usart_spi_driver = {
.driver = {
.name = "at91_usart_spi",
+ .pm = &at91_usart_spi_pm_ops,
},
.probe = at91_usart_spi_probe,
.remove = at91_usart_spi_remove,
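
The four callbacks are marked __maybe_unused because their only references sit inside SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS(), which expand to nothing when CONFIG_PM_SLEEP or CONFIG_PM is off; without the annotation, such builds would warn about defined-but-unused functions. A simplified shape of the helper, from include/linux/pm.h:

/* Simplified from include/linux/pm.h: */
#ifdef CONFIG_PM_SLEEP
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
	.suspend = suspend_fn, \
	.resume = resume_fn, \
	/* ...freeze/thaw/poweroff/restore alias the same pair... */
#else
#define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
#endif
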
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index f35cc10772f6..35aebdfd3b4e 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -20,7 +20,6 @@
* GNU General Public License for more details.
*/
-#include <asm/page.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -72,6 +71,8 @@
#define BCM2835_SPI_CS_CS_10 0x00000002
#define BCM2835_SPI_CS_CS_01 0x00000001
+#define BCM2835_SPI_FIFO_SIZE 64
+#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_POLLING_LIMIT_US 30
#define BCM2835_SPI_POLLING_JIFFIES 2
#define BCM2835_SPI_DMA_MIN_LENGTH 96
@@ -80,15 +81,36 @@
#define DRV_NAME "spi-bcm2835"
+/**
+ * struct bcm2835_spi - BCM2835 SPI controller
+ * @regs: base address of register map
+ * @clk: core clock, divided to calculate serial clock
+ * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
+ * @tfr: SPI transfer currently processed
+ * @tx_buf: pointer whence next transmitted byte is read
+ * @rx_buf: pointer where next received byte is written
+ * @tx_len: remaining bytes to transmit
+ * @rx_len: remaining bytes to receive
+ * @tx_prologue: bytes transmitted without DMA if first TX sglist entry's
+ * length is not a multiple of 4 (to overcome hardware limitation)
+ * @rx_prologue: bytes received without DMA if first RX sglist entry's
+ * length is not a multiple of 4 (to overcome hardware limitation)
+ * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
+ * @dma_pending: whether a DMA transfer is in progress
+ */
struct bcm2835_spi {
void __iomem *regs;
struct clk *clk;
int irq;
+ struct spi_transfer *tfr;
const u8 *tx_buf;
u8 *rx_buf;
int tx_len;
int rx_len;
- bool dma_pending;
+ int tx_prologue;
+ int rx_prologue;
+ unsigned int tx_spillover;
+ unsigned int dma_pending;
};
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
@@ -126,6 +148,115 @@ static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
}
}
+/**
+ * bcm2835_rd_fifo_count() - blindly read exactly @count bytes from RX FIFO
+ * @bs: BCM2835 SPI controller
+ * @count: bytes to read from RX FIFO
+ *
+ * The caller must ensure that @bs->rx_len is greater than or equal to @count,
+ * that the RX FIFO contains at least @count bytes and that the DMA Enable flag
+ * in the CS register is set (such that a read from the FIFO register receives
+ * 32-bit instead of just 8-bit). Moreover @bs->rx_buf must not be %NULL.
+ */
+static inline void bcm2835_rd_fifo_count(struct bcm2835_spi *bs, int count)
+{
+ u32 val;
+ int len;
+
+ bs->rx_len -= count;
+
+ while (count > 0) {
+ val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
+ len = min(count, 4);
+ memcpy(bs->rx_buf, &val, len);
+ bs->rx_buf += len;
+ count -= 4;
+ }
+}
+
+/**
+ * bcm2835_wr_fifo_count() - blindly write exactly @count bytes to TX FIFO
+ * @bs: BCM2835 SPI controller
+ * @count: bytes to write to TX FIFO
+ *
+ * The caller must ensure that @bs->tx_len is greater than or equal to @count,
+ * that the TX FIFO can accommodate @count bytes and that the DMA Enable flag
+ * in the CS register is set (such that a write to the FIFO register transmits
+ * 32-bit instead of just 8-bit).
+ */
+static inline void bcm2835_wr_fifo_count(struct bcm2835_spi *bs, int count)
+{
+ u32 val;
+ int len;
+
+ bs->tx_len -= count;
+
+ while (count > 0) {
+ if (bs->tx_buf) {
+ len = min(count, 4);
+ memcpy(&val, bs->tx_buf, len);
+ bs->tx_buf += len;
+ } else {
+ val = 0;
+ }
+ bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
+ count -= 4;
+ }
+}
+
+/**
+ * bcm2835_wait_tx_fifo_empty() - busy-wait for TX FIFO to empty
+ * @bs: BCM2835 SPI controller
+ *
+ * The caller must ensure that the RX FIFO can accommodate as many bytes
+ * as have been written to the TX FIFO: Transmission is halted once the
+ * RX FIFO is full, causing this function to spin forever.
+ */
+static inline void bcm2835_wait_tx_fifo_empty(struct bcm2835_spi *bs)
+{
+ while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
+ cpu_relax();
+}
+
+/**
+ * bcm2835_rd_fifo_blind() - blindly read up to @count bytes from RX FIFO
+ * @bs: BCM2835 SPI controller
+ * @count: bytes available for reading in RX FIFO
+ */
+static inline void bcm2835_rd_fifo_blind(struct bcm2835_spi *bs, int count)
+{
+ u8 val;
+
+ count = min(count, bs->rx_len);
+ bs->rx_len -= count;
+
+ while (count) {
+ val = bcm2835_rd(bs, BCM2835_SPI_FIFO);
+ if (bs->rx_buf)
+ *bs->rx_buf++ = val;
+ count--;
+ }
+}
+
+/**
+ * bcm2835_wr_fifo_blind() - blindly write up to @count bytes to TX FIFO
+ * @bs: BCM2835 SPI controller
+ * @count: bytes available for writing in TX FIFO
+ */
+static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
+{
+ u8 val;
+
+ count = min(count, bs->tx_len);
+ bs->tx_len -= count;
+
+ while (count) {
+ val = bs->tx_buf ? *bs->tx_buf++ : 0;
+ bcm2835_wr(bs, BCM2835_SPI_FIFO, val);
+ count--;
+ }
+}
+
static void bcm2835_spi_reset_hw(struct spi_master *master)
{
struct bcm2835_spi *bs = spi_master_get_devdata(master);
@@ -149,14 +280,26 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
{
struct spi_master *master = dev_id;
struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+ /*
+ * An interrupt is signaled either if DONE is set (TX FIFO empty)
+ * or if RXR is set (RX FIFO >= ¾ full).
+ */
+ if (cs & BCM2835_SPI_CS_RXF)
+ bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
+ else if (cs & BCM2835_SPI_CS_RXR)
+ bcm2835_rd_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE_3_4);
+
+ if (bs->tx_len && cs & BCM2835_SPI_CS_DONE)
+ bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
/* Read as many bytes as possible from FIFO */
bcm2835_rd_fifo(bs);
/* Write as many bytes as possible to FIFO */
bcm2835_wr_fifo(bs);
- /* based on flags decide if we can finish the transfer */
- if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
+ if (!bs->rx_len) {
/* Transfer complete - reset SPI HW */
bcm2835_spi_reset_hw(master);
/* wake up the framework */
@@ -169,32 +312,22 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr,
- u32 cs)
+ u32 cs, bool fifo_empty)
{
struct bcm2835_spi *bs = spi_master_get_devdata(master);
- /* fill in fifo if we have gpio-cs
- * note that there have been rare events where the native-CS
- * flapped for <1us which may change the behaviour
- * with gpio-cs this does not happen, so it is implemented
- * only for this case
- */
- if (gpio_is_valid(spi->cs_gpio)) {
- /* enable HW block, but without interrupts enabled
- * this would triggern an immediate interrupt
- */
- bcm2835_wr(bs, BCM2835_SPI_CS,
- cs | BCM2835_SPI_CS_TA);
- /* fill in tx fifo as much as possible */
- bcm2835_wr_fifo(bs);
- }
-
/*
- * Enable the HW block. This will immediately trigger a DONE (TX
- * empty) interrupt, upon which we will fill the TX FIFO with the
- * first TX bytes. Pre-filling the TX FIFO here to avoid the
- * interrupt doesn't work:-(
+ * Enable HW block, but with interrupts still disabled.
+ * Otherwise the empty TX FIFO would immediately trigger an interrupt.
*/
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
+
+ /* fill TX FIFO as much as possible */
+ if (fifo_empty)
+ bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
+ bcm2835_wr_fifo(bs);
+
+ /* enable interrupts */
cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
bcm2835_wr(bs, BCM2835_SPI_CS, cs);
@@ -211,15 +344,162 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
* the main one being that DMA transfers are limited to 16 bit
* (so 0 to 65535 bytes) by the SPI HW due to BCM2835_SPI_DLEN
*
- * also we currently assume that the scatter-gather fragments are
- * all multiple of 4 (except the last) - otherwise we would need
- * to reset the FIFO before subsequent transfers...
- * this also means that tx/rx transfers sg's need to be of equal size!
- *
* there may be a few more border-cases we may need to address as well
* but unfortunately this would mean splitting up the scatter-gather
* list making it slightly unpractical...
*/
+
+/**
+ * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
+ * @master: SPI master
+ * @tfr: SPI transfer
+ * @bs: BCM2835 SPI controller
+ * @cs: CS register
+ *
+ * A limitation in DMA mode is that the FIFO must be accessed in 4 byte chunks.
+ * Only the final write access is permitted to transmit less than 4 bytes, the
+ * SPI controller deduces its intended size from the DLEN register.
+ *
+ * If a TX or RX sglist contains multiple entries, one per page, and the first
+ * entry starts in the middle of a page, that first entry's length may not be
+ * a multiple of 4. Subsequent entries are fine because they span an entire
+ * page, hence do have a length that's a multiple of 4.
+ *
+ * This cannot happen with kmalloc'ed buffers (which is what most clients use)
+ * because they are contiguous in physical memory and therefore not split on
+ * page boundaries by spi_map_buf(). But it *can* happen with vmalloc'ed
+ * buffers.
+ *
+ * The DMA engine is incapable of combining sglist entries into a continuous
+ * stream of 4 byte chunks, it treats every entry separately: A TX entry is
+ * rounded up to a multiple of 4 bytes by transmitting surplus bytes, an RX
+ * entry is rounded up by throwing away received bytes.
+ *
+ * Overcome this limitation by transferring the first few bytes without DMA:
+ * E.g. if the first TX sglist entry's length is 23 and the first RX's is 42,
+ * write 3 bytes to the TX FIFO but read only 2 bytes from the RX FIFO.
+ * The residue of 1 byte in the RX FIFO is picked up by DMA. Together with
+ * the rest of the first RX sglist entry it makes up a multiple of 4 bytes.
+ *
+ * Should the RX prologue be larger, say, 3 vis-à-vis a TX prologue of 1,
+ * write 1 + 4 = 5 bytes to the TX FIFO and read 3 bytes from the RX FIFO.
+ * Caution, the additional 4 bytes spill over to the second TX sglist entry
+ * if the length of the first is *exactly* 1.
+ *
+ * At most 6 bytes are written and at most 3 bytes read. Do we know the
+ * transfer has this many bytes? Yes, see BCM2835_SPI_DMA_MIN_LENGTH.
+ *
+ * The FIFO is normally accessed with 8-bit width by the CPU and 32-bit width
+ * by the DMA engine. Toggling the DMA Enable flag in the CS register switches
+ * the width but also garbles the FIFO's contents. The prologue must therefore
+ * be transmitted in 32-bit width to ensure that the following DMA transfer can
+ * pick up the residue in the RX FIFO in ungarbled form.
+ */
+static void bcm2835_spi_transfer_prologue(struct spi_master *master,
+ struct spi_transfer *tfr,
+ struct bcm2835_spi *bs,
+ u32 cs)
+{
+ int tx_remaining;
+
+ bs->tfr = tfr;
+ bs->tx_prologue = 0;
+ bs->rx_prologue = 0;
+ bs->tx_spillover = false;
+
+ if (!sg_is_last(&tfr->tx_sg.sgl[0]))
+ bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;
+
+ if (!sg_is_last(&tfr->rx_sg.sgl[0])) {
+ bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;
+
+ if (bs->rx_prologue > bs->tx_prologue) {
+ if (sg_is_last(&tfr->tx_sg.sgl[0])) {
+ bs->tx_prologue = bs->rx_prologue;
+ } else {
+ bs->tx_prologue += 4;
+ bs->tx_spillover =
+ !(sg_dma_len(&tfr->tx_sg.sgl[0]) & ~3);
+ }
+ }
+ }
+
+ /* rx_prologue > 0 implies tx_prologue > 0, so check only the latter */
+ if (!bs->tx_prologue)
+ return;
+
+ /* Write and read RX prologue. Adjust first entry in RX sglist. */
+ if (bs->rx_prologue) {
+ bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->rx_prologue);
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
+ | BCM2835_SPI_CS_DMAEN);
+ bcm2835_wr_fifo_count(bs, bs->rx_prologue);
+ bcm2835_wait_tx_fifo_empty(bs);
+ bcm2835_rd_fifo_count(bs, bs->rx_prologue);
+ bcm2835_spi_reset_hw(master);
+
+ dma_sync_single_for_device(master->dma_rx->device->dev,
+ sg_dma_address(&tfr->rx_sg.sgl[0]),
+ bs->rx_prologue, DMA_FROM_DEVICE);
+
+ sg_dma_address(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
+ sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
+ }
+
+ /*
+ * Write remaining TX prologue. Adjust first entry in TX sglist.
+ * Also adjust second entry if prologue spills over to it.
+ */
+ tx_remaining = bs->tx_prologue - bs->rx_prologue;
+ if (tx_remaining) {
+ bcm2835_wr(bs, BCM2835_SPI_DLEN, tx_remaining);
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA
+ | BCM2835_SPI_CS_DMAEN);
+ bcm2835_wr_fifo_count(bs, tx_remaining);
+ bcm2835_wait_tx_fifo_empty(bs);
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_CLEAR_TX);
+ }
+
+ if (likely(!bs->tx_spillover)) {
+ sg_dma_address(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
+ sg_dma_len(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
+ } else {
+ sg_dma_len(&tfr->tx_sg.sgl[0]) = 0;
+ sg_dma_address(&tfr->tx_sg.sgl[1]) += 4;
+ sg_dma_len(&tfr->tx_sg.sgl[1]) -= 4;
+ }
+}
+
+/**
+ * bcm2835_spi_undo_prologue() - reconstruct original sglist state
+ * @bs: BCM2835 SPI controller
+ *
+ * Undo changes which were made to an SPI transfer's sglist when transmitting
+ * the prologue. This is necessary to ensure the same memory ranges are
+ * unmapped that were originally mapped.
+ */
+static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
+{
+ struct spi_transfer *tfr = bs->tfr;
+
+ if (!bs->tx_prologue)
+ return;
+
+ if (bs->rx_prologue) {
+ sg_dma_address(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
+ sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
+ }
+
+ if (likely(!bs->tx_spillover)) {
+ sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
+ sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;
+ } else {
+ sg_dma_len(&tfr->tx_sg.sgl[0]) = bs->tx_prologue - 4;
+ sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
+ sg_dma_len(&tfr->tx_sg.sgl[1]) += 4;
+ }
+}
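
Plugging the numbers from the kernel-doc above into the code makes the bookkeeping concrete (illustrative values only):

/* Illustrative: first TX sglist entry 23 bytes, first RX entry 42 bytes. */
static void prologue_example(void)
{
	int tx_prologue = 23 & 3;	/* = 3 bytes written by PIO   */
	int rx_prologue = 42 & 3;	/* = 2 bytes read back by PIO */

	/*
	 * rx_prologue <= tx_prologue, so no spillover adjustment is
	 * needed.  Of the 3 PIO TX bytes, 2 are read back at once; the
	 * remaining 1 byte stays in the RX FIFO and is collected by the
	 * following DMA together with the shortened first RX entry
	 * (42 - 2 = 40 bytes, again a multiple of 4).
	 */
}
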
+
static void bcm2835_spi_dma_done(void *data)
{
struct spi_master *master = data;
@@ -233,10 +513,10 @@ static void bcm2835_spi_dma_done(void *data)
* is called the tx-dma must have finished - can't get to this
* situation otherwise...
*/
- dmaengine_terminate_all(master->dma_tx);
-
- /* mark as no longer pending */
- bs->dma_pending = 0;
+ if (cmpxchg(&bs->dma_pending, true, false)) {
+ dmaengine_terminate_async(master->dma_tx);
+ bcm2835_spi_undo_prologue(bs);
+ }
/* and mark as completed */;
complete(&master->xfer_completion);
@@ -286,20 +566,6 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
return dma_submit_error(cookie);
}
-static inline int bcm2835_check_sg_length(struct sg_table *sgt)
-{
- int i;
- struct scatterlist *sgl;
-
- /* check that the sg entries are word-sized (except for last) */
- for_each_sg(sgt->sgl, sgl, (int)sgt->nents - 1, i) {
- if (sg_dma_len(sgl) % 4)
- return -EFAULT;
- }
-
- return 0;
-}
-
static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr,
@@ -308,18 +574,16 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
struct bcm2835_spi *bs = spi_master_get_devdata(master);
int ret;
- /* check that the scatter gather segments are all a multiple of 4 */
- if (bcm2835_check_sg_length(&tfr->tx_sg) ||
- bcm2835_check_sg_length(&tfr->rx_sg)) {
- dev_warn_once(&spi->dev,
- "scatter gather segment length is not a multiple of 4 - falling back to interrupt mode\n");
- return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
- }
+ /*
+ * Transfer first few bytes without DMA if length of first TX or RX
+ * sglist entry is not a multiple of 4 bytes (hardware limitation).
+ */
+ bcm2835_spi_transfer_prologue(master, tfr, bs, cs);
/* setup tx-DMA */
ret = bcm2835_spi_prepare_sg(master, tfr, true);
if (ret)
- return ret;
+ goto err_reset_hw;
/* start TX early */
dma_async_issue_pending(master->dma_tx);
@@ -328,7 +592,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
bs->dma_pending = 1;
/* set the DMA length */
- bcm2835_wr(bs, BCM2835_SPI_DLEN, tfr->len);
+ bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);
/* start the HW */
bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -341,9 +605,9 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
ret = bcm2835_spi_prepare_sg(master, tfr, false);
if (ret) {
/* need to reset on errors */
- dmaengine_terminate_all(master->dma_tx);
- bcm2835_spi_reset_hw(master);
- return ret;
+ dmaengine_terminate_sync(master->dma_tx);
+ bs->dma_pending = false;
+ goto err_reset_hw;
}
/* start rx dma late */
@@ -351,16 +615,17 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
/* wait for wakeup in framework */
return 1;
+
+err_reset_hw:
+ bcm2835_spi_reset_hw(master);
+ bcm2835_spi_undo_prologue(bs);
+ return ret;
}
static bool bcm2835_spi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *tfr)
{
- /* only run for gpio_cs */
- if (!gpio_is_valid(spi->cs_gpio))
- return false;
-
/* we start DMA efforts only on bigger transfers */
if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
return false;
@@ -378,25 +643,6 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
return false;
}
- /* if we run rx/tx_buf with word aligned addresses then we are OK */
- if ((((size_t)tfr->rx_buf & 3) == 0) &&
- (((size_t)tfr->tx_buf & 3) == 0))
- return true;
-
- /* otherwise we only allow transfers within the same page
- * to avoid wasting time on dma_mapping when it is not practical
- */
- if (((size_t)tfr->tx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
- dev_warn_once(&spi->dev,
- "Unaligned spi tx-transfer bridging page\n");
- return false;
- }
- if (((size_t)tfr->rx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
- dev_warn_once(&spi->dev,
- "Unaligned spi rx-transfer bridging page\n");
- return false;
- }
-
/* return OK */
return true;
}
@@ -404,12 +650,12 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
static void bcm2835_dma_release(struct spi_master *master)
{
if (master->dma_tx) {
- dmaengine_terminate_all(master->dma_tx);
+ dmaengine_terminate_sync(master->dma_tx);
dma_release_channel(master->dma_tx);
master->dma_tx = NULL;
}
if (master->dma_rx) {
- dmaengine_terminate_all(master->dma_rx);
+ dmaengine_terminate_sync(master->dma_rx);
dma_release_channel(master->dma_rx);
master->dma_rx = NULL;
}
@@ -492,7 +738,7 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
* if we are interrupted here, then the data is
* getting transferred by the HW while we are interrupted
*/
- bcm2835_wr_fifo(bs);
+ bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
/* set the timeout */
timeout = jiffies + BCM2835_SPI_POLLING_JIFFIES;
@@ -515,7 +761,7 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
bs->tx_len, bs->rx_len);
/* fall back to interrupt mode */
return bcm2835_spi_transfer_one_irq(master, spi,
- tfr, cs);
+ tfr, cs, false);
}
}
@@ -560,12 +806,12 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
else
cs &= ~BCM2835_SPI_CS_REN;
- /* for gpio_cs set dummy CS so that no HW-CS get changed
- * we can not run this in bcm2835_spi_set_cs, as it does
- * not get called for cs_gpio cases, so we need to do it here
+ /*
+ * The driver always uses software-controlled GPIO Chip Select.
+ * Set the hardware-controlled native Chip Select to an invalid
+ * value to prevent it from interfering.
*/
- if (gpio_is_valid(spi->cs_gpio) || (spi->mode & SPI_NO_CS))
- cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+ cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
/* set transmit buffers and length */
bs->tx_buf = tfr->tx_buf;
@@ -589,7 +835,7 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
return bcm2835_spi_transfer_one_dma(master, spi, tfr, cs);
/* run in interrupt-mode */
- return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
+ return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs, true);
}
static int bcm2835_spi_prepare_message(struct spi_master *master,
@@ -617,68 +863,15 @@ static void bcm2835_spi_handle_err(struct spi_master *master,
struct bcm2835_spi *bs = spi_master_get_devdata(master);
/* if an error occurred and we have an active dma, then terminate */
- if (bs->dma_pending) {
- dmaengine_terminate_all(master->dma_tx);
- dmaengine_terminate_all(master->dma_rx);
- bs->dma_pending = 0;
+ if (cmpxchg(&bs->dma_pending, true, false)) {
+ dmaengine_terminate_sync(master->dma_tx);
+ dmaengine_terminate_sync(master->dma_rx);
+ bcm2835_spi_undo_prologue(bs);
}
/* and reset */
bcm2835_spi_reset_hw(master);
}
-static void bcm2835_spi_set_cs(struct spi_device *spi, bool gpio_level)
-{
- /*
- * we can assume that we are "native" as per spi_set_cs
- * calling us ONLY when cs_gpio is not set
- * we can also assume that we are CS < 3 as per bcm2835_spi_setup
- * we would not get called because of error handling there.
- * the level passed is the electrical level not enabled/disabled
- * so it has to get translated back to enable/disable
- * see spi_set_cs in spi.c for the implementation
- */
-
- struct spi_master *master = spi->master;
- struct bcm2835_spi *bs = spi_master_get_devdata(master);
- u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
- bool enable;
-
- /* calculate the enable flag from the passed gpio_level */
- enable = (spi->mode & SPI_CS_HIGH) ? gpio_level : !gpio_level;
-
- /* set flags for "reverse" polarity in the registers */
- if (spi->mode & SPI_CS_HIGH) {
- /* set the correct CS-bits */
- cs |= BCM2835_SPI_CS_CSPOL;
- cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select;
- } else {
- /* clean the CS-bits */
- cs &= ~BCM2835_SPI_CS_CSPOL;
- cs &= ~(BCM2835_SPI_CS_CSPOL0 << spi->chip_select);
- }
-
- /* select the correct chip_select depending on disabled/enabled */
- if (enable) {
- /* set cs correctly */
- if (spi->mode & SPI_NO_CS) {
- /* use the "undefined" chip-select */
- cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
- } else {
- /* set the chip select */
- cs &= ~(BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01);
- cs |= spi->chip_select;
- }
- } else {
- /* disable CSPOL which puts HW-CS into deselected state */
- cs &= ~BCM2835_SPI_CS_CSPOL;
- /* use the "undefined" chip-select as precaution */
- cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
- }
-
- /* finally set the calculated flags in SPI_CS */
- bcm2835_wr(bs, BCM2835_SPI_CS, cs);
-}
-
static int chip_match_name(struct gpio_chip *chip, void *data)
{
return !strcmp(chip->label, data);
@@ -750,7 +943,6 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->num_chipselect = 3;
master->setup = bcm2835_spi_setup;
- master->set_cs = bcm2835_spi_set_cs;
master->transfer_one = bcm2835_spi_transfer_one;
master->handle_err = bcm2835_spi_handle_err;
master->prepare_message = bcm2835_spi_prepare_message;
@@ -843,4 +1035,4 @@ module_platform_driver(bcm2835_spi_driver);
MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
MODULE_AUTHOR("Chris Boot <bootc@bootc.net>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 3094d818cf06..671e374e1b01 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -542,4 +542,4 @@ module_platform_driver(bcm2835aux_spi_driver);
MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835 aux");
MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 3ffb6a40fe0c..d0dd7814e997 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
+#include <linux/acpi.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -243,12 +244,19 @@ static const struct of_device_id dw_spi_mmio_of_match[] = {
};
MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
+static const struct acpi_device_id dw_spi_mmio_acpi_match[] = {
+ {"HISI0173", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, dw_spi_mmio_acpi_match);
+
static struct platform_driver dw_spi_mmio_driver = {
.probe = dw_spi_mmio_probe,
.remove = dw_spi_mmio_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = dw_spi_mmio_of_match,
+ .acpi_match_table = ACPI_PTR(dw_spi_mmio_acpi_match),
},
};
module_platform_driver(dw_spi_mmio_driver);
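
ACPI_PTR() mirrors of_match_ptr(): when CONFIG_ACPI is disabled the table reference compiles away, so the driver structure does not drag in an unusable pointer. Simplified from include/linux/acpi.h:

/* Simplified from include/linux/acpi.h: */
#ifdef CONFIG_ACPI
#define ACPI_PTR(_ptr)	(_ptr)
#else
#define ACPI_PTR(_ptr)	(NULL)
#endif
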
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b705f2bdb8b9..2e822a56576a 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -507,6 +507,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->handle_err = dw_spi_handle_err;
master->max_speed_hz = dws->max_freq;
master->dev.of_node = dev->of_node;
+ master->dev.fwnode = dev->fwnode;
master->flags = SPI_MASTER_GPIO_SS;
if (dws->set_cs)
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 3082e72e4f6c..5e10dc5c93a5 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1090,8 +1090,8 @@ static int dspi_probe(struct platform_device *pdev)
goto out_clk_put;
}
- ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
- pdev->name, dspi);
+ ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
+ IRQF_SHARED, pdev->name, dspi);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
goto out_clk_put;
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 51670976faa3..08dcc3c22e88 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -3,6 +3,7 @@
// Freescale i.MX7ULP LPSPI driver
//
// Copyright 2016 Freescale Semiconductor, Inc.
+// Copyright 2018 NXP Semiconductors
#include <linux/clk.h>
#include <linux/completion.h>
@@ -54,6 +55,7 @@
#define IER_RDIE BIT(1)
#define IER_TDIE BIT(0)
#define CFGR1_PCSCFG BIT(27)
+#define CFGR1_PINCFG (BIT(24)|BIT(25))
#define CFGR1_PCSPOL BIT(8)
#define CFGR1_NOSTALL BIT(3)
#define CFGR1_MASTER BIT(0)
@@ -79,6 +81,7 @@ struct fsl_lpspi_data {
struct device *dev;
void __iomem *base;
struct clk *clk;
+ bool is_slave;
void *rx_buf;
const void *tx_buf;
@@ -86,11 +89,14 @@ struct fsl_lpspi_data {
void (*rx)(struct fsl_lpspi_data *);
u32 remain;
+ u8 watermark;
u8 txfifosize;
u8 rxfifosize;
struct lpspi_config config;
struct completion xfer_done;
+
+ bool slave_aborted;
};
static const struct of_device_id fsl_lpspi_dt_ids[] = {
@@ -137,16 +143,18 @@ static void fsl_lpspi_intctrl(struct fsl_lpspi_data *fsl_lpspi,
writel(enable, fsl_lpspi->base + IMX7ULP_IER);
}
-static int lpspi_prepare_xfer_hardware(struct spi_master *master)
+static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
{
- struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
return clk_prepare_enable(fsl_lpspi->clk);
}
-static int lpspi_unprepare_xfer_hardware(struct spi_master *master)
+static int lpspi_unprepare_xfer_hardware(struct spi_controller *controller)
{
- struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
clk_disable_unprepare(fsl_lpspi->clk);
@@ -203,21 +211,22 @@ static void fsl_lpspi_set_cmd(struct fsl_lpspi_data *fsl_lpspi,
u32 temp = 0;
temp |= fsl_lpspi->config.bpw - 1;
- temp |= fsl_lpspi->config.prescale << 27;
temp |= (fsl_lpspi->config.mode & 0x3) << 30;
- temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
-
- /*
- * Set TCR_CONT will keep SS asserted after current transfer.
- * For the first transfer, clear TCR_CONTC to assert SS.
- * For subsequent transfer, set TCR_CONTC to keep SS asserted.
- */
- temp |= TCR_CONT;
- if (is_first_xfer)
- temp &= ~TCR_CONTC;
- else
- temp |= TCR_CONTC;
-
+ if (!fsl_lpspi->is_slave) {
+ temp |= fsl_lpspi->config.prescale << 27;
+ temp |= (fsl_lpspi->config.chip_select & 0x3) << 24;
+
+ /*
+ * Set TCR_CONT will keep SS asserted after current transfer.
+ * For the first transfer, clear TCR_CONTC to assert SS.
+ * For subsequent transfer, set TCR_CONTC to keep SS asserted.
+ */
+ temp |= TCR_CONT;
+ if (is_first_xfer)
+ temp &= ~TCR_CONTC;
+ else
+ temp |= TCR_CONTC;
+ }
writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
dev_dbg(fsl_lpspi->dev, "TCR=0x%x\n", temp);
@@ -227,7 +236,7 @@ static void fsl_lpspi_set_watermark(struct fsl_lpspi_data *fsl_lpspi)
{
u32 temp;
- temp = fsl_lpspi->txfifosize >> 1 | (fsl_lpspi->rxfifosize >> 1) << 16;
+ temp = fsl_lpspi->watermark >> 1 | (fsl_lpspi->watermark >> 1) << 16;
writel(temp, fsl_lpspi->base + IMX7ULP_FCR);
@@ -253,7 +262,8 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
if (prescale == 8 && scldiv >= 256)
return -EINVAL;
- writel(scldiv, fsl_lpspi->base + IMX7ULP_CCR);
+ writel(scldiv | (scldiv << 8) | ((scldiv >> 1) << 16),
+ fsl_lpspi->base + IMX7ULP_CCR);
dev_dbg(fsl_lpspi->dev, "perclk=%d, speed=%d, prescale =%d, scldiv=%d\n",
perclk_rate, config.speed_hz, prescale, scldiv);
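
The CCR write now programs more than the SCK divider. Assuming the i.MX7ULP LPSPI register layout (SCKDIV in bits 7:0, DBT in bits 15:8, PCSSCK in bits 23:16), the single write decomposes as:

/* Assumed i.MX7ULP CCR layout; all three fields derive from scldiv: */
u32 scldiv = 7;			 /* example divider value        */
u32 ccr = scldiv		 /* SCKDIV [7:0]   = scldiv      */
	| (scldiv << 8)		 /* DBT    [15:8]  = scldiv      */
	| ((scldiv >> 1) << 16); /* PCSSCK [23:16] = scldiv / 2  */
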
@@ -270,13 +280,18 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
writel(temp, fsl_lpspi->base + IMX7ULP_CR);
writel(0, fsl_lpspi->base + IMX7ULP_CR);
- ret = fsl_lpspi_set_bitrate(fsl_lpspi);
- if (ret)
- return ret;
+ if (!fsl_lpspi->is_slave) {
+ ret = fsl_lpspi_set_bitrate(fsl_lpspi);
+ if (ret)
+ return ret;
+ }
fsl_lpspi_set_watermark(fsl_lpspi);
- temp = CFGR1_PCSCFG | CFGR1_MASTER;
+ if (!fsl_lpspi->is_slave)
+ temp = CFGR1_MASTER;
+ else
+ temp = CFGR1_PINCFG;
if (fsl_lpspi->config.mode & SPI_CS_HIGH)
temp |= CFGR1_PCSPOL;
writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);
@@ -291,7 +306,8 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
static void fsl_lpspi_setup_transfer(struct spi_device *spi,
struct spi_transfer *t)
{
- struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(spi->master);
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(spi->controller);
fsl_lpspi->config.mode = spi->mode;
fsl_lpspi->config.bpw = t ? t->bits_per_word : spi->bits_per_word;
@@ -315,14 +331,51 @@ static void fsl_lpspi_setup_transfer(struct spi_device *spi,
fsl_lpspi->tx = fsl_lpspi_buf_tx_u32;
}
+ if (t->len <= fsl_lpspi->txfifosize)
+ fsl_lpspi->watermark = t->len;
+ else
+ fsl_lpspi->watermark = fsl_lpspi->txfifosize;
+
fsl_lpspi_config(fsl_lpspi);
}
-static int fsl_lpspi_transfer_one(struct spi_master *master,
+static int fsl_lpspi_slave_abort(struct spi_controller *controller)
+{
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+
+ fsl_lpspi->slave_aborted = true;
+ complete(&fsl_lpspi->xfer_done);
+ return 0;
+}
+
+static int fsl_lpspi_wait_for_completion(struct spi_controller *controller)
+{
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
+
+ if (fsl_lpspi->is_slave) {
+ if (wait_for_completion_interruptible(&fsl_lpspi->xfer_done) ||
+ fsl_lpspi->slave_aborted) {
+ dev_dbg(fsl_lpspi->dev, "interrupted\n");
+ return -EINTR;
+ }
+ } else {
+ if (!wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ)) {
+ dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
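
The wait now splits by role: a master transfer can bound its wait with a timeout, but a slave transfer may legitimately block until the remote master clocks data, so it waits interruptibly and relies on the new slave_abort hook to bail out. The hook is reached through the SPI core; a simplified sketch of spi_slave_abort() from drivers/spi/spi.c:

/* Simplified from drivers/spi/spi.c: */
int spi_slave_abort(struct spi_device *spi)
{
	struct spi_controller *ctlr = spi->controller;

	if (spi_controller_is_slave(ctlr) && ctlr->slave_abort)
		return ctlr->slave_abort(ctlr);

	return -ENOTSUPP;
}
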
+
+static int fsl_lpspi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *t)
{
- struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
int ret;
fsl_lpspi->tx_buf = t->tx_buf;
@@ -330,13 +383,13 @@ static int fsl_lpspi_transfer_one(struct spi_master *master,
fsl_lpspi->remain = t->len;
reinit_completion(&fsl_lpspi->xfer_done);
+ fsl_lpspi->slave_aborted = false;
+
fsl_lpspi_write_tx_fifo(fsl_lpspi);
- ret = wait_for_completion_timeout(&fsl_lpspi->xfer_done, HZ);
- if (!ret) {
- dev_dbg(fsl_lpspi->dev, "wait for completion timeout\n");
- return -ETIMEDOUT;
- }
+ ret = fsl_lpspi_wait_for_completion(controller);
+ if (ret)
+ return ret;
ret = fsl_lpspi_txfifo_empty(fsl_lpspi);
if (ret)
@@ -347,10 +400,11 @@ static int fsl_lpspi_transfer_one(struct spi_master *master,
return 0;
}
-static int fsl_lpspi_transfer_one_msg(struct spi_master *master,
+static int fsl_lpspi_transfer_one_msg(struct spi_controller *controller,
struct spi_message *msg)
{
- struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
struct spi_device *spi = msg->spi;
struct spi_transfer *xfer;
bool is_first_xfer = true;
@@ -366,7 +420,7 @@ static int fsl_lpspi_transfer_one_msg(struct spi_master *master,
is_first_xfer = false;
- ret = fsl_lpspi_transfer_one(master, spi, xfer);
+ ret = fsl_lpspi_transfer_one(controller, spi, xfer);
if (ret < 0)
goto complete;
@@ -374,13 +428,15 @@ static int fsl_lpspi_transfer_one_msg(struct spi_master *master,
}
complete:
- /* de-assert SS, then finalize current message */
- temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
- temp &= ~TCR_CONTC;
- writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+ if (!fsl_lpspi->is_slave) {
+ /* de-assert SS, then finalize current message */
+ temp = readl(fsl_lpspi->base + IMX7ULP_TCR);
+ temp &= ~TCR_CONTC;
+ writel(temp, fsl_lpspi->base + IMX7ULP_TCR);
+ }
msg->status = ret;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(controller);
return ret;
}
@@ -410,30 +466,39 @@ static irqreturn_t fsl_lpspi_isr(int irq, void *dev_id)
static int fsl_lpspi_probe(struct platform_device *pdev)
{
struct fsl_lpspi_data *fsl_lpspi;
- struct spi_master *master;
+ struct spi_controller *controller;
struct resource *res;
int ret, irq;
u32 temp;
- master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_lpspi_data));
- if (!master)
+	if (of_property_read_bool(pdev->dev.of_node, "spi-slave"))
+ controller = spi_alloc_slave(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
+ else
+ controller = spi_alloc_master(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
+
+ if (!controller)
return -ENOMEM;
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, controller);
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
- master->bus_num = pdev->id;
+ controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ controller->bus_num = pdev->id;
- fsl_lpspi = spi_master_get_devdata(master);
+ fsl_lpspi = spi_controller_get_devdata(controller);
fsl_lpspi->dev = &pdev->dev;
-
- master->transfer_one_message = fsl_lpspi_transfer_one_msg;
- master->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
- master->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
- master->dev.of_node = pdev->dev.of_node;
- master->bus_num = pdev->id;
+	fsl_lpspi->is_slave = of_property_read_bool(pdev->dev.of_node,
+						    "spi-slave");
+
+ controller->transfer_one_message = fsl_lpspi_transfer_one_msg;
+ controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware;
+ controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware;
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ controller->dev.of_node = pdev->dev.of_node;
+ controller->bus_num = pdev->id;
+ controller->slave_abort = fsl_lpspi_slave_abort;
init_completion(&fsl_lpspi->xfer_done);
@@ -441,32 +506,32 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
fsl_lpspi->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(fsl_lpspi->base)) {
ret = PTR_ERR(fsl_lpspi->base);
- goto out_master_put;
+ goto out_controller_put;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto out_master_put;
+ goto out_controller_put;
}
ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0,
dev_name(&pdev->dev), fsl_lpspi);
if (ret) {
dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
- goto out_master_put;
+ goto out_controller_put;
}
fsl_lpspi->clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(fsl_lpspi->clk)) {
ret = PTR_ERR(fsl_lpspi->clk);
- goto out_master_put;
+ goto out_controller_put;
}
ret = clk_prepare_enable(fsl_lpspi->clk);
if (ret) {
dev_err(&pdev->dev, "can't enable lpspi clock, ret=%d\n", ret);
- goto out_master_put;
+ goto out_controller_put;
}
temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
@@ -475,24 +540,25 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
clk_disable_unprepare(fsl_lpspi->clk);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_master error.\n");
- goto out_master_put;
+ dev_err(&pdev->dev, "spi_register_controller error.\n");
+ goto out_controller_put;
}
return 0;
-out_master_put:
- spi_master_put(master);
+out_controller_put:
+ spi_controller_put(controller);
return ret;
}
static int fsl_lpspi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct fsl_lpspi_data *fsl_lpspi = spi_master_get_devdata(master);
+ struct spi_controller *controller = platform_get_drvdata(pdev);
+ struct fsl_lpspi_data *fsl_lpspi =
+ spi_controller_get_devdata(controller);
clk_disable_unprepare(fsl_lpspi->clk);
@@ -509,6 +575,6 @@ static struct platform_driver fsl_lpspi_driver = {
};
module_platform_driver(fsl_lpspi_driver);
-MODULE_DESCRIPTION("LPSPI Master Controller driver");
+MODULE_DESCRIPTION("LPSPI Controller driver");
MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 6432ecc4e2ca..fdb7cb88fb56 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -64,15 +64,13 @@
#define TIMESTAMP_AFTER BIT(3)
#define POST_CMD_DELAY BIT(4)
-/* SPI M_COMMAND OPCODE */
-enum spi_mcmd_code {
+enum spi_m_cmd_opcode {
CMD_NONE,
CMD_XFER,
CMD_CS,
CMD_CANCEL,
};
-
struct spi_geni_master {
struct geni_se se;
struct device *dev;
@@ -87,7 +85,7 @@ struct spi_geni_master {
struct completion xfer_done;
unsigned int oversampling;
spinlock_t lock;
- unsigned int cur_mcmd;
+ enum spi_m_cmd_opcode cur_mcmd;
int irq;
};
@@ -129,7 +127,7 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
struct spi_master *spi = dev_get_drvdata(mas->dev);
struct geni_se *se = &mas->se;
- unsigned long timeout;
+ unsigned long time_left;
reinit_completion(&mas->xfer_done);
pm_runtime_get_sync(mas->dev);
@@ -142,8 +140,8 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
else
geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
- timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
- if (!timeout)
+ time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
+ if (!time_left)
handle_fifo_timeout(spi, NULL);
pm_runtime_put(mas->dev);
@@ -485,7 +483,6 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
struct geni_se *se = &mas->se;
u32 m_irq;
unsigned long flags;
- irqreturn_t ret = IRQ_HANDLED;
if (mas->cur_mcmd == CMD_NONE)
return IRQ_NONE;
@@ -533,16 +530,35 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
spin_unlock_irqrestore(&mas->lock, flags);
- return ret;
+ return IRQ_HANDLED;
}
static int spi_geni_probe(struct platform_device *pdev)
{
- int ret;
+ int ret, irq;
struct spi_master *spi;
struct spi_geni_master *mas;
struct resource *res;
- struct geni_se *se;
+ void __iomem *base;
+ struct clk *clk;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Err getting IRQ %d\n", irq);
+ return irq;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk = devm_clk_get(&pdev->dev, "se");
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Err getting SE Core clk %ld\n",
+ PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
spi = spi_alloc_master(&pdev->dev, sizeof(*mas));
if (!spi)
@@ -550,27 +566,15 @@ static int spi_geni_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, spi);
mas = spi_master_get_devdata(spi);
+ mas->irq = irq;
mas->dev = &pdev->dev;
mas->se.dev = &pdev->dev;
mas->se.wrapper = dev_get_drvdata(pdev->dev.parent);
- se = &mas->se;
+ mas->se.base = base;
+ mas->se.clk = clk;
spi->bus_num = -1;
spi->dev.of_node = pdev->dev.of_node;
- mas->se.clk = devm_clk_get(&pdev->dev, "se");
- if (IS_ERR(mas->se.clk)) {
- ret = PTR_ERR(mas->se.clk);
- dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
- goto spi_geni_probe_err;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- se->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(se->base)) {
- ret = PTR_ERR(se->base);
- goto spi_geni_probe_err;
- }
-
spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
spi->num_chipselect = 4;
@@ -589,13 +593,6 @@ static int spi_geni_probe(struct platform_device *pdev)
if (ret)
goto spi_geni_probe_runtime_disable;
- mas->irq = platform_get_irq(pdev, 0);
- if (mas->irq < 0) {
- ret = mas->irq;
- dev_err(&pdev->dev, "Err getting IRQ %d\n", ret);
- goto spi_geni_probe_runtime_disable;
- }
-
ret = request_irq(mas->irq, geni_spi_isr,
IRQF_TRIGGER_HIGH, "spi_geni", spi);
if (ret)
@@ -610,7 +607,6 @@ spi_geni_probe_free_irq:
free_irq(mas->irq, spi);
spi_geni_probe_runtime_disable:
pm_runtime_disable(&pdev->dev);
-spi_geni_probe_err:
spi_master_put(spi);
return ret;
}
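
The spi-geni-qcom probe is reordered so that every fallible, devm-managed lookup happens before spi_alloc_master(); once allocation succeeds, the only thing left to unwind on error is the allocation itself, which is why the spi_geni_probe_err label disappears. A sketch of that ordering, with hypothetical names and clock label:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>
	#include <linux/spi/spi.h>

	struct sketch_priv { void __iomem *base; struct clk *clk; int irq; };

	static int sketch_probe(struct platform_device *pdev)
	{
		struct spi_master *master;
		struct sketch_priv *priv;
		void __iomem *base;
		struct clk *clk;
		int irq;

		/* Acquire everything that can fail first; all of it is devm-managed. */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		base = devm_ioremap_resource(&pdev->dev,
				platform_get_resource(pdev, IORESOURCE_MEM, 0));
		if (IS_ERR(base))
			return PTR_ERR(base);

		clk = devm_clk_get(&pdev->dev, "core");	/* clock name is made up */
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* From here on, the only resource to unwind is the allocation. */
		master = spi_alloc_master(&pdev->dev, sizeof(*priv));
		if (!master)
			return -ENOMEM;

		priv = spi_master_get_devdata(master);
		priv->base = base;
		priv->clk = clk;
		priv->irq = irq;

		master->num_chipselect = 1;
		return devm_spi_register_master(&pdev->dev, master);
	}
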
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 45973ee3ae11..a4aee26028cd 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -256,11 +256,29 @@ static int spi_gpio_setup(struct spi_device *spi)
static int spi_gpio_set_direction(struct spi_device *spi, bool output)
{
struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+ int ret;
if (output)
return gpiod_direction_output(spi_gpio->mosi, 1);
- else
- return gpiod_direction_input(spi_gpio->mosi);
+
+ ret = gpiod_direction_input(spi_gpio->mosi);
+ if (ret)
+ return ret;
+ /*
+ * Send a turnaround high impedance cycle when switching
+ * from output to input. Theoretically there should be
+ * a clock delay here, but as has been noted above, the
+ * nsec delay function for bit-banged GPIO is simply
+ * {} because bit-banging just doesn't get fast enough
+ * anyway.
+ */
+ if (spi->mode & SPI_3WIRE_HIZ) {
+ gpiod_set_value_cansleep(spi_gpio->sck,
+ !(spi->mode & SPI_CPOL));
+ gpiod_set_value_cansleep(spi_gpio->sck,
+ !!(spi->mode & SPI_CPOL));
+ }
+ return 0;
}
static void spi_gpio_cleanup(struct spi_device *spi)
@@ -410,7 +428,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
return status;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL;
+ master->mode_bits = SPI_3WIRE | SPI_3WIRE_HIZ | SPI_CPHA | SPI_CPOL;
master->flags = master_flags;
master->bus_num = pdev->id;
/* The master needs to think there is a chipselect even if not connected */
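
SPI_3WIRE_HIZ is a client-visible mode bit, so a peripheral driver that needs the high-impedance turnaround requests it at setup time. A hedged sketch of such a client (hypothetical function name):

	#include <linux/spi/spi.h>

	static int sketch_client_probe(struct spi_device *spi)
	{
		/* 3-wire half-duplex with a high-impedance turnaround cycle */
		spi->mode |= SPI_3WIRE | SPI_3WIRE_HIZ;

		/* spi_setup() fails if the controller lacks SPI_3WIRE_HIZ */
		return spi_setup(spi);
	}
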
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index dd1ce12aa386..6ec647bbba77 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -39,8 +39,8 @@
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
#define MXC_INT_RDR BIT(4) /* Receive data threshold interrupt */
-/* The maximum bytes that a sdma BD can transfer.*/
-#define MAX_SDMA_BD_BYTES (1 << 15)
+/* The maximum bytes that a sdma BD can transfer. */
+#define MAX_SDMA_BD_BYTES (1 << 15)
#define MX51_ECSPI_CTRL_MAX_BURST 512
/* The maximum bytes that IMX53_ECSPI can transfer in slave mode. */
#define MX53_MAX_TRANSFER_BYTES 512
@@ -59,7 +59,9 @@ struct spi_imx_data;
struct spi_imx_devtype_data {
void (*intctrl)(struct spi_imx_data *, int);
- int (*config)(struct spi_device *);
+ int (*prepare_message)(struct spi_imx_data *, struct spi_message *);
+ int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *,
+ struct spi_transfer *);
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
@@ -85,7 +87,6 @@ struct spi_imx_data {
unsigned long spi_clk;
unsigned int spi_bus_clk;
- unsigned int speed_hz;
unsigned int bits_per_word;
unsigned int spi_drctl;
@@ -256,7 +257,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
#define MX51_ECSPI_INT_RREN (1 << 3)
#define MX51_ECSPI_INT_RDREN (1 << 4)
-#define MX51_ECSPI_DMA 0x14
+#define MX51_ECSPI_DMA 0x14
#define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
#define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16)
#define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
@@ -486,11 +487,12 @@ static void mx51_ecspi_disable(struct spi_imx_data *spi_imx)
writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
}
-static int mx51_ecspi_config(struct spi_device *spi)
+static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
+ struct spi_message *msg)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ struct spi_device *spi = msg->spi;
u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
- u32 clk = spi_imx->speed_hz, delay, reg;
+ u32 testreg;
u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
/* set Master or Slave mode */
@@ -505,19 +507,21 @@ static int mx51_ecspi_config(struct spi_device *spi)
if (spi->mode & SPI_READY)
ctrl |= MX51_ECSPI_CTRL_DRCTL(spi_imx->spi_drctl);
- /* set clock speed */
- ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->speed_hz, &clk);
- spi_imx->spi_bus_clk = clk;
-
/* set chip select to use */
ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select);
- if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
- ctrl |= (spi_imx->slave_burst * 8 - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
+ /*
+	 * The ctrl register must be written first, with the EN bit set;
+	 * the other registers must not be written to before that.
+ */
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+
+ testreg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
+ if (spi->mode & SPI_LOOP)
+ testreg |= MX51_ECSPI_TESTREG_LBC;
else
- ctrl |= (spi_imx->bits_per_word - 1)
- << MX51_ECSPI_CTRL_BL_OFFSET;
+ testreg &= ~MX51_ECSPI_TESTREG_LBC;
+ writel(testreg, spi_imx->base + MX51_ECSPI_TESTREG);
/*
* eCSPI burst completion by Chip Select signal in Slave mode
@@ -541,25 +545,43 @@ static int mx51_ecspi_config(struct spi_device *spi)
cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(spi->chip_select);
cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(spi->chip_select);
}
+
if (spi->mode & SPI_CS_HIGH)
cfg |= MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
else
cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(spi->chip_select);
- if (spi_imx->usedma)
- ctrl |= MX51_ECSPI_CTRL_SMC;
+ writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
- /* CTRL register always go first to bring out controller from reset */
- writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+ return 0;
+}
- reg = readl(spi_imx->base + MX51_ECSPI_TESTREG);
- if (spi->mode & SPI_LOOP)
- reg |= MX51_ECSPI_TESTREG_LBC;
+static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ u32 clk = t->speed_hz, delay;
+
+ /* Clear BL field and set the right value */
+ ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
+ if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx))
+ ctrl |= (spi_imx->slave_burst * 8 - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
else
- reg &= ~MX51_ECSPI_TESTREG_LBC;
- writel(reg, spi_imx->base + MX51_ECSPI_TESTREG);
+ ctrl |= (spi_imx->bits_per_word - 1)
+ << MX51_ECSPI_CTRL_BL_OFFSET;
- writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
+ /* set clock speed */
+ ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET |
+ 0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET);
+ ctrl |= mx51_ecspi_clkdiv(spi_imx, t->speed_hz, &clk);
+ spi_imx->spi_bus_clk = clk;
+
+ if (spi_imx->usedma)
+ ctrl |= MX51_ECSPI_CTRL_SMC;
+
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
/*
* Wait until the changes in the configuration register CONFIGREG
@@ -587,7 +609,6 @@ static void mx51_setup_wml(struct spi_imx_data *spi_imx)
* Configure the DMA register: setup the watermark
* and enable DMA request.
*/
-
writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
@@ -659,13 +680,20 @@ static void mx31_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int mx31_config(struct spi_device *spi)
+static int mx31_prepare_message(struct spi_imx_data *spi_imx,
+ struct spi_message *msg)
+{
+ return 0;
+}
+
+static int mx31_prepare_transfer(struct spi_imx_data *spi_imx,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
unsigned int clk;
- reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) <<
+ reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) <<
MX31_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
@@ -700,8 +728,10 @@ static int mx31_config(struct spi_device *spi)
writel(reg, spi_imx->base + MX31_CSPI_TESTREG);
if (spi_imx->usedma) {
- /* configure DMA requests when RXFIFO is half full and
- when TXFIFO is half empty */
+ /*
+ * configure DMA requests when RXFIFO is half full and
+ * when TXFIFO is half empty
+ */
writel(MX31_DMAREG_RH_DEN | MX31_DMAREG_TH_DEN,
spi_imx->base + MX31_CSPI_DMAREG);
}
@@ -755,14 +785,21 @@ static void mx21_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int mx21_config(struct spi_device *spi)
+static int mx21_prepare_message(struct spi_imx_data *spi_imx,
+ struct spi_message *msg)
+{
+ return 0;
+}
+
+static int mx21_prepare_transfer(struct spi_imx_data *spi_imx,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
unsigned int clk;
- reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->speed_hz, max, &clk)
+ reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, t->speed_hz, max, &clk)
<< MX21_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
@@ -824,13 +861,20 @@ static void mx1_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int mx1_config(struct spi_device *spi)
+static int mx1_prepare_message(struct spi_imx_data *spi_imx,
+ struct spi_message *msg)
+{
+ return 0;
+}
+
+static int mx1_prepare_transfer(struct spi_imx_data *spi_imx,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
- struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
unsigned int clk;
- reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->speed_hz, &clk) <<
+ reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) <<
MX1_CSPICTRL_DR_SHIFT;
spi_imx->spi_bus_clk = clk;
@@ -858,7 +902,8 @@ static void mx1_reset(struct spi_imx_data *spi_imx)
static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
.intctrl = mx1_intctrl,
- .config = mx1_config,
+ .prepare_message = mx1_prepare_message,
+ .prepare_transfer = mx1_prepare_transfer,
.trigger = mx1_trigger,
.rx_available = mx1_rx_available,
.reset = mx1_reset,
@@ -871,7 +916,8 @@ static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
.intctrl = mx21_intctrl,
- .config = mx21_config,
+ .prepare_message = mx21_prepare_message,
+ .prepare_transfer = mx21_prepare_transfer,
.trigger = mx21_trigger,
.rx_available = mx21_rx_available,
.reset = mx21_reset,
@@ -885,7 +931,8 @@ static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
/* i.mx27 cspi shares the functions with i.mx21 one */
.intctrl = mx21_intctrl,
- .config = mx21_config,
+ .prepare_message = mx21_prepare_message,
+ .prepare_transfer = mx21_prepare_transfer,
.trigger = mx21_trigger,
.rx_available = mx21_rx_available,
.reset = mx21_reset,
@@ -898,7 +945,8 @@ static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
.intctrl = mx31_intctrl,
- .config = mx31_config,
+ .prepare_message = mx31_prepare_message,
+ .prepare_transfer = mx31_prepare_transfer,
.trigger = mx31_trigger,
.rx_available = mx31_rx_available,
.reset = mx31_reset,
@@ -912,7 +960,8 @@ static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
/* i.mx35 and later cspi shares the functions with i.mx31 one */
.intctrl = mx31_intctrl,
- .config = mx31_config,
+ .prepare_message = mx31_prepare_message,
+ .prepare_transfer = mx31_prepare_transfer,
.trigger = mx31_trigger,
.rx_available = mx31_rx_available,
.reset = mx31_reset,
@@ -925,7 +974,8 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.intctrl = mx51_ecspi_intctrl,
- .config = mx51_ecspi_config,
+ .prepare_message = mx51_ecspi_prepare_message,
+ .prepare_transfer = mx51_ecspi_prepare_transfer,
.trigger = mx51_ecspi_trigger,
.rx_available = mx51_ecspi_rx_available,
.reset = mx51_ecspi_reset,
@@ -940,7 +990,8 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
static struct spi_imx_devtype_data imx53_ecspi_devtype_data = {
.intctrl = mx51_ecspi_intctrl,
- .config = mx51_ecspi_config,
+ .prepare_message = mx51_ecspi_prepare_message,
+ .prepare_transfer = mx51_ecspi_prepare_transfer,
.trigger = mx51_ecspi_trigger,
.rx_available = mx51_ecspi_rx_available,
.reset = mx51_ecspi_reset,
@@ -1048,7 +1099,7 @@ static void spi_imx_push(struct spi_imx_data *spi_imx)
if (!spi_imx->count)
break;
if (spi_imx->dynamic_burst &&
- spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
+ spi_imx->txfifo >= DIV_ROUND_UP(spi_imx->remainder,
fifo_words))
break;
spi_imx->tx(spi_imx);
@@ -1142,7 +1193,6 @@ static int spi_imx_setupxfer(struct spi_device *spi,
return 0;
spi_imx->bits_per_word = t->bits_per_word;
- spi_imx->speed_hz = t->speed_hz;
/*
* Initialize the functions for transfer. To transfer non byte-aligned
@@ -1183,7 +1233,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
spi_imx->slave_burst = t->len;
}
- spi_imx->devtype_data->config(spi);
+ spi_imx->devtype_data->prepare_transfer(spi_imx, spi, t);
return 0;
}
@@ -1492,7 +1542,13 @@ spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
return ret;
}
- return 0;
+ ret = spi_imx->devtype_data->prepare_message(spi_imx, msg);
+ if (ret) {
+ clk_disable(spi_imx->clk_ipg);
+ clk_disable(spi_imx->clk_per);
+ }
+
+ return ret;
}
static int
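
The spi-imx refactor splits the old per-device ->config() hook in two: prepare_message handles what is fixed for a whole message (mode, chip select, loopback), while prepare_transfer handles what may change per transfer (clock divider, burst length) — which is also why the cached speed_hz field is dropped in favour of t->speed_hz. A minimal sketch of such an ops split, names hypothetical:

	#include <linux/spi/spi.h>

	struct sketch_imx_data;

	/* Hypothetical devtype ops mirroring the split introduced above. */
	struct sketch_devtype_ops {
		/* once per message: mode bits, chip select, loopback */
		int (*prepare_message)(struct sketch_imx_data *priv,
				       struct spi_message *msg);
		/* once per transfer: clock divider, burst/word length */
		int (*prepare_transfer)(struct sketch_imx_data *priv,
					struct spi_device *spi,
					struct spi_transfer *t);
	};

	struct sketch_imx_data {
		const struct sketch_devtype_ops *ops;
	};

	static int sketch_setupxfer(struct sketch_imx_data *priv,
				    struct spi_device *spi, struct spi_transfer *t)
	{
		/* Per-transfer parameters come from *t, never from cached state. */
		return priv->ops->prepare_transfer(priv, spi, t);
	}
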
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 62a7b80801d2..5217a5628be2 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -12,7 +12,7 @@
#include "internals.h"
-#define SPI_MEM_MAX_BUSWIDTH 4
+#define SPI_MEM_MAX_BUSWIDTH 8
/**
* spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
@@ -121,6 +121,13 @@ static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
break;
+ case 8:
+ if ((tx && (mode & SPI_TX_OCTAL)) ||
+ (!tx && (mode & SPI_RX_OCTAL)))
+ return 0;
+
+ break;
+
default:
break;
}
@@ -142,7 +149,7 @@ static bool spi_mem_default_supports_op(struct spi_mem *mem,
spi_check_buswidth_req(mem, op->dummy.buswidth, true))
return false;
- if (op->data.nbytes &&
+ if (op->data.dir != SPI_MEM_NO_DATA &&
spi_check_buswidth_req(mem, op->data.buswidth,
op->data.dir == SPI_MEM_DATA_OUT))
return false;
@@ -213,6 +220,44 @@ bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
+static int spi_mem_access_start(struct spi_mem *mem)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+
+ /*
+ * Flush the message queue before executing our SPI memory
+ * operation to prevent preemption of regular SPI transfers.
+ */
+ spi_flush_queue(ctlr);
+
+ if (ctlr->auto_runtime_pm) {
+ int ret;
+
+ ret = pm_runtime_get_sync(ctlr->dev.parent);
+ if (ret < 0) {
+ dev_err(&ctlr->dev, "Failed to power device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ mutex_lock(&ctlr->bus_lock_mutex);
+ mutex_lock(&ctlr->io_mutex);
+
+ return 0;
+}
+
+static void spi_mem_access_end(struct spi_mem *mem)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+
+ mutex_unlock(&ctlr->io_mutex);
+ mutex_unlock(&ctlr->bus_lock_mutex);
+
+ if (ctlr->auto_runtime_pm)
+ pm_runtime_put(ctlr->dev.parent);
+}
+
/**
* spi_mem_exec_op() - Execute a memory operation
* @mem: the SPI memory
@@ -242,30 +287,13 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
return -ENOTSUPP;
if (ctlr->mem_ops) {
- /*
- * Flush the message queue before executing our SPI memory
- * operation to prevent preemption of regular SPI transfers.
- */
- spi_flush_queue(ctlr);
-
- if (ctlr->auto_runtime_pm) {
- ret = pm_runtime_get_sync(ctlr->dev.parent);
- if (ret < 0) {
- dev_err(&ctlr->dev,
- "Failed to power device: %d\n",
- ret);
- return ret;
- }
- }
+ ret = spi_mem_access_start(mem);
+ if (ret)
+ return ret;
- mutex_lock(&ctlr->bus_lock_mutex);
- mutex_lock(&ctlr->io_mutex);
ret = ctlr->mem_ops->exec_op(mem, op);
- mutex_unlock(&ctlr->io_mutex);
- mutex_unlock(&ctlr->bus_lock_mutex);
- if (ctlr->auto_runtime_pm)
- pm_runtime_put(ctlr->dev.parent);
+ spi_mem_access_end(mem);
/*
* Some controllers only optimize specific paths (typically the
@@ -411,6 +439,210 @@ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
+static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct spi_mem_op op = desc->info.op_tmpl;
+ int ret;
+
+ op.addr.val = desc->info.offset + offs;
+ op.data.buf.in = buf;
+ op.data.nbytes = len;
+ ret = spi_mem_adjust_op_size(desc->mem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(desc->mem, &op);
+ if (ret)
+ return ret;
+
+ return op.data.nbytes;
+}
+
+static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf)
+{
+ struct spi_mem_op op = desc->info.op_tmpl;
+ int ret;
+
+ op.addr.val = desc->info.offset + offs;
+ op.data.buf.out = buf;
+ op.data.nbytes = len;
+ ret = spi_mem_adjust_op_size(desc->mem, &op);
+ if (ret)
+ return ret;
+
+ ret = spi_mem_exec_op(desc->mem, &op);
+ if (ret)
+ return ret;
+
+ return op.data.nbytes;
+}
+
+/**
+ * spi_mem_dirmap_create() - Create a direct mapping descriptor
+ * @mem: SPI mem device this direct mapping should be created for
+ * @info: direct mapping information
+ *
+ * This function creates a direct mapping descriptor which can then be used
+ * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
+ * If the SPI controller driver does not support direct mapping, this function
+ * falls back to an implementation using spi_mem_exec_op(), so that the caller
+ * doesn't have to bother implementing a fallback of their own.
+ *
+ * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
+ */
+struct spi_mem_dirmap_desc *
+spi_mem_dirmap_create(struct spi_mem *mem,
+ const struct spi_mem_dirmap_info *info)
+{
+ struct spi_controller *ctlr = mem->spi->controller;
+ struct spi_mem_dirmap_desc *desc;
+ int ret = -ENOTSUPP;
+
+	/* Make sure the number of address bytes is between 1 and 8. */
+ if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
+ return ERR_PTR(-EINVAL);
+
+ /* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
+ if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
+ return ERR_PTR(-EINVAL);
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->mem = mem;
+ desc->info = *info;
+ if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
+ ret = ctlr->mem_ops->dirmap_create(desc);
+
+ if (ret) {
+ desc->nodirmap = true;
+ if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
+ ret = -ENOTSUPP;
+ else
+ ret = 0;
+ }
+
+ if (ret) {
+ kfree(desc);
+ return ERR_PTR(ret);
+ }
+
+ return desc;
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
+
+/**
+ * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
+ * @desc: the direct mapping descriptor to destroy
+ *
+ * This function destroys a direct mapping descriptor previously created by
+ * spi_mem_dirmap_create().
+ */
+void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
+{
+ struct spi_controller *ctlr = desc->mem->spi->controller;
+
+ if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
+ ctlr->mem_ops->dirmap_destroy(desc);
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
+
+/**
+ * spi_mem_dirmap_read() - Read data through a direct mapping
+ * @desc: direct mapping descriptor
+ * @offs: offset to start reading from. Note that this is not an absolute
+ * offset, but the offset within the direct mapping which already has
+ * its own offset
+ * @len: length in bytes
+ * @buf: destination buffer. This buffer must be DMA-able
+ *
+ * This function reads data from a memory device using a direct mapping
+ * previously instantiated with spi_mem_dirmap_create().
+ *
+ * Return: the amount of data read from the memory device or a negative error
+ * code. Note that the returned size might be smaller than @len, and the caller
+ * is responsible for calling spi_mem_dirmap_read() again when that happens.
+ */
+ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+{
+ struct spi_controller *ctlr = desc->mem->spi->controller;
+ ssize_t ret;
+
+ if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
+ return -EINVAL;
+
+ if (!len)
+ return 0;
+
+ if (desc->nodirmap) {
+ ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
+ } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
+ ret = spi_mem_access_start(desc->mem);
+ if (ret)
+ return ret;
+
+ ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);
+
+ spi_mem_access_end(desc->mem);
+ } else {
+ ret = -ENOTSUPP;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
+
+/**
+ * spi_mem_dirmap_write() - Write data through a direct mapping
+ * @desc: direct mapping descriptor
+ * @offs: offset to start writing from. Note that this is not an absolute
+ * offset, but the offset within the direct mapping which already has
+ * its own offset
+ * @len: length in bytes
+ * @buf: source buffer. This buffer must be DMA-able
+ *
+ * This function writes data to a memory device using a direct mapping
+ * previously instantiated with spi_mem_dirmap_create().
+ *
+ * Return: the amount of data written to the memory device or a negative error
+ * code. Note that the returned size might be smaller than @len, and the caller
+ * is responsible for calling spi_mem_dirmap_write() again when that happens.
+ */
+ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf)
+{
+ struct spi_controller *ctlr = desc->mem->spi->controller;
+ ssize_t ret;
+
+ if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
+ return -EINVAL;
+
+ if (!len)
+ return 0;
+
+ if (desc->nodirmap) {
+ ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
+ } else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
+ ret = spi_mem_access_start(desc->mem);
+ if (ret)
+ return ret;
+
+ ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);
+
+ spi_mem_access_end(desc->mem);
+ } else {
+ ret = -ENOTSUPP;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);
+
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
return container_of(drv, struct spi_mem_driver, spidrv.driver);
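
Putting the new dirmap entry points together: a descriptor is created once from an op template, reads may return short, and the caller loops. A sketch under assumed flash geometry and opcodes (the 0x0b fast-read command and the 16 MiB window are illustrative):

	#include <linux/err.h>
	#include <linux/spi/spi-mem.h>

	static ssize_t sketch_dirmap_read_all(struct spi_mem *mem, u64 offs,
					      size_t len, void *buf)
	{
		struct spi_mem_dirmap_info info = {
			.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
					      SPI_MEM_OP_ADDR(3, 0, 1),
					      SPI_MEM_OP_DUMMY(1, 1),
					      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
			.offset = 0,
			.length = 0x1000000,	/* 16 MiB window, illustrative */
		};
		struct spi_mem_dirmap_desc *desc;
		size_t done = 0;

		desc = spi_mem_dirmap_create(mem, &info);
		if (IS_ERR(desc))
			return PTR_ERR(desc);

		/* Short reads are legal: keep calling until everything arrived. */
		while (done < len) {
			ssize_t ret = spi_mem_dirmap_read(desc, offs + done,
							  len - done, buf + done);
			if (ret <= 0) {
				spi_mem_dirmap_destroy(desc);
				return ret ? ret : -EIO;
			}
			done += ret;
		}

		spi_mem_dirmap_destroy(desc);
		return done;
	}

The write side is symmetric: spi_mem_dirmap_write() with SPI_MEM_DATA_OUT in the template, looped the same way.
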
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 3dc31627c655..0cce6f0ba824 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -120,6 +120,12 @@ static const struct mtk_spi_compatible mt8173_compat = {
.must_tx = true,
};
+static const struct mtk_spi_compatible mt8183_compat = {
+ .need_pad_sel = true,
+ .must_tx = true,
+ .enhance_timing = true,
+};
+
/*
* A piece of default chip info unless the platform
* supplies it.
@@ -144,12 +150,18 @@ static const struct of_device_id mtk_spi_of_match[] = {
{ .compatible = "mediatek,mt7622-spi",
.data = (void *)&mt7622_compat,
},
+ { .compatible = "mediatek,mt7629-spi",
+ .data = (void *)&mt7622_compat,
+ },
{ .compatible = "mediatek,mt8135-spi",
.data = (void *)&mtk_common_compat,
},
{ .compatible = "mediatek,mt8173-spi",
.data = (void *)&mt8173_compat,
},
+ { .compatible = "mediatek,mt8183-spi",
+ .data = (void *)&mt8183_compat,
+ },
{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
@@ -522,11 +534,11 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
mtk_spi_setup_packet(master);
- cnt = len / 4;
+ cnt = mdata->xfer_len / 4;
iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
trans->tx_buf + mdata->num_xfered, cnt);
- remainder = len % 4;
+ remainder = mdata->xfer_len % 4;
if (remainder > 0) {
reg_val = 0;
memcpy(&reg_val,
diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
new file mode 100644
index 000000000000..e41ae6ef0f8a
--- /dev/null
+++ b/drivers/spi/spi-mxic.c
@@ -0,0 +1,619 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2018 Macronix International Co., Ltd.
+//
+// Authors:
+// Mason Yang <masonccyang@mxic.com.tw>
+// zhengxunli <zhengxunli@mxic.com.tw>
+// Boris Brezillon <boris.brezillon@bootlin.com>
+//
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+#define HC_CFG 0x0
+#define HC_CFG_IF_CFG(x) ((x) << 27)
+#define HC_CFG_DUAL_SLAVE BIT(31)
+#define HC_CFG_INDIVIDUAL BIT(30)
+#define HC_CFG_NIO(x) (((x) / 4) << 27)
+#define HC_CFG_TYPE(s, t) ((t) << (23 + ((s) * 2)))
+#define HC_CFG_TYPE_SPI_NOR 0
+#define HC_CFG_TYPE_SPI_NAND 1
+#define HC_CFG_TYPE_SPI_RAM 2
+#define HC_CFG_TYPE_RAW_NAND 3
+#define HC_CFG_SLV_ACT(x) ((x) << 21)
+#define HC_CFG_CLK_PH_EN BIT(20)
+#define HC_CFG_CLK_POL_INV BIT(19)
+#define HC_CFG_BIG_ENDIAN BIT(18)
+#define HC_CFG_DATA_PASS BIT(17)
+#define HC_CFG_IDLE_SIO_LVL(x) ((x) << 16)
+#define HC_CFG_MAN_START_EN BIT(3)
+#define HC_CFG_MAN_START BIT(2)
+#define HC_CFG_MAN_CS_EN BIT(1)
+#define HC_CFG_MAN_CS_ASSERT BIT(0)
+
+#define INT_STS 0x4
+#define INT_STS_EN 0x8
+#define INT_SIG_EN 0xc
+#define INT_STS_ALL GENMASK(31, 0)
+#define INT_RDY_PIN BIT(26)
+#define INT_RDY_SR BIT(25)
+#define INT_LNR_SUSP BIT(24)
+#define INT_ECC_ERR BIT(17)
+#define INT_CRC_ERR BIT(16)
+#define INT_LWR_DIS BIT(12)
+#define INT_LRD_DIS BIT(11)
+#define INT_SDMA_INT BIT(10)
+#define INT_DMA_FINISH BIT(9)
+#define INT_RX_NOT_FULL BIT(3)
+#define INT_RX_NOT_EMPTY BIT(2)
+#define INT_TX_NOT_FULL BIT(1)
+#define INT_TX_EMPTY BIT(0)
+
+#define HC_EN 0x10
+#define HC_EN_BIT BIT(0)
+
+#define TXD(x) (0x14 + ((x) * 4))
+#define RXD 0x24
+
+#define SS_CTRL(s) (0x30 + ((s) * 4))
+#define LRD_CFG 0x44
+#define LWR_CFG 0x80
+#define RWW_CFG 0x70
+#define OP_READ BIT(23)
+#define OP_DUMMY_CYC(x) ((x) << 17)
+#define OP_ADDR_BYTES(x) ((x) << 14)
+#define OP_CMD_BYTES(x) (((x) - 1) << 13)
+#define OP_OCTA_CRC_EN BIT(12)
+#define OP_DQS_EN BIT(11)
+#define OP_ENHC_EN BIT(10)
+#define OP_PREAMBLE_EN BIT(9)
+#define OP_DATA_DDR BIT(8)
+#define OP_DATA_BUSW(x) ((x) << 6)
+#define OP_ADDR_DDR BIT(5)
+#define OP_ADDR_BUSW(x) ((x) << 3)
+#define OP_CMD_DDR BIT(2)
+#define OP_CMD_BUSW(x) (x)
+#define OP_BUSW_1 0
+#define OP_BUSW_2 1
+#define OP_BUSW_4 2
+#define OP_BUSW_8 3
+
+#define OCTA_CRC 0x38
+#define OCTA_CRC_IN_EN(s) BIT(3 + ((s) * 16))
+#define OCTA_CRC_CHUNK(s, x) ((fls((x) / 32)) << (1 + ((s) * 16)))
+#define OCTA_CRC_OUT_EN(s) BIT(0 + ((s) * 16))
+
+#define ONFI_DIN_CNT(s) (0x3c + (s))
+
+#define LRD_CTRL 0x48
+#define RWW_CTRL 0x74
+#define LWR_CTRL 0x84
+#define LMODE_EN BIT(31)
+#define LMODE_SLV_ACT(x) ((x) << 21)
+#define LMODE_CMD1(x) ((x) << 8)
+#define LMODE_CMD0(x) (x)
+
+#define LRD_ADDR 0x4c
+#define LWR_ADDR 0x88
+#define LRD_RANGE 0x50
+#define LWR_RANGE 0x8c
+
+#define AXI_SLV_ADDR 0x54
+
+#define DMAC_RD_CFG 0x58
+#define DMAC_WR_CFG 0x94
+#define DMAC_CFG_PERIPH_EN BIT(31)
+#define DMAC_CFG_ALLFLUSH_EN BIT(30)
+#define DMAC_CFG_LASTFLUSH_EN BIT(29)
+#define DMAC_CFG_QE(x) (((x) + 1) << 16)
+#define DMAC_CFG_BURST_LEN(x) (((x) + 1) << 12)
+#define DMAC_CFG_BURST_SZ(x) ((x) << 8)
+#define DMAC_CFG_DIR_READ BIT(1)
+#define DMAC_CFG_START BIT(0)
+
+#define DMAC_RD_CNT 0x5c
+#define DMAC_WR_CNT 0x98
+
+#define SDMA_ADDR 0x60
+
+#define DMAM_CFG 0x64
+#define DMAM_CFG_START BIT(31)
+#define DMAM_CFG_CONT BIT(30)
+#define DMAM_CFG_SDMA_GAP(x) (fls((x) / 8192) << 2)
+#define DMAM_CFG_DIR_READ BIT(1)
+#define DMAM_CFG_EN BIT(0)
+
+#define DMAM_CNT 0x68
+
+#define LNR_TIMER_TH 0x6c
+
+#define RDM_CFG0 0x78
+#define RDM_CFG0_POLY(x) (x)
+
+#define RDM_CFG1 0x7c
+#define RDM_CFG1_RDM_EN BIT(31)
+#define RDM_CFG1_SEED(x) (x)
+
+#define LWR_SUSP_CTRL 0x90
+#define LWR_SUSP_CTRL_EN BIT(31)
+
+#define DMAS_CTRL 0x9c
+#define DMAS_CTRL_DIR_READ BIT(31)
+#define DMAS_CTRL_EN BIT(30)
+
+#define DATA_STROB 0xa0
+#define DATA_STROB_EDO_EN BIT(2)
+#define DATA_STROB_INV_POL BIT(1)
+#define DATA_STROB_DELAY_2CYC BIT(0)
+
+#define IDLY_CODE(x) (0xa4 + ((x) * 4))
+#define IDLY_CODE_VAL(x, v) ((v) << (((x) % 4) * 8))
+
+#define GPIO 0xc4
+#define GPIO_PT(x) BIT(3 + ((x) * 16))
+#define GPIO_RESET(x) BIT(2 + ((x) * 16))
+#define GPIO_HOLDB(x) BIT(1 + ((x) * 16))
+#define GPIO_WPB(x) BIT((x) * 16)
+
+#define HC_VER 0xd0
+
+#define HW_TEST(x) (0xe0 + ((x) * 4))
+
+struct mxic_spi {
+ struct clk *ps_clk;
+ struct clk *send_clk;
+ struct clk *send_dly_clk;
+ void __iomem *regs;
+ u32 cur_speed_hz;
+};
+
+static int mxic_spi_clk_enable(struct mxic_spi *mxic)
+{
+ int ret;
+
+ ret = clk_prepare_enable(mxic->send_clk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(mxic->send_dly_clk);
+ if (ret)
+ goto err_send_dly_clk;
+
+ return ret;
+
+err_send_dly_clk:
+ clk_disable_unprepare(mxic->send_clk);
+
+ return ret;
+}
+
+static void mxic_spi_clk_disable(struct mxic_spi *mxic)
+{
+ clk_disable_unprepare(mxic->send_clk);
+ clk_disable_unprepare(mxic->send_dly_clk);
+}
+
+static void mxic_spi_set_input_delay_dqs(struct mxic_spi *mxic, u8 idly_code)
+{
+ writel(IDLY_CODE_VAL(0, idly_code) |
+ IDLY_CODE_VAL(1, idly_code) |
+ IDLY_CODE_VAL(2, idly_code) |
+ IDLY_CODE_VAL(3, idly_code),
+ mxic->regs + IDLY_CODE(0));
+ writel(IDLY_CODE_VAL(4, idly_code) |
+ IDLY_CODE_VAL(5, idly_code) |
+ IDLY_CODE_VAL(6, idly_code) |
+ IDLY_CODE_VAL(7, idly_code),
+ mxic->regs + IDLY_CODE(1));
+}
+
+static int mxic_spi_clk_setup(struct mxic_spi *mxic, unsigned long freq)
+{
+ int ret;
+
+ ret = clk_set_rate(mxic->send_clk, freq);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(mxic->send_dly_clk, freq);
+ if (ret)
+ return ret;
+
+ /*
+	 * The input delay code ranges from 0x0 to 0x1F in units of 78 ps,
+	 * so the maximum input delay is 2.418 ns.
+ */
+ mxic_spi_set_input_delay_dqs(mxic, 0xf);
+
+ /*
+ * Phase degree = 360 * freq * output-delay
+ * where output-delay is a constant value 1 ns in FPGA.
+ *
+ * Get Phase degree = 360 * freq * 1 ns
+ * = 360 * freq * 1 sec / 1000000000
+ * = 9 * freq / 25000000
+ */
+ ret = clk_set_phase(mxic->send_dly_clk, 9 * freq / 25000000);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mxic_spi_set_freq(struct mxic_spi *mxic, unsigned long freq)
+{
+ int ret;
+
+ if (mxic->cur_speed_hz == freq)
+ return 0;
+
+ mxic_spi_clk_disable(mxic);
+ ret = mxic_spi_clk_setup(mxic, freq);
+ if (ret)
+ return ret;
+
+ ret = mxic_spi_clk_enable(mxic);
+ if (ret)
+ return ret;
+
+ mxic->cur_speed_hz = freq;
+
+ return 0;
+}
+
+static void mxic_spi_hw_init(struct mxic_spi *mxic)
+{
+ writel(0, mxic->regs + DATA_STROB);
+ writel(INT_STS_ALL, mxic->regs + INT_STS_EN);
+ writel(0, mxic->regs + HC_EN);
+ writel(0, mxic->regs + LRD_CFG);
+ writel(0, mxic->regs + LRD_CTRL);
+ writel(HC_CFG_NIO(1) | HC_CFG_TYPE(0, HC_CFG_TYPE_SPI_NAND) |
+ HC_CFG_SLV_ACT(0) | HC_CFG_MAN_CS_EN | HC_CFG_IDLE_SIO_LVL(1),
+ mxic->regs + HC_CFG);
+}
+
+static int mxic_spi_data_xfer(struct mxic_spi *mxic, const void *txbuf,
+ void *rxbuf, unsigned int len)
+{
+ unsigned int pos = 0;
+
+ while (pos < len) {
+ unsigned int nbytes = len - pos;
+ u32 data = 0xffffffff;
+ u32 sts;
+ int ret;
+
+ if (nbytes > 4)
+ nbytes = 4;
+
+ if (txbuf)
+ memcpy(&data, txbuf + pos, nbytes);
+
+ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
+ sts & INT_TX_EMPTY, 0, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ writel(data, mxic->regs + TXD(nbytes % 4));
+
+ if (rxbuf) {
+ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
+ sts & INT_TX_EMPTY, 0,
+ USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ ret = readl_poll_timeout(mxic->regs + INT_STS, sts,
+ sts & INT_RX_NOT_EMPTY, 0,
+ USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ data = readl(mxic->regs + RXD);
+ data >>= (8 * (4 - nbytes));
+ memcpy(rxbuf + pos, &data, nbytes);
+ WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY);
+ } else {
+ readl(mxic->regs + RXD);
+ }
+ WARN_ON(readl(mxic->regs + INT_STS) & INT_RX_NOT_EMPTY);
+
+ pos += nbytes;
+ }
+
+ return 0;
+}
+
+static bool mxic_spi_mem_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
+ op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
+ return false;
+
+ if (op->data.nbytes && op->dummy.nbytes &&
+ op->data.buswidth != op->dummy.buswidth)
+ return false;
+
+ if (op->addr.nbytes > 7)
+ return false;
+
+ return true;
+}
+
+static int mxic_spi_mem_exec_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct mxic_spi *mxic = spi_master_get_devdata(mem->spi->master);
+ int nio = 1, i, ret;
+ u32 ss_ctrl;
+ u8 addr[8];
+
+ ret = mxic_spi_set_freq(mxic, mem->spi->max_speed_hz);
+ if (ret)
+ return ret;
+
+ if (mem->spi->mode & (SPI_TX_QUAD | SPI_RX_QUAD))
+ nio = 4;
+ else if (mem->spi->mode & (SPI_TX_DUAL | SPI_RX_DUAL))
+ nio = 2;
+
+ writel(HC_CFG_NIO(nio) |
+ HC_CFG_TYPE(mem->spi->chip_select, HC_CFG_TYPE_SPI_NOR) |
+ HC_CFG_SLV_ACT(mem->spi->chip_select) | HC_CFG_IDLE_SIO_LVL(1) |
+ HC_CFG_MAN_CS_EN,
+ mxic->regs + HC_CFG);
+ writel(HC_EN_BIT, mxic->regs + HC_EN);
+
+ ss_ctrl = OP_CMD_BYTES(1) | OP_CMD_BUSW(fls(op->cmd.buswidth) - 1);
+
+ if (op->addr.nbytes)
+ ss_ctrl |= OP_ADDR_BYTES(op->addr.nbytes) |
+ OP_ADDR_BUSW(fls(op->addr.buswidth) - 1);
+
+ if (op->dummy.nbytes)
+ ss_ctrl |= OP_DUMMY_CYC(op->dummy.nbytes);
+
+ if (op->data.nbytes) {
+ ss_ctrl |= OP_DATA_BUSW(fls(op->data.buswidth) - 1);
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ ss_ctrl |= OP_READ;
+ }
+
+ writel(ss_ctrl, mxic->regs + SS_CTRL(mem->spi->chip_select));
+
+ writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
+ mxic->regs + HC_CFG);
+
+ ret = mxic_spi_data_xfer(mxic, &op->cmd.opcode, NULL, 1);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < op->addr.nbytes; i++)
+ addr[i] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
+
+ ret = mxic_spi_data_xfer(mxic, addr, NULL, op->addr.nbytes);
+ if (ret)
+ goto out;
+
+ ret = mxic_spi_data_xfer(mxic, NULL, NULL, op->dummy.nbytes);
+ if (ret)
+ goto out;
+
+ ret = mxic_spi_data_xfer(mxic,
+ op->data.dir == SPI_MEM_DATA_OUT ?
+ op->data.buf.out : NULL,
+ op->data.dir == SPI_MEM_DATA_IN ?
+ op->data.buf.in : NULL,
+ op->data.nbytes);
+
+out:
+ writel(readl(mxic->regs + HC_CFG) & ~HC_CFG_MAN_CS_ASSERT,
+ mxic->regs + HC_CFG);
+ writel(0, mxic->regs + HC_EN);
+
+ return ret;
+}
+
+static const struct spi_controller_mem_ops mxic_spi_mem_ops = {
+ .supports_op = mxic_spi_mem_supports_op,
+ .exec_op = mxic_spi_mem_exec_op,
+};
+
+static void mxic_spi_set_cs(struct spi_device *spi, bool lvl)
+{
+ struct mxic_spi *mxic = spi_master_get_devdata(spi->master);
+
+ if (!lvl) {
+ writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_EN,
+ mxic->regs + HC_CFG);
+ writel(HC_EN_BIT, mxic->regs + HC_EN);
+ writel(readl(mxic->regs + HC_CFG) | HC_CFG_MAN_CS_ASSERT,
+ mxic->regs + HC_CFG);
+ } else {
+ writel(readl(mxic->regs + HC_CFG) & ~HC_CFG_MAN_CS_ASSERT,
+ mxic->regs + HC_CFG);
+ writel(0, mxic->regs + HC_EN);
+ }
+}
+
+static int mxic_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mxic_spi *mxic = spi_master_get_devdata(master);
+ unsigned int busw = OP_BUSW_1;
+ int ret;
+
+ if (t->rx_buf && t->tx_buf) {
+ if (((spi->mode & SPI_TX_QUAD) &&
+ !(spi->mode & SPI_RX_QUAD)) ||
+ ((spi->mode & SPI_TX_DUAL) &&
+ !(spi->mode & SPI_RX_DUAL)))
+ return -ENOTSUPP;
+ }
+
+ ret = mxic_spi_set_freq(mxic, t->speed_hz);
+ if (ret)
+ return ret;
+
+ if (t->tx_buf) {
+ if (spi->mode & SPI_TX_QUAD)
+ busw = OP_BUSW_4;
+ else if (spi->mode & SPI_TX_DUAL)
+ busw = OP_BUSW_2;
+ } else if (t->rx_buf) {
+ if (spi->mode & SPI_RX_QUAD)
+ busw = OP_BUSW_4;
+ else if (spi->mode & SPI_RX_DUAL)
+ busw = OP_BUSW_2;
+ }
+
+ writel(OP_CMD_BYTES(1) | OP_CMD_BUSW(busw) |
+ OP_DATA_BUSW(busw) | (t->rx_buf ? OP_READ : 0),
+ mxic->regs + SS_CTRL(0));
+
+ ret = mxic_spi_data_xfer(mxic, t->tx_buf, t->rx_buf, t->len);
+ if (ret)
+ return ret;
+
+ spi_finalize_current_transfer(master);
+
+ return 0;
+}
+
+static int __maybe_unused mxic_spi_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mxic_spi *mxic = spi_master_get_devdata(master);
+
+ mxic_spi_clk_disable(mxic);
+ clk_disable_unprepare(mxic->ps_clk);
+
+ return 0;
+}
+
+static int __maybe_unused mxic_spi_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mxic_spi *mxic = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(mxic->ps_clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable ps_clock.\n");
+ return ret;
+ }
+
+ return mxic_spi_clk_enable(mxic);
+}
+
+static const struct dev_pm_ops mxic_spi_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(mxic_spi_runtime_suspend,
+ mxic_spi_runtime_resume, NULL)
+};
+
+static int mxic_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct resource *res;
+ struct mxic_spi *mxic;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct mxic_spi));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ mxic = spi_master_get_devdata(master);
+
+ master->dev.of_node = pdev->dev.of_node;
+
+ mxic->ps_clk = devm_clk_get(&pdev->dev, "ps_clk");
+ if (IS_ERR(mxic->ps_clk))
+ return PTR_ERR(mxic->ps_clk);
+
+ mxic->send_clk = devm_clk_get(&pdev->dev, "send_clk");
+ if (IS_ERR(mxic->send_clk))
+ return PTR_ERR(mxic->send_clk);
+
+ mxic->send_dly_clk = devm_clk_get(&pdev->dev, "send_dly_clk");
+ if (IS_ERR(mxic->send_dly_clk))
+ return PTR_ERR(mxic->send_dly_clk);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ mxic->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mxic->regs))
+ return PTR_ERR(mxic->regs);
+
+ pm_runtime_enable(&pdev->dev);
+ master->auto_runtime_pm = true;
+
+ master->num_chipselect = 1;
+ master->mem_ops = &mxic_spi_mem_ops;
+
+ master->set_cs = mxic_spi_set_cs;
+ master->transfer_one = mxic_spi_transfer_one;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->mode_bits = SPI_CPOL | SPI_CPHA |
+ SPI_RX_DUAL | SPI_TX_DUAL |
+ SPI_RX_QUAD | SPI_TX_QUAD;
+
+ mxic_spi_hw_init(mxic);
+
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto err_put_master;
+ }
+
+ return 0;
+
+err_put_master:
+ spi_master_put(master);
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int mxic_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+ spi_unregister_master(master);
+
+ return 0;
+}
+
+static const struct of_device_id mxic_spi_of_ids[] = {
+ { .compatible = "mxicy,mx25f0a-spi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxic_spi_of_ids);
+
+static struct platform_driver mxic_spi_driver = {
+ .probe = mxic_spi_probe,
+ .remove = mxic_spi_remove,
+ .driver = {
+ .name = "mxic-spi",
+ .of_match_table = mxic_spi_of_ids,
+ .pm = &mxic_spi_dev_pm_ops,
+ },
+};
+module_platform_driver(mxic_spi_driver);
+
+MODULE_AUTHOR("Mason Yang <masonccyang@mxic.com.tw>");
+MODULE_DESCRIPTION("MX25F0A SPI controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c
new file mode 100644
index 000000000000..e1dca79b9090
--- /dev/null
+++ b/drivers/spi/spi-npcm-pspi.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Nuvoton Technology corporation.
+
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include <asm/unaligned.h>
+
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+struct npcm_pspi {
+ struct completion xfer_done;
+ struct regmap *rst_regmap;
+ struct spi_master *master;
+ unsigned int tx_bytes;
+ unsigned int rx_bytes;
+ void __iomem *base;
+ bool is_save_param;
+ u8 bits_per_word;
+ const u8 *tx_buf;
+ struct clk *clk;
+ u32 speed_hz;
+ u8 *rx_buf;
+ u16 mode;
+ u32 id;
+};
+
+#define DRIVER_NAME "npcm-pspi"
+
+#define NPCM_PSPI_DATA 0x00
+#define NPCM_PSPI_CTL1 0x02
+#define NPCM_PSPI_STAT 0x04
+
+/* definitions for control and status register */
+#define NPCM_PSPI_CTL1_SPIEN BIT(0)
+#define NPCM_PSPI_CTL1_MOD BIT(2)
+#define NPCM_PSPI_CTL1_EIR BIT(5)
+#define NPCM_PSPI_CTL1_EIW BIT(6)
+#define NPCM_PSPI_CTL1_SCM BIT(7)
+#define NPCM_PSPI_CTL1_SCIDL BIT(8)
+#define NPCM_PSPI_CTL1_SCDV6_0 GENMASK(15, 9)
+
+#define NPCM_PSPI_STAT_BSY BIT(0)
+#define NPCM_PSPI_STAT_RBF BIT(1)
+
+/* general definitions */
+#define NPCM_PSPI_TIMEOUT_MS 2000
+#define NPCM_PSPI_MAX_CLK_DIVIDER 256
+#define NPCM_PSPI_MIN_CLK_DIVIDER 4
+#define NPCM_PSPI_DEFAULT_CLK 25000000
+
+/* reset register */
+#define NPCM7XX_IPSRST2_OFFSET 0x24
+
+#define NPCM7XX_PSPI1_RESET BIT(22)
+#define NPCM7XX_PSPI2_RESET BIT(23)
+
+static inline unsigned int bytes_per_word(unsigned int bits)
+{
+ return bits <= 8 ? 1 : 2;
+}
+
+static inline void npcm_pspi_irq_enable(struct npcm_pspi *priv, u16 mask)
+{
+ u16 val;
+
+ val = ioread16(priv->base + NPCM_PSPI_CTL1);
+ val |= mask;
+ iowrite16(val, priv->base + NPCM_PSPI_CTL1);
+}
+
+static inline void npcm_pspi_irq_disable(struct npcm_pspi *priv, u16 mask)
+{
+ u16 val;
+
+ val = ioread16(priv->base + NPCM_PSPI_CTL1);
+ val &= ~mask;
+ iowrite16(val, priv->base + NPCM_PSPI_CTL1);
+}
+
+static inline void npcm_pspi_enable(struct npcm_pspi *priv)
+{
+ u16 val;
+
+ val = ioread16(priv->base + NPCM_PSPI_CTL1);
+ val |= NPCM_PSPI_CTL1_SPIEN;
+ iowrite16(val, priv->base + NPCM_PSPI_CTL1);
+}
+
+static inline void npcm_pspi_disable(struct npcm_pspi *priv)
+{
+ u16 val;
+
+ val = ioread16(priv->base + NPCM_PSPI_CTL1);
+ val &= ~NPCM_PSPI_CTL1_SPIEN;
+ iowrite16(val, priv->base + NPCM_PSPI_CTL1);
+}
+
+static void npcm_pspi_set_mode(struct spi_device *spi)
+{
+ struct npcm_pspi *priv = spi_master_get_devdata(spi->master);
+ u16 regtemp;
+ u16 mode_val;
+
+ switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
+ case SPI_MODE_0:
+ mode_val = 0;
+ break;
+ case SPI_MODE_1:
+ mode_val = NPCM_PSPI_CTL1_SCIDL;
+ break;
+ case SPI_MODE_2:
+ mode_val = NPCM_PSPI_CTL1_SCM;
+ break;
+ case SPI_MODE_3:
+ mode_val = NPCM_PSPI_CTL1_SCIDL | NPCM_PSPI_CTL1_SCM;
+ break;
+ }
+
+ regtemp = ioread16(priv->base + NPCM_PSPI_CTL1);
+ regtemp &= ~(NPCM_PSPI_CTL1_SCM | NPCM_PSPI_CTL1_SCIDL);
+ iowrite16(regtemp | mode_val, priv->base + NPCM_PSPI_CTL1);
+}
+
+static void npcm_pspi_set_transfer_size(struct npcm_pspi *priv, int size)
+{
+ u16 regtemp;
+
+ regtemp = ioread16(NPCM_PSPI_CTL1 + priv->base);
+
+ switch (size) {
+ case 8:
+ regtemp &= ~NPCM_PSPI_CTL1_MOD;
+ break;
+ case 16:
+ regtemp |= NPCM_PSPI_CTL1_MOD;
+ break;
+ }
+
+ iowrite16(regtemp, NPCM_PSPI_CTL1 + priv->base);
+}
+
+static void npcm_pspi_set_baudrate(struct npcm_pspi *priv, unsigned int speed)
+{
+ u32 ckdiv;
+ u16 regtemp;
+
+	/* the supported clock dividers range from 4 to 256. */
+ ckdiv = DIV_ROUND_CLOSEST(clk_get_rate(priv->clk), (2 * speed)) - 1;
+
+ regtemp = ioread16(NPCM_PSPI_CTL1 + priv->base);
+ regtemp &= ~NPCM_PSPI_CTL1_SCDV6_0;
+ iowrite16(regtemp | (ckdiv << 9), NPCM_PSPI_CTL1 + priv->base);
+}
+
+static void npcm_pspi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct npcm_pspi *priv = spi_master_get_devdata(spi->master);
+
+ priv->tx_buf = t->tx_buf;
+ priv->rx_buf = t->rx_buf;
+ priv->tx_bytes = t->len;
+ priv->rx_bytes = t->len;
+
+ if (!priv->is_save_param || priv->mode != spi->mode) {
+ npcm_pspi_set_mode(spi);
+ priv->mode = spi->mode;
+ }
+
+ if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
+ npcm_pspi_set_transfer_size(priv, t->bits_per_word);
+ priv->bits_per_word = t->bits_per_word;
+ }
+
+ if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
+ npcm_pspi_set_baudrate(priv, t->speed_hz);
+ priv->speed_hz = t->speed_hz;
+ }
+
+ if (!priv->is_save_param)
+ priv->is_save_param = true;
+}
+
+static void npcm_pspi_send(struct npcm_pspi *priv)
+{
+ int wsize;
+
+ wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
+ priv->tx_bytes -= wsize;
+
+ if (!priv->tx_buf)
+ return;
+
+ switch (wsize) {
+ case 1:
+ iowrite8(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+ break;
+ case 2:
+ iowrite16(*priv->tx_buf, NPCM_PSPI_DATA + priv->base);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ priv->tx_buf += wsize;
+}
+
+static void npcm_pspi_recv(struct npcm_pspi *priv)
+{
+ int rsize;
+ u16 val;
+
+ rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
+ priv->rx_bytes -= rsize;
+
+ if (!priv->rx_buf)
+ return;
+
+ switch (rsize) {
+ case 1:
+ val = ioread8(priv->base + NPCM_PSPI_DATA);
+ break;
+ case 2:
+ val = ioread16(priv->base + NPCM_PSPI_DATA);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ *priv->rx_buf = val;
+ priv->rx_buf += rsize;
+}
+
+static int npcm_pspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct npcm_pspi *priv = spi_master_get_devdata(master);
+ int status;
+
+ npcm_pspi_setup_transfer(spi, t);
+ reinit_completion(&priv->xfer_done);
+ npcm_pspi_enable(priv);
+ status = wait_for_completion_timeout(&priv->xfer_done,
+ msecs_to_jiffies
+ (NPCM_PSPI_TIMEOUT_MS));
+ if (status == 0) {
+ npcm_pspi_disable(priv);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int npcm_pspi_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct npcm_pspi *priv = spi_master_get_devdata(master);
+
+ npcm_pspi_irq_enable(priv, NPCM_PSPI_CTL1_EIR | NPCM_PSPI_CTL1_EIW);
+
+ return 0;
+}
+
+static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct npcm_pspi *priv = spi_master_get_devdata(master);
+
+ npcm_pspi_irq_disable(priv, NPCM_PSPI_CTL1_EIR | NPCM_PSPI_CTL1_EIW);
+
+ return 0;
+}
+
+static void npcm_pspi_reset_hw(struct npcm_pspi *priv)
+{
+ regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET,
+ NPCM7XX_PSPI1_RESET << priv->id);
+ regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0);
+}
+
+static irqreturn_t npcm_pspi_handler(int irq, void *dev_id)
+{
+ struct npcm_pspi *priv = dev_id;
+ u16 val;
+ u8 stat;
+
+ stat = ioread8(priv->base + NPCM_PSPI_STAT);
+
+ if (!priv->tx_buf && !priv->rx_buf)
+ return IRQ_NONE;
+
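+	/*
+	 * Transmit path: each word shifted out clocks a word back in,
+	 * so drain the read-back and refill the FIFO when the bus is
+	 * idle.
+	 */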
+ if (priv->tx_buf) {
+ if (stat & NPCM_PSPI_STAT_RBF) {
+ val = ioread8(NPCM_PSPI_DATA + priv->base);
+ if (priv->tx_bytes == 0) {
+ npcm_pspi_disable(priv);
+ complete(&priv->xfer_done);
+ return IRQ_HANDLED;
+ }
+ }
+
+ if ((stat & NPCM_PSPI_STAT_BSY) == 0)
+ if (priv->tx_bytes)
+ npcm_pspi_send(priv);
+ }
+
+ if (priv->rx_buf) {
+ if (stat & NPCM_PSPI_STAT_RBF) {
+ if (!priv->rx_bytes)
+ return IRQ_NONE;
+
+ npcm_pspi_recv(priv);
+
+ if (!priv->rx_bytes) {
+ npcm_pspi_disable(priv);
+ complete(&priv->xfer_done);
+ return IRQ_HANDLED;
+ }
+ }
+
+ if (((stat & NPCM_PSPI_STAT_BSY) == 0) && !priv->tx_buf)
+ iowrite8(0x0, NPCM_PSPI_DATA + priv->base);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int npcm_pspi_probe(struct platform_device *pdev)
+{
+ struct npcm_pspi *priv;
+ struct spi_master *master;
+ struct resource *res;
+ unsigned long clk_hz;
+ struct device_node *np = pdev->dev.of_node;
+ int num_cs, i;
+ int csgpio;
+ int irq;
+ int ret;
+
+ num_cs = of_gpio_named_count(np, "cs-gpios");
+ if (num_cs < 0)
+ return num_cs;
+
+ pdev->id = of_alias_get_id(np, "spi");
+ if (pdev->id < 0)
+ pdev->id = 0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*priv));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ priv = spi_master_get_devdata(master);
+ priv->master = master;
+ priv->is_save_param = false;
+ priv->id = pdev->id;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_master_put;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ ret = PTR_ERR(priv->clk);
+ goto out_master_put;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto out_master_put;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ\n");
+ ret = irq;
+ goto out_disable_clk;
+ }
+
+ priv->rst_regmap =
+ syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst");
+ if (IS_ERR(priv->rst_regmap)) {
+ dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n");
+		ret = PTR_ERR(priv->rst_regmap);
+		goto out_disable_clk;
+ }
+
+ /* reset SPI-HW block */
+ npcm_pspi_reset_hw(priv);
+
+ ret = devm_request_irq(&pdev->dev, irq, npcm_pspi_handler, 0,
+ "npcm-pspi", priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ goto out_disable_clk;
+ }
+
+ init_completion(&priv->xfer_done);
+
+ clk_hz = clk_get_rate(priv->clk);
+
+ master->max_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MIN_CLK_DIVIDER);
+ master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER);
+ master->mode_bits = SPI_CPHA | SPI_CPOL;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->transfer_one = npcm_pspi_transfer_one;
+ master->prepare_transfer_hardware =
+ npcm_pspi_prepare_transfer_hardware;
+ master->unprepare_transfer_hardware =
+ npcm_pspi_unprepare_transfer_hardware;
+ master->num_chipselect = num_cs;
+
+ for (i = 0; i < num_cs; i++) {
+ csgpio = of_get_named_gpio(np, "cs-gpios", i);
+ if (csgpio < 0) {
+ dev_err(&pdev->dev, "failed to get csgpio#%u\n", i);
+			ret = csgpio;
+			goto out_disable_clk;
+ }
+ dev_dbg(&pdev->dev, "csgpio#%u = %d\n", i, csgpio);
+ ret = devm_gpio_request_one(&pdev->dev, csgpio,
+ GPIOF_OUT_INIT_HIGH, DRIVER_NAME);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to configure csgpio#%u %d\n"
+ , i, csgpio);
+ goto out_disable_clk;
+ }
+ }
+
+ /* set to default clock rate */
+ npcm_pspi_set_baudrate(priv, NPCM_PSPI_DEFAULT_CLK);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret)
+ goto out_disable_clk;
+
+ pr_info("NPCM Peripheral SPI %d probed\n", pdev->id);
+
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(priv->clk);
+
+out_master_put:
+ spi_master_put(master);
+ return ret;
+}
+
+static int npcm_pspi_remove(struct platform_device *pdev)
+{
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct npcm_pspi *priv = spi_master_get_devdata(master);
+
+ npcm_pspi_reset_hw(priv);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct of_device_id npcm_pspi_match[] = {
+ { .compatible = "nuvoton,npcm750-pspi", .data = NULL },
+ {}
+};
+MODULE_DEVICE_TABLE(of, npcm_pspi_match);
+
+static struct platform_driver npcm_pspi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = npcm_pspi_match,
+ },
+ .probe = npcm_pspi_probe,
+ .remove = npcm_pspi_remove,
+};
+module_platform_driver(npcm_pspi_driver);
+
+MODULE_DESCRIPTION("NPCM peripheral SPI Controller driver");
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index f024c3fc3679..2fd8881fcd65 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1540,13 +1540,26 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:omap2_mcspi");
-#ifdef CONFIG_SUSPEND
-static int omap2_mcspi_suspend_noirq(struct device *dev)
+static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
{
- return pinctrl_pm_select_sleep_state(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ int error;
+
+ error = pinctrl_pm_select_sleep_state(dev);
+ if (error)
+ dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
+ __func__, error);
+
+ error = spi_master_suspend(master);
+ if (error)
+ dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
+ __func__, error);
+
+ return pm_runtime_force_suspend(dev);
}
-static int omap2_mcspi_resume_noirq(struct device *dev)
+static int __maybe_unused omap2_mcspi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
@@ -1557,17 +1570,17 @@ static int omap2_mcspi_resume_noirq(struct device *dev)
dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
__func__, error);
- return 0;
-}
+ error = spi_master_resume(master);
+ if (error)
+ dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
+ __func__, error);
-#else
-#define omap2_mcspi_suspend_noirq NULL
-#define omap2_mcspi_resume_noirq NULL
-#endif
+ return pm_runtime_force_resume(dev);
+}
static const struct dev_pm_ops omap2_mcspi_pm_ops = {
- .suspend_noirq = omap2_mcspi_suspend_noirq,
- .resume_noirq = omap2_mcspi_resume_noirq,
+ SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
+ omap2_mcspi_resume)
.runtime_resume = omap_mcspi_runtime_resume,
};
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 6120e6abcd96..0c793e31d60f 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -861,11 +861,10 @@ static void dma_callback(void *data)
/* Update total bytes transferred */
msg->actual_length += pl022->cur_transfer->len;
- if (pl022->cur_transfer->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_DESELECT);
-
/* Move to next transfer */
msg->state = next_transfer(pl022);
+ if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_DESELECT);
tasklet_schedule(&pl022->pump_transfers);
}
@@ -1333,10 +1332,10 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
}
/* Update total bytes transferred */
msg->actual_length += pl022->cur_transfer->len;
- if (pl022->cur_transfer->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_DESELECT);
/* Move to next transfer */
msg->state = next_transfer(pl022);
+ if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_DESELECT);
tasklet_schedule(&pl022->pump_transfers);
return IRQ_HANDLED;
}
@@ -1544,10 +1543,11 @@ static void do_polling_transfer(struct pl022 *pl022)
/* Update total byte transferred */
message->actual_length += pl022->cur_transfer->len;
- if (pl022->cur_transfer->cs_change)
- pl022_cs_control(pl022, SSP_CHIP_DESELECT);
/* Move to next transfer */
message->state = next_transfer(pl022);
+ if (message->state != STATE_DONE
+ && pl022->cur_transfer->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_DESELECT);
}
out:
/* Handle end of message */
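All three pl022 hunks above apply the same fix: on a cs_change transfer, drop
chip select only while further transfers remain; after the final transfer the
core owns end-of-message chip-select handling. A runnable userspace model of
that decision (STATE_DONE stands in for the driver's message state):

	#include <stdbool.h>
	#include <stdio.h>

	enum state { STATE_RUNNING, STATE_DONE };

	/* Deassert CS between transfers that request it, never after the
	 * last one.
	 */
	static bool should_deselect(enum state next, bool cs_change)
	{
		return next != STATE_DONE && cs_change;
	}

	int main(void)
	{
		printf("%d\n", should_deselect(STATE_RUNNING, true)); /* 1 */
		printf("%d\n", should_deselect(STATE_DONE, true));    /* 0 */
		return 0;
	}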
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 612cc49db28f..d84b893a64d7 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -626,6 +626,11 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
return IRQ_HANDLED;
}
+ if (irq_status & SSSR_TUR) {
+ int_error_stop(drv_data, "interrupt_transfer: fifo underrun");
+ return IRQ_HANDLED;
+ }
+
if (irq_status & SSSR_TINT) {
pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
if (drv_data->read(drv_data)) {
@@ -1073,6 +1078,30 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
}
+ if (drv_data->ssp_type == MMP2_SSP) {
+ u8 tx_level = (pxa2xx_spi_read(drv_data, SSSR)
+ & SSSR_TFL_MASK) >> 8;
+
+ if (tx_level) {
+ /* On MMP2, flipping SSE doesn't empty the TXFIFO. */
+ dev_warn(&spi->dev, "%d bytes of garbage in TXFIFO!\n",
+ tx_level);
+ if (tx_level > transfer->len)
+ tx_level = transfer->len;
+ drv_data->tx += tx_level;
+ }
+ }
+
+ if (spi_controller_is_slave(master)) {
+ while (drv_data->write(drv_data))
+ ;
+ if (drv_data->gpiod_ready) {
+ gpiod_set_value(drv_data->gpiod_ready, 1);
+ udelay(1);
+ gpiod_set_value(drv_data->gpiod_ready, 0);
+ }
+ }
+
/*
* Release the data by enabling service requests and interrupts,
* without changing any mode bits
@@ -1082,6 +1111,27 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
return 1;
}
+static int pxa2xx_spi_slave_abort(struct spi_master *master)
+{
+ struct driver_data *drv_data = spi_controller_get_devdata(master);
+
+ /* Stop and reset SSP */
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ reset_sccr1(drv_data);
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+ pxa2xx_spi_flush(drv_data);
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+
+ dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
+
+ drv_data->master->cur_msg->status = -EINTR;
+ spi_finalize_current_transfer(drv_data->master);
+
+ return 0;
+}
+
static void pxa2xx_spi_handle_err(struct spi_controller *master,
struct spi_message *msg)
{
@@ -1209,9 +1259,14 @@ static int setup(struct spi_device *spi)
rx_thres = config->rx_threshold;
break;
default:
- tx_thres = TX_THRESH_DFLT;
tx_hi_thres = 0;
- rx_thres = RX_THRESH_DFLT;
+ if (spi_controller_is_slave(drv_data->master)) {
+ tx_thres = 1;
+ rx_thres = 2;
+ } else {
+ tx_thres = TX_THRESH_DFLT;
+ rx_thres = RX_THRESH_DFLT;
+ }
break;
}
@@ -1255,6 +1310,12 @@ static int setup(struct spi_device *spi)
if (chip_info->enable_loopback)
chip->cr1 = SSCR1_LBM;
}
+ if (spi_controller_is_slave(drv_data->master)) {
+ chip->cr1 |= SSCR1_SCFR;
+ chip->cr1 |= SSCR1_SCLKDIR;
+ chip->cr1 |= SSCR1_SFRMDIR;
+ chip->cr1 |= SSCR1_SPH;
+ }
chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
@@ -1500,6 +1561,7 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
ssp->pdev = pdev;
ssp->port_id = pxa2xx_spi_get_port_id(adev);
+ pdata->is_slave = of_property_read_bool(pdev->dev.of_node, "spi-slave");
pdata->num_chipselect = 1;
pdata->enable_dma = true;
@@ -1559,7 +1621,11 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
return -ENODEV;
}
- master = spi_alloc_master(dev, sizeof(struct driver_data));
+ if (platform_info->is_slave)
+ master = spi_alloc_slave(dev, sizeof(struct driver_data));
+ else
+ master = spi_alloc_master(dev, sizeof(struct driver_data));
+
if (!master) {
dev_err(&pdev->dev, "cannot alloc spi_master\n");
pxa_ssp_free(ssp);
@@ -1581,6 +1647,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
master->setup = setup;
master->set_cs = pxa2xx_spi_set_cs;
master->transfer_one = pxa2xx_spi_transfer_one;
+ master->slave_abort = pxa2xx_spi_slave_abort;
master->handle_err = pxa2xx_spi_handle_err;
master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
@@ -1610,7 +1677,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
drv_data->dma_cr1 = DEFAULT_DMA_CR1;
drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
- drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
+ drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS
+ | SSSR_ROR | SSSR_TUR;
}
status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
@@ -1658,10 +1726,22 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
default:
- tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
- SSCR1_TxTresh(TX_THRESH_DFLT);
+
+ if (spi_controller_is_slave(master)) {
+ tmp = SSCR1_SCFR |
+ SSCR1_SCLKDIR |
+ SSCR1_SFRMDIR |
+ SSCR1_RxTresh(2) |
+ SSCR1_TxTresh(1) |
+ SSCR1_SPH;
+ } else {
+ tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
+ SSCR1_TxTresh(TX_THRESH_DFLT);
+ }
pxa2xx_spi_write(drv_data, SSCR1, tmp);
- tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
+ tmp = SSCR0_Motorola | SSCR0_DataSize(8);
+ if (!spi_controller_is_slave(master))
+ tmp |= SSCR0_SCR(2);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
}
@@ -1711,7 +1791,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (PTR_ERR(gpiod) == -ENOENT)
continue;
- status = (int)PTR_ERR(gpiod);
+ status = PTR_ERR(gpiod);
goto out_error_clock_enabled;
} else {
drv_data->cs_gpiods[i] = gpiod;
@@ -1719,6 +1799,15 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
}
+ if (platform_info->is_slave) {
+ drv_data->gpiod_ready = devm_gpiod_get_optional(dev,
+ "ready", GPIOD_OUT_LOW);
+ if (IS_ERR(drv_data->gpiod_ready)) {
+ status = PTR_ERR(drv_data->gpiod_ready);
+ goto out_error_clock_enabled;
+ }
+ }
+
pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
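A short sketch of the optional-GPIO idiom the probe hunk uses for the slave
"ready" line. devm_gpiod_get_optional() returns NULL rather than an error when
the device tree omits the property, and gpiod calls accept a NULL descriptor
as a no-op, so only the IS_ERR() case needs handling. Helper names here are
illustrative:

	#include <linux/delay.h>
	#include <linux/device.h>
	#include <linux/gpio/consumer.h>

	/* NULL if absent, ERR_PTR() on real failure, valid desc otherwise */
	static struct gpio_desc *get_ready_gpio(struct device *dev)
	{
		return devm_gpiod_get_optional(dev, "ready", GPIOD_OUT_LOW);
	}

	/* Pulse the line to tell the master our FIFO is primed; a NULL
	 * descriptor makes both writes harmless no-ops.
	 */
	static void pulse_ready(struct gpio_desc *ready)
	{
		gpiod_set_value(ready, 1);
		udelay(1);
		gpiod_set_value(ready, 0);
	}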
@@ -1811,10 +1900,6 @@ static int pxa2xx_spi_resume(struct device *dev)
return status;
}
- /* Restore LPSS private register bits */
- if (is_lpss_ssp(drv_data))
- lpss_ssp_setup(drv_data);
-
/* Start the queue running */
return spi_controller_resume(drv_data->master);
}
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 513c53aaeab2..4e324da66ef7 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -64,6 +64,9 @@ struct driver_data {
/* GPIOs for chip selects */
struct gpio_desc **cs_gpiods;
+
+ /* Optional slave FIFO ready signal */
+ struct gpio_desc *gpiod_ready;
};
struct chip_data {
diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c
index b8163b40bb92..e0f061139c8f 100644
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -90,6 +90,9 @@
#define PIO_DATAOUT_1B 0x0020
#define PIO_DATAOUT_4B 0x0024
+#define RD_FIFO_CFG 0x0028
+#define CONTINUOUS_MODE BIT(0)
+
#define RD_FIFO_STATUS 0x002c
#define FIFO_EMPTY BIT(11)
#define WR_CNTS_MSK 0x7f0
@@ -99,9 +102,6 @@
#define RDY_16BYTE BIT(1)
#define FIFO_RDY BIT(0)
-#define RD_FIFO_CFG 0x0028
-#define CONTINUOUS_MODE BIT(0)
-
#define RD_FIFO_RESET 0x0030
#define RESET_FIFO BIT(0)
@@ -139,7 +139,7 @@ struct qcom_qspi {
struct device *dev;
struct clk_bulk_data clks[QSPI_NUM_CLKS];
struct qspi_xfer xfer;
- /* Lock to protect data accessed by IRQs */
+ /* Lock to protect xfer and IRQ accessed registers */
spinlock_t lock;
};
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 51ef632bca52..3912526ead66 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -54,6 +54,9 @@
/* Bit fields in CTRLR0 */
#define CR0_DFS_OFFSET 0
+#define CR0_DFS_4BIT 0x0
+#define CR0_DFS_8BIT 0x1
+#define CR0_DFS_16BIT 0x2
#define CR0_CFS_OFFSET 2
@@ -94,6 +97,7 @@
#define CR0_BHT_8BIT 0x1
#define CR0_RSD_OFFSET 14
+#define CR0_RSD_MAX 0x3
#define CR0_FRF_OFFSET 16
#define CR0_FRF_SPI 0x0
@@ -115,6 +119,10 @@
/* Bit fields in SER, 2bit */
#define SER_MASK 0x3
+/* Bit fields in BAUDR */
+#define BAUDR_SCKDV_MIN 2
+#define BAUDR_SCKDV_MAX 65534
+
/* Bit fields in SR, 5bit */
#define SR_MASK 0x1f
#define SR_BUSY (1 << 0)
@@ -142,11 +150,12 @@
#define RF_DMA_EN (1 << 0)
#define TF_DMA_EN (1 << 1)
-#define RXBUSY (1 << 0)
-#define TXBUSY (1 << 1)
+/* Driver state flags */
+#define RXDMA (1 << 0)
+#define TXDMA (1 << 1)
/* sclk_out: spi master internal logic in rk3x can support 50 MHz */
-#define MAX_SCLK_OUT 50000000
+#define MAX_SCLK_OUT 50000000U
/*
* SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
@@ -156,72 +165,37 @@
#define ROCKCHIP_SPI_MAX_CS_NUM 2
-enum rockchip_ssi_type {
- SSI_MOTO_SPI = 0,
- SSI_TI_SSP,
- SSI_NS_MICROWIRE,
-};
-
-struct rockchip_spi_dma_data {
- struct dma_chan *ch;
- dma_addr_t addr;
-};
-
struct rockchip_spi {
struct device *dev;
- struct spi_master *master;
struct clk *spiclk;
struct clk *apb_pclk;
void __iomem *regs;
- /*depth of the FIFO buffer */
- u32 fifo_len;
- /* max bus freq supported */
- u32 max_freq;
- /* supported slave numbers */
- enum rockchip_ssi_type type;
-
- u16 mode;
- u8 tmode;
- u8 bpw;
- u8 n_bytes;
- u32 rsd_nsecs;
- unsigned len;
- u32 speed;
+ dma_addr_t dma_addr_rx;
+ dma_addr_t dma_addr_tx;
const void *tx;
- const void *tx_end;
void *rx;
- void *rx_end;
+ unsigned int tx_left;
+ unsigned int rx_left;
- u32 state;
- /* protect state */
- spinlock_t lock;
+ atomic_t state;
- bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
-
- bool use_dma;
- struct sg_table tx_sg;
- struct sg_table rx_sg;
- struct rockchip_spi_dma_data dma_rx;
- struct rockchip_spi_dma_data dma_tx;
-};
+ /* depth of the FIFO buffer */
+ u32 fifo_len;
+ /* frequency of spiclk */
+ u32 freq;
-static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
-{
- writel_relaxed((enable ? 1 : 0), rs->regs + ROCKCHIP_SPI_SSIENR);
-}
+ u8 n_bytes;
+ u8 rsd;
-static inline void spi_set_clk(struct rockchip_spi *rs, u16 div)
-{
- writel_relaxed(div, rs->regs + ROCKCHIP_SPI_BAUDR);
-}
+ bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
+};
-static inline void flush_fifo(struct rockchip_spi *rs)
+static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
{
- while (readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR))
- readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+ writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
}
static inline void wait_for_idle(struct rockchip_spi *rs)
@@ -251,24 +225,6 @@ static u32 get_fifo_len(struct rockchip_spi *rs)
return (fifo == 31) ? 0 : fifo;
}
-static inline u32 tx_max(struct rockchip_spi *rs)
-{
- u32 tx_left, tx_room;
-
- tx_left = (rs->tx_end - rs->tx) / rs->n_bytes;
- tx_room = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
-
- return min(tx_left, tx_room);
-}
-
-static inline u32 rx_max(struct rockchip_spi *rs)
-{
- u32 rx_left = (rs->rx_end - rs->rx) / rs->n_bytes;
- u32 rx_room = (u32)readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
-
- return min(rx_left, rx_room);
-}
-
static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
{
struct spi_master *master = spi->master;
@@ -296,64 +252,39 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
rs->cs_asserted[spi->chip_select] = cs_asserted;
}
-static int rockchip_spi_prepare_message(struct spi_master *master,
- struct spi_message *msg)
-{
- struct rockchip_spi *rs = spi_master_get_devdata(master);
- struct spi_device *spi = msg->spi;
-
- rs->mode = spi->mode;
-
- return 0;
-}
-
static void rockchip_spi_handle_err(struct spi_master *master,
struct spi_message *msg)
{
- unsigned long flags;
struct rockchip_spi *rs = spi_master_get_devdata(master);
- spin_lock_irqsave(&rs->lock, flags);
-
- /*
- * For DMA mode, we need terminate DMA channel and flush
- * fifo for the next transfer if DMA thansfer timeout.
- * handle_err() was called by core if transfer failed.
- * Maybe it is reasonable for error handling here.
+ /* stop running spi transfer
+ * this also flushes both rx and tx fifos
*/
- if (rs->use_dma) {
- if (rs->state & RXBUSY) {
- dmaengine_terminate_async(rs->dma_rx.ch);
- flush_fifo(rs);
- }
-
- if (rs->state & TXBUSY)
- dmaengine_terminate_async(rs->dma_tx.ch);
- }
+ spi_enable_chip(rs, false);
- spin_unlock_irqrestore(&rs->lock, flags);
-}
+ /* make sure all interrupts are masked */
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
-static int rockchip_spi_unprepare_message(struct spi_master *master,
- struct spi_message *msg)
-{
- struct rockchip_spi *rs = spi_master_get_devdata(master);
+ if (atomic_read(&rs->state) & TXDMA)
+ dmaengine_terminate_async(master->dma_tx);
- spi_enable_chip(rs, 0);
-
- return 0;
+ if (atomic_read(&rs->state) & RXDMA)
+ dmaengine_terminate_async(master->dma_rx);
}
static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
{
- u32 max = tx_max(rs);
- u32 txw = 0;
+ u32 tx_free = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
+ u32 words = min(rs->tx_left, tx_free);
+
+ rs->tx_left -= words;
+ for (; words; words--) {
+ u32 txw;
- while (max--) {
if (rs->n_bytes == 1)
- txw = *(u8 *)(rs->tx);
+ txw = *(u8 *)rs->tx;
else
- txw = *(u16 *)(rs->tx);
+ txw = *(u16 *)rs->tx;
writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
rs->tx += rs->n_bytes;
@@ -362,229 +293,249 @@ static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
{
- u32 max = rx_max(rs);
- u32 rxw;
+ u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+ u32 rx_left = rs->rx_left - words;
+
+ /* the hardware doesn't allow us to change fifo threshold
+ * level while spi is enabled, so instead make sure to leave
+ * enough words in the rx fifo to get the last interrupt
+ * exactly when all words have been received
+ */
+ if (rx_left) {
+ u32 ftl = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFTLR) + 1;
+
+ if (rx_left < ftl) {
+ rx_left = ftl;
+ words = rs->rx_left - rx_left;
+ }
+ }
+
+ rs->rx_left = rx_left;
+ for (; words; words--) {
+ u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+
+ if (!rs->rx)
+ continue;
- while (max--) {
- rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
if (rs->n_bytes == 1)
- *(u8 *)(rs->rx) = (u8)rxw;
+ *(u8 *)rs->rx = (u8)rxw;
else
- *(u16 *)(rs->rx) = (u16)rxw;
+ *(u16 *)rs->rx = (u16)rxw;
rs->rx += rs->n_bytes;
}
}
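The reader above intentionally leaves words in the RX FIFO whenever more data
is still expected, because the threshold register cannot be changed while the
controller is enabled. The bookkeeping is easier to verify in isolation; a
runnable model (ftl is the programmed threshold plus one, and the FIFO level
never exceeds the words still owed):

	#include <stdio.h>

	static unsigned int words_to_read(unsigned int fifo_level,
					  unsigned int rx_left,
					  unsigned int ftl)
	{
		unsigned int words = fifo_level;
		unsigned int left = rx_left - words;

		/* keep at least ftl words behind so the RX-full interrupt
		 * can fire one final time
		 */
		if (left && left < ftl)
			words = rx_left - ftl;
		return words;
	}

	int main(void)
	{
		/* 30 buffered, 32 owed, ftl 16: read 16, leave 16 behind */
		printf("%u\n", words_to_read(30, 32, 16)); /* 16 */
		/* final chunk: drain everything */
		printf("%u\n", words_to_read(8, 8, 16));   /* 8 */
		return 0;
	}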
-static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
+static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
{
- int remain = 0;
+ struct spi_master *master = dev_id;
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
- spi_enable_chip(rs, 1);
+ if (rs->tx_left)
+ rockchip_spi_pio_writer(rs);
- do {
- if (rs->tx) {
- remain = rs->tx_end - rs->tx;
- rockchip_spi_pio_writer(rs);
- }
+ rockchip_spi_pio_reader(rs);
+ if (!rs->rx_left) {
+ spi_enable_chip(rs, false);
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
+ spi_finalize_current_transfer(master);
+ }
- if (rs->rx) {
- remain = rs->rx_end - rs->rx;
- rockchip_spi_pio_reader(rs);
- }
+ return IRQ_HANDLED;
+}
- cpu_relax();
- } while (remain);
+static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
+ struct spi_transfer *xfer)
+{
+ rs->tx = xfer->tx_buf;
+ rs->rx = xfer->rx_buf;
+ rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
+ rs->rx_left = xfer->len / rs->n_bytes;
- /* If tx, wait until the FIFO data completely. */
- if (rs->tx)
- wait_for_idle(rs);
+ writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
+ spi_enable_chip(rs, true);
- spi_enable_chip(rs, 0);
+ if (rs->tx_left)
+ rockchip_spi_pio_writer(rs);
- return 0;
+ /* 1 means the transfer is in progress */
+ return 1;
}
static void rockchip_spi_dma_rxcb(void *data)
{
- unsigned long flags;
- struct rockchip_spi *rs = data;
-
- spin_lock_irqsave(&rs->lock, flags);
+ struct spi_master *master = data;
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(RXDMA, &rs->state);
- rs->state &= ~RXBUSY;
- if (!(rs->state & TXBUSY)) {
- spi_enable_chip(rs, 0);
- spi_finalize_current_transfer(rs->master);
- }
+ if (state & TXDMA)
+ return;
- spin_unlock_irqrestore(&rs->lock, flags);
+ spi_enable_chip(rs, false);
+ spi_finalize_current_transfer(master);
}
static void rockchip_spi_dma_txcb(void *data)
{
- unsigned long flags;
- struct rockchip_spi *rs = data;
+ struct spi_master *master = data;
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+ int state = atomic_fetch_andnot(TXDMA, &rs->state);
+
+ if (state & RXDMA)
+ return;
/* Wait until the FIFO data is completely transmitted. */
wait_for_idle(rs);
- spin_lock_irqsave(&rs->lock, flags);
-
- rs->state &= ~TXBUSY;
- if (!(rs->state & RXBUSY)) {
- spi_enable_chip(rs, 0);
- spi_finalize_current_transfer(rs->master);
- }
-
- spin_unlock_irqrestore(&rs->lock, flags);
+ spi_enable_chip(rs, false);
+ spi_finalize_current_transfer(master);
}
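The two callbacks above replace the old spinlock-protected state with a single
atomic: each direction clears its own bit, and whichever callback observes the
other bit already cleared finalizes the transfer. A runnable C11 model of that
"last one out" handshake (atomic_fetch_and() plays the role of the kernel's
atomic_fetch_andnot()):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define RXDMA (1 << 0)
	#define TXDMA (1 << 1)

	/* returns true when the caller is the last finisher */
	static bool finish_if_last(atomic_int *state, int mine, int other)
	{
		int prev = atomic_fetch_and(state, ~mine);

		return !(prev & other);
	}

	int main(void)
	{
		atomic_int state = RXDMA | TXDMA;

		printf("%d\n", finish_if_last(&state, TXDMA, RXDMA)); /* 0 */
		printf("%d\n", finish_if_last(&state, RXDMA, TXDMA)); /* 1 */
		return 0;
	}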
-static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
+static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
+ struct spi_master *master, struct spi_transfer *xfer)
{
- unsigned long flags;
- struct dma_slave_config rxconf, txconf;
struct dma_async_tx_descriptor *rxdesc, *txdesc;
- memset(&rxconf, 0, sizeof(rxconf));
- memset(&txconf, 0, sizeof(txconf));
-
- spin_lock_irqsave(&rs->lock, flags);
- rs->state &= ~RXBUSY;
- rs->state &= ~TXBUSY;
- spin_unlock_irqrestore(&rs->lock, flags);
+ atomic_set(&rs->state, 0);
rxdesc = NULL;
- if (rs->rx) {
- rxconf.direction = DMA_DEV_TO_MEM;
- rxconf.src_addr = rs->dma_rx.addr;
- rxconf.src_addr_width = rs->n_bytes;
- rxconf.src_maxburst = 1;
- dmaengine_slave_config(rs->dma_rx.ch, &rxconf);
+ if (xfer->rx_buf) {
+ struct dma_slave_config rxconf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = rs->dma_addr_rx,
+ .src_addr_width = rs->n_bytes,
+ .src_maxburst = 1,
+ };
+
+ dmaengine_slave_config(master->dma_rx, &rxconf);
rxdesc = dmaengine_prep_slave_sg(
- rs->dma_rx.ch,
- rs->rx_sg.sgl, rs->rx_sg.nents,
+ master->dma_rx,
+ xfer->rx_sg.sgl, xfer->rx_sg.nents,
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (!rxdesc)
return -EINVAL;
rxdesc->callback = rockchip_spi_dma_rxcb;
- rxdesc->callback_param = rs;
+ rxdesc->callback_param = master;
}
txdesc = NULL;
- if (rs->tx) {
- txconf.direction = DMA_MEM_TO_DEV;
- txconf.dst_addr = rs->dma_tx.addr;
- txconf.dst_addr_width = rs->n_bytes;
- txconf.dst_maxburst = rs->fifo_len / 2;
- dmaengine_slave_config(rs->dma_tx.ch, &txconf);
+ if (xfer->tx_buf) {
+ struct dma_slave_config txconf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = rs->dma_addr_tx,
+ .dst_addr_width = rs->n_bytes,
+ .dst_maxburst = rs->fifo_len / 2,
+ };
+
+ dmaengine_slave_config(master->dma_tx, &txconf);
txdesc = dmaengine_prep_slave_sg(
- rs->dma_tx.ch,
- rs->tx_sg.sgl, rs->tx_sg.nents,
+ master->dma_tx,
+ xfer->tx_sg.sgl, xfer->tx_sg.nents,
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!txdesc) {
if (rxdesc)
- dmaengine_terminate_sync(rs->dma_rx.ch);
+ dmaengine_terminate_sync(master->dma_rx);
return -EINVAL;
}
txdesc->callback = rockchip_spi_dma_txcb;
- txdesc->callback_param = rs;
+ txdesc->callback_param = master;
}
/* rx must be started before tx due to spi instinct */
if (rxdesc) {
- spin_lock_irqsave(&rs->lock, flags);
- rs->state |= RXBUSY;
- spin_unlock_irqrestore(&rs->lock, flags);
+ atomic_or(RXDMA, &rs->state);
dmaengine_submit(rxdesc);
- dma_async_issue_pending(rs->dma_rx.ch);
+ dma_async_issue_pending(master->dma_rx);
}
- spi_enable_chip(rs, 1);
+ spi_enable_chip(rs, true);
if (txdesc) {
- spin_lock_irqsave(&rs->lock, flags);
- rs->state |= TXBUSY;
- spin_unlock_irqrestore(&rs->lock, flags);
+ atomic_or(TXDMA, &rs->state);
dmaengine_submit(txdesc);
- dma_async_issue_pending(rs->dma_tx.ch);
+ dma_async_issue_pending(master->dma_tx);
}
/* 1 means the transfer is in progress */
return 1;
}
-static void rockchip_spi_config(struct rockchip_spi *rs)
+static void rockchip_spi_config(struct rockchip_spi *rs,
+ struct spi_device *spi, struct spi_transfer *xfer,
+ bool use_dma)
{
- u32 div = 0;
+ u32 cr0 = CR0_FRF_SPI << CR0_FRF_OFFSET
+ | CR0_BHT_8BIT << CR0_BHT_OFFSET
+ | CR0_SSD_ONE << CR0_SSD_OFFSET
+ | CR0_EM_BIG << CR0_EM_OFFSET;
+ u32 cr1;
u32 dmacr = 0;
- int rsd = 0;
-
- u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
- | (CR0_SSD_ONE << CR0_SSD_OFFSET)
- | (CR0_EM_BIG << CR0_EM_OFFSET);
- cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
- cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
- cr0 |= (rs->tmode << CR0_XFM_OFFSET);
- cr0 |= (rs->type << CR0_FRF_OFFSET);
+ cr0 |= rs->rsd << CR0_RSD_OFFSET;
+ cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
+ if (spi->mode & SPI_LSB_FIRST)
+ cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
+
+ if (xfer->rx_buf && xfer->tx_buf)
+ cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
+ else if (xfer->rx_buf)
+ cr0 |= CR0_XFM_RO << CR0_XFM_OFFSET;
+ else if (use_dma)
+ cr0 |= CR0_XFM_TO << CR0_XFM_OFFSET;
+
+ switch (xfer->bits_per_word) {
+ case 4:
+ cr0 |= CR0_DFS_4BIT << CR0_DFS_OFFSET;
+ cr1 = xfer->len - 1;
+ break;
+ case 8:
+ cr0 |= CR0_DFS_8BIT << CR0_DFS_OFFSET;
+ cr1 = xfer->len - 1;
+ break;
+ case 16:
+ cr0 |= CR0_DFS_16BIT << CR0_DFS_OFFSET;
+ cr1 = xfer->len / 2 - 1;
+ break;
+ default:
+ /* we only whitelist 4, 8 and 16 bit words in
+ * master->bits_per_word_mask, so this shouldn't
+ * happen
+ */
+ unreachable();
+ }
- if (rs->use_dma) {
- if (rs->tx)
+ if (use_dma) {
+ if (xfer->tx_buf)
dmacr |= TF_DMA_EN;
- if (rs->rx)
+ if (xfer->rx_buf)
dmacr |= RF_DMA_EN;
}
- if (WARN_ON(rs->speed > MAX_SCLK_OUT))
- rs->speed = MAX_SCLK_OUT;
-
- /* the minimum divisor is 2 */
- if (rs->max_freq < 2 * rs->speed) {
- clk_set_rate(rs->spiclk, 2 * rs->speed);
- rs->max_freq = clk_get_rate(rs->spiclk);
- }
-
- /* div doesn't support odd number */
- div = DIV_ROUND_UP(rs->max_freq, rs->speed);
- div = (div + 1) & 0xfffe;
-
- /* Rx sample delay is expressed in parent clock cycles (max 3) */
- rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8),
- 1000000000 >> 8);
- if (!rsd && rs->rsd_nsecs) {
- pr_warn_once("rockchip-spi: %u Hz are too slow to express %u ns delay\n",
- rs->max_freq, rs->rsd_nsecs);
- } else if (rsd > 3) {
- rsd = 3;
- pr_warn_once("rockchip-spi: %u Hz are too fast to express %u ns delay, clamping at %u ns\n",
- rs->max_freq, rs->rsd_nsecs,
- rsd * 1000000000U / rs->max_freq);
- }
- cr0 |= rsd << CR0_RSD_OFFSET;
-
writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
+ writel_relaxed(cr1, rs->regs + ROCKCHIP_SPI_CTRLR1);
- if (rs->n_bytes == 1)
- writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
- else if (rs->n_bytes == 2)
- writel_relaxed((rs->len / 2) - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
+ /* unfortunately setting the fifo threshold level to generate an
+ * interrupt exactly when the fifo is full doesn't seem to work,
+ * so we need the strict inequality here
+ */
+ if (xfer->len < rs->fifo_len)
+ writel_relaxed(xfer->len - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
else
- writel_relaxed((rs->len * 2) - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
-
- writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
- writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
+ writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
- spi_set_clk(rs, div);
-
- dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
+ /* the hardware only supports an even clock divisor, so
+ * round divisor = spiclk / speed up to nearest even number
+ * so that the resulting speed is <= the requested speed
+ */
+ writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
+ rs->regs + ROCKCHIP_SPI_BAUDR);
}
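The BAUDR write at the end of the function hides a small numeric argument: the
divider must be even, and rounding it up guarantees the produced SCLK never
exceeds the requested speed. A standalone check of that arithmetic (even_div()
open-codes DIV_ROUND_UP()):

	#include <stdio.h>

	/* smallest even divider d with spiclk / d <= speed */
	static unsigned int even_div(unsigned int spiclk, unsigned int speed)
	{
		return 2 * ((spiclk + 2 * speed - 1) / (2 * speed));
	}

	int main(void)
	{
		/* 99 MHz parent, 10 MHz requested: div 10 -> 9.9 MHz SCLK */
		printf("%u\n", even_div(99000000, 10000000));  /* 10 */
		/* exact ratios stay exact: 100 MHz / 25 MHz -> div 4 */
		printf("%u\n", even_div(100000000, 25000000)); /* 4 */
		return 0;
	}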
static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
@@ -598,6 +549,7 @@ static int rockchip_spi_transfer_one(
struct spi_transfer *xfer)
{
struct rockchip_spi *rs = spi_master_get_devdata(master);
+ bool use_dma;
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
@@ -612,38 +564,16 @@ static int rockchip_spi_transfer_one(
return -EINVAL;
}
- rs->speed = xfer->speed_hz;
- rs->bpw = xfer->bits_per_word;
- rs->n_bytes = rs->bpw >> 3;
+ rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
- rs->tx = xfer->tx_buf;
- rs->tx_end = rs->tx + xfer->len;
- rs->rx = xfer->rx_buf;
- rs->rx_end = rs->rx + xfer->len;
- rs->len = xfer->len;
-
- rs->tx_sg = xfer->tx_sg;
- rs->rx_sg = xfer->rx_sg;
-
- if (rs->tx && rs->rx)
- rs->tmode = CR0_XFM_TR;
- else if (rs->tx)
- rs->tmode = CR0_XFM_TO;
- else if (rs->rx)
- rs->tmode = CR0_XFM_RO;
-
- /* we need prepare dma before spi was enabled */
- if (master->can_dma && master->can_dma(master, spi, xfer))
- rs->use_dma = true;
- else
- rs->use_dma = false;
+ use_dma = master->can_dma ? master->can_dma(master, spi, xfer) : false;
- rockchip_spi_config(rs);
+ rockchip_spi_config(rs, spi, xfer, use_dma);
- if (rs->use_dma)
- return rockchip_spi_prepare_dma(rs);
+ if (use_dma)
+ return rockchip_spi_prepare_dma(rs, master, xfer);
- return rockchip_spi_pio_transfer(rs);
+ return rockchip_spi_prepare_irq(rs, xfer);
}
static bool rockchip_spi_can_dma(struct spi_master *master,
@@ -651,8 +581,13 @@ static bool rockchip_spi_can_dma(struct spi_master *master,
struct spi_transfer *xfer)
{
struct rockchip_spi *rs = spi_master_get_devdata(master);
+ unsigned int bytes_per_word = xfer->bits_per_word <= 8 ? 1 : 2;
- return (xfer->len > rs->fifo_len);
+ /* if the number of spi words to transfer is less than the fifo
+ * length we can just fill the fifo and wait for a single irq,
+ * so don't bother setting up dma
+ */
+ return xfer->len / bytes_per_word >= rs->fifo_len;
}
static int rockchip_spi_probe(struct platform_device *pdev)
@@ -705,16 +640,36 @@ static int rockchip_spi_probe(struct platform_device *pdev)
goto err_disable_apbclk;
}
- spi_enable_chip(rs, 0);
+ spi_enable_chip(rs, false);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_disable_spiclk;
+
+ ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL,
+ IRQF_ONESHOT, dev_name(&pdev->dev), master);
+ if (ret)
+ goto err_disable_spiclk;
- rs->type = SSI_MOTO_SPI;
- rs->master = master;
rs->dev = &pdev->dev;
- rs->max_freq = clk_get_rate(rs->spiclk);
+ rs->freq = clk_get_rate(rs->spiclk);
if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
- &rsd_nsecs))
- rs->rsd_nsecs = rsd_nsecs;
+ &rsd_nsecs)) {
+ /* rx sample delay is expressed in parent clock cycles (max 3) */
+ u32 rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (rs->freq >> 8),
+ 1000000000 >> 8);
+ if (!rsd) {
+ dev_warn(rs->dev, "%u Hz are too slow to express %u ns delay\n",
+ rs->freq, rsd_nsecs);
+ } else if (rsd > CR0_RSD_MAX) {
+ rsd = CR0_RSD_MAX;
+ dev_warn(rs->dev, "%u Hz are too fast to express %u ns delay, clamping at %u ns\n",
+ rs->freq, rsd_nsecs,
+ CR0_RSD_MAX * 1000000000U / rs->freq);
+ }
+ rs->rsd = rsd;
+ }
rs->fifo_len = get_fifo_len(rs);
if (!rs->fifo_len) {
@@ -723,54 +678,49 @@ static int rockchip_spi_probe(struct platform_device *pdev)
goto err_disable_spiclk;
}
- spin_lock_init(&rs->lock);
-
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
master->auto_runtime_pm = true;
master->bus_num = pdev->id;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
master->num_chipselect = ROCKCHIP_SPI_MAX_CS_NUM;
master->dev.of_node = pdev->dev.of_node;
- master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+ master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4);
+ master->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
+ master->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
master->set_cs = rockchip_spi_set_cs;
- master->prepare_message = rockchip_spi_prepare_message;
- master->unprepare_message = rockchip_spi_unprepare_message;
master->transfer_one = rockchip_spi_transfer_one;
master->max_transfer_size = rockchip_spi_max_transfer_size;
master->handle_err = rockchip_spi_handle_err;
master->flags = SPI_MASTER_GPIO_SS;
- rs->dma_tx.ch = dma_request_chan(rs->dev, "tx");
- if (IS_ERR(rs->dma_tx.ch)) {
+ master->dma_tx = dma_request_chan(rs->dev, "tx");
+ if (IS_ERR(master->dma_tx)) {
/* Check tx to see if we need defer probing driver */
- if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
+ if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_disable_pm_runtime;
}
dev_warn(rs->dev, "Failed to request TX DMA channel\n");
- rs->dma_tx.ch = NULL;
+ master->dma_tx = NULL;
}
- rs->dma_rx.ch = dma_request_chan(rs->dev, "rx");
- if (IS_ERR(rs->dma_rx.ch)) {
- if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) {
+ master->dma_rx = dma_request_chan(rs->dev, "rx");
+ if (IS_ERR(master->dma_rx)) {
+ if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto err_free_dma_tx;
}
dev_warn(rs->dev, "Failed to request RX DMA channel\n");
- rs->dma_rx.ch = NULL;
+ master->dma_rx = NULL;
}
- if (rs->dma_tx.ch && rs->dma_rx.ch) {
- rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
- rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
-
+ if (master->dma_tx && master->dma_rx) {
+ rs->dma_addr_tx = mem->start + ROCKCHIP_SPI_TXDR;
+ rs->dma_addr_rx = mem->start + ROCKCHIP_SPI_RXDR;
master->can_dma = rockchip_spi_can_dma;
- master->dma_tx = rs->dma_tx.ch;
- master->dma_rx = rs->dma_rx.ch;
}
ret = devm_spi_register_master(&pdev->dev, master);
@@ -782,11 +732,11 @@ static int rockchip_spi_probe(struct platform_device *pdev)
return 0;
err_free_dma_rx:
- if (rs->dma_rx.ch)
- dma_release_channel(rs->dma_rx.ch);
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
err_free_dma_tx:
- if (rs->dma_tx.ch)
- dma_release_channel(rs->dma_tx.ch);
+ if (master->dma_tx)
+ dma_release_channel(master->dma_tx);
err_disable_pm_runtime:
pm_runtime_disable(&pdev->dev);
err_disable_spiclk:
@@ -813,10 +763,10 @@ static int rockchip_spi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
- if (rs->dma_tx.ch)
- dma_release_channel(rs->dma_tx.ch);
- if (rs->dma_rx.ch)
- dma_release_channel(rs->dma_rx.ch);
+ if (master->dma_tx)
+ dma_release_channel(master->dma_tx);
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
spi_master_put(master);
@@ -828,9 +778,8 @@ static int rockchip_spi_suspend(struct device *dev)
{
int ret;
struct spi_master *master = dev_get_drvdata(dev);
- struct rockchip_spi *rs = spi_master_get_devdata(master);
- ret = spi_master_suspend(rs->master);
+ ret = spi_master_suspend(master);
if (ret < 0)
return ret;
@@ -855,7 +804,7 @@ static int rockchip_spi_resume(struct device *dev)
if (ret < 0)
return ret;
- ret = spi_master_resume(rs->master);
+ ret = spi_master_resume(master);
if (ret < 0) {
clk_disable_unprepare(rs->spiclk);
clk_disable_unprepare(rs->apb_pclk);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 55f8e55327b3..a4ef641b5227 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1347,16 +1347,14 @@ MODULE_DEVICE_TABLE(platform, spi_driver_ids);
#ifdef CONFIG_PM_SLEEP
static int rspi_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rspi_data *rspi = platform_get_drvdata(pdev);
+ struct rspi_data *rspi = dev_get_drvdata(dev);
return spi_master_suspend(rspi->master);
}
static int rspi_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct rspi_data *rspi = platform_get_drvdata(pdev);
+ struct rspi_data *rspi = dev_get_drvdata(dev);
return spi_master_resume(rspi->master);
}
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index adf384323934..d14b407cc800 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -977,7 +977,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
return 0;
}
- if (bits <= 8 && len > 15 && !(len & 3)) {
+ if (bits <= 8 && len > 15) {
bits = 32;
swab = true;
} else {
@@ -1038,6 +1038,14 @@ static int sh_msiof_transfer_one(struct spi_master *master,
if (rx_buf)
rx_buf += n * bytes_per_word;
words -= n;
+
+ if (words == 0 && (len % bytes_per_word)) {
+ words = len % bytes_per_word;
+ bits = t->bits_per_word;
+ bytes_per_word = 1;
+ tx_fifo = sh_msiof_spi_write_fifo_8;
+ rx_fifo = sh_msiof_spi_read_fifo_8;
+ }
}
return 0;
@@ -1426,16 +1434,14 @@ MODULE_DEVICE_TABLE(platform, spi_driver_ids);
#ifdef CONFIG_PM_SLEEP
static int sh_msiof_spi_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+ struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
return spi_master_suspend(p->master);
}
static int sh_msiof_spi_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+ struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
return spi_master_resume(p->master);
}
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index cc4d31033494..9f83e1b17aa1 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -960,8 +960,7 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
*/
static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
clk_disable(xqspi->refclk);
@@ -980,8 +979,7 @@ static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
*/
static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
int ret;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6ca59406b0b7..9a7def7c3237 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1,10 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SPI init/core code
- *
- * Copyright (C) 2005 David Brownell
- * Copyright (C) 2008 Secret Lab Technologies Ltd.
- */
+// SPI init/core code
+//
+// Copyright (C) 2005 David Brownell
+// Copyright (C) 2008 Secret Lab Technologies Ltd.
#include <linux/kernel.h>
#include <linux/device.h>
@@ -1037,6 +1035,42 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
return __spi_map_msg(ctlr, msg);
}
+static int spi_transfer_wait(struct spi_controller *ctlr,
+ struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ struct spi_statistics *statm = &ctlr->statistics;
+ struct spi_statistics *stats = &msg->spi->statistics;
+ unsigned long long ms = 1;
+
+ if (spi_controller_is_slave(ctlr)) {
+ if (wait_for_completion_interruptible(&ctlr->xfer_completion)) {
+ dev_dbg(&msg->spi->dev, "SPI transfer interrupted\n");
+ return -EINTR;
+ }
+ } else {
+ ms = 8LL * 1000LL * xfer->len;
+ do_div(ms, xfer->speed_hz);
+ ms += ms + 200; /* some tolerance */
+
+ if (ms > UINT_MAX)
+ ms = UINT_MAX;
+
+ ms = wait_for_completion_timeout(&ctlr->xfer_completion,
+ msecs_to_jiffies(ms));
+
+ if (ms == 0) {
+ SPI_STATISTICS_INCREMENT_FIELD(statm, timedout);
+ SPI_STATISTICS_INCREMENT_FIELD(stats, timedout);
+ dev_err(&msg->spi->dev,
+ "SPI transfer timed out\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
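The master-side branch of spi_transfer_wait() sizes its timeout from the
transfer itself: wire time at 8 bits per byte, doubled, plus 200 ms of slack.
A worked version of that heuristic (xfer_timeout_ms() is a hypothetical
stand-in, not a core API):

	#include <stdio.h>

	static unsigned long long xfer_timeout_ms(unsigned int len,
						  unsigned int speed_hz)
	{
		unsigned long long ms = 8ULL * 1000ULL * len;

		ms /= speed_hz;        /* milliseconds on the wire */
		ms += ms + 200;        /* double it, add tolerance */
		return ms;
	}

	int main(void)
	{
		/* 4 KiB at 1 MHz: ~32 ms on the wire, 264 ms budget */
		printf("%llu\n", xfer_timeout_ms(4096, 1000000)); /* 264 */
		return 0;
	}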
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
@@ -1050,7 +1084,6 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
- unsigned long long ms = 1;
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
@@ -1080,26 +1113,9 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
}
if (ret > 0) {
- ret = 0;
- ms = 8LL * 1000LL * xfer->len;
- do_div(ms, xfer->speed_hz);
- ms += ms + 200; /* some tolerance */
-
- if (ms > UINT_MAX)
- ms = UINT_MAX;
-
- ms = wait_for_completion_timeout(&ctlr->xfer_completion,
- msecs_to_jiffies(ms));
- }
-
- if (ms == 0) {
- SPI_STATISTICS_INCREMENT_FIELD(statm,
- timedout);
- SPI_STATISTICS_INCREMENT_FIELD(stats,
- timedout);
- dev_err(&msg->spi->dev,
- "SPI transfer timed out\n");
- msg->status = -ETIMEDOUT;
+ ret = spi_transfer_wait(ctlr, msg, xfer);
+ if (ret < 0)
+ msg->status = ret;
}
} else {
if (xfer->len)
@@ -1617,6 +1633,9 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
case 4:
spi->mode |= SPI_TX_QUAD;
break;
+ case 8:
+ spi->mode |= SPI_TX_OCTAL;
+ break;
default:
dev_warn(&ctlr->dev,
"spi-tx-bus-width %d not supported\n",
@@ -1635,6 +1654,9 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
case 4:
spi->mode |= SPI_RX_QUAD;
break;
+ case 8:
+ spi->mode |= SPI_RX_OCTAL;
+ break;
default:
dev_warn(&ctlr->dev,
"spi-rx-bus-width %d not supported\n",
@@ -1644,7 +1666,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
}
if (spi_controller_is_slave(ctlr)) {
- if (strcmp(nc->name, "slave")) {
+ if (!of_node_name_eq(nc, "slave")) {
dev_err(&ctlr->dev, "%pOF is not called 'slave'\n",
nc);
return -EINVAL;
@@ -2823,7 +2845,8 @@ int spi_setup(struct spi_device *spi)
/* if it is SPI_3WIRE mode, DUAL and QUAD should be forbidden
*/
if ((spi->mode & SPI_3WIRE) && (spi->mode &
- (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
+ SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL)))
return -EINVAL;
/* help drivers fail *cleanly* when they need options
* that aren't supported with their current controller
@@ -2832,7 +2855,8 @@ int spi_setup(struct spi_device *spi)
*/
bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
ugly_bits = bad_bits &
- (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL |
+ SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL);
if (ugly_bits) {
dev_warn(&spi->dev,
"setup: ignoring unsupported mode bits %x\n",
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7c015536360d..e4f608815c05 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -78,8 +78,6 @@ source "drivers/staging/goldfish/Kconfig"
source "drivers/staging/netlogic/Kconfig"
-source "drivers/staging/mt29f_spinand/Kconfig"
-
source "drivers/staging/gs_fpgaboot/Kconfig"
source "drivers/staging/unisys/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a79b3fe20cf0..5868631e8f1b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
obj-$(CONFIG_GOLDFISH) += goldfish/
-obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/
obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/
obj-$(CONFIG_UNISYSSPAR) += unisys/
obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
diff --git a/drivers/staging/comedi/comedi.h b/drivers/staging/comedi/comedi.h
index e90b17775284..09a940066c0e 100644
--- a/drivers/staging/comedi/comedi.h
+++ b/drivers/staging/comedi/comedi.h
@@ -1005,35 +1005,38 @@ enum i8254_mode {
* and INSN_DEVICE_CONFIG_GET_ROUTES.
*/
#define NI_NAMES_BASE 0x8000u
+
+#define _TERM_N(base, n, x) ((base) + ((x) & ((n) - 1)))
+
/*
* not necessarily all allowed 64 PFIs are valid--certainly not for all devices
*/
-#define NI_PFI(x) (NI_NAMES_BASE + ((x) & 0x3f))
+#define NI_PFI(x) _TERM_N(NI_NAMES_BASE, 64, x)
/* 8 trigger lines by standard, Some devices cannot talk to all eight. */
-#define TRIGGER_LINE(x) (NI_PFI(-1) + 1 + ((x) & 0x7))
+#define TRIGGER_LINE(x) _TERM_N(NI_PFI(-1) + 1, 8, x)
/* 4 RTSI shared MUXes to route signals to/from TRIGGER_LINES on NI hardware */
-#define NI_RTSI_BRD(x) (TRIGGER_LINE(-1) + 1 + ((x) & 0x3))
+#define NI_RTSI_BRD(x) _TERM_N(TRIGGER_LINE(-1) + 1, 4, x)
/* *** Counter/timer names : 8 counters max *** */
-#define NI_COUNTER_NAMES_BASE (NI_RTSI_BRD(-1) + 1)
-#define NI_MAX_COUNTERS 7
-#define NI_CtrSource(x) (NI_COUNTER_NAMES_BASE + ((x) & NI_MAX_COUNTERS))
+#define NI_MAX_COUNTERS 8
+#define NI_COUNTER_NAMES_BASE (NI_RTSI_BRD(-1) + 1)
+#define NI_CtrSource(x) _TERM_N(NI_COUNTER_NAMES_BASE, NI_MAX_COUNTERS, x)
/* Gate, Aux, A,B,Z are all treated, at times as gates */
-#define NI_GATES_NAMES_BASE (NI_CtrSource(-1) + 1)
-#define NI_CtrGate(x) (NI_GATES_NAMES_BASE + ((x) & NI_MAX_COUNTERS))
-#define NI_CtrAux(x) (NI_CtrGate(-1) + 1 + ((x) & NI_MAX_COUNTERS))
-#define NI_CtrA(x) (NI_CtrAux(-1) + 1 + ((x) & NI_MAX_COUNTERS))
-#define NI_CtrB(x) (NI_CtrA(-1) + 1 + ((x) & NI_MAX_COUNTERS))
-#define NI_CtrZ(x) (NI_CtrB(-1) + 1 + ((x) & NI_MAX_COUNTERS))
-#define NI_GATES_NAMES_MAX NI_CtrZ(-1)
-#define NI_CtrArmStartTrigger(x) (NI_CtrZ(-1) + 1 + ((x) & NI_MAX_COUNTERS))
+#define NI_GATES_NAMES_BASE (NI_CtrSource(-1) + 1)
+#define NI_CtrGate(x) _TERM_N(NI_GATES_NAMES_BASE, NI_MAX_COUNTERS, x)
+#define NI_CtrAux(x) _TERM_N(NI_CtrGate(-1) + 1, NI_MAX_COUNTERS, x)
+#define NI_CtrA(x) _TERM_N(NI_CtrAux(-1) + 1, NI_MAX_COUNTERS, x)
+#define NI_CtrB(x) _TERM_N(NI_CtrA(-1) + 1, NI_MAX_COUNTERS, x)
+#define NI_CtrZ(x) _TERM_N(NI_CtrB(-1) + 1, NI_MAX_COUNTERS, x)
+#define NI_GATES_NAMES_MAX NI_CtrZ(-1)
+#define NI_CtrArmStartTrigger(x) _TERM_N(NI_CtrZ(-1) + 1, NI_MAX_COUNTERS, x)
#define NI_CtrInternalOutput(x) \
- (NI_CtrArmStartTrigger(-1) + 1 + ((x) & NI_MAX_COUNTERS))
+ _TERM_N(NI_CtrArmStartTrigger(-1) + 1, NI_MAX_COUNTERS, x)
/** external pin(s) labeled conveniently as Ctr<i>Out. */
-#define NI_CtrOut(x) (NI_CtrInternalOutput(-1) + 1 + ((x) & NI_MAX_COUNTERS))
+#define NI_CtrOut(x) _TERM_N(NI_CtrInternalOutput(-1) + 1, NI_MAX_COUNTERS, x)
/** For Buffered sampling of ctr -- x series capability. */
-#define NI_CtrSampleClock(x) (NI_CtrOut(-1) + 1 + ((x) & NI_MAX_COUNTERS))
-#define NI_COUNTER_NAMES_MAX NI_CtrSampleClock(-1)
+#define NI_CtrSampleClock(x) _TERM_N(NI_CtrOut(-1) + 1, NI_MAX_COUNTERS, x)
+#define NI_COUNTER_NAMES_MAX NI_CtrSampleClock(-1)
enum ni_common_signal_names {
/* PXI_Star: this is a non-NI-specific signal */
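The _TERM_N() helper above only behaves when its range is a power of two: the
mask wraps any index, including the -1 used throughout the header to name the
last entry of each block, back into [base, base + n - 1]. A quick standalone
check with the NI_PFI() instance:

	#include <stdio.h>

	#define _TERM_N(base, n, x) ((base) + ((x) & ((n) - 1)))
	#define NI_NAMES_BASE 0x8000u
	#define NI_PFI(x) _TERM_N(NI_NAMES_BASE, 64, x)

	int main(void)
	{
		printf("0x%x\n", NI_PFI(0));  /* 0x8000 */
		printf("0x%x\n", NI_PFI(63)); /* 0x803f */
		/* -1 masks to 63: the conventional "last name" lookup */
		printf("0x%x\n", NI_PFI(-1)); /* 0x803f */
		/* 64 wraps to the base instead of colliding upward */
		printf("0x%x\n", NI_PFI(64)); /* 0x8000 */
		return 0;
	}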
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 2d1e0325d04d..5edf59ac6706 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -2843,7 +2843,8 @@ static int ni_ao_insn_config(struct comedi_device *dev,
return ni_ao_arm(dev, s);
case INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS:
/* we don't care about actual channels */
- data[1] = board->ao_speed;
+ /* data[3] : chanlist_len */
+ data[1] = board->ao_speed * data[3];
data[2] = 0;
return 0;
default:
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index 7a7ca67822c5..daabaceeea52 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -719,9 +719,6 @@ static int port_vlans_add(struct net_device *netdev,
struct ethsw_port_priv *port_priv = netdev_priv(netdev);
int vid, err = 0;
- if (netif_is_bridge_master(vlan->obj.orig_dev))
- return -EOPNOTSUPP;
-
if (switchdev_trans_ph_prepare(trans))
return 0;
@@ -930,8 +927,6 @@ static int swdev_port_obj_del(struct net_device *netdev,
static const struct switchdev_ops ethsw_port_switchdev_ops = {
.switchdev_port_attr_get = swdev_port_attr_get,
.switchdev_port_attr_set = swdev_port_attr_set,
- .switchdev_port_obj_add = swdev_port_obj_add,
- .switchdev_port_obj_del = swdev_port_obj_del,
};
/* For the moment, only flood setting needs to be updated */
@@ -972,6 +967,11 @@ static int port_bridge_leave(struct net_device *netdev)
return err;
}
+static bool ethsw_port_dev_check(const struct net_device *netdev)
+{
+ return netdev->netdev_ops == &ethsw_port_ops;
+}
+
static int port_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -980,7 +980,7 @@ static int port_netdevice_event(struct notifier_block *unused,
struct net_device *upper_dev;
int err = 0;
- if (netdev->netdev_ops != &ethsw_port_ops)
+ if (!ethsw_port_dev_check(netdev))
return NOTIFY_DONE;
/* Handle just upper dev link/unlink for the moment */
@@ -1083,10 +1083,51 @@ err_addr_alloc:
return NOTIFY_BAD;
}
+static int
+ethsw_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
+ struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+ int err = -EOPNOTSUPP;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = swdev_port_obj_add(netdev, port_obj_info->obj,
+ port_obj_info->trans);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = swdev_port_obj_del(netdev, port_obj_info->obj);
+ break;
+ }
+
+ port_obj_info->handled = true;
+ return notifier_from_errno(err);
+}
+
+static int port_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ if (!ethsw_port_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD: /* fall through */
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return ethsw_switchdev_port_obj_event(event, dev, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
static struct notifier_block port_switchdev_nb = {
.notifier_call = port_switchdev_event,
};
+static struct notifier_block port_switchdev_blocking_nb = {
+ .notifier_call = port_switchdev_blocking_event,
+};
+
static int ethsw_register_notifier(struct device *dev)
{
int err;
@@ -1103,8 +1144,16 @@ static int ethsw_register_notifier(struct device *dev)
goto err_switchdev_nb;
}
+ err = register_switchdev_blocking_notifier(&port_switchdev_blocking_nb);
+ if (err) {
+ dev_err(dev, "Failed to register switchdev blocking notifier\n");
+ goto err_switchdev_blocking_nb;
+ }
+
return 0;
+err_switchdev_blocking_nb:
+ unregister_switchdev_notifier(&port_switchdev_nb);
err_switchdev_nb:
unregister_netdevice_notifier(&port_nb);
return err;
@@ -1123,7 +1172,7 @@ static int ethsw_open(struct ethsw_core *ethsw)
for (i = 0; i < ethsw->sw_attr.num_ifs; i++) {
port_priv = ethsw->ports[i];
- err = dev_open(port_priv->netdev);
+ err = dev_open(port_priv->netdev, NULL);
if (err) {
netdev_err(port_priv->netdev, "dev_open err %d\n", err);
return err;
@@ -1291,8 +1340,15 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
static void ethsw_unregister_notifier(struct device *dev)
{
+ struct notifier_block *nb;
int err;
+ nb = &port_switchdev_blocking_nb;
+ err = unregister_switchdev_blocking_notifier(nb);
+ if (err)
+ dev_err(dev,
+ "Failed to unregister switchdev blocking notifier (%d)\n", err);
+
err = unregister_switchdev_notifier(&port_switchdev_nb);
if (err)
dev_err(dev,
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index b3620a8f2d9f..19cadd17e542 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -31,10 +31,14 @@ source "drivers/staging/media/mt9t031/Kconfig"
source "drivers/staging/media/omap4iss/Kconfig"
+source "drivers/staging/media/rockchip/vpu/Kconfig"
+
source "drivers/staging/media/sunxi/Kconfig"
source "drivers/staging/media/tegra-vde/Kconfig"
source "drivers/staging/media/zoran/Kconfig"
+source "drivers/staging/media/ipu3/Kconfig"
+
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 42948f805548..edde1960b030 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -8,3 +8,5 @@ obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
obj-$(CONFIG_TEGRA_VDE) += tegra-vde/
obj-$(CONFIG_VIDEO_ZORAN) += zoran/
+obj-$(CONFIG_VIDEO_ROCKCHIP_VPU) += rockchip/vpu/
+obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
index 874d290f9622..debd1122875d 100644
--- a/drivers/staging/media/bcm2048/radio-bcm2048.c
+++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
@@ -2574,8 +2574,7 @@ static const struct video_device bcm2048_viddev_template = {
/*
* I2C driver interface
*/
-static int bcm2048_i2c_driver_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int bcm2048_i2c_driver_probe(struct i2c_client *client)
{
struct bcm2048_device *bdev;
int err;
@@ -2679,7 +2678,7 @@ static struct i2c_driver bcm2048_i2c_driver = {
.driver = {
.name = BCM2048_DRIVER_NAME,
},
- .probe = bcm2048_i2c_driver_probe,
+ .probe_new = bcm2048_i2c_driver_probe,
.remove = bcm2048_i2c_driver_remove,
.id_table = bcm2048_id,
};
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index b2e840f96c50..a01327f6e045 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -162,7 +162,7 @@ int imx_media_create_csi_of_links(struct imx_media_dev *imxmd,
fwnode_property_read_u32(fwnode, "reg", &link.remote_port);
fwnode = fwnode_get_next_parent(fwnode);
if (is_of_node(fwnode) &&
- of_node_cmp(to_of_node(fwnode)->name, "ports") == 0)
+ of_node_name_eq(to_of_node(fwnode), "ports"))
fwnode = fwnode_get_next_parent(fwnode);
link.remote_node = fwnode;
diff --git a/drivers/staging/media/ipu3/Kconfig b/drivers/staging/media/ipu3/Kconfig
new file mode 100644
index 000000000000..75cd889f18f7
--- /dev/null
+++ b/drivers/staging/media/ipu3/Kconfig
@@ -0,0 +1,14 @@
+config VIDEO_IPU3_IMGU
+ tristate "Intel ipu3-imgu driver"
+ depends on PCI && VIDEO_V4L2
+ depends on MEDIA_CONTROLLER && VIDEO_V4L2_SUBDEV_API
+ depends on X86
+ select IOMMU_IOVA
+ select VIDEOBUF2_DMA_SG
+ ---help---
+ This is the Video4Linux2 driver for the Intel IPU3 image processing unit,
+ found in Intel Skylake and Kaby Lake SoCs and used for processing
+ images and video.
+
+ Say Y or M here if you have a Skylake/Kaby Lake SoC with a MIPI
+ camera. The module will be called ipu3-imgu.
diff --git a/drivers/staging/media/ipu3/Makefile b/drivers/staging/media/ipu3/Makefile
new file mode 100644
index 000000000000..fb146d178bd4
--- /dev/null
+++ b/drivers/staging/media/ipu3/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the IPU3 ImgU drivers
+#
+
+ipu3-imgu-objs += \
+ ipu3-mmu.o ipu3-dmamap.o \
+ ipu3-tables.o ipu3-css-pool.o \
+ ipu3-css-fw.o ipu3-css-params.o \
+ ipu3-css.o ipu3-v4l2.o ipu3.o
+
+obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3-imgu.o
diff --git a/drivers/staging/media/ipu3/TODO b/drivers/staging/media/ipu3/TODO
new file mode 100644
index 000000000000..905bbb190217
--- /dev/null
+++ b/drivers/staging/media/ipu3/TODO
@@ -0,0 +1,34 @@
+This is a list of things that need to be done to get this driver out of the
+staging directory.
+
+- Request API conversion. Removal of the dual pipeline, associating buffers
+ as well as formats and the binary used to a request. Remove the
+ opportunistic buffer management. (Sakari)
+
+- Using ENABLED and IMMUTABLE link flags for the links where those are
+ relevant. (Sakari)
+
+- Prefix imgu for all public APIs, i.e. change ipu3_v4l2_register() to
+ imgu_v4l2_register(). (Sakari)
+
+- Use V4L2_CTRL_TYPE_MENU for dual-pipe mode control. (Sakari)
+
+- IPU3 driver documentation (Laurent)
+ Add diagram in driver rst to describe output capability.
+ Comments on configuring v4l2 subdevs for CIO2 and ImgU.
+
+- uAPI documentation:
+ Further clarification on some ambiguities such as data type conversion of
+ IEFD CU inputs. (Sakari)
+ Move acronyms to doc-rst file. (Mauro)
+
+- Switch to yavta from v4l2n in driver docs.
+
+- Elaborate the functionality of different selection rectangles in driver
+ documentation. This may require driver changes as well.
+
+- More detailed documentation on calculating BDS, GCD etc. sizes needed.
+
+- Document different operation modes, and which buffer queues are relevant
+ in each mode. To process an image, which queues require a buffer and in
+ which ones is it optional?
diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
new file mode 100644
index 000000000000..ec0b74829351
--- /dev/null
+++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
@@ -0,0 +1,2785 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 - 2018 Intel Corporation */
+
+#ifndef __IPU3_UAPI_H
+#define __IPU3_UAPI_H
+
+#include <linux/types.h>
+
+/* from /drivers/staging/media/ipu3/include/videodev2.h */
+
+/* Vendor specific - used for IPU3 camera sub-system */
+#define V4L2_META_FMT_IPU3_PARAMS v4l2_fourcc('i', 'p', '3', 'p') /* IPU3 processing parameters */
+#define V4L2_META_FMT_IPU3_STAT_3A v4l2_fourcc('i', 'p', '3', 's') /* IPU3 3A statistics */
+
+/* from include/uapi/linux/v4l2-controls.h */
+#define V4L2_CID_INTEL_IPU3_BASE (V4L2_CID_USER_BASE + 0x10c0)
+#define V4L2_CID_INTEL_IPU3_MODE (V4L2_CID_INTEL_IPU3_BASE + 1)
+
+/* custom ctrl to set pipe mode */
+enum ipu3_running_mode {
+ IPU3_RUNNING_MODE_VIDEO = 0,
+ IPU3_RUNNING_MODE_STILL = 1,
+};
+
+/******************* ipu3_uapi_stats_3a *******************/
+
+#define IPU3_UAPI_MAX_STRIPES 2
+#define IPU3_UAPI_MAX_BUBBLE_SIZE 10
+
+#define IPU3_UAPI_GRID_START_MASK ((1 << 12) - 1)
+#define IPU3_UAPI_GRID_Y_START_EN (1 << 15)
+
+/* controls generation of meta_data (like FF enable/disable) */
+#define IPU3_UAPI_AWB_RGBS_THR_B_EN (1 << 14)
+#define IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT (1 << 15)
+
+/**
+ * struct ipu3_uapi_grid_config - Grid plane config
+ *
+ * @width: Grid horizontal dimensions, in number of grid blocks (cells).
+ * @height: Grid vertical dimensions, in number of grid cells.
+ * @block_width_log2: Log2 of the width of each cell in pixels.
+ * for (2^3, 2^4, 2^5, 2^6, 2^7), values [3, 7].
+ * @block_height_log2: Log2 of the height of each cell in pixels.
+ * for (2^3, 2^4, 2^5, 2^6, 2^7), values [3, 7].
+ * @height_per_slice: The number of blocks in vertical axis per slice.
+ * Default 2.
+ * @x_start: X value of top left corner of Region of Interest(ROI).
+ * @y_start: Y value of top left corner of ROI
+ * @x_end: X value of bottom right corner of ROI
+ * @y_end: Y value of bottom right corner of ROI
+ *
+ * Due to the size of total amount of collected data, most statistics
+ * create a grid-based output, and the data is then divided into "slices".
+ */
+struct ipu3_uapi_grid_config {
+ __u8 width;
+ __u8 height;
+ __u16 block_width_log2:3;
+ __u16 block_height_log2:3;
+ __u16 height_per_slice:8;
+ __u16 x_start;
+ __u16 y_start;
+ __u16 x_end;
+ __u16 y_end;
+} __packed;
+
+/*
+ * The grid based data is divided into "slices" called set, each slice of setX
+ * refers to ipu3_uapi_grid_config width * height_per_slice.
+ */
+#define IPU3_UAPI_AWB_MAX_SETS 60
+/* Based on grid size 80 * 60 and cell size 16 x 16 */
+#define IPU3_UAPI_AWB_SET_SIZE 1280
+#define IPU3_UAPI_AWB_MD_ITEM_SIZE 8
+#define IPU3_UAPI_AWB_SPARE_FOR_BUBBLES \
+ (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
+ IPU3_UAPI_AWB_MD_ITEM_SIZE)
+#define IPU3_UAPI_AWB_MAX_BUFFER_SIZE \
+ (IPU3_UAPI_AWB_MAX_SETS * \
+ (IPU3_UAPI_AWB_SET_SIZE + IPU3_UAPI_AWB_SPARE_FOR_BUBBLES))
+
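The buffer-size macros above compose as: 60 sets, each carrying 1280 bytes of
grid data plus spare room for bubbles (10 bubbles x 2 stripes x 8 bytes of
meta data). A standalone arithmetic check under exactly those definitions:

	#include <stdio.h>

	#define IPU3_UAPI_MAX_STRIPES		2
	#define IPU3_UAPI_MAX_BUBBLE_SIZE	10
	#define IPU3_UAPI_AWB_MAX_SETS		60
	#define IPU3_UAPI_AWB_SET_SIZE		1280
	#define IPU3_UAPI_AWB_MD_ITEM_SIZE	8
	#define IPU3_UAPI_AWB_SPARE_FOR_BUBBLES \
		(IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
		 IPU3_UAPI_AWB_MD_ITEM_SIZE)
	#define IPU3_UAPI_AWB_MAX_BUFFER_SIZE \
		(IPU3_UAPI_AWB_MAX_SETS * \
		 (IPU3_UAPI_AWB_SET_SIZE + IPU3_UAPI_AWB_SPARE_FOR_BUBBLES))

	int main(void)
	{
		/* 60 * (1280 + 160) = 86400 bytes of AWB meta data */
		printf("%d\n", IPU3_UAPI_AWB_MAX_BUFFER_SIZE); /* 86400 */
		return 0;
	}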
+
+/**
+ * struct ipu3_uapi_awb_raw_buffer - AWB raw buffer
+ *
+ * @meta_data: buffer to hold auto white balance meta data which is
+ * the average values for each color channel.
+ */
+struct ipu3_uapi_awb_raw_buffer {
+ __u8 meta_data[IPU3_UAPI_AWB_MAX_BUFFER_SIZE]
+ __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_awb_config_s - AWB config
+ *
+ * @rgbs_thr_gr: gr threshold value.
+ * @rgbs_thr_r: Red threshold value.
+ * @rgbs_thr_gb: gb threshold value.
+ * @rgbs_thr_b: Blue threshold value.
+ * @grid: &ipu3_uapi_grid_config, the default grid resolution is 16x16 cells.
+ *
+ * The threshold is a saturation measure range [0, 8191], 8191 is default.
+ * Values over threshold may be optionally rejected for averaging.
+ */
+struct ipu3_uapi_awb_config_s {
+ __u16 rgbs_thr_gr;
+ __u16 rgbs_thr_r;
+ __u16 rgbs_thr_gb;
+ __u16 rgbs_thr_b;
+ struct ipu3_uapi_grid_config grid;
+} __attribute__((aligned(32))) __packed;
+
+/**
+ * struct ipu3_uapi_awb_config - AWB config wrapper
+ *
+ * @config: config for auto white balance as defined by &ipu3_uapi_awb_config_s
+ */
+struct ipu3_uapi_awb_config {
+ struct ipu3_uapi_awb_config_s config __attribute__((aligned(32)));
+} __packed;
+
+#define IPU3_UAPI_AE_COLORS 4 /* R, G, B, Y */
+#define IPU3_UAPI_AE_BINS 256
+#define IPU3_UAPI_AE_WEIGHTS 96
+
+/**
+ * struct ipu3_uapi_ae_raw_buffer - AE global weighted histogram
+ *
+ * @vals: Sum of IPU3_UAPI_AE_COLORS in cell
+ *
+ * Each histogram contains IPU3_UAPI_AE_BINS bins. Each bin is a 24-bit
+ * unsigned value counting the number of pixels.
+ */
+struct ipu3_uapi_ae_raw_buffer {
+ __u32 vals[IPU3_UAPI_AE_BINS * IPU3_UAPI_AE_COLORS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_ae_raw_buffer_aligned - AE raw buffer
+ *
+ * @buff: &ipu3_uapi_ae_raw_buffer to hold full frame meta data.
+ */
+struct ipu3_uapi_ae_raw_buffer_aligned {
+ struct ipu3_uapi_ae_raw_buffer buff __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_ae_grid_config - AE weight grid
+ *
+ * @width: Grid horizontal dimensions. Value: [16, 32], default 16.
+ * @height: Grid vertical dimensions. Value: [16, 24], default 16.
+ * @block_width_log2: Log2 of the width of the grid cell, value: [3, 7].
+ * @block_height_log2: Log2 of the height of the grid cell, value: [3, 7].
+ * default is 3 (cell size 8x8), 4 cells per grid.
+ * @reserved0: reserved
+ * @ae_en: 0: does not write to &ipu3_uapi_ae_raw_buffer_aligned array,
+ * 1: write normally.
+ * @rst_hist_array: write 1 to trigger histogram array reset.
+ * @done_rst_hist_array: flag for histogram array reset done.
+ * @x_start: X value of top left corner of ROI, default 0.
+ * @y_start: Y value of top left corner of ROI, default 0.
+ * @x_end: X value of bottom right corner of ROI
+ * @y_end: Y value of bottom right corner of ROI
+ *
+ * The AE block accumulates 4 global weighted histograms (R, G, B, Y) over
+ * a defined ROI within the frame. The contribution of each pixel into the
+ * histogram, defined by &ipu3_uapi_ae_weight_elem LUT, is indexed by a grid.
+ */
+struct ipu3_uapi_ae_grid_config {
+ __u8 width;
+ __u8 height;
+ __u8 block_width_log2:4;
+ __u8 block_height_log2:4;
+ __u8 reserved0:5;
+ __u8 ae_en:1;
+ __u8 rst_hist_array:1;
+ __u8 done_rst_hist_array:1;
+ __u16 x_start;
+ __u16 y_start;
+ __u16 x_end;
+ __u16 y_end;
+} __packed;
+
+/**
+ * struct ipu3_uapi_ae_weight_elem - AE weights LUT
+ *
+ * @cell0: weighted histogram grid value.
+ * @cell1: weighted histogram grid value.
+ * @cell2: weighted histogram grid value.
+ * @cell3: weighted histogram grid value.
+ * @cell4: weighted histogram grid value.
+ * @cell5: weighted histogram grid value.
+ * @cell6: weighted histogram grid value.
+ * @cell7: weighted histogram grid value.
+ *
+ * Use weighted grid value to give a different contribution factor to each cell.
+ * Precision u4, range [0, 15].
+ */
+struct ipu3_uapi_ae_weight_elem {
+ __u32 cell0:4;
+ __u32 cell1:4;
+ __u32 cell2:4;
+ __u32 cell3:4;
+ __u32 cell4:4;
+ __u32 cell5:4;
+ __u32 cell6:4;
+ __u32 cell7:4;
+} __packed;
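+
+/*
+ * Illustrative sketch, not a recommended tuning: a flat weighting that
+ * gives every cell the same mid-range contribution (the value 8 is an
+ * assumption).
+ *
+ *	struct ipu3_uapi_ae_weight_elem w = {
+ *		.cell0 = 8, .cell1 = 8, .cell2 = 8, .cell3 = 8,
+ *		.cell4 = 8, .cell5 = 8, .cell6 = 8, .cell7 = 8,
+ *	};
+ */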
+
+/**
+ * struct ipu3_uapi_ae_ccm - AE coefficients for WB and CCM
+ *
+ * @gain_gr: WB gain factor for the Gr channel. Default 256.
+ * @gain_r: WB gain factor for the R channel. Default 256.
+ * @gain_b: WB gain factor for the B channel. Default 256.
+ * @gain_gb: WB gain factor for the Gb channel. Default 256.
+ * @mat: 4x4 matrix that transforms Bayer quad output from WB to RGB+Y.
+ *
+ * Default:
+ * 128, 0, 0, 0,
+ * 0, 128, 0, 0,
+ * 0, 0, 128, 0,
+ * 0, 0, 0, 128,
+ *
+ * As part of the raw frame pre-process stage, the WB and color conversion need
+ * to be applied to expose the impact of these gain operations.
+ */
+struct ipu3_uapi_ae_ccm {
+ __u16 gain_gr;
+ __u16 gain_r;
+ __u16 gain_b;
+ __u16 gain_gb;
+ __s16 mat[16];
+} __packed;
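+
+/*
+ * Sketch of the documented defaults: unity WB gains (256) and the default
+ * 4x4 matrix (128 on the diagonal).
+ *
+ *	struct ipu3_uapi_ae_ccm ccm = {
+ *		.gain_gr = 256, .gain_r = 256,
+ *		.gain_b = 256, .gain_gb = 256,
+ *		.mat = {
+ *			128, 0, 0, 0,
+ *			0, 128, 0, 0,
+ *			0, 0, 128, 0,
+ *			0, 0, 0, 128,
+ *		},
+ *	};
+ */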
+
+/**
+ * struct ipu3_uapi_ae_config - AE config
+ *
+ * @grid_cfg: config for auto exposure statistics grid. See struct
+ * &ipu3_uapi_ae_grid_config
+ * @weights: weights LUT of &IPU3_UAPI_AE_WEIGHTS entries, based on 32x24
+ * blocks in the grid. Each grid cell has a corresponding value in the
+ * weights LUT, called the grid value; the global histogram is updated
+ * based on the grid value and the pixel value.
+ * @ae_ccm: Color convert matrix pre-processing block.
+ *
+ * Calculate AE grid from image resolution, resample ae weights.
+ */
+struct ipu3_uapi_ae_config {
+ struct ipu3_uapi_ae_grid_config grid_cfg __attribute__((aligned(32)));
+ struct ipu3_uapi_ae_weight_elem weights[
+ IPU3_UAPI_AE_WEIGHTS] __attribute__((aligned(32)));
+ struct ipu3_uapi_ae_ccm ae_ccm __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_af_filter_config - AF 2D filter for contrast measurements
+ *
+ * @y1_coeff_0: filter Y1, structure: 3x11, supports both symmetric and
+ * anti-symmetric types. A12 is the center, A1-A11 are neighbours.
+ * Used for analyzing low frequency content and calculating the sum
+ * of gradients in the x direction.
+ * @y1_coeff_0.a1: filter1 coefficients A1, u8, default 0.
+ * @y1_coeff_0.a2: filter1 coefficients A2, u8, default 0.
+ * @y1_coeff_0.a3: filter1 coefficients A3, u8, default 0.
+ * @y1_coeff_0.a4: filter1 coefficients A4, u8, default 0.
+ * @y1_coeff_1: Struct
+ * @y1_coeff_1.a5: filter1 coefficients A5, u8, default 0.
+ * @y1_coeff_1.a6: filter1 coefficients A6, u8, default 0.
+ * @y1_coeff_1.a7: filter1 coefficients A7, u8, default 0.
+ * @y1_coeff_1.a8: filter1 coefficients A8, u8, default 0.
+ * @y1_coeff_2: Struct
+ * @y1_coeff_2.a9: filter1 coefficients A9, u8, default 0.
+ * @y1_coeff_2.a10: filter1 coefficients A10, u8, default 0.
+ * @y1_coeff_2.a11: filter1 coefficients A11, u8, default 0.
+ * @y1_coeff_2.a12: filter1 coefficients A12, u8, default 128.
+ * @y1_sign_vec: Each bit corresponds to one coefficient sign bit,
+ * 0: positive, 1: negative, default 0.
+ * @y2_coeff_0: Y2, same structure as Y1. For analyzing high frequency content.
+ * @y2_coeff_0.a1: filter2 coefficients A1, u8, default 0.
+ * @y2_coeff_0.a2: filter2 coefficients A2, u8, default 0.
+ * @y2_coeff_0.a3: filter2 coefficients A3, u8, default 0.
+ * @y2_coeff_0.a4: filter2 coefficients A4, u8, default 0.
+ * @y2_coeff_1: Struct
+ * @y2_coeff_1.a5: filter2 coefficients A5, u8, default 0.
+ * @y2_coeff_1.a6: filter2 coefficients A6, u8, default 0.
+ * @y2_coeff_1.a7: filter2 coefficients A7, u8, default 0.
+ * @y2_coeff_1.a8: filter2 coefficients A8, u8, default 0.
+ * @y2_coeff_2: Struct
+ * @y2_coeff_2.a9: filter2 coefficients A9, u8, default 0.
+ * @y2_coeff_2.a10: filter2 coefficients A10, u8, default 0.
+ * @y2_coeff_2.a11: filter2 coefficients A11, u8, default 0.
+ * @y2_coeff_2.a12: filter2 coefficients A12, u8, default 128.
+ * @y2_sign_vec: Each bit corresponds to one coefficient sign bit,
+ * 0: positive, 1: negative, default 0.
+ * @y_calc: Pre-processing that converts Bayer quad to RGB+Y values to be
+ * used for building histogram. Range [0, 32], default 8.
+ * Rule:
+ * y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb = 32
+ * A single Y is calculated from the sum of Gr/R/B/Gb according to
+ * their contribution ratios.
+ * @y_calc.y_gen_rate_gr: Contribution ratio Gr for Y
+ * @y_calc.y_gen_rate_r: Contribution ratio R for Y
+ * @y_calc.y_gen_rate_b: Contribution ratio B for Y
+ * @y_calc.y_gen_rate_gb: Contribution ratio Gb for Y
+ * @nf: The shift right value that should be applied during the Y1/Y2 filter to
+ * make sure the total memory needed is 2 bytes per grid cell.
+ * @nf.reserved0: reserved
+ * @nf.y1_nf: Normalization factor for the convolution coeffs of y1,
+ * should be log2 of the sum of the abs values of the filter
+ * coeffs, default 7 (2^7 = 128).
+ * @nf.reserved1: reserved
+ * @nf.y2_nf: Normalization factor for y2, should be log2 of the sum of the
+ * abs values of the filter coeffs.
+ * @nf.reserved2: reserved
+ */
+struct ipu3_uapi_af_filter_config {
+ struct {
+ __u8 a1;
+ __u8 a2;
+ __u8 a3;
+ __u8 a4;
+ } y1_coeff_0;
+ struct {
+ __u8 a5;
+ __u8 a6;
+ __u8 a7;
+ __u8 a8;
+ } y1_coeff_1;
+ struct {
+ __u8 a9;
+ __u8 a10;
+ __u8 a11;
+ __u8 a12;
+ } y1_coeff_2;
+
+ __u32 y1_sign_vec;
+
+ struct {
+ __u8 a1;
+ __u8 a2;
+ __u8 a3;
+ __u8 a4;
+ } y2_coeff_0;
+ struct {
+ __u8 a5;
+ __u8 a6;
+ __u8 a7;
+ __u8 a8;
+ } y2_coeff_1;
+ struct {
+ __u8 a9;
+ __u8 a10;
+ __u8 a11;
+ __u8 a12;
+ } y2_coeff_2;
+
+ __u32 y2_sign_vec;
+
+ struct {
+ __u8 y_gen_rate_gr;
+ __u8 y_gen_rate_r;
+ __u8 y_gen_rate_b;
+ __u8 y_gen_rate_gb;
+ } y_calc;
+
+ struct {
+ __u32 reserved0:8;
+ __u32 y1_nf:4;
+ __u32 reserved1:4;
+ __u32 y2_nf:4;
+ __u32 reserved2:12;
+ } nf;
+} __packed;
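+
+/*
+ * Illustrative sketch using only the documented defaults: a center-tap-only
+ * Y1/Y2 filter (A12 = 128), equal Y generation ratios summing to 32, and
+ * normalization factors of log2(128) = 7.
+ *
+ *	struct ipu3_uapi_af_filter_config f = {
+ *		.y1_coeff_2.a12 = 128,
+ *		.y2_coeff_2.a12 = 128,
+ *		.y_calc = { 8, 8, 8, 8 },	// Gr + R + B + Gb = 32
+ *		.nf = { .y1_nf = 7, .y2_nf = 7 },
+ *	};
+ */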
+
+#define IPU3_UAPI_AF_MAX_SETS 24
+#define IPU3_UAPI_AF_MD_ITEM_SIZE 4
+#define IPU3_UAPI_AF_SPARE_FOR_BUBBLES \
+ (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
+ IPU3_UAPI_AF_MD_ITEM_SIZE)
+#define IPU3_UAPI_AF_Y_TABLE_SET_SIZE 128
+#define IPU3_UAPI_AF_Y_TABLE_MAX_SIZE \
+ (IPU3_UAPI_AF_MAX_SETS * \
+ (IPU3_UAPI_AF_Y_TABLE_SET_SIZE + IPU3_UAPI_AF_SPARE_FOR_BUBBLES) * \
+ IPU3_UAPI_MAX_STRIPES)
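+
+/*
+ * Worked example of the sizing above:
+ * IPU3_UAPI_AF_SPARE_FOR_BUBBLES = 10 * 2 * 4 = 80 bytes, so
+ * IPU3_UAPI_AF_Y_TABLE_MAX_SIZE = 24 * (128 + 80) * 2 = 9984 bytes.
+ */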
+
+/**
+ * struct ipu3_uapi_af_raw_buffer - AF meta data
+ *
+ * @y_table: Each color component will be convolved separately with filter1
+ * and filter2, and the result will be summed and averaged for
+ * each cell.
+ */
+struct ipu3_uapi_af_raw_buffer {
+ __u8 y_table[IPU3_UAPI_AF_Y_TABLE_MAX_SIZE] __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_af_config_s - AF config
+ *
+ * @filter_config: AF uses Y1 and Y2 filters as configured in
+ * &ipu3_uapi_af_filter_config
+ * @padding: paddings
+ * @grid_cfg: See &ipu3_uapi_grid_config, default resolution 16x16. Use large
+ * grid size for large image and vice versa.
+ */
+struct ipu3_uapi_af_config_s {
+ struct ipu3_uapi_af_filter_config filter_config __attribute__((aligned(32)));
+ __u8 padding[4];
+ struct ipu3_uapi_grid_config grid_cfg __attribute__((aligned(32)));
+} __packed;
+
+#define IPU3_UAPI_AWB_FR_MAX_SETS 24
+#define IPU3_UAPI_AWB_FR_MD_ITEM_SIZE 8
+#define IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE 256
+#define IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES \
+ (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
+ IPU3_UAPI_AWB_FR_MD_ITEM_SIZE)
+#define IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE \
+ (IPU3_UAPI_AWB_FR_MAX_SETS * \
+ (IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE + \
+ IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES) * IPU3_UAPI_MAX_STRIPES)
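+
+/*
+ * Worked example of the sizing above:
+ * IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES = 10 * 2 * 8 = 160 bytes, so
+ * IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE = 24 * (256 + 160) * 2 = 19968 bytes.
+ */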
+
+/**
+ * struct ipu3_uapi_awb_fr_raw_buffer - AWB filter response meta data
+ *
+ * @meta_data: Statistics output on the grid after convolving with 1D filter.
+ */
+struct ipu3_uapi_awb_fr_raw_buffer {
+ __u8 meta_data[IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE]
+ __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_awb_fr_config_s - AWB filter response config
+ *
+ * @grid_cfg: grid config, default 16x16.
+ * @bayer_coeff: 1D Filter 1x11 center symmetry/anti-symmetry.
+ * coefficients default to { 0, 0, 0, 0, 0, 128 }.
+ * Applied on whole image for each Bayer channel separately
+ * by a weighted sum of its 11x1 neighbors.
+ * @reserved1: reserved
+ * @bayer_sign: sign of filter coefficients, default 0.
+ * @bayer_nf: normalization factor for the convolution coeffs, to make sure
+ * total memory needed is within pre-determined range.
+ * NF should be the log2 of the sum of the abs values of the
+ * filter coeffs, range [7, 14], default 7.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_awb_fr_config_s {
+ struct ipu3_uapi_grid_config grid_cfg;
+ __u8 bayer_coeff[6];
+ __u16 reserved1;
+ __u32 bayer_sign;
+ __u8 bayer_nf;
+ __u8 reserved2[3];
+} __attribute__((aligned(32))) __packed;
+
+/**
+ * struct ipu3_uapi_4a_config - 4A config
+ *
+ * @awb_config: &ipu3_uapi_awb_config_s, default resolution 16x16
+ * @ae_grd_config: auto exposure statistics &ipu3_uapi_ae_grid_config
+ * @padding: paddings
+ * @af_config: auto focus config &ipu3_uapi_af_config_s
+ * @awb_fr_config: &ipu3_uapi_awb_fr_config_s, default resolution 16x16
+ */
+struct ipu3_uapi_4a_config {
+ struct ipu3_uapi_awb_config_s awb_config __attribute__((aligned(32)));
+ struct ipu3_uapi_ae_grid_config ae_grd_config;
+ __u8 padding[20];
+ struct ipu3_uapi_af_config_s af_config;
+ struct ipu3_uapi_awb_fr_config_s awb_fr_config;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bubble_info - Bubble info for host side debugging
+ *
+ * @num_of_stripes: A single frame is divided into several parts called stripes
+ * due to a limitation on line buffer memory.
+ * The separation between the stripes is vertical. Each such
+ * stripe is processed as a single frame by the ISP pipe.
+ * @padding: padding bytes.
+ * @num_sets: number of sets.
+ * @padding1: padding bytes.
+ * @size_of_set: set size.
+ * @padding2: padding bytes.
+ * @bubble_size: amount of padding in the bubble, expressed in "sets".
+ * @padding3: padding bytes.
+ */
+struct ipu3_uapi_bubble_info {
+ __u32 num_of_stripes __attribute__((aligned(32)));
+ __u8 padding[28];
+ __u32 num_sets;
+ __u8 padding1[28];
+ __u32 size_of_set;
+ __u8 padding2[28];
+ __u32 bubble_size;
+ __u8 padding3[28];
+} __packed;
+
+/*
+ * struct ipu3_uapi_stats_3a_bubble_info_per_stripe
+ */
+struct ipu3_uapi_stats_3a_bubble_info_per_stripe {
+ struct ipu3_uapi_bubble_info awb[IPU3_UAPI_MAX_STRIPES];
+ struct ipu3_uapi_bubble_info af[IPU3_UAPI_MAX_STRIPES];
+ struct ipu3_uapi_bubble_info awb_fr[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+/**
+ * struct ipu3_uapi_ff_status - Enable bits for each 3A fixed function
+ *
+ * @awb_en: auto white balance enable
+ * @padding: padding config
+ * @ae_en: auto exposure enable
+ * @padding1: padding config
+ * @af_en: auto focus enable
+ * @padding2: padding config
+ * @awb_fr_en: awb filter response enable bit
+ * @padding3: padding config
+ */
+struct ipu3_uapi_ff_status {
+ __u32 awb_en __attribute__((aligned(32)));
+ __u8 padding[28];
+ __u32 ae_en;
+ __u8 padding1[28];
+ __u32 af_en;
+ __u8 padding2[28];
+ __u32 awb_fr_en;
+ __u8 padding3[28];
+} __packed;
+
+/**
+ * struct ipu3_uapi_stats_3a - 3A statistics
+ *
+ * @awb_raw_buffer: auto white balance meta data &ipu3_uapi_awb_raw_buffer
+ * @ae_raw_buffer: auto exposure raw data &ipu3_uapi_ae_raw_buffer_aligned
+ * @af_raw_buffer: &ipu3_uapi_af_raw_buffer for auto focus meta data
+ * @awb_fr_raw_buffer: value as specified by &ipu3_uapi_awb_fr_raw_buffer
+ * @stats_4a_config: 4a statistics config as defined by &ipu3_uapi_4a_config.
+ * @ae_join_buffers: 1 to use ae_raw_buffer.
+ * @padding: padding config
+ * @stats_3a_bubble_per_stripe: a &ipu3_uapi_stats_3a_bubble_info_per_stripe
+ * @stats_3a_status: 3a statistics status set in &ipu3_uapi_ff_status
+ */
+struct ipu3_uapi_stats_3a {
+ struct ipu3_uapi_awb_raw_buffer awb_raw_buffer;
+ struct ipu3_uapi_ae_raw_buffer_aligned
+ ae_raw_buffer[IPU3_UAPI_MAX_STRIPES];
+ struct ipu3_uapi_af_raw_buffer af_raw_buffer;
+ struct ipu3_uapi_awb_fr_raw_buffer awb_fr_raw_buffer;
+ struct ipu3_uapi_4a_config stats_4a_config;
+ __u32 ae_join_buffers;
+ __u8 padding[28];
+ struct ipu3_uapi_stats_3a_bubble_info_per_stripe
+ stats_3a_bubble_per_stripe;
+ struct ipu3_uapi_ff_status stats_3a_status;
+} __packed;
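+
+/*
+ * Userspace sketch (process_awb() is a hypothetical helper): statistics
+ * arrive as one struct ipu3_uapi_stats_3a per dequeued
+ * V4L2_META_FMT_IPU3_STAT_3A buffer; check the enable bits before
+ * consuming each block.
+ *
+ *	struct ipu3_uapi_stats_3a *stats = buf;
+ *
+ *	if (stats->stats_3a_status.awb_en)
+ *		process_awb(&stats->awb_raw_buffer);
+ */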
+
+/******************* ipu3_uapi_acc_param *******************/
+
+#define IPU3_UAPI_ISP_VEC_ELEMS 64
+#define IPU3_UAPI_ISP_TNR3_VMEM_LEN 9
+
+#define IPU3_UAPI_BNR_LUT_SIZE 32
+
+/* number of elements in gamma correction LUT */
+#define IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES 256
+
+/* largest grid is 73x56, for grid_height_per_slice of 2, 73x2 = 146 */
+#define IPU3_UAPI_SHD_MAX_CELLS_PER_SET 146
+#define IPU3_UAPI_SHD_MAX_CFG_SETS 28
+/* Normalization shift aka nf */
+#define IPU3_UAPI_SHD_BLGR_NF_SHIFT 13
+#define IPU3_UAPI_SHD_BLGR_NF_MASK 7
+
+#define IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS 16
+#define IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS 14
+#define IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS 258
+#define IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS 24
+
+#define IPU3_UAPI_ANR_LUT_SIZE 26
+#define IPU3_UAPI_ANR_PYRAMID_SIZE 22
+
+#define IPU3_UAPI_LIN_LUT_SIZE 64
+
+/* Bayer Noise Reduction related structs */
+
+/**
+ * struct ipu3_uapi_bnr_static_config_wb_gains_config - White balance gains
+ *
+ * @gr: white balance gain for Gr channel.
+ * @r: white balance gain for R channel.
+ * @b: white balance gain for B channel.
+ * @gb: white balance gain for Gb channel.
+ *
+ * Precision u3.13, range [0, 8). White balance correction is done by applying
+ * a multiplicative gain to each color channels prior to BNR.
+ */
+struct ipu3_uapi_bnr_static_config_wb_gains_config {
+ __u16 gr;
+ __u16 r;
+ __u16 b;
+ __u16 gb;
+} __packed;
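+
+/*
+ * In u3.13 a gain of 1.0 is 1 << 13 = 8192, so unity white balance
+ * (an illustrative assumption) would be:
+ *
+ *	struct ipu3_uapi_bnr_static_config_wb_gains_config wb = {
+ *		.gr = 8192, .r = 8192, .b = 8192, .gb = 8192,
+ *	};
+ */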
+
+/**
+ * struct ipu3_uapi_bnr_static_config_wb_gains_thr_config - Threshold config
+ *
+ * @gr: white balance threshold gain for Gr channel.
+ * @r: white balance threshold gain for R channel.
+ * @b: white balance threshold gain for B channel.
+ * @gb: white balance threshold gain for Gb channel.
+ *
+ * Defines the threshold that specifies how different a defect pixel can be
+ * from its neighbors (used by the dynamic defect pixel correction sub-block).
+ * Precision u4.4, range [0, 8].
+ */
+struct ipu3_uapi_bnr_static_config_wb_gains_thr_config {
+ __u8 gr;
+ __u8 r;
+ __u8 b;
+ __u8 gb;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_thr_coeffs_config - Noise model
+ * coefficients that control the noise threshold
+ *
+ * @cf: Free coefficient for threshold calculation, range [0, 8191], default 0.
+ * @reserved0: reserved
+ * @cg: Gain coefficient for threshold calculation, [0, 31], default 8.
+ * @ci: Intensity coefficient for threshold calculation. range [0, 0x1f]
+ * default 6.
+ * format: u3.2 (3 most significant bits represent whole number,
+ * 2 least significant bits represent the fractional part
+ * with each count representing 0.25)
+ * e.g. 6 in binary format is 00110, that translates to 1.5
+ * @reserved1: reserved
+ * @r_nf: Normalization shift value for r^2 calculation, range [12, 20]
+ * where r is the radius of pixel [row, col] from the center of the sensor.
+ * default 14.
+ *
+ * Threshold used to distinguish between noise and details.
+ */
+struct ipu3_uapi_bnr_static_config_thr_coeffs_config {
+ __u32 cf:13;
+ __u32 reserved0:3;
+ __u32 cg:5;
+ __u32 ci:5;
+ __u32 reserved1:1;
+ __u32 r_nf:5;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config - Shading config
+ *
+ * @gr: Coefficient that defines lens shading gain approximation for Gr channel
+ * @r: Coefficient that defines lens shading gain approximation for R channel
+ * @b: Coefficient that defines lens shading gain approximation for B channel
+ * @gb: Coefficient that defines lens shading gain approximation for Gb channel
+ *
+ * Parameters for noise model (NM) adaptation of BNR due to shading correction.
+ * All of the above have precision u3.3 and default to 0.
+ */
+struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config {
+ __u8 gr;
+ __u8 r;
+ __u8 b;
+ __u8 gb;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_opt_center_config - Optical center config
+ *
+ * @x_reset: Reset value of X (col start - X center). Precision s12.0.
+ * @reserved0: reserved
+ * @y_reset: Reset value of Y (row start - Y center). Precision s12.0.
+ * @reserved2: reserved
+ *
+ * Distance from corner to optical center for NM adaptation due to shading
+ * correction (should be calculated based on shading tables)
+ */
+struct ipu3_uapi_bnr_static_config_opt_center_config {
+ __s32 x_reset:13;
+ __u32 reserved0:3;
+ __s32 y_reset:13;
+ __u32 reserved2:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_lut_config - BNR square root lookup table
+ *
+ * @values: pre-calculated values of square root function.
+ *
+ * LUT implementation of square root operation.
+ */
+struct ipu3_uapi_bnr_static_config_lut_config {
+ __u8 values[IPU3_UAPI_BNR_LUT_SIZE];
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_bp_ctrl_config - Detect bad pixels (bp)
+ *
+ * @bp_thr_gain: Defines the threshold that specifies how different a
+ * defect pixel can be from its neighbors. Threshold is
+ * dependent on de-noise threshold calculated by algorithm.
+ * Range [4, 31], default 4.
+ * @reserved0: reserved
+ * @defect_mode: Mode of addressed defect pixels,
+ * 0 - single defect pixel is expected,
+ * 1 - 2 adjacent defect pixels are expected, default 1.
+ * @bp_gain: Defines how the 2nd derivative that passes through a defect
+ * pixel differs from the 2nd derivatives that pass through
+ * neighbor pixels. u4.2, range [0, 256], default 8.
+ * @reserved1: reserved
+ * @w0_coeff: Blending coefficient of defect pixel correction.
+ * Precision u4, range [0, 8], default 8.
+ * @reserved2: reserved
+ * @w1_coeff: Enables the influence of an incorrect defect pixel correction
+ * to be avoided. Precision u4, range [1, 8], default 8.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_bnr_static_config_bp_ctrl_config {
+ __u32 bp_thr_gain:5;
+ __u32 reserved0:2;
+ __u32 defect_mode:1;
+ __u32 bp_gain:6;
+ __u32 reserved1:18;
+ __u32 w0_coeff:4;
+ __u32 reserved2:4;
+ __u32 w1_coeff:4;
+ __u32 reserved3:20;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config - Denoising config
+ *
+ * @alpha: Weight of central element of smoothing filter.
+ * @beta: Weight of peripheral elements of smoothing filter, default 4.
+ * @gamma: Weight of diagonal elements of smoothing filter, default 4.
+ *
+ * The beta and gamma parameters define the strength of the noise removal
+ * filter. All of the above have precision u0.4, range [0, 0xf]
+ * format: u0.4 (no / zero bits represent whole number,
+ * 4 bits represent the fractional part
+ * with each count representing 0.0625)
+ * e.g. 0xf translates to 0.0625x15 = 0.9375
+ *
+ * @reserved0: reserved
+ * @max_inf: Maximum increase of peripheral or diagonal element influence
+ * relative to the pre-defined value, range [0x5, 0xa]
+ * @reserved1: reserved
+ * @gd_enable: Green disparity enable control, 0 - disable, 1 - enable.
+ * @bpc_enable: Bad pixel correction enable control, 0 - disable, 1 - enable.
+ * @bnr_enable: Bayer noise removal enable control, 0 - disable, 1 - enable.
+ * @ff_enable: Fixed function enable, 0 - disable, 1 - enable.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config {
+ __u32 alpha:4;
+ __u32 beta:4;
+ __u32 gamma:4;
+ __u32 reserved0:4;
+ __u32 max_inf:4;
+ __u32 reserved1:7;
+ __u32 gd_enable:1;
+ __u32 bpc_enable:1;
+ __u32 bnr_enable:1;
+ __u32 ff_enable:1;
+ __u32 reserved2:1;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_opt_center_sqr_config - BNR optical center square
+ *
+ * @x_sqr_reset: Reset value of X^2.
+ * @y_sqr_reset: Reset value of Y^2.
+ *
+ * Please note:
+ *
+ * #. X and Y refer to
+ * &ipu3_uapi_bnr_static_config_opt_center_config
+ * #. Both structs are used in the threshold formula to calculate r^2, where
+ * r is the radius of pixel [row, col] from the center of the sensor.
+ */
+struct ipu3_uapi_bnr_static_config_opt_center_sqr_config {
+ __u32 x_sqr_reset;
+ __u32 y_sqr_reset;
+} __packed;
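+
+/*
+ * Sketch of the relation described above: the squared reset values are
+ * derived from the signed resets in
+ * &ipu3_uapi_bnr_static_config_opt_center_config, e.g.
+ *
+ *	bnr.opt_center_sqr.x_sqr_reset =
+ *		bnr.opt_center.x_reset * bnr.opt_center.x_reset;
+ *	bnr.opt_center_sqr.y_sqr_reset =
+ *		bnr.opt_center.y_reset * bnr.opt_center.y_reset;
+ */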
+
+/**
+ * struct ipu3_uapi_bnr_static_config - BNR static config
+ *
+ * @wb_gains: white balance gains &ipu3_uapi_bnr_static_config_wb_gains_config
+ * @wb_gains_thr: white balance gains threshold as defined by
+ * &ipu3_uapi_bnr_static_config_wb_gains_thr_config
+ * @thr_coeffs: coefficients of threshold
+ * &ipu3_uapi_bnr_static_config_thr_coeffs_config
+ * @thr_ctrl_shd: control of shading threshold
+ * &ipu3_uapi_bnr_static_config_thr_ctrl_shd_config
+ * @opt_center: optical center &ipu3_uapi_bnr_static_config_opt_center_config
+ *
+ * Above parameters and opt_center_sqr are used for white balance and shading.
+ *
+ * @lut: lookup table &ipu3_uapi_bnr_static_config_lut_config
+ * @bp_ctrl: detect and remove bad pixels as defined in struct
+ * &ipu3_uapi_bnr_static_config_bp_ctrl_config
+ * @dn_detect_ctrl: detect and remove noise.
+ * &ipu3_uapi_bnr_static_config_dn_detect_ctrl_config
+ * @column_size: The number of pixels in column.
+ * @opt_center_sqr: Reset value of r^2 to optical center, see
+ * &ipu3_uapi_bnr_static_config_opt_center_sqr_config.
+ */
+struct ipu3_uapi_bnr_static_config {
+ struct ipu3_uapi_bnr_static_config_wb_gains_config wb_gains;
+ struct ipu3_uapi_bnr_static_config_wb_gains_thr_config wb_gains_thr;
+ struct ipu3_uapi_bnr_static_config_thr_coeffs_config thr_coeffs;
+ struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config thr_ctrl_shd;
+ struct ipu3_uapi_bnr_static_config_opt_center_config opt_center;
+ struct ipu3_uapi_bnr_static_config_lut_config lut;
+ struct ipu3_uapi_bnr_static_config_bp_ctrl_config bp_ctrl;
+ struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config dn_detect_ctrl;
+ __u32 column_size;
+ struct ipu3_uapi_bnr_static_config_opt_center_sqr_config opt_center_sqr;
+} __packed;
+
+/**
+ * struct ipu3_uapi_bnr_static_config_green_disparity - Correct green disparity
+ *
+ * @gd_red: Shading gain coeff for gr disparity level in bright red region.
+ * Precision u0.6, default 4 (0.0625).
+ * @reserved0: reserved
+ * @gd_green: Shading gain coeff for gr disparity level in bright green
+ * region. Precision u0.6, default 4 (0.0625).
+ * @reserved1: reserved
+ * @gd_blue: Shading gain coeff for gr disparity level in bright blue region.
+ * Precision u0.6, default 4 (0.0625).
+ * @reserved2: reserved
+ * @gd_black: Maximal green disparity level in dark region (stronger disparity
+ * assumed to be image detail). Precision u14, default 80.
+ * @reserved3: reserved
+ * @gd_shading: Change maximal green disparity level according to square
+ * distance from image center.
+ * @reserved4: reserved
+ * @gd_support: Lower bound for the number of second green color pixels in
+ * current pixel neighborhood with less than threshold difference
+ * from it.
+ *
+ * The shading gain coeff of red, green, blue and black are used to calculate
+ * threshold given a pixel's color value and its coordinates in the image.
+ *
+ * @reserved5: reserved
+ * @gd_clip: Turn green disparity clip on/off, [0, 1], default 1.
+ * @gd_central_weight: Central pixel weight in 9 pixels weighted sum.
+ */
+struct ipu3_uapi_bnr_static_config_green_disparity {
+ __u32 gd_red:6;
+ __u32 reserved0:2;
+ __u32 gd_green:6;
+ __u32 reserved1:2;
+ __u32 gd_blue:6;
+ __u32 reserved2:10;
+ __u32 gd_black:14;
+ __u32 reserved3:2;
+ __u32 gd_shading:7;
+ __u32 reserved4:1;
+ __u32 gd_support:2;
+ __u32 reserved5:1;
+ __u32 gd_clip:1;
+ __u32 gd_central_weight:4;
+} __packed;
+
+/**
+ * struct ipu3_uapi_dm_config - De-mosaic parameters
+ *
+ * @dm_en: de-mosaic enable.
+ * @ch_ar_en: Checker artifacts removal enable flag. Default 0.
+ * @fcc_en: False color correction (FCC) enable flag. Default 0.
+ * @reserved0: reserved
+ * @frame_width: do not care
+ * @gamma_sc: Sharpening coefficient (coefficient of 2-d derivation of
+ * complementary color in Hamilton-Adams interpolation).
+ * u5, range [0, 31], default 8.
+ * @reserved1: reserved
+ * @lc_ctrl: Parameter that controls weights of Chroma Homogeneity metric
+ * in calculation of final homogeneity metric.
+ * u5, range [0, 31], default 7.
+ * @reserved2: reserved
+ * @cr_param1: First parameter that defines Checker artifact removal
+ * feature gain. Precision u5, range [0, 31], default 8.
+ * @reserved3: reserved
+ * @cr_param2: Second parameter that defines Checker artifact removal
+ * feature gain. Precision u5, range [0, 31], default 8.
+ * @reserved4: reserved
+ * @coring_param: Defines the power of the false color correction operation:
+ * low for preserving edge colors, high for preserving gray
+ * edge artifacts.
+ * Precision u1.4, range [0, 1.9375], default 4 (0.25).
+ * @reserved5: reserved
+ *
+ * The demosaic fixed function block is responsible for converting Bayer
+ * (mosaiced) images into color images based on a demosaicing algorithm.
+ */
+struct ipu3_uapi_dm_config {
+ __u32 dm_en:1;
+ __u32 ch_ar_en:1;
+ __u32 fcc_en:1;
+ __u32 reserved0:13;
+ __u32 frame_width:16;
+
+ __u32 gamma_sc:5;
+ __u32 reserved1:3;
+ __u32 lc_ctrl:5;
+ __u32 reserved2:3;
+ __u32 cr_param1:5;
+ __u32 reserved3:3;
+ __u32 cr_param2:5;
+ __u32 reserved4:3;
+
+ __u32 coring_param:5;
+ __u32 reserved5:27;
+} __packed;
+
+/**
+ * struct ipu3_uapi_ccm_mat_config - Color correction matrix
+ *
+ * @coeff_m11: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_m12: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m13: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_r: Bias 3x1 coefficient, range [-8191, 8191]
+ * @coeff_m21: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_m22: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m23: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_g: Bias 3x1 coefficient, range [-8191, 8191]
+ * @coeff_m31: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_m32: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m33: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_b: Bias 3x1 coefficient, range [-8191, 8191]
+ *
+ * Transform the sensor specific color space to standard sRGB by applying a 3x3
+ * matrix and adding a bias vector O. The transformation is basically a rotation
+ * and translation in the 3-dimensional color space. Here are the defaults:
+ *
+ * 9775, -2671, 1087, 0
+ * -1071, 8303, 815, 0
+ * -23, -7887, 16103, 0
+ */
+struct ipu3_uapi_ccm_mat_config {
+ __s16 coeff_m11;
+ __s16 coeff_m12;
+ __s16 coeff_m13;
+ __s16 coeff_o_r;
+ __s16 coeff_m21;
+ __s16 coeff_m22;
+ __s16 coeff_m23;
+ __s16 coeff_o_g;
+ __s16 coeff_m31;
+ __s16 coeff_m32;
+ __s16 coeff_m33;
+ __s16 coeff_o_b;
+} __packed;
+
+/**
+ * struct ipu3_uapi_gamma_corr_ctrl - Gamma correction
+ *
+ * @enable: gamma correction enable.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_gamma_corr_ctrl {
+ __u32 enable:1;
+ __u32 reserved:31;
+} __packed;
+
+/**
+ * struct ipu3_uapi_gamma_corr_lut - Per-pixel tone mapping implemented as LUT.
+ *
+ * @lut: 256 tabulated values of the gamma function, LUT[1]..LUT[256],
+ * format u13.0, range [0, 8191].
+ *
+ * The tone mapping operation is done by a piecewise linear graph
+ * that is implemented as a lookup table (LUT). The pixel component input
+ * intensity is the X-axis of the graph and serves as the table index.
+ */
+struct ipu3_uapi_gamma_corr_lut {
+ __u16 lut[IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES];
+} __packed;
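+
+/*
+ * Illustrative sketch, an assumption rather than a driver requirement:
+ * fill the LUT with a plain 2.2 gamma curve mapped onto the documented
+ * [0, 8191] range.
+ *
+ *	for (i = 0; i < IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES; i++)
+ *		lut.lut[i] = 8191 * pow(i / 255.0, 1.0 / 2.2);
+ */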
+
+/**
+ * struct ipu3_uapi_gamma_config - Gamma config
+ *
+ * @gc_ctrl: control of gamma correction &ipu3_uapi_gamma_corr_ctrl
+ * @gc_lut: lookup table of gamma correction &ipu3_uapi_gamma_corr_lut
+ */
+struct ipu3_uapi_gamma_config {
+ struct ipu3_uapi_gamma_corr_ctrl gc_ctrl __attribute__((aligned(32)));
+ struct ipu3_uapi_gamma_corr_lut gc_lut __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_csc_mat_config - Color space conversion matrix config
+ *
+ * @coeff_c11: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_c12: Conversion matrix value, format s0.14, range [-8192, 8191].
+ * @coeff_c13: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_b1: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
+ * @coeff_c21: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_c22: Conversion matrix value, format s0.14, range [-8192, 8191].
+ * @coeff_c23: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_b2: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
+ * @coeff_c31: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_c32: Conversion matrix value, format s0.14, range [-8192, 8191].
+ * @coeff_c33: Conversion matrix value, format s0.14, range [-16384, 16383].
+ * @coeff_b3: Bias 3x1 coefficient, s13.0 range [-8192, 8191].
+ *
+ * Each pixel is transformed from RGB to YUV (Y - brightness/luminance,
+ * UV - chroma) by multiplying the pixel's values by a 3x3 matrix and adding
+ * an optional 3x1 bias vector. Here are the default values for the matrix:
+ *
+ * 4898, 9617, 1867, 0,
+ * -2410, -4732, 7143, 0,
+ * 10076, -8437, -1638, 0,
+ *
+ * (i.e. for real number 0.299, 0.299 * 2^14 becomes 4898.)
+ */
+struct ipu3_uapi_csc_mat_config {
+ __s16 coeff_c11;
+ __s16 coeff_c12;
+ __s16 coeff_c13;
+ __s16 coeff_b1;
+ __s16 coeff_c21;
+ __s16 coeff_c22;
+ __s16 coeff_c23;
+ __s16 coeff_b2;
+ __s16 coeff_c31;
+ __s16 coeff_c32;
+ __s16 coeff_c33;
+ __s16 coeff_b3;
+} __packed;
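+
+/*
+ * The default luma row above corresponds to the BT.601 weights scaled to
+ * s0.14, e.g.
+ *
+ *	csc.coeff_c11 = (__s16)(0.299 * (1 << 14));	// = 4898
+ *	csc.coeff_c12 = (__s16)(0.587 * (1 << 14));	// = 9617
+ *	csc.coeff_c13 = (__s16)(0.114 * (1 << 14));	// = 1867
+ */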
+
+/**
+ * struct ipu3_uapi_cds_params - Chroma down-scaling
+ *
+ * @ds_c00: range [0, 3]
+ * @ds_c01: range [0, 3]
+ * @ds_c02: range [0, 3]
+ * @ds_c03: range [0, 3]
+ * @ds_c10: range [0, 3]
+ * @ds_c11: range [0, 3]
+ * @ds_c12: range [0, 3]
+ * @ds_c13: range [0, 3]
+ *
+ * If the user does not provide values, the above 4x2 filter will use the
+ * following defaults:
+ * 1, 3, 3, 1,
+ * 1, 3, 3, 1,
+ *
+ * @ds_nf: Normalization factor for Chroma output downscaling filter,
+ * range [0, 4], default 2.
+ * @reserved0: reserved
+ * @csc_en: Color space conversion enable
+ * @uv_bin_output: 0: output YUV 4:2:0, 1: output YUV 4:2:2 (default).
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_cds_params {
+ __u32 ds_c00:2;
+ __u32 ds_c01:2;
+ __u32 ds_c02:2;
+ __u32 ds_c03:2;
+ __u32 ds_c10:2;
+ __u32 ds_c11:2;
+ __u32 ds_c12:2;
+ __u32 ds_c13:2;
+ __u32 ds_nf:5;
+ __u32 reserved0:3;
+ __u32 csc_en:1;
+ __u32 uv_bin_output:1;
+ __u32 reserved1:6;
+} __packed;
+
+/**
+ * struct ipu3_uapi_shd_grid_config - Bayer shading (darkening) correction
+ *
+ * @width: Grid horizontal dimensions, u8, [8, 128], default 73
+ * @height: Grid vertical dimensions, u8, [8, 128], default 56
+ * @block_width_log2: Log2 of the width of the grid cell in pixel count
+ * u4, [0, 15], default value 5.
+ * @reserved0: reserved
+ * @block_height_log2: Log2 of the height of the grid cell in pixel count
+ * u4, [0, 15], default value 6.
+ * @reserved1: reserved
+ * @grid_height_per_slice: SHD_MAX_CELLS_PER_SET/width.
+ * (with SHD_MAX_CELLS_PER_SET = 146).
+ * @x_start: X value of top left corner of sensor relative to ROI
+ * s13, [-4096, 0], default 0, only negative values.
+ * @y_start: Y value of top left corner of sensor relative to ROI
+ * s13, [-4096, 0], default 0, only negative values.
+ */
+struct ipu3_uapi_shd_grid_config {
+ /* reg 0 */
+ __u8 width;
+ __u8 height;
+ __u8 block_width_log2:3;
+ __u8 reserved0:1;
+ __u8 block_height_log2:3;
+ __u8 reserved1:1;
+ __u8 grid_height_per_slice;
+ /* reg 1 */
+ __s16 x_start;
+ __s16 y_start;
+} __packed;
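+
+/*
+ * Worked example of @grid_height_per_slice for the default 73x56 grid:
+ * IPU3_UAPI_SHD_MAX_CELLS_PER_SET / width = 146 / 73 = 2.
+ */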
+
+/**
+ * struct ipu3_uapi_shd_general_config - Shading general config
+ *
+ * @init_set_vrt_offst_ul: set vertical offset,
+ * y_start >> block_height_log2 % grid_height_per_slice.
+ * @shd_enable: shading enable.
+ * @gain_factor: Gain factor. Shifts the calculated anti-shading value. Precision u2.
+ * 0x0 - gain factor [1, 5], means no shift interpolated value.
+ * 0x1 - gain factor [1, 9], means shift interpolated by 1.
+ * 0x2 - gain factor [1, 17], means shift interpolated by 2.
+ * @reserved: reserved
+ *
+ * Correction is performed by multiplying a gain factor for each of the 4 Bayer
+ * channels as a function of the pixel location in the sensor.
+ */
+struct ipu3_uapi_shd_general_config {
+ __u32 init_set_vrt_offst_ul:8;
+ __u32 shd_enable:1;
+ __u32 gain_factor:2;
+ __u32 reserved:21;
+} __packed;
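+
+/*
+ * One plausible reading of the @init_set_vrt_offst_ul formula above
+ * (shift first, then modulo), shown as a sketch:
+ *
+ *	general.init_set_vrt_offst_ul =
+ *		(grid.y_start >> grid.block_height_log2) %
+ *		grid.grid_height_per_slice;
+ */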
+
+/**
+ * struct ipu3_uapi_shd_black_level_config - Black level correction
+ *
+ * @bl_r: Bias value for red. s11 range [-2048, 2047].
+ * @bl_gr: Bias value for green red (Gr). s11 range [-2048, 2047].
+ * @bl_gb: Bias value for green blue (Gb). s11 range [-2048, 2047].
+ * @bl_b: Bias value for blue. s11 range [-2048, 2047].
+ */
+struct ipu3_uapi_shd_black_level_config {
+ __s16 bl_r;
+ __s16 bl_gr;
+ __s16 bl_gb;
+ __s16 bl_b;
+} __packed;
+
+/**
+ * struct ipu3_uapi_shd_config_static - Shading config static
+ *
+ * @grid: shading grid config &ipu3_uapi_shd_grid_config
+ * @general: shading general config &ipu3_uapi_shd_general_config
+ * @black_level: black level config for shading correction as defined by
+ * &ipu3_uapi_shd_black_level_config
+ */
+struct ipu3_uapi_shd_config_static {
+ struct ipu3_uapi_shd_grid_config grid;
+ struct ipu3_uapi_shd_general_config general;
+ struct ipu3_uapi_shd_black_level_config black_level;
+} __packed;
+
+/**
+ * struct ipu3_uapi_shd_lut - Shading gain factor lookup table.
+ *
+ * @sets: array
+ * @sets.r_and_gr: Red and GreenR Lookup table.
+ * @sets.r_and_gr.r: Red shading factor.
+ * @sets.r_and_gr.gr: GreenR shading factor.
+ * @sets.reserved1: reserved
+ * @sets.gb_and_b: GreenB and Blue Lookup table.
+ * @sets.gb_and_b.gb: GreenB shading factor.
+ * @sets.gb_and_b.b: Blue shading factor.
+ * @sets.reserved2: reserved
+ *
+ * Map to shading correction LUT register set.
+ */
+struct ipu3_uapi_shd_lut {
+ struct {
+ struct {
+ __u16 r;
+ __u16 gr;
+ } r_and_gr[IPU3_UAPI_SHD_MAX_CELLS_PER_SET];
+ __u8 reserved1[24];
+ struct {
+ __u16 gb;
+ __u16 b;
+ } gb_and_b[IPU3_UAPI_SHD_MAX_CELLS_PER_SET];
+ __u8 reserved2[24];
+ } sets[IPU3_UAPI_SHD_MAX_CFG_SETS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_shd_config - Shading config
+ *
+ * @shd: shading static config, see &ipu3_uapi_shd_config_static
+ * @shd_lut: shading lookup table &ipu3_uapi_shd_lut
+ */
+struct ipu3_uapi_shd_config {
+ struct ipu3_uapi_shd_config_static shd __attribute__((aligned(32)));
+ struct ipu3_uapi_shd_lut shd_lut __attribute__((aligned(32)));
+} __packed;
+
+/* Image Enhancement Filter directed */
+
+/**
+ * struct ipu3_uapi_iefd_cux2 - IEFd Config Unit 2 parameters
+ *
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A of Config Unit, s4.4, default 0.
+ * @b01: Slope B, always 0.
+ *
+ * Calculate weight for blending directed and non-directed denoise elements
+ *
+ * Note:
+ * Each instance of Config Unit needs X coordinate of n points and
+ * slope A factor between points calculated by driver based on calibration
+ * parameters.
+ *
+ * All CU inputs are unsigned; they will be converted to signed when written
+ * to the register, i.e. a01 will be written to a 9-bit register in s4.4
+ * format. This applies to &ipu3_uapi_iefd_cux6_ed, &ipu3_uapi_iefd_cux2_1,
+ * &ipu3_uapi_iefd_cux4 and &ipu3_uapi_iefd_cux6_rad.
+ */
+struct ipu3_uapi_iefd_cux2 {
+ __u32 x0:9;
+ __u32 x1:9;
+ __u32 a01:9;
+ __u32 b01:5;
+} __packed;
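+
+/*
+ * Sketch of the slope computation the note above attributes to the driver,
+ * in s4.4 fixed point; y0/y1 are hypothetical calibration outputs at the
+ * points x0/x1.
+ *
+ *	cu.a01 = ((y1 - y0) << 4) / (cu.x1 - cu.x0);
+ */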
+
+/**
+ * struct ipu3_uapi_iefd_cux6_ed - Calculate power of non-directed sharpening
+ * element, Config Unit 6 for edge detail (ED).
+ *
+ * @x0: X coordinate of point 0, u9.0, default 0.
+ * @x1: X coordinate of point 1, u9.0, default 0.
+ * @x2: X coordinate of point 2, u9.0, default 0.
+ * @reserved0: reserved
+ * @x3: X coordinate of point 3, u9.0, default 0.
+ * @x4: X coordinate of point 4, u9.0, default 0.
+ * @x5: X coordinate of point 5, u9.0, default 0.
+ * @reserved1: reserved
+ * @a01: slope A points 01, s4.4, default 0.
+ * @a12: slope A points 12, s4.4, default 0.
+ * @a23: slope A points 23, s4.4, default 0.
+ * @reserved2: reserved
+ * @a34: slope A points 34, s4.4, default 0.
+ * @a45: slope A points 45, s4.4, default 0.
+ * @reserved3: reserved
+ * @b01: slope B points 01, s4.4, default 0.
+ * @b12: slope B points 12, s4.4, default 0.
+ * @b23: slope B points 23, s4.4, default 0.
+ * @reserved4: reserved
+ * @b34: slope B points 34, s4.4, default 0.
+ * @b45: slope B points 45, s4.4, default 0.
+ * @reserved5: reserved.
+ */
+struct ipu3_uapi_iefd_cux6_ed {
+ __u32 x0:9;
+ __u32 x1:9;
+ __u32 x2:9;
+ __u32 reserved0:5;
+
+ __u32 x3:9;
+ __u32 x4:9;
+ __u32 x5:9;
+ __u32 reserved1:5;
+
+ __u32 a01:9;
+ __u32 a12:9;
+ __u32 a23:9;
+ __u32 reserved2:5;
+
+ __u32 a34:9;
+ __u32 a45:9;
+ __u32 reserved3:14;
+
+ __u32 b01:9;
+ __u32 b12:9;
+ __u32 b23:9;
+ __u32 reserved4:5;
+
+ __u32 b34:9;
+ __u32 b45:9;
+ __u32 reserved5:14;
+} __packed;
+
+/**
+ * struct ipu3_uapi_iefd_cux2_1 - Calculate power of non-directed denoise
+ * element apply.
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A of Config Unit, s4.4, default 0.
+ * @reserved1: reserved
+ * @b01: offset B0 of Config Unit, u7.0, default 0.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_iefd_cux2_1 {
+ __u32 x0:9;
+ __u32 x1:9;
+ __u32 a01:9;
+ __u32 reserved1:5;
+
+ __u32 b01:8;
+ __u32 reserved2:24;
+} __packed;
+
+/**
+ * struct ipu3_uapi_iefd_cux4 - Calculate power of non-directed sharpening
+ * element.
+ *
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @x2: X2 point of Config Unit, u9.0, default 0.
+ * @reserved0: reserved
+ * @x3: X3 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A0 of Config Unit, s4.4, default 0.
+ * @a12: Slope A1 of Config Unit, s4.4, default 0.
+ * @reserved1: reserved
+ * @a23: Slope A2 of Config Unit, s4.4, default 0.
+ * @b01: Offset B0 of Config Unit, s7.0, default 0.
+ * @b12: Offset B1 of Config Unit, s7.0, default 0.
+ * @reserved2: reserved
+ * @b23: Offset B2 of Config Unit, s7.0, default 0.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_iefd_cux4 {
+ __u32 x0:9;
+ __u32 x1:9;
+ __u32 x2:9;
+ __u32 reserved0:5;
+
+ __u32 x3:9;
+ __u32 a01:9;
+ __u32 a12:9;
+ __u32 reserved1:5;
+
+ __u32 a23:9;
+ __u32 b01:8;
+ __u32 b12:8;
+ __u32 reserved2:7;
+
+ __u32 b23:8;
+ __u32 reserved3:24;
+} __packed;
+
+/**
+ * struct ipu3_uapi_iefd_cux6_rad - Radial Config Unit (CU)
+ *
+ * @x0: X0 point of radial Config Unit, u8.0
+ * @x1: X1 point of radial Config Unit, u8.0
+ * @x2: X2 point of radial Config Unit, u8.0
+ * @x3: X3 point of radial Config Unit, u8.0
+ * @x4: X4 point of radial Config Unit, u8.0
+ * @x5: X5 point of radial Config Unit, u8.0
+ * @reserved1: reserved
+ * @a01: Slope A of Config Unit radial, s7.8
+ * @a12: Slope A of Config Unit radial, s7.8
+ * @a23: Slope A of Config Unit radial, s7.8
+ * @a34: Slope A of Config Unit radial, s7.8
+ * @a45: Slope A of Config Unit radial, s7.8
+ * @reserved2: reserved
+ * @b01: Slope B of Config Unit radial, s9.0
+ * @b12: Slope B of Config Unit radial, s9.0
+ * @b23: Slope B of Config Unit radial, s9.0
+ * @reserved4: reserved
+ * @b34: Slope B of Config Unit radial, s9.0
+ * @b45: Slope B of Config Unit radial, s9.0
+ * @reserved5: reserved
+ */
+struct ipu3_uapi_iefd_cux6_rad {
+ __u32 x0:8;
+ __u32 x1:8;
+ __u32 x2:8;
+ __u32 x3:8;
+
+ __u32 x4:8;
+ __u32 x5:8;
+ __u32 reserved1:16;
+
+ __u32 a01:16;
+ __u32 a12:16;
+
+ __u32 a23:16;
+ __u32 a34:16;
+
+ __u32 a45:16;
+ __u32 reserved2:16;
+
+ __u32 b01:10;
+ __u32 b12:10;
+ __u32 b23:10;
+ __u32 reserved4:2;
+
+ __u32 b34:10;
+ __u32 b45:10;
+ __u32 reserved5:12;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_cfg_units - IEFd Config Units parameters
+ *
+ * @cu_1: calculate weight for blending directed and
+ * non-directed denoise elements. See &ipu3_uapi_iefd_cux2
+ * @cu_ed: calculate power of non-directed sharpening element, see
+ * &ipu3_uapi_iefd_cux6_ed
+ * @cu_3: calculate weight for blending directed and
+ * non-directed denoise elements. A &ipu3_uapi_iefd_cux2
+ * @cu_5: calculate power of non-directed denoise element apply, use
+ * &ipu3_uapi_iefd_cux2_1
+ * @cu_6: calculate power of non-directed sharpening element. See
+ * &ipu3_uapi_iefd_cux4
+ * @cu_7: calculate weight for blending directed and
+ * non-directed denoise elements. Use &ipu3_uapi_iefd_cux2
+ * @cu_unsharp: Config Unit of unsharp &ipu3_uapi_iefd_cux4
+ * @cu_radial: Config Unit of radial &ipu3_uapi_iefd_cux6_rad
+ * @cu_vssnlm: Config Unit of vssnlm &ipu3_uapi_iefd_cux2
+ */
+struct ipu3_uapi_yuvp1_iefd_cfg_units {
+ struct ipu3_uapi_iefd_cux2 cu_1;
+ struct ipu3_uapi_iefd_cux6_ed cu_ed;
+ struct ipu3_uapi_iefd_cux2 cu_3;
+ struct ipu3_uapi_iefd_cux2_1 cu_5;
+ struct ipu3_uapi_iefd_cux4 cu_6;
+ struct ipu3_uapi_iefd_cux2 cu_7;
+ struct ipu3_uapi_iefd_cux4 cu_unsharp;
+ struct ipu3_uapi_iefd_cux6_rad cu_radial;
+ struct ipu3_uapi_iefd_cux2 cu_vssnlm;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_config_s - IEFd config
+ *
+ * @horver_diag_coeff: Gradient compensation. Compared with vertical /
+ * horizontal (0 / 90 degree), coefficient of diagonal (45 /
+ * 135 degree) direction should be corrected by approx.
+ * 1/sqrt(2).
+ * @reserved0: reserved
+ * @clamp_stitch: Slope to stitch between clamped and unclamped edge values
+ * @reserved1: reserved
+ * @direct_metric_update: Update coeff for direction metric
+ * @reserved2: reserved
+ * @ed_horver_diag_coeff: Radial Coefficient that compensates for
+ * different distance for vertical/horizontal and
+ * diagonal gradient calculation (approx. 1/sqrt(2))
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp1_iefd_config_s {
+ __u32 horver_diag_coeff:7;
+ __u32 reserved0:1;
+ __u32 clamp_stitch:6;
+ __u32 reserved1:2;
+ __u32 direct_metric_update:5;
+ __u32 reserved2:3;
+ __u32 ed_horver_diag_coeff:7;
+ __u32 reserved3:1;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_control - IEFd control
+ *
+ * @iefd_en: Enable IEFd
+ * @denoise_en: Enable denoise
+ * @direct_smooth_en: Enable directional smooth
+ * @rad_en: Enable radial update
+ * @vssnlm_en: Enable VSSNLM output filter
+ * @reserved: reserved
+ */
+struct ipu3_uapi_yuvp1_iefd_control {
+ __u32 iefd_en:1;
+ __u32 denoise_en:1;
+ __u32 direct_smooth_en:1;
+ __u32 rad_en:1;
+ __u32 vssnlm_en:1;
+ __u32 reserved:27;
+} __packed;
+
+/**
+ * struct ipu3_uapi_sharp_cfg - Sharpening config
+ *
+ * @nega_lmt_txt: Sharpening limit for negative overshoots for texture.
+ * @reserved0: reserved
+ * @posi_lmt_txt: Sharpening limit for positive overshoots for texture.
+ * @reserved1: reserved
+ * @nega_lmt_dir: Sharpening limit for negative overshoots for direction (edge).
+ * @reserved2: reserved
+ * @posi_lmt_dir: Sharpening limit for positive overshoots for direction (edge).
+ * @reserved3: reserved
+ *
+ * Fixed point type u13.0, range [0, 8191].
+ */
+struct ipu3_uapi_sharp_cfg {
+ __u32 nega_lmt_txt:13;
+ __u32 reserved0:19;
+ __u32 posi_lmt_txt:13;
+ __u32 reserved1:19;
+ __u32 nega_lmt_dir:13;
+ __u32 reserved2:19;
+ __u32 posi_lmt_dir:13;
+ __u32 reserved3:19;
+} __packed;
+
+/**
+ * struct ipu3_uapi_far_w - Sharpening config for far sub-group
+ *
+ * @dir_shrp: Weight of wide direct sharpening, u1.6, range [0, 64], default 64.
+ * @reserved0: reserved
+ * @dir_dns: Weight of wide direct denoising, u1.6, range [0, 64], default 0.
+ * @reserved1: reserved
+ * @ndir_dns_powr: Power of non-direct denoising,
+ * Precision u1.6, range [0, 64], default 64.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_far_w {
+ __u32 dir_shrp:7;
+ __u32 reserved0:1;
+ __u32 dir_dns:7;
+ __u32 reserved1:1;
+ __u32 ndir_dns_powr:7;
+ __u32 reserved2:9;
+} __packed;
+
+/**
+ * struct ipu3_uapi_unsharp_cfg - Unsharp config
+ *
+ * @unsharp_weight: Unsharp mask blending weight.
+ * u1.6, range [0, 64], default 16.
+ * 0 - disabled, 64 - use only unsharp.
+ * @reserved0: reserved
+ * @unsharp_amount: Unsharp mask amount, u4.5, range [0, 511], default 0.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_unsharp_cfg {
+ __u32 unsharp_weight:7;
+ __u32 reserved0:1;
+ __u32 unsharp_amount:9;
+ __u32 reserved1:15;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_shrp_cfg - IEFd sharpness config
+ *
+ * @cfg: sharpness config &ipu3_uapi_sharp_cfg
+ * @far_w: wide range config, value as specified by &ipu3_uapi_far_w:
+ * The 5x5 environment is separated into 2 sub-groups, the 3x3 nearest
+ * neighbors (8 pixels called Near), and the second order neighborhood
+ * around them (16 pixels called Far).
+ * @unshrp_cfg: unsharpness config. &ipu3_uapi_unsharp_cfg
+ */
+struct ipu3_uapi_yuvp1_iefd_shrp_cfg {
+ struct ipu3_uapi_sharp_cfg cfg;
+ struct ipu3_uapi_far_w far_w;
+ struct ipu3_uapi_unsharp_cfg unshrp_cfg;
+} __packed;
+
+/**
+ * struct ipu3_uapi_unsharp_coef0 - Unsharp mask coefficients
+ *
+ * @c00: Coeff11, s0.8, range [-255, 255], default 1.
+ * @c01: Coeff12, s0.8, range [-255, 255], default 5.
+ * @c02: Coeff13, s0.8, range [-255, 255], default 9.
+ * @reserved: reserved
+ *
+ * Configurable registers for common sharpening support.
+ */
+struct ipu3_uapi_unsharp_coef0 {
+ __u32 c00:9;
+ __u32 c01:9;
+ __u32 c02:9;
+ __u32 reserved:5;
+} __packed;
+
+/**
+ * struct ipu3_uapi_unsharp_coef1 - Unsharp mask coefficients
+ *
+ * @c11: Coeff22, s0.8, range [-255, 255], default 29.
+ * @c12: Coeff23, s0.8, range [-255, 255], default 55.
+ * @c22: Coeff33, s0.8, range [-255, 255], default 96.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_unsharp_coef1 {
+ __u32 c11:9;
+ __u32 c12:9;
+ __u32 c22:9;
+ __u32 reserved:5;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_unshrp_cfg - Unsharp mask config
+ *
+ * @unsharp_coef0: unsharp coefficient 0 config. See &ipu3_uapi_unsharp_coef0
+ * @unsharp_coef1: unsharp coefficient 1 config. See &ipu3_uapi_unsharp_coef1
+ */
+struct ipu3_uapi_yuvp1_iefd_unshrp_cfg {
+ struct ipu3_uapi_unsharp_coef0 unsharp_coef0;
+ struct ipu3_uapi_unsharp_coef1 unsharp_coef1;
+} __packed;
+
+/**
+ * struct ipu3_uapi_radial_reset_xy - Radial coordinate reset
+ *
+ * @x: Radial reset of x coordinate. Precision s12, [-4095, 4095], default 0.
+ * @reserved0: reserved
+ * @y: Radial reset of y coordinate. Precision s12, [-4095, 4095], default 0.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_radial_reset_xy {
+ __s32 x:13;
+ __u32 reserved0:3;
+ __s32 y:13;
+ __u32 reserved1:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_radial_reset_x2 - Radial X^2 reset
+ *
+ * @x2: Radial reset of x^2 coordinate. Precision u24, default 0.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_radial_reset_x2 {
+ __u32 x2:24;
+ __u32 reserved:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_radial_reset_y2 - Radial Y^2 reset
+ *
+ * @y2: Radial reset of y^2 coordinate. Precision u24, default 0.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_radial_reset_y2 {
+ __u32 y2:24;
+ __u32 reserved:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_radial_cfg - Radial config
+ *
+ * @rad_nf: Radial. The R^2 normalization factor is scaled down by
+ * 2^-(15 + scale).
+ * @reserved0: reserved
+ * @rad_inv_r2: Radial R^-2 normalized to (0.5..1).
+ * Precision u7, range [0, 127].
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_radial_cfg {
+ __u32 rad_nf:4;
+ __u32 reserved0:4;
+ __u32 rad_inv_r2:7;
+ __u32 reserved1:17;
+} __packed;
+
+/**
+ * struct ipu3_uapi_rad_far_w - Radial FAR sub-group
+ *
+ * @rad_dir_far_sharp_w: Weight of wide direct sharpening, u1.6, range [0, 64],
+ * default 64.
+ * @rad_dir_far_dns_w: Weight of wide direct denoising, u1.6, range [0, 64],
+ * default 0.
+ * @rad_ndir_far_dns_power: power of non-directed denoising, u1.6,
+ * range [0, 64], default 0.
+ * @reserved: reserved
+ */
+struct ipu3_uapi_rad_far_w {
+ __u32 rad_dir_far_sharp_w:8;
+ __u32 rad_dir_far_dns_w:8;
+ __u32 rad_ndir_far_dns_power:8;
+ __u32 reserved:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_cu_cfg0 - Radius Config Unit cfg0 register
+ *
+ * @cu6_pow: Power of CU6. Power of non-direct sharpening, u3.4.
+ * @reserved0: reserved
+ * @cu_unsharp_pow: Power of unsharp mask, u2.4.
+ * @reserved1: reserved
+ * @rad_cu6_pow: Radial/corner CU6. Directed sharpening power, u3.4.
+ * @reserved2: reserved
+ * @rad_cu_unsharp_pow: Radial power of unsharp mask, u2.4.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_cu_cfg0 {
+ __u32 cu6_pow:7;
+ __u32 reserved0:1;
+ __u32 cu_unsharp_pow:7;
+ __u32 reserved1:1;
+ __u32 rad_cu6_pow:7;
+ __u32 reserved2:1;
+ __u32 rad_cu_unsharp_pow:6;
+ __u32 reserved3:2;
+} __packed;
+
+/**
+ * struct ipu3_uapi_cu_cfg1 - Radius Config Unit cfg1 register
+ *
+ * @rad_cu6_x1: X1 point of Config Unit 6, precision u9.0.
+ * @reserved0: reserved
+ * @rad_cu_unsharp_x1: X1 point for Config Unit unsharp for radial/corner point
+ * precision u9.0.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_cu_cfg1 {
+ __u32 rad_cu6_x1:9;
+ __u32 reserved0:1;
+ __u32 rad_cu_unsharp_x1:9;
+ __u32 reserved1:13;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_rad_cfg - IEFd parameters changed radially over
+ * the picture plane.
+ *
+ * @reset_xy: reset xy value in radial calculation. &ipu3_uapi_radial_reset_xy
+ * @reset_x2: reset x square value in radial calculation. See struct
+ * &ipu3_uapi_radial_reset_x2
+ * @reset_y2: reset y square value in radial calculation. See struct
+ * &ipu3_uapi_radial_reset_y2
+ * @cfg: radial config defined in &ipu3_uapi_radial_cfg
+ * @rad_far_w: weight for wide range radial. &ipu3_uapi_rad_far_w
+ * @cu_cfg0: configuration unit 0. See &ipu3_uapi_cu_cfg0
+ * @cu_cfg1: configuration unit 1. See &ipu3_uapi_cu_cfg1
+ */
+struct ipu3_uapi_yuvp1_iefd_rad_cfg {
+ struct ipu3_uapi_radial_reset_xy reset_xy;
+ struct ipu3_uapi_radial_reset_x2 reset_x2;
+ struct ipu3_uapi_radial_reset_y2 reset_y2;
+ struct ipu3_uapi_radial_cfg cfg;
+ struct ipu3_uapi_rad_far_w rad_far_w;
+ struct ipu3_uapi_cu_cfg0 cu_cfg0;
+ struct ipu3_uapi_cu_cfg1 cu_cfg1;
+} __packed;
+
+/* Vssnlm - Very small scale non-local mean algorithm */
+
+/**
+ * struct ipu3_uapi_vss_lut_x - Vssnlm LUT x0/x1/x2
+ *
+ * @vs_x0: Vssnlm LUT x0, precision u8, range [0, 255], default 16.
+ * @vs_x1: Vssnlm LUT x1, precision u8, range [0, 255], default 32.
+ * @vs_x2: Vssnlm LUT x2, precision u8, range [0, 255], default 64.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_vss_lut_x {
+ __u32 vs_x0:8;
+ __u32 vs_x1:8;
+ __u32 vs_x2:8;
+ __u32 reserved2:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_vss_lut_y - Vssnlm LUT y1/y2/y3
+ *
+ * @vs_y1: Vssnlm LUT y1, precision u4, range [0, 8], default 1.
+ * @reserved0: reserved
+ * @vs_y2: Vssnlm LUT y2, precision u4, range [0, 8], default 3.
+ * @reserved1: reserved
+ * @vs_y3: Vssnlm LUT y3, precision u4, range [0, 8], default 8.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_vss_lut_y {
+ __u32 vs_y1:4;
+ __u32 reserved0:4;
+ __u32 vs_y2:4;
+ __u32 reserved1:4;
+ __u32 vs_y3:4;
+ __u32 reserved2:12;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg - IEFd Vssnlm lookup table
+ *
+ * @vss_lut_x: vss lookup table. See &ipu3_uapi_vss_lut_x description
+ * @vss_lut_y: vss lookup table. See &ipu3_uapi_vss_lut_y description
+ */
+struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg {
+ struct ipu3_uapi_vss_lut_x vss_lut_x;
+ struct ipu3_uapi_vss_lut_y vss_lut_y;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_iefd_config - IEFd config
+ *
+ * @units: configuration unit setting, &ipu3_uapi_yuvp1_iefd_cfg_units
+ * @config: configuration, as defined by &ipu3_uapi_yuvp1_iefd_config_s
+ * @control: control setting, as defined by &ipu3_uapi_yuvp1_iefd_control
+ * @sharp: sharpness setting, as defined by &ipu3_uapi_yuvp1_iefd_shrp_cfg
+ * @unsharp: unsharpness setting, as defined by &ipu3_uapi_yuvp1_iefd_unshrp_cfg
+ * @rad: radial setting, as defined by &ipu3_uapi_yuvp1_iefd_rad_cfg
+ * @vsslnm: vsslnm setting, as defined by &ipu3_uapi_yuvp1_iefd_vssnlm_cfg
+ */
+struct ipu3_uapi_yuvp1_iefd_config {
+ struct ipu3_uapi_yuvp1_iefd_cfg_units units;
+ struct ipu3_uapi_yuvp1_iefd_config_s config;
+ struct ipu3_uapi_yuvp1_iefd_control control;
+ struct ipu3_uapi_yuvp1_iefd_shrp_cfg sharp;
+ struct ipu3_uapi_yuvp1_iefd_unshrp_cfg unsharp;
+ struct ipu3_uapi_yuvp1_iefd_rad_cfg rad;
+ struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg vsslnm;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_yds_config - Y Down-Sampling config
+ *
+ * @c00: range [0, 3], default 0x0
+ * @c01: range [0, 3], default 0x1
+ * @c02: range [0, 3], default 0x1
+ * @c03: range [0, 3], default 0x0
+ * @c10: range [0, 3], default 0x0
+ * @c11: range [0, 3], default 0x1
+ * @c12: range [0, 3], default 0x1
+ * @c13: range [0, 3], default 0x0
+ *
+ * Above are 4x2 filter coefficients for chroma output downscaling.
+ *
+ * @norm_factor: Normalization factor, range [0, 4], default 2
+ * 0 - divide by 1
+ * 1 - divide by 2
+ * 2 - divide by 4
+ * 3 - divide by 8
+ * 4 - divide by 16
+ * @reserved0: reserved
+ * @bin_output: Down sampling on the Luma channel in two optional modes:
+ * 0 - bin output 4:2:0 (default), 1 - bin output 4:2:2.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_yds_config {
+ __u32 c00:2;
+ __u32 c01:2;
+ __u32 c02:2;
+ __u32 c03:2;
+ __u32 c10:2;
+ __u32 c11:2;
+ __u32 c12:2;
+ __u32 c13:2;
+ __u32 norm_factor:5;
+ __u32 reserved0:4;
+ __u32 bin_output:1;
+ __u32 reserved1:6;
+} __packed;
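+
+/*
+ * Sanity check of the defaults above: the taps sum to
+ * (0+1+1+0) + (0+1+1+0) = 4, so @norm_factor = 2 (divide by 4) keeps
+ * unity gain.
+ */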
+
+/* Chroma Noise Reduction */
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_enable_config - Chroma noise reduction enable
+ *
+ * @enable: enable/disable chroma noise reduction
+ * @yuv_mode: 0 - YUV420, 1 - YUV422
+ * @reserved0: reserved
+ * @col_size: number of columns in the frame, max width is 2560
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_chnr_enable_config {
+ __u32 enable:1;
+ __u32 yuv_mode:1;
+ __u32 reserved0:14;
+ __u32 col_size:12;
+ __u32 reserved1:4;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_coring_config - Coring thresholds for UV
+ *
+ * @u: U coring level, u0.13, range [0.0, 1.0], default 0.0
+ * @reserved0: reserved
+ * @v: V coring level, u0.13, range [0.0, 1.0], default 0.0
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_chnr_coring_config {
+ __u32 u:13;
+ __u32 reserved0:3;
+ __u32 v:13;
+ __u32 reserved1:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_sense_gain_config - Chroma noise reduction gains
+ *
+ * All sensitivity gain parameters have precision u13.0, range [0, 8191].
+ *
+ * @vy: Sensitivity of horizontal edge of Y, default 100
+ * @vu: Sensitivity of horizontal edge of U, default 100
+ * @vv: Sensitivity of horizontal edge of V, default 100
+ * @reserved0: reserved
+ * @hy: Sensitivity of vertical edge of Y, default 50
+ * @hu: Sensitivity of vertical edge of U, default 50
+ * @hv: Sensitivity of vertical edge of V, default 50
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_chnr_sense_gain_config {
+ __u32 vy:8;
+ __u32 vu:8;
+ __u32 vv:8;
+ __u32 reserved0:8;
+
+ __u32 hy:8;
+ __u32 hu:8;
+ __u32 hv:8;
+ __u32 reserved1:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_iir_fir_config - Chroma IIR/FIR filter config
+ *
+ * @fir_0h: Value of center tap in horizontal FIR, range [0, 32], default 8.
+ * @reserved0: reserved
+ * @fir_1h: Value of distance 1 tap in horizontal FIR, range [0, 32], default 12.
+ * @reserved1: reserved
+ * @fir_2h: Value of distance 2 tap in horizontal FIR, range [0, 32], default 0.
+ * @dalpha_clip_val: weight for previous row in IIR, range [1, 256], default 0.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_yuvp1_chnr_iir_fir_config {
+ __u32 fir_0h:6;
+ __u32 reserved0:2;
+ __u32 fir_1h:6;
+ __u32 reserved1:2;
+ __u32 fir_2h:6;
+ __u32 dalpha_clip_val:9;
+ __u32 reserved2:1;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_config - Chroma noise reduction config
+ *
+ * @enable: chroma noise reduction enable, see
+ * &ipu3_uapi_yuvp1_chnr_enable_config
+ * @coring: coring config for chroma noise reduction, see
+ * &ipu3_uapi_yuvp1_chnr_coring_config
+ * @sense_gain: sensitivity config for chroma noise reduction, see
+ *	       &ipu3_uapi_yuvp1_chnr_sense_gain_config
+ * @iir_fir: iir and fir config for chroma noise reduction, see
+ *	     &ipu3_uapi_yuvp1_chnr_iir_fir_config
+ */
+struct ipu3_uapi_yuvp1_chnr_config {
+ struct ipu3_uapi_yuvp1_chnr_enable_config enable;
+ struct ipu3_uapi_yuvp1_chnr_coring_config coring;
+ struct ipu3_uapi_yuvp1_chnr_sense_gain_config sense_gain;
+ struct ipu3_uapi_yuvp1_chnr_iir_fir_config iir_fir;
+} __packed;
+
+/* Edge Enhancement and Noise Reduction */
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config - Luma(Y) edge enhancement low-pass
+ * filter coefficients
+ *
+ * @a_diag: Smoothing diagonal coefficient, u5.0.
+ * @reserved0: reserved
+ * @a_periph: Image smoothing peripheral coefficient, u5.0.
+ * @reserved1: reserved
+ * @a_cent: Image smoothing center coefficient, u5.0.
+ * @reserved2: reserved
+ * @enable: 0: Y_EE_NR disabled, output = input; 1: Y_EE_NR enabled.
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config {
+ __u32 a_diag:5;
+ __u32 reserved0:3;
+ __u32 a_periph:5;
+ __u32 reserved1:3;
+ __u32 a_cent:5;
+ __u32 reserved2:9;
+ __u32 enable:1;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_sense_config - Luma(Y) edge enhancement
+ * noise reduction sensitivity gains
+ *
+ * @edge_sense_0: Sensitivity of edge in dark area. u13.0, default 8191.
+ * @reserved0: reserved
+ * @delta_edge_sense: Difference in the sensitivity of edges between
+ * the bright and dark areas. u13.0, default 0.
+ * @reserved1: reserved
+ * @corner_sense_0: Sensitivity of corner in dark area. u13.0, default 0.
+ * @reserved2: reserved
+ * @delta_corner_sense: Difference in the sensitivity of corners between
+ * the bright and dark areas. u13.0, default 8191.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_sense_config {
+ __u32 edge_sense_0:13;
+ __u32 reserved0:3;
+ __u32 delta_edge_sense:13;
+ __u32 reserved1:3;
+ __u32 corner_sense_0:13;
+ __u32 reserved2:3;
+ __u32 delta_corner_sense:13;
+ __u32 reserved3:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_gain_config - Luma(Y) edge enhancement
+ * noise reduction gain config
+ *
+ * @gain_pos_0: Gain for positive edge in dark area. u5.0, [0, 16], default 2.
+ * @reserved0: reserved
+ * @delta_gain_posi: Difference in the gain of edges between the bright and
+ * dark areas for positive edges. u5.0, [0, 16], default 0.
+ * @reserved1: reserved
+ * @gain_neg_0: Gain for negative edge in dark area. u5.0, [0, 16], default 8.
+ * @reserved2: reserved
+ * @delta_gain_neg: Difference in the gain of edges between the bright and
+ * dark areas for negative edges. u5.0, [0, 16], default 0.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_gain_config {
+ __u32 gain_pos_0:5;
+ __u32 reserved0:3;
+ __u32 delta_gain_posi:5;
+ __u32 reserved1:3;
+ __u32 gain_neg_0:5;
+ __u32 reserved2:3;
+ __u32 delta_gain_neg:5;
+ __u32 reserved3:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_clip_config - Luma(Y) edge enhancement
+ * noise reduction clipping config
+ *
+ * @clip_pos_0: Limit of positive edge in dark area
+ * u5, value [0, 16], default 8.
+ * @reserved0: reserved
+ * @delta_clip_posi: Difference in the limit of edges between the bright
+ * and dark areas for positive edges.
+ * u5, value [0, 16], default 8.
+ * @reserved1: reserved
+ * @clip_neg_0: Limit of negative edge in dark area
+ * u5, value [0, 16], default 8.
+ * @reserved2: reserved
+ * @delta_clip_neg: Difference in the limit of edges between the bright
+ * and dark areas for negative edges.
+ * u5, value [0, 16], default 8.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_clip_config {
+ __u32 clip_pos_0:5;
+ __u32 reserved0:3;
+ __u32 delta_clip_posi:5;
+ __u32 reserved1:3;
+ __u32 clip_neg_0:5;
+ __u32 reserved2:3;
+ __u32 delta_clip_neg:5;
+ __u32 reserved3:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_frng_config - Luma(Y) edge enhancement
+ * noise reduction fringe config
+ *
+ * @gain_exp: Common exponent of gains, u4, [0, 8], default 2.
+ * @reserved0: reserved
+ * @min_edge: Threshold for edge and smooth stitching, u13.
+ * @reserved1: reserved
+ * @lin_seg_param: Power of LinSeg, u4.
+ * @reserved2: reserved
+ * @t1: Parameter for enabling/disabling the edge enhancement, u1.0, [0, 1],
+ * default 1.
+ * @t2: Parameter for enabling/disabling the smoothing, u1.0, [0, 1],
+ * default 1.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_frng_config {
+ __u32 gain_exp:4;
+ __u32 reserved0:28;
+ __u32 min_edge:13;
+ __u32 reserved1:3;
+ __u32 lin_seg_param:4;
+ __u32 reserved2:4;
+ __u32 t1:1;
+ __u32 t2:1;
+ __u32 reserved3:6;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_diag_config - Luma(Y) edge enhancement
+ * noise reduction diagonal config
+ *
+ * @diag_disc_g: Coefficient that prioritizes the diagonal edge direction
+ *		 over horizontal or vertical for the final enhancement.
+ * u4.0, [1, 15], default 1.
+ * @reserved0: reserved
+ * @hvw_hor: Weight of horizontal/vertical edge enhancement for hv edge.
+ * u2.2, [1, 15], default 4.
+ * @dw_hor: Weight of diagonal edge enhancement for hv edge.
+ * u2.2, [1, 15], default 1.
+ * @hvw_diag: Weight of horizontal/vertical edge enhancement for diagonal edge.
+ * u2.2, [1, 15], default 1.
+ * @dw_diag: Weight of diagonal edge enhancement for diagonal edge.
+ * u2.2, [1, 15], default 4.
+ * @reserved1: reserved
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_diag_config {
+ __u32 diag_disc_g:4;
+ __u32 reserved0:4;
+ __u32 hvw_hor:4;
+ __u32 dw_hor:4;
+ __u32 hvw_diag:4;
+ __u32 dw_diag:4;
+ __u32 reserved1:8;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config - Luma(Y) edge enhancement
+ * noise reduction false color correction (FCC) coring config
+ *
+ * @pos_0: Gain for positive edge in dark, u13.0, [0, 16], default 0.
+ * @reserved0: reserved
+ * @pos_delta: Gain for positive edge in bright area, value: pos_0 + pos_delta <= 16,
+ * u13.0, default 0.
+ * @reserved1: reserved
+ * @neg_0: Gain for negative edge in dark area, u13.0, range [0, 16], default 0.
+ * @reserved2: reserved
+ * @neg_delta: Gain for negative edge in bright area, neg_0 + neg_delta <= 16,
+ * u13.0, default 0.
+ * @reserved3: reserved
+ *
+ * Coring is a simple soft thresholding technique.
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config {
+ __u32 pos_0:13;
+ __u32 reserved0:3;
+ __u32 pos_delta:13;
+ __u32 reserved1:3;
+ __u32 neg_0:13;
+ __u32 reserved2:3;
+ __u32 neg_delta:13;
+ __u32 reserved3:3;
+} __packed;
+
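+/*
+ * Illustrative sketch (not part of the UAPI): the bright-area gains are
+ * the dark-area gains plus their deltas, and each sum must stay within
+ * 16 as noted above. A hypothetical validation helper:
+ */
+static inline int
+ipu3_uapi_fc_coring_valid(const struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config *c)
+{
+	return c->pos_0 + c->pos_delta <= 16 &&
+	       c->neg_0 + c->neg_delta <= 16;
+}
+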
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_config - Edge enhancement and noise reduction
+ *
+ * @lpf: low-pass filter config. See &ipu3_uapi_yuvp1_y_ee_nr_lpf_config
+ * @sense: sensitivity config. See &ipu3_uapi_yuvp1_y_ee_nr_sense_config
+ * @gain: gain config as defined in &ipu3_uapi_yuvp1_y_ee_nr_gain_config
+ * @clip: clip config as defined in &ipu3_uapi_yuvp1_y_ee_nr_clip_config
+ * @frng: fringe config as defined in &ipu3_uapi_yuvp1_y_ee_nr_frng_config
+ * @diag: diagonal edge config. See &ipu3_uapi_yuvp1_y_ee_nr_diag_config
+ * @fc_coring: coring config for fringe control. See
+ * &ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config
+ */
+struct ipu3_uapi_yuvp1_y_ee_nr_config {
+ struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config lpf;
+ struct ipu3_uapi_yuvp1_y_ee_nr_sense_config sense;
+ struct ipu3_uapi_yuvp1_y_ee_nr_gain_config gain;
+ struct ipu3_uapi_yuvp1_y_ee_nr_clip_config clip;
+ struct ipu3_uapi_yuvp1_y_ee_nr_frng_config frng;
+ struct ipu3_uapi_yuvp1_y_ee_nr_diag_config diag;
+ struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config fc_coring;
+} __packed;
+
+/* Total Color Correction */
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_gen_control_static_config - Total color correction
+ * general control config
+ *
+ * @en: 0 - TCC disabled, output = input. 1 - TCC enabled.
+ * @blend_shift: blend shift, range [3, 4], default NA.
+ * @gain_according_to_y_only: 0: Gain is calculated according to YUV,
+ *			      1: Gain is calculated according to Y only
+ * @reserved0: reserved
+ * @gamma: Final blending coefficients. Values [-16, 16], default NA.
+ * @reserved1: reserved
+ * @delta: Final blending coefficients. Values [-16, 16], default NA.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_yuvp2_tcc_gen_control_static_config {
+ __u32 en:1;
+ __u32 blend_shift:3;
+ __u32 gain_according_to_y_only:1;
+ __u32 reserved0:11;
+ __s32 gamma:5;
+ __u32 reserved1:3;
+ __s32 delta:5;
+ __u32 reserved2:3;
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config - Total color correction
+ * multi-axis color control (MACC) config
+ *
+ * @a: a coefficient for 2x2 MACC conversion matrix.
+ * @reserved0: reserved
+ * @b: b coefficient for 2x2 MACC conversion matrix.
+ * @reserved1: reserved
+ * @c: c coefficient for 2x2 MACC conversion matrix.
+ * @reserved2: reserved
+ * @d: d coefficient for 2x2 MACC conversion matrix.
+ * @reserved3: reserved
+ */
+struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config {
+ __s32 a:12;
+ __u32 reserved0:4;
+ __s32 b:12;
+ __u32 reserved1:4;
+ __s32 c:12;
+ __u32 reserved2:4;
+ __s32 d:12;
+ __u32 reserved3:4;
+} __packed;
+
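+/*
+ * Illustrative sketch (not part of the UAPI), assuming the conventional
+ * reading of @a..@d as the rows of the 2x2 MACC matrix applied to the
+ * (U, V) chroma pair:
+ *
+ *	U' = a * U + b * V
+ *	V' = c * U + d * V
+ */
+static inline void
+ipu3_uapi_macc_apply(const struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config *m,
+		     int u, int v, int *u_out, int *v_out)
+{
+	*u_out = m->a * u + m->b * v;
+	*v_out = m->c * u + m->d * v;
+}
+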
+/**
+ * struct ipu3_uapi_yuvp2_tcc_macc_table_static_config - Total color correction
+ * multi-axis color control (MACC) table array
+ *
+ * @entries: config for multi axis color correction, as specified by
+ * &ipu3_uapi_yuvp2_tcc_macc_elem_static_config
+ */
+struct ipu3_uapi_yuvp2_tcc_macc_table_static_config {
+ struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config
+ entries[IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config - Total color correction
+ * inverse y lookup table
+ *
+ * @entries: lookup table used for inverse Y estimation, to estimate the
+ *	     ratio between chroma and luma. Chroma is approximated by the
+ *	     absolute value of the radius on the chroma plane
+ *	     (R = sqrt(u^2 + v^2)) and luma by 1/Y.
+ */
+struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config {
+ __u16 entries[IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config - Total color
+ * correction lookup table for PCWL
+ *
+ * @entries: lookup table for gain piece wise linear transformation (PCWL)
+ */
+struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config {
+ __u16 entries[IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config - Total color correction
+ * lookup table for r square root
+ *
+ * @entries: lookup table for r square root estimation
+ */
+struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config {
+ __s16 entries[IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_static_config - Total color correction static config
+ *
+ * @gen_control: general config for Total Color Correction
+ * @macc_table: config for multi axis color correction
+ * @inv_y_lut: lookup table for inverse y estimation
+ * @gain_pcwl: lookup table for gain PCWL
+ * @r_sqr_lut: lookup table for r square root estimation.
+ */
+struct ipu3_uapi_yuvp2_tcc_static_config {
+ struct ipu3_uapi_yuvp2_tcc_gen_control_static_config gen_control;
+ struct ipu3_uapi_yuvp2_tcc_macc_table_static_config macc_table;
+ struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config inv_y_lut;
+ struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config gain_pcwl;
+ struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config r_sqr_lut;
+} __packed;
+
+/* Advanced Noise Reduction related structs */
+
+/*
+ * struct ipu3_uapi_anr_alpha - Advanced noise reduction alpha
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
+ */
+struct ipu3_uapi_anr_alpha {
+ __u16 gr;
+ __u16 r;
+ __u16 b;
+ __u16 gb;
+ __u16 dc_gr;
+ __u16 dc_r;
+ __u16 dc_b;
+ __u16 dc_gb;
+} __packed;
+
+/*
+ * struct ipu3_uapi_anr_beta - Advanced noise reduction beta
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
+ */
+struct ipu3_uapi_anr_beta {
+ __u16 beta_gr;
+ __u16 beta_r;
+ __u16 beta_b;
+ __u16 beta_gb;
+} __packed;
+
+/*
+ * struct ipu3_uapi_anr_plane_color - Advanced noise reduction per plane R, Gr,
+ * Gb and B register settings
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
+ */
+struct ipu3_uapi_anr_plane_color {
+ __u16 reg_w_gr[16];
+ __u16 reg_w_r[16];
+ __u16 reg_w_b[16];
+ __u16 reg_w_gb[16];
+} __packed;
+
+/**
+ * struct ipu3_uapi_anr_transform_config - Advanced noise reduction transform
+ *
+ * @enable: advanced noise reduction enabled.
+ * @adaptive_treshhold_en: On IPU3, adaptive threshold is always enabled.
+ * @reserved1: reserved
+ * @reserved2: reserved
+ * @alpha: using following defaults:
+ * 13, 13, 13, 13, 0, 0, 0, 0
+ * 11, 11, 11, 11, 0, 0, 0, 0
+ * 14, 14, 14, 14, 0, 0, 0, 0
+ * @beta: use following defaults:
+ * 24, 24, 24, 24
+ * 21, 20, 20, 21
+ * 25, 25, 25, 25
+ * @color: use defaults defined in driver/media/pci/intel/ipu3-tables.c
+ * @sqrt_lut: 11 bits per element, values =
+ *	      [724  768  810  849  887
+ *	       923  958  991 1024 1056
+ *	       1086 1116 1145 1173 1201
+ *	       1228 1254 1280 1305 1330
+ *	       1355 1379 1402 1425 1448]
+ * @xreset: Reset value of X for r^2 calculation. Value: col_start - X_center.
+ *	    Constraint: Xreset + FrameWidth <= 4095, Xreset >= -4095,
+ *	    default -1632.
+ * @reserved3: reserved
+ * @yreset: Reset value of Y for r^2 calculation. Value: row_start - Y_center.
+ *	    Constraint: Yreset + FrameHeight <= 4095, Yreset >= -4095,
+ *	    default -1224.
+ * @reserved4: reserved
+ * @x_sqr_reset: Reset value of X^2 for r^2 calculation Value = (Xreset)^2
+ * @r_normfactor: Normalization factor for R. Default 14.
+ * @reserved5: reserved
+ * @y_sqr_reset: Reset value of Y^2 for r^2 calculation Value = (Yreset)^2
+ * @gain_scale: Parameter describing shading gain as a function of distance
+ * from the image center.
+ * A single value per frame, loaded by the driver. Default 115.
+ */
+struct ipu3_uapi_anr_transform_config {
+ __u32 enable:1; /* 0 or 1, disabled or enabled */
+ __u32 adaptive_treshhold_en:1; /* On IPU3, always enabled */
+
+ __u32 reserved1:30;
+ __u8 reserved2[44];
+
+ struct ipu3_uapi_anr_alpha alpha[3];
+ struct ipu3_uapi_anr_beta beta[3];
+ struct ipu3_uapi_anr_plane_color color[3];
+
+ __u16 sqrt_lut[IPU3_UAPI_ANR_LUT_SIZE]; /* 11 bits per element */
+
+ __s16 xreset:13;
+ __u16 reserved3:3;
+ __s16 yreset:13;
+ __u16 reserved4:3;
+
+ __u32 x_sqr_reset:24;
+ __u32 r_normfactor:5;
+ __u32 reserved5:3;
+
+ __u32 y_sqr_reset:24;
+ __u32 gain_scale:8;
+} __packed;
+
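+/*
+ * Illustrative sketch (not part of the UAPI): per the field descriptions
+ * above, @x_sqr_reset and @y_sqr_reset must hold the squares of @xreset
+ * and @yreset. A hypothetical consistency check:
+ */
+static inline int
+ipu3_uapi_anr_transform_valid(const struct ipu3_uapi_anr_transform_config *t)
+{
+	return t->x_sqr_reset == t->xreset * t->xreset &&
+	       t->y_sqr_reset == t->yreset * t->yreset;
+}
+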
+/**
+ * struct ipu3_uapi_anr_stitch_pyramid - ANR stitch pyramid
+ *
+ * @entry0: pyramid LUT entry0, range [0x0, 0x3f]
+ * @entry1: pyramid LUT entry1, range [0x0, 0x3f]
+ * @entry2: pyramid LUT entry2, range [0x0, 0x3f]
+ * @reserved: reserved
+ */
+struct ipu3_uapi_anr_stitch_pyramid {
+ __u32 entry0:6;
+ __u32 entry1:6;
+ __u32 entry2:6;
+ __u32 reserved:14;
+} __packed;
+
+/**
+ * struct ipu3_uapi_anr_stitch_config - ANR stitch config
+ *
+ * @anr_stitch_en: ANR stitch enable; set to 1 to enable.
+ * @reserved: reserved
+ * @pyramid: pyramid table as defined by &ipu3_uapi_anr_stitch_pyramid
+ * default values:
+ * { 1, 3, 5 }, { 7, 7, 5 }, { 3, 1, 3 },
+ * { 9, 15, 21 }, { 21, 15, 9 }, { 3, 5, 15 },
+ * { 25, 35, 35 }, { 25, 15, 5 }, { 7, 21, 35 },
+ * { 49, 49, 35 }, { 21, 7, 7 }, { 21, 35, 49 },
+ * { 49, 35, 21 }, { 7, 5, 15 }, { 25, 35, 35 },
+ * { 25, 15, 5 }, { 3, 9, 15 }, { 21, 21, 15 },
+ * { 9, 3, 1 }, { 3, 5, 7 }, { 7, 5, 3}, { 1 }
+ */
+struct ipu3_uapi_anr_stitch_config {
+ __u32 anr_stitch_en;
+ __u8 reserved[44];
+ struct ipu3_uapi_anr_stitch_pyramid pyramid[IPU3_UAPI_ANR_PYRAMID_SIZE];
+} __packed;
+
+/**
+ * struct ipu3_uapi_anr_config - ANR config
+ *
+ * @transform: advanced noise reduction transform config as specified by
+ * &ipu3_uapi_anr_transform_config
+ * @stitch: create 4x4 patch from 4 surrounding 8x8 patches.
+ */
+struct ipu3_uapi_anr_config {
+ struct ipu3_uapi_anr_transform_config transform __attribute__((aligned(32)));
+ struct ipu3_uapi_anr_stitch_config stitch __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_acc_param - Accelerator cluster parameters
+ *
+ * ACC refers to the HW cluster containing all Fixed Functions (FFs). Each FF
+ * implements a specific algorithm.
+ *
+ * @bnr: parameters for bayer noise reduction static config. See
+ * &ipu3_uapi_bnr_static_config
+ * @green_disparity: disparity static config between gr and gb channel.
+ * See &ipu3_uapi_bnr_static_config_green_disparity
+ * @dm: de-mosaic config. See &ipu3_uapi_dm_config
+ * @ccm: color correction matrix. See &ipu3_uapi_ccm_mat_config
+ * @gamma: gamma correction config. See &ipu3_uapi_gamma_config
+ * @csc: color space conversion matrix. See &ipu3_uapi_csc_mat_config
+ * @cds: color down sample config. See &ipu3_uapi_cds_params
+ * @shd: lens shading correction config. See &ipu3_uapi_shd_config
+ * @iefd: Image enhancement filter and denoise config.
+ * &ipu3_uapi_yuvp1_iefd_config
+ * @yds_c0: y down scaler config. &ipu3_uapi_yuvp1_yds_config
+ * @chnr_c0: chroma noise reduction config. &ipu3_uapi_yuvp1_chnr_config
+ * @y_ee_nr: y edge enhancement and noise reduction config.
+ * &ipu3_uapi_yuvp1_y_ee_nr_config
+ * @yds: y down scaler config. See &ipu3_uapi_yuvp1_yds_config
+ * @chnr: chroma noise reduction config. See &ipu3_uapi_yuvp1_chnr_config
+ * @reserved1: reserved
+ * @yds2: y channel down scaler config. See &ipu3_uapi_yuvp1_yds_config
+ * @tcc: total color correction config as defined in struct
+ * &ipu3_uapi_yuvp2_tcc_static_config
+ * @reserved2: reserved
+ * @anr: advanced noise reduction config. See &ipu3_uapi_anr_config
+ * @awb_fr: AWB filter response config. See &ipu3_uapi_awb_fr_config_s
+ * @ae: auto exposure config. As specified by &ipu3_uapi_ae_config
+ * @af: auto focus config. As specified by &ipu3_uapi_af_config_s
+ * @awb: auto white balance config. As specified by &ipu3_uapi_awb_config
+ */
+struct ipu3_uapi_acc_param {
+ struct ipu3_uapi_bnr_static_config bnr;
+ struct ipu3_uapi_bnr_static_config_green_disparity
+ green_disparity __attribute__((aligned(32)));
+ struct ipu3_uapi_dm_config dm __attribute__((aligned(32)));
+ struct ipu3_uapi_ccm_mat_config ccm __attribute__((aligned(32)));
+ struct ipu3_uapi_gamma_config gamma __attribute__((aligned(32)));
+ struct ipu3_uapi_csc_mat_config csc __attribute__((aligned(32)));
+ struct ipu3_uapi_cds_params cds __attribute__((aligned(32)));
+ struct ipu3_uapi_shd_config shd __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_iefd_config iefd __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_yds_config yds_c0 __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_chnr_config chnr_c0 __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_y_ee_nr_config y_ee_nr __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_yds_config yds __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_chnr_config chnr __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32)));
+ struct ipu3_uapi_anr_config anr;
+ struct ipu3_uapi_awb_fr_config_s awb_fr;
+ struct ipu3_uapi_ae_config ae;
+ struct ipu3_uapi_af_config_s af;
+ struct ipu3_uapi_awb_config awb;
+} __packed;
+
+/**
+ * struct ipu3_uapi_isp_lin_vmem_params - Linearization parameters
+ *
+ * @lin_lutlow_gr: linearization look-up table for GR channel interpolation.
+ * @lin_lutlow_r: linearization look-up table for R channel interpolation.
+ * @lin_lutlow_b: linearization look-up table for B channel interpolation.
+ * @lin_lutlow_gb: linearization look-up table for GB channel interpolation.
+ * lin_lutlow_gr / lin_lutlow_r / lin_lutlow_b /
+ * lin_lutlow_gb <= LIN_MAX_VALUE - 1.
+ * @lin_lutdif_gr: lin_lutlow_gr[i+1] - lin_lutlow_gr[i].
+ * @lin_lutdif_r: lin_lutlow_r[i+1] - lin_lutlow_r[i].
+ * @lin_lutdif_b: lin_lutlow_b[i+1] - lin_lutlow_b[i].
+ * @lin_lutdif_gb: lin_lutlow_gb[i+1] - lin_lutlow_gb[i].
+ */
+struct ipu3_uapi_isp_lin_vmem_params {
+ __s16 lin_lutlow_gr[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutlow_r[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutlow_b[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutlow_gb[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutdif_gr[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutdif_r[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutdif_b[IPU3_UAPI_LIN_LUT_SIZE];
+ __s16 lin_lutdif_gb[IPU3_UAPI_LIN_LUT_SIZE];
+} __packed;
+
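+/*
+ * Illustrative sketch (not part of the UAPI): the lin_lutdif_* tables are
+ * the first differences of the corresponding lin_lutlow_* tables, as
+ * described above. For the GR channel a hypothetical producer could do:
+ */
+static inline void
+ipu3_uapi_lin_fill_lutdif_gr(struct ipu3_uapi_isp_lin_vmem_params *p)
+{
+	unsigned int i;
+
+	/* lin_lutdif_gr[i] = lin_lutlow_gr[i + 1] - lin_lutlow_gr[i] */
+	for (i = 0; i + 1 < IPU3_UAPI_LIN_LUT_SIZE; i++)
+		p->lin_lutdif_gr[i] = p->lin_lutlow_gr[i + 1] -
+				      p->lin_lutlow_gr[i];
+}
+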
+/* Temporal Noise Reduction */
+
+/**
+ * struct ipu3_uapi_isp_tnr3_vmem_params - Temporal noise reduction vector
+ * memory parameters
+ *
+ * @slope: slope setting in interpolation curve for temporal noise reduction.
+ * @reserved1: reserved
+ * @sigma: knee point setting in interpolation curve for temporal
+ * noise reduction.
+ * @reserved2: reserved
+ */
+struct ipu3_uapi_isp_tnr3_vmem_params {
+ __u16 slope[IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+ __u16 reserved1[IPU3_UAPI_ISP_VEC_ELEMS
+ - IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+ __u16 sigma[IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+ __u16 reserved2[IPU3_UAPI_ISP_VEC_ELEMS
+ - IPU3_UAPI_ISP_TNR3_VMEM_LEN];
+} __packed;
+
+/**
+ * struct ipu3_uapi_isp_tnr3_params - Temporal noise reduction v3 parameters
+ *
+ * @knee_y1: Knee point. TNR3 assumes the standard deviations of Y, U and
+ *	     V at Y1 are TnrY1_Sigma_Y, U and V.
+ * @knee_y2: Knee point. TNR3 assumes the standard deviations of Y, U and
+ *	     V at Y2 are TnrY2_Sigma_Y, U and V.
+ * @maxfb_y: Max feedback gain for Y
+ * @maxfb_u: Max feedback gain for U
+ * @maxfb_v: Max feedback gain for V
+ * @round_adj_y: rounding Adjust for Y
+ * @round_adj_u: rounding Adjust for U
+ * @round_adj_v: rounding Adjust for V
+ * @ref_buf_select: selection of the reference frame buffer to be used.
+ */
+struct ipu3_uapi_isp_tnr3_params {
+ __u32 knee_y1;
+ __u32 knee_y2;
+ __u32 maxfb_y;
+ __u32 maxfb_u;
+ __u32 maxfb_v;
+ __u32 round_adj_y;
+ __u32 round_adj_u;
+ __u32 round_adj_v;
+ __u32 ref_buf_select;
+} __packed;
+
+/* Extreme Noise Reduction version 3 */
+
+/**
+ * struct ipu3_uapi_isp_xnr3_vmem_params - Extreme noise reduction v3
+ * vector memory parameters
+ *
+ * @x: xnr3 parameters.
+ * @a: xnr3 parameters.
+ * @b: xnr3 parameters.
+ * @c: xnr3 parameters.
+ */
+struct ipu3_uapi_isp_xnr3_vmem_params {
+ __u16 x[IPU3_UAPI_ISP_VEC_ELEMS];
+ __u16 a[IPU3_UAPI_ISP_VEC_ELEMS];
+ __u16 b[IPU3_UAPI_ISP_VEC_ELEMS];
+ __u16 c[IPU3_UAPI_ISP_VEC_ELEMS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_xnr3_alpha_params - Extreme noise reduction v3
+ * alpha tuning parameters
+ *
+ * @y0: Sigma for Y range similarity in dark area.
+ * @u0: Sigma for U range similarity in dark area.
+ * @v0: Sigma for V range similarity in dark area.
+ * @ydiff: Sigma difference for Y between bright area and dark area.
+ * @udiff: Sigma difference for U between bright area and dark area.
+ * @vdiff: Sigma difference for V between bright area and dark area.
+ */
+struct ipu3_uapi_xnr3_alpha_params {
+ __u32 y0;
+ __u32 u0;
+ __u32 v0;
+ __u32 ydiff;
+ __u32 udiff;
+ __u32 vdiff;
+} __packed;
+
+/**
+ * struct ipu3_uapi_xnr3_coring_params - Extreme noise reduction v3
+ * coring parameters
+ *
+ * @u0: Coring Threshold of U channel in dark area.
+ * @v0: Coring Threshold of V channel in dark area.
+ * @udiff: Threshold difference of U channel between bright and dark area.
+ * @vdiff: Threshold difference of V channel between bright and dark area.
+ */
+struct ipu3_uapi_xnr3_coring_params {
+ __u32 u0;
+ __u32 v0;
+ __u32 udiff;
+ __u32 vdiff;
+} __packed;
+
+/**
+ * struct ipu3_uapi_xnr3_blending_params - Blending factor
+ *
+ * @strength: The factor for blending output with input. This is a tuning
+ *	      parameter. Higher values lead to more aggressive XNR operation.
+ */
+struct ipu3_uapi_xnr3_blending_params {
+ __u32 strength;
+} __packed;
+
+/**
+ * struct ipu3_uapi_isp_xnr3_params - Extreme noise reduction v3 parameters
+ *
+ * @alpha: parameters for xnr3 alpha. See &ipu3_uapi_xnr3_alpha_params
+ * @coring: parameters for xnr3 coring. See &ipu3_uapi_xnr3_coring_params
+ * @blending: parameters for xnr3 blending. See &ipu3_uapi_xnr3_blending_params
+ */
+struct ipu3_uapi_isp_xnr3_params {
+ struct ipu3_uapi_xnr3_alpha_params alpha;
+ struct ipu3_uapi_xnr3_coring_params coring;
+ struct ipu3_uapi_xnr3_blending_params blending;
+} __packed;
+
+/***** Obgrid (optical black level compensation) table entry *****/
+
+/**
+ * struct ipu3_uapi_obgrid_param - Optical black level compensation parameters
+ *
+ * @gr: Grid table values for color GR
+ * @r: Grid table values for color R
+ * @b: Grid table values for color B
+ * @gb: Grid table values for color GB
+ *
+ * Black level is different for red, green, and blue channels. So black level
+ * compensation is different per channel.
+ */
+struct ipu3_uapi_obgrid_param {
+ __u16 gr;
+ __u16 r;
+ __u16 b;
+ __u16 gb;
+} __packed;
+
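+/*
+ * Illustrative sketch (not part of the UAPI), assuming straightforward
+ * per-channel subtraction: each raw sample would be compensated with the
+ * grid value of its own Bayer channel, e.g. for an R sample:
+ */
+static inline __u16
+ipu3_uapi_obgrid_compensate_r(__u16 raw_r,
+			      const struct ipu3_uapi_obgrid_param *g)
+{
+	return raw_r > g->r ? raw_r - g->r : 0;
+}
+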
+/******************* V4L2_META_FMT_IPU3_PARAMS *******************/
+
+/**
+ * struct ipu3_uapi_flags - bits to indicate which pipeline needs update
+ *
+ * @gdc: 0 = no update, 1 = update.
+ * @obgrid: 0 = no update, 1 = update.
+ * @reserved1: Not used.
+ * @acc_bnr: 0 = no update, 1 = update.
+ * @acc_green_disparity: 0 = no update, 1 = update.
+ * @acc_dm: 0 = no update, 1 = update.
+ * @acc_ccm: 0 = no update, 1 = update.
+ * @acc_gamma: 0 = no update, 1 = update.
+ * @acc_csc: 0 = no update, 1 = update.
+ * @acc_cds: 0 = no update, 1 = update.
+ * @acc_shd: 0 = no update, 1 = update.
+ * @reserved2: Not used.
+ * @acc_iefd: 0 = no update, 1 = update.
+ * @acc_yds_c0: 0 = no update, 1 = update.
+ * @acc_chnr_c0: 0 = no update, 1 = update.
+ * @acc_y_ee_nr: 0 = no update, 1 = update.
+ * @acc_yds: 0 = no update, 1 = update.
+ * @acc_chnr: 0 = no update, 1 = update.
+ * @acc_ytm: 0 = no update, 1 = update.
+ * @acc_yds2: 0 = no update, 1 = update.
+ * @acc_tcc: 0 = no update, 1 = update.
+ * @acc_dpc: 0 = no update, 1 = update.
+ * @acc_bds: 0 = no update, 1 = update.
+ * @acc_anr: 0 = no update, 1 = update.
+ * @acc_awb_fr: 0 = no update, 1 = update.
+ * @acc_ae: 0 = no update, 1 = update.
+ * @acc_af: 0 = no update, 1 = update.
+ * @acc_awb: 0 = no update, 1 = update.
+ * @__acc_osys: 0 = no update, 1 = update.
+ * @reserved3: Not used.
+ * @lin_vmem_params: 0 = no update, 1 = update.
+ * @tnr3_vmem_params: 0 = no update, 1 = update.
+ * @xnr3_vmem_params: 0 = no update, 1 = update.
+ * @tnr3_dmem_params: 0 = no update, 1 = update.
+ * @xnr3_dmem_params: 0 = no update, 1 = update.
+ * @reserved4: Not used.
+ * @obgrid_param: 0 = no update, 1 = update.
+ * @reserved5: Not used.
+ */
+struct ipu3_uapi_flags {
+ __u32 gdc:1;
+ __u32 obgrid:1;
+ __u32 reserved1:30;
+
+ __u32 acc_bnr:1;
+ __u32 acc_green_disparity:1;
+ __u32 acc_dm:1;
+ __u32 acc_ccm:1;
+ __u32 acc_gamma:1;
+ __u32 acc_csc:1;
+ __u32 acc_cds:1;
+ __u32 acc_shd:1;
+ __u32 reserved2:2;
+ __u32 acc_iefd:1;
+ __u32 acc_yds_c0:1;
+ __u32 acc_chnr_c0:1;
+ __u32 acc_y_ee_nr:1;
+ __u32 acc_yds:1;
+ __u32 acc_chnr:1;
+ __u32 acc_ytm:1;
+ __u32 acc_yds2:1;
+ __u32 acc_tcc:1;
+ __u32 acc_dpc:1;
+ __u32 acc_bds:1;
+ __u32 acc_anr:1;
+ __u32 acc_awb_fr:1;
+ __u32 acc_ae:1;
+ __u32 acc_af:1;
+ __u32 acc_awb:1;
+ __u32 reserved3:4;
+
+ __u32 lin_vmem_params:1;
+ __u32 tnr3_vmem_params:1;
+ __u32 xnr3_vmem_params:1;
+ __u32 tnr3_dmem_params:1;
+ __u32 xnr3_dmem_params:1;
+ __u32 reserved4:1;
+ __u32 obgrid_param:1;
+ __u32 reserved5:25;
+} __packed;
+
+/**
+ * struct ipu3_uapi_params - V4L2_META_FMT_IPU3_PARAMS
+ *
+ * @use: select which parameters to apply, see &ipu3_uapi_flags
+ * @acc_param: ACC parameters, as specified by &ipu3_uapi_acc_param
+ * @lin_vmem_params: linearization VMEM, as specified by
+ * &ipu3_uapi_isp_lin_vmem_params
+ * @tnr3_vmem_params: tnr3 VMEM as specified by
+ * &ipu3_uapi_isp_tnr3_vmem_params
+ * @xnr3_vmem_params: xnr3 VMEM as specified by
+ * &ipu3_uapi_isp_xnr3_vmem_params
+ * @tnr3_dmem_params: tnr3 DMEM as specified by &ipu3_uapi_isp_tnr3_params
+ * @xnr3_dmem_params: xnr3 DMEM as specified by &ipu3_uapi_isp_xnr3_params
+ * @obgrid_param: obgrid parameters as specified by
+ * &ipu3_uapi_obgrid_param
+ *
+ * The video queue "parameters" is of format V4L2_META_FMT_IPU3_PARAMS.
+ * This is a "single plane" v4l2_meta_format using V4L2_BUF_TYPE_META_OUTPUT.
+ *
+ * struct ipu3_uapi_params as defined below contains a lot of parameters and
+ * ipu3_uapi_flags selects which parameters to apply.
+ */
+struct ipu3_uapi_params {
+ /* Flags which of the settings below are to be applied */
+ struct ipu3_uapi_flags use __attribute__((aligned(32)));
+
+ /* Accelerator cluster parameters */
+ struct ipu3_uapi_acc_param acc_param;
+
+ /* ISP vector address space parameters */
+ struct ipu3_uapi_isp_lin_vmem_params lin_vmem_params;
+ struct ipu3_uapi_isp_tnr3_vmem_params tnr3_vmem_params;
+ struct ipu3_uapi_isp_xnr3_vmem_params xnr3_vmem_params;
+
+ /* ISP data memory (DMEM) parameters */
+ struct ipu3_uapi_isp_tnr3_params tnr3_dmem_params;
+ struct ipu3_uapi_isp_xnr3_params xnr3_dmem_params;
+
+ /* Optical black level compensation */
+ struct ipu3_uapi_obgrid_param obgrid_param;
+} __packed;
+
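+/*
+ * Illustrative sketch (not part of the UAPI): a userspace producer of a
+ * V4L2_META_FMT_IPU3_PARAMS buffer fills only the blocks it wants applied
+ * and raises the matching bit in @use, e.g. to update only the gamma
+ * correction block:
+ */
+static inline void
+ipu3_uapi_params_set_gamma(struct ipu3_uapi_params *p,
+			   const struct ipu3_uapi_gamma_config *gamma)
+{
+	p->acc_param.gamma = *gamma;
+	p->use.acc_gamma = 1;	/* apply only this block */
+}
+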
+#endif /* __IPU3_UAPI_H */
diff --git a/drivers/staging/media/ipu3/ipu3-abi.h b/drivers/staging/media/ipu3/ipu3-abi.h
new file mode 100644
index 000000000000..25be56ff01c8
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-abi.h
@@ -0,0 +1,2011 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_ABI_H
+#define __IPU3_ABI_H
+
+#include "include/intel-ipu3.h"
+
+/******************* IMGU Hardware information *******************/
+
+typedef u32 imgu_addr_t;
+
+#define IMGU_ISP_VMEM_ALIGN 128
+#define IMGU_DVS_BLOCK_W 64
+#define IMGU_DVS_BLOCK_H 32
+#define IMGU_GDC_BUF_X (2 * IMGU_DVS_BLOCK_W)
+#define IMGU_GDC_BUF_Y IMGU_DVS_BLOCK_H
+/* n = 0..1 */
+#define IMGU_SP_PMEM_BASE(n) (0x20000 + (n) * 0x4000)
+#define IMGU_MAX_BQ_GRID_WIDTH 80
+#define IMGU_MAX_BQ_GRID_HEIGHT 60
+#define IMGU_OBGRID_TILE_SIZE 16
+#define IMGU_PIXELS_PER_WORD 50
+#define IMGU_BYTES_PER_WORD 64
+#define IMGU_STRIPE_FIXED_HALF_OVERLAP 2
+#define IMGU_SHD_SETS 3
+#define IMGU_BDS_MIN_CLIP_VAL 0
+#define IMGU_BDS_MAX_CLIP_VAL 2
+
+#define IMGU_ABI_AWB_MAX_CELLS_PER_SET 160
+#define IMGU_ABI_AF_MAX_CELLS_PER_SET 32
+#define IMGU_ABI_AWB_FR_MAX_CELLS_PER_SET 32
+
+#define IMGU_ABI_ACC_OP_IDLE 0
+#define IMGU_ABI_ACC_OP_END_OF_ACK 1
+#define IMGU_ABI_ACC_OP_END_OF_OPS 2
+#define IMGU_ABI_ACC_OP_NO_OPS 3
+
+#define IMGU_ABI_ACC_OPTYPE_PROCESS_LINES 0
+#define IMGU_ABI_ACC_OPTYPE_TRANSFER_DATA 1
+
+/* Register definitions */
+
+/* PM_CTRL_0_5_0_IMGHMMADR */
+#define IMGU_REG_PM_CTRL 0x0
+#define IMGU_PM_CTRL_START BIT(0)
+#define IMGU_PM_CTRL_CFG_DONE BIT(1)
+#define IMGU_PM_CTRL_RACE_TO_HALT BIT(2)
+#define IMGU_PM_CTRL_NACK_ALL BIT(3)
+#define IMGU_PM_CTRL_CSS_PWRDN BIT(4)
+#define IMGU_PM_CTRL_RST_AT_EOF BIT(5)
+#define IMGU_PM_CTRL_FORCE_HALT BIT(6)
+#define IMGU_PM_CTRL_FORCE_UNHALT BIT(7)
+#define IMGU_PM_CTRL_FORCE_PWRDN BIT(8)
+#define IMGU_PM_CTRL_FORCE_RESET BIT(9)
+
+/* SYSTEM_REQ_0_5_0_IMGHMMADR */
+#define IMGU_REG_SYSTEM_REQ 0x18
+#define IMGU_SYSTEM_REQ_FREQ_MASK 0x3f
+#define IMGU_SYSTEM_REQ_FREQ_DIVIDER 25
+#define IMGU_REG_INT_STATUS 0x30
+#define IMGU_REG_INT_ENABLE 0x34
+#define IMGU_REG_INT_CSS_IRQ BIT(31)
+/* STATE_0_5_0_IMGHMMADR */
+#define IMGU_REG_STATE 0x130
+#define IMGU_STATE_HALT_STS BIT(0)
+#define IMGU_STATE_IDLE_STS BIT(1)
+#define IMGU_STATE_POWER_UP BIT(2)
+#define IMGU_STATE_POWER_DOWN BIT(3)
+#define IMGU_STATE_CSS_BUSY_MASK 0xc0
+#define IMGU_STATE_PM_FSM_MASK 0x180
+#define IMGU_STATE_PWRDNM_FSM_MASK		0x1e00000
+/* PM_STS_0_5_0_IMGHMMADR */
+#define IMGU_REG_PM_STS 0x140
+
+#define IMGU_REG_BASE 0x4000
+
+#define IMGU_REG_ISP_CTRL (IMGU_REG_BASE + 0x00)
+#define IMGU_CTRL_RST BIT(0)
+#define IMGU_CTRL_START BIT(1)
+#define IMGU_CTRL_BREAK BIT(2)
+#define IMGU_CTRL_RUN BIT(3)
+#define IMGU_CTRL_BROKEN BIT(4)
+#define IMGU_CTRL_IDLE BIT(5)
+#define IMGU_CTRL_SLEEPING BIT(6)
+#define IMGU_CTRL_STALLING BIT(7)
+#define IMGU_CTRL_IRQ_CLEAR BIT(8)
+#define IMGU_CTRL_IRQ_READY BIT(10)
+#define IMGU_CTRL_IRQ_SLEEPING BIT(11)
+#define IMGU_CTRL_ICACHE_INV BIT(12)
+#define IMGU_CTRL_IPREFETCH_EN BIT(13)
+#define IMGU_REG_ISP_START_ADDR (IMGU_REG_BASE + 0x04)
+#define IMGU_REG_ISP_ICACHE_ADDR (IMGU_REG_BASE + 0x10)
+#define IMGU_REG_ISP_PC (IMGU_REG_BASE + 0x1c)
+
+/* SP Registers, sp = 0:SP0; 1:SP1 */
+#define IMGU_REG_SP_CTRL(sp) (IMGU_REG_BASE + (sp) * 0x100 + 0x100)
+ /* For bits in IMGU_REG_SP_CTRL, see IMGU_CTRL_* */
+#define IMGU_REG_SP_START_ADDR(sp) (IMGU_REG_BASE + (sp) * 0x100 + 0x104)
+#define IMGU_REG_SP_ICACHE_ADDR(sp) (IMGU_REG_BASE + (sp) * 0x100 + 0x11c)
+#define IMGU_REG_SP_CTRL_SINK(sp) (IMGU_REG_BASE + (sp) * 0x100 + 0x130)
+#define IMGU_REG_SP_PC(sp) (IMGU_REG_BASE + (sp) * 0x100 + 0x134)
+
+#define IMGU_REG_TLB_INVALIDATE (IMGU_REG_BASE + 0x300)
+#define IMGU_TLB_INVALIDATE 1
+#define IMGU_REG_L1_PHYS (IMGU_REG_BASE + 0x304) /* 27-bit pfn */
+
+#define IMGU_REG_CIO_GATE_BURST_STATE (IMGU_REG_BASE + 0x404)
+#define IMGU_CIO_GATE_BURST_MASK 0x80
+
+#define IMGU_REG_GP_BUSY (IMGU_REG_BASE + 0x500)
+#define IMGU_REG_GP_STARVING (IMGU_REG_BASE + 0x504)
+#define IMGU_REG_GP_WORKLOAD (IMGU_REG_BASE + 0x508)
+#define IMGU_REG_GP_IRQ(n) (IMGU_REG_BASE + (n) * 4 + 0x50c) /* n = 0..4 */
+#define IMGU_REG_GP_SP1_STRMON_STAT (IMGU_REG_BASE + 0x520)
+#define IMGU_REG_GP_SP2_STRMON_STAT (IMGU_REG_BASE + 0x524)
+#define IMGU_REG_GP_ISP_STRMON_STAT (IMGU_REG_BASE + 0x528)
+#define IMGU_REG_GP_MOD_STRMON_STAT (IMGU_REG_BASE + 0x52c)
+
+/* Port definitions for the streaming monitors. */
+/* For each definition there is a signal pair: valid [bit 0] - accept [bit 1] */
+#define IMGU_GP_STRMON_STAT_SP1_PORT_SP12DMA BIT(0)
+#define IMGU_GP_STRMON_STAT_SP1_PORT_DMA2SP1 BIT(2)
+#define IMGU_GP_STRMON_STAT_SP1_PORT_SP12SP2 BIT(4)
+#define IMGU_GP_STRMON_STAT_SP1_PORT_SP22SP1 BIT(6)
+#define IMGU_GP_STRMON_STAT_SP1_PORT_SP12ISP BIT(8)
+#define IMGU_GP_STRMON_STAT_SP1_PORT_ISP2SP1 BIT(10)
+
+#define IMGU_GP_STRMON_STAT_SP2_PORT_SP22DMA BIT(0)
+#define IMGU_GP_STRMON_STAT_SP2_PORT_DMA2SP2 BIT(2)
+#define IMGU_GP_STRMON_STAT_SP2_PORT_SP22SP1 BIT(4)
+#define IMGU_GP_STRMON_STAT_SP2_PORT_SP12SP2 BIT(6)
+
+#define IMGU_GP_STRMON_STAT_ISP_PORT_ISP2DMA BIT(0)
+#define IMGU_GP_STRMON_STAT_ISP_PORT_DMA2ISP BIT(2)
+#define IMGU_GP_STRMON_STAT_ISP_PORT_ISP2SP1 BIT(4)
+#define IMGU_GP_STRMON_STAT_ISP_PORT_SP12ISP BIT(6)
+
+/* Between the devices and the fifo */
+#define IMGU_GP_STRMON_STAT_MOD_PORT_SP12DMA BIT(0)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_DMA2SP1 BIT(2)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_SP22DMA BIT(4)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_DMA2SP2 BIT(6)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_ISP2DMA BIT(8)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_DMA2ISP BIT(10)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2GDC BIT(12)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_GDC2CELLS BIT(14)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2DECOMP BIT(16)
+#define IMGU_GP_STRMON_STAT_MOD_PORT_DECOMP2CELLS BIT(18)
+/* n = 1..6 */
+#define IMGU_GP_STRMON_STAT_MOD_PORT_S2V(n) (1 << (((n) - 1) * 2 + 20))
+
+/* n = 1..15 */
+#define IMGU_GP_STRMON_STAT_ACCS_PORT_ACC(n) (1 << (((n) - 1) * 2))
+
+/* After FIFO and demux before SP1, n = 1..15 */
+#define IMGU_GP_STRMON_STAT_ACCS2SP1_MON_PORT_ACC(n) (1 << (((n) - 1) * 2))
+
+/* After FIFO and demux before SP2, n = 1..15 */
+#define IMGU_GP_STRMON_STAT_ACCS2SP2_MON_PORT_ACC(n) (1 << (((n) - 1) * 2))
+
+#define IMGU_REG_GP_HALT (IMGU_REG_BASE + 0x5dc)
+
+ /* n = 0..2 (main ctrl, SP0, SP1) */
+#define IMGU_REG_IRQCTRL_BASE(n) (IMGU_REG_BASE + (n) * 0x100 + 0x700)
+#define IMGU_IRQCTRL_MAIN 0
+#define IMGU_IRQCTRL_SP0 1
+#define IMGU_IRQCTRL_SP1 2
+#define IMGU_IRQCTRL_NUM 3
+#define IMGU_IRQCTRL_IRQ_SP1 BIT(0)
+#define IMGU_IRQCTRL_IRQ_SP2 BIT(1)
+#define IMGU_IRQCTRL_IRQ_ISP BIT(2)
+#define IMGU_IRQCTRL_IRQ_SP1_STREAM_MON BIT(3)
+#define IMGU_IRQCTRL_IRQ_SP2_STREAM_MON BIT(4)
+#define IMGU_IRQCTRL_IRQ_ISP_STREAM_MON BIT(5)
+#define IMGU_IRQCTRL_IRQ_MOD_STREAM_MON BIT(6)
+#define IMGU_IRQCTRL_IRQ_MOD_ISP_STREAM_MON BIT(7)
+#define IMGU_IRQCTRL_IRQ_ACCS_STREAM_MON BIT(8)
+#define IMGU_IRQCTRL_IRQ_ACCS_SP1_STREAM_MON BIT(9)
+#define IMGU_IRQCTRL_IRQ_ACCS_SP2_STREAM_MON BIT(10)
+#define IMGU_IRQCTRL_IRQ_ISP_PMEM_ERROR BIT(11)
+#define IMGU_IRQCTRL_IRQ_ISP_BAMEM_ERROR BIT(12)
+#define IMGU_IRQCTRL_IRQ_ISP_VMEM_ERROR BIT(13)
+#define IMGU_IRQCTRL_IRQ_ISP_DMEM_ERROR BIT(14)
+#define IMGU_IRQCTRL_IRQ_SP1_ICACHE_MEM_ERROR BIT(15)
+#define IMGU_IRQCTRL_IRQ_SP1_DMEM_ERROR BIT(16)
+#define IMGU_IRQCTRL_IRQ_SP2_ICACHE_MEM_ERROR BIT(17)
+#define IMGU_IRQCTRL_IRQ_SP2_DMEM_ERROR BIT(18)
+#define IMGU_IRQCTRL_IRQ_ACCS_SCRATCH_MEM_ERROR BIT(19)
+#define IMGU_IRQCTRL_IRQ_GP_TIMER(n) BIT(20 + (n)) /* n=0..1 */
+#define IMGU_IRQCTRL_IRQ_DMA BIT(22)
+#define IMGU_IRQCTRL_IRQ_SW_PIN(n) BIT(23 + (n)) /* n=0..4 */
+#define IMGU_IRQCTRL_IRQ_ACC_SYS BIT(28)
+#define IMGU_IRQCTRL_IRQ_OUT_FORM_IRQ_CTRL BIT(29)
+#define IMGU_IRQCTRL_IRQ_SP1_IRQ_CTRL BIT(30)
+#define IMGU_IRQCTRL_IRQ_SP2_IRQ_CTRL BIT(31)
+#define IMGU_REG_IRQCTRL_EDGE(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x00)
+#define IMGU_REG_IRQCTRL_MASK(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x04)
+#define IMGU_REG_IRQCTRL_STATUS(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x08)
+#define IMGU_REG_IRQCTRL_CLEAR(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x0c)
+#define IMGU_REG_IRQCTRL_ENABLE(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x10)
+#define IMGU_REG_IRQCTRL_EDGE_NOT_PULSE(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x14)
+#define IMGU_REG_IRQCTRL_STR_OUT_ENABLE(n) (IMGU_REG_IRQCTRL_BASE(n) + 0x18)
+
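+/*
+ * Illustrative driver-side sketch (not part of the ABI): enabling, say,
+ * the SP1 interrupt on the main IRQ controller would be an MMIO write
+ * against the registers above ('base' being a hypothetical ioremapped
+ * IMGU register base; the exact mask/enable semantics are hardware
+ * defined):
+ *
+ *	writel(IMGU_IRQCTRL_IRQ_SP1,
+ *	       base + IMGU_REG_IRQCTRL_ENABLE(IMGU_IRQCTRL_MAIN));
+ */
+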
+#define IMGU_REG_GP_TIMER (IMGU_REG_BASE + 0xa34)
+
+#define IMGU_REG_SP_DMEM_BASE(n) (IMGU_REG_BASE + (n) * 0x4000 + 0x4000)
+#define IMGU_REG_ISP_DMEM_BASE (IMGU_REG_BASE + 0xc000)
+
+#define IMGU_REG_GDC_BASE (IMGU_REG_BASE + 0x18000)
+#define IMGU_REG_GDC_LUT_BASE (IMGU_REG_GDC_BASE + 0x140)
+#define IMGU_GDC_LUT_MASK ((1 << 12) - 1) /* Range -1024..+1024 */
+
+#define IMGU_SCALER_PHASES 32
+#define IMGU_SCALER_COEFF_BITS 24
+#define IMGU_SCALER_PHASE_COUNTER_PREC_REF 6
+#define IMGU_SCALER_MAX_EXPONENT_SHIFT 3
+#define IMGU_SCALER_FILTER_TAPS 4
+#define IMGU_SCALER_TAPS_Y IMGU_SCALER_FILTER_TAPS
+#define IMGU_SCALER_TAPS_UV (IMGU_SCALER_FILTER_TAPS / 2)
+#define IMGU_SCALER_FIR_PHASES \
+ (IMGU_SCALER_PHASES << IMGU_SCALER_PHASE_COUNTER_PREC_REF)
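+/* i.e. IMGU_SCALER_FIR_PHASES = 32 << 6 = 2048 FIR sub-phases */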
+
+/******************* imgu_abi_acc_param *******************/
+
+#define IMGU_ABI_SHD_MAX_PROCESS_LINES 31
+#define IMGU_ABI_SHD_MAX_TRANSFERS 31
+#define IMGU_ABI_SHD_MAX_OPERATIONS \
+ (IMGU_ABI_SHD_MAX_PROCESS_LINES + IMGU_ABI_SHD_MAX_TRANSFERS)
+#define IMGU_ABI_SHD_MAX_CELLS_PER_SET 146
+/* largest grid is 73x56 */
+#define IMGU_ABI_SHD_MAX_CFG_SETS (2 * 28)
+
+#define IMGU_ABI_DVS_STAT_MAX_OPERATIONS 100
+#define IMGU_ABI_DVS_STAT_MAX_PROCESS_LINES 52
+#define IMGU_ABI_DVS_STAT_MAX_TRANSFERS 52
+
+#define IMGU_ABI_BDS_SAMPLE_PATTERN_ARRAY_SIZE 8
+#define IMGU_ABI_BDS_PHASE_COEFFS_ARRAY_SIZE 32
+
+#define IMGU_ABI_AWB_FR_MAX_TRANSFERS 30
+#define IMGU_ABI_AWB_FR_MAX_PROCESS_LINES 30
+#define IMGU_ABI_AWB_FR_MAX_OPERATIONS \
+ (IMGU_ABI_AWB_FR_MAX_TRANSFERS + IMGU_ABI_AWB_FR_MAX_PROCESS_LINES)
+
+#define IMGU_ABI_AF_MAX_TRANSFERS 30
+#define IMGU_ABI_AF_MAX_PROCESS_LINES 30
+#define IMGU_ABI_AF_MAX_OPERATIONS \
+ (IMGU_ABI_AF_MAX_TRANSFERS + IMGU_ABI_AF_MAX_PROCESS_LINES)
+
+#define IMGU_ABI_AWB_MAX_PROCESS_LINES 68
+#define IMGU_ABI_AWB_MAX_TRANSFERS 68
+#define IMGU_ABI_AWB_MAX_OPERATIONS \
+ (IMGU_ABI_AWB_MAX_PROCESS_LINES + IMGU_ABI_AWB_MAX_TRANSFERS)
+
+#define IMGU_ABI_OSYS_PIN_VF 0
+#define IMGU_ABI_OSYS_PIN_OUT 1
+#define IMGU_ABI_OSYS_PINS 2
+
+#define IMGU_ABI_DVS_STAT_LEVELS 3
+#define IMGU_ABI_YUVP2_YTM_LUT_ENTRIES 256
+#define IMGU_ABI_GDC_FRAC_BITS 8
+#define IMGU_ABI_BINARY_MAX_OUTPUT_PORTS 2
+#define IMGU_ABI_MAX_BINARY_NAME 64
+#define IMGU_ABI_ISP_DDR_WORD_BITS 256
+#define IMGU_ABI_ISP_DDR_WORD_BYTES (IMGU_ABI_ISP_DDR_WORD_BITS / 8)
+#define IMGU_ABI_MAX_STAGES 3
+#define IMGU_ABI_MAX_IF_CONFIGS 3
+#define IMGU_ABI_PIPE_CONFIG_ACQUIRE_ISP BIT(31)
+#define IMGU_ABI_PORT_CONFIG_TYPE_INPUT_HOST BIT(0)
+#define IMGU_ABI_PORT_CONFIG_TYPE_OUTPUT_HOST BIT(4)
+#define IMGU_ABI_MAX_SP_THREADS 4
+#define IMGU_ABI_FRAMES_REF 3
+#define IMGU_ABI_FRAMES_TNR 4
+#define IMGU_ABI_BUF_SETS_TNR 1
+
+#define IMGU_ABI_EVENT_BUFFER_ENQUEUED(thread, queue) \
+ (0 << 24 | (thread) << 16 | (queue) << 8)
+#define IMGU_ABI_EVENT_BUFFER_DEQUEUED(queue) (1 << 24 | (queue) << 8)
+#define IMGU_ABI_EVENT_EVENT_DEQUEUED (2 << 24)
+#define IMGU_ABI_EVENT_START_STREAM (3 << 24)
+#define IMGU_ABI_EVENT_STOP_STREAM (4 << 24)
+#define IMGU_ABI_EVENT_MIPI_BUFFERS_READY (5 << 24)
+#define IMGU_ABI_EVENT_UNLOCK_RAW_BUFFER (6 << 24)
+#define IMGU_ABI_EVENT_STAGE_ENABLE_DISABLE (7 << 24)
+
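+/*
+ * Illustrative sketch (not part of the ABI): a "buffer enqueued" event
+ * word for, say, SP thread 1 and queue C would be built as
+ * IMGU_ABI_EVENT_BUFFER_ENQUEUED(1, IMGU_ABI_QUEUE_C_ID), i.e.
+ * 0 << 24 | 1 << 16 | IMGU_ABI_QUEUE_C_ID << 8.
+ */
+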
+#define IMGU_ABI_HOST2SP_BUFQ_SIZE 3
+#define IMGU_ABI_SP2HOST_BUFQ_SIZE (2 * IMGU_ABI_MAX_SP_THREADS)
+#define IMGU_ABI_HOST2SP_EVTQ_SIZE (IMGU_ABI_QUEUE_NUM * \
+ IMGU_ABI_MAX_SP_THREADS * 2 + IMGU_ABI_MAX_SP_THREADS * 4)
+#define IMGU_ABI_SP2HOST_EVTQ_SIZE (6 * IMGU_ABI_MAX_SP_THREADS)
+
+#define IMGU_ABI_EVTTYPE_EVENT_SHIFT 0
+#define IMGU_ABI_EVTTYPE_EVENT_MASK (0xff << IMGU_ABI_EVTTYPE_EVENT_SHIFT)
+#define IMGU_ABI_EVTTYPE_PIPE_SHIFT 8
+#define IMGU_ABI_EVTTYPE_PIPE_MASK (0xff << IMGU_ABI_EVTTYPE_PIPE_SHIFT)
+#define IMGU_ABI_EVTTYPE_PIPEID_SHIFT 16
+#define IMGU_ABI_EVTTYPE_PIPEID_MASK (0xff << IMGU_ABI_EVTTYPE_PIPEID_SHIFT)
+#define IMGU_ABI_EVTTYPE_MODULEID_SHIFT 8
+#define IMGU_ABI_EVTTYPE_MODULEID_MASK (0xff << IMGU_ABI_EVTTYPE_MODULEID_SHIFT)
+#define IMGU_ABI_EVTTYPE_LINENO_SHIFT 16
+#define IMGU_ABI_EVTTYPE_LINENO_MASK (0xffff << IMGU_ABI_EVTTYPE_LINENO_SHIFT)
+
+/* Output frame ready */
+#define IMGU_ABI_EVTTYPE_OUT_FRAME_DONE 0
+/* Second output frame ready */
+#define IMGU_ABI_EVTTYPE_2ND_OUT_FRAME_DONE 1
+/* Viewfinder Output frame ready */
+#define IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE 2
+/* Second viewfinder Output frame ready */
+#define IMGU_ABI_EVTTYPE_2ND_VF_OUT_FRAME_DONE 3
+/* Indication that 3A statistics are available */
+#define IMGU_ABI_EVTTYPE_3A_STATS_DONE 4
+/* Indication that DIS statistics are available */
+#define IMGU_ABI_EVTTYPE_DIS_STATS_DONE 5
+/* Pipeline Done event, sent after last pipeline stage */
+#define IMGU_ABI_EVTTYPE_PIPELINE_DONE 6
+/* Frame tagged */
+#define IMGU_ABI_EVTTYPE_FRAME_TAGGED 7
+/* Input frame ready */
+#define IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE 8
+/* Metadata ready */
+#define IMGU_ABI_EVTTYPE_METADATA_DONE 9
+/* Indication that LACE statistics are available */
+#define IMGU_ABI_EVTTYPE_LACE_STATS_DONE 10
+/* Extension stage executed */
+#define IMGU_ABI_EVTTYPE_ACC_STAGE_COMPLETE 11
+/* Timing measurement data */
+#define IMGU_ABI_EVTTYPE_TIMER 12
+/* End Of Frame event, sent when in buffered sensor mode */
+#define IMGU_ABI_EVTTYPE_PORT_EOF 13
+/* Performance warning encountered by FW */
+#define IMGU_ABI_EVTTYPE_FW_WARNING 14
+/* Assertion hit by FW */
+#define IMGU_ABI_EVTTYPE_FW_ASSERT 15
+
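+/*
+ * Illustrative sketch (not part of the ABI): fields of a raw event word
+ * are extracted with the shift/mask pairs above, e.g.:
+ */
+static inline int imgu_abi_event_is_pipeline_done(u32 event)
+{
+	return ((event & IMGU_ABI_EVTTYPE_EVENT_MASK) >>
+		IMGU_ABI_EVTTYPE_EVENT_SHIFT) == IMGU_ABI_EVTTYPE_PIPELINE_DONE;
+}
+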
+#define IMGU_ABI_NUM_CONTINUOUS_FRAMES 10
+#define IMGU_ABI_SP_COMM_COMMAND 0x00
+
+/*
+ * The host2sp_cmd_ready command is the only command written by the SP.
+ * It acknowledges that its previous command has been received
+ * (this does not mean that the command has been executed) and
+ * indicates that a new command can be sent (it is a queue with
+ * depth 1).
+ */
+#define IMGU_ABI_SP_COMM_COMMAND_READY 1
+/* Command written by the Host */
+#define IMGU_ABI_SP_COMM_COMMAND_DUMMY 2 /* No action */
+#define IMGU_ABI_SP_COMM_COMMAND_START_FLASH 3 /* Start the flash */
+#define IMGU_ABI_SP_COMM_COMMAND_TERMINATE 4 /* Terminate */
+
+/* n = 0..IPU3_CSS_PIPE_ID_NUM-1 */
+#define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK(n) ((n) * 4 + 0x60)
+#define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK_OR_SHIFT 0
+#define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK_AND_SHIFT 16
+
+#define IMGU_ABI_BL_DMACMD_TYPE_SP_PMEM 1 /* sp_pmem */
+
+/***** For parameter computation *****/
+
+#define IMGU_HIVE_OF_SYS_SCALER_TO_FA_OFFSET 0xC
+#define IMGU_HIVE_OF_SYS_OF_TO_FA_OFFSET 0x8
+#define IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS 32
+
+#define IMGU_SCALER_ELEMS_PER_VEC 0x10
+#define IMGU_SCALER_FILTER_TAPS_Y 0x4
+#define IMGU_SCALER_OUT_BPP 0x8
+
+#define IMGU_SCALER_MS_TO_OUTFORMACC_SL_ADDR 0x400
+#define IMGU_SCALER_TO_OF_ACK_FA_ADDR \
+ (0xC00 + IMGU_HIVE_OF_SYS_SCALER_TO_FA_OFFSET)
+#define IMGU_OF_TO_ACK_FA_ADDR (0xC00 + IMGU_HIVE_OF_SYS_OF_TO_FA_OFFSET)
+#define IMGU_OUTFORMACC_MS_TO_SCALER_SL_ADDR 0
+#define IMGU_SCALER_INTR_BPP 10
+
+#define IMGU_PS_SNR_PRESERVE_BITS 3
+#define IMGU_CNTX_BPP 11
+#define IMGU_SCALER_FILTER_TAPS_UV (IMGU_SCALER_FILTER_TAPS_Y / 2)
+
+#define IMGU_VMEM2_ELEMS_PER_VEC (IMGU_SCALER_ELEMS_PER_VEC)
+#define IMGU_STRIDE_Y (IMGU_SCALER_FILTER_TAPS_Y + 1)
+#define IMGU_MAX_FRAME_WIDTH 3840
+#define IMGU_VMEM3_ELEMS_PER_VEC (IMGU_SCALER_ELEMS_PER_VEC)
+
+#define IMGU_VER_CNTX_WORDS DIV_ROUND_UP((IMGU_SCALER_OUT_BPP + \
+ IMGU_PS_SNR_PRESERVE_BITS), IMGU_CNTX_BPP) /* 1 */
+#define IMGU_MAX_INPUT_BLOCK_HEIGHT 64
+#define IMGU_HOR_CNTX_WORDS DIV_ROUND_UP((IMGU_SCALER_INTR_BPP + \
+ IMGU_PS_SNR_PRESERVE_BITS), IMGU_CNTX_BPP) /* 2 */
+#define IMGU_MAX_OUTPUT_BLOCK_WIDTH 128
+#define IMGU_CNTX_STRIDE_UV (IMGU_SCALER_FILTER_TAPS_UV + 1)
+
+#define IMGU_OSYS_DMA_CROP_W_LIMIT 64
+#define IMGU_OSYS_DMA_CROP_H_LIMIT 4
+#define IMGU_OSYS_BLOCK_WIDTH (2 * IPU3_UAPI_ISP_VEC_ELEMS)
+#define IMGU_OSYS_BLOCK_HEIGHT 32
+#define IMGU_OSYS_PHASES 0x20
+#define IMGU_OSYS_FILTER_TAPS 0x4
+#define IMGU_OSYS_PHASE_COUNTER_PREC_REF 6
+#define IMGU_OSYS_NUM_INPUT_BUFFERS 2
+#define IMGU_OSYS_FIR_PHASES \
+ (IMGU_OSYS_PHASES << IMGU_OSYS_PHASE_COUNTER_PREC_REF)
+#define IMGU_OSYS_TAPS_UV (IMGU_OSYS_FILTER_TAPS / 2)
+#define IMGU_OSYS_TAPS_Y (IMGU_OSYS_FILTER_TAPS)
+#define IMGU_OSYS_NUM_INTERM_BUFFERS 2
+
+#define IMGU_VMEM1_Y_SIZE \
+ (IMGU_OSYS_BLOCK_HEIGHT * IMGU_VMEM1_Y_STRIDE)
+#define IMGU_VMEM1_UV_SIZE (IMGU_VMEM1_Y_SIZE / 4)
+#define IMGU_VMEM1_OUT_BUF_ADDR (IMGU_VMEM1_INP_BUF_ADDR + \
+ (IMGU_OSYS_NUM_INPUT_BUFFERS * IMGU_VMEM1_BUF_SIZE))
+#define IMGU_OSYS_NUM_OUTPUT_BUFFERS 2
+
+/* transpose of input height */
+#define IMGU_VMEM2_VECS_PER_LINE \
+ (DIV_ROUND_UP(IMGU_OSYS_BLOCK_HEIGHT, IMGU_VMEM2_ELEMS_PER_VEC))
+/* size in words (vectors) */
+#define IMGU_VMEM2_BUF_SIZE \
+ (IMGU_VMEM2_VECS_PER_LINE * IMGU_VMEM2_LINES_PER_BLOCK)
+#define IMGU_VMEM3_VER_Y_SIZE \
+ ((IMGU_STRIDE_Y * IMGU_MAX_FRAME_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS)
+#define IMGU_VMEM3_HOR_Y_SIZE \
+ ((IMGU_STRIDE_Y * IMGU_MAX_INPUT_BLOCK_HEIGHT \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_HOR_CNTX_WORDS)
+#define IMGU_VMEM3_VER_Y_EXTRA \
+ ((IMGU_STRIDE_Y * IMGU_MAX_OUTPUT_BLOCK_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS)
+#define IMGU_VMEM3_VER_U_SIZE \
+ (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_FRAME_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
+#define IMGU_VMEM3_HOR_U_SIZE \
+ (((IMGU_STRIDE_Y * IMGU_MAX_INPUT_BLOCK_HEIGHT \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_HOR_CNTX_WORDS) / 2)
+#define IMGU_VMEM3_VER_U_EXTRA \
+ (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_OUTPUT_BLOCK_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
+#define IMGU_VMEM3_VER_V_SIZE \
+ (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_FRAME_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
+
+#define IMGU_ISP_VEC_NELEMS 64
+#define IMGU_LUMA_TO_CHROMA_RATIO 2
+#define IMGU_INPUT_BLOCK_WIDTH (128)
+#define IMGU_FIFO_ADDR_SCALER_TO_FMT \
+ (IMGU_SCALER_MS_TO_OUTFORMACC_SL_ADDR >> 2)
+#define IMGU_FIFO_ADDR_SCALER_TO_SP (IMGU_SCALER_TO_OF_ACK_FA_ADDR >> 2)
+#define IMGU_VMEM1_INP_BUF_ADDR 0
+#define IMGU_VMEM1_Y_STRIDE \
+ (IMGU_OSYS_BLOCK_WIDTH / IMGU_VMEM1_ELEMS_PER_VEC)
+#define IMGU_VMEM1_BUF_SIZE (IMGU_VMEM1_V_OFFSET + IMGU_VMEM1_UV_SIZE)
+
+#define IMGU_VMEM1_U_OFFSET (IMGU_VMEM1_Y_SIZE)
+#define IMGU_VMEM1_V_OFFSET (IMGU_VMEM1_U_OFFSET + IMGU_VMEM1_UV_SIZE)
+#define IMGU_VMEM1_UV_STRIDE (IMGU_VMEM1_Y_STRIDE / 2)
+#define IMGU_VMEM1_INT_BUF_ADDR (IMGU_VMEM1_OUT_BUF_ADDR + \
+ (IMGU_OSYS_NUM_OUTPUT_BUFFERS * IMGU_VMEM1_BUF_SIZE))
+
+#define IMGU_VMEM1_ELEMS_PER_VEC (IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS)
+#define IMGU_VMEM2_BUF_Y_ADDR 0
+#define IMGU_VMEM2_BUF_Y_STRIDE (IMGU_VMEM2_VECS_PER_LINE)
+#define IMGU_VMEM2_BUF_U_ADDR \
+ (IMGU_VMEM2_BUF_Y_ADDR + IMGU_VMEM2_BUF_SIZE)
+#define IMGU_VMEM2_BUF_V_ADDR \
+ (IMGU_VMEM2_BUF_U_ADDR + IMGU_VMEM2_BUF_SIZE / 4)
+#define IMGU_VMEM2_BUF_UV_STRIDE (IMGU_VMEM2_VECS_PER_LINE / 2)
+/* 1.5 x depth of intermediate buffer */
+#define IMGU_VMEM2_LINES_PER_BLOCK 192
+#define IMGU_VMEM3_HOR_Y_ADDR \
+ (IMGU_VMEM3_VER_Y_ADDR + IMGU_VMEM3_VER_Y_SIZE)
+#define IMGU_VMEM3_HOR_U_ADDR \
+ (IMGU_VMEM3_VER_U_ADDR + IMGU_VMEM3_VER_U_SIZE)
+#define IMGU_VMEM3_HOR_V_ADDR \
+ (IMGU_VMEM3_VER_V_ADDR + IMGU_VMEM3_VER_V_SIZE)
+#define IMGU_VMEM3_VER_Y_ADDR 0
+#define IMGU_VMEM3_VER_U_ADDR \
+ (IMGU_VMEM3_VER_Y_ADDR + IMGU_VMEM3_VER_Y_SIZE + \
+ max(IMGU_VMEM3_HOR_Y_SIZE, IMGU_VMEM3_VER_Y_EXTRA))
+#define IMGU_VMEM3_VER_V_ADDR \
+ (IMGU_VMEM3_VER_U_ADDR + IMGU_VMEM3_VER_U_SIZE + \
+ max(IMGU_VMEM3_HOR_U_SIZE, IMGU_VMEM3_VER_U_EXTRA))
+#define IMGU_FIFO_ADDR_FMT_TO_SP (IMGU_OF_TO_ACK_FA_ADDR >> 2)
+#define IMGU_FIFO_ADDR_FMT_TO_SCALER (IMGU_OUTFORMACC_MS_TO_SCALER_SL_ADDR >> 2)
+#define IMGU_VMEM1_HST_BUF_ADDR (IMGU_VMEM1_INT_BUF_ADDR + \
+ (IMGU_OSYS_NUM_INTERM_BUFFERS * IMGU_VMEM1_BUF_SIZE))
+#define IMGU_VMEM1_HST_BUF_STRIDE 120
+#define IMGU_VMEM1_HST_BUF_NLINES 3
+
+enum imgu_abi_frame_format {
+ IMGU_ABI_FRAME_FORMAT_NV11, /* 12 bit YUV 411, Y, UV plane */
+ IMGU_ABI_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */
+ IMGU_ABI_FRAME_FORMAT_NV12_16, /* 16 bit YUV 420, Y, UV plane */
+ IMGU_ABI_FRAME_FORMAT_NV12_TILEY,/* 12 bit YUV 420,Intel tiled format */
+ IMGU_ABI_FRAME_FORMAT_NV16, /* 16 bit YUV 422, Y, UV plane */
+ IMGU_ABI_FRAME_FORMAT_NV21, /* 12 bit YUV 420, Y, VU plane */
+ IMGU_ABI_FRAME_FORMAT_NV61, /* 16 bit YUV 422, Y, VU plane */
+ IMGU_ABI_FRAME_FORMAT_YV12, /* 12 bit YUV 420, Y, V, U plane */
+ IMGU_ABI_FRAME_FORMAT_YV16, /* 16 bit YUV 422, Y, V, U plane */
+ IMGU_ABI_FRAME_FORMAT_YUV420, /* 12 bit YUV 420, Y, U, V plane */
+ IMGU_ABI_FRAME_FORMAT_YUV420_16,/* yuv420, 16 bits per subpixel */
+ IMGU_ABI_FRAME_FORMAT_YUV422, /* 16 bit YUV 422, Y, U, V plane */
+ IMGU_ABI_FRAME_FORMAT_YUV422_16,/* yuv422, 16 bits per subpixel */
+ IMGU_ABI_FRAME_FORMAT_UYVY, /* 16 bit YUV 422, UYVY interleaved */
+ IMGU_ABI_FRAME_FORMAT_YUYV, /* 16 bit YUV 422, YUYV interleaved */
+ IMGU_ABI_FRAME_FORMAT_YUV444, /* 24 bit YUV 444, Y, U, V plane */
+ IMGU_ABI_FRAME_FORMAT_YUV_LINE, /* Internal format, 2 y lines */
+ /* followed by a uv-interleaved line */
+ IMGU_ABI_FRAME_FORMAT_RAW, /* RAW, 1 plane */
+ IMGU_ABI_FRAME_FORMAT_RGB565, /* 16 bit RGB, 1 plane. Each 3 sub
+ * pixels are packed into one 16 bit
+ * value, 5 bits for R, 6 bits for G
+ * and 5 bits for B.
+ */
+ IMGU_ABI_FRAME_FORMAT_PLANAR_RGB888, /* 24 bit RGB, 3 planes */
+ IMGU_ABI_FRAME_FORMAT_RGBA888, /* 32 bit RGBA, 1 plane, A=Alpha
+ * (alpha is unused)
+ */
+ IMGU_ABI_FRAME_FORMAT_QPLANE6, /* Internal, for advanced ISP */
+ IMGU_ABI_FRAME_FORMAT_BINARY_8, /* byte stream, used for jpeg. For
+ * frames of this type, we set the
+ * height to 1 and the width to the
+ * number of allocated bytes.
+ */
+ IMGU_ABI_FRAME_FORMAT_MIPI, /* MIPI frame, 1 plane */
+ IMGU_ABI_FRAME_FORMAT_RAW_PACKED, /* RAW, 1 plane, packed */
+ IMGU_ABI_FRAME_FORMAT_CSI_MIPI_YUV420_8, /* 8 bit per Y/U/V. Y odd line
+ * UYVY interleaved even line
+ */
+ IMGU_ABI_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8, /* Legacy YUV420.
+ * UY odd line;
+ * VY even line
+ */
+ IMGU_ABI_FRAME_FORMAT_CSI_MIPI_YUV420_10,/* 10 bit per Y/U/V. Y odd
+ * line; UYVY interleaved
+ * even line
+ */
+ IMGU_ABI_FRAME_FORMAT_YCGCO444_16, /* Internal format for ISP2.7,
+ * 16 bits per plane YUV 444,
+ * Y, U, V plane
+ */
+ IMGU_ABI_FRAME_FORMAT_NUM
+};
+
+enum imgu_abi_bayer_order {
+ IMGU_ABI_BAYER_ORDER_GRBG,
+ IMGU_ABI_BAYER_ORDER_RGGB,
+ IMGU_ABI_BAYER_ORDER_BGGR,
+ IMGU_ABI_BAYER_ORDER_GBRG
+};
+
+enum imgu_abi_osys_format {
+ IMGU_ABI_OSYS_FORMAT_YUV420,
+ IMGU_ABI_OSYS_FORMAT_YV12,
+ IMGU_ABI_OSYS_FORMAT_NV12,
+ IMGU_ABI_OSYS_FORMAT_NV21,
+ IMGU_ABI_OSYS_FORMAT_YUV_LINE,
+ IMGU_ABI_OSYS_FORMAT_YUY2, /* = IMGU_ABI_OSYS_FORMAT_YUYV */
+ IMGU_ABI_OSYS_FORMAT_NV16,
+ IMGU_ABI_OSYS_FORMAT_RGBA,
+ IMGU_ABI_OSYS_FORMAT_BGRA
+};
+
+enum imgu_abi_osys_tiling {
+ IMGU_ABI_OSYS_TILING_NONE,
+ IMGU_ABI_OSYS_TILING_Y,
+ IMGU_ABI_OSYS_TILING_YF,
+};
+
+enum imgu_abi_osys_procmode {
+ IMGU_ABI_OSYS_PROCMODE_BYPASS,
+ IMGU_ABI_OSYS_PROCMODE_UPSCALE,
+ IMGU_ABI_OSYS_PROCMODE_DOWNSCALE,
+};
+
+enum imgu_abi_queue_id {
+ IMGU_ABI_QUEUE_EVENT_ID = -1,
+ IMGU_ABI_QUEUE_A_ID = 0,
+ IMGU_ABI_QUEUE_B_ID,
+ IMGU_ABI_QUEUE_C_ID,
+ IMGU_ABI_QUEUE_D_ID,
+ IMGU_ABI_QUEUE_E_ID,
+ IMGU_ABI_QUEUE_F_ID,
+ IMGU_ABI_QUEUE_G_ID,
+ IMGU_ABI_QUEUE_H_ID, /* input frame queue for skycam */
+ IMGU_ABI_QUEUE_NUM
+};
+
+enum imgu_abi_buffer_type {
+ IMGU_ABI_BUFFER_TYPE_INVALID = -1,
+ IMGU_ABI_BUFFER_TYPE_3A_STATISTICS = 0,
+ IMGU_ABI_BUFFER_TYPE_DIS_STATISTICS,
+ IMGU_ABI_BUFFER_TYPE_LACE_STATISTICS,
+ IMGU_ABI_BUFFER_TYPE_INPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_SEC_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_VF_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_RAW_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_CUSTOM_INPUT,
+ IMGU_ABI_BUFFER_TYPE_CUSTOM_OUTPUT,
+ IMGU_ABI_BUFFER_TYPE_METADATA,
+ IMGU_ABI_BUFFER_TYPE_PARAMETER_SET,
+ IMGU_ABI_BUFFER_TYPE_PER_FRAME_PARAMETER_SET,
+ IMGU_ABI_NUM_DYNAMIC_BUFFER_TYPE,
+ IMGU_ABI_NUM_BUFFER_TYPE
+};
+
+enum imgu_abi_raw_type {
+ IMGU_ABI_RAW_TYPE_BAYER,
+ IMGU_ABI_RAW_TYPE_IR_ON_GR,
+ IMGU_ABI_RAW_TYPE_IR_ON_GB
+};
+
+enum imgu_abi_memories {
+ IMGU_ABI_MEM_ISP_PMEM0 = 0,
+ IMGU_ABI_MEM_ISP_DMEM0,
+ IMGU_ABI_MEM_ISP_VMEM0,
+ IMGU_ABI_MEM_ISP_VAMEM0,
+ IMGU_ABI_MEM_ISP_VAMEM1,
+ IMGU_ABI_MEM_ISP_VAMEM2,
+ IMGU_ABI_MEM_ISP_HMEM0,
+ IMGU_ABI_MEM_SP0_DMEM0,
+ IMGU_ABI_MEM_SP1_DMEM0,
+ IMGU_ABI_MEM_DDR,
+ IMGU_ABI_NUM_MEMORIES
+};
+
+enum imgu_abi_param_class {
+ IMGU_ABI_PARAM_CLASS_PARAM, /* Late binding parameters, like 3A */
+ IMGU_ABI_PARAM_CLASS_CONFIG, /* Pipe config time parameters */
+ IMGU_ABI_PARAM_CLASS_STATE, /* State parameters, eg. buffer index */
+ IMGU_ABI_PARAM_CLASS_NUM
+};
+
+enum imgu_abi_bin_input_src {
+ IMGU_ABI_BINARY_INPUT_SOURCE_SENSOR,
+ IMGU_ABI_BINARY_INPUT_SOURCE_MEMORY,
+ IMGU_ABI_BINARY_INPUT_SOURCE_VARIABLE,
+};
+
+enum imgu_abi_sp_swstate {
+ IMGU_ABI_SP_SWSTATE_TERMINATED,
+ IMGU_ABI_SP_SWSTATE_INITIALIZED,
+ IMGU_ABI_SP_SWSTATE_CONNECTED,
+ IMGU_ABI_SP_SWSTATE_RUNNING,
+};
+
+enum imgu_abi_bl_swstate {
+ IMGU_ABI_BL_SWSTATE_OK = 0x100,
+ IMGU_ABI_BL_SWSTATE_BUSY,
+ IMGU_ABI_BL_SWSTATE_ERR,
+};
+
+/* The type of pipe stage */
+enum imgu_abi_stage_type {
+ IMGU_ABI_STAGE_TYPE_SP,
+ IMGU_ABI_STAGE_TYPE_ISP,
+};
+
+struct imgu_abi_acc_operation {
+ /*
+ * zero means on init,
+ * others mean upon receiving an ack signal from the BC acc.
+ */
+ u8 op_indicator;
+ u8 op_type;
+} __packed;
+
+struct imgu_abi_acc_process_lines_cmd_data {
+ u16 lines;
+ u8 cfg_set;
+ u8 reserved; /* Align to 4 bytes */
+} __packed;
+
+/* Bayer shading definitions */
+
+struct imgu_abi_shd_transfer_luts_set_data {
+ u8 set_number;
+ u8 padding[3];
+ imgu_addr_t rg_lut_ddr_addr;
+ imgu_addr_t bg_lut_ddr_addr;
+ u32 align_dummy;
+} __packed;
+
+struct imgu_abi_shd_grid_config {
+ /* reg 0 */
+ u32 grid_width:8;
+ u32 grid_height:8;
+ u32 block_width:3;
+ u32 reserved0:1;
+ u32 block_height:3;
+ u32 reserved1:1;
+ u32 grid_height_per_slice:8;
+ /* reg 1 */
+ s32 x_start:13;
+ s32 reserved2:3;
+ s32 y_start:13;
+ s32 reserved3:3;
+} __packed;
+
+struct imgu_abi_shd_general_config {
+ u32 init_set_vrt_offst_ul:8;
+ u32 shd_enable:1;
+ /* aka 'gf' */
+ u32 gain_factor:2;
+ u32 reserved:21;
+} __packed;
+
+struct imgu_abi_shd_black_level_config {
+ /* reg 0 */
+ s32 bl_r:12;
+ s32 reserved0:4;
+ s32 bl_gr:12;
+ u32 reserved1:1;
+ /* aka 'nf' */
+ u32 normalization_shift:3;
+ /* reg 1 */
+ s32 bl_gb:12;
+ s32 reserved2:4;
+ s32 bl_b:12;
+ s32 reserved3:4;
+} __packed;
+
+struct imgu_abi_shd_intra_frame_operations_data {
+ struct imgu_abi_acc_operation
+ operation_list[IMGU_ABI_SHD_MAX_OPERATIONS] __aligned(32);
+ struct imgu_abi_acc_process_lines_cmd_data
+ process_lines_data[IMGU_ABI_SHD_MAX_PROCESS_LINES] __aligned(32);
+ struct imgu_abi_shd_transfer_luts_set_data
+ transfer_data[IMGU_ABI_SHD_MAX_TRANSFERS] __aligned(32);
+} __packed;
+
+struct imgu_abi_shd_config {
+ struct ipu3_uapi_shd_config_static shd __aligned(32);
+ struct imgu_abi_shd_intra_frame_operations_data shd_ops __aligned(32);
+ struct ipu3_uapi_shd_lut shd_lut __aligned(32);
+} __packed;
+
+struct imgu_abi_stripe_input_frame_resolution {
+ u16 width;
+ u16 height;
+ u32 bayer_order; /* enum ipu3_uapi_bayer_order */
+ u32 raw_bit_depth;
+} __packed;
+
+/* Stripe-based processing */
+
+struct imgu_abi_stripes {
+ /* offset from start of frame - measured in pixels */
+ u16 offset;
+ /* stripe width - measured in pixels */
+ u16 width;
+ /* stripe height - measured in pixels */
+ u16 height;
+} __packed;
+
+struct imgu_abi_stripe_data {
+ /*
+ * number of stripes for the current processing source - a VLIW
+ * binary parameter; currently 1 or 2 stripes are supported
+ */
+ u16 num_of_stripes;
+
+ u8 padding[2];
+
+ /*
+ * the following data is derived from resolution-related
+ * pipe config and from num_of_stripes
+ */
+
+ /*
+ * 'input-stripes' - before input cropping,
+ * used by the input feeder
+ */
+ struct imgu_abi_stripe_input_frame_resolution input_frame;
+
+ /* 'effective-stripes' - after input cropping; used by dpc, bds */
+ struct imgu_abi_stripes effective_stripes[IPU3_UAPI_MAX_STRIPES];
+
+ /* 'down-scaled-stripes' - after down-scaling only; used by BDS */
+ struct imgu_abi_stripes down_scaled_stripes[IPU3_UAPI_MAX_STRIPES];
+
+ /*
+ * 'bds-out-stripes' - after bayer down-scaling and padding;
+ * used by all algos from norm up to the ref-frame for GDC
+ * (currently up to the output kernel)
+ */
+ struct imgu_abi_stripes bds_out_stripes[IPU3_UAPI_MAX_STRIPES];
+
+ /* 'bds-out-stripes (no overlap)' - used for ref kernel */
+ struct imgu_abi_stripes
+ bds_out_stripes_no_overlap[IPU3_UAPI_MAX_STRIPES];
+
+ /*
+ * input resolution for output system (equal to bds_out - envelope)
+ * output-system input frame width as configured by user
+ */
+ u16 output_system_in_frame_width;
+ /* output-system input frame height as configured by user */
+ u16 output_system_in_frame_height;
+
+ /*
+ * 'output-stripes' - accounts for stitching on the output (no overlap)
+ * used by the output kernel
+ */
+ struct imgu_abi_stripes output_stripes[IPU3_UAPI_MAX_STRIPES];
+
+ /*
+ * 'block-stripes' - accounts for stitching by the output system
+ * (1 or more blocks overlap)
+ * used by DVS, TNR and the output system kernel
+ */
+ struct imgu_abi_stripes block_stripes[IPU3_UAPI_MAX_STRIPES];
+
+ u16 effective_frame_width; /* Needed for vertical cropping */
+ u16 bds_frame_width;
+ u16 out_frame_width; /* Output frame width as configured by user */
+ u16 out_frame_height; /* Output frame height as configured by user */
+
+ /* GDC in buffer (a.k.a. delay frame, ref buffer) info */
+ u16 gdc_in_buffer_width; /* GDC in buffer width */
+ u16 gdc_in_buffer_height; /* GDC in buffer height */
+ /* GDC in buffer first valid pixel x offset */
+ u16 gdc_in_buffer_offset_x;
+ /* GDC in buffer first valid pixel y offset */
+ u16 gdc_in_buffer_offset_y;
+
+ /* Display frame width as configured by user */
+ u16 display_frame_width;
+ /* Display frame height as configured by user */
+ u16 display_frame_height;
+ u16 bds_aligned_frame_width;
+ /* Number of vectors to left-crop when writing stripes (not stripe 0) */
+ u16 half_overlap_vectors;
+ /* Decimate ISP and fixed func resolutions after BDS (ir_extraction) */
+ u16 ir_ext_decimation;
+ u8 padding1[2];
+} __packed;
+
+/* Input feeder related structs */
+
+struct imgu_abi_input_feeder_data {
+ u32 row_stride; /* row stride */
+ u32 start_row_address; /* start row address */
+ u32 start_pixel; /* start pixel */
+} __packed;
+
+struct imgu_abi_input_feeder_data_aligned {
+ struct imgu_abi_input_feeder_data data __aligned(32);
+} __packed;
+
+struct imgu_abi_input_feeder_data_per_stripe {
+ struct imgu_abi_input_feeder_data_aligned
+ input_feeder_data[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+struct imgu_abi_input_feeder_config {
+ struct imgu_abi_input_feeder_data data;
+ struct imgu_abi_input_feeder_data_per_stripe data_per_stripe
+ __aligned(32);
+} __packed;
+
+/* DVS related definitions */
+
+struct imgu_abi_dvs_stat_grd_config {
+ u8 grid_width;
+ u8 grid_height;
+ u8 block_width;
+ u8 block_height;
+ u16 x_start;
+ u16 y_start;
+ u16 enable;
+ u16 x_end;
+ u16 y_end;
+} __packed;
+
+struct imgu_abi_dvs_stat_cfg {
+ u8 reserved0[4];
+ struct imgu_abi_dvs_stat_grd_config
+ grd_config[IMGU_ABI_DVS_STAT_LEVELS];
+ u8 reserved1[18];
+} __packed;
+
+struct imgu_abi_dvs_stat_transfer_op_data {
+ u8 set_number;
+} __packed;
+
+struct imgu_abi_dvs_stat_intra_frame_operations_data {
+ struct imgu_abi_acc_operation
+ ops[IMGU_ABI_DVS_STAT_MAX_OPERATIONS] __aligned(32);
+ struct imgu_abi_acc_process_lines_cmd_data
+ process_lines_data[IMGU_ABI_DVS_STAT_MAX_PROCESS_LINES]
+ __aligned(32);
+ struct imgu_abi_dvs_stat_transfer_op_data
+ transfer_data[IMGU_ABI_DVS_STAT_MAX_TRANSFERS] __aligned(32);
+} __packed;
+
+struct imgu_abi_dvs_stat_config {
+ struct imgu_abi_dvs_stat_cfg cfg __aligned(32);
+ u8 reserved0[128];
+ struct imgu_abi_dvs_stat_intra_frame_operations_data operations_data;
+ u8 reserved1[64];
+} __packed;
+
+/* Y-tone Mapping */
+
+struct imgu_abi_yuvp2_y_tm_lut_static_config {
+ u16 entries[IMGU_ABI_YUVP2_YTM_LUT_ENTRIES];
+ u32 enable;
+} __packed;
+
+/* Output formatter related structs */
+
+struct imgu_abi_osys_formatter_params {
+ u32 format;
+ u32 flip;
+ u32 mirror;
+ u32 tiling;
+ u32 reduce_range;
+ u32 alpha_blending;
+ u32 release_inp_addr;
+ u32 release_inp_en;
+ u32 process_out_buf_addr;
+ u32 image_width_vecs;
+ u32 image_height_lines;
+ u32 inp_buff_y_st_addr;
+ u32 inp_buff_y_line_stride;
+ u32 inp_buff_y_buffer_stride;
+ u32 int_buff_u_st_addr;
+ u32 int_buff_v_st_addr;
+ u32 inp_buff_uv_line_stride;
+ u32 inp_buff_uv_buffer_stride;
+ u32 out_buff_level;
+ u32 out_buff_nr_y_lines;
+ u32 out_buff_u_st_offset;
+ u32 out_buff_v_st_offset;
+ u32 out_buff_y_line_stride;
+ u32 out_buff_uv_line_stride;
+ u32 hist_buff_st_addr;
+ u32 hist_buff_line_stride;
+ u32 hist_buff_nr_lines;
+} __packed;
+
+struct imgu_abi_osys_formatter {
+ struct imgu_abi_osys_formatter_params param __aligned(32);
+} __packed;
+
+struct imgu_abi_osys_scaler_params {
+ u32 inp_buf_y_st_addr;
+ u32 inp_buf_y_line_stride;
+ u32 inp_buf_y_buffer_stride;
+ u32 inp_buf_u_st_addr;
+ u32 inp_buf_v_st_addr;
+ u32 inp_buf_uv_line_stride;
+ u32 inp_buf_uv_buffer_stride;
+ u32 inp_buf_chunk_width;
+ u32 inp_buf_nr_buffers;
+ /* Output buffers */
+ u32 out_buf_y_st_addr;
+ u32 out_buf_y_line_stride;
+ u32 out_buf_y_buffer_stride;
+ u32 out_buf_u_st_addr;
+ u32 out_buf_v_st_addr;
+ u32 out_buf_uv_line_stride;
+ u32 out_buf_uv_buffer_stride;
+ u32 out_buf_nr_buffers;
+ /* Intermediate buffers */
+ u32 int_buf_y_st_addr;
+ u32 int_buf_y_line_stride;
+ u32 int_buf_u_st_addr;
+ u32 int_buf_v_st_addr;
+ u32 int_buf_uv_line_stride;
+ u32 int_buf_height;
+ u32 int_buf_chunk_width;
+ u32 int_buf_chunk_height;
+ /* Context buffers */
+ u32 ctx_buf_hor_y_st_addr;
+ u32 ctx_buf_hor_u_st_addr;
+ u32 ctx_buf_hor_v_st_addr;
+ u32 ctx_buf_ver_y_st_addr;
+ u32 ctx_buf_ver_u_st_addr;
+ u32 ctx_buf_ver_v_st_addr;
+ /* Addresses for release-input and process-output tokens */
+ u32 release_inp_buf_addr;
+ u32 release_inp_buf_en;
+ u32 release_out_buf_en;
+ u32 process_out_buf_addr;
+ /* Settings dimensions, padding, cropping */
+ u32 input_image_y_width;
+ u32 input_image_y_height;
+ u32 input_image_y_start_column;
+ u32 input_image_uv_start_column;
+ u32 input_image_y_left_pad;
+ u32 input_image_uv_left_pad;
+ u32 input_image_y_right_pad;
+ u32 input_image_uv_right_pad;
+ u32 input_image_y_top_pad;
+ u32 input_image_uv_top_pad;
+ u32 input_image_y_bottom_pad;
+ u32 input_image_uv_bottom_pad;
+ u32 processing_mode; /* enum imgu_abi_osys_procmode */
+ u32 scaling_ratio;
+ u32 y_left_phase_init;
+ u32 uv_left_phase_init;
+ u32 y_top_phase_init;
+ u32 uv_top_phase_init;
+ u32 coeffs_exp_shift;
+ u32 out_y_left_crop;
+ u32 out_uv_left_crop;
+ u32 out_y_top_crop;
+ u32 out_uv_top_crop;
+} __packed;
+
+struct imgu_abi_osys_scaler {
+ struct imgu_abi_osys_scaler_params param __aligned(32);
+} __packed;
+
+struct imgu_abi_osys_frame_params {
+ /* Output pins */
+ u32 enable;
+ u32 format; /* enum imgu_abi_osys_format */
+ u32 flip;
+ u32 mirror;
+ u32 tiling; /* enum imgu_abi_osys_tiling */
+ u32 width;
+ u32 height;
+ u32 stride;
+ u32 scaled;
+} __packed;
+
+struct imgu_abi_osys_frame {
+ struct imgu_abi_osys_frame_params param __aligned(32);
+} __packed;
+
+struct imgu_abi_osys_stripe {
+ /* Input resolution */
+ u32 input_width;
+ u32 input_height;
+ /* Output Stripe */
+ u32 output_width[IMGU_ABI_OSYS_PINS];
+ u32 output_height[IMGU_ABI_OSYS_PINS];
+ u32 output_offset[IMGU_ABI_OSYS_PINS];
+ u32 buf_stride[IMGU_ABI_OSYS_PINS];
+ /* Scaler params */
+ u32 block_width;
+ u32 block_height;
+ /* Output Crop factor */
+ u32 crop_top[IMGU_ABI_OSYS_PINS];
+ u32 crop_left[IMGU_ABI_OSYS_PINS];
+} __packed;
+
+struct imgu_abi_osys_config {
+ struct imgu_abi_osys_formatter
+ formatter[IPU3_UAPI_MAX_STRIPES][IMGU_ABI_OSYS_PINS];
+ struct imgu_abi_osys_scaler scaler[IPU3_UAPI_MAX_STRIPES];
+ struct imgu_abi_osys_frame frame[IMGU_ABI_OSYS_PINS];
+ struct imgu_abi_osys_stripe stripe[IPU3_UAPI_MAX_STRIPES];
+ /* 32 packed coefficients for luma and chroma */
+ s8 scaler_coeffs_chroma[128];
+ s8 scaler_coeffs_luma[128];
+} __packed;
+
+/* BDS */
+
+struct imgu_abi_bds_hor_ctrl0 {
+ u32 sample_patrn_length:9;
+ u32 reserved0:3;
+ u32 hor_ds_en:1;
+ u32 min_clip_val:1;
+ u32 max_clip_val:2;
+ u32 out_frame_width:13;
+ u32 reserved1:3;
+} __packed;
+
+struct imgu_abi_bds_ptrn_arr {
+ u32 elems[IMGU_ABI_BDS_SAMPLE_PATTERN_ARRAY_SIZE];
+} __packed;
+
+struct imgu_abi_bds_phase_entry {
+ s8 coeff_min2;
+ s8 coeff_min1;
+ s8 coeff_0;
+ s8 nf;
+ s8 coeff_pls1;
+ s8 coeff_pls2;
+ s8 coeff_pls3;
+ u8 reserved;
+} __packed;
+
+struct imgu_abi_bds_phase_arr {
+ struct imgu_abi_bds_phase_entry
+ even[IMGU_ABI_BDS_PHASE_COEFFS_ARRAY_SIZE];
+ struct imgu_abi_bds_phase_entry
+ odd[IMGU_ABI_BDS_PHASE_COEFFS_ARRAY_SIZE];
+} __packed;
+
+struct imgu_abi_bds_hor_ctrl1 {
+ u32 hor_crop_start:13;
+ u32 reserved0:3;
+ u32 hor_crop_end:13;
+ u32 reserved1:1;
+ u32 hor_crop_en:1;
+ u32 reserved2:1;
+} __packed;
+
+struct imgu_abi_bds_hor_ctrl2 {
+ u32 input_frame_height:13;
+ u32 reserved0:19;
+} __packed;
+
+struct imgu_abi_bds_hor {
+ struct imgu_abi_bds_hor_ctrl0 hor_ctrl0;
+ struct imgu_abi_bds_ptrn_arr hor_ptrn_arr;
+ struct imgu_abi_bds_phase_arr hor_phase_arr;
+ struct imgu_abi_bds_hor_ctrl1 hor_ctrl1;
+ struct imgu_abi_bds_hor_ctrl2 hor_ctrl2;
+} __packed;
+
+struct imgu_abi_bds_ver_ctrl0 {
+ u32 sample_patrn_length:9;
+ u32 reserved0:3;
+ u32 ver_ds_en:1;
+ u32 min_clip_val:1;
+ u32 max_clip_val:2;
+ u32 reserved1:16;
+} __packed;
+
+struct imgu_abi_bds_ver_ctrl1 {
+ u32 out_frame_width:13;
+ u32 reserved0:3;
+ u32 out_frame_height:13;
+ u32 reserved1:3;
+} __packed;
+
+struct imgu_abi_bds_ver {
+ struct imgu_abi_bds_ver_ctrl0 ver_ctrl0;
+ struct imgu_abi_bds_ptrn_arr ver_ptrn_arr;
+ struct imgu_abi_bds_phase_arr ver_phase_arr;
+ struct imgu_abi_bds_ver_ctrl1 ver_ctrl1;
+} __packed;
+
+struct imgu_abi_bds_per_stripe_data {
+ struct imgu_abi_bds_hor_ctrl0 hor_ctrl0;
+ struct imgu_abi_bds_ver_ctrl1 ver_ctrl1;
+ struct imgu_abi_bds_hor_ctrl1 crop;
+} __packed;
+
+struct imgu_abi_bds_per_stripe_data_aligned {
+ struct imgu_abi_bds_per_stripe_data data __aligned(32);
+} __packed;
+
+struct imgu_abi_bds_per_stripe {
+ struct imgu_abi_bds_per_stripe_data_aligned
+ aligned_data[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+struct imgu_abi_bds_config {
+ struct imgu_abi_bds_hor hor __aligned(32);
+ struct imgu_abi_bds_ver ver __aligned(32);
+ struct imgu_abi_bds_per_stripe per_stripe __aligned(32);
+ u32 enabled;
+} __packed;
+
+/* ANR */
+
+struct imgu_abi_anr_search_config {
+ u32 enable;
+ u16 frame_width;
+ u16 frame_height;
+} __packed;
+
+struct imgu_abi_anr_stitch_config {
+ u32 anr_stitch_en;
+ u16 frame_width;
+ u16 frame_height;
+ u8 reserved[40];
+ struct ipu3_uapi_anr_stitch_pyramid pyramid[IPU3_UAPI_ANR_PYRAMID_SIZE];
+} __packed;
+
+struct imgu_abi_anr_tile2strm_config {
+ u32 enable;
+ u16 frame_width;
+ u16 frame_height;
+} __packed;
+
+struct imgu_abi_anr_config {
+ struct imgu_abi_anr_search_config search __aligned(32);
+ struct ipu3_uapi_anr_transform_config transform __aligned(32);
+ struct imgu_abi_anr_stitch_config stitch __aligned(32);
+ struct imgu_abi_anr_tile2strm_config tile2strm __aligned(32);
+} __packed;
+
+/* AF */
+
+struct imgu_abi_af_frame_size {
+ u16 width;
+ u16 height;
+} __packed;
+
+struct imgu_abi_af_config_s {
+ struct ipu3_uapi_af_filter_config filter_config __aligned(32);
+ struct imgu_abi_af_frame_size frame_size;
+ struct ipu3_uapi_grid_config grid_cfg __aligned(32);
+} __packed;
+
+struct imgu_abi_af_intra_frame_operations_data {
+ struct imgu_abi_acc_operation ops[IMGU_ABI_AF_MAX_OPERATIONS]
+ __aligned(32);
+ struct imgu_abi_acc_process_lines_cmd_data
+ process_lines_data[IMGU_ABI_AF_MAX_PROCESS_LINES] __aligned(32);
+} __packed;
+
+struct imgu_abi_af_stripe_config {
+ struct imgu_abi_af_frame_size frame_size __aligned(32);
+ struct ipu3_uapi_grid_config grid_cfg __aligned(32);
+} __packed;
+
+struct imgu_abi_af_config {
+ struct imgu_abi_af_config_s config;
+ struct imgu_abi_af_intra_frame_operations_data operations_data;
+ struct imgu_abi_af_stripe_config stripes[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+/* AE */
+
+struct imgu_abi_ae_config {
+ struct ipu3_uapi_ae_grid_config grid_cfg __aligned(32);
+ struct ipu3_uapi_ae_weight_elem weights[IPU3_UAPI_AE_WEIGHTS]
+ __aligned(32);
+ struct ipu3_uapi_ae_ccm ae_ccm __aligned(32);
+ struct {
+ struct ipu3_uapi_ae_grid_config grid __aligned(32);
+ } stripes[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+/* AWB_FR */
+
+struct imgu_abi_awb_fr_intra_frame_operations_data {
+ struct imgu_abi_acc_operation ops[IMGU_ABI_AWB_FR_MAX_OPERATIONS]
+ __aligned(32);
+ struct imgu_abi_acc_process_lines_cmd_data
+ process_lines_data[IMGU_ABI_AWB_FR_MAX_PROCESS_LINES] __aligned(32);
+} __packed;
+
+struct imgu_abi_awb_fr_config {
+ struct ipu3_uapi_awb_fr_config_s config;
+ struct imgu_abi_awb_fr_intra_frame_operations_data operations_data;
+ struct ipu3_uapi_awb_fr_config_s stripes[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+struct imgu_abi_acc_transfer_op_data {
+ u8 set_number;
+} __packed;
+
+struct imgu_abi_awb_intra_frame_operations_data {
+ struct imgu_abi_acc_operation ops[IMGU_ABI_AWB_MAX_OPERATIONS]
+ __aligned(32);
+ struct imgu_abi_acc_process_lines_cmd_data
+ process_lines_data[IMGU_ABI_AWB_MAX_PROCESS_LINES] __aligned(32);
+ struct imgu_abi_acc_transfer_op_data
+ transfer_data[IMGU_ABI_AWB_MAX_TRANSFERS] __aligned(32);
+} __aligned(32) __packed;
+
+struct imgu_abi_awb_config {
+ struct ipu3_uapi_awb_config_s config __aligned(32);
+ struct imgu_abi_awb_intra_frame_operations_data operations_data;
+ struct ipu3_uapi_awb_config_s stripes[IPU3_UAPI_MAX_STRIPES];
+} __packed;
+
+struct imgu_abi_acc_param {
+ struct imgu_abi_stripe_data stripe;
+ u8 padding[8];
+ struct imgu_abi_input_feeder_config input_feeder;
+ struct ipu3_uapi_bnr_static_config bnr;
+ struct ipu3_uapi_bnr_static_config_green_disparity green_disparity
+ __aligned(32);
+ struct ipu3_uapi_dm_config dm __aligned(32);
+ struct ipu3_uapi_ccm_mat_config ccm __aligned(32);
+ struct ipu3_uapi_gamma_config gamma __aligned(32);
+ struct ipu3_uapi_csc_mat_config csc __aligned(32);
+ struct ipu3_uapi_cds_params cds __aligned(32);
+ struct imgu_abi_shd_config shd __aligned(32);
+ struct imgu_abi_dvs_stat_config dvs_stat;
+ u8 padding1[224]; /* reserved for lace_stat */
+ struct ipu3_uapi_yuvp1_iefd_config iefd __aligned(32);
+ struct ipu3_uapi_yuvp1_yds_config yds_c0 __aligned(32);
+ struct ipu3_uapi_yuvp1_chnr_config chnr_c0 __aligned(32);
+ struct ipu3_uapi_yuvp1_y_ee_nr_config y_ee_nr __aligned(32);
+ struct ipu3_uapi_yuvp1_yds_config yds __aligned(32);
+ struct ipu3_uapi_yuvp1_chnr_config chnr __aligned(32);
+ struct imgu_abi_yuvp2_y_tm_lut_static_config ytm __aligned(32);
+ struct ipu3_uapi_yuvp1_yds_config yds2 __aligned(32);
+ struct ipu3_uapi_yuvp2_tcc_static_config tcc __aligned(32);
+ /* reserved for defect pixel correction */
+ u8 dpc[240832] __aligned(32);
+ struct imgu_abi_bds_config bds;
+ struct imgu_abi_anr_config anr;
+ struct imgu_abi_awb_fr_config awb_fr;
+ struct imgu_abi_ae_config ae;
+ struct imgu_abi_af_config af;
+ struct imgu_abi_awb_config awb;
+ struct imgu_abi_osys_config osys;
+} __packed;
+
+/***** Morphing table entry *****/
+
+struct imgu_abi_gdc_warp_param {
+ u32 origin_x;
+ u32 origin_y;
+ u32 in_addr_offset;
+ u32 in_block_width;
+ u32 in_block_height;
+ u32 p0_x;
+ u32 p0_y;
+ u32 p1_x;
+ u32 p1_y;
+ u32 p2_x;
+ u32 p2_y;
+ u32 p3_x;
+ u32 p3_y;
+ u32 in_block_width_a;
+ u32 in_block_width_b;
+ u32 padding; /* struct size multiple of DDR word */
+} __packed;
+
+/******************* Firmware ABI definitions *******************/
+
+/***** struct imgu_abi_sp_stage *****/
+
+struct imgu_abi_crop_pos {
+ u16 x;
+ u16 y;
+} __packed;
+
+struct imgu_abi_sp_resolution {
+ u16 width; /* Width of valid data in pixels */
+ u16 height; /* Height of valid data in lines */
+} __packed;
+
+/*
+ * Frame info struct. This describes the contents of an image frame buffer.
+ */
+struct imgu_abi_frame_sp_info {
+ struct imgu_abi_sp_resolution res;
+ u16 padded_width; /* stride of line in memory
+ * (in pixels)
+ */
+ u8 format; /* format of the frame data */
+ u8 raw_bit_depth; /* number of valid bits per pixel,
+ * only valid for RAW bayer frames
+ */
+ u8 raw_bayer_order; /* bayer order, only valid
+ * for RAW bayer frames
+ */
+ u8 raw_type; /* Selects the raw frame type. For legacy
+ * SKC pipes the default is
+ * IMGU_ABI_RAW_TYPE_BAYER. For an RGB IR
+ * sensor the driver should set
+ * IMGU_ABI_RAW_TYPE_IR_ON_GR (IR on Gr) or
+ * IMGU_ABI_RAW_TYPE_IR_ON_GB (IR on Gb).
+ */
+ u8 padding[2]; /* Extend to 32 bit multiple */
+} __packed;
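+
+/*
+ * Illustrative only (not part of the ABI): a host-side initialization of
+ * this struct for a hypothetical 1920x1080 NV12 frame might look like
+ *
+ *	struct imgu_abi_frame_sp_info info = {
+ *		.res = { .width = 1920, .height = 1080 },
+ *		.padded_width = 1920,
+ *		.format = IMGU_ABI_FRAME_FORMAT_NV12,
+ *	};
+ *
+ * The raw_* fields are only meaningful for RAW bayer frames and can be
+ * left zeroed for YUV formats.
+ */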
+
+struct imgu_abi_buffer_sp {
+ union {
+ imgu_addr_t xmem_addr;
+ s32 queue_id; /* enum imgu_abi_queue_id */
+ } buf_src;
+ s32 buf_type; /* enum imgu_abi_buffer_type */
+} __packed;
+
+struct imgu_abi_frame_sp_plane {
+ u32 offset; /* offset in bytes to start of frame data */
+ /* offset is wrt data in imgu_abi_sp_sp_frame */
+} __packed;
+
+struct imgu_abi_frame_sp_rgb_planes {
+ struct imgu_abi_frame_sp_plane r;
+ struct imgu_abi_frame_sp_plane g;
+ struct imgu_abi_frame_sp_plane b;
+} __packed;
+
+struct imgu_abi_frame_sp_yuv_planes {
+ struct imgu_abi_frame_sp_plane y;
+ struct imgu_abi_frame_sp_plane u;
+ struct imgu_abi_frame_sp_plane v;
+} __packed;
+
+struct imgu_abi_frame_sp_nv_planes {
+ struct imgu_abi_frame_sp_plane y;
+ struct imgu_abi_frame_sp_plane uv;
+} __packed;
+
+struct imgu_abi_frame_sp_plane6 {
+ struct imgu_abi_frame_sp_plane r;
+ struct imgu_abi_frame_sp_plane r_at_b;
+ struct imgu_abi_frame_sp_plane gr;
+ struct imgu_abi_frame_sp_plane gb;
+ struct imgu_abi_frame_sp_plane b;
+ struct imgu_abi_frame_sp_plane b_at_r;
+} __packed;
+
+struct imgu_abi_frame_sp_binary_plane {
+ u32 size;
+ struct imgu_abi_frame_sp_plane data;
+} __packed;
+
+struct imgu_abi_frame_sp {
+ struct imgu_abi_frame_sp_info info;
+ struct imgu_abi_buffer_sp buf_attr;
+ union {
+ struct imgu_abi_frame_sp_plane raw;
+ struct imgu_abi_frame_sp_plane rgb;
+ struct imgu_abi_frame_sp_rgb_planes planar_rgb;
+ struct imgu_abi_frame_sp_plane yuyv;
+ struct imgu_abi_frame_sp_yuv_planes yuv;
+ struct imgu_abi_frame_sp_nv_planes nv;
+ struct imgu_abi_frame_sp_plane6 plane6;
+ struct imgu_abi_frame_sp_binary_plane binary;
+ } planes;
+} __packed;
+
+struct imgu_abi_resolution {
+ u32 width;
+ u32 height;
+} __packed;
+
+struct imgu_abi_frames_sp {
+ struct imgu_abi_frame_sp in;
+ struct imgu_abi_frame_sp out[IMGU_ABI_BINARY_MAX_OUTPUT_PORTS];
+ struct imgu_abi_resolution effective_in_res;
+ struct imgu_abi_frame_sp out_vf;
+ struct imgu_abi_frame_sp_info internal_frame_info;
+ struct imgu_abi_buffer_sp s3a_buf;
+ struct imgu_abi_buffer_sp dvs_buf;
+ struct imgu_abi_buffer_sp lace_buf;
+} __packed;
+
+struct imgu_abi_uds_info {
+ u16 curr_dx;
+ u16 curr_dy;
+ u16 xc;
+ u16 yc;
+} __packed;
+
+/* Information for a single pipeline stage */
+struct imgu_abi_sp_stage {
+ /* Multiple boolean flags can be stored in an integer */
+ u8 num; /* Stage number */
+ u8 isp_online;
+ u8 isp_copy_vf;
+ u8 isp_copy_output;
+ u8 sp_enable_xnr;
+ u8 isp_deci_log_factor;
+ u8 isp_vf_downscale_bits;
+ u8 deinterleaved;
+ /*
+ * NOTE: Programming the input circuit can only be done at the
+ * start of a session. It is illegal to program it during execution.
+ * The input circuit defines the connectivity.
+ */
+ u8 program_input_circuit;
+ u8 func;
+ u8 stage_type; /* enum imgu_abi_stage_type */
+ u8 num_stripes;
+ u8 isp_pipe_version;
+ struct {
+ u8 vf_output;
+ u8 s3a;
+ u8 sdis;
+ u8 dvs_stats;
+ u8 lace_stats;
+ } enable;
+
+ struct imgu_abi_crop_pos sp_out_crop_pos;
+ u8 padding[2];
+ struct imgu_abi_frames_sp frames;
+ struct imgu_abi_resolution dvs_envelope;
+ struct imgu_abi_uds_info uds;
+ imgu_addr_t isp_stage_addr;
+ imgu_addr_t xmem_bin_addr;
+ imgu_addr_t xmem_map_addr;
+
+ u16 top_cropping;
+ u16 row_stripes_height;
+ u16 row_stripes_overlap_lines;
+ u8 if_config_index; /* Which config should be applied by this stage */
+ u8 padding2;
+} __packed;
+
+/***** struct imgu_abi_isp_stage *****/
+
+struct imgu_abi_isp_param_memory_offsets {
+ u32 offsets[IMGU_ABI_PARAM_CLASS_NUM]; /* offset wrt hdr in bytes */
+} __packed;
+
+/*
+ * Blob descriptor.
+ * This structure describes an SP or ISP blob.
+ * It describes the text, data and bss sections as well as position in a
+ * firmware file.
+ * For convenience, it contains dynamic data after loading.
+ */
+struct imgu_abi_blob_info {
+ /* Static blob data */
+ u32 offset; /* Blob offset in fw file */
+ struct imgu_abi_isp_param_memory_offsets memory_offsets;
+ /* offset wrt hdr in bytes */
+ u32 prog_name_offset; /* offset wrt hdr in bytes */
+ u32 size; /* Size of blob */
+ u32 padding_size; /* total cumulative number of bytes added
+ * due to section alignment
+ */
+ u32 icache_source; /* Position of icache in blob */
+ u32 icache_size; /* Size of icache section */
+ u32 icache_padding; /* added due to icache section alignment */
+ u32 text_source; /* Position of text in blob */
+ u32 text_size; /* Size of text section */
+ u32 text_padding; /* bytes added due to text section alignment */
+ u32 data_source; /* Position of data in blob */
+ u32 data_target; /* Start of data in SP dmem */
+ u32 data_size; /* Size of data section */
+ u32 data_padding; /* bytes added due to data section alignment */
+ u32 bss_target; /* Start position of bss in SP dmem */
+ u32 bss_size; /* Size of bss section */
+ /* Dynamic data, filled by the loader */
+ u64 code __aligned(8); /* Code section absolute pointer */
+ /* within fw, code = icache + text */
+ u64 data __aligned(8); /* Data section absolute pointer */
+ /* within fw, data = data + bss */
+} __packed;
+
+struct imgu_abi_binary_pipeline_info {
+ u32 mode;
+ u32 isp_pipe_version;
+ u32 pipelining;
+ u32 c_subsampling;
+ u32 top_cropping;
+ u32 left_cropping;
+ u32 variable_resolution;
+} __packed;
+
+struct imgu_abi_binary_input_info {
+ u32 min_width;
+ u32 min_height;
+ u32 max_width;
+ u32 max_height;
+ u32 source; /* enum imgu_abi_bin_input_src */
+} __packed;
+
+struct imgu_abi_binary_output_info {
+ u32 min_width;
+ u32 min_height;
+ u32 max_width;
+ u32 max_height;
+ u32 num_chunks;
+ u32 variable_format;
+} __packed;
+
+struct imgu_abi_binary_internal_info {
+ u32 max_width;
+ u32 max_height;
+} __packed;
+
+struct imgu_abi_binary_bds_info {
+ u32 supported_bds_factors;
+} __packed;
+
+struct imgu_abi_binary_dvs_info {
+ u32 max_envelope_width;
+ u32 max_envelope_height;
+} __packed;
+
+struct imgu_abi_binary_vf_dec_info {
+ u32 is_variable;
+ u32 max_log_downscale;
+} __packed;
+
+struct imgu_abi_binary_s3a_info {
+ u32 s3atbl_use_dmem;
+ u32 fixed_s3a_deci_log;
+} __packed;
+
+struct imgu_abi_binary_dpc_info {
+ u32 bnr_lite; /* bnr lite enable flag */
+} __packed;
+
+struct imgu_abi_binary_iterator_info {
+ u32 num_stripes;
+ u32 row_stripes_height;
+ u32 row_stripes_overlap_lines;
+} __packed;
+
+struct imgu_abi_binary_address_info {
+ u32 isp_addresses; /* Address in ISP dmem */
+ u32 main_entry; /* Address of entry fct */
+ u32 in_frame; /* Address in ISP dmem */
+ u32 out_frame; /* Address in ISP dmem */
+ u32 in_data; /* Address in ISP dmem */
+ u32 out_data; /* Address in ISP dmem */
+ u32 sh_dma_cmd_ptr; /* In ISP dmem */
+} __packed;
+
+struct imgu_abi_binary_uds_info {
+ u16 bpp;
+ u16 use_bci;
+ u16 use_str;
+ u16 woix;
+ u16 woiy;
+ u16 extra_out_vecs;
+ u16 vectors_per_line_in;
+ u16 vectors_per_line_out;
+ u16 vectors_c_per_line_in;
+ u16 vectors_c_per_line_out;
+ u16 vmem_gdc_in_block_height_y;
+ u16 vmem_gdc_in_block_height_c;
+} __packed;
+
+struct imgu_abi_binary_block_info {
+ u32 block_width;
+ u32 block_height;
+ u32 output_block_height;
+} __packed;
+
+struct imgu_abi_isp_data {
+ imgu_addr_t address; /* ISP address */
+ u32 size; /* Disabled if 0 */
+} __packed;
+
+struct imgu_abi_isp_param_segments {
+ struct imgu_abi_isp_data
+ params[IMGU_ABI_PARAM_CLASS_NUM][IMGU_ABI_NUM_MEMORIES];
+} __packed;
+
+struct imgu_abi_binary_info {
+ u32 id __aligned(8); /* IMGU_ABI_BINARY_ID_* */
+ struct imgu_abi_binary_pipeline_info pipeline;
+ struct imgu_abi_binary_input_info input;
+ struct imgu_abi_binary_output_info output;
+ struct imgu_abi_binary_internal_info internal;
+ struct imgu_abi_binary_bds_info bds;
+ struct imgu_abi_binary_dvs_info dvs;
+ struct imgu_abi_binary_vf_dec_info vf_dec;
+ struct imgu_abi_binary_s3a_info s3a;
+ struct imgu_abi_binary_dpc_info dpc_bnr; /* DPC related binary info */
+ struct imgu_abi_binary_iterator_info iterator;
+ struct imgu_abi_binary_address_info addresses;
+ struct imgu_abi_binary_uds_info uds;
+ struct imgu_abi_binary_block_info block;
+ struct imgu_abi_isp_param_segments mem_initializers;
+ struct {
+ u8 input_feeder;
+ u8 output_system;
+ u8 obgrid;
+ u8 lin;
+ u8 dpc_acc;
+ u8 bds_acc;
+ u8 shd_acc;
+ u8 shd_ff;
+ u8 stats_3a_raw_buffer;
+ u8 acc_bayer_denoise;
+ u8 bnr_ff;
+ u8 awb_acc;
+ u8 awb_fr_acc;
+ u8 anr_acc;
+ u8 rgbpp_acc;
+ u8 rgbpp_ff;
+ u8 demosaic_acc;
+ u8 demosaic_ff;
+ u8 dvs_stats;
+ u8 lace_stats;
+ u8 yuvp1_b0_acc;
+ u8 yuvp1_c0_acc;
+ u8 yuvp2_acc;
+ u8 ae;
+ u8 af;
+ u8 dergb;
+ u8 rgb2yuv;
+ u8 high_quality;
+ u8 kerneltest;
+ u8 routing_shd_to_bnr; /* connect SHD with BNR ACCs */
+ u8 routing_bnr_to_anr; /* connect BNR with ANR ACCs */
+ u8 routing_anr_to_de; /* connect ANR with DE ACCs */
+ u8 routing_rgb_to_yuvp1; /* connect RGB with YUVP1 */
+ u8 routing_yuvp1_to_yuvp2; /* connect YUVP1 with YUVP2 */
+ u8 luma_only;
+ u8 input_yuv;
+ u8 input_raw;
+ u8 reduced_pipe;
+ u8 vf_veceven;
+ u8 dis;
+ u8 dvs_envelope;
+ u8 uds;
+ u8 dvs_6axis;
+ u8 block_output;
+ u8 streaming_dma;
+ u8 ds;
+ u8 bayer_fir_6db;
+ u8 raw_binning;
+ u8 continuous;
+ u8 s3a;
+ u8 fpnr;
+ u8 sc;
+ u8 macc;
+ u8 output;
+ u8 ref_frame;
+ u8 tnr;
+ u8 xnr;
+ u8 params;
+ u8 ca_gdc;
+ u8 isp_addresses;
+ u8 in_frame;
+ u8 out_frame;
+ u8 high_speed;
+ u8 dpc;
+ u8 padding[2];
+ u8 rgbir;
+ } enable;
+ struct {
+ u8 ref_y_channel;
+ u8 ref_c_channel;
+ u8 tnr_channel;
+ u8 tnr_out_channel;
+ u8 dvs_coords_channel;
+ u8 output_channel;
+ u8 c_channel;
+ u8 vfout_channel;
+ u8 vfout_c_channel;
+ u8 vfdec_bits_per_pixel;
+ u8 claimed_by_isp;
+ u8 padding[2];
+ } dma;
+} __packed;
+
+struct imgu_abi_isp_stage {
+ struct imgu_abi_blob_info blob_info;
+ struct imgu_abi_binary_info binary_info;
+ char binary_name[IMGU_ABI_MAX_BINARY_NAME];
+ struct imgu_abi_isp_param_segments mem_initializers;
+} __packed;
+
+/***** struct imgu_abi_ddr_address_map and parameter set *****/
+
+/* xmem address map allocation */
+struct imgu_abi_ddr_address_map {
+ imgu_addr_t isp_mem_param[IMGU_ABI_MAX_STAGES][IMGU_ABI_NUM_MEMORIES];
+ imgu_addr_t obgrid_tbl[IPU3_UAPI_MAX_STRIPES];
+ imgu_addr_t acc_cluster_params_for_sp;
+ imgu_addr_t dvs_6axis_params_y;
+} __packed;
+
+struct imgu_abi_parameter_set_info {
+ /* Pointers to Parameters in ISP format IMPT */
+ struct imgu_abi_ddr_address_map mem_map;
+ /* Unique ID to track per-frame configurations */
+ u32 isp_parameters_id;
+ /* Output frame to which this config has to be applied (optional) */
+ imgu_addr_t output_frame_ptr;
+} __packed;
+
+/***** struct imgu_abi_sp_group *****/
+
+/* SP configuration information */
+struct imgu_abi_sp_config {
+ u8 no_isp_sync; /* Signal host immediately after start */
+ u8 enable_raw_pool_locking; /* Enable Raw Buffer Locking for HALv3 */
+ u8 lock_all;
+ u8 disable_cont_vf;
+ u8 disable_preview_on_capture;
+ u8 padding[3];
+} __packed;
+
+/* Information for a pipeline */
+struct imgu_abi_sp_pipeline {
+ u32 pipe_id; /* the pipe ID */
+ u32 pipe_num; /* the dynamic pipe number */
+ u32 thread_id; /* the sp thread ID */
+ u32 pipe_config; /* the pipe config */
+ u32 pipe_qos_config; /* Bitmap of multiple QOS extension fw
+ * state, 0xffffffff indicates non
+ * QOS pipe.
+ */
+ u32 inout_port_config;
+ u32 required_bds_factor;
+ u32 dvs_frame_delay;
+ u32 num_stages; /* number of stages in this pipe */
+ u32 running; /* needed for pipe termination */
+ imgu_addr_t sp_stage_addr[IMGU_ABI_MAX_STAGES];
+ imgu_addr_t scaler_pp_lut; /* Early bound LUT */
+ u32 stage; /* stage ptr is only used on sp */
+ s32 num_execs; /* number of times to run if this is
+ * an acceleration pipe.
+ */
+ union {
+ struct {
+ u32 bytes_available;
+ } bin;
+ struct {
+ u32 height;
+ u32 width;
+ u32 padded_width;
+ u32 max_input_width;
+ u32 raw_bit_depth;
+ } raw;
+ } copy;
+
+ /* Parameters passed to Shading Correction kernel. */
+ struct {
+ /* Origin X (bqs) of internal frame on shading table */
+ u32 internal_frame_origin_x_bqs_on_sctbl;
+ /* Origin Y (bqs) of internal frame on shading table */
+ u32 internal_frame_origin_y_bqs_on_sctbl;
+ } shading;
+} __packed;
+
+struct imgu_abi_sp_debug_command {
+ /*
+ * The DMA software-mask,
+ * Bit 31...24: unused.
+ * Bit 23...16: unused.
+ * Bit 15...08: reading-request enabling bits for DMA channel 7..0
+ * Bit 07...00: writing-request enabling bits for DMA channel 7..0
+ *
+ * For example, "0...0 0...0 11111011 11111101" indicates that the
+ * writing request through DMA Channel 1 and the reading request
+ * through DMA channel 2 are both disabled. The others are enabled.
+ */
+ u32 dma_sw_reg;
+} __packed;
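+
+/*
+ * A sketch (illustrative only) of how the mask from the example above
+ * could be built on the host, using BIT() from <linux/bits.h>:
+ *
+ *	u32 dma_sw_reg = 0xffff & ~(BIT(1) | BIT(8 + 2));
+ *
+ * i.e. clear the writing-request bit of channel 1 (bits 7..0) and the
+ * reading-request bit of channel 2 (bits 15..8), leaving the rest enabled.
+ */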
+
+/*
+ * Group all host initialized SP variables into this struct.
+ * This is initialized every stage through dma.
+ * The stage part itself is transferred through imgu_abi_sp_stage.
+ */
+struct imgu_abi_sp_group {
+ struct imgu_abi_sp_config config;
+ struct imgu_abi_sp_pipeline pipe[IMGU_ABI_MAX_SP_THREADS];
+ struct imgu_abi_sp_debug_command debug;
+} __packed;
+
+/***** parameter and state class binary configurations *****/
+
+struct imgu_abi_isp_iterator_config {
+ struct imgu_abi_frame_sp_info input_info;
+ struct imgu_abi_frame_sp_info internal_info;
+ struct imgu_abi_frame_sp_info output_info;
+ struct imgu_abi_frame_sp_info vf_info;
+ struct imgu_abi_sp_resolution dvs_envelope;
+} __packed;
+
+struct imgu_abi_dma_port_config {
+ u8 crop, elems;
+ u16 width;
+ u32 stride;
+} __packed;
+
+struct imgu_abi_isp_ref_config {
+ u32 width_a_over_b;
+ struct imgu_abi_dma_port_config port_b;
+ u32 ref_frame_addr_y[IMGU_ABI_FRAMES_REF];
+ u32 ref_frame_addr_c[IMGU_ABI_FRAMES_REF];
+ u32 dvs_frame_delay;
+} __packed;
+
+struct imgu_abi_isp_ref_dmem_state {
+ u32 ref_in_buf_idx;
+ u32 ref_out_buf_idx;
+} __packed;
+
+struct imgu_abi_isp_dvs_config {
+ u32 num_horizontal_blocks;
+ u32 num_vertical_blocks;
+} __packed;
+
+struct imgu_abi_isp_tnr3_config {
+ u32 width_a_over_b;
+ u32 frame_height;
+ struct imgu_abi_dma_port_config port_b;
+ u32 delay_frame;
+ u32 frame_addr[IMGU_ABI_FRAMES_TNR];
+} __packed;
+
+struct imgu_abi_isp_tnr3_dmem_state {
+ u32 in_bufidx;
+ u32 out_bufidx;
+ u32 total_frame_counter;
+ u32 buffer_frame_counter[IMGU_ABI_BUF_SETS_TNR];
+ u32 bypass_filter;
+} __packed;
+
+/***** Queues *****/
+
+struct imgu_abi_queue_info {
+ u8 size; /* the maximum number of elements */
+ u8 step; /* number of bytes per element */
+ u8 start; /* index of the oldest element */
+ u8 end; /* index at which to write the new element */
+} __packed;
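+
+/*
+ * A minimal sketch (not part of the ABI) of how the host side could
+ * compute the fill level of such a circular queue, assuming "end" wraps
+ * around at "size" (the helper name is hypothetical):
+ *
+ *	static inline u8 imgu_abi_queue_level(const struct imgu_abi_queue_info *q)
+ *	{
+ *		return q->end >= q->start ? q->end - q->start
+ *					  : q->size - q->start + q->end;
+ *	}
+ */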
+
+struct imgu_abi_queues {
+ /*
+ * Queues for the dynamic frame information,
+ * i.e. the "in_frame" buffer, the "out_frame"
+ * buffer and the "vf_out_frame" buffer.
+ */
+ struct imgu_abi_queue_info host2sp_bufq_info
+ [IMGU_ABI_MAX_SP_THREADS][IMGU_ABI_QUEUE_NUM];
+ u32 host2sp_bufq[IMGU_ABI_MAX_SP_THREADS][IMGU_ABI_QUEUE_NUM]
+ [IMGU_ABI_HOST2SP_BUFQ_SIZE];
+ struct imgu_abi_queue_info sp2host_bufq_info[IMGU_ABI_QUEUE_NUM];
+ u32 sp2host_bufq[IMGU_ABI_QUEUE_NUM][IMGU_ABI_SP2HOST_BUFQ_SIZE];
+
+ /*
+ * The queues for the events.
+ */
+ struct imgu_abi_queue_info host2sp_evtq_info;
+ u32 host2sp_evtq[IMGU_ABI_HOST2SP_EVTQ_SIZE];
+ struct imgu_abi_queue_info sp2host_evtq_info;
+ u32 sp2host_evtq[IMGU_ABI_SP2HOST_EVTQ_SIZE];
+} __packed;
+
+/***** Buffer descriptor *****/
+
+struct imgu_abi_metadata_info {
+ struct imgu_abi_resolution resolution; /* Resolution */
+ u32 stride; /* Stride in bytes */
+ u32 size; /* Total size in bytes */
+} __packed;
+
+struct imgu_abi_isp_3a_statistics {
+ union {
+ struct {
+ imgu_addr_t s3a_tbl;
+ } dmem;
+ struct {
+ imgu_addr_t s3a_tbl_hi;
+ imgu_addr_t s3a_tbl_lo;
+ } vmem;
+ } data;
+ struct {
+ imgu_addr_t rgby_tbl;
+ } data_hmem;
+ u32 exp_id; /* exposure id, to match statistics to a frame */
+ u32 isp_config_id; /* Tracks per-frame configs */
+ imgu_addr_t data_ptr; /* pointer to base of all data */
+ u32 size; /* total size of all data */
+ u32 dmem_size;
+ u32 vmem_size; /* both lo and hi have this size */
+ u32 hmem_size;
+} __packed;
+
+struct imgu_abi_metadata {
+ struct imgu_abi_metadata_info info; /* Layout info */
+ imgu_addr_t address; /* CSS virtual address */
+ u32 exp_id; /* Exposure ID */
+} __packed;
+
+struct imgu_abi_time_meas {
+ u32 start_timer_value; /* measured time in ticks */
+ u32 end_timer_value; /* measured time in ticks */
+} __packed;
+
+struct imgu_abi_buffer {
+ union {
+ struct imgu_abi_isp_3a_statistics s3a;
+ u8 reserved[28];
+ imgu_addr_t skc_dvs_statistics;
+ imgu_addr_t lace_stat;
+ struct imgu_abi_metadata metadata;
+ struct {
+ imgu_addr_t frame_data;
+ u32 flashed;
+ u32 exp_id;
+ u32 isp_parameters_id; /* Tracks per-frame configs */
+ u32 padded_width;
+ } frame;
+ imgu_addr_t ddr_ptrs;
+ } payload;
+ /*
+ * kernel_ptr is present for host administration purposes only.
+ * type is uint64_t in order to be 64-bit host compatible.
+ * uint64_t does not exist on SP/ISP.
+ * Size of the struct is checked by sp.hive.c.
+ */
+ u64 cookie_ptr __aligned(8);
+ u64 kernel_ptr;
+ struct imgu_abi_time_meas timing_data;
+ u32 isys_eof_clock_tick;
+} __packed;
+
+struct imgu_abi_bl_dma_cmd_entry {
+ u32 src_addr; /* virtual DDR address */
+ u32 size; /* number of bytes to be transferred */
+ u32 dst_type;
+ u32 dst_addr; /* hmm address of xMEM or MMIO */
+} __packed;
+
+struct imgu_abi_sp_init_dmem_cfg {
+ u32 ddr_data_addr; /* data segment address in ddr */
+ u32 dmem_data_addr; /* data segment address in dmem */
+ u32 dmem_bss_addr; /* bss segment address in dmem */
+ u32 data_size; /* data segment size */
+ u32 bss_size; /* bss segment size */
+ u32 sp_id; /* sp id */
+} __packed;
+
+#endif
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.c b/drivers/staging/media/ipu3/ipu3-css-fw.c
new file mode 100644
index 000000000000..55861aa8fb03
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "ipu3-css.h"
+#include "ipu3-css-fw.h"
+#include "ipu3-dmamap.h"
+
+static void ipu3_css_fw_show_binary(struct device *dev, struct imgu_fw_info *bi,
+ const char *name)
+{
+ unsigned int i;
+
+ dev_dbg(dev, "found firmware binary type %i size %i name %s\n",
+ bi->type, bi->blob.size, name);
+ if (bi->type != IMGU_FW_ISP_FIRMWARE)
+ return;
+
+ dev_dbg(dev, " id %i mode %i bds 0x%x veceven %i/%i out_pins %i\n",
+ bi->info.isp.sp.id, bi->info.isp.sp.pipeline.mode,
+ bi->info.isp.sp.bds.supported_bds_factors,
+ bi->info.isp.sp.enable.vf_veceven,
+ bi->info.isp.sp.vf_dec.is_variable,
+ bi->info.isp.num_output_pins);
+
+ dev_dbg(dev, " input (%i,%i)-(%i,%i) formats %s%s%s\n",
+ bi->info.isp.sp.input.min_width,
+ bi->info.isp.sp.input.min_height,
+ bi->info.isp.sp.input.max_width,
+ bi->info.isp.sp.input.max_height,
+ bi->info.isp.sp.enable.input_yuv ? "yuv420 " : "",
+ bi->info.isp.sp.enable.input_feeder ||
+ bi->info.isp.sp.enable.input_raw ? "raw8 raw10 " : "",
+ bi->info.isp.sp.enable.input_raw ? "raw12" : "");
+
+ dev_dbg(dev, " internal (%i,%i)\n",
+ bi->info.isp.sp.internal.max_width,
+ bi->info.isp.sp.internal.max_height);
+
+ dev_dbg(dev, " output (%i,%i)-(%i,%i) formats",
+ bi->info.isp.sp.output.min_width,
+ bi->info.isp.sp.output.min_height,
+ bi->info.isp.sp.output.max_width,
+ bi->info.isp.sp.output.max_height);
+ for (i = 0; i < bi->info.isp.num_output_formats; i++)
+ dev_dbg(dev, " %i", bi->info.isp.output_formats[i]);
+ dev_dbg(dev, " vf");
+ for (i = 0; i < bi->info.isp.num_vf_formats; i++)
+ dev_dbg(dev, " %i", bi->info.isp.vf_formats[i]);
+ dev_dbg(dev, "\n");
+}
+
+unsigned int ipu3_css_fw_obgrid_size(const struct imgu_fw_info *bi)
+{
+ unsigned int width = DIV_ROUND_UP(bi->info.isp.sp.internal.max_width,
+ IMGU_OBGRID_TILE_SIZE * 2) + 1;
+ unsigned int height = DIV_ROUND_UP(bi->info.isp.sp.internal.max_height,
+ IMGU_OBGRID_TILE_SIZE * 2) + 1;
+ unsigned int obgrid_size;
+
+ width = ALIGN(width, IPU3_UAPI_ISP_VEC_ELEMS / 4);
+ obgrid_size = PAGE_ALIGN(width * height *
+ sizeof(struct ipu3_uapi_obgrid_param)) *
+ bi->info.isp.sp.iterator.num_stripes;
+ return obgrid_size;
+}
+
+void *ipu3_css_fw_pipeline_params(struct ipu3_css *css, unsigned int pipe,
+ enum imgu_abi_param_class cls,
+ enum imgu_abi_memories mem,
+ struct imgu_fw_isp_parameter *par,
+ size_t par_size, void *binary_params)
+{
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css->pipes[pipe].bindex];
+
+ if (par->offset + par->size >
+ bi->info.isp.sp.mem_initializers.params[cls][mem].size)
+ return NULL;
+
+ if (par->size != par_size)
+ pr_warn("parameter size doesn't match defined size\n");
+
+ if (par->size < par_size)
+ return NULL;
+
+ return binary_params + par->offset;
+}
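+
+/*
+ * A sketch of the intended usage, based on the signature above (the
+ * offsets-struct lookup mirrors how callers are expected to locate "par";
+ * "size" and "vaddr" stand for the caller's parameter size and mapped
+ * parameter buffer):
+ *
+ *	struct imgu_fw_param_memory_offsets *pofs = (void *)css->fwp +
+ *		bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM];
+ *	void *p = ipu3_css_fw_pipeline_params(css, pipe,
+ *					      IMGU_ABI_PARAM_CLASS_PARAM,
+ *					      IMGU_ABI_MEM_ISP_VMEM0,
+ *					      &pofs->vmem.lin, size, vaddr);
+ *
+ * A NULL return means the parameter does not fit inside the binary's
+ * parameter area and must not be written through.
+ */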
+
+void ipu3_css_fw_cleanup(struct ipu3_css *css)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+
+ if (css->binary) {
+ unsigned int i;
+
+ for (i = 0; i < css->fwp->file_header.binary_nr; i++)
+ ipu3_dmamap_free(imgu, &css->binary[i]);
+ kfree(css->binary);
+ }
+ if (css->fw)
+ release_firmware(css->fw);
+
+ css->binary = NULL;
+ css->fw = NULL;
+}
+
+int ipu3_css_fw_init(struct ipu3_css *css)
+{
+ static const u32 BLOCK_MAX = 65536;
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ struct device *dev = css->dev;
+ unsigned int i, j, binary_nr;
+ int r;
+
+ r = request_firmware(&css->fw, IMGU_FW_NAME, css->dev);
+ if (r)
+ return r;
+
+ /* Check and display fw header info */
+
+ css->fwp = (struct imgu_fw_header *)css->fw->data;
+ if (css->fw->size < sizeof(struct imgu_fw_header) ||
+ css->fwp->file_header.h_size != sizeof(struct imgu_fw_bi_file_h))
+ goto bad_fw;
+ if (sizeof(struct imgu_fw_bi_file_h) +
+ css->fwp->file_header.binary_nr * sizeof(struct imgu_fw_info) >
+ css->fw->size)
+ goto bad_fw;
+
+ dev_info(dev, "loaded firmware version %.64s, %u binaries, %zu bytes\n",
+ css->fwp->file_header.version, css->fwp->file_header.binary_nr,
+ css->fw->size);
+
+ /* Validate and display info on fw binaries */
+
+ binary_nr = css->fwp->file_header.binary_nr;
+
+ css->fw_bl = -1;
+ css->fw_sp[0] = -1;
+ css->fw_sp[1] = -1;
+
+ for (i = 0; i < binary_nr; i++) {
+ struct imgu_fw_info *bi = &css->fwp->binary_header[i];
+ const char *name = (void *)css->fwp + bi->blob.prog_name_offset;
+ size_t len;
+
+ if (bi->blob.prog_name_offset >= css->fw->size)
+ goto bad_fw;
+ len = strnlen(name, css->fw->size - bi->blob.prog_name_offset);
+ if (len + 1 > css->fw->size - bi->blob.prog_name_offset ||
+ len + 1 >= IMGU_ABI_MAX_BINARY_NAME)
+ goto bad_fw;
+
+ if (bi->blob.size != bi->blob.text_size + bi->blob.icache_size
+ + bi->blob.data_size + bi->blob.padding_size)
+ goto bad_fw;
+ if (bi->blob.offset + bi->blob.size > css->fw->size)
+ goto bad_fw;
+
+ if (bi->type == IMGU_FW_BOOTLOADER_FIRMWARE) {
+ css->fw_bl = i;
+ if (bi->info.bl.sw_state >= css->iomem_length ||
+ bi->info.bl.num_dma_cmds >= css->iomem_length ||
+ bi->info.bl.dma_cmd_list >= css->iomem_length)
+ goto bad_fw;
+ }
+ if (bi->type == IMGU_FW_SP_FIRMWARE ||
+ bi->type == IMGU_FW_SP1_FIRMWARE) {
+ css->fw_sp[bi->type == IMGU_FW_SP_FIRMWARE ? 0 : 1] = i;
+ if (bi->info.sp.per_frame_data >= css->iomem_length ||
+ bi->info.sp.init_dmem_data >= css->iomem_length ||
+ bi->info.sp.host_sp_queue >= css->iomem_length ||
+ bi->info.sp.isp_started >= css->iomem_length ||
+ bi->info.sp.sw_state >= css->iomem_length ||
+ bi->info.sp.sleep_mode >= css->iomem_length ||
+ bi->info.sp.invalidate_tlb >= css->iomem_length ||
+ bi->info.sp.host_sp_com >= css->iomem_length ||
+ bi->info.sp.output + 12 >= css->iomem_length ||
+ bi->info.sp.host_sp_queues_initialized >=
+ css->iomem_length)
+ goto bad_fw;
+ }
+ if (bi->type != IMGU_FW_ISP_FIRMWARE)
+ continue;
+
+ if (bi->info.isp.sp.pipeline.mode >= IPU3_CSS_PIPE_ID_NUM)
+ goto bad_fw;
+
+ if (bi->info.isp.sp.iterator.num_stripes >
+ IPU3_UAPI_MAX_STRIPES)
+ goto bad_fw;
+
+ if (bi->info.isp.num_vf_formats > IMGU_ABI_FRAME_FORMAT_NUM ||
+ bi->info.isp.num_output_formats > IMGU_ABI_FRAME_FORMAT_NUM)
+ goto bad_fw;
+
+ for (j = 0; j < bi->info.isp.num_output_formats; j++)
+ if (bi->info.isp.output_formats[j] < 0 ||
+ bi->info.isp.output_formats[j] >=
+ IMGU_ABI_FRAME_FORMAT_NUM)
+ goto bad_fw;
+ for (j = 0; j < bi->info.isp.num_vf_formats; j++)
+ if (bi->info.isp.vf_formats[j] < 0 ||
+ bi->info.isp.vf_formats[j] >=
+ IMGU_ABI_FRAME_FORMAT_NUM)
+ goto bad_fw;
+
+ if (bi->info.isp.sp.block.block_width <= 0 ||
+ bi->info.isp.sp.block.block_width > BLOCK_MAX ||
+ bi->info.isp.sp.block.output_block_height <= 0 ||
+ bi->info.isp.sp.block.output_block_height > BLOCK_MAX)
+ goto bad_fw;
+
+ if (bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM]
+ + sizeof(struct imgu_fw_param_memory_offsets) >
+ css->fw->size ||
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_CONFIG]
+ + sizeof(struct imgu_fw_config_memory_offsets) >
+ css->fw->size ||
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_STATE]
+ + sizeof(struct imgu_fw_state_memory_offsets) >
+ css->fw->size)
+ goto bad_fw;
+
+ ipu3_css_fw_show_binary(dev, bi, name);
+ }
+
+ if (css->fw_bl == -1 || css->fw_sp[0] == -1 || css->fw_sp[1] == -1)
+ goto bad_fw;
+
+ /* Allocate and map fw binaries into IMGU */
+
+ css->binary = kcalloc(binary_nr, sizeof(*css->binary), GFP_KERNEL);
+ if (!css->binary) {
+ r = -ENOMEM;
+ goto error_out;
+ }
+
+ for (i = 0; i < css->fwp->file_header.binary_nr; i++) {
+ struct imgu_fw_info *bi = &css->fwp->binary_header[i];
+ void *blob = (void *)css->fwp + bi->blob.offset;
+ size_t size = bi->blob.size;
+
+ if (!ipu3_dmamap_alloc(imgu, &css->binary[i], size)) {
+ r = -ENOMEM;
+ goto error_out;
+ }
+ memcpy(css->binary[i].vaddr, blob, size);
+ }
+
+ return 0;
+
+bad_fw:
+ dev_err(dev, "invalid firmware binary, size %zu\n", css->fw->size);
+ r = -ENODEV;
+
+error_out:
+ ipu3_css_fw_cleanup(css);
+ return r;
+}
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.h b/drivers/staging/media/ipu3/ipu3-css-fw.h
new file mode 100644
index 000000000000..07d8bb8b25f3
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_CSS_FW_H
+#define __IPU3_CSS_FW_H
+
+/******************* Firmware file definitions *******************/
+
+#define IMGU_FW_NAME "intel/ipu3-fw.bin"
+
+typedef u32 imgu_fw_ptr;
+
+enum imgu_fw_type {
+ IMGU_FW_SP_FIRMWARE, /* Firmware for the SP */
+ IMGU_FW_SP1_FIRMWARE, /* Firmware for the SP1 */
+ IMGU_FW_ISP_FIRMWARE, /* Firmware for the ISP */
+ IMGU_FW_BOOTLOADER_FIRMWARE, /* Firmware for the BootLoader */
+ IMGU_FW_ACC_FIRMWARE /* Firmware for accelerations */
+};
+
+enum imgu_fw_acc_type {
+ IMGU_FW_ACC_NONE, /* Normal binary */
+ IMGU_FW_ACC_OUTPUT, /* Accelerator stage on output frame */
+ IMGU_FW_ACC_VIEWFINDER, /* Accelerator stage on viewfinder frame */
+ IMGU_FW_ACC_STANDALONE, /* Stand-alone acceleration */
+};
+
+struct imgu_fw_isp_parameter {
+ u32 offset; /* Offset in isp_<mem> config, params, etc. */
+ u32 size; /* Disabled if 0 */
+};
+
+struct imgu_fw_param_memory_offsets {
+ struct {
+ struct imgu_fw_isp_parameter lin; /* lin_vmem_params */
+ struct imgu_fw_isp_parameter tnr3; /* tnr3_vmem_params */
+ struct imgu_fw_isp_parameter xnr3; /* xnr3_vmem_params */
+ } vmem;
+ struct {
+ struct imgu_fw_isp_parameter tnr;
+ struct imgu_fw_isp_parameter tnr3; /* tnr3_params */
+ struct imgu_fw_isp_parameter xnr3; /* xnr3_params */
+ struct imgu_fw_isp_parameter plane_io_config; /* 192 bytes */
+ struct imgu_fw_isp_parameter rgbir; /* rgbir_params */
+ } dmem;
+};
+
+struct imgu_fw_config_memory_offsets {
+ struct {
+ struct imgu_fw_isp_parameter iterator;
+ struct imgu_fw_isp_parameter dvs;
+ struct imgu_fw_isp_parameter output;
+ struct imgu_fw_isp_parameter raw;
+ struct imgu_fw_isp_parameter input_yuv;
+ struct imgu_fw_isp_parameter tnr;
+ struct imgu_fw_isp_parameter tnr3;
+ struct imgu_fw_isp_parameter ref;
+ } dmem;
+};
+
+struct imgu_fw_state_memory_offsets {
+ struct {
+ struct imgu_fw_isp_parameter tnr;
+ struct imgu_fw_isp_parameter tnr3;
+ struct imgu_fw_isp_parameter ref;
+ } dmem;
+};
+
+union imgu_fw_all_memory_offsets {
+ struct {
+ u64 imgu_fw_mem_offsets[3]; /* params, config, state */
+ } offsets;
+ struct {
+ u64 ptr;
+ } array[IMGU_ABI_PARAM_CLASS_NUM];
+};
+
+struct imgu_fw_binary_xinfo {
+ /* Part that is of interest to the SP. */
+ struct imgu_abi_binary_info sp;
+
+ /* Rest of the binary info, only interesting to the host. */
+ u32 type; /* enum imgu_fw_acc_type */
+
+ u32 num_output_formats __aligned(8);
+ u32 output_formats[IMGU_ABI_FRAME_FORMAT_NUM]; /* enum frame_format */
+
+ /* number of supported vf formats */
+ u32 num_vf_formats __aligned(8);
+ /* types of supported vf formats */
+ u32 vf_formats[IMGU_ABI_FRAME_FORMAT_NUM]; /* enum frame_format */
+ u8 num_output_pins;
+ imgu_fw_ptr xmem_addr;
+
+ u64 imgu_fw_blob_descr_ptr __aligned(8);
+ u32 blob_index __aligned(8);
+ union imgu_fw_all_memory_offsets mem_offsets __aligned(8);
+ struct imgu_fw_binary_xinfo *next __aligned(8);
+};
+
+struct imgu_fw_sp_info {
+ u32 init_dmem_data; /* data sect config, stored to dmem */
+ u32 per_frame_data; /* Per frame data, stored to dmem */
+ u32 group; /* Per pipeline data, loaded by dma */
+ u32 output; /* SP output data, loaded by dmem */
+ u32 host_sp_queue; /* Host <-> SP queues */
+ u32 host_sp_com; /* Host <-> SP commands */
+ u32 isp_started; /* P'ed from sensor thread, csim only */
+ u32 sw_state; /* Polled from css, enum imgu_abi_sp_swstate */
+ u32 host_sp_queues_initialized; /* Polled from the SP */
+ u32 sleep_mode; /* different mode to halt SP */
+ u32 invalidate_tlb; /* inform SP to invalidate mmu TLB */
+ u32 debug_buffer_ddr_address; /* the addr of DDR debug queue */
+
+ /* input system perf count array */
+ u32 perf_counter_input_system_error;
+ u32 threads_stack; /* sp thread's stack pointers */
+ u32 threads_stack_size; /* sp thread's stack sizes */
+ u32 curr_binary_id; /* current binary id */
+ u32 raw_copy_line_count; /* raw copy line counter */
+ u32 ddr_parameter_address; /* acc param ddrptr, sp dmem */
+ u32 ddr_parameter_size; /* acc param size, sp dmem */
+ /* Entry functions */
+ u32 sp_entry; /* The SP entry function */
+ u32 tagger_frames_addr; /* Base address of tagger state */
+};
+
+struct imgu_fw_bl_info {
+ u32 num_dma_cmds; /* Number of cmds sent by CSS */
+ u32 dma_cmd_list; /* Dma command list sent by CSS */
+ u32 sw_state; /* Polled from css, enum imgu_abi_bl_swstate */
+ /* Entry functions */
+ u32 bl_entry; /* The SP entry function */
+};
+
+struct imgu_fw_acc_info {
+ u32 per_frame_data; /* Dummy for now */
+};
+
+union imgu_fw_union {
+ struct imgu_fw_binary_xinfo isp; /* ISP info */
+ struct imgu_fw_sp_info sp; /* SP info */
+ struct imgu_fw_sp_info sp1; /* SP1 info */
+ struct imgu_fw_bl_info bl; /* Bootloader info */
+ struct imgu_fw_acc_info acc; /* Accelerator info */
+};
+
+struct imgu_fw_info {
+ size_t header_size; /* size of fw header */
+ u32 type __aligned(8); /* enum imgu_fw_type */
+ union imgu_fw_union info; /* Binary info */
+ struct imgu_abi_blob_info blob; /* Blob info */
+ /* Dynamic part */
+ u64 next;
+
+ u32 loaded __aligned(8); /* Firmware has been loaded */
+ const u64 isp_code __aligned(8); /* ISP pointer to code */
+ /* Firmware handle between user space and kernel */
+ u32 handle __aligned(8);
+ /* Sections to copy from/to ISP */
+ struct imgu_abi_isp_param_segments mem_initializers;
+ /* Initializer for local ISP memories */
+};
+
+struct imgu_fw_bi_file_h {
+ char version[64]; /* branch tag + week day + time */
+ int binary_nr; /* Number of binaries */
+ unsigned int h_size; /* sizeof(struct imgu_fw_bi_file_h) */
+};
+
+struct imgu_fw_header {
+ struct imgu_fw_bi_file_h file_header;
+ struct imgu_fw_info binary_header[1]; /* binary_nr items */
+};
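+
+/*
+ * A sketch of how the host walks the binary headers (this mirrors the
+ * validation loop in ipu3-css-fw.c):
+ *
+ *	for (i = 0; i < fwp->file_header.binary_nr; i++) {
+ *		struct imgu_fw_info *bi = &fwp->binary_header[i];
+ *		...
+ *	}
+ */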
+
+/******************* Firmware functions *******************/
+
+int ipu3_css_fw_init(struct ipu3_css *css);
+void ipu3_css_fw_cleanup(struct ipu3_css *css);
+
+unsigned int ipu3_css_fw_obgrid_size(const struct imgu_fw_info *bi);
+void *ipu3_css_fw_pipeline_params(struct ipu3_css *css, unsigned int pipe,
+ enum imgu_abi_param_class cls,
+ enum imgu_abi_memories mem,
+ struct imgu_fw_isp_parameter *par,
+ size_t par_size, void *binary_params);
+
+#endif
diff --git a/drivers/staging/media/ipu3/ipu3-css-params.c b/drivers/staging/media/ipu3/ipu3-css-params.c
new file mode 100644
index 000000000000..776206ded83b
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-params.c
@@ -0,0 +1,2943 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/device.h>
+
+#include "ipu3-css.h"
+#include "ipu3-css-fw.h"
+#include "ipu3-tables.h"
+
+#define DIV_ROUND_CLOSEST_DOWN(a, b) (((a) + ((b) / 2) - 1) / (b))
+#define roundclosest_down(a, b) (DIV_ROUND_CLOSEST_DOWN(a, b) * (b))
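+
+/*
+ * For example (illustrative): roundclosest_down(10, 4) evaluates to 8 and
+ * roundclosest_down(11, 4) to 12, i.e. round to the nearest multiple of
+ * b, with exact midpoints rounding down.
+ */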
+
+#define IPU3_UAPI_ANR_MAX_RESET ((1 << 12) - 1)
+#define IPU3_UAPI_ANR_MIN_RESET (-((1 << 12) - 1))
+
+struct ipu3_css_scaler_info {
+ unsigned int phase_step; /* Same for luma/chroma */
+ int exp_shift;
+
+ unsigned int phase_init; /* luma/chroma dependent */
+ int pad_left;
+ int pad_right;
+ int crop_left;
+ int crop_top;
+};
+
+static unsigned int ipu3_css_scaler_get_exp(unsigned int counter,
+ unsigned int divider)
+{
+ int i = fls(divider) - fls(counter);
+
+ if (i <= 0)
+ return 0;
+
+ if (divider >> i < counter)
+ i = i - 1;
+
+ return i;
+}
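+
+/*
+ * For example (illustrative): ipu3_css_scaler_get_exp(640, 1920) returns
+ * 1, the largest exponent i such that 640 << i still fits in 1920.
+ */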
+
+/* Set up the CSS scaler look up table */
+static void
+ipu3_css_scaler_setup_lut(unsigned int taps, unsigned int input_width,
+ unsigned int output_width, int phase_step_correction,
+ const int *coeffs, unsigned int coeffs_size,
+ s8 coeff_lut[], struct ipu3_css_scaler_info *info)
+{
+ int tap, phase, phase_sum_left, phase_sum_right;
+ int exponent = ipu3_css_scaler_get_exp(output_width, input_width);
+ int mantissa = (1 << exponent) * output_width;
+ unsigned int phase_step;
+
+ if (input_width == output_width) {
+ for (phase = 0; phase < IMGU_SCALER_PHASES; phase++) {
+ for (tap = 0; tap < taps; tap++) {
+ coeff_lut[phase * IMGU_SCALER_FILTER_TAPS + tap]
+ = 0;
+ }
+ }
+
+ info->phase_step = IMGU_SCALER_PHASES *
+ (1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF);
+ info->exp_shift = 0;
+ info->pad_left = 0;
+ info->pad_right = 0;
+ info->phase_init = 0;
+ info->crop_left = 0;
+ info->crop_top = 0;
+ return;
+ }
+
+ for (phase = 0; phase < IMGU_SCALER_PHASES; phase++) {
+ for (tap = 0; tap < taps; tap++) {
+ /* flip table for convolution reverse indexing */
+ s64 coeff = coeffs[coeffs_size -
+ ((tap * (coeffs_size / taps)) + phase) - 1];
+ coeff *= mantissa;
+ coeff = div64_long(coeff, input_width);
+
+ /* Add +"0.5" */
+ coeff += 1 << (IMGU_SCALER_COEFF_BITS - 1);
+ coeff >>= IMGU_SCALER_COEFF_BITS;
+
+ coeff_lut[phase * IMGU_SCALER_FILTER_TAPS + tap] =
+ coeff;
+ }
+ }
+
+ phase_step = IMGU_SCALER_PHASES *
+ (1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF) *
+ output_width / input_width;
+ phase_step += phase_step_correction;
+ phase_sum_left = (taps / 2 * IMGU_SCALER_PHASES *
+ (1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF)) -
+ (1 << (IMGU_SCALER_PHASE_COUNTER_PREC_REF - 1));
+ phase_sum_right = (taps / 2 * IMGU_SCALER_PHASES *
+ (1 << IMGU_SCALER_PHASE_COUNTER_PREC_REF)) +
+ (1 << (IMGU_SCALER_PHASE_COUNTER_PREC_REF - 1));
+
+ info->exp_shift = IMGU_SCALER_MAX_EXPONENT_SHIFT - exponent;
+ info->pad_left = (phase_sum_left % phase_step == 0) ?
+ phase_sum_left / phase_step - 1 : phase_sum_left / phase_step;
+ info->pad_right = (phase_sum_right % phase_step == 0) ?
+ phase_sum_right / phase_step - 1 : phase_sum_right / phase_step;
+ info->phase_init = phase_sum_left - phase_step * info->pad_left;
+ info->phase_step = phase_step;
+ info->crop_left = taps - 1;
+ info->crop_top = taps - 1;
+}
+
+/*
+ * Calculates the exact output image width/height, based on phase_step setting
+ * (must be perfectly aligned with hardware).
+ */
+static unsigned int
+ipu3_css_scaler_calc_scaled_output(unsigned int input,
+ struct ipu3_css_scaler_info *info)
+{
+ unsigned int arg1 = input * info->phase_step +
+ (1 - IMGU_SCALER_TAPS_Y / 2) * IMGU_SCALER_FIR_PHASES -
+ IMGU_SCALER_FIR_PHASES / (2 * IMGU_SCALER_PHASES);
+ unsigned int arg2 = ((IMGU_SCALER_TAPS_Y / 2) * IMGU_SCALER_FIR_PHASES +
+ IMGU_SCALER_FIR_PHASES / (2 * IMGU_SCALER_PHASES)) *
+ IMGU_SCALER_FIR_PHASES + info->phase_step / 2;
+
+ return ((arg1 + (arg2 - IMGU_SCALER_FIR_PHASES * info->phase_step) /
+ IMGU_SCALER_FIR_PHASES) / (2 * IMGU_SCALER_FIR_PHASES)) * 2;
+}
+
+/*
+ * Calculate the output width and height, given the luma
+ * and chroma details of a scaler
+ */
+static void
+ipu3_css_scaler_calc(u32 input_width, u32 input_height, u32 target_width,
+ u32 target_height, struct imgu_abi_osys_config *cfg,
+ struct ipu3_css_scaler_info *info_luma,
+ struct ipu3_css_scaler_info *info_chroma,
+ unsigned int *output_width, unsigned int *output_height,
+ unsigned int *procmode)
+{
+ u32 out_width = target_width;
+ u32 out_height = target_height;
+ const unsigned int height_alignment = 2;
+ int phase_step_correction = -1;
+
+ /*
+ * Calculate the scaled output width. If the horizontal and vertical
+ * scaling factors differ, choose the larger one and crop off the excess
+ * lines or columns after formatting.
+ */
+ if (target_height * input_width > target_width * input_height)
+ target_width = DIV_ROUND_UP(target_height * input_width,
+ input_height);
+
+ if (input_width == target_width)
+ *procmode = IMGU_ABI_OSYS_PROCMODE_BYPASS;
+ else
+ *procmode = IMGU_ABI_OSYS_PROCMODE_DOWNSCALE;
+
+ memset(&cfg->scaler_coeffs_chroma, 0,
+ sizeof(cfg->scaler_coeffs_chroma));
+ memset(&cfg->scaler_coeffs_luma, 0, sizeof(cfg->scaler_coeffs_luma));
+ do {
+ phase_step_correction++;
+
+ ipu3_css_scaler_setup_lut(IMGU_SCALER_TAPS_Y,
+ input_width, target_width,
+ phase_step_correction,
+ ipu3_css_downscale_4taps,
+ IMGU_SCALER_DOWNSCALE_4TAPS_LEN,
+ cfg->scaler_coeffs_luma, info_luma);
+
+ ipu3_css_scaler_setup_lut(IMGU_SCALER_TAPS_UV,
+ input_width, target_width,
+ phase_step_correction,
+ ipu3_css_downscale_2taps,
+ IMGU_SCALER_DOWNSCALE_2TAPS_LEN,
+ cfg->scaler_coeffs_chroma,
+ info_chroma);
+
+ out_width = ipu3_css_scaler_calc_scaled_output(input_width,
+ info_luma);
+ out_height = ipu3_css_scaler_calc_scaled_output(input_height,
+ info_luma);
+ } while ((out_width < target_width || out_height < target_height ||
+ !IS_ALIGNED(out_height, height_alignment)) &&
+ phase_step_correction <= 5);
+
+ *output_width = out_width;
+ *output_height = out_height;
+}
+
+/********************** Osys routines for scaler ***************************/
+
+static void ipu3_css_osys_set_format(enum imgu_abi_frame_format host_format,
+ unsigned int *osys_format,
+ unsigned int *osys_tiling)
+{
+ *osys_format = IMGU_ABI_OSYS_FORMAT_YUV420;
+ *osys_tiling = IMGU_ABI_OSYS_TILING_NONE;
+
+ switch (host_format) {
+ case IMGU_ABI_FRAME_FORMAT_YUV420:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_YUV420;
+ break;
+ case IMGU_ABI_FRAME_FORMAT_YV12:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_YV12;
+ break;
+ case IMGU_ABI_FRAME_FORMAT_NV12:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_NV12;
+ break;
+ case IMGU_ABI_FRAME_FORMAT_NV16:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_NV16;
+ break;
+ case IMGU_ABI_FRAME_FORMAT_NV21:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_NV21;
+ break;
+ case IMGU_ABI_FRAME_FORMAT_NV12_TILEY:
+ *osys_format = IMGU_ABI_OSYS_FORMAT_NV12;
+ *osys_tiling = IMGU_ABI_OSYS_TILING_Y;
+ break;
+ default:
+ /* For now, fall back to the default values */
+ break;
+ }
+}
+
+/*
+ * Calculate the input frame stripe offset, based on
+ * the output frame stripe offset and filter parameters.
+ */
+static int ipu3_css_osys_calc_stripe_offset(int stripe_offset_out,
+ int fir_phases, int phase_init,
+ int phase_step, int pad_left)
+{
+ int stripe_offset_inp = stripe_offset_out * fir_phases -
+ pad_left * phase_step;
+
+ return DIV_ROUND_UP(stripe_offset_inp - phase_init, phase_step);
+}
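+
+/*
+ * Worked example (illustrative values only): with fir_phases == 64,
+ * phase_step == 32 (a 2x downscale), phase_init == 0 and pad_left == 3,
+ * an output stripe offset of 100 maps to
+ * DIV_ROUND_UP(100 * 64 - 3 * 32, 32) == 197 input pixels - roughly twice
+ * the output offset, pulled left by the filter padding.
+ */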
+
+/*
+ * Calculate input frame phase, given the output frame
+ * stripe offset and filter parameters
+ */
+static int ipu3_css_osys_calc_stripe_phase_init(int stripe_offset_out,
+ int fir_phases, int phase_init,
+ int phase_step, int pad_left)
+{
+ int stripe_offset_inp =
+ ipu3_css_osys_calc_stripe_offset(stripe_offset_out,
+ fir_phases, phase_init,
+ phase_step, pad_left);
+
+ return phase_init + ((pad_left + stripe_offset_inp) * phase_step) -
+ stripe_offset_out * fir_phases;
+}
+
+/*
+ * This function calculates input frame stripe width,
+ * based on output frame stripe offset and filter parameters
+ */
+static int ipu3_css_osys_calc_inp_stripe_width(int stripe_width_out,
+ int fir_phases, int phase_init,
+ int phase_step, int fir_taps,
+ int pad_left, int pad_right)
+{
+ int stripe_width_inp = (stripe_width_out + fir_taps - 1) * fir_phases;
+
+ stripe_width_inp = DIV_ROUND_UP(stripe_width_inp - phase_init,
+ phase_step);
+
+ return stripe_width_inp - pad_left - pad_right;
+}
+
+/*
+ * This function calculates the output frame stripe width, based
+ * on the output frame stripe offset and filter parameters.
+ */
+static int ipu3_css_osys_out_stripe_width(int stripe_width_inp, int fir_phases,
+ int phase_init, int phase_step,
+ int fir_taps, int pad_left,
+ int pad_right, int column_offset)
+{
+ int stripe_width_out = (pad_left + stripe_width_inp +
+ pad_right - column_offset) * phase_step;
+
+ stripe_width_out = (stripe_width_out + phase_init) / fir_phases;
+
+ return stripe_width_out - (fir_taps - 1);
+}
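+
+/*
+ * Round-trip check with the same illustrative numbers as above
+ * (fir_phases == 64, phase_step == 32, phase_init == 0, fir_taps == 4,
+ * pad_left == pad_right == 3, column_offset == 0): an output stripe of
+ * 100 pixels needs (100 + 3) * 64 / 32 - 6 == 200 input pixels, and
+ * feeding 200 back through this helper gives
+ * (3 + 200 + 3) * 32 / 64 - 3 == 100 again.
+ */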
+
+struct ipu3_css_reso {
+ unsigned int input_width;
+ unsigned int input_height;
+ enum imgu_abi_frame_format input_format;
+ unsigned int pin_width[IMGU_ABI_OSYS_PINS];
+ unsigned int pin_height[IMGU_ABI_OSYS_PINS];
+ unsigned int pin_stride[IMGU_ABI_OSYS_PINS];
+ enum imgu_abi_frame_format pin_format[IMGU_ABI_OSYS_PINS];
+ int chunk_width;
+ int chunk_height;
+ int block_height;
+ int block_width;
+};
+
+struct ipu3_css_frame_params {
+ /* Output pins */
+ unsigned int enable;
+ unsigned int format;
+ unsigned int flip;
+ unsigned int mirror;
+ unsigned int tiling;
+ unsigned int reduce_range;
+ unsigned int width;
+ unsigned int height;
+ unsigned int stride;
+ unsigned int scaled;
+ unsigned int crop_left;
+ unsigned int crop_top;
+};
+
+struct ipu3_css_stripe_params {
+ unsigned int processing_mode;
+ unsigned int phase_step;
+ unsigned int exp_shift;
+ unsigned int phase_init_left_y;
+ unsigned int phase_init_left_uv;
+ unsigned int phase_init_top_y;
+ unsigned int phase_init_top_uv;
+ unsigned int pad_left_y;
+ unsigned int pad_left_uv;
+ unsigned int pad_right_y;
+ unsigned int pad_right_uv;
+ unsigned int pad_top_y;
+ unsigned int pad_top_uv;
+ unsigned int pad_bottom_y;
+ unsigned int pad_bottom_uv;
+ unsigned int crop_left_y;
+ unsigned int crop_top_y;
+ unsigned int crop_left_uv;
+ unsigned int crop_top_uv;
+ unsigned int start_column_y;
+ unsigned int start_column_uv;
+ unsigned int chunk_width;
+ unsigned int chunk_height;
+ unsigned int block_width;
+ unsigned int block_height;
+ unsigned int input_width;
+ unsigned int input_height;
+ int output_width[IMGU_ABI_OSYS_PINS];
+ int output_height[IMGU_ABI_OSYS_PINS];
+ int output_offset[IMGU_ABI_OSYS_PINS];
+};
+
+/*
+ * frame_params - size IMGU_ABI_OSYS_PINS
+ * stripe_params - size IPU3_UAPI_MAX_STRIPES
+ */
+static int ipu3_css_osys_calc_frame_and_stripe_params(
+ struct ipu3_css *css, unsigned int stripes,
+ struct imgu_abi_osys_config *osys,
+ struct ipu3_css_scaler_info *scaler_luma,
+ struct ipu3_css_scaler_info *scaler_chroma,
+ struct ipu3_css_frame_params frame_params[],
+ struct ipu3_css_stripe_params stripe_params[],
+ unsigned int pipe)
+{
+ struct ipu3_css_reso reso;
+ unsigned int output_width, pin, s;
+ u32 input_width, input_height, target_width, target_height;
+ unsigned int procmode = 0;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ input_width = css_pipe->rect[IPU3_CSS_RECT_GDC].width;
+ input_height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
+ target_width = css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ target_height = css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+
+ /* Frame parameters */
+
+ /* Input width for Output System is output width of DVS (with GDC) */
+ reso.input_width = css_pipe->rect[IPU3_CSS_RECT_GDC].width;
+
+ /* Input height for Output System is output height of DVS (with GDC) */
+ reso.input_height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
+
+ reso.input_format =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+
+ reso.pin_width[IMGU_ABI_OSYS_PIN_OUT] =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ reso.pin_height[IMGU_ABI_OSYS_PIN_OUT] =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ reso.pin_stride[IMGU_ABI_OSYS_PIN_OUT] =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
+ reso.pin_format[IMGU_ABI_OSYS_PIN_OUT] =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+
+ reso.pin_width[IMGU_ABI_OSYS_PIN_VF] =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ reso.pin_height[IMGU_ABI_OSYS_PIN_VF] =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ reso.pin_stride[IMGU_ABI_OSYS_PIN_VF] =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
+ reso.pin_format[IMGU_ABI_OSYS_PIN_VF] =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
+
+ /* Configure the frame parameters for all output pins */
+
+ frame_params[IMGU_ABI_OSYS_PIN_OUT].width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ frame_params[IMGU_ABI_OSYS_PIN_OUT].height =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ frame_params[IMGU_ABI_OSYS_PIN_VF].width =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ frame_params[IMGU_ABI_OSYS_PIN_VF].height =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ frame_params[IMGU_ABI_OSYS_PIN_VF].crop_top = 0;
+ frame_params[IMGU_ABI_OSYS_PIN_VF].crop_left = 0;
+
+ for (pin = 0; pin < IMGU_ABI_OSYS_PINS; pin++) {
+ int enable = 0;
+ int scaled = 0;
+ unsigned int format = 0;
+ unsigned int tiling = 0;
+
+ frame_params[pin].flip = 0;
+ frame_params[pin].mirror = 0;
+ frame_params[pin].reduce_range = 0;
+ if (reso.pin_width[pin] != 0 && reso.pin_height[pin] != 0) {
+ enable = 1;
+ if (pin == IMGU_ABI_OSYS_PIN_OUT) {
+ if (reso.input_width < reso.pin_width[pin] ||
+ reso.input_height < reso.pin_height[pin])
+ return -EINVAL;
+ /*
+ * When the input and output resolutions
+ * differ, crop instead of scaling.
+ * Determine the crop factor for
+ * symmetric cropping.
+ */
+ frame_params[pin].crop_left = roundclosest_down(
+ (reso.input_width -
+ reso.pin_width[pin]) / 2,
+ IMGU_OSYS_DMA_CROP_W_LIMIT);
+ frame_params[pin].crop_top = roundclosest_down(
+ (reso.input_height -
+ reso.pin_height[pin]) / 2,
+ IMGU_OSYS_DMA_CROP_H_LIMIT);
+ } else {
+ if (reso.pin_width[pin] != reso.input_width ||
+ reso.pin_height[pin] != reso.input_height) {
+ /*
+ * If the resolution differs between the
+ * OSYS input and output, scaling is
+ * considered, except when the pin is MAIN.
+ * Whether the scaling factor is 1 and
+ * whether cropping is needed is decided
+ * later.
+ */
+ scaled = 1;
+ }
+ }
+ ipu3_css_osys_set_format(reso.pin_format[pin], &format,
+ &tiling);
+ } else {
+ enable = 0;
+ }
+ frame_params[pin].enable = enable;
+ frame_params[pin].format = format;
+ frame_params[pin].tiling = tiling;
+ frame_params[pin].stride = reso.pin_stride[pin];
+ frame_params[pin].scaled = scaled;
+ }
+
+ ipu3_css_scaler_calc(input_width, input_height, target_width,
+ target_height, osys, scaler_luma, scaler_chroma,
+ &reso.pin_width[IMGU_ABI_OSYS_PIN_VF],
+ &reso.pin_height[IMGU_ABI_OSYS_PIN_VF], &procmode);
+ dev_dbg(css->dev, "osys scaler procmode is %u", procmode);
+ output_width = reso.pin_width[IMGU_ABI_OSYS_PIN_VF];
+
+ if (output_width < reso.input_width / 2) {
+ /* Scaling factor < 0.5 */
+ reso.chunk_width = IMGU_OSYS_BLOCK_WIDTH;
+ reso.block_width = IMGU_OSYS_BLOCK_WIDTH;
+ } else { /* 0.5 <= Scaling factor <= 1.0 */
+ reso.chunk_width = IMGU_OSYS_BLOCK_WIDTH / 2;
+ reso.block_width = IMGU_OSYS_BLOCK_WIDTH;
+ }
+
+ if (output_width <= reso.input_width * 7 / 8) {
+ /* Scaling factor <= 0.875 */
+ reso.chunk_height = IMGU_OSYS_BLOCK_HEIGHT;
+ reso.block_height = IMGU_OSYS_BLOCK_HEIGHT;
+ } else { /* 0.875 < Scaling factor <= 1.0 */
+ reso.chunk_height = IMGU_OSYS_BLOCK_HEIGHT / 2;
+ reso.block_height = IMGU_OSYS_BLOCK_HEIGHT;
+ }
+
+ /*
+ * Calculate scaler configuration parameters based on input and output
+ * resolution.
+ */
+
+ if (frame_params[IMGU_ABI_OSYS_PIN_VF].enable) {
+ /*
+ * When the aspect ratios of the target and required
+ * resolutions differ, determine the crop factor for
+ * symmetric cropping.
+ */
+ u32 w = reso.pin_width[IMGU_ABI_OSYS_PIN_VF] -
+ frame_params[IMGU_ABI_OSYS_PIN_VF].width;
+ u32 h = reso.pin_height[IMGU_ABI_OSYS_PIN_VF] -
+ frame_params[IMGU_ABI_OSYS_PIN_VF].height;
+
+ frame_params[IMGU_ABI_OSYS_PIN_VF].crop_left =
+ roundclosest_down(w / 2, IMGU_OSYS_DMA_CROP_W_LIMIT);
+ frame_params[IMGU_ABI_OSYS_PIN_VF].crop_top =
+ roundclosest_down(h / 2, IMGU_OSYS_DMA_CROP_H_LIMIT);
+
+ if (reso.input_height % 4 || reso.input_width % 8) {
+ dev_err(css->dev, "OSYS input width is not multiple of 8 or\n");
+ dev_err(css->dev, "height is not multiple of 4\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Stripe parameters */
+
+ if (frame_params[IMGU_ABI_OSYS_PIN_VF].enable) {
+ output_width = reso.pin_width[IMGU_ABI_OSYS_PIN_VF];
+ } else {
+ /*
+ * If the scaler output is not enabled,
+ * take the input width as the output width,
+ * since there is no scaling at the main pin.
+ * The main pin resolution is not used because,
+ * with cropping, it can differ from the OSYS
+ * input resolution.
+ */
+ output_width = reso.input_width;
+ }
+
+ for (s = 0; s < stripes; s++) {
+ int stripe_offset_inp_y = 0;
+ int stripe_offset_inp_uv = 0;
+ int stripe_offset_out_y = 0;
+ int stripe_offset_out_uv = 0;
+ int stripe_phase_init_y = scaler_luma->phase_init;
+ int stripe_phase_init_uv = scaler_chroma->phase_init;
+ int stripe_offset_blk_y = 0;
+ int stripe_offset_blk_uv = 0;
+ int stripe_offset_col_y = 0;
+ int stripe_offset_col_uv = 0;
+ int stripe_pad_left_y = scaler_luma->pad_left;
+ int stripe_pad_left_uv = scaler_chroma->pad_left;
+ int stripe_pad_right_y = scaler_luma->pad_right;
+ int stripe_pad_right_uv = scaler_chroma->pad_right;
+ int stripe_crop_left_y = scaler_luma->crop_left;
+ int stripe_crop_left_uv = scaler_chroma->crop_left;
+ int stripe_input_width_y = reso.input_width;
+ int stripe_input_width_uv = 0;
+ int stripe_output_width_y = output_width;
+ int stripe_output_width_uv = 0;
+ int chunk_floor_y = 0;
+ int chunk_floor_uv = 0;
+ int chunk_ceil_uv = 0;
+
+ if (stripes > 1) {
+ if (s > 0) {
+ /* Calculate stripe offsets */
+ stripe_offset_out_y =
+ output_width * s / stripes;
+ stripe_offset_out_y =
+ rounddown(stripe_offset_out_y,
+ IPU3_UAPI_ISP_VEC_ELEMS);
+ stripe_offset_out_uv = stripe_offset_out_y /
+ IMGU_LUMA_TO_CHROMA_RATIO;
+ stripe_offset_inp_y =
+ ipu3_css_osys_calc_stripe_offset(
+ stripe_offset_out_y,
+ IMGU_OSYS_FIR_PHASES,
+ scaler_luma->phase_init,
+ scaler_luma->phase_step,
+ scaler_luma->pad_left);
+ stripe_offset_inp_uv =
+ ipu3_css_osys_calc_stripe_offset(
+ stripe_offset_out_uv,
+ IMGU_OSYS_FIR_PHASES,
+ scaler_chroma->phase_init,
+ scaler_chroma->phase_step,
+ scaler_chroma->pad_left);
+
+ /* Calculate stripe phase init */
+ stripe_phase_init_y =
+ ipu3_css_osys_calc_stripe_phase_init(
+ stripe_offset_out_y,
+ IMGU_OSYS_FIR_PHASES,
+ scaler_luma->phase_init,
+ scaler_luma->phase_step,
+ scaler_luma->pad_left);
+ stripe_phase_init_uv =
+ ipu3_css_osys_calc_stripe_phase_init(
+ stripe_offset_out_uv,
+ IMGU_OSYS_FIR_PHASES,
+ scaler_chroma->phase_init,
+ scaler_chroma->phase_step,
+ scaler_chroma->pad_left);
+
+ /*
+ * Chunk boundary corner case - luma and chroma
+ * start from different input chunks.
+ */
+ chunk_floor_y = rounddown(stripe_offset_inp_y,
+ reso.chunk_width);
+ chunk_floor_uv =
+ rounddown(stripe_offset_inp_uv,
+ reso.chunk_width /
+ IMGU_LUMA_TO_CHROMA_RATIO);
+
+ if (chunk_floor_y != chunk_floor_uv *
+ IMGU_LUMA_TO_CHROMA_RATIO) {
+ /*
+ * Match starting luma/chroma chunks.
+ * Decrease offset for UV and add output
+ * cropping.
+ */
+ stripe_offset_inp_uv -= 1;
+ stripe_crop_left_uv += 1;
+ stripe_phase_init_uv -=
+ scaler_luma->phase_step;
+ if (stripe_phase_init_uv < 0)
+ stripe_phase_init_uv =
+ stripe_phase_init_uv +
+ IMGU_OSYS_FIR_PHASES;
+ }
+ /*
+ * FW workaround for a HW bug: if the first
+ * chroma pixel is generated exactly at the end
+ * of a chunk, the scaler HW may not output the pixel
+ * for downscale factors smaller than 1.5
+ * (timing issue).
+ */
+ chunk_ceil_uv =
+ roundup(stripe_offset_inp_uv,
+ reso.chunk_width /
+ IMGU_LUMA_TO_CHROMA_RATIO);
+
+ if (stripe_offset_inp_uv ==
+ chunk_ceil_uv - IMGU_OSYS_TAPS_UV) {
+ /*
+ * Decrease input offset and add
+ * output cropping
+ */
+ stripe_offset_inp_uv -= 1;
+ stripe_phase_init_uv -=
+ scaler_luma->phase_step;
+ if (stripe_phase_init_uv < 0) {
+ stripe_phase_init_uv +=
+ IMGU_OSYS_FIR_PHASES;
+ stripe_crop_left_uv += 1;
+ }
+ }
+
+ /*
+ * Calculate block and column offsets for the
+ * input stripe
+ */
+ stripe_offset_blk_y =
+ rounddown(stripe_offset_inp_y,
+ IMGU_INPUT_BLOCK_WIDTH);
+ stripe_offset_blk_uv =
+ rounddown(stripe_offset_inp_uv,
+ IMGU_INPUT_BLOCK_WIDTH /
+ IMGU_LUMA_TO_CHROMA_RATIO);
+ stripe_offset_col_y = stripe_offset_inp_y -
+ stripe_offset_blk_y;
+ stripe_offset_col_uv = stripe_offset_inp_uv -
+ stripe_offset_blk_uv;
+
+ /* Left padding is only for the first stripe */
+ stripe_pad_left_y = 0;
+ stripe_pad_left_uv = 0;
+ }
+
+ /* Right padding is only for the last stripe */
+ if (s < stripes - 1) {
+ int next_offset;
+
+ stripe_pad_right_y = 0;
+ stripe_pad_right_uv = 0;
+
+ next_offset = output_width * (s + 1) / stripes;
+ next_offset = rounddown(next_offset, 64);
+ stripe_output_width_y = next_offset -
+ stripe_offset_out_y;
+ } else {
+ stripe_output_width_y = output_width -
+ stripe_offset_out_y;
+ }
+
+ /* Calculate target output stripe width */
+ stripe_output_width_uv = stripe_output_width_y /
+ IMGU_LUMA_TO_CHROMA_RATIO;
+ /* Calculate input stripe width */
+ stripe_input_width_y = stripe_offset_col_y +
+ ipu3_css_osys_calc_inp_stripe_width(
+ stripe_output_width_y,
+ IMGU_OSYS_FIR_PHASES,
+ stripe_phase_init_y,
+ scaler_luma->phase_step,
+ IMGU_OSYS_TAPS_Y,
+ stripe_pad_left_y,
+ stripe_pad_right_y);
+
+ stripe_input_width_uv = stripe_offset_col_uv +
+ ipu3_css_osys_calc_inp_stripe_width(
+ stripe_output_width_uv,
+ IMGU_OSYS_FIR_PHASES,
+ stripe_phase_init_uv,
+ scaler_chroma->phase_step,
+ IMGU_OSYS_TAPS_UV,
+ stripe_pad_left_uv,
+ stripe_pad_right_uv);
+
+ stripe_input_width_uv = max(DIV_ROUND_UP(
+ stripe_input_width_y,
+ IMGU_LUMA_TO_CHROMA_RATIO),
+ stripe_input_width_uv);
+
+ stripe_input_width_y = stripe_input_width_uv *
+ IMGU_LUMA_TO_CHROMA_RATIO;
+
+ if (s >= stripes - 1) {
+ stripe_input_width_y = reso.input_width -
+ stripe_offset_blk_y;
+ /*
+ * The scaler requires that the last stripe
+ * spans at least two input blocks.
+ */
+ }
+
+ /*
+ * Spec: input stripe width must be a multiple of 8.
+ * Increase the input width and recalculate the output
+ * width. This may produce an extra column of junk
+ * blocks which will be overwritten by the
+ * next stripe.
+ */
+ stripe_input_width_y = ALIGN(stripe_input_width_y, 8);
+ stripe_output_width_y =
+ ipu3_css_osys_out_stripe_width(
+ stripe_input_width_y,
+ IMGU_OSYS_FIR_PHASES,
+ stripe_phase_init_y,
+ scaler_luma->phase_step,
+ IMGU_OSYS_TAPS_Y,
+ stripe_pad_left_y,
+ stripe_pad_right_y,
+ stripe_offset_col_y);
+
+ stripe_output_width_y =
+ rounddown(stripe_output_width_y,
+ IMGU_LUMA_TO_CHROMA_RATIO);
+ }
+ /*
+ * The following section processes the parameters
+ * for both cases - striping or no striping.
+ */
+ {
+ unsigned int i;
+ int pin_scale = 0;
+ /* Input resolution */
+
+ stripe_params[s].input_width = stripe_input_width_y;
+ stripe_params[s].input_height = reso.input_height;
+
+ for (i = 0; i < IMGU_ABI_OSYS_PINS; i++) {
+ if (frame_params[i].scaled) {
+ /*
+ * Output stripe resolution and offset
+ * as produced by the scaler; actual
+ * output resolution may be slightly
+ * smaller.
+ */
+ stripe_params[s].output_width[i] =
+ stripe_output_width_y;
+ stripe_params[s].output_height[i] =
+ reso.pin_height[i];
+ stripe_params[s].output_offset[i] =
+ stripe_offset_out_y;
+
+ pin_scale += frame_params[i].scaled;
+ } else {
+ /* Unscaled pin */
+ stripe_params[s].output_width[i] =
+ stripe_params[s].input_width;
+ stripe_params[s].output_height[i] =
+ stripe_params[s].input_height;
+ stripe_params[s].output_offset[i] =
+ stripe_offset_blk_y;
+ }
+ }
+
+ /* If no pin uses the scaler, we use BYPASS mode */
+ stripe_params[s].processing_mode = procmode;
+ stripe_params[s].phase_step = scaler_luma->phase_step;
+ stripe_params[s].exp_shift = scaler_luma->exp_shift;
+ stripe_params[s].phase_init_left_y =
+ stripe_phase_init_y;
+ stripe_params[s].phase_init_left_uv =
+ stripe_phase_init_uv;
+ stripe_params[s].phase_init_top_y =
+ scaler_luma->phase_init;
+ stripe_params[s].phase_init_top_uv =
+ scaler_chroma->phase_init;
+ stripe_params[s].pad_left_y = stripe_pad_left_y;
+ stripe_params[s].pad_left_uv = stripe_pad_left_uv;
+ stripe_params[s].pad_right_y = stripe_pad_right_y;
+ stripe_params[s].pad_right_uv = stripe_pad_right_uv;
+ stripe_params[s].pad_top_y = scaler_luma->pad_left;
+ stripe_params[s].pad_top_uv = scaler_chroma->pad_left;
+ stripe_params[s].pad_bottom_y = scaler_luma->pad_right;
+ stripe_params[s].pad_bottom_uv =
+ scaler_chroma->pad_right;
+ stripe_params[s].crop_left_y = stripe_crop_left_y;
+ stripe_params[s].crop_top_y = scaler_luma->crop_top;
+ stripe_params[s].crop_left_uv = stripe_crop_left_uv;
+ stripe_params[s].crop_top_uv = scaler_chroma->crop_top;
+ stripe_params[s].start_column_y = stripe_offset_col_y;
+ stripe_params[s].start_column_uv = stripe_offset_col_uv;
+ stripe_params[s].chunk_width = reso.chunk_width;
+ stripe_params[s].chunk_height = reso.chunk_height;
+ stripe_params[s].block_width = reso.block_width;
+ stripe_params[s].block_height = reso.block_height;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * This function configures the Output Formatter System, given the number of
+ * stripes and the scaler luma and chroma parameters.
+ */
+static int ipu3_css_osys_calc(struct ipu3_css *css, unsigned int pipe,
+ unsigned int stripes,
+ struct imgu_abi_osys_config *osys,
+ struct ipu3_css_scaler_info *scaler_luma,
+ struct ipu3_css_scaler_info *scaler_chroma,
+ struct imgu_abi_stripes block_stripes[])
+{
+ struct ipu3_css_frame_params frame_params[IMGU_ABI_OSYS_PINS];
+ struct ipu3_css_stripe_params stripe_params[IPU3_UAPI_MAX_STRIPES];
+ struct imgu_abi_osys_formatter_params *param;
+ unsigned int pin, s;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ memset(osys, 0, sizeof(*osys));
+
+ /* Compute the frame and stripe params */
+ if (ipu3_css_osys_calc_frame_and_stripe_params(css, stripes, osys,
+ scaler_luma,
+ scaler_chroma,
+ frame_params,
+ stripe_params, pipe))
+ return -EINVAL;
+
+ /* Output formatter system parameters */
+
+ for (s = 0; s < stripes; s++) {
+ struct imgu_abi_osys_scaler_params *scaler =
+ &osys->scaler[s].param;
+ int fifo_addr_fmt = IMGU_FIFO_ADDR_SCALER_TO_FMT;
+ int fifo_addr_ack = IMGU_FIFO_ADDR_SCALER_TO_SP;
+
+ /* OUTPUT 0 / PIN 0 is only Scaler output */
+ scaler->inp_buf_y_st_addr = IMGU_VMEM1_INP_BUF_ADDR;
+
+ /*
+ * = (IMGU_OSYS_BLOCK_WIDTH / IMGU_VMEM1_ELEMS_PER_VEC)
+ * = (2 * IPU3_UAPI_ISP_VEC_ELEMS) /
+ * (IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS)
+ * = 2 * 64 / 32 = 4
+ */
+ scaler->inp_buf_y_line_stride = IMGU_VMEM1_Y_STRIDE;
+ /*
+ * = (IMGU_VMEM1_V_OFFSET + VMEM1_uv_size)
+ * = (IMGU_VMEM1_U_OFFSET + VMEM1_uv_size) +
+ * (VMEM1_y_size / 4)
+ * = (VMEM1_y_size) + (VMEM1_y_size / 4) +
+ * (IMGU_OSYS_BLOCK_HEIGHT * IMGU_VMEM1_Y_STRIDE)/4
+ * = (IMGU_OSYS_BLOCK_HEIGHT * IMGU_VMEM1_Y_STRIDE)
+ */
+ scaler->inp_buf_y_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ scaler->inp_buf_u_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
+ IMGU_VMEM1_U_OFFSET;
+ scaler->inp_buf_v_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
+ IMGU_VMEM1_V_OFFSET;
+ scaler->inp_buf_uv_line_stride = IMGU_VMEM1_UV_STRIDE;
+ scaler->inp_buf_uv_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ scaler->inp_buf_chunk_width = stripe_params[s].chunk_width;
+ scaler->inp_buf_nr_buffers = IMGU_OSYS_NUM_INPUT_BUFFERS;
+
+ /* Output buffers */
+ scaler->out_buf_y_st_addr = IMGU_VMEM1_INT_BUF_ADDR;
+ scaler->out_buf_y_line_stride = stripe_params[s].block_width /
+ IMGU_VMEM1_ELEMS_PER_VEC;
+ scaler->out_buf_y_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ scaler->out_buf_u_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
+ IMGU_VMEM1_U_OFFSET;
+ scaler->out_buf_v_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
+ IMGU_VMEM1_V_OFFSET;
+ scaler->out_buf_uv_line_stride = stripe_params[s].block_width /
+ IMGU_VMEM1_ELEMS_PER_VEC / 2;
+ scaler->out_buf_uv_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ scaler->out_buf_nr_buffers = IMGU_OSYS_NUM_INTERM_BUFFERS;
+
+ /* Intermediate buffers */
+ scaler->int_buf_y_st_addr = IMGU_VMEM2_BUF_Y_ADDR;
+ scaler->int_buf_y_line_stride = IMGU_VMEM2_BUF_Y_STRIDE;
+ scaler->int_buf_u_st_addr = IMGU_VMEM2_BUF_U_ADDR;
+ scaler->int_buf_v_st_addr = IMGU_VMEM2_BUF_V_ADDR;
+ scaler->int_buf_uv_line_stride = IMGU_VMEM2_BUF_UV_STRIDE;
+ scaler->int_buf_height = IMGU_VMEM2_LINES_PER_BLOCK;
+ scaler->int_buf_chunk_width = stripe_params[s].chunk_height;
+ scaler->int_buf_chunk_height = stripe_params[s].block_width;
+
+ /* Context buffers */
+ scaler->ctx_buf_hor_y_st_addr = IMGU_VMEM3_HOR_Y_ADDR;
+ scaler->ctx_buf_hor_u_st_addr = IMGU_VMEM3_HOR_U_ADDR;
+ scaler->ctx_buf_hor_v_st_addr = IMGU_VMEM3_HOR_V_ADDR;
+ scaler->ctx_buf_ver_y_st_addr = IMGU_VMEM3_VER_Y_ADDR;
+ scaler->ctx_buf_ver_u_st_addr = IMGU_VMEM3_VER_U_ADDR;
+ scaler->ctx_buf_ver_v_st_addr = IMGU_VMEM3_VER_V_ADDR;
+
+ /* Addresses for release-input and process-output tokens */
+ scaler->release_inp_buf_addr = fifo_addr_ack;
+ scaler->release_inp_buf_en = 1;
+ scaler->release_out_buf_en = 1;
+ scaler->process_out_buf_addr = fifo_addr_fmt;
+
+ /* Settings dimensions, padding, cropping */
+ scaler->input_image_y_width = stripe_params[s].input_width;
+ scaler->input_image_y_height = stripe_params[s].input_height;
+ scaler->input_image_y_start_column =
+ stripe_params[s].start_column_y;
+ scaler->input_image_uv_start_column =
+ stripe_params[s].start_column_uv;
+ scaler->input_image_y_left_pad = stripe_params[s].pad_left_y;
+ scaler->input_image_uv_left_pad = stripe_params[s].pad_left_uv;
+ scaler->input_image_y_right_pad = stripe_params[s].pad_right_y;
+ scaler->input_image_uv_right_pad =
+ stripe_params[s].pad_right_uv;
+ scaler->input_image_y_top_pad = stripe_params[s].pad_top_y;
+ scaler->input_image_uv_top_pad = stripe_params[s].pad_top_uv;
+ scaler->input_image_y_bottom_pad =
+ stripe_params[s].pad_bottom_y;
+ scaler->input_image_uv_bottom_pad =
+ stripe_params[s].pad_bottom_uv;
+ scaler->processing_mode = stripe_params[s].processing_mode;
+ scaler->scaling_ratio = stripe_params[s].phase_step;
+ scaler->y_left_phase_init = stripe_params[s].phase_init_left_y;
+ scaler->uv_left_phase_init =
+ stripe_params[s].phase_init_left_uv;
+ scaler->y_top_phase_init = stripe_params[s].phase_init_top_y;
+ scaler->uv_top_phase_init = stripe_params[s].phase_init_top_uv;
+ scaler->coeffs_exp_shift = stripe_params[s].exp_shift;
+ scaler->out_y_left_crop = stripe_params[s].crop_left_y;
+ scaler->out_uv_left_crop = stripe_params[s].crop_left_uv;
+ scaler->out_y_top_crop = stripe_params[s].crop_top_y;
+ scaler->out_uv_top_crop = stripe_params[s].crop_top_uv;
+
+ for (pin = 0; pin < IMGU_ABI_OSYS_PINS; pin++) {
+ int in_fifo_addr;
+ int out_fifo_addr;
+ int block_width_vecs;
+ int input_width_s;
+ int input_width_vecs;
+ int input_buf_y_st_addr;
+ int input_buf_u_st_addr;
+ int input_buf_v_st_addr;
+ int input_buf_y_line_stride;
+ int input_buf_uv_line_stride;
+ int output_buf_y_line_stride;
+ int output_buf_uv_line_stride;
+ int output_buf_nr_y_lines;
+ int block_height;
+ int block_width;
+ struct imgu_abi_osys_frame_params *fr_pr;
+
+ fr_pr = &osys->frame[pin].param;
+
+ /* Frame parameters */
+ fr_pr->enable = frame_params[pin].enable;
+ fr_pr->format = frame_params[pin].format;
+ fr_pr->mirror = frame_params[pin].mirror;
+ fr_pr->flip = frame_params[pin].flip;
+ fr_pr->tiling = frame_params[pin].tiling;
+ fr_pr->width = frame_params[pin].width;
+ fr_pr->height = frame_params[pin].height;
+ fr_pr->stride = frame_params[pin].stride;
+ fr_pr->scaled = frame_params[pin].scaled;
+
+ /* Stripe parameters */
+ osys->stripe[s].crop_top[pin] =
+ frame_params[pin].crop_top;
+ osys->stripe[s].input_width =
+ stripe_params[s].input_width;
+ osys->stripe[s].input_height =
+ stripe_params[s].input_height;
+ osys->stripe[s].block_height =
+ stripe_params[s].block_height;
+ osys->stripe[s].block_width =
+ stripe_params[s].block_width;
+ osys->stripe[s].output_width[pin] =
+ stripe_params[s].output_width[pin];
+ osys->stripe[s].output_height[pin] =
+ stripe_params[s].output_height[pin];
+
+ if (s == 0) {
+ /* Only first stripe should do left cropping */
+ osys->stripe[s].crop_left[pin] =
+ frame_params[pin].crop_left;
+ osys->stripe[s].output_offset[pin] =
+ stripe_params[s].output_offset[pin];
+ } else {
+ /*
+ * The stripe offset for the other stripes
+ * should be adjusted according to the
+ * cropping done at the first stripe.
+ */
+ osys->stripe[s].crop_left[pin] = 0;
+ osys->stripe[s].output_offset[pin] =
+ (stripe_params[s].output_offset[pin] -
+ osys->stripe[0].crop_left[pin]);
+ }
+
+ if (!frame_params[pin].enable)
+ continue;
+
+ /* Formatter: configurations */
+
+ /*
+ * Get the dimensions of the input blocks of the
+ * formatter, which is the same as the output
+ * blocks of the scaler.
+ */
+ if (frame_params[pin].scaled) {
+ block_height = stripe_params[s].block_height;
+ block_width = stripe_params[s].block_width;
+ } else {
+ block_height = IMGU_OSYS_BLOCK_HEIGHT;
+ block_width = IMGU_OSYS_BLOCK_WIDTH;
+ }
+ block_width_vecs =
+ block_width / IMGU_VMEM1_ELEMS_PER_VEC;
+ /*
+ * The input/output line stride depends on the
+ * block size.
+ */
+ input_buf_y_line_stride = block_width_vecs;
+ input_buf_uv_line_stride = block_width_vecs / 2;
+ output_buf_y_line_stride = block_width_vecs;
+ output_buf_uv_line_stride = block_width_vecs / 2;
+ output_buf_nr_y_lines = block_height;
+ if (frame_params[pin].format ==
+ IMGU_ABI_OSYS_FORMAT_NV12 ||
+ frame_params[pin].format ==
+ IMGU_ABI_OSYS_FORMAT_NV21)
+ output_buf_uv_line_stride =
+ output_buf_y_line_stride;
+
+ /*
+ * Tiled outputs use a different output buffer
+ * configuration. The input (= scaler output) block
+ * width translates to a tile height, and the block
+ * height to the tile width. The default block size of
+ * 128x32 maps exactly onto a 4kB tile (512x8) for Y.
+ * For UV, the tile width is always half.
+ */
+ if (frame_params[pin].tiling) {
+ output_buf_nr_y_lines = 8;
+ output_buf_y_line_stride = 512 /
+ IMGU_VMEM1_ELEMS_PER_VEC;
+ output_buf_uv_line_stride = 256 /
+ IMGU_VMEM1_ELEMS_PER_VEC;
+ }
+
+ /*
+ * Store the output buffer line stride. Will be
+ * used to compute buffer offsets in boundary
+ * conditions when output blocks are partially
+ * outside the image.
+ */
+ osys->stripe[s].buf_stride[pin] =
+ output_buf_y_line_stride *
+ IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS;
+ if (frame_params[pin].scaled) {
+ /*
+ * The input buffers are the intermediate
+ * buffers (scaler output)
+ */
+ input_buf_y_st_addr = IMGU_VMEM1_INT_BUF_ADDR;
+ input_buf_u_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
+ IMGU_VMEM1_U_OFFSET;
+ input_buf_v_st_addr = IMGU_VMEM1_INT_BUF_ADDR +
+ IMGU_VMEM1_V_OFFSET;
+ } else {
+ /*
+ * The input buffers are the buffers
+ * filled by the SP
+ */
+ input_buf_y_st_addr = IMGU_VMEM1_INP_BUF_ADDR;
+ input_buf_u_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
+ IMGU_VMEM1_U_OFFSET;
+ input_buf_v_st_addr = IMGU_VMEM1_INP_BUF_ADDR +
+ IMGU_VMEM1_V_OFFSET;
+ }
+
+ /*
+ * The formatter input width must be rounded to
+ * the block width. Otherwise the formatter will
+ * not recognize the end of the line, resulting
+ * in incorrect tiling (system may hang!) and
+ * possibly other problems.
+ */
+ input_width_s =
+ roundup(stripe_params[s].output_width[pin],
+ block_width);
+ input_width_vecs = input_width_s /
+ IMGU_VMEM1_ELEMS_PER_VEC;
+ out_fifo_addr = IMGU_FIFO_ADDR_FMT_TO_SP;
+ /*
+ * Process-output tokens must be sent to the SP.
+ * When scaling, the release-input tokens can be
+ * sent directly to the scaler, otherwise the
+ * formatter should send them to the SP.
+ */
+ if (frame_params[pin].scaled)
+ in_fifo_addr = IMGU_FIFO_ADDR_FMT_TO_SCALER;
+ else
+ in_fifo_addr = IMGU_FIFO_ADDR_FMT_TO_SP;
+
+ /* Formatter */
+ param = &osys->formatter[s][pin].param;
+
+ param->format = frame_params[pin].format;
+ param->flip = frame_params[pin].flip;
+ param->mirror = frame_params[pin].mirror;
+ param->tiling = frame_params[pin].tiling;
+ param->reduce_range = frame_params[pin].reduce_range;
+ param->alpha_blending = 0;
+ param->release_inp_addr = in_fifo_addr;
+ param->release_inp_en = 1;
+ param->process_out_buf_addr = out_fifo_addr;
+ param->image_width_vecs = input_width_vecs;
+ param->image_height_lines =
+ stripe_params[s].output_height[pin];
+ param->inp_buff_y_st_addr = input_buf_y_st_addr;
+ param->inp_buff_y_line_stride = input_buf_y_line_stride;
+ param->inp_buff_y_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ param->int_buff_u_st_addr = input_buf_u_st_addr;
+ param->int_buff_v_st_addr = input_buf_v_st_addr;
+ param->inp_buff_uv_line_stride =
+ input_buf_uv_line_stride;
+ param->inp_buff_uv_buffer_stride = IMGU_VMEM1_BUF_SIZE;
+ param->out_buff_level = 0;
+ param->out_buff_nr_y_lines = output_buf_nr_y_lines;
+ param->out_buff_u_st_offset = IMGU_VMEM1_U_OFFSET;
+ param->out_buff_v_st_offset = IMGU_VMEM1_V_OFFSET;
+ param->out_buff_y_line_stride =
+ output_buf_y_line_stride;
+ param->out_buff_uv_line_stride =
+ output_buf_uv_line_stride;
+ param->hist_buff_st_addr = IMGU_VMEM1_HST_BUF_ADDR;
+ param->hist_buff_line_stride =
+ IMGU_VMEM1_HST_BUF_STRIDE;
+ param->hist_buff_nr_lines = IMGU_VMEM1_HST_BUF_NLINES;
+ }
+ }
+
+ block_stripes[0].offset = 0;
+ if (stripes <= 1) {
+ block_stripes[0].width = stripe_params[0].input_width;
+ block_stripes[0].height = stripe_params[0].input_height;
+ } else {
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ unsigned int sp_block_width =
+ bi->info.isp.sp.block.block_width *
+ IPU3_UAPI_ISP_VEC_ELEMS;
+
+ block_stripes[0].width = roundup(stripe_params[0].input_width,
+ sp_block_width);
+ block_stripes[1].offset =
+ rounddown(css_pipe->rect[IPU3_CSS_RECT_GDC].width -
+ stripe_params[1].input_width, sp_block_width);
+ block_stripes[1].width =
+ roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].width -
+ block_stripes[1].offset, sp_block_width);
+ block_stripes[0].height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
+ block_stripes[1].height = block_stripes[0].height;
+ }
+
+ return 0;
+}
+
+/*********************** Mostly 3A operations ******************************/
+
+/*
+ * This function creates a "TO-DO list" (operations) for the SP code.
+ *
+ * There are 2 types of operations:
+ * 1. Transfer: Issue a DMA transfer request for copying grid cells from DDR
+ * to accelerator space (NOTE that this space is limited); associated data:
+ * DDR address + the accelerator's config set index (acc's address).
+ *
+ * 2. Issue "Process Lines Command" to shd accelerator
+ * associated data: #lines + which config set to use (actually, accelerator
+ * will use x AND (x+1)%num_of_sets - NOTE that this implies the restriction
+ * of not touching config sets x & (x+1)%num_of_sets when process_lines(x)
+ * is active).
+ *
+ * Basically there are 2 types of operation "chunks":
+ * 1. "Initial chunk": Initially, we do as many transfers as we can (and
+ * need) [0 - max sets (3)], followed by 1 or 2 "process lines" operations.
+ *
+ * 2. "Regular chunk": 1 transfer followed by 1 process lines operation
+ * (in some cases we might need an additional transfer at the last chunk).
+ *
+ * An example sequence:
+ * --> init
+ * tr (0)
+ * tr (1)
+ * tr (2)
+ * pl (0)
+ * pl (1)
+ * --> ack (0)
+ * tr (3)
+ * pl (2)
+ * --> ack (1)
+ * pl (3)
+ * --> ack (2)
+ * do nothing
+ * --> ack (3)
+ * do nothing
+ */
+
+static int
+ipu3_css_shd_ops_calc(struct imgu_abi_shd_intra_frame_operations_data *ops,
+ const struct ipu3_uapi_shd_grid_config *grid,
+ unsigned int image_height)
+{
+ unsigned int block_height = 1 << grid->block_height_log2;
+ unsigned int grid_height_per_slice = grid->grid_height_per_slice;
+ unsigned int set_height = grid_height_per_slice * block_height;
+
+ /* We currently support only abs(y_start) > grid_height_per_slice */
+ unsigned int positive_y_start = (unsigned int)-grid->y_start;
+ unsigned int first_process_lines =
+ set_height - (positive_y_start % set_height);
+ unsigned int last_set_height;
+ unsigned int num_of_sets;
+
+ struct imgu_abi_acc_operation *p_op;
+ struct imgu_abi_acc_process_lines_cmd_data *p_pl;
+ struct imgu_abi_shd_transfer_luts_set_data *p_tr;
+
+ unsigned int op_idx, pl_idx, tr_idx;
+ unsigned char tr_set_num, pl_cfg_set;
+
+ /*
+ * When the number of lines for the last process lines command
+ * is equal to a set height, we need another line of grid cells -
+ * an additional transfer is required.
+ */
+ unsigned char last_tr = 0;
+
+ /* Add "process lines" command to the list of operations */
+ bool add_pl;
+ /* Add DMA xfer (config set) command to the list of ops */
+ bool add_tr;
+
+ /*
+ * Available partial grid (the part that fits into #IMGU_SHD_SETS sets)
+ * doesn't cover whole frame - need to process in chunks
+ */
+ if (image_height > first_process_lines) {
+ last_set_height =
+ (image_height - first_process_lines) % set_height;
+ num_of_sets = last_set_height > 0 ?
+ (image_height - first_process_lines) / set_height + 2 :
+ (image_height - first_process_lines) / set_height + 1;
+ last_tr = (set_height - last_set_height <= block_height ||
+ last_set_height == 0) ? 1 : 0;
+ } else { /* partial grid covers whole frame */
+ last_set_height = 0;
+ num_of_sets = 1;
+ first_process_lines = image_height;
+ last_tr = set_height - image_height <= block_height ? 1 : 0;
+ }
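+
+ /*
+ * Illustrative numbers (assumed, not from the hardware spec):
+ * block_height == 8, grid_height_per_slice == 10 (set_height == 80),
+ * y_start == -16 and image_height == 360 give first_process_lines ==
+ * 80 - 16 % 80 == 64, leaving 296 lines; thus last_set_height ==
+ * 296 % 80 == 56 and num_of_sets == 296 / 80 + 2 == 5, covering
+ * 64 + 3 * 80 + 56 == 360 lines in total.
+ */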
+
+ /* Init operations lists and counters */
+ p_op = ops->operation_list;
+ op_idx = 0;
+ p_pl = ops->process_lines_data;
+ pl_idx = 0;
+ p_tr = ops->transfer_data;
+ tr_idx = 0;
+
+ memset(ops, 0, sizeof(*ops));
+
+ /* Cyclic counters that holds config set number [0,IMGU_SHD_SETS) */
+ tr_set_num = 0;
+ pl_cfg_set = 0;
+
+ /*
+ * Always start with a transfer - a process lines command must be
+ * initiated only after the appropriate config sets are in place (2
+ * configuration sets per process lines command, except for the last one).
+ */
+ add_pl = false;
+ add_tr = true;
+
+ while (add_pl || add_tr) {
+ /* Transfer ops */
+ if (add_tr) {
+ if (op_idx >= IMGU_ABI_SHD_MAX_OPERATIONS ||
+ tr_idx >= IMGU_ABI_SHD_MAX_TRANSFERS)
+ return -EINVAL;
+ p_op[op_idx].op_type =
+ IMGU_ABI_ACC_OPTYPE_TRANSFER_DATA;
+ p_op[op_idx].op_indicator = IMGU_ABI_ACC_OP_IDLE;
+ op_idx++;
+ p_tr[tr_idx].set_number = tr_set_num;
+ tr_idx++;
+ tr_set_num = (tr_set_num + 1) % IMGU_SHD_SETS;
+ }
+
+ /* Process-lines ops */
+ if (add_pl) {
+ if (op_idx >= IMGU_ABI_SHD_MAX_OPERATIONS ||
+ pl_idx >= IMGU_ABI_SHD_MAX_PROCESS_LINES)
+ return -EINVAL;
+ p_op[op_idx].op_type =
+ IMGU_ABI_ACC_OPTYPE_PROCESS_LINES;
+
+ /*
+ * In case we have 2 process lines commands -
+ * don't stop after the first one
+ */
+ if (pl_idx == 0 && num_of_sets != 1)
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_IDLE;
+ /*
+ * Initiate last process lines command -
+ * end of operation list.
+ */
+ else if (pl_idx == num_of_sets - 1)
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_OPS;
+ /*
+ * Intermediate process lines command - end of an operation
+ * "chunk" (meaning a few "transfers" followed by a few
+ * "process lines" commands).
+ */
+ else
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_ACK;
+
+ op_idx++;
+
+ /* first process line operation */
+ if (pl_idx == 0)
+ p_pl[pl_idx].lines = first_process_lines;
+ /* Last process line operation */
+ else if (pl_idx == num_of_sets - 1 &&
+ last_set_height > 0)
+ p_pl[pl_idx].lines = last_set_height;
+ else /* "regular" process lines operation */
+ p_pl[pl_idx].lines = set_height;
+
+ p_pl[pl_idx].cfg_set = pl_cfg_set;
+ pl_idx++;
+ pl_cfg_set = (pl_cfg_set + 1) % IMGU_SHD_SETS;
+ }
+
+ /*
+ * Initially, we always transfer
+ * min(IMGU_SHD_SETS, num_of_sets) - after that we fill in the
+ * corresponding process lines commands.
+ */
+ if (tr_idx == IMGU_SHD_SETS ||
+ tr_idx == num_of_sets + last_tr) {
+ add_tr = false;
+ add_pl = true;
+ }
+
+ /*
+ * We have finished the "initial" operations chunk -
+ * be ready to get more chunks.
+ */
+ if (pl_idx == 2) {
+ add_tr = true;
+ add_pl = true;
+ }
+
+ /* Stop conditions for each operation type */
+ if (tr_idx == num_of_sets + last_tr)
+ add_tr = false;
+ if (pl_idx == num_of_sets)
+ add_pl = false;
+ }
+
+ return 0;
+}
+
+/*
+ * The following handshake protocol is the same for AF, AWB and AWB FR.
+ *
+ * for n sets of meta-data, the flow is:
+ * --> init
+ * process-lines (0)
+ * process-lines (1) eoc
+ * --> ack (0)
+ * read-meta-data (0)
+ * process-lines (2) eoc
+ * --> ack (1)
+ * read-meta-data (1)
+ * process-lines (3) eoc
+ * ...
+ *
+ * --> ack (n-3)
+ * read-meta-data (n-3)
+ * process-lines (n-1) eoc
+ * --> ack (n-2)
+ * read-meta-data (n-2) eoc
+ * --> ack (n-1)
+ * read-meta-data (n-1) eof
+ *
+ * for 2 sets we get:
+ * --> init
+ * pl (0)
+ * pl (1) eoc
+ * --> ack (0)
+ * pl (2) (rest of image, if applicable)
+ * rmd (0) eoc
+ * --> ack (1)
+ * rmd (1) eof
+ * --> (ack (2))
+ * do nothing
+ *
+ * for only one set:
+ *
+ * --> init
+ * pl(0) eoc
+ * --> ack (0)
+ * rmd (0) eof
+ *
+ * grid smaller than image case
+ * for example 128x128 grid (block size 8x8, 16x16 num of blocks)
+ * start at (0,0)
+ * 1st set holds 160 cells - 10 blocks vertical, 16 horizontal
+ * => 1st process lines = 80
+ * we're left with 128-80=48 lines (6 blocks vertical)
+ * => 2nd process lines = 48
+ * last process lines covers the rest of the image: image_height - 128
+ *
+ * --> init
+ * pl (0) first
+ * pl (1) last-in-grid
+ * --> ack (0)
+ * rmd (0)
+ * pl (2) after-grid
+ * --> ack (1)
+ * rmd (1) eof
+ * --> ack (2)
+ * do nothing
+ */
+struct process_lines {
+ unsigned int image_height;
+ unsigned short grid_height;
+ unsigned short block_height;
+ unsigned short y_start;
+ unsigned char grid_height_per_slice;
+
+ unsigned short max_op; /* max operation */
+ unsigned short max_tr; /* max transaction */
+ unsigned char acc_enable;
+};
+
+/* Helper to config intra_frame_operations_data. */
+static int
+ipu3_css_acc_process_lines(const struct process_lines *pl,
+ struct imgu_abi_acc_operation *p_op,
+ struct imgu_abi_acc_process_lines_cmd_data *p_pl,
+ struct imgu_abi_acc_transfer_op_data *p_tr)
+{
+ unsigned short op_idx = 0, pl_idx = 0, tr_idx = 0;
+ unsigned char tr_set_num = 0, pl_cfg_set = 0;
+ const unsigned short grid_last_line =
+ pl->y_start + pl->grid_height * pl->block_height;
+ const unsigned short process_lines =
+ pl->grid_height_per_slice * pl->block_height;
+
+ unsigned int process_lines_after_grid;
+ unsigned short first_process_lines;
+ unsigned short last_process_lines_in_grid;
+
+ unsigned short num_of_process_lines;
+ unsigned short num_of_sets;
+
+ if (pl->grid_height_per_slice == 0)
+ return -EINVAL;
+
+ if (pl->acc_enable && grid_last_line > pl->image_height)
+ return -EINVAL;
+
+ num_of_sets = pl->grid_height / pl->grid_height_per_slice;
+ if (num_of_sets * pl->grid_height_per_slice < pl->grid_height)
+ num_of_sets++;
+
+ /* Account for the two-line delay inside the FF */
+ if (pl->max_op == IMGU_ABI_AF_MAX_OPERATIONS) {
+ first_process_lines = process_lines + pl->y_start + 2;
+ last_process_lines_in_grid =
+ (grid_last_line - first_process_lines) -
+ ((num_of_sets - 2) * process_lines) + 4;
+ process_lines_after_grid =
+ pl->image_height - grid_last_line - 4;
+ } else {
+ first_process_lines = process_lines + pl->y_start;
+ last_process_lines_in_grid =
+ (grid_last_line - first_process_lines) -
+ ((num_of_sets - 2) * process_lines);
+ process_lines_after_grid = pl->image_height - grid_last_line;
+ }
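+
+ /*
+ * These formulas match the "grid smaller than image" example in the
+ * protocol comment above: an 8x8 block, 16x16 grid at y_start == 0
+ * with grid_height_per_slice == 10 gives process_lines == 80,
+ * first_process_lines == 80, num_of_sets == 2,
+ * last_process_lines_in_grid == 128 - 80 == 48 and
+ * process_lines_after_grid == image_height - 128 (non-AF branch).
+ */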
+
+ num_of_process_lines = num_of_sets;
+ if (process_lines_after_grid > 0)
+ num_of_process_lines++;
+
+ while (tr_idx < num_of_sets || pl_idx < num_of_process_lines) {
+ /* Read meta-data */
+ if (pl_idx >= 2 || (pl_idx == 1 && num_of_sets == 1)) {
+ if (op_idx >= pl->max_op || tr_idx >= pl->max_tr)
+ return -EINVAL;
+
+ p_op[op_idx].op_type =
+ IMGU_ABI_ACC_OPTYPE_TRANSFER_DATA;
+
+ if (tr_idx == num_of_sets - 1)
+ /* The last operation is always a tr */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_OPS;
+ else if (tr_idx == num_of_sets - 2)
+ if (process_lines_after_grid == 0)
+ /*
+ * No additional pl op left -
+ * this op is the last of the cycle
+ */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_ACK;
+ else
+ /*
+ * We still have to process lines after
+ * the grid, so there is one more pl op
+ */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_IDLE;
+ else
+ /* Default - usually there's a pl after a tr */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_IDLE;
+
+ op_idx++;
+ if (p_tr) {
+ p_tr[tr_idx].set_number = tr_set_num;
+ tr_set_num = 1 - tr_set_num;
+ }
+ tr_idx++;
+ }
+
+ /* process_lines */
+ if (pl_idx < num_of_process_lines) {
+ if (op_idx >= pl->max_op || pl_idx >= pl->max_tr)
+ return -EINVAL;
+
+ p_op[op_idx].op_type =
+ IMGU_ABI_ACC_OPTYPE_PROCESS_LINES;
+ if (pl_idx == 0)
+ if (num_of_process_lines == 1)
+ /* Only one pl op */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_ACK;
+ else
+ /* On init - do two pl ops */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_IDLE;
+ else
+ /* Usually pl is the end of the ack cycle */
+ p_op[op_idx].op_indicator =
+ IMGU_ABI_ACC_OP_END_OF_ACK;
+
+ op_idx++;
+
+ if (pl_idx == 0)
+ /* First process line */
+ p_pl[pl_idx].lines = first_process_lines;
+ else if (pl_idx == num_of_sets - 1)
+ /* Last in grid */
+ p_pl[pl_idx].lines = last_process_lines_in_grid;
+ else if (pl_idx == num_of_process_lines - 1)
+ /* After the grid */
+ p_pl[pl_idx].lines = process_lines_after_grid;
+ else
+ /* Inside the grid */
+ p_pl[pl_idx].lines = process_lines;
+
+ if (p_tr) {
+ p_pl[pl_idx].cfg_set = pl_cfg_set;
+ pl_cfg_set = 1 - pl_cfg_set;
+ }
+ pl_idx++;
+ }
+ }
+
+ return 0;
+}
+
+static int ipu3_css_af_ops_calc(struct ipu3_css *css, unsigned int pipe,
+ struct imgu_abi_af_config *af_config)
+{
+ struct imgu_abi_af_intra_frame_operations_data *to =
+ &af_config->operations_data;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+
+ struct process_lines pl = {
+ .image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
+ .grid_height = af_config->config.grid_cfg.height,
+ .block_height =
+ 1 << af_config->config.grid_cfg.block_height_log2,
+ .y_start = af_config->config.grid_cfg.y_start &
+ IPU3_UAPI_GRID_START_MASK,
+ .grid_height_per_slice =
+ af_config->stripes[0].grid_cfg.height_per_slice,
+ .max_op = IMGU_ABI_AF_MAX_OPERATIONS,
+ .max_tr = IMGU_ABI_AF_MAX_TRANSFERS,
+ .acc_enable = bi->info.isp.sp.enable.af,
+ };
+
+ return ipu3_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
+ NULL);
+}
+
+static int
+ipu3_css_awb_fr_ops_calc(struct ipu3_css *css, unsigned int pipe,
+ struct imgu_abi_awb_fr_config *awb_fr_config)
+{
+ struct imgu_abi_awb_fr_intra_frame_operations_data *to =
+ &awb_fr_config->operations_data;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ struct process_lines pl = {
+ .image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
+ .grid_height = awb_fr_config->config.grid_cfg.height,
+ .block_height =
+ 1 << awb_fr_config->config.grid_cfg.block_height_log2,
+ .y_start = awb_fr_config->config.grid_cfg.y_start &
+ IPU3_UAPI_GRID_START_MASK,
+ .grid_height_per_slice =
+ awb_fr_config->stripes[0].grid_cfg.height_per_slice,
+ .max_op = IMGU_ABI_AWB_FR_MAX_OPERATIONS,
+ .max_tr = IMGU_ABI_AWB_FR_MAX_PROCESS_LINES,
+ .acc_enable = bi->info.isp.sp.enable.awb_fr_acc,
+ };
+
+ return ipu3_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
+ NULL);
+}
+
+static int ipu3_css_awb_ops_calc(struct ipu3_css *css, unsigned int pipe,
+ struct imgu_abi_awb_config *awb_config)
+{
+ struct imgu_abi_awb_intra_frame_operations_data *to =
+ &awb_config->operations_data;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+
+ struct process_lines pl = {
+ .image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
+ .grid_height = awb_config->config.grid.height,
+ .block_height =
+ 1 << awb_config->config.grid.block_height_log2,
+ .y_start = awb_config->config.grid.y_start,
+ .grid_height_per_slice =
+ awb_config->stripes[0].grid.height_per_slice,
+ .max_op = IMGU_ABI_AWB_MAX_OPERATIONS,
+ .max_tr = IMGU_ABI_AWB_MAX_TRANSFERS,
+ .acc_enable = bi->info.isp.sp.enable.awb_acc,
+ };
+
+ return ipu3_css_acc_process_lines(&pl, to->ops, to->process_lines_data,
+ to->transfer_data);
+}
+
+static u16 ipu3_css_grid_end(u16 start, u8 width, u8 block_width_log2)
+{
+ return (start & IPU3_UAPI_GRID_START_MASK) +
+ (width << block_width_log2) - 1;
+}
+
+static void ipu3_css_grid_end_calc(struct ipu3_uapi_grid_config *grid_cfg)
+{
+ grid_cfg->x_end = ipu3_css_grid_end(grid_cfg->x_start, grid_cfg->width,
+ grid_cfg->block_width_log2);
+ grid_cfg->y_end = ipu3_css_grid_end(grid_cfg->y_start, grid_cfg->height,
+ grid_cfg->block_height_log2);
+}
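+
+/*
+ * For example (illustrative values): a grid of width == 16 blocks with
+ * block_width_log2 == 3 (8-pixel blocks) starting at x_start == 40 ends at
+ * 40 + (16 << 3) - 1 == 167, the last pixel column covered by the grid.
+ */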
+
+/****************** config computation *****************************/
+
+static int ipu3_css_cfg_acc_stripe(struct ipu3_css *css, unsigned int pipe,
+ struct imgu_abi_acc_param *acc)
+{
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ const struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ struct ipu3_css_scaler_info scaler_luma, scaler_chroma;
+ const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
+ const unsigned int f = IPU3_UAPI_ISP_VEC_ELEMS * 2;
+ unsigned int bds_ds, i;
+
+ memset(acc, 0, sizeof(*acc));
+
+ /* acc_param: osys_config */
+
+ if (ipu3_css_osys_calc(css, pipe, stripes, &acc->osys, &scaler_luma,
+ &scaler_chroma, acc->stripe.block_stripes))
+ return -EINVAL;
+
+ /* acc_param: stripe data */
+
+ /*
+ * For the striped case the approach is as follows:
+ * 1. down-scaled stripes are calculated - with 128 overlap
+ * (this is the main limiter, therefore it comes first)
+ * 2. input stripes are derived by up-scaling the down-scaled stripes
+ * (there are no alignment requirements on input stripes)
+ * 3. output stripes are derived from down-scaled stripes too
+ */
+
+ acc->stripe.num_of_stripes = stripes;
+ acc->stripe.input_frame.width =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
+ acc->stripe.input_frame.height =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
+ acc->stripe.input_frame.bayer_order =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
+
+ for (i = 0; i < stripes; i++)
+ acc->stripe.bds_out_stripes[i].height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->stripe.bds_out_stripes[0].offset = 0;
+ if (stripes <= 1) {
+ acc->stripe.bds_out_stripes[0].width =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f);
+ } else {
+ /* Image processing is divided into two stripes */
+ acc->stripe.bds_out_stripes[0].width =
+ acc->stripe.bds_out_stripes[1].width =
+ (css_pipe->rect[IPU3_CSS_RECT_BDS].width / 2 & ~(f - 1)) + f;
+ /*
+ * The sum of the two stripe widths must not be smaller than
+ * the output width and must be an even multiple of the
+ * overlap unit f.
+ */
+ if ((css_pipe->rect[IPU3_CSS_RECT_BDS].width / f & 1) !=
+ !!(css_pipe->rect[IPU3_CSS_RECT_BDS].width & (f - 1)))
+ acc->stripe.bds_out_stripes[0].width += f;
+ if ((css_pipe->rect[IPU3_CSS_RECT_BDS].width / f & 1) &&
+ (css_pipe->rect[IPU3_CSS_RECT_BDS].width & (f - 1))) {
+ acc->stripe.bds_out_stripes[0].width += f;
+ acc->stripe.bds_out_stripes[1].width += f;
+ }
+ /* Overlap between stripes is IPU3_UAPI_ISP_VEC_ELEMS * 4 */
+ acc->stripe.bds_out_stripes[1].offset =
+ acc->stripe.bds_out_stripes[0].width - 2 * f;
+ }
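+
+ /*
+ * Worked example (assuming IPU3_UAPI_ISP_VEC_ELEMS == 64, so
+ * f == 128): for a BDS width of 1920, both stripes start out at
+ * (960 & ~127) + 128 == 1024 wide; 1920 / 128 == 15 is odd while
+ * 1920 is already f-aligned, so stripe 0 grows to 1152. Stripe 1
+ * then begins at 1152 - 2 * 128 == 896, and 896 + 1024 == 1920:
+ * the two stripes cover the frame with a 2 * f overlap.
+ */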
+
+ acc->stripe.effective_stripes[0].height =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+ acc->stripe.effective_stripes[0].offset = 0;
+ acc->stripe.bds_out_stripes_no_overlap[0].height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->stripe.bds_out_stripes_no_overlap[0].offset = 0;
+ acc->stripe.output_stripes[0].height =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ acc->stripe.output_stripes[0].offset = 0;
+ if (stripes <= 1) {
+ acc->stripe.down_scaled_stripes[0].width =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ acc->stripe.down_scaled_stripes[0].height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->stripe.down_scaled_stripes[0].offset = 0;
+
+ acc->stripe.effective_stripes[0].width =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
+ acc->stripe.bds_out_stripes_no_overlap[0].width =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f);
+
+ acc->stripe.output_stripes[0].width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ } else { /* Two stripes */
+ bds_ds = css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width *
+ IMGU_BDS_GRANULARITY /
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+
+ acc->stripe.down_scaled_stripes[0] =
+ acc->stripe.bds_out_stripes[0];
+ acc->stripe.down_scaled_stripes[1] =
+ acc->stripe.bds_out_stripes[1];
+ if (!IS_ALIGNED(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f))
+ acc->stripe.down_scaled_stripes[1].width +=
+ (css_pipe->rect[IPU3_CSS_RECT_BDS].width
+ & (f - 1)) - f;
+
+ acc->stripe.effective_stripes[0].width = bds_ds *
+ acc->stripe.down_scaled_stripes[0].width /
+ IMGU_BDS_GRANULARITY;
+ acc->stripe.effective_stripes[1].width = bds_ds *
+ acc->stripe.down_scaled_stripes[1].width /
+ IMGU_BDS_GRANULARITY;
+ acc->stripe.effective_stripes[1].height =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+ acc->stripe.effective_stripes[1].offset = bds_ds *
+ acc->stripe.down_scaled_stripes[1].offset /
+ IMGU_BDS_GRANULARITY;
+
+ acc->stripe.bds_out_stripes_no_overlap[0].width =
+ acc->stripe.bds_out_stripes_no_overlap[1].offset =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, 2 * f) / 2;
+ acc->stripe.bds_out_stripes_no_overlap[1].width =
+ DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f)
+ / 2 * f;
+ acc->stripe.bds_out_stripes_no_overlap[1].height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+
+ acc->stripe.output_stripes[0].width =
+ acc->stripe.down_scaled_stripes[0].width - f;
+ acc->stripe.output_stripes[1].width =
+ acc->stripe.down_scaled_stripes[1].width - f;
+ acc->stripe.output_stripes[1].height =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ acc->stripe.output_stripes[1].offset =
+ acc->stripe.output_stripes[0].width;
+ }
+
+ acc->stripe.output_system_in_frame_width =
+ css_pipe->rect[IPU3_CSS_RECT_GDC].width;
+ acc->stripe.output_system_in_frame_height =
+ css_pipe->rect[IPU3_CSS_RECT_GDC].height;
+
+ acc->stripe.effective_frame_width =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
+ acc->stripe.bds_frame_width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ acc->stripe.out_frame_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ acc->stripe.out_frame_height =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ acc->stripe.gdc_in_buffer_width =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline /
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel;
+ acc->stripe.gdc_in_buffer_height =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
+ acc->stripe.gdc_in_buffer_offset_x = IMGU_GDC_BUF_X;
+ acc->stripe.gdc_in_buffer_offset_y = IMGU_GDC_BUF_Y;
+ acc->stripe.display_frame_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ acc->stripe.display_frame_height =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ acc->stripe.bds_aligned_frame_width =
+ roundup(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
+ 2 * IPU3_UAPI_ISP_VEC_ELEMS);
+
+ if (stripes > 1)
+ acc->stripe.half_overlap_vectors =
+ IMGU_STRIPE_FIXED_HALF_OVERLAP;
+ else
+ acc->stripe.half_overlap_vectors = 0;
+
+ return 0;
+}
+
+static void ipu3_css_cfg_acc_dvs(struct ipu3_css *css,
+ struct imgu_abi_acc_param *acc,
+ unsigned int pipe)
+{
+ unsigned int i;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ /* Disable DVS statistics */
+ acc->dvs_stat.operations_data.process_lines_data[0].lines =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->dvs_stat.operations_data.process_lines_data[0].cfg_set = 0;
+ acc->dvs_stat.operations_data.ops[0].op_type =
+ IMGU_ABI_ACC_OPTYPE_PROCESS_LINES;
+ acc->dvs_stat.operations_data.ops[0].op_indicator =
+ IMGU_ABI_ACC_OP_NO_OPS;
+ for (i = 0; i < IMGU_ABI_DVS_STAT_LEVELS; i++)
+ acc->dvs_stat.cfg.grd_config[i].enable = 0;
+}
+
+static void acc_bds_per_stripe_data(struct ipu3_css *css,
+ struct imgu_abi_acc_param *acc,
+ const int i, unsigned int pipe)
+{
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_en = 0;
+ acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_start = 0;
+ acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_end = 0;
+ acc->bds.per_stripe.aligned_data[i].data.hor_ctrl0 =
+ acc->bds.hor.hor_ctrl0;
+ acc->bds.per_stripe.aligned_data[i].data.hor_ctrl0.out_frame_width =
+ acc->stripe.down_scaled_stripes[i].width;
+ acc->bds.per_stripe.aligned_data[i].data.ver_ctrl1.out_frame_width =
+ acc->stripe.down_scaled_stripes[i].width;
+ acc->bds.per_stripe.aligned_data[i].data.ver_ctrl1.out_frame_height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+}
+
+/*
+ * Configure `acc' parameters. `acc_old' contains the old values (or is NULL)
+ * and `acc_user' contains new prospective values. `use' contains flags
+ * telling which fields to take from the old values (or generate if it is NULL)
+ * and which to take from the new user values.
+ */
+int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ struct imgu_abi_acc_param *acc,
+ struct imgu_abi_acc_param *acc_old,
+ struct ipu3_uapi_acc_param *acc_user)
+{
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ const struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
+ const unsigned int tnr_frame_width =
+ acc->stripe.bds_aligned_frame_width;
+ const unsigned int min_overlap = 10;
+ const struct v4l2_pix_format_mplane *pixm =
+ &css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
+ const struct ipu3_css_bds_config *cfg_bds;
+ struct imgu_abi_input_feeder_data *feeder_data;
+
+ unsigned int bds_ds, ofs_x, ofs_y, i, width, height;
+ u8 b_w_log2; /* Block width log2 */
+
+ /* Update stripe using chroma and luma */
+
+ if (ipu3_css_cfg_acc_stripe(css, pipe, acc))
+ return -EINVAL;
+
+ /* acc_param: input_feeder_config */
+
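+ /*
+ * Center the effective rectangle inside the input frame using even
+ * offsets, then bump ofs_x/ofs_y by one pixel depending on the Bayer
+ * order, apparently to present a consistent Bayer phase downstream.
+ */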
+ ofs_x = ((pixm->width -
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width) >> 1) & ~1;
+ ofs_x += css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ IMGU_ABI_BAYER_ORDER_RGGB ||
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ IMGU_ABI_BAYER_ORDER_GBRG ? 1 : 0;
+ ofs_y = ((pixm->height -
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height) >> 1) & ~1;
+ ofs_y += css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ IMGU_ABI_BAYER_ORDER_BGGR ||
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ IMGU_ABI_BAYER_ORDER_GBRG ? 1 : 0;
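+ /*
+ * The feeder addresses DDR in words: start_row_address points at the
+ * word containing the first pixel and start_pixel records the residual
+ * pixel offset within that word.
+ */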
+ acc->input_feeder.data.row_stride = pixm->plane_fmt[0].bytesperline;
+ acc->input_feeder.data.start_row_address =
+ ofs_x / IMGU_PIXELS_PER_WORD * IMGU_BYTES_PER_WORD +
+ ofs_y * acc->input_feeder.data.row_stride;
+ acc->input_feeder.data.start_pixel = ofs_x % IMGU_PIXELS_PER_WORD;
+
+ acc->input_feeder.data_per_stripe.input_feeder_data[0].data =
+ acc->input_feeder.data;
+
+ ofs_x += acc->stripe.effective_stripes[1].offset;
+
+ feeder_data =
+ &acc->input_feeder.data_per_stripe.input_feeder_data[1].data;
+ feeder_data->row_stride = acc->input_feeder.data.row_stride;
+ feeder_data->start_row_address =
+ ofs_x / IMGU_PIXELS_PER_WORD * IMGU_BYTES_PER_WORD +
+ ofs_y * acc->input_feeder.data.row_stride;
+ feeder_data->start_pixel = ofs_x % IMGU_PIXELS_PER_WORD;
+
+ /* acc_param: bnr_static_config */
+
+ /*
+ * Each parameter either originates from the user or falls back to the
+ * original default values if the user has never set it. When the user
+ * gives a new set of parameters, each chunk in the parameter structure
+ * has a flag use->xxx telling whether to take the user-provided value
+ * or not. If not, the parameter remains unchanged in the driver:
+ * its value is taken from acc_old.
+ */
+ if (use && use->acc_bnr) {
+ /* Take values from user */
+ acc->bnr = acc_user->bnr;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->bnr = acc_old->bnr;
+ } else {
+ /* Calculate from scratch */
+ acc->bnr = ipu3_css_bnr_defaults;
+ }
+
+ acc->bnr.column_size = tnr_frame_width;
+
+ /* acc_param: bnr_static_config_green_disparity */
+
+ if (use && use->acc_green_disparity) {
+ /* Take values from user */
+ acc->green_disparity = acc_user->green_disparity;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->green_disparity = acc_old->green_disparity;
+ } else {
+ /* Calculate from scratch */
+ memset(&acc->green_disparity, 0, sizeof(acc->green_disparity));
+ }
+
+ /* acc_param: dm_config */
+
+ if (use && use->acc_dm) {
+ /* Take values from user */
+ acc->dm = acc_user->dm;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->dm = acc_old->dm;
+ } else {
+ /* Calculate from scratch */
+ acc->dm = ipu3_css_dm_defaults;
+ }
+
+ acc->dm.frame_width = tnr_frame_width;
+
+ /* acc_param: ccm_mat_config */
+
+ if (use && use->acc_ccm) {
+ /* Take values from user */
+ acc->ccm = acc_user->ccm;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->ccm = acc_old->ccm;
+ } else {
+ /* Calculate from scratch */
+ acc->ccm = ipu3_css_ccm_defaults;
+ }
+
+ /* acc_param: gamma_config */
+
+ if (use && use->acc_gamma) {
+ /* Take values from user */
+ acc->gamma = acc_user->gamma;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->gamma = acc_old->gamma;
+ } else {
+ /* Calculate from scratch */
+ acc->gamma.gc_ctrl.enable = 1;
+ acc->gamma.gc_lut = ipu3_css_gamma_lut;
+ }
+
+ /* acc_param: csc_mat_config */
+
+ if (use && use->acc_csc) {
+ /* Take values from user */
+ acc->csc = acc_user->csc;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->csc = acc_old->csc;
+ } else {
+ /* Calculate from scratch */
+ acc->csc = ipu3_css_csc_defaults;
+ }
+
+ /* acc_param: cds_params */
+
+ if (use && use->acc_cds) {
+ /* Take values from user */
+ acc->cds = acc_user->cds;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->cds = acc_old->cds;
+ } else {
+ /* Calculate from scratch */
+ acc->cds = ipu3_css_cds_defaults;
+ }
+
+ /* acc_param: shd_config */
+
+ if (use && use->acc_shd) {
+ /* Take values from user */
+ acc->shd.shd = acc_user->shd.shd;
+ acc->shd.shd_lut = acc_user->shd.shd_lut;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->shd.shd = acc_old->shd.shd;
+ acc->shd.shd_lut = acc_old->shd.shd_lut;
+ } else {
+ /* Calculate from scratch */
+ acc->shd.shd = ipu3_css_shd_defaults;
+ memset(&acc->shd.shd_lut, 0, sizeof(acc->shd.shd_lut));
+ }
+
+ if (acc->shd.shd.grid.width <= 0)
+ return -EINVAL;
+
+ acc->shd.shd.grid.grid_height_per_slice =
+ IMGU_ABI_SHD_MAX_CELLS_PER_SET / acc->shd.shd.grid.width;
+
+ if (acc->shd.shd.grid.grid_height_per_slice <= 0)
+ return -EINVAL;
+
+ acc->shd.shd.general.init_set_vrt_offst_ul =
+ (-acc->shd.shd.grid.y_start >>
+ acc->shd.shd.grid.block_height_log2) %
+ acc->shd.shd.grid.grid_height_per_slice;
+
+ if (ipu3_css_shd_ops_calc(&acc->shd.shd_ops, &acc->shd.shd.grid,
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height))
+ return -EINVAL;
+
+ /* acc_param: dvs_stat_config */
+ ipu3_css_cfg_acc_dvs(css, acc, pipe);
+
+ /* acc_param: yuvp1_iefd_config */
+
+ if (use && use->acc_iefd) {
+ /* Take values from user */
+ acc->iefd = acc_user->iefd;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->iefd = acc_old->iefd;
+ } else {
+ /* Calculate from scratch */
+ acc->iefd = ipu3_css_iefd_defaults;
+ }
+
+ /* acc_param: yuvp1_yds_config yds_c0 */
+
+ if (use && use->acc_yds_c0) {
+ /* Take values from user */
+ acc->yds_c0 = acc_user->yds_c0;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->yds_c0 = acc_old->yds_c0;
+ } else {
+ /* Calculate from scratch */
+ acc->yds_c0 = ipu3_css_yds_defaults;
+ }
+
+ /* acc_param: yuvp1_chnr_config chnr_c0 */
+
+ if (use && use->acc_chnr_c0) {
+ /* Take values from user */
+ acc->chnr_c0 = acc_user->chnr_c0;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->chnr_c0 = acc_old->chnr_c0;
+ } else {
+ /* Calculate from scratch */
+ acc->chnr_c0 = ipu3_css_chnr_defaults;
+ }
+
+ /* acc_param: yuvp1_y_ee_nr_config */
+
+ if (use && use->acc_y_ee_nr) {
+ /* Take values from user */
+ acc->y_ee_nr = acc_user->y_ee_nr;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->y_ee_nr = acc_old->y_ee_nr;
+ } else {
+ /* Calculate from scratch */
+ acc->y_ee_nr = ipu3_css_y_ee_nr_defaults;
+ }
+
+ /* acc_param: yuvp1_yds_config yds */
+
+ if (use && use->acc_yds) {
+ /* Take values from user */
+ acc->yds = acc_user->yds;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->yds = acc_old->yds;
+ } else {
+ /* Calculate from scratch */
+ acc->yds = ipu3_css_yds_defaults;
+ }
+
+ /* acc_param: yuvp1_chnr_config chnr */
+
+ if (use && use->acc_chnr) {
+ /* Take values from user */
+ acc->chnr = acc_user->chnr;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->chnr = acc_old->chnr;
+ } else {
+ /* Calculate from scratch */
+ acc->chnr = ipu3_css_chnr_defaults;
+ }
+
+ /* acc_param: yuvp2_y_tm_lut_static_config */
+
+ for (i = 0; i < IMGU_ABI_YUVP2_YTM_LUT_ENTRIES; i++)
+ acc->ytm.entries[i] = i * 32;
+ acc->ytm.enable = 0; /* Always disabled on IPU3 */
+
+ /* acc_param: yuvp1_yds_config yds2 */
+
+ if (use && use->acc_yds2) {
+ /* Take values from user */
+ acc->yds2 = acc_user->yds2;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->yds2 = acc_old->yds2;
+ } else {
+ /* Calculate from scratch */
+ acc->yds2 = ipu3_css_yds_defaults;
+ }
+
+ /* acc_param: yuvp2_tcc_static_config */
+
+ if (use && use->acc_tcc) {
+ /* Take values from user */
+ acc->tcc = acc_user->tcc;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->tcc = acc_old->tcc;
+ } else {
+ /* Calculate from scratch */
+ memset(&acc->tcc, 0, sizeof(acc->tcc));
+
+ acc->tcc.gen_control.en = 1;
+ acc->tcc.gen_control.blend_shift = 3;
+ acc->tcc.gen_control.gain_according_to_y_only = 1;
+ acc->tcc.gen_control.gamma = 8;
+ acc->tcc.gen_control.delta = 0;
+
+ for (i = 0; i < IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS; i++) {
+ acc->tcc.macc_table.entries[i].a = 1024;
+ acc->tcc.macc_table.entries[i].b = 0;
+ acc->tcc.macc_table.entries[i].c = 0;
+ acc->tcc.macc_table.entries[i].d = 1024;
+ }
+
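+ /* Inverse-Y LUT: entries 0..5 stay zero, then a ~1/Y decay: 1023, 512, 256, ... */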
+ acc->tcc.inv_y_lut.entries[6] = 1023;
+ for (i = 7; i < IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS; i++)
+ acc->tcc.inv_y_lut.entries[i] = 1024 >> (i - 6);
+
+ acc->tcc.gain_pcwl = ipu3_css_tcc_gain_pcwl_lut;
+ acc->tcc.r_sqr_lut = ipu3_css_tcc_r_sqr_lut;
+ }
+
+ /* acc_param: dpc_config */
+
+ if (use && use->acc_dpc)
+ return -EINVAL; /* Not supported yet */
+
+ /* Just disable by default */
+ memset(&acc->dpc, 0, sizeof(acc->dpc));
+
+ /* acc_param: bds_config */
+
+ bds_ds = (css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height *
+ IMGU_BDS_GRANULARITY) / css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ if (bds_ds < IMGU_BDS_MIN_SF_INV ||
+ bds_ds - IMGU_BDS_MIN_SF_INV >= ARRAY_SIZE(ipu3_css_bds_configs))
+ return -EINVAL;
+
+ cfg_bds = &ipu3_css_bds_configs[bds_ds - IMGU_BDS_MIN_SF_INV];
+ acc->bds.hor.hor_ctrl1.hor_crop_en = 0;
+ acc->bds.hor.hor_ctrl1.hor_crop_start = 0;
+ acc->bds.hor.hor_ctrl1.hor_crop_end = 0;
+ acc->bds.hor.hor_ctrl0.sample_patrn_length =
+ cfg_bds->sample_patrn_length;
+ acc->bds.hor.hor_ctrl0.hor_ds_en = cfg_bds->hor_ds_en;
+ acc->bds.hor.hor_ctrl0.min_clip_val = IMGU_BDS_MIN_CLIP_VAL;
+ acc->bds.hor.hor_ctrl0.max_clip_val = IMGU_BDS_MAX_CLIP_VAL;
+ acc->bds.hor.hor_ctrl0.out_frame_width =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ acc->bds.hor.hor_ptrn_arr = cfg_bds->ptrn_arr;
+ acc->bds.hor.hor_phase_arr = cfg_bds->hor_phase_arr;
+ acc->bds.hor.hor_ctrl2.input_frame_height =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+ acc->bds.ver.ver_ctrl0.min_clip_val = IMGU_BDS_MIN_CLIP_VAL;
+ acc->bds.ver.ver_ctrl0.max_clip_val = IMGU_BDS_MAX_CLIP_VAL;
+ acc->bds.ver.ver_ctrl0.sample_patrn_length =
+ cfg_bds->sample_patrn_length;
+ acc->bds.ver.ver_ctrl0.ver_ds_en = cfg_bds->ver_ds_en;
+ acc->bds.ver.ver_ptrn_arr = cfg_bds->ptrn_arr;
+ acc->bds.ver.ver_phase_arr = cfg_bds->ver_phase_arr;
+ acc->bds.ver.ver_ctrl1.out_frame_width =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ acc->bds.ver.ver_ctrl1.out_frame_height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ for (i = 0; i < stripes; i++)
+ acc_bds_per_stripe_data(css, acc, i, pipe);
+
+ acc->bds.enabled = cfg_bds->hor_ds_en || cfg_bds->ver_ds_en;
+
+ /* acc_param: anr_config */
+
+ if (use && use->acc_anr) {
+ /* Take values from user */
+ acc->anr.transform = acc_user->anr.transform;
+ acc->anr.stitch.anr_stitch_en =
+ acc_user->anr.stitch.anr_stitch_en;
+ memcpy(acc->anr.stitch.pyramid, acc_user->anr.stitch.pyramid,
+ sizeof(acc->anr.stitch.pyramid));
+ } else if (acc_old) {
+ /* Use old value */
+ acc->anr.transform = acc_old->anr.transform;
+ acc->anr.stitch.anr_stitch_en =
+ acc_old->anr.stitch.anr_stitch_en;
+ memcpy(acc->anr.stitch.pyramid, acc_old->anr.stitch.pyramid,
+ sizeof(acc->anr.stitch.pyramid));
+ } else {
+ /* Calculate from scratch */
+ acc->anr = ipu3_css_anr_defaults;
+ }
+
+ /* Always enabled */
+ acc->anr.search.enable = 1;
+ acc->anr.transform.enable = 1;
+ acc->anr.tile2strm.enable = 1;
+ acc->anr.tile2strm.frame_width =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
+ acc->anr.search.frame_width = acc->anr.tile2strm.frame_width;
+ acc->anr.stitch.frame_width = acc->anr.tile2strm.frame_width;
+ acc->anr.tile2strm.frame_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->anr.search.frame_height = acc->anr.tile2strm.frame_height;
+ acc->anr.stitch.frame_height = acc->anr.tile2strm.frame_height;
+
+ width = ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
+ height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+
+ if (acc->anr.transform.xreset + width > IPU3_UAPI_ANR_MAX_RESET)
+ acc->anr.transform.xreset = IPU3_UAPI_ANR_MAX_RESET - width;
+ if (acc->anr.transform.xreset < IPU3_UAPI_ANR_MIN_RESET)
+ acc->anr.transform.xreset = IPU3_UAPI_ANR_MIN_RESET;
+
+ if (acc->anr.transform.yreset + height > IPU3_UAPI_ANR_MAX_RESET)
+ acc->anr.transform.yreset = IPU3_UAPI_ANR_MAX_RESET - height;
+ if (acc->anr.transform.yreset < IPU3_UAPI_ANR_MIN_RESET)
+ acc->anr.transform.yreset = IPU3_UAPI_ANR_MIN_RESET;
+
+ /* acc_param: awb_fr_config */
+
+ if (use && use->acc_awb_fr) {
+ /* Take values from user */
+ acc->awb_fr.config = acc_user->awb_fr;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->awb_fr.config = acc_old->awb_fr.config;
+ } else {
+ /* Set from scratch */
+ acc->awb_fr.config = ipu3_css_awb_fr_defaults;
+ }
+
+ ipu3_css_grid_end_calc(&acc->awb_fr.config.grid_cfg);
+
+ if (acc->awb_fr.config.grid_cfg.width <= 0)
+ return -EINVAL;
+
+ acc->awb_fr.config.grid_cfg.height_per_slice =
+ IMGU_ABI_AWB_FR_MAX_CELLS_PER_SET /
+ acc->awb_fr.config.grid_cfg.width;
+
+ for (i = 0; i < stripes; i++)
+ acc->awb_fr.stripes[i] = acc->awb_fr.config;
+
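+ /*
+ * Split the statistics grid across the two ISP stripes: if the grid
+ * lies entirely within one stripe, disable it for the other; otherwise
+ * divide the grid width at the stripe boundary and recompute
+ * x_start/x_end per stripe. The same pattern repeats below for AE, AF
+ * and AWB.
+ */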
+ if (acc->awb_fr.config.grid_cfg.x_start >=
+ acc->stripe.down_scaled_stripes[1].offset + min_overlap) {
+ /* Enable only for rightmost stripe, disable left */
+ acc->awb_fr.stripes[0].grid_cfg.y_start &=
+ ~IPU3_UAPI_GRID_Y_START_EN;
+ } else if (acc->awb_fr.config.grid_cfg.x_end <=
+ acc->stripe.bds_out_stripes[0].width - min_overlap) {
+ /* Enable only for leftmost stripe, disable right */
+ acc->awb_fr.stripes[1].grid_cfg.y_start &=
+ ~IPU3_UAPI_GRID_Y_START_EN;
+ } else {
+ /* Enable for both stripes */
+ u16 end; /* width for grid end */
+
+ acc->awb_fr.stripes[0].grid_cfg.width =
+ (acc->stripe.bds_out_stripes[0].width - min_overlap -
+ acc->awb_fr.config.grid_cfg.x_start + 1) >>
+ acc->awb_fr.config.grid_cfg.block_width_log2;
+ acc->awb_fr.stripes[1].grid_cfg.width =
+ acc->awb_fr.config.grid_cfg.width -
+ acc->awb_fr.stripes[0].grid_cfg.width;
+
+ b_w_log2 = acc->awb_fr.stripes[0].grid_cfg.block_width_log2;
+ end = ipu3_css_grid_end(acc->awb_fr.stripes[0].grid_cfg.x_start,
+ acc->awb_fr.stripes[0].grid_cfg.width,
+ b_w_log2);
+ acc->awb_fr.stripes[0].grid_cfg.x_end = end;
+
+ acc->awb_fr.stripes[1].grid_cfg.x_start =
+ (acc->awb_fr.stripes[0].grid_cfg.x_end + 1 -
+ acc->stripe.down_scaled_stripes[1].offset) &
+ IPU3_UAPI_GRID_START_MASK;
+ b_w_log2 = acc->awb_fr.stripes[1].grid_cfg.block_width_log2;
+ end = ipu3_css_grid_end(acc->awb_fr.stripes[1].grid_cfg.x_start,
+ acc->awb_fr.stripes[1].grid_cfg.width,
+ b_w_log2);
+ acc->awb_fr.stripes[1].grid_cfg.x_end = end;
+
+ /*
+ * To reduce the complexity of debubbling and loading the
+ * statistics, fix grid_height_per_slice to 1 for both
+ * stripes.
+ */
+ for (i = 0; i < stripes; i++)
+ acc->awb_fr.stripes[i].grid_cfg.height_per_slice = 1;
+ }
+
+ if (ipu3_css_awb_fr_ops_calc(css, pipe, &acc->awb_fr))
+ return -EINVAL;
+
+ /* acc_param: ae_config */
+
+ if (use && use->acc_ae) {
+ /* Take values from user */
+ acc->ae.grid_cfg = acc_user->ae.grid_cfg;
+ acc->ae.ae_ccm = acc_user->ae.ae_ccm;
+ for (i = 0; i < IPU3_UAPI_AE_WEIGHTS; i++)
+ acc->ae.weights[i] = acc_user->ae.weights[i];
+ } else if (acc_old) {
+ /* Use old value */
+ acc->ae.grid_cfg = acc_old->ae.grid_cfg;
+ acc->ae.ae_ccm = acc_old->ae.ae_ccm;
+ for (i = 0; i < IPU3_UAPI_AE_WEIGHTS; i++)
+ acc->ae.weights[i] = acc_old->ae.weights[i];
+ } else {
+ /* Set from scratch */
+ static const struct ipu3_uapi_ae_weight_elem
+ weight_def = { 1, 1, 1, 1, 1, 1, 1, 1 };
+
+ acc->ae.grid_cfg = ipu3_css_ae_grid_defaults;
+ acc->ae.ae_ccm = ipu3_css_ae_ccm_defaults;
+ for (i = 0; i < IPU3_UAPI_AE_WEIGHTS; i++)
+ acc->ae.weights[i] = weight_def;
+ }
+
+ b_w_log2 = acc->ae.grid_cfg.block_width_log2;
+ acc->ae.grid_cfg.x_end = ipu3_css_grid_end(acc->ae.grid_cfg.x_start,
+ acc->ae.grid_cfg.width,
+ b_w_log2);
+ b_w_log2 = acc->ae.grid_cfg.block_height_log2;
+ acc->ae.grid_cfg.y_end = ipu3_css_grid_end(acc->ae.grid_cfg.y_start,
+ acc->ae.grid_cfg.height,
+ b_w_log2);
+
+ for (i = 0; i < stripes; i++)
+ acc->ae.stripes[i].grid = acc->ae.grid_cfg;
+
+ if (acc->ae.grid_cfg.x_start >=
+ acc->stripe.down_scaled_stripes[1].offset) {
+ /* Enable only for rightmost stripe, disable left */
+ acc->ae.stripes[0].grid.ae_en = 0;
+ } else if (acc->ae.grid_cfg.x_end <=
+ acc->stripe.bds_out_stripes[0].width) {
+ /* Enable only for leftmost stripe, disable right */
+ acc->ae.stripes[1].grid.ae_en = 0;
+ } else {
+ /* Enable for both stripes */
+ u8 b_w_log2;
+
+ acc->ae.stripes[0].grid.width =
+ (acc->stripe.bds_out_stripes[0].width -
+ acc->ae.grid_cfg.x_start + 1) >>
+ acc->ae.grid_cfg.block_width_log2;
+
+ acc->ae.stripes[1].grid.width =
+ acc->ae.grid_cfg.width - acc->ae.stripes[0].grid.width;
+
+ b_w_log2 = acc->ae.stripes[0].grid.block_width_log2;
+ acc->ae.stripes[0].grid.x_end =
+ ipu3_css_grid_end(acc->ae.stripes[0].grid.x_start,
+ acc->ae.stripes[0].grid.width,
+ b_w_log2);
+
+ acc->ae.stripes[1].grid.x_start =
+ (acc->ae.stripes[0].grid.x_end + 1 -
+ acc->stripe.down_scaled_stripes[1].offset) &
+ IPU3_UAPI_GRID_START_MASK;
+ b_w_log2 = acc->ae.stripes[1].grid.block_width_log2;
+ acc->ae.stripes[1].grid.x_end =
+ ipu3_css_grid_end(acc->ae.stripes[1].grid.x_start,
+ acc->ae.stripes[1].grid.width,
+ b_w_log2);
+ }
+
+ /* acc_param: af_config */
+
+ if (use && use->acc_af) {
+ /* Take values from user */
+ acc->af.config.filter_config = acc_user->af.filter_config;
+ acc->af.config.grid_cfg = acc_user->af.grid_cfg;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->af.config = acc_old->af.config;
+ } else {
+ /* Set from scratch */
+ acc->af.config.filter_config =
+ ipu3_css_af_defaults.filter_config;
+ acc->af.config.grid_cfg = ipu3_css_af_defaults.grid_cfg;
+ }
+
+ ipu3_css_grid_end_calc(&acc->af.config.grid_cfg);
+
+ if (acc->af.config.grid_cfg.width <= 0)
+ return -EINVAL;
+
+ acc->af.config.grid_cfg.height_per_slice =
+ IMGU_ABI_AF_MAX_CELLS_PER_SET / acc->af.config.grid_cfg.width;
+ acc->af.config.frame_size.width =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
+ acc->af.config.frame_size.height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+
+ if (acc->stripe.bds_out_stripes[0].width <= min_overlap)
+ return -EINVAL;
+
+ for (i = 0; i < stripes; i++) {
+ acc->af.stripes[i].grid_cfg = acc->af.config.grid_cfg;
+ acc->af.stripes[i].frame_size.height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ acc->af.stripes[i].frame_size.width =
+ acc->stripe.bds_out_stripes[i].width;
+ }
+
+ if (acc->af.config.grid_cfg.x_start >=
+ acc->stripe.down_scaled_stripes[1].offset + min_overlap) {
+ /* Enable only for rightmost stripe, disable left */
+ acc->af.stripes[0].grid_cfg.y_start &=
+ ~IPU3_UAPI_GRID_Y_START_EN;
+ } else if (acc->af.config.grid_cfg.x_end <=
+ acc->stripe.bds_out_stripes[0].width - min_overlap) {
+ /* Enable only for leftmost stripe, disable right */
+ acc->af.stripes[1].grid_cfg.y_start &=
+ ~IPU3_UAPI_GRID_Y_START_EN;
+ } else {
+ /* Enable for both stripes */
+
+ acc->af.stripes[0].grid_cfg.width =
+ (acc->stripe.bds_out_stripes[0].width - min_overlap -
+ acc->af.config.grid_cfg.x_start + 1) >>
+ acc->af.config.grid_cfg.block_width_log2;
+ acc->af.stripes[1].grid_cfg.width =
+ acc->af.config.grid_cfg.width -
+ acc->af.stripes[0].grid_cfg.width;
+
+ b_w_log2 = acc->af.stripes[0].grid_cfg.block_width_log2;
+ acc->af.stripes[0].grid_cfg.x_end =
+ ipu3_css_grid_end(acc->af.stripes[0].grid_cfg.x_start,
+ acc->af.stripes[0].grid_cfg.width,
+ b_w_log2);
+
+ acc->af.stripes[1].grid_cfg.x_start =
+ (acc->af.stripes[0].grid_cfg.x_end + 1 -
+ acc->stripe.down_scaled_stripes[1].offset) &
+ IPU3_UAPI_GRID_START_MASK;
+
+ b_w_log2 = acc->af.stripes[1].grid_cfg.block_width_log2;
+ acc->af.stripes[1].grid_cfg.x_end =
+ ipu3_css_grid_end(acc->af.stripes[1].grid_cfg.x_start,
+ acc->af.stripes[1].grid_cfg.width,
+ b_w_log2);
+
+ /*
+ * To reduce the complexity of debubbling and loading the
+ * statistics, fix grid_height_per_slice to 1 for both stripes.
+ */
+ for (i = 0; i < stripes; i++)
+ acc->af.stripes[i].grid_cfg.height_per_slice = 1;
+ }
+
+ if (ipu3_css_af_ops_calc(css, pipe, &acc->af))
+ return -EINVAL;
+
+ /* acc_param: awb_config */
+
+ if (use && use->acc_awb) {
+ /* Take values from user */
+ acc->awb.config = acc_user->awb.config;
+ } else if (acc_old) {
+ /* Use old value */
+ acc->awb.config = acc_old->awb.config;
+ } else {
+ /* Set from scratch */
+ acc->awb.config = ipu3_css_awb_defaults;
+ }
+
+ if (acc->awb.config.grid.width <= 0)
+ return -EINVAL;
+
+ acc->awb.config.grid.height_per_slice =
+ IMGU_ABI_AWB_MAX_CELLS_PER_SET / acc->awb.config.grid.width;
+ ipu3_css_grid_end_calc(&acc->awb.config.grid);
+
+ for (i = 0; i < stripes; i++)
+ acc->awb.stripes[i] = acc->awb.config;
+
+ if (acc->awb.config.grid.x_start >=
+ acc->stripe.down_scaled_stripes[1].offset + min_overlap) {
+ /* Enable only for rightmost stripe, disable left */
+ acc->awb.stripes[0].rgbs_thr_b &= ~IPU3_UAPI_AWB_RGBS_THR_B_EN;
+ } else if (acc->awb.config.grid.x_end <=
+ acc->stripe.bds_out_stripes[0].width - min_overlap) {
+ /* Enable only for leftmost stripe, disable right */
+ acc->awb.stripes[1].rgbs_thr_b &= ~IPU3_UAPI_AWB_RGBS_THR_B_EN;
+ } else {
+ /* Enable for both stripes */
+
+ acc->awb.stripes[0].grid.width =
+ (acc->stripe.bds_out_stripes[0].width -
+ acc->awb.config.grid.x_start + 1) >>
+ acc->awb.config.grid.block_width_log2;
+ acc->awb.stripes[1].grid.width = acc->awb.config.grid.width -
+ acc->awb.stripes[0].grid.width;
+
+ b_w_log2 = acc->awb.stripes[0].grid.block_width_log2;
+ acc->awb.stripes[0].grid.x_end =
+ ipu3_css_grid_end(acc->awb.stripes[0].grid.x_start,
+ acc->awb.stripes[0].grid.width,
+ b_w_log2);
+
+ acc->awb.stripes[1].grid.x_start =
+ (acc->awb.stripes[0].grid.x_end + 1 -
+ acc->stripe.down_scaled_stripes[1].offset) &
+ IPU3_UAPI_GRID_START_MASK;
+
+ b_w_log2 = acc->awb.stripes[1].grid.block_width_log2;
+ acc->awb.stripes[1].grid.x_end =
+ ipu3_css_grid_end(acc->awb.stripes[1].grid.x_start,
+ acc->awb.stripes[1].grid.width,
+ b_w_log2);
+
+ /*
+ * To reduce the complexity of debubbling and loading the
+ * statistics, fix grid_height_per_slice to 1 for both stripes.
+ */
+ for (i = 0; i < stripes; i++)
+ acc->awb.stripes[i].grid.height_per_slice = 1;
+ }
+
+ if (ipu3_css_awb_ops_calc(css, pipe, &acc->awb))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Fill the indicated structure in `new_binary_params' from one of the
+ * possible sources, selected by the `use_user' flag: if the flag is true,
+ * copy from `user_setting'; if it is false, copy from `old_binary_params'.
+ * In both cases return NULL on success, or an error pointer on error.
+ * If the flag is false and `old_binary_params' is NULL, return a pointer
+ * to the structure inside `new_binary_params'. In that case the caller
+ * should calculate and fill the structure from scratch.
+ */
+static void *ipu3_css_cfg_copy(struct ipu3_css *css,
+ unsigned int pipe, bool use_user,
+ void *user_setting, void *old_binary_params,
+ void *new_binary_params,
+ enum imgu_abi_memories m,
+ struct imgu_fw_isp_parameter *par,
+ size_t par_size)
+{
+ const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM;
+ void *new_setting, *old_setting;
+
+ new_setting = ipu3_css_fw_pipeline_params(css, pipe, c, m, par,
+ par_size, new_binary_params);
+ if (!new_setting)
+ return ERR_PTR(-EPROTO); /* Corrupted firmware */
+
+ if (use_user) {
+ /* Take new user parameters */
+ memcpy(new_setting, user_setting, par_size);
+ } else if (old_binary_params) {
+ /* Take previous value */
+ old_setting = ipu3_css_fw_pipeline_params(css, pipe, c, m, par,
+ par_size,
+ old_binary_params);
+ if (!old_setting)
+ return ERR_PTR(-EPROTO);
+ memcpy(new_setting, old_setting, par_size);
+ } else {
+ return new_setting; /* Need to calculate */
+ }
+
+ return NULL; /* Copied from other value */
+}
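+/*
+ * Note the convention used by the callers below: a NULL return means the
+ * value was copied and nothing more is needed; a valid pointer means the
+ * caller must generate defaults into it; an IS_ERR() pointer marks
+ * corrupted firmware, which the callers aggregate into a single -EPROTO.
+ */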
+
+/*
+ * Configure VMEM0 parameters (late binding parameters).
+ */
+int ipu3_css_cfg_vmem0(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ void *vmem0, void *vmem0_old,
+ struct ipu3_uapi_params *user)
+{
+ const struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css->pipes[pipe].bindex];
+ struct imgu_fw_param_memory_offsets *pofs = (void *)css->fwp +
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM];
+ struct ipu3_uapi_isp_lin_vmem_params *lin_vmem = NULL;
+ struct ipu3_uapi_isp_tnr3_vmem_params *tnr_vmem = NULL;
+ struct ipu3_uapi_isp_xnr3_vmem_params *xnr_vmem = NULL;
+ const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM;
+ const enum imgu_abi_memories m = IMGU_ABI_MEM_ISP_VMEM0;
+ unsigned int i;
+
+ /* Configure VMEM0 */
+
+ memset(vmem0, 0, bi->info.isp.sp.mem_initializers.params[c][m].size);
+
+ /* Configure Linearization VMEM0 parameters */
+
+ lin_vmem = ipu3_css_cfg_copy(css, pipe, use && use->lin_vmem_params,
+ &user->lin_vmem_params, vmem0_old, vmem0,
+ m, &pofs->vmem.lin, sizeof(*lin_vmem));
+ if (!IS_ERR_OR_NULL(lin_vmem)) {
+ /* Generate parameter from scratch */
+ for (i = 0; i < IPU3_UAPI_LIN_LUT_SIZE; i++) {
+ lin_vmem->lin_lutlow_gr[i] = 32 * i;
+ lin_vmem->lin_lutlow_r[i] = 32 * i;
+ lin_vmem->lin_lutlow_b[i] = 32 * i;
+ lin_vmem->lin_lutlow_gb[i] = 32 * i;
+
+ lin_vmem->lin_lutdif_gr[i] = 32;
+ lin_vmem->lin_lutdif_r[i] = 32;
+ lin_vmem->lin_lutdif_b[i] = 32;
+ lin_vmem->lin_lutdif_gb[i] = 32;
+ }
+ }
+
+ /* Configure TNR3 VMEM parameters */
+ if (css->pipes[pipe].pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ tnr_vmem = ipu3_css_cfg_copy(css, pipe,
+ use && use->tnr3_vmem_params,
+ &user->tnr3_vmem_params,
+ vmem0_old, vmem0, m,
+ &pofs->vmem.tnr3,
+ sizeof(*tnr_vmem));
+ if (!IS_ERR_OR_NULL(tnr_vmem)) {
+ /* Generate parameter from scratch */
+ for (i = 0; i < IPU3_UAPI_ISP_TNR3_VMEM_LEN; i++)
+ tnr_vmem->sigma[i] = 256;
+ }
+ }
+ i = IPU3_UAPI_ISP_TNR3_VMEM_LEN;
+
+ /* Configure XNR3 VMEM parameters */
+
+ xnr_vmem = ipu3_css_cfg_copy(css, pipe, use && use->xnr3_vmem_params,
+ &user->xnr3_vmem_params, vmem0_old, vmem0,
+ m, &pofs->vmem.xnr3, sizeof(*xnr_vmem));
+ if (!IS_ERR_OR_NULL(xnr_vmem)) {
+ xnr_vmem->x[i] = ipu3_css_xnr3_vmem_defaults.x
+ [i % IMGU_XNR3_VMEM_LUT_LEN];
+ xnr_vmem->a[i] = ipu3_css_xnr3_vmem_defaults.a
+ [i % IMGU_XNR3_VMEM_LUT_LEN];
+ xnr_vmem->b[i] = ipu3_css_xnr3_vmem_defaults.b
+ [i % IMGU_XNR3_VMEM_LUT_LEN];
+ xnr_vmem->c[i] = ipu3_css_xnr3_vmem_defaults.c
+ [i % IMGU_XNR3_VMEM_LUT_LEN];
+ }
+
+ return IS_ERR(lin_vmem) || IS_ERR(tnr_vmem) || IS_ERR(xnr_vmem) ?
+ -EPROTO : 0;
+}
+
+/*
+ * Configure DMEM0 parameters (late binding parameters).
+ */
+int ipu3_css_cfg_dmem0(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ void *dmem0, void *dmem0_old,
+ struct ipu3_uapi_params *user)
+{
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ const struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ struct imgu_fw_param_memory_offsets *pofs = (void *)css->fwp +
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM];
+
+ struct ipu3_uapi_isp_tnr3_params *tnr_dmem = NULL;
+ struct ipu3_uapi_isp_xnr3_params *xnr_dmem;
+
+ const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM;
+ const enum imgu_abi_memories m = IMGU_ABI_MEM_ISP_DMEM0;
+
+ /* Configure DMEM0 */
+
+ memset(dmem0, 0, bi->info.isp.sp.mem_initializers.params[c][m].size);
+
+ /* Configure TNR3 DMEM0 parameters */
+ if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ tnr_dmem = ipu3_css_cfg_copy(css, pipe,
+ use && use->tnr3_dmem_params,
+ &user->tnr3_dmem_params,
+ dmem0_old, dmem0, m,
+ &pofs->dmem.tnr3,
+ sizeof(*tnr_dmem));
+ if (!IS_ERR_OR_NULL(tnr_dmem)) {
+ /* Generate parameter from scratch */
+ tnr_dmem->knee_y1 = 768;
+ tnr_dmem->knee_y2 = 1280;
+ }
+ }
+
+ /* Configure XNR3 DMEM0 parameters */
+
+ xnr_dmem = ipu3_css_cfg_copy(css, pipe, use && use->xnr3_dmem_params,
+ &user->xnr3_dmem_params, dmem0_old, dmem0,
+ m, &pofs->dmem.xnr3, sizeof(*xnr_dmem));
+ if (!IS_ERR_OR_NULL(xnr_dmem)) {
+ /* Generate parameter from scratch */
+ xnr_dmem->alpha.y0 = 2047;
+ xnr_dmem->alpha.u0 = 2047;
+ xnr_dmem->alpha.v0 = 2047;
+ }
+
+ return IS_ERR(tnr_dmem) || IS_ERR(xnr_dmem) ? -EPROTO : 0;
+}
+
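+/*
+ * A sketch of the geometry emitted below (illustrative): the output
+ * frame is tiled into blocks_y rows of IMGU_DVS_BLOCK_W x
+ * IMGU_DVS_BLOCK_H blocks; luma blocks are emitted in pairs and each
+ * pair is followed by one half-height chroma block, every block
+ * carrying the global warp parameters plus its own in_addr_offset
+ * into the input frame.
+ */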
+/* Generate unity morphing table without morphing effect */
+void ipu3_css_cfg_gdc_table(struct imgu_abi_gdc_warp_param *gdc,
+ int frame_in_x, int frame_in_y,
+ int frame_out_x, int frame_out_y,
+ int env_w, int env_h)
+{
+ static const unsigned int FRAC_BITS = IMGU_ABI_GDC_FRAC_BITS;
+ static const unsigned int XMEM_ALIGN = 1 << 4;
+ const unsigned int XMEM_ALIGN_MASK = ~(XMEM_ALIGN - 1);
+ static const unsigned int BCI_ENV = 4;
+ static const unsigned int BYP = 2; /* Bytes per pixel */
+ const unsigned int OFFSET_X = 2 * IMGU_DVS_BLOCK_W + env_w + 1;
+ const unsigned int OFFSET_Y = IMGU_DVS_BLOCK_H + env_h + 1;
+
+ struct imgu_abi_gdc_warp_param gdc_luma, gdc_chroma;
+
+ unsigned int blocks_x = ALIGN(DIV_ROUND_UP(frame_out_x,
+ IMGU_DVS_BLOCK_W), 2);
+ unsigned int blocks_y = DIV_ROUND_UP(frame_out_y, IMGU_DVS_BLOCK_H);
+ unsigned int y0, x0, x1, x, y;
+
+ /* Global luma settings */
+ gdc_luma.origin_x = 0;
+ gdc_luma.origin_y = 0;
+ gdc_luma.p0_x = (OFFSET_X - (OFFSET_X & XMEM_ALIGN_MASK)) << FRAC_BITS;
+ gdc_luma.p0_y = 0;
+ gdc_luma.p1_x = gdc_luma.p0_x + (IMGU_DVS_BLOCK_W << FRAC_BITS);
+ gdc_luma.p1_y = gdc_luma.p0_y;
+ gdc_luma.p2_x = gdc_luma.p0_x;
+ gdc_luma.p2_y = gdc_luma.p0_y + (IMGU_DVS_BLOCK_H << FRAC_BITS);
+ gdc_luma.p3_x = gdc_luma.p1_x;
+ gdc_luma.p3_y = gdc_luma.p2_y;
+
+ gdc_luma.in_block_width = IMGU_DVS_BLOCK_W + BCI_ENV +
+ OFFSET_X - (OFFSET_X & XMEM_ALIGN_MASK);
+ gdc_luma.in_block_width_a = DIV_ROUND_UP(gdc_luma.in_block_width,
+ IPU3_UAPI_ISP_VEC_ELEMS);
+ gdc_luma.in_block_width_b = DIV_ROUND_UP(gdc_luma.in_block_width,
+ IMGU_ABI_ISP_DDR_WORD_BYTES /
+ BYP);
+ gdc_luma.in_block_height = IMGU_DVS_BLOCK_H + BCI_ENV;
+ gdc_luma.padding = 0;
+
+ /* Global chroma settings */
+ gdc_chroma.origin_x = 0;
+ gdc_chroma.origin_y = 0;
+ gdc_chroma.p0_x = (OFFSET_X / 2 - (OFFSET_X / 2 & XMEM_ALIGN_MASK)) <<
+ FRAC_BITS;
+ gdc_chroma.p0_y = 0;
+ gdc_chroma.p1_x = gdc_chroma.p0_x + (IMGU_DVS_BLOCK_W << FRAC_BITS);
+ gdc_chroma.p1_y = gdc_chroma.p0_y;
+ gdc_chroma.p2_x = gdc_chroma.p0_x;
+ gdc_chroma.p2_y = gdc_chroma.p0_y + (IMGU_DVS_BLOCK_H / 2 << FRAC_BITS);
+ gdc_chroma.p3_x = gdc_chroma.p1_x;
+ gdc_chroma.p3_y = gdc_chroma.p2_y;
+
+ gdc_chroma.in_block_width = IMGU_DVS_BLOCK_W + BCI_ENV;
+ gdc_chroma.in_block_width_a = DIV_ROUND_UP(gdc_chroma.in_block_width,
+ IPU3_UAPI_ISP_VEC_ELEMS);
+ gdc_chroma.in_block_width_b = DIV_ROUND_UP(gdc_chroma.in_block_width,
+ IMGU_ABI_ISP_DDR_WORD_BYTES /
+ BYP);
+ gdc_chroma.in_block_height = IMGU_DVS_BLOCK_H / 2 + BCI_ENV;
+ gdc_chroma.padding = 0;
+
+ /* Calculate block offsets for luma and chroma */
+ for (y0 = 0; y0 < blocks_y; y0++) {
+ for (x0 = 0; x0 < blocks_x / 2; x0++) {
+ for (x1 = 0; x1 < 2; x1++) {
+ /* Luma blocks */
+ x = (x0 * 2 + x1) * IMGU_DVS_BLOCK_W + OFFSET_X;
+ x &= XMEM_ALIGN_MASK;
+ y = y0 * IMGU_DVS_BLOCK_H + OFFSET_Y;
+ *gdc = gdc_luma;
+ gdc->in_addr_offset =
+ (y * frame_in_x + x) * BYP;
+ gdc++;
+ }
+
+ /* Chroma block */
+ x = x0 * IMGU_DVS_BLOCK_W + OFFSET_X / 2;
+ x &= XMEM_ALIGN_MASK;
+ y = y0 * (IMGU_DVS_BLOCK_H / 2) + OFFSET_Y / 2;
+ *gdc = gdc_chroma;
+ gdc->in_addr_offset = (y * frame_in_x + x) * BYP;
+ gdc++;
+ }
+ }
+}
diff --git a/drivers/staging/media/ipu3/ipu3-css-params.h b/drivers/staging/media/ipu3/ipu3-css-params.h
new file mode 100644
index 000000000000..f3a0a47117a4
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-params.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_PARAMS_H
+#define __IPU3_PARAMS_H
+
+int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ struct imgu_abi_acc_param *acc,
+ struct imgu_abi_acc_param *acc_old,
+ struct ipu3_uapi_acc_param *acc_user);
+
+int ipu3_css_cfg_vmem0(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ void *vmem0, void *vmem0_old,
+ struct ipu3_uapi_params *user);
+
+int ipu3_css_cfg_dmem0(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
+ void *dmem0, void *dmem0_old,
+ struct ipu3_uapi_params *user);
+
+void ipu3_css_cfg_gdc_table(struct imgu_abi_gdc_warp_param *gdc,
+ int frame_in_x, int frame_in_y,
+ int frame_out_x, int frame_out_y,
+ int env_w, int env_h);
+
+#endif /*__IPU3_PARAMS_H */
diff --git a/drivers/staging/media/ipu3/ipu3-css-pool.c b/drivers/staging/media/ipu3/ipu3-css-pool.c
new file mode 100644
index 000000000000..6f271f81669b
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-pool.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/device.h>
+
+#include "ipu3.h"
+#include "ipu3-css-pool.h"
+#include "ipu3-dmamap.h"
+
+int ipu3_css_dma_buffer_resize(struct imgu_device *imgu,
+ struct ipu3_css_map *map, size_t size)
+{
+ if (map->size < size && map->vaddr) {
+ dev_warn(&imgu->pci_dev->dev, "dma buf resized from %zu to %zu\n",
+ map->size, size);
+
+ ipu3_dmamap_free(imgu, map);
+ if (!ipu3_dmamap_alloc(imgu, map, size))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ipu3_css_pool_cleanup(struct imgu_device *imgu, struct ipu3_css_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < IPU3_CSS_POOL_SIZE; i++)
+ ipu3_dmamap_free(imgu, &pool->entry[i].param);
+}
+
+int ipu3_css_pool_init(struct imgu_device *imgu, struct ipu3_css_pool *pool,
+ size_t size)
+{
+ unsigned int i;
+
+ for (i = 0; i < IPU3_CSS_POOL_SIZE; i++) {
+ pool->entry[i].valid = false;
+ if (size == 0) {
+ pool->entry[i].param.vaddr = NULL;
+ continue;
+ }
+
+ if (!ipu3_dmamap_alloc(imgu, &pool->entry[i].param, size))
+ goto fail;
+ }
+
+ pool->last = IPU3_CSS_POOL_SIZE;
+
+ return 0;
+
+fail:
+ ipu3_css_pool_cleanup(imgu, pool);
+ return -ENOMEM;
+}
+
+/*
+ * Allocate a new parameter buffer by recycling the oldest entry in the pool.
+ */
+void ipu3_css_pool_get(struct ipu3_css_pool *pool)
+{
+ /* Get the oldest entry */
+ u32 n = (pool->last + 1) % IPU3_CSS_POOL_SIZE;
+
+ pool->entry[n].valid = true;
+ pool->last = n;
+}
+
+/*
+ * Undo, for all practical purposes, the effect of pool_get().
+ */
+void ipu3_css_pool_put(struct ipu3_css_pool *pool)
+{
+ pool->entry[pool->last].valid = false;
+ pool->last = (pool->last + IPU3_CSS_POOL_SIZE - 1) % IPU3_CSS_POOL_SIZE;
+}
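+/*
+ * The pool is a fixed-size ring: ipu3_css_pool_get() advances the write
+ * pointer `last' and marks the recycled slot valid, ipu3_css_pool_put()
+ * rolls the pointer back, and ipu3_css_pool_last() indexes backwards
+ * from `last', all modulo IPU3_CSS_POOL_SIZE.
+ */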
+
+/**
+ * ipu3_css_pool_last - Retrieve the nth pool entry from the last
+ *
+ * @pool: a pointer to &struct ipu3_css_pool.
+ * @n: the distance from the last index.
+ *
+ * Returns:
+ * The nth entry from the last, or a null map if no frame is stored there.
+ */
+const struct ipu3_css_map *
+ipu3_css_pool_last(struct ipu3_css_pool *pool, unsigned int n)
+{
+ static const struct ipu3_css_map null_map = { 0 };
+ int i = (pool->last + IPU3_CSS_POOL_SIZE - n) % IPU3_CSS_POOL_SIZE;
+
+ WARN_ON(n >= IPU3_CSS_POOL_SIZE);
+
+ if (!pool->entry[i].valid)
+ return &null_map;
+
+ return &pool->entry[i].param;
+}
diff --git a/drivers/staging/media/ipu3/ipu3-css-pool.h b/drivers/staging/media/ipu3/ipu3-css-pool.h
new file mode 100644
index 000000000000..2657c39a4d71
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css-pool.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_UTIL_H
+#define __IPU3_UTIL_H
+
+struct device;
+struct imgu_device;
+
+#define IPU3_CSS_POOL_SIZE 4
+
+/**
+ * ipu3_css_map - store DMA mapping info for buffer
+ *
+ * @size: size of the buffer in bytes.
+ * @vaddr: kernel virtual address.
+ * @daddr: iova dma address to access IPU3.
+ * @vma: private, a pointer to &struct vm_struct,
+ * used for ipu3_dmamap_free.
+ */
+struct ipu3_css_map {
+ size_t size;
+ void *vaddr;
+ dma_addr_t daddr;
+ struct vm_struct *vma;
+};
+
+/**
+ * ipu3_css_pool - circular buffer pool definition
+ *
+ * @entry: array with IPU3_CSS_POOL_SIZE elements.
+ * @entry.param: a &struct ipu3_css_map for storing the mem mapping.
+ * @entry.valid: used to mark if the entry has valid data.
+ * @last: write pointer, initialized to IPU3_CSS_POOL_SIZE.
+ */
+struct ipu3_css_pool {
+ struct {
+ struct ipu3_css_map param;
+ bool valid;
+ } entry[IPU3_CSS_POOL_SIZE];
+ u32 last;
+};
+
+int ipu3_css_dma_buffer_resize(struct imgu_device *imgu,
+ struct ipu3_css_map *map, size_t size);
+void ipu3_css_pool_cleanup(struct imgu_device *imgu,
+ struct ipu3_css_pool *pool);
+int ipu3_css_pool_init(struct imgu_device *imgu, struct ipu3_css_pool *pool,
+ size_t size);
+void ipu3_css_pool_get(struct ipu3_css_pool *pool);
+void ipu3_css_pool_put(struct ipu3_css_pool *pool);
+const struct ipu3_css_map *ipu3_css_pool_last(struct ipu3_css_pool *pool,
+ u32 last);
+
+#endif
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
new file mode 100644
index 000000000000..44c55639389a
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -0,0 +1,2391 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/device.h>
+#include <linux/iopoll.h>
+
+#include "ipu3-css.h"
+#include "ipu3-css-fw.h"
+#include "ipu3-css-params.h"
+#include "ipu3-dmamap.h"
+#include "ipu3-tables.h"
+
+/* IRQ configuration */
+#define IMGU_IRQCTRL_IRQ_MASK (IMGU_IRQCTRL_IRQ_SP1 | \
+ IMGU_IRQCTRL_IRQ_SP2 | \
+ IMGU_IRQCTRL_IRQ_SW_PIN(0) | \
+ IMGU_IRQCTRL_IRQ_SW_PIN(1))
+
+#define IPU3_CSS_FORMAT_BPP_DEN 50 /* Denominator */
+
+/* Some sane limits for resolutions */
+#define IPU3_CSS_MIN_RES 32
+#define IPU3_CSS_MAX_H 3136
+#define IPU3_CSS_MAX_W 4224
+
+/* The filter size from graph settings is fixed at 4 */
+#define FILTER_SIZE 4
+#define MIN_ENVELOPE 8
+
+/*
+ * Pre-allocated buffer sizes for the CSS ABI and for the auxiliary
+ * frames after BDS and before GDC. These values should be tuned big
+ * enough to avoid buffer re-allocation while streaming, in order to
+ * keep streaming latency low.
+ */
+#define CSS_ABI_SIZE 136
+#define CSS_BDS_SIZE (4480 * 3200 * 3)
+#define CSS_GDC_SIZE (4224 * 3200 * 12 / 8)
+
+#define IPU3_CSS_QUEUE_TO_FLAGS(q) (1 << (q))
+#define IPU3_CSS_FORMAT_FL_IN \
+ IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_IN)
+#define IPU3_CSS_FORMAT_FL_OUT \
+ IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_OUT)
+#define IPU3_CSS_FORMAT_FL_VF \
+ IPU3_CSS_QUEUE_TO_FLAGS(IPU3_CSS_QUEUE_VF)
+
+/* Formats supported by IPU3 Camera Sub System */
+static const struct ipu3_css_format ipu3_css_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .frame_format = IMGU_ABI_FRAME_FORMAT_NV12,
+ .osys_format = IMGU_ABI_OSYS_FORMAT_NV12,
+ .osys_tiling = IMGU_ABI_OSYS_TILING_NONE,
+ .bytesperpixel_num = 1 * IPU3_CSS_FORMAT_BPP_DEN,
+ .chroma_decim = 4,
+ .width_align = IPU3_UAPI_ISP_VEC_ELEMS,
+ .flags = IPU3_CSS_FORMAT_FL_OUT | IPU3_CSS_FORMAT_FL_VF,
+ }, {
+ /* Each 32 bytes contains 25 10-bit pixels */
+ .pixelformat = V4L2_PIX_FMT_IPU3_SBGGR10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
+ .bayer_order = IMGU_ABI_BAYER_ORDER_BGGR,
+ .bit_depth = 10,
+ .bytesperpixel_num = 64,
+ .width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
+ .flags = IPU3_CSS_FORMAT_FL_IN,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_IPU3_SGBRG10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
+ .bayer_order = IMGU_ABI_BAYER_ORDER_GBRG,
+ .bit_depth = 10,
+ .bytesperpixel_num = 64,
+ .width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
+ .flags = IPU3_CSS_FORMAT_FL_IN,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_IPU3_SGRBG10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
+ .bayer_order = IMGU_ABI_BAYER_ORDER_GRBG,
+ .bit_depth = 10,
+ .bytesperpixel_num = 64,
+ .width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
+ .flags = IPU3_CSS_FORMAT_FL_IN,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_IPU3_SRGGB10,
+ .colorspace = V4L2_COLORSPACE_RAW,
+ .frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
+ .bayer_order = IMGU_ABI_BAYER_ORDER_RGGB,
+ .bit_depth = 10,
+ .bytesperpixel_num = 64,
+ .width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
+ .flags = IPU3_CSS_FORMAT_FL_IN,
+ },
+};
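+/*
+ * bytesperpixel_num is a fixed-point value with denominator
+ * IPU3_CSS_FORMAT_BPP_DEN (50): NV12 luma is 50/50 = 1 byte per pixel,
+ * while packed raw is 64/50 bytes per pixel, i.e. 64 bytes per 50
+ * 10-bit pixels.
+ */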
+
+static const struct {
+ enum imgu_abi_queue_id qid;
+ size_t ptr_ofs;
+} ipu3_css_queues[IPU3_CSS_QUEUES] = {
+ [IPU3_CSS_QUEUE_IN] = {
+ IMGU_ABI_QUEUE_C_ID,
+ offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
+ },
+ [IPU3_CSS_QUEUE_OUT] = {
+ IMGU_ABI_QUEUE_D_ID,
+ offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
+ },
+ [IPU3_CSS_QUEUE_VF] = {
+ IMGU_ABI_QUEUE_E_ID,
+ offsetof(struct imgu_abi_buffer, payload.frame.frame_data)
+ },
+ [IPU3_CSS_QUEUE_STAT_3A] = {
+ IMGU_ABI_QUEUE_F_ID,
+ offsetof(struct imgu_abi_buffer, payload.s3a.data_ptr)
+ },
+};
+
+/* Initialize queue based on given format, adjust format as needed */
+static int ipu3_css_queue_init(struct ipu3_css_queue *queue,
+ struct v4l2_pix_format_mplane *fmt, u32 flags)
+{
+ struct v4l2_pix_format_mplane *const f = &queue->fmt.mpix;
+ unsigned int i;
+ u32 sizeimage;
+
+ INIT_LIST_HEAD(&queue->bufs);
+
+ queue->css_fmt = NULL; /* Disable */
+ if (!fmt)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ipu3_css_formats); i++) {
+ if (!(ipu3_css_formats[i].flags & flags))
+ continue;
+ queue->css_fmt = &ipu3_css_formats[i];
+ if (ipu3_css_formats[i].pixelformat == fmt->pixelformat)
+ break;
+ }
+ if (!queue->css_fmt)
+ return -EINVAL; /* Could not find any suitable format */
+
+ queue->fmt.mpix = *fmt;
+
+ f->width = ALIGN(clamp_t(u32, f->width,
+ IPU3_CSS_MIN_RES, IPU3_CSS_MAX_W), 2);
+ f->height = ALIGN(clamp_t(u32, f->height,
+ IPU3_CSS_MIN_RES, IPU3_CSS_MAX_H), 2);
+ queue->width_pad = ALIGN(f->width, queue->css_fmt->width_align);
+ if (queue->css_fmt->frame_format != IMGU_ABI_FRAME_FORMAT_RAW_PACKED)
+ f->plane_fmt[0].bytesperline = DIV_ROUND_UP(queue->width_pad *
+ queue->css_fmt->bytesperpixel_num,
+ IPU3_CSS_FORMAT_BPP_DEN);
+ else
+ /*
+ * For packed raw, bytesperline is derived from the width in units
+ * of 50 pixels: each 64-byte word holds 50 10-bit pixels (2 x 25
+ * pixels per 32 bytes).
+ */
+ f->plane_fmt[0].bytesperline =
+ DIV_ROUND_UP(f->width,
+ IPU3_CSS_FORMAT_BPP_DEN) *
+ queue->css_fmt->bytesperpixel_num;
+
+ sizeimage = f->height * f->plane_fmt[0].bytesperline;
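+ /* e.g. NV12 has chroma_decim == 4, giving sizeimage = bpl * height * 3 / 2 */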
+ if (queue->css_fmt->chroma_decim)
+ sizeimage += 2 * sizeimage / queue->css_fmt->chroma_decim;
+
+ f->plane_fmt[0].sizeimage = sizeimage;
+ f->field = V4L2_FIELD_NONE;
+ f->num_planes = 1;
+ f->colorspace = queue->css_fmt->colorspace;
+ f->flags = 0;
+ f->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ f->quantization = V4L2_QUANTIZATION_DEFAULT;
+ f->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+ memset(f->reserved, 0, sizeof(f->reserved));
+
+ return 0;
+}
+
+static bool ipu3_css_queue_enabled(struct ipu3_css_queue *q)
+{
+ return q->css_fmt;
+}
+
+/******************* css hw *******************/
+
+/* In the style of writesl() defined in include/asm-generic/io.h */
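+/* Note: a trailing chunk of `mem' shorter than 4 bytes is not written */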
+static inline void writes(const void *mem, ssize_t count, void __iomem *addr)
+{
+ if (count >= 4) {
+ const u32 *buf = mem;
+
+ count /= 4;
+ do {
+ writel(*buf++, addr);
+ addr += 4;
+ } while (--count);
+ }
+}
+
+/* Wait until register `reg', masked with `mask', becomes `cmp' */
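+/* Polls roughly every 1 ms and gives up after 100 ms */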
+static int ipu3_hw_wait(void __iomem *base, int reg, u32 mask, u32 cmp)
+{
+ u32 val;
+
+ return readl_poll_timeout(base + reg, val, (val & mask) == cmp,
+ 1000, 100 * 1000);
+}
+
+/* Initialize the IPU3 CSS hardware and associated h/w blocks */
+
+int ipu3_css_set_powerup(struct device *dev, void __iomem *base)
+{
+ static const unsigned int freq = 450;
+ u32 pm_ctrl, state, val;
+
+ dev_dbg(dev, "%s\n", __func__);
+ /* Clear the CSS busy signal */
+ readl(base + IMGU_REG_GP_BUSY);
+ writel(0, base + IMGU_REG_GP_BUSY);
+
+ /* Wait for idle signal */
+ if (ipu3_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
+ IMGU_STATE_IDLE_STS)) {
+ dev_err(dev, "failed to set CSS idle\n");
+ goto fail;
+ }
+
+ /* Reset the css */
+ writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_RESET,
+ base + IMGU_REG_PM_CTRL);
+
+ usleep_range(200, 300);
+
+ /* Prepare CSS */
+
+ pm_ctrl = readl(base + IMGU_REG_PM_CTRL);
+ state = readl(base + IMGU_REG_STATE);
+
+ dev_dbg(dev, "CSS pm_ctrl 0x%x state 0x%x (power %s)\n",
+ pm_ctrl, state, state & IMGU_STATE_POWER_DOWN ? "down" : "up");
+
+ /* Power up CSS using wrapper */
+ if (state & IMGU_STATE_POWER_DOWN) {
+ writel(IMGU_PM_CTRL_RACE_TO_HALT | IMGU_PM_CTRL_START,
+ base + IMGU_REG_PM_CTRL);
+ if (ipu3_hw_wait(base, IMGU_REG_PM_CTRL,
+ IMGU_PM_CTRL_START, 0)) {
+ dev_err(dev, "failed to power up CSS\n");
+ goto fail;
+ }
+ usleep_range(2000, 3000);
+ } else {
+ writel(IMGU_PM_CTRL_RACE_TO_HALT, base + IMGU_REG_PM_CTRL);
+ }
+
+ /* Set the busy bit */
+ writel(readl(base + IMGU_REG_GP_BUSY) | 1, base + IMGU_REG_GP_BUSY);
+
+ /* Set CSS clock frequency */
+ pm_ctrl = readl(base + IMGU_REG_PM_CTRL);
+ val = pm_ctrl & ~(IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
+ writel(val, base + IMGU_REG_PM_CTRL);
+ writel(0, base + IMGU_REG_GP_BUSY);
+ if (ipu3_hw_wait(base, IMGU_REG_STATE,
+ IMGU_STATE_PWRDNM_FSM_MASK, 0)) {
+ dev_err(dev, "failed to pwrdn CSS\n");
+ goto fail;
+ }
+ val = (freq / IMGU_SYSTEM_REQ_FREQ_DIVIDER) & IMGU_SYSTEM_REQ_FREQ_MASK;
+ writel(val, base + IMGU_REG_SYSTEM_REQ);
+ writel(1, base + IMGU_REG_GP_BUSY);
+ writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_HALT,
+ base + IMGU_REG_PM_CTRL);
+ if (ipu3_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
+ IMGU_STATE_HALT_STS)) {
+ dev_err(dev, "failed to halt CSS\n");
+ goto fail;
+ }
+
+ writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_START,
+ base + IMGU_REG_PM_CTRL);
+ if (ipu3_hw_wait(base, IMGU_REG_PM_CTRL, IMGU_PM_CTRL_START, 0)) {
+ dev_err(dev, "failed to start CSS\n");
+ goto fail;
+ }
+ writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_UNHALT,
+ base + IMGU_REG_PM_CTRL);
+
+ val = readl(base + IMGU_REG_PM_CTRL); /* get pm_ctrl */
+ val &= ~(IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
+ val |= pm_ctrl & (IMGU_PM_CTRL_CSS_PWRDN | IMGU_PM_CTRL_RST_AT_EOF);
+ writel(val, base + IMGU_REG_PM_CTRL);
+
+ return 0;
+
+fail:
+ ipu3_css_set_powerdown(dev, base);
+ return -EIO;
+}
+
+void ipu3_css_set_powerdown(struct device *dev, void __iomem *base)
+{
+ dev_dbg(dev, "%s\n", __func__);
+ /* wait for cio idle signal */
+ if (ipu3_hw_wait(base, IMGU_REG_CIO_GATE_BURST_STATE,
+ IMGU_CIO_GATE_BURST_MASK, 0))
+ dev_warn(dev, "wait cio gate idle timeout");
+
+ /* wait for css idle signal */
+ if (ipu3_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
+ IMGU_STATE_IDLE_STS))
+ dev_warn(dev, "wait css idle timeout\n");
+
+ /* do halt-halted handshake with css */
+ writel(1, base + IMGU_REG_GP_HALT);
+ if (ipu3_hw_wait(base, IMGU_REG_STATE, IMGU_STATE_HALT_STS,
+ IMGU_STATE_HALT_STS))
+ dev_warn(dev, "failed to halt css");
+
+ /* de-assert the busy bit */
+ writel(0, base + IMGU_REG_GP_BUSY);
+}
+
+static void ipu3_css_hw_enable_irq(struct ipu3_css *css)
+{
+ void __iomem *const base = css->base;
+ u32 val, i;
+
+ /* Set up interrupts */
+
+ /*
+ * Enable IRQ on the SP which signals that SP goes to idle
+ * (aka ready state) and set trigger to pulse
+ */
+ val = readl(base + IMGU_REG_SP_CTRL(0)) | IMGU_CTRL_IRQ_READY;
+ writel(val, base + IMGU_REG_SP_CTRL(0));
+ writel(val | IMGU_CTRL_IRQ_CLEAR, base + IMGU_REG_SP_CTRL(0));
+
+ /* Enable IRQs from the IMGU wrapper */
+ writel(IMGU_REG_INT_CSS_IRQ, base + IMGU_REG_INT_ENABLE);
+ /* Clear */
+ writel(IMGU_REG_INT_CSS_IRQ, base + IMGU_REG_INT_STATUS);
+
+ /* Enable IRQs from main IRQ controller */
+ writel(~0, base + IMGU_REG_IRQCTRL_EDGE_NOT_PULSE(IMGU_IRQCTRL_MAIN));
+ writel(0, base + IMGU_REG_IRQCTRL_MASK(IMGU_IRQCTRL_MAIN));
+ writel(IMGU_IRQCTRL_IRQ_MASK,
+ base + IMGU_REG_IRQCTRL_EDGE(IMGU_IRQCTRL_MAIN));
+ writel(IMGU_IRQCTRL_IRQ_MASK,
+ base + IMGU_REG_IRQCTRL_ENABLE(IMGU_IRQCTRL_MAIN));
+ writel(IMGU_IRQCTRL_IRQ_MASK,
+ base + IMGU_REG_IRQCTRL_CLEAR(IMGU_IRQCTRL_MAIN));
+ writel(IMGU_IRQCTRL_IRQ_MASK,
+ base + IMGU_REG_IRQCTRL_MASK(IMGU_IRQCTRL_MAIN));
+ /* Wait for write complete */
+ readl(base + IMGU_REG_IRQCTRL_ENABLE(IMGU_IRQCTRL_MAIN));
+
+ /* Enable IRQs from SP0 and SP1 controllers */
+ for (i = IMGU_IRQCTRL_SP0; i <= IMGU_IRQCTRL_SP1; i++) {
+ writel(~0, base + IMGU_REG_IRQCTRL_EDGE_NOT_PULSE(i));
+ writel(0, base + IMGU_REG_IRQCTRL_MASK(i));
+ writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_EDGE(i));
+ writel(IMGU_IRQCTRL_IRQ_MASK,
+ base + IMGU_REG_IRQCTRL_ENABLE(i));
+ writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_CLEAR(i));
+ writel(IMGU_IRQCTRL_IRQ_MASK, base + IMGU_REG_IRQCTRL_MASK(i));
+ /* Wait for write complete */
+ readl(base + IMGU_REG_IRQCTRL_ENABLE(i));
+ }
+}
+
+static int ipu3_css_hw_init(struct ipu3_css *css)
+{
+ /* Stream monitors that must show no valid data before starting */
+ static const struct {
+ u32 reg;
+ u32 mask;
+ const char *name;
+ } stream_monitors[] = {
+ {
+ IMGU_REG_GP_SP1_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_ISP_PORT_SP12ISP,
+ "ISP0 to SP0"
+ }, {
+ IMGU_REG_GP_ISP_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_SP1_PORT_ISP2SP1,
+ "SP0 to ISP0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_ISP2DMA,
+ "ISP0 to DMA0"
+ }, {
+ IMGU_REG_GP_ISP_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_ISP_PORT_DMA2ISP,
+ "DMA0 to ISP0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2GDC,
+ "ISP0 to GDC0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_GDC2CELLS,
+ "GDC0 to ISP0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_SP12DMA,
+ "SP0 to DMA0"
+ }, {
+ IMGU_REG_GP_SP1_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_SP1_PORT_DMA2SP1,
+ "DMA0 to SP0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_CELLS2GDC,
+ "SP0 to GDC0"
+ }, {
+ IMGU_REG_GP_MOD_STRMON_STAT,
+ IMGU_GP_STRMON_STAT_MOD_PORT_GDC2CELLS,
+ "GDC0 to SP0"
+ },
+ };
+
+ struct device *dev = css->dev;
+ void __iomem *const base = css->base;
+ u32 val, i;
+
+ /* Set instruction cache address and inv bit for ISP, SP, and SP1 */
+ for (i = 0; i < IMGU_NUM_SP; i++) {
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css->fw_sp[i]];
+
+ writel(css->binary[css->fw_sp[i]].daddr,
+ base + IMGU_REG_SP_ICACHE_ADDR(bi->type));
+ writel(readl(base + IMGU_REG_SP_CTRL(bi->type)) |
+ IMGU_CTRL_ICACHE_INV,
+ base + IMGU_REG_SP_CTRL(bi->type));
+ }
+ writel(css->binary[css->fw_bl].daddr, base + IMGU_REG_ISP_ICACHE_ADDR);
+ writel(readl(base + IMGU_REG_ISP_CTRL) | IMGU_CTRL_ICACHE_INV,
+ base + IMGU_REG_ISP_CTRL);
+
+ /* Check that IMGU hardware is ready */
+
+ if (!(readl(base + IMGU_REG_SP_CTRL(0)) & IMGU_CTRL_IDLE)) {
+ dev_err(dev, "SP is not idle\n");
+ return -EIO;
+ }
+ if (!(readl(base + IMGU_REG_ISP_CTRL) & IMGU_CTRL_IDLE)) {
+ dev_err(dev, "ISP is not idle\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(stream_monitors); i++) {
+ val = readl(base + stream_monitors[i].reg);
+ if (val & stream_monitors[i].mask) {
+ dev_err(dev, "error: Stream monitor %s is valid\n",
+ stream_monitors[i].name);
+ return -EIO;
+ }
+ }
+
+ /* Initialize GDC with default values */
+
+ for (i = 0; i < ARRAY_SIZE(ipu3_css_gdc_lut[0]); i++) {
+ u32 val0 = ipu3_css_gdc_lut[0][i] & IMGU_GDC_LUT_MASK;
+ u32 val1 = ipu3_css_gdc_lut[1][i] & IMGU_GDC_LUT_MASK;
+ u32 val2 = ipu3_css_gdc_lut[2][i] & IMGU_GDC_LUT_MASK;
+ u32 val3 = ipu3_css_gdc_lut[3][i] & IMGU_GDC_LUT_MASK;
+
+ writel(val0 | (val1 << 16),
+ base + IMGU_REG_GDC_LUT_BASE + i * 8);
+ writel(val2 | (val3 << 16),
+ base + IMGU_REG_GDC_LUT_BASE + i * 8 + 4);
+ }
+
+ return 0;
+}
+
+/* Boot the given IPU3 CSS SP */
+static int ipu3_css_hw_start_sp(struct ipu3_css *css, int sp)
+{
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
+ struct imgu_abi_sp_init_dmem_cfg dmem_cfg = {
+ .ddr_data_addr = css->binary[css->fw_sp[sp]].daddr
+ + bi->blob.data_source,
+ .dmem_data_addr = bi->blob.data_target,
+ .dmem_bss_addr = bi->blob.bss_target,
+ .data_size = bi->blob.data_size,
+ .bss_size = bi->blob.bss_size,
+ .sp_id = sp,
+ };
+
+ writes(&dmem_cfg, sizeof(dmem_cfg), base +
+ IMGU_REG_SP_DMEM_BASE(sp) + bi->info.sp.init_dmem_data);
+
+ writel(bi->info.sp.sp_entry, base + IMGU_REG_SP_START_ADDR(sp));
+
+ writel(readl(base + IMGU_REG_SP_CTRL(sp))
+ | IMGU_CTRL_START | IMGU_CTRL_RUN, base + IMGU_REG_SP_CTRL(sp));
+
+ if (ipu3_hw_wait(css->base, IMGU_REG_SP_DMEM_BASE(sp)
+ + bi->info.sp.sw_state,
+ ~0, IMGU_ABI_SP_SWSTATE_INITIALIZED))
+ return -EIO;
+
+ return 0;
+}
+
+/* Start the IPU3 CSS ImgU (Imaging Unit) and all the SPs */
+static int ipu3_css_hw_start(struct ipu3_css *css)
+{
+ static const u32 event_mask =
+ ((1 << IMGU_ABI_EVTTYPE_OUT_FRAME_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_2ND_OUT_FRAME_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_2ND_VF_OUT_FRAME_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_3A_STATS_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_DIS_STATS_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_PIPELINE_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_FRAME_TAGGED) |
+ (1 << IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_METADATA_DONE) |
+ (1 << IMGU_ABI_EVTTYPE_ACC_STAGE_COMPLETE))
+ << IMGU_ABI_SP_COMM_EVENT_IRQ_MASK_OR_SHIFT;
+
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi, *bl = &css->fwp->binary_header[css->fw_bl];
+ unsigned int i;
+
+ writel(IMGU_TLB_INVALIDATE, base + IMGU_REG_TLB_INVALIDATE);
+
+ /* Start bootloader */
+
+ writel(IMGU_ABI_BL_SWSTATE_BUSY,
+ base + IMGU_REG_ISP_DMEM_BASE + bl->info.bl.sw_state);
+ writel(IMGU_NUM_SP,
+ base + IMGU_REG_ISP_DMEM_BASE + bl->info.bl.num_dma_cmds);
+
+ for (i = 0; i < IMGU_NUM_SP; i++) {
+ int j = IMGU_NUM_SP - i - 1; /* load sp1 first, then sp0 */
+ struct imgu_fw_info *sp =
+ &css->fwp->binary_header[css->fw_sp[j]];
+ struct imgu_abi_bl_dma_cmd_entry dma_cmd = {
+ .src_addr = css->binary[css->fw_sp[j]].daddr
+ + sp->blob.text_source,
+ .size = sp->blob.text_size,
+ .dst_type = IMGU_ABI_BL_DMACMD_TYPE_SP_PMEM,
+ .dst_addr = IMGU_SP_PMEM_BASE(j),
+ };
+
+ writes(&dma_cmd, sizeof(dma_cmd),
+ base + IMGU_REG_ISP_DMEM_BASE + i * sizeof(dma_cmd) +
+ bl->info.bl.dma_cmd_list);
+ }
+
+ writel(bl->info.bl.bl_entry, base + IMGU_REG_ISP_START_ADDR);
+
+ writel(readl(base + IMGU_REG_ISP_CTRL)
+ | IMGU_CTRL_START | IMGU_CTRL_RUN, base + IMGU_REG_ISP_CTRL);
+ if (ipu3_hw_wait(css->base, IMGU_REG_ISP_DMEM_BASE
+ + bl->info.bl.sw_state, ~0,
+ IMGU_ABI_BL_SWSTATE_OK)) {
+ dev_err(css->dev, "failed to start bootloader\n");
+ return -EIO;
+ }
+
+ /* Start ISP */
+
+ memset(css->xmem_sp_group_ptrs.vaddr, 0,
+ sizeof(struct imgu_abi_sp_group));
+
+ bi = &css->fwp->binary_header[css->fw_sp[0]];
+
+ writel(css->xmem_sp_group_ptrs.daddr,
+ base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.per_frame_data);
+
+ writel(IMGU_ABI_SP_SWSTATE_TERMINATED,
+ base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sw_state);
+ writel(1, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.invalidate_tlb);
+
+ if (ipu3_css_hw_start_sp(css, 0))
+ return -EIO;
+
+ writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.isp_started);
+ writel(0, base + IMGU_REG_SP_DMEM_BASE(0) +
+ bi->info.sp.host_sp_queues_initialized);
+ writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sleep_mode);
+ writel(0, base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.invalidate_tlb);
+ writel(IMGU_ABI_SP_COMM_COMMAND_READY, base + IMGU_REG_SP_DMEM_BASE(0)
+ + bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
+
+ /* Enable all events for all queues */
+
+ for (i = 0; i < IPU3_CSS_PIPE_ID_NUM; i++)
+ writel(event_mask, base + IMGU_REG_SP_DMEM_BASE(0)
+ + bi->info.sp.host_sp_com
+ + IMGU_ABI_SP_COMM_EVENT_IRQ_MASK(i));
+ writel(1, base + IMGU_REG_SP_DMEM_BASE(0) +
+ bi->info.sp.host_sp_queues_initialized);
+
+ /* Start SP1 */
+
+ bi = &css->fwp->binary_header[css->fw_sp[1]];
+
+ writel(IMGU_ABI_SP_SWSTATE_TERMINATED,
+ base + IMGU_REG_SP_DMEM_BASE(1) + bi->info.sp.sw_state);
+
+ if (ipu3_css_hw_start_sp(css, 1))
+ return -EIO;
+
+ writel(IMGU_ABI_SP_COMM_COMMAND_READY, base + IMGU_REG_SP_DMEM_BASE(1)
+ + bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
+
+ return 0;
+}
+
+static void ipu3_css_hw_stop(struct ipu3_css *css)
+{
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[0]];
+
+ /* Stop fw */
+ writel(IMGU_ABI_SP_COMM_COMMAND_TERMINATE,
+ base + IMGU_REG_SP_DMEM_BASE(0) +
+ bi->info.sp.host_sp_com + IMGU_ABI_SP_COMM_COMMAND);
+ if (ipu3_hw_wait(css->base, IMGU_REG_SP_CTRL(0),
+ IMGU_CTRL_IDLE, IMGU_CTRL_IDLE))
+ dev_err(css->dev, "wait sp0 idle timeout.\n");
+ if (readl(base + IMGU_REG_SP_DMEM_BASE(0) + bi->info.sp.sw_state) !=
+ IMGU_ABI_SP_SWSTATE_TERMINATED)
+ dev_err(css->dev, "sp0 is not terminated.\n");
+ if (ipu3_hw_wait(css->base, IMGU_REG_ISP_CTRL,
+ IMGU_CTRL_IDLE, IMGU_CTRL_IDLE))
+ dev_err(css->dev, "wait isp idle timeout\n");
+}
+
+static void ipu3_css_hw_cleanup(struct ipu3_css *css)
+{
+ void __iomem *const base = css->base;
+
+ /* Reset CSS */
+
+ /* Clear the CSS busy signal */
+ readl(base + IMGU_REG_GP_BUSY);
+ writel(0, base + IMGU_REG_GP_BUSY);
+
+ /* Wait for idle signal */
+ if (ipu3_hw_wait(css->base, IMGU_REG_STATE, IMGU_STATE_IDLE_STS,
+ IMGU_STATE_IDLE_STS))
+ dev_err(css->dev, "failed to shut down hw cleanly\n");
+
+ /* Reset the css */
+ writel(readl(base + IMGU_REG_PM_CTRL) | IMGU_PM_CTRL_FORCE_RESET,
+ base + IMGU_REG_PM_CTRL);
+
+ usleep_range(200, 300);
+}
+
+static void ipu3_css_pipeline_cleanup(struct ipu3_css *css, unsigned int pipe)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ unsigned int i;
+
+ ipu3_css_pool_cleanup(imgu,
+ &css->pipes[pipe].pool.parameter_set_info);
+ ipu3_css_pool_cleanup(imgu, &css->pipes[pipe].pool.acc);
+ ipu3_css_pool_cleanup(imgu, &css->pipes[pipe].pool.gdc);
+ ipu3_css_pool_cleanup(imgu, &css->pipes[pipe].pool.obgrid);
+
+ for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
+ ipu3_css_pool_cleanup(imgu,
+ &css->pipes[pipe].pool.binary_params_p[i]);
+}
+
+/*
+ * This function initializes various stages of the
+ * IPU3 CSS ISP pipeline
+ */
+static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
+{
+ static const int BYPC = 2; /* Bytes per component */
+ static const struct imgu_abi_buffer_sp buffer_sp_init = {
+ .buf_src = {.queue_id = IMGU_ABI_QUEUE_EVENT_ID},
+ .buf_type = IMGU_ABI_BUFFER_TYPE_INVALID,
+ };
+
+ struct imgu_abi_isp_iterator_config *cfg_iter;
+ struct imgu_abi_isp_ref_config *cfg_ref;
+ struct imgu_abi_isp_dvs_config *cfg_dvs;
+ struct imgu_abi_isp_tnr3_config *cfg_tnr;
+ struct imgu_abi_isp_ref_dmem_state *cfg_ref_state;
+ struct imgu_abi_isp_tnr3_dmem_state *cfg_tnr_state;
+
+ const int stage = 0;
+ unsigned int i, j;
+
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ const struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
+ const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
+
+ struct imgu_fw_config_memory_offsets *cofs = (void *)css->fwp +
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_CONFIG];
+ struct imgu_fw_state_memory_offsets *sofs = (void *)css->fwp +
+ bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_STATE];
+
+ struct imgu_abi_isp_stage *isp_stage;
+ struct imgu_abi_sp_stage *sp_stage;
+ struct imgu_abi_sp_group *sp_group;
+
+ const unsigned int bds_width_pad =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
+ 2 * IPU3_UAPI_ISP_VEC_ELEMS);
+
+ const enum imgu_abi_memories m0 = IMGU_ABI_MEM_ISP_DMEM0;
+ enum imgu_abi_param_class cfg = IMGU_ABI_PARAM_CLASS_CONFIG;
+ void *vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
+
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+
+	dev_dbg(css->dev, "%s for pipe %d\n", __func__, pipe);
+
+ /* Configure iterator */
+
+ cfg_iter = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &cofs->dmem.iterator,
+ sizeof(*cfg_iter), vaddr);
+ if (!cfg_iter)
+ goto bad_firmware;
+
+ cfg_iter->input_info.res.width =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
+ cfg_iter->input_info.res.height =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
+ cfg_iter->input_info.padded_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].width_pad;
+ cfg_iter->input_info.format =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->frame_format;
+ cfg_iter->input_info.raw_bit_depth =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bit_depth;
+ cfg_iter->input_info.raw_bayer_order =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
+ cfg_iter->input_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ cfg_iter->internal_info.res.width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ cfg_iter->internal_info.res.height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ cfg_iter->internal_info.padded_width = bds_width_pad;
+ cfg_iter->internal_info.format =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ cfg_iter->internal_info.raw_bit_depth =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
+ cfg_iter->internal_info.raw_bayer_order =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
+ cfg_iter->internal_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ cfg_iter->output_info.res.width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ cfg_iter->output_info.res.height =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ cfg_iter->output_info.padded_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
+ cfg_iter->output_info.format =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ cfg_iter->output_info.raw_bit_depth =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
+ cfg_iter->output_info.raw_bayer_order =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
+ cfg_iter->output_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ cfg_iter->vf_info.res.width =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ cfg_iter->vf_info.res.height =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ cfg_iter->vf_info.padded_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
+ cfg_iter->vf_info.format =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
+ cfg_iter->vf_info.raw_bit_depth =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bit_depth;
+ cfg_iter->vf_info.raw_bayer_order =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bayer_order;
+ cfg_iter->vf_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ cfg_iter->dvs_envelope.width = css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
+ cfg_iter->dvs_envelope.height =
+ css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
+
+ /* Configure reference (delay) frames */
+
+ cfg_ref = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &cofs->dmem.ref,
+ sizeof(*cfg_ref), vaddr);
+ if (!cfg_ref)
+ goto bad_firmware;
+
+ cfg_ref->port_b.crop = 0;
+ cfg_ref->port_b.elems = IMGU_ABI_ISP_DDR_WORD_BYTES / BYPC;
+ cfg_ref->port_b.width =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].width;
+ cfg_ref->port_b.stride =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline;
+ cfg_ref->width_a_over_b =
+ IPU3_UAPI_ISP_VEC_ELEMS / cfg_ref->port_b.elems;
+ cfg_ref->dvs_frame_delay = IPU3_CSS_AUX_FRAMES - 1;
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++) {
+ cfg_ref->ref_frame_addr_y[i] =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr;
+ cfg_ref->ref_frame_addr_c[i] =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr +
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline *
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
+ }
+ for (; i < IMGU_ABI_FRAMES_REF; i++) {
+ cfg_ref->ref_frame_addr_y[i] = 0;
+ cfg_ref->ref_frame_addr_c[i] = 0;
+ }
+
+ /* Configure DVS (digital video stabilization) */
+
+ cfg_dvs = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &cofs->dmem.dvs, sizeof(*cfg_dvs),
+ vaddr);
+ if (!cfg_dvs)
+ goto bad_firmware;
+
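+	/*
+	 * The GDC output is divided into DVS blocks; the number of
+	 * horizontal blocks is rounded up to an even value.
+	 */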
+ cfg_dvs->num_horizontal_blocks =
+ ALIGN(DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_GDC].width,
+ IMGU_DVS_BLOCK_W), 2);
+ cfg_dvs->num_vertical_blocks =
+ DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_GDC].height,
+ IMGU_DVS_BLOCK_H);
+
+ /* Configure TNR (temporal noise reduction) */
+
+ if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ cfg_tnr = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &cofs->dmem.tnr3,
+ sizeof(*cfg_tnr),
+ vaddr);
+ if (!cfg_tnr)
+ goto bad_firmware;
+
+ cfg_tnr->port_b.crop = 0;
+ cfg_tnr->port_b.elems = IMGU_ABI_ISP_DDR_WORD_BYTES;
+ cfg_tnr->port_b.width =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
+ cfg_tnr->port_b.stride =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline;
+ cfg_tnr->width_a_over_b =
+ IPU3_UAPI_ISP_VEC_ELEMS / cfg_tnr->port_b.elems;
+ cfg_tnr->frame_height =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
+ cfg_tnr->delay_frame = IPU3_CSS_AUX_FRAMES - 1;
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ cfg_tnr->frame_addr[i] =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR]
+ .mem[i].daddr;
+ for (; i < IMGU_ABI_FRAMES_TNR; i++)
+ cfg_tnr->frame_addr[i] = 0;
+ }
+
+ /* Configure ref dmem state parameters */
+
+ cfg = IMGU_ABI_PARAM_CLASS_STATE;
+ vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
+
+ cfg_ref_state = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &sofs->dmem.ref,
+ sizeof(*cfg_ref_state),
+ vaddr);
+ if (!cfg_ref_state)
+ goto bad_firmware;
+
+ cfg_ref_state->ref_in_buf_idx = 0;
+ cfg_ref_state->ref_out_buf_idx = 1;
+
+ /* Configure tnr dmem state parameters */
+ if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ cfg_tnr_state =
+ ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &sofs->dmem.tnr3,
+ sizeof(*cfg_tnr_state),
+ vaddr);
+ if (!cfg_tnr_state)
+ goto bad_firmware;
+
+ cfg_tnr_state->in_bufidx = 0;
+ cfg_tnr_state->out_bufidx = 1;
+ cfg_tnr_state->bypass_filter = 0;
+ cfg_tnr_state->total_frame_counter = 0;
+ for (i = 0; i < IMGU_ABI_BUF_SETS_TNR; i++)
+ cfg_tnr_state->buffer_frame_counter[i] = 0;
+ }
+
+ /* Configure ISP stage */
+
+ isp_stage = css_pipe->xmem_isp_stage_ptrs[pipe][stage].vaddr;
+ memset(isp_stage, 0, sizeof(*isp_stage));
+ isp_stage->blob_info = bi->blob;
+ isp_stage->binary_info = bi->info.isp.sp;
+ strscpy(isp_stage->binary_name,
+ (char *)css->fwp + bi->blob.prog_name_offset,
+ sizeof(isp_stage->binary_name));
+ isp_stage->mem_initializers = bi->info.isp.sp.mem_initializers;
+ for (i = IMGU_ABI_PARAM_CLASS_CONFIG; i < IMGU_ABI_PARAM_CLASS_NUM; i++)
+ for (j = 0; j < IMGU_ABI_NUM_MEMORIES; j++)
+ isp_stage->mem_initializers.params[i][j].address =
+ css_pipe->binary_params_cs[i - 1][j].daddr;
+
+ /* Configure SP stage */
+
+ sp_stage = css_pipe->xmem_sp_stage_ptrs[pipe][stage].vaddr;
+ memset(sp_stage, 0, sizeof(*sp_stage));
+
+ sp_stage->frames.in.buf_attr = buffer_sp_init;
+ for (i = 0; i < IMGU_ABI_BINARY_MAX_OUTPUT_PORTS; i++)
+ sp_stage->frames.out[i].buf_attr = buffer_sp_init;
+ sp_stage->frames.out_vf.buf_attr = buffer_sp_init;
+ sp_stage->frames.s3a_buf = buffer_sp_init;
+ sp_stage->frames.dvs_buf = buffer_sp_init;
+
+ sp_stage->stage_type = IMGU_ABI_STAGE_TYPE_ISP;
+ sp_stage->num = stage;
+ sp_stage->isp_online = 0;
+ sp_stage->isp_copy_vf = 0;
+ sp_stage->isp_copy_output = 0;
+
+ sp_stage->enable.vf_output = css_pipe->vf_output_en;
+
+ sp_stage->frames.effective_in_res.width =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
+ sp_stage->frames.effective_in_res.height =
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+ sp_stage->frames.in.info.res.width =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
+ sp_stage->frames.in.info.res.height =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
+ sp_stage->frames.in.info.padded_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].width_pad;
+ sp_stage->frames.in.info.format =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->frame_format;
+ sp_stage->frames.in.info.raw_bit_depth =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bit_depth;
+ sp_stage->frames.in.info.raw_bayer_order =
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
+ sp_stage->frames.in.info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+ sp_stage->frames.in.buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_C_ID;
+ sp_stage->frames.in.buf_attr.buf_type =
+ IMGU_ABI_BUFFER_TYPE_INPUT_FRAME;
+
+ sp_stage->frames.out[0].info.res.width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ sp_stage->frames.out[0].info.res.height =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ sp_stage->frames.out[0].info.padded_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
+ sp_stage->frames.out[0].info.format =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ sp_stage->frames.out[0].info.raw_bit_depth =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
+ sp_stage->frames.out[0].info.raw_bayer_order =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
+ sp_stage->frames.out[0].info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+ sp_stage->frames.out[0].planes.nv.uv.offset =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad *
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ sp_stage->frames.out[0].buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_D_ID;
+ sp_stage->frames.out[0].buf_attr.buf_type =
+ IMGU_ABI_BUFFER_TYPE_OUTPUT_FRAME;
+
+ sp_stage->frames.out[1].buf_attr.buf_src.queue_id =
+ IMGU_ABI_QUEUE_EVENT_ID;
+
+ sp_stage->frames.internal_frame_info.res.width =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ sp_stage->frames.internal_frame_info.res.height =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+ sp_stage->frames.internal_frame_info.padded_width = bds_width_pad;
+
+ sp_stage->frames.internal_frame_info.format =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ sp_stage->frames.internal_frame_info.raw_bit_depth =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
+ sp_stage->frames.internal_frame_info.raw_bayer_order =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
+ sp_stage->frames.internal_frame_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+
+ sp_stage->frames.out_vf.info.res.width =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ sp_stage->frames.out_vf.info.res.height =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ sp_stage->frames.out_vf.info.padded_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
+ sp_stage->frames.out_vf.info.format =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
+ sp_stage->frames.out_vf.info.raw_bit_depth =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bit_depth;
+ sp_stage->frames.out_vf.info.raw_bayer_order =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bayer_order;
+ sp_stage->frames.out_vf.info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
+ sp_stage->frames.out_vf.planes.yuv.u.offset =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad *
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
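+	/*
+	 * For 4:2:0 planar viewfinder output the U plane follows the Y
+	 * plane and the V plane follows the U plane, which is a quarter
+	 * of the Y plane in size, hence the 5 / 4 factor below.
+	 */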
+ sp_stage->frames.out_vf.planes.yuv.v.offset =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad *
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height * 5 / 4;
+ sp_stage->frames.out_vf.buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_E_ID;
+ sp_stage->frames.out_vf.buf_attr.buf_type =
+ IMGU_ABI_BUFFER_TYPE_VF_OUTPUT_FRAME;
+
+ sp_stage->frames.s3a_buf.buf_src.queue_id = IMGU_ABI_QUEUE_F_ID;
+ sp_stage->frames.s3a_buf.buf_type = IMGU_ABI_BUFFER_TYPE_3A_STATISTICS;
+
+ sp_stage->frames.dvs_buf.buf_src.queue_id = IMGU_ABI_QUEUE_G_ID;
+ sp_stage->frames.dvs_buf.buf_type = IMGU_ABI_BUFFER_TYPE_DIS_STATISTICS;
+
+ sp_stage->dvs_envelope.width = css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
+ sp_stage->dvs_envelope.height =
+ css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
+
+ sp_stage->isp_pipe_version =
+ bi->info.isp.sp.pipeline.isp_pipe_version;
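+	/*
+	 * The decimation log factor is roughly log2 of the ratio between
+	 * the BDS resolution and the maximum BQ grid size, clamped to the
+	 * range 3..5 (fls(x) - 1 is floor(log2(x)) for x > 0).
+	 */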
+ sp_stage->isp_deci_log_factor =
+ clamp(max(fls(css_pipe->rect[IPU3_CSS_RECT_BDS].width /
+ IMGU_MAX_BQ_GRID_WIDTH),
+ fls(css_pipe->rect[IPU3_CSS_RECT_BDS].height /
+ IMGU_MAX_BQ_GRID_HEIGHT)) - 1, 3, 5);
+ sp_stage->isp_vf_downscale_bits = 0;
+ sp_stage->if_config_index = 255;
+ sp_stage->sp_enable_xnr = 0;
+ sp_stage->num_stripes = stripes;
+ sp_stage->enable.s3a = 1;
+ sp_stage->enable.dvs_stats = 0;
+
+ sp_stage->xmem_bin_addr = css->binary[css_pipe->bindex].daddr;
+ sp_stage->xmem_map_addr = css_pipe->sp_ddr_ptrs.daddr;
+ sp_stage->isp_stage_addr =
+ css_pipe->xmem_isp_stage_ptrs[pipe][stage].daddr;
+
+ /* Configure SP group */
+
+ sp_group = css->xmem_sp_group_ptrs.vaddr;
+ memset(&sp_group->pipe[pipe], 0, sizeof(struct imgu_abi_sp_pipeline));
+
+ sp_group->pipe[pipe].num_stages = 1;
+ sp_group->pipe[pipe].pipe_id = css_pipe->pipe_id;
+ sp_group->pipe[pipe].thread_id = pipe;
+ sp_group->pipe[pipe].pipe_num = pipe;
+ sp_group->pipe[pipe].num_execs = -1;
+ sp_group->pipe[pipe].pipe_qos_config = -1;
+ sp_group->pipe[pipe].required_bds_factor = 0;
+ sp_group->pipe[pipe].dvs_frame_delay = IPU3_CSS_AUX_FRAMES - 1;
+ sp_group->pipe[pipe].inout_port_config =
+ IMGU_ABI_PORT_CONFIG_TYPE_INPUT_HOST |
+ IMGU_ABI_PORT_CONFIG_TYPE_OUTPUT_HOST;
+ sp_group->pipe[pipe].scaler_pp_lut = 0;
+ sp_group->pipe[pipe].shading.internal_frame_origin_x_bqs_on_sctbl = 0;
+ sp_group->pipe[pipe].shading.internal_frame_origin_y_bqs_on_sctbl = 0;
+ sp_group->pipe[pipe].sp_stage_addr[stage] =
+ css_pipe->xmem_sp_stage_ptrs[pipe][stage].daddr;
+ sp_group->pipe[pipe].pipe_config =
+ bi->info.isp.sp.enable.params ? (1 << pipe) : 0;
+ sp_group->pipe[pipe].pipe_config |= IMGU_ABI_PIPE_CONFIG_ACQUIRE_ISP;
+
+ /* Initialize parameter pools */
+
+ if (ipu3_css_pool_init(imgu, &css_pipe->pool.parameter_set_info,
+ sizeof(struct imgu_abi_parameter_set_info)) ||
+ ipu3_css_pool_init(imgu, &css_pipe->pool.acc,
+ sizeof(struct imgu_abi_acc_param)) ||
+ ipu3_css_pool_init(imgu, &css_pipe->pool.gdc,
+ sizeof(struct imgu_abi_gdc_warp_param) *
+ 3 * cfg_dvs->num_horizontal_blocks / 2 *
+ cfg_dvs->num_vertical_blocks) ||
+ ipu3_css_pool_init(imgu, &css_pipe->pool.obgrid,
+ ipu3_css_fw_obgrid_size(
+ &css->fwp->binary_header[css_pipe->bindex])))
+ goto out_of_memory;
+
+ for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
+ if (ipu3_css_pool_init(imgu,
+ &css_pipe->pool.binary_params_p[i],
+ bi->info.isp.sp.mem_initializers.params
+ [IMGU_ABI_PARAM_CLASS_PARAM][i].size))
+ goto out_of_memory;
+
+ return 0;
+
+bad_firmware:
+ ipu3_css_pipeline_cleanup(css, pipe);
+ return -EPROTO;
+
+out_of_memory:
+ ipu3_css_pipeline_cleanup(css, pipe);
+ return -ENOMEM;
+}
+
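+/*
+ * Return the write position (the "end" index) of the given host-to-SP
+ * buffer queue, or of the event queue if queue < 0.
+ */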
+static u8 ipu3_css_queue_pos(struct ipu3_css *css, int queue, int thread)
+{
+ static const unsigned int sp;
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
+ struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
+ bi->info.sp.host_sp_queue;
+
+ return queue >= 0 ? readb(&q->host2sp_bufq_info[thread][queue].end) :
+ readb(&q->host2sp_evtq_info.end);
+}
+
+/*
+ * Send data to the SP using the given buffer queue, or the event queue
+ * if queue < 0.
+ */
+static int ipu3_css_queue_data(struct ipu3_css *css,
+ int queue, int thread, u32 data)
+{
+ static const unsigned int sp;
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
+ struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
+ bi->info.sp.host_sp_queue;
+ u8 size, start, end, end2;
+
+ if (queue >= 0) {
+ size = readb(&q->host2sp_bufq_info[thread][queue].size);
+ start = readb(&q->host2sp_bufq_info[thread][queue].start);
+ end = readb(&q->host2sp_bufq_info[thread][queue].end);
+ } else {
+ size = readb(&q->host2sp_evtq_info.size);
+ start = readb(&q->host2sp_evtq_info.start);
+ end = readb(&q->host2sp_evtq_info.end);
+ }
+
+ if (size == 0)
+ return -EIO;
+
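+	/*
+	 * The queues are circular buffers with one slot always left
+	 * unused: start == end means empty and (end + 1) % size == start
+	 * means full.
+	 */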
+ end2 = (end + 1) % size;
+ if (end2 == start)
+ return -EBUSY; /* Queue full */
+
+ if (queue >= 0) {
+ writel(data, &q->host2sp_bufq[thread][queue][end]);
+ writeb(end2, &q->host2sp_bufq_info[thread][queue].end);
+ } else {
+ writel(data, &q->host2sp_evtq[end]);
+ writeb(end2, &q->host2sp_evtq_info.end);
+ }
+
+ return 0;
+}
+
+/* Receive data using given buffer queue, or if queue < 0, event queue. */
+static int ipu3_css_dequeue_data(struct ipu3_css *css, int queue, u32 *data)
+{
+ static const unsigned int sp;
+ void __iomem *const base = css->base;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[sp]];
+ struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) +
+ bi->info.sp.host_sp_queue;
+ u8 size, start, end, start2;
+
+ if (queue >= 0) {
+ size = readb(&q->sp2host_bufq_info[queue].size);
+ start = readb(&q->sp2host_bufq_info[queue].start);
+ end = readb(&q->sp2host_bufq_info[queue].end);
+ } else {
+ size = readb(&q->sp2host_evtq_info.size);
+ start = readb(&q->sp2host_evtq_info.start);
+ end = readb(&q->sp2host_evtq_info.end);
+ }
+
+ if (size == 0)
+ return -EIO;
+
+ if (end == start)
+ return -EBUSY; /* Queue empty */
+
+ start2 = (start + 1) % size;
+
+ if (queue >= 0) {
+ *data = readl(&q->sp2host_bufq[queue][start]);
+ writeb(start2, &q->sp2host_bufq_info[queue].start);
+ } else {
+ int r;
+
+ *data = readl(&q->sp2host_evtq[start]);
+ writeb(start2, &q->sp2host_evtq_info.start);
+
+ /* Acknowledge events dequeued from event queue */
+ r = ipu3_css_queue_data(css, queue, 0,
+ IMGU_ABI_EVENT_EVENT_DEQUEUED);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
+/* Free binary-specific resources */
+static void ipu3_css_binary_cleanup(struct ipu3_css *css, unsigned int pipe)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ unsigned int i, j;
+
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ for (j = 0; j < IMGU_ABI_PARAM_CLASS_NUM - 1; j++)
+ for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
+ ipu3_dmamap_free(imgu,
+ &css_pipe->binary_params_cs[j][i]);
+
+ j = IPU3_CSS_AUX_FRAME_REF;
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ ipu3_dmamap_free(imgu,
+ &css_pipe->aux_frames[j].mem[i]);
+
+ j = IPU3_CSS_AUX_FRAME_TNR;
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ ipu3_dmamap_free(imgu,
+ &css_pipe->aux_frames[j].mem[i]);
+}
+
+static int ipu3_css_binary_preallocate(struct ipu3_css *css, unsigned int pipe)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ unsigned int i, j;
+
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ for (j = IMGU_ABI_PARAM_CLASS_CONFIG;
+ j < IMGU_ABI_PARAM_CLASS_NUM; j++)
+ for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->binary_params_cs[j - 1][i],
+ CSS_ABI_SIZE))
+ goto out_of_memory;
+
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].
+ mem[i], CSS_BDS_SIZE))
+ goto out_of_memory;
+
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].
+ mem[i], CSS_GDC_SIZE))
+ goto out_of_memory;
+
+ return 0;
+
+out_of_memory:
+ ipu3_css_binary_cleanup(css, pipe);
+ return -ENOMEM;
+}
+
+/* Allocate binary-specific resources */
+static int ipu3_css_binary_setup(struct ipu3_css *css, unsigned int pipe)
+{
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css_pipe->bindex];
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ int i, j, size;
+ static const int BYPC = 2; /* Bytes per component */
+ unsigned int w, h;
+
+ /* Allocate parameter memory blocks for this binary */
+
+ for (j = IMGU_ABI_PARAM_CLASS_CONFIG; j < IMGU_ABI_PARAM_CLASS_NUM; j++)
+ for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++) {
+ if (ipu3_css_dma_buffer_resize(
+ imgu,
+ &css_pipe->binary_params_cs[j - 1][i],
+ bi->info.isp.sp.mem_initializers.params[j][i].size))
+ goto out_of_memory;
+ }
+
+ /* Allocate internal frame buffers */
+
+ /* Reference frames for DVS, FRAME_FORMAT_YUV420_16 */
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel = BYPC;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].width =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].height,
+ IMGU_DVS_BLOCK_H) + 2 * IMGU_GDC_BUF_Y;
+ h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
+ w = ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
+ 2 * IPU3_UAPI_ISP_VEC_ELEMS) + 2 * IMGU_GDC_BUF_X;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel * w;
+ size = w * h * BYPC + (w / 2) * (h / 2) * BYPC * 2;
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ if (ipu3_css_dma_buffer_resize(
+ imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i],
+ size))
+ goto out_of_memory;
+
+ /* TNR frames for temporal noise reduction, FRAME_FORMAT_YUV_LINE */
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperpixel = 1;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width =
+ roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].width,
+ bi->info.isp.sp.block.block_width *
+ IPU3_UAPI_ISP_VEC_ELEMS);
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height =
+ roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].height,
+ bi->info.isp.sp.block.output_block_height);
+
+ w = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline = w;
+ h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
+ size = w * ALIGN(h * 3 / 2 + 3, 2); /* +3 for vf_pp prefetch */
+ for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
+ if (ipu3_css_dma_buffer_resize(
+ imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i],
+ size))
+ goto out_of_memory;
+
+ return 0;
+
+out_of_memory:
+ ipu3_css_binary_cleanup(css, pipe);
+ return -ENOMEM;
+}
+
+int ipu3_css_start_streaming(struct ipu3_css *css)
+{
+ u32 data;
+ int r, pipe;
+
+ if (css->streaming)
+ return -EPROTO;
+
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = ipu3_css_binary_setup(css, pipe);
+ if (r < 0)
+ return r;
+ }
+
+ r = ipu3_css_hw_init(css);
+ if (r < 0)
+ return r;
+
+ r = ipu3_css_hw_start(css);
+ if (r < 0)
+ goto fail;
+
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = ipu3_css_pipeline_init(css, pipe);
+ if (r < 0)
+ goto fail;
+ }
+
+ css->streaming = true;
+
+ ipu3_css_hw_enable_irq(css);
+
+ /* Initialize parameters to default */
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = ipu3_css_set_parameters(css, pipe, NULL);
+ if (r < 0)
+ goto fail;
+ }
+
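+	/* Dequeue and discard any stale buffers from queues A and B */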
+ while (!(r = ipu3_css_dequeue_data(css, IMGU_ABI_QUEUE_A_ID, &data)))
+ ;
+ if (r != -EBUSY)
+ goto fail;
+
+ while (!(r = ipu3_css_dequeue_data(css, IMGU_ABI_QUEUE_B_ID, &data)))
+ ;
+ if (r != -EBUSY)
+ goto fail;
+
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_START_STREAM |
+ pipe << 16);
+ if (r < 0)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ css->streaming = false;
+ ipu3_css_hw_cleanup(css);
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ ipu3_css_pipeline_cleanup(css, pipe);
+ ipu3_css_binary_cleanup(css, pipe);
+ }
+
+ return r;
+}
+
+void ipu3_css_stop_streaming(struct ipu3_css *css)
+{
+ struct ipu3_css_buffer *b, *b0;
+ int q, r, pipe;
+
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_STOP_STREAM);
+ if (r < 0)
+ dev_warn(css->dev, "failed on stop stream event\n");
+ }
+
+ if (!css->streaming)
+ return;
+
+ ipu3_css_hw_stop(css);
+
+ ipu3_css_hw_cleanup(css);
+
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ ipu3_css_pipeline_cleanup(css, pipe);
+
+ spin_lock(&css_pipe->qlock);
+ for (q = 0; q < IPU3_CSS_QUEUES; q++)
+ list_for_each_entry_safe(b, b0,
+ &css_pipe->queue[q].bufs,
+ list) {
+ b->state = IPU3_CSS_BUFFER_FAILED;
+ list_del(&b->list);
+ }
+ spin_unlock(&css_pipe->qlock);
+ }
+
+ css->streaming = false;
+}
+
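+/* Check whether all buffer queues of the given pipe are empty */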
+bool ipu3_css_pipe_queue_empty(struct ipu3_css *css, unsigned int pipe)
+{
+ int q;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ spin_lock(&css_pipe->qlock);
+ for (q = 0; q < IPU3_CSS_QUEUES; q++)
+ if (!list_empty(&css_pipe->queue[q].bufs))
+ break;
+ spin_unlock(&css_pipe->qlock);
+ return (q == IPU3_CSS_QUEUES);
+}
+
+bool ipu3_css_queue_empty(struct ipu3_css *css)
+{
+ unsigned int pipe;
+	bool ret = true;
+
+ for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
+ ret &= ipu3_css_pipe_queue_empty(css, pipe);
+
+ return ret;
+}
+
+bool ipu3_css_is_streaming(struct ipu3_css *css)
+{
+ return css->streaming;
+}
+
+static int ipu3_css_map_init(struct ipu3_css *css, unsigned int pipe)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ unsigned int p, q, i;
+
+ /* Allocate and map common structures with imgu hardware */
+ for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
+ for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->
+ xmem_sp_stage_ptrs[p][i],
+ sizeof(struct imgu_abi_sp_stage)))
+ return -ENOMEM;
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->
+ xmem_isp_stage_ptrs[p][i],
+ sizeof(struct imgu_abi_isp_stage)))
+ return -ENOMEM;
+ }
+
+ if (!ipu3_dmamap_alloc(imgu, &css_pipe->sp_ddr_ptrs,
+ ALIGN(sizeof(struct imgu_abi_ddr_address_map),
+ IMGU_ABI_ISP_DDR_WORD_BYTES)))
+ return -ENOMEM;
+
+ for (q = 0; q < IPU3_CSS_QUEUES; q++) {
+ unsigned int abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
+
+ for (i = 0; i < abi_buf_num; i++)
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->abi_buffers[q][i],
+ sizeof(struct imgu_abi_buffer)))
+ return -ENOMEM;
+ }
+
+ if (ipu3_css_binary_preallocate(css, pipe)) {
+ ipu3_css_binary_cleanup(css, pipe);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ipu3_css_pipe_cleanup(struct ipu3_css *css, unsigned int pipe)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ unsigned int p, q, i, abi_buf_num;
+
+ ipu3_css_binary_cleanup(css, pipe);
+
+ for (q = 0; q < IPU3_CSS_QUEUES; q++) {
+ abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
+ for (i = 0; i < abi_buf_num; i++)
+ ipu3_dmamap_free(imgu, &css_pipe->abi_buffers[q][i]);
+ }
+
+ for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
+ for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
+ ipu3_dmamap_free(imgu,
+ &css_pipe->xmem_sp_stage_ptrs[p][i]);
+ ipu3_dmamap_free(imgu,
+ &css_pipe->xmem_isp_stage_ptrs[p][i]);
+ }
+
+ ipu3_dmamap_free(imgu, &css_pipe->sp_ddr_ptrs);
+}
+
+void ipu3_css_cleanup(struct ipu3_css *css)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ unsigned int pipe;
+
+ ipu3_css_stop_streaming(css);
+ for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
+ ipu3_css_pipe_cleanup(css, pipe);
+ ipu3_dmamap_free(imgu, &css->xmem_sp_group_ptrs);
+ ipu3_css_fw_cleanup(css);
+}
+
+int ipu3_css_init(struct device *dev, struct ipu3_css *css,
+ void __iomem *base, int length)
+{
+ struct imgu_device *imgu = dev_get_drvdata(dev);
+ int r, q, pipe;
+
+ /* Initialize main data structure */
+ css->dev = dev;
+ css->base = base;
+ css->iomem_length = length;
+
+ for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++) {
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ css_pipe->vf_output_en = false;
+ spin_lock_init(&css_pipe->qlock);
+ css_pipe->bindex = IPU3_CSS_DEFAULT_BINARY;
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
+ for (q = 0; q < IPU3_CSS_QUEUES; q++) {
+ r = ipu3_css_queue_init(&css_pipe->queue[q], NULL, 0);
+ if (r)
+ return r;
+ }
+ r = ipu3_css_map_init(css, pipe);
+ if (r) {
+ ipu3_css_cleanup(css);
+ return r;
+ }
+ }
+ if (!ipu3_dmamap_alloc(imgu, &css->xmem_sp_group_ptrs,
+ sizeof(struct imgu_abi_sp_group)))
+ return -ENOMEM;
+
+ r = ipu3_css_fw_init(css);
+ if (r)
+ return r;
+
+ return 0;
+}
+
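+/*
+ * Clamp the given resolution to the supported minimum and round it to
+ * the nearest multiple of align.
+ */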
+static u32 ipu3_css_adjust(u32 res, u32 align)
+{
+ u32 val = max_t(u32, IPU3_CSS_MIN_RES, res);
+
+ return DIV_ROUND_CLOSEST(val, align) * align;
+}
+
+/* Select a binary matching the required resolutions and formats */
+static int ipu3_css_find_binary(struct ipu3_css *css,
+ unsigned int pipe,
+ struct ipu3_css_queue queue[IPU3_CSS_QUEUES],
+ struct v4l2_rect rects[IPU3_CSS_RECTS])
+{
+ const int binary_nr = css->fwp->file_header.binary_nr;
+ unsigned int binary_mode =
+ (css->pipes[pipe].pipe_id == IPU3_CSS_PIPE_ID_CAPTURE) ?
+ IA_CSS_BINARY_MODE_PRIMARY : IA_CSS_BINARY_MODE_VIDEO;
+ const struct v4l2_pix_format_mplane *in =
+ &queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
+ const struct v4l2_pix_format_mplane *out =
+ &queue[IPU3_CSS_QUEUE_OUT].fmt.mpix;
+ const struct v4l2_pix_format_mplane *vf =
+ &queue[IPU3_CSS_QUEUE_VF].fmt.mpix;
+ u32 stripe_w = 0, stripe_h = 0;
+ const char *name;
+ int i, j;
+
+ if (!ipu3_css_queue_enabled(&queue[IPU3_CSS_QUEUE_IN]))
+ return -EINVAL;
+
+	/* Find out the stripe size boundary */
+ for (i = 0; i < binary_nr; i++) {
+ struct imgu_fw_info *bi = &css->fwp->binary_header[i];
+
+ u32 max_width = bi->info.isp.sp.output.max_width;
+ u32 max_height = bi->info.isp.sp.output.max_height;
+
+ if (bi->info.isp.sp.iterator.num_stripes <= 1) {
+ stripe_w = stripe_w ?
+ min(stripe_w, max_width) : max_width;
+ stripe_h = stripe_h ?
+ min(stripe_h, max_height) : max_height;
+ }
+ }
+
+ for (i = 0; i < binary_nr; i++) {
+ struct imgu_fw_info *bi = &css->fwp->binary_header[i];
+ enum imgu_abi_frame_format q_fmt;
+
+ name = (void *)css->fwp + bi->blob.prog_name_offset;
+
+ /* Check that binary supports memory-to-memory processing */
+ if (bi->info.isp.sp.input.source !=
+ IMGU_ABI_BINARY_INPUT_SOURCE_MEMORY)
+ continue;
+
+ /* Check that binary supports raw10 input */
+ if (!bi->info.isp.sp.enable.input_feeder &&
+ !bi->info.isp.sp.enable.input_raw)
+ continue;
+
+ /* Check binary mode */
+ if (bi->info.isp.sp.pipeline.mode != binary_mode)
+ continue;
+
+ /* Since input is RGGB bayer, need to process colors */
+ if (bi->info.isp.sp.enable.luma_only)
+ continue;
+
+ if (in->width < bi->info.isp.sp.input.min_width ||
+ in->width > bi->info.isp.sp.input.max_width ||
+ in->height < bi->info.isp.sp.input.min_height ||
+ in->height > bi->info.isp.sp.input.max_height)
+ continue;
+
+ if (ipu3_css_queue_enabled(&queue[IPU3_CSS_QUEUE_OUT])) {
+ if (bi->info.isp.num_output_pins <= 0)
+ continue;
+
+ q_fmt = queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ for (j = 0; j < bi->info.isp.num_output_formats; j++)
+ if (bi->info.isp.output_formats[j] == q_fmt)
+ break;
+ if (j >= bi->info.isp.num_output_formats)
+ continue;
+
+ if (out->width < bi->info.isp.sp.output.min_width ||
+ out->width > bi->info.isp.sp.output.max_width ||
+ out->height < bi->info.isp.sp.output.min_height ||
+ out->height > bi->info.isp.sp.output.max_height)
+ continue;
+
+ if (out->width > bi->info.isp.sp.internal.max_width ||
+ out->height > bi->info.isp.sp.internal.max_height)
+ continue;
+ }
+
+ if (ipu3_css_queue_enabled(&queue[IPU3_CSS_QUEUE_VF])) {
+ if (bi->info.isp.num_output_pins <= 1)
+ continue;
+
+ q_fmt = queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
+ for (j = 0; j < bi->info.isp.num_output_formats; j++)
+ if (bi->info.isp.output_formats[j] == q_fmt)
+ break;
+ if (j >= bi->info.isp.num_output_formats)
+ continue;
+
+ if (vf->width < bi->info.isp.sp.output.min_width ||
+ vf->width > bi->info.isp.sp.output.max_width ||
+ vf->height < bi->info.isp.sp.output.min_height ||
+ vf->height > bi->info.isp.sp.output.max_height)
+ continue;
+ }
+
+ /* All checks passed, select the binary */
+ dev_dbg(css->dev, "using binary %s id = %u\n", name,
+ bi->info.isp.sp.id);
+ return i;
+ }
+
+	/* Could not find a suitable binary for these parameters */
+ return -EINVAL;
+}
+
+/*
+ * Check that there is a binary matching the requirements. Parameters may
+ * be NULL, indicating a disabled input/output. Returns a negative value
+ * if the given parameters cannot be supported or on error, and zero or a
+ * positive value indicating the number of the binary found. May modify
+ * the given parameters if no exact match is found.
+ */
+int ipu3_css_fmt_try(struct ipu3_css *css,
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe)
+{
+ static const u32 EFF_ALIGN_W = 2;
+ static const u32 BDS_ALIGN_W = 4;
+ static const u32 OUT_ALIGN_W = 8;
+ static const u32 OUT_ALIGN_H = 4;
+ static const u32 VF_ALIGN_W = 2;
+ static const char *qnames[IPU3_CSS_QUEUES] = {
+ [IPU3_CSS_QUEUE_IN] = "in",
+ [IPU3_CSS_QUEUE_PARAMS] = "params",
+ [IPU3_CSS_QUEUE_OUT] = "out",
+ [IPU3_CSS_QUEUE_VF] = "vf",
+ [IPU3_CSS_QUEUE_STAT_3A] = "3a",
+ };
+ static const char *rnames[IPU3_CSS_RECTS] = {
+ [IPU3_CSS_RECT_EFFECTIVE] = "effective resolution",
+ [IPU3_CSS_RECT_BDS] = "bayer-domain scaled resolution",
+ [IPU3_CSS_RECT_ENVELOPE] = "DVS envelope size",
+ [IPU3_CSS_RECT_GDC] = "GDC output res",
+ };
+ struct v4l2_rect r[IPU3_CSS_RECTS] = { };
+ struct v4l2_rect *const eff = &r[IPU3_CSS_RECT_EFFECTIVE];
+ struct v4l2_rect *const bds = &r[IPU3_CSS_RECT_BDS];
+ struct v4l2_rect *const env = &r[IPU3_CSS_RECT_ENVELOPE];
+ struct v4l2_rect *const gdc = &r[IPU3_CSS_RECT_GDC];
+ struct ipu3_css_queue q[IPU3_CSS_QUEUES];
+ struct v4l2_pix_format_mplane *const in =
+ &q[IPU3_CSS_QUEUE_IN].fmt.mpix;
+ struct v4l2_pix_format_mplane *const out =
+ &q[IPU3_CSS_QUEUE_OUT].fmt.mpix;
+ struct v4l2_pix_format_mplane *const vf =
+ &q[IPU3_CSS_QUEUE_VF].fmt.mpix;
+ int i, s;
+
+ /* Adjust all formats, get statistics buffer sizes and formats */
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ if (fmts[i])
+ dev_dbg(css->dev, "%s %s: (%i,%i) fmt 0x%x\n", __func__,
+ qnames[i], fmts[i]->width, fmts[i]->height,
+ fmts[i]->pixelformat);
+ else
+ dev_dbg(css->dev, "%s %s: (not set)\n", __func__,
+ qnames[i]);
+ if (ipu3_css_queue_init(&q[i], fmts[i],
+ IPU3_CSS_QUEUE_TO_FLAGS(i))) {
+			dev_notice(css->dev, "cannot initialize queue %s\n",
+ qnames[i]);
+ return -EINVAL;
+ }
+ }
+ for (i = 0; i < IPU3_CSS_RECTS; i++) {
+ if (rects[i]) {
+ dev_dbg(css->dev, "%s %s: (%i,%i)\n", __func__,
+ rnames[i], rects[i]->width, rects[i]->height);
+ r[i].width = rects[i]->width;
+ r[i].height = rects[i]->height;
+ } else {
+ dev_dbg(css->dev, "%s %s: (not set)\n", __func__,
+ rnames[i]);
+ }
+ /* For now, force known good resolutions */
+ r[i].left = 0;
+ r[i].top = 0;
+ }
+
+	/* The input and main output queues must always be enabled */
+ if (!ipu3_css_queue_enabled(&q[IPU3_CSS_QUEUE_IN]) ||
+ !ipu3_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
+ dev_warn(css->dev, "required queues are disabled\n");
+ return -EINVAL;
+ }
+
+ if (!ipu3_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
+ out->width = in->width;
+ out->height = in->height;
+ }
+ if (eff->width <= 0 || eff->height <= 0) {
+ eff->width = in->width;
+ eff->height = in->height;
+ }
+ if (bds->width <= 0 || bds->height <= 0) {
+ bds->width = out->width;
+ bds->height = out->height;
+ }
+ if (gdc->width <= 0 || gdc->height <= 0) {
+ gdc->width = out->width;
+ gdc->height = out->height;
+ }
+
+ in->width = ipu3_css_adjust(in->width, 1);
+ in->height = ipu3_css_adjust(in->height, 1);
+ eff->width = ipu3_css_adjust(eff->width, EFF_ALIGN_W);
+ eff->height = ipu3_css_adjust(eff->height, 1);
+ bds->width = ipu3_css_adjust(bds->width, BDS_ALIGN_W);
+ bds->height = ipu3_css_adjust(bds->height, 1);
+ gdc->width = ipu3_css_adjust(gdc->width, OUT_ALIGN_W);
+ gdc->height = ipu3_css_adjust(gdc->height, OUT_ALIGN_H);
+ out->width = ipu3_css_adjust(out->width, OUT_ALIGN_W);
+ out->height = ipu3_css_adjust(out->height, OUT_ALIGN_H);
+ vf->width = ipu3_css_adjust(vf->width, VF_ALIGN_W);
+ vf->height = ipu3_css_adjust(vf->height, 1);
+
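+	/*
+	 * The DVS envelope is the margin left between the BDS resolution
+	 * and the GDC output after subtracting the filter overhead, but
+	 * never less than MIN_ENVELOPE.
+	 */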
+ s = (bds->width - gdc->width) / 2 - FILTER_SIZE;
+ env->width = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
+ s = (bds->height - gdc->height) / 2 - FILTER_SIZE;
+ env->height = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
+
+ css->pipes[pipe].bindex =
+ ipu3_css_find_binary(css, pipe, q, r);
+ if (css->pipes[pipe].bindex < 0) {
+ dev_err(css->dev, "failed to find suitable binary\n");
+ return -EINVAL;
+ }
+
+	dev_dbg(css->dev, "Binary index %d for pipe %d found.\n",
+ css->pipes[pipe].bindex, pipe);
+
+ /* Final adjustment and set back the queried formats */
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ if (fmts[i]) {
+ if (ipu3_css_queue_init(&q[i], &q[i].fmt.mpix,
+ IPU3_CSS_QUEUE_TO_FLAGS(i))) {
+ dev_err(css->dev,
+ "final resolution adjustment failed\n");
+ return -EINVAL;
+ }
+ *fmts[i] = q[i].fmt.mpix;
+ }
+ }
+
+ for (i = 0; i < IPU3_CSS_RECTS; i++)
+ if (rects[i])
+ *rects[i] = r[i];
+
+ dev_dbg(css->dev,
+		"in(%u,%u) if(%u,%u) ds(%u,%u) gdc(%u,%u) out(%u,%u) vf(%u,%u)\n",
+ in->width, in->height, eff->width, eff->height,
+ bds->width, bds->height, gdc->width, gdc->height,
+ out->width, out->height, vf->width, vf->height);
+
+ return 0;
+}
+
+int ipu3_css_fmt_set(struct ipu3_css *css,
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe)
+{
+ struct v4l2_rect rect_data[IPU3_CSS_RECTS];
+ struct v4l2_rect *all_rects[IPU3_CSS_RECTS];
+ int i, r;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ for (i = 0; i < IPU3_CSS_RECTS; i++) {
+ if (rects[i])
+ rect_data[i] = *rects[i];
+ else
+ memset(&rect_data[i], 0, sizeof(rect_data[i]));
+ all_rects[i] = &rect_data[i];
+ }
+ r = ipu3_css_fmt_try(css, fmts, all_rects, pipe);
+ if (r < 0)
+ return r;
+
+ for (i = 0; i < IPU3_CSS_QUEUES; i++)
+ if (ipu3_css_queue_init(&css_pipe->queue[i], fmts[i],
+ IPU3_CSS_QUEUE_TO_FLAGS(i)))
+ return -EINVAL;
+ for (i = 0; i < IPU3_CSS_RECTS; i++) {
+ css_pipe->rect[i] = rect_data[i];
+ if (rects[i])
+ *rects[i] = rect_data[i];
+ }
+
+ return 0;
+}
+
+int ipu3_css_meta_fmt_set(struct v4l2_meta_format *fmt)
+{
+ switch (fmt->dataformat) {
+ case V4L2_META_FMT_IPU3_PARAMS:
+ fmt->buffersize = sizeof(struct ipu3_uapi_params);
+ break;
+ case V4L2_META_FMT_IPU3_STAT_3A:
+ fmt->buffersize = sizeof(struct ipu3_uapi_stats_3a);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Queue the given buffer to the CSS. ipu3_css_buf_prepare() must first
+ * have been called for the buffer. May be called from interrupt context.
+ * Returns 0 on success, -EBUSY if the buffer queue is full, or another
+ * negative error code on other error conditions.
+ */
+int ipu3_css_buf_queue(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_css_buffer *b)
+{
+ struct imgu_abi_buffer *abi_buf;
+ struct imgu_addr_t *buf_addr;
+ u32 data;
+ int r;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ if (!css->streaming)
+ return -EPROTO; /* CSS or buffer in wrong state */
+
+ if (b->queue >= IPU3_CSS_QUEUES || !ipu3_css_queues[b->queue].qid)
+ return -EINVAL;
+
+ b->queue_pos = ipu3_css_queue_pos(css, ipu3_css_queues[b->queue].qid,
+ pipe);
+
+ if (b->queue_pos >= ARRAY_SIZE(css->pipes[pipe].abi_buffers[b->queue]))
+ return -EIO;
+ abi_buf = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].vaddr;
+
+ /* Fill struct abi_buffer for firmware */
+ memset(abi_buf, 0, sizeof(*abi_buf));
+
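+	/* Patch the buffer's DMA address into the queue-specific field */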
+ buf_addr = (void *)abi_buf + ipu3_css_queues[b->queue].ptr_ofs;
+ *(imgu_addr_t *)buf_addr = b->daddr;
+
+ if (b->queue == IPU3_CSS_QUEUE_STAT_3A)
+ abi_buf->payload.s3a.data.dmem.s3a_tbl = b->daddr;
+
+ if (b->queue == IPU3_CSS_QUEUE_OUT)
+ abi_buf->payload.frame.padded_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
+
+ if (b->queue == IPU3_CSS_QUEUE_VF)
+ abi_buf->payload.frame.padded_width =
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
+
+ spin_lock(&css_pipe->qlock);
+ list_add_tail(&b->list, &css_pipe->queue[b->queue].bufs);
+ spin_unlock(&css_pipe->qlock);
+ b->state = IPU3_CSS_BUFFER_QUEUED;
+
+ data = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].daddr;
+ r = ipu3_css_queue_data(css, ipu3_css_queues[b->queue].qid,
+ pipe, data);
+ if (r < 0)
+ goto queueing_failed;
+
+ data = IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
+ ipu3_css_queues[b->queue].qid);
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe, data);
+ if (r < 0)
+ goto queueing_failed;
+
+ dev_dbg(css->dev, "queued buffer %p to css queue %i in pipe %d\n",
+ b, b->queue, pipe);
+
+ return 0;
+
+queueing_failed:
+ b->state = (r == -EBUSY || r == -EAGAIN) ?
+ IPU3_CSS_BUFFER_NEW : IPU3_CSS_BUFFER_FAILED;
+ list_del(&b->list);
+
+ return r;
+}
+
+/*
+ * Get the next ready CSS buffer. Returns ERR_PTR(-EAGAIN), in which case
+ * the function should be called again, or ERR_PTR(-EBUSY), which means
+ * that there are no more buffers available. May be called from interrupt
+ * context.
+ */
+struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
+{
+ static const unsigned char evtype_to_queue[] = {
+ [IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE] = IPU3_CSS_QUEUE_IN,
+ [IMGU_ABI_EVTTYPE_OUT_FRAME_DONE] = IPU3_CSS_QUEUE_OUT,
+ [IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE] = IPU3_CSS_QUEUE_VF,
+ [IMGU_ABI_EVTTYPE_3A_STATS_DONE] = IPU3_CSS_QUEUE_STAT_3A,
+ };
+ struct ipu3_css_buffer *b = ERR_PTR(-EAGAIN);
+ u32 event, daddr;
+ int evtype, pipe, pipeid, queue, qid, r;
+ struct ipu3_css_pipe *css_pipe;
+
+ if (!css->streaming)
+ return ERR_PTR(-EPROTO);
+
+ r = ipu3_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
+ if (r < 0)
+ return ERR_PTR(r);
+
+ evtype = (event & IMGU_ABI_EVTTYPE_EVENT_MASK) >>
+ IMGU_ABI_EVTTYPE_EVENT_SHIFT;
+
+ switch (evtype) {
+ case IMGU_ABI_EVTTYPE_OUT_FRAME_DONE:
+ case IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE:
+ case IMGU_ABI_EVTTYPE_3A_STATS_DONE:
+ case IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE:
+ pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >>
+ IMGU_ABI_EVTTYPE_PIPE_SHIFT;
+ pipeid = (event & IMGU_ABI_EVTTYPE_PIPEID_MASK) >>
+ IMGU_ABI_EVTTYPE_PIPEID_SHIFT;
+ queue = evtype_to_queue[evtype];
+ qid = ipu3_css_queues[queue].qid;
+
+ if (pipe >= IMGU_MAX_PIPE_NUM) {
+ dev_err(css->dev, "Invalid pipe: %i\n", pipe);
+ return ERR_PTR(-EIO);
+ }
+
+ if (qid >= IMGU_ABI_QUEUE_NUM) {
+ dev_err(css->dev, "Invalid qid: %i\n", qid);
+ return ERR_PTR(-EIO);
+ }
+ css_pipe = &css->pipes[pipe];
+ dev_dbg(css->dev,
+ "event: buffer done 0x%x queue %i pipe %i pipeid %i\n",
+ event, queue, pipe, pipeid);
+
+ r = ipu3_css_dequeue_data(css, qid, &daddr);
+ if (r < 0) {
+ dev_err(css->dev, "failed to dequeue buffer\n");
+ /* Force real error, not -EBUSY */
+ return ERR_PTR(-EIO);
+ }
+
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_BUFFER_DEQUEUED(qid));
+ if (r < 0) {
+ dev_err(css->dev, "failed to queue event\n");
+ return ERR_PTR(-EIO);
+ }
+
+ spin_lock(&css_pipe->qlock);
+ if (list_empty(&css_pipe->queue[queue].bufs)) {
+ spin_unlock(&css_pipe->qlock);
+ dev_err(css->dev, "event on empty queue\n");
+ return ERR_PTR(-EIO);
+ }
+ b = list_first_entry(&css_pipe->queue[queue].bufs,
+ struct ipu3_css_buffer, list);
+ if (queue != b->queue ||
+ daddr != css_pipe->abi_buffers
+ [b->queue][b->queue_pos].daddr) {
+ spin_unlock(&css_pipe->qlock);
+ dev_err(css->dev, "dequeued bad buffer 0x%x\n", daddr);
+ return ERR_PTR(-EIO);
+ }
+
+		dev_dbg(css->dev, "buffer 0x%08x done from pipe %d\n", daddr, pipe);
+ b->pipe = pipe;
+ b->state = IPU3_CSS_BUFFER_DONE;
+ list_del(&b->list);
+ spin_unlock(&css_pipe->qlock);
+ break;
+ case IMGU_ABI_EVTTYPE_PIPELINE_DONE:
+ pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >>
+ IMGU_ABI_EVTTYPE_PIPE_SHIFT;
+ if (pipe >= IMGU_MAX_PIPE_NUM) {
+ dev_err(css->dev, "Invalid pipe: %i\n", pipe);
+ return ERR_PTR(-EIO);
+ }
+
+ css_pipe = &css->pipes[pipe];
+		dev_dbg(css->dev, "event: pipeline done 0x%08x for pipe %d\n",
+ event, pipe);
+ break;
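+	/* Timer events are sent in pairs, so dequeue the second half too */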
+ case IMGU_ABI_EVTTYPE_TIMER:
+ r = ipu3_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
+ if (r < 0)
+ return ERR_PTR(r);
+
+ if ((event & IMGU_ABI_EVTTYPE_EVENT_MASK) >>
+ IMGU_ABI_EVTTYPE_EVENT_SHIFT == IMGU_ABI_EVTTYPE_TIMER)
+ dev_dbg(css->dev, "event: timer\n");
+ else
+ dev_warn(css->dev, "half of timer event missing\n");
+ break;
+ case IMGU_ABI_EVTTYPE_FW_WARNING:
+ dev_warn(css->dev, "event: firmware warning 0x%x\n", event);
+ break;
+ case IMGU_ABI_EVTTYPE_FW_ASSERT:
+ dev_err(css->dev,
+ "event: firmware assert 0x%x module_id %i line_no %i\n",
+ event,
+ (event & IMGU_ABI_EVTTYPE_MODULEID_MASK) >>
+ IMGU_ABI_EVTTYPE_MODULEID_SHIFT,
+ swab16((event & IMGU_ABI_EVTTYPE_LINENO_MASK) >>
+ IMGU_ABI_EVTTYPE_LINENO_SHIFT));
+ break;
+ default:
+ dev_warn(css->dev, "received unknown event 0x%x\n", event);
+ }
+
+ return b;
+}
+
+/*
+ * Get a new set of parameters from the pool and initialize them based
+ * on set_params. set_params may be NULL, in which case the previously
+ * set parameters are reused. If parameters haven't been set previously,
+ * initialize from scratch.
+ *
+ * Returns 0 on success or a negative value on error.
+ */
+int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_params *set_params)
+{
+ static const unsigned int queue_id = IMGU_ABI_QUEUE_A_ID;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ const int stage = 0;
+ const struct imgu_fw_info *bi;
+ int obgrid_size;
+ unsigned int stripes, i;
+ struct ipu3_uapi_flags *use = set_params ? &set_params->use : NULL;
+
+ /* Destination buffers which are filled here */
+ struct imgu_abi_parameter_set_info *param_set;
+ struct imgu_abi_acc_param *acc = NULL;
+ struct imgu_abi_gdc_warp_param *gdc = NULL;
+ struct ipu3_uapi_obgrid_param *obgrid = NULL;
+ const struct ipu3_css_map *map;
+ void *vmem0 = NULL;
+ void *dmem0 = NULL;
+
+ enum imgu_abi_memories m;
+ int r = -EBUSY;
+
+ if (!css->streaming)
+ return -EPROTO;
+
+	dev_dbg(css->dev, "%s for pipe %d\n", __func__, pipe);
+
+ bi = &css->fwp->binary_header[css_pipe->bindex];
+ obgrid_size = ipu3_css_fw_obgrid_size(bi);
+ stripes = bi->info.isp.sp.iterator.num_stripes ? : 1;
+
+ /*
+ * TODO(b/118782861): If userspace queues more than 4 buffers, the
+ * parameters from previous buffers will be overwritten. Fix the driver
+ * not to allow this.
+ */
+ ipu3_css_pool_get(&css_pipe->pool.parameter_set_info);
+ param_set = ipu3_css_pool_last(&css_pipe->pool.parameter_set_info,
+ 0)->vaddr;
+
+ /* Get a new acc only if new parameters given, or none yet */
+ map = ipu3_css_pool_last(&css_pipe->pool.acc, 0);
+ if (set_params || !map->vaddr) {
+ ipu3_css_pool_get(&css_pipe->pool.acc);
+ map = ipu3_css_pool_last(&css_pipe->pool.acc, 0);
+ acc = map->vaddr;
+ }
+
+ /* Get new VMEM0 only if needed, or none yet */
+ m = IMGU_ABI_MEM_ISP_VMEM0;
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ if (!map->vaddr || (set_params && (set_params->use.lin_vmem_params ||
+ set_params->use.tnr3_vmem_params ||
+ set_params->use.xnr3_vmem_params))) {
+ ipu3_css_pool_get(&css_pipe->pool.binary_params_p[m]);
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ vmem0 = map->vaddr;
+ }
+
+ /* Get new DMEM0 only if needed, or none yet */
+ m = IMGU_ABI_MEM_ISP_DMEM0;
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ if (!map->vaddr || (set_params && (set_params->use.tnr3_dmem_params ||
+ set_params->use.xnr3_dmem_params))) {
+ ipu3_css_pool_get(&css_pipe->pool.binary_params_p[m]);
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ dmem0 = map->vaddr;
+ }
+
+ /* Configure acc parameter cluster */
+ if (acc) {
+ /* get acc_old */
+ map = ipu3_css_pool_last(&css_pipe->pool.acc, 1);
+ /* user acc */
+ r = ipu3_css_cfg_acc(css, pipe, use, acc, map->vaddr,
+ set_params ? &set_params->acc_param : NULL);
+ if (r < 0)
+ goto fail;
+ }
+
+ /* Configure late binding parameters */
+ if (vmem0) {
+ m = IMGU_ABI_MEM_ISP_VMEM0;
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
+ r = ipu3_css_cfg_vmem0(css, pipe, use, vmem0,
+ map->vaddr, set_params);
+ if (r < 0)
+ goto fail;
+ }
+
+ if (dmem0) {
+ m = IMGU_ABI_MEM_ISP_DMEM0;
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
+ r = ipu3_css_cfg_dmem0(css, pipe, use, dmem0,
+ map->vaddr, set_params);
+ if (r < 0)
+ goto fail;
+ }
+
+ /* Get a new gdc only if a new gdc is given, or none yet */
+ if (bi->info.isp.sp.enable.dvs_6axis) {
+ unsigned int a = IPU3_CSS_AUX_FRAME_REF;
+ unsigned int g = IPU3_CSS_RECT_GDC;
+ unsigned int e = IPU3_CSS_RECT_ENVELOPE;
+
+ map = ipu3_css_pool_last(&css_pipe->pool.gdc, 0);
+ if (!map->vaddr) {
+ ipu3_css_pool_get(&css_pipe->pool.gdc);
+ map = ipu3_css_pool_last(&css_pipe->pool.gdc, 0);
+ gdc = map->vaddr;
+ ipu3_css_cfg_gdc_table(map->vaddr,
+ css_pipe->aux_frames[a].bytesperline /
+ css_pipe->aux_frames[a].bytesperpixel,
+ css_pipe->aux_frames[a].height,
+ css_pipe->rect[g].width,
+ css_pipe->rect[g].height,
+ css_pipe->rect[e].width + FILTER_SIZE,
+ css_pipe->rect[e].height +
+ FILTER_SIZE);
+ }
+ }
+
+ /* Get a new obgrid only if a new obgrid is given, or none yet */
+ map = ipu3_css_pool_last(&css_pipe->pool.obgrid, 0);
+ if (!map->vaddr || (set_params && set_params->use.obgrid_param)) {
+ ipu3_css_pool_get(&css_pipe->pool.obgrid);
+ map = ipu3_css_pool_last(&css_pipe->pool.obgrid, 0);
+ obgrid = map->vaddr;
+
+ /* Configure optical black level grid (obgrid) */
+ if (set_params && set_params->use.obgrid_param)
+ for (i = 0; i < obgrid_size / sizeof(*obgrid); i++)
+ obgrid[i] = set_params->obgrid_param;
+ else
+ memset(obgrid, 0, obgrid_size);
+ }
+
+ /* Configure parameter set info, queued to `queue_id' */
+
+ memset(param_set, 0, sizeof(*param_set));
+ map = ipu3_css_pool_last(&css_pipe->pool.acc, 0);
+ param_set->mem_map.acc_cluster_params_for_sp = map->daddr;
+
+ map = ipu3_css_pool_last(&css_pipe->pool.gdc, 0);
+ param_set->mem_map.dvs_6axis_params_y = map->daddr;
+
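+	/* Each stripe gets an equal slice of the optical black grid table */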
+ for (i = 0; i < stripes; i++) {
+ map = ipu3_css_pool_last(&css_pipe->pool.obgrid, 0);
+ param_set->mem_map.obgrid_tbl[i] =
+ map->daddr + (obgrid_size / stripes) * i;
+ }
+
+ for (m = 0; m < IMGU_ABI_NUM_MEMORIES; m++) {
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
+ param_set->mem_map.isp_mem_param[stage][m] = map->daddr;
+ }
+
+ /* Then queue the new parameter buffer */
+ map = ipu3_css_pool_last(&css_pipe->pool.parameter_set_info, 0);
+ r = ipu3_css_queue_data(css, queue_id, pipe, map->daddr);
+ if (r < 0)
+ goto fail;
+
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
+ queue_id));
+ if (r < 0)
+ goto fail_no_put;
+
+ /* Finally dequeue all old parameter buffers */
+
+ do {
+ u32 daddr;
+
+ r = ipu3_css_dequeue_data(css, queue_id, &daddr);
+ if (r == -EBUSY)
+ break;
+ if (r)
+ goto fail_no_put;
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_BUFFER_DEQUEUED
+ (queue_id));
+ if (r < 0) {
+ dev_err(css->dev, "failed to queue parameter event\n");
+ goto fail_no_put;
+ }
+ } while (1);
+
+ return 0;
+
+fail:
+	/*
+	 * On failure, most likely the parameter queue was full. Return the
+	 * error but continue streaming; the user can try submitting new
+	 * parameters again later.
+	 */
+
+ ipu3_css_pool_put(&css_pipe->pool.parameter_set_info);
+ if (acc)
+ ipu3_css_pool_put(&css_pipe->pool.acc);
+ if (gdc)
+ ipu3_css_pool_put(&css_pipe->pool.gdc);
+ if (obgrid)
+ ipu3_css_pool_put(&css_pipe->pool.obgrid);
+ if (vmem0)
+ ipu3_css_pool_put(
+ &css_pipe->pool.binary_params_p
+ [IMGU_ABI_MEM_ISP_VMEM0]);
+ if (dmem0)
+ ipu3_css_pool_put(
+ &css_pipe->pool.binary_params_p
+ [IMGU_ABI_MEM_ISP_DMEM0]);
+
+fail_no_put:
+ return r;
+}
+
+int ipu3_css_irq_ack(struct ipu3_css *css)
+{
+ static const int NUM_SWIRQS = 3;
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css->fw_sp[0]];
+ void __iomem *const base = css->base;
+ u32 irq_status[IMGU_IRQCTRL_NUM];
+ int i;
+
+ u32 imgu_status = readl(base + IMGU_REG_INT_STATUS);
+
+ writel(imgu_status, base + IMGU_REG_INT_STATUS);
+ for (i = 0; i < IMGU_IRQCTRL_NUM; i++)
+ irq_status[i] = readl(base + IMGU_REG_IRQCTRL_STATUS(i));
+
+ for (i = 0; i < NUM_SWIRQS; i++) {
+ if (irq_status[IMGU_IRQCTRL_SP0] & IMGU_IRQCTRL_IRQ_SW_PIN(i)) {
+ /* SP SW interrupt */
+ u32 cnt = readl(base + IMGU_REG_SP_DMEM_BASE(0) +
+ bi->info.sp.output);
+ u32 val = readl(base + IMGU_REG_SP_DMEM_BASE(0) +
+ bi->info.sp.output + 4 + 4 * i);
+
+ dev_dbg(css->dev, "%s: swirq %i cnt %i val 0x%x\n",
+ __func__, i, cnt, val);
+ }
+ }
+
+ for (i = IMGU_IRQCTRL_NUM - 1; i >= 0; i--)
+ if (irq_status[i]) {
+ writel(irq_status[i], base + IMGU_REG_IRQCTRL_CLEAR(i));
+ /* Wait for write to complete */
+ readl(base + IMGU_REG_IRQCTRL_ENABLE(i));
+ }
+
+ dev_dbg(css->dev, "%s: imgu 0x%x main 0x%x sp0 0x%x sp1 0x%x\n",
+ __func__, imgu_status, irq_status[IMGU_IRQCTRL_MAIN],
+ irq_status[IMGU_IRQCTRL_SP0], irq_status[IMGU_IRQCTRL_SP1]);
+
+ if (!imgu_status && !irq_status[IMGU_IRQCTRL_MAIN])
+ return -ENOMSG;
+
+ return 0;
+}
diff --git a/drivers/staging/media/ipu3/ipu3-css.h b/drivers/staging/media/ipu3/ipu3-css.h
new file mode 100644
index 000000000000..e88d60f1a0c3
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-css.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_CSS_H
+#define __IPU3_CSS_H
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+
+#include "ipu3-abi.h"
+#include "ipu3-css-pool.h"
+
+/* 2 stages for split isp pipeline, 1 for scaling */
+#define IMGU_NUM_SP 2
+#define IMGU_MAX_PIPELINE_NUM 20
+#define IMGU_MAX_PIPE_NUM 2
+
+/* For DVS etc., format FRAME_FMT_YUV420_16 */
+#define IPU3_CSS_AUX_FRAME_REF 0
+/* For temporal noise reduction DVS etc., format FRAME_FMT_YUV_LINE */
+#define IPU3_CSS_AUX_FRAME_TNR 1
+#define IPU3_CSS_AUX_FRAME_TYPES 2 /* REF and TNR */
+#define IPU3_CSS_AUX_FRAMES 2 /* 2 for REF and 2 for TNR */
+
+#define IPU3_CSS_QUEUE_IN 0
+#define IPU3_CSS_QUEUE_PARAMS 1
+#define IPU3_CSS_QUEUE_OUT 2
+#define IPU3_CSS_QUEUE_VF 3
+#define IPU3_CSS_QUEUE_STAT_3A 4
+#define IPU3_CSS_QUEUES 5
+
+#define IPU3_CSS_RECT_EFFECTIVE 0 /* Effective resolution */
+#define IPU3_CSS_RECT_BDS 1 /* Resolution after BDS */
+#define IPU3_CSS_RECT_ENVELOPE 2 /* DVS envelope size */
+#define IPU3_CSS_RECT_GDC 3 /* gdc output res */
+#define IPU3_CSS_RECTS 4 /* number of rects */
+
+#define IA_CSS_BINARY_MODE_PRIMARY 2
+#define IA_CSS_BINARY_MODE_VIDEO 3
+#define IPU3_CSS_DEFAULT_BINARY 3 /* default binary index */
+
+/*
+ * The pipe id type distinguishes the kinds of pipes that
+ * can be run in parallel.
+ */
+enum ipu3_css_pipe_id {
+ IPU3_CSS_PIPE_ID_PREVIEW,
+ IPU3_CSS_PIPE_ID_COPY,
+ IPU3_CSS_PIPE_ID_VIDEO,
+ IPU3_CSS_PIPE_ID_CAPTURE,
+ IPU3_CSS_PIPE_ID_YUVPP,
+ IPU3_CSS_PIPE_ID_ACC,
+ IPU3_CSS_PIPE_ID_NUM
+};
+
+struct ipu3_css_resolution {
+ u32 w;
+ u32 h;
+};
+
+enum ipu3_css_buffer_state {
+ IPU3_CSS_BUFFER_NEW, /* Not yet queued */
+ IPU3_CSS_BUFFER_QUEUED, /* Queued, waiting to be filled */
+ IPU3_CSS_BUFFER_DONE, /* Finished processing, removed from queue */
+ IPU3_CSS_BUFFER_FAILED, /* Was not processed, removed from queue */
+};
+
+struct ipu3_css_buffer {
+ /* Private fields: user doesn't touch */
+ dma_addr_t daddr;
+ unsigned int queue;
+ enum ipu3_css_buffer_state state;
+ struct list_head list;
+ u8 queue_pos;
+ unsigned int pipe;
+};
+
+struct ipu3_css_format {
+ u32 pixelformat;
+ enum v4l2_colorspace colorspace;
+ enum imgu_abi_frame_format frame_format;
+ enum imgu_abi_bayer_order bayer_order;
+ enum imgu_abi_osys_format osys_format;
+ enum imgu_abi_osys_tiling osys_tiling;
+ u32 bytesperpixel_num; /* Bytes per pixel in first plane * 50 */
+ u8 bit_depth; /* Effective bits per pixel */
+ u8 chroma_decim; /* Chroma plane decimation, 0=no chroma plane */
+ u8 width_align; /* Alignment requirement for width_pad */
+ u8 flags;
+};
+
+struct ipu3_css_queue {
+	union {
+		struct v4l2_pix_format_mplane mpix;
+		struct v4l2_meta_format meta;
+	} fmt;
+ const struct ipu3_css_format *css_fmt;
+ unsigned int width_pad;
+ struct list_head bufs;
+};
+
+struct ipu3_css_pipe {
+ enum ipu3_css_pipe_id pipe_id;
+ unsigned int bindex;
+
+ struct ipu3_css_queue queue[IPU3_CSS_QUEUES];
+ struct v4l2_rect rect[IPU3_CSS_RECTS];
+
+ bool vf_output_en;
+ /* Protect access to queue[IPU3_CSS_QUEUES] */
+ spinlock_t qlock;
+
+ /* Data structures shared with IMGU and driver, always allocated */
+ struct ipu3_css_map sp_ddr_ptrs;
+ struct ipu3_css_map xmem_sp_stage_ptrs[IPU3_CSS_PIPE_ID_NUM]
+ [IMGU_ABI_MAX_STAGES];
+ struct ipu3_css_map xmem_isp_stage_ptrs[IPU3_CSS_PIPE_ID_NUM]
+ [IMGU_ABI_MAX_STAGES];
+
+ /*
+ * Data structures shared with IMGU and driver, binary specific.
+ * PARAM_CLASS_CONFIG and PARAM_CLASS_STATE parameters.
+ */
+ struct ipu3_css_map binary_params_cs[IMGU_ABI_PARAM_CLASS_NUM - 1]
+ [IMGU_ABI_NUM_MEMORIES];
+
+ struct {
+ struct ipu3_css_map mem[IPU3_CSS_AUX_FRAMES];
+ unsigned int width;
+ unsigned int height;
+ unsigned int bytesperline;
+ unsigned int bytesperpixel;
+ } aux_frames[IPU3_CSS_AUX_FRAME_TYPES];
+
+ struct {
+ struct ipu3_css_pool parameter_set_info;
+ struct ipu3_css_pool acc;
+ struct ipu3_css_pool gdc;
+ struct ipu3_css_pool obgrid;
+ /* PARAM_CLASS_PARAM parameters for binding while streaming */
+ struct ipu3_css_pool binary_params_p[IMGU_ABI_NUM_MEMORIES];
+ } pool;
+
+ struct ipu3_css_map abi_buffers[IPU3_CSS_QUEUES]
+ [IMGU_ABI_HOST2SP_BUFQ_SIZE];
+};
+
+/* IPU3 Camera Sub System structure */
+struct ipu3_css {
+ struct device *dev;
+ void __iomem *base;
+ const struct firmware *fw;
+ struct imgu_fw_header *fwp;
+ int iomem_length;
+ int fw_bl, fw_sp[IMGU_NUM_SP]; /* Indices of bl and SP binaries */
+ struct ipu3_css_map *binary; /* fw binaries mapped to device */
+ bool streaming; /* true when streaming is enabled */
+
+ struct ipu3_css_pipe pipes[IMGU_MAX_PIPE_NUM];
+ struct ipu3_css_map xmem_sp_group_ptrs;
+
+ /* enabled pipe(s) */
+ DECLARE_BITMAP(enabled_pipes, IMGU_MAX_PIPE_NUM);
+};
+
+/******************* css v4l *******************/
+int ipu3_css_init(struct device *dev, struct ipu3_css *css,
+ void __iomem *base, int length);
+void ipu3_css_cleanup(struct ipu3_css *css);
+int ipu3_css_fmt_try(struct ipu3_css *css,
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe);
+int ipu3_css_fmt_set(struct ipu3_css *css,
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe);
+int ipu3_css_meta_fmt_set(struct v4l2_meta_format *fmt);
+int ipu3_css_buf_queue(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_css_buffer *b);
+struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css);
+int ipu3_css_start_streaming(struct ipu3_css *css);
+void ipu3_css_stop_streaming(struct ipu3_css *css);
+bool ipu3_css_queue_empty(struct ipu3_css *css);
+bool ipu3_css_is_streaming(struct ipu3_css *css);
+bool ipu3_css_pipe_queue_empty(struct ipu3_css *css, unsigned int pipe);
+
+/******************* css hw *******************/
+int ipu3_css_set_powerup(struct device *dev, void __iomem *base);
+void ipu3_css_set_powerdown(struct device *dev, void __iomem *base);
+int ipu3_css_irq_ack(struct ipu3_css *css);
+
+/******************* set parameters ************/
+int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_params *set_params);
+
+/******************* auxiliary helpers *******************/
+static inline enum ipu3_css_buffer_state
+ipu3_css_buf_state(struct ipu3_css_buffer *b)
+{
+ return b->state;
+}
+
+/* Initialize given buffer. May be called several times. */
+static inline void ipu3_css_buf_init(struct ipu3_css_buffer *b,
+ unsigned int queue, dma_addr_t daddr)
+{
+ b->state = IPU3_CSS_BUFFER_NEW;
+ b->queue = queue;
+ b->daddr = daddr;
+}
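+
+/*
+ * Typical buffer life cycle (an illustrative sketch, not enforced by
+ * this header; "css", "pipe" and "daddr" are assumed to come from the
+ * caller):
+ *
+ *	ipu3_css_buf_init(&b, IPU3_CSS_QUEUE_OUT, daddr);
+ *	ipu3_css_buf_queue(css, pipe, &b);
+ *	...
+ *	done = ipu3_css_buf_dequeue(css);
+ *	if (done && ipu3_css_buf_state(done) == IPU3_CSS_BUFFER_DONE)
+ *		... consume the buffer ...
+ */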
+#endif /* __IPU3_CSS_H */
diff --git a/drivers/staging/media/ipu3/ipu3-dmamap.c b/drivers/staging/media/ipu3/ipu3-dmamap.c
new file mode 100644
index 000000000000..93a393d4e15e
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-dmamap.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Intel Corporation
+ * Copyright 2018 Google LLC.
+ *
+ * Author: Tomasz Figa <tfiga@chromium.org>
+ * Author: Yong Zhi <yong.zhi@intel.com>
+ */
+
+#include <linux/vmalloc.h>
+
+#include "ipu3.h"
+#include "ipu3-css-pool.h"
+#include "ipu3-mmu.h"
+
+/*
+ * Free a buffer allocated by ipu3_dmamap_alloc_buffer()
+ */
+static void ipu3_dmamap_free_buffer(struct page **pages,
+ size_t size)
+{
+ int count = size >> PAGE_SHIFT;
+
+ while (count--)
+ __free_page(pages[count]);
+ kvfree(pages);
+}
+
+/*
+ * Based on the implementation of __iommu_dma_alloc_pages()
+ * defined in drivers/iommu/dma-iommu.c
+ */
+static struct page **ipu3_dmamap_alloc_buffer(size_t size,
+ unsigned long order_mask,
+ gfp_t gfp)
+{
+ struct page **pages;
+ unsigned int i = 0, count = size >> PAGE_SHIFT;
+ const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;
+
+	/* Mask out unsupported orders before any allocation is made */
+	order_mask &= (2U << MAX_ORDER) - 1;
+	if (!order_mask)
+		return NULL;
+
+	/* Allocate mem for array of page ptrs */
+	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
+		return NULL;
+
+ gfp |= __GFP_HIGHMEM | __GFP_ZERO;
+
+ while (count) {
+ struct page *page = NULL;
+ unsigned int order_size;
+
+ for (order_mask &= (2U << __fls(count)) - 1;
+ order_mask; order_mask &= ~order_size) {
+ unsigned int order = __fls(order_mask);
+
+ order_size = 1U << order;
+ page = alloc_pages((order_mask - order_size) ?
+ gfp | high_order_gfp : gfp, order);
+ if (!page)
+ continue;
+ if (!order)
+ break;
+ if (!PageCompound(page)) {
+ split_page(page, order);
+ break;
+ }
+
+ __free_pages(page, order);
+ }
+ if (!page) {
+ ipu3_dmamap_free_buffer(pages, i << PAGE_SHIFT);
+ return NULL;
+ }
+ count -= order_size;
+ while (order_size--)
+ pages[i++] = page++;
+ }
+
+ return pages;
+}
+
+/**
+ * ipu3_dmamap_alloc - allocate and map a buffer into KVA
+ * @imgu: imgu device
+ * @map: struct to store mapping variables
+ * @len: size required
+ *
+ * Returns:
+ * KVA on success
+ * %NULL on failure
+ */
+void *ipu3_dmamap_alloc(struct imgu_device *imgu, struct ipu3_css_map *map,
+ size_t len)
+{
+ unsigned long shift = iova_shift(&imgu->iova_domain);
+ unsigned int alloc_sizes = imgu->mmu->pgsize_bitmap;
+ struct device *dev = &imgu->pci_dev->dev;
+ size_t size = PAGE_ALIGN(len);
+ struct page **pages;
+ dma_addr_t iovaddr;
+ struct iova *iova;
+ int i, rval;
+
+ dev_dbg(dev, "%s: allocating %zu\n", __func__, size);
+
+ iova = alloc_iova(&imgu->iova_domain, size >> shift,
+ imgu->mmu->aperture_end >> shift, 0);
+ if (!iova)
+ return NULL;
+
+ pages = ipu3_dmamap_alloc_buffer(size, alloc_sizes >> PAGE_SHIFT,
+ GFP_KERNEL);
+ if (!pages)
+ goto out_free_iova;
+
+ /* Call IOMMU driver to setup pgt */
+ iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
+ for (i = 0; i < size / PAGE_SIZE; ++i) {
+ rval = ipu3_mmu_map(imgu->mmu, iovaddr,
+ page_to_phys(pages[i]), PAGE_SIZE);
+ if (rval)
+ goto out_unmap;
+
+ iovaddr += PAGE_SIZE;
+ }
+
+ /* Now grab a virtual region */
+ map->vma = __get_vm_area(size, VM_USERMAP, VMALLOC_START, VMALLOC_END);
+ if (!map->vma)
+ goto out_unmap;
+
+ map->vma->pages = pages;
+ /* And map it in KVA */
+ if (map_vm_area(map->vma, PAGE_KERNEL, pages))
+ goto out_vunmap;
+
+ map->size = size;
+ map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
+ map->vaddr = map->vma->addr;
+
+ dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
+ size, &map->daddr, map->vma->addr);
+
+ return map->vma->addr;
+
+out_vunmap:
+ vunmap(map->vma->addr);
+
+out_unmap:
+ ipu3_dmamap_free_buffer(pages, size);
+ ipu3_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
+ i * PAGE_SIZE);
+ map->vma = NULL;
+
+out_free_iova:
+ __free_iova(&imgu->iova_domain, iova);
+
+ return NULL;
+}
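+
+/*
+ * Illustrative pairing (sketch; "imgu", "map" and "len" are the
+ * caller's): a buffer allocated here is released with the matching
+ * ipu3_dmamap_free() call once both CPU and device are done with it:
+ *
+ *	void *vaddr = ipu3_dmamap_alloc(imgu, &map, len);
+ *
+ *	if (!vaddr)
+ *		return -ENOMEM;
+ *	... access via vaddr (CPU) and map.daddr (device) ...
+ *	ipu3_dmamap_free(imgu, &map);
+ */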
+
+void ipu3_dmamap_unmap(struct imgu_device *imgu, struct ipu3_css_map *map)
+{
+ struct iova *iova;
+
+ iova = find_iova(&imgu->iova_domain,
+ iova_pfn(&imgu->iova_domain, map->daddr));
+ if (WARN_ON(!iova))
+ return;
+
+ ipu3_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
+ iova_size(iova) << iova_shift(&imgu->iova_domain));
+
+ __free_iova(&imgu->iova_domain, iova);
+}
+
+/*
+ * Counterpart of ipu3_dmamap_alloc
+ */
+void ipu3_dmamap_free(struct imgu_device *imgu, struct ipu3_css_map *map)
+{
+ struct vm_struct *area = map->vma;
+
+ dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
+ __func__, map->size, &map->daddr, map->vaddr);
+
+ if (!map->vaddr)
+ return;
+
+ ipu3_dmamap_unmap(imgu, map);
+
+ if (WARN_ON(!area) || WARN_ON(!area->pages))
+ return;
+
+ ipu3_dmamap_free_buffer(area->pages, map->size);
+ vunmap(map->vaddr);
+ map->vaddr = NULL;
+}
+
+int ipu3_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
+ int nents, struct ipu3_css_map *map)
+{
+ unsigned long shift = iova_shift(&imgu->iova_domain);
+ struct scatterlist *sg;
+ struct iova *iova;
+ size_t size = 0;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i) {
+ if (sg->offset)
+ return -EINVAL;
+
+ if (i != nents - 1 && !PAGE_ALIGNED(sg->length))
+ return -EINVAL;
+
+ size += sg->length;
+ }
+
+ size = iova_align(&imgu->iova_domain, size);
+ dev_dbg(&imgu->pci_dev->dev, "dmamap: mapping sg %d entries, %zu pages\n",
+ nents, size >> shift);
+
+ iova = alloc_iova(&imgu->iova_domain, size >> shift,
+ imgu->mmu->aperture_end >> shift, 0);
+ if (!iova)
+ return -ENOMEM;
+
+ dev_dbg(&imgu->pci_dev->dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
+ iova->pfn_lo, iova->pfn_hi);
+
+ if (ipu3_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
+ sglist, nents) < size)
+ goto out_fail;
+
+ memset(map, 0, sizeof(*map));
+ map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
+ map->size = size;
+
+ return 0;
+
+out_fail:
+ __free_iova(&imgu->iova_domain, iova);
+
+ return -EFAULT;
+}
+
+int ipu3_dmamap_init(struct imgu_device *imgu)
+{
+ unsigned long order, base_pfn;
+ int ret = iova_cache_get();
+
+ if (ret)
+ return ret;
+
+ order = __ffs(imgu->mmu->pgsize_bitmap);
+ base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
+ init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);
+
+ return 0;
+}
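+
+/*
+ * With the IPU3 MMU advertising only 4 KiB pages (pgsize_bitmap ==
+ * 0x1000, see ipu3_mmu_init()), the above yields order = 12, i.e. an
+ * IOVA granule of 4096 bytes, and base_pfn = max(1, aperture_start >> 12).
+ */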
+
+void ipu3_dmamap_exit(struct imgu_device *imgu)
+{
+ put_iova_domain(&imgu->iova_domain);
+ iova_cache_put();
+}
diff --git a/drivers/staging/media/ipu3/ipu3-dmamap.h b/drivers/staging/media/ipu3/ipu3-dmamap.h
new file mode 100644
index 000000000000..b9d224a33273
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-dmamap.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+/* Copyright 2018 Google LLC. */
+
+#ifndef __IPU3_DMAMAP_H
+#define __IPU3_DMAMAP_H
+
+struct imgu_device;
+struct scatterlist;
+
+void *ipu3_dmamap_alloc(struct imgu_device *imgu, struct ipu3_css_map *map,
+ size_t len);
+void ipu3_dmamap_free(struct imgu_device *imgu, struct ipu3_css_map *map);
+
+int ipu3_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
+ int nents, struct ipu3_css_map *map);
+void ipu3_dmamap_unmap(struct imgu_device *imgu, struct ipu3_css_map *map);
+
+int ipu3_dmamap_init(struct imgu_device *imgu);
+void ipu3_dmamap_exit(struct imgu_device *imgu);
+
+#endif /* __IPU3_DMAMAP_H */
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.c b/drivers/staging/media/ipu3/ipu3-mmu.c
new file mode 100644
index 000000000000..b9f209541f78
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-mmu.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Intel Corporation.
+ * Copyright 2018 Google LLC.
+ *
+ * Author: Tuukka Toivonen <tuukka.toivonen@intel.com>
+ * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
+ * Author: Samu Onkalo <samu.onkalo@intel.com>
+ * Author: Tomasz Figa <tfiga@chromium.org>
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <asm/set_memory.h>
+
+#include "ipu3-mmu.h"
+
+#define IPU3_PAGE_SHIFT 12
+#define IPU3_PAGE_SIZE (1UL << IPU3_PAGE_SHIFT)
+
+#define IPU3_PT_BITS 10
+#define IPU3_PT_PTES (1UL << IPU3_PT_BITS)
+#define IPU3_PT_SIZE (IPU3_PT_PTES << 2)
+#define IPU3_PT_ORDER (IPU3_PT_SIZE >> PAGE_SHIFT)
+
+#define IPU3_ADDR2PTE(addr) ((addr) >> IPU3_PAGE_SHIFT)
+#define IPU3_PTE2ADDR(pte) ((phys_addr_t)(pte) << IPU3_PAGE_SHIFT)
+
+#define IPU3_L2PT_SHIFT IPU3_PT_BITS
+#define IPU3_L2PT_MASK ((1UL << IPU3_L2PT_SHIFT) - 1)
+
+#define IPU3_L1PT_SHIFT IPU3_PT_BITS
+#define IPU3_L1PT_MASK ((1UL << IPU3_L1PT_SHIFT) - 1)
+
+#define IPU3_MMU_ADDRESS_BITS (IPU3_PAGE_SHIFT + \
+ IPU3_L2PT_SHIFT + \
+ IPU3_L1PT_SHIFT)
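+
+/*
+ * 12 page-offset bits plus 10 bits per page-table level make a 32-bit
+ * (4 GiB) IOVA space; ipu3_mmu_init() sizes the aperture accordingly.
+ */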
+
+#define IMGU_REG_BASE 0x4000
+#define REG_TLB_INVALIDATE (IMGU_REG_BASE + 0x300)
+#define TLB_INVALIDATE 1
+#define REG_L1_PHYS (IMGU_REG_BASE + 0x304) /* 27-bit pfn */
+#define REG_GP_HALT (IMGU_REG_BASE + 0x5dc)
+#define REG_GP_HALTED (IMGU_REG_BASE + 0x5e0)
+
+struct ipu3_mmu {
+ struct device *dev;
+ void __iomem *base;
+ /* protect access to l2pts, l1pt */
+ spinlock_t lock;
+
+ void *dummy_page;
+ u32 dummy_page_pteval;
+
+ u32 *dummy_l2pt;
+ u32 dummy_l2pt_pteval;
+
+ u32 **l2pts;
+ u32 *l1pt;
+
+ struct ipu3_mmu_info geometry;
+};
+
+static inline struct ipu3_mmu *to_ipu3_mmu(struct ipu3_mmu_info *info)
+{
+ return container_of(info, struct ipu3_mmu, geometry);
+}
+
+/**
+ * ipu3_mmu_tlb_invalidate - invalidate translation look-aside buffer
+ * @mmu: MMU to perform the invalidate operation on
+ *
+ * This function invalidates the whole TLB. Must be called when the hardware
+ * is powered on.
+ */
+static void ipu3_mmu_tlb_invalidate(struct ipu3_mmu *mmu)
+{
+ writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
+}
+
+static void call_if_ipu3_is_powered(struct ipu3_mmu *mmu,
+ void (*func)(struct ipu3_mmu *mmu))
+{
+ if (!pm_runtime_get_if_in_use(mmu->dev))
+ return;
+
+ func(mmu);
+ pm_runtime_put(mmu->dev);
+}
+
+/**
+ * ipu3_mmu_set_halt - set CIO gate halt bit
+ * @mmu: MMU to set the CIO gate bit in.
+ * @halt: Desired state of the gate bit.
+ *
+ * This function sets the CIO gate bit that controls whether external memory
+ * accesses are allowed. Must be called when the hardware is powered on.
+ */
+static void ipu3_mmu_set_halt(struct ipu3_mmu *mmu, bool halt)
+{
+ int ret;
+ u32 val;
+
+ writel(halt, mmu->base + REG_GP_HALT);
+ ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
+ val, (val & 1) == halt, 1000, 100000);
+
+ if (ret)
+ dev_err(mmu->dev, "failed to %s CIO gate halt\n",
+ halt ? "set" : "clear");
+}
+
+/**
+ * ipu3_mmu_alloc_page_table - allocate a pre-filled page table
+ * @pteval: Value to initialize the page table entries with.
+ *
+ * Return: Pointer to allocated page table or NULL on failure.
+ */
+static u32 *ipu3_mmu_alloc_page_table(u32 pteval)
+{
+ u32 *pt;
+ int pte;
+
+ pt = (u32 *)__get_free_page(GFP_KERNEL);
+ if (!pt)
+ return NULL;
+
+ for (pte = 0; pte < IPU3_PT_PTES; pte++)
+ pt[pte] = pteval;
+
+	set_memory_uc((unsigned long)pt, IPU3_PT_ORDER);
+
+ return pt;
+}
+
+/**
+ * ipu3_mmu_free_page_table - free page table
+ * @pt: Page table to free.
+ */
+static void ipu3_mmu_free_page_table(u32 *pt)
+{
+	set_memory_wb((unsigned long)pt, IPU3_PT_ORDER);
+ free_page((unsigned long)pt);
+}
+
+/**
+ * address_to_pte_idx - split IOVA into L1 and L2 page table indices
+ * @iova: IOVA to split.
+ * @l1pt_idx: Output for the L1 page table index.
+ * @l2pt_idx: Output for the L2 page table index.
+ */
+static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
+ u32 *l2pt_idx)
+{
+ iova >>= IPU3_PAGE_SHIFT;
+
+ if (l2pt_idx)
+ *l2pt_idx = iova & IPU3_L2PT_MASK;
+
+ iova >>= IPU3_L2PT_SHIFT;
+
+ if (l1pt_idx)
+ *l1pt_idx = iova & IPU3_L1PT_MASK;
+}
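+
+/*
+ * Worked example: for iova 0x00801000, iova >> 12 = 0x801, so
+ * l2pt_idx = 0x801 & 0x3ff = 1; then 0x801 >> 10 = 2 gives l1pt_idx = 2.
+ */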
+
+static u32 *ipu3_mmu_get_l2pt(struct ipu3_mmu *mmu, u32 l1pt_idx)
+{
+ unsigned long flags;
+ u32 *l2pt, *new_l2pt;
+ u32 pteval;
+
+ spin_lock_irqsave(&mmu->lock, flags);
+
+ l2pt = mmu->l2pts[l1pt_idx];
+ if (l2pt)
+ goto done;
+
+ spin_unlock_irqrestore(&mmu->lock, flags);
+
+ new_l2pt = ipu3_mmu_alloc_page_table(mmu->dummy_page_pteval);
+ if (!new_l2pt)
+ return NULL;
+
+ spin_lock_irqsave(&mmu->lock, flags);
+
+ dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
+ new_l2pt, l1pt_idx);
+
+ l2pt = mmu->l2pts[l1pt_idx];
+ if (l2pt) {
+ ipu3_mmu_free_page_table(new_l2pt);
+ goto done;
+ }
+
+ l2pt = new_l2pt;
+ mmu->l2pts[l1pt_idx] = new_l2pt;
+
+ pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
+ mmu->l1pt[l1pt_idx] = pteval;
+
+done:
+ spin_unlock_irqrestore(&mmu->lock, flags);
+ return l2pt;
+}
+
+static int __ipu3_mmu_map(struct ipu3_mmu *mmu, unsigned long iova,
+ phys_addr_t paddr)
+{
+ u32 l1pt_idx, l2pt_idx;
+ unsigned long flags;
+ u32 *l2pt;
+
+ if (!mmu)
+ return -ENODEV;
+
+ address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);
+
+ l2pt = ipu3_mmu_get_l2pt(mmu, l1pt_idx);
+ if (!l2pt)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&mmu->lock, flags);
+
+ if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
+ spin_unlock_irqrestore(&mmu->lock, flags);
+ return -EBUSY;
+ }
+
+ l2pt[l2pt_idx] = IPU3_ADDR2PTE(paddr);
+
+ spin_unlock_irqrestore(&mmu->lock, flags);
+
+ return 0;
+}
+
+/*
+ * The following four functions are based on their counterparts in
+ * drivers/iommu/iommu.c: iommu_pgsize(), iommu_map(),
+ * default_iommu_map_sg() and iommu_unmap().
+ */
+static size_t ipu3_mmu_pgsize(unsigned long pgsize_bitmap,
+ unsigned long addr_merge, size_t size)
+{
+ unsigned int pgsize_idx;
+ size_t pgsize;
+
+ /* Max page size that still fits into 'size' */
+ pgsize_idx = __fls(size);
+
+	/* need to consider alignment requirements? */
+ if (likely(addr_merge)) {
+ /* Max page size allowed by address */
+ unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+ pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+ }
+
+ /* build a mask of acceptable page sizes */
+ pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+ /* throw away page sizes not supported by the hardware */
+ pgsize &= pgsize_bitmap;
+
+ /* make sure we're still sane */
+ WARN_ON(!pgsize);
+
+ /* pick the biggest page */
+ pgsize_idx = __fls(pgsize);
+ pgsize = 1UL << pgsize_idx;
+
+ return pgsize;
+}
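+
+/*
+ * Example: for size = 0x5000 and addr_merge = 0x2000,
+ * min(__fls(0x5000), __ffs(0x2000)) = 13 gives a candidate mask of
+ * 0x3fff; masked with the IPU3 pgsize_bitmap (0x1000, 4 KiB pages only)
+ * this always picks a 4 KiB page, so the generic math above matters
+ * only for multi-size bitmaps.
+ */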
+
+/* drivers/iommu/iommu.c/iommu_map() */
+int ipu3_mmu_map(struct ipu3_mmu_info *info, unsigned long iova,
+ phys_addr_t paddr, size_t size)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+ unsigned int min_pagesz;
+ int ret = 0;
+
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
+
+ /*
+ * both the virtual address and the physical one, as well as
+ * the size of the mapping, must be aligned (at least) to the
+ * size of the smallest page supported by the hardware
+ */
+ if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+ dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+ iova, &paddr, size, min_pagesz);
+ return -EINVAL;
+ }
+
+ dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
+ iova, &paddr, size);
+
+ while (size) {
+ size_t pgsize = ipu3_mmu_pgsize(mmu->geometry.pgsize_bitmap,
+ iova | paddr, size);
+
+ dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
+ iova, &paddr, pgsize);
+
+ ret = __ipu3_mmu_map(mmu, iova, paddr);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ call_if_ipu3_is_powered(mmu, ipu3_mmu_tlb_invalidate);
+
+ return ret;
+}
+
+/* drivers/iommu/iommu.c/default_iommu_map_sg() */
+size_t ipu3_mmu_map_sg(struct ipu3_mmu_info *info, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+ struct scatterlist *s;
+ size_t s_length, mapped = 0;
+ unsigned int i, min_pagesz;
+ int ret;
+
+ min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
+
+ for_each_sg(sg, s, nents, i) {
+ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+
+ s_length = s->length;
+
+ if (!IS_ALIGNED(s->offset, min_pagesz))
+ goto out_err;
+
+		/* must be min_pagesz aligned to be mapped singly */
+ if (i == nents - 1 && !IS_ALIGNED(s->length, min_pagesz))
+ s_length = PAGE_ALIGN(s->length);
+
+ ret = ipu3_mmu_map(info, iova + mapped, phys, s_length);
+ if (ret)
+ goto out_err;
+
+ mapped += s_length;
+ }
+
+ call_if_ipu3_is_powered(mmu, ipu3_mmu_tlb_invalidate);
+
+ return mapped;
+
+out_err:
+ /* undo mappings already done */
+ ipu3_mmu_unmap(info, iova, mapped);
+
+ return 0;
+}
+
+static size_t __ipu3_mmu_unmap(struct ipu3_mmu *mmu,
+ unsigned long iova, size_t size)
+{
+ u32 l1pt_idx, l2pt_idx;
+ unsigned long flags;
+ size_t unmap = size;
+ u32 *l2pt;
+
+ if (!mmu)
+ return 0;
+
+ address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);
+
+ spin_lock_irqsave(&mmu->lock, flags);
+
+ l2pt = mmu->l2pts[l1pt_idx];
+ if (!l2pt) {
+ spin_unlock_irqrestore(&mmu->lock, flags);
+ return 0;
+ }
+
+ if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
+ unmap = 0;
+
+ l2pt[l2pt_idx] = mmu->dummy_page_pteval;
+
+ spin_unlock_irqrestore(&mmu->lock, flags);
+
+ return unmap;
+}
+
+/* drivers/iommu/iommu.c/iommu_unmap() */
+size_t ipu3_mmu_unmap(struct ipu3_mmu_info *info, unsigned long iova,
+ size_t size)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+ size_t unmapped_page, unmapped = 0;
+ unsigned int min_pagesz;
+
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
+
+ /*
+ * The virtual address, as well as the size of the mapping, must be
+ * aligned (at least) to the size of the smallest page supported
+ * by the hardware
+ */
+ if (!IS_ALIGNED(iova | size, min_pagesz)) {
+ dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+ iova, size, min_pagesz);
+ return -EINVAL;
+ }
+
+ dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);
+
+ /*
+ * Keep iterating until we either unmap 'size' bytes (or more)
+ * or we hit an area that isn't mapped.
+ */
+ while (unmapped < size) {
+ size_t pgsize = ipu3_mmu_pgsize(mmu->geometry.pgsize_bitmap,
+ iova, size - unmapped);
+
+ unmapped_page = __ipu3_mmu_unmap(mmu, iova, pgsize);
+ if (!unmapped_page)
+ break;
+
+ dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
+ iova, unmapped_page);
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
+
+ call_if_ipu3_is_powered(mmu, ipu3_mmu_tlb_invalidate);
+
+ return unmapped;
+}
+
+/**
+ * ipu3_mmu_init() - initialize IPU3 MMU block
+ * @parent: struct device parent
+ * @base: IOMEM base of hardware registers.
+ *
+ * Return: Pointer to IPU3 MMU private data or ERR_PTR() on error.
+ */
+struct ipu3_mmu_info *ipu3_mmu_init(struct device *parent, void __iomem *base)
+{
+ struct ipu3_mmu *mmu;
+ u32 pteval;
+
+ mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
+ if (!mmu)
+ return ERR_PTR(-ENOMEM);
+
+ mmu->dev = parent;
+ mmu->base = base;
+ spin_lock_init(&mmu->lock);
+
+ /* Disallow external memory access when having no valid page tables. */
+ ipu3_mmu_set_halt(mmu, true);
+
+ /*
+ * The MMU does not have a "valid" bit, so we have to use a dummy
+ * page for invalid entries.
+ */
+ mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!mmu->dummy_page)
+ goto fail_group;
+ pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
+ mmu->dummy_page_pteval = pteval;
+
+ /*
+ * Allocate a dummy L2 page table with all entries pointing to
+ * the dummy page.
+ */
+ mmu->dummy_l2pt = ipu3_mmu_alloc_page_table(pteval);
+ if (!mmu->dummy_l2pt)
+ goto fail_dummy_page;
+ pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
+ mmu->dummy_l2pt_pteval = pteval;
+
+ /*
+ * Allocate the array of L2PT CPU pointers, initialized to zero,
+ * which means the dummy L2PT allocated above.
+ */
+ mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
+ if (!mmu->l2pts)
+ goto fail_l2pt;
+
+ /* Allocate the L1 page table. */
+ mmu->l1pt = ipu3_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
+ if (!mmu->l1pt)
+ goto fail_l2pts;
+
+ pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
+ writel(pteval, mmu->base + REG_L1_PHYS);
+ ipu3_mmu_tlb_invalidate(mmu);
+ ipu3_mmu_set_halt(mmu, false);
+
+ mmu->geometry.aperture_start = 0;
+ mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);
+ mmu->geometry.pgsize_bitmap = IPU3_PAGE_SIZE;
+
+ return &mmu->geometry;
+
+fail_l2pts:
+ vfree(mmu->l2pts);
+fail_l2pt:
+ ipu3_mmu_free_page_table(mmu->dummy_l2pt);
+fail_dummy_page:
+ free_page((unsigned long)mmu->dummy_page);
+fail_group:
+ kfree(mmu);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * ipu3_mmu_exit() - clean up IPU3 MMU block
+ * @info: IPU3 MMU private data
+ */
+void ipu3_mmu_exit(struct ipu3_mmu_info *info)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+
+ /* We are going to free our page tables, no more memory access. */
+ ipu3_mmu_set_halt(mmu, true);
+ ipu3_mmu_tlb_invalidate(mmu);
+
+ ipu3_mmu_free_page_table(mmu->l1pt);
+ vfree(mmu->l2pts);
+ ipu3_mmu_free_page_table(mmu->dummy_l2pt);
+ free_page((unsigned long)mmu->dummy_page);
+ kfree(mmu);
+}
+
+void ipu3_mmu_suspend(struct ipu3_mmu_info *info)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+
+ ipu3_mmu_set_halt(mmu, true);
+}
+
+void ipu3_mmu_resume(struct ipu3_mmu_info *info)
+{
+ struct ipu3_mmu *mmu = to_ipu3_mmu(info);
+ u32 pteval;
+
+ ipu3_mmu_set_halt(mmu, true);
+
+ pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
+ writel(pteval, mmu->base + REG_L1_PHYS);
+
+ ipu3_mmu_tlb_invalidate(mmu);
+ ipu3_mmu_set_halt(mmu, false);
+}
diff --git a/drivers/staging/media/ipu3/ipu3-mmu.h b/drivers/staging/media/ipu3/ipu3-mmu.h
new file mode 100644
index 000000000000..8fe63b4c6e1c
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-mmu.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+/* Copyright 2018 Google LLC. */
+
+#ifndef __IPU3_MMU_H
+#define __IPU3_MMU_H
+
+/**
+ * struct ipu3_mmu_info - Describes mmu geometry
+ *
+ * @aperture_start: First address that can be mapped
+ * @aperture_end: Last address that can be mapped
+ * @pgsize_bitmap: Bitmap of page sizes in use
+ */
+struct ipu3_mmu_info {
+ dma_addr_t aperture_start;
+ dma_addr_t aperture_end;
+ unsigned long pgsize_bitmap;
+};
+
+struct device;
+struct scatterlist;
+
+struct ipu3_mmu_info *ipu3_mmu_init(struct device *parent, void __iomem *base);
+void ipu3_mmu_exit(struct ipu3_mmu_info *info);
+void ipu3_mmu_suspend(struct ipu3_mmu_info *info);
+void ipu3_mmu_resume(struct ipu3_mmu_info *info);
+
+int ipu3_mmu_map(struct ipu3_mmu_info *info, unsigned long iova,
+ phys_addr_t paddr, size_t size);
+size_t ipu3_mmu_unmap(struct ipu3_mmu_info *info, unsigned long iova,
+ size_t size);
+size_t ipu3_mmu_map_sg(struct ipu3_mmu_info *info, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents);
+
+#endif /* __IPU3_MMU_H */
diff --git a/drivers/staging/media/ipu3/ipu3-tables.c b/drivers/staging/media/ipu3/ipu3-tables.c
new file mode 100644
index 000000000000..334517987eba
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-tables.c
@@ -0,0 +1,9609 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include "ipu3-tables.h"
+
+#define X 0 /* Don't care value */
+
+const struct ipu3_css_bds_config
+ ipu3_css_bds_configs[IMGU_BDS_CONFIG_LEN] = { {
+ /* Scale factor 32 / (32 + 0) = 1 */
+ .hor_phase_arr = {
+ .even = { { 0, 0, 64, 6, 0, 0, 0 } },
+ .odd = { { 0, 0, 64, 6, 0, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 0, 64, 6, 0, 0, 0 } },
+ .odd = { { 0, 0, 64, 6, 0, 0, 0 } } },
+ .ptrn_arr = { { 0x3 } },
+ .sample_patrn_length = 2,
+ .hor_ds_en = 0,
+ .ver_ds_en = 0
+}, {
+ /* Scale factor 32 / (32 + 1) = 0.969697 */
+ .hor_phase_arr = {
+ .even = { { 0, 3, 122, 7, 3, 0, 0 },
+ { 0, 0, 122, 7, 7, -1, 0 },
+ { 0, -3, 122, 7, 10, -1, 0 },
+ { 0, -5, 121, 7, 14, -2, 0 },
+ { 0, -7, 120, 7, 18, -3, 0 },
+ { 0, -9, 118, 7, 23, -4, 0 },
+ { 0, -11, 116, 7, 27, -4, 0 },
+ { 0, -12, 113, 7, 32, -5, 0 },
+ { 0, -13, 110, 7, 37, -6, 0 },
+ { 0, -14, 107, 7, 42, -7, 0 },
+ { 0, -14, 103, 7, 47, -8, 0 },
+ { 0, -15, 100, 7, 52, -9, 0 },
+ { 0, -15, 96, 7, 57, -10, 0 },
+ { 0, -15, 92, 7, 62, -11, 0 },
+ { 0, -14, 86, 7, 68, -12, 0 },
+ { 0, -14, 82, 7, 73, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 73, 7, 82, -14, 0 },
+ { 0, -12, 68, 7, 86, -14, 0 },
+ { 0, -11, 62, 7, 92, -15, 0 },
+ { 0, -10, 57, 7, 96, -15, 0 },
+ { 0, -9, 52, 7, 100, -15, 0 },
+ { 0, -8, 47, 7, 103, -14, 0 },
+ { 0, -7, 42, 7, 107, -14, 0 },
+ { 0, -6, 37, 7, 110, -13, 0 },
+ { 0, -5, 32, 7, 113, -12, 0 },
+ { 0, -4, 27, 7, 116, -11, 0 },
+ { 0, -4, 23, 7, 118, -9, 0 },
+ { 0, -3, 18, 7, 120, -7, 0 },
+ { 0, -2, 14, 7, 121, -5, 0 },
+ { 0, -1, 10, 7, 122, -3, 0 },
+ { 0, -1, 7, 7, 122, 0, 0 } },
+ .odd = { { 0, 2, 122, 7, 5, -1, 0 },
+ { 0, -1, 122, 7, 8, -1, 0 },
+ { 0, -4, 122, 7, 12, -2, 0 },
+ { 0, -6, 120, 7, 16, -2, 0 },
+ { 0, -8, 118, 7, 21, -3, 0 },
+ { 0, -10, 117, 7, 25, -4, 0 },
+ { 0, -11, 114, 7, 30, -5, 0 },
+ { 0, -13, 112, 7, 35, -6, 0 },
+ { 0, -14, 109, 7, 40, -7, 0 },
+ { 0, -14, 105, 7, 45, -8, 0 },
+ { 0, -15, 102, 7, 50, -9, 0 },
+ { 0, -15, 98, 7, 55, -10, 0 },
+ { 0, -15, 94, 7, 60, -11, 0 },
+ { 0, -15, 90, 7, 65, -12, 0 },
+ { 0, -14, 85, 7, 70, -13, 0 },
+ { 0, -14, 80, 7, 75, -13, 0 },
+ { 0, -13, 75, 7, 80, -14, 0 },
+ { 0, -13, 70, 7, 85, -14, 0 },
+ { 0, -12, 65, 7, 90, -15, 0 },
+ { 0, -11, 60, 7, 94, -15, 0 },
+ { 0, -10, 55, 7, 98, -15, 0 },
+ { 0, -9, 50, 7, 102, -15, 0 },
+ { 0, -8, 45, 7, 105, -14, 0 },
+ { 0, -7, 40, 7, 109, -14, 0 },
+ { 0, -6, 35, 7, 112, -13, 0 },
+ { 0, -5, 30, 7, 114, -11, 0 },
+ { 0, -4, 25, 7, 117, -10, 0 },
+ { 0, -3, 21, 7, 118, -8, 0 },
+ { 0, -2, 16, 7, 120, -6, 0 },
+ { 0, -2, 12, 7, 122, -4, 0 },
+ { 0, -1, 8, 7, 122, -1, 0 },
+ { 0, -1, 5, 7, 122, 2, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 3, 122, 7, 3, 0, 0 },
+ { 0, 0, 122, 7, 7, -1, 0 },
+ { 0, -3, 122, 7, 10, -1, 0 },
+ { 0, -5, 121, 7, 14, -2, 0 },
+ { 0, -7, 120, 7, 18, -3, 0 },
+ { 0, -9, 118, 7, 23, -4, 0 },
+ { 0, -11, 116, 7, 27, -4, 0 },
+ { 0, -12, 113, 7, 32, -5, 0 },
+ { 0, -13, 110, 7, 37, -6, 0 },
+ { 0, -14, 107, 7, 42, -7, 0 },
+ { 0, -14, 103, 7, 47, -8, 0 },
+ { 0, -15, 100, 7, 52, -9, 0 },
+ { 0, -15, 96, 7, 57, -10, 0 },
+ { 0, -15, 92, 7, 62, -11, 0 },
+ { 0, -14, 86, 7, 68, -12, 0 },
+ { 0, -14, 82, 7, 73, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 73, 7, 82, -14, 0 },
+ { 0, -12, 68, 7, 86, -14, 0 },
+ { 0, -11, 62, 7, 92, -15, 0 },
+ { 0, -10, 57, 7, 96, -15, 0 },
+ { 0, -9, 52, 7, 100, -15, 0 },
+ { 0, -8, 47, 7, 103, -14, 0 },
+ { 0, -7, 42, 7, 107, -14, 0 },
+ { 0, -6, 37, 7, 110, -13, 0 },
+ { 0, -5, 32, 7, 113, -12, 0 },
+ { 0, -4, 27, 7, 116, -11, 0 },
+ { 0, -4, 23, 7, 118, -9, 0 },
+ { 0, -3, 18, 7, 120, -7, 0 },
+ { 0, -2, 14, 7, 121, -5, 0 },
+ { 0, -1, 10, 7, 122, -3, 0 },
+ { 0, -1, 7, 7, 122, 0, 0 } },
+ .odd = { { 0, 2, 122, 7, 5, -1, 0 },
+ { 0, -1, 122, 7, 8, -1, 0 },
+ { 0, -4, 122, 7, 12, -2, 0 },
+ { 0, -6, 120, 7, 16, -2, 0 },
+ { 0, -8, 118, 7, 21, -3, 0 },
+ { 0, -10, 117, 7, 25, -4, 0 },
+ { 0, -11, 114, 7, 30, -5, 0 },
+ { 0, -13, 112, 7, 35, -6, 0 },
+ { 0, -14, 109, 7, 40, -7, 0 },
+ { 0, -14, 105, 7, 45, -8, 0 },
+ { 0, -15, 102, 7, 50, -9, 0 },
+ { 0, -15, 98, 7, 55, -10, 0 },
+ { 0, -15, 94, 7, 60, -11, 0 },
+ { 0, -15, 90, 7, 65, -12, 0 },
+ { 0, -14, 85, 7, 70, -13, 0 },
+ { 0, -14, 80, 7, 75, -13, 0 },
+ { 0, -13, 75, 7, 80, -14, 0 },
+ { 0, -13, 70, 7, 85, -14, 0 },
+ { 0, -12, 65, 7, 90, -15, 0 },
+ { 0, -11, 60, 7, 94, -15, 0 },
+ { 0, -10, 55, 7, 98, -15, 0 },
+ { 0, -9, 50, 7, 102, -15, 0 },
+ { 0, -8, 45, 7, 105, -14, 0 },
+ { 0, -7, 40, 7, 109, -14, 0 },
+ { 0, -6, 35, 7, 112, -13, 0 },
+ { 0, -5, 30, 7, 114, -11, 0 },
+ { 0, -4, 25, 7, 117, -10, 0 },
+ { 0, -3, 21, 7, 118, -8, 0 },
+ { 0, -2, 16, 7, 120, -6, 0 },
+ { 0, -2, 12, 7, 122, -4, 0 },
+ { 0, -1, 8, 7, 122, -1, 0 },
+ { 0, -1, 5, 7, 122, 2, 0 } } },
+ .ptrn_arr = { { 0xffffffff, 0xffffffff } },
+ .sample_patrn_length = 66,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 2) = 0.941176 */
+ .hor_phase_arr = {
+ .even = { { -1, 6, 118, 7, 6, -1, 0 },
+ { 0, 0, 117, 7, 13, -2, 0 },
+ { 0, -5, 116, 7, 21, -4, 0 },
+ { 0, -9, 113, 7, 30, -6, 0 },
+ { 0, -12, 109, 7, 39, -8, 0 },
+ { 0, -13, 102, 7, 49, -10, 0 },
+ { 0, -14, 94, 7, 59, -11, 0 },
+ { 0, -14, 86, 7, 69, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 69, 7, 86, -14, 0 },
+ { 0, -11, 59, 7, 94, -14, 0 },
+ { 0, -10, 49, 7, 102, -13, 0 },
+ { 0, -8, 39, 7, 109, -12, 0 },
+ { 0, -6, 30, 7, 113, -9, 0 },
+ { 0, -4, 21, 7, 116, -5, 0 },
+ { 0, -2, 13, 7, 117, 0, 0 } },
+ .odd = { { -1, 3, 118, 7, 10, -2, 0 },
+ { 0, -3, 117, 7, 17, -3, 0 },
+ { 0, -7, 114, 7, 26, -5, 0 },
+ { 0, -10, 110, 7, 35, -7, 0 },
+ { 0, -13, 106, 7, 44, -9, 0 },
+ { 0, -14, 99, 7, 54, -11, 0 },
+ { 0, -14, 90, 7, 64, -12, 0 },
+ { 0, -14, 82, 7, 73, -13, 0 },
+ { 0, -13, 73, 7, 82, -14, 0 },
+ { 0, -12, 64, 7, 90, -14, 0 },
+ { 0, -11, 54, 7, 99, -14, 0 },
+ { 0, -9, 44, 7, 106, -13, 0 },
+ { 0, -7, 35, 7, 110, -10, 0 },
+ { 0, -5, 26, 7, 114, -7, 0 },
+ { 0, -3, 17, 7, 117, -3, 0 },
+ { 0, -2, 10, 7, 118, 3, -1 } } },
+ .ver_phase_arr = {
+ .even = { { -1, 6, 118, 7, 6, -1, 0 },
+ { 0, 0, 117, 7, 13, -2, 0 },
+ { 0, -5, 116, 7, 21, -4, 0 },
+ { 0, -9, 113, 7, 30, -6, 0 },
+ { 0, -12, 109, 7, 39, -8, 0 },
+ { 0, -13, 102, 7, 49, -10, 0 },
+ { 0, -14, 94, 7, 59, -11, 0 },
+ { 0, -14, 86, 7, 69, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 69, 7, 86, -14, 0 },
+ { 0, -11, 59, 7, 94, -14, 0 },
+ { 0, -10, 49, 7, 102, -13, 0 },
+ { 0, -8, 39, 7, 109, -12, 0 },
+ { 0, -6, 30, 7, 113, -9, 0 },
+ { 0, -4, 21, 7, 116, -5, 0 },
+ { 0, -2, 13, 7, 117, 0, 0 } },
+ .odd = { { -1, 3, 118, 7, 10, -2, 0 },
+ { 0, -3, 117, 7, 17, -3, 0 },
+ { 0, -7, 114, 7, 26, -5, 0 },
+ { 0, -10, 110, 7, 35, -7, 0 },
+ { 0, -13, 106, 7, 44, -9, 0 },
+ { 0, -14, 99, 7, 54, -11, 0 },
+ { 0, -14, 90, 7, 64, -12, 0 },
+ { 0, -14, 82, 7, 73, -13, 0 },
+ { 0, -13, 73, 7, 82, -14, 0 },
+ { 0, -12, 64, 7, 90, -14, 0 },
+ { 0, -11, 54, 7, 99, -14, 0 },
+ { 0, -9, 44, 7, 106, -13, 0 },
+ { 0, -7, 35, 7, 110, -10, 0 },
+ { 0, -5, 26, 7, 114, -7, 0 },
+ { 0, -3, 17, 7, 117, -3, 0 },
+ { 0, -2, 10, 7, 118, 3, -1 } } },
+ .ptrn_arr = { { 0xffffffff } },
+ .sample_patrn_length = 34,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 3) = 0.914286 */
+ .hor_phase_arr = {
+ .even = { { -2, 9, 114, 7, 9, -2, 0 },
+ { -1, 0, 114, 7, 20, -5, 0 },
+ { 0, -7, 110, 7, 32, -7, 0 },
+ { 0, -11, 103, 7, 46, -10, 0 },
+ { 0, -13, 93, 7, 60, -12, 0 },
+ { 0, -14, 82, 7, 74, -14, 0 },
+ { 0, -13, 69, 7, 86, -14, 0 },
+ { 0, -11, 55, 7, 97, -13, 0 },
+ { 0, -9, 41, 7, 106, -10, 0 },
+ { 0, -6, 28, 7, 111, -5, 0 },
+ { 0, -4, 16, 7, 114, 3, -1 },
+ { -2, 6, 115, 7, 12, -3, 0 },
+ { 0, -2, 111, 7, 24, -5, 0 },
+ { 0, -8, 107, 7, 37, -8, 0 },
+ { 0, -12, 100, 7, 51, -11, 0 },
+ { 0, -14, 90, 7, 65, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 65, 7, 90, -14, 0 },
+ { 0, -11, 51, 7, 100, -12, 0 },
+ { 0, -8, 37, 7, 107, -8, 0 },
+ { 0, -5, 24, 7, 111, -2, 0 },
+ { 0, -3, 12, 7, 115, 6, -2 },
+ { -1, 3, 114, 7, 16, -4, 0 },
+ { 0, -5, 111, 7, 28, -6, 0 },
+ { 0, -10, 106, 7, 41, -9, 0 },
+ { 0, -13, 97, 7, 55, -11, 0 },
+ { 0, -14, 86, 7, 69, -13, 0 },
+ { 0, -14, 74, 7, 82, -14, 0 },
+ { 0, -12, 60, 7, 93, -13, 0 },
+ { 0, -10, 46, 7, 103, -11, 0 },
+ { 0, -7, 32, 7, 110, -7, 0 },
+ { 0, -5, 20, 7, 114, 0, -1 } },
+ .odd = { { -1, 4, 114, 7, 14, -3, 0 },
+ { 0, -4, 112, 7, 26, -6, 0 },
+ { 0, -9, 107, 7, 39, -9, 0 },
+ { 0, -13, 99, 7, 53, -11, 0 },
+ { 0, -14, 88, 7, 67, -13, 0 },
+ { 0, -14, 76, 7, 80, -14, 0 },
+ { 0, -13, 62, 7, 93, -14, 0 },
+ { 0, -10, 48, 7, 102, -12, 0 },
+ { 0, -8, 35, 7, 109, -8, 0 },
+ { 0, -5, 22, 7, 112, -1, 0 },
+ { 0, -3, 11, 7, 115, 7, -2 },
+ { -1, 1, 114, 7, 18, -4, 0 },
+ { 0, -6, 111, 7, 30, -7, 0 },
+ { 0, -10, 103, 7, 44, -9, 0 },
+ { 0, -13, 95, 7, 58, -12, 0 },
+ { 0, -14, 85, 7, 71, -14, 0 },
+ { 0, -14, 71, 7, 85, -14, 0 },
+ { 0, -12, 58, 7, 95, -13, 0 },
+ { 0, -9, 44, 7, 103, -10, 0 },
+ { 0, -7, 30, 7, 111, -6, 0 },
+ { 0, -4, 18, 7, 114, 1, -1 },
+ { -2, 7, 115, 7, 11, -3, 0 },
+ { 0, -1, 112, 7, 22, -5, 0 },
+ { 0, -8, 109, 7, 35, -8, 0 },
+ { 0, -12, 102, 7, 48, -10, 0 },
+ { 0, -14, 93, 7, 62, -13, 0 },
+ { 0, -14, 80, 7, 76, -14, 0 },
+ { 0, -13, 67, 7, 88, -14, 0 },
+ { 0, -11, 53, 7, 99, -13, 0 },
+ { 0, -9, 39, 7, 107, -9, 0 },
+ { 0, -6, 26, 7, 112, -4, 0 },
+ { 0, -3, 14, 7, 114, 4, -1 } } },
+ .ver_phase_arr = {
+ .even = { { -2, 9, 114, 7, 9, -2, 0 },
+ { -1, 0, 114, 7, 20, -5, 0 },
+ { 0, -7, 110, 7, 32, -7, 0 },
+ { 0, -11, 103, 7, 46, -10, 0 },
+ { 0, -13, 93, 7, 60, -12, 0 },
+ { 0, -14, 82, 7, 74, -14, 0 },
+ { 0, -13, 69, 7, 86, -14, 0 },
+ { 0, -11, 55, 7, 97, -13, 0 },
+ { 0, -9, 41, 7, 106, -10, 0 },
+ { 0, -6, 28, 7, 111, -5, 0 },
+ { 0, -4, 16, 7, 114, 3, -1 },
+ { -2, 6, 115, 7, 12, -3, 0 },
+ { 0, -2, 111, 7, 24, -5, 0 },
+ { 0, -8, 107, 7, 37, -8, 0 },
+ { 0, -12, 100, 7, 51, -11, 0 },
+ { 0, -14, 90, 7, 65, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 65, 7, 90, -14, 0 },
+ { 0, -11, 51, 7, 100, -12, 0 },
+ { 0, -8, 37, 7, 107, -8, 0 },
+ { 0, -5, 24, 7, 111, -2, 0 },
+ { 0, -3, 12, 7, 115, 6, -2 },
+ { -1, 3, 114, 7, 16, -4, 0 },
+ { 0, -5, 111, 7, 28, -6, 0 },
+ { 0, -10, 106, 7, 41, -9, 0 },
+ { 0, -13, 97, 7, 55, -11, 0 },
+ { 0, -14, 86, 7, 69, -13, 0 },
+ { 0, -14, 74, 7, 82, -14, 0 },
+ { 0, -12, 60, 7, 93, -13, 0 },
+ { 0, -10, 46, 7, 103, -11, 0 },
+ { 0, -7, 32, 7, 110, -7, 0 },
+ { 0, -5, 20, 7, 114, 0, -1 } },
+ .odd = { { -1, 4, 114, 7, 14, -3, 0 },
+ { 0, -4, 112, 7, 26, -6, 0 },
+ { 0, -9, 107, 7, 39, -9, 0 },
+ { 0, -13, 99, 7, 53, -11, 0 },
+ { 0, -14, 88, 7, 67, -13, 0 },
+ { 0, -14, 76, 7, 80, -14, 0 },
+ { 0, -13, 62, 7, 93, -14, 0 },
+ { 0, -10, 48, 7, 102, -12, 0 },
+ { 0, -8, 35, 7, 109, -8, 0 },
+ { 0, -5, 22, 7, 112, -1, 0 },
+ { 0, -3, 11, 7, 115, 7, -2 },
+ { -1, 1, 114, 7, 18, -4, 0 },
+ { 0, -6, 111, 7, 30, -7, 0 },
+ { 0, -10, 103, 7, 44, -9, 0 },
+ { 0, -13, 95, 7, 58, -12, 0 },
+ { 0, -14, 85, 7, 71, -14, 0 },
+ { 0, -14, 71, 7, 85, -14, 0 },
+ { 0, -12, 58, 7, 95, -13, 0 },
+ { 0, -9, 44, 7, 103, -10, 0 },
+ { 0, -7, 30, 7, 111, -6, 0 },
+ { 0, -4, 18, 7, 114, 1, -1 },
+ { -2, 7, 115, 7, 11, -3, 0 },
+ { 0, -1, 112, 7, 22, -5, 0 },
+ { 0, -8, 109, 7, 35, -8, 0 },
+ { 0, -12, 102, 7, 48, -10, 0 },
+ { 0, -14, 93, 7, 62, -13, 0 },
+ { 0, -14, 80, 7, 76, -14, 0 },
+ { 0, -13, 67, 7, 88, -14, 0 },
+ { 0, -11, 53, 7, 99, -13, 0 },
+ { 0, -9, 39, 7, 107, -9, 0 },
+ { 0, -6, 26, 7, 112, -4, 0 },
+ { 0, -3, 14, 7, 114, 4, -1 } } },
+ .ptrn_arr = { { 0xff3fffff, 0xffff9fff, 0xf } },
+ .sample_patrn_length = 70,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 4) = 0.888889 */
+ .hor_phase_arr = {
+ .even = { { -3, 12, 110, 7, 12, -3, 0 },
+ { -1, 0, 110, 7, 26, -7, 0 },
+ { 0, -8, 103, 7, 43, -10, 0 },
+ { 0, -12, 92, 7, 61, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 61, 7, 92, -12, 0 },
+ { 0, -10, 43, 7, 103, -8, 0 },
+ { 0, -7, 26, 7, 110, 0, -1 } },
+ .odd = { { -2, 5, 111, 7, 19, -5, 0 },
+ { 0, -4, 106, 7, 34, -8, 0 },
+ { 0, -11, 98, 7, 52, -11, 0 },
+ { 0, -13, 85, 7, 69, -13, 0 },
+ { 0, -13, 69, 7, 85, -13, 0 },
+ { 0, -11, 52, 7, 98, -11, 0 },
+ { 0, -8, 34, 7, 106, -4, 0 },
+ { 0, -5, 19, 7, 111, 5, -2 } } },
+ .ver_phase_arr = {
+ .even = { { -3, 12, 110, 7, 12, -3, 0 },
+ { -1, 0, 110, 7, 26, -7, 0 },
+ { 0, -8, 103, 7, 43, -10, 0 },
+ { 0, -12, 92, 7, 61, -13, 0 },
+ { 0, -14, 78, 7, 78, -14, 0 },
+ { 0, -13, 61, 7, 92, -12, 0 },
+ { 0, -10, 43, 7, 103, -8, 0 },
+ { 0, -7, 26, 7, 110, 0, -1 } },
+ .odd = { { -2, 5, 111, 7, 19, -5, 0 },
+ { 0, -4, 106, 7, 34, -8, 0 },
+ { 0, -11, 98, 7, 52, -11, 0 },
+ { 0, -13, 85, 7, 69, -13, 0 },
+ { 0, -13, 69, 7, 85, -13, 0 },
+ { 0, -11, 52, 7, 98, -11, 0 },
+ { 0, -8, 34, 7, 106, -4, 0 },
+ { 0, -5, 19, 7, 111, 5, -2 } } },
+ .ptrn_arr = { { 0xffff } },
+ .sample_patrn_length = 18,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 5) = 0.864865 */
+ .hor_phase_arr = {
+ .even = { { -5, 14, 110, 7, 14, -5, 0 },
+ { -1, 0, 106, 7, 32, -9, 0 },
+ { 0, -9, 96, 7, 53, -12, 0 },
+ { 0, -13, 81, 7, 73, -13, 0 },
+ { 0, -13, 61, 7, 91, -11, 0 },
+ { 0, -10, 40, 7, 103, -4, -1 },
+ { 0, -6, 21, 7, 108, 8, -3 },
+ { -3, 5, 108, 7, 25, -7, 0 },
+ { 0, -6, 101, 7, 44, -11, 0 },
+ { 0, -12, 88, 7, 65, -13, 0 },
+ { 0, -13, 69, 7, 85, -13, 0 },
+ { 0, -11, 49, 7, 98, -8, 0 },
+ { 0, -8, 28, 7, 108, 2, -2 },
+ { -4, 11, 108, 7, 18, -5, 0 },
+ { -1, -2, 104, 7, 36, -9, 0 },
+ { 0, -10, 93, 7, 57, -12, 0 },
+ { 0, -13, 77, 7, 77, -13, 0 },
+ { 0, -12, 57, 7, 93, -10, 0 },
+ { 0, -9, 36, 7, 104, -2, -1 },
+ { 0, -5, 18, 7, 108, 11, -4 },
+ { -2, 2, 108, 7, 28, -8, 0 },
+ { 0, -8, 98, 7, 49, -11, 0 },
+ { 0, -13, 85, 7, 69, -13, 0 },
+ { 0, -13, 65, 7, 88, -12, 0 },
+ { 0, -11, 44, 7, 101, -6, 0 },
+ { 0, -7, 25, 7, 108, 5, -3 },
+ { -3, 8, 108, 7, 21, -6, 0 },
+ { -1, -4, 103, 7, 40, -10, 0 },
+ { 0, -11, 91, 7, 61, -13, 0 },
+ { 0, -13, 73, 7, 81, -13, 0 },
+ { 0, -12, 53, 7, 96, -9, 0 },
+ { 0, -9, 32, 7, 106, 0, -1 } },
+ .odd = { { -3, 7, 108, 7, 23, -7, 0 },
+ { 0, -5, 101, 7, 42, -10, 0 },
+ { 0, -12, 90, 7, 63, -13, 0 },
+ { 0, -13, 71, 7, 83, -13, 0 },
+ { 0, -12, 51, 7, 97, -8, 0 },
+ { 0, -8, 30, 7, 107, 1, -2 },
+ { -4, 13, 108, 7, 16, -5, 0 },
+ { -1, -1, 105, 7, 34, -9, 0 },
+ { 0, -10, 95, 7, 55, -12, 0 },
+ { 0, -13, 79, 7, 75, -13, 0 },
+ { 0, -13, 59, 7, 93, -11, 0 },
+ { 0, -10, 38, 7, 104, -3, -1 },
+ { 0, -6, 19, 7, 110, 9, -4 },
+ { -2, 4, 106, 7, 27, -7, 0 },
+ { 0, -7, 99, 7, 47, -11, 0 },
+ { 0, -12, 86, 7, 67, -13, 0 },
+ { 0, -13, 67, 7, 86, -12, 0 },
+ { 0, -11, 47, 7, 99, -7, 0 },
+ { 0, -7, 27, 7, 106, 4, -2 },
+ { -4, 9, 110, 7, 19, -6, 0 },
+ { -1, -3, 104, 7, 38, -10, 0 },
+ { 0, -11, 93, 7, 59, -13, 0 },
+ { 0, -13, 75, 7, 79, -13, 0 },
+ { 0, -12, 55, 7, 95, -10, 0 },
+ { 0, -9, 34, 7, 105, -1, -1 },
+ { 0, -5, 16, 7, 108, 13, -4 },
+ { -2, 1, 107, 7, 30, -8, 0 },
+ { 0, -8, 97, 7, 51, -12, 0 },
+ { 0, -13, 83, 7, 71, -13, 0 },
+ { 0, -13, 63, 7, 90, -12, 0 },
+ { 0, -10, 42, 7, 101, -5, 0 },
+ { 0, -7, 23, 7, 108, 7, -3 } } },
+ .ver_phase_arr = {
+ .even = { { -5, 14, 110, 7, 14, -5, 0 },
+ { -1, 0, 106, 7, 32, -9, 0 },
+ { 0, -9, 96, 7, 53, -12, 0 },
+ { 0, -13, 81, 7, 73, -13, 0 },
+ { 0, -13, 61, 7, 91, -11, 0 },
+ { 0, -10, 40, 7, 103, -4, -1 },
+ { 0, -6, 21, 7, 108, 8, -3 },
+ { -3, 5, 108, 7, 25, -7, 0 },
+ { 0, -6, 101, 7, 44, -11, 0 },
+ { 0, -12, 88, 7, 65, -13, 0 },
+ { 0, -13, 69, 7, 85, -13, 0 },
+ { 0, -11, 49, 7, 98, -8, 0 },
+ { 0, -8, 28, 7, 108, 2, -2 },
+ { -4, 11, 108, 7, 18, -5, 0 },
+ { -1, -2, 104, 7, 36, -9, 0 },
+ { 0, -10, 93, 7, 57, -12, 0 },
+ { 0, -13, 77, 7, 77, -13, 0 },
+ { 0, -12, 57, 7, 93, -10, 0 },
+ { 0, -9, 36, 7, 104, -2, -1 },
+ { 0, -5, 18, 7, 108, 11, -4 },
+ { -2, 2, 108, 7, 28, -8, 0 },
+ { 0, -8, 98, 7, 49, -11, 0 },
+ { 0, -13, 85, 7, 69, -13, 0 },
+ { 0, -13, 65, 7, 88, -12, 0 },
+ { 0, -11, 44, 7, 101, -6, 0 },
+ { 0, -7, 25, 7, 108, 5, -3 },
+ { -3, 8, 108, 7, 21, -6, 0 },
+ { -1, -4, 103, 7, 40, -10, 0 },
+ { 0, -11, 91, 7, 61, -13, 0 },
+ { 0, -13, 73, 7, 81, -13, 0 },
+ { 0, -12, 53, 7, 96, -9, 0 },
+ { 0, -9, 32, 7, 106, 0, -1 } },
+ .odd = { { -3, 7, 108, 7, 23, -7, 0 },
+ { 0, -5, 101, 7, 42, -10, 0 },
+ { 0, -12, 90, 7, 63, -13, 0 },
+ { 0, -13, 71, 7, 83, -13, 0 },
+ { 0, -12, 51, 7, 97, -8, 0 },
+ { 0, -8, 30, 7, 107, 1, -2 },
+ { -4, 13, 108, 7, 16, -5, 0 },
+ { -1, -1, 105, 7, 34, -9, 0 },
+ { 0, -10, 95, 7, 55, -12, 0 },
+ { 0, -13, 79, 7, 75, -13, 0 },
+ { 0, -13, 59, 7, 93, -11, 0 },
+ { 0, -10, 38, 7, 104, -3, -1 },
+ { 0, -6, 19, 7, 110, 9, -4 },
+ { -2, 4, 106, 7, 27, -7, 0 },
+ { 0, -7, 99, 7, 47, -11, 0 },
+ { 0, -12, 86, 7, 67, -13, 0 },
+ { 0, -13, 67, 7, 86, -12, 0 },
+ { 0, -11, 47, 7, 99, -7, 0 },
+ { 0, -7, 27, 7, 106, 4, -2 },
+ { -4, 9, 110, 7, 19, -6, 0 },
+ { -1, -3, 104, 7, 38, -10, 0 },
+ { 0, -11, 93, 7, 59, -13, 0 },
+ { 0, -13, 75, 7, 79, -13, 0 },
+ { 0, -12, 55, 7, 95, -10, 0 },
+ { 0, -9, 34, 7, 105, -1, -1 },
+ { 0, -5, 16, 7, 108, 13, -4 },
+ { -2, 1, 107, 7, 30, -8, 0 },
+ { 0, -8, 97, 7, 51, -12, 0 },
+ { 0, -13, 83, 7, 71, -13, 0 },
+ { 0, -13, 63, 7, 90, -12, 0 },
+ { 0, -10, 42, 7, 101, -5, 0 },
+ { 0, -7, 23, 7, 108, 7, -3 } } },
+ .ptrn_arr = { { 0xcfff9fff, 0xf3ffe7ff, 0xff } },
+ .sample_patrn_length = 74,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 6) = 0.842105 */
+ .hor_phase_arr = {
+ .even = { { -6, 17, 106, 7, 17, -6, 0 },
+ { -2, 0, 102, 7, 38, -10, 0 },
+ { 0, -10, 89, 7, 62, -13, 0 },
+ { 0, -13, 69, 7, 83, -11, 0 },
+ { 0, -11, 46, 7, 98, -4, -1 },
+ { 0, -7, 23, 7, 106, 10, -4 },
+ { -3, 5, 104, 7, 31, -9, 0 },
+ { 0, -7, 93, 7, 54, -12, 0 },
+ { 0, -12, 76, 7, 76, -12, 0 },
+ { 0, -12, 54, 7, 93, -7, 0 },
+ { 0, -9, 31, 7, 104, 5, -3 },
+ { -4, 10, 106, 7, 23, -7, 0 },
+ { -1, -4, 98, 7, 46, -11, 0 },
+ { 0, -11, 83, 7, 69, -13, 0 },
+ { 0, -13, 62, 7, 89, -10, 0 },
+ { 0, -10, 38, 7, 102, 0, -2 } },
+ .odd = { { -4, 8, 105, 7, 27, -8, 0 },
+ { 0, -6, 96, 7, 50, -12, 0 },
+ { 0, -12, 80, 7, 73, -13, 0 },
+ { 0, -13, 58, 7, 92, -9, 0 },
+ { 0, -9, 34, 7, 103, 2, -2 },
+ { -5, 13, 107, 7, 20, -7, 0 },
+ { -1, -2, 100, 7, 42, -11, 0 },
+ { 0, -11, 87, 7, 65, -13, 0 },
+ { 0, -13, 65, 7, 87, -11, 0 },
+ { 0, -11, 42, 7, 100, -2, -1 },
+ { 0, -7, 20, 7, 107, 13, -5 },
+ { -2, 2, 103, 7, 34, -9, 0 },
+ { 0, -9, 92, 7, 58, -13, 0 },
+ { 0, -13, 73, 7, 80, -12, 0 },
+ { 0, -12, 50, 7, 96, -6, 0 },
+ { 0, -8, 27, 7, 105, 8, -4 } } },
+ .ver_phase_arr = {
+ .even = { { -6, 17, 106, 7, 17, -6, 0 },
+ { -2, 0, 102, 7, 38, -10, 0 },
+ { 0, -10, 89, 7, 62, -13, 0 },
+ { 0, -13, 69, 7, 83, -11, 0 },
+ { 0, -11, 46, 7, 98, -4, -1 },
+ { 0, -7, 23, 7, 106, 10, -4 },
+ { -3, 5, 104, 7, 31, -9, 0 },
+ { 0, -7, 93, 7, 54, -12, 0 },
+ { 0, -12, 76, 7, 76, -12, 0 },
+ { 0, -12, 54, 7, 93, -7, 0 },
+ { 0, -9, 31, 7, 104, 5, -3 },
+ { -4, 10, 106, 7, 23, -7, 0 },
+ { -1, -4, 98, 7, 46, -11, 0 },
+ { 0, -11, 83, 7, 69, -13, 0 },
+ { 0, -13, 62, 7, 89, -10, 0 },
+ { 0, -10, 38, 7, 102, 0, -2 } },
+ .odd = { { -4, 8, 105, 7, 27, -8, 0 },
+ { 0, -6, 96, 7, 50, -12, 0 },
+ { 0, -12, 80, 7, 73, -13, 0 },
+ { 0, -13, 58, 7, 92, -9, 0 },
+ { 0, -9, 34, 7, 103, 2, -2 },
+ { -5, 13, 107, 7, 20, -7, 0 },
+ { -1, -2, 100, 7, 42, -11, 0 },
+ { 0, -11, 87, 7, 65, -13, 0 },
+ { 0, -13, 65, 7, 87, -11, 0 },
+ { 0, -11, 42, 7, 100, -2, -1 },
+ { 0, -7, 20, 7, 107, 13, -5 },
+ { -2, 2, 103, 7, 34, -9, 0 },
+ { 0, -9, 92, 7, 58, -13, 0 },
+ { 0, -13, 73, 7, 80, -12, 0 },
+ { 0, -12, 50, 7, 96, -6, 0 },
+ { 0, -8, 27, 7, 105, 8, -4 } } },
+ .ptrn_arr = { { 0xfcffe7ff, 0xf } },
+ .sample_patrn_length = 38,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 7) = 0.820513 */
+ .hor_phase_arr = {
+ .even = { { -7, 19, 104, 7, 19, -7, 0 },
+ { -2, 0, 98, 7, 43, -11, 0 },
+ { 0, -10, 81, 7, 69, -12, 0 },
+ { 0, -12, 58, 7, 89, -7, 0 },
+ { 0, -10, 32, 7, 103, 7, -4 },
+ { -5, 10, 103, 7, 29, -9, 0 },
+ { -1, -6, 93, 7, 54, -12, 0 },
+ { 0, -12, 72, 7, 79, -11, 0 },
+ { 0, -12, 47, 7, 97, -2, -2 },
+ { 0, -8, 22, 7, 104, 16, -6 },
+ { -3, 2, 100, 7, 40, -11, 0 },
+ { 0, -9, 84, 7, 65, -12, 0 },
+ { 0, -13, 62, 7, 87, -8, 0 },
+ { 0, -10, 36, 7, 100, 5, -3 },
+ { -5, 13, 103, 7, 25, -8, 0 },
+ { -1, -4, 94, 7, 51, -12, 0 },
+ { 0, -12, 76, 7, 76, -12, 0 },
+ { 0, -12, 51, 7, 94, -4, -1 },
+ { 0, -8, 25, 7, 103, 13, -5 },
+ { -3, 5, 100, 7, 36, -10, 0 },
+ { 0, -8, 87, 7, 62, -13, 0 },
+ { 0, -12, 65, 7, 84, -9, 0 },
+ { 0, -11, 40, 7, 100, 2, -3 },
+ { -6, 16, 104, 7, 22, -8, 0 },
+ { -2, -2, 97, 7, 47, -12, 0 },
+ { 0, -11, 79, 7, 72, -12, 0 },
+ { 0, -12, 54, 7, 93, -6, -1 },
+ { 0, -9, 29, 7, 103, 10, -5 },
+ { -4, 7, 103, 7, 32, -10, 0 },
+ { 0, -7, 89, 7, 58, -12, 0 },
+ { 0, -12, 69, 7, 81, -10, 0 },
+ { 0, -11, 43, 7, 98, 0, -2 } },
+ .odd = { { -4, 9, 101, 7, 31, -9, 0 },
+ { -1, -6, 91, 7, 56, -12, 0 },
+ { 0, -12, 71, 7, 80, -11, 0 },
+ { 0, -11, 45, 7, 97, -1, -2 },
+ { 0, -7, 20, 7, 105, 17, -7 },
+ { -3, 1, 100, 7, 41, -11, 0 },
+ { 0, -10, 83, 7, 67, -12, 0 },
+ { 0, -13, 60, 7, 89, -8, 0 },
+ { 0, -10, 34, 7, 102, 6, -4 },
+ { -5, 11, 104, 7, 27, -9, 0 },
+ { -1, -5, 94, 7, 52, -12, 0 },
+ { 0, -12, 74, 7, 77, -11, 0 },
+ { 0, -12, 49, 7, 95, -3, -1 },
+ { 0, -8, 24, 7, 104, 14, -6 },
+ { -3, 3, 100, 7, 38, -10, 0 },
+ { 0, -9, 87, 7, 63, -13, 0 },
+ { 0, -13, 63, 7, 87, -9, 0 },
+ { 0, -10, 38, 7, 100, 3, -3 },
+ { -6, 14, 104, 7, 24, -8, 0 },
+ { -1, -3, 95, 7, 49, -12, 0 },
+ { 0, -11, 77, 7, 74, -12, 0 },
+ { 0, -12, 52, 7, 94, -5, -1 },
+ { 0, -9, 27, 7, 104, 11, -5 },
+ { -4, 6, 102, 7, 34, -10, 0 },
+ { 0, -8, 89, 7, 60, -13, 0 },
+ { 0, -12, 67, 7, 83, -10, 0 },
+ { 0, -11, 41, 7, 100, 1, -3 },
+ { -7, 17, 105, 7, 20, -7, 0 },
+ { -2, -1, 97, 7, 45, -11, 0 },
+ { 0, -11, 80, 7, 71, -12, 0 },
+ { 0, -12, 56, 7, 91, -6, -1 },
+ { 0, -9, 31, 7, 101, 9, -4 } } },
+ .ver_phase_arr = {
+ .even = { { -7, 19, 104, 7, 19, -7, 0 },
+ { -2, 0, 98, 7, 43, -11, 0 },
+ { 0, -10, 81, 7, 69, -12, 0 },
+ { 0, -12, 58, 7, 89, -7, 0 },
+ { 0, -10, 32, 7, 103, 7, -4 },
+ { -5, 10, 103, 7, 29, -9, 0 },
+ { -1, -6, 93, 7, 54, -12, 0 },
+ { 0, -12, 72, 7, 79, -11, 0 },
+ { 0, -12, 47, 7, 97, -2, -2 },
+ { 0, -8, 22, 7, 104, 16, -6 },
+ { -3, 2, 100, 7, 40, -11, 0 },
+ { 0, -9, 84, 7, 65, -12, 0 },
+ { 0, -13, 62, 7, 87, -8, 0 },
+ { 0, -10, 36, 7, 100, 5, -3 },
+ { -5, 13, 103, 7, 25, -8, 0 },
+ { -1, -4, 94, 7, 51, -12, 0 },
+ { 0, -12, 76, 7, 76, -12, 0 },
+ { 0, -12, 51, 7, 94, -4, -1 },
+ { 0, -8, 25, 7, 103, 13, -5 },
+ { -3, 5, 100, 7, 36, -10, 0 },
+ { 0, -8, 87, 7, 62, -13, 0 },
+ { 0, -12, 65, 7, 84, -9, 0 },
+ { 0, -11, 40, 7, 100, 2, -3 },
+ { -6, 16, 104, 7, 22, -8, 0 },
+ { -2, -2, 97, 7, 47, -12, 0 },
+ { 0, -11, 79, 7, 72, -12, 0 },
+ { 0, -12, 54, 7, 93, -6, -1 },
+ { 0, -9, 29, 7, 103, 10, -5 },
+ { -4, 7, 103, 7, 32, -10, 0 },
+ { 0, -7, 89, 7, 58, -12, 0 },
+ { 0, -12, 69, 7, 81, -10, 0 },
+ { 0, -11, 43, 7, 98, 0, -2 } },
+ .odd = { { -4, 9, 101, 7, 31, -9, 0 },
+ { -1, -6, 91, 7, 56, -12, 0 },
+ { 0, -12, 71, 7, 80, -11, 0 },
+ { 0, -11, 45, 7, 97, -1, -2 },
+ { 0, -7, 20, 7, 105, 17, -7 },
+ { -3, 1, 100, 7, 41, -11, 0 },
+ { 0, -10, 83, 7, 67, -12, 0 },
+ { 0, -13, 60, 7, 89, -8, 0 },
+ { 0, -10, 34, 7, 102, 6, -4 },
+ { -5, 11, 104, 7, 27, -9, 0 },
+ { -1, -5, 94, 7, 52, -12, 0 },
+ { 0, -12, 74, 7, 77, -11, 0 },
+ { 0, -12, 49, 7, 95, -3, -1 },
+ { 0, -8, 24, 7, 104, 14, -6 },
+ { -3, 3, 100, 7, 38, -10, 0 },
+ { 0, -9, 87, 7, 63, -13, 0 },
+ { 0, -13, 63, 7, 87, -9, 0 },
+ { 0, -10, 38, 7, 100, 3, -3 },
+ { -6, 14, 104, 7, 24, -8, 0 },
+ { -1, -3, 95, 7, 49, -12, 0 },
+ { 0, -11, 77, 7, 74, -12, 0 },
+ { 0, -12, 52, 7, 94, -5, -1 },
+ { 0, -9, 27, 7, 104, 11, -5 },
+ { -4, 6, 102, 7, 34, -10, 0 },
+ { 0, -8, 89, 7, 60, -13, 0 },
+ { 0, -12, 67, 7, 83, -10, 0 },
+ { 0, -11, 41, 7, 100, 1, -3 },
+ { -7, 17, 105, 7, 20, -7, 0 },
+ { -2, -1, 97, 7, 45, -11, 0 },
+ { 0, -11, 80, 7, 71, -12, 0 },
+ { 0, -12, 56, 7, 91, -6, -1 },
+ { 0, -9, 31, 7, 101, 9, -4 } } },
+ .ptrn_arr = { { 0xff9ff3ff, 0xff3fe7fc, 0xff9 } },
+ .sample_patrn_length = 78,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 8) = 0.8 */
+ .hor_phase_arr = {
+ .even = { { -8, 21, 102, 7, 21, -8, 0 },
+ { -3, 0, 95, 7, 48, -12, 0 },
+ { 0, -11, 75, 7, 75, -11, 0 },
+ { 0, -12, 48, 7, 95, 0, -3 } },
+ .odd = { { -5, 9, 100, 7, 34, -10, 0 },
+ { -1, -7, 86, 7, 62, -12, 0 },
+ { 0, -12, 62, 7, 86, -7, -1 },
+ { 0, -10, 34, 7, 100, 9, -5 } } },
+ .ver_phase_arr = {
+ .even = { { -8, 21, 102, 7, 21, -8, 0 },
+ { -3, 0, 95, 7, 48, -12, 0 },
+ { 0, -11, 75, 7, 75, -11, 0 },
+ { 0, -12, 48, 7, 95, 0, -3 } },
+ .odd = { { -5, 9, 100, 7, 34, -10, 0 },
+ { -1, -7, 86, 7, 62, -12, 0 },
+ { 0, -12, 62, 7, 86, -7, -1 },
+ { 0, -10, 34, 7, 100, 9, -5 } } },
+ .ptrn_arr = { { 0xff } },
+ .sample_patrn_length = 10,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 9) = 0.780488 */
+ .hor_phase_arr = {
+ .even = { { -9, 23, 100, 7, 23, -9, 0 },
+ { -3, 0, 91, 7, 52, -12, 0 },
+ { 0, -11, 68, 7, 80, -8, -1 },
+ { 0, -11, 39, 7, 96, 9, -5 },
+ { -6, 12, 98, 7, 35, -11, 0 },
+ { -1, -6, 81, 7, 65, -11, 0 },
+ { 0, -12, 55, 7, 89, -2, -2 },
+ { 0, -9, 26, 7, 99, 20, -8 },
+ { -4, 2, 93, 7, 49, -12, 0 },
+ { 0, -10, 71, 7, 76, -9, 0 },
+ { 0, -11, 42, 7, 95, 7, -5 },
+ { -7, 14, 99, 7, 32, -10, 0 },
+ { -1, -5, 84, 7, 62, -12, 0 },
+ { 0, -12, 59, 7, 87, -4, -2 },
+ { 0, -10, 29, 7, 99, 17, -7 },
+ { -4, 4, 95, 7, 45, -12, 0 },
+ { 0, -9, 72, 7, 74, -9, 0 },
+ { 0, -12, 45, 7, 95, 4, -4 },
+ { -7, 17, 99, 7, 29, -10, 0 },
+ { -2, -4, 87, 7, 59, -12, 0 },
+ { 0, -12, 62, 7, 84, -5, -1 },
+ { 0, -10, 32, 7, 99, 14, -7 },
+ { -5, 7, 95, 7, 42, -11, 0 },
+ { 0, -9, 76, 7, 71, -10, 0 },
+ { 0, -12, 49, 7, 93, 2, -4 },
+ { -8, 20, 99, 7, 26, -9, 0 },
+ { -2, -2, 89, 7, 55, -12, 0 },
+ { 0, -11, 65, 7, 81, -6, -1 },
+ { 0, -11, 35, 7, 98, 12, -6 },
+ { -5, 9, 96, 7, 39, -11, 0 },
+ { -1, -8, 80, 7, 68, -11, 0 },
+ { 0, -12, 52, 7, 91, 0, -3 } },
+ .odd = { { -6, 10, 98, 7, 37, -11, 0 },
+ { -1, -7, 81, 7, 66, -11, 0 },
+ { 0, -12, 54, 7, 90, -1, -3 },
+ { 0, -9, 24, 7, 100, 21, -8 },
+ { -3, 1, 92, 7, 50, -12, 0 },
+ { 0, -10, 69, 7, 78, -8, -1 },
+ { 0, -11, 40, 7, 96, 8, -5 },
+ { -6, 13, 97, 7, 34, -10, 0 },
+ { -1, -6, 83, 7, 63, -11, 0 },
+ { 0, -12, 57, 7, 88, -3, -2 },
+ { 0, -9, 27, 7, 100, 18, -8 },
+ { -4, 3, 94, 7, 47, -12, 0 },
+ { 0, -10, 72, 7, 75, -9, 0 },
+ { 0, -11, 44, 7, 95, 5, -5 },
+ { -7, 16, 98, 7, 31, -10, 0 },
+ { -2, -4, 86, 7, 60, -12, 0 },
+ { 0, -12, 60, 7, 86, -4, -2 },
+ { 0, -10, 31, 7, 98, 16, -7 },
+ { -5, 5, 95, 7, 44, -11, 0 },
+ { 0, -9, 75, 7, 72, -10, 0 },
+ { 0, -12, 47, 7, 94, 3, -4 },
+ { -8, 18, 100, 7, 27, -9, 0 },
+ { -2, -3, 88, 7, 57, -12, 0 },
+ { 0, -11, 63, 7, 83, -6, -1 },
+ { 0, -10, 34, 7, 97, 13, -6 },
+ { -5, 8, 96, 7, 40, -11, 0 },
+ { -1, -8, 78, 7, 69, -10, 0 },
+ { 0, -12, 50, 7, 92, 1, -3 },
+ { -8, 21, 100, 7, 24, -9, 0 },
+ { -3, -1, 90, 7, 54, -12, 0 },
+ { 0, -11, 66, 7, 81, -7, -1 },
+ { 0, -11, 37, 7, 98, 10, -6 } } },
+ .ver_phase_arr = {
+ .even = { { -9, 23, 100, 7, 23, -9, 0 },
+ { -3, 0, 91, 7, 52, -12, 0 },
+ { 0, -11, 68, 7, 80, -8, -1 },
+ { 0, -11, 39, 7, 96, 9, -5 },
+ { -6, 12, 98, 7, 35, -11, 0 },
+ { -1, -6, 81, 7, 65, -11, 0 },
+ { 0, -12, 55, 7, 89, -2, -2 },
+ { 0, -9, 26, 7, 99, 20, -8 },
+ { -4, 2, 93, 7, 49, -12, 0 },
+ { 0, -10, 71, 7, 76, -9, 0 },
+ { 0, -11, 42, 7, 95, 7, -5 },
+ { -7, 14, 99, 7, 32, -10, 0 },
+ { -1, -5, 84, 7, 62, -12, 0 },
+ { 0, -12, 59, 7, 87, -4, -2 },
+ { 0, -10, 29, 7, 99, 17, -7 },
+ { -4, 4, 95, 7, 45, -12, 0 },
+ { 0, -9, 72, 7, 74, -9, 0 },
+ { 0, -12, 45, 7, 95, 4, -4 },
+ { -7, 17, 99, 7, 29, -10, 0 },
+ { -2, -4, 87, 7, 59, -12, 0 },
+ { 0, -12, 62, 7, 84, -5, -1 },
+ { 0, -10, 32, 7, 99, 14, -7 },
+ { -5, 7, 95, 7, 42, -11, 0 },
+ { 0, -9, 76, 7, 71, -10, 0 },
+ { 0, -12, 49, 7, 93, 2, -4 },
+ { -8, 20, 99, 7, 26, -9, 0 },
+ { -2, -2, 89, 7, 55, -12, 0 },
+ { 0, -11, 65, 7, 81, -6, -1 },
+ { 0, -11, 35, 7, 98, 12, -6 },
+ { -5, 9, 96, 7, 39, -11, 0 },
+ { -1, -8, 80, 7, 68, -11, 0 },
+ { 0, -12, 52, 7, 91, 0, -3 } },
+ .odd = { { -6, 10, 98, 7, 37, -11, 0 },
+ { -1, -7, 81, 7, 66, -11, 0 },
+ { 0, -12, 54, 7, 90, -1, -3 },
+ { 0, -9, 24, 7, 100, 21, -8 },
+ { -3, 1, 92, 7, 50, -12, 0 },
+ { 0, -10, 69, 7, 78, -8, -1 },
+ { 0, -11, 40, 7, 96, 8, -5 },
+ { -6, 13, 97, 7, 34, -10, 0 },
+ { -1, -6, 83, 7, 63, -11, 0 },
+ { 0, -12, 57, 7, 88, -3, -2 },
+ { 0, -9, 27, 7, 100, 18, -8 },
+ { -4, 3, 94, 7, 47, -12, 0 },
+ { 0, -10, 72, 7, 75, -9, 0 },
+ { 0, -11, 44, 7, 95, 5, -5 },
+ { -7, 16, 98, 7, 31, -10, 0 },
+ { -2, -4, 86, 7, 60, -12, 0 },
+ { 0, -12, 60, 7, 86, -4, -2 },
+ { 0, -10, 31, 7, 98, 16, -7 },
+ { -5, 5, 95, 7, 44, -11, 0 },
+ { 0, -9, 75, 7, 72, -10, 0 },
+ { 0, -12, 47, 7, 94, 3, -4 },
+ { -8, 18, 100, 7, 27, -9, 0 },
+ { -2, -3, 88, 7, 57, -12, 0 },
+ { 0, -11, 63, 7, 83, -6, -1 },
+ { 0, -10, 34, 7, 97, 13, -6 },
+ { -5, 8, 96, 7, 40, -11, 0 },
+ { -1, -8, 78, 7, 69, -10, 0 },
+ { 0, -12, 50, 7, 92, 1, -3 },
+ { -8, 21, 100, 7, 24, -9, 0 },
+ { -3, -1, 90, 7, 54, -12, 0 },
+ { 0, -11, 66, 7, 81, -7, -1 },
+ { 0, -11, 37, 7, 98, 10, -6 } } },
+ .ptrn_arr = { { 0xf3f9fcff, 0x3f9fcfe7, 0xfe7f } },
+ .sample_patrn_length = 82,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 10) = 0.761905 */
+ .hor_phase_arr = {
+ .even = { { -9, 25, 96, 7, 25, -9, 0 },
+ { -3, 0, 86, 7, 56, -11, 0 },
+ { 0, -11, 62, 7, 82, -3, -2 },
+ { 0, -10, 31, 7, 96, 19, -8 },
+ { -5, 4, 92, 7, 49, -12, 0 },
+ { 0, -10, 67, 7, 78, -6, -1 },
+ { 0, -11, 37, 7, 95, 14, -7 },
+ { -6, 9, 93, 7, 43, -11, 0 },
+ { -1, -8, 73, 7, 73, -8, -1 },
+ { 0, -11, 43, 7, 93, 9, -6 },
+ { -7, 14, 95, 7, 37, -11, 0 },
+ { -1, -6, 78, 7, 67, -10, 0 },
+ { 0, -12, 49, 7, 92, 4, -5 },
+ { -8, 19, 96, 7, 31, -10, 0 },
+ { -2, -3, 82, 7, 62, -11, 0 },
+ { 0, -11, 56, 7, 86, 0, -3 } },
+ .odd = { { -6, 11, 94, 7, 40, -11, 0 },
+ { -1, -7, 75, 7, 70, -9, 0 },
+ { 0, -12, 46, 7, 93, 6, -5 },
+ { -8, 16, 97, 7, 34, -11, 0 },
+ { -2, -5, 81, 7, 64, -10, 0 },
+ { 0, -12, 53, 7, 89, 2, -4 },
+ { -9, 22, 97, 7, 28, -10, 0 },
+ { -3, -2, 85, 7, 59, -11, 0 },
+ { 0, -11, 59, 7, 85, -2, -3 },
+ { 0, -10, 28, 7, 97, 22, -9 },
+ { -4, 2, 89, 7, 53, -12, 0 },
+ { 0, -10, 64, 7, 81, -5, -2 },
+ { 0, -11, 34, 7, 97, 16, -8 },
+ { -5, 6, 93, 7, 46, -12, 0 },
+ { 0, -9, 70, 7, 75, -7, -1 },
+ { 0, -11, 40, 7, 94, 11, -6 } } },
+ .ver_phase_arr = {
+ .even = { { -9, 25, 96, 7, 25, -9, 0 },
+ { -3, 0, 86, 7, 56, -11, 0 },
+ { 0, -11, 62, 7, 82, -3, -2 },
+ { 0, -10, 31, 7, 96, 19, -8 },
+ { -5, 4, 92, 7, 49, -12, 0 },
+ { 0, -10, 67, 7, 78, -6, -1 },
+ { 0, -11, 37, 7, 95, 14, -7 },
+ { -6, 9, 93, 7, 43, -11, 0 },
+ { -1, -8, 73, 7, 73, -8, -1 },
+ { 0, -11, 43, 7, 93, 9, -6 },
+ { -7, 14, 95, 7, 37, -11, 0 },
+ { -1, -6, 78, 7, 67, -10, 0 },
+ { 0, -12, 49, 7, 92, 4, -5 },
+ { -8, 19, 96, 7, 31, -10, 0 },
+ { -2, -3, 82, 7, 62, -11, 0 },
+ { 0, -11, 56, 7, 86, 0, -3 } },
+ .odd = { { -6, 11, 94, 7, 40, -11, 0 },
+ { -1, -7, 75, 7, 70, -9, 0 },
+ { 0, -12, 46, 7, 93, 6, -5 },
+ { -8, 16, 97, 7, 34, -11, 0 },
+ { -2, -5, 81, 7, 64, -10, 0 },
+ { 0, -12, 53, 7, 89, 2, -4 },
+ { -9, 22, 97, 7, 28, -10, 0 },
+ { -3, -2, 85, 7, 59, -11, 0 },
+ { 0, -11, 59, 7, 85, -2, -3 },
+ { 0, -10, 28, 7, 97, 22, -9 },
+ { -4, 2, 89, 7, 53, -12, 0 },
+ { 0, -10, 64, 7, 81, -5, -2 },
+ { 0, -11, 34, 7, 97, 16, -8 },
+ { -5, 6, 93, 7, 46, -12, 0 },
+ { 0, -9, 70, 7, 75, -7, -1 },
+ { 0, -11, 40, 7, 94, 11, -6 } } },
+ .ptrn_arr = { { 0xfcfe7e7f, 0xfc } },
+ .sample_patrn_length = 42,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 11) = 0.744186 */
+ .hor_phase_arr = {
+ .even = { { -10, 26, 96, 7, 26, -10, 0 },
+ { -4, 0, 83, 7, 59, -10, 0 },
+ { 0, -11, 56, 7, 85, 2, -4 },
+ { -9, 23, 95, 7, 29, -10, 0 },
+ { -3, -2, 82, 7, 61, -10, 0 },
+ { 0, -11, 53, 7, 87, 4, -5 },
+ { -9, 21, 94, 7, 32, -10, 0 },
+ { -3, -3, 79, 7, 64, -9, 0 },
+ { 0, -11, 50, 7, 88, 6, -5 },
+ { -8, 18, 94, 7, 35, -11, 0 },
+ { -2, -5, 78, 7, 67, -9, -1 },
+ { 0, -11, 47, 7, 90, 8, -6 },
+ { -8, 15, 94, 7, 38, -11, 0 },
+ { -2, -6, 75, 7, 70, -8, -1 },
+ { 0, -11, 44, 7, 92, 10, -7 },
+ { -7, 13, 92, 7, 41, -11, 0 },
+ { -1, -7, 72, 7, 72, -7, -1 },
+ { 0, -11, 41, 7, 92, 13, -7 },
+ { -7, 10, 92, 7, 44, -11, 0 },
+ { -1, -8, 70, 7, 75, -6, -2 },
+ { 0, -11, 38, 7, 94, 15, -8 },
+ { -6, 8, 90, 7, 47, -11, 0 },
+ { -1, -9, 67, 7, 78, -5, -2 },
+ { 0, -11, 35, 7, 94, 18, -8 },
+ { -5, 6, 88, 7, 50, -11, 0 },
+ { 0, -9, 64, 7, 79, -3, -3 },
+ { 0, -10, 32, 7, 94, 21, -9 },
+ { -5, 4, 87, 7, 53, -11, 0 },
+ { 0, -10, 61, 7, 82, -2, -3 },
+ { 0, -10, 29, 7, 95, 23, -9 },
+ { -4, 2, 85, 7, 56, -11, 0 },
+ { 0, -10, 59, 7, 83, 0, -4 } },
+ .odd = { { -7, 12, 92, 7, 42, -11, 0 },
+ { -1, -7, 71, 7, 72, -6, -1 },
+ { 0, -11, 39, 7, 93, 14, -7 },
+ { -6, 9, 91, 7, 45, -11, 0 },
+ { -1, -8, 68, 7, 76, -5, -2 },
+ { 0, -11, 36, 7, 94, 17, -8 },
+ { -6, 7, 90, 7, 48, -11, 0 },
+ { 0, -9, 66, 7, 77, -4, -2 },
+ { 0, -11, 33, 7, 96, 19, -9 },
+ { -5, 5, 88, 7, 51, -11, 0 },
+ { 0, -10, 63, 7, 80, -2, -3 },
+ { 0, -10, 31, 7, 94, 22, -9 },
+ { -5, 3, 87, 7, 54, -11, 0 },
+ { 0, -10, 60, 7, 82, -1, -3 },
+ { 0, -10, 28, 7, 94, 25, -9 },
+ { -4, 1, 85, 7, 57, -11, 0 },
+ { 0, -11, 57, 7, 85, 1, -4 },
+ { -9, 25, 94, 7, 28, -10, 0 },
+ { -3, -1, 82, 7, 60, -10, 0 },
+ { 0, -11, 54, 7, 87, 3, -5 },
+ { -9, 22, 94, 7, 31, -10, 0 },
+ { -3, -2, 80, 7, 63, -10, 0 },
+ { 0, -11, 51, 7, 88, 5, -5 },
+ { -9, 19, 96, 7, 33, -11, 0 },
+ { -2, -4, 77, 7, 66, -9, 0 },
+ { 0, -11, 48, 7, 90, 7, -6 },
+ { -8, 17, 94, 7, 36, -11, 0 },
+ { -2, -5, 76, 7, 68, -8, -1 },
+ { 0, -11, 45, 7, 91, 9, -6 },
+ { -7, 14, 93, 7, 39, -11, 0 },
+ { -1, -6, 72, 7, 71, -7, -1 },
+ { 0, -11, 42, 7, 92, 12, -7 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 26, 96, 7, 26, -10, 0 },
+ { -4, 0, 83, 7, 59, -10, 0 },
+ { 0, -11, 56, 7, 85, 2, -4 },
+ { -9, 23, 95, 7, 29, -10, 0 },
+ { -3, -2, 82, 7, 61, -10, 0 },
+ { 0, -11, 53, 7, 87, 4, -5 },
+ { -9, 21, 94, 7, 32, -10, 0 },
+ { -3, -3, 79, 7, 64, -9, 0 },
+ { 0, -11, 50, 7, 88, 6, -5 },
+ { -8, 18, 94, 7, 35, -11, 0 },
+ { -2, -5, 78, 7, 67, -9, -1 },
+ { 0, -11, 47, 7, 90, 8, -6 },
+ { -8, 15, 94, 7, 38, -11, 0 },
+ { -2, -6, 75, 7, 70, -8, -1 },
+ { 0, -11, 44, 7, 92, 10, -7 },
+ { -7, 13, 92, 7, 41, -11, 0 },
+ { -1, -7, 72, 7, 72, -7, -1 },
+ { 0, -11, 41, 7, 92, 13, -7 },
+ { -7, 10, 92, 7, 44, -11, 0 },
+ { -1, -8, 70, 7, 75, -6, -2 },
+ { 0, -11, 38, 7, 94, 15, -8 },
+ { -6, 8, 90, 7, 47, -11, 0 },
+ { -1, -9, 67, 7, 78, -5, -2 },
+ { 0, -11, 35, 7, 94, 18, -8 },
+ { -5, 6, 88, 7, 50, -11, 0 },
+ { 0, -9, 64, 7, 79, -3, -3 },
+ { 0, -10, 32, 7, 94, 21, -9 },
+ { -5, 4, 87, 7, 53, -11, 0 },
+ { 0, -10, 61, 7, 82, -2, -3 },
+ { 0, -10, 29, 7, 95, 23, -9 },
+ { -4, 2, 85, 7, 56, -11, 0 },
+ { 0, -10, 59, 7, 83, 0, -4 } },
+ .odd = { { -7, 12, 92, 7, 42, -11, 0 },
+ { -1, -7, 71, 7, 72, -6, -1 },
+ { 0, -11, 39, 7, 93, 14, -7 },
+ { -6, 9, 91, 7, 45, -11, 0 },
+ { -1, -8, 68, 7, 76, -5, -2 },
+ { 0, -11, 36, 7, 94, 17, -8 },
+ { -6, 7, 90, 7, 48, -11, 0 },
+ { 0, -9, 66, 7, 77, -4, -2 },
+ { 0, -11, 33, 7, 96, 19, -9 },
+ { -5, 5, 88, 7, 51, -11, 0 },
+ { 0, -10, 63, 7, 80, -2, -3 },
+ { 0, -10, 31, 7, 94, 22, -9 },
+ { -5, 3, 87, 7, 54, -11, 0 },
+ { 0, -10, 60, 7, 82, -1, -3 },
+ { 0, -10, 28, 7, 94, 25, -9 },
+ { -4, 1, 85, 7, 57, -11, 0 },
+ { 0, -11, 57, 7, 85, 1, -4 },
+ { -9, 25, 94, 7, 28, -10, 0 },
+ { -3, -1, 82, 7, 60, -10, 0 },
+ { 0, -11, 54, 7, 87, 3, -5 },
+ { -9, 22, 94, 7, 31, -10, 0 },
+ { -3, -2, 80, 7, 63, -10, 0 },
+ { 0, -11, 51, 7, 88, 5, -5 },
+ { -9, 19, 96, 7, 33, -11, 0 },
+ { -2, -4, 77, 7, 66, -9, 0 },
+ { 0, -11, 48, 7, 90, 7, -6 },
+ { -8, 17, 94, 7, 36, -11, 0 },
+ { -2, -5, 76, 7, 68, -8, -1 },
+ { 0, -11, 45, 7, 91, 9, -6 },
+ { -7, 14, 93, 7, 39, -11, 0 },
+ { -1, -6, 72, 7, 71, -7, -1 },
+ { 0, -11, 42, 7, 92, 12, -7 } } },
+ .ptrn_arr = { { 0x3f3f3f3f, 0x9f9f9f3f, 0xf9f9f } },
+ .sample_patrn_length = 86,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
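+ /*
+  * Table sizes appear to follow gcd(N, 32) for scale factor
+  * 32 / (32 + N): each .even/.odd sub-array holds 32 / gcd(N, 32)
+  * phases and sample_patrn_length is 2 * (32 + N) / gcd(N, 32).
+  * For the N = 12 entry below, gcd(12, 32) = 4, giving the 8 phases
+  * per sub-array and the period of 22 seen there.
+  */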
+ /* Scale factor 32 / (32 + 12) = 0.727273 */
+ .hor_phase_arr = {
+ .even = { { -10, 28, 92, 7, 28, -10, 0 },
+ { -4, 0, 81, 7, 61, -9, -1 },
+ { 0, -11, 50, 7, 87, 8, -6 },
+ { -8, 17, 91, 7, 39, -11, 0 },
+ { -2, -6, 72, 7, 72, -6, -2 },
+ { 0, -11, 39, 7, 91, 17, -8 },
+ { -6, 8, 87, 7, 50, -11, 0 },
+ { -1, -9, 61, 7, 81, 0, -4 } },
+ .odd = { { -7, 12, 89, 7, 45, -11, 0 },
+ { -1, -8, 67, 7, 76, -3, -3 },
+ { 0, -11, 33, 7, 93, 22, -9 },
+ { -5, 4, 83, 7, 56, -10, 0 },
+ { 0, -10, 56, 7, 83, 4, -5 },
+ { -9, 22, 93, 7, 33, -11, 0 },
+ { -3, -3, 76, 7, 67, -8, -1 },
+ { 0, -11, 45, 7, 89, 12, -7 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 28, 92, 7, 28, -10, 0 },
+ { -4, 0, 81, 7, 61, -9, -1 },
+ { 0, -11, 50, 7, 87, 8, -6 },
+ { -8, 17, 91, 7, 39, -11, 0 },
+ { -2, -6, 72, 7, 72, -6, -2 },
+ { 0, -11, 39, 7, 91, 17, -8 },
+ { -6, 8, 87, 7, 50, -11, 0 },
+ { -1, -9, 61, 7, 81, 0, -4 } },
+ .odd = { { -7, 12, 89, 7, 45, -11, 0 },
+ { -1, -8, 67, 7, 76, -3, -3 },
+ { 0, -11, 33, 7, 93, 22, -9 },
+ { -5, 4, 83, 7, 56, -10, 0 },
+ { 0, -10, 56, 7, 83, 4, -5 },
+ { -9, 22, 93, 7, 33, -11, 0 },
+ { -3, -3, 76, 7, 67, -8, -1 },
+ { 0, -11, 45, 7, 89, 12, -7 } } },
+ .ptrn_arr = { { 0xf9f3f } },
+ .sample_patrn_length = 22,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 13) = 0.711111 */
+ .hor_phase_arr = {
+ .even = { { -10, 29, 90, 7, 29, -10, 0 },
+ { -4, 0, 76, 7, 64, -7, -1 },
+ { 0, -11, 45, 7, 88, 14, -8 },
+ { -7, 12, 85, 7, 48, -10, 0 },
+ { -1, -8, 61, 7, 79, 2, -5 },
+ { -10, 26, 90, 7, 32, -10, 0 },
+ { -4, -2, 76, 7, 66, -6, -2 },
+ { 0, -11, 42, 7, 89, 16, -8 },
+ { -7, 10, 84, 7, 51, -10, 0 },
+ { -1, -9, 59, 7, 81, 3, -5 },
+ { -10, 24, 91, 7, 34, -11, 0 },
+ { -3, -3, 72, 7, 69, -5, -2 },
+ { 0, -11, 40, 7, 89, 19, -9 },
+ { -6, 7, 84, 7, 53, -10, 0 },
+ { -1, -9, 56, 7, 83, 5, -6 },
+ { -9, 21, 90, 7, 37, -11, 0 },
+ { -3, -4, 71, 7, 71, -4, -3 },
+ { 0, -11, 37, 7, 90, 21, -9 },
+ { -6, 5, 83, 7, 56, -9, -1 },
+ { 0, -10, 53, 7, 84, 7, -6 },
+ { -9, 19, 89, 7, 40, -11, 0 },
+ { -2, -5, 69, 7, 72, -3, -3 },
+ { 0, -11, 34, 7, 91, 24, -10 },
+ { -5, 3, 81, 7, 59, -9, -1 },
+ { 0, -10, 51, 7, 84, 10, -7 },
+ { -8, 16, 89, 7, 42, -11, 0 },
+ { -2, -6, 66, 7, 76, -2, -4 },
+ { 0, -10, 32, 7, 90, 26, -10 },
+ { -5, 2, 79, 7, 61, -8, -1 },
+ { 0, -10, 48, 7, 85, 12, -7 },
+ { -8, 14, 88, 7, 45, -11, 0 },
+ { -1, -7, 64, 7, 76, 0, -4 } },
+ .odd = { { -8, 13, 88, 7, 46, -11, 0 },
+ { -1, -8, 63, 7, 78, 1, -5 },
+ { -10, 28, 90, 7, 30, -10, 0 },
+ { -4, -1, 77, 7, 65, -7, -2 },
+ { 0, -11, 44, 7, 88, 15, -8 },
+ { -7, 11, 85, 7, 49, -10, 0 },
+ { -1, -8, 60, 7, 79, 3, -5 },
+ { -10, 25, 91, 7, 33, -11, 0 },
+ { -4, -2, 74, 7, 68, -6, -2 },
+ { 0, -11, 41, 7, 89, 18, -9 },
+ { -7, 8, 85, 7, 52, -10, 0 },
+ { -1, -9, 57, 7, 83, 4, -6 },
+ { -9, 22, 90, 7, 36, -11, 0 },
+ { -3, -4, 73, 7, 70, -5, -3 },
+ { 0, -11, 38, 7, 90, 20, -9 },
+ { -6, 6, 83, 7, 55, -10, 0 },
+ { 0, -10, 55, 7, 83, 6, -6 },
+ { -9, 20, 90, 7, 38, -11, 0 },
+ { -3, -5, 70, 7, 73, -4, -3 },
+ { 0, -11, 36, 7, 90, 22, -9 },
+ { -6, 4, 83, 7, 57, -9, -1 },
+ { 0, -10, 52, 7, 85, 8, -7 },
+ { -9, 18, 89, 7, 41, -11, 0 },
+ { -2, -6, 68, 7, 74, -2, -4 },
+ { 0, -11, 33, 7, 91, 25, -10 },
+ { -5, 3, 79, 7, 60, -8, -1 },
+ { 0, -10, 49, 7, 85, 11, -7 },
+ { -8, 15, 88, 7, 44, -11, 0 },
+ { -2, -7, 65, 7, 77, -1, -4 },
+ { 0, -10, 30, 7, 90, 28, -10 },
+ { -5, 1, 78, 7, 63, -8, -1 },
+ { 0, -11, 46, 7, 88, 13, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 29, 90, 7, 29, -10, 0 },
+ { -4, 0, 76, 7, 64, -7, -1 },
+ { 0, -11, 45, 7, 88, 14, -8 },
+ { -7, 12, 85, 7, 48, -10, 0 },
+ { -1, -8, 61, 7, 79, 2, -5 },
+ { -10, 26, 90, 7, 32, -10, 0 },
+ { -4, -2, 76, 7, 66, -6, -2 },
+ { 0, -11, 42, 7, 89, 16, -8 },
+ { -7, 10, 84, 7, 51, -10, 0 },
+ { -1, -9, 59, 7, 81, 3, -5 },
+ { -10, 24, 91, 7, 34, -11, 0 },
+ { -3, -3, 72, 7, 69, -5, -2 },
+ { 0, -11, 40, 7, 89, 19, -9 },
+ { -6, 7, 84, 7, 53, -10, 0 },
+ { -1, -9, 56, 7, 83, 5, -6 },
+ { -9, 21, 90, 7, 37, -11, 0 },
+ { -3, -4, 71, 7, 71, -4, -3 },
+ { 0, -11, 37, 7, 90, 21, -9 },
+ { -6, 5, 83, 7, 56, -9, -1 },
+ { 0, -10, 53, 7, 84, 7, -6 },
+ { -9, 19, 89, 7, 40, -11, 0 },
+ { -2, -5, 69, 7, 72, -3, -3 },
+ { 0, -11, 34, 7, 91, 24, -10 },
+ { -5, 3, 81, 7, 59, -9, -1 },
+ { 0, -10, 51, 7, 84, 10, -7 },
+ { -8, 16, 89, 7, 42, -11, 0 },
+ { -2, -6, 66, 7, 76, -2, -4 },
+ { 0, -10, 32, 7, 90, 26, -10 },
+ { -5, 2, 79, 7, 61, -8, -1 },
+ { 0, -10, 48, 7, 85, 12, -7 },
+ { -8, 14, 88, 7, 45, -11, 0 },
+ { -1, -7, 64, 7, 76, 0, -4 } },
+ .odd = { { -8, 13, 88, 7, 46, -11, 0 },
+ { -1, -8, 63, 7, 78, 1, -5 },
+ { -10, 28, 90, 7, 30, -10, 0 },
+ { -4, -1, 77, 7, 65, -7, -2 },
+ { 0, -11, 44, 7, 88, 15, -8 },
+ { -7, 11, 85, 7, 49, -10, 0 },
+ { -1, -8, 60, 7, 79, 3, -5 },
+ { -10, 25, 91, 7, 33, -11, 0 },
+ { -4, -2, 74, 7, 68, -6, -2 },
+ { 0, -11, 41, 7, 89, 18, -9 },
+ { -7, 8, 85, 7, 52, -10, 0 },
+ { -1, -9, 57, 7, 83, 4, -6 },
+ { -9, 22, 90, 7, 36, -11, 0 },
+ { -3, -4, 73, 7, 70, -5, -3 },
+ { 0, -11, 38, 7, 90, 20, -9 },
+ { -6, 6, 83, 7, 55, -10, 0 },
+ { 0, -10, 55, 7, 83, 6, -6 },
+ { -9, 20, 90, 7, 38, -11, 0 },
+ { -3, -5, 70, 7, 73, -4, -3 },
+ { 0, -11, 36, 7, 90, 22, -9 },
+ { -6, 4, 83, 7, 57, -9, -1 },
+ { 0, -10, 52, 7, 85, 8, -7 },
+ { -9, 18, 89, 7, 41, -11, 0 },
+ { -2, -6, 68, 7, 74, -2, -4 },
+ { 0, -11, 33, 7, 91, 25, -10 },
+ { -5, 3, 79, 7, 60, -8, -1 },
+ { 0, -10, 49, 7, 85, 11, -7 },
+ { -8, 15, 88, 7, 44, -11, 0 },
+ { -2, -7, 65, 7, 77, -1, -4 },
+ { 0, -10, 30, 7, 90, 28, -10 },
+ { -5, 1, 78, 7, 63, -8, -1 },
+ { 0, -11, 46, 7, 88, 13, -8 } } },
+ .ptrn_arr = { { 0xf3e7cf9f, 0x9f3e7cf9, 0xf3e7cf } },
+ .sample_patrn_length = 90,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 14) = 0.695652 */
+ .hor_phase_arr = {
+ .even = { { -10, 30, 88, 7, 30, -10, 0 },
+ { -5, 0, 75, 7, 66, -5, -3 },
+ { 0, -10, 40, 7, 87, 20, -9 },
+ { -7, 7, 81, 7, 56, -8, -1 },
+ { 0, -9, 51, 7, 83, 11, -8 },
+ { -8, 16, 84, 7, 46, -10, 0 },
+ { -2, -7, 61, 7, 79, 3, -6 },
+ { -10, 25, 88, 7, 35, -10, 0 },
+ { -4, -3, 72, 7, 70, -3, -4 },
+ { 0, -10, 35, 7, 88, 25, -10 },
+ { -6, 3, 79, 7, 61, -7, -2 },
+ { 0, -10, 46, 7, 84, 16, -8 },
+ { -8, 11, 83, 7, 51, -9, 0 },
+ { -1, -8, 56, 7, 81, 7, -7 },
+ { -9, 20, 87, 7, 40, -10, 0 },
+ { -3, -5, 66, 7, 75, 0, -5 } },
+ .odd = { { -8, 13, 85, 7, 48, -10, 0 },
+ { -1, -8, 59, 7, 79, 5, -6 },
+ { -10, 23, 87, 7, 38, -10, 0 },
+ { -3, -4, 68, 7, 72, -1, -4 },
+ { 0, -10, 33, 7, 87, 28, -10 },
+ { -5, 2, 75, 7, 64, -6, -2 },
+ { 0, -10, 43, 7, 86, 18, -9 },
+ { -7, 9, 83, 7, 53, -9, -1 },
+ { -1, -9, 53, 7, 83, 9, -7 },
+ { -9, 18, 86, 7, 43, -10, 0 },
+ { -2, -6, 64, 7, 75, 2, -5 },
+ { -10, 28, 87, 7, 33, -10, 0 },
+ { -4, -1, 72, 7, 68, -4, -3 },
+ { 0, -10, 38, 7, 87, 23, -10 },
+ { -6, 5, 79, 7, 59, -8, -1 },
+ { 0, -10, 48, 7, 85, 13, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 30, 88, 7, 30, -10, 0 },
+ { -5, 0, 75, 7, 66, -5, -3 },
+ { 0, -10, 40, 7, 87, 20, -9 },
+ { -7, 7, 81, 7, 56, -8, -1 },
+ { 0, -9, 51, 7, 83, 11, -8 },
+ { -8, 16, 84, 7, 46, -10, 0 },
+ { -2, -7, 61, 7, 79, 3, -6 },
+ { -10, 25, 88, 7, 35, -10, 0 },
+ { -4, -3, 72, 7, 70, -3, -4 },
+ { 0, -10, 35, 7, 88, 25, -10 },
+ { -6, 3, 79, 7, 61, -7, -2 },
+ { 0, -10, 46, 7, 84, 16, -8 },
+ { -8, 11, 83, 7, 51, -9, 0 },
+ { -1, -8, 56, 7, 81, 7, -7 },
+ { -9, 20, 87, 7, 40, -10, 0 },
+ { -3, -5, 66, 7, 75, 0, -5 } },
+ .odd = { { -8, 13, 85, 7, 48, -10, 0 },
+ { -1, -8, 59, 7, 79, 5, -6 },
+ { -10, 23, 87, 7, 38, -10, 0 },
+ { -3, -4, 68, 7, 72, -1, -4 },
+ { 0, -10, 33, 7, 87, 28, -10 },
+ { -5, 2, 75, 7, 64, -6, -2 },
+ { 0, -10, 43, 7, 86, 18, -9 },
+ { -7, 9, 83, 7, 53, -9, -1 },
+ { -1, -9, 53, 7, 83, 9, -7 },
+ { -9, 18, 86, 7, 43, -10, 0 },
+ { -2, -6, 64, 7, 75, 2, -5 },
+ { -10, 28, 87, 7, 33, -10, 0 },
+ { -4, -1, 72, 7, 68, -4, -3 },
+ { 0, -10, 38, 7, 87, 23, -10 },
+ { -6, 5, 79, 7, 59, -8, -1 },
+ { 0, -10, 48, 7, 85, 13, -8 } } },
+ .ptrn_arr = { { 0x79f3cf9f, 0xf3e } },
+ .sample_patrn_length = 46,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 15) = 0.680851 */
+ .hor_phase_arr = {
+ .even = { { -10, 31, 86, 7, 31, -10, 0 },
+ { -5, 0, 72, 7, 68, -3, -4 },
+ { 0, -10, 36, 7, 86, 26, -10 },
+ { -6, 3, 76, 7, 63, -5, -3 },
+ { 0, -10, 41, 7, 85, 21, -9 },
+ { -7, 7, 78, 7, 59, -7, -2 },
+ { 0, -10, 46, 7, 84, 17, -9 },
+ { -8, 11, 80, 7, 54, -8, -1 },
+ { -1, -9, 51, 7, 82, 13, -8 },
+ { -9, 15, 83, 7, 49, -9, -1 },
+ { -2, -8, 56, 7, 80, 9, -7 },
+ { -9, 19, 85, 7, 43, -10, 0 },
+ { -3, -6, 61, 7, 77, 5, -6 },
+ { -10, 24, 86, 7, 38, -10, 0 },
+ { -3, -4, 66, 7, 72, 2, -5 },
+ { -10, 29, 86, 7, 33, -10, 0 },
+ { -4, -1, 68, 7, 70, -1, -4 },
+ { 0, -10, 33, 7, 86, 29, -10 },
+ { -5, 2, 72, 7, 66, -4, -3 },
+ { 0, -10, 38, 7, 86, 24, -10 },
+ { -6, 5, 77, 7, 61, -6, -3 },
+ { 0, -10, 43, 7, 85, 19, -9 },
+ { -7, 9, 80, 7, 56, -8, -2 },
+ { -1, -9, 49, 7, 83, 15, -9 },
+ { -8, 13, 82, 7, 51, -9, -1 },
+ { -1, -8, 54, 7, 80, 11, -8 },
+ { -9, 17, 84, 7, 46, -10, 0 },
+ { -2, -7, 59, 7, 78, 7, -7 },
+ { -9, 21, 85, 7, 41, -10, 0 },
+ { -3, -5, 63, 7, 76, 3, -6 },
+ { -10, 26, 86, 7, 36, -10, 0 },
+ { -4, -3, 68, 7, 72, 0, -5 } },
+ .odd = { { -8, 14, 82, 7, 50, -9, -1 },
+ { -1, -8, 55, 7, 79, 10, -7 },
+ { -9, 18, 84, 7, 45, -10, 0 },
+ { -2, -6, 60, 7, 77, 6, -7 },
+ { -10, 23, 85, 7, 40, -10, 0 },
+ { -3, -4, 64, 7, 75, 2, -6 },
+ { -10, 27, 86, 7, 35, -10, 0 },
+ { -4, -2, 69, 7, 71, -1, -5 },
+ { 0, -10, 32, 7, 86, 30, -10 },
+ { -5, 1, 72, 7, 67, -3, -4 },
+ { 0, -10, 37, 7, 86, 25, -10 },
+ { -6, 4, 77, 7, 62, -6, -3 },
+ { 0, -10, 42, 7, 85, 20, -9 },
+ { -7, 8, 79, 7, 57, -7, -2 },
+ { -1, -9, 47, 7, 84, 16, -9 },
+ { -8, 12, 81, 7, 52, -8, -1 },
+ { -1, -8, 52, 7, 81, 12, -8 },
+ { -9, 16, 84, 7, 47, -9, -1 },
+ { -2, -7, 57, 7, 79, 8, -7 },
+ { -9, 20, 85, 7, 42, -10, 0 },
+ { -3, -6, 62, 7, 77, 4, -6 },
+ { -10, 25, 86, 7, 37, -10, 0 },
+ { -4, -3, 67, 7, 72, 1, -5 },
+ { -10, 30, 86, 7, 32, -10, 0 },
+ { -5, -1, 71, 7, 69, -2, -4 },
+ { 0, -10, 35, 7, 86, 27, -10 },
+ { -6, 2, 75, 7, 64, -4, -3 },
+ { 0, -10, 40, 7, 85, 23, -10 },
+ { -7, 6, 77, 7, 60, -6, -2 },
+ { 0, -10, 45, 7, 84, 18, -9 },
+ { -7, 10, 79, 7, 55, -8, -1 },
+ { -1, -9, 50, 7, 82, 14, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 31, 86, 7, 31, -10, 0 },
+ { -5, 0, 72, 7, 68, -3, -4 },
+ { 0, -10, 36, 7, 86, 26, -10 },
+ { -6, 3, 76, 7, 63, -5, -3 },
+ { 0, -10, 41, 7, 85, 21, -9 },
+ { -7, 7, 78, 7, 59, -7, -2 },
+ { 0, -10, 46, 7, 84, 17, -9 },
+ { -8, 11, 80, 7, 54, -8, -1 },
+ { -1, -9, 51, 7, 82, 13, -8 },
+ { -9, 15, 83, 7, 49, -9, -1 },
+ { -2, -8, 56, 7, 80, 9, -7 },
+ { -9, 19, 85, 7, 43, -10, 0 },
+ { -3, -6, 61, 7, 77, 5, -6 },
+ { -10, 24, 86, 7, 38, -10, 0 },
+ { -3, -4, 66, 7, 72, 2, -5 },
+ { -10, 29, 86, 7, 33, -10, 0 },
+ { -4, -1, 68, 7, 70, -1, -4 },
+ { 0, -10, 33, 7, 86, 29, -10 },
+ { -5, 2, 72, 7, 66, -4, -3 },
+ { 0, -10, 38, 7, 86, 24, -10 },
+ { -6, 5, 77, 7, 61, -6, -3 },
+ { 0, -10, 43, 7, 85, 19, -9 },
+ { -7, 9, 80, 7, 56, -8, -2 },
+ { -1, -9, 49, 7, 83, 15, -9 },
+ { -8, 13, 82, 7, 51, -9, -1 },
+ { -1, -8, 54, 7, 80, 11, -8 },
+ { -9, 17, 84, 7, 46, -10, 0 },
+ { -2, -7, 59, 7, 78, 7, -7 },
+ { -9, 21, 85, 7, 41, -10, 0 },
+ { -3, -5, 63, 7, 76, 3, -6 },
+ { -10, 26, 86, 7, 36, -10, 0 },
+ { -4, -3, 68, 7, 72, 0, -5 } },
+ .odd = { { -8, 14, 82, 7, 50, -9, -1 },
+ { -1, -8, 55, 7, 79, 10, -7 },
+ { -9, 18, 84, 7, 45, -10, 0 },
+ { -2, -6, 60, 7, 77, 6, -7 },
+ { -10, 23, 85, 7, 40, -10, 0 },
+ { -3, -4, 64, 7, 75, 2, -6 },
+ { -10, 27, 86, 7, 35, -10, 0 },
+ { -4, -2, 69, 7, 71, -1, -5 },
+ { 0, -10, 32, 7, 86, 30, -10 },
+ { -5, 1, 72, 7, 67, -3, -4 },
+ { 0, -10, 37, 7, 86, 25, -10 },
+ { -6, 4, 77, 7, 62, -6, -3 },
+ { 0, -10, 42, 7, 85, 20, -9 },
+ { -7, 8, 79, 7, 57, -7, -2 },
+ { -1, -9, 47, 7, 84, 16, -9 },
+ { -8, 12, 81, 7, 52, -8, -1 },
+ { -1, -8, 52, 7, 81, 12, -8 },
+ { -9, 16, 84, 7, 47, -9, -1 },
+ { -2, -7, 57, 7, 79, 8, -7 },
+ { -9, 20, 85, 7, 42, -10, 0 },
+ { -3, -6, 62, 7, 77, 4, -6 },
+ { -10, 25, 86, 7, 37, -10, 0 },
+ { -4, -3, 67, 7, 72, 1, -5 },
+ { -10, 30, 86, 7, 32, -10, 0 },
+ { -5, -1, 71, 7, 69, -2, -4 },
+ { 0, -10, 35, 7, 86, 27, -10 },
+ { -6, 2, 75, 7, 64, -4, -3 },
+ { 0, -10, 40, 7, 85, 23, -10 },
+ { -7, 6, 77, 7, 60, -6, -2 },
+ { 0, -10, 45, 7, 84, 18, -9 },
+ { -7, 10, 79, 7, 55, -8, -1 },
+ { -1, -9, 50, 7, 82, 14, -8 } } },
+ .ptrn_arr = { { 0x3cf9e79f, 0x9e79f3cf, 0xf3cf3e7 } },
+ .sample_patrn_length = 94,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
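+ /*
+  * ptrn_arr looks like a bitmask over one period of
+  * sample_patrn_length input samples, a set bit marking a position
+  * that contributes an output sample: in every entry here the
+  * population count of ptrn_arr divided by sample_patrn_length
+  * equals the scale factor.  The 2/3 entry below is the smallest
+  * example: 0xf sets 4 of the 6 pattern bits, and 4/6 == 32/48.
+  */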
+ /* Scale factor 32 / (32 + 16) = 0.666667 */
+ .hor_phase_arr = {
+ .even = { { -10, 32, 84, 7, 32, -10, 0 },
+ { -5, 0, 69, 7, 69, 0, -5 } },
+ .odd = { { -9, 14, 82, 7, 51, -8, -2 },
+ { -2, -8, 51, 7, 82, 14, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 32, 84, 7, 32, -10, 0 },
+ { -5, 0, 69, 7, 69, 0, -5 } },
+ .odd = { { -9, 14, 82, 7, 51, -8, -2 },
+ { -2, -8, 51, 7, 82, 14, -9 } } },
+ .ptrn_arr = { { 0xf } },
+ .sample_patrn_length = 6,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 17) = 0.653061 */
+ .hor_phase_arr = {
+ .even = { { -10, 33, 82, 7, 33, -10, 0 },
+ { -5, 0, 66, 7, 70, 3, -6 },
+ { -10, 28, 82, 7, 37, -9, 0 },
+ { -4, -3, 62, 7, 74, 6, -7 },
+ { -10, 24, 82, 7, 42, -9, -1 },
+ { -3, -5, 58, 7, 76, 10, -8 },
+ { -9, 20, 79, 7, 47, -8, -1 },
+ { -3, -6, 54, 7, 78, 14, -9 },
+ { -9, 16, 79, 7, 51, -7, -2 },
+ { -2, -8, 49, 7, 80, 18, -9 },
+ { -8, 12, 77, 7, 56, -6, -3 },
+ { -1, -9, 44, 7, 81, 22, -9 },
+ { -7, 8, 75, 7, 60, -4, -4 },
+ { -1, -9, 40, 7, 82, 26, -10 },
+ { -7, 5, 71, 7, 65, -1, -5 },
+ { 0, -10, 35, 7, 83, 30, -10 },
+ { -6, 1, 70, 7, 68, 1, -6 },
+ { -10, 30, 83, 7, 35, -10, 0 },
+ { -5, -1, 65, 7, 71, 5, -7 },
+ { -10, 26, 82, 7, 40, -9, -1 },
+ { -4, -4, 60, 7, 75, 8, -7 },
+ { -9, 22, 81, 7, 44, -9, -1 },
+ { -3, -6, 56, 7, 77, 12, -8 },
+ { -9, 18, 80, 7, 49, -8, -2 },
+ { -2, -7, 51, 7, 79, 16, -9 },
+ { -9, 14, 78, 7, 54, -6, -3 },
+ { -1, -8, 47, 7, 79, 20, -9 },
+ { -8, 10, 76, 7, 58, -5, -3 },
+ { -1, -9, 42, 7, 82, 24, -10 },
+ { -7, 6, 74, 7, 62, -3, -4 },
+ { 0, -9, 37, 7, 82, 28, -10 },
+ { -6, 3, 70, 7, 66, 0, -5 } },
+ .odd = { { -9, 15, 79, 7, 52, -7, -2 },
+ { -2, -8, 48, 7, 80, 19, -9 },
+ { -8, 11, 76, 7, 57, -5, -3 },
+ { -1, -9, 43, 7, 82, 23, -10 },
+ { -7, 7, 74, 7, 61, -3, -4 },
+ { -1, -9, 38, 7, 83, 27, -10 },
+ { -6, 4, 70, 7, 66, -1, -5 },
+ { 0, -10, 34, 7, 83, 31, -10 },
+ { -6, 1, 67, 7, 70, 2, -6 },
+ { -10, 29, 83, 7, 36, -10, 0 },
+ { -5, -2, 64, 7, 73, 5, -7 },
+ { -10, 25, 82, 7, 41, -9, -1 },
+ { -4, -4, 59, 7, 76, 9, -8 },
+ { -9, 21, 80, 7, 45, -8, -1 },
+ { -3, -6, 55, 7, 77, 13, -8 },
+ { -9, 17, 79, 7, 50, -7, -2 },
+ { -2, -7, 50, 7, 79, 17, -9 },
+ { -8, 13, 77, 7, 55, -6, -3 },
+ { -1, -8, 45, 7, 80, 21, -9 },
+ { -8, 9, 76, 7, 59, -4, -4 },
+ { -1, -9, 41, 7, 82, 25, -10 },
+ { -7, 5, 73, 7, 64, -2, -5 },
+ { 0, -10, 36, 7, 83, 29, -10 },
+ { -6, 2, 70, 7, 67, 1, -6 },
+ { -10, 31, 83, 7, 34, -10, 0 },
+ { -5, -1, 66, 7, 70, 4, -6 },
+ { -10, 27, 83, 7, 38, -9, -1 },
+ { -4, -3, 61, 7, 74, 7, -7 },
+ { -10, 23, 82, 7, 43, -9, -1 },
+ { -3, -5, 57, 7, 76, 11, -8 },
+ { -9, 19, 80, 7, 48, -8, -2 },
+ { -2, -7, 52, 7, 79, 15, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -10, 33, 82, 7, 33, -10, 0 },
+ { -5, 0, 66, 7, 70, 3, -6 },
+ { -10, 28, 82, 7, 37, -9, 0 },
+ { -4, -3, 62, 7, 74, 6, -7 },
+ { -10, 24, 82, 7, 42, -9, -1 },
+ { -3, -5, 58, 7, 76, 10, -8 },
+ { -9, 20, 79, 7, 47, -8, -1 },
+ { -3, -6, 54, 7, 78, 14, -9 },
+ { -9, 16, 79, 7, 51, -7, -2 },
+ { -2, -8, 49, 7, 80, 18, -9 },
+ { -8, 12, 77, 7, 56, -6, -3 },
+ { -1, -9, 44, 7, 81, 22, -9 },
+ { -7, 8, 75, 7, 60, -4, -4 },
+ { -1, -9, 40, 7, 82, 26, -10 },
+ { -7, 5, 71, 7, 65, -1, -5 },
+ { 0, -10, 35, 7, 83, 30, -10 },
+ { -6, 1, 70, 7, 68, 1, -6 },
+ { -10, 30, 83, 7, 35, -10, 0 },
+ { -5, -1, 65, 7, 71, 5, -7 },
+ { -10, 26, 82, 7, 40, -9, -1 },
+ { -4, -4, 60, 7, 75, 8, -7 },
+ { -9, 22, 81, 7, 44, -9, -1 },
+ { -3, -6, 56, 7, 77, 12, -8 },
+ { -9, 18, 80, 7, 49, -8, -2 },
+ { -2, -7, 51, 7, 79, 16, -9 },
+ { -9, 14, 78, 7, 54, -6, -3 },
+ { -1, -8, 47, 7, 79, 20, -9 },
+ { -8, 10, 76, 7, 58, -5, -3 },
+ { -1, -9, 42, 7, 82, 24, -10 },
+ { -7, 6, 74, 7, 62, -3, -4 },
+ { 0, -9, 37, 7, 82, 28, -10 },
+ { -6, 3, 70, 7, 66, 0, -5 } },
+ .odd = { { -9, 15, 79, 7, 52, -7, -2 },
+ { -2, -8, 48, 7, 80, 19, -9 },
+ { -8, 11, 76, 7, 57, -5, -3 },
+ { -1, -9, 43, 7, 82, 23, -10 },
+ { -7, 7, 74, 7, 61, -3, -4 },
+ { -1, -9, 38, 7, 83, 27, -10 },
+ { -6, 4, 70, 7, 66, -1, -5 },
+ { 0, -10, 34, 7, 83, 31, -10 },
+ { -6, 1, 67, 7, 70, 2, -6 },
+ { -10, 29, 83, 7, 36, -10, 0 },
+ { -5, -2, 64, 7, 73, 5, -7 },
+ { -10, 25, 82, 7, 41, -9, -1 },
+ { -4, -4, 59, 7, 76, 9, -8 },
+ { -9, 21, 80, 7, 45, -8, -1 },
+ { -3, -6, 55, 7, 77, 13, -8 },
+ { -9, 17, 79, 7, 50, -7, -2 },
+ { -2, -7, 50, 7, 79, 17, -9 },
+ { -8, 13, 77, 7, 55, -6, -3 },
+ { -1, -8, 45, 7, 80, 21, -9 },
+ { -8, 9, 76, 7, 59, -4, -4 },
+ { -1, -9, 41, 7, 82, 25, -10 },
+ { -7, 5, 73, 7, 64, -2, -5 },
+ { 0, -10, 36, 7, 83, 29, -10 },
+ { -6, 2, 70, 7, 67, 1, -6 },
+ { -10, 31, 83, 7, 34, -10, 0 },
+ { -5, -1, 66, 7, 70, 4, -6 },
+ { -10, 27, 83, 7, 38, -9, -1 },
+ { -4, -3, 61, 7, 74, 7, -7 },
+ { -10, 23, 82, 7, 43, -9, -1 },
+ { -3, -5, 57, 7, 76, 11, -8 },
+ { -9, 19, 80, 7, 48, -8, -2 },
+ { -2, -7, 52, 7, 79, 15, -9 } } },
+ .ptrn_arr = { { 0xe73cf3cf, 0x3cf39e79, 0xe79e79cf } },
+ .sample_patrn_length = 98,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 18) = 0.64 */
+ .hor_phase_arr = {
+ .even = { { -9, 33, 80, 7, 33, -9, 0 },
+ { -6, 0, 64, 7, 71, 6, -7 },
+ { -10, 25, 80, 7, 42, -8, -1 },
+ { -4, -4, 56, 7, 76, 13, -9 },
+ { -9, 17, 78, 7, 51, -6, -3 },
+ { -2, -7, 47, 7, 78, 21, -9 },
+ { -8, 9, 74, 7, 60, -2, -5 },
+ { -1, -9, 38, 7, 81, 29, -10 },
+ { -6, 3, 66, 7, 68, 3, -6 },
+ { -10, 29, 81, 7, 38, -9, -1 },
+ { -5, -2, 60, 7, 74, 9, -8 },
+ { -9, 21, 78, 7, 47, -7, -2 },
+ { -3, -6, 51, 7, 78, 17, -9 },
+ { -9, 13, 76, 7, 56, -4, -4 },
+ { -1, -8, 42, 7, 80, 25, -10 },
+ { -7, 6, 71, 7, 64, 0, -6 } },
+ .odd = { { -9, 15, 76, 7, 54, -5, -3 },
+ { -2, -8, 45, 7, 80, 23, -10 },
+ { -8, 8, 72, 7, 62, -1, -5 },
+ { -1, -9, 36, 7, 80, 31, -9 },
+ { -6, 1, 66, 7, 70, 4, -7 },
+ { -10, 27, 81, 7, 40, -9, -1 },
+ { -4, -4, 58, 7, 75, 11, -8 },
+ { -9, 19, 78, 7, 49, -7, -2 },
+ { -2, -7, 49, 7, 78, 19, -9 },
+ { -8, 11, 75, 7, 58, -4, -4 },
+ { -1, -9, 40, 7, 81, 27, -10 },
+ { -7, 4, 70, 7, 66, 1, -6 },
+ { -9, 31, 80, 7, 36, -9, -1 },
+ { -5, -1, 62, 7, 72, 8, -8 },
+ { -10, 23, 80, 7, 45, -8, -2 },
+ { -3, -5, 54, 7, 76, 15, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -9, 33, 80, 7, 33, -9, 0 },
+ { -6, 0, 64, 7, 71, 6, -7 },
+ { -10, 25, 80, 7, 42, -8, -1 },
+ { -4, -4, 56, 7, 76, 13, -9 },
+ { -9, 17, 78, 7, 51, -6, -3 },
+ { -2, -7, 47, 7, 78, 21, -9 },
+ { -8, 9, 74, 7, 60, -2, -5 },
+ { -1, -9, 38, 7, 81, 29, -10 },
+ { -6, 3, 66, 7, 68, 3, -6 },
+ { -10, 29, 81, 7, 38, -9, -1 },
+ { -5, -2, 60, 7, 74, 9, -8 },
+ { -9, 21, 78, 7, 47, -7, -2 },
+ { -3, -6, 51, 7, 78, 17, -9 },
+ { -9, 13, 76, 7, 56, -4, -4 },
+ { -1, -8, 42, 7, 80, 25, -10 },
+ { -7, 6, 71, 7, 64, 0, -6 } },
+ .odd = { { -9, 15, 76, 7, 54, -5, -3 },
+ { -2, -8, 45, 7, 80, 23, -10 },
+ { -8, 8, 72, 7, 62, -1, -5 },
+ { -1, -9, 36, 7, 80, 31, -9 },
+ { -6, 1, 66, 7, 70, 4, -7 },
+ { -10, 27, 81, 7, 40, -9, -1 },
+ { -4, -4, 58, 7, 75, 11, -8 },
+ { -9, 19, 78, 7, 49, -7, -2 },
+ { -2, -7, 49, 7, 78, 19, -9 },
+ { -8, 11, 75, 7, 58, -4, -4 },
+ { -1, -9, 40, 7, 81, 27, -10 },
+ { -7, 4, 70, 7, 66, 1, -6 },
+ { -9, 31, 80, 7, 36, -9, -1 },
+ { -5, -1, 62, 7, 72, 8, -8 },
+ { -10, 23, 80, 7, 45, -8, -2 },
+ { -3, -5, 54, 7, 76, 15, -9 } } },
+ .ptrn_arr = { { 0xf39e73cf, 0xe79c } },
+ .sample_patrn_length = 50,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 19) = 0.627451 */
+ .hor_phase_arr = {
+ .even = { { -9, 34, 79, 7, 34, -9, -1 },
+ { -6, 0, 61, 7, 72, 9, -8 },
+ { -9, 22, 78, 7, 47, -7, -3 },
+ { -3, -6, 49, 7, 77, 20, -9 },
+ { -8, 11, 72, 7, 59, -1, -5 },
+ { -1, -9, 36, 7, 79, 32, -9 },
+ { -6, 1, 63, 7, 71, 7, -8 },
+ { -9, 24, 77, 7, 45, -7, -2 },
+ { -4, -5, 51, 7, 77, 18, -9 },
+ { -9, 13, 73, 7, 58, -2, -5 },
+ { -1, -8, 38, 7, 78, 30, -9 },
+ { -6, 3, 65, 7, 67, 6, -7 },
+ { -9, 26, 78, 7, 43, -8, -2 },
+ { -4, -4, 53, 7, 76, 16, -9 },
+ { -9, 14, 75, 7, 55, -3, -4 },
+ { -2, -8, 40, 7, 79, 28, -9 },
+ { -7, 4, 67, 7, 67, 4, -7 },
+ { -9, 28, 79, 7, 40, -8, -2 },
+ { -4, -3, 55, 7, 75, 14, -9 },
+ { -9, 16, 76, 7, 53, -4, -4 },
+ { -2, -8, 43, 7, 78, 26, -9 },
+ { -7, 6, 67, 7, 65, 3, -6 },
+ { -9, 30, 78, 7, 38, -8, -1 },
+ { -5, -2, 58, 7, 73, 13, -9 },
+ { -9, 18, 77, 7, 51, -5, -4 },
+ { -2, -7, 45, 7, 77, 24, -9 },
+ { -8, 7, 71, 7, 63, 1, -6 },
+ { -9, 32, 79, 7, 36, -9, -1 },
+ { -5, -1, 59, 7, 72, 11, -8 },
+ { -9, 20, 77, 7, 49, -6, -3 },
+ { -3, -7, 47, 7, 78, 22, -9 },
+ { -8, 9, 72, 7, 61, 0, -6 } },
+ .odd = { { -9, 15, 76, 7, 54, -4, -4 },
+ { -2, -8, 41, 7, 79, 27, -9 },
+ { -7, 5, 68, 7, 66, 3, -7 },
+ { -9, 29, 78, 7, 39, -8, -1 },
+ { -5, -3, 56, 7, 76, 13, -9 },
+ { -9, 17, 77, 7, 52, -5, -4 },
+ { -2, -7, 44, 7, 77, 25, -9 },
+ { -7, 7, 68, 7, 64, 2, -6 },
+ { -9, 31, 79, 7, 37, -9, -1 },
+ { -5, -2, 59, 7, 72, 12, -8 },
+ { -9, 19, 77, 7, 50, -6, -3 },
+ { -3, -7, 46, 7, 78, 23, -9 },
+ { -8, 8, 71, 7, 62, 1, -6 },
+ { -9, 33, 79, 7, 35, -9, -1 },
+ { -5, -1, 60, 7, 72, 10, -8 },
+ { -9, 21, 77, 7, 48, -6, -3 },
+ { -3, -6, 48, 7, 77, 21, -9 },
+ { -8, 10, 72, 7, 60, -1, -5 },
+ { -1, -9, 35, 7, 79, 33, -9 },
+ { -6, 1, 62, 7, 71, 8, -8 },
+ { -9, 23, 78, 7, 46, -7, -3 },
+ { -3, -6, 50, 7, 77, 19, -9 },
+ { -8, 12, 72, 7, 59, -2, -5 },
+ { -1, -9, 37, 7, 79, 31, -9 },
+ { -6, 2, 64, 7, 68, 7, -7 },
+ { -9, 25, 77, 7, 44, -7, -2 },
+ { -4, -5, 52, 7, 77, 17, -9 },
+ { -9, 13, 76, 7, 56, -3, -5 },
+ { -1, -8, 39, 7, 78, 29, -9 },
+ { -7, 3, 66, 7, 68, 5, -7 },
+ { -9, 27, 79, 7, 41, -8, -2 },
+ { -4, -4, 54, 7, 76, 15, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -9, 34, 79, 7, 34, -9, -1 },
+ { -6, 0, 61, 7, 72, 9, -8 },
+ { -9, 22, 78, 7, 47, -7, -3 },
+ { -3, -6, 49, 7, 77, 20, -9 },
+ { -8, 11, 72, 7, 59, -1, -5 },
+ { -1, -9, 36, 7, 79, 32, -9 },
+ { -6, 1, 63, 7, 71, 7, -8 },
+ { -9, 24, 77, 7, 45, -7, -2 },
+ { -4, -5, 51, 7, 77, 18, -9 },
+ { -9, 13, 73, 7, 58, -2, -5 },
+ { -1, -8, 38, 7, 78, 30, -9 },
+ { -6, 3, 65, 7, 67, 6, -7 },
+ { -9, 26, 78, 7, 43, -8, -2 },
+ { -4, -4, 53, 7, 76, 16, -9 },
+ { -9, 14, 75, 7, 55, -3, -4 },
+ { -2, -8, 40, 7, 79, 28, -9 },
+ { -7, 4, 67, 7, 67, 4, -7 },
+ { -9, 28, 79, 7, 40, -8, -2 },
+ { -4, -3, 55, 7, 75, 14, -9 },
+ { -9, 16, 76, 7, 53, -4, -4 },
+ { -2, -8, 43, 7, 78, 26, -9 },
+ { -7, 6, 67, 7, 65, 3, -6 },
+ { -9, 30, 78, 7, 38, -8, -1 },
+ { -5, -2, 58, 7, 73, 13, -9 },
+ { -9, 18, 77, 7, 51, -5, -4 },
+ { -2, -7, 45, 7, 77, 24, -9 },
+ { -8, 7, 71, 7, 63, 1, -6 },
+ { -9, 32, 79, 7, 36, -9, -1 },
+ { -5, -1, 59, 7, 72, 11, -8 },
+ { -9, 20, 77, 7, 49, -6, -3 },
+ { -3, -7, 47, 7, 78, 22, -9 },
+ { -8, 9, 72, 7, 61, 0, -6 } },
+ .odd = { { -9, 15, 76, 7, 54, -4, -4 },
+ { -2, -8, 41, 7, 79, 27, -9 },
+ { -7, 5, 68, 7, 66, 3, -7 },
+ { -9, 29, 78, 7, 39, -8, -1 },
+ { -5, -3, 56, 7, 76, 13, -9 },
+ { -9, 17, 77, 7, 52, -5, -4 },
+ { -2, -7, 44, 7, 77, 25, -9 },
+ { -7, 7, 68, 7, 64, 2, -6 },
+ { -9, 31, 79, 7, 37, -9, -1 },
+ { -5, -2, 59, 7, 72, 12, -8 },
+ { -9, 19, 77, 7, 50, -6, -3 },
+ { -3, -7, 46, 7, 78, 23, -9 },
+ { -8, 8, 71, 7, 62, 1, -6 },
+ { -9, 33, 79, 7, 35, -9, -1 },
+ { -5, -1, 60, 7, 72, 10, -8 },
+ { -9, 21, 77, 7, 48, -6, -3 },
+ { -3, -6, 48, 7, 77, 21, -9 },
+ { -8, 10, 72, 7, 60, -1, -5 },
+ { -1, -9, 35, 7, 79, 33, -9 },
+ { -6, 1, 62, 7, 71, 8, -8 },
+ { -9, 23, 78, 7, 46, -7, -3 },
+ { -3, -6, 50, 7, 77, 19, -9 },
+ { -8, 12, 72, 7, 59, -2, -5 },
+ { -1, -9, 37, 7, 79, 31, -9 },
+ { -6, 2, 64, 7, 68, 7, -7 },
+ { -9, 25, 77, 7, 44, -7, -2 },
+ { -4, -5, 52, 7, 77, 17, -9 },
+ { -9, 13, 76, 7, 56, -3, -5 },
+ { -1, -8, 39, 7, 78, 29, -9 },
+ { -7, 3, 66, 7, 68, 5, -7 },
+ { -9, 27, 79, 7, 41, -8, -2 },
+ { -4, -4, 54, 7, 76, 15, -9 } } },
+ .ptrn_arr = { { 0x79ce79cf, 0x73ce79ce, 0x73ce73ce, 0xe } },
+ .sample_patrn_length = 102,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
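+ /*
+  * The .even and .odd sub-tables presumably supply the phase sets
+  * for even- and odd-indexed output samples: in each entry the .odd
+  * phases appear to fall halfway between consecutive .even phases,
+  * which is easiest to see in the short N = 20 tables below.
+  */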
+ /* Scale factor 32 / (32 + 20) = 0.615385 */
+ .hor_phase_arr = {
+ .even = { { -8, 34, 77, 7, 34, -8, -1 },
+ { -6, 0, 59, 7, 71, 12, -8 },
+ { -9, 19, 75, 7, 51, -4, -4 },
+ { -3, -7, 43, 7, 77, 27, -9 },
+ { -7, 6, 64, 7, 66, 6, -7 },
+ { -9, 27, 77, 7, 43, -7, -3 },
+ { -4, -4, 51, 7, 75, 19, -9 },
+ { -8, 12, 71, 7, 59, 0, -6 } },
+ .odd = { { -9, 16, 73, 7, 55, -2, -5 },
+ { -2, -8, 39, 7, 77, 31, -9 },
+ { -7, 3, 63, 7, 68, 9, -8 },
+ { -9, 23, 76, 7, 47, -6, -3 },
+ { -3, -6, 47, 7, 76, 23, -9 },
+ { -8, 9, 68, 7, 63, 3, -7 },
+ { -9, 31, 77, 7, 39, -8, -2 },
+ { -5, -2, 55, 7, 73, 16, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -8, 34, 77, 7, 34, -8, -1 },
+ { -6, 0, 59, 7, 71, 12, -8 },
+ { -9, 19, 75, 7, 51, -4, -4 },
+ { -3, -7, 43, 7, 77, 27, -9 },
+ { -7, 6, 64, 7, 66, 6, -7 },
+ { -9, 27, 77, 7, 43, -7, -3 },
+ { -4, -4, 51, 7, 75, 19, -9 },
+ { -8, 12, 71, 7, 59, 0, -6 } },
+ .odd = { { -9, 16, 73, 7, 55, -2, -5 },
+ { -2, -8, 39, 7, 77, 31, -9 },
+ { -7, 3, 63, 7, 68, 9, -8 },
+ { -9, 23, 76, 7, 47, -6, -3 },
+ { -3, -6, 47, 7, 76, 23, -9 },
+ { -8, 9, 68, 7, 63, 3, -7 },
+ { -9, 31, 77, 7, 39, -8, -2 },
+ { -5, -2, 55, 7, 73, 16, -9 } } },
+ .ptrn_arr = { { 0xe739cf } },
+ .sample_patrn_length = 26,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 21) = 0.603774 */
+ .hor_phase_arr = {
+ .even = { { -8, 35, 76, 7, 35, -8, -2 },
+ { -6, 0, 57, 7, 71, 15, -9 },
+ { -9, 17, 71, 7, 55, -1, -5 },
+ { -2, -8, 37, 7, 76, 33, -8 },
+ { -6, 1, 58, 7, 71, 13, -9 },
+ { -9, 18, 73, 7, 53, -2, -5 },
+ { -2, -7, 39, 7, 75, 31, -8 },
+ { -7, 2, 60, 7, 69, 12, -8 },
+ { -9, 20, 74, 7, 51, -3, -5 },
+ { -3, -7, 41, 7, 77, 29, -9 },
+ { -7, 4, 62, 7, 67, 10, -8 },
+ { -9, 22, 74, 7, 49, -4, -4 },
+ { -3, -6, 43, 7, 75, 28, -9 },
+ { -7, 5, 63, 7, 67, 8, -8 },
+ { -9, 24, 75, 7, 47, -5, -4 },
+ { -4, -5, 45, 7, 75, 26, -9 },
+ { -8, 7, 65, 7, 65, 7, -8 },
+ { -9, 26, 75, 7, 45, -5, -4 },
+ { -4, -5, 47, 7, 75, 24, -9 },
+ { -8, 8, 67, 7, 63, 5, -7 },
+ { -9, 28, 75, 7, 43, -6, -3 },
+ { -4, -4, 49, 7, 74, 22, -9 },
+ { -8, 10, 67, 7, 62, 4, -7 },
+ { -9, 29, 77, 7, 41, -7, -3 },
+ { -5, -3, 51, 7, 74, 20, -9 },
+ { -8, 12, 69, 7, 60, 2, -7 },
+ { -8, 31, 75, 7, 39, -7, -2 },
+ { -5, -2, 53, 7, 73, 18, -9 },
+ { -9, 13, 71, 7, 58, 1, -6 },
+ { -8, 33, 76, 7, 37, -8, -2 },
+ { -5, -1, 55, 7, 71, 17, -9 },
+ { -9, 15, 71, 7, 57, 0, -6 } },
+ .odd = { { -9, 16, 72, 7, 56, -1, -6 },
+ { -2, -8, 36, 7, 76, 34, -8 },
+ { -6, 1, 58, 7, 70, 14, -9 },
+ { -9, 18, 72, 7, 54, -2, -5 },
+ { -2, -7, 38, 7, 75, 32, -8 },
+ { -6, 2, 59, 7, 70, 12, -9 },
+ { -9, 19, 74, 7, 52, -3, -5 },
+ { -3, -7, 40, 7, 77, 30, -9 },
+ { -7, 3, 61, 7, 68, 11, -8 },
+ { -9, 21, 75, 7, 50, -4, -5 },
+ { -3, -6, 42, 7, 75, 29, -9 },
+ { -7, 5, 63, 7, 66, 9, -8 },
+ { -9, 23, 74, 7, 48, -4, -4 },
+ { -3, -6, 44, 7, 75, 27, -9 },
+ { -7, 6, 64, 7, 65, 8, -8 },
+ { -9, 25, 75, 7, 46, -5, -4 },
+ { -4, -5, 46, 7, 75, 25, -9 },
+ { -8, 8, 65, 7, 64, 6, -7 },
+ { -9, 27, 75, 7, 44, -6, -3 },
+ { -4, -4, 48, 7, 74, 23, -9 },
+ { -8, 9, 66, 7, 63, 5, -7 },
+ { -9, 29, 75, 7, 42, -6, -3 },
+ { -5, -4, 50, 7, 75, 21, -9 },
+ { -8, 11, 68, 7, 61, 3, -7 },
+ { -9, 30, 77, 7, 40, -7, -3 },
+ { -5, -3, 52, 7, 74, 19, -9 },
+ { -9, 12, 70, 7, 59, 2, -6 },
+ { -8, 32, 75, 7, 38, -7, -2 },
+ { -5, -2, 54, 7, 72, 18, -9 },
+ { -9, 14, 70, 7, 58, 1, -6 },
+ { -8, 34, 76, 7, 36, -8, -2 },
+ { -6, -1, 56, 7, 72, 16, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -8, 35, 76, 7, 35, -8, -2 },
+ { -6, 0, 57, 7, 71, 15, -9 },
+ { -9, 17, 71, 7, 55, -1, -5 },
+ { -2, -8, 37, 7, 76, 33, -8 },
+ { -6, 1, 58, 7, 71, 13, -9 },
+ { -9, 18, 73, 7, 53, -2, -5 },
+ { -2, -7, 39, 7, 75, 31, -8 },
+ { -7, 2, 60, 7, 69, 12, -8 },
+ { -9, 20, 74, 7, 51, -3, -5 },
+ { -3, -7, 41, 7, 77, 29, -9 },
+ { -7, 4, 62, 7, 67, 10, -8 },
+ { -9, 22, 74, 7, 49, -4, -4 },
+ { -3, -6, 43, 7, 75, 28, -9 },
+ { -7, 5, 63, 7, 67, 8, -8 },
+ { -9, 24, 75, 7, 47, -5, -4 },
+ { -4, -5, 45, 7, 75, 26, -9 },
+ { -8, 7, 65, 7, 65, 7, -8 },
+ { -9, 26, 75, 7, 45, -5, -4 },
+ { -4, -5, 47, 7, 75, 24, -9 },
+ { -8, 8, 67, 7, 63, 5, -7 },
+ { -9, 28, 75, 7, 43, -6, -3 },
+ { -4, -4, 49, 7, 74, 22, -9 },
+ { -8, 10, 67, 7, 62, 4, -7 },
+ { -9, 29, 77, 7, 41, -7, -3 },
+ { -5, -3, 51, 7, 74, 20, -9 },
+ { -8, 12, 69, 7, 60, 2, -7 },
+ { -8, 31, 75, 7, 39, -7, -2 },
+ { -5, -2, 53, 7, 73, 18, -9 },
+ { -9, 13, 71, 7, 58, 1, -6 },
+ { -8, 33, 76, 7, 37, -8, -2 },
+ { -5, -1, 55, 7, 71, 17, -9 },
+ { -9, 15, 71, 7, 57, 0, -6 } },
+ .odd = { { -9, 16, 72, 7, 56, -1, -6 },
+ { -2, -8, 36, 7, 76, 34, -8 },
+ { -6, 1, 58, 7, 70, 14, -9 },
+ { -9, 18, 72, 7, 54, -2, -5 },
+ { -2, -7, 38, 7, 75, 32, -8 },
+ { -6, 2, 59, 7, 70, 12, -9 },
+ { -9, 19, 74, 7, 52, -3, -5 },
+ { -3, -7, 40, 7, 77, 30, -9 },
+ { -7, 3, 61, 7, 68, 11, -8 },
+ { -9, 21, 75, 7, 50, -4, -5 },
+ { -3, -6, 42, 7, 75, 29, -9 },
+ { -7, 5, 63, 7, 66, 9, -8 },
+ { -9, 23, 74, 7, 48, -4, -4 },
+ { -3, -6, 44, 7, 75, 27, -9 },
+ { -7, 6, 64, 7, 65, 8, -8 },
+ { -9, 25, 75, 7, 46, -5, -4 },
+ { -4, -5, 46, 7, 75, 25, -9 },
+ { -8, 8, 65, 7, 64, 6, -7 },
+ { -9, 27, 75, 7, 44, -6, -3 },
+ { -4, -4, 48, 7, 74, 23, -9 },
+ { -8, 9, 66, 7, 63, 5, -7 },
+ { -9, 29, 75, 7, 42, -6, -3 },
+ { -5, -4, 50, 7, 75, 21, -9 },
+ { -8, 11, 68, 7, 61, 3, -7 },
+ { -9, 30, 77, 7, 40, -7, -3 },
+ { -5, -3, 52, 7, 74, 19, -9 },
+ { -9, 12, 70, 7, 59, 2, -6 },
+ { -8, 32, 75, 7, 38, -7, -2 },
+ { -5, -2, 54, 7, 72, 18, -9 },
+ { -9, 14, 70, 7, 58, 1, -6 },
+ { -8, 34, 76, 7, 36, -8, -2 },
+ { -6, -1, 56, 7, 72, 16, -9 } } },
+ .ptrn_arr = { { 0x9ce739cf, 0xe739ce73, 0x39ce739c, 0xe7 } },
+ .sample_patrn_length = 106,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 22) = 0.592593 */
+ .hor_phase_arr = {
+ .even = { { -7, 35, 74, 7, 35, -7, -2 },
+ { -6, 0, 54, 7, 71, 18, -9 },
+ { -9, 14, 70, 7, 58, 2, -7 },
+ { -8, 32, 74, 7, 39, -6, -3 },
+ { -5, -2, 51, 7, 72, 21, -9 },
+ { -8, 11, 66, 7, 61, 5, -7 },
+ { -9, 28, 75, 7, 43, -5, -4 },
+ { -4, -4, 47, 7, 73, 25, -9 },
+ { -8, 8, 64, 7, 64, 8, -8 },
+ { -9, 25, 73, 7, 47, -4, -4 },
+ { -4, -5, 43, 7, 75, 28, -9 },
+ { -7, 5, 61, 7, 66, 11, -8 },
+ { -9, 21, 72, 7, 51, -2, -5 },
+ { -3, -6, 39, 7, 74, 32, -8 },
+ { -7, 2, 58, 7, 70, 14, -9 },
+ { -9, 18, 71, 7, 54, 0, -6 } },
+ .odd = { { -9, 16, 70, 7, 56, 1, -6 },
+ { -8, 34, 75, 7, 37, -7, -3 },
+ { -6, -1, 53, 7, 72, 19, -9 },
+ { -9, 13, 68, 7, 59, 4, -7 },
+ { -8, 30, 74, 7, 41, -6, -3 },
+ { -5, -3, 49, 7, 73, 23, -9 },
+ { -8, 10, 66, 7, 62, 6, -8 },
+ { -9, 27, 74, 7, 45, -5, -4 },
+ { -4, -5, 45, 7, 74, 27, -9 },
+ { -8, 6, 62, 7, 66, 10, -8 },
+ { -9, 23, 73, 7, 49, -3, -5 },
+ { -3, -6, 41, 7, 74, 30, -8 },
+ { -7, 4, 59, 7, 68, 13, -9 },
+ { -9, 19, 72, 7, 53, -1, -6 },
+ { -3, -7, 37, 7, 75, 34, -8 },
+ { -6, 1, 56, 7, 70, 16, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -7, 35, 74, 7, 35, -7, -2 },
+ { -6, 0, 54, 7, 71, 18, -9 },
+ { -9, 14, 70, 7, 58, 2, -7 },
+ { -8, 32, 74, 7, 39, -6, -3 },
+ { -5, -2, 51, 7, 72, 21, -9 },
+ { -8, 11, 66, 7, 61, 5, -7 },
+ { -9, 28, 75, 7, 43, -5, -4 },
+ { -4, -4, 47, 7, 73, 25, -9 },
+ { -8, 8, 64, 7, 64, 8, -8 },
+ { -9, 25, 73, 7, 47, -4, -4 },
+ { -4, -5, 43, 7, 75, 28, -9 },
+ { -7, 5, 61, 7, 66, 11, -8 },
+ { -9, 21, 72, 7, 51, -2, -5 },
+ { -3, -6, 39, 7, 74, 32, -8 },
+ { -7, 2, 58, 7, 70, 14, -9 },
+ { -9, 18, 71, 7, 54, 0, -6 } },
+ .odd = { { -9, 16, 70, 7, 56, 1, -6 },
+ { -8, 34, 75, 7, 37, -7, -3 },
+ { -6, -1, 53, 7, 72, 19, -9 },
+ { -9, 13, 68, 7, 59, 4, -7 },
+ { -8, 30, 74, 7, 41, -6, -3 },
+ { -5, -3, 49, 7, 73, 23, -9 },
+ { -8, 10, 66, 7, 62, 6, -8 },
+ { -9, 27, 74, 7, 45, -5, -4 },
+ { -4, -5, 45, 7, 74, 27, -9 },
+ { -8, 6, 62, 7, 66, 10, -8 },
+ { -9, 23, 73, 7, 49, -3, -5 },
+ { -3, -6, 41, 7, 74, 30, -8 },
+ { -7, 4, 59, 7, 68, 13, -9 },
+ { -9, 19, 72, 7, 53, -1, -6 },
+ { -3, -7, 37, 7, 75, 34, -8 },
+ { -6, 1, 56, 7, 70, 16, -9 } } },
+ .ptrn_arr = { { 0xce739ce7, 0xce739 } },
+ .sample_patrn_length = 54,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 23) = 0.581818 */
+ .hor_phase_arr = {
+ .even = { { -7, 36, 73, 7, 36, -7, -3 },
+ { -6, 0, 52, 7, 71, 20, -9 },
+ { -8, 12, 66, 7, 60, 6, -8 },
+ { -8, 27, 73, 7, 45, -4, -5 },
+ { -4, -4, 43, 7, 72, 29, -8 },
+ { -7, 5, 59, 7, 66, 14, -9 },
+ { -9, 19, 69, 7, 54, 1, -6 },
+ { -7, 34, 72, 7, 38, -6, -3 },
+ { -6, -1, 50, 7, 72, 22, -9 },
+ { -8, 11, 63, 7, 62, 8, -8 },
+ { -9, 26, 72, 7, 47, -3, -5 },
+ { -4, -5, 41, 7, 73, 31, -8 },
+ { -7, 4, 57, 7, 68, 15, -9 },
+ { -9, 17, 69, 7, 56, 2, -7 },
+ { -7, 32, 74, 7, 39, -6, -4 },
+ { -5, -2, 49, 7, 71, 24, -9 },
+ { -8, 9, 63, 7, 63, 9, -8 },
+ { -9, 24, 71, 7, 49, -2, -5 },
+ { -4, -6, 39, 7, 74, 32, -7 },
+ { -7, 2, 56, 7, 69, 17, -9 },
+ { -9, 15, 68, 7, 57, 4, -7 },
+ { -8, 31, 73, 7, 41, -5, -4 },
+ { -5, -3, 47, 7, 72, 26, -9 },
+ { -8, 8, 62, 7, 63, 11, -8 },
+ { -9, 22, 72, 7, 50, -1, -6 },
+ { -3, -6, 38, 7, 72, 34, -7 },
+ { -6, 1, 54, 7, 69, 19, -9 },
+ { -9, 14, 66, 7, 59, 5, -7 },
+ { -8, 29, 72, 7, 43, -4, -4 },
+ { -5, -4, 45, 7, 73, 27, -8 },
+ { -8, 6, 60, 7, 66, 12, -8 },
+ { -9, 20, 71, 7, 52, 0, -6 } },
+ .odd = { { -9, 16, 69, 7, 56, 3, -7 },
+ { -8, 31, 74, 7, 40, -5, -4 },
+ { -5, -2, 48, 7, 71, 25, -9 },
+ { -8, 8, 62, 7, 64, 10, -8 },
+ { -9, 23, 72, 7, 50, -2, -6 },
+ { -3, -6, 39, 7, 72, 33, -7 },
+ { -7, 2, 55, 7, 69, 18, -9 },
+ { -9, 15, 67, 7, 58, 4, -7 },
+ { -8, 30, 73, 7, 42, -5, -4 },
+ { -5, -3, 46, 7, 72, 26, -8 },
+ { -8, 7, 61, 7, 65, 11, -8 },
+ { -9, 21, 72, 7, 51, -1, -6 },
+ { -3, -6, 37, 7, 72, 35, -7 },
+ { -6, 1, 53, 7, 69, 20, -9 },
+ { -9, 13, 66, 7, 59, 6, -7 },
+ { -8, 28, 72, 7, 44, -4, -4 },
+ { -4, -4, 44, 7, 72, 28, -8 },
+ { -7, 6, 59, 7, 66, 13, -9 },
+ { -9, 20, 69, 7, 53, 1, -6 },
+ { -7, 35, 72, 7, 37, -6, -3 },
+ { -6, -1, 51, 7, 72, 21, -9 },
+ { -8, 11, 65, 7, 61, 7, -8 },
+ { -8, 26, 72, 7, 46, -3, -5 },
+ { -4, -5, 42, 7, 73, 30, -8 },
+ { -7, 4, 58, 7, 67, 15, -9 },
+ { -9, 18, 69, 7, 55, 2, -7 },
+ { -7, 33, 72, 7, 39, -6, -3 },
+ { -6, -2, 50, 7, 72, 23, -9 },
+ { -8, 10, 64, 7, 62, 8, -8 },
+ { -9, 25, 71, 7, 48, -2, -5 },
+ { -4, -5, 40, 7, 74, 31, -8 },
+ { -7, 3, 56, 7, 69, 16, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -7, 36, 73, 7, 36, -7, -3 },
+ { -6, 0, 52, 7, 71, 20, -9 },
+ { -8, 12, 66, 7, 60, 6, -8 },
+ { -8, 27, 73, 7, 45, -4, -5 },
+ { -4, -4, 43, 7, 72, 29, -8 },
+ { -7, 5, 59, 7, 66, 14, -9 },
+ { -9, 19, 69, 7, 54, 1, -6 },
+ { -7, 34, 72, 7, 38, -6, -3 },
+ { -6, -1, 50, 7, 72, 22, -9 },
+ { -8, 11, 63, 7, 62, 8, -8 },
+ { -9, 26, 72, 7, 47, -3, -5 },
+ { -4, -5, 41, 7, 73, 31, -8 },
+ { -7, 4, 57, 7, 68, 15, -9 },
+ { -9, 17, 69, 7, 56, 2, -7 },
+ { -7, 32, 74, 7, 39, -6, -4 },
+ { -5, -2, 49, 7, 71, 24, -9 },
+ { -8, 9, 63, 7, 63, 9, -8 },
+ { -9, 24, 71, 7, 49, -2, -5 },
+ { -4, -6, 39, 7, 74, 32, -7 },
+ { -7, 2, 56, 7, 69, 17, -9 },
+ { -9, 15, 68, 7, 57, 4, -7 },
+ { -8, 31, 73, 7, 41, -5, -4 },
+ { -5, -3, 47, 7, 72, 26, -9 },
+ { -8, 8, 62, 7, 63, 11, -8 },
+ { -9, 22, 72, 7, 50, -1, -6 },
+ { -3, -6, 38, 7, 72, 34, -7 },
+ { -6, 1, 54, 7, 69, 19, -9 },
+ { -9, 14, 66, 7, 59, 5, -7 },
+ { -8, 29, 72, 7, 43, -4, -4 },
+ { -5, -4, 45, 7, 73, 27, -8 },
+ { -8, 6, 60, 7, 66, 12, -8 },
+ { -9, 20, 71, 7, 52, 0, -6 } },
+ .odd = { { -9, 16, 69, 7, 56, 3, -7 },
+ { -8, 31, 74, 7, 40, -5, -4 },
+ { -5, -2, 48, 7, 71, 25, -9 },
+ { -8, 8, 62, 7, 64, 10, -8 },
+ { -9, 23, 72, 7, 50, -2, -6 },
+ { -3, -6, 39, 7, 72, 33, -7 },
+ { -7, 2, 55, 7, 69, 18, -9 },
+ { -9, 15, 67, 7, 58, 4, -7 },
+ { -8, 30, 73, 7, 42, -5, -4 },
+ { -5, -3, 46, 7, 72, 26, -8 },
+ { -8, 7, 61, 7, 65, 11, -8 },
+ { -9, 21, 72, 7, 51, -1, -6 },
+ { -3, -6, 37, 7, 72, 35, -7 },
+ { -6, 1, 53, 7, 69, 20, -9 },
+ { -9, 13, 66, 7, 59, 6, -7 },
+ { -8, 28, 72, 7, 44, -4, -4 },
+ { -4, -4, 44, 7, 72, 28, -8 },
+ { -7, 6, 59, 7, 66, 13, -9 },
+ { -9, 20, 69, 7, 53, 1, -6 },
+ { -7, 35, 72, 7, 37, -6, -3 },
+ { -6, -1, 51, 7, 72, 21, -9 },
+ { -8, 11, 65, 7, 61, 7, -8 },
+ { -8, 26, 72, 7, 46, -3, -5 },
+ { -4, -5, 42, 7, 73, 30, -8 },
+ { -7, 4, 58, 7, 67, 15, -9 },
+ { -9, 18, 69, 7, 55, 2, -7 },
+ { -7, 33, 72, 7, 39, -6, -3 },
+ { -6, -2, 50, 7, 72, 23, -9 },
+ { -8, 10, 64, 7, 62, 8, -8 },
+ { -9, 25, 71, 7, 48, -2, -5 },
+ { -4, -5, 40, 7, 74, 31, -8 },
+ { -7, 3, 56, 7, 69, 16, -9 } } },
+ .ptrn_arr = { { 0xe7339ce7, 0x9ce7339c, 0x399ce739, 0xce7 } },
+ .sample_patrn_length = 110,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 24) = 0.571429 */
+ .hor_phase_arr = {
+ .even = { { -6, 36, 71, 7, 36, -6, -3 },
+ { -6, 0, 50, 7, 69, 23, -8 },
+ { -8, 10, 62, 7, 62, 10, -8 },
+ { -8, 23, 69, 7, 50, 0, -6 } },
+ .odd = { { -9, 16, 67, 7, 56, 5, -7 },
+ { -8, 29, 73, 7, 43, -4, -5 },
+ { -5, -4, 43, 7, 73, 29, -8 },
+ { -7, 5, 56, 7, 67, 16, -9 } } },
+ .ver_phase_arr = {
+ .even = { { -6, 36, 71, 7, 36, -6, -3 },
+ { -6, 0, 50, 7, 69, 23, -8 },
+ { -8, 10, 62, 7, 62, 10, -8 },
+ { -8, 23, 69, 7, 50, 0, -6 } },
+ .odd = { { -9, 16, 67, 7, 56, 5, -7 },
+ { -8, 29, 73, 7, 43, -4, -5 },
+ { -5, -4, 43, 7, 73, 29, -8 },
+ { -7, 5, 56, 7, 67, 16, -9 } } },
+ .ptrn_arr = { { 0xce7 } },
+ .sample_patrn_length = 14,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 25) = 0.561404 */
+ .hor_phase_arr = {
+ .even = { { -5, 36, 70, 7, 36, -5, -4 },
+ { -6, 0, 48, 7, 69, 25, -8 },
+ { -8, 8, 59, 7, 63, 14, -8 },
+ { -8, 19, 66, 7, 54, 4, -7 },
+ { -7, 30, 70, 7, 43, -3, -5 },
+ { -5, -3, 41, 7, 70, 32, -7 },
+ { -7, 3, 53, 7, 67, 20, -8 },
+ { -8, 13, 61, 7, 60, 10, -8 },
+ { -8, 24, 67, 7, 50, 1, -6 },
+ { -6, 35, 70, 7, 38, -5, -4 },
+ { -6, -1, 46, 7, 70, 27, -8 },
+ { -8, 7, 57, 7, 64, 16, -8 },
+ { -8, 17, 64, 7, 56, 6, -7 },
+ { -7, 28, 69, 7, 45, -2, -5 },
+ { -4, -4, 40, 7, 69, 33, -6 },
+ { -7, 2, 51, 7, 68, 22, -8 },
+ { -8, 11, 61, 7, 61, 11, -8 },
+ { -8, 22, 68, 7, 51, 2, -7 },
+ { -6, 33, 69, 7, 40, -4, -4 },
+ { -5, -2, 45, 7, 69, 28, -7 },
+ { -7, 6, 56, 7, 64, 17, -8 },
+ { -8, 16, 64, 7, 57, 7, -8 },
+ { -8, 27, 70, 7, 46, -1, -6 },
+ { -4, -5, 38, 7, 70, 35, -6 },
+ { -6, 1, 50, 7, 67, 24, -8 },
+ { -8, 10, 60, 7, 61, 13, -8 },
+ { -8, 20, 67, 7, 53, 3, -7 },
+ { -7, 32, 70, 7, 41, -3, -5 },
+ { -5, -3, 43, 7, 70, 30, -7 },
+ { -7, 4, 54, 7, 66, 19, -8 },
+ { -8, 14, 63, 7, 59, 8, -8 },
+ { -8, 25, 69, 7, 48, 0, -6 } },
+ .odd = { { -8, 16, 66, 7, 56, 6, -8 },
+ { -8, 28, 69, 7, 46, -1, -6 },
+ { -4, -4, 39, 7, 69, 34, -6 },
+ { -7, 2, 51, 7, 67, 23, -8 },
+ { -8, 10, 60, 7, 62, 12, -8 },
+ { -8, 21, 67, 7, 52, 3, -7 },
+ { -7, 32, 71, 7, 41, -4, -5 },
+ { -5, -2, 44, 7, 69, 29, -7 },
+ { -7, 5, 55, 7, 65, 18, -8 },
+ { -8, 15, 63, 7, 58, 8, -8 },
+ { -8, 26, 69, 7, 47, 0, -6 },
+ { -4, -5, 37, 7, 71, 35, -6 },
+ { -6, 1, 49, 7, 68, 24, -8 },
+ { -8, 9, 59, 7, 63, 13, -8 },
+ { -8, 20, 65, 7, 54, 4, -7 },
+ { -7, 31, 70, 7, 42, -3, -5 },
+ { -5, -3, 42, 7, 70, 31, -7 },
+ { -7, 4, 54, 7, 65, 20, -8 },
+ { -8, 13, 63, 7, 59, 9, -8 },
+ { -8, 24, 68, 7, 49, 1, -6 },
+ { -6, 35, 71, 7, 37, -5, -4 },
+ { -6, 0, 47, 7, 69, 26, -8 },
+ { -8, 8, 58, 7, 63, 15, -8 },
+ { -8, 18, 65, 7, 55, 5, -7 },
+ { -7, 29, 69, 7, 44, -2, -5 },
+ { -5, -4, 41, 7, 71, 32, -7 },
+ { -7, 3, 52, 7, 67, 21, -8 },
+ { -8, 12, 62, 7, 60, 10, -8 },
+ { -8, 23, 67, 7, 51, 2, -7 },
+ { -6, 34, 69, 7, 39, -4, -4 },
+ { -6, -1, 46, 7, 69, 28, -8 },
+ { -8, 6, 56, 7, 66, 16, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -5, 36, 70, 7, 36, -5, -4 },
+ { -6, 0, 48, 7, 69, 25, -8 },
+ { -8, 8, 59, 7, 63, 14, -8 },
+ { -8, 19, 66, 7, 54, 4, -7 },
+ { -7, 30, 70, 7, 43, -3, -5 },
+ { -5, -3, 41, 7, 70, 32, -7 },
+ { -7, 3, 53, 7, 67, 20, -8 },
+ { -8, 13, 61, 7, 60, 10, -8 },
+ { -8, 24, 67, 7, 50, 1, -6 },
+ { -6, 35, 70, 7, 38, -5, -4 },
+ { -6, -1, 46, 7, 70, 27, -8 },
+ { -8, 7, 57, 7, 64, 16, -8 },
+ { -8, 17, 64, 7, 56, 6, -7 },
+ { -7, 28, 69, 7, 45, -2, -5 },
+ { -4, -4, 40, 7, 69, 33, -6 },
+ { -7, 2, 51, 7, 68, 22, -8 },
+ { -8, 11, 61, 7, 61, 11, -8 },
+ { -8, 22, 68, 7, 51, 2, -7 },
+ { -6, 33, 69, 7, 40, -4, -4 },
+ { -5, -2, 45, 7, 69, 28, -7 },
+ { -7, 6, 56, 7, 64, 17, -8 },
+ { -8, 16, 64, 7, 57, 7, -8 },
+ { -8, 27, 70, 7, 46, -1, -6 },
+ { -4, -5, 38, 7, 70, 35, -6 },
+ { -6, 1, 50, 7, 67, 24, -8 },
+ { -8, 10, 60, 7, 61, 13, -8 },
+ { -8, 20, 67, 7, 53, 3, -7 },
+ { -7, 32, 70, 7, 41, -3, -5 },
+ { -5, -3, 43, 7, 70, 30, -7 },
+ { -7, 4, 54, 7, 66, 19, -8 },
+ { -8, 14, 63, 7, 59, 8, -8 },
+ { -8, 25, 69, 7, 48, 0, -6 } },
+ .odd = { { -8, 16, 66, 7, 56, 6, -8 },
+ { -8, 28, 69, 7, 46, -1, -6 },
+ { -4, -4, 39, 7, 69, 34, -6 },
+ { -7, 2, 51, 7, 67, 23, -8 },
+ { -8, 10, 60, 7, 62, 12, -8 },
+ { -8, 21, 67, 7, 52, 3, -7 },
+ { -7, 32, 71, 7, 41, -4, -5 },
+ { -5, -2, 44, 7, 69, 29, -7 },
+ { -7, 5, 55, 7, 65, 18, -8 },
+ { -8, 15, 63, 7, 58, 8, -8 },
+ { -8, 26, 69, 7, 47, 0, -6 },
+ { -4, -5, 37, 7, 71, 35, -6 },
+ { -6, 1, 49, 7, 68, 24, -8 },
+ { -8, 9, 59, 7, 63, 13, -8 },
+ { -8, 20, 65, 7, 54, 4, -7 },
+ { -7, 31, 70, 7, 42, -3, -5 },
+ { -5, -3, 42, 7, 70, 31, -7 },
+ { -7, 4, 54, 7, 65, 20, -8 },
+ { -8, 13, 63, 7, 59, 9, -8 },
+ { -8, 24, 68, 7, 49, 1, -6 },
+ { -6, 35, 71, 7, 37, -5, -4 },
+ { -6, 0, 47, 7, 69, 26, -8 },
+ { -8, 8, 58, 7, 63, 15, -8 },
+ { -8, 18, 65, 7, 55, 5, -7 },
+ { -7, 29, 69, 7, 44, -2, -5 },
+ { -5, -4, 41, 7, 71, 32, -7 },
+ { -7, 3, 52, 7, 67, 21, -8 },
+ { -8, 12, 62, 7, 60, 10, -8 },
+ { -8, 23, 67, 7, 51, 2, -7 },
+ { -6, 34, 69, 7, 39, -4, -4 },
+ { -6, -1, 46, 7, 69, 28, -8 },
+ { -8, 6, 56, 7, 66, 16, -8 } } },
+ .ptrn_arr = { { 0x3399cce7, 0x3399cce7, 0x3399ce67, 0xce67 } },
+ .sample_patrn_length = 114,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 26) = 0.551724 */
+ .hor_phase_arr = {
+ .even = { { -5, 36, 70, 7, 36, -5, -4 },
+ { -6, 0, 46, 7, 68, 27, -7 },
+ { -8, 7, 55, 7, 64, 18, -8 },
+ { -8, 15, 62, 7, 58, 9, -8 },
+ { -8, 24, 68, 7, 49, 2, -7 },
+ { -6, 33, 69, 7, 40, -3, -5 },
+ { -6, -2, 43, 7, 70, 30, -7 },
+ { -7, 4, 52, 7, 66, 21, -8 },
+ { -8, 12, 60, 7, 60, 12, -8 },
+ { -8, 21, 66, 7, 52, 4, -7 },
+ { -7, 30, 70, 7, 43, -2, -6 },
+ { -5, -3, 40, 7, 69, 33, -6 },
+ { -7, 2, 49, 7, 68, 24, -8 },
+ { -8, 9, 58, 7, 62, 15, -8 },
+ { -8, 18, 64, 7, 55, 7, -8 },
+ { -7, 27, 68, 7, 46, 0, -6 } },
+ .odd = { { -8, 17, 63, 7, 56, 8, -8 },
+ { -8, 26, 67, 7, 48, 1, -6 },
+ { -5, 35, 69, 7, 38, -4, -5 },
+ { -6, -1, 45, 7, 68, 29, -7 },
+ { -7, 5, 54, 7, 64, 20, -8 },
+ { -8, 14, 60, 7, 59, 11, -8 },
+ { -8, 23, 66, 7, 51, 3, -7 },
+ { -6, 32, 69, 7, 41, -3, -5 },
+ { -5, -3, 41, 7, 69, 32, -6 },
+ { -7, 3, 51, 7, 66, 23, -8 },
+ { -8, 11, 59, 7, 60, 14, -8 },
+ { -8, 20, 64, 7, 54, 5, -7 },
+ { -7, 29, 68, 7, 45, -1, -6 },
+ { -5, -4, 38, 7, 69, 35, -5 },
+ { -6, 1, 48, 7, 67, 26, -8 },
+ { -8, 8, 56, 7, 63, 17, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -5, 36, 70, 7, 36, -5, -4 },
+ { -6, 0, 46, 7, 68, 27, -7 },
+ { -8, 7, 55, 7, 64, 18, -8 },
+ { -8, 15, 62, 7, 58, 9, -8 },
+ { -8, 24, 68, 7, 49, 2, -7 },
+ { -6, 33, 69, 7, 40, -3, -5 },
+ { -6, -2, 43, 7, 70, 30, -7 },
+ { -7, 4, 52, 7, 66, 21, -8 },
+ { -8, 12, 60, 7, 60, 12, -8 },
+ { -8, 21, 66, 7, 52, 4, -7 },
+ { -7, 30, 70, 7, 43, -2, -6 },
+ { -5, -3, 40, 7, 69, 33, -6 },
+ { -7, 2, 49, 7, 68, 24, -8 },
+ { -8, 9, 58, 7, 62, 15, -8 },
+ { -8, 18, 64, 7, 55, 7, -8 },
+ { -7, 27, 68, 7, 46, 0, -6 } },
+ .odd = { { -8, 17, 63, 7, 56, 8, -8 },
+ { -8, 26, 67, 7, 48, 1, -6 },
+ { -5, 35, 69, 7, 38, -4, -5 },
+ { -6, -1, 45, 7, 68, 29, -7 },
+ { -7, 5, 54, 7, 64, 20, -8 },
+ { -8, 14, 60, 7, 59, 11, -8 },
+ { -8, 23, 66, 7, 51, 3, -7 },
+ { -6, 32, 69, 7, 41, -3, -5 },
+ { -5, -3, 41, 7, 69, 32, -6 },
+ { -7, 3, 51, 7, 66, 23, -8 },
+ { -8, 11, 59, 7, 60, 14, -8 },
+ { -8, 20, 64, 7, 54, 5, -7 },
+ { -7, 29, 68, 7, 45, -1, -6 },
+ { -5, -4, 38, 7, 69, 35, -5 },
+ { -6, 1, 48, 7, 67, 26, -8 },
+ { -8, 8, 56, 7, 63, 17, -8 } } },
+ .ptrn_arr = { { 0x399cce67, 0xcce673 } },
+ .sample_patrn_length = 58,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 27) = 0.542373 */
+ .hor_phase_arr = {
+ .even = { { -4, 37, 67, 7, 37, -4, -5 },
+ { -6, 0, 44, 7, 67, 29, -6 },
+ { -7, 5, 52, 7, 64, 22, -8 },
+ { -8, 12, 58, 7, 60, 14, -8 },
+ { -8, 19, 63, 7, 54, 8, -8 },
+ { -7, 26, 67, 7, 47, 2, -7 },
+ { -5, 34, 66, 7, 40, -2, -5 },
+ { -6, -2, 41, 7, 68, 32, -5 },
+ { -7, 3, 49, 7, 65, 25, -7 },
+ { -8, 9, 56, 7, 62, 17, -8 },
+ { -8, 16, 61, 7, 57, 10, -8 },
+ { -8, 23, 66, 7, 50, 4, -7 },
+ { -6, 31, 67, 7, 43, -1, -6 },
+ { -5, -3, 38, 7, 67, 35, -4 },
+ { -6, 1, 46, 7, 66, 28, -7 },
+ { -8, 6, 53, 7, 65, 20, -8 },
+ { -8, 13, 59, 7, 59, 13, -8 },
+ { -8, 20, 65, 7, 53, 6, -8 },
+ { -7, 28, 66, 7, 46, 1, -6 },
+ { -4, 35, 67, 7, 38, -3, -5 },
+ { -6, -1, 43, 7, 67, 31, -6 },
+ { -7, 4, 50, 7, 66, 23, -8 },
+ { -8, 10, 57, 7, 61, 16, -8 },
+ { -8, 17, 62, 7, 56, 9, -8 },
+ { -7, 25, 65, 7, 49, 3, -7 },
+ { -5, 32, 68, 7, 41, -2, -6 },
+ { -5, -2, 40, 7, 66, 34, -5 },
+ { -7, 2, 47, 7, 67, 26, -7 },
+ { -8, 8, 54, 7, 63, 19, -8 },
+ { -8, 14, 60, 7, 58, 12, -8 },
+ { -8, 22, 64, 7, 52, 5, -7 },
+ { -6, 29, 67, 7, 44, 0, -6 } },
+ .odd = { { -8, 17, 61, 7, 56, 10, -8 },
+ { -7, 24, 64, 7, 50, 4, -7 },
+ { -6, 31, 68, 7, 42, -1, -6 },
+ { -5, -3, 39, 7, 68, 34, -5 },
+ { -7, 1, 47, 7, 67, 27, -7 },
+ { -8, 7, 54, 7, 64, 19, -8 },
+ { -8, 14, 59, 7, 59, 12, -8 },
+ { -8, 21, 64, 7, 52, 6, -7 },
+ { -7, 28, 68, 7, 45, 0, -6 },
+ { -4, 36, 68, 7, 37, -4, -5 },
+ { -6, 0, 44, 7, 66, 30, -6 },
+ { -7, 5, 51, 7, 65, 22, -8 },
+ { -8, 11, 57, 7, 61, 15, -8 },
+ { -8, 18, 63, 7, 55, 8, -8 },
+ { -7, 25, 67, 7, 48, 2, -7 },
+ { -5, 33, 66, 7, 41, -2, -5 },
+ { -5, -2, 41, 7, 66, 33, -5 },
+ { -7, 2, 48, 7, 67, 25, -7 },
+ { -8, 8, 55, 7, 63, 18, -8 },
+ { -8, 15, 61, 7, 57, 11, -8 },
+ { -8, 22, 65, 7, 51, 5, -7 },
+ { -6, 30, 66, 7, 44, 0, -6 },
+ { -5, -4, 37, 7, 68, 36, -4 },
+ { -6, 0, 45, 7, 68, 28, -7 },
+ { -7, 6, 52, 7, 64, 21, -8 },
+ { -8, 12, 59, 7, 59, 14, -8 },
+ { -8, 19, 64, 7, 54, 7, -8 },
+ { -7, 27, 67, 7, 47, 1, -7 },
+ { -5, 34, 68, 7, 39, -3, -5 },
+ { -6, -1, 42, 7, 68, 31, -6 },
+ { -7, 4, 50, 7, 64, 24, -7 },
+ { -8, 10, 56, 7, 61, 17, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -4, 37, 67, 7, 37, -4, -5 },
+ { -6, 0, 44, 7, 67, 29, -6 },
+ { -7, 5, 52, 7, 64, 22, -8 },
+ { -8, 12, 58, 7, 60, 14, -8 },
+ { -8, 19, 63, 7, 54, 8, -8 },
+ { -7, 26, 67, 7, 47, 2, -7 },
+ { -5, 34, 66, 7, 40, -2, -5 },
+ { -6, -2, 41, 7, 68, 32, -5 },
+ { -7, 3, 49, 7, 65, 25, -7 },
+ { -8, 9, 56, 7, 62, 17, -8 },
+ { -8, 16, 61, 7, 57, 10, -8 },
+ { -8, 23, 66, 7, 50, 4, -7 },
+ { -6, 31, 67, 7, 43, -1, -6 },
+ { -5, -3, 38, 7, 67, 35, -4 },
+ { -6, 1, 46, 7, 66, 28, -7 },
+ { -8, 6, 53, 7, 65, 20, -8 },
+ { -8, 13, 59, 7, 59, 13, -8 },
+ { -8, 20, 65, 7, 53, 6, -8 },
+ { -7, 28, 66, 7, 46, 1, -6 },
+ { -4, 35, 67, 7, 38, -3, -5 },
+ { -6, -1, 43, 7, 67, 31, -6 },
+ { -7, 4, 50, 7, 66, 23, -8 },
+ { -8, 10, 57, 7, 61, 16, -8 },
+ { -8, 17, 62, 7, 56, 9, -8 },
+ { -7, 25, 65, 7, 49, 3, -7 },
+ { -5, 32, 68, 7, 41, -2, -6 },
+ { -5, -2, 40, 7, 66, 34, -5 },
+ { -7, 2, 47, 7, 67, 26, -7 },
+ { -8, 8, 54, 7, 63, 19, -8 },
+ { -8, 14, 60, 7, 58, 12, -8 },
+ { -8, 22, 64, 7, 52, 5, -7 },
+ { -6, 29, 67, 7, 44, 0, -6 } },
+ .odd = { { -8, 17, 61, 7, 56, 10, -8 },
+ { -7, 24, 64, 7, 50, 4, -7 },
+ { -6, 31, 68, 7, 42, -1, -6 },
+ { -5, -3, 39, 7, 68, 34, -5 },
+ { -7, 1, 47, 7, 67, 27, -7 },
+ { -8, 7, 54, 7, 64, 19, -8 },
+ { -8, 14, 59, 7, 59, 12, -8 },
+ { -8, 21, 64, 7, 52, 6, -7 },
+ { -7, 28, 68, 7, 45, 0, -6 },
+ { -4, 36, 68, 7, 37, -4, -5 },
+ { -6, 0, 44, 7, 66, 30, -6 },
+ { -7, 5, 51, 7, 65, 22, -8 },
+ { -8, 11, 57, 7, 61, 15, -8 },
+ { -8, 18, 63, 7, 55, 8, -8 },
+ { -7, 25, 67, 7, 48, 2, -7 },
+ { -5, 33, 66, 7, 41, -2, -5 },
+ { -5, -2, 41, 7, 66, 33, -5 },
+ { -7, 2, 48, 7, 67, 25, -7 },
+ { -8, 8, 55, 7, 63, 18, -8 },
+ { -8, 15, 61, 7, 57, 11, -8 },
+ { -8, 22, 65, 7, 51, 5, -7 },
+ { -6, 30, 66, 7, 44, 0, -6 },
+ { -5, -4, 37, 7, 68, 36, -4 },
+ { -6, 0, 45, 7, 68, 28, -7 },
+ { -7, 6, 52, 7, 64, 21, -8 },
+ { -8, 12, 59, 7, 59, 14, -8 },
+ { -8, 19, 64, 7, 54, 7, -8 },
+ { -7, 27, 67, 7, 47, 1, -7 },
+ { -5, 34, 68, 7, 39, -3, -5 },
+ { -6, -1, 42, 7, 68, 31, -6 },
+ { -7, 4, 50, 7, 64, 24, -7 },
+ { -8, 10, 56, 7, 61, 17, -8 } } },
+ .ptrn_arr = { { 0x99ccce67, 0xce667339, 0x733399cc, 0xcce66 } },
+ .sample_patrn_length = 118,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 28) = 0.533333 */
+ .hor_phase_arr = {
+ .even = { { -3, 37, 65, 7, 37, -3, -5 },
+ { -6, 0, 43, 7, 65, 31, -5 },
+ { -7, 4, 48, 7, 65, 25, -7 },
+ { -8, 9, 54, 7, 62, 19, -8 },
+ { -8, 14, 58, 7, 58, 14, -8 },
+ { -8, 19, 62, 7, 54, 9, -8 },
+ { -7, 25, 65, 7, 48, 4, -7 },
+ { -5, 31, 65, 7, 43, 0, -6 } },
+ .odd = { { -8, 17, 60, 7, 56, 11, -8 },
+ { -7, 22, 63, 7, 51, 6, -7 },
+ { -6, 28, 65, 7, 46, 2, -7 },
+ { -4, 34, 66, 7, 40, -2, -6 },
+ { -6, -2, 40, 7, 66, 34, -4 },
+ { -7, 2, 46, 7, 65, 28, -6 },
+ { -7, 6, 51, 7, 63, 22, -7 },
+ { -8, 11, 56, 7, 60, 17, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -3, 37, 65, 7, 37, -3, -5 },
+ { -6, 0, 43, 7, 65, 31, -5 },
+ { -7, 4, 48, 7, 65, 25, -7 },
+ { -8, 9, 54, 7, 62, 19, -8 },
+ { -8, 14, 58, 7, 58, 14, -8 },
+ { -8, 19, 62, 7, 54, 9, -8 },
+ { -7, 25, 65, 7, 48, 4, -7 },
+ { -5, 31, 65, 7, 43, 0, -6 } },
+ .odd = { { -8, 17, 60, 7, 56, 11, -8 },
+ { -7, 22, 63, 7, 51, 6, -7 },
+ { -6, 28, 65, 7, 46, 2, -7 },
+ { -4, 34, 66, 7, 40, -2, -6 },
+ { -6, -2, 40, 7, 66, 34, -4 },
+ { -7, 2, 46, 7, 65, 28, -6 },
+ { -7, 6, 51, 7, 63, 22, -7 },
+ { -8, 11, 56, 7, 60, 17, -8 } } },
+ .ptrn_arr = { { 0xccce667 } },
+ .sample_patrn_length = 30,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 29) = 0.52459 */
+ .hor_phase_arr = {
+ .even = { { -2, 37, 63, 7, 37, -2, -5 },
+ { -6, 0, 41, 7, 64, 33, -4 },
+ { -7, 3, 45, 7, 65, 28, -6 },
+ { -7, 6, 49, 7, 63, 24, -7 },
+ { -8, 9, 53, 7, 61, 20, -7 },
+ { -8, 13, 56, 7, 59, 16, -8 },
+ { -8, 17, 60, 7, 55, 12, -8 },
+ { -7, 21, 62, 7, 52, 8, -8 },
+ { -6, 26, 62, 7, 48, 5, -7 },
+ { -5, 30, 64, 7, 44, 2, -7 },
+ { -4, 34, 65, 7, 40, -1, -6 },
+ { -6, -2, 38, 7, 66, 35, -3 },
+ { -6, 1, 42, 7, 65, 31, -5 },
+ { -7, 4, 47, 7, 63, 27, -6 },
+ { -7, 7, 50, 7, 62, 23, -7 },
+ { -8, 11, 54, 7, 59, 19, -7 },
+ { -8, 15, 57, 7, 57, 15, -8 },
+ { -7, 19, 59, 7, 54, 11, -8 },
+ { -7, 23, 62, 7, 50, 7, -7 },
+ { -6, 27, 63, 7, 47, 4, -7 },
+ { -5, 31, 65, 7, 42, 1, -6 },
+ { -3, 35, 66, 7, 38, -2, -6 },
+ { -6, -1, 40, 7, 65, 34, -4 },
+ { -7, 2, 44, 7, 64, 30, -5 },
+ { -7, 5, 48, 7, 62, 26, -6 },
+ { -8, 8, 52, 7, 62, 21, -7 },
+ { -8, 12, 55, 7, 60, 17, -8 },
+ { -8, 16, 59, 7, 56, 13, -8 },
+ { -7, 20, 61, 7, 53, 9, -8 },
+ { -7, 24, 63, 7, 49, 6, -7 },
+ { -6, 28, 65, 7, 45, 3, -7 },
+ { -4, 33, 64, 7, 41, 0, -6 } },
+ .odd = { { -8, 17, 58, 7, 56, 13, -8 },
+ { -7, 21, 61, 7, 52, 9, -8 },
+ { -6, 25, 62, 7, 49, 5, -7 },
+ { -5, 29, 64, 7, 45, 2, -7 },
+ { -4, 33, 65, 7, 40, 0, -6 },
+ { -6, -2, 37, 7, 66, 36, -3 },
+ { -6, 0, 42, 7, 64, 32, -4 },
+ { -7, 3, 46, 7, 64, 28, -6 },
+ { -7, 7, 50, 7, 61, 24, -7 },
+ { -8, 10, 53, 7, 61, 19, -7 },
+ { -8, 14, 57, 7, 58, 15, -8 },
+ { -8, 18, 60, 7, 55, 11, -8 },
+ { -7, 22, 62, 7, 51, 8, -8 },
+ { -6, 26, 64, 7, 47, 4, -7 },
+ { -5, 31, 65, 7, 43, 1, -7 },
+ { -3, 35, 64, 7, 39, -1, -6 },
+ { -6, -1, 39, 7, 64, 35, -3 },
+ { -7, 1, 43, 7, 65, 31, -5 },
+ { -7, 4, 47, 7, 64, 26, -6 },
+ { -8, 8, 51, 7, 62, 22, -7 },
+ { -8, 11, 55, 7, 60, 18, -8 },
+ { -8, 15, 58, 7, 57, 14, -8 },
+ { -7, 19, 61, 7, 53, 10, -8 },
+ { -7, 24, 61, 7, 50, 7, -7 },
+ { -6, 28, 64, 7, 46, 3, -7 },
+ { -4, 32, 64, 7, 42, 0, -6 },
+ { -3, 36, 66, 7, 37, -2, -6 },
+ { -6, 0, 40, 7, 65, 33, -4 },
+ { -7, 2, 45, 7, 64, 29, -5 },
+ { -7, 5, 49, 7, 62, 25, -6 },
+ { -8, 9, 52, 7, 61, 21, -7 },
+ { -8, 13, 56, 7, 58, 17, -8 } } },
+ .ver_phase_arr = {
+ .even = { { -2, 37, 63, 7, 37, -2, -5 },
+ { -6, 0, 41, 7, 64, 33, -4 },
+ { -7, 3, 45, 7, 65, 28, -6 },
+ { -7, 6, 49, 7, 63, 24, -7 },
+ { -8, 9, 53, 7, 61, 20, -7 },
+ { -8, 13, 56, 7, 59, 16, -8 },
+ { -8, 17, 60, 7, 55, 12, -8 },
+ { -7, 21, 62, 7, 52, 8, -8 },
+ { -6, 26, 62, 7, 48, 5, -7 },
+ { -5, 30, 64, 7, 44, 2, -7 },
+ { -4, 34, 65, 7, 40, -1, -6 },
+ { -6, -2, 38, 7, 66, 35, -3 },
+ { -6, 1, 42, 7, 65, 31, -5 },
+ { -7, 4, 47, 7, 63, 27, -6 },
+ { -7, 7, 50, 7, 62, 23, -7 },
+ { -8, 11, 54, 7, 59, 19, -7 },
+ { -8, 15, 57, 7, 57, 15, -8 },
+ { -7, 19, 59, 7, 54, 11, -8 },
+ { -7, 23, 62, 7, 50, 7, -7 },
+ { -6, 27, 63, 7, 47, 4, -7 },
+ { -5, 31, 65, 7, 42, 1, -6 },
+ { -3, 35, 66, 7, 38, -2, -6 },
+ { -6, -1, 40, 7, 65, 34, -4 },
+ { -7, 2, 44, 7, 64, 30, -5 },
+ { -7, 5, 48, 7, 62, 26, -6 },
+ { -8, 8, 52, 7, 62, 21, -7 },
+ { -8, 12, 55, 7, 60, 17, -8 },
+ { -8, 16, 59, 7, 56, 13, -8 },
+ { -7, 20, 61, 7, 53, 9, -8 },
+ { -7, 24, 63, 7, 49, 6, -7 },
+ { -6, 28, 65, 7, 45, 3, -7 },
+ { -4, 33, 64, 7, 41, 0, -6 } },
+ .odd = { { -8, 17, 58, 7, 56, 13, -8 },
+ { -7, 21, 61, 7, 52, 9, -8 },
+ { -6, 25, 62, 7, 49, 5, -7 },
+ { -5, 29, 64, 7, 45, 2, -7 },
+ { -4, 33, 65, 7, 40, 0, -6 },
+ { -6, -2, 37, 7, 66, 36, -3 },
+ { -6, 0, 42, 7, 64, 32, -4 },
+ { -7, 3, 46, 7, 64, 28, -6 },
+ { -7, 7, 50, 7, 61, 24, -7 },
+ { -8, 10, 53, 7, 61, 19, -7 },
+ { -8, 14, 57, 7, 58, 15, -8 },
+ { -8, 18, 60, 7, 55, 11, -8 },
+ { -7, 22, 62, 7, 51, 8, -8 },
+ { -6, 26, 64, 7, 47, 4, -7 },
+ { -5, 31, 65, 7, 43, 1, -7 },
+ { -3, 35, 64, 7, 39, -1, -6 },
+ { -6, -1, 39, 7, 64, 35, -3 },
+ { -7, 1, 43, 7, 65, 31, -5 },
+ { -7, 4, 47, 7, 64, 26, -6 },
+ { -8, 8, 51, 7, 62, 22, -7 },
+ { -8, 11, 55, 7, 60, 18, -8 },
+ { -8, 15, 58, 7, 57, 14, -8 },
+ { -7, 19, 61, 7, 53, 10, -8 },
+ { -7, 24, 61, 7, 50, 7, -7 },
+ { -6, 28, 64, 7, 46, 3, -7 },
+ { -4, 32, 64, 7, 42, 0, -6 },
+ { -3, 36, 66, 7, 37, -2, -6 },
+ { -6, 0, 40, 7, 65, 33, -4 },
+ { -7, 2, 45, 7, 64, 29, -5 },
+ { -7, 5, 49, 7, 62, 25, -6 },
+ { -8, 9, 52, 7, 61, 21, -7 },
+ { -8, 13, 56, 7, 58, 17, -8 } } },
+ .ptrn_arr = { { 0xccce6667, 0x399999cc, 0x66673333, 0xcccce6 } },
+ .sample_patrn_length = 122,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 30) = 0.516129 */
+ .hor_phase_arr = {
+ .even = { { -2, 37, 64, 7, 37, -2, -6 },
+ { -6, 0, 39, 7, 64, 34, -3 },
+ { -7, 2, 42, 7, 64, 31, -4 },
+ { -7, 4, 45, 7, 62, 29, -5 },
+ { -7, 6, 47, 7, 62, 26, -6 },
+ { -7, 8, 50, 7, 60, 23, -6 },
+ { -8, 10, 52, 7, 60, 21, -7 },
+ { -8, 13, 54, 7, 58, 18, -7 },
+ { -8, 15, 58, 7, 56, 15, -8 },
+ { -7, 18, 58, 7, 54, 13, -8 },
+ { -7, 21, 60, 7, 52, 10, -8 },
+ { -6, 23, 60, 7, 50, 8, -7 },
+ { -6, 26, 62, 7, 47, 6, -7 },
+ { -5, 29, 62, 7, 45, 4, -7 },
+ { -4, 31, 64, 7, 42, 2, -7 },
+ { -3, 34, 64, 7, 39, 0, -6 } },
+ .odd = { { -7, 17, 57, 7, 55, 14, -8 },
+ { -7, 19, 59, 7, 53, 12, -8 },
+ { -7, 22, 61, 7, 51, 9, -8 },
+ { -6, 25, 60, 7, 49, 7, -7 },
+ { -5, 27, 62, 7, 46, 5, -7 },
+ { -5, 30, 63, 7, 44, 3, -7 },
+ { -3, 33, 62, 7, 41, 1, -6 },
+ { -2, 35, 64, 7, 38, -1, -6 },
+ { -6, -1, 38, 7, 64, 35, -2 },
+ { -6, 1, 41, 7, 62, 33, -3 },
+ { -7, 3, 44, 7, 63, 30, -5 },
+ { -7, 5, 46, 7, 62, 27, -5 },
+ { -7, 7, 49, 7, 60, 25, -6 },
+ { -8, 9, 51, 7, 61, 22, -7 },
+ { -8, 12, 53, 7, 59, 19, -7 },
+ { -8, 14, 55, 7, 57, 17, -7 } } },
+ .ver_phase_arr = {
+ .even = { { -2, 37, 64, 7, 37, -2, -6 },
+ { -6, 0, 39, 7, 64, 34, -3 },
+ { -7, 2, 42, 7, 64, 31, -4 },
+ { -7, 4, 45, 7, 62, 29, -5 },
+ { -7, 6, 47, 7, 62, 26, -6 },
+ { -7, 8, 50, 7, 60, 23, -6 },
+ { -8, 10, 52, 7, 60, 21, -7 },
+ { -8, 13, 54, 7, 58, 18, -7 },
+ { -8, 15, 58, 7, 56, 15, -8 },
+ { -7, 18, 58, 7, 54, 13, -8 },
+ { -7, 21, 60, 7, 52, 10, -8 },
+ { -6, 23, 60, 7, 50, 8, -7 },
+ { -6, 26, 62, 7, 47, 6, -7 },
+ { -5, 29, 62, 7, 45, 4, -7 },
+ { -4, 31, 64, 7, 42, 2, -7 },
+ { -3, 34, 64, 7, 39, 0, -6 } },
+ .odd = { { -7, 17, 57, 7, 55, 14, -8 },
+ { -7, 19, 59, 7, 53, 12, -8 },
+ { -7, 22, 61, 7, 51, 9, -8 },
+ { -6, 25, 60, 7, 49, 7, -7 },
+ { -5, 27, 62, 7, 46, 5, -7 },
+ { -5, 30, 63, 7, 44, 3, -7 },
+ { -3, 33, 62, 7, 41, 1, -6 },
+ { -2, 35, 64, 7, 38, -1, -6 },
+ { -6, -1, 38, 7, 64, 35, -2 },
+ { -6, 1, 41, 7, 62, 33, -3 },
+ { -7, 3, 44, 7, 63, 30, -5 },
+ { -7, 5, 46, 7, 62, 27, -5 },
+ { -7, 7, 49, 7, 60, 25, -6 },
+ { -8, 9, 51, 7, 61, 22, -7 },
+ { -8, 12, 53, 7, 59, 19, -7 },
+ { -8, 14, 55, 7, 57, 17, -7 } } },
+ .ptrn_arr = { { 0xe6666667, 0xccccccc } },
+ .sample_patrn_length = 62,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 31) = 0.507937 */
+ .hor_phase_arr = {
+ .even = { { -1, 37, 62, 7, 37, -1, -6 },
+ { -6, 0, 38, 7, 62, 35, -1 },
+ { -6, 1, 39, 7, 62, 34, -2 },
+ { -7, 2, 41, 7, 62, 33, -3 },
+ { -7, 3, 42, 7, 61, 32, -3 },
+ { -7, 4, 43, 7, 62, 30, -4 },
+ { -7, 4, 44, 7, 62, 29, -4 },
+ { -7, 6, 46, 7, 60, 28, -5 },
+ { -7, 7, 47, 7, 60, 26, -5 },
+ { -7, 8, 48, 7, 60, 25, -6 },
+ { -7, 9, 49, 7, 59, 24, -6 },
+ { -7, 10, 50, 7, 59, 22, -6 },
+ { -7, 11, 51, 7, 59, 21, -7 },
+ { -7, 12, 52, 7, 58, 20, -7 },
+ { -7, 13, 53, 7, 57, 19, -7 },
+ { -7, 15, 54, 7, 56, 17, -7 },
+ { -7, 16, 55, 7, 55, 16, -7 },
+ { -7, 17, 56, 7, 54, 15, -7 },
+ { -7, 19, 57, 7, 53, 13, -7 },
+ { -7, 20, 58, 7, 52, 12, -7 },
+ { -7, 21, 59, 7, 51, 11, -7 },
+ { -6, 22, 59, 7, 50, 10, -7 },
+ { -6, 24, 59, 7, 49, 9, -7 },
+ { -6, 25, 60, 7, 48, 8, -7 },
+ { -5, 26, 60, 7, 47, 7, -7 },
+ { -5, 28, 60, 7, 46, 6, -7 },
+ { -4, 29, 62, 7, 44, 4, -7 },
+ { -4, 30, 62, 7, 43, 4, -7 },
+ { -3, 32, 61, 7, 42, 3, -7 },
+ { -3, 33, 62, 7, 41, 2, -7 },
+ { -2, 34, 62, 7, 39, 1, -6 },
+ { -1, 35, 62, 7, 38, 0, -6 } },
+ .odd = { { -7, 17, 55, 7, 55, 15, -7 },
+ { -7, 18, 56, 7, 54, 14, -7 },
+ { -7, 19, 57, 7, 53, 13, -7 },
+ { -7, 20, 58, 7, 52, 12, -7 },
+ { -6, 22, 58, 7, 51, 10, -7 },
+ { -6, 23, 59, 7, 50, 9, -7 },
+ { -6, 24, 60, 7, 49, 8, -7 },
+ { -5, 26, 60, 7, 47, 7, -7 },
+ { -5, 27, 61, 7, 46, 6, -7 },
+ { -5, 28, 62, 7, 45, 5, -7 },
+ { -4, 30, 61, 7, 44, 4, -7 },
+ { -4, 31, 62, 7, 43, 3, -7 },
+ { -3, 32, 63, 7, 41, 2, -7 },
+ { -2, 34, 61, 7, 40, 1, -6 },
+ { -2, 35, 62, 7, 39, 0, -6 },
+ { -1, 36, 62, 7, 37, 0, -6 },
+ { -6, 0, 37, 7, 62, 36, -1 },
+ { -6, 0, 39, 7, 62, 35, -2 },
+ { -6, 1, 40, 7, 61, 34, -2 },
+ { -7, 2, 41, 7, 63, 32, -3 },
+ { -7, 3, 43, 7, 62, 31, -4 },
+ { -7, 4, 44, 7, 61, 30, -4 },
+ { -7, 5, 45, 7, 62, 28, -5 },
+ { -7, 6, 46, 7, 61, 27, -5 },
+ { -7, 7, 47, 7, 60, 26, -5 },
+ { -7, 8, 49, 7, 60, 24, -6 },
+ { -7, 9, 50, 7, 59, 23, -6 },
+ { -7, 10, 51, 7, 58, 22, -6 },
+ { -7, 12, 52, 7, 58, 20, -7 },
+ { -7, 13, 53, 7, 57, 19, -7 },
+ { -7, 14, 54, 7, 56, 18, -7 },
+ { -7, 15, 55, 7, 55, 17, -7 } } },
+ .ver_phase_arr = {
+ .even = { { -1, 37, 62, 7, 37, -1, -6 },
+ { -6, 0, 38, 7, 62, 35, -1 },
+ { -6, 1, 39, 7, 62, 34, -2 },
+ { -7, 2, 41, 7, 62, 33, -3 },
+ { -7, 3, 42, 7, 61, 32, -3 },
+ { -7, 4, 43, 7, 62, 30, -4 },
+ { -7, 4, 44, 7, 62, 29, -4 },
+ { -7, 6, 46, 7, 60, 28, -5 },
+ { -7, 7, 47, 7, 60, 26, -5 },
+ { -7, 8, 48, 7, 60, 25, -6 },
+ { -7, 9, 49, 7, 59, 24, -6 },
+ { -7, 10, 50, 7, 59, 22, -6 },
+ { -7, 11, 51, 7, 59, 21, -7 },
+ { -7, 12, 52, 7, 58, 20, -7 },
+ { -7, 13, 53, 7, 57, 19, -7 },
+ { -7, 15, 54, 7, 56, 17, -7 },
+ { -7, 16, 55, 7, 55, 16, -7 },
+ { -7, 17, 56, 7, 54, 15, -7 },
+ { -7, 19, 57, 7, 53, 13, -7 },
+ { -7, 20, 58, 7, 52, 12, -7 },
+ { -7, 21, 59, 7, 51, 11, -7 },
+ { -6, 22, 59, 7, 50, 10, -7 },
+ { -6, 24, 59, 7, 49, 9, -7 },
+ { -6, 25, 60, 7, 48, 8, -7 },
+ { -5, 26, 60, 7, 47, 7, -7 },
+ { -5, 28, 60, 7, 46, 6, -7 },
+ { -4, 29, 62, 7, 44, 4, -7 },
+ { -4, 30, 62, 7, 43, 4, -7 },
+ { -3, 32, 61, 7, 42, 3, -7 },
+ { -3, 33, 62, 7, 41, 2, -7 },
+ { -2, 34, 62, 7, 39, 1, -6 },
+ { -1, 35, 62, 7, 38, 0, -6 } },
+ .odd = { { -7, 17, 55, 7, 55, 15, -7 },
+ { -7, 18, 56, 7, 54, 14, -7 },
+ { -7, 19, 57, 7, 53, 13, -7 },
+ { -7, 20, 58, 7, 52, 12, -7 },
+ { -6, 22, 58, 7, 51, 10, -7 },
+ { -6, 23, 59, 7, 50, 9, -7 },
+ { -6, 24, 60, 7, 49, 8, -7 },
+ { -5, 26, 60, 7, 47, 7, -7 },
+ { -5, 27, 61, 7, 46, 6, -7 },
+ { -5, 28, 62, 7, 45, 5, -7 },
+ { -4, 30, 61, 7, 44, 4, -7 },
+ { -4, 31, 62, 7, 43, 3, -7 },
+ { -3, 32, 63, 7, 41, 2, -7 },
+ { -2, 34, 61, 7, 40, 1, -6 },
+ { -2, 35, 62, 7, 39, 0, -6 },
+ { -1, 36, 62, 7, 37, 0, -6 },
+ { -6, 0, 37, 7, 62, 36, -1 },
+ { -6, 0, 39, 7, 62, 35, -2 },
+ { -6, 1, 40, 7, 61, 34, -2 },
+ { -7, 2, 41, 7, 63, 32, -3 },
+ { -7, 3, 43, 7, 62, 31, -4 },
+ { -7, 4, 44, 7, 61, 30, -4 },
+ { -7, 5, 45, 7, 62, 28, -5 },
+ { -7, 6, 46, 7, 61, 27, -5 },
+ { -7, 7, 47, 7, 60, 26, -5 },
+ { -7, 8, 49, 7, 60, 24, -6 },
+ { -7, 9, 50, 7, 59, 23, -6 },
+ { -7, 10, 51, 7, 58, 22, -6 },
+ { -7, 12, 52, 7, 58, 20, -7 },
+ { -7, 13, 53, 7, 57, 19, -7 },
+ { -7, 14, 54, 7, 56, 18, -7 },
+ { -7, 15, 55, 7, 55, 17, -7 } } },
+ .ptrn_arr = { { 0x66666667, 0xe6666666, 0xcccccccc, 0xccccccc } },
+ .sample_patrn_length = 126,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 32) = 0.5 */
+ .hor_phase_arr = {
+ .even = { { 0, 8, 112, 7, 8, 0, 0 } },
+ .odd = { { 0, 0, 64, 7, 64, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 8, 112, 7, 8, 0, 0 } },
+ .odd = { { 0, 0, 64, 7, 64, 0, 0 } } },
+ .ptrn_arr = { { 0x3 } },
+ .sample_patrn_length = 4,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 33) = 0.492308 */
+ .hor_phase_arr = {
+ .even = { { 0, 9, 110, 7, 9, 0, 0 },
+ { 0, 8, 109, 7, 11, 0, 0 },
+ { 0, 7, 109, 7, 12, 0, 0 },
+ { 0, 6, 108, 7, 14, 0, 0 },
+ { 0, 5, 107, 7, 16, 0, 0 },
+ { 0, 4, 105, 7, 19, 0, 0 },
+ { 0, 3, 103, 7, 22, 0, 0 },
+ { 0, 3, 100, 7, 25, 0, 0 },
+ { 0, 2, 98, 7, 28, 0, 0 },
+ { 0, 2, 94, 7, 32, 0, 0 },
+ { 0, 2, 90, 7, 36, 0, 0 },
+ { 0, 1, 87, 7, 40, 0, 0 },
+ { 0, 1, 83, 7, 44, 0, 0 },
+ { 0, 1, 78, 7, 49, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 },
+ { 0, 0, 64, 7, 64, 0, 0 },
+ { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 49, 7, 78, 1, 0 },
+ { 0, 0, 44, 7, 83, 1, 0 },
+ { 0, 0, 40, 7, 87, 1, 0 },
+ { 0, 0, 36, 7, 90, 2, 0 },
+ { 0, 0, 32, 7, 94, 2, 0 },
+ { 0, 0, 28, 7, 98, 2, 0 },
+ { 0, 0, 25, 7, 100, 3, 0 },
+ { 0, 0, 22, 7, 103, 3, 0 },
+ { 0, 0, 19, 7, 105, 4, 0 },
+ { 0, 0, 16, 7, 107, 5, 0 },
+ { 0, 0, 14, 7, 108, 6, 0 },
+ { 0, 0, 12, 7, 109, 7, 0 },
+ { 0, 0, 11, 7, 109, 8, 0 } },
+ .odd = { { 0, 0, 61, 7, 67, 0, 0 },
+ { 0, 0, 56, 7, 71, 1, 0 },
+ { 0, 0, 51, 7, 76, 1, 0 },
+ { 0, 0, 46, 7, 81, 1, 0 },
+ { 0, 0, 42, 7, 85, 1, 0 },
+ { 0, 0, 38, 7, 89, 1, 0 },
+ { 0, 0, 34, 7, 92, 2, 0 },
+ { 0, 0, 30, 7, 96, 2, 0 },
+ { 0, 0, 26, 7, 99, 3, 0 },
+ { 0, 0, 23, 7, 102, 3, 0 },
+ { 0, 0, 20, 7, 104, 4, 0 },
+ { 0, 0, 18, 7, 106, 4, 0 },
+ { 0, 0, 15, 7, 108, 5, 0 },
+ { 0, 0, 13, 7, 109, 6, 0 },
+ { 0, 0, 11, 7, 110, 7, 0 },
+ { 0, 0, 10, 7, 110, 8, 0 },
+ { 0, 8, 110, 7, 10, 0, 0 },
+ { 0, 7, 110, 7, 11, 0, 0 },
+ { 0, 6, 109, 7, 13, 0, 0 },
+ { 0, 5, 108, 7, 15, 0, 0 },
+ { 0, 4, 106, 7, 18, 0, 0 },
+ { 0, 4, 104, 7, 20, 0, 0 },
+ { 0, 3, 102, 7, 23, 0, 0 },
+ { 0, 3, 99, 7, 26, 0, 0 },
+ { 0, 2, 96, 7, 30, 0, 0 },
+ { 0, 2, 92, 7, 34, 0, 0 },
+ { 0, 1, 89, 7, 38, 0, 0 },
+ { 0, 1, 85, 7, 42, 0, 0 },
+ { 0, 1, 81, 7, 46, 0, 0 },
+ { 0, 1, 76, 7, 51, 0, 0 },
+ { 0, 1, 71, 7, 56, 0, 0 },
+ { 0, 0, 67, 7, 61, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 9, 110, 7, 9, 0, 0 },
+ { 0, 8, 109, 7, 11, 0, 0 },
+ { 0, 7, 109, 7, 12, 0, 0 },
+ { 0, 6, 108, 7, 14, 0, 0 },
+ { 0, 5, 107, 7, 16, 0, 0 },
+ { 0, 4, 105, 7, 19, 0, 0 },
+ { 0, 3, 103, 7, 22, 0, 0 },
+ { 0, 3, 100, 7, 25, 0, 0 },
+ { 0, 2, 98, 7, 28, 0, 0 },
+ { 0, 2, 94, 7, 32, 0, 0 },
+ { 0, 2, 90, 7, 36, 0, 0 },
+ { 0, 1, 87, 7, 40, 0, 0 },
+ { 0, 1, 83, 7, 44, 0, 0 },
+ { 0, 1, 78, 7, 49, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 },
+ { 0, 0, 64, 7, 64, 0, 0 },
+ { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 49, 7, 78, 1, 0 },
+ { 0, 0, 44, 7, 83, 1, 0 },
+ { 0, 0, 40, 7, 87, 1, 0 },
+ { 0, 0, 36, 7, 90, 2, 0 },
+ { 0, 0, 32, 7, 94, 2, 0 },
+ { 0, 0, 28, 7, 98, 2, 0 },
+ { 0, 0, 25, 7, 100, 3, 0 },
+ { 0, 0, 22, 7, 103, 3, 0 },
+ { 0, 0, 19, 7, 105, 4, 0 },
+ { 0, 0, 16, 7, 107, 5, 0 },
+ { 0, 0, 14, 7, 108, 6, 0 },
+ { 0, 0, 12, 7, 109, 7, 0 },
+ { 0, 0, 11, 7, 109, 8, 0 } },
+ .odd = { { 0, 0, 61, 7, 67, 0, 0 },
+ { 0, 0, 56, 7, 71, 1, 0 },
+ { 0, 0, 51, 7, 76, 1, 0 },
+ { 0, 0, 46, 7, 81, 1, 0 },
+ { 0, 0, 42, 7, 85, 1, 0 },
+ { 0, 0, 38, 7, 89, 1, 0 },
+ { 0, 0, 34, 7, 92, 2, 0 },
+ { 0, 0, 30, 7, 96, 2, 0 },
+ { 0, 0, 26, 7, 99, 3, 0 },
+ { 0, 0, 23, 7, 102, 3, 0 },
+ { 0, 0, 20, 7, 104, 4, 0 },
+ { 0, 0, 18, 7, 106, 4, 0 },
+ { 0, 0, 15, 7, 108, 5, 0 },
+ { 0, 0, 13, 7, 109, 6, 0 },
+ { 0, 0, 11, 7, 110, 7, 0 },
+ { 0, 0, 10, 7, 110, 8, 0 },
+ { 0, 8, 110, 7, 10, 0, 0 },
+ { 0, 7, 110, 7, 11, 0, 0 },
+ { 0, 6, 109, 7, 13, 0, 0 },
+ { 0, 5, 108, 7, 15, 0, 0 },
+ { 0, 4, 106, 7, 18, 0, 0 },
+ { 0, 4, 104, 7, 20, 0, 0 },
+ { 0, 3, 102, 7, 23, 0, 0 },
+ { 0, 3, 99, 7, 26, 0, 0 },
+ { 0, 2, 96, 7, 30, 0, 0 },
+ { 0, 2, 92, 7, 34, 0, 0 },
+ { 0, 1, 89, 7, 38, 0, 0 },
+ { 0, 1, 85, 7, 42, 0, 0 },
+ { 0, 1, 81, 7, 46, 0, 0 },
+ { 0, 1, 76, 7, 51, 0, 0 },
+ { 0, 1, 71, 7, 56, 0, 0 },
+ { 0, 0, 67, 7, 61, 0, 0 } } },
+ .ptrn_arr = { { 0x33333333, 0x33333333, 0x99999999, 0x99999999 } },
+ .sample_patrn_length = 130,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 34) = 0.484848 */
+ .hor_phase_arr = {
+ .even = { { 0, 10, 108, 7, 10, 0, 0 },
+ { 0, 7, 108, 7, 13, 0, 0 },
+ { 0, 5, 106, 7, 17, 0, 0 },
+ { 0, 4, 102, 7, 22, 0, 0 },
+ { 0, 3, 96, 7, 29, 0, 0 },
+ { 0, 2, 90, 7, 36, 0, 0 },
+ { 0, 1, 82, 7, 45, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 64, 7, 64, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 45, 7, 82, 1, 0 },
+ { 0, 0, 36, 7, 90, 2, 0 },
+ { 0, 0, 29, 7, 96, 3, 0 },
+ { 0, 0, 22, 7, 102, 4, 0 },
+ { 0, 0, 17, 7, 106, 5, 0 },
+ { 0, 0, 13, 7, 108, 7, 0 } },
+ .odd = { { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 49, 7, 78, 1, 0 },
+ { 0, 0, 40, 7, 87, 1, 0 },
+ { 0, 0, 32, 7, 94, 2, 0 },
+ { 0, 0, 25, 7, 100, 3, 0 },
+ { 0, 0, 20, 7, 104, 4, 0 },
+ { 0, 0, 15, 7, 107, 6, 0 },
+ { 0, 0, 11, 7, 109, 8, 0 },
+ { 0, 8, 109, 7, 11, 0, 0 },
+ { 0, 6, 107, 7, 15, 0, 0 },
+ { 0, 4, 104, 7, 20, 0, 0 },
+ { 0, 3, 100, 7, 25, 0, 0 },
+ { 0, 2, 94, 7, 32, 0, 0 },
+ { 0, 1, 87, 7, 40, 0, 0 },
+ { 0, 1, 78, 7, 49, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 10, 108, 7, 10, 0, 0 },
+ { 0, 7, 108, 7, 13, 0, 0 },
+ { 0, 5, 106, 7, 17, 0, 0 },
+ { 0, 4, 102, 7, 22, 0, 0 },
+ { 0, 3, 96, 7, 29, 0, 0 },
+ { 0, 2, 90, 7, 36, 0, 0 },
+ { 0, 1, 82, 7, 45, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 64, 7, 64, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 45, 7, 82, 1, 0 },
+ { 0, 0, 36, 7, 90, 2, 0 },
+ { 0, 0, 29, 7, 96, 3, 0 },
+ { 0, 0, 22, 7, 102, 4, 0 },
+ { 0, 0, 17, 7, 106, 5, 0 },
+ { 0, 0, 13, 7, 108, 7, 0 } },
+ .odd = { { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 49, 7, 78, 1, 0 },
+ { 0, 0, 40, 7, 87, 1, 0 },
+ { 0, 0, 32, 7, 94, 2, 0 },
+ { 0, 0, 25, 7, 100, 3, 0 },
+ { 0, 0, 20, 7, 104, 4, 0 },
+ { 0, 0, 15, 7, 107, 6, 0 },
+ { 0, 0, 11, 7, 109, 8, 0 },
+ { 0, 8, 109, 7, 11, 0, 0 },
+ { 0, 6, 107, 7, 15, 0, 0 },
+ { 0, 4, 104, 7, 20, 0, 0 },
+ { 0, 3, 100, 7, 25, 0, 0 },
+ { 0, 2, 94, 7, 32, 0, 0 },
+ { 0, 1, 87, 7, 40, 0, 0 },
+ { 0, 1, 78, 7, 49, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 } } },
+ .ptrn_arr = { { 0x33333333, 0x99999999 } },
+ .sample_patrn_length = 66,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 35) = 0.477612 */
+ .hor_phase_arr = {
+ .even = { { 0, 10, 108, 7, 10, 0, 0 },
+ { 0, 6, 106, 7, 16, 0, 0 },
+ { 0, 4, 101, 7, 23, 0, 0 },
+ { 0, 2, 93, 7, 33, 0, 0 },
+ { 0, 1, 82, 7, 45, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 41, 7, 85, 2, 0 },
+ { 0, 0, 29, 7, 96, 3, 0 },
+ { 0, 0, 20, 7, 103, 5, 0 },
+ { 0, 0, 14, 7, 106, 8, 0 },
+ { 0, 9, 107, 7, 12, 0, 0 },
+ { 0, 5, 105, 7, 18, 0, 0 },
+ { 0, 3, 99, 7, 26, 0, 0 },
+ { 0, 2, 89, 7, 37, 0, 0 },
+ { 0, 1, 77, 7, 50, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 50, 7, 77, 1, 0 },
+ { 0, 0, 37, 7, 89, 2, 0 },
+ { 0, 0, 26, 7, 99, 3, 0 },
+ { 0, 0, 18, 7, 105, 5, 0 },
+ { 0, 0, 12, 7, 107, 9, 0 },
+ { 0, 8, 106, 7, 14, 0, 0 },
+ { 0, 5, 103, 7, 20, 0, 0 },
+ { 0, 3, 96, 7, 29, 0, 0 },
+ { 0, 2, 85, 7, 41, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 45, 7, 82, 1, 0 },
+ { 0, 0, 33, 7, 93, 2, 0 },
+ { 0, 0, 23, 7, 101, 4, 0 },
+ { 0, 0, 16, 7, 106, 6, 0 } },
+ .odd = { { 0, 0, 56, 7, 71, 1, 0 },
+ { 0, 0, 43, 7, 84, 1, 0 },
+ { 0, 0, 31, 7, 94, 3, 0 },
+ { 0, 0, 22, 7, 102, 4, 0 },
+ { 0, 0, 15, 7, 106, 7, 0 },
+ { 0, 9, 108, 7, 11, 0, 0 },
+ { 0, 6, 105, 7, 17, 0, 0 },
+ { 0, 4, 99, 7, 25, 0, 0 },
+ { 0, 2, 91, 7, 35, 0, 0 },
+ { 0, 1, 80, 7, 47, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 52, 7, 75, 1, 0 },
+ { 0, 0, 39, 7, 87, 2, 0 },
+ { 0, 0, 28, 7, 97, 3, 0 },
+ { 0, 0, 19, 7, 104, 5, 0 },
+ { 0, 0, 13, 7, 107, 8, 0 },
+ { 0, 8, 107, 7, 13, 0, 0 },
+ { 0, 5, 104, 7, 19, 0, 0 },
+ { 0, 3, 97, 7, 28, 0, 0 },
+ { 0, 2, 87, 7, 39, 0, 0 },
+ { 0, 1, 75, 7, 52, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 47, 7, 80, 1, 0 },
+ { 0, 0, 35, 7, 91, 2, 0 },
+ { 0, 0, 25, 7, 99, 4, 0 },
+ { 0, 0, 17, 7, 105, 6, 0 },
+ { 0, 0, 11, 7, 108, 9, 0 },
+ { 0, 7, 106, 7, 15, 0, 0 },
+ { 0, 4, 102, 7, 22, 0, 0 },
+ { 0, 3, 94, 7, 31, 0, 0 },
+ { 0, 1, 84, 7, 43, 0, 0 },
+ { 0, 1, 71, 7, 56, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 10, 108, 7, 10, 0, 0 },
+ { 0, 6, 106, 7, 16, 0, 0 },
+ { 0, 4, 101, 7, 23, 0, 0 },
+ { 0, 2, 93, 7, 33, 0, 0 },
+ { 0, 1, 82, 7, 45, 0, 0 },
+ { 0, 1, 68, 7, 59, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 41, 7, 85, 2, 0 },
+ { 0, 0, 29, 7, 96, 3, 0 },
+ { 0, 0, 20, 7, 103, 5, 0 },
+ { 0, 0, 14, 7, 106, 8, 0 },
+ { 0, 9, 107, 7, 12, 0, 0 },
+ { 0, 5, 105, 7, 18, 0, 0 },
+ { 0, 3, 99, 7, 26, 0, 0 },
+ { 0, 2, 89, 7, 37, 0, 0 },
+ { 0, 1, 77, 7, 50, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 50, 7, 77, 1, 0 },
+ { 0, 0, 37, 7, 89, 2, 0 },
+ { 0, 0, 26, 7, 99, 3, 0 },
+ { 0, 0, 18, 7, 105, 5, 0 },
+ { 0, 0, 12, 7, 107, 9, 0 },
+ { 0, 8, 106, 7, 14, 0, 0 },
+ { 0, 5, 103, 7, 20, 0, 0 },
+ { 0, 3, 96, 7, 29, 0, 0 },
+ { 0, 2, 85, 7, 41, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 59, 7, 68, 1, 0 },
+ { 0, 0, 45, 7, 82, 1, 0 },
+ { 0, 0, 33, 7, 93, 2, 0 },
+ { 0, 0, 23, 7, 101, 4, 0 },
+ { 0, 0, 16, 7, 106, 6, 0 } },
+ .odd = { { 0, 0, 56, 7, 71, 1, 0 },
+ { 0, 0, 43, 7, 84, 1, 0 },
+ { 0, 0, 31, 7, 94, 3, 0 },
+ { 0, 0, 22, 7, 102, 4, 0 },
+ { 0, 0, 15, 7, 106, 7, 0 },
+ { 0, 9, 108, 7, 11, 0, 0 },
+ { 0, 6, 105, 7, 17, 0, 0 },
+ { 0, 4, 99, 7, 25, 0, 0 },
+ { 0, 2, 91, 7, 35, 0, 0 },
+ { 0, 1, 80, 7, 47, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 52, 7, 75, 1, 0 },
+ { 0, 0, 39, 7, 87, 2, 0 },
+ { 0, 0, 28, 7, 97, 3, 0 },
+ { 0, 0, 19, 7, 104, 5, 0 },
+ { 0, 0, 13, 7, 107, 8, 0 },
+ { 0, 8, 107, 7, 13, 0, 0 },
+ { 0, 5, 104, 7, 19, 0, 0 },
+ { 0, 3, 97, 7, 28, 0, 0 },
+ { 0, 2, 87, 7, 39, 0, 0 },
+ { 0, 1, 75, 7, 52, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 47, 7, 80, 1, 0 },
+ { 0, 0, 35, 7, 91, 2, 0 },
+ { 0, 0, 25, 7, 99, 4, 0 },
+ { 0, 0, 17, 7, 105, 6, 0 },
+ { 0, 0, 11, 7, 108, 9, 0 },
+ { 0, 7, 106, 7, 15, 0, 0 },
+ { 0, 4, 102, 7, 22, 0, 0 },
+ { 0, 3, 94, 7, 31, 0, 0 },
+ { 0, 1, 84, 7, 43, 0, 0 },
+ { 0, 1, 71, 7, 56, 0, 0 } } },
+ .ptrn_arr = { { 0x99933333, 0xccccc999, 0x32666664, 0x99993333,
+ 0x9 } },
+ .sample_patrn_length = 134,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 36) = 0.470588 */
+ .hor_phase_arr = {
+ .even = { { 0, 11, 106, 7, 11, 0, 0 },
+ { 0, 6, 103, 7, 19, 0, 0 },
+ { 0, 3, 95, 7, 30, 0, 0 },
+ { 0, 1, 81, 7, 46, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 46, 7, 81, 1, 0 },
+ { 0, 0, 30, 7, 95, 3, 0 },
+ { 0, 0, 19, 7, 103, 6, 0 } },
+ .odd = { { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 37, 7, 89, 2, 0 },
+ { 0, 0, 24, 7, 100, 4, 0 },
+ { 0, 0, 14, 7, 106, 8, 0 },
+ { 0, 8, 106, 7, 14, 0, 0 },
+ { 0, 4, 100, 7, 24, 0, 0 },
+ { 0, 2, 89, 7, 37, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 11, 106, 7, 11, 0, 0 },
+ { 0, 6, 103, 7, 19, 0, 0 },
+ { 0, 3, 95, 7, 30, 0, 0 },
+ { 0, 1, 81, 7, 46, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 46, 7, 81, 1, 0 },
+ { 0, 0, 30, 7, 95, 3, 0 },
+ { 0, 0, 19, 7, 103, 6, 0 } },
+ .odd = { { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 37, 7, 89, 2, 0 },
+ { 0, 0, 24, 7, 100, 4, 0 },
+ { 0, 0, 14, 7, 106, 8, 0 },
+ { 0, 8, 106, 7, 14, 0, 0 },
+ { 0, 4, 100, 7, 24, 0, 0 },
+ { 0, 2, 89, 7, 37, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 } } },
+ .ptrn_arr = { { 0x99993333 } },
+ .sample_patrn_length = 34,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 37) = 0.463768 */
+ .hor_phase_arr = {
+ .even = { { 0, 11, 106, 7, 11, 0, 0 },
+ { 0, 5, 101, 7, 22, 0, 0 },
+ { 0, 2, 88, 7, 38, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 46, 7, 80, 2, 0 },
+ { 0, 0, 28, 7, 96, 4, 0 },
+ { 0, 0, 15, 7, 104, 9, 0 },
+ { 0, 7, 104, 7, 17, 0, 0 },
+ { 0, 3, 94, 7, 31, 0, 0 },
+ { 0, 1, 77, 7, 50, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 34, 7, 91, 3, 0 },
+ { 0, 0, 19, 7, 103, 6, 0 },
+ { 0, 10, 105, 7, 13, 0, 0 },
+ { 0, 5, 98, 7, 25, 0, 0 },
+ { 0, 2, 84, 7, 42, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 42, 7, 84, 2, 0 },
+ { 0, 0, 25, 7, 98, 5, 0 },
+ { 0, 0, 13, 7, 105, 10, 0 },
+ { 0, 6, 103, 7, 19, 0, 0 },
+ { 0, 3, 91, 7, 34, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 50, 7, 77, 1, 0 },
+ { 0, 0, 31, 7, 94, 3, 0 },
+ { 0, 0, 17, 7, 104, 7, 0 },
+ { 0, 9, 104, 7, 15, 0, 0 },
+ { 0, 4, 96, 7, 28, 0, 0 },
+ { 0, 2, 80, 7, 46, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 38, 7, 88, 2, 0 },
+ { 0, 0, 22, 7, 101, 5, 0 } },
+ .odd = { { 0, 0, 52, 7, 75, 1, 0 },
+ { 0, 0, 33, 7, 92, 3, 0 },
+ { 0, 0, 18, 7, 103, 7, 0 },
+ { 0, 9, 105, 7, 14, 0, 0 },
+ { 0, 4, 98, 7, 26, 0, 0 },
+ { 0, 2, 82, 7, 44, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 40, 7, 86, 2, 0 },
+ { 0, 0, 23, 7, 100, 5, 0 },
+ { 0, 0, 12, 7, 105, 11, 0 },
+ { 0, 6, 101, 7, 21, 0, 0 },
+ { 0, 3, 89, 7, 36, 0, 0 },
+ { 0, 1, 69, 7, 57, 1, 0 },
+ { 0, 0, 48, 7, 79, 1, 0 },
+ { 0, 0, 29, 7, 95, 4, 0 },
+ { 0, 0, 16, 7, 104, 8, 0 },
+ { 0, 8, 104, 7, 16, 0, 0 },
+ { 0, 4, 95, 7, 29, 0, 0 },
+ { 0, 1, 79, 7, 48, 0, 0 },
+ { 0, 1, 57, 7, 69, 1, 0 },
+ { 0, 0, 36, 7, 89, 3, 0 },
+ { 0, 0, 21, 7, 101, 6, 0 },
+ { 0, 11, 105, 7, 12, 0, 0 },
+ { 0, 5, 100, 7, 23, 0, 0 },
+ { 0, 2, 86, 7, 40, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 44, 7, 82, 2, 0 },
+ { 0, 0, 26, 7, 98, 4, 0 },
+ { 0, 0, 14, 7, 105, 9, 0 },
+ { 0, 7, 103, 7, 18, 0, 0 },
+ { 0, 3, 92, 7, 33, 0, 0 },
+ { 0, 1, 75, 7, 52, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 11, 106, 7, 11, 0, 0 },
+ { 0, 5, 101, 7, 22, 0, 0 },
+ { 0, 2, 88, 7, 38, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 46, 7, 80, 2, 0 },
+ { 0, 0, 28, 7, 96, 4, 0 },
+ { 0, 0, 15, 7, 104, 9, 0 },
+ { 0, 7, 104, 7, 17, 0, 0 },
+ { 0, 3, 94, 7, 31, 0, 0 },
+ { 0, 1, 77, 7, 50, 0, 0 },
+ { 0, 0, 54, 7, 73, 1, 0 },
+ { 0, 0, 34, 7, 91, 3, 0 },
+ { 0, 0, 19, 7, 103, 6, 0 },
+ { 0, 10, 105, 7, 13, 0, 0 },
+ { 0, 5, 98, 7, 25, 0, 0 },
+ { 0, 2, 84, 7, 42, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 42, 7, 84, 2, 0 },
+ { 0, 0, 25, 7, 98, 5, 0 },
+ { 0, 0, 13, 7, 105, 10, 0 },
+ { 0, 6, 103, 7, 19, 0, 0 },
+ { 0, 3, 91, 7, 34, 0, 0 },
+ { 0, 1, 73, 7, 54, 0, 0 },
+ { 0, 0, 50, 7, 77, 1, 0 },
+ { 0, 0, 31, 7, 94, 3, 0 },
+ { 0, 0, 17, 7, 104, 7, 0 },
+ { 0, 9, 104, 7, 15, 0, 0 },
+ { 0, 4, 96, 7, 28, 0, 0 },
+ { 0, 2, 80, 7, 46, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 38, 7, 88, 2, 0 },
+ { 0, 0, 22, 7, 101, 5, 0 } },
+ .odd = { { 0, 0, 52, 7, 75, 1, 0 },
+ { 0, 0, 33, 7, 92, 3, 0 },
+ { 0, 0, 18, 7, 103, 7, 0 },
+ { 0, 9, 105, 7, 14, 0, 0 },
+ { 0, 4, 98, 7, 26, 0, 0 },
+ { 0, 2, 82, 7, 44, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 40, 7, 86, 2, 0 },
+ { 0, 0, 23, 7, 100, 5, 0 },
+ { 0, 0, 12, 7, 105, 11, 0 },
+ { 0, 6, 101, 7, 21, 0, 0 },
+ { 0, 3, 89, 7, 36, 0, 0 },
+ { 0, 1, 69, 7, 57, 1, 0 },
+ { 0, 0, 48, 7, 79, 1, 0 },
+ { 0, 0, 29, 7, 95, 4, 0 },
+ { 0, 0, 16, 7, 104, 8, 0 },
+ { 0, 8, 104, 7, 16, 0, 0 },
+ { 0, 4, 95, 7, 29, 0, 0 },
+ { 0, 1, 79, 7, 48, 0, 0 },
+ { 0, 1, 57, 7, 69, 1, 0 },
+ { 0, 0, 36, 7, 89, 3, 0 },
+ { 0, 0, 21, 7, 101, 6, 0 },
+ { 0, 11, 105, 7, 12, 0, 0 },
+ { 0, 5, 100, 7, 23, 0, 0 },
+ { 0, 2, 86, 7, 40, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 44, 7, 82, 2, 0 },
+ { 0, 0, 26, 7, 98, 4, 0 },
+ { 0, 0, 14, 7, 105, 9, 0 },
+ { 0, 7, 103, 7, 18, 0, 0 },
+ { 0, 3, 92, 7, 33, 0, 0 },
+ { 0, 1, 75, 7, 52, 0, 0 } } },
+ .ptrn_arr = { { 0xc9999333, 0x332664cc, 0x4cc99993, 0x93332666,
+ 0x99 } },
+ .sample_patrn_length = 138,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 38) = 0.457143 */
+ .hor_phase_arr = {
+ .even = { { 0, 12, 104, 7, 12, 0, 0 },
+ { 0, 5, 98, 7, 25, 0, 0 },
+ { 0, 2, 80, 7, 46, 0, 0 },
+ { 0, 1, 55, 7, 71, 1, 0 },
+ { 0, 0, 32, 7, 92, 4, 0 },
+ { 0, 0, 16, 7, 103, 9, 0 },
+ { 0, 7, 101, 7, 20, 0, 0 },
+ { 0, 3, 86, 7, 39, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 39, 7, 86, 3, 0 },
+ { 0, 0, 20, 7, 101, 7, 0 },
+ { 0, 9, 103, 7, 16, 0, 0 },
+ { 0, 4, 92, 7, 32, 0, 0 },
+ { 0, 1, 71, 7, 55, 1, 0 },
+ { 0, 0, 46, 7, 80, 2, 0 },
+ { 0, 0, 25, 7, 98, 5, 0 } },
+ .odd = { { 0, 0, 50, 7, 76, 2, 0 },
+ { 0, 0, 28, 7, 96, 4, 0 },
+ { 0, 0, 14, 7, 104, 10, 0 },
+ { 0, 6, 99, 7, 23, 0, 0 },
+ { 0, 2, 84, 7, 42, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 35, 7, 90, 3, 0 },
+ { 0, 0, 18, 7, 102, 8, 0 },
+ { 0, 8, 102, 7, 18, 0, 0 },
+ { 0, 3, 90, 7, 35, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 42, 7, 84, 2, 0 },
+ { 0, 0, 23, 7, 99, 6, 0 },
+ { 0, 10, 104, 7, 14, 0, 0 },
+ { 0, 4, 96, 7, 28, 0, 0 },
+ { 0, 2, 76, 7, 50, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 12, 104, 7, 12, 0, 0 },
+ { 0, 5, 98, 7, 25, 0, 0 },
+ { 0, 2, 80, 7, 46, 0, 0 },
+ { 0, 1, 55, 7, 71, 1, 0 },
+ { 0, 0, 32, 7, 92, 4, 0 },
+ { 0, 0, 16, 7, 103, 9, 0 },
+ { 0, 7, 101, 7, 20, 0, 0 },
+ { 0, 3, 86, 7, 39, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 39, 7, 86, 3, 0 },
+ { 0, 0, 20, 7, 101, 7, 0 },
+ { 0, 9, 103, 7, 16, 0, 0 },
+ { 0, 4, 92, 7, 32, 0, 0 },
+ { 0, 1, 71, 7, 55, 1, 0 },
+ { 0, 0, 46, 7, 80, 2, 0 },
+ { 0, 0, 25, 7, 98, 5, 0 } },
+ .odd = { { 0, 0, 50, 7, 76, 2, 0 },
+ { 0, 0, 28, 7, 96, 4, 0 },
+ { 0, 0, 14, 7, 104, 10, 0 },
+ { 0, 6, 99, 7, 23, 0, 0 },
+ { 0, 2, 84, 7, 42, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 35, 7, 90, 3, 0 },
+ { 0, 0, 18, 7, 102, 8, 0 },
+ { 0, 8, 102, 7, 18, 0, 0 },
+ { 0, 3, 90, 7, 35, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 42, 7, 84, 2, 0 },
+ { 0, 0, 23, 7, 99, 6, 0 },
+ { 0, 10, 104, 7, 14, 0, 0 },
+ { 0, 4, 96, 7, 28, 0, 0 },
+ { 0, 2, 76, 7, 50, 0, 0 } } },
+ .ptrn_arr = { { 0xcc999333, 0x99332664, 0x9 } },
+ .sample_patrn_length = 70,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 39) = 0.450704 */
+ .hor_phase_arr = {
+ .even = { { 0, 13, 102, 7, 13, 0, 0 },
+ { 0, 5, 94, 7, 29, 0, 0 },
+ { 0, 1, 71, 7, 55, 1, 0 },
+ { 0, 0, 43, 7, 83, 2, 0 },
+ { 0, 0, 21, 7, 100, 7, 0 },
+ { 0, 8, 102, 7, 18, 0, 0 },
+ { 0, 3, 86, 7, 39, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 32, 7, 92, 4, 0 },
+ { 0, 0, 14, 7, 103, 11, 0 },
+ { 0, 5, 97, 7, 26, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 47, 7, 79, 2, 0 },
+ { 0, 0, 23, 7, 99, 6, 0 },
+ { 0, 10, 102, 7, 16, 0, 0 },
+ { 0, 3, 89, 7, 36, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 36, 7, 89, 3, 0 },
+ { 0, 0, 16, 7, 102, 10, 0 },
+ { 0, 6, 99, 7, 23, 0, 0 },
+ { 0, 2, 79, 7, 47, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 26, 7, 97, 5, 0 },
+ { 0, 11, 103, 7, 14, 0, 0 },
+ { 0, 4, 92, 7, 32, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 39, 7, 86, 3, 0 },
+ { 0, 0, 18, 7, 102, 8, 0 },
+ { 0, 7, 100, 7, 21, 0, 0 },
+ { 0, 2, 83, 7, 43, 0, 0 },
+ { 0, 1, 55, 7, 71, 1, 0 },
+ { 0, 0, 29, 7, 94, 5, 0 } },
+ .odd = { { 0, 0, 49, 7, 77, 2, 0 },
+ { 0, 0, 25, 7, 97, 6, 0 },
+ { 0, 10, 103, 7, 15, 0, 0 },
+ { 0, 4, 90, 7, 34, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 37, 7, 88, 3, 0 },
+ { 0, 0, 17, 7, 102, 9, 0 },
+ { 0, 7, 99, 7, 22, 0, 0 },
+ { 0, 2, 81, 7, 45, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 27, 7, 96, 5, 0 },
+ { 0, 12, 103, 7, 13, 0, 0 },
+ { 0, 4, 93, 7, 31, 0, 0 },
+ { 0, 1, 69, 7, 57, 1, 0 },
+ { 0, 0, 41, 7, 84, 3, 0 },
+ { 0, 0, 20, 7, 100, 8, 0 },
+ { 0, 8, 100, 7, 20, 0, 0 },
+ { 0, 3, 84, 7, 41, 0, 0 },
+ { 0, 1, 57, 7, 69, 1, 0 },
+ { 0, 0, 31, 7, 93, 4, 0 },
+ { 0, 0, 13, 7, 103, 12, 0 },
+ { 0, 5, 96, 7, 27, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 45, 7, 81, 2, 0 },
+ { 0, 0, 22, 7, 99, 7, 0 },
+ { 0, 9, 102, 7, 17, 0, 0 },
+ { 0, 3, 88, 7, 37, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 34, 7, 90, 4, 0 },
+ { 0, 0, 15, 7, 103, 10, 0 },
+ { 0, 6, 97, 7, 25, 0, 0 },
+ { 0, 2, 77, 7, 49, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 13, 102, 7, 13, 0, 0 },
+ { 0, 5, 94, 7, 29, 0, 0 },
+ { 0, 1, 71, 7, 55, 1, 0 },
+ { 0, 0, 43, 7, 83, 2, 0 },
+ { 0, 0, 21, 7, 100, 7, 0 },
+ { 0, 8, 102, 7, 18, 0, 0 },
+ { 0, 3, 86, 7, 39, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 32, 7, 92, 4, 0 },
+ { 0, 0, 14, 7, 103, 11, 0 },
+ { 0, 5, 97, 7, 26, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 47, 7, 79, 2, 0 },
+ { 0, 0, 23, 7, 99, 6, 0 },
+ { 0, 10, 102, 7, 16, 0, 0 },
+ { 0, 3, 89, 7, 36, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 36, 7, 89, 3, 0 },
+ { 0, 0, 16, 7, 102, 10, 0 },
+ { 0, 6, 99, 7, 23, 0, 0 },
+ { 0, 2, 79, 7, 47, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 26, 7, 97, 5, 0 },
+ { 0, 11, 103, 7, 14, 0, 0 },
+ { 0, 4, 92, 7, 32, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 39, 7, 86, 3, 0 },
+ { 0, 0, 18, 7, 102, 8, 0 },
+ { 0, 7, 100, 7, 21, 0, 0 },
+ { 0, 2, 83, 7, 43, 0, 0 },
+ { 0, 1, 55, 7, 71, 1, 0 },
+ { 0, 0, 29, 7, 94, 5, 0 } },
+ .odd = { { 0, 0, 49, 7, 77, 2, 0 },
+ { 0, 0, 25, 7, 97, 6, 0 },
+ { 0, 10, 103, 7, 15, 0, 0 },
+ { 0, 4, 90, 7, 34, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 37, 7, 88, 3, 0 },
+ { 0, 0, 17, 7, 102, 9, 0 },
+ { 0, 7, 99, 7, 22, 0, 0 },
+ { 0, 2, 81, 7, 45, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 27, 7, 96, 5, 0 },
+ { 0, 12, 103, 7, 13, 0, 0 },
+ { 0, 4, 93, 7, 31, 0, 0 },
+ { 0, 1, 69, 7, 57, 1, 0 },
+ { 0, 0, 41, 7, 84, 3, 0 },
+ { 0, 0, 20, 7, 100, 8, 0 },
+ { 0, 8, 100, 7, 20, 0, 0 },
+ { 0, 3, 84, 7, 41, 0, 0 },
+ { 0, 1, 57, 7, 69, 1, 0 },
+ { 0, 0, 31, 7, 93, 4, 0 },
+ { 0, 0, 13, 7, 103, 12, 0 },
+ { 0, 5, 96, 7, 27, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 45, 7, 81, 2, 0 },
+ { 0, 0, 22, 7, 99, 7, 0 },
+ { 0, 9, 102, 7, 17, 0, 0 },
+ { 0, 3, 88, 7, 37, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 34, 7, 90, 4, 0 },
+ { 0, 0, 15, 7, 103, 10, 0 },
+ { 0, 6, 97, 7, 25, 0, 0 },
+ { 0, 2, 77, 7, 49, 0, 0 } } },
+ .ptrn_arr = { { 0x4cc99933, 0xc9993266, 0x9332664c, 0x32664cc9,
+ 0x993 } },
+ .sample_patrn_length = 142,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 40) = 0.444444 */
+ .hor_phase_arr = {
+ .even = { { 0, 13, 102, 7, 13, 0, 0 },
+ { 0, 4, 91, 7, 33, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 33, 7, 91, 4, 0 } },
+ .odd = { { 0, 0, 47, 7, 79, 2, 0 },
+ { 0, 0, 21, 7, 99, 8, 0 },
+ { 0, 8, 99, 7, 21, 0, 0 },
+ { 0, 2, 79, 7, 47, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 13, 102, 7, 13, 0, 0 },
+ { 0, 4, 91, 7, 33, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 33, 7, 91, 4, 0 } },
+ .odd = { { 0, 0, 47, 7, 79, 2, 0 },
+ { 0, 0, 21, 7, 99, 8, 0 },
+ { 0, 8, 99, 7, 21, 0, 0 },
+ { 0, 2, 79, 7, 47, 0, 0 } } },
+ .ptrn_arr = { { 0x9933 } },
+ .sample_patrn_length = 18,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 41) = 0.438356 */
+ .hor_phase_arr = {
+ .even = { { 0, 14, 100, 7, 14, 0, 0 },
+ { 0, 4, 87, 7, 37, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 25, 7, 96, 7, 0 },
+ { 0, 8, 98, 7, 22, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 40, 7, 85, 3, 0 },
+ { 0, 0, 16, 7, 100, 12, 0 },
+ { 0, 5, 90, 7, 33, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 27, 7, 95, 6, 0 },
+ { 0, 9, 99, 7, 20, 0, 0 },
+ { 0, 2, 78, 7, 47, 1, 0 },
+ { 0, 0, 44, 7, 81, 3, 0 },
+ { 0, 0, 18, 7, 99, 11, 0 },
+ { 0, 5, 93, 7, 30, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 30, 7, 93, 5, 0 },
+ { 0, 11, 99, 7, 18, 0, 0 },
+ { 0, 3, 81, 7, 44, 0, 0 },
+ { 0, 1, 47, 7, 78, 2, 0 },
+ { 0, 0, 20, 7, 99, 9, 0 },
+ { 0, 6, 95, 7, 27, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 33, 7, 90, 5, 0 },
+ { 0, 12, 100, 7, 16, 0, 0 },
+ { 0, 3, 85, 7, 40, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 22, 7, 98, 8, 0 },
+ { 0, 7, 96, 7, 25, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 37, 7, 87, 4, 0 } },
+ .odd = { { 0, 0, 45, 7, 80, 3, 0 },
+ { 0, 0, 19, 7, 99, 10, 0 },
+ { 0, 6, 93, 7, 29, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 32, 7, 91, 5, 0 },
+ { 0, 11, 100, 7, 17, 0, 0 },
+ { 0, 3, 83, 7, 42, 0, 0 },
+ { 0, 1, 49, 7, 76, 2, 0 },
+ { 0, 0, 21, 7, 98, 9, 0 },
+ { 0, 7, 95, 7, 26, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 35, 7, 89, 4, 0 },
+ { 0, 13, 100, 7, 15, 0, 0 },
+ { 0, 4, 86, 7, 38, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 23, 7, 97, 8, 0 },
+ { 0, 8, 97, 7, 23, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 38, 7, 86, 4, 0 },
+ { 0, 0, 15, 7, 100, 13, 0 },
+ { 0, 4, 89, 7, 35, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 26, 7, 95, 7, 0 },
+ { 0, 9, 98, 7, 21, 0, 0 },
+ { 0, 2, 76, 7, 49, 1, 0 },
+ { 0, 0, 42, 7, 83, 3, 0 },
+ { 0, 0, 17, 7, 100, 11, 0 },
+ { 0, 5, 91, 7, 32, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 29, 7, 93, 6, 0 },
+ { 0, 10, 99, 7, 19, 0, 0 },
+ { 0, 3, 80, 7, 45, 0, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 14, 100, 7, 14, 0, 0 },
+ { 0, 4, 87, 7, 37, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 25, 7, 96, 7, 0 },
+ { 0, 8, 98, 7, 22, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 40, 7, 85, 3, 0 },
+ { 0, 0, 16, 7, 100, 12, 0 },
+ { 0, 5, 90, 7, 33, 0, 0 },
+ { 0, 1, 59, 7, 67, 1, 0 },
+ { 0, 0, 27, 7, 95, 6, 0 },
+ { 0, 9, 99, 7, 20, 0, 0 },
+ { 0, 2, 78, 7, 47, 1, 0 },
+ { 0, 0, 44, 7, 81, 3, 0 },
+ { 0, 0, 18, 7, 99, 11, 0 },
+ { 0, 5, 93, 7, 30, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 30, 7, 93, 5, 0 },
+ { 0, 11, 99, 7, 18, 0, 0 },
+ { 0, 3, 81, 7, 44, 0, 0 },
+ { 0, 1, 47, 7, 78, 2, 0 },
+ { 0, 0, 20, 7, 99, 9, 0 },
+ { 0, 6, 95, 7, 27, 0, 0 },
+ { 0, 1, 67, 7, 59, 1, 0 },
+ { 0, 0, 33, 7, 90, 5, 0 },
+ { 0, 12, 100, 7, 16, 0, 0 },
+ { 0, 3, 85, 7, 40, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 22, 7, 98, 8, 0 },
+ { 0, 7, 96, 7, 25, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 37, 7, 87, 4, 0 } },
+ .odd = { { 0, 0, 45, 7, 80, 3, 0 },
+ { 0, 0, 19, 7, 99, 10, 0 },
+ { 0, 6, 93, 7, 29, 0, 0 },
+ { 0, 1, 65, 7, 61, 1, 0 },
+ { 0, 0, 32, 7, 91, 5, 0 },
+ { 0, 11, 100, 7, 17, 0, 0 },
+ { 0, 3, 83, 7, 42, 0, 0 },
+ { 0, 1, 49, 7, 76, 2, 0 },
+ { 0, 0, 21, 7, 98, 9, 0 },
+ { 0, 7, 95, 7, 26, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 35, 7, 89, 4, 0 },
+ { 0, 13, 100, 7, 15, 0, 0 },
+ { 0, 4, 86, 7, 38, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 23, 7, 97, 8, 0 },
+ { 0, 8, 97, 7, 23, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 38, 7, 86, 4, 0 },
+ { 0, 0, 15, 7, 100, 13, 0 },
+ { 0, 4, 89, 7, 35, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 26, 7, 95, 7, 0 },
+ { 0, 9, 98, 7, 21, 0, 0 },
+ { 0, 2, 76, 7, 49, 1, 0 },
+ { 0, 0, 42, 7, 83, 3, 0 },
+ { 0, 0, 17, 7, 100, 11, 0 },
+ { 0, 5, 91, 7, 32, 0, 0 },
+ { 0, 1, 61, 7, 65, 1, 0 },
+ { 0, 0, 29, 7, 93, 6, 0 },
+ { 0, 10, 99, 7, 19, 0, 0 },
+ { 0, 3, 80, 7, 45, 0, 0 } } },
+ .ptrn_arr = { { 0x664c9933, 0x664c9932, 0x64cc9932, 0x64cc9932,
+ 0x9932 } },
+ .sample_patrn_length = 146,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 42) = 0.432432 */
+ .hor_phase_arr = {
+ .even = { { 0, 14, 100, 7, 14, 0, 0 },
+ { 0, 4, 84, 7, 40, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 0, 18, 7, 99, 11, 0 },
+ { 0, 5, 89, 7, 34, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 23, 7, 96, 9, 0 },
+ { 0, 7, 93, 7, 28, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 28, 7, 93, 7, 0 },
+ { 0, 9, 96, 7, 23, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 34, 7, 89, 5, 0 },
+ { 0, 11, 99, 7, 18, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 40, 7, 84, 4, 0 } },
+ .odd = { { 0, 1, 44, 7, 80, 3, 0 },
+ { 0, 0, 16, 7, 99, 13, 0 },
+ { 0, 4, 87, 7, 37, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 20, 7, 98, 10, 0 },
+ { 0, 6, 91, 7, 31, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 25, 7, 95, 8, 0 },
+ { 0, 8, 95, 7, 25, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 31, 7, 91, 6, 0 },
+ { 0, 10, 98, 7, 20, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 37, 7, 87, 4, 0 },
+ { 0, 13, 99, 7, 16, 0, 0 },
+ { 0, 3, 80, 7, 44, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 14, 100, 7, 14, 0, 0 },
+ { 0, 4, 84, 7, 40, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 0, 18, 7, 99, 11, 0 },
+ { 0, 5, 89, 7, 34, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 23, 7, 96, 9, 0 },
+ { 0, 7, 93, 7, 28, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 28, 7, 93, 7, 0 },
+ { 0, 9, 96, 7, 23, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 34, 7, 89, 5, 0 },
+ { 0, 11, 99, 7, 18, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 40, 7, 84, 4, 0 } },
+ .odd = { { 0, 1, 44, 7, 80, 3, 0 },
+ { 0, 0, 16, 7, 99, 13, 0 },
+ { 0, 4, 87, 7, 37, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 20, 7, 98, 10, 0 },
+ { 0, 6, 91, 7, 31, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 25, 7, 95, 8, 0 },
+ { 0, 8, 95, 7, 25, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 31, 7, 91, 6, 0 },
+ { 0, 10, 98, 7, 20, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 37, 7, 87, 4, 0 },
+ { 0, 13, 99, 7, 16, 0, 0 },
+ { 0, 3, 80, 7, 44, 1, 0 } } },
+ .ptrn_arr = { { 0x264c9933, 0x3264c993, 0x99 } },
+ .sample_patrn_length = 74,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 43) = 0.426667 */
+ .hor_phase_arr = {
+ .even = { { 0, 15, 98, 7, 15, 0, 0 },
+ { 0, 3, 80, 7, 44, 1, 0 },
+ { 0, 0, 41, 7, 83, 4, 0 },
+ { 0, 13, 98, 7, 17, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 38, 7, 85, 5, 0 },
+ { 0, 12, 97, 7, 19, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 34, 7, 89, 5, 0 },
+ { 0, 10, 97, 7, 21, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 31, 7, 91, 6, 0 },
+ { 0, 9, 96, 7, 23, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 29, 7, 92, 7, 0 },
+ { 0, 8, 94, 7, 26, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 26, 7, 94, 8, 0 },
+ { 0, 7, 92, 7, 29, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 23, 7, 96, 9, 0 },
+ { 0, 6, 91, 7, 31, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 21, 7, 97, 10, 0 },
+ { 0, 5, 89, 7, 34, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 19, 7, 97, 12, 0 },
+ { 0, 5, 85, 7, 38, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 0, 17, 7, 98, 13, 0 },
+ { 0, 4, 83, 7, 41, 0, 0 },
+ { 0, 1, 44, 7, 80, 3, 0 } },
+ .odd = { { 0, 1, 43, 7, 80, 4, 0 },
+ { 0, 14, 98, 7, 16, 0, 0 },
+ { 0, 3, 78, 7, 46, 1, 0 },
+ { 0, 0, 39, 7, 85, 4, 0 },
+ { 0, 12, 98, 7, 18, 0, 0 },
+ { 0, 3, 74, 7, 50, 1, 0 },
+ { 0, 0, 36, 7, 87, 5, 0 },
+ { 0, 11, 97, 7, 20, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 33, 7, 89, 6, 0 },
+ { 0, 10, 96, 7, 22, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 30, 7, 92, 6, 0 },
+ { 0, 9, 94, 7, 25, 0, 0 },
+ { 0, 2, 64, 7, 61, 1, 0 },
+ { 0, 0, 27, 7, 94, 7, 0 },
+ { 0, 7, 94, 7, 27, 0, 0 },
+ { 0, 1, 61, 7, 64, 2, 0 },
+ { 0, 0, 25, 7, 94, 9, 0 },
+ { 0, 6, 92, 7, 30, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 22, 7, 96, 10, 0 },
+ { 0, 6, 89, 7, 33, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 20, 7, 97, 11, 0 },
+ { 0, 5, 87, 7, 36, 0, 0 },
+ { 0, 1, 50, 7, 74, 3, 0 },
+ { 0, 0, 18, 7, 98, 12, 0 },
+ { 0, 4, 85, 7, 39, 0, 0 },
+ { 0, 1, 46, 7, 78, 3, 0 },
+ { 0, 0, 16, 7, 98, 14, 0 },
+ { 0, 4, 80, 7, 43, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 15, 98, 7, 15, 0, 0 },
+ { 0, 3, 80, 7, 44, 1, 0 },
+ { 0, 0, 41, 7, 83, 4, 0 },
+ { 0, 13, 98, 7, 17, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 38, 7, 85, 5, 0 },
+ { 0, 12, 97, 7, 19, 0, 0 },
+ { 0, 2, 74, 7, 51, 1, 0 },
+ { 0, 0, 34, 7, 89, 5, 0 },
+ { 0, 10, 97, 7, 21, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 31, 7, 91, 6, 0 },
+ { 0, 9, 96, 7, 23, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 29, 7, 92, 7, 0 },
+ { 0, 8, 94, 7, 26, 0, 0 },
+ { 0, 1, 63, 7, 63, 1, 0 },
+ { 0, 0, 26, 7, 94, 8, 0 },
+ { 0, 7, 92, 7, 29, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 23, 7, 96, 9, 0 },
+ { 0, 6, 91, 7, 31, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 21, 7, 97, 10, 0 },
+ { 0, 5, 89, 7, 34, 0, 0 },
+ { 0, 1, 51, 7, 74, 2, 0 },
+ { 0, 0, 19, 7, 97, 12, 0 },
+ { 0, 5, 85, 7, 38, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 0, 17, 7, 98, 13, 0 },
+ { 0, 4, 83, 7, 41, 0, 0 },
+ { 0, 1, 44, 7, 80, 3, 0 } },
+ .odd = { { 0, 1, 43, 7, 80, 4, 0 },
+ { 0, 14, 98, 7, 16, 0, 0 },
+ { 0, 3, 78, 7, 46, 1, 0 },
+ { 0, 0, 39, 7, 85, 4, 0 },
+ { 0, 12, 98, 7, 18, 0, 0 },
+ { 0, 3, 74, 7, 50, 1, 0 },
+ { 0, 0, 36, 7, 87, 5, 0 },
+ { 0, 11, 97, 7, 20, 0, 0 },
+ { 0, 2, 72, 7, 53, 1, 0 },
+ { 0, 0, 33, 7, 89, 6, 0 },
+ { 0, 10, 96, 7, 22, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 30, 7, 92, 6, 0 },
+ { 0, 9, 94, 7, 25, 0, 0 },
+ { 0, 2, 64, 7, 61, 1, 0 },
+ { 0, 0, 27, 7, 94, 7, 0 },
+ { 0, 7, 94, 7, 27, 0, 0 },
+ { 0, 1, 61, 7, 64, 2, 0 },
+ { 0, 0, 25, 7, 94, 9, 0 },
+ { 0, 6, 92, 7, 30, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 22, 7, 96, 10, 0 },
+ { 0, 6, 89, 7, 33, 0, 0 },
+ { 0, 1, 53, 7, 72, 2, 0 },
+ { 0, 0, 20, 7, 97, 11, 0 },
+ { 0, 5, 87, 7, 36, 0, 0 },
+ { 0, 1, 50, 7, 74, 3, 0 },
+ { 0, 0, 18, 7, 98, 12, 0 },
+ { 0, 4, 85, 7, 39, 0, 0 },
+ { 0, 1, 46, 7, 78, 3, 0 },
+ { 0, 0, 16, 7, 98, 14, 0 },
+ { 0, 4, 80, 7, 43, 1, 0 } } },
+ .ptrn_arr = { { 0x3264c993, 0x93264c99, 0x993264c9, 0xc993264c,
+ 0x93264 } },
+ .sample_patrn_length = 150,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 44) = 0.421053 */
+ .hor_phase_arr = {
+ .even = { { 0, 16, 96, 7, 16, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 35, 7, 87, 6, 0 },
+ { 0, 10, 94, 7, 24, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 24, 7, 94, 10, 0 },
+ { 0, 6, 87, 7, 35, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 } },
+ .odd = { { 0, 1, 41, 7, 82, 4, 0 },
+ { 0, 12, 97, 7, 19, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 29, 7, 92, 7, 0 },
+ { 0, 7, 92, 7, 29, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 19, 7, 97, 12, 0 },
+ { 0, 4, 82, 7, 41, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 16, 96, 7, 16, 0, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 35, 7, 87, 6, 0 },
+ { 0, 10, 94, 7, 24, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 24, 7, 94, 10, 0 },
+ { 0, 6, 87, 7, 35, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 } },
+ .odd = { { 0, 1, 41, 7, 82, 4, 0 },
+ { 0, 12, 97, 7, 19, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 29, 7, 92, 7, 0 },
+ { 0, 7, 92, 7, 29, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 19, 7, 97, 12, 0 },
+ { 0, 4, 82, 7, 41, 1, 0 } } },
+ .ptrn_arr = { { 0x3264c993, 0x9 } },
+ .sample_patrn_length = 38,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 45) = 0.415584 */
+ .hor_phase_arr = {
+ .even = { { 0, 16, 96, 7, 16, 0, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 30, 7, 90, 8, 0 },
+ { 0, 7, 89, 7, 32, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 14, 96, 7, 18, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 27, 7, 92, 9, 0 },
+ { 0, 6, 87, 7, 35, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 13, 95, 7, 20, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 24, 7, 94, 10, 0 },
+ { 0, 5, 85, 7, 38, 0, 0 },
+ { 0, 1, 42, 7, 81, 4, 0 },
+ { 0, 11, 95, 7, 22, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 22, 7, 95, 11, 0 },
+ { 0, 4, 81, 7, 42, 1, 0 },
+ { 0, 0, 38, 7, 85, 5, 0 },
+ { 0, 10, 94, 7, 24, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 20, 7, 95, 13, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 35, 7, 87, 6, 0 },
+ { 0, 9, 92, 7, 27, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 18, 7, 96, 14, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 32, 7, 89, 7, 0 },
+ { 0, 8, 90, 7, 30, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 } },
+ .odd = { { 0, 1, 40, 7, 82, 5, 0 },
+ { 0, 11, 94, 7, 23, 0, 0 },
+ { 0, 2, 61, 7, 63, 2, 0 },
+ { 0, 0, 21, 7, 95, 12, 0 },
+ { 0, 4, 80, 7, 43, 1, 0 },
+ { 0, 0, 37, 7, 85, 6, 0 },
+ { 0, 9, 93, 7, 26, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 19, 7, 95, 14, 0 },
+ { 0, 4, 76, 7, 47, 1, 0 },
+ { 0, 0, 34, 7, 88, 6, 0 },
+ { 0, 8, 92, 7, 28, 0, 0 },
+ { 0, 1, 54, 7, 70, 3, 0 },
+ { 0, 0, 17, 7, 96, 15, 0 },
+ { 0, 3, 74, 7, 50, 1, 0 },
+ { 0, 0, 31, 7, 90, 7, 0 },
+ { 0, 7, 90, 7, 31, 0, 0 },
+ { 0, 1, 50, 7, 74, 3, 0 },
+ { 0, 15, 96, 7, 17, 0, 0 },
+ { 0, 3, 70, 7, 54, 1, 0 },
+ { 0, 0, 28, 7, 92, 8, 0 },
+ { 0, 6, 88, 7, 34, 0, 0 },
+ { 0, 1, 47, 7, 76, 4, 0 },
+ { 0, 14, 95, 7, 19, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 26, 7, 93, 9, 0 },
+ { 0, 6, 85, 7, 37, 0, 0 },
+ { 0, 1, 43, 7, 80, 4, 0 },
+ { 0, 12, 95, 7, 21, 0, 0 },
+ { 0, 2, 63, 7, 61, 2, 0 },
+ { 0, 0, 23, 7, 94, 11, 0 },
+ { 0, 5, 82, 7, 40, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 16, 96, 7, 16, 0, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 30, 7, 90, 8, 0 },
+ { 0, 7, 89, 7, 32, 0, 0 },
+ { 0, 1, 48, 7, 76, 3, 0 },
+ { 0, 14, 96, 7, 18, 0, 0 },
+ { 0, 2, 70, 7, 55, 1, 0 },
+ { 0, 0, 27, 7, 92, 9, 0 },
+ { 0, 6, 87, 7, 35, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 13, 95, 7, 20, 0, 0 },
+ { 0, 2, 66, 7, 59, 1, 0 },
+ { 0, 0, 24, 7, 94, 10, 0 },
+ { 0, 5, 85, 7, 38, 0, 0 },
+ { 0, 1, 42, 7, 81, 4, 0 },
+ { 0, 11, 95, 7, 22, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 22, 7, 95, 11, 0 },
+ { 0, 4, 81, 7, 42, 1, 0 },
+ { 0, 0, 38, 7, 85, 5, 0 },
+ { 0, 10, 94, 7, 24, 0, 0 },
+ { 0, 1, 59, 7, 66, 2, 0 },
+ { 0, 0, 20, 7, 95, 13, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 35, 7, 87, 6, 0 },
+ { 0, 9, 92, 7, 27, 0, 0 },
+ { 0, 1, 55, 7, 70, 2, 0 },
+ { 0, 0, 18, 7, 96, 14, 0 },
+ { 0, 3, 76, 7, 48, 1, 0 },
+ { 0, 0, 32, 7, 89, 7, 0 },
+ { 0, 8, 90, 7, 30, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 } },
+ .odd = { { 0, 1, 40, 7, 82, 5, 0 },
+ { 0, 11, 94, 7, 23, 0, 0 },
+ { 0, 2, 61, 7, 63, 2, 0 },
+ { 0, 0, 21, 7, 95, 12, 0 },
+ { 0, 4, 80, 7, 43, 1, 0 },
+ { 0, 0, 37, 7, 85, 6, 0 },
+ { 0, 9, 93, 7, 26, 0, 0 },
+ { 0, 1, 57, 7, 68, 2, 0 },
+ { 0, 0, 19, 7, 95, 14, 0 },
+ { 0, 4, 76, 7, 47, 1, 0 },
+ { 0, 0, 34, 7, 88, 6, 0 },
+ { 0, 8, 92, 7, 28, 0, 0 },
+ { 0, 1, 54, 7, 70, 3, 0 },
+ { 0, 0, 17, 7, 96, 15, 0 },
+ { 0, 3, 74, 7, 50, 1, 0 },
+ { 0, 0, 31, 7, 90, 7, 0 },
+ { 0, 7, 90, 7, 31, 0, 0 },
+ { 0, 1, 50, 7, 74, 3, 0 },
+ { 0, 15, 96, 7, 17, 0, 0 },
+ { 0, 3, 70, 7, 54, 1, 0 },
+ { 0, 0, 28, 7, 92, 8, 0 },
+ { 0, 6, 88, 7, 34, 0, 0 },
+ { 0, 1, 47, 7, 76, 4, 0 },
+ { 0, 14, 95, 7, 19, 0, 0 },
+ { 0, 2, 68, 7, 57, 1, 0 },
+ { 0, 0, 26, 7, 93, 9, 0 },
+ { 0, 6, 85, 7, 37, 0, 0 },
+ { 0, 1, 43, 7, 80, 4, 0 },
+ { 0, 12, 95, 7, 21, 0, 0 },
+ { 0, 2, 63, 7, 61, 2, 0 },
+ { 0, 0, 23, 7, 94, 11, 0 },
+ { 0, 5, 82, 7, 40, 1, 0 } } },
+ .ptrn_arr = { { 0x9324c993, 0xc99324c9, 0x26499324, 0x93264993,
+ 0x932649 } },
+ .sample_patrn_length = 154,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 46) = 0.410256 */
+ .hor_phase_arr = {
+ .even = { { 0, 17, 94, 7, 17, 0, 0 },
+ { 0, 3, 69, 7, 55, 1, 0 },
+ { 0, 0, 25, 7, 93, 10, 0 },
+ { 0, 5, 80, 7, 42, 1, 0 },
+ { 0, 0, 36, 7, 86, 6, 0 },
+ { 0, 8, 90, 7, 30, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 13, 94, 7, 21, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 21, 7, 94, 13, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 30, 7, 90, 8, 0 },
+ { 0, 6, 86, 7, 36, 0, 0 },
+ { 0, 1, 42, 7, 80, 5, 0 },
+ { 0, 10, 93, 7, 25, 0, 0 },
+ { 0, 1, 55, 7, 69, 3, 0 } },
+ .odd = { { 0, 1, 39, 7, 83, 5, 0 },
+ { 0, 9, 91, 7, 28, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 },
+ { 0, 15, 94, 7, 19, 0, 0 },
+ { 0, 2, 65, 7, 59, 2, 0 },
+ { 0, 0, 23, 7, 93, 12, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 33, 7, 88, 7, 0 },
+ { 0, 7, 88, 7, 33, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 12, 93, 7, 23, 0, 0 },
+ { 0, 2, 59, 7, 65, 2, 0 },
+ { 0, 0, 19, 7, 94, 15, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 28, 7, 91, 9, 0 },
+ { 0, 5, 83, 7, 39, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 17, 94, 7, 17, 0, 0 },
+ { 0, 3, 69, 7, 55, 1, 0 },
+ { 0, 0, 25, 7, 93, 10, 0 },
+ { 0, 5, 80, 7, 42, 1, 0 },
+ { 0, 0, 36, 7, 86, 6, 0 },
+ { 0, 8, 90, 7, 30, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 13, 94, 7, 21, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 21, 7, 94, 13, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 30, 7, 90, 8, 0 },
+ { 0, 6, 86, 7, 36, 0, 0 },
+ { 0, 1, 42, 7, 80, 5, 0 },
+ { 0, 10, 93, 7, 25, 0, 0 },
+ { 0, 1, 55, 7, 69, 3, 0 } },
+ .odd = { { 0, 1, 39, 7, 83, 5, 0 },
+ { 0, 9, 91, 7, 28, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 },
+ { 0, 15, 94, 7, 19, 0, 0 },
+ { 0, 2, 65, 7, 59, 2, 0 },
+ { 0, 0, 23, 7, 93, 12, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 33, 7, 88, 7, 0 },
+ { 0, 7, 88, 7, 33, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 12, 93, 7, 23, 0, 0 },
+ { 0, 2, 59, 7, 65, 2, 0 },
+ { 0, 0, 19, 7, 94, 15, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 28, 7, 91, 9, 0 },
+ { 0, 5, 83, 7, 39, 1, 0 } } },
+ .ptrn_arr = { { 0x93264993, 0x4c99264c, 0x932 } },
+ .sample_patrn_length = 78,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 47) = 0.405063 */
+ .hor_phase_arr = {
+ .even = { { 0, 17, 94, 7, 17, 0, 0 },
+ { 0, 2, 65, 7, 59, 2, 0 },
+ { 0, 0, 21, 7, 93, 14, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 26, 7, 91, 11, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 31, 7, 88, 9, 0 },
+ { 0, 6, 82, 7, 39, 1, 0 },
+ { 0, 1, 36, 7, 84, 7, 0 },
+ { 0, 8, 87, 7, 33, 0, 0 },
+ { 0, 1, 42, 7, 80, 5, 0 },
+ { 0, 10, 90, 7, 28, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 12, 93, 7, 23, 0, 0 },
+ { 0, 2, 55, 7, 68, 3, 0 },
+ { 0, 15, 94, 7, 19, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 19, 7, 94, 15, 0 },
+ { 0, 3, 68, 7, 55, 2, 0 },
+ { 0, 0, 23, 7, 93, 12, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 28, 7, 90, 10, 0 },
+ { 0, 5, 80, 7, 42, 1, 0 },
+ { 0, 0, 33, 7, 87, 8, 0 },
+ { 0, 7, 84, 7, 36, 1, 0 },
+ { 0, 1, 39, 7, 82, 6, 0 },
+ { 0, 9, 88, 7, 31, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 11, 91, 7, 26, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 },
+ { 0, 14, 93, 7, 21, 0, 0 },
+ { 0, 2, 59, 7, 65, 2, 0 } },
+ .odd = { { 0, 1, 38, 7, 83, 6, 0 },
+ { 0, 8, 88, 7, 32, 0, 0 },
+ { 0, 1, 44, 7, 78, 5, 0 },
+ { 0, 10, 91, 7, 27, 0, 0 },
+ { 0, 1, 50, 7, 73, 4, 0 },
+ { 0, 13, 93, 7, 22, 0, 0 },
+ { 0, 2, 57, 7, 66, 3, 0 },
+ { 0, 16, 94, 7, 18, 0, 0 },
+ { 0, 2, 64, 7, 60, 2, 0 },
+ { 0, 0, 20, 7, 93, 15, 0 },
+ { 0, 3, 70, 7, 54, 1, 0 },
+ { 0, 0, 24, 7, 92, 12, 0 },
+ { 0, 4, 76, 7, 47, 1, 0 },
+ { 0, 0, 29, 7, 90, 9, 0 },
+ { 0, 5, 81, 7, 41, 1, 0 },
+ { 0, 0, 35, 7, 86, 7, 0 },
+ { 0, 7, 86, 7, 35, 0, 0 },
+ { 0, 1, 41, 7, 81, 5, 0 },
+ { 0, 9, 90, 7, 29, 0, 0 },
+ { 0, 1, 47, 7, 76, 4, 0 },
+ { 0, 12, 92, 7, 24, 0, 0 },
+ { 0, 1, 54, 7, 70, 3, 0 },
+ { 0, 15, 93, 7, 20, 0, 0 },
+ { 0, 2, 60, 7, 64, 2, 0 },
+ { 0, 0, 18, 7, 94, 16, 0 },
+ { 0, 3, 66, 7, 57, 2, 0 },
+ { 0, 0, 22, 7, 93, 13, 0 },
+ { 0, 4, 73, 7, 50, 1, 0 },
+ { 0, 0, 27, 7, 91, 10, 0 },
+ { 0, 5, 78, 7, 44, 1, 0 },
+ { 0, 0, 32, 7, 88, 8, 0 },
+ { 0, 6, 83, 7, 38, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 17, 94, 7, 17, 0, 0 },
+ { 0, 2, 65, 7, 59, 2, 0 },
+ { 0, 0, 21, 7, 93, 14, 0 },
+ { 0, 3, 72, 7, 52, 1, 0 },
+ { 0, 0, 26, 7, 91, 11, 0 },
+ { 0, 4, 78, 7, 45, 1, 0 },
+ { 0, 0, 31, 7, 88, 9, 0 },
+ { 0, 6, 82, 7, 39, 1, 0 },
+ { 0, 1, 36, 7, 84, 7, 0 },
+ { 0, 8, 87, 7, 33, 0, 0 },
+ { 0, 1, 42, 7, 80, 5, 0 },
+ { 0, 10, 90, 7, 28, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 12, 93, 7, 23, 0, 0 },
+ { 0, 2, 55, 7, 68, 3, 0 },
+ { 0, 15, 94, 7, 19, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 0, 19, 7, 94, 15, 0 },
+ { 0, 3, 68, 7, 55, 2, 0 },
+ { 0, 0, 23, 7, 93, 12, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 28, 7, 90, 10, 0 },
+ { 0, 5, 80, 7, 42, 1, 0 },
+ { 0, 0, 33, 7, 87, 8, 0 },
+ { 0, 7, 84, 7, 36, 1, 0 },
+ { 0, 1, 39, 7, 82, 6, 0 },
+ { 0, 9, 88, 7, 31, 0, 0 },
+ { 0, 1, 45, 7, 78, 4, 0 },
+ { 0, 11, 91, 7, 26, 0, 0 },
+ { 0, 1, 52, 7, 72, 3, 0 },
+ { 0, 14, 93, 7, 21, 0, 0 },
+ { 0, 2, 59, 7, 65, 2, 0 } },
+ .odd = { { 0, 1, 38, 7, 83, 6, 0 },
+ { 0, 8, 88, 7, 32, 0, 0 },
+ { 0, 1, 44, 7, 78, 5, 0 },
+ { 0, 10, 91, 7, 27, 0, 0 },
+ { 0, 1, 50, 7, 73, 4, 0 },
+ { 0, 13, 93, 7, 22, 0, 0 },
+ { 0, 2, 57, 7, 66, 3, 0 },
+ { 0, 16, 94, 7, 18, 0, 0 },
+ { 0, 2, 64, 7, 60, 2, 0 },
+ { 0, 0, 20, 7, 93, 15, 0 },
+ { 0, 3, 70, 7, 54, 1, 0 },
+ { 0, 0, 24, 7, 92, 12, 0 },
+ { 0, 4, 76, 7, 47, 1, 0 },
+ { 0, 0, 29, 7, 90, 9, 0 },
+ { 0, 5, 81, 7, 41, 1, 0 },
+ { 0, 0, 35, 7, 86, 7, 0 },
+ { 0, 7, 86, 7, 35, 0, 0 },
+ { 0, 1, 41, 7, 81, 5, 0 },
+ { 0, 9, 90, 7, 29, 0, 0 },
+ { 0, 1, 47, 7, 76, 4, 0 },
+ { 0, 12, 92, 7, 24, 0, 0 },
+ { 0, 1, 54, 7, 70, 3, 0 },
+ { 0, 15, 93, 7, 20, 0, 0 },
+ { 0, 2, 60, 7, 64, 2, 0 },
+ { 0, 0, 18, 7, 94, 16, 0 },
+ { 0, 3, 66, 7, 57, 2, 0 },
+ { 0, 0, 22, 7, 93, 13, 0 },
+ { 0, 4, 73, 7, 50, 1, 0 },
+ { 0, 0, 27, 7, 91, 10, 0 },
+ { 0, 5, 78, 7, 44, 1, 0 },
+ { 0, 0, 32, 7, 88, 8, 0 },
+ { 0, 6, 83, 7, 38, 1, 0 } } },
+ .ptrn_arr = { { 0x99264993, 0x24c93264, 0x99264c93, 0x24c99264,
+ 0x9324c93 } },
+ .sample_patrn_length = 158,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 48) = 0.4 */
+ .hor_phase_arr = {
+ .even = { { 0, 18, 92, 7, 18, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 } },
+ .odd = { { 0, 1, 37, 7, 83, 7, 0 },
+ { 0, 7, 83, 7, 37, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 18, 92, 7, 18, 0, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 } },
+ .odd = { { 0, 1, 37, 7, 83, 7, 0 },
+ { 0, 7, 83, 7, 37, 1, 0 } } },
+ .ptrn_arr = { { 0x93 } },
+ .sample_patrn_length = 10,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 49) = 0.395062 */
+ .hor_phase_arr = {
+ .even = { { 0, 18, 92, 7, 18, 0, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 15, 91, 7, 22, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 12, 89, 7, 27, 0, 0 },
+ { 0, 1, 46, 7, 76, 5, 0 },
+ { 0, 9, 87, 7, 32, 0, 0 },
+ { 0, 1, 40, 7, 80, 7, 0 },
+ { 0, 7, 83, 7, 37, 1, 0 },
+ { 0, 1, 34, 7, 85, 8, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 29, 7, 88, 11, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 24, 7, 91, 13, 0 },
+ { 0, 3, 68, 7, 55, 2, 0 },
+ { 0, 0, 20, 7, 92, 16, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 16, 92, 7, 20, 0, 0 },
+ { 0, 2, 55, 7, 68, 3, 0 },
+ { 0, 13, 91, 7, 24, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 11, 88, 7, 29, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 8, 85, 7, 34, 1, 0 },
+ { 0, 1, 37, 7, 83, 7, 0 },
+ { 0, 7, 80, 7, 40, 1, 0 },
+ { 0, 0, 32, 7, 87, 9, 0 },
+ { 0, 5, 76, 7, 46, 1, 0 },
+ { 0, 0, 27, 7, 89, 12, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 },
+ { 0, 0, 22, 7, 91, 15, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 } },
+ .odd = { { 0, 1, 36, 7, 83, 8, 0 },
+ { 0, 6, 80, 7, 41, 1, 0 },
+ { 0, 0, 30, 7, 88, 10, 0 },
+ { 0, 5, 75, 7, 47, 1, 0 },
+ { 0, 0, 25, 7, 90, 13, 0 },
+ { 0, 4, 68, 7, 54, 2, 0 },
+ { 0, 0, 21, 7, 91, 16, 0 },
+ { 0, 3, 63, 7, 60, 2, 0 },
+ { 0, 17, 92, 7, 19, 0, 0 },
+ { 0, 2, 57, 7, 66, 3, 0 },
+ { 0, 14, 91, 7, 23, 0, 0 },
+ { 0, 1, 51, 7, 72, 4, 0 },
+ { 0, 11, 89, 7, 28, 0, 0 },
+ { 0, 1, 44, 7, 78, 5, 0 },
+ { 0, 9, 85, 7, 33, 1, 0 },
+ { 0, 1, 38, 7, 82, 7, 0 },
+ { 0, 7, 82, 7, 38, 1, 0 },
+ { 0, 1, 33, 7, 85, 9, 0 },
+ { 0, 5, 78, 7, 44, 1, 0 },
+ { 0, 0, 28, 7, 89, 11, 0 },
+ { 0, 4, 72, 7, 51, 1, 0 },
+ { 0, 0, 23, 7, 91, 14, 0 },
+ { 0, 3, 66, 7, 57, 2, 0 },
+ { 0, 0, 19, 7, 92, 17, 0 },
+ { 0, 2, 60, 7, 63, 3, 0 },
+ { 0, 16, 91, 7, 21, 0, 0 },
+ { 0, 2, 54, 7, 68, 4, 0 },
+ { 0, 13, 90, 7, 25, 0, 0 },
+ { 0, 1, 47, 7, 75, 5, 0 },
+ { 0, 10, 88, 7, 30, 0, 0 },
+ { 0, 1, 41, 7, 80, 6, 0 },
+ { 0, 8, 83, 7, 36, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 18, 92, 7, 18, 0, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 15, 91, 7, 22, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 12, 89, 7, 27, 0, 0 },
+ { 0, 1, 46, 7, 76, 5, 0 },
+ { 0, 9, 87, 7, 32, 0, 0 },
+ { 0, 1, 40, 7, 80, 7, 0 },
+ { 0, 7, 83, 7, 37, 1, 0 },
+ { 0, 1, 34, 7, 85, 8, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 29, 7, 88, 11, 0 },
+ { 0, 4, 74, 7, 49, 1, 0 },
+ { 0, 0, 24, 7, 91, 13, 0 },
+ { 0, 3, 68, 7, 55, 2, 0 },
+ { 0, 0, 20, 7, 92, 16, 0 },
+ { 0, 2, 62, 7, 62, 2, 0 },
+ { 0, 16, 92, 7, 20, 0, 0 },
+ { 0, 2, 55, 7, 68, 3, 0 },
+ { 0, 13, 91, 7, 24, 0, 0 },
+ { 0, 1, 49, 7, 74, 4, 0 },
+ { 0, 11, 88, 7, 29, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 8, 85, 7, 34, 1, 0 },
+ { 0, 1, 37, 7, 83, 7, 0 },
+ { 0, 7, 80, 7, 40, 1, 0 },
+ { 0, 0, 32, 7, 87, 9, 0 },
+ { 0, 5, 76, 7, 46, 1, 0 },
+ { 0, 0, 27, 7, 89, 12, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 },
+ { 0, 0, 22, 7, 91, 15, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 } },
+ .odd = { { 0, 1, 36, 7, 83, 8, 0 },
+ { 0, 6, 80, 7, 41, 1, 0 },
+ { 0, 0, 30, 7, 88, 10, 0 },
+ { 0, 5, 75, 7, 47, 1, 0 },
+ { 0, 0, 25, 7, 90, 13, 0 },
+ { 0, 4, 68, 7, 54, 2, 0 },
+ { 0, 0, 21, 7, 91, 16, 0 },
+ { 0, 3, 63, 7, 60, 2, 0 },
+ { 0, 17, 92, 7, 19, 0, 0 },
+ { 0, 2, 57, 7, 66, 3, 0 },
+ { 0, 14, 91, 7, 23, 0, 0 },
+ { 0, 1, 51, 7, 72, 4, 0 },
+ { 0, 11, 89, 7, 28, 0, 0 },
+ { 0, 1, 44, 7, 78, 5, 0 },
+ { 0, 9, 85, 7, 33, 1, 0 },
+ { 0, 1, 38, 7, 82, 7, 0 },
+ { 0, 7, 82, 7, 38, 1, 0 },
+ { 0, 1, 33, 7, 85, 9, 0 },
+ { 0, 5, 78, 7, 44, 1, 0 },
+ { 0, 0, 28, 7, 89, 11, 0 },
+ { 0, 4, 72, 7, 51, 1, 0 },
+ { 0, 0, 23, 7, 91, 14, 0 },
+ { 0, 3, 66, 7, 57, 2, 0 },
+ { 0, 0, 19, 7, 92, 17, 0 },
+ { 0, 2, 60, 7, 63, 3, 0 },
+ { 0, 16, 91, 7, 21, 0, 0 },
+ { 0, 2, 54, 7, 68, 4, 0 },
+ { 0, 13, 90, 7, 25, 0, 0 },
+ { 0, 1, 47, 7, 75, 5, 0 },
+ { 0, 10, 88, 7, 30, 0, 0 },
+ { 0, 1, 41, 7, 80, 6, 0 },
+ { 0, 8, 83, 7, 36, 1, 0 } } },
+ .ptrn_arr = { { 0xc9324c93, 0x92649924, 0x24c92649, 0x49324c93,
+ 0x92649926 } },
+ .sample_patrn_length = 162,
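+ /*
+  * The pattern lengths appear to follow 2 * (32 + n) / gcd(32, n)
+  * for a ratio of 32 / (32 + n): here n = 49 and gcd(32, 49) = 1,
+  * giving 2 * 81 = 162.
+  */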
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 50) = 0.390244 */
+ .hor_phase_arr = {
+ .even = { { 0, 19, 90, 7, 19, 0, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 12, 89, 7, 27, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 8, 82, 7, 37, 1, 0 },
+ { 0, 1, 32, 7, 85, 10, 0 },
+ { 0, 5, 73, 7, 49, 1, 0 },
+ { 0, 0, 23, 7, 90, 15, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 15, 90, 7, 23, 0, 0 },
+ { 0, 1, 49, 7, 73, 5, 0 },
+ { 0, 10, 85, 7, 32, 1, 0 },
+ { 0, 1, 37, 7, 82, 8, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 27, 7, 89, 12, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 } },
+ .odd = { { 0, 1, 35, 7, 83, 9, 0 },
+ { 0, 5, 76, 7, 46, 1, 0 },
+ { 0, 0, 25, 7, 89, 14, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 },
+ { 0, 17, 90, 7, 21, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 11, 88, 7, 29, 0, 0 },
+ { 0, 1, 40, 7, 80, 7, 0 },
+ { 0, 7, 80, 7, 40, 1, 0 },
+ { 0, 0, 29, 7, 88, 11, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 },
+ { 0, 0, 21, 7, 90, 17, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 14, 89, 7, 25, 0, 0 },
+ { 0, 1, 46, 7, 76, 5, 0 },
+ { 0, 9, 83, 7, 35, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 19, 90, 7, 19, 0, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 12, 89, 7, 27, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 8, 82, 7, 37, 1, 0 },
+ { 0, 1, 32, 7, 85, 10, 0 },
+ { 0, 5, 73, 7, 49, 1, 0 },
+ { 0, 0, 23, 7, 90, 15, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 15, 90, 7, 23, 0, 0 },
+ { 0, 1, 49, 7, 73, 5, 0 },
+ { 0, 10, 85, 7, 32, 1, 0 },
+ { 0, 1, 37, 7, 82, 8, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 27, 7, 89, 12, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 } },
+ .odd = { { 0, 1, 35, 7, 83, 9, 0 },
+ { 0, 5, 76, 7, 46, 1, 0 },
+ { 0, 0, 25, 7, 89, 14, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 },
+ { 0, 17, 90, 7, 21, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 11, 88, 7, 29, 0, 0 },
+ { 0, 1, 40, 7, 80, 7, 0 },
+ { 0, 7, 80, 7, 40, 1, 0 },
+ { 0, 0, 29, 7, 88, 11, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 },
+ { 0, 0, 21, 7, 90, 17, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 14, 89, 7, 25, 0, 0 },
+ { 0, 1, 46, 7, 76, 5, 0 },
+ { 0, 9, 83, 7, 35, 1, 0 } } },
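+ /*
+  * The phase counts likewise seem to track 32 / gcd(32, n): with
+  * n = 50 and gcd(32, 50) = 2 this entry needs only 16 even and
+  * 16 odd phases instead of the full 32.
+  */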
+ .ptrn_arr = { { 0x49924c93, 0x9324c926, 0x9264 } },
+ .sample_patrn_length = 82,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 51) = 0.385542 */
+ .hor_phase_arr = {
+ .even = { { 0, 19, 90, 7, 19, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 10, 85, 7, 32, 1, 0 },
+ { 0, 1, 35, 7, 83, 9, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 },
+ { 0, 0, 21, 7, 90, 17, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 11, 87, 7, 30, 0, 0 },
+ { 0, 1, 38, 7, 81, 8, 0 },
+ { 0, 6, 75, 7, 46, 1, 0 },
+ { 0, 0, 23, 7, 89, 16, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 13, 87, 7, 28, 0, 0 },
+ { 0, 1, 41, 7, 79, 7, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 25, 7, 89, 14, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 14, 89, 7, 25, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 7, 79, 7, 41, 1, 0 },
+ { 0, 0, 28, 7, 87, 13, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 },
+ { 0, 16, 89, 7, 23, 0, 0 },
+ { 0, 1, 46, 7, 75, 6, 0 },
+ { 0, 8, 81, 7, 38, 1, 0 },
+ { 0, 0, 30, 7, 87, 11, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 17, 90, 7, 21, 0, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 9, 83, 7, 35, 1, 0 },
+ { 0, 1, 32, 7, 85, 10, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 } },
+ .odd = { { 0, 1, 34, 7, 83, 10, 0 },
+ { 0, 5, 70, 7, 51, 2, 0 },
+ { 0, 0, 20, 7, 90, 18, 0 },
+ { 0, 2, 54, 7, 68, 4, 0 },
+ { 0, 11, 85, 7, 31, 1, 0 },
+ { 0, 1, 36, 7, 82, 9, 0 },
+ { 0, 5, 74, 7, 48, 1, 0 },
+ { 0, 0, 22, 7, 89, 17, 0 },
+ { 0, 2, 57, 7, 65, 4, 0 },
+ { 0, 12, 87, 7, 29, 0, 0 },
+ { 0, 1, 39, 7, 80, 8, 0 },
+ { 0, 6, 76, 7, 45, 1, 0 },
+ { 0, 0, 24, 7, 89, 15, 0 },
+ { 0, 3, 60, 7, 62, 3, 0 },
+ { 0, 13, 89, 7, 26, 0, 0 },
+ { 0, 1, 42, 7, 78, 7, 0 },
+ { 0, 7, 78, 7, 42, 1, 0 },
+ { 0, 0, 26, 7, 89, 13, 0 },
+ { 0, 3, 62, 7, 60, 3, 0 },
+ { 0, 15, 89, 7, 24, 0, 0 },
+ { 0, 1, 45, 7, 76, 6, 0 },
+ { 0, 8, 80, 7, 39, 1, 0 },
+ { 0, 0, 29, 7, 87, 12, 0 },
+ { 0, 4, 65, 7, 57, 2, 0 },
+ { 0, 17, 89, 7, 22, 0, 0 },
+ { 0, 1, 48, 7, 74, 5, 0 },
+ { 0, 9, 82, 7, 36, 1, 0 },
+ { 0, 1, 31, 7, 85, 11, 0 },
+ { 0, 4, 68, 7, 54, 2, 0 },
+ { 0, 18, 90, 7, 20, 0, 0 },
+ { 0, 2, 51, 7, 70, 5, 0 },
+ { 0, 10, 83, 7, 34, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 19, 90, 7, 19, 0, 0 },
+ { 0, 2, 52, 7, 70, 4, 0 },
+ { 0, 10, 85, 7, 32, 1, 0 },
+ { 0, 1, 35, 7, 83, 9, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 },
+ { 0, 0, 21, 7, 90, 17, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 11, 87, 7, 30, 0, 0 },
+ { 0, 1, 38, 7, 81, 8, 0 },
+ { 0, 6, 75, 7, 46, 1, 0 },
+ { 0, 0, 23, 7, 89, 16, 0 },
+ { 0, 2, 58, 7, 65, 3, 0 },
+ { 0, 13, 87, 7, 28, 0, 0 },
+ { 0, 1, 41, 7, 79, 7, 0 },
+ { 0, 6, 78, 7, 43, 1, 0 },
+ { 0, 0, 25, 7, 89, 14, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 14, 89, 7, 25, 0, 0 },
+ { 0, 1, 43, 7, 78, 6, 0 },
+ { 0, 7, 79, 7, 41, 1, 0 },
+ { 0, 0, 28, 7, 87, 13, 0 },
+ { 0, 3, 65, 7, 58, 2, 0 },
+ { 0, 16, 89, 7, 23, 0, 0 },
+ { 0, 1, 46, 7, 75, 6, 0 },
+ { 0, 8, 81, 7, 38, 1, 0 },
+ { 0, 0, 30, 7, 87, 11, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 17, 90, 7, 21, 0, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 9, 83, 7, 35, 1, 0 },
+ { 0, 1, 32, 7, 85, 10, 0 },
+ { 0, 4, 70, 7, 52, 2, 0 } },
+ .odd = { { 0, 1, 34, 7, 83, 10, 0 },
+ { 0, 5, 70, 7, 51, 2, 0 },
+ { 0, 0, 20, 7, 90, 18, 0 },
+ { 0, 2, 54, 7, 68, 4, 0 },
+ { 0, 11, 85, 7, 31, 1, 0 },
+ { 0, 1, 36, 7, 82, 9, 0 },
+ { 0, 5, 74, 7, 48, 1, 0 },
+ { 0, 0, 22, 7, 89, 17, 0 },
+ { 0, 2, 57, 7, 65, 4, 0 },
+ { 0, 12, 87, 7, 29, 0, 0 },
+ { 0, 1, 39, 7, 80, 8, 0 },
+ { 0, 6, 76, 7, 45, 1, 0 },
+ { 0, 0, 24, 7, 89, 15, 0 },
+ { 0, 3, 60, 7, 62, 3, 0 },
+ { 0, 13, 89, 7, 26, 0, 0 },
+ { 0, 1, 42, 7, 78, 7, 0 },
+ { 0, 7, 78, 7, 42, 1, 0 },
+ { 0, 0, 26, 7, 89, 13, 0 },
+ { 0, 3, 62, 7, 60, 3, 0 },
+ { 0, 15, 89, 7, 24, 0, 0 },
+ { 0, 1, 45, 7, 76, 6, 0 },
+ { 0, 8, 80, 7, 39, 1, 0 },
+ { 0, 0, 29, 7, 87, 12, 0 },
+ { 0, 4, 65, 7, 57, 2, 0 },
+ { 0, 17, 89, 7, 22, 0, 0 },
+ { 0, 1, 48, 7, 74, 5, 0 },
+ { 0, 9, 82, 7, 36, 1, 0 },
+ { 0, 1, 31, 7, 85, 11, 0 },
+ { 0, 4, 68, 7, 54, 2, 0 },
+ { 0, 18, 90, 7, 20, 0, 0 },
+ { 0, 2, 51, 7, 70, 5, 0 },
+ { 0, 10, 83, 7, 34, 1, 0 } } },
+ .ptrn_arr = { { 0x49924c93, 0xc9264932, 0x93249924, 0x924c9264,
+ 0x26493249, 0x9 } },
+ .sample_patrn_length = 166,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 52) = 0.380952 */
+ .hor_phase_arr = {
+ .even = { { 0, 20, 88, 7, 20, 0, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 8, 81, 7, 38, 1, 0 },
+ { 0, 0, 28, 7, 87, 13, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 13, 87, 7, 28, 0, 0 },
+ { 0, 1, 38, 7, 81, 8, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 } },
+ .odd = { { 0, 1, 33, 7, 83, 11, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 16, 88, 7, 24, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 },
+ { 0, 0, 24, 7, 88, 16, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 11, 83, 7, 33, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 20, 88, 7, 20, 0, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 8, 81, 7, 38, 1, 0 },
+ { 0, 0, 28, 7, 87, 13, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 13, 87, 7, 28, 0, 0 },
+ { 0, 1, 38, 7, 81, 8, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 } },
+ .odd = { { 0, 1, 33, 7, 83, 11, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 16, 88, 7, 24, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 },
+ { 0, 0, 24, 7, 88, 16, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 11, 83, 7, 33, 1, 0 } } },
+ .ptrn_arr = { { 0x4c926493, 0x92 } },
+ .sample_patrn_length = 42,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 53) = 0.376471 */
+ .hor_phase_arr = {
+ .even = { { 0, 20, 88, 7, 20, 0, 0 },
+ { 0, 2, 47, 7, 73, 6, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 },
+ { 0, 0, 22, 7, 88, 18, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 8, 78, 7, 41, 1, 0 },
+ { 0, 0, 24, 7, 87, 17, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 9, 80, 7, 38, 1, 0 },
+ { 0, 0, 26, 7, 87, 15, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 10, 81, 7, 36, 1, 0 },
+ { 0, 1, 28, 7, 85, 14, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 11, 83, 7, 33, 1, 0 },
+ { 0, 1, 31, 7, 84, 12, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 12, 84, 7, 31, 1, 0 },
+ { 0, 1, 33, 7, 83, 11, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 14, 85, 7, 28, 1, 0 },
+ { 0, 1, 36, 7, 81, 10, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 15, 87, 7, 26, 0, 0 },
+ { 0, 1, 38, 7, 80, 9, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 17, 87, 7, 24, 0, 0 },
+ { 0, 1, 41, 7, 78, 8, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 },
+ { 0, 18, 88, 7, 22, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 6, 73, 7, 47, 2, 0 } },
+ .odd = { { 0, 1, 32, 7, 83, 12, 0 },
+ { 0, 3, 63, 7, 59, 3, 0 },
+ { 0, 13, 84, 7, 30, 1, 0 },
+ { 0, 1, 34, 7, 83, 10, 0 },
+ { 0, 4, 64, 7, 57, 3, 0 },
+ { 0, 14, 87, 7, 27, 0, 0 },
+ { 0, 1, 37, 7, 81, 9, 0 },
+ { 0, 5, 67, 7, 54, 2, 0 },
+ { 0, 16, 87, 7, 25, 0, 0 },
+ { 0, 1, 40, 7, 79, 8, 0 },
+ { 0, 5, 70, 7, 51, 2, 0 },
+ { 0, 18, 87, 7, 23, 0, 0 },
+ { 0, 1, 42, 7, 78, 7, 0 },
+ { 0, 6, 72, 7, 48, 2, 0 },
+ { 0, 19, 88, 7, 21, 0, 0 },
+ { 0, 1, 45, 7, 75, 7, 0 },
+ { 0, 7, 75, 7, 45, 1, 0 },
+ { 0, 0, 21, 7, 88, 19, 0 },
+ { 0, 2, 48, 7, 72, 6, 0 },
+ { 0, 7, 78, 7, 42, 1, 0 },
+ { 0, 0, 23, 7, 87, 18, 0 },
+ { 0, 2, 51, 7, 70, 5, 0 },
+ { 0, 8, 79, 7, 40, 1, 0 },
+ { 0, 0, 25, 7, 87, 16, 0 },
+ { 0, 2, 54, 7, 67, 5, 0 },
+ { 0, 9, 81, 7, 37, 1, 0 },
+ { 0, 0, 27, 7, 87, 14, 0 },
+ { 0, 3, 57, 7, 64, 4, 0 },
+ { 0, 10, 83, 7, 34, 1, 0 },
+ { 0, 1, 30, 7, 84, 13, 0 },
+ { 0, 3, 59, 7, 63, 3, 0 },
+ { 0, 12, 83, 7, 32, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 20, 88, 7, 20, 0, 0 },
+ { 0, 2, 47, 7, 73, 6, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 },
+ { 0, 0, 22, 7, 88, 18, 0 },
+ { 0, 2, 49, 7, 72, 5, 0 },
+ { 0, 8, 78, 7, 41, 1, 0 },
+ { 0, 0, 24, 7, 87, 17, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 9, 80, 7, 38, 1, 0 },
+ { 0, 0, 26, 7, 87, 15, 0 },
+ { 0, 2, 55, 7, 67, 4, 0 },
+ { 0, 10, 81, 7, 36, 1, 0 },
+ { 0, 1, 28, 7, 85, 14, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 11, 83, 7, 33, 1, 0 },
+ { 0, 1, 31, 7, 84, 12, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 12, 84, 7, 31, 1, 0 },
+ { 0, 1, 33, 7, 83, 11, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 14, 85, 7, 28, 1, 0 },
+ { 0, 1, 36, 7, 81, 10, 0 },
+ { 0, 4, 67, 7, 55, 2, 0 },
+ { 0, 15, 87, 7, 26, 0, 0 },
+ { 0, 1, 38, 7, 80, 9, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 17, 87, 7, 24, 0, 0 },
+ { 0, 1, 41, 7, 78, 8, 0 },
+ { 0, 5, 72, 7, 49, 2, 0 },
+ { 0, 18, 88, 7, 22, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 6, 73, 7, 47, 2, 0 } },
+ .odd = { { 0, 1, 32, 7, 83, 12, 0 },
+ { 0, 3, 63, 7, 59, 3, 0 },
+ { 0, 13, 84, 7, 30, 1, 0 },
+ { 0, 1, 34, 7, 83, 10, 0 },
+ { 0, 4, 64, 7, 57, 3, 0 },
+ { 0, 14, 87, 7, 27, 0, 0 },
+ { 0, 1, 37, 7, 81, 9, 0 },
+ { 0, 5, 67, 7, 54, 2, 0 },
+ { 0, 16, 87, 7, 25, 0, 0 },
+ { 0, 1, 40, 7, 79, 8, 0 },
+ { 0, 5, 70, 7, 51, 2, 0 },
+ { 0, 18, 87, 7, 23, 0, 0 },
+ { 0, 1, 42, 7, 78, 7, 0 },
+ { 0, 6, 72, 7, 48, 2, 0 },
+ { 0, 19, 88, 7, 21, 0, 0 },
+ { 0, 1, 45, 7, 75, 7, 0 },
+ { 0, 7, 75, 7, 45, 1, 0 },
+ { 0, 0, 21, 7, 88, 19, 0 },
+ { 0, 2, 48, 7, 72, 6, 0 },
+ { 0, 7, 78, 7, 42, 1, 0 },
+ { 0, 0, 23, 7, 87, 18, 0 },
+ { 0, 2, 51, 7, 70, 5, 0 },
+ { 0, 8, 79, 7, 40, 1, 0 },
+ { 0, 0, 25, 7, 87, 16, 0 },
+ { 0, 2, 54, 7, 67, 5, 0 },
+ { 0, 9, 81, 7, 37, 1, 0 },
+ { 0, 0, 27, 7, 87, 14, 0 },
+ { 0, 3, 57, 7, 64, 4, 0 },
+ { 0, 10, 83, 7, 34, 1, 0 },
+ { 0, 1, 30, 7, 84, 13, 0 },
+ { 0, 3, 59, 7, 63, 3, 0 },
+ { 0, 12, 83, 7, 32, 1, 0 } } },
+ .ptrn_arr = { { 0x64926493, 0x64926492, 0x4c926492, 0x4c924c92,
+ 0x4c924c92, 0x92 } },
+ .sample_patrn_length = 170,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 54) = 0.372093 */
+ .hor_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 6, 71, 7, 49, 2, 0 },
+ { 0, 17, 86, 7, 25, 0, 0 },
+ { 0, 1, 39, 7, 79, 9, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 14, 84, 7, 29, 1, 0 },
+ { 0, 1, 34, 7, 82, 11, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 11, 82, 7, 34, 1, 0 },
+ { 0, 1, 29, 7, 84, 14, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 9, 79, 7, 39, 1, 0 },
+ { 0, 0, 25, 7, 86, 17, 0 },
+ { 0, 2, 49, 7, 71, 6, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 } },
+ .odd = { { 0, 1, 31, 7, 83, 13, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 10, 81, 7, 36, 1, 0 },
+ { 0, 0, 27, 7, 85, 16, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 8, 78, 7, 41, 1, 0 },
+ { 0, 0, 23, 7, 86, 19, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 19, 86, 7, 23, 0, 0 },
+ { 0, 1, 41, 7, 78, 8, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 16, 85, 7, 27, 0, 0 },
+ { 0, 1, 36, 7, 81, 10, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 13, 83, 7, 31, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 },
+ { 0, 1, 44, 7, 76, 7, 0 },
+ { 0, 6, 71, 7, 49, 2, 0 },
+ { 0, 17, 86, 7, 25, 0, 0 },
+ { 0, 1, 39, 7, 79, 9, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 14, 84, 7, 29, 1, 0 },
+ { 0, 1, 34, 7, 82, 11, 0 },
+ { 0, 3, 61, 7, 61, 3, 0 },
+ { 0, 11, 82, 7, 34, 1, 0 },
+ { 0, 1, 29, 7, 84, 14, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 9, 79, 7, 39, 1, 0 },
+ { 0, 0, 25, 7, 86, 17, 0 },
+ { 0, 2, 49, 7, 71, 6, 0 },
+ { 0, 7, 76, 7, 44, 1, 0 } },
+ .odd = { { 0, 1, 31, 7, 83, 13, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 10, 81, 7, 36, 1, 0 },
+ { 0, 0, 27, 7, 85, 16, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 8, 78, 7, 41, 1, 0 },
+ { 0, 0, 23, 7, 86, 19, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 19, 86, 7, 23, 0, 0 },
+ { 0, 1, 41, 7, 78, 8, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 16, 85, 7, 27, 0, 0 },
+ { 0, 1, 36, 7, 81, 10, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 13, 83, 7, 31, 1, 0 } } },
+ .ptrn_arr = { { 0x24932493, 0x24992493, 0x92499 } },
+ .sample_patrn_length = 86,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 55) = 0.367816 */
+ .hor_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 },
+ { 0, 1, 41, 7, 77, 9, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 13, 82, 7, 32, 1, 0 },
+ { 0, 1, 29, 7, 83, 15, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 19, 86, 7, 23, 0, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 12, 81, 7, 34, 1, 0 },
+ { 0, 1, 27, 7, 84, 16, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 18, 85, 7, 25, 0, 0 },
+ { 0, 1, 36, 7, 80, 11, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 11, 80, 7, 36, 1, 0 },
+ { 0, 0, 25, 7, 85, 18, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 16, 84, 7, 27, 1, 0 },
+ { 0, 1, 34, 7, 81, 12, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 0, 23, 7, 86, 19, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 15, 83, 7, 29, 1, 0 },
+ { 0, 1, 32, 7, 82, 13, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 9, 77, 7, 41, 1, 0 } },
+ .odd = { { 0, 1, 30, 7, 83, 14, 0 },
+ { 0, 3, 54, 7, 66, 5, 0 },
+ { 0, 8, 76, 7, 43, 1, 0 },
+ { 0, 20, 86, 7, 22, 0, 0 },
+ { 0, 1, 40, 7, 78, 9, 0 },
+ { 0, 4, 65, 7, 56, 3, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 1, 28, 7, 84, 15, 0 },
+ { 0, 2, 51, 7, 69, 6, 0 },
+ { 0, 7, 74, 7, 45, 2, 0 },
+ { 0, 18, 86, 7, 24, 0, 0 },
+ { 0, 1, 38, 7, 79, 10, 0 },
+ { 0, 4, 62, 7, 59, 3, 0 },
+ { 0, 11, 81, 7, 35, 1, 0 },
+ { 0, 0, 26, 7, 85, 17, 0 },
+ { 0, 2, 48, 7, 72, 6, 0 },
+ { 0, 6, 72, 7, 48, 2, 0 },
+ { 0, 17, 85, 7, 26, 0, 0 },
+ { 0, 1, 35, 7, 81, 11, 0 },
+ { 0, 3, 59, 7, 62, 4, 0 },
+ { 0, 10, 79, 7, 38, 1, 0 },
+ { 0, 0, 24, 7, 86, 18, 0 },
+ { 0, 2, 45, 7, 74, 7, 0 },
+ { 0, 6, 69, 7, 51, 2, 0 },
+ { 0, 15, 84, 7, 28, 1, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 3, 56, 7, 65, 4, 0 },
+ { 0, 9, 78, 7, 40, 1, 0 },
+ { 0, 0, 22, 7, 86, 20, 0 },
+ { 0, 1, 43, 7, 76, 8, 0 },
+ { 0, 5, 66, 7, 54, 3, 0 },
+ { 0, 14, 83, 7, 30, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 },
+ { 0, 1, 41, 7, 77, 9, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 13, 82, 7, 32, 1, 0 },
+ { 0, 1, 29, 7, 83, 15, 0 },
+ { 0, 2, 52, 7, 69, 5, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 19, 86, 7, 23, 0, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 63, 7, 58, 3, 0 },
+ { 0, 12, 81, 7, 34, 1, 0 },
+ { 0, 1, 27, 7, 84, 16, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 18, 85, 7, 25, 0, 0 },
+ { 0, 1, 36, 7, 80, 11, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 11, 80, 7, 36, 1, 0 },
+ { 0, 0, 25, 7, 85, 18, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 16, 84, 7, 27, 1, 0 },
+ { 0, 1, 34, 7, 81, 12, 0 },
+ { 0, 3, 58, 7, 63, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 0, 23, 7, 86, 19, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 5, 69, 7, 52, 2, 0 },
+ { 0, 15, 83, 7, 29, 1, 0 },
+ { 0, 1, 32, 7, 82, 13, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 9, 77, 7, 41, 1, 0 } },
+ .odd = { { 0, 1, 30, 7, 83, 14, 0 },
+ { 0, 3, 54, 7, 66, 5, 0 },
+ { 0, 8, 76, 7, 43, 1, 0 },
+ { 0, 20, 86, 7, 22, 0, 0 },
+ { 0, 1, 40, 7, 78, 9, 0 },
+ { 0, 4, 65, 7, 56, 3, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 1, 28, 7, 84, 15, 0 },
+ { 0, 2, 51, 7, 69, 6, 0 },
+ { 0, 7, 74, 7, 45, 2, 0 },
+ { 0, 18, 86, 7, 24, 0, 0 },
+ { 0, 1, 38, 7, 79, 10, 0 },
+ { 0, 4, 62, 7, 59, 3, 0 },
+ { 0, 11, 81, 7, 35, 1, 0 },
+ { 0, 0, 26, 7, 85, 17, 0 },
+ { 0, 2, 48, 7, 72, 6, 0 },
+ { 0, 6, 72, 7, 48, 2, 0 },
+ { 0, 17, 85, 7, 26, 0, 0 },
+ { 0, 1, 35, 7, 81, 11, 0 },
+ { 0, 3, 59, 7, 62, 4, 0 },
+ { 0, 10, 79, 7, 38, 1, 0 },
+ { 0, 0, 24, 7, 86, 18, 0 },
+ { 0, 2, 45, 7, 74, 7, 0 },
+ { 0, 6, 69, 7, 51, 2, 0 },
+ { 0, 15, 84, 7, 28, 1, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 3, 56, 7, 65, 4, 0 },
+ { 0, 9, 78, 7, 40, 1, 0 },
+ { 0, 0, 22, 7, 86, 20, 0 },
+ { 0, 1, 43, 7, 76, 8, 0 },
+ { 0, 5, 66, 7, 54, 3, 0 },
+ { 0, 14, 83, 7, 30, 1, 0 } } },
+ .ptrn_arr = { { 0x24992493, 0x264924c9, 0x92493249, 0x924c9249,
+ 0x93249264, 0x924 } },
+ .sample_patrn_length = 174,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 56) = 0.363636 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 } },
+ .odd = { { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 } },
+ .odd = { { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 } } },
+ .ptrn_arr = { { 0x92493 } },
+ .sample_patrn_length = 22,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 57) = 0.359551 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 37, 7, 79, 11, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 20, 84, 7, 24, 0, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 7, 69, 7, 50, 2, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 1, 26, 7, 83, 18, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 18, 83, 7, 26, 1, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 2, 50, 7, 69, 7, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 0, 24, 7, 84, 20, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 11, 79, 7, 37, 1, 0 } },
+ .odd = { { 0, 1, 29, 7, 82, 16, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 5, 64, 7, 56, 3, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 1, 36, 7, 79, 12, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 15, 81, 7, 31, 1, 0 },
+ { 0, 1, 27, 7, 82, 18, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 4, 61, 7, 59, 4, 0 },
+ { 0, 10, 75, 7, 41, 2, 0 },
+ { 0, 19, 83, 7, 25, 1, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 3, 51, 7, 68, 6, 0 },
+ { 0, 6, 68, 7, 51, 3, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 1, 25, 7, 83, 19, 0 },
+ { 0, 2, 41, 7, 75, 10, 0 },
+ { 0, 4, 59, 7, 61, 4, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 18, 82, 7, 27, 1, 0 },
+ { 0, 1, 31, 7, 81, 15, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 12, 79, 7, 36, 1, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 3, 56, 7, 64, 5, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 16, 82, 7, 29, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 37, 7, 79, 11, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 20, 84, 7, 24, 0, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 7, 69, 7, 50, 2, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 1, 26, 7, 83, 18, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 18, 83, 7, 26, 1, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 2, 50, 7, 69, 7, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 0, 24, 7, 84, 20, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 11, 79, 7, 37, 1, 0 } },
+ .odd = { { 0, 1, 29, 7, 82, 16, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 5, 64, 7, 56, 3, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 1, 36, 7, 79, 12, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 15, 81, 7, 31, 1, 0 },
+ { 0, 1, 27, 7, 82, 18, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 4, 61, 7, 59, 4, 0 },
+ { 0, 10, 75, 7, 41, 2, 0 },
+ { 0, 19, 83, 7, 25, 1, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 3, 51, 7, 68, 6, 0 },
+ { 0, 6, 68, 7, 51, 3, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 1, 25, 7, 83, 19, 0 },
+ { 0, 2, 41, 7, 75, 10, 0 },
+ { 0, 4, 59, 7, 61, 4, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 18, 82, 7, 27, 1, 0 },
+ { 0, 1, 31, 7, 81, 15, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 12, 79, 7, 36, 1, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 3, 56, 7, 64, 5, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 16, 82, 7, 29, 1, 0 } } },
+ .ptrn_arr = { { 0x26492493, 0x924c9249, 0x49249924, 0x64924932,
+ 0x24c92492, 0x9249 } },
+ .sample_patrn_length = 178,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 58) = 0.355556 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 6, 64, 7, 55, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 19, 82, 7, 26, 1, 0 },
+ { 0, 1, 30, 7, 81, 16, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 16, 81, 7, 30, 1, 0 },
+ { 0, 1, 26, 7, 82, 19, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 55, 7, 64, 6, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 } },
+ .odd = { { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 14, 80, 7, 33, 1, 0 },
+ { 0, 1, 24, 7, 82, 21, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 21, 82, 7, 24, 1, 0 },
+ { 0, 1, 33, 7, 80, 14, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 6, 64, 7, 55, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 19, 82, 7, 26, 1, 0 },
+ { 0, 1, 30, 7, 81, 16, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 16, 81, 7, 30, 1, 0 },
+ { 0, 1, 26, 7, 82, 19, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 55, 7, 64, 6, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 } },
+ .odd = { { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 14, 80, 7, 33, 1, 0 },
+ { 0, 1, 24, 7, 82, 21, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 21, 82, 7, 24, 1, 0 },
+ { 0, 1, 33, 7, 80, 14, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 } } },
+ .ptrn_arr = { { 0x32492493, 0x99249249, 0x924924 } },
+ .sample_patrn_length = 90,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 59) = 0.351648 */
+ .hor_phase_arr = {
+ .even = { { 0, 23, 82, 7, 23, 0, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 19, 81, 7, 27, 1, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 52, 7, 66, 7, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 16, 80, 7, 31, 1, 0 },
+ { 0, 1, 25, 7, 81, 21, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 21, 81, 7, 25, 1, 0 },
+ { 0, 1, 31, 7, 80, 16, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 7, 66, 7, 52, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 1, 27, 7, 81, 19, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 } },
+ .odd = { { 0, 1, 28, 7, 81, 18, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 6, 62, 7, 56, 4, 0 },
+ { 0, 10, 73, 7, 43, 2, 0 },
+ { 0, 15, 80, 7, 32, 1, 0 },
+ { 0, 1, 24, 7, 81, 22, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 2, 46, 7, 71, 9, 0 },
+ { 0, 4, 58, 7, 61, 5, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 13, 78, 7, 36, 1, 0 },
+ { 0, 20, 81, 7, 26, 1, 0 },
+ { 0, 1, 30, 7, 80, 17, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 17, 80, 7, 30, 1, 0 },
+ { 0, 1, 26, 7, 81, 20, 0 },
+ { 0, 1, 36, 7, 78, 13, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 5, 61, 7, 58, 4, 0 },
+ { 0, 9, 71, 7, 46, 2, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 22, 81, 7, 24, 1, 0 },
+ { 0, 1, 32, 7, 80, 15, 0 },
+ { 0, 2, 43, 7, 73, 10, 0 },
+ { 0, 4, 56, 7, 62, 6, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 18, 81, 7, 28, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 23, 82, 7, 23, 0, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 19, 81, 7, 27, 1, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 52, 7, 66, 7, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 16, 80, 7, 31, 1, 0 },
+ { 0, 1, 25, 7, 81, 21, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 21, 81, 7, 25, 1, 0 },
+ { 0, 1, 31, 7, 80, 16, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 7, 66, 7, 52, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 1, 27, 7, 81, 19, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 } },
+ .odd = { { 0, 1, 28, 7, 81, 18, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 6, 62, 7, 56, 4, 0 },
+ { 0, 10, 73, 7, 43, 2, 0 },
+ { 0, 15, 80, 7, 32, 1, 0 },
+ { 0, 1, 24, 7, 81, 22, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 2, 46, 7, 71, 9, 0 },
+ { 0, 4, 58, 7, 61, 5, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 13, 78, 7, 36, 1, 0 },
+ { 0, 20, 81, 7, 26, 1, 0 },
+ { 0, 1, 30, 7, 80, 17, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 17, 80, 7, 30, 1, 0 },
+ { 0, 1, 26, 7, 81, 20, 0 },
+ { 0, 1, 36, 7, 78, 13, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 5, 61, 7, 58, 4, 0 },
+ { 0, 9, 71, 7, 46, 2, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 22, 81, 7, 24, 1, 0 },
+ { 0, 1, 32, 7, 80, 15, 0 },
+ { 0, 2, 43, 7, 73, 10, 0 },
+ { 0, 4, 56, 7, 62, 6, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 18, 81, 7, 28, 1, 0 } } },
+ .ptrn_arr = { { 0x92492493, 0x4924924c, 0x24924992, 0x92493249,
+ 0x49264924, 0x92492 } },
+ .sample_patrn_length = 182,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 60) = 0.347826 */
+ .hor_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 } },
+ .odd = { { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 1, 35, 7, 78, 14, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 14, 78, 7, 35, 1, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 } },
+ .odd = { { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 1, 35, 7, 78, 14, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 14, 78, 7, 35, 1, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 } } },
+ .ptrn_arr = { { 0x92492493, 0x924 } },
+ .sample_patrn_length = 46,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 61) = 0.344086 */
+ .hor_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 1, 36, 7, 77, 14, 0 },
+ { 0, 2, 42, 7, 73, 11, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 16, 78, 7, 33, 1, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 1, 25, 7, 79, 22, 1 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 2, 45, 7, 71, 10, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 10, 71, 7, 45, 2, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 1, 22, 79, 7, 25, 1, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 1, 33, 7, 78, 16, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 11, 73, 7, 42, 2, 0 },
+ { 0, 14, 77, 7, 36, 1, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 } },
+ .odd = { { 0, 1, 26, 7, 80, 21, 0 },
+ { 0, 1, 32, 7, 79, 16, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 3, 46, 7, 70, 9, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 8, 66, 7, 51, 3, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 18, 79, 7, 30, 1, 0 },
+ { 1, 23, 79, 7, 24, 1, 0 },
+ { 0, 1, 28, 7, 80, 19, 0 },
+ { 0, 1, 35, 7, 77, 15, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 15, 77, 7, 35, 1, 0 },
+ { 0, 19, 80, 7, 28, 1, 0 },
+ { 0, 1, 24, 7, 79, 23, 1 },
+ { 0, 1, 30, 7, 79, 18, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 3, 51, 7, 66, 8, 0 },
+ { 0, 5, 58, 7, 60, 5, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 9, 70, 7, 46, 3, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 16, 79, 7, 32, 1, 0 },
+ { 0, 21, 80, 7, 26, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 1, 36, 7, 77, 14, 0 },
+ { 0, 2, 42, 7, 73, 11, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 16, 78, 7, 33, 1, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 1, 25, 7, 79, 22, 1 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 2, 45, 7, 71, 10, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 10, 71, 7, 45, 2, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 1, 22, 79, 7, 25, 1, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 1, 33, 7, 78, 16, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 11, 73, 7, 42, 2, 0 },
+ { 0, 14, 77, 7, 36, 1, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 } },
+ .odd = { { 0, 1, 26, 7, 80, 21, 0 },
+ { 0, 1, 32, 7, 79, 16, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 3, 46, 7, 70, 9, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 8, 66, 7, 51, 3, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 18, 79, 7, 30, 1, 0 },
+ { 1, 23, 79, 7, 24, 1, 0 },
+ { 0, 1, 28, 7, 80, 19, 0 },
+ { 0, 1, 35, 7, 77, 15, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 15, 77, 7, 35, 1, 0 },
+ { 0, 19, 80, 7, 28, 1, 0 },
+ { 0, 1, 24, 7, 79, 23, 1 },
+ { 0, 1, 30, 7, 79, 18, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 3, 51, 7, 66, 8, 0 },
+ { 0, 5, 58, 7, 60, 5, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 9, 70, 7, 46, 3, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 16, 79, 7, 32, 1, 0 },
+ { 0, 21, 80, 7, 26, 1, 0 } } },
+ .ptrn_arr = { { 0x92492493, 0x64924924, 0x92492492, 0x4c924924,
+ 0x92492492, 0x924924 } },
+ .sample_patrn_length = 186,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 62) = 0.340426 */
+ .hor_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 1, 28, 7, 79, 20, 0 },
+ { 0, 1, 32, 7, 78, 17, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 17, 78, 7, 32, 1, 0 },
+ { 0, 20, 79, 7, 28, 1, 0 } },
+ .odd = { { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 1, 28, 7, 79, 20, 0 },
+ { 0, 1, 32, 7, 78, 17, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 17, 78, 7, 32, 1, 0 },
+ { 0, 20, 79, 7, 28, 1, 0 } },
+ .odd = { { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 } } },
+ .ptrn_arr = { { 0x92492493, 0x24924924, 0x9249249 } },
+ .sample_patrn_length = 94,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 63) = 0.336842 */
+ .hor_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 0, 2, 40, 7, 73, 13, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 13, 73, 7, 40, 2, 0 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 } },
+ .odd = { { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 1, 27, 7, 77, 22, 1 },
+ { 0, 1, 29, 7, 78, 20, 0 },
+ { 0, 1, 31, 7, 78, 18, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 0, 2, 42, 7, 72, 12, 0 },
+ { 0, 3, 44, 7, 70, 11, 0 },
+ { 0, 3, 46, 7, 69, 10, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 10, 69, 7, 46, 3, 0 },
+ { 0, 11, 70, 7, 44, 3, 0 },
+ { 0, 12, 72, 7, 42, 2, 0 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 18, 78, 7, 31, 1, 0 },
+ { 0, 20, 78, 7, 29, 1, 0 },
+ { 1, 22, 77, 7, 27, 1, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 0, 2, 40, 7, 73, 13, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 13, 73, 7, 40, 2, 0 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 } },
+ .odd = { { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 1, 27, 7, 77, 22, 1 },
+ { 0, 1, 29, 7, 78, 20, 0 },
+ { 0, 1, 31, 7, 78, 18, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 0, 2, 42, 7, 72, 12, 0 },
+ { 0, 3, 44, 7, 70, 11, 0 },
+ { 0, 3, 46, 7, 69, 10, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 10, 69, 7, 46, 3, 0 },
+ { 0, 11, 70, 7, 44, 3, 0 },
+ { 0, 12, 72, 7, 42, 2, 0 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 18, 78, 7, 31, 1, 0 },
+ { 0, 20, 78, 7, 29, 1, 0 },
+ { 1, 22, 77, 7, 27, 1, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 } } },
+ .ptrn_arr = { { 0x92492493, 0x24924924, 0x49249249, 0x92492492,
+ 0x24924924, 0x9249249 } },
+ .sample_patrn_length = 190,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 64) = 0.333333 */
+ .hor_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 } },
+ .odd = { { 0, 4, 60, 7, 60, 4, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 21, 86, 7, 21, 0, 0 } },
+ .odd = { { 0, 4, 60, 7, 60, 4, 0 } } },
+ .ptrn_arr = { { 0x9 } },
+ .sample_patrn_length = 6,
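+ /*
+  * 32 / 96 reduces to 1 / 3, which presumably is why a single
+  * even/odd phase pair suffices here and the 6-bit pattern 0x9
+  * (0b001001) keeps two of every six positions.
+  */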
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 65) = 0.329897 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 20, 85, 7, 23, 0, 0 },
+ { 0, 18, 84, 7, 25, 1, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 12, 81, 7, 34, 1, 0 },
+ { 0, 11, 79, 7, 37, 1, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 4, 64, 7, 57, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 57, 7, 64, 4, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 1, 37, 7, 79, 11, 0 },
+ { 0, 1, 34, 7, 81, 12, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 1, 25, 7, 84, 18, 0 },
+ { 0, 0, 23, 7, 85, 20, 0 } },
+ .odd = { { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 19, 85, 7, 24, 0, 0 },
+ { 0, 17, 84, 7, 26, 1, 0 },
+ { 0, 16, 82, 7, 29, 1, 0 },
+ { 0, 14, 82, 7, 31, 1, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 12, 80, 7, 35, 1, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 10, 77, 7, 40, 1, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 6, 69, 7, 51, 2, 0 },
+ { 0, 5, 66, 7, 54, 3, 0 },
+ { 0, 5, 64, 7, 56, 3, 0 },
+ { 0, 4, 61, 7, 59, 4, 0 },
+ { 0, 4, 59, 7, 61, 4, 0 },
+ { 0, 3, 56, 7, 64, 5, 0 },
+ { 0, 3, 54, 7, 66, 5, 0 },
+ { 0, 2, 51, 7, 69, 6, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 1, 40, 7, 77, 10, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 1, 35, 7, 80, 12, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 1, 31, 7, 82, 14, 0 },
+ { 0, 1, 29, 7, 82, 16, 0 },
+ { 0, 1, 26, 7, 84, 17, 0 },
+ { 0, 0, 24, 7, 85, 19, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 20, 85, 7, 23, 0, 0 },
+ { 0, 18, 84, 7, 25, 1, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 12, 81, 7, 34, 1, 0 },
+ { 0, 11, 79, 7, 37, 1, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 7, 72, 7, 47, 2, 0 },
+ { 0, 6, 70, 7, 50, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 4, 64, 7, 57, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 57, 7, 64, 4, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 50, 7, 70, 6, 0 },
+ { 0, 2, 47, 7, 72, 7, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 1, 37, 7, 79, 11, 0 },
+ { 0, 1, 34, 7, 81, 12, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 1, 25, 7, 84, 18, 0 },
+ { 0, 0, 23, 7, 85, 20, 0 } },
+ .odd = { { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 19, 85, 7, 24, 0, 0 },
+ { 0, 17, 84, 7, 26, 1, 0 },
+ { 0, 16, 82, 7, 29, 1, 0 },
+ { 0, 14, 82, 7, 31, 1, 0 },
+ { 0, 13, 81, 7, 33, 1, 0 },
+ { 0, 12, 80, 7, 35, 1, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 10, 77, 7, 40, 1, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 6, 69, 7, 51, 2, 0 },
+ { 0, 5, 66, 7, 54, 3, 0 },
+ { 0, 5, 64, 7, 56, 3, 0 },
+ { 0, 4, 61, 7, 59, 4, 0 },
+ { 0, 4, 59, 7, 61, 4, 0 },
+ { 0, 3, 56, 7, 64, 5, 0 },
+ { 0, 3, 54, 7, 66, 5, 0 },
+ { 0, 2, 51, 7, 69, 6, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 1, 40, 7, 77, 10, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 1, 35, 7, 80, 12, 0 },
+ { 0, 1, 33, 7, 81, 13, 0 },
+ { 0, 1, 31, 7, 82, 14, 0 },
+ { 0, 1, 29, 7, 82, 16, 0 },
+ { 0, 1, 26, 7, 84, 17, 0 },
+ { 0, 0, 24, 7, 85, 19, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x92492492, 0x24924924, 0x49249249,
+ 0x92492492, 0x24924924 } },
+ .sample_patrn_length = 194,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 66) = 0.326531 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 18, 83, 7, 26, 1, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 7, 69, 7, 50, 2, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 2, 50, 7, 69, 7, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 1, 26, 7, 83, 18, 0 } },
+ .odd = { { 0, 20, 84, 7, 24, 0, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 0, 24, 7, 84, 20, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 18, 83, 7, 26, 1, 0 },
+ { 0, 15, 82, 7, 30, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 10, 78, 7, 39, 1, 0 },
+ { 0, 8, 74, 7, 44, 2, 0 },
+ { 0, 7, 69, 7, 50, 2, 0 },
+ { 0, 5, 65, 7, 55, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 55, 7, 65, 5, 0 },
+ { 0, 2, 50, 7, 69, 7, 0 },
+ { 0, 2, 44, 7, 74, 8, 0 },
+ { 0, 1, 39, 7, 78, 10, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 30, 7, 82, 15, 0 },
+ { 0, 1, 26, 7, 83, 18, 0 } },
+ .odd = { { 0, 20, 84, 7, 24, 0, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 14, 81, 7, 32, 1, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 9, 75, 7, 42, 2, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 2, 42, 7, 75, 9, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 1, 32, 7, 81, 14, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 },
+ { 0, 0, 24, 7, 84, 20, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x92492492, 0x24924924 } },
+ .sample_patrn_length = 98,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 67) = 0.323232 */
+ .hor_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 3, 55, 7, 64, 6, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 33, 7, 80, 14, 0 },
+ { 0, 1, 26, 7, 82, 19, 0 },
+ { 0, 21, 82, 7, 24, 1, 0 },
+ { 0, 16, 81, 7, 30, 1, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 1, 30, 7, 81, 16, 0 },
+ { 0, 1, 24, 7, 82, 21, 0 },
+ { 0, 19, 82, 7, 26, 1, 0 },
+ { 0, 14, 80, 7, 33, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 6, 64, 7, 55, 3, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 } },
+ .odd = { { 0, 20, 82, 7, 25, 1, 0 },
+ { 0, 15, 81, 7, 31, 1, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 4, 58, 7, 61, 5, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 1, 36, 7, 79, 12, 0 },
+ { 0, 1, 29, 7, 81, 17, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 },
+ { 0, 18, 82, 7, 27, 1, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 10, 75, 7, 41, 2, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 5, 63, 7, 56, 4, 0 },
+ { 0, 4, 56, 7, 63, 5, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 2, 41, 7, 75, 10, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 1, 27, 7, 82, 18, 0 },
+ { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 17, 81, 7, 29, 1, 0 },
+ { 0, 12, 79, 7, 36, 1, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 5, 61, 7, 58, 4, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 1, 31, 7, 81, 15, 0 },
+ { 0, 1, 25, 7, 82, 20, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 22, 84, 7, 22, 0, 0 },
+ { 0, 17, 82, 7, 28, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 3, 55, 7, 64, 6, 0 },
+ { 0, 2, 47, 7, 71, 8, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 33, 7, 80, 14, 0 },
+ { 0, 1, 26, 7, 82, 19, 0 },
+ { 0, 21, 82, 7, 24, 1, 0 },
+ { 0, 16, 81, 7, 30, 1, 0 },
+ { 0, 12, 78, 7, 37, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 6, 67, 7, 52, 3, 0 },
+ { 0, 4, 60, 7, 60, 4, 0 },
+ { 0, 3, 52, 7, 67, 6, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 37, 7, 78, 12, 0 },
+ { 0, 1, 30, 7, 81, 16, 0 },
+ { 0, 1, 24, 7, 82, 21, 0 },
+ { 0, 19, 82, 7, 26, 1, 0 },
+ { 0, 14, 80, 7, 33, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 8, 71, 7, 47, 2, 0 },
+ { 0, 6, 64, 7, 55, 3, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 28, 7, 82, 17, 0 } },
+ .odd = { { 0, 20, 82, 7, 25, 1, 0 },
+ { 0, 15, 81, 7, 31, 1, 0 },
+ { 0, 11, 78, 7, 38, 1, 0 },
+ { 0, 8, 72, 7, 46, 2, 0 },
+ { 0, 6, 66, 7, 53, 3, 0 },
+ { 0, 4, 58, 7, 61, 5, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 2, 43, 7, 74, 9, 0 },
+ { 0, 1, 36, 7, 79, 12, 0 },
+ { 0, 1, 29, 7, 81, 17, 0 },
+ { 0, 0, 23, 7, 84, 21, 0 },
+ { 0, 18, 82, 7, 27, 1, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 10, 75, 7, 41, 2, 0 },
+ { 0, 7, 71, 7, 48, 2, 0 },
+ { 0, 5, 63, 7, 56, 4, 0 },
+ { 0, 4, 56, 7, 63, 5, 0 },
+ { 0, 2, 48, 7, 71, 7, 0 },
+ { 0, 2, 41, 7, 75, 10, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 1, 27, 7, 82, 18, 0 },
+ { 0, 21, 84, 7, 23, 0, 0 },
+ { 0, 17, 81, 7, 29, 1, 0 },
+ { 0, 12, 79, 7, 36, 1, 0 },
+ { 0, 9, 74, 7, 43, 2, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 5, 61, 7, 58, 4, 0 },
+ { 0, 3, 53, 7, 66, 6, 0 },
+ { 0, 2, 46, 7, 72, 8, 0 },
+ { 0, 1, 38, 7, 78, 11, 0 },
+ { 0, 1, 31, 7, 81, 15, 0 },
+ { 0, 1, 25, 7, 82, 20, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x92492492, 0x92492490, 0x24924924,
+ 0x24924921, 0x49249249, 0x2 } },
+ .sample_patrn_length = 198,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 68) = 0.32 */
+ .hor_phase_arr = {
+ .even = { { 0, 23, 82, 7, 23, 0, 0 },
+ { 0, 16, 80, 7, 31, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 31, 7, 80, 16, 0 } },
+ .odd = { { 0, 19, 81, 7, 27, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 27, 7, 81, 19, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 0, 23, 82, 7, 23, 0, 0 },
+ { 0, 16, 80, 7, 31, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 7, 68, 7, 50, 3, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 3, 50, 7, 68, 7, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 31, 7, 80, 16, 0 } },
+ .odd = { { 0, 19, 81, 7, 27, 1, 0 },
+ { 0, 13, 79, 7, 35, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 35, 7, 79, 13, 0 },
+ { 0, 1, 27, 7, 81, 19, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x2492 } },
+ .sample_patrn_length = 50,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 69) = 0.316832 */
+ .hor_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 7, 66, 7, 52, 3, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 21, 81, 7, 25, 1, 0 },
+ { 0, 14, 78, 7, 35, 1, 0 },
+ { 0, 8, 70, 7, 47, 3, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 3, 47, 7, 70, 8, 0 },
+ { 0, 1, 35, 7, 78, 14, 0 },
+ { 0, 1, 25, 7, 81, 21, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 3, 52, 7, 66, 7, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 } },
+ .odd = { { 0, 19, 80, 7, 28, 1, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 4, 56, 7, 62, 6, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 1, 32, 7, 79, 16, 0 },
+ { 0, 22, 81, 7, 24, 1, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 9, 71, 7, 46, 2, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 1, 36, 7, 78, 13, 0 },
+ { 0, 1, 26, 7, 81, 20, 0 },
+ { 0, 17, 80, 7, 30, 1, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 6, 65, 7, 53, 4, 0 },
+ { 0, 4, 53, 7, 65, 6, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 1, 30, 7, 80, 17, 0 },
+ { 0, 20, 81, 7, 26, 1, 0 },
+ { 0, 13, 78, 7, 36, 1, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 5, 58, 7, 60, 5, 0 },
+ { 0, 2, 46, 7, 71, 9, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 1, 24, 7, 81, 22, 0 },
+ { 0, 16, 79, 7, 32, 1, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 6, 62, 7, 56, 4, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 1, 28, 7, 80, 19, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 },
+ { 0, 9, 72, 7, 45, 2, 0 },
+ { 0, 5, 62, 7, 57, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 11, 75, 7, 40, 2, 0 },
+ { 0, 7, 66, 7, 52, 3, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 2, 42, 7, 74, 10, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 21, 81, 7, 25, 1, 0 },
+ { 0, 14, 78, 7, 35, 1, 0 },
+ { 0, 8, 70, 7, 47, 3, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 3, 47, 7, 70, 8, 0 },
+ { 0, 1, 35, 7, 78, 14, 0 },
+ { 0, 1, 25, 7, 81, 21, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 10, 74, 7, 42, 2, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 3, 52, 7, 66, 7, 0 },
+ { 0, 2, 40, 7, 75, 11, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 57, 7, 62, 5, 0 },
+ { 0, 2, 45, 7, 72, 9, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 } },
+ .odd = { { 0, 19, 80, 7, 28, 1, 0 },
+ { 0, 12, 75, 7, 39, 2, 0 },
+ { 0, 7, 67, 7, 51, 3, 0 },
+ { 0, 4, 56, 7, 62, 6, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 1, 32, 7, 79, 16, 0 },
+ { 0, 22, 81, 7, 24, 1, 0 },
+ { 0, 14, 79, 7, 34, 1, 0 },
+ { 0, 9, 71, 7, 46, 2, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 3, 48, 7, 69, 8, 0 },
+ { 0, 1, 36, 7, 78, 13, 0 },
+ { 0, 1, 26, 7, 81, 20, 0 },
+ { 0, 17, 80, 7, 30, 1, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 6, 65, 7, 53, 4, 0 },
+ { 0, 4, 53, 7, 65, 6, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 1, 30, 7, 80, 17, 0 },
+ { 0, 20, 81, 7, 26, 1, 0 },
+ { 0, 13, 78, 7, 36, 1, 0 },
+ { 0, 8, 69, 7, 48, 3, 0 },
+ { 0, 5, 58, 7, 60, 5, 0 },
+ { 0, 2, 46, 7, 71, 9, 0 },
+ { 0, 1, 34, 7, 79, 14, 0 },
+ { 0, 1, 24, 7, 81, 22, 0 },
+ { 0, 16, 79, 7, 32, 1, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 6, 62, 7, 56, 4, 0 },
+ { 0, 3, 51, 7, 67, 7, 0 },
+ { 0, 2, 39, 7, 75, 12, 0 },
+ { 0, 1, 28, 7, 80, 19, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x49249212, 0x49242492, 0x48492492,
+ 0x92492492, 0x92492490, 0x24 } },
+ .sample_patrn_length = 202,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 70) = 0.313725 */
+ .hor_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 14, 77, 7, 36, 1, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 10, 71, 7, 45, 2, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 2, 45, 7, 71, 10, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 1, 36, 7, 77, 14, 0 } },
+ .odd = { { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 11, 73, 7, 42, 2, 0 },
+ { 0, 6, 61, 7, 57, 4, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 },
+ { 0, 22, 80, 7, 25, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 1, 25, 7, 80, 22, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 4, 57, 7, 61, 6, 0 },
+ { 0, 2, 42, 7, 73, 11, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 23, 80, 7, 23, 1, 0 },
+ { 0, 14, 77, 7, 36, 1, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 54, 7, 64, 6, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 10, 71, 7, 45, 2, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 2, 45, 7, 71, 10, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 6, 64, 7, 54, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 1, 36, 7, 77, 14, 0 } },
+ .odd = { { 0, 18, 80, 7, 29, 1, 0 },
+ { 0, 11, 73, 7, 42, 2, 0 },
+ { 0, 6, 61, 7, 57, 4, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 1, 33, 7, 79, 15, 0 },
+ { 0, 22, 80, 7, 25, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 },
+ { 0, 1, 25, 7, 80, 22, 0 },
+ { 0, 15, 79, 7, 33, 1, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 4, 57, 7, 61, 6, 0 },
+ { 0, 2, 42, 7, 73, 11, 0 },
+ { 0, 1, 29, 7, 80, 18, 0 } } },
+ .ptrn_arr = { { 0x49249249, 0x49249248, 0x49249242, 0x2 } },
+ .sample_patrn_length = 102,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 71) = 0.31068 */
+ .hor_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 19, 79, 7, 29, 1, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 14, 76, 7, 36, 2, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 2, 36, 7, 76, 14, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 1, 29, 7, 79, 19, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 } },
+ .odd = { { 0, 18, 79, 7, 30, 1, 0 },
+ { 0, 9, 70, 7, 46, 3, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 },
+ { 0, 20, 79, 7, 28, 1, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 5, 58, 7, 59, 6, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 1, 26, 7, 79, 21, 1 },
+ { 0, 15, 77, 7, 35, 1, 0 },
+ { 0, 8, 66, 7, 51, 3, 0 },
+ { 0, 3, 51, 7, 66, 8, 0 },
+ { 0, 1, 35, 7, 77, 15, 0 },
+ { 1, 21, 79, 7, 26, 1, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 6, 59, 7, 58, 5, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 1, 28, 7, 79, 20, 0 },
+ { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 3, 46, 7, 70, 9, 0 },
+ { 0, 1, 30, 7, 79, 18, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 13, 75, 7, 38, 2, 0 },
+ { 0, 7, 63, 7, 54, 4, 0 },
+ { 0, 3, 47, 7, 69, 9, 0 },
+ { 0, 1, 31, 7, 79, 17, 0 },
+ { 0, 19, 79, 7, 29, 1, 0 },
+ { 0, 10, 70, 7, 45, 3, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 },
+ { 0, 1, 26, 7, 78, 22, 1 },
+ { 0, 14, 76, 7, 36, 2, 0 },
+ { 0, 7, 65, 7, 52, 4, 0 },
+ { 0, 3, 50, 7, 67, 8, 0 },
+ { 0, 1, 34, 7, 77, 16, 0 },
+ { 0, 20, 80, 7, 27, 1, 0 },
+ { 0, 11, 72, 7, 43, 2, 0 },
+ { 0, 5, 59, 7, 59, 5, 0 },
+ { 0, 2, 43, 7, 72, 11, 0 },
+ { 0, 1, 27, 7, 80, 20, 0 },
+ { 0, 16, 77, 7, 34, 1, 0 },
+ { 0, 8, 67, 7, 50, 3, 0 },
+ { 0, 4, 52, 7, 65, 7, 0 },
+ { 0, 2, 36, 7, 76, 14, 0 },
+ { 1, 22, 78, 7, 26, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 3, 45, 7, 70, 10, 0 },
+ { 0, 1, 29, 7, 79, 19, 0 },
+ { 0, 17, 79, 7, 31, 1, 0 },
+ { 0, 9, 69, 7, 47, 3, 0 },
+ { 0, 4, 54, 7, 63, 7, 0 },
+ { 0, 2, 38, 7, 75, 13, 0 } },
+ .odd = { { 0, 18, 79, 7, 30, 1, 0 },
+ { 0, 9, 70, 7, 46, 3, 0 },
+ { 0, 4, 55, 7, 63, 6, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 },
+ { 0, 20, 79, 7, 28, 1, 0 },
+ { 0, 10, 72, 7, 44, 2, 0 },
+ { 0, 5, 58, 7, 59, 6, 0 },
+ { 0, 2, 41, 7, 74, 11, 0 },
+ { 0, 1, 26, 7, 79, 21, 1 },
+ { 0, 15, 77, 7, 35, 1, 0 },
+ { 0, 8, 66, 7, 51, 3, 0 },
+ { 0, 3, 51, 7, 66, 8, 0 },
+ { 0, 1, 35, 7, 77, 15, 0 },
+ { 1, 21, 79, 7, 26, 1, 0 },
+ { 0, 11, 74, 7, 41, 2, 0 },
+ { 0, 6, 59, 7, 58, 5, 0 },
+ { 0, 2, 44, 7, 72, 10, 0 },
+ { 0, 1, 28, 7, 79, 20, 0 },
+ { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 6, 63, 7, 55, 4, 0 },
+ { 0, 3, 46, 7, 70, 9, 0 },
+ { 0, 1, 30, 7, 79, 18, 0 } } },
+ .ptrn_arr = { { 0x9249249, 0x21249249, 0x24249249, 0x24849249,
+ 0x24909249, 0x24921249, 0x249 } },
+ .sample_patrn_length = 206,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 72) = 0.307692 */
+ .hor_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 } },
+ .odd = { { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 8, 68, 7, 49, 3, 0 },
+ { 0, 3, 49, 7, 68, 8, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 12, 74, 7, 40, 2, 0 },
+ { 0, 5, 60, 7, 58, 5, 0 },
+ { 0, 2, 40, 7, 74, 12, 0 } },
+ .odd = { { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 8, 68, 7, 49, 3, 0 },
+ { 0, 3, 49, 7, 68, 8, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 } } },
+ .ptrn_arr = { { 0x249249 } },
+ .sample_patrn_length = 26,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
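+ /*
+  * ptrn_arr looks like a bitmask over sample_patrn_length input
+  * positions, a set bit marking each position that yields an output
+  * sample: in the entry above, 0x249249 has 8 bits set over 26
+  * positions, matching 26 * 32 / 104 = 8 outputs per pattern repeat.
+  */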
+ /* Scale factor 32 / (32 + 73) = 0.304762 */
+ .hor_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 5, 54, 7, 62, 7, 0 },
+ { 0, 2, 34, 7, 76, 16, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 0, 1, 26, 7, 77, 23, 1 },
+ { 0, 13, 73, 7, 40, 2, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 47, 7, 68, 10, 0 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 0, 10, 68, 7, 47, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 2, 40, 7, 73, 13, 0 },
+ { 1, 23, 77, 7, 26, 1, 0 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 16, 76, 7, 34, 2, 0 },
+ { 0, 7, 62, 7, 54, 5, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 } },
+ .odd = { { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 3, 44, 7, 70, 11, 0 },
+ { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 12, 72, 7, 42, 2, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 },
+ { 0, 19, 77, 7, 31, 1, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 3, 46, 7, 69, 10, 0 },
+ { 0, 1, 27, 7, 77, 22, 1 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 1, 20, 77, 7, 29, 1, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 1, 29, 7, 77, 20, 1 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 1, 22, 77, 7, 27, 1, 0 },
+ { 0, 10, 69, 7, 46, 3, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 1, 31, 7, 77, 19, 0 },
+ { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 2, 42, 7, 72, 12, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 },
+ { 0, 11, 70, 7, 44, 3, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 24, 78, 7, 24, 1, 0 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 5, 54, 7, 62, 7, 0 },
+ { 0, 2, 34, 7, 76, 16, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 0, 1, 26, 7, 77, 23, 1 },
+ { 0, 13, 73, 7, 40, 2, 0 },
+ { 0, 5, 56, 7, 61, 6, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 19, 78, 7, 30, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 47, 7, 68, 10, 0 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 0, 10, 68, 7, 47, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 30, 7, 78, 19, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 6, 61, 7, 56, 5, 0 },
+ { 0, 2, 40, 7, 73, 13, 0 },
+ { 1, 23, 77, 7, 26, 1, 0 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 16, 76, 7, 34, 2, 0 },
+ { 0, 7, 62, 7, 54, 5, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 } },
+ .odd = { { 0, 17, 77, 7, 33, 1, 0 },
+ { 0, 7, 64, 7, 53, 4, 0 },
+ { 0, 3, 44, 7, 70, 11, 0 },
+ { 0, 1, 25, 7, 78, 23, 1 },
+ { 0, 12, 72, 7, 42, 2, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 },
+ { 0, 19, 77, 7, 31, 1, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 3, 46, 7, 69, 10, 0 },
+ { 0, 1, 27, 7, 77, 22, 1 },
+ { 0, 13, 74, 7, 39, 2, 0 },
+ { 0, 5, 57, 7, 60, 6, 0 },
+ { 0, 2, 37, 7, 75, 14, 0 },
+ { 1, 20, 77, 7, 29, 1, 0 },
+ { 0, 9, 68, 7, 48, 3, 0 },
+ { 0, 3, 48, 7, 68, 9, 0 },
+ { 0, 1, 29, 7, 77, 20, 1 },
+ { 0, 14, 75, 7, 37, 2, 0 },
+ { 0, 6, 60, 7, 57, 5, 0 },
+ { 0, 2, 39, 7, 74, 13, 0 },
+ { 1, 22, 77, 7, 27, 1, 0 },
+ { 0, 10, 69, 7, 46, 3, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 1, 31, 7, 77, 19, 0 },
+ { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 2, 42, 7, 72, 12, 0 },
+ { 1, 23, 78, 7, 25, 1, 0 },
+ { 0, 11, 70, 7, 44, 3, 0 },
+ { 0, 4, 53, 7, 64, 7, 0 },
+ { 0, 1, 33, 7, 77, 17, 0 } } },
+ .ptrn_arr = { { 0x24249249, 0x24921249, 0x84924909, 0x92424924,
+ 0x92492124, 0x48492490, 0x2492 } },
+ .sample_patrn_length = 210,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 74) = 0.301887 */
+ .hor_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 13, 72, 7, 41, 2, 0 },
+ { 0, 5, 54, 7, 62, 7, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 7, 62, 7, 54, 5, 0 },
+ { 0, 2, 41, 7, 72, 13, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 } },
+ .odd = { { 0, 17, 75, 7, 34, 2, 0 },
+ { 0, 7, 60, 7, 56, 5, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 1, 20, 76, 7, 30, 1, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 },
+ { 1, 23, 77, 7, 26, 1, 0 },
+ { 0, 10, 68, 7, 47, 3, 0 },
+ { 0, 3, 47, 7, 68, 10, 0 },
+ { 0, 1, 26, 7, 77, 23, 1 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 1, 30, 7, 76, 20, 1 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 5, 56, 7, 60, 7, 0 },
+ { 0, 2, 34, 7, 75, 17, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 28, 7, 77, 21, 1 },
+ { 0, 13, 72, 7, 41, 2, 0 },
+ { 0, 5, 54, 7, 62, 7, 0 },
+ { 0, 1, 32, 7, 77, 18, 0 },
+ { 0, 15, 75, 7, 36, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 36, 7, 75, 15, 0 },
+ { 0, 18, 77, 7, 32, 1, 0 },
+ { 0, 7, 62, 7, 54, 5, 0 },
+ { 0, 2, 41, 7, 72, 13, 0 },
+ { 1, 21, 77, 7, 28, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 } },
+ .odd = { { 0, 17, 75, 7, 34, 2, 0 },
+ { 0, 7, 60, 7, 56, 5, 0 },
+ { 0, 2, 38, 7, 74, 14, 0 },
+ { 1, 20, 76, 7, 30, 1, 0 },
+ { 0, 8, 64, 7, 52, 4, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 },
+ { 1, 23, 77, 7, 26, 1, 0 },
+ { 0, 10, 68, 7, 47, 3, 0 },
+ { 0, 3, 47, 7, 68, 10, 0 },
+ { 0, 1, 26, 7, 77, 23, 1 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 4, 52, 7, 64, 8, 0 },
+ { 0, 1, 30, 7, 76, 20, 1 },
+ { 0, 14, 74, 7, 38, 2, 0 },
+ { 0, 5, 56, 7, 60, 7, 0 },
+ { 0, 2, 34, 7, 75, 17, 0 } } },
+ .ptrn_arr = { { 0x24849249, 0x24924849, 0x92424924, 0x24 } },
+ .sample_patrn_length = 106,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 75) = 0.299065 */
+ .hor_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 10, 67, 7, 47, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 1, 23, 76, 7, 27, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 2, 41, 7, 72, 13, 0 },
+ { 1, 20, 76, 7, 30, 1, 0 },
+ { 0, 8, 61, 7, 54, 5, 0 },
+ { 0, 2, 39, 7, 72, 15, 0 },
+ { 0, 19, 76, 7, 32, 1, 0 },
+ { 0, 7, 59, 7, 56, 6, 0 },
+ { 0, 2, 36, 7, 74, 16, 0 },
+ { 0, 17, 75, 7, 34, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 34, 7, 75, 17, 0 },
+ { 0, 16, 74, 7, 36, 2, 0 },
+ { 0, 6, 56, 7, 59, 7, 0 },
+ { 0, 1, 32, 7, 76, 19, 0 },
+ { 0, 15, 72, 7, 39, 2, 0 },
+ { 0, 5, 54, 7, 61, 8, 0 },
+ { 0, 1, 30, 7, 76, 20, 1 },
+ { 0, 13, 72, 7, 41, 2, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 27, 7, 76, 23, 1 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 47, 7, 67, 10, 0 } },
+ .odd = { { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 6, 57, 7, 58, 7, 0 },
+ { 0, 2, 33, 7, 75, 18, 0 },
+ { 0, 15, 73, 7, 38, 2, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 1, 31, 7, 76, 19, 1 },
+ { 0, 14, 72, 7, 40, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 30, 7, 75, 21, 1 },
+ { 0, 13, 70, 7, 42, 3, 0 },
+ { 0, 4, 50, 7, 65, 9, 0 },
+ { 0, 1, 28, 7, 76, 22, 1 },
+ { 0, 12, 69, 7, 44, 3, 0 },
+ { 0, 4, 48, 7, 66, 10, 0 },
+ { 0, 1, 26, 7, 76, 24, 1 },
+ { 0, 11, 68, 7, 46, 3, 0 },
+ { 0, 3, 46, 7, 68, 11, 0 },
+ { 1, 24, 76, 7, 26, 1, 0 },
+ { 0, 10, 66, 7, 48, 4, 0 },
+ { 0, 3, 44, 7, 69, 12, 0 },
+ { 1, 22, 76, 7, 28, 1, 0 },
+ { 0, 9, 65, 7, 50, 4, 0 },
+ { 0, 3, 42, 7, 70, 13, 0 },
+ { 1, 21, 75, 7, 30, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 40, 7, 72, 14, 0 },
+ { 1, 19, 76, 7, 31, 1, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 2, 38, 7, 73, 15, 0 },
+ { 0, 18, 75, 7, 33, 2, 0 },
+ { 0, 7, 58, 7, 57, 6, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 10, 67, 7, 47, 4, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 1, 23, 76, 7, 27, 1, 0 },
+ { 0, 9, 66, 7, 49, 4, 0 },
+ { 0, 3, 43, 7, 70, 12, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 8, 65, 7, 51, 4, 0 },
+ { 0, 2, 41, 7, 72, 13, 0 },
+ { 1, 20, 76, 7, 30, 1, 0 },
+ { 0, 8, 61, 7, 54, 5, 0 },
+ { 0, 2, 39, 7, 72, 15, 0 },
+ { 0, 19, 76, 7, 32, 1, 0 },
+ { 0, 7, 59, 7, 56, 6, 0 },
+ { 0, 2, 36, 7, 74, 16, 0 },
+ { 0, 17, 75, 7, 34, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 34, 7, 75, 17, 0 },
+ { 0, 16, 74, 7, 36, 2, 0 },
+ { 0, 6, 56, 7, 59, 7, 0 },
+ { 0, 1, 32, 7, 76, 19, 0 },
+ { 0, 15, 72, 7, 39, 2, 0 },
+ { 0, 5, 54, 7, 61, 8, 0 },
+ { 0, 1, 30, 7, 76, 20, 1 },
+ { 0, 13, 72, 7, 41, 2, 0 },
+ { 0, 4, 51, 7, 65, 8, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 12, 70, 7, 43, 3, 0 },
+ { 0, 4, 49, 7, 66, 9, 0 },
+ { 0, 1, 27, 7, 76, 23, 1 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 4, 47, 7, 67, 10, 0 } },
+ .odd = { { 0, 16, 75, 7, 35, 2, 0 },
+ { 0, 6, 57, 7, 58, 7, 0 },
+ { 0, 2, 33, 7, 75, 18, 0 },
+ { 0, 15, 73, 7, 38, 2, 0 },
+ { 0, 5, 55, 7, 61, 7, 0 },
+ { 0, 1, 31, 7, 76, 19, 1 },
+ { 0, 14, 72, 7, 40, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 30, 7, 75, 21, 1 },
+ { 0, 13, 70, 7, 42, 3, 0 },
+ { 0, 4, 50, 7, 65, 9, 0 },
+ { 0, 1, 28, 7, 76, 22, 1 },
+ { 0, 12, 69, 7, 44, 3, 0 },
+ { 0, 4, 48, 7, 66, 10, 0 },
+ { 0, 1, 26, 7, 76, 24, 1 },
+ { 0, 11, 68, 7, 46, 3, 0 },
+ { 0, 3, 46, 7, 68, 11, 0 },
+ { 1, 24, 76, 7, 26, 1, 0 },
+ { 0, 10, 66, 7, 48, 4, 0 },
+ { 0, 3, 44, 7, 69, 12, 0 },
+ { 1, 22, 76, 7, 28, 1, 0 },
+ { 0, 9, 65, 7, 50, 4, 0 },
+ { 0, 3, 42, 7, 70, 13, 0 },
+ { 1, 21, 75, 7, 30, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 40, 7, 72, 14, 0 },
+ { 1, 19, 76, 7, 31, 1, 0 },
+ { 0, 7, 61, 7, 55, 5, 0 },
+ { 0, 2, 38, 7, 73, 15, 0 },
+ { 0, 18, 75, 7, 33, 2, 0 },
+ { 0, 7, 58, 7, 57, 6, 0 },
+ { 0, 2, 35, 7, 75, 16, 0 } } },
+ .ptrn_arr = { { 0x24909249, 0x90924909, 0x92490924, 0x49212490,
+ 0x21249212, 0x24921249, 0x24921 } },
+ .sample_patrn_length = 214,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 76) = 0.296296 */
+ .hor_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 10, 65, 7, 49, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 49, 7, 65, 10, 0 } },
+ .odd = { { 0, 16, 73, 7, 37, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 37, 7, 73, 16, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 25, 76, 7, 25, 1, 0 },
+ { 0, 10, 65, 7, 49, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 58, 7, 58, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 49, 7, 65, 10, 0 } },
+ .odd = { { 0, 16, 73, 7, 37, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 11, 69, 7, 45, 3, 0 },
+ { 0, 3, 45, 7, 69, 11, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 37, 7, 73, 16, 0 } } },
+ .ptrn_arr = { { 0x24909249, 0x24921 } },
+ .sample_patrn_length = 54,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 77) = 0.293578 */
+ .hor_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 37, 7, 73, 16, 0 },
+ { 0, 15, 72, 7, 39, 2, 0 },
+ { 0, 4, 49, 7, 65, 10, 0 },
+ { 1, 24, 75, 7, 27, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 7, 60, 7, 55, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 45, 7, 68, 12, 0 },
+ { 1, 21, 74, 7, 31, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 31, 7, 74, 21, 1 },
+ { 0, 12, 68, 7, 45, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 55, 7, 60, 7, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 27, 7, 75, 24, 1 },
+ { 0, 10, 65, 7, 49, 4, 0 },
+ { 0, 2, 39, 7, 72, 15, 0 },
+ { 0, 16, 73, 7, 37, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 } },
+ .odd = { { 0, 16, 72, 7, 38, 2, 0 },
+ { 0, 5, 50, 7, 64, 9, 0 },
+ { 1, 25, 75, 7, 26, 1, 0 },
+ { 0, 8, 63, 7, 52, 5, 0 },
+ { 0, 2, 36, 7, 73, 17, 0 },
+ { 0, 15, 70, 7, 40, 3, 0 },
+ { 0, 4, 48, 7, 66, 10, 0 },
+ { 1, 23, 75, 7, 28, 1, 0 },
+ { 0, 8, 60, 7, 54, 6, 0 },
+ { 0, 2, 34, 7, 73, 18, 1 },
+ { 0, 13, 70, 7, 42, 3, 0 },
+ { 0, 4, 46, 7, 67, 11, 0 },
+ { 1, 21, 75, 7, 30, 1, 0 },
+ { 0, 7, 59, 7, 56, 6, 0 },
+ { 0, 2, 32, 7, 73, 20, 1 },
+ { 0, 12, 69, 7, 44, 3, 0 },
+ { 0, 3, 44, 7, 69, 12, 0 },
+ { 1, 20, 73, 7, 32, 2, 0 },
+ { 0, 6, 56, 7, 59, 7, 0 },
+ { 0, 1, 30, 7, 75, 21, 1 },
+ { 0, 11, 67, 7, 46, 4, 0 },
+ { 0, 3, 42, 7, 70, 13, 0 },
+ { 1, 18, 73, 7, 34, 2, 0 },
+ { 0, 6, 54, 7, 60, 8, 0 },
+ { 0, 1, 28, 7, 75, 23, 1 },
+ { 0, 10, 66, 7, 48, 4, 0 },
+ { 0, 3, 40, 7, 70, 15, 0 },
+ { 0, 17, 73, 7, 36, 2, 0 },
+ { 0, 5, 52, 7, 63, 8, 0 },
+ { 0, 1, 26, 7, 75, 25, 1 },
+ { 0, 9, 64, 7, 50, 5, 0 },
+ { 0, 2, 38, 7, 72, 16, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 37, 7, 73, 16, 0 },
+ { 0, 15, 72, 7, 39, 2, 0 },
+ { 0, 4, 49, 7, 65, 10, 0 },
+ { 1, 24, 75, 7, 27, 1, 0 },
+ { 0, 8, 62, 7, 53, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 7, 60, 7, 55, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 45, 7, 68, 12, 0 },
+ { 1, 21, 74, 7, 31, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 31, 7, 74, 21, 1 },
+ { 0, 12, 68, 7, 45, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 55, 7, 60, 7, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 53, 7, 62, 8, 0 },
+ { 0, 1, 27, 7, 75, 24, 1 },
+ { 0, 10, 65, 7, 49, 4, 0 },
+ { 0, 2, 39, 7, 72, 15, 0 },
+ { 0, 16, 73, 7, 37, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 } },
+ .odd = { { 0, 16, 72, 7, 38, 2, 0 },
+ { 0, 5, 50, 7, 64, 9, 0 },
+ { 1, 25, 75, 7, 26, 1, 0 },
+ { 0, 8, 63, 7, 52, 5, 0 },
+ { 0, 2, 36, 7, 73, 17, 0 },
+ { 0, 15, 70, 7, 40, 3, 0 },
+ { 0, 4, 48, 7, 66, 10, 0 },
+ { 1, 23, 75, 7, 28, 1, 0 },
+ { 0, 8, 60, 7, 54, 6, 0 },
+ { 0, 2, 34, 7, 73, 18, 1 },
+ { 0, 13, 70, 7, 42, 3, 0 },
+ { 0, 4, 46, 7, 67, 11, 0 },
+ { 1, 21, 75, 7, 30, 1, 0 },
+ { 0, 7, 59, 7, 56, 6, 0 },
+ { 0, 2, 32, 7, 73, 20, 1 },
+ { 0, 12, 69, 7, 44, 3, 0 },
+ { 0, 3, 44, 7, 69, 12, 0 },
+ { 1, 20, 73, 7, 32, 2, 0 },
+ { 0, 6, 56, 7, 59, 7, 0 },
+ { 0, 1, 30, 7, 75, 21, 1 },
+ { 0, 11, 67, 7, 46, 4, 0 },
+ { 0, 3, 42, 7, 70, 13, 0 },
+ { 1, 18, 73, 7, 34, 2, 0 },
+ { 0, 6, 54, 7, 60, 8, 0 },
+ { 0, 1, 28, 7, 75, 23, 1 },
+ { 0, 10, 66, 7, 48, 4, 0 },
+ { 0, 3, 40, 7, 70, 15, 0 },
+ { 0, 17, 73, 7, 36, 2, 0 },
+ { 0, 5, 52, 7, 63, 8, 0 },
+ { 0, 1, 26, 7, 75, 25, 1 },
+ { 0, 9, 64, 7, 50, 5, 0 },
+ { 0, 2, 38, 7, 72, 16, 0 } } },
+ .ptrn_arr = { { 0x24921249, 0x92484924, 0x49212490, 0x24849242,
+ 0x92124909, 0x48492424, 0x249092 } },
+ .sample_patrn_length = 218,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 78) = 0.290909 */
+ .hor_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 8, 61, 7, 53, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 12, 67, 7, 45, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 0, 17, 72, 7, 37, 2, 0 },
+ { 0, 5, 49, 7, 64, 10, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 10, 64, 7, 49, 5, 0 },
+ { 0, 2, 37, 7, 72, 17, 0 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 45, 7, 67, 12, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 53, 7, 61, 8, 0 } },
+ .odd = { { 0, 15, 71, 7, 39, 3, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 21, 73, 7, 31, 2, 0 },
+ { 0, 6, 55, 7, 59, 8, 0 },
+ { 0, 1, 28, 7, 74, 24, 1 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 },
+ { 1, 24, 74, 7, 28, 1, 0 },
+ { 0, 8, 59, 7, 55, 6, 0 },
+ { 0, 2, 31, 7, 73, 21, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 3, 39, 7, 71, 15, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 8, 61, 7, 53, 6, 0 },
+ { 0, 2, 33, 7, 73, 19, 1 },
+ { 0, 12, 67, 7, 45, 4, 0 },
+ { 0, 3, 41, 7, 70, 14, 0 },
+ { 0, 17, 72, 7, 37, 2, 0 },
+ { 0, 5, 49, 7, 64, 10, 0 },
+ { 1, 22, 75, 7, 29, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 29, 7, 75, 22, 1 },
+ { 0, 10, 64, 7, 49, 5, 0 },
+ { 0, 2, 37, 7, 72, 17, 0 },
+ { 0, 14, 70, 7, 41, 3, 0 },
+ { 0, 4, 45, 7, 67, 12, 0 },
+ { 1, 19, 73, 7, 33, 2, 0 },
+ { 0, 6, 53, 7, 61, 8, 0 } },
+ .odd = { { 0, 15, 71, 7, 39, 3, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 21, 73, 7, 31, 2, 0 },
+ { 0, 6, 55, 7, 59, 8, 0 },
+ { 0, 1, 28, 7, 74, 24, 1 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 },
+ { 1, 24, 74, 7, 28, 1, 0 },
+ { 0, 8, 59, 7, 55, 6, 0 },
+ { 0, 2, 31, 7, 73, 21, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 3, 39, 7, 71, 15, 0 } } },
+ .ptrn_arr = { { 0x24921249, 0x12490924, 0x9248492, 0x249 } },
+ .sample_patrn_length = 110,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 79) = 0.288288 */
+ .hor_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 8, 59, 7, 55, 6, 0 },
+ { 0, 1, 30, 7, 73, 23, 1 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 33, 7, 72, 20, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 2, 37, 7, 71, 17, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 },
+ { 0, 16, 70, 7, 39, 3, 0 },
+ { 0, 4, 45, 7, 67, 12, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 49, 7, 64, 10, 0 },
+ { 1, 21, 73, 7, 31, 2, 0 },
+ { 0, 6, 53, 7, 60, 9, 0 },
+ { 1, 24, 74, 7, 28, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 28, 7, 74, 24, 1 },
+ { 0, 9, 60, 7, 53, 6, 0 },
+ { 0, 2, 31, 7, 73, 21, 1 },
+ { 0, 10, 64, 7, 49, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 12, 67, 7, 45, 4, 0 },
+ { 0, 3, 39, 7, 70, 16, 0 },
+ { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 17, 71, 7, 37, 2, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 20, 72, 7, 33, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 },
+ { 1, 23, 73, 7, 30, 1, 0 },
+ { 0, 6, 55, 7, 59, 8, 0 } },
+ .odd = { { 0, 15, 70, 7, 40, 3, 0 },
+ { 0, 4, 44, 7, 67, 13, 0 },
+ { 1, 18, 71, 7, 36, 2, 0 },
+ { 0, 4, 48, 7, 65, 11, 0 },
+ { 1, 20, 73, 7, 32, 2, 0 },
+ { 0, 6, 52, 7, 61, 9, 0 },
+ { 1, 24, 73, 7, 29, 1, 0 },
+ { 0, 7, 56, 7, 58, 7, 0 },
+ { 0, 1, 27, 7, 74, 25, 1 },
+ { 0, 8, 60, 7, 54, 6, 0 },
+ { 0, 2, 30, 7, 73, 22, 1 },
+ { 0, 10, 63, 7, 50, 5, 0 },
+ { 0, 2, 34, 7, 72, 19, 1 },
+ { 0, 12, 66, 7, 46, 4, 0 },
+ { 0, 3, 38, 7, 71, 16, 0 },
+ { 0, 14, 69, 7, 42, 3, 0 },
+ { 0, 3, 42, 7, 69, 14, 0 },
+ { 0, 16, 71, 7, 38, 3, 0 },
+ { 0, 4, 46, 7, 66, 12, 0 },
+ { 1, 19, 72, 7, 34, 2, 0 },
+ { 0, 5, 50, 7, 63, 10, 0 },
+ { 1, 22, 73, 7, 30, 2, 0 },
+ { 0, 6, 54, 7, 60, 8, 0 },
+ { 1, 25, 74, 7, 27, 1, 0 },
+ { 0, 7, 58, 7, 56, 7, 0 },
+ { 0, 1, 29, 7, 73, 24, 1 },
+ { 0, 9, 61, 7, 52, 6, 0 },
+ { 0, 2, 32, 7, 73, 20, 1 },
+ { 0, 11, 65, 7, 48, 4, 0 },
+ { 0, 2, 36, 7, 71, 18, 1 },
+ { 0, 13, 67, 7, 44, 4, 0 },
+ { 0, 3, 40, 7, 70, 15, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 8, 59, 7, 55, 6, 0 },
+ { 0, 1, 30, 7, 73, 23, 1 },
+ { 0, 9, 63, 7, 51, 5, 0 },
+ { 0, 2, 33, 7, 72, 20, 1 },
+ { 0, 11, 66, 7, 47, 4, 0 },
+ { 0, 2, 37, 7, 71, 17, 1 },
+ { 0, 13, 69, 7, 43, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 },
+ { 0, 16, 70, 7, 39, 3, 0 },
+ { 0, 4, 45, 7, 67, 12, 0 },
+ { 1, 18, 72, 7, 35, 2, 0 },
+ { 0, 5, 49, 7, 64, 10, 0 },
+ { 1, 21, 73, 7, 31, 2, 0 },
+ { 0, 6, 53, 7, 60, 9, 0 },
+ { 1, 24, 74, 7, 28, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 },
+ { 0, 1, 28, 7, 74, 24, 1 },
+ { 0, 9, 60, 7, 53, 6, 0 },
+ { 0, 2, 31, 7, 73, 21, 1 },
+ { 0, 10, 64, 7, 49, 5, 0 },
+ { 0, 2, 35, 7, 72, 18, 1 },
+ { 0, 12, 67, 7, 45, 4, 0 },
+ { 0, 3, 39, 7, 70, 16, 0 },
+ { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 43, 7, 69, 13, 0 },
+ { 1, 17, 71, 7, 37, 2, 0 },
+ { 0, 4, 47, 7, 66, 11, 0 },
+ { 1, 20, 72, 7, 33, 2, 0 },
+ { 0, 5, 51, 7, 63, 9, 0 },
+ { 1, 23, 73, 7, 30, 1, 0 },
+ { 0, 6, 55, 7, 59, 8, 0 } },
+ .odd = { { 0, 15, 70, 7, 40, 3, 0 },
+ { 0, 4, 44, 7, 67, 13, 0 },
+ { 1, 18, 71, 7, 36, 2, 0 },
+ { 0, 4, 48, 7, 65, 11, 0 },
+ { 1, 20, 73, 7, 32, 2, 0 },
+ { 0, 6, 52, 7, 61, 9, 0 },
+ { 1, 24, 73, 7, 29, 1, 0 },
+ { 0, 7, 56, 7, 58, 7, 0 },
+ { 0, 1, 27, 7, 74, 25, 1 },
+ { 0, 8, 60, 7, 54, 6, 0 },
+ { 0, 2, 30, 7, 73, 22, 1 },
+ { 0, 10, 63, 7, 50, 5, 0 },
+ { 0, 2, 34, 7, 72, 19, 1 },
+ { 0, 12, 66, 7, 46, 4, 0 },
+ { 0, 3, 38, 7, 71, 16, 0 },
+ { 0, 14, 69, 7, 42, 3, 0 },
+ { 0, 3, 42, 7, 69, 14, 0 },
+ { 0, 16, 71, 7, 38, 3, 0 },
+ { 0, 4, 46, 7, 66, 12, 0 },
+ { 1, 19, 72, 7, 34, 2, 0 },
+ { 0, 5, 50, 7, 63, 10, 0 },
+ { 1, 22, 73, 7, 30, 2, 0 },
+ { 0, 6, 54, 7, 60, 8, 0 },
+ { 1, 25, 74, 7, 27, 1, 0 },
+ { 0, 7, 58, 7, 56, 7, 0 },
+ { 0, 1, 29, 7, 73, 24, 1 },
+ { 0, 9, 61, 7, 52, 6, 0 },
+ { 0, 2, 32, 7, 73, 20, 1 },
+ { 0, 11, 65, 7, 48, 4, 0 },
+ { 0, 2, 36, 7, 71, 18, 1 },
+ { 0, 13, 67, 7, 44, 4, 0 },
+ { 0, 3, 40, 7, 70, 15, 0 } } },
+ .ptrn_arr = { { 0x84921249, 0x42492124, 0x24249092, 0x92124909,
+ 0x49212484, 0x24909248, 0x2490924 } },
+ .sample_patrn_length = 222,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 80) = 0.285714 */
+ .hor_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 } },
+ .odd = { { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 26, 74, 7, 26, 1, 0 },
+ { 0, 7, 57, 7, 57, 7, 0 } },
+ .odd = { { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 } } },
+ .ptrn_arr = { { 0x249 } },
+ .sample_patrn_length = 14,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
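+ /*
+  * Throughout these entries ver_phase_arr simply duplicates
+  * hor_phase_arr, i.e. the same 1-D kernel serves both directions.
+  * The 32 / 112 = 2 / 7 entry above degenerates to two phases per
+  * parity and a 14-sample pattern (0x249 has 4 bits set: 4 outputs
+  * per 14 inputs = 2 / 7).
+  */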
+ /* Scale factor 32 / (32 + 81) = 0.283186 */
+ .hor_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 7, 54, 7, 59, 8, 0 },
+ { 1, 23, 72, 7, 30, 2, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 20, 71, 7, 34, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 18, 69, 7, 37, 3, 0 },
+ { 0, 4, 43, 7, 67, 14, 0 },
+ { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 39, 7, 69, 16, 1 },
+ { 0, 13, 66, 7, 45, 4, 0 },
+ { 0, 2, 35, 7, 71, 19, 1 },
+ { 0, 11, 63, 7, 49, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 9, 60, 7, 53, 6, 0 },
+ { 0, 1, 28, 7, 73, 25, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 25, 73, 7, 28, 1, 0 },
+ { 0, 6, 53, 7, 60, 9, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 49, 7, 63, 11, 0 },
+ { 1, 19, 71, 7, 35, 2, 0 },
+ { 0, 4, 45, 7, 66, 13, 0 },
+ { 1, 16, 69, 7, 39, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 },
+ { 0, 14, 67, 7, 43, 4, 0 },
+ { 0, 3, 37, 7, 69, 18, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 34, 7, 71, 20, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 0, 2, 30, 7, 72, 23, 1 },
+ { 0, 8, 59, 7, 54, 7, 0 } },
+ .odd = { { 0, 15, 67, 7, 42, 4, 0 },
+ { 0, 3, 38, 7, 69, 17, 1 },
+ { 0, 12, 66, 7, 46, 4, 0 },
+ { 0, 2, 34, 7, 71, 20, 1 },
+ { 0, 10, 63, 7, 50, 5, 0 },
+ { 0, 2, 31, 7, 71, 23, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 0, 1, 27, 7, 73, 26, 1 },
+ { 0, 7, 55, 7, 58, 8, 0 },
+ { 1, 24, 72, 7, 29, 2, 0 },
+ { 0, 6, 52, 7, 60, 10, 0 },
+ { 1, 21, 71, 7, 33, 2, 0 },
+ { 0, 5, 48, 7, 64, 11, 0 },
+ { 1, 18, 70, 7, 36, 3, 0 },
+ { 0, 4, 44, 7, 67, 13, 0 },
+ { 0, 16, 69, 7, 40, 3, 0 },
+ { 0, 3, 40, 7, 69, 16, 0 },
+ { 0, 13, 67, 7, 44, 4, 0 },
+ { 0, 3, 36, 7, 70, 18, 1 },
+ { 0, 11, 64, 7, 48, 5, 0 },
+ { 0, 2, 33, 7, 71, 21, 1 },
+ { 0, 10, 60, 7, 52, 6, 0 },
+ { 0, 2, 29, 7, 72, 24, 1 },
+ { 0, 8, 58, 7, 55, 7, 0 },
+ { 1, 26, 73, 7, 27, 1, 0 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 23, 71, 7, 31, 2, 0 },
+ { 0, 5, 50, 7, 63, 10, 0 },
+ { 1, 20, 71, 7, 34, 2, 0 },
+ { 0, 4, 46, 7, 66, 12, 0 },
+ { 1, 17, 69, 7, 38, 3, 0 },
+ { 0, 4, 42, 7, 67, 15, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 7, 54, 7, 59, 8, 0 },
+ { 1, 23, 72, 7, 30, 2, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 20, 71, 7, 34, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 18, 69, 7, 37, 3, 0 },
+ { 0, 4, 43, 7, 67, 14, 0 },
+ { 0, 15, 69, 7, 41, 3, 0 },
+ { 0, 3, 39, 7, 69, 16, 1 },
+ { 0, 13, 66, 7, 45, 4, 0 },
+ { 0, 2, 35, 7, 71, 19, 1 },
+ { 0, 11, 63, 7, 49, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 9, 60, 7, 53, 6, 0 },
+ { 0, 1, 28, 7, 73, 25, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 25, 73, 7, 28, 1, 0 },
+ { 0, 6, 53, 7, 60, 9, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 49, 7, 63, 11, 0 },
+ { 1, 19, 71, 7, 35, 2, 0 },
+ { 0, 4, 45, 7, 66, 13, 0 },
+ { 1, 16, 69, 7, 39, 3, 0 },
+ { 0, 3, 41, 7, 69, 15, 0 },
+ { 0, 14, 67, 7, 43, 4, 0 },
+ { 0, 3, 37, 7, 69, 18, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 34, 7, 71, 20, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 0, 2, 30, 7, 72, 23, 1 },
+ { 0, 8, 59, 7, 54, 7, 0 } },
+ .odd = { { 0, 15, 67, 7, 42, 4, 0 },
+ { 0, 3, 38, 7, 69, 17, 1 },
+ { 0, 12, 66, 7, 46, 4, 0 },
+ { 0, 2, 34, 7, 71, 20, 1 },
+ { 0, 10, 63, 7, 50, 5, 0 },
+ { 0, 2, 31, 7, 71, 23, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 0, 1, 27, 7, 73, 26, 1 },
+ { 0, 7, 55, 7, 58, 8, 0 },
+ { 1, 24, 72, 7, 29, 2, 0 },
+ { 0, 6, 52, 7, 60, 10, 0 },
+ { 1, 21, 71, 7, 33, 2, 0 },
+ { 0, 5, 48, 7, 64, 11, 0 },
+ { 1, 18, 70, 7, 36, 3, 0 },
+ { 0, 4, 44, 7, 67, 13, 0 },
+ { 0, 16, 69, 7, 40, 3, 0 },
+ { 0, 3, 40, 7, 69, 16, 0 },
+ { 0, 13, 67, 7, 44, 4, 0 },
+ { 0, 3, 36, 7, 70, 18, 1 },
+ { 0, 11, 64, 7, 48, 5, 0 },
+ { 0, 2, 33, 7, 71, 21, 1 },
+ { 0, 10, 60, 7, 52, 6, 0 },
+ { 0, 2, 29, 7, 72, 24, 1 },
+ { 0, 8, 58, 7, 55, 7, 0 },
+ { 1, 26, 73, 7, 27, 1, 0 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 23, 71, 7, 31, 2, 0 },
+ { 0, 5, 50, 7, 63, 10, 0 },
+ { 1, 20, 71, 7, 34, 2, 0 },
+ { 0, 4, 46, 7, 66, 12, 0 },
+ { 1, 17, 69, 7, 38, 3, 0 },
+ { 0, 4, 42, 7, 67, 15, 0 } } },
+ .ptrn_arr = { { 0x90924249, 0x49092424, 0x84921248, 0x49092124,
+ 0x24909242, 0x48492124, 0x24849212 } },
+ .sample_patrn_length = 226,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 82) = 0.280702 */
+ .hor_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 6, 52, 7, 61, 9, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 45, 7, 66, 13, 0 },
+ { 0, 15, 68, 7, 41, 4, 0 },
+ { 0, 3, 37, 7, 69, 18, 1 },
+ { 0, 11, 63, 7, 49, 5, 0 },
+ { 0, 2, 30, 7, 71, 24, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 24, 71, 7, 30, 2, 0 },
+ { 0, 5, 49, 7, 63, 11, 0 },
+ { 1, 18, 69, 7, 37, 3, 0 },
+ { 0, 4, 41, 7, 68, 15, 0 },
+ { 0, 13, 66, 7, 45, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 9, 61, 7, 52, 6, 0 } },
+ .odd = { { 0, 14, 67, 7, 43, 4, 0 },
+ { 0, 3, 36, 7, 69, 19, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 0, 2, 28, 7, 72, 25, 1 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 17, 68, 7, 39, 3, 0 },
+ { 0, 3, 39, 7, 68, 17, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 1, 25, 72, 7, 28, 2, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 19, 69, 7, 36, 3, 0 },
+ { 0, 4, 43, 7, 67, 14, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 6, 52, 7, 61, 9, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 45, 7, 66, 13, 0 },
+ { 0, 15, 68, 7, 41, 4, 0 },
+ { 0, 3, 37, 7, 69, 18, 1 },
+ { 0, 11, 63, 7, 49, 5, 0 },
+ { 0, 2, 30, 7, 71, 24, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 24, 71, 7, 30, 2, 0 },
+ { 0, 5, 49, 7, 63, 11, 0 },
+ { 1, 18, 69, 7, 37, 3, 0 },
+ { 0, 4, 41, 7, 68, 15, 0 },
+ { 0, 13, 66, 7, 45, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 9, 61, 7, 52, 6, 0 } },
+ .odd = { { 0, 14, 67, 7, 43, 4, 0 },
+ { 0, 3, 36, 7, 69, 19, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 0, 2, 28, 7, 72, 25, 1 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 17, 68, 7, 39, 3, 0 },
+ { 0, 3, 39, 7, 68, 17, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 1, 25, 72, 7, 28, 2, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 19, 69, 7, 36, 3, 0 },
+ { 0, 4, 43, 7, 67, 14, 0 } } },
+ .ptrn_arr = { { 0x90924249, 0x9212484, 0x92124249, 0x2484 } },
+ .sample_patrn_length = 114,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 83) = 0.278261 */
+ .hor_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 39, 7, 68, 17, 1 },
+ { 0, 11, 62, 7, 49, 6, 0 },
+ { 0, 2, 29, 7, 71, 25, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 19, 69, 7, 36, 3, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 30, 7, 71, 24, 1 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 43, 7, 66, 15, 0 },
+ { 0, 13, 65, 7, 45, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 45, 7, 65, 13, 0 },
+ { 0, 15, 66, 7, 43, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 1, 24, 71, 7, 30, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 3, 36, 7, 69, 19, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 25, 71, 7, 29, 2, 0 },
+ { 0, 6, 49, 7, 62, 11, 0 },
+ { 1, 17, 68, 7, 39, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 } },
+ .odd = { { 0, 14, 66, 7, 44, 4, 0 },
+ { 0, 2, 33, 7, 70, 22, 1 },
+ { 0, 8, 57, 7, 55, 8, 0 },
+ { 1, 23, 71, 7, 31, 2, 0 },
+ { 0, 5, 46, 7, 64, 13, 0 },
+ { 0, 15, 67, 7, 42, 4, 0 },
+ { 0, 3, 35, 7, 69, 20, 1 },
+ { 0, 9, 59, 7, 53, 7, 0 },
+ { 1, 25, 71, 7, 29, 2, 0 },
+ { 0, 5, 48, 7, 63, 12, 0 },
+ { 1, 16, 68, 7, 40, 3, 0 },
+ { 0, 3, 37, 7, 68, 19, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 1, 26, 71, 7, 28, 2, 0 },
+ { 0, 6, 50, 7, 61, 11, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 11, 61, 7, 50, 6, 0 },
+ { 0, 2, 28, 7, 71, 26, 1 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 19, 68, 7, 37, 3, 0 },
+ { 0, 3, 40, 7, 68, 16, 1 },
+ { 0, 12, 63, 7, 48, 5, 0 },
+ { 0, 2, 29, 7, 71, 25, 1 },
+ { 0, 7, 53, 7, 59, 9, 0 },
+ { 1, 20, 69, 7, 35, 3, 0 },
+ { 0, 4, 42, 7, 67, 15, 0 },
+ { 0, 13, 64, 7, 46, 5, 0 },
+ { 0, 2, 31, 7, 71, 23, 1 },
+ { 0, 8, 55, 7, 57, 8, 0 },
+ { 1, 22, 70, 7, 33, 2, 0 },
+ { 0, 4, 44, 7, 66, 14, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 1, 27, 72, 7, 27, 1, 0 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 39, 7, 68, 17, 1 },
+ { 0, 11, 62, 7, 49, 6, 0 },
+ { 0, 2, 29, 7, 71, 25, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 19, 69, 7, 36, 3, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 12, 64, 7, 47, 5, 0 },
+ { 0, 2, 30, 7, 71, 24, 1 },
+ { 0, 7, 54, 7, 58, 9, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 43, 7, 66, 15, 0 },
+ { 0, 13, 65, 7, 45, 5, 0 },
+ { 0, 2, 32, 7, 71, 22, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 22, 71, 7, 32, 2, 0 },
+ { 0, 5, 45, 7, 65, 13, 0 },
+ { 0, 15, 66, 7, 43, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 9, 58, 7, 54, 7, 0 },
+ { 1, 24, 71, 7, 30, 2, 0 },
+ { 0, 5, 47, 7, 64, 12, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 3, 36, 7, 69, 19, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 25, 71, 7, 29, 2, 0 },
+ { 0, 6, 49, 7, 62, 11, 0 },
+ { 1, 17, 68, 7, 39, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 } },
+ .odd = { { 0, 14, 66, 7, 44, 4, 0 },
+ { 0, 2, 33, 7, 70, 22, 1 },
+ { 0, 8, 57, 7, 55, 8, 0 },
+ { 1, 23, 71, 7, 31, 2, 0 },
+ { 0, 5, 46, 7, 64, 13, 0 },
+ { 0, 15, 67, 7, 42, 4, 0 },
+ { 0, 3, 35, 7, 69, 20, 1 },
+ { 0, 9, 59, 7, 53, 7, 0 },
+ { 1, 25, 71, 7, 29, 2, 0 },
+ { 0, 5, 48, 7, 63, 12, 0 },
+ { 1, 16, 68, 7, 40, 3, 0 },
+ { 0, 3, 37, 7, 68, 19, 1 },
+ { 0, 10, 61, 7, 51, 6, 0 },
+ { 1, 26, 71, 7, 28, 2, 0 },
+ { 0, 6, 50, 7, 61, 11, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 11, 61, 7, 50, 6, 0 },
+ { 0, 2, 28, 7, 71, 26, 1 },
+ { 0, 6, 51, 7, 61, 10, 0 },
+ { 1, 19, 68, 7, 37, 3, 0 },
+ { 0, 3, 40, 7, 68, 16, 1 },
+ { 0, 12, 63, 7, 48, 5, 0 },
+ { 0, 2, 29, 7, 71, 25, 1 },
+ { 0, 7, 53, 7, 59, 9, 0 },
+ { 1, 20, 69, 7, 35, 3, 0 },
+ { 0, 4, 42, 7, 67, 15, 0 },
+ { 0, 13, 64, 7, 46, 5, 0 },
+ { 0, 2, 31, 7, 71, 23, 1 },
+ { 0, 8, 55, 7, 57, 8, 0 },
+ { 1, 22, 70, 7, 33, 2, 0 },
+ { 0, 4, 44, 7, 66, 14, 0 } } },
+ .ptrn_arr = { { 0x92124249, 0x21242484, 0x12424849, 0x24248492,
+ 0x42484909, 0x24849092, 0x48490924, 0x2 } },
+ .sample_patrn_length = 230,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 84) = 0.275862 */
+ .hor_phase_arr = {
+ .even = { { 2, 27, 70, 7, 27, 2, 0 },
+ { 0, 6, 49, 7, 61, 12, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 12, 61, 7, 49, 6, 0 } },
+ .odd = { { 0, 14, 64, 7, 45, 5, 0 },
+ { 0, 2, 31, 7, 70, 24, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 24, 70, 7, 31, 2, 0 },
+ { 0, 5, 45, 7, 64, 14, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 27, 70, 7, 27, 2, 0 },
+ { 0, 6, 49, 7, 61, 12, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 34, 7, 70, 21, 1 },
+ { 0, 8, 56, 7, 56, 8, 0 },
+ { 1, 21, 70, 7, 34, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 12, 61, 7, 49, 6, 0 } },
+ .odd = { { 0, 14, 64, 7, 45, 5, 0 },
+ { 0, 2, 31, 7, 70, 24, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 18, 68, 7, 38, 3, 0 },
+ { 0, 3, 38, 7, 68, 18, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 24, 70, 7, 31, 2, 0 },
+ { 0, 5, 45, 7, 64, 14, 0 } } },
+ .ptrn_arr = { { 0x92124249, 0x248490 } },
+ .sample_patrn_length = 58,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 85) = 0.273504 */
+ .hor_phase_arr = {
+ .even = { { 2, 27, 70, 7, 27, 2, 0 },
+ { 0, 5, 47, 7, 63, 13, 0 },
+ { 0, 14, 64, 7, 45, 5, 0 },
+ { 0, 2, 29, 7, 70, 26, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 15, 65, 7, 43, 4, 0 },
+ { 0, 2, 31, 7, 70, 24, 1 },
+ { 0, 6, 50, 7, 61, 11, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 32, 7, 70, 23, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 17, 67, 7, 39, 4, 0 },
+ { 0, 3, 34, 7, 69, 21, 1 },
+ { 0, 8, 54, 7, 57, 9, 0 },
+ { 1, 19, 67, 7, 38, 3, 0 },
+ { 0, 3, 36, 7, 68, 20, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 20, 68, 7, 36, 3, 0 },
+ { 0, 3, 38, 7, 67, 19, 1 },
+ { 0, 9, 57, 7, 54, 8, 0 },
+ { 1, 21, 69, 7, 34, 3, 0 },
+ { 0, 4, 39, 7, 67, 17, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 23, 70, 7, 32, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 11, 61, 7, 50, 6, 0 },
+ { 1, 24, 70, 7, 31, 2, 0 },
+ { 0, 4, 43, 7, 65, 15, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 26, 70, 7, 29, 2, 0 },
+ { 0, 5, 45, 7, 64, 14, 0 },
+ { 0, 13, 63, 7, 47, 5, 0 } },
+ .odd = { { 0, 13, 64, 7, 46, 5, 0 },
+ { 0, 2, 28, 7, 69, 27, 2 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 14, 64, 7, 44, 5, 0 },
+ { 0, 2, 30, 7, 70, 25, 1 },
+ { 0, 6, 49, 7, 62, 11, 0 },
+ { 1, 16, 65, 7, 42, 4, 0 },
+ { 0, 2, 32, 7, 69, 24, 1 },
+ { 0, 7, 51, 7, 59, 11, 0 },
+ { 1, 17, 66, 7, 40, 4, 0 },
+ { 0, 2, 33, 7, 70, 22, 1 },
+ { 0, 7, 53, 7, 58, 10, 0 },
+ { 1, 18, 67, 7, 39, 3, 0 },
+ { 0, 3, 35, 7, 68, 21, 1 },
+ { 0, 8, 54, 7, 57, 9, 0 },
+ { 1, 19, 68, 7, 37, 3, 0 },
+ { 0, 3, 37, 7, 68, 19, 1 },
+ { 0, 9, 57, 7, 54, 8, 0 },
+ { 1, 21, 68, 7, 35, 3, 0 },
+ { 0, 3, 39, 7, 67, 18, 1 },
+ { 0, 10, 58, 7, 53, 7, 0 },
+ { 1, 22, 70, 7, 33, 2, 0 },
+ { 0, 4, 40, 7, 66, 17, 1 },
+ { 0, 11, 59, 7, 51, 7, 0 },
+ { 1, 24, 69, 7, 32, 2, 0 },
+ { 0, 4, 42, 7, 65, 16, 1 },
+ { 0, 11, 62, 7, 49, 6, 0 },
+ { 1, 25, 70, 7, 30, 2, 0 },
+ { 0, 5, 44, 7, 64, 14, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 2, 27, 69, 7, 28, 2, 0 },
+ { 0, 5, 46, 7, 64, 13, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 27, 70, 7, 27, 2, 0 },
+ { 0, 5, 47, 7, 63, 13, 0 },
+ { 0, 14, 64, 7, 45, 5, 0 },
+ { 0, 2, 29, 7, 70, 26, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 15, 65, 7, 43, 4, 0 },
+ { 0, 2, 31, 7, 70, 24, 1 },
+ { 0, 6, 50, 7, 61, 11, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 32, 7, 70, 23, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 17, 67, 7, 39, 4, 0 },
+ { 0, 3, 34, 7, 69, 21, 1 },
+ { 0, 8, 54, 7, 57, 9, 0 },
+ { 1, 19, 67, 7, 38, 3, 0 },
+ { 0, 3, 36, 7, 68, 20, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 20, 68, 7, 36, 3, 0 },
+ { 0, 3, 38, 7, 67, 19, 1 },
+ { 0, 9, 57, 7, 54, 8, 0 },
+ { 1, 21, 69, 7, 34, 3, 0 },
+ { 0, 4, 39, 7, 67, 17, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 23, 70, 7, 32, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 11, 61, 7, 50, 6, 0 },
+ { 1, 24, 70, 7, 31, 2, 0 },
+ { 0, 4, 43, 7, 65, 15, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 26, 70, 7, 29, 2, 0 },
+ { 0, 5, 45, 7, 64, 14, 0 },
+ { 0, 13, 63, 7, 47, 5, 0 } },
+ .odd = { { 0, 13, 64, 7, 46, 5, 0 },
+ { 0, 2, 28, 7, 69, 27, 2 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 14, 64, 7, 44, 5, 0 },
+ { 0, 2, 30, 7, 70, 25, 1 },
+ { 0, 6, 49, 7, 62, 11, 0 },
+ { 1, 16, 65, 7, 42, 4, 0 },
+ { 0, 2, 32, 7, 69, 24, 1 },
+ { 0, 7, 51, 7, 59, 11, 0 },
+ { 1, 17, 66, 7, 40, 4, 0 },
+ { 0, 2, 33, 7, 70, 22, 1 },
+ { 0, 7, 53, 7, 58, 10, 0 },
+ { 1, 18, 67, 7, 39, 3, 0 },
+ { 0, 3, 35, 7, 68, 21, 1 },
+ { 0, 8, 54, 7, 57, 9, 0 },
+ { 1, 19, 68, 7, 37, 3, 0 },
+ { 0, 3, 37, 7, 68, 19, 1 },
+ { 0, 9, 57, 7, 54, 8, 0 },
+ { 1, 21, 68, 7, 35, 3, 0 },
+ { 0, 3, 39, 7, 67, 18, 1 },
+ { 0, 10, 58, 7, 53, 7, 0 },
+ { 1, 22, 70, 7, 33, 2, 0 },
+ { 0, 4, 40, 7, 66, 17, 1 },
+ { 0, 11, 59, 7, 51, 7, 0 },
+ { 1, 24, 69, 7, 32, 2, 0 },
+ { 0, 4, 42, 7, 65, 16, 1 },
+ { 0, 11, 62, 7, 49, 6, 0 },
+ { 1, 25, 70, 7, 30, 2, 0 },
+ { 0, 5, 44, 7, 64, 14, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 2, 27, 69, 7, 28, 2, 0 },
+ { 0, 5, 46, 7, 64, 13, 0 } } },
+ .ptrn_arr = { { 0x92124249, 0x24248490, 0x48490921, 0x90921242,
+ 0x21242484, 0x42484909, 0x84909212, 0x24 } },
+ .sample_patrn_length = 234,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 86) = 0.271186 */
+ .hor_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 5, 45, 7, 63, 14, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 25, 69, 7, 31, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 38, 7, 67, 19, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 19, 67, 7, 38, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 31, 7, 69, 25, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 14, 63, 7, 45, 5, 0 } },
+ .odd = { { 0, 13, 62, 7, 47, 6, 0 },
+ { 2, 26, 69, 7, 29, 2, 0 },
+ { 0, 5, 43, 7, 64, 15, 1 },
+ { 0, 11, 60, 7, 50, 7, 0 },
+ { 1, 23, 69, 7, 33, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 10, 57, 7, 53, 8, 0 },
+ { 1, 20, 68, 7, 36, 3, 0 },
+ { 0, 3, 36, 7, 68, 20, 1 },
+ { 0, 8, 53, 7, 57, 10, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 0, 2, 33, 7, 69, 23, 1 },
+ { 0, 7, 50, 7, 60, 11, 0 },
+ { 1, 15, 64, 7, 43, 5, 0 },
+ { 0, 2, 29, 7, 69, 26, 2 },
+ { 0, 6, 47, 7, 62, 13, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 5, 45, 7, 63, 14, 1 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 25, 69, 7, 31, 2, 0 },
+ { 0, 4, 41, 7, 66, 16, 1 },
+ { 0, 10, 59, 7, 52, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 38, 7, 67, 19, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 19, 67, 7, 38, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 52, 7, 59, 10, 0 },
+ { 1, 16, 66, 7, 41, 4, 0 },
+ { 0, 2, 31, 7, 69, 25, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 1, 14, 63, 7, 45, 5, 0 } },
+ .odd = { { 0, 13, 62, 7, 47, 6, 0 },
+ { 2, 26, 69, 7, 29, 2, 0 },
+ { 0, 5, 43, 7, 64, 15, 1 },
+ { 0, 11, 60, 7, 50, 7, 0 },
+ { 1, 23, 69, 7, 33, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 10, 57, 7, 53, 8, 0 },
+ { 1, 20, 68, 7, 36, 3, 0 },
+ { 0, 3, 36, 7, 68, 20, 1 },
+ { 0, 8, 53, 7, 57, 10, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 0, 2, 33, 7, 69, 23, 1 },
+ { 0, 7, 50, 7, 60, 11, 0 },
+ { 1, 15, 64, 7, 43, 5, 0 },
+ { 0, 2, 29, 7, 69, 26, 2 },
+ { 0, 6, 47, 7, 62, 13, 0 } } },
+ .ptrn_arr = { { 0x12424849, 0x24849092, 0x49092124, 0x24248 } },
+ .sample_patrn_length = 118,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 87) = 0.268908 */
+ .hor_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 11, 57, 7, 52, 8, 0 },
+ { 1, 21, 67, 7, 36, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 50, 7, 60, 11, 0 },
+ { 1, 14, 63, 7, 45, 5, 0 },
+ { 2, 26, 69, 7, 29, 2, 0 },
+ { 0, 4, 41, 7, 65, 17, 1 },
+ { 0, 10, 57, 7, 53, 8, 0 },
+ { 1, 19, 66, 7, 38, 4, 0 },
+ { 0, 3, 33, 7, 68, 23, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 0, 13, 62, 7, 47, 6, 0 },
+ { 1, 25, 69, 7, 31, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 0, 2, 31, 7, 69, 25, 1 },
+ { 0, 6, 47, 7, 62, 13, 0 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 23, 68, 7, 33, 3, 0 },
+ { 0, 4, 38, 7, 66, 19, 1 },
+ { 0, 8, 53, 7, 57, 10, 0 },
+ { 1, 17, 65, 7, 41, 4, 0 },
+ { 0, 2, 29, 7, 69, 26, 2 },
+ { 0, 5, 45, 7, 63, 14, 1 },
+ { 0, 11, 60, 7, 50, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 36, 7, 67, 21, 1 },
+ { 0, 8, 52, 7, 57, 11, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 } },
+ .odd = { { 0, 13, 62, 7, 47, 6, 0 },
+ { 1, 24, 69, 7, 32, 2, 0 },
+ { 0, 4, 39, 7, 65, 19, 1 },
+ { 0, 9, 54, 7, 56, 9, 0 },
+ { 1, 17, 66, 7, 40, 4, 0 },
+ { 0, 2, 30, 7, 69, 25, 2 },
+ { 0, 5, 46, 7, 62, 14, 1 },
+ { 0, 12, 60, 7, 49, 7, 0 },
+ { 1, 23, 67, 7, 34, 3, 0 },
+ { 0, 3, 37, 7, 67, 20, 1 },
+ { 0, 8, 52, 7, 58, 10, 0 },
+ { 1, 16, 64, 7, 42, 5, 0 },
+ { 0, 2, 29, 7, 68, 27, 2 },
+ { 0, 5, 44, 7, 63, 15, 1 },
+ { 0, 11, 59, 7, 51, 7, 0 },
+ { 1, 21, 68, 7, 35, 3, 0 },
+ { 0, 3, 35, 7, 68, 21, 1 },
+ { 0, 7, 51, 7, 59, 11, 0 },
+ { 1, 15, 63, 7, 44, 5, 0 },
+ { 2, 27, 68, 7, 29, 2, 0 },
+ { 0, 5, 42, 7, 64, 16, 1 },
+ { 0, 10, 58, 7, 52, 8, 0 },
+ { 1, 20, 67, 7, 37, 3, 0 },
+ { 0, 3, 34, 7, 67, 23, 1 },
+ { 0, 7, 49, 7, 60, 12, 0 },
+ { 1, 14, 62, 7, 46, 5, 0 },
+ { 2, 25, 69, 7, 30, 2, 0 },
+ { 0, 4, 40, 7, 66, 17, 1 },
+ { 0, 9, 56, 7, 54, 9, 0 },
+ { 1, 19, 65, 7, 39, 4, 0 },
+ { 0, 2, 32, 7, 69, 24, 1 },
+ { 0, 6, 47, 7, 62, 13, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 11, 57, 7, 52, 8, 0 },
+ { 1, 21, 67, 7, 36, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 50, 7, 60, 11, 0 },
+ { 1, 14, 63, 7, 45, 5, 0 },
+ { 2, 26, 69, 7, 29, 2, 0 },
+ { 0, 4, 41, 7, 65, 17, 1 },
+ { 0, 10, 57, 7, 53, 8, 0 },
+ { 1, 19, 66, 7, 38, 4, 0 },
+ { 0, 3, 33, 7, 68, 23, 1 },
+ { 0, 6, 48, 7, 62, 12, 0 },
+ { 0, 13, 62, 7, 47, 6, 0 },
+ { 1, 25, 69, 7, 31, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 0, 2, 31, 7, 69, 25, 1 },
+ { 0, 6, 47, 7, 62, 13, 0 },
+ { 0, 12, 62, 7, 48, 6, 0 },
+ { 1, 23, 68, 7, 33, 3, 0 },
+ { 0, 4, 38, 7, 66, 19, 1 },
+ { 0, 8, 53, 7, 57, 10, 0 },
+ { 1, 17, 65, 7, 41, 4, 0 },
+ { 0, 2, 29, 7, 69, 26, 2 },
+ { 0, 5, 45, 7, 63, 14, 1 },
+ { 0, 11, 60, 7, 50, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 36, 7, 67, 21, 1 },
+ { 0, 8, 52, 7, 57, 11, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 } },
+ .odd = { { 0, 13, 62, 7, 47, 6, 0 },
+ { 1, 24, 69, 7, 32, 2, 0 },
+ { 0, 4, 39, 7, 65, 19, 1 },
+ { 0, 9, 54, 7, 56, 9, 0 },
+ { 1, 17, 66, 7, 40, 4, 0 },
+ { 0, 2, 30, 7, 69, 25, 2 },
+ { 0, 5, 46, 7, 62, 14, 1 },
+ { 0, 12, 60, 7, 49, 7, 0 },
+ { 1, 23, 67, 7, 34, 3, 0 },
+ { 0, 3, 37, 7, 67, 20, 1 },
+ { 0, 8, 52, 7, 58, 10, 0 },
+ { 1, 16, 64, 7, 42, 5, 0 },
+ { 0, 2, 29, 7, 68, 27, 2 },
+ { 0, 5, 44, 7, 63, 15, 1 },
+ { 0, 11, 59, 7, 51, 7, 0 },
+ { 1, 21, 68, 7, 35, 3, 0 },
+ { 0, 3, 35, 7, 68, 21, 1 },
+ { 0, 7, 51, 7, 59, 11, 0 },
+ { 1, 15, 63, 7, 44, 5, 0 },
+ { 2, 27, 68, 7, 29, 2, 0 },
+ { 0, 5, 42, 7, 64, 16, 1 },
+ { 0, 10, 58, 7, 52, 8, 0 },
+ { 1, 20, 67, 7, 37, 3, 0 },
+ { 0, 3, 34, 7, 67, 23, 1 },
+ { 0, 7, 49, 7, 60, 12, 0 },
+ { 1, 14, 62, 7, 46, 5, 0 },
+ { 2, 25, 69, 7, 30, 2, 0 },
+ { 0, 4, 40, 7, 66, 17, 1 },
+ { 0, 9, 56, 7, 54, 9, 0 },
+ { 1, 19, 65, 7, 39, 4, 0 },
+ { 0, 2, 32, 7, 69, 24, 1 },
+ { 0, 6, 47, 7, 62, 13, 0 } } },
+ .ptrn_arr = { { 0x12424849, 0x84909092, 0x9212424, 0x42484909,
+ 0x90921212, 0x21242484, 0x48490921, 0x242 } },
+ .sample_patrn_length = 238,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 88) = 0.266667 */
+ .hor_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 41, 7, 65, 17, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 17, 65, 7, 41, 4, 0 } },
+ .odd = { { 0, 13, 60, 7, 48, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 48, 7, 60, 13, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 41, 7, 65, 17, 1 },
+ { 0, 9, 55, 7, 55, 9, 0 },
+ { 1, 17, 65, 7, 41, 4, 0 } },
+ .odd = { { 0, 13, 60, 7, 48, 7, 0 },
+ { 1, 22, 68, 7, 34, 3, 0 },
+ { 0, 3, 34, 7, 68, 22, 1 },
+ { 0, 7, 48, 7, 60, 13, 0 } } },
+ .ptrn_arr = { { 0x2424849 } },
+ .sample_patrn_length = 30,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 89) = 0.264463 */
+ .hor_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 8, 51, 7, 58, 11, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 22, 67, 7, 35, 3, 0 },
+ { 0, 3, 33, 7, 67, 24, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 56, 7, 53, 9, 0 },
+ { 1, 17, 64, 7, 41, 5, 0 },
+ { 2, 27, 67, 7, 30, 2, 0 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 50, 7, 59, 12, 0 },
+ { 0, 13, 60, 7, 48, 7, 0 },
+ { 1, 21, 67, 7, 36, 3, 0 },
+ { 0, 3, 31, 7, 67, 25, 2 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 9, 56, 7, 54, 9, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 },
+ { 2, 25, 67, 7, 31, 3, 0 },
+ { 0, 3, 36, 7, 67, 21, 1 },
+ { 0, 7, 48, 7, 60, 13, 0 },
+ { 0, 12, 59, 7, 50, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 0, 2, 30, 7, 67, 27, 2 },
+ { 0, 5, 41, 7, 64, 17, 1 },
+ { 0, 9, 53, 7, 56, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 24, 67, 7, 33, 3, 0 },
+ { 0, 3, 35, 7, 67, 22, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 11, 58, 7, 51, 8, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 } },
+ .odd = { { 0, 12, 60, 7, 49, 7, 0 },
+ { 1, 20, 66, 7, 37, 4, 0 },
+ { 0, 2, 31, 7, 67, 26, 2 },
+ { 0, 5, 42, 7, 63, 17, 1 },
+ { 0, 9, 54, 7, 55, 10, 0 },
+ { 1, 16, 62, 7, 44, 5, 0 },
+ { 2, 24, 67, 7, 32, 3, 0 },
+ { 0, 3, 35, 7, 67, 22, 1 },
+ { 0, 6, 47, 7, 61, 13, 1 },
+ { 0, 12, 58, 7, 50, 8, 0 },
+ { 1, 19, 65, 7, 39, 4, 0 },
+ { 0, 2, 29, 7, 68, 27, 2 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 8, 52, 7, 57, 11, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 23, 67, 7, 34, 3, 0 },
+ { 0, 3, 34, 7, 67, 23, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 11, 57, 7, 52, 8, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 2, 27, 68, 7, 29, 2, 0 },
+ { 0, 4, 39, 7, 65, 19, 1 },
+ { 0, 8, 50, 7, 58, 12, 0 },
+ { 1, 13, 61, 7, 47, 6, 0 },
+ { 1, 22, 67, 7, 35, 3, 0 },
+ { 0, 3, 32, 7, 67, 24, 2 },
+ { 0, 5, 44, 7, 62, 16, 1 },
+ { 0, 10, 55, 7, 54, 9, 0 },
+ { 1, 17, 63, 7, 42, 5, 0 },
+ { 2, 26, 67, 7, 31, 2, 0 },
+ { 0, 4, 37, 7, 66, 20, 1 },
+ { 0, 7, 49, 7, 60, 12, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 8, 51, 7, 58, 11, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 22, 67, 7, 35, 3, 0 },
+ { 0, 3, 33, 7, 67, 24, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 56, 7, 53, 9, 0 },
+ { 1, 17, 64, 7, 41, 5, 0 },
+ { 2, 27, 67, 7, 30, 2, 0 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 50, 7, 59, 12, 0 },
+ { 0, 13, 60, 7, 48, 7, 0 },
+ { 1, 21, 67, 7, 36, 3, 0 },
+ { 0, 3, 31, 7, 67, 25, 2 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 9, 56, 7, 54, 9, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 },
+ { 2, 25, 67, 7, 31, 3, 0 },
+ { 0, 3, 36, 7, 67, 21, 1 },
+ { 0, 7, 48, 7, 60, 13, 0 },
+ { 0, 12, 59, 7, 50, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 0, 2, 30, 7, 67, 27, 2 },
+ { 0, 5, 41, 7, 64, 17, 1 },
+ { 0, 9, 53, 7, 56, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 24, 67, 7, 33, 3, 0 },
+ { 0, 3, 35, 7, 67, 22, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 11, 58, 7, 51, 8, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 } },
+ .odd = { { 0, 12, 60, 7, 49, 7, 0 },
+ { 1, 20, 66, 7, 37, 4, 0 },
+ { 0, 2, 31, 7, 67, 26, 2 },
+ { 0, 5, 42, 7, 63, 17, 1 },
+ { 0, 9, 54, 7, 55, 10, 0 },
+ { 1, 16, 62, 7, 44, 5, 0 },
+ { 2, 24, 67, 7, 32, 3, 0 },
+ { 0, 3, 35, 7, 67, 22, 1 },
+ { 0, 6, 47, 7, 61, 13, 1 },
+ { 0, 12, 58, 7, 50, 8, 0 },
+ { 1, 19, 65, 7, 39, 4, 0 },
+ { 0, 2, 29, 7, 68, 27, 2 },
+ { 0, 4, 40, 7, 65, 18, 1 },
+ { 0, 8, 52, 7, 57, 11, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 23, 67, 7, 34, 3, 0 },
+ { 0, 3, 34, 7, 67, 23, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 11, 57, 7, 52, 8, 0 },
+ { 1, 18, 65, 7, 40, 4, 0 },
+ { 2, 27, 68, 7, 29, 2, 0 },
+ { 0, 4, 39, 7, 65, 19, 1 },
+ { 0, 8, 50, 7, 58, 12, 0 },
+ { 1, 13, 61, 7, 47, 6, 0 },
+ { 1, 22, 67, 7, 35, 3, 0 },
+ { 0, 3, 32, 7, 67, 24, 2 },
+ { 0, 5, 44, 7, 62, 16, 1 },
+ { 0, 10, 55, 7, 54, 9, 0 },
+ { 1, 17, 63, 7, 42, 5, 0 },
+ { 2, 26, 67, 7, 31, 2, 0 },
+ { 0, 4, 37, 7, 66, 20, 1 },
+ { 0, 7, 49, 7, 60, 12, 0 } } },
+ .ptrn_arr = { { 0x42424849, 0x90921212, 0x24248490, 0x9212124,
+ 0x48484909, 0x92121242, 0x84849090, 0x2424 } },
+ .sample_patrn_length = 242,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 90) = 0.262295 */
+ .hor_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 48, 7, 59, 13, 1 },
+ { 0, 11, 58, 7, 51, 8, 0 },
+ { 1, 17, 64, 7, 41, 5, 0 },
+ { 2, 25, 67, 7, 31, 3, 0 },
+ { 0, 3, 35, 7, 66, 23, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 23, 66, 7, 35, 3, 0 },
+ { 0, 3, 31, 7, 67, 25, 2 },
+ { 0, 5, 41, 7, 64, 17, 1 },
+ { 0, 8, 51, 7, 58, 11, 0 },
+ { 1, 13, 59, 7, 48, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 } },
+ .odd = { { 0, 12, 59, 7, 49, 8, 0 },
+ { 1, 19, 64, 7, 40, 4, 0 },
+ { 2, 27, 67, 7, 30, 2, 0 },
+ { 0, 4, 36, 7, 66, 21, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 10, 56, 7, 53, 9, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 },
+ { 2, 24, 66, 7, 33, 3, 0 },
+ { 0, 3, 33, 7, 66, 24, 2 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 9, 53, 7, 56, 10, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 21, 66, 7, 36, 4, 0 },
+ { 0, 2, 30, 7, 67, 27, 2 },
+ { 0, 4, 40, 7, 64, 19, 1 },
+ { 0, 8, 49, 7, 59, 12, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 28, 68, 7, 28, 2, 0 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 48, 7, 59, 13, 1 },
+ { 0, 11, 58, 7, 51, 8, 0 },
+ { 1, 17, 64, 7, 41, 5, 0 },
+ { 2, 25, 67, 7, 31, 3, 0 },
+ { 0, 3, 35, 7, 66, 23, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 23, 66, 7, 35, 3, 0 },
+ { 0, 3, 31, 7, 67, 25, 2 },
+ { 0, 5, 41, 7, 64, 17, 1 },
+ { 0, 8, 51, 7, 58, 11, 0 },
+ { 1, 13, 59, 7, 48, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 } },
+ .odd = { { 0, 12, 59, 7, 49, 8, 0 },
+ { 1, 19, 64, 7, 40, 4, 0 },
+ { 2, 27, 67, 7, 30, 2, 0 },
+ { 0, 4, 36, 7, 66, 21, 1 },
+ { 0, 6, 46, 7, 61, 14, 1 },
+ { 0, 10, 56, 7, 53, 9, 0 },
+ { 1, 16, 63, 7, 43, 5, 0 },
+ { 2, 24, 66, 7, 33, 3, 0 },
+ { 0, 3, 33, 7, 66, 24, 2 },
+ { 0, 5, 43, 7, 63, 16, 1 },
+ { 0, 9, 53, 7, 56, 10, 0 },
+ { 1, 14, 61, 7, 46, 6, 0 },
+ { 1, 21, 66, 7, 36, 4, 0 },
+ { 0, 2, 30, 7, 67, 27, 2 },
+ { 0, 4, 40, 7, 64, 19, 1 },
+ { 0, 8, 49, 7, 59, 12, 0 } } },
+ .ptrn_arr = { { 0x42484849, 0x92121242, 0x84849090, 0x242424 } },
+ .sample_patrn_length = 122,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 91) = 0.260163 */
+ .hor_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 4, 36, 7, 66, 21, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 9, 52, 7, 56, 11, 0 },
+ { 1, 13, 59, 7, 48, 7, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 33, 7, 66, 24, 2 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 8, 49, 7, 59, 12, 0 },
+ { 0, 12, 57, 7, 51, 8, 0 },
+ { 1, 17, 62, 7, 43, 5, 0 },
+ { 1, 23, 66, 7, 35, 3, 0 },
+ { 0, 3, 30, 7, 66, 27, 2 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 46, 7, 60, 14, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 14, 60, 7, 46, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 2, 27, 66, 7, 30, 3, 0 },
+ { 0, 3, 35, 7, 66, 23, 1 },
+ { 0, 5, 43, 7, 62, 17, 1 },
+ { 0, 8, 51, 7, 57, 12, 0 },
+ { 0, 12, 59, 7, 49, 8, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 2, 24, 66, 7, 33, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 7, 48, 7, 59, 13, 1 },
+ { 0, 11, 56, 7, 52, 9, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 21, 66, 7, 36, 4, 0 } },
+ .odd = { { 0, 12, 58, 7, 50, 8, 0 },
+ { 1, 17, 63, 7, 42, 5, 0 },
+ { 2, 23, 66, 7, 34, 3, 0 },
+ { 0, 3, 31, 7, 66, 26, 2 },
+ { 0, 4, 39, 7, 64, 20, 1 },
+ { 0, 7, 47, 7, 59, 14, 1 },
+ { 0, 10, 55, 7, 53, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 21, 65, 7, 37, 4, 0 },
+ { 2, 28, 67, 7, 29, 2, 0 },
+ { 0, 4, 36, 7, 65, 22, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 9, 52, 7, 56, 11, 0 },
+ { 1, 13, 58, 7, 49, 7, 0 },
+ { 1, 18, 64, 7, 40, 5, 0 },
+ { 2, 25, 66, 7, 32, 3, 0 },
+ { 0, 3, 32, 7, 66, 25, 2 },
+ { 0, 5, 40, 7, 64, 18, 1 },
+ { 0, 7, 49, 7, 58, 13, 1 },
+ { 0, 11, 56, 7, 52, 9, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 22, 65, 7, 36, 4, 0 },
+ { 0, 2, 29, 7, 67, 28, 2 },
+ { 0, 4, 37, 7, 65, 21, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 53, 7, 55, 10, 0 },
+ { 1, 14, 59, 7, 47, 7, 0 },
+ { 1, 20, 64, 7, 39, 4, 0 },
+ { 2, 26, 66, 7, 31, 3, 0 },
+ { 0, 3, 34, 7, 66, 23, 2 },
+ { 0, 5, 42, 7, 63, 17, 1 },
+ { 0, 8, 50, 7, 58, 12, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 4, 36, 7, 66, 21, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 9, 52, 7, 56, 11, 0 },
+ { 1, 13, 59, 7, 48, 7, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 33, 7, 66, 24, 2 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 8, 49, 7, 59, 12, 0 },
+ { 0, 12, 57, 7, 51, 8, 0 },
+ { 1, 17, 62, 7, 43, 5, 0 },
+ { 1, 23, 66, 7, 35, 3, 0 },
+ { 0, 3, 30, 7, 66, 27, 2 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 7, 46, 7, 60, 14, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 14, 60, 7, 46, 7, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 2, 27, 66, 7, 30, 3, 0 },
+ { 0, 3, 35, 7, 66, 23, 1 },
+ { 0, 5, 43, 7, 62, 17, 1 },
+ { 0, 8, 51, 7, 57, 12, 0 },
+ { 0, 12, 59, 7, 49, 8, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 2, 24, 66, 7, 33, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 7, 48, 7, 59, 13, 1 },
+ { 0, 11, 56, 7, 52, 9, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 21, 66, 7, 36, 4, 0 } },
+ .odd = { { 0, 12, 58, 7, 50, 8, 0 },
+ { 1, 17, 63, 7, 42, 5, 0 },
+ { 2, 23, 66, 7, 34, 3, 0 },
+ { 0, 3, 31, 7, 66, 26, 2 },
+ { 0, 4, 39, 7, 64, 20, 1 },
+ { 0, 7, 47, 7, 59, 14, 1 },
+ { 0, 10, 55, 7, 53, 10, 0 },
+ { 1, 15, 61, 7, 45, 6, 0 },
+ { 1, 21, 65, 7, 37, 4, 0 },
+ { 2, 28, 67, 7, 29, 2, 0 },
+ { 0, 4, 36, 7, 65, 22, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 9, 52, 7, 56, 11, 0 },
+ { 1, 13, 58, 7, 49, 7, 0 },
+ { 1, 18, 64, 7, 40, 5, 0 },
+ { 2, 25, 66, 7, 32, 3, 0 },
+ { 0, 3, 32, 7, 66, 25, 2 },
+ { 0, 5, 40, 7, 64, 18, 1 },
+ { 0, 7, 49, 7, 58, 13, 1 },
+ { 0, 11, 56, 7, 52, 9, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 22, 65, 7, 36, 4, 0 },
+ { 0, 2, 29, 7, 67, 28, 2 },
+ { 0, 4, 37, 7, 65, 21, 1 },
+ { 0, 6, 45, 7, 61, 15, 1 },
+ { 0, 10, 53, 7, 55, 10, 0 },
+ { 1, 14, 59, 7, 47, 7, 0 },
+ { 1, 20, 64, 7, 39, 4, 0 },
+ { 2, 26, 66, 7, 31, 3, 0 },
+ { 0, 3, 34, 7, 66, 23, 2 },
+ { 0, 5, 42, 7, 63, 17, 1 },
+ { 0, 8, 50, 7, 58, 12, 0 } } },
+ .ptrn_arr = { { 0x42484849, 0x12124242, 0x90909212, 0x24848484,
+ 0x21242424, 0x9090921, 0x48484849, 0x24242 } },
+ .sample_patrn_length = 246,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 92) = 0.258065 */
+ .hor_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 48, 7, 58, 14, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 14, 58, 7, 48, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 } },
+ .odd = { { 0, 12, 56, 7, 51, 9, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 9, 51, 7, 56, 12, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 48, 7, 58, 14, 1 },
+ { 0, 10, 54, 7, 54, 10, 0 },
+ { 1, 14, 58, 7, 48, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 } },
+ .odd = { { 0, 12, 56, 7, 51, 9, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 20, 65, 7, 38, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 38, 7, 65, 20, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 9, 51, 7, 56, 12, 0 } } },
+ .ptrn_arr = { { 0x48484849, 0x2424242 } },
+ .sample_patrn_length = 62,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 93) = 0.256 */
+ .hor_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 38, 7, 64, 21, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 1, 22, 65, 7, 36, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 30, 7, 66, 27, 2 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 8, 49, 7, 57, 13, 1 },
+ { 0, 10, 55, 7, 53, 10, 0 },
+ { 1, 13, 57, 7, 49, 8, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 },
+ { 2, 27, 66, 7, 30, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 36, 7, 65, 22, 1 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 21, 64, 7, 38, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 } },
+ .odd = { { 0, 12, 56, 7, 51, 9, 0 },
+ { 1, 14, 59, 7, 47, 7, 0 },
+ { 1, 18, 61, 7, 42, 6, 0 },
+ { 1, 21, 65, 7, 37, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 },
+ { 0, 3, 30, 7, 65, 28, 2 },
+ { 0, 3, 34, 7, 65, 24, 2 },
+ { 0, 5, 39, 7, 63, 20, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 8, 48, 7, 58, 13, 1 },
+ { 0, 10, 53, 7, 54, 11, 0 },
+ { 1, 12, 57, 7, 50, 8, 0 },
+ { 1, 15, 60, 7, 45, 7, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 23, 63, 7, 36, 4, 0 },
+ { 2, 27, 65, 7, 31, 3, 0 },
+ { 0, 3, 31, 7, 65, 27, 2 },
+ { 0, 4, 36, 7, 63, 23, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 7, 45, 7, 60, 15, 1 },
+ { 0, 8, 50, 7, 57, 12, 1 },
+ { 0, 11, 54, 7, 53, 10, 0 },
+ { 1, 13, 58, 7, 48, 8, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 20, 63, 7, 39, 5, 0 },
+ { 2, 24, 65, 7, 34, 3, 0 },
+ { 2, 28, 65, 7, 30, 3, 0 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 37, 7, 65, 21, 1 },
+ { 0, 6, 42, 7, 61, 18, 1 },
+ { 0, 7, 47, 7, 59, 14, 1 },
+ { 0, 9, 51, 7, 56, 12, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 2, 29, 66, 7, 29, 2, 0 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 38, 7, 64, 21, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 1, 22, 65, 7, 36, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 0, 3, 30, 7, 66, 27, 2 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 8, 49, 7, 57, 13, 1 },
+ { 0, 10, 55, 7, 53, 10, 0 },
+ { 1, 13, 57, 7, 49, 8, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 },
+ { 2, 27, 66, 7, 30, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 36, 7, 65, 22, 1 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 21, 64, 7, 38, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 } },
+ .odd = { { 0, 12, 56, 7, 51, 9, 0 },
+ { 1, 14, 59, 7, 47, 7, 0 },
+ { 1, 18, 61, 7, 42, 6, 0 },
+ { 1, 21, 65, 7, 37, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 },
+ { 0, 3, 30, 7, 65, 28, 2 },
+ { 0, 3, 34, 7, 65, 24, 2 },
+ { 0, 5, 39, 7, 63, 20, 1 },
+ { 0, 6, 44, 7, 61, 16, 1 },
+ { 0, 8, 48, 7, 58, 13, 1 },
+ { 0, 10, 53, 7, 54, 11, 0 },
+ { 1, 12, 57, 7, 50, 8, 0 },
+ { 1, 15, 60, 7, 45, 7, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 2, 23, 63, 7, 36, 4, 0 },
+ { 2, 27, 65, 7, 31, 3, 0 },
+ { 0, 3, 31, 7, 65, 27, 2 },
+ { 0, 4, 36, 7, 63, 23, 2 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 7, 45, 7, 60, 15, 1 },
+ { 0, 8, 50, 7, 57, 12, 1 },
+ { 0, 11, 54, 7, 53, 10, 0 },
+ { 1, 13, 58, 7, 48, 8, 0 },
+ { 1, 16, 61, 7, 44, 6, 0 },
+ { 1, 20, 63, 7, 39, 5, 0 },
+ { 2, 24, 65, 7, 34, 3, 0 },
+ { 2, 28, 65, 7, 30, 3, 0 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 37, 7, 65, 21, 1 },
+ { 0, 6, 42, 7, 61, 18, 1 },
+ { 0, 7, 47, 7, 59, 14, 1 },
+ { 0, 9, 51, 7, 56, 12, 0 } } },
+ .ptrn_arr = { { 0x48484849, 0x42424248, 0x12124242, 0x92121212,
+ 0x90909090, 0x84848490, 0x24248484, 0x242424 } },
+ .sample_patrn_length = 250,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 94) = 0.253968 */
+ .hor_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 38, 7, 63, 21, 1 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 44, 7, 60, 16, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 11, 53, 7, 53, 11, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 16, 60, 7, 44, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 1, 21, 63, 7, 38, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 } },
+ .odd = { { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 13, 57, 7, 49, 8, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 20, 62, 7, 40, 5, 0 },
+ { 2, 22, 63, 7, 37, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 },
+ { 2, 28, 65, 7, 30, 3, 0 },
+ { 0, 3, 30, 7, 65, 28, 2 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 37, 7, 63, 22, 2 },
+ { 0, 5, 40, 7, 62, 20, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 8, 49, 7, 57, 13, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 35, 7, 64, 23, 2 },
+ { 0, 5, 38, 7, 63, 21, 1 },
+ { 0, 5, 41, 7, 63, 18, 1 },
+ { 0, 7, 44, 7, 60, 16, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 11, 53, 7, 53, 11, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 16, 60, 7, 44, 7, 0 },
+ { 1, 18, 63, 7, 41, 5, 0 },
+ { 1, 21, 63, 7, 38, 5, 0 },
+ { 2, 23, 64, 7, 35, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 } },
+ .odd = { { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 13, 57, 7, 49, 8, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 20, 62, 7, 40, 5, 0 },
+ { 2, 22, 63, 7, 37, 4, 0 },
+ { 2, 25, 65, 7, 33, 3, 0 },
+ { 2, 28, 65, 7, 30, 3, 0 },
+ { 0, 3, 30, 7, 65, 28, 2 },
+ { 0, 3, 33, 7, 65, 25, 2 },
+ { 0, 4, 37, 7, 63, 22, 2 },
+ { 0, 5, 40, 7, 62, 20, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 8, 49, 7, 57, 13, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 } } },
+ .ptrn_arr = { { 0x48484849, 0x48484848, 0x42424242, 0x2424242 } },
+ .sample_patrn_length = 126,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 95) = 0.251969 */
+ .hor_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 },
+ { 0, 3, 31, 7, 64, 28, 2 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 34, 7, 63, 25, 2 },
+ { 0, 4, 35, 7, 63, 24, 2 },
+ { 0, 4, 37, 7, 63, 22, 2 },
+ { 0, 5, 38, 7, 63, 21, 1 },
+ { 0, 5, 40, 7, 62, 20, 1 },
+ { 0, 6, 41, 7, 61, 19, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 44, 7, 60, 16, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 9, 49, 7, 56, 13, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 10, 51, 7, 54, 12, 1 },
+ { 0, 11, 53, 7, 53, 11, 0 },
+ { 1, 12, 54, 7, 51, 10, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 13, 56, 7, 49, 9, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 16, 60, 7, 44, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 19, 61, 7, 41, 6, 0 },
+ { 1, 20, 62, 7, 40, 5, 0 },
+ { 1, 21, 63, 7, 38, 5, 0 },
+ { 2, 22, 63, 7, 37, 4, 0 },
+ { 2, 24, 63, 7, 35, 4, 0 },
+ { 2, 25, 63, 7, 34, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 2, 28, 64, 7, 31, 3, 0 } },
+ .odd = { { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 12, 54, 7, 51, 10, 0 },
+ { 1, 13, 56, 7, 49, 9, 0 },
+ { 1, 14, 57, 7, 48, 8, 0 },
+ { 1, 15, 58, 7, 46, 8, 0 },
+ { 1, 16, 59, 7, 45, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 18, 61, 7, 42, 6, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 1, 20, 63, 7, 39, 5, 0 },
+ { 2, 22, 62, 7, 37, 5, 0 },
+ { 2, 23, 63, 7, 36, 4, 0 },
+ { 2, 24, 64, 7, 34, 4, 0 },
+ { 2, 26, 64, 7, 33, 3, 0 },
+ { 2, 27, 65, 7, 31, 3, 0 },
+ { 3, 28, 64, 7, 30, 3, 0 },
+ { 0, 3, 30, 7, 64, 28, 3 },
+ { 0, 3, 31, 7, 65, 27, 2 },
+ { 0, 3, 33, 7, 64, 26, 2 },
+ { 0, 4, 34, 7, 64, 24, 2 },
+ { 0, 4, 36, 7, 63, 23, 2 },
+ { 0, 5, 37, 7, 62, 22, 2 },
+ { 0, 5, 39, 7, 63, 20, 1 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 6, 42, 7, 61, 18, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 45, 7, 59, 16, 1 },
+ { 0, 8, 46, 7, 58, 15, 1 },
+ { 0, 8, 48, 7, 57, 14, 1 },
+ { 0, 9, 49, 7, 56, 13, 1 },
+ { 0, 10, 51, 7, 54, 12, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 },
+ { 0, 3, 31, 7, 64, 28, 2 },
+ { 0, 3, 32, 7, 65, 26, 2 },
+ { 0, 4, 34, 7, 63, 25, 2 },
+ { 0, 4, 35, 7, 63, 24, 2 },
+ { 0, 4, 37, 7, 63, 22, 2 },
+ { 0, 5, 38, 7, 63, 21, 1 },
+ { 0, 5, 40, 7, 62, 20, 1 },
+ { 0, 6, 41, 7, 61, 19, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 44, 7, 60, 16, 1 },
+ { 0, 7, 46, 7, 59, 15, 1 },
+ { 0, 8, 47, 7, 58, 14, 1 },
+ { 0, 9, 49, 7, 56, 13, 1 },
+ { 0, 9, 50, 7, 56, 12, 1 },
+ { 0, 10, 51, 7, 54, 12, 1 },
+ { 0, 11, 53, 7, 53, 11, 0 },
+ { 1, 12, 54, 7, 51, 10, 0 },
+ { 1, 12, 56, 7, 50, 9, 0 },
+ { 1, 13, 56, 7, 49, 9, 0 },
+ { 1, 14, 58, 7, 47, 8, 0 },
+ { 1, 15, 59, 7, 46, 7, 0 },
+ { 1, 16, 60, 7, 44, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 19, 61, 7, 41, 6, 0 },
+ { 1, 20, 62, 7, 40, 5, 0 },
+ { 1, 21, 63, 7, 38, 5, 0 },
+ { 2, 22, 63, 7, 37, 4, 0 },
+ { 2, 24, 63, 7, 35, 4, 0 },
+ { 2, 25, 63, 7, 34, 4, 0 },
+ { 2, 26, 65, 7, 32, 3, 0 },
+ { 2, 28, 64, 7, 31, 3, 0 } },
+ .odd = { { 0, 11, 55, 7, 52, 10, 0 },
+ { 1, 12, 54, 7, 51, 10, 0 },
+ { 1, 13, 56, 7, 49, 9, 0 },
+ { 1, 14, 57, 7, 48, 8, 0 },
+ { 1, 15, 58, 7, 46, 8, 0 },
+ { 1, 16, 59, 7, 45, 7, 0 },
+ { 1, 17, 61, 7, 43, 6, 0 },
+ { 1, 18, 61, 7, 42, 6, 0 },
+ { 1, 19, 63, 7, 40, 5, 0 },
+ { 1, 20, 63, 7, 39, 5, 0 },
+ { 2, 22, 62, 7, 37, 5, 0 },
+ { 2, 23, 63, 7, 36, 4, 0 },
+ { 2, 24, 64, 7, 34, 4, 0 },
+ { 2, 26, 64, 7, 33, 3, 0 },
+ { 2, 27, 65, 7, 31, 3, 0 },
+ { 3, 28, 64, 7, 30, 3, 0 },
+ { 0, 3, 30, 7, 64, 28, 3 },
+ { 0, 3, 31, 7, 65, 27, 2 },
+ { 0, 3, 33, 7, 64, 26, 2 },
+ { 0, 4, 34, 7, 64, 24, 2 },
+ { 0, 4, 36, 7, 63, 23, 2 },
+ { 0, 5, 37, 7, 62, 22, 2 },
+ { 0, 5, 39, 7, 63, 20, 1 },
+ { 0, 5, 40, 7, 63, 19, 1 },
+ { 0, 6, 42, 7, 61, 18, 1 },
+ { 0, 6, 43, 7, 61, 17, 1 },
+ { 0, 7, 45, 7, 59, 16, 1 },
+ { 0, 8, 46, 7, 58, 15, 1 },
+ { 0, 8, 48, 7, 57, 14, 1 },
+ { 0, 9, 49, 7, 56, 13, 1 },
+ { 0, 10, 51, 7, 54, 12, 1 },
+ { 0, 10, 52, 7, 55, 11, 0 } } },
+ .ptrn_arr = { { 0x48484849, 0x48484848, 0x48484848, 0x48484848,
+ 0x42424242, 0x42424242, 0x42424242, 0x2424242 } },
+ .sample_patrn_length = 254,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+}, {
+ /* Scale factor 32 / (32 + 96) = 0.25 */
+ .hor_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 } },
+ .odd = { { 0, 11, 53, 7, 53, 11, 0 } } },
+ .ver_phase_arr = {
+ .even = { { 3, 29, 64, 7, 29, 3, 0 } },
+ .odd = { { 0, 11, 53, 7, 53, 11, 0 } } },
+ .ptrn_arr = { { 0x9 } },
+ .sample_patrn_length = 8,
+ .hor_ds_en = 1,
+ .ver_ds_en = 1
+} };
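+
+/*
+ * Layout note (inferred from the tables above, not from hardware docs):
+ * each phase entry is { c0, c1, c2, shift, c3, c4, c5 }, a 6-tap kernel
+ * whose taps sum to 1 << shift (128 for shift == 7), i.e. unity gain.
+ * ptrn_arr appears to pack one bit per input sample position, so it
+ * occupies DIV_ROUND_UP(sample_patrn_length, 32) 32-bit words.
+ */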
+
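+/*
+ * The coefficients below are declared as floating-point weights scaled by
+ * IMGU_SCALER_FP (1.0 in fixed point, see ipu3-tables.h) and truncated to
+ * s32 at compile time; e.g. IMGU_SCALER_FP * 0.25 evaluates to 1 << 29.
+ * This reading of the format is inferred from the values, not documented.
+ */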
+const s32 ipu3_css_downscale_4taps[IMGU_SCALER_DOWNSCALE_4TAPS_LEN] = {
+ IMGU_SCALER_FP * -0.000000000000000,
+ IMGU_SCALER_FP * -0.000249009327023,
+ IMGU_SCALER_FP * -0.001022241683322,
+ IMGU_SCALER_FP * -0.002352252699175,
+ IMGU_SCALER_FP * -0.004261594242362,
+ IMGU_SCALER_FP * -0.006761648795689,
+ IMGU_SCALER_FP * -0.009851589454154,
+ IMGU_SCALER_FP * -0.013517488475013,
+ IMGU_SCALER_FP * -0.017731595701026,
+ IMGU_SCALER_FP * -0.022451806160682,
+ IMGU_SCALER_FP * -0.027621333752351,
+ IMGU_SCALER_FP * -0.033168605172067,
+ IMGU_SCALER_FP * -0.039007385183627,
+ IMGU_SCALER_FP * -0.045037140997445,
+ IMGU_SCALER_FP * -0.051143649969349,
+ IMGU_SCALER_FP * -0.057199851105019,
+ IMGU_SCALER_FP * -0.063066937016941,
+ IMGU_SCALER_FP * -0.068595679088417,
+ IMGU_SCALER_FP * -0.073627974715370,
+ IMGU_SCALER_FP * -0.077998601684588,
+ IMGU_SCALER_FP * -0.081537161069780,
+ IMGU_SCALER_FP * -0.084070186546763,
+ IMGU_SCALER_FP * -0.085423394806327,
+ IMGU_SCALER_FP * -0.085424048835192,
+ IMGU_SCALER_FP * -0.083903403294908,
+ IMGU_SCALER_FP * -0.080699199103829,
+ IMGU_SCALER_FP * -0.075658172660608,
+ IMGU_SCALER_FP * -0.068638543974523,
+ IMGU_SCALER_FP * -0.059512447316781,
+ IMGU_SCALER_FP * -0.048168267897836,
+ IMGU_SCALER_FP * -0.034512848520921,
+ IMGU_SCALER_FP * -0.018473531164409,
+ IMGU_SCALER_FP * 0.000000000000000,
+ IMGU_SCALER_FP * 0.020934105554674,
+ IMGU_SCALER_FP * 0.044329836544650,
+ IMGU_SCALER_FP * 0.070161864654994,
+ IMGU_SCALER_FP * 0.098377719033862,
+ IMGU_SCALER_FP * 0.128897348012514,
+ IMGU_SCALER_FP * 0.161613019706978,
+ IMGU_SCALER_FP * 0.196389570939079,
+ IMGU_SCALER_FP * 0.233065009152522,
+ IMGU_SCALER_FP * 0.271451467092549,
+ IMGU_SCALER_FP * 0.311336505037934,
+ IMGU_SCALER_FP * 0.352484750396743,
+ IMGU_SCALER_FP * 0.394639859577736,
+ IMGU_SCALER_FP * 0.437526782302744,
+ IMGU_SCALER_FP * 0.480854304005320,
+ IMGU_SCALER_FP * 0.524317837738108,
+ IMGU_SCALER_FP * 0.567602433152471,
+ IMGU_SCALER_FP * 0.610385966680669,
+ IMGU_SCALER_FP * 0.652342474098843,
+ IMGU_SCALER_FP * 0.693145584226952,
+ IMGU_SCALER_FP * 0.732472010670320,
+ IMGU_SCALER_FP * 0.770005057258970,
+ IMGU_SCALER_FP * 0.805438092218553,
+ IMGU_SCALER_FP * 0.838477946124244,
+ IMGU_SCALER_FP * 0.868848189350256,
+ IMGU_SCALER_FP * 0.896292246026874,
+ IMGU_SCALER_FP * 0.920576303438191,
+ IMGU_SCALER_FP * 0.941491978311745,
+ IMGU_SCALER_FP * 0.958858704531378,
+ IMGU_SCALER_FP * 0.972525810403401,
+ IMGU_SCALER_FP * 0.982374257672165,
+ IMGU_SCALER_FP * 0.988318018955586,
+ IMGU_SCALER_FP * 0.990305075088925,
+ IMGU_SCALER_FP * 0.988318018955586,
+ IMGU_SCALER_FP * 0.982374257672165,
+ IMGU_SCALER_FP * 0.972525810403401,
+ IMGU_SCALER_FP * 0.958858704531378,
+ IMGU_SCALER_FP * 0.941491978311745,
+ IMGU_SCALER_FP * 0.920576303438191,
+ IMGU_SCALER_FP * 0.896292246026874,
+ IMGU_SCALER_FP * 0.868848189350256,
+ IMGU_SCALER_FP * 0.838477946124244,
+ IMGU_SCALER_FP * 0.805438092218553,
+ IMGU_SCALER_FP * 0.770005057258970,
+ IMGU_SCALER_FP * 0.732472010670320,
+ IMGU_SCALER_FP * 0.693145584226952,
+ IMGU_SCALER_FP * 0.652342474098843,
+ IMGU_SCALER_FP * 0.610385966680669,
+ IMGU_SCALER_FP * 0.567602433152471,
+ IMGU_SCALER_FP * 0.524317837738108,
+ IMGU_SCALER_FP * 0.480854304005320,
+ IMGU_SCALER_FP * 0.437526782302744,
+ IMGU_SCALER_FP * 0.394639859577736,
+ IMGU_SCALER_FP * 0.352484750396743,
+ IMGU_SCALER_FP * 0.311336505037934,
+ IMGU_SCALER_FP * 0.271451467092549,
+ IMGU_SCALER_FP * 0.233065009152522,
+ IMGU_SCALER_FP * 0.196389570939079,
+ IMGU_SCALER_FP * 0.161613019706978,
+ IMGU_SCALER_FP * 0.128897348012514,
+ IMGU_SCALER_FP * 0.098377719033862,
+ IMGU_SCALER_FP * 0.070161864654994,
+ IMGU_SCALER_FP * 0.044329836544650,
+ IMGU_SCALER_FP * 0.020934105554674,
+ IMGU_SCALER_FP * 0.000000000000000,
+ IMGU_SCALER_FP * -0.018473531164409,
+ IMGU_SCALER_FP * -0.034512848520921,
+ IMGU_SCALER_FP * -0.048168267897836,
+ IMGU_SCALER_FP * -0.059512447316781,
+ IMGU_SCALER_FP * -0.068638543974523,
+ IMGU_SCALER_FP * -0.075658172660608,
+ IMGU_SCALER_FP * -0.080699199103829,
+ IMGU_SCALER_FP * -0.083903403294908,
+ IMGU_SCALER_FP * -0.085424048835192,
+ IMGU_SCALER_FP * -0.085423394806327,
+ IMGU_SCALER_FP * -0.084070186546763,
+ IMGU_SCALER_FP * -0.081537161069780,
+ IMGU_SCALER_FP * -0.077998601684588,
+ IMGU_SCALER_FP * -0.073627974715370,
+ IMGU_SCALER_FP * -0.068595679088417,
+ IMGU_SCALER_FP * -0.063066937016941,
+ IMGU_SCALER_FP * -0.057199851105019,
+ IMGU_SCALER_FP * -0.051143649969349,
+ IMGU_SCALER_FP * -0.045037140997445,
+ IMGU_SCALER_FP * -0.039007385183627,
+ IMGU_SCALER_FP * -0.033168605172067,
+ IMGU_SCALER_FP * -0.027621333752351,
+ IMGU_SCALER_FP * -0.022451806160682,
+ IMGU_SCALER_FP * -0.017731595701026,
+ IMGU_SCALER_FP * -0.013517488475013,
+ IMGU_SCALER_FP * -0.009851589454154,
+ IMGU_SCALER_FP * -0.006761648795689,
+ IMGU_SCALER_FP * -0.004261594242362,
+ IMGU_SCALER_FP * -0.002352252699175,
+ IMGU_SCALER_FP * -0.001022241683322,
+ IMGU_SCALER_FP * -0.000249009327023
+};
+
+const s32 ipu3_css_downscale_2taps[IMGU_SCALER_DOWNSCALE_2TAPS_LEN] = {
+ IMGU_SCALER_FP * 0.074300676367033,
+ IMGU_SCALER_FP * 0.094030234498392,
+ IMGU_SCALER_FP * 0.115522859526596,
+ IMGU_SCALER_FP * 0.138778551451644,
+ IMGU_SCALER_FP * 0.163629399140505,
+ IMGU_SCALER_FP * 0.190075402593178,
+ IMGU_SCALER_FP * 0.217864695110113,
+ IMGU_SCALER_FP * 0.247081232257828,
+ IMGU_SCALER_FP * 0.277389191770256,
+ IMGU_SCALER_FP * 0.308704618080881,
+ IMGU_SCALER_FP * 0.340859600056670,
+ IMGU_SCALER_FP * 0.373602270998074,
+ IMGU_SCALER_FP * 0.406848675338577,
+ IMGU_SCALER_FP * 0.440346946378629,
+ IMGU_SCALER_FP * 0.473845217418681,
+ IMGU_SCALER_FP * 0.507091621759184,
+ IMGU_SCALER_FP * 0.540002203833621,
+ IMGU_SCALER_FP * 0.572157185809410,
+ IMGU_SCALER_FP * 0.603472612120036,
+ IMGU_SCALER_FP * 0.633612660499431,
+ IMGU_SCALER_FP * 0.662493375381080,
+ IMGU_SCALER_FP * 0.689778934498917,
+ IMGU_SCALER_FP * 0.715301426719909,
+ IMGU_SCALER_FP * 0.738892940911023,
+ IMGU_SCALER_FP * 0.760385565939227,
+ IMGU_SCALER_FP * 0.779527435104971,
+ IMGU_SCALER_FP * 0.796234592841739,
+ IMGU_SCALER_FP * 0.810339128016497,
+ IMGU_SCALER_FP * 0.821841040629247,
+ IMGU_SCALER_FP * 0.830488463980438,
+ IMGU_SCALER_FP * 0.836281398070072,
+ IMGU_SCALER_FP * 0.839219842898146,
+ IMGU_SCALER_FP * 0.839219842898146,
+ IMGU_SCALER_FP * 0.836281398070072,
+ IMGU_SCALER_FP * 0.830488463980438,
+ IMGU_SCALER_FP * 0.821841040629247,
+ IMGU_SCALER_FP * 0.810339128016497,
+ IMGU_SCALER_FP * 0.796234592841739,
+ IMGU_SCALER_FP * 0.779527435104971,
+ IMGU_SCALER_FP * 0.760385565939227,
+ IMGU_SCALER_FP * 0.738892940911023,
+ IMGU_SCALER_FP * 0.715301426719909,
+ IMGU_SCALER_FP * 0.689778934498917,
+ IMGU_SCALER_FP * 0.662493375381080,
+ IMGU_SCALER_FP * 0.633612660499431,
+ IMGU_SCALER_FP * 0.603472612120036,
+ IMGU_SCALER_FP * 0.572157185809410,
+ IMGU_SCALER_FP * 0.540002203833621,
+ IMGU_SCALER_FP * 0.507091621759184,
+ IMGU_SCALER_FP * 0.473845217418681,
+ IMGU_SCALER_FP * 0.440346946378629,
+ IMGU_SCALER_FP * 0.406848675338577,
+ IMGU_SCALER_FP * 0.373602270998074,
+ IMGU_SCALER_FP * 0.340859600056670,
+ IMGU_SCALER_FP * 0.308704618080881,
+ IMGU_SCALER_FP * 0.277389191770256,
+ IMGU_SCALER_FP * 0.247081232257828,
+ IMGU_SCALER_FP * 0.217864695110113,
+ IMGU_SCALER_FP * 0.190075402593178,
+ IMGU_SCALER_FP * 0.163629399140505,
+ IMGU_SCALER_FP * 0.138778551451644,
+ IMGU_SCALER_FP * 0.115522859526596,
+ IMGU_SCALER_FP * 0.094030234498392,
+ IMGU_SCALER_FP * 0.074300676367033
+};
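+
+/*
+ * Both kernels above are symmetric around their centre tap (4-tap mode:
+ * c[i] == c[128 - i] with the peak at c[64]; 2-tap mode: c[i] == c[63 - i]),
+ * as expected for a linear-phase FIR filter.
+ */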
+
+/* settings for Geometric Distortion Correction */
+const s16 ipu3_css_gdc_lut[4][256] = { {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, -1, -1, -1, -2, -2, -2,
+ -2, -3, -3, -3, -4, -4, -4, -5, -5, -5, -6, -6, -7, -7, -7, -8, -8,
+ -9, -9, -10, -10, -11, -11, -12, -12, -13, -13, -14, -14, -15, -15,
+ -16, -16, -17, -17, -18, -19, -19, -20, -20, -21, -22, -22, -23, -24,
+ -24, -25, -25, -26, -27, -27, -28, -29, -29, -30, -31, -31, -32, -33,
+ -33, -34, -35, -35, -36, -37, -37, -38, -39, -39, -40, -41, -41, -42,
+ -43, -43, -44, -45, -45, -46, -46, -47, -48, -48, -49, -50, -50, -51,
+ -52, -52, -53, -53, -54, -55, -55, -56, -56, -57, -58, -58, -59, -59,
+ -60, -60, -61, -61, -62, -62, -63, -64, -64, -64, -65, -65, -66, -66,
+ -67, -67, -68, -68, -68, -69, -69, -70, -70, -70, -71, -71, -71, -72,
+ -72, -72, -73, -73, -73, -73, -74, -74, -74, -74, -74, -75, -75, -75,
+ -75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75, -75,
+ -75, -75, -75, -75, -74, -74, -74, -74, -74, -73, -73, -73, -73, -72,
+ -72, -72, -71, -71, -70, -70, -69, -69, -68, -68, -67, -67, -66, -66,
+ -65, -64, -64, -63, -62, -61, -61, -60, -59, -58, -57, -56, -56, -55,
+ -54, -53, -52, -51, -50, -49, -47, -46, -45, -44, -43, -41, -40, -39,
+ -38, -36, -35, -33, -32, -31, -29, -28, -26, -25, -23, -21, -20, -18,
+ -16, -15, -13, -11, -9, -7, -5, -3, -1
+}, {
+ 0, 2, 4, 6, 8, 10, 13, 15, 17, 20, 23, 25, 28, 31, 33, 36, 39, 42, 45,
+ 48, 51, 54, 58, 61, 64, 68, 71, 74, 78, 82, 85, 89, 93, 96, 100, 104,
+ 108, 112, 116, 120, 124, 128, 132, 136, 140, 144, 149, 153, 157, 162,
+ 166, 171, 175, 180, 184, 189, 193, 198, 203, 207, 212, 217, 222, 227,
+ 232, 236, 241, 246, 251, 256, 261, 266, 271, 276, 282, 287, 292, 297,
+ 302, 307, 313, 318, 323, 328, 334, 339, 344, 350, 355, 360, 366, 371,
+ 377, 382, 388, 393, 399, 404, 409, 415, 420, 426, 431, 437, 443, 448,
+ 454, 459, 465, 470, 476, 481, 487, 492, 498, 504, 509, 515, 520, 526,
+ 531, 537, 542, 548, 553, 559, 564, 570, 576, 581, 586, 592, 597, 603,
+ 608, 614, 619, 625, 630, 635, 641, 646, 651, 657, 662, 667, 673, 678,
+ 683, 688, 694, 699, 704, 709, 714, 719, 724, 729, 735, 740, 745, 749,
+ 754, 759, 764, 769, 774, 779, 783, 788, 793, 797, 802, 807, 811, 816,
+ 820, 825, 829, 834, 838, 842, 847, 851, 855, 859, 863, 868, 872, 876,
+ 880, 884, 888, 891, 895, 899, 903, 906, 910, 914, 917, 921, 924, 927,
+ 931, 934, 937, 940, 944, 947, 950, 953, 956, 959, 961, 964, 967, 970,
+ 972, 975, 977, 980, 982, 984, 987, 989, 991, 993, 995, 997, 999, 1001,
+ 1002, 1004, 1006, 1007, 1009, 1010, 1011, 1013, 1014, 1015, 1016, 1017,
+ 1018, 1019, 1020, 1020, 1021, 1022, 1022, 1023, 1023, 1023, 1023, 1023
+}, {
+ 1024, 1023, 1023, 1023, 1023, 1023, 1022, 1022, 1021, 1020, 1020, 1019,
+ 1018, 1017, 1016, 1015, 1014, 1013, 1011, 1010, 1009, 1007, 1006, 1004,
+ 1002, 1001, 999, 997, 995, 993, 991, 989, 987, 984, 982, 980, 977, 975,
+ 972, 970, 967, 964, 961, 959, 956, 953, 950, 947, 944, 940, 937, 934,
+ 931, 927, 924, 921, 917, 914, 910, 906, 903, 899, 895, 891, 888, 884,
+ 880, 876, 872, 868, 863, 859, 855, 851, 847, 842, 838, 834, 829, 825,
+ 820, 816, 811, 807, 802, 797, 793, 788, 783, 779, 774, 769, 764, 759,
+ 754, 749, 745, 740, 735, 729, 724, 719, 714, 709, 704, 699, 694, 688,
+ 683, 678, 673, 667, 662, 657, 651, 646, 641, 635, 630, 625, 619, 614,
+ 608, 603, 597, 592, 586, 581, 576, 570, 564, 559, 553, 548, 542, 537,
+ 531, 526, 520, 515, 509, 504, 498, 492, 487, 481, 476, 470, 465, 459,
+ 454, 448, 443, 437, 431, 426, 420, 415, 409, 404, 399, 393, 388, 382,
+ 377, 371, 366, 360, 355, 350, 344, 339, 334, 328, 323, 318, 313, 307,
+ 302, 297, 292, 287, 282, 276, 271, 266, 261, 256, 251, 246, 241, 236,
+ 232, 227, 222, 217, 212, 207, 203, 198, 193, 189, 184, 180, 175, 171,
+ 166, 162, 157, 153, 149, 144, 140, 136, 132, 128, 124, 120, 116, 112,
+ 108, 104, 100, 96, 93, 89, 85, 82, 78, 74, 71, 68, 64, 61, 58, 54, 51,
+ 48, 45, 42, 39, 36, 33, 31, 28, 25, 23, 20, 17, 15, 13, 10, 8, 6, 4, 2
+}, {
+ 0, -1, -3, -5, -7, -9, -11, -13, -14, -16, -19, -20, -21, -23, -24, -26,
+ -28, -29, -30, -32, -34, -34, -37, -38, -38, -41, -42, -42, -44, -46,
+ -46, -48, -49, -49, -51, -52, -53, -54, -55, -56, -57, -57, -58, -59,
+ -60, -60, -62, -62, -63, -63, -64, -65, -66, -66, -67, -68, -67, -69,
+ -69, -69, -70, -70, -71, -71, -72, -72, -72, -73, -73, -73, -73, -73,
+ -73, -74, -75, -74, -75, -75, -74, -75, -75, -75, -75, -75, -75, -75,
+ -75, -75, -75, -75, -75, -75, -75, -74, -75, -74, -75, -75, -74, -74,
+ -73, -73, -73, -73, -73, -73, -73, -71, -72, -71, -72, -70, -70, -70,
+ -69, -70, -69, -68, -68, -68, -67, -67, -66, -66, -65, -65, -64, -64,
+ -64, -63, -62, -62, -61, -61, -60, -60, -59, -59, -58, -58, -57, -57,
+ -55, -55, -55, -53, -54, -53, -52, -51, -52, -50, -50, -49, -48, -47,
+ -46, -46, -46, -46, -45, -43, -43, -42, -42, -41, -41, -40, -39, -39,
+ -38, -37, -37, -36, -35, -35, -34, -33, -32, -32, -31, -31, -31, -29,
+ -28, -27, -27, -27, -26, -25, -25, -24, -24, -23, -22, -22, -21, -20,
+ -20, -20, -18, -19, -17, -17, -16, -16, -15, -14, -14, -14, -14, -12,
+ -12, -12, -11, -11, -11, -10, -9, -9, -8, -8, -7, -6, -7, -7, -6, -6,
+ -5, -4, -5, -5, -3, -3, -4, -2, -3, -2, -1, -2, -1, -1, 0, -1, -1, 0,
+ -1, 0, 1, 0, 0, 0, 0, 0, 0, 0
+} };
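+
+/*
+ * Sanity note: for a given index i the four tables above appear to form the
+ * phases of a 4-tap interpolation kernel, with column sums
+ * ipu3_css_gdc_lut[0][i] + ... + ipu3_css_gdc_lut[3][i] == 1024
+ * (unity gain with a 10-bit fraction). Spot-checked, not exhaustively
+ * verified.
+ */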
+
+const struct ipu3_css_xnr3_vmem_defaults ipu3_css_xnr3_vmem_defaults = {
+ .x = {
+ 1024, 1164, 1320, 1492, 1680, 1884, 2108, 2352,
+ 2616, 2900, 3208, 3540, 3896, 4276, 4684, 5120
+ },
+ .a = {
+ -7213, -5580, -4371, -3421, -2722, -2159, -6950, -5585,
+ -4529, -3697, -3010, -2485, -2070, -1727, -1428, 0
+ },
+ .b = {
+ 4096, 3603, 3178, 2811, 2497, 2226, 1990, 1783,
+ 1603, 1446, 1307, 1185, 1077, 981, 895, 819
+ },
+ .c = {
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ },
+};
+
+/* settings for Bayer Noise Reduction */
+const struct ipu3_uapi_bnr_static_config ipu3_css_bnr_defaults = {
+ { 16, 16, 16, 16 }, /* wb_gains */
+ { 16, 16, 16, 16 }, /* wb_gains_thr */
+ { 0, X, 8, 6, X, 14 }, /* thr_coeffs */
+ { 0, 0, 0, 0 }, /* thr_ctrl_shd */
+ { -128, X, -128, X }, /* opt_center */
+ { /* lut */
+ { 17, 23, 28, 32, 36, 39, 42, 45,
+ 48, 51, 53, 55, 58, 60, 62, 64,
+ 66, 68, 70, 72, 73, 75, 77, 78,
+ 80, 82, 83, 85, 86, 88, 89, 90 }
+ },
+ { 4, X, 1, 8, X, 8, X, 8, X }, /* bp_ctrl */
+ { 8, 4, 4, X, 8, X, 1, 1, 1, 1 }, /* dn_detect_ctrl */
+};
+
+const struct ipu3_uapi_dm_config ipu3_css_dm_defaults = {
+ 1, 1, 1, X, X, 8, X, 7, X, 8, X, 8, X, 4, X
+};
+
+const struct ipu3_uapi_ccm_mat_config ipu3_css_ccm_defaults = {
+ 9775, -2671, 1087, 0,
+ -1071, 8303, 815, 0,
+ -23, -7887, 16103, 0
+};
+
+/* settings for Gamma correction */
+const struct ipu3_uapi_gamma_corr_lut ipu3_css_gamma_lut = { {
+ 63, 79, 95, 111, 127, 143, 159, 175, 191, 207, 223, 239, 255, 271, 287,
+ 303, 319, 335, 351, 367, 383, 399, 415, 431, 447, 463, 479, 495, 511,
+ 527, 543, 559, 575, 591, 607, 623, 639, 655, 671, 687, 703, 719, 735,
+ 751, 767, 783, 799, 815, 831, 847, 863, 879, 895, 911, 927, 943, 959,
+ 975, 991, 1007, 1023, 1039, 1055, 1071, 1087, 1103, 1119, 1135, 1151,
+ 1167, 1183, 1199, 1215, 1231, 1247, 1263, 1279, 1295, 1311, 1327, 1343,
+ 1359, 1375, 1391, 1407, 1423, 1439, 1455, 1471, 1487, 1503, 1519, 1535,
+ 1551, 1567, 1583, 1599, 1615, 1631, 1647, 1663, 1679, 1695, 1711, 1727,
+ 1743, 1759, 1775, 1791, 1807, 1823, 1839, 1855, 1871, 1887, 1903, 1919,
+ 1935, 1951, 1967, 1983, 1999, 2015, 2031, 2047, 2063, 2079, 2095, 2111,
+ 2143, 2175, 2207, 2239, 2271, 2303, 2335, 2367, 2399, 2431, 2463, 2495,
+ 2527, 2559, 2591, 2623, 2655, 2687, 2719, 2751, 2783, 2815, 2847, 2879,
+ 2911, 2943, 2975, 3007, 3039, 3071, 3103, 3135, 3167, 3199, 3231, 3263,
+ 3295, 3327, 3359, 3391, 3423, 3455, 3487, 3519, 3551, 3583, 3615, 3647,
+ 3679, 3711, 3743, 3775, 3807, 3839, 3871, 3903, 3935, 3967, 3999, 4031,
+ 4063, 4095, 4127, 4159, 4223, 4287, 4351, 4415, 4479, 4543, 4607, 4671,
+ 4735, 4799, 4863, 4927, 4991, 5055, 5119, 5183, 5247, 5311, 5375, 5439,
+ 5503, 5567, 5631, 5695, 5759, 5823, 5887, 5951, 6015, 6079, 6143, 6207,
+ 6271, 6335, 6399, 6463, 6527, 6591, 6655, 6719, 6783, 6847, 6911, 6975,
+ 7039, 7103, 7167, 7231, 7295, 7359, 7423, 7487, 7551, 7615, 7679, 7743,
+ 7807, 7871, 7935, 7999, 8063, 8127, 8191
+} };
+
+const struct ipu3_uapi_csc_mat_config ipu3_css_csc_defaults = {
+ 4898, 9617, 1867, 0,
+ -2410, -4732, 7143, 0,
+ 10076, -8437, -1638, 0
+};
+
+const struct ipu3_uapi_cds_params ipu3_css_cds_defaults = {
+ 1, 3, 3, 1,
+ 1, 3, 3, 1,
+ 4, X, /* ds_nf */
+ 1, /* csc_en */
+ 0, X /* uv_bin_output */
+};
+
+const struct ipu3_uapi_shd_config_static ipu3_css_shd_defaults = {
+ .grid = {
+ .width = 73,
+ .height = 55,
+ .block_width_log2 = 7,
+ .block_height_log2 = 7,
+ .x_start = 0,
+ .y_start = 0,
+ },
+ .general = {
+ .shd_enable = 1,
+ .gain_factor = 0,
+ },
+ .black_level = {
+ .bl_r = 0,
+ .bl_gr = 0 | (0 << IPU3_UAPI_SHD_BLGR_NF_SHIFT),
+ .bl_gb = 0,
+ .bl_b = 0,
+ },
+};
+
+const struct ipu3_uapi_yuvp1_iefd_config ipu3_css_iefd_defaults = {
+ .units = {
+ .cu_1 = { 0, 150, 7, 0 },
+ .cu_ed = { 7, 110, 244, X, 307, 409, 511, X,
+ 184, 255, 255, X, 0, 0, X,
+ 7, 81, 255, X, 255, 255, X },
+ .cu_3 = { 148, 251, 10, 0 },
+ .cu_5 = { 25, 70, 501, X, 32, X },
+ .cu_6 = { 32, 63, 183, X, 397,
+ 33, 0, X, 0,
+ 0, 64, X, 64, X },
+ .cu_7 = { 200, 303,
+ 10, 0 },
+ .cu_unsharp = { 10, 64, 110, X, 511,
+ 66, 12, X, 0,
+ 0, 56, X, 64, X },
+ .cu_radial = { 6, 203, 255, 255, 255, 255, X,
+ 84, 444, 397, 288, 300, X,
+ 4, 69, 207, X, 369, 448, X },
+ .cu_vssnlm = { 61, 100, 25, 0}
+ },
+ .config = { 45, X, 0, X, 16, X, 45, X },
+ .control = { 1, 1, 1, 1, 1, X },
+ .sharp = { { 50, X, 511, X, 50, X, 50, X },
+ { 64, X, 0, X, 0, X},
+ { 56, X, 56, X } },
+ .unsharp = { { 36, 17, 8, X },
+ { 13, 7, 3, X } },
+ .rad = { { -2104, X, -1559, X },
+ { 4426816, X },
+ { 2430481, X },
+ { 6, X, 79, X },
+ { 64, 0, 0, X },
+ { 1, X, 2, X, 0, X, 0, X },
+ { 40, X, 62, X } },
+ .vsslnm = { { 16, 32, 64, X },
+ { 1, X, 2, X, 8, X } },
+};
+
+const struct ipu3_uapi_yuvp1_yds_config ipu3_css_yds_defaults = {
+ 0, 1, 1, 0, 0, 1, 1, 0, 2, X, 0, X
+};
+
+const struct ipu3_uapi_yuvp1_chnr_config ipu3_css_chnr_defaults = {
+ .coring = { 0, X, 0, X },
+ .sense_gain = { 6, 6, 6, X, 4, 4, 4, X },
+ .iir_fir = { 8, X, 12, X, 0, 256 - 127, X },
+};
+
+const struct ipu3_uapi_yuvp1_y_ee_nr_config ipu3_css_y_ee_nr_defaults = {
+ .lpf = { 4, X, 8, X, 16, X, 0 },
+ .sense = { 8191, X, 0, X, 8191, X, 0, X },
+ .gain = { 8, X, 0, X, 8, X, 0, X },
+ .clip = { 8, X, 0, X, 8, X, 0, X },
+ .frng = { 2, X, 200, X, 2, X, 1, 1, X },
+ .diag = { 1, X, 4, 1, 1, 4, X },
+ .fc_coring = { 0, X, 0, X, 0, X, 0, X }
+};
+
+const struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config
+ ipu3_css_tcc_gain_pcwl_lut = { {
+ 1024, 1032, 1040, 1048, 1057, 1065, 1073, 1081, 1089, 1097, 1105, 1113,
+ 1122, 1130, 1138, 1146, 1154, 1162, 1170, 1178, 1187, 1195, 1203, 1211,
+ 1219, 1227, 1235, 1243, 1252, 1260, 1268, 1276, 1284, 1292, 1300, 1308,
+ 1317, 1325, 1333, 1341, 1349, 1357, 1365, 1373, 1382, 1390, 1398, 1406,
+ 1414, 1422, 1430, 1438, 1447, 1455, 1463, 1471, 1479, 1487, 1495, 1503,
+ 1512, 1520, 1528, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ 1536, 1536, 1528, 1520, 1512, 1503, 1495, 1487, 1479, 1471, 1463, 1455,
+ 1447, 1438, 1430, 1422, 1414, 1406, 1398, 1390, 1382, 1373, 1365, 1357,
+ 1349, 1341, 1333, 1325, 1317, 1308, 1300, 1292, 1284, 1276, 1268, 1260,
+ 1252, 1243, 1235, 1227, 1219, 1211, 1203, 1195, 1187, 1178, 1170, 1162,
+ 1154, 1146, 1138, 1130, 1122, 1113, 1105, 1097, 1089, 1081, 1073, 1065,
+ 1057, 1048, 1040, 1032, 1024
+} };
+
+const struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config
+ ipu3_css_tcc_r_sqr_lut = { {
+ 32, 44, 64, 92, 128, 180, 256, 364, 512, 628, 724, 808, 888,
+ 956, 1024, 1088, 1144, 1200, 1256, 1304, 1356, 1404, 1448
+} };
+
+const struct imgu_abi_anr_config ipu3_css_anr_defaults = {
+ .transform = {
+ .adaptive_treshhold_en = 1,
+ .alpha = { { 13, 13, 13, 13, 0, 0, 0, 0},
+ { 11, 11, 11, 11, 0, 0, 0, 0},
+ { 14, 14, 14, 14, 0, 0, 0, 0} },
+ .beta = { { 24, 24, 24, 24},
+ { 21, 20, 20, 21},
+ { 25, 25, 25, 25} },
+ .color = { { { 166, 173, 149, 166, 161, 146, 145, 173,
+ 145, 150, 141, 149, 145, 141, 142 },
+ { 166, 173, 149, 165, 161, 145, 145, 173,
+ 145, 150, 141, 149, 145, 141, 142 },
+ { 166, 174, 149, 166, 162, 146, 146, 173,
+ 145, 150, 141, 149, 145, 141, 142 },
+ { 166, 173, 149, 165, 161, 145, 145, 173,
+ 146, 150, 141, 149, 145, 141, 142 } },
+ { { 141, 131, 140, 141, 144, 143, 144, 131,
+ 143, 137, 140, 140, 144, 140, 141 },
+ { 141, 131, 140, 141, 143, 143, 144, 131,
+ 143, 137, 140, 140, 144, 140, 141 },
+ { 141, 131, 141, 141, 144, 144, 144, 131,
+ 143, 137, 140, 140, 144, 140, 141 },
+ { 140, 131, 140, 141, 143, 143, 144, 131,
+ 143, 137, 140, 140, 144, 140, 141 } },
+ { { 184, 173, 188, 184, 182, 182, 181, 173,
+ 182, 179, 182, 188, 181, 182, 180 },
+ { 184, 173, 188, 184, 183, 182, 181, 173,
+ 182, 178, 182, 188, 181, 182, 180 },
+ { 184, 173, 188, 184, 182, 182, 181, 173,
+ 182, 178, 182, 188, 181, 182, 181 },
+ { 184, 172, 188, 184, 182, 182, 181, 173,
+ 182, 178, 182, 188, 182, 182, 180 } } },
+ .sqrt_lut = { 724, 768, 810, 849, 887, 923, 958, 991, 1024,
+ 1056, 1086, 1116, 1145, 1173, 1201, 1228, 1254,
+ 1280, 1305, 1330, 1355, 1379, 1402, 1425, 1448 },
+ .xreset = -1632,
+ .yreset = -1224,
+ .x_sqr_reset = 2663424,
+ .r_normfactor = 14,
+ .y_sqr_reset = 1498176,
+ .gain_scale = 115
+ },
+ .stitch = {
+ .anr_stitch_en = 1,
+ .pyramid = { { 1, 3, 5 }, { 7, 7, 5 }, { 3, 1, 3 },
+ { 9, 15, 21 }, { 21, 15, 9 }, { 3, 5, 15 },
+ { 25, 35, 35 }, { 25, 15, 5 }, { 7, 21, 35 },
+ { 49, 49, 35 }, { 21, 7, 7 }, { 21, 35, 49 },
+ { 49, 35, 21 }, { 7, 5, 15 }, { 25, 35, 35 },
+ { 25, 15, 5 }, { 3, 9, 15 }, { 21, 21, 15 },
+ { 9, 3, 1 }, { 3, 5, 7 }, { 7, 5, 3}, { 1 }
+ }
+ }
+};
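+
+/*
+ * For reference, the sqrt_lut above matches round(1024 * sqrt(0.5 + i / 16.0))
+ * for i = 0..24, i.e. a square root over [0.5, 2.0] with a 10-bit fraction
+ * (inferred from the values, not from documentation).
+ */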
+
+/* frame settings for Auto White Balance */
+const struct ipu3_uapi_awb_fr_config_s ipu3_css_awb_fr_defaults = {
+ .grid_cfg = {
+ .width = 16,
+ .height = 16,
+ .block_width_log2 = 3,
+ .block_height_log2 = 3,
+ .x_start = 10,
+ .y_start = 2 | IPU3_UAPI_GRID_Y_START_EN,
+ },
+ .bayer_coeff = { 0, 0, 0, 0, 0, 128 },
+ .bayer_sign = 0,
+ .bayer_nf = 7
+};
+
+/* settings for Auto Exposure */
+const struct ipu3_uapi_ae_grid_config ipu3_css_ae_grid_defaults = {
+ .width = 16,
+ .height = 16,
+ .block_width_log2 = 3,
+ .block_height_log2 = 3,
+ .ae_en = 1,
+ .x_start = 0,
+ .y_start = 0,
+};
+
+/* settings for Auto Exposure color correction matrix */
+const struct ipu3_uapi_ae_ccm ipu3_css_ae_ccm_defaults = {
+ 256, 256, 256, 256, /* gain_gr/r/b/gb */
+ .mat = { 128, 0, 0, 0, 0, 128, 0, 0, 0, 0, 128, 0, 0, 0, 0, 128 },
+};
+
+/* settings for Auto Focus */
+const struct ipu3_uapi_af_config_s ipu3_css_af_defaults = {
+ .filter_config = {
+ { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 128 }, 0,
+ { 0, 0, 0, 0 }, { 0, 0, 0, 0 }, { 0, 0, 0, 128 }, 0,
+ .y_calc = { 8, 8, 8, 8 },
+ .nf = { X, 7, X, 7 },
+ },
+ .grid_cfg = {
+ .width = 16,
+ .height = 16,
+ .block_width_log2 = 3,
+ .block_height_log2 = 3,
+ .x_start = 10,
+ .y_start = 2 | IPU3_UAPI_GRID_Y_START_EN,
+ },
+};
+
+/* settings for Auto White Balance */
+const struct ipu3_uapi_awb_config_s ipu3_css_awb_defaults = {
+ 8191, 8191, 8191, 8191 | /* rgbs_thr_gr/r/gb/b */
+ IPU3_UAPI_AWB_RGBS_THR_B_EN | IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT,
+ .grid = {
+ .width = 16,
+ .height = 16,
+ .block_width_log2 = 3,
+ .block_height_log2 = 3,
+ .x_start = 0,
+ .y_start = 0,
+ },
+};
diff --git a/drivers/staging/media/ipu3/ipu3-tables.h b/drivers/staging/media/ipu3/ipu3-tables.h
new file mode 100644
index 000000000000..6563782cbd22
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-tables.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_TABLES_H
+#define __IPU3_TABLES_H
+
+#include "ipu3-abi.h"
+
+#define IMGU_BDS_GRANULARITY 32 /* Downscaling granularity */
+#define IMGU_BDS_MIN_SF_INV IMGU_BDS_GRANULARITY
+#define IMGU_BDS_CONFIG_LEN 97
+
+#define IMGU_SCALER_DOWNSCALE_4TAPS_LEN 128
+#define IMGU_SCALER_DOWNSCALE_2TAPS_LEN 64
+#define IMGU_SCALER_FP ((u32)1 << 31) /* 1.0 in fixed point */
+
+#define IMGU_XNR3_VMEM_LUT_LEN 16
+
+#define IMGU_GDC_LUT_UNIT 4
+#define IMGU_GDC_LUT_LEN 256
+
+struct ipu3_css_bds_config {
+ struct imgu_abi_bds_phase_arr hor_phase_arr;
+ struct imgu_abi_bds_phase_arr ver_phase_arr;
+ struct imgu_abi_bds_ptrn_arr ptrn_arr;
+ u16 sample_patrn_length;
+ u8 hor_ds_en;
+ u8 ver_ds_en;
+};
+
+struct ipu3_css_xnr3_vmem_defaults {
+ s16 x[IMGU_XNR3_VMEM_LUT_LEN];
+ s16 a[IMGU_XNR3_VMEM_LUT_LEN];
+ s16 b[IMGU_XNR3_VMEM_LUT_LEN];
+ s16 c[IMGU_XNR3_VMEM_LUT_LEN];
+};
+
+extern const struct ipu3_css_bds_config
+ ipu3_css_bds_configs[IMGU_BDS_CONFIG_LEN];
+extern const s32 ipu3_css_downscale_4taps[IMGU_SCALER_DOWNSCALE_4TAPS_LEN];
+extern const s32 ipu3_css_downscale_2taps[IMGU_SCALER_DOWNSCALE_2TAPS_LEN];
+extern const s16 ipu3_css_gdc_lut[IMGU_GDC_LUT_UNIT][IMGU_GDC_LUT_LEN];
+extern const struct ipu3_css_xnr3_vmem_defaults ipu3_css_xnr3_vmem_defaults;
+extern const struct ipu3_uapi_bnr_static_config ipu3_css_bnr_defaults;
+extern const struct ipu3_uapi_dm_config ipu3_css_dm_defaults;
+extern const struct ipu3_uapi_ccm_mat_config ipu3_css_ccm_defaults;
+extern const struct ipu3_uapi_gamma_corr_lut ipu3_css_gamma_lut;
+extern const struct ipu3_uapi_csc_mat_config ipu3_css_csc_defaults;
+extern const struct ipu3_uapi_cds_params ipu3_css_cds_defaults;
+extern const struct ipu3_uapi_shd_config_static ipu3_css_shd_defaults;
+extern const struct ipu3_uapi_yuvp1_iefd_config ipu3_css_iefd_defaults;
+extern const struct ipu3_uapi_yuvp1_yds_config ipu3_css_yds_defaults;
+extern const struct ipu3_uapi_yuvp1_chnr_config ipu3_css_chnr_defaults;
+extern const struct ipu3_uapi_yuvp1_y_ee_nr_config ipu3_css_y_ee_nr_defaults;
+extern const struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config
+ ipu3_css_tcc_gain_pcwl_lut;
+extern const struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config
+ ipu3_css_tcc_r_sqr_lut;
+extern const struct imgu_abi_anr_config ipu3_css_anr_defaults;
+extern const struct ipu3_uapi_awb_fr_config_s ipu3_css_awb_fr_defaults;
+extern const struct ipu3_uapi_ae_grid_config ipu3_css_ae_grid_defaults;
+extern const struct ipu3_uapi_ae_ccm ipu3_css_ae_ccm_defaults;
+extern const struct ipu3_uapi_af_config_s ipu3_css_af_defaults;
+extern const struct ipu3_uapi_awb_config_s ipu3_css_awb_defaults;
+
+#endif
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
new file mode 100644
index 000000000000..c7936032beb9
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -0,0 +1,1419 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#include "ipu3.h"
+#include "ipu3-dmamap.h"
+
+/******************** v4l2_subdev_ops ********************/
+
+static int ipu3_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[imgu_sd->pipe];
+ struct v4l2_rect try_crop = {
+ .top = 0,
+ .left = 0,
+ };
+ unsigned int i;
+
+ try_crop.width =
+ imgu_pipe->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.width;
+ try_crop.height =
+ imgu_pipe->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.height;
+
+ /* Initialize try_fmt */
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(sd, fh->pad, i);
+
+ try_fmt->width = try_crop.width;
+ try_fmt->height = try_crop.height;
+ try_fmt->code = imgu_pipe->nodes[i].pad_fmt.code;
+ try_fmt->field = V4L2_FIELD_NONE;
+ }
+
+ *v4l2_subdev_get_try_crop(sd, fh->pad, IMGU_NODE_IN) = try_crop;
+ *v4l2_subdev_get_try_compose(sd, fh->pad, IMGU_NODE_IN) = try_crop;
+
+ return 0;
+}
+
+static int ipu3_subdev_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ int i;
+ unsigned int node;
+ int r = 0;
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ unsigned int pipe = imgu_sd->pipe;
+ struct device *dev = &imgu->pci_dev->dev;
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
+ struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
+ struct ipu3_css_pipe *css_pipe = &imgu->css.pipes[pipe];
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ dev_dbg(dev, "%s %d for pipe %d", __func__, enable, pipe);
+ /* Grab the ctrl on stream on; release it and return early on stream off */
+ v4l2_ctrl_grab(imgu_sd->ctrl, enable);
+
+ if (!enable) {
+ imgu_sd->active = false;
+ return 0;
+ }
+
+ for (i = 0; i < IMGU_NODE_NUM; i++)
+ imgu_pipe->queue_enabled[i] = imgu_pipe->nodes[i].enabled;
+
+ /* The PARAMS queue is handled specially: applied at buffer queue time */
+ imgu_pipe->queue_enabled[IPU3_CSS_QUEUE_PARAMS] = false;
+
+ /* Initialize CSS formats */
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ node = imgu_map_node(imgu, i);
+ /* No need to reconfig meta nodes */
+ if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
+ continue;
+ fmts[i] = imgu_pipe->queue_enabled[node] ?
+ &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp : NULL;
+ }
+
+ /* Enable VF output only when the VF queue is requested by the user */
+ css_pipe->vf_output_en = false;
+ if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
+ css_pipe->vf_output_en = true;
+
+ if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO)
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
+ else
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
+
+ dev_dbg(dev, "IPU3 pipe %d pipe_id %d", pipe, css_pipe->pipe_id);
+
+ rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
+ rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
+ rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;
+
+ r = ipu3_css_fmt_set(&imgu->css, fmts, rects, pipe);
+ if (r) {
+ dev_err(dev, "failed to set initial formats pipe %d with (%d)",
+ pipe, r);
+ return r;
+ }
+
+ imgu_sd->active = true;
+
+ return 0;
+}
+
+static int ipu3_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct v4l2_mbus_framefmt *mf;
+ struct imgu_media_pipe *imgu_pipe;
+ u32 pad = fmt->pad;
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ unsigned int pipe = imgu_sd->pipe;
+
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ fmt->format = imgu_pipe->nodes[pad].pad_fmt;
+ } else {
+ mf = v4l2_subdev_get_try_format(sd, cfg, pad);
+ fmt->format = *mf;
+ }
+
+ return 0;
+}
+
+static int ipu3_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imgu_media_pipe *imgu_pipe;
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+
+ struct v4l2_mbus_framefmt *mf;
+ u32 pad = fmt->pad;
+ unsigned int pipe = imgu_sd->pipe;
+
+ dev_dbg(&imgu->pci_dev->dev, "set subdev %d pad %d fmt to [%dx%d]",
+ pipe, pad, fmt->format.width, fmt->format.height);
+
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ mf = v4l2_subdev_get_try_format(sd, cfg, pad);
+ else
+ mf = &imgu_pipe->nodes[pad].pad_fmt;
+
+ fmt->format.code = mf->code;
+ /* Clamp the w and h based on the hardware capabilities */
+ if (imgu_sd->subdev_pads[pad].flags & MEDIA_PAD_FL_SOURCE) {
+ fmt->format.width = clamp(fmt->format.width,
+ IPU3_OUTPUT_MIN_WIDTH,
+ IPU3_OUTPUT_MAX_WIDTH);
+ fmt->format.height = clamp(fmt->format.height,
+ IPU3_OUTPUT_MIN_HEIGHT,
+ IPU3_OUTPUT_MAX_HEIGHT);
+ } else {
+ fmt->format.width = clamp(fmt->format.width,
+ IPU3_INPUT_MIN_WIDTH,
+ IPU3_INPUT_MAX_WIDTH);
+ fmt->format.height = clamp(fmt->format.height,
+ IPU3_INPUT_MIN_HEIGHT,
+ IPU3_INPUT_MAX_HEIGHT);
+ }
+
+ *mf = fmt->format;
+
+ return 0;
+}
+
+static int ipu3_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_rect *try_sel, *r;
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+
+ if (sel->pad != IMGU_NODE_IN)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ r = &imgu_sd->rect.eff;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ r = &imgu_sd->rect.bds;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ sel->r = *try_sel;
+ else
+ sel->r = *r;
+
+ return 0;
+}
+
+static int ipu3_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ struct v4l2_rect *rect, *try_sel;
+
+ dev_dbg(&imgu->pci_dev->dev,
+ "set subdev %d sel which %d target 0x%4x rect [%dx%d]",
+ imgu_sd->pipe, sel->which, sel->target,
+ sel->r.width, sel->r.height);
+
+ if (sel->pad != IMGU_NODE_IN)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ rect = &imgu_sd->rect.eff;
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ rect = &imgu_sd->rect.bds;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
+ *try_sel = sel->r;
+ else
+ *rect = sel->r;
+
+ return 0;
+}
+
+/******************** media_entity_operations ********************/
+
+static int ipu3_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct imgu_media_pipe *imgu_pipe;
+ struct v4l2_subdev *sd = container_of(entity, struct v4l2_subdev,
+ entity);
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ unsigned int pipe = imgu_sd->pipe;
+ u32 pad = local->index;
+
+ WARN_ON(pad >= IMGU_NODE_NUM);
+
+ dev_dbg(&imgu->pci_dev->dev, "pipe %d pad %d is %s", pipe, pad,
+ flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
+
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ imgu_pipe->nodes[pad].enabled = flags & MEDIA_LNK_FL_ENABLED;
+
+ /* Only the input node link controls whether the pipe is enabled */
+ if (pad != IMGU_NODE_IN)
+ return 0;
+
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ __set_bit(pipe, imgu->css.enabled_pipes);
+ else
+ __clear_bit(pipe, imgu->css.enabled_pipes);
+
+ dev_dbg(&imgu->pci_dev->dev, "pipe %d is %s", pipe,
+ flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
+
+ return 0;
+}
+
+/******************** vb2_ops ********************/
+
+static int ipu3_vb2_buf_init(struct vb2_buffer *vb)
+{
+ struct sg_table *sg = vb2_dma_sg_plane_desc(vb, 0);
+ struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
+ struct imgu_buffer *buf = container_of(vb,
+ struct imgu_buffer, vid_buf.vbb.vb2_buf);
+ struct imgu_video_device *node =
+ container_of(vb->vb2_queue, struct imgu_video_device, vbq);
+ unsigned int queue = imgu_node_to_queue(node->id);
+
+ if (queue == IPU3_CSS_QUEUE_PARAMS)
+ return 0;
+
+ return ipu3_dmamap_map_sg(imgu, sg->sgl, sg->nents, &buf->map);
+}
+
+/* Called when each buffer is freed */
+static void ipu3_vb2_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
+ struct imgu_buffer *buf = container_of(vb,
+ struct imgu_buffer, vid_buf.vbb.vb2_buf);
+ struct imgu_video_device *node =
+ container_of(vb->vb2_queue, struct imgu_video_device, vbq);
+ unsigned int queue = imgu_node_to_queue(node->id);
+
+ if (queue == IPU3_CSS_QUEUE_PARAMS)
+ return;
+
+ ipu3_dmamap_unmap(imgu, &buf->map);
+}
+
+/* Transfer buffer ownership to the driver */
+static void ipu3_vb2_buf_queue(struct vb2_buffer *vb)
+{
+ struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
+ struct imgu_video_device *node =
+ container_of(vb->vb2_queue, struct imgu_video_device, vbq);
+ unsigned int queue = imgu_node_to_queue(node->id);
+ unsigned long need_bytes;
+ unsigned int pipe = node->pipe;
+
+ if (vb->vb2_queue->type == V4L2_BUF_TYPE_META_CAPTURE ||
+ vb->vb2_queue->type == V4L2_BUF_TYPE_META_OUTPUT)
+ need_bytes = node->vdev_fmt.fmt.meta.buffersize;
+ else
+ need_bytes = node->vdev_fmt.fmt.pix_mp.plane_fmt[0].sizeimage;
+
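+ /*
+ * Parameter buffers are not queued to the hardware: the payload is
+ * applied via ipu3_css_set_parameters() and the buffer is completed
+ * immediately.
+ */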
+ if (queue == IPU3_CSS_QUEUE_PARAMS) {
+ unsigned long payload = vb2_get_plane_payload(vb, 0);
+ struct vb2_v4l2_buffer *buf =
+ container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ int r = -EINVAL;
+
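+ /* A zero payload means bytesused was left unset; assume a full buffer */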
+ if (payload == 0) {
+ payload = need_bytes;
+ vb2_set_plane_payload(vb, 0, payload);
+ }
+ if (payload >= need_bytes)
+ r = ipu3_css_set_parameters(&imgu->css, pipe,
+ vb2_plane_vaddr(vb, 0));
+ buf->flags = V4L2_BUF_FLAG_DONE;
+ vb2_buffer_done(vb, r == 0 ? VB2_BUF_STATE_DONE
+ : VB2_BUF_STATE_ERROR);
+
+ } else {
+ struct imgu_buffer *buf = container_of(vb, struct imgu_buffer,
+ vid_buf.vbb.vb2_buf);
+
+ mutex_lock(&imgu->lock);
+ ipu3_css_buf_init(&buf->css_buf, queue, buf->map.daddr);
+ list_add_tail(&buf->vid_buf.list,
+ &node->buffers);
+ mutex_unlock(&imgu->lock);
+
+ vb2_set_plane_payload(&buf->vid_buf.vbb.vb2_buf, 0, need_bytes);
+
+ if (imgu->streaming)
+ imgu_queue_buffers(imgu, false, pipe);
+ }
+
+ dev_dbg(&imgu->pci_dev->dev, "%s for pipe %d node %d", __func__,
+ node->pipe, node->id);
+}
+
+static int ipu3_vb2_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct imgu_device *imgu = vb2_get_drv_priv(vq);
+ struct imgu_video_device *node =
+ container_of(vq, struct imgu_video_device, vbq);
+ const struct v4l2_format *fmt = &node->vdev_fmt;
+ unsigned int size;
+
+ *num_buffers = clamp_val(*num_buffers, 1, VB2_MAX_FRAME);
+ alloc_devs[0] = &imgu->pci_dev->dev;
+
+ if (vq->type == V4L2_BUF_TYPE_META_CAPTURE ||
+ vq->type == V4L2_BUF_TYPE_META_OUTPUT)
+ size = fmt->fmt.meta.buffersize;
+ else
+ size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+
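+ /*
+ * A non-zero *num_planes means VIDIOC_CREATE_BUFS: accept a larger
+ * plane size from user space, reject a smaller one.
+ */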
+ if (*num_planes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
+ *num_planes = 1;
+ sizes[0] = size;
+
+ /* Initialize buffer queue */
+ INIT_LIST_HEAD(&node->buffers);
+
+ return 0;
+}
+
+/* Check if all enabled video nodes, other than the given exception, are streaming */
+static bool ipu3_all_nodes_streaming(struct imgu_device *imgu,
+ struct imgu_video_device *except)
+{
+ unsigned int i, pipe, p;
+ struct imgu_video_device *node;
+ struct device *dev = &imgu->pci_dev->dev;
+
+ pipe = except->pipe;
+ if (!test_bit(pipe, imgu->css.enabled_pipes)) {
+ dev_warn(&imgu->pci_dev->dev,
+ "pipe %d link is not ready yet", pipe);
+ return false;
+ }
+
+ for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ node = &imgu->imgu_pipe[p].nodes[i];
+ dev_dbg(dev, "%s pipe %u queue %u name %s enabled = %u",
+ __func__, p, i, node->name, node->enabled);
+ if (node == except)
+ continue;
+ if (node->enabled && !vb2_start_streaming_called(&node->vbq))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void ipu3_return_all_buffers(struct imgu_device *imgu,
+ struct imgu_video_device *node,
+ enum vb2_buffer_state state)
+{
+ struct ipu3_vb2_buffer *b, *b0;
+
+ /* Return all buffers */
+ mutex_lock(&imgu->lock);
+ list_for_each_entry_safe(b, b0, &node->buffers, list) {
+ list_del(&b->list);
+ vb2_buffer_done(&b->vbb.vb2_buf, state);
+ }
+ mutex_unlock(&imgu->lock);
+}
+
+static int ipu3_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct imgu_media_pipe *imgu_pipe;
+ struct imgu_device *imgu = vb2_get_drv_priv(vq);
+ struct device *dev = &imgu->pci_dev->dev;
+ struct imgu_video_device *node =
+ container_of(vq, struct imgu_video_device, vbq);
+ int r;
+ unsigned int pipe;
+
+ dev_dbg(dev, "%s node name %s pipe %d id %u", __func__,
+ node->name, node->pipe, node->id);
+
+ if (imgu->streaming) {
+ r = -EBUSY;
+ goto fail_return_bufs;
+ }
+
+ if (!node->enabled) {
+ dev_err(dev, "IMGU node is not enabled");
+ r = -EINVAL;
+ goto fail_return_bufs;
+ }
+
+ pipe = node->pipe;
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ r = media_pipeline_start(&node->vdev.entity, &imgu_pipe->pipeline);
+ if (r < 0)
+ goto fail_return_bufs;
+
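+ /* Defer starting the hardware until all enabled nodes are streaming */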
+ if (!ipu3_all_nodes_streaming(imgu, node))
+ return 0;
+
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = v4l2_subdev_call(&imgu->imgu_pipe[pipe].imgu_sd.subdev,
+ video, s_stream, 1);
+ if (r < 0)
+ goto fail_stop_pipeline;
+ }
+
+ /* Start streaming of the whole pipeline now */
+ dev_dbg(dev, "IMGU streaming is ready to start");
+ r = imgu_s_stream(imgu, true);
+ if (!r)
+ imgu->streaming = true;
+
+ return 0;
+
+fail_stop_pipeline:
+ media_pipeline_stop(&node->vdev.entity);
+fail_return_bufs:
+ ipu3_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED);
+
+ return r;
+}
+
+static void ipu3_vb2_stop_streaming(struct vb2_queue *vq)
+{
+ struct imgu_media_pipe *imgu_pipe;
+ struct imgu_device *imgu = vb2_get_drv_priv(vq);
+ struct device *dev = &imgu->pci_dev->dev;
+ struct imgu_video_device *node =
+ container_of(vq, struct imgu_video_device, vbq);
+ int r;
+ unsigned int pipe;
+
+ WARN_ON(!node->enabled);
+
+ pipe = node->pipe;
+ dev_dbg(dev, "Try to stream off node [%d][%d]", pipe, node->id);
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev, video, s_stream, 0);
+ if (r)
+ dev_err(&imgu->pci_dev->dev,
+ "failed to stop subdev streaming\n");
+
+ /* Was this the first node with streaming disabled? */
+ if (imgu->streaming && ipu3_all_nodes_streaming(imgu, node)) {
+ /* Yes, really stop streaming now */
+ dev_dbg(dev, "IMGU streaming is ready to stop");
+ r = imgu_s_stream(imgu, false);
+ if (!r)
+ imgu->streaming = false;
+ }
+
+ ipu3_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
+ media_pipeline_stop(&node->vdev.entity);
+}
+
+/******************** v4l2_ioctl_ops ********************/
+
+#define VID_CAPTURE 0
+#define VID_OUTPUT 1
+#define DEF_VID_CAPTURE 0
+#define DEF_VID_OUTPUT 1
+
+struct ipu3_fmt {
+ u32 fourcc;
+ u16 type; /* VID_CAPTURE or VID_OUTPUT, not both */
+};
+
+/* format descriptions for capture and preview */
+static const struct ipu3_fmt formats[] = {
+ { V4L2_PIX_FMT_NV12, VID_CAPTURE },
+ { V4L2_PIX_FMT_IPU3_SGRBG10, VID_OUTPUT },
+ { V4L2_PIX_FMT_IPU3_SBGGR10, VID_OUTPUT },
+ { V4L2_PIX_FMT_IPU3_SGBRG10, VID_OUTPUT },
+ { V4L2_PIX_FMT_IPU3_SRGGB10, VID_OUTPUT },
+};
+
+/* Find the first matching format; return the default if none is found */
+static const struct ipu3_fmt *find_format(struct v4l2_format *f, u32 type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].fourcc == f->fmt.pix_mp.pixelformat &&
+ formats[i].type == type)
+ return &formats[i];
+ }
+
+ return type == VID_CAPTURE ? &formats[DEF_VID_CAPTURE] :
+ &formats[DEF_VID_OUTPUT];
+}
+
+static int ipu3_vidioc_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+
+ strscpy(cap->driver, IMGU_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMGU_NAME, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", node->name);
+
+ return 0;
+}
+
+static int enum_fmts(struct v4l2_fmtdesc *f, u32 type)
+{
+ unsigned int i, j;
+
+ for (i = j = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].type == type) {
+ if (j == f->index)
+ break;
+ ++j;
+ }
+ }
+
+ if (i < ARRAY_SIZE(formats)) {
+ f->pixelformat = formats[i].fourcc;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ return enum_fmts(f, VID_CAPTURE);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ return enum_fmts(f, VID_OUTPUT);
+}
+
+/* Always propagate the format from the CIO2 subdev forward */
+static int ipu3_vidioc_g_fmt(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+
+ f->fmt = node->vdev_fmt.fmt;
+
+ return 0;
+}
+
+/*
+ * Set the input/output format. Unless it is just a try, this also resets
+ * the selections (i.e. the effective and BDS resolutions) to defaults.
+ */
+static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
+ struct v4l2_format *f, bool try)
+{
+ struct device *dev = &imgu->pci_dev->dev;
+ struct v4l2_pix_format_mplane try_fmts[IPU3_CSS_QUEUES];
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
+ struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
+ struct v4l2_mbus_framefmt pad_fmt;
+ unsigned int i, css_q;
+ int r;
+ struct ipu3_css_pipe *css_pipe = &imgu->css.pipes[pipe];
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+ struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd;
+
+ dev_dbg(dev, "set fmt node [%u][%u](try = %d)", pipe, node, try);
+
+ for (i = 0; i < IMGU_NODE_NUM; i++)
+ dev_dbg(dev, "IMGU pipe %d node %d enabled = %d",
+ pipe, i, imgu_pipe->nodes[i].enabled);
+
+ if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
+ css_pipe->vf_output_en = true;
+
+ if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO)
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
+ else
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
+
+ dev_dbg(dev, "IPU3 pipe %d pipe_id = %d", pipe, css_pipe->pipe_id);
+
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ unsigned int inode = imgu_map_node(imgu, i);
+
+ /* Skip the meta nodes */
+ if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
+ continue;
+
+ if (try) {
+ try_fmts[i] =
+ imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
+ fmts[i] = &try_fmts[i];
+ } else {
+ fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
+ }
+
+ /* CSS expects some format on OUT queue */
+ if (i != IPU3_CSS_QUEUE_OUT &&
+ !imgu_pipe->nodes[inode].enabled)
+ fmts[i] = NULL;
+ }
+
+ if (!try) {
+ /* The effective and BDS resolutions were set via set_selection */
+ struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd;
+
+ rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
+ rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
+ rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;
+
+ /* Assume the pad format was set by the subdev set_fmt earlier */
+ pad_fmt = imgu_pipe->nodes[IMGU_NODE_IN].pad_fmt;
+ rects[IPU3_CSS_RECT_GDC]->width = pad_fmt.width;
+ rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
+ }
+
+ /*
+ * The imgu core does not update the node format with the value given
+ * by the user before this function returns success, so set it here.
+ */
+ css_q = imgu_node_to_queue(node);
+ if (fmts[css_q])
+ *fmts[css_q] = f->fmt.pix_mp;
+ else
+ return -EINVAL;
+
+ if (try)
+ r = ipu3_css_fmt_try(&imgu->css, fmts, rects, pipe);
+ else
+ r = ipu3_css_fmt_set(&imgu->css, fmts, rects, pipe);
+
+ /* On success, r is the index of the binary in the firmware blob */
+ if (r < 0)
+ return r;
+
+ if (try)
+ f->fmt.pix_mp = *fmts[css_q];
+ else
+ f->fmt = imgu_pipe->nodes[node].vdev_fmt.fmt;
+
+ return 0;
+}
+
+static int ipu3_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
+ const struct ipu3_fmt *fmt;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ fmt = find_format(f, VID_CAPTURE);
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ fmt = find_format(f, VID_OUTPUT);
+ else
+ return -EINVAL;
+
+ pixm->pixelformat = fmt->fourcc;
+
+ memset(pixm->plane_fmt[0].reserved, 0,
+ sizeof(pixm->plane_fmt[0].reserved));
+
+ return 0;
+}
+
+static int ipu3_vidioc_try_fmt(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct imgu_device *imgu = video_drvdata(file);
+ struct device *dev = &imgu->pci_dev->dev;
+ struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ int r;
+
+ dev_dbg(dev, "%s [%ux%u] for node %d\n", __func__,
+ pix_mp->width, pix_mp->height, node->id);
+
+ r = ipu3_try_fmt(file, fh, f);
+ if (r)
+ return r;
+
+ return imgu_fmt(imgu, node->pipe, node->id, f, true);
+}
+
+static int ipu3_vidioc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
+{
+ struct imgu_device *imgu = video_drvdata(file);
+ struct device *dev = &imgu->pci_dev->dev;
+ struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ int r;
+
+ dev_dbg(dev, "%s [%ux%u] for node %d\n", __func__,
+ pix_mp->width, pix_mp->height, node->id);
+
+ r = ipu3_try_fmt(file, fh, f);
+ if (r)
+ return r;
+
+ return imgu_fmt(imgu, node->pipe, node->id, f, false);
+}
+
+struct ipu3_meta_fmt {
+ __u32 fourcc;
+ char *name;
+};
+
+/* From drivers/media/v4l2-core/v4l2-ioctl.c */
+static const struct ipu3_meta_fmt meta_fmts[] = {
+ { V4L2_META_FMT_IPU3_PARAMS, "IPU3 processing parameters" },
+ { V4L2_META_FMT_IPU3_STAT_3A, "IPU3 3A statistics" },
+};
+
+static int ipu3_meta_enum_format(struct file *file, void *fh,
+ struct v4l2_fmtdesc *fmt)
+{
+ struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+ unsigned int i = fmt->type == V4L2_BUF_TYPE_META_OUTPUT ? 0 : 1;
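+ /* Index 0 is the params format (META_OUTPUT), 1 the 3A statistics */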
+
+ /* Each node is dedicated to only one meta format */
+ if (fmt->index > 0 || fmt->type != node->vbq.type)
+ return -EINVAL;
+
+ strscpy(fmt->description, meta_fmts[i].name, sizeof(fmt->description));
+ fmt->pixelformat = meta_fmts[i].fourcc;
+
+ return 0;
+}
+
+static int ipu3_vidioc_g_meta_fmt(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+
+ if (f->type != node->vbq.type)
+ return -EINVAL;
+
+ f->fmt = node->vdev_fmt.fmt;
+
+ return 0;
+}
+
+static int ipu3_vidioc_enum_input(struct file *file, void *fh,
+ struct v4l2_input *input)
+{
+ if (input->index > 0)
+ return -EINVAL;
+ strscpy(input->name, "camera", sizeof(input->name));
+ input->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int ipu3_vidioc_g_input(struct file *file, void *fh, unsigned int *input)
+{
+ *input = 0;
+
+ return 0;
+}
+
+static int ipu3_vidioc_s_input(struct file *file, void *fh, unsigned int input)
+{
+ return input == 0 ? 0 : -EINVAL;
+}
+
+static int ipu3_vidioc_enum_output(struct file *file, void *fh,
+ struct v4l2_output *output)
+{
+ if (output->index > 0)
+ return -EINVAL;
+ strscpy(output->name, "camera", sizeof(output->name));
+ output->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int ipu3_vidioc_g_output(struct file *file, void *fh,
+ unsigned int *output)
+{
+ *output = 0;
+
+ return 0;
+}
+
+static int ipu3_vidioc_s_output(struct file *file, void *fh,
+ unsigned int output)
+{
+ return output == 0 ? 0 : -EINVAL;
+}
+
+/******************** function pointers ********************/
+
+static struct v4l2_subdev_internal_ops ipu3_subdev_internal_ops = {
+ .open = ipu3_subdev_open,
+};
+
+static const struct v4l2_subdev_core_ops ipu3_subdev_core_ops = {
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops ipu3_subdev_video_ops = {
+ .s_stream = ipu3_subdev_s_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ipu3_subdev_pad_ops = {
+ .link_validate = v4l2_subdev_link_validate_default,
+ .get_fmt = ipu3_subdev_get_fmt,
+ .set_fmt = ipu3_subdev_set_fmt,
+ .get_selection = ipu3_subdev_get_selection,
+ .set_selection = ipu3_subdev_set_selection,
+};
+
+static const struct v4l2_subdev_ops ipu3_subdev_ops = {
+ .core = &ipu3_subdev_core_ops,
+ .video = &ipu3_subdev_video_ops,
+ .pad = &ipu3_subdev_pad_ops,
+};
+
+static const struct media_entity_operations ipu3_media_ops = {
+ .link_setup = ipu3_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/****************** vb2_ops of the Q ********************/
+
+static const struct vb2_ops ipu3_vb2_ops = {
+ .buf_init = ipu3_vb2_buf_init,
+ .buf_cleanup = ipu3_vb2_buf_cleanup,
+ .buf_queue = ipu3_vb2_buf_queue,
+ .queue_setup = ipu3_vb2_queue_setup,
+ .start_streaming = ipu3_vb2_start_streaming,
+ .stop_streaming = ipu3_vb2_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+/****************** v4l2_file_operations *****************/
+
+static const struct v4l2_file_operations ipu3_v4l2_fops = {
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+/******************** v4l2_ioctl_ops ********************/
+
+static const struct v4l2_ioctl_ops ipu3_v4l2_ioctl_ops = {
+ .vidioc_querycap = ipu3_vidioc_querycap,
+
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap_mplane = ipu3_vidioc_g_fmt,
+ .vidioc_s_fmt_vid_cap_mplane = ipu3_vidioc_s_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = ipu3_vidioc_try_fmt,
+
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out,
+ .vidioc_g_fmt_vid_out_mplane = ipu3_vidioc_g_fmt,
+ .vidioc_s_fmt_vid_out_mplane = ipu3_vidioc_s_fmt,
+ .vidioc_try_fmt_vid_out_mplane = ipu3_vidioc_try_fmt,
+
+ .vidioc_enum_output = ipu3_vidioc_enum_output,
+ .vidioc_g_output = ipu3_vidioc_g_output,
+ .vidioc_s_output = ipu3_vidioc_s_output,
+
+ .vidioc_enum_input = ipu3_vidioc_enum_input,
+ .vidioc_g_input = ipu3_vidioc_g_input,
+ .vidioc_s_input = ipu3_vidioc_s_input,
+
+ /* buffer queue management */
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+};
+
+static const struct v4l2_ioctl_ops ipu3_v4l2_meta_ioctl_ops = {
+ .vidioc_querycap = ipu3_vidioc_querycap,
+
+ /* meta capture */
+ .vidioc_enum_fmt_meta_cap = ipu3_meta_enum_format,
+ .vidioc_g_fmt_meta_cap = ipu3_vidioc_g_meta_fmt,
+ .vidioc_s_fmt_meta_cap = ipu3_vidioc_g_meta_fmt,
+ .vidioc_try_fmt_meta_cap = ipu3_vidioc_g_meta_fmt,
+
+ /* meta output */
+ .vidioc_enum_fmt_meta_out = ipu3_meta_enum_format,
+ .vidioc_g_fmt_meta_out = ipu3_vidioc_g_meta_fmt,
+ .vidioc_s_fmt_meta_out = ipu3_vidioc_g_meta_fmt,
+ .vidioc_try_fmt_meta_out = ipu3_vidioc_g_meta_fmt,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+};
+
+static int ipu3_sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imgu_v4l2_subdev *imgu_sd =
+ container_of(ctrl->handler, struct imgu_v4l2_subdev, ctrl_handler);
+ struct imgu_device *imgu = v4l2_get_subdevdata(&imgu_sd->subdev);
+ struct device *dev = &imgu->pci_dev->dev;
+
+ dev_dbg(dev, "set val %d to ctrl 0x%8x for subdev %d",
+ ctrl->val, ctrl->id, imgu_sd->pipe);
+
+ switch (ctrl->id) {
+ case V4L2_CID_INTEL_IPU3_MODE:
+ atomic_set(&imgu_sd->running_mode, ctrl->val);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ctrl_ops ipu3_subdev_ctrl_ops = {
+ .s_ctrl = ipu3_sd_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config ipu3_subdev_ctrl_mode = {
+ .ops = &ipu3_subdev_ctrl_ops,
+ .id = V4L2_CID_INTEL_IPU3_MODE,
+ .name = "IPU3 Pipe Mode",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = IPU3_RUNNING_MODE_VIDEO,
+ .max = IPU3_RUNNING_MODE_STILL,
+ .step = 1,
+ .def = IPU3_RUNNING_MODE_VIDEO,
+};
+
+/******************** Framework registration ********************/
+
+/* Helper function to configure a node's video properties */
+static void ipu3_node_to_v4l2(u32 node, struct video_device *vdev,
+ struct v4l2_format *f)
+{
+ u32 cap;
+
+ /* Should not happen */
+ WARN_ON(node >= IMGU_NODE_NUM);
+
+ switch (node) {
+ case IMGU_NODE_IN:
+ cap = V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+ f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ vdev->ioctl_ops = &ipu3_v4l2_ioctl_ops;
+ break;
+ case IMGU_NODE_PARAMS:
+ cap = V4L2_CAP_META_OUTPUT;
+ f->type = V4L2_BUF_TYPE_META_OUTPUT;
+ f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_PARAMS;
+ vdev->ioctl_ops = &ipu3_v4l2_meta_ioctl_ops;
+ ipu3_css_meta_fmt_set(&f->fmt.meta);
+ break;
+ case IMGU_NODE_STAT_3A:
+ cap = V4L2_CAP_META_CAPTURE;
+ f->type = V4L2_BUF_TYPE_META_CAPTURE;
+ f->fmt.meta.dataformat = V4L2_META_FMT_IPU3_STAT_3A;
+ vdev->ioctl_ops = &ipu3_v4l2_meta_ioctl_ops;
+ ipu3_css_meta_fmt_set(&f->fmt.meta);
+ break;
+ default:
+ cap = V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ vdev->ioctl_ops = &ipu3_v4l2_ioctl_ops;
+ }
+
+ vdev->device_caps = V4L2_CAP_STREAMING | cap;
+}
+
+static int ipu3_v4l2_subdev_register(struct imgu_device *imgu,
+ struct imgu_v4l2_subdev *imgu_sd,
+ unsigned int pipe)
+{
+ int i, r;
+ struct v4l2_ctrl_handler *hdl = &imgu_sd->ctrl_handler;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ /* Initialize subdev media entity */
+ r = media_entity_pads_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
+ imgu_sd->subdev_pads);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed initialize subdev media entity (%d)\n", r);
+ return r;
+ }
+ imgu_sd->subdev.entity.ops = &ipu3_media_ops;
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
+ MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
+ }
+
+ /* Initialize subdev */
+ v4l2_subdev_init(&imgu_sd->subdev, &ipu3_subdev_ops);
+ imgu_sd->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_STATISTICS;
+ imgu_sd->subdev.internal_ops = &ipu3_subdev_internal_ops;
+ imgu_sd->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+ snprintf(imgu_sd->subdev.name, sizeof(imgu_sd->subdev.name),
+ "%s %d", IMGU_NAME, pipe);
+ v4l2_set_subdevdata(&imgu_sd->subdev, imgu);
+ atomic_set(&imgu_sd->running_mode, IPU3_RUNNING_MODE_VIDEO);
+ v4l2_ctrl_handler_init(hdl, 1);
+ imgu_sd->subdev.ctrl_handler = hdl;
+ imgu_sd->ctrl = v4l2_ctrl_new_custom(hdl, &ipu3_subdev_ctrl_mode, NULL);
+ if (hdl->error) {
+ r = hdl->error;
+ dev_err(&imgu->pci_dev->dev,
+ "failed to create subdev v4l2 ctrl with err %d", r);
+ goto fail_subdev;
+ }
+ r = v4l2_device_register_subdev(&imgu->v4l2_dev, &imgu_sd->subdev);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed initialize subdev (%d)\n", r);
+ goto fail_subdev;
+ }
+
+ imgu_sd->pipe = pipe;
+ return 0;
+
+fail_subdev:
+ v4l2_ctrl_handler_free(imgu_sd->subdev.ctrl_handler);
+ media_entity_cleanup(&imgu_sd->subdev.entity);
+
+ return r;
+}
+
+static int ipu3_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
+ int node_num)
+{
+ int r;
+ u32 flags;
+ struct v4l2_mbus_framefmt def_bus_fmt = { 0 };
+ struct v4l2_pix_format_mplane def_pix_fmt = { 0 };
+ struct device *dev = &imgu->pci_dev->dev;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+ struct v4l2_subdev *sd = &imgu_pipe->imgu_sd.subdev;
+ struct imgu_video_device *node = &imgu_pipe->nodes[node_num];
+ struct video_device *vdev = &node->vdev;
+ struct vb2_queue *vbq = &node->vbq;
+
+ /* Initialize formats to default values */
+ def_bus_fmt.width = 1920;
+ def_bus_fmt.height = 1080;
+ def_bus_fmt.code = MEDIA_BUS_FMT_FIXED;
+ def_bus_fmt.field = V4L2_FIELD_NONE;
+ def_bus_fmt.colorspace = V4L2_COLORSPACE_RAW;
+ def_bus_fmt.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ def_bus_fmt.quantization = V4L2_QUANTIZATION_DEFAULT;
+ def_bus_fmt.xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ def_pix_fmt.width = def_bus_fmt.width;
+ def_pix_fmt.height = def_bus_fmt.height;
+ def_pix_fmt.field = def_bus_fmt.field;
+ def_pix_fmt.num_planes = 1;
+ def_pix_fmt.plane_fmt[0].bytesperline = def_pix_fmt.width * 2;
+ def_pix_fmt.plane_fmt[0].sizeimage =
+ def_pix_fmt.height * def_pix_fmt.plane_fmt[0].bytesperline;
+ def_pix_fmt.flags = 0;
+ def_pix_fmt.colorspace = def_bus_fmt.colorspace;
+ def_pix_fmt.ycbcr_enc = def_bus_fmt.ycbcr_enc;
+ def_pix_fmt.quantization = def_bus_fmt.quantization;
+ def_pix_fmt.xfer_func = def_bus_fmt.xfer_func;
+
+ /* Initialize miscellaneous variables */
+ mutex_init(&node->lock);
+ INIT_LIST_HEAD(&node->buffers);
+
+ /* Initialize formats to default values */
+ node->pad_fmt = def_bus_fmt;
+ node->id = node_num;
+ node->pipe = pipe;
+ ipu3_node_to_v4l2(node_num, vdev, &node->vdev_fmt);
+ if (node->vdev_fmt.type ==
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
+ node->vdev_fmt.type ==
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ def_pix_fmt.pixelformat = node->output ?
+ V4L2_PIX_FMT_IPU3_SGRBG10 :
+ V4L2_PIX_FMT_NV12;
+ node->vdev_fmt.fmt.pix_mp = def_pix_fmt;
+ }
+
+ /* Initialize media entities */
+ r = media_entity_pads_init(&vdev->entity, 1, &node->vdev_pad);
+ if (r) {
+ dev_err(dev, "failed initialize media entity (%d)\n", r);
+ mutex_destroy(&node->lock);
+ return r;
+ }
+ node->vdev_pad.flags = node->output ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+ vdev->entity.ops = NULL;
+
+ /* Initialize vbq */
+ vbq->type = node->vdev_fmt.type;
+ vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
+ vbq->ops = &ipu3_vb2_ops;
+ vbq->mem_ops = &vb2_dma_sg_memops;
+ if (imgu->buf_struct_size <= 0)
+ imgu->buf_struct_size =
+ sizeof(struct ipu3_vb2_buffer);
+ vbq->buf_struct_size = imgu->buf_struct_size;
+ vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ /* Allow streamon without any buffers queued */
+ vbq->min_buffers_needed = 0;
+ vbq->drv_priv = imgu;
+ vbq->lock = &node->lock;
+ r = vb2_queue_init(vbq);
+ if (r) {
+ dev_err(dev, "failed to initialize video queue (%d)", r);
+ media_entity_cleanup(&vdev->entity);
+ return r;
+ }
+
+ /* Initialize vdev */
+ snprintf(vdev->name, sizeof(vdev->name), "%s %d %s",
+ IMGU_NAME, pipe, node->name);
+ vdev->release = video_device_release_empty;
+ vdev->fops = &ipu3_v4l2_fops;
+ vdev->lock = &node->lock;
+ vdev->v4l2_dev = &imgu->v4l2_dev;
+ vdev->queue = &node->vbq;
+ vdev->vfl_dir = node->output ? VFL_DIR_TX : VFL_DIR_RX;
+ video_set_drvdata(vdev, imgu);
+ r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (r) {
+ dev_err(dev, "failed to register video device (%d)", r);
+ media_entity_cleanup(&vdev->entity);
+ return r;
+ }
+
+ /* Create link between video node and the subdev pad */
+ flags = 0;
+ if (node->enabled)
+ flags |= MEDIA_LNK_FL_ENABLED;
+ if (node->output) {
+ r = media_create_pad_link(&vdev->entity, 0, &sd->entity,
+ node_num, flags);
+ } else {
+ r = media_create_pad_link(&sd->entity, node_num, &vdev->entity,
+ 0, flags);
+ }
+ if (r) {
+ dev_err(dev, "failed to create pad link (%d)", r);
+ video_unregister_device(vdev);
+ return r;
+ }
+
+ return 0;
+}
+
+static void ipu3_v4l2_nodes_cleanup_pipe(struct imgu_device *imgu,
+ unsigned int pipe, int node)
+{
+ int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ for (i = 0; i < node; i++) {
+ video_unregister_device(&imgu_pipe->nodes[i].vdev);
+ media_entity_cleanup(&imgu_pipe->nodes[i].vdev.entity);
+ mutex_destroy(&imgu_pipe->nodes[i].lock);
+ }
+}
+
+static int ipu3_v4l2_nodes_setup_pipe(struct imgu_device *imgu, int pipe)
+{
+ int i, r;
+
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ r = ipu3_v4l2_node_setup(imgu, pipe, i);
+ if (r)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ ipu3_v4l2_nodes_cleanup_pipe(imgu, pipe, i);
+ return r;
+}
+
+static void ipu3_v4l2_subdev_cleanup(struct imgu_device *imgu, unsigned int i)
+{
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[i];
+
+ v4l2_device_unregister_subdev(&imgu_pipe->imgu_sd.subdev);
+ v4l2_ctrl_handler_free(imgu_pipe->imgu_sd.subdev.ctrl_handler);
+ media_entity_cleanup(&imgu_pipe->imgu_sd.subdev.entity);
+}
+
+static void ipu3_v4l2_cleanup_pipes(struct imgu_device *imgu, unsigned int pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe; i++) {
+ ipu3_v4l2_nodes_cleanup_pipe(imgu, i, IMGU_NODE_NUM);
+ ipu3_v4l2_subdev_cleanup(imgu, i);
+ }
+}
+
+static int imgu_v4l2_register_pipes(struct imgu_device *imgu)
+{
+ struct imgu_media_pipe *imgu_pipe;
+ int i, r;
+
+ for (i = 0; i < IMGU_MAX_PIPE_NUM; i++) {
+ imgu_pipe = &imgu->imgu_pipe[i];
+ r = ipu3_v4l2_subdev_register(imgu, &imgu_pipe->imgu_sd, i);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register subdev%d ret (%d)\n", i, r);
+ goto pipes_cleanup;
+ }
+ r = ipu3_v4l2_nodes_setup_pipe(imgu, i);
+ if (r) {
+ ipu3_v4l2_subdev_cleanup(imgu, i);
+ goto pipes_cleanup;
+ }
+ }
+
+ return 0;
+
+pipes_cleanup:
+ ipu3_v4l2_cleanup_pipes(imgu, i);
+ return r;
+}
+
+int imgu_v4l2_register(struct imgu_device *imgu)
+{
+ int r;
+
+ /* Initialize miscellaneous variables */
+ imgu->streaming = false;
+
+ /* Set up media device */
+ media_device_pci_init(&imgu->media_dev, imgu->pci_dev, IMGU_NAME);
+
+ /* Set up v4l2 device */
+ imgu->v4l2_dev.mdev = &imgu->media_dev;
+ imgu->v4l2_dev.ctrl_handler = NULL;
+ r = v4l2_device_register(&imgu->pci_dev->dev, &imgu->v4l2_dev);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register V4L2 device (%d)\n", r);
+ goto fail_v4l2_dev;
+ }
+
+ r = imgu_v4l2_register_pipes(imgu);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register pipes (%d)\n", r);
+ goto fail_v4l2_pipes;
+ }
+
+ r = v4l2_device_register_subdev_nodes(&imgu->v4l2_dev);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register subdevs (%d)\n", r);
+ goto fail_subdevs;
+ }
+
+ r = media_device_register(&imgu->media_dev);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register media device (%d)\n", r);
+ goto fail_subdevs;
+ }
+
+ return 0;
+
+fail_subdevs:
+ ipu3_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
+fail_v4l2_pipes:
+ v4l2_device_unregister(&imgu->v4l2_dev);
+fail_v4l2_dev:
+ media_device_cleanup(&imgu->media_dev);
+
+ return r;
+}
+
+int imgu_v4l2_unregister(struct imgu_device *imgu)
+{
+ media_device_unregister(&imgu->media_dev);
+ ipu3_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
+ v4l2_device_unregister(&imgu->v4l2_dev);
+ media_device_cleanup(&imgu->media_dev);
+
+ return 0;
+}
+
+void imgu_v4l2_buffer_done(struct vb2_buffer *vb,
+ enum vb2_buffer_state state)
+{
+ struct ipu3_vb2_buffer *b =
+ container_of(vb, struct ipu3_vb2_buffer, vbb.vb2_buf);
+
+ list_del(&b->list);
+ vb2_buffer_done(&b->vbb.vb2_buf, state);
+}
diff --git a/drivers/staging/media/ipu3/ipu3.c b/drivers/staging/media/ipu3/ipu3.c
new file mode 100644
index 000000000000..d521b3afb8b1
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3.c
@@ -0,0 +1,830 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 - 2018 Intel Corporation
+ * Copyright 2017 Google LLC
+ *
+ * Based on Intel IPU4 driver.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include "ipu3.h"
+#include "ipu3-dmamap.h"
+#include "ipu3-mmu.h"
+
+#define IMGU_PCI_ID 0x1919
+#define IMGU_PCI_BAR 0
+#define IMGU_DMA_MASK DMA_BIT_MASK(39)
+#define IMGU_MAX_QUEUE_DEPTH (2 + 2)
+
+/*
+ * Pre-allocated buffer sizes for IMGU dummy buffers. These values
+ * should be tuned large enough to avoid buffer re-allocation while
+ * streaming, which keeps streaming latency low.
+ */
+#define CSS_QUEUE_IN_BUF_SIZE 0
+#define CSS_QUEUE_PARAMS_BUF_SIZE 0
+#define CSS_QUEUE_OUT_BUF_SIZE (4160 * 3120 * 12 / 8)
+#define CSS_QUEUE_VF_BUF_SIZE (1920 * 1080 * 12 / 8)
+#define CSS_QUEUE_STAT_3A_BUF_SIZE sizeof(struct ipu3_uapi_stats_3a)
+
+static const size_t css_queue_buf_size_map[IPU3_CSS_QUEUES] = {
+ [IPU3_CSS_QUEUE_IN] = CSS_QUEUE_IN_BUF_SIZE,
+ [IPU3_CSS_QUEUE_PARAMS] = CSS_QUEUE_PARAMS_BUF_SIZE,
+ [IPU3_CSS_QUEUE_OUT] = CSS_QUEUE_OUT_BUF_SIZE,
+ [IPU3_CSS_QUEUE_VF] = CSS_QUEUE_VF_BUF_SIZE,
+ [IPU3_CSS_QUEUE_STAT_3A] = CSS_QUEUE_STAT_3A_BUF_SIZE,
+};
+
+static const struct imgu_node_mapping imgu_node_map[IMGU_NODE_NUM] = {
+ [IMGU_NODE_IN] = {IPU3_CSS_QUEUE_IN, "input"},
+ [IMGU_NODE_PARAMS] = {IPU3_CSS_QUEUE_PARAMS, "parameters"},
+ [IMGU_NODE_OUT] = {IPU3_CSS_QUEUE_OUT, "output"},
+ [IMGU_NODE_VF] = {IPU3_CSS_QUEUE_VF, "viewfinder"},
+ [IMGU_NODE_STAT_3A] = {IPU3_CSS_QUEUE_STAT_3A, "3a stat"},
+};
+
+unsigned int imgu_node_to_queue(unsigned int node)
+{
+ return imgu_node_map[node].css_queue;
+}
+
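+/*
+ * Map a CSS queue to the corresponding video node. Returns IMGU_NODE_NUM
+ * if no node maps to the given queue.
+ */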
+unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
+{
+ unsigned int i;
+
+ for (i = 0; i < IMGU_NODE_NUM; i++)
+ if (imgu_node_map[i].css_queue == css_queue)
+ break;
+
+ return i;
+}
+
+/**************** Dummy buffers ****************/
+
+static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
+{
+ unsigned int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ for (i = 0; i < IPU3_CSS_QUEUES; i++)
+ ipu3_dmamap_free(imgu,
+ &imgu_pipe->queues[i].dmap);
+}
+
+static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
+ unsigned int pipe)
+{
+ unsigned int i;
+ size_t size;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ size = css_queue_buf_size_map[i];
+ /*
+ * Do not enable dummy buffers for the master queue;
+ * real buffers from user space are always required
+ * there.
+ */
+ if (i == IMGU_QUEUE_MASTER || size == 0)
+ continue;
+
+ if (!ipu3_dmamap_alloc(imgu,
+ &imgu_pipe->queues[i].dmap, size)) {
+ imgu_dummybufs_cleanup(imgu, pipe);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
+{
+ const struct v4l2_pix_format_mplane *mpix;
+ const struct v4l2_meta_format *meta;
+ unsigned int i, k, node;
+ size_t size;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ /* Allocate a dummy buffer for each queue where buffers are optional */
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ node = imgu_map_node(imgu, i);
+ if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
+ continue;
+
+ if (!imgu_pipe->nodes[IMGU_NODE_VF].enabled &&
+ i == IPU3_CSS_QUEUE_VF)
+ /*
+ * Do not enable dummy buffers for VF if it is not
+ * requested by the user.
+ */
+ continue;
+
+ meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
+ mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;
+
+ if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
+ size = meta->buffersize;
+ else
+ size = mpix->plane_fmt[0].sizeimage;
+
+ if (ipu3_css_dma_buffer_resize(imgu,
+ &imgu_pipe->queues[i].dmap,
+ size)) {
+ imgu_dummybufs_cleanup(imgu, pipe);
+ return -ENOMEM;
+ }
+
+ for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
+ ipu3_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
+ imgu_pipe->queues[i].dmap.daddr);
+ }
+
+ return 0;
+}
+
+/* May be called from atomic context */
+static struct ipu3_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
+ int queue, unsigned int pipe)
+{
+ unsigned int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ /* Dummy buffers are not allocated for the master queue */
+ if (queue == IPU3_CSS_QUEUE_IN)
+ return NULL;
+
+ if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
+ /* The dummy buffer should have been allocated already, not here */
+ return NULL;
+
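+ /* Pick a dummy buffer that is not currently queued to the CSS */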
+ for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
+ if (ipu3_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
+ IPU3_CSS_BUFFER_QUEUED)
+ break;
+
+ if (i == IMGU_MAX_QUEUE_DEPTH)
+ return NULL;
+
+ ipu3_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
+ imgu_pipe->queues[queue].dmap.daddr);
+
+ return &imgu_pipe->queues[queue].dummybufs[i];
+}
+
+/* Check if the given buffer is a dummy buffer */
+static bool imgu_dummybufs_check(struct imgu_device *imgu,
+ struct ipu3_css_buffer *buf,
+ unsigned int pipe)
+{
+ unsigned int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
+ if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
+ break;
+
+ return i < IMGU_MAX_QUEUE_DEPTH;
+}
+
+static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
+ enum vb2_buffer_state state)
+{
+ mutex_lock(&imgu->lock);
+ imgu_v4l2_buffer_done(vb, state);
+ mutex_unlock(&imgu->lock);
+}
+
+static struct ipu3_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
+ unsigned int node,
+ unsigned int pipe)
+{
+ struct imgu_buffer *buf;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ if (WARN_ON(node >= IMGU_NODE_NUM))
+ return NULL;
+
+ /* Find the first free buffer from the node */
+ list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
+ if (ipu3_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
+ return &buf->css_buf;
+ }
+
+ /* There were no free buffers, try to return a dummy buffer */
+ return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
+}
+
+/*
+ * Queue as many buffers to the CSS as possible. Buffers that do not fit
+ * into the CSS buffer queues remain unqueued and will be queued later.
+ */
+int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe)
+{
+ unsigned int node;
+ int r = 0;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ if (!ipu3_css_is_streaming(&imgu->css))
+ return 0;
+
+ dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
+ mutex_lock(&imgu->lock);
+
+ /* A buffer set is queued to the firmware only when an input buffer is ready */
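+ /*
+ * Cycle through the nodes from last to first, queueing one buffer
+ * per node on each pass, for as long as a fresh input buffer is
+ * available; the input queue has no dummy buffers, so the loop ends
+ * once the user-supplied input buffers run out.
+ */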
+ for (node = IMGU_NODE_NUM - 1;
+ imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
+ node = node ? node - 1 : IMGU_NODE_NUM - 1) {
+ if (node == IMGU_NODE_VF &&
+ !imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
+ dev_warn(&imgu->pci_dev->dev,
+ "Vf not enabled, ignore queue");
+ continue;
+ } else if (imgu_pipe->queue_enabled[node]) {
+ struct ipu3_css_buffer *buf =
+ imgu_queue_getbuf(imgu, node, pipe);
+ struct imgu_buffer *ibuf = NULL;
+ bool dummy;
+
+ if (!buf)
+ break;
+
+ r = ipu3_css_buf_queue(&imgu->css, pipe, buf);
+ if (r)
+ break;
+ dummy = imgu_dummybufs_check(imgu, buf, pipe);
+ if (!dummy)
+ ibuf = container_of(buf, struct imgu_buffer,
+ css_buf);
+ dev_dbg(&imgu->pci_dev->dev,
+ "queue %s %s buffer %u to css da: 0x%08x\n",
+ dummy ? "dummy" : "user",
+ imgu_node_map[node].name,
+ dummy ? 0 : ibuf->vid_buf.vbb.vb2_buf.index,
+ (u32)buf->daddr);
+ }
+ }
+ mutex_unlock(&imgu->lock);
+
+ if (r && r != -EBUSY)
+ goto failed;
+
+ return 0;
+
+failed:
+ /*
+ * On error, mark as failed all buffers that are not
+ * yet queued to the CSS.
+ */
+ dev_err(&imgu->pci_dev->dev,
+ "failed to queue buffer to CSS on queue %i (%d)\n",
+ node, r);
+
+ if (initial)
+ /* If we were called from streamon(), no need to finish bufs */
+ return r;
+
+ for (node = 0; node < IMGU_NODE_NUM; node++) {
+ struct imgu_buffer *buf, *buf0;
+
+ if (!imgu_pipe->queue_enabled[node])
+ continue; /* Skip disabled queues */
+
+ mutex_lock(&imgu->lock);
+ list_for_each_entry_safe(buf, buf0,
+ &imgu_pipe->nodes[node].buffers,
+ vid_buf.list) {
+ if (ipu3_css_buf_state(&buf->css_buf) ==
+ IPU3_CSS_BUFFER_QUEUED)
+ continue; /* Was already queued, skip */
+
+ imgu_v4l2_buffer_done(&buf->vid_buf.vbb.vb2_buf,
+ VB2_BUF_STATE_ERROR);
+ }
+ mutex_unlock(&imgu->lock);
+ }
+
+ return r;
+}
+
+static int imgu_powerup(struct imgu_device *imgu)
+{
+ int r;
+
+ r = ipu3_css_set_powerup(&imgu->pci_dev->dev, imgu->base);
+ if (r)
+ return r;
+
+ ipu3_mmu_resume(imgu->mmu);
+ return 0;
+}
+
+static void imgu_powerdown(struct imgu_device *imgu)
+{
+ ipu3_mmu_suspend(imgu->mmu);
+ ipu3_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
+}
+
+int imgu_s_stream(struct imgu_device *imgu, int enable)
+{
+ struct device *dev = &imgu->pci_dev->dev;
+ int r, pipe;
+
+ if (!enable) {
+ /* Stop streaming */
+ dev_dbg(dev, "stream off\n");
+ /* Block new buffers from being queued to the CSS. */
+ atomic_set(&imgu->qbuf_barrier, 1);
+ ipu3_css_stop_streaming(&imgu->css);
+ synchronize_irq(imgu->pci_dev->irq);
+ atomic_set(&imgu->qbuf_barrier, 0);
+ imgu_powerdown(imgu);
+ pm_runtime_put(&imgu->pci_dev->dev);
+
+ return 0;
+ }
+
+ /* Set Power */
+ r = pm_runtime_get_sync(dev);
+ if (r < 0) {
+ dev_err(dev, "failed to set imgu power\n");
+ pm_runtime_put(dev);
+ return r;
+ }
+
+ r = imgu_powerup(imgu);
+ if (r) {
+ dev_err(dev, "failed to power up imgu\n");
+ pm_runtime_put(dev);
+ return r;
+ }
+
+ /* Start CSS streaming */
+ r = ipu3_css_start_streaming(&imgu->css);
+ if (r) {
+ dev_err(dev, "failed to start css streaming (%d)", r);
+ goto fail_start_streaming;
+ }
+
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ /* Initialize dummy buffers */
+ r = imgu_dummybufs_init(imgu, pipe);
+ if (r) {
+ dev_err(dev, "failed to initialize dummy buffers (%d)", r);
+ goto fail_dummybufs;
+ }
+
+ /* Queue as many buffers as possible */
+ r = imgu_queue_buffers(imgu, true, pipe);
+ if (r) {
+ dev_err(dev, "failed to queue initial buffers (%d)", r);
+ goto fail_queueing;
+ }
+ }
+
+ return 0;
+fail_queueing:
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
+ imgu_dummybufs_cleanup(imgu, pipe);
+fail_dummybufs:
+ ipu3_css_stop_streaming(&imgu->css);
+fail_start_streaming:
+ pm_runtime_put(dev);
+
+ return r;
+}
+
+static int imgu_video_nodes_init(struct imgu_device *imgu)
+{
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
+ struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
+ struct imgu_media_pipe *imgu_pipe;
+ unsigned int i, j;
+ int r;
+
+ imgu->buf_struct_size = sizeof(struct imgu_buffer);
+
+ for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
+ imgu_pipe = &imgu->imgu_pipe[j];
+
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ imgu_pipe->nodes[i].name = imgu_node_map[i].name;
+ imgu_pipe->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
+ imgu_pipe->nodes[i].enabled = false;
+
+ if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
+ fmts[imgu_node_map[i].css_queue] =
+ &imgu_pipe->nodes[i].vdev_fmt.fmt.pix_mp;
+ atomic_set(&imgu_pipe->nodes[i].sequence, 0);
+ }
+ }
+
+ r = imgu_v4l2_register(imgu);
+ if (r)
+ return r;
+
+ /* Set initial formats and initialize formats of video nodes */
+ for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
+ imgu_pipe = &imgu->imgu_pipe[j];
+
+ rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
+ rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
+ ipu3_css_fmt_set(&imgu->css, fmts, rects, j);
+
+ /* Pre-allocate dummy buffers */
+ r = imgu_dummybufs_preallocate(imgu, j);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to pre-allocate dummy buffers (%d)", r);
+ goto out_cleanup;
+ }
+ }
+
+ return 0;
+
+out_cleanup:
+ for (j = 0; j < IMGU_MAX_PIPE_NUM; j++)
+ imgu_dummybufs_cleanup(imgu, j);
+
+ imgu_v4l2_unregister(imgu);
+
+ return r;
+}
+
+static void imgu_video_nodes_exit(struct imgu_device *imgu)
+{
+ int i;
+
+ for (i = 0; i < IMGU_MAX_PIPE_NUM; i++)
+ imgu_dummybufs_cleanup(imgu, i);
+
+ imgu_v4l2_unregister(imgu);
+}
+
+/**************** PCI interface ****************/
+
+static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
+{
+ struct imgu_device *imgu = imgu_ptr;
+ struct imgu_media_pipe *imgu_pipe;
+ int p;
+
+ /* Dequeue / queue buffers */
+ do {
+ u64 ns = ktime_get_ns();
+ struct ipu3_css_buffer *b;
+ struct imgu_buffer *buf = NULL;
+ unsigned int node, pipe;
+ bool dummy;
+
+ do {
+ mutex_lock(&imgu->lock);
+ b = ipu3_css_buf_dequeue(&imgu->css);
+ mutex_unlock(&imgu->lock);
+ } while (PTR_ERR(b) == -EAGAIN);
+
+ if (IS_ERR_OR_NULL(b)) {
+ if (!b || PTR_ERR(b) == -EBUSY) /* All done */
+ break;
+ dev_err(&imgu->pci_dev->dev,
+ "failed to dequeue buffers (%ld)\n",
+ PTR_ERR(b));
+ break;
+ }
+
+ node = imgu_map_node(imgu, b->queue);
+ pipe = b->pipe;
+ dummy = imgu_dummybufs_check(imgu, b, pipe);
+ if (!dummy)
+ buf = container_of(b, struct imgu_buffer, css_buf);
+ dev_dbg(&imgu->pci_dev->dev,
+ "dequeue %s %s buffer %d daddr 0x%x from css\n",
+ dummy ? "dummy" : "user",
+ imgu_node_map[node].name,
+ dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index,
+ (u32)b->daddr);
+
+ if (dummy)
+ /* It was a dummy buffer, skip it */
+ continue;
+
+ /* Fill the vb2 buffer entries and mark the buffer as ready */
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ if (!imgu_pipe->nodes[node].output) {
+ buf->vid_buf.vbb.vb2_buf.timestamp = ns;
+ buf->vid_buf.vbb.field = V4L2_FIELD_NONE;
+ buf->vid_buf.vbb.sequence =
+ atomic_inc_return(
+ &imgu_pipe->nodes[node].sequence);
+ dev_dbg(&imgu->pci_dev->dev, "vb2 buffer sequence %d",
+ buf->vid_buf.vbb.sequence);
+ }
+ imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
+ ipu3_css_buf_state(&buf->css_buf) ==
+ IPU3_CSS_BUFFER_DONE ?
+ VB2_BUF_STATE_DONE :
+ VB2_BUF_STATE_ERROR);
+ mutex_lock(&imgu->lock);
+ if (ipu3_css_queue_empty(&imgu->css))
+ wake_up_all(&imgu->buf_drain_wq);
+ mutex_unlock(&imgu->lock);
+ } while (1);
+
+ /*
+ * Try to queue more buffers to the CSS.
+ * qbuf_barrier is used to prevent new buffers
+ * from being queued to the CSS.
+ */
+ if (!atomic_read(&imgu->qbuf_barrier))
+ for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
+ imgu_queue_buffers(imgu, false, p);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t imgu_isr(int irq, void *imgu_ptr)
+{
+ struct imgu_device *imgu = imgu_ptr;
+
+ /* Acknowledge the interrupt */
+ if (ipu3_css_irq_ack(&imgu->css) < 0)
+ return IRQ_NONE;
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int imgu_pci_config_setup(struct pci_dev *dev)
+{
+ u16 pci_command;
+ int r = pci_enable_msi(dev);
+
+ if (r) {
+ dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
+ return r;
+ }
+
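+ /* Enable memory space and bus mastering; MSI is used instead of INTx */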
+ pci_read_config_word(dev, PCI_COMMAND, &pci_command);
+ pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(dev, PCI_COMMAND, pci_command);
+
+ return 0;
+}
+
+static int imgu_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ struct imgu_device *imgu;
+ phys_addr_t phys;
+ unsigned long phys_len;
+ void __iomem *const *iomap;
+ int r;
+
+ imgu = devm_kzalloc(&pci_dev->dev, sizeof(*imgu), GFP_KERNEL);
+ if (!imgu)
+ return -ENOMEM;
+
+ imgu->pci_dev = pci_dev;
+
+ r = pcim_enable_device(pci_dev);
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
+ return r;
+ }
+
+ dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
+ pci_dev->device, pci_dev->revision);
+
+ phys = pci_resource_start(pci_dev, IMGU_PCI_BAR);
+ phys_len = pci_resource_len(pci_dev, IMGU_PCI_BAR);
+
+ r = pcim_iomap_regions(pci_dev, 1 << IMGU_PCI_BAR, pci_name(pci_dev));
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
+ return r;
+ }
+ dev_info(&pci_dev->dev, "physical base address %pap, %lu bytes\n",
+ &phys, phys_len);
+
+ iomap = pcim_iomap_table(pci_dev);
+ if (!iomap) {
+ dev_err(&pci_dev->dev, "failed to iomap table\n");
+ return -ENODEV;
+ }
+
+ imgu->base = iomap[IMGU_PCI_BAR];
+
+ pci_set_drvdata(pci_dev, imgu);
+
+ pci_set_master(pci_dev);
+
+ r = dma_coerce_mask_and_coherent(&pci_dev->dev, IMGU_DMA_MASK);
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
+ return -ENODEV;
+ }
+
+ r = imgu_pci_config_setup(pci_dev);
+ if (r)
+ return r;
+
+ mutex_init(&imgu->lock);
+ atomic_set(&imgu->qbuf_barrier, 0);
+ init_waitqueue_head(&imgu->buf_drain_wq);
+
+ r = ipu3_css_set_powerup(&pci_dev->dev, imgu->base);
+ if (r) {
+ dev_err(&pci_dev->dev,
+ "failed to power up CSS (%d)\n", r);
+ goto out_mutex_destroy;
+ }
+
+ imgu->mmu = ipu3_mmu_init(&pci_dev->dev, imgu->base);
+ if (IS_ERR(imgu->mmu)) {
+ r = PTR_ERR(imgu->mmu);
+ dev_err(&pci_dev->dev, "failed to initialize MMU (%d)\n", r);
+ goto out_css_powerdown;
+ }
+
+ r = ipu3_dmamap_init(imgu);
+ if (r) {
+ dev_err(&pci_dev->dev,
+ "failed to initialize DMA mapping (%d)\n", r);
+ goto out_mmu_exit;
+ }
+
+ /* ISP programming */
+ r = ipu3_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to initialize CSS (%d)\n", r);
+ goto out_dmamap_exit;
+ }
+
+ /* v4l2 sub-device registration */
+ r = imgu_video_nodes_init(imgu);
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to create V4L2 devices (%d)\n",
+ r);
+ goto out_css_cleanup;
+ }
+
+ r = devm_request_threaded_irq(&pci_dev->dev, pci_dev->irq,
+ imgu_isr, imgu_isr_threaded,
+ IRQF_SHARED, IMGU_NAME, imgu);
+ if (r) {
+ dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
+ goto out_video_exit;
+ }
+
+ pm_runtime_put_noidle(&pci_dev->dev);
+ pm_runtime_allow(&pci_dev->dev);
+
+ return 0;
+
+out_video_exit:
+ imgu_video_nodes_exit(imgu);
+out_css_cleanup:
+ ipu3_css_cleanup(&imgu->css);
+out_dmamap_exit:
+ ipu3_dmamap_exit(imgu);
+out_mmu_exit:
+ ipu3_mmu_exit(imgu->mmu);
+out_css_powerdown:
+ ipu3_css_set_powerdown(&pci_dev->dev, imgu->base);
+out_mutex_destroy:
+ mutex_destroy(&imgu->lock);
+
+ return r;
+}
+
+static void imgu_pci_remove(struct pci_dev *pci_dev)
+{
+ struct imgu_device *imgu = pci_get_drvdata(pci_dev);
+
+ pm_runtime_forbid(&pci_dev->dev);
+ pm_runtime_get_noresume(&pci_dev->dev);
+
+ imgu_video_nodes_exit(imgu);
+ ipu3_css_cleanup(&imgu->css);
+ ipu3_css_set_powerdown(&pci_dev->dev, imgu->base);
+ ipu3_dmamap_exit(imgu);
+ ipu3_mmu_exit(imgu->mmu);
+ mutex_destroy(&imgu->lock);
+}
+
+static int __maybe_unused imgu_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct imgu_device *imgu = pci_get_drvdata(pci_dev);
+
+ dev_dbg(dev, "enter %s\n", __func__);
+ imgu->suspend_in_stream = ipu3_css_is_streaming(&imgu->css);
+ if (!imgu->suspend_in_stream)
+ goto out;
+ /* Block new buffers from being queued to the CSS. */
+ atomic_set(&imgu->qbuf_barrier, 1);
+ /*
+ * Wait for the currently running IRQ handler to finish so that
+ * no new buffers will be queued to the firmware later.
+ */
+ synchronize_irq(pci_dev->irq);
+ /* Wait until all buffers in CSS are done. */
+ if (!wait_event_timeout(imgu->buf_drain_wq,
+ ipu3_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
+ dev_err(dev, "wait buffer drain timeout.\n");
+
+ ipu3_css_stop_streaming(&imgu->css);
+ atomic_set(&imgu->qbuf_barrier, 0);
+ imgu_powerdown(imgu);
+ pm_runtime_force_suspend(dev);
+out:
+ dev_dbg(dev, "leave %s\n", __func__);
+ return 0;
+}
+
+static int __maybe_unused imgu_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct imgu_device *imgu = pci_get_drvdata(pci_dev);
+ int r = 0;
+ unsigned int pipe;
+
+ dev_dbg(dev, "enter %s\n", __func__);
+
+ if (!imgu->suspend_in_stream)
+ goto out;
+
+ pm_runtime_force_resume(dev);
+
+ r = imgu_powerup(imgu);
+ if (r) {
+ dev_err(dev, "failed to power up imgu\n");
+ goto out;
+ }
+
+ /* Start CSS streaming */
+ r = ipu3_css_start_streaming(&imgu->css);
+ if (r) {
+ dev_err(dev, "failed to resume css streaming (%d)", r);
+ goto out;
+ }
+
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = imgu_queue_buffers(imgu, true, pipe);
+ if (r)
+ dev_err(dev, "failed to queue buffers to pipe %d (%d)",
+ pipe, r);
+ }
+
+out:
+ dev_dbg(dev, "leave %s\n", __func__);
+
+ return r;
+}
+
+/*
+ * The PCI runtime PM framework checks for the existence of driver
+ * runtime PM callbacks. Provide a dummy callback here to keep runtime
+ * PM from entering an error state.
+ */
+static int imgu_rpm_dummy_cb(struct device *dev)
+{
+ return 0;
+}
+
+static const struct dev_pm_ops imgu_pm_ops = {
+ SET_RUNTIME_PM_OPS(&imgu_rpm_dummy_cb, &imgu_rpm_dummy_cb, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(&imgu_suspend, &imgu_resume)
+};
+
+static const struct pci_device_id imgu_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, IMGU_PCI_ID) },
+ { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, imgu_pci_tbl);
+
+static struct pci_driver imgu_pci_driver = {
+ .name = IMGU_NAME,
+ .id_table = imgu_pci_tbl,
+ .probe = imgu_pci_probe,
+ .remove = imgu_pci_remove,
+ .driver = {
+ .pm = &imgu_pm_ops,
+ },
+};
+
+module_pci_driver(imgu_pci_driver);
+
+MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
+MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
+MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel ipu3_imgu PCI driver");
diff --git a/drivers/staging/media/ipu3/ipu3.h b/drivers/staging/media/ipu3/ipu3.h
new file mode 100644
index 000000000000..04fc99f47ebb
--- /dev/null
+++ b/drivers/staging/media/ipu3/ipu3.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Intel Corporation */
+
+#ifndef __IPU3_H
+#define __IPU3_H
+
+#include <linux/iova.h>
+#include <linux/pci.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "ipu3-css.h"
+
+#define IMGU_NAME "ipu3-imgu"
+
+/*
+ * The semantics of the driver are that whenever a buffer is available in
+ * the master queue, the driver also queues a buffer to every other active
+ * node. If user space has not provided a buffer to one of the other video
+ * nodes first, the driver queues an internal dummy buffer instead.
+ */
+#define IMGU_QUEUE_MASTER IPU3_CSS_QUEUE_IN
+#define IMGU_QUEUE_FIRST_INPUT IPU3_CSS_QUEUE_OUT
+#define IMGU_MAX_QUEUE_DEPTH (2 + 2)
+
+#define IMGU_NODE_IN 0 /* Input RAW image */
+#define IMGU_NODE_PARAMS 1 /* Input parameters */
+#define IMGU_NODE_OUT 2 /* Main output for still or video */
+#define IMGU_NODE_VF 3 /* Preview */
+#define IMGU_NODE_STAT_3A 4 /* 3A statistics */
+#define IMGU_NODE_NUM 5
+
+#define file_to_intel_ipu3_node(__file) \
+ container_of(video_devdata(__file), struct imgu_video_device, vdev)
+
+#define IPU3_INPUT_MIN_WIDTH 0U
+#define IPU3_INPUT_MIN_HEIGHT 0U
+#define IPU3_INPUT_MAX_WIDTH 5120U
+#define IPU3_INPUT_MAX_HEIGHT 38404U
+#define IPU3_OUTPUT_MIN_WIDTH 2U
+#define IPU3_OUTPUT_MIN_HEIGHT 2U
+#define IPU3_OUTPUT_MAX_WIDTH 4480U
+#define IPU3_OUTPUT_MAX_HEIGHT 34004U
+
+struct ipu3_vb2_buffer {
+ /* Public fields */
+ struct vb2_v4l2_buffer vbb; /* Must be the first field */
+
+ /* Private fields */
+ struct list_head list;
+};
+
+struct imgu_buffer {
+ struct ipu3_vb2_buffer vid_buf; /* Must be the first field */
+ struct ipu3_css_buffer css_buf;
+ struct ipu3_css_map map;
+};
+
+struct imgu_node_mapping {
+ unsigned int css_queue;
+ const char *name;
+};
+
+/**
+ * struct imgu_video_device
+ * Each node registers as a video device and maintains its
+ * own vb2_queue.
+ */
+struct imgu_video_device {
+ const char *name;
+ bool output;
+ bool enabled;
+ struct v4l2_format vdev_fmt; /* Currently set format */
+
+ /* Private fields */
+ struct video_device vdev;
+ struct media_pad vdev_pad;
+ struct v4l2_mbus_framefmt pad_fmt;
+ struct vb2_queue vbq;
+ struct list_head buffers;
+ /* Protect the vb2_queue and vdev structs */
+ struct mutex lock;
+ atomic_t sequence;
+ unsigned int id;
+ unsigned int pipe;
+};
+
+struct imgu_v4l2_subdev {
+ unsigned int pipe;
+ struct v4l2_subdev subdev;
+ struct media_pad subdev_pads[IMGU_NODE_NUM];
+ struct {
+ struct v4l2_rect eff; /* effective resolution */
+ struct v4l2_rect bds; /* bayer-domain scaled resolution */
+ struct v4l2_rect gdc; /* gdc output resolution */
+ } rect;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl;
+ atomic_t running_mode;
+ bool active;
+};
+
+struct imgu_media_pipe {
+ unsigned int pipe;
+
+ /* Internally enabled queues */
+ struct {
+ struct ipu3_css_map dmap;
+ struct ipu3_css_buffer dummybufs[IMGU_MAX_QUEUE_DEPTH];
+ } queues[IPU3_CSS_QUEUES];
+ struct imgu_video_device nodes[IMGU_NODE_NUM];
+ bool queue_enabled[IMGU_NODE_NUM];
+ struct media_pipeline pipeline;
+ struct imgu_v4l2_subdev imgu_sd;
+};
+
+/*
+ * imgu_device -- ImgU (Imaging Unit) driver
+ */
+struct imgu_device {
+ struct pci_dev *pci_dev;
+ void __iomem *base;
+
+ /* Public fields, fill before registering */
+ unsigned int buf_struct_size;
+ bool streaming; /* Public read only */
+
+ struct imgu_media_pipe imgu_pipe[IMGU_MAX_PIPE_NUM];
+
+ /* Private fields */
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+ struct v4l2_file_operations v4l2_file_ops;
+
+ /* MMU driver for css */
+ struct ipu3_mmu_info *mmu;
+ struct iova_domain iova_domain;
+
+ /* css - Camera Sub-System */
+ struct ipu3_css css;
+
+ /*
+ * Coarse-grained lock to protect
+ * vid_buf.list and css->queue
+ */
+ struct mutex lock;
+ /* Forbid streaming and buffer queuing during system suspend. */
+ atomic_t qbuf_barrier;
+ /* Indicate whether system suspend took place while imgu was streaming. */
+ bool suspend_in_stream;
+ /* Used to wait for FW buffer queue drain. */
+ wait_queue_head_t buf_drain_wq;
+};
+
+unsigned int imgu_node_to_queue(unsigned int node);
+unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue);
+int imgu_queue_buffers(struct imgu_device *imgu, bool initial,
+ unsigned int pipe);
+
+int imgu_v4l2_register(struct imgu_device *dev);
+int imgu_v4l2_unregister(struct imgu_device *dev);
+void imgu_v4l2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
+
+int imgu_s_stream(struct imgu_device *imgu, int enable);
+
+#endif
diff --git a/drivers/staging/media/rockchip/vpu/Kconfig b/drivers/staging/media/rockchip/vpu/Kconfig
new file mode 100644
index 000000000000..9a6fc1378242
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/Kconfig
@@ -0,0 +1,13 @@
+config VIDEO_ROCKCHIP_VPU
+ tristate "Rockchip VPU driver"
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ default n
+ help
+ Support for the Video Processing Unit present on Rockchip SoCs,
+ which accelerates video and image encoding and decoding.
+ To compile this driver as a module, choose M here: the module
+ will be called rockchip-vpu.
diff --git a/drivers/staging/media/rockchip/vpu/Makefile b/drivers/staging/media/rockchip/vpu/Makefile
new file mode 100644
index 000000000000..e9d733bb7632
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_VIDEO_ROCKCHIP_VPU) += rockchip-vpu.o
+
+rockchip-vpu-y += \
+ rockchip_vpu_drv.o \
+ rockchip_vpu_enc.o \
+ rk3288_vpu_hw.o \
+ rk3288_vpu_hw_jpeg_enc.o \
+ rk3399_vpu_hw.o \
+ rk3399_vpu_hw_jpeg_enc.o \
+ rockchip_vpu_jpeg.o
diff --git a/drivers/staging/media/rockchip/vpu/TODO b/drivers/staging/media/rockchip/vpu/TODO
new file mode 100644
index 000000000000..fa0c94057007
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/TODO
@@ -0,0 +1,13 @@
+* Support for VP8, VP9 and H264 is planned for this driver.
+
+ Given that the V4L2 controls for those codecs will become part
+ of the uABI once the driver leaves staging, they need to be
+ settled first.
+
+ For this reason, we are keeping this driver in staging for now.
+
+* Add support for the S_SELECTION API.
+ See the comment for VEPU_REG_ENC_OVER_FILL_STRM_OFFSET.
+
+* Instead of having a DMA bounce buffer, it could be possible to use a
+ normal buffer and memmove() the payload to make space for the header.
+ This might need to use extra JPEG markers for padding reasons.
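The memmove() idea in the last item can be sketched as follows, assuming a capture buffer with the payload written by the hardware at its start and enough room for header plus payload; jpeg_prepend_header() is a hypothetical name:

    #include <linux/string.h>

    /*
     * Hypothetical sketch of the TODO item above: shift the
     * hardware-written payload up to make room, then copy the
     * assembled JPEG header in front of it.  The buffer must hold
     * at least header_size + payload_size bytes.
     */
    static void jpeg_prepend_header(void *buf, const void *header,
    				size_t header_size, size_t payload_size)
    {
    	memmove((char *)buf + header_size, buf, payload_size);
    	memcpy(buf, header, header_size);
    }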
diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c
new file mode 100644
index 000000000000..a5e9d183fffd
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+
+#include "rockchip_vpu.h"
+#include "rockchip_vpu_jpeg.h"
+#include "rk3288_vpu_regs.h"
+
+#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
+
+/*
+ * Supported formats.
+ */
+
+static const struct rockchip_vpu_fmt rk3288_vpu_enc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = RK_VPU_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = RK_VPU_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = RK_VPU_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codec_mode = RK_VPU_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .codec_mode = RK_VPU_MODE_JPEG_ENC,
+ .max_depth = 2,
+ .header_size = JPEG_HEADER_SIZE,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = 8192,
+ .step_width = JPEG_MB_DIM,
+ .min_height = 32,
+ .max_height = 8192,
+ .step_height = JPEG_MB_DIM,
+ },
+ },
+};
+
+static irqreturn_t rk3288_vepu_irq(int irq, void *dev_id)
+{
+ struct rockchip_vpu_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status, bytesused;
+
+ status = vepu_read(vpu, VEPU_REG_INTERRUPT);
+ bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
+ state = (status & VEPU_REG_INTERRUPT_FRAME_RDY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+
+ rockchip_vpu_irq_done(vpu, bytesused, state);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3288_vpu_hw_init(struct rockchip_vpu_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3288_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static void rk3288_vpu_enc_reset(struct rockchip_vpu_ctx *ctx)
+{
+ struct rockchip_vpu_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_ENC_CTRL);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+}
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct rockchip_vpu_codec_ops rk3288_vpu_codec_ops[] = {
+ [RK_VPU_MODE_JPEG_ENC] = {
+ .run = rk3288_vpu_jpeg_enc_run,
+ .reset = rk3288_vpu_enc_reset,
+ },
+};
+
+/*
+ * VPU variant.
+ */
+
+const struct rockchip_vpu_variant rk3288_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rk3288_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rk3288_vpu_enc_fmts),
+ .codec_ops = rk3288_vpu_codec_ops,
+ .codec = RK_VPU_CODEC_JPEG,
+ .vepu_irq = rk3288_vepu_irq,
+ .init = rk3288_vpu_hw_init,
+ .clk_names = {"aclk", "hclk"},
+ .num_clocks = 2
+};
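The variant structure is the only per-SoC glue: the shared core is expected to select the codec_ops entry matching the format's codec_mode and call it when the m2m framework schedules a job. A hedged sketch of that dispatch, assuming ctx->codec_ops was set at context creation (the wrapper itself is illustrative):

    /*
     * Illustrative dispatch only; assumes the core stored
     * &variant->codec_ops[fmt->codec_mode] in ctx->codec_ops.
     */
    static void rockchip_vpu_run_sketch(struct rockchip_vpu_ctx *ctx)
    {
    	ctx->codec_ops->run(ctx);	/* here: rk3288_vpu_jpeg_enc_run() */
    }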
diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
new file mode 100644
index 000000000000..5282236d1bb1
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <media/v4l2-mem2mem.h>
+#include "rockchip_vpu_jpeg.h"
+#include "rockchip_vpu.h"
+#include "rockchip_vpu_common.h"
+#include "rockchip_vpu_hw.h"
+#include "rk3288_vpu_regs.h"
+
+#define VEPU_JPEG_QUANT_TABLE_COUNT 16
+
+static void rk3288_vpu_set_src_img_ctrl(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ u32 reg;
+
+ reg = VEPU_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width)
+ | VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(0)
+ | VEPU_REG_IN_IMG_CTRL_OVRFLB_D4(0)
+ | VEPU_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_IN_IMG_CTRL);
+}
+
+static void rk3288_vpu_jpeg_enc_set_buffers(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx,
+ struct vb2_buffer *src_buf)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ dma_addr_t src[3];
+
+ WARN_ON(pix_fmt->num_planes > 3);
+
+ vepu_write_relaxed(vpu, ctx->bounce_dma_addr,
+ VEPU_REG_ADDR_OUTPUT_STREAM);
+ vepu_write_relaxed(vpu, ctx->bounce_size,
+ VEPU_REG_STR_BUF_LIMIT);
+
+ if (pix_fmt->num_planes == 1) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ /* The single-plane formats we support are all interleaved. */
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ } else if (pix_fmt->num_planes == 2) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
+ } else {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
+ vepu_write_relaxed(vpu, src[2], VEPU_REG_ADDR_IN_PLANE_2);
+ }
+}
+
+static void
+rk3288_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
+ unsigned char *luma_qtable,
+ unsigned char *chroma_qtable)
+{
+ u32 reg, i;
+
+ /* Each register packs four consecutive bytes of the qtable. */
+ for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&luma_qtable[4 * i]);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i));
+
+ reg = get_unaligned_be32(&chroma_qtable[4 * i]);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i));
+ }
+}
+
+void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
+{
+ struct rockchip_vpu_dev *vpu = ctx->dev;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct rockchip_vpu_jpeg_ctx jpeg_ctx;
+ u32 reg;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
+ jpeg_ctx.buffer = vb2_plane_vaddr(dst_buf, 0);
+ jpeg_ctx.width = ctx->dst_fmt.width;
+ jpeg_ctx.height = ctx->dst_fmt.height;
+ jpeg_ctx.quality = ctx->jpeg_quality;
+ rockchip_vpu_jpeg_header_assemble(&jpeg_ctx);
+
+ /* Switch to JPEG encoder mode before writing registers */
+ vepu_write_relaxed(vpu, VEPU_REG_ENC_CTRL_ENC_MODE_JPEG,
+ VEPU_REG_ENC_CTRL);
+
+ rk3288_vpu_set_src_img_ctrl(vpu, ctx);
+ rk3288_vpu_jpeg_enc_set_buffers(vpu, ctx, src_buf);
+ rk3288_vpu_jpeg_enc_set_qtable(vpu,
+ rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
+ rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
+
+ reg = VEPU_REG_AXI_CTRL_OUTPUT_SWAP16
+ | VEPU_REG_AXI_CTRL_INPUT_SWAP16
+ | VEPU_REG_AXI_CTRL_BURST_LEN(16)
+ | VEPU_REG_AXI_CTRL_OUTPUT_SWAP32
+ | VEPU_REG_AXI_CTRL_INPUT_SWAP32
+ | VEPU_REG_AXI_CTRL_OUTPUT_SWAP8
+ | VEPU_REG_AXI_CTRL_INPUT_SWAP8;
+ /* Make sure that all registers are written at this point. */
+ vepu_write(vpu, reg, VEPU_REG_AXI_CTRL);
+
+ reg = VEPU_REG_ENC_CTRL_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
+ | VEPU_REG_ENC_CTRL_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ | VEPU_REG_ENC_CTRL_ENC_MODE_JPEG
+ | VEPU_REG_ENC_PIC_INTRA
+ | VEPU_REG_ENC_CTRL_EN_BIT;
+ /* Kick the watchdog and start encoding */
+ schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+ vepu_write(vpu, reg, VEPU_REG_ENC_CTRL);
+}
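Each VEPU_REG_JPEG_*_QUAT(i) register holds four consecutive bytes of the 64-entry zigzag-ordered quantization table in big-endian order, which is why the loop above steps the byte index by four (16 registers x 4 bytes = 64 coefficients). A standalone sketch of the packing, with dummy table values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint8_t qtable[64];	/* dummy zigzag-ordered coefficients */
    	unsigned int i;

    	for (i = 0; i < 64; i++)
    		qtable[i] = i + 1;

    	/* 16 registers, four big-endian table bytes each. */
    	for (i = 0; i < 16; i++) {
    		uint32_t reg = (uint32_t)qtable[4 * i] << 24 |
    			       (uint32_t)qtable[4 * i + 1] << 16 |
    			       (uint32_t)qtable[4 * i + 2] << 8 |
    			       qtable[4 * i + 3];
    		printf("QUAT(%2u) = 0x%08x\n", i, reg);
    	}
    	return 0;
    }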
diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h b/drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h
new file mode 100644
index 000000000000..9d0b9bdf3297
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rk3288_vpu_regs.h
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Rockchip VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef RK3288_VPU_REGS_H_
+#define RK3288_VPU_REGS_H_
+
+/* Encoder registers. */
+#define VEPU_REG_INTERRUPT 0x004
+#define VEPU_REG_INTERRUPT_FRAME_RDY BIT(2)
+#define VEPU_REG_INTERRUPT_DIS_BIT BIT(1)
+#define VEPU_REG_INTERRUPT_BIT BIT(0)
+#define VEPU_REG_AXI_CTRL 0x008
+#define VEPU_REG_AXI_CTRL_OUTPUT_SWAP16 BIT(15)
+#define VEPU_REG_AXI_CTRL_INPUT_SWAP16 BIT(14)
+#define VEPU_REG_AXI_CTRL_BURST_LEN(x) ((x) << 8)
+#define VEPU_REG_AXI_CTRL_GATE_BIT BIT(4)
+#define VEPU_REG_AXI_CTRL_OUTPUT_SWAP32 BIT(3)
+#define VEPU_REG_AXI_CTRL_INPUT_SWAP32 BIT(2)
+#define VEPU_REG_AXI_CTRL_OUTPUT_SWAP8 BIT(1)
+#define VEPU_REG_AXI_CTRL_INPUT_SWAP8 BIT(0)
+#define VEPU_REG_ADDR_OUTPUT_STREAM 0x014
+#define VEPU_REG_ADDR_OUTPUT_CTRL 0x018
+#define VEPU_REG_ADDR_REF_LUMA 0x01c
+#define VEPU_REG_ADDR_REF_CHROMA 0x020
+#define VEPU_REG_ADDR_REC_LUMA 0x024
+#define VEPU_REG_ADDR_REC_CHROMA 0x028
+#define VEPU_REG_ADDR_IN_PLANE_0 0x02c
+#define VEPU_REG_ADDR_IN_PLANE_1 0x030
+#define VEPU_REG_ADDR_IN_PLANE_2 0x034
+#define VEPU_REG_ENC_CTRL 0x038
+#define VEPU_REG_ENC_CTRL_TIMEOUT_EN BIT(31)
+#define VEPU_REG_ENC_CTRL_NAL_MODE_BIT BIT(29)
+#define VEPU_REG_ENC_CTRL_WIDTH(w) ((w) << 19)
+#define VEPU_REG_ENC_CTRL_HEIGHT(h) ((h) << 10)
+#define VEPU_REG_ENC_PIC_INTER (0x0 << 3)
+#define VEPU_REG_ENC_PIC_INTRA (0x1 << 3)
+#define VEPU_REG_ENC_PIC_MVCINTER (0x2 << 3)
+#define VEPU_REG_ENC_CTRL_ENC_MODE_H264 (0x3 << 1)
+#define VEPU_REG_ENC_CTRL_ENC_MODE_JPEG (0x2 << 1)
+#define VEPU_REG_ENC_CTRL_ENC_MODE_VP8 (0x1 << 1)
+#define VEPU_REG_ENC_CTRL_EN_BIT BIT(0)
+#define VEPU_REG_IN_IMG_CTRL 0x03c
+#define VEPU_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12)
+#define VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10)
+#define VEPU_REG_IN_IMG_CTRL_OVRFLB_D4(x) ((x) << 6)
+#define VEPU_REG_IN_IMG_CTRL_FMT(x) ((x) << 2)
+#define VEPU_REG_ENC_CTRL0 0x040
+#define VEPU_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26)
+#define VEPU_REG_ENC_CTRL0_SLICE_ALPHA(x) ((x) << 22)
+#define VEPU_REG_ENC_CTRL0_SLICE_BETA(x) ((x) << 18)
+#define VEPU_REG_ENC_CTRL0_CHROMA_QP_OFFSET(x) ((x) << 13)
+#define VEPU_REG_ENC_CTRL0_FILTER_DIS(x) ((x) << 5)
+#define VEPU_REG_ENC_CTRL0_IDR_PICID(x) ((x) << 1)
+#define VEPU_REG_ENC_CTRL0_CONSTR_INTRA_PRED BIT(0)
+#define VEPU_REG_ENC_CTRL1 0x044
+#define VEPU_REG_ENC_CTRL1_PPS_ID(x) ((x) << 24)
+#define VEPU_REG_ENC_CTRL1_INTRA_PRED_MODE(x) ((x) << 16)
+#define VEPU_REG_ENC_CTRL1_FRAME_NUM(x) ((x))
+#define VEPU_REG_ENC_CTRL2 0x048
+#define VEPU_REG_ENC_CTRL2_DEBLOCKING_FILETER_MODE(x) ((x) << 30)
+#define VEPU_REG_ENC_CTRL2_H264_SLICE_SIZE(x) ((x) << 23)
+#define VEPU_REG_ENC_CTRL2_DISABLE_QUARTER_PIXMV BIT(22)
+#define VEPU_REG_ENC_CTRL2_TRANS8X8_MODE_EN BIT(21)
+#define VEPU_REG_ENC_CTRL2_CABAC_INIT_IDC(x) ((x) << 19)
+#define VEPU_REG_ENC_CTRL2_ENTROPY_CODING_MODE BIT(18)
+#define VEPU_REG_ENC_CTRL2_H264_INTER4X4_MODE BIT(17)
+#define VEPU_REG_ENC_CTRL2_H264_STREAM_MODE BIT(16)
+#define VEPU_REG_ENC_CTRL2_INTRA16X16_MODE(x) ((x))
+#define VEPU_REG_ENC_CTRL3 0x04c
+#define VEPU_REG_ENC_CTRL3_MUTIMV_EN BIT(30)
+#define VEPU_REG_ENC_CTRL3_MV_PENALTY_1_4P(x) ((x) << 20)
+#define VEPU_REG_ENC_CTRL3_MV_PENALTY_4P(x) ((x) << 10)
+#define VEPU_REG_ENC_CTRL3_MV_PENALTY_1P(x) ((x))
+#define VEPU_REG_ENC_CTRL4 0x050
+#define VEPU_REG_ENC_CTRL4_MV_PENALTY_16X8_8X16(x) ((x) << 20)
+#define VEPU_REG_ENC_CTRL4_MV_PENALTY_8X8(x) ((x) << 10)
+#define VEPU_REG_ENC_CTRL4_8X4_4X8(x) ((x))
+#define VEPU_REG_ENC_CTRL5 0x054
+#define VEPU_REG_ENC_CTRL5_MACROBLOCK_PENALTY(x) ((x) << 24)
+#define VEPU_REG_ENC_CTRL5_COMPLETE_SLICES(x) ((x) << 16)
+#define VEPU_REG_ENC_CTRL5_INTER_MODE(x) ((x))
+#define VEPU_REG_STR_HDR_REM_MSB 0x058
+#define VEPU_REG_STR_HDR_REM_LSB 0x05c
+#define VEPU_REG_STR_BUF_LIMIT 0x060
+#define VEPU_REG_MAD_CTRL 0x064
+#define VEPU_REG_MAD_CTRL_QP_ADJUST(x) ((x) << 28)
+#define VEPU_REG_MAD_CTRL_MAD_THREDHOLD(x) ((x) << 22)
+#define VEPU_REG_MAD_CTRL_QP_SUM_DIV2(x) ((x))
+#define VEPU_REG_ADDR_VP8_PROB_CNT 0x068
+#define VEPU_REG_QP_VAL 0x06c
+#define VEPU_REG_QP_VAL_LUM(x) ((x) << 26)
+#define VEPU_REG_QP_VAL_MAX(x) ((x) << 20)
+#define VEPU_REG_QP_VAL_MIN(x) ((x) << 14)
+#define VEPU_REG_QP_VAL_CHECKPOINT_DISTAN(x) ((x))
+#define VEPU_REG_VP8_QP_VAL(i) (0x06c + ((i) * 0x4))
+#define VEPU_REG_CHECKPOINT(i) (0x070 + ((i) * 0x4))
+#define VEPU_REG_CHECKPOINT_CHECK0(x) (((x) & 0xffff))
+#define VEPU_REG_CHECKPOINT_CHECK1(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_CHECKPOINT_RESULT(x) \
+ ((((x) >> (16 - 16 * ((i) & 1))) & 0xffff) * 32)
+#define VEPU_REG_CHKPT_WORD_ERR(i) (0x084 + ((i) * 0x4))
+#define VEPU_REG_CHKPT_WORD_ERR_CHK0(x) (((x) & 0xffff))
+#define VEPU_REG_CHKPT_WORD_ERR_CHK1(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_VP8_BOOL_ENC 0x08c
+#define VEPU_REG_CHKPT_DELTA_QP 0x090
+#define VEPU_REG_CHKPT_DELTA_QP_CHK0(x) (((x) & 0x0f) << 0)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK1(x) (((x) & 0x0f) << 4)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK2(x) (((x) & 0x0f) << 8)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK3(x) (((x) & 0x0f) << 12)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK4(x) (((x) & 0x0f) << 16)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK5(x) (((x) & 0x0f) << 20)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK6(x) (((x) & 0x0f) << 24)
+#define VEPU_REG_VP8_CTRL0 0x090
+#define VEPU_REG_RLC_CTRL 0x094
+#define VEPU_REG_RLC_CTRL_STR_OFFS_SHIFT 23
+#define VEPU_REG_RLC_CTRL_STR_OFFS_MASK (0x3f << 23)
+#define VEPU_REG_RLC_CTRL_RLC_SUM(x) ((x))
+#define VEPU_REG_MB_CTRL 0x098
+#define VEPU_REG_MB_CNT_OUT(x) (((x) & 0xffff))
+#define VEPU_REG_MB_CNT_SET(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_ADDR_NEXT_PIC 0x09c
+#define VEPU_REG_JPEG_LUMA_QUAT(i) (0x100 + ((i) * 0x4))
+#define VEPU_REG_JPEG_CHROMA_QUAT(i) (0x140 + ((i) * 0x4))
+#define VEPU_REG_STABILIZATION_OUTPUT 0x0a0
+#define VEPU_REG_ADDR_CABAC_TBL 0x0cc
+#define VEPU_REG_ADDR_MV_OUT 0x0d0
+#define VEPU_REG_RGB_YUV_COEFF(i) (0x0d4 + ((i) * 0x4))
+#define VEPU_REG_RGB_MASK_MSB 0x0dc
+#define VEPU_REG_INTRA_AREA_CTRL 0x0e0
+#define VEPU_REG_CIR_INTRA_CTRL 0x0e4
+#define VEPU_REG_INTRA_SLICE_BITMAP(i) (0x0e8 + ((i) * 0x4))
+#define VEPU_REG_ADDR_VP8_DCT_PART(i) (0x0e8 + ((i) * 0x4))
+#define VEPU_REG_FIRST_ROI_AREA 0x0f0
+#define VEPU_REG_SECOND_ROI_AREA 0x0f4
+#define VEPU_REG_MVC_CTRL 0x0f8
+#define VEPU_REG_MVC_CTRL_MV16X16_FAVOR(x) ((x) << 28)
+#define VEPU_REG_VP8_INTRA_PENALTY(i) (0x100 + ((i) * 0x4))
+#define VEPU_REG_ADDR_VP8_SEG_MAP 0x11c
+#define VEPU_REG_VP8_SEG_QP(i) (0x120 + ((i) * 0x4))
+#define VEPU_REG_DMV_4P_1P_PENALTY(i) (0x180 + ((i) * 0x4))
+#define VEPU_REG_DMV_4P_1P_PENALTY_BIT(x, i) ((x) << (i) * 8)
+#define VEPU_REG_DMV_QPEL_PENALTY(i) (0x200 + ((i) * 0x4))
+#define VEPU_REG_DMV_QPEL_PENALTY_BIT(x, i) ((x) << (i) * 8)
+#define VEPU_REG_VP8_CTRL1 0x280
+#define VEPU_REG_VP8_BIT_COST_GOLDEN 0x284
+#define VEPU_REG_VP8_LOOP_FLT_DELTA(i) (0x288 + ((i) * 0x4))
+
+/* Decoder registers. */
+#define VDPU_REG_INTERRUPT 0x004
+#define VDPU_REG_INTERRUPT_DEC_PIC_INF BIT(24)
+#define VDPU_REG_INTERRUPT_DEC_TIMEOUT BIT(18)
+#define VDPU_REG_INTERRUPT_DEC_SLICE_INT BIT(17)
+#define VDPU_REG_INTERRUPT_DEC_ERROR_INT BIT(16)
+#define VDPU_REG_INTERRUPT_DEC_ASO_INT BIT(15)
+#define VDPU_REG_INTERRUPT_DEC_BUFFER_INT BIT(14)
+#define VDPU_REG_INTERRUPT_DEC_BUS_INT BIT(13)
+#define VDPU_REG_INTERRUPT_DEC_RDY_INT BIT(12)
+#define VDPU_REG_INTERRUPT_DEC_IRQ BIT(8)
+#define VDPU_REG_INTERRUPT_DEC_IRQ_DIS BIT(4)
+#define VDPU_REG_INTERRUPT_DEC_E BIT(0)
+#define VDPU_REG_CONFIG 0x008
+#define VDPU_REG_CONFIG_DEC_AXI_RD_ID(x) (((x) & 0xff) << 24)
+#define VDPU_REG_CONFIG_DEC_TIMEOUT_E BIT(23)
+#define VDPU_REG_CONFIG_DEC_STRSWAP32_E BIT(22)
+#define VDPU_REG_CONFIG_DEC_STRENDIAN_E BIT(21)
+#define VDPU_REG_CONFIG_DEC_INSWAP32_E BIT(20)
+#define VDPU_REG_CONFIG_DEC_OUTSWAP32_E BIT(19)
+#define VDPU_REG_CONFIG_DEC_DATA_DISC_E BIT(18)
+#define VDPU_REG_CONFIG_TILED_MODE_MSB BIT(17)
+#define VDPU_REG_CONFIG_DEC_OUT_TILED_E BIT(17)
+#define VDPU_REG_CONFIG_DEC_LATENCY(x) (((x) & 0x3f) << 11)
+#define VDPU_REG_CONFIG_DEC_CLK_GATE_E BIT(10)
+#define VDPU_REG_CONFIG_DEC_IN_ENDIAN BIT(9)
+#define VDPU_REG_CONFIG_DEC_OUT_ENDIAN BIT(8)
+#define VDPU_REG_CONFIG_PRIORITY_MODE(x) (((x) & 0x7) << 5)
+#define VDPU_REG_CONFIG_TILED_MODE_LSB BIT(7)
+#define VDPU_REG_CONFIG_DEC_ADV_PRE_DIS BIT(6)
+#define VDPU_REG_CONFIG_DEC_SCMD_DIS BIT(5)
+#define VDPU_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_DEC_CTRL0 0x00c
+#define VDPU_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 28)
+#define VDPU_REG_DEC_CTRL0_RLC_MODE_E BIT(27)
+#define VDPU_REG_DEC_CTRL0_SKIP_MODE BIT(26)
+#define VDPU_REG_DEC_CTRL0_DIVX3_E BIT(25)
+#define VDPU_REG_DEC_CTRL0_PJPEG_E BIT(24)
+#define VDPU_REG_DEC_CTRL0_PIC_INTERLACE_E BIT(23)
+#define VDPU_REG_DEC_CTRL0_PIC_FIELDMODE_E BIT(22)
+#define VDPU_REG_DEC_CTRL0_PIC_B_E BIT(21)
+#define VDPU_REG_DEC_CTRL0_PIC_INTER_E BIT(20)
+#define VDPU_REG_DEC_CTRL0_PIC_TOPFIELD_E BIT(19)
+#define VDPU_REG_DEC_CTRL0_FWD_INTERLACE_E BIT(18)
+#define VDPU_REG_DEC_CTRL0_SORENSON_E BIT(17)
+#define VDPU_REG_DEC_CTRL0_REF_TOPFIELD_E BIT(16)
+#define VDPU_REG_DEC_CTRL0_DEC_OUT_DIS BIT(15)
+#define VDPU_REG_DEC_CTRL0_FILTERING_DIS BIT(14)
+#define VDPU_REG_DEC_CTRL0_WEBP_E BIT(13)
+#define VDPU_REG_DEC_CTRL0_MVC_E BIT(13)
+#define VDPU_REG_DEC_CTRL0_PIC_FIXED_QUANT BIT(13)
+#define VDPU_REG_DEC_CTRL0_WRITE_MVS_E BIT(12)
+#define VDPU_REG_DEC_CTRL0_REFTOPFIRST_E BIT(11)
+#define VDPU_REG_DEC_CTRL0_SEQ_MBAFF_E BIT(10)
+#define VDPU_REG_DEC_CTRL0_PICORD_COUNT_E BIT(9)
+#define VDPU_REG_DEC_CTRL0_DEC_AHB_HLOCK_E BIT(8)
+#define VDPU_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 0)
+#define VDPU_REG_DEC_CTRL1 0x010
+#define VDPU_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
+#define VDPU_REG_DEC_CTRL1_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 11)
+#define VDPU_REG_DEC_CTRL1_MB_HEIGHT_OFF(x) (((x) & 0xf) << 7)
+#define VDPU_REG_DEC_CTRL1_ALT_SCAN_E BIT(6)
+#define VDPU_REG_DEC_CTRL1_TOPFIELDFIRST_E BIT(5)
+#define VDPU_REG_DEC_CTRL1_REF_FRAMES(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_W_EXT(x) (((x) & 0x7) << 3)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_H_EXT(x) (((x) & 0x7) << 0)
+#define VDPU_REG_DEC_CTRL1_PIC_REFER_FLAG BIT(0)
+#define VDPU_REG_DEC_CTRL2 0x014
+#define VDPU_REG_DEC_CTRL2_STRM_START_BIT(x) (((x) & 0x3f) << 26)
+#define VDPU_REG_DEC_CTRL2_SYNC_MARKER_E BIT(25)
+#define VDPU_REG_DEC_CTRL2_TYPE1_QUANT_E BIT(24)
+#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET(x) (((x) & 0x1f) << 19)
+#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET2(x) (((x) & 0x1f) << 14)
+#define VDPU_REG_DEC_CTRL2_FIELDPIC_FLAG_E BIT(0)
+#define VDPU_REG_DEC_CTRL2_INTRADC_VLC_THR(x) (((x) & 0x7) << 16)
+#define VDPU_REG_DEC_CTRL2_VOP_TIME_INCR(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_DEC_CTRL2_DQ_PROFILE BIT(24)
+#define VDPU_REG_DEC_CTRL2_DQBI_LEVEL BIT(23)
+#define VDPU_REG_DEC_CTRL2_RANGE_RED_FRM_E BIT(22)
+#define VDPU_REG_DEC_CTRL2_FAST_UVMC_E BIT(20)
+#define VDPU_REG_DEC_CTRL2_TRANSDCTAB BIT(17)
+#define VDPU_REG_DEC_CTRL2_TRANSACFRM(x) (((x) & 0x3) << 15)
+#define VDPU_REG_DEC_CTRL2_TRANSACFRM2(x) (((x) & 0x3) << 13)
+#define VDPU_REG_DEC_CTRL2_MB_MODE_TAB(x) (((x) & 0x7) << 10)
+#define VDPU_REG_DEC_CTRL2_MVTAB(x) (((x) & 0x7) << 7)
+#define VDPU_REG_DEC_CTRL2_CBPTAB(x) (((x) & 0x7) << 4)
+#define VDPU_REG_DEC_CTRL2_2MV_BLK_PAT_TAB(x) (((x) & 0x3) << 2)
+#define VDPU_REG_DEC_CTRL2_4MV_BLK_PAT_TAB(x) (((x) & 0x3) << 0)
+#define VDPU_REG_DEC_CTRL2_QSCALE_TYPE BIT(24)
+#define VDPU_REG_DEC_CTRL2_CON_MV_E BIT(4)
+#define VDPU_REG_DEC_CTRL2_INTRA_DC_PREC(x) (((x) & 0x3) << 2)
+#define VDPU_REG_DEC_CTRL2_INTRA_VLC_TAB BIT(1)
+#define VDPU_REG_DEC_CTRL2_FRAME_PRED_DCT BIT(0)
+#define VDPU_REG_DEC_CTRL2_JPEG_QTABLES(x) (((x) & 0x3) << 11)
+#define VDPU_REG_DEC_CTRL2_JPEG_MODE(x) (((x) & 0x7) << 8)
+#define VDPU_REG_DEC_CTRL2_JPEG_FILRIGHT_E BIT(7)
+#define VDPU_REG_DEC_CTRL2_JPEG_STREAM_ALL BIT(6)
+#define VDPU_REG_DEC_CTRL2_CR_AC_VLCTABLE BIT(5)
+#define VDPU_REG_DEC_CTRL2_CB_AC_VLCTABLE BIT(4)
+#define VDPU_REG_DEC_CTRL2_CR_DC_VLCTABLE BIT(3)
+#define VDPU_REG_DEC_CTRL2_CB_DC_VLCTABLE BIT(2)
+#define VDPU_REG_DEC_CTRL2_CR_DC_VLCTABLE3 BIT(1)
+#define VDPU_REG_DEC_CTRL2_CB_DC_VLCTABLE3 BIT(0)
+#define VDPU_REG_DEC_CTRL2_STRM1_START_BIT(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_DEC_CTRL2_HUFFMAN_E BIT(17)
+#define VDPU_REG_DEC_CTRL2_MULTISTREAM_E BIT(16)
+#define VDPU_REG_DEC_CTRL2_BOOLEAN_VALUE(x) (((x) & 0xff) << 8)
+#define VDPU_REG_DEC_CTRL2_BOOLEAN_RANGE(x) (((x) & 0xff) << 0)
+#define VDPU_REG_DEC_CTRL2_ALPHA_OFFSET(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_DEC_CTRL2_BETA_OFFSET(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_DEC_CTRL3 0x018
+#define VDPU_REG_DEC_CTRL3_START_CODE_E BIT(31)
+#define VDPU_REG_DEC_CTRL3_INIT_QP(x) (((x) & 0x3f) << 25)
+#define VDPU_REG_DEC_CTRL3_CH_8PIX_ILEAV_E BIT(24)
+#define VDPU_REG_DEC_CTRL3_STREAM_LEN_EXT(x) (((x) & 0xff) << 24)
+#define VDPU_REG_DEC_CTRL3_STREAM_LEN(x) (((x) & 0xffffff) << 0)
+#define VDPU_REG_DEC_CTRL4 0x01c
+#define VDPU_REG_DEC_CTRL4_CABAC_E BIT(31)
+#define VDPU_REG_DEC_CTRL4_BLACKWHITE_E BIT(30)
+#define VDPU_REG_DEC_CTRL4_DIR_8X8_INFER_E BIT(29)
+#define VDPU_REG_DEC_CTRL4_WEIGHT_PRED_E BIT(28)
+#define VDPU_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(x) (((x) & 0x3) << 26)
+#define VDPU_REG_DEC_CTRL4_AVS_H264_H_EXT BIT(25)
+#define VDPU_REG_DEC_CTRL4_FRAMENUM_LEN(x) (((x) & 0x1f) << 16)
+#define VDPU_REG_DEC_CTRL4_FRAMENUM(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_DEC_CTRL4_BITPLANE0_E BIT(31)
+#define VDPU_REG_DEC_CTRL4_BITPLANE1_E BIT(30)
+#define VDPU_REG_DEC_CTRL4_BITPLANE2_E BIT(29)
+#define VDPU_REG_DEC_CTRL4_ALT_PQUANT(x) (((x) & 0x1f) << 24)
+#define VDPU_REG_DEC_CTRL4_DQ_EDGES(x) (((x) & 0xf) << 20)
+#define VDPU_REG_DEC_CTRL4_TTMBF BIT(19)
+#define VDPU_REG_DEC_CTRL4_PQINDEX(x) (((x) & 0x1f) << 14)
+#define VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
+#define VDPU_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
+#define VDPU_REG_DEC_CTRL4_UNIQP_E BIT(11)
+#define VDPU_REG_DEC_CTRL4_HALFQP_E BIT(10)
+#define VDPU_REG_DEC_CTRL4_TTFRM(x) (((x) & 0x3) << 8)
+#define VDPU_REG_DEC_CTRL4_2ND_BYTE_EMUL_E BIT(7)
+#define VDPU_REG_DEC_CTRL4_DQUANT_E BIT(6)
+#define VDPU_REG_DEC_CTRL4_VC1_ADV_E BIT(5)
+#define VDPU_REG_DEC_CTRL4_PJPEG_FILDOWN_E BIT(26)
+#define VDPU_REG_DEC_CTRL4_PJPEG_WDIV8 BIT(25)
+#define VDPU_REG_DEC_CTRL4_PJPEG_HDIV8 BIT(24)
+#define VDPU_REG_DEC_CTRL4_PJPEG_AH(x) (((x) & 0xf) << 20)
+#define VDPU_REG_DEC_CTRL4_PJPEG_AL(x) (((x) & 0xf) << 16)
+#define VDPU_REG_DEC_CTRL4_PJPEG_SS(x) (((x) & 0xff) << 8)
+#define VDPU_REG_DEC_CTRL4_PJPEG_SE(x) (((x) & 0xff) << 0)
+#define VDPU_REG_DEC_CTRL4_DCT1_START_BIT(x) (((x) & 0x3f) << 26)
+#define VDPU_REG_DEC_CTRL4_DCT2_START_BIT(x) (((x) & 0x3f) << 20)
+#define VDPU_REG_DEC_CTRL4_CH_MV_RES BIT(13)
+#define VDPU_REG_DEC_CTRL4_INIT_DC_MATCH0(x) (((x) & 0x7) << 9)
+#define VDPU_REG_DEC_CTRL4_INIT_DC_MATCH1(x) (((x) & 0x7) << 6)
+#define VDPU_REG_DEC_CTRL4_VP7_VERSION BIT(5)
+#define VDPU_REG_DEC_CTRL5 0x020
+#define VDPU_REG_DEC_CTRL5_CONST_INTRA_E BIT(31)
+#define VDPU_REG_DEC_CTRL5_FILT_CTRL_PRES BIT(30)
+#define VDPU_REG_DEC_CTRL5_RDPIC_CNT_PRES BIT(29)
+#define VDPU_REG_DEC_CTRL5_8X8TRANS_FLAG_E BIT(28)
+#define VDPU_REG_DEC_CTRL5_REFPIC_MK_LEN(x) (((x) & 0x7ff) << 17)
+#define VDPU_REG_DEC_CTRL5_IDR_PIC_E BIT(16)
+#define VDPU_REG_DEC_CTRL5_IDR_PIC_ID(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_DEC_CTRL5_MV_SCALEFACTOR(x) (((x) & 0xff) << 24)
+#define VDPU_REG_DEC_CTRL5_REF_DIST_FWD(x) (((x) & 0x1f) << 19)
+#define VDPU_REG_DEC_CTRL5_REF_DIST_BWD(x) (((x) & 0x1f) << 14)
+#define VDPU_REG_DEC_CTRL5_LOOP_FILT_LIMIT(x) (((x) & 0xf) << 14)
+#define VDPU_REG_DEC_CTRL5_VARIANCE_TEST_E BIT(13)
+#define VDPU_REG_DEC_CTRL5_MV_THRESHOLD(x) (((x) & 0x7) << 10)
+#define VDPU_REG_DEC_CTRL5_VAR_THRESHOLD(x) (((x) & 0x3ff) << 0)
+#define VDPU_REG_DEC_CTRL5_DIVX_IDCT_E BIT(8)
+#define VDPU_REG_DEC_CTRL5_DIVX3_SLICE_SIZE(x) (((x) & 0xff) << 0)
+#define VDPU_REG_DEC_CTRL5_PJPEG_REST_FREQ(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_DEC_CTRL5_RV_PROFILE(x) (((x) & 0x3) << 30)
+#define VDPU_REG_DEC_CTRL5_RV_OSV_QUANT(x) (((x) & 0x3) << 28)
+#define VDPU_REG_DEC_CTRL5_RV_FWD_SCALE(x) (((x) & 0x3fff) << 14)
+#define VDPU_REG_DEC_CTRL5_RV_BWD_SCALE(x) (((x) & 0x3fff) << 0)
+#define VDPU_REG_DEC_CTRL5_INIT_DC_COMP0(x) (((x) & 0xffff) << 16)
+#define VDPU_REG_DEC_CTRL5_INIT_DC_COMP1(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_DEC_CTRL6 0x024
+#define VDPU_REG_DEC_CTRL6_PPS_ID(x) (((x) & 0xff) << 24)
+#define VDPU_REG_DEC_CTRL6_REFIDX1_ACTIVE(x) (((x) & 0x1f) << 19)
+#define VDPU_REG_DEC_CTRL6_REFIDX0_ACTIVE(x) (((x) & 0x1f) << 14)
+#define VDPU_REG_DEC_CTRL6_POC_LENGTH(x) (((x) & 0xff) << 0)
+#define VDPU_REG_DEC_CTRL6_ICOMP0_E BIT(24)
+#define VDPU_REG_DEC_CTRL6_ISCALE0(x) (((x) & 0xff) << 16)
+#define VDPU_REG_DEC_CTRL6_ISHIFT0(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_DEC_CTRL6_STREAM1_LEN(x) (((x) & 0xffffff) << 0)
+#define VDPU_REG_DEC_CTRL6_PIC_SLICE_AM(x) (((x) & 0x1fff) << 0)
+#define VDPU_REG_DEC_CTRL6_COEFFS_PART_AM(x) (((x) & 0xf) << 24)
+#define VDPU_REG_FWD_PIC(i) (0x028 + ((i) * 0x4))
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_FWD_PIC1_ICOMP1_E BIT(24)
+#define VDPU_REG_FWD_PIC1_ISCALE1(x) (((x) & 0xff) << 16)
+#define VDPU_REG_FWD_PIC1_ISHIFT1(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
+#define VDPU_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
+#define VDPU_REG_FWD_PIC1_SEGMENT_E BIT(0)
+#define VDPU_REG_DEC_CTRL7 0x02c
+#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F15(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F14(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F13(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F12(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F11(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_DEC_CTRL7_PINIT_RLIST_F10(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_DEC_CTRL7_ICOMP2_E BIT(24)
+#define VDPU_REG_DEC_CTRL7_ISCALE2(x) (((x) & 0xff) << 16)
+#define VDPU_REG_DEC_CTRL7_ISHIFT2(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_DEC_CTRL7_DCT3_START_BIT(x) (((x) & 0x3f) << 24)
+#define VDPU_REG_DEC_CTRL7_DCT4_START_BIT(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_DEC_CTRL7_DCT5_START_BIT(x) (((x) & 0x3f) << 12)
+#define VDPU_REG_DEC_CTRL7_DCT6_START_BIT(x) (((x) & 0x3f) << 6)
+#define VDPU_REG_DEC_CTRL7_DCT7_START_BIT(x) (((x) & 0x3f) << 0)
+#define VDPU_REG_ADDR_STR 0x030
+#define VDPU_REG_ADDR_DST 0x034
+#define VDPU_REG_ADDR_REF(i) (0x038 + ((i) * 0x4))
+#define VDPU_REG_ADDR_REF_FIELD_E BIT(1)
+#define VDPU_REG_ADDR_REF_TOPC_E BIT(0)
+#define VDPU_REG_REF_PIC(i) (0x078 + ((i) * 0x4))
+#define VDPU_REG_REF_PIC_FILT_TYPE_E BIT(31)
+#define VDPU_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
+#define VDPU_REG_REF_PIC_MB_ADJ_0(x) (((x) & 0x7f) << 21)
+#define VDPU_REG_REF_PIC_MB_ADJ_1(x) (((x) & 0x7f) << 14)
+#define VDPU_REG_REF_PIC_MB_ADJ_2(x) (((x) & 0x7f) << 7)
+#define VDPU_REG_REF_PIC_MB_ADJ_3(x) (((x) & 0x7f) << 0)
+#define VDPU_REG_REF_PIC_REFER1_NBR(x) (((x) & 0xffff) << 16)
+#define VDPU_REG_REF_PIC_REFER0_NBR(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_REF_PIC_LF_LEVEL_0(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_REF_PIC_LF_LEVEL_1(x) (((x) & 0x3f) << 12)
+#define VDPU_REG_REF_PIC_LF_LEVEL_2(x) (((x) & 0x3f) << 6)
+#define VDPU_REG_REF_PIC_LF_LEVEL_3(x) (((x) & 0x3f) << 0)
+#define VDPU_REG_REF_PIC_QUANT_DELTA_0(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_REF_PIC_QUANT_DELTA_1(x) (((x) & 0x1f) << 22)
+#define VDPU_REG_REF_PIC_QUANT_0(x) (((x) & 0x7ff) << 11)
+#define VDPU_REG_REF_PIC_QUANT_1(x) (((x) & 0x7ff) << 0)
+#define VDPU_REG_LT_REF 0x098
+#define VDPU_REG_VALID_REF 0x09c
+#define VDPU_REG_ADDR_QTABLE 0x0a0
+#define VDPU_REG_ADDR_DIR_MV 0x0a4
+#define VDPU_REG_BD_REF_PIC(i) (0x0a8 + ((i) * 0x4))
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B2(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B1(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F1(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B0(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_M1(x) (((x) & 0x3) << 10)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_4(x) (((x) & 0x3) << 8)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_M1(x) (((x) & 0x3) << 6)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_4(x) (((x) & 0x3) << 4)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_M1(x) (((x) & 0x3) << 2)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_4(x) (((x) & 0x3) << 0)
+#define VDPU_REG_BD_REF_PIC_QUANT_DELTA_2(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_BD_REF_PIC_QUANT_DELTA_3(x) (((x) & 0x1f) << 22)
+#define VDPU_REG_BD_REF_PIC_QUANT_2(x) (((x) & 0x7ff) << 11)
+#define VDPU_REG_BD_REF_PIC_QUANT_3(x) (((x) & 0x7ff) << 0)
+#define VDPU_REG_BD_P_REF_PIC 0x0bc
+#define VDPU_REG_BD_P_REF_PIC_QUANT_DELTA_4(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_P_REF_PIC_BINIT_RLIST_B15(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_P_REF_PIC_BINIT_RLIST_F15(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_ERR_CONC 0x0c0
+#define VDPU_REG_ERR_CONC_STARTMB_X(x) (((x) & 0x1ff) << 23)
+#define VDPU_REG_ERR_CONC_STARTMB_Y(x) (((x) & 0xff) << 15)
+#define VDPU_REG_PRED_FLT 0x0c4
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_0(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_1(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_2(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_REF_BUF_CTRL 0x0cc
+#define VDPU_REG_REF_BUF_CTRL_REFBU_E BIT(31)
+#define VDPU_REG_REF_BUF_CTRL_REFBU_THR(x) (((x) & 0xfff) << 19)
+#define VDPU_REG_REF_BUF_CTRL_REFBU_PICID(x) (((x) & 0x1f) << 14)
+#define VDPU_REG_REF_BUF_CTRL_REFBU_EVAL_E BIT(13)
+#define VDPU_REG_REF_BUF_CTRL_REFBU_FPARMOD_E BIT(12)
+#define VDPU_REG_REF_BUF_CTRL_REFBU_Y_OFFSET(x) (((x) & 0x1ff) << 0)
+#define VDPU_REG_REF_BUF_CTRL2 0x0dc
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_BUF_E BIT(31)
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_THR(x) (((x) & 0xfff) << 19)
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_PICID(x) (((x) & 0x1f) << 14)
+#define VDPU_REG_REF_BUF_CTRL2_APF_THRESHOLD(x) (((x) & 0x3fff) << 0)
+
+#endif /* RK3288_VPU_REGS_H_ */
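All field macros in this header shift a pre-masked value into position, so registers are composed by OR-ing fields together. No decoder code exists in this series yet, so the following composition of VDPU_REG_DEC_CTRL1 is purely illustrative:

    /*
     * Illustrative only: compose a decoder control word from the
     * field macros above.  This series adds no decoder code.
     */
    static u32 vdpu_pack_dec_ctrl1(unsigned int mb_width,
    			       unsigned int mb_height,
    			       unsigned int num_refs)
    {
    	return VDPU_REG_DEC_CTRL1_PIC_MB_WIDTH(mb_width) |
    	       VDPU_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(mb_height) |
    	       VDPU_REG_DEC_CTRL1_REF_FRAMES(num_refs);
    }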
diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c
new file mode 100644
index 000000000000..6fdef61e2127
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+
+#include "rockchip_vpu.h"
+#include "rockchip_vpu_jpeg.h"
+#include "rk3399_vpu_regs.h"
+
+#define RK3399_ACLK_MAX_FREQ (400 * 1000 * 1000)
+
+/*
+ * Supported formats.
+ */
+
+static const struct rockchip_vpu_fmt rk3399_vpu_enc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = RK_VPU_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = RK_VPU_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUV420SP,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = RK_VPU_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_YUYV422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codec_mode = RK_VPU_MODE_NONE,
+ .enc_fmt = RK3288_VPU_ENC_FMT_UYVY422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .codec_mode = RK_VPU_MODE_JPEG_ENC,
+ .max_depth = 2,
+ .header_size = JPEG_HEADER_SIZE,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = 8192,
+ .step_width = JPEG_MB_DIM,
+ .min_height = 32,
+ .max_height = 8192,
+ .step_height = JPEG_MB_DIM,
+ },
+ },
+};
+
+static irqreturn_t rk3399_vepu_irq(int irq, void *dev_id)
+{
+ struct rockchip_vpu_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status, bytesused;
+
+ status = vepu_read(vpu, VEPU_REG_INTERRUPT);
+ bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
+ state = (status & VEPU_REG_INTERRUPT_FRAME_READY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+
+ rockchip_vpu_irq_done(vpu, bytesused, state);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3399_vpu_hw_init(struct rockchip_vpu_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3399_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static void rk3399_vpu_enc_reset(struct rockchip_vpu_ctx *ctx)
+{
+ struct rockchip_vpu_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_ENCODE_START);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+}
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct rockchip_vpu_codec_ops rk3399_vpu_codec_ops[] = {
+ [RK_VPU_MODE_JPEG_ENC] = {
+ .run = rk3399_vpu_jpeg_enc_run,
+ .reset = rk3399_vpu_enc_reset,
+ },
+};
+
+/*
+ * VPU variant.
+ */
+
+const struct rockchip_vpu_variant rk3399_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rk3399_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rk3399_vpu_enc_fmts),
+ .codec = RK_VPU_CODEC_JPEG,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .vepu_irq = rk3399_vepu_irq,
+ .init = rk3399_vpu_hw_init,
+ .clk_names = {"aclk", "hclk"},
+ .num_clocks = 2
+};
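Both variants describe their clocks only as clk_names[]/num_clocks, which suggests the shared probe code acquires them generically. A hedged sketch using the clk_bulk API, assuming vpu->clocks is a struct clk_bulk_data array sized for the largest variant (the helper name is illustrative):

    #include <linux/clk.h>

    /*
     * Sketch, not the driver's actual probe code: translate the
     * variant's clock-name table into a clk_bulk request.
     */
    static int rockchip_vpu_get_clocks_sketch(struct rockchip_vpu_dev *vpu,
    					  struct device *dev)
    {
    	int i;

    	for (i = 0; i < vpu->variant->num_clocks; i++)
    		vpu->clocks[i].id = vpu->variant->clk_names[i];

    	return devm_clk_bulk_get(dev, vpu->variant->num_clocks,
    				 vpu->clocks);
    }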
diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
new file mode 100644
index 000000000000..dbc86d95fe3b
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ *
+ * JPEG encoder
+ * ------------
+ * The VPU JPEG encoder produces JPEG baseline sequential format.
+ * The quantization coefficients are 8-bit values, complying with
+ * the baseline specification. Therefore, the encoder requires
+ * only the luma and chroma quantization tables. The hardware does entropy
+ * encoding using internal Huffman tables, as specified in the JPEG
+ * specification.
+ *
+ * In other words, only the luma and chroma quantization tables are
+ * required for the encoding operation.
+ *
+ * Quantization luma table values are written to registers
+ * VEPU_swreg_0-VEPU_swreg_15, and chroma table values to
+ * VEPU_swreg_16-VEPU_swreg_31.
+ *
+ * JPEG zigzag order is expected on the quantization tables.
+ */
+
+#include <asm/unaligned.h>
+#include <media/v4l2-mem2mem.h>
+#include "rockchip_vpu_jpeg.h"
+#include "rockchip_vpu.h"
+#include "rockchip_vpu_common.h"
+#include "rockchip_vpu_hw.h"
+#include "rk3399_vpu_regs.h"
+
+#define VEPU_JPEG_QUANT_TABLE_COUNT 16
+
+static void rk3399_vpu_set_src_img_ctrl(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ u32 reg;
+
+ /*
+ * The pix fmt width/height are already macroblock aligned
+ * by the .vidioc_s_fmt_vid_cap_mplane() callback.
+ */
+ reg = VEPU_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_INPUT_LUMA_INFO);
+
+ reg = VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(0) |
+ VEPU_REG_IN_IMG_CTRL_OVRFLB(0);
+ /*
+ * This register controls the input crop, as the offset
+ * from the right/bottom within the last macroblock. The offset
+ * from the right is programmed divided by 4 (hence the _D4 in the
+ * macro name), so the crop must be aligned to 4 pixels horizontally.
+ */
+ vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_OVER_FILL_STRM_OFFSET);
+
+ reg = VEPU_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_CTRL1);
+}
+
+static void rk3399_vpu_jpeg_enc_set_buffers(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx,
+ struct vb2_buffer *src_buf)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ dma_addr_t src[3];
+
+ WARN_ON(pix_fmt->num_planes > 3);
+
+ vepu_write_relaxed(vpu, ctx->bounce_dma_addr,
+ VEPU_REG_ADDR_OUTPUT_STREAM);
+ vepu_write_relaxed(vpu, ctx->bounce_size,
+ VEPU_REG_STR_BUF_LIMIT);
+
+ if (pix_fmt->num_planes == 1) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ } else if (pix_fmt->num_planes == 2) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
+ } else {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
+ vepu_write_relaxed(vpu, src[2], VEPU_REG_ADDR_IN_PLANE_2);
+ }
+}
+
+static void
+rk3399_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
+ unsigned char *luma_qtable,
+ unsigned char *chroma_qtable)
+{
+ u32 reg, i;
+
+ /* Each register packs four consecutive bytes of the qtable. */
+ for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&luma_qtable[4 * i]);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i));
+
+ reg = get_unaligned_be32(&chroma_qtable[4 * i]);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i));
+ }
+}
+
+void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
+{
+ struct rockchip_vpu_dev *vpu = ctx->dev;
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct rockchip_vpu_jpeg_ctx jpeg_ctx;
+ u32 reg;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
+ jpeg_ctx.buffer = vb2_plane_vaddr(dst_buf, 0);
+ jpeg_ctx.width = ctx->dst_fmt.width;
+ jpeg_ctx.height = ctx->dst_fmt.height;
+ jpeg_ctx.quality = ctx->jpeg_quality;
+ rockchip_vpu_jpeg_header_assemble(&jpeg_ctx);
+
+ /* Switch to JPEG encoder mode before writing registers */
+ vepu_write_relaxed(vpu, VEPU_REG_ENCODE_FORMAT_JPEG,
+ VEPU_REG_ENCODE_START);
+
+ rk3399_vpu_set_src_img_ctrl(vpu, ctx);
+ rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, src_buf);
+ rk3399_vpu_jpeg_enc_set_qtable(vpu,
+ rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
+ rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
+
+ reg = VEPU_REG_OUTPUT_SWAP32
+ | VEPU_REG_OUTPUT_SWAP16
+ | VEPU_REG_OUTPUT_SWAP8
+ | VEPU_REG_INPUT_SWAP8
+ | VEPU_REG_INPUT_SWAP16
+ | VEPU_REG_INPUT_SWAP32;
+ /* Make sure that all registers are written at this point. */
+ vepu_write(vpu, reg, VEPU_REG_DATA_ENDIAN);
+
+ reg = VEPU_REG_AXI_CTRL_BURST_LEN(16);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_AXI_CTRL);
+
+ reg = VEPU_REG_MB_WIDTH(JPEG_MB_WIDTH(ctx->src_fmt.width))
+ | VEPU_REG_MB_HEIGHT(JPEG_MB_HEIGHT(ctx->src_fmt.height))
+ | VEPU_REG_FRAME_TYPE_INTRA
+ | VEPU_REG_ENCODE_FORMAT_JPEG
+ | VEPU_REG_ENCODE_ENABLE;
+
+ /* Kick the watchdog and start encoding */
+ schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+ vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
+}
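The header comment of this file notes that only zigzag-ordered quantization tables are needed, and the run() code fetches them via rockchip_vpu_jpeg_get_qtable() after setting jpeg_ctx.quality. A standalone sketch of the common IJG-style quality scaling such a helper might apply (this is the usual libjpeg formula, not necessarily what rockchip_vpu_jpeg.c implements):

    /*
     * IJG-style scaling of one quantization table entry; quality is
     * expected in 1..100.  Illustrative only -- the real tables come
     * from rockchip_vpu_jpeg_get_qtable().
     */
    static unsigned char jpeg_scale_qcoef(unsigned char base, int quality)
    {
    	int scale, q;

    	scale = (quality < 50) ? 5000 / quality : 200 - 2 * quality;
    	q = (base * scale + 50) / 100;
    	if (q < 1)
    		q = 1;
    	else if (q > 255)
    		q = 255;
    	return q;
    }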
diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_regs.h b/drivers/staging/media/rockchip/vpu/rk3399_vpu_regs.h
new file mode 100644
index 000000000000..fbe294177ec9
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rk3399_vpu_regs.h
@@ -0,0 +1,600 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Rockchip VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <alpha.lin@rock-chips.com>
+ */
+
+#ifndef RK3399_VPU_REGS_H_
+#define RK3399_VPU_REGS_H_
+
+/* Encoder registers. */
+#define VEPU_REG_VP8_QUT_1ST(i) (0x000 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_DC_Y2(x) (((x) & 0x3fff) << 16)
+#define VEPU_REG_VP8_QUT_DC_Y1(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_QUT_2ND(i) (0x004 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_AC_Y1(x) (((x) & 0x3fff) << 16)
+#define VEPU_REG_VP8_QUT_DC_CHR(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_QUT_3RD(i) (0x008 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_AC_CHR(x) (((x) & 0x3fff) << 16)
+#define VEPU_REG_VP8_QUT_AC_Y2(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_QUT_4TH(i) (0x00c + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_ZB_DC_CHR(x) (((x) & 0x1ff) << 18)
+#define VEPU_REG_VP8_QUT_ZB_DC_Y2(x) (((x) & 0x1ff) << 9)
+#define VEPU_REG_VP8_QUT_ZB_DC_Y1(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_VP8_QUT_5TH(i) (0x010 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_ZB_AC_CHR(x) (((x) & 0x1ff) << 18)
+#define VEPU_REG_VP8_QUT_ZB_AC_Y2(x) (((x) & 0x1ff) << 9)
+#define VEPU_REG_VP8_QUT_ZB_AC_Y1(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_VP8_QUT_6TH(i) (0x014 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_RND_DC_CHR(x) (((x) & 0xff) << 16)
+#define VEPU_REG_VP8_QUT_RND_DC_Y2(x) (((x) & 0xff) << 8)
+#define VEPU_REG_VP8_QUT_RND_DC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_VP8_QUT_7TH(i) (0x018 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_RND_AC_CHR(x) (((x) & 0xff) << 16)
+#define VEPU_REG_VP8_QUT_RND_AC_Y2(x) (((x) & 0xff) << 8)
+#define VEPU_REG_VP8_QUT_RND_AC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_VP8_QUT_8TH(i) (0x01c + ((i) * 0x24))
+#define VEPU_REG_VP8_SEG_FILTER_LEVEL(x) (((x) & 0x3f) << 25)
+#define VEPU_REG_VP8_DEQUT_DC_CHR(x) (((x) & 0xff) << 17)
+#define VEPU_REG_VP8_DEQUT_DC_Y2(x) (((x) & 0x1ff) << 8)
+#define VEPU_REG_VP8_DEQUT_DC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_VP8_QUT_9TH(i) (0x020 + ((i) * 0x24))
+#define VEPU_REG_VP8_DEQUT_AC_CHR(x) (((x) & 0x1ff) << 18)
+#define VEPU_REG_VP8_DEQUT_AC_Y2(x) (((x) & 0x1ff) << 9)
+#define VEPU_REG_VP8_DEQUT_AC_Y1(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_ADDR_VP8_SEG_MAP 0x06c
+#define VEPU_REG_VP8_INTRA_4X4_PENALTY(i) (0x070 + ((i) * 0x4))
+#define VEPU_REG_VP8_INTRA_4X4_PENALTY_0(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_INTRA_4x4_PENALTY_1(x) (((x) & 0xfff) << 16)
+#define VEPU_REG_VP8_INTRA_16X16_PENALTY(i) (0x084 + ((i) * 0x4))
+#define VEPU_REG_VP8_INTRA_16X16_PENALTY_0(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_INTRA_16X16_PENALTY_1(x) (((x) & 0xfff) << 16)
+#define VEPU_REG_VP8_CONTROL 0x0a0
+#define VEPU_REG_VP8_LF_MODE_DELTA_BPRED(x) (((x) & 0x1f) << 24)
+#define VEPU_REG_VP8_LF_REF_DELTA_INTRA_MB(x) (((x) & 0x7f) << 16)
+#define VEPU_REG_VP8_INTER_TYPE_BIT_COST(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_REF_FRAME_VAL 0x0a4
+#define VEPU_REG_VP8_COEF_DMV_PENALTY(x) (((x) & 0xfff) << 16)
+#define VEPU_REG_VP8_REF_FRAME(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_LOOP_FILTER_REF_DELTA 0x0a8
+#define VEPU_REG_VP8_LF_REF_DELTA_ALT_REF(x) (((x) & 0x7f) << 16)
+#define VEPU_REG_VP8_LF_REF_DELTA_LAST_REF(x) (((x) & 0x7f) << 8)
+#define VEPU_REG_VP8_LF_REF_DELTA_GOLDEN(x) (((x) & 0x7f) << 0)
+#define VEPU_REG_VP8_LOOP_FILTER_MODE_DELTA 0x0ac
+#define VEPU_REG_VP8_LF_MODE_DELTA_SPLITMV(x) (((x) & 0x7f) << 16)
+#define VEPU_REG_VP8_LF_MODE_DELTA_ZEROMV(x) (((x) & 0x7f) << 8)
+#define VEPU_REG_VP8_LF_MODE_DELTA_NEWMV(x) (((x) & 0x7f) << 0)
+#define VEPU_REG_JPEG_LUMA_QUAT(i) (0x000 + ((i) * 0x4))
+#define VEPU_REG_JPEG_CHROMA_QUAT(i) (0x040 + ((i) * 0x4))
+#define VEPU_REG_INTRA_SLICE_BITMAP(i) (0x0b0 + ((i) * 0x4))
+#define VEPU_REG_ADDR_VP8_DCT_PART(i) (0x0b0 + ((i) * 0x4))
+#define VEPU_REG_INTRA_AREA_CTRL 0x0b8
+#define VEPU_REG_INTRA_AREA_TOP(x) (((x) & 0xff) << 24)
+#define VEPU_REG_INTRA_AREA_BOTTOM(x) (((x) & 0xff) << 16)
+#define VEPU_REG_INTRA_AREA_LEFT(x) (((x) & 0xff) << 8)
+#define VEPU_REG_INTRA_AREA_RIGHT(x) (((x) & 0xff) << 0)
+#define VEPU_REG_CIR_INTRA_CTRL 0x0bc
+#define VEPU_REG_CIR_INTRA_FIRST_MB(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_CIR_INTRA_INTERVAL(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_ADDR_IN_PLANE_0 0x0c0
+#define VEPU_REG_ADDR_IN_PLANE_1 0x0c4
+#define VEPU_REG_ADDR_IN_PLANE_2 0x0c8
+#define VEPU_REG_STR_HDR_REM_MSB 0x0cc
+#define VEPU_REG_STR_HDR_REM_LSB 0x0d0
+#define VEPU_REG_STR_BUF_LIMIT 0x0d4
+#define VEPU_REG_AXI_CTRL 0x0d8
+#define VEPU_REG_AXI_CTRL_READ_ID(x) (((x) & 0xff) << 24)
+#define VEPU_REG_AXI_CTRL_WRITE_ID(x) (((x) & 0xff) << 16)
+#define VEPU_REG_AXI_CTRL_BURST_LEN(x) (((x) & 0x3f) << 8)
+#define VEPU_REG_AXI_CTRL_INCREMENT_MODE(x) (((x) & 0x01) << 2)
+#define VEPU_REG_AXI_CTRL_BIRST_DISCARD(x) (((x) & 0x01) << 1)
+#define VEPU_REG_AXI_CTRL_BIRST_DISABLE BIT(0)
+#define VEPU_QP_ADJUST_MAD_DELTA_ROI 0x0dc
+#define VEPU_REG_ROI_QP_DELTA_1(x) (((x) & 0xf) << 12)
+#define VEPU_REG_ROI_QP_DELTA_2(x) (((x) & 0xf) << 8)
+#define VEPU_REG_MAD_QP_ADJUSTMENT(x) (((x) & 0xf) << 0)
+#define VEPU_REG_ADDR_REF_LUMA 0x0e0
+#define VEPU_REG_ADDR_REF_CHROMA 0x0e4
+#define VEPU_REG_QP_SUM_DIV2 0x0e8
+#define VEPU_REG_QP_SUM(x) (((x) & 0x001fffff) * 2)
+#define VEPU_REG_ENC_CTRL0 0x0ec
+#define VEPU_REG_DISABLE_QUARTER_PIXEL_MV BIT(28)
+#define VEPU_REG_DEBLOCKING_FILTER_MODE(x) (((x) & 0x3) << 24)
+#define VEPU_REG_CABAC_INIT_IDC(x) (((x) & 0x3) << 21)
+#define VEPU_REG_ENTROPY_CODING_MODE BIT(20)
+#define VEPU_REG_H264_TRANS8X8_MODE BIT(17)
+#define VEPU_REG_H264_INTER4X4_MODE BIT(16)
+#define VEPU_REG_H264_STREAM_MODE BIT(15)
+#define VEPU_REG_H264_SLICE_SIZE(x) (((x) & 0x7f) << 8)
+#define VEPU_REG_ENC_OVER_FILL_STRM_OFFSET 0x0f0
+#define VEPU_REG_STREAM_START_OFFSET(x) (((x) & 0x3f) << 16)
+#define VEPU_REG_SKIP_MACROBLOCK_PENALTY(x) (((x) & 0xff) << 8)
+#define VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(x) (((x) & 0x3) << 4)
+#define VEPU_REG_IN_IMG_CTRL_OVRFLB(x) (((x) & 0xf) << 0)
+#define VEPU_REG_INPUT_LUMA_INFO 0x0f4
+#define VEPU_REG_IN_IMG_CHROMA_OFFSET(x) (((x) & 0x7) << 20)
+#define VEPU_REG_IN_IMG_LUMA_OFFSET(x) (((x) & 0x7) << 16)
+#define VEPU_REG_IN_IMG_CTRL_ROW_LEN(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_RLC_SUM 0x0f8
+#define VEPU_REG_RLC_SUM_OUT(x) (((x) & 0x007fffff) * 4)
+#define VEPU_REG_SPLIT_PENALTY_4X4 0x0f8
+#define VEPU_REG_VP8_SPLIT_PENALTY_4X4(x) (((x) & 0x1ff) << 19)
+#define VEPU_REG_ADDR_REC_LUMA 0x0fc
+#define VEPU_REG_ADDR_REC_CHROMA 0x100
+#define VEPU_REG_CHECKPOINT(i) (0x104 + ((i) * 0x4))
+#define VEPU_REG_CHECKPOINT_CHECK0(x) (((x) & 0xffff))
+#define VEPU_REG_CHECKPOINT_CHECK1(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_CHECKPOINT_RESULT(x) \
+ ((((x) >> (16 - 16 * ((i) & 1))) & 0xffff) * 32)
+#define VEPU_REG_VP8_SEG0_QUANT_AC_Y1 0x104
+#define VEPU_REG_VP8_SEG0_RND_AC_Y1(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_AC_Y1(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_AC_Y1(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DC_Y2 0x108
+#define VEPU_REG_VP8_SEG0_RND_DC_Y2(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_DC_Y2(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_DC_Y2(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_AC_Y2 0x10c
+#define VEPU_REG_VP8_SEG0_RND_AC_Y2(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_AC_Y2(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_AC_Y2(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DC_CHR 0x110
+#define VEPU_REG_VP8_SEG0_RND_DC_CHR(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_DC_CHR(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_DC_CHR(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_AC_CHR 0x114
+#define VEPU_REG_VP8_SEG0_RND_AC_CHR(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_AC_CHR(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_AC_CHR(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DQUT 0x118
+#define VEPU_REG_VP8_MV_REF_IDX1(x) (((x) & 0x03) << 26)
+#define VEPU_REG_VP8_SEG0_DQUT_DC_Y2(x) (((x) & 0x1ff) << 17)
+#define VEPU_REG_VP8_SEG0_DQUT_AC_Y1(x) (((x) & 0x1ff) << 8)
+#define VEPU_REG_VP8_SEG0_DQUT_DC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_CHKPT_WORD_ERR(i) (0x118 + ((i) * 0x4))
+#define VEPU_REG_CHKPT_WORD_ERR_CHK0(x) (((x) & 0xffff))
+#define VEPU_REG_CHKPT_WORD_ERR_CHK1(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_VP8_SEG0_QUANT_DQUT_1 0x11c
+#define VEPU_REG_VP8_SEGMENT_MAP_UPDATE BIT(30)
+#define VEPU_REG_VP8_SEGMENT_EN BIT(29)
+#define VEPU_REG_VP8_MV_REF_IDX2_EN BIT(28)
+#define VEPU_REG_VP8_MV_REF_IDX2(x) (((x) & 0x03) << 26)
+#define VEPU_REG_VP8_SEG0_DQUT_AC_CHR(x) (((x) & 0x1ff) << 17)
+#define VEPU_REG_VP8_SEG0_DQUT_DC_CHR(x) (((x) & 0xff) << 9)
+#define VEPU_REG_VP8_SEG0_DQUT_AC_Y2(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_VP8_BOOL_ENC_VALUE 0x120
+#define VEPU_REG_CHKPT_DELTA_QP 0x124
+#define VEPU_REG_CHKPT_DELTA_QP_CHK0(x) (((x) & 0x0f) << 0)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK1(x) (((x) & 0x0f) << 4)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK2(x) (((x) & 0x0f) << 8)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK3(x) (((x) & 0x0f) << 12)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK4(x) (((x) & 0x0f) << 16)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK5(x) (((x) & 0x0f) << 20)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK6(x) (((x) & 0x0f) << 24)
+#define VEPU_REG_VP8_ENC_CTRL2 0x124
+#define VEPU_REG_VP8_ZERO_MV_PENALTY_FOR_REF2(x) (((x) & 0xff) << 24)
+#define VEPU_REG_VP8_FILTER_SHARPNESS(x) (((x) & 0x07) << 21)
+#define VEPU_REG_VP8_FILTER_LEVEL(x) (((x) & 0x3f) << 15)
+#define VEPU_REG_VP8_DCT_PARTITION_CNT(x) (((x) & 0x03) << 13)
+#define VEPU_REG_VP8_BOOL_ENC_VALUE_BITS(x) (((x) & 0x1f) << 8)
+#define VEPU_REG_VP8_BOOL_ENC_RANGE(x) (((x) & 0xff) << 0)
+#define VEPU_REG_ENC_CTRL1 0x128
+#define VEPU_REG_MAD_THRESHOLD(x) (((x) & 0x3f) << 24)
+#define VEPU_REG_COMPLETED_SLICES(x) (((x) & 0xff) << 16)
+#define VEPU_REG_IN_IMG_CTRL_FMT(x) (((x) & 0xf) << 4)
+#define VEPU_REG_IN_IMG_ROTATE_MODE(x) (((x) & 0x3) << 2)
+#define VEPU_REG_SIZE_TABLE_PRESENT BIT(0)
+#define VEPU_REG_INTRA_INTER_MODE 0x12c
+#define VEPU_REG_INTRA16X16_MODE(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_INTER_MODE(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_ENC_CTRL2 0x130
+#define VEPU_REG_PPS_INIT_QP(x) (((x) & 0x3f) << 26)
+#define VEPU_REG_SLICE_FILTER_ALPHA(x) (((x) & 0xf) << 22)
+#define VEPU_REG_SLICE_FILTER_BETA(x) (((x) & 0xf) << 18)
+#define VEPU_REG_CHROMA_QP_OFFSET(x) (((x) & 0x1f) << 13)
+#define VEPU_REG_FILTER_DISABLE BIT(5)
+#define VEPU_REG_IDR_PIC_ID(x) (((x) & 0xf) << 1)
+#define VEPU_REG_CONSTRAINED_INTRA_PREDICTION BIT(0)
+#define VEPU_REG_ADDR_OUTPUT_STREAM 0x134
+#define VEPU_REG_ADDR_OUTPUT_CTRL 0x138
+#define VEPU_REG_ADDR_NEXT_PIC 0x13c
+#define VEPU_REG_ADDR_MV_OUT 0x140
+#define VEPU_REG_ADDR_CABAC_TBL 0x144
+#define VEPU_REG_ROI1 0x148
+#define VEPU_REG_ROI1_TOP_MB(x) (((x) & 0xff) << 24)
+#define VEPU_REG_ROI1_BOTTOM_MB(x) (((x) & 0xff) << 16)
+#define VEPU_REG_ROI1_LEFT_MB(x) (((x) & 0xff) << 8)
+#define VEPU_REG_ROI1_RIGHT_MB(x) (((x) & 0xff) << 0)
+#define VEPU_REG_ROI2 0x14c
+#define VEPU_REG_ROI2_TOP_MB(x) (((x) & 0xff) << 24)
+#define VEPU_REG_ROI2_BOTTOM_MB(x) (((x) & 0xff) << 16)
+#define VEPU_REG_ROI2_LEFT_MB(x) (((x) & 0xff) << 8)
+#define VEPU_REG_ROI2_RIGHT_MB(x) (((x) & 0xff) << 0)
+#define VEPU_REG_STABLE_MATRIX(i) (0x150 + ((i) * 0x4))
+#define VEPU_REG_STABLE_MOTION_SUM 0x174
+#define VEPU_REG_STABILIZATION_OUTPUT 0x178
+#define VEPU_REG_STABLE_MIN_VALUE(x) (((x) & 0xffffff) << 8)
+#define VEPU_REG_STABLE_MODE_SEL(x) (((x) & 0x3) << 6)
+#define VEPU_REG_STABLE_HOR_GMV(x) (((x) & 0x3f) << 0)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEF1 0x17c
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFB(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFA(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEF2 0x180
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFE(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFC(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEF3 0x184
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFF(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_RGB_MASK_MSB 0x188
+#define VEPU_REG_RGB_MASK_B_MSB(x) (((x) & 0x1f) << 16)
+#define VEPU_REG_RGB_MASK_G_MSB(x) (((x) & 0x1f) << 8)
+#define VEPU_REG_RGB_MASK_R_MSB(x) (((x) & 0x1f) << 0)
+#define VEPU_REG_MV_PENALTY 0x18c
+#define VEPU_REG_1MV_PENALTY(x) (((x) & 0x3ff) << 21)
+#define VEPU_REG_QMV_PENALTY(x) (((x) & 0x3ff) << 11)
+#define VEPU_REG_4MV_PENALTY(x) (((x) & 0x3ff) << 1)
+#define VEPU_REG_SPLIT_MV_MODE_EN BIT(0)
+#define VEPU_REG_QP_VAL 0x190
+#define VEPU_REG_H264_LUMA_INIT_QP(x) (((x) & 0x3f) << 26)
+#define VEPU_REG_H264_QP_MAX(x) (((x) & 0x3f) << 20)
+#define VEPU_REG_H264_QP_MIN(x) (((x) & 0x3f) << 14)
+#define VEPU_REG_H264_CHKPT_DISTANCE(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DC_Y1 0x190
+#define VEPU_REG_VP8_SEG0_RND_DC_Y1(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_DC_Y1(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_DC_Y1(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_MVC_RELATE 0x198
+#define VEPU_REG_ZERO_MV_FAVOR_D2(x) (((x) & 0xf) << 20)
+#define VEPU_REG_PENALTY_4X4MV(x) (((x) & 0x1ff) << 11)
+#define VEPU_REG_MVC_VIEW_ID(x) (((x) & 0x7) << 8)
+#define VEPU_REG_MVC_ANCHOR_PIC_FLAG BIT(7)
+#define VEPU_REG_MVC_PRIORITY_ID(x) (((x) & 0x7) << 4)
+#define VEPU_REG_MVC_TEMPORAL_ID(x) (((x) & 0x7) << 1)
+#define VEPU_REG_MVC_INTER_VIEW_FLAG BIT(0)
+#define VEPU_REG_ENCODE_START 0x19c
+#define VEPU_REG_MB_HEIGHT(x) (((x) & 0x1ff) << 20)
+#define VEPU_REG_MB_WIDTH(x) (((x) & 0x1ff) << 8)
+#define VEPU_REG_FRAME_TYPE_INTER (0x0 << 6)
+#define VEPU_REG_FRAME_TYPE_INTRA (0x1 << 6)
+#define VEPU_REG_FRAME_TYPE_MVCINTER (0x2 << 6)
+#define VEPU_REG_ENCODE_FORMAT_JPEG (0x2 << 4)
+#define VEPU_REG_ENCODE_FORMAT_H264 (0x3 << 4)
+#define VEPU_REG_ENCODE_ENABLE BIT(0)
+#define VEPU_REG_MB_CTRL 0x1a0
+#define VEPU_REG_MB_CNT_OUT(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_MB_CNT_SET(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_DATA_ENDIAN 0x1a4
+#define VEPU_REG_INPUT_SWAP8 BIT(31)
+#define VEPU_REG_INPUT_SWAP16 BIT(30)
+#define VEPU_REG_INPUT_SWAP32 BIT(29)
+#define VEPU_REG_OUTPUT_SWAP8 BIT(28)
+#define VEPU_REG_OUTPUT_SWAP16 BIT(27)
+#define VEPU_REG_OUTPUT_SWAP32 BIT(26)
+#define VEPU_REG_TEST_IRQ BIT(24)
+#define VEPU_REG_TEST_COUNTER(x) (((x) & 0xf) << 20)
+#define VEPU_REG_TEST_REG BIT(19)
+#define VEPU_REG_TEST_MEMORY BIT(18)
+#define VEPU_REG_TEST_LEN(x) (((x) & 0x3ffff) << 0)
+#define VEPU_REG_ENC_CTRL3 0x1a8
+#define VEPU_REG_PPS_ID(x) (((x) & 0xff) << 24)
+#define VEPU_REG_INTRA_PRED_MODE(x) (((x) & 0xff) << 16)
+#define VEPU_REG_FRAME_NUM(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_ENC_CTRL4 0x1ac
+#define VEPU_REG_MV_PENALTY_16X8_8X16(x) (((x) & 0x3ff) << 20)
+#define VEPU_REG_MV_PENALTY_8X8(x) (((x) & 0x3ff) << 10)
+#define VEPU_REG_MV_PENALTY_8X4_4X8(x) (((x) & 0x3ff) << 0)
+#define VEPU_REG_ADDR_VP8_PROB_CNT 0x1b0
+#define VEPU_REG_INTERRUPT 0x1b4
+#define VEPU_REG_INTERRUPT_NON BIT(28)
+#define VEPU_REG_MV_WRITE_EN BIT(24)
+#define VEPU_REG_RECON_WRITE_DIS BIT(20)
+#define VEPU_REG_INTERRUPT_SLICE_READY_EN BIT(16)
+#define VEPU_REG_CLK_GATING_EN BIT(12)
+#define VEPU_REG_INTERRUPT_TIMEOUT_EN BIT(10)
+#define VEPU_REG_INTERRUPT_RESET BIT(9)
+#define VEPU_REG_INTERRUPT_DIS_BIT BIT(8)
+#define VEPU_REG_INTERRUPT_TIMEOUT BIT(6)
+#define VEPU_REG_INTERRUPT_BUFFER_FULL BIT(5)
+#define VEPU_REG_INTERRUPT_BUS_ERROR BIT(4)
+#define VEPU_REG_INTERRUPT_FUSE BIT(3)
+#define VEPU_REG_INTERRUPT_SLICE_READY BIT(2)
+#define VEPU_REG_INTERRUPT_FRAME_READY BIT(1)
+#define VEPU_REG_INTERRUPT_BIT BIT(0)
+#define VEPU_REG_DMV_PENALTY_TBL(i) (0x1E0 + ((i) * 0x4))
+#define VEPU_REG_DMV_PENALTY_TABLE_BIT(x, i) ((x) << ((i) * 8))
+#define VEPU_REG_DMV_Q_PIXEL_PENALTY_TBL(i) (0x260 + ((i) * 0x4))
+#define VEPU_REG_DMV_Q_PIXEL_PENALTY_TABLE_BIT(x, i) ((x) << ((i) * 8))
+
+/* vpu decoder register */
+#define VDPU_REG_DEC_CTRL0 0x0c8 /* 50 */
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_PICID(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_THR(x) (((x) & 0xfff) << 13)
+#define VDPU_REG_CONFIG_TILED_MODE_LSB BIT(12)
+#define VDPU_REG_CONFIG_DEC_ADV_PRE_DIS BIT(11)
+#define VDPU_REG_CONFIG_DEC_SCMD_DIS BIT(10)
+#define VDPU_REG_DEC_CTRL0_SKIP_MODE BIT(9)
+#define VDPU_REG_DEC_CTRL0_FILTERING_DIS BIT(8)
+#define VDPU_REG_DEC_CTRL0_PIC_FIXED_QUANT BIT(7)
+#define VDPU_REG_CONFIG_DEC_LATENCY(x) (((x) & 0x3f) << 1)
+#define VDPU_REG_CONFIG_TILED_MODE_MSB(x) BIT(0)
+#define VDPU_REG_CONFIG_DEC_OUT_TILED_E BIT(0)
+#define VDPU_REG_STREAM_LEN 0x0cc
+#define VDPU_REG_DEC_CTRL3_INIT_QP(x) (((x) & 0x3f) << 25)
+#define VDPU_REG_DEC_STREAM_LEN_HI BIT(24)
+#define VDPU_REG_DEC_CTRL3_STREAM_LEN(x) (((x) & 0xffffff) << 0)
+#define VDPU_REG_ERROR_CONCEALMENT 0x0d0
+#define VDPU_REG_REF_BUF_CTRL2_APF_THRESHOLD(x) (((x) & 0x3fff) << 17)
+#define VDPU_REG_ERR_CONC_STARTMB_X(x) (((x) & 0x1ff) << 8)
+#define VDPU_REG_ERR_CONC_STARTMB_Y(x) (((x) & 0xff) << 0)
+#define VDPU_REG_DEC_FORMAT 0x0d4
+#define VDPU_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 0)
+#define VDPU_REG_DATA_ENDIAN 0x0d8
+#define VDPU_REG_CONFIG_DEC_STRENDIAN_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_STRSWAP32_E BIT(4)
+#define VDPU_REG_CONFIG_DEC_OUTSWAP32_E BIT(3)
+#define VDPU_REG_CONFIG_DEC_INSWAP32_E BIT(2)
+#define VDPU_REG_CONFIG_DEC_OUT_ENDIAN BIT(1)
+#define VDPU_REG_CONFIG_DEC_IN_ENDIAN BIT(0)
+#define VDPU_REG_INTERRUPT 0x0dc
+#define VDPU_REG_INTERRUPT_DEC_TIMEOUT BIT(13)
+#define VDPU_REG_INTERRUPT_DEC_ERROR_INT BIT(12)
+#define VDPU_REG_INTERRUPT_DEC_PIC_INF BIT(10)
+#define VDPU_REG_INTERRUPT_DEC_SLICE_INT BIT(9)
+#define VDPU_REG_INTERRUPT_DEC_ASO_INT BIT(8)
+#define VDPU_REG_INTERRUPT_DEC_BUFFER_INT BIT(6)
+#define VDPU_REG_INTERRUPT_DEC_BUS_INT BIT(5)
+#define VDPU_REG_INTERRUPT_DEC_RDY_INT BIT(4)
+#define VDPU_REG_INTERRUPT_DEC_IRQ_DIS BIT(1)
+#define VDPU_REG_INTERRUPT_DEC_IRQ BIT(0)
+#define VDPU_REG_AXI_CTRL 0x0e0
+#define VDPU_REG_AXI_DEC_SEL BIT(23)
+#define VDPU_REG_CONFIG_DEC_DATA_DISC_E BIT(22)
+#define VDPU_REG_PARAL_BUS_E(x) BIT(21)
+#define VDPU_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 16)
+#define VDPU_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 8)
+#define VDPU_REG_CONFIG_DEC_AXI_RD_ID(x) (((x) & 0xff) << 0)
+#define VDPU_REG_EN_FLAGS 0x0e4
+#define VDPU_REG_AHB_HLOCK_E BIT(31)
+#define VDPU_REG_CACHE_E BIT(29)
+#define VDPU_REG_PREFETCH_SINGLE_CHANNEL_E BIT(28)
+#define VDPU_REG_INTRA_3_CYCLE_ENHANCE BIT(27)
+#define VDPU_REG_INTRA_DOUBLE_SPEED BIT(26)
+#define VDPU_REG_INTER_DOUBLE_SPEED BIT(25)
+#define VDPU_REG_DEC_CTRL3_START_CODE_E BIT(22)
+#define VDPU_REG_DEC_CTRL3_CH_8PIX_ILEAV_E BIT(21)
+#define VDPU_REG_DEC_CTRL0_RLC_MODE_E BIT(20)
+#define VDPU_REG_DEC_CTRL0_DIVX3_E BIT(19)
+#define VDPU_REG_DEC_CTRL0_PJPEG_E BIT(18)
+#define VDPU_REG_DEC_CTRL0_PIC_INTERLACE_E BIT(17)
+#define VDPU_REG_DEC_CTRL0_PIC_FIELDMODE_E BIT(16)
+#define VDPU_REG_DEC_CTRL0_PIC_B_E BIT(15)
+#define VDPU_REG_DEC_CTRL0_PIC_INTER_E BIT(14)
+#define VDPU_REG_DEC_CTRL0_PIC_TOPFIELD_E BIT(13)
+#define VDPU_REG_DEC_CTRL0_FWD_INTERLACE_E BIT(12)
+#define VDPU_REG_DEC_CTRL0_SORENSON_E BIT(11)
+#define VDPU_REG_DEC_CTRL0_WRITE_MVS_E BIT(10)
+#define VDPU_REG_DEC_CTRL0_REF_TOPFIELD_E BIT(9)
+#define VDPU_REG_DEC_CTRL0_REFTOPFIRST_E BIT(8)
+#define VDPU_REG_DEC_CTRL0_SEQ_MBAFF_E BIT(7)
+#define VDPU_REG_DEC_CTRL0_PICORD_COUNT_E BIT(6)
+#define VDPU_REG_CONFIG_DEC_TIMEOUT_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_CLK_GATE_E BIT(4)
+#define VDPU_REG_DEC_CTRL0_DEC_OUT_DIS BIT(2)
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_BUF_E BIT(1)
+#define VDPU_REG_INTERRUPT_DEC_E BIT(0)
+#define VDPU_REG_SOFT_RESET 0x0e8
+#define VDPU_REG_PRED_FLT 0x0ec
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_0(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_1(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_2(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_ADDITIONAL_CHROMA_ADDRESS 0x0f0
+#define VDPU_REG_ADDR_QTABLE 0x0f4
+#define VDPU_REG_DIRECT_MV_ADDR 0x0f8
+#define VDPU_REG_ADDR_DST 0x0fc
+#define VDPU_REG_ADDR_STR 0x100
+#define VDPU_REG_REFBUF_RELATED 0x104
+#define VDPU_REG_FWD_PIC(i) (0x128 + ((i) * 0x4))
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_REF_PIC(i) (0x130 + ((i) * 0x4))
+#define VDPU_REG_REF_PIC_REFER1_NBR(x) (((x) & 0xffff) << 16)
+#define VDPU_REG_REF_PIC_REFER0_NBR(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_H264_ADDR_REF(i) (0x150 + ((i) * 0x4))
+#define VDPU_REG_ADDR_REF_FIELD_E BIT(1)
+#define VDPU_REG_ADDR_REF_TOPC_E BIT(0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST0 0x190
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST1 0x194
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F11(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F10(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F9(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F8(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F7(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F6(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST2 0x198
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F15(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F14(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F13(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F12(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST3 0x19c
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B5(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B4(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST4 0x1a0
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B11(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B10(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B9(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B8(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B7(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B6(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST5 0x1a4
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B15(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B14(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B13(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B12(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST6 0x1a8
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_LT_REF 0x1ac
+#define VDPU_REG_VALID_REF 0x1b0
+#define VDPU_REG_H264_PIC_MB_SIZE 0x1b8
+#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET2(x) (((x) & 0x1f) << 22)
+#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET(x) (((x) & 0x1f) << 17)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 9)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 0)
+#define VDPU_REG_H264_CTRL 0x1bc
+#define VDPU_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(x) (((x) & 0x3) << 16)
+#define VDPU_REG_DEC_CTRL1_REF_FRAMES(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_CURRENT_FRAME 0x1c0
+#define VDPU_REG_DEC_CTRL5_FILT_CTRL_PRES BIT(31)
+#define VDPU_REG_DEC_CTRL5_RDPIC_CNT_PRES BIT(30)
+#define VDPU_REG_DEC_CTRL4_FRAMENUM_LEN(x) (((x) & 0x1f) << 16)
+#define VDPU_REG_DEC_CTRL4_FRAMENUM(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_REF_FRAME 0x1c4
+#define VDPU_REG_DEC_CTRL5_REFPIC_MK_LEN(x) (((x) & 0x7ff) << 16)
+#define VDPU_REG_DEC_CTRL5_IDR_PIC_ID(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_DEC_CTRL6 0x1c8
+#define VDPU_REG_DEC_CTRL6_PPS_ID(x) (((x) & 0xff) << 24)
+#define VDPU_REG_DEC_CTRL6_REFIDX1_ACTIVE(x) (((x) & 0x1f) << 19)
+#define VDPU_REG_DEC_CTRL6_REFIDX0_ACTIVE(x) (((x) & 0x1f) << 14)
+#define VDPU_REG_DEC_CTRL6_POC_LENGTH(x) (((x) & 0xff) << 0)
+#define VDPU_REG_ENABLE_FLAG 0x1cc
+#define VDPU_REG_DEC_CTRL5_IDR_PIC_E BIT(8)
+#define VDPU_REG_DEC_CTRL4_DIR_8X8_INFER_E BIT(7)
+#define VDPU_REG_DEC_CTRL4_BLACKWHITE_E BIT(6)
+#define VDPU_REG_DEC_CTRL4_CABAC_E BIT(5)
+#define VDPU_REG_DEC_CTRL4_WEIGHT_PRED_E BIT(4)
+#define VDPU_REG_DEC_CTRL5_CONST_INTRA_E BIT(3)
+#define VDPU_REG_DEC_CTRL5_8X8TRANS_FLAG_E BIT(2)
+#define VDPU_REG_DEC_CTRL2_TYPE1_QUANT_E BIT(1)
+#define VDPU_REG_DEC_CTRL2_FIELDPIC_FLAG_E BIT(0)
+#define VDPU_REG_VP8_PIC_MB_SIZE 0x1e0
+#define VDPU_REG_DEC_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
+#define VDPU_REG_DEC_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
+#define VDPU_REG_DEC_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 11)
+#define VDPU_REG_DEC_MB_HEIGHT_OFF(x) (((x) & 0xf) << 7)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_W_EXT(x) (((x) & 0x7) << 3)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_H_EXT(x) (((x) & 0x7) << 0)
+#define VDPU_REG_VP8_DCT_START_BIT 0x1e4
+#define VDPU_REG_DEC_CTRL4_DCT1_START_BIT(x) (((x) & 0x3f) << 26)
+#define VDPU_REG_DEC_CTRL4_DCT2_START_BIT(x) (((x) & 0x3f) << 20)
+#define VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
+#define VDPU_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
+#define VDPU_REG_VP8_CTRL0 0x1e8
+#define VDPU_REG_DEC_CTRL2_STRM_START_BIT(x) (((x) & 0x3f) << 26)
+#define VDPU_REG_DEC_CTRL2_STRM1_START_BIT(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_DEC_CTRL2_BOOLEAN_VALUE(x) (((x) & 0xff) << 8)
+#define VDPU_REG_DEC_CTRL2_BOOLEAN_RANGE(x) (((x) & 0xff) << 0)
+#define VDPU_REG_VP8_DATA_VAL 0x1f0
+#define VDPU_REG_DEC_CTRL6_COEFFS_PART_AM(x) (((x) & 0xf) << 24)
+#define VDPU_REG_DEC_CTRL6_STREAM1_LEN(x) (((x) & 0xffffff) << 0)
+#define VDPU_REG_PRED_FLT7 0x1f4
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_1(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_2(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_3(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT8 0x1f8
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_0(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_1(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_2(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT9 0x1fc
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_3(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_0(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_1(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT10 0x200
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_2(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_3(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_M1(x) (((x) & 0x3) << 10)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_4(x) (((x) & 0x3) << 8)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_M1(x) (((x) & 0x3) << 6)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_4(x) (((x) & 0x3) << 4)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_M1(x) (((x) & 0x3) << 2)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_4(x) (((x) & 0x3) << 0)
+#define VDPU_REG_FILTER_LEVEL 0x204
+#define VDPU_REG_REF_PIC_LF_LEVEL_0(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_REF_PIC_LF_LEVEL_1(x) (((x) & 0x3f) << 12)
+#define VDPU_REG_REF_PIC_LF_LEVEL_2(x) (((x) & 0x3f) << 6)
+#define VDPU_REG_REF_PIC_LF_LEVEL_3(x) (((x) & 0x3f) << 0)
+#define VDPU_REG_VP8_QUANTER0 0x208
+#define VDPU_REG_REF_PIC_QUANT_DELTA_0(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_REF_PIC_QUANT_DELTA_1(x) (((x) & 0x1f) << 22)
+#define VDPU_REG_REF_PIC_QUANT_0(x) (((x) & 0x7ff) << 11)
+#define VDPU_REG_REF_PIC_QUANT_1(x) (((x) & 0x7ff) << 0)
+#define VDPU_REG_VP8_ADDR_REF0 0x20c
+#define VDPU_REG_FILTER_MB_ADJ 0x210
+#define VDPU_REG_REF_PIC_FILT_TYPE_E BIT(31)
+#define VDPU_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
+#define VDPU_REG_FILT_MB_ADJ_0(x) (((x) & 0x7f) << 21)
+#define VDPU_REG_FILT_MB_ADJ_1(x) (((x) & 0x7f) << 14)
+#define VDPU_REG_FILT_MB_ADJ_2(x) (((x) & 0x7f) << 7)
+#define VDPU_REG_FILT_MB_ADJ_3(x) (((x) & 0x7f) << 0)
+#define VDPU_REG_FILTER_REF_ADJ 0x214
+#define VDPU_REG_REF_PIC_ADJ_0(x) (((x) & 0x7f) << 21)
+#define VDPU_REG_REF_PIC_ADJ_1(x) (((x) & 0x7f) << 14)
+#define VDPU_REG_REF_PIC_ADJ_2(x) (((x) & 0x7f) << 7)
+#define VDPU_REG_REF_PIC_ADJ_3(x) (((x) & 0x7f) << 0)
+#define VDPU_REG_VP8_ADDR_REF2_5(i) (0x218 + ((i) * 0x4))
+#define VDPU_REG_VP8_GREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_AREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_DCT_BASE(i) (0x230 + ((i) * 0x4))
+#define VDPU_REG_VP8_ADDR_CTRL_PART 0x244
+#define VDPU_REG_VP8_ADDR_REF1 0x250
+#define VDPU_REG_VP8_SEGMENT_VAL 0x254
+#define VDPU_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
+#define VDPU_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
+#define VDPU_REG_FWD_PIC1_SEGMENT_E BIT(0)
+#define VDPU_REG_VP8_DCT_START_BIT2 0x258
+#define VDPU_REG_DEC_CTRL7_DCT3_START_BIT(x) (((x) & 0x3f) << 24)
+#define VDPU_REG_DEC_CTRL7_DCT4_START_BIT(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_DEC_CTRL7_DCT5_START_BIT(x) (((x) & 0x3f) << 12)
+#define VDPU_REG_DEC_CTRL7_DCT6_START_BIT(x) (((x) & 0x3f) << 6)
+#define VDPU_REG_DEC_CTRL7_DCT7_START_BIT(x) (((x) & 0x3f) << 0)
+#define VDPU_REG_VP8_QUANTER1 0x25c
+#define VDPU_REG_REF_PIC_QUANT_DELTA_2(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_REF_PIC_QUANT_DELTA_3(x) (((x) & 0x1f) << 22)
+#define VDPU_REG_REF_PIC_QUANT_2(x) (((x) & 0x7ff) << 11)
+#define VDPU_REG_REF_PIC_QUANT_3(x) (((x) & 0x7ff) << 0)
+#define VDPU_REG_VP8_QUANTER2 0x260
+#define VDPU_REG_REF_PIC_QUANT_DELTA_4(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_REF_PIC_QUANT_4(x) (((x) & 0x7ff) << 11)
+#define VDPU_REG_REF_PIC_QUANT_5(x) (((x) & 0x7ff) << 0)
+#define VDPU_REG_PRED_FLT1 0x264
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_3(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_0(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_1(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT2 0x268
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_2(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_3(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_0(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT3 0x26c
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_1(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_2(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_3(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT4 0x270
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_0(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_1(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_2(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT5 0x274
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_3(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_0(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_1(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT6 0x278
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_2(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_3(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_0(x) (((x) & 0x3ff) << 2)
+
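+/*
+ * The parameterized macros above mask a value and shift it into its
+ * field position, so a full register word is composed by OR-ing fields
+ * together before it is written out. For example (mb_w/mb_h are
+ * hypothetical macroblock counts for VDPU_REG_H264_PIC_MB_SIZE):
+ *
+ *   u32 reg = VDPU_REG_DEC_CTRL1_PIC_MB_WIDTH(mb_w) |
+ *	       VDPU_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(mb_h);
+ */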
+#endif /* RK3399_VPU_REGS_H_ */
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu.h
new file mode 100644
index 000000000000..1ec2be483e27
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Rockchip VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef ROCKCHIP_VPU_H_
+#define ROCKCHIP_VPU_H_
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+#include <linux/clk.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "rockchip_vpu_hw.h"
+
+#define ROCKCHIP_VPU_MAX_CLOCKS 4
+
+#define JPEG_MB_DIM 16
+#define JPEG_MB_WIDTH(w) DIV_ROUND_UP(w, JPEG_MB_DIM)
+#define JPEG_MB_HEIGHT(h) DIV_ROUND_UP(h, JPEG_MB_DIM)
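+/*
+ * e.g. a 1920x1080 frame is JPEG_MB_WIDTH(1920) x JPEG_MB_HEIGHT(1080) =
+ * 120 x 68 macroblocks; 1080 is not a multiple of 16, so it rounds up.
+ */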
+
+struct rockchip_vpu_ctx;
+struct rockchip_vpu_codec_ops;
+
+#define RK_VPU_CODEC_JPEG BIT(0)
+
+/**
+ * struct rockchip_vpu_variant - information about VPU hardware variant
+ *
+ * @enc_offset: Offset from VPU base to encoder registers.
+ * @enc_fmts: Encoder formats.
+ * @num_enc_fmts: Number of encoder formats.
+ * @codec: Supported codecs.
+ * @codec_ops: Codec ops.
+ * @init: Initialize hardware.
+ * @vepu_irq: Encoder interrupt handler.
+ * @clk_names: Array of clock names.
+ * @num_clocks: Number of clocks in the array.
+ */
+struct rockchip_vpu_variant {
+ unsigned int enc_offset;
+ const struct rockchip_vpu_fmt *enc_fmts;
+ unsigned int num_enc_fmts;
+ unsigned int codec;
+ const struct rockchip_vpu_codec_ops *codec_ops;
+ int (*init)(struct rockchip_vpu_dev *vpu);
+ irqreturn_t (*vepu_irq)(int irq, void *priv);
+ const char *clk_names[ROCKCHIP_VPU_MAX_CLOCKS];
+ int num_clocks;
+};
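+
+/*
+ * Sketch of a variant table entry as a per-SoC hw file would provide it
+ * (all values are illustrative, not the actual RK3399 numbers):
+ *
+ *   static const struct rockchip_vpu_variant rk3399_vpu_variant = {
+ *	.enc_offset = 0x0,
+ *	.enc_fmts = rk3399_vpu_enc_fmts,
+ *	.num_enc_fmts = ARRAY_SIZE(rk3399_vpu_enc_fmts),
+ *	.codec = RK_VPU_CODEC_JPEG,
+ *	.codec_ops = rk3399_vpu_codec_ops,
+ *	.init = rk3399_vpu_hw_init,
+ *	.vepu_irq = rk3399_vepu_irq,
+ *	.clk_names = { "aclk", "hclk" },
+ *	.num_clocks = 2,
+ *   };
+ */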
+
+/**
+ * enum rockchip_vpu_codec_mode - codec operating mode.
+ * @RK_VPU_MODE_NONE: No operating mode. Used for RAW video formats.
+ * @RK_VPU_MODE_JPEG_ENC: JPEG encoder.
+ */
+enum rockchip_vpu_codec_mode {
+ RK_VPU_MODE_NONE = -1,
+ RK_VPU_MODE_JPEG_ENC,
+};
+
+/**
+ * struct rockchip_vpu_dev - driver data
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @m2m_dev: mem2mem device associated to this device.
+ * @mdev: media device associated to this device.
+ * @vfd_enc: Video device for encoder.
+ * @pdev: Pointer to VPU platform device.
+ * @dev: Pointer to device for convenient logging using
+ * dev_ macros.
+ * @clocks: Array of clock handles.
+ * @base: Mapped address of VPU registers.
+ * @enc_base: Mapped address of VPU encoder register for convenience.
+ * @vpu_mutex: Mutex to synchronize V4L2 calls.
+ * @irqlock: Spinlock to synchronize access to data structures
+ * shared with interrupt handlers.
+ * @variant: Hardware variant-specific parameters.
+ * @watchdog_work: Delayed work for hardware timeout handling.
+ */
+struct rockchip_vpu_dev {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct media_device mdev;
+ struct video_device *vfd_enc;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct clk_bulk_data clocks[ROCKCHIP_VPU_MAX_CLOCKS];
+ void __iomem *base;
+ void __iomem *enc_base;
+
+ struct mutex vpu_mutex; /* video_device lock */
+ spinlock_t irqlock;
+ const struct rockchip_vpu_variant *variant;
+ struct delayed_work watchdog_work;
+};
+
+/**
+ * struct rockchip_vpu_ctx - Context (instance) private data.
+ *
+ * @dev: VPU driver data to which the context belongs.
+ * @fh: V4L2 file handler.
+ *
+ * @sequence_cap: Sequence counter for capture queue.
+ * @sequence_out: Sequence counter for output queue.
+ *
+ * @vpu_src_fmt: Descriptor of active source format.
+ * @src_fmt: V4L2 pixel format of active source format.
+ * @vpu_dst_fmt: Descriptor of active destination format.
+ * @dst_fmt: V4L2 pixel format of active destination format.
+ *
+ * @ctrl_handler: Control handler used to register controls.
+ * @jpeg_quality: User-specified JPEG compression quality.
+ *
+ * @codec_ops: Set of operations related to codec mode.
+ *
+ * @bounce_dma_addr: Bounce buffer bus address.
+ * @bounce_buf: Bounce buffer pointer.
+ * @bounce_size: Bounce buffer size.
+ */
+struct rockchip_vpu_ctx {
+ struct rockchip_vpu_dev *dev;
+ struct v4l2_fh fh;
+
+ u32 sequence_cap;
+ u32 sequence_out;
+
+ const struct rockchip_vpu_fmt *vpu_src_fmt;
+ struct v4l2_pix_format_mplane src_fmt;
+ const struct rockchip_vpu_fmt *vpu_dst_fmt;
+ struct v4l2_pix_format_mplane dst_fmt;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ int jpeg_quality;
+
+ const struct rockchip_vpu_codec_ops *codec_ops;
+
+ dma_addr_t bounce_dma_addr;
+ void *bounce_buf;
+ size_t bounce_size;
+};
+
+/**
+ * struct rockchip_vpu_fmt - information about supported video formats.
+ * @name: Human readable name of the format.
+ * @fourcc: FourCC code of the format. See V4L2_PIX_FMT_*.
+ * @codec_mode: Codec mode related to this format. See
+ * enum rockchip_vpu_codec_mode.
+ * @header_size: Optional header size. Currently used by JPEG encoder.
+ * @max_depth: Maximum depth, for bitstream formats
+ * @enc_fmt: Format identifier for encoder registers.
+ * @frmsize: Supported range of frame sizes (only for bitstream formats).
+ */
+struct rockchip_vpu_fmt {
+ char *name;
+ u32 fourcc;
+ enum rockchip_vpu_codec_mode codec_mode;
+ int header_size;
+ int max_depth;
+ enum rockchip_vpu_enc_fmt enc_fmt;
+ struct v4l2_frmsize_stepwise frmsize;
+};
+
+/* Logging helpers */
+
+/**
+ * debug - Module parameter to control level of debugging messages.
+ *
+ * The level of debugging messages can be controlled by the bits of
+ * the module parameter called "debug". The meaning of the particular
+ * bits is as follows:
+ *
+ * bit 0 - global information: mode, size, init, release
+ * bit 1 - each run start/result information
+ * bit 2 - contents of small controls from userspace
+ * bit 3 - contents of big controls from userspace
+ * bit 4 - detail fmt, ctrl, buffer q/dq information
+ * bit 5 - detail function enter/leave trace information
+ * bit 6 - register write/read information
+ */
+extern int rockchip_vpu_debug;
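+
+/*
+ * Example (assuming the module is built as rockchip_vpu): debug = 0x41
+ * enables global information (bit 0) together with register write/read
+ * tracing (bit 6), either at load time:
+ *
+ *   modprobe rockchip_vpu debug=0x41
+ *
+ * or at runtime, through the writable module parameter:
+ *
+ *   echo 0x41 > /sys/module/rockchip_vpu/parameters/debug
+ */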
+
+#define vpu_debug(level, fmt, args...) \
+ do { \
+ if (rockchip_vpu_debug & BIT(level)) \
+ pr_info("%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#define vpu_err(fmt, args...) \
+ pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
+
+/* Structure access helpers. */
+static inline struct rockchip_vpu_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct rockchip_vpu_ctx, fh);
+}
+
+/* Register accessors. */
+static inline void vepu_write_relaxed(struct rockchip_vpu_dev *vpu,
+ u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel_relaxed(val, vpu->enc_base + reg);
+}
+
+static inline void vepu_write(struct rockchip_vpu_dev *vpu, u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel(val, vpu->enc_base + reg);
+}
+
+static inline u32 vepu_read(struct rockchip_vpu_dev *vpu, u32 reg)
+{
+ u32 val = readl(vpu->enc_base + reg);
+
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ return val;
+}
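+
+/*
+ * Usage sketch (mb_w/mb_h are hypothetical macroblock counts; register
+ * names come from the variant register headers): configuration registers
+ * may use the relaxed accessor, while the write that actually starts the
+ * hardware should use vepu_write(), whose barrier orders earlier
+ * normal-memory writes (e.g. DMA buffer contents) before the doorbell:
+ *
+ *   vepu_write_relaxed(vpu, VEPU_REG_OUTPUT_SWAP32, VEPU_REG_DATA_ENDIAN);
+ *   vepu_write(vpu, VEPU_REG_MB_WIDTH(mb_w) | VEPU_REG_MB_HEIGHT(mb_h) |
+ *		   VEPU_REG_ENCODE_FORMAT_JPEG | VEPU_REG_ENCODE_ENABLE,
+ *		   VEPU_REG_ENCODE_START);
+ */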
+
+#endif /* ROCKCHIP_VPU_H_ */
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h
new file mode 100644
index 000000000000..ca77668d9579
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_common.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Rockchip VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <Alpha.Lin@rock-chips.com>
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef ROCKCHIP_VPU_COMMON_H_
+#define ROCKCHIP_VPU_COMMON_H_
+
+#include "rockchip_vpu.h"
+
+extern const struct v4l2_ioctl_ops rockchip_vpu_enc_ioctl_ops;
+extern const struct vb2_ops rockchip_vpu_enc_queue_ops;
+
+void rockchip_vpu_enc_reset_src_fmt(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx);
+void rockchip_vpu_enc_reset_dst_fmt(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx);
+
+#endif /* ROCKCHIP_VPU_COMMON_H_ */
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
new file mode 100644
index 000000000000..962412c79b91
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_drv.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip VPU codec driver
+ *
+ * Copyright (C) 2018 Collabora, Ltd.
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "rockchip_vpu_common.h"
+#include "rockchip_vpu.h"
+#include "rockchip_vpu_hw.h"
+
+#define DRIVER_NAME "rockchip-vpu"
+
+int rockchip_vpu_debug;
+module_param_named(debug, rockchip_vpu_debug, int, 0644);
+MODULE_PARM_DESC(debug,
+ "Debug level - higher value produces more verbose messages");
+
+static void rockchip_vpu_job_finish(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx,
+ unsigned int bytesused,
+ enum vb2_buffer_state result)
+{
+ struct vb2_v4l2_buffer *src, *dst;
+ size_t avail_size;
+
+ pm_runtime_mark_last_busy(vpu->dev);
+ pm_runtime_put_autosuspend(vpu->dev);
+ clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
+
+ src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+
+ if (WARN_ON(!src))
+ return;
+ if (WARN_ON(!dst))
+ return;
+
+ src->sequence = ctx->sequence_out++;
+ dst->sequence = ctx->sequence_cap++;
+
+ dst->field = src->field;
+ if (src->flags & V4L2_BUF_FLAG_TIMECODE)
+ dst->timecode = src->timecode;
+ dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
+ dst->flags &= ~(V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
+ V4L2_BUF_FLAG_TIMECODE);
+ dst->flags |= src->flags & (V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
+ V4L2_BUF_FLAG_TIMECODE);
+
+ avail_size = vb2_plane_size(&dst->vb2_buf, 0) -
+ ctx->vpu_dst_fmt->header_size;
+ if (bytesused <= avail_size) {
+ if (ctx->bounce_buf) {
+ memcpy(vb2_plane_vaddr(&dst->vb2_buf, 0) +
+ ctx->vpu_dst_fmt->header_size,
+ ctx->bounce_buf, bytesused);
+ }
+ dst->vb2_buf.planes[0].bytesused =
+ ctx->vpu_dst_fmt->header_size + bytesused;
+ } else {
+ result = VB2_BUF_STATE_ERROR;
+ }
+
+ v4l2_m2m_buf_done(src, result);
+ v4l2_m2m_buf_done(dst, result);
+
+ v4l2_m2m_job_finish(vpu->m2m_dev, ctx->fh.m2m_ctx);
+}
+
+void rockchip_vpu_irq_done(struct rockchip_vpu_dev *vpu,
+ unsigned int bytesused,
+ enum vb2_buffer_state result)
+{
+ struct rockchip_vpu_ctx *ctx =
+ v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+
+ /*
+	 * If cancel_delayed_work() returns false, the watchdog timeout
+	 * already expired and the watchdog work is running; it will take
+	 * care of finishing the job.
+ */
+ if (cancel_delayed_work(&vpu->watchdog_work))
+ rockchip_vpu_job_finish(vpu, ctx, bytesused, result);
+}
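+
+/*
+ * Minimal sketch of what a variant's vepu_irq handler is expected to do
+ * (the status handling is hypothetical and the stream-length read is
+ * elided, as both are variant specific):
+ *
+ *   static irqreturn_t rk3399_vepu_irq(int irq, void *dev_id)
+ *   {
+ *	struct rockchip_vpu_dev *vpu = dev_id;
+ *	u32 status = vepu_read(vpu, VEPU_REG_INTERRUPT);
+ *	u32 bytesused = ...;
+ *
+ *	vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
+ *	rockchip_vpu_irq_done(vpu, bytesused,
+ *			      (status & VEPU_REG_INTERRUPT_FRAME_READY) ?
+ *			      VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR);
+ *	return IRQ_HANDLED;
+ *   }
+ */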
+
+void rockchip_vpu_watchdog(struct work_struct *work)
+{
+ struct rockchip_vpu_dev *vpu;
+ struct rockchip_vpu_ctx *ctx;
+
+ vpu = container_of(to_delayed_work(work),
+ struct rockchip_vpu_dev, watchdog_work);
+ ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+ if (ctx) {
+ vpu_err("frame processing timed out!\n");
+ ctx->codec_ops->reset(ctx);
+ rockchip_vpu_job_finish(vpu, ctx, 0, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static void device_run(void *priv)
+{
+ struct rockchip_vpu_ctx *ctx = priv;
+ int ret;
+
+ ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
+ if (ret)
+ goto err_cancel_job;
+ ret = pm_runtime_get_sync(ctx->dev->dev);
+ if (ret < 0)
+ goto err_cancel_job;
+
+ ctx->codec_ops->run(ctx);
+ return;
+
+err_cancel_job:
+ rockchip_vpu_job_finish(ctx->dev, ctx, 0, VB2_BUF_STATE_ERROR);
+}
+
+static struct v4l2_m2m_ops vpu_m2m_ops = {
+ .device_run = device_run,
+};
+
+static int
+enc_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct rockchip_vpu_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &rockchip_vpu_enc_queue_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ /*
+	 * The driver does mostly sequential access, so sacrifice TLB
+	 * efficiency for faster allocation. Also, the source queue needs no
+	 * CPU access, so no kernel mapping is needed.
+ */
+ src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->vpu_mutex;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ /*
+	 * The CAPTURE queue doesn't need DMA memory, as the CPU creates
+	 * the final JPEG frames from the hardware-produced JPEG payload.
+	 *
+	 * The hardware writes into a DMA-able bounce buffer instead, which
+	 * is copied into the CAPTURE buffer when a job finishes.
+ */
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &rockchip_vpu_enc_queue_ops;
+ dst_vq->mem_ops = &vb2_vmalloc_memops;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->vpu_mutex;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int rockchip_vpu_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct rockchip_vpu_ctx *ctx;
+
+ ctx = container_of(ctrl->handler,
+ struct rockchip_vpu_ctx, ctrl_handler);
+
+ vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ctx->jpeg_quality = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops rockchip_vpu_ctrl_ops = {
+ .s_ctrl = rockchip_vpu_s_ctrl,
+};
+
+static int rockchip_vpu_ctrls_setup(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx)
+{
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, 1);
+ if (vpu->variant->codec & RK_VPU_CODEC_JPEG) {
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &rockchip_vpu_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ 5, 100, 1, 50);
+ if (ctx->ctrl_handler.error) {
+ vpu_err("Adding JPEG control failed %d\n",
+ ctx->ctrl_handler.error);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return ctx->ctrl_handler.error;
+ }
+ }
+
+ return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+}
+
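+/*
+ * Userspace picks the quality through the standard JPEG class control,
+ * e.g. with v4l2-ctl (the control name is assumed from the V4L2 core):
+ *
+ *   v4l2-ctl -d /dev/videoN --set-ctrl=compression_quality=80
+ */
+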
+/*
+ * V4L2 file operations.
+ */
+
+static int rockchip_vpu_open(struct file *filp)
+{
+ struct rockchip_vpu_dev *vpu = video_drvdata(filp);
+ struct video_device *vdev = video_devdata(filp);
+ struct rockchip_vpu_ctx *ctx;
+ int ret;
+
+ /*
+	 * We do not need any extra locking here, because we operate only
+	 * on local data, except for reading a few fields from dev, which
+	 * do not change throughout the device's lifetime (guaranteed by
+	 * the reference the module takes in open()), and on V4L2 internal
+	 * objects (such as vdev and ctx->fh), for which proper locking is
+	 * done in the respective helper functions used here.
+ */
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = vpu;
+ if (vdev == vpu->vfd_enc)
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx,
+ &enc_queue_init);
+ else
+ ctx->fh.m2m_ctx = ERR_PTR(-ENODEV);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ kfree(ctx);
+ return ret;
+ }
+
+ v4l2_fh_init(&ctx->fh, vdev);
+ filp->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ if (vdev == vpu->vfd_enc) {
+ rockchip_vpu_enc_reset_dst_fmt(vpu, ctx);
+ rockchip_vpu_enc_reset_src_fmt(vpu, ctx);
+ }
+
+ ret = rockchip_vpu_ctrls_setup(vpu, ctx);
+ if (ret) {
+ vpu_err("Failed to set up controls\n");
+ goto err_fh_free;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+
+ return 0;
+
+err_fh_free:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ kfree(ctx);
+ return ret;
+}
+
+static int rockchip_vpu_release(struct file *filp)
+{
+ struct rockchip_vpu_ctx *ctx =
+ container_of(filp->private_data, struct rockchip_vpu_ctx, fh);
+
+ /*
+ * No need for extra locking because this was the last reference
+ * to this file.
+ */
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations rockchip_vpu_fops = {
+ .owner = THIS_MODULE,
+ .open = rockchip_vpu_open,
+ .release = rockchip_vpu_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct of_device_id of_rockchip_vpu_match[] = {
+ { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
+ { .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_rockchip_vpu_match);
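+
+/*
+ * Matching device tree node sketch; the "vepu" interrupt name is what
+ * probe() looks up, while the address, interrupt specifier and clock
+ * names are illustrative:
+ *
+ *	vpu: video-codec@ff650000 {
+ *		compatible = "rockchip,rk3399-vpu";
+ *		reg = <0x0 0xff650000 0x0 0x800>;
+ *		interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ *		interrupt-names = "vepu";
+ *		clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
+ *		clock-names = "aclk", "hclk";
+ *	};
+ */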
+
+static int rockchip_vpu_video_device_register(struct rockchip_vpu_dev *vpu)
+{
+ const struct of_device_id *match;
+ struct video_device *vfd;
+ int function, ret;
+
+ match = of_match_node(of_rockchip_vpu_match, vpu->dev->of_node);
+ vfd = video_device_alloc();
+ if (!vfd) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ vfd->fops = &rockchip_vpu_fops;
+ vfd->release = video_device_release;
+ vfd->lock = &vpu->vpu_mutex;
+ vfd->v4l2_dev = &vpu->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ vfd->ioctl_ops = &rockchip_vpu_enc_ioctl_ops;
+ snprintf(vfd->name, sizeof(vfd->name), "%s-enc", match->compatible);
+ vpu->vfd_enc = vfd;
+ video_set_drvdata(vfd, vpu);
+
+ ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
+ goto err_free_dev;
+ }
+ v4l2_info(&vpu->v4l2_dev, "registered as /dev/video%d\n", vfd->num);
+
+ function = MEDIA_ENT_F_PROC_VIDEO_ENCODER;
+ ret = v4l2_m2m_register_media_controller(vpu->m2m_dev, vfd, function);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem media controller\n");
+ goto err_unreg_video;
+ }
+ return 0;
+
+err_unreg_video:
+ video_unregister_device(vfd);
+err_free_dev:
+ video_device_release(vfd);
+ return ret;
+}
+
+static int rockchip_vpu_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct rockchip_vpu_dev *vpu;
+ struct resource *res;
+ int i, ret;
+
+ vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
+ if (!vpu)
+ return -ENOMEM;
+
+ vpu->dev = &pdev->dev;
+ vpu->pdev = pdev;
+ mutex_init(&vpu->vpu_mutex);
+ spin_lock_init(&vpu->irqlock);
+
+ match = of_match_node(of_rockchip_vpu_match, pdev->dev.of_node);
+ vpu->variant = match->data;
+
+ INIT_DELAYED_WORK(&vpu->watchdog_work, rockchip_vpu_watchdog);
+
+ for (i = 0; i < vpu->variant->num_clocks; i++)
+ vpu->clocks[i].id = vpu->variant->clk_names[i];
+ ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
+ vpu->clocks);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0);
+ vpu->base = devm_ioremap_resource(vpu->dev, res);
+ if (IS_ERR(vpu->base))
+ return PTR_ERR(vpu->base);
+ vpu->enc_base = vpu->base + vpu->variant->enc_offset;
+
+ ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
+ return ret;
+ }
+
+ if (vpu->variant->vepu_irq) {
+ int irq;
+
+ irq = platform_get_irq_byname(vpu->pdev, "vepu");
+ if (irq <= 0) {
+ dev_err(vpu->dev, "Could not get vepu IRQ.\n");
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(vpu->dev, irq, vpu->variant->vepu_irq,
+ 0, dev_name(vpu->dev), vpu);
+ if (ret) {
+ dev_err(vpu->dev, "Could not request vepu IRQ.\n");
+ return ret;
+ }
+ }
+
+ ret = vpu->variant->init(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init VPU hardware\n");
+ return ret;
+ }
+
+ pm_runtime_set_autosuspend_delay(vpu->dev, 100);
+ pm_runtime_use_autosuspend(vpu->dev);
+ pm_runtime_enable(vpu->dev);
+
+ ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare clocks\n");
+ return ret;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ goto err_clk_unprepare;
+ }
+ platform_set_drvdata(pdev, vpu);
+
+ vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
+ if (IS_ERR(vpu->m2m_dev)) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(vpu->m2m_dev);
+ goto err_v4l2_unreg;
+ }
+
+ vpu->mdev.dev = vpu->dev;
+ strlcpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
+ media_device_init(&vpu->mdev);
+ vpu->v4l2_dev.mdev = &vpu->mdev;
+
+ ret = rockchip_vpu_video_device_register(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register encoder\n");
+ goto err_m2m_rel;
+ }
+
+ ret = media_device_register(&vpu->mdev);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
+ goto err_video_dev_unreg;
+ }
+ return 0;
+err_video_dev_unreg:
+ if (vpu->vfd_enc) {
+ video_unregister_device(vpu->vfd_enc);
+ video_device_release(vpu->vfd_enc);
+ }
+err_m2m_rel:
+ v4l2_m2m_release(vpu->m2m_dev);
+err_v4l2_unreg:
+ v4l2_device_unregister(&vpu->v4l2_dev);
+err_clk_unprepare:
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ pm_runtime_disable(vpu->dev);
+ return ret;
+}
+
+static int rockchip_vpu_remove(struct platform_device *pdev)
+{
+ struct rockchip_vpu_dev *vpu = platform_get_drvdata(pdev);
+
+ v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
+
+ media_device_unregister(&vpu->mdev);
+ v4l2_m2m_unregister_media_controller(vpu->m2m_dev);
+ v4l2_m2m_release(vpu->m2m_dev);
+ media_device_cleanup(&vpu->mdev);
+ if (vpu->vfd_enc) {
+ video_unregister_device(vpu->vfd_enc);
+ video_device_release(vpu->vfd_enc);
+ }
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ pm_runtime_disable(vpu->dev);
+ return 0;
+}
+
+static const struct dev_pm_ops rockchip_vpu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver rockchip_vpu_driver = {
+ .probe = rockchip_vpu_probe,
+ .remove = rockchip_vpu_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(of_rockchip_vpu_match),
+ .pm = &rockchip_vpu_pm_ops,
+ },
+};
+module_platform_driver(rockchip_vpu_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
+MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
+MODULE_DESCRIPTION("Rockchip VPU codec driver");
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
new file mode 100644
index 000000000000..ab0fb2053620
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_enc.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip VPU codec driver
+ *
+ * Copyright (C) 2018 Collabora, Ltd.
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <Alpha.Lin@rock-chips.com>
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-sg.h>
+
+#include "rockchip_vpu.h"
+#include "rockchip_vpu_hw.h"
+#include "rockchip_vpu_common.h"
+
+/**
+ * struct v4l2_format_info - information about a V4L2 format
+ * @format: 4CC format identifier (V4L2_PIX_FMT_*)
+ * @header_size: Size of header, optional and used by compressed formats
+ * @num_planes: Number of planes (1 to 3)
+ * @cpp: Number of bytes per pixel (per plane)
+ * @hsub: Horizontal chroma subsampling factor
+ * @vsub: Vertical chroma subsampling factor
+ * @is_compressed: Is it a compressed format?
+ * @multiplanar: Is it a multiplanar variant format? (e.g. NV12M)
+ */
+struct v4l2_format_info {
+ u32 format;
+ u32 header_size;
+ u8 num_planes;
+ u8 cpp[3];
+ u8 hsub;
+ u8 vsub;
+ u8 is_compressed;
+ u8 multiplanar;
+};
+
+static const struct v4l2_format_info *
+v4l2_format_info(u32 format)
+{
+ static const struct v4l2_format_info formats[] = {
+ { .format = V4L2_PIX_FMT_YUV420M, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .multiplanar = 1 },
+ { .format = V4L2_PIX_FMT_NV12M, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .multiplanar = 1 },
+ { .format = V4L2_PIX_FMT_YUYV, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ { .format = V4L2_PIX_FMT_UYVY, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+	vpu_err("Unsupported V4L2 4CC format (%08x)\n", format);
+ return NULL;
+}
+
+static void
+fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt,
+ int pixelformat, int width, int height)
+{
+ const struct v4l2_format_info *info;
+ struct v4l2_plane_pix_format *plane;
+ int i;
+
+ info = v4l2_format_info(pixelformat);
+ if (!info)
+ return;
+
+ pixfmt->width = width;
+ pixfmt->height = height;
+ pixfmt->pixelformat = pixelformat;
+
+ if (!info->multiplanar) {
+ pixfmt->num_planes = 1;
+ plane = &pixfmt->plane_fmt[0];
+ plane->bytesperline = info->is_compressed ?
+ 0 : width * info->cpp[0];
+ plane->sizeimage = info->header_size;
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int hsub = (i == 0) ? 1 : info->hsub;
+ unsigned int vsub = (i == 0) ? 1 : info->vsub;
+
+ plane->sizeimage += info->cpp[i] *
+ DIV_ROUND_UP(width, hsub) *
+ DIV_ROUND_UP(height, vsub);
+ }
+ } else {
+ pixfmt->num_planes = info->num_planes;
+ for (i = 0; i < info->num_planes; i++) {
+ unsigned int hsub = (i == 0) ? 1 : info->hsub;
+ unsigned int vsub = (i == 0) ? 1 : info->vsub;
+
+ plane = &pixfmt->plane_fmt[i];
+ plane->bytesperline =
+ info->cpp[i] * DIV_ROUND_UP(width, hsub);
+ plane->sizeimage =
+ plane->bytesperline * DIV_ROUND_UP(height, vsub);
+ }
+ }
+}
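+
+/*
+ * Worked example: V4L2_PIX_FMT_NV12M at 1280x720 (hsub = vsub = 2,
+ * cpp = { 1, 2 }) produces two planes:
+ *
+ *   plane 0: bytesperline = 1280, sizeimage = 1280 * 720 = 921600
+ *   plane 1: bytesperline = 2 * DIV_ROUND_UP(1280, 2) = 1280,
+ *	      sizeimage = 1280 * DIV_ROUND_UP(720, 2) = 460800
+ */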
+
+static const struct rockchip_vpu_fmt *
+rockchip_vpu_find_format(struct rockchip_vpu_ctx *ctx, u32 fourcc)
+{
+ struct rockchip_vpu_dev *dev = ctx->dev;
+ const struct rockchip_vpu_fmt *formats;
+ unsigned int num_fmts, i;
+
+ formats = dev->variant->enc_fmts;
+ num_fmts = dev->variant->num_enc_fmts;
+ for (i = 0; i < num_fmts; i++)
+ if (formats[i].fourcc == fourcc)
+ return &formats[i];
+ return NULL;
+}
+
+static const struct rockchip_vpu_fmt *
+rockchip_vpu_get_default_fmt(struct rockchip_vpu_ctx *ctx, bool bitstream)
+{
+ struct rockchip_vpu_dev *dev = ctx->dev;
+ const struct rockchip_vpu_fmt *formats;
+ unsigned int num_fmts, i;
+
+ formats = dev->variant->enc_fmts;
+ num_fmts = dev->variant->num_enc_fmts;
+ for (i = 0; i < num_fmts; i++) {
+ if (bitstream == (formats[i].codec_mode != RK_VPU_MODE_NONE))
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct rockchip_vpu_dev *vpu = video_drvdata(file);
+
+ strscpy(cap->driver, vpu->dev->driver->name, sizeof(cap->driver));
+ strscpy(cap->card, vpu->vfd_enc->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform: %s",
+ vpu->dev->driver->name);
+ return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
+ const struct rockchip_vpu_fmt *fmt;
+
+ if (fsize->index != 0) {
+ vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
+ fsize->index);
+ return -EINVAL;
+ }
+
+ fmt = rockchip_vpu_find_format(ctx, fsize->pixel_format);
+ if (!fmt) {
+ vpu_debug(0, "unsupported bitstream format (%08x)\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ /* This only makes sense for coded formats */
+ if (fmt->codec_mode == RK_VPU_MODE_NONE)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = fmt->frmsize;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct rockchip_vpu_dev *dev = video_drvdata(file);
+ const struct rockchip_vpu_fmt *fmt;
+ const struct rockchip_vpu_fmt *formats;
+ int num_fmts, i, j = 0;
+
+ formats = dev->variant->enc_fmts;
+ num_fmts = dev->variant->num_enc_fmts;
+ for (i = 0; i < num_fmts; i++) {
+ /* Skip uncompressed formats */
+ if (formats[i].codec_mode == RK_VPU_MODE_NONE)
+ continue;
+ if (j == f->index) {
+ fmt = &formats[i];
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_out_mplane(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct rockchip_vpu_dev *dev = video_drvdata(file);
+ const struct rockchip_vpu_fmt *formats;
+ const struct rockchip_vpu_fmt *fmt;
+ int num_fmts, i, j = 0;
+
+ formats = dev->variant->enc_fmts;
+ num_fmts = dev->variant->num_enc_fmts;
+ for (i = 0; i < num_fmts; i++) {
+ if (formats[i].codec_mode != RK_VPU_MODE_NONE)
+ continue;
+ if (j == f->index) {
+ fmt = &formats[i];
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+ return -EINVAL;
+}
+
+static int vidioc_g_fmt_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
+
+ vpu_debug(4, "f->type = %d\n", f->type);
+
+ *pix_mp = ctx->src_fmt;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
+
+ vpu_debug(4, "f->type = %d\n", f->type);
+
+ *pix_mp = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int
+vidioc_try_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ const struct rockchip_vpu_fmt *fmt;
+
+ vpu_debug(4, "%c%c%c%c\n",
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
+ fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
+ if (!fmt) {
+ fmt = rockchip_vpu_get_default_fmt(ctx, true);
+		pix_mp->pixelformat = fmt->fourcc;
+ }
+
+ pix_mp->num_planes = 1;
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->width = clamp(pix_mp->width,
+ fmt->frmsize.min_width,
+ fmt->frmsize.max_width);
+ pix_mp->height = clamp(pix_mp->height,
+ fmt->frmsize.min_height,
+ fmt->frmsize.max_height);
+ /* Round up to macroblocks. */
+ pix_mp->width = round_up(pix_mp->width, JPEG_MB_DIM);
+ pix_mp->height = round_up(pix_mp->height, JPEG_MB_DIM);
+
+ /*
+ * For compressed formats the application can specify
+ * sizeimage. If the application passes a zero sizeimage,
+ * let's default to the maximum frame size.
+ */
+ if (!pix_mp->plane_fmt[0].sizeimage)
+ pix_mp->plane_fmt[0].sizeimage = fmt->header_size +
+ pix_mp->width * pix_mp->height * fmt->max_depth;
+ memset(pix_mp->plane_fmt[0].reserved, 0,
+ sizeof(pix_mp->plane_fmt[0].reserved));
+ return 0;
+}
+
+static int
+vidioc_try_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ const struct rockchip_vpu_fmt *fmt;
+ unsigned int width, height;
+ int i;
+
+ vpu_debug(4, "%c%c%c%c\n",
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
+ fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
+ if (!fmt) {
+ fmt = rockchip_vpu_get_default_fmt(ctx, false);
+		pix_mp->pixelformat = fmt->fourcc;
+ }
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ width = clamp(pix_mp->width,
+ ctx->vpu_dst_fmt->frmsize.min_width,
+ ctx->vpu_dst_fmt->frmsize.max_width);
+ height = clamp(pix_mp->height,
+ ctx->vpu_dst_fmt->frmsize.min_height,
+ ctx->vpu_dst_fmt->frmsize.max_height);
+ /* Round up to macroblocks. */
+ width = round_up(width, JPEG_MB_DIM);
+ height = round_up(height, JPEG_MB_DIM);
+
+ /* Fill remaining fields */
+ fill_pixfmt_mp(pix_mp, fmt->fourcc, width, height);
+
+ for (i = 0; i < pix_mp->num_planes; i++) {
+ memset(pix_mp->plane_fmt[i].reserved, 0,
+ sizeof(pix_mp->plane_fmt[i].reserved));
+ }
+ return 0;
+}
+
+void rockchip_vpu_enc_reset_dst_fmt(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx)
+{
+ struct v4l2_pix_format_mplane *fmt = &ctx->dst_fmt;
+
+ ctx->vpu_dst_fmt = rockchip_vpu_get_default_fmt(ctx, true);
+
+ memset(fmt, 0, sizeof(*fmt));
+
+ fmt->num_planes = 1;
+ fmt->width = clamp(fmt->width, ctx->vpu_dst_fmt->frmsize.min_width,
+ ctx->vpu_dst_fmt->frmsize.max_width);
+ fmt->height = clamp(fmt->height, ctx->vpu_dst_fmt->frmsize.min_height,
+ ctx->vpu_dst_fmt->frmsize.max_height);
+ fmt->pixelformat = ctx->vpu_dst_fmt->fourcc;
+ fmt->field = V4L2_FIELD_NONE;
+	fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ fmt->plane_fmt[0].sizeimage = ctx->vpu_dst_fmt->header_size +
+ fmt->width * fmt->height * ctx->vpu_dst_fmt->max_depth;
+}
+
+void rockchip_vpu_enc_reset_src_fmt(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx)
+{
+ struct v4l2_pix_format_mplane *fmt = &ctx->src_fmt;
+ unsigned int width, height;
+
+ ctx->vpu_src_fmt = rockchip_vpu_get_default_fmt(ctx, false);
+
+ memset(fmt, 0, sizeof(*fmt));
+
+ width = clamp(fmt->width, ctx->vpu_dst_fmt->frmsize.min_width,
+ ctx->vpu_dst_fmt->frmsize.max_width);
+ height = clamp(fmt->height, ctx->vpu_dst_fmt->frmsize.min_height,
+ ctx->vpu_dst_fmt->frmsize.max_height);
+ fmt->field = V4L2_FIELD_NONE;
+	fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+ fill_pixfmt_mp(fmt, ctx->vpu_src_fmt->fourcc, width, height);
+}
+
+static int
+vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *vq;
+ int ret;
+
+ /* Change not allowed if queue is streaming. */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq))
+ return -EBUSY;
+
+ ret = vidioc_try_fmt_out_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ ctx->vpu_src_fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
+ ctx->src_fmt = *pix_mp;
+
+ /* Propagate to the CAPTURE format */
+ ctx->dst_fmt.colorspace = pix_mp->colorspace;
+ ctx->dst_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->dst_fmt.xfer_func = pix_mp->xfer_func;
+ ctx->dst_fmt.quantization = pix_mp->quantization;
+ ctx->dst_fmt.width = pix_mp->width;
+ ctx->dst_fmt.height = pix_mp->height;
+
+ vpu_debug(0, "OUTPUT codec mode: %d\n", ctx->vpu_src_fmt->codec_mode);
+ vpu_debug(0, "fmt - w: %d, h: %d, mb - w: %d, h: %d\n",
+ pix_mp->width, pix_mp->height,
+ JPEG_MB_WIDTH(pix_mp->width),
+ JPEG_MB_HEIGHT(pix_mp->height));
+ return 0;
+}
+
+static int
+vidioc_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct rockchip_vpu_ctx *ctx = fh_to_ctx(priv);
+ struct rockchip_vpu_dev *vpu = ctx->dev;
+ struct vb2_queue *vq, *peer_vq;
+ int ret;
+
+ /* Change not allowed if queue is streaming. */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (vb2_is_streaming(vq))
+ return -EBUSY;
+
+ /*
+ * Since format change on the CAPTURE queue will reset
+ * the OUTPUT queue, we can't allow doing so
+ * when the OUTPUT queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (vb2_is_busy(peer_vq) &&
+ (pix_mp->pixelformat != ctx->dst_fmt.pixelformat ||
+ pix_mp->height != ctx->dst_fmt.height ||
+ pix_mp->width != ctx->dst_fmt.width))
+ return -EBUSY;
+
+ ret = vidioc_try_fmt_cap_mplane(file, priv, f);
+ if (ret)
+ return ret;
+
+ ctx->vpu_dst_fmt = rockchip_vpu_find_format(ctx, pix_mp->pixelformat);
+ ctx->dst_fmt = *pix_mp;
+
+ vpu_debug(0, "CAPTURE codec mode: %d\n", ctx->vpu_dst_fmt->codec_mode);
+ vpu_debug(0, "fmt - w: %d, h: %d, mb - w: %d, h: %d\n",
+ pix_mp->width, pix_mp->height,
+ JPEG_MB_WIDTH(pix_mp->width),
+ JPEG_MB_HEIGHT(pix_mp->height));
+
+ /*
+	 * The current raw format might have become invalid with the newly
+	 * selected codec, so reset it to the default just to be safe and
+	 * keep the internal driver state sane. The user is required to set
+	 * the raw format again after we return, so we don't need anything
+	 * smarter.
+ */
+ rockchip_vpu_enc_reset_src_fmt(vpu, ctx);
+ return 0;
+}
+
+const struct v4l2_ioctl_ops rockchip_vpu_enc_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_out_mplane,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_out_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_cap_mplane,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_cap_mplane,
+ .vidioc_enum_fmt_vid_out_mplane = vidioc_enum_fmt_vid_out_mplane,
+ .vidioc_enum_fmt_vid_cap_mplane = vidioc_enum_fmt_vid_cap_mplane,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+};
+
+static int
+rockchip_vpu_queue_setup(struct vb2_queue *vq,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *pixfmt;
+ int i;
+
+ switch (vq->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ pixfmt = &ctx->dst_fmt;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pixfmt = &ctx->src_fmt;
+ break;
+ default:
+ vpu_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+
+ if (*num_planes) {
+ if (*num_planes != pixfmt->num_planes)
+ return -EINVAL;
+ for (i = 0; i < pixfmt->num_planes; ++i)
+ if (sizes[i] < pixfmt->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ return 0;
+ }
+
+ *num_planes = pixfmt->num_planes;
+ for (i = 0; i < pixfmt->num_planes; ++i)
+ sizes[i] = pixfmt->plane_fmt[i].sizeimage;
+ return 0;
+}
+
+static int rockchip_vpu_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *pixfmt;
+ unsigned int sz;
+ int ret = 0;
+ int i;
+
+ switch (vq->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ pixfmt = &ctx->dst_fmt;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pixfmt = &ctx->src_fmt;
+
+ if (vbuf->field == V4L2_FIELD_ANY)
+ vbuf->field = V4L2_FIELD_NONE;
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ vpu_debug(4, "field %d not supported\n",
+ vbuf->field);
+ return -EINVAL;
+ }
+ break;
+ default:
+ vpu_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < pixfmt->num_planes; ++i) {
+ sz = pixfmt->plane_fmt[i].sizeimage;
+		vpu_debug(4, "plane %d size: %lu, sizeimage: %u\n",
+ i, vb2_plane_size(vb, i), sz);
+ if (vb2_plane_size(vb, i) < sz) {
+ vpu_err("plane %d is too small\n", i);
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void rockchip_vpu_buf_queue(struct vb2_buffer *vb)
+{
+ struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static int rockchip_vpu_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(q);
+ enum rockchip_vpu_codec_mode codec_mode;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->sequence_out = 0;
+ else
+ ctx->sequence_cap = 0;
+
+ /* Set codec_ops for the chosen destination format */
+ codec_mode = ctx->vpu_dst_fmt->codec_mode;
+
+ vpu_debug(4, "Codec mode = %d\n", codec_mode);
+ ctx->codec_ops = &ctx->dev->variant->codec_ops[codec_mode];
+
+ /* A bounce buffer is needed for the JPEG payload */
+ if (!V4L2_TYPE_IS_OUTPUT(q->type)) {
+ ctx->bounce_size = ctx->dst_fmt.plane_fmt[0].sizeimage -
+ ctx->vpu_dst_fmt->header_size;
+ ctx->bounce_buf = dma_alloc_attrs(ctx->dev->dev,
+ ctx->bounce_size,
+ &ctx->bounce_dma_addr,
+ GFP_KERNEL,
+ DMA_ATTR_ALLOC_SINGLE_PAGES);
+ }
+ return 0;
+}
+
+static void rockchip_vpu_stop_streaming(struct vb2_queue *q)
+{
+ struct rockchip_vpu_ctx *ctx = vb2_get_drv_priv(q);
+
+ if (!V4L2_TYPE_IS_OUTPUT(q->type))
+ dma_free_attrs(ctx->dev->dev,
+ ctx->bounce_size,
+ ctx->bounce_buf,
+ ctx->bounce_dma_addr,
+ DMA_ATTR_ALLOC_SINGLE_PAGES);
+
+ /*
+ * The mem2mem framework calls v4l2_m2m_cancel_job before
+ * .stop_streaming, so there isn't any job running and
+ * it is safe to return all the buffers.
+ */
+ for (;;) {
+ struct vb2_v4l2_buffer *vbuf;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ break;
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+const struct vb2_ops rockchip_vpu_enc_queue_ops = {
+ .queue_setup = rockchip_vpu_queue_setup,
+ .buf_prepare = rockchip_vpu_buf_prepare,
+ .buf_queue = rockchip_vpu_buf_queue,
+ .start_streaming = rockchip_vpu_start_streaming,
+ .stop_streaming = rockchip_vpu_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h
new file mode 100644
index 000000000000..2b955da1be1a
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_hw.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Rockchip VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef ROCKCHIP_VPU_HW_H_
+#define ROCKCHIP_VPU_HW_H_
+
+#include <linux/interrupt.h>
+#include <linux/v4l2-controls.h>
+#include <media/videobuf2-core.h>
+
+struct rockchip_vpu_dev;
+struct rockchip_vpu_ctx;
+struct rockchip_vpu_buf;
+struct rockchip_vpu_variant;
+
+/**
+ * struct rockchip_vpu_codec_ops - codec mode specific operations
+ *
+ * @run:	Start single {en,de}coding job. Called from atomic context
+ * to indicate that a pair of buffers is ready and the hardware
+ * should be programmed and started.
+ * @done: Read back processing results and additional data from hardware.
+ * @reset: Reset the hardware in case of a timeout.
+ */
+struct rockchip_vpu_codec_ops {
+ void (*run)(struct rockchip_vpu_ctx *ctx);
+	void (*done)(struct rockchip_vpu_ctx *ctx, enum vb2_buffer_state result);
+ void (*reset)(struct rockchip_vpu_ctx *ctx);
+};
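
For context, these ops are selected at stream start from the variant's codec_ops table, indexed by the chosen codec mode. A sketch of how a variant might populate that table; the RK_VPU_MODE_JPEG_ENC enumerator and the done/reset helpers are assumptions for illustration (only rk3288_vpu_jpeg_enc_run is declared below).

/* Hypothetical wiring; the enumerator and helper names are assumed. */
static const struct rockchip_vpu_codec_ops rk3288_vpu_codec_ops[] = {
	[RK_VPU_MODE_JPEG_ENC] = {
		.run	= rk3288_vpu_jpeg_enc_run,
		.done	= rk3288_vpu_jpeg_enc_done,
		.reset	= rk3288_vpu_enc_reset,
	},
};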
+
+/**
+ * enum rockchip_vpu_enc_fmt - source format ID for hardware registers.
+ */
+enum rockchip_vpu_enc_fmt {
+ RK3288_VPU_ENC_FMT_YUV420P = 0,
+ RK3288_VPU_ENC_FMT_YUV420SP = 1,
+ RK3288_VPU_ENC_FMT_YUYV422 = 2,
+ RK3288_VPU_ENC_FMT_UYVY422 = 3,
+};
+
+extern const struct rockchip_vpu_variant rk3399_vpu_variant;
+extern const struct rockchip_vpu_variant rk3288_vpu_variant;
+
+void rockchip_vpu_watchdog(struct work_struct *work);
+void rockchip_vpu_run(struct rockchip_vpu_ctx *ctx);
+void rockchip_vpu_irq_done(struct rockchip_vpu_dev *vpu,
+ unsigned int bytesused,
+ enum vb2_buffer_state result);
+
+void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx);
+void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx);
+
+#endif /* ROCKCHIP_VPU_HW_H_ */
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.c b/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.c
new file mode 100644
index 000000000000..0ff0badc1f7a
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) Collabora, Ltd.
+ *
+ * Based on GSPCA and CODA drivers:
+ * Copyright (C) Jean-Francois Moine (http://moinejf.free.fr)
+ * Copyright (C) 2014 Philipp Zabel, Pengutronix
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "rockchip_vpu_jpeg.h"
+
+#define LUMA_QUANT_OFF 7
+#define CHROMA_QUANT_OFF 72
+#define HEIGHT_OFF 141
+#define WIDTH_OFF 143
+
+#define HUFF_LUMA_DC_OFF 160
+#define HUFF_LUMA_AC_OFF 193
+#define HUFF_CHROMA_DC_OFF 376
+#define HUFF_CHROMA_AC_OFF 409
+
+/* Default tables from JPEG ITU-T.81
+ * (ISO/IEC 10918-1), Annex K.
+ */
+static const unsigned char luma_q_table[] = {
+ 0x10, 0x0b, 0x0a, 0x10, 0x7c, 0x8c, 0x97, 0xa1,
+ 0x0c, 0x0c, 0x0e, 0x13, 0x7e, 0x9e, 0xa0, 0x9b,
+ 0x0e, 0x0d, 0x10, 0x18, 0x8c, 0x9d, 0xa9, 0x9c,
+ 0x0e, 0x11, 0x16, 0x1d, 0x97, 0xbb, 0xb4, 0xa2,
+ 0x12, 0x16, 0x25, 0x38, 0xa8, 0x6d, 0x67, 0xb1,
+ 0x18, 0x23, 0x37, 0x40, 0xb5, 0x68, 0x71, 0xc0,
+ 0x31, 0x40, 0x4e, 0x57, 0x67, 0x79, 0x78, 0x65,
+ 0x48, 0x5c, 0x5f, 0x62, 0x70, 0x64, 0x67, 0xc7,
+};
+
+static const unsigned char chroma_q_table[] = {
+ 0x11, 0x12, 0x18, 0x2f, 0x63, 0x63, 0x63, 0x63,
+ 0x12, 0x15, 0x1a, 0x42, 0x63, 0x63, 0x63, 0x63,
+ 0x18, 0x1a, 0x38, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x2f, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
+};
+
+/* Huffman tables are shared with CODA */
+static const unsigned char luma_dc_table[] = {
+ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b,
+};
+
+static const unsigned char chroma_dc_table[] = {
+ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b,
+};
+
+static const unsigned char luma_ac_table[] = {
+ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
+ 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
+ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+ 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+ 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+ 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+ 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+ 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+ 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+ 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+ 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+ 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+ 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+ 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa,
+};
+
+static const unsigned char chroma_ac_table[] = {
+ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
+ 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
+ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
+ 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
+ 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
+ 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
+ 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
+ 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
+ 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
+ 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
+ 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
+ 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
+ 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
+ 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
+ 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
+ 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa,
+};
+
+/* For simplicity, we keep a pre-formatted JPEG header,
+ * and use fixed offsets to patch the width, height,
+ * quantization tables, etc.
+ */
+static const unsigned char rockchip_vpu_jpeg_header[JPEG_HEADER_SIZE] = {
+ /* SOI */
+ 0xff, 0xd8,
+
+ /* DQT */
+ 0xff, 0xdb, 0x00, 0x84,
+
+ 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* SOF */
+ 0xff, 0xc0, 0x00, 0x11, 0x08, 0x00, 0xf0, 0x01,
+ 0x40, 0x03, 0x01, 0x22, 0x00, 0x02, 0x11, 0x01,
+ 0x03, 0x11, 0x01,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0x1f, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0xb5, 0x10,
+
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0x1f, 0x01,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0xb5, 0x11,
+
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* SOS */
+ 0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02,
+ 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00,
+};
+
+static void
+jpeg_scale_quant_table(unsigned char *q_tab,
+ const unsigned char *tab, int scale)
+{
+ unsigned int temp;
+ int i;
+
+ for (i = 0; i < 64; i++) {
+ temp = DIV_ROUND_CLOSEST((unsigned int)tab[i] * scale, 100);
+		if (temp == 0)
+ temp = 1;
+ if (temp > 255)
+ temp = 255;
+ q_tab[i] = (unsigned char)temp;
+ }
+}
+
+static void jpeg_set_quality(unsigned char *buffer, int quality)
+{
+ int scale;
+
+	/*
+	 * Non-linear scaling factor:
+	 * [5,49] -> [1000..102], [50,100] -> [100..0]
+	 */
+ if (quality < 50)
+ scale = 5000 / quality;
+ else
+ scale = 200 - 2 * quality;
+
+ jpeg_scale_quant_table(buffer + LUMA_QUANT_OFF,
+ luma_q_table, scale);
+ jpeg_scale_quant_table(buffer + CHROMA_QUANT_OFF,
+ chroma_q_table, scale);
+}
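
To make the mapping above concrete, here is a small standalone sketch (plain C, with a simplified DIV_ROUND_CLOSEST that is only valid for non-negative operands) tabulating a few quality values. Note that quality 50 yields scale 100, which leaves the tables unchanged, and that the driver additionally clamps each scaled entry to [1, 255].

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int quality;

	for (quality = 10; quality <= 100; quality += 20) {
		int scale = quality < 50 ? 5000 / quality : 200 - 2 * quality;

		/* How the first luma table entry (0x10 == 16) scales: */
		printf("quality %3d -> scale %4d -> 16 becomes %u\n",
		       quality, scale, DIV_ROUND_CLOSEST(16u * scale, 100));
	}
	return 0;
}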
+
+unsigned char *
+rockchip_vpu_jpeg_get_qtable(struct rockchip_vpu_jpeg_ctx *ctx, int index)
+{
+ if (index == 0)
+ return ctx->buffer + LUMA_QUANT_OFF;
+ return ctx->buffer + CHROMA_QUANT_OFF;
+}
+
+void rockchip_vpu_jpeg_header_assemble(struct rockchip_vpu_jpeg_ctx *ctx)
+{
+	unsigned char *buf = ctx->buffer;
+
+ memcpy(buf, rockchip_vpu_jpeg_header,
+ sizeof(rockchip_vpu_jpeg_header));
+
+ buf[HEIGHT_OFF + 0] = ctx->height >> 8;
+ buf[HEIGHT_OFF + 1] = ctx->height;
+ buf[WIDTH_OFF + 0] = ctx->width >> 8;
+ buf[WIDTH_OFF + 1] = ctx->width;
+
+ memcpy(buf + HUFF_LUMA_DC_OFF, luma_dc_table, sizeof(luma_dc_table));
+ memcpy(buf + HUFF_LUMA_AC_OFF, luma_ac_table, sizeof(luma_ac_table));
+ memcpy(buf + HUFF_CHROMA_DC_OFF, chroma_dc_table,
+ sizeof(chroma_dc_table));
+ memcpy(buf + HUFF_CHROMA_AC_OFF, chroma_ac_table,
+ sizeof(chroma_ac_table));
+
+ jpeg_set_quality(buf, ctx->quality);
+}
diff --git a/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h b/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h
new file mode 100644
index 000000000000..72645d8e2ade
--- /dev/null
+++ b/drivers/staging/media/rockchip/vpu/rockchip_vpu_jpeg.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef ROCKCHIP_VPU_JPEG_H_
+#define ROCKCHIP_VPU_JPEG_H_
+
+#define JPEG_HEADER_SIZE	601
+
+struct rockchip_vpu_jpeg_ctx {
+ int width;
+ int height;
+ int quality;
+ unsigned char *buffer;
+};
+
+unsigned char *
+rockchip_vpu_jpeg_get_qtable(struct rockchip_vpu_jpeg_ctx *ctx, int index);
+void rockchip_vpu_jpeg_header_assemble(struct rockchip_vpu_jpeg_ctx *ctx);
+
+#endif /* ROCKCHIP_VPU_JPEG_H_ */
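
A minimal usage sketch of this small API; the caller, the destination buffer, and the parameter values are hypothetical.

#include "rockchip_vpu_jpeg.h"

static void example_build_header(unsigned char *dst /* >= JPEG_HEADER_SIZE */)
{
	struct rockchip_vpu_jpeg_ctx jpeg_ctx = {
		.width = 1280,
		.height = 720,
		.quality = 80,
		.buffer = dst,
	};

	rockchip_vpu_jpeg_header_assemble(&jpeg_ctx);
	/* dst now holds SOI..SOS with patched dimensions and scaled tables */
}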
diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig
index a7a34e89c42d..3252efa422f9 100644
--- a/drivers/staging/media/sunxi/cedrus/Kconfig
+++ b/drivers/staging/media/sunxi/cedrus/Kconfig
@@ -3,6 +3,7 @@ config VIDEO_SUNXI_CEDRUS
depends on VIDEO_DEV && VIDEO_V4L2 && MEDIA_CONTROLLER
depends on HAS_DMA
depends on OF
+ depends on MEDIA_CONTROLLER_REQUEST_API
select SUNXI_SRAM
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
diff --git a/drivers/staging/media/sunxi/cedrus/TODO b/drivers/staging/media/sunxi/cedrus/TODO
index ec277ece47af..a951b3fd1ea1 100644
--- a/drivers/staging/media/sunxi/cedrus/TODO
+++ b/drivers/staging/media/sunxi/cedrus/TODO
@@ -5,3 +5,8 @@ Before this stateless decoder driver can leave the staging area:
* Userspace support for the Request API needs to be reviewed;
* Another stateless decoder driver should be submitted;
* At least one stateless encoder driver should be submitted.
+* When queueing a request containing references to I frames, the
+  refcount of the memory for those I frames needs to be incremented
+  when the request is queued and decremented when the request is
+  completed. This will likely require some help from vb2. The driver
+  should fail the request if the memory/buffer is gone.
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index dd121f66fa2d..ff11cbeba205 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -72,10 +72,11 @@ static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
ctrl_size = sizeof(ctrl) * CEDRUS_CONTROLS_COUNT + 1;
ctx->ctrls = kzalloc(ctrl_size, GFP_KERNEL);
- memset(ctx->ctrls, 0, ctrl_size);
+ if (!ctx->ctrls)
+ return -ENOMEM;
for (i = 0; i < CEDRUS_CONTROLS_COUNT; i++) {
- struct v4l2_ctrl_config cfg = { 0 };
+ struct v4l2_ctrl_config cfg = {};
cfg.elem_size = cedrus_controls[i].elem_size;
cfg.id = cedrus_controls[i].id;
@@ -108,17 +109,6 @@ static int cedrus_request_validate(struct media_request *req)
unsigned int count;
unsigned int i;
- count = vb2_request_buffer_cnt(req);
- if (!count) {
- v4l2_info(&ctx->dev->v4l2_dev,
- "No buffer was provided with the request\n");
- return -ENOENT;
- } else if (count > 1) {
- v4l2_info(&ctx->dev->v4l2_dev,
- "More than one buffer was provided with the request\n");
- return -EINVAL;
- }
-
list_for_each_entry(obj, &req->objects, list) {
struct vb2_buffer *vb;
@@ -133,6 +123,17 @@ static int cedrus_request_validate(struct media_request *req)
if (!ctx)
return -ENOENT;
+ count = vb2_request_buffer_cnt(req);
+ if (!count) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "No buffer was provided with the request\n");
+ return -ENOENT;
+ } else if (count > 1) {
+ v4l2_info(&ctx->dev->v4l2_dev,
+ "More than one buffer was provided with the request\n");
+ return -EINVAL;
+ }
+
parent_hdl = &ctx->hdl;
hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
@@ -279,7 +280,6 @@ static int cedrus_probe(struct platform_device *pdev)
dev->dec_ops[CEDRUS_CODEC_MPEG2] = &cedrus_dec_ops_mpeg2;
mutex_init(&dev->dev_mutex);
- spin_lock_init(&dev->irq_lock);
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret) {
@@ -388,6 +388,14 @@ static const struct cedrus_variant sun8i_h3_cedrus_variant = {
.capabilities = CEDRUS_CAPABILITY_UNTILED,
};
+static const struct cedrus_variant sun50i_a64_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+};
+
+static const struct cedrus_variant sun50i_h5_cedrus_variant = {
+ .capabilities = CEDRUS_CAPABILITY_UNTILED,
+};
+
static const struct of_device_id cedrus_dt_match[] = {
{
.compatible = "allwinner,sun4i-a10-video-engine",
@@ -409,6 +417,14 @@ static const struct of_device_id cedrus_dt_match[] = {
.compatible = "allwinner,sun8i-h3-video-engine",
.data = &sun8i_h3_cedrus_variant,
},
+ {
+ .compatible = "allwinner,sun50i-a64-video-engine",
+ .data = &sun50i_a64_cedrus_variant,
+ },
+ {
+ .compatible = "allwinner,sun50i-h5-video-engine",
+ .data = &sun50i_h5_cedrus_variant,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, cedrus_dt_match);
@@ -418,7 +434,6 @@ static struct platform_driver cedrus_driver = {
.remove = cedrus_remove,
.driver = {
.name = CEDRUS_NAME,
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(cedrus_dt_match),
},
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 3f61248c57ac..3acfdcf83691 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -105,8 +105,6 @@ struct cedrus_dev {
/* Device file mutex */
struct mutex dev_mutex;
- /* Interrupt spinlock */
- spinlock_t irq_lock;
void __iomem *base;
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index e40180a33951..591d191d4286 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -26,9 +26,8 @@ void cedrus_device_run(void *priv)
{
struct cedrus_ctx *ctx = priv;
struct cedrus_dev *dev = ctx->dev;
- struct cedrus_run run = { 0 };
+ struct cedrus_run run = {};
struct media_request *src_req;
- unsigned long flags;
run.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
run.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
@@ -39,8 +38,6 @@ void cedrus_device_run(void *priv)
if (src_req)
v4l2_ctrl_request_setup(src_req, &ctx->hdl);
- spin_lock_irqsave(&ctx->dev->irq_lock, flags);
-
switch (ctx->src_fmt.pixelformat) {
case V4L2_PIX_FMT_MPEG2_SLICE:
run.mpeg2.slice_params = cedrus_find_control_data(ctx,
@@ -55,16 +52,10 @@ void cedrus_device_run(void *priv)
dev->dec_ops[ctx->current_codec]->setup(ctx, &run);
- spin_unlock_irqrestore(&ctx->dev->irq_lock, flags);
-
/* Complete request(s) controls if needed. */
if (src_req)
v4l2_ctrl_request_complete(src_req, &ctx->hdl);
- spin_lock_irqsave(&ctx->dev->irq_lock, flags);
-
dev->dec_ops[ctx->current_codec]->trigger(ctx);
-
- spin_unlock_irqrestore(&ctx->dev->irq_lock, flags);
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index 32adbcbe6175..300339fee1bc 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -98,23 +98,6 @@ void cedrus_dst_format_set(struct cedrus_dev *dev,
}
}
-static irqreturn_t cedrus_bh(int irq, void *data)
-{
- struct cedrus_dev *dev = data;
- struct cedrus_ctx *ctx;
-
- ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
- if (!ctx) {
- v4l2_err(&dev->v4l2_dev,
- "Instance released before the end of transaction\n");
- return IRQ_HANDLED;
- }
-
- v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
-
- return IRQ_HANDLED;
-}
-
static irqreturn_t cedrus_irq(int irq, void *data)
{
struct cedrus_dev *dev = data;
@@ -122,24 +105,17 @@ static irqreturn_t cedrus_irq(int irq, void *data)
struct vb2_v4l2_buffer *src_buf, *dst_buf;
enum vb2_buffer_state state;
enum cedrus_irq_status status;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->irq_lock, flags);
ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
if (!ctx) {
v4l2_err(&dev->v4l2_dev,
"Instance released before the end of transaction\n");
- spin_unlock_irqrestore(&dev->irq_lock, flags);
-
return IRQ_NONE;
}
status = dev->dec_ops[ctx->current_codec]->irq_status(ctx);
- if (status == CEDRUS_IRQ_NONE) {
- spin_unlock_irqrestore(&dev->irq_lock, flags);
+ if (status == CEDRUS_IRQ_NONE)
return IRQ_NONE;
- }
dev->dec_ops[ctx->current_codec]->irq_disable(ctx);
dev->dec_ops[ctx->current_codec]->irq_clear(ctx);
@@ -150,8 +126,6 @@ static irqreturn_t cedrus_irq(int irq, void *data)
if (!src_buf || !dst_buf) {
v4l2_err(&dev->v4l2_dev,
"Missing source and/or destination buffers\n");
- spin_unlock_irqrestore(&dev->irq_lock, flags);
-
return IRQ_HANDLED;
}
@@ -163,9 +137,9 @@ static irqreturn_t cedrus_irq(int irq, void *data)
v4l2_m2m_buf_done(src_buf, state);
v4l2_m2m_buf_done(dst_buf, state);
- spin_unlock_irqrestore(&dev->irq_lock, flags);
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
- return IRQ_WAKE_THREAD;
+ return IRQ_HANDLED;
}
int cedrus_hw_probe(struct cedrus_dev *dev)
@@ -187,9 +161,8 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
return irq_dec;
}
- ret = devm_request_threaded_irq(dev->dev, irq_dec, cedrus_irq,
- cedrus_bh, 0, dev_name(dev->dev),
- dev);
+ ret = devm_request_irq(dev->dev, irq_dec, cedrus_irq,
+ 0, dev_name(dev->dev), dev);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to request IRQ\n");
@@ -255,10 +228,10 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
res = platform_get_resource(dev->pdev, IORESOURCE_MEM, 0);
dev->base = devm_ioremap_resource(dev->dev, res);
- if (!dev->base) {
+ if (IS_ERR(dev->base)) {
v4l2_err(&dev->v4l2_dev, "Failed to map registers\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(dev->base);
goto err_sram;
}
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index 5c5fce678b93..8721b4a7d496 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -380,18 +380,13 @@ static void cedrus_queue_cleanup(struct vb2_queue *vq, u32 state)
{
struct cedrus_ctx *ctx = vb2_get_drv_priv(vq);
struct vb2_v4l2_buffer *vbuf;
- unsigned long flags;
for (;;) {
- spin_lock_irqsave(&ctx->dev->irq_lock, flags);
-
if (V4L2_TYPE_IS_OUTPUT(vq->type))
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
else
vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
- spin_unlock_irqrestore(&ctx->dev->irq_lock, flags);
-
if (!vbuf)
return;
diff --git a/drivers/staging/media/tegra-vde/tegra-vde.c b/drivers/staging/media/tegra-vde/tegra-vde.c
index 6f06061a40d9..aa6c6bba961e 100644
--- a/drivers/staging/media/tegra-vde/tegra-vde.c
+++ b/drivers/staging/media/tegra-vde/tegra-vde.c
@@ -35,14 +35,6 @@
#define BSE_ICMDQUE_EMPTY BIT(3)
#define BSE_DMA_BUSY BIT(23)
-#define VDE_WR(__data, __addr) \
-do { \
- dev_dbg(vde->miscdev.parent, \
- "%s: %d: 0x%08X => " #__addr ")\n", \
- __func__, __LINE__, (u32)(__data)); \
- writel_relaxed(__data, __addr); \
-} while (0)
-
struct video_frame {
struct dma_buf_attachment *y_dmabuf_attachment;
struct dma_buf_attachment *cb_dmabuf_attachment;
@@ -81,12 +73,66 @@ struct tegra_vde {
u32 *iram;
};
+static __maybe_unused char const *
+tegra_vde_reg_base_name(struct tegra_vde *vde, void __iomem *base)
+{
+ if (vde->sxe == base)
+ return "SXE";
+
+ if (vde->bsev == base)
+ return "BSEV";
+
+ if (vde->mbe == base)
+ return "MBE";
+
+ if (vde->ppe == base)
+ return "PPE";
+
+ if (vde->mce == base)
+ return "MCE";
+
+ if (vde->tfe == base)
+ return "TFE";
+
+ if (vde->ppb == base)
+ return "PPB";
+
+ if (vde->vdma == base)
+ return "VDMA";
+
+ if (vde->frameid == base)
+ return "FRAMEID";
+
+ return "???";
+}
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+static void tegra_vde_writel(struct tegra_vde *vde,
+ u32 value, void __iomem *base, u32 offset)
+{
+ trace_vde_writel(vde, base, offset, value);
+
+ writel_relaxed(value, base + offset);
+}
+
+static u32 tegra_vde_readl(struct tegra_vde *vde,
+ void __iomem *base, u32 offset)
+{
+ u32 value = readl_relaxed(base + offset);
+
+ trace_vde_readl(vde, base, offset, value);
+
+ return value;
+}
+
static void tegra_vde_set_bits(struct tegra_vde *vde,
- u32 mask, void __iomem *regs)
+ u32 mask, void __iomem *base, u32 offset)
{
- u32 value = readl_relaxed(regs);
+ u32 value = tegra_vde_readl(vde, base, offset);
- VDE_WR(value | mask, regs);
+ tegra_vde_writel(vde, value | mask, base, offset);
}
static int tegra_vde_wait_mbe(struct tegra_vde *vde)
@@ -107,8 +153,8 @@ static int tegra_vde_setup_mbe_frame_idx(struct tegra_vde *vde,
unsigned int idx;
int err;
- VDE_WR(0xD0000000 | (0 << 23), vde->mbe + 0x80);
- VDE_WR(0xD0200000 | (0 << 23), vde->mbe + 0x80);
+ tegra_vde_writel(vde, 0xD0000000 | (0 << 23), vde->mbe, 0x80);
+ tegra_vde_writel(vde, 0xD0200000 | (0 << 23), vde->mbe, 0x80);
err = tegra_vde_wait_mbe(vde);
if (err)
@@ -118,8 +164,10 @@ static int tegra_vde_setup_mbe_frame_idx(struct tegra_vde *vde,
return 0;
for (idx = 0, frame_idx = 1; idx < refs_nb; idx++, frame_idx++) {
- VDE_WR(0xD0000000 | (frame_idx << 23), vde->mbe + 0x80);
- VDE_WR(0xD0200000 | (frame_idx << 23), vde->mbe + 0x80);
+ tegra_vde_writel(vde, 0xD0000000 | (frame_idx << 23),
+ vde->mbe, 0x80);
+ tegra_vde_writel(vde, 0xD0200000 | (frame_idx << 23),
+ vde->mbe, 0x80);
frame_idx_enb_mask |= frame_idx << (6 * (idx % 4));
@@ -128,7 +176,7 @@ static int tegra_vde_setup_mbe_frame_idx(struct tegra_vde *vde,
value |= (idx >> 2) << 24;
value |= frame_idx_enb_mask;
- VDE_WR(value, vde->mbe + 0x80);
+ tegra_vde_writel(vde, value, vde->mbe, 0x80);
err = tegra_vde_wait_mbe(vde);
if (err)
@@ -143,8 +191,10 @@ static int tegra_vde_setup_mbe_frame_idx(struct tegra_vde *vde,
static void tegra_vde_mbe_set_0xa_reg(struct tegra_vde *vde, int reg, u32 val)
{
- VDE_WR(0xA0000000 | (reg << 24) | (val & 0xFFFF), vde->mbe + 0x80);
- VDE_WR(0xA0000000 | ((reg + 1) << 24) | (val >> 16), vde->mbe + 0x80);
+ tegra_vde_writel(vde, 0xA0000000 | (reg << 24) | (val & 0xFFFF),
+ vde->mbe, 0x80);
+ tegra_vde_writel(vde, 0xA0000000 | ((reg + 1) << 24) | (val >> 16),
+ vde->mbe, 0x80);
}
static int tegra_vde_wait_bsev(struct tegra_vde *vde, bool wait_dma)
@@ -183,7 +233,7 @@ static int tegra_vde_wait_bsev(struct tegra_vde *vde, bool wait_dma)
static int tegra_vde_push_to_bsev_icmdqueue(struct tegra_vde *vde,
u32 value, bool wait_dma)
{
- VDE_WR(value, vde->bsev + ICMDQUE_WR);
+ tegra_vde_writel(vde, value, vde->bsev, ICMDQUE_WR);
return tegra_vde_wait_bsev(vde, wait_dma);
}
@@ -199,11 +249,11 @@ static void tegra_vde_setup_frameid(struct tegra_vde *vde,
u32 value1 = frame ? ((mbs_width << 16) | mbs_height) : 0;
u32 value2 = frame ? ((((mbs_width + 1) >> 1) << 6) | 1) : 0;
- VDE_WR(y_addr >> 8, vde->frameid + 0x000 + frameid * 4);
- VDE_WR(cb_addr >> 8, vde->frameid + 0x100 + frameid * 4);
- VDE_WR(cr_addr >> 8, vde->frameid + 0x180 + frameid * 4);
- VDE_WR(value1, vde->frameid + 0x080 + frameid * 4);
- VDE_WR(value2, vde->frameid + 0x280 + frameid * 4);
+ tegra_vde_writel(vde, y_addr >> 8, vde->frameid, 0x000 + frameid * 4);
+ tegra_vde_writel(vde, cb_addr >> 8, vde->frameid, 0x100 + frameid * 4);
+ tegra_vde_writel(vde, cr_addr >> 8, vde->frameid, 0x180 + frameid * 4);
+ tegra_vde_writel(vde, value1, vde->frameid, 0x080 + frameid * 4);
+ tegra_vde_writel(vde, value2, vde->frameid, 0x280 + frameid * 4);
}
static void tegra_setup_frameidx(struct tegra_vde *vde,
@@ -228,8 +278,7 @@ static void tegra_vde_setup_iram_entry(struct tegra_vde *vde,
{
u32 *iram_tables = vde->iram;
- dev_dbg(vde->miscdev.parent, "IRAM table %u: row %u: 0x%08X 0x%08X\n",
- table, row, value1, value2);
+ trace_vde_setup_iram_entry(table, row, value1, value2);
iram_tables[0x20 * table + row * 2] = value1;
iram_tables[0x20 * table + row * 2 + 1] = value2;
@@ -245,10 +294,7 @@ static void tegra_vde_setup_iram_tables(struct tegra_vde *vde,
int with_later_poc_nb;
unsigned int i, k;
- dev_dbg(vde->miscdev.parent, "DPB: Frame 0: frame_num = %d\n",
- dpb_frames[0].frame_num);
-
- dev_dbg(vde->miscdev.parent, "REF L0:\n");
+ trace_vde_ref_l0(dpb_frames[0].frame_num);
for (i = 0; i < 16; i++) {
if (i < ref_frames_nb) {
@@ -260,11 +306,6 @@ static void tegra_vde_setup_iram_tables(struct tegra_vde *vde,
value |= !(frame->flags & FLAG_B_FRAME) << 25;
value |= 1 << 24;
value |= frame->frame_num;
-
- dev_dbg(vde->miscdev.parent,
- "\tFrame %d: frame_num = %d B_frame = %d\n",
- i + 1, frame->frame_num,
- (frame->flags & FLAG_B_FRAME));
} else {
aux_addr = 0x6ADEAD00;
value = 0;
@@ -284,9 +325,7 @@ static void tegra_vde_setup_iram_tables(struct tegra_vde *vde,
with_later_poc_nb = ref_frames_nb - with_earlier_poc_nb;
- dev_dbg(vde->miscdev.parent,
- "REF L1: with_later_poc_nb %d with_earlier_poc_nb %d\n",
- with_later_poc_nb, with_earlier_poc_nb);
+ trace_vde_ref_l1(with_later_poc_nb, with_earlier_poc_nb);
for (i = 0, k = with_earlier_poc_nb; i < with_later_poc_nb; i++, k++) {
frame = &dpb_frames[k + 1];
@@ -298,10 +337,6 @@ static void tegra_vde_setup_iram_tables(struct tegra_vde *vde,
value |= 1 << 24;
value |= frame->frame_num;
- dev_dbg(vde->miscdev.parent,
- "\tFrame %d: frame_num = %d\n",
- k + 1, frame->frame_num);
-
tegra_vde_setup_iram_entry(vde, 2, i, value, aux_addr);
}
@@ -315,10 +350,6 @@ static void tegra_vde_setup_iram_tables(struct tegra_vde *vde,
value |= 1 << 24;
value |= frame->frame_num;
- dev_dbg(vde->miscdev.parent,
- "\tFrame %d: frame_num = %d\n",
- k + 1, frame->frame_num);
-
tegra_vde_setup_iram_entry(vde, 2, i, value, aux_addr);
}
}
@@ -334,32 +365,32 @@ static int tegra_vde_setup_hw_context(struct tegra_vde *vde,
u32 value;
int err;
- tegra_vde_set_bits(vde, 0x000A, vde->sxe + 0xF0);
- tegra_vde_set_bits(vde, 0x000B, vde->bsev + CMDQUE_CONTROL);
- tegra_vde_set_bits(vde, 0x8002, vde->mbe + 0x50);
- tegra_vde_set_bits(vde, 0x000A, vde->mbe + 0xA0);
- tegra_vde_set_bits(vde, 0x000A, vde->ppe + 0x14);
- tegra_vde_set_bits(vde, 0x000A, vde->ppe + 0x28);
- tegra_vde_set_bits(vde, 0x0A00, vde->mce + 0x08);
- tegra_vde_set_bits(vde, 0x000A, vde->tfe + 0x00);
- tegra_vde_set_bits(vde, 0x0005, vde->vdma + 0x04);
-
- VDE_WR(0x00000000, vde->vdma + 0x1C);
- VDE_WR(0x00000000, vde->vdma + 0x00);
- VDE_WR(0x00000007, vde->vdma + 0x04);
- VDE_WR(0x00000007, vde->frameid + 0x200);
- VDE_WR(0x00000005, vde->tfe + 0x04);
- VDE_WR(0x00000000, vde->mbe + 0x84);
- VDE_WR(0x00000010, vde->sxe + 0x08);
- VDE_WR(0x00000150, vde->sxe + 0x54);
- VDE_WR(0x0000054C, vde->sxe + 0x58);
- VDE_WR(0x00000E34, vde->sxe + 0x5C);
- VDE_WR(0x063C063C, vde->mce + 0x10);
- VDE_WR(0x0003FC00, vde->bsev + INTR_STATUS);
- VDE_WR(0x0000150D, vde->bsev + BSE_CONFIG);
- VDE_WR(0x00000100, vde->bsev + BSE_INT_ENB);
- VDE_WR(0x00000000, vde->bsev + 0x98);
- VDE_WR(0x00000060, vde->bsev + 0x9C);
+ tegra_vde_set_bits(vde, 0x000A, vde->sxe, 0xF0);
+ tegra_vde_set_bits(vde, 0x000B, vde->bsev, CMDQUE_CONTROL);
+ tegra_vde_set_bits(vde, 0x8002, vde->mbe, 0x50);
+ tegra_vde_set_bits(vde, 0x000A, vde->mbe, 0xA0);
+ tegra_vde_set_bits(vde, 0x000A, vde->ppe, 0x14);
+ tegra_vde_set_bits(vde, 0x000A, vde->ppe, 0x28);
+ tegra_vde_set_bits(vde, 0x0A00, vde->mce, 0x08);
+ tegra_vde_set_bits(vde, 0x000A, vde->tfe, 0x00);
+ tegra_vde_set_bits(vde, 0x0005, vde->vdma, 0x04);
+
+ tegra_vde_writel(vde, 0x00000000, vde->vdma, 0x1C);
+ tegra_vde_writel(vde, 0x00000000, vde->vdma, 0x00);
+ tegra_vde_writel(vde, 0x00000007, vde->vdma, 0x04);
+ tegra_vde_writel(vde, 0x00000007, vde->frameid, 0x200);
+ tegra_vde_writel(vde, 0x00000005, vde->tfe, 0x04);
+ tegra_vde_writel(vde, 0x00000000, vde->mbe, 0x84);
+ tegra_vde_writel(vde, 0x00000010, vde->sxe, 0x08);
+ tegra_vde_writel(vde, 0x00000150, vde->sxe, 0x54);
+ tegra_vde_writel(vde, 0x0000054C, vde->sxe, 0x58);
+ tegra_vde_writel(vde, 0x00000E34, vde->sxe, 0x5C);
+ tegra_vde_writel(vde, 0x063C063C, vde->mce, 0x10);
+ tegra_vde_writel(vde, 0x0003FC00, vde->bsev, INTR_STATUS);
+ tegra_vde_writel(vde, 0x0000150D, vde->bsev, BSE_CONFIG);
+ tegra_vde_writel(vde, 0x00000100, vde->bsev, BSE_INT_ENB);
+ tegra_vde_writel(vde, 0x00000000, vde->bsev, 0x98);
+ tegra_vde_writel(vde, 0x00000060, vde->bsev, 0x9C);
memset(vde->iram + 128, 0, macroblocks_nb / 2);
@@ -376,13 +407,13 @@ static int tegra_vde_setup_hw_context(struct tegra_vde *vde,
*/
wmb();
- VDE_WR(0x00000000, vde->bsev + 0x8C);
- VDE_WR(bitstream_data_addr + bitstream_data_size,
- vde->bsev + 0x54);
+ tegra_vde_writel(vde, 0x00000000, vde->bsev, 0x8C);
+ tegra_vde_writel(vde, bitstream_data_addr + bitstream_data_size,
+ vde->bsev, 0x54);
value = ctx->pic_width_in_mbs << 11 | ctx->pic_height_in_mbs << 3;
- VDE_WR(value, vde->bsev + 0x88);
+ tegra_vde_writel(vde, value, vde->bsev, 0x88);
err = tegra_vde_wait_bsev(vde, false);
if (err)
@@ -417,7 +448,7 @@ static int tegra_vde_setup_hw_context(struct tegra_vde *vde,
value |= ctx->pic_width_in_mbs << 11;
value |= ctx->pic_height_in_mbs << 3;
- VDE_WR(value, vde->sxe + 0x10);
+ tegra_vde_writel(vde, value, vde->sxe, 0x10);
value = !ctx->baseline_profile << 17;
value |= ctx->level_idc << 13;
@@ -425,54 +456,54 @@ static int tegra_vde_setup_hw_context(struct tegra_vde *vde,
value |= ctx->pic_order_cnt_type << 5;
value |= ctx->log2_max_frame_num;
- VDE_WR(value, vde->sxe + 0x40);
+ tegra_vde_writel(vde, value, vde->sxe, 0x40);
value = ctx->pic_init_qp << 25;
value |= !!(ctx->deblocking_filter_control_present_flag) << 2;
value |= !!ctx->pic_order_present_flag;
- VDE_WR(value, vde->sxe + 0x44);
+ tegra_vde_writel(vde, value, vde->sxe, 0x44);
value = ctx->chroma_qp_index_offset;
value |= ctx->num_ref_idx_l0_active_minus1 << 5;
value |= ctx->num_ref_idx_l1_active_minus1 << 10;
value |= !!ctx->constrained_intra_pred_flag << 15;
- VDE_WR(value, vde->sxe + 0x48);
+ tegra_vde_writel(vde, value, vde->sxe, 0x48);
value = 0x0C000000;
value |= !!(dpb_frames[0].flags & FLAG_B_FRAME) << 24;
- VDE_WR(value, vde->sxe + 0x4C);
+ tegra_vde_writel(vde, value, vde->sxe, 0x4C);
value = 0x03800000;
value |= bitstream_data_size & GENMASK(19, 15);
- VDE_WR(value, vde->sxe + 0x68);
+ tegra_vde_writel(vde, value, vde->sxe, 0x68);
- VDE_WR(bitstream_data_addr, vde->sxe + 0x6C);
+ tegra_vde_writel(vde, bitstream_data_addr, vde->sxe, 0x6C);
value = 0x10000005;
value |= ctx->pic_width_in_mbs << 11;
value |= ctx->pic_height_in_mbs << 3;
- VDE_WR(value, vde->mbe + 0x80);
+ tegra_vde_writel(vde, value, vde->mbe, 0x80);
value = 0x26800000;
value |= ctx->level_idc << 4;
value |= !ctx->baseline_profile << 1;
value |= !!ctx->direct_8x8_inference_flag;
- VDE_WR(value, vde->mbe + 0x80);
+ tegra_vde_writel(vde, value, vde->mbe, 0x80);
- VDE_WR(0xF4000001, vde->mbe + 0x80);
- VDE_WR(0x20000000, vde->mbe + 0x80);
- VDE_WR(0xF4000101, vde->mbe + 0x80);
+ tegra_vde_writel(vde, 0xF4000001, vde->mbe, 0x80);
+ tegra_vde_writel(vde, 0x20000000, vde->mbe, 0x80);
+ tegra_vde_writel(vde, 0xF4000101, vde->mbe, 0x80);
value = 0x20000000;
value |= ctx->chroma_qp_index_offset << 8;
- VDE_WR(value, vde->mbe + 0x80);
+ tegra_vde_writel(vde, value, vde->mbe, 0x80);
err = tegra_vde_setup_mbe_frame_idx(vde,
ctx->dpb_frames_nb - 1,
@@ -494,7 +525,7 @@ static int tegra_vde_setup_hw_context(struct tegra_vde *vde,
if (!ctx->baseline_profile)
value |= !!(dpb_frames[0].flags & FLAG_REFERENCE) << 1;
- VDE_WR(value, vde->mbe + 0x80);
+ tegra_vde_writel(vde, value, vde->mbe, 0x80);
err = tegra_vde_wait_mbe(vde);
if (err) {
@@ -510,8 +541,9 @@ static void tegra_vde_decode_frame(struct tegra_vde *vde,
{
reinit_completion(&vde->decode_completion);
- VDE_WR(0x00000001, vde->bsev + 0x8C);
- VDE_WR(0x20000000 | (macroblocks_nb - 1), vde->sxe + 0x00);
+ tegra_vde_writel(vde, 0x00000001, vde->bsev, 0x8C);
+ tegra_vde_writel(vde, 0x20000000 | (macroblocks_nb - 1),
+ vde->sxe, 0x00);
}
static void tegra_vde_detach_and_put_dmabuf(struct dma_buf_attachment *a,
@@ -883,8 +915,8 @@ static int tegra_vde_ioctl_decode_h264(struct tegra_vde *vde,
timeout = wait_for_completion_interruptible_timeout(
&vde->decode_completion, msecs_to_jiffies(1000));
if (timeout == 0) {
- bsev_ptr = readl_relaxed(vde->bsev + 0x10);
- macroblocks_nb = readl_relaxed(vde->sxe + 0xC8) & 0x1FFF;
+ bsev_ptr = tegra_vde_readl(vde, vde->bsev, 0x10);
+ macroblocks_nb = tegra_vde_readl(vde, vde->sxe, 0xC8) & 0x1FFF;
read_bytes = bsev_ptr ? bsev_ptr - bitstream_data_addr : 0;
dev_err(dev, "Decoding failed: read 0x%X bytes, %u macroblocks parsed\n",
@@ -962,7 +994,7 @@ static irqreturn_t tegra_vde_isr(int irq, void *data)
if (completion_done(&vde->decode_completion))
return IRQ_NONE;
- tegra_vde_set_bits(vde, 0, vde->frameid + 0x208);
+ tegra_vde_set_bits(vde, 0, vde->frameid, 0x208);
complete(&vde->decode_completion);
return IRQ_HANDLED;
diff --git a/drivers/staging/media/tegra-vde/trace.h b/drivers/staging/media/tegra-vde/trace.h
new file mode 100644
index 000000000000..85e2f7e2d4d0
--- /dev/null
+++ b/drivers/staging/media/tegra-vde/trace.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tegra_vde
+
+#if !defined(TEGRA_VDE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define TEGRA_VDE_TRACE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(register_access,
+ TP_PROTO(struct tegra_vde *vde, void __iomem *base,
+ u32 offset, u32 value),
+ TP_ARGS(vde, base, offset, value),
+ TP_STRUCT__entry(
+ __string(hw_name, tegra_vde_reg_base_name(vde, base))
+ __field(u32, offset)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ __assign_str(hw_name, tegra_vde_reg_base_name(vde, base));
+ __entry->offset = offset;
+ __entry->value = value;
+ ),
+ TP_printk("%s:0x%03x 0x%08x", __get_str(hw_name), __entry->offset,
+ __entry->value)
+);
+
+DEFINE_EVENT(register_access, vde_writel,
+ TP_PROTO(struct tegra_vde *vde, void __iomem *base,
+ u32 offset, u32 value),
+ TP_ARGS(vde, base, offset, value));
+DEFINE_EVENT(register_access, vde_readl,
+ TP_PROTO(struct tegra_vde *vde, void __iomem *base,
+ u32 offset, u32 value),
+ TP_ARGS(vde, base, offset, value));
+
+TRACE_EVENT(vde_setup_iram_entry,
+ TP_PROTO(unsigned int table, unsigned int row, u32 value, u32 aux_addr),
+ TP_ARGS(table, row, value, aux_addr),
+ TP_STRUCT__entry(
+ __field(unsigned int, table)
+ __field(unsigned int, row)
+ __field(u32, value)
+ __field(u32, aux_addr)
+ ),
+ TP_fast_assign(
+ __entry->table = table;
+ __entry->row = row;
+ __entry->value = value;
+ __entry->aux_addr = aux_addr;
+ ),
+ TP_printk("[%u][%u] = { 0x%08x (flags = \"%s\", frame_num = %u); 0x%08x }",
+ __entry->table, __entry->row, __entry->value,
+ __print_flags(__entry->value, " ", { (1 << 25), "B" }),
+ __entry->value & 0x7FFFFF, __entry->aux_addr)
+);
+
+TRACE_EVENT(vde_ref_l0,
+ TP_PROTO(unsigned int frame_num),
+ TP_ARGS(frame_num),
+ TP_STRUCT__entry(
+ __field(unsigned int, frame_num)
+ ),
+ TP_fast_assign(
+ __entry->frame_num = frame_num;
+ ),
+ TP_printk("REF L0: DPB: Frame 0: frame_num = %u", __entry->frame_num)
+);
+
+TRACE_EVENT(vde_ref_l1,
+ TP_PROTO(unsigned int with_later_poc_nb,
+ unsigned int with_earlier_poc_nb),
+ TP_ARGS(with_later_poc_nb, with_earlier_poc_nb),
+ TP_STRUCT__entry(
+ __field(unsigned int, with_later_poc_nb)
+ __field(unsigned int, with_earlier_poc_nb)
+ ),
+ TP_fast_assign(
+ __entry->with_later_poc_nb = with_later_poc_nb;
+ __entry->with_earlier_poc_nb = with_earlier_poc_nb;
+ ),
+ TP_printk("REF L1: with_later_poc_nb %u, with_earlier_poc_nb %u",
+ __entry->with_later_poc_nb, __entry->with_earlier_poc_nb)
+);
+
+#endif /* TEGRA_VDE_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/staging/media/tegra-vde
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
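
With TRACE_SYSTEM set to tegra_vde, the new events appear under tracefs once the driver is loaded. A hedged host-side sketch for enabling them, assuming tracefs is mounted at /sys/kernel/tracing:

#include <fcntl.h>
#include <unistd.h>

static int enable_vde_tracing(void)
{
	/* Enables vde_writel, vde_readl, vde_setup_iram_entry, etc. */
	int fd = open("/sys/kernel/tracing/events/tegra_vde/enable", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}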
diff --git a/drivers/staging/most/core.c b/drivers/staging/most/core.c
index 6a18cf73c85e..18936cdb1083 100644
--- a/drivers/staging/most/core.c
+++ b/drivers/staging/most/core.c
@@ -351,7 +351,7 @@ static ssize_t set_datatype_show(struct device *dev,
for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
- return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
+ return snprintf(buf, PAGE_SIZE, "%s", ch_data_type[i].name);
}
return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}
diff --git a/drivers/staging/mt29f_spinand/Kconfig b/drivers/staging/mt29f_spinand/Kconfig
deleted file mode 100644
index f3f9cb3b5c35..000000000000
--- a/drivers/staging/mt29f_spinand/Kconfig
+++ /dev/null
@@ -1,16 +0,0 @@
-config MTD_SPINAND_MT29F
- tristate "SPINAND Device Support for Micron"
- depends on MTD_NAND && SPI
- help
- This enables support for accessing Micron SPI NAND flash
- devices.
- If you have Micron SPI NAND chip say yes.
-
- If unsure, say no here.
-
-config MTD_SPINAND_ONDIEECC
- bool "Use SPINAND internal ECC"
- depends on MTD_SPINAND_MT29F
- help
- Internal ECC.
- Enables Hardware ECC support for Micron SPI NAND.
diff --git a/drivers/staging/mt29f_spinand/Makefile b/drivers/staging/mt29f_spinand/Makefile
deleted file mode 100644
index e47af0f7fda9..000000000000
--- a/drivers/staging/mt29f_spinand/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand.o
diff --git a/drivers/staging/mt29f_spinand/TODO b/drivers/staging/mt29f_spinand/TODO
deleted file mode 100644
index a2209b72d371..000000000000
--- a/drivers/staging/mt29f_spinand/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-TODO:
- - Tested on XLP platform, needs to be tested on other platforms.
- - Checkpatch.pl cleanups
- - Sparce fixes.
- - Clean up coding style to meet kernel standard.
-
-Please send patches
-To:
-Kamlakant Patel <kamlakant.patel@broadcom.com>
-Cc:
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Mona Anonuevo <manonuevo@micron.com>
-linux-mtd@lists.infradead.org
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
deleted file mode 100644
index def8a1f57d1c..000000000000
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
+++ /dev/null
@@ -1,980 +0,0 @@
-/*
- * Copyright (c) 2003-2013 Broadcom Corporation
- *
- * Copyright (c) 2009-2010 Micron Technology, Inc.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/spi/spi.h>
-
-#include "mt29f_spinand.h"
-
-#define BUFSIZE (10 * 64 * 2048)
-#define CACHE_BUF 2112
-/*
- * OOB area specification layout: Total 32 available free bytes.
- */
-
-static inline struct spinand_state *mtd_to_state(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct spinand_info *info = nand_get_controller_data(chip);
- struct spinand_state *state = info->priv;
-
- return state;
-}
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
-static int enable_hw_ecc;
-static int enable_read_hw_ecc;
-
-static int spinand_ooblayout_64_ecc(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- if (section > 3)
- return -ERANGE;
-
- oobregion->offset = (section * 16) + 1;
- oobregion->length = 6;
-
- return 0;
-}
-
-static int spinand_ooblayout_64_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
-{
- if (section > 3)
- return -ERANGE;
-
- oobregion->offset = (section * 16) + 8;
- oobregion->length = 8;
-
- return 0;
-}
-
-static const struct mtd_ooblayout_ops spinand_oob_64_ops = {
- .ecc = spinand_ooblayout_64_ecc,
- .free = spinand_ooblayout_64_free,
-};
-#endif
-
-/**
- * spinand_cmd - process a command to send to the SPI Nand
- * Description:
- * Set up the command buffer to send to the SPI controller.
- * The command buffer has to initialized to 0.
- */
-
-static int spinand_cmd(struct spi_device *spi, struct spinand_cmd *cmd)
-{
- struct spi_message message;
- struct spi_transfer x[4];
- u8 dummy = 0xff;
-
- spi_message_init(&message);
- memset(x, 0, sizeof(x));
-
- x[0].len = 1;
- x[0].tx_buf = &cmd->cmd;
- spi_message_add_tail(&x[0], &message);
-
- if (cmd->n_addr) {
- x[1].len = cmd->n_addr;
- x[1].tx_buf = cmd->addr;
- spi_message_add_tail(&x[1], &message);
- }
-
- if (cmd->n_dummy) {
- x[2].len = cmd->n_dummy;
- x[2].tx_buf = &dummy;
- spi_message_add_tail(&x[2], &message);
- }
-
- if (cmd->n_tx) {
- x[3].len = cmd->n_tx;
- x[3].tx_buf = cmd->tx_buf;
- spi_message_add_tail(&x[3], &message);
- }
-
- if (cmd->n_rx) {
- x[3].len = cmd->n_rx;
- x[3].rx_buf = cmd->rx_buf;
- spi_message_add_tail(&x[3], &message);
- }
-
- return spi_sync(spi, &message);
-}
-
-/**
- * spinand_read_id - Read SPI Nand ID
- * Description:
- * read two ID bytes from the SPI Nand device
- */
-static int spinand_read_id(struct spi_device *spi_nand, u8 *id)
-{
- int retval;
- u8 nand_id[3];
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_READ_ID;
- cmd.n_rx = 3;
- cmd.rx_buf = &nand_id[0];
-
- retval = spinand_cmd(spi_nand, &cmd);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "error %d reading id\n", retval);
- return retval;
- }
- id[0] = nand_id[1];
- id[1] = nand_id[2];
- return retval;
-}
-
-/**
- * spinand_read_status - send command 0xf to the SPI Nand status register
- * Description:
- * After read, write, or erase, the Nand device is expected to set the
- * busy status.
- * This function is to allow reading the status of the command: read,
- * write, and erase.
- * Once the status turns to be ready, the other status bits also are
- * valid status bits.
- */
-static int spinand_read_status(struct spi_device *spi_nand, u8 *status)
-{
- struct spinand_cmd cmd = {0};
- int ret;
-
- cmd.cmd = CMD_READ_REG;
- cmd.n_addr = 1;
- cmd.addr[0] = REG_STATUS;
- cmd.n_rx = 1;
- cmd.rx_buf = status;
-
- ret = spinand_cmd(spi_nand, &cmd);
- if (ret < 0)
- dev_err(&spi_nand->dev, "err: %d read status register\n", ret);
-
- return ret;
-}
-
-#define MAX_WAIT_JIFFIES (40 * HZ)
-static int wait_till_ready(struct spi_device *spi_nand)
-{
- unsigned long deadline;
- int retval;
- u8 stat = 0;
-
- deadline = jiffies + MAX_WAIT_JIFFIES;
- do {
- retval = spinand_read_status(spi_nand, &stat);
- if (retval < 0)
- return -1;
- if (!(stat & 0x1))
- break;
-
- cond_resched();
- } while (!time_after_eq(jiffies, deadline));
-
- if ((stat & 0x1) == 0)
- return 0;
-
- return -1;
-}
-
-/**
- * spinand_get_otp - send command 0xf to read the SPI Nand OTP register
- * Description:
- * There is one bit( bit 0x10 ) to set or to clear the internal ECC.
- * Enable chip internal ECC, set the bit to 1
- * Disable chip internal ECC, clear the bit to 0
- */
-static int spinand_get_otp(struct spi_device *spi_nand, u8 *otp)
-{
- struct spinand_cmd cmd = {0};
- int retval;
-
- cmd.cmd = CMD_READ_REG;
- cmd.n_addr = 1;
- cmd.addr[0] = REG_OTP;
- cmd.n_rx = 1;
- cmd.rx_buf = otp;
-
- retval = spinand_cmd(spi_nand, &cmd);
- if (retval < 0)
- dev_err(&spi_nand->dev, "error %d get otp\n", retval);
- return retval;
-}
-
-/**
- * spinand_set_otp - send command 0x1f to write the SPI Nand OTP register
- * Description:
- * There is one bit( bit 0x10 ) to set or to clear the internal ECC.
- * Enable chip internal ECC, set the bit to 1
- * Disable chip internal ECC, clear the bit to 0
- */
-static int spinand_set_otp(struct spi_device *spi_nand, u8 *otp)
-{
- int retval;
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_WRITE_REG;
- cmd.n_addr = 1;
- cmd.addr[0] = REG_OTP;
- cmd.n_tx = 1;
- cmd.tx_buf = otp;
-
- retval = spinand_cmd(spi_nand, &cmd);
- if (retval < 0)
- dev_err(&spi_nand->dev, "error %d set otp\n", retval);
-
- return retval;
-}
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
-/**
- * spinand_enable_ecc - send command 0x1f to write the SPI Nand OTP register
- * Description:
- * There is one bit( bit 0x10 ) to set or to clear the internal ECC.
- * Enable chip internal ECC, set the bit to 1
- * Disable chip internal ECC, clear the bit to 0
- */
-static int spinand_enable_ecc(struct spi_device *spi_nand)
-{
- int retval;
- u8 otp = 0;
-
- retval = spinand_get_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
-
- if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK)
- return 0;
- otp |= OTP_ECC_MASK;
- retval = spinand_set_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
- return spinand_get_otp(spi_nand, &otp);
-}
-#endif
-
-static int spinand_disable_ecc(struct spi_device *spi_nand)
-{
- int retval;
- u8 otp = 0;
-
- retval = spinand_get_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
-
- if ((otp & OTP_ECC_MASK) == OTP_ECC_MASK) {
- otp &= ~OTP_ECC_MASK;
- retval = spinand_set_otp(spi_nand, &otp);
- if (retval < 0)
- return retval;
- return spinand_get_otp(spi_nand, &otp);
- }
- return 0;
-}
-
-/**
- * spinand_write_enable - send command 0x06 to enable write or erase the
- * Nand cells
- * Description:
- * Before write and erase the Nand cells, the write enable has to be set.
- * After the write or erase, the write enable bit is automatically
- * cleared (status register bit 2)
- * Set the bit 2 of the status register has the same effect
- */
-static int spinand_write_enable(struct spi_device *spi_nand)
-{
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_WR_ENABLE;
- return spinand_cmd(spi_nand, &cmd);
-}
-
-static int spinand_read_page_to_cache(struct spi_device *spi_nand, u16 page_id)
-{
- struct spinand_cmd cmd = {0};
- u16 row;
-
- row = page_id;
- cmd.cmd = CMD_READ;
- cmd.n_addr = 3;
- cmd.addr[0] = (u8)((row & 0xff0000) >> 16);
- cmd.addr[1] = (u8)((row & 0xff00) >> 8);
- cmd.addr[2] = (u8)(row & 0x00ff);
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_read_from_cache - send command 0x03 to read out the data from the
- * cache register (2112 bytes max)
- * Description:
- * The read can specify 1 to 2112 bytes of data read at the corresponding
- * locations.
- * No tRd delay.
- */
-static int spinand_read_from_cache(struct spi_device *spi_nand, u16 page_id,
- u16 byte_id, u16 len, u8 *rbuf)
-{
- struct spinand_cmd cmd = {0};
- u16 column;
-
- column = byte_id;
- cmd.cmd = CMD_READ_RDM;
- cmd.n_addr = 3;
- cmd.addr[0] = (u8)((column & 0xff00) >> 8);
- cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);
- cmd.addr[1] = (u8)(column & 0x00ff);
- cmd.addr[2] = (u8)(0xff);
- cmd.n_dummy = 0;
- cmd.n_rx = len;
- cmd.rx_buf = rbuf;
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_read_page - read a page
- * @page_id: the physical page number
- * @offset: the location from 0 to 2111
- * @len: number of bytes to read
- * @rbuf: read buffer to hold @len bytes
- *
- * Description:
- * The read includes two commands to the Nand - 0x13 and 0x03 commands
- * Poll to read status to wait for tRD time.
- */
-static int spinand_read_page(struct spi_device *spi_nand, u16 page_id,
- u16 offset, u16 len, u8 *rbuf)
-{
- int ret;
- u8 status = 0;
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- if (enable_read_hw_ecc) {
- if (spinand_enable_ecc(spi_nand) < 0)
- dev_err(&spi_nand->dev, "enable HW ECC failed!");
- }
-#endif
- ret = spinand_read_page_to_cache(spi_nand, page_id);
- if (ret < 0)
- return ret;
-
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "WAIT timedout!!!\n");
-
- while (1) {
- ret = spinand_read_status(spi_nand, &status);
- if (ret < 0) {
- dev_err(&spi_nand->dev,
- "err %d read status register\n", ret);
- return ret;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
- dev_err(&spi_nand->dev, "ecc error, page=%d\n",
- page_id);
- return 0;
- }
- break;
- }
- }
-
- ret = spinand_read_from_cache(spi_nand, page_id, offset, len, rbuf);
- if (ret < 0) {
- dev_err(&spi_nand->dev, "read from cache failed!!\n");
- return ret;
- }
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- if (enable_read_hw_ecc) {
- ret = spinand_disable_ecc(spi_nand);
- if (ret < 0) {
- dev_err(&spi_nand->dev, "disable ecc failed!!\n");
- return ret;
- }
- enable_read_hw_ecc = 0;
- }
-#endif
- return ret;
-}
-
-/**
- * spinand_program_data_to_cache - write a page to cache
- * @byte_id: the location to write to the cache
- * @len: number of bytes to write
- * @wbuf: write buffer holding @len bytes
- *
- * Description:
- * The write command used here is 0x84--indicating that the cache is
- * not cleared first.
- * Since it is writing the data to cache, there is no tPROG time.
- */
-static int spinand_program_data_to_cache(struct spi_device *spi_nand,
- u16 page_id, u16 byte_id,
- u16 len, u8 *wbuf)
-{
- struct spinand_cmd cmd = {0};
- u16 column;
-
- column = byte_id;
- cmd.cmd = CMD_PROG_PAGE_CLRCACHE;
- cmd.n_addr = 2;
- cmd.addr[0] = (u8)((column & 0xff00) >> 8);
- cmd.addr[0] |= (u8)(((page_id >> 6) & 0x1) << 4);
- cmd.addr[1] = (u8)(column & 0x00ff);
- cmd.n_tx = len;
- cmd.tx_buf = wbuf;
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_program_execute - write a page from cache to the Nand array
- * @page_id: the physical page location to write the page.
- *
- * Description:
- * The write command used here is 0x10--indicating the cache is writing to
- * the Nand array.
- * Need to wait for tPROG time to finish the transaction.
- */
-static int spinand_program_execute(struct spi_device *spi_nand, u16 page_id)
-{
- struct spinand_cmd cmd = {0};
- u16 row;
-
- row = page_id;
- cmd.cmd = CMD_PROG_PAGE_EXC;
- cmd.n_addr = 3;
- cmd.addr[0] = (u8)((row & 0xff0000) >> 16);
- cmd.addr[1] = (u8)((row & 0xff00) >> 8);
- cmd.addr[2] = (u8)(row & 0x00ff);
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_program_page - write a page
- * @page_id: the physical page location to write the page.
- * @offset: the location from the cache starting from 0 to 2111
- * @len: the number of bytes to write
- * @buf: the buffer holding @len bytes
- *
- * Description:
- * The commands used here are 0x06, 0x84, and 0x10--indicating that
- * the write enable is first sent, the write cache command, and the
- * write execute command.
- * Poll to wait for the tPROG time to finish the transaction.
- */
-static int spinand_program_page(struct spi_device *spi_nand,
- u16 page_id, u16 offset, u16 len, u8 *buf)
-{
- int retval;
- u8 status = 0;
- u8 *wbuf;
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- unsigned int i, j;
-
- wbuf = devm_kzalloc(&spi_nand->dev, CACHE_BUF, GFP_KERNEL);
- if (!wbuf)
- return -ENOMEM;
-
- enable_read_hw_ecc = 1;
- retval = spinand_read_page(spi_nand, page_id, 0, CACHE_BUF, wbuf);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "ecc error on read page!!!\n");
- return retval;
- }
-
- for (i = offset, j = 0; i < len; i++, j++)
- wbuf[i] &= buf[j];
-
- if (enable_hw_ecc) {
- retval = spinand_enable_ecc(spi_nand);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "enable ecc failed!!\n");
- return retval;
- }
- }
-#else
- wbuf = buf;
-#endif
- retval = spinand_write_enable(spi_nand);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "write enable failed!!\n");
- return retval;
- }
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "wait timedout!!!\n");
-
- retval = spinand_program_data_to_cache(spi_nand, page_id,
- offset, len, wbuf);
- if (retval < 0)
- return retval;
- retval = spinand_program_execute(spi_nand, page_id);
- if (retval < 0)
- return retval;
- while (1) {
- retval = spinand_read_status(spi_nand, &status);
- if (retval < 0) {
- dev_err(&spi_nand->dev,
- "error %d reading status register\n", retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_P_FAIL_MASK) == STATUS_P_FAIL) {
- dev_err(&spi_nand->dev,
- "program error, page %d\n", page_id);
- return -1;
- }
- break;
- }
- }
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- if (enable_hw_ecc) {
- retval = spinand_disable_ecc(spi_nand);
- if (retval < 0) {
- dev_err(&spi_nand->dev, "disable ecc failed!!\n");
- return retval;
- }
- enable_hw_ecc = 0;
- }
-#endif
-
- return 0;
-}
-
-/**
- * spinand_erase_block_erase - erase a block
- * @block_id: the physical block location to erase.
- *
- * Description:
- * The command used here is 0xd8--an erase command that erases
- * one block (64 pages).
- * Need to wait for tERS.
- */
-static int spinand_erase_block_erase(struct spi_device *spi_nand, u16 block_id)
-{
- struct spinand_cmd cmd = {0};
- u16 row;
-
- row = block_id;
- cmd.cmd = CMD_ERASE_BLK;
- cmd.n_addr = 3;
- cmd.addr[0] = (u8)((row & 0xff0000) >> 16);
- cmd.addr[1] = (u8)((row & 0xff00) >> 8);
- cmd.addr[2] = (u8)(row & 0x00ff);
-
- return spinand_cmd(spi_nand, &cmd);
-}
-
-/**
- * spinand_erase_block - erase a block
- * @block_id: the physical block location to erase.
- *
- * Description:
- * The commands used here are 0x06 and 0xd8: the write enable (0x06)
- * is sent first, followed by the 0xd8 erase command, which erases
- * one block (64 pages).
- * Poll to wait for the tERS time to complete the transaction.
- */
-static int spinand_erase_block(struct spi_device *spi_nand, u16 block_id)
-{
- int retval;
- u8 status = 0;
-
- retval = spinand_write_enable(spi_nand);
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "wait timedout!!!\n");
-
- retval = spinand_erase_block_erase(spi_nand, block_id);
- while (1) {
- retval = spinand_read_status(spi_nand, &status);
- if (retval < 0) {
- dev_err(&spi_nand->dev,
- "error %d reading status register\n", retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_E_FAIL_MASK) == STATUS_E_FAIL) {
- dev_err(&spi_nand->dev,
- "erase error, block %d\n", block_id);
- return -1;
- }
- break;
- }
- }
- return 0;
-}
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
-static int spinand_write_page_hwecc(struct nand_chip *chip,
- const u8 *buf, int oob_required,
- int page)
-{
- const u8 *p = buf;
- int eccsize = chip->ecc.size;
- int eccsteps = chip->ecc.steps;
-
- enable_hw_ecc = 1;
- return nand_prog_page_op(chip, page, 0, p, eccsize * eccsteps);
-}
-
-static int spinand_read_page_hwecc(struct nand_chip *chip, u8 *buf,
- int oob_required, int page)
-{
- int retval;
- u8 status;
- u8 *p = buf;
- int eccsize = chip->ecc.size;
- int eccsteps = chip->ecc.steps;
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct spinand_info *info = nand_get_controller_data(chip);
-
- enable_read_hw_ecc = 1;
-
- nand_read_page_op(chip, page, 0, p, eccsize * eccsteps);
- if (oob_required)
- chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
-
- while (1) {
- retval = spinand_read_status(info->spi, &status);
- if (retval < 0) {
- dev_err(&mtd->dev,
- "error %d reading status register\n", retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY) {
- if ((status & STATUS_ECC_MASK) == STATUS_ECC_ERROR) {
- pr_info("spinand: ECC error\n");
- mtd->ecc_stats.failed++;
- } else if ((status & STATUS_ECC_MASK) ==
- STATUS_ECC_1BIT_CORRECTED)
- mtd->ecc_stats.corrected++;
- break;
- }
- }
- return 0;
-}
-#endif
-
-static void spinand_select_chip(struct nand_chip *chip, int dev)
-{
-}
-
-static u8 spinand_read_byte(struct nand_chip *chip)
-{
- struct spinand_state *state = mtd_to_state(nand_to_mtd(chip));
- u8 data;
-
- data = state->buf[state->buf_ptr];
- state->buf_ptr++;
- return data;
-}
-
-static int spinand_wait(struct nand_chip *chip)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct spinand_info *info = nand_get_controller_data(chip);
-
- unsigned long timeo = jiffies;
- int retval, state = chip->state;
- u8 status;
-
- if (state == FL_ERASING)
- timeo += (HZ * 400) / 1000;
- else
- timeo += (HZ * 20) / 1000;
-
- while (time_before(jiffies, timeo)) {
- retval = spinand_read_status(info->spi, &status);
- if (retval < 0) {
- dev_err(&mtd->dev,
- "error %d reading status register\n", retval);
- return retval;
- }
-
- if ((status & STATUS_OIP_MASK) == STATUS_READY)
- return 0;
-
- cond_resched();
- }
- return 0;
-}
-
-static void spinand_write_buf(struct nand_chip *chip, const u8 *buf, int len)
-{
- struct spinand_state *state = mtd_to_state(nand_to_mtd(chip));
-
- memcpy(state->buf + state->buf_ptr, buf, len);
- state->buf_ptr += len;
-}
-
-static void spinand_read_buf(struct nand_chip *chip, u8 *buf, int len)
-{
- struct spinand_state *state = mtd_to_state(nand_to_mtd(chip));
-
- memcpy(buf, state->buf + state->buf_ptr, len);
- state->buf_ptr += len;
-}
-
-/*
- * spinand_reset - send RESET command "0xff" to the Nand device.
- */
-static void spinand_reset(struct spi_device *spi_nand)
-{
- struct spinand_cmd cmd = {0};
-
- cmd.cmd = CMD_RESET;
-
- if (spinand_cmd(spi_nand, &cmd) < 0)
- pr_info("spinand reset failed!\n");
-
-	/* let at least 1 ms elapse before issuing any other command */
- usleep_range(1000, 2000);
-
- if (wait_till_ready(spi_nand))
- dev_err(&spi_nand->dev, "wait timedout!\n");
-}
-
-static void spinand_cmdfunc(struct nand_chip *chip, unsigned int command,
- int column, int page)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct spinand_info *info = nand_get_controller_data(chip);
- struct spinand_state *state = info->priv;
-
- switch (command) {
- /*
- * READ0 - read in first 0x800 bytes
- */
- case NAND_CMD_READ1:
- case NAND_CMD_READ0:
- state->buf_ptr = 0;
- spinand_read_page(info->spi, page, 0x0, 0x840, state->buf);
- break;
- /* READOOB reads only the OOB because no ECC is performed. */
- case NAND_CMD_READOOB:
- state->buf_ptr = 0;
- spinand_read_page(info->spi, page, 0x800, 0x40, state->buf);
- break;
- case NAND_CMD_RNDOUT:
- state->buf_ptr = column;
- break;
- case NAND_CMD_READID:
- state->buf_ptr = 0;
- spinand_read_id(info->spi, state->buf);
- break;
- case NAND_CMD_PARAM:
- state->buf_ptr = 0;
- break;
-	/* ERASE1 performs the block erase at the given page address */
- case NAND_CMD_ERASE1:
- spinand_erase_block(info->spi, page);
- break;
-	/* ERASE2 is a no-op; the erase was already issued in ERASE1 */
- case NAND_CMD_ERASE2:
- break;
- /* SEQIN sets up the addr buffer and all registers except the length */
- case NAND_CMD_SEQIN:
- state->col = column;
- state->row = page;
- state->buf_ptr = 0;
- break;
- /* PAGEPROG reuses all of the setup from SEQIN and adds the length */
- case NAND_CMD_PAGEPROG:
- spinand_program_page(info->spi, state->row, state->col,
- state->buf_ptr, state->buf);
- break;
- case NAND_CMD_STATUS:
- spinand_get_otp(info->spi, state->buf);
- if (!(state->buf[0] & 0x80))
- state->buf[0] = 0x80;
- state->buf_ptr = 0;
- break;
- /* RESET command */
- case NAND_CMD_RESET:
- if (wait_till_ready(info->spi))
- dev_err(&info->spi->dev, "WAIT timedout!!!\n");
-		/* a minimum of 250us must elapse before issuing RESET cmd */
- usleep_range(250, 1000);
- spinand_reset(info->spi);
- break;
- default:
- dev_err(&mtd->dev, "Unknown CMD: 0x%x\n", command);
- }
-}
-
-/**
- * spinand_lock_block - send write register 0x1f command to the Nand device
- *
- * Description:
- * After power up, all the Nand blocks are locked. This function
- * allows one to unlock the blocks so that they can be written or
- * erased.
- */
-static int spinand_lock_block(struct spi_device *spi_nand, u8 lock)
-{
- struct spinand_cmd cmd = {0};
- int ret;
- u8 otp = 0;
-
- ret = spinand_get_otp(spi_nand, &otp);
-
- cmd.cmd = CMD_WRITE_REG;
- cmd.n_addr = 1;
- cmd.addr[0] = REG_BLOCK_LOCK;
- cmd.n_tx = 1;
- cmd.tx_buf = &lock;
-
- ret = spinand_cmd(spi_nand, &cmd);
- if (ret < 0)
- dev_err(&spi_nand->dev, "error %d lock block\n", ret);
-
- return ret;
-}
-
-/**
- * spinand_probe - [spinand Interface]
- * @spi_nand: the SPI NAND device to set up.
- *
- * Description:
- * Set up the device driver parameters to make the device available.
- */
-static int spinand_probe(struct spi_device *spi_nand)
-{
- struct mtd_info *mtd;
- struct nand_chip *chip;
- struct spinand_info *info;
- struct spinand_state *state;
-
- info = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->spi = spi_nand;
-
- spinand_lock_block(spi_nand, BL_ALL_UNLOCKED);
-
- state = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_state),
- GFP_KERNEL);
- if (!state)
- return -ENOMEM;
-
- info->priv = state;
- state->buf_ptr = 0;
- state->buf = devm_kzalloc(&spi_nand->dev, BUFSIZE, GFP_KERNEL);
- if (!state->buf)
- return -ENOMEM;
-
- chip = devm_kzalloc(&spi_nand->dev, sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- chip->ecc.mode = NAND_ECC_HW;
- chip->ecc.size = 0x200;
- chip->ecc.bytes = 0x6;
- chip->ecc.steps = 0x4;
-
- chip->ecc.strength = 1;
- chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
- chip->ecc.read_page = spinand_read_page_hwecc;
- chip->ecc.write_page = spinand_write_page_hwecc;
-#else
- chip->ecc.mode = NAND_ECC_SOFT;
- chip->ecc.algo = NAND_ECC_HAMMING;
- if (spinand_disable_ecc(spi_nand) < 0)
- dev_info(&spi_nand->dev, "%s: disable ecc failed!\n",
- __func__);
-#endif
-
- nand_set_flash_node(chip, spi_nand->dev.of_node);
- nand_set_controller_data(chip, info);
- chip->legacy.read_buf = spinand_read_buf;
- chip->legacy.write_buf = spinand_write_buf;
- chip->legacy.read_byte = spinand_read_byte;
- chip->legacy.cmdfunc = spinand_cmdfunc;
- chip->legacy.waitfunc = spinand_wait;
- chip->options |= NAND_CACHEPRG;
- chip->select_chip = spinand_select_chip;
- chip->legacy.set_features = nand_get_set_features_notsupp;
- chip->legacy.get_features = nand_get_set_features_notsupp;
-
- mtd = nand_to_mtd(chip);
-
- dev_set_drvdata(&spi_nand->dev, mtd);
-
- mtd->dev.parent = &spi_nand->dev;
- mtd->oobsize = 64;
-#ifdef CONFIG_MTD_SPINAND_ONDIEECC
- mtd_set_ooblayout(mtd, &spinand_oob_64_ops);
-#endif
-
- if (nand_scan(chip, 1))
- return -ENXIO;
-
- return mtd_device_register(mtd, NULL, 0);
-}
-
-/**
- * spinand_remove - remove the device driver
- * @spi: the spi device.
- *
- * Description:
- * Remove the device driver parameters and free allocated memory.
- */
-static int spinand_remove(struct spi_device *spi)
-{
- mtd_device_unregister(dev_get_drvdata(&spi->dev));
-
- return 0;
-}
-
-static const struct of_device_id spinand_dt[] = {
- { .compatible = "spinand,mt29f", },
- {}
-};
-MODULE_DEVICE_TABLE(of, spinand_dt);
-
-/*
- * SPI driver description
- */
-static struct spi_driver spinand_driver = {
- .driver = {
- .name = "mt29f",
- .of_match_table = spinand_dt,
- },
- .probe = spinand_probe,
- .remove = spinand_remove,
-};
-
-module_spi_driver(spinand_driver);
-
-MODULE_DESCRIPTION("SPI NAND driver for Micron");
-MODULE_AUTHOR("Henry Pan <hspan@micron.com>, Kamlakant Patel <kamlakant.patel@broadcom.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.h b/drivers/staging/mt29f_spinand/mt29f_spinand.h
deleted file mode 100644
index 457dc7ffdaf1..000000000000
--- a/drivers/staging/mt29f_spinand/mt29f_spinand.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*-
- * Copyright 2013 Broadcom Corporation
- *
- * Copyright (c) 2009-2010 Micron Technology, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Henry Pan <hspan@micron.com>
- *
- * based on nand.h
- */
-#ifndef __LINUX_MTD_SPI_NAND_H
-#define __LINUX_MTD_SPI_NAND_H
-
-#include <linux/wait.h>
-#include <linux/spinlock.h>
-#include <linux/mtd/mtd.h>
-
-/* cmd */
-#define CMD_READ 0x13
-#define CMD_READ_RDM 0x03
-#define CMD_PROG_PAGE_CLRCACHE 0x02
-#define CMD_PROG_PAGE 0x84
-#define CMD_PROG_PAGE_EXC 0x10
-#define CMD_ERASE_BLK 0xd8
-#define CMD_WR_ENABLE 0x06
-#define CMD_WR_DISABLE 0x04
-#define CMD_READ_ID 0x9f
-#define CMD_RESET 0xff
-#define CMD_READ_REG 0x0f
-#define CMD_WRITE_REG 0x1f
-
-/* feature/ status reg */
-#define REG_BLOCK_LOCK 0xa0
-#define REG_OTP 0xb0
-#define REG_STATUS 0xc0 /* timing */
-
-/* status */
-#define STATUS_OIP_MASK 0x01
-#define STATUS_READY 0
-#define STATUS_BUSY BIT(0)
-
-#define STATUS_E_FAIL_MASK 0x04
-#define STATUS_E_FAIL BIT(2)
-
-#define STATUS_P_FAIL_MASK 0x08
-#define STATUS_P_FAIL BIT(3)
-
-#define STATUS_ECC_MASK 0x30
-#define STATUS_ECC_1BIT_CORRECTED BIT(4)
-#define STATUS_ECC_ERROR BIT(5)
-#define STATUS_ECC_RESERVED (BIT(5) | BIT(4))
-
-/*ECC enable defines*/
-#define OTP_ECC_MASK 0x10
-#define OTP_ECC_OFF 0
-#define OTP_ECC_ON 1
-
-#define ECC_DISABLED
-#define ECC_IN_NAND
-#define ECC_SOFT
-
-/* block lock */
-#define BL_ALL_LOCKED 0x38
-#define BL_1_2_LOCKED 0x30
-#define BL_1_4_LOCKED 0x28
-#define BL_1_8_LOCKED 0x20
-#define BL_1_16_LOCKED 0x18
-#define BL_1_32_LOCKED 0x10
-#define BL_1_64_LOCKED 0x08
-#define BL_ALL_UNLOCKED 0
-
-struct spinand_info {
- struct spi_device *spi;
- void *priv;
-};
-
-struct spinand_state {
- u32 col;
- u32 row;
- int buf_ptr;
- u8 *buf;
-};
-
-struct spinand_cmd {
- u8 cmd;
-	u32 n_addr;		/* Number of address bytes */
- u8 addr[3]; /* Reg Offset */
-	u32 n_dummy;		/* Number of dummy bytes */
- u32 n_tx; /* Number of tx bytes */
- u8 *tx_buf; /* Tx buf */
- u32 n_rx; /* Number of rx bytes */
- u8 *rx_buf; /* Rx buf */
-};
-
-int spinand_mtd(struct mtd_info *mtd);
-void spinand_mtd_release(struct mtd_info *mtd);
-
-#endif /* __LINUX_MTD_SPI_NAND_H */
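For reference, the read path the removed driver implemented maps onto the command set above as sketched below. This is a minimal illustration only, reusing the removed driver's own helpers (spinand_cmd(), spinand_read_status()) and its CMD_*/STATUS_* definitions; error reporting, ECC handling, and the exact dummy-byte layout are elided or assumed.

/* Sketch: page read, per the 0x13 / poll / 0x03 sequence the driver used. */
static int sketch_read_page(struct spi_device *spi, u32 page, u16 offset,
			    u16 len, u8 *rbuf)
{
	struct spinand_cmd cmd = {0};
	u8 status;
	int ret;

	/* 0x13: load the page from the Nand array into the on-die cache */
	cmd.cmd = CMD_READ;
	cmd.n_addr = 3;
	cmd.addr[0] = (u8)(page >> 16);
	cmd.addr[1] = (u8)(page >> 8);
	cmd.addr[2] = (u8)page;
	ret = spinand_cmd(spi, &cmd);
	if (ret < 0)
		return ret;

	/* poll OIP in the status register to wait out tRD */
	do {
		ret = spinand_read_status(spi, &status);
		if (ret < 0)
			return ret;
	} while ((status & STATUS_OIP_MASK) != STATUS_READY);

	/* 0x03: stream the cached data out over SPI */
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = CMD_READ_RDM;
	cmd.n_addr = 3;
	cmd.addr[0] = (u8)(offset >> 8);
	cmd.addr[1] = (u8)offset;
	cmd.addr[2] = 0xff;		/* dummy byte, layout assumed */
	cmd.n_rx = len;
	cmd.rx_buf = rbuf;
	return spinand_cmd(spi, &cmd);
}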
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index df6ebf41bdea..5831f816c17b 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -335,6 +335,8 @@ static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
/* tx desc */
src = sg->src_addr;
for (i = 0; i < chan->desc->num_sgs; i++) {
+ tx_desc = &chan->tx_ring[chan->tx_idx];
+
if (len > HSDMA_MAX_PLEN)
tlen = HSDMA_MAX_PLEN;
else
@@ -344,7 +346,6 @@ static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
tx_desc->addr1 = src;
tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
} else {
- tx_desc = &chan->tx_ring[chan->tx_idx];
tx_desc->addr0 = src;
tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
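The fix above moves the descriptor lookup to the top of the scatter-gather loop so that tx_desc always refers to the slot at the current ring index, whichever half of the descriptor a segment lands in. A sketch of the resulting fill pattern (illustrative; surrounding ring bookkeeping elided):

/* Sketch: each hardware descriptor carries two buffer slots;
 * fetch the descriptor for the current ring index every pass.
 */
for (i = 0; i < num_sgs; i++) {
	tx_desc = &chan->tx_ring[chan->tx_idx];

	tlen = (len > HSDMA_MAX_PLEN) ? HSDMA_MAX_PLEN : len;
	if (i & 0x1) {
		/* odd segment: second buffer of the descriptor */
		tx_desc->addr1 = src;
		tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
	} else {
		/* even segment: first buffer of the descriptor */
		tx_desc->addr0 = src;
		tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
	}
	src += tlen;
	len -= tlen;
}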
diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
index b8566ed898f1..aa98fbb17013 100644
--- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
+++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
@@ -82,7 +82,7 @@ static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev,
struct property *prop;
const char *function_name, *group_name;
int ret;
- int ngroups;
+ int ngroups = 0;
unsigned int reserved_maps = 0;
for_each_node_with_property(np_config, "group")
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index df3441b815bb..317c9720467c 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -359,8 +359,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
dst_release(skb_dst(skb));
skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
- secpath_put(skb->sp);
- skb->sp = NULL;
+ secpath_reset(skb);
#endif
nf_reset(skb);
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index bc45cf098b04..91871503364d 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -67,7 +67,7 @@ static void *rtllib_ccmp_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+ priv->tfm = (void *)crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(priv->tfm)) {
pr_debug("Could not allocate crypto API aes\n");
priv->tfm = NULL;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
index 041f1b123888..3534ddb900d1 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_ccmp.c
@@ -71,7 +71,7 @@ static void *ieee80211_ccmp_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+ priv->tfm = (void *)crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(priv->tfm)) {
pr_debug("ieee80211_crypt_ccmp: could not allocate crypto API aes\n");
priv->tfm = NULL;
diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
index 9d156efbc9ed..4d473f008aa4 100644
--- a/drivers/staging/rtl8712/mlme_linux.c
+++ b/drivers/staging/rtl8712/mlme_linux.c
@@ -146,7 +146,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie)
p = buff;
p += sprintf(p, "ASSOCINFO(ReqIEs=");
len = sec_ie[1] + 2;
- len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX - 1;
+ len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
for (i = 0; i < len; i++)
p += sprintf(p, "%02x", sec_ie[i]);
p += sprintf(p, ")");
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index a7374006a9fb..986a1d526918 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -1346,7 +1346,7 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
u8 *out_ie, uint in_len)
{
u8 authmode = 0, match;
- u8 sec_ie[255], uncst_oui[4], bkup_ie[255];
+ u8 sec_ie[IW_CUSTOM_MAX], uncst_oui[4], bkup_ie[255];
u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
uint ielength, cnt, remove_cnt;
int iEntry;
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 69c7abc0e3a5..8445d516c93d 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -1565,7 +1565,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
if (pstat->aid > 0) {
DBG_871X(" old AID %d\n", pstat->aid);
} else {
- for (pstat->aid = 1; pstat->aid < NUM_STA; pstat->aid++)
+ for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
break;
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 85077947b9b8..85aba8a503cd 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -109,12 +109,12 @@ static void update_recvframe_phyinfo(union recv_frame *precvframe,
rx_bssid = get_hdr_bssid(wlanhdr);
pkt_info.bssid_match = ((!IsFrameTypeCtrl(wlanhdr)) &&
!pattrib->icv_err && !pattrib->crc_err &&
- !ether_addr_equal(rx_bssid, my_bssid));
+ ether_addr_equal(rx_bssid, my_bssid));
rx_ra = get_ra(wlanhdr);
my_hwaddr = myid(&padapter->eeprompriv);
pkt_info.to_self = pkt_info.bssid_match &&
- !ether_addr_equal(rx_ra, my_hwaddr);
+ ether_addr_equal(rx_ra, my_hwaddr);
pkt_info.is_beacon = pkt_info.bssid_match &&
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index af2234798fa8..db553f2e4c0b 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -1277,7 +1277,7 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
sinfo->tx_packets = psta->sta_stats.tx_pkts;
-
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
}
/* for Ad-Hoc/AP mode */
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 28bfdbdc6e76..b8631baf128d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -2289,7 +2289,7 @@ static int rtw_wx_read32(struct net_device *dev,
exit:
kfree(ptmp);
- return 0;
+ return ret;
}
static int rtw_wx_write32(struct net_device *dev,
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 3647b8f1ed28..5eeb4b93b45b 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -2095,7 +2095,7 @@ static int visornic_resume(struct visor_device *dev,
mod_timer(&devdata->irq_poll_timer, msecs_to_jiffies(2));
rtnl_lock();
- dev_open(netdev);
+ dev_open(netdev, NULL);
rtnl_unlock();
complete_func(dev, 0);
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
index 257030460fb6..d3e23dd70c1b 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -279,7 +279,6 @@ static struct drm_driver driver = {
.gem_free_object_unlocked = vbox_gem_free_object,
.dumb_create = vbox_dumb_create,
.dumb_map_offset = vbox_dumb_mmap_offset,
- .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
index 73395a7536c5..fa933d422951 100644
--- a/drivers/staging/vboxvideo/vbox_drv.h
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -99,8 +99,6 @@ struct vbox_private {
int fb_mtrr;
struct {
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
index 5ecfa7629173..b36ec019c332 100644
--- a/drivers/staging/vboxvideo/vbox_ttm.c
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -35,61 +35,6 @@ static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct vbox_private, ttm.bdev);
}
-static int vbox_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void vbox_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
-
-/**
- * Adds the vbox memory manager object/structures to the global memory manager.
- */
-static int vbox_ttm_global_init(struct vbox_private *vbox)
-{
- struct drm_global_reference *global_ref;
- int ret;
-
- global_ref = &vbox->ttm.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &vbox_ttm_mem_global_init;
- global_ref->release = &vbox_ttm_mem_global_release;
- ret = drm_global_item_ref(global_ref);
- if (ret) {
- DRM_ERROR("Failed setting up TTM memory subsystem.\n");
- return ret;
- }
-
- vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object;
- global_ref = &vbox->ttm.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
-
- ret = drm_global_item_ref(global_ref);
- if (ret) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- drm_global_item_unref(&vbox->ttm.mem_global_ref);
- return ret;
- }
-
- return 0;
-}
-
-/**
- * Removes the vbox memory manager object from the global memory manager.
- */
-static void vbox_ttm_global_release(struct vbox_private *vbox)
-{
- drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
- drm_global_item_unref(&vbox->ttm.mem_global_ref);
-}
-
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct vbox_bo *bo;
@@ -227,18 +172,13 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_device *dev = &vbox->ddev;
struct ttm_bo_device *bdev = &vbox->ttm.bdev;
- ret = vbox_ttm_global_init(vbox);
- if (ret)
- return ret;
-
ret = ttm_bo_device_init(&vbox->ttm.bdev,
- vbox->ttm.bo_global_ref.ref.object,
&vbox_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
- goto err_ttm_global_release;
+ return ret;
}
ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
@@ -260,8 +200,6 @@ int vbox_mm_init(struct vbox_private *vbox)
err_device_release:
ttm_bo_device_release(&vbox->ttm.bdev);
-err_ttm_global_release:
- vbox_ttm_global_release(vbox);
return ret;
}
@@ -275,7 +213,6 @@ void vbox_mm_fini(struct vbox_private *vbox)
arch_phys_wc_del(vbox->fb_mtrr);
#endif
ttm_bo_device_release(&vbox->ttm.bdev);
- vbox_ttm_global_release(vbox);
}
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index ea789376de0f..45de21c210c1 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1795,6 +1795,7 @@ vchiq_compat_ioctl_await_completion(struct file *file,
struct vchiq_await_completion32 args32;
struct vchiq_completion_data32 completion32;
unsigned int *msgbufcount32;
+ unsigned int msgbufcount_native;
compat_uptr_t msgbuf32;
void *msgbuf;
void **msgbufptr;
@@ -1906,7 +1907,11 @@ vchiq_compat_ioctl_await_completion(struct file *file,
sizeof(completion32)))
return -EFAULT;
- args32.msgbufcount--;
+ if (get_user(msgbufcount_native, &args->msgbufcount))
+ return -EFAULT;
+
+ if (!msgbufcount_native)
+ args32.msgbufcount--;
msgbufcount32 =
&((struct vchiq_await_completion32 __user *)arg)->msgbufcount;
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 71888b979ab5..dab09b610723 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -641,8 +641,11 @@ static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
+ struct cxgbit_sock *csk = handle;
+
pr_debug("%s cxgbit_device %p\n", __func__, handle);
kfree_skb(skb);
+ cxgbit_put_csk(csk);
}
static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
@@ -932,8 +935,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
goto out;
csk->mtu = ndev->mtu;
csk->tx_chan = cxgb4_port_chan(ndev);
- csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
- cxgb4_port_viid(ndev));
+ csk->smac_idx =
+ ((struct port_info *)netdev_priv(ndev))->smt_idx;
step = cdev->lldi.ntxq /
cdev->lldi.nchan;
csk->txq_idx = cxgb4_port_idx(ndev) * step;
@@ -968,8 +971,8 @@ cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
port_id = cxgb4_port_idx(ndev);
csk->mtu = dst_mtu(dst);
csk->tx_chan = cxgb4_port_chan(ndev);
- csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
- cxgb4_port_viid(ndev));
+ csk->smac_idx =
+ ((struct port_info *)netdev_priv(ndev))->smt_idx;
step = cdev->lldi.ntxq /
cdev->lldi.nports;
csk->txq_idx = (port_id * step) +
@@ -1206,7 +1209,7 @@ cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
rpl5->opt0 = cpu_to_be64(opt0);
rpl5->opt2 = cpu_to_be32(opt2);
set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
- t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
+ t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index f3f8856bfb68..c011c826fc26 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -58,6 +58,7 @@ static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
return ERR_PTR(-ENOMEM);
kref_init(&cdev->kref);
+ spin_lock_init(&cdev->np_lock);
cdev->lldi = *lldi;
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 36b742932c72..86987da86dd6 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -150,24 +150,26 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd)
static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
{
int tag = -1;
- DEFINE_WAIT(wait);
+ DEFINE_SBQ_WAIT(wait);
struct sbq_wait_state *ws;
+ struct sbitmap_queue *sbq;
if (state == TASK_RUNNING)
return tag;
- ws = &se_sess->sess_tag_pool.ws[0];
+ sbq = &se_sess->sess_tag_pool;
+ ws = &sbq->ws[0];
for (;;) {
- prepare_to_wait_exclusive(&ws->wait, &wait, state);
+ sbitmap_prepare_to_wait(sbq, ws, &wait, state);
if (signal_pending_state(state, current))
break;
- tag = sbitmap_queue_get(&se_sess->sess_tag_pool, cpup);
+ tag = sbitmap_queue_get(sbq, cpup);
if (tag >= 0)
break;
schedule();
}
- finish_wait(&ws->wait, &wait);
+ sbitmap_finish_wait(sbq, ws, &wait);
return tag;
}
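The conversion above replaces the open-coded prepare_to_wait_exclusive()/finish_wait() pair with the sbitmap-aware wait helpers from linux/sbitmap.h, which keep the queue's wake-batch accounting in step with its sleepers. A condensed sketch of the resulting wait loop (state handling simplified):

/* Sketch: block until a tag is available in the sbitmap queue. */
DEFINE_SBQ_WAIT(wait);
struct sbitmap_queue *sbq = &se_sess->sess_tag_pool;
struct sbq_wait_state *ws = &sbq->ws[0];
int tag;

for (;;) {
	sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_INTERRUPTIBLE);
	tag = sbitmap_queue_get(sbq, cpup);
	if (tag >= 0 || signal_pending(current))
		break;
	schedule();
}
sbitmap_finish_wait(sbq, ws, &wait);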
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 47d76c862014..c062d363dce3 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1094,7 +1094,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
break;
}
- __blk_put_request(req->q, req);
+ blk_put_request(req);
kfree(pt);
}
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 92f67d40f2e9..d7105d01859a 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -357,7 +357,7 @@ static int armada_get_temp_legacy(struct thermal_zone_device *thermal,
int ret;
/* Valid check */
- if (armada_is_valid(priv)) {
+ if (!armada_is_valid(priv)) {
dev_err(priv->dev,
"Temperature sensor reading not valid\n");
return -EIO;
@@ -395,7 +395,7 @@ unlock_mutex:
return ret;
}
-static struct thermal_zone_of_device_ops of_ops = {
+static const struct thermal_zone_of_device_ops of_ops = {
.get_temp = armada_get_temp,
};
@@ -526,23 +526,21 @@ static int armada_thermal_probe_legacy(struct platform_device *pdev,
/* First memory region points towards the status register */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EIO;
-
- /*
- * Edit the resource start address and length to map over all the
- * registers, instead of pointing at them one by one.
- */
- res->start -= data->syscon_status_off;
- res->end = res->start + max(data->syscon_status_off,
- max(data->syscon_control0_off,
- data->syscon_control1_off)) +
- sizeof(unsigned int) - 1;
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
+ /*
+ * Fix up from the old individual DT register specification to
+ * cover all the registers. We do this by adjusting the ioremap()
+ * result, which should be fine as ioremap() deals with pages.
+ * However, validate that we do not cross a page boundary while
+ * making this adjustment.
+ */
+ if (((unsigned long)base & ~PAGE_MASK) < data->syscon_status_off)
+ return -EINVAL;
+ base -= data->syscon_status_off;
+
priv->syscon = devm_regmap_init_mmio(&pdev->dev, base,
&armada_thermal_regmap_config);
if (IS_ERR(priv->syscon))
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 23ad4f9f2143..b9d90f0ed504 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Driver for Broadcom BCM2835 SoC temperature sensor
*
* Copyright (C) 2016 Martin Sperl
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 1919f91fa756..e8b1570cc388 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -299,7 +299,7 @@ static int brcmstb_set_trips(void *data, int low, int high)
return 0;
}
-static struct thermal_zone_of_device_ops of_ops = {
+static const struct thermal_zone_of_device_ops of_ops = {
.get_temp = brcmstb_get_temp,
.set_trips = brcmstb_set_trips,
};
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index c4111a98f1a7..2d26ae80e202 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -424,7 +424,7 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
struct platform_device *pdev = data->pdev;
struct device *dev = &pdev->dev;
- data->nr_sensors = 2;
+ data->nr_sensors = 1;
data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) *
data->nr_sensors, GFP_KERNEL);
@@ -589,7 +589,7 @@ static int hisi_thermal_probe(struct platform_device *pdev)
return ret;
}
- ret = platform_get_irq_byname(pdev, sensor->irq_name);
+ ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index 47623da0f91b..bbd73c5a4a4e 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -241,8 +241,8 @@ static int stm_thermal_read_factory_settings(struct stm_thermal_sensor *sensor)
sensor->t0 = TS1_T0_VAL1;
/* Retrieve fmt0 and put it on Hz */
- sensor->fmt0 = ADJUST * readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET)
- & TS1_FMT0_MASK;
+ sensor->fmt0 = ADJUST * (readl_relaxed(sensor->base +
+ DTS_T0VALR1_OFFSET) & TS1_FMT0_MASK);
/* Retrieve ramp coefficient */
sensor->ramp_coeff = readl_relaxed(sensor->base + DTS_RAMPVALR_OFFSET) &
@@ -532,6 +532,10 @@ static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
if (ret)
return ret;
+ ret = stm_thermal_read_factory_settings(sensor);
+ if (ret)
+ goto thermal_unprepare;
+
ret = stm_thermal_calibration(sensor);
if (ret)
goto thermal_unprepare;
@@ -636,10 +640,6 @@ static int stm_thermal_probe(struct platform_device *pdev)
/* Populate sensor */
sensor->base = base;
- ret = stm_thermal_read_factory_settings(sensor);
- if (ret)
- return ret;
-
sensor->clk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(sensor->clk)) {
dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n",
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 52ff854f0d6c..cd96994dc094 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -863,6 +863,30 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(key, 0600, key_show, key_store);
+static void nvm_authenticate_start(struct tb_switch *sw)
+{
+ struct pci_dev *root_port;
+
+ /*
+	 * During a host router NVM upgrade we should not allow the root
+	 * port to go into D3cold, because some root ports cannot trigger
+	 * PME themselves. To be on the safe side, keep the root port in
+	 * D0 during the whole upgrade process.
+ */
+ root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ if (root_port)
+ pm_runtime_get_noresume(&root_port->dev);
+}
+
+static void nvm_authenticate_complete(struct tb_switch *sw)
+{
+ struct pci_dev *root_port;
+
+ root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+ if (root_port)
+ pm_runtime_put(&root_port->dev);
+}
+
static ssize_t nvm_authenticate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -912,10 +936,18 @@ static ssize_t nvm_authenticate_store(struct device *dev,
sw->nvm->authenticating = true;
- if (!tb_route(sw))
+ if (!tb_route(sw)) {
+ /*
+ * Keep root port from suspending as long as the
+ * NVM upgrade process is running.
+ */
+ nvm_authenticate_start(sw);
ret = nvm_authenticate_host(sw);
- else
+ if (ret)
+ nvm_authenticate_complete(sw);
+ } else {
ret = nvm_authenticate_device(sw);
+ }
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
}
@@ -1334,6 +1366,10 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
if (ret <= 0)
return ret;
+ /* Now we can allow root port to suspend again */
+ if (!tb_route(sw))
+ nvm_authenticate_complete(sw);
+
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
tb_switch_set_uuid(sw);
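The two helpers added above follow the usual runtime-PM pinning pattern: take a usage-count reference before the long-running NVM write so the root port cannot enter D3cold, and drop it either on immediate failure or once the flash authentication status has been read back. In outline (start_long_operation() is a hypothetical placeholder):

/* Sketch: hold a runtime-PM reference across a long operation. */
pm_runtime_get_noresume(&root_port->dev);	/* no wakeup needed; count only */

ret = start_long_operation();			/* hypothetical placeholder */
if (ret)
	pm_runtime_put(&root_port->dev);	/* failed to start: release now */
/* on success, pm_runtime_put() runs when completion is later observed */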
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index dd5e1cede2b5..c3f933d10295 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -213,17 +213,17 @@ static int mtk8250_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev)) {
- err = mtk8250_runtime_resume(&pdev->dev);
- if (err)
- return err;
- }
+ err = mtk8250_runtime_resume(&pdev->dev);
+ if (err)
+ return err;
data->line = serial8250_register_8250_port(&uart);
if (data->line < 0)
return data->line;
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
return 0;
}
@@ -234,13 +234,11 @@ static int mtk8250_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
serial8250_unregister_port(data->line);
+ mtk8250_runtime_suspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- mtk8250_runtime_suspend(&pdev->dev);
-
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index f776b3eafb96..3f779d25ec0c 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -552,30 +552,11 @@ static unsigned int serial_icr_read(struct uart_8250_port *up, int offset)
*/
static void serial8250_clear_fifos(struct uart_8250_port *p)
{
- unsigned char fcr;
- unsigned char clr_mask = UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT;
-
if (p->capabilities & UART_CAP_FIFO) {
- /*
- * Make sure to avoid changing FCR[7:3] and ENABLE_FIFO bits.
- * In case ENABLE_FIFO is not set, there is nothing to flush
- * so just return. Furthermore, on certain implementations of
- * the 8250 core, the FCR[7:3] bits may only be changed under
- * specific conditions and changing them if those conditions
- * are not met can have nasty side effects. One such core is
- * the 8250-omap present in TI AM335x.
- */
- fcr = serial_in(p, UART_FCR);
-
- /* FIFO is not enabled, there's nothing to clear. */
- if (!(fcr & UART_FCR_ENABLE_FIFO))
- return;
-
- fcr |= clr_mask;
- serial_out(p, UART_FCR, fcr);
-
- fcr &= ~clr_mask;
- serial_out(p, UART_FCR, fcr);
+ serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO);
+ serial_out(p, UART_FCR, UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+ serial_out(p, UART_FCR, 0);
}
}
@@ -1467,7 +1448,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
* Enable previously disabled RX interrupts.
*/
if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
- serial8250_clear_fifos(p);
+ serial8250_clear_and_reinit_fifos(p);
p->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_port_out(&p->port, UART_IER, p->ier);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index baeeeaec3f03..6fb312e7af71 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -233,7 +233,7 @@ static void kgdboc_put_char(u8 chr)
static int param_set_kgdboc_var(const char *kmessage,
const struct kernel_param *kp)
{
- int len = strlen(kmessage);
+ size_t len = strlen(kmessage);
if (len >= MAX_CONFIG_LEN) {
pr_err("config string too long\n");
@@ -254,7 +254,7 @@ static int param_set_kgdboc_var(const char *kmessage,
strcpy(config, kmessage);
/* Chop out \n char as a result of echo */
- if (config[len - 1] == '\n')
+ if (len && config[len - 1] == '\n')
config[len - 1] = '\0';
if (configured == 1)
diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
index 70a4ea4eaa6e..990376576970 100644
--- a/drivers/tty/serial/suncore.c
+++ b/drivers/tty/serial/suncore.c
@@ -112,6 +112,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
mode = of_get_property(dp, mode_prop, NULL);
if (!mode)
mode = "9600,8,n,1,-";
+ of_node_put(dp);
}
cflag = CREAD | HUPCL | CLOCAL;
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 6cf3e9b0728f..3e77475668c0 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1394,22 +1394,43 @@ static inline struct console *SUNSU_CONSOLE(void)
static enum su_type su_get_type(struct device_node *dp)
{
struct device_node *ap = of_find_node_by_path("/aliases");
+ enum su_type rc = SU_PORT_PORT;
if (ap) {
const char *keyb = of_get_property(ap, "keyboard", NULL);
const char *ms = of_get_property(ap, "mouse", NULL);
+ struct device_node *match;
if (keyb) {
- if (dp == of_find_node_by_path(keyb))
- return SU_PORT_KBD;
+ match = of_find_node_by_path(keyb);
+
+ /*
+			 * The pointer is used as an identifier, not
+			 * as a pointer, so we can drop the refcount
+			 * on the of_node immediately after getting it.
+ */
+ of_node_put(match);
+
+ if (dp == match) {
+ rc = SU_PORT_KBD;
+ goto out;
+ }
}
if (ms) {
- if (dp == of_find_node_by_path(ms))
- return SU_PORT_MS;
+ match = of_find_node_by_path(ms);
+
+ of_node_put(match);
+
+ if (dp == match) {
+ rc = SU_PORT_MS;
+ goto out;
+ }
}
}
- return SU_PORT_PORT;
+out:
+ of_node_put(ap);
+ return rc;
}
static int su_probe(struct platform_device *op)
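The su_get_type() rework above relies on the fact that of_find_node_by_path() returns its node with the refcount raised, while the result here is only ever compared against dp, never dereferenced. The reference can therefore be dropped immediately, and /aliases is put on the single exit path. The idiom in isolation (handle_match() is a hypothetical placeholder):

/* Sketch: refcount-safe pointer-identity check on a device node. */
struct device_node *match = of_find_node_by_path(path);

of_node_put(match);		/* safe: pointer used only as an identity */
if (dp == match)
	handle_match();		/* hypothetical placeholder */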
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 50f567b6a66e..28f87fd6a28e 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -61,20 +61,19 @@ static void tty_audit_log(const char *description, dev_t dev,
unsigned char *data, size_t size)
{
struct audit_buffer *ab;
- struct task_struct *tsk = current;
- pid_t pid = task_pid_nr(tsk);
- uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
- uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
- unsigned int sessionid = audit_get_sessionid(tsk);
+ pid_t pid = task_pid_nr(current);
+ uid_t uid = from_kuid(&init_user_ns, task_uid(current));
+ uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(current));
+ unsigned int sessionid = audit_get_sessionid(current);
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY);
if (ab) {
- char name[sizeof(tsk->comm)];
+ char name[sizeof(current->comm)];
audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
" minor=%d comm=", description, pid, uid,
loginuid, sessionid, MAJOR(dev), MINOR(dev));
- get_task_comm(name, tsk);
+ get_task_comm(name, current);
audit_log_untrustedstring(ab, name);
audit_log_format(ab, " data=");
audit_log_n_hex(ab, data, size);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index ee80dfbd5442..687250ec8032 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1373,7 +1373,13 @@ err_release_lock:
return ERR_PTR(retval);
}
-static void tty_free_termios(struct tty_struct *tty)
+/**
+ * tty_save_termios() - save tty termios data in driver table
+ * @tty: tty whose termios data to save
+ *
+ * Locking: Caller guarantees serialisation with tty_init_termios().
+ */
+void tty_save_termios(struct tty_struct *tty)
{
struct ktermios *tp;
int idx = tty->index;
@@ -1392,6 +1398,7 @@ static void tty_free_termios(struct tty_struct *tty)
}
*tp = tty->termios;
}
+EXPORT_SYMBOL_GPL(tty_save_termios);
/**
* tty_flush_works - flush all works of a tty/pty pair
@@ -1491,7 +1498,7 @@ static void release_tty(struct tty_struct *tty, int idx)
WARN_ON(!mutex_is_locked(&tty_mutex));
if (tty->ops->shutdown)
tty->ops->shutdown(tty);
- tty_free_termios(tty);
+ tty_save_termios(tty);
tty_driver_remove_tty(tty->driver, tty);
tty->port->itty = NULL;
if (tty->link)
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index cb6075096a5b..044c3cbdcfa4 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -633,7 +633,8 @@ void tty_port_close(struct tty_port *port, struct tty_struct *tty,
if (tty_port_close_start(port, tty, filp) == 0)
return;
tty_port_shutdown(port, tty);
- set_bit(TTY_IO_ERROR, &tty->flags);
+ if (!port->console)
+ set_bit(TTY_IO_ERROR, &tty->flags);
tty_port_close_end(port, tty);
tty_port_tty_set(port, NULL);
}
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index c2493d011225..3c5169eb23f5 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -204,9 +204,11 @@ hv_uio_open(struct uio_info *info, struct inode *inode)
if (atomic_inc_return(&pdata->refcnt) != 1)
return 0;
+ vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
+ vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
+
ret = vmbus_connect_ring(dev->channel,
hv_uio_channel_cb, dev->channel);
-
if (ret == 0)
dev->channel->inbound.ring_buffer->interrupt_mask = 1;
else
@@ -334,9 +336,6 @@ hv_uio_probe(struct hv_device *dev,
goto fail_close;
}
- vmbus_set_chn_rescind_callback(channel, hv_uio_rescind);
- vmbus_set_sc_create_callback(channel, hv_uio_new_channel);
-
ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
if (ret)
dev_notice(&dev->device,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0f9381b69a3b..f76b2e0aba9d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2251,7 +2251,7 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
/* descriptor may appear anywhere in config */
err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
le16_to_cpu(udev->config[0].desc.wTotalLength),
- USB_DT_OTG, (void **) &desc);
+ USB_DT_OTG, (void **) &desc, sizeof(*desc));
if (err || !(desc->bmAttributes & USB_OTG_HNP))
return 0;
@@ -5163,7 +5163,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
/* Handle notifying userspace about hub over-current events */
static void port_over_current_notify(struct usb_port *port_dev)
{
- static char *envp[] = { NULL, NULL, NULL };
+ char *envp[3];
struct device *hub_dev;
char *port_dev_path;
@@ -5187,6 +5187,7 @@ static void port_over_current_notify(struct usb_port *port_dev)
if (!envp[1])
goto exit;
+ envp[2] = NULL;
kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
kfree(envp[1]);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f9ff03e6af93..514c5214ddb2 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -209,6 +209,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
+ { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -330,6 +333,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Midiman M-Audio Keystation 88es */
{ USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* SanDisk Ultra Fit and Ultra Flair */
+ { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
+ { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 79d8bd7a612e..4ebfbd737905 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -832,14 +832,14 @@ EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
*/
int __usb_get_extra_descriptor(char *buffer, unsigned size,
- unsigned char type, void **ptr)
+ unsigned char type, void **ptr, size_t minsize)
{
struct usb_descriptor_header *header;
while (size >= sizeof(struct usb_descriptor_header)) {
header = (struct usb_descriptor_header *)buffer;
- if (header->bLength < 2) {
+ if (header->bLength < 2 || header->bLength > size) {
printk(KERN_ERR
"%s: bogus descriptor, type %d length %d\n",
usbcore_name,
@@ -848,7 +848,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
return -1;
}
- if (header->bDescriptorType == type) {
+ if (header->bDescriptorType == type && header->bLength >= minsize) {
*ptr = header;
return 0;
}
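With the minsize parameter added above, __usb_get_extra_descriptor() only returns a descriptor at least as large as what the caller intends to dereference, and the walker now also rejects bLength values that would overrun the buffer. A typical call site then looks like this (mirroring the hub.c hunk earlier; do_hnp_setup() is a hypothetical placeholder):

struct usb_otg_descriptor *desc;
int err;

err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
		le16_to_cpu(udev->config[0].desc.wTotalLength),
		USB_DT_OTG, (void **)&desc, sizeof(*desc));
if (!err && (desc->bmAttributes & USB_OTG_HNP))
	do_hnp_setup();		/* at least sizeof(*desc) bytes are valid */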
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 9faad896b3a1..9f92ee03dde7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1470,9 +1470,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
unsigned transfer_in_flight;
unsigned started;
- if (dep->flags & DWC3_EP_STALL)
- return 0;
-
if (dep->number > 1)
trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
else
@@ -1494,8 +1491,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
else
dep->flags |= DWC3_EP_STALL;
} else {
- if (!(dep->flags & DWC3_EP_STALL))
- return 0;
ret = dwc3_send_clear_stall_ep_cmd(dep);
if (ret)
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 1000d864929c..737bd77a575d 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -401,12 +401,12 @@ done:
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
struct usb_request *req;
- struct usb_request *tmp;
unsigned long flags;
/* fill unused rxq slots with some skb */
spin_lock_irqsave(&dev->req_lock, flags);
- list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
+ while (!list_empty(&dev->rx_reqs)) {
+ req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
list_del_init(&req->list);
spin_unlock_irqrestore(&dev->req_lock, flags);
@@ -879,7 +879,7 @@ int gether_register_netdev(struct net_device *net)
sa.sa_family = net->type;
memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
rtnl_lock();
- status = dev_set_mac_address(net, &sa);
+ status = dev_set_mac_address(net, &sa, NULL);
rtnl_unlock();
if (status)
pr_warn("cannot set self ethernet address: %d\n", status);
@@ -1125,7 +1125,6 @@ void gether_disconnect(struct gether *link)
{
struct eth_dev *dev = link->ioport;
struct usb_request *req;
- struct usb_request *tmp;
WARN_ON(!dev);
if (!dev)
@@ -1142,7 +1141,8 @@ void gether_disconnect(struct gether *link)
*/
usb_ep_disable(link->in_ep);
spin_lock(&dev->req_lock);
- list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) {
+ while (!list_empty(&dev->tx_reqs)) {
+ req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
list_del(&req->list);
spin_unlock(&dev->req_lock);
@@ -1154,7 +1154,8 @@ void gether_disconnect(struct gether *link)
usb_ep_disable(link->out_ep);
spin_lock(&dev->req_lock);
- list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
+ while (!list_empty(&dev->rx_reqs)) {
+ req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
list_del(&req->list);
spin_unlock(&dev->req_lock);
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 3a16431da321..fcf13ef33b31 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2033,6 +2033,7 @@ static inline int machine_without_vbus_sense(void)
{
return machine_is_omap_innovator()
|| machine_is_omap_osk()
+ || machine_is_omap_palmte()
|| machine_is_sx1()
/* No known omap7xx boards with vbus sense */
|| cpu_is_omap7xx();
@@ -2041,7 +2042,7 @@ static inline int machine_without_vbus_sense(void)
static int omap_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
- int status = -ENODEV;
+ int status;
struct omap_ep *ep;
unsigned long flags;
@@ -2079,6 +2080,7 @@ static int omap_udc_start(struct usb_gadget *g,
goto done;
}
} else {
+ status = 0;
if (can_pullup(udc))
pullup_enable(udc);
else
@@ -2593,9 +2595,22 @@ omap_ep_setup(char *name, u8 addr, u8 type,
static void omap_udc_release(struct device *dev)
{
- complete(udc->done);
+ pullup_disable(udc);
+ if (!IS_ERR_OR_NULL(udc->transceiver)) {
+ usb_put_phy(udc->transceiver);
+ udc->transceiver = NULL;
+ }
+ omap_writew(0, UDC_SYSCON1);
+ remove_proc_file();
+ if (udc->dc_clk) {
+ if (udc->clk_requested)
+ omap_udc_enable_clock(0);
+ clk_put(udc->hhc_clk);
+ clk_put(udc->dc_clk);
+ }
+ if (udc->done)
+ complete(udc->done);
kfree(udc);
- udc = NULL;
}
static int
@@ -2627,6 +2642,7 @@ omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
udc->gadget.speed = USB_SPEED_UNKNOWN;
udc->gadget.max_speed = USB_SPEED_FULL;
udc->gadget.name = driver_name;
+ udc->gadget.quirk_ep_out_aligned_size = 1;
udc->transceiver = xceiv;
/* ep0 is special; put it right after the SETUP buffer */
@@ -2867,8 +2883,8 @@ bad_on_1710:
udc->clr_halt = UDC_RESET_EP;
/* USB general purpose IRQ: ep0, state changes, dma, etc */
- status = request_irq(pdev->resource[1].start, omap_udc_irq,
- 0, driver_name, udc);
+ status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
+ omap_udc_irq, 0, driver_name, udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[1].start, status);
@@ -2876,20 +2892,20 @@ bad_on_1710:
}
/* USB "non-iso" IRQ (PIO for all but ep0) */
- status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
- 0, "omap_udc pio", udc);
+ status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
+ omap_udc_pio_irq, 0, "omap_udc pio", udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[2].start, status);
- goto cleanup2;
+ goto cleanup1;
}
#ifdef USE_ISO
- status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
- 0, "omap_udc iso", udc);
+ status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
+ omap_udc_iso_irq, 0, "omap_udc iso", udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[3].start, status);
- goto cleanup3;
+ goto cleanup1;
}
#endif
if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
@@ -2900,23 +2916,8 @@ bad_on_1710:
}
create_proc_file();
- status = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
- omap_udc_release);
- if (status)
- goto cleanup4;
-
- return 0;
-
-cleanup4:
- remove_proc_file();
-
-#ifdef USE_ISO
-cleanup3:
- free_irq(pdev->resource[2].start, udc);
-#endif
-
-cleanup2:
- free_irq(pdev->resource[1].start, udc);
+ return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
+ omap_udc_release);
cleanup1:
kfree(udc);
@@ -2943,42 +2944,15 @@ static int omap_udc_remove(struct platform_device *pdev)
{
DECLARE_COMPLETION_ONSTACK(done);
- if (!udc)
- return -ENODEV;
-
- usb_del_gadget_udc(&udc->gadget);
- if (udc->driver)
- return -EBUSY;
-
udc->done = &done;
- pullup_disable(udc);
- if (!IS_ERR_OR_NULL(udc->transceiver)) {
- usb_put_phy(udc->transceiver);
- udc->transceiver = NULL;
- }
- omap_writew(0, UDC_SYSCON1);
-
- remove_proc_file();
-
-#ifdef USE_ISO
- free_irq(pdev->resource[3].start, udc);
-#endif
- free_irq(pdev->resource[2].start, udc);
- free_irq(pdev->resource[1].start, udc);
+ usb_del_gadget_udc(&udc->gadget);
- if (udc->dc_clk) {
- if (udc->clk_requested)
- omap_udc_enable_clock(0);
- clk_put(udc->hhc_clk);
- clk_put(udc->dc_clk);
- }
+ wait_for_completion(&done);
release_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
- wait_for_completion(&done);
-
return 0;
}
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 684d6f074c3a..09a8ebd95588 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -640,7 +640,7 @@ static int hwahc_security_create(struct hwahc *hwahc)
top = itr + itr_size;
result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
- USB_DT_SECURITY, (void **) &secd);
+ USB_DT_SECURITY, (void **) &secd, sizeof(*secd));
if (result == -1) {
dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
return 0;
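
The new sizeof(*secd) argument lets __usb_get_extra_descriptor() reject a matching descriptor that is shorter than the structure the caller is about to cast it to. A sketch of the bounds discipline, assuming the helper walks standard usb_descriptor_header records (the example_ name is illustrative):

    /* Sketch: find a typed descriptor, refusing undersized matches. */
    static int example_get_extra_descriptor(char *buf, unsigned int size,
                                            unsigned char type, void **ptr,
                                            size_t minsize)
    {
            struct usb_descriptor_header *h;

            while (size >= sizeof(*h)) {
                    h = (struct usb_descriptor_header *)buf;
                    if (h->bLength < sizeof(*h) || h->bLength > size)
                            break;                  /* malformed data */
                    if (h->bDescriptorType == type && h->bLength >= minsize) {
                            *ptr = h;
                            return 0;
                    }
                    buf += h->bLength;
                    size -= h->bLength;
            }
            return -1;      /* matches the -1 check in the caller above */
    }
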
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 94aca1b5ac8a..01b5818a4be5 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1507,7 +1507,8 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
portsc_buf[port_index] = 0;
/* Bail out if a USB3 port has a new device in link training */
- if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+ if ((hcd->speed >= HCD_USB3) &&
+ (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index a9515265db4d..a9ec7051f286 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -139,6 +139,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x43bb))
xhci->quirks |= XHCI_SUSPEND_DELAY;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ (pdev->device == 0x15e0 || pdev->device == 0x15e1))
+ xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
+
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index c928dbbff881..dae3be1b9c8f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -968,6 +968,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
unsigned int delay = XHCI_MAX_HALT_USEC;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
+ u32 res;
if (!hcd->state)
return 0;
@@ -1021,11 +1022,28 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
command = readl(&xhci->op_regs->command);
command |= CMD_CSS;
writel(command, &xhci->op_regs->command);
+ xhci->broken_suspend = 0;
if (xhci_handshake(&xhci->op_regs->status,
STS_SAVE, 0, 10 * 1000)) {
- xhci_warn(xhci, "WARN: xHC save state timeout\n");
- spin_unlock_irq(&xhci->lock);
- return -ETIMEDOUT;
+ /*
+ * AMD SNPS xHC 3.0 occasionally fails to clear the
+ * SSS bit of USBSTS; the driver keeps polling for
+ * BIT(8) to clear, which never happens, so it assumes
+ * the controller is unresponsive and times out. To work
+ * around this, check that the SRE and HCE bits are not
+ * set (per xHCI Section 5.4.2) and bypass the timeout.
+ */
+ res = readl(&xhci->op_regs->status);
+ if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
+ (((res & STS_SRE) == 0) &&
+ ((res & STS_HCE) == 0))) {
+ xhci->broken_suspend = 1;
+ } else {
+ xhci_warn(xhci, "WARN: xHC save state timeout\n");
+ spin_unlock_irq(&xhci->lock);
+ return -ETIMEDOUT;
+ }
}
spin_unlock_irq(&xhci->lock);
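
The quirk spans both suspend and resume: the save-state timeout is tolerated and recorded when the controller reports no save or host errors, and resume then takes the same full re-init path used after hibernation. Condensed (save_timed_out stands in for the xhci_handshake() result):

    /* suspend side: tolerate the timeout, remember it */
    if (save_timed_out && (xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
        !(res & (STS_SRE | STS_HCE)))
            xhci->broken_suspend = 1;

    /* resume side: force the full reinit path */
    if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
            hibernated = true;
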
@@ -1078,7 +1096,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
spin_lock_irq(&xhci->lock);
- if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
hibernated = true;
if (!hibernated) {
@@ -4496,6 +4514,14 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ /* Prevent U1 if service interval is shorter than U1 exit latency */
+ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
+ if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+ dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
+ return USB3_LPM_DISABLED;
+ }
+ }
+
if (xhci->quirks & XHCI_INTEL_HOST)
timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
else
@@ -4552,6 +4578,14 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ /* Prevent U2 if service interval is shorter than U2 exit latency */
+ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
+ if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+ dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
+ return USB3_LPM_DISABLED;
+ }
+ }
+
if (xhci->quirks & XHCI_INTEL_HOST)
timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
else
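
The two hunks duplicate one guard: a periodic endpoint whose service interval does not exceed the exit latency would spend every interval waking the link, so the state is disabled outright. A sketch of the shared predicate (a hypothetical refactoring, not what the patch does):

    /* Sketch: allow the LPM state only if the endpoint service interval
     * (ESIT) is longer than the state's maximum exit latency (MEL). */
    static bool example_lpm_exit_latency_ok(struct usb_endpoint_descriptor *desc,
                                            unsigned int mel)
    {
            if (!usb_endpoint_xfer_int(desc) && !usb_endpoint_xfer_isoc(desc))
                    return true;    /* guard applies to periodic endpoints only */

            return xhci_service_interval_to_ns(desc) > mel;
    }

    /* usage: if (!example_lpm_exit_latency_ok(desc, udev->u1_params.mel))
     *                return USB3_LPM_DISABLED; */
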
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 260b259b72bc..011dd45f8718 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1850,6 +1850,7 @@ struct xhci_hcd {
#define XHCI_ZERO_64B_REGS BIT_ULL(32)
#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
#define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34)
+#define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35)
unsigned int num_active_eps;
unsigned int limit_active_eps;
@@ -1862,6 +1863,8 @@ struct xhci_hcd {
unsigned sw_lpm_support:1;
/* support xHCI 1.0 spec USB2 hardware LPM */
unsigned hw_lpm_support:1;
+ /* Broken-suspend flag for the SNPS suspend/resume issue */
+ unsigned broken_suspend:1;
/* cached usb2 extended protocol capabilities */
u32 *ext_caps;
unsigned int num_ext_caps;
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 85b48c6ddc7e..39ca31b4de46 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -51,6 +51,7 @@ static const struct usb_device_id appledisplay_table[] = {
{ APPLEDISPLAY_DEVICE(0x921c) },
{ APPLEDISPLAY_DEVICE(0x921d) },
{ APPLEDISPLAY_DEVICE(0x9222) },
+ { APPLEDISPLAY_DEVICE(0x9226) },
{ APPLEDISPLAY_DEVICE(0x9236) },
/* Terminating entry */
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 17940589c647..7d289302ff6c 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -101,7 +101,6 @@ static int usb_console_setup(struct console *co, char *options)
cflag |= PARENB;
break;
}
- co->cflag = cflag;
/*
* no need to check the index here: if the index is wrong, console
@@ -164,6 +163,7 @@ static int usb_console_setup(struct console *co, char *options)
serial->type->set_termios(tty, port, &dummy);
tty_port_tty_set(&port->port, NULL);
+ tty_save_termios(tty);
tty_kref_put(tty);
}
tty_port_set_initialized(&port->port, 1);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e24ff16d4147..1ce27f3ff7a7 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1164,6 +1164,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -1328,6 +1332,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
@@ -1531,6 +1536,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
.driver_info = RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1758,6 +1764,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
.driver_info = RSVD(5) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -1940,7 +1947,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) }, /* HP lt4132 (Huawei ME906s-158) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
+ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index d17cd95b55bb..6b2140f966ef 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -27,4 +27,14 @@ UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
"USB Card Reader",
USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+UNUSUAL_DEV(0x0bda, 0x0177, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
+UNUSUAL_DEV(0x0bda, 0x0184, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
#endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */
diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
index c84c8c189e90..1c0033ad8738 100644
--- a/drivers/usb/typec/tps6598x.c
+++ b/drivers/usb/typec/tps6598x.c
@@ -501,19 +501,19 @@ static int tps6598x_remove(struct i2c_client *client)
return 0;
}
-static const struct acpi_device_id tps6598x_acpi_match[] = {
- { "INT3515", 0 },
+static const struct i2c_device_id tps6598x_id[] = {
+ { "tps6598x" },
{ }
};
-MODULE_DEVICE_TABLE(acpi, tps6598x_acpi_match);
+MODULE_DEVICE_TABLE(i2c, tps6598x_id);
static struct i2c_driver tps6598x_i2c_driver = {
.driver = {
.name = "tps6598x",
- .acpi_match_table = tps6598x_acpi_match,
},
.probe_new = tps6598x_probe,
.remove = tps6598x_remove,
+ .id_table = tps6598x_id,
};
module_i2c_driver(tps6598x_i2c_driver);
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 68ddee86a886..edb7263bff40 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -316,7 +316,7 @@ ssize_t wusb_prf(void *out, size_t out_size,
goto error_setkey_cbc;
}
- tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+ tfm_aes = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(tfm_aes)) {
result = PTR_ERR(tfm_aes);
printk(KERN_ERR "E: can't load AES: %d\n", (int)result);
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 42dc1d3d71cf..d0f8e4f5a039 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -38,3 +38,9 @@ config VFIO_PCI_IGD
and LPC bridge config space.
To enable Intel IGD assignment through vfio-pci, say Y.
+
+config VFIO_PCI_NVLINK2
+ def_bool y
+ depends on VFIO_PCI && PPC_POWERNV
+ help
+ VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
diff --git a/drivers/vfio/pci/Makefile b/drivers/vfio/pci/Makefile
index 76d8ec058edd..9662c063a6b1 100644
--- a/drivers/vfio/pci/Makefile
+++ b/drivers/vfio/pci/Makefile
@@ -1,5 +1,6 @@
vfio-pci-y := vfio_pci.o vfio_pci_intrs.o vfio_pci_rdwr.o vfio_pci_config.o
vfio-pci-$(CONFIG_VFIO_PCI_IGD) += vfio_pci_igd.o
+vfio-pci-$(CONFIG_VFIO_PCI_NVLINK2) += vfio_pci_nvlink2.o
obj-$(CONFIG_VFIO_PCI) += vfio-pci.o
diff --git a/drivers/vfio/pci/trace.h b/drivers/vfio/pci/trace.h
new file mode 100644
index 000000000000..228ccdb8d1c8
--- /dev/null
+++ b/drivers/vfio/pci/trace.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * VFIO PCI mmap/mmap_fault tracepoints
+ *
+ * Copyright (C) 2018 IBM Corp. All rights reserved.
+ * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_pci
+
+#if !defined(_TRACE_VFIO_PCI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VFIO_PCI_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_pci_nvgpu_mmap_fault,
+ TP_PROTO(struct pci_dev *pdev, unsigned long hpa, unsigned long ua,
+ vm_fault_t ret),
+ TP_ARGS(pdev, hpa, ua, ret),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(unsigned long, hpa)
+ __field(unsigned long, ua)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->name = dev_name(&pdev->dev);
+ __entry->hpa = hpa;
+ __entry->ua = ua;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%s: %lx -> %lx ret=%d", __entry->name, __entry->hpa,
+ __entry->ua, __entry->ret)
+);
+
+TRACE_EVENT(vfio_pci_nvgpu_mmap,
+ TP_PROTO(struct pci_dev *pdev, unsigned long hpa, unsigned long ua,
+ unsigned long size, int ret),
+ TP_ARGS(pdev, hpa, ua, size, ret),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(unsigned long, hpa)
+ __field(unsigned long, ua)
+ __field(unsigned long, size)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->name = dev_name(&pdev->dev);
+ __entry->hpa = hpa;
+ __entry->ua = ua;
+ __entry->size = size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%s: %lx -> %lx size=%lx ret=%d", __entry->name, __entry->hpa,
+ __entry->ua, __entry->size, __entry->ret)
+);
+
+TRACE_EVENT(vfio_pci_npu2_mmap,
+ TP_PROTO(struct pci_dev *pdev, unsigned long hpa, unsigned long ua,
+ unsigned long size, int ret),
+ TP_ARGS(pdev, hpa, ua, size, ret),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(unsigned long, hpa)
+ __field(unsigned long, ua)
+ __field(unsigned long, size)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->name = dev_name(&pdev->dev);
+ __entry->hpa = hpa;
+ __entry->ua = ua;
+ __entry->size = size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("%s: %lx -> %lx size=%lx ret=%d", __entry->name, __entry->hpa,
+ __entry->ua, __entry->size, __entry->ret)
+);
+
+#endif /* _TRACE_VFIO_PCI_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
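
Exactly one translation unit must define CREATE_TRACE_POINTS before including this header so the tracepoint bodies are emitted once; the new vfio_pci_nvlink2.c below does precisely that. The usage pattern, in brief:

    /* In one .c file only: instantiate the tracepoints. */
    #define CREATE_TRACE_POINTS
    #include "trace.h"

    /* Everywhere else: plain include, then fire like a function call. */
    trace_vfio_pci_npu2_mmap(vdev->pdev, data->mmio_atsd, vma->vm_start,
                             vma->vm_end - vma->vm_start, ret);
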
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 50cdedfca9fe..a89fa5d4e877 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -289,14 +289,37 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
if (ret) {
dev_warn(&vdev->pdev->dev,
"Failed to setup Intel IGD regions\n");
- vfio_pci_disable(vdev);
- return ret;
+ goto disable_exit;
+ }
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
+ IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
+ ret = vfio_pci_nvdia_v100_nvlink2_init(vdev);
+ if (ret && ret != -ENODEV) {
+ dev_warn(&vdev->pdev->dev,
+ "Failed to setup NVIDIA NV2 RAM region\n");
+ goto disable_exit;
+ }
+ }
+
+ if (pdev->vendor == PCI_VENDOR_ID_IBM &&
+ IS_ENABLED(CONFIG_VFIO_PCI_NVLINK2)) {
+ ret = vfio_pci_ibm_npu2_init(vdev);
+ if (ret && ret != -ENODEV) {
+ dev_warn(&vdev->pdev->dev,
+ "Failed to setup NVIDIA NV2 ATSD region\n");
+ goto disable_exit;
}
}
vfio_pci_probe_mmaps(vdev);
return 0;
+
+disable_exit:
+ vfio_pci_disable(vdev);
+ return ret;
}
static void vfio_pci_disable(struct vfio_pci_device *vdev)
@@ -750,6 +773,12 @@ static long vfio_pci_ioctl(void *device_data,
if (ret)
return ret;
+ if (vdev->region[i].ops->add_capability) {
+ ret = vdev->region[i].ops->add_capability(vdev,
+ &vdev->region[i], &caps);
+ if (ret)
+ return ret;
+ }
}
}
@@ -1117,6 +1146,15 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
return -EINVAL;
if ((vma->vm_flags & VM_SHARED) == 0)
return -EINVAL;
+ if (index >= VFIO_PCI_NUM_REGIONS) {
+ int regnum = index - VFIO_PCI_NUM_REGIONS;
+ struct vfio_pci_region *region = vdev->region + regnum;
+
+ if (region && region->ops && region->ops->mmap &&
+ (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
+ return region->ops->mmap(vdev, region, vma);
+ return -EINVAL;
+ }
if (index >= VFIO_PCI_ROM_REGION_INDEX)
return -EINVAL;
if (!vdev->bar_mmap_supported[index])
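
Device-specific regions live past the fixed BAR/ROM indices, so the mmap path above recovers the vdev->region[] slot by subtracting VFIO_PCI_NUM_REGIONS. The index itself is decoded from the file offset earlier in vfio_pci_mmap(); a sketch of that decoding, using the macros from vfio_pci_private.h:

    /* Sketch: the mmap offset encodes the region index in its top bits. */
    unsigned int index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

    if (index >= VFIO_PCI_NUM_REGIONS) {
            struct vfio_pci_region *region =
                    &vdev->region[index - VFIO_PCI_NUM_REGIONS];

            if (region->ops && region->ops->mmap &&
                (region->flags & VFIO_REGION_INFO_FLAG_MMAP))
                    return region->ops->mmap(vdev, region, vma);
            return -EINVAL;
    }
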
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
new file mode 100644
index 000000000000..054a2cf9dd8e
--- /dev/null
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VFIO PCI NVIDIA Witherspoon GPU support a.k.a. NVLink2.
+ *
+ * Copyright (C) 2018 IBM Corp. All rights reserved.
+ * Author: Alexey Kardashevskiy <aik@ozlabs.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Register an on-GPU RAM region for cacheable access.
+ *
+ * Derived from original vfio_pci_igd.c:
+ * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ */
+
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/vfio.h>
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
+#include <asm/kvm_ppc.h>
+#include "vfio_pci_private.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap_fault);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_nvgpu_mmap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vfio_pci_npu2_mmap);
+
+struct vfio_pci_nvgpu_data {
+ unsigned long gpu_hpa; /* GPU RAM physical address */
+ unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
+ unsigned long useraddr; /* GPU RAM userspace address */
+ unsigned long size; /* Size of the GPU RAM window (usually 128GB) */
+ struct mm_struct *mm;
+ struct mm_iommu_table_group_mem_t *mem; /* Pre-registered RAM descr. */
+ struct pci_dev *gpdev;
+ struct notifier_block group_notifier;
+};
+
+static size_t vfio_pci_nvgpu_rw(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count, loff_t *ppos, bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
+ struct vfio_pci_nvgpu_data *data = vdev->region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ loff_t posaligned = pos & PAGE_MASK, posoff = pos & ~PAGE_MASK;
+ size_t sizealigned;
+ void __iomem *ptr;
+
+ if (pos >= vdev->region[i].size)
+ return -EINVAL;
+
+ count = min(count, (size_t)(vdev->region[i].size - pos));
+
+ /*
+ * Map only a small piece of GPU RAM for a short time rather than
+ * keeping it mapped for the guest lifetime, because:
+ *
+ * 1) we do not know the GPU RAM size, only the aperture, which is
+ * 4-8 times bigger than the actual RAM size (16/32GB RAM vs. a
+ * 128GB aperture);
+ * 2) mapping GPU RAM lets the CPU prefetch, and if that happens
+ * before the NVLink bridge is reset (which fences GPU RAM),
+ * hardware management interrupts (HMI) may fire and freeze the
+ * bridge.
+ *
+ * This is not a fast path anyway.
+ */
+ sizealigned = _ALIGN_UP(posoff + count, PAGE_SIZE);
+ ptr = ioremap_cache(data->gpu_hpa + posaligned, sizealigned);
+ if (!ptr)
+ return -EFAULT;
+
+ if (iswrite) {
+ if (copy_from_user(ptr + posoff, buf, count))
+ count = -EFAULT;
+ else
+ *ppos += count;
+ } else {
+ if (copy_to_user(buf, ptr + posoff, count))
+ count = -EFAULT;
+ else
+ *ppos += count;
+ }
+
+ iounmap(ptr);
+
+ return count;
+}
+
+static void vfio_pci_nvgpu_release(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region)
+{
+ struct vfio_pci_nvgpu_data *data = region->data;
+ long ret;
+
+ /* If there were any mappings at all... */
+ if (data->mm) {
+ ret = mm_iommu_put(data->mm, data->mem);
+ WARN_ON(ret);
+
+ mmdrop(data->mm);
+ }
+
+ vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
+ &data->group_notifier);
+
+ pnv_npu2_unmap_lpar_dev(data->gpdev);
+
+ kfree(data);
+}
+
+static vm_fault_t vfio_pci_nvgpu_mmap_fault(struct vm_fault *vmf)
+{
+ vm_fault_t ret;
+ struct vm_area_struct *vma = vmf->vma;
+ struct vfio_pci_region *region = vma->vm_private_data;
+ struct vfio_pci_nvgpu_data *data = region->data;
+ unsigned long vmf_off = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long nv2pg = data->gpu_hpa >> PAGE_SHIFT;
+ unsigned long vm_pgoff = vma->vm_pgoff &
+ ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+ unsigned long pfn = nv2pg + vm_pgoff + vmf_off;
+
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+ trace_vfio_pci_nvgpu_mmap_fault(data->gpdev, pfn << PAGE_SHIFT,
+ vmf->address, ret);
+
+ return ret;
+}
+
+static const struct vm_operations_struct vfio_pci_nvgpu_mmap_vmops = {
+ .fault = vfio_pci_nvgpu_mmap_fault,
+};
+
+static int vfio_pci_nvgpu_mmap(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region, struct vm_area_struct *vma)
+{
+ int ret;
+ struct vfio_pci_nvgpu_data *data = region->data;
+
+ if (data->useraddr)
+ return -EPERM;
+
+ if (vma->vm_end - vma->vm_start > data->size)
+ return -EINVAL;
+
+ vma->vm_private_data = region;
+ vma->vm_flags |= VM_PFNMAP;
+ vma->vm_ops = &vfio_pci_nvgpu_mmap_vmops;
+
+ /*
+ * Call mm_iommu_newdev() once here, while the region is not yet
+ * registered, so the proper initialization happens now. Later
+ * callers use mm_iommu_find(), which returns the registered @mem
+ * and does not go through gup().
+ */
+ data->useraddr = vma->vm_start;
+ data->mm = current->mm;
+
+ atomic_inc(&data->mm->mm_count);
+ ret = (int) mm_iommu_newdev(data->mm, data->useraddr,
+ (vma->vm_end - vma->vm_start) >> PAGE_SHIFT,
+ data->gpu_hpa, &data->mem);
+
+ trace_vfio_pci_nvgpu_mmap(vdev->pdev, data->gpu_hpa, data->useraddr,
+ vma->vm_end - vma->vm_start, ret);
+
+ return ret;
+}
+
+static int vfio_pci_nvgpu_add_capability(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region, struct vfio_info_cap *caps)
+{
+ struct vfio_pci_nvgpu_data *data = region->data;
+ struct vfio_region_info_cap_nvlink2_ssatgt cap = { 0 };
+
+ cap.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT;
+ cap.header.version = 1;
+ cap.tgt = data->gpu_tgt;
+
+ return vfio_info_add_capability(caps, &cap.header, sizeof(cap));
+}
+
+static const struct vfio_pci_regops vfio_pci_nvgpu_regops = {
+ .rw = vfio_pci_nvgpu_rw,
+ .release = vfio_pci_nvgpu_release,
+ .mmap = vfio_pci_nvgpu_mmap,
+ .add_capability = vfio_pci_nvgpu_add_capability,
+};
+
+static int vfio_pci_nvgpu_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *opaque)
+{
+ struct kvm *kvm = opaque;
+ struct vfio_pci_nvgpu_data *data = container_of(nb,
+ struct vfio_pci_nvgpu_data,
+ group_notifier);
+
+ if (action == VFIO_GROUP_NOTIFY_SET_KVM && kvm &&
+ pnv_npu2_map_lpar_dev(data->gpdev,
+ kvm->arch.lpid, MSR_DR | MSR_PR))
+ return NOTIFY_BAD;
+
+ return NOTIFY_OK;
+}
+
+int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
+{
+ int ret;
+ u64 reg[2];
+ u64 tgt = 0;
+ struct device_node *npu_node, *mem_node;
+ struct pci_dev *npu_dev;
+ struct vfio_pci_nvgpu_data *data;
+ uint32_t mem_phandle = 0;
+ unsigned long events = VFIO_GROUP_NOTIFY_SET_KVM;
+
+ /*
+ * PCI config space does not tell us about NVLink presence, but
+ * the platform does, so use that.
+ */
+ npu_dev = pnv_pci_get_npu_dev(vdev->pdev, 0);
+ if (!npu_dev)
+ return -ENODEV;
+
+ npu_node = pci_device_to_OF_node(npu_dev);
+ if (!npu_node)
+ return -EINVAL;
+
+ if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
+ return -EINVAL;
+
+ mem_node = of_find_node_by_phandle(mem_phandle);
+ if (!mem_node)
+ return -EINVAL;
+
+ if (of_property_read_variable_u64_array(mem_node, "reg", reg,
+ ARRAY_SIZE(reg), ARRAY_SIZE(reg)) !=
+ ARRAY_SIZE(reg))
+ return -EINVAL;
+
+ if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
+ dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
+ return -EFAULT;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->gpu_hpa = reg[0];
+ data->gpu_tgt = tgt;
+ data->size = reg[1];
+
+ dev_dbg(&vdev->pdev->dev, "%lx..%lx\n", data->gpu_hpa,
+ data->gpu_hpa + data->size - 1);
+
+ data->gpdev = vdev->pdev;
+ data->group_notifier.notifier_call = vfio_pci_nvgpu_group_notifier;
+
+ ret = vfio_register_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
+ &events, &data->group_notifier);
+ if (ret)
+ goto free_exit;
+
+ /*
+ * KVM is set now, so the listener is no longer needed. Keeping it
+ * registered would also mean that, with more than one GPU assigned,
+ * several similar notifiers would fire for the same device again,
+ * which helps nothing.
+ */
+ vfio_unregister_notifier(&data->gpdev->dev, VFIO_GROUP_NOTIFY,
+ &data->group_notifier);
+
+ ret = vfio_pci_register_dev_region(vdev,
+ PCI_VENDOR_ID_NVIDIA | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
+ &vfio_pci_nvgpu_regops,
+ data->size,
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_MMAP,
+ data);
+ if (ret)
+ goto free_exit;
+
+ return 0;
+free_exit:
+ kfree(data);
+
+ return ret;
+}
+
+/*
+ * IBM NPU2 bridge
+ */
+struct vfio_pci_npu2_data {
+ void *base; /* ATSD register virtual address, for emulated access */
+ unsigned long mmio_atsd; /* ATSD physical address */
+ unsigned long gpu_tgt; /* TGT address of corresponding GPU RAM */
+ unsigned int link_speed; /* The link speed from DT's ibm,nvlink-speed */
+};
+
+static size_t vfio_pci_npu2_rw(struct vfio_pci_device *vdev,
+ char __user *buf, size_t count, loff_t *ppos, bool iswrite)
+{
+ unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
+ struct vfio_pci_npu2_data *data = vdev->region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+
+ if (pos >= vdev->region[i].size)
+ return -EINVAL;
+
+ count = min(count, (size_t)(vdev->region[i].size - pos));
+
+ if (iswrite) {
+ if (copy_from_user(data->base + pos, buf, count))
+ return -EFAULT;
+ } else {
+ if (copy_to_user(buf, data->base + pos, count))
+ return -EFAULT;
+ }
+ *ppos += count;
+
+ return count;
+}
+
+static int vfio_pci_npu2_mmap(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region, struct vm_area_struct *vma)
+{
+ int ret;
+ struct vfio_pci_npu2_data *data = region->data;
+ unsigned long req_len = vma->vm_end - vma->vm_start;
+
+ if (req_len != PAGE_SIZE)
+ return -EINVAL;
+
+ vma->vm_flags |= VM_PFNMAP;
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ ret = remap_pfn_range(vma, vma->vm_start, data->mmio_atsd >> PAGE_SHIFT,
+ req_len, vma->vm_page_prot);
+ trace_vfio_pci_npu2_mmap(vdev->pdev, data->mmio_atsd, vma->vm_start,
+ vma->vm_end - vma->vm_start, ret);
+
+ return ret;
+}
+
+static void vfio_pci_npu2_release(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region)
+{
+ struct vfio_pci_npu2_data *data = region->data;
+
+ memunmap(data->base);
+ kfree(data);
+}
+
+static int vfio_pci_npu2_add_capability(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region, struct vfio_info_cap *caps)
+{
+ struct vfio_pci_npu2_data *data = region->data;
+ struct vfio_region_info_cap_nvlink2_ssatgt captgt = { 0 };
+ struct vfio_region_info_cap_nvlink2_lnkspd capspd = { 0 };
+ int ret;
+
+ captgt.header.id = VFIO_REGION_INFO_CAP_NVLINK2_SSATGT;
+ captgt.header.version = 1;
+ captgt.tgt = data->gpu_tgt;
+
+ capspd.header.id = VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD;
+ capspd.header.version = 1;
+ capspd.link_speed = data->link_speed;
+
+ ret = vfio_info_add_capability(caps, &captgt.header, sizeof(captgt));
+ if (ret)
+ return ret;
+
+ return vfio_info_add_capability(caps, &capspd.header, sizeof(capspd));
+}
+
+static const struct vfio_pci_regops vfio_pci_npu2_regops = {
+ .rw = vfio_pci_npu2_rw,
+ .mmap = vfio_pci_npu2_mmap,
+ .release = vfio_pci_npu2_release,
+ .add_capability = vfio_pci_npu2_add_capability,
+};
+
+int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
+{
+ int ret;
+ struct vfio_pci_npu2_data *data;
+ struct device_node *nvlink_dn;
+ u32 nvlink_index = 0;
+ struct pci_dev *npdev = vdev->pdev;
+ struct device_node *npu_node = pci_device_to_OF_node(npdev);
+ struct pci_controller *hose = pci_bus_to_host(npdev->bus);
+ u64 mmio_atsd = 0;
+ u64 tgt = 0;
+ u32 link_speed = 0xff;
+
+ /*
+ * PCI config space does not tell us about NVLink presence, but
+ * the platform does, so use that.
+ */
+ if (!pnv_pci_get_gpu_dev(vdev->pdev))
+ return -ENODEV;
+
+ /*
+ * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links,
+ * so we can allocate one register per link, using the nvlink index
+ * as a key.
+ * There is always at least one ATSD register, so as long as NVLink
+ * bridge #0 is passed to the guest, ATSD will be available.
+ */
+ nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
+ if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
+ &nvlink_index)))
+ return -ENODEV;
+
+ if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index,
+ &mmio_atsd)) {
+ dev_warn(&vdev->pdev->dev, "No available ATSD found\n");
+ mmio_atsd = 0;
+ }
+
+ if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) {
+ dev_warn(&vdev->pdev->dev, "No ibm,device-tgt-addr found\n");
+ return -EFAULT;
+ }
+
+ if (of_property_read_u32(npu_node, "ibm,nvlink-speed", &link_speed)) {
+ dev_warn(&vdev->pdev->dev, "No ibm,nvlink-speed found\n");
+ return -EFAULT;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->mmio_atsd = mmio_atsd;
+ data->gpu_tgt = tgt;
+ data->link_speed = link_speed;
+ if (data->mmio_atsd) {
+ data->base = memremap(data->mmio_atsd, SZ_64K, MEMREMAP_WT);
+ if (!data->base) {
+ ret = -ENOMEM;
+ goto free_exit;
+ }
+ }
+
+ /*
+ * We want to expose the capability even if this specific NVLink
+ * did not get its own ATSD register, because capabilities belong
+ * to VFIO regions and normally there will be an ATSD register
+ * assigned to the NVLink bridge.
+ */
+ ret = vfio_pci_register_dev_region(vdev,
+ PCI_VENDOR_ID_IBM |
+ VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
+ VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
+ &vfio_pci_npu2_regops,
+ data->mmio_atsd ? PAGE_SIZE : 0,
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE |
+ VFIO_REGION_INFO_FLAG_MMAP,
+ data);
+ if (ret)
+ goto free_exit;
+
+ return 0;
+
+free_exit:
+ kfree(data);
+
+ return ret;
+}
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index cde3b5d3441a..127071b84dd7 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -59,6 +59,12 @@ struct vfio_pci_regops {
size_t count, loff_t *ppos, bool iswrite);
void (*release)(struct vfio_pci_device *vdev,
struct vfio_pci_region *region);
+ int (*mmap)(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region,
+ struct vm_area_struct *vma);
+ int (*add_capability)(struct vfio_pci_device *vdev,
+ struct vfio_pci_region *region,
+ struct vfio_info_cap *caps);
};
struct vfio_pci_region {
@@ -157,4 +163,18 @@ static inline int vfio_pci_igd_init(struct vfio_pci_device *vdev)
return -ENODEV;
}
#endif
+#ifdef CONFIG_VFIO_PCI_NVLINK2
+extern int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev);
+extern int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev);
+#else
+static inline int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
+{
+ return -ENODEV;
+}
+
+static inline int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
+{
+ return -ENODEV;
+}
+#endif
#endif /* VFIO_PCI_PRIVATE_H */
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index b30926e11d87..c424913324e3 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -152,11 +152,12 @@ static long tce_iommu_unregister_pages(struct tce_container *container,
struct mm_iommu_table_group_mem_t *mem;
struct tce_iommu_prereg *tcemem;
bool found = false;
+ long ret;
if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
return -EINVAL;
- mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
+ mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT);
if (!mem)
return -ENOENT;
@@ -168,9 +169,13 @@ static long tce_iommu_unregister_pages(struct tce_container *container,
}
if (!found)
- return -ENOENT;
+ ret = -ENOENT;
+ else
+ ret = tce_iommu_prereg_free(container, tcemem);
- return tce_iommu_prereg_free(container, tcemem);
+ mm_iommu_put(container->mm, mem);
+
+ return ret;
}
static long tce_iommu_register_pages(struct tce_container *container,
@@ -185,22 +190,24 @@ static long tce_iommu_register_pages(struct tce_container *container,
((vaddr + size) < vaddr))
return -EINVAL;
- mem = mm_iommu_find(container->mm, vaddr, entries);
+ mem = mm_iommu_get(container->mm, vaddr, entries);
if (mem) {
list_for_each_entry(tcemem, &container->prereg_list, next) {
- if (tcemem->mem == mem)
- return -EBUSY;
+ if (tcemem->mem == mem) {
+ ret = -EBUSY;
+ goto put_exit;
+ }
}
+ } else {
+ ret = mm_iommu_new(container->mm, vaddr, entries, &mem);
+ if (ret)
+ return ret;
}
- ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
- if (ret)
- return ret;
-
tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
if (!tcemem) {
- mm_iommu_put(container->mm, mem);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto put_exit;
}
tcemem->mem = mem;
@@ -209,10 +216,22 @@ static long tce_iommu_register_pages(struct tce_container *container,
container->enabled = true;
return 0;
+
+put_exit:
+ mm_iommu_put(container->mm, mem);
+ return ret;
}
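
mm_iommu_get() now takes a reference even when it merely finds an existing registration, so every exit path owes a matching put: the error paths funnel through put_exit, while the success path transfers the reference to the new tcemem entry (dropped later via tce_iommu_prereg_free()). The invariant, condensed:

    /* Sketch: ownership of the mm_iommu reference after the rework. */
    mem = mm_iommu_get(container->mm, vaddr, entries);      /* +1 if found */
    if (!mem) {
            ret = mm_iommu_new(container->mm, vaddr, entries, &mem); /* +1 */
            if (ret)
                    return ret;
    }
    /* any failure past this point: mm_iommu_put() via put_exit (-1) */
    /* success: the reference is kept, owned by the new tcemem entry */
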
-static bool tce_page_is_contained(struct page *page, unsigned page_shift)
+static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
+ unsigned int page_shift)
{
+ struct page *page;
+ unsigned long size = 0;
+
+ if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
+ return size == (1UL << page_shift);
+
+ page = pfn_to_page(hpa >> PAGE_SHIFT);
/*
* Check that the TCE table granularity is not bigger than the size of
* a page we just found. Otherwise the hardware can get access to
@@ -371,6 +390,7 @@ static void tce_iommu_release(void *iommu_data)
{
struct tce_container *container = iommu_data;
struct tce_iommu_group *tcegrp;
+ struct tce_iommu_prereg *tcemem, *tmtmp;
long i;
while (tce_groups_attached(container)) {
@@ -393,13 +413,8 @@ static void tce_iommu_release(void *iommu_data)
tce_iommu_free_table(container, tbl);
}
- while (!list_empty(&container->prereg_list)) {
- struct tce_iommu_prereg *tcemem;
-
- tcemem = list_first_entry(&container->prereg_list,
- struct tce_iommu_prereg, next);
- WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
- }
+ list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next)
+ WARN_ON(tce_iommu_prereg_free(container, tcemem));
tce_iommu_disable(container);
if (container->mm)
@@ -492,7 +507,8 @@ static int tce_iommu_clear(struct tce_container *container,
direction = DMA_NONE;
oldhpa = 0;
- ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
+ ret = iommu_tce_xchg(container->mm, tbl, entry, &oldhpa,
+ &direction);
if (ret)
continue;
@@ -530,7 +546,6 @@ static long tce_iommu_build(struct tce_container *container,
enum dma_data_direction direction)
{
long i, ret = 0;
- struct page *page;
unsigned long hpa;
enum dma_data_direction dirtmp;
@@ -541,15 +556,16 @@ static long tce_iommu_build(struct tce_container *container,
if (ret)
break;
- page = pfn_to_page(hpa >> PAGE_SHIFT);
- if (!tce_page_is_contained(page, tbl->it_page_shift)) {
+ if (!tce_page_is_contained(container->mm, hpa,
+ tbl->it_page_shift)) {
ret = -EPERM;
break;
}
hpa |= offset;
dirtmp = direction;
- ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
+ ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
+ &dirtmp);
if (ret) {
tce_iommu_unuse_page(container, hpa);
pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
@@ -576,7 +592,6 @@ static long tce_iommu_build_v2(struct tce_container *container,
enum dma_data_direction direction)
{
long i, ret = 0;
- struct page *page;
unsigned long hpa;
enum dma_data_direction dirtmp;
@@ -589,8 +604,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
if (ret)
break;
- page = pfn_to_page(hpa >> PAGE_SHIFT);
- if (!tce_page_is_contained(page, tbl->it_page_shift)) {
+ if (!tce_page_is_contained(container->mm, hpa,
+ tbl->it_page_shift)) {
ret = -EPERM;
break;
}
@@ -603,7 +618,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
if (mm_iommu_mapped_inc(mem))
break;
- ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
+ ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
+ &dirtmp);
if (ret) {
/* dirtmp cannot be DMA_NONE here */
tce_iommu_unuse_page_v2(container, tbl, entry + i);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ab11b2bee273..36f3d0f49e60 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -141,6 +141,10 @@ struct vhost_net {
unsigned tx_zcopy_err;
/* Flush in progress. Protected by tx vq lock. */
bool tx_flush;
+ /* Private page frag */
+ struct page_frag page_frag;
+ /* Refcount bias of page frag */
+ int refcnt_bias;
};
static unsigned vhost_net_zcopy_mask __read_mostly;
@@ -513,7 +517,13 @@ static void vhost_net_busy_poll(struct vhost_net *net,
struct socket *sock;
struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
- mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX: VHOST_NET_VQ_RX);
+ /* Try to hold the vq mutex of the paired virtqueue. We can't
+ * use mutex_lock() here since we cannot guarantee a
+ * consistent lock ordering.
+ */
+ if (!mutex_trylock(&vq->mutex))
+ return;
+
vhost_disable_notify(&net->dev, vq);
sock = rvq->private_data;
@@ -637,14 +647,53 @@ static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
!vhost_vq_avail_empty(vq->dev, vq);
}
+#define SKB_FRAG_PAGE_ORDER get_order(32768)
+
+static bool vhost_net_page_frag_refill(struct vhost_net *net, unsigned int sz,
+ struct page_frag *pfrag, gfp_t gfp)
+{
+ if (pfrag->page) {
+ if (pfrag->offset + sz <= pfrag->size)
+ return true;
+ __page_frag_cache_drain(pfrag->page, net->refcnt_bias);
+ }
+
+ pfrag->offset = 0;
+ net->refcnt_bias = 0;
+ if (SKB_FRAG_PAGE_ORDER) {
+ /* Avoid direct reclaim but allow kswapd to wake */
+ pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
+ __GFP_COMP | __GFP_NOWARN |
+ __GFP_NORETRY,
+ SKB_FRAG_PAGE_ORDER);
+ if (likely(pfrag->page)) {
+ pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
+ goto done;
+ }
+ }
+ pfrag->page = alloc_page(gfp);
+ if (likely(pfrag->page)) {
+ pfrag->size = PAGE_SIZE;
+ goto done;
+ }
+ return false;
+
+done:
+ net->refcnt_bias = USHRT_MAX;
+ page_ref_add(pfrag->page, USHRT_MAX - 1);
+ return true;
+}
+
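
Rather than a get_page() per XDP buffer, the private frag prepays USHRT_MAX references on each fresh page and hands them out by decrementing a bias counter; whatever remains unspent is returned in a single __page_frag_cache_drain() on refill or release. The accounting, condensed:

    /* Prepay a batch of references when a page is (re)filled. */
    page_ref_add(pfrag->page, USHRT_MAX - 1);
    net->refcnt_bias = USHRT_MAX;

    /* Per buffer carved from the frag, instead of get_page(): */
    --net->refcnt_bias;

    /* On refill or teardown, return the unspent references at once: */
    __page_frag_cache_drain(pfrag->page, net->refcnt_bias);
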
#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
struct iov_iter *from)
{
struct vhost_virtqueue *vq = &nvq->vq;
+ struct vhost_net *net = container_of(vq->dev, struct vhost_net,
+ dev);
struct socket *sock = vq->private_data;
- struct page_frag *alloc_frag = &current->task_frag;
+ struct page_frag *alloc_frag = &net->page_frag;
struct virtio_net_hdr *gso;
struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
struct tun_xdp_hdr *hdr;
@@ -665,7 +714,8 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
buflen += SKB_DATA_ALIGN(len + pad);
alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
- if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
+ if (unlikely(!vhost_net_page_frag_refill(net, buflen,
+ alloc_frag, GFP_KERNEL)))
return -ENOMEM;
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
@@ -703,7 +753,7 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
xdp->data_end = xdp->data + len;
hdr->buflen = buflen;
- get_page(alloc_frag->page);
+ --net->refcnt_bias;
alloc_frag->offset += buflen;
++nvq->batched_xdp;
@@ -1292,6 +1342,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
f->private_data = n;
+ n->page_frag.page = NULL;
+ n->refcnt_bias = 0;
return 0;
}
@@ -1359,13 +1411,15 @@ static int vhost_net_release(struct inode *inode, struct file *f)
if (rx_sock)
sockfd_put(rx_sock);
/* Make sure no callbacks are outstanding */
- synchronize_rcu_bh();
+ synchronize_rcu();
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_net_flush(n);
kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
kfree(n->dev.vqs);
+ if (n->page_frag.page)
+ __page_frag_cache_drain(n->page_frag.page, n->refcnt_bias);
kvfree(n);
return 0;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 3a5f81a66d34..55e5aa662ad5 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -295,11 +295,8 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
{
int i;
- for (i = 0; i < d->nvqs; ++i) {
- mutex_lock(&d->vqs[i]->mutex);
+ for (i = 0; i < d->nvqs; ++i)
__vhost_vq_meta_reset(d->vqs[i]);
- mutex_unlock(&d->vqs[i]->mutex);
- }
}
static void vhost_vq_reset(struct vhost_dev *dev,
@@ -895,6 +892,20 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
#define vhost_get_used(vq, x, ptr) \
vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)
+static void vhost_dev_lock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_lock_nested(&d->vqs[i]->mutex, i);
+}
+
+static void vhost_dev_unlock_vqs(struct vhost_dev *d)
+{
+ int i = 0;
+ for (i = 0; i < d->nvqs; ++i)
+ mutex_unlock(&d->vqs[i]->mutex);
+}
+
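
Taking every vq mutex in ascending index order gives the IOTLB path one global lock order, which rules out ABBA deadlocks; the per-index subclass only annotates lockdep. In miniature:

    /* Without a global order, two paths can deadlock:
     *   CPU0: lock(vq[0]) ... lock(vq[1])
     *   CPU1: lock(vq[1]) ... lock(vq[0])
     * With a fixed ascending order, one side simply waits. */
    for (i = 0; i < d->nvqs; ++i)
            mutex_lock_nested(&d->vqs[i]->mutex, i); /* subclass is lockdep-only */
    /* ... touch any or all vqs safely ... */
    for (i = 0; i < d->nvqs; ++i)
            mutex_unlock(&d->vqs[i]->mutex);         /* unlock order is free */
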
static int vhost_new_umem_range(struct vhost_umem *umem,
u64 start, u64 size, u64 end,
u64 userspace_addr, int perm)
@@ -944,10 +955,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
if (msg->iova <= vq_msg->iova &&
msg->iova + msg->size - 1 >= vq_msg->iova &&
vq_msg->type == VHOST_IOTLB_MISS) {
- mutex_lock(&node->vq->mutex);
vhost_poll_queue(&node->vq->poll);
- mutex_unlock(&node->vq->mutex);
-
list_del(&node->node);
kfree(node);
}
@@ -979,6 +987,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
int ret = 0;
mutex_lock(&dev->mutex);
+ vhost_dev_lock_vqs(dev);
switch (msg->type) {
case VHOST_IOTLB_UPDATE:
if (!dev->iotlb) {
@@ -1012,6 +1021,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
break;
}
+ vhost_dev_unlock_vqs(dev);
mutex_unlock(&dev->mutex);
return ret;
@@ -2223,6 +2233,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
return -EFAULT;
}
if (unlikely(vq->log_used)) {
+ /* Make sure used idx is seen before log. */
+ smp_wmb();
/* Log used index update. */
log_write(vq->log_base,
vq->log_addr + offsetof(struct vring_used, idx),
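
The new smp_wmb() orders the used-index store before the log write that follows, so a consumer observing the log entry is guaranteed to also observe the index it describes. It instantiates the generic publish idiom (shared and use() are illustrative):

    /* writer */
    WRITE_ONCE(shared->data, val);
    smp_wmb();                      /* order data before flag */
    WRITE_ONCE(shared->flag, 1);

    /* reader pairs with it */
    if (READ_ONCE(shared->flag)) {
            smp_rmb();              /* order flag before data */
            use(READ_ONCE(shared->data));
    }
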
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 34bc3ab40c6d..98ed5be132c6 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -15,6 +15,7 @@
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
+#include <linux/hashtable.h>
#include <net/af_vsock.h>
#include "vhost.h"
@@ -27,14 +28,14 @@ enum {
/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
-static LIST_HEAD(vhost_vsock_list);
+static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
struct vhost_vsock {
struct vhost_dev dev;
struct vhost_virtqueue vqs[2];
- /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
- struct list_head list;
+ /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
+ struct hlist_node hash;
struct vhost_work send_pkt_work;
spinlock_t send_pkt_list_lock;
@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void)
return VHOST_VSOCK_DEFAULT_HOST_CID;
}
-static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
+/* Callers that dereference the return value must hold vhost_vsock_lock or the
+ * RCU read lock.
+ */
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
struct vhost_vsock *vsock;
- list_for_each_entry(vsock, &vhost_vsock_list, list) {
+ hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
u32 other_cid = vsock->guest_cid;
/* Skip instances that have no CID yet */
@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
return NULL;
}
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
-{
- struct vhost_vsock *vsock;
-
- spin_lock_bh(&vhost_vsock_lock);
- vsock = __vhost_vsock_get(guest_cid);
- spin_unlock_bh(&vhost_vsock_lock);
-
- return vsock;
-}
-
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq)
@@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
struct vhost_vsock *vsock;
int len = pkt->len;
+ rcu_read_lock();
+
/* Find the vhost_vsock according to guest context id */
vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
if (!vsock) {
+ rcu_read_unlock();
virtio_transport_free_pkt(pkt);
return -ENODEV;
}
@@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
spin_unlock_bh(&vsock->send_pkt_list_lock);
vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+ rcu_read_unlock();
return len;
}
@@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
struct vhost_vsock *vsock;
struct virtio_vsock_pkt *pkt, *n;
int cnt = 0;
+ int ret = -ENODEV;
LIST_HEAD(freeme);
+ rcu_read_lock();
+
/* Find the vhost_vsock according to guest context id */
vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
if (!vsock)
- return -ENODEV;
+ goto out;
spin_lock_bh(&vsock->send_pkt_list_lock);
list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
@@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
vhost_poll_queue(&tx_vq->poll);
}
- return 0;
+ ret = 0;
+out:
+ rcu_read_unlock();
+ return ret;
}
static struct virtio_vsock_pkt *
@@ -533,10 +537,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
spin_lock_init(&vsock->send_pkt_list_lock);
INIT_LIST_HEAD(&vsock->send_pkt_list);
vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
-
- spin_lock_bh(&vhost_vsock_lock);
- list_add_tail(&vsock->list, &vhost_vsock_list);
- spin_unlock_bh(&vhost_vsock_lock);
return 0;
out:
@@ -563,13 +563,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
* executing.
*/
- if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
- sock_set_flag(sk, SOCK_DONE);
- vsk->peer_shutdown = SHUTDOWN_MASK;
- sk->sk_state = SS_UNCONNECTED;
- sk->sk_err = ECONNRESET;
- sk->sk_error_report(sk);
- }
+ /* If the peer is still valid, no need to reset connection */
+ if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+ return;
+
+ /* If the close timeout is pending, let it expire. This avoids races
+ * with the timeout callback.
+ */
+ if (vsk->close_work_scheduled)
+ return;
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ECONNRESET;
+ sk->sk_error_report(sk);
}
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
@@ -577,9 +585,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
struct vhost_vsock *vsock = file->private_data;
spin_lock_bh(&vhost_vsock_lock);
- list_del(&vsock->list);
+ if (vsock->guest_cid)
+ hash_del_rcu(&vsock->hash);
spin_unlock_bh(&vhost_vsock_lock);
+ /* Wait for other CPUs to finish using vsock */
+ synchronize_rcu();
+
/* Iterating over all connections for all CIDs to find orphans is
* inefficient. Room for improvement here. */
vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
@@ -620,12 +632,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
/* Refuse if CID is already in use */
spin_lock_bh(&vhost_vsock_lock);
- other = __vhost_vsock_get(guest_cid);
+ other = vhost_vsock_get(guest_cid);
if (other && other != vsock) {
spin_unlock_bh(&vhost_vsock_lock);
return -EADDRINUSE;
}
+
+ if (vsock->guest_cid)
+ hash_del_rcu(&vsock->hash);
+
vsock->guest_cid = guest_cid;
+ hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
spin_unlock_bh(&vhost_vsock_lock);
return 0;
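
The list-to-hashtable conversion follows the standard RCU lifecycle: lookups run under rcu_read_lock() alone, mutations serialize on vhost_vsock_lock, and synchronize_rcu() in the release path guarantees no reader still holds the instance before it is torn down. The skeleton, abbreviated:

    static DEFINE_READ_MOSTLY_HASHTABLE(h, 8);      /* 2^8 buckets */

    /* reader */
    rcu_read_lock();
    hash_for_each_possible_rcu(h, vsock, hash, cid)
            if (vsock->guest_cid == cid)
                    break;          /* NULL if no match; use before unlock */
    rcu_read_unlock();

    /* writer, under the spinlock */
    hash_add_rcu(h, &vsock->hash, cid);
    /* ... later ... */
    hash_del_rcu(&vsock->hash);
    synchronize_rcu();              /* now no reader can still see it */
    kfree(vsock);                   /* free (illustrative) */
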
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 678b27063198..f9ef0673a083 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -562,7 +562,30 @@ static int pwm_backlight_probe(struct platform_device *pdev)
goto err_alloc;
}
- if (!data->levels) {
+ if (data->levels) {
+ /*
+ * For the DT case, data->levels is only filled when
+ * brightness-levels is defined. For the non-DT case,
+ * data->levels can come from platform data, though this
+ * is unusual.
+ */
+ for (i = 0; i <= data->max_brightness; i++)
+ if (data->levels[i] > pb->scale)
+ pb->scale = data->levels[i];
+
+ pb->levels = data->levels;
+ } else if (!data->max_brightness) {
+ /*
+ * If no brightness levels are provided and max_brightness is
+ * not set, use the default brightness table. For the DT case,
+ * max_brightness is 0 when brightness-levels is not specified.
+ * For the non-DT case, max_brightness is usually set to some
+ * value.
+ */
+
+ /* Get the PWM period (in nanoseconds) */
+ pwm_get_state(pb->pwm, &state);
+
ret = pwm_backlight_brightness_default(&pdev->dev, data,
state.period);
if (ret < 0) {
@@ -570,13 +593,19 @@ static int pwm_backlight_probe(struct platform_device *pdev)
"failed to setup default brightness table\n");
goto err_alloc;
}
- }
- for (i = 0; i <= data->max_brightness; i++) {
- if (data->levels[i] > pb->scale)
- pb->scale = data->levels[i];
+ for (i = 0; i <= data->max_brightness; i++)
+ if (data->levels[i] > pb->scale)
+ pb->scale = data->levels[i];
- pb->levels = data->levels;
+
+ pb->levels = data->levels;
+ } else {
+ /*
+ * This only happens in the non-DT case, where platform data
+ * sets the max_brightness value.
+ */
+ pb->scale = data->max_brightness;
}
pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
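
However scale is derived (largest table entry, or max_brightness when there is no table), it ends up as the denominator when a brightness value is turned into a PWM duty cycle. A sketch of that conversion, mirroring the driver's existing compute_duty_cycle() helper (the example_ name is illustrative):

    /* Sketch: brightness -> PWM duty, with or without a levels table. */
    static int example_duty(struct pwm_bl_data *pb, int brightness,
                            unsigned int period)
    {
            u64 duty = pb->levels ? pb->levels[brightness] : brightness;

            duty *= period - pb->lth_brightness;
            do_div(duty, pb->scale);        /* scale is the denominator */

            return duty + pb->lth_brightness;
    }
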
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 8a3e8f61b991..799ae49774f5 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -31,7 +31,7 @@
#define hdmi_log(fmt, ...) dev_printk(level, dev, fmt, ##__VA_ARGS__)
-static u8 hdmi_infoframe_checksum(u8 *ptr, size_t size)
+static u8 hdmi_infoframe_checksum(const u8 *ptr, size_t size)
{
u8 csum = 0;
size_t i;
@@ -68,8 +68,36 @@ int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
}
EXPORT_SYMBOL(hdmi_avi_infoframe_init);
+static int hdmi_avi_infoframe_check_only(const struct hdmi_avi_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_AVI ||
+ frame->version != 2 ||
+ frame->length != HDMI_AVI_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ if (frame->picture_aspect > HDMI_PICTURE_ASPECT_16_9)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
- * hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
+ * hdmi_avi_infoframe_check() - check an HDMI AVI infoframe
+ * @frame: HDMI AVI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame)
+{
+ return hdmi_avi_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_avi_infoframe_check);
+
+/**
+ * hdmi_avi_infoframe_pack_only() - write HDMI AVI infoframe to binary buffer
* @frame: HDMI AVI infoframe
* @buffer: destination buffer
* @size: size of buffer
@@ -82,20 +110,22 @@ EXPORT_SYMBOL(hdmi_avi_infoframe_init);
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
-ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
- size_t size)
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size)
{
u8 *ptr = buffer;
size_t length;
+ int ret;
+
+ ret = hdmi_avi_infoframe_check_only(frame);
+ if (ret)
+ return ret;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
- if (frame->picture_aspect > HDMI_PICTURE_ASPECT_16_9)
- return -EINVAL;
-
memset(buffer, 0, size);
ptr[0] = frame->type;
@@ -152,6 +182,36 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
return length;
}
+EXPORT_SYMBOL(hdmi_avi_infoframe_pack_only);
+
+/**
+ * hdmi_avi_infoframe_pack() - check an HDMI AVI infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI AVI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_avi_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_avi_infoframe_pack_only(frame, buffer, size);
+}
EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
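
With the split, a driver building a frame from scratch validates and packs in one call, while a path that has already validated the frame can use the cheaper _pack_only variant. A typical caller, sketched:

    /* Sketch: validate and pack an AVI infoframe in one step. */
    struct hdmi_avi_infoframe frame;
    u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
    ssize_t len;

    hdmi_avi_infoframe_init(&frame);
    frame.colorspace = HDMI_COLORSPACE_RGB;
    len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
    if (len < 0)
            return len;     /* frame was inconsistent */
    /* write buf[0..len) into the encoder's infoframe registers */
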
/**
@@ -178,8 +238,33 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
}
EXPORT_SYMBOL(hdmi_spd_infoframe_init);
+static int hdmi_spd_infoframe_check_only(const struct hdmi_spd_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_SPD ||
+ frame->version != 1 ||
+ frame->length != HDMI_SPD_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
- * hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
+ * hdmi_spd_infoframe_check() - check an HDMI SPD infoframe
+ * @frame: HDMI SPD infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame)
+{
+ return hdmi_spd_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_spd_infoframe_check);
+
+/**
+ * hdmi_spd_infoframe_pack_only() - write HDMI SPD infoframe to binary buffer
* @frame: HDMI SPD infoframe
* @buffer: destination buffer
* @size: size of buffer
@@ -192,11 +277,16 @@ EXPORT_SYMBOL(hdmi_spd_infoframe_init);
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
-ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
- size_t size)
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size)
{
u8 *ptr = buffer;
size_t length;
+ int ret;
+
+ ret = hdmi_spd_infoframe_check_only(frame);
+ if (ret)
+ return ret;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
@@ -222,6 +312,36 @@ ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
return length;
}
+EXPORT_SYMBOL(hdmi_spd_infoframe_pack_only);
+
+/**
+ * hdmi_spd_infoframe_pack() - check an HDMI SPD infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI SPD infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_spd_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_spd_infoframe_pack_only(frame, buffer, size);
+}
EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
/**
@@ -242,8 +362,33 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
}
EXPORT_SYMBOL(hdmi_audio_infoframe_init);
+static int hdmi_audio_infoframe_check_only(const struct hdmi_audio_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_AUDIO ||
+ frame->version != 1 ||
+ frame->length != HDMI_AUDIO_INFOFRAME_SIZE)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
- * hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
+ * hdmi_audio_infoframe_check() - check a HDMI audio infoframe
+ * @frame: HDMI audio infoframe
+ *
+ * Validates that the infoframe is consistent, i.e. that its type, version
+ * and length fields match what an audio infoframe requires.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame)
+{
+ return hdmi_audio_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_audio_infoframe_check);
+
+/**
+ * hdmi_audio_infoframe_pack_only() - write HDMI audio infoframe to binary buffer
* @frame: HDMI audio infoframe
* @buffer: destination buffer
* @size: size of buffer
@@ -256,12 +401,17 @@ EXPORT_SYMBOL(hdmi_audio_infoframe_init);
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
-ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
- void *buffer, size_t size)
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size)
{
unsigned char channels;
u8 *ptr = buffer;
size_t length;
+ int ret;
+
+ ret = hdmi_audio_infoframe_check_only(frame);
+ if (ret)
+ return ret;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
@@ -297,6 +447,36 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
return length;
}
+EXPORT_SYMBOL(hdmi_audio_infoframe_pack_only);
+
+/**
+ * hdmi_audio_infoframe_pack() - check a HDMI Audio infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI Audio infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_audio_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_audio_infoframe_pack_only(frame, buffer, size);
+}
EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
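/*
 * Editorial sketch, not part of this patch: packing an audio infoframe for
 * 2-channel PCM. example_pack_audio() is hypothetical; the init/pack helpers
 * and the HDMI_AUDIO_CODING_TYPE_* constants are from <linux/hdmi.h>.
 */
static ssize_t example_pack_audio(void *buf, size_t size)
{
        struct hdmi_audio_infoframe frame;
        int ret;

        ret = hdmi_audio_infoframe_init(&frame);
        if (ret)
                return ret;

        frame.channels = 2;     /* stereo; packed on the wire as CC = channels - 1 */
        frame.coding_type = HDMI_AUDIO_CODING_TYPE_PCM;

        return hdmi_audio_infoframe_pack(&frame, buf, size);
}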
/**
@@ -319,6 +499,7 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
* value
*/
frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
+ frame->length = 4;
return 0;
}
@@ -335,8 +516,42 @@ static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *fram
return 4;
}
+static int hdmi_vendor_infoframe_check_only(const struct hdmi_vendor_infoframe *frame)
+{
+ if (frame->type != HDMI_INFOFRAME_TYPE_VENDOR ||
+ frame->version != 1 ||
+ frame->oui != HDMI_IEEE_OUI)
+ return -EINVAL;
+
+ /* only one of those can be supplied */
+ if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
+ return -EINVAL;
+
+ if (frame->length != hdmi_vendor_infoframe_length(frame))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hdmi_vendor_infoframe_check() - check a HDMI vendor infoframe
+ * @frame: HDMI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame)
+{
+ frame->length = hdmi_vendor_infoframe_length(frame);
+
+ return hdmi_vendor_infoframe_check_only(frame);
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_check);
+
/**
- * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
+ * hdmi_vendor_infoframe_pack_only() - write a HDMI vendor infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
@@ -349,17 +564,16 @@ static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *fram
* Returns the number of bytes packed into the binary buffer or a negative
* error code on failure.
*/
-ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
- void *buffer, size_t size)
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size)
{
u8 *ptr = buffer;
size_t length;
+ int ret;
- /* only one of those can be supplied */
- if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
- return -EINVAL;
-
- frame->length = hdmi_vendor_infoframe_length(frame);
+ ret = hdmi_vendor_infoframe_check_only(frame);
+ if (ret)
+ return ret;
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
@@ -394,24 +608,134 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
return length;
}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_pack_only);
+
+/**
+ * hdmi_vendor_infoframe_pack() - check a HDMI Vendor infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI Vendor infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_vendor_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_vendor_infoframe_pack_only(frame, buffer, size);
+}
EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
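/*
 * Editorial sketch, not part of this patch: a 3D (frame packing) vendor
 * infoframe. example_pack_vendor_3d() is hypothetical. Note that vic and
 * s3d_struct are mutually exclusive; hdmi_vendor_infoframe_check() rejects
 * frames that set both, and recomputes frame->length from whichever is set.
 */
static ssize_t example_pack_vendor_3d(void *buf, size_t size)
{
        struct hdmi_vendor_infoframe frame;
        int ret;

        ret = hdmi_vendor_infoframe_init(&frame);
        if (ret)
                return ret;

        frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;

        return hdmi_vendor_infoframe_pack(&frame, buf, size);
}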
+static int
+hdmi_vendor_any_infoframe_check_only(const union hdmi_vendor_any_infoframe *frame)
+{
+ if (frame->any.type != HDMI_INFOFRAME_TYPE_VENDOR ||
+ frame->any.version != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * hdmi_vendor_any_infoframe_check() - check a vendor infoframe
+ */
+static int
+hdmi_vendor_any_infoframe_check(union hdmi_vendor_any_infoframe *frame)
+{
+ int ret;
+
+ ret = hdmi_vendor_any_infoframe_check_only(frame);
+ if (ret)
+ return ret;
+
+ /* we only know about HDMI vendor infoframes */
+ if (frame->any.oui != HDMI_IEEE_OUI)
+ return -EINVAL;
+
+ return hdmi_vendor_infoframe_check(&frame->hdmi);
+}
+
/*
- * hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
+ * hdmi_vendor_any_infoframe_pack_only() - write a vendor infoframe to binary buffer
*/
static ssize_t
-hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
- void *buffer, size_t size)
+hdmi_vendor_any_infoframe_pack_only(const union hdmi_vendor_any_infoframe *frame,
+ void *buffer, size_t size)
{
+ int ret;
+
+ ret = hdmi_vendor_any_infoframe_check_only(frame);
+ if (ret)
+ return ret;
+
/* we only know about HDMI vendor infoframes */
if (frame->any.oui != HDMI_IEEE_OUI)
return -EINVAL;
- return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
+ return hdmi_vendor_infoframe_pack_only(&frame->hdmi, buffer, size);
+}
+
+/*
+ * hdmi_vendor_any_infoframe_pack() - check a vendor infoframe,
+ * and write it to binary buffer
+ */
+static ssize_t
+hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
+ void *buffer, size_t size)
+{
+ int ret;
+
+ ret = hdmi_vendor_any_infoframe_check(frame);
+ if (ret)
+ return ret;
+
+ return hdmi_vendor_any_infoframe_pack_only(frame, buffer, size);
}
/**
- * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
+ * hdmi_infoframe_check() - check a HDMI infoframe
+ * @frame: HDMI infoframe
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int
+hdmi_infoframe_check(union hdmi_infoframe *frame)
+{
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ return hdmi_avi_infoframe_check(&frame->avi);
+ case HDMI_INFOFRAME_TYPE_SPD:
+ return hdmi_spd_infoframe_check(&frame->spd);
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ return hdmi_audio_infoframe_check(&frame->audio);
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return hdmi_vendor_any_infoframe_check(&frame->vendor);
+ default:
+ WARN(1, "Bad infoframe type %d\n", frame->any.type);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(hdmi_infoframe_check);
+
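/*
 * Editorial sketch, not part of this patch: the union entry points dispatch
 * on frame->any.type, so generic code can validate once and then use the
 * const-friendly _pack_only() variant added below; this is the same sequence
 * hdmi_infoframe_pack() performs internally. example_pack_any() is
 * hypothetical.
 */
static ssize_t example_pack_any(union hdmi_infoframe *frame,
                                void *buf, size_t size)
{
        int ret;

        ret = hdmi_infoframe_check(frame);
        if (ret)
                return ret;

        return hdmi_infoframe_pack_only(frame, buf, size);
}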
+/**
+ * hdmi_infoframe_pack_only() - write a HDMI infoframe to binary buffer
* @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
@@ -425,7 +749,56 @@ hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
* error code on failure.
*/
ssize_t
-hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
+hdmi_infoframe_pack_only(const union hdmi_infoframe *frame, void *buffer, size_t size)
+{
+ ssize_t length;
+
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ length = hdmi_avi_infoframe_pack_only(&frame->avi,
+ buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ length = hdmi_spd_infoframe_pack_only(&frame->spd,
+ buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ length = hdmi_audio_infoframe_pack_only(&frame->audio,
+ buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ length = hdmi_vendor_any_infoframe_pack_only(&frame->vendor,
+ buffer, size);
+ break;
+ default:
+ WARN(1, "Bad infoframe type %d\n", frame->any.type);
+ length = -EINVAL;
+ }
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_infoframe_pack_only);
+
+/**
+ * hdmi_infoframe_pack() - check a HDMI infoframe,
+ * and write it to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Validates that the infoframe is consistent and updates derived fields
+ * (e.g. length) based on other fields, after which it packs the information
+ * contained in the @frame structure into a binary representation that
+ * can be written into the corresponding controller registers. This function
+ * also computes the checksum as required by section 5.3.5 of the HDMI 1.4
+ * specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame,
+ void *buffer, size_t size)
{
ssize_t length;
@@ -471,7 +844,7 @@ static const char *hdmi_infoframe_type_get_name(enum hdmi_infoframe_type type)
static void hdmi_infoframe_log_header(const char *level,
struct device *dev,
- struct hdmi_any_infoframe *frame)
+ const struct hdmi_any_infoframe *frame)
{
hdmi_log("HDMI infoframe: %s, version %u, length %u\n",
hdmi_infoframe_type_get_name(frame->type),
@@ -673,10 +1046,10 @@ hdmi_content_type_get_name(enum hdmi_content_type content_type)
*/
static void hdmi_avi_infoframe_log(const char *level,
struct device *dev,
- struct hdmi_avi_infoframe *frame)
+ const struct hdmi_avi_infoframe *frame)
{
hdmi_infoframe_log_header(level, dev,
- (struct hdmi_any_infoframe *)frame);
+ (const struct hdmi_any_infoframe *)frame);
hdmi_log(" colorspace: %s\n",
hdmi_colorspace_get_name(frame->colorspace));
@@ -750,12 +1123,12 @@ static const char *hdmi_spd_sdi_get_name(enum hdmi_spd_sdi sdi)
*/
static void hdmi_spd_infoframe_log(const char *level,
struct device *dev,
- struct hdmi_spd_infoframe *frame)
+ const struct hdmi_spd_infoframe *frame)
{
u8 buf[17];
hdmi_infoframe_log_header(level, dev,
- (struct hdmi_any_infoframe *)frame);
+ (const struct hdmi_any_infoframe *)frame);
memset(buf, 0, sizeof(buf));
@@ -886,10 +1259,10 @@ hdmi_audio_coding_type_ext_get_name(enum hdmi_audio_coding_type_ext ctx)
*/
static void hdmi_audio_infoframe_log(const char *level,
struct device *dev,
- struct hdmi_audio_infoframe *frame)
+ const struct hdmi_audio_infoframe *frame)
{
hdmi_infoframe_log_header(level, dev,
- (struct hdmi_any_infoframe *)frame);
+ (const struct hdmi_any_infoframe *)frame);
if (frame->channels)
hdmi_log(" channels: %u\n", frame->channels - 1);
@@ -949,12 +1322,12 @@ hdmi_3d_structure_get_name(enum hdmi_3d_structure s3d_struct)
static void
hdmi_vendor_any_infoframe_log(const char *level,
struct device *dev,
- union hdmi_vendor_any_infoframe *frame)
+ const union hdmi_vendor_any_infoframe *frame)
{
- struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
+ const struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
hdmi_infoframe_log_header(level, dev,
- (struct hdmi_any_infoframe *)frame);
+ (const struct hdmi_any_infoframe *)frame);
if (frame->any.oui != HDMI_IEEE_OUI) {
hdmi_log(" not a HDMI vendor infoframe\n");
@@ -984,7 +1357,7 @@ hdmi_vendor_any_infoframe_log(const char *level,
*/
void hdmi_infoframe_log(const char *level,
struct device *dev,
- union hdmi_infoframe *frame)
+ const union hdmi_infoframe *frame)
{
switch (frame->any.type) {
case HDMI_INFOFRAME_TYPE_AVI:
@@ -1005,8 +1378,9 @@ EXPORT_SYMBOL(hdmi_infoframe_log);
/**
* hdmi_avi_infoframe_unpack() - unpack binary buffer to a HDMI AVI infoframe
- * @buffer: source buffer
* @frame: HDMI AVI infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
*
* Unpacks the information contained in binary @buffer into a structured
* @frame of the HDMI Auxiliary Video (AVI) information frame.
@@ -1016,11 +1390,14 @@ EXPORT_SYMBOL(hdmi_infoframe_log);
* Returns 0 on success or a negative error code on failure.
*/
static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
- void *buffer)
+ const void *buffer, size_t size)
{
- u8 *ptr = buffer;
+ const u8 *ptr = buffer;
int ret;
+ if (size < HDMI_INFOFRAME_SIZE(AVI))
+ return -EINVAL;
+
if (ptr[0] != HDMI_INFOFRAME_TYPE_AVI ||
ptr[1] != 2 ||
ptr[2] != HDMI_AVI_INFOFRAME_SIZE)
@@ -1068,8 +1445,9 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
/**
* hdmi_spd_infoframe_unpack() - unpack binary buffer to a HDMI SPD infoframe
- * @buffer: source buffer
* @frame: HDMI SPD infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
*
* Unpacks the information contained in binary @buffer into a structured
* @frame of the HDMI Source Product Description (SPD) information frame.
@@ -1079,11 +1457,14 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
* Returns 0 on success or a negative error code on failure.
*/
static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
- void *buffer)
+ const void *buffer, size_t size)
{
- u8 *ptr = buffer;
+ const u8 *ptr = buffer;
int ret;
+ if (size < HDMI_INFOFRAME_SIZE(SPD))
+ return -EINVAL;
+
if (ptr[0] != HDMI_INFOFRAME_TYPE_SPD ||
ptr[1] != 1 ||
ptr[2] != HDMI_SPD_INFOFRAME_SIZE) {
@@ -1106,8 +1487,9 @@ static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
/**
* hdmi_audio_infoframe_unpack() - unpack binary buffer to a HDMI AUDIO infoframe
- * @buffer: source buffer
* @frame: HDMI Audio infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
*
* Unpacks the information contained in binary @buffer into a structured
* @frame of the HDMI Audio information frame.
@@ -1117,11 +1499,14 @@ static int hdmi_spd_infoframe_unpack(struct hdmi_spd_infoframe *frame,
* Returns 0 on success or a negative error code on failure.
*/
static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
- void *buffer)
+ const void *buffer, size_t size)
{
- u8 *ptr = buffer;
+ const u8 *ptr = buffer;
int ret;
+ if (size < HDMI_INFOFRAME_SIZE(AUDIO))
+ return -EINVAL;
+
if (ptr[0] != HDMI_INFOFRAME_TYPE_AUDIO ||
ptr[1] != 1 ||
ptr[2] != HDMI_AUDIO_INFOFRAME_SIZE) {
@@ -1151,8 +1536,9 @@ static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
/**
* hdmi_vendor_infoframe_unpack() - unpack binary buffer to a HDMI vendor infoframe
- * @buffer: source buffer
* @frame: HDMI Vendor infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
*
* Unpacks the information contained in binary @buffer into a structured
* @frame of the HDMI Vendor information frame.
@@ -1163,14 +1549,17 @@ static int hdmi_audio_infoframe_unpack(struct hdmi_audio_infoframe *frame,
*/
static int
hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
- void *buffer)
+ const void *buffer, size_t size)
{
- u8 *ptr = buffer;
+ const u8 *ptr = buffer;
size_t length;
int ret;
u8 hdmi_video_format;
struct hdmi_vendor_infoframe *hvf = &frame->hdmi;
+ if (size < HDMI_INFOFRAME_HEADER_SIZE)
+ return -EINVAL;
+
if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR ||
ptr[1] != 1 ||
(ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6))
@@ -1178,6 +1567,9 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
length = ptr[2];
+ if (size < HDMI_INFOFRAME_HEADER_SIZE + length)
+ return -EINVAL;
+
if (hdmi_infoframe_checksum(buffer,
HDMI_INFOFRAME_HEADER_SIZE + length) != 0)
return -EINVAL;
@@ -1224,8 +1616,9 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
/**
* hdmi_infoframe_unpack() - unpack binary buffer to a HDMI infoframe
- * @buffer: source buffer
* @frame: HDMI infoframe
+ * @buffer: source buffer
+ * @size: size of buffer
*
* Unpacks the information contained in binary buffer @buffer into a structured
* @frame of a HDMI infoframe.
@@ -1234,23 +1627,27 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
*
* Returns 0 on success or a negative error code on failure.
*/
-int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer)
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+ const void *buffer, size_t size)
{
int ret;
- u8 *ptr = buffer;
+ const u8 *ptr = buffer;
+
+ if (size < HDMI_INFOFRAME_HEADER_SIZE)
+ return -EINVAL;
switch (ptr[0]) {
case HDMI_INFOFRAME_TYPE_AVI:
- ret = hdmi_avi_infoframe_unpack(&frame->avi, buffer);
+ ret = hdmi_avi_infoframe_unpack(&frame->avi, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_SPD:
- ret = hdmi_spd_infoframe_unpack(&frame->spd, buffer);
+ ret = hdmi_spd_infoframe_unpack(&frame->spd, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_AUDIO:
- ret = hdmi_audio_infoframe_unpack(&frame->audio, buffer);
+ ret = hdmi_audio_infoframe_unpack(&frame->audio, buffer, size);
break;
case HDMI_INFOFRAME_TYPE_VENDOR:
- ret = hdmi_vendor_any_infoframe_unpack(&frame->vendor, buffer);
+ ret = hdmi_vendor_any_infoframe_unpack(&frame->vendor, buffer, size);
break;
default:
ret = -EINVAL;
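/*
 * Editorial sketch, not part of this patch: with the new size argument, an
 * unpack caller passes the number of bytes it actually holds, and
 * hdmi_infoframe_unpack() rejects truncated buffers with -EINVAL before any
 * field is dereferenced. example_parse_rx() is hypothetical.
 */
static int example_parse_rx(union hdmi_infoframe *frame,
                            const u8 *rx_buf, size_t rx_len)
{
        /* No need to pre-check rx_len; unpack validates it per frame type. */
        return hdmi_infoframe_unpack(frame, rx_buf, rx_len);
}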
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 814b395007b2..cd7e755484e3 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -44,6 +44,26 @@
} while (0)
#define END_USE(_vq) \
do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
+#define LAST_ADD_TIME_UPDATE(_vq) \
+ do { \
+ ktime_t now = ktime_get(); \
+ \
+ /* No kick or get, with .1 second between? Warn. */ \
+ if ((_vq)->last_add_time_valid) \
+ WARN_ON(ktime_to_ms(ktime_sub(now, \
+ (_vq)->last_add_time)) > 100); \
+ (_vq)->last_add_time = now; \
+ (_vq)->last_add_time_valid = true; \
+ } while (0)
+#define LAST_ADD_TIME_CHECK(_vq) \
+ do { \
+ if ((_vq)->last_add_time_valid) { \
+ WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
+ (_vq)->last_add_time)) > 100); \
+ } \
+ } while (0)
+#define LAST_ADD_TIME_INVALID(_vq) \
+ ((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...) \
do { \
@@ -53,18 +73,38 @@
} while (0)
#define START_USE(vq)
#define END_USE(vq)
+#define LAST_ADD_TIME_UPDATE(vq)
+#define LAST_ADD_TIME_CHECK(vq)
+#define LAST_ADD_TIME_INVALID(vq)
#endif
-struct vring_desc_state {
+struct vring_desc_state_split {
void *data; /* Data for callback. */
struct vring_desc *indir_desc; /* Indirect descriptor, if any. */
};
+struct vring_desc_state_packed {
+ void *data; /* Data for callback. */
+ struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
+ u16 num; /* Descriptor list length. */
+ u16 next; /* The next desc state in a list. */
+ u16 last; /* The last desc state in a list. */
+};
+
+struct vring_desc_extra_packed {
+ dma_addr_t addr; /* Buffer DMA addr. */
+ u32 len; /* Buffer length. */
+ u16 flags; /* Descriptor flags. */
+};
+
struct vring_virtqueue {
struct virtqueue vq;
- /* Actual memory layout for this queue */
- struct vring vring;
+ /* Is this a packed ring? */
+ bool packed_ring;
+
+ /* Is DMA API used? */
+ bool use_dma_api;
/* Can we use weak barriers? */
bool weak_barriers;
@@ -86,19 +126,70 @@ struct vring_virtqueue {
/* Last used index we've seen. */
u16 last_used_idx;
- /* Last written value to avail->flags */
- u16 avail_flags_shadow;
+ union {
+ /* Available for split ring */
+ struct {
+ /* Actual memory layout for this queue. */
+ struct vring vring;
+
+ /* Last written value to avail->flags */
+ u16 avail_flags_shadow;
+
+ /*
+ * Last written value to avail->idx in
+ * guest byte order.
+ */
+ u16 avail_idx_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_split *desc_state;
- /* Last written value to avail->idx in guest byte order */
- u16 avail_idx_shadow;
+ /* DMA address and size information */
+ dma_addr_t queue_dma_addr;
+ size_t queue_size_in_bytes;
+ } split;
+
+ /* Available for packed ring */
+ struct {
+ /* Actual memory layout for this queue. */
+ struct vring_packed vring;
+
+ /* Driver ring wrap counter. */
+ bool avail_wrap_counter;
+
+ /* Device ring wrap counter. */
+ bool used_wrap_counter;
+
+ /* Avail used flags. */
+ u16 avail_used_flags;
+
+ /* Index of the next avail descriptor. */
+ u16 next_avail_idx;
+
+ /*
+ * Last written value to driver->flags in
+ * guest byte order.
+ */
+ u16 event_flags_shadow;
+
+ /* Per-descriptor state. */
+ struct vring_desc_state_packed *desc_state;
+ struct vring_desc_extra_packed *desc_extra;
+
+ /* DMA address and size information */
+ dma_addr_t ring_dma_addr;
+ dma_addr_t driver_event_dma_addr;
+ dma_addr_t device_event_dma_addr;
+ size_t ring_size_in_bytes;
+ size_t event_size_in_bytes;
+ } packed;
+ };
/* How to notify other side. FIXME: commonalize hcalls! */
bool (*notify)(struct virtqueue *vq);
/* DMA, allocation, and size information */
bool we_own_ring;
- size_t queue_size_in_bytes;
- dma_addr_t queue_dma_addr;
#ifdef DEBUG
/* They're supposed to lock for us. */
@@ -108,13 +199,27 @@ struct vring_virtqueue {
bool last_add_time_valid;
ktime_t last_add_time;
#endif
-
- /* Per-descriptor state. */
- struct vring_desc_state desc_state[];
};
+
+/*
+ * Helpers.
+ */
+
#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
+ unsigned int total_sg)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /*
+ * If the host supports indirect descriptor tables, and we have multiple
+ * buffers, then go indirect. FIXME: tune this threshold
+ */
+ return (vq->indirect && total_sg > 1 && vq->vq.num_free);
+}
+
/*
* Modern virtio devices have feature bits to specify whether they need a
* quirk and bypass the IOMMU. If not there, just use the DMA API.
@@ -161,6 +266,48 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
return false;
}
+static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ if (vring_use_dma_api(vdev)) {
+ return dma_alloc_coherent(vdev->dev.parent, size,
+ dma_handle, flag);
+ } else {
+ void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
+
+ if (queue) {
+ phys_addr_t phys_addr = virt_to_phys(queue);
+ *dma_handle = (dma_addr_t)phys_addr;
+
+ /*
+ * Sanity check: make sure we didn't truncate
+ * the address. The only arches I can find that
+ * have 64-bit phys_addr_t but 32-bit dma_addr_t
+ * are certain non-highmem MIPS and x86
+ * configurations, but these configurations
+ * should never allocate physical pages above 32
+ * bits, so this is fine. Just in case, throw a
+ * warning and abort if we end up with an
+ * unrepresentable address.
+ */
+ if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
+ free_pages_exact(queue, PAGE_ALIGN(size));
+ return NULL;
+ }
+ }
+ return queue;
+ }
+}
+
+static void vring_free_queue(struct virtio_device *vdev, size_t size,
+ void *queue, dma_addr_t dma_handle)
+{
+ if (vring_use_dma_api(vdev))
+ dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
+ else
+ free_pages_exact(queue, PAGE_ALIGN(size));
+}
+
/*
* The DMA ops on various arches are rather gnarly right now, and
* making all of the arch DMA ops work on the vring device itself
@@ -176,7 +323,7 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
struct scatterlist *sg,
enum dma_data_direction direction)
{
- if (!vring_use_dma_api(vq->vq.vdev))
+ if (!vq->use_dma_api)
return (dma_addr_t)sg_phys(sg);
/*
@@ -193,19 +340,33 @@ static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
- if (!vring_use_dma_api(vq->vq.vdev))
+ if (!vq->use_dma_api)
return (dma_addr_t)virt_to_phys(cpu_addr);
return dma_map_single(vring_dma_dev(vq),
cpu_addr, size, direction);
}
-static void vring_unmap_one(const struct vring_virtqueue *vq,
- struct vring_desc *desc)
+static int vring_mapping_error(const struct vring_virtqueue *vq,
+ dma_addr_t addr)
+{
+ if (!vq->use_dma_api)
+ return 0;
+
+ return dma_mapping_error(vring_dma_dev(vq), addr);
+}
+
+
+/*
+ * Split ring specific functions - *_split().
+ */
+
+static void vring_unmap_one_split(const struct vring_virtqueue *vq,
+ struct vring_desc *desc)
{
u16 flags;
- if (!vring_use_dma_api(vq->vq.vdev))
+ if (!vq->use_dma_api)
return;
flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
@@ -225,17 +386,9 @@ static void vring_unmap_one(const struct vring_virtqueue *vq,
}
}
-static int vring_mapping_error(const struct vring_virtqueue *vq,
- dma_addr_t addr)
-{
- if (!vring_use_dma_api(vq->vq.vdev))
- return 0;
-
- return dma_mapping_error(vring_dma_dev(vq), addr);
-}
-
-static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
- unsigned int total_sg, gfp_t gfp)
+static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
+ unsigned int total_sg,
+ gfp_t gfp)
{
struct vring_desc *desc;
unsigned int i;
@@ -256,14 +409,14 @@ static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
return desc;
}
-static inline int virtqueue_add(struct virtqueue *_vq,
- struct scatterlist *sgs[],
- unsigned int total_sg,
- unsigned int out_sgs,
- unsigned int in_sgs,
- void *data,
- void *ctx,
- gfp_t gfp)
+static inline int virtqueue_add_split(struct virtqueue *_vq,
+ struct scatterlist *sgs[],
+ unsigned int total_sg,
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ void *data,
+ void *ctx,
+ gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
struct scatterlist *sg;
@@ -282,30 +435,17 @@ static inline int virtqueue_add(struct virtqueue *_vq,
return -EIO;
}
-#ifdef DEBUG
- {
- ktime_t now = ktime_get();
-
- /* No kick or get, with .1 second between? Warn. */
- if (vq->last_add_time_valid)
- WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
- > 100);
- vq->last_add_time = now;
- vq->last_add_time_valid = true;
- }
-#endif
+ LAST_ADD_TIME_UPDATE(vq);
BUG_ON(total_sg == 0);
head = vq->free_head;
- /* If the host supports indirect descriptor tables, and we have multiple
- * buffers, then go indirect. FIXME: tune this threshold */
- if (vq->indirect && total_sg > 1 && vq->vq.num_free)
- desc = alloc_indirect(_vq, total_sg, gfp);
+ if (virtqueue_use_indirect(_vq, total_sg))
+ desc = alloc_indirect_split(_vq, total_sg, gfp);
else {
desc = NULL;
- WARN_ON_ONCE(total_sg > vq->vring.num && !vq->indirect);
+ WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
}
if (desc) {
@@ -316,7 +456,7 @@ static inline int virtqueue_add(struct virtqueue *_vq,
descs_used = 1;
} else {
indirect = false;
- desc = vq->vring.desc;
+ desc = vq->split.vring.desc;
i = head;
descs_used = total_sg;
}
@@ -372,10 +512,13 @@ static inline int virtqueue_add(struct virtqueue *_vq,
if (vring_mapping_error(vq, addr))
goto unmap_release;
- vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
- vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);
+ vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
+ VRING_DESC_F_INDIRECT);
+ vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
+ addr);
- vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
+ vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
+ total_sg * sizeof(struct vring_desc));
}
/* We're using some buffers from the free list. */
@@ -383,27 +526,29 @@ static inline int virtqueue_add(struct virtqueue *_vq,
/* Update free pointer */
if (indirect)
- vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
+ vq->free_head = virtio16_to_cpu(_vq->vdev,
+ vq->split.vring.desc[head].next);
else
vq->free_head = i;
/* Store token and indirect buffer state. */
- vq->desc_state[head].data = data;
+ vq->split.desc_state[head].data = data;
if (indirect)
- vq->desc_state[head].indir_desc = desc;
+ vq->split.desc_state[head].indir_desc = desc;
else
- vq->desc_state[head].indir_desc = ctx;
+ vq->split.desc_state[head].indir_desc = ctx;
/* Put entry in available array (but don't update avail->idx until they
* do sync). */
- avail = vq->avail_idx_shadow & (vq->vring.num - 1);
- vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
+ avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
+ vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);
/* Descriptors and available array need to be set before we expose the
* new available array entries. */
virtio_wmb(vq->weak_barriers);
- vq->avail_idx_shadow++;
- vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
+ vq->split.avail_idx_shadow++;
+ vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
+ vq->split.avail_idx_shadow);
vq->num_added++;
pr_debug("Added buffer head %i to %p\n", head, vq);
@@ -423,8 +568,8 @@ unmap_release:
for (n = 0; n < total_sg; n++) {
if (i == err_idx)
break;
- vring_unmap_one(vq, &desc[i]);
- i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
+ vring_unmap_one_split(vq, &desc[i]);
+ i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next);
}
if (indirect)
@@ -434,6 +579,1122 @@ unmap_release:
return -EIO;
}
+static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 new, old;
+ bool needs_kick;
+
+ START_USE(vq);
+ /* We need to expose available array entries before checking avail
+ * event. */
+ virtio_mb(vq->weak_barriers);
+
+ old = vq->split.avail_idx_shadow - vq->num_added;
+ new = vq->split.avail_idx_shadow;
+ vq->num_added = 0;
+
+ LAST_ADD_TIME_CHECK(vq);
+ LAST_ADD_TIME_INVALID(vq);
+
+ if (vq->event) {
+ needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
+ vring_avail_event(&vq->split.vring)),
+ new, old);
+ } else {
+ needs_kick = !(vq->split.vring.used->flags &
+ cpu_to_virtio16(_vq->vdev,
+ VRING_USED_F_NO_NOTIFY));
+ }
+ END_USE(vq);
+ return needs_kick;
+}
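/*
 * Editorial note, not part of this patch: a worked vring_need_event()
 * example. vring_need_event(event_idx, new, old) is
 * (u16)(new - event_idx - 1) < (u16)(new - old). With old = 10, new = 13 and
 * an avail event of 11, 13 - 11 - 1 = 1 < 3, so a kick is due; with an avail
 * event of 13 the left-hand side wraps to 0xffff and no kick is needed yet.
 */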
+
+static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
+ void **ctx)
+{
+ unsigned int i, j;
+ __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
+
+ /* Clear data ptr. */
+ vq->split.desc_state[head].data = NULL;
+
+ /* Put back on free list: unmap first-level descriptors and find end */
+ i = head;
+
+ while (vq->split.vring.desc[i].flags & nextflag) {
+ vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
+ i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
+ vq->vq.num_free++;
+ }
+
+ vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
+ vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
+ vq->free_head);
+ vq->free_head = head;
+
+ /* Plus final descriptor */
+ vq->vq.num_free++;
+
+ if (vq->indirect) {
+ struct vring_desc *indir_desc =
+ vq->split.desc_state[head].indir_desc;
+ u32 len;
+
+ /* Free the indirect table, if any, now that it's unmapped. */
+ if (!indir_desc)
+ return;
+
+ len = virtio32_to_cpu(vq->vq.vdev,
+ vq->split.vring.desc[head].len);
+
+ BUG_ON(!(vq->split.vring.desc[head].flags &
+ cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
+ BUG_ON(len == 0 || len % sizeof(struct vring_desc));
+
+ for (j = 0; j < len / sizeof(struct vring_desc); j++)
+ vring_unmap_one_split(vq, &indir_desc[j]);
+
+ kfree(indir_desc);
+ vq->split.desc_state[head].indir_desc = NULL;
+ } else if (ctx) {
+ *ctx = vq->split.desc_state[head].indir_desc;
+ }
+}
+
+static inline bool more_used_split(const struct vring_virtqueue *vq)
+{
+ return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
+ vq->split.vring.used->idx);
+}
+
+static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
+ unsigned int *len,
+ void **ctx)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ void *ret;
+ unsigned int i;
+ u16 last_used;
+
+ START_USE(vq);
+
+ if (unlikely(vq->broken)) {
+ END_USE(vq);
+ return NULL;
+ }
+
+ if (!more_used_split(vq)) {
+ pr_debug("No more buffers in queue\n");
+ END_USE(vq);
+ return NULL;
+ }
+
+ /* Only get used array entries after they have been exposed by host. */
+ virtio_rmb(vq->weak_barriers);
+
+ last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
+ i = virtio32_to_cpu(_vq->vdev,
+ vq->split.vring.used->ring[last_used].id);
+ *len = virtio32_to_cpu(_vq->vdev,
+ vq->split.vring.used->ring[last_used].len);
+
+ if (unlikely(i >= vq->split.vring.num)) {
+ BAD_RING(vq, "id %u out of range\n", i);
+ return NULL;
+ }
+ if (unlikely(!vq->split.desc_state[i].data)) {
+ BAD_RING(vq, "id %u is not a head!\n", i);
+ return NULL;
+ }
+
+ /* detach_buf_split clears data, so grab it now. */
+ ret = vq->split.desc_state[i].data;
+ detach_buf_split(vq, i, ctx);
+ vq->last_used_idx++;
+ /* If we expect an interrupt for the next entry, tell host
+ * by writing event index and flush out the write before
+ * the read in the next get_buf call. */
+ if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
+ virtio_store_mb(vq->weak_barriers,
+ &vring_used_event(&vq->split.vring),
+ cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
+
+ LAST_ADD_TIME_INVALID(vq);
+
+ END_USE(vq);
+ return ret;
+}
+
+static void virtqueue_disable_cb_split(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
+ vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+ if (!vq->event)
+ vq->split.vring.avail->flags =
+ cpu_to_virtio16(_vq->vdev,
+ vq->split.avail_flags_shadow);
+ }
+}
+
+static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 last_used_idx;
+
+ START_USE(vq);
+
+ /* We optimistically turn back on interrupts, then check if there was
+ * more to do. */
+ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+ * either clear the flags bit or point the event index at the next
+ * entry. Always do both to keep code simple. */
+ if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+ vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ if (!vq->event)
+ vq->split.vring.avail->flags =
+ cpu_to_virtio16(_vq->vdev,
+ vq->split.avail_flags_shadow);
+ }
+ vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
+ last_used_idx = vq->last_used_idx);
+ END_USE(vq);
+ return last_used_idx;
+}
+
+static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
+ vq->split.vring.used->idx);
+}
+
+static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 bufs;
+
+ START_USE(vq);
+
+ /* We optimistically turn back on interrupts, then check if there was
+ * more to do. */
+ /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
+ * either clear the flags bit or point the event index at the next
+ * entry. Always update the event index to keep code simple. */
+ if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
+ vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ if (!vq->event)
+ vq->split.vring.avail->flags =
+ cpu_to_virtio16(_vq->vdev,
+ vq->split.avail_flags_shadow);
+ }
+ /* TODO: tune this threshold */
+ bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;
+
+ virtio_store_mb(vq->weak_barriers,
+ &vring_used_event(&vq->split.vring),
+ cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
+
+ if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
+ - vq->last_used_idx) > bufs)) {
+ END_USE(vq);
+ return false;
+ }
+
+ END_USE(vq);
+ return true;
+}
+
+static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ unsigned int i;
+ void *buf;
+
+ START_USE(vq);
+
+ for (i = 0; i < vq->split.vring.num; i++) {
+ if (!vq->split.desc_state[i].data)
+ continue;
+ /* detach_buf_split clears data, so grab it now. */
+ buf = vq->split.desc_state[i].data;
+ detach_buf_split(vq, i, NULL);
+ vq->split.avail_idx_shadow--;
+ vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
+ vq->split.avail_idx_shadow);
+ END_USE(vq);
+ return buf;
+ }
+ /* That should have freed everything. */
+ BUG_ON(vq->vq.num_free != vq->split.vring.num);
+
+ END_USE(vq);
+ return NULL;
+}
+
+static struct virtqueue *vring_create_virtqueue_split(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct virtqueue *vq;
+ void *queue = NULL;
+ dma_addr_t dma_addr;
+ size_t queue_size_in_bytes;
+ struct vring vring;
+
+ /* We assume num is a power of 2. */
+ if (num & (num - 1)) {
+ dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
+ return NULL;
+ }
+
+ /* TODO: allocate each queue chunk individually */
+ for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
+ queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
+ &dma_addr,
+ GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ if (queue)
+ break;
+ if (!may_reduce_num)
+ return NULL;
+ }
+
+ if (!num)
+ return NULL;
+
+ if (!queue) {
+ /* Try to get a single page. You are my only hope! */
+ queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
+ &dma_addr, GFP_KERNEL|__GFP_ZERO);
+ }
+ if (!queue)
+ return NULL;
+
+ queue_size_in_bytes = vring_size(num, vring_align);
+ vring_init(&vring, num, queue, vring_align);
+
+ vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
+ notify, callback, name);
+ if (!vq) {
+ vring_free_queue(vdev, queue_size_in_bytes, queue,
+ dma_addr);
+ return NULL;
+ }
+
+ to_vvq(vq)->split.queue_dma_addr = dma_addr;
+ to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
+ to_vvq(vq)->we_own_ring = true;
+
+ return vq;
+}
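/*
 * Editorial note, not part of this patch: the allocation loop above halves
 * num on each failed contiguous allocation (giving up instead when
 * may_reduce_num is false), so a driver must read the resulting ring size
 * back via virtqueue_get_vring_size() rather than assume the requested
 * length was granted.
 */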
+
+
+/*
+ * Packed ring specific functions - *_packed().
+ */
+
+static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
+ struct vring_desc_extra_packed *state)
+{
+ u16 flags;
+
+ if (!vq->use_dma_api)
+ return;
+
+ flags = state->flags;
+
+ if (flags & VRING_DESC_F_INDIRECT) {
+ dma_unmap_single(vring_dma_dev(vq),
+ state->addr, state->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(vring_dma_dev(vq),
+ state->addr, state->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
+ struct vring_packed_desc *desc)
+{
+ u16 flags;
+
+ if (!vq->use_dma_api)
+ return;
+
+ flags = le16_to_cpu(desc->flags);
+
+ if (flags & VRING_DESC_F_INDIRECT) {
+ dma_unmap_single(vring_dma_dev(vq),
+ le64_to_cpu(desc->addr),
+ le32_to_cpu(desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(vring_dma_dev(vq),
+ le64_to_cpu(desc->addr),
+ le32_to_cpu(desc->len),
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
+ gfp_t gfp)
+{
+ struct vring_packed_desc *desc;
+
+ /*
+ * We require lowmem mappings for the descriptors because
+ * otherwise virt_to_phys will give us bogus addresses in the
+ * virtqueue.
+ */
+ gfp &= ~__GFP_HIGHMEM;
+
+ desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);
+
+ return desc;
+}
+
+static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
+ struct scatterlist *sgs[],
+ unsigned int total_sg,
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ void *data,
+ gfp_t gfp)
+{
+ struct vring_packed_desc *desc;
+ struct scatterlist *sg;
+ unsigned int i, n, err_idx;
+ u16 head, id;
+ dma_addr_t addr;
+
+ head = vq->packed.next_avail_idx;
+ desc = alloc_indirect_packed(total_sg, gfp);
+ if (!desc) {
+ END_USE(vq);
+ return -ENOMEM;
+ }
+
+ if (unlikely(vq->vq.num_free < 1)) {
+ pr_debug("Can't add buf len 1 - avail = 0\n");
+ kfree(desc);
+ END_USE(vq);
+ return -ENOSPC;
+ }
+
+ i = 0;
+ id = vq->free_head;
+ BUG_ON(id == vq->packed.vring.num);
+
+ for (n = 0; n < out_sgs + in_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ addr = vring_map_one_sg(vq, sg, n < out_sgs ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (vring_mapping_error(vq, addr))
+ goto unmap_release;
+
+ desc[i].flags = cpu_to_le16(n < out_sgs ?
+ 0 : VRING_DESC_F_WRITE);
+ desc[i].addr = cpu_to_le64(addr);
+ desc[i].len = cpu_to_le32(sg->length);
+ i++;
+ }
+ }
+
+ /* Now that the indirect table is filled in, map it. */
+ addr = vring_map_single(vq, desc,
+ total_sg * sizeof(struct vring_packed_desc),
+ DMA_TO_DEVICE);
+ if (vring_mapping_error(vq, addr))
+ goto unmap_release;
+
+ vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
+ vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
+ sizeof(struct vring_packed_desc));
+ vq->packed.vring.desc[head].id = cpu_to_le16(id);
+
+ if (vq->use_dma_api) {
+ vq->packed.desc_extra[id].addr = addr;
+ vq->packed.desc_extra[id].len = total_sg *
+ sizeof(struct vring_packed_desc);
+ vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
+ vq->packed.avail_used_flags;
+ }
+
+ /*
+ * A driver MUST NOT make the first descriptor in the list
+ * available before all subsequent descriptors comprising
+ * the list are made available.
+ */
+ virtio_wmb(vq->weak_barriers);
+ vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
+ vq->packed.avail_used_flags);
+
+ /* We're using some buffers from the free list. */
+ vq->vq.num_free -= 1;
+
+ /* Update free pointer */
+ n = head + 1;
+ if (n >= vq->packed.vring.num) {
+ n = 0;
+ vq->packed.avail_wrap_counter ^= 1;
+ vq->packed.avail_used_flags ^=
+ 1 << VRING_PACKED_DESC_F_AVAIL |
+ 1 << VRING_PACKED_DESC_F_USED;
+ }
+ vq->packed.next_avail_idx = n;
+ vq->free_head = vq->packed.desc_state[id].next;
+
+ /* Store token and indirect buffer state. */
+ vq->packed.desc_state[id].num = 1;
+ vq->packed.desc_state[id].data = data;
+ vq->packed.desc_state[id].indir_desc = desc;
+ vq->packed.desc_state[id].last = id;
+
+ vq->num_added += 1;
+
+ pr_debug("Added buffer head %i to %p\n", head, vq);
+ END_USE(vq);
+
+ return 0;
+
+unmap_release:
+ err_idx = i;
+
+ for (i = 0; i < err_idx; i++)
+ vring_unmap_desc_packed(vq, &desc[i]);
+
+ kfree(desc);
+
+ END_USE(vq);
+ return -EIO;
+}
+
+static inline int virtqueue_add_packed(struct virtqueue *_vq,
+ struct scatterlist *sgs[],
+ unsigned int total_sg,
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ void *data,
+ void *ctx,
+ gfp_t gfp)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct vring_packed_desc *desc;
+ struct scatterlist *sg;
+ unsigned int i, n, c, descs_used, err_idx;
+ __le16 uninitialized_var(head_flags), flags;
+ u16 head, id, uninitialized_var(prev), curr, avail_used_flags;
+
+ START_USE(vq);
+
+ BUG_ON(data == NULL);
+ BUG_ON(ctx && vq->indirect);
+
+ if (unlikely(vq->broken)) {
+ END_USE(vq);
+ return -EIO;
+ }
+
+ LAST_ADD_TIME_UPDATE(vq);
+
+ BUG_ON(total_sg == 0);
+
+ if (virtqueue_use_indirect(_vq, total_sg))
+ return virtqueue_add_indirect_packed(vq, sgs, total_sg,
+ out_sgs, in_sgs, data, gfp);
+
+ head = vq->packed.next_avail_idx;
+ avail_used_flags = vq->packed.avail_used_flags;
+
+ WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);
+
+ desc = vq->packed.vring.desc;
+ i = head;
+ descs_used = total_sg;
+
+ if (unlikely(vq->vq.num_free < descs_used)) {
+ pr_debug("Can't add buf len %i - avail = %i\n",
+ descs_used, vq->vq.num_free);
+ END_USE(vq);
+ return -ENOSPC;
+ }
+
+ id = vq->free_head;
+ BUG_ON(id == vq->packed.vring.num);
+
+ curr = id;
+ c = 0;
+ for (n = 0; n < out_sgs + in_sgs; n++) {
+ for (sg = sgs[n]; sg; sg = sg_next(sg)) {
+ dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (vring_mapping_error(vq, addr))
+ goto unmap_release;
+
+ flags = cpu_to_le16(vq->packed.avail_used_flags |
+ (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
+ (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
+ if (i == head)
+ head_flags = flags;
+ else
+ desc[i].flags = flags;
+
+ desc[i].addr = cpu_to_le64(addr);
+ desc[i].len = cpu_to_le32(sg->length);
+ desc[i].id = cpu_to_le16(id);
+
+ if (unlikely(vq->use_dma_api)) {
+ vq->packed.desc_extra[curr].addr = addr;
+ vq->packed.desc_extra[curr].len = sg->length;
+ vq->packed.desc_extra[curr].flags =
+ le16_to_cpu(flags);
+ }
+ prev = curr;
+ curr = vq->packed.desc_state[curr].next;
+
+ if ((unlikely(++i >= vq->packed.vring.num))) {
+ i = 0;
+ vq->packed.avail_used_flags ^=
+ 1 << VRING_PACKED_DESC_F_AVAIL |
+ 1 << VRING_PACKED_DESC_F_USED;
+ }
+ }
+ }
+
+ if (i < head)
+ vq->packed.avail_wrap_counter ^= 1;
+
+ /* We're using some buffers from the free list. */
+ vq->vq.num_free -= descs_used;
+
+ /* Update free pointer */
+ vq->packed.next_avail_idx = i;
+ vq->free_head = curr;
+
+ /* Store token. */
+ vq->packed.desc_state[id].num = descs_used;
+ vq->packed.desc_state[id].data = data;
+ vq->packed.desc_state[id].indir_desc = ctx;
+ vq->packed.desc_state[id].last = prev;
+
+ /*
+ * A driver MUST NOT make the first descriptor in the list
+ * available before all subsequent descriptors comprising
+ * the list are made available.
+ */
+ virtio_wmb(vq->weak_barriers);
+ vq->packed.vring.desc[head].flags = head_flags;
+ vq->num_added += descs_used;
+
+ pr_debug("Added buffer head %i to %p\n", head, vq);
+ END_USE(vq);
+
+ return 0;
+
+unmap_release:
+ err_idx = i;
+ i = head;
+
+ vq->packed.avail_used_flags = avail_used_flags;
+
+ for (n = 0; n < total_sg; n++) {
+ if (i == err_idx)
+ break;
+ vring_unmap_desc_packed(vq, &desc[i]);
+ i++;
+ if (i >= vq->packed.vring.num)
+ i = 0;
+ }
+
+ END_USE(vq);
+ return -EIO;
+}
+
+static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 new, old, off_wrap, flags, wrap_counter, event_idx;
+ bool needs_kick;
+ union {
+ struct {
+ __le16 off_wrap;
+ __le16 flags;
+ };
+ u32 u32;
+ } snapshot;
+
+ START_USE(vq);
+
+ /*
+ * We need to expose the new flags value before checking notification
+ * suppressions.
+ */
+ virtio_mb(vq->weak_barriers);
+
+ old = vq->packed.next_avail_idx - vq->num_added;
+ new = vq->packed.next_avail_idx;
+ vq->num_added = 0;
+
+ snapshot.u32 = *(u32 *)vq->packed.vring.device;
+ flags = le16_to_cpu(snapshot.flags);
+
+ LAST_ADD_TIME_CHECK(vq);
+ LAST_ADD_TIME_INVALID(vq);
+
+ if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
+ needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
+ goto out;
+ }
+
+ off_wrap = le16_to_cpu(snapshot.off_wrap);
+
+ wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
+ event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+ if (wrap_counter != vq->packed.avail_wrap_counter)
+ event_idx -= vq->packed.vring.num;
+
+ needs_kick = vring_need_event(event_idx, new, old);
+out:
+ END_USE(vq);
+ return needs_kick;
+}
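/*
 * Editorial note, not part of this patch: decoding off_wrap. Bit 15
 * (VRING_PACKED_EVENT_F_WRAP_CTR) carries the device's wrap counter and
 * bits 0-14 the event index. E.g. on a 256-entry ring, off_wrap =
 * (1 << 15) | 10 with a driver avail wrap counter of 0 means the event
 * position refers to the previous lap, so event_idx is adjusted by
 * subtracting the ring size before the vring_need_event() comparison.
 */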
+
+static void detach_buf_packed(struct vring_virtqueue *vq,
+ unsigned int id, void **ctx)
+{
+ struct vring_desc_state_packed *state = NULL;
+ struct vring_packed_desc *desc;
+ unsigned int i, curr;
+
+ state = &vq->packed.desc_state[id];
+
+ /* Clear data ptr. */
+ state->data = NULL;
+
+ vq->packed.desc_state[state->last].next = vq->free_head;
+ vq->free_head = id;
+ vq->vq.num_free += state->num;
+
+ if (unlikely(vq->use_dma_api)) {
+ curr = id;
+ for (i = 0; i < state->num; i++) {
+ vring_unmap_state_packed(vq,
+ &vq->packed.desc_extra[curr]);
+ curr = vq->packed.desc_state[curr].next;
+ }
+ }
+
+ if (vq->indirect) {
+ u32 len;
+
+ /* Free the indirect table, if any, now that it's unmapped. */
+ desc = state->indir_desc;
+ if (!desc)
+ return;
+
+ if (vq->use_dma_api) {
+ len = vq->packed.desc_extra[id].len;
+ for (i = 0; i < len / sizeof(struct vring_packed_desc);
+ i++)
+ vring_unmap_desc_packed(vq, &desc[i]);
+ }
+ kfree(desc);
+ state->indir_desc = NULL;
+ } else if (ctx) {
+ *ctx = state->indir_desc;
+ }
+}
+
+static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
+ u16 idx, bool used_wrap_counter)
+{
+ bool avail, used;
+ u16 flags;
+
+ flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
+ avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
+ used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
+
+ return avail == used && used == used_wrap_counter;
+}
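/*
 * Editorial note, not part of this patch: a worked example of the
 * AVAIL/USED flag protocol. With a wrap counter of 1, the driver publishes
 * a descriptor as AVAIL=1/USED=0 (avail != used: not yet consumed); the
 * device completes it by writing AVAIL=1/USED=1, making
 * avail == used == counter, so is_used_desc_packed() returns true. After
 * the ring wraps, the counter flips to 0 and completions show up as
 * AVAIL=0/USED=0.
 */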
+
+static inline bool more_used_packed(const struct vring_virtqueue *vq)
+{
+ return is_used_desc_packed(vq, vq->last_used_idx,
+ vq->packed.used_wrap_counter);
+}
+
+static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
+ unsigned int *len,
+ void **ctx)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 last_used, id;
+ void *ret;
+
+ START_USE(vq);
+
+ if (unlikely(vq->broken)) {
+ END_USE(vq);
+ return NULL;
+ }
+
+ if (!more_used_packed(vq)) {
+ pr_debug("No more buffers in queue\n");
+ END_USE(vq);
+ return NULL;
+ }
+
+ /* Only get used elements after they have been exposed by host. */
+ virtio_rmb(vq->weak_barriers);
+
+ last_used = vq->last_used_idx;
+ id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
+ *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);
+
+ if (unlikely(id >= vq->packed.vring.num)) {
+ BAD_RING(vq, "id %u out of range\n", id);
+ return NULL;
+ }
+ if (unlikely(!vq->packed.desc_state[id].data)) {
+ BAD_RING(vq, "id %u is not a head!\n", id);
+ return NULL;
+ }
+
+ /* detach_buf_packed clears data, so grab it now. */
+ ret = vq->packed.desc_state[id].data;
+ detach_buf_packed(vq, id, ctx);
+
+ vq->last_used_idx += vq->packed.desc_state[id].num;
+ if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
+ vq->last_used_idx -= vq->packed.vring.num;
+ vq->packed.used_wrap_counter ^= 1;
+ }
+
+ /*
+ * If we expect an interrupt for the next entry, tell host
+ * by writing event index and flush out the write before
+ * the read in the next get_buf call.
+ */
+ if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
+ virtio_store_mb(vq->weak_barriers,
+ &vq->packed.vring.driver->off_wrap,
+ cpu_to_le16(vq->last_used_idx |
+ (vq->packed.used_wrap_counter <<
+ VRING_PACKED_EVENT_F_WRAP_CTR)));
+
+ LAST_ADD_TIME_INVALID(vq);
+
+ END_USE(vq);
+ return ret;
+}
+
+static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
+ vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+ vq->packed.vring.driver->flags =
+ cpu_to_le16(vq->packed.event_flags_shadow);
+ }
+}
+
+static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ START_USE(vq);
+
+ /*
+ * We optimistically turn back on interrupts, then check if there was
+ * more to do.
+ */
+
+ if (vq->event) {
+ vq->packed.vring.driver->off_wrap =
+ cpu_to_le16(vq->last_used_idx |
+ (vq->packed.used_wrap_counter <<
+ VRING_PACKED_EVENT_F_WRAP_CTR));
+ /*
+ * We need to update event offset and event wrap
+ * counter first before updating event flags.
+ */
+ virtio_wmb(vq->weak_barriers);
+ }
+
+ if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
+ vq->packed.event_flags_shadow = vq->event ?
+ VRING_PACKED_EVENT_FLAG_DESC :
+ VRING_PACKED_EVENT_FLAG_ENABLE;
+ vq->packed.vring.driver->flags =
+ cpu_to_le16(vq->packed.event_flags_shadow);
+ }
+
+ END_USE(vq);
+ return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
+ VRING_PACKED_EVENT_F_WRAP_CTR);
+}
+
+static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ bool wrap_counter;
+ u16 used_idx;
+
+ wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
+ used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
+
+ return is_used_desc_packed(vq, used_idx, wrap_counter);
+}
+
+static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u16 used_idx, wrap_counter;
+ u16 bufs;
+
+ START_USE(vq);
+
+ /*
+ * We optimistically turn back on interrupts, then check if there was
+ * more to do.
+ */
+
+ if (vq->event) {
+ /* TODO: tune this threshold */
+ bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
+ wrap_counter = vq->packed.used_wrap_counter;
+
+ used_idx = vq->last_used_idx + bufs;
+ if (used_idx >= vq->packed.vring.num) {
+ used_idx -= vq->packed.vring.num;
+ wrap_counter ^= 1;
+ }
+
+ vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
+ (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));
+
+ /*
+ * We need to update event offset and event wrap
+ * counter first before updating event flags.
+ */
+ virtio_wmb(vq->weak_barriers);
+ } else {
+ used_idx = vq->last_used_idx;
+ wrap_counter = vq->packed.used_wrap_counter;
+ }
+
+ if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
+ vq->packed.event_flags_shadow = vq->event ?
+ VRING_PACKED_EVENT_FLAG_DESC :
+ VRING_PACKED_EVENT_FLAG_ENABLE;
+ vq->packed.vring.driver->flags =
+ cpu_to_le16(vq->packed.event_flags_shadow);
+ }
+
+ /*
+ * We need to update event suppression structure first
+ * before re-checking for more used buffers.
+ */
+ virtio_mb(vq->weak_barriers);
+
+ if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
+ END_USE(vq);
+ return false;
+ }
+
+ END_USE(vq);
+ return true;
+}
+
+static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ unsigned int i;
+ void *buf;
+
+ START_USE(vq);
+
+ for (i = 0; i < vq->packed.vring.num; i++) {
+ if (!vq->packed.desc_state[i].data)
+ continue;
+ /* detach_buf clears data, so grab it now. */
+ buf = vq->packed.desc_state[i].data;
+ detach_buf_packed(vq, i, NULL);
+ END_USE(vq);
+ return buf;
+ }
+ /* That should have freed everything. */
+ BUG_ON(vq->vq.num_free != vq->packed.vring.num);
+
+ END_USE(vq);
+ return NULL;
+}
+
+static struct virtqueue *vring_create_virtqueue_packed(
+ unsigned int index,
+ unsigned int num,
+ unsigned int vring_align,
+ struct virtio_device *vdev,
+ bool weak_barriers,
+ bool may_reduce_num,
+ bool context,
+ bool (*notify)(struct virtqueue *),
+ void (*callback)(struct virtqueue *),
+ const char *name)
+{
+ struct vring_virtqueue *vq;
+ struct vring_packed_desc *ring;
+ struct vring_packed_desc_event *driver, *device;
+ dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
+ size_t ring_size_in_bytes, event_size_in_bytes;
+ unsigned int i;
+
+ ring_size_in_bytes = num * sizeof(struct vring_packed_desc);
+
+ ring = vring_alloc_queue(vdev, ring_size_in_bytes,
+ &ring_dma_addr,
+ GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ if (!ring)
+ goto err_ring;
+
+ event_size_in_bytes = sizeof(struct vring_packed_desc_event);
+
+ driver = vring_alloc_queue(vdev, event_size_in_bytes,
+ &driver_event_dma_addr,
+ GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ if (!driver)
+ goto err_driver;
+
+ device = vring_alloc_queue(vdev, event_size_in_bytes,
+ &device_event_dma_addr,
+ GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
+ if (!device)
+ goto err_device;
+
+ vq = kmalloc(sizeof(*vq), GFP_KERNEL);
+ if (!vq)
+ goto err_vq;
+
+ vq->vq.callback = callback;
+ vq->vq.vdev = vdev;
+ vq->vq.name = name;
+ vq->vq.num_free = num;
+ vq->vq.index = index;
+ vq->we_own_ring = true;
+ vq->notify = notify;
+ vq->weak_barriers = weak_barriers;
+ vq->broken = false;
+ vq->last_used_idx = 0;
+ vq->num_added = 0;
+ vq->packed_ring = true;
+ vq->use_dma_api = vring_use_dma_api(vdev);
+ list_add_tail(&vq->vq.list, &vdev->vqs);
+#ifdef DEBUG
+ vq->in_use = false;
+ vq->last_add_time_valid = false;
+#endif
+
+ vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
+ !context;
+ vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
+
+ vq->packed.ring_dma_addr = ring_dma_addr;
+ vq->packed.driver_event_dma_addr = driver_event_dma_addr;
+ vq->packed.device_event_dma_addr = device_event_dma_addr;
+
+ vq->packed.ring_size_in_bytes = ring_size_in_bytes;
+ vq->packed.event_size_in_bytes = event_size_in_bytes;
+
+ vq->packed.vring.num = num;
+ vq->packed.vring.desc = ring;
+ vq->packed.vring.driver = driver;
+ vq->packed.vring.device = device;
+
+ vq->packed.next_avail_idx = 0;
+ vq->packed.avail_wrap_counter = 1;
+ vq->packed.used_wrap_counter = 1;
+ vq->packed.event_flags_shadow = 0;
+ vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;
+
+ vq->packed.desc_state = kmalloc_array(num,
+ sizeof(struct vring_desc_state_packed),
+ GFP_KERNEL);
+ if (!vq->packed.desc_state)
+ goto err_desc_state;
+
+ memset(vq->packed.desc_state, 0,
+ num * sizeof(struct vring_desc_state_packed));
+
+ /* Put everything in free lists. */
+ vq->free_head = 0;
+ for (i = 0; i < num-1; i++)
+ vq->packed.desc_state[i].next = i + 1;
+
+ vq->packed.desc_extra = kmalloc_array(num,
+ sizeof(struct vring_desc_extra_packed),
+ GFP_KERNEL);
+ if (!vq->packed.desc_extra)
+ goto err_desc_extra;
+
+ memset(vq->packed.desc_extra, 0,
+ num * sizeof(struct vring_desc_extra_packed));
+
+ /* No callback? Tell other side not to bother us. */
+ if (!callback) {
+ vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
+ vq->packed.vring.driver->flags =
+ cpu_to_le16(vq->packed.event_flags_shadow);
+ }
+
+ return &vq->vq;
+
+err_desc_extra:
+ kfree(vq->packed.desc_state);
+err_desc_state:
+ kfree(vq);
+err_vq:
+ vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
+err_device:
+ vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
+err_driver:
+ vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
+err_ring:
+ return NULL;
+}
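
The avail_used_flags initialization above (AVAIL set, USED clear) corresponds to a fresh ring whose driver wrap counter is 1. As a hedged sketch of the general rule: the driver marks a descriptor available by writing AVAIL equal to its wrap counter and USED as the inverse, so the device can tell fresh descriptors from stale ones after a wrap.

static inline u16 packed_avail_flags(bool wrap_counter)
{
	/* AVAIL mirrors the driver's wrap counter, USED is its inverse. */
	u16 avail = wrap_counter ? (1 << VRING_PACKED_DESC_F_AVAIL) : 0;
	u16 used = wrap_counter ? 0 : (1 << VRING_PACKED_DESC_F_USED);

	return avail | used;
}
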
+
+
+/*
+ * Generic functions and exported symbols.
+ */
+
+static inline int virtqueue_add(struct virtqueue *_vq,
+ struct scatterlist *sgs[],
+ unsigned int total_sg,
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ void *data,
+ void *ctx,
+ gfp_t gfp)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
+ out_sgs, in_sgs, data, ctx, gfp) :
+ virtqueue_add_split(_vq, sgs, total_sg,
+ out_sgs, in_sgs, data, ctx, gfp);
+}
+
/**
* virtqueue_add_sgs - expose buffers to other end
* @vq: the struct virtqueue we're talking about.
@@ -460,6 +1721,7 @@ int virtqueue_add_sgs(struct virtqueue *_vq,
/* Count them first. */
for (i = 0; i < out_sgs + in_sgs; i++) {
struct scatterlist *sg;
+
for (sg = sgs[i]; sg; sg = sg_next(sg))
total_sg++;
}
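
A hedged driver-side sketch of the call pattern documented here: one out scatterlist carrying a request and one in scatterlist for the response. example_submit, req and resp are hypothetical names; assumes <linux/virtio.h> and <linux/scatterlist.h>.

static int example_submit(struct virtqueue *vq, void *req, size_t req_len,
			  void *resp, size_t resp_len, void *cookie)
{
	struct scatterlist out, in, *sgs[2];

	sg_init_one(&out, req, req_len);
	sg_init_one(&in, resp, resp_len);
	sgs[0] = &out;
	sgs[1] = &in;

	/* 1 out_sgs, 1 in_sgs; cookie comes back from virtqueue_get_buf(). */
	return virtqueue_add_sgs(vq, sgs, 1, 1, cookie, GFP_ATOMIC);
}
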
@@ -550,34 +1812,9 @@ EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- u16 new, old;
- bool needs_kick;
- START_USE(vq);
- /* We need to expose available array entries before checking avail
- * event. */
- virtio_mb(vq->weak_barriers);
-
- old = vq->avail_idx_shadow - vq->num_added;
- new = vq->avail_idx_shadow;
- vq->num_added = 0;
-
-#ifdef DEBUG
- if (vq->last_add_time_valid) {
- WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
- vq->last_add_time)) > 100);
- }
- vq->last_add_time_valid = false;
-#endif
-
- if (vq->event) {
- needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
- new, old);
- } else {
- needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
- }
- END_USE(vq);
- return needs_kick;
+ return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
+ virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
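
A hedged usage sketch of the prepare/notify split: run the cheap virtqueue_kick_prepare() under the driver's (hypothetical) lock and the possibly expensive virtqueue_notify() outside it; virtqueue_kick() itself is the unlocked composition of the two.

static void example_kick(struct virtqueue *vq, spinlock_t *lock)
{
	bool notify;

	spin_lock(lock);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(lock);

	/* Notify (a possible VM exit) happens outside the lock. */
	if (notify)
		virtqueue_notify(vq);
}
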
@@ -625,60 +1862,6 @@ bool virtqueue_kick(struct virtqueue *vq)
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
-static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
- void **ctx)
-{
- unsigned int i, j;
- __virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
-
- /* Clear data ptr. */
- vq->desc_state[head].data = NULL;
-
- /* Put back on free list: unmap first-level descriptors and find end */
- i = head;
-
- while (vq->vring.desc[i].flags & nextflag) {
- vring_unmap_one(vq, &vq->vring.desc[i]);
- i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
- vq->vq.num_free++;
- }
-
- vring_unmap_one(vq, &vq->vring.desc[i]);
- vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
- vq->free_head = head;
-
- /* Plus final descriptor */
- vq->vq.num_free++;
-
- if (vq->indirect) {
- struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
- u32 len;
-
- /* Free the indirect table, if any, now that it's unmapped. */
- if (!indir_desc)
- return;
-
- len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);
-
- BUG_ON(!(vq->vring.desc[head].flags &
- cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
- BUG_ON(len == 0 || len % sizeof(struct vring_desc));
-
- for (j = 0; j < len / sizeof(struct vring_desc); j++)
- vring_unmap_one(vq, &indir_desc[j]);
-
- kfree(indir_desc);
- vq->desc_state[head].indir_desc = NULL;
- } else if (ctx) {
- *ctx = vq->desc_state[head].indir_desc;
- }
-}
-
-static inline bool more_used(const struct vring_virtqueue *vq)
-{
- return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
-}
-
/**
* virtqueue_get_buf - get the next used buffer
* @vq: the struct virtqueue we're talking about.
@@ -699,57 +1882,9 @@ void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
void **ctx)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- void *ret;
- unsigned int i;
- u16 last_used;
-
- START_USE(vq);
- if (unlikely(vq->broken)) {
- END_USE(vq);
- return NULL;
- }
-
- if (!more_used(vq)) {
- pr_debug("No more buffers in queue\n");
- END_USE(vq);
- return NULL;
- }
-
- /* Only get used array entries after they have been exposed by host. */
- virtio_rmb(vq->weak_barriers);
-
- last_used = (vq->last_used_idx & (vq->vring.num - 1));
- i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
- *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);
-
- if (unlikely(i >= vq->vring.num)) {
- BAD_RING(vq, "id %u out of range\n", i);
- return NULL;
- }
- if (unlikely(!vq->desc_state[i].data)) {
- BAD_RING(vq, "id %u is not a head!\n", i);
- return NULL;
- }
-
- /* detach_buf clears data, so grab it now. */
- ret = vq->desc_state[i].data;
- detach_buf(vq, i, ctx);
- vq->last_used_idx++;
- /* If we expect an interrupt for the next entry, tell host
- * by writing event index and flush out the write before
- * the read in the next get_buf call. */
- if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
- virtio_store_mb(vq->weak_barriers,
- &vring_used_event(&vq->vring),
- cpu_to_virtio16(_vq->vdev, vq->last_used_idx));
-
-#ifdef DEBUG
- vq->last_add_time_valid = false;
-#endif
-
- END_USE(vq);
- return ret;
+ return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
+ virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);
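
A hedged sketch of a consumer: whichever layout is in use, virtqueue_get_buf() returns the cookie passed to virtqueue_add_*() plus the number of bytes written. handle_completion() is a hypothetical helper.

static void handle_completion(void *cookie, unsigned int len);

static void example_drain(struct virtqueue *vq)
{
	unsigned int len;
	void *cookie;

	while ((cookie = virtqueue_get_buf(vq, &len)) != NULL)
		handle_completion(cookie, len);
}
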
@@ -771,12 +1906,10 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
- vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
- if (!vq->event)
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
- }
-
+ if (vq->packed_ring)
+ virtqueue_disable_cb_packed(_vq);
+ else
+ virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
@@ -795,23 +1928,9 @@ EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- u16 last_used_idx;
-
- START_USE(vq);
- /* We optimistically turn back on interrupts, then check if there was
- * more to do. */
- /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
- * either clear the flags bit or point the event index at the next
- * entry. Always do both to keep code simple. */
- if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
- vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
- if (!vq->event)
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
- }
- vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
- END_USE(vq);
- return last_used_idx;
+ return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
+ virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
@@ -829,7 +1948,8 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
struct vring_virtqueue *vq = to_vvq(_vq);
virtio_mb(vq->weak_barriers);
- return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
+ return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
+ virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
@@ -847,6 +1967,7 @@ EXPORT_SYMBOL_GPL(virtqueue_poll);
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
+
return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
@@ -867,34 +1988,9 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- u16 bufs;
-
- START_USE(vq);
-
- /* We optimistically turn back on interrupts, then check if there was
- * more to do. */
- /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
- * either clear the flags bit or point the event index at the next
- * entry. Always update the event index to keep code simple. */
- if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
- vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
- if (!vq->event)
- vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
- }
- /* TODO: tune this threshold */
- bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
-
- virtio_store_mb(vq->weak_barriers,
- &vring_used_event(&vq->vring),
- cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
-
- if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
- END_USE(vq);
- return false;
- }
- END_USE(vq);
- return true;
+ return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
+ virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
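
A hedged sketch of the usual interrupt-handling loop built from these primitives: disable callbacks, drain the ring, then re-enable; if virtqueue_enable_cb() reports that more buffers arrived in the meantime, go around again. handle_completion() is hypothetical.

static void handle_completion(void *cookie, unsigned int len);

static void example_poll_until_empty(struct virtqueue *vq)
{
	unsigned int len;
	void *cookie;

	do {
		virtqueue_disable_cb(vq);
		while ((cookie = virtqueue_get_buf(vq, &len)) != NULL)
			handle_completion(cookie, len);
	} while (!virtqueue_enable_cb(vq));
}
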
@@ -909,30 +2005,17 @@ EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- unsigned int i;
- void *buf;
-
- START_USE(vq);
-
- for (i = 0; i < vq->vring.num; i++) {
- if (!vq->desc_state[i].data)
- continue;
- /* detach_buf clears data, so grab it now. */
- buf = vq->desc_state[i].data;
- detach_buf(vq, i, NULL);
- vq->avail_idx_shadow--;
- vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
- END_USE(vq);
- return buf;
- }
- /* That should have freed everything. */
- BUG_ON(vq->vq.num_free != vq->vring.num);
- END_USE(vq);
- return NULL;
+ return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
+ virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
+static inline bool more_used(const struct vring_virtqueue *vq)
+{
+ return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
+}
+
irqreturn_t vring_interrupt(int irq, void *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -953,6 +2036,7 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
}
EXPORT_SYMBOL_GPL(vring_interrupt);
+/* Only available for split ring */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
struct vring vring,
struct virtio_device *vdev,
@@ -965,27 +2049,26 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
unsigned int i;
struct vring_virtqueue *vq;
- vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
- GFP_KERNEL);
+ if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
+ return NULL;
+
+ vq = kmalloc(sizeof(*vq), GFP_KERNEL);
if (!vq)
return NULL;
- vq->vring = vring;
+ vq->packed_ring = false;
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
vq->vq.num_free = vring.num;
vq->vq.index = index;
vq->we_own_ring = false;
- vq->queue_dma_addr = 0;
- vq->queue_size_in_bytes = 0;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
- vq->avail_flags_shadow = 0;
- vq->avail_idx_shadow = 0;
vq->num_added = 0;
+ vq->use_dma_api = vring_use_dma_api(vdev);
list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
@@ -996,65 +2079,39 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
!context;
vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
+ vq->split.queue_dma_addr = 0;
+ vq->split.queue_size_in_bytes = 0;
+
+ vq->split.vring = vring;
+ vq->split.avail_flags_shadow = 0;
+ vq->split.avail_idx_shadow = 0;
+
/* No callback? Tell other side not to bother us. */
if (!callback) {
- vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
+ vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
if (!vq->event)
- vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+ vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
+ vq->split.avail_flags_shadow);
+ }
+
+ vq->split.desc_state = kmalloc_array(vring.num,
+ sizeof(struct vring_desc_state_split), GFP_KERNEL);
+ if (!vq->split.desc_state) {
+ kfree(vq);
+ return NULL;
}
/* Put everything in free lists. */
vq->free_head = 0;
for (i = 0; i < vring.num-1; i++)
- vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
- memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));
+ vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
+ memset(vq->split.desc_state, 0, vring.num *
+ sizeof(struct vring_desc_state_split));
return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
-static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
-{
- if (vring_use_dma_api(vdev)) {
- return dma_alloc_coherent(vdev->dev.parent, size,
- dma_handle, flag);
- } else {
- void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
- if (queue) {
- phys_addr_t phys_addr = virt_to_phys(queue);
- *dma_handle = (dma_addr_t)phys_addr;
-
- /*
- * Sanity check: make sure we didn't truncate
- * the address. The only arches I can find that
- * have 64-bit phys_addr_t but 32-bit dma_addr_t
- * are certain non-highmem MIPS and x86
- * configurations, but these configurations
- * should never allocate physical pages above 32
- * bits, so this is fine. Just in case, throw a
- * warning and abort if we end up with an
- * unrepresentable address.
- */
- if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
- free_pages_exact(queue, PAGE_ALIGN(size));
- return NULL;
- }
- }
- return queue;
- }
-}
-
-static void vring_free_queue(struct virtio_device *vdev, size_t size,
- void *queue, dma_addr_t dma_handle)
-{
- if (vring_use_dma_api(vdev)) {
- dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
- } else {
- free_pages_exact(queue, PAGE_ALIGN(size));
- }
-}
-
struct virtqueue *vring_create_virtqueue(
unsigned int index,
unsigned int num,
@@ -1067,57 +2124,19 @@ struct virtqueue *vring_create_virtqueue(
void (*callback)(struct virtqueue *),
const char *name)
{
- struct virtqueue *vq;
- void *queue = NULL;
- dma_addr_t dma_addr;
- size_t queue_size_in_bytes;
- struct vring vring;
-
- /* We assume num is a power of 2. */
- if (num & (num - 1)) {
- dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
- return NULL;
- }
-
- /* TODO: allocate each queue chunk individually */
- for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
- queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
- &dma_addr,
- GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
- if (queue)
- break;
- }
-
- if (!num)
- return NULL;
-
- if (!queue) {
- /* Try to get a single page. You are my only hope! */
- queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
- &dma_addr, GFP_KERNEL|__GFP_ZERO);
- }
- if (!queue)
- return NULL;
- queue_size_in_bytes = vring_size(num, vring_align);
- vring_init(&vring, num, queue, vring_align);
+ if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
+ return vring_create_virtqueue_packed(index, num, vring_align,
+ vdev, weak_barriers, may_reduce_num,
+ context, notify, callback, name);
- vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
- notify, callback, name);
- if (!vq) {
- vring_free_queue(vdev, queue_size_in_bytes, queue,
- dma_addr);
- return NULL;
- }
-
- to_vvq(vq)->queue_dma_addr = dma_addr;
- to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
- to_vvq(vq)->we_own_ring = true;
-
- return vq;
+ return vring_create_virtqueue_split(index, num, vring_align,
+ vdev, weak_barriers, may_reduce_num,
+ context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
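
A hedged transport-side sketch: callers keep using vring_create_virtqueue() unchanged and get the packed layout automatically when VIRTIO_F_RING_PACKED was negotiated. The queue size, alignment and name below are arbitrary illustration values.

static struct virtqueue *example_setup_vq(struct virtio_device *vdev,
					  unsigned int index,
					  bool (*notify)(struct virtqueue *),
					  void (*cb)(struct virtqueue *))
{
	/* Split or packed is selected internally from the features. */
	return vring_create_virtqueue(index, 256, SMP_CACHE_BYTES, vdev,
				      true /* weak_barriers */,
				      true /* may_reduce_num */,
				      false /* context */,
				      notify, cb, "example-vq");
}
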
+/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
unsigned int vring_align,
@@ -1130,6 +2149,10 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
const char *name)
{
struct vring vring;
+
+ if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
+ return NULL;
+
vring_init(&vring, num, pages, vring_align);
return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
notify, callback, name);
@@ -1141,8 +2164,32 @@ void vring_del_virtqueue(struct virtqueue *_vq)
struct vring_virtqueue *vq = to_vvq(_vq);
if (vq->we_own_ring) {
- vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
- vq->vring.desc, vq->queue_dma_addr);
+ if (vq->packed_ring) {
+ vring_free_queue(vq->vq.vdev,
+ vq->packed.ring_size_in_bytes,
+ vq->packed.vring.desc,
+ vq->packed.ring_dma_addr);
+
+ vring_free_queue(vq->vq.vdev,
+ vq->packed.event_size_in_bytes,
+ vq->packed.vring.driver,
+ vq->packed.driver_event_dma_addr);
+
+ vring_free_queue(vq->vq.vdev,
+ vq->packed.event_size_in_bytes,
+ vq->packed.vring.device,
+ vq->packed.device_event_dma_addr);
+
+ kfree(vq->packed.desc_state);
+ kfree(vq->packed.desc_extra);
+ } else {
+ vring_free_queue(vq->vq.vdev,
+ vq->split.queue_size_in_bytes,
+ vq->split.vring.desc,
+ vq->split.queue_dma_addr);
+
+ kfree(vq->split.desc_state);
+ }
}
list_del(&_vq->list);
kfree(vq);
@@ -1164,6 +2211,8 @@ void vring_transport_features(struct virtio_device *vdev)
break;
case VIRTIO_F_IOMMU_PLATFORM:
break;
+ case VIRTIO_F_RING_PACKED:
+ break;
default:
/* We don't understand this bit. */
__virtio_clear_bit(vdev, i);
@@ -1184,7 +2233,7 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
struct vring_virtqueue *vq = to_vvq(_vq);
- return vq->vring.num;
+ return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
@@ -1217,7 +2266,10 @@ dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
BUG_ON(!vq->we_own_ring);
- return vq->queue_dma_addr;
+ if (vq->packed_ring)
+ return vq->packed.ring_dma_addr;
+
+ return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
@@ -1227,8 +2279,11 @@ dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
BUG_ON(!vq->we_own_ring);
- return vq->queue_dma_addr +
- ((char *)vq->vring.avail - (char *)vq->vring.desc);
+ if (vq->packed_ring)
+ return vq->packed.driver_event_dma_addr;
+
+ return vq->split.queue_dma_addr +
+ ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
@@ -1238,14 +2293,18 @@ dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
BUG_ON(!vq->we_own_ring);
- return vq->queue_dma_addr +
- ((char *)vq->vring.used - (char *)vq->vring.desc);
+ if (vq->packed_ring)
+ return vq->packed.device_event_dma_addr;
+
+ return vq->split.queue_dma_addr +
+ ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
+/* Only available for split ring */
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
- return &to_vvq(vq)->vring;
+ return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 815b9e9bb975..838b66a9a0e7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -340,4 +340,7 @@ config XEN_SYMS
config XEN_HAVE_VPMU
bool
+config XEN_FRONT_PGDIR_SHBUF
+ tristate
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 3e542f60f29f..c48927a58e10 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -44,3 +44,4 @@ xen-gntdev-y := gntdev.o
xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o
xen-gntalloc-y := gntalloc.o
xen-privcmd-y := privcmd.o privcmd-buf.o
+obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index fdfc64f5acea..221b7333d067 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -251,25 +251,10 @@ static void release_memory_resource(struct resource *resource)
kfree(resource);
}
-/*
- * Host memory not allocated to dom0. We can use this range for hotplug-based
- * ballooning.
- *
- * It's a type-less resource. Setting IORESOURCE_MEM will make resource
- * management algorithms (arch_remove_reservations()) look into guest e820,
- * which we don't want.
- */
-static struct resource hostmem_resource = {
- .name = "Host RAM",
-};
-
-void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
-{}
-
static struct resource *additional_memory_resource(phys_addr_t size)
{
- struct resource *res, *res_hostmem;
- int ret = -ENOMEM;
+ struct resource *res;
+ int ret;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
@@ -278,42 +263,13 @@ static struct resource *additional_memory_resource(phys_addr_t size)
res->name = "System RAM";
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
- res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
- if (res_hostmem) {
- /* Try to grab a range from hostmem */
- res_hostmem->name = "Host memory";
- ret = allocate_resource(&hostmem_resource, res_hostmem,
- size, 0, -1,
- PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
- }
-
- if (!ret) {
- /*
- * Insert this resource into iomem. Because hostmem_resource
- * tracks portion of guest e820 marked as UNUSABLE, no one else
- * should try to use it.
- */
- res->start = res_hostmem->start;
- res->end = res_hostmem->end;
- ret = insert_resource(&iomem_resource, res);
- if (ret < 0) {
- pr_err("Can't insert iomem_resource [%llx - %llx]\n",
- res->start, res->end);
- release_memory_resource(res_hostmem);
- res_hostmem = NULL;
- res->start = res->end = 0;
- }
- }
-
- if (ret) {
- ret = allocate_resource(&iomem_resource, res,
- size, 0, -1,
- PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
- if (ret < 0) {
- pr_err("Cannot allocate new System RAM resource\n");
- kfree(res);
- return NULL;
- }
+ ret = allocate_resource(&iomem_resource, res,
+ size, 0, -1,
+ PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+ if (ret < 0) {
+ pr_err("Cannot allocate new System RAM resource\n");
+ kfree(res);
+ return NULL;
}
#ifdef CONFIG_SPARSEMEM
@@ -325,7 +281,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
pfn, limit);
release_memory_resource(res);
- release_memory_resource(res_hostmem);
return NULL;
}
}
@@ -750,8 +705,6 @@ static int __init balloon_init(void)
set_online_page_callback(&xen_online_page);
register_memory_notifier(&xen_memory_nb);
register_sysctl_table(xen_root);
-
- arch_xen_balloon_init(&hostmem_resource);
#endif
#ifdef CONFIG_XEN_PV
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 2f11ca72a281..77224d8f3e6f 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -385,8 +385,8 @@ static int create_active(struct sock_mapping *map, int *evtchn)
out_error:
if (*evtchn >= 0)
xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
- kfree(map->active.data.in);
- kfree(map->active.ring);
+ free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
+ free_page((unsigned long)map->active.ring);
return ret;
}
diff --git a/drivers/xen/xen-front-pgdir-shbuf.c b/drivers/xen/xen-front-pgdir-shbuf.c
new file mode 100644
index 000000000000..48a658dc7ccf
--- /dev/null
+++ b/drivers/xen/xen-front-pgdir-shbuf.c
@@ -0,0 +1,553 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ * Xen frontend/backend page directory based shared buffer
+ * helper module.
+ *
+ * Copyright (C) 2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+
+#include <asm/xen/hypervisor.h>
+#include <xen/balloon.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/interface/io/ring.h>
+
+#include <xen/xen-front-pgdir-shbuf.h>
+
+#ifndef GRANT_INVALID_REF
+/*
+ * FIXME: usage of grant reference 0 as invalid grant reference:
+ * grant reference 0 is valid, but never exposed to a PV driver,
+ * because it is already in use/reserved by the PV console.
+ */
+#define GRANT_INVALID_REF 0
+#endif
+
+/**
+ * This structure describes the layout of a shared page
+ * that contains grant references to the pages of the shared
+ * buffer. This layout is common to many Xen para-virtualized
+ * protocols at include/xen/interface/io/.
+ */
+struct xen_page_directory {
+ grant_ref_t gref_dir_next_page;
+ grant_ref_t gref[1]; /* Variable length */
+};
+
+/**
+ * Shared buffer ops which are implemented differently
+ * depending on the allocation mode, e.g. whether the buffer
+ * is allocated by the corresponding backend or frontend.
+ * Some of the operations are optional.
+ */
+struct xen_front_pgdir_shbuf_ops {
+ /*
+ * Calculate number of grefs required to handle this buffer,
+ * e.g. if grefs are required for page directory only or the buffer
+ * pages as well.
+ */
+ void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);
+
+ /* Fill page directory according to para-virtual display protocol. */
+ void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);
+
+ /* Claim grant references for the pages of the buffer. */
+ int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
+ grant_ref_t *priv_gref_head, int gref_idx);
+
+ /* Map grant references of the buffer. */
+ int (*map)(struct xen_front_pgdir_shbuf *buf);
+
+ /* Unmap grant references of the buffer. */
+ int (*unmap)(struct xen_front_pgdir_shbuf *buf);
+};
+
+/**
+ * Get granted reference to the very first page of the
+ * page directory. Usually this is passed to the backend,
+ * so it can find/fill the grant references to the buffer's
+ * pages.
+ *
+ * \param buf shared buffer whose page directory is of interest.
+ * \return granted reference to the very first page of the
+ * page directory.
+ */
+grant_ref_t
+xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
+{
+ if (!buf->grefs)
+ return GRANT_INVALID_REF;
+
+ return buf->grefs[0];
+}
+EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);
+
+/**
+ * Map granted references of the shared buffer.
+ *
+ * Depending on the shared buffer mode of allocation
+ * (be_alloc flag) this can either do nothing (for buffers
+ * shared by the frontend itself) or map the provided granted
+ * references onto the backing storage (buf->pages).
+ *
+ * \param buf shared buffer whose grants are to be mapped.
+ * \return zero on success or a negative number on failure.
+ */
+int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
+{
+ if (buf->ops && buf->ops->map)
+ return buf->ops->map(buf);
+
+ /* No need to map own grant references. */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);
+
+/**
+ * Unmap granted references of the shared buffer.
+ *
+ * Depending on the shared buffer mode of allocation
+ * (be_alloc flag) this can either do nothing (for buffers
+ * shared by the frontend itself) or unmap the provided granted
+ * references.
+ *
+ * \param buf shared buffer whose grants are to be unmapped.
+ * \return zero on success or a negative number on failure.
+ */
+int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
+{
+ if (buf->ops && buf->ops->unmap)
+ return buf->ops->unmap(buf);
+
+ /* No need to unmap own grant references. */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);
+
+/**
+ * Free all the resources of the shared buffer.
+ *
+ * \param buf shared buffer whose resources are to be freed.
+ */
+void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
+{
+ if (buf->grefs) {
+ int i;
+
+ for (i = 0; i < buf->num_grefs; i++)
+ if (buf->grefs[i] != GRANT_INVALID_REF)
+ gnttab_end_foreign_access(buf->grefs[i],
+ 0, 0UL);
+ }
+ kfree(buf->grefs);
+ kfree(buf->directory);
+}
+EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);
+
+/*
+ * Number of grefs a page can hold with respect to the
+ * struct xen_page_directory header.
+ */
+#define XEN_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
+ offsetof(struct xen_page_directory, \
+ gref)) / sizeof(grant_ref_t))
+
+/**
+ * Get the number of pages the page directory consumes itself.
+ *
+ * \param buf shared buffer.
+ */
+static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
+{
+ return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
+}
+
+/**
+ * Calculate the number of grant references needed to share the buffer
+ * and its pages when backend allocates the buffer.
+ *
+ * \param buf shared buffer.
+ */
+static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
+{
+ /* Only for pages the page directory consumes itself. */
+ buf->num_grefs = get_num_pages_dir(buf);
+}
+
+/**
+ * Calculate the number of grant references needed to share the buffer
+ * and its pages when frontend allocates the buffer.
+ *
+ * \param buf shared buffer.
+ */
+static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
+{
+ /*
+ * Number of pages the page directory consumes itself
+ * plus grefs for the buffer pages.
+ */
+ buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
+}
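
A hedged worked example of the arithmetic: with 4 KiB pages and a 4-byte grant_ref_t, XEN_NUM_GREFS_PER_PAGE = (4096 - 4) / 4 = 1023, so a 2000-page buffer needs DIV_ROUND_UP(2000, 1023) = 2 directory pages. That is 2 grefs in backend-allocated mode and 2 + 2000 = 2002 in frontend-allocated mode. As a sketch:

static unsigned int example_num_grefs(unsigned int num_pages, bool be_alloc)
{
	unsigned int dir_pages = DIV_ROUND_UP(num_pages,
					      XEN_NUM_GREFS_PER_PAGE);

	/* Directory grefs only for backend-allocated buffers;
	 * directory plus data-page grefs otherwise.
	 */
	return be_alloc ? dir_pages : dir_pages + num_pages;
}
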
+
+#define xen_page_to_vaddr(page) \
+ ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
+
+/**
+ * Unmap the buffer previously mapped with grant references
+ * provided by the backend.
+ *
+ * \param buf shared buffer.
+ * \return zero on success or a negative number on failure.
+ */
+static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
+{
+ struct gnttab_unmap_grant_ref *unmap_ops;
+ int i, ret;
+
+ if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
+ return 0;
+
+ unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
+ GFP_KERNEL);
+ if (!unmap_ops)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->num_pages; i++) {
+ phys_addr_t addr;
+
+ addr = xen_page_to_vaddr(buf->pages[i]);
+ gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
+ buf->backend_map_handles[i]);
+ }
+
+ ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
+ buf->num_pages);
+
+ for (i = 0; i < buf->num_pages; i++) {
+ if (unlikely(unmap_ops[i].status != GNTST_okay))
+ dev_err(&buf->xb_dev->dev,
+ "Failed to unmap page %d: %d\n",
+ i, unmap_ops[i].status);
+ }
+
+ if (ret)
+ dev_err(&buf->xb_dev->dev,
+ "Failed to unmap grant references, ret %d", ret);
+
+ kfree(unmap_ops);
+ kfree(buf->backend_map_handles);
+ buf->backend_map_handles = NULL;
+ return ret;
+}
+
+/**
+ * Map the buffer with grant references provided by the backend.
+ *
+ * \param buf shared buffer.
+ * \return zero on success or a negative number on failure.
+ */
+static int backend_map(struct xen_front_pgdir_shbuf *buf)
+{
+ struct gnttab_map_grant_ref *map_ops = NULL;
+ unsigned char *ptr;
+ int ret, cur_gref, cur_dir_page, cur_page, grefs_left;
+
+ map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
+ if (!map_ops)
+ return -ENOMEM;
+
+ buf->backend_map_handles = kcalloc(buf->num_pages,
+ sizeof(*buf->backend_map_handles),
+ GFP_KERNEL);
+ if (!buf->backend_map_handles) {
+ kfree(map_ops);
+ return -ENOMEM;
+ }
+
+ /*
+ * Read page directory to get grefs from the backend: for external
+ * buffer we only allocate buf->grefs for the page directory,
+ * so buf->num_grefs holds the number of pages in the page directory itself.
+ */
+ ptr = buf->directory;
+ grefs_left = buf->num_pages;
+ cur_page = 0;
+ for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
+ struct xen_page_directory *page_dir =
+ (struct xen_page_directory *)ptr;
+ int to_copy = XEN_NUM_GREFS_PER_PAGE;
+
+ if (to_copy > grefs_left)
+ to_copy = grefs_left;
+
+ for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
+ phys_addr_t addr;
+
+ addr = xen_page_to_vaddr(buf->pages[cur_page]);
+ gnttab_set_map_op(&map_ops[cur_page], addr,
+ GNTMAP_host_map,
+ page_dir->gref[cur_gref],
+ buf->xb_dev->otherend_id);
+ cur_page++;
+ }
+
+ grefs_left -= to_copy;
+ ptr += PAGE_SIZE;
+ }
+ ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);
+
+ /* Save handles even if error, so we can unmap. */
+ for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
+ buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
+ if (unlikely(map_ops[cur_page].status != GNTST_okay))
+ dev_err(&buf->xb_dev->dev,
+ "Failed to map page %d: %d\n",
+ cur_page, map_ops[cur_page].status);
+ }
+
+ if (ret) {
+ dev_err(&buf->xb_dev->dev,
+ "Failed to map grant references, ret %d", ret);
+ backend_unmap(buf);
+ }
+
+ kfree(map_ops);
+ return ret;
+}
+
+/**
+ * Fill page directory with grant references to the pages of the
+ * page directory itself.
+ *
+ * The grant references to the buffer pages are provided by the
+ * backend in this case.
+ *
+ * \param buf shared buffer.
+ */
+static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
+{
+ struct xen_page_directory *page_dir;
+ unsigned char *ptr;
+ int i, num_pages_dir;
+
+ ptr = buf->directory;
+ num_pages_dir = get_num_pages_dir(buf);
+
+ /* Fill only grefs for the page directory itself. */
+ for (i = 0; i < num_pages_dir - 1; i++) {
+ page_dir = (struct xen_page_directory *)ptr;
+
+ page_dir->gref_dir_next_page = buf->grefs[i + 1];
+ ptr += PAGE_SIZE;
+ }
+ /* The last page must say there are no more pages. */
+ page_dir = (struct xen_page_directory *)ptr;
+ page_dir->gref_dir_next_page = GRANT_INVALID_REF;
+}
+
+/**
+ * Fill page directory with grant references to the pages of the
+ * page directory and the buffer we share with the backend.
+ *
+ * \param buf shared buffer.
+ */
+static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
+{
+ unsigned char *ptr;
+ int cur_gref, grefs_left, to_copy, i, num_pages_dir;
+
+ ptr = buf->directory;
+ num_pages_dir = get_num_pages_dir(buf);
+
+ /*
+ * While copying, skip the grefs at the start: they are for the
+ * pages granted to the page directory itself.
+ */
+ cur_gref = num_pages_dir;
+ grefs_left = buf->num_pages;
+ for (i = 0; i < num_pages_dir; i++) {
+ struct xen_page_directory *page_dir =
+ (struct xen_page_directory *)ptr;
+
+ if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
+ to_copy = grefs_left;
+ page_dir->gref_dir_next_page = GRANT_INVALID_REF;
+ } else {
+ to_copy = XEN_NUM_GREFS_PER_PAGE;
+ page_dir->gref_dir_next_page = buf->grefs[i + 1];
+ }
+ memcpy(&page_dir->gref, &buf->grefs[cur_gref],
+ to_copy * sizeof(grant_ref_t));
+ ptr += PAGE_SIZE;
+ grefs_left -= to_copy;
+ cur_gref += to_copy;
+ }
+}
+
+/**
+ * Grant references to the frontend's buffer pages.
+ *
+ * These will be shared with the backend, so it can
+ * access the buffer's data.
+ *
+ * \param buf shared buffer.
+ * \return zero on success or a negative number on failure.
+ */
+static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
+ grant_ref_t *priv_gref_head,
+ int gref_idx)
+{
+ int i, cur_ref, otherend_id;
+
+ otherend_id = buf->xb_dev->otherend_id;
+ for (i = 0; i < buf->num_pages; i++) {
+ cur_ref = gnttab_claim_grant_reference(priv_gref_head);
+ if (cur_ref < 0)
+ return cur_ref;
+
+ gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
+ xen_page_to_gfn(buf->pages[i]),
+ 0);
+ buf->grefs[gref_idx++] = cur_ref;
+ }
+ return 0;
+}
+
+/**
+ * Grant all the references needed to share the buffer.
+ *
+ * Grant references to the page directory pages and, if
+ * needed, also to the pages of the shared buffer data.
+ *
+ * \param buf shared buffer.
+ * \return zero on success or a negative number on failure.
+ */
+static int grant_references(struct xen_front_pgdir_shbuf *buf)
+{
+ grant_ref_t priv_gref_head;
+ int ret, i, j, cur_ref;
+ int otherend_id, num_pages_dir;
+
+ ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
+ if (ret < 0) {
+ dev_err(&buf->xb_dev->dev,
+ "Cannot allocate grant references\n");
+ return ret;
+ }
+
+ otherend_id = buf->xb_dev->otherend_id;
+ j = 0;
+ num_pages_dir = get_num_pages_dir(buf);
+ for (i = 0; i < num_pages_dir; i++) {
+ unsigned long frame;
+
+ cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
+ if (cur_ref < 0)
+ return cur_ref;
+
+ frame = xen_page_to_gfn(virt_to_page(buf->directory +
+ PAGE_SIZE * i));
+ gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
+ buf->grefs[j++] = cur_ref;
+ }
+
+ if (buf->ops->grant_refs_for_buffer) {
+ ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
+ if (ret)
+ return ret;
+ }
+
+ gnttab_free_grant_references(priv_gref_head);
+ return 0;
+}
+
+/**
+ * Allocate all structures required to manage the shared buffer.
+ *
+ * \param buf shared buffer.
+ * \return zero on success or a negative number on failure.
+ */
+static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
+{
+ buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
+ if (!buf->grefs)
+ return -ENOMEM;
+
+ buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
+ if (!buf->directory)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * For backend-allocated buffers we don't need grant_refs_for_buffer,
+ * as those grant references are allocated on the backend side.
+ */
+static const struct xen_front_pgdir_shbuf_ops backend_ops = {
+ .calc_num_grefs = backend_calc_num_grefs,
+ .fill_page_dir = backend_fill_page_dir,
+ .map = backend_map,
+ .unmap = backend_unmap
+};
+
+/*
+ * For locally granted references we do not need to map/unmap
+ * the references.
+ */
+static const struct xen_front_pgdir_shbuf_ops local_ops = {
+ .calc_num_grefs = guest_calc_num_grefs,
+ .fill_page_dir = guest_fill_page_dir,
+ .grant_refs_for_buffer = guest_grant_refs_for_buffer,
+};
+
+/**
+ * Allocate a new instance of a shared buffer.
+ *
+ * \param cfg configuration to be used while allocating a new shared buffer.
+ * \return zero on success or a negative number on failure.
+ */
+int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg)
+{
+ struct xen_front_pgdir_shbuf *buf = cfg->pgdir;
+ int ret;
+
+ if (cfg->be_alloc)
+ buf->ops = &backend_ops;
+ else
+ buf->ops = &local_ops;
+ buf->xb_dev = cfg->xb_dev;
+ buf->num_pages = cfg->num_pages;
+ buf->pages = cfg->pages;
+
+ buf->ops->calc_num_grefs(buf);
+
+ ret = alloc_storage(buf);
+ if (ret)
+ goto fail;
+
+ ret = grant_references(buf);
+ if (ret)
+ goto fail;
+
+ buf->ops->fill_page_dir(buf);
+
+ return 0;
+
+fail:
+ xen_front_pgdir_shbuf_free(buf);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_alloc);
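
A hedged frontend-side usage sketch, with hypothetical names and error handling elided: fill a xen_front_pgdir_shbuf_cfg, allocate, then hand the first directory gref to the backend (e.g. via a XenBus entry).

static int example_share(struct xenbus_device *xb_dev,
			 struct xen_front_pgdir_shbuf *shbuf,
			 struct page **pages, int num_pages,
			 grant_ref_t *first_dir_gref)
{
	struct xen_front_pgdir_shbuf_cfg cfg = {
		.xb_dev = xb_dev,
		.num_pages = num_pages,
		.pages = pages,
		.pgdir = shbuf,
		.be_alloc = false,	/* frontend-allocated buffer */
	};
	int ret = xen_front_pgdir_shbuf_alloc(&cfg);

	if (ret < 0)
		return ret;

	/* Hand this to the backend so it can walk the page directory. */
	*first_dir_gref = xen_front_pgdir_shbuf_get_dir_start(shbuf);
	return 0;
}
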
+
+MODULE_DESCRIPTION("Xen frontend/backend page directory based "
+ "shared buffer handling");
+MODULE_AUTHOR("Oleksandr Andrushchenko");
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 59661db144e5..097410a7cdb7 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -106,7 +106,8 @@ static void pcistub_device_release(struct kref *kref)
* is called from "unbind" which takes a device_lock mutex.
*/
__pci_reset_function_locked(dev);
- if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
+ if (dev_data &&
+ pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
dev_info(&dev->dev, "Could not reload PCI state\n");
else
pci_restore_state(dev);
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 23f1387b3ef7..e7df65d32c91 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -36,6 +36,7 @@
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
+#include <xen/xen-ops.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 43dea3b00c29..8a2562e3a316 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -1075,8 +1075,6 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
if (fc->ac.error < 0)
return;
- d_drop(new_dentry);
-
inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key,
newfid, newstatus, newcb, fc->cbi);
if (IS_ERR(inode)) {
@@ -1090,7 +1088,7 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
vnode = AFS_FS_I(inode);
set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
afs_vnode_commit_status(fc, vnode, 0);
- d_add(new_dentry, inode);
+ d_instantiate(new_dentry, inode);
}
/*
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index d049cb459742..fde6b4d4121e 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -61,8 +61,11 @@ void afs_fileserver_probe_result(struct afs_call *call)
afs_io_error(call, afs_io_error_fs_probe_fail);
goto out;
case -ECONNRESET: /* Responded, but call expired. */
+ case -ERFKILL:
+ case -EADDRNOTAVAIL:
case -ENETUNREACH:
case -EHOSTUNREACH:
+ case -EHOSTDOWN:
case -ECONNREFUSED:
case -ETIMEDOUT:
case -ETIME:
@@ -132,12 +135,14 @@ out:
static int afs_do_probe_fileserver(struct afs_net *net,
struct afs_server *server,
struct key *key,
- unsigned int server_index)
+ unsigned int server_index,
+ struct afs_error *_e)
{
struct afs_addr_cursor ac = {
.index = 0,
};
- int ret;
+ bool in_progress = false;
+ int err;
_enter("%pU", &server->uuid);
@@ -151,15 +156,17 @@ static int afs_do_probe_fileserver(struct afs_net *net,
server->probe.rtt = UINT_MAX;
for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
- ret = afs_fs_get_capabilities(net, server, &ac, key, server_index,
+ err = afs_fs_get_capabilities(net, server, &ac, key, server_index,
true);
- if (ret != -EINPROGRESS) {
- afs_fs_probe_done(server);
- return ret;
- }
+ if (err == -EINPROGRESS)
+ in_progress = true;
+ else
+ afs_prioritise_error(_e, err, ac.abort_code);
}
- return 0;
+ if (!in_progress)
+ afs_fs_probe_done(server);
+ return in_progress;
}
/*
@@ -169,21 +176,23 @@ int afs_probe_fileservers(struct afs_net *net, struct key *key,
struct afs_server_list *list)
{
struct afs_server *server;
- int i, ret;
+ struct afs_error e;
+ bool in_progress = false;
+ int i;
+ e.error = 0;
+ e.responded = false;
for (i = 0; i < list->nr_servers; i++) {
server = list->servers[i].server;
if (test_bit(AFS_SERVER_FL_PROBED, &server->flags))
continue;
- if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags)) {
- ret = afs_do_probe_fileserver(net, server, key, i);
- if (ret)
- return ret;
- }
+ if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags) &&
+ afs_do_probe_fileserver(net, server, key, i, &e))
+ in_progress = true;
}
- return 0;
+ return in_progress ? 0 : e.error;
}
/*
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 4c6d8e1112c2..6b17d3620414 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -382,7 +382,7 @@ void afs_zap_data(struct afs_vnode *vnode)
int afs_validate(struct afs_vnode *vnode, struct key *key)
{
time64_t now = ktime_get_real_seconds();
- bool valid = false;
+ bool valid;
int ret;
_enter("{v={%llx:%llu} fl=%lx},%x",
@@ -402,15 +402,21 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
vnode->cb_v_break = vnode->volume->cb_v_break;
valid = false;
} else if (vnode->status.type == AFS_FTYPE_DIR &&
- test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) &&
- vnode->cb_expires_at - 10 > now) {
- valid = true;
- } else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
- vnode->cb_expires_at - 10 > now) {
+ (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) ||
+ vnode->cb_expires_at - 10 <= now)) {
+ valid = false;
+ } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) ||
+ vnode->cb_expires_at - 10 <= now) {
+ valid = false;
+ } else {
valid = true;
}
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
valid = true;
+ } else {
+ vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
+ vnode->cb_v_break = vnode->volume->cb_v_break;
+ valid = false;
}
read_sequnlock_excl(&vnode->cb_lock);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5da3b09b7518..8871b9e8645f 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -696,6 +696,14 @@ struct afs_interface {
};
/*
+ * Error prioritisation and accumulation.
+ */
+struct afs_error {
+ short error; /* Accumulated error */
+ bool responded; /* T if server responded */
+};
+
+/*
* Cursor for iterating over a server's address list.
*/
struct afs_addr_cursor {
@@ -1015,6 +1023,7 @@ static inline void __afs_stat(atomic_t *s)
* misc.c
*/
extern int afs_abort_to_error(u32);
+extern void afs_prioritise_error(struct afs_error *, int, u32);
/*
* mntpt.c
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 700a5fa7f4ec..bbb1fd51b019 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -118,3 +118,55 @@ int afs_abort_to_error(u32 abort_code)
default: return -EREMOTEIO;
}
}
+
+/*
+ * Select the error to report from a set of errors.
+ */
+void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
+{
+ switch (error) {
+ case 0:
+ return;
+ default:
+ if (e->error == -ETIMEDOUT ||
+ e->error == -ETIME)
+ return;
+ case -ETIMEDOUT:
+ case -ETIME:
+ if (e->error == -ENOMEM ||
+ e->error == -ENONET)
+ return;
+ case -ENOMEM:
+ case -ENONET:
+ if (e->error == -ERFKILL)
+ return;
+ case -ERFKILL:
+ if (e->error == -EADDRNOTAVAIL)
+ return;
+ case -EADDRNOTAVAIL:
+ if (e->error == -ENETUNREACH)
+ return;
+ case -ENETUNREACH:
+ if (e->error == -EHOSTUNREACH)
+ return;
+ case -EHOSTUNREACH:
+ if (e->error == -EHOSTDOWN)
+ return;
+ case -EHOSTDOWN:
+ if (e->error == -ECONNREFUSED)
+ return;
+ case -ECONNREFUSED:
+ if (e->error == -ECONNRESET)
+ return;
+ case -ECONNRESET: /* Responded, but call expired. */
+ if (e->responded)
+ return;
+ e->error = error;
+ return;
+
+ case -ECONNABORTED:
+ e->responded = true;
+ e->error = afs_abort_to_error(abort_code);
+ return;
+ }
+}
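
The deliberate fall-throughs order the cases from least to most interesting: a later, more specific error displaces an earlier one, and a server abort (-ECONNABORTED) always wins and is translated via afs_abort_to_error(). A hedged illustration of that priority order:

static int example_pick_error(void)
{
	struct afs_error e = { .error = 0, .responded = false };

	afs_prioritise_error(&e, -ETIMEDOUT, 0);	/* e.error = -ETIMEDOUT */
	afs_prioritise_error(&e, -ENETUNREACH, 0);	/* displaces -ETIMEDOUT */
	afs_prioritise_error(&e, -ECONNREFUSED, 0);	/* displaces -ENETUNREACH */
	return e.error;					/* -ECONNREFUSED */
}
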
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index 00504254c1c2..c3ae324781f8 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -136,7 +136,8 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
struct afs_addr_list *alist;
struct afs_server *server;
struct afs_vnode *vnode = fc->vnode;
- u32 rtt, abort_code;
+ struct afs_error e;
+ u32 rtt;
int error = fc->ac.error, i;
_enter("%lx[%d],%lx[%d],%d,%d",
@@ -306,8 +307,11 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
if (fc->error != -EDESTADDRREQ)
goto iterate_address;
/* Fall through */
+ case -ERFKILL:
+ case -EADDRNOTAVAIL:
case -ENETUNREACH:
case -EHOSTUNREACH:
+ case -EHOSTDOWN:
case -ECONNREFUSED:
_debug("no conn");
fc->error = error;
@@ -446,50 +450,15 @@ no_more_servers:
if (fc->flags & AFS_FS_CURSOR_VBUSY)
goto restart_from_beginning;
- abort_code = 0;
- error = -EDESTADDRREQ;
+ e.error = -EDESTADDRREQ;
+ e.responded = false;
for (i = 0; i < fc->server_list->nr_servers; i++) {
struct afs_server *s = fc->server_list->servers[i].server;
- int probe_error = READ_ONCE(s->probe.error);
- switch (probe_error) {
- case 0:
- continue;
- default:
- if (error == -ETIMEDOUT ||
- error == -ETIME)
- continue;
- case -ETIMEDOUT:
- case -ETIME:
- if (error == -ENOMEM ||
- error == -ENONET)
- continue;
- case -ENOMEM:
- case -ENONET:
- if (error == -ENETUNREACH)
- continue;
- case -ENETUNREACH:
- if (error == -EHOSTUNREACH)
- continue;
- case -EHOSTUNREACH:
- if (error == -ECONNREFUSED)
- continue;
- case -ECONNREFUSED:
- if (error == -ECONNRESET)
- continue;
- case -ECONNRESET: /* Responded, but call expired. */
- if (error == -ECONNABORTED)
- continue;
- case -ECONNABORTED:
- abort_code = s->probe.abort_code;
- error = probe_error;
- continue;
- }
+ afs_prioritise_error(&e, READ_ONCE(s->probe.error),
+ s->probe.abort_code);
}
- if (error == -ECONNABORTED)
- error = afs_abort_to_error(abort_code);
-
failed_set_error:
fc->error = error;
failed:
@@ -553,8 +522,11 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
_leave(" = f [abort]");
return false;
+ case -ERFKILL:
+ case -EADDRNOTAVAIL:
case -ENETUNREACH:
case -EHOSTUNREACH:
+ case -EHOSTDOWN:
case -ECONNREFUSED:
case -ETIMEDOUT:
case -ETIME:
@@ -633,6 +605,7 @@ int afs_end_vnode_operation(struct afs_fs_cursor *fc)
struct afs_net *net = afs_v2net(fc->vnode);
if (fc->error == -EDESTADDRREQ ||
+ fc->error == -EADDRNOTAVAIL ||
fc->error == -ENETUNREACH ||
fc->error == -EHOSTUNREACH)
afs_dump_edestaddrreq(fc);
diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
index c0f616bd70cb..f0b032976487 100644
--- a/fs/afs/vl_probe.c
+++ b/fs/afs/vl_probe.c
@@ -61,8 +61,11 @@ void afs_vlserver_probe_result(struct afs_call *call)
afs_io_error(call, afs_io_error_vl_probe_fail);
goto out;
case -ECONNRESET: /* Responded, but call expired. */
+ case -ERFKILL:
+ case -EADDRNOTAVAIL:
case -ENETUNREACH:
case -EHOSTUNREACH:
+ case -EHOSTDOWN:
case -ECONNREFUSED:
case -ETIMEDOUT:
case -ETIME:
@@ -129,15 +132,17 @@ out:
* Probe all of a vlserver's addresses to find out the best route and to
* query its capabilities.
*/
-static int afs_do_probe_vlserver(struct afs_net *net,
- struct afs_vlserver *server,
- struct key *key,
- unsigned int server_index)
+static bool afs_do_probe_vlserver(struct afs_net *net,
+ struct afs_vlserver *server,
+ struct key *key,
+ unsigned int server_index,
+ struct afs_error *_e)
{
struct afs_addr_cursor ac = {
.index = 0,
};
- int ret;
+ bool in_progress = false;
+ int err;
_enter("%s", server->name);
@@ -151,15 +156,17 @@ static int afs_do_probe_vlserver(struct afs_net *net,
server->probe.rtt = UINT_MAX;
for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
- ret = afs_vl_get_capabilities(net, &ac, key, server,
+ err = afs_vl_get_capabilities(net, &ac, key, server,
server_index, true);
- if (ret != -EINPROGRESS) {
- afs_vl_probe_done(server);
- return ret;
- }
+ if (err == -EINPROGRESS)
+ in_progress = true;
+ else
+ afs_prioritise_error(_e, err, ac.abort_code);
}
- return 0;
+ if (!in_progress)
+ afs_vl_probe_done(server);
+ return in_progress;
}
/*
@@ -169,21 +176,23 @@ int afs_send_vl_probes(struct afs_net *net, struct key *key,
struct afs_vlserver_list *vllist)
{
struct afs_vlserver *server;
- int i, ret;
+ struct afs_error e;
+ bool in_progress = false;
+ int i;
+ e.error = 0;
+ e.responded = false;
for (i = 0; i < vllist->nr_servers; i++) {
server = vllist->servers[i].server;
if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags))
continue;
- if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING, &server->flags)) {
- ret = afs_do_probe_vlserver(net, server, key, i);
- if (ret)
- return ret;
- }
+ if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING, &server->flags) &&
+ afs_do_probe_vlserver(net, server, key, i, &e))
+ in_progress = true;
}
- return 0;
+ return in_progress ? 0 : e.error;
}
/*
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index b64a284b99d2..7adde83a0648 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -71,8 +71,9 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
{
struct afs_addr_list *alist;
struct afs_vlserver *vlserver;
+ struct afs_error e;
u32 rtt;
- int error = vc->ac.error, abort_code, i;
+ int error = vc->ac.error, i;
_enter("%lx[%d],%lx[%d],%d,%d",
vc->untried, vc->index,
@@ -119,8 +120,11 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
goto failed;
}
+ case -ERFKILL:
+ case -EADDRNOTAVAIL:
case -ENETUNREACH:
case -EHOSTUNREACH:
+ case -EHOSTDOWN:
case -ECONNREFUSED:
case -ETIMEDOUT:
case -ETIME:
@@ -235,50 +239,15 @@ no_more_servers:
if (vc->flags & AFS_VL_CURSOR_RETRY)
goto restart_from_beginning;
- abort_code = 0;
- error = -EDESTADDRREQ;
+ e.error = -EDESTADDRREQ;
+ e.responded = false;
for (i = 0; i < vc->server_list->nr_servers; i++) {
struct afs_vlserver *s = vc->server_list->servers[i].server;
- int probe_error = READ_ONCE(s->probe.error);
- switch (probe_error) {
- case 0:
- continue;
- default:
- if (error == -ETIMEDOUT ||
- error == -ETIME)
- continue;
- case -ETIMEDOUT:
- case -ETIME:
- if (error == -ENOMEM ||
- error == -ENONET)
- continue;
- case -ENOMEM:
- case -ENONET:
- if (error == -ENETUNREACH)
- continue;
- case -ENETUNREACH:
- if (error == -EHOSTUNREACH)
- continue;
- case -EHOSTUNREACH:
- if (error == -ECONNREFUSED)
- continue;
- case -ECONNREFUSED:
- if (error == -ECONNRESET)
- continue;
- case -ECONNRESET: /* Responded, but call expired. */
- if (error == -ECONNABORTED)
- continue;
- case -ECONNABORTED:
- abort_code = s->probe.abort_code;
- error = probe_error;
- continue;
- }
+ afs_prioritise_error(&e, READ_ONCE(s->probe.error),
+ s->probe.abort_code);
}
- if (error == -ECONNABORTED)
- error = afs_abort_to_error(abort_code);
-
failed_set_error:
vc->error = error;
failed:
@@ -341,6 +310,7 @@ int afs_end_vlserver_operation(struct afs_vl_cursor *vc)
struct afs_net *net = vc->cell->net;
if (vc->error == -EDESTADDRREQ ||
+ vc->error == -EADDRNOTAVAIL ||
vc->error == -ENETUNREACH ||
vc->error == -EHOSTUNREACH)
afs_vl_dump_edestaddrreq(vc);
diff --git a/fs/aio.c b/fs/aio.c
index 301e6314183b..43c508f99e35 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -45,6 +45,7 @@
#include <asm/kmap_types.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
#include "internal.h"
@@ -69,6 +70,12 @@ struct aio_ring {
struct io_event io_events[0];
}; /* 128 bytes + ring size */
+/*
+ * Plugging is meant to work with larger batches of IOs. If we don't
+ * have more than the below, then don't bother setting up a plug.
+ */
+#define AIO_PLUG_THRESHOLD 2
+
#define AIO_RING_PAGES 8
struct kioctx_table {
@@ -901,7 +908,7 @@ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
local_irq_restore(flags);
}
-static bool get_reqs_available(struct kioctx *ctx)
+static bool __get_reqs_available(struct kioctx *ctx)
{
struct kioctx_cpu *kcpu;
bool ret = false;
@@ -993,6 +1000,14 @@ static void user_refill_reqs_available(struct kioctx *ctx)
spin_unlock_irq(&ctx->completion_lock);
}
+static bool get_reqs_available(struct kioctx *ctx)
+{
+ if (__get_reqs_available(ctx))
+ return true;
+ user_refill_reqs_available(ctx);
+ return __get_reqs_available(ctx);
+}
+
/* aio_get_req
* Allocate a slot for an aio request.
* Returns NULL if no requests are free.
@@ -1001,24 +1016,16 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
struct aio_kiocb *req;
- if (!get_reqs_available(ctx)) {
- user_refill_reqs_available(ctx);
- if (!get_reqs_available(ctx))
- return NULL;
- }
-
- req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
+ req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
if (unlikely(!req))
- goto out_put;
+ return NULL;
percpu_ref_get(&ctx->reqs);
+ req->ki_ctx = ctx;
INIT_LIST_HEAD(&req->ki_list);
refcount_set(&req->ki_refcnt, 0);
- req->ki_ctx = ctx;
+ req->ki_eventfd = NULL;
return req;
-out_put:
- put_reqs_available(ctx, 1);
- return NULL;
}
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
@@ -1038,6 +1045,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
if (!table || id >= table->nr)
goto out;
+ id = array_index_nospec(id, table->nr);
ctx = rcu_dereference(table->table[id]);
if (ctx && ctx->user_id == ctx_id) {
if (percpu_ref_tryget_live(&ctx->users))
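
The array_index_nospec() line above clamps the user-derived index so it cannot drive a speculative out-of-bounds load (Spectre v1). A minimal standalone sketch of the pattern, assuming <linux/nospec.h> and that the caller holds rcu_read_lock() as lookup_ioctx() does:

static struct kioctx *example_lookup(struct kioctx_table *table,
				     unsigned long id)
{
	if (!table || id >= table->nr)
		return NULL;

	/* Bound the index even under speculative execution. */
	id = array_index_nospec(id, table->nr);
	return rcu_dereference(table->table[id]);
}
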
@@ -1057,6 +1065,15 @@ static inline void iocb_put(struct aio_kiocb *iocb)
}
}
+static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
+ long res, long res2)
+{
+ ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
+ ev->data = iocb->ki_user_data;
+ ev->res = res;
+ ev->res2 = res2;
+}
+
/* aio_complete
* Called when the io request on the given iocb is complete.
*/
@@ -1084,10 +1101,7 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
event = ev_page + pos % AIO_EVENTS_PER_PAGE;
- event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
- event->data = iocb->ki_user_data;
- event->res = res;
- event->res2 = res2;
+ aio_fill_event(event, iocb, res, res2);
kunmap_atomic(ev_page);
flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
@@ -1414,7 +1428,7 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
aio_complete(iocb, res, res2);
}
-static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
+static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
{
int ret;
@@ -1436,20 +1450,26 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
ret = ioprio_check_cap(iocb->aio_reqprio);
if (ret) {
pr_debug("aio ioprio check cap error: %d\n", ret);
- return ret;
+ goto out_fput;
}
req->ki_ioprio = iocb->aio_reqprio;
} else
- req->ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+ req->ki_ioprio = get_current_ioprio();
ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
if (unlikely(ret))
- fput(req->ki_filp);
+ goto out_fput;
+
+ req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */
+ return 0;
+
+out_fput:
+ fput(req->ki_filp);
return ret;
}
-static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec,
+static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec,
bool vectored, bool compat, struct iov_iter *iter)
{
void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
@@ -1484,12 +1504,12 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
ret = -EINTR;
/*FALLTHRU*/
default:
- aio_complete_rw(req, ret, 0);
+ req->ki_complete(req, ret, 0);
}
}
-static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
- bool compat)
+static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
+ bool vectored, bool compat)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct iov_iter iter;
@@ -1521,8 +1541,8 @@ out_fput:
return ret;
}
-static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
- bool compat)
+static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
+ bool vectored, bool compat)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct iov_iter iter;
@@ -1577,7 +1597,8 @@ static void aio_fsync_work(struct work_struct *work)
aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
}
-static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
+static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
+ bool datasync)
{
if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
iocb->aio_rw_flags))
@@ -1705,7 +1726,7 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
add_wait_queue(head, &pt->iocb->poll.wait);
}
-static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
+static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
{
struct kioctx *ctx = aiocb->ki_ctx;
struct poll_iocb *req = &aiocb->poll;
@@ -1725,6 +1746,10 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
if (unlikely(!req->file))
return -EBADF;
+ req->head = NULL;
+ req->woken = false;
+ req->cancelled = false;
+
apt.pt._qproc = aio_poll_queue_proc;
apt.pt._key = req->events;
apt.iocb = aiocb;
@@ -1773,44 +1798,44 @@ out:
return 0;
}
-static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
- bool compat)
+static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
+ struct iocb __user *user_iocb, bool compat)
{
struct aio_kiocb *req;
- struct iocb iocb;
ssize_t ret;
- if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
- return -EFAULT;
-
/* enforce forwards compatibility on users */
- if (unlikely(iocb.aio_reserved2)) {
+ if (unlikely(iocb->aio_reserved2)) {
pr_debug("EINVAL: reserve field set\n");
return -EINVAL;
}
/* prevent overflows */
if (unlikely(
- (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
- (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
- ((ssize_t)iocb.aio_nbytes < 0)
+ (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
+ (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
+ ((ssize_t)iocb->aio_nbytes < 0)
)) {
pr_debug("EINVAL: overflow check\n");
return -EINVAL;
}
+ if (!get_reqs_available(ctx))
+ return -EAGAIN;
+
+ ret = -EAGAIN;
req = aio_get_req(ctx);
if (unlikely(!req))
- return -EAGAIN;
+ goto out_put_reqs_available;
- if (iocb.aio_flags & IOCB_FLAG_RESFD) {
+ if (iocb->aio_flags & IOCB_FLAG_RESFD) {
/*
* If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
* instance of the file* now. The file descriptor must be
* an eventfd() fd, and will be signaled for each completed
* event using the eventfd_signal() function.
*/
- req->ki_eventfd = eventfd_ctx_fdget((int) iocb.aio_resfd);
+ req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
if (IS_ERR(req->ki_eventfd)) {
ret = PTR_ERR(req->ki_eventfd);
req->ki_eventfd = NULL;
@@ -1825,32 +1850,32 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
}
req->ki_user_iocb = user_iocb;
- req->ki_user_data = iocb.aio_data;
+ req->ki_user_data = iocb->aio_data;
- switch (iocb.aio_lio_opcode) {
+ switch (iocb->aio_lio_opcode) {
case IOCB_CMD_PREAD:
- ret = aio_read(&req->rw, &iocb, false, compat);
+ ret = aio_read(&req->rw, iocb, false, compat);
break;
case IOCB_CMD_PWRITE:
- ret = aio_write(&req->rw, &iocb, false, compat);
+ ret = aio_write(&req->rw, iocb, false, compat);
break;
case IOCB_CMD_PREADV:
- ret = aio_read(&req->rw, &iocb, true, compat);
+ ret = aio_read(&req->rw, iocb, true, compat);
break;
case IOCB_CMD_PWRITEV:
- ret = aio_write(&req->rw, &iocb, true, compat);
+ ret = aio_write(&req->rw, iocb, true, compat);
break;
case IOCB_CMD_FSYNC:
- ret = aio_fsync(&req->fsync, &iocb, false);
+ ret = aio_fsync(&req->fsync, iocb, false);
break;
case IOCB_CMD_FDSYNC:
- ret = aio_fsync(&req->fsync, &iocb, true);
+ ret = aio_fsync(&req->fsync, iocb, true);
break;
case IOCB_CMD_POLL:
- ret = aio_poll(req, &iocb);
+ ret = aio_poll(req, iocb);
break;
default:
- pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
+ pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
ret = -EINVAL;
break;
}
@@ -1864,14 +1889,25 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
goto out_put_req;
return 0;
out_put_req:
- put_reqs_available(ctx, 1);
- percpu_ref_put(&ctx->reqs);
if (req->ki_eventfd)
eventfd_ctx_put(req->ki_eventfd);
- kmem_cache_free(kiocb_cachep, req);
+ iocb_put(req);
+out_put_reqs_available:
+ put_reqs_available(ctx, 1);
return ret;
}
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+ bool compat)
+{
+ struct iocb iocb;
+
+ if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
+ return -EFAULT;
+
+ return __io_submit_one(ctx, &iocb, user_iocb, compat);
+}
+
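
From userspace, the IOCB_FLAG_RESFD path documented above looks like this: submit with aio_resfd pointing at an eventfd, then block on the eventfd instead of on io_getevents(). A raw-syscall sketch (error handling trimmed; the file path is arbitrary):

    #include <fcntl.h>
    #include <linux/aio_abi.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/eventfd.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        aio_context_t ctx = 0;
        struct iocb cb;
        struct iocb *cbs[1] = { &cb };
        struct io_event ev;
        char buf[512];
        int efd = eventfd(0, 0);
        int fd = open("/etc/hostname", O_RDONLY);
        uint64_t count;

        syscall(SYS_io_setup, 128, &ctx);

        memset(&cb, 0, sizeof(cb));
        cb.aio_fildes = fd;
        cb.aio_lio_opcode = IOCB_CMD_PREAD;
        cb.aio_buf = (uint64_t)(uintptr_t)buf;
        cb.aio_nbytes = sizeof(buf);
        cb.aio_flags = IOCB_FLAG_RESFD;   /* signal efd on completion */
        cb.aio_resfd = efd;

        syscall(SYS_io_submit, ctx, 1, cbs);

        read(efd, &count, sizeof(count)); /* blocks until the eventfd fires */
        syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
        printf("res=%lld after %llu eventfd signal(s)\n",
               (long long)ev.res, (unsigned long long)count);

        syscall(SYS_io_destroy, ctx);
        return 0;
    }
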
/* sys_io_submit:
* Queue the nr iocbs pointed to by iocbpp for processing. Returns
* the number of iocbs queued. May return -EINVAL if the aio_context
@@ -1904,7 +1940,8 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
if (nr > ctx->nr_events)
nr = ctx->nr_events;
- blk_start_plug(&plug);
+ if (nr > AIO_PLUG_THRESHOLD)
+ blk_start_plug(&plug);
for (i = 0; i < nr; i++) {
struct iocb __user *user_iocb;
@@ -1917,7 +1954,8 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
if (ret)
break;
}
- blk_finish_plug(&plug);
+ if (nr > AIO_PLUG_THRESHOLD)
+ blk_finish_plug(&plug);
percpu_ref_put(&ctx->users);
return i ? i : ret;
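
blk_start_plug()/blk_finish_plug() batch the bios a task submits and flush them as one unit; for a single iocb the batching buys nothing, hence the new nr > AIO_PLUG_THRESHOLD guard. The guard shape, as a generic sketch (threshold and batch type are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define PLUG_THRESHOLD 2      /* illustrative stand-in for AIO_PLUG_THRESHOLD */

    struct plug { int queued; bool active; };

    static void issue(int n)               { printf("issue %d\n", n); }
    static void plug_start(struct plug *p) { p->queued = 0; p->active = true; }

    static void submit(struct plug *p, int n)
    {
        if (p && p->active)
            p->queued++;          /* held back, flushed in one batch below */
        else
            issue(n);             /* no plug: dispatched immediately */
    }

    static void plug_finish(struct plug *p)
    {
        printf("flush %d queued\n", p->queued);
        p->active = false;
    }

    int main(void)
    {
        struct plug plug;
        int nr = 8;

        if (nr > PLUG_THRESHOLD)  /* mirrors the guard added above */
            plug_start(&plug);
        for (int i = 0; i < nr; i++)
            submit(nr > PLUG_THRESHOLD ? &plug : NULL, i);
        if (nr > PLUG_THRESHOLD)
            plug_finish(&plug);
        return 0;
    }
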
@@ -1944,7 +1982,8 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
if (nr > ctx->nr_events)
nr = ctx->nr_events;
- blk_start_plug(&plug);
+ if (nr > AIO_PLUG_THRESHOLD)
+ blk_start_plug(&plug);
for (i = 0; i < nr; i++) {
compat_uptr_t user_iocb;
@@ -1957,7 +1996,8 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
if (ret)
break;
}
- blk_finish_plug(&plug);
+ if (nr > AIO_PLUG_THRESHOLD)
+ blk_finish_plug(&plug);
percpu_ref_put(&ctx->users);
return i ? i : ret;
@@ -2062,11 +2102,13 @@ static long do_io_getevents(aio_context_t ctx_id,
* specifies an infinite timeout. Note that the timeout pointed to by
* timeout is relative. Will fail with -ENOSYS if not implemented.
*/
+#if !defined(CONFIG_64BIT_TIME) || defined(CONFIG_64BIT)
+
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
long, min_nr,
long, nr,
struct io_event __user *, events,
- struct timespec __user *, timeout)
+ struct __kernel_timespec __user *, timeout)
{
struct timespec64 ts;
int ret;
@@ -2080,6 +2122,8 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
return ret;
}
+#endif
+
struct __aio_sigset {
const sigset_t __user *sigmask;
size_t sigsetsize;
@@ -2090,7 +2134,7 @@ SYSCALL_DEFINE6(io_pgetevents,
long, min_nr,
long, nr,
struct io_event __user *, events,
- struct timespec __user *, timeout,
+ struct __kernel_timespec __user *, timeout,
const struct __aio_sigset __user *, usig)
{
struct __aio_sigset ksig = { NULL, };
@@ -2104,33 +2148,56 @@ SYSCALL_DEFINE6(io_pgetevents,
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
- if (ksig.sigmask) {
- if (ksig.sigsetsize != sizeof(sigset_t))
- return -EINVAL;
- if (copy_from_user(&ksigmask, ksig.sigmask, sizeof(ksigmask)))
- return -EFAULT;
- sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
- sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
- }
+ ret = set_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
+ if (ret)
+ return ret;
ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
- if (signal_pending(current)) {
- if (ksig.sigmask) {
- current->saved_sigmask = sigsaved;
- set_restore_sigmask();
- }
+ restore_user_sigmask(ksig.sigmask, &sigsaved);
+ if (signal_pending(current) && !ret)
+ ret = -ERESTARTNOHAND;
- if (!ret)
- ret = -ERESTARTNOHAND;
- } else {
- if (ksig.sigmask)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
- }
+ return ret;
+}
+
+#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
+
+SYSCALL_DEFINE6(io_pgetevents_time32,
+ aio_context_t, ctx_id,
+ long, min_nr,
+ long, nr,
+ struct io_event __user *, events,
+ struct old_timespec32 __user *, timeout,
+ const struct __aio_sigset __user *, usig)
+{
+ struct __aio_sigset ksig = { NULL, };
+ sigset_t ksigmask, sigsaved;
+ struct timespec64 ts;
+ int ret;
+
+ if (timeout && unlikely(get_old_timespec32(&ts, timeout)))
+ return -EFAULT;
+
+ if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
+ return -EFAULT;
+
+ ret = set_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
+ if (ret)
+ return ret;
+
+ ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
+ restore_user_sigmask(ksig.sigmask, &sigsaved);
+ if (signal_pending(current) && !ret)
+ ret = -ERESTARTNOHAND;
return ret;
}
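
set_user_sigmask() and restore_user_sigmask() bundle the copy-from-user, SIGKILL/SIGSTOP filtering and sigprocmask() save/restore that every p-variant syscall used to open-code, as the deleted blocks in this file show. The encapsulated sequence, sketched with POSIX calls (helper names are illustrative):

    #include <signal.h>
    #include <stddef.h>

    /* roughly the removed open-coded block: install a temporary mask */
    static int set_mask(const sigset_t *umask, sigset_t *saved)
    {
        sigset_t k;

        if (!umask)
            return 0;
        k = *umask;                /* the kernel does copy_from_user() here */
        sigdelset(&k, SIGKILL);    /* these may never be blocked */
        sigdelset(&k, SIGSTOP);
        return sigprocmask(SIG_SETMASK, &k, saved);
    }

    static void restore_mask(const sigset_t *umask, const sigset_t *saved)
    {
        if (umask)                 /* only undo what set_mask() installed */
            sigprocmask(SIG_SETMASK, saved, NULL);
    }

    int main(void)
    {
        sigset_t mask, saved;

        sigemptyset(&mask);
        sigaddset(&mask, SIGUSR1);
        if (set_mask(&mask, &saved))
            return 1;
        /* ... wait for events with SIGUSR1 temporarily blocked ... */
        restore_mask(&mask, &saved);
        return 0;
    }
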
-#ifdef CONFIG_COMPAT
+#endif
+
+#if defined(CONFIG_COMPAT_32BIT_TIME)
+
COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
compat_long_t, min_nr,
compat_long_t, nr,
@@ -2149,12 +2216,17 @@ COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
return ret;
}
+#endif
+
+#ifdef CONFIG_COMPAT
struct __compat_aio_sigset {
compat_sigset_t __user *sigmask;
compat_size_t sigsetsize;
};
+#if defined(CONFIG_COMPAT_32BIT_TIME)
+
COMPAT_SYSCALL_DEFINE6(io_pgetevents,
compat_aio_context_t, ctx_id,
compat_long_t, min_nr,
@@ -2174,27 +2246,47 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
return -EFAULT;
- if (ksig.sigmask) {
- if (ksig.sigsetsize != sizeof(compat_sigset_t))
- return -EINVAL;
- if (get_compat_sigset(&ksigmask, ksig.sigmask))
- return -EFAULT;
- sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
- sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
- }
+ ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
+ if (ret)
+ return ret;
ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
- if (signal_pending(current)) {
- if (ksig.sigmask) {
- current->saved_sigmask = sigsaved;
- set_restore_sigmask();
- }
- if (!ret)
- ret = -ERESTARTNOHAND;
- } else {
- if (ksig.sigmask)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
- }
+ restore_user_sigmask(ksig.sigmask, &sigsaved);
+ if (signal_pending(current) && !ret)
+ ret = -ERESTARTNOHAND;
+
+ return ret;
+}
+
+#endif
+
+COMPAT_SYSCALL_DEFINE6(io_pgetevents_time64,
+ compat_aio_context_t, ctx_id,
+ compat_long_t, min_nr,
+ compat_long_t, nr,
+ struct io_event __user *, events,
+ struct __kernel_timespec __user *, timeout,
+ const struct __compat_aio_sigset __user *, usig)
+{
+ struct __compat_aio_sigset ksig = { NULL, };
+ sigset_t ksigmask, sigsaved;
+ struct timespec64 t;
+ int ret;
+
+ if (timeout && get_timespec64(&t, timeout))
+ return -EFAULT;
+
+ if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
+ return -EFAULT;
+
+ ret = set_compat_user_sigmask(ksig.sigmask, &ksigmask, &sigsaved, ksig.sigsetsize);
+ if (ret)
+ return ret;
+
+ ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
+ restore_user_sigmask(ksig.sigmask, &sigsaved);
+ if (signal_pending(current) && !ret)
+ ret = -ERESTARTNOHAND;
return ret;
}
diff --git a/fs/block_dev.c b/fs/block_dev.c
index a80b4f0ee7c4..e1886cc7048f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -181,7 +181,7 @@ static void blkdev_bio_end_io_simple(struct bio *bio)
struct task_struct *waiter = bio->bi_private;
WRITE_ONCE(bio->bi_private, NULL);
- wake_up_process(waiter);
+ blk_wake_io_task(waiter);
}
static ssize_t
@@ -232,14 +232,18 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
bio.bi_opf = dio_bio_write_op(iocb);
task_io_account_write(ret);
}
+ if (iocb->ki_flags & IOCB_HIPRI)
+ bio.bi_opf |= REQ_HIPRI;
qc = submit_bio(&bio);
for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+
if (!READ_ONCE(bio.bi_private))
break;
+
if (!(iocb->ki_flags & IOCB_HIPRI) ||
- !blk_poll(bdev_get_queue(bdev), qc))
+ !blk_poll(bdev_get_queue(bdev), qc, true))
io_schedule();
}
__set_current_state(TASK_RUNNING);
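
The submit side above parks in a loop: mark itself TASK_UNINTERRUPTIBLE, re-check the completion flag, then either spin in blk_poll() or sleep until blk_wake_io_task() fires. The publish/re-check/sleep shape, as a userspace analogue (a condition variable stands in for io_schedule(); the polling branch is elided; link with -lpthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int done;                 /* stands in for bio.bi_private */
    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  c = PTHREAD_COND_INITIALIZER;

    static void *completion(void *arg)      /* the "irq" side */
    {
        (void)arg;
        pthread_mutex_lock(&m);
        atomic_store(&done, 1);             /* WRITE_ONCE(bi_private, NULL) */
        pthread_cond_signal(&c);            /* blk_wake_io_task(waiter) */
        pthread_mutex_unlock(&m);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, completion, NULL);
        pthread_mutex_lock(&m);
        for (;;) {
            if (atomic_load(&done))         /* !READ_ONCE(bio.bi_private) */
                break;
            pthread_cond_wait(&c, &m);      /* io_schedule() */
        }
        pthread_mutex_unlock(&m);
        pthread_join(t, NULL);
        puts("I/O complete");
        return 0;
    }
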
@@ -298,12 +302,13 @@ static void blkdev_bio_end_io(struct bio *bio)
}
dio->iocb->ki_complete(iocb, ret, 0);
- bio_put(&dio->bio);
+ if (dio->multi_bio)
+ bio_put(&dio->bio);
} else {
struct task_struct *waiter = dio->waiter;
WRITE_ONCE(dio->waiter, NULL);
- wake_up_process(waiter);
+ blk_wake_io_task(waiter);
}
}
@@ -328,6 +333,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
struct blk_plug plug;
struct blkdev_dio *dio;
struct bio *bio;
+ bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
@@ -338,20 +344,27 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
return -EINVAL;
bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
- bio_get(bio); /* extra ref for the completion handler */
dio = container_of(bio, struct blkdev_dio, bio);
dio->is_sync = is_sync = is_sync_kiocb(iocb);
- if (dio->is_sync)
+ if (dio->is_sync) {
dio->waiter = current;
- else
+ bio_get(bio);
+ } else {
dio->iocb = iocb;
+ }
dio->size = 0;
dio->multi_bio = false;
dio->should_dirty = is_read && iter_is_iovec(iter);
- blk_start_plug(&plug);
+ /*
+ * Don't plug for HIPRI/polled IO, as those should go straight
+ * to issue
+ */
+ if (!is_poll)
+ blk_start_plug(&plug);
+
for (;;) {
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = pos >> 9;
@@ -381,11 +394,21 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
if (!nr_pages) {
+ if (iocb->ki_flags & IOCB_HIPRI)
+ bio->bi_opf |= REQ_HIPRI;
+
qc = submit_bio(bio);
break;
}
if (!dio->multi_bio) {
+ /*
+ * AIO needs an extra reference to ensure the dio
+ * structure which is embedded into the first bio
+ * stays around.
+ */
+ if (!is_sync)
+ bio_get(bio);
dio->multi_bio = true;
atomic_set(&dio->ref, 2);
} else {
@@ -395,18 +418,21 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
submit_bio(bio);
bio = bio_alloc(GFP_KERNEL, nr_pages);
}
- blk_finish_plug(&plug);
+
+ if (!is_poll)
+ blk_finish_plug(&plug);
if (!is_sync)
return -EIOCBQUEUED;
for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+
if (!READ_ONCE(dio->waiter))
break;
if (!(iocb->ki_flags & IOCB_HIPRI) ||
- !blk_poll(bdev_get_queue(bdev), qc))
+ !blk_poll(bdev_get_queue(bdev), qc, true))
io_schedule();
}
__set_current_state(TASK_RUNNING);
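
The comment in the hunk above states the key lifetime rule: the dio lives inside the first bio's allocation, so an extra reference must pin that bio until every split bio has completed. A refcount sketch of the rule, reduced to the count itself (illustrative, not the kernel's bio API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dio {
        atomic_int ref;     /* one per in-flight bio, plus the extra pin */
    };

    static void dio_put(struct dio *d)
    {
        if (atomic_fetch_sub(&d->ref, 1) == 1) {
            printf("last ref dropped, freeing dio (and first bio)\n");
            free(d);        /* freeing the first bio frees the dio too */
        }
    }

    static void bio_endio(struct dio *d) { dio_put(d); }

    int main(void)
    {
        struct dio *d = malloc(sizeof(*d));

        atomic_init(&d->ref, 1);        /* ref held via the first bio */
        atomic_fetch_add(&d->ref, 1);   /* the extra bio_get() when splitting */

        bio_endio(d);                   /* first bio completes: dio survives */
        bio_endio(d);                   /* last ref dropped: now freed */
        return 0;
    }
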
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 68ebe188446a..78556447e1d5 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -591,7 +591,7 @@ unode_aux_to_inode_list(struct ulist_node *node)
}
/*
- * We maintain three seperate rbtrees: one for direct refs, one for
+ * We maintain three separate rbtrees: one for direct refs, one for
* indirect refs which have a key, and one for indirect refs which do not
* have a key. Each tree does merge on insertion.
*
@@ -695,7 +695,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
}
/*
- * Now it's a direct ref, put it in the the direct tree. We must
+ * Now it's a direct ref, put it in the direct tree. We must
* do this last because the ref could be merged/freed here.
*/
prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
@@ -2020,9 +2020,6 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
ret = -ENOMEM;
break;
}
- extent_buffer_get(eb);
- btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
btrfs_release_path(path);
item = btrfs_item_nr(slot);
@@ -2042,7 +2039,6 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
len = sizeof(*iref) + name_len;
iref = (struct btrfs_inode_ref *)((char *)iref + len);
}
- btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
}
@@ -2083,10 +2079,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
ret = -ENOMEM;
break;
}
- extent_buffer_get(eb);
-
- btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
btrfs_release_path(path);
item_size = btrfs_item_size_nr(eb, slot);
@@ -2107,7 +2099,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
cur_offset += btrfs_inode_extref_name_len(eb, extref);
cur_offset += sizeof(*extref);
}
- btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
offset++;
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 97d91e55b70a..6f5d07415dab 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -20,7 +20,7 @@
* new data the application may have written before commit.
*/
enum {
- BTRFS_INODE_ORDERED_DATA_CLOSE = 0,
+ BTRFS_INODE_ORDERED_DATA_CLOSE,
BTRFS_INODE_DUMMY,
BTRFS_INODE_IN_DEFRAG,
BTRFS_INODE_HAS_ASYNC_EXTENT,
@@ -29,6 +29,7 @@ enum {
BTRFS_INODE_IN_DELALLOC_LIST,
BTRFS_INODE_READDIO_NEED_LOCK,
BTRFS_INODE_HAS_PROPS,
+ BTRFS_INODE_SNAPSHOT_FLUSH,
};
/* in memory btrfs inode */
@@ -147,6 +148,12 @@ struct btrfs_inode {
u64 last_unlink_trans;
/*
+ * Track the transaction id of the last transaction used to create a
+ * hard link for the inode. This is used by the log tree (fsync).
+ */
+ u64 last_link_trans;
+
+ /*
* Number of bytes outstanding that are going to need csums. This is
* used in ENOSPC accounting.
*/
@@ -253,6 +260,11 @@ static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
return false;
}
+static inline bool is_data_inode(struct inode *inode)
+{
+ return btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID;
+}
+
static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
int mod)
{
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 2e43fba44035..b0c8094528d1 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1202,24 +1202,24 @@ static void btrfsic_read_from_block_data(
void *dstv, u32 offset, size_t len)
{
size_t cur;
- size_t offset_in_page;
+ size_t pgoff;
char *kaddr;
char *dst = (char *)dstv;
- size_t start_offset = block_ctx->start & ((u64)PAGE_SIZE - 1);
+ size_t start_offset = offset_in_page(block_ctx->start);
unsigned long i = (start_offset + offset) >> PAGE_SHIFT;
WARN_ON(offset + len > block_ctx->len);
- offset_in_page = (start_offset + offset) & (PAGE_SIZE - 1);
+ pgoff = offset_in_page(start_offset + offset);
while (len > 0) {
- cur = min(len, ((size_t)PAGE_SIZE - offset_in_page));
+ cur = min(len, ((size_t)PAGE_SIZE - pgoff));
BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE));
kaddr = block_ctx->datav[i];
- memcpy(dst, kaddr + offset_in_page, cur);
+ memcpy(dst, kaddr + pgoff, cur);
dst += cur;
len -= cur;
- offset_in_page = 0;
+ pgoff = 0;
i++;
}
}
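
offset_in_page(x) is x's byte offset within its page and PAGE_ALIGNED(x) tests that the offset is zero; both replace the open-coded & (PAGE_SIZE - 1) masks in this file. Their behaviour for a 4 KiB page, sketched:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    /* userspace doubles of the kernel helpers used above */
    #define offset_in_page(p) ((uint64_t)(p) & (PAGE_SIZE - 1))
    #define PAGE_ALIGNED(p)   (offset_in_page(p) == 0)

    int main(void)
    {
        printf("%llu\n", (unsigned long long)offset_in_page(0x12345ULL)); /* 837 */
        printf("%d\n", (int)PAGE_ALIGNED(0x12345ULL));                    /* 0 */
        printf("%d\n", (int)PAGE_ALIGNED(0x13000ULL));                    /* 1 */
        return 0;
    }
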
@@ -1601,7 +1601,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
BUG_ON(block_ctx->datav);
BUG_ON(block_ctx->pagev);
BUG_ON(block_ctx->mem_to_free);
- if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) {
+ if (!PAGE_ALIGNED(block_ctx->dev_bytenr)) {
pr_info("btrfsic: read_block() with unaligned bytenr %llu\n",
block_ctx->dev_bytenr);
return -1;
@@ -1720,7 +1720,7 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
num_pages = state->metablock_size >> PAGE_SHIFT;
h = (struct btrfs_header *)datav[0];
- if (memcmp(h->fsid, fs_info->fsid, BTRFS_FSID_SIZE))
+ if (memcmp(h->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE))
return 1;
for (i = 0; i < num_pages; i++) {
@@ -1778,7 +1778,7 @@ again:
return;
}
is_metadata = 1;
- BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_SIZE - 1));
+ BUG_ON(!PAGE_ALIGNED(BTRFS_SUPER_INFO_SIZE));
processed_len = BTRFS_SUPER_INFO_SIZE;
if (state->print_mask &
BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
@@ -2327,7 +2327,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
* write operations. Therefore it keeps the linkage
* information for a block until a block is
* rewritten. This can temporarily cause incorrect
- * and even circular linkage informations. This
+ * and even circular linkage information. This
* causes no harm unless such blocks are referenced
* by the most recent super block.
*/
@@ -2892,12 +2892,12 @@ int btrfsic_mount(struct btrfs_fs_info *fs_info,
struct list_head *dev_head = &fs_devices->devices;
struct btrfs_device *device;
- if (fs_info->nodesize & ((u64)PAGE_SIZE - 1)) {
+ if (!PAGE_ALIGNED(fs_info->nodesize)) {
pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
fs_info->nodesize, PAGE_SIZE);
return -1;
}
- if (fs_info->sectorsize & ((u64)PAGE_SIZE - 1)) {
+ if (!PAGE_ALIGNED(fs_info->sectorsize)) {
pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
fs_info->sectorsize, PAGE_SIZE);
return -1;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 2955a4ea2fa8..548057630b69 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -229,7 +229,6 @@ static noinline void end_compressed_writeback(struct inode *inode,
*/
static void end_compressed_bio_write(struct bio *bio)
{
- struct extent_io_tree *tree;
struct compressed_bio *cb = bio->bi_private;
struct inode *inode;
struct page *page;
@@ -248,14 +247,10 @@ static void end_compressed_bio_write(struct bio *bio)
* call back into the FS and do all the end_io operations
*/
inode = cb->inode;
- tree = &BTRFS_I(inode)->io_tree;
cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
- tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
- cb->start,
- cb->start + cb->len - 1,
- NULL,
- bio->bi_status ?
- BLK_STS_OK : BLK_STS_NOTSUPP);
+ btrfs_writepage_endio_finish_ordered(cb->compressed_pages[0],
+ cb->start, cb->start + cb->len - 1,
+ bio->bi_status ? BLK_STS_OK : BLK_STS_NOTSUPP);
cb->compressed_pages[0]->mapping = NULL;
end_compressed_writeback(inode, cb);
@@ -306,7 +301,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
blk_status_t ret;
int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
- WARN_ON(start & ((u64)PAGE_SIZE - 1));
+ WARN_ON(!PAGE_ALIGNED(start));
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
if (!cb)
return BLK_STS_RESOURCE;
@@ -337,7 +332,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
page = compressed_pages[pg_index];
page->mapping = inode->i_mapping;
if (bio->bi_iter.bi_size)
- submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE, bio, 0);
+ submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
+ 0);
page->mapping = NULL;
if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
@@ -481,7 +477,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (page->index == end_index) {
char *userpage;
- size_t zero_offset = isize & (PAGE_SIZE - 1);
+ size_t zero_offset = offset_in_page(isize);
if (zero_offset) {
int zeros;
@@ -615,8 +611,8 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
page->index = em_start >> PAGE_SHIFT;
if (comp_bio->bi_iter.bi_size)
- submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE,
- comp_bio, 0);
+ submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE,
+ comp_bio, 0);
page->mapping = NULL;
if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
@@ -1207,7 +1203,7 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
/*
* Shannon Entropy calculation
*
- * Pure byte distribution analysis fails to determine compressiability of data.
+ * Pure byte distribution analysis fails to determine compressibility of data.
* Try calculating entropy to estimate the average minimum number of bits
* needed to encode the sampled data.
*
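
The estimate referred to above is the classic H = -sum(p_i * log2(p_i)) over the byte histogram of a sample; values near 8 bits/byte mean the data is effectively incompressible. A freestanding sketch (the sample string is illustrative; link with -lm):

    #include <math.h>
    #include <stdio.h>
    #include <string.h>

    /* Shannon entropy of a buffer, in bits per byte (0.0 .. 8.0) */
    static double shannon_entropy(const unsigned char *buf, size_t len)
    {
        size_t hist[256] = { 0 };
        double h = 0.0;

        for (size_t i = 0; i < len; i++)
            hist[buf[i]]++;
        for (int b = 0; b < 256; b++) {
            if (!hist[b])
                continue;
            double p = (double)hist[b] / (double)len;
            h -= p * log2(p);
        }
        return h;
    }

    int main(void)
    {
        const char *s = "aaaaaaaabbbbcc"; /* skewed: compresses well */

        printf("entropy = %.3f bits/byte\n",
               shannon_entropy((const unsigned char *)s, strlen(s)));
        return 0;
    }
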
@@ -1271,7 +1267,7 @@ static u8 get4bits(u64 num, int shift) {
/*
* Use 4 bits as radix base
- * Use 16 u32 counters for calculating new possition in buf array
+ * Use 16 u32 counters for calculating new position in buf array
*
* @array - array that will be sorted
* @array_buf - buffer array to store sorting results
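
The comment above describes an LSD radix sort with a 4-bit digit, i.e. 16 counting buckets per pass and 64/4 = 16 passes over a u64 key. One stable counting pass, sketched:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t get4bits(uint64_t num, int shift)
    {
        return (num >> shift) & 0xF;       /* mirrors get4bits() above */
    }

    /* one stable counting pass over a 4-bit digit */
    static void radix_pass(const uint64_t *in, uint64_t *out, size_t n,
                           int shift)
    {
        uint32_t count[16] = { 0 };
        uint32_t pos[16];

        for (size_t i = 0; i < n; i++)
            count[get4bits(in[i], shift)]++;
        pos[0] = 0;
        for (int d = 1; d < 16; d++)       /* prefix sums: bucket starts */
            pos[d] = pos[d - 1] + count[d - 1];
        for (size_t i = 0; i < n; i++)
            out[pos[get4bits(in[i], shift)]++] = in[i];
    }

    int main(void)
    {
        uint64_t a[] = { 0x31, 0x12, 0x23, 0x05 }, buf[4];
        size_t n = 4;

        for (int shift = 0; shift < 64; shift += 8) { /* two digits per byte */
            radix_pass(a, buf, n, shift);
            radix_pass(buf, a, n, shift + 4);
        }
        for (size_t i = 0; i < n; i++)
            printf("%#llx ", (unsigned long long)a[i]); /* 0x5 0x12 0x23 0x31 */
        printf("\n");
        return 0;
    }
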
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 539901fb5165..d92462fe66c8 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -12,6 +12,7 @@
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
+#include "volumes.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_path *path, int level);
@@ -224,7 +225,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
else
btrfs_set_header_owner(cow, new_root_objectid);
- write_extent_buffer_fsid(cow, fs_info->fsid);
+ write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
WARN_ON(btrfs_header_generation(buf) > trans->transid);
if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
@@ -1050,7 +1051,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
else
btrfs_set_header_owner(cow, root->root_key.objectid);
- write_extent_buffer_fsid(cow, fs_info->fsid);
+ write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
if (ret) {
@@ -1290,7 +1291,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
- extent_buffer_get(eb_rewin);
btrfs_tree_read_lock(eb_rewin);
__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
WARN_ON(btrfs_header_nritems(eb_rewin) >
@@ -1362,7 +1362,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
if (!eb)
return NULL;
- extent_buffer_get(eb);
btrfs_tree_read_lock(eb);
if (old_root) {
btrfs_set_header_bytenr(eb, eb->start);
@@ -1415,7 +1414,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
*
* What is forced COW:
* when we create snapshot during committing the transaction,
- * after we've finished coping src root, we must COW the shared
+ * after we've finished copying src root, we must COW the shared
* block to ensure the metadata consistency.
*/
if (btrfs_header_generation(buf) == trans->transid &&
@@ -1441,6 +1440,10 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
u64 search_start;
int ret;
+ if (test_bit(BTRFS_ROOT_DELETING, &root->state))
+ btrfs_err(fs_info,
+ "COW'ing blocks on a fs root that's being dropped");
+
if (trans->transaction != fs_info->running_transaction)
WARN(1, KERN_CRIT "trans %llu running %llu\n",
trans->transid,
@@ -2584,14 +2587,27 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
root_lock = BTRFS_READ_LOCK;
if (p->search_commit_root) {
- /* The commit roots are read only so we always do read locks */
- if (p->need_commit_sem)
+ /*
+ * The commit roots are read only so we always do read locks,
+ * and we always must hold the commit_root_sem when doing
+ * searches on them, the only exception is send where we don't
+ * want to block transaction commits for a long time, so
+ * we need to clone the commit root in order to avoid races
+ * with transaction commits that create a snapshot of one of
+ * the roots used by a send operation.
+ */
+ if (p->need_commit_sem) {
down_read(&fs_info->commit_root_sem);
- b = root->commit_root;
- extent_buffer_get(b);
- level = btrfs_header_level(b);
- if (p->need_commit_sem)
+ b = btrfs_clone_extent_buffer(root->commit_root);
up_read(&fs_info->commit_root_sem);
+ if (!b)
+ return ERR_PTR(-ENOMEM);
+
+ } else {
+ b = root->commit_root;
+ extent_buffer_get(b);
+ }
+ level = btrfs_header_level(b);
/*
* Ensure that all callers have set skip_locking when
* p->search_commit_root = 1.
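
The cloning strategy described in the comment above trades a long hold of the semaphore for a short one: take the lock only long enough to copy the root, then run the search on the private copy. The shape of that pattern, as a standalone sketch (types are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct node { int nritems; int items[16]; };

    static struct node shared;    /* protected by a (notional) commit sem */

    static struct node *clone_under_lock(void)
    {
        struct node *copy = malloc(sizeof(*copy));

        if (!copy)
            return NULL;
        /* down_read(&sem); */
        memcpy(copy, &shared, sizeof(*copy));  /* short critical section */
        /* up_read(&sem); */
        return copy;              /* the long-running search uses the clone */
    }

    int main(void)
    {
        shared.nritems = 3;
        struct node *c = clone_under_lock();

        printf("%d\n", c ? c->nritems : -1);
        free(c);
        return 0;
    }
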
@@ -2717,6 +2733,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
again:
prev_cmp = -1;
b = btrfs_search_slot_get_root(root, p, write_lock_level);
+ if (IS_ERR(b)) {
+ ret = PTR_ERR(b);
+ goto done;
+ }
while (b) {
level = btrfs_header_level(b);
@@ -3751,7 +3771,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
/* Key greater than all keys in the leaf, right neighbor has
* enough room for it and we're not emptying our leaf to delete
* it, therefore use right neighbor to insert the new item and
- * no need to touch/dirty our left leaft. */
+ * no need to touch/dirty our left leaf. */
btrfs_tree_unlock(left);
free_extent_buffer(left);
path->nodes[0] = right;
@@ -5390,7 +5410,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
ret = -ENOMEM;
goto out;
}
- extent_buffer_get(left_path->nodes[left_level]);
right_level = btrfs_header_level(right_root->commit_root);
right_root_level = right_level;
@@ -5401,7 +5420,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
ret = -ENOMEM;
goto out;
}
- extent_buffer_get(right_path->nodes[right_level]);
up_read(&fs_info->commit_root_sem);
if (left_level == 0)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 68f322f600a0..f031a447a047 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -109,13 +109,26 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
}
/*
- * File system states
+ * Runtime (in-memory) states of filesystem
*/
-#define BTRFS_FS_STATE_ERROR 0
-#define BTRFS_FS_STATE_REMOUNTING 1
-#define BTRFS_FS_STATE_TRANS_ABORTED 2
-#define BTRFS_FS_STATE_DEV_REPLACING 3
-#define BTRFS_FS_STATE_DUMMY_FS_INFO 4
+enum {
+ /* Global indicator of serious filesystem errors */
+ BTRFS_FS_STATE_ERROR,
+ /*
+ * Filesystem is being remounted, allow to skip some operations, like
+ * defrag
+ */
+ BTRFS_FS_STATE_REMOUNTING,
+ /* Track if a transaction abort has been reported on this filesystem */
+ BTRFS_FS_STATE_TRANS_ABORTED,
+ /*
+ * Bio operations should be blocked on this filesystem because a source
+ * or target device is being destroyed as part of a device replace
+ */
+ BTRFS_FS_STATE_DEV_REPLACING,
+ /* The btrfs_fs_info created for self-tests */
+ BTRFS_FS_STATE_DUMMY_FS_INFO,
+};
#define BTRFS_BACKREF_REV_MAX 256
#define BTRFS_BACKREF_REV_SHIFT 56
@@ -195,9 +208,10 @@ struct btrfs_root_backup {
* it currently lacks any block count etc etc
*/
struct btrfs_super_block {
- u8 csum[BTRFS_CSUM_SIZE];
/* the first 4 fields must match struct btrfs_header */
- u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
+ u8 csum[BTRFS_CSUM_SIZE];
+ /* FS specific UUID, visible to user */
+ u8 fsid[BTRFS_FSID_SIZE];
__le64 bytenr; /* this block number */
__le64 flags;
@@ -234,8 +248,11 @@ struct btrfs_super_block {
__le64 cache_generation;
__le64 uuid_tree_generation;
+ /* the UUID written into btree blocks */
+ u8 metadata_uuid[BTRFS_FSID_SIZE];
+
/* future expansion */
- __le64 reserved[30];
+ __le64 reserved[28];
u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
} __attribute__ ((__packed__));
@@ -265,7 +282,8 @@ struct btrfs_super_block {
BTRFS_FEATURE_INCOMPAT_RAID56 | \
BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
- BTRFS_FEATURE_INCOMPAT_NO_HOLES)
+ BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
+ BTRFS_FEATURE_INCOMPAT_METADATA_UUID)
#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
@@ -316,7 +334,7 @@ struct btrfs_node {
* The slots array records the index of the item or block pointer
* used while walking the tree.
*/
-enum { READA_NONE = 0, READA_BACK, READA_FORWARD };
+enum { READA_NONE, READA_BACK, READA_FORWARD };
struct btrfs_path {
struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
int slots[BTRFS_MAX_LEVEL];
@@ -360,9 +378,7 @@ struct btrfs_dev_replace {
struct btrfs_device *tgtdev;
struct mutex lock_finishing_cancel_unmount;
- rwlock_t lock;
- atomic_t blocking_readers;
- wait_queue_head_t read_lock_wq;
+ struct rw_semaphore rwsem;
struct btrfs_scrub_progress scrub_progress;
@@ -443,13 +459,19 @@ struct btrfs_space_info {
struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
};
-#define BTRFS_BLOCK_RSV_GLOBAL 1
-#define BTRFS_BLOCK_RSV_DELALLOC 2
-#define BTRFS_BLOCK_RSV_TRANS 3
-#define BTRFS_BLOCK_RSV_CHUNK 4
-#define BTRFS_BLOCK_RSV_DELOPS 5
-#define BTRFS_BLOCK_RSV_EMPTY 6
-#define BTRFS_BLOCK_RSV_TEMP 7
+/*
+ * Types of block reserves
+ */
+enum {
+ BTRFS_BLOCK_RSV_GLOBAL,
+ BTRFS_BLOCK_RSV_DELALLOC,
+ BTRFS_BLOCK_RSV_TRANS,
+ BTRFS_BLOCK_RSV_CHUNK,
+ BTRFS_BLOCK_RSV_DELOPS,
+ BTRFS_BLOCK_RSV_DELREFS,
+ BTRFS_BLOCK_RSV_EMPTY,
+ BTRFS_BLOCK_RSV_TEMP,
+};
struct btrfs_block_rsv {
u64 size;
@@ -509,18 +531,18 @@ struct btrfs_free_cluster {
};
enum btrfs_caching_type {
- BTRFS_CACHE_NO = 0,
- BTRFS_CACHE_STARTED = 1,
- BTRFS_CACHE_FAST = 2,
- BTRFS_CACHE_FINISHED = 3,
- BTRFS_CACHE_ERROR = 4,
+ BTRFS_CACHE_NO,
+ BTRFS_CACHE_STARTED,
+ BTRFS_CACHE_FAST,
+ BTRFS_CACHE_FINISHED,
+ BTRFS_CACHE_ERROR,
};
enum btrfs_disk_cache_state {
- BTRFS_DC_WRITTEN = 0,
- BTRFS_DC_ERROR = 1,
- BTRFS_DC_CLEAR = 2,
- BTRFS_DC_SETUP = 3,
+ BTRFS_DC_WRITTEN,
+ BTRFS_DC_ERROR,
+ BTRFS_DC_CLEAR,
+ BTRFS_DC_SETUP,
};
struct btrfs_caching_control {
@@ -712,41 +734,61 @@ struct btrfs_fs_devices;
struct btrfs_balance_control;
struct btrfs_delayed_root;
-#define BTRFS_FS_BARRIER 1
-#define BTRFS_FS_CLOSING_START 2
-#define BTRFS_FS_CLOSING_DONE 3
-#define BTRFS_FS_LOG_RECOVERING 4
-#define BTRFS_FS_OPEN 5
-#define BTRFS_FS_QUOTA_ENABLED 6
-#define BTRFS_FS_UPDATE_UUID_TREE_GEN 9
-#define BTRFS_FS_CREATING_FREE_SPACE_TREE 10
-#define BTRFS_FS_BTREE_ERR 11
-#define BTRFS_FS_LOG1_ERR 12
-#define BTRFS_FS_LOG2_ERR 13
-#define BTRFS_FS_QUOTA_OVERRIDE 14
-/* Used to record internally whether fs has been frozen */
-#define BTRFS_FS_FROZEN 15
-
-/*
- * Indicate that a whole-filesystem exclusive operation is running
- * (device replace, resize, device add/delete, balance)
- */
-#define BTRFS_FS_EXCL_OP 16
-
/*
- * To info transaction_kthread we need an immediate commit so it doesn't
- * need to wait for commit_interval
+ * Block group or device which contains an active swapfile. Used for preventing
+ * unsafe operations while a swapfile is active.
+ *
+ * These are sorted on (ptr, inode) (note that a block group or device can
+ * contain more than one swapfile). We compare the pointer values because we
+ * don't actually care what the object is, we just need a quick check whether
+ * the object exists in the rbtree.
*/
-#define BTRFS_FS_NEED_ASYNC_COMMIT 17
+struct btrfs_swapfile_pin {
+ struct rb_node node;
+ void *ptr;
+ struct inode *inode;
+ /*
+ * If true, ptr points to a struct btrfs_block_group_cache. Otherwise,
+ * ptr points to a struct btrfs_device.
+ */
+ bool is_block_group;
+};
-/*
- * Indicate that balance has been set up from the ioctl and is in the main
- * phase. The fs_info::balance_ctl is initialized.
- */
-#define BTRFS_FS_BALANCE_RUNNING 18
+bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr);
+
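
The (ptr, inode) ordering described in the comment above means the lookup compares raw pointer values first and breaks ties on the inode. A sketch of that comparison outside the rbtree machinery (strictly, ordering unrelated pointers is implementation-defined in C; the kernel compares their numeric values):

    #include <stdio.h>

    struct swapfile_pin_key {
        const void *ptr;     /* block group or device, compared by address */
        const void *inode;   /* tiebreak: which swapfile pins it */
    };

    /* <0, 0, >0 like memcmp */
    static int pin_cmp(const struct swapfile_pin_key *a,
                       const struct swapfile_pin_key *b)
    {
        if (a->ptr != b->ptr)
            return a->ptr < b->ptr ? -1 : 1;
        if (a->inode != b->inode)
            return a->inode < b->inode ? -1 : 1;
        return 0;
    }

    int main(void)
    {
        int bg, inode1, inode2;
        struct swapfile_pin_key a = { &bg, &inode1 }, b = { &bg, &inode2 };

        /* same block group, two swapfiles: distinct entries in the tree */
        printf("%d\n", pin_cmp(&a, &b) != 0);
        return 0;
    }
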
+enum {
+ BTRFS_FS_BARRIER,
+ BTRFS_FS_CLOSING_START,
+ BTRFS_FS_CLOSING_DONE,
+ BTRFS_FS_LOG_RECOVERING,
+ BTRFS_FS_OPEN,
+ BTRFS_FS_QUOTA_ENABLED,
+ BTRFS_FS_UPDATE_UUID_TREE_GEN,
+ BTRFS_FS_CREATING_FREE_SPACE_TREE,
+ BTRFS_FS_BTREE_ERR,
+ BTRFS_FS_LOG1_ERR,
+ BTRFS_FS_LOG2_ERR,
+ BTRFS_FS_QUOTA_OVERRIDE,
+ /* Used to record internally whether fs has been frozen */
+ BTRFS_FS_FROZEN,
+ /*
+ * Indicate that a whole-filesystem exclusive operation is running
+ * (device replace, resize, device add/delete, balance)
+ */
+ BTRFS_FS_EXCL_OP,
+ /*
+ * To inform transaction_kthread we need an immediate commit so it
+ * doesn't need to wait for commit_interval
+ */
+ BTRFS_FS_NEED_ASYNC_COMMIT,
+ /*
+ * Indicate that balance has been set up from the ioctl and is in the
+ * main phase. The fs_info::balance_ctl is initialized.
+ */
+ BTRFS_FS_BALANCE_RUNNING,
+};
struct btrfs_fs_info {
- u8 fsid[BTRFS_FSID_SIZE];
u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
unsigned long flags;
struct btrfs_root *extent_root;
@@ -790,6 +832,8 @@ struct btrfs_fs_info {
struct btrfs_block_rsv chunk_block_rsv;
/* block reservation for delayed operations */
struct btrfs_block_rsv delayed_block_rsv;
+ /* block reservation for delayed refs */
+ struct btrfs_block_rsv delayed_refs_rsv;
struct btrfs_block_rsv empty_block_rsv;
@@ -1114,6 +1158,10 @@ struct btrfs_fs_info {
u32 sectorsize;
u32 stripesize;
+ /* Block groups and devices containing active swapfiles. */
+ spinlock_t swapfile_pins_lock;
+ struct rb_root swapfile_pins;
+
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
spinlock_t ref_verify_lock;
struct rb_root block_tree;
@@ -1133,22 +1181,24 @@ struct btrfs_subvolume_writers {
/*
* The state of btrfs root
*/
-/*
- * btrfs_record_root_in_trans is a multi-step process,
- * and it can race with the balancing code. But the
- * race is very small, and only the first time the root
- * is added to each transaction. So IN_TRANS_SETUP
- * is used to tell us when more checks are required
- */
-#define BTRFS_ROOT_IN_TRANS_SETUP 0
-#define BTRFS_ROOT_REF_COWS 1
-#define BTRFS_ROOT_TRACK_DIRTY 2
-#define BTRFS_ROOT_IN_RADIX 3
-#define BTRFS_ROOT_ORPHAN_ITEM_INSERTED 4
-#define BTRFS_ROOT_DEFRAG_RUNNING 5
-#define BTRFS_ROOT_FORCE_COW 6
-#define BTRFS_ROOT_MULTI_LOG_TASKS 7
-#define BTRFS_ROOT_DIRTY 8
+enum {
+ /*
+ * btrfs_record_root_in_trans is a multi-step process, and it can race
+ * with the balancing code. But the race is very small, and only the
+ * first time the root is added to each transaction. So IN_TRANS_SETUP
+ * is used to tell us when more checks are required
+ */
+ BTRFS_ROOT_IN_TRANS_SETUP,
+ BTRFS_ROOT_REF_COWS,
+ BTRFS_ROOT_TRACK_DIRTY,
+ BTRFS_ROOT_IN_RADIX,
+ BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
+ BTRFS_ROOT_DEFRAG_RUNNING,
+ BTRFS_ROOT_FORCE_COW,
+ BTRFS_ROOT_MULTI_LOG_TASKS,
+ BTRFS_ROOT_DIRTY,
+ BTRFS_ROOT_DELETING,
+};
/*
* in ram representation of the tree. extent_root is used for all allocations
@@ -1274,6 +1324,9 @@ struct btrfs_root {
u64 qgroup_meta_rsv_pertrans;
u64 qgroup_meta_rsv_prealloc;
+ /* Number of active swapfiles */
+ atomic_t nr_swapfiles;
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
u64 alloc_bytenr;
#endif
@@ -2570,10 +2623,10 @@ static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
/* extent-tree.c */
enum btrfs_inline_ref_type {
- BTRFS_REF_TYPE_INVALID = 0,
- BTRFS_REF_TYPE_BLOCK = 1,
- BTRFS_REF_TYPE_DATA = 2,
- BTRFS_REF_TYPE_ANY = 3,
+ BTRFS_REF_TYPE_INVALID,
+ BTRFS_REF_TYPE_BLOCK,
+ BTRFS_REF_TYPE_DATA,
+ BTRFS_REF_TYPE_ANY,
};
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
@@ -2599,7 +2652,7 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
}
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
-int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans);
+bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
@@ -2713,10 +2766,12 @@ enum btrfs_reserve_flush_enum {
enum btrfs_flush_state {
FLUSH_DELAYED_ITEMS_NR = 1,
FLUSH_DELAYED_ITEMS = 2,
- FLUSH_DELALLOC = 3,
- FLUSH_DELALLOC_WAIT = 4,
- ALLOC_CHUNK = 5,
- COMMIT_TRANS = 6,
+ FLUSH_DELAYED_REFS_NR = 3,
+ FLUSH_DELAYED_REFS = 4,
+ FLUSH_DELALLOC = 5,
+ FLUSH_DELALLOC_WAIT = 6,
+ ALLOC_CHUNK = 7,
+ COMMIT_TRANS = 8,
};
int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes);
@@ -2767,6 +2822,13 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
+void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr);
+void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
+int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
+ enum btrfs_reserve_flush_enum flush);
+void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *src,
+ u64 num_bytes);
int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
@@ -3141,7 +3203,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct inode *inode, u64 new_size,
u32 min_type);
-int btrfs_start_delalloc_inodes(struct btrfs_root *root);
+int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
unsigned int extra_bits,
@@ -3150,9 +3212,16 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root,
struct btrfs_root *parent_root,
u64 new_dirid);
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
- size_t size, struct bio *bio,
- unsigned long bio_flags);
+void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
+ unsigned *bits);
+void btrfs_clear_delalloc_extent(struct inode *inode,
+ struct extent_state *state, unsigned *bits);
+void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
+ struct extent_state *other);
+void btrfs_split_delalloc_extent(struct inode *inode,
+ struct extent_state *orig, u64 split);
+int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
+ unsigned long bio_flags);
void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end);
vm_fault_t btrfs_page_mkwrite(struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
@@ -3189,6 +3258,12 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
struct btrfs_trans_handle *trans, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
+int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
+ u64 start, u64 end, int *page_started, unsigned long *nr_written,
+ struct writeback_control *wbc);
+int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end);
+void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
+ u64 end, int uptodate);
extern const struct dentry_operations btrfs_dentry_operations;
/* ioctl.c */
@@ -3428,6 +3503,16 @@ static inline void assfail(const char *expr, const char *file, int line)
#define ASSERT(expr) ((void)0)
#endif
+/*
+ * Use that for functions that are conditionally exported for sanity tests but
+ * otherwise static
+ */
+#ifndef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+#define EXPORT_FOR_TESTS static
+#else
+#define EXPORT_FOR_TESTS
+#endif
+
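
The macro erases the `static` qualifier when the sanity tests are built in, so an otherwise file-local helper gains external linkage and becomes callable from the test code. The same trick in standalone form (RUN_SANITY_TESTS is an illustrative stand-in for the Kconfig symbol):

    #include <stdio.h>

    #ifndef RUN_SANITY_TESTS
    #define EXPORT_FOR_TESTS static
    #else
    #define EXPORT_FOR_TESTS
    #endif

    EXPORT_FOR_TESTS
    int helper(int x)          /* file-local unless tests are enabled */
    {
        return x * 2;
    }

    int main(void)
    {
        printf("%d\n", helper(21));   /* 42 */
        return 0;
    }
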
__cold
static inline void btrfs_print_v0_err(struct btrfs_fs_info *fs_info)
{
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 9301b3ad9217..cad36c99a483 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -251,8 +251,6 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
ref->in_tree = 0;
btrfs_put_delayed_ref(ref);
atomic_dec(&delayed_refs->num_entries);
- if (trans->delayed_ref_updates)
- trans->delayed_ref_updates--;
}
static bool merge_ref(struct btrfs_trans_handle *trans,
@@ -400,6 +398,20 @@ again:
return head;
}
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head)
+{
+ lockdep_assert_held(&delayed_refs->lock);
+ lockdep_assert_held(&head->lock);
+
+ rb_erase_cached(&head->href_node, &delayed_refs->href_root);
+ RB_CLEAR_NODE(&head->href_node);
+ atomic_dec(&delayed_refs->num_entries);
+ delayed_refs->num_heads--;
+ if (head->processing == 0)
+ delayed_refs->num_heads_ready--;
+}
+
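
The two lockdep_assert_held() calls above encode the function's contract: callers must already hold both delayed_refs->lock and head->lock. With lockdep enabled a violation warns; otherwise the checks compile away. A standalone analogue using an owner-tracking mutex (the assertion scheme is illustrative; link with -lpthread):

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct checked_lock {
        pthread_mutex_t m;
        pthread_t owner;    /* valid only while held; for assertions */
        int held;
    };

    static void lock(struct checked_lock *l)
    {
        pthread_mutex_lock(&l->m);
        l->owner = pthread_self();
        l->held = 1;
    }

    static void unlock(struct checked_lock *l)
    {
        l->held = 0;
        pthread_mutex_unlock(&l->m);
    }

    /* stand-in for lockdep_assert_held() */
    static void assert_held(struct checked_lock *l)
    {
        assert(l->held && pthread_equal(l->owner, pthread_self()));
    }

    static void delete_head(struct checked_lock *l, int *heads)
    {
        assert_held(l);     /* contract: only called with the lock held */
        (*heads)--;
    }

    int main(void)
    {
        struct checked_lock l = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
        int heads = 1;

        lock(&l);
        delete_head(&l, &heads);
        unlock(&l);
        printf("heads = %d\n", heads);
        return 0;
    }
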
/*
* Helper to insert the ref_node to the tail or merge with tail.
*
@@ -453,7 +465,6 @@ inserted:
if (ref->action == BTRFS_ADD_DELAYED_REF)
list_add_tail(&ref->add_list, &href->ref_add_list);
atomic_inc(&root->num_entries);
- trans->delayed_ref_updates++;
spin_unlock(&href->lock);
return ret;
}
@@ -462,12 +473,14 @@ inserted:
* helper function to update the accounting in the head ref
* existing and update must have the same bytenr
*/
-static noinline void
-update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
+static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *existing,
struct btrfs_delayed_ref_head *update,
int *old_ref_mod_ret)
{
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
int old_ref_mod;
BUG_ON(existing->is_data != update->is_data);
@@ -525,10 +538,18 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
* versa we need to make sure to adjust pending_csums accordingly.
*/
if (existing->is_data) {
- if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
+ u64 csum_leaves =
+ btrfs_csum_bytes_to_leaves(fs_info,
+ existing->num_bytes);
+
+ if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
delayed_refs->pending_csums -= existing->num_bytes;
- if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
+ btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
+ }
+ if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
delayed_refs->pending_csums += existing->num_bytes;
+ trans->delayed_ref_updates += csum_leaves;
+ }
}
spin_unlock(&existing->lock);
}
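
The accounting added above reserves csum-leaf space the moment a data head's total_ref_mod first goes negative (a pending free means csum items will be deleted) and releases it when the mod swings back to non-negative. The crossing-zero logic, sketched (the leaf arithmetic is illustrative):

    #include <stdio.h>

    static long pending_csums, reserved_leaves;

    static long csum_leaves(long bytes)
    {
        return (bytes + 4095) / 4096;   /* illustrative, not the btrfs math */
    }

    static void apply_ref_mod(long *total_ref_mod, long delta, long num_bytes)
    {
        long old = *total_ref_mod;

        *total_ref_mod += delta;
        if (old >= 0 && *total_ref_mod < 0) {        /* became a pending free */
            pending_csums += num_bytes;
            reserved_leaves += csum_leaves(num_bytes);
        } else if (old < 0 && *total_ref_mod >= 0) { /* no longer a free */
            pending_csums -= num_bytes;
            reserved_leaves -= csum_leaves(num_bytes);
        }
    }

    int main(void)
    {
        long mod = 0;

        apply_ref_mod(&mod, -1, 8192);   /* reserve for the csum deletion */
        apply_ref_mod(&mod, +1, 8192);   /* balanced out: release again */
        printf("pending=%ld reserved=%ld\n", pending_csums, reserved_leaves);
        return 0;
    }
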
@@ -634,7 +655,7 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
&& head_ref->qgroup_reserved
&& existing->qgroup_ref_root
&& existing->qgroup_reserved);
- update_existing_head_ref(delayed_refs, existing, head_ref,
+ update_existing_head_ref(trans, existing, head_ref,
old_ref_mod);
/*
* we've updated the existing ref, free the newly
@@ -645,8 +666,12 @@ add_delayed_ref_head(struct btrfs_trans_handle *trans,
} else {
if (old_ref_mod)
*old_ref_mod = 0;
- if (head_ref->is_data && head_ref->ref_mod < 0)
+ if (head_ref->is_data && head_ref->ref_mod < 0) {
delayed_refs->pending_csums += head_ref->num_bytes;
+ trans->delayed_ref_updates +=
+ btrfs_csum_bytes_to_leaves(trans->fs_info,
+ head_ref->num_bytes);
+ }
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
atomic_inc(&delayed_refs->num_entries);
@@ -782,6 +807,12 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
+ /*
+ * Need to update the delayed_refs_rsv with any changes we may have
+ * made.
+ */
+ btrfs_update_delayed_refs_rsv(trans);
+
trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
action == BTRFS_ADD_DELAYED_EXTENT ?
BTRFS_ADD_DELAYED_REF : action);
@@ -863,6 +894,12 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
+ /*
+ * Need to update the delayed_refs_rsv with any changes we may have
+ * made.
+ */
+ btrfs_update_delayed_refs_rsv(trans);
+
trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
action == BTRFS_ADD_DELAYED_EXTENT ?
BTRFS_ADD_DELAYED_REF : action);
@@ -899,6 +936,12 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
NULL, NULL, NULL);
spin_unlock(&delayed_refs->lock);
+
+ /*
+ * Need to update the delayed_refs_rsv with any changes we may have
+ * made.
+ */
+ btrfs_update_delayed_refs_rsv(trans);
return 0;
}
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 8e20c5cb5404..d2af974f68a1 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -261,7 +261,8 @@ static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
{
mutex_unlock(&head->mutex);
}
-
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_delayed_ref_head *head);
struct btrfs_delayed_ref_head *btrfs_select_ref_head(
struct btrfs_delayed_ref_root *delayed_refs);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 2aa48aecc52b..8750c835f535 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -59,7 +59,6 @@ no_valid_dev_replace_entry_found:
BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED;
dev_replace->cont_reading_from_srcdev_mode =
BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS;
- dev_replace->replace_state = 0;
dev_replace->time_started = 0;
dev_replace->time_stopped = 0;
atomic64_set(&dev_replace->num_write_errors, 0);
@@ -285,13 +284,13 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
struct btrfs_dev_replace_item *ptr;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
- btrfs_dev_replace_read_lock(dev_replace);
+ down_read(&dev_replace->rwsem);
if (!dev_replace->is_valid ||
!dev_replace->item_needs_writeback) {
- btrfs_dev_replace_read_unlock(dev_replace);
+ up_read(&dev_replace->rwsem);
return 0;
}
- btrfs_dev_replace_read_unlock(dev_replace);
+ up_read(&dev_replace->rwsem);
key.objectid = 0;
key.type = BTRFS_DEV_REPLACE_KEY;
@@ -349,7 +348,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
ptr = btrfs_item_ptr(eb, path->slots[0],
struct btrfs_dev_replace_item);
- btrfs_dev_replace_write_lock(dev_replace);
+ down_write(&dev_replace->rwsem);
if (dev_replace->srcdev)
btrfs_set_dev_replace_src_devid(eb, ptr,
dev_replace->srcdev->devid);
@@ -372,7 +371,7 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
btrfs_set_dev_replace_cursor_right(eb, ptr,
dev_replace->cursor_right);
dev_replace->item_needs_writeback = 0;
- btrfs_dev_replace_write_unlock(dev_replace);
+ up_write(&dev_replace->rwsem);
btrfs_mark_buffer_dirty(eb);
@@ -390,7 +389,7 @@ static char* btrfs_dev_name(struct btrfs_device *device)
return rcu_str_deref(device->name);
}
-int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
+static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
int read_src)
{
@@ -407,6 +406,13 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
if (IS_ERR(src_device))
return PTR_ERR(src_device);
+ if (btrfs_pinned_by_swapfile(fs_info, src_device)) {
+ btrfs_warn_in_rcu(fs_info,
+ "cannot replace device %s (devid %llu) due to active swapfile",
+ btrfs_dev_name(src_device), src_device->devid);
+ return -ETXTBSY;
+ }
+
ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
src_device, &tgt_device);
if (ret)
@@ -426,7 +432,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
}
need_unlock = true;
- btrfs_dev_replace_write_lock(dev_replace);
+ down_write(&dev_replace->rwsem);
switch (dev_replace->replace_state) {
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
@@ -464,7 +470,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
dev_replace->item_needs_writeback = 1;
atomic64_set(&dev_replace->num_write_errors, 0);
atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
- btrfs_dev_replace_write_unlock(dev_replace);
+ up_write(&dev_replace->rwsem);
need_unlock = false;
ret = btrfs_sysfs_add_device_link(tgt_device->fs_devices, tgt_device);
@@ -478,7 +484,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
need_unlock = true;
- btrfs_dev_replace_write_lock(dev_replace);
+ down_write(&dev_replace->rwsem);
dev_replace->replace_state =
BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
dev_replace->srcdev = NULL;
@@ -497,7 +503,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
ret = btrfs_dev_replace_finishing(fs_info, ret);
if (ret == -EINPROGRESS) {
ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;
- } else {
+ } else if (ret != -ECANCELED) {
WARN_ON(ret);
}
@@ -505,7 +511,7 @@ int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
leave:
if (need_unlock)
- btrfs_dev_replace_write_unlock(dev_replace);
+ up_write(&dev_replace->rwsem);
btrfs_destroy_dev_replace_tgtdev(tgt_device);
return ret;
}
@@ -533,8 +539,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
args->start.cont_reading_from_srcdev_mode);
args->result = ret;
/* don't warn if EINPROGRESS, someone else might be running scrub */
- if (ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS)
- ret = 0;
+ if (ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS ||
+ ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR)
+ return 0;
return ret;
}
@@ -572,18 +579,18 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
/* don't allow cancel or unmount to disturb the finishing procedure */
mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
- btrfs_dev_replace_read_lock(dev_replace);
+ down_read(&dev_replace->rwsem);
/* was the operation canceled, or is it finished? */
if (dev_replace->replace_state !=
BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED) {
- btrfs_dev_replace_read_unlock(dev_replace);
+ up_read(&dev_replace->rwsem);
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return 0;
}
tgt_device = dev_replace->tgtdev;
src_device = dev_replace->srcdev;
- btrfs_dev_replace_read_unlock(dev_replace);
+ up_read(&dev_replace->rwsem);
/*
* flush all outstanding I/O and inode extent mappings before the
@@ -607,7 +614,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
/* keep away write_all_supers() during the finishing procedure */
mutex_lock(&fs_info->fs_devices->device_list_mutex);
mutex_lock(&fs_info->chunk_mutex);
- btrfs_dev_replace_write_lock(dev_replace);
+ down_write(&dev_replace->rwsem);
dev_replace->replace_state =
scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
: BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED;
@@ -622,12 +629,13 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
src_device,
tgt_device);
} else {
- btrfs_err_in_rcu(fs_info,
+ if (scrub_ret != -ECANCELED)
+ btrfs_err_in_rcu(fs_info,
"btrfs_scrub_dev(%s, %llu, %s) failed %d",
btrfs_dev_name(src_device),
src_device->devid,
rcu_str_deref(tgt_device->name), scrub_ret);
- btrfs_dev_replace_write_unlock(dev_replace);
+ up_write(&dev_replace->rwsem);
mutex_unlock(&fs_info->chunk_mutex);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
btrfs_rm_dev_replace_blocked(fs_info);
@@ -663,8 +671,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
fs_info->fs_devices->rw_devices++;
- btrfs_dev_replace_write_unlock(dev_replace);
-
+ up_write(&dev_replace->rwsem);
btrfs_rm_dev_replace_blocked(fs_info);
btrfs_rm_dev_replace_remove_srcdev(src_device);
@@ -761,7 +768,7 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
{
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
- btrfs_dev_replace_read_lock(dev_replace);
+ down_read(&dev_replace->rwsem);
/* even if !dev_replace_is_valid, the values are good enough for
* the replace_status ioctl */
args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
@@ -773,7 +780,7 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
args->status.num_uncorrectable_read_errors =
atomic64_read(&dev_replace->num_uncorrectable_read_errors);
args->status.progress_1000 = btrfs_dev_replace_progress(fs_info);
- btrfs_dev_replace_read_unlock(dev_replace);
+ up_read(&dev_replace->rwsem);
}
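
The conversion throughout this file replaces the hand-rolled blocking read/write lock with a plain rw_semaphore: status readers take it shared, state transitions take it exclusive. The same split with POSIX rwlocks, for shape (fields are illustrative; link with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t rwsem = PTHREAD_RWLOCK_INITIALIZER;
    static int replace_state;

    static int read_status(void)
    {
        int snapshot;

        pthread_rwlock_rdlock(&rwsem);   /* down_read(&dev_replace->rwsem) */
        snapshot = replace_state;
        pthread_rwlock_unlock(&rwsem);   /* up_read(...) */
        return snapshot;
    }

    static void start_replace(void)
    {
        pthread_rwlock_wrlock(&rwsem);   /* down_write(&dev_replace->rwsem) */
        replace_state = 1;
        pthread_rwlock_unlock(&rwsem);   /* up_write(...) */
    }

    int main(void)
    {
        start_replace();
        printf("state = %d\n", read_status());
        return 0;
    }
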
int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
@@ -790,46 +797,74 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
return -EROFS;
mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
- btrfs_dev_replace_write_lock(dev_replace);
+ down_write(&dev_replace->rwsem);
switch (dev_replace->replace_state) {
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
- btrfs_dev_replace_write_unlock(dev_replace);
- goto leave;
+ up_write(&dev_replace->rwsem);
+ break;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
+ tgt_device = dev_replace->tgtdev;
+ src_device = dev_replace->srcdev;
+ up_write(&dev_replace->rwsem);
+ ret = btrfs_scrub_cancel(fs_info);
+ if (ret < 0) {
+ result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED;
+ } else {
+ result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
+ /*
+ * btrfs_dev_replace_finishing() will handle the
+ * cleanup part
+ */
+ btrfs_info_in_rcu(fs_info,
+ "dev_replace from %s (devid %llu) to %s canceled",
+ btrfs_dev_name(src_device), src_device->devid,
+ btrfs_dev_name(tgt_device));
+ }
+ break;
case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
+ /*
+ * Scrub doing the replace isn't running so we need to do the
+ * cleanup step of btrfs_dev_replace_finishing() here
+ */
result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
tgt_device = dev_replace->tgtdev;
src_device = dev_replace->srcdev;
dev_replace->tgtdev = NULL;
dev_replace->srcdev = NULL;
- break;
- }
- dev_replace->replace_state = BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
- dev_replace->time_stopped = ktime_get_real_seconds();
- dev_replace->item_needs_writeback = 1;
- btrfs_dev_replace_write_unlock(dev_replace);
- btrfs_scrub_cancel(fs_info);
+ dev_replace->replace_state =
+ BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED;
+ dev_replace->time_stopped = ktime_get_real_seconds();
+ dev_replace->item_needs_writeback = 1;
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans)) {
- mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
- return PTR_ERR(trans);
- }
- ret = btrfs_commit_transaction(trans);
- WARN_ON(ret);
+ up_write(&dev_replace->rwsem);
- btrfs_info_in_rcu(fs_info,
- "dev_replace from %s (devid %llu) to %s canceled",
- btrfs_dev_name(src_device), src_device->devid,
- btrfs_dev_name(tgt_device));
+ /* Scrub for replace must not be running in suspended state */
+ ret = btrfs_scrub_cancel(fs_info);
+ ASSERT(ret != -ENOTCONN);
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+ return PTR_ERR(trans);
+ }
+ ret = btrfs_commit_transaction(trans);
+ WARN_ON(ret);
- if (tgt_device)
- btrfs_destroy_dev_replace_tgtdev(tgt_device);
+ btrfs_info_in_rcu(fs_info,
+ "suspended dev_replace from %s (devid %llu) to %s canceled",
+ btrfs_dev_name(src_device), src_device->devid,
+ btrfs_dev_name(tgt_device));
+
+ if (tgt_device)
+ btrfs_destroy_dev_replace_tgtdev(tgt_device);
+ break;
+ default:
+ result = -EINVAL;
+ }
-leave:
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return result;
}
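
Reviewer note: for readers following the reworked cancel switch above, here is a minimal, self-contained sketch of what a cancel request does in each replace state. The enum mirrors the BTRFS_IOCTL_DEV_REPLACE_STATE_* values used in the hunk; cancel_effect() is purely illustrative and not kernel code.

#include <stdio.h>

/* Illustrative mirror of the replace states handled by the switch above. */
enum replace_state {
	NEVER_STARTED,
	STARTED,	/* scrub is running the copy */
	SUSPENDED,	/* e.g. after an unmount, no scrub running */
	FINISHED,
	CANCELED,
};

/* Hypothetical summary of the cancel paths in the hunk. */
static const char *cancel_effect(enum replace_state s)
{
	switch (s) {
	case STARTED:
		return "cancel scrub; btrfs_dev_replace_finishing() sets CANCELED";
	case SUSPENDED:
		return "set CANCELED directly, commit, destroy tgtdev";
	default:
		return "nothing running; report NOT_STARTED";
	}
}

int main(void)
{
	printf("%s\n", cancel_effect(SUSPENDED));
	return 0;
}
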
@@ -839,7 +874,8 @@ void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
- btrfs_dev_replace_write_lock(dev_replace);
+ down_write(&dev_replace->rwsem);
+
switch (dev_replace->replace_state) {
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
@@ -855,7 +891,7 @@ void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info)
break;
}
- btrfs_dev_replace_write_unlock(dev_replace);
+ up_write(&dev_replace->rwsem);
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
}
@@ -865,12 +901,13 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
struct task_struct *task;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
- btrfs_dev_replace_write_lock(dev_replace);
+ down_write(&dev_replace->rwsem);
+
switch (dev_replace->replace_state) {
case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
- btrfs_dev_replace_write_unlock(dev_replace);
+ up_write(&dev_replace->rwsem);
return 0;
case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
break;
@@ -884,10 +921,12 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
"cannot continue dev_replace, tgtdev is missing");
btrfs_info(fs_info,
"you may cancel the operation after 'mount -o degraded'");
- btrfs_dev_replace_write_unlock(dev_replace);
+ dev_replace->replace_state =
+ BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
+ up_write(&dev_replace->rwsem);
return 0;
}
- btrfs_dev_replace_write_unlock(dev_replace);
+ up_write(&dev_replace->rwsem);
/*
* This could collide with a paused balance, but the exclusive op logic
@@ -895,6 +934,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
* dev-replace to start anyway.
*/
if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+ down_write(&dev_replace->rwsem);
+ dev_replace->replace_state =
+ BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
+ up_write(&dev_replace->rwsem);
btrfs_info(fs_info,
"cannot resume dev-replace, other exclusive operation running");
return 0;
@@ -925,7 +968,7 @@ static int btrfs_dev_replace_kthread(void *data)
btrfs_device_get_total_bytes(dev_replace->srcdev),
&dev_replace->scrub_progress, 0, 1);
ret = btrfs_dev_replace_finishing(fs_info, ret);
- WARN_ON(ret);
+ WARN_ON(ret && ret != -ECANCELED);
clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
return 0;
@@ -948,7 +991,7 @@ int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
* something that can happen if the dev_replace
* procedure is suspended by an umount and then
* the tgtdev is missing (or "btrfs dev scan") was
- * not called and the the filesystem is remounted
+ * not called and the filesystem is remounted
* in degraded state. This does not stop the
* dev_replace procedure. It needs to be canceled
* manually if the cancellation is wanted.
@@ -958,42 +1001,6 @@ int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
return 1;
}
-void btrfs_dev_replace_read_lock(struct btrfs_dev_replace *dev_replace)
-{
- read_lock(&dev_replace->lock);
-}
-
-void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace)
-{
- read_unlock(&dev_replace->lock);
-}
-
-void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace)
-{
-again:
- wait_event(dev_replace->read_lock_wq,
- atomic_read(&dev_replace->blocking_readers) == 0);
- write_lock(&dev_replace->lock);
- if (atomic_read(&dev_replace->blocking_readers)) {
- write_unlock(&dev_replace->lock);
- goto again;
- }
-}
-
-void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace)
-{
- write_unlock(&dev_replace->lock);
-}
-
-/* inc blocking cnt and release read lock */
-void btrfs_dev_replace_set_lock_blocking(
- struct btrfs_dev_replace *dev_replace)
-{
- /* only set blocking for read lock */
- atomic_inc(&dev_replace->blocking_readers);
- read_unlock(&dev_replace->lock);
-}
-
void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
{
percpu_counter_inc(&fs_info->dev_replace.bio_counter);
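
Reviewer note: the deleted helpers above are the heart of this change; the hand-rolled rwlock-plus-blocking-readers scheme collapses into a single rw_semaphore taken with the stock down_read()/down_write() calls seen in the earlier hunks. As a rough userspace analogy only (pthreads standing in for the kernel rwsem, struct trimmed to one field):

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for the reworked btrfs_dev_replace struct. */
struct dev_replace_demo {
	pthread_rwlock_t rwsem;	/* replaces lock, blocking_readers and waitqueues */
	int replace_state;
};

static void read_status(struct dev_replace_demo *dr)
{
	pthread_rwlock_rdlock(&dr->rwsem);	/* was btrfs_dev_replace_read_lock() */
	printf("state = %d\n", dr->replace_state);
	pthread_rwlock_unlock(&dr->rwsem);	/* was btrfs_dev_replace_read_unlock() */
}

static void set_state(struct dev_replace_demo *dr, int state)
{
	pthread_rwlock_wrlock(&dr->rwsem);	/* was btrfs_dev_replace_write_lock() */
	dr->replace_state = state;
	pthread_rwlock_unlock(&dr->rwsem);	/* was btrfs_dev_replace_write_unlock() */
}

int main(void)
{
	struct dev_replace_demo dr = { .replace_state = 0 };

	pthread_rwlock_init(&dr.rwsem, NULL);
	set_state(&dr, 1);
	read_status(&dr);
	pthread_rwlock_destroy(&dr.rwsem);
	return 0;
}
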
diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h
index 795c551f5b5e..4aa40bacc6cc 100644
--- a/fs/btrfs/dev-replace.h
+++ b/fs/btrfs/dev-replace.h
@@ -13,19 +13,11 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);
-int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
- const char *tgtdev_name, u64 srcdevid, const char *srcdev_name,
- int read_src);
void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);
int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info);
void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info);
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info);
int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
-void btrfs_dev_replace_read_lock(struct btrfs_dev_replace *dev_replace);
-void btrfs_dev_replace_read_unlock(struct btrfs_dev_replace *dev_replace);
-void btrfs_dev_replace_write_lock(struct btrfs_dev_replace *dev_replace);
-void btrfs_dev_replace_write_unlock(struct btrfs_dev_replace *dev_replace);
-void btrfs_dev_replace_set_lock_blocking(struct btrfs_dev_replace *dev_replace);
#endif
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 3f0b6d1936e8..8da2f380d3c0 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -279,6 +279,12 @@ static int csum_tree_block(struct btrfs_fs_info *fs_info,
len = buf->len - offset;
while (len > 0) {
+ /*
+ * Note: we don't need to check for the err == 1 case here, as
+ * with the given combination of 'start = BTRFS_CSUM_SIZE (32)'
+ * and 'min_len = 32' and the currently implemented mapping
+ * algorithm we cannot cross a page boundary.
+ */
err = map_private_extent_buffer(buf, offset, 32,
&kaddr, &map_start, &map_len);
if (err)
@@ -477,9 +483,9 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
int mirror_num = 0;
int failed_mirror = 0;
- clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
while (1) {
+ clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
mirror_num);
if (!ret) {
@@ -493,15 +499,6 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
break;
}
- /*
- * This buffer's crc is fine, but its contents are corrupted, so
- * there is no reason to read the other copies, they won't be
- * any less wrong.
- */
- if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags) ||
- ret == -EUCLEAN)
- break;
-
num_copies = btrfs_num_copies(fs_info,
eb->start, eb->len);
if (num_copies == 1)
@@ -551,7 +548,7 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
if (WARN_ON(!PageUptodate(page)))
return -EUCLEAN;
- ASSERT(memcmp_extent_buffer(eb, fs_info->fsid,
+ ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
btrfs_header_fsid(), BTRFS_FSID_SIZE) == 0);
return csum_tree_block(fs_info, eb, 0);
@@ -566,7 +563,20 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
read_extent_buffer(eb, fsid, btrfs_header_fsid(), BTRFS_FSID_SIZE);
while (fs_devices) {
- if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
+ u8 *metadata_uuid;
+
+ /*
+ * Checking the incompat flag is only valid for the current
+ * fs. For seed devices it's forbidden to have their uuid
+ * changed so reading ->fsid in this case is fine
+ */
+ if (fs_devices == fs_info->fs_devices &&
+ btrfs_fs_incompat(fs_info, METADATA_UUID))
+ metadata_uuid = fs_devices->metadata_uuid;
+ else
+ metadata_uuid = fs_devices->fsid;
+
+ if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) {
ret = 0;
break;
}
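
Reviewer note: the uuid-selection rule in this hunk is compact enough to restate on its own. The sketch below is hypothetical (struct and helper names invented, uuids shortened) and only mirrors the branch above: seed devices may never change their uuid, so only the currently mounted fs with the METADATA_UUID incompat flag compares headers against the separate metadata uuid.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the fields consulted by check_tree_block_fsid(). */
struct fs_devices_demo {
	unsigned char fsid[16];
	unsigned char metadata_uuid[16];
};

static const unsigned char *uuid_for_compare(const struct fs_devices_demo *fd,
					     bool is_current_fs,
					     bool has_metadata_uuid)
{
	if (is_current_fs && has_metadata_uuid)
		return fd->metadata_uuid;
	return fd->fsid;	/* seed devices and plain filesystems */
}

int main(void)
{
	struct fs_devices_demo fd = { .fsid = "A", .metadata_uuid = "B" };

	printf("%c\n", uuid_for_compare(&fd, true, true)[0]);	/* B */
	printf("%c\n", uuid_for_compare(&fd, false, true)[0]);	/* A */
	return 0;
}
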
@@ -669,19 +679,6 @@ out:
return ret;
}
-static int btree_io_failed_hook(struct page *page, int failed_mirror)
-{
- struct extent_buffer *eb;
-
- eb = (struct extent_buffer *)page->private;
- set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
- eb->read_mirror = failed_mirror;
- atomic_dec(&eb->io_pages);
- if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
- btree_readahead_hook(eb, -EIO);
- return -EIO; /* we fixed nothing */
-}
-
static void end_workqueue_bio(struct bio *bio)
{
struct btrfs_end_io_wq *end_io_wq = bio->bi_private;
@@ -760,11 +757,22 @@ static void run_one_async_start(struct btrfs_work *work)
async->status = ret;
}
+/*
+ * In order to insert checksums into the metadata in large chunks, we wait
+ * until bio submission time. All the pages in the bio are checksummed and
+ * sums are attached onto the ordered extent record.
+ *
+ * At IO completion time the csums attached on the ordered extent record are
+ * inserted into the tree.
+ */
static void run_one_async_done(struct btrfs_work *work)
{
struct async_submit_bio *async;
+ struct inode *inode;
+ blk_status_t ret;
async = container_of(work, struct async_submit_bio, work);
+ inode = async->private_data;
/* If an error occurred we just want to clean up the bio and move on */
if (async->status) {
@@ -773,7 +781,12 @@ static void run_one_async_done(struct btrfs_work *work)
return;
}
- btrfs_submit_bio_done(async->private_data, async->bio, async->mirror_num);
+ ret = btrfs_map_bio(btrfs_sb(inode->i_sb), async->bio,
+ async->mirror_num, 1);
+ if (ret) {
+ async->bio->bi_status = ret;
+ bio_endio(async->bio);
+ }
}
static void run_one_async_free(struct btrfs_work *work)
@@ -1187,6 +1200,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
refcount_set(&root->refs, 1);
atomic_set(&root->will_be_snapshotted, 0);
atomic_set(&root->snapshot_force_cow, 0);
+ atomic_set(&root->nr_swapfiles, 0);
root->log_transid = 0;
root->log_transid_committed = -1;
root->last_log_commit = 0;
@@ -2127,10 +2141,8 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
{
mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
- rwlock_init(&fs_info->dev_replace.lock);
- atomic_set(&fs_info->dev_replace.blocking_readers, 0);
+ init_rwsem(&fs_info->dev_replace.rwsem);
init_waitqueue_head(&fs_info->dev_replace.replace_wait);
- init_waitqueue_head(&fs_info->dev_replace.read_lock_wq);
}
static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
@@ -2451,10 +2463,11 @@ static int validate_super(struct btrfs_fs_info *fs_info,
ret = -EINVAL;
}
- if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) {
+ if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid,
+ BTRFS_FSID_SIZE) != 0) {
btrfs_err(fs_info,
- "dev_item UUID does not match fsid: %pU != %pU",
- fs_info->fsid, sb->dev_item.fsid);
+ "dev_item UUID does not match metadata fsid: %pU != %pU",
+ fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
ret = -EINVAL;
}
@@ -2665,6 +2678,9 @@ int open_ctree(struct super_block *sb,
btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
BTRFS_BLOCK_RSV_DELOPS);
+ btrfs_init_block_rsv(&fs_info->delayed_refs_rsv,
+ BTRFS_BLOCK_RSV_DELREFS);
+
atomic_set(&fs_info->async_delalloc_pages, 0);
atomic_set(&fs_info->defrag_running, 0);
atomic_set(&fs_info->qgroup_op_seq, 0);
@@ -2754,6 +2770,9 @@ int open_ctree(struct super_block *sb,
fs_info->sectorsize = 4096;
fs_info->stripesize = 4096;
+ spin_lock_init(&fs_info->swapfile_pins_lock);
+ fs_info->swapfile_pins = RB_ROOT;
+
ret = btrfs_alloc_stripe_hash_table(fs_info);
if (ret) {
err = ret;
@@ -2790,11 +2809,29 @@ int open_ctree(struct super_block *sb,
* the whole block of INFO_SIZE
*/
memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
- memcpy(fs_info->super_for_commit, fs_info->super_copy,
- sizeof(*fs_info->super_for_commit));
brelse(bh);
- memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
+ disk_super = fs_info->super_copy;
+
+ ASSERT(!memcmp(fs_info->fs_devices->fsid, fs_info->super_copy->fsid,
+ BTRFS_FSID_SIZE));
+
+ if (btrfs_fs_incompat(fs_info, METADATA_UUID)) {
+ ASSERT(!memcmp(fs_info->fs_devices->metadata_uuid,
+ fs_info->super_copy->metadata_uuid,
+ BTRFS_FSID_SIZE));
+ }
+
+ features = btrfs_super_flags(disk_super);
+ if (features & BTRFS_SUPER_FLAG_CHANGING_FSID_V2) {
+ features &= ~BTRFS_SUPER_FLAG_CHANGING_FSID_V2;
+ btrfs_set_super_flags(disk_super, features);
+ btrfs_info(fs_info,
+ "found metadata UUID change in progress flag, clearing");
+ }
+
+ memcpy(fs_info->super_for_commit, fs_info->super_copy,
+ sizeof(*fs_info->super_for_commit));
ret = btrfs_validate_mount_super(fs_info);
if (ret) {
@@ -2803,7 +2840,6 @@ int open_ctree(struct super_block *sb,
goto fail_alloc;
}
- disk_super = fs_info->super_copy;
if (!btrfs_super_root(disk_super))
goto fail_alloc;
@@ -2915,7 +2951,7 @@ int open_ctree(struct super_block *sb,
sb->s_blocksize = sectorsize;
sb->s_blocksize_bits = blksize_bits(sectorsize);
- memcpy(&sb->s_uuid, fs_info->fsid, BTRFS_FSID_SIZE);
+ memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
mutex_lock(&fs_info->chunk_mutex);
ret = btrfs_read_sys_array(fs_info);
@@ -3064,7 +3100,7 @@ retry_root_backup:
if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
btrfs_warn(fs_info,
- "writeable mount is not allowed due to too many missing devices");
+ "writable mount is not allowed due to too many missing devices");
goto fail_sysfs;
}
@@ -3733,7 +3769,8 @@ int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
btrfs_set_stack_device_io_width(dev_item, dev->io_width);
btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
- memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_FSID_SIZE);
+ memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
+ BTRFS_FSID_SIZE);
flags = btrfs_super_flags(sb);
btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
@@ -4040,7 +4077,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
* This is a fast path so only do this check if we have sanity tests
- * enabled. Normal people shouldn't be using umapped buffers as dirty
+ * enabled. Normal people shouldn't be using unmapped buffers as dirty
* outside of the sanity tests.
*/
if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
@@ -4338,6 +4375,8 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
unpin = pinned_extents;
again:
while (1) {
+ struct extent_state *cached_state = NULL;
+
/*
* The btrfs_finish_extent_commit() may get the same range as
* ours between find_first_extent_bit and clear_extent_dirty.
@@ -4346,13 +4385,14 @@ again:
*/
mutex_lock(&fs_info->unused_bg_unpin_mutex);
ret = find_first_extent_bit(unpin, 0, &start, &end,
- EXTENT_DIRTY, NULL);
+ EXTENT_DIRTY, &cached_state);
if (ret) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
}
- clear_extent_dirty(unpin, start, end);
+ clear_extent_dirty(unpin, start, end, &cached_state);
+ free_extent_state(cached_state);
btrfs_error_unpin_extent_range(fs_info, start, end);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
@@ -4409,6 +4449,7 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
spin_unlock(&cur_trans->dirty_bgs_lock);
btrfs_put_block_group(cache);
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
@@ -4514,7 +4555,4 @@ static const struct extent_io_ops btree_extent_io_ops = {
/* mandatory callbacks */
.submit_bio_hook = btree_submit_bio_hook,
.readpage_end_io_hook = btree_readpage_end_io_hook,
- .readpage_io_failed_hook = btree_io_failed_hook,
-
- /* optional callbacks */
};
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 4cccba22640f..987a64bc0c66 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -21,11 +21,11 @@
#define BTRFS_BDEV_BLOCKSIZE (4096)
enum btrfs_wq_endio_type {
- BTRFS_WQ_ENDIO_DATA = 0,
- BTRFS_WQ_ENDIO_METADATA = 1,
- BTRFS_WQ_ENDIO_FREE_SPACE = 2,
- BTRFS_WQ_ENDIO_RAID56 = 3,
- BTRFS_WQ_ENDIO_DIO_REPAIR = 4,
+ BTRFS_WQ_ENDIO_DATA,
+ BTRFS_WQ_ENDIO_METADATA,
+ BTRFS_WQ_ENDIO_FREE_SPACE,
+ BTRFS_WQ_ENDIO_RAID56,
+ BTRFS_WQ_ENDIO_DIO_REPAIR,
};
static inline u64 btrfs_sb_offset(int mirror)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a1febf155747..b15afeae16df 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -51,6 +51,24 @@ enum {
CHUNK_ALLOC_FORCE = 2,
};
+/*
+ * Declare a helper function to detect underflow of various space info members
+ */
+#define DECLARE_SPACE_INFO_UPDATE(name) \
+static inline void update_##name(struct btrfs_space_info *sinfo, \
+ s64 bytes) \
+{ \
+ if (bytes < 0 && sinfo->name < -bytes) { \
+ WARN_ON(1); \
+ sinfo->name = 0; \
+ return; \
+ } \
+ sinfo->name += bytes; \
+}
+
+DECLARE_SPACE_INFO_UPDATE(bytes_may_use);
+DECLARE_SPACE_INFO_UPDATE(bytes_pinned);
+
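
Reviewer note: to see what the macro above generates, here is a compilable sketch with a pared-down space-info struct. update_bytes_may_use() expands exactly like the hunk's version, clamping at zero when a decrement would underflow; fprintf stands in for WARN_ON, which is the only invented substitution.

#include <stdint.h>
#include <stdio.h>

/* Pared-down stand-in for btrfs_space_info, just for this demo. */
struct btrfs_space_info {
	uint64_t bytes_may_use;
	uint64_t bytes_pinned;
};

#define DECLARE_SPACE_INFO_UPDATE(name)					\
static inline void update_##name(struct btrfs_space_info *sinfo,	\
				 int64_t bytes)				\
{									\
	if (bytes < 0 && sinfo->name < (uint64_t)-bytes) {		\
		fprintf(stderr, "underflow of " #name "\n");		\
		sinfo->name = 0;					\
		return;							\
	}								\
	sinfo->name += bytes;						\
}

DECLARE_SPACE_INFO_UPDATE(bytes_may_use);
DECLARE_SPACE_INFO_UPDATE(bytes_pinned);

int main(void)
{
	struct btrfs_space_info si = { .bytes_may_use = 100 };

	update_bytes_may_use(&si, -150);	/* clamps to 0 and warns */
	printf("%llu\n", (unsigned long long)si.bytes_may_use);
	return 0;
}
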
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_node *node, u64 parent,
u64 root_objectid, u64 owner_objectid,
@@ -1037,7 +1055,7 @@ out_free:
/*
* is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
- * is_data == BTRFS_REF_TYPE_DATA, data type is requried,
+ * is_data == BTRFS_REF_TYPE_DATA, data type is required,
* is_data == BTRFS_REF_TYPE_ANY, either type is OK.
*/
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
@@ -2406,25 +2424,82 @@ static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_ref
btrfs_delayed_ref_unlock(head);
}
-static int cleanup_extent_op(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_ref_head *head)
+static struct btrfs_delayed_extent_op *cleanup_extent_op(
+ struct btrfs_delayed_ref_head *head)
{
struct btrfs_delayed_extent_op *extent_op = head->extent_op;
- int ret;
if (!extent_op)
- return 0;
- head->extent_op = NULL;
+ return NULL;
+
if (head->must_insert_reserved) {
+ head->extent_op = NULL;
btrfs_free_delayed_extent_op(extent_op);
- return 0;
+ return NULL;
}
+ return extent_op;
+}
+
+static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_delayed_extent_op *extent_op;
+ int ret;
+
+ extent_op = cleanup_extent_op(head);
+ if (!extent_op)
+ return 0;
+ head->extent_op = NULL;
spin_unlock(&head->lock);
ret = run_delayed_extent_op(trans, head, extent_op);
btrfs_free_delayed_extent_op(extent_op);
return ret ? ret : 1;
}
+static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans,
+ struct btrfs_delayed_ref_head *head)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_delayed_ref_root *delayed_refs =
+ &trans->transaction->delayed_refs;
+ int nr_items = 1; /* Dropping this ref head update. */
+
+ if (head->total_ref_mod < 0) {
+ struct btrfs_space_info *space_info;
+ u64 flags;
+
+ if (head->is_data)
+ flags = BTRFS_BLOCK_GROUP_DATA;
+ else if (head->is_system)
+ flags = BTRFS_BLOCK_GROUP_SYSTEM;
+ else
+ flags = BTRFS_BLOCK_GROUP_METADATA;
+ space_info = __find_space_info(fs_info, flags);
+ ASSERT(space_info);
+ percpu_counter_add_batch(&space_info->total_bytes_pinned,
+ -head->num_bytes,
+ BTRFS_TOTAL_BYTES_PINNED_BATCH);
+
+ /*
+ * We had csum deletions accounted for in our delayed refs rsv,
+ * we need to drop the csum leaves for this update from our
+ * delayed_refs_rsv.
+ */
+ if (head->is_data) {
+ spin_lock(&delayed_refs->lock);
+ delayed_refs->pending_csums -= head->num_bytes;
+ spin_unlock(&delayed_refs->lock);
+ nr_items += btrfs_csum_bytes_to_leaves(fs_info,
+ head->num_bytes);
+ }
+ }
+
+ /* Also free its reserved qgroup space */
+ btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
+ head->qgroup_reserved);
+ btrfs_delayed_refs_rsv_release(fs_info, nr_items);
+}
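
Reviewer note: the nr_items accounting above (one for the ref head update itself, plus the csum leaves a net-negative data head would have deleted) is easy to restate numerically. A sketch with an invented csum-coverage ratio; LEAF_BYTES stands in for what btrfs_csum_bytes_to_leaves() derives from the nodesize.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LEAF_BYTES (4 * 1024 * 1024)	/* pretend one leaf covers 4 MiB of csums */

/* Illustrative restatement of the nr_items math in cleanup_ref_head_accounting(). */
static int nr_items_for_head(bool is_data, int64_t total_ref_mod,
			     uint64_t num_bytes)
{
	int nr_items = 1;	/* dropping this ref head update */

	if (total_ref_mod < 0 && is_data)
		nr_items += (num_bytes + LEAF_BYTES - 1) / LEAF_BYTES;
	return nr_items;
}

int main(void)
{
	/* 6 MiB of data csums -> 2 leaves, plus the head itself = 3 */
	printf("%d\n", nr_items_for_head(true, -1, 6 * 1024 * 1024));
	return 0;
}
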
+
static int cleanup_ref_head(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head)
{
@@ -2435,7 +2510,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
delayed_refs = &trans->transaction->delayed_refs;
- ret = cleanup_extent_op(trans, head);
+ ret = run_and_cleanup_extent_op(trans, head);
if (ret < 0) {
unselect_delayed_ref_head(delayed_refs, head);
btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
@@ -2456,37 +2531,9 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
spin_unlock(&delayed_refs->lock);
return 1;
}
- delayed_refs->num_heads--;
- rb_erase_cached(&head->href_node, &delayed_refs->href_root);
- RB_CLEAR_NODE(&head->href_node);
+ btrfs_delete_ref_head(delayed_refs, head);
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
- atomic_dec(&delayed_refs->num_entries);
-
- trace_run_delayed_ref_head(fs_info, head, 0);
-
- if (head->total_ref_mod < 0) {
- struct btrfs_space_info *space_info;
- u64 flags;
-
- if (head->is_data)
- flags = BTRFS_BLOCK_GROUP_DATA;
- else if (head->is_system)
- flags = BTRFS_BLOCK_GROUP_SYSTEM;
- else
- flags = BTRFS_BLOCK_GROUP_METADATA;
- space_info = __find_space_info(fs_info, flags);
- ASSERT(space_info);
- percpu_counter_add_batch(&space_info->total_bytes_pinned,
- -head->num_bytes,
- BTRFS_TOTAL_BYTES_PINNED_BATCH);
-
- if (head->is_data) {
- spin_lock(&delayed_refs->lock);
- delayed_refs->pending_csums -= head->num_bytes;
- spin_unlock(&delayed_refs->lock);
- }
- }
if (head->must_insert_reserved) {
btrfs_pin_extent(fs_info, head->bytenr,
@@ -2497,9 +2544,9 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
}
}
- /* Also free its reserved qgroup space */
- btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
- head->qgroup_reserved);
+ cleanup_ref_head_accounting(trans, head);
+
+ trace_run_delayed_ref_head(fs_info, head, 0);
btrfs_delayed_ref_unlock(head);
btrfs_put_delayed_ref_head(head);
return 0;
@@ -2792,40 +2839,28 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
return num_csums;
}
-int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans)
+bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
- struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_block_rsv *global_rsv;
- u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
- u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
- unsigned int num_dirty_bgs = trans->transaction->num_dirty_bgs;
- u64 num_bytes, num_dirty_bgs_bytes;
- int ret = 0;
+ struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+ bool ret = false;
+ u64 reserved;
- num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
- num_heads = heads_to_leaves(fs_info, num_heads);
- if (num_heads > 1)
- num_bytes += (num_heads - 1) * fs_info->nodesize;
- num_bytes <<= 1;
- num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
- fs_info->nodesize;
- num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
- num_dirty_bgs);
- global_rsv = &fs_info->global_block_rsv;
+ spin_lock(&global_rsv->lock);
+ reserved = global_rsv->reserved;
+ spin_unlock(&global_rsv->lock);
/*
- * If we can't allocate any more chunks lets make sure we have _lots_ of
- * wiggle room since running delayed refs can create more delayed refs.
+ * Since the global reserve is just kind of magic we don't really want
+ * to rely on it to save our bacon, so if our size is more than the
+ * delayed_refs_rsv and the global rsv then it's time to think about
+ * bailing.
*/
- if (global_rsv->space_info->full) {
- num_dirty_bgs_bytes <<= 1;
- num_bytes <<= 1;
- }
-
- spin_lock(&global_rsv->lock);
- if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
- ret = 1;
- spin_unlock(&global_rsv->lock);
+ spin_lock(&delayed_refs_rsv->lock);
+ reserved += delayed_refs_rsv->reserved;
+ if (delayed_refs_rsv->size >= reserved)
+ ret = true;
+ spin_unlock(&delayed_refs_rsv->lock);
return ret;
}
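
Reviewer note: the reworked check above reduces to one comparison; throttling is advisable once the delayed-refs reservation's size outgrows what is actually reserved in it plus the global reserve. A minimal standalone restatement (function name invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the comparison in btrfs_check_space_for_delayed_refs() above. */
static bool should_throttle(uint64_t global_reserved,
			    uint64_t delayed_refs_reserved,
			    uint64_t delayed_refs_size)
{
	return delayed_refs_size >= global_reserved + delayed_refs_reserved;
}

int main(void)
{
	/* size 10 MiB, but only 4 MiB + 2 MiB actually reserved -> throttle */
	printf("%d\n", should_throttle(4 << 20, 2 << 20, 10 << 20));
	return 0;
}
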
@@ -2844,7 +2879,7 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
if (val >= NSEC_PER_SEC / 2)
return 2;
- return btrfs_check_space_for_delayed_refs(trans);
+ return btrfs_check_space_for_delayed_refs(trans->fs_info);
}
struct async_delayed_refs {
@@ -3588,6 +3623,8 @@ again:
*/
mutex_lock(&trans->transaction->cache_write_mutex);
while (!list_empty(&dirty)) {
+ bool drop_reserve = true;
+
cache = list_first_entry(&dirty,
struct btrfs_block_group_cache,
dirty_list);
@@ -3660,6 +3697,7 @@ again:
list_add_tail(&cache->dirty_list,
&cur_trans->dirty_bgs);
btrfs_get_block_group(cache);
+ drop_reserve = false;
}
spin_unlock(&cur_trans->dirty_bgs_lock);
} else if (ret) {
@@ -3667,9 +3705,11 @@ again:
}
}
- /* if its not on the io list, we need to put the block group */
+ /* if it's not on the io list, we need to put the block group */
if (should_put)
btrfs_put_block_group(cache);
+ if (drop_reserve)
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
if (ret)
break;
@@ -3818,6 +3858,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
/* if it's not on the io list, we need to put the block group */
if (should_put)
btrfs_put_block_group(cache);
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
spin_lock(&cur_trans->dirty_bgs_lock);
}
spin_unlock(&cur_trans->dirty_bgs_lock);
@@ -4256,7 +4297,7 @@ commit_trans:
data_sinfo->flags, bytes, 1);
return -ENOSPC;
}
- data_sinfo->bytes_may_use += bytes;
+ update_bytes_may_use(data_sinfo, bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
data_sinfo->flags, bytes, 1);
spin_unlock(&data_sinfo->lock);
@@ -4309,10 +4350,7 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
data_sinfo = fs_info->data_sinfo;
spin_lock(&data_sinfo->lock);
- if (WARN_ON(data_sinfo->bytes_may_use < len))
- data_sinfo->bytes_may_use = 0;
- else
- data_sinfo->bytes_may_use -= len;
+ update_bytes_may_use(data_sinfo, -len);
trace_btrfs_space_reservation(fs_info, "space_info",
data_sinfo->flags, len, 0);
spin_unlock(&data_sinfo->lock);
@@ -4637,7 +4675,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info,
/*
* If we have dup, raid1 or raid10 then only half of the free
- * space is actually useable. For raid56, the space info used
+ * space is actually usable. For raid56, the space info used
* doesn't include the parity drive, so we don't have to
* change the math
*/
@@ -4793,8 +4831,10 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
{
struct reserve_ticket *ticket = NULL;
struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
+ struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
struct btrfs_trans_handle *trans;
- u64 bytes;
+ u64 bytes_needed;
+ u64 reclaim_bytes = 0;
trans = (struct btrfs_trans_handle *)current->journal_info;
if (trans)
@@ -4807,15 +4847,15 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
else if (!list_empty(&space_info->tickets))
ticket = list_first_entry(&space_info->tickets,
struct reserve_ticket, list);
- bytes = (ticket) ? ticket->bytes : 0;
+ bytes_needed = (ticket) ? ticket->bytes : 0;
spin_unlock(&space_info->lock);
- if (!bytes)
+ if (!bytes_needed)
return 0;
/* See if there is enough pinned space to make this reservation */
if (__percpu_counter_compare(&space_info->total_bytes_pinned,
- bytes,
+ bytes_needed,
BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
goto commit;
@@ -4827,14 +4867,18 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
return -ENOSPC;
spin_lock(&delayed_rsv->lock);
- if (delayed_rsv->size > bytes)
- bytes = 0;
- else
- bytes -= delayed_rsv->size;
+ reclaim_bytes += delayed_rsv->reserved;
spin_unlock(&delayed_rsv->lock);
+ spin_lock(&delayed_refs_rsv->lock);
+ reclaim_bytes += delayed_refs_rsv->reserved;
+ spin_unlock(&delayed_refs_rsv->lock);
+ if (reclaim_bytes >= bytes_needed)
+ goto commit;
+ bytes_needed -= reclaim_bytes;
+
if (__percpu_counter_compare(&space_info->total_bytes_pinned,
- bytes,
+ bytes_needed,
BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0) {
return -ENOSPC;
}
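
Reviewer note: the reworked ticket math above can be followed with concrete numbers. An illustrative sketch (names and byte counts invented) of the decision whether committing the transaction can satisfy the ticket: reclaimable reservations are counted first, and only the remainder must be covered by pinned bytes.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the reclaim arithmetic in may_commit_transaction() above. */
static int commit_would_help(uint64_t bytes_needed, uint64_t delayed_reserved,
			     uint64_t delayed_refs_reserved,
			     uint64_t total_pinned)
{
	uint64_t reclaim = delayed_reserved + delayed_refs_reserved;

	if (reclaim >= bytes_needed)
		return 1;	/* reclaim alone covers the ticket */
	return total_pinned >= bytes_needed - reclaim;
}

int main(void)
{
	/* need 8 MiB; 1 + 2 MiB reclaimable, 6 MiB pinned -> commit helps */
	printf("%d\n", commit_would_help(8 << 20, 1 << 20, 2 << 20, 6 << 20));
	return 0;
}
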
@@ -4882,6 +4926,20 @@ static void flush_space(struct btrfs_fs_info *fs_info,
shrink_delalloc(fs_info, num_bytes * 2, num_bytes,
state == FLUSH_DELALLOC_WAIT);
break;
+ case FLUSH_DELAYED_REFS_NR:
+ case FLUSH_DELAYED_REFS:
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ break;
+ }
+ if (state == FLUSH_DELAYED_REFS_NR)
+ nr = calc_reclaim_items_nr(fs_info, num_bytes);
+ else
+ nr = 0;
+ btrfs_run_delayed_refs(trans, nr);
+ btrfs_end_transaction(trans);
+ break;
case ALLOC_CHUNK:
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
@@ -5108,7 +5166,7 @@ static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
list_del_init(&ticket->list);
if (ticket->bytes && ticket->bytes < orig_bytes) {
u64 num_bytes = orig_bytes - ticket->bytes;
- space_info->bytes_may_use -= num_bytes;
+ update_bytes_may_use(space_info, -num_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags, num_bytes, 0);
}
@@ -5154,13 +5212,13 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
* If not things get more complicated.
*/
if (used + orig_bytes <= space_info->total_bytes) {
- space_info->bytes_may_use += orig_bytes;
+ update_bytes_may_use(space_info, orig_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags, orig_bytes, 1);
ret = 0;
} else if (can_overcommit(fs_info, space_info, orig_bytes, flush,
system_chunk)) {
- space_info->bytes_may_use += orig_bytes;
+ update_bytes_may_use(space_info, orig_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags, orig_bytes, 1);
ret = 0;
@@ -5223,7 +5281,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
if (ticket.bytes) {
if (ticket.bytes < orig_bytes) {
u64 num_bytes = orig_bytes - ticket.bytes;
- space_info->bytes_may_use -= num_bytes;
+ update_bytes_may_use(space_info, -num_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags,
num_bytes, 0);
@@ -5244,7 +5302,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
* @orig_bytes - the number of bytes we want
* @flush - whether or not we can flush to make our reservation
*
- * This will reserve orgi_bytes number of bytes from the space info associated
+ * This will reserve orig_bytes number of bytes from the space info associated
* with the block_rsv. If there is not enough space it will make an attempt to
* flush out space to make room. It will do this by flushing delalloc if
* possible or committing the transaction. If flush is 0 then no attempts to
@@ -5354,6 +5412,90 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
return 0;
}
+/**
+ * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv.
+ * @fs_info - the fs info for our fs.
+ * @src - the source block rsv to transfer from.
+ * @num_bytes - the number of bytes to transfer.
+ *
+ * This transfers up to the num_bytes amount from the src rsv to the
+ * delayed_refs_rsv. Any extra bytes are returned to the space info.
+ */
+void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *src,
+ u64 num_bytes)
+{
+ struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+ u64 to_free = 0;
+
+ spin_lock(&src->lock);
+ src->reserved -= num_bytes;
+ src->size -= num_bytes;
+ spin_unlock(&src->lock);
+
+ spin_lock(&delayed_refs_rsv->lock);
+ if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
+ u64 delta = delayed_refs_rsv->size -
+ delayed_refs_rsv->reserved;
+ if (num_bytes > delta) {
+ to_free = num_bytes - delta;
+ num_bytes = delta;
+ }
+ } else {
+ to_free = num_bytes;
+ num_bytes = 0;
+ }
+
+ if (num_bytes)
+ delayed_refs_rsv->reserved += num_bytes;
+ if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
+ delayed_refs_rsv->full = 1;
+ spin_unlock(&delayed_refs_rsv->lock);
+
+ if (num_bytes)
+ trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+ 0, num_bytes, 1);
+ if (to_free)
+ space_info_add_old_bytes(fs_info, delayed_refs_rsv->space_info,
+ to_free);
+}
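
Reviewer note: the delta bookkeeping in btrfs_migrate_to_delayed_refs_rsv() deserves a worked example. A sketch with invented numbers and locking omitted: with a target rsv of size 10 already holding 8, migrating 5 bytes moves only delta = 2 into the rsv and returns to_free = 3 to the space info.

#include <stdint.h>
#include <stdio.h>

/* Illustrative mirror of the migrate logic above. */
static void migrate(uint64_t *target_reserved, uint64_t target_size,
		    uint64_t num_bytes, uint64_t *to_free)
{
	*to_free = 0;
	if (target_size > *target_reserved) {
		uint64_t delta = target_size - *target_reserved;

		if (num_bytes > delta) {
			*to_free = num_bytes - delta;
			num_bytes = delta;
		}
	} else {
		*to_free = num_bytes;
		num_bytes = 0;
	}
	*target_reserved += num_bytes;
}

int main(void)
{
	uint64_t reserved = 8, to_free;

	migrate(&reserved, 10, 5, &to_free);
	/* prints: reserved=10 to_free=3 */
	printf("reserved=%llu to_free=%llu\n",
	       (unsigned long long)reserved, (unsigned long long)to_free);
	return 0;
}
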
+
+/**
+ * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage.
+ * @fs_info - the fs_info for our fs.
+ * @flush - control how we can flush for this reservation.
+ *
+ * This will refill the delayed block_rsv up to one item's worth of space and
+ * will return -ENOSPC if we can't make the reservation.
+ */
+int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
+ enum btrfs_reserve_flush_enum flush)
+{
+ struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
+ u64 limit = btrfs_calc_trans_metadata_size(fs_info, 1);
+ u64 num_bytes = 0;
+ int ret = -ENOSPC;
+
+ spin_lock(&block_rsv->lock);
+ if (block_rsv->reserved < block_rsv->size) {
+ num_bytes = block_rsv->size - block_rsv->reserved;
+ num_bytes = min(num_bytes, limit);
+ }
+ spin_unlock(&block_rsv->lock);
+
+ if (!num_bytes)
+ return 0;
+
+ ret = reserve_metadata_bytes(fs_info->extent_root, block_rsv,
+ num_bytes, flush);
+ if (ret)
+ return ret;
+ block_rsv_add_bytes(block_rsv, num_bytes, 0);
+ trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+ 0, num_bytes, 1);
+ return 0;
+}
+
/*
* This is for space we already have accounted in space_info->bytes_may_use, so
* basically when we're returning space from block_rsv's.
@@ -5407,7 +5549,7 @@ again:
flush = BTRFS_RESERVE_FLUSH_ALL;
goto again;
}
- space_info->bytes_may_use -= num_bytes;
+ update_bytes_may_use(space_info, -num_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags, num_bytes, 0);
spin_unlock(&space_info->lock);
@@ -5435,7 +5577,7 @@ again:
ticket->bytes, 1);
list_del_init(&ticket->list);
num_bytes -= ticket->bytes;
- space_info->bytes_may_use += ticket->bytes;
+ update_bytes_may_use(space_info, ticket->bytes);
ticket->bytes = 0;
space_info->tickets_id++;
wake_up(&ticket->wait);
@@ -5443,7 +5585,7 @@ again:
trace_btrfs_space_reservation(fs_info, "space_info",
space_info->flags,
num_bytes, 1);
- space_info->bytes_may_use += num_bytes;
+ update_bytes_may_use(space_info, num_bytes);
ticket->bytes -= num_bytes;
num_bytes = 0;
}
@@ -5629,11 +5771,11 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
/**
* btrfs_inode_rsv_refill - refill the inode block rsv.
* @inode - the inode we are refilling.
- * @flush - the flusing restriction.
+ * @flush - the flushing restriction.
*
* Essentially the same as btrfs_block_rsv_refill, except it uses the
* block_rsv->size as the minimum size. We'll either refill the missing amount
- * or return if we already have enough space. This will also handle the resreve
+ * or return if we already have enough space. This will also handle the reserve
* tracepoint for the reserved amount.
*/
static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
@@ -5674,6 +5816,31 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
return ret;
}
+static u64 __btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *block_rsv,
+ u64 num_bytes, u64 *qgroup_to_release)
+{
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+ struct btrfs_block_rsv *target = delayed_rsv;
+
+ if (target->full || target == block_rsv)
+ target = global_rsv;
+
+ if (block_rsv->space_info != target->space_info)
+ target = NULL;
+
+ return block_rsv_release_bytes(fs_info, block_rsv, target, num_bytes,
+ qgroup_to_release);
+}
+
+void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
+ struct btrfs_block_rsv *block_rsv,
+ u64 num_bytes)
+{
+ __btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
+}
+
/**
* btrfs_inode_rsv_release - release any excessive reservation.
* @inode - the inode we need to release from.
@@ -5688,7 +5855,6 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
u64 released = 0;
u64 qgroup_to_release = 0;
@@ -5698,8 +5864,8 @@ static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
* are releasing 0 bytes, and then we'll just get the reservation over
* the size free'd.
*/
- released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0,
- &qgroup_to_release);
+ released = __btrfs_block_rsv_release(fs_info, block_rsv, 0,
+ &qgroup_to_release);
if (released > 0)
trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), released, 0);
@@ -5710,16 +5876,26 @@ static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free)
qgroup_to_release);
}
-void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *block_rsv,
- u64 num_bytes)
+/**
+ * btrfs_delayed_refs_rsv_release - release a ref head's reservation.
+ * @fs_info - the fs_info for our fs.
+ * @nr - the number of items to drop.
+ *
+ * This drops the delayed ref head's count from the delayed refs rsv and frees
+ * any excess reservation we had.
+ */
+void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
+ struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+ u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, nr);
+ u64 released = 0;
- if (global_rsv == block_rsv ||
- block_rsv->space_info != global_rsv->space_info)
- global_rsv = NULL;
- block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes, NULL);
+ released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv,
+ num_bytes, NULL);
+ if (released)
+ trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
+ 0, released, 0);
}
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
@@ -5750,14 +5926,14 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
num_bytes = min(num_bytes,
block_rsv->size - block_rsv->reserved);
block_rsv->reserved += num_bytes;
- sinfo->bytes_may_use += num_bytes;
+ update_bytes_may_use(sinfo, num_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
sinfo->flags, num_bytes,
1);
}
} else if (block_rsv->reserved > block_rsv->size) {
num_bytes = block_rsv->reserved - block_rsv->size;
- sinfo->bytes_may_use -= num_bytes;
+ update_bytes_may_use(sinfo, -num_bytes);
trace_btrfs_space_reservation(fs_info, "space_info",
sinfo->flags, num_bytes, 0);
block_rsv->reserved = block_rsv->size;
@@ -5784,9 +5960,10 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
fs_info->trans_block_rsv.space_info = space_info;
fs_info->empty_block_rsv.space_info = space_info;
fs_info->delayed_block_rsv.space_info = space_info;
+ fs_info->delayed_refs_rsv.space_info = space_info;
- fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
- fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
+ fs_info->extent_root->block_rsv = &fs_info->delayed_refs_rsv;
+ fs_info->csum_root->block_rsv = &fs_info->delayed_refs_rsv;
fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
if (fs_info->quota_root)
@@ -5806,8 +5983,34 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
WARN_ON(fs_info->delayed_block_rsv.size > 0);
WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
+ WARN_ON(fs_info->delayed_refs_rsv.reserved > 0);
+ WARN_ON(fs_info->delayed_refs_rsv.size > 0);
}
+/*
+ * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
+ * @trans - the trans that may have generated delayed refs
+ *
+ * This is to be called anytime we may have adjusted trans->delayed_ref_updates,
+ * it'll calculate the additional size and add it to the delayed_refs_rsv.
+ */
+void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
+ u64 num_bytes;
+
+ if (!trans->delayed_ref_updates)
+ return;
+
+ num_bytes = btrfs_calc_trans_metadata_size(fs_info,
+ trans->delayed_ref_updates);
+ spin_lock(&delayed_rsv->lock);
+ delayed_rsv->size += num_bytes;
+ delayed_rsv->full = 0;
+ spin_unlock(&delayed_rsv->lock);
+ trans->delayed_ref_updates = 0;
+}
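
Reviewer note: the kernel-doc above says the rsv grows by the metadata size of the pending updates, which is a simple multiply. A sketch, assuming an invented per-item cost; ITEM_COST stands in for btrfs_calc_trans_metadata_size().

#include <stdint.h>
#include <stdio.h>

#define ITEM_COST (16 * 1024)	/* invented stand-in for one item's metadata size */

struct rsv_demo {
	uint64_t size;
	int full;
};

/* Illustrative mirror of btrfs_update_delayed_refs_rsv(), locking omitted. */
static void update_delayed_refs_rsv(struct rsv_demo *rsv, unsigned int *updates)
{
	if (!*updates)
		return;
	rsv->size += (uint64_t)*updates * ITEM_COST;
	rsv->full = 0;		/* it just grew, so it cannot be full */
	*updates = 0;		/* consumed, like trans->delayed_ref_updates */
}

int main(void)
{
	struct rsv_demo rsv = { .size = 0, .full = 1 };
	unsigned int updates = 3;

	update_delayed_refs_rsv(&rsv, &updates);
	printf("%llu\n", (unsigned long long)rsv.size);	/* 49152 */
	return 0;
}
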
/*
* To be called after all the new block groups attached to the transaction
@@ -6100,6 +6303,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
u64 old_val;
u64 byte_in_group;
int factor;
+ int ret = 0;
/* block accounting for super block */
spin_lock(&info->delalloc_root_lock);
@@ -6113,8 +6317,10 @@ static int update_block_group(struct btrfs_trans_handle *trans,
while (total) {
cache = btrfs_lookup_block_group(info, bytenr);
- if (!cache)
- return -ENOENT;
+ if (!cache) {
+ ret = -ENOENT;
+ break;
+ }
factor = btrfs_bg_type_to_factor(cache->flags);
/*
@@ -6151,7 +6357,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
old_val -= num_bytes;
btrfs_set_block_group_used(&cache->item, old_val);
cache->pinned += num_bytes;
- cache->space_info->bytes_pinned += num_bytes;
+ update_bytes_pinned(cache->space_info, num_bytes);
cache->space_info->bytes_used -= num_bytes;
cache->space_info->disk_used -= num_bytes * factor;
spin_unlock(&cache->lock);
@@ -6173,6 +6379,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
list_add_tail(&cache->dirty_list,
&trans->transaction->dirty_bgs);
trans->transaction->num_dirty_bgs++;
+ trans->delayed_ref_updates++;
btrfs_get_block_group(cache);
}
spin_unlock(&trans->transaction->dirty_bgs_lock);
@@ -6190,7 +6397,10 @@ static int update_block_group(struct btrfs_trans_handle *trans,
total -= num_bytes;
bytenr += num_bytes;
}
- return 0;
+
+ /* Modified block groups are accounted for in the delayed_refs_rsv. */
+ btrfs_update_delayed_refs_rsv(trans);
+ return ret;
}
static u64 first_logical_byte(struct btrfs_fs_info *fs_info, u64 search_start)
@@ -6222,7 +6432,7 @@ static int pin_down_extent(struct btrfs_fs_info *fs_info,
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
cache->pinned += num_bytes;
- cache->space_info->bytes_pinned += num_bytes;
+ update_bytes_pinned(cache->space_info, num_bytes);
if (reserved) {
cache->reserved -= num_bytes;
cache->space_info->bytes_reserved -= num_bytes;
@@ -6431,7 +6641,7 @@ static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
} else {
cache->reserved += num_bytes;
space_info->bytes_reserved += num_bytes;
- space_info->bytes_may_use -= ram_bytes;
+ update_bytes_may_use(space_info, -ram_bytes);
if (delalloc)
cache->delalloc_bytes += num_bytes;
}
@@ -6587,7 +6797,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
spin_lock(&space_info->lock);
spin_lock(&cache->lock);
cache->pinned -= len;
- space_info->bytes_pinned -= len;
+ update_bytes_pinned(space_info, -len);
trace_btrfs_space_reservation(fs_info, "pinned",
space_info->flags, len, 0);
@@ -6608,7 +6818,7 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
to_add = min(len, global_rsv->size -
global_rsv->reserved);
global_rsv->reserved += to_add;
- space_info->bytes_may_use += to_add;
+ update_bytes_may_use(space_info, to_add);
if (global_rsv->reserved >= global_rsv->size)
global_rsv->full = 1;
trace_btrfs_space_reservation(fs_info,
@@ -6647,9 +6857,11 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
unpin = &fs_info->freed_extents[0];
while (!trans->aborted) {
+ struct extent_state *cached_state = NULL;
+
mutex_lock(&fs_info->unused_bg_unpin_mutex);
ret = find_first_extent_bit(unpin, 0, &start, &end,
- EXTENT_DIRTY, NULL);
+ EXTENT_DIRTY, &cached_state);
if (ret) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
@@ -6659,9 +6871,10 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
ret = btrfs_discard_extent(fs_info, start,
end + 1 - start, NULL);
- clear_extent_dirty(unpin, start, end);
+ clear_extent_dirty(unpin, start, end, &cached_state);
unpin_extent_range(fs_info, start, end, true);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
+ free_extent_state(cached_state);
cond_resched();
}
@@ -6955,12 +7168,8 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
goto out;
- if (head->extent_op) {
- if (!head->must_insert_reserved)
- goto out;
- btrfs_free_delayed_extent_op(head->extent_op);
- head->extent_op = NULL;
- }
+ if (cleanup_extent_op(head) != NULL)
+ goto out;
/*
* waiting for the lock here would deadlock. If someone else has it
@@ -6969,22 +7178,9 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
if (!mutex_trylock(&head->mutex))
goto out;
- /*
- * at this point we have a head with no other entries. Go
- * ahead and process it.
- */
- rb_erase_cached(&head->href_node, &delayed_refs->href_root);
- RB_CLEAR_NODE(&head->href_node);
- atomic_dec(&delayed_refs->num_entries);
-
- /*
- * we don't take a ref on the node because we're removing it from the
- * tree, so we just steal the ref the tree was holding.
- */
- delayed_refs->num_heads--;
- if (head->processing == 0)
- delayed_refs->num_heads_ready--;
+ btrfs_delete_ref_head(delayed_refs, head);
head->processing = 0;
+
spin_unlock(&head->lock);
spin_unlock(&delayed_refs->lock);
@@ -6992,6 +7188,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
if (head->must_insert_reserved)
ret = 1;
+ cleanup_ref_head_accounting(trans, head);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref_head(head);
return ret;
@@ -7239,6 +7436,345 @@ btrfs_release_block_group(struct btrfs_block_group_cache *cache,
}
/*
+ * Structure used internally for find_free_extent() function. Wraps needed
+ * parameters.
+ */
+struct find_free_extent_ctl {
+ /* Basic allocation info */
+ u64 ram_bytes;
+ u64 num_bytes;
+ u64 empty_size;
+ u64 flags;
+ int delalloc;
+
+ /* Where to start the search inside the bg */
+ u64 search_start;
+
+ /* For clustered allocation */
+ u64 empty_cluster;
+
+ bool have_caching_bg;
+ bool orig_have_caching_bg;
+
+ /* RAID index, converted from flags */
+ int index;
+
+ /*
+ * Current loop number, check find_free_extent_update_loop() for details
+ */
+ int loop;
+
+ /*
+ * Whether we're refilling a cluster, if true we need to re-search
+ * current block group but don't try to refill the cluster again.
+ */
+ bool retry_clustered;
+
+ /*
+ * Whether we're updating free space cache, if true we need to re-search
+ * current block group but don't try updating free space cache again.
+ */
+ bool retry_unclustered;
+
+ /* If current block group is cached */
+ int cached;
+
+ /* Max contiguous hole found */
+ u64 max_extent_size;
+
+ /* Total free space from free space cache, not always contiguous */
+ u64 total_free_space;
+
+ /* Found result */
+ u64 found_offset;
+};
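
Reviewer note: find_free_extent() further down fills this control struct field by field; a compact sketch of the same idea with designated initializers may be easier to scan. The struct here is trimmed to a few members and is not the kernel definition.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed, illustrative version of find_free_extent_ctl. */
struct ffe_ctl_demo {
	uint64_t num_bytes;	/* allocation size */
	uint64_t empty_size;	/* extra slack to ask for */
	uint64_t search_start;	/* where to start inside the block group */
	int loop;		/* current pass, see the LOOP_* comment below */
	bool retry_clustered;
	bool retry_unclustered;
	uint64_t found_offset;	/* result, valid when a helper returns 0 */
};

int main(void)
{
	struct ffe_ctl_demo ctl = {
		.num_bytes = 1 << 20,	/* want 1 MiB */
		.loop = 0,
	};	/* everything else zeroed, like the "= {0}" in the later hunk */

	printf("%llu\n", (unsigned long long)ctl.num_bytes);
	return 0;
}
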
+
+
+/*
+ * Helper function for find_free_extent().
+ *
+ * Return -ENOENT to inform the caller that we need to fall back to unclustered mode.
+ * Return -EAGAIN to inform the caller that we need to re-search this block group.
+ * Return >0 to inform the caller that we found nothing.
+ * Return 0 when we have found a location and set ffe_ctl->found_offset.
+ */
+static int find_free_extent_clustered(struct btrfs_block_group_cache *bg,
+ struct btrfs_free_cluster *last_ptr,
+ struct find_free_extent_ctl *ffe_ctl,
+ struct btrfs_block_group_cache **cluster_bg_ret)
+{
+ struct btrfs_fs_info *fs_info = bg->fs_info;
+ struct btrfs_block_group_cache *cluster_bg;
+ u64 aligned_cluster;
+ u64 offset;
+ int ret;
+
+ cluster_bg = btrfs_lock_cluster(bg, last_ptr, ffe_ctl->delalloc);
+ if (!cluster_bg)
+ goto refill_cluster;
+ if (cluster_bg != bg && (cluster_bg->ro ||
+ !block_group_bits(cluster_bg, ffe_ctl->flags)))
+ goto release_cluster;
+
+ offset = btrfs_alloc_from_cluster(cluster_bg, last_ptr,
+ ffe_ctl->num_bytes, cluster_bg->key.objectid,
+ &ffe_ctl->max_extent_size);
+ if (offset) {
+ /* We have a block, we're done */
+ spin_unlock(&last_ptr->refill_lock);
+ trace_btrfs_reserve_extent_cluster(cluster_bg,
+ ffe_ctl->search_start, ffe_ctl->num_bytes);
+ *cluster_bg_ret = cluster_bg;
+ ffe_ctl->found_offset = offset;
+ return 0;
+ }
+ WARN_ON(last_ptr->block_group != cluster_bg);
+
+release_cluster:
+ /*
+ * If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so
+ * let's just skip it and let the allocator find whatever block it can
+ * find. If we reach this point, we will have tried the cluster
+ * allocator plenty of times and not have found anything, so we are
+ * likely way too fragmented for the clustering stuff to find anything.
+ *
+ * However, if the cluster is taken from the current block group,
+ * release the cluster first, so that we stand a better chance of
+ * succeeding in the unclustered allocation.
+ */
+ if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) {
+ spin_unlock(&last_ptr->refill_lock);
+ btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
+ return -ENOENT;
+ }
+
+ /* This cluster didn't work out, free it and start over */
+ btrfs_return_cluster_to_free_space(NULL, last_ptr);
+
+ if (cluster_bg != bg)
+ btrfs_release_block_group(cluster_bg, ffe_ctl->delalloc);
+
+refill_cluster:
+ if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) {
+ spin_unlock(&last_ptr->refill_lock);
+ return -ENOENT;
+ }
+
+ aligned_cluster = max_t(u64,
+ ffe_ctl->empty_cluster + ffe_ctl->empty_size,
+ bg->full_stripe_len);
+ ret = btrfs_find_space_cluster(fs_info, bg, last_ptr,
+ ffe_ctl->search_start, ffe_ctl->num_bytes,
+ aligned_cluster);
+ if (ret == 0) {
+ /* Now pull our allocation out of this cluster */
+ offset = btrfs_alloc_from_cluster(bg, last_ptr,
+ ffe_ctl->num_bytes, ffe_ctl->search_start,
+ &ffe_ctl->max_extent_size);
+ if (offset) {
+ /* We found one, proceed */
+ spin_unlock(&last_ptr->refill_lock);
+ trace_btrfs_reserve_extent_cluster(bg,
+ ffe_ctl->search_start,
+ ffe_ctl->num_bytes);
+ ffe_ctl->found_offset = offset;
+ return 0;
+ }
+ } else if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT &&
+ !ffe_ctl->retry_clustered) {
+ spin_unlock(&last_ptr->refill_lock);
+
+ ffe_ctl->retry_clustered = true;
+ wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
+ ffe_ctl->empty_cluster + ffe_ctl->empty_size);
+ return -EAGAIN;
+ }
+ /*
+ * At this point we either didn't find a cluster or we weren't able to
+ * allocate a block from our cluster. Free the cluster we've been
+ * trying to use, and go to the next block group.
+ */
+ btrfs_return_cluster_to_free_space(NULL, last_ptr);
+ spin_unlock(&last_ptr->refill_lock);
+ return 1;
+}
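
Reviewer note: the four-way return convention above implies a specific caller-side dispatch. A hedged sketch of how a search loop consumes those codes; the stub and its retry behavior are invented for illustration only.

#include <errno.h>
#include <stdio.h>

/* Stub with the same return convention as find_free_extent_clustered(). */
static int clustered_alloc_stub(int attempt)
{
	return attempt == 0 ? -EAGAIN : 0;	/* re-search once, then succeed */
}

int main(void)
{
	int attempt = 0;

	for (;;) {
		int ret = clustered_alloc_stub(attempt++);

		if (ret == 0) {			/* found: ffe_ctl->found_offset is set */
			printf("found after %d attempts\n", attempt);
			break;
		} else if (ret == -EAGAIN) {	/* re-search this block group */
			continue;
		} else if (ret == -ENOENT) {	/* fall back to unclustered mode */
			break;
		} else {			/* ret > 0: nothing here, next group */
			break;
		}
	}
	return 0;
}
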
+
+/*
+ * Return >0 to inform the caller that we found nothing.
+ * Return 0 when we found a free extent and set ffe_ctl->found_offset.
+ * Return -EAGAIN to inform the caller that we need to re-search this block group.
+ */
+static int find_free_extent_unclustered(struct btrfs_block_group_cache *bg,
+ struct btrfs_free_cluster *last_ptr,
+ struct find_free_extent_ctl *ffe_ctl)
+{
+ u64 offset;
+
+ /*
+ * We are doing an unclustered allocation, set the fragmented flag so
+ * we don't bother trying to set up a cluster again until we get more
+ * space.
+ */
+ if (unlikely(last_ptr)) {
+ spin_lock(&last_ptr->lock);
+ last_ptr->fragmented = 1;
+ spin_unlock(&last_ptr->lock);
+ }
+ if (ffe_ctl->cached) {
+ struct btrfs_free_space_ctl *free_space_ctl;
+
+ free_space_ctl = bg->free_space_ctl;
+ spin_lock(&free_space_ctl->tree_lock);
+ if (free_space_ctl->free_space <
+ ffe_ctl->num_bytes + ffe_ctl->empty_cluster +
+ ffe_ctl->empty_size) {
+ ffe_ctl->total_free_space = max_t(u64,
+ ffe_ctl->total_free_space,
+ free_space_ctl->free_space);
+ spin_unlock(&free_space_ctl->tree_lock);
+ return 1;
+ }
+ spin_unlock(&free_space_ctl->tree_lock);
+ }
+
+ offset = btrfs_find_space_for_alloc(bg, ffe_ctl->search_start,
+ ffe_ctl->num_bytes, ffe_ctl->empty_size,
+ &ffe_ctl->max_extent_size);
+
+ /*
+ * If we didn't find a chunk, and we haven't failed on this block group
+ * before, and this block group is in the middle of caching and we are
+ * ok with waiting, then go ahead and wait for progress to be made, and
+ * set @retry_unclustered to true.
+ *
+ * If @retry_unclustered is true then we've already waited on this
+ * block group once and should move on to the next block group.
+ */
+ if (!offset && !ffe_ctl->retry_unclustered && !ffe_ctl->cached &&
+ ffe_ctl->loop > LOOP_CACHING_NOWAIT) {
+ wait_block_group_cache_progress(bg, ffe_ctl->num_bytes +
+ ffe_ctl->empty_size);
+ ffe_ctl->retry_unclustered = true;
+ return -EAGAIN;
+ } else if (!offset) {
+ return 1;
+ }
+ ffe_ctl->found_offset = offset;
+ return 0;
+}
+
+/*
+ * Return >0 means the caller needs to re-search for a free extent.
+ * Return 0 means we have the needed free extent.
+ * Return <0 means we failed to locate any free extent.
+ */
+static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
+ struct btrfs_free_cluster *last_ptr,
+ struct btrfs_key *ins,
+ struct find_free_extent_ctl *ffe_ctl,
+ int full_search, bool use_cluster)
+{
+ struct btrfs_root *root = fs_info->extent_root;
+ int ret;
+
+ if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) &&
+ ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg)
+ ffe_ctl->orig_have_caching_bg = true;
+
+ if (!ins->objectid && ffe_ctl->loop >= LOOP_CACHING_WAIT &&
+ ffe_ctl->have_caching_bg)
+ return 1;
+
+ if (!ins->objectid && ++(ffe_ctl->index) < BTRFS_NR_RAID_TYPES)
+ return 1;
+
+ if (ins->objectid) {
+ if (!use_cluster && last_ptr) {
+ spin_lock(&last_ptr->lock);
+ last_ptr->window_start = ins->objectid;
+ spin_unlock(&last_ptr->lock);
+ }
+ return 0;
+ }
+
+ /*
+ * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
+ * caching kthreads as we move along
+ * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
+ * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
+ * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
+ * again
+ */
+ if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) {
+ ffe_ctl->index = 0;
+ if (ffe_ctl->loop == LOOP_CACHING_NOWAIT) {
+ /*
+ * We want to skip the LOOP_CACHING_WAIT step if we
+ * don't have any uncached bgs and we've already done a
+ * full search through.
+ */
+ if (ffe_ctl->orig_have_caching_bg || !full_search)
+ ffe_ctl->loop = LOOP_CACHING_WAIT;
+ else
+ ffe_ctl->loop = LOOP_ALLOC_CHUNK;
+ } else {
+ ffe_ctl->loop++;
+ }
+
+ if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) {
+ struct btrfs_trans_handle *trans;
+ int exist = 0;
+
+ trans = current->journal_info;
+ if (trans)
+ exist = 1;
+ else
+ trans = btrfs_join_transaction(root);
+
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ return ret;
+ }
+
+ ret = do_chunk_alloc(trans, ffe_ctl->flags,
+ CHUNK_ALLOC_FORCE);
+
+ /*
+ * If we can't allocate a new chunk we've already looped
+ * through at least once, move on to the NO_EMPTY_SIZE
+ * case.
+ */
+ if (ret == -ENOSPC)
+ ffe_ctl->loop = LOOP_NO_EMPTY_SIZE;
+
+ /* Do not bail out on ENOSPC since we can do more. */
+ if (ret < 0 && ret != -ENOSPC)
+ btrfs_abort_transaction(trans, ret);
+ else
+ ret = 0;
+ if (!exist)
+ btrfs_end_transaction(trans);
+ if (ret)
+ return ret;
+ }
+
+ if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) {
+ /*
+ * Don't loop again if we already have no empty_size and
+ * no empty_cluster.
+ */
+ if (ffe_ctl->empty_size == 0 &&
+ ffe_ctl->empty_cluster == 0)
+ return -ENOSPC;
+ ffe_ctl->empty_size = 0;
+ ffe_ctl->empty_cluster = 0;
+ }
+ return 1;
+ }
+ return -ENOSPC;
+}
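
The stage escalation this helper encodes (LOOP_CACHING_NOWAIT -> LOOP_CACHING_WAIT -> LOOP_ALLOC_CHUNK -> LOOP_NO_EMPTY_SIZE, with a return of >0 telling the caller to jump back to its search label) can be modeled standalone. A simplified userspace sketch, with -28 standing in for -ENOSPC:

    /* Userspace model of the loop escalation; simplified for illustration. */
    #include <stdio.h>

    enum { LOOP_CACHING_NOWAIT, LOOP_CACHING_WAIT, LOOP_ALLOC_CHUNK,
           LOOP_NO_EMPTY_SIZE };

    /* Returns >0 to re-search, 0 on success, <0 on hard failure. */
    static int update_loop(int *loop, int found, int have_caching_bg,
                           int full_search)
    {
        if (found)
            return 0;
        if (*loop < LOOP_NO_EMPTY_SIZE) {
            if (*loop == LOOP_CACHING_NOWAIT) {
                /* skip the WAIT stage if a full search saw no caching bgs */
                if (have_caching_bg || !full_search)
                    *loop = LOOP_CACHING_WAIT;
                else
                    *loop = LOOP_ALLOC_CHUNK;
            } else {
                (*loop)++;
            }
            return 1;                   /* caller jumps back to search */
        }
        return -28;                     /* stands in for -ENOSPC */
    }

    int main(void)
    {
        int loop = LOOP_CACHING_NOWAIT, ret;

        while ((ret = update_loop(&loop, 0, 0, 1)) > 0)
            printf("re-search at stage %d\n", loop);
        printf("done: %d\n", ret);
        return 0;
    }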
+
+/*
 * walks the btree of allocated extents and finds a hole of a given size.
* The key ins is changed to record the hole:
* ins->objectid == start position
@@ -7248,6 +7784,20 @@ btrfs_release_block_group(struct btrfs_block_group_cache *cache,
*
* If there is no suitable free space, we will record the max size of
* the free space extent currently.
+ *
+ * The overall logic and call chain:
+ *
+ * find_free_extent()
+ * |- Iterate through all block groups
+ * | |- Get a valid block group
+ * | |- Try to do clustered allocation in that block group
+ * | |- Try to do unclustered allocation in that block group
+ * | |- Check if the result is valid
+ * | | |- If valid, then exit
+ * | |- Jump to next block group
+ * |
+ * |- Push harder to find free extents
+ * |- If not found, re-iterate all block groups
*/
static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
u64 ram_bytes, u64 num_bytes, u64 empty_size,
@@ -7255,24 +7805,28 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
u64 flags, int delalloc)
{
int ret = 0;
- struct btrfs_root *root = fs_info->extent_root;
struct btrfs_free_cluster *last_ptr = NULL;
struct btrfs_block_group_cache *block_group = NULL;
- u64 search_start = 0;
- u64 max_extent_size = 0;
- u64 max_free_space = 0;
- u64 empty_cluster = 0;
+ struct find_free_extent_ctl ffe_ctl = {0};
struct btrfs_space_info *space_info;
- int loop = 0;
- int index = btrfs_bg_flags_to_raid_index(flags);
- bool failed_cluster_refill = false;
- bool failed_alloc = false;
bool use_cluster = true;
- bool have_caching_bg = false;
- bool orig_have_caching_bg = false;
bool full_search = false;
WARN_ON(num_bytes < fs_info->sectorsize);
+
+ ffe_ctl.ram_bytes = ram_bytes;
+ ffe_ctl.num_bytes = num_bytes;
+ ffe_ctl.empty_size = empty_size;
+ ffe_ctl.flags = flags;
+ ffe_ctl.search_start = 0;
+ ffe_ctl.retry_clustered = false;
+ ffe_ctl.retry_unclustered = false;
+ ffe_ctl.delalloc = delalloc;
+ ffe_ctl.index = btrfs_bg_flags_to_raid_index(flags);
+ ffe_ctl.have_caching_bg = false;
+ ffe_ctl.orig_have_caching_bg = false;
+ ffe_ctl.found_offset = 0;
+
ins->type = BTRFS_EXTENT_ITEM_KEY;
ins->objectid = 0;
ins->offset = 0;
@@ -7308,7 +7862,8 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
spin_unlock(&space_info->lock);
}
- last_ptr = fetch_cluster_info(fs_info, space_info, &empty_cluster);
+ last_ptr = fetch_cluster_info(fs_info, space_info,
+ &ffe_ctl.empty_cluster);
if (last_ptr) {
spin_lock(&last_ptr->lock);
if (last_ptr->block_group)
@@ -7325,10 +7880,12 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
spin_unlock(&last_ptr->lock);
}
- search_start = max(search_start, first_logical_byte(fs_info, 0));
- search_start = max(search_start, hint_byte);
- if (search_start == hint_byte) {
- block_group = btrfs_lookup_block_group(fs_info, search_start);
+ ffe_ctl.search_start = max(ffe_ctl.search_start,
+ first_logical_byte(fs_info, 0));
+ ffe_ctl.search_start = max(ffe_ctl.search_start, hint_byte);
+ if (ffe_ctl.search_start == hint_byte) {
+ block_group = btrfs_lookup_block_group(fs_info,
+ ffe_ctl.search_start);
/*
* we don't want to use the block group if it doesn't match our
 * allocation bits, or if it's not cached.
@@ -7350,7 +7907,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
btrfs_put_block_group(block_group);
up_read(&space_info->groups_sem);
} else {
- index = btrfs_bg_flags_to_raid_index(
+ ffe_ctl.index = btrfs_bg_flags_to_raid_index(
block_group->flags);
btrfs_lock_block_group(block_group, delalloc);
goto have_block_group;
@@ -7360,21 +7917,19 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
}
}
search:
- have_caching_bg = false;
- if (index == 0 || index == btrfs_bg_flags_to_raid_index(flags))
+ ffe_ctl.have_caching_bg = false;
+ if (ffe_ctl.index == btrfs_bg_flags_to_raid_index(flags) ||
+ ffe_ctl.index == 0)
full_search = true;
down_read(&space_info->groups_sem);
- list_for_each_entry(block_group, &space_info->block_groups[index],
- list) {
- u64 offset;
- int cached;
-
+ list_for_each_entry(block_group,
+ &space_info->block_groups[ffe_ctl.index], list) {
/* If the block group is read-only, we can skip it entirely. */
if (unlikely(block_group->ro))
continue;
btrfs_grab_block_group(block_group, delalloc);
- search_start = block_group->key.objectid;
+ ffe_ctl.search_start = block_group->key.objectid;
/*
* this can happen if we end up cycling through all the
@@ -7398,9 +7953,9 @@ search:
}
have_block_group:
- cached = block_group_cache_done(block_group);
- if (unlikely(!cached)) {
- have_caching_bg = true;
+ ffe_ctl.cached = block_group_cache_done(block_group);
+ if (unlikely(!ffe_ctl.cached)) {
+ ffe_ctl.have_caching_bg = true;
ret = cache_block_group(block_group, 0);
BUG_ON(ret < 0);
ret = 0;
@@ -7414,322 +7969,92 @@ have_block_group:
 * let's look there
*/
if (last_ptr && use_cluster) {
- struct btrfs_block_group_cache *used_block_group;
- unsigned long aligned_cluster;
- /*
- * the refill lock keeps out other
- * people trying to start a new cluster
- */
- used_block_group = btrfs_lock_cluster(block_group,
- last_ptr,
- delalloc);
- if (!used_block_group)
- goto refill_cluster;
-
- if (used_block_group != block_group &&
- (used_block_group->ro ||
- !block_group_bits(used_block_group, flags)))
- goto release_cluster;
-
- offset = btrfs_alloc_from_cluster(used_block_group,
- last_ptr,
- num_bytes,
- used_block_group->key.objectid,
- &max_extent_size);
- if (offset) {
- /* we have a block, we're done */
- spin_unlock(&last_ptr->refill_lock);
- trace_btrfs_reserve_extent_cluster(
- used_block_group,
- search_start, num_bytes);
- if (used_block_group != block_group) {
- btrfs_release_block_group(block_group,
- delalloc);
- block_group = used_block_group;
- }
- goto checks;
- }
-
- WARN_ON(last_ptr->block_group != used_block_group);
-release_cluster:
- /* If we are on LOOP_NO_EMPTY_SIZE, we can't
- * set up a new clusters, so lets just skip it
- * and let the allocator find whatever block
- * it can find. If we reach this point, we
- * will have tried the cluster allocator
- * plenty of times and not have found
- * anything, so we are likely way too
- * fragmented for the clustering stuff to find
- * anything.
- *
- * However, if the cluster is taken from the
- * current block group, release the cluster
- * first, so that we stand a better chance of
- * succeeding in the unclustered
- * allocation. */
- if (loop >= LOOP_NO_EMPTY_SIZE &&
- used_block_group != block_group) {
- spin_unlock(&last_ptr->refill_lock);
- btrfs_release_block_group(used_block_group,
- delalloc);
- goto unclustered_alloc;
- }
+ struct btrfs_block_group_cache *cluster_bg = NULL;
- /*
- * this cluster didn't work out, free it and
- * start over
- */
- btrfs_return_cluster_to_free_space(NULL, last_ptr);
-
- if (used_block_group != block_group)
- btrfs_release_block_group(used_block_group,
- delalloc);
-refill_cluster:
- if (loop >= LOOP_NO_EMPTY_SIZE) {
- spin_unlock(&last_ptr->refill_lock);
- goto unclustered_alloc;
- }
-
- aligned_cluster = max_t(unsigned long,
- empty_cluster + empty_size,
- block_group->full_stripe_len);
+ ret = find_free_extent_clustered(block_group, last_ptr,
+ &ffe_ctl, &cluster_bg);
- /* allocate a cluster in this block group */
- ret = btrfs_find_space_cluster(fs_info, block_group,
- last_ptr, search_start,
- num_bytes,
- aligned_cluster);
if (ret == 0) {
- /*
- * now pull our allocation out of this
- * cluster
- */
- offset = btrfs_alloc_from_cluster(block_group,
- last_ptr,
- num_bytes,
- search_start,
- &max_extent_size);
- if (offset) {
- /* we found one, proceed */
- spin_unlock(&last_ptr->refill_lock);
- trace_btrfs_reserve_extent_cluster(
- block_group, search_start,
- num_bytes);
- goto checks;
+ if (cluster_bg && cluster_bg != block_group) {
+ btrfs_release_block_group(block_group,
+ delalloc);
+ block_group = cluster_bg;
}
- } else if (!cached && loop > LOOP_CACHING_NOWAIT
- && !failed_cluster_refill) {
- spin_unlock(&last_ptr->refill_lock);
-
- failed_cluster_refill = true;
- wait_block_group_cache_progress(block_group,
- num_bytes + empty_cluster + empty_size);
+ goto checks;
+ } else if (ret == -EAGAIN) {
goto have_block_group;
- }
-
- /*
- * at this point we either didn't find a cluster
- * or we weren't able to allocate a block from our
- * cluster. Free the cluster we've been trying
- * to use, and go to the next block group
- */
- btrfs_return_cluster_to_free_space(NULL, last_ptr);
- spin_unlock(&last_ptr->refill_lock);
- goto loop;
- }
-
-unclustered_alloc:
- /*
- * We are doing an unclustered alloc, set the fragmented flag so
- * we don't bother trying to setup a cluster again until we get
- * more space.
- */
- if (unlikely(last_ptr)) {
- spin_lock(&last_ptr->lock);
- last_ptr->fragmented = 1;
- spin_unlock(&last_ptr->lock);
- }
- if (cached) {
- struct btrfs_free_space_ctl *ctl =
- block_group->free_space_ctl;
-
- spin_lock(&ctl->tree_lock);
- if (ctl->free_space <
- num_bytes + empty_cluster + empty_size) {
- max_free_space = max(max_free_space,
- ctl->free_space);
- spin_unlock(&ctl->tree_lock);
+ } else if (ret > 0) {
goto loop;
}
- spin_unlock(&ctl->tree_lock);
+ /* ret == -ENOENT case falls through */
}
- offset = btrfs_find_space_for_alloc(block_group, search_start,
- num_bytes, empty_size,
- &max_extent_size);
- /*
- * If we didn't find a chunk, and we haven't failed on this
- * block group before, and this block group is in the middle of
- * caching and we are ok with waiting, then go ahead and wait
- * for progress to be made, and set failed_alloc to true.
- *
- * If failed_alloc is true then we've already waited on this
- * block group once and should move on to the next block group.
- */
- if (!offset && !failed_alloc && !cached &&
- loop > LOOP_CACHING_NOWAIT) {
- wait_block_group_cache_progress(block_group,
- num_bytes + empty_size);
- failed_alloc = true;
+ ret = find_free_extent_unclustered(block_group, last_ptr,
+ &ffe_ctl);
+ if (ret == -EAGAIN)
goto have_block_group;
- } else if (!offset) {
+ else if (ret > 0)
goto loop;
- }
+ /* ret == 0 case falls through */
checks:
- search_start = round_up(offset, fs_info->stripesize);
+ ffe_ctl.search_start = round_up(ffe_ctl.found_offset,
+ fs_info->stripesize);
/* move on to the next group */
- if (search_start + num_bytes >
+ if (ffe_ctl.search_start + num_bytes >
block_group->key.objectid + block_group->key.offset) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ btrfs_add_free_space(block_group, ffe_ctl.found_offset,
+ num_bytes);
goto loop;
}
- if (offset < search_start)
- btrfs_add_free_space(block_group, offset,
- search_start - offset);
+ if (ffe_ctl.found_offset < ffe_ctl.search_start)
+ btrfs_add_free_space(block_group, ffe_ctl.found_offset,
+ ffe_ctl.search_start - ffe_ctl.found_offset);
ret = btrfs_add_reserved_bytes(block_group, ram_bytes,
num_bytes, delalloc);
if (ret == -EAGAIN) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ btrfs_add_free_space(block_group, ffe_ctl.found_offset,
+ num_bytes);
goto loop;
}
btrfs_inc_block_group_reservations(block_group);
 /* we are all good, let's return */
- ins->objectid = search_start;
+ ins->objectid = ffe_ctl.search_start;
ins->offset = num_bytes;
- trace_btrfs_reserve_extent(block_group, search_start, num_bytes);
+ trace_btrfs_reserve_extent(block_group, ffe_ctl.search_start,
+ num_bytes);
btrfs_release_block_group(block_group, delalloc);
break;
loop:
- failed_cluster_refill = false;
- failed_alloc = false;
+ ffe_ctl.retry_clustered = false;
+ ffe_ctl.retry_unclustered = false;
BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) !=
- index);
+ ffe_ctl.index);
btrfs_release_block_group(block_group, delalloc);
cond_resched();
}
up_read(&space_info->groups_sem);
- if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
- && !orig_have_caching_bg)
- orig_have_caching_bg = true;
-
- if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
- goto search;
-
- if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
+ ret = find_free_extent_update_loop(fs_info, last_ptr, ins, &ffe_ctl,
+ full_search, use_cluster);
+ if (ret > 0)
goto search;
- /*
- * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
- * caching kthreads as we move along
- * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
- * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
- * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
- * again
- */
- if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
- index = 0;
- if (loop == LOOP_CACHING_NOWAIT) {
- /*
- * We want to skip the LOOP_CACHING_WAIT step if we
- * don't have any uncached bgs and we've already done a
- * full search through.
- */
- if (orig_have_caching_bg || !full_search)
- loop = LOOP_CACHING_WAIT;
- else
- loop = LOOP_ALLOC_CHUNK;
- } else {
- loop++;
- }
-
- if (loop == LOOP_ALLOC_CHUNK) {
- struct btrfs_trans_handle *trans;
- int exist = 0;
-
- trans = current->journal_info;
- if (trans)
- exist = 1;
- else
- trans = btrfs_join_transaction(root);
-
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- goto out;
- }
-
- ret = do_chunk_alloc(trans, flags, CHUNK_ALLOC_FORCE);
-
- /*
- * If we can't allocate a new chunk we've already looped
- * through at least once, move on to the NO_EMPTY_SIZE
- * case.
- */
- if (ret == -ENOSPC)
- loop = LOOP_NO_EMPTY_SIZE;
-
- /*
- * Do not bail out on ENOSPC since we
- * can do more things.
- */
- if (ret < 0 && ret != -ENOSPC)
- btrfs_abort_transaction(trans, ret);
- else
- ret = 0;
- if (!exist)
- btrfs_end_transaction(trans);
- if (ret)
- goto out;
- }
-
- if (loop == LOOP_NO_EMPTY_SIZE) {
- /*
- * Don't loop again if we already have no empty_size and
- * no empty_cluster.
- */
- if (empty_size == 0 &&
- empty_cluster == 0) {
- ret = -ENOSPC;
- goto out;
- }
- empty_size = 0;
- empty_cluster = 0;
- }
-
- goto search;
- } else if (!ins->objectid) {
- ret = -ENOSPC;
- } else if (ins->objectid) {
- if (!use_cluster && last_ptr) {
- spin_lock(&last_ptr->lock);
- last_ptr->window_start = ins->objectid;
- spin_unlock(&last_ptr->lock);
- }
- ret = 0;
- }
-out:
if (ret == -ENOSPC) {
- if (!max_extent_size)
- max_extent_size = max_free_space;
+ /*
+ * Use ffe_ctl->total_free_space as fallback if we can't find
+ * any contiguous hole.
+ */
+ if (!ffe_ctl.max_extent_size)
+ ffe_ctl.max_extent_size = ffe_ctl.total_free_space;
spin_lock(&space_info->lock);
- space_info->max_extent_size = max_extent_size;
+ space_info->max_extent_size = ffe_ctl.max_extent_size;
spin_unlock(&space_info->lock);
- ins->offset = max_extent_size;
+ ins->offset = ffe_ctl.max_extent_size;
}
return ret;
}
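
One consequence of the ENOSPC path above is worth spelling out: the allocator hands the largest hole it saw (or, failing that, the total free space) back through ins->offset, so callers can retry with a smaller request. A toy model of that hint, under illustrative names:

    /* Sketch: on ENOSPC, report the largest hole seen, falling back to the
     * total free space if no hole was ever measured.  Illustrative only. */
    #include <stdio.h>

    struct ffe { unsigned long long max_extent_size, total_free_space; };

    static unsigned long long enospc_hint(struct ffe *c)
    {
        if (!c->max_extent_size)
            c->max_extent_size = c->total_free_space;
        return c->max_extent_size;   /* stored in ins->offset for the caller */
    }

    int main(void)
    {
        struct ffe c = { 0, 1 << 20 };

        printf("retry with at most %llu bytes\n", enospc_hint(&c));
        return 0;
    }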
@@ -8169,13 +8494,13 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
btrfs_set_header_generation(buf, trans->transid);
btrfs_set_header_backref_rev(buf, BTRFS_MIXED_BACKREF_REV);
btrfs_set_header_owner(buf, owner);
- write_extent_buffer_fsid(buf, fs_info->fsid);
+ write_extent_buffer_fsid(buf, fs_info->fs_devices->metadata_uuid);
write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
buf->log_index = root->log_transid % 2;
/*
* we allow two log transactions at a time, use different
- * EXENT bit to differentiate dirty pages.
+ * EXTENT bit to differentiate dirty pages.
*/
if (buf->log_index == 0)
set_extent_dirty(&root->dirty_log_pages, buf->start,
@@ -8221,7 +8546,12 @@ again:
goto again;
}
- if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
+ /*
+ * The global reserve still exists to save us from ourselves, so don't
+ * warn_on if we are short on our delayed refs reserve.
+ */
+ if (block_rsv->type != BTRFS_BLOCK_RSV_DELREFS &&
+ btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL * 10,
/*DEFAULT_RATELIMIT_BURST*/ 1);
@@ -8544,7 +8874,6 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
u64 bytenr;
u64 generation;
u64 parent;
- u32 blocksize;
struct btrfs_key key;
struct btrfs_key first_key;
struct extent_buffer *next;
@@ -8569,7 +8898,6 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
btrfs_node_key_to_cpu(path->nodes[level], &first_key,
path->slots[level]);
- blocksize = fs_info->nodesize;
next = find_extent_buffer(fs_info, bytenr);
if (!next) {
@@ -8693,7 +9021,7 @@ skip:
ret);
}
}
- ret = btrfs_free_extent(trans, root, bytenr, blocksize,
+ ret = btrfs_free_extent(trans, root, bytenr, fs_info->nodesize,
parent, root->root_key.objectid,
level - 1, 0);
if (ret)
@@ -8944,9 +9272,22 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
goto out_free;
}
+ err = btrfs_run_delayed_items(trans);
+ if (err)
+ goto out_end_trans;
+
if (block_rsv)
trans->block_rsv = block_rsv;
+ /*
+ * This will help us catch people modifying the fs tree while we're
+ * dropping it. It is unsafe to mess with the fs tree while it's being
+ * dropped as we unlock the root node and parent nodes as we walk down
+ * the tree, assuming nothing will change. If something does change
+ * then we'll have stale information and drop references to blocks we've
+ * already dropped.
+ */
+ set_bit(BTRFS_ROOT_DELETING, &root->state);
if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
level = btrfs_header_level(root->node);
path->nodes[level] = btrfs_lock_root_node(root);
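
The BTRFS_ROOT_DELETING bit set above is only the producer side; this hunk does not show where it is consumed. Purely to illustrate the intent — catching concurrent modification of a tree that is being dropped — a hypothetical checker could look like this (names invented, not the kernel API):

    /* Hypothetical consumer of the DELETING bit; shown only for intent. */
    #include <assert.h>

    struct root { unsigned long state; };
    #define ROOT_DELETING 1UL

    static void modify_fs_tree(struct root *root)
    {
        /* anyone modifying the tree can now catch misuse */
        assert(!(root->state & ROOT_DELETING));
        /* ... proceed with the modification ... */
    }

    int main(void)
    {
        struct root r = { 0 };

        modify_fs_tree(&r);     /* fine: this root is not being dropped */
        return 0;
    }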
@@ -9421,7 +9762,7 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
}
/*
- * checks to see if its even possible to relocate this block group.
+ * Checks to see if it's even possible to relocate this block group.
*
 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
* ok to go ahead and try.
@@ -10049,7 +10390,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
* check for two cases, either we are full, and therefore
* don't need to bother with the caching work since we won't
* find any space, or we are empty, and we can just add all
- * the space in and be done with it. This saves us _alot_ of
+ * the space in and be done with it. This saves us _a_lot_ of
* time, particularly in the full case.
*/
if (found_key.offset == btrfs_block_group_used(&cache->item)) {
@@ -10154,6 +10495,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
add_block_group_free_space(trans, block_group);
/* already aborted the transaction if it failed. */
next:
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
list_del_init(&block_group->bg_list);
}
btrfs_trans_release_chunk_metadata(trans);
@@ -10231,6 +10573,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
link_block_group(cache);
list_add_tail(&cache->bg_list, &trans->new_bgs);
+ trans->delayed_ref_updates++;
+ btrfs_update_delayed_refs_rsv(trans);
set_avail_alloc_bits(fs_info, type);
return 0;
@@ -10268,6 +10612,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
int factor;
struct btrfs_caching_control *caching_ctl = NULL;
bool remove_em;
+ bool remove_rsv = false;
block_group = btrfs_lookup_block_group(fs_info, group_start);
BUG_ON(!block_group);
@@ -10315,7 +10660,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
mutex_lock(&trans->transaction->cache_write_mutex);
/*
- * make sure our free spache cache IO is done before remove the
+ * Make sure our free space cache IO is done before removing the
* free space inode
*/
spin_lock(&trans->transaction->dirty_bgs_lock);
@@ -10332,6 +10677,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
if (!list_empty(&block_group->dirty_list)) {
list_del_init(&block_group->dirty_list);
+ remove_rsv = true;
btrfs_put_block_group(block_group);
}
spin_unlock(&trans->transaction->dirty_bgs_lock);
@@ -10541,6 +10887,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
ret = btrfs_del_item(trans, root, path);
out:
+ if (remove_rsv)
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
btrfs_free_path(path);
return ret;
}
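
The btrfs_delayed_refs_rsv_release(fs_info, 1) calls added in this area pair with the trans->delayed_ref_updates++ / btrfs_update_delayed_refs_rsv() charge taken when a block group is queued. The invariant — one unit charged per pending block group, one unit released when it is processed or removed — can be expressed as a toy counter (illustrative only, not the kernel accounting):

    /* Toy model of the reserve pairing: charge on queue, release on
     * create/remove.  Just the invariant, nothing more. */
    #include <assert.h>
    #include <stdio.h>

    static int rsv;

    static void charge(void)  { rsv++; }   /* block group queued */
    static void release(void) { rsv--; }   /* created or removed */

    int main(void)
    {
        charge(); charge();     /* two new block groups queued */
        release(); release();   /* both processed */
        assert(rsv == 0);       /* the reserve must balance out */
        printf("balanced\n");
        return 0;
    }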
@@ -10698,7 +11046,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
spin_lock(&space_info->lock);
spin_lock(&block_group->lock);
- space_info->bytes_pinned -= block_group->pinned;
+ update_bytes_pinned(space_info, -block_group->pinned);
space_info->bytes_readonly += block_group->pinned;
percpu_counter_add_batch(&space_info->total_bytes_pinned,
-block_group->pinned,
@@ -10829,7 +11177,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
if (!blk_queue_discard(bdev_get_queue(device->bdev)))
return 0;
- /* Not writeable = nothing to do. */
+ /* Not writable = nothing to do. */
if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
return 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d228f706ff3e..fc126b92ea59 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -89,9 +89,18 @@ void btrfs_leak_debug_check(void)
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
struct extent_io_tree *tree, u64 start, u64 end)
{
- if (tree->ops && tree->ops->check_extent_io_range)
- tree->ops->check_extent_io_range(tree->private_data, caller,
- start, end);
+ struct inode *inode = tree->private_data;
+ u64 isize;
+
+ if (!inode || !is_data_inode(inode))
+ return;
+
+ isize = i_size_read(inode);
+ if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
+ btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
+ "%s: ino %llu isize %llu odd range [%llu,%llu]",
+ caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
+ }
}
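
The open-coded check above flags "odd" ranges: an inclusive end should normally be odd (4095, 8191, ...) or exactly isize - 1, so an even end past the first page is suspicious. A standalone version of the heuristic for trying out sample values:

    /* Standalone copy of the "odd range" heuristic, illustrative only. */
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    static int odd_range(unsigned long long end, unsigned long long isize)
    {
        return end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1;
    }

    int main(void)
    {
        printf("%d\n", odd_range(8191, 10000));  /* 0: proper inclusive end */
        printf("%d\n", odd_range(8192, 10000));  /* 1: suspicious even end */
        return 0;
    }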
#else
#define btrfs_leak_debug_add(new, head) do {} while (0)
@@ -344,13 +353,6 @@ static inline struct rb_node *tree_search(struct extent_io_tree *tree,
return tree_search_for_insert(tree, offset, NULL, NULL);
}
-static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
- struct extent_state *other)
-{
- if (tree->ops && tree->ops->merge_extent_hook)
- tree->ops->merge_extent_hook(tree->private_data, new, other);
-}
-
/*
* utility function to look for merge candidates inside a given range.
* Any extents with matching state are merged together into a single
@@ -374,7 +376,10 @@ static void merge_state(struct extent_io_tree *tree,
other = rb_entry(other_node, struct extent_state, rb_node);
if (other->end == state->start - 1 &&
other->state == state->state) {
- merge_cb(tree, state, other);
+ if (tree->private_data &&
+ is_data_inode(tree->private_data))
+ btrfs_merge_delalloc_extent(tree->private_data,
+ state, other);
state->start = other->start;
rb_erase(&other->rb_node, &tree->state);
RB_CLEAR_NODE(&other->rb_node);
@@ -386,7 +391,10 @@ static void merge_state(struct extent_io_tree *tree,
other = rb_entry(other_node, struct extent_state, rb_node);
if (other->start == state->end + 1 &&
other->state == state->state) {
- merge_cb(tree, state, other);
+ if (tree->private_data &&
+ is_data_inode(tree->private_data))
+ btrfs_merge_delalloc_extent(tree->private_data,
+ state, other);
state->end = other->end;
rb_erase(&other->rb_node, &tree->state);
RB_CLEAR_NODE(&other->rb_node);
@@ -395,20 +403,6 @@ static void merge_state(struct extent_io_tree *tree,
}
}
-static void set_state_cb(struct extent_io_tree *tree,
- struct extent_state *state, unsigned *bits)
-{
- if (tree->ops && tree->ops->set_bit_hook)
- tree->ops->set_bit_hook(tree->private_data, state, bits);
-}
-
-static void clear_state_cb(struct extent_io_tree *tree,
- struct extent_state *state, unsigned *bits)
-{
- if (tree->ops && tree->ops->clear_bit_hook)
- tree->ops->clear_bit_hook(tree->private_data, state, bits);
-}
-
static void set_state_bits(struct extent_io_tree *tree,
struct extent_state *state, unsigned *bits,
struct extent_changeset *changeset);
@@ -451,13 +445,6 @@ static int insert_state(struct extent_io_tree *tree,
return 0;
}
-static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
- u64 split)
-{
- if (tree->ops && tree->ops->split_extent_hook)
- tree->ops->split_extent_hook(tree->private_data, orig, split);
-}
-
/*
* split a given extent state struct in two, inserting the preallocated
* struct 'prealloc' as the newly created second half. 'split' indicates an
@@ -477,7 +464,8 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
{
struct rb_node *node;
- split_cb(tree, orig, split);
+ if (tree->private_data && is_data_inode(tree->private_data))
+ btrfs_split_delalloc_extent(tree->private_data, orig, split);
prealloc->start = orig->start;
prealloc->end = split - 1;
@@ -504,7 +492,7 @@ static struct extent_state *next_state(struct extent_state *state)
/*
* utility function to clear some bits in an extent state struct.
- * it will optionally wake up any one waiting on this state (wake == 1).
+ * it will optionally wake up anyone waiting on this state (wake == 1).
*
* If no bits are set on the state struct after clearing things, the
* struct is freed and removed from the tree
@@ -523,7 +511,10 @@ static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
WARN_ON(range > tree->dirty_bytes);
tree->dirty_bytes -= range;
}
- clear_state_cb(tree, state, bits);
+
+ if (tree->private_data && is_data_inode(tree->private_data))
+ btrfs_clear_delalloc_extent(tree->private_data, state, bits);
+
ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
BUG_ON(ret < 0);
state->state &= ~bits_to_clear;
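
This hunk shows the shape of the whole series: an optional function-pointer hook (set_bit_hook/clear_bit_hook and friends) becomes a direct call guarded by is_data_inode(). A minimal sketch of the before/after pattern, with invented names:

    /* The shape of the change: optional callback -> guarded direct call. */
    struct state;

    /* before: if (tree->ops && tree->ops->clear_bit_hook)
     *                 tree->ops->clear_bit_hook(priv, state, bits); */

    static int is_data(void *inode) { return inode != 0; }

    static void clear_delalloc(void *inode, struct state *s, unsigned *bits)
    {
        /* direct implementation, no indirection */
        (void)inode; (void)s; (void)bits;
    }

    static void clear_bits(void *priv, struct state *s, unsigned *bits)
    {
        if (priv && is_data(priv))      /* after: guarded direct call */
            clear_delalloc(priv, s, bits);
    }

    int main(void)
    {
        unsigned b = 0;

        clear_bits((void *)1, 0, &b);
        return 0;
    }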
@@ -800,7 +791,9 @@ static void set_state_bits(struct extent_io_tree *tree,
unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
int ret;
- set_state_cb(tree, state, bits);
+ if (tree->private_data && is_data_inode(tree->private_data))
+ btrfs_set_delalloc_extent(tree->private_data, state, bits);
+
if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
u64 range = state->end - state->start + 1;
tree->dirty_bytes += range;
@@ -1459,16 +1452,16 @@ out:
* find a contiguous range of bytes in the file marked as delalloc, not
* more than 'max_bytes'. start and end are used to return the range,
*
- * 1 is returned if we find something, 0 if nothing was in the tree
+ * true is returned if we find something, false if nothing was in the tree
*/
-static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
+static noinline bool find_delalloc_range(struct extent_io_tree *tree,
u64 *start, u64 *end, u64 max_bytes,
struct extent_state **cached_state)
{
struct rb_node *node;
struct extent_state *state;
u64 cur_start = *start;
- u64 found = 0;
+ bool found = false;
u64 total_bytes = 0;
spin_lock(&tree->lock);
@@ -1479,8 +1472,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
*/
node = tree_search(tree, cur_start);
if (!node) {
- if (!found)
- *end = (u64)-1;
+ *end = (u64)-1;
goto out;
}
@@ -1500,7 +1492,7 @@ static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
*cached_state = state;
refcount_inc(&state->refs);
}
- found++;
+ found = true;
*end = state->end;
cur_start = state->end + 1;
node = rb_next(node);
@@ -1558,19 +1550,22 @@ static noinline int lock_delalloc_pages(struct inode *inode,
}
/*
- * find a contiguous range of bytes in the file marked as delalloc, not
- * more than 'max_bytes'. start and end are used to return the range,
+ * Find and lock a contiguous range of bytes in the file marked as delalloc, no
+ * more than @max_bytes. @start and @end are used to return the range,
*
- * 1 is returned if we find something, 0 if nothing was in the tree
+ * Return: true if we find something
+ * false if nothing was in the tree
*/
-static noinline_for_stack u64 find_lock_delalloc_range(struct inode *inode,
+EXPORT_FOR_TESTS
+noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
struct extent_io_tree *tree,
struct page *locked_page, u64 *start,
- u64 *end, u64 max_bytes)
+ u64 *end)
{
+ u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
u64 delalloc_start;
u64 delalloc_end;
- u64 found;
+ bool found;
struct extent_state *cached_state = NULL;
int ret;
int loops = 0;
@@ -1585,7 +1580,7 @@ again:
*start = delalloc_start;
*end = delalloc_end;
free_extent_state(cached_state);
- return 0;
+ return false;
}
/*
@@ -1605,6 +1600,7 @@ again:
/* step two, lock all the pages after the page that has start */
ret = lock_delalloc_pages(inode, locked_page,
delalloc_start, delalloc_end);
+ ASSERT(!ret || ret == -EAGAIN);
if (ret == -EAGAIN) {
 /* some of the pages are gone, let's avoid looping by
* shortening the size of the delalloc range we're searching
@@ -1616,11 +1612,10 @@ again:
loops = 1;
goto again;
} else {
- found = 0;
+ found = false;
goto out_failed;
}
}
- BUG_ON(ret); /* Only valid values are 0 and -EAGAIN */
/* step three, lock the state bits for the whole range */
lock_extent_bits(tree, delalloc_start, delalloc_end, &cached_state);
@@ -1643,17 +1638,6 @@ out_failed:
return found;
}
-#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-u64 btrfs_find_lock_delalloc_range(struct inode *inode,
- struct extent_io_tree *tree,
- struct page *locked_page, u64 *start,
- u64 *end, u64 max_bytes)
-{
- return find_lock_delalloc_range(inode, tree, locked_page, start, end,
- max_bytes);
-}
-#endif
-
static int __process_pages_contig(struct address_space *mapping,
struct page *locked_page,
pgoff_t start_index, pgoff_t end_index,
@@ -2349,13 +2333,11 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
}
/*
- * this is a generic handler for readpage errors (default
- * readpage_io_failed_hook). if other copies exist, read those and write back
- * good data to the failed position. does not investigate in remapping the
- * failed extent elsewhere, hoping the device will be smart enough to do this as
- * needed
+ * This is a generic handler for readpage errors. If other copies exist, read
+ * those and write back good data to the failed position. Does not investigate
+ * in remapping the failed extent elsewhere, hoping the device will be smart
+ * enough to do this as needed
*/
-
static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
struct page *page, u64 start, u64 end,
int failed_mirror)
@@ -2412,14 +2394,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
{
int uptodate = (err == 0);
- struct extent_io_tree *tree;
int ret = 0;
- tree = &BTRFS_I(page->mapping->host)->io_tree;
-
- if (tree->ops && tree->ops->writepage_end_io_hook)
- tree->ops->writepage_end_io_hook(page, start, end, NULL,
- uptodate);
+ btrfs_writepage_endio_finish_ordered(page, start, end, uptodate);
if (!uptodate) {
ClearPageUptodate(page);
@@ -2522,6 +2499,8 @@ static void end_bio_extent_readpage(struct bio *bio)
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ bool data_inode = btrfs_ino(BTRFS_I(inode))
+ != BTRFS_BTREE_INODE_OBJECTID;
btrfs_debug(fs_info,
"end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
@@ -2551,7 +2530,7 @@ static void end_bio_extent_readpage(struct bio *bio)
len = bvec->bv_len;
mirror = io_bio->mirror_num;
- if (likely(uptodate && tree->ops)) {
+ if (likely(uptodate)) {
ret = tree->ops->readpage_end_io_hook(io_bio, offset,
page, start, end,
mirror);
@@ -2567,38 +2546,37 @@ static void end_bio_extent_readpage(struct bio *bio)
if (likely(uptodate))
goto readpage_ok;
- if (tree->ops) {
- ret = tree->ops->readpage_io_failed_hook(page, mirror);
- if (ret == -EAGAIN) {
- /*
- * Data inode's readpage_io_failed_hook() always
- * returns -EAGAIN.
- *
- * The generic bio_readpage_error handles errors
- * the following way: If possible, new read
- * requests are created and submitted and will
- * end up in end_bio_extent_readpage as well (if
- * we're lucky, not in the !uptodate case). In
- * that case it returns 0 and we just go on with
- * the next page in our bio. If it can't handle
- * the error it will return -EIO and we remain
- * responsible for that page.
- */
- ret = bio_readpage_error(bio, offset, page,
- start, end, mirror);
- if (ret == 0) {
- uptodate = !bio->bi_status;
- offset += len;
- continue;
- }
- }
+ if (data_inode) {
/*
- * metadata's readpage_io_failed_hook() always returns
- * -EIO and fixes nothing. -EIO is also returned if
- * data inode error could not be fixed.
+ * The generic bio_readpage_error handles errors the
+ * following way: If possible, new read requests are
+ * created and submitted and will end up in
+ * end_bio_extent_readpage as well (if we're lucky,
+ * not in the !uptodate case). In that case it returns
+ * 0 and we just go on with the next page in our bio.
+ * If it can't handle the error it will return -EIO and
+ * we remain responsible for that page.
*/
- ASSERT(ret == -EIO);
+ ret = bio_readpage_error(bio, offset, page, start, end,
+ mirror);
+ if (ret == 0) {
+ uptodate = !bio->bi_status;
+ offset += len;
+ continue;
+ }
+ } else {
+ struct extent_buffer *eb;
+
+ eb = (struct extent_buffer *)page->private;
+ set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
+ eb->read_mirror = mirror;
+ atomic_dec(&eb->io_pages);
+ if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD,
+ &eb->bflags))
+ btree_readahead_hook(eb, -EIO);
+
+ ret = -EIO;
}
readpage_ok:
if (likely(uptodate)) {
@@ -2607,7 +2585,7 @@ readpage_ok:
unsigned off;
/* Zero out the end if this page straddles i_size */
- off = i_size & (PAGE_SIZE-1);
+ off = offset_in_page(i_size);
if (page->index == end_index && off)
zero_user_segment(page, off, PAGE_SIZE);
SetPageUptodate(page);
@@ -2644,8 +2622,7 @@ readpage_ok:
if (extent_len)
endio_readpage_release_extent(tree, extent_start, extent_len,
uptodate);
- if (io_bio->end_io)
- io_bio->end_io(io_bio, blk_status_to_errno(bio->bi_status));
+ btrfs_io_bio_free_csum(io_bio);
bio_put(bio);
}
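
The data/metadata split above replaces the readpage_io_failed_hook indirection: data inodes go through the generic repair path (retry another mirror), metadata marks the extent buffer and fails hard. A skeleton of that decision, illustrative only, with -5 standing in for -EIO:

    /* Skeleton of the error split; not the actual kernel flow. */
    #include <stdio.h>

    static int repair_from_other_mirror(void)
    {
        return 0;       /* 0: a retry read was submitted */
    }

    static int handle_read_error(int data_inode)
    {
        if (data_inode)
            return repair_from_other_mirror();
        /* metadata: flag the buffer, wake readahead, then hard-fail */
        return -5;      /* stands in for -EIO */
    }

    int main(void)
    {
        printf("data: %d, metadata: %d\n",
               handle_read_error(1), handle_read_error(0));
        return 0;
    }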
@@ -2782,8 +2759,8 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
else
contig = bio_end_sector(bio) == sector;
- if (tree->ops && btrfs_merge_bio_hook(page, offset, page_size,
- bio, bio_flags))
+ ASSERT(tree->ops);
+ if (btrfs_bio_fits_in_stripe(page, page_size, bio, bio_flags))
can_merge = false;
if (prev_bio_flags != bio_flags || !contig || !can_merge ||
@@ -2911,7 +2888,7 @@ static int __do_readpage(struct extent_io_tree *tree,
if (page->index == last_byte >> PAGE_SHIFT) {
char *userpage;
- size_t zero_offset = last_byte & (PAGE_SIZE - 1);
+ size_t zero_offset = offset_in_page(last_byte);
if (zero_offset) {
iosize = PAGE_SIZE - zero_offset;
@@ -3205,7 +3182,7 @@ static void update_nr_written(struct writeback_control *wbc,
/*
* helper for __extent_writepage, doing all of the delayed allocation setup.
*
- * This returns 1 if our fill_delalloc function did all the work required
+ * This returns 1 if the btrfs_run_delalloc_range function did all the work required
* to write the page (copy into inline extent). In this case the IO has
* been started and the page is already unlocked.
*
@@ -3213,44 +3190,37 @@ static void update_nr_written(struct writeback_control *wbc,
* This returns < 0 if there were errors (page still locked)
*/
static noinline_for_stack int writepage_delalloc(struct inode *inode,
- struct page *page, struct writeback_control *wbc,
- struct extent_page_data *epd,
- u64 delalloc_start,
- unsigned long *nr_written)
+ struct page *page, struct writeback_control *wbc,
+ u64 delalloc_start, unsigned long *nr_written)
{
- struct extent_io_tree *tree = epd->tree;
+ struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
u64 page_end = delalloc_start + PAGE_SIZE - 1;
- u64 nr_delalloc;
+ bool found;
u64 delalloc_to_write = 0;
u64 delalloc_end = 0;
int ret;
int page_started = 0;
- if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
- return 0;
while (delalloc_end < page_end) {
- nr_delalloc = find_lock_delalloc_range(inode, tree,
+ found = find_lock_delalloc_range(inode, tree,
page,
&delalloc_start,
- &delalloc_end,
- BTRFS_MAX_EXTENT_SIZE);
- if (nr_delalloc == 0) {
+ &delalloc_end);
+ if (!found) {
delalloc_start = delalloc_end + 1;
continue;
}
- ret = tree->ops->fill_delalloc(inode, page,
- delalloc_start,
- delalloc_end,
- &page_started,
- nr_written, wbc);
+ ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
+ delalloc_end, &page_started, nr_written, wbc);
/* File system has been set read-only */
if (ret) {
SetPageError(page);
- /* fill_delalloc should be return < 0 for error
- * but just in case, we use > 0 here meaning the
- * IO is started, so we don't want to return > 0
- * unless things are going well.
+ /*
+ * btrfs_run_delalloc_range should return < 0 for error
+ * but just in case, we use > 0 here meaning the IO is
+ * started, so we don't want to return > 0 unless
+ * things are going well.
*/
ret = ret < 0 ? ret : -EIO;
goto done;
@@ -3323,20 +3293,17 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
int nr = 0;
bool compressed;
- if (tree->ops && tree->ops->writepage_start_hook) {
- ret = tree->ops->writepage_start_hook(page, start,
- page_end);
- if (ret) {
- /* Fixup worker will requeue */
- if (ret == -EBUSY)
- wbc->pages_skipped++;
- else
- redirty_page_for_writepage(wbc, page);
+ ret = btrfs_writepage_cow_fixup(page, start, page_end);
+ if (ret) {
+ /* Fixup worker will requeue */
+ if (ret == -EBUSY)
+ wbc->pages_skipped++;
+ else
+ redirty_page_for_writepage(wbc, page);
- update_nr_written(wbc, nr_written);
- unlock_page(page);
- return 1;
- }
+ update_nr_written(wbc, nr_written);
+ unlock_page(page);
+ return 1;
}
/*
@@ -3347,9 +3314,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
end = page_end;
if (i_size <= start) {
- if (tree->ops && tree->ops->writepage_end_io_hook)
- tree->ops->writepage_end_io_hook(page, start,
- page_end, NULL, 1);
+ btrfs_writepage_endio_finish_ordered(page, start, page_end, 1);
goto done;
}
@@ -3360,9 +3325,8 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
u64 offset;
if (cur >= i_size) {
- if (tree->ops && tree->ops->writepage_end_io_hook)
- tree->ops->writepage_end_io_hook(page, cur,
- page_end, NULL, 1);
+ btrfs_writepage_endio_finish_ordered(page, cur,
+ page_end, 1);
break;
}
em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, cur,
@@ -3396,11 +3360,10 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
* end_io notification does not happen here for
* compressed extents
*/
- if (!compressed && tree->ops &&
- tree->ops->writepage_end_io_hook)
- tree->ops->writepage_end_io_hook(page, cur,
- cur + iosize - 1,
- NULL, 1);
+ if (!compressed)
+ btrfs_writepage_endio_finish_ordered(page, cur,
+ cur + iosize - 1,
+ 1);
else if (compressed) {
/* we don't want to end_page_writeback on
* a compressed extent. this happens
@@ -3469,7 +3432,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
ClearPageError(page);
- pg_offset = i_size & (PAGE_SIZE - 1);
+ pg_offset = offset_in_page(i_size);
if (page->index > end_index ||
(page->index == end_index && !pg_offset)) {
page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
@@ -3491,11 +3454,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
set_page_extent_mapped(page);
- ret = writepage_delalloc(inode, page, wbc, epd, start, &nr_written);
- if (ret == 1)
- goto done_unlocked;
- if (ret)
- goto done;
+ if (!epd->extent_locked) {
+ ret = writepage_delalloc(inode, page, wbc, start, &nr_written);
+ if (ret == 1)
+ goto done_unlocked;
+ if (ret)
+ goto done;
+ }
ret = __extent_writepage_io(inode, page, wbc, epd,
i_size, nr_written, write_flags, &nr);
@@ -3934,12 +3899,25 @@ static int extent_write_cache_pages(struct address_space *mapping,
range_whole = 1;
scanned = 1;
}
- if (wbc->sync_mode == WB_SYNC_ALL)
+
+ /*
+ * We do the tagged writepage as long as the snapshot flush bit is set
+ * and we are the first one to do the filemap_flush() on this inode.
+ *
+ * The nr_to_write == LONG_MAX is needed to make sure other flushers do
+ * not race in and drop the bit.
+ */
+ if (range_whole && wbc->nr_to_write == LONG_MAX &&
+ test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
+ &BTRFS_I(inode)->runtime_flags))
+ wbc->tagged_writepages = 1;
+
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
retry:
- if (wbc->sync_mode == WB_SYNC_ALL)
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && !nr_to_write_done && (index <= end) &&
@@ -4084,10 +4062,8 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
if (clear_page_dirty_for_io(page))
ret = __extent_writepage(page, &wbc_writepages, &epd);
else {
- if (tree->ops && tree->ops->writepage_end_io_hook)
- tree->ops->writepage_end_io_hook(page, start,
- start + PAGE_SIZE - 1,
- NULL, 1);
+ btrfs_writepage_endio_finish_ordered(page, start,
+ start + PAGE_SIZE - 1, 1);
unlock_page(page);
}
put_page(page);
@@ -4118,42 +4094,36 @@ int extent_readpages(struct address_space *mapping, struct list_head *pages,
unsigned nr_pages)
{
struct bio *bio = NULL;
- unsigned page_idx;
unsigned long bio_flags = 0;
struct page *pagepool[16];
- struct page *page;
struct extent_map *em_cached = NULL;
struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree;
int nr = 0;
u64 prev_em_start = (u64)-1;
- for (page_idx = 0; page_idx < nr_pages; page_idx++) {
- page = list_entry(pages->prev, struct page, lru);
+ while (!list_empty(pages)) {
+ for (nr = 0; nr < ARRAY_SIZE(pagepool) && !list_empty(pages);) {
+ struct page *page = list_entry(pages->prev,
+ struct page, lru);
- prefetchw(&page->flags);
- list_del(&page->lru);
- if (add_to_page_cache_lru(page, mapping,
- page->index,
- readahead_gfp_mask(mapping))) {
- put_page(page);
- continue;
+ prefetchw(&page->flags);
+ list_del(&page->lru);
+ if (add_to_page_cache_lru(page, mapping, page->index,
+ readahead_gfp_mask(mapping))) {
+ put_page(page);
+ continue;
+ }
+
+ pagepool[nr++] = page;
}
- pagepool[nr++] = page;
- if (nr < ARRAY_SIZE(pagepool))
- continue;
__extent_readpages(tree, pagepool, nr, &em_cached, &bio,
- &bio_flags, &prev_em_start);
- nr = 0;
+ &bio_flags, &prev_em_start);
}
- if (nr)
- __extent_readpages(tree, pagepool, nr, &em_cached, &bio,
- &bio_flags, &prev_em_start);
if (em_cached)
free_extent_map(em_cached);
- BUG_ON(!list_empty(pages));
if (bio)
return submit_one_bio(bio, 0, bio_flags);
return 0;
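
The reworked extent_readpages() drains the page list in batches of up to 16 and submits each batch as soon as it fills, which removes the old trailing "if (nr)" flush. The generic form of that loop, as a userspace sketch:

    /* Generic form of the new batching loop; illustrative only. */
    #include <stdio.h>

    #define BATCH 16

    static void submit(int *batch, int nr)
    {
        (void)batch;
        printf("submitting %d pages\n", nr);
    }

    int main(void)
    {
        int remaining = 40, batch[BATCH];

        while (remaining > 0) {
            int nr = 0;

            while (nr < BATCH && remaining > 0)
                batch[nr++] = remaining--;  /* "take a page off the list" */
            submit(batch, nr);  /* every batch submitted here; no tail flush */
        }
        return 0;
    }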
@@ -4342,7 +4312,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
/*
* Sanity check, extent_fiemap() should have ensured that new
- * fiemap extent won't overlap with cahced one.
+ * fiemap extent won't overlap with cached one.
* Not recoverable.
*
* NOTE: Physical address can overlap, due to compression
@@ -4914,13 +4884,6 @@ again:
check_buffer_tree_ref(eb);
set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
- /*
- * We will free dummy extent buffer's if they come into
- * free_extent_buffer with a ref count of 2, but if we are using this we
- * want the buffers to stay in memory until we're done with them, so
- * bump the ref count again.
- */
- atomic_inc(&eb->refs);
return eb;
free_eb:
btrfs_release_extent_buffer(eb);
@@ -5102,7 +5065,9 @@ void free_extent_buffer(struct extent_buffer *eb)
while (1) {
refs = atomic_read(&eb->refs);
- if (refs <= 3)
+ if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
+ || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
+ refs == 1))
break;
old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
if (old == refs)
@@ -5111,10 +5076,6 @@ void free_extent_buffer(struct extent_buffer *eb)
spin_lock(&eb->refs_lock);
if (atomic_read(&eb->refs) == 2 &&
- test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))
- atomic_dec(&eb->refs);
-
- if (atomic_read(&eb->refs) == 2 &&
test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
!extent_buffer_under_io(eb) &&
test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
@@ -5340,7 +5301,7 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
struct page *page;
char *kaddr;
char *dst = (char *)dstv;
- size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ size_t start_offset = offset_in_page(eb->start);
unsigned long i = (start_offset + start) >> PAGE_SHIFT;
if (start + len > eb->len) {
@@ -5350,7 +5311,7 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
return;
}
- offset = (start_offset + start) & (PAGE_SIZE - 1);
+ offset = offset_in_page(start_offset + start);
while (len > 0) {
page = eb->pages[i];
@@ -5375,14 +5336,14 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb,
struct page *page;
char *kaddr;
char __user *dst = (char __user *)dstv;
- size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ size_t start_offset = offset_in_page(eb->start);
unsigned long i = (start_offset + start) >> PAGE_SHIFT;
int ret = 0;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = (start_offset + start) & (PAGE_SIZE - 1);
+ offset = offset_in_page(start_offset + start);
while (len > 0) {
page = eb->pages[i];
@@ -5413,10 +5374,10 @@ int map_private_extent_buffer(const struct extent_buffer *eb,
char **map, unsigned long *map_start,
unsigned long *map_len)
{
- size_t offset = start & (PAGE_SIZE - 1);
+ size_t offset;
char *kaddr;
struct page *p;
- size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ size_t start_offset = offset_in_page(eb->start);
unsigned long i = (start_offset + start) >> PAGE_SHIFT;
unsigned long end_i = (start_offset + start + min_len - 1) >>
PAGE_SHIFT;
@@ -5453,14 +5414,14 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
struct page *page;
char *kaddr;
char *ptr = (char *)ptrv;
- size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ size_t start_offset = offset_in_page(eb->start);
unsigned long i = (start_offset + start) >> PAGE_SHIFT;
int ret = 0;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = (start_offset + start) & (PAGE_SIZE - 1);
+ offset = offset_in_page(start_offset + start);
while (len > 0) {
page = eb->pages[i];
@@ -5509,13 +5470,13 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
struct page *page;
char *kaddr;
char *src = (char *)srcv;
- size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ size_t start_offset = offset_in_page(eb->start);
unsigned long i = (start_offset + start) >> PAGE_SHIFT;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = (start_offset + start) & (PAGE_SIZE - 1);
+ offset = offset_in_page(start_offset + start);
while (len > 0) {
page = eb->pages[i];
@@ -5539,13 +5500,13 @@ void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
size_t offset;
struct page *page;
char *kaddr;
- size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ size_t start_offset = offset_in_page(eb->start);
unsigned long i = (start_offset + start) >> PAGE_SHIFT;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = (start_offset + start) & (PAGE_SIZE - 1);
+ offset = offset_in_page(start_offset + start);
while (len > 0) {
page = eb->pages[i];
@@ -5584,13 +5545,12 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
size_t offset;
struct page *page;
char *kaddr;
- size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
+ size_t start_offset = offset_in_page(dst->start);
unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
WARN_ON(src->len != dst_len);
- offset = (start_offset + dst_offset) &
- (PAGE_SIZE - 1);
+ offset = offset_in_page(start_offset + dst_offset);
while (len > 0) {
page = dst->pages[i];
@@ -5626,7 +5586,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
unsigned long *page_index,
size_t *page_offset)
{
- size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);
+ size_t start_offset = offset_in_page(eb->start);
size_t byte_offset = BIT_BYTE(nr);
size_t offset;
@@ -5638,7 +5598,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
offset = start_offset + start + byte_offset;
*page_index = offset >> PAGE_SHIFT;
- *page_offset = offset & (PAGE_SIZE - 1);
+ *page_offset = offset_in_page(offset);
}
/**
@@ -5780,7 +5740,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
size_t cur;
size_t dst_off_in_page;
size_t src_off_in_page;
- size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
+ size_t start_offset = offset_in_page(dst->start);
unsigned long dst_i;
unsigned long src_i;
@@ -5798,10 +5758,8 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
}
while (len > 0) {
- dst_off_in_page = (start_offset + dst_offset) &
- (PAGE_SIZE - 1);
- src_off_in_page = (start_offset + src_offset) &
- (PAGE_SIZE - 1);
+ dst_off_in_page = offset_in_page(start_offset + dst_offset);
+ src_off_in_page = offset_in_page(start_offset + src_offset);
dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
src_i = (start_offset + src_offset) >> PAGE_SHIFT;
@@ -5829,7 +5787,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
size_t src_off_in_page;
unsigned long dst_end = dst_offset + len - 1;
unsigned long src_end = src_offset + len - 1;
- size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1);
+ size_t start_offset = offset_in_page(dst->start);
unsigned long dst_i;
unsigned long src_i;
@@ -5853,10 +5811,8 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
src_i = (start_offset + src_end) >> PAGE_SHIFT;
- dst_off_in_page = (start_offset + dst_end) &
- (PAGE_SIZE - 1);
- src_off_in_page = (start_offset + src_end) &
- (PAGE_SIZE - 1);
+ dst_off_in_page = offset_in_page(start_offset + dst_end);
+ src_off_in_page = offset_in_page(start_offset + src_end);
cur = min_t(unsigned long, len, src_off_in_page + 1);
cur = min(cur, dst_off_in_page + 1);
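
The many conversions in this file replace the open-coded `& (PAGE_SIZE - 1)` mask (and its u64 cast) with offset_in_page(). A userspace stand-in showing the equivalence:

    /* offset_in_page() is just the low bits of an address or offset. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

    int main(void)
    {
        printf("%lu\n", offset_in_page(8195));  /* 3 */
        printf("%lu\n", offset_in_page(4096));  /* 0 */
        return 0;
    }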
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 369daa5d4f73..9673be3f3d1f 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -37,18 +37,22 @@
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_FLAG_SHIFT 16
-/* these are bit numbers for test/set bit */
-#define EXTENT_BUFFER_UPTODATE 0
-#define EXTENT_BUFFER_DIRTY 2
-#define EXTENT_BUFFER_CORRUPT 3
-#define EXTENT_BUFFER_READAHEAD 4 /* this got triggered by readahead */
-#define EXTENT_BUFFER_TREE_REF 5
-#define EXTENT_BUFFER_STALE 6
-#define EXTENT_BUFFER_WRITEBACK 7
-#define EXTENT_BUFFER_READ_ERR 8 /* read IO error */
-#define EXTENT_BUFFER_UNMAPPED 9
-#define EXTENT_BUFFER_IN_TREE 10
-#define EXTENT_BUFFER_WRITE_ERR 11 /* write IO error */
+enum {
+ EXTENT_BUFFER_UPTODATE,
+ EXTENT_BUFFER_DIRTY,
+ EXTENT_BUFFER_CORRUPT,
+ /* this got triggered by readahead */
+ EXTENT_BUFFER_READAHEAD,
+ EXTENT_BUFFER_TREE_REF,
+ EXTENT_BUFFER_STALE,
+ EXTENT_BUFFER_WRITEBACK,
+ /* read IO error */
+ EXTENT_BUFFER_READ_ERR,
+ EXTENT_BUFFER_UNMAPPED,
+ EXTENT_BUFFER_IN_TREE,
+ /* write IO error */
+ EXTENT_BUFFER_WRITE_ERR,
+};
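
Converting the bit numbers to an enum removes the hand-maintained values (note the old #define list even skipped 1). A userspace sketch of the idiom and how such bit numbers are used:

    /* Why an enum: consecutive bit numbers without hand-kept constants.
     * Userspace equivalent of the set_bit()/test_bit() usage. */
    #include <stdio.h>

    enum { FLAG_UPTODATE, FLAG_DIRTY, FLAG_STALE };

    int main(void)
    {
        unsigned long flags = 0;

        flags |= 1UL << FLAG_DIRTY;                        /* set_bit()  */
        printf("dirty=%lu\n", (flags >> FLAG_DIRTY) & 1);  /* test_bit() */
        return 0;
    }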
/* these are flags for __process_pages_contig */
#define PAGE_UNLOCK (1 << 0)
@@ -94,38 +98,13 @@ typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
struct extent_io_ops {
/*
- * The following callbacks must be allways defined, the function
+ * The following callbacks must always be defined; the function
* pointer will be called unconditionally.
*/
extent_submit_bio_hook_t *submit_bio_hook;
int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
struct page *page, u64 start, u64 end,
int mirror);
- int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
-
- /*
- * Optional hooks, called if the pointer is not NULL
- */
- int (*fill_delalloc)(void *private_data, struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written,
- struct writeback_control *wbc);
-
- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
- void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
- struct extent_state *state, int uptodate);
- void (*set_bit_hook)(void *private_data, struct extent_state *state,
- unsigned *bits);
- void (*clear_bit_hook)(void *private_data,
- struct extent_state *state,
- unsigned *bits);
- void (*merge_extent_hook)(void *private_data,
- struct extent_state *new,
- struct extent_state *other);
- void (*split_extent_hook)(void *private_data,
- struct extent_state *orig, u64 split);
- void (*check_extent_io_range)(void *private_data, const char *caller,
- u64 start, u64 end);
};
struct extent_io_tree {
@@ -353,11 +332,11 @@ static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
}
static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
- u64 end)
+ u64 end, struct extent_state **cached)
{
return clear_extent_bit(tree, start, end,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, NULL);
+ EXTENT_DO_ACCOUNTING, 0, 0, cached);
}
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
@@ -546,10 +525,9 @@ int free_io_failure(struct extent_io_tree *failure_tree,
struct extent_io_tree *io_tree,
struct io_failure_record *rec);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-u64 btrfs_find_lock_delalloc_range(struct inode *inode,
- struct extent_io_tree *tree,
- struct page *locked_page, u64 *start,
- u64 *end, u64 max_bytes);
+bool find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree,
+ struct page *locked_page, u64 *start,
+ u64 *end);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 7eea8b6e2cd3..a042a193c120 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -475,7 +475,8 @@ static struct extent_map *prev_extent_map(struct extent_map *em)
return container_of(prev, struct extent_map, rb_node);
}
-/* helper for btfs_get_extent. Given an existing extent in the tree,
+/*
+ * Helper for btrfs_get_extent. Given an existing extent in the tree,
* the existing extent is the nearest extent to map_start,
* and an extent that you want to insert, deal with overlap and insert
* the best fitted new extent into the tree.
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 31977ffd6190..ef05a0121652 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -11,13 +11,20 @@
#define EXTENT_MAP_INLINE ((u64)-2)
#define EXTENT_MAP_DELALLOC ((u64)-1)
-/* bits for the flags field */
-#define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */
-#define EXTENT_FLAG_COMPRESSED 1
-#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
-#define EXTENT_FLAG_LOGGING 4 /* Logging this extent */
-#define EXTENT_FLAG_FILLING 5 /* Filling in a preallocated extent */
-#define EXTENT_FLAG_FS_MAPPING 6 /* filesystem extent mapping type */
+/* bits for the extent_map::flags field */
+enum {
+ /* this entry not yet on disk, don't free it */
+ EXTENT_FLAG_PINNED,
+ EXTENT_FLAG_COMPRESSED,
+ /* pre-allocated extent */
+ EXTENT_FLAG_PREALLOC,
+ /* Logging this extent */
+ EXTENT_FLAG_LOGGING,
+ /* Filling in a preallocated extent */
+ EXTENT_FLAG_FILLING,
+ /* filesystem extent mapping type */
+ EXTENT_FLAG_FS_MAPPING,
+};
struct extent_map {
struct rb_node rb_node;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index ba74827beb32..920bf3b4b0ef 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -142,11 +142,6 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
return ret;
}
-static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
-{
- kfree(bio->csum_allocated);
-}
-
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
u64 logical_offset, u32 *dst, int dio)
{
@@ -175,14 +170,12 @@ static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio
nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
if (!dst) {
if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
- btrfs_bio->csum_allocated = kmalloc_array(nblocks,
- csum_size, GFP_NOFS);
- if (!btrfs_bio->csum_allocated) {
+ btrfs_bio->csum = kmalloc_array(nblocks, csum_size,
+ GFP_NOFS);
+ if (!btrfs_bio->csum) {
btrfs_free_path(path);
return BLK_STS_RESOURCE;
}
- btrfs_bio->csum = btrfs_bio->csum_allocated;
- btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
} else {
btrfs_bio->csum = btrfs_bio->csum_inline;
}
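
With the endio callback gone, the checksum buffer is either the inline array or a plain allocation freed by btrfs_io_bio_free_csum() (see the extent_io.c hunk above). The inline-or-allocated pattern itself, as a self-contained sketch with invented sizes:

    /* The inline-or-allocated buffer pattern; illustrative only. */
    #include <stdlib.h>

    #define INLINE_BYTES 64

    struct io_bio {
        unsigned char csum_inline[INLINE_BYTES];
        unsigned char *csum;
    };

    static int csum_setup(struct io_bio *b, size_t need)
    {
        if (need > INLINE_BYTES) {
            b->csum = malloc(need);         /* large read: heap buffer */
            return b->csum ? 0 : -1;
        }
        b->csum = b->csum_inline;           /* common case: no allocation */
        return 0;
    }

    static void csum_free(struct io_bio *b)
    {
        if (b->csum != b->csum_inline)      /* mirrors btrfs_io_bio_free_csum */
            free(b->csum);
    }

    int main(void)
    {
        struct io_bio b;

        if (csum_setup(&b, 128) == 0)
            csum_free(&b);
        return 0;
    }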
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a3c22e16509b..d38dc8c31533 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -399,7 +399,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
size_t copied = 0;
size_t total_copied = 0;
int pg = 0;
- int offset = pos & (PAGE_SIZE - 1);
+ int offset = offset_in_page(pos);
while (write_bytes > 0) {
size_t count = min_t(size_t,
@@ -1611,7 +1611,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
return -ENOMEM;
while (iov_iter_count(i) > 0) {
- size_t offset = pos & (PAGE_SIZE - 1);
+ size_t offset = offset_in_page(pos);
size_t sector_offset;
size_t write_bytes = min(iov_iter_count(i),
nrptrs * (size_t)PAGE_SIZE -
@@ -2005,7 +2005,7 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
filp->private_data = NULL;
/*
- * ordered_data_close is set by settattr when we are about to truncate
+ * ordered_data_close is set by setattr when we are about to truncate
* a file from a non-zero size to a zero size. This tries to
* flush down new bytes that may have been written if the
* application were using truncate to replace a file in place.
@@ -2089,8 +2089,32 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
atomic_inc(&root->log_batch);
/*
+ * Before we acquired the inode's lock, someone may have dirtied more
+ * pages in the target range. We need to make sure that writeback for
+ * any such pages does not start while we are logging the inode, because
+ * if it does, any of the following might happen when we are not doing a
+ * full inode sync:
+ *
+ * 1) We log an extent after its writeback finishes but before its
+ * checksums are added to the csum tree, leading to -EIO errors
+ * when attempting to read the extent after a log replay.
+ *
+ * 2) We can end up logging an extent before its writeback finishes.
+ * Therefore after the log replay we will have a file extent item
+ * pointing to an unwritten extent (and no data checksums as well).
+ *
+	 * So trigger writeback for any possible new dirty pages and then wait
+	 * for all ordered extents to complete below.
+ */
+ ret = start_ordered_ops(inode, start, end);
+ if (ret) {
+ inode_unlock(inode);
+ goto out;
+ }
+
+ /*
* We have to do this here to avoid the priority inversion of waiting on
- * IO of a lower priority task while holding a transaciton open.
+ * IO of a lower priority task while holding a transaction open.
*/
ret = btrfs_wait_ordered_range(inode, start, len);
if (ret) {
@@ -2130,7 +2154,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* here we could get into a situation where we're waiting on IO to
* happen that is blocked on a transaction trying to commit. With start
* we inc the extwriter counter, so we wait for all extwriters to exit
- * before we start blocking join'ers. This comment is to keep somebody
+ * before we start blocking joiners. This comment is to keep somebody
* from thinking they are super smart and changing this to
* btrfs_join_transaction *cough*Josef*cough*.
*/
@@ -2162,25 +2186,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
up_write(&BTRFS_I(inode)->dio_sem);
inode_unlock(inode);
- /*
- * If any of the ordered extents had an error, just return it to user
- * space, so that the application knows some writes didn't succeed and
- * can take proper action (retry for e.g.). Blindly committing the
- * transaction in this case, would fool userspace that everything was
- * successful. And we also want to make sure our log doesn't contain
- * file extent items pointing to extents that weren't fully written to -
- * just like in the non fast fsync path, where we check for the ordered
- * operation's error flag before writing to the log tree and return -EIO
- * if any of them had this flag set (btrfs_wait_ordered_range) -
- * therefore we need to check for errors in the ordered operations,
- * which are indicated by ctx.io_err.
- */
- if (ctx.io_err) {
- btrfs_end_transaction(trans);
- ret = ctx.io_err;
- goto out;
- }
-
if (ret != BTRFS_NO_LOG_SYNC) {
if (!ret) {
ret = btrfs_sync_log(trans, root, &ctx);
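Taken together, btrfs_sync_file() now kicks writeback twice, once before and once after taking the inode lock, so no page dirtied in the window can be logged in a dangerous state. A simplified outline of the resulting ordering (error handling elided, helper names as in this file):

	ret = start_ordered_ops(inode, start, end);	/* pre-lock flush */
	inode_lock(inode);
	atomic_inc(&root->log_batch);
	ret = start_ordered_ops(inode, start, end);	/* flush pages dirtied before the lock */
	ret = btrfs_wait_ordered_range(inode, start, len); /* csums reach the csum tree */
	/* ... log the inode, then commit or sync the log ... */
	inode_unlock(inode);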
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index d6736595ec57..e5089087eaa6 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -74,11 +74,11 @@ out:
return ret;
}
-struct btrfs_free_space_info *
-search_free_space_info(struct btrfs_trans_handle *trans,
- struct btrfs_fs_info *fs_info,
- struct btrfs_block_group_cache *block_group,
- struct btrfs_path *path, int cow)
+EXPORT_FOR_TESTS
+struct btrfs_free_space_info *search_free_space_info(
+ struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info,
+ struct btrfs_block_group_cache *block_group,
+ struct btrfs_path *path, int cow)
{
struct btrfs_root *root = fs_info->free_space_root;
struct btrfs_key key;
@@ -176,6 +176,7 @@ static void le_bitmap_set(unsigned long *map, unsigned int start, int len)
}
}
+EXPORT_FOR_TESTS
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path)
@@ -315,6 +316,7 @@ out:
return ret;
}
+EXPORT_FOR_TESTS
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path)
@@ -487,6 +489,7 @@ out:
return ret;
}
+EXPORT_FOR_TESTS
int free_space_test_bit(struct btrfs_block_group_cache *block_group,
struct btrfs_path *path, u64 offset)
{
@@ -775,6 +778,7 @@ out:
return ret;
}
+EXPORT_FOR_TESTS
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path, u64 start, u64 size)
@@ -968,6 +972,7 @@ out:
return ret;
}
+EXPORT_FOR_TESTS
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path, u64 start, u64 size)
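EXPORT_FOR_TESTS gives these otherwise file-local helpers external linkage only when the self-tests are built. The macro itself is not part of this hunk; presumably it expands along these lines:

	/* Sketch of the expected definition (e.g. in ctree.h): */
	#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	#define EXPORT_FOR_TESTS
	#else
	#define EXPORT_FOR_TESTS static
	#endif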
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9ea4c6f0352f..43eb4535319d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -27,6 +27,7 @@
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
+#include <linux/swap.h>
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
@@ -103,23 +104,23 @@ static void __endio_write_update_ordered(struct inode *inode,
/*
* Cleanup all submitted ordered extents in specified range to handle errors
- * from the fill_dellaloc() callback.
+ * from the btrfs_run_delalloc_range() callback.
*
* NOTE: caller must ensure that when an error happens, it can not call
* extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
* and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
* to be released, which we want to happen only when finishing the ordered
- * extent (btrfs_finish_ordered_io()). Also note that the caller of the
- * fill_delalloc() callback already does proper cleanup for the first page of
- * the range, that is, it invokes the callback writepage_end_io_hook() for the
- * range of the first page.
+ * extent (btrfs_finish_ordered_io()).
*/
static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
- const u64 offset,
- const u64 bytes)
+ struct page *locked_page,
+ u64 offset, u64 bytes)
{
unsigned long index = offset >> PAGE_SHIFT;
unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
+ u64 page_start = page_offset(locked_page);
+ u64 page_end = page_start + PAGE_SIZE - 1;
+
struct page *page;
while (index <= end_index) {
@@ -130,8 +131,18 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
ClearPagePrivate2(page);
put_page(page);
}
- return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
- bytes - PAGE_SIZE, false);
+
+ /*
+ * In case this page belongs to the delalloc range being instantiated
+ * then skip it, since the first page of a range is going to be
+ * properly cleaned up by the caller of run_delalloc_range
+ */
+ if (page_start >= offset && page_end <= (offset + bytes - 1)) {
+ offset += PAGE_SIZE;
+ bytes -= PAGE_SIZE;
+ }
+
+ return __endio_write_update_ordered(inode, offset, bytes, false);
}
static int btrfs_dirty_inode(struct inode *inode);
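A worked example of the skip condition, with hypothetical numbers (4K pages):

	/*
	 * offset = 0, bytes = 16384, locked_page spans [0, 4095]:
	 *   page_start (0)   >= offset (0)                 -> true
	 *   page_end  (4095) <= offset + bytes - 1 (16383) -> true
	 * so offset becomes 4096 and bytes becomes 12288;
	 * __endio_write_update_ordered() then cleans [4096, 16383] only,
	 * leaving the locked first page to the run_delalloc_range caller.
	 */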
@@ -229,7 +240,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans,
start >> PAGE_SHIFT);
btrfs_set_file_extent_compression(leaf, ei, 0);
kaddr = kmap_atomic(page);
- offset = start & (PAGE_SIZE - 1);
+ offset = offset_in_page(start);
write_extent_buffer(leaf, kaddr + offset, ptr, size);
kunmap_atomic(kaddr);
put_page(page);
@@ -357,7 +368,7 @@ struct async_extent {
struct async_cow {
struct inode *inode;
- struct btrfs_root *root;
+ struct btrfs_fs_info *fs_info;
struct page *locked_page;
u64 start;
u64 end;
@@ -538,8 +549,7 @@ again:
&total_compressed);
if (!ret) {
- unsigned long offset = total_compressed &
- (PAGE_SIZE - 1);
+ unsigned long offset = offset_in_page(total_compressed);
struct page *page = pages[nr_pages - 1];
char *kaddr;
@@ -847,14 +857,13 @@ retry:
ins.offset, async_extent->pages,
async_extent->nr_pages,
async_cow->write_flags)) {
- struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct page *p = async_extent->pages[0];
const u64 start = async_extent->start;
const u64 end = start + async_extent->ram_size - 1;
p->mapping = inode->i_mapping;
- tree->ops->writepage_end_io_hook(p, start, end,
- NULL, 0);
+ btrfs_writepage_endio_finish_ordered(p, start, end, 0);
+
p->mapping = NULL;
extent_clear_unlock_delalloc(inode, start, end, end,
NULL, 0,
@@ -1144,13 +1153,11 @@ static noinline void async_cow_submit(struct btrfs_work *work)
{
struct btrfs_fs_info *fs_info;
struct async_cow *async_cow;
- struct btrfs_root *root;
unsigned long nr_pages;
async_cow = container_of(work, struct async_cow, work);
- root = async_cow->root;
- fs_info = root->fs_info;
+ fs_info = async_cow->fs_info;
nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
PAGE_SHIFT;
@@ -1179,7 +1186,6 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct async_cow *async_cow;
- struct btrfs_root *root = BTRFS_I(inode)->root;
unsigned long nr_pages;
u64 cur_end;
@@ -1189,7 +1195,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
BUG_ON(!async_cow); /* -ENOMEM */
async_cow->inode = igrab(inode);
- async_cow->root = root;
+ async_cow->fs_info = fs_info;
async_cow->locked_page = locked_page;
async_cow->start = start;
async_cow->write_flags = write_flags;
@@ -1372,7 +1378,8 @@ next_slot:
* Do the same check as in btrfs_cross_ref_exist but
* without the unnecessary search.
*/
- if (btrfs_file_extent_generation(leaf, fi) <=
+ if (!nolock &&
+ btrfs_file_extent_generation(leaf, fi) <=
btrfs_root_last_snapshot(&root->root_item))
goto out_check;
if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
@@ -1576,12 +1583,12 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
}
/*
- * extent_io.c call back to do delayed allocation processing
+ * Function to process delayed allocation (create CoW) for ranges which are
+ * being touched for the first time.
*/
-static int run_delalloc_range(void *private_data, struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written,
- struct writeback_control *wbc)
+int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
+ u64 start, u64 end, int *page_started, unsigned long *nr_written,
+ struct writeback_control *wbc)
{
struct inode *inode = private_data;
int ret;
@@ -1605,14 +1612,14 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
write_flags);
}
if (ret)
- btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
+ btrfs_cleanup_ordered_extents(inode, locked_page, start,
+ end - start + 1);
return ret;
}
-static void btrfs_split_extent_hook(void *private_data,
- struct extent_state *orig, u64 split)
+void btrfs_split_delalloc_extent(struct inode *inode,
+ struct extent_state *orig, u64 split)
{
- struct inode *inode = private_data;
u64 size;
/* not delalloc, ignore it */
@@ -1625,7 +1632,7 @@ static void btrfs_split_extent_hook(void *private_data,
u64 new_size;
/*
- * See the explanation in btrfs_merge_extent_hook, the same
+ * See the explanation in btrfs_merge_delalloc_extent, the same
* applies here, just in reverse.
*/
new_size = orig->end - split + 1;
@@ -1642,16 +1649,13 @@ static void btrfs_split_extent_hook(void *private_data,
}
/*
- * extent_io.c merge_extent_hook, used to track merged delayed allocation
- * extents so we can keep track of new extents that are just merged onto old
- * extents, such as when we are doing sequential writes, so we can properly
- * account for the metadata space we'll need.
+ * Handle merged delayed allocation extents so we can keep track of new extents
+ * that are just merged onto old extents, such as when we are doing sequential
+ * writes, so we can properly account for the metadata space we'll need.
*/
-static void btrfs_merge_extent_hook(void *private_data,
- struct extent_state *new,
- struct extent_state *other)
+void btrfs_merge_delalloc_extent(struct inode *inode, struct extent_state *new,
+ struct extent_state *other)
{
- struct inode *inode = private_data;
u64 new_size, old_size;
u32 num_extents;
@@ -1755,15 +1759,12 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root,
}
/*
- * extent_io.c set_bit_hook, used to track delayed allocation
- * bytes in this file, and to maintain the list of inodes that
- * have pending delalloc work to be done.
+ * Properly track delayed allocation bytes in the inode and maintain the
+ * list of inodes that have pending delalloc work to be done.
*/
-static void btrfs_set_bit_hook(void *private_data,
- struct extent_state *state, unsigned *bits)
+void btrfs_set_delalloc_extent(struct inode *inode, struct extent_state *state,
+ unsigned *bits)
{
- struct inode *inode = private_data;
-
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
@@ -1809,14 +1810,14 @@ static void btrfs_set_bit_hook(void *private_data,
}
/*
- * extent_io.c clear_bit_hook, see set_bit_hook for why
+ * Once a range is no longer delalloc this function ensures that proper
+ * accounting happens.
*/
-static void btrfs_clear_bit_hook(void *private_data,
- struct extent_state *state,
- unsigned *bits)
+void btrfs_clear_delalloc_extent(struct inode *vfs_inode,
+ struct extent_state *state, unsigned *bits)
{
- struct btrfs_inode *inode = BTRFS_I((struct inode *)private_data);
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
+ struct btrfs_inode *inode = BTRFS_I(vfs_inode);
+ struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb);
u64 len = state->end + 1 - state->start;
u32 num_extents = count_max_extents(len);
@@ -1841,7 +1842,7 @@ static void btrfs_clear_bit_hook(void *private_data,
/*
* We don't reserve metadata space for space cache inodes so we
- * don't need to call dellalloc_release_metadata if there is an
+ * don't need to call delalloc_release_metadata if there is an
* error.
*/
if (*bits & EXTENT_CLEAR_META_RESV &&
@@ -1880,16 +1881,21 @@ static void btrfs_clear_bit_hook(void *private_data,
}
/*
- * Merge bio hook, this must check the chunk tree to make sure we don't create
- * bios that span stripes or chunks
+ * btrfs_bio_fits_in_stripe - Checks whether the size of the given bio will fit
+ * in a chunk's stripe. This function ensures that bios do not span a
+ * stripe/chunk.
*
- * return 1 if page cannot be merged to bio
- * return 0 if page can be merged to bio
+ * @page - The page we are about to add to the bio
+ * @size - size we want to add to the bio
+ * @bio - bio we want to ensure is smaller than a stripe
+ * @bio_flags - flags of the bio
+ *
+ * return 1 if page cannot be added to the bio
+ * return 0 if page can be added to the bio
* return error otherwise
*/
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
- size_t size, struct bio *bio,
- unsigned long bio_flags)
+int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
+ unsigned long bio_flags)
{
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -1932,29 +1938,6 @@ static blk_status_t btrfs_submit_bio_start(void *private_data, struct bio *bio,
}
/*
- * in order to insert checksums into the metadata in large chunks,
- * we wait until bio submission time. All the pages in the bio are
- * checksummed and sums are attached onto the ordered extent record.
- *
- * At IO completion time the cums attached on the ordered extent record
- * are inserted into the btree
- */
-blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
- int mirror_num)
-{
- struct inode *inode = private_data;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- blk_status_t ret;
-
- ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
- if (ret) {
- bio->bi_status = ret;
- bio_endio(bio);
- }
- return ret;
-}
-
-/*
* extent_io.c submission hook. This does the right thing for csum calculation
* on write, or reading the csums from the tree before a read.
*
@@ -2056,7 +2039,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
unsigned int extra_bits,
struct extent_state **cached_state, int dedupe)
{
- WARN_ON((end & (PAGE_SIZE - 1)) == 0);
+ WARN_ON(PAGE_ALIGNED(end));
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
extra_bits, cached_state);
}
@@ -2152,7 +2135,7 @@ out_page:
* to fix it up. The async helper will wait for ordered extents, set
* the delalloc bit and make it safe to write the page.
*/
-static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
+int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end)
{
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -3159,8 +3142,8 @@ static void finish_ordered_fn(struct btrfs_work *work)
btrfs_finish_ordered_io(ordered_extent);
}
-static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
- struct extent_state *state, int uptodate)
+void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
+ u64 end, int uptodate)
{
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -3686,6 +3669,21 @@ cache_index:
* inode is not a directory, logging its parent unnecessarily.
*/
BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
+ /*
+ * Similar reasoning for last_link_trans, needs to be set otherwise
+ * for a case like the following:
+ *
+ * mkdir A
+ * touch foo
+ * ln foo A/bar
+ * echo 2 > /proc/sys/vm/drop_caches
+ * fsync foo
+ * <power failure>
+ *
+ * Would result in link bar and directory A not existing after the power
+ * failure.
+ */
+ BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
path->slots[0]++;
if (inode->i_nlink != 1 ||
@@ -4444,31 +4442,6 @@ out:
return err;
}
-static int truncate_space_check(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- u64 bytes_deleted)
-{
- struct btrfs_fs_info *fs_info = root->fs_info;
- int ret;
-
- /*
- * This is only used to apply pressure to the enospc system, we don't
- * intend to use this reservation at all.
- */
- bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
- bytes_deleted *= fs_info->nodesize;
- ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
- bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
- if (!ret) {
- trace_btrfs_space_reservation(fs_info, "transaction",
- trans->transid,
- bytes_deleted, 1);
- trans->bytes_reserved += bytes_deleted;
- }
- return ret;
-
-}
-
/*
* Return this if we need to call truncate_block for the last bit of the
* truncate.
@@ -4513,7 +4486,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u64 bytes_deleted = 0;
bool be_nice = false;
bool should_throttle = false;
- bool should_end = false;
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
@@ -4544,7 +4516,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
/*
* This function is also used to drop the items in the log tree before
* we relog the inode, so if root != BTRFS_I(inode)->root, it means
- * it is used to drop the loged items. So we shouldn't kill the delayed
+ * it is used to drop the logged items. So we shouldn't kill the delayed
* items.
*/
if (min_type == 0 && root == BTRFS_I(inode)->root)
@@ -4726,15 +4698,7 @@ delete:
btrfs_abort_transaction(trans, ret);
break;
}
- if (btrfs_should_throttle_delayed_refs(trans))
- btrfs_async_run_delayed_refs(fs_info,
- trans->delayed_ref_updates * 2,
- trans->transid, 0);
if (be_nice) {
- if (truncate_space_check(trans, root,
- extent_num_bytes)) {
- should_end = true;
- }
if (btrfs_should_throttle_delayed_refs(trans))
should_throttle = true;
}
@@ -4745,7 +4709,7 @@ delete:
if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot ||
- should_throttle || should_end) {
+ should_throttle) {
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path,
pending_del_slot,
@@ -4757,23 +4721,24 @@ delete:
pending_del_nr = 0;
}
btrfs_release_path(path);
- if (should_throttle) {
- unsigned long updates = trans->delayed_ref_updates;
- if (updates) {
- trans->delayed_ref_updates = 0;
- ret = btrfs_run_delayed_refs(trans,
- updates * 2);
- if (ret)
- break;
- }
- }
+
/*
- * if we failed to refill our space rsv, bail out
- * and let the transaction restart
+ * We can generate a lot of delayed refs, so we need to
+			 * throttle every once in a while and make sure we're
+ * adding enough space to keep up with the work we are
+ * generating. Since we hold a transaction here we
+ * can't flush, and we don't want to FLUSH_LIMIT because
+ * we could have generated too many delayed refs to
+ * actually allocate, so just bail if we're short and
+ * let the normal reservation dance happen higher up.
*/
- if (should_end) {
- ret = -EAGAIN;
- break;
+ if (should_throttle) {
+ ret = btrfs_delayed_refs_rsv_refill(fs_info,
+ BTRFS_RESERVE_NO_FLUSH);
+ if (ret) {
+ ret = -EAGAIN;
+ break;
+ }
}
goto search_again;
} else {
@@ -4799,18 +4764,6 @@ out:
}
btrfs_free_path(path);
-
- if (be_nice && bytes_deleted > SZ_32M && (ret >= 0 || ret == -EAGAIN)) {
- unsigned long updates = trans->delayed_ref_updates;
- int err;
-
- if (updates) {
- trans->delayed_ref_updates = 0;
- err = btrfs_run_delayed_refs(trans, updates * 2);
- if (err)
- ret = err;
- }
- }
return ret;
}
@@ -5155,7 +5108,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
truncate_setsize(inode, newsize);
- /* Disable nonlocked read DIO to avoid the end less truncate */
+ /* Disable nonlocked read DIO to avoid the endless truncate */
btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
inode_dio_wait(inode);
btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
@@ -5333,8 +5286,8 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
* Try to steal from the global reserve if there is space for
* it.
*/
- if (!btrfs_check_space_for_delayed_refs(trans) &&
- !btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, false))
+ if (!btrfs_check_space_for_delayed_refs(fs_info) &&
+ !btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0))
return trans;
/* If not, commit and try again. */
@@ -6406,14 +6359,19 @@ fail_dir_item:
err = btrfs_del_root_ref(trans, key.objectid,
root->root_key.objectid, parent_ino,
&local_index, name, name_len);
-
+ if (err)
+ btrfs_abort_transaction(trans, err);
} else if (add_backref) {
u64 local_index;
int err;
err = btrfs_del_inode_ref(trans, root, name, name_len,
ino, parent_ino, &local_index);
+ if (err)
+ btrfs_abort_transaction(trans, err);
}
+
+ /* Return the original error code */
return ret;
}
@@ -6625,6 +6583,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
if (err)
goto fail;
}
+ BTRFS_I(inode)->last_link_trans = trans->transid;
d_instantiate(dentry, inode);
ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
true, NULL);
@@ -6652,7 +6611,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
int err = 0;
- int drop_on_err = 0;
u64 objectid = 0;
u64 index = 0;
@@ -6678,7 +6636,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out_fail;
}
- drop_on_err = 1;
/* these must be set before we unlock the inode */
inode->i_op = &btrfs_dir_inode_operations;
inode->i_fop = &btrfs_dir_file_operations;
@@ -6699,7 +6656,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out_fail;
d_instantiate_new(dentry, inode);
- drop_on_err = 0;
out_fail:
btrfs_end_transaction(trans);
@@ -8053,9 +8009,7 @@ static void btrfs_endio_direct_read(struct bio *bio)
dio_bio->bi_status = err;
dio_end_io(dio_bio);
-
- if (io_bio->end_io)
- io_bio->end_io(io_bio, blk_status_to_errno(err));
+ btrfs_io_bio_free_csum(io_bio);
bio_put(bio);
}
@@ -8098,7 +8052,7 @@ static void __endio_write_update_ordered(struct inode *inode,
return;
/*
* Our bio might span multiple ordered extents. In this case
- * we keep goin until we have accounted the whole dio.
+ * we keep going until we have accounted the whole dio.
*/
if (ordered_offset < offset + bytes) {
ordered_bytes = offset + bytes - ordered_offset;
@@ -8408,8 +8362,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
if (!ret)
return;
- if (io_bio->end_io)
- io_bio->end_io(io_bio, ret);
+ btrfs_io_bio_free_csum(io_bio);
free_ordered:
/*
@@ -8912,7 +8865,7 @@ again:
/* page is wholly or partially inside EOF */
if (page_start + PAGE_SIZE > size)
- zero_start = size & ~PAGE_MASK;
+ zero_start = offset_in_page(size);
else
zero_start = PAGE_SIZE;
@@ -9157,6 +9110,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->index_cnt = (u64)-1;
ei->dir_index = 0;
ei->last_unlink_trans = 0;
+ ei->last_link_trans = 0;
ei->last_log_commit = 0;
spin_lock_init(&ei->lock);
@@ -9968,7 +9922,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
-static int start_delalloc_inodes(struct btrfs_root *root, int nr)
+static int start_delalloc_inodes(struct btrfs_root *root, int nr, bool snapshot)
{
struct btrfs_inode *binode;
struct inode *inode;
@@ -9996,6 +9950,9 @@ static int start_delalloc_inodes(struct btrfs_root *root, int nr)
}
spin_unlock(&root->delalloc_lock);
+ if (snapshot)
+ set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
+ &binode->runtime_flags);
work = btrfs_alloc_delalloc_work(inode);
if (!work) {
iput(inode);
@@ -10029,7 +9986,7 @@ out:
return ret;
}
-int btrfs_start_delalloc_inodes(struct btrfs_root *root)
+int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
@@ -10037,7 +9994,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root)
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return -EROFS;
- ret = start_delalloc_inodes(root, -1);
+ ret = start_delalloc_inodes(root, -1, true);
if (ret > 0)
ret = 0;
return ret;
@@ -10066,7 +10023,7 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr)
&fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
- ret = start_delalloc_inodes(root, nr);
+ ret = start_delalloc_inodes(root, nr, false);
btrfs_put_fs_root(root);
if (ret < 0)
goto out;
@@ -10445,26 +10402,6 @@ out:
return ret;
}
-__attribute__((const))
-static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
-{
- return -EAGAIN;
-}
-
-static void btrfs_check_extent_io_range(void *private_data, const char *caller,
- u64 start, u64 end)
-{
- struct inode *inode = private_data;
- u64 isize;
-
- isize = i_size_read(inode);
- if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
- btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
- "%s: ino %llu isize %llu odd range [%llu,%llu]",
- caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
- }
-}
-
void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
struct inode *inode = tree->private_data;
@@ -10481,6 +10418,343 @@ void btrfs_set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
}
}
+#ifdef CONFIG_SWAP
+/*
+ * Add an entry indicating a block group or device which is pinned by a
+ * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
+ * negative errno on failure.
+ */
+static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
+ bool is_block_group)
+{
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct btrfs_swapfile_pin *sp, *entry;
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+
+ sp = kmalloc(sizeof(*sp), GFP_NOFS);
+ if (!sp)
+ return -ENOMEM;
+ sp->ptr = ptr;
+ sp->inode = inode;
+ sp->is_block_group = is_block_group;
+
+ spin_lock(&fs_info->swapfile_pins_lock);
+ p = &fs_info->swapfile_pins.rb_node;
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
+ if (sp->ptr < entry->ptr ||
+ (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
+ p = &(*p)->rb_left;
+ } else if (sp->ptr > entry->ptr ||
+ (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
+ p = &(*p)->rb_right;
+ } else {
+ spin_unlock(&fs_info->swapfile_pins_lock);
+ kfree(sp);
+ return 1;
+ }
+ }
+ rb_link_node(&sp->node, parent, p);
+ rb_insert_color(&sp->node, &fs_info->swapfile_pins);
+ spin_unlock(&fs_info->swapfile_pins_lock);
+ return 0;
+}
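The tree is keyed on the (ptr, inode) pair, so one device or block group can be pinned by several swapfiles and one swapfile can pin several of each. struct btrfs_swapfile_pin is declared outside this hunk; presumably its shape is roughly:

	struct btrfs_swapfile_pin {
		struct rb_node node;
		void *ptr;		/* block group cache or btrfs_device */
		struct inode *inode;	/* swapfile owning this pin */
		bool is_block_group;	/* how to release ptr on removal */
	};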
+
+/* Free all of the entries pinned by this swapfile. */
+static void btrfs_free_swapfile_pins(struct inode *inode)
+{
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct btrfs_swapfile_pin *sp;
+ struct rb_node *node, *next;
+
+ spin_lock(&fs_info->swapfile_pins_lock);
+ node = rb_first(&fs_info->swapfile_pins);
+ while (node) {
+ next = rb_next(node);
+ sp = rb_entry(node, struct btrfs_swapfile_pin, node);
+ if (sp->inode == inode) {
+ rb_erase(&sp->node, &fs_info->swapfile_pins);
+ if (sp->is_block_group)
+ btrfs_put_block_group(sp->ptr);
+ kfree(sp);
+ }
+ node = next;
+ }
+ spin_unlock(&fs_info->swapfile_pins_lock);
+}
+
+struct btrfs_swap_info {
+ u64 start;
+ u64 block_start;
+ u64 block_len;
+ u64 lowest_ppage;
+ u64 highest_ppage;
+ unsigned long nr_pages;
+ int nr_extents;
+};
+
+static int btrfs_add_swap_extent(struct swap_info_struct *sis,
+ struct btrfs_swap_info *bsi)
+{
+ unsigned long nr_pages;
+ u64 first_ppage, first_ppage_reported, next_ppage;
+ int ret;
+
+ first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT;
+ next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len,
+ PAGE_SIZE) >> PAGE_SHIFT;
+
+ if (first_ppage >= next_ppage)
+ return 0;
+ nr_pages = next_ppage - first_ppage;
+
+ first_ppage_reported = first_ppage;
+ if (bsi->start == 0)
+ first_ppage_reported++;
+ if (bsi->lowest_ppage > first_ppage_reported)
+ bsi->lowest_ppage = first_ppage_reported;
+ if (bsi->highest_ppage < (next_ppage - 1))
+ bsi->highest_ppage = next_ppage - 1;
+
+ ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
+ if (ret < 0)
+ return ret;
+ bsi->nr_extents += ret;
+ bsi->nr_pages += nr_pages;
+ return 0;
+}
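The rounding is deliberately conservative: the start is rounded up and the end rounded down, so only whole pages fully covered by the extent are handed to the swap layer. A worked example with hypothetical numbers (4K pages):

	/*
	 * block_start = 6144, block_len = 20480  -> bytes [6144, 26623]
	 * first_ppage = ALIGN(6144, 4096) >> PAGE_SHIFT        = 2
	 * next_ppage  = ALIGN_DOWN(26624, 4096) >> PAGE_SHIFT  = 6
	 * nr_pages    = 4: pages 2..5 (bytes [8192, 24575]) are used,
	 * the partial pages at both ends are dropped.
	 */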
+
+static void btrfs_swap_deactivate(struct file *file)
+{
+ struct inode *inode = file_inode(file);
+
+ btrfs_free_swapfile_pins(inode);
+ atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
+}
+
+static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ sector_t *span)
+{
+ struct inode *inode = file_inode(file);
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ struct extent_state *cached_state = NULL;
+ struct extent_map *em = NULL;
+ struct btrfs_device *device = NULL;
+ struct btrfs_swap_info bsi = {
+ .lowest_ppage = (sector_t)-1ULL,
+ };
+ int ret = 0;
+ u64 isize;
+ u64 start;
+
+ /*
+ * If the swap file was just created, make sure delalloc is done. If the
+ * file changes again after this, the user is doing something stupid and
+ * we don't really care.
+ */
+ ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ if (ret)
+ return ret;
+
+ /*
+ * The inode is locked, so these flags won't change after we check them.
+ */
+ if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
+ btrfs_warn(fs_info, "swapfile must not be compressed");
+ return -EINVAL;
+ }
+ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
+ btrfs_warn(fs_info, "swapfile must not be copy-on-write");
+ return -EINVAL;
+ }
+ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
+ btrfs_warn(fs_info, "swapfile must not be checksummed");
+ return -EINVAL;
+ }
+
+ /*
+ * Balance or device remove/replace/resize can move stuff around from
+ * under us. The EXCL_OP flag makes sure they aren't running/won't run
+ * concurrently while we are mapping the swap extents, and
+ * fs_info->swapfile_pins prevents them from running while the swap file
+ * is active and moving the extents. Note that this also prevents a
+ * concurrent device add which isn't actually necessary, but it's not
+ * really worth the trouble to allow it.
+ */
+ if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+ btrfs_warn(fs_info,
+ "cannot activate swapfile while exclusive operation is running");
+ return -EBUSY;
+ }
+ /*
+ * Snapshots can create extents which require COW even if NODATACOW is
+ * set. We use this counter to prevent snapshots. We must increment it
+ * before walking the extents because we don't want a concurrent
+ * snapshot to run after we've already checked the extents.
+ */
+ atomic_inc(&BTRFS_I(inode)->root->nr_swapfiles);
+
+ isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
+
+ lock_extent_bits(io_tree, 0, isize - 1, &cached_state);
+ start = 0;
+ while (start < isize) {
+ u64 logical_block_start, physical_block_start;
+ struct btrfs_block_group_cache *bg;
+ u64 len = isize - start;
+
+ em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+
+ if (em->block_start == EXTENT_MAP_HOLE) {
+ btrfs_warn(fs_info, "swapfile must not have holes");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (em->block_start == EXTENT_MAP_INLINE) {
+ /*
+ * It's unlikely we'll ever actually find ourselves
+ * here, as a file small enough to fit inline won't be
+ * big enough to store more than the swap header, but in
+ * case something changes in the future, let's catch it
+ * here rather than later.
+ */
+ btrfs_warn(fs_info, "swapfile must not be inline");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
+ btrfs_warn(fs_info, "swapfile must not be compressed");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ logical_block_start = em->block_start + (start - em->start);
+ len = min(len, em->len - (start - em->start));
+ free_extent_map(em);
+ em = NULL;
+
+ ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL);
+ if (ret < 0) {
+ goto out;
+ } else if (ret) {
+ ret = 0;
+ } else {
+ btrfs_warn(fs_info,
+ "swapfile must not be copy-on-write");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ em = btrfs_get_chunk_map(fs_info, logical_block_start, len);
+ if (IS_ERR(em)) {
+ ret = PTR_ERR(em);
+ goto out;
+ }
+
+ if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ btrfs_warn(fs_info,
+ "swapfile must have single data profile");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (device == NULL) {
+ device = em->map_lookup->stripes[0].dev;
+ ret = btrfs_add_swapfile_pin(inode, device, false);
+ if (ret == 1)
+ ret = 0;
+ else if (ret)
+ goto out;
+ } else if (device != em->map_lookup->stripes[0].dev) {
+ btrfs_warn(fs_info, "swapfile must be on one device");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ physical_block_start = (em->map_lookup->stripes[0].physical +
+ (logical_block_start - em->start));
+ len = min(len, em->len - (logical_block_start - em->start));
+ free_extent_map(em);
+ em = NULL;
+
+ bg = btrfs_lookup_block_group(fs_info, logical_block_start);
+ if (!bg) {
+ btrfs_warn(fs_info,
+ "could not find block group containing swapfile");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = btrfs_add_swapfile_pin(inode, bg, true);
+ if (ret) {
+ btrfs_put_block_group(bg);
+ if (ret == 1)
+ ret = 0;
+ else
+ goto out;
+ }
+
+ if (bsi.block_len &&
+ bsi.block_start + bsi.block_len == physical_block_start) {
+ bsi.block_len += len;
+ } else {
+ if (bsi.block_len) {
+ ret = btrfs_add_swap_extent(sis, &bsi);
+ if (ret)
+ goto out;
+ }
+ bsi.start = start;
+ bsi.block_start = physical_block_start;
+ bsi.block_len = len;
+ }
+
+ start += len;
+ }
+
+ if (bsi.block_len)
+ ret = btrfs_add_swap_extent(sis, &bsi);
+
+out:
+ if (!IS_ERR_OR_NULL(em))
+ free_extent_map(em);
+
+ unlock_extent_cached(io_tree, 0, isize - 1, &cached_state);
+
+ if (ret)
+ btrfs_swap_deactivate(file);
+
+ clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
+
+ if (ret)
+ return ret;
+
+ if (device)
+ sis->bdev = device->bdev;
+ *span = bsi.highest_ppage - bsi.lowest_ppage + 1;
+ sis->max = bsi.nr_pages;
+ sis->pages = bsi.nr_pages - 1;
+ sis->highest_bit = bsi.nr_pages - 1;
+ return bsi.nr_extents;
+}
+#else
+static void btrfs_swap_deactivate(struct file *file)
+{
+}
+
+static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
+ sector_t *span)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
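For context, a btrfs swapfile has to be NOCOW before any data lands in it, which is a userspace responsibility; a hedged sketch of the activation sequence (error handling elided, the flag must be set while the file is still empty, and mkswap runs out of band):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/swap.h>
	#include <linux/fs.h>

	int fd = open("/swapfile", O_CREAT | O_WRONLY, 0600);
	int attr = 0;

	ioctl(fd, FS_IOC_GETFLAGS, &attr);
	attr |= FS_NOCOW_FL;			/* disable COW while empty */
	ioctl(fd, FS_IOC_SETFLAGS, &attr);
	/* ... fully allocate the file, run mkswap on it ... */
	swapon("/swapfile", 0);			/* ends up in btrfs_swap_activate() */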
static const struct inode_operations btrfs_dir_inode_operations = {
.getattr = btrfs_getattr,
.lookup = btrfs_lookup,
@@ -10523,17 +10797,6 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
/* mandatory callbacks */
.submit_bio_hook = btrfs_submit_bio_hook,
.readpage_end_io_hook = btrfs_readpage_end_io_hook,
- .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
-
- /* optional callbacks */
- .fill_delalloc = run_delalloc_range,
- .writepage_end_io_hook = btrfs_writepage_end_io_hook,
- .writepage_start_hook = btrfs_writepage_start_hook,
- .set_bit_hook = btrfs_set_bit_hook,
- .clear_bit_hook = btrfs_clear_bit_hook,
- .merge_extent_hook = btrfs_merge_extent_hook,
- .split_extent_hook = btrfs_split_extent_hook,
- .check_extent_io_range = btrfs_check_extent_io_range,
};
/*
@@ -10558,6 +10821,8 @@ static const struct address_space_operations btrfs_aops = {
.releasepage = btrfs_releasepage,
.set_page_dirty = btrfs_set_page_dirty,
.error_remove_page = generic_error_remove_page,
+ .swap_activate = btrfs_swap_activate,
+ .swap_deactivate = btrfs_swap_deactivate,
};
static const struct inode_operations btrfs_file_inode_operations = {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 802a628e9f7d..fab9443f6a42 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -290,6 +290,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
} else if (fsflags & FS_COMPR_FL) {
const char *comp;
+ if (IS_SWAPFILE(inode)) {
+ ret = -ETXTBSY;
+ goto out_unlock;
+ }
+
binode->flags |= BTRFS_INODE_COMPRESS;
binode->flags &= ~BTRFS_INODE_NOCOMPRESS;
@@ -754,6 +759,12 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
return -EINVAL;
+ if (atomic_read(&root->nr_swapfiles)) {
+ btrfs_warn(fs_info,
+ "cannot snapshot subvolume with active swapfile");
+ return -ETXTBSY;
+ }
+
pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
if (!pending_snapshot)
return -ENOMEM;
@@ -777,7 +788,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
wait_event(root->subv_writers->wait,
percpu_counter_sum(&root->subv_writers->counter) == 0);
- ret = btrfs_start_delalloc_inodes(root);
+ ret = btrfs_start_delalloc_snapshot(root);
if (ret)
goto dec_and_free;
@@ -1505,9 +1516,13 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
}
inode_lock(inode);
- if (do_compress)
- BTRFS_I(inode)->defrag_compress = compress_type;
- ret = cluster_pages_for_defrag(inode, pages, i, cluster);
+ if (IS_SWAPFILE(inode)) {
+ ret = -ETXTBSY;
+ } else {
+ if (do_compress)
+ BTRFS_I(inode)->defrag_compress = compress_type;
+ ret = cluster_pages_for_defrag(inode, pages, i, cluster);
+ }
if (ret < 0) {
inode_unlock(inode);
goto out_ra;
@@ -3135,7 +3150,7 @@ static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
}
rcu_read_unlock();
- memcpy(&fi_args->fsid, fs_info->fsid, sizeof(fi_args->fsid));
+ memcpy(&fi_args->fsid, fs_devices->fsid, sizeof(fi_args->fsid));
fi_args->nodesize = fs_info->nodesize;
fi_args->sectorsize = fs_info->sectorsize;
fi_args->clone_alignment = fs_info->sectorsize;
@@ -3191,92 +3206,6 @@ out:
return ret;
}
-static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
-{
- struct page *page;
-
- page = grab_cache_page(inode->i_mapping, index);
- if (!page)
- return ERR_PTR(-ENOMEM);
-
- if (!PageUptodate(page)) {
- int ret;
-
- ret = btrfs_readpage(NULL, page);
- if (ret)
- return ERR_PTR(ret);
- lock_page(page);
- if (!PageUptodate(page)) {
- unlock_page(page);
- put_page(page);
- return ERR_PTR(-EIO);
- }
- if (page->mapping != inode->i_mapping) {
- unlock_page(page);
- put_page(page);
- return ERR_PTR(-EAGAIN);
- }
- }
-
- return page;
-}
-
-static int gather_extent_pages(struct inode *inode, struct page **pages,
- int num_pages, u64 off)
-{
- int i;
- pgoff_t index = off >> PAGE_SHIFT;
-
- for (i = 0; i < num_pages; i++) {
-again:
- pages[i] = extent_same_get_page(inode, index + i);
- if (IS_ERR(pages[i])) {
- int err = PTR_ERR(pages[i]);
-
- if (err == -EAGAIN)
- goto again;
- pages[i] = NULL;
- return err;
- }
- }
- return 0;
-}
-
-static int lock_extent_range(struct inode *inode, u64 off, u64 len,
- bool retry_range_locking)
-{
- /*
- * Do any pending delalloc/csum calculations on inode, one way or
- * another, and lock file content.
- * The locking order is:
- *
- * 1) pages
- * 2) range in the inode's io tree
- */
- while (1) {
- struct btrfs_ordered_extent *ordered;
- lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
- ordered = btrfs_lookup_first_ordered_extent(inode,
- off + len - 1);
- if ((!ordered ||
- ordered->file_offset + ordered->len <= off ||
- ordered->file_offset >= off + len) &&
- !test_range_bit(&BTRFS_I(inode)->io_tree, off,
- off + len - 1, EXTENT_DELALLOC, 0, NULL)) {
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- break;
- }
- unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- if (!retry_range_locking)
- return -EAGAIN;
- btrfs_wait_ordered_range(inode, off, len);
- }
- return 0;
-}
-
static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
{
inode_unlock(inode1);
@@ -3292,261 +3221,32 @@ static void btrfs_double_inode_lock(struct inode *inode1, struct inode *inode2)
inode_lock_nested(inode2, I_MUTEX_CHILD);
}
-static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
- struct inode *inode2, u64 loff2, u64 len)
-{
- unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
- unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
-}
-
-static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
- struct inode *inode2, u64 loff2, u64 len,
- bool retry_range_locking)
-{
- int ret;
-
- if (inode1 < inode2) {
- swap(inode1, inode2);
- swap(loff1, loff2);
- }
- ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
- if (ret)
- return ret;
- ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
- if (ret)
- unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
- loff1 + len - 1);
- return ret;
-}
-
-struct cmp_pages {
- int num_pages;
- struct page **src_pages;
- struct page **dst_pages;
-};
-
-static void btrfs_cmp_data_free(struct cmp_pages *cmp)
-{
- int i;
- struct page *pg;
-
- for (i = 0; i < cmp->num_pages; i++) {
- pg = cmp->src_pages[i];
- if (pg) {
- unlock_page(pg);
- put_page(pg);
- cmp->src_pages[i] = NULL;
- }
- pg = cmp->dst_pages[i];
- if (pg) {
- unlock_page(pg);
- put_page(pg);
- cmp->dst_pages[i] = NULL;
- }
- }
-}
-
-static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
- struct inode *dst, u64 dst_loff,
- u64 len, struct cmp_pages *cmp)
-{
- int ret;
- int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT;
-
- cmp->num_pages = num_pages;
-
- ret = gather_extent_pages(src, cmp->src_pages, num_pages, loff);
- if (ret)
- goto out;
-
- ret = gather_extent_pages(dst, cmp->dst_pages, num_pages, dst_loff);
-
-out:
- if (ret)
- btrfs_cmp_data_free(cmp);
- return ret;
-}
-
-static int btrfs_cmp_data(u64 len, struct cmp_pages *cmp)
-{
- int ret = 0;
- int i;
- struct page *src_page, *dst_page;
- unsigned int cmp_len = PAGE_SIZE;
- void *addr, *dst_addr;
-
- i = 0;
- while (len) {
- if (len < PAGE_SIZE)
- cmp_len = len;
-
- BUG_ON(i >= cmp->num_pages);
-
- src_page = cmp->src_pages[i];
- dst_page = cmp->dst_pages[i];
- ASSERT(PageLocked(src_page));
- ASSERT(PageLocked(dst_page));
-
- addr = kmap_atomic(src_page);
- dst_addr = kmap_atomic(dst_page);
-
- flush_dcache_page(src_page);
- flush_dcache_page(dst_page);
-
- if (memcmp(addr, dst_addr, cmp_len))
- ret = -EBADE;
-
- kunmap_atomic(addr);
- kunmap_atomic(dst_addr);
-
- if (ret)
- break;
-
- len -= cmp_len;
- i++;
- }
-
- return ret;
-}
-
-static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
- u64 olen)
-{
- u64 len = *plen;
- u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;
-
- if (off + olen > inode->i_size || off + olen < off)
- return -EINVAL;
-
- /* if we extend to eof, continue to block boundary */
- if (off + len == inode->i_size)
- *plen = len = ALIGN(inode->i_size, bs) - off;
-
- /* Check that we are block aligned - btrfs_clone() requires this */
- if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
- return -EINVAL;
-
- return 0;
-}
-
static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
- struct inode *dst, u64 dst_loff,
- struct cmp_pages *cmp)
+ struct inode *dst, u64 dst_loff)
{
+ u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
int ret;
u64 len = olen;
- bool same_inode = (src == dst);
- u64 same_lock_start = 0;
- u64 same_lock_len = 0;
-
- ret = extent_same_check_offsets(src, loff, &len, olen);
- if (ret)
- return ret;
-
- ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
- if (ret)
- return ret;
-
- if (same_inode) {
- /*
- * Single inode case wants the same checks, except we
- * don't want our length pushed out past i_size as
- * comparing that data range makes no sense.
- *
- * extent_same_check_offsets() will do this for an
- * unaligned length at i_size, so catch it here and
- * reject the request.
- *
- * This effectively means we require aligned extents
- * for the single-inode case, whereas the other cases
- * allow an unaligned length so long as it ends at
- * i_size.
- */
- if (len != olen)
- return -EINVAL;
-
- /* Check for overlapping ranges */
- if (dst_loff + len > loff && dst_loff < loff + len)
- return -EINVAL;
-
- same_lock_start = min_t(u64, loff, dst_loff);
- same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
- } else {
- /*
- * If the source and destination inodes are different, the
- * source's range end offset matches the source's i_size, that
- * i_size is not a multiple of the sector size, and the
- * destination range does not go past the destination's i_size,
- * we must round down the length to the nearest sector size
- * multiple. If we don't do this adjustment we end replacing
- * with zeroes the bytes in the range that starts at the
- * deduplication range's end offset and ends at the next sector
- * size multiple.
- */
- if (loff + olen == i_size_read(src) &&
- dst_loff + len < i_size_read(dst)) {
- const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
-
- len = round_down(i_size_read(src), sz) - loff;
- if (len == 0)
- return 0;
- olen = len;
- }
- }
-
-again:
- ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, cmp);
- if (ret)
- return ret;
- if (same_inode)
- ret = lock_extent_range(src, same_lock_start, same_lock_len,
- false);
- else
- ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
- false);
+ if (loff + len == src->i_size)
+ len = ALIGN(src->i_size, bs) - loff;
/*
- * If one of the inodes has dirty pages in the respective range or
- * ordered extents, we need to flush dellaloc and wait for all ordered
- * extents in the range. We must unlock the pages and the ranges in the
- * io trees to avoid deadlocks when flushing delalloc (requires locking
- * pages) and when waiting for ordered extents to complete (they require
- * range locking).
+	 * For the same inode case we don't want our length pushed out past i_size
+ * as comparing that data range makes no sense.
+ *
+ * This effectively means we require aligned extents for the single
+ * inode case, whereas the other cases allow an unaligned length so long
+ * as it ends at i_size.
*/
- if (ret == -EAGAIN) {
- /*
- * Ranges in the io trees already unlocked. Now unlock all
- * pages before waiting for all IO to complete.
- */
- btrfs_cmp_data_free(cmp);
- if (same_inode) {
- btrfs_wait_ordered_range(src, same_lock_start,
- same_lock_len);
- } else {
- btrfs_wait_ordered_range(src, loff, len);
- btrfs_wait_ordered_range(dst, dst_loff, len);
- }
- goto again;
- }
- ASSERT(ret == 0);
- if (WARN_ON(ret)) {
- /* ranges in the io trees already unlocked */
- btrfs_cmp_data_free(cmp);
- return ret;
- }
-
- /* pass original length for comparison so we stay within i_size */
- ret = btrfs_cmp_data(olen, cmp);
- if (ret == 0)
- ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
-
- if (same_inode)
- unlock_extent(&BTRFS_I(src)->io_tree, same_lock_start,
- same_lock_start + same_lock_len - 1);
- else
- btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);
+ if (dst == src && len != olen)
+ return -EINVAL;
- btrfs_cmp_data_free(cmp);
+ /*
+ * Lock destination range to serialize with concurrent readpages().
+ */
+ lock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1);
+ ret = btrfs_clone(src, dst, loff, olen, len, dst_loff, 1);
+ unlock_extent(&BTRFS_I(dst)->io_tree, dst_loff, dst_loff + len - 1);
return ret;
}
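The removed compare-and-clone machinery is no longer needed because the VFS now performs the dedupe byte comparison before the filesystem callback runs; roughly:

	/* In the VFS (fs/read_write.c), for REMAP_FILE_DEDUP: */
	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					    &len, remap_flags);
	/* internally memcmp()s both ranges via vfs_dedupe_file_range_compare(),
	 * so the filesystem only has to lock the destination range and clone. */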
@@ -3557,58 +3257,27 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
struct inode *dst, u64 dst_loff)
{
int ret;
- struct cmp_pages cmp;
int num_pages = PAGE_ALIGN(BTRFS_MAX_DEDUPE_LEN) >> PAGE_SHIFT;
- bool same_inode = (src == dst);
u64 i, tail_len, chunk_count;
- if (olen == 0)
- return 0;
-
- if (same_inode)
- inode_lock(src);
- else
- btrfs_double_inode_lock(src, dst);
-
/* don't make the dst file partly checksummed */
if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
- (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM)) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ (BTRFS_I(dst)->flags & BTRFS_INODE_NODATASUM))
+ return -EINVAL;
+
+ if (IS_SWAPFILE(src) || IS_SWAPFILE(dst))
+ return -ETXTBSY;
tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);
if (chunk_count == 0)
num_pages = PAGE_ALIGN(tail_len) >> PAGE_SHIFT;
- /*
- * If deduping ranges in the same inode, locking rules make it
- * mandatory to always lock pages in ascending order to avoid deadlocks
- * with concurrent tasks (such as starting writeback/delalloc).
- */
- if (same_inode && dst_loff < loff)
- swap(loff, dst_loff);
-
- /*
- * We must gather up all the pages before we initiate our extent
- * locking. We use an array for the page pointers. Size of the array is
- * bounded by len, which is in turn bounded by BTRFS_MAX_DEDUPE_LEN.
- */
- cmp.src_pages = kvmalloc_array(num_pages, sizeof(struct page *),
- GFP_KERNEL | __GFP_ZERO);
- cmp.dst_pages = kvmalloc_array(num_pages, sizeof(struct page *),
- GFP_KERNEL | __GFP_ZERO);
- if (!cmp.src_pages || !cmp.dst_pages) {
- ret = -ENOMEM;
- goto out_free;
- }
-
for (i = 0; i < chunk_count; i++) {
ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
- dst, dst_loff, &cmp);
+ dst, dst_loff);
if (ret)
- goto out_free;
+ return ret;
loff += BTRFS_MAX_DEDUPE_LEN;
dst_loff += BTRFS_MAX_DEDUPE_LEN;
@@ -3616,17 +3285,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
if (tail_len > 0)
ret = btrfs_extent_same_range(src, loff, tail_len, dst,
- dst_loff, &cmp);
-
-out_free:
- kvfree(cmp.src_pages);
- kvfree(cmp.dst_pages);
-
-out_unlock:
- if (same_inode)
- inode_unlock(src);
- else
- btrfs_double_inode_unlock(src, dst);
+ dst_loff);
return ret;
}
@@ -4213,11 +3872,9 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
struct inode *inode = file_inode(file);
struct inode *src = file_inode(file_src);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
u64 len = olen;
u64 bs = fs_info->sb->s_blocksize;
- int same_inode = src == inode;
/*
* TODO:
@@ -4230,101 +3887,35 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
* be either compressed or non-compressed.
*/
- if (btrfs_root_readonly(root))
- return -EROFS;
-
- if (file_src->f_path.mnt != file->f_path.mnt ||
- src->i_sb != inode->i_sb)
- return -EXDEV;
-
- if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
- return -EISDIR;
-
- if (!same_inode) {
- btrfs_double_inode_lock(src, inode);
- } else {
- inode_lock(src);
- }
-
/* don't make the dst file partly checksummed */
if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
- (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
+ return -EINVAL;
+
+ if (IS_SWAPFILE(src) || IS_SWAPFILE(inode))
+ return -ETXTBSY;
- /* determine range to clone */
- ret = -EINVAL;
- if (off + len > src->i_size || off + len < off)
- goto out_unlock;
- if (len == 0)
- olen = len = src->i_size - off;
/*
- * If we extend to eof, continue to block boundary if and only if the
- * destination end offset matches the destination file's size, otherwise
- * we would be corrupting data by placing the eof block into the middle
- * of a file.
+ * VFS's generic_remap_file_range_prep() protects us from cloning the
+ * eof block into the middle of a file, which would result in corruption
+ * if the file size is not blocksize aligned. So we don't need to check
+ * for that case here.
*/
- if (off + len == src->i_size) {
- if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
- goto out_unlock;
+ if (off + len == src->i_size)
len = ALIGN(src->i_size, bs) - off;
- }
-
- if (len == 0) {
- ret = 0;
- goto out_unlock;
- }
-
- /* verify the end result is block aligned */
- if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
- !IS_ALIGNED(destoff, bs))
- goto out_unlock;
-
- /* verify if ranges are overlapped within the same file */
- if (same_inode) {
- if (destoff + len > off && destoff < off + len)
- goto out_unlock;
- }
if (destoff > inode->i_size) {
ret = btrfs_cont_expand(inode, inode->i_size, destoff);
if (ret)
- goto out_unlock;
+ return ret;
}
/*
- * Lock the target range too. Right after we replace the file extent
- * items in the fs tree (which now point to the cloned data), we might
- * have a worker replace them with extent items relative to a write
- * operation that was issued before this clone operation (i.e. confront
- * with inode.c:btrfs_finish_ordered_io).
+ * Lock destination range to serialize with concurrent readpages().
*/
- if (same_inode) {
- u64 lock_start = min_t(u64, off, destoff);
- u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
-
- ret = lock_extent_range(src, lock_start, lock_len, true);
- } else {
- ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
- true);
- }
- ASSERT(ret == 0);
- if (WARN_ON(ret)) {
- /* ranges in the io trees already unlocked */
- goto out_unlock;
- }
-
+ lock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1);
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
-
- if (same_inode) {
- u64 lock_start = min_t(u64, off, destoff);
- u64 lock_end = max_t(u64, off, destoff) + len - 1;
-
- unlock_extent(&BTRFS_I(src)->io_tree, lock_start, lock_end);
- } else {
- btrfs_double_extent_unlock(src, off, inode, destoff, len);
- }
+ unlock_extent(&BTRFS_I(inode)->io_tree, destoff, destoff + len - 1);
/*
* Truncate page cache pages so that future reads will see the cloned
* data immediately and not the previous data.
@@ -4332,11 +3923,87 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
truncate_inode_pages_range(&inode->i_data,
round_down(destoff, PAGE_SIZE),
round_up(destoff + len, PAGE_SIZE) - 1);
-out_unlock:
+
+ return ret;
+}
+
+static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ loff_t *len, unsigned int remap_flags)
+{
+ struct inode *inode_in = file_inode(file_in);
+ struct inode *inode_out = file_inode(file_out);
+ u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
+ bool same_inode = inode_out == inode_in;
+ u64 wb_len;
+ int ret;
+
+ if (!(remap_flags & REMAP_FILE_DEDUP)) {
+ struct btrfs_root *root_out = BTRFS_I(inode_out)->root;
+
+ if (btrfs_root_readonly(root_out))
+ return -EROFS;
+
+ if (file_in->f_path.mnt != file_out->f_path.mnt ||
+ inode_in->i_sb != inode_out->i_sb)
+ return -EXDEV;
+ }
+
+ if (same_inode)
+ inode_lock(inode_in);
+ else
+ btrfs_double_inode_lock(inode_in, inode_out);
+
+ /*
+ * Now that the inodes are locked, we need to start writeback ourselves
+	 * and cannot rely on the writeback from the VFS's generic helper
+ * generic_remap_file_range_prep() because:
+ *
+	 * 1) For compression we must call filemap_fdatawrite_range() twice
+	 * (btrfs_fdatawrite_range() does it for us), and the generic
+ * helper only calls it once;
+ *
+ * 2) filemap_fdatawrite_range(), called by the generic helper, only
+ * waits for the writeback to complete, i.e. for IO to be done, and
+ * not for the ordered extents to complete. We need to wait for them
+ * to complete so that new file extent items are in the fs tree.
+ */
+ if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
+ wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
+ else
+ wb_len = ALIGN(*len, bs);
+
+ /*
+ * Since we don't lock ranges, wait for ongoing lockless dio writes (as
+ * any in progress could create new ordered extents after we wait for
+ * existing ordered extents below).
+ */
+ inode_dio_wait(inode_in);
if (!same_inode)
- btrfs_double_inode_unlock(src, inode);
+ inode_dio_wait(inode_out);
+
+ ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
+ wb_len);
+ if (ret < 0)
+ goto out_unlock;
+ ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
+ wb_len);
+ if (ret < 0)
+ goto out_unlock;
+
+ ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
+ len, remap_flags);
+ if (ret < 0 || *len == 0)
+ goto out_unlock;
+
+ return 0;
+
+ out_unlock:
+ if (same_inode)
+ inode_unlock(inode_in);
else
- inode_unlock(src);
+ btrfs_double_inode_unlock(inode_in, inode_out);
+
return ret;
}
@@ -4344,29 +4011,29 @@ loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
struct file *dst_file, loff_t destoff, loff_t len,
unsigned int remap_flags)
{
+ struct inode *src_inode = file_inode(src_file);
+ struct inode *dst_inode = file_inode(dst_file);
+ bool same_inode = dst_inode == src_inode;
int ret;
if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
return -EINVAL;
- if (remap_flags & REMAP_FILE_DEDUP) {
- struct inode *src = file_inode(src_file);
- struct inode *dst = file_inode(dst_file);
- u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
-
- if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
- /*
- * Btrfs does not support blocksize < page_size. As a
- * result, btrfs_cmp_data() won't correctly handle
- * this situation without an update.
- */
- return -EINVAL;
- }
+ ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
+ &len, remap_flags);
+ if (ret < 0 || len == 0)
+ return ret;
- ret = btrfs_extent_same(src, off, len, dst, destoff);
- } else {
+ if (remap_flags & REMAP_FILE_DEDUP)
+ ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
+ else
ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);
- }
+
+ if (same_inode)
+ inode_unlock(src_inode);
+ else
+ btrfs_double_inode_unlock(src_inode, dst_inode);
+
return ret < 0 ? ret : len;
}
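
btrfs_double_inode_lock(), used by the prep helper above, must take both
inode locks in a stable global order so that two concurrent remaps on the
same pair of inodes cannot deadlock against each other. A minimal sketch of
that pattern, assuming address order is used as the tie-breaker (a
hypothetical helper, not necessarily the actual btrfs implementation):

static void double_inode_lock(struct inode *inode1, struct inode *inode2)
{
	/* Always lock the lower-addressed inode first. */
	if (inode1 > inode2)
		swap(inode1, inode2);
	inode_lock(inode1);
	inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}

Unlocking can happen in any order; only the acquisition order matters for
deadlock avoidance.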
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index b6a4cc178bee..90639140439f 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -27,7 +27,7 @@
* Records the total size (including the header) of compressed data.
*
* 2. Segment(s)
- * Variable size. Each segment includes one segment header, followd by data
+ * Variable size. Each segment includes one segment header, followed by data
* payload.
* One regular LZO compressed extent can have one or more segments.
* For inlined LZO compressed extent, only one segment is allowed.
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 0c4ef208b8b9..6fde2b2741ef 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -460,7 +460,6 @@ void btrfs_remove_ordered_extent(struct inode *inode,
struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
struct btrfs_root *root = btrfs_inode->root;
struct rb_node *node;
- bool dec_pending_ordered = false;
/* This is paired with btrfs_add_ordered_extent. */
spin_lock(&btrfs_inode->lock);
@@ -477,37 +476,8 @@ void btrfs_remove_ordered_extent(struct inode *inode,
if (tree->last == node)
tree->last = NULL;
set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
- if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
- dec_pending_ordered = true;
spin_unlock_irq(&tree->lock);
- /*
- * The current running transaction is waiting on us, we need to let it
- * know that we're complete and wake it up.
- */
- if (dec_pending_ordered) {
- struct btrfs_transaction *trans;
-
- /*
- * The checks for trans are just a formality, it should be set,
- * but if it isn't we don't want to deref/assert under the spin
- * lock, so be nice and check if trans is set, but ASSERT() so
- * if it isn't set a developer will notice.
- */
- spin_lock(&fs_info->trans_lock);
- trans = fs_info->running_transaction;
- if (trans)
- refcount_inc(&trans->use_count);
- spin_unlock(&fs_info->trans_lock);
-
- ASSERT(trans);
- if (trans) {
- if (atomic_dec_and_test(&trans->pending_ordered))
- wake_up(&trans->pending_wait);
- btrfs_put_transaction(trans);
- }
- }
-
spin_lock(&root->ordered_extent_lock);
list_del_init(&entry->root_extent_list);
root->nr_ordered_extents--;
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 02d813aaa261..fb9a161f0215 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -37,28 +37,31 @@ struct btrfs_ordered_sum {
* rbtree, just before waking any waiters. It is used to indicate the
* IO is done and any metadata is inserted into the tree.
*/
-#define BTRFS_ORDERED_IO_DONE 0 /* set when all the pages are written */
-
-#define BTRFS_ORDERED_COMPLETE 1 /* set when removed from the tree */
-
-#define BTRFS_ORDERED_NOCOW 2 /* set when we want to write in place */
-
-#define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */
-
-#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to preallocated extent */
-
-#define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */
-
-#define BTRFS_ORDERED_IOERR 6 /* We had an io error when writing this out */
-
-#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
- * has done its due diligence in updating
- * the isize. */
-#define BTRFS_ORDERED_TRUNCATED 8 /* Set when we have to truncate an extent */
-
-#define BTRFS_ORDERED_PENDING 9 /* We are waiting for this ordered extent to
- * complete in the current transaction. */
-#define BTRFS_ORDERED_REGULAR 10 /* Regular IO for COW */
+enum {
+ /* set when all the pages are written */
+ BTRFS_ORDERED_IO_DONE,
+ /* set when removed from the tree */
+ BTRFS_ORDERED_COMPLETE,
+ /* set when we want to write in place */
+ BTRFS_ORDERED_NOCOW,
+ /* writing a zlib compressed extent */
+ BTRFS_ORDERED_COMPRESSED,
+ /* set when writing to preallocated extent */
+ BTRFS_ORDERED_PREALLOC,
+ /* set when we're doing DIO with this extent */
+ BTRFS_ORDERED_DIRECT,
+ /* We had an io error when writing this out */
+ BTRFS_ORDERED_IOERR,
+ /*
+ * indicates whether this ordered extent has done its due diligence in
+ * updating the isize
+ */
+ BTRFS_ORDERED_UPDATED_ISIZE,
+ /* Set when we have to truncate an extent */
+ BTRFS_ORDERED_TRUNCATED,
+ /* Regular IO for COW */
+ BTRFS_ORDERED_REGULAR,
+};
struct btrfs_ordered_extent {
/* logical offset in the file */
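
Converting the BTRFS_ORDERED_* defines into an enum keeps the values
identical (they still count up from 0), so every existing bitops caller is
unaffected. A minimal sketch of how these bit numbers are used against an
entry's flags word (the waiter shown is hypothetical):

	unsigned long flags = 0;

	set_bit(BTRFS_ORDERED_IO_DONE, &flags);		/* all pages written */
	if (test_bit(BTRFS_ORDERED_IO_DONE, &flags))
		wake_up(&entry->wait);			/* hypothetical waiter */
	if (test_and_clear_bit(BTRFS_ORDERED_TRUNCATED, &flags))
		;					/* observed at most once */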
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 45868fd76209..4e473a998219 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -30,7 +30,7 @@
* - sync
* - copy also limits on subvol creation
* - limit
- * - caches fuer ulists
+ * - caches for ulists
* - performance benchmarks
* - check all ioctl parameters
*/
@@ -522,7 +522,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
__del_qgroup_rb(qgroup);
}
/*
- * we call btrfs_free_qgroup_config() when umounting
+ * We call btrfs_free_qgroup_config() when unmounting
* filesystem and disabling quota, so we set qgroup_ulist
* to be null here to avoid double free.
*/
@@ -1013,16 +1013,22 @@ out_add_root:
btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
- spin_lock(&fs_info->qgroup_lock);
- fs_info->quota_root = quota_root;
- set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
- spin_unlock(&fs_info->qgroup_lock);
ret = btrfs_commit_transaction(trans);
trans = NULL;
if (ret)
goto out_free_path;
+ /*
+ * Set quota enabled flag after committing the transaction, to avoid
+ * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
+ * creation.
+ */
+ spin_lock(&fs_info->qgroup_lock);
+ fs_info->quota_root = quota_root;
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ spin_unlock(&fs_info->qgroup_lock);
+
ret = qgroup_rescan_init(fs_info, 0, 1);
if (!ret) {
qgroup_rescan_zero_tracking(fs_info);
@@ -1122,7 +1128,7 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
* The easy accounting, we're updating qgroup relationship whose child qgroup
* only has exclusive extents.
*
- * In this case, all exclsuive extents will also be exlusive for parent, so
+ * In this case, all exclusive extents will also be exclusive for parent, so
* excl/rfer just get added/removed.
*
* So is qgroup reservation space, which should also be added/removed to
@@ -1749,14 +1755,14 @@ static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
*
* 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
* NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
- * They should be marked during preivous (@dst_level = 1) iteration.
+ * They should be marked during previous (@dst_level = 1) iteration.
*
* 3) Mark file extents in leaves dirty
* We don't have good way to pick out new file extents only.
* So we still follow the old method by scanning all file extents in
* the leave.
*
- * This function can free us from keeping two pathes, thus later we only need
+ * This function can free us from keeping two paths, thus later we only need
* to care about how to iterate all new tree blocks in reloc tree.
*/
static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
@@ -1895,7 +1901,7 @@ out:
*
* We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace
* above tree blocks along with their counter parts in file tree.
- * While during search, old tree blocsk OO(c) will be skiped as tree block swap
+ * While during search, old tree blocks OO(c) will be skipped as tree block swap
* won't affect OO(c).
*/
static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
@@ -2020,7 +2026,7 @@ out:
* Will go down the tree block pointed by @dst_eb (pointed by @dst_parent and
* @dst_slot), and find any tree blocks whose generation is at @last_snapshot,
* and then go down @src_eb (pointed by @src_parent and @src_slot) to find
- * the conterpart of the tree block, then mark both tree blocks as qgroup dirty,
+ * the counterpart of the tree block, then mark both tree blocks as qgroup dirty,
* and skip all tree blocks whose generation is smaller than last_snapshot.
*
* This would skip tons of tree blocks of original btrfs_qgroup_trace_subtree(),
@@ -2659,7 +2665,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
int i;
u64 *i_qgroups;
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *quota_root = fs_info->quota_root;
+ struct btrfs_root *quota_root;
struct btrfs_qgroup *srcgroup;
struct btrfs_qgroup *dstgroup;
u32 level_size = 0;
@@ -2669,6 +2675,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
goto out;
+ quota_root = fs_info->quota_root;
if (!quota_root) {
ret = -EINVAL;
goto out;
@@ -3103,9 +3110,6 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
mutex_unlock(&fs_info->qgroup_rescan_lock);
goto out;
}
- extent_buffer_get(scratch_leaf);
- btrfs_tree_read_lock(scratch_leaf);
- btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK);
slot = path->slots[0];
btrfs_release_path(path);
mutex_unlock(&fs_info->qgroup_rescan_lock);
@@ -3131,10 +3135,8 @@ static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
goto out;
}
out:
- if (scratch_leaf) {
- btrfs_tree_read_unlock_blocking(scratch_leaf);
+ if (scratch_leaf)
free_extent_buffer(scratch_leaf);
- }
if (done && !ret) {
ret = 1;
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index d8f78f5ab854..20c6bd5fa701 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -70,7 +70,7 @@ struct btrfs_qgroup_extent_record {
* be converted into META_PERTRANS.
*/
enum btrfs_qgroup_rsv_type {
- BTRFS_QGROUP_RSV_DATA = 0,
+ BTRFS_QGROUP_RSV_DATA,
BTRFS_QGROUP_RSV_META_PERTRANS,
BTRFS_QGROUP_RSV_META_PREALLOC,
BTRFS_QGROUP_RSV_LAST,
@@ -81,10 +81,10 @@ enum btrfs_qgroup_rsv_type {
*
* Each type should have different reservation behavior.
* E.g, data follows its io_tree flag modification, while
- * *currently* meta is just reserve-and-clear during transcation.
+ * *currently* meta is just reserve-and-clear during transaction.
*
* TODO: Add new type for reservation which can survive transaction commit.
- * Currect metadata reservation behavior is not suitable for such case.
+ * Current metadata reservation behavior is not suitable for such case.
*/
struct btrfs_qgroup_rsv {
u64 values[BTRFS_QGROUP_RSV_LAST];
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index df41d7049936..e74455eb42f9 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1980,7 +1980,7 @@ cleanup_io:
* - In case of single failure, where rbio->failb == -1:
*
* Cache this rbio iff the above read reconstruction is
- * excuted without problems.
+ * executed without problems.
*/
if (err == BLK_STS_OK && rbio->failb < 0)
cache_rbio_pages(rbio);
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index dec14b739b10..10d9589001a9 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -376,26 +376,28 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
goto error;
}
+ /* Insert extent in reada tree + all per-device trees, all or nothing */
+ down_read(&fs_info->dev_replace.rwsem);
ret = radix_tree_preload(GFP_KERNEL);
- if (ret)
+ if (ret) {
+ up_read(&fs_info->dev_replace.rwsem);
goto error;
+ }
- /* insert extent in reada_tree + all per-device trees, all or nothing */
- btrfs_dev_replace_read_lock(&fs_info->dev_replace);
spin_lock(&fs_info->reada_lock);
ret = radix_tree_insert(&fs_info->reada_tree, index, re);
if (ret == -EEXIST) {
re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
re_exist->refcnt++;
spin_unlock(&fs_info->reada_lock);
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
radix_tree_preload_end();
+ up_read(&fs_info->dev_replace.rwsem);
goto error;
}
if (ret) {
spin_unlock(&fs_info->reada_lock);
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
radix_tree_preload_end();
+ up_read(&fs_info->dev_replace.rwsem);
goto error;
}
radix_tree_preload_end();
@@ -437,13 +439,13 @@ static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
}
radix_tree_delete(&fs_info->reada_tree, index);
spin_unlock(&fs_info->reada_lock);
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+ up_read(&fs_info->dev_replace.rwsem);
goto error;
}
have_zone = 1;
}
spin_unlock(&fs_info->reada_lock);
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+ up_read(&fs_info->dev_replace.rwsem);
if (!have_zone)
goto error;
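
The reordering above keeps the radix_tree_preload()/radix_tree_preload_end()
pair nested inside the dev_replace rwsem. The preload pattern itself is worth
spelling out, since the insertion happens under a spinlock where allocation
would be forbidden; a generic sketch:

	/*
	 * Preallocate per-CPU radix tree nodes; may sleep, and on success
	 * returns with preemption disabled.
	 */
	ret = radix_tree_preload(GFP_KERNEL);
	if (ret)
		return ret;
	spin_lock(&lock);
	/* Cannot allocate here; insert consumes the preloaded nodes. */
	ret = radix_tree_insert(&tree, index, item);
	spin_unlock(&lock);
	radix_tree_preload_end();	/* re-enable preemption */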
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index d69fbfb30aa9..c3557c12656b 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -43,7 +43,7 @@ struct ref_entry {
* back to the delayed ref action. We hold the ref we are changing in the
* action so we can account for the history properly, and we record the root we
* were called with since it could be different from ref_root. We also store
- * stack traces because thats how I roll.
+ * stack traces because that's how I roll.
*/
struct ref_action {
int action;
@@ -56,7 +56,7 @@ struct ref_action {
/*
* One of these for every block we reference, it holds the roots and references
- * to it as well as all of the ref actions that have occured to it. We never
+ * to it as well as all of the ref actions that have occurred to it. We never
* free it until we unmount the file system in order to make sure re-allocations
* are happening properly.
*/
@@ -859,7 +859,7 @@ int btrfs_ref_tree_mod(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
* This shouldn't happen because we will add our re
* above when we lookup the be with !parent, but just in
* case catch this case so we don't panic because I
- * didn't thik of some other corner case.
+ * didn't think of some other corner case.
*/
btrfs_err(fs_info, "failed to find root %llu for %llu",
root->root_key.objectid, be->bytenr);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 924116f654a1..272b287f8cf0 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2631,7 +2631,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
* only one thread can access block_rsv at this point,
* so we don't need hold lock to protect block_rsv.
* we expand more reservation size here to allow enough
- * space for relocation and we will return eailer in
+ * space for relocation and we will return earlier in
* enospc case.
*/
rc->block_rsv->size = tmp + fs_info->nodesize *
@@ -3959,6 +3959,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
restart:
if (update_backref_cache(trans, &rc->backref_cache)) {
btrfs_end_transaction(trans);
+ trans = NULL;
continue;
}
@@ -4184,37 +4185,13 @@ static struct reloc_control *alloc_reloc_control(void)
static void describe_relocation(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group)
{
- char buf[128]; /* prefixed by a '|' that'll be dropped */
- u64 flags = block_group->flags;
+ char buf[128] = {'\0'};
- /* Shouldn't happen */
- if (!flags) {
- strcpy(buf, "|NONE");
- } else {
- char *bp = buf;
-
-#define DESCRIBE_FLAG(f, d) \
- if (flags & BTRFS_BLOCK_GROUP_##f) { \
- bp += snprintf(bp, buf - bp + sizeof(buf), "|%s", d); \
- flags &= ~BTRFS_BLOCK_GROUP_##f; \
- }
- DESCRIBE_FLAG(DATA, "data");
- DESCRIBE_FLAG(SYSTEM, "system");
- DESCRIBE_FLAG(METADATA, "metadata");
- DESCRIBE_FLAG(RAID0, "raid0");
- DESCRIBE_FLAG(RAID1, "raid1");
- DESCRIBE_FLAG(DUP, "dup");
- DESCRIBE_FLAG(RAID10, "raid10");
- DESCRIBE_FLAG(RAID5, "raid5");
- DESCRIBE_FLAG(RAID6, "raid6");
- if (flags)
- snprintf(bp, buf - bp + sizeof(buf), "|0x%llx", flags);
-#undef DESCRIBE_FLAG
- }
+ btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
btrfs_info(fs_info,
"relocating block group %llu flags %s",
- block_group->key.objectid, buf + 1);
+ block_group->key.objectid, buf);
}
/*
@@ -4222,6 +4199,7 @@ static void describe_relocation(struct btrfs_fs_info *fs_info,
*/
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
{
+ struct btrfs_block_group_cache *bg;
struct btrfs_root *extent_root = fs_info->extent_root;
struct reloc_control *rc;
struct inode *inode;
@@ -4230,14 +4208,23 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
int rw = 0;
int err = 0;
+ bg = btrfs_lookup_block_group(fs_info, group_start);
+ if (!bg)
+ return -ENOENT;
+
+ if (btrfs_pinned_by_swapfile(fs_info, bg)) {
+ btrfs_put_block_group(bg);
+ return -ETXTBSY;
+ }
+
rc = alloc_reloc_control();
- if (!rc)
+ if (!rc) {
+ btrfs_put_block_group(bg);
return -ENOMEM;
+ }
rc->extent_root = extent_root;
-
- rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
- BUG_ON(!rc->block_group);
+ rc->block_group = bg;
ret = btrfs_inc_block_group_ro(rc->block_group);
if (ret) {
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 902819d3cf41..6dcd36d7b849 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -339,7 +339,9 @@ static struct full_stripe_lock *insert_full_stripe_lock(
}
}
- /* Insert new lock */
+ /*
+ * Insert new lock.
+ */
ret = kmalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
@@ -568,12 +570,11 @@ static void scrub_put_ctx(struct scrub_ctx *sctx)
scrub_free_ctx(sctx);
}
-static noinline_for_stack
-struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
+static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
+ struct btrfs_fs_info *fs_info, int is_dev_replace)
{
struct scrub_ctx *sctx;
int i;
- struct btrfs_fs_info *fs_info = dev->fs_info;
sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
if (!sctx)
@@ -582,7 +583,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
sctx->is_dev_replace = is_dev_replace;
sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
sctx->curr = -1;
- sctx->fs_info = dev->fs_info;
+ sctx->fs_info = fs_info;
for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
struct scrub_bio *sbio;
@@ -832,6 +833,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
int page_num;
int success;
bool full_stripe_locked;
+ unsigned int nofs_flag;
static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
@@ -857,6 +859,16 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
dev = sblock_to_check->pagev[0]->dev;
/*
+ * We must use GFP_NOFS because the scrub task might be waiting for a
+ * worker task executing this function and in turn a transaction commit
+ * might be waiting for the scrub task to pause (which needs to wait for all
+ * the worker tasks to complete before pausing).
+ * We do allocations in the workers through insert_full_stripe_lock()
+ * and scrub_add_page_to_wr_bio(), which happens down the call chain of
+ * this function.
+ */
+ nofs_flag = memalloc_nofs_save();
+ /*
* For RAID5/6, race can happen for a different device scrub thread.
* For data corruption, Parity and Data threads will both try
* to recovery the data.
@@ -865,6 +877,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
*/
ret = lock_full_stripe(fs_info, logical, &full_stripe_locked);
if (ret < 0) {
+ memalloc_nofs_restore(nofs_flag);
spin_lock(&sctx->stat_lock);
if (ret == -ENOMEM)
sctx->stat.malloc_errors++;
@@ -904,7 +917,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
*/
sblocks_for_recheck = kcalloc(BTRFS_MAX_MIRRORS,
- sizeof(*sblocks_for_recheck), GFP_NOFS);
+ sizeof(*sblocks_for_recheck), GFP_KERNEL);
if (!sblocks_for_recheck) {
spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++;
@@ -1202,6 +1215,7 @@ out:
}
ret = unlock_full_stripe(fs_info, logical, full_stripe_locked);
+ memalloc_nofs_restore(nofs_flag);
if (ret < 0)
return ret;
return 0;
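
memalloc_nofs_save()/memalloc_nofs_restore(), used above, scope GFP_NOFS to
the whole call chain instead of threading a gfp_t argument through every
callee. A minimal sketch of the pattern, with hypothetical allocation sites:

	unsigned int nofs_flag;

	nofs_flag = memalloc_nofs_save();
	/*
	 * Any allocation below implicitly behaves as GFP_NOFS, even if the
	 * callee passes GFP_KERNEL, so reclaim cannot recurse into the fs.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	do_work_that_allocates();		/* hypothetical callee */
	memalloc_nofs_restore(nofs_flag);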
@@ -3540,7 +3554,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
if (!ret && sctx->is_dev_replace) {
/*
* If we are doing a device replace wait for any tasks
- * that started dellaloc right before we set the block
+ * that started delalloc right before we set the block
* group to RO mode, as they might have just allocated
* an extent from it or decided they could do a nocow
* write. And if any such tasks did that, wait for their
@@ -3596,11 +3610,12 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
break;
}
- btrfs_dev_replace_write_lock(&fs_info->dev_replace);
+ down_write(&fs_info->dev_replace.rwsem);
dev_replace->cursor_right = found_key.offset + length;
dev_replace->cursor_left = found_key.offset;
dev_replace->item_needs_writeback = 1;
- btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
+ up_write(&dev_replace->rwsem);
+
ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
found_key.offset, cache);
@@ -3636,10 +3651,10 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
scrub_pause_off(fs_info);
- btrfs_dev_replace_write_lock(&fs_info->dev_replace);
+ down_write(&fs_info->dev_replace.rwsem);
dev_replace->cursor_left = dev_replace->cursor_right;
dev_replace->item_needs_writeback = 1;
- btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
+ up_write(&fs_info->dev_replace.rwsem);
if (ro_set)
btrfs_dec_block_group_ro(cache);
@@ -3772,6 +3787,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
struct scrub_ctx *sctx;
int ret;
struct btrfs_device *dev;
+ unsigned int nofs_flag;
if (btrfs_fs_closing(fs_info))
return -EINVAL;
@@ -3813,13 +3829,18 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
return -EINVAL;
}
+ /* Allocate outside of device_list_mutex */
+ sctx = scrub_setup_ctx(fs_info, is_dev_replace);
+ if (IS_ERR(sctx))
+ return PTR_ERR(sctx);
mutex_lock(&fs_info->fs_devices->device_list_mutex);
dev = btrfs_find_device(fs_info, devid, NULL, NULL);
if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
!is_dev_replace)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_free_ctx;
}
if (!is_dev_replace && !readonly &&
@@ -3827,7 +3848,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
rcu_str_deref(dev->name));
- return -EROFS;
+ ret = -EROFS;
+ goto out_free_ctx;
}
mutex_lock(&fs_info->scrub_lock);
@@ -3835,34 +3857,29 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- return -EIO;
+ ret = -EIO;
+ goto out_free_ctx;
}
- btrfs_dev_replace_read_lock(&fs_info->dev_replace);
+ down_read(&fs_info->dev_replace.rwsem);
if (dev->scrub_ctx ||
(!is_dev_replace &&
btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+ up_read(&fs_info->dev_replace.rwsem);
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- return -EINPROGRESS;
+ ret = -EINPROGRESS;
+ goto out_free_ctx;
}
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+ up_read(&fs_info->dev_replace.rwsem);
ret = scrub_workers_get(fs_info, is_dev_replace);
if (ret) {
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- return ret;
+ goto out_free_ctx;
}
- sctx = scrub_setup_ctx(dev, is_dev_replace);
- if (IS_ERR(sctx)) {
- mutex_unlock(&fs_info->scrub_lock);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- scrub_workers_put(fs_info);
- return PTR_ERR(sctx);
- }
sctx->readonly = readonly;
dev->scrub_ctx = sctx;
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -3875,6 +3892,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
atomic_inc(&fs_info->scrubs_running);
mutex_unlock(&fs_info->scrub_lock);
+ /*
+ * In order to avoid deadlock with reclaim when there is a transaction
+ * trying to pause scrub, make sure we use GFP_NOFS for all the
+ * allocations done at scrub_pages() and scrub_pages_for_parity()
+ * invoked by our callees. The pausing request is done when the
+ * transaction commit starts, and it blocks the transaction until scrub
+ * is paused (done at specific points at scrub_stripe() or right above
+ * before incrementing fs_info->scrubs_running).
+ */
+ nofs_flag = memalloc_nofs_save();
if (!is_dev_replace) {
/*
* by holding device list mutex, we can
@@ -3887,6 +3914,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (!ret)
ret = scrub_enumerate_chunks(sctx, dev, start, end);
+ memalloc_nofs_restore(nofs_flag);
wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
atomic_dec(&fs_info->scrubs_running);
@@ -3905,6 +3933,11 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
scrub_put_ctx(sctx);
return ret;
+
+out_free_ctx:
+ scrub_free_ctx(sctx);
+
+ return ret;
}
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
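
The btrfs_scrub_dev() restructuring above follows a general rule: do sleeping
allocations before taking the mutexes, so allocation cannot recurse into
reclaim while the locks are held, and the error paths only ever need to free,
never unlock. A generic sketch with hypothetical names:

	ctx = alloc_scrub_context();		/* hypothetical, may sleep */
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&big_lock);
	ret = register_context(ctx);		/* hypothetical */
	mutex_unlock(&big_lock);
	if (ret)
		goto out_free_ctx;
	return 0;

out_free_ctx:
	free_scrub_context(ctx);
	return ret;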
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 094cc1444a90..1b15b43905f8 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -2238,7 +2238,7 @@ out:
* inodes "orphan" name instead of the real name and stop. Same with new inodes
* that were not created yet and overwritten inodes/refs.
*
- * When do we have have orphan inodes:
+ * When do we have orphan inodes:
* 1. When an inode is freshly created and thus no valid refs are available yet
* 2. When a directory lost all it's refs (deleted) but still has dir items
* inside which were not processed yet (pending for move/delete). If anyone
@@ -3340,7 +3340,8 @@ static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
kfree(m);
}
-static void tail_append_pending_moves(struct pending_dir_move *moves,
+static void tail_append_pending_moves(struct send_ctx *sctx,
+ struct pending_dir_move *moves,
struct list_head *stack)
{
if (list_empty(&moves->list)) {
@@ -3351,6 +3352,10 @@ static void tail_append_pending_moves(struct pending_dir_move *moves,
list_add_tail(&moves->list, stack);
list_splice_tail(&list, stack);
}
+ if (!RB_EMPTY_NODE(&moves->node)) {
+ rb_erase(&moves->node, &sctx->pending_dir_moves);
+ RB_CLEAR_NODE(&moves->node);
+ }
}
static int apply_children_dir_moves(struct send_ctx *sctx)
@@ -3365,7 +3370,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
return 0;
INIT_LIST_HEAD(&stack);
- tail_append_pending_moves(pm, &stack);
+ tail_append_pending_moves(sctx, pm, &stack);
while (!list_empty(&stack)) {
pm = list_first_entry(&stack, struct pending_dir_move, list);
@@ -3376,7 +3381,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
goto out;
pm = get_pending_dir_moves(sctx, parent_ino);
if (pm)
- tail_append_pending_moves(pm, &stack);
+ tail_append_pending_moves(sctx, pm, &stack);
}
return 0;
@@ -3849,7 +3854,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
/*
* We may have refs where the parent directory does not exist
* yet. This happens if the parent directories inum is higher
- * the the current inum. To handle this case, we create the
+ * than the current inum. To handle this case, we create the
* parent directory out of order. But we need to check if this
* did already happen before due to other refs in the same dir.
*/
@@ -4770,7 +4775,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
struct btrfs_key key;
pgoff_t index = offset >> PAGE_SHIFT;
pgoff_t last_index;
- unsigned pg_offset = offset & ~PAGE_MASK;
+ unsigned pg_offset = offset_in_page(offset);
ssize_t ret = 0;
key.objectid = sctx->cur_ino;
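
offset_in_page(), used above, is the canonical spelling of the open-coded
offset & ~PAGE_MASK. A small worked example, assuming 4 KiB pages:

	loff_t offset = 0x12345;
	pgoff_t index = offset >> PAGE_SHIFT;		/* 0x12: which page */
	unsigned int pg_off = offset_in_page(offset);	/* 0x345: byte within it */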
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index cbc9d0d2c12d..368a5b9e6c13 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -93,7 +93,7 @@ const char *btrfs_decode_error(int errno)
/*
* __btrfs_handle_fs_error decodes expected errors from the caller and
- * invokes the approciate error response.
+ * invokes the appropriate error response.
*/
__cold
void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
@@ -151,7 +151,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
* although there is no way to update the progress. It would add the
* risk of a deadlock, therefore the canceling is omitted. The only
* penalty is that some I/O remains active until the procedure
- * completes. The next time when the filesystem is mounted writeable
+ * completes. The next time when the filesystem is mounted writable
* again, the device replace operation continues.
*/
}
@@ -1848,7 +1848,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
if (!btrfs_check_rw_degradable(fs_info, NULL)) {
btrfs_warn(fs_info,
- "too many missing devices, writeable remount is not allowed");
+ "too many missing devices, writable remount is not allowed");
ret = -EACCES;
goto restore;
}
@@ -2090,7 +2090,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
u64 total_free_data = 0;
u64 total_free_meta = 0;
int bits = dentry->d_sb->s_blocksize_bits;
- __be32 *fsid = (__be32 *)fs_info->fsid;
+ __be32 *fsid = (__be32 *)fs_info->fs_devices->fsid;
unsigned factor = 1;
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
int ret;
@@ -2237,6 +2237,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
vol = memdup_user((void __user *)arg, sizeof(*vol));
if (IS_ERR(vol))
return PTR_ERR(vol);
+ vol->name[BTRFS_PATH_NAME_MAX] = '\0';
switch (cmd) {
case BTRFS_IOC_SCAN_DEV:
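
The added vol->name assignment above applies the standard rule for fixed-size
strings copied from userspace: terminate them before use, since nothing
guarantees the caller supplied a NUL byte. A minimal sketch of the pattern:

	vol = memdup_user(arg, sizeof(*vol));	/* raw copy, no NUL guarantee */
	if (IS_ERR(vol))
		return PTR_ERR(vol);
	vol->name[BTRFS_PATH_NAME_MAX] = '\0';	/* clamp to a valid C string */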
@@ -2311,7 +2312,7 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
* device_list_mutex here as we only read the device data and the list
* is protected by RCU. Even if a device is deleted during the list
* traversals, we'll get valid data, the freeing callback will wait at
- * least until until the rcu_read_unlock.
+ * least until the rcu_read_unlock.
*/
rcu_read_lock();
cur_devices = fs_info->fs_devices;
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 3717c864ba23..5a5930e3d32b 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -191,6 +191,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF);
BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56);
BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA);
BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES);
+BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID);
BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
static struct attribute *btrfs_supported_feature_attrs[] = {
@@ -204,6 +205,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(raid56),
BTRFS_FEAT_ATTR_PTR(skinny_metadata),
BTRFS_FEAT_ATTR_PTR(no_holes),
+ BTRFS_FEAT_ATTR_PTR(metadata_uuid),
BTRFS_FEAT_ATTR_PTR(free_space_tree),
NULL
};
@@ -505,12 +507,24 @@ static ssize_t quota_override_store(struct kobject *kobj,
BTRFS_ATTR_RW(, quota_override, quota_override_show, quota_override_store);
+static ssize_t btrfs_metadata_uuid_show(struct kobject *kobj,
+ struct kobj_attribute *a, char *buf)
+{
+ struct btrfs_fs_info *fs_info = to_fs_info(kobj);
+
+ return snprintf(buf, PAGE_SIZE, "%pU\n",
+ fs_info->fs_devices->metadata_uuid);
+}
+
+BTRFS_ATTR(, metadata_uuid, btrfs_metadata_uuid_show);
+
static const struct attribute *btrfs_attrs[] = {
BTRFS_ATTR_PTR(, label),
BTRFS_ATTR_PTR(, nodesize),
BTRFS_ATTR_PTR(, sectorsize),
BTRFS_ATTR_PTR(, clone_alignment),
BTRFS_ATTR_PTR(, quota_override),
+ BTRFS_ATTR_PTR(, metadata_uuid),
NULL,
};
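
The new metadata_uuid attribute lands next to the existing per-filesystem
entries, so it should be readable as a plain file. A userspace sketch; the
/sys/fs/btrfs/<fsid>/ path layout is assumed from the existing attributes
such as label:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int read_metadata_uuid(const char *fsid, char *out, size_t len)
{
	char path[256];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "/sys/fs/btrfs/%s/metadata_uuid", fsid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, out, len - 1);
	close(fd);
	if (n < 0)
		return -1;
	out[n] = '\0';		/* UUID string plus trailing newline */
	return 0;
}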
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index c6ee600aff89..40716b357c1d 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -9,7 +9,7 @@
extern u64 btrfs_debugfs_test;
enum btrfs_feature_set {
- FEAT_COMPAT = 0,
+ FEAT_COMPAT,
FEAT_COMPAT_RO,
FEAT_INCOMPAT,
FEAT_MAX
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index db72b3b6209e..8a59597f1883 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -174,8 +174,10 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
/* Will be freed by btrfs_free_fs_roots */
if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
return;
- if (root->node)
+ if (root->node) {
+ /* One for allocate_extent_buffer */
free_extent_buffer(root->node);
+ }
kfree(root);
}
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 9e0f4a01be14..3c46d7f23456 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -62,10 +62,11 @@ static int test_find_delalloc(u32 sectorsize)
struct page *page;
struct page *locked_page = NULL;
unsigned long index = 0;
- u64 total_dirty = SZ_256M;
- u64 max_bytes = SZ_128M;
+ /* In this test we need at least 2 file extents at its maximum size */
+ u64 max_bytes = BTRFS_MAX_EXTENT_SIZE;
+ u64 total_dirty = 2 * max_bytes;
u64 start, end, test_start;
- u64 found;
+ bool found;
int ret = -EINVAL;
test_msg("running find delalloc tests");
@@ -76,7 +77,7 @@ static int test_find_delalloc(u32 sectorsize)
return -ENOMEM;
}
- extent_io_tree_init(&tmp, inode);
+ extent_io_tree_init(&tmp, NULL);
/*
* First go through and create and mark all of our pages dirty, we pin
@@ -106,8 +107,8 @@ static int test_find_delalloc(u32 sectorsize)
set_extent_delalloc(&tmp, 0, sectorsize - 1, 0, NULL);
start = 0;
end = 0;
- found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
- &end, max_bytes);
+ found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ &end);
if (!found) {
test_err("should have found at least one delalloc");
goto out_bits;
@@ -137,8 +138,8 @@ static int test_find_delalloc(u32 sectorsize)
set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, 0, NULL);
start = test_start;
end = 0;
- found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
- &end, max_bytes);
+ found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ &end);
if (!found) {
test_err("couldn't find delalloc in our range");
goto out_bits;
@@ -171,8 +172,8 @@ static int test_find_delalloc(u32 sectorsize)
}
start = test_start;
end = 0;
- found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
- &end, max_bytes);
+ found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ &end);
if (found) {
test_err("found range when we shouldn't have");
goto out_bits;
@@ -192,8 +193,8 @@ static int test_find_delalloc(u32 sectorsize)
set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, 0, NULL);
start = test_start;
end = 0;
- found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
- &end, max_bytes);
+ found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ &end);
if (!found) {
test_err("didn't find our range");
goto out_bits;
@@ -233,8 +234,8 @@ static int test_find_delalloc(u32 sectorsize)
* this changes at any point in the future we will need to fix this
* tests expected behavior.
*/
- found = btrfs_find_lock_delalloc_range(inode, &tmp, locked_page, &start,
- &end, max_bytes);
+ found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
+ &end);
if (!found) {
test_err("didn't find our range");
goto out_bits;
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 64043f028820..af0c8e30d9e2 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -254,11 +254,6 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
goto out;
}
- /*
- * We will just free a dummy node if it's ref count is 2 so we need an
- * extra ref so our searches don't accidentally release our page.
- */
- extent_buffer_get(root->node);
btrfs_set_header_nritems(root->node, 0);
btrfs_set_header_level(root->node, 0);
ret = -EINVAL;
@@ -860,7 +855,6 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
goto out;
}
- extent_buffer_get(root->node);
btrfs_set_header_nritems(root->node, 0);
btrfs_set_header_level(root->node, 0);
BTRFS_I(inode)->root = root;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index d1eeef9ec5da..127fa1535f58 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -233,14 +233,12 @@ loop:
extwriter_counter_init(cur_trans, type);
init_waitqueue_head(&cur_trans->writer_wait);
init_waitqueue_head(&cur_trans->commit_wait);
- init_waitqueue_head(&cur_trans->pending_wait);
cur_trans->state = TRANS_STATE_RUNNING;
/*
* One for this trans handle, one so it will live on until we
* commit the transaction.
*/
refcount_set(&cur_trans->use_count, 2);
- atomic_set(&cur_trans->pending_ordered, 0);
cur_trans->flags = 0;
cur_trans->start_time = ktime_get_seconds();
@@ -456,7 +454,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
bool enforce_qgroups)
{
struct btrfs_fs_info *fs_info = root->fs_info;
-
+ struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
struct btrfs_trans_handle *h;
struct btrfs_transaction *cur_trans;
u64 num_bytes = 0;
@@ -485,13 +483,28 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
* the appropriate flushing if need be.
*/
if (num_items && root != fs_info->chunk_root) {
+ struct btrfs_block_rsv *rsv = &fs_info->trans_block_rsv;
+ u64 delayed_refs_bytes = 0;
+
qgroup_reserved = num_items * fs_info->nodesize;
ret = btrfs_qgroup_reserve_meta_pertrans(root, qgroup_reserved,
enforce_qgroups);
if (ret)
return ERR_PTR(ret);
+ /*
+ * We want to reserve all the bytes we may need all at once, so
+ * we only do 1 enospc flushing cycle per transaction start. We
+ * accomplish this by simply assuming we'll do 2 x num_items
+ * worth of delayed refs updates in this trans handle, and
+ * refill that amount for whatever is missing in the reserve.
+ */
num_bytes = btrfs_calc_trans_metadata_size(fs_info, num_items);
+ if (delayed_refs_rsv->full == 0) {
+ delayed_refs_bytes = num_bytes;
+ num_bytes <<= 1;
+ }
+
/*
* Do the reservation for the relocation root creation
*/
@@ -500,8 +513,24 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
reloc_reserved = true;
}
- ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
- num_bytes, flush);
+ ret = btrfs_block_rsv_add(root, rsv, num_bytes, flush);
+ if (ret)
+ goto reserve_fail;
+ if (delayed_refs_bytes) {
+ btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
+ delayed_refs_bytes);
+ num_bytes -= delayed_refs_bytes;
+ }
+ } else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
+ !delayed_refs_rsv->full) {
+ /*
+ * Some people call with btrfs_start_transaction(root, 0)
+ * because they can be throttled, but have some other mechanism
+ * for reserving space. We still want these guys to refill the
+ * delayed block_rsv so just add 1 items worth of reservation
+ * here.
+ */
+ ret = btrfs_delayed_refs_rsv_refill(fs_info, flush);
if (ret)
goto reserve_fail;
}
@@ -670,7 +699,7 @@ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
/*
* btrfs_attach_transaction_barrier() - catch the running transaction
*
- * It is similar to the above function, the differentia is this one
+ * It is similar to the above function, the difference is this one
* will wait for all the inactive transactions until they fully
* complete.
*/
@@ -760,7 +789,7 @@ static int should_end_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- if (btrfs_check_space_for_delayed_refs(trans))
+ if (btrfs_check_space_for_delayed_refs(fs_info))
return 1;
return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
@@ -769,22 +798,12 @@ static int should_end_transaction(struct btrfs_trans_handle *trans)
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_transaction *cur_trans = trans->transaction;
- int updates;
- int err;
smp_mb();
if (cur_trans->state >= TRANS_STATE_BLOCKED ||
cur_trans->delayed_refs.flushing)
return 1;
- updates = trans->delayed_ref_updates;
- trans->delayed_ref_updates = 0;
- if (updates) {
- err = btrfs_run_delayed_refs(trans, updates * 2);
- if (err) /* Error code will also eval true */
- return err;
- }
-
return should_end_transaction(trans);
}
@@ -814,11 +833,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
- u64 transid = trans->transid;
- unsigned long cur = trans->delayed_ref_updates;
int lock = (trans->type != TRANS_JOIN_NOLOCK);
int err = 0;
- int must_run_delayed_refs = 0;
if (refcount_read(&trans->use_count) > 1) {
refcount_dec(&trans->use_count);
@@ -832,27 +848,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans);
- trans->delayed_ref_updates = 0;
- if (!trans->sync) {
- must_run_delayed_refs =
- btrfs_should_throttle_delayed_refs(trans);
- cur = max_t(unsigned long, cur, 32);
-
- /*
- * don't make the caller wait if they are from a NOLOCK
- * or ATTACH transaction, it will deadlock with commit
- */
- if (must_run_delayed_refs == 1 &&
- (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
- must_run_delayed_refs = 2;
- }
-
- btrfs_trans_release_metadata(trans);
- trans->block_rsv = NULL;
-
- if (!list_empty(&trans->new_bgs))
- btrfs_create_pending_block_groups(trans);
-
btrfs_trans_release_chunk_metadata(trans);
if (lock && should_end_transaction(trans) &&
@@ -894,10 +889,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
}
kmem_cache_free(btrfs_trans_handle_cachep, trans);
- if (must_run_delayed_refs) {
- btrfs_async_run_delayed_refs(info, cur, transid,
- must_run_delayed_refs == 1);
- }
return err;
}
@@ -1338,7 +1329,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
return 0;
/*
- * Ensure dirty @src will be commited. Or, after comming
+ * Ensure dirty @src will be committed. Or, after coming
* commit_fs_roots() and switch_commit_roots(), any dirty but not
* recorded root will never be updated again, causing an outdated root
* item.
@@ -1842,7 +1833,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
- DEFINE_WAIT(wait);
WARN_ON(refcount_read(&trans->use_count) > 1);
@@ -1911,13 +1901,6 @@ static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
-static inline void
-btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans)
-{
- wait_event(cur_trans->pending_wait,
- atomic_read(&cur_trans->pending_ordered) == 0);
-}
-
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -2052,8 +2035,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
btrfs_wait_delalloc_flush(fs_info);
- btrfs_wait_pending_ordered(cur_trans);
-
btrfs_scrub_pause(fs_info);
/*
* Ok now we need to make sure to block out any other joins while we
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 4cbb1b55387d..f1ba78949d1b 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -12,13 +12,13 @@
#include "ctree.h"
enum btrfs_trans_state {
- TRANS_STATE_RUNNING = 0,
- TRANS_STATE_BLOCKED = 1,
- TRANS_STATE_COMMIT_START = 2,
- TRANS_STATE_COMMIT_DOING = 3,
- TRANS_STATE_UNBLOCKED = 4,
- TRANS_STATE_COMPLETED = 5,
- TRANS_STATE_MAX = 6,
+ TRANS_STATE_RUNNING,
+ TRANS_STATE_BLOCKED,
+ TRANS_STATE_COMMIT_START,
+ TRANS_STATE_COMMIT_DOING,
+ TRANS_STATE_UNBLOCKED,
+ TRANS_STATE_COMPLETED,
+ TRANS_STATE_MAX,
};
#define BTRFS_TRANS_HAVE_FREE_BGS 0
@@ -39,7 +39,6 @@ struct btrfs_transaction {
*/
atomic_t num_writers;
refcount_t use_count;
- atomic_t pending_ordered;
unsigned long flags;
@@ -51,7 +50,6 @@ struct btrfs_transaction {
time64_t start_time;
wait_queue_head_t writer_wait;
wait_queue_head_t commit_wait;
- wait_queue_head_t pending_wait;
struct list_head pending_snapshots;
struct list_head pending_chunks;
struct list_head switch_commits;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index efcf89a8ba44..a62e1e837a89 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -27,10 +27,10 @@
*
* @type: leaf or node
* @identifier: the necessary info to locate the leaf/node.
- * It's recommened to decode key.objecitd/offset if it's
+ * It's recommended to decode key.objectid/offset if it's
* meaningful.
* @reason: describe the error
- * @bad_value: optional, it's recommened to output bad value and its
+ * @bad_value: optional, it's recommended to output the bad value and its
* expected value (range).
*
* Since comma is used to separate the components, only space is allowed
@@ -130,7 +130,7 @@ static int check_extent_data_item(struct btrfs_fs_info *fs_info,
}
/*
- * Support for new compression/encrption must introduce incompat flag,
+ * Support for new compression/encryption must introduce incompat flag,
* and must be caught in open_ctree().
*/
if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
@@ -389,13 +389,11 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
/*
* Here we don't really care about alignment since extent allocator can
- * handle it. We care more about the size, as if one block group is
- * larger than maximum size, it's must be some obvious corruption.
+ * handle it. We care more about the size.
*/
- if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) {
+ if (key->offset == 0) {
block_group_err(fs_info, leaf, slot,
- "invalid block group size, have %llu expect (0, %llu]",
- key->offset, BTRFS_MAX_DATA_CHUNK_SIZE);
+ "invalid block group size 0");
return -EUCLEAN;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a5ce99a6c936..ac232b3d6d7e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1144,7 +1144,7 @@ next:
}
btrfs_release_path(path);
- /* look for a conflicing name */
+ /* look for a conflicting name */
di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
name, namelen, 0);
if (di && !IS_ERR(di)) {
@@ -3149,7 +3149,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&log_root_tree->log_mutex);
/*
- * nobody else is going to jump in and write the the ctree
+ * Nobody else is going to jump in and write the ctree
* super here because the log_commit atomic below is protecting
* us. We must be called with a transaction handle pinning
* the running transaction open, so a full commit can't hop
@@ -3201,8 +3201,6 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *log)
{
int ret;
- u64 start;
- u64 end;
struct walk_control wc = {
.free = 1,
.process_func = process_one_buffer
@@ -3216,18 +3214,8 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
btrfs_handle_fs_error(log->fs_info, ret, NULL);
}
- while (1) {
- ret = find_first_extent_bit(&log->dirty_log_pages,
- 0, &start, &end,
- EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
- NULL);
- if (ret)
- break;
-
- clear_extent_bits(&log->dirty_log_pages, start, end,
- EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
- }
-
+ clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
+ EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
free_extent_buffer(log->node);
kfree(log);
}
@@ -4383,7 +4371,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
struct extent_map *em, *n;
struct list_head extents;
struct extent_map_tree *tree = &inode->extent_tree;
- u64 logged_start, logged_end;
u64 test_gen;
int ret = 0;
int num = 0;
@@ -4392,8 +4379,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
write_lock(&tree->lock);
test_gen = root->fs_info->last_trans_committed;
- logged_start = start;
- logged_end = end;
list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
/*
@@ -4434,11 +4419,6 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
em->start >= i_size_read(&inode->vfs_inode))
continue;
- if (em->start < logged_start)
- logged_start = em->start;
- if ((em->start + em->len - 1) > logged_end)
- logged_end = em->start + em->len - 1;
-
/* Need a ref to keep it from getting evicted from cache */
refcount_inc(&em->refs);
set_bit(EXTENT_FLAG_LOGGING, &em->flags);
@@ -5778,6 +5758,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
goto end_trans;
}
+ /*
+ * If a new hard link was added to the inode in the current transaction
+ * and its link count is now greater than 1, we need to fallback to a
+ * transaction commit, otherwise we can end up not logging all its new
+ * parents for all the hard links. Here just from the dentry used to
+ * fsync, we can not visit the ancestor inodes for all the other hard
+ * links to figure out if any is new, so we fallback to a transaction
+ * commit (instead of adding a lot of complexity of scanning a btree,
+ * since this scenario is not a common use case).
+ */
+ if (inode->vfs_inode.i_nlink > 1 &&
+ inode->last_link_trans > last_committed) {
+ ret = -EMLINK;
+ goto end_trans;
+ }
+
while (1) {
if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
break;
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h
index 767765031e59..0fab84a8f670 100644
--- a/fs/btrfs/tree-log.h
+++ b/fs/btrfs/tree-log.h
@@ -15,7 +15,6 @@
struct btrfs_log_ctx {
int log_ret;
int log_transid;
- int io_err;
bool log_new_dentries;
struct inode *inode;
struct list_head list;
@@ -26,7 +25,6 @@ static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx,
{
ctx->log_ret = 0;
ctx->log_transid = 0;
- ctx->io_err = 0;
ctx->log_new_dentries = false;
ctx->inode = inode;
INIT_LIST_HEAD(&ctx->list);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f435d397019e..2576b1a379c9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -37,6 +37,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
.tolerated_failures = 1,
.devs_increment = 2,
.ncopies = 2,
+ .nparity = 0,
.raid_name = "raid10",
.bg_flag = BTRFS_BLOCK_GROUP_RAID10,
.mindev_error = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
@@ -49,6 +50,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
.tolerated_failures = 1,
.devs_increment = 2,
.ncopies = 2,
+ .nparity = 0,
.raid_name = "raid1",
.bg_flag = BTRFS_BLOCK_GROUP_RAID1,
.mindev_error = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
@@ -61,6 +63,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
.tolerated_failures = 0,
.devs_increment = 1,
.ncopies = 2,
+ .nparity = 0,
.raid_name = "dup",
.bg_flag = BTRFS_BLOCK_GROUP_DUP,
.mindev_error = 0,
@@ -73,6 +76,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
.tolerated_failures = 0,
.devs_increment = 1,
.ncopies = 1,
+ .nparity = 0,
.raid_name = "raid0",
.bg_flag = BTRFS_BLOCK_GROUP_RAID0,
.mindev_error = 0,
@@ -85,6 +89,7 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
.tolerated_failures = 0,
.devs_increment = 1,
.ncopies = 1,
+ .nparity = 0,
.raid_name = "single",
.bg_flag = 0,
.mindev_error = 0,
@@ -96,7 +101,8 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
.devs_min = 2,
.tolerated_failures = 1,
.devs_increment = 1,
- .ncopies = 2,
+ .ncopies = 1,
+ .nparity = 1,
.raid_name = "raid5",
.bg_flag = BTRFS_BLOCK_GROUP_RAID5,
.mindev_error = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
@@ -108,7 +114,8 @@ const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
.devs_min = 3,
.tolerated_failures = 2,
.devs_increment = 1,
- .ncopies = 3,
+ .ncopies = 1,
+ .nparity = 2,
.raid_name = "raid6",
.bg_flag = BTRFS_BLOCK_GROUP_RAID6,
.mindev_error = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
@@ -123,6 +130,60 @@ const char *get_raid_name(enum btrfs_raid_types type)
return btrfs_raid_array[type].raid_name;
}
+/*
+ * Fill @buf with textual description of @bg_flags, no more than @size_buf
+ * bytes including terminating null byte.
+ */
+void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
+{
+ int i;
+ int ret;
+ char *bp = buf;
+ u64 flags = bg_flags;
+ u32 size_bp = size_buf;
+
+ if (!flags) {
+ strcpy(bp, "NONE");
+ return;
+ }
+
+#define DESCRIBE_FLAG(flag, desc) \
+ do { \
+ if (flags & (flag)) { \
+ ret = snprintf(bp, size_bp, "%s|", (desc)); \
+ if (ret < 0 || ret >= size_bp) \
+ goto out_overflow; \
+ size_bp -= ret; \
+ bp += ret; \
+ flags &= ~(flag); \
+ } \
+ } while (0)
+
+ DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
+ DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
+ DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");
+
+ DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
+ for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
+ DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
+ btrfs_raid_array[i].raid_name);
+#undef DESCRIBE_FLAG
+
+ if (flags) {
+ ret = snprintf(bp, size_bp, "0x%llx|", flags);
+ size_bp -= ret;
+ }
+
+ if (size_bp < size_buf)
+ buf[size_buf - size_bp - 1] = '\0'; /* remove last | */
+
+ /*
+ * The text is trimmed; it's up to the caller to provide a sufficiently
+ * large buffer.
+ */
+out_overflow:;
+}
+
static int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
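
A hypothetical caller of the new helper, mirroring how describe_relocation()
uses it above; the trailing '|' written by the last matched flag is trimmed
by the function itself:

	char buf[128] = {'\0'};

	btrfs_describe_block_groups(BTRFS_BLOCK_GROUP_DATA |
				    BTRFS_BLOCK_GROUP_RAID1,
				    buf, sizeof(buf));
	/* buf now contains "data|raid1" */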
@@ -151,7 +212,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
* the mutex can be very coarse and can cover long-running operations
*
* protects: updates to fs_devices counters like missing devices, rw devices,
- * seeding, structure cloning, openning/closing devices at mount/umount time
+ * seeding, structure cloning, opening/closing devices at mount/umount time
*
* global::fs_devs - add, remove, updates to the global list
*
@@ -238,13 +299,15 @@ struct list_head *btrfs_get_fs_uuids(void)
/*
* alloc_fs_devices - allocate struct btrfs_fs_devices
- * @fsid: if not NULL, copy the uuid to fs_devices::fsid
+ * @fsid: if not NULL, copy the UUID to fs_devices::fsid
+ * @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
*
* Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
* The returned struct is not linked onto any lists and can be destroyed with
* kfree() right away.
*/
-static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
+static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
+ const u8 *metadata_fsid)
{
struct btrfs_fs_devices *fs_devs;
@@ -261,6 +324,11 @@ static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
if (fsid)
memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
+ if (metadata_fsid)
+ memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
+ else if (fsid)
+ memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
+
return fs_devs;
}
@@ -368,13 +436,57 @@ static struct btrfs_device *find_device(struct btrfs_fs_devices *fs_devices,
return NULL;
}
-static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
+static noinline struct btrfs_fs_devices *find_fsid(
+ const u8 *fsid, const u8 *metadata_fsid)
{
struct btrfs_fs_devices *fs_devices;
+ ASSERT(fsid);
+
+ if (metadata_fsid) {
+ /*
+ * Handle scanned device having completed its fsid change but
+ * belonging to a fs_devices that was created by first scanning
+ * a device which didn't have its fsid/metadata_uuid changed
+ * at all but has the CHANGING_FSID_V2 flag set.
+ */
+ list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
+ if (fs_devices->fsid_change &&
+ memcmp(metadata_fsid, fs_devices->fsid,
+ BTRFS_FSID_SIZE) == 0 &&
+ memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
+ BTRFS_FSID_SIZE) == 0) {
+ return fs_devices;
+ }
+ }
+ /*
+ * Handle scanned device having completed its fsid change but
+ * belonging to a fs_devices that was created by a device that
+ * has an outdated pair of fsid/metadata_uuid and
+ * CHANGING_FSID_V2 flag set.
+ */
+ list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
+ if (fs_devices->fsid_change &&
+ memcmp(fs_devices->metadata_uuid,
+ fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
+ memcmp(metadata_fsid, fs_devices->metadata_uuid,
+ BTRFS_FSID_SIZE) == 0) {
+ return fs_devices;
+ }
+ }
+ }
+
+ /* Handle non-split-brain cases */
list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
- if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
- return fs_devices;
+ if (metadata_fsid) {
+ if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
+ && memcmp(metadata_fsid, fs_devices->metadata_uuid,
+ BTRFS_FSID_SIZE) == 0)
+ return fs_devices;
+ } else {
+ if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
+ return fs_devices;
+ }
}
return NULL;
}
@@ -709,6 +821,13 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
device->generation = btrfs_super_generation(disk_super);
if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
+ if (btrfs_super_incompat_flags(disk_super) &
+ BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
+ pr_err(
+ "BTRFS: Invalid seeding and uuid-changed device detected\n");
+ goto error_brelse;
+ }
+
clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
fs_devices->seeding = 1;
} else {
@@ -744,6 +863,51 @@ error_brelse:
}
/*
+ * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
+ * having been created by a disk that has already completed its fsid change.
+ */
+static struct btrfs_fs_devices *find_fsid_inprogress(
+ struct btrfs_super_block *disk_super)
+{
+ struct btrfs_fs_devices *fs_devices;
+
+ list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
+ if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
+ BTRFS_FSID_SIZE) != 0 &&
+ memcmp(fs_devices->metadata_uuid, disk_super->fsid,
+ BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
+ return fs_devices;
+ }
+ }
+
+ return NULL;
+}
+
+static struct btrfs_fs_devices *find_fsid_changed(
+ struct btrfs_super_block *disk_super)
+{
+ struct btrfs_fs_devices *fs_devices;
+
+ /*
+ * Handle the case where the scanned device is part of an fs whose
+ * FSID was changed successfully multiple times but this device did
+ * not observe it, meaning its fsid differs from the filesystem's.
+ */
+ list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
+ if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
+ BTRFS_FSID_SIZE) != 0 &&
+ memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
+ BTRFS_FSID_SIZE) == 0 &&
+ memcmp(fs_devices->fsid, disk_super->fsid,
+ BTRFS_FSID_SIZE) != 0) {
+ return fs_devices;
+ }
+ }
+
+ return NULL;
+}
+
+/*
* Add new device to list of registered devices
*
* Returns:
@@ -755,14 +919,46 @@ static noinline struct btrfs_device *device_list_add(const char *path,
bool *new_device_added)
{
struct btrfs_device *device;
- struct btrfs_fs_devices *fs_devices;
+ struct btrfs_fs_devices *fs_devices = NULL;
struct rcu_string *name;
u64 found_transid = btrfs_super_generation(disk_super);
u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
+ bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
+ BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
+ bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
+ BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
+
+ if (fsid_change_in_progress) {
+ if (!has_metadata_uuid) {
+ /*
+ * When we have an image which has CHANGING_FSID_V2 set
+ * it might belong either to a filesystem whose disks have
+ * completed the fsid change or to a filesystem with no
+ * UUID changes in effect; handle both.
+ */
+ fs_devices = find_fsid_inprogress(disk_super);
+ if (!fs_devices)
+ fs_devices = find_fsid(disk_super->fsid, NULL);
+ } else {
+ fs_devices = find_fsid_changed(disk_super);
+ }
+ } else if (has_metadata_uuid) {
+ fs_devices = find_fsid(disk_super->fsid,
+ disk_super->metadata_uuid);
+ } else {
+ fs_devices = find_fsid(disk_super->fsid, NULL);
+ }
+
- fs_devices = find_fsid(disk_super->fsid);
if (!fs_devices) {
- fs_devices = alloc_fs_devices(disk_super->fsid);
+ if (has_metadata_uuid)
+ fs_devices = alloc_fs_devices(disk_super->fsid,
+ disk_super->metadata_uuid);
+ else
+ fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
+
+ fs_devices->fsid_change = fsid_change_in_progress;
+
if (IS_ERR(fs_devices))
return ERR_CAST(fs_devices);
@@ -774,6 +970,21 @@ static noinline struct btrfs_device *device_list_add(const char *path,
mutex_lock(&fs_devices->device_list_mutex);
device = find_device(fs_devices, devid,
disk_super->dev_item.uuid);
+
+ /*
+ * If this disk has been pulled into a fs_devices created by
+ * a device which had the CHANGING_FSID_V2 flag then replace the
+ * metadata_uuid/fsid values of the fs_devices.
+ */
+ if (has_metadata_uuid && fs_devices->fsid_change &&
+ found_transid > fs_devices->latest_generation) {
+ memcpy(fs_devices->fsid, disk_super->fsid,
+ BTRFS_FSID_SIZE);
+ memcpy(fs_devices->metadata_uuid,
+ disk_super->metadata_uuid, BTRFS_FSID_SIZE);
+
+ fs_devices->fsid_change = false;
+ }
}
if (!device) {
@@ -850,6 +1061,35 @@ static noinline struct btrfs_device *device_list_add(const char *path,
return ERR_PTR(-EEXIST);
}
+ /*
+ * We are going to replace the device path for a given devid;
+ * make sure it's the same device if the device is mounted.
+ */
+ if (device->bdev) {
+ struct block_device *path_bdev;
+
+ path_bdev = lookup_bdev(path);
+ if (IS_ERR(path_bdev)) {
+ mutex_unlock(&fs_devices->device_list_mutex);
+ return ERR_CAST(path_bdev);
+ }
+
+ if (device->bdev != path_bdev) {
+ bdput(path_bdev);
+ mutex_unlock(&fs_devices->device_list_mutex);
+ btrfs_warn_in_rcu(device->fs_info,
+ "duplicate device fsid:devid for %pU:%llu old:%s new:%s",
+ disk_super->fsid, devid,
+ rcu_str_deref(device->name), path);
+ return ERR_PTR(-EEXIST);
+ }
+ bdput(path_bdev);
+ btrfs_info_in_rcu(device->fs_info,
+ "device fsid %pU devid %llu moved old:%s new:%s",
+ disk_super->fsid, devid,
+ rcu_str_deref(device->name), path);
+ }
+
name = rcu_string_strdup(path, GFP_NOFS);
if (!name) {
mutex_unlock(&fs_devices->device_list_mutex);
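For reference, lookup_bdev() resolves a path to the block_device it names without opening it, taking a reference that must be dropped with bdput(); the duplicate-device check above therefore compares by pointer identity only. A sketch of the pairing (kernel context, illustrative; path and device as in the hunk above):

        struct block_device *path_bdev = lookup_bdev(path);
        bool same;

        if (IS_ERR(path_bdev))
                return PTR_ERR(path_bdev);
        same = (path_bdev == device->bdev);     /* identity, not content */
        bdput(path_bdev);       /* drop the reference lookup_bdev() took */
        if (!same)
                return -EEXIST; /* same devid, different device */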
@@ -869,8 +1109,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
* it back. We need it to pick the disk with largest generation
* (as above).
*/
- if (!fs_devices->opened)
+ if (!fs_devices->opened) {
device->generation = found_transid;
+ fs_devices->latest_generation = max_t(u64, found_transid,
+ fs_devices->latest_generation);
+ }
fs_devices->total_devices = btrfs_super_num_devices(disk_super);
@@ -884,7 +1127,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
struct btrfs_device *device;
struct btrfs_device *orig_dev;
- fs_devices = alloc_fs_devices(orig->fsid);
+ fs_devices = alloc_fs_devices(orig->fsid, NULL);
if (IS_ERR(fs_devices))
return fs_devices;
@@ -1193,7 +1436,7 @@ static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
p = kmap(*page);
/* align our pointer to the offset of the super block */
- *disk_super = p + (bytenr & ~PAGE_MASK);
+ *disk_super = p + offset_in_page(bytenr);
if (btrfs_super_bytenr(*disk_super) != bytenr ||
btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
@@ -1709,7 +1952,8 @@ static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
ptr = btrfs_device_uuid(dev_item);
write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
ptr = btrfs_device_fsid(dev_item);
- write_extent_buffer(leaf, trans->fs_info->fsid, ptr, BTRFS_FSID_SIZE);
+ write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
+ ptr, BTRFS_FSID_SIZE);
btrfs_mark_buffer_dirty(leaf);
ret = 0;
@@ -1862,12 +2106,12 @@ static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
u64 num_devices = fs_info->fs_devices->num_devices;
- btrfs_dev_replace_read_lock(&fs_info->dev_replace);
+ down_read(&fs_info->dev_replace.rwsem);
if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
ASSERT(num_devices > 1);
num_devices--;
}
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+ up_read(&fs_info->dev_replace.rwsem);
return num_devices;
}
@@ -1900,6 +2144,14 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
goto out;
}
+ if (btrfs_pinned_by_swapfile(fs_info, device)) {
+ btrfs_warn_in_rcu(fs_info,
+ "cannot remove device %s (devid %llu) due to active swapfile",
+ rcu_str_deref(device->name), device->devid);
+ ret = -ETXTBSY;
+ goto out;
+ }
+
if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
ret = BTRFS_ERROR_DEV_TGT_REPLACE;
goto out;
@@ -2132,7 +2384,13 @@ static struct btrfs_device *btrfs_find_device_by_path(
disk_super = (struct btrfs_super_block *)bh->b_data;
devid = btrfs_stack_device_id(&disk_super->dev_item);
dev_uuid = disk_super->dev_item.uuid;
- device = btrfs_find_device(fs_info, devid, dev_uuid, disk_super->fsid);
+ if (btrfs_fs_incompat(fs_info, METADATA_UUID))
+ device = btrfs_find_device(fs_info, devid, dev_uuid,
+ disk_super->metadata_uuid);
+ else
+ device = btrfs_find_device(fs_info, devid,
+ dev_uuid, disk_super->fsid);
+
brelse(bh);
if (!device)
device = ERR_PTR(-ENOENT);
@@ -2202,7 +2460,7 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
if (!fs_devices->seeding)
return -EINVAL;
- seed_devices = alloc_fs_devices(NULL);
+ seed_devices = alloc_fs_devices(NULL, NULL);
if (IS_ERR(seed_devices))
return PTR_ERR(seed_devices);
@@ -2238,7 +2496,7 @@ static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
fs_devices->seed = seed_devices;
generate_random_uuid(fs_devices->fsid);
- memcpy(fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
+ memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
mutex_unlock(&fs_devices->device_list_mutex);
@@ -2480,7 +2738,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
* so rename the fsid on the sysfs
*/
snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
- fs_info->fsid);
+ fs_info->fs_devices->fsid);
if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
btrfs_warn(fs_info,
"sysfs: failed to create fsid for sprout");
@@ -2718,8 +2976,15 @@ static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
return ret;
}
-static struct extent_map *get_chunk_map(struct btrfs_fs_info *fs_info,
- u64 logical, u64 length)
+/*
+ * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
+ * @logical: Logical block offset in bytes.
+ * @length: Length of extent in bytes.
+ *
+ * Return: Chunk mapping or ERR_PTR.
+ */
+struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length)
{
struct extent_map_tree *em_tree;
struct extent_map *em;
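Every call site converted below follows the same contract: check for ERR_PTR() on failure and drop the reference with free_extent_map() when done. Schematically (variable names as used by the callers in this patch):

        em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
        if (IS_ERR(em))
                return PTR_ERR(em);
        map = em->map_lookup;
        /* ... use map->type, map->num_stripes, map->stripes[i] ... */
        free_extent_map(em);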
@@ -2756,7 +3021,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
int i, ret = 0;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
- em = get_chunk_map(fs_info, chunk_offset, 1);
+ em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
if (IS_ERR(em)) {
/*
* This is a logic error, but we don't want to just rely on the
@@ -2797,13 +3062,11 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
mutex_unlock(&fs_info->chunk_mutex);
}
- if (map->stripes[i].dev) {
- ret = btrfs_update_device(trans, map->stripes[i].dev);
- if (ret) {
- mutex_unlock(&fs_devices->device_list_mutex);
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
+ ret = btrfs_update_device(trans, device);
+ if (ret) {
+ mutex_unlock(&fs_devices->device_list_mutex);
+ btrfs_abort_transaction(trans, ret);
+ goto out;
}
}
mutex_unlock(&fs_devices->device_list_mutex);
@@ -3437,17 +3700,11 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
struct btrfs_root *chunk_root = fs_info->chunk_root;
- struct btrfs_root *dev_root = fs_info->dev_root;
- struct list_head *devices;
- struct btrfs_device *device;
- u64 old_size;
- u64 size_to_free;
u64 chunk_type;
struct btrfs_chunk *chunk;
struct btrfs_path *path = NULL;
struct btrfs_key key;
struct btrfs_key found_key;
- struct btrfs_trans_handle *trans;
struct extent_buffer *leaf;
int slot;
int ret;
@@ -3462,53 +3719,6 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
u32 count_sys = 0;
int chunk_reserved = 0;
- /* step one make some room on all the devices */
- devices = &fs_info->fs_devices->devices;
- list_for_each_entry(device, devices, dev_list) {
- old_size = btrfs_device_get_total_bytes(device);
- size_to_free = div_factor(old_size, 1);
- size_to_free = min_t(u64, size_to_free, SZ_1M);
- if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) ||
- btrfs_device_get_total_bytes(device) -
- btrfs_device_get_bytes_used(device) > size_to_free ||
- test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
- continue;
-
- ret = btrfs_shrink_device(device, old_size - size_to_free);
- if (ret == -ENOSPC)
- break;
- if (ret) {
- /* btrfs_shrink_device never returns ret > 0 */
- WARN_ON(ret > 0);
- goto error;
- }
-
- trans = btrfs_start_transaction(dev_root, 0);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- btrfs_info_in_rcu(fs_info,
- "resize: unable to start transaction after shrinking device %s (error %d), old size %llu, new size %llu",
- rcu_str_deref(device->name), ret,
- old_size, old_size - size_to_free);
- goto error;
- }
-
- ret = btrfs_grow_device(trans, device, old_size);
- if (ret) {
- btrfs_end_transaction(trans);
- /* btrfs_grow_device never returns ret > 0 */
- WARN_ON(ret > 0);
- btrfs_info_in_rcu(fs_info,
- "resize: unable to grow device after shrinking device %s (error %d), old size %llu, new size %llu",
- rcu_str_deref(device->name), ret,
- old_size, old_size - size_to_free);
- goto error;
- }
-
- btrfs_end_transaction(trans);
- }
-
- /* step two, relocate all the chunks */
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
@@ -3638,10 +3848,15 @@ again:
ret = btrfs_relocate_chunk(fs_info, found_key.offset);
mutex_unlock(&fs_info->delete_unused_bgs_mutex);
- if (ret && ret != -ENOSPC)
- goto error;
if (ret == -ENOSPC) {
enospc_errors++;
+ } else if (ret == -ETXTBSY) {
+ btrfs_info(fs_info,
+ "skipping relocation of block group %llu due to active swapfile",
+ found_key.offset);
+ ret = 0;
+ } else if (ret) {
+ goto error;
} else {
spin_lock(&fs_info->balance_lock);
bctl->stat.completed++;
@@ -3712,6 +3927,162 @@ static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
}
/*
+ * Fill @buf with textual description of balance filter flags @bargs, up to
+ * @size_buf bytes including the terminating null. The output may be trimmed if it
+ * does not fit into the provided buffer.
+ */
+static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
+ u32 size_buf)
+{
+ int ret;
+ u32 size_bp = size_buf;
+ char *bp = buf;
+ u64 flags = bargs->flags;
+ char tmp_buf[128] = {'\0'};
+
+ if (!flags)
+ return;
+
+#define CHECK_APPEND_NOARG(a) \
+ do { \
+ ret = snprintf(bp, size_bp, (a)); \
+ if (ret < 0 || ret >= size_bp) \
+ goto out_overflow; \
+ size_bp -= ret; \
+ bp += ret; \
+ } while (0)
+
+#define CHECK_APPEND_1ARG(a, v1) \
+ do { \
+ ret = snprintf(bp, size_bp, (a), (v1)); \
+ if (ret < 0 || ret >= size_bp) \
+ goto out_overflow; \
+ size_bp -= ret; \
+ bp += ret; \
+ } while (0)
+
+#define CHECK_APPEND_2ARG(a, v1, v2) \
+ do { \
+ ret = snprintf(bp, size_bp, (a), (v1), (v2)); \
+ if (ret < 0 || ret >= size_bp) \
+ goto out_overflow; \
+ size_bp -= ret; \
+ bp += ret; \
+ } while (0)
+
+ if (flags & BTRFS_BALANCE_ARGS_CONVERT) {
+ int index = btrfs_bg_flags_to_raid_index(bargs->target);
+
+ CHECK_APPEND_1ARG("convert=%s,", get_raid_name(index));
+ }
+
+ if (flags & BTRFS_BALANCE_ARGS_SOFT)
+ CHECK_APPEND_NOARG("soft,");
+
+ if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
+ btrfs_describe_block_groups(bargs->profiles, tmp_buf,
+ sizeof(tmp_buf));
+ CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
+ }
+
+ if (flags & BTRFS_BALANCE_ARGS_USAGE)
+ CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
+
+ if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
+ CHECK_APPEND_2ARG("usage=%u..%u,",
+ bargs->usage_min, bargs->usage_max);
+
+ if (flags & BTRFS_BALANCE_ARGS_DEVID)
+ CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
+
+ if (flags & BTRFS_BALANCE_ARGS_DRANGE)
+ CHECK_APPEND_2ARG("drange=%llu..%llu,",
+ bargs->pstart, bargs->pend);
+
+ if (flags & BTRFS_BALANCE_ARGS_VRANGE)
+ CHECK_APPEND_2ARG("vrange=%llu..%llu,",
+ bargs->vstart, bargs->vend);
+
+ if (flags & BTRFS_BALANCE_ARGS_LIMIT)
+ CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
+
+ if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
+ CHECK_APPEND_2ARG("limit=%u..%u,",
+ bargs->limit_min, bargs->limit_max);
+
+ if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
+ CHECK_APPEND_2ARG("stripes=%u..%u,",
+ bargs->stripes_min, bargs->stripes_max);
+
+#undef CHECK_APPEND_2ARG
+#undef CHECK_APPEND_1ARG
+#undef CHECK_APPEND_NOARG
+
+out_overflow:
+
+ if (size_bp < size_buf)
+ buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
+ else
+ buf[0] = '\0';
+}
+
+static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
+{
+ u32 size_buf = 1024;
+ char tmp_buf[192] = {'\0'};
+ char *buf;
+ char *bp;
+ u32 size_bp = size_buf;
+ int ret;
+ struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+
+ buf = kzalloc(size_buf, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ bp = buf;
+
+#define CHECK_APPEND_1ARG(a, v1) \
+ do { \
+ ret = snprintf(bp, size_bp, (a), (v1)); \
+ if (ret < 0 || ret >= size_bp) \
+ goto out_overflow; \
+ size_bp -= ret; \
+ bp += ret; \
+ } while (0)
+
+ if (bctl->flags & BTRFS_BALANCE_FORCE)
+ CHECK_APPEND_1ARG("%s", "-f ");
+
+ if (bctl->flags & BTRFS_BALANCE_DATA) {
+ describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
+ CHECK_APPEND_1ARG("-d%s ", tmp_buf);
+ }
+
+ if (bctl->flags & BTRFS_BALANCE_METADATA) {
+ describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
+ CHECK_APPEND_1ARG("-m%s ", tmp_buf);
+ }
+
+ if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
+ describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
+ CHECK_APPEND_1ARG("-s%s ", tmp_buf);
+ }
+
+#undef CHECK_APPEND_1ARG
+
+out_overflow:
+
+ if (size_bp < size_buf)
+ buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
+ btrfs_info(fs_info, "balance: %s %s",
+ (bctl->flags & BTRFS_BALANCE_RESUME) ?
+ "resume" : "start", buf);
+
+ kfree(buf);
+}
+
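Taken together, the describers turn the start/resume event into a single reconstructable command line. An illustrative (made-up) example of the resulting message:

        BTRFS info (device sda): balance: start -f -dconvert=raid1,soft -musage=50

with each -d/-m/-s group filled in by describe_balance_args().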
+/*
 * Should be called with the balance mutex held
*/
int btrfs_balance(struct btrfs_fs_info *fs_info,
@@ -3724,6 +4095,7 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
int ret;
u64 num_devices;
unsigned seq;
+ bool reducing_integrity;
if (btrfs_fs_closing(fs_info) ||
atomic_read(&fs_info->balance_pause_req) ||
@@ -3803,24 +4175,30 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
!(bctl->sys.target & allowed)) ||
((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
(fs_info->avail_metadata_alloc_bits & allowed) &&
- !(bctl->meta.target & allowed))) {
- if (bctl->flags & BTRFS_BALANCE_FORCE) {
- btrfs_info(fs_info,
- "balance: force reducing metadata integrity");
- } else {
- btrfs_err(fs_info,
- "balance: reduces metadata integrity, use --force if you want this");
- ret = -EINVAL;
- goto out;
- }
- }
+ !(bctl->meta.target & allowed)))
+ reducing_integrity = true;
+ else
+ reducing_integrity = false;
+
+ /* if we're not converting, the target field is uninitialized */
+ meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+ bctl->meta.target : fs_info->avail_metadata_alloc_bits;
+ data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
+ bctl->data.target : fs_info->avail_data_alloc_bits;
} while (read_seqretry(&fs_info->profiles_lock, seq));
- /* if we're not converting, the target field is uninitialized */
- meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
- bctl->meta.target : fs_info->avail_metadata_alloc_bits;
- data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
- bctl->data.target : fs_info->avail_data_alloc_bits;
+ if (reducing_integrity) {
+ if (bctl->flags & BTRFS_BALANCE_FORCE) {
+ btrfs_info(fs_info,
+ "balance: force reducing metadata integrity");
+ } else {
+ btrfs_err(fs_info,
+ "balance: reduces metadata integrity, use --force if you want this");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
int meta_index = btrfs_bg_flags_to_raid_index(meta_target);
@@ -3850,11 +4228,19 @@ int btrfs_balance(struct btrfs_fs_info *fs_info,
ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
+ describe_balance_start_or_resume(fs_info);
mutex_unlock(&fs_info->balance_mutex);
ret = __btrfs_balance(fs_info);
mutex_lock(&fs_info->balance_mutex);
+ if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
+ btrfs_info(fs_info, "balance: paused");
+ else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
+ btrfs_info(fs_info, "balance: canceled");
+ else
+ btrfs_info(fs_info, "balance: ended with status: %d", ret);
+
clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
if (bargs) {
@@ -3887,10 +4273,8 @@ static int balance_kthread(void *data)
int ret = 0;
mutex_lock(&fs_info->balance_mutex);
- if (fs_info->balance_ctl) {
- btrfs_info(fs_info, "balance: resuming");
+ if (fs_info->balance_ctl)
ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
- }
mutex_unlock(&fs_info->balance_mutex);
return ret;
@@ -4433,10 +4817,16 @@ again:
ret = btrfs_relocate_chunk(fs_info, chunk_offset);
mutex_unlock(&fs_info->delete_unused_bgs_mutex);
- if (ret && ret != -ENOSPC)
- goto done;
- if (ret == -ENOSPC)
+ if (ret == -ENOSPC) {
failed++;
+ } else if (ret) {
+ if (ret == -ETXTBSY) {
+ btrfs_warn(fs_info,
+ "could not shrink block group %llu due to active swapfile",
+ chunk_offset);
+ }
+ goto done;
+ }
} while (key.offset-- > 0);
if (failed && !retried) {
@@ -4602,11 +4992,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
int devs_min; /* min devs needed */
int devs_increment; /* ndevs has to be a multiple of this */
 int ncopies; /* how many copies of the data there are */
+ int nparity; /* number of stripes worth of bytes to
+ store parity information */
int ret;
u64 max_stripe_size;
u64 max_chunk_size;
u64 stripe_size;
- u64 num_bytes;
+ u64 chunk_size;
int ndevs;
int i;
int j;
@@ -4628,6 +5020,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
devs_min = btrfs_raid_array[index].devs_min;
devs_increment = btrfs_raid_array[index].devs_increment;
ncopies = btrfs_raid_array[index].ncopies;
+ nparity = btrfs_raid_array[index].nparity;
if (type & BTRFS_BLOCK_GROUP_DATA) {
max_stripe_size = SZ_1G;
@@ -4654,7 +5047,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
BUG_ON(1);
}
- /* we don't want a chunk larger than 10% of writeable space */
+ /* We don't want a chunk larger than 10% of writable space */
max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
max_chunk_size);
@@ -4757,30 +5150,22 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
* this will have to be fixed for RAID1 and RAID10 over
* more drives
*/
- data_stripes = num_stripes / ncopies;
-
- if (type & BTRFS_BLOCK_GROUP_RAID5)
- data_stripes = num_stripes - 1;
-
- if (type & BTRFS_BLOCK_GROUP_RAID6)
- data_stripes = num_stripes - 2;
+ data_stripes = (num_stripes - nparity) / ncopies;
/*
* Use the number of data stripes to figure out how big this chunk
* is really going to be in terms of logical address space,
- * and compare that answer with the max chunk size
+ * and compare that answer with the max chunk size. If it's higher,
+ * we try to reduce stripe_size.
*/
if (stripe_size * data_stripes > max_chunk_size) {
- stripe_size = div_u64(max_chunk_size, data_stripes);
-
- /* bump the answer up to a 16MB boundary */
- stripe_size = round_up(stripe_size, SZ_16M);
-
/*
- * But don't go higher than the limits we found while searching
- * for free extents
+ * Reduce stripe_size, round it up to a 16MB boundary again and
+ * then use it, unless it ends up being even bigger than the
+ * value we already had.
*/
- stripe_size = min(devices_info[ndevs - 1].max_avail,
+ stripe_size = min(round_up(div_u64(max_chunk_size,
+ data_stripes), SZ_16M),
stripe_size);
}
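With nparity in the raid table, one expression covers what previously needed RAID5/6 special cases. Plugging in the btrfs_raid_array values from earlier in this patch, for illustration:

        /* RAID6, 6 stripes:  ncopies = 1, nparity = 2 -> (6 - 2) / 1 = 4 data stripes */
        /* RAID10, 6 stripes: ncopies = 2, nparity = 0 -> (6 - 0) / 2 = 3 data stripes */
        /* chunk_size = stripe_size * data_stripes in either case */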
@@ -4808,9 +5193,9 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
map->type = type;
map->sub_stripes = sub_stripes;
- num_bytes = stripe_size * data_stripes;
+ chunk_size = stripe_size * data_stripes;
- trace_btrfs_chunk_alloc(info, map, start, num_bytes);
+ trace_btrfs_chunk_alloc(info, map, start, chunk_size);
em = alloc_extent_map();
if (!em) {
@@ -4821,7 +5206,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
em->map_lookup = map;
em->start = start;
- em->len = num_bytes;
+ em->len = chunk_size;
em->block_start = 0;
em->block_len = em->len;
em->orig_block_len = stripe_size;
@@ -4839,14 +5224,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
refcount_inc(&em->refs);
write_unlock(&em_tree->lock);
- ret = btrfs_make_block_group(trans, 0, type, start, num_bytes);
+ ret = btrfs_make_block_group(trans, 0, type, start, chunk_size);
if (ret)
goto error_del_extent;
- for (i = 0; i < map->num_stripes; i++) {
- num_bytes = map->stripes[i].dev->bytes_used + stripe_size;
- btrfs_device_set_bytes_used(map->stripes[i].dev, num_bytes);
- }
+ for (i = 0; i < map->num_stripes; i++)
+ btrfs_device_set_bytes_used(map->stripes[i].dev,
+ map->stripes[i].dev->bytes_used + stripe_size);
atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);
@@ -4890,7 +5274,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
int i = 0;
int ret = 0;
- em = get_chunk_map(fs_info, chunk_offset, chunk_size);
+ em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
if (IS_ERR(em))
return PTR_ERR(em);
@@ -4971,10 +5355,10 @@ out:
}
/*
- * Chunk allocation falls into two parts. The first part does works
- * that make the new allocated chunk useable, but not do any operation
- * that modifies the chunk tree. The second part does the works that
- * require modifying the chunk tree. This division is important for the
+ * Chunk allocation falls into two parts. The first part does work
+ * that makes the new allocated chunk usable, but does not do any operation
+ * that modifies the chunk tree. The second part does the work that
+ * requires modifying the chunk tree. This division is important for the
* bootstrap process of adding storage to a seed btrfs.
*/
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
@@ -5032,7 +5416,7 @@ int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
int miss_ndevs = 0;
int i;
- em = get_chunk_map(fs_info, chunk_offset, 1);
+ em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
if (IS_ERR(em))
return 1;
@@ -5092,7 +5476,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
struct map_lookup *map;
int ret;
- em = get_chunk_map(fs_info, logical, len);
+ em = btrfs_get_chunk_map(fs_info, logical, len);
if (IS_ERR(em))
/*
* We could return errors for these cases, but that could get
@@ -5122,11 +5506,11 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
ret = 1;
free_extent_map(em);
- btrfs_dev_replace_read_lock(&fs_info->dev_replace);
+ down_read(&fs_info->dev_replace.rwsem);
if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
fs_info->dev_replace.tgtdev)
ret++;
- btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
+ up_read(&fs_info->dev_replace.rwsem);
return ret;
}
@@ -5138,7 +5522,7 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
struct map_lookup *map;
unsigned long len = fs_info->sectorsize;
- em = get_chunk_map(fs_info, logical, len);
+ em = btrfs_get_chunk_map(fs_info, logical, len);
if (!WARN_ON(IS_ERR(em))) {
map = em->map_lookup;
@@ -5155,7 +5539,7 @@ int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
struct map_lookup *map;
int ret = 0;
- em = get_chunk_map(fs_info, logical, len);
+ em = btrfs_get_chunk_map(fs_info, logical, len);
if(!WARN_ON(IS_ERR(em))) {
map = em->map_lookup;
@@ -5314,7 +5698,7 @@ static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
 /* discard always returns a bbio */
ASSERT(bbio_ret);
- em = get_chunk_map(fs_info, logical, length);
+ em = btrfs_get_chunk_map(fs_info, logical, length);
if (IS_ERR(em))
return PTR_ERR(em);
@@ -5640,7 +6024,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
return __btrfs_map_block_for_discard(fs_info, logical,
*length, bbio_ret);
- em = get_chunk_map(fs_info, logical, *length);
+ em = btrfs_get_chunk_map(fs_info, logical, *length);
if (IS_ERR(em))
return PTR_ERR(em);
@@ -5699,17 +6083,21 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
*length = em->len - offset;
}
- /* This is for when we're called from btrfs_merge_bio_hook() and all
- it cares about is the length */
+ /*
+ * This is for when we're called from btrfs_bio_fits_in_stripe and all
+ * it cares about is the length
+ */
if (!bbio_ret)
goto out;
- btrfs_dev_replace_read_lock(dev_replace);
+ down_read(&dev_replace->rwsem);
dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
+ /*
+ * Hold the semaphore for read during the whole operation; write is
+ * requested at commit time but must wait.
+ */
if (!dev_replace_is_ongoing)
- btrfs_dev_replace_read_unlock(dev_replace);
- else
- btrfs_dev_replace_set_lock_blocking(dev_replace);
+ up_read(&dev_replace->rwsem);
if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
!need_full_stripe(op) && dev_replace->tgtdev != NULL) {
@@ -5904,12 +6292,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
}
out:
if (dev_replace_is_ongoing) {
- ASSERT(atomic_read(&dev_replace->blocking_readers) > 0);
- btrfs_dev_replace_read_lock(dev_replace);
- /* Barrier implied by atomic_dec_and_test */
- if (atomic_dec_and_test(&dev_replace->blocking_readers))
- cond_wake_up_nomb(&dev_replace->read_lock_wq);
- btrfs_dev_replace_read_unlock(dev_replace);
+ lockdep_assert_held(&dev_replace->rwsem);
+ /* Unlock and let waiting writers proceed */
+ up_read(&dev_replace->rwsem);
}
free_extent_map(em);
return ret;
@@ -5943,7 +6328,7 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
u64 rmap_len;
int i, j, nr = 0;
- em = get_chunk_map(fs_info, chunk_start, 1);
+ em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
if (IS_ERR(em))
return -EIO;
@@ -6083,12 +6468,6 @@ static noinline void btrfs_schedule_bio(struct btrfs_device *device,
int should_queue = 1;
struct btrfs_pending_bios *pending_bios;
- if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state) ||
- !device->bdev) {
- bio_io_error(bio);
- return;
- }
-
/* don't bother with additional async steps for reads, right now */
if (bio_op(bio) == REQ_OP_READ) {
btrfsic_submit_bio(bio);
@@ -6217,7 +6596,8 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
dev = bbio->stripes[dev_nr].dev;
- if (!dev || !dev->bdev ||
+ if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
+ &dev->dev_state) ||
(bio_op(first_bio) == REQ_OP_WRITE &&
!test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
bbio_error(bbio, first_bio, logical);
@@ -6245,7 +6625,7 @@ struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
cur_devices = fs_info->fs_devices;
while (cur_devices) {
if (!fsid ||
- !memcmp(cur_devices->fsid, fsid, BTRFS_FSID_SIZE)) {
+ !memcmp(cur_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
device = find_device(cur_devices, devid, uuid);
if (device)
return device;
@@ -6574,12 +6954,12 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
fs_devices = fs_devices->seed;
}
- fs_devices = find_fsid(fsid);
+ fs_devices = find_fsid(fsid, NULL);
if (!fs_devices) {
if (!btrfs_test_opt(fs_info, DEGRADED))
return ERR_PTR(-ENOENT);
- fs_devices = alloc_fs_devices(fsid);
+ fs_devices = alloc_fs_devices(fsid, NULL);
if (IS_ERR(fs_devices))
return fs_devices;
@@ -6629,7 +7009,7 @@ static int read_one_dev(struct btrfs_fs_info *fs_info,
read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
BTRFS_FSID_SIZE);
- if (memcmp(fs_uuid, fs_info->fsid, BTRFS_FSID_SIZE)) {
+ if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
fs_devices = open_seed_devices(fs_info, fs_uuid);
if (IS_ERR(fs_devices))
return PTR_ERR(fs_devices);
@@ -6876,7 +7256,7 @@ bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
if (missing > max_tolerated) {
if (!failing_dev)
btrfs_warn(fs_info,
- "chunk %llu missing %d devices, max tolerance is %d for writeable mount",
+ "chunk %llu missing %d devices, max tolerance is %d for writable mount",
em->start, missing, max_tolerated);
free_extent_map(em);
ret = false;
@@ -7387,6 +7767,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
struct extent_map *em;
struct map_lookup *map;
+ struct btrfs_device *dev;
u64 stripe_len;
bool found = false;
int ret = 0;
@@ -7436,6 +7817,22 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
physical_offset, devid);
ret = -EUCLEAN;
}
+
+ /* Make sure no dev extent is beyond device boundary */
+ dev = btrfs_find_device(fs_info, devid, NULL, NULL);
+ if (!dev) {
+ btrfs_err(fs_info, "failed to find devid %llu", devid);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ if (physical_offset + physical_len > dev->disk_total_bytes) {
+ btrfs_err(fs_info,
+"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
+ devid, physical_offset, physical_len,
+ dev->disk_total_bytes);
+ ret = -EUCLEAN;
+ goto out;
+ }
out:
free_extent_map(em);
return ret;
@@ -7478,6 +7875,8 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
struct btrfs_path *path;
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_key key;
+ u64 prev_devid = 0;
+ u64 prev_dev_ext_end = 0;
int ret = 0;
key.objectid = 1;
@@ -7522,10 +7921,22 @@ int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
physical_len = btrfs_dev_extent_length(leaf, dext);
+ /* Check if this dev extent overlaps with the previous one */
+ if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
+ btrfs_err(fs_info,
+"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
+ devid, physical_offset, prev_dev_ext_end);
+ ret = -EUCLEAN;
+ goto out;
+ }
+
ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
physical_offset, physical_len);
if (ret < 0)
goto out;
+ prev_devid = devid;
+ prev_dev_ext_end = physical_offset + physical_len;
+
ret = btrfs_next_item(root, path);
if (ret < 0)
goto out;
@@ -7541,3 +7952,27 @@ out:
btrfs_free_path(path);
return ret;
}
+
+/*
+ * Check whether the given block group or device is pinned by any inode being
+ * used as a swapfile.
+ */
+bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
+{
+ struct btrfs_swapfile_pin *sp;
+ struct rb_node *node;
+
+ spin_lock(&fs_info->swapfile_pins_lock);
+ node = fs_info->swapfile_pins.rb_node;
+ while (node) {
+ sp = rb_entry(node, struct btrfs_swapfile_pin, node);
+ if (ptr < sp->ptr)
+ node = node->rb_left;
+ else if (ptr > sp->ptr)
+ node = node->rb_right;
+ else
+ break;
+ }
+ spin_unlock(&fs_info->swapfile_pins_lock);
+ return node != NULL;
+}
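The tree is ordered by the raw pointer value, which is what lets a block group and a device share one pin structure. The matching insert side is not part of this hunk; a hypothetical sketch keyed the same way (caller holds fs_info->swapfile_pins_lock; sp is the new pin):

        struct rb_node **p = &fs_info->swapfile_pins.rb_node;
        struct rb_node *parent = NULL;

        while (*p) {
                struct btrfs_swapfile_pin *entry;

                parent = *p;
                entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
                if (sp->ptr < entry->ptr)
                        p = &(*p)->rb_left;
                else if (sp->ptr > entry->ptr)
                        p = &(*p)->rb_right;
                else
                        return -EEXIST;         /* already pinned */
        }
        rb_link_node(&sp->node, parent, p);
        rb_insert_color(&sp->node, &fs_info->swapfile_pins);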
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index aefce895e994..ed806649a473 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -210,6 +210,8 @@ BTRFS_DEVICE_GETSET_FUNCS(bytes_used);
struct btrfs_fs_devices {
u8 fsid[BTRFS_FSID_SIZE]; /* FS specific uuid */
+ u8 metadata_uuid[BTRFS_FSID_SIZE];
+ bool fsid_change;
struct list_head fs_list;
u64 num_devices;
@@ -218,6 +220,10 @@ struct btrfs_fs_devices {
u64 missing_devices;
u64 total_rw_bytes;
u64 total_devices;
+
+ /* Highest generation number of seen devices */
+ u64 latest_generation;
+
struct block_device *latest_bdev;
/* all of the devices in the FS, protected by a mutex
@@ -261,15 +267,12 @@ struct btrfs_fs_devices {
* we allocate are actually btrfs_io_bios. We'll cram as much of
* struct btrfs_bio as we can into this over time.
*/
-typedef void (btrfs_io_bio_end_io_t) (struct btrfs_io_bio *bio, int err);
struct btrfs_io_bio {
unsigned int mirror_num;
unsigned int stripe_index;
u64 logical;
u8 *csum;
u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
- u8 *csum_allocated;
- btrfs_io_bio_end_io_t *end_io;
struct bvec_iter iter;
/*
* This member must come last, bio_alloc_bioset will allocate enough
@@ -283,15 +286,20 @@ static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
return container_of(bio, struct btrfs_io_bio, bio);
}
+static inline void btrfs_io_bio_free_csum(struct btrfs_io_bio *io_bio)
+{
+ if (io_bio->csum != io_bio->csum_inline) {
+ kfree(io_bio->csum);
+ io_bio->csum = NULL;
+ }
+}
+
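btrfs_io_bio_free_csum() pairs with the inline-or-heap allocation pattern used by the checksum lookup path: small checksum arrays live in the inline buffer, larger ones are allocated, and the helper frees only the latter. An assumed caller, for illustration (nblocks and csum_size are hypothetical locals):

        if (nblocks * csum_size <= BTRFS_BIO_INLINE_CSUM_SIZE)
                io_bio->csum = io_bio->csum_inline;     /* small: no allocation */
        else
                io_bio->csum = kmalloc_array(nblocks, csum_size, GFP_NOFS);
        /* ... fill and consume io_bio->csum ... */
        btrfs_io_bio_free_csum(io_bio); /* kfree() only in the heap case */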
struct btrfs_bio_stripe {
struct btrfs_device *dev;
u64 physical;
u64 length; /* only used for discard mappings */
};
-struct btrfs_bio;
-typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
-
struct btrfs_bio {
refcount_t refs;
atomic_t stripes_pending;
@@ -331,6 +339,8 @@ struct btrfs_raid_attr {
int tolerated_failures; /* max tolerated fail devs */
int devs_increment; /* ndevs has to be a multiple of this */
 int ncopies; /* how many copies of the data there are */
+ int nparity; /* number of stripes worth of bytes to store
+ * parity information */
int mindev_error; /* error code if min devs requisite is unmet */
const char raid_name[8]; /* name of the raid */
u64 bg_flag; /* block group flag of the raid */
@@ -430,6 +440,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path);
int btrfs_balance(struct btrfs_fs_info *fs_info,
struct btrfs_balance_control *bctl,
struct btrfs_ioctl_balance_args *bargs);
+void btrfs_describe_block_groups(u64 flags, char *buf, u32 size_buf);
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
@@ -462,6 +473,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
u64 chunk_offset, u64 chunk_size);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset);
+struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
+ u64 logical, u64 length);
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
int index)
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index ea78c3d6dcfc..f141b45ce349 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -11,6 +11,7 @@
#include <linux/security.h>
#include <linux/posix_acl_xattr.h>
#include <linux/iversion.h>
+#include <linux/sched/mm.h>
#include "ctree.h"
#include "btrfs_inode.h"
#include "transaction.h"
@@ -422,9 +423,15 @@ static int btrfs_initxattrs(struct inode *inode,
{
const struct xattr *xattr;
struct btrfs_trans_handle *trans = fs_info;
+ unsigned int nofs_flag;
char *name;
int err = 0;
+ /*
+ * We're holding a transaction handle, so use a NOFS memory allocation
+ * context to avoid deadlock if reclaim happens.
+ */
+ nofs_flag = memalloc_nofs_save();
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
strlen(xattr->name) + 1, GFP_KERNEL);
@@ -440,6 +447,7 @@ static int btrfs_initxattrs(struct inode *inode,
if (err < 0)
break;
}
+ memalloc_nofs_restore(nofs_flag);
return err;
}
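memalloc_nofs_save()/memalloc_nofs_restore() scope the constraint to the current task, so callees can keep passing GFP_KERNEL and still get NOFS behavior. The general shape of the pattern:

        unsigned int nofs_flag;

        nofs_flag = memalloc_nofs_save();
        /* any allocation here implicitly behaves as GFP_NOFS */
        name = kmalloc(len, GFP_KERNEL);
        /* ... */
        memalloc_nofs_restore(nofs_flag);       /* restore on every exit path */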
diff --git a/fs/buffer.c b/fs/buffer.c
index 1286c2b95498..d60d61e8ed7d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3060,11 +3060,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
*/
bio = bio_alloc(GFP_NOIO, 1);
- if (wbc) {
- wbc_init_bio(wbc, bio);
- wbc_account_io(wbc, bh->b_page, bh->b_size);
- }
-
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_write_hint = write_hint;
@@ -3084,6 +3079,11 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
op_flags |= REQ_PRIO;
bio_set_op_attrs(bio, op, op_flags);
+ if (wbc) {
+ wbc_init_bio(wbc, bio);
+ wbc_account_io(wbc, bh->b_page, bh->b_size);
+ }
+
submit_bio(bio);
return 0;
}
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 95983c744164..1645fcfd9691 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -244,11 +244,13 @@ wait_for_old_object:
ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
- cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_retry);
+ cache->cache.ops->put_object(&xobject->fscache,
+ (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
goto try_again;
requeue:
- cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
+ cache->cache.ops->put_object(&xobject->fscache,
+ (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
_leave(" = -ETIMEDOUT");
return -ETIMEDOUT;
}
@@ -336,7 +338,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
try_again:
/* first step is to make up a grave dentry in the graveyard */
sprintf(nbuffer, "%08x%08x",
- (uint32_t) get_seconds(),
+ (uint32_t) ktime_get_real_seconds(),
(uint32_t) atomic_inc_return(&cache->gravecounter));
/* do the multiway lock magic */
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 40f7595aad10..8a577409d030 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -535,7 +535,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
netpage->index, cachefiles_gfp);
if (ret < 0) {
if (ret == -EEXIST) {
+ put_page(backpage);
+ backpage = NULL;
put_page(netpage);
+ netpage = NULL;
fscache_retrieval_complete(op, 1);
continue;
}
@@ -608,7 +611,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
netpage->index, cachefiles_gfp);
if (ret < 0) {
if (ret == -EEXIST) {
+ put_page(backpage);
+ backpage = NULL;
put_page(netpage);
+ netpage = NULL;
fscache_retrieval_complete(op, 1);
continue;
}
@@ -962,11 +968,8 @@ void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
__releases(&object->fscache.cookie->lock)
{
struct cachefiles_object *object;
- struct cachefiles_cache *cache;
object = container_of(_object, struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
_enter("%p,{%lu}", object, page->index);
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 0a29a00aed2e..511e6c68156a 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -135,7 +135,8 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
struct dentry *dentry = object->dentry;
int ret;
- ASSERT(dentry);
+ if (!dentry)
+ return -ESTALE;
_enter("%p,#%d", object, auxdata->len);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index b5ecd6f50360..4e9a7cc488da 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -563,8 +563,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",noacl");
#endif
- if (fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM)
- seq_puts(m, ",nocopyfrom");
+ if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0)
+ seq_puts(m, ",copyfrom");
if (fsopt->mds_namespace)
seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index c005a5400f2e..79a265ba9200 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -42,7 +42,9 @@
#define CEPH_MOUNT_OPT_NOQUOTADF (1<<13) /* no root dir quota in statfs */
#define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */
-#define CEPH_MOUNT_OPT_DEFAULT CEPH_MOUNT_OPT_DCACHE
+#define CEPH_MOUNT_OPT_DEFAULT \
+ (CEPH_MOUNT_OPT_DCACHE | \
+ CEPH_MOUNT_OPT_NOCOPYFROM)
#define ceph_set_mount_opt(fsc, opt) \
(fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt;
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index abcd78e332fe..85dadb93c992 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -133,7 +133,7 @@ config CIFS_XATTR
config CIFS_POSIX
bool "CIFS POSIX Extensions"
- depends on CIFS_XATTR
+ depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
help
Enabling this option will cause the cifs client to attempt to
negotiate a newer dialect with servers, such as Samba 3.0.5
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 3713d22b95a7..907e85d65bb4 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -174,7 +174,7 @@ cifs_bp_rename_retry:
cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
- full_path[dfsplen] = '\\';
+ full_path[dfsplen] = dirsep;
for (i = 0; i < pplen-1; i++)
if (full_path[dfsplen+1+i] == '/')
full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 74c33d5fafc8..6706328ce03f 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1103,10 +1103,10 @@ try_again:
rc = posix_lock_file(file, flock, NULL);
up_write(&cinode->lock_sem);
if (rc == FILE_LOCK_DEFERRED) {
- rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
+ rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
if (!rc)
goto try_again;
- posix_unblock_lock(flock);
+ locks_delete_block(flock);
}
return rc;
}
@@ -2541,14 +2541,13 @@ static int
cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
struct cifs_aio_ctx *ctx)
{
- int wait_retry = 0;
unsigned int wsize, credits;
int rc;
struct TCP_Server_Info *server =
tlink_tcon(wdata->cfile->tlink)->ses->server;
/*
- * Try to resend this wdata, waiting for credits up to 3 seconds.
+ * Wait for credits to resend this wdata.
* Note: we are attempting to resend the whole wdata not in segments
*/
do {
@@ -2556,19 +2555,13 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
server, wdata->bytes, &wsize, &credits);
if (rc)
- break;
+ goto out;
if (wsize < wdata->bytes) {
add_credits_and_wake_if(server, credits, 0);
msleep(1000);
- wait_retry++;
}
- } while (wsize < wdata->bytes && wait_retry < 3);
-
- if (wsize < wdata->bytes) {
- rc = -EBUSY;
- goto out;
- }
+ } while (wsize < wdata->bytes);
rc = -EAGAIN;
while (rc == -EAGAIN) {
@@ -3234,14 +3227,13 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
struct list_head *rdata_list,
struct cifs_aio_ctx *ctx)
{
- int wait_retry = 0;
unsigned int rsize, credits;
int rc;
struct TCP_Server_Info *server =
tlink_tcon(rdata->cfile->tlink)->ses->server;
/*
- * Try to resend this rdata, waiting for credits up to 3 seconds.
+ * Wait for credits to resend this rdata.
* Note: we are attempting to resend the whole rdata not in segments
*/
do {
@@ -3249,24 +3241,13 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
&rsize, &credits);
if (rc)
- break;
+ goto out;
if (rsize < rdata->bytes) {
add_credits_and_wake_if(server, credits, 0);
msleep(1000);
- wait_retry++;
}
- } while (rsize < rdata->bytes && wait_retry < 3);
-
- /*
- * If we can't find enough credits to send this rdata
- * release the rdata and return failure, this will pass
- * whatever I/O amount we have finished to VFS.
- */
- if (rsize < rdata->bytes) {
- rc = -EBUSY;
- goto out;
- }
+ } while (rsize < rdata->bytes);
rc = -EAGAIN;
while (rc == -EAGAIN) {
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 9e7ef7ec2d70..a8999f930b22 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -97,7 +97,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
if (rc)
goto finished;
- smb2_set_next_command(server, &rqst[num_rqst++]);
+ smb2_set_next_command(server, &rqst[num_rqst++], 0);
/* Operation */
switch (command) {
@@ -111,7 +111,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
SMB2_O_INFO_FILE, 0,
sizeof(struct smb2_file_all_info) +
PATH_MAX * 2, 0, NULL);
- smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_next_command(server, &rqst[num_rqst], 0);
smb2_set_related(&rqst[num_rqst++]);
break;
case SMB2_OP_DELETE:
@@ -127,14 +127,14 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
rqst[num_rqst].rq_iov = si_iov;
rqst[num_rqst].rq_nvec = 1;
- size[0] = 8;
+ size[0] = 1; /* sizeof(__u8); see MS-FSCC section 2.4.11 */
data[0] = &delete_pending[0];
rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
COMPOUND_FID, current->tgid,
FILE_DISPOSITION_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
- smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_next_command(server, &rqst[num_rqst], 1);
smb2_set_related(&rqst[num_rqst++]);
break;
case SMB2_OP_SET_EOF:
@@ -149,7 +149,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
COMPOUND_FID, current->tgid,
FILE_END_OF_FILE_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
- smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_next_command(server, &rqst[num_rqst], 0);
smb2_set_related(&rqst[num_rqst++]);
break;
case SMB2_OP_SET_INFO:
@@ -165,7 +165,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
COMPOUND_FID, current->tgid,
FILE_BASIC_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
- smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_next_command(server, &rqst[num_rqst], 0);
smb2_set_related(&rqst[num_rqst++]);
break;
case SMB2_OP_RENAME:
@@ -189,7 +189,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
COMPOUND_FID, current->tgid,
FILE_RENAME_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
- smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_next_command(server, &rqst[num_rqst], 0);
smb2_set_related(&rqst[num_rqst++]);
break;
case SMB2_OP_HARDLINK:
@@ -213,7 +213,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
COMPOUND_FID, current->tgid,
FILE_LINK_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
- smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_next_command(server, &rqst[num_rqst], 0);
smb2_set_related(&rqst[num_rqst++]);
break;
default:
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 225fec1cfa67..e25c7aade98a 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1194,7 +1194,7 @@ smb2_ioctl_query_info(const unsigned int xid,
rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
if (rc)
goto iqinf_exit;
- smb2_set_next_command(ses->server, &rqst[0]);
+ smb2_set_next_command(ses->server, &rqst[0], 0);
/* Query */
memset(&qi_iov, 0, sizeof(qi_iov));
@@ -1208,7 +1208,7 @@ smb2_ioctl_query_info(const unsigned int xid,
qi.output_buffer_length, buffer);
if (rc)
goto iqinf_exit;
- smb2_set_next_command(ses->server, &rqst[1]);
+ smb2_set_next_command(ses->server, &rqst[1], 0);
smb2_set_related(&rqst[1]);
/* Close */
@@ -1761,16 +1761,23 @@ smb2_set_related(struct smb_rqst *rqst)
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
void
-smb2_set_next_command(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+smb2_set_next_command(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+ bool has_space_for_padding)
{
struct smb2_sync_hdr *shdr;
unsigned long len = smb_rqst_len(server, rqst);
/* SMB headers in a compound are 8 byte aligned. */
if (len & 7) {
- rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
- rqst->rq_iov[rqst->rq_nvec].iov_len = 8 - (len & 7);
- rqst->rq_nvec++;
+ if (has_space_for_padding) {
+ len = rqst->rq_iov[rqst->rq_nvec - 1].iov_len;
+ rqst->rq_iov[rqst->rq_nvec - 1].iov_len =
+ (len + 7) & ~7;
+ } else {
+ rqst->rq_iov[rqst->rq_nvec].iov_base = smb2_padding;
+ rqst->rq_iov[rqst->rq_nvec].iov_len = 8 - (len & 7);
+ rqst->rq_nvec++;
+ }
len = smb_rqst_len(server, rqst);
}
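The in-place variant rounds the final iov's length up rather than appending a padding iov; (len + 7) & ~7 is the usual round-up to the next multiple of 8. For instance:

        /* (33 + 7) & ~7 = 40;  (40 + 7) & ~7 = 40 (already aligned) */
        len = (len + 7) & ~7;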
@@ -1820,7 +1827,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &srch_path);
if (rc)
goto qfs_exit;
- smb2_set_next_command(server, &rqst[0]);
+ smb2_set_next_command(server, &rqst[0], 0);
memset(&qi_iov, 0, sizeof(qi_iov));
rqst[1].rq_iov = qi_iov;
@@ -1833,7 +1840,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
NULL);
if (rc)
goto qfs_exit;
- smb2_set_next_command(server, &rqst[1]);
+ smb2_set_next_command(server, &rqst[1], 0);
smb2_set_related(&rqst[1]);
memset(&close_iov, 0, sizeof(close_iov));
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 9f4e9ed9ce53..2fe78acd7d0c 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -117,7 +117,8 @@ extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
struct smb_rqst *rqst);
extern void smb2_set_next_command(struct TCP_Server_Info *server,
- struct smb_rqst *rqst);
+ struct smb_rqst *rqst,
+ bool has_space_for_padding);
extern void smb2_set_related(struct smb_rqst *rqst);
/*
diff --git a/fs/dax.c b/fs/dax.c
index 9bcce89ea18e..48132eca3761 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -232,6 +232,34 @@ static void *get_unlocked_entry(struct xa_state *xas)
}
}
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages).
+ * After we call xas_unlock_irq(), we cannot touch xas->xa.
+ */
+static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+{
+ struct wait_exceptional_entry_queue ewait;
+ wait_queue_head_t *wq;
+
+ init_wait(&ewait.wait);
+ ewait.wait.func = wake_exceptional_entry_func;
+
+ wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+ prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+ xas_unlock_irq(xas);
+ schedule();
+ finish_wait(wq, &ewait.wait);
+
+ /*
+ * Entry lock waits are exclusive. Wake up the next waiter since
+ * we aren't sure we will acquire the entry lock and thus wake
+ * the next waiter up on unlock.
+ */
+ if (waitqueue_active(wq))
+ __wake_up(wq, TASK_NORMAL, 1, &ewait.key);
+}
+
static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
/* If we were the only waiter woken, wake the next one */
@@ -351,21 +379,21 @@ static struct page *dax_busy_page(void *entry)
* @page: The page whose entry we want to lock
*
* Context: Process context.
- * Return: %true if the entry was locked or does not need to be locked.
+ * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * not be locked.
*/
-bool dax_lock_mapping_entry(struct page *page)
+dax_entry_t dax_lock_page(struct page *page)
{
XA_STATE(xas, NULL, 0);
void *entry;
- bool locked;
/* Ensure page->mapping isn't freed while we look at it */
rcu_read_lock();
for (;;) {
struct address_space *mapping = READ_ONCE(page->mapping);
- locked = false;
- if (!dax_mapping(mapping))
+ entry = NULL;
+ if (!mapping || !dax_mapping(mapping))
break;
/*
@@ -375,7 +403,7 @@ bool dax_lock_mapping_entry(struct page *page)
* otherwise we would not have a valid pfn_to_page()
* translation.
*/
- locked = true;
+ entry = (void *)~0UL;
if (S_ISCHR(mapping->host->i_mode))
break;
@@ -389,9 +417,7 @@ bool dax_lock_mapping_entry(struct page *page)
entry = xas_load(&xas);
if (dax_is_locked(entry)) {
rcu_read_unlock();
- entry = get_unlocked_entry(&xas);
- xas_unlock_irq(&xas);
- put_unlocked_entry(&xas, entry);
+ wait_entry_unlocked(&xas, entry);
rcu_read_lock();
continue;
}
@@ -400,23 +426,18 @@ bool dax_lock_mapping_entry(struct page *page)
break;
}
rcu_read_unlock();
- return locked;
+ return (dax_entry_t)entry;
}
-void dax_unlock_mapping_entry(struct page *page)
+void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
struct address_space *mapping = page->mapping;
XA_STATE(xas, &mapping->i_pages, page->index);
- void *entry;
if (S_ISCHR(mapping->host->i_mode))
return;
- rcu_read_lock();
- entry = xas_load(&xas);
- rcu_read_unlock();
- entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
- dax_unlock_entry(&xas, entry);
+ dax_unlock_entry(&xas, (void *)cookie);
}
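Callers that previously tested a boolean now carry the cookie across the critical section. Roughly, under the new API (sketch modeled on a memory-failure-style caller; page is assumed held by the caller):

        dax_entry_t cookie;

        cookie = dax_lock_page(page);
        if (!cookie)
                return -EBUSY;  /* entry could not be locked */
        /* ... the DAX entry stays locked here ... */
        dax_unlock_page(page, cookie);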
/*
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 722d17c88edb..dbc1a1f080ce 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -325,8 +325,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
*/
dio->iocb->ki_pos += transferred;
- if (dio->op == REQ_OP_WRITE)
- ret = generic_write_sync(dio->iocb, transferred);
+ if (ret > 0 && dio->op == REQ_OP_WRITE)
+ ret = generic_write_sync(dio->iocb, ret);
dio->iocb->ki_complete(dio->iocb, ret, 0);
}
@@ -518,7 +518,7 @@ static struct bio *dio_await_one(struct dio *dio)
dio->waiter = current;
spin_unlock_irqrestore(&dio->bio_lock, flags);
if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
- !blk_poll(dio->bio_disk->queue, dio->bio_cookie))
+ !blk_poll(dio->bio_disk->queue, dio->bio_cookie, true))
io_schedule();
/* wake up sets us TASK_RUNNING */
spin_lock_irqsave(&dio->bio_lock, flags);
@@ -1265,6 +1265,8 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
} else {
dio->op = REQ_OP_READ;
}
+ if (iocb->ki_flags & IOCB_HIPRI)
+ dio->op_flags |= REQ_HIPRI;
/*
* For AIO O_(D)SYNC writes we need to defer completions to a workqueue
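Two separate fixes land in direct-io.c above: dio_complete() now calls generic_write_sync() only when data was actually transferred (ret > 0) and syncs the returned count, so an earlier error can no longer be overwritten; and submission propagates IOCB_HIPRI into REQ_HIPRI so that blk_poll(), which grew a third "spin" argument, has marked requests to poll for. The error-preserving shape in isolation:

    /* Run the follow-up step only on success; never clobber an error. */
    if (ret > 0 && dio->op == REQ_OP_WRITE)
            ret = generic_write_sync(dio->iocb, ret);  /* may replace ret */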
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 562fa8c3edff..47ee66d70109 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -292,6 +292,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
flush_workqueue(ls->ls_callback_wq);
}
+#define MAX_CB_QUEUE 25
+
void dlm_callback_resume(struct dlm_ls *ls)
{
struct dlm_lkb *lkb, *safe;
@@ -302,15 +304,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
if (!ls->ls_callback_wq)
return;
+more:
mutex_lock(&ls->ls_cb_mutex);
list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
list_del_init(&lkb->lkb_cb_list);
queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
count++;
+ if (count == MAX_CB_QUEUE)
+ break;
}
mutex_unlock(&ls->ls_cb_mutex);
if (count)
log_rinfo(ls, "dlm_callback_resume %d", count);
+ if (count == MAX_CB_QUEUE) {
+ count = 0;
+ cond_resched();
+ goto more;
+ }
}
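dlm_callback_resume() above now requeues delayed callbacks in batches of MAX_CB_QUEUE (25), dropping ls_cb_mutex and calling cond_resched() between batches so a large backlog cannot monopolize the CPU with the mutex held. The batch-and-yield pattern, consolidated from the fragmented -/+ lines into one readable sketch:

    /* Requeue a backlog in bounded batches, yielding between them. */
    static void requeue_delayed(struct dlm_ls *ls)
    {
            struct dlm_lkb *lkb, *safe;
            int count;
    more:
            count = 0;
            mutex_lock(&ls->ls_cb_mutex);
            list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
                    list_del_init(&lkb->lkb_cb_list);
                    queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
                    if (++count == MAX_CB_QUEUE)
                            break;
            }
            mutex_unlock(&ls->ls_cb_mutex);
            if (count == MAX_CB_QUEUE) {
                    cond_resched();         /* give the scheduler a turn */
                    goto more;
            }
    }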
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index cc91963683de..a928ba008d7d 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1209,6 +1209,7 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
if (rv < 0) {
log_error(ls, "create_lkb idr error %d", rv);
+ dlm_free_lkb(lkb);
return rv;
}
@@ -4179,6 +4180,7 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
(unsigned long long)lkb->lkb_recover_seq,
ms->m_header.h_nodeid, ms->m_lkid);
error = -ENOENT;
+ dlm_put_lkb(lkb);
goto fail;
}
@@ -4232,6 +4234,7 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
lkb->lkb_id, lkb->lkb_remid,
ms->m_header.h_nodeid, ms->m_lkid);
error = -ENOENT;
+ dlm_put_lkb(lkb);
goto fail;
}
@@ -5792,20 +5795,20 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
goto out;
}
}
-
- /* After ua is attached to lkb it will be freed by dlm_free_lkb().
- When DLM_IFL_USER is set, the dlm knows that this is a userspace
- lock and that lkb_astparam is the dlm_user_args structure. */
-
error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
fake_astfn, ua, fake_bastfn, &args);
- lkb->lkb_flags |= DLM_IFL_USER;
-
if (error) {
+ kfree(ua->lksb.sb_lvbptr);
+ ua->lksb.sb_lvbptr = NULL;
+ kfree(ua);
__put_lkb(ls, lkb);
goto out;
}
+ /* After ua is attached to lkb it will be freed by dlm_free_lkb().
+ When DLM_IFL_USER is set, the dlm knows that this is a userspace
+ lock and that lkb_astparam is the dlm_user_args structure. */
+ lkb->lkb_flags |= DLM_IFL_USER;
error = request_lock(ls, lkb, name, namelen, &args);
switch (error) {
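The dlm_user_request() reordering matters because dlm_free_lkb() frees ua (and ua->lksb.sb_lvbptr) only once DLM_IFL_USER is attached; when set_lock_args() fails before that point, ownership is still with the caller, which must free both itself. The handoff rule restated as a sketch of the failure branch:

    /* Before DLM_IFL_USER is set, 'ua' still belongs to the caller. */
    if (error) {
            kfree(ua->lksb.sb_lvbptr);      /* not yet owned by the lkb */
            ua->lksb.sb_lvbptr = NULL;
            kfree(ua);
            __put_lkb(ls, lkb);
            goto out;
    }
    lkb->lkb_flags |= DLM_IFL_USER;         /* dlm_free_lkb() frees ua now */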
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 5ba94be006ee..db43b98c4d64 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -431,7 +431,7 @@ static int new_lockspace(const char *name, const char *cluster,
int do_unreg = 0;
int namelen = strlen(name);
- if (namelen > DLM_LOCKSPACE_LEN)
+ if (namelen > DLM_LOCKSPACE_LEN || namelen == 0)
return -EINVAL;
if (!lvblen || (lvblen % 8))
@@ -680,11 +680,9 @@ static int new_lockspace(const char *name, const char *cluster,
kfree(ls->ls_recover_buf);
out_lkbidr:
idr_destroy(&ls->ls_lkbidr);
- for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
- if (ls->ls_remove_names[i])
- kfree(ls->ls_remove_names[i]);
- }
out_rsbtbl:
+ for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++)
+ kfree(ls->ls_remove_names[i]);
vfree(ls->ls_rsbtbl);
out_lsfree:
if (do_unreg)
@@ -807,6 +805,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_delete_debug_file(ls);
+ idr_destroy(&ls->ls_recover_idr);
kfree(ls->ls_recover_buf);
/*
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index 3fda3832cf6a..0bc43b35d2c5 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -671,7 +671,7 @@ int dlm_ls_stop(struct dlm_ls *ls)
int dlm_ls_start(struct dlm_ls *ls)
{
struct dlm_recover *rv, *rv_old;
- struct dlm_config_node *nodes;
+ struct dlm_config_node *nodes = NULL;
int error, count;
rv = kzalloc(sizeof(*rv), GFP_NOFS);
@@ -680,7 +680,7 @@ int dlm_ls_start(struct dlm_ls *ls)
error = dlm_config_nodes(ls->ls_name, &nodes, &count);
if (error < 0)
- goto fail;
+ goto fail_rv;
spin_lock(&ls->ls_recover_lock);
@@ -712,8 +712,9 @@ int dlm_ls_start(struct dlm_ls *ls)
return 0;
fail:
- kfree(rv);
kfree(nodes);
+ fail_rv:
+ kfree(rv);
return error;
}
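The dlm_ls_start() fix above is the staged-unwind idiom: initialize later resources to NULL, give each acquisition its own error label in reverse order, and jump to the label that frees exactly what exists so far (dlm_config_nodes() failing must not kfree an array it never produced). A self-contained sketch; commit_recovery() is a hypothetical second stage standing in for the locked bookkeeping in the real function:

    static int start_two_stage(struct dlm_ls *ls)
    {
            struct dlm_recover *rv;
            struct dlm_config_node *nodes = NULL;   /* may never be set */
            int error, count;

            rv = kzalloc(sizeof(*rv), GFP_NOFS);
            if (!rv)
                    return -ENOMEM;

            error = dlm_config_nodes(ls->ls_name, &nodes, &count);
            if (error < 0)
                    goto fail_rv;           /* nodes was never allocated */

            error = commit_recovery(ls, rv, nodes, count);  /* hypothetical */
            if (error < 0)
                    goto fail;              /* free both */

            return 0;
    fail:
            kfree(nodes);
    fail_rv:
            kfree(rv);
            return error;
    }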
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 7cd24bccd4fe..37be29f21d04 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -38,10 +38,8 @@ int __init dlm_memory_init(void)
void dlm_memory_exit(void)
{
- if (lkb_cache)
- kmem_cache_destroy(lkb_cache);
- if (rsb_cache)
- kmem_cache_destroy(rsb_cache);
+ kmem_cache_destroy(lkb_cache);
+ kmem_cache_destroy(rsb_cache);
}
char *dlm_allocate_lvb(struct dlm_ls *ls)
@@ -86,8 +84,7 @@ void dlm_free_lkb(struct dlm_lkb *lkb)
struct dlm_user_args *ua;
ua = lkb->lkb_ua;
if (ua) {
- if (ua->lksb.sb_lvbptr)
- kfree(ua->lksb.sb_lvbptr);
+ kfree(ua->lksb.sb_lvbptr);
kfree(ua);
}
}
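Both memory.c hunks lean on the guarantee that kfree(NULL) and kmem_cache_destroy(NULL) are no-ops, so the deleted NULL checks were redundant. That guarantee keeps teardown paths flat; a sketch with a hypothetical context type:

    struct my_ctx {                         /* hypothetical */
            char *lvb;                      /* may stay NULL */
            struct kmem_cache *cache;       /* may stay NULL */
    };

    static void ctx_free(struct my_ctx *c)
    {
            /* Neither helper needs a guard: both accept NULL silently. */
            kfree(c->lvb);
            kmem_cache_destroy(c->cache);
    }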
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 2a669390cd7f..3c84c62dadb7 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -25,6 +25,7 @@
#include "lvb_table.h"
#include "user.h"
#include "ast.h"
+#include "config.h"
static const char name_prefix[] = "dlm";
static const struct file_operations device_fops;
@@ -404,7 +405,7 @@ static int device_create_lockspace(struct dlm_lspace_params *params)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- error = dlm_new_lockspace(params->name, NULL, params->flags,
+ error = dlm_new_lockspace(params->name, dlm_config.ci_cluster_name, params->flags,
DLM_USER_LVB_LEN, NULL, NULL, NULL,
&lockspace);
if (error)
@@ -702,7 +703,7 @@ static int copy_result_to_user(struct dlm_user_args *ua, int compat,
result.version[0] = DLM_DEVICE_VERSION_MAJOR;
result.version[1] = DLM_DEVICE_VERSION_MINOR;
result.version[2] = DLM_DEVICE_VERSION_PATCH;
- memcpy(&result.lksb, &ua->lksb, sizeof(struct dlm_lksb));
+ memcpy(&result.lksb, &ua->lksb, offsetof(struct dlm_lksb, sb_lvbptr));
result.user_lksb = ua->user_lksb;
/* FIXME: dlm1 provides for the user's bastparam/addr to not be updated
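copy_result_to_user() above now copies only offsetof(struct dlm_lksb, sb_lvbptr) bytes, so the kernel-side sb_lvbptr pointer value never reaches userspace while the layout-compatible prefix still does. The idiom in a standalone, compilable form, with a hypothetical layout:

    #include <stddef.h>
    #include <string.h>

    struct reply {                  /* hypothetical layout */
            int status;
            int flags;
            void *kernel_ptr;       /* must not leak to userspace */
    };

    static void fill_reply(struct reply *out, const struct reply *in)
    {
            /* Copy the prefix up to, but excluding, the pointer. */
            memcpy(out, in, offsetof(struct reply, kernel_ptr));
            out->kernel_ptr = NULL;
    }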
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 42bbe6824b4b..8a5a1010886b 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -2223,31 +2223,13 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
* If the caller wants a certain signal mask to be set during the wait,
* we apply it here.
*/
- if (sigmask) {
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
- if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
- return -EFAULT;
- sigsaved = current->blocked;
- set_current_blocked(&ksigmask);
- }
+ error = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
+ if (error)
+ return error;
error = do_epoll_wait(epfd, events, maxevents, timeout);
- /*
- * If we changed the signal mask, we need to restore the original one.
- * In case we've got a signal while waiting, we do not restore the
- * signal mask yet, and we allow do_signal() to deliver the signal on
- * the way back to userspace, before the signal mask is restored.
- */
- if (sigmask) {
- if (error == -EINTR) {
- memcpy(&current->saved_sigmask, &sigsaved,
- sizeof(sigsaved));
- set_restore_sigmask();
- } else
- set_current_blocked(&sigsaved);
- }
+ restore_user_sigmask(sigmask, &sigsaved);
return error;
}
@@ -2266,31 +2248,13 @@ COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,
* If the caller wants a certain signal mask to be set during the wait,
* we apply it here.
*/
- if (sigmask) {
- if (sigsetsize != sizeof(compat_sigset_t))
- return -EINVAL;
- if (get_compat_sigset(&ksigmask, sigmask))
- return -EFAULT;
- sigsaved = current->blocked;
- set_current_blocked(&ksigmask);
- }
+ err = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
+ if (err)
+ return err;
err = do_epoll_wait(epfd, events, maxevents, timeout);
- /*
- * If we changed the signal mask, we need to restore the original one.
- * In case we've got a signal while waiting, we do not restore the
- * signal mask yet, and we allow do_signal() to deliver the signal on
- * the way back to userspace, before the signal mask is restored.
- */
- if (sigmask) {
- if (err == -EINTR) {
- memcpy(&current->saved_sigmask, &sigsaved,
- sizeof(sigsaved));
- set_restore_sigmask();
- } else
- set_current_blocked(&sigsaved);
- }
+ restore_user_sigmask(sigmask, &sigsaved);
return err;
}
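Both epoll_pwait() variants above collapse their open-coded signal-mask juggling into set_user_sigmask()/restore_user_sigmask() (plus the compat twin). Reconstructed purely from the open-coded block being deleted, the install half must look roughly like this sketch; the real helpers live elsewhere in the kernel:

    /* Sketch of the install half, per the deleted open-coded logic. */
    static int set_user_sigmask_sketch(const sigset_t __user *umask,
                                       sigset_t *kmask, sigset_t *saved,
                                       size_t sigsetsize)
    {
            if (!umask)
                    return 0;
            if (sigsetsize != sizeof(sigset_t))
                    return -EINVAL;
            if (copy_from_user(kmask, umask, sizeof(*kmask)))
                    return -EFAULT;
            *saved = current->blocked;
            set_current_blocked(kmask);
            return 0;
    }

The restore half keeps the subtlety documented in the removed comment: on -EINTR the old mask is parked in current->saved_sigmask and restored only after the signal is delivered on the way back to userspace.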
diff --git a/fs/exec.c b/fs/exec.c
index acc3a5536384..fc281b738a98 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -62,7 +62,6 @@
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
-#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -1084,7 +1083,7 @@ static int de_thread(struct task_struct *tsk)
while (sig->notify_count) {
__set_current_state(TASK_KILLABLE);
spin_unlock_irq(lock);
- freezable_schedule();
+ schedule();
if (unlikely(__fatal_signal_pending(tsk)))
goto killed;
spin_lock_irq(lock);
@@ -1112,7 +1111,7 @@ static int de_thread(struct task_struct *tsk)
__set_current_state(TASK_KILLABLE);
write_unlock_irq(&tasklist_lock);
cgroup_threadgroup_change_end(tsk);
- freezable_schedule();
+ schedule();
if (unlikely(__fatal_signal_pending(tsk)))
goto killed;
}
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 645158dc33f1..c69927bed4ef 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -77,7 +77,7 @@ static bool dentry_connected(struct dentry *dentry)
struct dentry *parent = dget_parent(dentry);
dput(dentry);
- if (IS_ROOT(dentry)) {
+ if (dentry == parent) {
dput(parent);
return false;
}
@@ -147,6 +147,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf));
if (IS_ERR(tmp)) {
dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
+ err = PTR_ERR(tmp);
goto out_err;
}
if (tmp != dentry) {
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index cb91baa4275d..73b2d528237f 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -73,7 +73,7 @@ void ext2_error(struct super_block *sb, const char *function,
if (test_opt(sb, ERRORS_PANIC))
panic("EXT2-fs: panic from previous error\n");
- if (test_opt(sb, ERRORS_RO)) {
+ if (!sb_rdonly(sb) && test_opt(sb, ERRORS_RO)) {
ext2_msg(sb, KERN_CRIT,
"error: remounting filesystem read-only");
sb->s_flags |= SB_RDONLY;
@@ -148,10 +148,9 @@ static void ext2_put_super (struct super_block * sb)
ext2_quota_off_umount(sb);
- if (sbi->s_ea_block_cache) {
- ext2_xattr_destroy_cache(sbi->s_ea_block_cache);
- sbi->s_ea_block_cache = NULL;
- }
+ ext2_xattr_destroy_cache(sbi->s_ea_block_cache);
+ sbi->s_ea_block_cache = NULL;
+
if (!sb_rdonly(sb)) {
struct ext2_super_block *es = sbi->s_es;
@@ -892,6 +891,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
+ opts.s_mount_opt = 0;
/* Set defaults before we parse the mount options */
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
if (def_mount_opts & EXT2_DEFM_DEBUG)
@@ -1197,8 +1197,7 @@ cantfind_ext2:
sb->s_id);
goto failed_mount;
failed_mount3:
- if (sbi->s_ea_block_cache)
- ext2_xattr_destroy_cache(sbi->s_ea_block_cache);
+ ext2_xattr_destroy_cache(sbi->s_ea_block_cache);
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 62d9a659a8ff..4f30876ee325 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -612,9 +612,9 @@ skip_replace:
}
cleanup:
- brelse(bh);
if (!(bh && header == HDR(bh)))
kfree(header);
+ brelse(bh);
up_write(&EXT2_I(inode)->xattr_sem);
return error;
@@ -835,7 +835,8 @@ ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
int error;
- error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 1);
+ error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
+ true);
if (error) {
if (error == -EBUSY) {
ea_bdebug(bh, "already in cache (%d cache entries)",
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index c1d570ee1d9f..8c7bbf3e566d 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -248,7 +248,8 @@ retry:
error = posix_acl_update_mode(inode, &mode, &acl);
if (error)
goto out_stop;
- update_mode = 1;
+ if (mode != inode->i_mode)
+ update_mode = 1;
}
error = __ext4_set_acl(handle, inode, type, acl, 0 /* xattr_flags */);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 3f89d0ab08fc..185a05d3257e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2454,8 +2454,19 @@ int do_journal_get_write_access(handle_t *handle,
#define FALL_BACK_TO_NONDELALLOC 1
#define CONVERT_INLINE_DATA 2
-extern struct inode *ext4_iget(struct super_block *, unsigned long);
-extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
+typedef enum {
+ EXT4_IGET_NORMAL = 0,
+ EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */
+ EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */
+} ext4_iget_flags;
+
+extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ ext4_iget_flags flags, const char *function,
+ unsigned int line);
+
+#define ext4_iget(sb, ino, flags) \
+ __ext4_iget((sb), (ino), (flags), __func__, __LINE__)
+
extern int ext4_write_inode(struct inode *, struct writeback_control *);
extern int ext4_setattr(struct dentry *, struct iattr *);
extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
@@ -2538,6 +2549,8 @@ extern int ext4_group_extend(struct super_block *sb,
extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
/* super.c */
+extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
+ sector_t block, int op_flags);
extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
extern int ext4_calculate_overhead(struct super_block *sb);
extern void ext4_superblock_csum_set(struct super_block *sb);
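ext4_iget() above becomes a macro over __ext4_iget() so each caller's __func__ and __LINE__ reach the error-reporting path, and the old ext4_iget_normal() wrapper is folded into the EXT4_IGET_NORMAL flag. The three call shapes, all visible later in this diff:

    inode = ext4_iget(dir->i_sb, ino, EXT4_IGET_NORMAL);        /* lookups */
    inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);  /* system inodes */
    inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);  /* NFS handles: bad ino => -ESTALE */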
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 014f6a698cb7..7ff14a1adba3 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1225,7 +1225,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
if (!ext4_test_bit(bit, bitmap_bh->b_data))
goto bad_orphan;
- inode = ext4_iget(sb, ino);
+ inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 9c4bac18cc6c..27373d88b5f0 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -705,8 +705,11 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
if (!PageUptodate(page)) {
ret = ext4_read_inline_page(inode, page);
- if (ret < 0)
+ if (ret < 0) {
+ unlock_page(page);
+ put_page(page);
goto out_up_read;
+ }
}
ret = 1;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 22a9d8159720..9affabd07682 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4817,7 +4817,9 @@ static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
return inode_peek_iversion(inode);
}
-struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ ext4_iget_flags flags, const char *function,
+ unsigned int line)
{
struct ext4_iloc iloc;
struct ext4_inode *raw_inode;
@@ -4831,6 +4833,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
gid_t i_gid;
projid_t i_projid;
+ if ((!(flags & EXT4_IGET_SPECIAL) &&
+ (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
+ (ino < EXT4_ROOT_INO) ||
+ (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
+ if (flags & EXT4_IGET_HANDLE)
+ return ERR_PTR(-ESTALE);
+ __ext4_error(sb, function, line,
+ "inode #%lu: comm %s: iget: illegal inode #",
+ ino, current->comm);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -4846,18 +4860,26 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
raw_inode = ext4_raw_inode(&iloc);
if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
- EXT4_ERROR_INODE(inode, "root inode unallocated");
+ ext4_error_inode(inode, function, line, 0,
+ "iget: root inode unallocated");
ret = -EFSCORRUPTED;
goto bad_inode;
}
+ if ((flags & EXT4_IGET_HANDLE) &&
+ (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
+ ret = -ESTALE;
+ goto bad_inode;
+ }
+
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
EXT4_INODE_SIZE(inode->i_sb) ||
(ei->i_extra_isize & 3)) {
- EXT4_ERROR_INODE(inode,
- "bad extra_isize %u (inode size %u)",
+ ext4_error_inode(inode, function, line, 0,
+ "iget: bad extra_isize %u "
+ "(inode size %u)",
ei->i_extra_isize,
EXT4_INODE_SIZE(inode->i_sb));
ret = -EFSCORRUPTED;
@@ -4879,7 +4901,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
}
if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
- EXT4_ERROR_INODE(inode, "checksum invalid");
+ ext4_error_inode(inode, function, line, 0,
+ "iget: checksum invalid");
ret = -EFSBADCRC;
goto bad_inode;
}
@@ -4936,7 +4959,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(sb, raw_inode);
if ((size = i_size_read(inode)) < 0) {
- EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
+ ext4_error_inode(inode, function, line, 0,
+ "iget: bad i_size value: %lld", size);
ret = -EFSCORRUPTED;
goto bad_inode;
}
@@ -5012,7 +5036,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
ret = 0;
if (ei->i_file_acl &&
!ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
- EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
+ ext4_error_inode(inode, function, line, 0,
+ "iget: bad extended attribute block %llu",
ei->i_file_acl);
ret = -EFSCORRUPTED;
goto bad_inode;
@@ -5040,8 +5065,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
} else if (S_ISLNK(inode->i_mode)) {
/* VFS does not allow setting these so must be corruption */
if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
- EXT4_ERROR_INODE(inode,
- "immutable or append flags not allowed on symlinks");
+ ext4_error_inode(inode, function, line, 0,
+ "iget: immutable or append flags "
+ "not allowed on symlinks");
ret = -EFSCORRUPTED;
goto bad_inode;
}
@@ -5071,7 +5097,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
make_bad_inode(inode);
} else {
ret = -EFSCORRUPTED;
- EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
+ ext4_error_inode(inode, function, line, 0,
+ "iget: bogus i_mode (%o)", inode->i_mode);
goto bad_inode;
}
brelse(iloc.bh);
@@ -5085,13 +5112,6 @@ bad_inode:
return ERR_PTR(ret);
}
-struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
-{
- if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
- return ERR_PTR(-EFSCORRUPTED);
- return ext4_iget(sb, ino);
-}
-
static int ext4_inode_blocks_set(handle_t *handle,
struct ext4_inode *raw_inode,
struct ext4_inode_info *ei)
@@ -5380,9 +5400,13 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int err;
- if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
+ if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
+ sb_rdonly(inode->i_sb))
return 0;
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ return -EIO;
+
if (EXT4_SB(inode->i_sb)->s_journal) {
if (ext4_journal_current_handle()) {
jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
@@ -5398,7 +5422,8 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
return 0;
- err = ext4_force_commit(inode->i_sb);
+ err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
+ EXT4_I(inode)->i_sync_tid);
} else {
struct ext4_iloc iloc;
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0edee31913d1..d37dafa1d133 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -125,7 +125,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
return -EPERM;
- inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
+ inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
if (IS_ERR(inode_bl))
return PTR_ERR(inode_bl);
ei_bl = EXT4_I(inode_bl);
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 61a9d1927817..b1e4d359f73b 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -116,9 +116,9 @@ static int update_ind_extent_range(handle_t *handle, struct inode *inode,
int i, retval = 0;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
- bh = sb_bread(inode->i_sb, pblock);
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
i_data = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
@@ -145,9 +145,9 @@ static int update_dind_extent_range(handle_t *handle, struct inode *inode,
int i, retval = 0;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
- bh = sb_bread(inode->i_sb, pblock);
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
i_data = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
@@ -175,9 +175,9 @@ static int update_tind_extent_range(handle_t *handle, struct inode *inode,
int i, retval = 0;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
- bh = sb_bread(inode->i_sb, pblock);
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
i_data = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
@@ -224,9 +224,9 @@ static int free_dind_blocks(handle_t *handle,
struct buffer_head *bh;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
- bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
tmp_idata = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
@@ -254,9 +254,9 @@ static int free_tind_blocks(handle_t *handle,
struct buffer_head *bh;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
- bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
tmp_idata = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
@@ -382,9 +382,9 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
struct ext4_extent_header *eh;
block = ext4_idx_pblock(ix);
- bh = sb_bread(inode->i_sb, block);
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, block, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
eh = (struct ext4_extent_header *)bh->b_data;
if (eh->eh_depth != 0) {
@@ -535,22 +535,22 @@ int ext4_ext_migrate(struct inode *inode)
if (i_data[EXT4_IND_BLOCK]) {
retval = update_ind_extent_range(handle, tmp_inode,
le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
- if (retval)
- goto err_out;
+ if (retval)
+ goto err_out;
} else
lb.curr_block += max_entries;
if (i_data[EXT4_DIND_BLOCK]) {
retval = update_dind_extent_range(handle, tmp_inode,
le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
- if (retval)
- goto err_out;
+ if (retval)
+ goto err_out;
} else
lb.curr_block += max_entries * max_entries;
if (i_data[EXT4_TIND_BLOCK]) {
retval = update_tind_extent_range(handle, tmp_inode,
le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
- if (retval)
- goto err_out;
+ if (retval)
+ goto err_out;
}
/*
* Build the last extent
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 437f71fe83ae..2b928eb07fa2 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1571,7 +1571,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
dentry);
return ERR_PTR(-EFSCORRUPTED);
}
- inode = ext4_iget_normal(dir->i_sb, ino);
+ inode = ext4_iget(dir->i_sb, ino, EXT4_IGET_NORMAL);
if (inode == ERR_PTR(-ESTALE)) {
EXT4_ERROR_INODE(dir,
"deleted inode referenced: %u",
@@ -1613,7 +1613,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
return ERR_PTR(-EFSCORRUPTED);
}
- return d_obtain_alias(ext4_iget_normal(child->d_sb, ino));
+ return d_obtain_alias(ext4_iget(child->d_sb, ino, EXT4_IGET_NORMAL));
}
/*
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index db7590178dfc..2aa62d58d8dd 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -374,13 +374,13 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
if (!bio)
return -ENOMEM;
- wbc_init_bio(io->io_wbc, bio);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end);
io->io_bio = bio;
io->io_next_block = bh->b_blocknr;
+ wbc_init_bio(io->io_wbc, bio);
return 0;
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index a5efee34415f..48421de803b7 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -127,10 +127,12 @@ static int verify_group_input(struct super_block *sb,
else if (free_blocks_count < 0)
ext4_warning(sb, "Bad blocks count %u",
input->blocks_count);
- else if (!(bh = sb_bread(sb, end - 1)))
+ else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
+ err = PTR_ERR(bh);
+ bh = NULL;
ext4_warning(sb, "Cannot read last block (%llu)",
end - 1);
- else if (outside(input->block_bitmap, start, end))
+ } else if (outside(input->block_bitmap, start, end))
ext4_warning(sb, "Block bitmap not in group (block %llu)",
(unsigned long long)input->block_bitmap);
else if (outside(input->inode_bitmap, start, end))
@@ -781,11 +783,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
- struct buffer_head **o_group_desc, **n_group_desc;
- struct buffer_head *dind;
- struct buffer_head *gdb_bh;
+ struct buffer_head **o_group_desc, **n_group_desc = NULL;
+ struct buffer_head *dind = NULL;
+ struct buffer_head *gdb_bh = NULL;
int gdbackups;
- struct ext4_iloc iloc;
+ struct ext4_iloc iloc = { .bh = NULL };
__le32 *data;
int err;
@@ -794,21 +796,22 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
"EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
gdb_num);
- gdb_bh = sb_bread(sb, gdblock);
- if (!gdb_bh)
- return -EIO;
+ gdb_bh = ext4_sb_bread(sb, gdblock, 0);
+ if (IS_ERR(gdb_bh))
+ return PTR_ERR(gdb_bh);
gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
if (gdbackups < 0) {
err = gdbackups;
- goto exit_bh;
+ goto errout;
}
data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
- dind = sb_bread(sb, le32_to_cpu(*data));
- if (!dind) {
- err = -EIO;
- goto exit_bh;
+ dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
+ if (IS_ERR(dind)) {
+ err = PTR_ERR(dind);
+ dind = NULL;
+ goto errout;
}
data = (__le32 *)dind->b_data;
@@ -816,18 +819,18 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
ext4_warning(sb, "new group %u GDT block %llu not reserved",
group, gdblock);
err = -EINVAL;
- goto exit_dind;
+ goto errout;
}
BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
if (unlikely(err))
- goto exit_dind;
+ goto errout;
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
if (unlikely(err))
- goto exit_dind;
+ goto errout;
BUFFER_TRACE(dind, "get_write_access");
err = ext4_journal_get_write_access(handle, dind);
@@ -837,7 +840,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
/* ext4_reserve_inode_write() gets a reference on the iloc */
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (unlikely(err))
- goto exit_dind;
+ goto errout;
n_group_desc = ext4_kvmalloc((gdb_num + 1) *
sizeof(struct buffer_head *),
@@ -846,7 +849,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = -ENOMEM;
ext4_warning(sb, "not enough memory for %lu groups",
gdb_num + 1);
- goto exit_inode;
+ goto errout;
}
/*
@@ -862,7 +865,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = ext4_handle_dirty_metadata(handle, NULL, dind);
if (unlikely(err)) {
ext4_std_error(sb, err);
- goto exit_inode;
+ goto errout;
}
inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
(9 - EXT4_SB(sb)->s_cluster_bits);
@@ -871,8 +874,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
if (unlikely(err)) {
ext4_std_error(sb, err);
- iloc.bh = NULL;
- goto exit_inode;
+ goto errout;
}
brelse(dind);
@@ -888,15 +890,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = ext4_handle_dirty_super(handle, sb);
if (err)
ext4_std_error(sb, err);
-
return err;
-
-exit_inode:
+errout:
kvfree(n_group_desc);
brelse(iloc.bh);
-exit_dind:
brelse(dind);
-exit_bh:
brelse(gdb_bh);
ext4_debug("leaving with error %d\n", err);
@@ -916,9 +914,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
gdblock = ext4_meta_bg_first_block_no(sb, group) +
ext4_bg_has_super(sb, group);
- gdb_bh = sb_bread(sb, gdblock);
- if (!gdb_bh)
- return -EIO;
+ gdb_bh = ext4_sb_bread(sb, gdblock, 0);
+ if (IS_ERR(gdb_bh))
+ return PTR_ERR(gdb_bh);
n_group_desc = ext4_kvmalloc((gdb_num + 1) *
sizeof(struct buffer_head *),
GFP_NOFS);
@@ -975,9 +973,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
return -ENOMEM;
data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
- dind = sb_bread(sb, le32_to_cpu(*data));
- if (!dind) {
- err = -EIO;
+ dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
+ if (IS_ERR(dind)) {
+ err = PTR_ERR(dind);
+ dind = NULL;
goto exit_free;
}
@@ -996,9 +995,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
err = -EINVAL;
goto exit_bh;
}
- primary[res] = sb_bread(sb, blk);
- if (!primary[res]) {
- err = -EIO;
+ primary[res] = ext4_sb_bread(sb, blk, 0);
+ if (IS_ERR(primary[res])) {
+ err = PTR_ERR(primary[res]);
+ primary[res] = NULL;
goto exit_bh;
}
gdbackups = verify_reserved_gdb(sb, group, primary[res]);
@@ -1631,13 +1631,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
}
if (reserved_gdb || gdb_off == 0) {
- if (ext4_has_feature_resize_inode(sb) ||
+ if (!ext4_has_feature_resize_inode(sb) ||
!le16_to_cpu(es->s_reserved_gdt_blocks)) {
ext4_warning(sb,
"No reserved GDT blocks, can't resize");
return -EPERM;
}
- inode = ext4_iget(sb, EXT4_RESIZE_INO);
+ inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
if (IS_ERR(inode)) {
ext4_warning(sb, "Error opening resize inode");
return PTR_ERR(inode);
@@ -1965,7 +1965,8 @@ retry:
}
if (!resize_inode)
- resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
+ resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
+ EXT4_IGET_SPECIAL);
if (IS_ERR(resize_inode)) {
ext4_warning(sb, "Error opening resize inode");
return PTR_ERR(resize_inode);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 53ff6c2a26ed..d6c142d73d99 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -140,6 +140,29 @@ MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
+/*
+ * This works like sb_bread() except it uses ERR_PTR for error
+ * returns. Currently with sb_bread it's impossible to distinguish
+ * between ENOMEM and EIO situations (since both result in a NULL
+ * return).
+ */
+struct buffer_head *
+ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
+{
+ struct buffer_head *bh = sb_getblk(sb, block);
+
+ if (bh == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (buffer_uptodate(bh))
+ return bh;
+ ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
+ wait_on_buffer(bh);
+ if (buffer_uptodate(bh))
+ return bh;
+ put_bh(bh);
+ return ERR_PTR(-EIO);
+}
+
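Every sb_bread() caller converted in this series lands on the same three-line pattern; unlike the NULL convention, PTR_ERR() lets callers tell -ENOMEM from -EIO (see ext4_xattr_block_cache_find() later in this diff, which quietly gives up on -ENOMEM instead of reporting a read error):

    bh = ext4_sb_bread(sb, block, REQ_PRIO);
    if (IS_ERR(bh))
            return PTR_ERR(bh);     /* -ENOMEM or -EIO, now distinguishable */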
static int ext4_verify_csum_type(struct super_block *sb,
struct ext4_super_block *es)
{
@@ -1000,14 +1023,13 @@ static void ext4_put_super(struct super_block *sb)
invalidate_bdev(sbi->journal_bdev);
ext4_blkdev_remove(sbi);
}
- if (sbi->s_ea_inode_cache) {
- ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
- sbi->s_ea_inode_cache = NULL;
- }
- if (sbi->s_ea_block_cache) {
- ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
- sbi->s_ea_block_cache = NULL;
- }
+
+ ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
+ sbi->s_ea_inode_cache = NULL;
+
+ ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
+ sbi->s_ea_block_cache = NULL;
+
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
brelse(sbi->s_sbh);
@@ -1151,20 +1173,11 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
{
struct inode *inode;
- if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
- return ERR_PTR(-ESTALE);
- if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
- return ERR_PTR(-ESTALE);
-
- /* iget isn't really right if the inode is currently unallocated!!
- *
- * ext4_read_inode will return a bad_inode if the inode had been
- * deleted, so we should be safe.
- *
+ /*
* Currently we don't know the generation for parent directory, so
* a generation of 0 means "accept any"
*/
- inode = ext4_iget_normal(sb, ino);
+ inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (generation && inode->i_generation != generation) {
@@ -1189,6 +1202,16 @@ static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
ext4_nfs_get_inode);
}
+static int ext4_nfs_commit_metadata(struct inode *inode)
+{
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL
+ };
+
+ trace_ext4_nfs_commit_metadata(inode);
+ return ext4_write_inode(inode, &wbc);
+}
+
/*
* Try to release metadata pages (indirect blocks, directories) which are
* mapped via the block device. Since these pages could have journal heads
@@ -1393,6 +1416,7 @@ static const struct export_operations ext4_export_ops = {
.fh_to_dentry = ext4_fh_to_dentry,
.fh_to_parent = ext4_fh_to_parent,
.get_parent = ext4_get_parent,
+ .commit_metadata = ext4_nfs_commit_metadata,
};
enum {
@@ -1939,7 +1963,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
#ifdef CONFIG_FS_DAX
ext4_msg(sb, KERN_WARNING,
"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
- sbi->s_mount_opt |= m->mount_opt;
+ sbi->s_mount_opt |= m->mount_opt;
#else
ext4_msg(sb, KERN_INFO, "dax option not supported");
return -1;
@@ -3842,12 +3866,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (ext4_has_feature_inline_data(sb)) {
ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
" that may contain inline data");
- sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
+ goto failed_mount;
}
if (!bdev_dax_supported(sb->s_bdev, blocksize)) {
ext4_msg(sb, KERN_ERR,
- "DAX unsupported by block device. Turning off DAX.");
- sbi->s_mount_opt &= ~EXT4_MOUNT_DAX;
+ "DAX unsupported by block device.");
+ goto failed_mount;
}
}
@@ -4328,7 +4352,7 @@ no_journal:
* so we can safely mount the rest of the filesystem now.
*/
- root = ext4_iget(sb, EXT4_ROOT_INO);
+ root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
if (IS_ERR(root)) {
ext4_msg(sb, KERN_ERR, "get root inode failed");
ret = PTR_ERR(root);
@@ -4522,14 +4546,12 @@ failed_mount4:
if (EXT4_SB(sb)->rsv_conversion_wq)
destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
- if (sbi->s_ea_inode_cache) {
- ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
- sbi->s_ea_inode_cache = NULL;
- }
- if (sbi->s_ea_block_cache) {
- ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
- sbi->s_ea_block_cache = NULL;
- }
+ ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
+ sbi->s_ea_inode_cache = NULL;
+
+ ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
+ sbi->s_ea_block_cache = NULL;
+
if (sbi->s_journal) {
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
@@ -4598,7 +4620,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
* happen if we iget() an unused inode, as the subsequent iput()
* will try to delete it.
*/
- journal_inode = ext4_iget(sb, journal_inum);
+ journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
if (IS_ERR(journal_inode)) {
ext4_msg(sb, KERN_ERR, "no journal found");
return NULL;
@@ -5680,7 +5702,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
if (!qf_inums[type])
return -EPERM;
- qf_inode = ext4_iget(sb, qf_inums[type]);
+ qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
if (IS_ERR(qf_inode)) {
ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
return PTR_ERR(qf_inode);
@@ -5690,9 +5712,9 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
qf_inode->i_flags |= S_NOQUOTA;
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
err = dquot_enable(qf_inode, type, format_id, flags);
- iput(qf_inode);
if (err)
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
+ iput(qf_inode);
return err;
}
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 7643d52c776c..86ed9c686249 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -384,7 +384,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
struct inode *inode;
int err;
- inode = ext4_iget(parent->i_sb, ea_ino);
+ inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
ext4_error(parent->i_sb,
@@ -522,14 +522,13 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
name_index, name, buffer, (long)buffer_size);
- error = -ENODATA;
if (!EXT4_I(inode)->i_file_acl)
- goto cleanup;
+ return -ENODATA;
ea_idebug(inode, "reading block %llu",
(unsigned long long)EXT4_I(inode)->i_file_acl);
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- if (!bh)
- goto cleanup;
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
error = ext4_xattr_check_block(inode, bh);
@@ -696,26 +695,23 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
ea_idebug(inode, "buffer=%p, buffer_size=%ld",
buffer, (long)buffer_size);
- error = 0;
if (!EXT4_I(inode)->i_file_acl)
- goto cleanup;
+ return 0;
ea_idebug(inode, "reading block %llu",
(unsigned long long)EXT4_I(inode)->i_file_acl);
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- error = -EIO;
- if (!bh)
- goto cleanup;
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
error = ext4_xattr_check_block(inode, bh);
if (error)
goto cleanup;
ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
- error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
-
+ error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
+ buffer_size);
cleanup:
brelse(bh);
-
return error;
}
@@ -830,9 +826,9 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
}
if (EXT4_I(inode)->i_file_acl) {
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- if (!bh) {
- ret = -EIO;
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh)) {
+ ret = PTR_ERR(bh);
goto out;
}
@@ -1486,7 +1482,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
}
while (ce) {
- ea_inode = ext4_iget(inode->i_sb, ce->e_value);
+ ea_inode = ext4_iget(inode->i_sb, ce->e_value,
+ EXT4_IGET_NORMAL);
if (!IS_ERR(ea_inode) &&
!is_bad_inode(ea_inode) &&
(EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
@@ -1821,16 +1818,15 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
if (EXT4_I(inode)->i_file_acl) {
/* The inode already has an extended attribute block. */
- bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
- error = -EIO;
- if (!bs->bh)
- goto cleanup;
+ bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bs->bh))
+ return PTR_ERR(bs->bh);
ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
atomic_read(&(bs->bh->b_count)),
le32_to_cpu(BHDR(bs->bh)->h_refcount));
error = ext4_xattr_check_block(inode, bs->bh);
if (error)
- goto cleanup;
+ return error;
/* Find the named attribute. */
bs->s.base = BHDR(bs->bh);
bs->s.first = BFIRST(bs->bh);
@@ -1839,13 +1835,10 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
i->name_index, i->name, 1);
if (error && error != -ENODATA)
- goto cleanup;
+ return error;
bs->s.not_found = error;
}
- error = 0;
-
-cleanup:
- return error;
+ return 0;
}
static int
@@ -2274,9 +2267,9 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
if (!EXT4_I(inode)->i_file_acl)
return NULL;
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- if (!bh)
- return ERR_PTR(-EIO);
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh))
+ return bh;
error = ext4_xattr_check_block(inode, bh);
if (error) {
brelse(bh);
@@ -2729,7 +2722,7 @@ retry:
base = IFIRST(header);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
min_offs = end - base;
- total_ino = sizeof(struct ext4_xattr_ibody_header);
+ total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
error = xattr_check_inode(inode, header, end);
if (error)
@@ -2746,10 +2739,11 @@ retry:
if (EXT4_I(inode)->i_file_acl) {
struct buffer_head *bh;
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- error = -EIO;
- if (!bh)
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh)) {
+ error = PTR_ERR(bh);
goto cleanup;
+ }
error = ext4_xattr_check_block(inode, bh);
if (error) {
brelse(bh);
@@ -2903,11 +2897,12 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
}
if (EXT4_I(inode)->i_file_acl) {
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- if (!bh) {
- EXT4_ERROR_INODE(inode, "block %llu read error",
- EXT4_I(inode)->i_file_acl);
- error = -EIO;
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh)) {
+ error = PTR_ERR(bh);
+ if (error == -EIO)
+ EXT4_ERROR_INODE(inode, "block %llu read error",
+ EXT4_I(inode)->i_file_acl);
goto cleanup;
}
error = ext4_xattr_check_block(inode, bh);
@@ -3060,8 +3055,10 @@ ext4_xattr_block_cache_find(struct inode *inode,
while (ce) {
struct buffer_head *bh;
- bh = sb_bread(inode->i_sb, ce->e_value);
- if (!bh) {
+ bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
+ if (IS_ERR(bh)) {
+ if (PTR_ERR(bh) == -ENOMEM)
+ return NULL;
EXT4_ERROR_INODE(inode, "block %lu read error",
(unsigned long)ce->e_value);
} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
diff --git a/fs/file.c b/fs/file.c
index 7ffd6e9d103d..50304c7525ea 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -158,7 +158,7 @@ static int expand_fdtable(struct files_struct *files, unsigned int nr)
* or have finished their rcu_read_lock_sched() section.
*/
if (atomic_read(&files->count) > 1)
- synchronize_sched();
+ synchronize_rcu();
spin_lock(&files->file_lock);
if (!new_fdt)
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 9edc920f651f..6d9cb1719de5 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -730,6 +730,9 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
if (awaken)
wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
+ if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
+ wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
+
/* Prevent a race with our last child, which has to signal EV_CLEARED
* before dropping our spinlock.
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 47395b0c3b35..e909678afa2d 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1119,8 +1119,10 @@ static int fuse_permission(struct inode *inode, int mask)
if (fc->default_permissions ||
((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
struct fuse_inode *fi = get_fuse_inode(inode);
+ u32 perm_mask = STATX_MODE | STATX_UID | STATX_GID;
- if (time_before64(fi->i_time, get_jiffies_64())) {
+ if (perm_mask & READ_ONCE(fi->inval_mask) ||
+ time_before64(fi->i_time, get_jiffies_64())) {
refreshed = true;
err = fuse_perm_getattr(inode, mask);
@@ -1241,7 +1243,7 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
static int fuse_dir_release(struct inode *inode, struct file *file)
{
- fuse_release_common(file, FUSE_RELEASEDIR);
+ fuse_release_common(file, true);
return 0;
}
@@ -1249,7 +1251,25 @@ static int fuse_dir_release(struct inode *inode, struct file *file)
static int fuse_dir_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
- return fuse_fsync_common(file, start, end, datasync, 1);
+ struct inode *inode = file->f_mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ int err;
+
+ if (is_bad_inode(inode))
+ return -EIO;
+
+ if (fc->no_fsyncdir)
+ return 0;
+
+ inode_lock(inode);
+ err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNCDIR);
+ if (err == -ENOSYS) {
+ fc->no_fsyncdir = 1;
+ err = 0;
+ }
+ inode_unlock(inode);
+
+ return err;
}
static long fuse_dir_ioctl(struct file *file, unsigned int cmd,
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index b52f9baaa3e7..ffaffe18352a 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -89,12 +89,12 @@ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
iput(req->misc.release.inode);
}
-static void fuse_file_put(struct fuse_file *ff, bool sync)
+static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
{
if (refcount_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
- if (ff->fc->no_open) {
+ if (ff->fc->no_open && !isdir) {
/*
* Drop the release request when client does not
* implement 'open'
@@ -247,10 +247,11 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
req->in.args[0].value = inarg;
}
-void fuse_release_common(struct file *file, int opcode)
+void fuse_release_common(struct file *file, bool isdir)
{
struct fuse_file *ff = file->private_data;
struct fuse_req *req = ff->reserved_req;
+ int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
fuse_prepare_release(ff, file->f_flags, opcode);
@@ -272,7 +273,7 @@ void fuse_release_common(struct file *file, int opcode)
* synchronous RELEASE is allowed (and desirable) in this case
* because the server can be trusted not to screw up.
*/
- fuse_file_put(ff, ff->fc->destroy_req != NULL);
+ fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir);
}
static int fuse_open(struct inode *inode, struct file *file)
@@ -288,7 +289,7 @@ static int fuse_release(struct inode *inode, struct file *file)
if (fc->writeback_cache)
write_inode_now(inode, 1);
- fuse_release_common(file, FUSE_RELEASE);
+ fuse_release_common(file, false);
/* return value is ignored by VFS */
return 0;
@@ -302,7 +303,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
* iput(NULL) is a no-op and since the refcount is 1 and everything's
* synchronous, we are fine with not doing igrab() here.
*/
- fuse_file_put(ff, true);
+ fuse_file_put(ff, true, false);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
@@ -441,13 +442,30 @@ static int fuse_flush(struct file *file, fl_owner_t id)
}
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
- int datasync, int isdir)
+ int datasync, int opcode)
{
struct inode *inode = file->f_mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
FUSE_ARGS(args);
struct fuse_fsync_in inarg;
+
+ memset(&inarg, 0, sizeof(inarg));
+ inarg.fh = ff->fh;
+ inarg.fsync_flags = datasync ? 1 : 0;
+ args.in.h.opcode = opcode;
+ args.in.h.nodeid = get_node_id(inode);
+ args.in.numargs = 1;
+ args.in.args[0].size = sizeof(inarg);
+ args.in.args[0].value = &inarg;
+ return fuse_simple_request(fc, &args);
+}
+
+static int fuse_fsync(struct file *file, loff_t start, loff_t end,
+ int datasync)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct fuse_conn *fc = get_fuse_conn(inode);
int err;
if (is_bad_inode(inode))
@@ -479,34 +497,18 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
if (err)
goto out;
- if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
+ if (fc->no_fsync)
goto out;
- memset(&inarg, 0, sizeof(inarg));
- inarg.fh = ff->fh;
- inarg.fsync_flags = datasync ? 1 : 0;
- args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
- args.in.h.nodeid = get_node_id(inode);
- args.in.numargs = 1;
- args.in.args[0].size = sizeof(inarg);
- args.in.args[0].value = &inarg;
- err = fuse_simple_request(fc, &args);
+ err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
if (err == -ENOSYS) {
- if (isdir)
- fc->no_fsyncdir = 1;
- else
- fc->no_fsync = 1;
+ fc->no_fsync = 1;
err = 0;
}
out:
inode_unlock(inode);
- return err;
-}
-static int fuse_fsync(struct file *file, loff_t start, loff_t end,
- int datasync)
-{
- return fuse_fsync_common(file, start, end, datasync, 0);
+ return err;
}
void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
@@ -807,7 +809,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
put_page(page);
}
if (req->ff)
- fuse_file_put(req->ff, false);
+ fuse_file_put(req->ff, false, false);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1460,7 +1462,7 @@ static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
__free_page(req->pages[i]);
if (req->ff)
- fuse_file_put(req->ff, false);
+ fuse_file_put(req->ff, false, false);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
@@ -1619,7 +1621,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
ff = __fuse_write_file_get(fc, fi);
err = fuse_flush_times(inode, ff);
if (ff)
- fuse_file_put(ff, 0);
+ fuse_file_put(ff, false, false);
return err;
}
@@ -1940,7 +1942,7 @@ static int fuse_writepages(struct address_space *mapping,
err = 0;
}
if (data.ff)
- fuse_file_put(data.ff, false);
+ fuse_file_put(data.ff, false, false);
kfree(data.orig_pages);
out:
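The fsync refactor in file.c splits fuse_fsync_common() into a bare request sender parameterized by FUSE opcode, while fuse_fsync() here and fuse_dir_fsync() in dir.c above keep the locking and the degrade-once -ENOSYS handling. That handling is a standing FUSE idiom: the first "not implemented" reply flips a connection flag so later calls short-circuit to success:

    err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
    if (err == -ENOSYS) {
            fc->no_fsync = 1;       /* remember: server lacks FSYNC */
            err = 0;                /* treat as success from now on */
    }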
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index e9f712e81c7d..2f2c92e6f8cb 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -822,13 +822,13 @@ void fuse_sync_release(struct fuse_file *ff, int flags);
/**
* Send RELEASE or RELEASEDIR request
*/
-void fuse_release_common(struct file *file, int opcode);
+void fuse_release_common(struct file *file, bool isdir);
/**
* Send FSYNC or FSYNCDIR request
*/
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
- int datasync, int isdir);
+ int datasync, int opcode);
/**
* Notify poll wakeup
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 0b94b23b02d4..568abed20eb2 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -115,7 +115,7 @@ static void fuse_i_callback(struct rcu_head *head)
static void fuse_destroy_inode(struct inode *inode)
{
struct fuse_inode *fi = get_fuse_inode(inode);
- if (S_ISREG(inode->i_mode)) {
+ if (S_ISREG(inode->i_mode) && !is_bad_inode(inode)) {
WARN_ON(!list_empty(&fi->write_files));
WARN_ON(!list_empty(&fi->queued_writes));
}
@@ -1068,6 +1068,7 @@ void fuse_dev_free(struct fuse_dev *fud)
fuse_conn_put(fc);
}
+ kfree(fud->pq.processing);
kfree(fud);
}
EXPORT_SYMBOL_GPL(fuse_dev_free);
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 8afbb35559b9..05dd78f4b2b3 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -820,10 +820,10 @@ out:
* @page: the page that's being released
* @gfp_mask: passed from Linux VFS, ignored by us
*
- * Call try_to_free_buffers() if the buffers in this page can be
- * released.
+ * Calls try_to_free_buffers() to free the buffers, and puts the page if
+ * the buffers could be released.
*
- * Returns: 0
+ * Returns: 1 if the page was put, 0 otherwise
*/
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
@@ -930,14 +930,14 @@ static const struct address_space_operations gfs2_jdata_aops = {
void gfs2_set_aops(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
- if (gfs2_is_writeback(ip))
+ if (gfs2_is_jdata(ip))
+ inode->i_mapping->a_ops = &gfs2_jdata_aops;
+ else if (gfs2_is_writeback(sdp))
inode->i_mapping->a_ops = &gfs2_writeback_aops;
- else if (gfs2_is_ordered(ip))
+ else if (gfs2_is_ordered(sdp))
inode->i_mapping->a_ops = &gfs2_ordered_aops;
- else if (gfs2_is_jdata(ip))
- inode->i_mapping->a_ops = &gfs2_jdata_aops;
else
BUG();
}
-
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 9a4a15d646eb..02b2646d84b3 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -14,6 +14,7 @@
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/iomap.h>
+#include <linux/ktime.h>
#include "gfs2.h"
#include "incore.h"
@@ -2083,6 +2084,8 @@ static int do_grow(struct inode *inode, u64 size)
}
error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
+ (unstuff &&
+ gfs2_is_jdata(ip) ? RES_JDATA : 0) +
(sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
0 : RES_QUOTA), 0);
if (error)
@@ -2248,7 +2251,9 @@ int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
unsigned int shift = sdp->sd_sb.sb_bsize_shift;
u64 size;
int rc;
+ ktime_t start, end;
+ start = ktime_get();
lblock_stop = i_size_read(jd->jd_inode) >> shift;
size = (lblock_stop - lblock) << shift;
jd->nr_extents = 0;
@@ -2268,8 +2273,9 @@ int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
lblock += (bh.b_size >> ip->i_inode.i_blkbits);
} while(size > 0);
- fs_info(sdp, "journal %d mapped with %u extents\n", jd->jd_jid,
- jd->nr_extents);
+ end = ktime_get();
+ fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
+ jd->nr_extents, ktime_ms_delta(end, start));
return 0;
fail:
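gfs2_map_journal_extents() above brackets the mapping walk with ktime_get() and logs ktime_ms_delta(), the standard lightweight kernel timing idiom:

    #include <linux/ktime.h>

    ktime_t start = ktime_get();
    /* ... the work being measured ... */
    fs_info(sdp, "took %lldms\n", ktime_ms_delta(ktime_get(), start));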
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 45a17b770d97..a2dea5bc0427 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -1199,13 +1199,13 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
mutex_lock(&fp->f_fl_mutex);
if (gfs2_holder_initialized(fl_gh)) {
+ struct file_lock request;
if (fl_gh->gh_state == state)
goto out;
- locks_lock_file_wait(file,
- &(struct file_lock) {
- .fl_type = F_UNLCK,
- .fl_flags = FL_FLOCK
- });
+ locks_init_lock(&request);
+ request.fl_type = F_UNLCK;
+ request.fl_flags = FL_FLOCK;
+ locks_lock_file_wait(file, &request);
gfs2_glock_dq(fl_gh);
gfs2_holder_reinit(state, flags, fl_gh);
} else {
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 05431324b262..b92740edc416 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1777,7 +1777,7 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*
*/
-void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
+void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
unsigned long long dtime;
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 5e12220cc0c2..8949bf28b249 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -202,7 +202,7 @@ extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
struct gfs2_holder *gh);
extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-extern void gfs2_dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
+extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { gfs2_dump_glock(NULL, gl); BUG(); } } while(0)
extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index c63bee9adb6a..f15b4c57c4bd 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -28,6 +28,7 @@
#include "util.h"
#include "trans.h"
#include "dir.h"
+#include "lops.h"
struct workqueue_struct *gfs2_freeze_wq;
@@ -466,17 +467,25 @@ static int inode_go_lock(struct gfs2_holder *gh)
*
*/
-static void inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl)
{
- const struct gfs2_inode *ip = gl->gl_object;
+ struct gfs2_inode *ip = gl->gl_object;
+ struct inode *inode = &ip->i_inode;
+ unsigned long nrpages;
+
if (ip == NULL)
return;
- gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu\n",
+
+ xa_lock_irq(&inode->i_data.i_pages);
+ nrpages = inode->i_data.nrpages;
+ xa_unlock_irq(&inode->i_data.i_pages);
+
+ gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu p:%lu\n",
(unsigned long long)ip->i_no_formal_ino,
(unsigned long long)ip->i_no_addr,
IF2DT(ip->i_inode.i_mode), ip->i_flags,
(unsigned int)ip->i_diskflags,
- (unsigned long long)i_size_read(&ip->i_inode));
+ (unsigned long long)i_size_read(inode), nrpages);
}
/**
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 888b62cfd6d1..e10e0b0a7cd5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -165,7 +165,6 @@ struct gfs2_bufdata {
u64 bd_blkno;
struct list_head bd_list;
- const struct gfs2_log_operations *bd_ops;
struct gfs2_trans *bd_tr;
struct list_head bd_ail_st_list;
@@ -244,7 +243,7 @@ struct gfs2_glock_operations {
int (*go_demote_ok) (const struct gfs2_glock *gl);
int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
- void (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
+ void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl);
void (*go_callback)(struct gfs2_glock *gl, bool remote);
const int go_type;
const unsigned long go_flags;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 648f0ca1ad57..998051c4aea7 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -744,17 +744,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
the gfs2 structures. */
if (default_acl) {
error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ if (error)
+ goto fail_gunlock3;
posix_acl_release(default_acl);
+ default_acl = NULL;
}
if (acl) {
- if (!error)
- error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ if (error)
+ goto fail_gunlock3;
posix_acl_release(acl);
+ acl = NULL;
}
- if (error)
- goto fail_gunlock3;
-
error = security_inode_init_security(&ip->i_inode, &dip->i_inode, name,
&gfs2_initxattrs, NULL);
if (error)
@@ -789,10 +791,8 @@ fail_free_inode:
}
gfs2_rsqa_delete(ip, NULL);
fail_free_acls:
- if (default_acl)
- posix_acl_release(default_acl);
- if (acl)
- posix_acl_release(acl);
+ posix_acl_release(default_acl);
+ posix_acl_release(acl);
fail_gunlock:
gfs2_dir_no_add(&da);
gfs2_glock_dq_uninit(ghs);
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index b5b6341a4f5c..793808263c6d 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -30,16 +30,14 @@ static inline int gfs2_is_jdata(const struct gfs2_inode *ip)
return ip->i_diskflags & GFS2_DIF_JDATA;
}
-static inline int gfs2_is_writeback(const struct gfs2_inode *ip)
+static inline bool gfs2_is_ordered(const struct gfs2_sbd *sdp)
{
- const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- return (sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK) && !gfs2_is_jdata(ip);
+ return sdp->sd_args.ar_data == GFS2_DATA_ORDERED;
}
-static inline int gfs2_is_ordered(const struct gfs2_inode *ip)
+static inline bool gfs2_is_writeback(const struct gfs2_sbd *sdp)
{
- const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- return (sdp->sd_args.ar_data == GFS2_DATA_ORDERED) && !gfs2_is_jdata(ip);
+ return sdp->sd_args.ar_data == GFS2_DATA_WRITEBACK;
}
static inline int gfs2_is_dir(const struct gfs2_inode *ip)
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 99dd58694ba1..5bfaf381921a 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -605,7 +605,6 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
bd->bd_blkno = bh->b_blocknr;
gfs2_remove_from_ail(bd); /* drops ref on bh */
bd->bd_bh = NULL;
- bd->bd_ops = &gfs2_revoke_lops;
sdp->sd_log_num_revoke++;
atomic_inc(&gl->gl_revokes);
set_bit(GLF_LFLUSH, &gl->gl_flags);
@@ -734,7 +733,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
lh->lh_crc = cpu_to_be32(crc);
gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr);
- gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
+ gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags);
log_flush_wait(sdp);
}
@@ -811,7 +810,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
gfs2_ordered_write(sdp);
lops_before_commit(sdp, tr);
- gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
+ gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE);
if (sdp->sd_log_head != sdp->sd_log_flush_head) {
log_flush_wait(sdp);
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 20241436126d..1bc9bd444b28 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -51,12 +51,11 @@ static inline void gfs2_log_pointers_init(struct gfs2_sbd *sdp,
static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
{
- struct gfs2_sbd *sdp;
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- if (!gfs2_is_ordered(ip))
+ if (gfs2_is_jdata(ip) || !gfs2_is_ordered(sdp))
return;
- sdp = GFS2_SB(&ip->i_inode);
if (!test_bit(GIF_ORDERED, &ip->i_flags)) {
spin_lock(&sdp->sd_ordered_lock);
if (!test_and_set_bit(GIF_ORDERED, &ip->i_flags))
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 4c7069b8f3c1..94dcab655bc0 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -17,7 +17,9 @@
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
+#include <linux/blkdev.h>
+#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
@@ -193,7 +195,6 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec,
/**
* gfs2_end_log_write - end of i/o to the log
* @bio: The bio
- * @error: Status of i/o request
*
* Each bio_vec contains either data from the pagecache or data
* relating to the log itself. Here we iterate over the bio_vec
@@ -228,83 +229,86 @@ static void gfs2_end_log_write(struct bio *bio)
}
/**
- * gfs2_log_flush_bio - Submit any pending log bio
- * @sdp: The superblock
- * @op: REQ_OP
- * @op_flags: req_flag_bits
+ * gfs2_log_submit_bio - Submit any pending log bio
+ * @biop: Address of the bio pointer
+ * @opf: REQ_OP | op_flags
*
* Submit any pending part-built or full bio to the block device. If
* there is no pending bio, then this is a no-op.
*/
-void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags)
+void gfs2_log_submit_bio(struct bio **biop, int opf)
{
- if (sdp->sd_log_bio) {
+ struct bio *bio = *biop;
+ if (bio) {
+ struct gfs2_sbd *sdp = bio->bi_private;
atomic_inc(&sdp->sd_log_in_flight);
- bio_set_op_attrs(sdp->sd_log_bio, op, op_flags);
- submit_bio(sdp->sd_log_bio);
- sdp->sd_log_bio = NULL;
+ bio->bi_opf = opf;
+ submit_bio(bio);
+ *biop = NULL;
}
}
/**
- * gfs2_log_alloc_bio - Allocate a new bio for log writing
- * @sdp: The superblock
- * @blkno: The next device block number we want to write to
+ * gfs2_log_alloc_bio - Allocate a bio
+ * @sdp: The super block
+ * @blkno: The device block number we want to write to
+ * @end_io: The bi_end_io callback
*
- * This should never be called when there is a cached bio in the
- * super block. When it returns, there will be a cached bio in the
- * super block which will have as many bio_vecs as the device is
- * happy to handle.
+ * Allocate a new bio, initialize it with the given parameters and return it.
*
- * Returns: Newly allocated bio
+ * Returns: The newly allocated bio
*/
-static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
+static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
+ bio_end_io_t *end_io)
{
struct super_block *sb = sdp->sd_vfs;
- struct bio *bio;
-
- BUG_ON(sdp->sd_log_bio);
+ struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
- bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
bio_set_dev(bio, sb->s_bdev);
- bio->bi_end_io = gfs2_end_log_write;
+ bio->bi_end_io = end_io;
bio->bi_private = sdp;
- sdp->sd_log_bio = bio;
-
return bio;
}
/**
* gfs2_log_get_bio - Get cached log bio, or allocate a new one
- * @sdp: The superblock
+ * @sdp: The super block
* @blkno: The device block number we want to write to
+ * @biop: Address of the bio pointer to get or allocate
+ * @op: REQ_OP
+ * @end_io: The bi_end_io callback
+ * @flush: Always flush the current bio and allocate a new one?
*
* If there is a cached bio, then if the next block number is sequential
* with the previous one, return it, otherwise flush the bio to the
- * device. If there is not a cached bio, or we just flushed it, then
+ * device. If there is no cached bio, or we just flushed it, then
* allocate a new one.
*
* Returns: The bio to use for log writes
*/
-static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
+static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
+ struct bio **biop, int op,
+ bio_end_io_t *end_io, bool flush)
{
- struct bio *bio = sdp->sd_log_bio;
- u64 nblk;
+ struct bio *bio = *biop;
if (bio) {
+ u64 nblk;
+
nblk = bio_end_sector(bio);
nblk >>= sdp->sd_fsb2bb_shift;
- if (blkno == nblk)
+ if (blkno == nblk && !flush)
return bio;
- gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
+ gfs2_log_submit_bio(biop, op);
}
- return gfs2_log_alloc_bio(sdp, blkno);
+ *biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
+ return *biop;
}
/**
@@ -326,11 +330,12 @@ void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
struct bio *bio;
int ret;
- bio = gfs2_log_get_bio(sdp, blkno);
+ bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
+ gfs2_end_log_write, false);
ret = bio_add_page(bio, page, size, offset);
if (ret == 0) {
- gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
- bio = gfs2_log_alloc_bio(sdp, blkno);
+ bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
+ REQ_OP_WRITE, gfs2_end_log_write, true);
ret = bio_add_page(bio, page, size, offset);
WARN_ON(ret == 0);
}
@@ -370,6 +375,184 @@ void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
gfs2_log_bmap(sdp));
}
+/**
+ * gfs2_end_log_read - end I/O callback for reads from the log
+ * @bio: The bio
+ *
+ * Simply unlock the pages in the bio. The main thread will wait on them and
+ * process them in order as necessary.
+ */
+
+static void gfs2_end_log_read(struct bio *bio)
+{
+ struct page *page;
+ struct bio_vec *bvec;
+ int i;
+
+ bio_for_each_segment_all(bvec, bio, i) {
+ page = bvec->bv_page;
+ if (bio->bi_status) {
+ int err = blk_status_to_errno(bio->bi_status);
+
+ SetPageError(page);
+ mapping_set_error(page->mapping, err);
+ }
+ unlock_page(page);
+ }
+
+ bio_put(bio);
+}
+
+/**
+ * gfs2_jhead_pg_srch - Look for the journal head in a given page.
+ * @jd: The journal descriptor
+ * @head: The journal head so far, updated if this page holds a newer one
+ * @page: The page to look in
+ *
+ * Returns: true if found, false otherwise.
+ */
+
+static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
+ struct gfs2_log_header_host *head,
+ struct page *page)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
+ struct gfs2_log_header_host uninitialized_var(lh);
+ void *kaddr = kmap_atomic(page);
+ unsigned int offset;
+ bool ret = false;
+
+ for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
+ if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
+ if (lh.lh_sequence > head->lh_sequence)
+ *head = lh;
+ else {
+ ret = true;
+ break;
+ }
+ }
+ }
+ kunmap_atomic(kaddr);
+ return ret;
+}
+
+/**
+ * gfs2_jhead_process_page - Search/cleanup a page
+ * @jd: The journal descriptor
+ * @index: Index of the page to look into
+ * @head: The journal head to update if found in this page
+ * @done: If set, perform only cleanup, else search and set if found.
+ *
+ * Find the page with 'index' in the journal's mapping. Search the page for
+ * the journal head if requested (cleanup == false). Release refs on the
+ * page so the page cache can reclaim it (put_page() twice). We grabbed a
+ * reference on this page two times, first when we did a find_or_create_page()
+ * to obtain the page to add it to the bio and second when we do a
+ * find_get_page() here to get the page to wait on while I/O on it is being
+ * completed.
+ * This function is also used to free up a page we might've grabbed but not
+ * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
+ * submitted the I/O, but we already found the jhead so we only need to drop
+ * our references to the page.
+ */
+
+static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
+ struct gfs2_log_header_host *head,
+ bool *done)
+{
+ struct page *page;
+
+ page = find_get_page(jd->jd_inode->i_mapping, index);
+ wait_on_page_locked(page);
+
+ if (PageError(page))
+ *done = true;
+
+ if (!*done)
+ *done = gfs2_jhead_pg_srch(jd, head, page);
+
+ put_page(page); /* Once for find_get_page */
+ put_page(page); /* Once more for find_or_create_page */
+}
+
+/**
+ * gfs2_find_jhead - find the head of a log
+ * @jd: The journal descriptor
+ * @head: The log descriptor for the head of the log is returned here
+ *
+ * Do a search of a journal by reading it in large chunks using bios and find
+ * the valid log entry with the highest sequence number. (i.e. the log head)
+ *
+ * Returns: 0 on success, errno otherwise
+ */
+
+int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
+{
+ struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
+ struct address_space *mapping = jd->jd_inode->i_mapping;
+ struct gfs2_journal_extent *je;
+ u32 block, read_idx = 0, submit_idx = 0, index = 0;
+ int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
+ int blocks_per_page = 1 << shift, sz, ret = 0;
+ struct bio *bio = NULL;
+ struct page *page;
+ bool done = false;
+ errseq_t since;
+
+ memset(head, 0, sizeof(*head));
+ if (list_empty(&jd->extent_list))
+ gfs2_map_journal_extents(sdp, jd);
+
+ since = filemap_sample_wb_err(mapping);
+ list_for_each_entry(je, &jd->extent_list, list) {
+ for (block = 0; block < je->blocks; block += blocks_per_page) {
+ index = (je->lblock + block) >> shift;
+
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ if (!page) {
+ ret = -ENOMEM;
+ done = true;
+ goto out;
+ }
+
+ if (bio) {
+ sz = bio_add_page(bio, page, PAGE_SIZE, 0);
+ if (sz == PAGE_SIZE)
+ goto page_added;
+ submit_idx = index;
+ submit_bio(bio);
+ bio = NULL;
+ }
+
+ bio = gfs2_log_alloc_bio(sdp,
+ je->dblock + (index << shift),
+ gfs2_end_log_read);
+ bio->bi_opf = REQ_OP_READ;
+ sz = bio_add_page(bio, page, PAGE_SIZE, 0);
+ gfs2_assert_warn(sdp, sz == PAGE_SIZE);
+
+page_added:
+ if (submit_idx <= read_idx + BIO_MAX_PAGES) {
+ /* Keep at least one bio in flight */
+ continue;
+ }
+
+ gfs2_jhead_process_page(jd, read_idx++, head, &done);
+ if (done)
+ goto out; /* found */
+ }
+ }
+
+out:
+ if (bio)
+ submit_bio(bio);
+ while (read_idx <= index)
+ gfs2_jhead_process_page(jd, read_idx++, head, &done);
+
+ if (!ret)
+ ret = filemap_check_wb_err(mapping, since);
+
+ return ret;
+}
+
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
u32 ld_length, u32 ld_data1)
{
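
The scan pattern in gfs2_find_jhead() above, reduced to its indices: submit page reads ahead, and start waiting on and searching pages only once a window of I/O is in flight, draining whatever remains at the end. A hedged userspace sketch with simplified window arithmetic; start_read() and process_page() are hypothetical stand-ins for the bio submission and gfs2_jhead_process_page():

#include <stdbool.h>
#include <stdio.h>

#define WINDOW 4        /* plays the role of BIO_MAX_PAGES */

static void start_read(unsigned long index)
{
        printf("submit page %lu\n", index);     /* stands in for bio submission */
}

static bool process_page(unsigned long index)
{
        printf("search page %lu\n", index);     /* waits for I/O, scans headers */
        return index == 5;                      /* pretend the head is on page 5 */
}

int main(void)
{
        unsigned long npages = 12, index, read_idx = 0;
        bool done = false;

        for (index = 0; index < npages; index++) {
                start_read(index);
                if (index < read_idx + WINDOW)  /* keep a window in flight */
                        continue;
                done = process_page(read_idx++);
                if (done)
                        break;
        }
        while (!done && read_idx < index)       /* drain what was submitted */
                done = process_page(read_idx++);
        return done ? 0 : 1;
}
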
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index e4949394f054..331160fc568b 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -30,8 +30,10 @@ extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp);
extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
unsigned size, unsigned offset, u64 blkno);
extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
-extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags);
+extern void gfs2_log_submit_bio(struct bio **biop, int opf);
extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
+extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
+ struct gfs2_log_header_host *head);
static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
{
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index b041cb8ae383..1179763f6370 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -41,6 +41,7 @@
#include "dir.h"
#include "meta_io.h"
#include "trace_gfs2.h"
+#include "lops.h"
#define DO 0
#define UNDO 1
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 0f501f938d1c..7389e445a7a7 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -120,6 +120,35 @@ void gfs2_revoke_clean(struct gfs2_jdesc *jd)
}
}
+int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh,
+ unsigned int blkno, struct gfs2_log_header_host *head)
+{
+ u32 hash, crc;
+
+ if (lh->lh_header.mh_magic != cpu_to_be32(GFS2_MAGIC) ||
+ lh->lh_header.mh_type != cpu_to_be32(GFS2_METATYPE_LH) ||
+ (blkno && be32_to_cpu(lh->lh_blkno) != blkno))
+ return 1;
+
+ hash = crc32(~0, lh, LH_V1_SIZE - 4);
+ hash = ~crc32_le_shift(hash, 4); /* assume lh_hash is zero */
+
+ if (be32_to_cpu(lh->lh_hash) != hash)
+ return 1;
+
+ crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
+ sdp->sd_sb.sb_bsize - LH_V1_SIZE - 4);
+
+ if (lh->lh_crc != 0 && be32_to_cpu(lh->lh_crc) != crc)
+ return 1;
+
+ head->lh_sequence = be64_to_cpu(lh->lh_sequence);
+ head->lh_flags = be32_to_cpu(lh->lh_flags);
+ head->lh_tail = be32_to_cpu(lh->lh_tail);
+ head->lh_blkno = be32_to_cpu(lh->lh_blkno);
+
+ return 0;
+}
/**
* get_log_header - read the log header for a given segment
* @jd: the journal
@@ -137,159 +166,18 @@ void gfs2_revoke_clean(struct gfs2_jdesc *jd)
static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
struct gfs2_log_header_host *head)
{
- struct gfs2_log_header *lh;
+ struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct buffer_head *bh;
- u32 hash, crc;
int error;
error = gfs2_replay_read_block(jd, blk, &bh);
if (error)
return error;
- lh = (void *)bh->b_data;
-
- hash = crc32(~0, lh, LH_V1_SIZE - 4);
- hash = ~crc32_le_shift(hash, 4); /* assume lh_hash is zero */
-
- crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
- bh->b_size - LH_V1_SIZE - 4);
-
- error = lh->lh_header.mh_magic != cpu_to_be32(GFS2_MAGIC) ||
- lh->lh_header.mh_type != cpu_to_be32(GFS2_METATYPE_LH) ||
- be32_to_cpu(lh->lh_blkno) != blk ||
- be32_to_cpu(lh->lh_hash) != hash ||
- (lh->lh_crc != 0 && be32_to_cpu(lh->lh_crc) != crc);
+ error = __get_log_header(sdp, (const struct gfs2_log_header *)bh->b_data,
+ blk, head);
brelse(bh);
- if (!error) {
- head->lh_sequence = be64_to_cpu(lh->lh_sequence);
- head->lh_flags = be32_to_cpu(lh->lh_flags);
- head->lh_tail = be32_to_cpu(lh->lh_tail);
- head->lh_blkno = be32_to_cpu(lh->lh_blkno);
- }
- return error;
-}
-
-/**
- * find_good_lh - find a good log header
- * @jd: the journal
- * @blk: the segment to start searching from
- * @lh: the log header to fill in
- * @forward: if true search forward in the log, else search backward
- *
- * Call get_log_header() to get a log header for a segment, but if the
- * segment is bad, either scan forward or backward until we find a good one.
- *
- * Returns: errno
- */
-
-static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk,
- struct gfs2_log_header_host *head)
-{
- unsigned int orig_blk = *blk;
- int error;
-
- for (;;) {
- error = get_log_header(jd, *blk, head);
- if (error <= 0)
- return error;
-
- if (++*blk == jd->jd_blocks)
- *blk = 0;
-
- if (*blk == orig_blk) {
- gfs2_consist_inode(GFS2_I(jd->jd_inode));
- return -EIO;
- }
- }
-}
-
-/**
- * jhead_scan - make sure we've found the head of the log
- * @jd: the journal
- * @head: this is filled in with the log descriptor of the head
- *
- * At this point, seg and lh should be either the head of the log or just
- * before. Scan forward until we find the head.
- *
- * Returns: errno
- */
-
-static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
-{
- unsigned int blk = head->lh_blkno;
- struct gfs2_log_header_host lh;
- int error;
-
- for (;;) {
- if (++blk == jd->jd_blocks)
- blk = 0;
-
- error = get_log_header(jd, blk, &lh);
- if (error < 0)
- return error;
- if (error == 1)
- continue;
-
- if (lh.lh_sequence == head->lh_sequence) {
- gfs2_consist_inode(GFS2_I(jd->jd_inode));
- return -EIO;
- }
- if (lh.lh_sequence < head->lh_sequence)
- break;
-
- *head = lh;
- }
-
- return 0;
-}
-
-/**
- * gfs2_find_jhead - find the head of a log
- * @jd: the journal
- * @head: the log descriptor for the head of the log is returned here
- *
- * Do a binary search of a journal and find the valid log entry with the
- * highest sequence number. (i.e. the log head)
- *
- * Returns: errno
- */
-
-int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
-{
- struct gfs2_log_header_host lh_1, lh_m;
- u32 blk_1, blk_2, blk_m;
- int error;
-
- blk_1 = 0;
- blk_2 = jd->jd_blocks - 1;
-
- for (;;) {
- blk_m = (blk_1 + blk_2) / 2;
-
- error = find_good_lh(jd, &blk_1, &lh_1);
- if (error)
- return error;
-
- error = find_good_lh(jd, &blk_m, &lh_m);
- if (error)
- return error;
-
- if (blk_1 == blk_m || blk_m == blk_2)
- break;
-
- if (lh_1.lh_sequence <= lh_m.lh_sequence)
- blk_1 = blk_m;
- else
- blk_2 = blk_m;
- }
-
- error = jhead_scan(jd, &lh_1);
- if (error)
- return error;
-
- *head = lh_1;
-
return error;
}
@@ -460,6 +348,8 @@ void gfs2_recover_func(struct work_struct *work)
if (error)
goto fail_gunlock_ji;
t_jhd = ktime_get();
+ fs_info(sdp, "jid=%u: Journal head lookup took %lldms\n", jd->jd_jid,
+ ktime_ms_delta(t_jhd, t_jlck));
if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
fs_info(sdp, "jid=%u: Acquiring the transaction lock...\n",
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 11fdfab4bf99..99575ab81202 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -27,10 +27,11 @@ extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
extern void gfs2_revoke_clean(struct gfs2_jdesc *jd);
-extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
- struct gfs2_log_header_host *head);
extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
extern void gfs2_recover_func(struct work_struct *work);
+extern int __get_log_header(struct gfs2_sbd *sdp,
+ const struct gfs2_log_header *lh, unsigned int blkno,
+ struct gfs2_log_header_host *head);
#endif /* __RECOVERY_DOT_H__ */
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index b08a530433ad..831d7cb5a49c 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext,
goto next_iter;
}
if (ret == -E2BIG) {
+ n += rbm->bii - initial_bii;
rbm->bii = 0;
rbm->offset = 0;
- n += (rbm->bii - initial_bii);
goto res_covered_end_of_rgrp;
}
return ret;
@@ -2256,7 +2256,7 @@ static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd,
*
*/
-void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl)
{
struct gfs2_rgrpd *rgd = gl->gl_object;
struct gfs2_blkreserv *trs;
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index b596c3d17988..499079a9dbbe 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -72,7 +72,7 @@ extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist);
extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
-extern void gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl);
+extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_glock *gl);
extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
struct buffer_head *bh,
const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index ca71163ff7cf..d4b11c903971 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -45,6 +45,7 @@
#include "util.h"
#include "sys.h"
#include "xattr.h"
+#include "lops.h"
#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 423bc2d03dd8..cd9a94a6b5bb 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -124,15 +124,13 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
}
static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
- struct buffer_head *bh,
- const struct gfs2_log_operations *lops)
+ struct buffer_head *bh)
{
struct gfs2_bufdata *bd;
bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
bd->bd_bh = bh;
bd->bd_gl = gl;
- bd->bd_ops = lops;
INIT_LIST_HEAD(&bd->bd_list);
bh->b_private = bd;
return bd;
@@ -169,7 +167,7 @@ void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
gfs2_log_unlock(sdp);
unlock_buffer(bh);
if (bh->b_private == NULL)
- bd = gfs2_alloc_bufdata(gl, bh, &gfs2_databuf_lops);
+ bd = gfs2_alloc_bufdata(gl, bh);
else
bd = bh->b_private;
lock_buffer(bh);
@@ -210,7 +208,7 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
unlock_buffer(bh);
lock_page(bh->b_page);
if (bh->b_private == NULL)
- bd = gfs2_alloc_bufdata(gl, bh, &gfs2_buf_lops);
+ bd = gfs2_alloc_bufdata(gl, bh);
else
bd = bh->b_private;
unlock_page(bh->b_page);
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 98b96ffb95ed..19017d296173 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -338,13 +338,14 @@ void hfs_bmap_free(struct hfs_bnode *node)
nidx -= len * 8;
i = node->next;
- hfs_bnode_put(node);
if (!i) {
/* panic */;
pr_crit("unable to free bnode %u. bmap not found!\n",
node->this);
+ hfs_bnode_put(node);
return;
}
+ hfs_bnode_put(node);
node = hfs_bnode_find(tree, i);
if (IS_ERR(node))
return;
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 236efe51eca6..66774f4cb4fd 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -466,14 +466,15 @@ void hfs_bmap_free(struct hfs_bnode *node)
nidx -= len * 8;
i = node->next;
- hfs_bnode_put(node);
if (!i) {
/* panic */;
pr_crit("unable to free bnode %u. "
"bmap not found!\n",
node->this);
+ hfs_bnode_put(node);
return;
}
+ hfs_bnode_put(node);
node = hfs_bnode_find(tree, i);
if (IS_ERR(node))
return;
diff --git a/fs/inode.c b/fs/inode.c
index 35d2108d567c..0cd47fe0dbe5 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -2149,7 +2149,9 @@ EXPORT_SYMBOL(timespec64_trunc);
*/
struct timespec64 current_time(struct inode *inode)
{
- struct timespec64 now = current_kernel_time64();
+ struct timespec64 now;
+
+ ktime_get_coarse_real_ts64(&now);
if (unlikely(!inode->i_sb)) {
WARN(1, "current_time() called with uninitialized super_block in the inode");
diff --git a/fs/iomap.c b/fs/iomap.c
index 3ffb776fbebe..e87c288cd5ef 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -492,16 +492,29 @@ done:
}
EXPORT_SYMBOL_GPL(iomap_readpages);
+/*
+ * iomap_is_partially_uptodate checks whether blocks within a page are
+ * uptodate or not.
+ *
+ * Returns true if all blocks corresponding to the portion of the file we
+ * want to read within the page are uptodate.
+ */
int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count)
{
struct iomap_page *iop = to_iomap_page(page);
struct inode *inode = page->mapping->host;
- unsigned first = from >> inode->i_blkbits;
- unsigned last = (from + count - 1) >> inode->i_blkbits;
+ unsigned len, first, last;
unsigned i;
+ /* Limit range to one page */
+ len = min_t(unsigned, PAGE_SIZE - from, count);
+
+ /* First and last blocks in range within page */
+ first = from >> inode->i_blkbits;
+ last = (from + len - 1) >> inode->i_blkbits;
+
if (iop) {
for (i = first; i <= last; i++)
if (!test_bit(i, iop->uptodate))
@@ -1530,7 +1543,7 @@ static void iomap_dio_bio_end_io(struct bio *bio)
if (dio->wait_for_completion) {
struct task_struct *waiter = dio->submit.waiter;
WRITE_ONCE(dio->submit.waiter, NULL);
- wake_up_process(waiter);
+ blk_wake_io_task(waiter);
} else if (dio->flags & IOMAP_DIO_WRITE) {
struct inode *inode = file_inode(dio->iocb->ki_filp);
@@ -1558,6 +1571,7 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
unsigned len)
{
struct page *page = ZERO_PAGE(0);
+ int flags = REQ_SYNC | REQ_IDLE;
struct bio *bio;
bio = bio_alloc(GFP_KERNEL, 1);
@@ -1566,9 +1580,12 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
bio->bi_private = dio;
bio->bi_end_io = iomap_dio_bio_end_io;
+ if (dio->iocb->ki_flags & IOCB_HIPRI)
+ flags |= REQ_HIPRI;
+
get_page(page);
__bio_add_page(bio, page, len, 0);
- bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC | REQ_IDLE);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
atomic_inc(&dio->ref);
return submit_bio(bio);
@@ -1674,6 +1691,9 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
bio_set_pages_dirty(bio);
}
+ if (dio->iocb->ki_flags & IOCB_HIPRI)
+ bio->bi_opf |= REQ_HIPRI;
+
iov_iter_advance(dio->submit.iter, n);
dio->size += n;
@@ -1877,15 +1897,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->wait_for_completion = true;
ret = 0;
}
-
- /*
- * Splicing to pipes can fail on a full pipe. We have to
- * swallow this to make it look like a short IO
- * otherwise the higher splice layers will completely
- * mishandle the error and stop moving data.
- */
- if (ret == -EFAULT)
- ret = 0;
break;
}
pos += ret;
@@ -1910,14 +1921,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
return -EIOCBQUEUED;
for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+
if (!READ_ONCE(dio->submit.waiter))
break;
if (!(iocb->ki_flags & IOCB_HIPRI) ||
!dio->submit.last_queue ||
!blk_poll(dio->submit.last_queue,
- dio->submit.cookie))
+ dio->submit.cookie, true))
io_schedule();
}
__set_current_state(TASK_RUNNING);
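
The clamp added to iomap_is_partially_uptodate() matters because from/count can describe a range extending past this page, while iop->uptodate has one bit per block of this page only. Worked numbers (hypothetical 4k page, 512-byte blocks) showing the unclamped last index overrunning the bitmap:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define BLKBITS 9       /* 512-byte blocks: 8 uptodate bits per page */

int main(void)
{
        unsigned from = 3584, count = 2048;     /* read crosses the page end */

        /* Old calculation: indexes bit 10 of an 8-bit-per-page bitmap. */
        unsigned old_last = (from + count - 1) >> BLKBITS;

        /* Patched calculation: clamp the range to this page first. */
        unsigned len = count < PAGE_SIZE - from ? count : PAGE_SIZE - from;
        unsigned first = from >> BLKBITS;
        unsigned last = (from + len - 1) >> BLKBITS;

        printf("old last=%u (out of bounds), new first=%u last=%u\n",
               old_last, first, last);
        return 0;
}
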
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 150cc030b4d7..2eb55c3361a8 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -439,6 +439,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
finish_wait(&journal->j_wait_updates, &wait);
}
spin_unlock(&commit_transaction->t_handle_lock);
+ commit_transaction->t_state = T_SWITCH;
+ write_unlock(&journal->j_state_lock);
J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
journal->j_max_transaction_buffers);
@@ -505,6 +507,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
atomic_sub(atomic_read(&journal->j_reserved_credits),
&commit_transaction->t_outstanding_credits);
+ write_lock(&journal->j_state_lock);
trace_jbd2_commit_flushing(journal, commit_transaction);
stats.run.rs_flushing = jiffies;
stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index c0b66a7a795b..cc35537232f2 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -138,9 +138,9 @@ static inline void update_t_max_wait(transaction_t *transaction,
}
/*
- * Wait until running transaction passes T_LOCKED state. Also starts the commit
- * if needed. The function expects running transaction to exist and releases
- * j_state_lock.
+ * Wait until the running transaction passes to T_FLUSH state and a new
+ * transaction can thus be started. Also starts the commit if needed. The
+ * function expects the running transaction to exist and releases
+ * j_state_lock.
*/
static void wait_transaction_locked(journal_t *journal)
__releases(journal->j_state_lock)
@@ -160,6 +160,32 @@ static void wait_transaction_locked(journal_t *journal)
finish_wait(&journal->j_wait_transaction_locked, &wait);
}
+/*
+ * Wait until the running transaction transitions from T_SWITCH to T_FLUSH
+ * state and a new transaction can thus be started. The function releases
+ * j_state_lock.
+ */
+static void wait_transaction_switching(journal_t *journal)
+ __releases(journal->j_state_lock)
+{
+ DEFINE_WAIT(wait);
+
+ if (WARN_ON(!journal->j_running_transaction ||
+ journal->j_running_transaction->t_state != T_SWITCH))
+ return;
+ prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
+ TASK_UNINTERRUPTIBLE);
+ read_unlock(&journal->j_state_lock);
+ /*
+ * We don't call jbd2_might_wait_for_commit() here as there's no
+ * waiting for outstanding handles happening anymore in T_SWITCH state
+ * and handling of reserved handles actually relies on that for
+ * correctness.
+ */
+ schedule();
+ finish_wait(&journal->j_wait_transaction_locked, &wait);
+}
+
static void sub_reserved_credits(journal_t *journal, int blocks)
{
atomic_sub(blocks, &journal->j_reserved_credits);
@@ -183,7 +209,8 @@ static int add_transaction_credits(journal_t *journal, int blocks,
* If the current transaction is locked down for commit, wait
* for the lock to be released.
*/
- if (t->t_state == T_LOCKED) {
+ if (t->t_state != T_RUNNING) {
+ WARN_ON_ONCE(t->t_state >= T_FLUSH);
wait_transaction_locked(journal);
return 1;
}
@@ -360,8 +387,14 @@ repeat:
/*
* We have handle reserved so we are allowed to join T_LOCKED
* transaction and we don't have to check for transaction size
- * and journal space.
+ * and journal space. But we still have to wait while the running
+ * transaction is being switched to a committing one as it
+ * won't wait for any handles anymore.
*/
+ if (transaction->t_state == T_SWITCH) {
+ wait_transaction_switching(journal);
+ goto repeat;
+ }
sub_reserved_credits(journal, blocks);
handle->h_reserved = 0;
}
@@ -910,7 +943,7 @@ repeat:
* this is the first time this transaction is touching this buffer,
* reset the modified flag
*/
- jh->b_modified = 0;
+ jh->b_modified = 0;
/*
* If the buffer is not journaled right now, we need to make sure it
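
The T_SWITCH state splits the old T_LOCKED window in two: handles holding a reservation may still join a T_LOCKED transaction, but once the commit thread moves it to T_SWITCH nothing waits for handles anymore, so even reserved handles must back off and retry. A hedged model of the admission rule that add_transaction_credits() and start_this_handle() now implement; the helper and table are illustrative, only the state names mirror jbd2's:

#include <stdbool.h>
#include <stdio.h>

/* State names mirror jbd2's; T_SWITCH is the state this patch adds. */
enum t_state { T_RUNNING, T_LOCKED, T_SWITCH, T_FLUSH, T_COMMIT };

/* Hypothetical helper: may a handle join the transaction in state s? */
static bool can_join(enum t_state s, bool reserved)
{
        if (s == T_RUNNING)
                return true;            /* anyone may join a running transaction */
        if (reserved && s == T_LOCKED)
                return true;            /* reserved handles may still join T_LOCKED */
        return false;                   /* from T_SWITCH on, no new handles at all */
}

int main(void)
{
        static const char *names[] = {
                "T_RUNNING", "T_LOCKED", "T_SWITCH", "T_FLUSH", "T_COMMIT"
        };

        for (enum t_state s = T_RUNNING; s <= T_COMMIT; s++)
                printf("%-10s normal=%d reserved=%d\n",
                       names[s], can_join(s, false), can_join(s, true));
        return 0;
}
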
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 902a7dd10e5c..bb6ae387469f 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -101,7 +101,8 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
- cancel_delayed_work_sync(&c->wbuf_dwork);
+ if (jffs2_is_writebuffered(c))
+ cancel_delayed_work_sync(&c->wbuf_dwork);
#endif
mutex_lock(&c->alloc_sem);
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 74330daeab71..ea719cdd6a36 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -276,7 +276,7 @@ static int nlmsvc_unlink_block(struct nlm_block *block)
dprintk("lockd: unlinking block %p...\n", block);
/* Remove block from list */
- status = posix_unblock_lock(&block->b_call->a_args.lock.fl);
+ status = locks_delete_block(&block->b_call->a_args.lock.fl);
nlmsvc_remove_block(block);
return status;
}
diff --git a/fs/locks.c b/fs/locks.c
index 2ecb4db8c840..f0b24d98f36b 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -11,11 +11,11 @@
*
* Miscellaneous edits, and a total rewrite of posix_lock_file() code.
* Kai Petzke (wpp@marie.physik.tu-berlin.de), 1994
- *
+ *
* Converted file_lock_table to a linked list from an array, which eliminates
* the limits on how many active file locks are open.
* Chad Page (pageone@netcom.com), November 27, 1994
- *
+ *
* Removed dependency on file descriptors. dup()'ed file descriptors now
* get the same locks as the original file descriptors, and a close() on
* any file descriptor removes ALL the locks on the file for the current
@@ -41,7 +41,7 @@
* with a file pointer (filp). As a result they can be shared by a parent
* process and its children after a fork(). They are removed when the last
* file descriptor referring to the file pointer is closed (unless explicitly
- * unlocked).
+ * unlocked).
*
* FL_FLOCK locks never deadlock, an existing lock is always removed before
* upgrading from shared to exclusive (or vice versa). When this happens
@@ -50,7 +50,7 @@
* Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
*
* Removed some race conditions in flock_lock_file(), marked other possible
- * races. Just grep for FIXME to see them.
+ * races. Just grep for FIXME to see them.
* Dmitry Gorodchanin (pgmdsg@ibi.com), February 09, 1996.
*
* Addressed Dmitry's concerns. Deadlock checking no longer recursive.
@@ -112,6 +112,46 @@
* Leases and LOCK_MAND
* Matthew Wilcox <willy@debian.org>, June, 2000.
* Stephen Rothwell <sfr@canb.auug.org.au>, June, 2000.
+ *
+ * Locking conflicts and dependencies:
+ * If multiple threads attempt to lock the same byte (or flock the same file)
+ * only one can be granted the lock, and the others must wait their turn.
+ * The first lock has been "applied" or "granted"; the others are "waiting"
+ * and are "blocked" by the "applied" lock.
+ *
+ * Waiting and applied locks are all kept in trees whose properties are:
+ *
+ * - the root of a tree may be an applied or waiting lock.
+ * - every other node in the tree is a waiting lock that
+ * conflicts with every ancestor of that node.
+ *
+ * Every such tree begins life as a waiting singleton which obviously
+ * satisfies the above properties.
+ *
+ * The only ways we modify trees preserve these properties:
+ *
+ * 1. We may add a new leaf node, but only after first verifying that it
+ * conflicts with all of its ancestors.
+ * 2. We may remove the root of a tree, creating a new singleton
+ * tree from the root and N new trees rooted in the immediate
+ * children.
+ * 3. If the root of a tree is not currently an applied lock, we may
+ * apply it (if possible).
+ * 4. We may upgrade the root of the tree (either extend its range,
+ * or upgrade its entire range from read to write).
+ *
+ * When an applied lock is modified in a way that reduces or downgrades any
+ * part of its range, we remove all its children (2 above). This particularly
+ * happens when a lock is unlocked.
+ *
+ * For each of those child trees we "wake up" the thread which is
+ * waiting for the lock so it can continue handling as follows: if the
+ * root of the tree applies, we do so (3). If it doesn't, it must
+ * conflict with some applied lock. We remove (wake up) all of its children
+ * (2), and add it as a new leaf to the tree rooted in the applied
+ * lock (1). We then repeat the process recursively with those
+ * children.
+ *
*/
#include <linux/capability.h>
@@ -189,9 +229,9 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
* This lock protects the blocked_hash. Generally, if you're accessing it, you
* want to be holding this lock.
*
- * In addition, it also protects the fl->fl_block list, and the fl->fl_next
- * pointer for file_lock structures that are acting as lock requests (in
- * contrast to those that are acting as records of acquired locks).
+ * In addition, it also protects the fl->fl_blocked_requests list, and the
+ * fl->fl_blocker pointer for file_lock structures that are acting as lock
+ * requests (in contrast to those that are acting as records of acquired locks).
*
* Note that when we acquire this lock in order to change the above fields,
* we often hold the flc_lock as well. In certain cases, when reading the fields
@@ -293,7 +333,8 @@ static void locks_init_lock_heads(struct file_lock *fl)
{
INIT_HLIST_NODE(&fl->fl_link);
INIT_LIST_HEAD(&fl->fl_list);
- INIT_LIST_HEAD(&fl->fl_block);
+ INIT_LIST_HEAD(&fl->fl_blocked_requests);
+ INIT_LIST_HEAD(&fl->fl_blocked_member);
init_waitqueue_head(&fl->fl_wait);
}
@@ -332,7 +373,8 @@ void locks_free_lock(struct file_lock *fl)
{
BUG_ON(waitqueue_active(&fl->fl_wait));
BUG_ON(!list_empty(&fl->fl_list));
- BUG_ON(!list_empty(&fl->fl_block));
+ BUG_ON(!list_empty(&fl->fl_blocked_requests));
+ BUG_ON(!list_empty(&fl->fl_blocked_member));
BUG_ON(!hlist_unhashed(&fl->fl_link));
locks_release_private(fl);
@@ -357,7 +399,6 @@ void locks_init_lock(struct file_lock *fl)
memset(fl, 0, sizeof(struct file_lock));
locks_init_lock_heads(fl);
}
-
EXPORT_SYMBOL(locks_init_lock);
/*
@@ -397,9 +438,26 @@ void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
fl->fl_ops->fl_copy_lock(new, fl);
}
}
-
EXPORT_SYMBOL(locks_copy_lock);
+static void locks_move_blocks(struct file_lock *new, struct file_lock *fl)
+{
+ struct file_lock *f;
+
+ /*
+ * As ctx->flc_lock is held, new requests cannot be added to
+ * ->fl_blocked_requests, so we don't need a lock to check if it
+ * is empty.
+ */
+ if (list_empty(&fl->fl_blocked_requests))
+ return;
+ spin_lock(&blocked_lock_lock);
+ list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests);
+ list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member)
+ f->fl_blocker = new;
+ spin_unlock(&blocked_lock_lock);
+}
+
static inline int flock_translate_cmd(int cmd) {
if (cmd & LOCK_MAND)
return cmd & (LOCK_MAND | LOCK_RW);
@@ -416,17 +474,20 @@ static inline int flock_translate_cmd(int cmd) {
/* Fill in a file_lock structure with an appropriate FLOCK lock. */
static struct file_lock *
-flock_make_lock(struct file *filp, unsigned int cmd)
+flock_make_lock(struct file *filp, unsigned int cmd, struct file_lock *fl)
{
- struct file_lock *fl;
int type = flock_translate_cmd(cmd);
if (type < 0)
return ERR_PTR(type);
-
- fl = locks_alloc_lock();
- if (fl == NULL)
- return ERR_PTR(-ENOMEM);
+
+ if (fl == NULL) {
+ fl = locks_alloc_lock();
+ if (fl == NULL)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ locks_init_lock(fl);
+ }
fl->fl_file = filp;
fl->fl_owner = filp;
@@ -434,7 +495,7 @@ flock_make_lock(struct file *filp, unsigned int cmd)
fl->fl_flags = FL_FLOCK;
fl->fl_type = type;
fl->fl_end = OFFSET_MAX;
-
+
return fl;
}
@@ -666,16 +727,58 @@ static void locks_delete_global_blocked(struct file_lock *waiter)
static void __locks_delete_block(struct file_lock *waiter)
{
locks_delete_global_blocked(waiter);
- list_del_init(&waiter->fl_block);
- waiter->fl_next = NULL;
+ list_del_init(&waiter->fl_blocked_member);
+ waiter->fl_blocker = NULL;
}
-static void locks_delete_block(struct file_lock *waiter)
+static void __locks_wake_up_blocks(struct file_lock *blocker)
{
+ while (!list_empty(&blocker->fl_blocked_requests)) {
+ struct file_lock *waiter;
+
+ waiter = list_first_entry(&blocker->fl_blocked_requests,
+ struct file_lock, fl_blocked_member);
+ __locks_delete_block(waiter);
+ if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
+ waiter->fl_lmops->lm_notify(waiter);
+ else
+ wake_up(&waiter->fl_wait);
+ }
+}
+
+/**
+ * locks_delete_block - stop waiting for a file lock
+ * @waiter: the lock which was waiting
+ *
+ * lockd/nfsd need to disconnect the lock while working on it.
+ */
+int locks_delete_block(struct file_lock *waiter)
+{
+ int status = -ENOENT;
+
+ /*
+ * If fl_blocker is NULL, it won't be set again as this thread
+ * "owns" the lock and is the only one that might try to claim
+ * the lock. So it is safe to test fl_blocker locklessly.
+ * Also if fl_blocker is NULL, this waiter is not listed on
+ * fl_blocked_requests for some lock, so no other request can
+ * be added to the list of fl_blocked_requests for this
+ * request. So if fl_blocker is NULL, it is safe to
+ * locklessly check if fl_blocked_requests is empty. If both
+ * of these checks succeed, there is no need to take the lock.
+ */
+ if (waiter->fl_blocker == NULL &&
+ list_empty(&waiter->fl_blocked_requests))
+ return status;
spin_lock(&blocked_lock_lock);
+ if (waiter->fl_blocker)
+ status = 0;
+ __locks_wake_up_blocks(waiter);
__locks_delete_block(waiter);
spin_unlock(&blocked_lock_lock);
+ return status;
}
+EXPORT_SYMBOL(locks_delete_block);
/* Insert waiter into blocker's block list.
* We use a circular list so that processes can be easily woken up in
@@ -683,26 +786,49 @@ static void locks_delete_block(struct file_lock *waiter)
* it seems like the reasonable thing to do.
*
* Must be called with both the flc_lock and blocked_lock_lock held. The
- * fl_block list itself is protected by the blocked_lock_lock, but by ensuring
- * that the flc_lock is also held on insertions we can avoid taking the
- * blocked_lock_lock in some cases when we see that the fl_block list is empty.
+ * fl_blocked_requests list itself is protected by the blocked_lock_lock,
+ * but by ensuring that the flc_lock is also held on insertions we can avoid
+ * taking the blocked_lock_lock in some cases when we see that the
+ * fl_blocked_requests list is empty.
+ *
+ * Rather than just adding to the list, we check for conflicts with any existing
+ * waiters, and add beneath any waiter that blocks the new waiter.
+ * Thus wakeups don't happen until needed.
*/
static void __locks_insert_block(struct file_lock *blocker,
- struct file_lock *waiter)
+ struct file_lock *waiter,
+ bool conflict(struct file_lock *,
+ struct file_lock *))
{
- BUG_ON(!list_empty(&waiter->fl_block));
- waiter->fl_next = blocker;
- list_add_tail(&waiter->fl_block, &blocker->fl_block);
+ struct file_lock *fl;
+ BUG_ON(!list_empty(&waiter->fl_blocked_member));
+
+new_blocker:
+ list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member)
+ if (conflict(fl, waiter)) {
+ blocker = fl;
+ goto new_blocker;
+ }
+ waiter->fl_blocker = blocker;
+ list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests);
if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
locks_insert_global_blocked(waiter);
+
+ /* The requests in waiter->fl_blocked_requests are known to conflict with
+ * waiter, but might not conflict with blocker, or the requests
+ * and lock which block it. So they all need to be woken.
+ */
+ __locks_wake_up_blocks(waiter);
}
/* Must be called with flc_lock held. */
static void locks_insert_block(struct file_lock *blocker,
- struct file_lock *waiter)
+ struct file_lock *waiter,
+ bool conflict(struct file_lock *,
+ struct file_lock *))
{
spin_lock(&blocked_lock_lock);
- __locks_insert_block(blocker, waiter);
+ __locks_insert_block(blocker, waiter, conflict);
spin_unlock(&blocked_lock_lock);
}
@@ -716,25 +842,15 @@ static void locks_wake_up_blocks(struct file_lock *blocker)
/*
* Avoid taking global lock if list is empty. This is safe since new
* blocked requests are only added to the list under the flc_lock, and
- * the flc_lock is always held here. Note that removal from the fl_block
- * list does not require the flc_lock, so we must recheck list_empty()
- * after acquiring the blocked_lock_lock.
+ * the flc_lock is always held here. Note that removal from the
+ * fl_blocked_requests list does not require the flc_lock, so we must
+ * recheck list_empty() after acquiring the blocked_lock_lock.
*/
- if (list_empty(&blocker->fl_block))
+ if (list_empty(&blocker->fl_blocked_requests))
return;
spin_lock(&blocked_lock_lock);
- while (!list_empty(&blocker->fl_block)) {
- struct file_lock *waiter;
-
- waiter = list_first_entry(&blocker->fl_block,
- struct file_lock, fl_block);
- __locks_delete_block(waiter);
- if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
- waiter->fl_lmops->lm_notify(waiter);
- else
- wake_up(&waiter->fl_wait);
- }
+ __locks_wake_up_blocks(blocker);
spin_unlock(&blocked_lock_lock);
}
@@ -766,47 +882,50 @@ locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose)
/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
* checks for shared/exclusive status of overlapping locks.
*/
-static int locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
+static bool locks_conflict(struct file_lock *caller_fl,
+ struct file_lock *sys_fl)
{
if (sys_fl->fl_type == F_WRLCK)
- return 1;
+ return true;
if (caller_fl->fl_type == F_WRLCK)
- return 1;
- return 0;
+ return true;
+ return false;
}
/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
* checking before calling the locks_conflict().
*/
-static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
+static bool posix_locks_conflict(struct file_lock *caller_fl,
+ struct file_lock *sys_fl)
{
/* POSIX locks owned by the same process do not conflict with
* each other.
*/
if (posix_same_owner(caller_fl, sys_fl))
- return (0);
+ return false;
/* Check whether they overlap */
if (!locks_overlap(caller_fl, sys_fl))
- return 0;
+ return false;
- return (locks_conflict(caller_fl, sys_fl));
+ return locks_conflict(caller_fl, sys_fl);
}
/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
* checking before calling the locks_conflict().
*/
-static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
+static bool flock_locks_conflict(struct file_lock *caller_fl,
+ struct file_lock *sys_fl)
{
/* FLOCK locks referring to the same filp do not conflict with
* each other.
*/
if (caller_fl->fl_file == sys_fl->fl_file)
- return (0);
+ return false;
if ((caller_fl->fl_type & LOCK_MAND) || (sys_fl->fl_type & LOCK_MAND))
- return 0;
+ return false;
- return (locks_conflict(caller_fl, sys_fl));
+ return locks_conflict(caller_fl, sys_fl);
}
void
@@ -877,8 +996,11 @@ static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
struct file_lock *fl;
hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
- if (posix_same_owner(fl, block_fl))
- return fl->fl_next;
+ if (posix_same_owner(fl, block_fl)) {
+ while (fl->fl_blocker)
+ fl = fl->fl_blocker;
+ return fl;
+ }
}
return NULL;
}
@@ -965,12 +1087,13 @@ find_conflict:
if (!(request->fl_flags & FL_SLEEP))
goto out;
error = FILE_LOCK_DEFERRED;
- locks_insert_block(fl, request);
+ locks_insert_block(fl, request, flock_locks_conflict);
goto out;
}
if (request->fl_flags & FL_ACCESS)
goto out;
locks_copy_lock(new_fl, request);
+ locks_move_blocks(new_fl, request);
locks_insert_lock_ctx(new_fl, &ctx->flc_flock);
new_fl = NULL;
error = 0;
@@ -1039,12 +1162,13 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
spin_lock(&blocked_lock_lock);
if (likely(!posix_locks_deadlock(request, fl))) {
error = FILE_LOCK_DEFERRED;
- __locks_insert_block(fl, request);
+ __locks_insert_block(fl, request,
+ posix_locks_conflict);
}
spin_unlock(&blocked_lock_lock);
goto out;
- }
- }
+ }
+ }
/* If we're just looking for a conflict, we're done. */
error = 0;
@@ -1164,6 +1288,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
goto out;
}
locks_copy_lock(new_fl, request);
+ locks_move_blocks(new_fl, request);
locks_insert_lock_ctx(new_fl, &fl->fl_list);
fl = new_fl;
new_fl = NULL;
@@ -1237,13 +1362,11 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
error = posix_lock_inode(inode, fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
- error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
- if (!error)
- continue;
-
- locks_delete_block(fl);
- break;
+ error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
+ if (error)
+ break;
}
+ locks_delete_block(fl);
return error;
}
@@ -1324,7 +1447,7 @@ int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
error = posix_lock_inode(inode, &fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
- error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
+ error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker);
if (!error) {
/*
* If we've been sleeping someone might have
@@ -1334,13 +1457,12 @@ int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
continue;
}
- locks_delete_block(&fl);
break;
}
+ locks_delete_block(&fl);
return error;
}
-
EXPORT_SYMBOL(locks_mandatory_area);
#endif /* CONFIG_MANDATORY_FILE_LOCKING */
@@ -1511,14 +1633,14 @@ restart:
break_time -= jiffies;
if (break_time == 0)
break_time++;
- locks_insert_block(fl, new_fl);
+ locks_insert_block(fl, new_fl, leases_conflict);
trace_break_lease_block(inode, new_fl);
spin_unlock(&ctx->flc_lock);
percpu_up_read_preempt_enable(&file_rwsem);
locks_dispose_list(&dispose);
error = wait_event_interruptible_timeout(new_fl->fl_wait,
- !new_fl->fl_next, break_time);
+ !new_fl->fl_blocker, break_time);
percpu_down_read_preempt_disable(&file_rwsem);
spin_lock(&ctx->flc_lock);
@@ -1542,7 +1664,6 @@ out:
locks_free_lock(new_fl);
return error;
}
-
EXPORT_SYMBOL(__break_lease);
/**
@@ -1573,7 +1694,6 @@ void lease_get_mtime(struct inode *inode, struct timespec64 *time)
if (has_lease)
*time = current_time(inode);
}
-
EXPORT_SYMBOL(lease_get_mtime);
/**
@@ -1628,8 +1748,8 @@ int fcntl_getlease(struct file *filp)
/**
* check_conflicting_open - see if the given dentry points to a file that has
- * an existing open that would conflict with the
- * desired lease.
+ * an existing open that would conflict with the
+ * desired lease.
* @dentry: dentry to check
* @arg: type of lease that we're trying to acquire
* @flags: current lock flags
@@ -1646,7 +1766,7 @@ check_conflicting_open(const struct dentry *dentry, const long arg, int flags)
if (flags & FL_LAYOUT)
return 0;
- if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
+ if ((arg == F_RDLCK) && inode_is_open_for_write(inode))
return -EAGAIN;
if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
@@ -1853,7 +1973,7 @@ EXPORT_SYMBOL(generic_setlease);
* @arg: type of lease to obtain
* @lease: file_lock to use when adding a lease
* @priv: private info for lm_setup when adding a lease (may be
- * NULL if lm_setup doesn't require it)
+ * NULL if lm_setup doesn't require it)
*
* Call this to establish a lease on the file. The "lease" argument is not
* used for F_UNLCK requests and may be NULL. For commands that set or alter
@@ -1931,13 +2051,11 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
error = flock_lock_inode(inode, fl);
if (error != FILE_LOCK_DEFERRED)
break;
- error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
- if (!error)
- continue;
-
- locks_delete_block(fl);
- break;
+ error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
+ if (error)
+ break;
}
+ locks_delete_block(fl);
return error;
}
@@ -2001,7 +2119,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
!(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
goto out_putf;
- lock = flock_make_lock(f.file, cmd);
+ lock = flock_make_lock(f.file, cmd, NULL);
if (IS_ERR(lock)) {
error = PTR_ERR(lock);
goto out_putf;
@@ -2143,7 +2261,7 @@ int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
error = vfs_test_lock(filp, fl);
if (error)
goto out;
-
+
flock->l_type = fl->fl_type;
if (fl->fl_type != F_UNLCK) {
error = posix_lock_to_flock(flock, fl);
@@ -2210,13 +2328,11 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
error = vfs_lock_file(filp, cmd, fl, NULL);
if (error != FILE_LOCK_DEFERRED)
break;
- error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
- if (!error)
- continue;
-
- locks_delete_block(fl);
- break;
+ error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
+ if (error)
+ break;
}
+ locks_delete_block(fl);
return error;
}
@@ -2476,6 +2592,7 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
if (!ctx || list_empty(&ctx->flc_posix))
return;
+ locks_init_lock(&lock);
lock.fl_type = F_UNLCK;
lock.fl_flags = FL_POSIX | FL_CLOSE;
lock.fl_start = 0;
@@ -2492,26 +2609,21 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
lock.fl_ops->fl_release_private(&lock);
trace_locks_remove_posix(inode, &lock, error);
}
-
EXPORT_SYMBOL(locks_remove_posix);
/* The i_flctx must be valid when calling into here */
static void
locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
{
- struct file_lock fl = {
- .fl_owner = filp,
- .fl_pid = current->tgid,
- .fl_file = filp,
- .fl_flags = FL_FLOCK | FL_CLOSE,
- .fl_type = F_UNLCK,
- .fl_end = OFFSET_MAX,
- };
+ struct file_lock fl;
struct inode *inode = locks_inode(filp);
if (list_empty(&flctx->flc_flock))
return;
+ flock_make_lock(filp, LOCK_UN, &fl);
+ fl.fl_flags |= FL_CLOSE;
+
if (filp->f_op->flock)
filp->f_op->flock(filp, F_SETLKW, &fl);
else
@@ -2570,27 +2682,6 @@ void locks_remove_file(struct file *filp)
}
/**
- * posix_unblock_lock - stop waiting for a file lock
- * @waiter: the lock which was waiting
- *
- * lockd needs to block waiting for locks.
- */
-int
-posix_unblock_lock(struct file_lock *waiter)
-{
- int status = 0;
-
- spin_lock(&blocked_lock_lock);
- if (waiter->fl_next)
- __locks_delete_block(waiter);
- else
- status = -ENOENT;
- spin_unlock(&blocked_lock_lock);
- return status;
-}
-EXPORT_SYMBOL(posix_unblock_lock);
-
-/**
* vfs_cancel_lock - file byte range unblock lock
* @filp: The file to apply the unblock to
* @fl: The lock to be unblocked
@@ -2603,7 +2694,6 @@ int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
return filp->f_op->lock(filp, F_CANCELLK, fl);
return 0;
}
-
EXPORT_SYMBOL_GPL(vfs_cancel_lock);
#ifdef CONFIG_PROC_FS
@@ -2707,7 +2797,7 @@ static int locks_show(struct seq_file *f, void *v)
lock_get_status(f, fl, iter->li_pos, "");
- list_for_each_entry(bfl, &fl->fl_block, fl_block)
+ list_for_each_entry(bfl, &fl->fl_blocked_requests, fl_blocked_member)
lock_get_status(f, bfl, iter->li_pos, " ->");
return 0;
@@ -2803,7 +2893,6 @@ static int __init filelock_init(void)
filelock_cache = kmem_cache_create("file_lock_cache",
sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
-
for_each_possible_cpu(i) {
struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
@@ -2813,5 +2902,4 @@ static int __init filelock_init(void)
return 0;
}
-
core_initcall(filelock_init);
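
The tree invariants documented at the top of fs/locks.c can be exercised in a few lines: insertion descends beneath the deepest conflicting waiter, and waking a blocker detaches only its direct children, each of which then retries and re-sorts its own subtree. A hedged userspace model; struct req stands in for struct file_lock and conflict() for posix_locks_conflict():

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct file_lock: blocker models fl_blocker, child/next
 * model the fl_blocked_requests / fl_blocked_member lists. */
struct req {
        int start, end;                 /* byte range, inclusive */
        bool write;
        struct req *blocker;
        struct req *child, *next;
};

static bool conflict(const struct req *a, const struct req *b)
{
        if (a->end < b->start || b->end < a->start)
                return false;           /* ranges do not overlap */
        return a->write || b->write;    /* overlap conflicts unless both read */
}

/* Models __locks_insert_block(): descend to the deepest conflicting waiter. */
static void insert_block(struct req *blocker, struct req *waiter)
{
again:
        for (struct req *f = blocker->child; f; f = f->next)
                if (conflict(f, waiter)) {
                        blocker = f;
                        goto again;
                }
        waiter->blocker = blocker;
        waiter->next = blocker->child;
        blocker->child = waiter;
}

/* Models __locks_wake_up_blocks(): detach and wake direct children only. */
static void wake_up_blocks(struct req *blocker)
{
        while (blocker->child) {
                struct req *w = blocker->child;

                blocker->child = w->next;
                w->next = NULL;
                w->blocker = NULL;
                printf("woke waiter [%d,%d]\n", w->start, w->end);
        }
}

int main(void)
{
        struct req applied = { 0, 99, true, NULL, NULL, NULL };
        struct req w1 = { 10, 19, true, NULL, NULL, NULL };
        struct req w2 = { 15, 24, true, NULL, NULL, NULL };

        insert_block(&applied, &w1);    /* w1 waits on the applied lock */
        insert_block(&applied, &w2);    /* w2 conflicts with w1, nests under it */
        printf("w2 blocked by [%d,%d]\n", w2.blocker->start, w2.blocker->end);
        wake_up_blocks(&applied);       /* unlock: only w1 is woken; w2 stays
                                         * queued behind w1 until w1 retries */
        return 0;
}
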
diff --git a/fs/namei.c b/fs/namei.c
index 0cab6494978c..914178cdbe94 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3701,8 +3701,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
if (error)
return error;
- if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
- !ns_capable(dentry->d_sb->s_user_ns, CAP_MKNOD))
+ if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
return -EPERM;
if (!dir->i_op->mknod)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index aa12c3063bae..33824a0a57bf 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -98,8 +98,11 @@ struct nfs_direct_req {
struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */
struct work_struct work;
int flags;
+ /* for write */
#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
+ /* for read */
+#define NFS_ODIRECT_SHOULD_DIRTY (3) /* dirty user-space page after read */
struct nfs_writeverf verf; /* unstable write verifier */
};
@@ -412,7 +415,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
struct page *page = req->wb_page;
- if (!PageCompound(page) && bytes < hdr->good_bytes)
+ if (!PageCompound(page) && bytes < hdr->good_bytes &&
+ (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
set_page_dirty(page);
bytes += req->wb_bytes;
nfs_list_remove_request(req);
@@ -587,6 +591,9 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
+ if (iter_is_iovec(iter))
+ dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
+
nfs_start_io_direct(inode);
NFS_I(inode)->read_io += count;
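
The fs/nfs/direct.c change makes page dirtying conditional on the new NFS_ODIRECT_SHOULD_DIRTY flag, set only when the iov_iter is backed by user iovecs; kernel-internal bvec iterators no longer get their pages dirtied on read completion. A small userspace sketch of the flag-gated completion logic, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    enum iter_kind { ITER_IOVEC, ITER_BVEC };

    struct direct_req_sketch {
        int flags;
    #define SHOULD_DIRTY 1 /* set only for user-backed iterators */
    };

    static void start_read(struct direct_req_sketch *dreq, enum iter_kind kind)
    {
        if (kind == ITER_IOVEC) /* analogue of iter_is_iovec() */
            dreq->flags = SHOULD_DIRTY;
    }

    static void complete_page(const struct direct_req_sketch *dreq, int page)
    {
        /* analogue of set_page_dirty(): only user pages need it */
        if (dreq->flags == SHOULD_DIRTY)
            printf("dirtying page %d\n", page);
        else
            printf("leaving page %d clean\n", page);
    }

    int main(void)
    {
        struct direct_req_sketch a = {0}, b = {0};

        start_read(&a, ITER_IOVEC);
        start_read(&b, ITER_BVEC);
        complete_page(&a, 1);
        complete_page(&b, 2);
        return 0;
    }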
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 74b36ed883ca..310d7500f665 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1733,7 +1733,8 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
if (fh)
hdr->args.fh = fh;
- if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+ if (vers == 4 &&
+ !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
goto out_failed;
/*
@@ -1798,7 +1799,8 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
if (fh)
hdr->args.fh = fh;
- if (!nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+ if (vers == 4 &&
+ !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
goto out_failed;
/*
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 867457d6dfbe..0ba2b0fb8ff3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6311,7 +6311,8 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
/* Ensure we don't close file until we're done freeing locks! */
p->ctx = get_nfs_open_context(ctx);
p->l_ctx = nfs_get_lock_context(ctx);
- memcpy(&p->fl, fl, sizeof(p->fl));
+ locks_init_lock(&p->fl);
+ locks_copy_lock(&p->fl, fl);
p->server = NFS_SERVER(inode);
return p;
}
@@ -6533,7 +6534,8 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->server = server;
refcount_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
- memcpy(&p->fl, fl, sizeof(p->fl));
+ locks_init_lock(&p->fl);
+ locks_copy_lock(&p->fl, fl);
return p;
out_free_seqid:
nfs_free_seqid(p->arg.open_seqid);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f093fbe47133..a334828723fa 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -238,7 +238,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
}
spin_unlock(&nn->blocked_locks_lock);
if (found)
- posix_unblock_lock(&found->nbl_lock);
+ locks_delete_block(&found->nbl_lock);
return found;
}
@@ -293,7 +293,7 @@ remove_blocked_locks(struct nfs4_lockowner *lo)
nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
nbl_lru);
list_del_init(&nbl->nbl_lru);
- posix_unblock_lock(&nbl->nbl_lock);
+ locks_delete_block(&nbl->nbl_lock);
free_blocked_lock(nbl);
}
}
@@ -4863,7 +4863,7 @@ nfs4_laundromat(struct nfsd_net *nn)
nbl = list_first_entry(&reaplist,
struct nfsd4_blocked_lock, nbl_lru);
list_del_init(&nbl->nbl_lru);
- posix_unblock_lock(&nbl->nbl_lock);
+ locks_delete_block(&nbl->nbl_lock);
free_blocked_lock(nbl);
}
out:
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index e08a6647267b..3723f3d18d20 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -89,7 +89,13 @@ static int fanotify_get_response(struct fsnotify_group *group,
return ret;
}
-static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
+/*
+ * This function returns a mask for an event that only contains the flags
+ * that have been specifically requested by the user. Flags that may have
+ * been included within the event mask, but have not been explicitly
+ * requested by the user, will not be present in the returned mask.
+ */
+static u32 fanotify_group_event_mask(struct fsnotify_iter_info *iter_info,
u32 event_mask, const void *data,
int data_type)
{
@@ -101,14 +107,14 @@ static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n",
__func__, iter_info->report_mask, event_mask, data, data_type);
- /* if we don't have enough info to send an event to userspace say no */
+ /* If we don't have enough info to send an event to userspace say no */
if (data_type != FSNOTIFY_EVENT_PATH)
- return false;
+ return 0;
- /* sorry, fanotify only gives a damn about files and dirs */
+ /* Sorry, fanotify only gives a damn about files and dirs */
if (!d_is_reg(path->dentry) &&
!d_can_lookup(path->dentry))
- return false;
+ return 0;
fsnotify_foreach_obj_type(type) {
if (!fsnotify_iter_should_report_type(iter_info, type))
@@ -129,13 +135,10 @@ static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
if (d_is_dir(path->dentry) &&
!(marks_mask & FS_ISDIR & ~marks_ignored_mask))
- return false;
-
- if (event_mask & FANOTIFY_OUTGOING_EVENTS &
- marks_mask & ~marks_ignored_mask)
- return true;
+ return 0;
- return false;
+ return event_mask & FANOTIFY_OUTGOING_EVENTS & marks_mask &
+ ~marks_ignored_mask;
}
struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
@@ -207,10 +210,13 @@ static int fanotify_handle_event(struct fsnotify_group *group,
BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR);
+ BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
+ BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);
- BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 10);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 12);
- if (!fanotify_should_send_event(iter_info, mask, data, data_type))
+ mask = fanotify_group_event_mask(iter_info, mask, data, data_type);
+ if (!mask)
return 0;
pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode,
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index e03be5071362..9c870b0d2b56 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -206,7 +206,7 @@ static int process_access_response(struct fsnotify_group *group,
static ssize_t copy_event_to_user(struct fsnotify_group *group,
struct fsnotify_event *event,
- char __user *buf)
+ char __user *buf, size_t count)
{
struct fanotify_event_metadata fanotify_event_metadata;
struct file *f;
@@ -220,6 +220,12 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
fd = fanotify_event_metadata.fd;
ret = -EFAULT;
+ /*
+ * Sanity check copy size in case get_one_event() and
+ * fill_event_metadata() event_len sizes ever get out of sync.
+ */
+ if (WARN_ON_ONCE(fanotify_event_metadata.event_len > count))
+ goto out_close_fd;
if (copy_to_user(buf, &fanotify_event_metadata,
fanotify_event_metadata.event_len))
goto out_close_fd;
@@ -295,7 +301,7 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
continue;
}
- ret = copy_event_to_user(group, kevent, buf);
+ ret = copy_event_to_user(group, kevent, buf, count);
if (unlikely(ret == -EOPENSTALE)) {
/*
* We cannot report events with stale fd so drop it.
@@ -669,7 +675,7 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
*/
if ((flags & FAN_MARK_IGNORED_MASK) &&
!(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
- (atomic_read(&inode->i_writecount) > 0))
+ inode_is_open_for_write(inode))
return 0;
return fanotify_add_mark(group, &inode->i_fsnotify_marks,
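
copy_event_to_user() now receives the size of the caller's buffer and refuses to copy a record larger than it, so the two independent event_len computations can never silently overrun userspace memory. A standalone sketch of the bounds check before the copy (the struct here is hypothetical, not the fanotify ABI):

    #include <stdio.h>
    #include <string.h>

    struct metadata { unsigned event_len; char payload[32]; };

    /* returns bytes copied, or -1 if the record would overrun the buffer */
    static int copy_event(char *buf, size_t count, const struct metadata *m)
    {
        if (m->event_len > count) /* analogue of the WARN_ON_ONCE check */
            return -1;
        memcpy(buf, m, m->event_len);
        return (int)m->event_len;
    }

    int main(void)
    {
        struct metadata m = { sizeof(m), "hello" };
        char small[8], big[64];

        printf("%d\n", copy_event(small, sizeof(small), &m)); /* -1 */
        printf("%d\n", copy_event(big, sizeof(big), &m));     /* sizeof(m) */
        return 0;
    }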
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index 348a184bcdda..1e2bfd26b352 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -15,6 +15,7 @@
#include <linux/exportfs.h>
#include "inotify/inotify.h"
+#include "fdinfo.h"
#include "fsnotify.h"
#if defined(CONFIG_PROC_FS)
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index d2c34900ae05..ecf09b6243d9 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -401,7 +401,7 @@ static __init int fsnotify_init(void)
{
int ret;
- BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 23);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 25);
ret = init_srcu_struct(&fsnotify_mark_srcu);
if (ret)
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 9f88188060db..4bf8d5854b27 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -125,10 +125,10 @@ check_err:
check_gen:
if (handle->ih_generation != inode->i_generation) {
- iput(inode);
trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
handle->ih_generation,
inode->i_generation);
+ iput(inode);
result = ERR_PTR(-ESTALE);
goto bail;
}
diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c
index d56f0079b858..b11acd34001a 100644
--- a/fs/ocfs2/locks.c
+++ b/fs/ocfs2/locks.c
@@ -52,6 +52,7 @@ static int ocfs2_do_flock(struct file *file, struct inode *inode,
if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
lockres->l_level > LKM_NLMODE) {
int old_level = 0;
+ struct file_lock request;
if (lockres->l_level == LKM_EXMODE)
old_level = 1;
@@ -66,11 +67,10 @@ static int ocfs2_do_flock(struct file *file, struct inode *inode,
* level.
*/
- locks_lock_file_wait(file,
- &(struct file_lock) {
- .fl_type = F_UNLCK,
- .fl_flags = FL_FLOCK
- });
+ locks_init_lock(&request);
+ request.fl_type = F_UNLCK;
+ request.fl_flags = FL_FLOCK;
+ locks_lock_file_wait(file, &request);
ocfs2_file_unlock(file);
}
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 3f1685d7d43b..1565dd8e8856 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -157,18 +157,14 @@ out:
}
/*
- * lock allocators, and reserving appropriate number of bits for
- * meta blocks and data clusters.
- *
- * in some cases, we don't need to reserve clusters, just let data_ac
- * be NULL.
+ * lock the allocator, and reserve an appropriate number of bits for
+ * meta blocks.
*/
-static int ocfs2_lock_allocators_move_extents(struct inode *inode,
+static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
struct ocfs2_extent_tree *et,
u32 clusters_to_move,
u32 extents_to_split,
struct ocfs2_alloc_context **meta_ac,
- struct ocfs2_alloc_context **data_ac,
int extra_blocks,
int *credits)
{
@@ -193,13 +189,6 @@ static int ocfs2_lock_allocators_move_extents(struct inode *inode,
goto out;
}
- if (data_ac) {
- ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- }
*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
@@ -259,10 +248,10 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
}
}
- ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
- &context->meta_ac,
- &context->data_ac,
- extra_blocks, &credits);
+ ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
+ *len, 1,
+ &context->meta_ac,
+ extra_blocks, &credits);
if (ret) {
mlog_errno(ret);
goto out;
@@ -285,6 +274,21 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
}
}
+ /*
+ * Make sure ocfs2_reserve_clusters() is called after
+ * __ocfs2_flush_truncate_log(); otherwise a deadlock on the
+ * global bitmap may happen.
+ */
+ ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_unlock_mutex;
+ }
+
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
@@ -617,9 +621,10 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
}
}
- ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
- &context->meta_ac,
- NULL, extra_blocks, &credits);
+ ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
+ len, 1,
+ &context->meta_ac,
+ extra_blocks, &credits);
if (ret) {
mlog_errno(ret);
goto out;
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index 607092f367ad..1b2d0d2fe2ee 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -199,10 +199,11 @@ static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry
child = dp->child;
while (child) {
- int n = strlen(child->path_component_name);
+ const char *node_name = kbasename(child->full_name);
+ int n = strlen(node_name);
if (len == n &&
- !strncmp(child->path_component_name, name, len)) {
+ !strncmp(node_name, name, len)) {
ent_type = op_inode_node;
ent_data.node = child;
ino = child->unique_id;
@@ -245,7 +246,7 @@ found:
set_nlink(inode, 2);
break;
case op_inode_prop:
- if (!strcmp(dp->name, "options") && (len == 17) &&
+ if (of_node_name_eq(dp, "options") && (len == 17) &&
!strncmp (name, "security-password", 17))
inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR;
else
@@ -293,8 +294,8 @@ static int openpromfs_readdir(struct file *file, struct dir_context *ctx)
}
while (child) {
if (!dir_emit(ctx,
- child->path_component_name,
- strlen(child->path_component_name),
+ kbasename(child->full_name),
+ strlen(kbasename(child->full_name)),
child->unique_id, DT_DIR))
goto out;
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index c6289147c787..82c129bfe58d 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -651,6 +651,18 @@ static int ovl_symlink(struct inode *dir, struct dentry *dentry,
return ovl_create_object(dentry, S_IFLNK, 0, link);
}
+static int ovl_set_link_redirect(struct dentry *dentry)
+{
+ const struct cred *old_cred;
+ int err;
+
+ old_cred = ovl_override_creds(dentry->d_sb);
+ err = ovl_set_redirect(dentry, false);
+ revert_creds(old_cred);
+
+ return err;
+}
+
static int ovl_link(struct dentry *old, struct inode *newdir,
struct dentry *new)
{
@@ -670,7 +682,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
goto out_drop_write;
if (ovl_is_metacopy_dentry(old)) {
- err = ovl_set_redirect(old, false);
+ err = ovl_set_link_redirect(old);
if (err)
goto out_drop_write;
}
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index 8fa37cd7818a..54e5d17d7f3e 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -754,9 +754,8 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
goto out;
}
- /* Otherwise, get a connected non-upper dir or disconnected non-dir */
- if (d_is_dir(origin.dentry) &&
- (origin.dentry->d_flags & DCACHE_DISCONNECTED)) {
+ /* Find origin.dentry again with ovl_acceptable() layer check */
+ if (d_is_dir(origin.dentry)) {
dput(origin.dentry);
origin.dentry = NULL;
err = ovl_check_origin_fh(ofs, fh, true, NULL, &stack);
@@ -769,6 +768,7 @@ static struct dentry *ovl_lower_fh_to_d(struct super_block *sb,
goto out_err;
}
+ /* Get a connected non-upper dir or disconnected non-dir */
dentry = ovl_get_dentry(sb, NULL, &origin, index);
out:
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 6bcc9dedc342..3b7ed5d2279c 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -286,22 +286,13 @@ int ovl_permission(struct inode *inode, int mask)
if (err)
return err;
- /* No need to do any access on underlying for special files */
- if (special_file(realinode->i_mode))
- return 0;
-
- /* No need to access underlying for execute */
- mask &= ~MAY_EXEC;
- if ((mask & (MAY_READ | MAY_WRITE)) == 0)
- return 0;
-
- /* Lower files get copied up, so turn write access into read */
- if (!upperinode && mask & MAY_WRITE) {
+ old_cred = ovl_override_creds(inode->i_sb);
+ if (!upperinode &&
+ !special_file(realinode->i_mode) && mask & MAY_WRITE) {
mask &= ~(MAY_WRITE | MAY_APPEND);
+ /* Make sure mounter can read file for copy up later */
mask |= MAY_READ;
}
-
- old_cred = ovl_override_creds(inode->i_sb);
err = inode_permission(realinode, mask);
revert_creds(old_cred);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 89921a0d2ebb..4d598a399bbf 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -464,7 +464,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
inode = new_inode(sb);
if (!inode)
- goto out;
+ return ERR_PTR(-ENOMEM);
inode->i_ino = get_next_ino();
@@ -474,8 +474,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
if (unlikely(head->unregistering)) {
spin_unlock(&sysctl_lock);
iput(inode);
- inode = NULL;
- goto out;
+ return ERR_PTR(-ENOENT);
}
ei->sysctl = head;
ei->sysctl_entry = table;
@@ -500,7 +499,6 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
if (root->set_ownership)
root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
-out:
return inode;
}
@@ -549,10 +547,11 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- err = ERR_PTR(-ENOMEM);
inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
- if (!inode)
+ if (IS_ERR(inode)) {
+ err = ERR_CAST(inode);
goto out;
+ }
d_set_d_op(dentry, &proc_sys_dentry_operations);
err = d_splice_alias(inode, dentry);
@@ -685,7 +684,7 @@ static bool proc_sys_fill_cache(struct file *file,
if (d_in_lookup(child)) {
struct dentry *res;
inode = proc_sys_make_inode(dir->d_sb, head, table);
- if (!inode) {
+ if (IS_ERR(inode)) {
d_lookup_done(child);
dput(child);
return false;
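
proc_sys_make_inode() now reports the reason for failure via ERR_PTR() instead of a bare NULL, and its callers decode it with IS_ERR()/ERR_CAST(). A freestanding sketch of the encode/decode pair; the real helpers live in <linux/err.h>, and these simplified versions only model the idea:

    #include <stdio.h>
    #include <errno.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static inline void *err_ptr(long err)      { return (void *)err; }
    static inline long  ptr_err(const void *p) { return (long)p; }
    static inline int   is_err(const void *p)
    {
        /* the top MAX_ERRNO addresses are reserved for encoded errnos */
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *make_inode(int want_fail)
    {
        void *inode = want_fail ? NULL : malloc(64);

        if (!inode)
            return err_ptr(-ENOMEM); /* say *why*, not just "no" */
        return inode;
    }

    int main(void)
    {
        void *inode = make_inode(1);

        if (is_err(inode))
            printf("lookup failed: %ld\n", ptr_err(inode)); /* -12 */
        else
            free(inode);
        return 0;
    }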
diff --git a/fs/pstore/ftrace.c b/fs/pstore/ftrace.c
index 06aab07b6bb7..b8a0931568f8 100644
--- a/fs/pstore/ftrace.c
+++ b/fs/pstore/ftrace.c
@@ -148,7 +148,7 @@ void pstore_unregister_ftrace(void)
mutex_lock(&pstore_ftrace_lock);
if (pstore_ftrace_enabled) {
unregister_ftrace_function(&pstore_ftrace_ops);
- pstore_ftrace_enabled = 0;
+ pstore_ftrace_enabled = false;
}
mutex_unlock(&pstore_ftrace_lock);
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 8cf2218b46a7..c60ee46f3e39 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -335,53 +335,10 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
goto fail_alloc;
private->record = record;
- switch (record->type) {
- case PSTORE_TYPE_DMESG:
- scnprintf(name, sizeof(name), "dmesg-%s-%llu%s",
- record->psi->name, record->id,
- record->compressed ? ".enc.z" : "");
- break;
- case PSTORE_TYPE_CONSOLE:
- scnprintf(name, sizeof(name), "console-%s-%llu",
- record->psi->name, record->id);
- break;
- case PSTORE_TYPE_FTRACE:
- scnprintf(name, sizeof(name), "ftrace-%s-%llu",
- record->psi->name, record->id);
- break;
- case PSTORE_TYPE_MCE:
- scnprintf(name, sizeof(name), "mce-%s-%llu",
- record->psi->name, record->id);
- break;
- case PSTORE_TYPE_PPC_RTAS:
- scnprintf(name, sizeof(name), "rtas-%s-%llu",
- record->psi->name, record->id);
- break;
- case PSTORE_TYPE_PPC_OF:
- scnprintf(name, sizeof(name), "powerpc-ofw-%s-%llu",
- record->psi->name, record->id);
- break;
- case PSTORE_TYPE_PPC_COMMON:
- scnprintf(name, sizeof(name), "powerpc-common-%s-%llu",
- record->psi->name, record->id);
- break;
- case PSTORE_TYPE_PMSG:
- scnprintf(name, sizeof(name), "pmsg-%s-%llu",
- record->psi->name, record->id);
- break;
- case PSTORE_TYPE_PPC_OPAL:
- scnprintf(name, sizeof(name), "powerpc-opal-%s-%llu",
- record->psi->name, record->id);
- break;
- case PSTORE_TYPE_UNKNOWN:
- scnprintf(name, sizeof(name), "unknown-%s-%llu",
- record->psi->name, record->id);
- break;
- default:
- scnprintf(name, sizeof(name), "type%d-%s-%llu",
- record->type, record->psi->name, record->id);
- break;
- }
+ scnprintf(name, sizeof(name), "%s-%s-%llu%s",
+ pstore_type_to_name(record->type),
+ record->psi->name, record->id,
+ record->compressed ? ".enc.z" : "");
dentry = d_alloc_name(root, name);
if (!dentry)
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index b821054ca3ed..2d1066ed3c28 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -59,6 +59,19 @@ MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
"enabling this option is not safe, it may lead to further "
"corruption on Oopses)");
+/* Names should be in the same order as the enum pstore_type_id */
+static const char * const pstore_type_names[] = {
+ "dmesg",
+ "mce",
+ "console",
+ "ftrace",
+ "rtas",
+ "powerpc-ofw",
+ "powerpc-common",
+ "pmsg",
+ "powerpc-opal",
+};
+
static int pstore_new_entry;
static void pstore_timefunc(struct timer_list *);
@@ -104,6 +117,30 @@ void pstore_set_kmsg_bytes(int bytes)
/* Tag each group of saved records with a sequence number */
static int oopscount;
+const char *pstore_type_to_name(enum pstore_type_id type)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(pstore_type_names) != PSTORE_TYPE_MAX);
+
+ if (WARN_ON_ONCE(type >= PSTORE_TYPE_MAX))
+ return "unknown";
+
+ return pstore_type_names[type];
+}
+EXPORT_SYMBOL_GPL(pstore_type_to_name);
+
+enum pstore_type_id pstore_name_to_type(const char *name)
+{
+ int i;
+
+ for (i = 0; i < PSTORE_TYPE_MAX; i++) {
+ if (!strcmp(pstore_type_names[i], name))
+ return i;
+ }
+
+ return PSTORE_TYPE_MAX;
+}
+EXPORT_SYMBOL_GPL(pstore_name_to_type);
+
static const char *get_reason_str(enum kmsg_dump_reason reason)
{
switch (reason) {
@@ -124,26 +161,27 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
}
}
-bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
+/*
+ * Should pstore_dump() wait for a concurrent pstore_dump()? If
+ * not, the current pstore_dump() will report a failure to dump
+ * and return.
+ */
+static bool pstore_cannot_wait(enum kmsg_dump_reason reason)
{
- /*
- * In case of NMI path, pstore shouldn't be blocked
- * regardless of reason.
- */
+ /* In NMI path, pstore shouldn't block regardless of reason. */
if (in_nmi())
return true;
switch (reason) {
/* In panic case, other cpus are stopped by smp_send_stop(). */
case KMSG_DUMP_PANIC:
- /* Emergency restart shouldn't be blocked by spin lock. */
+ /* Emergency restart shouldn't be blocked. */
case KMSG_DUMP_EMERG:
return true;
default:
return false;
}
}
-EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
#if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
static int zbufsize_deflate(size_t size)
@@ -258,20 +296,6 @@ static int pstore_compress(const void *in, void *out,
return outlen;
}
-static int pstore_decompress(void *in, void *out,
- unsigned int inlen, unsigned int outlen)
-{
- int ret;
-
- ret = crypto_comp_decompress(tfm, in, inlen, out, &outlen);
- if (ret) {
- pr_err("crypto_comp_decompress failed, ret = %d!\n", ret);
- return ret;
- }
-
- return outlen;
-}
-
static void allocate_buf_for_compression(void)
{
struct crypto_comp *ctx;
@@ -318,7 +342,7 @@ static void allocate_buf_for_compression(void)
big_oops_buf_sz = size;
big_oops_buf = buf;
- pr_info("Using compression: %s\n", zbackend->name);
+ pr_info("Using crash dump compression: %s\n", zbackend->name);
}
static void free_buf_for_compression(void)
@@ -368,9 +392,8 @@ void pstore_record_init(struct pstore_record *record,
}
/*
- * callback from kmsg_dump. (s2,l2) has the most recently
- * written bytes, older bytes are in (s1,l1). Save as much
- * as we can from the end of the buffer.
+ * callback from kmsg_dump. Save as much as we can (up to kmsg_bytes) from the
+ * end of the buffer.
*/
static void pstore_dump(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
@@ -378,23 +401,23 @@ static void pstore_dump(struct kmsg_dumper *dumper,
unsigned long total = 0;
const char *why;
unsigned int part = 1;
- unsigned long flags = 0;
- int is_locked;
int ret;
why = get_reason_str(reason);
- if (pstore_cannot_block_path(reason)) {
- is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
- if (!is_locked) {
- pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
- , in_nmi() ? "NMI" : why);
+ if (down_trylock(&psinfo->buf_lock)) {
+ /* Failed to acquire lock: give up if we cannot wait. */
+ if (pstore_cannot_wait(reason)) {
+ pr_err("dump skipped in %s path: may corrupt error record\n",
+ in_nmi() ? "NMI" : why);
+ return;
+ }
+ if (down_interruptible(&psinfo->buf_lock)) {
+ pr_err("could not grab semaphore?!\n");
return;
}
- } else {
- spin_lock_irqsave(&psinfo->buf_lock, flags);
- is_locked = 1;
}
+
oopscount++;
while (total < kmsg_bytes) {
char *dst;
@@ -411,7 +434,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
record.part = part;
record.buf = psinfo->buf;
- if (big_oops_buf && is_locked) {
+ if (big_oops_buf) {
dst = big_oops_buf;
dst_size = big_oops_buf_sz;
} else {
@@ -429,7 +452,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
dst_size, &dump_size))
break;
- if (big_oops_buf && is_locked) {
+ if (big_oops_buf) {
zipped_len = pstore_compress(dst, psinfo->buf,
header_size + dump_size,
psinfo->bufsize);
@@ -452,8 +475,8 @@ static void pstore_dump(struct kmsg_dumper *dumper,
total += record.size;
part++;
}
- if (is_locked)
- spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+
+ up(&psinfo->buf_lock);
}
static struct kmsg_dumper pstore_dumper = {
@@ -476,31 +499,14 @@ static void pstore_unregister_kmsg(void)
#ifdef CONFIG_PSTORE_CONSOLE
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
- const char *e = s + c;
-
- while (s < e) {
- struct pstore_record record;
- unsigned long flags;
+ struct pstore_record record;
- pstore_record_init(&record, psinfo);
- record.type = PSTORE_TYPE_CONSOLE;
-
- if (c > psinfo->bufsize)
- c = psinfo->bufsize;
+ pstore_record_init(&record, psinfo);
+ record.type = PSTORE_TYPE_CONSOLE;
- if (oops_in_progress) {
- if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
- break;
- } else {
- spin_lock_irqsave(&psinfo->buf_lock, flags);
- }
- record.buf = (char *)s;
- record.size = c;
- psinfo->write(&record);
- spin_unlock_irqrestore(&psinfo->buf_lock, flags);
- s += c;
- c = e - s;
- }
+ record.buf = (char *)s;
+ record.size = c;
+ psinfo->write(&record);
}
static struct console pstore_console = {
@@ -589,6 +595,7 @@ int pstore_register(struct pstore_info *psi)
psi->write_user = pstore_write_user_compat;
psinfo = psi;
mutex_init(&psinfo->read_mutex);
+ sema_init(&psinfo->buf_lock, 1);
spin_unlock(&pstore_lock);
if (owner && !try_module_get(owner)) {
@@ -656,8 +663,9 @@ EXPORT_SYMBOL_GPL(pstore_unregister);
static void decompress_record(struct pstore_record *record)
{
+ int ret;
int unzipped_len;
- char *decompressed;
+ char *unzipped, *workspace;
if (!record->compressed)
return;
@@ -668,35 +676,42 @@ static void decompress_record(struct pstore_record *record)
return;
}
- /* No compression method has created the common buffer. */
+ /* Missing compression buffer means compression was not initialized. */
if (!big_oops_buf) {
- pr_warn("no decompression buffer allocated\n");
+ pr_warn("no decompression method initialized!\n");
return;
}
- unzipped_len = pstore_decompress(record->buf, big_oops_buf,
- record->size, big_oops_buf_sz);
- if (unzipped_len <= 0) {
- pr_err("decompression failed: %d\n", unzipped_len);
+ /* Allocate enough space to hold max decompression and ECC. */
+ unzipped_len = big_oops_buf_sz;
+ workspace = kmalloc(unzipped_len + record->ecc_notice_size,
+ GFP_KERNEL);
+ if (!workspace)
return;
- }
- /* Build new buffer for decompressed contents. */
- decompressed = kmalloc(unzipped_len + record->ecc_notice_size,
- GFP_KERNEL);
- if (!decompressed) {
- pr_err("decompression ran out of memory\n");
+ /* After decompression "unzipped_len" is almost certainly smaller. */
+ ret = crypto_comp_decompress(tfm, record->buf, record->size,
+ workspace, &unzipped_len);
+ if (ret) {
+ pr_err("crypto_comp_decompress failed, ret = %d!\n", ret);
+ kfree(workspace);
return;
}
- memcpy(decompressed, big_oops_buf, unzipped_len);
/* Append ECC notice to decompressed buffer. */
- memcpy(decompressed + unzipped_len, record->buf + record->size,
+ memcpy(workspace + unzipped_len, record->buf + record->size,
record->ecc_notice_size);
- /* Swap out compresed contents with decompressed contents. */
+ /* Copy decompressed contents into a minimum-sized allocation. */
+ unzipped = kmemdup(workspace, unzipped_len + record->ecc_notice_size,
+ GFP_KERNEL);
+ kfree(workspace);
+ if (!unzipped)
+ return;
+
+ /* Swap out compressed contents with decompressed contents. */
kfree(record->buf);
- record->buf = decompressed;
+ record->buf = unzipped;
record->size = unzipped_len;
record->compressed = false;
}
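
pstore_dump() above trades the spinlock plus is_locked flag for a semaphore: try to take it first, and fall back to sleeping only when pstore_cannot_wait() says the dump reason allows it. A userspace sketch of that trylock-then-decide shape using POSIX semaphores; the reason values are invented:

    #include <semaphore.h>
    #include <stdbool.h>
    #include <stdio.h>

    static sem_t buf_lock;

    enum reason { REASON_OOPS, REASON_PANIC };

    /* analogue of pstore_cannot_wait(): panic paths must not block */
    static bool cannot_wait(enum reason r)
    {
        return r == REASON_PANIC;
    }

    static void dump(enum reason r)
    {
        if (sem_trywait(&buf_lock) != 0) {
            if (cannot_wait(r)) {
                fprintf(stderr, "dump skipped: lock busy\n");
                return;
            }
            sem_wait(&buf_lock); /* safe to sleep here */
        }
        printf("dumping (reason %d)\n", (int)r);
        sem_post(&buf_lock);
    }

    int main(void)
    {
        sem_init(&buf_lock, 0, 1);
        dump(REASON_OOPS);
        dump(REASON_PANIC);
        sem_destroy(&buf_lock);
        return 0;
    }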
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index ffcff6516e89..96f7d32cd184 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -124,19 +124,17 @@ static int ramoops_pstore_open(struct pstore_info *psi)
}
static struct persistent_ram_zone *
-ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
- u64 *id,
- enum pstore_type_id *typep, enum pstore_type_id type,
- bool update)
+ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
+ struct pstore_record *record)
{
struct persistent_ram_zone *prz;
- int i = (*c)++;
+ bool update = (record->type == PSTORE_TYPE_DMESG);
/* Give up if we never existed or have hit the end. */
- if (!przs || i >= max)
+ if (!przs)
return NULL;
- prz = przs[i];
+ prz = przs[id];
if (!prz)
return NULL;
@@ -147,8 +145,8 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
if (!persistent_ram_old_size(prz))
return NULL;
- *typep = type;
- *id = i;
+ record->type = prz->type;
+ record->id = id;
return prz;
}
@@ -255,10 +253,8 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record)
/* Find the next valid persistent_ram_zone for DMESG */
while (cxt->dump_read_cnt < cxt->max_dump_cnt && !prz) {
- prz = ramoops_get_next_prz(cxt->dprzs, &cxt->dump_read_cnt,
- cxt->max_dump_cnt, &record->id,
- &record->type,
- PSTORE_TYPE_DMESG, 1);
+ prz = ramoops_get_next_prz(cxt->dprzs, cxt->dump_read_cnt++,
+ record);
if (!prz_ok(prz))
continue;
header_length = ramoops_read_kmsg_hdr(persistent_ram_old(prz),
@@ -272,22 +268,18 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record)
}
}
- if (!prz_ok(prz))
- prz = ramoops_get_next_prz(&cxt->cprz, &cxt->console_read_cnt,
- 1, &record->id, &record->type,
- PSTORE_TYPE_CONSOLE, 0);
+ if (!prz_ok(prz) && !cxt->console_read_cnt++)
+ prz = ramoops_get_next_prz(&cxt->cprz, 0 /* single */, record);
- if (!prz_ok(prz))
- prz = ramoops_get_next_prz(&cxt->mprz, &cxt->pmsg_read_cnt,
- 1, &record->id, &record->type,
- PSTORE_TYPE_PMSG, 0);
+ if (!prz_ok(prz) && !cxt->pmsg_read_cnt++)
+ prz = ramoops_get_next_prz(&cxt->mprz, 0 /* single */, record);
/* ftrace is last since it may want to dynamically allocate memory. */
if (!prz_ok(prz)) {
- if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)) {
- prz = ramoops_get_next_prz(cxt->fprzs,
- &cxt->ftrace_read_cnt, 1, &record->id,
- &record->type, PSTORE_TYPE_FTRACE, 0);
+ if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU) &&
+ !cxt->ftrace_read_cnt++) {
+ prz = ramoops_get_next_prz(cxt->fprzs, 0 /* single */,
+ record);
} else {
/*
* Build a new dummy record which combines all the
@@ -299,15 +291,12 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record)
GFP_KERNEL);
if (!tmp_prz)
return -ENOMEM;
+ prz = tmp_prz;
free_prz = true;
while (cxt->ftrace_read_cnt < cxt->max_ftrace_cnt) {
prz_next = ramoops_get_next_prz(cxt->fprzs,
- &cxt->ftrace_read_cnt,
- cxt->max_ftrace_cnt,
- &record->id,
- &record->type,
- PSTORE_TYPE_FTRACE, 0);
+ cxt->ftrace_read_cnt++, record);
if (!prz_ok(prz_next))
continue;
@@ -321,7 +310,6 @@ static ssize_t ramoops_pstore_read(struct pstore_record *record)
goto out;
}
record->id = 0;
- prz = tmp_prz;
}
}
@@ -611,6 +599,7 @@ static int ramoops_init_przs(const char *name,
goto fail;
}
*paddr += zone_sz;
+ prz_ar[i]->type = pstore_name_to_type(name);
}
*przs = prz_ar;
@@ -640,7 +629,7 @@ static int ramoops_init_prz(const char *name,
label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
- cxt->memtype, 0, label);
+ cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
if (IS_ERR(*prz)) {
int err = PTR_ERR(*prz);
@@ -649,9 +638,8 @@ static int ramoops_init_prz(const char *name,
return err;
}
- persistent_ram_zap(*prz);
-
*paddr += sz;
+ (*prz)->type = pstore_name_to_type(name);
return 0;
}
@@ -787,7 +775,7 @@ static int ramoops_probe(struct platform_device *pdev)
dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
- cxt->pmsg_size;
- err = ramoops_init_przs("dump", dev, cxt, &cxt->dprzs, &paddr,
+ err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr,
dump_mem_sz, cxt->record_size,
&cxt->max_dump_cnt, 0, 0);
if (err)
@@ -816,21 +804,17 @@ static int ramoops_probe(struct platform_device *pdev)
cxt->pstore.data = cxt;
/*
- * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we
- * have to handle dumps, we must have at least record_size buffer. And
- * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be
- * ZERO_SIZE_PTR).
+ * Since bufsize is only used for dmesg crash dumps, it
+ * must match the size of the dprz record (after PRZ header
+ * and ECC bytes have been accounted for).
*/
- if (cxt->console_size)
- cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */
- cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
- cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
+ cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
+ cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
if (!cxt->pstore.buf) {
- pr_err("cannot allocate pstore buffer\n");
+ pr_err("cannot allocate pstore crash dump buffer\n");
err = -ENOMEM;
goto fail_clear;
}
- spin_lock_init(&cxt->pstore.buf_lock);
cxt->pstore.flags = PSTORE_FLAGS_DMESG;
if (cxt->console_size)
@@ -858,9 +842,9 @@ static int ramoops_probe(struct platform_device *pdev)
ramoops_pmsg_size = pdata->pmsg_size;
ramoops_ftrace_size = pdata->ftrace_size;
- pr_info("attached 0x%lx@0x%llx, ecc: %d/%d\n",
+ pr_info("using 0x%lx@0x%llx, ecc: %d\n",
cxt->size, (unsigned long long)cxt->phys_addr,
- cxt->ecc_info.ecc_size, cxt->ecc_info.block_size);
+ cxt->ecc_info.ecc_size);
return 0;
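
ramoops_pstore_read() now guards each single-instance zone with the `!counter++` idiom, so the zone is tried exactly once across the repeated read calls pstore makes. A tiny demonstration of why that test is true only the first time:

    #include <stdio.h>

    int main(void)
    {
        unsigned console_read_cnt = 0;

        for (int call = 0; call < 3; call++) {
            /* true on the first call only; increments every time */
            if (!console_read_cnt++)
                printf("call %d: reading console zone\n", call);
            else
                printf("call %d: console zone already consumed\n", call);
        }
        return 0;
    }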
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 12e21f789194..c11711c2cc83 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -12,7 +12,7 @@
*
*/
-#define pr_fmt(fmt) "persistent_ram: " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/err.h>
@@ -29,6 +29,16 @@
#include <linux/vmalloc.h>
#include <asm/page.h>
+/**
+ * struct persistent_ram_buffer - persistent circular RAM buffer
+ *
+ * @sig:
+ * signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
+ * @start:
+ * offset into @data where the stored bytes begin
+ * @size:
+ * number of valid bytes stored in @data
+ */
struct persistent_ram_buffer {
uint32_t sig;
atomic_t start;
@@ -443,7 +453,8 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
void *va;
if (!request_mem_region(start, size, label ?: "ramoops")) {
- pr_err("request mem region (0x%llx@0x%llx) failed\n",
+ pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
+ label ?: "ramoops",
(unsigned long long)size, (unsigned long long)start);
return NULL;
}
@@ -489,32 +500,42 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
struct persistent_ram_ecc_info *ecc_info)
{
int ret;
+ bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD);
ret = persistent_ram_init_ecc(prz, ecc_info);
- if (ret)
+ if (ret) {
+ pr_warn("ECC failed %s\n", prz->label);
return ret;
+ }
sig ^= PERSISTENT_RAM_SIG;
if (prz->buffer->sig == sig) {
+ if (buffer_size(prz) == 0) {
+ pr_debug("found existing empty buffer\n");
+ return 0;
+ }
+
if (buffer_size(prz) > prz->buffer_size ||
- buffer_start(prz) > buffer_size(prz))
+ buffer_start(prz) > buffer_size(prz)) {
pr_info("found existing invalid buffer, size %zu, start %zu\n",
buffer_size(prz), buffer_start(prz));
- else {
+ zap = true;
+ } else {
pr_debug("found existing buffer, size %zu, start %zu\n",
buffer_size(prz), buffer_start(prz));
persistent_ram_save_old(prz);
- return 0;
}
} else {
pr_debug("no valid data in buffer (sig = 0x%08x)\n",
prz->buffer->sig);
+ prz->buffer->sig = sig;
+ zap = true;
}
- /* Rewind missing or invalid memory area. */
- prz->buffer->sig = sig;
- persistent_ram_zap(prz);
+ /* Reset missing, invalid, or single-use memory area. */
+ if (zap)
+ persistent_ram_zap(prz);
return 0;
}
@@ -572,6 +593,12 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
if (ret)
goto err;
+ pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
+ prz->label, prz->size, (unsigned long long)prz->paddr,
+ sizeof(*prz->buffer), prz->buffer_size,
+ prz->size - sizeof(*prz->buffer) - prz->buffer_size,
+ prz->ecc_info.ecc_size, prz->ecc_info.block_size);
+
return prz;
err:
persistent_ram_free(prz);
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index f0cbf58ad4da..fd5dd806f1b9 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -791,7 +791,8 @@ static int quotactl_cmd_write(int cmd)
/* Return true if quotactl command is manipulating quota on/off state */
static bool quotactl_cmd_onoff(int cmd)
{
- return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF);
+ return (cmd == Q_QUOTAON) || (cmd == Q_QUOTAOFF) ||
+ (cmd == Q_XQUOTAON) || (cmd == Q_XQUOTAOFF);
}
/*
diff --git a/fs/read_write.c b/fs/read_write.c
index 4dae0399c75a..58f30537c47a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1956,7 +1956,7 @@ loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
struct inode *inode_out = file_inode(file_out);
loff_t ret;
- WARN_ON_ONCE(remap_flags);
+ WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);
if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
return -EISDIR;
diff --git a/fs/select.c b/fs/select.c
index 22b3bf89f051..4c8652390c94 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -287,12 +287,18 @@ int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec)
return 0;
}
+enum poll_time_type {
+ PT_TIMEVAL = 0,
+ PT_OLD_TIMEVAL = 1,
+ PT_TIMESPEC = 2,
+ PT_OLD_TIMESPEC = 3,
+};
+
static int poll_select_copy_remaining(struct timespec64 *end_time,
void __user *p,
- int timeval, int ret)
+ enum poll_time_type pt_type, int ret)
{
struct timespec64 rts;
- struct timeval rtv;
if (!p)
return ret;
@@ -310,18 +316,40 @@ static int poll_select_copy_remaining(struct timespec64 *end_time,
rts.tv_sec = rts.tv_nsec = 0;
- if (timeval) {
- if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
- memset(&rtv, 0, sizeof(rtv));
- rtv.tv_sec = rts.tv_sec;
- rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
+ switch (pt_type) {
+ case PT_TIMEVAL:
+ {
+ struct timeval rtv;
- if (!copy_to_user(p, &rtv, sizeof(rtv)))
+ if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
+ memset(&rtv, 0, sizeof(rtv));
+ rtv.tv_sec = rts.tv_sec;
+ rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
+ if (!copy_to_user(p, &rtv, sizeof(rtv)))
+ return ret;
+ }
+ break;
+ case PT_OLD_TIMEVAL:
+ {
+ struct old_timeval32 rtv;
+
+ rtv.tv_sec = rts.tv_sec;
+ rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
+ if (!copy_to_user(p, &rtv, sizeof(rtv)))
+ return ret;
+ }
+ break;
+ case PT_TIMESPEC:
+ if (!put_timespec64(&rts, p))
return ret;
-
- } else if (!put_timespec64(&rts, p))
- return ret;
-
+ break;
+ case PT_OLD_TIMESPEC:
+ if (!put_old_timespec32(&rts, p))
+ return ret;
+ break;
+ default:
+ BUG();
+ }
/*
* If an application puts its timeval in read-only memory, we
* don't want the Linux-specific update to the timeval to
@@ -689,7 +717,7 @@ static int kern_select(int n, fd_set __user *inp, fd_set __user *outp,
}
ret = core_sys_select(n, inp, outp, exp, to);
- ret = poll_select_copy_remaining(&end_time, tvp, 1, ret);
+ ret = poll_select_copy_remaining(&end_time, tvp, PT_TIMEVAL, ret);
return ret;
}
@@ -701,49 +729,41 @@ SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
}
static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, struct timespec __user *tsp,
- const sigset_t __user *sigmask, size_t sigsetsize)
+ fd_set __user *exp, void __user *tsp,
+ const sigset_t __user *sigmask, size_t sigsetsize,
+ enum poll_time_type type)
{
sigset_t ksigmask, sigsaved;
struct timespec64 ts, end_time, *to = NULL;
int ret;
if (tsp) {
- if (get_timespec64(&ts, tsp))
- return -EFAULT;
+ switch (type) {
+ case PT_TIMESPEC:
+ if (get_timespec64(&ts, tsp))
+ return -EFAULT;
+ break;
+ case PT_OLD_TIMESPEC:
+ if (get_old_timespec32(&ts, tsp))
+ return -EFAULT;
+ break;
+ default:
+ BUG();
+ }
to = &end_time;
if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
return -EINVAL;
}
- if (sigmask) {
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
- if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
- return -EFAULT;
-
- sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
- sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
- }
+ ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
+ if (ret)
+ return ret;
ret = core_sys_select(n, inp, outp, exp, to);
- ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);
+ ret = poll_select_copy_remaining(&end_time, tsp, type, ret);
- if (ret == -ERESTARTNOHAND) {
- /*
- * Don't restore the signal mask yet. Let do_signal() deliver
- * the signal on the way back to userspace, before the signal
- * mask is restored.
- */
- if (sigmask) {
- memcpy(&current->saved_sigmask, &sigsaved,
- sizeof(sigsaved));
- set_restore_sigmask();
- }
- } else if (sigmask)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ restore_user_sigmask(sigmask, &sigsaved);
return ret;
}
@@ -755,7 +775,7 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
* the sigset size.
*/
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
- fd_set __user *, exp, struct timespec __user *, tsp,
+ fd_set __user *, exp, struct __kernel_timespec __user *, tsp,
void __user *, sig)
{
size_t sigsetsize = 0;
@@ -769,9 +789,31 @@ SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
return -EFAULT;
}
- return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
+ return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_TIMESPEC);
}
+#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
+
+SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *, outp,
+ fd_set __user *, exp, struct old_timespec32 __user *, tsp,
+ void __user *, sig)
+{
+ size_t sigsetsize = 0;
+ sigset_t __user *up = NULL;
+
+ if (sig) {
+ if (!access_ok(VERIFY_READ, sig, sizeof(void *)+sizeof(size_t))
+ || __get_user(up, (sigset_t __user * __user *)sig)
+ || __get_user(sigsetsize,
+ (size_t __user *)(sig+sizeof(void *))))
+ return -EFAULT;
+ }
+
+ return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_OLD_TIMESPEC);
+}
+
+#endif
+
#ifdef __ARCH_WANT_SYS_OLD_SELECT
struct sel_arg_struct {
unsigned long n;
@@ -1045,7 +1087,7 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
}
SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
- struct timespec __user *, tsp, const sigset_t __user *, sigmask,
+ struct __kernel_timespec __user *, tsp, const sigset_t __user *, sigmask,
size_t, sigsetsize)
{
sigset_t ksigmask, sigsaved;
@@ -1061,89 +1103,62 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
return -EINVAL;
}
- if (sigmask) {
- /* XXX: Don't preclude handling different sized sigset_t's. */
- if (sigsetsize != sizeof(sigset_t))
- return -EINVAL;
- if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
- return -EFAULT;
-
- sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
- sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
- }
+ ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
+ if (ret)
+ return ret;
ret = do_sys_poll(ufds, nfds, to);
+ restore_user_sigmask(sigmask, &sigsaved);
+
/* We can restart this syscall, usually */
- if (ret == -EINTR) {
- /*
- * Don't restore the signal mask yet. Let do_signal() deliver
- * the signal on the way back to userspace, before the signal
- * mask is restored.
- */
- if (sigmask) {
- memcpy(&current->saved_sigmask, &sigsaved,
- sizeof(sigsaved));
- set_restore_sigmask();
- }
+ if (ret == -EINTR)
ret = -ERESTARTNOHAND;
- } else if (sigmask)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
- ret = poll_select_copy_remaining(&end_time, tsp, 0, ret);
+ ret = poll_select_copy_remaining(&end_time, tsp, PT_TIMESPEC, ret);
return ret;
}
-#ifdef CONFIG_COMPAT
-#define __COMPAT_NFDBITS (8 * sizeof(compat_ulong_t))
+#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
-static
-int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *p,
- int timeval, int ret)
+SYSCALL_DEFINE5(ppoll_time32, struct pollfd __user *, ufds, unsigned int, nfds,
+ struct old_timespec32 __user *, tsp, const sigset_t __user *, sigmask,
+ size_t, sigsetsize)
{
- struct timespec64 ts;
+ sigset_t ksigmask, sigsaved;
+ struct timespec64 ts, end_time, *to = NULL;
+ int ret;
- if (!p)
- return ret;
+ if (tsp) {
+ if (get_old_timespec32(&ts, tsp))
+ return -EFAULT;
- if (current->personality & STICKY_TIMEOUTS)
- goto sticky;
+ to = &end_time;
+ if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
+ return -EINVAL;
+ }
- /* No update for zero timeout */
- if (!end_time->tv_sec && !end_time->tv_nsec)
+ ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
+ if (ret)
return ret;
- ktime_get_ts64(&ts);
- ts = timespec64_sub(*end_time, ts);
- if (ts.tv_sec < 0)
- ts.tv_sec = ts.tv_nsec = 0;
+ ret = do_sys_poll(ufds, nfds, to);
- if (timeval) {
- struct old_timeval32 rtv;
+ restore_user_sigmask(sigmask, &sigsaved);
- rtv.tv_sec = ts.tv_sec;
- rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+ /* We can restart this syscall, usually */
+ if (ret == -EINTR)
+ ret = -ERESTARTNOHAND;
- if (!copy_to_user(p, &rtv, sizeof(rtv)))
- return ret;
- } else {
- if (!put_old_timespec32(&ts, p))
- return ret;
- }
- /*
- * If an application puts its timeval in read-only memory, we
- * don't want the Linux-specific update to the timeval to
- * cause a fault after the select has completed
- * successfully. However, because we're not updating the
- * timeval, we can't restart the system call.
- */
+ ret = poll_select_copy_remaining(&end_time, tsp, PT_OLD_TIMESPEC, ret);
-sticky:
- if (ret == -ERESTARTNOHAND)
- ret = -EINTR;
return ret;
}
+#endif
+
+#ifdef CONFIG_COMPAT
+#define __COMPAT_NFDBITS (8 * sizeof(compat_ulong_t))
/*
* Ooo, nasty. We need here to frob 32-bit unsigned longs to
@@ -1275,7 +1290,7 @@ static int do_compat_select(int n, compat_ulong_t __user *inp,
}
ret = compat_core_sys_select(n, inp, outp, exp, to);
- ret = compat_poll_select_copy_remaining(&end_time, tvp, 1, ret);
+ ret = poll_select_copy_remaining(&end_time, tvp, PT_OLD_TIMEVAL, ret);
return ret;
}
@@ -1307,52 +1322,66 @@ COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
static long do_compat_pselect(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct old_timespec32 __user *tsp, compat_sigset_t __user *sigmask,
- compat_size_t sigsetsize)
+ void __user *tsp, compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize, enum poll_time_type type)
{
sigset_t ksigmask, sigsaved;
struct timespec64 ts, end_time, *to = NULL;
int ret;
if (tsp) {
- if (get_old_timespec32(&ts, tsp))
- return -EFAULT;
+ switch (type) {
+ case PT_OLD_TIMESPEC:
+ if (get_old_timespec32(&ts, tsp))
+ return -EFAULT;
+ break;
+ case PT_TIMESPEC:
+ if (get_timespec64(&ts, tsp))
+ return -EFAULT;
+ break;
+ default:
+ BUG();
+ }
to = &end_time;
if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
return -EINVAL;
}
- if (sigmask) {
- if (sigsetsize != sizeof(compat_sigset_t))
- return -EINVAL;
- if (get_compat_sigset(&ksigmask, sigmask))
- return -EFAULT;
-
- sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
- sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
- }
+ ret = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
+ if (ret)
+ return ret;
ret = compat_core_sys_select(n, inp, outp, exp, to);
- ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);
+ ret = poll_select_copy_remaining(&end_time, tsp, type, ret);
- if (ret == -ERESTARTNOHAND) {
- /*
- * Don't restore the signal mask yet. Let do_signal() deliver
- * the signal on the way back to userspace, before the signal
- * mask is restored.
- */
- if (sigmask) {
- memcpy(&current->saved_sigmask, &sigsaved,
- sizeof(sigsaved));
- set_restore_sigmask();
- }
- } else if (sigmask)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ restore_user_sigmask(sigmask, &sigsaved);
return ret;
}
+COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
+ compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
+ struct __kernel_timespec __user *, tsp, void __user *, sig)
+{
+ compat_size_t sigsetsize = 0;
+ compat_uptr_t up = 0;
+
+ if (sig) {
+ if (!access_ok(VERIFY_READ, sig,
+ sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
+ __get_user(up, (compat_uptr_t __user *)sig) ||
+ __get_user(sigsetsize,
+ (compat_size_t __user *)(sig+sizeof(up))))
+ return -EFAULT;
+ }
+
+ return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
+ sigsetsize, PT_TIMESPEC);
+}
+
+#if defined(CONFIG_COMPAT_32BIT_TIME)
+
COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
struct old_timespec32 __user *, tsp, void __user *, sig)
@@ -1368,10 +1397,14 @@ COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
(compat_size_t __user *)(sig+sizeof(up))))
return -EFAULT;
}
+
return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
- sigsetsize);
+ sigsetsize, PT_OLD_TIMESPEC);
}
+#endif
+
+#if defined(CONFIG_COMPAT_32BIT_TIME)
COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
unsigned int, nfds, struct old_timespec32 __user *, tsp,
const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
@@ -1389,36 +1422,57 @@ COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
return -EINVAL;
}
- if (sigmask) {
- if (sigsetsize != sizeof(compat_sigset_t))
- return -EINVAL;
- if (get_compat_sigset(&ksigmask, sigmask))
+ ret = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
+ if (ret)
+ return ret;
+
+ ret = do_sys_poll(ufds, nfds, to);
+
+ restore_user_sigmask(sigmask, &sigsaved);
+
+ /* We can restart this syscall, usually */
+ if (ret == -EINTR)
+ ret = -ERESTARTNOHAND;
+
+ ret = poll_select_copy_remaining(&end_time, tsp, PT_OLD_TIMESPEC, ret);
+
+ return ret;
+}
+#endif
+
+/* New compat syscall for 64-bit time_t */
+COMPAT_SYSCALL_DEFINE5(ppoll_time64, struct pollfd __user *, ufds,
+ unsigned int, nfds, struct __kernel_timespec __user *, tsp,
+ const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
+{
+ sigset_t ksigmask, sigsaved;
+ struct timespec64 ts, end_time, *to = NULL;
+ int ret;
+
+ if (tsp) {
+ if (get_timespec64(&ts, tsp))
return -EFAULT;
- sigdelsetmask(&ksigmask, sigmask(SIGKILL)|sigmask(SIGSTOP));
- sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
+ to = &end_time;
+ if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))
+ return -EINVAL;
}
+ ret = set_compat_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
+ if (ret)
+ return ret;
+
ret = do_sys_poll(ufds, nfds, to);
+ restore_user_sigmask(sigmask, &sigsaved);
+
/* We can restart this syscall, usually */
- if (ret == -EINTR) {
- /*
- * Don't restore the signal mask yet. Let do_signal() deliver
- * the signal on the way back to userspace, before the signal
- * mask is restored.
- */
- if (sigmask) {
- memcpy(&current->saved_sigmask, &sigsaved,
- sizeof(sigsaved));
- set_restore_sigmask();
- }
+ if (ret == -EINTR)
ret = -ERESTARTNOHAND;
- } else if (sigmask)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
- ret = compat_poll_select_copy_remaining(&end_time, tsp, 0, ret);
+ ret = poll_select_copy_remaining(&end_time, tsp, PT_TIMESPEC, ret);
return ret;
}
+
#endif
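
The select.c rework replaces the old timeval/timespec boolean with an explicit enum poll_time_type, letting one copy-back helper serve all four userspace timeout layouts. A sketch of dispatching on such a tag; the structs below are simplified stand-ins for the ABI types:

    #include <stdio.h>
    #include <stdint.h>

    enum poll_time_type { PT_TIMEVAL, PT_OLD_TIMEVAL, PT_TIMESPEC, PT_OLD_TIMESPEC };

    struct ts64 { int64_t sec; int64_t nsec; };

    /* analogue of poll_select_copy_remaining(): one switch, four layouts */
    static void copy_remaining(const struct ts64 *rts, enum poll_time_type t)
    {
        switch (t) {
        case PT_TIMEVAL:
        case PT_OLD_TIMEVAL:
            printf("tv { %lld s, %lld us }\n",
                   (long long)rts->sec, (long long)rts->nsec / 1000);
            break;
        case PT_TIMESPEC:
        case PT_OLD_TIMESPEC:
            printf("ts { %lld s, %lld ns }\n",
                   (long long)rts->sec, (long long)rts->nsec);
            break;
        }
    }

    int main(void)
    {
        struct ts64 left = { 1, 500000000 };

        copy_remaining(&left, PT_TIMEVAL);      /* tv { 1 s, 500000 us } */
        copy_remaining(&left, PT_OLD_TIMESPEC); /* ts { 1 s, 500000000 ns } */
        return 0;
    }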
diff --git a/fs/splice.c b/fs/splice.c
index 3553f1956508..de2ede048473 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -945,11 +945,16 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
sd->flags &= ~SPLICE_F_NONBLOCK;
more = sd->flags & SPLICE_F_MORE;
+ WARN_ON_ONCE(pipe->nrbufs != 0);
+
while (len) {
size_t read_len;
loff_t pos = sd->pos, prev_pos = pos;
- ret = do_splice_to(in, &pos, pipe, len, flags);
+ /* Don't try to read more than the pipe has space for. */
+ read_len = min_t(size_t, len,
+ (pipe->buffers - pipe->nrbufs) << PAGE_SHIFT);
+ ret = do_splice_to(in, &pos, pipe, read_len, flags);
if (unlikely(ret <= 0))
goto out_release;
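
The splice change caps each read at the space left in the pipe, computed as (pipe->buffers - pipe->nrbufs) << PAGE_SHIFT. A quick arithmetic sketch, assuming the common 16-slot pipe and 4 KiB pages:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12 /* 4 KiB pages assumed */

    static size_t clamp_read_len(size_t len, unsigned buffers, unsigned nrbufs)
    {
        size_t space = (size_t)(buffers - nrbufs) << PAGE_SHIFT;

        return len < space ? len : space; /* min_t(size_t, len, space) */
    }

    int main(void)
    {
        /* 16-slot pipe, 3 slots already used: 13 * 4096 = 53248 bytes free */
        printf("%zu\n", clamp_read_len(1 << 20, 16, 3)); /* 53248 */
        printf("%zu\n", clamp_read_len(4096, 16, 3));    /* 4096 */
        return 0;
    }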
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 0a7252aecfa5..bb71db63c99c 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -334,7 +334,7 @@ int sysfs_create_file_ns(struct kobject *kobj, const struct attribute *attr,
}
EXPORT_SYMBOL_GPL(sysfs_create_file_ns);
-int sysfs_create_files(struct kobject *kobj, const struct attribute **ptr)
+int sysfs_create_files(struct kobject *kobj, const struct attribute * const *ptr)
{
int err = 0;
int i;
@@ -493,7 +493,7 @@ bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr)
return ret;
}
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **ptr)
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *ptr)
{
int i;
for (i = 0; ptr[i]; i++)
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 499a20a5a010..273736f41be3 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -275,7 +275,7 @@ static int __sysv_write_inode(struct inode *inode, int wait)
}
}
brelse(bh);
- return 0;
+ return err;
}
int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 529856fbccd0..bc1e082d921d 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -12,9 +12,10 @@ config UBIFS_FS
help
UBIFS is a file system for flash devices which works on top of UBI.
+if UBIFS_FS
+
config UBIFS_FS_ADVANCED_COMPR
bool "Advanced compression options"
- depends on UBIFS_FS
help
This option allows you to choose explicitly which compressors, if any,
are enabled in UBIFS. Removing compressors means inability to read
@@ -24,7 +25,6 @@ config UBIFS_FS_ADVANCED_COMPR
config UBIFS_FS_LZO
bool "LZO compression support" if UBIFS_FS_ADVANCED_COMPR
- depends on UBIFS_FS
default y
help
LZO compressor is generally faster than zlib but compresses worse.
@@ -32,14 +32,12 @@ config UBIFS_FS_LZO
config UBIFS_FS_ZLIB
bool "ZLIB compression support" if UBIFS_FS_ADVANCED_COMPR
- depends on UBIFS_FS
default y
help
Zlib compresses better than LZO but it is slower. Say 'Y' if unsure.
config UBIFS_ATIME_SUPPORT
- bool "Access time support" if UBIFS_FS
- depends on UBIFS_FS
+ bool "Access time support"
default n
help
Originally UBIFS did not support atime, because it looked like a bad idea due
@@ -54,7 +52,6 @@ config UBIFS_ATIME_SUPPORT
config UBIFS_FS_XATTR
bool "UBIFS XATTR support"
- depends on UBIFS_FS
default y
help
Saying Y here includes support for extended attributes (xattrs).
@@ -65,7 +62,7 @@ config UBIFS_FS_XATTR
config UBIFS_FS_ENCRYPTION
bool "UBIFS Encryption"
- depends on UBIFS_FS && UBIFS_FS_XATTR && BLOCK
+ depends on UBIFS_FS_XATTR && BLOCK
select FS_ENCRYPTION
default n
help
@@ -76,7 +73,7 @@ config UBIFS_FS_ENCRYPTION
config UBIFS_FS_SECURITY
bool "UBIFS Security Labels"
- depends on UBIFS_FS && UBIFS_FS_XATTR
+ depends on UBIFS_FS_XATTR
default y
help
Security labels provide an access control facility to support Linux
@@ -89,6 +86,7 @@ config UBIFS_FS_SECURITY
config UBIFS_FS_AUTHENTICATION
bool "UBIFS authentication support"
+ depends on KEYS
select CRYPTO_HMAC
help
Enable authentication support for UBIFS. This feature offers protection
@@ -96,3 +94,5 @@ config UBIFS_FS_AUTHENTICATION
If you say yes here you should also select a hashing algorithm such as
sha256, these are not selected automatically since there are many
different options.
+
+endif # UBIFS_FS
diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
index 124e965a28b3..5bf5fd08879e 100644
--- a/fs/ubifs/auth.c
+++ b/fs/ubifs/auth.c
@@ -269,8 +269,7 @@ int ubifs_init_authentication(struct ubifs_info *c)
goto out;
}
- c->hash_tfm = crypto_alloc_shash(c->auth_hash_name, 0,
- CRYPTO_ALG_ASYNC);
+ c->hash_tfm = crypto_alloc_shash(c->auth_hash_name, 0, 0);
if (IS_ERR(c->hash_tfm)) {
err = PTR_ERR(c->hash_tfm);
ubifs_err(c, "Can not allocate %s: %d",
@@ -286,7 +285,7 @@ int ubifs_init_authentication(struct ubifs_info *c)
goto out_free_hash;
}
- c->hmac_tfm = crypto_alloc_shash(hmac_name, 0, CRYPTO_ALG_ASYNC);
+ c->hmac_tfm = crypto_alloc_shash(hmac_name, 0, 0);
if (IS_ERR(c->hmac_tfm)) {
err = PTR_ERR(c->hmac_tfm);
ubifs_err(c, "Can not allocate %s: %d", hmac_name, err);
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index d1d5e96350dd..b0c5f06128b5 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -1675,6 +1675,12 @@ int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash)
if (!ubifs_authenticated(c))
return 0;
+ if (!c->nroot) {
+ err = ubifs_read_nnode(c, NULL, 0);
+ if (err)
+ return err;
+ }
+
desc = ubifs_hash_get_desc(c);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -1685,12 +1691,6 @@ int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash)
goto out;
}
- if (!c->nroot) {
- err = ubifs_read_nnode(c, NULL, 0);
- if (err)
- return err;
- }
-
cnode = (struct ubifs_cnode *)c->nroot;
while (cnode) {
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 75f961c4c044..0a0e65c07c6d 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -213,6 +213,38 @@ static int trun_remove_range(struct ubifs_info *c, struct replay_entry *r)
}
/**
+ * inode_still_linked - check whether the inode in question will be re-linked.
+ * @c: UBIFS file-system description object
+ * @rino: replay entry to test
+ *
+ * O_TMPFILE files can be re-linked; this means the link count goes from 0 to
+ * 1. This case needs special care, otherwise all references to the inode
+ * would be removed as soon as the first replay entry of an inode with link
+ * count 0 is found.
+ */
+static bool inode_still_linked(struct ubifs_info *c, struct replay_entry *rino)
+{
+ struct replay_entry *r;
+
+ ubifs_assert(c, rino->deletion);
+ ubifs_assert(c, key_type(c, &rino->key) == UBIFS_INO_KEY);
+
+ /*
+ * Find the most recent entry for the inode behind @rino and check
+ * whether it is a deletion.
+ */
+ list_for_each_entry_reverse(r, &c->replay_list, list) {
+ ubifs_assert(c, r->sqnum >= rino->sqnum);
+ if (key_inum(c, &r->key) == key_inum(c, &rino->key))
+ return r->deletion == 0;
+ }
+
+ ubifs_assert(c, 0);
+ return false;
+}
+
+/**
* apply_replay_entry - apply a replay entry to the TNC.
* @c: UBIFS file-system description object
* @r: replay entry to apply
@@ -239,6 +271,11 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
{
ino_t inum = key_inum(c, &r->key);
+ if (inode_still_linked(c, r)) {
+ err = 0;
+ break;
+ }
+
err = ubifs_tnc_remove_ino(c, inum);
break;
}
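For context, the link-count 0 -> 1 transition that inode_still_linked() looks for is produced in userspace by O_TMPFILE plus linkat(). A hedged sketch of that sequence (mount point and file name are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char proc[64];
        int fd;

        /* unnamed inode: link count is 0 while only the fd refers to it */
        fd = open("/mnt/ubifs", O_TMPFILE | O_RDWR, 0600);
        if (fd < 0)
                return 1;

        /* give it a name: link count goes 0 -> 1 */
        snprintf(proc, sizeof(proc), "/proc/self/fd/%d", fd);
        if (linkat(AT_FDCWD, proc, AT_FDCWD, "/mnt/ubifs/file",
                   AT_SYMLINK_FOLLOW) < 0)
                return 1;

        close(fd);
        return 0;
}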
@@ -533,6 +570,28 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
return data == 0xFFFFFFFF;
}
+/*
+ * authenticate_sleb_hash and authenticate_sleb_hmac are split out to reduce
+ * the stack usage of authenticate_sleb.
+ */
+static int authenticate_sleb_hash(struct ubifs_info *c, struct shash_desc *log_hash, u8 *hash)
+{
+ SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
+
+ hash_desc->tfm = c->hash_tfm;
+ hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ ubifs_shash_copy_state(c, log_hash, hash_desc);
+ return crypto_shash_final(hash_desc, hash);
+}
+
+static int authenticate_sleb_hmac(struct ubifs_info *c, u8 *hash, u8 *hmac)
+{
+ SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
+
+ hmac_desc->tfm = c->hmac_tfm;
+ hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ return crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac);
+}
+
/**
* authenticate_sleb - authenticate one scan LEB
* @c: UBIFS file-system description object
@@ -574,21 +633,12 @@ static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
if (snod->type == UBIFS_AUTH_NODE) {
struct ubifs_auth_node *auth = snod->node;
- SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
- SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
-
- hash_desc->tfm = c->hash_tfm;
- hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- ubifs_shash_copy_state(c, log_hash, hash_desc);
- err = crypto_shash_final(hash_desc, hash);
+ err = authenticate_sleb_hash(c, log_hash, hash);
if (err)
goto out;
- hmac_desc->tfm = c->hmac_tfm;
- hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_shash_digest(hmac_desc, hash, c->hash_len,
- hmac);
+ err = authenticate_sleb_hmac(c, hash, hmac);
if (err)
goto out;
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 75a69dd26d6e..3da90c951c23 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -63,6 +63,17 @@
/* Default time granularity in nanoseconds */
#define DEFAULT_TIME_GRAN 1000000000
+static int get_default_compressor(struct ubifs_info *c)
+{
+ if (ubifs_compr_present(c, UBIFS_COMPR_LZO))
+ return UBIFS_COMPR_LZO;
+
+ if (ubifs_compr_present(c, UBIFS_COMPR_ZLIB))
+ return UBIFS_COMPR_ZLIB;
+
+ return UBIFS_COMPR_NONE;
+}
+
/**
* create_default_filesystem - format empty UBI volume.
* @c: UBIFS file-system description object
@@ -207,7 +218,7 @@ static int create_default_filesystem(struct ubifs_info *c)
if (c->mount_opts.override_compr)
sup->default_compr = cpu_to_le16(c->mount_opts.compr_type);
else
- sup->default_compr = cpu_to_le16(UBIFS_COMPR_LZO);
+ sup->default_compr = cpu_to_le16(get_default_compressor(c));
generate_random_uuid(sup->uuid);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 5df554a9f9c9..ae796e10f68b 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1357,6 +1357,12 @@ reread:
iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
ICBTAG_FLAG_AD_MASK;
+ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
+ iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
+ iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+ ret = -EIO;
+ goto out;
+ }
iinfo->i_unique = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenExtents = 0;
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 8f2f56d9a1bb..e3d684ea3203 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -827,16 +827,20 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
- if (ret < 0)
- goto out_bh;
-
- strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
+ if (ret < 0) {
+ strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
+ pr_warn("incorrect volume identification, setting to "
+ "'InvalidName'\n");
+ } else {
+ strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
+ }
udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
- if (ret < 0)
+ if (ret < 0) {
+ ret = 0;
goto out_bh;
-
+ }
outstr[ret] = 0;
udf_debug("volSetIdent[] = '%s'\n", outstr);
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 45234791fec2..5fcfa96463eb 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -351,6 +351,11 @@ try_again:
return u_len;
}
+/*
+ * Convert a CS0 dstring to the output charset. Warning: This function may
+ * truncate the input string if it is too long, as it is used for
+ * informational strings only and it is better to truncate the string than to
+ * refuse to mount the media.
+ */
int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len,
const uint8_t *ocu_i, int i_len)
{
@@ -359,9 +364,12 @@ int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len,
if (i_len > 0) {
s_len = ocu_i[i_len - 1];
if (s_len >= i_len) {
- pr_err("incorrect dstring lengths (%d/%d)\n",
- s_len, i_len);
- return -EINVAL;
+ pr_warn("incorrect dstring lengths (%d/%d),"
+ " truncating\n", s_len, i_len);
+ s_len = i_len - 1;
+ /* 2-byte encoding? Need to round properly... */
+ if (ocu_i[0] == 16)
+ s_len -= (s_len - 1) & 2;
}
}
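The rounding exists because an OSTA CS0 dstring stores a 1-byte compression ID followed by fixed-size character units; with compression ID 16 each character takes 2 bytes, so a valid content length is odd (1 + 2n), and clearing the low bit of s_len - 1 truncates to a whole character. A small sketch of the arithmetic, with illustrative names:

/* Sketch: round a 16-bit CS0 dstring length down to a whole character. */
static int cs0_round_len(const unsigned char *ocu, int s_len)
{
        if (ocu[0] == 16)               /* 1 ID byte + 2-byte characters */
                s_len -= (s_len - 1) & 1;
        return s_len;                   /* e.g. 8 -> 7 == 1 + 2 * 3 */
}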
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 356d2b8568c1..59dc28047030 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -926,7 +926,7 @@ static inline struct userfaultfd_wait_queue *find_userfault_in(
wait_queue_entry_t *wq;
struct userfaultfd_wait_queue *uwq;
- VM_BUG_ON(!spin_is_locked(&wqh->lock));
+ lockdep_assert_held(&wqh->lock);
uwq = NULL;
if (!waitqueue_active(wqh))
@@ -1361,6 +1361,19 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
ret = -EINVAL;
if (!vma_can_userfault(cur))
goto out_unlock;
+
+ /*
+ * UFFDIO_COPY will fill file holes even without
+ * PROT_WRITE. This check enforces that if this is a
+ * MAP_SHARED, the process has write permission to the backing
+ * file. If VM_MAYWRITE is set it also enforces that on a
+ * MAP_SHARED vma: there is no F_SEAL_WRITE set and no further
+ * F_SEAL_WRITE can be taken until the vma is destroyed.
+ */
+ ret = -EPERM;
+ if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
+ goto out_unlock;
+
/*
* If this vma contains ending address, and huge pages
* check alignment.
@@ -1406,6 +1419,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
BUG_ON(!vma_can_userfault(vma));
BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
vma->vm_userfaultfd_ctx.ctx != ctx);
+ WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
/*
* Nothing to do: this vma is already registered into this
@@ -1560,6 +1574,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (!vma->vm_userfaultfd_ctx.ctx)
goto skip;
+ WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
+
if (vma->vm_start > start)
start = vma->vm_start;
vma_end = min(end, vma->vm_end);
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index 9345802c99f7..999ad8d00d43 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -414,7 +414,6 @@ xfs_ag_extend_space(
struct aghdr_init_data *id,
xfs_extlen_t len)
{
- struct xfs_owner_info oinfo;
struct xfs_buf *bp;
struct xfs_agi *agi;
struct xfs_agf *agf;
@@ -448,17 +447,17 @@ xfs_ag_extend_space(
/*
* Free the new space.
*
- * XFS_RMAP_OWN_NULL is used here to tell the rmap btree that
+ * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
* this doesn't actually exist in the rmap btree.
*/
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
error = xfs_rmap_free(tp, bp, id->agno,
be32_to_cpu(agf->agf_length) - len,
- len, &oinfo);
+ len, &XFS_RMAP_OINFO_SKIP_UPDATE);
if (error)
return error;
return xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, id->agno,
be32_to_cpu(agf->agf_length) - len),
- len, &oinfo, XFS_AG_RESV_NONE);
+ len, &XFS_RMAP_OINFO_SKIP_UPDATE,
+ XFS_AG_RESV_NONE);
}
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index e1c0c0d2f1b0..b715668886a4 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1594,7 +1594,6 @@ xfs_alloc_ag_vextent_small(
xfs_extlen_t *flenp, /* result length */
int *stat) /* status: 0-freelist, 1-normal/none */
{
- struct xfs_owner_info oinfo;
int error;
xfs_agblock_t fbno;
xfs_extlen_t flen;
@@ -1648,9 +1647,8 @@ xfs_alloc_ag_vextent_small(
* doesn't live in the free space, we need to clear
* out the OWN_AG rmap.
*/
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
error = xfs_rmap_free(args->tp, args->agbp, args->agno,
- fbno, 1, &oinfo);
+ fbno, 1, &XFS_RMAP_OINFO_AG);
if (error)
goto error0;
@@ -1694,28 +1692,28 @@ error0:
*/
STATIC int
xfs_free_ag_extent(
- xfs_trans_t *tp,
- xfs_buf_t *agbp,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo,
- enum xfs_ag_resv_type type)
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ const struct xfs_owner_info *oinfo,
+ enum xfs_ag_resv_type type)
{
- xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
- xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
- int error; /* error return value */
- xfs_agblock_t gtbno; /* start of right neighbor block */
- xfs_extlen_t gtlen; /* length of right neighbor block */
- int haveleft; /* have a left neighbor block */
- int haveright; /* have a right neighbor block */
- int i; /* temp, result code */
- xfs_agblock_t ltbno; /* start of left neighbor block */
- xfs_extlen_t ltlen; /* length of left neighbor block */
- xfs_mount_t *mp; /* mount point struct for filesystem */
- xfs_agblock_t nbno; /* new starting block of freespace */
- xfs_extlen_t nlen; /* new length of freespace */
- xfs_perag_t *pag; /* per allocation group data */
+ struct xfs_mount *mp;
+ struct xfs_perag *pag;
+ struct xfs_btree_cur *bno_cur;
+ struct xfs_btree_cur *cnt_cur;
+ xfs_agblock_t gtbno; /* start of right neighbor */
+ xfs_extlen_t gtlen; /* length of right neighbor */
+ xfs_agblock_t ltbno; /* start of left neighbor */
+ xfs_extlen_t ltlen; /* length of left neighbor */
+ xfs_agblock_t nbno; /* new starting block of freesp */
+ xfs_extlen_t nlen; /* new length of freespace */
+ int haveleft; /* have a left neighbor */
+ int haveright; /* have a right neighbor */
+ int i;
+ int error;
bno_cur = cnt_cur = NULL;
mp = tp->t_mountp;
@@ -2314,10 +2312,11 @@ xfs_alloc_fix_freelist(
* repair/rmap.c in xfsprogs for details.
*/
memset(&targs, 0, sizeof(targs));
+ /* struct copy below */
if (flags & XFS_ALLOC_FLAG_NORMAP)
- xfs_rmap_skip_owner_update(&targs.oinfo);
+ targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
else
- xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
+ targs.oinfo = XFS_RMAP_OINFO_AG;
while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
if (error)
@@ -2435,7 +2434,6 @@ xfs_alloc_get_freelist(
be32_add_cpu(&agf->agf_flcount, -1);
xfs_trans_agflist_delta(tp, -1);
pag->pagf_flcount--;
- xfs_perag_put(pag);
logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
if (btreeblk) {
@@ -2443,6 +2441,7 @@ xfs_alloc_get_freelist(
pag->pagf_btreeblks++;
logflags |= XFS_AGF_BTREEBLKS;
}
+ xfs_perag_put(pag);
xfs_alloc_log_agf(tp, agbp, logflags);
*bnop = bno;
@@ -3008,21 +3007,21 @@ out:
* Just break up the extent address and hand off to xfs_free_ag_extent
* after fixing up the freelist.
*/
-int /* error */
+int
__xfs_free_extent(
- struct xfs_trans *tp, /* transaction pointer */
- xfs_fsblock_t bno, /* starting block number of extent */
- xfs_extlen_t len, /* length of extent */
- struct xfs_owner_info *oinfo, /* extent owner */
- enum xfs_ag_resv_type type, /* block reservation type */
- bool skip_discard)
+ struct xfs_trans *tp,
+ xfs_fsblock_t bno,
+ xfs_extlen_t len,
+ const struct xfs_owner_info *oinfo,
+ enum xfs_ag_resv_type type,
+ bool skip_discard)
{
- struct xfs_mount *mp = tp->t_mountp;
- struct xfs_buf *agbp;
- xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
- xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
- int error;
- unsigned int busy_flags = 0;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_buf *agbp;
+ xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
+ xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
+ int error;
+ unsigned int busy_flags = 0;
ASSERT(len != 0);
ASSERT(type != XFS_AG_RESV_AGFL);
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 00cd5ec4cb6b..d6ed5d2c07c2 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -182,7 +182,7 @@ __xfs_free_extent(
struct xfs_trans *tp, /* transaction pointer */
xfs_fsblock_t bno, /* starting block number of extent */
xfs_extlen_t len, /* length of extent */
- struct xfs_owner_info *oinfo, /* extent owner */
+ const struct xfs_owner_info *oinfo, /* extent owner */
enum xfs_ag_resv_type type, /* block reservation type */
bool skip_discard);
@@ -191,7 +191,7 @@ xfs_free_extent(
struct xfs_trans *tp,
xfs_fsblock_t bno,
xfs_extlen_t len,
- struct xfs_owner_info *oinfo,
+ const struct xfs_owner_info *oinfo,
enum xfs_ag_resv_type type)
{
return __xfs_free_extent(tp, bno, len, oinfo, type, false);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 19e921d1586f..332eefa2700b 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -536,7 +536,7 @@ __xfs_bmap_add_free(
struct xfs_trans *tp,
xfs_fsblock_t bno,
xfs_filblks_t len,
- struct xfs_owner_info *oinfo,
+ const struct xfs_owner_info *oinfo,
bool skip_discard)
{
struct xfs_extent_free_item *new; /* new element */
@@ -564,7 +564,7 @@ __xfs_bmap_add_free(
if (oinfo)
new->xefi_oinfo = *oinfo;
else
- xfs_rmap_skip_owner_update(&new->xefi_oinfo);
+ new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
new->xefi_skip_discard = skip_discard;
trace_xfs_bmap_free_defer(tp->t_mountp,
XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
@@ -3453,7 +3453,7 @@ xfs_bmap_btalloc(
args.tp = ap->tp;
args.mp = mp;
args.fsbno = ap->blkno;
- xfs_rmap_skip_owner_update(&args.oinfo);
+ args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
/* Trim the allocation back to the maximum an AG can fit. */
args.maxlen = min(ap->length, mp->m_ag_max_usable);
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 488dc8860fd7..09d3ea97cc15 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -186,7 +186,7 @@ int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
- xfs_filblks_t len, struct xfs_owner_info *oinfo,
+ xfs_filblks_t len, const struct xfs_owner_info *oinfo,
bool skip_discard);
void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
@@ -234,7 +234,7 @@ xfs_bmap_add_free(
struct xfs_trans *tp,
xfs_fsblock_t bno,
xfs_filblks_t len,
- struct xfs_owner_info *oinfo)
+ const struct xfs_owner_info *oinfo)
{
__xfs_bmap_add_free(tp, bno, len, oinfo, false);
}
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 34c6d7bd4d18..bbdae2b4559f 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -330,7 +330,7 @@ xfs_btree_sblock_verify_crc(
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
- return __this_address;
+ return false;
return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
}
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index e792b167150a..94f00427de98 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -172,7 +172,13 @@
* reoccur.
*/
-static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];
+static const struct xfs_defer_op_type *defer_op_types[] = {
+ [XFS_DEFER_OPS_TYPE_BMAP] = &xfs_bmap_update_defer_type,
+ [XFS_DEFER_OPS_TYPE_REFCOUNT] = &xfs_refcount_update_defer_type,
+ [XFS_DEFER_OPS_TYPE_RMAP] = &xfs_rmap_update_defer_type,
+ [XFS_DEFER_OPS_TYPE_FREE] = &xfs_extent_free_defer_type,
+ [XFS_DEFER_OPS_TYPE_AGFL_FREE] = &xfs_agfl_free_defer_type,
+};
/*
* For each pending item in the intake list, log its intent item and the
@@ -185,15 +191,15 @@ xfs_defer_create_intents(
{
struct list_head *li;
struct xfs_defer_pending *dfp;
+ const struct xfs_defer_op_type *ops;
list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
- dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
- dfp->dfp_count);
+ ops = defer_op_types[dfp->dfp_type];
+ dfp->dfp_intent = ops->create_intent(tp, dfp->dfp_count);
trace_xfs_defer_create_intent(tp->t_mountp, dfp);
- list_sort(tp->t_mountp, &dfp->dfp_work,
- dfp->dfp_type->diff_items);
+ list_sort(tp->t_mountp, &dfp->dfp_work, ops->diff_items);
list_for_each(li, &dfp->dfp_work)
- dfp->dfp_type->log_item(tp, dfp->dfp_intent, li);
+ ops->log_item(tp, dfp->dfp_intent, li);
}
}
@@ -204,14 +210,16 @@ xfs_defer_trans_abort(
struct list_head *dop_pending)
{
struct xfs_defer_pending *dfp;
+ const struct xfs_defer_op_type *ops;
trace_xfs_defer_trans_abort(tp, _RET_IP_);
/* Abort intent items that don't have a done item. */
list_for_each_entry(dfp, dop_pending, dfp_list) {
+ ops = defer_op_types[dfp->dfp_type];
trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
if (dfp->dfp_intent && !dfp->dfp_done) {
- dfp->dfp_type->abort_intent(dfp->dfp_intent);
+ ops->abort_intent(dfp->dfp_intent);
dfp->dfp_intent = NULL;
}
}
@@ -315,18 +323,20 @@ xfs_defer_cancel_list(
struct xfs_defer_pending *pli;
struct list_head *pwi;
struct list_head *n;
+ const struct xfs_defer_op_type *ops;
/*
* Free the pending items. Caller should already have arranged
* for the intent items to be released.
*/
list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
+ ops = defer_op_types[dfp->dfp_type];
trace_xfs_defer_cancel_list(mp, dfp);
list_del(&dfp->dfp_list);
list_for_each_safe(pwi, n, &dfp->dfp_work) {
list_del(pwi);
dfp->dfp_count--;
- dfp->dfp_type->cancel_item(pwi);
+ ops->cancel_item(pwi);
}
ASSERT(dfp->dfp_count == 0);
kmem_free(dfp);
@@ -350,7 +360,7 @@ xfs_defer_finish_noroll(
struct list_head *n;
void *state;
int error = 0;
- void (*cleanup_fn)(struct xfs_trans *, void *, int);
+ const struct xfs_defer_op_type *ops;
LIST_HEAD(dop_pending);
ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -373,18 +383,18 @@ xfs_defer_finish_noroll(
/* Log an intent-done item for the first pending item. */
dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
dfp_list);
+ ops = defer_op_types[dfp->dfp_type];
trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
- dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
+ dfp->dfp_done = ops->create_done(*tp, dfp->dfp_intent,
dfp->dfp_count);
- cleanup_fn = dfp->dfp_type->finish_cleanup;
/* Finish the work items. */
state = NULL;
list_for_each_safe(li, n, &dfp->dfp_work) {
list_del(li);
dfp->dfp_count--;
- error = dfp->dfp_type->finish_item(*tp, li,
- dfp->dfp_done, &state);
+ error = ops->finish_item(*tp, li, dfp->dfp_done,
+ &state);
if (error == -EAGAIN) {
/*
* Caller wants a fresh transaction;
@@ -400,8 +410,8 @@ xfs_defer_finish_noroll(
* xfs_defer_cancel will take care of freeing
* all these lists and stuff.
*/
- if (cleanup_fn)
- cleanup_fn(*tp, state, error);
+ if (ops->finish_cleanup)
+ ops->finish_cleanup(*tp, state, error);
goto out;
}
}
@@ -413,20 +423,19 @@ xfs_defer_finish_noroll(
* a Fresh Transaction while Finishing
* Deferred Work" above.
*/
- dfp->dfp_intent = dfp->dfp_type->create_intent(*tp,
+ dfp->dfp_intent = ops->create_intent(*tp,
dfp->dfp_count);
dfp->dfp_done = NULL;
list_for_each(li, &dfp->dfp_work)
- dfp->dfp_type->log_item(*tp, dfp->dfp_intent,
- li);
+ ops->log_item(*tp, dfp->dfp_intent, li);
} else {
/* Done with the dfp, free it. */
list_del(&dfp->dfp_list);
kmem_free(dfp);
}
- if (cleanup_fn)
- cleanup_fn(*tp, state, error);
+ if (ops->finish_cleanup)
+ ops->finish_cleanup(*tp, state, error);
}
out:
@@ -486,8 +495,10 @@ xfs_defer_add(
struct list_head *li)
{
struct xfs_defer_pending *dfp = NULL;
+ const struct xfs_defer_op_type *ops;
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
+ BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);
/*
* Add the item to a pending item at the end of the intake list.
@@ -497,15 +508,15 @@ xfs_defer_add(
if (!list_empty(&tp->t_dfops)) {
dfp = list_last_entry(&tp->t_dfops,
struct xfs_defer_pending, dfp_list);
- if (dfp->dfp_type->type != type ||
- (dfp->dfp_type->max_items &&
- dfp->dfp_count >= dfp->dfp_type->max_items))
+ ops = defer_op_types[dfp->dfp_type];
+ if (dfp->dfp_type != type ||
+ (ops->max_items && dfp->dfp_count >= ops->max_items))
dfp = NULL;
}
if (!dfp) {
dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
KM_SLEEP | KM_NOFS);
- dfp->dfp_type = defer_op_types[type];
+ dfp->dfp_type = type;
dfp->dfp_intent = NULL;
dfp->dfp_done = NULL;
dfp->dfp_count = 0;
@@ -517,14 +528,6 @@ xfs_defer_add(
dfp->dfp_count++;
}
-/* Initialize a deferred operation list. */
-void
-xfs_defer_init_op_type(
- const struct xfs_defer_op_type *type)
-{
- defer_op_types[type->type] = type;
-}
-
/*
* Move deferred ops from one transaction to another and reset the source to
* initial state. This is primarily used to carry state forward across
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index 2584a5b95b0d..7c28d7608ac6 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -9,20 +9,6 @@
struct xfs_defer_op_type;
/*
- * Save a log intent item and a list of extents, so that we can replay
- * whatever action had to happen to the extent list and file the log done
- * item.
- */
-struct xfs_defer_pending {
- const struct xfs_defer_op_type *dfp_type; /* function pointers */
- struct list_head dfp_list; /* pending items */
- void *dfp_intent; /* log intent item */
- void *dfp_done; /* log done item */
- struct list_head dfp_work; /* work items */
- unsigned int dfp_count; /* # extent items */
-};
-
-/*
* Header for deferred operation list.
*/
enum xfs_defer_ops_type {
@@ -34,6 +20,20 @@ enum xfs_defer_ops_type {
XFS_DEFER_OPS_TYPE_MAX,
};
+/*
+ * Save a log intent item and a list of extents, so that we can replay
+ * whatever action had to happen to the extent list and file the log done
+ * item.
+ */
+struct xfs_defer_pending {
+ struct list_head dfp_list; /* pending items */
+ struct list_head dfp_work; /* work items */
+ void *dfp_intent; /* log intent item */
+ void *dfp_done; /* log done item */
+ unsigned int dfp_count; /* # extent items */
+ enum xfs_defer_ops_type dfp_type;
+};
+
void xfs_defer_add(struct xfs_trans *tp, enum xfs_defer_ops_type type,
struct list_head *h);
int xfs_defer_finish_noroll(struct xfs_trans **tp);
@@ -43,8 +43,6 @@ void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp);
/* Description of a deferred type. */
struct xfs_defer_op_type {
- enum xfs_defer_ops_type type;
- unsigned int max_items;
void (*abort_intent)(void *);
void *(*create_done)(struct xfs_trans *, void *, unsigned int);
int (*finish_item)(struct xfs_trans *, struct list_head *, void *,
@@ -54,8 +52,13 @@ struct xfs_defer_op_type {
int (*diff_items)(void *, struct list_head *, struct list_head *);
void *(*create_intent)(struct xfs_trans *, uint);
void (*log_item)(struct xfs_trans *, void *, struct list_head *);
+ unsigned int max_items;
};
-void xfs_defer_init_op_type(const struct xfs_defer_op_type *type);
+extern const struct xfs_defer_op_type xfs_bmap_update_defer_type;
+extern const struct xfs_defer_op_type xfs_refcount_update_defer_type;
+extern const struct xfs_defer_op_type xfs_rmap_update_defer_type;
+extern const struct xfs_defer_op_type xfs_extent_free_defer_type;
+extern const struct xfs_defer_op_type xfs_agfl_free_defer_type;
#endif /* __XFS_DEFER_H__ */
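The conversion replaces runtime registration with a table the compiler can check: designated initializers keep each slot tied to its enum value regardless of ordering, and the BUILD_BUG_ON added in xfs_defer_add() catches a table that falls out of step with the enum. A generic sketch of the pattern, with illustrative names:

#include <linux/bug.h>
#include <linux/kernel.h>

enum op_type { OP_FOO, OP_BAR, OP_TYPE_MAX };

struct op_ops {
        int (*run)(void);
};

static int noop_run(void) { return 0; }
static const struct op_ops foo_ops = { .run = noop_run };
static const struct op_ops bar_ops = { .run = noop_run };

/* Designated initializers keep slot and enum value tied together. */
static const struct op_ops *op_table[] = {
        [OP_FOO] = &foo_ops,
        [OP_BAR] = &bar_ops,
};

static int run_op(enum op_type t)
{
        BUILD_BUG_ON(ARRAY_SIZE(op_table) != OP_TYPE_MAX);
        return op_table[t]->run();
}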
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 9995d5ae380b..9bb3c48843ec 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -916,6 +916,9 @@ static inline uint xfs_dinode_size(int version)
/*
* Values for di_format
+ *
+ * This enum is used in string mapping in xfs_trace.h; please keep the
+ * TRACE_DEFINE_ENUMs for it up to date.
*/
typedef enum xfs_dinode_fmt {
XFS_DINODE_FMT_DEV, /* xfs_dev_t */
@@ -925,6 +928,13 @@ typedef enum xfs_dinode_fmt {
XFS_DINODE_FMT_UUID /* added long ago, but never used */
} xfs_dinode_fmt_t;
+#define XFS_INODE_FORMAT_STR \
+ { XFS_DINODE_FMT_DEV, "dev" }, \
+ { XFS_DINODE_FMT_LOCAL, "local" }, \
+ { XFS_DINODE_FMT_EXTENTS, "extent" }, \
+ { XFS_DINODE_FMT_BTREE, "btree" }, \
+ { XFS_DINODE_FMT_UUID, "uuid" }
+
/*
* Inode minimum and maximum sizes.
*/
@@ -1083,6 +1093,8 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
((i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp)))
#define XFS_OFFBNO_TO_AGINO(mp,b,o) \
((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o)))
+#define XFS_FSB_TO_INO(mp, b) ((xfs_ino_t)((b) << XFS_INO_OFFSET_BITS(mp)))
+#define XFS_AGB_TO_AGINO(mp, b) ((xfs_agino_t)((b) << XFS_INO_OFFSET_BITS(mp)))
#define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL))
#define XFS_MAXINUMBER_32 ((xfs_ino_t)((1ULL << 32) - 1ULL))
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index a8f6db735d5d..d32152fc8a6c 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -288,7 +288,7 @@ xfs_ialloc_inode_init(
{
struct xfs_buf *fbuf;
struct xfs_dinode *free;
- int nbufs, blks_per_cluster, inodes_per_cluster;
+ int nbufs;
int version;
int i, j;
xfs_daddr_t d;
@@ -299,9 +299,7 @@ xfs_ialloc_inode_init(
* sizes, manipulate the inodes in buffers which are multiples of the
* blocks size.
*/
- blks_per_cluster = xfs_icluster_size_fsb(mp);
- inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
- nbufs = length / blks_per_cluster;
+ nbufs = length / mp->m_blocks_per_cluster;
/*
* Figure out what version number to use in the inodes we create. If
@@ -312,7 +310,7 @@ xfs_ialloc_inode_init(
*
* For v3 inodes, we also need to write the inode number into the inode,
* so calculate the first inode number of the chunk here as
- * XFS_OFFBNO_TO_AGINO() only works within a filesystem block, not
+ * XFS_AGB_TO_AGINO() only works within a filesystem block, not
* across multiple filesystem blocks (such as a cluster) and so cannot
* be used in the cluster buffer loop below.
*
@@ -324,8 +322,7 @@ xfs_ialloc_inode_init(
*/
if (xfs_sb_version_hascrc(&mp->m_sb)) {
version = 3;
- ino = XFS_AGINO_TO_INO(mp, agno,
- XFS_OFFBNO_TO_AGINO(mp, agbno, 0));
+ ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));
/*
* log the initialisation that is about to take place as an
@@ -345,9 +342,10 @@ xfs_ialloc_inode_init(
/*
* Get the block.
*/
- d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
+ d = XFS_AGB_TO_DADDR(mp, agno, agbno +
+ (j * mp->m_blocks_per_cluster));
fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
- mp->m_bsize * blks_per_cluster,
+ mp->m_bsize * mp->m_blocks_per_cluster,
XBF_UNMAPPED);
if (!fbuf)
return -ENOMEM;
@@ -355,7 +353,7 @@ xfs_ialloc_inode_init(
/* Initialize the inode buffers and log them appropriately. */
fbuf->b_ops = &xfs_inode_buf_ops;
xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
- for (i = 0; i < inodes_per_cluster; i++) {
+ for (i = 0; i < mp->m_inodes_per_cluster; i++) {
int ioffset = i << mp->m_sb.sb_inodelog;
uint isize = xfs_dinode_size(version);
@@ -445,7 +443,7 @@ xfs_align_sparse_ino(
return;
/* calculate the inode offset and align startino */
- offset = mod << mp->m_sb.sb_inopblog;
+ offset = XFS_AGB_TO_AGINO(mp, mod);
*startino -= offset;
/*
@@ -641,7 +639,7 @@ xfs_ialloc_ag_alloc(
args.tp = tp;
args.mp = tp->t_mountp;
args.fsbno = NULLFSBLOCK;
- xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INODES);
+ args.oinfo = XFS_RMAP_OINFO_INODES;
#ifdef DEBUG
/* randomly do sparse inode allocations */
@@ -692,7 +690,7 @@ xfs_ialloc_ag_alloc(
* but not to use them in the actual exact allocation.
*/
args.alignment = 1;
- args.minalignslop = xfs_ialloc_cluster_alignment(args.mp) - 1;
+ args.minalignslop = args.mp->m_cluster_align - 1;
/* Allow space for the inode btree to split. */
args.minleft = args.mp->m_in_maxlevels - 1;
@@ -727,7 +725,7 @@ xfs_ialloc_ag_alloc(
args.alignment = args.mp->m_dalign;
isaligned = 1;
} else
- args.alignment = xfs_ialloc_cluster_alignment(args.mp);
+ args.alignment = args.mp->m_cluster_align;
/*
* Need to figure out where to allocate the inode blocks.
* Ideally they should be spaced out through the a.g.
@@ -756,7 +754,7 @@ xfs_ialloc_ag_alloc(
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.agbno = be32_to_cpu(agi->agi_root);
args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
- args.alignment = xfs_ialloc_cluster_alignment(args.mp);
+ args.alignment = args.mp->m_cluster_align;
if ((error = xfs_alloc_vextent(&args)))
return error;
}
@@ -797,7 +795,7 @@ sparse_alloc:
if (error)
return error;
- newlen = args.len << args.mp->m_sb.sb_inopblog;
+ newlen = XFS_AGB_TO_AGINO(args.mp, args.len);
ASSERT(newlen <= XFS_INODES_PER_CHUNK);
allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
}
@@ -825,7 +823,7 @@ sparse_alloc:
/*
* Convert the results.
*/
- newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
+ newino = XFS_AGB_TO_AGINO(args.mp, args.agbno);
if (xfs_inobt_issparse(~allocmask)) {
/*
@@ -1019,7 +1017,7 @@ xfs_ialloc_ag_select(
*/
ineed = mp->m_ialloc_min_blks;
if (flags && ineed > 1)
- ineed += xfs_ialloc_cluster_alignment(mp);
+ ineed += mp->m_cluster_align;
longest = pag->pagf_longest;
if (!longest)
longest = pag->pagf_flcount > 0;
@@ -1849,14 +1847,12 @@ xfs_difree_inode_chunk(
int nextbit;
xfs_agblock_t agbno;
int contigblk;
- struct xfs_owner_info oinfo;
DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
if (!xfs_inobt_issparse(rec->ir_holemask)) {
/* not sparse, calculate extent info directly */
xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
- mp->m_ialloc_blks, &oinfo);
+ mp->m_ialloc_blks, &XFS_RMAP_OINFO_INODES);
return;
}
@@ -1900,7 +1896,7 @@ xfs_difree_inode_chunk(
ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
- contigblk, &oinfo);
+ contigblk, &XFS_RMAP_OINFO_INODES);
/* reset range to current bit and carry on... */
startidx = endidx = nextbit;
@@ -2292,7 +2288,6 @@ xfs_imap(
xfs_agblock_t agbno; /* block number of inode in the alloc group */
xfs_agino_t agino; /* inode number within alloc group */
xfs_agnumber_t agno; /* allocation group number */
- int blks_per_cluster; /* num blocks per inode cluster */
xfs_agblock_t chunk_agbno; /* first block in inode chunk */
xfs_agblock_t cluster_agbno; /* first block in inode cluster */
int error; /* error code */
@@ -2338,8 +2333,6 @@ xfs_imap(
return -EINVAL;
}
- blks_per_cluster = xfs_icluster_size_fsb(mp);
-
/*
* For bulkstat and handle lookups, we have an untrusted inode number
* that we have to verify is valid. We cannot do this just by reading
@@ -2359,7 +2352,7 @@ xfs_imap(
* If the inode cluster size is the same as the blocksize or
* smaller we get to the buffer by simple arithmetics.
*/
- if (blks_per_cluster == 1) {
+ if (mp->m_blocks_per_cluster == 1) {
offset = XFS_INO_TO_OFFSET(mp, ino);
ASSERT(offset < mp->m_sb.sb_inopblock);
@@ -2388,12 +2381,13 @@ xfs_imap(
out_map:
ASSERT(agbno >= chunk_agbno);
cluster_agbno = chunk_agbno +
- ((offset_agbno / blks_per_cluster) * blks_per_cluster);
+ ((offset_agbno / mp->m_blocks_per_cluster) *
+ mp->m_blocks_per_cluster);
offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
XFS_INO_TO_OFFSET(mp, ino);
imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
- imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
+ imap->im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);
/*
@@ -2726,8 +2720,8 @@ xfs_ialloc_has_inodes_at_extent(
xfs_agino_t low;
xfs_agino_t high;
- low = XFS_OFFBNO_TO_AGINO(cur->bc_mp, bno, 0);
- high = XFS_OFFBNO_TO_AGINO(cur->bc_mp, bno + len, 0) - 1;
+ low = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
+ high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;
return xfs_ialloc_has_inode_record(cur, low, high, exists);
}
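The new XFS_AGB_TO_AGINO() helper makes the repeated "shift by sb_inopblog" conversions explicit: it maps an AG block number to the number of the first inode in that block. A small sketch of the arithmetic, with illustrative values:

/* Sketch: first inode number in an AG block; inopblog = log2(inodes/block). */
static unsigned int agb_to_agino(unsigned int agbno, int inopblog)
{
        return agbno << inopblog;
}
/* e.g. 512-byte inodes in 4096-byte blocks: inopblog == 3, so
 * agb_to_agino(10, 3) == 80, the first inode in block 10. */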
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 7fbf8af0b159..9b25e7a0df47 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -84,7 +84,7 @@ __xfs_inobt_alloc_block(
memset(&args, 0, sizeof(args));
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
- xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INOBT);
+ args.oinfo = XFS_RMAP_OINFO_INOBT;
args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
args.minlen = 1;
args.maxlen = 1;
@@ -136,12 +136,9 @@ __xfs_inobt_free_block(
struct xfs_buf *bp,
enum xfs_ag_resv_type resv)
{
- struct xfs_owner_info oinfo;
-
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
return xfs_free_extent(cur->bc_tp,
XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
- &oinfo, resv);
+ &XFS_RMAP_OINFO_INOBT, resv);
}
STATIC int
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index 1aaa01c97517..d9eab657b63e 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -70,7 +70,7 @@ xfs_refcountbt_alloc_block(
args.type = XFS_ALLOCTYPE_NEAR_BNO;
args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
xfs_refc_block(args.mp));
- xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
+ args.oinfo = XFS_RMAP_OINFO_REFC;
args.minlen = args.maxlen = args.prod = 1;
args.resv = XFS_AG_RESV_METADATA;
@@ -106,15 +106,13 @@ xfs_refcountbt_free_block(
struct xfs_buf *agbp = cur->bc_private.a.agbp;
struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
- struct xfs_owner_info oinfo;
int error;
trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
be32_add_cpu(&agf->agf_refcount_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
- error = xfs_free_extent(cur->bc_tp, fsbno, 1, &oinfo,
+ error = xfs_free_extent(cur->bc_tp, fsbno, 1, &XFS_RMAP_OINFO_REFC,
XFS_AG_RESV_METADATA);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
index 245af452840e..8ed885507dd8 100644
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -458,21 +458,21 @@ out:
*/
STATIC int
xfs_rmap_unmap(
- struct xfs_btree_cur *cur,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- bool unwritten,
- struct xfs_owner_info *oinfo)
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ const struct xfs_owner_info *oinfo)
{
- struct xfs_mount *mp = cur->bc_mp;
- struct xfs_rmap_irec ltrec;
- uint64_t ltoff;
- int error = 0;
- int i;
- uint64_t owner;
- uint64_t offset;
- unsigned int flags;
- bool ignore_off;
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec ltrec;
+ uint64_t ltoff;
+ int error = 0;
+ int i;
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags;
+ bool ignore_off;
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
@@ -653,16 +653,16 @@ out_error:
*/
int
xfs_rmap_free(
- struct xfs_trans *tp,
- struct xfs_buf *agbp,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo)
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ const struct xfs_owner_info *oinfo)
{
- struct xfs_mount *mp = tp->t_mountp;
- struct xfs_btree_cur *cur;
- int error;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_btree_cur *cur;
+ int error;
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
@@ -710,23 +710,23 @@ xfs_rmap_is_mergeable(
*/
STATIC int
xfs_rmap_map(
- struct xfs_btree_cur *cur,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- bool unwritten,
- struct xfs_owner_info *oinfo)
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ const struct xfs_owner_info *oinfo)
{
- struct xfs_mount *mp = cur->bc_mp;
- struct xfs_rmap_irec ltrec;
- struct xfs_rmap_irec gtrec;
- int have_gt;
- int have_lt;
- int error = 0;
- int i;
- uint64_t owner;
- uint64_t offset;
- unsigned int flags = 0;
- bool ignore_off;
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec ltrec;
+ struct xfs_rmap_irec gtrec;
+ int have_gt;
+ int have_lt;
+ int error = 0;
+ int i;
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags = 0;
+ bool ignore_off;
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
ASSERT(owner != 0);
@@ -890,16 +890,16 @@ out_error:
*/
int
xfs_rmap_alloc(
- struct xfs_trans *tp,
- struct xfs_buf *agbp,
- xfs_agnumber_t agno,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo)
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ const struct xfs_owner_info *oinfo)
{
- struct xfs_mount *mp = tp->t_mountp;
- struct xfs_btree_cur *cur;
- int error;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_btree_cur *cur;
+ int error;
if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;
@@ -929,16 +929,16 @@ xfs_rmap_alloc(
*/
STATIC int
xfs_rmap_convert(
- struct xfs_btree_cur *cur,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- bool unwritten,
- struct xfs_owner_info *oinfo)
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ const struct xfs_owner_info *oinfo)
{
- struct xfs_mount *mp = cur->bc_mp;
- struct xfs_rmap_irec r[4]; /* neighbor extent entries */
- /* left is 0, right is 1, prev is 2 */
- /* new is 3 */
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec r[4]; /* neighbor extent entries */
+ /* left is 0, right is 1, */
+ /* prev is 2, new is 3 */
uint64_t owner;
uint64_t offset;
uint64_t new_endoff;
@@ -1354,16 +1354,16 @@ done:
*/
STATIC int
xfs_rmap_convert_shared(
- struct xfs_btree_cur *cur,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- bool unwritten,
- struct xfs_owner_info *oinfo)
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ const struct xfs_owner_info *oinfo)
{
- struct xfs_mount *mp = cur->bc_mp;
- struct xfs_rmap_irec r[4]; /* neighbor extent entries */
- /* left is 0, right is 1, prev is 2 */
- /* new is 3 */
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec r[4]; /* neighbor extent entries */
+ /* left is 0, right is 1, */
+ /* prev is 2, new is 3 */
uint64_t owner;
uint64_t offset;
uint64_t new_endoff;
@@ -1743,20 +1743,20 @@ done:
*/
STATIC int
xfs_rmap_unmap_shared(
- struct xfs_btree_cur *cur,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- bool unwritten,
- struct xfs_owner_info *oinfo)
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ const struct xfs_owner_info *oinfo)
{
- struct xfs_mount *mp = cur->bc_mp;
- struct xfs_rmap_irec ltrec;
- uint64_t ltoff;
- int error = 0;
- int i;
- uint64_t owner;
- uint64_t offset;
- unsigned int flags;
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec ltrec;
+ uint64_t ltoff;
+ int error = 0;
+ int i;
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags;
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
if (unwritten)
@@ -1905,22 +1905,22 @@ out_error:
*/
STATIC int
xfs_rmap_map_shared(
- struct xfs_btree_cur *cur,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- bool unwritten,
- struct xfs_owner_info *oinfo)
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ const struct xfs_owner_info *oinfo)
{
- struct xfs_mount *mp = cur->bc_mp;
- struct xfs_rmap_irec ltrec;
- struct xfs_rmap_irec gtrec;
- int have_gt;
- int have_lt;
- int error = 0;
- int i;
- uint64_t owner;
- uint64_t offset;
- unsigned int flags = 0;
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec ltrec;
+ struct xfs_rmap_irec gtrec;
+ int have_gt;
+ int have_lt;
+ int error = 0;
+ int i;
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags = 0;
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
if (unwritten)
@@ -2459,18 +2459,18 @@ xfs_rmap_has_record(
*/
int
xfs_rmap_record_exists(
- struct xfs_btree_cur *cur,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo,
- bool *has_rmap)
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ const struct xfs_owner_info *oinfo,
+ bool *has_rmap)
{
- uint64_t owner;
- uint64_t offset;
- unsigned int flags;
- int has_record;
- struct xfs_rmap_irec irec;
- int error;
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags;
+ int has_record;
+ struct xfs_rmap_irec irec;
+ int error;
xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
ASSERT(XFS_RMAP_NON_INODE_OWNER(owner) ||
@@ -2530,7 +2530,7 @@ xfs_rmap_has_other_keys(
struct xfs_btree_cur *cur,
xfs_agblock_t bno,
xfs_extlen_t len,
- struct xfs_owner_info *oinfo,
+ const struct xfs_owner_info *oinfo,
bool *has_rmap)
{
struct xfs_rmap_irec low = {0};
@@ -2550,3 +2550,31 @@ xfs_rmap_has_other_keys(
*has_rmap = rks.has_rmap;
return error;
}
+
+const struct xfs_owner_info XFS_RMAP_OINFO_SKIP_UPDATE = {
+ .oi_owner = XFS_RMAP_OWN_NULL,
+};
+const struct xfs_owner_info XFS_RMAP_OINFO_ANY_OWNER = {
+ .oi_owner = XFS_RMAP_OWN_UNKNOWN,
+};
+const struct xfs_owner_info XFS_RMAP_OINFO_FS = {
+ .oi_owner = XFS_RMAP_OWN_FS,
+};
+const struct xfs_owner_info XFS_RMAP_OINFO_LOG = {
+ .oi_owner = XFS_RMAP_OWN_LOG,
+};
+const struct xfs_owner_info XFS_RMAP_OINFO_AG = {
+ .oi_owner = XFS_RMAP_OWN_AG,
+};
+const struct xfs_owner_info XFS_RMAP_OINFO_INOBT = {
+ .oi_owner = XFS_RMAP_OWN_INOBT,
+};
+const struct xfs_owner_info XFS_RMAP_OINFO_INODES = {
+ .oi_owner = XFS_RMAP_OWN_INODES,
+};
+const struct xfs_owner_info XFS_RMAP_OINFO_REFC = {
+ .oi_owner = XFS_RMAP_OWN_REFC,
+};
+const struct xfs_owner_info XFS_RMAP_OINFO_COW = {
+ .oi_owner = XFS_RMAP_OWN_COW,
+};
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
index 157dc722ad35..e21ed0294e5c 100644
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -7,16 +7,6 @@
#define __XFS_RMAP_H__
static inline void
-xfs_rmap_ag_owner(
- struct xfs_owner_info *oi,
- uint64_t owner)
-{
- oi->oi_owner = owner;
- oi->oi_offset = 0;
- oi->oi_flags = 0;
-}
-
-static inline void
xfs_rmap_ino_bmbt_owner(
struct xfs_owner_info *oi,
xfs_ino_t ino,
@@ -43,27 +33,13 @@ xfs_rmap_ino_owner(
oi->oi_flags |= XFS_OWNER_INFO_ATTR_FORK;
}
-static inline void
-xfs_rmap_skip_owner_update(
- struct xfs_owner_info *oi)
-{
- xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_NULL);
-}
-
static inline bool
xfs_rmap_should_skip_owner_update(
- struct xfs_owner_info *oi)
+ const struct xfs_owner_info *oi)
{
return oi->oi_owner == XFS_RMAP_OWN_NULL;
}
-static inline void
-xfs_rmap_any_owner_update(
- struct xfs_owner_info *oi)
-{
- xfs_rmap_ag_owner(oi, XFS_RMAP_OWN_UNKNOWN);
-}
-
/* Reverse mapping functions. */
struct xfs_buf;
@@ -103,12 +79,12 @@ xfs_rmap_irec_offset_unpack(
static inline void
xfs_owner_info_unpack(
- struct xfs_owner_info *oinfo,
- uint64_t *owner,
- uint64_t *offset,
- unsigned int *flags)
+ const struct xfs_owner_info *oinfo,
+ uint64_t *owner,
+ uint64_t *offset,
+ unsigned int *flags)
{
- unsigned int r = 0;
+ unsigned int r = 0;
*owner = oinfo->oi_owner;
*offset = oinfo->oi_offset;
@@ -137,10 +113,10 @@ xfs_owner_info_pack(
int xfs_rmap_alloc(struct xfs_trans *tp, struct xfs_buf *agbp,
xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
- struct xfs_owner_info *oinfo);
+ const struct xfs_owner_info *oinfo);
int xfs_rmap_free(struct xfs_trans *tp, struct xfs_buf *agbp,
xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
- struct xfs_owner_info *oinfo);
+ const struct xfs_owner_info *oinfo);
int xfs_rmap_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno,
xfs_extlen_t len, uint64_t owner, uint64_t offset,
@@ -218,11 +194,21 @@ int xfs_rmap_btrec_to_irec(union xfs_btree_rec *rec,
int xfs_rmap_has_record(struct xfs_btree_cur *cur, xfs_agblock_t bno,
xfs_extlen_t len, bool *exists);
int xfs_rmap_record_exists(struct xfs_btree_cur *cur, xfs_agblock_t bno,
- xfs_extlen_t len, struct xfs_owner_info *oinfo,
+ xfs_extlen_t len, const struct xfs_owner_info *oinfo,
bool *has_rmap);
int xfs_rmap_has_other_keys(struct xfs_btree_cur *cur, xfs_agblock_t bno,
- xfs_extlen_t len, struct xfs_owner_info *oinfo,
+ xfs_extlen_t len, const struct xfs_owner_info *oinfo,
bool *has_rmap);
int xfs_rmap_map_raw(struct xfs_btree_cur *cur, struct xfs_rmap_irec *rmap);
+extern const struct xfs_owner_info XFS_RMAP_OINFO_SKIP_UPDATE;
+extern const struct xfs_owner_info XFS_RMAP_OINFO_ANY_OWNER;
+extern const struct xfs_owner_info XFS_RMAP_OINFO_FS;
+extern const struct xfs_owner_info XFS_RMAP_OINFO_LOG;
+extern const struct xfs_owner_info XFS_RMAP_OINFO_AG;
+extern const struct xfs_owner_info XFS_RMAP_OINFO_INOBT;
+extern const struct xfs_owner_info XFS_RMAP_OINFO_INODES;
+extern const struct xfs_owner_info XFS_RMAP_OINFO_REFC;
+extern const struct xfs_owner_info XFS_RMAP_OINFO_COW;
+
#endif /* __XFS_RMAP_H__ */
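Replacing xfs_rmap_ag_owner() with shared const objects works because these owner descriptors carry no per-call state: the offset and flags were always zeroed, so every caller can read through one read-only instance instead of initializing a stack copy. A hedged sketch of the pattern (struct and values are illustrative):

struct owner_info {
        unsigned long long oi_owner;
        unsigned long long oi_offset;
        unsigned int       oi_flags;
};

/* One shared read-only descriptor; oi_offset/oi_flags implicitly zero. */
static const struct owner_info OINFO_AG = { .oi_owner = 42 };

static int free_extent(const struct owner_info *oinfo) /* takes const now */
{
        return oinfo->oi_owner ? 0 : -1;
}

static int example(void)
{
        return free_extent(&OINFO_AG);  /* no per-call stack init */
}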
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index b228c821bae6..eaaff67e9626 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -505,6 +505,12 @@ xfs_rtmodify_summary_int(
uint first = (uint)((char *)sp - (char *)bp->b_addr);
*sp += delta;
+ if (mp->m_rsum_cache) {
+ if (*sp == 0 && log == mp->m_rsum_cache[bbno])
+ mp->m_rsum_cache[bbno]++;
+ if (*sp != 0 && log < mp->m_rsum_cache[bbno])
+ mp->m_rsum_cache[bbno] = log;
+ }
xfs_trans_log_buf(tp, bp, first, first + sizeof(*sp) - 1);
}
if (sum)
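The m_rsum_cache updates maintain a simple invariant: for each realtime bitmap block, the cached value is a lower bound on the lowest summary level with a nonzero count, so free-extent searches can start there and skip known-empty levels. A sketch of the two update rules, with illustrative names:

/* Sketch of the lower-bound cache: cache[b] <= lowest level with a nonzero
 * count, so searches may start at cache[b] and skip empty levels below it. */
static void update_cache(unsigned char *cache, int b, int log, int count)
{
        if (count == 0 && log == cache[b])
                cache[b]++;             /* cached level emptied: bound rises */
        if (count != 0 && log < cache[b])
                cache[b] = log;         /* lower level populated: bound drops */
}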
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index 95374ab2dee7..77d80106f989 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -199,7 +199,10 @@ xfs_symlink_local_to_remote(
ifp->if_bytes - 1);
}
-/* Verify the consistency of an inline symlink. */
+/*
+ * Verify the in-memory consistency of an inline symlink data fork. This
+ * does not do on-disk format checks.
+ */
xfs_failaddr_t
xfs_symlink_shortform_verify(
struct xfs_inode *ip)
@@ -215,9 +218,12 @@ xfs_symlink_shortform_verify(
size = ifp->if_bytes;
endp = sfp + size;
- /* Zero length symlinks can exist while we're deleting a remote one. */
- if (size == 0)
- return NULL;
+ /*
+ * Zero length symlinks should never occur in memory as they are
+ * never allowed to exist on disk.
+ */
+ if (!size)
+ return __this_address;
/* No negative sizes or overly long symlink targets. */
if (size < 0 || size > XFS_SYMLINK_MAXLEN)
diff --git a/fs/xfs/libxfs/xfs_types.c b/fs/xfs/libxfs/xfs_types.c
index 33a5ca346baf..3306fc42cfad 100644
--- a/fs/xfs/libxfs/xfs_types.c
+++ b/fs/xfs/libxfs/xfs_types.c
@@ -87,16 +87,15 @@ xfs_agino_range(
* Calculate the first inode, which will be in the first
* cluster-aligned block after the AGFL.
*/
- bno = round_up(XFS_AGFL_BLOCK(mp) + 1,
- xfs_ialloc_cluster_alignment(mp));
- *first = XFS_OFFBNO_TO_AGINO(mp, bno, 0);
+ bno = round_up(XFS_AGFL_BLOCK(mp) + 1, mp->m_cluster_align);
+ *first = XFS_AGB_TO_AGINO(mp, bno);
/*
* Calculate the last inode, which will be at the end of the
* last (aligned) cluster that can be allocated in the AG.
*/
- bno = round_down(eoag, xfs_ialloc_cluster_alignment(mp));
- *last = XFS_OFFBNO_TO_AGINO(mp, bno, 0) - 1;
+ bno = round_down(eoag, mp->m_cluster_align);
+ *last = XFS_AGB_TO_AGINO(mp, bno) - 1;
}
/*
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index b9e6c89284c3..8f02855a019a 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -100,15 +100,37 @@ typedef void * xfs_failaddr_t;
*/
#define MAXNAMELEN 256
+/*
+ * This enum is used in string mapping in xfs_trace.h; please keep the
+ * TRACE_DEFINE_ENUMs for it up to date.
+ */
typedef enum {
XFS_LOOKUP_EQi, XFS_LOOKUP_LEi, XFS_LOOKUP_GEi
} xfs_lookup_t;
+#define XFS_AG_BTREE_CMP_FORMAT_STR \
+ { XFS_LOOKUP_EQi, "eq" }, \
+ { XFS_LOOKUP_LEi, "le" }, \
+ { XFS_LOOKUP_GEi, "ge" }
+
+/*
+ * This enum is used in string mapping in xfs_trace.h and scrub/trace.h;
+ * please keep the TRACE_DEFINE_ENUMs for it up to date.
+ */
typedef enum {
XFS_BTNUM_BNOi, XFS_BTNUM_CNTi, XFS_BTNUM_RMAPi, XFS_BTNUM_BMAPi,
XFS_BTNUM_INOi, XFS_BTNUM_FINOi, XFS_BTNUM_REFCi, XFS_BTNUM_MAX
} xfs_btnum_t;
+#define XFS_BTNUM_STRINGS \
+ { XFS_BTNUM_BNOi, "bnobt" }, \
+ { XFS_BTNUM_CNTi, "cntbt" }, \
+ { XFS_BTNUM_RMAPi, "rmapbt" }, \
+ { XFS_BTNUM_BMAPi, "bmbt" }, \
+ { XFS_BTNUM_INOi, "inobt" }, \
+ { XFS_BTNUM_FINOi, "finobt" }, \
+ { XFS_BTNUM_REFCi, "refcbt" }
+
struct xfs_name {
const unsigned char *name;
int len;
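These _STR/_STRINGS tables are consumed by __print_symbolic() in tracepoints, and TRACE_DEFINE_ENUM exports the enum values so the mapping also resolves in the trace format files read by userspace. A hedged sketch of a consuming event (the event itself is illustrative, not part of this patch):

/* In a trace header (TRACE_SYSTEM scaffolding already set up): */
TRACE_DEFINE_ENUM(XFS_BTNUM_BNOi);
TRACE_DEFINE_ENUM(XFS_BTNUM_CNTi);
/* ... one per enum value, kept up to date as the comment asks ... */

TRACE_EVENT(example_btree_op,           /* illustrative event */
        TP_PROTO(xfs_btnum_t btnum),
        TP_ARGS(btnum),
        TP_STRUCT__entry(__field(xfs_btnum_t, btnum)),
        TP_fast_assign(__entry->btnum = btnum;),
        TP_printk("btree %s",
                  __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS))
);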
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index 3068a9382feb..90955ab1e895 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -32,7 +32,6 @@ xchk_superblock_xref(
struct xfs_scrub *sc,
struct xfs_buf *bp)
{
- struct xfs_owner_info oinfo;
struct xfs_mount *mp = sc->mp;
xfs_agnumber_t agno = sc->sm->sm_agno;
xfs_agblock_t agbno;
@@ -49,8 +48,7 @@ xchk_superblock_xref(
xchk_xref_is_used_space(sc, agbno, 1);
xchk_xref_is_not_inode_chunk(sc, agbno, 1);
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
xchk_xref_is_not_shared(sc, agbno, 1);
/* scrub teardown will take care of sc->sa for us */
@@ -484,7 +482,6 @@ STATIC void
xchk_agf_xref(
struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
struct xfs_mount *mp = sc->mp;
xfs_agblock_t agbno;
int error;
@@ -502,8 +499,7 @@ xchk_agf_xref(
xchk_agf_xref_freeblks(sc);
xchk_agf_xref_cntbt(sc);
xchk_xref_is_not_inode_chunk(sc, agbno, 1);
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
xchk_agf_xref_btreeblks(sc);
xchk_xref_is_not_shared(sc, agbno, 1);
xchk_agf_xref_refcblks(sc);
@@ -598,7 +594,6 @@ out:
/* AGFL */
struct xchk_agfl_info {
- struct xfs_owner_info oinfo;
unsigned int sz_entries;
unsigned int nr_entries;
xfs_agblock_t *entries;
@@ -609,15 +604,14 @@ struct xchk_agfl_info {
STATIC void
xchk_agfl_block_xref(
struct xfs_scrub *sc,
- xfs_agblock_t agbno,
- struct xfs_owner_info *oinfo)
+ xfs_agblock_t agbno)
{
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
xchk_xref_is_used_space(sc, agbno, 1);
xchk_xref_is_not_inode_chunk(sc, agbno, 1);
- xchk_xref_is_owned_by(sc, agbno, 1, oinfo);
+ xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
xchk_xref_is_not_shared(sc, agbno, 1);
}
@@ -638,7 +632,7 @@ xchk_agfl_block(
else
xchk_block_set_corrupt(sc, sc->sa.agfl_bp);
- xchk_agfl_block_xref(sc, agbno, priv);
+ xchk_agfl_block_xref(sc, agbno);
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return XFS_BTREE_QUERY_RANGE_ABORT;
@@ -662,7 +656,6 @@ STATIC void
xchk_agfl_xref(
struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
struct xfs_mount *mp = sc->mp;
xfs_agblock_t agbno;
int error;
@@ -678,8 +671,7 @@ xchk_agfl_xref(
xchk_xref_is_used_space(sc, agbno, 1);
xchk_xref_is_not_inode_chunk(sc, agbno, 1);
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
xchk_xref_is_not_shared(sc, agbno, 1);
/*
@@ -732,7 +724,6 @@ xchk_agfl(
}
/* Check the blocks in the AGFL. */
- xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
sc->sa.agfl_bp, xchk_agfl_block, &sai);
if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
@@ -791,7 +782,6 @@ STATIC void
xchk_agi_xref(
struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
struct xfs_mount *mp = sc->mp;
xfs_agblock_t agbno;
int error;
@@ -808,8 +798,7 @@ xchk_agi_xref(
xchk_xref_is_used_space(sc, agbno, 1);
xchk_xref_is_not_inode_chunk(sc, agbno, 1);
xchk_agi_xref_icounts(sc);
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
- xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
xchk_xref_is_not_shared(sc, agbno, 1);
/* scrub teardown will take care of sc->sa for us */
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index f7568a4b5fe5..03d1e15cceba 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -646,7 +646,6 @@ int
xrep_agfl(
struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
struct xfs_bitmap agfl_extents;
struct xfs_mount *mp = sc->mp;
struct xfs_buf *agf_bp;
@@ -708,8 +707,8 @@ xrep_agfl(
goto err;
/* Dump any AGFL overflow. */
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
- return xrep_reap_extents(sc, &agfl_extents, &oinfo, XFS_AG_RESV_AGFL);
+ return xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
+ XFS_AG_RESV_AGFL);
err:
xfs_bitmap_destroy(&agfl_extents);
return error;
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 376bcb585ae6..44883e9112ad 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -125,12 +125,10 @@ xchk_allocbt(
struct xfs_scrub *sc,
xfs_btnum_t which)
{
- struct xfs_owner_info oinfo;
struct xfs_btree_cur *cur;
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
cur = which == XFS_BTNUM_BNO ? sc->sa.bno_cur : sc->sa.cnt_cur;
- return xchk_btree(sc, cur, xchk_allocbt_rec, &oinfo, NULL);
+ return xchk_btree(sc, cur, xchk_allocbt_rec, &XFS_RMAP_OINFO_AG, NULL);
}
int
diff --git a/fs/xfs/scrub/btree.c b/fs/xfs/scrub/btree.c
index 4ae959f7ad2c..6f94d1f7322d 100644
--- a/fs/xfs/scrub/btree.c
+++ b/fs/xfs/scrub/btree.c
@@ -583,31 +583,32 @@ xchk_btree_block_keys(
*/
int
xchk_btree(
- struct xfs_scrub *sc,
- struct xfs_btree_cur *cur,
- xchk_btree_rec_fn scrub_fn,
- struct xfs_owner_info *oinfo,
- void *private)
+ struct xfs_scrub *sc,
+ struct xfs_btree_cur *cur,
+ xchk_btree_rec_fn scrub_fn,
+ const struct xfs_owner_info *oinfo,
+ void *private)
{
- struct xchk_btree bs = { NULL };
- union xfs_btree_ptr ptr;
- union xfs_btree_ptr *pp;
- union xfs_btree_rec *recp;
- struct xfs_btree_block *block;
- int level;
- struct xfs_buf *bp;
- struct check_owner *co;
- struct check_owner *n;
- int i;
- int error = 0;
+ struct xchk_btree bs = {
+ .cur = cur,
+ .scrub_rec = scrub_fn,
+ .oinfo = oinfo,
+ .firstrec = true,
+ .private = private,
+ .sc = sc,
+ };
+ union xfs_btree_ptr ptr;
+ union xfs_btree_ptr *pp;
+ union xfs_btree_rec *recp;
+ struct xfs_btree_block *block;
+ int level;
+ struct xfs_buf *bp;
+ struct check_owner *co;
+ struct check_owner *n;
+ int i;
+ int error = 0;
/* Initialize scrub state */
- bs.cur = cur;
- bs.scrub_rec = scrub_fn;
- bs.oinfo = oinfo;
- bs.firstrec = true;
- bs.private = private;
- bs.sc = sc;
for (i = 0; i < XFS_BTREE_MAXLEVELS; i++)
bs.firstkey[i] = true;
INIT_LIST_HEAD(&bs.to_check);
diff --git a/fs/xfs/scrub/btree.h b/fs/xfs/scrub/btree.h
index aada763cd006..5572e475f8ed 100644
--- a/fs/xfs/scrub/btree.h
+++ b/fs/xfs/scrub/btree.h
@@ -31,21 +31,21 @@ typedef int (*xchk_btree_rec_fn)(
struct xchk_btree {
/* caller-provided scrub state */
- struct xfs_scrub *sc;
- struct xfs_btree_cur *cur;
- xchk_btree_rec_fn scrub_rec;
- struct xfs_owner_info *oinfo;
- void *private;
+ struct xfs_scrub *sc;
+ struct xfs_btree_cur *cur;
+ xchk_btree_rec_fn scrub_rec;
+ const struct xfs_owner_info *oinfo;
+ void *private;
/* internal scrub state */
- union xfs_btree_rec lastrec;
- bool firstrec;
- union xfs_btree_key lastkey[XFS_BTREE_MAXLEVELS];
- bool firstkey[XFS_BTREE_MAXLEVELS];
- struct list_head to_check;
+ union xfs_btree_rec lastrec;
+ bool firstrec;
+ union xfs_btree_key lastkey[XFS_BTREE_MAXLEVELS];
+ bool firstkey[XFS_BTREE_MAXLEVELS];
+ struct list_head to_check;
};
int xchk_btree(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
- xchk_btree_rec_fn scrub_fn, struct xfs_owner_info *oinfo,
+ xchk_btree_rec_fn scrub_fn, const struct xfs_owner_info *oinfo,
void *private);
#endif /* __XFS_SCRUB_BTREE_H__ */
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index 346b02abccf7..0c54ff55b901 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -313,8 +313,8 @@ xchk_set_incomplete(
*/
struct xchk_rmap_ownedby_info {
- struct xfs_owner_info *oinfo;
- xfs_filblks_t *blocks;
+ const struct xfs_owner_info *oinfo;
+ xfs_filblks_t *blocks;
};
STATIC int
@@ -347,15 +347,15 @@ int
xchk_count_rmap_ownedby_ag(
struct xfs_scrub *sc,
struct xfs_btree_cur *cur,
- struct xfs_owner_info *oinfo,
+ const struct xfs_owner_info *oinfo,
xfs_filblks_t *blocks)
{
- struct xchk_rmap_ownedby_info sroi;
+ struct xchk_rmap_ownedby_info sroi = {
+ .oinfo = oinfo,
+ .blocks = blocks,
+ };
- sroi.oinfo = oinfo;
*blocks = 0;
- sroi.blocks = blocks;
-
return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
&sroi);
}
diff --git a/fs/xfs/scrub/common.h b/fs/xfs/scrub/common.h
index 2d4324d12f9a..e26a430bd466 100644
--- a/fs/xfs/scrub/common.h
+++ b/fs/xfs/scrub/common.h
@@ -116,7 +116,7 @@ int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
void xchk_ag_btcur_free(struct xchk_ag *sa);
int xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
- struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);
+ const struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);
int xchk_setup_ag_btree(struct xfs_scrub *sc, struct xfs_inode *ip,
bool force_log);
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 224dba937492..882dc56c5c21 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -44,6 +44,11 @@ xchk_setup_ag_iallocbt(
/* Inode btree scrubber. */
+struct xchk_iallocbt {
+ /* Number of inodes we see while scanning inobt. */
+ unsigned long long inodes;
+};
+
/*
* If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure
@@ -82,15 +87,12 @@ xchk_iallocbt_chunk_xref(
xfs_agblock_t agbno,
xfs_extlen_t len)
{
- struct xfs_owner_info oinfo;
-
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return;
xchk_xref_is_used_space(sc, agbno, len);
xchk_iallocbt_chunk_xref_other(sc, irec, agino);
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
- xchk_xref_is_owned_by(sc, agbno, len, &oinfo);
+ xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
xchk_xref_is_not_shared(sc, agbno, len);
}
@@ -186,7 +188,6 @@ xchk_iallocbt_check_freemask(
struct xchk_btree *bs,
struct xfs_inobt_rec_incore *irec)
{
- struct xfs_owner_info oinfo;
struct xfs_imap imap;
struct xfs_mount *mp = bs->cur->bc_mp;
struct xfs_dinode *dip;
@@ -197,19 +198,16 @@ xchk_iallocbt_check_freemask(
xfs_agino_t chunkino;
xfs_agino_t clusterino;
xfs_agblock_t agbno;
- int blks_per_cluster;
uint16_t holemask;
uint16_t ir_holemask;
int error = 0;
/* Make sure the freemask matches the inode records. */
- blks_per_cluster = xfs_icluster_size_fsb(mp);
- nr_inodes = XFS_OFFBNO_TO_AGINO(mp, blks_per_cluster, 0);
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
+ nr_inodes = mp->m_inodes_per_cluster;
for (agino = irec->ir_startino;
agino < irec->ir_startino + XFS_INODES_PER_CHUNK;
- agino += blks_per_cluster * mp->m_sb.sb_inopblock) {
+ agino += mp->m_inodes_per_cluster) {
fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
chunkino = agino - irec->ir_startino;
agbno = XFS_AGINO_TO_AGBNO(mp, agino);
@@ -230,17 +228,18 @@ xchk_iallocbt_check_freemask(
/* If any part of this is a hole, skip it. */
if (ir_holemask) {
xchk_xref_is_not_owned_by(bs->sc, agbno,
- blks_per_cluster, &oinfo);
+ mp->m_blocks_per_cluster,
+ &XFS_RMAP_OINFO_INODES);
continue;
}
- xchk_xref_is_owned_by(bs->sc, agbno, blks_per_cluster,
- &oinfo);
+ xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
+ &XFS_RMAP_OINFO_INODES);
/* Grab the inode cluster buffer. */
imap.im_blkno = XFS_AGB_TO_DADDR(mp, bs->cur->bc_private.a.agno,
agbno);
- imap.im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
+ imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
imap.im_boffset = 0;
error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap,
@@ -272,7 +271,7 @@ xchk_iallocbt_rec(
union xfs_btree_rec *rec)
{
struct xfs_mount *mp = bs->cur->bc_mp;
- xfs_filblks_t *inode_blocks = bs->private;
+ struct xchk_iallocbt *iabt = bs->private;
struct xfs_inobt_rec_incore irec;
uint64_t holes;
xfs_agnumber_t agno = bs->cur->bc_private.a.agno;
@@ -306,12 +305,11 @@ xchk_iallocbt_rec(
/* Make sure this record is aligned to cluster and inoalignmnt size. */
agbno = XFS_AGINO_TO_AGBNO(mp, irec.ir_startino);
- if ((agbno & (xfs_ialloc_cluster_alignment(mp) - 1)) ||
- (agbno & (xfs_icluster_size_fsb(mp) - 1)))
+ if ((agbno & (mp->m_cluster_align - 1)) ||
+ (agbno & (mp->m_blocks_per_cluster - 1)))
xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
- *inode_blocks += XFS_B_TO_FSB(mp,
- irec.ir_count * mp->m_sb.sb_inodesize);
+ iabt->inodes += irec.ir_count;
/* Handle non-sparse inodes */
if (!xfs_inobt_issparse(irec.ir_holemask)) {
@@ -366,7 +364,6 @@ xchk_iallocbt_xref_rmap_btreeblks(
struct xfs_scrub *sc,
int which)
{
- struct xfs_owner_info oinfo;
xfs_filblks_t blocks;
xfs_extlen_t inobt_blocks = 0;
xfs_extlen_t finobt_blocks = 0;
@@ -388,9 +385,8 @@ xchk_iallocbt_xref_rmap_btreeblks(
return;
}
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
- error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
- &blocks);
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
+ &XFS_RMAP_OINFO_INOBT, &blocks);
if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (blocks != inobt_blocks + finobt_blocks)
@@ -405,21 +401,21 @@ STATIC void
xchk_iallocbt_xref_rmap_inodes(
struct xfs_scrub *sc,
int which,
- xfs_filblks_t inode_blocks)
+ unsigned long long inodes)
{
- struct xfs_owner_info oinfo;
xfs_filblks_t blocks;
+ xfs_filblks_t inode_blocks;
int error;
if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
/* Check that we saw as many inode blocks as the rmap knows about. */
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
- error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, &oinfo,
- &blocks);
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
+ &XFS_RMAP_OINFO_INODES, &blocks);
if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
+ inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
if (blocks != inode_blocks)
xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
@@ -431,14 +427,14 @@ xchk_iallocbt(
xfs_btnum_t which)
{
struct xfs_btree_cur *cur;
- struct xfs_owner_info oinfo;
- xfs_filblks_t inode_blocks = 0;
+ struct xchk_iallocbt iabt = {
+ .inodes = 0,
+ };
int error;
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
- error = xchk_btree(sc, cur, xchk_iallocbt_rec, &oinfo,
- &inode_blocks);
+ error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
+ &iabt);
if (error)
return error;
@@ -452,7 +448,7 @@ xchk_iallocbt(
* to inode chunks with free inodes.
*/
if (which == XFS_BTNUM_INO)
- xchk_iallocbt_xref_rmap_inodes(sc, which, inode_blocks);
+ xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);
return error;
}
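The scrubber now tallies a raw inode count while walking the btree and converts it to filesystem blocks once, in the cross-reference step, instead of converting per record. The arithmetic, worked through for an assumed geometry (512-byte inodes, 4096-byte blocks; not read from any real superblock):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t blocklog = 12;		/* log2 of the 4096-byte block */
	uint64_t inodesize = 512;
	uint64_t inodes = 64;		/* counted while walking the inobt */

	/* XFS_B_TO_FSB-style conversion: bytes, rounded up to blocks. */
	uint64_t bytes = inodes * inodesize;
	uint64_t blocks = (bytes + (1ULL << blocklog) - 1) >> blocklog;

	printf("%llu inodes -> %llu bytes -> %llu blocks\n",
	       (unsigned long long)inodes,
	       (unsigned long long)bytes,
	       (unsigned long long)blocks);	/* 64 -> 32768 -> 8 */
	return 0;
}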
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index e386c9b0b4ab..e213efc194a1 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -509,7 +509,6 @@ xchk_inode_xref(
xfs_ino_t ino,
struct xfs_dinode *dip)
{
- struct xfs_owner_info oinfo;
xfs_agnumber_t agno;
xfs_agblock_t agbno;
int error;
@@ -526,8 +525,7 @@ xchk_inode_xref(
xchk_xref_is_used_space(sc, agbno, 1);
xchk_inode_xref_finobt(sc, ino);
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
- xchk_xref_is_owned_by(sc, agbno, 1, &oinfo);
+ xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_INODES);
xchk_xref_is_not_shared(sc, agbno, 1);
xchk_inode_xref_bmap(sc, dip);
diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
index e8c82b026083..708b4158eb90 100644
--- a/fs/xfs/scrub/refcount.c
+++ b/fs/xfs/scrub/refcount.c
@@ -383,7 +383,6 @@ xchk_refcountbt_rec(
STATIC void
xchk_refcount_xref_rmap(
struct xfs_scrub *sc,
- struct xfs_owner_info *oinfo,
xfs_filblks_t cow_blocks)
{
xfs_extlen_t refcbt_blocks = 0;
@@ -397,17 +396,16 @@ xchk_refcount_xref_rmap(
error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
if (!xchk_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
return;
- error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
- &blocks);
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
+ &XFS_RMAP_OINFO_REFC, &blocks);
if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (blocks != refcbt_blocks)
xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
/* Check that we saw as many cow blocks as the rmap knows about. */
- xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW);
- error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
- &blocks);
+ error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
+ &XFS_RMAP_OINFO_COW, &blocks);
if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
return;
if (blocks != cow_blocks)
@@ -419,17 +417,15 @@ int
xchk_refcountbt(
struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
xfs_agblock_t cow_blocks = 0;
int error;
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
error = xchk_btree(sc, sc->sa.refc_cur, xchk_refcountbt_rec,
- &oinfo, &cow_blocks);
+ &XFS_RMAP_OINFO_REFC, &cow_blocks);
if (error)
return error;
- xchk_refcount_xref_rmap(sc, &oinfo, cow_blocks);
+ xchk_refcount_xref_rmap(sc, cow_blocks);
return 0;
}
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index 4fc0a5ea7673..1c8eecfe52b8 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -299,14 +299,14 @@ xrep_calc_ag_resblks(
/* Allocate a block in an AG. */
int
xrep_alloc_ag_block(
- struct xfs_scrub *sc,
- struct xfs_owner_info *oinfo,
- xfs_fsblock_t *fsbno,
- enum xfs_ag_resv_type resv)
+ struct xfs_scrub *sc,
+ const struct xfs_owner_info *oinfo,
+ xfs_fsblock_t *fsbno,
+ enum xfs_ag_resv_type resv)
{
- struct xfs_alloc_arg args = {0};
- xfs_agblock_t bno;
- int error;
+ struct xfs_alloc_arg args = {0};
+ xfs_agblock_t bno;
+ int error;
switch (resv) {
case XFS_AG_RESV_AGFL:
@@ -505,7 +505,6 @@ xrep_put_freelist(
struct xfs_scrub *sc,
xfs_agblock_t agbno)
{
- struct xfs_owner_info oinfo;
int error;
/* Make sure there's space on the freelist. */
@@ -518,9 +517,8 @@ xrep_put_freelist(
* create an rmap for the block prior to merging it or else other
* parts will break.
*/
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.agno, agbno, 1,
- &oinfo);
+ &XFS_RMAP_OINFO_AG);
if (error)
return error;
@@ -538,17 +536,17 @@ xrep_put_freelist(
/* Dispose of a single block. */
STATIC int
xrep_reap_block(
- struct xfs_scrub *sc,
- xfs_fsblock_t fsbno,
- struct xfs_owner_info *oinfo,
- enum xfs_ag_resv_type resv)
+ struct xfs_scrub *sc,
+ xfs_fsblock_t fsbno,
+ const struct xfs_owner_info *oinfo,
+ enum xfs_ag_resv_type resv)
{
- struct xfs_btree_cur *cur;
- struct xfs_buf *agf_bp = NULL;
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- bool has_other_rmap;
- int error;
+ struct xfs_btree_cur *cur;
+ struct xfs_buf *agf_bp = NULL;
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ bool has_other_rmap;
+ int error;
agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);
@@ -612,15 +610,15 @@ out_free:
/* Dispose of every block of every extent in the bitmap. */
int
xrep_reap_extents(
- struct xfs_scrub *sc,
- struct xfs_bitmap *bitmap,
- struct xfs_owner_info *oinfo,
- enum xfs_ag_resv_type type)
+ struct xfs_scrub *sc,
+ struct xfs_bitmap *bitmap,
+ const struct xfs_owner_info *oinfo,
+ enum xfs_ag_resv_type type)
{
- struct xfs_bitmap_range *bmr;
- struct xfs_bitmap_range *n;
- xfs_fsblock_t fsbno;
- int error = 0;
+ struct xfs_bitmap_range *bmr;
+ struct xfs_bitmap_range *n;
+ xfs_fsblock_t fsbno;
+ int error = 0;
ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));
diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
index 9de321eee4ab..f2fc18bb7605 100644
--- a/fs/xfs/scrub/repair.h
+++ b/fs/xfs/scrub/repair.h
@@ -21,8 +21,9 @@ int xrep_roll_ag_trans(struct xfs_scrub *sc);
bool xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks,
enum xfs_ag_resv_type type);
xfs_extlen_t xrep_calc_ag_resblks(struct xfs_scrub *sc);
-int xrep_alloc_ag_block(struct xfs_scrub *sc, struct xfs_owner_info *oinfo,
- xfs_fsblock_t *fsbno, enum xfs_ag_resv_type resv);
+int xrep_alloc_ag_block(struct xfs_scrub *sc,
+ const struct xfs_owner_info *oinfo, xfs_fsblock_t *fsbno,
+ enum xfs_ag_resv_type resv);
int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb,
struct xfs_buf **bpp, xfs_btnum_t btnum,
const struct xfs_buf_ops *ops);
@@ -32,7 +33,7 @@ struct xfs_bitmap;
int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink);
int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xfs_bitmap *btlist);
int xrep_reap_extents(struct xfs_scrub *sc, struct xfs_bitmap *exlist,
- struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
+ const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type);
struct xrep_find_ag_btree {
/* in: rmap owner of the btree we're looking for */
diff --git a/fs/xfs/scrub/rmap.c b/fs/xfs/scrub/rmap.c
index 5e293c129813..92a140c5b55e 100644
--- a/fs/xfs/scrub/rmap.c
+++ b/fs/xfs/scrub/rmap.c
@@ -174,24 +174,21 @@ int
xchk_rmapbt(
struct xfs_scrub *sc)
{
- struct xfs_owner_info oinfo;
-
- xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
return xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec,
- &oinfo, NULL);
+ &XFS_RMAP_OINFO_AG, NULL);
}
/* xref check that the extent is owned by a given owner */
static inline void
xchk_xref_check_owner(
- struct xfs_scrub *sc,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo,
- bool should_have_rmap)
+ struct xfs_scrub *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ const struct xfs_owner_info *oinfo,
+ bool should_have_rmap)
{
- bool has_rmap;
- int error;
+ bool has_rmap;
+ int error;
if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
return;
@@ -207,10 +204,10 @@ xchk_xref_check_owner(
/* xref check that the extent is owned by a given owner */
void
xchk_xref_is_owned_by(
- struct xfs_scrub *sc,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo)
+ struct xfs_scrub *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ const struct xfs_owner_info *oinfo)
{
xchk_xref_check_owner(sc, bno, len, oinfo, true);
}
@@ -218,10 +215,10 @@ xchk_xref_is_owned_by(
/* xref check that the extent is not owned by a given owner */
void
xchk_xref_is_not_owned_by(
- struct xfs_scrub *sc,
- xfs_agblock_t bno,
- xfs_extlen_t len,
- struct xfs_owner_info *oinfo)
+ struct xfs_scrub *sc,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ const struct xfs_owner_info *oinfo)
{
xchk_xref_check_owner(sc, bno, len, oinfo, false);
}
diff --git a/fs/xfs/scrub/scrub.h b/fs/xfs/scrub/scrub.h
index af323b229c4b..22f754fba8e5 100644
--- a/fs/xfs/scrub/scrub.h
+++ b/fs/xfs/scrub/scrub.h
@@ -122,9 +122,9 @@ void xchk_xref_is_not_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
void xchk_xref_is_inode_chunk(struct xfs_scrub *sc, xfs_agblock_t agbno,
xfs_extlen_t len);
void xchk_xref_is_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
- xfs_extlen_t len, struct xfs_owner_info *oinfo);
+ xfs_extlen_t len, const struct xfs_owner_info *oinfo);
void xchk_xref_is_not_owned_by(struct xfs_scrub *sc, xfs_agblock_t agbno,
- xfs_extlen_t len, struct xfs_owner_info *oinfo);
+ xfs_extlen_t len, const struct xfs_owner_info *oinfo);
void xchk_xref_has_no_owner(struct xfs_scrub *sc, xfs_agblock_t agbno,
xfs_extlen_t len);
void xchk_xref_is_cow_staging(struct xfs_scrub *sc, xfs_agblock_t bno,
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index 4e20f0e48232..8344b14031ef 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -12,6 +12,71 @@
#include <linux/tracepoint.h>
#include "xfs_bit.h"
+/*
+ * ftrace's __print_symbolic requires that all enum values be wrapped in the
+ * TRACE_DEFINE_ENUM macro so that the enum value can be encoded in the ftrace
+ * ring buffer. Somehow this was only worth mentioning in the ftrace sample
+ * code.
+ */
+TRACE_DEFINE_ENUM(XFS_BTNUM_BNOi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_CNTi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_BMAPi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_INOi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_FINOi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_RMAPi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_REFCi);
+
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PROBE);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_SB);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_AGF);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_AGFL);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_AGI);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_BNOBT);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_CNTBT);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_INOBT);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_FINOBT);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RMAPBT);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_REFCNTBT);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_INODE);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_BMBTD);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_BMBTA);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_BMBTC);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_DIR);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_XATTR);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_SYMLINK);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PARENT);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RTBITMAP);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_RTSUM);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_UQUOTA);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_GQUOTA);
+TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PQUOTA);
+
+#define XFS_SCRUB_TYPE_STRINGS \
+ { XFS_SCRUB_TYPE_PROBE, "probe" }, \
+ { XFS_SCRUB_TYPE_SB, "sb" }, \
+ { XFS_SCRUB_TYPE_AGF, "agf" }, \
+ { XFS_SCRUB_TYPE_AGFL, "agfl" }, \
+ { XFS_SCRUB_TYPE_AGI, "agi" }, \
+ { XFS_SCRUB_TYPE_BNOBT, "bnobt" }, \
+ { XFS_SCRUB_TYPE_CNTBT, "cntbt" }, \
+ { XFS_SCRUB_TYPE_INOBT, "inobt" }, \
+ { XFS_SCRUB_TYPE_FINOBT, "finobt" }, \
+ { XFS_SCRUB_TYPE_RMAPBT, "rmapbt" }, \
+ { XFS_SCRUB_TYPE_REFCNTBT, "refcountbt" }, \
+ { XFS_SCRUB_TYPE_INODE, "inode" }, \
+ { XFS_SCRUB_TYPE_BMBTD, "bmapbtd" }, \
+ { XFS_SCRUB_TYPE_BMBTA, "bmapbta" }, \
+ { XFS_SCRUB_TYPE_BMBTC, "bmapbtc" }, \
+ { XFS_SCRUB_TYPE_DIR, "directory" }, \
+ { XFS_SCRUB_TYPE_XATTR, "xattr" }, \
+ { XFS_SCRUB_TYPE_SYMLINK, "symlink" }, \
+ { XFS_SCRUB_TYPE_PARENT, "parent" }, \
+ { XFS_SCRUB_TYPE_RTBITMAP, "rtbitmap" }, \
+ { XFS_SCRUB_TYPE_RTSUM, "rtsummary" }, \
+ { XFS_SCRUB_TYPE_UQUOTA, "usrquota" }, \
+ { XFS_SCRUB_TYPE_GQUOTA, "grpquota" }, \
+ { XFS_SCRUB_TYPE_PQUOTA, "prjquota" }
+
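For readers unfamiliar with __print_symbolic: at trace-decode time it walks a { value, "name" } table and substitutes the string for the recorded integer. A userspace model of that lookup; the table entries below are illustrative, not the real XFS_SCRUB_TYPE values:

#include <stdio.h>

struct sym_entry {
	unsigned long	val;
	const char	*name;
};

static const struct sym_entry scrub_type_strings[] = {
	{ 0, "probe" },
	{ 1, "sb" },
	{ 2, "agf" },
	{ 0, NULL },		/* sentinel */
};

/* What __print_symbolic effectively does when the ring buffer is
 * formatted: translate the recorded integer back to a string. */
static const char *print_symbolic(unsigned long val,
				  const struct sym_entry *tbl)
{
	for (; tbl->name != NULL; tbl++)
		if (tbl->val == val)
			return tbl->name;
	return "unknown";
}

int main(void)
{
	printf("type %s\n", print_symbolic(2, scrub_type_strings));
	return 0;
}

The TRACE_DEFINE_ENUM wrappers matter because the mapping table is exported to userspace through the event's format file; without them an enum name would appear there as a token that trace decoders cannot resolve.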
DECLARE_EVENT_CLASS(xchk_class,
TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm,
int error),
@@ -36,10 +101,10 @@ DECLARE_EVENT_CLASS(xchk_class,
__entry->flags = sm->sm_flags;
__entry->error = error;
),
- TP_printk("dev %d:%d ino 0x%llx type %u agno %u inum %llu gen %u flags 0x%x error %d",
+ TP_printk("dev %d:%d ino 0x%llx type %s agno %u inum %llu gen %u flags 0x%x error %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
- __entry->type,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__entry->agno,
__entry->inum,
__entry->gen,
@@ -78,9 +143,9 @@ TRACE_EVENT(xchk_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u agno %u agbno %u error %d ret_ip %pS",
+ TP_printk("dev %d:%d type %s agno %u agbno %u error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->type,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__entry->agno,
__entry->bno,
__entry->error,
@@ -109,11 +174,11 @@ TRACE_EVENT(xchk_file_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino 0x%llx fork %d type %u offset %llu error %d ret_ip %pS",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %s offset %llu error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
- __entry->type,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__entry->offset,
__entry->error,
__entry->ret_ip)
@@ -144,9 +209,9 @@ DECLARE_EVENT_CLASS(xchk_block_error_class,
__entry->bno = bno;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u agno %u agbno %u ret_ip %pS",
+ TP_printk("dev %d:%d type %s agno %u agbno %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->type,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__entry->agno,
__entry->bno,
__entry->ret_ip)
@@ -176,10 +241,10 @@ DECLARE_EVENT_CLASS(xchk_ino_error_class,
__entry->type = sc->sm->sm_type;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino 0x%llx type %u ret_ip %pS",
+ TP_printk("dev %d:%d ino 0x%llx type %s ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
- __entry->type,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__entry->ret_ip)
)
@@ -213,11 +278,11 @@ DECLARE_EVENT_CLASS(xchk_fblock_error_class,
__entry->offset = offset;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino 0x%llx fork %d type %u offset %llu ret_ip %pS",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %s offset %llu ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
- __entry->type,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__entry->offset,
__entry->ret_ip)
);
@@ -244,9 +309,9 @@ TRACE_EVENT(xchk_incomplete,
__entry->type = sc->sm->sm_type;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u ret_ip %pS",
+ TP_printk("dev %d:%d type %s ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->type,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__entry->ret_ip)
);
@@ -278,10 +343,10 @@ TRACE_EVENT(xchk_btree_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pS",
+ TP_printk("dev %d:%d type %s btree %s level %d ptr %d agno %u agbno %u error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->type,
- __entry->btnum,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__entry->level,
__entry->ptr,
__entry->agno,
@@ -321,12 +386,12 @@ TRACE_EVENT(xchk_ifork_btree_op_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino 0x%llx fork %d type %u btnum %d level %d ptr %d agno %u agbno %u error %d ret_ip %pS",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %s btree %s level %d ptr %d agno %u agbno %u error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
- __entry->type,
- __entry->btnum,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__entry->level,
__entry->ptr,
__entry->agno,
@@ -360,10 +425,10 @@ TRACE_EVENT(xchk_btree_error,
__entry->ptr = cur->bc_ptrs[level];
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pS",
+ TP_printk("dev %d:%d type %s btree %s level %d ptr %d agno %u agbno %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->type,
- __entry->btnum,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__entry->level,
__entry->ptr,
__entry->agno,
@@ -400,12 +465,12 @@ TRACE_EVENT(xchk_ifork_btree_error,
__entry->ptr = cur->bc_ptrs[level];
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d ino 0x%llx fork %d type %u btnum %d level %d ptr %d agno %u agbno %u ret_ip %pS",
+ TP_printk("dev %d:%d ino 0x%llx fork %d type %s btree %s level %d ptr %d agno %u agbno %u ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->whichfork,
- __entry->type,
- __entry->btnum,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__entry->level,
__entry->ptr,
__entry->agno,
@@ -439,10 +504,10 @@ DECLARE_EVENT_CLASS(xchk_sbtree_class,
__entry->nlevels = cur->bc_nlevels;
__entry->ptr = cur->bc_ptrs[level];
),
- TP_printk("dev %d:%d type %u btnum %d agno %u agbno %u level %d nlevels %d ptr %d",
+ TP_printk("dev %d:%d type %s btree %s agno %u agbno %u level %d nlevels %d ptr %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->type,
- __entry->btnum,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__entry->agno,
__entry->bno,
__entry->level,
@@ -473,9 +538,9 @@ TRACE_EVENT(xchk_xref_error,
__entry->error = error;
__entry->ret_ip = ret_ip;
),
- TP_printk("dev %d:%d type %u xref error %d ret_ip %pF",
+ TP_printk("dev %d:%d type %s xref error %d ret_ip %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->type,
+ __print_symbolic(__entry->type, XFS_SCRUB_TYPE_STRINGS),
__entry->error,
__entry->ret_ip)
);
@@ -598,11 +663,11 @@ TRACE_EVENT(xrep_init_btblock,
__entry->agbno = agbno;
__entry->btnum = btnum;
),
- TP_printk("dev %d:%d agno %u agbno %u btnum %d",
+ TP_printk("dev %d:%d agno %u agbno %u btree %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agbno,
- __entry->btnum)
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS))
)
TRACE_EVENT(xrep_findroot_block,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 494b4338446e..e5c23948a8ab 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -10,6 +10,9 @@ extern struct bio_set xfs_ioend_bioset;
/*
* Types of I/O for bmap clustering and I/O completion tracking.
+ *
+ * This enum is used in string mapping in xfs_trace.h; please keep the
+ * TRACE_DEFINE_ENUMs for it up to date.
*/
enum {
XFS_IO_HOLE, /* covers region without any block allocation */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 404e581f1ea1..1ee8c5539fa4 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1126,9 +1126,9 @@ xfs_free_file_space(
* page could be mmap'd and iomap_zero_range doesn't do that for us.
* Writeback of the eof page will do this, albeit clumsily.
*/
- if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+ if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
- (offset + len) & ~PAGE_MASK, LLONG_MAX);
+ round_down(offset + len, PAGE_SIZE), LLONG_MAX);
}
return error;
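The fix above swaps open-coded mask arithmetic for helpers that say what they mean: offset_in_page() yields the sub-page remainder and round_down() the page-aligned start (the kernel's PAGE_MASK is ~(PAGE_SIZE - 1), which is what made the original expressions easy to misread). The arithmetic for an assumed 4096-byte page:

#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE 4096ULL	/* assumed page size for this example */

int main(void)
{
	uint64_t end = 8300;	/* offset + len, not page aligned */

	/* offset_in_page(): byte offset within the containing page. */
	uint64_t in_page = end & (EX_PAGE_SIZE - 1);		/* 108 */

	/* round_down(): file offset of the page containing 'end'. */
	uint64_t page_start = end & ~(EX_PAGE_SIZE - 1);	/* 8192 */

	if (in_page > 0)	/* EOF lands mid-page: flush that page */
		printf("write_and_wait from %llu to EOF\n",
		       (unsigned long long)page_start);
	return 0;
}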
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index d9da66c718bb..74ddf66f4cfe 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -494,7 +494,6 @@ xfs_efi_recover(
int error = 0;
xfs_extent_t *extp;
xfs_fsblock_t startblock_fsb;
- struct xfs_owner_info oinfo;
ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
@@ -526,11 +525,11 @@ xfs_efi_recover(
return error;
efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
- xfs_rmap_any_owner_update(&oinfo);
for (i = 0; i < efip->efi_format.efi_nextents; i++) {
extp = &efip->efi_format.efi_extents[i];
error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
- extp->ext_len, &oinfo, false);
+ extp->ext_len,
+ &XFS_RMAP_OINFO_ANY_OWNER, false);
if (error)
goto abort_error;
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 093c2b8d7e20..ec2e63a7963b 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -252,7 +252,7 @@ xfs_growfs_data(
if (mp->m_sb.sb_imax_pct) {
uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
do_div(icount, 100);
- mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
+ mp->m_maxicount = XFS_FSB_TO_INO(mp, icount);
} else
mp->m_maxicount = 0;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 05db9540e459..ae667ba74a1c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2184,8 +2184,6 @@ xfs_ifree_cluster(
struct xfs_icluster *xic)
{
xfs_mount_t *mp = free_ip->i_mount;
- int blks_per_cluster;
- int inodes_per_cluster;
int nbufs;
int i, j;
int ioffset;
@@ -2199,11 +2197,9 @@ xfs_ifree_cluster(
inum = xic->first_ino;
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
- blks_per_cluster = xfs_icluster_size_fsb(mp);
- inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
- nbufs = mp->m_ialloc_blks / blks_per_cluster;
+ nbufs = mp->m_ialloc_blks / mp->m_blocks_per_cluster;
- for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
+ for (j = 0; j < nbufs; j++, inum += mp->m_inodes_per_cluster) {
/*
* The allocation bitmap tells us which inodes of the chunk were
* physically allocated. Skip the cluster if an inode falls into
@@ -2211,7 +2207,7 @@ xfs_ifree_cluster(
*/
ioffset = inum - xic->first_ino;
if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
- ASSERT(ioffset % inodes_per_cluster == 0);
+ ASSERT(ioffset % mp->m_inodes_per_cluster == 0);
continue;
}
@@ -2227,7 +2223,7 @@ xfs_ifree_cluster(
* to mark all the active inodes on the buffer stale.
*/
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
- mp->m_bsize * blks_per_cluster,
+ mp->m_bsize * mp->m_blocks_per_cluster,
XBF_UNMAPPED);
if (!bp)
@@ -2242,7 +2238,7 @@ xfs_ifree_cluster(
 * want it to fail. We can achieve this by adding a write
* verifier to the buffer.
*/
- bp->b_ops = &xfs_inode_buf_ops;
+ bp->b_ops = &xfs_inode_buf_ops;
/*
* Walk the inodes already attached to the buffer and mark them
@@ -2274,7 +2270,7 @@ xfs_ifree_cluster(
* transaction stale above, which means there is no point in
* even trying to lock them.
*/
- for (i = 0; i < inodes_per_cluster; i++) {
+ for (i = 0; i < mp->m_inodes_per_cluster; i++) {
retry:
rcu_read_lock();
ip = radix_tree_lookup(&pag->pag_ici_root,
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index fba115f4103a..5001dca361e9 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -241,6 +241,32 @@ xfs_compat_ioc_bulkstat(
int done;
int error;
+ /*
+ * Output structure handling functions. Depending on the command,
+	 * either the xfs_bstat or the xfs_inogrp structures are written out
+	 * to userspace memory via bulkreq.ubuffer. Normally the compat
+ * functions and structure size are the correct ones to use ...
+ */
+ inumbers_fmt_pf inumbers_func = xfs_inumbers_fmt_compat;
+ bulkstat_one_pf bs_one_func = xfs_bulkstat_one_compat;
+ size_t bs_one_size = sizeof(struct compat_xfs_bstat);
+
+#ifdef CONFIG_X86_X32
+ if (in_x32_syscall()) {
+ /*
+ * ... but on x32 the input xfs_fsop_bulkreq has pointers
+ * which must be handled in the "compat" (32-bit) way, while
+		 * the xfs_bstat and xfs_inogrp structures follow the native
+		 * 64-bit layout convention. So adjust accordingly; otherwise
+		 * the data written out in compat layout will not match what
+ * x32 userspace expects.
+ */
+ inumbers_func = xfs_inumbers_fmt;
+ bs_one_func = xfs_bulkstat_one;
+ bs_one_size = sizeof(struct xfs_bstat);
+ }
+#endif
+
/* done = 1 if there are more stats to get and if bulkstat */
/* should be called again (unused here, but used in dmapi) */
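The restructuring above replaces per-call-site branching with a single up-front selection of formatter and record size, so the command dispatch in the next hunk stays identical across ABIs. A userspace model of that dispatch shape; the names and sizes here are made up for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef int (*fmt_fn)(const void *rec);

static int fmt_compat(const void *rec) { (void)rec; return puts("32-bit layout"); }
static int fmt_native(const void *rec) { (void)rec; return puts("64-bit layout"); }

int main(void)
{
	bool x32 = true;		/* stand-in for in_x32_syscall() */
	fmt_fn fmt = fmt_compat;	/* default: fully compat handling */
	size_t rec_size = 32;		/* illustrative record sizes */

	if (x32) {			/* pointers compat, records native */
		fmt = fmt_native;
		rec_size = 64;
	}

	printf("record size %zu: ", rec_size);
	fmt(NULL);
	return 0;
}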
@@ -272,15 +298,15 @@ xfs_compat_ioc_bulkstat(
if (cmd == XFS_IOC_FSINUMBERS_32) {
error = xfs_inumbers(mp, &inlast, &count,
- bulkreq.ubuffer, xfs_inumbers_fmt_compat);
+ bulkreq.ubuffer, inumbers_func);
} else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE_32) {
int res;
- error = xfs_bulkstat_one_compat(mp, inlast, bulkreq.ubuffer,
- sizeof(compat_xfs_bstat_t), NULL, &res);
+ error = bs_one_func(mp, inlast, bulkreq.ubuffer,
+ bs_one_size, NULL, &res);
} else if (cmd == XFS_IOC_FSBULKSTAT_32) {
error = xfs_bulkstat(mp, &inlast, &count,
- xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t),
+ bs_one_func, bs_one_size,
bulkreq.ubuffer, &done);
} else
error = -EINVAL;
@@ -336,6 +362,7 @@ xfs_compat_attrlist_by_handle(
{
int error;
attrlist_cursor_kern_t *cursor;
+ compat_xfs_fsop_attrlist_handlereq_t __user *p = arg;
compat_xfs_fsop_attrlist_handlereq_t al_hreq;
struct dentry *dentry;
char *kbuf;
@@ -370,6 +397,11 @@ xfs_compat_attrlist_by_handle(
if (error)
goto out_kfree;
+ if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
+ error = -EFAULT;
+ goto out_kfree;
+ }
+
if (copy_to_user(compat_ptr(al_hreq.buffer), kbuf, al_hreq.buflen))
error = -EFAULT;
@@ -547,8 +579,12 @@ xfs_file_compat_ioctl(
case FS_IOC_GETFSMAP:
case XFS_IOC_SCRUB_METADATA:
return xfs_file_ioctl(filp, cmd, p);
-#ifndef BROKEN_X86_ALIGNMENT
- /* These are handled fine if no alignment issues */
+#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
+ /*
+	/*
+	 * These are handled fine if there are no alignment issues. To support
+	 * x32, which uses native 64-bit alignment, we must emit these cases in
+	 * addition to the ia-32 compat set below.
+ */
case XFS_IOC_ALLOCSP:
case XFS_IOC_FREESP:
case XFS_IOC_RESVSP:
@@ -561,8 +597,16 @@ xfs_file_compat_ioctl(
case XFS_IOC_FSGROWFSDATA:
case XFS_IOC_FSGROWFSRT:
case XFS_IOC_ZERO_RANGE:
+#ifdef CONFIG_X86_X32
+ /*
+ * x32 special: this gets a different cmd number from the ia-32 compat
+ * case below; the associated data will match native 64-bit alignment.
+ */
+ case XFS_IOC_SWAPEXT:
+#endif
return xfs_file_ioctl(filp, cmd, p);
-#else
+#endif
+#if defined(BROKEN_X86_ALIGNMENT)
case XFS_IOC_ALLOCSP_32:
case XFS_IOC_FREESP_32:
case XFS_IOC_ALLOCSP64_32:
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index e9508ba01ed1..942e4aa5e729 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -167,20 +167,18 @@ xfs_bulkstat_ichunk_ra(
{
xfs_agblock_t agbno;
struct blk_plug plug;
- int blks_per_cluster;
- int inodes_per_cluster;
int i; /* inode chunk index */
agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);
- blks_per_cluster = xfs_icluster_size_fsb(mp);
- inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
blk_start_plug(&plug);
for (i = 0; i < XFS_INODES_PER_CHUNK;
- i += inodes_per_cluster, agbno += blks_per_cluster) {
- if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) {
- xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster,
- &xfs_inode_buf_ops);
+ i += mp->m_inodes_per_cluster, agbno += mp->m_blocks_per_cluster) {
+ if (xfs_inobt_maskn(i, mp->m_inodes_per_cluster) &
+ ~irec->ir_free) {
+ xfs_btree_reada_bufs(mp, agno, agbno,
+ mp->m_blocks_per_cluster,
+ &xfs_inode_buf_ops);
}
}
blk_finish_plug(&plug);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 1fc9e9042e0e..9fe88d125f0a 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3850,7 +3850,6 @@ xlog_recover_do_icreate_pass2(
unsigned int count;
unsigned int isize;
xfs_agblock_t length;
- int blks_per_cluster;
int bb_per_cluster;
int cancel_count;
int nbufs;
@@ -3918,14 +3917,13 @@ xlog_recover_do_icreate_pass2(
* buffers for cancellation so we don't overwrite anything written after
* a cancellation.
*/
- blks_per_cluster = xfs_icluster_size_fsb(mp);
- bb_per_cluster = XFS_FSB_TO_BB(mp, blks_per_cluster);
- nbufs = length / blks_per_cluster;
+ bb_per_cluster = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
+ nbufs = length / mp->m_blocks_per_cluster;
for (i = 0, cancel_count = 0; i < nbufs; i++) {
xfs_daddr_t daddr;
daddr = XFS_AGB_TO_DADDR(mp, agno,
- agbno + i * blks_per_cluster);
+ agbno + i * mp->m_blocks_per_cluster);
if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
cancel_count++;
}
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 02d15098dbee..b4d8c318be3c 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -798,6 +798,10 @@ xfs_mountfs(
if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
mp->m_inode_cluster_size = new_size;
}
+ mp->m_blocks_per_cluster = xfs_icluster_size_fsb(mp);
+ mp->m_inodes_per_cluster = XFS_FSB_TO_INO(mp, mp->m_blocks_per_cluster);
+ mp->m_cluster_align = xfs_ialloc_cluster_alignment(mp);
+ mp->m_cluster_align_inodes = XFS_FSB_TO_INO(mp, mp->m_cluster_align);
/*
* If enabled, sparse inode chunk alignment is expected to match the
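Caching these four products at mount time turns the repeated xfs_icluster_size_fsb() calls and shift chains in the hot paths touched by this patch into plain field loads. Worked through for an assumed geometry (4096-byte blocks, 512-byte inodes, 8192-byte inode clusters; no real superblock implied):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t blocksize = 4096;
	uint32_t inodesize = 512;
	uint32_t cluster_size = 8192;

	uint32_t blocks_per_cluster = cluster_size / blocksize;	/* 2 */
	uint32_t inodes_per_block = blocksize / inodesize;		/* 8 */
	uint32_t inodes_per_cluster =
		blocks_per_cluster * inodes_per_block;			/* 16 */

	printf("%u blocks/cluster, %u inodes/cluster\n",
	       blocks_per_cluster, inodes_per_cluster);
	return 0;
}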
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 7964513c3128..7daafe064af8 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -89,6 +89,13 @@ typedef struct xfs_mount {
int m_logbsize; /* size of each log buffer */
uint m_rsumlevels; /* rt summary levels */
uint m_rsumsize; /* size of rt summary, bytes */
+ /*
+ * Optional cache of rt summary level per bitmap block with the
+ * invariant that m_rsum_cache[bbno] <= the minimum i for which
+ * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip
+ * inode lock.
+ */
+ uint8_t *m_rsum_cache;
struct xfs_inode *m_rbmip; /* pointer to bitmap inode */
struct xfs_inode *m_rsumip; /* pointer to summary inode */
struct xfs_inode *m_rootip; /* pointer to root directory */
@@ -101,6 +108,10 @@ typedef struct xfs_mount {
uint8_t m_agno_log; /* log #ag's */
uint8_t m_agino_log; /* #bits for agino in inum */
uint m_inode_cluster_size;/* min inode buf size */
+ unsigned int m_inodes_per_cluster;
+ unsigned int m_blocks_per_cluster;
+ unsigned int m_cluster_align;
+ unsigned int m_cluster_align_inodes;
uint m_blockmask; /* sb_blocksize-1 */
uint m_blockwsize; /* sb_blocksize in words */
uint m_blockwmask; /* blockwsize-1 */
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 73a1d77ec187..3091e4bc04ef 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -40,7 +40,7 @@ xfs_fill_statvfs_from_dquot(
statp->f_files = limit;
statp->f_ffree =
(statp->f_files > dqp->q_res_icount) ?
- (statp->f_ffree - dqp->q_res_icount) : 0;
+ (statp->f_files - dqp->q_res_icount) : 0;
}
}
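The one-line fix above is a transcription bug: the dquot's inode reservation was being subtracted from f_ffree (already overwritten with other data) instead of from the quota's file limit in f_files. The intended arithmetic, with made-up numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t f_files = 1000;	/* inode limit from the dquot */
	uint64_t icount = 400;		/* inodes counted against it */

	/* Fixed computation: free = limit - used, clamped at zero. */
	uint64_t f_ffree = (f_files > icount) ? f_files - icount : 0;

	printf("f_ffree = %llu\n", (unsigned long long)f_ffree);	/* 600 */
	return 0;
}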
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 322a852ce284..c5b4fa004ca4 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -623,54 +623,47 @@ out:
}
/*
- * Remap parts of a file's data fork after a successful CoW.
+ * Remap part of the CoW fork into the data fork.
+ *
+ * We aim to remap the range starting at @offset_fsb and ending at @end_fsb
+ * into the data fork; this function will remap what it can (at the end of the
+ * range) and update @end_fsb appropriately. Each remap gets its own
+ * transaction because we can end up merging and splitting bmbt blocks for
+ * every remap operation and we'd like to keep the block reservation
+ * requirements as low as possible.
*/
-int
-xfs_reflink_end_cow(
- struct xfs_inode *ip,
- xfs_off_t offset,
- xfs_off_t count)
+STATIC int
+xfs_reflink_end_cow_extent(
+ struct xfs_inode *ip,
+ xfs_fileoff_t offset_fsb,
+ xfs_fileoff_t *end_fsb)
{
- struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
- struct xfs_bmbt_irec got, del;
- struct xfs_trans *tp;
- xfs_fileoff_t offset_fsb;
- xfs_fileoff_t end_fsb;
- int error;
- unsigned int resblks;
- xfs_filblks_t rlen;
- struct xfs_iext_cursor icur;
-
- trace_xfs_reflink_end_cow(ip, offset, count);
+ struct xfs_bmbt_irec got, del;
+ struct xfs_iext_cursor icur;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+ xfs_filblks_t rlen;
+ unsigned int resblks;
+ int error;
/* No COW extents? That's easy! */
- if (ifp->if_bytes == 0)
+ if (ifp->if_bytes == 0) {
+ *end_fsb = offset_fsb;
return 0;
+ }
- offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
- end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
+ resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
+ XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
+ if (error)
+ return error;
/*
- * Start a rolling transaction to switch the mappings. We're
- * unlikely ever to have to remap 16T worth of single-block
- * extents, so just cap the worst case extent count to 2^32-1.
- * Stick a warning in just in case, and avoid 64-bit division.
+ * Lock the inode. We have to ijoin without automatic unlock because
+ * the lead transaction is the refcountbt record deletion; the data
+ * fork update follows as a deferred log item.
*/
- BUILD_BUG_ON(MAX_RW_COUNT > UINT_MAX);
- if (end_fsb - offset_fsb > UINT_MAX) {
- error = -EFSCORRUPTED;
- xfs_force_shutdown(ip->i_mount, SHUTDOWN_CORRUPT_INCORE);
- ASSERT(0);
- goto out;
- }
- resblks = XFS_NEXTENTADD_SPACE_RES(ip->i_mount,
- (unsigned int)(end_fsb - offset_fsb),
- XFS_DATA_FORK);
- error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
- resblks, 0, XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
- if (error)
- goto out;
-
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
@@ -679,80 +672,131 @@ xfs_reflink_end_cow(
* left by the time I/O completes for the loser of the race. In that
* case we are done.
*/
- if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
+ if (!xfs_iext_lookup_extent_before(ip, ifp, end_fsb, &icur, &got) ||
+ got.br_startoff + got.br_blockcount <= offset_fsb) {
+ *end_fsb = offset_fsb;
goto out_cancel;
+ }
- /* Walk backwards until we're out of the I/O range... */
- while (got.br_startoff + got.br_blockcount > offset_fsb) {
- del = got;
- xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);
-
- /* Extent delete may have bumped ext forward */
- if (!del.br_blockcount)
- goto prev_extent;
+ /*
+ * Structure copy @got into @del, then trim @del to the range that we
+ * were asked to remap. We preserve @got for the eventual CoW fork
+ * deletion; from now on @del represents the mapping that we're
+ * actually remapping.
+ */
+ del = got;
+ xfs_trim_extent(&del, offset_fsb, *end_fsb - offset_fsb);
- /*
- * Only remap real extent that contain data. With AIO
- * speculatively preallocations can leak into the range we
- * are called upon, and we need to skip them.
- */
- if (!xfs_bmap_is_real_extent(&got))
- goto prev_extent;
+ ASSERT(del.br_blockcount > 0);
- /* Unmap the old blocks in the data fork. */
- ASSERT(tp->t_firstblock == NULLFSBLOCK);
- rlen = del.br_blockcount;
- error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1);
- if (error)
- goto out_cancel;
+ /*
+ * Only remap real extents that contain data. With AIO, speculative
+ * preallocations can leak into the range we are called upon, and we
+ * need to skip them.
+ */
+ if (!xfs_bmap_is_real_extent(&got)) {
+ *end_fsb = del.br_startoff;
+ goto out_cancel;
+ }
- /* Trim the extent to whatever got unmapped. */
- if (rlen) {
- xfs_trim_extent(&del, del.br_startoff + rlen,
- del.br_blockcount - rlen);
- }
- trace_xfs_reflink_cow_remap(ip, &del);
+ /* Unmap the old blocks in the data fork. */
+ rlen = del.br_blockcount;
+ error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1);
+ if (error)
+ goto out_cancel;
- /* Free the CoW orphan record. */
- error = xfs_refcount_free_cow_extent(tp, del.br_startblock,
- del.br_blockcount);
- if (error)
- goto out_cancel;
+ /* Trim the extent to whatever got unmapped. */
+ xfs_trim_extent(&del, del.br_startoff + rlen, del.br_blockcount - rlen);
+ trace_xfs_reflink_cow_remap(ip, &del);
- /* Map the new blocks into the data fork. */
- error = xfs_bmap_map_extent(tp, ip, &del);
- if (error)
- goto out_cancel;
+ /* Free the CoW orphan record. */
+ error = xfs_refcount_free_cow_extent(tp, del.br_startblock,
+ del.br_blockcount);
+ if (error)
+ goto out_cancel;
- /* Charge this new data fork mapping to the on-disk quota. */
- xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
- (long)del.br_blockcount);
+ /* Map the new blocks into the data fork. */
+ error = xfs_bmap_map_extent(tp, ip, &del);
+ if (error)
+ goto out_cancel;
- /* Remove the mapping from the CoW fork. */
- xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
+ /* Charge this new data fork mapping to the on-disk quota. */
+ xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
+ (long)del.br_blockcount);
- error = xfs_defer_finish(&tp);
- if (error)
- goto out_cancel;
- if (!xfs_iext_get_extent(ifp, &icur, &got))
- break;
- continue;
-prev_extent:
- if (!xfs_iext_prev_extent(ifp, &icur, &got))
- break;
- }
+ /* Remove the mapping from the CoW fork. */
+ xfs_bmap_del_extent_cow(ip, &icur, &got, &del);
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
- goto out;
+ return error;
+
+ /* Update the caller about how much progress we made. */
+ *end_fsb = del.br_startoff;
return 0;
out_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
-out:
- trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
+ return error;
+}
+
+/*
+ * Remap parts of a file's data fork after a successful CoW.
+ */
+int
+xfs_reflink_end_cow(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ xfs_off_t count)
+{
+ xfs_fileoff_t offset_fsb;
+ xfs_fileoff_t end_fsb;
+ int error = 0;
+
+ trace_xfs_reflink_end_cow(ip, offset, count);
+
+ offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
+ end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);
+
+ /*
+ * Walk backwards until we're out of the I/O range. The loop function
+ * repeatedly cycles the ILOCK to allocate one transaction per remapped
+ * extent.
+ *
+	 * If we're being called by writeback then the pages will still
+ * have PageWriteback set, which prevents races with reflink remapping
+ * and truncate. Reflink remapping prevents races with writeback by
+ * taking the iolock and mmaplock before flushing the pages and
+ * remapping, which means there won't be any further writeback or page
+ * cache dirtying until the reflink completes.
+ *
+ * We should never have two threads issuing writeback for the same file
+	 * region. There are also post-eof checks in the writeback
+ * preparation code so that we don't bother writing out pages that are
+ * about to be truncated.
+ *
+ * If we're being called as part of directio write completion, the dio
+ * count is still elevated, which reflink and truncate will wait for.
+ * Reflink remapping takes the iolock and mmaplock and waits for
+ * pending dio to finish, which should prevent any directio until the
+ * remap completes. Multiple concurrent directio writes to the same
+ * region are handled by end_cow processing only occurring for the
+ * threads which succeed; the outcome of multiple overlapping direct
+ * writes is not well defined anyway.
+ *
+ * It's possible that a buffered write and a direct write could collide
+ * here (the buffered write stumbles in after the dio flushes and
+ * invalidates the page cache and immediately queues writeback), but we
+ * have never supported this 100%. If either disk write succeeds the
+ * blocks will be remapped.
+ */
+ while (end_fsb > offset_fsb && !error)
+ error = xfs_reflink_end_cow_extent(ip, offset_fsb, &end_fsb);
+
+ if (error)
+ trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
return error;
}
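The loop contract here is worth spelling out: each call to the helper remaps at most one extent at the tail of [offset_fsb, end_fsb) and pulls *end_fsb back to report progress, so the caller needs nothing beyond the while condition. A toy userspace model of that backwards-walking cursor, using fixed 10-unit extents purely for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t fileoff_t;

/* Model of xfs_reflink_end_cow_extent(): handle one extent at the end
 * of [start, *end) and pull *end back to report progress. */
static int end_cow_extent(fileoff_t start, fileoff_t *end)
{
	fileoff_t extent_start = (*end >= 10) ? *end - 10 : start;

	if (extent_start < start)
		extent_start = start;
	printf("remapped [%llu, %llu)\n",
	       (unsigned long long)extent_start,
	       (unsigned long long)*end);
	*end = extent_start;	/* caller resumes below this point */
	return 0;
}

int main(void)
{
	fileoff_t start = 0, end = 25;
	int error = 0;

	/* The caller's loop from the patch: walk backwards, one
	 * transaction-sized step per extent, until the range is done. */
	while (end > start && !error)
		error = end_cow_extent(start, &end);
	return error;
}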
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 926ed314ffba..ac0fcdad0c4e 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -64,8 +64,12 @@ xfs_rtany_summary(
int log; /* loop counter, log2 of ext. size */
xfs_suminfo_t sum; /* summary data */
+ /* There are no extents at levels < m_rsum_cache[bbno]. */
+ if (mp->m_rsum_cache && low < mp->m_rsum_cache[bbno])
+ low = mp->m_rsum_cache[bbno];
+
/*
- * Loop over logs of extent sizes. Order is irrelevant.
+ * Loop over logs of extent sizes.
*/
for (log = low; log <= high; log++) {
/*
@@ -80,13 +84,17 @@ xfs_rtany_summary(
*/
if (sum) {
*stat = 1;
- return 0;
+ goto out;
}
}
/*
* Found nothing, return failure.
*/
*stat = 0;
+out:
+ /* There were no extents at levels < log. */
+ if (mp->m_rsum_cache && log > mp->m_rsum_cache[bbno])
+ mp->m_rsum_cache[bbno] = log;
return 0;
}
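The cache is a per-bitmap-block monotone lower bound: reads let the search skip levels already proven empty, and every search raises the bound as far as its own result allows. A compact userspace model of that read-then-raise discipline; the sizes and indices are illustrative:

#include <stdint.h>
#include <stdio.h>

#define LEVELS	10
#define BBNO	0

static uint8_t rsum_cache[1];	/* one bitmap block in this model */
static int sum[LEVELS];		/* summary count per level for it */

/* Search levels [low, high] for a nonzero summary count, skipping
 * levels the cache has proven empty, then raise the cached bound. */
static int any_summary(int low, int high, int *stat)
{
	int log;

	if (low < rsum_cache[BBNO])
		low = rsum_cache[BBNO];	/* nothing below the bound */

	*stat = 0;
	for (log = low; log <= high; log++) {
		if (sum[log]) {
			*stat = 1;
			break;
		}
	}
	if (log > rsum_cache[BBNO])
		rsum_cache[BBNO] = log;	/* levels < log are known empty */
	return 0;
}

int main(void)
{
	int stat;

	sum[4] = 1;			/* first free extent at level 4 */
	any_summary(0, LEVELS - 1, &stat);
	printf("found=%d cached bound=%u\n", stat, rsum_cache[BBNO]);
	any_summary(0, LEVELS - 1, &stat);	/* now starts at level 4 */
	return 0;
}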
@@ -853,6 +861,21 @@ out_trans_cancel:
return error;
}
+static void
+xfs_alloc_rsum_cache(
+ xfs_mount_t *mp, /* file system mount structure */
+ xfs_extlen_t rbmblocks) /* number of rt bitmap blocks */
+{
+ /*
+ * The rsum cache is initialized to all zeroes, which is trivially a
+ * lower bound on the minimum level with any free extents. We can
+ * continue without the cache if it couldn't be allocated.
+ */
+ mp->m_rsum_cache = kmem_zalloc_large(rbmblocks, KM_SLEEP);
+ if (!mp->m_rsum_cache)
+ xfs_warn(mp, "could not allocate realtime summary cache");
+}
+
/*
* Visible (exported) functions.
*/
@@ -881,6 +904,7 @@ xfs_growfs_rt(
xfs_extlen_t rsumblocks; /* current number of rt summary blks */
xfs_sb_t *sbp; /* old superblock */
xfs_fsblock_t sumbno; /* summary block number */
+ uint8_t *rsum_cache; /* old summary cache */
sbp = &mp->m_sb;
/*
@@ -937,6 +961,11 @@ xfs_growfs_rt(
error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks, mp->m_rsumip);
if (error)
return error;
+
+ rsum_cache = mp->m_rsum_cache;
+ if (nrbmblocks != sbp->sb_rbmblocks)
+ xfs_alloc_rsum_cache(mp, nrbmblocks);
+
/*
* Allocate a new (fake) mount/sb.
*/
@@ -1062,6 +1091,20 @@ error_cancel:
*/
kmem_free(nmp);
+ /*
+ * If we had to allocate a new rsum_cache, we either need to free the
+ * old one (if we succeeded) or free the new one and restore the old one
+ * (if there was an error).
+ */
+ if (rsum_cache != mp->m_rsum_cache) {
+ if (error) {
+ kmem_free(mp->m_rsum_cache);
+ mp->m_rsum_cache = rsum_cache;
+ } else {
+ kmem_free(rsum_cache);
+ }
+ }
+
return error;
}
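The error handling above follows a keep-the-loser-around pattern: stash the old cache pointer, optimistically install a new one, and only once the grow's fate is known free whichever buffer lost. A small userspace sketch of the same shape; the names are made up, and the real code sizes the cache from rt bitmap blocks:

#include <stdlib.h>

static int grow(char **cache, size_t old_len, size_t new_len, int fail)
{
	char *old = *cache;
	int error;

	if (new_len != old_len)
		*cache = calloc(new_len, 1);	/* NULL is fine: optional */

	error = fail ? -1 : 0;			/* the grow operation itself */

	if (*cache != old) {
		if (error) {
			free(*cache);		/* drop the new cache ... */
			*cache = old;		/* ... and restore the old */
		} else {
			free(old);
		}
	}
	return error;
}

int main(void)
{
	char *cache = calloc(4, 1);

	grow(&cache, 4, 8, 1);	/* failure: original cache survives */
	grow(&cache, 4, 8, 0);	/* success: replaced by the larger one */
	free(cache);
	return 0;
}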
@@ -1187,8 +1230,8 @@ xfs_rtmount_init(
}
/*
- * Get the bitmap and summary inodes into the mount structure
- * at mount time.
+ * Get the bitmap and summary inodes and the summary cache into the mount
+ * structure at mount time.
*/
int /* error */
xfs_rtmount_inodes(
@@ -1198,19 +1241,18 @@ xfs_rtmount_inodes(
xfs_sb_t *sbp;
sbp = &mp->m_sb;
- if (sbp->sb_rbmino == NULLFSINO)
- return 0;
error = xfs_iget(mp, NULL, sbp->sb_rbmino, 0, 0, &mp->m_rbmip);
if (error)
return error;
ASSERT(mp->m_rbmip != NULL);
- ASSERT(sbp->sb_rsumino != NULLFSINO);
+
error = xfs_iget(mp, NULL, sbp->sb_rsumino, 0, 0, &mp->m_rsumip);
if (error) {
xfs_irele(mp->m_rbmip);
return error;
}
ASSERT(mp->m_rsumip != NULL);
+ xfs_alloc_rsum_cache(mp, sbp->sb_rbmblocks);
return 0;
}
@@ -1218,6 +1260,7 @@ void
xfs_rtunmount_inodes(
struct xfs_mount *mp)
{
+ kmem_free(mp->m_rsum_cache);
if (mp->m_rbmip)
xfs_irele(mp->m_rbmip);
if (mp->m_rsumip)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index d3e6cd063688..c9097cb0b955 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -38,6 +38,7 @@
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
+#include "xfs_defer.h"
#include <linux/namei.h>
#include <linux/dax.h>
@@ -607,7 +608,7 @@ xfs_set_inode_alloc(
}
/* Get the last possible inode in the filesystem */
- agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
+ agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
/*
@@ -1149,7 +1150,7 @@ xfs_fs_statfs(
statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
statp->f_bavail = statp->f_bfree;
- fakeinos = statp->f_bfree << sbp->sb_inopblog;
+ fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
if (mp->m_maxicount)
statp->f_files = min_t(typeof(statp->f_files),
@@ -2085,11 +2086,6 @@ init_xfs_fs(void)
printk(KERN_INFO XFS_VERSION_STRING " with "
XFS_BUILD_OPTIONS " enabled\n");
- xfs_extent_free_init_defer_op();
- xfs_rmap_update_init_defer_op();
- xfs_refcount_update_init_defer_op();
- xfs_bmap_update_init_defer_op();
-
xfs_dir_startup();
error = xfs_init_zones();
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index a3e98c64b6e3..b2c1177c717f 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -192,6 +192,7 @@ xfs_symlink(
pathlen = strlen(target_path);
if (pathlen >= XFS_SYMLINK_MAXLEN) /* total string too long */
return -ENAMETOOLONG;
+ ASSERT(pathlen > 0);
udqp = gdqp = NULL;
prid = xfs_get_initial_prid(dp);
@@ -378,6 +379,12 @@ out_release_inode:
/*
* Free a symlink that has blocks associated with it.
+ *
+ * Note: zero-length symlinks are not allowed to exist. When we set the size
+ * to zero, we also change the inode to a regular file so that it does not get
+ * written to disk as a zero-length symlink. The inode is already on the
+ * unlinked list, so userspace cannot find it anymore; the change is not user
+ * visible, but it lets us catch corrupt zero-length symlinks in the verifiers.
*/
STATIC int
xfs_inactive_symlink_rmt(
@@ -412,13 +419,14 @@ xfs_inactive_symlink_rmt(
xfs_trans_ijoin(tp, ip, 0);
/*
- * Lock the inode, fix the size, and join it to the transaction.
- * Hold it so in the normal path, we still have it locked for
- * the second transaction. In the error paths we need it
+	 * Lock the inode, fix the size, turn it into a regular file, and join it
+	 * to the transaction. Hold it so that in the normal path we still have it
+	 * locked for the second transaction. In the error paths we need it
* held so the cancel won't rele it, see below.
*/
size = (int)ip->i_d.di_size;
ip->i_d.di_size = 0;
+ VFS_I(ip)->i_mode = (VFS_I(ip)->i_mode & ~S_IFMT) | S_IFREG;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
/*
* Find the block(s) so we can inval and unmap them.
@@ -494,17 +502,10 @@ xfs_inactive_symlink(
return -EIO;
xfs_ilock(ip, XFS_ILOCK_EXCL);
-
- /*
- * Zero length symlinks _can_ exist.
- */
pathlen = (int)ip->i_d.di_size;
- if (!pathlen) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return 0;
- }
+ ASSERT(pathlen);
- if (pathlen < 0 || pathlen > XFS_SYMLINK_MAXLEN) {
+ if (pathlen <= 0 || pathlen > XFS_SYMLINK_MAXLEN) {
xfs_alert(mp, "%s: inode (0x%llx) bad symlink length (%d)",
__func__, (unsigned long long)ip->i_ino, pathlen);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -512,12 +513,12 @@ xfs_inactive_symlink(
return -EFSCORRUPTED;
}
+ /*
+ * Inline fork state gets removed by xfs_difree() so we have nothing to
+ * do here in that case.
+ */
if (ip->i_df.if_flags & XFS_IFINLINE) {
- if (ip->i_df.if_bytes > 0)
- xfs_idata_realloc(ip, -(ip->i_df.if_bytes),
- XFS_DATA_FORK);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- ASSERT(ip->i_df.if_bytes == 0);
return 0;
}
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 8a6532aae779..6fcc893dfc91 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -640,6 +640,16 @@ DEFINE_INODE_EVENT(xfs_inode_set_cowblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_clear_cowblocks_tag);
DEFINE_INODE_EVENT(xfs_inode_free_cowblocks_invalid);
+/*
+ * ftrace's __print_symbolic requires that all enum values be wrapped in the
+ * TRACE_DEFINE_ENUM macro so that the enum value can be encoded in the ftrace
+ * ring buffer. Somehow this was only worth mentioning in the ftrace sample
+ * code.
+ */
+TRACE_DEFINE_ENUM(PE_SIZE_PTE);
+TRACE_DEFINE_ENUM(PE_SIZE_PMD);
+TRACE_DEFINE_ENUM(PE_SIZE_PUD);
+
TRACE_EVENT(xfs_filemap_fault,
TP_PROTO(struct xfs_inode *ip, enum page_entry_size pe_size,
bool write_fault),
@@ -1208,6 +1218,12 @@ DEFINE_EVENT(xfs_readpage_class, name, \
DEFINE_READPAGE_EVENT(xfs_vm_readpage);
DEFINE_READPAGE_EVENT(xfs_vm_readpages);
+TRACE_DEFINE_ENUM(XFS_IO_HOLE);
+TRACE_DEFINE_ENUM(XFS_IO_DELALLOC);
+TRACE_DEFINE_ENUM(XFS_IO_UNWRITTEN);
+TRACE_DEFINE_ENUM(XFS_IO_OVERWRITE);
+TRACE_DEFINE_ENUM(XFS_IO_COW);
+
DECLARE_EVENT_CLASS(xfs_imap_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
int type, struct xfs_bmbt_irec *irec),
@@ -1885,11 +1901,11 @@ TRACE_EVENT(xfs_dir2_leafn_moveents,
{ 0, "target" }, \
{ 1, "temp" }
-#define XFS_INODE_FORMAT_STR \
- { 0, "invalid" }, \
- { 1, "local" }, \
- { 2, "extent" }, \
- { 3, "btree" }
+TRACE_DEFINE_ENUM(XFS_DINODE_FMT_DEV);
+TRACE_DEFINE_ENUM(XFS_DINODE_FMT_LOCAL);
+TRACE_DEFINE_ENUM(XFS_DINODE_FMT_EXTENTS);
+TRACE_DEFINE_ENUM(XFS_DINODE_FMT_BTREE);
+TRACE_DEFINE_ENUM(XFS_DINODE_FMT_UUID);
DECLARE_EVENT_CLASS(xfs_swap_extent_class,
TP_PROTO(struct xfs_inode *ip, int which),
@@ -2178,6 +2194,14 @@ DEFINE_DISCARD_EVENT(xfs_discard_exclude);
DEFINE_DISCARD_EVENT(xfs_discard_busy);
/* btree cursor events */
+TRACE_DEFINE_ENUM(XFS_BTNUM_BNOi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_CNTi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_BMAPi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_INOi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_FINOi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_RMAPi);
+TRACE_DEFINE_ENUM(XFS_BTNUM_REFCi);
+
DECLARE_EVENT_CLASS(xfs_btree_cur_class,
TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp),
TP_ARGS(cur, level, bp),
@@ -2197,9 +2221,9 @@ DECLARE_EVENT_CLASS(xfs_btree_cur_class,
__entry->ptr = cur->bc_ptrs[level];
__entry->daddr = bp ? bp->b_bn : -1;
),
- TP_printk("dev %d:%d btnum %d level %d/%d ptr %d daddr 0x%llx",
+ TP_printk("dev %d:%d btree %s level %d/%d ptr %d daddr 0x%llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->btnum,
+ __print_symbolic(__entry->btnum, XFS_BTNUM_STRINGS),
__entry->level,
__entry->nlevels,
__entry->ptr,
@@ -2276,7 +2300,7 @@ DECLARE_EVENT_CLASS(xfs_defer_pending_class,
),
TP_fast_assign(
__entry->dev = mp ? mp->m_super->s_dev : 0;
- __entry->type = dfp->dfp_type->type;
+ __entry->type = dfp->dfp_type;
__entry->intent = dfp->dfp_intent;
__entry->committed = dfp->dfp_done != NULL;
__entry->nr = dfp->dfp_count;
@@ -2405,7 +2429,7 @@ DEFINE_BMAP_FREE_DEFERRED_EVENT(xfs_agfl_free_deferred);
DECLARE_EVENT_CLASS(xfs_rmap_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten,
- struct xfs_owner_info *oinfo),
+ const struct xfs_owner_info *oinfo),
TP_ARGS(mp, agno, agbno, len, unwritten, oinfo),
TP_STRUCT__entry(
__field(dev_t, dev)
@@ -2440,7 +2464,7 @@ DECLARE_EVENT_CLASS(xfs_rmap_class,
DEFINE_EVENT(xfs_rmap_class, name, \
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten, \
- struct xfs_owner_info *oinfo), \
+ const struct xfs_owner_info *oinfo), \
TP_ARGS(mp, agno, agbno, len, unwritten, oinfo))
/* simple AG-based error/%ip tracepoint class */
@@ -2610,10 +2634,9 @@ DEFINE_AG_ERROR_EVENT(xfs_ag_resv_init_error);
#define DEFINE_AG_EXTENT_EVENT(name) DEFINE_DISCARD_EVENT(name)
/* ag btree lookup tracepoint class */
-#define XFS_AG_BTREE_CMP_FORMAT_STR \
- { XFS_LOOKUP_EQ, "eq" }, \
- { XFS_LOOKUP_LE, "le" }, \
- { XFS_LOOKUP_GE, "ge" }
+TRACE_DEFINE_ENUM(XFS_LOOKUP_EQi);
+TRACE_DEFINE_ENUM(XFS_LOOKUP_LEi);
+TRACE_DEFINE_ENUM(XFS_LOOKUP_GEi);
DECLARE_EVENT_CLASS(xfs_ag_btree_lookup_class,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
xfs_agblock_t agbno, xfs_lookup_t dir),
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index a0c5dbda18aa..c6e1c5704a8c 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -223,13 +223,13 @@ void xfs_trans_dirty_buf(struct xfs_trans *, struct xfs_buf *);
bool xfs_trans_buf_is_dirty(struct xfs_buf *bp);
void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
-void xfs_extent_free_init_defer_op(void);
struct xfs_efd_log_item *xfs_trans_get_efd(struct xfs_trans *,
struct xfs_efi_log_item *,
uint);
int xfs_trans_free_extent(struct xfs_trans *,
struct xfs_efd_log_item *, xfs_fsblock_t,
- xfs_extlen_t, struct xfs_owner_info *,
+ xfs_extlen_t,
+ const struct xfs_owner_info *,
bool);
int xfs_trans_commit(struct xfs_trans *);
int xfs_trans_roll(struct xfs_trans **);
@@ -248,7 +248,6 @@ extern kmem_zone_t *xfs_trans_zone;
/* rmap updates */
enum xfs_rmap_intent_type;
-void xfs_rmap_update_init_defer_op(void);
struct xfs_rud_log_item *xfs_trans_get_rud(struct xfs_trans *tp,
struct xfs_rui_log_item *ruip);
int xfs_trans_log_finish_rmap_update(struct xfs_trans *tp,
@@ -260,7 +259,6 @@ int xfs_trans_log_finish_rmap_update(struct xfs_trans *tp,
/* refcount updates */
enum xfs_refcount_intent_type;
-void xfs_refcount_update_init_defer_op(void);
struct xfs_cud_log_item *xfs_trans_get_cud(struct xfs_trans *tp,
struct xfs_cui_log_item *cuip);
int xfs_trans_log_finish_refcount_update(struct xfs_trans *tp,
@@ -272,7 +270,6 @@ int xfs_trans_log_finish_refcount_update(struct xfs_trans *tp,
/* mapping updates */
enum xfs_bmap_intent_type;
-void xfs_bmap_update_init_defer_op(void);
struct xfs_bud_log_item *xfs_trans_get_bud(struct xfs_trans *tp,
struct xfs_bui_log_item *buip);
int xfs_trans_log_finish_bmap_update(struct xfs_trans *tp,
diff --git a/fs/xfs/xfs_trans_bmap.c b/fs/xfs/xfs_trans_bmap.c
index 741c558b2179..11cff449d055 100644
--- a/fs/xfs/xfs_trans_bmap.c
+++ b/fs/xfs/xfs_trans_bmap.c
@@ -17,6 +17,7 @@
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_inode.h"
+#include "xfs_defer.h"
/*
* This routine is called to allocate a "bmap update done"
@@ -220,8 +221,7 @@ xfs_bmap_update_cancel_item(
kmem_free(bmap);
}
-static const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
- .type = XFS_DEFER_OPS_TYPE_BMAP,
+const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
.max_items = XFS_BUI_MAX_FAST_EXTENTS,
.diff_items = xfs_bmap_update_diff_items,
.create_intent = xfs_bmap_update_create_intent,
@@ -231,10 +231,3 @@ static const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
.finish_item = xfs_bmap_update_finish_item,
.cancel_item = xfs_bmap_update_cancel_item,
};
-
-/* Register the deferred op type. */
-void
-xfs_bmap_update_init_defer_op(void)
-{
- xfs_defer_init_op_type(&xfs_bmap_update_defer_type);
-}
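/*
 * Editor's note -- with the runtime registration calls removed here and in
 * the sibling files below, the defer op dispatch is presumably built
 * statically on the consumer side; a hedged sketch of what xfs_defer.c can
 * now do with the exported const structures (array name hypothetical):
 *
 *	static const struct xfs_defer_op_type *defer_op_types[] = {
 *		[XFS_DEFER_OPS_TYPE_BMAP]	= &xfs_bmap_update_defer_type,
 *		[XFS_DEFER_OPS_TYPE_REFCOUNT]	= &xfs_refcount_update_defer_type,
 *		[XFS_DEFER_OPS_TYPE_RMAP]	= &xfs_rmap_update_defer_type,
 *		[XFS_DEFER_OPS_TYPE_FREE]	= &xfs_extent_free_defer_type,
 *		[XFS_DEFER_OPS_TYPE_AGFL_FREE]	= &xfs_agfl_free_defer_type,
 *	};
 */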
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index 855c0b651fd4..0710434eb240 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -18,6 +18,7 @@
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"
+#include "xfs_defer.h"
/*
* This routine is called to allocate an "extent free done"
@@ -52,19 +53,20 @@ xfs_trans_get_efd(struct xfs_trans *tp,
*/
int
xfs_trans_free_extent(
- struct xfs_trans *tp,
- struct xfs_efd_log_item *efdp,
- xfs_fsblock_t start_block,
- xfs_extlen_t ext_len,
- struct xfs_owner_info *oinfo,
- bool skip_discard)
+ struct xfs_trans *tp,
+ struct xfs_efd_log_item *efdp,
+ xfs_fsblock_t start_block,
+ xfs_extlen_t ext_len,
+ const struct xfs_owner_info *oinfo,
+ bool skip_discard)
{
- struct xfs_mount *mp = tp->t_mountp;
- uint next_extent;
- xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, start_block);
- xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, start_block);
- struct xfs_extent *extp;
- int error;
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_extent *extp;
+ uint next_extent;
+ xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, start_block);
+ xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, start_block);
+ int error;
trace_xfs_bmap_free_deferred(tp->t_mountp, agno, 0, agbno, ext_len);
@@ -206,8 +208,7 @@ xfs_extent_free_cancel_item(
kmem_free(free);
}
-static const struct xfs_defer_op_type xfs_extent_free_defer_type = {
- .type = XFS_DEFER_OPS_TYPE_FREE,
+const struct xfs_defer_op_type xfs_extent_free_defer_type = {
.max_items = XFS_EFI_MAX_FAST_EXTENTS,
.diff_items = xfs_extent_free_diff_items,
.create_intent = xfs_extent_free_create_intent,
@@ -274,8 +275,7 @@ xfs_agfl_free_finish_item(
/* sub-type with special handling for AGFL deferred frees */
-static const struct xfs_defer_op_type xfs_agfl_free_defer_type = {
- .type = XFS_DEFER_OPS_TYPE_AGFL_FREE,
+const struct xfs_defer_op_type xfs_agfl_free_defer_type = {
.max_items = XFS_EFI_MAX_FAST_EXTENTS,
.diff_items = xfs_extent_free_diff_items,
.create_intent = xfs_extent_free_create_intent,
@@ -285,11 +285,3 @@ static const struct xfs_defer_op_type xfs_agfl_free_defer_type = {
.finish_item = xfs_agfl_free_finish_item,
.cancel_item = xfs_extent_free_cancel_item,
};
-
-/* Register the deferred op type. */
-void
-xfs_extent_free_init_defer_op(void)
-{
- xfs_defer_init_op_type(&xfs_extent_free_defer_type);
- xfs_defer_init_op_type(&xfs_agfl_free_defer_type);
-}
diff --git a/fs/xfs/xfs_trans_refcount.c b/fs/xfs/xfs_trans_refcount.c
index 523c55663954..6c947ff4faf6 100644
--- a/fs/xfs/xfs_trans_refcount.c
+++ b/fs/xfs/xfs_trans_refcount.c
@@ -16,6 +16,7 @@
#include "xfs_refcount_item.h"
#include "xfs_alloc.h"
#include "xfs_refcount.h"
+#include "xfs_defer.h"
/*
* This routine is called to allocate a "refcount update done"
@@ -227,8 +228,7 @@ xfs_refcount_update_cancel_item(
kmem_free(refc);
}
-static const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
- .type = XFS_DEFER_OPS_TYPE_REFCOUNT,
+const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
.max_items = XFS_CUI_MAX_FAST_EXTENTS,
.diff_items = xfs_refcount_update_diff_items,
.create_intent = xfs_refcount_update_create_intent,
@@ -239,10 +239,3 @@ static const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
.finish_cleanup = xfs_refcount_update_finish_cleanup,
.cancel_item = xfs_refcount_update_cancel_item,
};
-
-/* Register the deferred op type. */
-void
-xfs_refcount_update_init_defer_op(void)
-{
- xfs_defer_init_op_type(&xfs_refcount_update_defer_type);
-}
diff --git a/fs/xfs/xfs_trans_rmap.c b/fs/xfs/xfs_trans_rmap.c
index 05b00e40251f..a42890931ecd 100644
--- a/fs/xfs/xfs_trans_rmap.c
+++ b/fs/xfs/xfs_trans_rmap.c
@@ -16,6 +16,7 @@
#include "xfs_rmap_item.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
+#include "xfs_defer.h"
/* Set the map extent flags for this reverse mapping. */
static void
@@ -244,8 +245,7 @@ xfs_rmap_update_cancel_item(
kmem_free(rmap);
}
-static const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
- .type = XFS_DEFER_OPS_TYPE_RMAP,
+const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
.max_items = XFS_RUI_MAX_FAST_EXTENTS,
.diff_items = xfs_rmap_update_diff_items,
.create_intent = xfs_rmap_update_create_intent,
@@ -256,10 +256,3 @@ static const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
.finish_cleanup = xfs_rmap_update_finish_cleanup,
.cancel_item = xfs_rmap_update_cancel_item,
};
-
-/* Register the deferred op type. */
-void
-xfs_rmap_update_init_defer_op(void)
-{
- xfs_defer_init_op_type(&xfs_rmap_update_defer_type);
-}
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 3a26aa7ead23..6db9a6d40c85 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -73,7 +73,8 @@
#define ACPI_LV_RESOURCES 0x00010000
#define ACPI_LV_USER_REQUESTS 0x00020000
#define ACPI_LV_PACKAGE 0x00040000
-#define ACPI_LV_VERBOSITY1 0x0007FF40 | ACPI_LV_ALL_EXCEPTIONS
+#define ACPI_LV_EVALUATION 0x00080000
+#define ACPI_LV_VERBOSITY1 0x000FFF40 | ACPI_LV_ALL_EXCEPTIONS
/* Trace verbosity level 2 [Function tracing and memory allocation] */
@@ -141,6 +142,7 @@
#define ACPI_DB_INTERRUPTS ACPI_DEBUG_LEVEL (ACPI_LV_INTERRUPTS)
#define ACPI_DB_USER_REQUESTS ACPI_DEBUG_LEVEL (ACPI_LV_USER_REQUESTS)
#define ACPI_DB_PACKAGE ACPI_DEBUG_LEVEL (ACPI_LV_PACKAGE)
+#define ACPI_DB_EVALUATION ACPI_DEBUG_LEVEL (ACPI_LV_EVALUATION)
#define ACPI_DB_MUTEX ACPI_DEBUG_LEVEL (ACPI_LV_MUTEX)
#define ACPI_DB_EVENTS ACPI_DEBUG_LEVEL (ACPI_LV_EVENTS)
@@ -148,7 +150,7 @@
/* Defaults for debug_level, debug and normal */
-#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR)
+#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR)
#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 14499757338f..de1804aeaf69 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -88,7 +88,14 @@ int acpi_pci_link_free_irq(acpi_handle handle);
struct pci_bus;
+#ifdef CONFIG_PCI
struct pci_dev *acpi_get_pci_dev(acpi_handle);
+#else
+static inline struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
+{
+ return NULL;
+}
+#endif
/* Arch-defined function to add a bus to the system */
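/*
 * Editor's note -- the !CONFIG_PCI stub added above lets callers drop their
 * own ifdefs; a hedged sketch of the resulting call site (hypothetical
 * caller, not part of this patch):
 *
 *	struct pci_dev *pdev = acpi_get_pci_dev(handle);
 *
 *	if (!pdev)
 *		return -ENODEV;	// always taken when PCI is compiled out
 */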
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 0c19b68bf060..7aa38b648564 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20181003
+#define ACPI_CA_VERSION 0x20181213
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 517addd6b11d..0a977eca0a74 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -38,6 +38,7 @@
#define ACPI_SIG_XSDT "XSDT" /* Extended System Description Table */
#define ACPI_SIG_SSDT "SSDT" /* Secondary System Description Table */
#define ACPI_RSDP_NAME "RSDP" /* Short name for RSDP, not signature */
+#define ACPI_OEM_NAME "OEM" /* Short name for OEM, not signature */
/*
* All tables and structures must be byte-packed to match the ACPI
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 501f341d1d92..ea1ca49c9c1b 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -365,6 +365,29 @@ struct acpi_table_tcpa_server {
*
******************************************************************************/
+/* Revision 3 */
+
+struct acpi_table_tpm23 {
+ struct acpi_table_header header; /* Common ACPI table header */
+ u32 reserved;
+ u64 control_address;
+ u32 start_method;
+};
+
+/* Value for start_method above */
+
+#define ACPI_TPM23_ACPI_START_METHOD 2
+
+/*
+ * Optional trailer for revision 3. If the start method is 2, there is a
+ * 4-byte reserved area of all zeros.
+ */
+struct acpi_tpm23_trailer {
+ u32 reserved;
+};
+
+/* Revision 4 */
+
struct acpi_table_tpm2 {
struct acpi_table_header header; /* Common ACPI table header */
u16 platform_class;
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 66ceb12ebc63..2590627dbfcc 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -527,6 +527,10 @@ typedef u64 acpi_integer;
#define ACPI_VALIDATE_RSDP_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_SIG_RSDP, 8))
#define ACPI_MAKE_RSDP_SIG(dest) (memcpy (ACPI_CAST_PTR (char, (dest)), ACPI_SIG_RSDP, 8))
+/* Support for OEMx signature (x can be any character) */
+#define ACPI_IS_OEM_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_OEM_NAME, 3) &&\
+ strnlen (a, ACPI_NAME_SIZE) == ACPI_NAME_SIZE)
+
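/*
 * Editor's note -- worked examples for the predicate above, assuming
 * ACPI_NAME_SIZE == 4: "OEM1" and "OEMB" match (prefix "OEM" plus a full
 * 4-character name); "OEM" alone fails the strnlen() length check, and
 * "XSDT" fails the strncmp() prefix check.
 */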
/*
* Algorithm to obtain access bit width.
* Can be used with access_width of struct acpi_generic_address and access_size of
@@ -1273,6 +1277,8 @@ typedef enum {
#define ACPI_OSI_WIN_10_RS1 0x0E
#define ACPI_OSI_WIN_10_RS2 0x0F
#define ACPI_OSI_WIN_10_RS3 0x10
+#define ACPI_OSI_WIN_10_RS4 0x11
+#define ACPI_OSI_WIN_10_RS5 0x12
/* Definitions of getopt */
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index cf59e6210d27..4f34734e7f36 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -142,5 +142,8 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
extern int acpi_get_psd_map(struct cppc_cpudata **);
extern unsigned int cppc_get_transition_latency(int cpu);
+extern bool cpc_ffh_supported(void);
+extern int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val);
+extern int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val);
#endif /* _CPPC_ACPI_H*/
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 7451b3bca83a..e3d21d014fcc 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -33,6 +33,10 @@
/* Kernel specific ACPICA configuration */
+#ifdef CONFIG_PCI
+#define ACPI_PCI_CONFIGURED
+#endif
+
#ifdef CONFIG_ACPI_REDUCED_HARDWARE_ONLY
#define ACPI_REDUCED_HARDWARE 1
#endif
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index 73474bb52344..bb6cb347018c 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -26,6 +26,7 @@
#define p4d_clear(p4d) pgd_clear(p4d)
#define p4d_val(p4d) pgd_val(p4d)
#define p4d_populate(mm, p4d, pud) pgd_populate(mm, p4d, pud)
+#define p4d_populate_safe(mm, p4d, pud) pgd_populate(mm, p4d, pud)
#define p4d_page(p4d) pgd_page(p4d)
#define p4d_page_vaddr(p4d) pgd_page_vaddr(p4d)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index cdafa5edea49..20561a60db9c 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -17,8 +17,10 @@
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
-struct bug_entry {
+#ifdef CONFIG_BUG
+
#ifdef CONFIG_GENERIC_BUG
+struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
unsigned long bug_addr;
#else
@@ -33,10 +35,8 @@ struct bug_entry {
unsigned short line;
#endif
unsigned short flags;
-#endif /* CONFIG_GENERIC_BUG */
};
-
-#ifdef CONFIG_BUG
+#endif /* CONFIG_GENERIC_BUG */
/*
* Don't use BUG() or BUG_ON() unless there's really no way out; one
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 827e4d3bbc7a..8cc7b09c1bc7 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -16,6 +16,7 @@
#define __ASM_GENERIC_FIXMAP_H
#include <linux/bug.h>
+#include <linux/mm_types.h>
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
index 1d6dd38c0e5e..829bdb0d6327 100644
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -31,6 +31,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
#define pgd_populate(mm, pgd, pud) do { } while (0)
+#define pgd_populate_safe(mm, pgd, pud) do { } while (0)
/*
* (puds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 04cb913797bc..aebab905e6cd 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -26,6 +26,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
#define p4d_ERROR(p4d) (pgd_ERROR((p4d).pgd))
#define pgd_populate(mm, pgd, p4d) do { } while (0)
+#define pgd_populate_safe(mm, pgd, p4d) do { } while (0)
/*
* (p4ds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index 9bef475db6fe..c77a1d301155 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -35,6 +35,7 @@ static inline void p4d_clear(p4d_t *p4d) { }
#define pud_ERROR(pud) (p4d_ERROR((pud).p4d))
#define p4d_populate(mm, p4d, pud) do { } while (0)
+#define p4d_populate_safe(mm, p4d, pud) do { } while (0)
/*
* (puds are folded into p4ds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 359fb935ded6..a9cac82e9a7a 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -375,7 +375,6 @@ static inline int pte_unused(pte_t pte)
#endif
#ifndef __HAVE_ARCH_PMD_SAME
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return pmd_val(pmd_a) == pmd_val(pmd_b);
@@ -385,21 +384,60 @@ static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
return pud_val(pud_a) == pud_val(pud_b);
}
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+#endif
+
+#ifndef __HAVE_ARCH_P4D_SAME
+static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
- BUILD_BUG();
- return 0;
+ return p4d_val(p4d_a) == p4d_val(p4d_b);
}
+#endif
-static inline int pud_same(pud_t pud_a, pud_t pud_b)
+#ifndef __HAVE_ARCH_PGD_SAME
+static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
- BUILD_BUG();
- return 0;
+ return pgd_val(pgd_a) == pgd_val(pgd_b);
}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_pte_safe(ptep, pte) \
+({ \
+ WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
+ set_pte(ptep, pte); \
+})
+
+#define set_pmd_safe(pmdp, pmd) \
+({ \
+ WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
+ set_pmd(pmdp, pmd); \
+})
+
+#define set_pud_safe(pudp, pud) \
+({ \
+ WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
+ set_pud(pudp, pud); \
+})
+
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+ WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+ set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+ WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+ set_pgd(pgdp, pgd); \
+})
+
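/*
 * Editor's note -- a hedged usage sketch (hypothetical early-boot code, not
 * part of this patch): the _safe variants fit paths that re-populate page
 * tables with identical entries and therefore never need a TLB flush:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 *	set_pte_safe(ptep, pte);	// WARNs if a live, different PTE would be clobbered
 */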
#ifndef __HAVE_ARCH_DO_SWAP_PAGE
/*
* Some architectures support metadata associated with a page. When a
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index 22e6f412c595..a3e766dff917 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -234,34 +234,6 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}
-static inline void crypto_stat_compress(struct acomp_req *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->compress_cnt);
- atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_decompress(struct acomp_req *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->decompress_cnt);
- atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen);
- }
-#endif
-}
-
/**
* crypto_acomp_compress() -- Invoke asynchronous compress operation
*
@@ -274,10 +246,13 @@ static inline void crypto_stat_decompress(struct acomp_req *req, int ret)
static inline int crypto_acomp_compress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int slen = req->slen;
int ret;
+ crypto_stats_get(alg);
ret = tfm->compress(req);
- crypto_stat_compress(req, ret);
+ crypto_stats_compress(slen, ret, alg);
return ret;
}
@@ -293,10 +268,13 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int slen = req->slen;
int ret;
+ crypto_stats_get(alg);
ret = tfm->decompress(req);
- crypto_stat_decompress(req, ret);
+ crypto_stats_decompress(slen, ret, alg);
return ret;
}
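/*
 * Editor's note -- the removed per-type inlines are replaced by out-of-line
 * helpers; judging from the call sites above, their declarations are along
 * these lines (a sketch; the exact prototypes live in the crypto core
 * headers):
 *
 *	void crypto_stats_get(struct crypto_alg *alg);
 *	void crypto_stats_compress(unsigned int slen, int ret,
 *				   struct crypto_alg *alg);
 *	void crypto_stats_decompress(unsigned int slen, int ret,
 *				     struct crypto_alg *alg);
 *
 * crypto_stats_get() is called before the operation so the alg reference
 * stays valid until the statistics are accounted, even if the request
 * completes asynchronously.
 */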
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 0d765d7bfb82..9ad595f97c65 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -115,7 +115,6 @@ struct aead_request {
* @setkey: see struct skcipher_alg
* @encrypt: see struct skcipher_alg
* @decrypt: see struct skcipher_alg
- * @geniv: see struct skcipher_alg
* @ivsize: see struct skcipher_alg
* @chunksize: see struct skcipher_alg
* @init: Initialize the cryptographic transformation object. This function
@@ -142,8 +141,6 @@ struct aead_alg {
int (*init)(struct crypto_aead *tfm);
void (*exit)(struct crypto_aead *tfm);
- const char *geniv;
-
unsigned int ivsize;
unsigned int maxauthsize;
unsigned int chunksize;
@@ -306,34 +303,6 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
return __crypto_aead_cast(req->base.tfm);
}
-static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
- atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
- atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen);
- }
-#endif
-}
-
/**
* crypto_aead_encrypt() - encrypt plaintext
* @req: reference to the aead_request handle that holds all information
@@ -356,13 +325,16 @@ static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret)
static inline int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_alg *alg = aead->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+ crypto_stats_get(alg);
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = crypto_aead_alg(aead)->encrypt(req);
- crypto_stat_aead_encrypt(req, ret);
+ crypto_stats_aead_encrypt(cryptlen, alg, ret);
return ret;
}
@@ -391,15 +363,18 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
static inline int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto_alg *alg = aead->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+ crypto_stats_get(alg);
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else if (req->cryptlen < crypto_aead_authsize(aead))
ret = -EINVAL;
else
ret = crypto_aead_alg(aead)->decrypt(req);
- crypto_stat_aead_decrypt(req, ret);
+ crypto_stats_aead_decrypt(cryptlen, alg, ret);
return ret;
}
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index afac71119396..2d690494568c 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -271,62 +271,6 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
return alg->max_size(tfm);
}
-static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
- atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
- atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_akcipher_sign(struct akcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->sign_cnt);
-#endif
-}
-
-static inline void crypto_stat_akcipher_verify(struct akcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->verify_cnt);
-#endif
-}
-
/**
* crypto_akcipher_encrypt() - Invoke public key encrypt operation
*
@@ -341,10 +285,13 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ unsigned int src_len = req->src_len;
int ret;
+ crypto_stats_get(calg);
ret = alg->encrypt(req);
- crypto_stat_akcipher_encrypt(req, ret);
+ crypto_stats_akcipher_encrypt(src_len, ret, calg);
return ret;
}
@@ -362,10 +309,13 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
+ unsigned int src_len = req->src_len;
int ret;
+ crypto_stats_get(calg);
ret = alg->decrypt(req);
- crypto_stat_akcipher_decrypt(req, ret);
+ crypto_stats_akcipher_decrypt(src_len, ret, calg);
return ret;
}
@@ -383,10 +333,12 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(calg);
ret = alg->sign(req);
- crypto_stat_akcipher_sign(req, ret);
+ crypto_stats_akcipher_sign(ret, calg);
return ret;
}
@@ -404,10 +356,12 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(calg);
ret = alg->verify(req);
- crypto_stat_akcipher_verify(req, ret);
+ crypto_stats_akcipher_verify(ret, calg);
return ret;
}
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
new file mode 100644
index 000000000000..1fc70a69d550
--- /dev/null
+++ b/include/crypto/chacha.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values and helper functions for the ChaCha and XChaCha stream ciphers.
+ *
+ * XChaCha extends ChaCha's nonce to 192 bits, while provably retaining ChaCha's
+ * security. Here they share the same key size, tfm context, and setkey
+ * function; only their IV size and encrypt/decrypt function differ.
+ *
+ * The ChaCha paper specifies 20, 12, and 8-round variants. In general, it is
+ * recommended to use the 20-round variant ChaCha20. However, the other
+ * variants may be needed in some performance-sensitive scenarios. The generic
+ * ChaCha code currently allows only the 20 and 12-round variants.
+ */
+
+#ifndef _CRYPTO_CHACHA_H
+#define _CRYPTO_CHACHA_H
+
+#include <crypto/skcipher.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
+#define CHACHA_IV_SIZE 16
+
+#define CHACHA_KEY_SIZE 32
+#define CHACHA_BLOCK_SIZE 64
+#define CHACHAPOLY_IV_SIZE 12
+
+/* 192-bit nonce, then 64-bit stream position */
+#define XCHACHA_IV_SIZE 32
+
+struct chacha_ctx {
+ u32 key[8];
+ int nrounds;
+};
+
+void chacha_block(u32 *state, u8 *stream, int nrounds);
+static inline void chacha20_block(u32 *state, u8 *stream)
+{
+ chacha_block(state, stream, 20);
+}
+void hchacha_block(const u32 *in, u32 *out, int nrounds);
+
+void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv);
+
+int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize);
+int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize);
+
+int crypto_chacha_crypt(struct skcipher_request *req);
+int crypto_xchacha_crypt(struct skcipher_request *req);
+
+#endif /* _CRYPTO_CHACHA_H */
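/*
 * Editor's note -- a hedged sketch of how these pieces compose for XChaCha
 * (variable names hypothetical): hchacha_block() turns the key plus the
 * first 128 bits of the 192-bit nonce into a subkey, and ordinary ChaCha
 * then runs with that subkey and the remaining 64 nonce bits:
 *
 *	u32 state[16], subkey[8];
 *
 *	// state holds the constants, ctx->key, and iv[0..15]
 *	hchacha_block(state, subkey, ctx->nrounds);
 *	// re-initialize state with subkey and the last 16 IV bytes,
 *	// then generate the keystream with chacha_block()
 */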
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
deleted file mode 100644
index f76302d99e2b..000000000000
--- a/include/crypto/chacha20.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Common values for the ChaCha20 algorithm
- */
-
-#ifndef _CRYPTO_CHACHA20_H
-#define _CRYPTO_CHACHA20_H
-
-#include <crypto/skcipher.h>
-#include <linux/types.h>
-#include <linux/crypto.h>
-
-#define CHACHA20_IV_SIZE 16
-#define CHACHA20_KEY_SIZE 32
-#define CHACHA20_BLOCK_SIZE 64
-
-struct chacha20_ctx {
- u32 key[8];
-};
-
-void chacha20_block(u32 *state, u8 *stream);
-void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
-int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize);
-int crypto_chacha20_crypt(struct skcipher_request *req);
-
-#endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index bc7796600338..3b31c1b349ae 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -412,32 +412,6 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
-static inline void crypto_stat_ahash_update(struct ahash_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
- else
- atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
-#endif
-}
-
-static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->hash_cnt);
- atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
- }
-#endif
-}
-
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
@@ -552,10 +526,14 @@ static inline int crypto_ahash_init(struct ahash_request *req)
*/
static inline int crypto_ahash_update(struct ahash_request *req)
{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+ crypto_stats_get(alg);
ret = crypto_ahash_reqtfm(req)->update(req);
- crypto_stat_ahash_update(req, ret);
+ crypto_stats_ahash_update(nbytes, ret, alg);
return ret;
}
diff --git a/include/crypto/hash_info.h b/include/crypto/hash_info.h
index 56f217d41f12..91786b68dbdb 100644
--- a/include/crypto/hash_info.h
+++ b/include/crypto/hash_info.h
@@ -15,6 +15,7 @@
#include <crypto/sha.h>
#include <crypto/md5.h>
+#include <crypto/streebog.h>
#include <uapi/linux/hash_info.h>
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
index 8db299c25566..40623f4457df 100644
--- a/include/crypto/internal/cryptouser.h
+++ b/include/crypto/internal/cryptouser.h
@@ -3,6 +3,11 @@
struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
-int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb);
+#ifdef CONFIG_CRYPTO_STATS
int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
-int crypto_dump_reportstat_done(struct netlink_callback *cb);
+#else
+static inline int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs)
+{
+ return -ENOTSUPP;
+}
+#endif
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index e42f7063f245..453e867b4bd9 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -70,8 +70,6 @@ struct skcipher_walk {
unsigned int alignmask;
};
-extern const struct crypto_type crypto_givcipher_type;
-
static inline struct crypto_instance *skcipher_crypto_instance(
struct skcipher_instance *inst)
{
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index f517ba6d3a27..1a97e1601422 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -268,42 +268,6 @@ struct kpp_secret {
unsigned short len;
};
-static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- if (ret)
- atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->setsecret_cnt);
-#endif
-}
-
-static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-
- if (ret)
- atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->generate_public_key_cnt);
-#endif
-}
-
-static inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
-
- if (ret)
- atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt);
-#endif
-}
-
/**
* crypto_kpp_set_secret() - Invoke kpp operation
*
@@ -323,10 +287,12 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(calg);
ret = alg->set_secret(tfm, buffer, len);
- crypto_stat_kpp_set_secret(tfm, ret);
+ crypto_stats_kpp_set_secret(calg, ret);
return ret;
}
@@ -347,10 +313,12 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(calg);
ret = alg->generate_public_key(req);
- crypto_stat_kpp_generate_public_key(req, ret);
+ crypto_stats_kpp_generate_public_key(calg, ret);
return ret;
}
@@ -368,10 +336,12 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ struct crypto_alg *calg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(calg);
ret = alg->compute_shared_secret(req);
- crypto_stat_kpp_compute_shared_secret(req, ret);
+ crypto_stats_kpp_compute_shared_secret(calg, ret);
return ret;
}
diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h
new file mode 100644
index 000000000000..53c04423c582
--- /dev/null
+++ b/include/crypto/nhpoly1305.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values and helper functions for the NHPoly1305 hash function.
+ */
+
+#ifndef _NHPOLY1305_H
+#define _NHPOLY1305_H
+
+#include <crypto/hash.h>
+#include <crypto/poly1305.h>
+
+/* NH parameterization: */
+
+/* Endianness: little */
+/* Word size: 32 bits (works well on NEON, SSE2, AVX2) */
+
+/* Stride: 2 words (optimal on ARM32 NEON; works okay on other CPUs too) */
+#define NH_PAIR_STRIDE 2
+#define NH_MESSAGE_UNIT (NH_PAIR_STRIDE * 2 * sizeof(u32))
+
+/* Num passes (Toeplitz iteration count): 4, to give ε = 2^{-128} */
+#define NH_NUM_PASSES 4
+#define NH_HASH_BYTES (NH_NUM_PASSES * sizeof(u64))
+
+/* Max message size: 1024 bytes (32x compression factor) */
+#define NH_NUM_STRIDES 64
+#define NH_MESSAGE_WORDS (NH_PAIR_STRIDE * 2 * NH_NUM_STRIDES)
+#define NH_MESSAGE_BYTES (NH_MESSAGE_WORDS * sizeof(u32))
+#define NH_KEY_WORDS (NH_MESSAGE_WORDS + \
+ NH_PAIR_STRIDE * 2 * (NH_NUM_PASSES - 1))
+#define NH_KEY_BYTES (NH_KEY_WORDS * sizeof(u32))
+
+#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES)
+
+struct nhpoly1305_key {
+ struct poly1305_key poly_key;
+ u32 nh_key[NH_KEY_WORDS];
+};
+
+struct nhpoly1305_state {
+
+ /* Running total of polynomial evaluation */
+ struct poly1305_state poly_state;
+
+ /* Partial block buffer */
+ u8 buffer[NH_MESSAGE_UNIT];
+ unsigned int buflen;
+
+ /*
+ * Number of bytes remaining until the current NH message reaches
+ * NH_MESSAGE_BYTES. When nonzero, 'nh_hash' holds the partial NH hash.
+ */
+ unsigned int nh_remaining;
+
+ __le64 nh_hash[NH_NUM_PASSES];
+};
+
+typedef void (*nh_t)(const u32 *key, const u8 *message, size_t message_len,
+ __le64 hash[NH_NUM_PASSES]);
+
+int crypto_nhpoly1305_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen);
+
+int crypto_nhpoly1305_init(struct shash_desc *desc);
+int crypto_nhpoly1305_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen);
+int crypto_nhpoly1305_update_helper(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen,
+ nh_t nh_fn);
+int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst);
+int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst,
+ nh_t nh_fn);
+
+#endif /* _NHPOLY1305_H */
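/*
 * Editor's note -- a worked check of the parameterization above:
 * NH_MESSAGE_WORDS = 2 * 2 * 64 = 256 words = 1024 bytes per NH message,
 * and NH_HASH_BYTES = 4 * 8 = 32 bytes out, i.e. the advertised 32x
 * compression. Poly1305 then absorbs only the 32-byte NH hashes, so the
 * polynomial-evaluation work grows with message length / 32 rather than
 * with the raw message. NH_KEY_WORDS = 256 + 2 * 2 * 3 = 268 words
 * (1072 bytes), making NHPOLY1305_KEY_SIZE = 16 + 1072 = 1088 bytes.
 */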
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index f718a19da82f..34317ed2071e 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -13,13 +13,21 @@
#define POLY1305_KEY_SIZE 32
#define POLY1305_DIGEST_SIZE 16
+struct poly1305_key {
+ u32 r[5]; /* key, base 2^26 */
+};
+
+struct poly1305_state {
+ u32 h[5]; /* accumulator, base 2^26 */
+};
+
struct poly1305_desc_ctx {
/* key */
- u32 r[5];
+ struct poly1305_key r;
/* finalize key */
u32 s[4];
/* accumulator */
- u32 h[5];
+ struct poly1305_state h;
/* partial buffer */
u8 buf[POLY1305_BLOCK_SIZE];
/* bytes used in partial buffer */
@@ -30,6 +38,22 @@ struct poly1305_desc_ctx {
bool sset;
};
+/*
+ * Poly1305 core functions. These implement the ε-almost-∆-universal hash
+ * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce
+ * ("s key") at the end. They also only support block-aligned inputs.
+ */
+void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key);
+static inline void poly1305_core_init(struct poly1305_state *state)
+{
+ memset(state->h, 0, sizeof(state->h));
+}
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_key *key,
+ const void *src, unsigned int nblocks);
+void poly1305_core_emit(const struct poly1305_state *state, void *dst);
+
+/* Crypto API helper functions for the Poly1305 MAC */
int crypto_poly1305_init(struct shash_desc *desc);
unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen);
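/*
 * Editor's note -- a hedged sketch of driving the new core interface
 * directly (block-aligned input only; variable names hypothetical):
 *
 *	struct poly1305_key key;
 *	struct poly1305_state state;
 *
 *	poly1305_core_setkey(&key, raw_key);	// the 16-byte "r" portion
 *	poly1305_core_init(&state);
 *	poly1305_core_blocks(&state, &key, msg, nblocks);
 *	poly1305_core_emit(&state, digest);	// no "s key" is added here
 */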
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index 6d258f5b68f1..022a1b896b47 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -122,29 +122,6 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
}
-static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
- else
- atomic_inc(&tfm->base.__crt_alg->seed_cnt);
-#endif
-}
-
-static inline void crypto_stat_rng_generate(struct crypto_rng *tfm,
- unsigned int dlen, int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
- } else {
- atomic_inc(&tfm->base.__crt_alg->generate_cnt);
- atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen);
- }
-#endif
-}
-
/**
* crypto_rng_generate() - get random number
* @tfm: cipher handle
@@ -163,10 +140,12 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
+ struct crypto_alg *alg = tfm->base.__crt_alg;
int ret;
+ crypto_stats_get(alg);
ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
- crypto_stat_rng_generate(tfm, dlen, ret);
+ crypto_stats_rng_generate(alg, dlen, ret);
return ret;
}
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 925f547cdcfa..e555294ed77f 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -39,19 +39,6 @@ struct skcipher_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
-/**
- * struct skcipher_givcrypt_request - Crypto request with IV generation
- * @seq: Sequence number for IV generation
- * @giv: Space for generated IV
- * @creq: The crypto request itself
- */
-struct skcipher_givcrypt_request {
- u64 seq;
- u8 *giv;
-
- struct ablkcipher_request creq;
-};
-
struct crypto_skcipher {
int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen);
@@ -486,32 +473,6 @@ static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm(
return container_of(tfm, struct crypto_sync_skcipher, base);
}
-static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req,
- int ret, struct crypto_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&alg->cipher_err_cnt);
- } else {
- atomic_inc(&alg->encrypt_cnt);
- atomic64_add(req->cryptlen, &alg->encrypt_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
- int ret, struct crypto_alg *alg)
-{
-#ifdef CONFIG_CRYPTO_STATS
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&alg->cipher_err_cnt);
- } else {
- atomic_inc(&alg->decrypt_cnt);
- atomic64_add(req->cryptlen, &alg->decrypt_tlen);
- }
-#endif
-}
-
/**
* crypto_skcipher_encrypt() - encrypt plaintext
* @req: reference to the skcipher_request handle that holds all information
@@ -526,13 +487,16 @@ static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+ crypto_stats_get(alg);
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = tfm->encrypt(req);
- crypto_stat_skcipher_encrypt(req, ret, tfm->base.__crt_alg);
+ crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
return ret;
}
@@ -550,13 +514,16 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_alg *alg = tfm->base.__crt_alg;
+ unsigned int cryptlen = req->cryptlen;
int ret;
+ crypto_stats_get(alg);
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = tfm->decrypt(req);
- crypto_stat_skcipher_decrypt(req, ret, tfm->base.__crt_alg);
+ crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
return ret;
}
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
new file mode 100644
index 000000000000..4af119f7e07b
--- /dev/null
+++ b/include/crypto/streebog.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-2-Clause */
+/*
+ * Copyright (c) 2013 Alexey Degtyarev <alexey@renatasystems.org>
+ * Copyright (c) 2018 Vitaly Chikunov <vt@altlinux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#ifndef _CRYPTO_STREEBOG_H_
+#define _CRYPTO_STREEBOG_H_
+
+#include <linux/types.h>
+
+#define STREEBOG256_DIGEST_SIZE 32
+#define STREEBOG512_DIGEST_SIZE 64
+#define STREEBOG_BLOCK_SIZE 64
+
+struct streebog_uint512 {
+ u64 qword[8];
+};
+
+struct streebog_state {
+ u8 buffer[STREEBOG_BLOCK_SIZE];
+ struct streebog_uint512 hash;
+ struct streebog_uint512 h;
+ struct streebog_uint512 N;
+ struct streebog_uint512 Sigma;
+ size_t fillsize;
+};
+
+#endif /* !_CRYPTO_STREEBOG_H_ */
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index ccb5aa8468e0..9c56412bb2cf 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -133,6 +133,7 @@ struct dw_hdmi_plat_data {
const struct dw_hdmi_phy_ops *phy_ops;
const char *phy_name;
void *phy_data;
+ unsigned int phy_force_vendor;
/* Synopsys PHY support */
const struct dw_hdmi_mpll_config *mpll_cfg;
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index d9c6d549f971..48a671e782ca 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -19,6 +19,13 @@ struct dw_mipi_dsi_phy_ops {
unsigned int *lane_mbps);
};
+struct dw_mipi_dsi_host_ops {
+ int (*attach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+ int (*detach)(void *priv_data,
+ struct mipi_dsi_device *dsi);
+};
+
struct dw_mipi_dsi_plat_data {
void __iomem *base;
unsigned int max_data_lanes;
@@ -27,6 +34,7 @@ struct dw_mipi_dsi_plat_data {
const struct drm_display_mode *mode);
const struct dw_mipi_dsi_phy_ops *phy_ops;
+ const struct dw_mipi_dsi_host_ops *host_ops;
void *priv_data;
};
@@ -35,10 +43,8 @@ struct dw_mipi_dsi *dw_mipi_dsi_probe(struct platform_device *pdev,
const struct dw_mipi_dsi_plat_data
*plat_data);
void dw_mipi_dsi_remove(struct dw_mipi_dsi *dsi);
-struct dw_mipi_dsi *dw_mipi_dsi_bind(struct platform_device *pdev,
- struct drm_encoder *encoder,
- const struct dw_mipi_dsi_plat_data
- *plat_data);
+int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder);
void dw_mipi_dsi_unbind(struct dw_mipi_dsi *dsi);
+void dw_mipi_dsi_set_slave(struct dw_mipi_dsi *dsi, struct dw_mipi_dsi *slave);
#endif /* __DW_MIPI_DSI__ */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 05350424a4d3..bdb0d5548f39 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -68,7 +68,6 @@
#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_global.h>
#include <drm/drm_hashtab.h>
#include <drm/drm_mm.h>
#include <drm/drm_os_linux.h>
@@ -110,4 +109,10 @@ static inline bool drm_can_sleep(void)
return true;
}
+#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
+#else
+#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
+#endif
+
#endif
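/*
 * Editor's note -- usage sketch (hypothetical symbol): a helper can be made
 * visible to the selftest module without becoming kernel-wide API when the
 * selftests are not built:
 *
 *	EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_mm_some_internal_helper);
 */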
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 1e810e0b7664..f9b35834c45d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -265,7 +265,6 @@ struct __drm_private_objs_state {
* struct drm_atomic_state - the global state object for atomic updates
* @ref: count of all references to this state (will not be freed until zero)
* @dev: parent DRM device
- * @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
* @async_update: hint for asynchronous plane update
* @planes: pointer to array of structures with per-plane data
@@ -284,6 +283,15 @@ struct drm_atomic_state {
struct kref ref;
struct drm_device *dev;
+
+ /**
+ * @allow_modeset:
+ *
+ * Allow full modeset. This is used by the ATOMIC IOCTL handler to
+ * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should
+ * never consult this flag, instead looking at the output of
+ * drm_atomic_crtc_needs_modeset().
+ */
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
bool async_update : 1;
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 657af7b39379..58214be3bf3d 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -31,6 +31,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_util.h>
struct drm_atomic_state;
@@ -126,6 +127,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
void drm_atomic_helper_shutdown(struct drm_device *dev);
+struct drm_atomic_state *
+drm_atomic_helper_duplicate_state(struct drm_device *dev,
+ struct drm_modeset_acquire_ctx *ctx);
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev);
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx);
@@ -144,51 +148,10 @@ int drm_atomic_helper_page_flip_target(
uint32_t flags,
uint32_t target,
struct drm_modeset_acquire_ctx *ctx);
-struct drm_encoder *
-drm_atomic_helper_best_encoder(struct drm_connector *connector);
-
-/* default implementations for state handling */
-void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
-struct drm_crtc_state *
-drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
-void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
-void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
- struct drm_crtc_state *state);
-
-void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
- struct drm_plane_state *state);
-void drm_atomic_helper_plane_reset(struct drm_plane *plane);
-void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
- struct drm_plane_state *state);
-struct drm_plane_state *
-drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
-void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
-void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state);
-
-void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
- struct drm_connector_state *conn_state);
-void drm_atomic_helper_connector_reset(struct drm_connector *connector);
-void
-__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
- struct drm_connector_state *state);
-struct drm_connector_state *
-drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
-struct drm_atomic_state *
-drm_atomic_helper_duplicate_state(struct drm_device *dev,
- struct drm_modeset_acquire_ctx *ctx);
-void
-__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
-void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
- struct drm_connector_state *state);
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
uint32_t size,
struct drm_modeset_acquire_ctx *ctx);
-void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
- struct drm_private_state *state);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
new file mode 100644
index 000000000000..66c92cbd8e16
--- /dev/null
+++ b/include/drm/drm_atomic_state_helper.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct drm_crtc_state;
+struct drm_plane;
+struct drm_plane_state;
+struct drm_connector;
+struct drm_connector_state;
+struct drm_private_obj;
+struct drm_private_state;
+struct drm_modeset_acquire_ctx;
+struct drm_device;
+
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+void __drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+
+void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
+ struct drm_connector_state *conn_state);
+void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void
+__drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state);
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state);
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+ struct drm_private_state *state);
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 9ccad6b062f2..9be2181b3ed7 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -508,6 +508,18 @@ struct drm_connector_state {
* drm_writeback_signal_completion()
*/
struct drm_writeback_job *writeback_job;
+
+ /**
+ * @max_requested_bpc: Connector property to limit the maximum bit
+ * depth of the pixels.
+ */
+ u8 max_requested_bpc;
+
+ /**
+ * @max_bpc: Connector max_bpc based on the requested max_bpc property
+ * and the connector bpc limitations obtained from edid.
+ */
+ u8 max_bpc;
};
/**
@@ -960,6 +972,17 @@ struct drm_connector {
struct drm_property *scaling_mode_property;
/**
+ * @vrr_capable_property: Optional property to help userspace
+ * query hardware support for variable refresh rate on a
+ * connector. Drivers can add the property to a connector by
+ * calling drm_connector_attach_vrr_capable_property().
+ *
+ * This should be updated only by calling
+ * drm_connector_set_vrr_capable_property().
+ */
+ struct drm_property *vrr_capable_property;
+
+ /**
* @content_protection_property: DRM ENUM property for content
* protection. See drm_connector_attach_content_protection_property().
*/
@@ -973,6 +996,12 @@ struct drm_connector {
*/
struct drm_property_blob *path_blob_ptr;
+ /**
+ * @max_bpc_property: Default connector property for the max bpc to be
+ * driven out of the connector.
+ */
+ struct drm_property *max_bpc_property;
+
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@ -1133,6 +1162,7 @@ int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
int connector_type);
+void drm_connector_attach_edid_property(struct drm_connector *connector);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
int drm_connector_attach_encoder(struct drm_connector *connector,
@@ -1192,30 +1222,6 @@ static inline void drm_connector_put(struct drm_connector *connector)
}
/**
- * drm_connector_reference - acquire a connector reference
- * @connector: DRM connector
- *
- * This is a compatibility alias for drm_connector_get() and should not be
- * used by new code.
- */
-static inline void drm_connector_reference(struct drm_connector *connector)
-{
- drm_connector_get(connector);
-}
-
-/**
- * drm_connector_unreference - release a connector reference
- * @connector: DRM connector
- *
- * This is a compatibility alias for drm_connector_put() and should not be
- * used by new code.
- */
-static inline void drm_connector_unreference(struct drm_connector *connector)
-{
- drm_connector_put(connector);
-}
-
-/**
* drm_connector_is_unregistered - has the connector been unregistered from
* userspace?
* @connector: DRM connector
@@ -1250,6 +1256,8 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev);
int drm_connector_attach_content_type_property(struct drm_connector *dev);
int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
+int drm_connector_attach_vrr_capable_property(
+ struct drm_connector *connector);
int drm_connector_attach_content_protection_property(
struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
@@ -1266,8 +1274,12 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
void drm_connector_set_link_status_property(struct drm_connector *connector,
uint64_t link_status);
+void drm_connector_set_vrr_capable_property(
+ struct drm_connector *connector, bool capable);
int drm_connector_init_panel_orientation_property(
struct drm_connector *connector, int width, int height);
+int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
+ int min, int max);
/**
* struct drm_tile_group - Tile group metadata
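
For illustration (not part of this patch): a hedged sketch of attaching the
new max_bpc and vrr_capable properties from a driver's connector init path.
"connector" is assumed to be an already initialized &drm_connector, and the
8..12 bpc range is a made-up hardware limit.

	int ret;

	ret = drm_connector_attach_max_bpc_property(connector, 8, 12);
	if (ret)
		return ret;

	ret = drm_connector_attach_vrr_capable_property(connector);
	if (ret)
		return ret;

	/* Updated from detect() once the sink's real capability is known. */
	drm_connector_set_vrr_capable_property(connector, false);
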
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b21437bc95bf..39c3900aab3c 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -291,6 +291,15 @@ struct drm_crtc_state {
u32 pageflip_flags;
/**
+ * @vrr_enabled:
+ *
+ * Indicates if variable refresh rate should be enabled for the CRTC.
+ * Support for the requested vrr state will depend on driver and
+ * hardware capability - lacking support is not treated as failure.
+ */
+ bool vrr_enabled;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
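
For illustration (not part of this patch): a sketch of how an atomic driver
might honour the new @vrr_enabled flag; the foo_hw_* helpers are hypothetical
hardware accessors.

static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_crtc_state *old_state)
{
	/* Lacking VRR support is not an error; fall back to fixed refresh. */
	if (crtc->state->vrr_enabled && foo_hw_has_vrr(crtc))
		foo_hw_enable_vrr(crtc);
	else
		foo_hw_disable_vrr(crtc);
}
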
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 6914633037a5..d65f034843ce 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -57,12 +57,6 @@ int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
void drm_helper_resume_force_mode(struct drm_device *dev);
-int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
- struct drm_framebuffer *old_fb);
-int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb);
-
/* drm_probe_helper.c */
int drm_helper_probe_single_connector_modes(struct drm_connector
*connector, uint32_t maxX,
diff --git a/include/drm/drm_damage_helper.h b/include/drm/drm_damage_helper.h
new file mode 100644
index 000000000000..4487660b26b8
--- /dev/null
+++ b/include/drm/drm_damage_helper.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Deepak Rawat <drawat@vmware.com>
+ *
+ **************************************************************************/
+
+#ifndef DRM_DAMAGE_HELPER_H_
+#define DRM_DAMAGE_HELPER_H_
+
+#include <drm/drm_atomic_helper.h>
+
+/**
+ * drm_atomic_for_each_plane_damage - Iterator macro for plane damage.
+ * @iter: The iterator to advance.
+ * @rect: Returns a rectangle in fb coordinates clipped to the plane src.
+ *
+ * Note that if the first call to the iterator macro returns false, no plane
+ * update is needed. The iterator returns the full plane src when damage is
+ * not passed by user-space.
+ */
+#define drm_atomic_for_each_plane_damage(iter, rect) \
+ while (drm_atomic_helper_damage_iter_next(iter, rect))
+
+/**
+ * struct drm_atomic_helper_damage_iter - Closure structure for damage iterator.
+ *
+ * This structure tracks state needed to walk the list of plane damage clips.
+ */
+struct drm_atomic_helper_damage_iter {
+ /* private: Plane src in whole-number coordinates. */
+ struct drm_rect plane_src;
+ /* private: Rectangles in plane damage blob. */
+ const struct drm_rect *clips;
+ /* private: Number of rectangles in plane damage blob. */
+ uint32_t num_clips;
+ /* private: Current clip the iterator is advancing on. */
+ uint32_t curr_clip;
+ /* private: Whether a full plane update is needed. */
+ bool full_update;
+};
+
+void drm_plane_enable_fb_damage_clips(struct drm_plane *plane);
+void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
+ struct drm_plane_state *plane_state);
+int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned int flags,
+ unsigned int color, struct drm_clip_rect *clips,
+ unsigned int num_clips);
+void
+drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
+ const struct drm_plane_state *old_state,
+ const struct drm_plane_state *new_state);
+bool
+drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
+ struct drm_rect *rect);
+
+/**
+ * drm_helper_get_plane_damage_clips - Returns damage clips in &drm_rect.
+ * @state: Plane state.
+ *
+ * Returns plane damage rectangles as internal &drm_rect. Currently a
+ * &drm_rect can be obtained by simply typecasting a &drm_mode_rect, because
+ * both use signed 32-bit members and drm_atomic_check_only() verifies that
+ * damage clips are inside the fb.
+ *
+ * Return: Clips in plane fb_damage_clips blob property.
+ */
+static inline struct drm_rect *
+drm_helper_get_plane_damage_clips(const struct drm_plane_state *state)
+{
+ return (struct drm_rect *)drm_plane_get_damage_clips(state);
+}
+
+#endif
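
For illustration (not part of this patch): the intended usage pattern for the
damage iterator in a plane's atomic_update hook; foo_hw_flush_rect() is a
hypothetical stand-in for the driver's per-rectangle upload.

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_state, plane->state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		foo_hw_flush_rect(plane, &clip);
}
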
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 2a3843f248cf..5736c942c85b 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -231,6 +231,8 @@
#define DP_DSC_MAX_BITS_PER_PIXEL_LOW 0x067 /* eDP 1.4 */
#define DP_DSC_MAX_BITS_PER_PIXEL_HI 0x068 /* eDP 1.4 */
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK (0x3 << 0)
+# define DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT 8
#define DP_DSC_DEC_COLOR_FORMAT_CAP 0x069
# define DP_DSC_RGB (1 << 0)
@@ -279,6 +281,8 @@
# define DP_DSC_THROUGHPUT_MODE_1_1000 (14 << 4)
#define DP_DSC_MAX_SLICE_WIDTH 0x06C
+#define DP_DSC_MIN_SLICE_WIDTH_VALUE 2560
+#define DP_DSC_SLICE_WIDTH_MULTIPLIER 320
#define DP_DSC_SLICE_CAP_2 0x06D
# define DP_DSC_16_PER_DP_DSC_SINK (1 << 0)
@@ -477,6 +481,7 @@
# define DP_AUX_FRAME_SYNC_VALID (1 << 0)
#define DP_DSC_ENABLE 0x160 /* DP 1.4 */
+# define DP_DECOMPRESSION_EN (1 << 0)
#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */
# define DP_PSR_ENABLE (1 << 0)
@@ -685,6 +690,8 @@
# define DP_EDP_12 0x01
# define DP_EDP_13 0x02
# define DP_EDP_14 0x03
+# define DP_EDP_14a 0x04 /* eDP 1.4a */
+# define DP_EDP_14b 0x05 /* eDP 1.4b */
#define DP_EDP_GENERAL_CAP_1 0x701
# define DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP (1 << 0)
@@ -905,6 +912,57 @@
#define DP_AUX_HDCP_KSV_FIFO 0x6802C
#define DP_AUX_HDCP_AINFO 0x6803B
+/* DP HDCP2.2 parameter offsets in DPCD address space */
+#define DP_HDCP_2_2_REG_RTX_OFFSET 0x69000
+#define DP_HDCP_2_2_REG_TXCAPS_OFFSET 0x69008
+#define DP_HDCP_2_2_REG_CERT_RX_OFFSET 0x6900B
+#define DP_HDCP_2_2_REG_RRX_OFFSET 0x69215
+#define DP_HDCP_2_2_REG_RX_CAPS_OFFSET 0x6921D
+#define DP_HDCP_2_2_REG_EKPUB_KM_OFFSET 0x69220
+#define DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET 0x692A0
+#define DP_HDCP_2_2_REG_M_OFFSET 0x692B0
+#define DP_HDCP_2_2_REG_HPRIME_OFFSET 0x692C0
+#define DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET 0x692E0
+#define DP_HDCP_2_2_REG_RN_OFFSET 0x692F0
+#define DP_HDCP_2_2_REG_LPRIME_OFFSET 0x692F8
+#define DP_HDCP_2_2_REG_EDKEY_KS_OFFSET 0x69318
+#define DP_HDCP_2_2_REG_RIV_OFFSET 0x69328
+#define DP_HDCP_2_2_REG_RXINFO_OFFSET 0x69330
+#define DP_HDCP_2_2_REG_SEQ_NUM_V_OFFSET 0x69332
+#define DP_HDCP_2_2_REG_VPRIME_OFFSET 0x69335
+#define DP_HDCP_2_2_REG_RECV_ID_LIST_OFFSET 0x69345
+#define DP_HDCP_2_2_REG_V_OFFSET 0x693E0
+#define DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET 0x693F0
+#define DP_HDCP_2_2_REG_K_OFFSET 0x693F3
+#define DP_HDCP_2_2_REG_STREAM_ID_TYPE_OFFSET 0x693F5
+#define DP_HDCP_2_2_REG_MPRIME_OFFSET 0x69473
+#define DP_HDCP_2_2_REG_RXSTATUS_OFFSET 0x69493
+#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
+#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
+
+/* DP HDCP message start offsets in DPCD address space */
+#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET
+#define DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKPUB_KM_OFFSET
+#define DP_HDCP_2_2_AKE_STORED_KM_OFFSET DP_HDCP_2_2_REG_EKH_KM_WR_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET DP_HDCP_2_2_REG_HPRIME_OFFSET
+#define DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET \
+ DP_HDCP_2_2_REG_EKH_KM_RD_OFFSET
+#define DP_HDCP_2_2_LC_INIT_OFFSET DP_HDCP_2_2_REG_RN_OFFSET
+#define DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET DP_HDCP_2_2_REG_LPRIME_OFFSET
+#define DP_HDCP_2_2_SKE_SEND_EKS_OFFSET DP_HDCP_2_2_REG_EDKEY_KS_OFFSET
+#define DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET DP_HDCP_2_2_REG_RXINFO_OFFSET
+#define DP_HDCP_2_2_REP_SEND_ACK_OFFSET DP_HDCP_2_2_REG_V_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET DP_HDCP_2_2_REG_SEQ_NUM_M_OFFSET
+#define DP_HDCP_2_2_REP_STREAM_READY_OFFSET DP_HDCP_2_2_REG_MPRIME_OFFSET
+
+#define HDCP_2_2_DP_RXSTATUS_LEN 1
+#define HDCP_2_2_DP_RXSTATUS_READY(x) ((x) & BIT(0))
+#define HDCP_2_2_DP_RXSTATUS_H_PRIME(x) ((x) & BIT(1))
+#define HDCP_2_2_DP_RXSTATUS_PAIRING(x) ((x) & BIT(2))
+#define HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
+#define HDCP_2_2_DP_RXSTATUS_LINK_FAILED(x) ((x) & BIT(4))
+
/* DP 1.2 Sideband message defines */
/* peer device type - DP 1.2a Table 2-92 */
#define DP_PEER_DEVICE_NONE 0x0
@@ -963,6 +1021,7 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_DSC_RECEIVER_CAP_SIZE 0xf
#define EDP_PSR_RECEIVER_CAP_SIZE 2
#define EDP_DISPLAY_CTL_CAP_SIZE 3
@@ -993,6 +1052,7 @@ struct dp_sdp_header {
#define EDP_SDP_HEADER_REVISION_MASK 0x1F
#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
+#define DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 0x7F
struct edp_vsc_psr {
struct dp_sdp_header sdp_header;
@@ -1059,6 +1119,44 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
return dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT;
}
+/* DP/eDP DSC support */
+u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp);
+u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpc[DP_DSC_RECEIVER_CAP_SIZE],
+ u8 dsc_bpc[3]);
+
+static inline bool
+drm_dp_sink_supports_dsc(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_SUPPORT - DP_DSC_SUPPORT] &
+ DP_DSC_DECOMPRESSION_IS_SUPPORTED;
+}
+
+static inline u16
+drm_edp_dsc_sink_output_bpp(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ return dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_LOW - DP_DSC_SUPPORT] |
+ ((dsc_dpcd[DP_DSC_MAX_BITS_PER_PIXEL_HI - DP_DSC_SUPPORT] &
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_MASK) <<
+ DP_DSC_MAX_BITS_PER_PIXEL_HI_SHIFT);
+}
+
+static inline u32
+drm_dp_dsc_sink_max_slice_width(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+{
+ /* Max slice width = number of pixels * 320 */
+ return dsc_dpcd[DP_DSC_MAX_SLICE_WIDTH - DP_DSC_SUPPORT] *
+ DP_DSC_SLICE_WIDTH_MULTIPLIER;
+}
+
+/* Forward Error Correction Support on DP 1.4 */
+static inline bool
+drm_dp_sink_supports_fec(const u8 fec_capable)
+{
+ return fec_capable & DP_FEC_CAPABLE;
+}
+
/*
* DisplayPort AUX channel
*/
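
For illustration (not part of this patch): a sketch of caching the DSC
receiver caps over AUX and querying them with the new helpers; "aux" is
assumed to be the connector's &drm_dp_aux.

	u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];

	if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
			     sizeof(dsc_dpcd)) == sizeof(dsc_dpcd) &&
	    drm_dp_sink_supports_dsc(dsc_dpcd)) {
		u8 slices = drm_dp_dsc_sink_max_slice_count(dsc_dpcd, false);
		u32 slice_width = drm_dp_dsc_sink_max_slice_width(dsc_dpcd);
		/* ... derive the DSC configuration from these caps ... */
	}
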
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 7f78d26a0766..59f005b419cf 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -409,7 +409,6 @@ struct drm_dp_payload {
struct drm_dp_mst_topology_state {
struct drm_private_state base;
int avail_slots;
- struct drm_atomic_state *state;
struct drm_dp_mst_topology_mgr *mgr;
};
@@ -498,11 +497,6 @@ struct drm_dp_mst_topology_mgr {
int pbn_div;
/**
- * @state: State information for topology manager
- */
- struct drm_dp_mst_topology_state *state;
-
- /**
* @funcs: Atomic helper callbacks
*/
const struct drm_private_state_funcs *funcs;
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 3199ef70c007..35af23f5fa0d 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -471,6 +471,8 @@ struct drm_driver {
* @gem_prime_export:
*
* export GEM -> dmabuf
+ *
+ * This defaults to drm_gem_prime_export() if not set.
*/
struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
struct drm_gem_object *obj, int flags);
@@ -478,6 +480,8 @@ struct drm_driver {
* @gem_prime_import:
*
* import dmabuf -> GEM
+ *
+ * This defaults to drm_gem_prime_import() if not set.
*/
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
@@ -523,8 +527,10 @@ struct drm_driver {
* @dumb_map_offset:
*
* Allocate an offset in the drm device node's address space to be able to
- * memory map a dumb buffer. GEM-based drivers must use
- * drm_gem_create_mmap_offset() to implement this.
+ * memory map a dumb buffer.
+ *
+ * The default implementation is drm_gem_create_mmap_offset(). GEM-based
+ * drivers must not override this.
*
* Called by the user via ioctl.
*
@@ -544,6 +550,9 @@ struct drm_driver {
*
* Called by the user via ioctl.
*
+ * The default implementation is drm_gem_dumb_destroy(). GEM-based drivers
+ * must not override this.
+ *
* Returns:
*
* Zero on success, negative errno on failure.
@@ -621,7 +630,6 @@ void drm_dev_unregister(struct drm_device *dev);
void drm_dev_get(struct drm_device *dev);
void drm_dev_put(struct drm_device *dev);
-void drm_dev_unref(struct drm_device *dev);
void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h
new file mode 100644
index 000000000000..d03f1b83421a
--- /dev/null
+++ b/include/drm/drm_dsc.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright (C) 2018 Intel Corp.
+ *
+ * Authors:
+ * Manasi Navare <manasi.d.navare@intel.com>
+ */
+
+#ifndef DRM_DSC_H_
+#define DRM_DSC_H_
+
+#include <drm/drm_dp_helper.h>
+
+/* VESA Display Stream Compression DSC 1.2 constants */
+#define DSC_NUM_BUF_RANGES 15
+#define DSC_MUX_WORD_SIZE_8_10_BPC 48
+#define DSC_MUX_WORD_SIZE_12_BPC 64
+#define DSC_RC_PIXELS_PER_GROUP 3
+#define DSC_SCALE_DECREMENT_INTERVAL_MAX 4095
+#define DSC_RANGE_BPG_OFFSET_MASK 0x3f
+
+/* DSC Rate Control Constants */
+#define DSC_RC_MODEL_SIZE_CONST 8192
+#define DSC_RC_EDGE_FACTOR_CONST 6
+#define DSC_RC_TGT_OFFSET_HI_CONST 3
+#define DSC_RC_TGT_OFFSET_LO_CONST 3
+
+/* DSC PPS constants and macros */
+#define DSC_PPS_VERSION_MAJOR_SHIFT 4
+#define DSC_PPS_BPC_SHIFT 4
+#define DSC_PPS_MSB_SHIFT 8
+#define DSC_PPS_LSB_MASK (0xFF << 0)
+#define DSC_PPS_BPP_HIGH_MASK (0x3 << 8)
+#define DSC_PPS_VBR_EN_SHIFT 2
+#define DSC_PPS_SIMPLE422_SHIFT 3
+#define DSC_PPS_CONVERT_RGB_SHIFT 4
+#define DSC_PPS_BLOCK_PRED_EN_SHIFT 5
+#define DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK (0x3 << 8)
+#define DSC_PPS_SCALE_DEC_INT_HIGH_MASK (0xF << 8)
+#define DSC_PPS_RC_TGT_OFFSET_HI_SHIFT 4
+#define DSC_PPS_RC_RANGE_MINQP_SHIFT 11
+#define DSC_PPS_RC_RANGE_MAXQP_SHIFT 6
+#define DSC_PPS_NATIVE_420_SHIFT 1
+#define DSC_1_2_MAX_LINEBUF_DEPTH_BITS 16
+#define DSC_1_2_MAX_LINEBUF_DEPTH_VAL 0
+#define DSC_1_1_MAX_LINEBUF_DEPTH_BITS 13
+
+/* Configuration for a single Rate Control model range */
+struct drm_dsc_rc_range_parameters {
+ /* Min Quantization Parameters allowed for this range */
+ u8 range_min_qp;
+ /* Max Quantization Parameters allowed for this range */
+ u8 range_max_qp;
+ /* Bits/group offset to apply to target for this group */
+ u8 range_bpg_offset;
+};
+
+struct drm_dsc_config {
+ /* Bits / component for previous reconstructed line buffer */
+ u8 line_buf_depth;
+ /* Bits per component to code (must be 8, 10, or 12) */
+ u8 bits_per_component;
+ /*
+ * Flag indicating to do RGB - YCoCg conversion
+ * and back (should be 1 for RGB input)
+ */
+ bool convert_rgb;
+ u8 slice_count;
+ /* Slice Width */
+ u16 slice_width;
+ /* Slice Height */
+ u16 slice_height;
+ /*
+ * 4:2:2 enable mode (from PPS, 4:2:2 conversion happens
+ * outside of DSC encode/decode algorithm)
+ */
+ bool enable422;
+ /* Picture Width */
+ u16 pic_width;
+ /* Picture Height */
+ u16 pic_height;
+ /* Offset to bits/group used by RC to determine QP adjustment */
+ u8 rc_tgt_offset_high;
+ /* Offset to bits/group used by RC to determine QP adjustment */
+ u8 rc_tgt_offset_low;
+ /* Bits/pixel target << 4 (i.e., 4 fractional bits) */
+ u16 bits_per_pixel;
+ /*
+ * Factor to determine if an edge is present based
+ * on the bits produced
+ */
+ u8 rc_edge_factor;
+ /* Slow down incrementing once the range reaches this value */
+ u8 rc_quant_incr_limit1;
+ /* Slow down incrementing once the range reaches this value */
+ u8 rc_quant_incr_limit0;
+ /* Number of pixels to delay the initial transmission */
+ u16 initial_xmit_delay;
+ /* Number of pixels to delay the VLD on the decoder, not including SSM */
+ u16 initial_dec_delay;
+ /* Block prediction enable */
+ bool block_pred_enable;
+ /* Bits/group offset to use for first line of the slice */
+ u8 first_line_bpg_offset;
+ /* Value to use for RC model offset at slice start */
+ u16 initial_offset;
+ /* Thresholds defining each of the buffer ranges */
+ u16 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+ /* Parameters for each of the RC ranges */
+ struct drm_dsc_rc_range_parameters rc_range_params[DSC_NUM_BUF_RANGES];
+ /* Total size of RC model */
+ u16 rc_model_size;
+ /* Minimum QP where flatness information is sent */
+ u8 flatness_min_qp;
+ /* Maximum QP where flatness information is sent */
+ u8 flatness_max_qp;
+ /* Initial value for scale factor */
+ u8 initial_scale_value;
+ /* Decrement scale factor every scale_decrement_interval groups */
+ u16 scale_decrement_interval;
+ /* Increment scale factor every scale_increment_interval groups */
+ u16 scale_increment_interval;
+ /* Non-first line BPG offset to use */
+ u16 nfl_bpg_offset;
+ /* BPG offset used to enforce slice bit */
+ u16 slice_bpg_offset;
+ /* Final RC linear transformation offset value */
+ u16 final_offset;
+ /* Enable on-off VBR (i.e., disable stuffing bits) */
+ bool vbr_enable;
+ /* Mux word size (in bits) for SSM mode */
+ u8 mux_word_size;
+ /*
+ * The (max) size in bytes of the "chunks" that are
+ * used in slice multiplexing
+ */
+ u16 slice_chunk_size;
+ /* Rate Control buffer size in bits */
+ u16 rc_bits;
+ /* DSC Minor Version */
+ u8 dsc_version_minor;
+ /* DSC Major version */
+ u8 dsc_version_major;
+ /* Native 4:2:2 support */
+ bool native_422;
+ /* Native 4:2:0 support */
+ bool native_420;
+ /* Additional bits/group for the second line of a slice in native 4:2:0 */
+ u8 second_line_bpg_offset;
+ /* Number of bits deallocated for each group not in the second line of a slice */
+ u16 nsl_bpg_offset;
+ /* Offset adjustment for the second line in native 4:2:0 mode */
+ u16 second_line_offset_adj;
+};
+
+/**
+ * struct drm_dsc_picture_parameter_set - Represents 128 bytes of Picture Parameter Set
+ *
+ * The VESA DSC standard defines the picture parameter set (PPS) which display
+ * stream compression encoders must communicate to decoders.
+ * The PPS is encapsulated in 128 bytes (PPS 0 through PPS 127). The fields in
+ * this structure are as per Table 4.1 in the VESA DSC specification v1.1/v1.2.
+ * PPS fields that span more than one byte must be stored in big-endian format.
+ */
+struct drm_dsc_picture_parameter_set {
+ /**
+ * @dsc_version:
+ * PPS0[3:0] - dsc_version_minor: Contains Minor version of DSC
+ * PPS0[7:4] - dsc_version_major: Contains major version of DSC
+ */
+ u8 dsc_version;
+ /**
+ * @pps_identifier:
+ * PPS1[7:0] - Application specific identifier that can be
+ * used to differentiate between different PPS tables.
+ */
+ u8 pps_identifier;
+ /**
+ * @pps_reserved:
+ * PPS2[7:0]- RESERVED Byte
+ */
+ u8 pps_reserved;
+ /**
+ * @pps_3:
+ * PPS3[3:0] - linebuf_depth: Contains linebuffer bit depth used to
+ * generate the bitstream. (0x0 - 16 bits for DSC 1.2, 0x8 - 8 bits,
+ * 0xA - 10 bits, 0xB - 11 bits, 0xC - 12 bits, 0xD - 13 bits,
+ * 0xE - 14 bits for DSC 1.2, 0xF - 15 bits for DSC 1.2.)
+ * PPS3[7:4] - bits_per_component: Bits per component for the original
+ * pixels of the encoded picture.
+ * 0x0 = 16bpc (allowed only when dsc_version_minor = 0x2)
+ * 0x8 = 8bpc, 0xA = 10bpc, 0xC = 12bpc, 0xE = 14bpc (also
+ * allowed only when dsc_version_minor = 0x2)
+ */
+ u8 pps_3;
+ /**
+ * @pps_4:
+ * PPS4[1:0] - These are the most significant 2 bits of
+ * compressed BPP bits_per_pixel[9:0] syntax element.
+ * PPS4[2] - vbr_enable: 0 = VBR disabled, 1 = VBR enabled
+ * PPS4[3] - simple_422: Indicates if decoder drops samples to
+ * reconstruct the 4:2:2 picture.
+ * PPS4[4] - convert_rgb: Indicates if DSC color space conversion is
+ * active.
+ * PPS4[5] - block_pred_enable: Indicates if BP is used to code any
+ * groups in picture
+ * PPS4[7:6] - Reserved bits
+ */
+ u8 pps_4;
+ /**
+ * @bits_per_pixel_low:
+ * PPS5[7:0] - This indicates the lower significant 8 bits of
+ * the compressed BPP bits_per_pixel[9:0] element.
+ */
+ u8 bits_per_pixel_low;
+ /**
+ * @pic_height:
+ * PPS6[7:0], PPS7[7:0] - pic_height: Specifies the number of pixel rows
+ * within the raster.
+ */
+ __be16 pic_height;
+ /**
+ * @pic_width:
+ * PPS8[7:0], PPS9[7:0] - pic_width: Number of pixel columns within
+ * the raster.
+ */
+ __be16 pic_width;
+ /**
+ * @slice_height:
+ * PPS10[7:0], PPS11[7:0] - Slice height in units of pixels.
+ */
+ __be16 slice_height;
+ /**
+ * @slice_width:
+ * PPS12[7:0], PPS13[7:0] - Slice width in terms of pixels.
+ */
+ __be16 slice_width;
+ /**
+ * @chunk_size:
+ * PPS14[7:0], PPS15[7:0] - Size in units of bytes of the chunks
+ * that are used for slice multiplexing.
+ */
+ __be16 chunk_size;
+ /**
+ * @initial_xmit_delay_high:
+ * PPS16[1:0] - Most Significant two bits of initial transmission delay.
+ * It specifies the number of pixel times that the encoder waits before
+ * transmitting data from its rate buffer.
+ * PPS16[7:2] - Reserved
+ */
+ u8 initial_xmit_delay_high;
+ /**
+ * @initial_xmit_delay_low:
+ * PPS17[7:0] - Least significant 8 bits of initial transmission delay.
+ */
+ u8 initial_xmit_delay_low;
+ /**
+ * @initial_dec_delay:
+ *
+ * PPS18[7:0], PPS19[7:0] - Initial decoding delay which is the number
+ * of pixel times that the decoder accumulates data in its rate buffer
+ * before starting to decode and output pixels.
+ */
+ __be16 initial_dec_delay;
+ /**
+ * @pps20_reserved:
+ *
+ * PPS20[7:0] - Reserved
+ */
+ u8 pps20_reserved;
+ /**
+ * @initial_scale_value:
+ * PPS21[5:0] - Initial rcXformScale factor used at beginning
+ * of a slice.
+ * PPS21[7:6] - Reserved
+ */
+ u8 initial_scale_value;
+ /**
+ * @scale_increment_interval:
+ * PPS22[7:0], PPS23[7:0] - Number of group times between incrementing
+ * the rcXformScale factor at end of a slice.
+ */
+ __be16 scale_increment_interval;
+ /**
+ * @scale_decrement_interval_high:
+ * PPS24[3:0] - Higher 4 bits indicating number of group times between
+ * decrementing the rcXformScale factor at beginning of a slice.
+ * PPS24[7:4] - Reserved
+ */
+ u8 scale_decrement_interval_high;
+ /**
+ * @scale_decrement_interval_low:
+ * PPS25[7:0] - Lower 8 bits of scale decrement interval
+ */
+ u8 scale_decrement_interval_low;
+ /**
+ * @pps26_reserved:
+ * PPS26[7:0]
+ */
+ u8 pps26_reserved;
+ /**
+ * @first_line_bpg_offset:
+ * PPS27[4:0] - Number of additional bits that are allocated
+ * for each group on first line of a slice.
+ * PPS27[7:5] - Reserved
+ */
+ u8 first_line_bpg_offset;
+ /**
+ * @nfl_bpg_offset:
+ * PPS28[7:0], PPS29[7:0] - Number of bits including frac bits
+ * deallocated for each group for groups after the first line of slice.
+ */
+ __be16 nfl_bpg_offset;
+ /**
+ * @slice_bpg_offset:
+ * PPS30, PPS31[7:0] - Number of bits that are deallocated for each
+ * group to enforce the slice constraint.
+ */
+ __be16 slice_bpg_offset;
+ /**
+ * @initial_offset:
+ * PPS32,33[7:0] - Initial value for rcXformOffset
+ */
+ __be16 initial_offset;
+ /**
+ * @final_offset:
+ * PPS34,35[7:0] - Maximum end-of-slice value for rcXformOffset
+ */
+ __be16 final_offset;
+ /**
+ * @flatness_min_qp:
+ * PPS36[4:0] - Minimum QP at which flatness is signaled and
+ * flatness QP adjustment is made.
+ * PPS36[7:5] - Reserved
+ */
+ u8 flatness_min_qp;
+ /**
+ * @flatness_max_qp:
+ * PPS37[4:0] - Max QP at which flatness is signalled and
+ * the flatness adjustment is made.
+ * PPS37[7:5] - Reserved
+ */
+ u8 flatness_max_qp;
+ /**
+ * @rc_model_size:
+ * PPS38,39[7:0] - Number of bits within RC Model.
+ */
+ __be16 rc_model_size;
+ /**
+ * @rc_edge_factor:
+ * PPS40[3:0] - Ratio of current activity vs. previous
+ * activity to determine presence of edge.
+ * PPS40[7:4] - Reserved
+ */
+ u8 rc_edge_factor;
+ /**
+ * @rc_quant_incr_limit0:
+ * PPS41[4:0] - QP threshold used in short term RC
+ * PPS41[7:5] - Reserved
+ */
+ u8 rc_quant_incr_limit0;
+ /**
+ * @rc_quant_incr_limit1:
+ * PPS42[4:0] - QP threshold used in short term RC
+ * PPS42[7:5] - Reserved
+ */
+ u8 rc_quant_incr_limit1;
+ /**
+ * @rc_tgt_offset:
+ * PPS43[3:0] - Lower end of the variability range around the target
+ * bits per group that is allowed by short term RC.
+ * PPS43[7:4] - Upper end of the variability range around the target
+ * bits per group that is allowed by short term RC.
+ */
+ u8 rc_tgt_offset;
+ /**
+ * @rc_buf_thresh:
+ * PPS44[7:0] - PPS57[7:0] - Specifies the thresholds in RC model for
+ * the 15 ranges defined by 14 thresholds.
+ */
+ u8 rc_buf_thresh[DSC_NUM_BUF_RANGES - 1];
+ /**
+ * @rc_range_parameters:
+ * PPS58[7:0] - PPS87[7:0]
+ * Parameters that correspond to each of the 15 ranges.
+ */
+ __be16 rc_range_parameters[DSC_NUM_BUF_RANGES];
+ /**
+ * @native_422_420:
+ * PPS88[0] - 0 = Native 4:2:2 not used
+ * 1 = Native 4:2:2 used
+ * PPS88[1] - 0 = Native 4:2:0 not used
+ * 1 = Native 4:2:0 used
+ * PPS88[7:2] - Reserved 6 bits
+ */
+ u8 native_422_420;
+ /**
+ * @second_line_bpg_offset:
+ * PPS89[4:0] - Additional bits/group budget for the
+ * second line of a slice in Native 4:2:0 mode.
+ * Set to 0 if DSC minor version is 1 or native420 is 0.
+ * PPS89[7:5] - Reserved
+ */
+ u8 second_line_bpg_offset;
+ /**
+ * @nsl_bpg_offset:
+ * PPS90[7:0], PPS91[7:0] - Number of bits that are deallocated
+ * for each group that is not in the second line of a slice.
+ */
+ __be16 nsl_bpg_offset;
+ /**
+ * @second_line_offset_adj:
+ * PPS92[7:0], PPS93[7:0] - Used as offset adjustment for the second
+ * line in Native 4:2:0 mode.
+ */
+ __be16 second_line_offset_adj;
+ /**
+ * @pps_long_94_reserved:
+ * PPS 94, 95, 96, 97 - Reserved
+ */
+ u32 pps_long_94_reserved;
+ /**
+ * @pps_long_98_reserved:
+ * PPS 98, 99, 100, 101 - Reserved
+ */
+ u32 pps_long_98_reserved;
+ /**
+ * @pps_long_102_reserved:
+ * PPS 102, 103, 104, 105 - Reserved
+ */
+ u32 pps_long_102_reserved;
+ /**
+ * @pps_long_106_reserved:
+ * PPS 106, 107, 108, 109 - reserved
+ */
+ u32 pps_long_106_reserved;
+ /**
+ * @pps_long_110_reserved:
+ * PPS 110, 111, 112, 113 - reserved
+ */
+ u32 pps_long_110_reserved;
+ /**
+ * @pps_long_114_reserved:
+ * PPS 114 - 117 - reserved
+ */
+ u32 pps_long_114_reserved;
+ /**
+ * @pps_long_118_reserved:
+ * PPS 118 - 121 - reserved
+ */
+ u32 pps_long_118_reserved;
+ /**
+ * @pps_long_122_reserved:
+ * PPS 122- 125 - reserved
+ */
+ u32 pps_long_122_reserved;
+ /**
+ * @pps_short_126_reserved:
+ * PPS 126, 127 - reserved
+ */
+ __be16 pps_short_126_reserved;
+} __packed;
+
+/**
+ * struct drm_dsc_pps_infoframe - DSC infoframe carrying the Picture Parameter
+ * Set Metadata
+ *
+ * This structure represents the DSC PPS infoframe required to send the Picture
+ * Parameter Set metadata required before enabling VESA Display Stream
+ * Compression. This is based on the DP Secondary Data Packet structure and
+ * comprises the SDP header as defined in drm_dp_helper.h and the PPS payload.
+ *
+ * @pps_header: Header for PPS as per DP SDP header format
+ * @pps_payload: PPS payload fields as per DSC specification Table 4-1
+ */
+struct drm_dsc_pps_infoframe {
+ struct dp_sdp_header pps_header;
+ struct drm_dsc_picture_parameter_set pps_payload;
+} __packed;
+
+void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp);
+void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+ const struct drm_dsc_config *dsc_cfg);
+
+#endif /* DRM_DSC_H_ */
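
For illustration (not part of this patch): a sketch of building the PPS SDP
once a &drm_dsc_config has been computed; writing pps_sdp into the hardware's
SDP/DIP buffers is driver specific and elided here.

	struct drm_dsc_pps_infoframe pps_sdp;

	drm_dsc_dp_pps_header_init(&pps_sdp);
	drm_dsc_pps_infoframe_pack(&pps_sdp, &dsc_cfg);
	/* ... hand pps_sdp to the hardware's SDP transmission path ... */
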
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 4a65f0d155b0..8dbbe1eece1b 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -26,8 +26,6 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma);
void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma);
-void drm_fbdev_cma_set_suspend_unlocked(struct drm_fbdev_cma *fbdev_cma,
- bool state);
struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 26485acc51d7..84ac79219e4c 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -164,14 +164,14 @@ struct drm_file {
* See also the :ref:`section on primary nodes and authentication
* <drm_primary_node>`.
*/
- unsigned authenticated :1;
+ bool authenticated;
/**
* @stereo_allowed:
*
* True when the client has asked us to expose stereo 3D mode flags.
*/
- unsigned stereo_allowed :1;
+ bool stereo_allowed;
/**
* @universal_planes:
@@ -179,10 +179,10 @@ struct drm_file {
* True if client understands CRTC primary planes and cursor planes
* in the plane list. Automatically set when @atomic is set.
*/
- unsigned universal_planes:1;
+ bool universal_planes;
/** @atomic: True if client understands atomic properties. */
- unsigned atomic:1;
+ bool atomic;
/**
* @aspect_ratio_allowed:
@@ -190,14 +190,14 @@ struct drm_file {
* True, if client can handle picture aspect ratios, and has requested
* to pass this information along with the mode.
*/
- unsigned aspect_ratio_allowed:1;
+ bool aspect_ratio_allowed;
/**
* @writeback_connectors:
*
* True if client understands writeback connectors
*/
- unsigned writeback_connectors:1;
+ bool writeback_connectors;
/**
* @is_master:
@@ -208,7 +208,7 @@ struct drm_file {
* See also the :ref:`section on primary nodes and authentication
* <drm_primary_node>`.
*/
- unsigned is_master:1;
+ bool is_master;
/**
* @master:
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 865ef60c17af..bcb389f04618 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -52,25 +52,86 @@ struct drm_mode_fb_cmd2;
/**
* struct drm_format_info - information about a DRM format
- * @format: 4CC format identifier (DRM_FORMAT_*)
- * @depth: Color depth (number of bits per pixel excluding padding bits),
- * valid for a subset of RGB formats only. This is a legacy field, do not
- * use in new code and set to 0 for new formats.
- * @num_planes: Number of color planes (1 to 3)
- * @cpp: Number of bytes per pixel (per plane)
- * @hsub: Horizontal chroma subsampling factor
- * @vsub: Vertical chroma subsampling factor
- * @has_alpha: Does the format embeds an alpha component?
- * @is_yuv: Is it a YUV format?
*/
struct drm_format_info {
+ /** @format: 4CC format identifier (DRM_FORMAT_*) */
u32 format;
+
+ /**
+ * @depth:
+ *
+ * Color depth (number of bits per pixel excluding padding bits),
+ * valid for a subset of RGB formats only. This is a legacy field, do
+ * not use in new code and set to 0 for new formats.
+ */
u8 depth;
+
+ /** @num_planes: Number of color planes (1 to 3) */
u8 num_planes;
- u8 cpp[3];
+
+ union {
+ /**
+ * @cpp:
+ *
+ * Number of bytes per pixel (per plane). This is aliased with
+ * @char_per_block and deprecated in favour of the triplet
+ * @char_per_block, @block_w, @block_h, which better describes
+ * the pixel format.
+ */
+ u8 cpp[3];
+
+ /**
+ * @char_per_block:
+ *
+ * Number of bytes per block (per plane), where blocks are
+ * defined as a rectangle of pixels which are stored next to
+ * each other in a byte aligned memory region. Together with
+ * @block_w and @block_h this is used to properly describe tiles
+ * in tiled formats or to describe groups of pixels in packed
+ * formats for which the memory needed for a single pixel is not
+ * byte aligned.
+ *
+ * @cpp has been kept for historical reasons because there are
+ * a lot of places in drivers where it's used. In drm core for
+ * generic code paths the preferred way is to use
+ * @char_per_block, drm_format_info_block_width() and
+ * drm_format_info_block_height() which allows handling both
+ * block and non-block formats in the same way.
+ *
+ * For formats that are intended to be used only with non-linear
+ * modifiers both @cpp and @char_per_block must be 0 in the
+ * generic format table. Drivers could supply accurate
+ * information from their drm_mode_config.get_format_info hook
+ * if they want the core to validate the pitch.
+ */
+ u8 char_per_block[3];
+ };
+
+ /**
+ * @block_w:
+ *
+ * Block width in pixels, this is intended to be accessed through
+ * drm_format_info_block_width()
+ */
+ u8 block_w[3];
+
+ /**
+ * @block_h:
+ *
+ * Block height in pixels, this is intended to be accessed through
+ * drm_format_info_block_height()
+ */
+ u8 block_h[3];
+
+ /** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
+ /** @vsub: Vertical chroma subsampling factor */
u8 vsub;
+
+ /** @has_alpha: Does the format embed an alpha component? */
bool has_alpha;
+
+ /** @is_yuv: Is it a YUV format? */
bool is_yuv;
};
@@ -96,6 +157,12 @@ int drm_format_horz_chroma_subsampling(uint32_t format);
int drm_format_vert_chroma_subsampling(uint32_t format);
int drm_format_plane_width(int width, uint32_t format, int plane);
int drm_format_plane_height(int height, uint32_t format, int plane);
+unsigned int drm_format_info_block_width(const struct drm_format_info *info,
+ int plane);
+unsigned int drm_format_info_block_height(const struct drm_format_info *info,
+ int plane);
+uint64_t drm_format_info_min_pitch(const struct drm_format_info *info,
+ int plane, unsigned int buffer_width);
const char *drm_get_format_name(uint32_t format, struct drm_format_name_buf *buf);
#endif /* __DRM_FOURCC_H__ */
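
For illustration (not part of this patch): a sketch of the preferred,
block-aware way to compute layout parameters; "width" is an assumed
framebuffer width in pixels.

	const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
	uint64_t pitch = drm_format_info_min_pitch(info, 0, width);
	unsigned int block_w = drm_format_info_block_width(info, 0);
	unsigned int block_h = drm_format_info_block_height(info, 0);
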
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index c50502c656e5..c94acedfb08e 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -241,30 +241,6 @@ static inline void drm_framebuffer_put(struct drm_framebuffer *fb)
}
/**
- * drm_framebuffer_reference - acquire a framebuffer reference
- * @fb: DRM framebuffer
- *
- * This is a compatibility alias for drm_framebuffer_get() and should not be
- * used by new code.
- */
-static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
-{
- drm_framebuffer_get(fb);
-}
-
-/**
- * drm_framebuffer_unreference - release a framebuffer reference
- * @fb: DRM framebuffer
- *
- * This is a compatibility alias for drm_framebuffer_put() and should not be
- * used by new code.
- */
-static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
- drm_framebuffer_put(fb);
-}
-
-/**
* drm_framebuffer_read_refcount - read the framebuffer reference count.
* @fb: framebuffer
*
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 3583b98a1718..c95727425284 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -38,6 +38,121 @@
#include <drm/drm_vma_manager.h>
+struct drm_gem_object;
+
+/**
+ * struct drm_gem_object_funcs - GEM object functions
+ */
+struct drm_gem_object_funcs {
+ /**
+ * @free:
+ *
+ * Deconstructor for drm_gem_objects.
+ *
+ * This callback is mandatory.
+ */
+ void (*free)(struct drm_gem_object *obj);
+
+ /**
+ * @open:
+ *
+ * Called upon GEM handle creation.
+ *
+ * This callback is optional.
+ */
+ int (*open)(struct drm_gem_object *obj, struct drm_file *file);
+
+ /**
+ * @close:
+ *
+ * Called upon GEM handle release.
+ *
+ * This callback is optional.
+ */
+ void (*close)(struct drm_gem_object *obj, struct drm_file *file);
+
+ /**
+ * @print_info:
+ *
+ * If driver subclasses struct &drm_gem_object, it can implement this
+ * optional hook for printing additional driver specific info.
+ *
+ * drm_printf_indent() should be used in the callback passing it the
+ * indent argument.
+ *
+ * This callback is called from drm_gem_print_info().
+ *
+ * This callback is optional.
+ */
+ void (*print_info)(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj);
+
+ /**
+ * @export:
+ *
+ * Export backing buffer as a &dma_buf.
+ * If this is not set drm_gem_prime_export() is used.
+ *
+ * This callback is optional.
+ */
+ struct dma_buf *(*export)(struct drm_gem_object *obj, int flags);
+
+ /**
+ * @pin:
+ *
+ * Pin backing buffer in memory.
+ *
+ * This callback is optional.
+ */
+ int (*pin)(struct drm_gem_object *obj);
+
+ /**
+ * @unpin:
+ *
+ * Unpin backing buffer.
+ *
+ * This callback is optional.
+ */
+ void (*unpin)(struct drm_gem_object *obj);
+
+ /**
+ * @get_sg_table:
+ *
+ * Returns a Scatter-Gather table representation of the buffer.
+ * Used when exporting a buffer.
+ *
+ * This callback is mandatory if buffer export is supported.
+ */
+ struct sg_table *(*get_sg_table)(struct drm_gem_object *obj);
+
+ /**
+ * @vmap:
+ *
+ * Returns a virtual address for the buffer.
+ *
+ * This callback is optional.
+ */
+ void *(*vmap)(struct drm_gem_object *obj);
+
+ /**
+ * @vunmap:
+ *
+ * Releases the address previously returned by @vmap.
+ *
+ * This callback is optional.
+ */
+ void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
+
+ /**
+ * @vm_ops:
+ *
+ * Virtual memory operations used with mmap.
+ *
+ * This is optional but necessary for mmap support.
+ */
+ const struct vm_operations_struct *vm_ops;
+};
+
/**
* struct drm_gem_object - GEM buffer object
*
@@ -146,6 +261,17 @@ struct drm_gem_object {
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
+
+ /**
+ * @funcs:
+ *
+ * Optional GEM object functions. If this is set, it will be used instead of the
+ * corresponding &drm_driver GEM callbacks.
+ *
+ * New drivers should use this.
+ */
+ const struct drm_gem_object_funcs *funcs;
};
/**
@@ -222,56 +348,6 @@ __drm_gem_object_put(struct drm_gem_object *obj)
void drm_gem_object_put_unlocked(struct drm_gem_object *obj);
void drm_gem_object_put(struct drm_gem_object *obj);
-/**
- * drm_gem_object_reference - acquire a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_get() and should not be
- * used by new code.
- */
-static inline void drm_gem_object_reference(struct drm_gem_object *obj)
-{
- drm_gem_object_get(obj);
-}
-
-/**
- * __drm_gem_object_unreference - raw function to release a GEM buffer object
- * reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for __drm_gem_object_put() and should not be
- * used by new code.
- */
-static inline void __drm_gem_object_unreference(struct drm_gem_object *obj)
-{
- __drm_gem_object_put(obj);
-}
-
-/**
- * drm_gem_object_unreference_unlocked - release a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_put_unlocked() and should
- * not be used by new code.
- */
-static inline void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
-{
- drm_gem_object_put_unlocked(obj);
-}
-
-/**
- * drm_gem_object_unreference - release a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This is a compatibility alias for drm_gem_object_put() and should not be
- * used by new code.
- */
-static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
-{
- drm_gem_object_put(obj);
-}
-
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -293,4 +369,9 @@ int drm_gem_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle);
+int drm_gem_pin(struct drm_gem_object *obj);
+void drm_gem_unpin(struct drm_gem_object *obj);
+void *drm_gem_vmap(struct drm_gem_object *obj);
+void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr);
+
#endif /* __DRM_GEM_H__ */
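
For illustration (not part of this patch): a sketch of a driver opting in to
per-object callbacks instead of the &drm_driver GEM hooks; the foo_gem_*
functions are hypothetical.

static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free		= foo_gem_free_object,	/* mandatory */
	.get_sg_table	= foo_gem_get_sg_table,	/* needed for export */
	.vmap		= foo_gem_vmap,
	.vunmap		= foo_gem_vunmap,
};

	/* in the driver's object creation path: */
	obj->funcs = &foo_gem_funcs;
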
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 19777145cf8e..07c504940ba1 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -103,4 +103,28 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj);
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+struct drm_gem_object *
+drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size);
+
+/**
+ * DRM_GEM_CMA_VMAP_DRIVER_OPS - CMA GEM driver operations ensuring a virtual
+ * address on the buffer
+ *
+ * This macro provides a shortcut for setting the default GEM operations in the
+ * &drm_driver structure for drivers that also need a virtual address on
+ * imported buffers.
+ */
+#define DRM_GEM_CMA_VMAP_DRIVER_OPS \
+ .gem_create_object = drm_cma_gem_create_object_default_funcs, \
+ .dumb_create = drm_gem_cma_dumb_create, \
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table_vmap, \
+ .gem_prime_mmap = drm_gem_prime_mmap
+
+struct drm_gem_object *
+drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *drm,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
#endif /* __DRM_GEM_CMA_HELPER_H__ */
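
For illustration (not part of this patch): a sketch of a &drm_driver using the
new shortcut; fops, ioctls and version fields are elided.

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
			   DRIVER_PRIME,
	DRM_GEM_CMA_VMAP_DRIVER_OPS,
	.name = "foo",
	/* ... */
};
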
diff --git a/include/drm/drm_global.h b/include/drm/drm_global.h
deleted file mode 100644
index 3a830602a2e4..000000000000
--- a/include/drm/drm_global.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#ifndef _DRM_GLOBAL_H_
-#define _DRM_GLOBAL_H_
-enum drm_global_types {
- DRM_GLOBAL_TTM_MEM = 0,
- DRM_GLOBAL_TTM_BO,
- DRM_GLOBAL_TTM_OBJECT,
- DRM_GLOBAL_NUM
-};
-
-struct drm_global_reference {
- enum drm_global_types global_type;
- size_t size;
- void *object;
- int (*init) (struct drm_global_reference *);
- void (*release) (struct drm_global_reference *);
-};
-
-void drm_global_init(void);
-void drm_global_release(void);
-int drm_global_item_ref(struct drm_global_reference *ref);
-void drm_global_item_unref(struct drm_global_reference *ref);
-
-#endif
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 98e63d870139..a6de09c5e47f 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -38,4 +38,216 @@
#define DRM_HDCP_DDC_BSTATUS 0x41
#define DRM_HDCP_DDC_KSV_FIFO 0x43
+#define DRM_HDCP_1_4_SRM_ID 0x8
+#define DRM_HDCP_1_4_VRL_LENGTH_SIZE 3
+#define DRM_HDCP_1_4_DCP_SIG_SIZE 40
+
+/* Protocol message definition for HDCP2.2 specification */
+/*
+ * Protected content streams are classified into 2 types:
+ * - Type0: Can be transmitted with HDCP 1.4+
+ * - Type1: Can be transmitted with HDCP 2.2+
+ */
+#define HDCP_STREAM_TYPE0 0x00
+#define HDCP_STREAM_TYPE1 0x01
+
+/* HDCP2.2 Msg IDs */
+#define HDCP_2_2_NULL_MSG 1
+#define HDCP_2_2_AKE_INIT 2
+#define HDCP_2_2_AKE_SEND_CERT 3
+#define HDCP_2_2_AKE_NO_STORED_KM 4
+#define HDCP_2_2_AKE_STORED_KM 5
+#define HDCP_2_2_AKE_SEND_HPRIME 7
+#define HDCP_2_2_AKE_SEND_PAIRING_INFO 8
+#define HDCP_2_2_LC_INIT 9
+#define HDCP_2_2_LC_SEND_LPRIME 10
+#define HDCP_2_2_SKE_SEND_EKS 11
+#define HDCP_2_2_REP_SEND_RECVID_LIST 12
+#define HDCP_2_2_REP_SEND_ACK 15
+#define HDCP_2_2_REP_STREAM_MANAGE 16
+#define HDCP_2_2_REP_STREAM_READY 17
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
+
+#define HDCP_2_2_RTX_LEN 8
+#define HDCP_2_2_RRX_LEN 8
+
+#define HDCP_2_2_K_PUB_RX_MOD_N_LEN 128
+#define HDCP_2_2_K_PUB_RX_EXP_E_LEN 3
+#define HDCP_2_2_K_PUB_RX_LEN (HDCP_2_2_K_PUB_RX_MOD_N_LEN + \
+ HDCP_2_2_K_PUB_RX_EXP_E_LEN)
+
+#define HDCP_2_2_DCP_LLC_SIG_LEN 384
+
+#define HDCP_2_2_E_KPUB_KM_LEN 128
+#define HDCP_2_2_E_KH_KM_M_LEN (16 + 16)
+#define HDCP_2_2_H_PRIME_LEN 32
+#define HDCP_2_2_E_KH_KM_LEN 16
+#define HDCP_2_2_RN_LEN 8
+#define HDCP_2_2_L_PRIME_LEN 32
+#define HDCP_2_2_E_DKEY_KS_LEN 16
+#define HDCP_2_2_RIV_LEN 8
+#define HDCP_2_2_SEQ_NUM_LEN 3
+#define HDCP_2_2_V_PRIME_HALF_LEN (HDCP_2_2_L_PRIME_LEN / 2)
+#define HDCP_2_2_RECEIVER_ID_LEN DRM_HDCP_KSV_LEN
+#define HDCP_2_2_MAX_DEVICE_COUNT 31
+#define HDCP_2_2_RECEIVER_IDS_MAX_LEN (HDCP_2_2_RECEIVER_ID_LEN * \
+ HDCP_2_2_MAX_DEVICE_COUNT)
+#define HDCP_2_2_MPRIME_LEN 32
+
+/* Following Macros take a byte at a time for bit(s) masking */
+/*
+ * TODO: This has to be changed for DP MST, as multiple stream on
+ * same port is possible.
+ * For HDCP2.2 on HDMI and DP SST this value is always 1.
+ */
+#define HDCP_2_2_MAX_CONTENT_STREAMS_CNT 1
+#define HDCP_2_2_TXCAP_MASK_LEN 2
+#define HDCP_2_2_RXCAPS_LEN 3
+#define HDCP_2_2_RX_REPEATER(x) ((x) & BIT(0))
+#define HDCP_2_2_DP_HDCP_CAPABLE(x) ((x) & BIT(1))
+#define HDCP_2_2_RXINFO_LEN 2
+
+/* HDCP1.x compliant device in downstream */
+#define HDCP_2_2_HDCP1_DEVICE_CONNECTED(x) ((x) & BIT(0))
+
+/* HDCP2.0 Compliant repeater in downstream */
+#define HDCP_2_2_HDCP_2_0_REP_CONNECTED(x) ((x) & BIT(1))
+#define HDCP_2_2_MAX_CASCADE_EXCEEDED(x) ((x) & BIT(2))
+#define HDCP_2_2_MAX_DEVS_EXCEEDED(x) ((x) & BIT(3))
+#define HDCP_2_2_DEV_COUNT_LO(x) (((x) & (0xF << 4)) >> 4)
+#define HDCP_2_2_DEV_COUNT_HI(x) ((x) & BIT(0))
+#define HDCP_2_2_DEPTH(x) (((x) & (0x7 << 1)) >> 1)
+
+struct hdcp2_cert_rx {
+ u8 receiver_id[HDCP_2_2_RECEIVER_ID_LEN];
+ u8 kpub_rx[HDCP_2_2_K_PUB_RX_LEN];
+ u8 reserved[2];
+ u8 dcp_signature[HDCP_2_2_DCP_LLC_SIG_LEN];
+} __packed;
+
+struct hdcp2_streamid_type {
+ u8 stream_id;
+ u8 stream_type;
+} __packed;
+
+/*
+ * The TxCaps field specified in the HDCP HDMI, DP specs
+ * This field is big endian as specified in the errata.
+ */
+struct hdcp2_tx_caps {
+ /* Transmitter must set this to 0x2 */
+ u8 version;
+
+ /* Reserved for HDCP and DP Spec. Read as Zero */
+ u8 tx_cap_mask[HDCP_2_2_TXCAP_MASK_LEN];
+} __packed;
+
+/* Main structures for HDCP2.2 protocol communication */
+struct hdcp2_ake_init {
+ u8 msg_id;
+ u8 r_tx[HDCP_2_2_RTX_LEN];
+ struct hdcp2_tx_caps tx_caps;
+} __packed;
+
+struct hdcp2_ake_send_cert {
+ u8 msg_id;
+ struct hdcp2_cert_rx cert_rx;
+ u8 r_rx[HDCP_2_2_RRX_LEN];
+ u8 rx_caps[HDCP_2_2_RXCAPS_LEN];
+} __packed;
+
+struct hdcp2_ake_no_stored_km {
+ u8 msg_id;
+ u8 e_kpub_km[HDCP_2_2_E_KPUB_KM_LEN];
+} __packed;
+
+struct hdcp2_ake_stored_km {
+ u8 msg_id;
+ u8 e_kh_km_m[HDCP_2_2_E_KH_KM_M_LEN];
+} __packed;
+
+struct hdcp2_ake_send_hprime {
+ u8 msg_id;
+ u8 h_prime[HDCP_2_2_H_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ake_send_pairing_info {
+ u8 msg_id;
+ u8 e_kh_km[HDCP_2_2_E_KH_KM_LEN];
+} __packed;
+
+struct hdcp2_lc_init {
+ u8 msg_id;
+ u8 r_n[HDCP_2_2_RN_LEN];
+} __packed;
+
+struct hdcp2_lc_send_lprime {
+ u8 msg_id;
+ u8 l_prime[HDCP_2_2_L_PRIME_LEN];
+} __packed;
+
+struct hdcp2_ske_send_eks {
+ u8 msg_id;
+ u8 e_dkey_ks[HDCP_2_2_E_DKEY_KS_LEN];
+ u8 riv[HDCP_2_2_RIV_LEN];
+} __packed;
+
+struct hdcp2_rep_send_receiverid_list {
+ u8 msg_id;
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u8 seq_num_v[HDCP_2_2_SEQ_NUM_LEN];
+ u8 v_prime[HDCP_2_2_V_PRIME_HALF_LEN];
+ u8 receiver_ids[HDCP_2_2_RECEIVER_IDS_MAX_LEN];
+} __packed;
+
+struct hdcp2_rep_send_ack {
+ u8 msg_id;
+ u8 v[HDCP_2_2_V_PRIME_HALF_LEN];
+} __packed;
+
+struct hdcp2_rep_stream_manage {
+ u8 msg_id;
+ u8 seq_num_m[HDCP_2_2_SEQ_NUM_LEN];
+ __be16 k;
+ struct hdcp2_streamid_type streams[HDCP_2_2_MAX_CONTENT_STREAMS_CNT];
+} __packed;
+
+struct hdcp2_rep_stream_ready {
+ u8 msg_id;
+ u8 m_prime[HDCP_2_2_MPRIME_LEN];
+} __packed;
+
+struct hdcp2_dp_errata_stream_type {
+ u8 msg_id;
+ u8 stream_type;
+} __packed;
+
+/* HDCP2.2 TIMEOUTs in mSec */
+#define HDCP_2_2_CERT_TIMEOUT_MS 100
+#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000
+#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200
+#define HDCP_2_2_PAIRING_TIMEOUT_MS 200
+#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20
+#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 7
+#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000
+#define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100
+
+/* HDMI HDCP2.2 Register Offsets */
+#define HDCP_2_2_HDMI_REG_VER_OFFSET 0x50
+#define HDCP_2_2_HDMI_REG_WR_MSG_OFFSET 0x60
+#define HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET 0x70
+#define HDCP_2_2_HDMI_REG_RD_MSG_OFFSET 0x80
+#define HDCP_2_2_HDMI_REG_DBG_OFFSET 0xC0
+
+#define HDCP_2_2_HDMI_SUPPORT_MASK BIT(2)
+#define HDCP_2_2_RX_CAPS_VERSION_VAL 0x02
+#define HDCP_2_2_SEQ_NUM_MAX 0xFFFFFF
+#define HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN 200
+
+/* Below macros take a byte at a time and mask the bit(s) */
+#define HDCP_2_2_HDMI_RXSTATUS_LEN 2
+#define HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(x) ((x) & 0x3)
+#define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2))
+#define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3))
+
#endif
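
For illustration (not part of this patch): a sketch of polling the DP HDCP2.2
RxStatus register using the offsets and bit helpers added in this patch (the
DP_HDCP_2_2_* and HDCP_2_2_DP_* definitions live in drm_dp_helper.h); "aux"
and foo_hdcp_reauth() are hypothetical.

	u8 rx_status;

	if (drm_dp_dpcd_read(aux, DP_HDCP_2_2_REG_RXSTATUS_OFFSET,
			     &rx_status, HDCP_2_2_DP_RXSTATUS_LEN) ==
	    HDCP_2_2_DP_RXSTATUS_LEN &&
	    HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
		foo_hdcp_reauth();
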
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 4fef19064b0f..491528f48cfb 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -168,6 +168,12 @@ struct mipi_dsi_device_info {
* @format: pixel format for video mode
* @lanes: number of active data lanes
* @mode_flags: DSI operation mode related flags
+ * @hs_rate: maximum lane frequency for high speed mode in hertz. This should
+ * be set to the real limits of the hardware; zero is only accepted for
+ * legacy drivers.
+ * @lp_rate: maximum lane frequency for low power mode in hertz. This should
+ * be set to the real limits of the hardware; zero is only accepted for
+ * legacy drivers.
*/
struct mipi_dsi_device {
struct mipi_dsi_host *host;
@@ -178,6 +184,8 @@ struct mipi_dsi_device {
unsigned int lanes;
enum mipi_dsi_pixel_format format;
unsigned long mode_flags;
+ unsigned long hs_rate;
+ unsigned long lp_rate;
};
#define MIPI_DSI_MODULE_PREFIX "mipi-dsi:"
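
For illustration (not part of this patch): a sketch of a panel driver filling
in the new rate limits at probe time; the numbers are made-up panel limits.

static int foo_panel_probe(struct mipi_dsi_device *dsi)
{
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->hs_rate = 870000000;	/* 870 MHz high speed lane limit */
	dsi->lp_rate = 19200000;	/* 19.2 MHz low power lane limit */

	return mipi_dsi_attach(dsi);
}
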
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 928e4172a0bb..572274ccbec7 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -52,6 +52,12 @@ struct drm_mode_config_funcs {
* requested metadata, but most of that is left to the driver. See
* &struct drm_mode_fb_cmd2 for details.
*
+ * To validate the pixel format and modifier drivers can use
+ * drm_any_plane_has_format() to make sure at least one plane supports
+ * the requested values. Note that the driver must first determine the
+ * actual modifier used if the request doesn't have it specified,
+ * i.e. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0.
+ *
* If the parameters are deemed valid and the backing storage objects in
* the underlying memory manager all exist, then the driver allocates
* a new &drm_framebuffer structure, subclassed to contain
@@ -628,6 +634,15 @@ struct drm_mode_config {
*/
struct drm_property *prop_crtc_id;
/**
+ * @prop_fb_damage_clips: Optional plane property to mark damaged
+ * regions on the plane in framebuffer coordinates of the framebuffer
+ * attached to the plane.
+ *
+ * The layout of blob data is simply an array of &drm_mode_rect. Unlike
+ * plane src coordinates, damage clips are not in 16.16 fixed point.
+ */
+ struct drm_property *prop_fb_damage_clips;
+ /**
* @prop_active: Default atomic CRTC property to control the active
* state, which is the simplified implementation for DPMS in atomic
* drivers.
@@ -639,6 +654,11 @@ struct drm_mode_config {
* connectors must be of and active must be set to disabled, too.
*/
struct drm_property *prop_mode_id;
+ /**
+ * @prop_vrr_enabled: Default atomic CRTC property to indicate
+ * whether variable refresh rate should be enabled on the CRTC.
+ */
+ struct drm_property *prop_vrr_enabled;
/**
* @dvi_i_subconnector_property: Optional DVI-I property to
@@ -809,6 +829,13 @@ struct drm_mode_config {
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
+
+ /**
+ * @quirk_addfb_prefer_xbgr_30bpp:
+ *
+ * Special hack for legacy ADDFB to keep nouveau userspace happy. Should
+ * only ever be set by the nouveau kernel driver.
+ */
bool quirk_addfb_prefer_xbgr_30bpp;
/**
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index a685d1bb21f2..a308f2d6496f 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -130,4 +130,63 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
+/**
+ * DRM_MODESET_LOCK_ALL_BEGIN - Helper to acquire modeset locks
+ * @dev: drm device
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @flags: DRM_MODESET_ACQUIRE_* flags to pass to drm_modeset_acquire_init()
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * Use these macros to simplify grabbing all modeset locks using a local
+ * context. This has the advantage of reducing boilerplate while also properly
+ * checking return values where appropriate.
+ *
+ * Any code run between BEGIN and END will be holding the modeset locks.
+ *
+ * This must be paired with DRM_MODESET_LOCK_ALL_END(). We will jump back and
+ * forth between the labels on deadlock and error conditions.
+ *
+ * Drivers can acquire additional modeset locks. If any lock acquisition
+ * fails, the control flow needs to jump to DRM_MODESET_LOCK_ALL_END() with
+ * the @ret parameter containing the return value of drm_modeset_lock().
+ *
+ * Returns:
+ * The only possible value of ret immediately after DRM_MODESET_LOCK_ALL_BEGIN()
+ * is 0, so no error checking is necessary.
+ */
+#define DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, flags, ret) \
+ drm_modeset_acquire_init(&ctx, flags); \
+modeset_lock_retry: \
+ ret = drm_modeset_lock_all_ctx(dev, &ctx); \
+ if (ret) \
+ goto modeset_lock_fail;
+
+/**
+ * DRM_MODESET_LOCK_ALL_END - Helper to release and cleanup modeset locks
+ * @ctx: local modeset acquire context, will be dereferenced
+ * @ret: local ret/err/etc variable to track error status
+ *
+ * The other side of DRM_MODESET_LOCK_ALL_BEGIN(). It will bounce back to BEGIN
+ * if ret is -EDEADLK.
+ *
+ * It's important that you use the same ret variable for begin and end so
+ * deadlock conditions are properly handled.
+ *
+ * Returns:
+ * ret will be untouched unless it is -EDEADLK on entry. That means that if you
+ * successfully acquire the locks, ret will be whatever your code sets it to. If
+ * there is a deadlock or other failure with acquire or backoff, ret will be set
+ * to that failure. In both of these cases the code between BEGIN/END will not
+ * be run, so the failure will reflect the inability to grab the locks.
+ */
+#define DRM_MODESET_LOCK_ALL_END(ctx, ret) \
+modeset_lock_fail: \
+ if (ret == -EDEADLK) { \
+ ret = drm_modeset_backoff(&ctx); \
+ if (!ret) \
+ goto modeset_lock_retry; \
+ } \
+ drm_modeset_drop_locks(&ctx); \
+ drm_modeset_acquire_fini(&ctx);
+
#endif /* DRM_MODESET_LOCK_H_ */
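Taken together, the two macros are meant to be used like this (a minimal sketch; dev is the driver's drm_device):

    struct drm_modeset_acquire_ctx ctx;
    int ret;

    DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
    /* ret is 0 here and all modeset locks are held */
    /* ... read or update mode_config state ... */
    DRM_MODESET_LOCK_ALL_END(ctx, ret);
    return ret;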
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 0a0834bef8bd..6078c700d9ba 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -173,6 +173,16 @@ struct drm_plane_state {
*/
enum drm_color_range color_range;
+ /**
+ * @fb_damage_clips:
+ *
+ * Blob representing damage (area in plane framebuffer that changed
+ * since last plane update) as an array of &drm_mode_rect in framebuffer
+ * coordinates of the attached framebuffer. Note that unlike plane src,
+ * damage clips are not in 16.16 fixed point.
+ */
+ struct drm_property_blob *fb_damage_clips;
+
/** @src: clipped source coordinates of the plane (in 16.16) */
/** @dst: clipped destination coordinates of the plane */
struct drm_rect src, dst;
@@ -798,5 +808,39 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
#define drm_for_each_plane(plane, dev) \
list_for_each_entry(plane, &(dev)->mode_config.plane_list, head)
+bool drm_any_plane_has_format(struct drm_device *dev,
+ u32 format, u64 modifier);
+/**
+ * drm_plane_get_damage_clips_count - Returns damage clips count.
+ * @state: Plane state.
+ *
+ * Simple helper to get the number of &drm_mode_rect clips set by user-space
+ * during plane update.
+ *
+ * Return: Number of clips in plane fb_damage_clips blob property.
+ */
+static inline unsigned int
+drm_plane_get_damage_clips_count(const struct drm_plane_state *state)
+{
+ return (state && state->fb_damage_clips) ?
+ state->fb_damage_clips->length/sizeof(struct drm_mode_rect) : 0;
+}
+
+/**
+ * drm_plane_get_damage_clips - Returns damage clips.
+ * @state: Plane state.
+ *
+ * Note that this function returns uapi type &drm_mode_rect. Drivers might
+ * instead be interested in internal &drm_rect which can be obtained by calling
+ * drm_helper_get_plane_damage_clips().
+ *
+ * Return: Damage clips in plane fb_damage_clips blob property.
+ */
+static inline struct drm_mode_rect *
+drm_plane_get_damage_clips(const struct drm_plane_state *state)
+{
+ return (struct drm_mode_rect *)((state && state->fb_damage_clips) ?
+ state->fb_damage_clips->data : NULL);
+}
#endif
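A sketch of how a driver's plane update path might consume the two helpers above to flush only the damaged regions (the flush itself is hardware specific and elided):

    struct drm_mode_rect *clips = drm_plane_get_damage_clips(state);
    unsigned int i, num_clips = drm_plane_get_damage_clips_count(state);

    for (i = 0; i < num_clips; i++) {
        /* flush clips[i].x1/y1 .. clips[i].x2/y2 of state->fb */
    }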
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 26cee2934781..331ebd60b3a3 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -38,42 +38,7 @@
*/
#define DRM_PLANE_HELPER_NO_SCALING (1<<16)
-int drm_plane_helper_check_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_rect *src,
- struct drm_rect *dest,
- unsigned int rotation,
- int min_scale,
- int max_scale,
- bool can_position,
- bool can_update_disabled,
- bool *visible);
-int drm_primary_helper_update(struct drm_plane *plane,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx);
-int drm_primary_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
-int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int crtc_x, int crtc_y,
- unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx);
-int drm_plane_helper_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
-
-/* For use by drm_crtc_helper.c */
-int drm_plane_helper_commit(struct drm_plane *plane,
- struct drm_plane_state *plane_state,
- struct drm_framebuffer *old_fb);
#endif
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index d716d653b096..b03731a3f079 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -70,6 +70,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd);
+int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
@@ -93,9 +94,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
-void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
- void *addr);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
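The newly exported drm_gem_prime_mmap() can be plugged into a driver's &drm_driver alongside the other prime helpers, e.g. (a hedged sketch, other callbacks elided):

    static struct drm_driver example_driver = {
        /* ... */
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_mmap     = drm_gem_prime_mmap,
    };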
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 5b9efff35d6d..4a0a80d658c7 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -153,7 +153,8 @@ struct drm_property {
* userspace. The kernel is allowed to update the value of these
* properties. This is generally used to expose probe state to
* userspace, e.g. the EDID, or the connector path property on DP
- * MST sinks.
+ * MST sinks. The kernel can update the value of an immutable property
+ * by calling drm_object_property_set_value().
*/
uint32_t flags;
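For example (illustrative; prop and value stand for any immutable property and its new value):

    drm_object_property_set_value(&connector->base, prop, value);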
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 425432b85a87..b1fe921f8e8f 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -131,10 +131,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
u32 handle);
-void drm_syncobj_replace_fence(struct drm_syncobj *syncobj, u64 point,
+void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence);
int drm_syncobj_find_fence(struct drm_file *file_private,
- u32 handle, u64 point,
+ u32 handle, u64 point, u64 flags,
struct dma_fence **fence);
void drm_syncobj_free(struct kref *kref);
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
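Against the updated prototypes, a caller sketch (file_priv, handle and syncobj are assumed to exist; a point and flags of 0 keep the previous binary-syncobj behaviour):

    struct dma_fence *fence;
    int ret;

    ret = drm_syncobj_find_fence(file_priv, handle, 0, 0, &fence);
    if (ret)
        return ret;
    drm_syncobj_replace_fence(syncobj, fence);
    dma_fence_put(fence);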
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index d25a9603ab57..6ad9630d4f48 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -95,7 +95,7 @@ struct drm_vblank_crtc {
/**
* @queue: Wait queue for vblank waiters.
*/
- wait_queue_head_t queue; /**< VBLANK wait queue */
+ wait_queue_head_t queue;
/**
* @disable_timer: Disable timer for the delayed vblank disabling
* hysteresis logic. Vblank disabling is controlled through the
@@ -107,7 +107,7 @@ struct drm_vblank_crtc {
/**
* @seqlock: Protect vblank count and time.
*/
- seqlock_t seqlock; /* protects vblank count and time */
+ seqlock_t seqlock;
/**
* @count: Current software vblank counter.
@@ -123,7 +123,7 @@ struct drm_vblank_crtc {
* this refcount reaches 0 can the hardware interrupt be disabled using
* @disable_timer.
*/
- atomic_t refcount; /* number of users of vblank interruptsper crtc */
+ atomic_t refcount;
/**
* @last: Protected by &drm_device.vbl_lock, used for wraparound handling.
*/
@@ -136,7 +136,7 @@ struct drm_vblank_crtc {
* call drm_crtc_vblank_off() and drm_crtc_vblank_on(), which explicitly
* save and restore the vblank count.
*/
- unsigned int inmodeset; /* Display driver is setting mode */
+ unsigned int inmodeset;
/**
* @pipe: drm_crtc_index() of the &drm_crtc corresponding to this
* structure.
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index d87b268f1781..47e19796c450 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -264,6 +264,7 @@ struct drm_sched_backend_ops {
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
* @num_jobs: the number of jobs in queue in the scheduler
+ * @ready: marks if the underlying HW is ready to work
*
* One scheduler is implemented for each hardware ring.
*/
@@ -283,22 +284,26 @@ struct drm_gpu_scheduler {
spinlock_t job_list_lock;
int hang_limit;
atomic_t num_jobs;
+ bool ready;
};
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
uint32_t hw_submission, unsigned hang_limit, long timeout,
const char *name);
+
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner);
+void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
struct drm_sched_job *job);
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
@@ -326,4 +331,8 @@ struct drm_sched_fence *drm_sched_fence_create(
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+ unsigned long remaining);
+
#endif
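A minimal initialization sketch against the declarations above (ring, example_sched_ops and the numeric limits are placeholders):

    ret = drm_sched_init(&ring->sched, &example_sched_ops,
                         hw_submission, hang_limit,
                         msecs_to_jiffies(500), ring->name);
    if (ret)
        return ret;

    /* later, e.g. from a hardware-fault interrupt handler: */
    drm_sched_fault(&ring->sched);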
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fd965ffbb92e..192667144693 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -365,16 +365,20 @@
INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
/* AML/KBL Y GT2 */
-#define INTEL_AML_GT2_IDS(info) \
+#define INTEL_AML_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
+/* AML/CFL Y GT2 */
+#define INTEL_AML_CFL_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x87CA, info)
+
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
INTEL_KBL_GT3_IDS(info), \
INTEL_KBL_GT4_IDS(info), \
- INTEL_AML_GT2_IDS(info)
+ INTEL_AML_KBL_GT2_IDS(info)
/* CFL S */
#define INTEL_CFL_S_GT1_IDS(info) \
@@ -407,17 +411,17 @@
/* WHL/CFL U GT1 */
#define INTEL_WHL_U_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA1, info)
+ INTEL_VGA_DEVICE(0x3EA1, info), \
+ INTEL_VGA_DEVICE(0x3EA4, info)
/* WHL/CFL U GT2 */
#define INTEL_WHL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA0, info)
+ INTEL_VGA_DEVICE(0x3EA0, info), \
+ INTEL_VGA_DEVICE(0x3EA3, info)
/* WHL/CFL U GT3 */
#define INTEL_WHL_U_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA2, info), \
- INTEL_VGA_DEVICE(0x3EA3, info), \
- INTEL_VGA_DEVICE(0x3EA4, info)
+ INTEL_VGA_DEVICE(0x3EA2, info)
#define INTEL_CFL_IDS(info) \
INTEL_CFL_S_GT1_IDS(info), \
@@ -427,7 +431,8 @@
INTEL_CFL_U_GT3_IDS(info), \
INTEL_WHL_U_GT1_IDS(info), \
INTEL_WHL_U_GT2_IDS(info), \
- INTEL_WHL_U_GT3_IDS(info)
+ INTEL_WHL_U_GT3_IDS(info), \
+ INTEL_AML_CFL_GT2_IDS(info)
/* CNL */
#define INTEL_CNL_IDS(info) \
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index fe9827d0ca8a..448aa5ea4722 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -10,10 +10,15 @@
#ifndef __LINUX_TINYDRM_H
#define __LINUX_TINYDRM_H
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
+#include <linux/mutex.h>
#include <drm/drm_simple_kms_helper.h>
+struct drm_clip_rect;
+struct drm_driver;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_framebuffer_funcs;
+
/**
* struct tinydrm_device - tinydrm device
*/
@@ -54,27 +59,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
}
/**
- * TINYDRM_GEM_DRIVER_OPS - default tinydrm gem operations
- *
- * This macro provides a shortcut for setting the tinydrm GEM operations in
- * the &drm_driver structure.
- */
-#define TINYDRM_GEM_DRIVER_OPS \
- .gem_free_object_unlocked = tinydrm_gem_cma_free_object, \
- .gem_print_info = drm_gem_cma_print_info, \
- .gem_vm_ops = &drm_gem_cma_vm_ops, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import = drm_gem_prime_import, \
- .gem_prime_export = drm_gem_prime_export, \
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table, \
- .gem_prime_import_sg_table = tinydrm_gem_cma_prime_import_sg_table, \
- .gem_prime_vmap = drm_gem_cma_prime_vmap, \
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap, \
- .gem_prime_mmap = drm_gem_cma_prime_mmap, \
- .dumb_create = drm_gem_cma_dumb_create
-
-/**
* TINYDRM_MODE - tinydrm display mode
* @hd: Horizontal resolution, width
* @vd: Vertical resolution, height
@@ -97,11 +81,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
.type = DRM_MODE_TYPE_DRIVER, \
.clock = 1 /* pass validation */
-void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj);
-struct drm_gem_object *
-tinydrm_gem_cma_prime_import_sg_table(struct drm_device *drm,
- struct dma_buf_attachment *attach,
- struct sg_table *sgt);
int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
const struct drm_framebuffer_funcs *fb_funcs,
struct drm_driver *driver);
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e4fee8e02559..1021106438b2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -31,7 +31,6 @@
#define _TTM_BO_DRIVER_H_
#include <drm/drm_mm.h>
-#include <drm/drm_global.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
@@ -385,15 +384,6 @@ struct ttm_bo_driver {
};
/**
- * struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
- */
-
-struct ttm_bo_global_ref {
- struct drm_global_reference ref;
- struct ttm_mem_global *mem_glob;
-};
-
-/**
* struct ttm_bo_global - Buffer object driver global data.
*
* @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
@@ -407,7 +397,7 @@ struct ttm_bo_global_ref {
* @swap_lru: Lru list of buffer objects used for swapping.
*/
-struct ttm_bo_global {
+extern struct ttm_bo_global {
/**
* Constant after init.
@@ -416,12 +406,12 @@ struct ttm_bo_global {
struct kobject kobj;
struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
- struct mutex device_list_mutex;
spinlock_t lru_lock;
/**
- * Protected by device_list_mutex.
+ * Protected by ttm_global_mutex.
*/
+ unsigned int use_count;
struct list_head device_list;
/**
@@ -433,7 +423,7 @@ struct ttm_bo_global {
* Internal protection.
*/
atomic_t bo_count;
-};
+} ttm_bo_glob;
#define TTM_NUM_MEM_TYPES 8
@@ -578,9 +568,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
-void ttm_bo_global_release(struct drm_global_reference *ref);
-int ttm_bo_global_init(struct drm_global_reference *ref);
-
int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
@@ -598,7 +585,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
* Returns:
* !0: Failure.
*/
-int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
+int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset, bool need_dma32);
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index b0fdd1980034..621615fa7728 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -40,13 +40,13 @@
*
* @head: list head for thread-private list.
* @bo: refcounted buffer object pointer.
- * @shared: should the fence be added shared?
+ * @num_shared: How many shared fences we want to add.
*/
struct ttm_validate_buffer {
struct list_head head;
struct ttm_buffer_object *bo;
- bool shared;
+ unsigned int num_shared;
};
/**
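With the bool replaced by a count, callers now declare up front how many shared fence slots each buffer needs; a hedged reservation sketch (bo, fence and the error handling are simplified):

    struct ttm_validate_buffer entry = { .bo = bo, .num_shared = 1 };
    struct ww_acquire_ctx ticket;
    LIST_HEAD(list);
    int ret;

    list_add(&entry.head, &list);
    ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
    if (ret)
        return ret;
    /* ... command submission; one shared fence slot is preallocated ... */
    ttm_eu_fence_buffer_objects(&ticket, &list, fence);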
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 737b5fed8003..3ff48a0a2d7b 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -63,7 +63,7 @@
#define TTM_MEM_MAX_ZONES 2
struct ttm_mem_zone;
-struct ttm_mem_global {
+extern struct ttm_mem_global {
struct kobject kobj;
struct ttm_bo_global *bo_glob;
struct workqueue_struct *swap_queue;
@@ -78,7 +78,7 @@ struct ttm_mem_global {
#else
struct ttm_mem_zone *zone_dma32;
#endif
-};
+} ttm_mem_glob;
extern int ttm_mem_global_init(struct ttm_mem_global *glob);
extern void ttm_mem_global_release(struct ttm_mem_global *glob);
diff --git a/include/dt-bindings/clock/bcm2835-aux.h b/include/dt-bindings/clock/bcm2835-aux.h
index d91156e2658d..bb79de383a3b 100644
--- a/include/dt-bindings/clock/bcm2835-aux.h
+++ b/include/dt-bindings/clock/bcm2835-aux.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define BCM2835_AUX_CLOCK_UART 0
diff --git a/include/dt-bindings/clock/bcm2835.h b/include/dt-bindings/clock/bcm2835.h
index a0c812b0fa39..2cec01f96897 100644
--- a/include/dt-bindings/clock/bcm2835.h
+++ b/include/dt-bindings/clock/bcm2835.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#define BCM2835_PLLA 0
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 3979d48c025f..db0763e96173 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -128,5 +128,23 @@
#define CLKID_VDEC_1 153
#define CLKID_VDEC_HEVC 156
#define CLKID_GEN_CLK 159
+#define CLKID_VID_PLL 166
+#define CLKID_VCLK 175
+#define CLKID_VCLK2 176
+#define CLKID_VCLK_DIV1 185
+#define CLKID_VCLK_DIV2 186
+#define CLKID_VCLK_DIV4 187
+#define CLKID_VCLK_DIV6 188
+#define CLKID_VCLK_DIV12 189
+#define CLKID_VCLK2_DIV1 190
+#define CLKID_VCLK2_DIV2 191
+#define CLKID_VCLK2_DIV4 192
+#define CLKID_VCLK2_DIV6 193
+#define CLKID_VCLK2_DIV12 194
+#define CLKID_CTS_ENCI 199
+#define CLKID_CTS_ENCP 200
+#define CLKID_CTS_VDAC 201
+#define CLKID_HDMI_TX 202
+#define CLKID_HDMI 205
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 87b068f4a998..b3cef297d5df 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -274,6 +274,8 @@
#define IMX6QDL_CLK_EPIT1 261
#define IMX6QDL_CLK_EPIT2 262
#define IMX6QDL_CLK_MMDC_P0_IPG 263
-#define IMX6QDL_CLK_END 264
+#define IMX6QDL_CLK_DCIC1 264
+#define IMX6QDL_CLK_DCIC2 265
+#define IMX6QDL_CLK_END 266
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h
new file mode 100644
index 000000000000..21d872e69cb1
--- /dev/null
+++ b/include/dt-bindings/clock/imx7ulp-clock.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017~2018 NXP
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX7ULP_H
+#define __DT_BINDINGS_CLOCK_IMX7ULP_H
+
+/* SCG1 */
+
+#define IMX7ULP_CLK_DUMMY 0
+#define IMX7ULP_CLK_ROSC 1
+#define IMX7ULP_CLK_SOSC 2
+#define IMX7ULP_CLK_FIRC 3
+#define IMX7ULP_CLK_SPLL_PRE_SEL 4
+#define IMX7ULP_CLK_SPLL_PRE_DIV 5
+#define IMX7ULP_CLK_SPLL 6
+#define IMX7ULP_CLK_SPLL_POST_DIV1 7
+#define IMX7ULP_CLK_SPLL_POST_DIV2 8
+#define IMX7ULP_CLK_SPLL_PFD0 9
+#define IMX7ULP_CLK_SPLL_PFD1 10
+#define IMX7ULP_CLK_SPLL_PFD2 11
+#define IMX7ULP_CLK_SPLL_PFD3 12
+#define IMX7ULP_CLK_SPLL_PFD_SEL 13
+#define IMX7ULP_CLK_SPLL_SEL 14
+#define IMX7ULP_CLK_APLL_PRE_SEL 15
+#define IMX7ULP_CLK_APLL_PRE_DIV 16
+#define IMX7ULP_CLK_APLL 17
+#define IMX7ULP_CLK_APLL_POST_DIV1 18
+#define IMX7ULP_CLK_APLL_POST_DIV2 19
+#define IMX7ULP_CLK_APLL_PFD0 20
+#define IMX7ULP_CLK_APLL_PFD1 21
+#define IMX7ULP_CLK_APLL_PFD2 22
+#define IMX7ULP_CLK_APLL_PFD3 23
+#define IMX7ULP_CLK_APLL_PFD_SEL 24
+#define IMX7ULP_CLK_APLL_SEL 25
+#define IMX7ULP_CLK_UPLL 26
+#define IMX7ULP_CLK_SYS_SEL 27
+#define IMX7ULP_CLK_CORE_DIV 28
+#define IMX7ULP_CLK_BUS_DIV 29
+#define IMX7ULP_CLK_PLAT_DIV 30
+#define IMX7ULP_CLK_DDR_SEL 31
+#define IMX7ULP_CLK_DDR_DIV 32
+#define IMX7ULP_CLK_NIC_SEL 33
+#define IMX7ULP_CLK_NIC0_DIV 34
+#define IMX7ULP_CLK_GPU_DIV 35
+#define IMX7ULP_CLK_NIC1_DIV 36
+#define IMX7ULP_CLK_NIC1_BUS_DIV 37
+#define IMX7ULP_CLK_NIC1_EXT_DIV 38
+#define IMX7ULP_CLK_MIPI_PLL 39
+#define IMX7ULP_CLK_SIRC 40
+#define IMX7ULP_CLK_SOSC_BUS_CLK 41
+#define IMX7ULP_CLK_FIRC_BUS_CLK 42
+#define IMX7ULP_CLK_SPLL_BUS_CLK 43
+#define IMX7ULP_CLK_HSRUN_SYS_SEL 44
+#define IMX7ULP_CLK_HSRUN_CORE_DIV 45
+
+#define IMX7ULP_CLK_SCG1_END 46
+
+/* PCC2 */
+#define IMX7ULP_CLK_DMA1 0
+#define IMX7ULP_CLK_RGPIO2P1 1
+#define IMX7ULP_CLK_FLEXBUS 2
+#define IMX7ULP_CLK_SEMA42_1 3
+#define IMX7ULP_CLK_DMA_MUX1 4
+#define IMX7ULP_CLK_SNVS 5
+#define IMX7ULP_CLK_CAAM 6
+#define IMX7ULP_CLK_LPTPM4 7
+#define IMX7ULP_CLK_LPTPM5 8
+#define IMX7ULP_CLK_LPIT1 9
+#define IMX7ULP_CLK_LPSPI2 10
+#define IMX7ULP_CLK_LPSPI3 11
+#define IMX7ULP_CLK_LPI2C4 12
+#define IMX7ULP_CLK_LPI2C5 13
+#define IMX7ULP_CLK_LPUART4 14
+#define IMX7ULP_CLK_LPUART5 15
+#define IMX7ULP_CLK_FLEXIO1 16
+#define IMX7ULP_CLK_USB0 17
+#define IMX7ULP_CLK_USB1 18
+#define IMX7ULP_CLK_USB_PHY 19
+#define IMX7ULP_CLK_USB_PL301 20
+#define IMX7ULP_CLK_USDHC0 21
+#define IMX7ULP_CLK_USDHC1 22
+#define IMX7ULP_CLK_WDG1 23
+#define IMX7ULP_CLK_WDG2 24
+
+#define IMX7ULP_CLK_PCC2_END 25
+
+/* PCC3 */
+#define IMX7ULP_CLK_LPTPM6 0
+#define IMX7ULP_CLK_LPTPM7 1
+#define IMX7ULP_CLK_LPI2C6 2
+#define IMX7ULP_CLK_LPI2C7 3
+#define IMX7ULP_CLK_LPUART6 4
+#define IMX7ULP_CLK_LPUART7 5
+#define IMX7ULP_CLK_VIU 6
+#define IMX7ULP_CLK_DSI 7
+#define IMX7ULP_CLK_LCDIF 8
+#define IMX7ULP_CLK_MMDC 9
+#define IMX7ULP_CLK_PCTLC 10
+#define IMX7ULP_CLK_PCTLD 11
+#define IMX7ULP_CLK_PCTLE 12
+#define IMX7ULP_CLK_PCTLF 13
+#define IMX7ULP_CLK_GPU3D 14
+#define IMX7ULP_CLK_GPU2D 15
+
+#define IMX7ULP_CLK_PCC3_END 16
+
+/* SMC1 */
+#define IMX7ULP_CLK_ARM 0
+
+#define IMX7ULP_CLK_SMC1_END 1
+
+#endif /* __DT_BINDINGS_CLOCK_IMX7ULP_H */
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
new file mode 100644
index 000000000000..b53be41929be
--- /dev/null
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -0,0 +1,395 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8MQ_H
+#define __DT_BINDINGS_CLOCK_IMX8MQ_H
+
+#define IMX8MQ_CLK_DUMMY 0
+#define IMX8MQ_CLK_32K 1
+#define IMX8MQ_CLK_25M 2
+#define IMX8MQ_CLK_27M 3
+#define IMX8MQ_CLK_EXT1 4
+#define IMX8MQ_CLK_EXT2 5
+#define IMX8MQ_CLK_EXT3 6
+#define IMX8MQ_CLK_EXT4 7
+
+/* ANAMIX PLL clocks */
+/* FRAC PLLs */
+/* ARM PLL */
+#define IMX8MQ_ARM_PLL_REF_SEL 8
+#define IMX8MQ_ARM_PLL_REF_DIV 9
+#define IMX8MQ_ARM_PLL 10
+#define IMX8MQ_ARM_PLL_BYPASS 11
+#define IMX8MQ_ARM_PLL_OUT 12
+
+/* GPU PLL */
+#define IMX8MQ_GPU_PLL_REF_SEL 13
+#define IMX8MQ_GPU_PLL_REF_DIV 14
+#define IMX8MQ_GPU_PLL 15
+#define IMX8MQ_GPU_PLL_BYPASS 16
+#define IMX8MQ_GPU_PLL_OUT 17
+
+/* VPU PLL */
+#define IMX8MQ_VPU_PLL_REF_SEL 18
+#define IMX8MQ_VPU_PLL_REF_DIV 19
+#define IMX8MQ_VPU_PLL 20
+#define IMX8MQ_VPU_PLL_BYPASS 21
+#define IMX8MQ_VPU_PLL_OUT 22
+
+/* AUDIO PLL1 */
+#define IMX8MQ_AUDIO_PLL1_REF_SEL 23
+#define IMX8MQ_AUDIO_PLL1_REF_DIV 24
+#define IMX8MQ_AUDIO_PLL1 25
+#define IMX8MQ_AUDIO_PLL1_BYPASS 26
+#define IMX8MQ_AUDIO_PLL1_OUT 27
+
+/* AUDIO PLL2 */
+#define IMX8MQ_AUDIO_PLL2_REF_SEL 28
+#define IMX8MQ_AUDIO_PLL2_REF_DIV 29
+#define IMX8MQ_AUDIO_PLL2 30
+#define IMX8MQ_AUDIO_PLL2_BYPASS 31
+#define IMX8MQ_AUDIO_PLL2_OUT 32
+
+/* VIDEO PLL1 */
+#define IMX8MQ_VIDEO_PLL1_REF_SEL 33
+#define IMX8MQ_VIDEO_PLL1_REF_DIV 34
+#define IMX8MQ_VIDEO_PLL1 35
+#define IMX8MQ_VIDEO_PLL1_BYPASS 36
+#define IMX8MQ_VIDEO_PLL1_OUT 37
+
+/* SYS1 PLL */
+#define IMX8MQ_SYS1_PLL1_REF_SEL 38
+#define IMX8MQ_SYS1_PLL1_REF_DIV 39
+#define IMX8MQ_SYS1_PLL1 40
+#define IMX8MQ_SYS1_PLL1_OUT 41
+#define IMX8MQ_SYS1_PLL1_OUT_DIV 42
+#define IMX8MQ_SYS1_PLL2 43
+#define IMX8MQ_SYS1_PLL2_DIV 44
+#define IMX8MQ_SYS1_PLL2_OUT 45
+
+/* SYS2 PLL */
+#define IMX8MQ_SYS2_PLL1_REF_SEL 46
+#define IMX8MQ_SYS2_PLL1_REF_DIV 47
+#define IMX8MQ_SYS2_PLL1 48
+#define IMX8MQ_SYS2_PLL1_OUT 49
+#define IMX8MQ_SYS2_PLL1_OUT_DIV 50
+#define IMX8MQ_SYS2_PLL2 51
+#define IMX8MQ_SYS2_PLL2_DIV 52
+#define IMX8MQ_SYS2_PLL2_OUT 53
+
+/* SYS3 PLL */
+#define IMX8MQ_SYS3_PLL1_REF_SEL 54
+#define IMX8MQ_SYS3_PLL1_REF_DIV 55
+#define IMX8MQ_SYS3_PLL1 56
+#define IMX8MQ_SYS3_PLL1_OUT 57
+#define IMX8MQ_SYS3_PLL1_OUT_DIV 58
+#define IMX8MQ_SYS3_PLL2 59
+#define IMX8MQ_SYS3_PLL2_DIV 60
+#define IMX8MQ_SYS3_PLL2_OUT 61
+
+/* DRAM PLL */
+#define IMX8MQ_DRAM_PLL1_REF_SEL 62
+#define IMX8MQ_DRAM_PLL1_REF_DIV 63
+#define IMX8MQ_DRAM_PLL1 64
+#define IMX8MQ_DRAM_PLL1_OUT 65
+#define IMX8MQ_DRAM_PLL1_OUT_DIV 66
+#define IMX8MQ_DRAM_PLL2 67
+#define IMX8MQ_DRAM_PLL2_DIV 68
+#define IMX8MQ_DRAM_PLL2_OUT 69
+
+/* SYS PLL DIV */
+#define IMX8MQ_SYS1_PLL_40M 70
+#define IMX8MQ_SYS1_PLL_80M 71
+#define IMX8MQ_SYS1_PLL_100M 72
+#define IMX8MQ_SYS1_PLL_133M 73
+#define IMX8MQ_SYS1_PLL_160M 74
+#define IMX8MQ_SYS1_PLL_200M 75
+#define IMX8MQ_SYS1_PLL_266M 76
+#define IMX8MQ_SYS1_PLL_400M 77
+#define IMX8MQ_SYS1_PLL_800M 78
+
+#define IMX8MQ_SYS2_PLL_50M 79
+#define IMX8MQ_SYS2_PLL_100M 80
+#define IMX8MQ_SYS2_PLL_125M 81
+#define IMX8MQ_SYS2_PLL_166M 82
+#define IMX8MQ_SYS2_PLL_200M 83
+#define IMX8MQ_SYS2_PLL_250M 84
+#define IMX8MQ_SYS2_PLL_333M 85
+#define IMX8MQ_SYS2_PLL_500M 86
+#define IMX8MQ_SYS2_PLL_1000M 87
+
+/* CCM ROOT clocks */
+/* A53 */
+#define IMX8MQ_CLK_A53_SRC 88
+#define IMX8MQ_CLK_A53_CG 89
+#define IMX8MQ_CLK_A53_DIV 90
+/* M4 */
+#define IMX8MQ_CLK_M4_SRC 91
+#define IMX8MQ_CLK_M4_CG 92
+#define IMX8MQ_CLK_M4_DIV 93
+/* VPU */
+#define IMX8MQ_CLK_VPU_SRC 94
+#define IMX8MQ_CLK_VPU_CG 95
+#define IMX8MQ_CLK_VPU_DIV 96
+/* GPU CORE */
+#define IMX8MQ_CLK_GPU_CORE_SRC 97
+#define IMX8MQ_CLK_GPU_CORE_CG 98
+#define IMX8MQ_CLK_GPU_CORE_DIV 99
+/* GPU SHADER */
+#define IMX8MQ_CLK_GPU_SHADER_SRC 100
+#define IMX8MQ_CLK_GPU_SHADER_CG 101
+#define IMX8MQ_CLK_GPU_SHADER_DIV 102
+
+/* BUS TYPE */
+/* MAIN AXI */
+#define IMX8MQ_CLK_MAIN_AXI 103
+/* ENET AXI */
+#define IMX8MQ_CLK_ENET_AXI 104
+/* NAND_USDHC_BUS */
+#define IMX8MQ_CLK_NAND_USDHC_BUS 105
+/* VPU BUS */
+#define IMX8MQ_CLK_VPU_BUS 106
+/* DISP_AXI */
+#define IMX8MQ_CLK_DISP_AXI 107
+/* DISP APB */
+#define IMX8MQ_CLK_DISP_APB 108
+/* DISP RTRM */
+#define IMX8MQ_CLK_DISP_RTRM 109
+/* USB_BUS */
+#define IMX8MQ_CLK_USB_BUS 110
+/* GPU_AXI */
+#define IMX8MQ_CLK_GPU_AXI 111
+/* GPU_AHB */
+#define IMX8MQ_CLK_GPU_AHB 112
+/* NOC */
+#define IMX8MQ_CLK_NOC 113
+/* NOC_APB */
+#define IMX8MQ_CLK_NOC_APB 115
+
+/* AHB */
+#define IMX8MQ_CLK_AHB 116
+/* AUDIO AHB */
+#define IMX8MQ_CLK_AUDIO_AHB 117
+
+/* DRAM_ALT */
+#define IMX8MQ_CLK_DRAM_ALT 118
+/* DRAM APB */
+#define IMX8MQ_CLK_DRAM_APB 119
+/* VPU_G1 */
+#define IMX8MQ_CLK_VPU_G1 120
+/* VPU_G2 */
+#define IMX8MQ_CLK_VPU_G2 121
+/* DISP_DTRC */
+#define IMX8MQ_CLK_DISP_DTRC 122
+/* DISP_DC8000 */
+#define IMX8MQ_CLK_DISP_DC8000 123
+/* PCIE_CTRL */
+#define IMX8MQ_CLK_PCIE1_CTRL 124
+/* PCIE_PHY */
+#define IMX8MQ_CLK_PCIE1_PHY 125
+/* PCIE_AUX */
+#define IMX8MQ_CLK_PCIE1_AUX 126
+/* DC_PIXEL */
+#define IMX8MQ_CLK_DC_PIXEL 127
+/* LCDIF_PIXEL */
+#define IMX8MQ_CLK_LCDIF_PIXEL 128
+/* SAI1~6 */
+#define IMX8MQ_CLK_SAI1 129
+
+#define IMX8MQ_CLK_SAI2 130
+
+#define IMX8MQ_CLK_SAI3 131
+
+#define IMX8MQ_CLK_SAI4 132
+
+#define IMX8MQ_CLK_SAI5 133
+
+#define IMX8MQ_CLK_SAI6 134
+/* SPDIF1 */
+#define IMX8MQ_CLK_SPDIF1 135
+/* SPDIF2 */
+#define IMX8MQ_CLK_SPDIF2 136
+/* ENET_REF */
+#define IMX8MQ_CLK_ENET_REF 137
+/* ENET_TIMER */
+#define IMX8MQ_CLK_ENET_TIMER 138
+/* ENET_PHY */
+#define IMX8MQ_CLK_ENET_PHY_REF 139
+/* NAND */
+#define IMX8MQ_CLK_NAND 140
+/* QSPI */
+#define IMX8MQ_CLK_QSPI 141
+/* USDHC1 */
+#define IMX8MQ_CLK_USDHC1 142
+/* USDHC2 */
+#define IMX8MQ_CLK_USDHC2 143
+/* I2C1 */
+#define IMX8MQ_CLK_I2C1 144
+/* I2C2 */
+#define IMX8MQ_CLK_I2C2 145
+/* I2C3 */
+#define IMX8MQ_CLK_I2C3 146
+/* I2C4 */
+#define IMX8MQ_CLK_I2C4 147
+/* UART1 */
+#define IMX8MQ_CLK_UART1 148
+/* UART2 */
+#define IMX8MQ_CLK_UART2 149
+/* UART3 */
+#define IMX8MQ_CLK_UART3 150
+/* UART4 */
+#define IMX8MQ_CLK_UART4 151
+/* USB_CORE_REF */
+#define IMX8MQ_CLK_USB_CORE_REF 152
+/* USB_PHY_REF */
+#define IMX8MQ_CLK_USB_PHY_REF 163
+/* ECSPI1 */
+#define IMX8MQ_CLK_ECSPI1 164
+/* ECSPI2 */
+#define IMX8MQ_CLK_ECSPI2 165
+/* PWM1 */
+#define IMX8MQ_CLK_PWM1 166
+/* PWM2 */
+#define IMX8MQ_CLK_PWM2 167
+/* PWM3 */
+#define IMX8MQ_CLK_PWM3 168
+/* PWM4 */
+#define IMX8MQ_CLK_PWM4 169
+/* GPT1 */
+#define IMX8MQ_CLK_GPT1 170
+/* WDOG */
+#define IMX8MQ_CLK_WDOG 171
+/* WRCLK */
+#define IMX8MQ_CLK_WRCLK 172
+/* DSI_CORE */
+#define IMX8MQ_CLK_DSI_CORE 173
+/* DSI_PHY */
+#define IMX8MQ_CLK_DSI_PHY_REF 174
+/* DSI_DBI */
+#define IMX8MQ_CLK_DSI_DBI 175
+/* DSI_ESC */
+#define IMX8MQ_CLK_DSI_ESC 176
+/* CSI1_CORE */
+#define IMX8MQ_CLK_CSI1_CORE 177
+/* CSI1_PHY */
+#define IMX8MQ_CLK_CSI1_PHY_REF 178
+/* CSI_ESC */
+#define IMX8MQ_CLK_CSI1_ESC 179
+/* CSI2_CORE */
+#define IMX8MQ_CLK_CSI2_CORE 180
+/* CSI2_PHY */
+#define IMX8MQ_CLK_CSI2_PHY_REF 181
+/* CSI2_ESC */
+#define IMX8MQ_CLK_CSI2_ESC 182
+/* PCIE2_CTRL */
+#define IMX8MQ_CLK_PCIE2_CTRL 183
+/* PCIE2_PHY */
+#define IMX8MQ_CLK_PCIE2_PHY 184
+/* PCIE2_AUX */
+#define IMX8MQ_CLK_PCIE2_AUX 185
+/* ECSPI3 */
+#define IMX8MQ_CLK_ECSPI3 186
+
+/* CCGR clocks */
+#define IMX8MQ_CLK_A53_ROOT 187
+#define IMX8MQ_CLK_DRAM_ROOT 188
+#define IMX8MQ_CLK_ECSPI1_ROOT 189
+#define IMX8MQ_CLK_ECSPI2_ROOT 190
+#define IMX8MQ_CLK_ECSPI3_ROOT 191
+#define IMX8MQ_CLK_ENET1_ROOT 192
+#define IMX8MQ_CLK_GPT1_ROOT 193
+#define IMX8MQ_CLK_I2C1_ROOT 194
+#define IMX8MQ_CLK_I2C2_ROOT 195
+#define IMX8MQ_CLK_I2C3_ROOT 196
+#define IMX8MQ_CLK_I2C4_ROOT 197
+#define IMX8MQ_CLK_M4_ROOT 198
+#define IMX8MQ_CLK_PCIE1_ROOT 199
+#define IMX8MQ_CLK_PCIE2_ROOT 200
+#define IMX8MQ_CLK_PWM1_ROOT 201
+#define IMX8MQ_CLK_PWM2_ROOT 202
+#define IMX8MQ_CLK_PWM3_ROOT 203
+#define IMX8MQ_CLK_PWM4_ROOT 204
+#define IMX8MQ_CLK_QSPI_ROOT 205
+#define IMX8MQ_CLK_SAI1_ROOT 206
+#define IMX8MQ_CLK_SAI2_ROOT 207
+#define IMX8MQ_CLK_SAI3_ROOT 208
+#define IMX8MQ_CLK_SAI4_ROOT 209
+#define IMX8MQ_CLK_SAI5_ROOT 210
+#define IMX8MQ_CLK_SAI6_ROOT 212
+#define IMX8MQ_CLK_UART1_ROOT 213
+#define IMX8MQ_CLK_UART2_ROOT 214
+#define IMX8MQ_CLK_UART3_ROOT 215
+#define IMX8MQ_CLK_UART4_ROOT 216
+#define IMX8MQ_CLK_USB1_CTRL_ROOT 217
+#define IMX8MQ_CLK_USB2_CTRL_ROOT 218
+#define IMX8MQ_CLK_USB1_PHY_ROOT 219
+#define IMX8MQ_CLK_USB2_PHY_ROOT 220
+#define IMX8MQ_CLK_USDHC1_ROOT 221
+#define IMX8MQ_CLK_USDHC2_ROOT 222
+#define IMX8MQ_CLK_WDOG1_ROOT 223
+#define IMX8MQ_CLK_WDOG2_ROOT 224
+#define IMX8MQ_CLK_WDOG3_ROOT 225
+#define IMX8MQ_CLK_GPU_ROOT 226
+#define IMX8MQ_CLK_HEVC_ROOT 227
+#define IMX8MQ_CLK_AVC_ROOT 228
+#define IMX8MQ_CLK_VP9_ROOT 229
+#define IMX8MQ_CLK_HEVC_INTER_ROOT 230
+#define IMX8MQ_CLK_DISP_ROOT 231
+#define IMX8MQ_CLK_HDMI_ROOT 232
+#define IMX8MQ_CLK_HDMI_PHY_ROOT 233
+#define IMX8MQ_CLK_VPU_DEC_ROOT 234
+#define IMX8MQ_CLK_CSI1_ROOT 235
+#define IMX8MQ_CLK_CSI2_ROOT 236
+#define IMX8MQ_CLK_RAWNAND_ROOT 237
+#define IMX8MQ_CLK_SDMA1_ROOT 238
+#define IMX8MQ_CLK_SDMA2_ROOT 239
+#define IMX8MQ_CLK_VPU_G1_ROOT 240
+#define IMX8MQ_CLK_VPU_G2_ROOT 241
+
+/* SCCG PLL GATE */
+#define IMX8MQ_SYS1_PLL_OUT 242
+#define IMX8MQ_SYS2_PLL_OUT 243
+#define IMX8MQ_SYS3_PLL_OUT 244
+#define IMX8MQ_DRAM_PLL_OUT 245
+
+#define IMX8MQ_GPT_3M_CLK 246
+
+#define IMX8MQ_CLK_IPG_ROOT 247
+#define IMX8MQ_CLK_IPG_AUDIO_ROOT 248
+#define IMX8MQ_CLK_SAI1_IPG 249
+#define IMX8MQ_CLK_SAI2_IPG 250
+#define IMX8MQ_CLK_SAI3_IPG 251
+#define IMX8MQ_CLK_SAI4_IPG 252
+#define IMX8MQ_CLK_SAI5_IPG 253
+#define IMX8MQ_CLK_SAI6_IPG 254
+
+/* DSI AHB/IPG clocks */
+/* rxesc clock */
+#define IMX8MQ_CLK_DSI_AHB 255
+/* txesc clock */
+#define IMX8MQ_CLK_DSI_IPG_DIV 256
+
+#define IMX8MQ_CLK_TMU_ROOT 265
+
+/* Display root clocks */
+#define IMX8MQ_CLK_DISP_AXI_ROOT 266
+#define IMX8MQ_CLK_DISP_APB_ROOT 267
+#define IMX8MQ_CLK_DISP_RTRM_ROOT 268
+
+#define IMX8MQ_CLK_OCOTP_ROOT 269
+
+#define IMX8MQ_CLK_DRAM_ALT_ROOT 270
+#define IMX8MQ_CLK_DRAM_CORE 271
+
+#define IMX8MQ_CLK_MU_ROOT 272
+#define IMX8MQ_VIDEO2_PLL_OUT 273
+
+#define IMX8MQ_CLK_CLKO2 274
+
+#define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 275
+
+#define IMX8MQ_CLK_END 276
+#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/imx8qxp-clock.h b/include/dt-bindings/clock/imx8qxp-clock.h
new file mode 100644
index 000000000000..6fec3687f3c7
--- /dev/null
+++ b/include/dt-bindings/clock/imx8qxp-clock.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8QXP_H
+#define __DT_BINDINGS_CLOCK_IMX8QXP_H
+
+/* SCU Clocks */
+
+#define IMX8QXP_CLK_DUMMY 0
+
+/* CPU */
+#define IMX8QXP_A35_CLK 1
+
+/* LSIO SS */
+#define IMX8QXP_LSIO_MEM_CLK 2
+#define IMX8QXP_LSIO_BUS_CLK 3
+#define IMX8QXP_LSIO_PWM0_CLK 10
+#define IMX8QXP_LSIO_PWM1_CLK 11
+#define IMX8QXP_LSIO_PWM2_CLK 12
+#define IMX8QXP_LSIO_PWM3_CLK 13
+#define IMX8QXP_LSIO_PWM4_CLK 14
+#define IMX8QXP_LSIO_PWM5_CLK 15
+#define IMX8QXP_LSIO_PWM6_CLK 16
+#define IMX8QXP_LSIO_PWM7_CLK 17
+#define IMX8QXP_LSIO_GPT0_CLK 18
+#define IMX8QXP_LSIO_GPT1_CLK 19
+#define IMX8QXP_LSIO_GPT2_CLK 20
+#define IMX8QXP_LSIO_GPT3_CLK 21
+#define IMX8QXP_LSIO_GPT4_CLK 22
+#define IMX8QXP_LSIO_FSPI0_CLK 23
+#define IMX8QXP_LSIO_FSPI1_CLK 24
+
+/* Connectivity SS */
+#define IMX8QXP_CONN_AXI_CLK_ROOT 30
+#define IMX8QXP_CONN_AHB_CLK_ROOT 31
+#define IMX8QXP_CONN_IPG_CLK_ROOT 32
+#define IMX8QXP_CONN_SDHC0_CLK 40
+#define IMX8QXP_CONN_SDHC1_CLK 41
+#define IMX8QXP_CONN_SDHC2_CLK 42
+#define IMX8QXP_CONN_ENET0_ROOT_CLK 43
+#define IMX8QXP_CONN_ENET0_BYPASS_CLK 44
+#define IMX8QXP_CONN_ENET0_RGMII_CLK 45
+#define IMX8QXP_CONN_ENET1_ROOT_CLK 46
+#define IMX8QXP_CONN_ENET1_BYPASS_CLK 47
+#define IMX8QXP_CONN_ENET1_RGMII_CLK 48
+#define IMX8QXP_CONN_GPMI_BCH_IO_CLK 49
+#define IMX8QXP_CONN_GPMI_BCH_CLK 50
+#define IMX8QXP_CONN_USB2_ACLK 51
+#define IMX8QXP_CONN_USB2_BUS_CLK 52
+#define IMX8QXP_CONN_USB2_LPM_CLK 53
+
+/* HSIO SS */
+#define IMX8QXP_HSIO_AXI_CLK 60
+#define IMX8QXP_HSIO_PER_CLK 61
+
+/* Display controller SS */
+#define IMX8QXP_DC_AXI_EXT_CLK 70
+#define IMX8QXP_DC_AXI_INT_CLK 71
+#define IMX8QXP_DC_CFG_CLK 72
+#define IMX8QXP_DC0_PLL0_CLK 80
+#define IMX8QXP_DC0_PLL1_CLK 81
+#define IMX8QXP_DC0_DISP0_CLK 82
+#define IMX8QXP_DC0_DISP1_CLK 83
+
+/* MIPI-LVDS SS */
+#define IMX8QXP_MIPI_IPG_CLK 90
+#define IMX8QXP_MIPI0_PIXEL_CLK 100
+#define IMX8QXP_MIPI0_BYPASS_CLK 101
+#define IMX8QXP_MIPI0_LVDS_PIXEL_CLK 102
+#define IMX8QXP_MIPI0_LVDS_BYPASS_CLK 103
+#define IMX8QXP_MIPI0_LVDS_PHY_CLK 104
+#define IMX8QXP_MIPI0_I2C0_CLK 105
+#define IMX8QXP_MIPI0_I2C1_CLK 106
+#define IMX8QXP_MIPI0_PWM0_CLK 107
+#define IMX8QXP_MIPI1_PIXEL_CLK 108
+#define IMX8QXP_MIPI1_BYPASS_CLK 109
+#define IMX8QXP_MIPI1_LVDS_PIXEL_CLK 110
+#define IMX8QXP_MIPI1_LVDS_BYPASS_CLK 111
+#define IMX8QXP_MIPI1_LVDS_PHY_CLK 112
+#define IMX8QXP_MIPI1_I2C0_CLK 113
+#define IMX8QXP_MIPI1_I2C1_CLK 114
+#define IMX8QXP_MIPI1_PWM0_CLK 115
+
+/* IMG SS */
+#define IMX8QXP_IMG_AXI_CLK 120
+#define IMX8QXP_IMG_IPG_CLK 121
+#define IMX8QXP_IMG_PXL_CLK 122
+
+/* MIPI-CSI SS */
+#define IMX8QXP_CSI0_CORE_CLK 130
+#define IMX8QXP_CSI0_ESC_CLK 131
+#define IMX8QXP_CSI0_PWM0_CLK 132
+#define IMX8QXP_CSI0_I2C0_CLK 133
+
+/* PARALLEL CSI SS */
+#define IMX8QXP_PARALLEL_CSI_DPLL_CLK 140
+#define IMX8QXP_PARALLEL_CSI_PIXEL_CLK 141
+#define IMX8QXP_PARALLEL_CSI_MCLK_CLK 142
+
+/* VPU SS */
+#define IMX8QXP_VPU_ENC_CLK 150
+#define IMX8QXP_VPU_DEC_CLK 151
+
+/* GPU SS */
+#define IMX8QXP_GPU0_CORE_CLK 160
+#define IMX8QXP_GPU0_SHADER_CLK 161
+
+/* ADMA SS */
+#define IMX8QXP_ADMA_IPG_CLK_ROOT 165
+#define IMX8QXP_ADMA_UART0_CLK 170
+#define IMX8QXP_ADMA_UART1_CLK 171
+#define IMX8QXP_ADMA_UART2_CLK 172
+#define IMX8QXP_ADMA_UART3_CLK 173
+#define IMX8QXP_ADMA_SPI0_CLK 174
+#define IMX8QXP_ADMA_SPI1_CLK 175
+#define IMX8QXP_ADMA_SPI2_CLK 176
+#define IMX8QXP_ADMA_SPI3_CLK 177
+#define IMX8QXP_ADMA_CAN0_CLK 178
+#define IMX8QXP_ADMA_CAN1_CLK 179
+#define IMX8QXP_ADMA_CAN2_CLK 180
+#define IMX8QXP_ADMA_I2C0_CLK 181
+#define IMX8QXP_ADMA_I2C1_CLK 182
+#define IMX8QXP_ADMA_I2C2_CLK 183
+#define IMX8QXP_ADMA_I2C3_CLK 184
+#define IMX8QXP_ADMA_FTM0_CLK 185
+#define IMX8QXP_ADMA_FTM1_CLK 186
+#define IMX8QXP_ADMA_ADC0_CLK 187
+#define IMX8QXP_ADMA_PWM_CLK 188
+#define IMX8QXP_ADMA_LCD_CLK 189
+
+#define IMX8QXP_SCU_CLK_END 190
+
+/* LPCG clocks */
+
+/* LSIO SS LPCG */
+#define IMX8QXP_LSIO_LPCG_PWM0_IPG_CLK 0
+#define IMX8QXP_LSIO_LPCG_PWM0_IPG_S_CLK 1
+#define IMX8QXP_LSIO_LPCG_PWM0_IPG_HF_CLK 2
+#define IMX8QXP_LSIO_LPCG_PWM0_IPG_SLV_CLK 3
+#define IMX8QXP_LSIO_LPCG_PWM0_IPG_MSTR_CLK 4
+#define IMX8QXP_LSIO_LPCG_PWM1_IPG_CLK 5
+#define IMX8QXP_LSIO_LPCG_PWM1_IPG_S_CLK 6
+#define IMX8QXP_LSIO_LPCG_PWM1_IPG_HF_CLK 7
+#define IMX8QXP_LSIO_LPCG_PWM1_IPG_SLV_CLK 8
+#define IMX8QXP_LSIO_LPCG_PWM1_IPG_MSTR_CLK 9
+#define IMX8QXP_LSIO_LPCG_PWM2_IPG_CLK 10
+#define IMX8QXP_LSIO_LPCG_PWM2_IPG_S_CLK 11
+#define IMX8QXP_LSIO_LPCG_PWM2_IPG_HF_CLK 12
+#define IMX8QXP_LSIO_LPCG_PWM2_IPG_SLV_CLK 13
+#define IMX8QXP_LSIO_LPCG_PWM2_IPG_MSTR_CLK 14
+#define IMX8QXP_LSIO_LPCG_PWM3_IPG_CLK 15
+#define IMX8QXP_LSIO_LPCG_PWM3_IPG_S_CLK 16
+#define IMX8QXP_LSIO_LPCG_PWM3_IPG_HF_CLK 17
+#define IMX8QXP_LSIO_LPCG_PWM3_IPG_SLV_CLK 18
+#define IMX8QXP_LSIO_LPCG_PWM3_IPG_MSTR_CLK 19
+#define IMX8QXP_LSIO_LPCG_PWM4_IPG_CLK 20
+#define IMX8QXP_LSIO_LPCG_PWM4_IPG_S_CLK 21
+#define IMX8QXP_LSIO_LPCG_PWM4_IPG_HF_CLK 22
+#define IMX8QXP_LSIO_LPCG_PWM4_IPG_SLV_CLK 23
+#define IMX8QXP_LSIO_LPCG_PWM4_IPG_MSTR_CLK 24
+#define IMX8QXP_LSIO_LPCG_PWM5_IPG_CLK 25
+#define IMX8QXP_LSIO_LPCG_PWM5_IPG_S_CLK 26
+#define IMX8QXP_LSIO_LPCG_PWM5_IPG_HF_CLK 27
+#define IMX8QXP_LSIO_LPCG_PWM5_IPG_SLV_CLK 28
+#define IMX8QXP_LSIO_LPCG_PWM5_IPG_MSTR_CLK 29
+#define IMX8QXP_LSIO_LPCG_PWM6_IPG_CLK 30
+#define IMX8QXP_LSIO_LPCG_PWM6_IPG_S_CLK 31
+#define IMX8QXP_LSIO_LPCG_PWM6_IPG_HF_CLK 32
+#define IMX8QXP_LSIO_LPCG_PWM6_IPG_SLV_CLK 33
+#define IMX8QXP_LSIO_LPCG_PWM6_IPG_MSTR_CLK 34
+#define IMX8QXP_LSIO_LPCG_PWM7_IPG_CLK 35
+#define IMX8QXP_LSIO_LPCG_PWM7_IPG_S_CLK 36
+#define IMX8QXP_LSIO_LPCG_PWM7_IPG_HF_CLK 37
+#define IMX8QXP_LSIO_LPCG_PWM7_IPG_SLV_CLK 38
+#define IMX8QXP_LSIO_LPCG_PWM7_IPG_MSTR_CLK 39
+#define IMX8QXP_LSIO_LPCG_GPT0_IPG_CLK 40
+#define IMX8QXP_LSIO_LPCG_GPT0_IPG_S_CLK 41
+#define IMX8QXP_LSIO_LPCG_GPT0_IPG_HF_CLK 42
+#define IMX8QXP_LSIO_LPCG_GPT0_IPG_SLV_CLK 43
+#define IMX8QXP_LSIO_LPCG_GPT0_IPG_MSTR_CLK 44
+#define IMX8QXP_LSIO_LPCG_GPT1_IPG_CLK 45
+#define IMX8QXP_LSIO_LPCG_GPT1_IPG_S_CLK 46
+#define IMX8QXP_LSIO_LPCG_GPT1_IPG_HF_CLK 47
+#define IMX8QXP_LSIO_LPCG_GPT1_IPG_SLV_CLK 48
+#define IMX8QXP_LSIO_LPCG_GPT1_IPG_MSTR_CLK 49
+#define IMX8QXP_LSIO_LPCG_GPT2_IPG_CLK 50
+#define IMX8QXP_LSIO_LPCG_GPT2_IPG_S_CLK 51
+#define IMX8QXP_LSIO_LPCG_GPT2_IPG_HF_CLK 52
+#define IMX8QXP_LSIO_LPCG_GPT2_IPG_SLV_CLK 53
+#define IMX8QXP_LSIO_LPCG_GPT2_IPG_MSTR_CLK 54
+#define IMX8QXP_LSIO_LPCG_GPT3_IPG_CLK 55
+#define IMX8QXP_LSIO_LPCG_GPT3_IPG_S_CLK 56
+#define IMX8QXP_LSIO_LPCG_GPT3_IPG_HF_CLK 57
+#define IMX8QXP_LSIO_LPCG_GPT3_IPG_SLV_CLK 58
+#define IMX8QXP_LSIO_LPCG_GPT3_IPG_MSTR_CLK 59
+#define IMX8QXP_LSIO_LPCG_GPT4_IPG_CLK 60
+#define IMX8QXP_LSIO_LPCG_GPT4_IPG_S_CLK 61
+#define IMX8QXP_LSIO_LPCG_GPT4_IPG_HF_CLK 62
+#define IMX8QXP_LSIO_LPCG_GPT4_IPG_SLV_CLK 63
+#define IMX8QXP_LSIO_LPCG_GPT4_IPG_MSTR_CLK 64
+#define IMX8QXP_LSIO_LPCG_FSPI0_HCLK 65
+#define IMX8QXP_LSIO_LPCG_FSPI0_IPG_CLK 66
+#define IMX8QXP_LSIO_LPCG_FSPI0_IPG_S_CLK 67
+#define IMX8QXP_LSIO_LPCG_FSPI0_IPG_SFCK 68
+#define IMX8QXP_LSIO_LPCG_FSPI1_HCLK 69
+#define IMX8QXP_LSIO_LPCG_FSPI1_IPG_CLK 70
+#define IMX8QXP_LSIO_LPCG_FSPI1_IPG_S_CLK 71
+#define IMX8QXP_LSIO_LPCG_FSPI1_IPG_SFCK 72
+
+#define IMX8QXP_LSIO_LPCG_CLK_END 73
+
+/* Connectivity SS LPCG */
+#define IMX8QXP_CONN_LPCG_SDHC0_IPG_CLK 0
+#define IMX8QXP_CONN_LPCG_SDHC0_PER_CLK 1
+#define IMX8QXP_CONN_LPCG_SDHC0_HCLK 2
+#define IMX8QXP_CONN_LPCG_SDHC1_IPG_CLK 3
+#define IMX8QXP_CONN_LPCG_SDHC1_PER_CLK 4
+#define IMX8QXP_CONN_LPCG_SDHC1_HCLK 5
+#define IMX8QXP_CONN_LPCG_SDHC2_IPG_CLK 6
+#define IMX8QXP_CONN_LPCG_SDHC2_PER_CLK 7
+#define IMX8QXP_CONN_LPCG_SDHC2_HCLK 8
+#define IMX8QXP_CONN_LPCG_GPMI_APB_CLK 9
+#define IMX8QXP_CONN_LPCG_GPMI_BCH_APB_CLK 10
+#define IMX8QXP_CONN_LPCG_GPMI_BCH_IO_CLK 11
+#define IMX8QXP_CONN_LPCG_GPMI_BCH_CLK 12
+#define IMX8QXP_CONN_LPCG_APBHDMA_CLK 13
+#define IMX8QXP_CONN_LPCG_ENET0_ROOT_CLK 14
+#define IMX8QXP_CONN_LPCG_ENET0_TX_CLK 15
+#define IMX8QXP_CONN_LPCG_ENET0_AHB_CLK 16
+#define IMX8QXP_CONN_LPCG_ENET0_IPG_S_CLK 17
+#define IMX8QXP_CONN_LPCG_ENET0_IPG_CLK 18
+
+#define IMX8QXP_CONN_LPCG_ENET1_ROOT_CLK 19
+#define IMX8QXP_CONN_LPCG_ENET1_TX_CLK 20
+#define IMX8QXP_CONN_LPCG_ENET1_AHB_CLK 21
+#define IMX8QXP_CONN_LPCG_ENET1_IPG_S_CLK 22
+#define IMX8QXP_CONN_LPCG_ENET1_IPG_CLK 23
+
+#define IMX8QXP_CONN_LPCG_CLK_END 24
+
+/* ADMA SS LPCG */
+#define IMX8QXP_ADMA_LPCG_UART0_IPG_CLK 0
+#define IMX8QXP_ADMA_LPCG_UART0_BAUD_CLK 1
+#define IMX8QXP_ADMA_LPCG_UART1_IPG_CLK 2
+#define IMX8QXP_ADMA_LPCG_UART1_BAUD_CLK 3
+#define IMX8QXP_ADMA_LPCG_UART2_IPG_CLK 4
+#define IMX8QXP_ADMA_LPCG_UART2_BAUD_CLK 5
+#define IMX8QXP_ADMA_LPCG_UART3_IPG_CLK 6
+#define IMX8QXP_ADMA_LPCG_UART3_BAUD_CLK 7
+#define IMX8QXP_ADMA_LPCG_SPI0_IPG_CLK 8
+#define IMX8QXP_ADMA_LPCG_SPI1_IPG_CLK 9
+#define IMX8QXP_ADMA_LPCG_SPI2_IPG_CLK 10
+#define IMX8QXP_ADMA_LPCG_SPI3_IPG_CLK 11
+#define IMX8QXP_ADMA_LPCG_SPI0_CLK 12
+#define IMX8QXP_ADMA_LPCG_SPI1_CLK 13
+#define IMX8QXP_ADMA_LPCG_SPI2_CLK 14
+#define IMX8QXP_ADMA_LPCG_SPI3_CLK 15
+#define IMX8QXP_ADMA_LPCG_CAN0_IPG_CLK 16
+#define IMX8QXP_ADMA_LPCG_CAN0_IPG_PE_CLK 17
+#define IMX8QXP_ADMA_LPCG_CAN0_IPG_CHI_CLK 18
+#define IMX8QXP_ADMA_LPCG_CAN1_IPG_CLK 19
+#define IMX8QXP_ADMA_LPCG_CAN1_IPG_PE_CLK 20
+#define IMX8QXP_ADMA_LPCG_CAN1_IPG_CHI_CLK 21
+#define IMX8QXP_ADMA_LPCG_CAN2_IPG_CLK 22
+#define IMX8QXP_ADMA_LPCG_CAN2_IPG_PE_CLK 23
+#define IMX8QXP_ADMA_LPCG_CAN2_IPG_CHI_CLK 24
+#define IMX8QXP_ADMA_LPCG_I2C0_CLK 25
+#define IMX8QXP_ADMA_LPCG_I2C1_CLK 26
+#define IMX8QXP_ADMA_LPCG_I2C2_CLK 27
+#define IMX8QXP_ADMA_LPCG_I2C3_CLK 28
+#define IMX8QXP_ADMA_LPCG_I2C0_IPG_CLK 29
+#define IMX8QXP_ADMA_LPCG_I2C1_IPG_CLK 30
+#define IMX8QXP_ADMA_LPCG_I2C2_IPG_CLK 31
+#define IMX8QXP_ADMA_LPCG_I2C3_IPG_CLK 32
+#define IMX8QXP_ADMA_LPCG_FTM0_CLK 33
+#define IMX8QXP_ADMA_LPCG_FTM1_CLK 34
+#define IMX8QXP_ADMA_LPCG_FTM0_IPG_CLK 35
+#define IMX8QXP_ADMA_LPCG_FTM1_IPG_CLK 36
+#define IMX8QXP_ADMA_LPCG_PWM_HI_CLK 37
+#define IMX8QXP_ADMA_LPCG_PWM_IPG_CLK 38
+#define IMX8QXP_ADMA_LPCG_LCD_PIX_CLK 39
+#define IMX8QXP_ADMA_LPCG_LCD_APB_CLK 40
+
+#define IMX8QXP_ADMA_LPCG_CLK_END 41
+
+#endif /* __DT_BINDINGS_CLOCK_IMX8QXP_H */
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index a60f47b49231..5fe2923382d0 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -103,5 +103,9 @@
#define CLKID_MPLL1 94
#define CLKID_MPLL2 95
#define CLKID_NAND_CLK 112
+#define CLKID_ABP 124
+#define CLKID_PERIPH 126
+#define CLKID_AXI 128
+#define CLKID_L2_DRAM 130
#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt7629-clk.h b/include/dt-bindings/clock/mt7629-clk.h
new file mode 100644
index 000000000000..ad8e6d7f0154
--- /dev/null
+++ b/include/dt-bindings/clock/mt7629-clk.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 MediaTek Inc.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7629_H
+#define _DT_BINDINGS_CLK_MT7629_H
+
+/* TOPCKGEN */
+#define CLK_TOP_TO_U2_PHY 0
+#define CLK_TOP_TO_U2_PHY_1P 1
+#define CLK_TOP_PCIE0_PIPE_EN 2
+#define CLK_TOP_PCIE1_PIPE_EN 3
+#define CLK_TOP_SSUSB_TX250M 4
+#define CLK_TOP_SSUSB_EQ_RX250M 5
+#define CLK_TOP_SSUSB_CDR_REF 6
+#define CLK_TOP_SSUSB_CDR_FB 7
+#define CLK_TOP_SATA_ASIC 8
+#define CLK_TOP_SATA_RBC 9
+#define CLK_TOP_TO_USB3_SYS 10
+#define CLK_TOP_P1_1MHZ 11
+#define CLK_TOP_4MHZ 12
+#define CLK_TOP_P0_1MHZ 13
+#define CLK_TOP_ETH_500M 14
+#define CLK_TOP_TXCLK_SRC_PRE 15
+#define CLK_TOP_RTC 16
+#define CLK_TOP_PWM_QTR_26M 17
+#define CLK_TOP_CPUM_TCK_IN 18
+#define CLK_TOP_TO_USB3_DA_TOP 19
+#define CLK_TOP_MEMPLL 20
+#define CLK_TOP_DMPLL 21
+#define CLK_TOP_DMPLL_D4 22
+#define CLK_TOP_DMPLL_D8 23
+#define CLK_TOP_SYSPLL_D2 24
+#define CLK_TOP_SYSPLL1_D2 25
+#define CLK_TOP_SYSPLL1_D4 26
+#define CLK_TOP_SYSPLL1_D8 27
+#define CLK_TOP_SYSPLL1_D16 28
+#define CLK_TOP_SYSPLL2_D2 29
+#define CLK_TOP_SYSPLL2_D4 30
+#define CLK_TOP_SYSPLL2_D8 31
+#define CLK_TOP_SYSPLL_D5 32
+#define CLK_TOP_SYSPLL3_D2 33
+#define CLK_TOP_SYSPLL3_D4 34
+#define CLK_TOP_SYSPLL_D7 35
+#define CLK_TOP_SYSPLL4_D2 36
+#define CLK_TOP_SYSPLL4_D4 37
+#define CLK_TOP_SYSPLL4_D16 38
+#define CLK_TOP_UNIVPLL 39
+#define CLK_TOP_UNIVPLL1_D2 40
+#define CLK_TOP_UNIVPLL1_D4 41
+#define CLK_TOP_UNIVPLL1_D8 42
+#define CLK_TOP_UNIVPLL_D3 43
+#define CLK_TOP_UNIVPLL2_D2 44
+#define CLK_TOP_UNIVPLL2_D4 45
+#define CLK_TOP_UNIVPLL2_D8 46
+#define CLK_TOP_UNIVPLL2_D16 47
+#define CLK_TOP_UNIVPLL_D5 48
+#define CLK_TOP_UNIVPLL3_D2 49
+#define CLK_TOP_UNIVPLL3_D4 50
+#define CLK_TOP_UNIVPLL3_D16 51
+#define CLK_TOP_UNIVPLL_D7 52
+#define CLK_TOP_UNIVPLL_D80_D4 53
+#define CLK_TOP_UNIV48M 54
+#define CLK_TOP_SGMIIPLL_D2 55
+#define CLK_TOP_CLKXTAL_D4 56
+#define CLK_TOP_HD_FAXI 57
+#define CLK_TOP_FAXI 58
+#define CLK_TOP_F_FAUD_INTBUS 59
+#define CLK_TOP_AP2WBHIF_HCLK 60
+#define CLK_TOP_10M_INFRAO 61
+#define CLK_TOP_MSDC30_1 62
+#define CLK_TOP_SPI 63
+#define CLK_TOP_SF 64
+#define CLK_TOP_FLASH 65
+#define CLK_TOP_TO_USB3_REF 66
+#define CLK_TOP_TO_USB3_MCU 67
+#define CLK_TOP_TO_USB3_DMA 68
+#define CLK_TOP_FROM_TOP_AHB 69
+#define CLK_TOP_FROM_TOP_AXI 70
+#define CLK_TOP_PCIE1_MAC_EN 71
+#define CLK_TOP_PCIE0_MAC_EN 72
+#define CLK_TOP_AXI_SEL 73
+#define CLK_TOP_MEM_SEL 74
+#define CLK_TOP_DDRPHYCFG_SEL 75
+#define CLK_TOP_ETH_SEL 76
+#define CLK_TOP_PWM_SEL 77
+#define CLK_TOP_F10M_REF_SEL 78
+#define CLK_TOP_NFI_INFRA_SEL 79
+#define CLK_TOP_FLASH_SEL 80
+#define CLK_TOP_UART_SEL 81
+#define CLK_TOP_SPI0_SEL 82
+#define CLK_TOP_SPI1_SEL 83
+#define CLK_TOP_MSDC50_0_SEL 84
+#define CLK_TOP_MSDC30_0_SEL 85
+#define CLK_TOP_MSDC30_1_SEL 86
+#define CLK_TOP_AP2WBMCU_SEL 87
+#define CLK_TOP_AP2WBHIF_SEL 88
+#define CLK_TOP_AUDIO_SEL 89
+#define CLK_TOP_AUD_INTBUS_SEL 90
+#define CLK_TOP_PMICSPI_SEL 91
+#define CLK_TOP_SCP_SEL 92
+#define CLK_TOP_ATB_SEL 93
+#define CLK_TOP_HIF_SEL 94
+#define CLK_TOP_SATA_SEL 95
+#define CLK_TOP_U2_SEL 96
+#define CLK_TOP_AUD1_SEL 97
+#define CLK_TOP_AUD2_SEL 98
+#define CLK_TOP_IRRX_SEL 99
+#define CLK_TOP_IRTX_SEL 100
+#define CLK_TOP_SATA_MCU_SEL 101
+#define CLK_TOP_PCIE0_MCU_SEL 102
+#define CLK_TOP_PCIE1_MCU_SEL 103
+#define CLK_TOP_SSUSB_MCU_SEL 104
+#define CLK_TOP_CRYPTO_SEL 105
+#define CLK_TOP_SGMII_REF_1_SEL 106
+#define CLK_TOP_10M_SEL 107
+#define CLK_TOP_NR_CLK 108
+
+/* INFRACFG */
+#define CLK_INFRA_MUX1_SEL 0
+#define CLK_INFRA_DBGCLK_PD 1
+#define CLK_INFRA_TRNG_PD 2
+#define CLK_INFRA_DEVAPC_PD 3
+#define CLK_INFRA_APXGPT_PD 4
+#define CLK_INFRA_SEJ_PD 5
+#define CLK_INFRA_NR_CLK 6
+
+/* PERICFG */
+#define CLK_PERIBUS_SEL 0
+#define CLK_PERI_PWM1_PD 1
+#define CLK_PERI_PWM2_PD 2
+#define CLK_PERI_PWM3_PD 3
+#define CLK_PERI_PWM4_PD 4
+#define CLK_PERI_PWM5_PD 5
+#define CLK_PERI_PWM6_PD 6
+#define CLK_PERI_PWM7_PD 7
+#define CLK_PERI_PWM_PD 8
+#define CLK_PERI_AP_DMA_PD 9
+#define CLK_PERI_MSDC30_1_PD 10
+#define CLK_PERI_UART0_PD 11
+#define CLK_PERI_UART1_PD 12
+#define CLK_PERI_UART2_PD 13
+#define CLK_PERI_UART3_PD 14
+#define CLK_PERI_BTIF_PD 15
+#define CLK_PERI_I2C0_PD 16
+#define CLK_PERI_SPI0_PD 17
+#define CLK_PERI_SNFI_PD 18
+#define CLK_PERI_NFI_PD 19
+#define CLK_PERI_NFIECC_PD 20
+#define CLK_PERI_FLASH_PD 21
+#define CLK_PERI_NR_CLK 22
+
+/* APMIXEDSYS */
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIV2PLL 2
+#define CLK_APMIXED_ETH1PLL 3
+#define CLK_APMIXED_ETH2PLL 4
+#define CLK_APMIXED_SGMIPLL 5
+#define CLK_APMIXED_MAIN_CORE_EN 6
+#define CLK_APMIXED_NR_CLK 7
+
+/* SSUSBSYS */
+#define CLK_SSUSB_U2_PHY_1P_EN 0
+#define CLK_SSUSB_U2_PHY_EN 1
+#define CLK_SSUSB_REF_EN 2
+#define CLK_SSUSB_SYS_EN 3
+#define CLK_SSUSB_MCU_EN 4
+#define CLK_SSUSB_DMA_EN 5
+#define CLK_SSUSB_NR_CLK 6
+
+/* PCIESYS */
+#define CLK_PCIE_P1_AUX_EN 0
+#define CLK_PCIE_P1_OBFF_EN 1
+#define CLK_PCIE_P1_AHB_EN 2
+#define CLK_PCIE_P1_AXI_EN 3
+#define CLK_PCIE_P1_MAC_EN 4
+#define CLK_PCIE_P1_PIPE_EN 5
+#define CLK_PCIE_P0_AUX_EN 6
+#define CLK_PCIE_P0_OBFF_EN 7
+#define CLK_PCIE_P0_AHB_EN 8
+#define CLK_PCIE_P0_AXI_EN 9
+#define CLK_PCIE_P0_MAC_EN 10
+#define CLK_PCIE_P0_PIPE_EN 11
+#define CLK_PCIE_NR_CLK 12
+
+/* ETHSYS */
+#define CLK_ETH_FE_EN 0
+#define CLK_ETH_GP2_EN 1
+#define CLK_ETH_GP1_EN 2
+#define CLK_ETH_GP0_EN 3
+#define CLK_ETH_ESW_EN 4
+#define CLK_ETH_NR_CLK 5
+
+/* SGMIISYS */
+#define CLK_SGMII_TX_EN 0
+#define CLK_SGMII_RX_EN 1
+#define CLK_SGMII_CDR_REF 2
+#define CLK_SGMII_CDR_FB 3
+#define CLK_SGMII_NR_CLK 4
+
+#endif /* _DT_BINDINGS_CLK_MT7629_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index 58a242e656b1..ba84bbab5c83 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -180,6 +180,11 @@
#define USB30_MASTER_CLK_SRC 163
#define USB30_MOCK_UTMI_CLK_SRC 164
#define USB3_PHY_AUX_CLK_SRC 165
+#define GCC_USB3_CLKREF_CLK 166
+#define GCC_HDMI_CLKREF_CLK 167
+#define GCC_UFS_CLKREF_CLK 168
+#define GCC_PCIE_CLKREF_CLK 169
+#define GCC_RX1_USB2_CLKREF_CLK 170
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
@@ -204,5 +209,94 @@
#define GCC_TSIF_BCR 16
#define GCC_UFS_BCR 17
#define GCC_USB_30_BCR 18
+#define GCC_SYSTEM_NOC_BCR 19
+#define GCC_CONFIG_NOC_BCR 20
+#define GCC_AHB2PHY_EAST_BCR 21
+#define GCC_IMEM_BCR 22
+#define GCC_PIMEM_BCR 23
+#define GCC_MMSS_BCR 24
+#define GCC_QDSS_BCR 25
+#define GCC_WCSS_BCR 26
+#define GCC_BLSP1_BCR 27
+#define GCC_BLSP1_UART1_BCR 28
+#define GCC_BLSP1_UART2_BCR 29
+#define GCC_BLSP1_UART3_BCR 30
+#define GCC_CM_PHY_REFGEN1_BCR 31
+#define GCC_CM_PHY_REFGEN2_BCR 32
+#define GCC_BLSP2_BCR 33
+#define GCC_BLSP2_UART1_BCR 34
+#define GCC_BLSP2_UART2_BCR 35
+#define GCC_BLSP2_UART3_BCR 36
+#define GCC_SRAM_SENSOR_BCR 37
+#define GCC_PRNG_BCR 38
+#define GCC_TSIF_0_RESET 39
+#define GCC_TSIF_1_RESET 40
+#define GCC_TCSR_BCR 41
+#define GCC_BOOT_ROM_BCR 42
+#define GCC_MSG_RAM_BCR 43
+#define GCC_TLMM_BCR 44
+#define GCC_MPM_BCR 45
+#define GCC_SEC_CTRL_BCR 46
+#define GCC_SPMI_BCR 47
+#define GCC_SPDM_BCR 48
+#define GCC_CE1_BCR 49
+#define GCC_BIMC_BCR 50
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 51
+#define GCC_SNOC_BUS_TIMEOUT1_BCR 52
+#define GCC_SNOC_BUS_TIMEOUT3_BCR 53
+#define GCC_SNOC_BUS_TIMEOUT_EXTREF_BCR 54
+#define GCC_PNOC_BUS_TIMEOUT0_BCR 55
+#define GCC_CNOC_PERIPH_BUS_TIMEOUT1_BCR 56
+#define GCC_CNOC_PERIPH_BUS_TIMEOUT2_BCR 57
+#define GCC_CNOC_BUS_TIMEOUT0_BCR 58
+#define GCC_CNOC_BUS_TIMEOUT1_BCR 59
+#define GCC_CNOC_BUS_TIMEOUT2_BCR 60
+#define GCC_CNOC_BUS_TIMEOUT3_BCR 61
+#define GCC_CNOC_BUS_TIMEOUT4_BCR 62
+#define GCC_CNOC_BUS_TIMEOUT5_BCR 63
+#define GCC_CNOC_BUS_TIMEOUT6_BCR 64
+#define GCC_CNOC_BUS_TIMEOUT7_BCR 65
+#define GCC_APB2JTAG_BCR 66
+#define GCC_RBCPR_CX_BCR 67
+#define GCC_RBCPR_MX_BCR 68
+#define GCC_USB3_PHY_BCR 69
+#define GCC_USB3PHY_PHY_BCR 70
+#define GCC_USB3_DP_PHY_BCR 71
+#define GCC_SSC_BCR 72
+#define GCC_SSC_RESET 73
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 74
+#define GCC_PCIE_0_LINK_DOWN_BCR 75
+#define GCC_PCIE_0_PHY_BCR 76
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 77
+#define GCC_PCIE_PHY_BCR 78
+#define GCC_PCIE_PHY_NOCSR_COM_PHY_BCR 79
+#define GCC_PCIE_PHY_CFG_AHB_BCR 80
+#define GCC_PCIE_PHY_COM_BCR 81
+#define GCC_GPU_BCR 82
+#define GCC_SPSS_BCR 83
+#define GCC_OBT_ODT_BCR 84
+#define GCC_VS_BCR 85
+#define GCC_MSS_VS_RESET 86
+#define GCC_GPU_VS_RESET 87
+#define GCC_APC0_VS_RESET 88
+#define GCC_APC1_VS_RESET 89
+#define GCC_CNOC_BUS_TIMEOUT8_BCR 90
+#define GCC_CNOC_BUS_TIMEOUT9_BCR 91
+#define GCC_CNOC_BUS_TIMEOUT10_BCR 92
+#define GCC_CNOC_BUS_TIMEOUT11_BCR 93
+#define GCC_CNOC_BUS_TIMEOUT12_BCR 94
+#define GCC_CNOC_BUS_TIMEOUT13_BCR 95
+#define GCC_CNOC_BUS_TIMEOUT14_BCR 96
+#define GCC_CNOC_BUS_TIMEOUT_EXTREF_BCR 97
+#define GCC_AGGRE1_NOC_BCR 98
+#define GCC_AGGRE2_NOC_BCR 99
+#define GCC_DCC_BCR 100
+#define GCC_QREFS_VBG_CAL_BCR 101
+#define GCC_IPA_BCR 102
+#define GCC_GLM_BCR 103
+#define GCC_SKL_BCR 104
+#define GCC_MSMPU_BCR 105
+#define GCC_QUSB2PHY_PRIM_BCR 106
+#define GCC_QUSB2PHY_SEC_BCR 107
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index b8eae5a76503..968fa65b9c42 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -197,6 +197,8 @@
#define GCC_QSPI_CORE_CLK_SRC 187
#define GCC_QSPI_CORE_CLK 188
#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 189
+#define GCC_LPASS_Q6_AXI_CLK 190
+#define GCC_LPASS_SWAY_CLK 191
/* GCC Resets */
#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gpucc-sdm845.h b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
new file mode 100644
index 000000000000..9690d901b50a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sdm845.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_GPU_CC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_GPU_CC_SDM845_H
+
+/* GPU_CC clock registers */
+#define GPU_CC_CX_GMU_CLK 0
+#define GPU_CC_CXO_CLK 1
+#define GPU_CC_GMU_CLK_SRC 2
+#define GPU_CC_PLL1 3
+
+/* GPU_CC Resets */
+#define GPUCC_GPU_CC_CX_BCR 0
+#define GPUCC_GPU_CC_GMU_BCR 1
+#define GPUCC_GPU_CC_XO_BCR 2
+
+/* GPU_CC GDSCRs */
+#define GPU_CX_GDSC 0
+#define GPU_GX_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lpass-sdm845.h b/include/dt-bindings/clock/qcom,lpass-sdm845.h
new file mode 100644
index 000000000000..659050846f61
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,lpass-sdm845.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_LPASS_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_LPASS_SDM845_H
+
+#define LPASS_Q6SS_AHBM_AON_CLK 0
+#define LPASS_Q6SS_AHBS_AON_CLK 1
+#define LPASS_QDSP6SS_XO_CLK 2
+#define LPASS_QDSP6SS_SLEEP_CLK 3
+#define LPASS_QDSP6SS_CORE_CLK 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
index c585b82b9c05..3658b0c14966 100644
--- a/include/dt-bindings/clock/qcom,rpmcc.h
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -123,5 +123,9 @@
#define RPM_SMD_DIV_A_CLK3 73
#define RPM_SMD_LN_BB_CLK 74
#define RPM_SMD_LN_BB_A_CLK 75
+#define RPM_SMD_BIMC_GPU_CLK 76
+#define RPM_SMD_BIMC_GPU_A_CLK 77
+#define RPM_SMD_QPIC_CLK 78
+#define RPM_SMD_QPIC_CLK_A 79
#endif
diff --git a/include/dt-bindings/clock/r8a7795-cpg-mssr.h b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
index 948389641565..92b3e2a95179 100644
--- a/include/dt-bindings/clock/r8a7795-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7795-cpg-mssr.h
@@ -50,7 +50,7 @@
#define R8A7795_CLK_CANFD 39
#define R8A7795_CLK_HDMI 40
#define R8A7795_CLK_CSI0 41
-#define R8A7795_CLK_CSIREF 42
+/* CLK_CSIREF was removed */
#define R8A7795_CLK_CP 43
#define R8A7795_CLK_CPEX 44
#define R8A7795_CLK_R 45
diff --git a/include/dt-bindings/clock/r8a7796-cpg-mssr.h b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
index e6087f2f7e3a..c0957cf45840 100644
--- a/include/dt-bindings/clock/r8a7796-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a7796-cpg-mssr.h
@@ -56,7 +56,7 @@
#define R8A7796_CLK_CANFD 45
#define R8A7796_CLK_HDMI 46
#define R8A7796_CLK_CSI0 47
-#define R8A7796_CLK_CSIREF 48
+/* CLK_CSIREF was removed */
#define R8A7796_CLK_CP 49
#define R8A7796_CLK_CPEX 50
#define R8A7796_CLK_R 51
diff --git a/include/dt-bindings/clock/r8a77995-cpg-mssr.h b/include/dt-bindings/clock/r8a77995-cpg-mssr.h
index 1eb11acfa563..fd701c4e87cf 100644
--- a/include/dt-bindings/clock/r8a77995-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a77995-cpg-mssr.h
@@ -35,8 +35,8 @@
#define R8A77995_CLK_CRD2 24
#define R8A77995_CLK_SD0H 25
#define R8A77995_CLK_SD0 26
-#define R8A77995_CLK_SSP2 27
-#define R8A77995_CLK_SSP1 28
+/* CLK_SSP2 was removed */
+/* CLK_SSP1 was removed */
#define R8A77995_CLK_RPC 29
#define R8A77995_CLK_RPCD2 30
#define R8A77995_CLK_ZA2 31
@@ -49,5 +49,6 @@
#define R8A77995_CLK_LV0 38
#define R8A77995_CLK_LV1 39
#define R8A77995_CLK_CP 40
+#define R8A77995_CLK_CPEX 41
#endif /* __DT_BINDINGS_CLOCK_R8A77995_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/rk3328-cru.h b/include/dt-bindings/clock/rk3328-cru.h
index a82a0109faff..bcaa4559ab1b 100644
--- a/include/dt-bindings/clock/rk3328-cru.h
+++ b/include/dt-bindings/clock/rk3328-cru.h
@@ -172,13 +172,14 @@
#define PCLK_HDCP 232
#define PCLK_DCF 233
#define PCLK_SARADC 234
+#define PCLK_ACODECPHY 235
/* hclk gates */
#define HCLK_PERI 308
#define HCLK_TSP 309
#define HCLK_GMAC 310
#define HCLK_I2S0_8CH 311
-#define HCLK_I2S1_8CH 313
+#define HCLK_I2S1_8CH 312
#define HCLK_I2S2_2CH 313
#define HCLK_SPDIF_8CH 314
#define HCLK_VOP 315
diff --git a/include/dt-bindings/clock/sun8i-de2.h b/include/dt-bindings/clock/sun8i-de2.h
index 3bed63b524aa..7768f73b051e 100644
--- a/include/dt-bindings/clock/sun8i-de2.h
+++ b/include/dt-bindings/clock/sun8i-de2.h
@@ -15,4 +15,7 @@
#define CLK_MIXER1 7
#define CLK_WB 8
+#define CLK_BUS_ROT 9
+#define CLK_ROT 10
+
#endif /* _DT_BINDINGS_CLOCK_SUN8I_DE2_H_ */
diff --git a/include/dt-bindings/clock/suniv-ccu-f1c100s.h b/include/dt-bindings/clock/suniv-ccu-f1c100s.h
new file mode 100644
index 000000000000..f5ac155c9c70
--- /dev/null
+++ b/include/dt-bindings/clock/suniv-ccu-f1c100s.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ *
+ * Copyright (c) 2018 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUNIV_F1C100S_H_
+#define _DT_BINDINGS_CLK_SUNIV_F1C100S_H_
+
+#define CLK_CPU 11
+
+#define CLK_BUS_DMA 14
+#define CLK_BUS_MMC0 15
+#define CLK_BUS_MMC1 16
+#define CLK_BUS_DRAM 17
+#define CLK_BUS_SPI0 18
+#define CLK_BUS_SPI1 19
+#define CLK_BUS_OTG 20
+#define CLK_BUS_VE 21
+#define CLK_BUS_LCD 22
+#define CLK_BUS_DEINTERLACE 23
+#define CLK_BUS_CSI 24
+#define CLK_BUS_TVD 25
+#define CLK_BUS_TVE 26
+#define CLK_BUS_DE_BE 27
+#define CLK_BUS_DE_FE 28
+#define CLK_BUS_CODEC 29
+#define CLK_BUS_SPDIF 30
+#define CLK_BUS_IR 31
+#define CLK_BUS_RSB 32
+#define CLK_BUS_I2S0 33
+#define CLK_BUS_I2C0 34
+#define CLK_BUS_I2C1 35
+#define CLK_BUS_I2C2 36
+#define CLK_BUS_PIO 37
+#define CLK_BUS_UART0 38
+#define CLK_BUS_UART1 39
+#define CLK_BUS_UART2 40
+
+#define CLK_MMC0 41
+#define CLK_MMC0_SAMPLE 42
+#define CLK_MMC0_OUTPUT 43
+#define CLK_MMC1 44
+#define CLK_MMC1_SAMPLE 45
+#define CLK_MMC1_OUTPUT 46
+#define CLK_I2S 47
+#define CLK_SPDIF 48
+
+#define CLK_USB_PHY0 49
+
+#define CLK_DRAM_VE 50
+#define CLK_DRAM_CSI 51
+#define CLK_DRAM_DEINTERLACE 52
+#define CLK_DRAM_TVD 53
+#define CLK_DRAM_DE_FE 54
+#define CLK_DRAM_DE_BE 55
+
+#define CLK_DE_BE 56
+#define CLK_DE_FE 57
+#define CLK_TCON 58
+#define CLK_DEINTERLACE 59
+#define CLK_TVE2_CLK 60
+#define CLK_TVE1_CLK 61
+#define CLK_TVD 62
+#define CLK_CSI 63
+#define CLK_VE 64
+#define CLK_CODEC 65
+#define CLK_AVS 66
+
+#endif
diff --git a/include/dt-bindings/firmware/imx/rsrc.h b/include/dt-bindings/firmware/imx/rsrc.h
new file mode 100644
index 000000000000..4481f2d60d65
--- /dev/null
+++ b/include/dt-bindings/firmware/imx/rsrc.h
@@ -0,0 +1,559 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef __DT_BINDINGS_RSCRC_IMX_H
+#define __DT_BINDINGS_RSCRC_IMX_H
+
+/*
+ * These defines are used to indicate a resource. Resources include peripherals
+ * and bus masters (but not memory regions). Note: items in this list should
+ * never be changed or removed (only added to at the end of the list).
+ */
+
+#define IMX_SC_R_A53 0
+#define IMX_SC_R_A53_0 1
+#define IMX_SC_R_A53_1 2
+#define IMX_SC_R_A53_2 3
+#define IMX_SC_R_A53_3 4
+#define IMX_SC_R_A72 5
+#define IMX_SC_R_A72_0 6
+#define IMX_SC_R_A72_1 7
+#define IMX_SC_R_A72_2 8
+#define IMX_SC_R_A72_3 9
+#define IMX_SC_R_CCI 10
+#define IMX_SC_R_DB 11
+#define IMX_SC_R_DRC_0 12
+#define IMX_SC_R_DRC_1 13
+#define IMX_SC_R_GIC_SMMU 14
+#define IMX_SC_R_IRQSTR_M4_0 15
+#define IMX_SC_R_IRQSTR_M4_1 16
+#define IMX_SC_R_SMMU 17
+#define IMX_SC_R_GIC 18
+#define IMX_SC_R_DC_0_BLIT0 19
+#define IMX_SC_R_DC_0_BLIT1 20
+#define IMX_SC_R_DC_0_BLIT2 21
+#define IMX_SC_R_DC_0_BLIT_OUT 22
+#define IMX_SC_R_DC_0_CAPTURE0 23
+#define IMX_SC_R_DC_0_CAPTURE1 24
+#define IMX_SC_R_DC_0_WARP 25
+#define IMX_SC_R_DC_0_INTEGRAL0 26
+#define IMX_SC_R_DC_0_INTEGRAL1 27
+#define IMX_SC_R_DC_0_VIDEO0 28
+#define IMX_SC_R_DC_0_VIDEO1 29
+#define IMX_SC_R_DC_0_FRAC0 30
+#define IMX_SC_R_DC_0_FRAC1 31
+#define IMX_SC_R_DC_0 32
+#define IMX_SC_R_GPU_2_PID0 33
+#define IMX_SC_R_DC_0_PLL_0 34
+#define IMX_SC_R_DC_0_PLL_1 35
+#define IMX_SC_R_DC_1_BLIT0 36
+#define IMX_SC_R_DC_1_BLIT1 37
+#define IMX_SC_R_DC_1_BLIT2 38
+#define IMX_SC_R_DC_1_BLIT_OUT 39
+#define IMX_SC_R_DC_1_CAPTURE0 40
+#define IMX_SC_R_DC_1_CAPTURE1 41
+#define IMX_SC_R_DC_1_WARP 42
+#define IMX_SC_R_DC_1_INTEGRAL0 43
+#define IMX_SC_R_DC_1_INTEGRAL1 44
+#define IMX_SC_R_DC_1_VIDEO0 45
+#define IMX_SC_R_DC_1_VIDEO1 46
+#define IMX_SC_R_DC_1_FRAC0 47
+#define IMX_SC_R_DC_1_FRAC1 48
+#define IMX_SC_R_DC_1 49
+#define IMX_SC_R_GPU_3_PID0 50
+#define IMX_SC_R_DC_1_PLL_0 51
+#define IMX_SC_R_DC_1_PLL_1 52
+#define IMX_SC_R_SPI_0 53
+#define IMX_SC_R_SPI_1 54
+#define IMX_SC_R_SPI_2 55
+#define IMX_SC_R_SPI_3 56
+#define IMX_SC_R_UART_0 57
+#define IMX_SC_R_UART_1 58
+#define IMX_SC_R_UART_2 59
+#define IMX_SC_R_UART_3 60
+#define IMX_SC_R_UART_4 61
+#define IMX_SC_R_EMVSIM_0 62
+#define IMX_SC_R_EMVSIM_1 63
+#define IMX_SC_R_DMA_0_CH0 64
+#define IMX_SC_R_DMA_0_CH1 65
+#define IMX_SC_R_DMA_0_CH2 66
+#define IMX_SC_R_DMA_0_CH3 67
+#define IMX_SC_R_DMA_0_CH4 68
+#define IMX_SC_R_DMA_0_CH5 69
+#define IMX_SC_R_DMA_0_CH6 70
+#define IMX_SC_R_DMA_0_CH7 71
+#define IMX_SC_R_DMA_0_CH8 72
+#define IMX_SC_R_DMA_0_CH9 73
+#define IMX_SC_R_DMA_0_CH10 74
+#define IMX_SC_R_DMA_0_CH11 75
+#define IMX_SC_R_DMA_0_CH12 76
+#define IMX_SC_R_DMA_0_CH13 77
+#define IMX_SC_R_DMA_0_CH14 78
+#define IMX_SC_R_DMA_0_CH15 79
+#define IMX_SC_R_DMA_0_CH16 80
+#define IMX_SC_R_DMA_0_CH17 81
+#define IMX_SC_R_DMA_0_CH18 82
+#define IMX_SC_R_DMA_0_CH19 83
+#define IMX_SC_R_DMA_0_CH20 84
+#define IMX_SC_R_DMA_0_CH21 85
+#define IMX_SC_R_DMA_0_CH22 86
+#define IMX_SC_R_DMA_0_CH23 87
+#define IMX_SC_R_DMA_0_CH24 88
+#define IMX_SC_R_DMA_0_CH25 89
+#define IMX_SC_R_DMA_0_CH26 90
+#define IMX_SC_R_DMA_0_CH27 91
+#define IMX_SC_R_DMA_0_CH28 92
+#define IMX_SC_R_DMA_0_CH29 93
+#define IMX_SC_R_DMA_0_CH30 94
+#define IMX_SC_R_DMA_0_CH31 95
+#define IMX_SC_R_I2C_0 96
+#define IMX_SC_R_I2C_1 97
+#define IMX_SC_R_I2C_2 98
+#define IMX_SC_R_I2C_3 99
+#define IMX_SC_R_I2C_4 100
+#define IMX_SC_R_ADC_0 101
+#define IMX_SC_R_ADC_1 102
+#define IMX_SC_R_FTM_0 103
+#define IMX_SC_R_FTM_1 104
+#define IMX_SC_R_CAN_0 105
+#define IMX_SC_R_CAN_1 106
+#define IMX_SC_R_CAN_2 107
+#define IMX_SC_R_DMA_1_CH0 108
+#define IMX_SC_R_DMA_1_CH1 109
+#define IMX_SC_R_DMA_1_CH2 110
+#define IMX_SC_R_DMA_1_CH3 111
+#define IMX_SC_R_DMA_1_CH4 112
+#define IMX_SC_R_DMA_1_CH5 113
+#define IMX_SC_R_DMA_1_CH6 114
+#define IMX_SC_R_DMA_1_CH7 115
+#define IMX_SC_R_DMA_1_CH8 116
+#define IMX_SC_R_DMA_1_CH9 117
+#define IMX_SC_R_DMA_1_CH10 118
+#define IMX_SC_R_DMA_1_CH11 119
+#define IMX_SC_R_DMA_1_CH12 120
+#define IMX_SC_R_DMA_1_CH13 121
+#define IMX_SC_R_DMA_1_CH14 122
+#define IMX_SC_R_DMA_1_CH15 123
+#define IMX_SC_R_DMA_1_CH16 124
+#define IMX_SC_R_DMA_1_CH17 125
+#define IMX_SC_R_DMA_1_CH18 126
+#define IMX_SC_R_DMA_1_CH19 127
+#define IMX_SC_R_DMA_1_CH20 128
+#define IMX_SC_R_DMA_1_CH21 129
+#define IMX_SC_R_DMA_1_CH22 130
+#define IMX_SC_R_DMA_1_CH23 131
+#define IMX_SC_R_DMA_1_CH24 132
+#define IMX_SC_R_DMA_1_CH25 133
+#define IMX_SC_R_DMA_1_CH26 134
+#define IMX_SC_R_DMA_1_CH27 135
+#define IMX_SC_R_DMA_1_CH28 136
+#define IMX_SC_R_DMA_1_CH29 137
+#define IMX_SC_R_DMA_1_CH30 138
+#define IMX_SC_R_DMA_1_CH31 139
+#define IMX_SC_R_UNUSED1 140
+#define IMX_SC_R_UNUSED2 141
+#define IMX_SC_R_UNUSED3 142
+#define IMX_SC_R_UNUSED4 143
+#define IMX_SC_R_GPU_0_PID0 144
+#define IMX_SC_R_GPU_0_PID1 145
+#define IMX_SC_R_GPU_0_PID2 146
+#define IMX_SC_R_GPU_0_PID3 147
+#define IMX_SC_R_GPU_1_PID0 148
+#define IMX_SC_R_GPU_1_PID1 149
+#define IMX_SC_R_GPU_1_PID2 150
+#define IMX_SC_R_GPU_1_PID3 151
+#define IMX_SC_R_PCIE_A 152
+#define IMX_SC_R_SERDES_0 153
+#define IMX_SC_R_MATCH_0 154
+#define IMX_SC_R_MATCH_1 155
+#define IMX_SC_R_MATCH_2 156
+#define IMX_SC_R_MATCH_3 157
+#define IMX_SC_R_MATCH_4 158
+#define IMX_SC_R_MATCH_5 159
+#define IMX_SC_R_MATCH_6 160
+#define IMX_SC_R_MATCH_7 161
+#define IMX_SC_R_MATCH_8 162
+#define IMX_SC_R_MATCH_9 163
+#define IMX_SC_R_MATCH_10 164
+#define IMX_SC_R_MATCH_11 165
+#define IMX_SC_R_MATCH_12 166
+#define IMX_SC_R_MATCH_13 167
+#define IMX_SC_R_MATCH_14 168
+#define IMX_SC_R_PCIE_B 169
+#define IMX_SC_R_SATA_0 170
+#define IMX_SC_R_SERDES_1 171
+#define IMX_SC_R_HSIO_GPIO 172
+#define IMX_SC_R_MATCH_15 173
+#define IMX_SC_R_MATCH_16 174
+#define IMX_SC_R_MATCH_17 175
+#define IMX_SC_R_MATCH_18 176
+#define IMX_SC_R_MATCH_19 177
+#define IMX_SC_R_MATCH_20 178
+#define IMX_SC_R_MATCH_21 179
+#define IMX_SC_R_MATCH_22 180
+#define IMX_SC_R_MATCH_23 181
+#define IMX_SC_R_MATCH_24 182
+#define IMX_SC_R_MATCH_25 183
+#define IMX_SC_R_MATCH_26 184
+#define IMX_SC_R_MATCH_27 185
+#define IMX_SC_R_MATCH_28 186
+#define IMX_SC_R_LCD_0 187
+#define IMX_SC_R_LCD_0_PWM_0 188
+#define IMX_SC_R_LCD_0_I2C_0 189
+#define IMX_SC_R_LCD_0_I2C_1 190
+#define IMX_SC_R_PWM_0 191
+#define IMX_SC_R_PWM_1 192
+#define IMX_SC_R_PWM_2 193
+#define IMX_SC_R_PWM_3 194
+#define IMX_SC_R_PWM_4 195
+#define IMX_SC_R_PWM_5 196
+#define IMX_SC_R_PWM_6 197
+#define IMX_SC_R_PWM_7 198
+#define IMX_SC_R_GPIO_0 199
+#define IMX_SC_R_GPIO_1 200
+#define IMX_SC_R_GPIO_2 201
+#define IMX_SC_R_GPIO_3 202
+#define IMX_SC_R_GPIO_4 203
+#define IMX_SC_R_GPIO_5 204
+#define IMX_SC_R_GPIO_6 205
+#define IMX_SC_R_GPIO_7 206
+#define IMX_SC_R_GPT_0 207
+#define IMX_SC_R_GPT_1 208
+#define IMX_SC_R_GPT_2 209
+#define IMX_SC_R_GPT_3 210
+#define IMX_SC_R_GPT_4 211
+#define IMX_SC_R_KPP 212
+#define IMX_SC_R_MU_0A 213
+#define IMX_SC_R_MU_1A 214
+#define IMX_SC_R_MU_2A 215
+#define IMX_SC_R_MU_3A 216
+#define IMX_SC_R_MU_4A 217
+#define IMX_SC_R_MU_5A 218
+#define IMX_SC_R_MU_6A 219
+#define IMX_SC_R_MU_7A 220
+#define IMX_SC_R_MU_8A 221
+#define IMX_SC_R_MU_9A 222
+#define IMX_SC_R_MU_10A 223
+#define IMX_SC_R_MU_11A 224
+#define IMX_SC_R_MU_12A 225
+#define IMX_SC_R_MU_13A 226
+#define IMX_SC_R_MU_5B 227
+#define IMX_SC_R_MU_6B 228
+#define IMX_SC_R_MU_7B 229
+#define IMX_SC_R_MU_8B 230
+#define IMX_SC_R_MU_9B 231
+#define IMX_SC_R_MU_10B 232
+#define IMX_SC_R_MU_11B 233
+#define IMX_SC_R_MU_12B 234
+#define IMX_SC_R_MU_13B 235
+#define IMX_SC_R_ROM_0 236
+#define IMX_SC_R_FSPI_0 237
+#define IMX_SC_R_FSPI_1 238
+#define IMX_SC_R_IEE 239
+#define IMX_SC_R_IEE_R0 240
+#define IMX_SC_R_IEE_R1 241
+#define IMX_SC_R_IEE_R2 242
+#define IMX_SC_R_IEE_R3 243
+#define IMX_SC_R_IEE_R4 244
+#define IMX_SC_R_IEE_R5 245
+#define IMX_SC_R_IEE_R6 246
+#define IMX_SC_R_IEE_R7 247
+#define IMX_SC_R_SDHC_0 248
+#define IMX_SC_R_SDHC_1 249
+#define IMX_SC_R_SDHC_2 250
+#define IMX_SC_R_ENET_0 251
+#define IMX_SC_R_ENET_1 252
+#define IMX_SC_R_MLB_0 253
+#define IMX_SC_R_DMA_2_CH0 254
+#define IMX_SC_R_DMA_2_CH1 255
+#define IMX_SC_R_DMA_2_CH2 256
+#define IMX_SC_R_DMA_2_CH3 257
+#define IMX_SC_R_DMA_2_CH4 258
+#define IMX_SC_R_USB_0 259
+#define IMX_SC_R_USB_1 260
+#define IMX_SC_R_USB_0_PHY 261
+#define IMX_SC_R_USB_2 262
+#define IMX_SC_R_USB_2_PHY 263
+#define IMX_SC_R_DTCP 264
+#define IMX_SC_R_NAND 265
+#define IMX_SC_R_LVDS_0 266
+#define IMX_SC_R_LVDS_0_PWM_0 267
+#define IMX_SC_R_LVDS_0_I2C_0 268
+#define IMX_SC_R_LVDS_0_I2C_1 269
+#define IMX_SC_R_LVDS_1 270
+#define IMX_SC_R_LVDS_1_PWM_0 271
+#define IMX_SC_R_LVDS_1_I2C_0 272
+#define IMX_SC_R_LVDS_1_I2C_1 273
+#define IMX_SC_R_LVDS_2 274
+#define IMX_SC_R_LVDS_2_PWM_0 275
+#define IMX_SC_R_LVDS_2_I2C_0 276
+#define IMX_SC_R_LVDS_2_I2C_1 277
+#define IMX_SC_R_M4_0_PID0 278
+#define IMX_SC_R_M4_0_PID1 279
+#define IMX_SC_R_M4_0_PID2 280
+#define IMX_SC_R_M4_0_PID3 281
+#define IMX_SC_R_M4_0_PID4 282
+#define IMX_SC_R_M4_0_RGPIO 283
+#define IMX_SC_R_M4_0_SEMA42 284
+#define IMX_SC_R_M4_0_TPM 285
+#define IMX_SC_R_M4_0_PIT 286
+#define IMX_SC_R_M4_0_UART 287
+#define IMX_SC_R_M4_0_I2C 288
+#define IMX_SC_R_M4_0_INTMUX 289
+#define IMX_SC_R_M4_0_SIM 290
+#define IMX_SC_R_M4_0_WDOG 291
+#define IMX_SC_R_M4_0_MU_0B 292
+#define IMX_SC_R_M4_0_MU_0A0 293
+#define IMX_SC_R_M4_0_MU_0A1 294
+#define IMX_SC_R_M4_0_MU_0A2 295
+#define IMX_SC_R_M4_0_MU_0A3 296
+#define IMX_SC_R_M4_0_MU_1A 297
+#define IMX_SC_R_M4_1_PID0 298
+#define IMX_SC_R_M4_1_PID1 299
+#define IMX_SC_R_M4_1_PID2 300
+#define IMX_SC_R_M4_1_PID3 301
+#define IMX_SC_R_M4_1_PID4 302
+#define IMX_SC_R_M4_1_RGPIO 303
+#define IMX_SC_R_M4_1_SEMA42 304
+#define IMX_SC_R_M4_1_TPM 305
+#define IMX_SC_R_M4_1_PIT 306
+#define IMX_SC_R_M4_1_UART 307
+#define IMX_SC_R_M4_1_I2C 308
+#define IMX_SC_R_M4_1_INTMUX 309
+#define IMX_SC_R_M4_1_SIM 310
+#define IMX_SC_R_M4_1_WDOG 311
+#define IMX_SC_R_M4_1_MU_0B 312
+#define IMX_SC_R_M4_1_MU_0A0 313
+#define IMX_SC_R_M4_1_MU_0A1 314
+#define IMX_SC_R_M4_1_MU_0A2 315
+#define IMX_SC_R_M4_1_MU_0A3 316
+#define IMX_SC_R_M4_1_MU_1A 317
+#define IMX_SC_R_SAI_0 318
+#define IMX_SC_R_SAI_1 319
+#define IMX_SC_R_SAI_2 320
+#define IMX_SC_R_IRQSTR_SCU2 321
+#define IMX_SC_R_IRQSTR_DSP 322
+#define IMX_SC_R_ELCDIF_PLL 323
+#define IMX_SC_R_UNUSED6 324
+#define IMX_SC_R_AUDIO_PLL_0 325
+#define IMX_SC_R_PI_0 326
+#define IMX_SC_R_PI_0_PWM_0 327
+#define IMX_SC_R_PI_0_PWM_1 328
+#define IMX_SC_R_PI_0_I2C_0 329
+#define IMX_SC_R_PI_0_PLL 330
+#define IMX_SC_R_PI_1 331
+#define IMX_SC_R_PI_1_PWM_0 332
+#define IMX_SC_R_PI_1_PWM_1 333
+#define IMX_SC_R_PI_1_I2C_0 334
+#define IMX_SC_R_PI_1_PLL 335
+#define IMX_SC_R_SC_PID0 336
+#define IMX_SC_R_SC_PID1 337
+#define IMX_SC_R_SC_PID2 338
+#define IMX_SC_R_SC_PID3 339
+#define IMX_SC_R_SC_PID4 340
+#define IMX_SC_R_SC_SEMA42 341
+#define IMX_SC_R_SC_TPM 342
+#define IMX_SC_R_SC_PIT 343
+#define IMX_SC_R_SC_UART 344
+#define IMX_SC_R_SC_I2C 345
+#define IMX_SC_R_SC_MU_0B 346
+#define IMX_SC_R_SC_MU_0A0 347
+#define IMX_SC_R_SC_MU_0A1 348
+#define IMX_SC_R_SC_MU_0A2 349
+#define IMX_SC_R_SC_MU_0A3 350
+#define IMX_SC_R_SC_MU_1A 351
+#define IMX_SC_R_SYSCNT_RD 352
+#define IMX_SC_R_SYSCNT_CMP 353
+#define IMX_SC_R_DEBUG 354
+#define IMX_SC_R_SYSTEM 355
+#define IMX_SC_R_SNVS 356
+#define IMX_SC_R_OTP 357
+#define IMX_SC_R_VPU_PID0 358
+#define IMX_SC_R_VPU_PID1 359
+#define IMX_SC_R_VPU_PID2 360
+#define IMX_SC_R_VPU_PID3 361
+#define IMX_SC_R_VPU_PID4 362
+#define IMX_SC_R_VPU_PID5 363
+#define IMX_SC_R_VPU_PID6 364
+#define IMX_SC_R_VPU_PID7 365
+#define IMX_SC_R_VPU_UART 366
+#define IMX_SC_R_VPUCORE 367
+#define IMX_SC_R_VPUCORE_0 368
+#define IMX_SC_R_VPUCORE_1 369
+#define IMX_SC_R_VPUCORE_2 370
+#define IMX_SC_R_VPUCORE_3 371
+#define IMX_SC_R_DMA_4_CH0 372
+#define IMX_SC_R_DMA_4_CH1 373
+#define IMX_SC_R_DMA_4_CH2 374
+#define IMX_SC_R_DMA_4_CH3 375
+#define IMX_SC_R_DMA_4_CH4 376
+#define IMX_SC_R_ISI_CH0 377
+#define IMX_SC_R_ISI_CH1 378
+#define IMX_SC_R_ISI_CH2 379
+#define IMX_SC_R_ISI_CH3 380
+#define IMX_SC_R_ISI_CH4 381
+#define IMX_SC_R_ISI_CH5 382
+#define IMX_SC_R_ISI_CH6 383
+#define IMX_SC_R_ISI_CH7 384
+#define IMX_SC_R_MJPEG_DEC_S0 385
+#define IMX_SC_R_MJPEG_DEC_S1 386
+#define IMX_SC_R_MJPEG_DEC_S2 387
+#define IMX_SC_R_MJPEG_DEC_S3 388
+#define IMX_SC_R_MJPEG_ENC_S0 389
+#define IMX_SC_R_MJPEG_ENC_S1 390
+#define IMX_SC_R_MJPEG_ENC_S2 391
+#define IMX_SC_R_MJPEG_ENC_S3 392
+#define IMX_SC_R_MIPI_0 393
+#define IMX_SC_R_MIPI_0_PWM_0 394
+#define IMX_SC_R_MIPI_0_I2C_0 395
+#define IMX_SC_R_MIPI_0_I2C_1 396
+#define IMX_SC_R_MIPI_1 397
+#define IMX_SC_R_MIPI_1_PWM_0 398
+#define IMX_SC_R_MIPI_1_I2C_0 399
+#define IMX_SC_R_MIPI_1_I2C_1 400
+#define IMX_SC_R_CSI_0 401
+#define IMX_SC_R_CSI_0_PWM_0 402
+#define IMX_SC_R_CSI_0_I2C_0 403
+#define IMX_SC_R_CSI_1 404
+#define IMX_SC_R_CSI_1_PWM_0 405
+#define IMX_SC_R_CSI_1_I2C_0 406
+#define IMX_SC_R_HDMI 407
+#define IMX_SC_R_HDMI_I2S 408
+#define IMX_SC_R_HDMI_I2C_0 409
+#define IMX_SC_R_HDMI_PLL_0 410
+#define IMX_SC_R_HDMI_RX 411
+#define IMX_SC_R_HDMI_RX_BYPASS 412
+#define IMX_SC_R_HDMI_RX_I2C_0 413
+#define IMX_SC_R_ASRC_0 414
+#define IMX_SC_R_ESAI_0 415
+#define IMX_SC_R_SPDIF_0 416
+#define IMX_SC_R_SPDIF_1 417
+#define IMX_SC_R_SAI_3 418
+#define IMX_SC_R_SAI_4 419
+#define IMX_SC_R_SAI_5 420
+#define IMX_SC_R_GPT_5 421
+#define IMX_SC_R_GPT_6 422
+#define IMX_SC_R_GPT_7 423
+#define IMX_SC_R_GPT_8 424
+#define IMX_SC_R_GPT_9 425
+#define IMX_SC_R_GPT_10 426
+#define IMX_SC_R_DMA_2_CH5 427
+#define IMX_SC_R_DMA_2_CH6 428
+#define IMX_SC_R_DMA_2_CH7 429
+#define IMX_SC_R_DMA_2_CH8 430
+#define IMX_SC_R_DMA_2_CH9 431
+#define IMX_SC_R_DMA_2_CH10 432
+#define IMX_SC_R_DMA_2_CH11 433
+#define IMX_SC_R_DMA_2_CH12 434
+#define IMX_SC_R_DMA_2_CH13 435
+#define IMX_SC_R_DMA_2_CH14 436
+#define IMX_SC_R_DMA_2_CH15 437
+#define IMX_SC_R_DMA_2_CH16 438
+#define IMX_SC_R_DMA_2_CH17 439
+#define IMX_SC_R_DMA_2_CH18 440
+#define IMX_SC_R_DMA_2_CH19 441
+#define IMX_SC_R_DMA_2_CH20 442
+#define IMX_SC_R_DMA_2_CH21 443
+#define IMX_SC_R_DMA_2_CH22 444
+#define IMX_SC_R_DMA_2_CH23 445
+#define IMX_SC_R_DMA_2_CH24 446
+#define IMX_SC_R_DMA_2_CH25 447
+#define IMX_SC_R_DMA_2_CH26 448
+#define IMX_SC_R_DMA_2_CH27 449
+#define IMX_SC_R_DMA_2_CH28 450
+#define IMX_SC_R_DMA_2_CH29 451
+#define IMX_SC_R_DMA_2_CH30 452
+#define IMX_SC_R_DMA_2_CH31 453
+#define IMX_SC_R_ASRC_1 454
+#define IMX_SC_R_ESAI_1 455
+#define IMX_SC_R_SAI_6 456
+#define IMX_SC_R_SAI_7 457
+#define IMX_SC_R_AMIX 458
+#define IMX_SC_R_MQS_0 459
+#define IMX_SC_R_DMA_3_CH0 460
+#define IMX_SC_R_DMA_3_CH1 461
+#define IMX_SC_R_DMA_3_CH2 462
+#define IMX_SC_R_DMA_3_CH3 463
+#define IMX_SC_R_DMA_3_CH4 464
+#define IMX_SC_R_DMA_3_CH5 465
+#define IMX_SC_R_DMA_3_CH6 466
+#define IMX_SC_R_DMA_3_CH7 467
+#define IMX_SC_R_DMA_3_CH8 468
+#define IMX_SC_R_DMA_3_CH9 469
+#define IMX_SC_R_DMA_3_CH10 470
+#define IMX_SC_R_DMA_3_CH11 471
+#define IMX_SC_R_DMA_3_CH12 472
+#define IMX_SC_R_DMA_3_CH13 473
+#define IMX_SC_R_DMA_3_CH14 474
+#define IMX_SC_R_DMA_3_CH15 475
+#define IMX_SC_R_DMA_3_CH16 476
+#define IMX_SC_R_DMA_3_CH17 477
+#define IMX_SC_R_DMA_3_CH18 478
+#define IMX_SC_R_DMA_3_CH19 479
+#define IMX_SC_R_DMA_3_CH20 480
+#define IMX_SC_R_DMA_3_CH21 481
+#define IMX_SC_R_DMA_3_CH22 482
+#define IMX_SC_R_DMA_3_CH23 483
+#define IMX_SC_R_DMA_3_CH24 484
+#define IMX_SC_R_DMA_3_CH25 485
+#define IMX_SC_R_DMA_3_CH26 486
+#define IMX_SC_R_DMA_3_CH27 487
+#define IMX_SC_R_DMA_3_CH28 488
+#define IMX_SC_R_DMA_3_CH29 489
+#define IMX_SC_R_DMA_3_CH30 490
+#define IMX_SC_R_DMA_3_CH31 491
+#define IMX_SC_R_AUDIO_PLL_1 492
+#define IMX_SC_R_AUDIO_CLK_0 493
+#define IMX_SC_R_AUDIO_CLK_1 494
+#define IMX_SC_R_MCLK_OUT_0 495
+#define IMX_SC_R_MCLK_OUT_1 496
+#define IMX_SC_R_PMIC_0 497
+#define IMX_SC_R_PMIC_1 498
+#define IMX_SC_R_SECO 499
+#define IMX_SC_R_CAAM_JR1 500
+#define IMX_SC_R_CAAM_JR2 501
+#define IMX_SC_R_CAAM_JR3 502
+#define IMX_SC_R_SECO_MU_2 503
+#define IMX_SC_R_SECO_MU_3 504
+#define IMX_SC_R_SECO_MU_4 505
+#define IMX_SC_R_HDMI_RX_PWM_0 506
+#define IMX_SC_R_A35 507
+#define IMX_SC_R_A35_0 508
+#define IMX_SC_R_A35_1 509
+#define IMX_SC_R_A35_2 510
+#define IMX_SC_R_A35_3 511
+#define IMX_SC_R_DSP 512
+#define IMX_SC_R_DSP_RAM 513
+#define IMX_SC_R_CAAM_JR1_OUT 514
+#define IMX_SC_R_CAAM_JR2_OUT 515
+#define IMX_SC_R_CAAM_JR3_OUT 516
+#define IMX_SC_R_VPU_DEC_0 517
+#define IMX_SC_R_VPU_ENC_0 518
+#define IMX_SC_R_CAAM_JR0 519
+#define IMX_SC_R_CAAM_JR0_OUT 520
+#define IMX_SC_R_PMIC_2 521
+#define IMX_SC_R_DBLOGIC 522
+#define IMX_SC_R_HDMI_PLL_1 523
+#define IMX_SC_R_BOARD_R0 524
+#define IMX_SC_R_BOARD_R1 525
+#define IMX_SC_R_BOARD_R2 526
+#define IMX_SC_R_BOARD_R3 527
+#define IMX_SC_R_BOARD_R4 528
+#define IMX_SC_R_BOARD_R5 529
+#define IMX_SC_R_BOARD_R6 530
+#define IMX_SC_R_BOARD_R7 531
+#define IMX_SC_R_MJPEG_DEC_MP 532
+#define IMX_SC_R_MJPEG_ENC_MP 533
+#define IMX_SC_R_VPU_TS_0 534
+#define IMX_SC_R_VPU_MU_0 535
+#define IMX_SC_R_VPU_MU_1 536
+#define IMX_SC_R_VPU_MU_2 537
+#define IMX_SC_R_VPU_MU_3 538
+#define IMX_SC_R_VPU_ENC_1 539
+#define IMX_SC_R_VPU 540
+#define IMX_SC_R_LAST 541
+
+#endif /* __DT_BINDINGS_RSCRC_IMX_H */
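The IMX_SC_R_* values above form a stable numbering shared with the SCU firmware, which is why the comment forbids renumbering. As a minimal illustration, a purely hypothetical table a board-support sketch might build; only the constants themselves come from this header:

	#include <dt-bindings/firmware/imx/rsrc.h>

	/* Hypothetical: SCU resources a board sketch hands to the firmware
	 * power-management interface one by one. */
	static const unsigned int board_uart_resources[] = {
		IMX_SC_R_UART_0,
		IMX_SC_R_UART_1,
		IMX_SC_R_UART_2,
	};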
diff --git a/include/dt-bindings/media/xilinx-vip.h b/include/dt-bindings/media/xilinx-vip.h
index 6298fec00685..94ed3edfcc70 100644
--- a/include/dt-bindings/media/xilinx-vip.h
+++ b/include/dt-bindings/media/xilinx-vip.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx Video IP Core
*
@@ -6,10 +7,6 @@
*
* Contacts: Hyun Kwon <hyun.kwon@xilinx.com>
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __DT_BINDINGS_MEDIA_XILINX_VIP_H__
diff --git a/include/dt-bindings/regulator/active-semi,8945a-regulator.h b/include/dt-bindings/regulator/active-semi,8945a-regulator.h
new file mode 100644
index 000000000000..9bdba5e3141a
--- /dev/null
+++ b/include/dt-bindings/regulator/active-semi,8945a-regulator.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 Microchip Technology, Inc. All rights reserved.
+ *
+ * Device Tree binding constants for the ACT8945A PMIC regulators
+ */
+
+#ifndef _DT_BINDINGS_REGULATOR_ACT8945A_H
+#define _DT_BINDINGS_REGULATOR_ACT8945A_H
+
+/*
+ * These constants should be used to specify regulator modes in device tree for
+ * ACT8945A regulators as follows:
+ * ACT8945A_REGULATOR_MODE_FIXED: It is specific to DCDC regulators and it
+ * specifies the usage of fixed-frequency
+ * PWM.
+ *
+ * ACT8945A_REGULATOR_MODE_NORMAL: It is specific to LDO regulators and it
+ * specifies the usage of normal mode.
+ *
+ * ACT8945A_REGULATOR_MODE_LOWPOWER: For DCDC and LDO regulators; it specifies
+ * the usage of proprietary power-saving
+ * mode.
+ */
+
+#define ACT8945A_REGULATOR_MODE_FIXED 1
+#define ACT8945A_REGULATOR_MODE_NORMAL 2
+#define ACT8945A_REGULATOR_MODE_LOWPOWER 3
+
+#endif
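In a DTS these values typically land in regulator-initial-mode/regulator-allowed-modes, and the driver translates them to the framework's generic modes through its of_map_mode hook. A hedged sketch of that translation; the mapping chosen here is illustrative, not the driver's authoritative table:

	#include <linux/regulator/driver.h>
	#include <dt-bindings/regulator/active-semi,8945a-regulator.h>

	/* Sketch: map the DT constants above onto the framework's generic
	 * regulator modes. */
	static unsigned int act8945a_map_mode_sketch(unsigned int mode)
	{
		switch (mode) {
		case ACT8945A_REGULATOR_MODE_FIXED:
		case ACT8945A_REGULATOR_MODE_NORMAL:
			return REGULATOR_MODE_NORMAL;
		case ACT8945A_REGULATOR_MODE_LOWPOWER:
			return REGULATOR_MODE_STANDBY;
		default:
			return REGULATOR_MODE_INVALID;
		}
	}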
diff --git a/include/dt-bindings/reset/sun8i-de2.h b/include/dt-bindings/reset/sun8i-de2.h
index 9526017432f0..1c36a6ac86d6 100644
--- a/include/dt-bindings/reset/sun8i-de2.h
+++ b/include/dt-bindings/reset/sun8i-de2.h
@@ -10,5 +10,6 @@
#define RST_MIXER0 0
#define RST_MIXER1 1
#define RST_WB 2
+#define RST_ROT 3
#endif /* _DT_BINDINGS_RESET_SUN8I_DE2_H_ */
diff --git a/include/dt-bindings/reset/suniv-ccu-f1c100s.h b/include/dt-bindings/reset/suniv-ccu-f1c100s.h
new file mode 100644
index 000000000000..6a4b4385fe5a
--- /dev/null
+++ b/include/dt-bindings/reset/suniv-ccu-f1c100s.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ *
+ * Copyright (C) 2018 Icenowy Zheng <icenowy@aosc.xyz>
+ *
+ */
+
+#ifndef _DT_BINDINGS_RST_SUNIV_F1C100S_H_
+#define _DT_BINDINGS_RST_SUNIV_F1C100S_H_
+
+#define RST_USB_PHY0 0
+#define RST_BUS_DMA 1
+#define RST_BUS_MMC0 2
+#define RST_BUS_MMC1 3
+#define RST_BUS_DRAM 4
+#define RST_BUS_SPI0 5
+#define RST_BUS_SPI1 6
+#define RST_BUS_OTG 7
+#define RST_BUS_VE 8
+#define RST_BUS_LCD 9
+#define RST_BUS_DEINTERLACE 10
+#define RST_BUS_CSI 11
+#define RST_BUS_TVD 12
+#define RST_BUS_TVE 13
+#define RST_BUS_DE_BE 14
+#define RST_BUS_DE_FE 15
+#define RST_BUS_CODEC 16
+#define RST_BUS_SPDIF 17
+#define RST_BUS_IR 18
+#define RST_BUS_RSB 19
+#define RST_BUS_I2S0 20
+#define RST_BUS_I2C0 21
+#define RST_BUS_I2C1 22
+#define RST_BUS_I2C2 23
+#define RST_BUS_UART0 24
+#define RST_BUS_UART1 25
+#define RST_BUS_UART2 26
+
+#endif /* _DT_BINDINGS_RST_SUNIV_F1C100S_H_ */
diff --git a/include/dt-bindings/sound/qcom,q6afe.h b/include/dt-bindings/sound/qcom,q6afe.h
index e2d3892240b8..1df06f8ad5c3 100644
--- a/include/dt-bindings/sound/qcom,q6afe.h
+++ b/include/dt-bindings/sound/qcom,q6afe.h
@@ -106,6 +106,7 @@
#define QUINARY_TDM_TX_6 101
#define QUINARY_TDM_RX_7 102
#define QUINARY_TDM_TX_7 103
+#define DISPLAY_PORT_RX 104
#endif /* __DT_BINDINGS_Q6_AFE_H__ */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 6502feb9524b..33771352dcd6 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -21,7 +21,6 @@
#include <linux/clocksource.h>
#include <linux/hrtimer.h>
-#include <linux/workqueue.h>
struct arch_timer_context {
/* Registers: control register, timer value */
@@ -52,9 +51,6 @@ struct arch_timer_cpu {
/* Background timer used when the guest is not running */
struct hrtimer bg_timer;
- /* Work queued with the above timer expires */
- struct work_struct expired;
-
/* Physical timer emulation */
struct hrtimer phys_timer;
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index ed80f147bd50..87715f20b69a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -101,7 +101,7 @@ static inline bool has_acpi_companion(struct device *dev)
static inline void acpi_preset_companion(struct device *dev,
struct acpi_device *parent, u64 addr)
{
- ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, NULL));
+ ACPI_COMPANION_SET(dev, acpi_find_child_device(parent, addr, false));
}
static inline const char *acpi_dev_name(struct acpi_device *adev)
@@ -340,7 +340,14 @@ struct pci_dev;
int acpi_pci_irq_enable (struct pci_dev *dev);
void acpi_penalize_isa_irq(int irq, int active);
bool acpi_isa_irq_available(int irq);
+#ifdef CONFIG_PCI
void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
+#else
+static inline void acpi_penalize_sci_irq(int irq, int trigger,
+ int polarity)
+{
+}
+#endif
void acpi_pci_irq_disable (struct pci_dev *dev);
extern int ec_read(u8 addr, u8 *val);
@@ -1054,6 +1061,17 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
}
#endif
+#if defined(CONFIG_ACPI) && IS_ENABLED(CONFIG_I2C)
+bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c);
+#else
+static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares,
+ struct acpi_resource_i2c_serialbus **i2c)
+{
+ return false;
+}
+#endif
+
/* Device properties */
#ifdef CONFIG_ACPI
@@ -1313,4 +1331,14 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
}
#endif
+#ifdef CONFIG_ACPI
+extern int acpi_platform_notify(struct device *dev, enum kobject_action action);
+#else
+static inline int
+acpi_platform_notify(struct device *dev, enum kobject_action action)
+{
+ return 0;
+}
+#endif
+
#endif /*_LINUX_ACPI_H*/
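i2c_acpi_get_i2c_resource(), newly declared above, lets resource walkers pick I2C serial-bus descriptors out of an ACPI resource list. A hedged sketch of the usual callback pattern; the callback name and surrounding usage are illustrative:

	#include <linux/acpi.h>

	/* Sketch: resource-walk callback that filters for I2C serial-bus
	 * descriptors using the helper declared above. */
	static int find_i2c_resource(struct acpi_resource *ares, void *data)
	{
		struct acpi_resource_i2c_serialbus *sb;
		u16 *addr = data;

		if (i2c_acpi_get_i2c_resource(ares, &sb))
			*addr = sb->slave_address;

		return 1;	/* keep walking */
	}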
diff --git a/include/linux/adxl.h b/include/linux/adxl.h
index 2d29f55923e3..2a629acb4c3f 100644
--- a/include/linux/adxl.h
+++ b/include/linux/adxl.h
@@ -7,12 +7,7 @@
#ifndef _LINUX_ADXL_H
#define _LINUX_ADXL_H
-#ifdef CONFIG_ACPI_ADXL
const char * const *adxl_get_component_names(void);
int adxl_decode(u64 addr, u64 component_values[]);
-#else
-static inline const char * const *adxl_get_component_names(void) { return NULL; }
-static inline int adxl_decode(u64 addr, u64 component_values[]) { return -EOPNOTSUPP; }
-#endif
#endif /* _LINUX_ADXL_H */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 9334fbef7bae..a625c29a2ea2 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -115,8 +115,6 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall);
struct filename;
-extern void audit_log_session_info(struct audit_buffer *ab);
-
#define AUDIT_OFF 0
#define AUDIT_ON 1
#define AUDIT_LOCKED 2
@@ -153,8 +151,7 @@ extern void audit_log_link_denied(const char *operation);
extern void audit_log_lost(const char *message);
extern int audit_log_task_context(struct audit_buffer *ab);
-extern void audit_log_task_info(struct audit_buffer *ab,
- struct task_struct *tsk);
+extern void audit_log_task_info(struct audit_buffer *ab);
extern int audit_update_lsm_rules(void);
@@ -202,8 +199,7 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
{
return 0;
}
-static inline void audit_log_task_info(struct audit_buffer *ab,
- struct task_struct *tsk)
+static inline void audit_log_task_info(struct audit_buffer *ab)
{ }
#define audit_enabled AUDIT_OFF
#endif /* CONFIG_AUDIT */
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index b2488055fd1d..7605b5919c3a 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -171,7 +171,7 @@ struct virtchnl_msg {
VIRTCHNL_CHECK_STRUCT_LEN(20, virtchnl_msg);
-/* Message descriptions and data structures.*/
+/* Message descriptions and data structures. */
/* VIRTCHNL_OP_VERSION
* VF posts its version number to the PF. PF responds with its version number
@@ -342,6 +342,8 @@ struct virtchnl_vsi_queue_config_info {
struct virtchnl_queue_pair_info qpair[1];
};
+VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
+
/* VIRTCHNL_OP_REQUEST_QUEUES
* VF sends this message to request the PF to allocate additional queues to
* this VF. Each VF gets a guaranteed number of queues on init but asking for
@@ -357,8 +359,6 @@ struct virtchnl_vf_res_request {
u16 num_queue_pairs;
};
-VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info);
-
/* VIRTCHNL_OP_CONFIG_IRQ_MAP
* VF uses this message to map vectors to queues.
* The rxq_map and txq_map fields are bitmaps used to indicate which queues
@@ -819,8 +819,8 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
if (msglen >= valid_len) {
struct virtchnl_tc_info *vti =
(struct virtchnl_tc_info *)msg;
- valid_len += vti->num_tc *
- sizeof(struct virtchnl_channel_info);
+ valid_len += (vti->num_tc - 1) *
+ sizeof(struct virtchnl_channel_info);
if (vti->num_tc == 0)
err_msg_format = true;
}
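The valid_len fix above accounts for the one channel_info element already embedded in struct virtchnl_tc_info (the list[1]/qpair[1] idiom), so only num_tc - 1 trailing elements are appended. A self-contained sketch of the sizing arithmetic, with hypothetical names:

	#include <stddef.h>

	/* Hypothetical struct using the one-element trailing-array idiom:
	 * sizeof() already covers the first element, so a message carrying
	 * n elements needs only (n - 1) extra slots. */
	struct demo_info {
		unsigned int num;
		unsigned int list[1];
	};

	static size_t demo_msg_len(unsigned int n)
	{
		return sizeof(struct demo_info) + (n - 1) * sizeof(unsigned int);
	}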
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 056fb627edb3..7380b094dcca 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -491,35 +491,40 @@ do { \
bio_clear_flag(bio, BIO_THROTTLED);\
(bio)->bi_disk = (bdev)->bd_disk; \
(bio)->bi_partno = (bdev)->bd_partno; \
+ bio_associate_blkg(bio); \
} while (0)
#define bio_copy_dev(dst, src) \
do { \
(dst)->bi_disk = (src)->bi_disk; \
(dst)->bi_partno = (src)->bi_partno; \
+ bio_clone_blkg_association(dst, src); \
} while (0)
#define bio_dev(bio) \
disk_devt((bio)->bi_disk)
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
+void bio_associate_blkg_from_page(struct bio *bio, struct page *page);
#else
-static inline int bio_associate_blkcg_from_page(struct bio *bio,
- struct page *page) { return 0; }
+static inline void bio_associate_blkg_from_page(struct bio *bio,
+ struct page *page) { }
#endif
#ifdef CONFIG_BLK_CGROUP
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
-void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
+void bio_disassociate_blkg(struct bio *bio);
+void bio_associate_blkg(struct bio *bio);
+void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css);
+void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
-static inline int bio_associate_blkcg(struct bio *bio,
- struct cgroup_subsys_state *blkcg_css) { return 0; }
-static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkcg_association(struct bio *dst,
- struct bio *src) { }
+static inline void bio_disassociate_blkg(struct bio *bio) { }
+static inline void bio_associate_blkg(struct bio *bio) { }
+static inline void bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{ }
+static inline void bio_clone_blkg_association(struct bio *dst,
+ struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */
#ifdef CONFIG_HIGHMEM
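With bio_associate_blkg() now wired into bio_set_dev(), device association and blkg association travel together. A minimal hedged sketch; the function and names are illustrative:

	#include <linux/bio.h>

	/* Sketch: pointing a bio at another block device implicitly re-runs
	 * the blkg association via the bio_set_dev() macro above. */
	static void redirect_bio_sketch(struct bio *bio,
					struct block_device *target)
	{
		bio_set_dev(bio, target);
		submit_bio(bio);
	}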
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 6d766a19f2bb..f025fd1e22e6 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -21,6 +21,7 @@
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
+#include <linux/fs.h>
/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH (INT_MAX / 2)
@@ -122,11 +123,8 @@ struct blkcg_gq {
/* all non-root blkcg_gq's are guaranteed to have access to parent */
struct blkcg_gq *parent;
- /* request allocation list for this blkcg-q pair */
- struct request_list rl;
-
/* reference count */
- atomic_t refcnt;
+ struct percpu_ref refcnt;
/* is this blkg online? protected by both blkcg and q locks */
bool online;
@@ -184,6 +182,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
@@ -230,22 +230,62 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+/**
+ * blkcg_css - find the current css
+ *
+ * Find the css associated with either the kthread or the current task.
+ * This may return a dying css, so it is up to the caller to use tryget logic
+ * to confirm it is alive and well.
+ */
+static inline struct cgroup_subsys_state *blkcg_css(void)
+{
+ struct cgroup_subsys_state *css;
+
+ css = kthread_blkcg();
+ if (css)
+ return css;
+ return task_css(current, io_cgrp_id);
+}
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct blkcg, css) : NULL;
}
-static inline struct blkcg *bio_blkcg(struct bio *bio)
+/**
+ * __bio_blkcg - internal, inconsistent version to get blkcg
+ *
+ * DO NOT USE.
+ * This function is inconsistent and consequently is dangerous to use. The
+ * first part of the function returns a blkcg where a reference is owned by the
+ * bio. This means it does not need to be rcu protected as it cannot go away
+ * with the bio owning a reference to it. However, the latter potentially gets
+ * it from task_css(). This can race against task migration and the cgroup
+ * dying. It is also semantically different as it must be called rcu protected
+ * and is susceptible to failure when trying to get a reference to it.
+ * Therefore, it is not ok to assume that *_get() will always succeed on the
+ * blkcg returned here.
+ */
+static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
- struct cgroup_subsys_state *css;
+ if (bio && bio->bi_blkg)
+ return bio->bi_blkg->blkcg;
+ return css_to_blkcg(blkcg_css());
+}
- if (bio && bio->bi_css)
- return css_to_blkcg(bio->bi_css);
- css = kthread_blkcg();
- if (css)
- return css_to_blkcg(css);
- return css_to_blkcg(task_css(current, io_cgrp_id));
+/**
+ * bio_blkcg - grab the blkcg associated with a bio
+ * @bio: target bio
+ *
+ * This returns the blkcg associated with a bio, %NULL if not associated.
+ * Callers are expected to either handle %NULL or know association has been
+ * done prior to calling this.
+ */
+static inline struct blkcg *bio_blkcg(struct bio *bio)
+{
+ if (bio && bio->bi_blkg)
+ return bio->bi_blkg->blkcg;
+ return NULL;
}
static inline bool blk_cgroup_congested(void)
@@ -328,16 +368,12 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
* @q: request_queue of interest
*
* Lookup blkg for the @blkcg - @q pair. This function should be called
- * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
- * - see blk_queue_bypass_start() for details.
+ * under RCU read lock.
*/
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
struct request_queue *q)
{
WARN_ON_ONCE(!rcu_read_lock_held());
-
- if (unlikely(blk_queue_bypass(q)))
- return NULL;
return __blkg_lookup(blkcg, q, false);
}
@@ -451,26 +487,35 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
*/
static inline void blkg_get(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- atomic_inc(&blkg->refcnt);
+ percpu_ref_get(&blkg->refcnt);
}
/**
- * blkg_try_get - try and get a blkg reference
+ * blkg_tryget - try and get a blkg reference
* @blkg: blkg to get
*
* This is for use when doing an RCU lookup of the blkg. We may be in the midst
* of freeing this blkg, so we can only use it if the refcnt is not zero.
*/
-static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
+static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
- if (atomic_inc_not_zero(&blkg->refcnt))
- return blkg;
- return NULL;
+ return percpu_ref_tryget(&blkg->refcnt);
}
+/**
+ * blkg_tryget_closest - try and get a blkg ref on the closest blkg
+ * @blkg: blkg to get
+ *
+ * This walks up the blkg tree to find the closest non-dying blkg and returns
+ * the blkg it actually took a reference on, which may not be the blkg passed in.
+ */
+static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
+{
+ while (blkg && !percpu_ref_tryget(&blkg->refcnt))
+ blkg = blkg->parent;
-void __blkg_release_rcu(struct rcu_head *rcu);
+ return blkg;
+}
/**
* blkg_put - put a blkg reference
@@ -478,9 +523,7 @@ void __blkg_release_rcu(struct rcu_head *rcu);
*/
static inline void blkg_put(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- if (atomic_dec_and_test(&blkg->refcnt))
- call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+ percpu_ref_put(&blkg->refcnt);
}
/**
@@ -515,94 +558,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q, false)))
-/**
- * blk_get_rl - get request_list to use
- * @q: request_queue of interest
- * @bio: bio which will be attached to the allocated request (may be %NULL)
- *
- * The caller wants to allocate a request from @q to use for @bio. Find
- * the request_list to use and obtain a reference on it. Should be called
- * under queue_lock. This function is guaranteed to return non-%NULL
- * request_list.
- */
-static inline struct request_list *blk_get_rl(struct request_queue *q,
- struct bio *bio)
-{
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
-
- rcu_read_lock();
-
- blkcg = bio_blkcg(bio);
-
- /* bypass blkg lookup and use @q->root_rl directly for root */
- if (blkcg == &blkcg_root)
- goto root_rl;
-
- /*
- * Try to use blkg->rl. blkg lookup may fail under memory pressure
- * or if either the blkcg or queue is going away. Fall back to
- * root_rl in such cases.
- */
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg))
- goto root_rl;
-
- blkg_get(blkg);
- rcu_read_unlock();
- return &blkg->rl;
-root_rl:
- rcu_read_unlock();
- return &q->root_rl;
-}
-
-/**
- * blk_put_rl - put request_list
- * @rl: request_list to put
- *
- * Put the reference acquired by blk_get_rl(). Should be called under
- * queue_lock.
- */
-static inline void blk_put_rl(struct request_list *rl)
-{
- if (rl->blkg->blkcg != &blkcg_root)
- blkg_put(rl->blkg);
-}
-
-/**
- * blk_rq_set_rl - associate a request with a request_list
- * @rq: request of interest
- * @rl: target request_list
- *
- * Associate @rq with @rl so that accounting and freeing can know the
- * request_list @rq came from.
- */
-static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
-{
- rq->rl = rl;
-}
-
-/**
- * blk_rq_rl - return the request_list a request came from
- * @rq: request of interest
- *
- * Return the request_list @rq is allocated from.
- */
-static inline struct request_list *blk_rq_rl(struct request *rq)
-{
- return rq->rl;
-}
-
-struct request_list *__blk_queue_next_rl(struct request_list *rl,
- struct request_queue *q);
-/**
- * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
- *
- * Should be used under queue_lock.
- */
-#define blk_queue_for_each_rl(rl, q) \
- for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
-
static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
int ret;
@@ -797,32 +752,34 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
struct bio *bio) { return false; }
#endif
+
+static inline void blkcg_bio_issue_init(struct bio *bio)
+{
+ bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+}
+
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio)
{
- struct blkcg *blkcg;
struct blkcg_gq *blkg;
bool throtl = false;
rcu_read_lock();
- blkcg = bio_blkcg(bio);
-
- /* associate blkcg if bio hasn't attached one */
- bio_associate_blkcg(bio, &blkcg->css);
-
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg)) {
- spin_lock_irq(q->queue_lock);
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
- blkg = NULL;
- spin_unlock_irq(q->queue_lock);
+
+ if (!bio->bi_blkg) {
+ char b[BDEVNAME_SIZE];
+
+ WARN_ONCE(1,
+ "no blkg associated for bio on block-device: %s\n",
+ bio_devname(bio, b));
+ bio_associate_blkg(bio);
}
+ blkg = bio->bi_blkg;
+
throtl = blk_throtl_bio(q, blkg, bio);
if (!throtl) {
- blkg = blkg ?: q->root_blkg;
/*
* If the bio is flagged with BIO_QUEUE_ENTERED it means this
* is a split bio and we would have already accounted for the
@@ -834,6 +791,8 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
}
+ blkcg_bio_issue_init(bio);
+
rcu_read_unlock();
return !throtl;
}
@@ -930,6 +889,7 @@ static inline int blkcg_activate_policy(struct request_queue *q,
static inline void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol) { }
+static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
@@ -939,12 +899,7 @@ static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
-static inline struct request_list *blk_get_rl(struct request_queue *q,
- struct bio *bio) { return &q->root_rl; }
-static inline void blk_put_rl(struct request_list *rl) { }
-static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
-static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
-
+static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio) { return true; }
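The reference count moves from atomic_t to percpu_ref in this file, and lookups now use the tryget helpers declared above; blkg_tryget_closest() walks toward the root when intermediate blkgs are dying. A hedged sketch of the resulting lookup pattern, with error handling elided:

	#include <linux/blk-cgroup.h>

	/* Sketch: RCU-protected lookup followed by a walk to the closest
	 * live blkg; the result may be an ancestor of the lookup target and
	 * must eventually be released with blkg_put(). */
	static struct blkcg_gq *get_live_blkg(struct blkcg *blkcg,
					      struct request_queue *q)
	{
		struct blkcg_gq *blkg;

		rcu_read_lock();
		blkg = blkg_lookup(blkcg, q);
		blkg = blkg_tryget_closest(blkg);
		rcu_read_unlock();

		return blkg;
	}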
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
index 9f4c17f0d2d8..0b1f45c62623 100644
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -2,10 +2,10 @@
#ifndef _LINUX_BLK_MQ_PCI_H
#define _LINUX_BLK_MQ_PCI_H
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
struct pci_dev;
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
int offset);
#endif /* _LINUX_BLK_MQ_PCI_H */
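blk_mq_pci_map_queues() now takes a single blk_mq_queue_map, so drivers with multiple queue types call it once per map. A hedged sketch of a .map_queues implementation under the new signature; the driver structure is hypothetical, and HCTX_TYPE_DEFAULT is introduced in the blk-mq.h hunk below:

	#include <linux/blk-mq.h>
	#include <linux/blk-mq-pci.h>

	struct mydrv_dev {
		struct pci_dev *pdev;	/* hypothetical driver state */
	};

	/* Sketch: map only the default queue type from the PCI device's
	 * IRQ affinity, starting at vector offset 0. */
	static int mydrv_map_queues(struct blk_mq_tag_set *set)
	{
		struct mydrv_dev *mydev = set->driver_data;

		return blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					     mydev->pdev, 0);
	}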
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
index b4ade198007d..7b6ecf9ac4c3 100644
--- a/include/linux/blk-mq-rdma.h
+++ b/include/linux/blk-mq-rdma.h
@@ -4,7 +4,7 @@
struct blk_mq_tag_set;
struct ib_device;
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
struct ib_device *dev, int first_vec);
#endif /* _LINUX_BLK_MQ_RDMA_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
index 69b4da262c45..687ae287e1dc 100644
--- a/include/linux/blk-mq-virtio.h
+++ b/include/linux/blk-mq-virtio.h
@@ -2,10 +2,10 @@
#ifndef _LINUX_BLK_MQ_VIRTIO_H
#define _LINUX_BLK_MQ_VIRTIO_H
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
struct virtio_device;
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec);
#endif /* _LINUX_BLK_MQ_VIRTIO_H */
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2286dc12c6bc..0e030f5f76b6 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -37,7 +37,8 @@ struct blk_mq_hw_ctx {
struct blk_mq_ctx *dispatch_from;
unsigned int dispatch_busy;
- unsigned int nr_ctx;
+ unsigned short type;
+ unsigned short nr_ctx;
struct blk_mq_ctx **ctxs;
spinlock_t dispatch_wait_lock;
@@ -74,10 +75,31 @@ struct blk_mq_hw_ctx {
struct srcu_struct srcu[0];
};
+struct blk_mq_queue_map {
+ unsigned int *mq_map;
+ unsigned int nr_queues;
+ unsigned int queue_offset;
+};
+
+enum hctx_type {
+ HCTX_TYPE_DEFAULT, /* all I/O not otherwise accounted for */
+ HCTX_TYPE_READ, /* just for READ I/O */
+ HCTX_TYPE_POLL, /* polled I/O of any kind */
+
+ HCTX_MAX_TYPES,
+};
+
struct blk_mq_tag_set {
- unsigned int *mq_map;
+ /*
+ * map[] holds ctx -> hctx mappings, one map exists for each type
+ * that the driver wishes to support. There are no restrictions
+ * on maps being of the same size, and it's perfectly legal to
+ * share maps between types.
+ */
+ struct blk_mq_queue_map map[HCTX_MAX_TYPES];
+ unsigned int nr_maps; /* nr entries in map[] */
const struct blk_mq_ops *ops;
- unsigned int nr_hw_queues;
+ unsigned int nr_hw_queues; /* nr hw queues across maps */
unsigned int queue_depth; /* max hw supported */
unsigned int reserved_tags;
unsigned int cmd_size; /* per-request extra data */
@@ -99,6 +121,7 @@ struct blk_mq_queue_data {
typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
const struct blk_mq_queue_data *);
+typedef void (commit_rqs_fn)(struct blk_mq_hw_ctx *);
typedef bool (get_budget_fn)(struct blk_mq_hw_ctx *);
typedef void (put_budget_fn)(struct blk_mq_hw_ctx *);
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
@@ -109,11 +132,13 @@ typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
unsigned int);
-typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
bool);
-typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
-typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef int (poll_fn)(struct blk_mq_hw_ctx *);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
+typedef bool (busy_fn)(struct request_queue *);
+typedef void (complete_fn)(struct request *);
struct blk_mq_ops {
@@ -123,6 +148,15 @@ struct blk_mq_ops {
queue_rq_fn *queue_rq;
/*
+ * If a driver uses bd->last to judge when to submit requests to
+ * hardware, it must define this function. In case of errors that
+ * make us stop issuing further requests, this hook serves the
+ * purpose of kicking the hardware (which the last request otherwise
+ * would have done).
+ */
+ commit_rqs_fn *commit_rqs;
+
+ /*
* Reserve budget before queue request, once .queue_rq is
* run, it is driver's responsibility to release the
* reserved budget. Also we have to handle failure case
@@ -141,7 +175,7 @@ struct blk_mq_ops {
*/
poll_fn *poll;
- softirq_done_fn *complete;
+ complete_fn *complete;
/*
* Called when the block layer side of a hardware queue has been
@@ -165,6 +199,11 @@ struct blk_mq_ops {
/* Called from inside blk_get_request() */
void (*initialize_rq_fn)(struct request *rq);
+ /*
+ * If set, returns whether or not this queue currently is busy
+ */
+ busy_fn *busy;
+
map_queues_fn *map_queues;
#ifdef CONFIG_BLK_DEBUG_FS
@@ -218,6 +257,8 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+bool blk_mq_queue_inflight(struct request_queue *q);
+
enum {
/* return when out of requests */
BLK_MQ_REQ_NOWAIT = (__force blk_mq_req_flags_t)(1 << 0),
@@ -264,7 +305,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
-void blk_mq_complete_request(struct request *rq);
+bool blk_mq_complete_request(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
struct bio *bio);
bool blk_mq_queue_stopped(struct request_queue *q);
@@ -288,24 +329,12 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
-/**
- * blk_mq_mark_complete() - Set request state to complete
- * @rq: request to set to complete state
- *
- * Returns true if request state was successfully set to complete. If
- * successful, the caller is responsibile for seeing this request is ended, as
- * blk_mq_complete_request will not work again.
- */
-static inline bool blk_mq_mark_complete(struct request *rq)
-{
- return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
- MQ_RQ_IN_FLIGHT;
-}
+unsigned int blk_mq_rq_cpu(struct request *rq);
/*
* Driver command data is immediately after the request. So subtract request
@@ -328,4 +357,14 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
for ((i) = 0; (i) < (hctx)->nr_ctx && \
({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
+static inline blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx,
+ struct request *rq)
+{
+ if (rq->tag != -1)
+ return rq->tag | (hctx->queue_num << BLK_QC_T_SHIFT);
+
+ return rq->internal_tag | (hctx->queue_num << BLK_QC_T_SHIFT) |
+ BLK_QC_T_INTERNAL;
+}
+
#endif
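The new ->commit_rqs hook above pairs with bd->last batching: if queue_rq() defers the doorbell until the last request of a batch and the core stops issuing mid-batch, commit_rqs() flushes what was already queued. A hedged sketch; the mydrv_* helpers are hypothetical stubs:

	#include <linux/blk-mq.h>

	static void mydrv_post_request(void *priv, struct request *rq) { }
	static void mydrv_ring_doorbell(void *priv) { }

	static blk_status_t mydrv_queue_rq(struct blk_mq_hw_ctx *hctx,
					   const struct blk_mq_queue_data *bd)
	{
		mydrv_post_request(hctx->driver_data, bd->rq);
		if (bd->last)	/* batch complete: kick the hardware */
			mydrv_ring_doorbell(hctx->driver_data);
		return BLK_STS_OK;
	}

	/* Called by the core when it stops issuing before a bd->last
	 * request was seen. */
	static void mydrv_commit_rqs(struct blk_mq_hw_ctx *hctx)
	{
		mydrv_ring_doorbell(hctx->driver_data);
	}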
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 1dcf652ba0aa..5c7e7f859a24 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -174,11 +174,11 @@ struct bio {
void *bi_private;
#ifdef CONFIG_BLK_CGROUP
/*
- * Optional ioc and css associated with this bio. Put on bio
- * release. Read comment on top of bio_associate_current().
+ * Represents the association of the css and request_queue for the bio.
+ * If a bio goes direct to device, it will not have a blkg as it will
+ * not have a request_queue associated with it. The reference is put
+ * on release of the bio.
*/
- struct io_context *bi_ioc;
- struct cgroup_subsys_state *bi_css;
struct blkcg_gq *bi_blkg;
struct bio_issue bi_issue;
#endif
@@ -228,6 +228,7 @@ struct bio {
#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
* of this bio. */
#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */
+#define BIO_TRACKED 12 /* set if bio goes through the rq_qos path */
/* See BVEC_POOL_OFFSET below before adding new flags */
@@ -323,6 +324,8 @@ enum req_flag_bits {
/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
+ __REQ_HIPRI,
+
/* for driver use */
__REQ_DRV,
__REQ_SWAP, /* swapping request. */
@@ -343,8 +346,8 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
-
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
+#define REQ_HIPRI (1ULL << __REQ_HIPRI)
#define REQ_DRV (1ULL << __REQ_DRV)
#define REQ_SWAP (1ULL << __REQ_SWAP)
@@ -422,17 +425,6 @@ static inline bool blk_qc_t_valid(blk_qc_t cookie)
return cookie != BLK_QC_T_NONE;
}
-static inline blk_qc_t blk_tag_to_qc_t(unsigned int tag, unsigned int queue_num,
- bool internal)
-{
- blk_qc_t ret = tag | (queue_num << BLK_QC_T_SHIFT);
-
- if (internal)
- ret |= BLK_QC_T_INTERNAL;
-
- return ret;
-}
-
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
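request_to_qc_t(), added in blk-mq.h above, supersedes the removed blk_tag_to_qc_t(): the cookie packs the (possibly internal) tag with the hardware queue number. A hedged decode sketch using the helpers in this header:

	#include <linux/blk_types.h>

	/* Sketch: recover the hardware queue number from a polling cookie;
	 * blk_qc_t_to_queue_num() masks the internal-tag bit first. */
	static unsigned int cookie_to_hwq(blk_qc_t cookie)
	{
		if (!blk_qc_t_valid(cookie))
			return 0;	/* illustrative fallback */

		return blk_qc_t_to_queue_num(cookie);
	}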
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4293dc1cd160..45552e6eae1e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -58,25 +58,6 @@ struct blk_stat_callback;
typedef void (rq_end_io_fn)(struct request *, blk_status_t);
-#define BLK_RL_SYNCFULL (1U << 0)
-#define BLK_RL_ASYNCFULL (1U << 1)
-
-struct request_list {
- struct request_queue *q; /* the queue this rl belongs to */
-#ifdef CONFIG_BLK_CGROUP
- struct blkcg_gq *blkg; /* blkg this request pool belongs to */
-#endif
- /*
- * count[], starved[], and wait[] are indexed by
- * BLK_RW_SYNC/BLK_RW_ASYNC
- */
- int count[2];
- int starved[2];
- mempool_t *rq_pool;
- wait_queue_head_t wait[2];
- unsigned int flags;
-};
-
/* request flags */
typedef __u32 __bitwise req_flags_t;
@@ -85,8 +66,6 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_SORTED ((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED ((__force req_flags_t)(1 << 1))
-/* uses tagged queueing */
-#define RQF_QUEUED ((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER ((__force req_flags_t)(1 << 3))
/* request for flush sequence */
@@ -150,8 +129,8 @@ enum mq_rq_state {
struct request {
struct request_queue *q;
struct blk_mq_ctx *mq_ctx;
+ struct blk_mq_hw_ctx *mq_hctx;
- int cpu;
unsigned int cmd_flags; /* op and common flags */
req_flags_t rq_flags;
@@ -245,11 +224,7 @@ struct request {
refcount_t ref;
unsigned int timeout;
-
- /* access through blk_rq_set_deadline, blk_rq_deadline */
- unsigned long __deadline;
-
- struct list_head timeout_list;
+ unsigned long deadline;
union {
struct __call_single_data csd;
@@ -264,10 +239,6 @@ struct request {
/* for bidi */
struct request *next_rq;
-
-#ifdef CONFIG_BLK_CGROUP
- struct request_list *rl; /* rl this rq is alloced from */
-#endif
};
static inline bool blk_op_is_scsi(unsigned int op)
@@ -311,41 +282,21 @@ static inline unsigned short req_get_ioprio(struct request *req)
struct blk_queue_ctx;
-typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
-typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
-typedef int (prep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
struct bio_vec;
-typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
-typedef int (lld_busy_fn) (struct request_queue *q);
-typedef int (bsg_job_fn) (struct bsg_job *);
-typedef int (init_rq_fn)(struct request_queue *, struct request *, gfp_t);
-typedef void (exit_rq_fn)(struct request_queue *, struct request *);
enum blk_eh_timer_return {
BLK_EH_DONE, /* driver has completed the command */
BLK_EH_RESET_TIMER, /* reset timer and try again */
};
-typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
-
enum blk_queue_state {
Queue_down,
Queue_up,
};
-struct blk_queue_tag {
- struct request **tag_index; /* map of busy tags */
- unsigned long *tag_map; /* bit map of free/busy tags */
- int max_depth; /* what we will send to device */
- int real_max_depth; /* what the array can hold */
- atomic_t refcnt; /* map can be shared */
- int alloc_policy; /* tag allocation policy */
- int next_tag; /* next tag */
-};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
@@ -444,40 +395,15 @@ struct request_queue {
struct list_head queue_head;
struct request *last_merge;
struct elevator_queue *elevator;
- int nr_rqs[2]; /* # allocated [a]sync rqs */
- int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
struct blk_queue_stats *stats;
struct rq_qos *rq_qos;
- /*
- * If blkcg is not used, @q->root_rl serves all requests. If blkcg
- * is used, root blkg allocates from @q->root_rl and all other
- * blkgs from their own blkg->rl. Which one to use should be
- * determined using bio_request_list().
- */
- struct request_list root_rl;
-
- request_fn_proc *request_fn;
make_request_fn *make_request_fn;
- poll_q_fn *poll_fn;
- prep_rq_fn *prep_rq_fn;
- unprep_rq_fn *unprep_rq_fn;
- softirq_done_fn *softirq_done_fn;
- rq_timed_out_fn *rq_timed_out_fn;
dma_drain_needed_fn *dma_drain_needed;
- lld_busy_fn *lld_busy_fn;
- /* Called just after a request is allocated */
- init_rq_fn *init_rq_fn;
- /* Called just before a request is freed */
- exit_rq_fn *exit_rq_fn;
- /* Called from inside blk_get_request() */
- void (*initialize_rq_fn)(struct request *rq);
const struct blk_mq_ops *mq_ops;
- unsigned int *mq_map;
-
/* sw queues */
struct blk_mq_ctx __percpu *queue_ctx;
unsigned int nr_queues;
@@ -488,17 +414,6 @@ struct request_queue {
struct blk_mq_hw_ctx **queue_hw_ctx;
unsigned int nr_hw_queues;
- /*
- * Dispatch queue sorting
- */
- sector_t end_sector;
- struct request *boundary_rq;
-
- /*
- * Delayed queue handling
- */
- struct delayed_work delay_work;
-
struct backing_dev_info *backing_dev_info;
/*
@@ -529,13 +444,7 @@ struct request_queue {
*/
gfp_t bounce_gfp;
- /*
- * protects queue structures from reentrancy. ->__queue_lock should
- * _never_ be used directly, it is queue private. always use
- * ->queue_lock.
- */
- spinlock_t __queue_lock;
- spinlock_t *queue_lock;
+ spinlock_t queue_lock;
/*
* queue kobject
@@ -545,7 +454,7 @@ struct request_queue {
/*
* mq queue kobject
*/
- struct kobject mq_kobj;
+ struct kobject *mq_kobj;
#ifdef CONFIG_BLK_DEV_INTEGRITY
struct blk_integrity integrity;
@@ -561,27 +470,12 @@ struct request_queue {
* queue settings
*/
unsigned long nr_requests; /* Max # of requests */
- unsigned int nr_congestion_on;
- unsigned int nr_congestion_off;
- unsigned int nr_batching;
unsigned int dma_drain_size;
void *dma_drain_buffer;
unsigned int dma_pad_mask;
unsigned int dma_alignment;
- struct blk_queue_tag *queue_tags;
-
- unsigned int nr_sorted;
- unsigned int in_flight[2];
-
- /*
- * Number of active block driver functions for which blk_drain_queue()
- * must wait. Must be incremented around functions that unlock the
- * queue_lock internally, e.g. scsi_request_fn().
- */
- unsigned int request_fn_active;
-
unsigned int rq_timeout;
int poll_nsec;
@@ -590,7 +484,6 @@ struct request_queue {
struct timer_list timeout;
struct work_struct timeout_work;
- struct list_head timeout_list;
struct list_head icq_list;
#ifdef CONFIG_BLK_CGROUP
@@ -645,11 +538,9 @@ struct request_queue {
struct mutex sysfs_lock;
- int bypass_depth;
atomic_t mq_freeze_depth;
#if defined(CONFIG_BLK_DEV_BSG)
- bsg_job_fn *bsg_job_fn;
struct bsg_class_device bsg_dev;
#endif
@@ -669,12 +560,12 @@ struct request_queue {
#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
struct dentry *sched_debugfs_dir;
+ struct dentry *rqos_debugfs_dir;
#endif
bool mq_sysfs_init_done;
size_t cmd_size;
- void *rq_alloc_data;
struct work_struct release_work;
@@ -682,10 +573,8 @@ struct request_queue {
u64 write_hints[BLK_MAX_WRITE_HINTS];
};
-#define QUEUE_FLAG_QUEUED 0 /* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED 1 /* queue is stopped */
#define QUEUE_FLAG_DYING 2 /* queue being torn down */
-#define QUEUE_FLAG_BYPASS 3 /* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */
@@ -718,19 +607,15 @@ struct request_queue {
(1 << QUEUE_FLAG_ADD_RANDOM))
#define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
- (1 << QUEUE_FLAG_SAME_COMP) | \
- (1 << QUEUE_FLAG_POLL))
+ (1 << QUEUE_FLAG_SAME_COMP))
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
-#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
-#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q) \
@@ -757,32 +642,20 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);
-static inline int queue_in_flight(struct request_queue *q)
-{
- return q->in_flight[0] + q->in_flight[1];
-}
-
static inline bool blk_account_rq(struct request *rq)
{
return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}
-#define blk_rq_cpu_valid(rq) ((rq)->cpu != -1)
#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
-/* rq->queuelist of dequeued request must be list_empty() */
-#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
#define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist)
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
-/*
- * Driver can handle struct request, if it either has an old style
- * request_fn defined, or is blk-mq based.
- */
-static inline bool queue_is_rq_based(struct request_queue *q)
+static inline bool queue_is_mq(struct request_queue *q)
{
- return q->request_fn || q->mq_ops;
+ return q->mq_ops;
}
static inline unsigned int blk_queue_cluster(struct request_queue *q)
@@ -845,27 +718,6 @@ static inline bool rq_is_sync(struct request *rq)
return op_is_sync(rq->cmd_flags);
}
-static inline bool blk_rl_full(struct request_list *rl, bool sync)
-{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- return rl->flags & flag;
-}
-
-static inline void blk_set_rl_full(struct request_list *rl, bool sync)
-{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- rl->flags |= flag;
-}
-
-static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
-{
- unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
-
- rl->flags &= ~flag;
-}
-
static inline bool rq_mergeable(struct request *rq)
{
if (blk_rq_is_passthrough(rq))
@@ -902,16 +754,6 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
return q->nr_requests;
}
-/*
- * q->prep_rq_fn return values
- */
-enum {
- BLKPREP_OK, /* serve it */
- BLKPREP_KILL, /* fatal error, kill, return -EIO */
- BLKPREP_DEFER, /* leave on queue */
- BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
-};
-
extern unsigned long blk_max_low_pfn, blk_max_pfn;
/*
@@ -983,10 +825,8 @@ extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
-extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
blk_mq_req_flags_t flags);
-extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
struct bio_set *bs, gfp_t gfp_mask,
@@ -996,7 +836,6 @@ extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
-extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
@@ -1009,15 +848,7 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
-extern void blk_start_queue(struct request_queue *q);
-extern void blk_start_queue_async(struct request_queue *q);
-extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q);
-extern void __blk_run_queue_uncond(struct request_queue *q);
-extern void blk_run_queue(struct request_queue *);
-extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
@@ -1034,7 +865,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
-bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
@@ -1172,13 +1003,6 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
return nr_bios;
}
-/*
- * Request issue related functions.
- */
-extern struct request *blk_peek_request(struct request_queue *q);
-extern void blk_start_request(struct request *rq);
-extern struct request *blk_fetch_request(struct request_queue *q);
-
void blk_steal_bios(struct bio_list *list, struct request *rq);
/*
@@ -1196,27 +1020,18 @@ void blk_steal_bios(struct bio_list *list, struct request *rq);
*/
extern bool blk_update_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes);
-extern void blk_finish_request(struct request *rq, blk_status_t error);
-extern bool blk_end_request(struct request *rq, blk_status_t error,
- unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, blk_status_t error);
extern bool __blk_end_request(struct request *rq, blk_status_t error,
unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, blk_status_t error);
extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
-extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
-extern void blk_unprep_request(struct request *);
/*
* Access functions for manipulating queue properties
*/
-extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
- spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern int blk_init_allocated_queue(struct request_queue *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
@@ -1255,15 +1070,10 @@ extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size);
-extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
-extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
-extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
-extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
-extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
@@ -1299,8 +1109,7 @@ extern long nr_blockdev_pages(void);
bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
- spinlock_t *lock);
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
@@ -1317,9 +1126,10 @@ extern void blk_set_queue_dying(struct request_queue *);
* schedule() where blk_schedule_flush_plug() is called.
*/
struct blk_plug {
- struct list_head list; /* requests */
struct list_head mq_list; /* blk-mq requests */
struct list_head cb_list; /* md requires an unplug callback */
+ unsigned short rq_count;
+ bool multiple_queues;
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)
@@ -1358,31 +1168,10 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
struct blk_plug *plug = tsk->plug;
return plug &&
- (!list_empty(&plug->list) ||
- !list_empty(&plug->mq_list) ||
+ (!list_empty(&plug->mq_list) ||
!list_empty(&plug->cb_list));
}
-/*
- * tag stuff
- */
-extern int blk_queue_start_tag(struct request_queue *, struct request *);
-extern struct request *blk_queue_find_tag(struct request_queue *, int);
-extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
-extern void blk_queue_free_tags(struct request_queue *);
-extern int blk_queue_resize_tags(struct request_queue *, int);
-extern struct blk_queue_tag *blk_init_tags(int, int);
-extern void blk_free_tags(struct blk_queue_tag *);
-
-static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
- int tag)
-{
- if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
- return NULL;
- return bqt->tag_index[tag];
-}
-
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
@@ -1982,4 +1771,17 @@ static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
#endif /* CONFIG_BLOCK */
+static inline void blk_wake_io_task(struct task_struct *waiter)
+{
+ /*
+ * If we're polling, the task itself is doing the completions. For
+ * that case, we don't need to signal a wakeup, it's enough to just
+ * mark us as RUNNING.
+ */
+ if (waiter == current)
+ __set_current_state(TASK_RUNNING);
+ else
+ wake_up_process(waiter);
+}
+
#endif
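blk_wake_io_task(), added at the bottom of blkdev.h above, captures the
polled-I/O fast path: when the completing context is the waiter itself, a
full wakeup is wasted work and setting the task state back to RUNNING
suffices. A hedged sketch of a completion path using it; struct my_io and
every my_* name here are hypothetical:

/* 'waiter' is recorded at submission time, e.g. io->waiter = current. */
struct my_io {
	struct task_struct *waiter;
	bool done;
};

static void my_io_complete(struct my_io *io)
{
	/* Publish completion before any wakeup side effects. */
	smp_store_release(&io->done, true);

	/*
	 * If the submitter polls for completion (waiter == current),
	 * blk_wake_io_task() merely marks it TASK_RUNNING; otherwise it
	 * falls back to wake_up_process().
	 */
	blk_wake_io_task(io->waiter);
}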
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 33014ae73103..e734f163bd0b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,6 +23,7 @@ struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
+struct btf;
struct btf_type;
/* map is generic key/value storage optionally accesible by eBPF programs */
@@ -52,6 +53,7 @@ struct bpf_map_ops {
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
int (*map_check_btf)(const struct bpf_map *map,
+ const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
};
@@ -126,6 +128,7 @@ static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
}
int map_check_no_btf(const struct bpf_map *map,
+ const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
@@ -268,15 +271,18 @@ struct bpf_prog_offload_ops {
int (*insn_hook)(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int (*finalize)(struct bpf_verifier_env *env);
+ int (*prepare)(struct bpf_prog *prog);
+ int (*translate)(struct bpf_prog *prog);
+ void (*destroy)(struct bpf_prog *prog);
};
struct bpf_prog_offload {
struct bpf_prog *prog;
struct net_device *netdev;
+ struct bpf_offload_dev *offdev;
void *dev_priv;
struct list_head offloads;
bool dev_state;
- const struct bpf_prog_offload_ops *dev_ops;
void *jited_image;
u32 jited_len;
};
@@ -293,9 +299,11 @@ struct bpf_prog_aux {
atomic_t refcnt;
u32 used_map_cnt;
u32 max_ctx_offset;
+ u32 max_pkt_offset;
u32 stack_depth;
u32 id;
- u32 func_cnt;
+ u32 func_cnt; /* used by non-func prog as the number of func progs */
+ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
bool offload_requested;
struct bpf_prog **func;
void *jit_data; /* JIT specific data. arch dependent */
@@ -312,6 +320,30 @@ struct bpf_prog_aux {
void *security;
#endif
struct bpf_prog_offload *offload;
+ struct btf *btf;
+ struct bpf_func_info *func_info;
+ /* bpf_line_info loaded from userspace. linfo->insn_off
+ * has the xlated insn offset.
+ * Both the main and sub prog share the same linfo.
+ * The subprog can access its first linfo by
+ * using the linfo_idx.
+ */
+ struct bpf_line_info *linfo;
+ /* jited_linfo is the jited addr of the linfo. It has a
+ * one to one mapping to linfo:
+ * jited_linfo[i] is the jited addr for the linfo[i]->insn_off.
+ * Both the main and sub prog share the same jited_linfo.
+ * The subprog can access its first jited_linfo by
+ * using the linfo_idx.
+ */
+ void **jited_linfo;
+ u32 func_info_cnt;
+ u32 nr_linfo;
+ /* subprog can use linfo_idx to access its first linfo and
+ * jited_linfo.
+ * main prog always has linfo_idx == 0
+ */
+ u32 linfo_idx;
union {
struct work_struct work;
struct rcu_head rcu;
@@ -523,7 +555,8 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
}
/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
+ union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
@@ -691,7 +724,8 @@ int bpf_map_offload_get_next_key(struct bpf_map *map,
bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
-struct bpf_offload_dev *bpf_offload_dev_create(void);
+struct bpf_offload_dev *
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops);
void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
struct net_device *netdev);
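With ->prepare, ->translate and ->destroy joining bpf_prog_offload_ops,
and bpf_offload_dev_create() now taking the ops table, a driver binds its
callbacks once per offload device rather than per program via the removed
dev_ops pointer. A sketch of the resulting registration flow under those
assumptions; all my_* names are hypothetical:

static int my_bpf_prepare(struct bpf_prog *prog)   { return 0; }
static int my_bpf_translate(struct bpf_prog *prog) { return 0; }
static void my_bpf_destroy(struct bpf_prog *prog)  { }

static const struct bpf_prog_offload_ops my_offload_ops = {
	.prepare	= my_bpf_prepare,
	.translate	= my_bpf_translate,
	.destroy	= my_bpf_destroy,
};

static int my_probe(struct net_device *netdev)
{
	struct bpf_offload_dev *offdev;

	/* The ops table is supplied at creation time now. */
	offdev = bpf_offload_dev_create(&my_offload_ops);
	if (IS_ERR(offdev))
		return PTR_ERR(offdev);

	return bpf_offload_dev_netdev_register(offdev, netdev);
}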
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index d93e89761a8b..c233efc106c6 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -38,6 +38,7 @@ enum bpf_reg_liveness {
REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */
+ REG_LIVE_DONE = 4, /* liveness won't be updating this register anymore */
};
struct bpf_reg_state {
@@ -203,6 +204,7 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
struct bpf_subprog_info {
u32 start; /* insn idx of function entry point */
+ u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
};
@@ -223,6 +225,7 @@ struct bpf_verifier_env {
bool allow_ptr_leaks;
bool seen_direct_write;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+ const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
u32 subprog_cnt;
@@ -245,7 +248,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
return cur_func(env)->regs;
}
-int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
+int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 949e9af8d9d6..9cd00a37b8d3 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -28,6 +28,7 @@
#define PHY_ID_BCM89610 0x03625cd0
#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
#define PHY_ID_BCM7268 0xae025090
#define PHY_ID_BCM7271 0xae0253b0
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 6aeaf6472665..b356e0006731 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -31,6 +31,9 @@ struct device;
struct scatterlist;
struct request_queue;
+typedef int (bsg_job_fn) (struct bsg_job *);
+typedef enum blk_eh_timer_return (bsg_timeout_fn)(struct request *);
+
struct bsg_buffer {
unsigned int payload_len;
int sg_cnt;
@@ -72,7 +75,8 @@ struct bsg_job {
void bsg_job_done(struct bsg_job *job, int result,
unsigned int reply_payload_rcv_len);
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
- bsg_job_fn *job_fn, int dd_job_size);
+ bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size);
+void bsg_remove_queue(struct request_queue *q);
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);
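bsg_setup_queue() grows a bsg_timeout_fn argument, replacing the old
per-queue rq_timed_out_fn that disappears from struct request_queue in
the blkdev.h hunk above, and bsg_remove_queue() pairs with it for
teardown. A hedged sketch of an LLD adopting the new signature; the my_*
names are hypothetical:

static int my_bsg_job(struct bsg_job *job)
{
	/* Process the request, then complete it. */
	bsg_job_done(job, 0, 0);
	return 0;
}

static enum blk_eh_timer_return my_bsg_timeout(struct request *rq)
{
	return BLK_EH_DONE;	/* give up on the command */
}

static int my_attach(struct device *dev)
{
	struct request_queue *q;

	/* Timeout handling is passed here instead of set on the queue. */
	q = bsg_setup_queue(dev, dev_name(dev), my_bsg_job, my_bsg_timeout, 0);
	return PTR_ERR_OR_ZERO(q);
}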
diff --git a/include/linux/btf.h b/include/linux/btf.h
index e076c4697049..12502e25e767 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
struct btf;
+struct btf_member;
struct btf_type;
union bpf_attr;
@@ -46,5 +47,24 @@ void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
struct seq_file *m);
int btf_get_fd_by_id(u32 id);
u32 btf_id(const struct btf *btf);
+bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
+ const struct btf_member *m,
+ u32 expected_offset, u32 expected_size);
+
+#ifdef CONFIG_BPF_SYSCALL
+const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
+const char *btf_name_by_offset(const struct btf *btf, u32 offset);
+#else
+static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
+ u32 type_id)
+{
+ return NULL;
+}
+static inline const char *btf_name_by_offset(const struct btf *btf,
+ u32 offset)
+{
+ return NULL;
+}
+#endif
#endif
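btf_type_by_id() and btf_name_by_offset() gain !CONFIG_BPF_SYSCALL stubs
that return NULL, so callers can be written once and degrade gracefully.
A small hedged sketch of such defensive use; the helper name is
hypothetical:

static const char *my_btf_type_name(const struct btf *btf, u32 type_id)
{
	const struct btf_type *t = btf_type_by_id(btf, type_id);

	if (!t)
		return "<no btf>";	/* stubbed out, or bad id */
	return btf_name_by_offset(btf, t->name_off) ?: "<anon>";
}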
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 9d12757a65b0..9968332cceed 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -93,6 +93,8 @@ extern struct css_set init_css_set;
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 60c51871b04b..e443fa9fa859 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -1,12 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * linux/include/linux/clk-provider.h
- *
* Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
* Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_CLK_PROVIDER_H
#define __LINUX_CLK_PROVIDER_H
@@ -601,6 +596,12 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
* @lock: register lock
*
* Clock with adjustable fractional divider affecting its output frequency.
+ *
+ * Flags:
+ * CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
+ * are the values read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
+ * is set then the numerator and denominator are both the value read
+ * plus one.
*/
struct clk_fractional_divider {
struct clk_hw hw;
@@ -620,6 +621,8 @@ struct clk_fractional_divider {
#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
+#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
+
extern const struct clk_ops clk_fractional_divider_ops;
struct clk *clk_register_fractional_divider(struct device *dev,
const char *name, const char *parent_name, unsigned long flags,
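The new CLK_FRAC_DIVIDER_ZERO_BASED flag changes how the register fields
map to the divider ratio, as the kerneldoc above describes. A userspace
sketch of the resulting rate math, assuming the usual rate = parent * m / n
relation for clk_fractional_divider (the m/n register reads are elided):

#include <stdio.h>

static unsigned long fd_rate(unsigned long parent, unsigned int m,
			     unsigned int n, int zero_based)
{
	if (zero_based) {	/* CLK_FRAC_DIVIDER_ZERO_BASED */
		m += 1;		/* register value 0 means divide term 1 */
		n += 1;
	}
	return (unsigned long)(((unsigned long long)parent * m) / n);
}

int main(void)
{
	/* m=0, n=1 as read from hardware: 0 Hz normally, half the
	 * parent rate when the divider is zero based. */
	printf("%lu\n", fd_rate(48000000UL, 0, 1, 0));
	printf("%lu\n", fd_rate(48000000UL, 0, 1, 1));
	return 0;
}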
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
index e0c362363c38..85f8cf9d1226 100644
--- a/include/linux/clk/clk-conf.h
+++ b/include/linux/clk/clk-conf.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki <s.nawrocki@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/types.h>
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 88720b443cd6..056be0d03722 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -169,6 +169,10 @@ typedef struct {
compat_sigset_word sig[_COMPAT_NSIG_WORDS];
} compat_sigset_t;
+int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
+ sigset_t *set, sigset_t *oldset,
+ size_t sigsetsize);
+
struct compat_sigaction {
#ifndef __ARCH_HAS_IRIX_SIGACTION
compat_uptr_t sa_handler;
@@ -558,6 +562,12 @@ asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
struct io_event __user *events,
struct old_timespec32 __user *timeout,
const struct __compat_aio_sigset __user *usig);
+asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id,
+ compat_long_t min_nr,
+ compat_long_t nr,
+ struct io_event __user *events,
+ struct __kernel_timespec __user *timeout,
+ const struct __compat_aio_sigset __user *usig);
/* fs/cookies.c */
asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
@@ -643,11 +653,21 @@ asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *exp,
struct old_timespec32 __user *tsp,
void __user *sig);
+asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp,
+ compat_ulong_t __user *outp,
+ compat_ulong_t __user *exp,
+ struct __kernel_timespec __user *tsp,
+ void __user *sig);
asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
unsigned int nfds,
struct old_timespec32 __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
+asmlinkage long compat_sys_ppoll_time64(struct pollfd __user *ufds,
+ unsigned int nfds,
+ struct __kernel_timespec __user *tsp,
+ const compat_sigset_t __user *sigmask,
+ compat_size_t sigsetsize);
/* fs/signalfd.c */
asmlinkage long compat_sys_signalfd4(int ufd,
@@ -768,6 +788,9 @@ asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
struct compat_siginfo __user *uinfo,
struct old_timespec32 __user *uts, compat_size_t sigsetsize);
+asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese,
+ struct compat_siginfo __user *uinfo,
+ struct __kernel_timespec __user *uts, compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
/* No generic prototype for rt_sigreturn */
@@ -873,6 +896,9 @@ asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages,
asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
+asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags,
+ struct __kernel_timespec __user *timeout);
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags,
struct old_timespec32 __user *timeout);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 06396c1cf127..fc5004a4b07d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -99,13 +99,22 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
#define annotate_reachable() ({ \
- asm volatile("ANNOTATE_REACHABLE counter=%c0" \
- : : "i" (__COUNTER__)); \
+ asm volatile("%c0:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define annotate_unreachable() ({ \
- asm volatile("ANNOTATE_UNREACHABLE counter=%c0" \
- : : "i" (__COUNTER__)); \
+ asm volatile("%c0:\n\t" \
+ ".pushsection .discard.unreachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
})
+#define ASM_UNREACHABLE \
+ "999:\n\t" \
+ ".pushsection .discard.unreachable\n\t" \
+ ".long 999b - .\n\t" \
+ ".popsection\n\t"
#else
#define annotate_reachable()
#define annotate_unreachable()
@@ -293,45 +302,6 @@ static inline void *offset_to_ptr(const int *off)
return (void *)((unsigned long)off + *off);
}
-#else /* __ASSEMBLY__ */
-
-#ifdef __KERNEL__
-#ifndef LINKER_SCRIPT
-
-#ifdef CONFIG_STACK_VALIDATION
-.macro ANNOTATE_UNREACHABLE counter:req
-\counter:
- .pushsection .discard.unreachable
- .long \counter\()b -.
- .popsection
-.endm
-
-.macro ANNOTATE_REACHABLE counter:req
-\counter:
- .pushsection .discard.reachable
- .long \counter\()b -.
- .popsection
-.endm
-
-.macro ASM_UNREACHABLE
-999:
- .pushsection .discard.unreachable
- .long 999b - .
- .popsection
-.endm
-#else /* CONFIG_STACK_VALIDATION */
-.macro ANNOTATE_UNREACHABLE counter:req
-.endm
-
-.macro ANNOTATE_REACHABLE counter:req
-.endm
-
-.macro ASM_UNREACHABLE
-.endm
-#endif /* CONFIG_STACK_VALIDATION */
-
-#endif /* LINKER_SCRIPT */
-#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
/* Compile time object size, -1 for unknown */
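The reachability annotations now expand to inline asm directly instead of
invoking the ANNOTATE_* assembler macros that the hunk above removes from
the !__ASSEMBLY__ side, so C code no longer depends on those macros being
visible to the assembler. A sketch of what an annotate_unreachable() site
emits, with the __COUNTER__ operand fixed at 7 for illustration (GCC
extended asm assumed):

void f(void)
{
	/*
	 * %c0 prints the "i" constraint as a bare constant, yielding a
	 * unique local label; the .discard.unreachable entry records its
	 * address for objtool and is dropped at link time.
	 */
	asm volatile("%c0:\n\t"
		     ".pushsection .discard.unreachable\n\t"
		     ".long %c0b - .\n\t"
		     ".popsection\n\t" : : "i" (7));
	__builtin_unreachable();
}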
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index f8c400ba1929..fe07b680dd4a 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -37,7 +37,6 @@
# define __GCC4_has_attribute___designated_init__ 0
# define __GCC4_has_attribute___externally_visible__ 1
# define __GCC4_has_attribute___noclone__ 1
-# define __GCC4_has_attribute___optimize__ 1
# define __GCC4_has_attribute___nonstring__ 0
# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
#endif
@@ -163,17 +162,11 @@
/*
* Optional: not supported by clang
- * Note: icc does not recognize gcc's no-tracer
*
* gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute
- * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-optimize-function-attribute
*/
#if __has_attribute(__noclone__)
-# if __has_attribute(__optimize__)
-# define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
-# else
-# define __noclone __attribute__((__noclone__))
-# endif
+# define __noclone __attribute__((__noclone__))
#else
# define __noclone
#endif
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 4a3f9c09c92d..ba814f18cb4c 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -104,6 +104,60 @@ struct ftrace_likely_data {
unsigned long constant;
};
+#ifdef CONFIG_ENABLE_MUST_CHECK
+#define __must_check __attribute__((__warn_unused_result__))
+#else
+#define __must_check
+#endif
+
+#if defined(CC_USING_HOTPATCH)
+#define notrace __attribute__((hotpatch(0, 0)))
+#else
+#define notrace __attribute__((__no_instrument_function__))
+#endif
+
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ */
+#define __naked __attribute__((__naked__)) notrace
+
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+
+/*
+ * Force always-inline if the user requests it via the .config.
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function. This turns out to avoid the need for complex #ifdef
+ * directives. Suppress the warning in clang as well by using "unused"
+ * function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
+ * Do not use __always_inline here, since currently it expands to inline again
+ * (which would break users of __always_inline).
+ */
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+ !defined(CONFIG_OPTIMIZE_INLINING)
+#define inline inline __attribute__((__always_inline__)) __gnu_inline \
+ __maybe_unused notrace
+#else
+#define inline inline __gnu_inline \
+ __maybe_unused notrace
+#endif
+
+#define __inline__ inline
+#define __inline inline
+
+/*
+ * Rather than using noinline to prevent stack consumption, use
+ * noinline_for_stack instead, for documentation reasons.
+ */
+#define noinline_for_stack noinline
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
@@ -161,58 +215,4 @@ struct ftrace_likely_data {
#define __diag_error(compiler, version, option, comment) \
__diag_ ## compiler(version, error, option)
-#ifdef CONFIG_ENABLE_MUST_CHECK
-#define __must_check __attribute__((__warn_unused_result__))
-#else
-#define __must_check
-#endif
-
-#if defined(CC_USING_HOTPATCH)
-#define notrace __attribute__((hotpatch(0, 0)))
-#else
-#define notrace __attribute__((__no_instrument_function__))
-#endif
-
-/*
- * it doesn't make sense on ARM (currently the only user of __naked)
- * to trace naked functions because then mcount is called without
- * stack and frame pointer being set up and there is no chance to
- * restore the lr register to the value before mcount was called.
- */
-#define __naked __attribute__((__naked__)) notrace
-
-#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-
-/*
- * Force always-inline if the user requests it so via the .config.
- * GCC does not warn about unused static inline functions for
- * -Wunused-function. This turns out to avoid the need for complex #ifdef
- * directives. Suppress the warning in clang as well by using "unused"
- * function attribute, which is redundant but not harmful for gcc.
- * Prefer gnu_inline, so that extern inline functions do not emit an
- * externally visible function. This makes extern inline behave as per gnu89
- * semantics rather than c99. This prevents multiple symbol definition errors
- * of extern inline functions at link time.
- * A lot of inline functions can cause havoc with function tracing.
- * Do not use __always_inline here, since currently it expands to inline again
- * (which would break users of __always_inline).
- */
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
- !defined(CONFIG_OPTIMIZE_INLINING)
-#define inline inline __attribute__((__always_inline__)) __gnu_inline \
- __maybe_unused notrace
-#else
-#define inline inline __gnu_inline \
- __maybe_unused notrace
-#endif
-
-#define __inline__ inline
-#define __inline inline
-
-/*
- * Rather then using noinline to prevent stack consumption, use
- * noinline_for_stack instead. For documentation reasons.
- */
-#define noinline_for_stack noinline
-
#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/include/linux/cordic.h b/include/linux/cordic.h
index cf68ca4a508c..3d656f54d64f 100644
--- a/include/linux/cordic.h
+++ b/include/linux/cordic.h
@@ -18,6 +18,15 @@
#include <linux/types.h>
+#define CORDIC_ANGLE_GEN 39797
+#define CORDIC_PRECISION_SHIFT 16
+#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
+
+#define CORDIC_FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
+#define CORDIC_FLOAT(X) (((X) >= 0) \
+ ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
+ : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
+
/**
* struct cordic_iq - i/q coordinate.
*
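CORDIC_FIXED() and CORDIC_FLOAT() implement Q16 fixed point with
round-half-away-from-zero on conversion back to integer. A standalone
demonstration of that rounding, copying the macros from the hunk above
(s32 is typedef'd locally as a stand-in for the kernel type):

#include <stdint.h>
#include <stdio.h>

typedef int32_t s32;	/* stand-in for the kernel type */

#define CORDIC_PRECISION_SHIFT	16
#define CORDIC_FIXED(X)	((s32)((X) << CORDIC_PRECISION_SHIFT))
#define CORDIC_FLOAT(X)	(((X) >= 0) \
		? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
		: -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))

int main(void)
{
	s32 half = 1 << (CORDIC_PRECISION_SHIFT - 1);	/* 0.5 in Q16 */
	s32 x = CORDIC_FIXED(3) + half;			/* 3.5 in Q16 */

	/* 3.5 rounds to 4 and -3.5 to -4: half away from zero. */
	printf("%d %d\n", CORDIC_FLOAT(x), CORDIC_FLOAT(-x));
	return 0;
}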
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 882a9b9e34bc..c86d6d8bdfed 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -950,6 +950,14 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
}
#endif
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ struct cpufreq_governor *old_gov);
+#else
+static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ struct cpufreq_governor *old_gov) { }
+#endif
+
extern void arch_freq_prepare_all(void);
extern unsigned int arch_freq_get_on_cpu(int cpu);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index e0cd2baa8380..fd586d0301e7 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -164,6 +164,8 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
+ CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
+ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index faed7a8977e8..4dff74f48d4b 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -33,6 +33,8 @@ struct cpuidle_state_usage {
unsigned long long disable;
unsigned long long usage;
unsigned long long time; /* in US */
+ unsigned long long above; /* Number of times it's been too deep */
+ unsigned long long below; /* Number of times it's been too shallow */
#ifdef CONFIG_SUSPEND
unsigned long long s2idle_usage;
unsigned long long s2idle_time; /* in US */
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 3634ad6fe202..902ec171fc6d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -49,7 +49,6 @@
#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
-#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS 0x0000000b
@@ -77,12 +76,6 @@
#define CRYPTO_ALG_NEED_FALLBACK 0x00000100
/*
- * This bit is set for symmetric key ciphers that have already been wrapped
- * with a generic IV generator to prevent them from being wrapped again.
- */
-#define CRYPTO_ALG_GENIV 0x00000200
-
-/*
* Set if the algorithm has passed automated run-time testing. Note that
* if there is no run-time testing for a given algorithm it is considered
* to have passed.
@@ -157,7 +150,6 @@ struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
-struct skcipher_givcrypt_request;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -246,31 +238,16 @@ struct cipher_desc {
* be called in parallel with the same transformation object.
* @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
* and the conditions are exactly the same.
- * @givencrypt: Update the IV for encryption. With this function, a cipher
- * implementation may provide the function on how to update the IV
- * for encryption.
- * @givdecrypt: Update the IV for decryption. This is the reverse of
- * @givencrypt .
- * @geniv: The transformation implementation may use an "IV generator" provided
- * by the kernel crypto API. Several use cases have a predefined
- * approach how IVs are to be updated. For such use cases, the kernel
- * crypto API provides ready-to-use implementations that can be
- * referenced with this variable.
* @ivsize: IV size applicable for transformation. The consumer must provide an
* IV of exactly that size to perform the encrypt or decrypt operation.
*
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
+ * All fields except @ivsize are mandatory and must be filled.
*/
struct ablkcipher_alg {
int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int keylen);
int (*encrypt)(struct ablkcipher_request *req);
int (*decrypt)(struct ablkcipher_request *req);
- int (*givencrypt)(struct skcipher_givcrypt_request *req);
- int (*givdecrypt)(struct skcipher_givcrypt_request *req);
-
- const char *geniv;
unsigned int min_keysize;
unsigned int max_keysize;
@@ -284,10 +261,9 @@ struct ablkcipher_alg {
* @setkey: see struct ablkcipher_alg
* @encrypt: see struct ablkcipher_alg
* @decrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
* @ivsize: see struct ablkcipher_alg
*
- * All fields except @geniv and @ivsize are mandatory and must be filled.
+ * All fields except @ivsize are mandatory and must be filled.
*/
struct blkcipher_alg {
int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
@@ -299,8 +275,6 @@ struct blkcipher_alg {
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes);
- const char *geniv;
-
unsigned int min_keysize;
unsigned int max_keysize;
unsigned int ivsize;
@@ -369,6 +343,115 @@ struct compress_alg {
unsigned int slen, u8 *dst, unsigned int *dlen);
};
+#ifdef CONFIG_CRYPTO_STATS
+/*
+ * struct crypto_istat_aead - statistics for AEAD algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for AEAD requests
+ */
+struct crypto_istat_aead {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_akcipher - statistics for akcipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @verify_cnt: number of verify operations
+ * @sign_cnt: number of sign requests
+ * @err_cnt: number of errors for akcipher requests
+ */
+struct crypto_istat_akcipher {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t verify_cnt;
+ atomic64_t sign_cnt;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_cipher - statistics for cipher algorithm
+ * @encrypt_cnt: number of encrypt requests
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @err_cnt: number of errors for cipher requests
+ */
+struct crypto_istat_cipher {
+ atomic64_t encrypt_cnt;
+ atomic64_t encrypt_tlen;
+ atomic64_t decrypt_cnt;
+ atomic64_t decrypt_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_compress - statistics for compress algorithm
+ * @compress_cnt: number of compress requests
+ * @compress_tlen: total data size handled by compress requests
+ * @decompress_cnt: number of decompress requests
+ * @decompress_tlen: total data size handled by decompress requests
+ * @err_cnt: number of errors for compress requests
+ */
+struct crypto_istat_compress {
+ atomic64_t compress_cnt;
+ atomic64_t compress_tlen;
+ atomic64_t decompress_cnt;
+ atomic64_t decompress_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_hash - statistics for hash algorithm
+ * @hash_cnt: number of hash requests
+ * @hash_tlen: total data size hashed
+ * @err_cnt: number of errors for hash requests
+ */
+struct crypto_istat_hash {
+ atomic64_t hash_cnt;
+ atomic64_t hash_tlen;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_kpp - statistics for KPP algorithm
+ * @setsecret_cnt: number of setsecret operations
+ * @generate_public_key_cnt: number of generate_public_key operations
+ * @compute_shared_secret_cnt: number of compute_shared_secret operations
+ * @err_cnt: number of errors for KPP requests
+ */
+struct crypto_istat_kpp {
+ atomic64_t setsecret_cnt;
+ atomic64_t generate_public_key_cnt;
+ atomic64_t compute_shared_secret_cnt;
+ atomic64_t err_cnt;
+};
+
+/*
+ * struct crypto_istat_rng - statistics for RNG algorithm
+ * @generate_cnt: number of RNG generate requests
+ * @generate_tlen: total size of data generated by the RNG
+ * @seed_cnt: number of times the RNG was seeded
+ * @err_cnt: number of errors for RNG requests
+ */
+struct crypto_istat_rng {
+ atomic64_t generate_cnt;
+ atomic64_t generate_tlen;
+ atomic64_t seed_cnt;
+ atomic64_t err_cnt;
+};
+#endif /* CONFIG_CRYPTO_STATS */
#define cra_ablkcipher cra_u.ablkcipher
#define cra_blkcipher cra_u.blkcipher
@@ -454,32 +537,14 @@ struct compress_alg {
* @cra_refcnt: internally used
* @cra_destroy: internally used
*
- * All following statistics are for this crypto_alg
- * @encrypt_cnt: number of encrypt requests
- * @decrypt_cnt: number of decrypt requests
- * @compress_cnt: number of compress requests
- * @decompress_cnt: number of decompress requests
- * @generate_cnt: number of RNG generate requests
- * @seed_cnt: number of times the rng was seeded
- * @hash_cnt: number of hash requests
- * @sign_cnt: number of sign requests
- * @setsecret_cnt: number of setsecrey operation
- * @generate_public_key_cnt: number of generate_public_key operation
- * @verify_cnt: number of verify operation
- * @compute_shared_secret_cnt: number of compute_shared_secret operation
- * @encrypt_tlen: total data size handled by encrypt requests
- * @decrypt_tlen: total data size handled by decrypt requests
- * @compress_tlen: total data size handled by compress requests
- * @decompress_tlen: total data size handled by decompress requests
- * @generate_tlen: total data size of generated data by the RNG
- * @hash_tlen: total data size hashed
- * @akcipher_err_cnt: number of error for akcipher requests
- * @cipher_err_cnt: number of error for akcipher requests
- * @compress_err_cnt: number of error for akcipher requests
- * @aead_err_cnt: number of error for akcipher requests
- * @hash_err_cnt: number of error for akcipher requests
- * @rng_err_cnt: number of error for akcipher requests
- * @kpp_err_cnt: number of error for akcipher requests
+ * @stats: union of all possible crypto_istat_xxx structures
+ * @stats.aead: statistics for AEAD algorithm
+ * @stats.akcipher: statistics for akcipher algorithm
+ * @stats.cipher: statistics for cipher algorithm
+ * @stats.compress: statistics for compress algorithm
+ * @stats.hash: statistics for hash algorithm
+ * @stats.rng: statistics for rng algorithm
+ * @stats.kpp: statistics for KPP algorithm
*
* The struct crypto_alg describes a generic Crypto API algorithm and is common
* for all of the transformations. Any variable not documented here shall not
@@ -515,46 +580,86 @@ struct crypto_alg {
struct module *cra_module;
+#ifdef CONFIG_CRYPTO_STATS
union {
- atomic_t encrypt_cnt;
- atomic_t compress_cnt;
- atomic_t generate_cnt;
- atomic_t hash_cnt;
- atomic_t setsecret_cnt;
- };
- union {
- atomic64_t encrypt_tlen;
- atomic64_t compress_tlen;
- atomic64_t generate_tlen;
- atomic64_t hash_tlen;
- };
- union {
- atomic_t akcipher_err_cnt;
- atomic_t cipher_err_cnt;
- atomic_t compress_err_cnt;
- atomic_t aead_err_cnt;
- atomic_t hash_err_cnt;
- atomic_t rng_err_cnt;
- atomic_t kpp_err_cnt;
- };
- union {
- atomic_t decrypt_cnt;
- atomic_t decompress_cnt;
- atomic_t seed_cnt;
- atomic_t generate_public_key_cnt;
- };
- union {
- atomic64_t decrypt_tlen;
- atomic64_t decompress_tlen;
- };
- union {
- atomic_t verify_cnt;
- atomic_t compute_shared_secret_cnt;
- };
- atomic_t sign_cnt;
+ struct crypto_istat_aead aead;
+ struct crypto_istat_akcipher akcipher;
+ struct crypto_istat_cipher cipher;
+ struct crypto_istat_compress compress;
+ struct crypto_istat_hash hash;
+ struct crypto_istat_rng rng;
+ struct crypto_istat_kpp kpp;
+ } stats;
+#endif /* CONFIG_CRYPTO_STATS */
} CRYPTO_MINALIGN_ATTR;
+#ifdef CONFIG_CRYPTO_STATS
+void crypto_stats_init(struct crypto_alg *alg);
+void crypto_stats_get(struct crypto_alg *alg);
+void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
+void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
+void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg);
+void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg);
+void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg);
+void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg);
+void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret);
+void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret);
+void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret);
+void crypto_stats_rng_seed(struct crypto_alg *alg, int ret);
+void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret);
+void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
+void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg);
+#else
+static inline void crypto_stats_init(struct crypto_alg *alg)
+{}
+static inline void crypto_stats_get(struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_ahash_final(unsigned int nbytes, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_encrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_decrypt(unsigned int src_len, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
+{}
+static inline void crypto_stats_rng_generate(struct crypto_alg *alg, unsigned int dlen, int ret)
+{}
+static inline void crypto_stats_skcipher_encrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
+{}
+static inline void crypto_stats_skcipher_decrypt(unsigned int cryptlen, int ret, struct crypto_alg *alg)
+{}
+#endif
/*
* A helper struct for waiting for completion of async crypto ops
*/
@@ -800,14 +905,14 @@ static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
static inline u32 crypto_skcipher_type(u32 type)
{
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ type &= ~CRYPTO_ALG_TYPE_MASK;
type |= CRYPTO_ALG_TYPE_BLKCIPHER;
return type;
}
static inline u32 crypto_skcipher_mask(u32 mask)
{
- mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
+ mask &= ~CRYPTO_ALG_TYPE_MASK;
mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
return mask;
}
@@ -973,38 +1078,6 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
return __crypto_ablkcipher_cast(req->base.tfm);
}
-static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
- } else {
- atomic_inc(&crt->base->base.__crt_alg->encrypt_cnt);
- atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen);
- }
-#endif
-}
-
-static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req,
- int ret)
-{
-#ifdef CONFIG_CRYPTO_STATS
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
-
- if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
- } else {
- atomic_inc(&crt->base->base.__crt_alg->decrypt_cnt);
- atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen);
- }
-#endif
-}
-
/**
* crypto_ablkcipher_encrypt() - encrypt plaintext
* @req: reference to the ablkcipher_request handle that holds all information
@@ -1020,10 +1093,13 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+ struct crypto_alg *alg = crt->base->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+ crypto_stats_get(alg);
ret = crt->encrypt(req);
- crypto_stat_ablkcipher_encrypt(req, ret);
+ crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
return ret;
}
@@ -1042,10 +1118,13 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+ struct crypto_alg *alg = crt->base->base.__crt_alg;
+ unsigned int nbytes = req->nbytes;
int ret;
+ crypto_stats_get(alg);
ret = crt->decrypt(req);
- crypto_stat_ablkcipher_decrypt(req, ret);
+ crypto_stats_ablkcipher_decrypt(nbytes, ret, alg);
return ret;
}
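Note the ordering in the reworked wrappers above: alg and req->nbytes are
snapshotted before dispatching, because an asynchronous request may
complete and be freed before ->encrypt() or ->decrypt() returns, and
crypto_stats_get() is taken first, pinning the algorithm so its counters
remain valid for the trailing crypto_stats_*() call. A condensed
restatement of that pattern for a hypothetical wrapper:

static inline int my_instrumented_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	struct crypto_alg *alg = crt->base->base.__crt_alg;
	unsigned int nbytes = req->nbytes;	/* snapshot: req may vanish */
	int ret;

	crypto_stats_get(alg);			/* pin alg for the stats call */
	ret = crt->encrypt(req);
	crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
	return ret;
}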
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 450b28db9533..0dd316a74a29 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,6 +7,8 @@
#include <linux/radix-tree.h>
#include <asm/pgtable.h>
+typedef unsigned long dax_entry_t;
+
struct iomap_ops;
struct dax_device;
struct dax_operations {
@@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
-bool dax_lock_mapping_entry(struct page *page);
-void dax_unlock_mapping_entry(struct page *page);
+dax_entry_t dax_lock_page(struct page *page);
+void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
static inline bool bdev_dax_supported(struct block_device *bdev,
int blocksize)
@@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
return -EOPNOTSUPP;
}
-static inline bool dax_lock_mapping_entry(struct page *page)
+static inline dax_entry_t dax_lock_page(struct page *page)
{
if (IS_DAX(page->mapping->host))
- return true;
- return false;
+ return ~0UL;
+ return 0;
}
-static inline void dax_unlock_mapping_entry(struct page *page)
+static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
#endif
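
The lock/unlock pair trades the old bool for an opaque dax_entry_t cookie, so the unlock side no longer has to re-derive state from page->mapping, which may already be torn down by the time the caller unlocks. A hedged caller sketch (error handling reduced to the essentials; per the stub above, 0 means the page is not DAX-mapped):

        dax_entry_t cookie;

        cookie = dax_lock_page(page);
        if (!cookie)
                return -EBUSY;          /* page no longer has a DAX mapping */
        /* ... inspect the page while the mapping entry stays pinned ... */
        dax_unlock_page(page, cookie);
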
diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h
deleted file mode 100644
index 92521471517f..000000000000
--- a/include/linux/dell-led.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DELL_LED_H__
-#define __DELL_LED_H__
-
-int dell_micmute_led_set(int on);
-
-#endif
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index e4963b0f45da..fbffa74bfc1b 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -131,6 +131,9 @@ struct devfreq_dev_profile {
* @scaling_min_freq: Limit minimum frequency requested by OPP interface
* @scaling_max_freq: Limit maximum frequency requested by OPP interface
* @stop_polling: devfreq polling status of a device.
+ * @suspend_freq: frequency of a device set during suspend phase.
+ * @resume_freq: frequency of a device set during resume phase.
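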
+ * @suspend_count: suspend requests counter for a device.
* @total_trans: Number of devfreq transitions
* @trans_table: Statistics of devfreq transitions
* @time_in_state: Statistics of devfreq states
@@ -167,6 +170,10 @@ struct devfreq {
unsigned long scaling_max_freq;
bool stop_polling;
+ unsigned long suspend_freq;
+ unsigned long resume_freq;
+ atomic_t suspend_count;
+
/* information for device frequency transition */
unsigned int total_trans;
unsigned int *trans_table;
@@ -198,6 +205,9 @@ extern void devm_devfreq_remove_device(struct device *dev,
extern int devfreq_suspend_device(struct devfreq *devfreq);
extern int devfreq_resume_device(struct devfreq *devfreq);
+extern void devfreq_suspend(void);
+extern void devfreq_resume(void);
+
/**
* update_devfreq() - Reevaluate the device and configure frequency
* @devfreq: the devfreq device
@@ -324,6 +334,9 @@ static inline int devfreq_resume_device(struct devfreq *devfreq)
return 0;
}
+static inline void devfreq_suspend(void) {}
+static inline void devfreq_resume(void) {}
+
static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags)
{
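
Alongside the existing per-device calls, devfreq_suspend()/devfreq_resume() operate on every registered devfreq device, presumably invoked from the PM core around system sleep; the new suspend_freq/resume_freq fields record where each device should be parked and later restored. A minimal sketch of how a platform sleep path might use the pair (function names hypothetical):

        static int example_pm_suspend(void)
        {
                devfreq_suspend();      /* park all devices at their suspend_freq */
                return 0;
        }

        static void example_pm_resume(void)
        {
                devfreq_resume();       /* restore resume_freq and resume polling */
        }
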
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 02dba8cd033d..999e4b104410 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -541,6 +541,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
+struct dma_fence *dma_fence_get_stub(void);
u64 dma_fence_context_alloc(unsigned num);
#define DMA_FENCE_TRACE(f, fmt, args...) \
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index a52c6409bdc2..ba521d5506c9 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -732,7 +732,7 @@ static inline void dmam_release_declared_memory(struct device *dev)
static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
- unsigned long attrs = DMA_ATTR_NO_WARN;
+ unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
if (gfp & __GFP_NOWARN)
attrs |= DMA_ATTR_NO_WARN;
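
This is a fix, not a cleanup: dma_alloc_wc() exists to request write-combining memory, and seeding attrs with DMA_ATTR_NO_WARN instead of DMA_ATTR_WRITE_COMBINE dropped the WC attribute entirely. DMA_ATTR_NO_WARN is still applied, but only when the caller opts in with __GFP_NOWARN. Caller-side sketch (dev and size assumed in scope):

        void *buf;
        dma_addr_t dma;

        /* write-combining buffer; the caller handles failure itself,
           so also suppress the allocation-failure warning */
        buf = dma_alloc_wc(dev, size, &dma, GFP_KERNEL | __GFP_NOWARN);
        if (!buf)
                return -ENOMEM;
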
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 1d0c9ea8825d..342dabda9c7e 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -669,10 +669,4 @@ struct mem_ctl_info {
bool fake_inject_ue;
u16 fake_inject_count;
};
-
-/*
- * Maximum number of memory controllers in the coherent fabric.
- */
-#define EDAC_MAX_MCS 2 * MAX_NUMNODES
-
#endif
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 100ce4a4aff6..becd5d76a207 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1000,13 +1000,11 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec64 *ts);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#ifdef CONFIG_X86
-extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
extern void efi_find_mirror(void);
#else
-static inline void efi_free_boot_services(void) {}
static inline efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
@@ -1046,7 +1044,6 @@ extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
-extern void efi_reserve_boot_services(void);
extern int efi_get_fdt_params(struct efi_fdt_params *params);
extern struct kobject *efi_kobj;
@@ -1715,9 +1712,19 @@ extern struct efi_runtime_work efi_rts_work;
extern struct workqueue_struct *efi_rts_wq;
struct linux_efi_memreserve {
- phys_addr_t next;
- phys_addr_t base;
- phys_addr_t size;
+ int size; // allocated size of the array
+ atomic_t count; // number of entries used
+ phys_addr_t next; // pa of next struct instance
+ struct {
+ phys_addr_t base;
+ phys_addr_t size;
+ } entry[0];
};
+#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \
+ (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+
+#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
+ / sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+
#endif /* _LINUX_EFI_H */
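
With the flexible entry[0] array, the two macros convert between a byte size and an entry capacity. Assuming a 64-bit phys_addr_t and no struct padding, the header is 4 + 4 + 8 = 16 bytes and each entry is 16 bytes, so EFI_MEMRESERVE_COUNT(PAGE_SIZE) works out to (4096 - 16) / 16 = 255 entries on 4 KiB pages. A sketch of sizing one table to a page (allocation style assumed for illustration, not the actual efi code):

        struct linux_efi_memreserve *rsv;

        rsv = (void *)__get_free_page(GFP_KERNEL);      /* one table per page */
        if (!rsv)
                return -ENOMEM;
        rsv->size = EFI_MEMRESERVE_COUNT(PAGE_SIZE);    /* 255 with the sizes above */
        atomic_set(&rsv->count, 0);
        rsv->next = 0;                                  /* end of chain */
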
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 015bb59c0331..2e9e2763bf47 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -23,74 +23,6 @@ enum elv_merge {
ELEVATOR_DISCARD_MERGE = 3,
};
-typedef enum elv_merge (elevator_merge_fn) (struct request_queue *, struct request **,
- struct bio *);
-
-typedef void (elevator_merge_req_fn) (struct request_queue *, struct request *, struct request *);
-
-typedef void (elevator_merged_fn) (struct request_queue *, struct request *, enum elv_merge);
-
-typedef int (elevator_allow_bio_merge_fn) (struct request_queue *,
- struct request *, struct bio *);
-
-typedef int (elevator_allow_rq_merge_fn) (struct request_queue *,
- struct request *, struct request *);
-
-typedef void (elevator_bio_merged_fn) (struct request_queue *,
- struct request *, struct bio *);
-
-typedef int (elevator_dispatch_fn) (struct request_queue *, int);
-
-typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
-typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
-typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
-typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
-
-typedef void (elevator_init_icq_fn) (struct io_cq *);
-typedef void (elevator_exit_icq_fn) (struct io_cq *);
-typedef int (elevator_set_req_fn) (struct request_queue *, struct request *,
- struct bio *, gfp_t);
-typedef void (elevator_put_req_fn) (struct request *);
-typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
-typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
-
-typedef int (elevator_init_fn) (struct request_queue *,
- struct elevator_type *e);
-typedef void (elevator_exit_fn) (struct elevator_queue *);
-typedef void (elevator_registered_fn) (struct request_queue *);
-
-struct elevator_ops
-{
- elevator_merge_fn *elevator_merge_fn;
- elevator_merged_fn *elevator_merged_fn;
- elevator_merge_req_fn *elevator_merge_req_fn;
- elevator_allow_bio_merge_fn *elevator_allow_bio_merge_fn;
- elevator_allow_rq_merge_fn *elevator_allow_rq_merge_fn;
- elevator_bio_merged_fn *elevator_bio_merged_fn;
-
- elevator_dispatch_fn *elevator_dispatch_fn;
- elevator_add_req_fn *elevator_add_req_fn;
- elevator_activate_req_fn *elevator_activate_req_fn;
- elevator_deactivate_req_fn *elevator_deactivate_req_fn;
-
- elevator_completed_req_fn *elevator_completed_req_fn;
-
- elevator_request_list_fn *elevator_former_req_fn;
- elevator_request_list_fn *elevator_latter_req_fn;
-
- elevator_init_icq_fn *elevator_init_icq_fn; /* see iocontext.h */
- elevator_exit_icq_fn *elevator_exit_icq_fn; /* ditto */
-
- elevator_set_req_fn *elevator_set_req_fn;
- elevator_put_req_fn *elevator_put_req_fn;
-
- elevator_may_queue_fn *elevator_may_queue_fn;
-
- elevator_init_fn *elevator_init_fn;
- elevator_exit_fn *elevator_exit_fn;
- elevator_registered_fn *elevator_registered_fn;
-};
-
struct blk_mq_alloc_data;
struct blk_mq_hw_ctx;
@@ -137,17 +69,14 @@ struct elevator_type
struct kmem_cache *icq_cache;
/* fields provided by elevator implementation */
- union {
- struct elevator_ops sq;
- struct elevator_mq_ops mq;
- } ops;
+ struct elevator_mq_ops ops;
+
size_t icq_size; /* see iocontext.h */
size_t icq_align; /* ditto */
struct elv_fs_entry *elevator_attrs;
char elevator_name[ELV_NAME_MAX];
const char *elevator_alias;
struct module *elevator_owner;
- bool uses_mq;
#ifdef CONFIG_BLK_DEBUG_FS
const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
@@ -175,40 +104,25 @@ struct elevator_queue
struct kobject kobj;
struct mutex sysfs_lock;
unsigned int registered:1;
- unsigned int uses_mq:1;
DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
};
/*
* block elevator interface
*/
-extern void elv_dispatch_sort(struct request_queue *, struct request *);
-extern void elv_dispatch_add_tail(struct request_queue *, struct request *);
-extern void elv_add_request(struct request_queue *, struct request *, int);
-extern void __elv_add_request(struct request_queue *, struct request *, int);
extern enum elv_merge elv_merge(struct request_queue *, struct request **,
struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *,
enum elv_merge);
-extern void elv_bio_merged(struct request_queue *q, struct request *,
- struct bio *);
extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
-extern void elv_requeue_request(struct request_queue *, struct request *);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
-extern int elv_may_queue(struct request_queue *, unsigned int);
-extern void elv_completed_request(struct request_queue *, struct request *);
-extern int elv_set_request(struct request_queue *q, struct request *rq,
- struct bio *bio, gfp_t gfp_mask);
-extern void elv_put_request(struct request_queue *, struct request *);
-extern void elv_drain_elevator(struct request_queue *);
/*
* io scheduler registration
*/
-extern void __init load_default_elevator_module(void);
extern int elv_register(struct elevator_type *);
extern void elv_unregister(struct elevator_type *);
@@ -260,9 +174,5 @@ enum {
#define rq_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist)
-#else /* CONFIG_BLOCK */
-
-static inline void load_default_elevator_module(void) { }
-
#endif /* CONFIG_BLOCK */
#endif
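
With the legacy request path gone, elevator_type.ops is a bare struct elevator_mq_ops and the uses_mq flags disappear, so a scheduler fills in .ops directly instead of .ops.mq. A hedged registration sketch (handler names hypothetical; the insert_requests/dispatch_request fields are assumed from struct elevator_mq_ops, which this hunk does not show):

        static struct elevator_type example_sched = {
                .ops = {
                        .insert_requests        = example_insert_requests,
                        .dispatch_request       = example_dispatch_request,
                },
                .elevator_name  = "example",
                .elevator_owner = THIS_MODULE,
        };

        static int __init example_init(void)
        {
                return elv_register(&example_sched);    /* no uses_mq to set */
        }
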
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
new file mode 100644
index 000000000000..aa027f7bcb3e
--- /dev/null
+++ b/include/linux/energy_model.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ENERGY_MODEL_H
+#define _LINUX_ENERGY_MODEL_H
+#include <linux/cpumask.h>
+#include <linux/jump_label.h>
+#include <linux/kobject.h>
+#include <linux/rcupdate.h>
+#include <linux/sched/cpufreq.h>
+#include <linux/sched/topology.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_ENERGY_MODEL
+/**
+ * em_cap_state - Capacity state of a performance domain
+ * @frequency: The CPU frequency in KHz, for consistency with CPUFreq
+ * @power: The power consumed by 1 CPU at this level, in milli-watts
+ * @cost: The cost coefficient associated with this level, used during
+ * energy calculation. Equal to: power * max_frequency / frequency
+ */
+struct em_cap_state {
+ unsigned long frequency;
+ unsigned long power;
+ unsigned long cost;
+};
+
+/**
+ * em_perf_domain - Performance domain
+ * @table: List of capacity states, in ascending order
+ * @nr_cap_states: Number of capacity states
+ * @cpus: Cpumask covering the CPUs of the domain
+ *
+ * A "performance domain" represents a group of CPUs whose performance is
+ * scaled together. All CPUs of a performance domain must have the same
+ * micro-architecture. Performance domains often have a 1-to-1 mapping with
+ * CPUFreq policies.
+ */
+struct em_perf_domain {
+ struct em_cap_state *table;
+ int nr_cap_states;
+ unsigned long cpus[0];
+};
+
+#define EM_CPU_MAX_POWER 0xFFFF
+
+struct em_data_callback {
+ /**
+ * active_power() - Provide power at the next capacity state of a CPU
+ * @power : Active power at the capacity state in mW (modified)
+ * @freq : Frequency at the capacity state in kHz (modified)
+ * @cpu : CPU for which we do this operation
+ *
+ * active_power() must find the lowest capacity state of 'cpu' above
+ * 'freq' and update 'power' and 'freq' to the matching active power
+ * and frequency.
+ *
+ * The power is the one of a single CPU in the domain, expressed in
+ * milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER]
+ * range.
+ *
+ * Return 0 on success.
+ */
+ int (*active_power)(unsigned long *power, unsigned long *freq, int cpu);
+};
+#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
+
+struct em_perf_domain *em_cpu_get(int cpu);
+int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
+ struct em_data_callback *cb);
+
+/**
+ * em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain
+ * @pd : performance domain for which energy has to be estimated
+ * @max_util : highest utilization among CPUs of the domain
+ * @sum_util : sum of the utilization of all CPUs in the domain
+ *
+ * Return: the sum of the energy consumed by the CPUs of the domain assuming
+ * a capacity state satisfying the max utilization of the domain.
+ */
+static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util)
+{
+ unsigned long freq, scale_cpu;
+ struct em_cap_state *cs;
+ int i, cpu;
+
+ /*
+ * In order to predict the capacity state, map the utilization of the
+ * most utilized CPU of the performance domain to a requested frequency,
+ * like schedutil.
+ */
+ cpu = cpumask_first(to_cpumask(pd->cpus));
+ scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+ cs = &pd->table[pd->nr_cap_states - 1];
+ freq = map_util_freq(max_util, cs->frequency, scale_cpu);
+
+ /*
+ * Find the lowest capacity state of the Energy Model above the
+ * requested frequency.
+ */
+ for (i = 0; i < pd->nr_cap_states; i++) {
+ cs = &pd->table[i];
+ if (cs->frequency >= freq)
+ break;
+ }
+
+ /*
+ * The capacity of a CPU in the domain at that capacity state (cs)
+ * can be computed as:
+ *
+ * cs->freq * scale_cpu
+ * cs->cap = -------------------- (1)
+ * cpu_max_freq
+ *
+ * So, ignoring the costs of idle states (which are not available in
+ * the EM), the energy consumed by this CPU at that capacity state is
+ * estimated as:
+ *
+ * cs->power * cpu_util
+ * cpu_nrg = -------------------- (2)
+ * cs->cap
+ *
+ * since 'cpu_util / cs->cap' represents its percentage of busy time.
+ *
+ * NOTE: Although the result of this computation actually is in
+ * units of power, it can be manipulated as an energy value
+ * over a scheduling period, since it is assumed to be
+ * constant during that interval.
+ *
+ * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
+ * of two terms:
+ *
+ * cs->power * cpu_max_freq cpu_util
+ * cpu_nrg = ------------------------ * --------- (3)
+ * cs->freq scale_cpu
+ *
+ * The first term is static, and is stored in the em_cap_state struct
+ * as 'cs->cost'.
+ *
+ * Since all CPUs of the domain have the same micro-architecture, they
+ * share the same 'cs->cost', and the same CPU capacity. Hence, the
+ * total energy of the domain (which is the simple sum of the energy of
+ * all of its CPUs) can be factorized as:
+ *
+ * cs->cost * \Sum cpu_util
+ * pd_nrg = ------------------------ (4)
+ * scale_cpu
+ */
+ return cs->cost * sum_util / scale_cpu;
+}
+
+/**
+ * em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain
+ * @pd : performance domain for which this must be done
+ *
+ * Return: the number of capacity states in the performance domain table
+ */
+static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+{
+ return pd->nr_cap_states;
+}
+
+#else
+struct em_perf_domain {};
+struct em_data_callback {};
+#define EM_DATA_CB(_active_power_cb) { }
+
+static inline int em_register_perf_domain(cpumask_t *span,
+ unsigned int nr_states, struct em_data_callback *cb)
+{
+ return -EINVAL;
+}
+static inline struct em_perf_domain *em_cpu_get(int cpu)
+{
+ return NULL;
+}
+static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
+ unsigned long max_util, unsigned long sum_util)
+{
+ return 0;
+}
+static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
+{
+ return 0;
+}
+#endif
+
+#endif
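
Plugging numbers into equation (4) makes the precomputed cost concrete. Take a capacity state with power = 400 mW at frequency = 1000000 kHz in a table whose highest state is 2000000 kHz, so cost = 400 * 2000000 / 1000000 = 800. With scale_cpu = 1024 and sum_util = 300, the estimate is 800 * 300 / 1024 = 234, on the same milliwatt-derived scale as power. As a fragment (all values invented for illustration):

        unsigned long cost = 400UL * 2000000 / 1000000; /* static term of (3): 800 */
        unsigned long pd_nrg = cost * 300 / 1024;       /* equation (4): 234 */
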
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 572e11bb8696..2c0af7b00715 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -32,6 +32,7 @@
struct device;
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
unsigned char *arch_get_platform_mac_address(void);
+int nvmem_get_mac_address(struct device *dev, void *addrbuf);
u32 eth_get_headlen(void *data, unsigned int max_len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index a5a60691e48b..9e2142795335 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -37,10 +37,11 @@
/* Events that user can request to be notified on */
#define FANOTIFY_EVENTS (FAN_ACCESS | FAN_MODIFY | \
- FAN_CLOSE | FAN_OPEN)
+ FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC)
/* Events that require a permission response from user */
-#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM)
+#define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \
+ FAN_OPEN_EXEC_PERM)
/* Extra flags that may be reported with event or control handling of events */
#define FANOTIFY_EVENT_FLAGS (FAN_EVENT_ON_CHILD | FAN_ONDIR)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index de629b706d1d..8c8544b375eb 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -449,6 +449,13 @@ struct sock_reuseport;
offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
+#if BITS_PER_LONG == 64
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
+#else
+# define bpf_ctx_range_ptr(TYPE, MEMBER) \
+ offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
+#endif /* BITS_PER_LONG == 64 */
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \
@@ -668,24 +675,10 @@ static inline u32 bpf_ctx_off_adjust_machine(u32 size)
return size;
}
-static inline bool bpf_ctx_narrow_align_ok(u32 off, u32 size_access,
- u32 size_default)
-{
- size_default = bpf_ctx_off_adjust_machine(size_default);
- size_access = bpf_ctx_off_adjust_machine(size_access);
-
-#ifdef __LITTLE_ENDIAN
- return (off & (size_default - 1)) == 0;
-#else
- return (off & (size_default - 1)) + size_access == size_default;
-#endif
-}
-
static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
- return bpf_ctx_narrow_align_ok(off, size, size_default) &&
- size <= size_default && (size & (size - 1)) == 0;
+ return size <= size_default && (size & (size - 1)) == 0;
}
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
@@ -732,6 +725,13 @@ void bpf_prog_free(struct bpf_prog *fp);
bool bpf_opcode_in_insntable(u8 code);
+void bpf_prog_free_linfo(struct bpf_prog *prog);
+void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
+ const u32 *insn_to_jit_off);
+int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
+void bpf_prog_free_jited_linfo(struct bpf_prog *prog);
+void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog);
+
struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags);
@@ -854,7 +854,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
-extern int bpf_jit_limit;
+extern long bpf_jit_limit;
typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
@@ -866,6 +866,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
void bpf_jit_free(struct bpf_prog *fp);
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+ const struct bpf_insn *insn, bool extra_pass,
+ u64 *func_addr, bool *func_addr_fixed);
+
struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
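
Dropping bpf_ctx_narrow_align_ok() relaxes narrow context loads: the remaining check only insists on a power-of-two size no larger than the field, not on the load being naturally aligned within it. What the new predicate accepts and rejects:

        bpf_ctx_narrow_access_ok(0, 1, 4);      /* true: byte load from a u32 field */
        bpf_ctx_narrow_access_ok(1, 1, 4);      /* true now; the removed little-endian
                                                   alignment test rejected offset 1 */
        bpf_ctx_narrow_access_ok(0, 3, 4);      /* false: 3 is not a power of two */
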
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 29ada609de03..ebc55098faee 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -14,4 +14,5 @@
#include <linux/firmware/imx/types.h>
#include <linux/firmware/imx/svc/misc.h>
+#include <linux/firmware/imx/svc/pm.h>
#endif /* _SC_SCI_H */
diff --git a/include/linux/firmware/imx/svc/pm.h b/include/linux/firmware/imx/svc/pm.h
new file mode 100644
index 000000000000..1f6975dd37b0
--- /dev/null
+++ b/include/linux/firmware/imx/svc/pm.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP
+ *
+ * Header file containing the public API for the System Controller (SC)
+ * Power Management (PM) function. This includes functions for power state
+ * control, clock control, reset control, and wake-up event control.
+ *
+ * PM_SVC (SVC) Power Management Service
+ *
+ * Module for the Power Management (PM) service.
+ */
+
+#ifndef _SC_PM_API_H
+#define _SC_PM_API_H
+
+#include <linux/firmware/imx/sci.h>
+
+/*
+ * This type is used to indicate RPC PM function calls.
+ */
+enum imx_sc_pm_func {
+ IMX_SC_PM_FUNC_UNKNOWN = 0,
+ IMX_SC_PM_FUNC_SET_SYS_POWER_MODE = 19,
+ IMX_SC_PM_FUNC_SET_PARTITION_POWER_MODE = 1,
+ IMX_SC_PM_FUNC_GET_SYS_POWER_MODE = 2,
+ IMX_SC_PM_FUNC_SET_RESOURCE_POWER_MODE = 3,
+ IMX_SC_PM_FUNC_GET_RESOURCE_POWER_MODE = 4,
+ IMX_SC_PM_FUNC_REQ_LOW_POWER_MODE = 16,
+ IMX_SC_PM_FUNC_SET_CPU_RESUME_ADDR = 17,
+ IMX_SC_PM_FUNC_REQ_SYS_IF_POWER_MODE = 18,
+ IMX_SC_PM_FUNC_SET_CLOCK_RATE = 5,
+ IMX_SC_PM_FUNC_GET_CLOCK_RATE = 6,
+ IMX_SC_PM_FUNC_CLOCK_ENABLE = 7,
+ IMX_SC_PM_FUNC_SET_CLOCK_PARENT = 14,
+ IMX_SC_PM_FUNC_GET_CLOCK_PARENT = 15,
+ IMX_SC_PM_FUNC_RESET = 13,
+ IMX_SC_PM_FUNC_RESET_REASON = 10,
+ IMX_SC_PM_FUNC_BOOT = 8,
+ IMX_SC_PM_FUNC_REBOOT = 9,
+ IMX_SC_PM_FUNC_REBOOT_PARTITION = 12,
+ IMX_SC_PM_FUNC_CPU_START = 11,
+};
+
+/*
+ * Defines for ALL parameters
+ */
+#define IMX_SC_PM_CLK_ALL UINT8_MAX /* All clocks */
+
+/*
+ * Defines for SC PM Power Mode
+ */
+#define IMX_SC_PM_PW_MODE_OFF 0 /* Power off */
+#define IMX_SC_PM_PW_MODE_STBY 1 /* Power in standby */
+#define IMX_SC_PM_PW_MODE_LP 2 /* Power in low-power */
+#define IMX_SC_PM_PW_MODE_ON 3 /* Power on */
+
+/*
+ * Defines for SC PM CLK
+ */
+#define IMX_SC_PM_CLK_SLV_BUS 0 /* Slave bus clock */
+#define IMX_SC_PM_CLK_MST_BUS 1 /* Master bus clock */
+#define IMX_SC_PM_CLK_PER 2 /* Peripheral clock */
+#define IMX_SC_PM_CLK_PHY 3 /* Phy clock */
+#define IMX_SC_PM_CLK_MISC 4 /* Misc clock */
+#define IMX_SC_PM_CLK_MISC0 0 /* Misc 0 clock */
+#define IMX_SC_PM_CLK_MISC1 1 /* Misc 1 clock */
+#define IMX_SC_PM_CLK_MISC2 2 /* Misc 2 clock */
+#define IMX_SC_PM_CLK_MISC3 3 /* Misc 3 clock */
+#define IMX_SC_PM_CLK_MISC4 4 /* Misc 4 clock */
+#define IMX_SC_PM_CLK_CPU 2 /* CPU clock */
+#define IMX_SC_PM_CLK_PLL 4 /* PLL */
+#define IMX_SC_PM_CLK_BYPASS 4 /* Bypass clock */
+
+/*
+ * Defines for SC PM CLK Parent
+ */
+#define IMX_SC_PM_PARENT_XTAL 0 /* Parent is XTAL. */
+#define IMX_SC_PM_PARENT_PLL0 1 /* Parent is PLL0 */
+#define IMX_SC_PM_PARENT_PLL1 2 /* Parent is PLL1 or PLL0/2 */
+#define IMX_SC_PM_PARENT_PLL2 3 /* Parent is PLL2 or PLL0/4 */
+#define IMX_SC_PM_PARENT_BYPS 4 /* Parent is a bypass clock. */
+
+#endif /* _SC_PM_API_H */
diff --git a/include/linux/firmware/imx/types.h b/include/linux/firmware/imx/types.h
index 9cbf0c4a6069..80821100e85f 100644
--- a/include/linux/firmware/imx/types.h
+++ b/include/linux/firmware/imx/types.h
@@ -10,558 +10,6 @@
#define _SC_TYPES_H
/*
- * This type is used to indicate a resource. Resources include peripherals
- * and bus masters (but not memory regions). Note items from list should
- * never be changed or removed (only added to at the end of the list).
- */
-enum imx_sc_rsrc {
- IMX_SC_R_A53 = 0,
- IMX_SC_R_A53_0 = 1,
- IMX_SC_R_A53_1 = 2,
- IMX_SC_R_A53_2 = 3,
- IMX_SC_R_A53_3 = 4,
- IMX_SC_R_A72 = 5,
- IMX_SC_R_A72_0 = 6,
- IMX_SC_R_A72_1 = 7,
- IMX_SC_R_A72_2 = 8,
- IMX_SC_R_A72_3 = 9,
- IMX_SC_R_CCI = 10,
- IMX_SC_R_DB = 11,
- IMX_SC_R_DRC_0 = 12,
- IMX_SC_R_DRC_1 = 13,
- IMX_SC_R_GIC_SMMU = 14,
- IMX_SC_R_IRQSTR_M4_0 = 15,
- IMX_SC_R_IRQSTR_M4_1 = 16,
- IMX_SC_R_SMMU = 17,
- IMX_SC_R_GIC = 18,
- IMX_SC_R_DC_0_BLIT0 = 19,
- IMX_SC_R_DC_0_BLIT1 = 20,
- IMX_SC_R_DC_0_BLIT2 = 21,
- IMX_SC_R_DC_0_BLIT_OUT = 22,
- IMX_SC_R_DC_0_CAPTURE0 = 23,
- IMX_SC_R_DC_0_CAPTURE1 = 24,
- IMX_SC_R_DC_0_WARP = 25,
- IMX_SC_R_DC_0_INTEGRAL0 = 26,
- IMX_SC_R_DC_0_INTEGRAL1 = 27,
- IMX_SC_R_DC_0_VIDEO0 = 28,
- IMX_SC_R_DC_0_VIDEO1 = 29,
- IMX_SC_R_DC_0_FRAC0 = 30,
- IMX_SC_R_DC_0_FRAC1 = 31,
- IMX_SC_R_DC_0 = 32,
- IMX_SC_R_GPU_2_PID0 = 33,
- IMX_SC_R_DC_0_PLL_0 = 34,
- IMX_SC_R_DC_0_PLL_1 = 35,
- IMX_SC_R_DC_1_BLIT0 = 36,
- IMX_SC_R_DC_1_BLIT1 = 37,
- IMX_SC_R_DC_1_BLIT2 = 38,
- IMX_SC_R_DC_1_BLIT_OUT = 39,
- IMX_SC_R_DC_1_CAPTURE0 = 40,
- IMX_SC_R_DC_1_CAPTURE1 = 41,
- IMX_SC_R_DC_1_WARP = 42,
- IMX_SC_R_DC_1_INTEGRAL0 = 43,
- IMX_SC_R_DC_1_INTEGRAL1 = 44,
- IMX_SC_R_DC_1_VIDEO0 = 45,
- IMX_SC_R_DC_1_VIDEO1 = 46,
- IMX_SC_R_DC_1_FRAC0 = 47,
- IMX_SC_R_DC_1_FRAC1 = 48,
- IMX_SC_R_DC_1 = 49,
- IMX_SC_R_GPU_3_PID0 = 50,
- IMX_SC_R_DC_1_PLL_0 = 51,
- IMX_SC_R_DC_1_PLL_1 = 52,
- IMX_SC_R_SPI_0 = 53,
- IMX_SC_R_SPI_1 = 54,
- IMX_SC_R_SPI_2 = 55,
- IMX_SC_R_SPI_3 = 56,
- IMX_SC_R_UART_0 = 57,
- IMX_SC_R_UART_1 = 58,
- IMX_SC_R_UART_2 = 59,
- IMX_SC_R_UART_3 = 60,
- IMX_SC_R_UART_4 = 61,
- IMX_SC_R_EMVSIM_0 = 62,
- IMX_SC_R_EMVSIM_1 = 63,
- IMX_SC_R_DMA_0_CH0 = 64,
- IMX_SC_R_DMA_0_CH1 = 65,
- IMX_SC_R_DMA_0_CH2 = 66,
- IMX_SC_R_DMA_0_CH3 = 67,
- IMX_SC_R_DMA_0_CH4 = 68,
- IMX_SC_R_DMA_0_CH5 = 69,
- IMX_SC_R_DMA_0_CH6 = 70,
- IMX_SC_R_DMA_0_CH7 = 71,
- IMX_SC_R_DMA_0_CH8 = 72,
- IMX_SC_R_DMA_0_CH9 = 73,
- IMX_SC_R_DMA_0_CH10 = 74,
- IMX_SC_R_DMA_0_CH11 = 75,
- IMX_SC_R_DMA_0_CH12 = 76,
- IMX_SC_R_DMA_0_CH13 = 77,
- IMX_SC_R_DMA_0_CH14 = 78,
- IMX_SC_R_DMA_0_CH15 = 79,
- IMX_SC_R_DMA_0_CH16 = 80,
- IMX_SC_R_DMA_0_CH17 = 81,
- IMX_SC_R_DMA_0_CH18 = 82,
- IMX_SC_R_DMA_0_CH19 = 83,
- IMX_SC_R_DMA_0_CH20 = 84,
- IMX_SC_R_DMA_0_CH21 = 85,
- IMX_SC_R_DMA_0_CH22 = 86,
- IMX_SC_R_DMA_0_CH23 = 87,
- IMX_SC_R_DMA_0_CH24 = 88,
- IMX_SC_R_DMA_0_CH25 = 89,
- IMX_SC_R_DMA_0_CH26 = 90,
- IMX_SC_R_DMA_0_CH27 = 91,
- IMX_SC_R_DMA_0_CH28 = 92,
- IMX_SC_R_DMA_0_CH29 = 93,
- IMX_SC_R_DMA_0_CH30 = 94,
- IMX_SC_R_DMA_0_CH31 = 95,
- IMX_SC_R_I2C_0 = 96,
- IMX_SC_R_I2C_1 = 97,
- IMX_SC_R_I2C_2 = 98,
- IMX_SC_R_I2C_3 = 99,
- IMX_SC_R_I2C_4 = 100,
- IMX_SC_R_ADC_0 = 101,
- IMX_SC_R_ADC_1 = 102,
- IMX_SC_R_FTM_0 = 103,
- IMX_SC_R_FTM_1 = 104,
- IMX_SC_R_CAN_0 = 105,
- IMX_SC_R_CAN_1 = 106,
- IMX_SC_R_CAN_2 = 107,
- IMX_SC_R_DMA_1_CH0 = 108,
- IMX_SC_R_DMA_1_CH1 = 109,
- IMX_SC_R_DMA_1_CH2 = 110,
- IMX_SC_R_DMA_1_CH3 = 111,
- IMX_SC_R_DMA_1_CH4 = 112,
- IMX_SC_R_DMA_1_CH5 = 113,
- IMX_SC_R_DMA_1_CH6 = 114,
- IMX_SC_R_DMA_1_CH7 = 115,
- IMX_SC_R_DMA_1_CH8 = 116,
- IMX_SC_R_DMA_1_CH9 = 117,
- IMX_SC_R_DMA_1_CH10 = 118,
- IMX_SC_R_DMA_1_CH11 = 119,
- IMX_SC_R_DMA_1_CH12 = 120,
- IMX_SC_R_DMA_1_CH13 = 121,
- IMX_SC_R_DMA_1_CH14 = 122,
- IMX_SC_R_DMA_1_CH15 = 123,
- IMX_SC_R_DMA_1_CH16 = 124,
- IMX_SC_R_DMA_1_CH17 = 125,
- IMX_SC_R_DMA_1_CH18 = 126,
- IMX_SC_R_DMA_1_CH19 = 127,
- IMX_SC_R_DMA_1_CH20 = 128,
- IMX_SC_R_DMA_1_CH21 = 129,
- IMX_SC_R_DMA_1_CH22 = 130,
- IMX_SC_R_DMA_1_CH23 = 131,
- IMX_SC_R_DMA_1_CH24 = 132,
- IMX_SC_R_DMA_1_CH25 = 133,
- IMX_SC_R_DMA_1_CH26 = 134,
- IMX_SC_R_DMA_1_CH27 = 135,
- IMX_SC_R_DMA_1_CH28 = 136,
- IMX_SC_R_DMA_1_CH29 = 137,
- IMX_SC_R_DMA_1_CH30 = 138,
- IMX_SC_R_DMA_1_CH31 = 139,
- IMX_SC_R_UNUSED1 = 140,
- IMX_SC_R_UNUSED2 = 141,
- IMX_SC_R_UNUSED3 = 142,
- IMX_SC_R_UNUSED4 = 143,
- IMX_SC_R_GPU_0_PID0 = 144,
- IMX_SC_R_GPU_0_PID1 = 145,
- IMX_SC_R_GPU_0_PID2 = 146,
- IMX_SC_R_GPU_0_PID3 = 147,
- IMX_SC_R_GPU_1_PID0 = 148,
- IMX_SC_R_GPU_1_PID1 = 149,
- IMX_SC_R_GPU_1_PID2 = 150,
- IMX_SC_R_GPU_1_PID3 = 151,
- IMX_SC_R_PCIE_A = 152,
- IMX_SC_R_SERDES_0 = 153,
- IMX_SC_R_MATCH_0 = 154,
- IMX_SC_R_MATCH_1 = 155,
- IMX_SC_R_MATCH_2 = 156,
- IMX_SC_R_MATCH_3 = 157,
- IMX_SC_R_MATCH_4 = 158,
- IMX_SC_R_MATCH_5 = 159,
- IMX_SC_R_MATCH_6 = 160,
- IMX_SC_R_MATCH_7 = 161,
- IMX_SC_R_MATCH_8 = 162,
- IMX_SC_R_MATCH_9 = 163,
- IMX_SC_R_MATCH_10 = 164,
- IMX_SC_R_MATCH_11 = 165,
- IMX_SC_R_MATCH_12 = 166,
- IMX_SC_R_MATCH_13 = 167,
- IMX_SC_R_MATCH_14 = 168,
- IMX_SC_R_PCIE_B = 169,
- IMX_SC_R_SATA_0 = 170,
- IMX_SC_R_SERDES_1 = 171,
- IMX_SC_R_HSIO_GPIO = 172,
- IMX_SC_R_MATCH_15 = 173,
- IMX_SC_R_MATCH_16 = 174,
- IMX_SC_R_MATCH_17 = 175,
- IMX_SC_R_MATCH_18 = 176,
- IMX_SC_R_MATCH_19 = 177,
- IMX_SC_R_MATCH_20 = 178,
- IMX_SC_R_MATCH_21 = 179,
- IMX_SC_R_MATCH_22 = 180,
- IMX_SC_R_MATCH_23 = 181,
- IMX_SC_R_MATCH_24 = 182,
- IMX_SC_R_MATCH_25 = 183,
- IMX_SC_R_MATCH_26 = 184,
- IMX_SC_R_MATCH_27 = 185,
- IMX_SC_R_MATCH_28 = 186,
- IMX_SC_R_LCD_0 = 187,
- IMX_SC_R_LCD_0_PWM_0 = 188,
- IMX_SC_R_LCD_0_I2C_0 = 189,
- IMX_SC_R_LCD_0_I2C_1 = 190,
- IMX_SC_R_PWM_0 = 191,
- IMX_SC_R_PWM_1 = 192,
- IMX_SC_R_PWM_2 = 193,
- IMX_SC_R_PWM_3 = 194,
- IMX_SC_R_PWM_4 = 195,
- IMX_SC_R_PWM_5 = 196,
- IMX_SC_R_PWM_6 = 197,
- IMX_SC_R_PWM_7 = 198,
- IMX_SC_R_GPIO_0 = 199,
- IMX_SC_R_GPIO_1 = 200,
- IMX_SC_R_GPIO_2 = 201,
- IMX_SC_R_GPIO_3 = 202,
- IMX_SC_R_GPIO_4 = 203,
- IMX_SC_R_GPIO_5 = 204,
- IMX_SC_R_GPIO_6 = 205,
- IMX_SC_R_GPIO_7 = 206,
- IMX_SC_R_GPT_0 = 207,
- IMX_SC_R_GPT_1 = 208,
- IMX_SC_R_GPT_2 = 209,
- IMX_SC_R_GPT_3 = 210,
- IMX_SC_R_GPT_4 = 211,
- IMX_SC_R_KPP = 212,
- IMX_SC_R_MU_0A = 213,
- IMX_SC_R_MU_1A = 214,
- IMX_SC_R_MU_2A = 215,
- IMX_SC_R_MU_3A = 216,
- IMX_SC_R_MU_4A = 217,
- IMX_SC_R_MU_5A = 218,
- IMX_SC_R_MU_6A = 219,
- IMX_SC_R_MU_7A = 220,
- IMX_SC_R_MU_8A = 221,
- IMX_SC_R_MU_9A = 222,
- IMX_SC_R_MU_10A = 223,
- IMX_SC_R_MU_11A = 224,
- IMX_SC_R_MU_12A = 225,
- IMX_SC_R_MU_13A = 226,
- IMX_SC_R_MU_5B = 227,
- IMX_SC_R_MU_6B = 228,
- IMX_SC_R_MU_7B = 229,
- IMX_SC_R_MU_8B = 230,
- IMX_SC_R_MU_9B = 231,
- IMX_SC_R_MU_10B = 232,
- IMX_SC_R_MU_11B = 233,
- IMX_SC_R_MU_12B = 234,
- IMX_SC_R_MU_13B = 235,
- IMX_SC_R_ROM_0 = 236,
- IMX_SC_R_FSPI_0 = 237,
- IMX_SC_R_FSPI_1 = 238,
- IMX_SC_R_IEE = 239,
- IMX_SC_R_IEE_R0 = 240,
- IMX_SC_R_IEE_R1 = 241,
- IMX_SC_R_IEE_R2 = 242,
- IMX_SC_R_IEE_R3 = 243,
- IMX_SC_R_IEE_R4 = 244,
- IMX_SC_R_IEE_R5 = 245,
- IMX_SC_R_IEE_R6 = 246,
- IMX_SC_R_IEE_R7 = 247,
- IMX_SC_R_SDHC_0 = 248,
- IMX_SC_R_SDHC_1 = 249,
- IMX_SC_R_SDHC_2 = 250,
- IMX_SC_R_ENET_0 = 251,
- IMX_SC_R_ENET_1 = 252,
- IMX_SC_R_MLB_0 = 253,
- IMX_SC_R_DMA_2_CH0 = 254,
- IMX_SC_R_DMA_2_CH1 = 255,
- IMX_SC_R_DMA_2_CH2 = 256,
- IMX_SC_R_DMA_2_CH3 = 257,
- IMX_SC_R_DMA_2_CH4 = 258,
- IMX_SC_R_USB_0 = 259,
- IMX_SC_R_USB_1 = 260,
- IMX_SC_R_USB_0_PHY = 261,
- IMX_SC_R_USB_2 = 262,
- IMX_SC_R_USB_2_PHY = 263,
- IMX_SC_R_DTCP = 264,
- IMX_SC_R_NAND = 265,
- IMX_SC_R_LVDS_0 = 266,
- IMX_SC_R_LVDS_0_PWM_0 = 267,
- IMX_SC_R_LVDS_0_I2C_0 = 268,
- IMX_SC_R_LVDS_0_I2C_1 = 269,
- IMX_SC_R_LVDS_1 = 270,
- IMX_SC_R_LVDS_1_PWM_0 = 271,
- IMX_SC_R_LVDS_1_I2C_0 = 272,
- IMX_SC_R_LVDS_1_I2C_1 = 273,
- IMX_SC_R_LVDS_2 = 274,
- IMX_SC_R_LVDS_2_PWM_0 = 275,
- IMX_SC_R_LVDS_2_I2C_0 = 276,
- IMX_SC_R_LVDS_2_I2C_1 = 277,
- IMX_SC_R_M4_0_PID0 = 278,
- IMX_SC_R_M4_0_PID1 = 279,
- IMX_SC_R_M4_0_PID2 = 280,
- IMX_SC_R_M4_0_PID3 = 281,
- IMX_SC_R_M4_0_PID4 = 282,
- IMX_SC_R_M4_0_RGPIO = 283,
- IMX_SC_R_M4_0_SEMA42 = 284,
- IMX_SC_R_M4_0_TPM = 285,
- IMX_SC_R_M4_0_PIT = 286,
- IMX_SC_R_M4_0_UART = 287,
- IMX_SC_R_M4_0_I2C = 288,
- IMX_SC_R_M4_0_INTMUX = 289,
- IMX_SC_R_M4_0_SIM = 290,
- IMX_SC_R_M4_0_WDOG = 291,
- IMX_SC_R_M4_0_MU_0B = 292,
- IMX_SC_R_M4_0_MU_0A0 = 293,
- IMX_SC_R_M4_0_MU_0A1 = 294,
- IMX_SC_R_M4_0_MU_0A2 = 295,
- IMX_SC_R_M4_0_MU_0A3 = 296,
- IMX_SC_R_M4_0_MU_1A = 297,
- IMX_SC_R_M4_1_PID0 = 298,
- IMX_SC_R_M4_1_PID1 = 299,
- IMX_SC_R_M4_1_PID2 = 300,
- IMX_SC_R_M4_1_PID3 = 301,
- IMX_SC_R_M4_1_PID4 = 302,
- IMX_SC_R_M4_1_RGPIO = 303,
- IMX_SC_R_M4_1_SEMA42 = 304,
- IMX_SC_R_M4_1_TPM = 305,
- IMX_SC_R_M4_1_PIT = 306,
- IMX_SC_R_M4_1_UART = 307,
- IMX_SC_R_M4_1_I2C = 308,
- IMX_SC_R_M4_1_INTMUX = 309,
- IMX_SC_R_M4_1_SIM = 310,
- IMX_SC_R_M4_1_WDOG = 311,
- IMX_SC_R_M4_1_MU_0B = 312,
- IMX_SC_R_M4_1_MU_0A0 = 313,
- IMX_SC_R_M4_1_MU_0A1 = 314,
- IMX_SC_R_M4_1_MU_0A2 = 315,
- IMX_SC_R_M4_1_MU_0A3 = 316,
- IMX_SC_R_M4_1_MU_1A = 317,
- IMX_SC_R_SAI_0 = 318,
- IMX_SC_R_SAI_1 = 319,
- IMX_SC_R_SAI_2 = 320,
- IMX_SC_R_IRQSTR_SCU2 = 321,
- IMX_SC_R_IRQSTR_DSP = 322,
- IMX_SC_R_UNUSED5 = 323,
- IMX_SC_R_UNUSED6 = 324,
- IMX_SC_R_AUDIO_PLL_0 = 325,
- IMX_SC_R_PI_0 = 326,
- IMX_SC_R_PI_0_PWM_0 = 327,
- IMX_SC_R_PI_0_PWM_1 = 328,
- IMX_SC_R_PI_0_I2C_0 = 329,
- IMX_SC_R_PI_0_PLL = 330,
- IMX_SC_R_PI_1 = 331,
- IMX_SC_R_PI_1_PWM_0 = 332,
- IMX_SC_R_PI_1_PWM_1 = 333,
- IMX_SC_R_PI_1_I2C_0 = 334,
- IMX_SC_R_PI_1_PLL = 335,
- IMX_SC_R_SC_PID0 = 336,
- IMX_SC_R_SC_PID1 = 337,
- IMX_SC_R_SC_PID2 = 338,
- IMX_SC_R_SC_PID3 = 339,
- IMX_SC_R_SC_PID4 = 340,
- IMX_SC_R_SC_SEMA42 = 341,
- IMX_SC_R_SC_TPM = 342,
- IMX_SC_R_SC_PIT = 343,
- IMX_SC_R_SC_UART = 344,
- IMX_SC_R_SC_I2C = 345,
- IMX_SC_R_SC_MU_0B = 346,
- IMX_SC_R_SC_MU_0A0 = 347,
- IMX_SC_R_SC_MU_0A1 = 348,
- IMX_SC_R_SC_MU_0A2 = 349,
- IMX_SC_R_SC_MU_0A3 = 350,
- IMX_SC_R_SC_MU_1A = 351,
- IMX_SC_R_SYSCNT_RD = 352,
- IMX_SC_R_SYSCNT_CMP = 353,
- IMX_SC_R_DEBUG = 354,
- IMX_SC_R_SYSTEM = 355,
- IMX_SC_R_SNVS = 356,
- IMX_SC_R_OTP = 357,
- IMX_SC_R_VPU_PID0 = 358,
- IMX_SC_R_VPU_PID1 = 359,
- IMX_SC_R_VPU_PID2 = 360,
- IMX_SC_R_VPU_PID3 = 361,
- IMX_SC_R_VPU_PID4 = 362,
- IMX_SC_R_VPU_PID5 = 363,
- IMX_SC_R_VPU_PID6 = 364,
- IMX_SC_R_VPU_PID7 = 365,
- IMX_SC_R_VPU_UART = 366,
- IMX_SC_R_VPUCORE = 367,
- IMX_SC_R_VPUCORE_0 = 368,
- IMX_SC_R_VPUCORE_1 = 369,
- IMX_SC_R_VPUCORE_2 = 370,
- IMX_SC_R_VPUCORE_3 = 371,
- IMX_SC_R_DMA_4_CH0 = 372,
- IMX_SC_R_DMA_4_CH1 = 373,
- IMX_SC_R_DMA_4_CH2 = 374,
- IMX_SC_R_DMA_4_CH3 = 375,
- IMX_SC_R_DMA_4_CH4 = 376,
- IMX_SC_R_ISI_CH0 = 377,
- IMX_SC_R_ISI_CH1 = 378,
- IMX_SC_R_ISI_CH2 = 379,
- IMX_SC_R_ISI_CH3 = 380,
- IMX_SC_R_ISI_CH4 = 381,
- IMX_SC_R_ISI_CH5 = 382,
- IMX_SC_R_ISI_CH6 = 383,
- IMX_SC_R_ISI_CH7 = 384,
- IMX_SC_R_MJPEG_DEC_S0 = 385,
- IMX_SC_R_MJPEG_DEC_S1 = 386,
- IMX_SC_R_MJPEG_DEC_S2 = 387,
- IMX_SC_R_MJPEG_DEC_S3 = 388,
- IMX_SC_R_MJPEG_ENC_S0 = 389,
- IMX_SC_R_MJPEG_ENC_S1 = 390,
- IMX_SC_R_MJPEG_ENC_S2 = 391,
- IMX_SC_R_MJPEG_ENC_S3 = 392,
- IMX_SC_R_MIPI_0 = 393,
- IMX_SC_R_MIPI_0_PWM_0 = 394,
- IMX_SC_R_MIPI_0_I2C_0 = 395,
- IMX_SC_R_MIPI_0_I2C_1 = 396,
- IMX_SC_R_MIPI_1 = 397,
- IMX_SC_R_MIPI_1_PWM_0 = 398,
- IMX_SC_R_MIPI_1_I2C_0 = 399,
- IMX_SC_R_MIPI_1_I2C_1 = 400,
- IMX_SC_R_CSI_0 = 401,
- IMX_SC_R_CSI_0_PWM_0 = 402,
- IMX_SC_R_CSI_0_I2C_0 = 403,
- IMX_SC_R_CSI_1 = 404,
- IMX_SC_R_CSI_1_PWM_0 = 405,
- IMX_SC_R_CSI_1_I2C_0 = 406,
- IMX_SC_R_HDMI = 407,
- IMX_SC_R_HDMI_I2S = 408,
- IMX_SC_R_HDMI_I2C_0 = 409,
- IMX_SC_R_HDMI_PLL_0 = 410,
- IMX_SC_R_HDMI_RX = 411,
- IMX_SC_R_HDMI_RX_BYPASS = 412,
- IMX_SC_R_HDMI_RX_I2C_0 = 413,
- IMX_SC_R_ASRC_0 = 414,
- IMX_SC_R_ESAI_0 = 415,
- IMX_SC_R_SPDIF_0 = 416,
- IMX_SC_R_SPDIF_1 = 417,
- IMX_SC_R_SAI_3 = 418,
- IMX_SC_R_SAI_4 = 419,
- IMX_SC_R_SAI_5 = 420,
- IMX_SC_R_GPT_5 = 421,
- IMX_SC_R_GPT_6 = 422,
- IMX_SC_R_GPT_7 = 423,
- IMX_SC_R_GPT_8 = 424,
- IMX_SC_R_GPT_9 = 425,
- IMX_SC_R_GPT_10 = 426,
- IMX_SC_R_DMA_2_CH5 = 427,
- IMX_SC_R_DMA_2_CH6 = 428,
- IMX_SC_R_DMA_2_CH7 = 429,
- IMX_SC_R_DMA_2_CH8 = 430,
- IMX_SC_R_DMA_2_CH9 = 431,
- IMX_SC_R_DMA_2_CH10 = 432,
- IMX_SC_R_DMA_2_CH11 = 433,
- IMX_SC_R_DMA_2_CH12 = 434,
- IMX_SC_R_DMA_2_CH13 = 435,
- IMX_SC_R_DMA_2_CH14 = 436,
- IMX_SC_R_DMA_2_CH15 = 437,
- IMX_SC_R_DMA_2_CH16 = 438,
- IMX_SC_R_DMA_2_CH17 = 439,
- IMX_SC_R_DMA_2_CH18 = 440,
- IMX_SC_R_DMA_2_CH19 = 441,
- IMX_SC_R_DMA_2_CH20 = 442,
- IMX_SC_R_DMA_2_CH21 = 443,
- IMX_SC_R_DMA_2_CH22 = 444,
- IMX_SC_R_DMA_2_CH23 = 445,
- IMX_SC_R_DMA_2_CH24 = 446,
- IMX_SC_R_DMA_2_CH25 = 447,
- IMX_SC_R_DMA_2_CH26 = 448,
- IMX_SC_R_DMA_2_CH27 = 449,
- IMX_SC_R_DMA_2_CH28 = 450,
- IMX_SC_R_DMA_2_CH29 = 451,
- IMX_SC_R_DMA_2_CH30 = 452,
- IMX_SC_R_DMA_2_CH31 = 453,
- IMX_SC_R_ASRC_1 = 454,
- IMX_SC_R_ESAI_1 = 455,
- IMX_SC_R_SAI_6 = 456,
- IMX_SC_R_SAI_7 = 457,
- IMX_SC_R_AMIX = 458,
- IMX_SC_R_MQS_0 = 459,
- IMX_SC_R_DMA_3_CH0 = 460,
- IMX_SC_R_DMA_3_CH1 = 461,
- IMX_SC_R_DMA_3_CH2 = 462,
- IMX_SC_R_DMA_3_CH3 = 463,
- IMX_SC_R_DMA_3_CH4 = 464,
- IMX_SC_R_DMA_3_CH5 = 465,
- IMX_SC_R_DMA_3_CH6 = 466,
- IMX_SC_R_DMA_3_CH7 = 467,
- IMX_SC_R_DMA_3_CH8 = 468,
- IMX_SC_R_DMA_3_CH9 = 469,
- IMX_SC_R_DMA_3_CH10 = 470,
- IMX_SC_R_DMA_3_CH11 = 471,
- IMX_SC_R_DMA_3_CH12 = 472,
- IMX_SC_R_DMA_3_CH13 = 473,
- IMX_SC_R_DMA_3_CH14 = 474,
- IMX_SC_R_DMA_3_CH15 = 475,
- IMX_SC_R_DMA_3_CH16 = 476,
- IMX_SC_R_DMA_3_CH17 = 477,
- IMX_SC_R_DMA_3_CH18 = 478,
- IMX_SC_R_DMA_3_CH19 = 479,
- IMX_SC_R_DMA_3_CH20 = 480,
- IMX_SC_R_DMA_3_CH21 = 481,
- IMX_SC_R_DMA_3_CH22 = 482,
- IMX_SC_R_DMA_3_CH23 = 483,
- IMX_SC_R_DMA_3_CH24 = 484,
- IMX_SC_R_DMA_3_CH25 = 485,
- IMX_SC_R_DMA_3_CH26 = 486,
- IMX_SC_R_DMA_3_CH27 = 487,
- IMX_SC_R_DMA_3_CH28 = 488,
- IMX_SC_R_DMA_3_CH29 = 489,
- IMX_SC_R_DMA_3_CH30 = 490,
- IMX_SC_R_DMA_3_CH31 = 491,
- IMX_SC_R_AUDIO_PLL_1 = 492,
- IMX_SC_R_AUDIO_CLK_0 = 493,
- IMX_SC_R_AUDIO_CLK_1 = 494,
- IMX_SC_R_MCLK_OUT_0 = 495,
- IMX_SC_R_MCLK_OUT_1 = 496,
- IMX_SC_R_PMIC_0 = 497,
- IMX_SC_R_PMIC_1 = 498,
- IMX_SC_R_SECO = 499,
- IMX_SC_R_CAAM_JR1 = 500,
- IMX_SC_R_CAAM_JR2 = 501,
- IMX_SC_R_CAAM_JR3 = 502,
- IMX_SC_R_SECO_MU_2 = 503,
- IMX_SC_R_SECO_MU_3 = 504,
- IMX_SC_R_SECO_MU_4 = 505,
- IMX_SC_R_HDMI_RX_PWM_0 = 506,
- IMX_SC_R_A35 = 507,
- IMX_SC_R_A35_0 = 508,
- IMX_SC_R_A35_1 = 509,
- IMX_SC_R_A35_2 = 510,
- IMX_SC_R_A35_3 = 511,
- IMX_SC_R_DSP = 512,
- IMX_SC_R_DSP_RAM = 513,
- IMX_SC_R_CAAM_JR1_OUT = 514,
- IMX_SC_R_CAAM_JR2_OUT = 515,
- IMX_SC_R_CAAM_JR3_OUT = 516,
- IMX_SC_R_VPU_DEC_0 = 517,
- IMX_SC_R_VPU_ENC_0 = 518,
- IMX_SC_R_CAAM_JR0 = 519,
- IMX_SC_R_CAAM_JR0_OUT = 520,
- IMX_SC_R_PMIC_2 = 521,
- IMX_SC_R_DBLOGIC = 522,
- IMX_SC_R_HDMI_PLL_1 = 523,
- IMX_SC_R_BOARD_R0 = 524,
- IMX_SC_R_BOARD_R1 = 525,
- IMX_SC_R_BOARD_R2 = 526,
- IMX_SC_R_BOARD_R3 = 527,
- IMX_SC_R_BOARD_R4 = 528,
- IMX_SC_R_BOARD_R5 = 529,
- IMX_SC_R_BOARD_R6 = 530,
- IMX_SC_R_BOARD_R7 = 531,
- IMX_SC_R_MJPEG_DEC_MP = 532,
- IMX_SC_R_MJPEG_ENC_MP = 533,
- IMX_SC_R_VPU_TS_0 = 534,
- IMX_SC_R_VPU_MU_0 = 535,
- IMX_SC_R_VPU_MU_1 = 536,
- IMX_SC_R_VPU_MU_2 = 537,
- IMX_SC_R_VPU_MU_3 = 538,
- IMX_SC_R_VPU_ENC_1 = 539,
- IMX_SC_R_VPU = 540,
- IMX_SC_R_LAST
-};
-
-/* NOTE - please add by replacing some of the UNUSED from above! */
-
-/*
* This type is used to indicate a control.
*/
enum imx_sc_ctrl {
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c95c0807471f..6d52ce6af4ff 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1044,10 +1044,15 @@ bool opens_in_grace(struct net *);
* Obviously, the last two criteria only matter for POSIX locks.
*/
struct file_lock {
- struct file_lock *fl_next; /* singly linked list for this inode */
+ struct file_lock *fl_blocker; /* The lock that is blocking us */
struct list_head fl_list; /* link into file_lock_context */
struct hlist_node fl_link; /* node in global lists */
- struct list_head fl_block; /* circular list of blocked processes */
+ struct list_head fl_blocked_requests; /* list of requests with
+ * ->fl_blocker pointing here
+ */
+ struct list_head fl_blocked_member; /* node in
+ * ->fl_blocker->fl_blocked_requests
+ */
fl_owner_t fl_owner;
unsigned int fl_flags;
unsigned char fl_type;
@@ -1119,7 +1124,7 @@ extern void locks_remove_file(struct file *);
extern void locks_release_private(struct file_lock *);
extern void posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
-extern int posix_unblock_lock(struct file_lock *);
+extern int locks_delete_block(struct file_lock *);
extern int vfs_test_lock(struct file *, struct file_lock *);
extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
@@ -1209,7 +1214,7 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
return -ENOLCK;
}
-static inline int posix_unblock_lock(struct file_lock *waiter)
+static inline int locks_delete_block(struct file_lock *waiter)
{
return -ENOENT;
}
@@ -2021,7 +2026,7 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
.ki_filp = filp,
.ki_flags = iocb_flags(filp),
.ki_hint = ki_hint_validate(file_write_hint(filp)),
- .ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0),
+ .ki_ioprio = get_current_ioprio(),
};
}
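
The blocked-lock bookkeeping above becomes an explicit tree: a waiting request records its blocker in fl_blocker and links itself onto that blocker's fl_blocked_requests through its own fl_blocked_member. A hedged sketch of waking the direct waiters of a lock (list locking and the actual wakeup via the fl_wait queue, not shown in this hunk, are elided; the real logic lives in fs/locks.c):

        struct file_lock *waiter, *tmp;

        list_for_each_entry_safe(waiter, tmp,
                                 &blocker->fl_blocked_requests, fl_blocked_member) {
                list_del_init(&waiter->fl_blocked_member);
                waiter->fl_blocker = NULL;
                /* wake_up(&waiter->fl_wait); */
        }
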
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 34cf0fdd7dc7..610815e3f1aa 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
int n_pages)
{
- atomic_sub(n_pages, &op->n_pages);
- if (atomic_read(&op->n_pages) <= 0)
+ if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
fscache_op_complete(&op->op, false);
}
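
The old sequence could complete an operation twice: with a separate sub and read, two CPUs retiring the final pages can both observe zero. The fused atomic_sub_return_relaxed() gives each caller a unique intermediate value, so exactly one of them crosses the <= 0 boundary. Illustrative interleaving (n_pages starts at 2, one page retired per CPU):

        /* old code:                          new code:                          */
        /* CPU0: atomic_sub(1)   -> 1         CPU0: sub_return(1) -> 1, no-op    */
        /* CPU1: atomic_sub(1)   -> 0         CPU1: sub_return(1) -> 0, complete */
        /* CPU0: read() == 0     -> complete                                     */
        /* CPU1: read() == 0     -> complete again (the bug)                     */
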
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index fd1ce10553bf..2ccb08cb5d6a 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -26,30 +26,46 @@ static inline int fsnotify_parent(const struct path *path, struct dentry *dentry
return __fsnotify_parent(path, dentry, mask);
}
-/* simple call site for access decisions */
+/*
+ * Simple wrapper to consolidate calls to fsnotify_parent()/fsnotify() when
+ * an event is on a path.
+ */
+static inline int fsnotify_path(struct inode *inode, const struct path *path,
+ __u32 mask)
+{
+ int ret = fsnotify_parent(path, NULL, mask);
+
+ if (ret)
+ return ret;
+ return fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+}
+
+/* Simple call site for access decisions */
static inline int fsnotify_perm(struct file *file, int mask)
{
+ int ret;
const struct path *path = &file->f_path;
struct inode *inode = file_inode(file);
__u32 fsnotify_mask = 0;
- int ret;
if (file->f_mode & FMODE_NONOTIFY)
return 0;
if (!(mask & (MAY_READ | MAY_OPEN)))
return 0;
- if (mask & MAY_OPEN)
+ if (mask & MAY_OPEN) {
fsnotify_mask = FS_OPEN_PERM;
- else if (mask & MAY_READ)
- fsnotify_mask = FS_ACCESS_PERM;
- else
- BUG();
- ret = fsnotify_parent(path, NULL, fsnotify_mask);
- if (ret)
- return ret;
- return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ if (file->f_flags & __FMODE_EXEC) {
+ ret = fsnotify_path(inode, path, FS_OPEN_EXEC_PERM);
+ if (ret)
+ return ret;
+ }
+ } else if (mask & MAY_READ) {
+ fsnotify_mask = FS_ACCESS_PERM;
+ }
+
+ return fsnotify_path(inode, path, fsnotify_mask);
}
/*
@@ -180,10 +196,8 @@ static inline void fsnotify_access(struct file *file)
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ if (!(file->f_mode & FMODE_NONOTIFY))
+ fsnotify_path(inode, path, mask);
}
/*
@@ -198,10 +212,8 @@ static inline void fsnotify_modify(struct file *file)
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ if (!(file->f_mode & FMODE_NONOTIFY))
+ fsnotify_path(inode, path, mask);
}
/*
@@ -215,9 +227,10 @@ static inline void fsnotify_open(struct file *file)
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
+ if (file->f_flags & __FMODE_EXEC)
+ mask |= FS_OPEN_EXEC;
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ fsnotify_path(inode, path, mask);
}
/*
@@ -233,10 +246,8 @@ static inline void fsnotify_close(struct file *file)
if (S_ISDIR(inode->i_mode))
mask |= FS_ISDIR;
- if (!(file->f_mode & FMODE_NONOTIFY)) {
- fsnotify_parent(path, NULL, mask);
- fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
- }
+ if (!(file->f_mode & FMODE_NONOTIFY))
+ fsnotify_path(inode, path, mask);
}
/*
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 135b973e44d1..7639774e7475 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -38,6 +38,7 @@
#define FS_DELETE 0x00000200 /* Subfile was deleted */
#define FS_DELETE_SELF 0x00000400 /* Self was deleted */
#define FS_MOVE_SELF 0x00000800 /* Self was moved */
+#define FS_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
@@ -45,6 +46,7 @@
#define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */
#define FS_ACCESS_PERM 0x00020000 /* access event in a permission hook */
+#define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */
#define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */
#define FS_ISDIR 0x40000000 /* event occurred against dir */
@@ -62,11 +64,13 @@
#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
- FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM)
+ FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM | \
+ FS_OPEN_EXEC | FS_OPEN_EXEC_PERM)
#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
-#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM)
+#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
+ FS_OPEN_EXEC_PERM)
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \
@@ -74,7 +78,8 @@
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \
FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
- FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME)
+ FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME | \
+ FS_OPEN_EXEC | FS_OPEN_EXEC_PERM)
/* Extra flags that may be reported with event or control handling of events */
#define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a397907e8d72..5c990e891d6a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -420,6 +420,9 @@ enum {
};
void arch_ftrace_update_code(int command);
+void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
+void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
+void arch_ftrace_trampoline_free(struct ftrace_ops *ops);
struct ftrace_rec_iter;
@@ -777,8 +780,8 @@ struct ftrace_ret_stack {
extern void return_to_handler(void);
extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
- unsigned long frame_pointer, unsigned long *retp);
+function_graph_enter(unsigned long ret, unsigned long func,
+ unsigned long frame_pointer, unsigned long *retp);
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
unsigned long ret, unsigned long *retp);
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 821ae502d3d8..ccaef0097785 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -9,9 +9,6 @@ struct inode;
struct mm_struct;
struct task_struct;
-extern int
-handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi);
-
/*
* Futexes are matched on equal values of this key.
* The key type depends on whether it's a shared or private mapping.
@@ -55,11 +52,6 @@ extern void exit_robust_list(struct task_struct *curr);
long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
u32 __user *uaddr2, u32 val2, u32 val3);
-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
-#define futex_cmpxchg_enabled 1
-#else
-extern int futex_cmpxchg_enabled;
-#endif
#else
static inline void exit_robust_list(struct task_struct *curr)
{
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 70fc838e6773..06c0fd594097 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -17,6 +17,7 @@
#include <linux/percpu-refcount.h>
#include <linux/uuid.h>
#include <linux/blk_types.h>
+#include <asm/local.h>
#ifdef CONFIG_BLOCK
@@ -89,6 +90,7 @@ struct disk_stats {
unsigned long merges[NR_STAT_GROUPS];
unsigned long io_ticks;
unsigned long time_in_queue;
+ local_t in_flight[2];
};
#define PARTITION_META_INFO_VOLNAMELTH 64
@@ -122,14 +124,13 @@ struct hd_struct {
int make_it_fail;
#endif
unsigned long stamp;
- atomic_t in_flight[2];
#ifdef CONFIG_SMP
struct disk_stats __percpu *dkstats;
#else
struct disk_stats dkstats;
#endif
struct percpu_ref ref;
- struct rcu_head rcu_head;
+ struct rcu_work rcu_work;
};
#define GENHD_FL_REMOVABLE 1
@@ -295,8 +296,11 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
-#define __part_stat_add(cpu, part, field, addnd) \
- (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
+#define part_stat_get_cpu(part, field, cpu) \
+ (per_cpu_ptr((part)->dkstats, (cpu))->field)
+
+#define part_stat_get(part, field) \
+ part_stat_get_cpu(part, field, smp_processor_id())
#define part_stat_read(part, field) \
({ \
@@ -333,10 +337,9 @@ static inline void free_part_stats(struct hd_struct *part)
#define part_stat_lock() ({ rcu_read_lock(); 0; })
#define part_stat_unlock() rcu_read_unlock()
-#define __part_stat_add(cpu, part, field, addnd) \
- ((part)->dkstats.field += addnd)
-
-#define part_stat_read(part, field) ((part)->dkstats.field)
+#define part_stat_get(part, field) ((part)->dkstats.field)
+#define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field)
+#define part_stat_read(part, field) part_stat_get(part, field)
static inline void part_stat_set_all(struct hd_struct *part, int value)
{
@@ -362,22 +365,33 @@ static inline void free_part_stats(struct hd_struct *part)
part_stat_read(part, field[STAT_WRITE]) + \
part_stat_read(part, field[STAT_DISCARD]))
-#define part_stat_add(cpu, part, field, addnd) do { \
- __part_stat_add((cpu), (part), field, addnd); \
+#define __part_stat_add(part, field, addnd) \
+ (part_stat_get(part, field) += (addnd))
+
+#define part_stat_add(part, field, addnd) do { \
+ __part_stat_add((part), field, addnd); \
if ((part)->partno) \
- __part_stat_add((cpu), &part_to_disk((part))->part0, \
+ __part_stat_add(&part_to_disk((part))->part0, \
field, addnd); \
} while (0)
-#define part_stat_dec(cpu, gendiskp, field) \
- part_stat_add(cpu, gendiskp, field, -1)
-#define part_stat_inc(cpu, gendiskp, field) \
- part_stat_add(cpu, gendiskp, field, 1)
-#define part_stat_sub(cpu, gendiskp, field, subnd) \
- part_stat_add(cpu, gendiskp, field, -subnd)
-
-void part_in_flight(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2]);
+#define part_stat_dec(gendiskp, field) \
+ part_stat_add(gendiskp, field, -1)
+#define part_stat_inc(gendiskp, field) \
+ part_stat_add(gendiskp, field, 1)
+#define part_stat_sub(gendiskp, field, subnd) \
+ part_stat_add(gendiskp, field, -subnd)
+
+#define part_stat_local_dec(gendiskp, field) \
+ local_dec(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_inc(gendiskp, field) \
+ local_inc(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_read(gendiskp, field) \
+ local_read(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_read_cpu(gendiskp, field, cpu) \
+ local_read(&(part_stat_get_cpu(gendiskp, field, cpu)))
+
+unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part);
void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
unsigned int inflight[2]);
void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
@@ -398,8 +412,7 @@ static inline void free_part_info(struct hd_struct *part)
kfree(part->info);
}
-/* block/blk-core.c */
-extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part);
+void update_io_ticks(struct hd_struct *part, unsigned long now);
/* block/genhd.c */
extern void device_add_disk(struct device *parent, struct gendisk *disk,
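
in_flight moves out of struct hd_struct (an atomic_t pair) into the per-CPU disk_stats as local_t, so writers use cheap CPU-local ops and readers sum over CPUs. A hedged sketch of both sides (the real accounting lives in block/blk-core.c):

        /* Writer: one read request enters flight on this CPU. */
        part_stat_lock();
        part_stat_local_inc(part, in_flight[0]);        /* [0] read, [1] write */
        part_stat_unlock();

        /* Reader: sum the CPU-local counters, as part_in_flight() must. */
        unsigned int inflight = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) +
                            part_stat_local_read_cpu(part, in_flight[1], cpu);
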
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 76f8db0b0e71..0705164f928c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -510,18 +510,22 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
}
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
- int node);
+ int node, bool hugepage);
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+ alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
#else
#define alloc_pages(gfp_mask, order) \
alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
+ alloc_pages(gfp_mask, order)
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
+ alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, node)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index f2f887795d43..8aebcf822082 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -104,6 +104,7 @@ struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags);
void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
+void devm_gpiod_unhinge(struct device *dev, struct gpio_desc *desc);
void devm_gpiod_put_array(struct device *dev, struct gpio_descs *descs);
int gpiod_get_direction(struct gpio_desc *desc);
@@ -172,6 +173,10 @@ int desc_to_gpio(const struct gpio_desc *desc);
struct device_node;
struct fwnode_handle;
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label);
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
struct device_node *node,
const char *propname, int index,
@@ -245,6 +250,15 @@ static inline void gpiod_put(struct gpio_desc *desc)
WARN_ON(1);
}
+static inline void devm_gpiod_unhinge(struct device *dev,
+ struct gpio_desc *desc)
+{
+ might_sleep();
+
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline void gpiod_put_array(struct gpio_descs *descs)
{
might_sleep();
@@ -518,6 +532,15 @@ struct device_node;
struct fwnode_handle;
static inline
+struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
+ const char *propname, int index,
+ enum gpiod_flags dflags,
+ const char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline
struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
struct device_node *node,
const char *propname, int index,
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 4f3febc0f971..d2bacf502429 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -163,6 +163,9 @@ struct hdmi_avi_infoframe {
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_avi_infoframe_check(struct hdmi_avi_infoframe *frame);
enum hdmi_spd_sdi {
HDMI_SPD_SDI_UNKNOWN,
@@ -194,6 +197,9 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
const char *vendor, const char *product);
ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
size_t size);
+ssize_t hdmi_spd_infoframe_pack_only(const struct hdmi_spd_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_spd_infoframe_check(struct hdmi_spd_infoframe *frame);
enum hdmi_audio_coding_type {
HDMI_AUDIO_CODING_TYPE_STREAM,
@@ -272,6 +278,9 @@ struct hdmi_audio_infoframe {
int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_audio_infoframe_pack_only(const struct hdmi_audio_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_audio_infoframe_check(struct hdmi_audio_infoframe *frame);
enum hdmi_3d_structure {
HDMI_3D_STRUCTURE_INVALID = -1,
@@ -299,6 +308,9 @@ struct hdmi_vendor_infoframe {
int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
+ssize_t hdmi_vendor_infoframe_pack_only(const struct hdmi_vendor_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_vendor_infoframe_check(struct hdmi_vendor_infoframe *frame);
union hdmi_vendor_any_infoframe {
struct {
@@ -330,10 +342,14 @@ union hdmi_infoframe {
struct hdmi_audio_infoframe audio;
};
-ssize_t
-hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
-int hdmi_infoframe_unpack(union hdmi_infoframe *frame, void *buffer);
+ssize_t hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer,
+ size_t size);
+ssize_t hdmi_infoframe_pack_only(const union hdmi_infoframe *frame,
+ void *buffer, size_t size);
+int hdmi_infoframe_check(union hdmi_infoframe *frame);
+int hdmi_infoframe_unpack(union hdmi_infoframe *frame,
+ const void *buffer, size_t size);
void hdmi_infoframe_log(const char *level, struct device *dev,
- union hdmi_infoframe *frame);
+ const union hdmi_infoframe *frame);
#endif /* _DRM_HDMI_H */
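/*
 * Editor's illustrative sketch, not part of this patch: the *_check()/
 * *_pack_only() split lets a driver validate an AVI infoframe once and
 * then pack it repeatedly into a caller-provided buffer.
 */
static ssize_t example_pack_avi(struct hdmi_avi_infoframe *frame,
				void *buf, size_t len)
{
	int ret = hdmi_avi_infoframe_check(frame);

	if (ret)
		return ret;

	return hdmi_avi_infoframe_pack_only(frame, buf, len);
}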
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 331dc377c275..dc12f5c4b076 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -177,6 +177,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
* @attr_usage_id: Attribute usage id as per spec
* @report_id: Report id to look for
* @flag: Synchronous or asynchronous read
+* @is_signed: If true then fields < 32 bits will be sign-extended
*
* Issues a synchronous or asynchronous read request for an input attribute.
* Returns data up to 32 bits.
@@ -190,7 +191,8 @@ enum sensor_hub_read_flags {
int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
u32 attr_usage_id, u32 report_id,
- enum sensor_hub_read_flags flag
+ enum sensor_hub_read_flags flag,
+ bool is_signed
);
/**
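/*
 * Editor's illustrative sketch, not part of this patch: a synchronous
 * read passing the new 'is_signed' flag so that sub-32-bit fields come
 * back sign-extended. The wrapper name is hypothetical.
 */
static int example_read_attr(struct hid_sensor_hub_device *hsdev,
			     u32 usage_id, u32 attr, u32 report_id)
{
	return sensor_hub_input_attr_get_raw_value(hsdev, usage_id, attr,
						   report_id, SENSOR_HUB_SYNC,
						   true /* is_signed */);
}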
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 3892e9c8b2de..2e8957eac4d4 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * include/linux/hrtimer.h
- *
* hrtimers - High-resolution kernel timers
*
* Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
@@ -9,8 +8,6 @@
* data type definitions, declarations, prototypes
*
* Started by: Thomas Gleixner and Ingo Molnar
- *
- * For licencing details see kernel-base/COPYING
*/
#ifndef _LINUX_HRTIMER_H
#define _LINUX_HRTIMER_H
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b3e24368930a..14131b6fae68 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -905,6 +905,13 @@ struct vmbus_channel {
bool probe_done;
+ /*
+ * We must offload the handling of the primary/sub channels
+ * from the single-threaded vmbus_connection.work_queue to
+ * two different workqueues, otherwise we can block
+ * vmbus_connection.work_queue and hang: see vmbus_process_offer().
+ */
+ struct work_struct add_channel_work;
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
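/*
 * Editor's illustrative sketch, not part of this patch: offloading the
 * handling of an offered channel to the new per-channel work item so
 * vmbus_connection.work_queue is never blocked. Both function names are
 * hypothetical.
 */
static void example_add_channel_work(struct work_struct *work)
{
	struct vmbus_channel *channel =
		container_of(work, struct vmbus_channel, add_channel_work);

	/* ... process the offer for 'channel' in a sleepable context ... */
	pr_debug("processing offer for channel %p\n", channel);
}

static void example_queue_offer(struct vmbus_channel *channel,
				struct workqueue_struct *wq)
{
	INIT_WORK(&channel->add_channel_work, example_add_channel_work);
	queue_work(wq, &channel->add_channel_work);
}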
diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
new file mode 100644
index 000000000000..73b0982cc519
--- /dev/null
+++ b/include/linux/i3c/ccc.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_CCC_H
+#define I3C_CCC_H
+
+#include <linux/bitops.h>
+#include <linux/i3c/device.h>
+
+/* I3C CCC (Common Command Codes) related definitions */
+#define I3C_CCC_DIRECT BIT(7)
+
+#define I3C_CCC_ID(id, broadcast) \
+ ((id) | ((broadcast) ? 0 : I3C_CCC_DIRECT))
+
+/* Commands valid in both broadcast and unicast modes */
+#define I3C_CCC_ENEC(broadcast) I3C_CCC_ID(0x0, broadcast)
+#define I3C_CCC_DISEC(broadcast) I3C_CCC_ID(0x1, broadcast)
+#define I3C_CCC_ENTAS(as, broadcast) I3C_CCC_ID(0x2 + (as), broadcast)
+#define I3C_CCC_RSTDAA(broadcast) I3C_CCC_ID(0x6, broadcast)
+#define I3C_CCC_SETMWL(broadcast) I3C_CCC_ID(0x9, broadcast)
+#define I3C_CCC_SETMRL(broadcast) I3C_CCC_ID(0xa, broadcast)
+#define I3C_CCC_SETXTIME(broadcast) ((broadcast) ? 0x28 : 0x98)
+#define I3C_CCC_VENDOR(id, broadcast) ((id) + ((broadcast) ? 0x61 : 0xe0))
+
+/* Broadcast-only commands */
+#define I3C_CCC_ENTDAA I3C_CCC_ID(0x7, true)
+#define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true)
+#define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true)
+#define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true)
+
+/* Unicast-only commands */
+#define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false)
+#define I3C_CCC_SETNEWDA I3C_CCC_ID(0x8, false)
+#define I3C_CCC_GETMWL I3C_CCC_ID(0xb, false)
+#define I3C_CCC_GETMRL I3C_CCC_ID(0xc, false)
+#define I3C_CCC_GETPID I3C_CCC_ID(0xd, false)
+#define I3C_CCC_GETBCR I3C_CCC_ID(0xe, false)
+#define I3C_CCC_GETDCR I3C_CCC_ID(0xf, false)
+#define I3C_CCC_GETSTATUS I3C_CCC_ID(0x10, false)
+#define I3C_CCC_GETACCMST I3C_CCC_ID(0x11, false)
+#define I3C_CCC_SETBRGTGT I3C_CCC_ID(0x13, false)
+#define I3C_CCC_GETMXDS I3C_CCC_ID(0x14, false)
+#define I3C_CCC_GETHDRCAP I3C_CCC_ID(0x15, false)
+#define I3C_CCC_GETXTIME I3C_CCC_ID(0x19, false)
+
+#define I3C_CCC_EVENT_SIR BIT(0)
+#define I3C_CCC_EVENT_MR BIT(1)
+#define I3C_CCC_EVENT_HJ BIT(3)
+
+/**
+ * struct i3c_ccc_events - payload passed to ENEC/DISEC CCC
+ *
+ * @events: bitmask of I3C_CCC_EVENT_xxx events.
+ *
+ * Depending on the CCC command, the specific events coming from all devices
+ * (broadcast version) or a specific device (unicast version) will be
+ * enabled (ENEC) or disabled (DISEC).
+ */
+struct i3c_ccc_events {
+ u8 events;
+};
+
+/**
+ * struct i3c_ccc_mwl - payload passed to SETMWL/GETMWL CCC
+ *
+ * @len: maximum write length in bytes
+ *
+ * The maximum write length is only applicable to SDR private messages or
+ * extended Write CCCs (like SETXTIME).
+ */
+struct i3c_ccc_mwl {
+ __be16 len;
+};
+
+/**
+ * struct i3c_ccc_mrl - payload passed to SETMRL/GETMRL CCC
+ *
+ * @read_len: maximum read length in bytes
+ * @ibi_len: maximum IBI payload length
+ *
+ * The maximum read length is only applicable to SDR private messages or
+ * extended Read CCCs (like GETXTIME).
+ * The IBI length is only valid if the I3C slave is IBI capable
+ * (%I3C_BCR_IBI_REQ_CAP is set).
+ */
+struct i3c_ccc_mrl {
+ __be16 read_len;
+ u8 ibi_len;
+} __packed;
+
+/**
+ * struct i3c_ccc_dev_desc - I3C/I2C device descriptor used for DEFSLVS
+ *
+ * @dyn_addr: dynamic address assigned to the I3C slave or 0 if the entry is
+ * describing an I2C slave.
+ * @dcr: DCR value (not applicable to entries describing I2C devices)
+ * @lvr: LVR value (not applicable to entries describing I3C devices)
+ * @bcr: BCR value or 0 if this entry is describing an I2C slave
+ * @static_addr: static address or 0 if the device does not have a static
+ * address
+ *
+ * The DEFSLVS command should be passed an array of i3c_ccc_dev_desc
+ * descriptors (one entry per I3C/I2C dev controlled by the master).
+ */
+struct i3c_ccc_dev_desc {
+ u8 dyn_addr;
+ union {
+ u8 dcr;
+ u8 lvr;
+ };
+ u8 bcr;
+ u8 static_addr;
+};
+
+/**
+ * struct i3c_ccc_defslvs - payload passed to DEFSLVS CCC
+ *
+ * @count: number of dev descriptors
+ * @master: descriptor describing the current master
+ * @slaves: array of descriptors describing slaves controlled by the
+ * current master
+ *
+ * Information passed to the broadcast DEFSLVS to propagate device
+ * information to all masters currently acting as slaves on the bus.
+ * This is only meaningful if you have more than one master.
+ */
+struct i3c_ccc_defslvs {
+ u8 count;
+ struct i3c_ccc_dev_desc master;
+ struct i3c_ccc_dev_desc slaves[0];
+} __packed;
+
+/**
+ * enum i3c_ccc_test_mode - enum listing all available test modes
+ *
+ * @I3C_CCC_EXIT_TEST_MODE: exit test mode
+ * @I3C_CCC_VENDOR_TEST_MODE: enter vendor test mode
+ */
+enum i3c_ccc_test_mode {
+ I3C_CCC_EXIT_TEST_MODE,
+ I3C_CCC_VENDOR_TEST_MODE,
+};
+
+/**
+ * struct i3c_ccc_enttm - payload passed to ENTTM CCC
+ *
+ * @mode: one of the &enum i3c_ccc_test_mode modes
+ *
+ * Information passed to the ENTTM CCC to instruct an I3C device to enter a
+ * specific test mode.
+ */
+struct i3c_ccc_enttm {
+ u8 mode;
+};
+
+/**
+ * struct i3c_ccc_setda - payload passed to SETNEWDA and SETDASA CCCs
+ *
+ * @addr: dynamic address to assign to an I3C device
+ *
+ * Information passed to the SETNEWDA and SETDASA CCCs to assign/change the
+ * dynamic address of an I3C device.
+ */
+struct i3c_ccc_setda {
+ u8 addr;
+};
+
+/**
+ * struct i3c_ccc_getpid - payload passed to GETPID CCC
+ *
+ * @pid: 48-bit PID in big endian
+ */
+struct i3c_ccc_getpid {
+ u8 pid[6];
+};
+
+/**
+ * struct i3c_ccc_getbcr - payload passed to GETBCR CCC
+ *
+ * @bcr: BCR (Bus Characteristic Register) value
+ */
+struct i3c_ccc_getbcr {
+ u8 bcr;
+};
+
+/**
+ * struct i3c_ccc_getdcr - payload passed to GETDCR CCC
+ *
+ * @dcr: DCR (Device Characteristic Register) value
+ */
+struct i3c_ccc_getdcr {
+ u8 dcr;
+};
+
+#define I3C_CCC_STATUS_PENDING_INT(status) ((status) & GENMASK(3, 0))
+#define I3C_CCC_STATUS_PROTOCOL_ERROR BIT(5)
+#define I3C_CCC_STATUS_ACTIVITY_MODE(status) \
+ (((status) & GENMASK(7, 6)) >> 6)
+
+/**
+ * struct i3c_ccc_getstatus - payload passed to GETSTATUS CCC
+ *
+ * @status: status of the I3C slave (see I3C_CCC_STATUS_xxx macros for more
+ * information).
+ */
+struct i3c_ccc_getstatus {
+ __be16 status;
+};
+
+/**
+ * struct i3c_ccc_getaccmst - payload passed to GETACCMST CCC
+ *
+ * @newmaster: address of the master taking bus ownership
+ */
+struct i3c_ccc_getaccmst {
+ u8 newmaster;
+};
+
+/**
+ * struct i3c_ccc_bridged_slave_desc - bridged slave descriptor
+ *
+ * @addr: dynamic address of the bridged device
+ * @id: ID of the slave device behind the bridge
+ */
+struct i3c_ccc_bridged_slave_desc {
+ u8 addr;
+ __be16 id;
+} __packed;
+
+/**
+ * struct i3c_ccc_setbrgtgt - payload passed to SETBRGTGT CCC
+ *
+ * @count: number of bridged slaves
+ * @bslaves: bridged slave descriptors
+ */
+struct i3c_ccc_setbrgtgt {
+ u8 count;
+ struct i3c_ccc_bridged_slave_desc bslaves[0];
+} __packed;
+
+/**
+ * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers
+ */
+enum i3c_sdr_max_data_rate {
+ I3C_SDR0_FSCL_MAX,
+ I3C_SDR1_FSCL_8MHZ,
+ I3C_SDR2_FSCL_6MHZ,
+ I3C_SDR3_FSCL_4MHZ,
+ I3C_SDR4_FSCL_2MHZ,
+};
+
+/**
+ * enum i3c_tsco - clock to data turn-around
+ */
+enum i3c_tsco {
+ I3C_TSCO_8NS,
+ I3C_TSCO_9NS,
+ I3C_TSCO_10NS,
+ I3C_TSCO_11NS,
+ I3C_TSCO_12NS,
+};
+
+#define I3C_CCC_MAX_SDR_FSCL_MASK GENMASK(2, 0)
+#define I3C_CCC_MAX_SDR_FSCL(x) ((x) & I3C_CCC_MAX_SDR_FSCL_MASK)
+
+/**
+ * struct i3c_ccc_getmxds - payload passed to GETMXDS CCC
+ *
+ * @maxwr: write limitations
+ * @maxrd: read limitations
+ * @maxrdturn: maximum read turn-around expressed in micro-seconds,
+ * little-endian formatted
+ */
+struct i3c_ccc_getmxds {
+ u8 maxwr;
+ u8 maxrd;
+ u8 maxrdturn[3];
+} __packed;
+
+#define I3C_CCC_HDR_MODE(mode) BIT(mode)
+
+/**
+ * struct i3c_ccc_gethdrcap - payload passed to GETHDRCAP CCC
+ *
+ * @modes: bitmap of supported HDR modes
+ */
+struct i3c_ccc_gethdrcap {
+ u8 modes;
+} __packed;
+
+/**
+ * enum i3c_ccc_setxtime_subcmd - SETXTIME sub-commands
+ */
+enum i3c_ccc_setxtime_subcmd {
+ I3C_CCC_SETXTIME_ST = 0x7f,
+ I3C_CCC_SETXTIME_DT = 0xbf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE0 = 0xdf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE1 = 0xef,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE2 = 0xf7,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE3 = 0xfb,
+ I3C_CCC_SETXTIME_ASYNC_TRIGGER = 0xfd,
+ I3C_CCC_SETXTIME_TPH = 0x3f,
+ I3C_CCC_SETXTIME_TU = 0x9f,
+ I3C_CCC_SETXTIME_ODR = 0x8f,
+};
+
+/**
+ * struct i3c_ccc_setxtime - payload passed to SETXTIME CCC
+ *
+ * @subcmd: one of the sub-commands defined in &enum i3c_ccc_setxtime_subcmd
+ * @data: sub-command payload. Amount of data is determined by
+ * &i3c_ccc_setxtime->subcmd
+ */
+struct i3c_ccc_setxtime {
+ u8 subcmd;
+ u8 data[0];
+} __packed;
+
+#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0)
+#define I3C_CCC_GETXTIME_ASYNC_MODE(x) BIT((x) + 1)
+#define I3C_CCC_GETXTIME_OVERFLOW BIT(7)
+
+/**
+ * struct i3c_ccc_getxtime - payload retrieved from GETXTIME CCC
+ *
+ * @supported_modes: bitmap describing supported XTIME modes
+ * @state: current status (enabled mode and overflow status)
+ * @frequency: slave's internal oscillator frequency in 500KHz steps
+ * @inaccuracy: slave's internal oscillator inaccuracy in 0.1% steps
+ */
+struct i3c_ccc_getxtime {
+ u8 supported_modes;
+ u8 state;
+ u8 frequency;
+ u8 inaccuracy;
+} __packed;
+
+/**
+ * struct i3c_ccc_cmd_payload - CCC payload
+ *
+ * @len: payload length
+ * @data: payload data. This buffer must be DMA-able
+ */
+struct i3c_ccc_cmd_payload {
+ u16 len;
+ void *data;
+};
+
+/**
+ * struct i3c_ccc_cmd_dest - CCC command destination
+ *
+ * @addr: can be an I3C device address or the broadcast address if this is a
+ * broadcast CCC
+ * @payload: payload to be sent to this device or broadcasted
+ */
+struct i3c_ccc_cmd_dest {
+ u8 addr;
+ struct i3c_ccc_cmd_payload payload;
+};
+
+/**
+ * struct i3c_ccc_cmd - CCC command
+ *
+ * @rnw: true if the CCC should retrieve data from the device. Only valid for
+ * unicast commands
+ * @id: CCC command id
+ * @ndests: number of destinations. Should always be one for broadcast commands
+ * @dests: array of destinations and associated payload for this CCC. Most of
+ * the time, only one destination is provided
+ * @err: I3C error code
+ */
+struct i3c_ccc_cmd {
+ u8 rnw;
+ u8 id;
+ unsigned int ndests;
+ struct i3c_ccc_cmd_dest *dests;
+ enum i3c_error_code err;
+};
+
+#endif /* I3C_CCC_H */
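/*
 * Editor's illustrative sketch, not part of this patch: filling an
 * i3c_ccc_cmd for a directed DISEC that masks SIR events on one device,
 * similar in spirit to the core's i3c_master_disec_locked() helper.
 * Remember that @payload.data must be DMA-able (e.g. kmalloc'ed).
 */
static void example_fill_disec(struct i3c_ccc_cmd *cmd,
			       struct i3c_ccc_cmd_dest *dest,
			       struct i3c_ccc_events *events, u8 addr)
{
	events->events = I3C_CCC_EVENT_SIR;

	dest->addr = addr;
	dest->payload.len = sizeof(*events);
	dest->payload.data = events;

	cmd->rnw = false;
	cmd->id = I3C_CCC_DISEC(false);		/* directed (unicast) variant */
	cmd->dests = dest;
	cmd->ndests = 1;
}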
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
new file mode 100644
index 000000000000..5ecb055fd375
--- /dev/null
+++ b/include/linux/i3c/device.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_DEV_H
+#define I3C_DEV_H
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kconfig.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+/**
+ * enum i3c_error_code - I3C error codes
+ *
+ * These are the standard error codes as defined by the I3C specification.
+ * When -EIO is returned by i3c_device_do_priv_xfers() or
+ * i3c_device_send_hdr_cmds(), one can check the error code in
+ * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
+ * what went wrong.
+ *
+ * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
+ * related
+ * @I3C_ERROR_M0: M0 error
+ * @I3C_ERROR_M1: M1 error
+ * @I3C_ERROR_M2: M2 error
+ */
+enum i3c_error_code {
+ I3C_ERROR_UNKNOWN = 0,
+ I3C_ERROR_M0 = 1,
+ I3C_ERROR_M1,
+ I3C_ERROR_M2,
+};
+
+/**
+ * enum i3c_hdr_mode - HDR mode ids
+ * @I3C_HDR_DDR: DDR mode
+ * @I3C_HDR_TSP: TSP mode
+ * @I3C_HDR_TSL: TSL mode
+ */
+enum i3c_hdr_mode {
+ I3C_HDR_DDR,
+ I3C_HDR_TSP,
+ I3C_HDR_TSL,
+};
+
+/**
+ * struct i3c_priv_xfer - I3C SDR private transfer
+ * @rnw: encodes the transfer direction. true for a read, false for a write
+ * @len: transfer length in bytes
+ * @data: input/output buffer
+ * @data.in: input buffer. Must point to a DMA-able buffer
+ * @data.out: output buffer. Must point to a DMA-able buffer
+ * @err: I3C error code
+ */
+struct i3c_priv_xfer {
+ u8 rnw;
+ u16 len;
+ union {
+ void *in;
+ const void *out;
+ } data;
+ enum i3c_error_code err;
+};
+
+/**
+ * enum i3c_dcr - I3C DCR values
+ * @I3C_DCR_GENERIC_DEVICE: generic I3C device
+ */
+enum i3c_dcr {
+ I3C_DCR_GENERIC_DEVICE = 0,
+};
+
+#define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33)
+#define I3C_PID_RND_LOWER_32BITS(pid) (!!((pid) & BIT_ULL(32)))
+#define I3C_PID_RND_VAL(pid) ((pid) & GENMASK_ULL(31, 0))
+#define I3C_PID_PART_ID(pid) (((pid) & GENMASK_ULL(31, 16)) >> 16)
+#define I3C_PID_INSTANCE_ID(pid) (((pid) & GENMASK_ULL(15, 12)) >> 12)
+#define I3C_PID_EXTRA_INFO(pid) ((pid) & GENMASK_ULL(11, 0))
+
+#define I3C_BCR_DEVICE_ROLE(bcr) ((bcr) & GENMASK(7, 6))
+#define I3C_BCR_I3C_SLAVE (0 << 6)
+#define I3C_BCR_I3C_MASTER (1 << 6)
+#define I3C_BCR_HDR_CAP BIT(5)
+#define I3C_BCR_BRIDGE BIT(4)
+#define I3C_BCR_OFFLINE_CAP BIT(3)
+#define I3C_BCR_IBI_PAYLOAD BIT(2)
+#define I3C_BCR_IBI_REQ_CAP BIT(1)
+#define I3C_BCR_MAX_DATA_SPEED_LIM BIT(0)
+
+/**
+ * struct i3c_device_info - I3C device information
+ * @pid: Provisional ID
+ * @bcr: Bus Characteristic Register
+ * @dcr: Device Characteristic Register
+ * @static_addr: static/I2C address
+ * @dyn_addr: dynamic address
+ * @hdr_cap: supported HDR modes
+ * @max_read_ds: max read speed information
+ * @max_write_ds: max write speed information
+ * @max_ibi_len: max IBI payload length
+ * @max_read_turnaround: max read turn-around time in micro-seconds
+ * @max_read_len: max private SDR read length in bytes
+ * @max_write_len: max private SDR write length in bytes
+ *
+ * This is the basic information that should be advertised by an I3C device.
+ * Some of them are optional depending on the device type and device
+ * capabilities.
+ * For each I3C slave attached to a master with
+ * i3c_master_add_i3c_dev_locked(), the core will send the relevant CCC command
+ * to retrieve these data.
+ */
+struct i3c_device_info {
+ u64 pid;
+ u8 bcr;
+ u8 dcr;
+ u8 static_addr;
+ u8 dyn_addr;
+ u8 hdr_cap;
+ u8 max_read_ds;
+ u8 max_write_ds;
+ u8 max_ibi_len;
+ u32 max_read_turnaround;
+ u16 max_read_len;
+ u16 max_write_len;
+};
+
+/*
+ * I3C device internals are kept hidden from I3C device users. It's just
+ * simpler to refactor things when everything goes through getter/setters, and
+ * I3C device drivers should not have to worry about internal representation
+ * anyway.
+ */
+struct i3c_device;
+
+/* These macros should be used to fill &struct i3c_device_id entries. */
+#define I3C_MATCH_MANUF_AND_PART (I3C_MATCH_MANUF | I3C_MATCH_PART)
+
+#define I3C_DEVICE(_manufid, _partid, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .data = _drvdata, \
+ }
+
+#define I3C_DEVICE_EXTRA_INFO(_manufid, _partid, _info, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART | \
+ I3C_MATCH_EXTRA_INFO, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .extra_info = _info, \
+ .data = _drvdata, \
+ }
+
+#define I3C_CLASS(_dcr, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_DCR, \
+ .dcr = _dcr, \
+ .data = _drvdata, \
+ }
+
+/**
+ * struct i3c_driver - I3C device driver
+ * @driver: inherit from device_driver
+ * @probe: I3C device probe method
+ * @remove: I3C device remove method
+ * @id_table: I3C device match table. Will be used by the framework to decide
+ * which device to bind to this driver
+ */
+struct i3c_driver {
+ struct device_driver driver;
+ int (*probe)(struct i3c_device *dev);
+ int (*remove)(struct i3c_device *dev);
+ const struct i3c_device_id *id_table;
+};
+
+static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
+{
+ return container_of(drv, struct i3c_driver, driver);
+}
+
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
+struct i3c_device *dev_to_i3cdev(struct device *dev);
+
+static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev,
+ void *data)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ dev_set_drvdata(dev, data);
+}
+
+static inline void *i3cdev_get_drvdata(struct i3c_device *i3cdev)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ return dev_get_drvdata(dev);
+}
+
+int i3c_driver_register_with_owner(struct i3c_driver *drv,
+ struct module *owner);
+void i3c_driver_unregister(struct i3c_driver *drv);
+
+#define i3c_driver_register(__drv) \
+ i3c_driver_register_with_owner(__drv, THIS_MODULE)
+
+/**
+ * module_i3c_driver() - Register a module providing an I3C driver
+ * @__drv: the I3C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * driver.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_driver(__drv) \
+ module_driver(__drv, i3c_driver_register, i3c_driver_unregister)
+
+/**
+ * i3c_i2c_driver_register() - Register an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to register
+ * @i2cdrv: the I2C driver to register
+ *
+ * This function registers both @i2cdrv and @i3cdrv, and fails if one of these
+ * registrations fails. This is mainly useful for devices that support both I2C
+ * and I3C modes.
+ * Note that when CONFIG_I3C is not enabled, this function only registers the
+ * I2C driver.
+ *
+ * Return: 0 if both registrations succeed, a negative error code otherwise.
+ */
+static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ int ret;
+
+ ret = i2c_add_driver(i2cdrv);
+ if (ret || !IS_ENABLED(CONFIG_I3C))
+ return ret;
+
+ ret = i3c_driver_register(i3cdrv);
+ if (ret)
+ i2c_del_driver(i2cdrv);
+
+ return ret;
+}
+
+/**
+ * i3c_i2c_driver_unregister() - Unregister an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to unregister
+ * @i2cdrv: the I2C driver to unregister
+ *
+ * This function unregisters both @i3cdrv and @i2cdrv.
+ * Note that when CONFIG_I3C is not enabled, this function only unregisters the
+ * @i2cdrv.
+ */
+static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ if (IS_ENABLED(CONFIG_I3C))
+ i3c_driver_unregister(i3cdrv);
+
+ i2c_del_driver(i2cdrv);
+}
+
+/**
+ * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C
+ * driver
+ * @__i3cdrv: the I3C driver to register
+ * @__i2cdrv: the I2C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * and an I2C driver.
+ * This macro can be used even if CONFIG_I3C is disabled; in that case, only
+ * the I2C driver will be registered.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \
+ module_driver(__i3cdrv, \
+ i3c_i2c_driver_register, \
+ i3c_i2c_driver_unregister)
+
+int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+
+void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info);
+
+struct i3c_ibi_payload {
+ unsigned int len;
+ const void *data;
+};
+
+/**
+ * struct i3c_ibi_setup - IBI setup object
+ * @max_payload_len: maximum length of the payload associated to an IBI. If one
+ * IBI appears to have a payload that is bigger than this
+ * number, the IBI will be rejected.
+ * @num_slots: number of pre-allocated IBI slots. This should be chosen so that
+ * the system never runs out of IBI slots, otherwise you'll lose
+ * IBIs.
+ * @handler: IBI handler, called every time an IBI is received. This handler runs
+ * in a workqueue context. It is allowed to sleep and send new
+ * messages on the bus, though it's recommended to keep the
+ * processing done there as fast as possible to avoid delaying
+ * processing of other IBIs queued on the same workqueue.
+ *
+ * Temporary structure used to pass information to i3c_device_request_ibi().
+ * This object can be allocated on the stack since i3c_device_request_ibi()
+ * copies every bit of information and does not use it after
+ * i3c_device_request_ibi() has returned.
+ */
+struct i3c_ibi_setup {
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+int i3c_device_request_ibi(struct i3c_device *dev,
+ const struct i3c_ibi_setup *setup);
+void i3c_device_free_ibi(struct i3c_device *dev);
+int i3c_device_enable_ibi(struct i3c_device *dev);
+int i3c_device_disable_ibi(struct i3c_device *dev);
+
+#endif /* I3C_DEV_H */
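/*
 * Editor's illustrative sketch, not part of this patch: the skeleton of
 * an I3C device driver built on this API. IDs, names and the register
 * layout are made up; buffers handed to i3c_device_do_priv_xfers() must
 * be DMA-able, hence the kmalloc.
 */
static int example_probe(struct i3c_device *i3cdev)
{
	struct i3c_priv_xfer xfers[2] = { };
	u8 *buf = kmalloc(2, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	buf[0] = 0x00;			/* hypothetical register address */
	xfers[0].rnw = false;
	xfers[0].len = 1;
	xfers[0].data.out = &buf[0];
	xfers[1].rnw = true;
	xfers[1].len = 1;
	xfers[1].data.in = &buf[1];

	ret = i3c_device_do_priv_xfers(i3cdev, xfers, 2);
	kfree(buf);
	return ret;
}

static int example_remove(struct i3c_device *i3cdev)
{
	return 0;
}

static const struct i3c_device_id example_ids[] = {
	I3C_DEVICE(0x123, 0x456, NULL),
	{ /* sentinel */ },
};

static struct i3c_driver example_driver = {
	.driver.name = "example-i3c",
	.probe = example_probe,
	.remove = example_remove,
	.id_table = example_ids,
};
module_i3c_driver(example_driver);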
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
new file mode 100644
index 000000000000..f13fd8b1dd79
--- /dev/null
+++ b/include/linux/i3c/master.h
@@ -0,0 +1,648 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_MASTER_H
+#define I3C_MASTER_H
+
+#include <asm/bitsperlong.h>
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/i3c/ccc.h>
+#include <linux/i3c/device.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define I3C_HOT_JOIN_ADDR 0x2
+#define I3C_BROADCAST_ADDR 0x7e
+#define I3C_MAX_ADDR GENMASK(6, 0)
+
+struct i3c_master_controller;
+struct i3c_bus;
+struct i2c_device;
+struct i3c_device;
+
+/**
+ * struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor
+ * @node: node element used to insert the slot into the I2C or I3C device
+ * list
+ * @master: I3C master that instantiated this device. Will be used to do
+ * I2C/I3C transfers
+ * @master_priv: master private data assigned to the device. Can be used to
+ * add master specific information
+ *
+ * This structure is describing common I3C/I2C dev information.
+ */
+struct i3c_i2c_dev_desc {
+ struct list_head node;
+ struct i3c_master_controller *master;
+ void *master_priv;
+};
+
+#define I3C_LVR_I2C_INDEX_MASK GENMASK(7, 5)
+#define I3C_LVR_I2C_INDEX(x) ((x) << 5)
+#define I3C_LVR_I2C_FM_MODE BIT(4)
+
+#define I2C_MAX_ADDR GENMASK(9, 0)
+
+/**
+ * struct i2c_dev_boardinfo - I2C device board information
+ * @node: used to insert the boardinfo object in the I2C boardinfo list
+ * @base: regular I2C board information
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ * the I2C device limitations
+ *
+ * This structure is used to attach board-level information to an I2C device.
+ * Each I2C device connected on the I3C bus should have one.
+ */
+struct i2c_dev_boardinfo {
+ struct list_head node;
+ struct i2c_board_info base;
+ u8 lvr;
+};
+
+/**
+ * struct i2c_dev_desc - I2C device descriptor
+ * @common: common part of the I2C device descriptor
+ * @boardinfo: pointer to the boardinfo attached to this I2C device
+ * @dev: I2C device object registered to the I2C framework
+ *
+ * Each I2C device connected on the bus will have an i2c_dev_desc.
+ * This object is created by the core and later attached to the controller
+ * using &struct_i3c_master_controller->ops->attach_i2c_dev().
+ *
+ * &struct_i2c_dev_desc is the internal representation of an I2C device
+ * connected on an I3C bus. This object is also passed to all
+ * &struct_i3c_master_controller_ops hooks.
+ */
+struct i2c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ const struct i2c_dev_boardinfo *boardinfo;
+ struct i2c_client *dev;
+};
+
+/**
+ * struct i3c_ibi_slot - I3C IBI (In-Band Interrupt) slot
+ * @work: work associated to this slot. The IBI handler will be called from
+ * there
+ * @dev: the I3C device that has generated this IBI
+ * @len: length of the payload associated to this IBI
+ * @data: payload buffer
+ *
+ * An IBI slot is an object pre-allocated by the controller and used when an
+ * IBI comes in.
+ * Every time an IBI comes in, the I3C master driver should find a free IBI
+ * slot in its IBI slot pool, retrieve the IBI payload and queue the IBI using
+ * i3c_master_queue_ibi().
+ *
+ * How IBI slots are allocated is left to the I3C master driver, though, for
+ * simple kmalloc-based allocation, the generic IBI slot pool can be used.
+ */
+struct i3c_ibi_slot {
+ struct work_struct work;
+ struct i3c_dev_desc *dev;
+ unsigned int len;
+ void *data;
+};
+
+/**
+ * struct i3c_device_ibi_info - IBI information attached to a specific device
+ * @all_ibis_handled: used to be informed when no more IBIs are waiting to be
+ * processed. Used by i3c_device_disable_ibi() to wait for
+ * all IBIs to be dequeued
+ * @pending_ibis: count the number of pending IBIs. Each pending IBI has its
+ * work element queued to the controller workqueue
+ * @max_payload_len: maximum payload length for an IBI coming from this device.
+ * this value is specified when calling
+ * i3c_device_request_ibi() and should not change at run
+ * time. All IBIs exceeding this limit should be
+ * rejected by the master
+ * @num_slots: number of IBI slots reserved for this device
+ * @enabled: reflect the IBI status
+ * @handler: IBI handler specified at i3c_device_request_ibi() call time. This
+ * handler will be called from the controller workqueue, and as such
+ * is allowed to sleep (though it is recommended to process the IBI
+ * as fast as possible to not stall processing of other IBIs queued
+ * on the same workqueue).
+ * New I3C messages can be sent from the IBI handler
+ *
+ * The &struct_i3c_device_ibi_info object is allocated when
+ * i3c_device_request_ibi() is called and attached to a specific device. This
+ * object is here to manage IBIs coming from a specific I3C device.
+ *
+ * Note that this structure is the generic view of the IBI management
+ * infrastructure. I3C master drivers may have their own internal
+ * representation which they can associate to the device using
+ * controller-private data.
+ */
+struct i3c_device_ibi_info {
+ struct completion all_ibis_handled;
+ atomic_t pending_ibis;
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ unsigned int enabled;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+/**
+ * struct i3c_dev_boardinfo - I3C device board information
+ * @node: used to insert the boardinfo object in the I3C boardinfo list
+ * @init_dyn_addr: initial dynamic address requested by the FW. We provide no
+ * guarantee that the device will end up using this address,
+ * but try our best to assign this specific address to the
+ * device
+ * @static_addr: static address the I3C device listens on before it's been
+ * assigned a dynamic address by the master. Will be used during
+ * bus initialization to assign it a specific dynamic address
+ * before starting DAA (Dynamic Address Assignment)
+ * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * that may be used to attach boardinfo to i3c_dev_desc when the device
+ * does not have a static address
+ * @of_node: optional DT node in case the device has been described in the DT
+ *
+ * This structure is used to attach board-level information to an I3C device.
+ * Not all I3C devices connected on the bus will have a boardinfo. It's only
+ * needed if you want to attach extra resources to a device or assign it a
+ * specific dynamic address.
+ */
+struct i3c_dev_boardinfo {
+ struct list_head node;
+ u8 init_dyn_addr;
+ u8 static_addr;
+ u64 pid;
+ struct device_node *of_node;
+};
+
+/**
+ * struct i3c_dev_desc - I3C device descriptor
+ * @common: common part of the I3C device descriptor
+ * @info: I3C device information. Will be automatically filled when you create
+ * your device with i3c_master_add_i3c_dev_locked()
+ * @ibi_lock: lock used to protect the &struct_i3c_device->ibi
+ * @ibi: IBI info attached to a device. Should be NULL until
+ * i3c_device_request_ibi() is called
+ * @dev: pointer to the I3C device object exposed to I3C device drivers. This
+ * should never be accessed from I3C master controller drivers. Only core
+ * code should manipulate it when updating the dev <-> desc link or
+ * when propagating IBI events to the driver
+ * @boardinfo: pointer to the boardinfo attached to this I3C device
+ *
+ * Internal representation of an I3C device. This object is only used by the
+ * core and passed to I3C master controller drivers when they're requested to
+ * do some operations on the device.
+ * The core maintains the link between the internal I3C dev descriptor and the
+ * object exposed to the I3C device drivers (&struct_i3c_device).
+ */
+struct i3c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ struct i3c_device_info info;
+ struct mutex ibi_lock;
+ struct i3c_device_ibi_info *ibi;
+ struct i3c_device *dev;
+ const struct i3c_dev_boardinfo *boardinfo;
+};
+
+/**
+ * struct i3c_device - I3C device object
+ * @dev: device object to register the I3C dev to the device model
+ * @desc: pointer to an i3c device descriptor object. This link is updated
+ * every time the I3C device is rediscovered with a different dynamic
+ * address assigned
+ * @bus: I3C bus this device is attached to
+ *
+ * I3C device object exposed to I3C device drivers. The core takes care of linking
+ * this object to the relevant &struct_i3c_dev_desc one.
+ * All I3C devs on the I3C bus are represented, including I3C masters. For each
+ * of them, we have an instance of &struct i3c_device.
+ */
+struct i3c_device {
+ struct device dev;
+ struct i3c_dev_desc *desc;
+ struct i3c_bus *bus;
+};
+
+/*
+ * The I3C specification says the maximum number of devices connected on the
+ * bus is 11, but this number depends on external parameters like trace length,
+ * capacitive load per Device, and the types of Devices present on the Bus.
+ * I3C master can also have limitations, so this number is just here as a
+ * reference and should be adjusted on a per-controller/per-board basis.
+ */
+#define I3C_BUS_MAX_DEVS 11
+
+#define I3C_BUS_MAX_I3C_SCL_RATE 12900000
+#define I3C_BUS_TYP_I3C_SCL_RATE 12500000
+#define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000
+#define I3C_BUS_I2C_FM_SCL_RATE 400000
+#define I3C_BUS_TLOW_OD_MIN_NS 200
+
+/**
+ * enum i3c_bus_mode - I3C bus mode
+ * @I3C_BUS_MODE_PURE: only I3C devices are connected to the bus. No limitation
+ * expected
+ * @I3C_BUS_MODE_MIXED_FAST: I2C devices with 50ns spike filter are present on
+ * the bus. The only impact in this mode is that the
+ * high SCL pulse has to stay below 50ns to trick I2C
+ * devices when transmitting I3C frames
+ * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present
+ * on the bus
+ */
+enum i3c_bus_mode {
+ I3C_BUS_MODE_PURE,
+ I3C_BUS_MODE_MIXED_FAST,
+ I3C_BUS_MODE_MIXED_SLOW,
+};
+
+/**
+ * enum i3c_addr_slot_status - I3C address slot status
+ * @I3C_ADDR_SLOT_FREE: address is free
+ * @I3C_ADDR_SLOT_RSVD: address is reserved
+ * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device
+ * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device
+ * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask
+ *
+ * On an I3C bus, addresses are assigned dynamically, and we need to know which
+ * addresses are free to use and which ones are already assigned.
+ *
+ * Addresses marked as reserved are those reserved by the I3C protocol
+ * (broadcast address, ...).
+ */
+enum i3c_addr_slot_status {
+ I3C_ADDR_SLOT_FREE,
+ I3C_ADDR_SLOT_RSVD,
+ I3C_ADDR_SLOT_I2C_DEV,
+ I3C_ADDR_SLOT_I3C_DEV,
+ I3C_ADDR_SLOT_STATUS_MASK = 3,
+};
+
+/**
+ * struct i3c_bus - I3C bus object
+ * @cur_master: I3C master currently driving the bus. Since I3C is multi-master
+ * this can change over time. Will be used to let a master
+ * know whether it needs to request bus ownership before sending
+ * a frame or not
+ * @id: bus ID. Assigned by the framework when registering the bus
+ * @addrslots: a bitmap with 2-bits per-slot to encode the address status and
+ * ease the DAA (Dynamic Address Assignment) procedure (see
+ * &enum i3c_addr_slot_status)
+ * @mode: bus mode (see &enum i3c_bus_mode)
+ * @scl_rate.i3c: maximum rate for the clock signal when doing I3C SDR/priv
+ * transfers
+ * @scl_rate.i2c: maximum rate for the clock signal when doing I2C transfers
+ * @scl_rate: SCL signal rate for I3C and I2C mode
+ * @devs.i3c: contains a list of I3C device descriptors representing I3C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs.i2c: contains a list of I2C device descriptors representing I2C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs: 2 lists containing all I3C/I2C devices connected to the bus
+ * @lock: read/write lock on the bus. This is needed to protect against
+ * operations that have an impact on the whole bus and the devices
+ * connected to it. For example, when asking slaves to drop their
+ * dynamic address (RSTDAA CCC), we need to make sure no one is trying
+ * to send I3C frames to these devices.
+ * Note that this lock does not protect against concurrency between
+ * devices: several drivers can send different I3C/I2C frames through
+ * the same master in parallel. It is the responsibility of the
+ * master to guarantee that frames are actually sent sequentially and
+ * not interleaved
+ *
+ * The I3C bus is represented with its own object and not implicitly described
+ * by the I3C master to cope with the multi-master functionality, where one bus
+ * can be shared amongst several masters, each of them requesting bus ownership
+ * when they need to.
+ */
+struct i3c_bus {
+ struct i3c_dev_desc *cur_master;
+ int id;
+ unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG];
+ enum i3c_bus_mode mode;
+ struct {
+ unsigned long i3c;
+ unsigned long i2c;
+ } scl_rate;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } devs;
+ struct rw_semaphore lock;
+};
+
+/**
+ * struct i3c_master_controller_ops - I3C master methods
+ * @bus_init: hook responsible for the I3C bus initialization. You should at
+ * least call i3c_master_set_info() from there and set the bus mode.
+ * You can also put controller specific initialization in there.
+ * This method is mandatory.
+ * @bus_cleanup: cleanup everything done in
+ * &i3c_master_controller_ops->bus_init().
+ * This method is optional.
+ * @attach_i3c_dev: called every time an I3C device is attached to the bus. It
+ * can be after a DAA or when a device is statically declared
+ * by the FW, in which case it will only have a static address
+ * and the dynamic address will be 0.
+ * When this function is called, device information has not
+ * been retrieved yet.
+ * This is a good place to attach master controller specific
+ * data to I3C devices.
+ * This method is optional.
+ * @reattach_i3c_dev: called every time an I3C device has its address
+ * changed. It can be because the device has been powered
+ * down and has lost its address, or it can happen when a
+ * device had a static address and has been assigned a
+ * dynamic address with SETDASA.
+ * This method is optional.
+ * @detach_i3c_dev: called when an I3C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @do_daa: do a DAA (Dynamic Address Assignment) procedure. This procedure
+ * should send an ENTDAA CCC command and then add all devices
+ * discovered during the DAA using i3c_master_add_i3c_dev_locked().
+ * All devices added with i3c_master_add_i3c_dev_locked() will then be
+ * attached or re-attached to the controller.
+ * This method is mandatory.
+ * @supports_ccc_cmd: should return true if the CCC command is supported, false
+ * otherwise.
+ * This method is optional, if not provided the core assumes
+ * all CCC commands are supported.
+ * @send_ccc_cmd: send a CCC command
+ * This method is mandatory.
+ * @priv_xfers: do one or several private I3C SDR transfers
+ * This method is mandatory.
+ * @attach_i2c_dev: called every time an I2C device is attached to the bus.
+ * This is a good place to attach master controller specific
+ * data to I2C devices.
+ * This method is optional.
+ * @detach_i2c_dev: called when an I2C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @i2c_xfers: do one or several I2C transfers. Note that, unlike i3c
+ * transfers, the core does not guarantee that buffers attached to
+ * the transfers are DMA-safe. If drivers want to have DMA-safe
+ * buffers, they should use the i2c_get_dma_safe_msg_buf()
+ * and i2c_put_dma_safe_msg_buf() helpers provided by the I2C
+ * framework.
+ * This method is mandatory.
+ * @i2c_funcs: expose the supported I2C functionalities.
+ * This method is mandatory.
+ * @request_ibi: attach an IBI handler to an I3C device. This implies defining
+ * an IBI handler and the constraints of the IBI (maximum payload
+ * length and number of pre-allocated slots).
+ * Some controllers support fewer IBI-capable devices than regular
+ * devices, so this method might return -%EBUSY if there's no
+ * more space for an extra IBI registration
+ * This method is optional.
+ * @free_ibi: free an IBI previously requested with ->request_ibi(). The IBI
+ * should have been disabled with ->disable_ibi() prior to that
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @enable_ibi: enable the IBI. Only valid if ->request_ibi() has been called
+ * prior to ->enable_ibi(). The controller should first enable
+ * the IBI on the controller end (for example, unmask the hardware
+ * IRQ) and then send the ENEC CCC command (with the IBI flag set)
+ * to the I3C device.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @disable_ibi: disable an IBI. First send the DISEC CCC command with the IBI
+ * flag set and then deactivate the hardware IRQ on the
+ * controller end.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @recycle_ibi_slot: recycle an IBI slot. Called every time an IBI has been
+ * processed by its handler. The IBI slot should be put back
+ * in the IBI slot pool so that the controller can re-use it
+ * for a future IBI
+ * This method is mandatory only if ->request_ibi is not
+ * NULL.
+ */
+struct i3c_master_controller_ops {
+ int (*bus_init)(struct i3c_master_controller *master);
+ void (*bus_cleanup)(struct i3c_master_controller *master);
+ int (*attach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr);
+ void (*detach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*do_daa)(struct i3c_master_controller *master);
+ bool (*supports_ccc_cmd)(struct i3c_master_controller *master,
+ const struct i3c_ccc_cmd *cmd);
+ int (*send_ccc_cmd)(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd);
+ int (*priv_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+ int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
+ void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
+ int (*i2c_xfers)(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers, int nxfers);
+ u32 (*i2c_funcs)(struct i3c_master_controller *master);
+ int (*request_ibi)(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void (*free_ibi)(struct i3c_dev_desc *dev);
+ int (*enable_ibi)(struct i3c_dev_desc *dev);
+ int (*disable_ibi)(struct i3c_dev_desc *dev);
+ void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
+};
+
+/**
+ * struct i3c_master_controller - I3C master controller object
+ * @dev: device to be registered to the device-model
+ * @this: an I3C device object representing this master. This device will be
+ * added to the list of I3C devs available on the bus
+ * @i2c: I2C adapter used for backward compatibility. This adapter is
+ * registered to the I2C subsystem to be as transparent as possible to
+ * existing I2C drivers
+ * @ops: master operations. See &struct i3c_master_controller_ops
+ * @secondary: true if the master is a secondary master
+ * @init_done: true when the bus initialization is done
+ * @boardinfo.i3c: list of I3C boardinfo objects
+ * @boardinfo.i2c: list of I2C boardinfo objects
+ * @boardinfo: board-level information attached to devices connected on the bus
+ * @bus: I3C bus exposed by this master
+ * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * drivers if they need to postpone operations that need to take place
+ * in a thread context. Typical examples are Hot Join processing which
+ * requires taking the bus lock in maintenance mode, which, in turn,
+ * can only be done from a sleepable context
+ *
+ * A &struct i3c_master_controller has to be registered to the I3C subsystem
+ * through i3c_master_register(). None of &struct i3c_master_controller fields
+ * should be set manually, just pass appropriate values to
+ * i3c_master_register().
+ */
+struct i3c_master_controller {
+ struct device dev;
+ struct i3c_dev_desc *this;
+ struct i2c_adapter i2c;
+ const struct i3c_master_controller_ops *ops;
+ unsigned int secondary : 1;
+ unsigned int init_done : 1;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } boardinfo;
+ struct i3c_bus bus;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * i3c_bus_for_each_i2cdev() - iterate over all I2C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I2C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I2C devs present on the bus.
+ */
+#define i3c_bus_for_each_i2cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i2c, common.node)
+
+/**
+ * i3c_bus_for_each_i3cdev() - iterate over all I3C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I3C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I3C devs present on the bus.
+ */
+#define i3c_bus_for_each_i3cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
+
+int i3c_master_do_i2c_xfers(struct i3c_master_controller *master,
+ const struct i2c_msg *xfers,
+ int nxfers);
+
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_entdaa_locked(struct i3c_master_controller *master);
+int i3c_master_defslvs_locked(struct i3c_master_controller *master);
+
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+ u8 start_addr);
+
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ u8 addr);
+int i3c_master_do_daa(struct i3c_master_controller *master);
+
+int i3c_master_set_info(struct i3c_master_controller *master,
+ const struct i3c_device_info *info);
+
+int i3c_master_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary);
+int i3c_master_unregister(struct i3c_master_controller *master);
+
+/**
+ * i3c_dev_get_master_data() - get master private data attached to an I3C
+ * device descriptor
+ * @dev: the I3C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i3c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i3c_dev_get_master_data(const struct i3c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i3c_dev_set_master_data() - attach master private data to an I3C device
+ * descriptor
+ * @dev: the I3C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i3c_dev_get_master_data().
+ */
+static inline void i3c_dev_set_master_data(struct i3c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
+
+/**
+ * i2c_dev_get_master_data() - get master private data attached to an I2C
+ * device descriptor
+ * @dev: the I2C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i2c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i2c_dev_get_master_data(const struct i2c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i2c_dev_set_master_data() - attach master private data to an I2C device
+ * descriptor
+ * @dev: the I2C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i2c_dev_get_master_data().
+ */
+static inline void i2c_dev_set_master_data(struct i2c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
+
+/**
+ * i3c_dev_get_master() - get master used to communicate with a device
+ * @dev: I3C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i3c_dev_get_master(struct i3c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i2c_dev_get_master() - get master used to communicate with a device
+ * @dev: I2C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i2c_dev_get_master(struct i2c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i3c_master_get_bus() - get the bus attached to a master
+ * @master: master object
+ *
+ * Return: the I3C bus @master is connected to
+ */
+static inline struct i3c_bus *
+i3c_master_get_bus(struct i3c_master_controller *master)
+{
+ return &master->bus;
+}
+
+struct i3c_generic_ibi_pool;
+
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool);
+
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool);
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+ struct i3c_ibi_slot *slot);
+
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
+
+struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
+
+#endif /* I3C_MASTER_H */
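/*
 * Editor's illustrative sketch, not part of this patch: the minimum a
 * master controller driver wires up. Only ->bus_init() is fleshed out;
 * every name is hypothetical and the other mandatory hooks (do_daa,
 * send_ccc_cmd, priv_xfers, i2c_xfers, i2c_funcs) are elided.
 */
static int example_bus_init(struct i3c_master_controller *m)
{
	struct i3c_device_info info = {
		.dyn_addr = 0x08,	/* address this master answers on */
	};

	/* Mandatory: advertise the master's own device information. */
	return i3c_master_set_info(m, &info);
}

static const struct i3c_master_controller_ops example_ops = {
	.bus_init = example_bus_init,
	/* mandatory hooks elided for brevity */
};

static int example_register(struct i3c_master_controller *m,
			    struct device *parent)
{
	return i3c_master_register(m, parent, &example_ops, false);
}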
diff --git a/include/linux/ide.h b/include/linux/ide.h
index c74b0321922a..e7d29ae633cd 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -10,7 +10,7 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/ata.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
@@ -50,6 +50,7 @@ struct ide_request {
struct scsi_request sreq;
u8 sense[SCSI_SENSE_BUFFERSIZE];
u8 type;
+ void *special;
};
static inline struct ide_request *ide_req(struct request *rq)
@@ -529,6 +530,10 @@ struct ide_drive_s {
struct request_queue *queue; /* request queue */
+ bool (*prep_rq)(struct ide_drive_s *, struct request *);
+
+ struct blk_mq_tag_set tag_set;
+
struct request *rq; /* current request */
void *driver_data; /* extra driver data */
u16 *id; /* identification info */
@@ -612,6 +617,10 @@ struct ide_drive_s {
bool sense_rq_armed;
struct request *sense_rq;
struct request_sense sense_data;
+
+ /* async sense insertion */
+ struct work_struct rq_work;
+ struct list_head rq_list;
};
typedef struct ide_drive_s ide_drive_t;
@@ -1089,6 +1098,7 @@ extern int ide_pci_clk;
int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
void ide_kill_rq(ide_drive_t *, struct request *);
+void ide_insert_request_head(ide_drive_t *, struct request *);
void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
@@ -1208,7 +1218,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
extern void ide_timer_expiry(struct timer_list *t);
extern irqreturn_t ide_intr(int irq, void *dev_id);
-extern void do_ide_request(struct request_queue *);
+extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
void ide_init_disk(struct gendisk *, ide_drive_t *);
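/*
 * Editor's illustrative sketch, not part of this patch: with
 * do_ide_request() gone, IDE dispatches through blk-mq, so the new
 * ide_queue_rq() slots straight into a blk_mq_ops table tied to the
 * tag_set that ide_drive_s now embeds.
 */
static const struct blk_mq_ops example_ide_mq_ops = {
	.queue_rq = ide_queue_rq,
};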
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 0ef67f837ae1..3b04e72315e1 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -812,6 +812,8 @@ enum mesh_config_capab_flags {
IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL = 0x40,
};
+#define IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE 0x1
+
/**
* mesh channel switch parameters element's flag indicator
*
@@ -1617,7 +1619,7 @@ struct ieee80211_he_mcs_nss_supp {
* struct ieee80211_he_operation - HE capabilities element
*
* This structure is the "HE operation element" fields as
- * described in P802.11ax_D2.0 section 9.4.2.238
+ * described in P802.11ax_D3.0 section 9.4.2.238
*/
struct ieee80211_he_operation {
__le32 he_oper_params;
@@ -2009,17 +2011,17 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
}
/* HE Operation defines */
-#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0
-#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6
-#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200
-#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00
-#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10
-#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x00100000
-#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00200000
-#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000
-#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000
-#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000
+#define IEEE80211_HE_OPERATION_CO_LOCATED_BSS 0x00008000
+#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000
/*
* ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
@@ -2044,7 +2046,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
he_oper_params = le32_to_cpu(he_oper->he_oper_params);
if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
oper_len += 3;
- if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP)
+ if (he_oper_params & IEEE80211_HE_OPERATION_CO_LOCATED_BSS)
oper_len++;
/* Add the first byte (extension ID) to the total length */
@@ -2685,6 +2687,10 @@ enum ieee80211_tdls_actioncode {
*/
#define WLAN_EXT_CAPA9_FTM_INITIATOR BIT(7)
+/* Defines support for TWT Requester and TWT Responder */
+#define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5)
+#define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6)
+
/* TDLS specific payload type in the LLC/SNAP header */
#define WLAN_TDLS_SNAP_RFTYPE 0x2
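/*
 * Editor's illustrative sketch, not part of this patch: under the D3.0
 * layout the BSS Color moved to bits 24-29, so extraction now needs the
 * explicit offset instead of reading the low bits.
 */
static u8 example_he_bss_color(const struct ieee80211_he_operation *he_oper)
{
	u32 params = le32_to_cpu(he_oper->he_oper_params);

	return (params & IEEE80211_HE_OPERATION_BSS_COLOR_MASK) >>
	       IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET;
}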
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index c20c7e197d07..627b788ba0ff 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -119,6 +119,8 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct net_device *br_fdb_find_port(const struct net_device *br_dev,
const unsigned char *addr,
__u16 vid);
+void br_fdb_clear_offload(const struct net_device *dev, u16 vid);
+bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag);
#else
static inline struct net_device *
br_fdb_find_port(const struct net_device *br_dev,
@@ -127,6 +129,16 @@ br_fdb_find_port(const struct net_device *br_dev,
{
return NULL;
}
+
+static inline void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
+{
+}
+
+static inline bool
+br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
+{
+ return false;
+}
#endif
#endif
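
The =n stubs above follow the usual pattern: callers may use the bridge helpers unconditionally, and with bridge support compiled out br_port_flag_is_set() simply reports false. A minimal caller sketch (illustrative only; BR_FLOOD is one of the per-port flags defined in this header):

static bool port_will_flood(const struct net_device *port_dev)
{
	/* false either when the flag is clear or when CONFIG_BRIDGE=n */
	return br_port_flag_is_set(port_dev, BR_FLOOD);
}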
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 83ea4df6ab81..4cca4da7a6de 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -65,8 +65,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
-#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */
-#define VLAN_TAG_PRESENT VLAN_CFI_MASK
+#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
#define VLAN_N_VID 4096
@@ -78,10 +77,11 @@ static inline bool is_vlan_dev(const struct net_device *dev)
return dev->priv_flags & IFF_802_1Q_VLAN;
}
-#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define skb_vlan_tag_present(__skb) ((__skb)->vlan_present)
+#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci)
#define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK)
-#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK)
+#define skb_vlan_tag_get_cfi(__skb) (!!((__skb)->vlan_tci & VLAN_CFI_MASK))
+#define skb_vlan_tag_get_prio(__skb) (((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)
static inline int vlan_get_rx_ctag_filter_info(struct net_device *dev)
{
@@ -133,6 +133,9 @@ struct vlan_pcpu_stats {
extern struct net_device *__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id);
+extern int vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid,
+ void *arg), void *arg);
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
@@ -236,6 +239,14 @@ __vlan_find_dev_deep_rcu(struct net_device *real_dev,
return NULL;
}
+static inline int
+vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid, void *arg),
+ void *arg)
+{
+ return 0;
+}
+
static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
BUG();
@@ -461,6 +472,31 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
return skb;
}
+/**
+ * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info
+ * @skb: skbuff to clear
+ *
+ * Clears the VLAN information from @skb
+ */
+static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
+{
+ skb->vlan_present = 0;
+}
+
+/**
+ * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb
+ * @dst: skbuff to copy to
+ * @src: skbuff to copy from
+ *
+ * Copies VLAN information from @src to @dst (for branchless code)
+ */
+static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
+{
+ dst->vlan_present = src->vlan_present;
+ dst->vlan_proto = src->vlan_proto;
+ dst->vlan_tci = src->vlan_tci;
+}
+
/*
* __vlan_hwaccel_push_inside - pushes vlan tag to the payload
* @skb: skbuff to tag
@@ -475,7 +511,7 @@ static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
skb_vlan_tag_get(skb));
if (likely(skb))
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
return skb;
}
@@ -491,7 +527,8 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
__be16 vlan_proto, u16 vlan_tci)
{
skb->vlan_proto = vlan_proto;
- skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
+ skb->vlan_tci = vlan_tci;
+ skb->vlan_present = 1;
}
/**
@@ -531,8 +568,6 @@ static inline int __vlan_hwaccel_get_tag(const struct sk_buff *skb,
}
}
-#define HAVE_VLAN_GET_TAG
-
/**
* vlan_get_tag - get the VLAN ID from the skb
* @skb: skbuff to query
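
With the tag-present flag moved out of vlan_tci and into the dedicated skb->vlan_present bit, callers should go through the accessors rather than poking vlan_tci directly. An illustrative sketch (not part of the patch) of forwarding a tag between skbs:

static void copy_rx_vlan(struct sk_buff *dst, const struct sk_buff *src)
{
	if (skb_vlan_tag_present(src))
		__vlan_hwaccel_put_tag(dst, src->vlan_proto,
				       skb_vlan_tag_get(src));
	else
		__vlan_hwaccel_clear_tag(dst);
}

The new __vlan_hwaccel_copy_tag() does the same job without the branch, which is why the hunk above introduces it alongside the clear helper.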
diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
new file mode 100644
index 000000000000..00d7e8e919c6
--- /dev/null
+++ b/include/linux/indirect_call_wrapper.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_INDIRECT_CALL_WRAPPER_H
+#define _LINUX_INDIRECT_CALL_WRAPPER_H
+
+#ifdef CONFIG_RETPOLINE
+
+/*
+ * INDIRECT_CALL_$NR - wrapper for indirect calls with $NR known builtins
+ * @f: function pointer
+ * @f$NR: builtin function names, up to $NR of them
+ * @__VA_ARGS__: arguments for @f
+ *
+ * Avoids retpoline overhead for known builtins, checking @f vs each of them
+ * and directly invoking the matching one. The functions are checked in the
+ * given order. Falls back to the indirect call if none matches.
+ */
+#define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+ likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
+ })
+#define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+ likely(f == f2) ? f2(__VA_ARGS__) : \
+ INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
+ })
+
+#define INDIRECT_CALLABLE_DECLARE(f) f
+#define INDIRECT_CALLABLE_SCOPE
+
+#else
+#define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALLABLE_DECLARE(f)
+#define INDIRECT_CALLABLE_SCOPE static
+#endif
+
+/*
+ * We can use INDIRECT_CALL_$NR for ipv6-related functions only if ipv6 is
+ * builtin; this macro simplifies dealing with indirect calls that have only
+ * ipv4/ipv6 alternatives.
+ */
+#if IS_BUILTIN(CONFIG_IPV6)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) \
+ INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
+#elif IS_ENABLED(CONFIG_INET)
+#define INDIRECT_CALL_INET(f, f2, f1, ...) INDIRECT_CALL_1(f, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_INET(f, f2, f1, ...) f(__VA_ARGS__)
+#endif
+
+#endif
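
A minimal usage sketch (illustrative only, not part of the patch; handle_v4/handle_v6 are hypothetical builtin handlers). Under CONFIG_RETPOLINE the equality tests turn the hot path into direct calls, so the list order should put the statistically most likely callee first:

static int handle_v4(struct sk_buff *skb) { /* builtin candidate */ return 0; }
static int handle_v6(struct sk_buff *skb) { /* builtin candidate */ return 0; }

static int deliver(struct sk_buff *skb, int (*rcv)(struct sk_buff *))
{
	/* f2 is the ipv6 alternative, f1 the ipv4 one */
	return INDIRECT_CALL_INET(rcv, handle_v6, handle_v4, skb);
}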
diff --git a/include/linux/init.h b/include/linux/init.h
index 9c2aba1dbabf..5255069f5a9f 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -146,7 +146,6 @@ extern unsigned int reset_devices;
/* used by init/main.c */
void setup_arch(char **);
void prepare_namespace(void);
-void __init load_default_modules(void);
int __init init_rootfs(void);
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 1d6711c28271..c672f34235e7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -247,10 +247,23 @@ struct irq_affinity_notify {
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
* the MSI(-X) vector space
+ * @nr_sets: Length of the passed in *sets array
+ * @sets: Array holding the size of each affinitized set
*/
struct irq_affinity {
int pre_vectors;
int post_vectors;
+ int nr_sets;
+ int *sets;
+};
+
+/**
+ * struct irq_affinity_desc - Interrupt affinity descriptor
+ * @mask: cpumask to hold the affinity assignment
+ * @is_managed: 1 if the interrupt is managed internally
+ */
+struct irq_affinity_desc {
+ struct cpumask mask;
+ unsigned int is_managed : 1;
};
#if defined(CONFIG_SMP)
@@ -299,7 +312,9 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+struct irq_affinity_desc *
+irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
+
int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
#else /* CONFIG_SMP */
@@ -333,7 +348,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
return 0;
}
-static inline struct cpumask *
+static inline struct irq_affinity_desc *
irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
{
return NULL;
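
A driver describes its interrupt sets by pointing @sets at an array of set sizes. The sketch below is illustrative (assuming a PCI driver that reserves one admin vector; with fixed set sizes the min and max vector counts typically have to match):

static int alloc_vectors(struct pci_dev *pdev)
{
	static int sets[2] = { 8, 4 };	/* e.g. read and write queues */
	struct irq_affinity affd = {
		.pre_vectors = 1,	/* admin vector, not affinitized */
		.nr_sets = ARRAY_SIZE(sets),
		.sets = sets,
	};

	return pci_alloc_irq_vectors_affinity(pdev, 13, 13, PCI_IRQ_MSIX,
					      &affd);
}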
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index 9e30ed6443db..e9bfe6972aed 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -71,6 +71,19 @@ static inline int task_nice_ioclass(struct task_struct *task)
}
/*
+ * If the calling process has set an I/O priority, use that. Otherwise, return
+ * the default I/O priority.
+ */
+static inline int get_current_ioprio(void)
+{
+ struct io_context *ioc = current->io_context;
+
+ if (ioc)
+ return ioc->ioprio;
+ return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
+}
+
+/*
* For inheritance, return the highest of the two given priorities
*/
extern int ioprio_best(unsigned short aprio, unsigned short bprio);
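
Callers that previously open-coded the io_context lookup can now use the helper directly. A short illustrative sketch (not part of the patch):

static bool submitter_is_realtime(void)
{
	return IOPRIO_PRIO_CLASS(get_current_ioprio()) == IOPRIO_CLASS_RT;
}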
diff --git a/include/linux/irq.h b/include/linux/irq.h
index c9bffda04a45..def2b2aac8b1 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -27,6 +27,7 @@
struct seq_file;
struct module;
struct msi_msg;
+struct irq_affinity_desc;
enum irqchip_irq_state;
/*
@@ -834,11 +835,12 @@ struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
unsigned int arch_dynirq_lower_bound(unsigned int from);
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
- struct module *owner, const struct cpumask *affinity);
+ struct module *owner,
+ const struct irq_affinity_desc *affinity);
int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
unsigned int cnt, int node, struct module *owner,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
/* use macros to avoid needing export.h for THIS_MODULE */
#define irq_alloc_descs(irq, from, cnt, node) \
diff --git a/include/linux/irq_sim.h b/include/linux/irq_sim.h
index 630a57e55db6..4500d453a63e 100644
--- a/include/linux/irq_sim.h
+++ b/include/linux/irq_sim.h
@@ -16,7 +16,7 @@
struct irq_sim_work_ctx {
struct irq_work work;
- int irq;
+ unsigned long *pending;
};
struct irq_sim_irq_ctx {
diff --git a/include/linux/irqchip.h b/include/linux/irqchip.h
index 89c34b200671..950e4b2458f0 100644
--- a/include/linux/irqchip.h
+++ b/include/linux/irqchip.h
@@ -19,7 +19,7 @@
* the association between their DT compatible string and their
* initialization function.
*
- * @name: name that must be unique accross all IRQCHIP_DECLARE of the
+ * @name: name that must be unique across all IRQCHIP_DECLARE of the
* same file.
* @compstr: compatible string of the irqchip driver
* @fn: initialization function
@@ -30,7 +30,7 @@
* This macro must be used by the different irqchip drivers to declare
* the association between their version and their initialization function.
*
- * @name: name that must be unique accross all IRQCHIP_ACPI_DECLARE of the
+ * @name: name that must be unique across all IRQCHIP_ACPI_DECLARE of the
* same file.
* @subtable: Subtable to be identified in MADT
* @validate: Function to be called on that subtable to check its validity.
diff --git a/include/linux/irqchip/irq-madera.h b/include/linux/irqchip/irq-madera.h
new file mode 100644
index 000000000000..1160fa3769ae
--- /dev/null
+++ b/include/linux/irqchip/irq-madera.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interrupt support for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2016-2018 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef IRQCHIP_MADERA_H
+#define IRQCHIP_MADERA_H
+
+#include <linux/interrupt.h>
+#include <linux/mfd/madera/core.h>
+
+#define MADERA_IRQ_FLL1_LOCK 0
+#define MADERA_IRQ_FLL2_LOCK 1
+#define MADERA_IRQ_FLL3_LOCK 2
+#define MADERA_IRQ_FLLAO_LOCK 3
+#define MADERA_IRQ_CLK_SYS_ERR 4
+#define MADERA_IRQ_CLK_ASYNC_ERR 5
+#define MADERA_IRQ_CLK_DSP_ERR 6
+#define MADERA_IRQ_HPDET 7
+#define MADERA_IRQ_MICDET1 8
+#define MADERA_IRQ_MICDET2 9
+#define MADERA_IRQ_JD1_RISE 10
+#define MADERA_IRQ_JD1_FALL 11
+#define MADERA_IRQ_JD2_RISE 12
+#define MADERA_IRQ_JD2_FALL 13
+#define MADERA_IRQ_MICD_CLAMP_RISE 14
+#define MADERA_IRQ_MICD_CLAMP_FALL 15
+#define MADERA_IRQ_DRC2_SIG_DET 16
+#define MADERA_IRQ_DRC1_SIG_DET 17
+#define MADERA_IRQ_ASRC1_IN1_LOCK 18
+#define MADERA_IRQ_ASRC1_IN2_LOCK 19
+#define MADERA_IRQ_ASRC2_IN1_LOCK 20
+#define MADERA_IRQ_ASRC2_IN2_LOCK 21
+#define MADERA_IRQ_DSP_IRQ1 22
+#define MADERA_IRQ_DSP_IRQ2 23
+#define MADERA_IRQ_DSP_IRQ3 24
+#define MADERA_IRQ_DSP_IRQ4 25
+#define MADERA_IRQ_DSP_IRQ5 26
+#define MADERA_IRQ_DSP_IRQ6 27
+#define MADERA_IRQ_DSP_IRQ7 28
+#define MADERA_IRQ_DSP_IRQ8 29
+#define MADERA_IRQ_DSP_IRQ9 30
+#define MADERA_IRQ_DSP_IRQ10 31
+#define MADERA_IRQ_DSP_IRQ11 32
+#define MADERA_IRQ_DSP_IRQ12 33
+#define MADERA_IRQ_DSP_IRQ13 34
+#define MADERA_IRQ_DSP_IRQ14 35
+#define MADERA_IRQ_DSP_IRQ15 36
+#define MADERA_IRQ_DSP_IRQ16 37
+#define MADERA_IRQ_HP1L_SC 38
+#define MADERA_IRQ_HP1R_SC 39
+#define MADERA_IRQ_HP2L_SC 40
+#define MADERA_IRQ_HP2R_SC 41
+#define MADERA_IRQ_HP3L_SC 42
+#define MADERA_IRQ_HP3R_SC 43
+#define MADERA_IRQ_SPKOUTL_SC 44
+#define MADERA_IRQ_SPKOUTR_SC 45
+#define MADERA_IRQ_HP1L_ENABLE_DONE 46
+#define MADERA_IRQ_HP1R_ENABLE_DONE 47
+#define MADERA_IRQ_HP2L_ENABLE_DONE 48
+#define MADERA_IRQ_HP2R_ENABLE_DONE 49
+#define MADERA_IRQ_HP3L_ENABLE_DONE 50
+#define MADERA_IRQ_HP3R_ENABLE_DONE 51
+#define MADERA_IRQ_SPKOUTL_ENABLE_DONE 52
+#define MADERA_IRQ_SPKOUTR_ENABLE_DONE 53
+#define MADERA_IRQ_SPK_SHUTDOWN 54
+#define MADERA_IRQ_SPK_OVERHEAT 55
+#define MADERA_IRQ_SPK_OVERHEAT_WARN 56
+#define MADERA_IRQ_GPIO1 57
+#define MADERA_IRQ_GPIO2 58
+#define MADERA_IRQ_GPIO3 59
+#define MADERA_IRQ_GPIO4 60
+#define MADERA_IRQ_GPIO5 61
+#define MADERA_IRQ_GPIO6 62
+#define MADERA_IRQ_GPIO7 63
+#define MADERA_IRQ_GPIO8 64
+#define MADERA_IRQ_DSP1_BUS_ERR 65
+#define MADERA_IRQ_DSP2_BUS_ERR 66
+#define MADERA_IRQ_DSP3_BUS_ERR 67
+#define MADERA_IRQ_DSP4_BUS_ERR 68
+#define MADERA_IRQ_DSP5_BUS_ERR 69
+#define MADERA_IRQ_DSP6_BUS_ERR 70
+#define MADERA_IRQ_DSP7_BUS_ERR 71
+
+#define MADERA_NUM_IRQ 72
+
+/*
+ * These wrapper functions are for use by other child drivers of the
+ * same parent MFD.
+ */
+static inline int madera_get_irq_mapping(struct madera *madera, int irq)
+{
+ if (!madera->irq_dev)
+ return -ENODEV;
+
+ return regmap_irq_get_virq(madera->irq_data, irq);
+}
+
+static inline int madera_request_irq(struct madera *madera, int irq,
+ const char *name,
+ irq_handler_t handler, void *data)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return irq;
+
+ return request_threaded_irq(irq, NULL, handler, IRQF_ONESHOT, name,
+ data);
+}
+
+static inline void madera_free_irq(struct madera *madera, int irq, void *data)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+
+static inline int madera_set_irq_wake(struct madera *madera, int irq, int on)
+{
+ irq = madera_get_irq_mapping(madera, irq);
+ if (irq < 0)
+ return irq;
+
+ return irq_set_irq_wake(irq, on);
+}
+
+#endif
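
A child driver of the same MFD resolves a logical Madera interrupt to a virq and requests it through the wrappers. The sketch below is illustrative (jack_irq_handler and the priv pointer are hypothetical):

static irqreturn_t jack_irq_handler(int irq, void *data)
{
	/* read HPDET state via the driver's private data */
	return IRQ_HANDLED;
}

static int hook_hpdet(struct madera *madera, void *priv)
{
	return madera_request_irq(madera, MADERA_IRQ_HPDET, "HPDET",
				  jack_irq_handler, priv);
}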
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 068aa46f0d55..35965f41d7be 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -43,6 +43,7 @@ struct irq_chip;
struct irq_data;
struct cpumask;
struct seq_file;
+struct irq_affinity_desc;
/* Number of irqs reserved for a legacy isa controller */
#define NUM_ISA_INTERRUPTS 16
@@ -266,7 +267,7 @@ extern bool irq_domain_check_msi_remap(void);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
irq_hw_number_t hwirq, int node,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
static inline struct fwnode_handle *of_node_to_fwnode(struct device_node *node)
{
@@ -449,7 +450,8 @@ static inline struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *par
extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
- bool realloc, const struct cpumask *affinity);
+ bool realloc,
+ const struct irq_affinity_desc *affinity);
extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index b708e5169d1d..0f919d5fe84f 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -575,6 +575,7 @@ struct transaction_s
enum {
T_RUNNING,
T_LOCKED,
+ T_SWITCH,
T_FLUSH,
T_COMMIT,
T_COMMIT_DFLUSH,
@@ -662,13 +663,13 @@ struct transaction_s
/*
* Number of outstanding updates running on this transaction
- * [t_handle_lock]
+ * [none]
*/
atomic_t t_updates;
/*
* Number of buffers reserved for use by all handles in this transaction
- * handle but not yet modified. [t_handle_lock]
+ * handle but not yet modified. [none]
*/
atomic_t t_outstanding_credits;
@@ -690,7 +691,7 @@ struct transaction_s
ktime_t t_start_time;
/*
- * How many handles used this transaction? [t_handle_lock]
+ * How many handles used this transaction? [none]
*/
atomic_t t_handle_count;
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 9e4e638fb505..b9b1bc5f9669 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -143,6 +143,15 @@ extern const struct kexec_file_ops * const kexec_file_loaders[];
int kexec_image_probe_default(struct kimage *image, void *buf,
unsigned long buf_len);
+int kexec_image_post_load_cleanup_default(struct kimage *image);
+
+/*
+ * If kexec_buf.mem is set to this value, kexec_locate_mem_hole()
+ * will try to allocate free memory. Architectures may override it.
+ */
+#ifndef KEXEC_BUF_MEM_UNKNOWN
+#define KEXEC_BUF_MEM_UNKNOWN 0
+#endif
/**
* struct kexec_buf - parameters for finding a place for a buffer in memory
@@ -174,6 +183,7 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
bool get_value);
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name);
+void * __weak arch_kexec_kernel_image_load(struct kimage *image);
int __weak arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
const Elf_Shdr *relsec,
@@ -183,8 +193,6 @@ int __weak arch_kexec_apply_relocations(struct purgatory_info *pi,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab);
-int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
- int (*func)(struct resource *, void *));
extern int kexec_add_buffer(struct kexec_buf *kbuf);
int kexec_locate_mem_hole(struct kexec_buf *kbuf);
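
An arch loader that has no placement requirement leaves kbuf.mem at KEXEC_BUF_MEM_UNKNOWN and lets kexec_locate_mem_hole()/kexec_add_buffer() pick an address. A minimal sketch (illustrative only):

static int place_buffer(struct kimage *image, void *buf, unsigned long size)
{
	struct kexec_buf kbuf = {
		.image     = image,
		.buffer    = buf,
		.bufsz     = size,
		.memsz     = size,
		.buf_align = PAGE_SIZE,
		.mem       = KEXEC_BUF_MEM_UNKNOWN,
	};

	return kexec_add_buffer(&kbuf);	/* fills kbuf.mem on success */
}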
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e909413e4e38..e07e91daaacc 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -242,10 +242,13 @@ extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
+extern int arch_populate_kprobe_blacklist(void);
extern bool arch_kprobe_on_func_entry(unsigned long offset);
extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
+extern int kprobe_add_ksym_blacklist(unsigned long entry);
+extern int kprobe_add_area_blacklist(unsigned long start, unsigned long end);
struct kprobe_insn_cache {
struct mutex mutex;
@@ -379,6 +382,9 @@ int enable_kprobe(struct kprobe *kp);
void dump_kprobe(struct kprobe *kp);
+void *alloc_insn_page(void);
+void free_insn_page(void *page);
+
#else /* !CONFIG_KPROBES: */
static inline int kprobes_built_in(void)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c926698040e0..c38cc5eb7e73 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -449,6 +449,7 @@ struct kvm {
#endif
long tlbs_dirty;
struct list_head devices;
+ bool manual_dirty_log_protect;
struct dentry *debugfs_dentry;
struct kvm_stat_data **debugfs_stat_data;
struct srcu_struct srcu;
@@ -694,7 +695,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len);
+ void *data, unsigned int offset,
+ unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
@@ -753,7 +755,9 @@ int kvm_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log, int *is_dirty);
int kvm_get_dirty_log_protect(struct kvm *kvm,
- struct kvm_dirty_log *log, bool *is_dirty);
+ struct kvm_dirty_log *log, bool *flush);
+int kvm_clear_dirty_log_protect(struct kvm *kvm,
+ struct kvm_clear_dirty_log *log, bool *flush);
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
@@ -762,9 +766,13 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log);
+int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
+ struct kvm_clear_dirty_log *log);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status);
+int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
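
The manual-protect mode splits dirty logging into fetch and reset steps: userspace reads the bitmap with KVM_GET_DIRTY_LOG and then re-protects only the pages it has consumed via the new KVM_CLEAR_DIRTY_LOG. A hedged userspace-side sketch, assuming the uapi struct added alongside these declarations (slot, range and error handling are placeholders):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int clear_dirty_range(int vm_fd, void *bitmap)
{
	struct kvm_clear_dirty_log clr = {
		.slot = 0,
		.first_page = 0,
		.num_pages = 64,
		.dirty_bitmap = bitmap,	/* bits for the pages to re-protect */
	};

	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clr);
}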
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 7393a316d9fa..5263f87e1d2c 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -51,6 +51,7 @@ struct led_classdev {
#define LED_PANIC_INDICATOR BIT(20)
#define LED_BRIGHT_HW_CHANGED BIT(21)
#define LED_RETAIN_AT_SHUTDOWN BIT(22)
+#define LED_INIT_DEFAULT_TRIGGER BIT(23)
/* set_brightness_work / blink_timer flags, atomic, private. */
unsigned long work_flags;
@@ -487,4 +488,24 @@ struct led_pattern {
int brightness;
};
+enum led_audio {
+ LED_AUDIO_MUTE, /* master mute LED */
+ LED_AUDIO_MICMUTE, /* mic mute LED */
+ NUM_AUDIO_LEDS
+};
+
+#if IS_ENABLED(CONFIG_LEDS_TRIGGER_AUDIO)
+enum led_brightness ledtrig_audio_get(enum led_audio type);
+void ledtrig_audio_set(enum led_audio type, enum led_brightness state);
+#else
+static inline enum led_brightness ledtrig_audio_get(enum led_audio type)
+{
+ return LED_OFF;
+}
+static inline void ledtrig_audio_set(enum led_audio type,
+ enum led_brightness state)
+{
+}
+#endif
+
#endif /* __LINUX_LEDS_H_INCLUDED */
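
A sound driver mirrors its mute controls onto the audio LEDs with a one-liner; with CONFIG_LEDS_TRIGGER_AUDIO=n the stub above makes the call a no-op. Illustrative sketch:

static void update_mute_led(bool muted)
{
	ledtrig_audio_set(LED_AUDIO_MUTE, muted ? LED_ON : LED_OFF);
}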
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 2fdeac1a420d..5d865a5d5cdc 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -90,7 +90,7 @@ typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
struct nvm_chk_meta *);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
+typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
dma_addr_t *);
@@ -357,6 +357,7 @@ struct nvm_geo {
u32 clba; /* sectors per chunk */
u16 csecs; /* sector size */
u16 sos; /* out-of-band area size */
+ bool ext; /* metadata in extended data buffer */
/* device write constrains */
u32 ws_min; /* minimum write size */
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 7c47b1a471d4..7e020782ade2 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -79,6 +79,12 @@
#define ALIGN __ALIGN
#define ALIGN_STR __ALIGN_STR
+#ifndef GLOBAL
+#define GLOBAL(name) \
+ .globl name ASM_NL \
+ name:
+#endif
+
#ifndef ENTRY
#define ENTRY(name) \
.globl name ASM_NL \
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index 22443d7fb5cd..a99c58866860 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -57,6 +57,15 @@ static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
__clear_bit(nr, addr);
}
+static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr,
+ int set)
+{
+ if (set)
+ linkmode_set_bit(nr, addr);
+ else
+ linkmode_clear_bit(nr, addr);
+}
+
static inline void linkmode_change_bit(int nr, volatile unsigned long *addr)
{
__change_bit(nr, addr);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1fd82ff99c65..c5335df2372f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -97,8 +97,6 @@ struct lock_class {
* Generation counter, when doing certain classes of graph walking,
* to ensure that we check one node only once:
*/
- unsigned int version;
-
int name_version;
const char *name;
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index bac395f1d00a..5228c62af416 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -139,8 +139,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
unsigned long addr);
-struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
- unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 517e60eecbcb..1293695245df 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -35,7 +35,7 @@ enum axp20x_variants {
#define AXP152_ALDO_OP_MODE 0x13
#define AXP152_LDO0_CTRL 0x15
#define AXP152_DCDC2_V_OUT 0x23
-#define AXP152_DCDC2_V_SCAL 0x25
+#define AXP152_DCDC2_V_RAMP 0x25
#define AXP152_DCDC1_V_OUT 0x26
#define AXP152_DCDC3_V_OUT 0x27
#define AXP152_ALDO12_V_OUT 0x28
@@ -53,7 +53,7 @@ enum axp20x_variants {
#define AXP20X_USB_OTG_STATUS 0x02
#define AXP20X_PWR_OUT_CTRL 0x12
#define AXP20X_DCDC2_V_OUT 0x23
-#define AXP20X_DCDC2_LDO3_V_SCAL 0x25
+#define AXP20X_DCDC2_LDO3_V_RAMP 0x25
#define AXP20X_DCDC3_V_OUT 0x27
#define AXP20X_LDO24_V_OUT 0x28
#define AXP20X_LDO3_V_OUT 0x29
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index b19c370fe81a..f346167c0e00 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -20,9 +20,6 @@
#define WM8994_NUM_AIF 3
struct wm8994_ldo_pdata {
- /** GPIOs to enable regulator, 0 or less if not available */
- int enable;
-
const struct regulator_init_data *init_data;
};
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 2da85b02e1c0..6fee8b1a4400 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -209,7 +209,7 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv)
/**
* linkmode_adv_to_mii_ctrl1000_t
- * advertising: the linkmode advertisement settings
+ * @advertising: the linkmode advertisement settings
*
* A small helper function that translates linkmode advertisement
* settings to phy autonegotiation advertisements for the
@@ -288,6 +288,25 @@ static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa)
}
/**
+ * mii_stat1000_mod_linkmode_lpa_t
+ * @advertising: pointer to destination link mode
+ * @lpa: value of the MII_STAT1000 register
+ *
+ * A small helper function that translates MII_STAT1000 bits, when in
+ * 1000Base-T mode, to linkmode advertisement settings. Other bits in
+ * advertising are not changed.
+ */
+static inline void mii_stat1000_mod_linkmode_lpa_t(unsigned long *advertising,
+ u32 lpa)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising, lpa & LPA_1000HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising, lpa & LPA_1000FULL);
+}
+
+/**
* ethtool_adv_to_mii_adv_x
* @ethadv: the ethtool advertisement settings
*
@@ -354,50 +373,104 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
}
/**
+ * mii_adv_mod_linkmode_adv_t
+ * @advertising: pointer to destination link mode.
+ * @adv: value of the MII_ADVERTISE register
+ *
+ * A small helper function that translates MII_ADVERTISE bits to
+ * linkmode advertisement settings. Leaves other bits unchanged.
+ */
+static inline void mii_adv_mod_linkmode_adv_t(unsigned long *advertising,
+ u32 adv)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ advertising, adv & ADVERTISE_10HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising, adv & ADVERTISE_10FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ advertising, adv & ADVERTISE_100HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising, adv & ADVERTISE_100FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising,
+ adv & ADVERTISE_PAUSE_CAP);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ advertising, adv & ADVERTISE_PAUSE_ASYM);
+}
+
+/**
* mii_adv_to_linkmode_adv_t
* @advertising: pointer to destination link mode.
* @adv: value of the MII_ADVERTISE register
*
* A small helper function that translates MII_ADVERTISE bits
- * to linkmode advertisement settings.
+ * to linkmode advertisement settings. Clears the old value
+ * of advertising.
*/
static inline void mii_adv_to_linkmode_adv_t(unsigned long *advertising,
u32 adv)
{
linkmode_zero(advertising);
- if (adv & ADVERTISE_10HALF)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- advertising);
- if (adv & ADVERTISE_10FULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- advertising);
- if (adv & ADVERTISE_100HALF)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- advertising);
- if (adv & ADVERTISE_100FULL)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- advertising);
- if (adv & ADVERTISE_PAUSE_CAP)
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
- if (adv & ADVERTISE_PAUSE_ASYM)
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
+ mii_adv_mod_linkmode_adv_t(advertising, adv);
+}
+
+/**
+ * mii_lpa_to_linkmode_lpa_t
+ * @lp_advertising: pointer to destination link mode
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits to linkmode LP
+ * advertisement settings. Clears the old value of lp_advertising.
+ */
+static inline void mii_lpa_to_linkmode_lpa_t(unsigned long *lp_advertising,
+ u32 lpa)
+{
+ mii_adv_to_linkmode_adv_t(lp_advertising, lpa);
+
+ if (lpa & LPA_LPACK)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lp_advertising);
+}
+
+/**
+ * mii_lpa_mod_linkmode_lpa_t
+ * @lp_advertising: pointer to destination link mode
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits to linkmode LP
+ * advertisement settings. Leaves other bits unchanged.
+ */
+static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising,
+ u32 lpa)
+{
+ mii_adv_mod_linkmode_adv_t(lp_advertising, lpa);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lp_advertising, lpa & LPA_LPACK);
}
/**
- * ethtool_adv_to_lcl_adv_t
- * @advertising:pointer to ethtool advertising
+ * linkmode_adv_to_lcl_adv_t
+ * @advertising: pointer to linkmode advertising
*
- * A small helper function that translates ethtool advertising to LVL
+ * A small helper function that translates linkmode advertising to local
* pause capabilities.
*/
-static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising)
+static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
{
u32 lcl_adv = 0;
- if (advertising & ADVERTISED_Pause)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ advertising))
lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (advertising & ADVERTISED_Asym_Pause)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ advertising))
lcl_adv |= ADVERTISE_PAUSE_ASYM;
return lcl_adv;
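
The _mod_ variants let a PHY driver accumulate the link partner's abilities from several registers into one linkmode bitmap without clobbering bits set earlier. Illustrative sketch (register values are passed in; how they are read is driver-specific):

static void build_lp_advertising(unsigned long *lp_advertising,
				 u32 lpa, u32 stat1000)
{
	mii_lpa_mod_linkmode_lpa_t(lp_advertising, lpa);
	mii_stat1000_mod_linkmode_lpa_t(lp_advertising, stat1000);
}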
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index dca6ab4eaa99..36e412c3d657 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -226,6 +226,7 @@ enum {
MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT = 1ULL << 37,
MLX4_DEV_CAP_FLAG2_USER_MAC_EN = 1ULL << 38,
MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW = 1ULL << 39,
+ MLX4_DEV_CAP_FLAG2_SW_CQ_INIT = 1ULL << 40,
};
enum {
@@ -1136,7 +1137,8 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
- unsigned vector, int collapsed, int timestamp_en);
+ unsigned int vector, int collapsed, int timestamp_en,
+ void *buf_addr, bool user_cq);
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
int *base, u8 flags, u8 usage);
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 31a750570c38..612c8c2f2466 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -60,7 +60,7 @@ struct mlx5_core_cq {
} tasklet_ctx;
int reset_notify_added;
struct list_head reset_notify;
- struct mlx5_eq *eq;
+ struct mlx5_eq_comp *eq;
u16 uid;
};
@@ -125,9 +125,9 @@ struct mlx5_cq_modify_params {
};
enum {
- CQE_SIZE_64 = 0,
- CQE_SIZE_128 = 1,
- CQE_SIZE_128_PAD = 2,
+ CQE_STRIDE_64 = 0,
+ CQE_STRIDE_128 = 1,
+ CQE_STRIDE_128_PAD = 2,
};
#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
@@ -135,8 +135,8 @@ enum {
static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
{
- return padding_128_en ? CQE_SIZE_128_PAD :
- size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+ return padding_128_en ? CQE_STRIDE_128_PAD :
+ size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128;
}
static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b4c0457fbebd..8c4a820bd4c1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -212,6 +212,13 @@ enum {
MLX5_PFAULT_SUBTYPE_RDMA = 1,
};
+enum wqe_page_fault_type {
+ MLX5_WQE_PF_TYPE_RMP = 0,
+ MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
+ MLX5_WQE_PF_TYPE_RESP = 2,
+ MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
+};
+
enum {
MLX5_PERM_LOCAL_READ = 1 << 2,
MLX5_PERM_LOCAL_WRITE = 1 << 3,
@@ -294,9 +301,15 @@ enum {
MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};
+/* mlx5 components can subscribe to any one of these events via
+ * the mlx5_eq_notifier_register API.
+ */
enum mlx5_event {
+ /* Special value to subscribe to any event */
+ MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0,
+ /* HW events enum start: comp events are not subscribable */
MLX5_EVENT_TYPE_COMP = 0x0,
-
+ /* HW Async events enum start: subscribable events */
MLX5_EVENT_TYPE_PATH_MIG = 0x01,
MLX5_EVENT_TYPE_COMM_EST = 0x02,
MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
@@ -317,6 +330,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
+ MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
@@ -334,6 +348,8 @@ enum mlx5_event {
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
+
+ MLX5_EVENT_TYPE_MAX = MLX5_EVENT_TYPE_DEVICE_TRACER + 1,
};
enum {
@@ -405,6 +421,7 @@ enum {
MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
MLX5_OPCODE_BIND_MW = 0x18,
MLX5_OPCODE_CONFIG_CMD = 0x1f,
+ MLX5_OPCODE_ENHANCED_MPSW = 0x29,
MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
MLX5_RECV_OPCODE_SEND = 0x01,
@@ -766,6 +783,11 @@ static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
return (cqe->op_own >> 2) & 0x3;
}
+static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
+{
+ return cqe->op_own >> 4;
+}
+
static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index aa5963b5d38e..4d16ba04790e 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -46,10 +46,11 @@
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
+#include <linux/notifier.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/srq.h>
+#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
@@ -85,18 +86,6 @@ enum {
};
enum {
- MLX5_EQ_VEC_PAGES = 0,
- MLX5_EQ_VEC_CMD = 1,
- MLX5_EQ_VEC_ASYNC = 2,
- MLX5_EQ_VEC_PFAULT = 3,
- MLX5_EQ_VEC_COMP_BASE,
-};
-
-enum {
- MLX5_MAX_IRQ_NAME = 32
-};
-
-enum {
MLX5_ATOMIC_MODE_OFFSET = 16,
MLX5_ATOMIC_MODE_IB_COMP = 1,
MLX5_ATOMIC_MODE_CX = 2,
@@ -205,16 +194,7 @@ struct mlx5_rsc_debug {
};
enum mlx5_dev_event {
- MLX5_DEV_EVENT_SYS_ERROR,
- MLX5_DEV_EVENT_PORT_UP,
- MLX5_DEV_EVENT_PORT_DOWN,
- MLX5_DEV_EVENT_PORT_INITIALIZED,
- MLX5_DEV_EVENT_LID_CHANGE,
- MLX5_DEV_EVENT_PKEY_CHANGE,
- MLX5_DEV_EVENT_GUID_CHANGE,
- MLX5_DEV_EVENT_CLIENT_REREG,
- MLX5_DEV_EVENT_PPS,
- MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT,
+ MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
};
enum mlx5_port_status {
@@ -222,14 +202,6 @@ enum mlx5_port_status {
MLX5_PORT_DOWN = 2,
};
-enum mlx5_eq_type {
- MLX5_EQ_TYPE_COMP,
- MLX5_EQ_TYPE_ASYNC,
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- MLX5_EQ_TYPE_PF,
-#endif
-};
-
struct mlx5_bfreg_info {
u32 *sys_pages;
int num_low_latency_bfregs;
@@ -297,6 +269,8 @@ struct mlx5_cmd_stats {
};
struct mlx5_cmd {
+ struct mlx5_nb nb;
+
void *cmd_alloc_buf;
dma_addr_t alloc_dma;
int alloc_size;
@@ -366,51 +340,6 @@ struct mlx5_frag_buf_ctrl {
u8 log_frag_strides;
};
-struct mlx5_eq_tasklet {
- struct list_head list;
- struct list_head process_list;
- struct tasklet_struct task;
- /* lock on completion tasklet list */
- spinlock_t lock;
-};
-
-struct mlx5_eq_pagefault {
- struct work_struct work;
- /* Pagefaults lock */
- spinlock_t lock;
- struct workqueue_struct *wq;
- mempool_t *pool;
-};
-
-struct mlx5_cq_table {
- /* protect radix tree */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
-
-struct mlx5_eq {
- struct mlx5_core_dev *dev;
- struct mlx5_cq_table cq_table;
- __be32 __iomem *doorbell;
- u32 cons_index;
- struct mlx5_frag_buf buf;
- int size;
- unsigned int irqn;
- u8 eqn;
- int nent;
- u64 mask;
- struct list_head list;
- int index;
- struct mlx5_rsc_debug *dbg;
- enum mlx5_eq_type type;
- union {
- struct mlx5_eq_tasklet tasklet_ctx;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct mlx5_eq_pagefault pf_ctx;
-#endif
- };
-};
-
struct mlx5_core_psv {
u32 psv_idx;
struct psv_layout {
@@ -463,36 +392,6 @@ struct mlx5_core_rsc_common {
struct completion free;
};
-struct mlx5_core_srq {
- struct mlx5_core_rsc_common common; /* must be first */
- u32 srqn;
- int max;
- size_t max_gs;
- size_t max_avail_gather;
- int wqe_shift;
- void (*event) (struct mlx5_core_srq *, enum mlx5_event);
-
- atomic_t refcount;
- struct completion free;
- u16 uid;
-};
-
-struct mlx5_eq_table {
- void __iomem *update_ci;
- void __iomem *update_arm_ci;
- struct list_head comp_eqs_list;
- struct mlx5_eq pages_eq;
- struct mlx5_eq async_eq;
- struct mlx5_eq cmd_eq;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct mlx5_eq pfault_eq;
-#endif
- int num_comp_vectors;
- /* protect EQs list
- */
- spinlock_t lock;
-};
-
struct mlx5_uars_page {
void __iomem *map;
bool wc;
@@ -542,13 +441,8 @@ struct mlx5_core_health {
};
struct mlx5_qp_table {
- /* protect radix tree
- */
- spinlock_t lock;
- struct radix_tree_root tree;
-};
+ struct notifier_block nb;
-struct mlx5_srq_table {
/* protect radix tree
*/
spinlock_t lock;
@@ -575,11 +469,6 @@ struct mlx5_core_sriov {
int enabled_vfs;
};
-struct mlx5_irq_info {
- cpumask_var_t mask;
- char name[MLX5_MAX_IRQ_NAME];
-};
-
struct mlx5_fc_stats {
spinlock_t counters_idr_lock; /* protects counters_idr */
struct idr counters_idr;
@@ -593,10 +482,12 @@ struct mlx5_fc_stats {
unsigned long sampling_interval; /* jiffies */
};
+struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
-struct mlx5_pagefault;
+struct mlx5_devcom;
+struct mlx5_eq_table;
struct mlx5_rate_limit {
u32 rate;
@@ -619,37 +510,12 @@ struct mlx5_rl_table {
struct mlx5_rl_entry *rl_entry;
};
-enum port_module_event_status_type {
- MLX5_MODULE_STATUS_PLUGGED = 0x1,
- MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
- MLX5_MODULE_STATUS_ERROR = 0x3,
- MLX5_MODULE_STATUS_NUM = 0x3,
-};
-
-enum port_module_event_error_type {
- MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
- MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
- MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
- MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
- MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
- MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
- MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
- MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
- MLX5_MODULE_EVENT_ERROR_UNKNOWN,
- MLX5_MODULE_EVENT_ERROR_NUM,
-};
-
-struct mlx5_port_module_event_stats {
- u64 status_counters[MLX5_MODULE_STATUS_NUM];
- u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
-};
-
struct mlx5_priv {
char name[MLX5_MAX_NAME_LEN];
- struct mlx5_eq_table eq_table;
- struct mlx5_irq_info *irq_info;
+ struct mlx5_eq_table *eq_table;
/* pages stuff */
+ struct mlx5_nb pg_nb;
struct workqueue_struct *pg_wq;
struct rb_root page_root;
int fw_pages;
@@ -659,8 +525,6 @@ struct mlx5_priv {
struct mlx5_core_health health;
- struct mlx5_srq_table srq_table;
-
/* start: qp staff */
struct mlx5_qp_table qp_table;
struct dentry *qp_debugfs;
@@ -690,28 +554,18 @@ struct mlx5_priv {
struct list_head dev_list;
struct list_head ctx_list;
spinlock_t ctx_lock;
-
- struct list_head waiting_events_list;
- bool is_accum_events;
+ struct mlx5_events *events;
struct mlx5_flow_steering *steering;
struct mlx5_mpfs *mpfs;
struct mlx5_eswitch *eswitch;
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
+ struct mlx5_devcom *devcom;
unsigned long pci_dev_data;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
- struct mlx5_port_module_event_stats pme_stats;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- void (*pfault)(struct mlx5_core_dev *dev,
- void *context,
- struct mlx5_pagefault *pfault);
- void *pfault_ctx;
- struct srcu_struct pfault_srcu;
-#endif
struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
};
@@ -736,44 +590,6 @@ enum mlx5_pagefault_type_flags {
MLX5_PFAULT_RDMA = 1 << 2,
};
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
- u32 bytes_committed;
- u32 token;
- u8 event_subtype;
- u8 type;
- union {
- /* Initiator or send message responder pagefault details. */
- struct {
- /* Received packet size, only valid for responders. */
- u32 packet_size;
- /*
- * Number of resource holding WQE, depends on type.
- */
- u32 wq_num;
- /*
- * WQE index. Refers to either the send queue or
- * receive queue, according to event_subtype.
- */
- u16 wqe_index;
- } wqe;
- /* RDMA responder pagefault details */
- struct {
- u32 r_key;
- /*
- * Received packet size, minimal size page fault
- * resolution required for forward progress.
- */
- u32 packet_size;
- u32 rdma_op_len;
- u64 rdma_va;
- } rdma;
- };
-
- struct mlx5_eq *eq;
- struct work_struct work;
-};
-
struct mlx5_td {
struct list_head tirs_list;
u32 tdn;
@@ -803,6 +619,8 @@ struct mlx5_pps {
};
struct mlx5_clock {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_nb pps_nb;
seqlock_t lock;
struct cyclecounter cycles;
struct timecounter tc;
@@ -810,7 +628,6 @@ struct mlx5_clock {
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
- struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
@@ -843,9 +660,6 @@ struct mlx5_core_dev {
/* sync interface state */
struct mutex intf_state_mutex;
unsigned long intf_state;
- void (*event) (struct mlx5_core_dev *dev,
- enum mlx5_dev_event event,
- unsigned long param);
struct mlx5_priv priv;
struct mlx5_profile *profile;
atomic_t num_qps;
@@ -859,9 +673,6 @@ struct mlx5_core_dev {
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct page *clock_info_page;
@@ -1070,13 +881,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *in);
-int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
-int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_srq_attr *out);
-int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- u16 lwm, int is_srq);
void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
@@ -1095,9 +899,9 @@ int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
+int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
s32 npages);
@@ -1108,9 +912,6 @@ void mlx5_unregister_debugfs(void);
void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
-struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
@@ -1155,6 +956,9 @@ int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
+unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
+struct cpumask *
+mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
u8 roce_version, u8 roce_l3_type, const u8 *gid,
@@ -1202,23 +1006,21 @@ struct mlx5_interface {
void (*remove)(struct mlx5_core_dev *dev, void *context);
int (*attach)(struct mlx5_core_dev *dev, void *context);
void (*detach)(struct mlx5_core_dev *dev, void *context);
- void (*event)(struct mlx5_core_dev *dev, void *context,
- enum mlx5_dev_event event, unsigned long param);
- void (*pfault)(struct mlx5_core_dev *dev,
- void *context,
- struct mlx5_pagefault *pfault);
- void * (*get_dev)(void *context);
int protocol;
struct list_head list;
};
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
int mlx5_register_interface(struct mlx5_interface *intf);
void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
@@ -1306,10 +1108,4 @@ enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
-static inline const struct cpumask *
-mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
-{
- return dev->priv.irq_info[vector].mask;
-}
-
#endif /* MLX5_DRIVER_H */
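
With the ->event() callback gone from struct mlx5_core_dev, interested components subscribe through a standard notifier_block instead. A minimal sketch (illustrative only; the handler receives the event code and the raw EQE as data):

static int my_event_handler(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	/* filter on the event codes of interest */
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_event_handler,
};

static int subscribe(struct mlx5_core_dev *dev)
{
	return mlx5_notifier_register(dev, &my_nb);
}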
diff --git a/include/linux/mlx5/eq.h b/include/linux/mlx5/eq.h
new file mode 100644
index 000000000000..00045cc4ea11
--- /dev/null
+++ b/include/linux/mlx5/eq.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2018 Mellanox Technologies. */
+
+#ifndef MLX5_CORE_EQ_H
+#define MLX5_CORE_EQ_H
+
+enum {
+ MLX5_EQ_PAGEREQ_IDX = 0,
+ MLX5_EQ_CMD_IDX = 1,
+ MLX5_EQ_ASYNC_IDX = 2,
+ /* reserved to be used by mlx5_core ulps (mlx5e/mlx5_ib) */
+ MLX5_EQ_PFAULT_IDX = 3,
+ MLX5_EQ_MAX_ASYNC_EQS,
+ /* completion eqs vector indices start here */
+ MLX5_EQ_VEC_COMP_BASE = MLX5_EQ_MAX_ASYNC_EQS,
+};
+
+#define MLX5_NUM_CMD_EQE (32)
+#define MLX5_NUM_ASYNC_EQE (0x1000)
+#define MLX5_NUM_SPARE_EQE (0x80)
+
+struct mlx5_eq;
+struct mlx5_core_dev;
+
+struct mlx5_eq_param {
+ u8 index;
+ int nent;
+ u64 mask;
+ void *context;
+ irq_handler_t handler;
+};
+
+struct mlx5_eq *
+mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name,
+ struct mlx5_eq_param *param);
+int
+mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+
+struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
+void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
+
+/* The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create EQs with MLX5_NUM_SPARE_EQE extra entries,
+ * so we must update our consumer index at
+ * least that often.
+ *
+ * mlx5_eq_update_cc() must be called for every EQE processed in the EQ irq handler.
+ */
+static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
+{
+ if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
+ mlx5_eq_update_ci(eq, cc, 0);
+ cc = 0;
+ }
+ return cc;
+}
+
+struct mlx5_nb {
+ struct notifier_block nb;
+ u8 event_type;
+};
+
+#define mlx5_nb_cof(ptr, type, member) \
+ (container_of(container_of(ptr, struct mlx5_nb, nb), type, member))
+
+#define MLX5_NB_INIT(name, handler, event) do { \
+ (name)->nb.notifier_call = handler; \
+ (name)->event_type = MLX5_EVENT_TYPE_##event; \
+} while (0)
+
+#endif /* MLX5_CORE_EQ_H */
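
A consumer of a generic EQ drains EQEs in its irq handler, refreshing the consumer index at least every MLX5_NUM_SPARE_EQE entries via mlx5_eq_update_cc() and arming the EQ when done. Illustrative sketch of a handler passed in struct mlx5_eq_param:

static irqreturn_t my_eq_irq_handler(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_eqe *eqe;
	u32 cc = 0;

	while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
		/* ... process eqe ... */
		cc = mlx5_eq_update_cc(eq, cc + 1);
	}
	mlx5_eq_update_ci(eq, cc, true);	/* update CI and re-arm */

	return IRQ_HANDLED;
}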
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 5660f07d3be0..9df51da04621 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -86,6 +86,11 @@ struct mlx5_flow_spec {
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
};
+enum {
+ MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0),
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
+};
+
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
@@ -96,7 +101,8 @@ struct mlx5_flow_destination {
struct {
u16 num;
u16 vhca_id;
- bool vhca_id_valid;
+ u32 reformat_id;
+ u8 flags;
} vport;
};
};
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index dbff9ff28f2c..821b751485fb 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -161,6 +161,8 @@ enum {
MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
+ MLX5_CMD_OP_SET_MONITOR_COUNTER = 0x774,
+ MLX5_CMD_OP_ARM_MONITOR_COUNTER = 0x775,
MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT = 0x782,
@@ -349,7 +351,7 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reformat_l3_tunnel_to_l2[0x1];
u8 reformat_l2_to_l3_tunnel[0x1];
u8 reformat_and_modify_action[0x1];
- u8 reserved_at_14[0xb];
+ u8 reserved_at_15[0xb];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
@@ -421,6 +423,16 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
};
+struct mlx5_ifc_nvgre_key_bits {
+ u8 hi[0x18];
+ u8 lo[0x8];
+};
+
+union mlx5_ifc_gre_key_bits {
+ struct mlx5_ifc_nvgre_key_bits nvgre;
+ u8 key[0x20];
+};
+
struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_0[0x8];
u8 source_sqn[0x18];
@@ -442,8 +454,7 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_64[0xc];
u8 gre_protocol[0x10];
- u8 gre_key_h[0x18];
- u8 gre_key_l[0x8];
+ union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
u8 reserved_at_b8[0x8];
@@ -582,11 +593,13 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
};
struct mlx5_ifc_flow_table_eswitch_cap_bits {
- u8 reserved_at_0[0x1c];
- u8 fdb_multi_path_to_table[0x1];
- u8 reserved_at_1d[0x1];
+ u8 reserved_at_0[0x1a];
u8 multi_fdb_encap[0x1];
- u8 reserved_at_1e[0x1e1];
+ u8 reserved_at_1b[0x1];
+ u8 fdb_multi_path_to_table[0x1];
+ u8 reserved_at_1d[0x3];
+
+ u8 reserved_at_20[0x1e0];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
@@ -597,20 +610,28 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 reserved_at_800[0x7800];
};
+enum {
+ MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
+ MLX5_COUNTER_FLOW_ESWITCH = 0x1,
+};
+
struct mlx5_ifc_e_switch_cap_bits {
u8 vport_svlan_strip[0x1];
u8 vport_cvlan_strip[0x1];
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_at_5[0x18];
+ u8 reserved_at_5[0x17];
+ u8 counter_eswitch_affinity[0x1];
u8 merged_eswitch[0x1];
u8 nic_vport_node_guid_modify[0x1];
u8 nic_vport_port_guid_modify[0x1];
u8 vxlan_encap_decap[0x1];
u8 nvgre_encap_decap[0x1];
- u8 reserved_at_22[0x9];
+ u8 reserved_at_22[0x1];
+ u8 log_max_fdb_encap_uplink[0x5];
+ u8 reserved_at_28[0x3];
u8 log_max_packet_reformat_context[0x5];
u8 reserved_2b[0x6];
u8 max_encap_header_size[0xa];
@@ -829,7 +850,7 @@ struct mlx5_ifc_vector_calc_cap_bits {
struct mlx5_ifc_calc_op calc2;
struct mlx5_ifc_calc_op calc3;
- u8 reserved_at_e0[0x720];
+ u8 reserved_at_c0[0x720];
};
enum {
@@ -883,6 +904,10 @@ enum {
MLX5_CAP_UMR_FENCE_NONE = 0x2,
};
+enum {
+ MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
+};
+
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x30];
u8 vhca_id[0x10];
@@ -1043,7 +1068,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vector_calc[0x1];
u8 umr_ptr_rlky[0x1];
u8 imaicl[0x1];
- u8 reserved_at_232[0x4];
+ u8 qp_packet_based[0x1];
+ u8 reserved_at_233[0x3];
u8 qkv[0x1];
u8 pkv[0x1];
u8 set_deth_sqpn[0x1];
@@ -1193,7 +1219,19 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 num_vhca_ports[0x8];
u8 reserved_at_618[0x6];
u8 sw_owner_id[0x1];
- u8 reserved_at_61f[0x1e1];
+ u8 reserved_at_61f[0x1];
+
+ u8 max_num_of_monitor_counters[0x10];
+ u8 num_ppcnt_monitor_counters[0x10];
+
+ u8 reserved_at_640[0x10];
+ u8 num_q_monitor_counters[0x10];
+
+ u8 reserved_at_660[0x40];
+
+ u8 uctx_cap[0x20];
+
+ u8 reserved_at_6c0[0x140];
};
enum mlx5_flow_destination_type {
@@ -1209,8 +1247,10 @@ enum mlx5_flow_destination_type {
struct mlx5_ifc_dest_format_struct_bits {
u8 destination_type[0x8];
u8 destination_id[0x18];
+
u8 destination_eswitch_owner_vhca_id_valid[0x1];
- u8 reserved_at_21[0xf];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0xe];
u8 destination_eswitch_owner_vhca_id[0x10];
};
@@ -1220,6 +1260,14 @@ struct mlx5_ifc_flow_counter_list_bits {
u8 reserved_at_20[0x20];
};
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_struct_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
@@ -2249,7 +2297,8 @@ struct mlx5_ifc_qpc_bits {
u8 st[0x8];
u8 reserved_at_10[0x3];
u8 pm_state[0x2];
- u8 reserved_at_15[0x3];
+ u8 reserved_at_15[0x1];
+ u8 req_e2e_credit_mode[0x2];
u8 offload_type[0x4];
u8 end_padding_mode[0x2];
u8 reserved_at_1e[0x2];
@@ -2440,7 +2489,8 @@ struct mlx5_ifc_flow_context_bits {
u8 reserved_at_60[0x10];
u8 action[0x10];
- u8 reserved_at_80[0x8];
+ u8 extended_destination[0x1];
+ u8 reserved_at_80[0x7];
u8 destination_list_size[0x18];
u8 reserved_at_a0[0x8];
@@ -2473,14 +2523,15 @@ struct mlx5_ifc_xrc_srqc_bits {
u8 wq_signature[0x1];
u8 cont_srq[0x1];
- u8 dbr_umem_valid[0x1];
+ u8 reserved_at_22[0x1];
u8 rlky[0x1];
u8 basic_cyclic_rcv_wqe[0x1];
u8 log_rq_stride[0x3];
u8 xrcd[0x18];
u8 page_offset[0x6];
- u8 reserved_at_46[0x2];
+ u8 reserved_at_46[0x1];
+ u8 dbr_umem_valid[0x1];
u8 cqn[0x18];
u8 reserved_at_60[0x20];
@@ -3795,6 +3846,83 @@ enum {
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
};
+struct mlx5_ifc_arm_monitor_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_arm_monitor_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+enum {
+ MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT = 0x0,
+ MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER = 0x1,
+};
+
+enum mlx5_monitor_counter_ppcnt {
+ MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS = 0x0,
+ MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD = 0x1,
+ MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS = 0x2,
+ MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3,
+ MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS = 0x4,
+ MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS = 0x5,
+};
+
+enum {
+ MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER = 0x4,
+};
+
+struct mlx5_ifc_monitor_counter_output_bits {
+ u8 reserved_at_0[0x4];
+ u8 type[0x4];
+ u8 reserved_at_8[0x8];
+ u8 counter[0x10];
+
+ u8 counter_group_id[0x20];
+};
+
+#define MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 (6)
+#define MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1 (1)
+#define MLX5_CMD_SET_MONITOR_NUM_COUNTER (MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +\
+ MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1)
+
+struct mlx5_ifc_set_monitor_counter_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x10];
+ u8 num_of_counters[0x10];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_monitor_counter_output_bits monitor_counter[MLX5_CMD_SET_MONITOR_NUM_COUNTER];
+};
+
+struct mlx5_ifc_set_monitor_counter_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
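Editor's note: the *_bits layouts above are programmed through the generic MLX5_SET()/MLX5_GET() accessors rather than direct member access. A minimal sketch of arming one PPCNT monitor counter with the new command layout follows; the MLX5_CMD_OP_SET_MONITOR_COUNTER opcode name is an assumption from the same series.

static int sketch_set_monitor_counter(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {};
	u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};

	/* Opcode name assumed from the series adding these layouts. */
	MLX5_SET(set_monitor_counter_in, in, opcode,
		 MLX5_CMD_OP_SET_MONITOR_COUNTER);
	MLX5_SET(set_monitor_counter_in, in, num_of_counters, 1);
	MLX5_SET(set_monitor_counter_in, in, monitor_counter[0].type,
		 MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT);
	MLX5_SET(set_monitor_counter_in, in, monitor_counter[0].counter,
		 MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}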
struct mlx5_ifc_query_vport_state_in_bits {
u8 opcode[0x10];
u8 reserved_at_10[0x10];
@@ -4660,7 +4788,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
- MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0X3,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -5566,7 +5694,7 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits {
struct mlx5_ifc_modify_nic_vport_field_select_bits {
u8 reserved_at_0[0x12];
u8 affiliation[0x1];
- u8 reserved_at_e[0x1];
+ u8 reserved_at_13[0x1];
u8 disable_uc_local_lb[0x1];
u8 disable_mc_local_lb[0x1];
u8 node_guid[0x1];
@@ -6689,9 +6817,12 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
- u8 reserved_at_280[0x40];
+ u8 reserved_at_280[0x60];
+
u8 xrc_srq_umem_valid[0x1];
- u8 reserved_at_2c1[0x5bf];
+ u8 reserved_at_2e1[0x1f];
+
+ u8 reserved_at_300[0x580];
u8 pas[0][0x40];
};
@@ -8160,7 +8291,9 @@ struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
u8 port_access_reg_cap_mask_31_to_13[0x13];
u8 pbmc[0x1];
u8 pptb[0x1];
- u8 port_access_reg_cap_mask_10_to_0[0xb];
+ u8 port_access_reg_cap_mask_10_to_09[0x2];
+ u8 ppcnt[0x1];
+ u8 port_access_reg_cap_mask_07_to_00[0x8];
};
struct mlx5_ifc_pcam_reg_bits {
@@ -9024,7 +9157,7 @@ struct mlx5_ifc_dcbx_param_bits {
u8 dcbx_cee_cap[0x1];
u8 dcbx_ieee_cap[0x1];
u8 dcbx_standby_cap[0x1];
- u8 reserved_at_0[0x5];
+ u8 reserved_at_3[0x5];
u8 port_number[0x8];
u8 reserved_at_10[0xa];
u8 max_application_table_size[6];
@@ -9272,7 +9405,9 @@ struct mlx5_ifc_umem_bits {
struct mlx5_ifc_uctx_bits {
u8 modify_field_select[0x40];
- u8 reserved_at_40[0x1c0];
+ u8 cap[0x20];
+
+ u8 reserved_at_60[0x1a0];
};
struct mlx5_ifc_create_umem_in_bits {
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 34aed6032f86..bf4bc01ffb0c 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -107,9 +107,6 @@ enum mlx5e_connector_type {
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
-#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
-#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
-
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index fbe322c966bc..b26ea9077384 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -596,6 +596,11 @@ int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
int reset, void *out, int out_size);
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type);
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
+
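Editor's note: a hedged sketch of the new hold/put pair — take a temporary reference on a resource by number, then drop it. MLX5_RES_QP is assumed from enum mlx5_res_type, and the NULL/ERR check is defensive since the return convention is not shown here.

static bool sketch_res_exists(struct mlx5_core_dev *dev, int qpn)
{
	struct mlx5_core_rsc_common *res;

	res = mlx5_core_res_hold(dev, qpn, MLX5_RES_QP);
	if (IS_ERR_OR_NULL(res))
		return false;

	/* ... inspect the resource while the reference is held ... */
	mlx5_core_res_put(res);
	return true;
}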
static inline const char *mlx5_qp_type_str(int type)
{
switch (type) {
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
deleted file mode 100644
index 1b1f3c20c6a3..000000000000
--- a/include/linux/mlx5/srq.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_SRQ_H
-#define MLX5_SRQ_H
-
-#include <linux/mlx5/driver.h>
-
-enum {
- MLX5_SRQ_FLAG_ERR = (1 << 0),
- MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
- MLX5_SRQ_FLAG_RNDV = (1 << 2),
-};
-
-struct mlx5_srq_attr {
- u32 type;
- u32 flags;
- u32 log_size;
- u32 wqe_shift;
- u32 log_page_size;
- u32 wqe_cnt;
- u32 srqn;
- u32 xrcd;
- u32 page_offset;
- u32 cqn;
- u32 pd;
- u32 lwm;
- u32 user_index;
- u64 db_record;
- __be64 *pas;
- u32 tm_log_list_size;
- u32 tm_next_tag;
- u32 tm_hw_phase_cnt;
- u32 tm_sw_phase_cnt;
- u16 uid;
-};
-
-struct mlx5_core_dev;
-
-void mlx5_init_srq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
-
-#endif /* MLX5_SRQ_H */
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 7f5ca2cd3a32..a261d5528ff7 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -58,17 +58,6 @@ int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
int inlen);
void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
-int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn);
-int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
-int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
-int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
- u32 *rmpn);
-int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-
int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *rqtn);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5ed8f6292a53..2c471a2c43fa 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -206,6 +206,11 @@ struct page {
#endif
} _struct_page_alignment;
+/*
+ * Used for sizing the vmemmap region on some architectures
+ */
+#define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page)))
+
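Editor's note: for illustration, an architecture can bound its vmemmap window with this constant — one struct page per mapped page, with sizeof(struct page) rounded up to a power of two. VA_BITS and PAGE_SHIFT stand in for the architecture's own definitions; this is a sketch, not any particular port's formula.

#define SKETCH_VMEMMAP_SHIFT	(VA_BITS - PAGE_SHIFT + STRUCT_PAGE_MAX_SHIFT)
#define SKETCH_VMEMMAP_SIZE	(1UL << SKETCH_VMEMMAP_SHIFT)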
#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 4224902a8e22..4332199c71c2 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -42,6 +42,7 @@
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373
+#define SDIO_DEVICE_ID_CYPRESS_43012		0xa804
#define SDIO_VENDOR_ID_INTEL 0x0089
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 847705a6d0ec..077d797d1f60 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -314,7 +314,7 @@ enum zone_type {
* Architecture Limit
* ---------------------------
* parisc, ia64, sparc <4G
- * s390 <2G
+ * s390, powerpc <2G
* arm Various
* alpha Unlimited or 0-16MB.
*
@@ -783,6 +783,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif
+#if defined(CONFIG_SPARSEMEM)
+void memblocks_present(void);
+#else
+static inline void memblocks_present(void) {}
+#endif
+
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 01797cb4587e..f9bd2f34b99f 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -448,6 +448,23 @@ struct pci_epf_device_id {
kernel_ulong_t driver_data;
};
+/* i3c */
+
+#define I3C_MATCH_DCR 0x1
+#define I3C_MATCH_MANUF 0x2
+#define I3C_MATCH_PART 0x4
+#define I3C_MATCH_EXTRA_INFO 0x8
+
+struct i3c_device_id {
+ __u8 match_flags;
+ __u8 dcr;
+ __u16 manuf_id;
+ __u16 part_id;
+ __u16 extra_info;
+
+ const void *data;
+};
+
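Editor's note: a hypothetical driver-side match table built from the new flags; all IDs below are made up for illustration.

static const struct i3c_device_id sketch_i3c_ids[] = {
	{
		/* match a specific manufacturer/part pair */
		.match_flags = I3C_MATCH_MANUF | I3C_MATCH_PART,
		.manuf_id = 0x011b,	/* hypothetical manufacturer */
		.part_id = 0x1234,	/* hypothetical part */
	},
	{
		/* fall back to matching on the DCR alone */
		.match_flags = I3C_MATCH_DCR,
		.dcr = 0x42,		/* hypothetical DCR */
	},
	{ /* sentinel */ }
};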
/* spi */
#define SPI_NAME_SIZE 32
@@ -565,7 +582,7 @@ struct platform_device_id {
/**
* struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
* @phy_id: The result of
- * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&PHYSID2)) & @phy_id_mask
+ * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask
* for this PHY type
* @phy_id_mask: Defines the significant bits of @phy_id. A value of 0
* is used to terminate an array of struct mdio_device_id.
diff --git a/include/linux/module.h b/include/linux/module.h
index fce6b4335e36..d5453eb5a68b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -432,6 +432,10 @@ struct module {
unsigned int num_tracepoints;
tracepoint_ptr_t *tracepoints_ptrs;
#endif
+#ifdef CONFIG_BPF_EVENTS
+ unsigned int num_bpf_raw_events;
+ struct bpf_raw_event_map *bpf_raw_events;
+#endif
#ifdef HAVE_JUMP_LABEL
struct jump_entry *jump_entries;
unsigned int num_jump_entries;
@@ -486,6 +490,13 @@ struct module {
#define MODULE_ARCH_INIT {}
#endif
+#ifndef HAVE_ARCH_KALLSYMS_SYMBOL_VALUE
+static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym)
+{
+ return sym->st_value;
+}
+#endif
+
extern struct mutex module_mutex;
/* FIXME: It'd be nice to isolate modules during init, too, so they
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 0e9c50052ff3..784fb52b9900 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -76,7 +76,7 @@ struct msi_desc {
unsigned int nvec_used;
struct device *dev;
struct msi_msg msg;
- struct cpumask *affinity;
+ struct irq_affinity_desc *affinity;
union {
/* PCI MSI/X specific data */
@@ -116,6 +116,8 @@ struct msi_desc {
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
+#define for_each_msi_entry_safe(desc, tmp, dev) \
+ list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
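Editor's note: the _safe variant matters when the loop body removes entries, as in a teardown path; a minimal sketch:

static void sketch_free_all_msi_entries(struct device *dev)
{
	struct msi_desc *desc, *tmp;

	for_each_msi_entry_safe(desc, tmp, dev) {
		list_del(&desc->list);
		free_msi_entry(desc);
	}
}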
#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
@@ -136,7 +138,7 @@ static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
#endif /* CONFIG_PCI_MSI */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
- const struct cpumask *affinity);
+ const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 9b57a9b1b081..cbf77168658c 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -377,6 +377,7 @@ struct cfi_fixup {
#define CFI_MFR_SHARP 0x00B0
#define CFI_MFR_SST 0x00BF
#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
+#define CFI_MFR_MICRON 0x002C /* Micron */
#define CFI_MFR_TOSHIBA 0x0098
#define CFI_MFR_WINBOND 0x00DA
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cd0be91bdefa..ba8fa9072aca 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -207,6 +207,7 @@ struct mtd_debug_info {
struct mtd_info {
u_char type;
uint32_t flags;
+ uint32_t orig_flags; /* Flags as before running mtd checks */
uint64_t size; // Total size of the MTD
/* "Major" erase size for the device. Naïve users may take this
@@ -386,7 +387,7 @@ static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
return dev_of_node(&mtd->dev);
}
-static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index e10b126e148f..33e240acdc6d 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -203,9 +203,12 @@ enum nand_ecc_algo {
*/
#define NAND_IS_BOOT_MEDIUM 0x00400000
-/* Options set by nand scan */
-/* Nand scan has allocated controller struct */
-#define NAND_CONTROLLER_ALLOC 0x80000000
+/*
+ * Do not try to tweak the timings at runtime. This is needed when the
+ * controller initializes the timings on itself or when it relies on
+ * configuration done by the bootloader.
+ */
+#define NAND_KEEP_TIMINGS 0x00800000
/* Cell info constants */
#define NAND_CI_CHIPNR_MSK 0x03
@@ -245,49 +248,6 @@ struct nand_id {
};
/**
- * struct nand_controller_ops - Controller operations
- *
- * @attach_chip: this method is called after the NAND detection phase after
- * flash ID and MTD fields such as erase size, page size and OOB
- * size have been set up. ECC requirements are available if
- * provided by the NAND chip or device tree. Typically used to
- * choose the appropriate ECC configuration and allocate
- * associated resources.
- * This hook is optional.
- * @detach_chip: free all resources allocated/claimed in
- * nand_controller_ops->attach_chip().
- * This hook is optional.
- */
-struct nand_controller_ops {
- int (*attach_chip)(struct nand_chip *chip);
- void (*detach_chip)(struct nand_chip *chip);
-};
-
-/**
- * struct nand_controller - Structure used to describe a NAND controller
- *
- * @lock: protection lock
- * @active: the mtd device which holds the controller currently
- * @wq: wait queue to sleep on if a NAND operation is in
- * progress used instead of the per chip wait queue
- * when a hw controller is available.
- * @ops: NAND controller operations.
- */
-struct nand_controller {
- spinlock_t lock;
- struct nand_chip *active;
- wait_queue_head_t wq;
- const struct nand_controller_ops *ops;
-};
-
-static inline void nand_controller_init(struct nand_controller *nfc)
-{
- nfc->active = NULL;
- spin_lock_init(&nfc->lock);
- init_waitqueue_head(&nfc->wq);
-}
-
-/**
* struct nand_ecc_step_info - ECC step information of ECC engine
* @stepsize: data bytes per ECC step
* @strengths: array of supported strengths
@@ -879,18 +839,21 @@ struct nand_op_parser {
/**
* struct nand_operation - NAND operation descriptor
+ * @cs: the CS line to select for this NAND operation
* @instrs: array of instructions to execute
* @ninstrs: length of the @instrs array
*
* The actual operation structure that will be passed to chip->exec_op().
*/
struct nand_operation {
+ unsigned int cs;
const struct nand_op_instr *instrs;
unsigned int ninstrs;
};
-#define NAND_OPERATION(_instrs) \
+#define NAND_OPERATION(_cs, _instrs) \
{ \
+ .cs = _cs, \
.instrs = _instrs, \
.ninstrs = ARRAY_SIZE(_instrs), \
}
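Editor's note: with the CS line folded into the descriptor, a single-die reset can be sketched as below. The 1 ms wait is a placeholder timeout, and the controller is reached through the relocated ops table (introduced in the next hunk) rather than the removed chip->exec_op() hook.

static int sketch_reset_die(struct nand_chip *chip, unsigned int cs)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_RESET, 0),
		NAND_OP_WAIT_RDY(1, 0),	/* placeholder 1ms timeout */
	};
	struct nand_operation op = NAND_OPERATION(cs, instrs);

	if (!chip->controller->ops->exec_op)
		return -ENOTSUPP;

	return chip->controller->ops->exec_op(chip, &op, false);
}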
@@ -898,11 +861,68 @@ struct nand_operation {
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
+/**
+ * struct nand_controller_ops - Controller operations
+ *
+ * @attach_chip: this method is called after the NAND detection phase after
+ * flash ID and MTD fields such as erase size, page size and OOB
+ * size have been set up. ECC requirements are available if
+ * provided by the NAND chip or device tree. Typically used to
+ * choose the appropriate ECC configuration and allocate
+ * associated resources.
+ * This hook is optional.
+ * @detach_chip: free all resources allocated/claimed in
+ * nand_controller_ops->attach_chip().
+ * This hook is optional.
+ * @exec_op: controller specific method to execute NAND operations.
+ * This method replaces chip->legacy.cmdfunc(),
+ * chip->legacy.{read,write}_{buf,byte,word}(),
+ *		chip->legacy.dev_ready() and chip->legacy.waitfunc().
+ * @setup_data_interface: setup the data interface and timing. If
+ * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
+ * means the configuration should not be applied but
+ * only checked.
+ * This hook is optional.
+ */
+struct nand_controller_ops {
+ int (*attach_chip)(struct nand_chip *chip);
+ void (*detach_chip)(struct nand_chip *chip);
+ int (*exec_op)(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only);
+ int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
+ const struct nand_data_interface *conf);
+};
+
+/**
+ * struct nand_controller - Structure used to describe a NAND controller
+ *
+ * @lock: protection lock
+ * @active: the mtd device which holds the controller currently
+ * @wq: wait queue to sleep on if a NAND operation is in
+ *			progress, used instead of the per chip wait queue
+ * when a hw controller is available.
+ * @ops: NAND controller operations.
+ */
+struct nand_controller {
+ spinlock_t lock;
+ struct nand_chip *active;
+ wait_queue_head_t wq;
+ const struct nand_controller_ops *ops;
+};
+
+static inline void nand_controller_init(struct nand_controller *nfc)
+{
+ nfc->active = NULL;
+ spin_lock_init(&nfc->lock);
+ init_waitqueue_head(&nfc->wq);
+}
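Editor's note: a minimal sketch of how a single-chip driver might wire the relocated controller ops; the names and the MMIO field are hypothetical.

struct sketch_nfc {
	struct nand_controller base;
	void __iomem *regs;		/* hypothetical register window */
};

static int sketch_exec_op(struct nand_chip *chip,
			  const struct nand_operation *op, bool check_only)
{
	/* stub: a real driver would translate op->instrs to HW accesses */
	return check_only ? 0 : -EOPNOTSUPP;
}

static const struct nand_controller_ops sketch_nfc_ops = {
	.exec_op = sketch_exec_op,
};

static void sketch_nfc_init(struct sketch_nfc *nfc)
{
	nand_controller_init(&nfc->base);
	nfc->base.ops = &sketch_nfc_ops;
}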
/**
* struct nand_legacy - NAND chip legacy fields/hooks
* @IO_ADDR_R: address to read the 8 I/O lines of the flash device
* @IO_ADDR_W: address to write the 8 I/O lines of the flash device
+ * @select_chip: select/deselect a specific target/die
* @read_byte: read one byte from the chip
* @write_byte: write a single byte to the chip on the low 8 I/O lines
* @write_buf: write data from the buffer to the chip
@@ -921,6 +941,8 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
* @get_features: get the NAND chip features
* @chip_delay: chip dependent delay for transferring data from array to read
* regs (tR).
+ * @dummy_controller: dummy controller implementation for drivers that can
+ * only control a single chip
*
* If you look at this structure you're already wrong. These fields/hooks are
* all deprecated.
@@ -928,6 +950,7 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
struct nand_legacy {
void __iomem *IO_ADDR_R;
void __iomem *IO_ADDR_W;
+ void (*select_chip)(struct nand_chip *chip, int cs);
u8 (*read_byte)(struct nand_chip *chip);
void (*write_byte)(struct nand_chip *chip, u8 byte);
void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
@@ -945,6 +968,7 @@ struct nand_legacy {
int (*get_features)(struct nand_chip *chip, int feature_addr,
u8 *subfeature_para);
int chip_delay;
+ struct nand_controller dummy_controller;
};
/**
@@ -955,17 +979,10 @@ struct nand_legacy {
* you're modifying an existing driver that is using those
* fields/hooks, you should consider reworking the driver
 *			to avoid using them.
- * @select_chip: [REPLACEABLE] select chip nr
- * @exec_op: controller specific method to execute NAND operations.
- * This method replaces ->cmdfunc(),
- * ->legacy.{read,write}_{buf,byte,word}(),
- * ->legacy.dev_ready() and ->waifunc().
* @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
* @buf_align: minimum buffer alignment required by a platform
- * @dummy_controller: dummy controller implementation for drivers that can
- * only control a single chip
* @state: [INTERN] the current state of the NAND device
* @oob_poi: "poison value buffer," used for laying out OOB data
* before writing
@@ -1012,11 +1029,11 @@ struct nand_legacy {
* this nand device will encounter their life times.
* @blocks_per_die: [INTERN] The number of PEBs in a die
* @data_interface: [INTERN] NAND interface timing information
+ * @cur_cs: currently selected target. -1 means no target selected,
+ * otherwise we should always have cur_cs >= 0 &&
+ * cur_cs < numchips. NAND Controller drivers should not
+ * modify this value, but they're allowed to read it.
* @read_retries: [INTERN] the number of read retry modes supported
- * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If
- * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this
- * means the configuration should not be applied but
- * only checked.
* @bbt: [INTERN] bad block table pointer
* @bbt_td: [REPLACEABLE] bad block table descriptor for flash
* lookup.
@@ -1037,13 +1054,7 @@ struct nand_chip {
struct nand_legacy legacy;
- void (*select_chip)(struct nand_chip *chip, int cs);
- int (*exec_op)(struct nand_chip *chip,
- const struct nand_operation *op,
- bool check_only);
int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
- int (*setup_data_interface)(struct nand_chip *chip, int chipnr,
- const struct nand_data_interface *conf);
unsigned int options;
unsigned int bbt_options;
@@ -1073,6 +1084,8 @@ struct nand_chip {
struct nand_data_interface data_interface;
+ int cur_cs;
+
int read_retries;
flstate_t state;
@@ -1082,7 +1095,6 @@ struct nand_chip {
struct nand_ecc_ctrl ecc;
unsigned long buf_align;
- struct nand_controller dummy_controller;
uint8_t *bbt;
struct nand_bbt_descr *bbt_td;
@@ -1098,15 +1110,6 @@ struct nand_chip {
} manufacturer;
};
-static inline int nand_exec_op(struct nand_chip *chip,
- const struct nand_operation *op)
-{
- if (!chip->exec_op)
- return -ENOTSUPP;
-
- return chip->exec_op(chip, op, false);
-}
-
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
@@ -1345,5 +1348,12 @@ void nand_release(struct nand_chip *chip);
* instruction and have no physical pin to check it.
*/
int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+struct gpio_desc;
+int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
+ unsigned long timeout_ms);
+
+/* Select/deselect a NAND target. */
+void nand_select_target(struct nand_chip *chip, unsigned int cs);
+void nand_deselect_target(struct nand_chip *chip);
#endif /* __LINUX_MTD_RAWNAND_H */
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
index c759d403cbc0..78fc2d4218c8 100644
--- a/include/linux/mtd/sh_flctl.h
+++ b/include/linux/mtd/sh_flctl.h
@@ -1,20 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* SuperH FLCTL nand controller
*
* Copyright © 2008 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __SH_FLCTL_H__
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 7f0c7303575e..fa2d89e38e40 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -1,10 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2014 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_MTD_SPI_NOR_H
@@ -23,7 +19,8 @@
#define SNOR_MFR_ATMEL CFI_MFR_ATMEL
#define SNOR_MFR_GIGADEVICE 0xc8
#define SNOR_MFR_INTEL CFI_MFR_INTEL
-#define SNOR_MFR_MICRON CFI_MFR_ST /* ST Micro <--> Micron */
+#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */
+#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */
#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
#define SNOR_MFR_SPANSION CFI_MFR_AMD
#define SNOR_MFR_SST CFI_MFR_SST
@@ -236,6 +233,8 @@ enum spi_nor_option_flags {
SNOR_F_READY_XSR_RDY = BIT(4),
SNOR_F_USE_CLSR = BIT(5),
SNOR_F_BROKEN_RESET = BIT(6),
+ SNOR_F_4B_OPCODES = BIT(7),
+ SNOR_F_HAS_4BAIT = BIT(8),
};
/**
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 088ff96c3eb6..b92e2aa955b6 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -194,8 +194,10 @@ struct spinand_manufacturer {
};
/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
+extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
/**
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 857f8abf7b91..1377d085ef99 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -845,6 +845,8 @@ enum tc_setup_type {
TC_SETUP_QDISC_PRIO,
TC_SETUP_QDISC_MQ,
TC_SETUP_QDISC_ETF,
+ TC_SETUP_ROOT_QDISC,
+ TC_SETUP_QDISC_GRED,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -863,9 +865,6 @@ enum bpf_netdev_command {
XDP_QUERY_PROG,
XDP_QUERY_PROG_HW,
/* BPF program for offload callbacks, invoked at program load time. */
- BPF_OFFLOAD_VERIFIER_PREP,
- BPF_OFFLOAD_TRANSLATE,
- BPF_OFFLOAD_DESTROY,
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
XDP_QUERY_XSK_UMEM,
@@ -891,15 +890,6 @@ struct netdev_bpf {
/* flags with which program was installed */
u32 prog_flags;
};
- /* BPF_OFFLOAD_VERIFIER_PREP */
- struct {
- struct bpf_prog *prog;
- const struct bpf_prog_offload_ops *ops; /* callee set */
- } verifier;
- /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */
- struct {
- struct bpf_prog *prog;
- } offload;
/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
struct {
struct bpf_offloaded_map *offmap;
@@ -1175,7 +1165,7 @@ struct dev_ifalias {
* entries to skb and update idx with the number of entries.
*
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
- * u16 flags)
+ * u16 flags, struct netlink_ext_ack *extack)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
* struct net_device *dev, u32 filter_mask,
* int nlflags)
@@ -1397,10 +1387,16 @@ struct net_device_ops {
struct net_device *dev,
struct net_device *filter_dev,
int *idx);
-
+ int (*ndo_fdb_get)(struct sk_buff *skb,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u32 portid, u32 seq,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
- u16 flags);
+ u16 flags,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
@@ -2388,13 +2384,13 @@ struct pcpu_sw_netstats {
u64 tx_packets;
u64 tx_bytes;
struct u64_stats_sync syncp;
-};
+} __aligned(4 * sizeof(u64));
struct pcpu_lstats {
u64 packets;
u64 bytes;
struct u64_stats_sync syncp;
-};
+} __aligned(2 * sizeof(u64));
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
@@ -2459,7 +2455,8 @@ enum netdev_cmd {
NETDEV_REGISTER,
NETDEV_UNREGISTER,
NETDEV_CHANGEMTU, /* notify after mtu change happened */
- NETDEV_CHANGEADDR,
+ NETDEV_CHANGEADDR, /* notify after the address change */
+ NETDEV_PRE_CHANGEADDR, /* notify before the address change */
NETDEV_GOING_DOWN,
NETDEV_CHANGENAME,
NETDEV_FEAT_CHANGE,
@@ -2521,6 +2518,11 @@ struct netdev_notifier_changelowerstate_info {
void *lower_state_info; /* is lower dev state */
};
+struct netdev_notifier_pre_changeaddr_info {
+ struct netdev_notifier_info info; /* must be first */
+ const unsigned char *dev_addr;
+};
+
static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
struct net_device *dev)
{
@@ -2615,7 +2617,7 @@ struct net_device *dev_get_by_name(struct net *net, const char *name);
struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
struct net_device *__dev_get_by_name(struct net *net, const char *name);
int dev_alloc_name(struct net_device *dev, const char *name);
-int dev_open(struct net_device *dev);
+int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
void dev_close(struct net_device *dev);
void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
@@ -3224,6 +3226,14 @@ static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
}
+static inline bool __netdev_sent_queue(struct net_device *dev,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ return __netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes,
+ xmit_more);
+}
+
static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
unsigned int pkts, unsigned int bytes)
{
@@ -3613,8 +3623,10 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr,
int dev_ifconf(struct net *net, struct ifconf *, int);
int dev_ethtool(struct net *net, struct ifreq *);
unsigned int dev_get_flags(const struct net_device *);
-int __dev_change_flags(struct net_device *, unsigned int flags);
-int dev_change_flags(struct net_device *, unsigned int);
+int __dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
+int dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack);
void __dev_notify_flags(struct net_device *, unsigned int old_flags,
unsigned int gchanges);
int dev_change_name(struct net_device *, const char *);
@@ -3627,7 +3639,10 @@ int dev_set_mtu_ext(struct net_device *dev, int mtu,
int dev_set_mtu(struct net_device *, int);
int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
-int dev_set_mac_address(struct net_device *, struct sockaddr *);
+int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ struct netlink_ext_ack *extack);
+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack);
int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
@@ -4068,6 +4083,16 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
int (*sync)(struct net_device *, const unsigned char *),
int (*unsync)(struct net_device *,
const unsigned char *));
+int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*sync)(struct net_device *,
+ const unsigned char *, int),
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int));
+void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int));
void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list,
struct net_device *dev,
int (*unsync)(struct net_device *,
@@ -4332,9 +4357,10 @@ static inline bool can_checksum_protocol(netdev_features_t features,
}
#ifdef CONFIG_BUG
-void netdev_rx_csum_fault(struct net_device *dev);
+void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb);
#else
-static inline void netdev_rx_csum_fault(struct net_device *dev)
+static inline void netdev_rx_csum_fault(struct net_device *dev,
+ struct sk_buff *skb)
{
}
#endif
@@ -4360,7 +4386,7 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi
struct netdev_queue *txq, bool more)
{
const struct net_device_ops *ops = dev->netdev_ops;
- int rc;
+ netdev_tx_t rc;
rc = __netdev_start_xmit(ops, skb, dev, more);
if (rc == NETDEV_TX_OK)
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 1d100efe74ec..f2e1e6b13ca4 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -303,11 +303,11 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
/* Netlink CB args */
enum {
IPSET_CB_NET = 0, /* net namespace */
+ IPSET_CB_PROTO, /* ipset protocol */
IPSET_CB_DUMP, /* dump single set/all sets */
IPSET_CB_INDEX, /* set index */
IPSET_CB_PRIVATE, /* set private data */
IPSET_CB_ARG0, /* type specific */
- IPSET_CB_ARG1,
};
/* register and unregister set references */
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index b8d95564bd53..6989e2e4eabf 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
struct nf_conntrack_tuple tuple;
};
+enum grep_conntrack {
+ GRE_CT_UNREPLIED,
+ GRE_CT_REPLIED,
+ GRE_CT_MAX
+};
+
+struct netns_proto_gre {
+ struct nf_proto_net nf;
+ rwlock_t keymap_lock;
+ struct list_head keymap_list;
+ unsigned int gre_timeouts[GRE_CT_MAX];
+};
+
/* add new tuple->key_reply pair to keymap */
int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t);
@@ -28,7 +41,5 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
/* delete keymap entries */
void nf_ct_gre_keymap_destroy(struct nf_conn *ct);
-void nf_nat_need_gre(void);
-
#endif /* __KERNEL__ */
#endif /* _CONNTRACK_PROTO_GRE_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 4a520d3304a2..cf09ab37b45b 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -62,18 +62,6 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id)
}
#endif /* CONFIG_PROVE_LOCKING */
-/*
- * nfnl_dereference - fetch RCU pointer when updates are prevented by subsys mutex
- *
- * @p: The pointer to read, prior to dereferencing
- * @ss: The nfnetlink subsystem ID
- *
- * Return the value of the specified RCU-protected pointer, but omit
- * the READ_ONCE(), because caller holds the NFNL subsystem mutex.
- */
-#define nfnl_dereference(p, ss) \
- rcu_dereference_protected(p, lockdep_nfnl_is_held(ss))
-
#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index fa0686500970..5f2614d02e03 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -17,43 +17,58 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
skb_dst_drop(skb);
}
+static inline struct nf_bridge_info *
+nf_bridge_info_get(const struct sk_buff *skb)
+{
+ return skb_ext_find(skb, SKB_EXT_BRIDGE_NF);
+}
+
+static inline bool nf_bridge_info_exists(const struct sk_buff *skb)
+{
+ return skb_ext_exist(skb, SKB_EXT_BRIDGE_NF);
+}
+
static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- if (skb->nf_bridge == NULL)
+ if (!nf_bridge)
return 0;
- nf_bridge = skb->nf_bridge;
return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
}
static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
- if (skb->nf_bridge == NULL)
+ if (!nf_bridge)
return 0;
- nf_bridge = skb->nf_bridge;
return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
}
static inline struct net_device *
nf_bridge_get_physindev(const struct sk_buff *skb)
{
- return skb->nf_bridge ? skb->nf_bridge->physindev : NULL;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge ? nf_bridge->physindev : NULL;
}
static inline struct net_device *
nf_bridge_get_physoutdev(const struct sk_buff *skb)
{
- return skb->nf_bridge ? skb->nf_bridge->physoutdev : NULL;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge ? nf_bridge->physoutdev : NULL;
}
static inline bool nf_bridge_in_prerouting(const struct sk_buff *skb)
{
- return skb->nf_bridge && skb->nf_bridge->in_prerouting;
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ return nf_bridge && nf_bridge->in_prerouting;
}
#else
#define br_drop_fake_rtable(skb) do { } while (0)
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 4da90a6ab536..4e8add270200 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -34,8 +34,8 @@ struct netlink_skb_parms {
#define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds)
-extern void netlink_table_grab(void);
-extern void netlink_table_ungrab(void);
+void netlink_table_grab(void);
+void netlink_table_ungrab(void);
#define NL_CFG_F_NONROOT_RECV (1 << 0)
#define NL_CFG_F_NONROOT_SEND (1 << 1)
@@ -51,7 +51,7 @@ struct netlink_kernel_cfg {
bool (*compare)(struct net *net, struct sock *sk);
};
-extern struct sock *__netlink_kernel_create(struct net *net, int unit,
+struct sock *__netlink_kernel_create(struct net *net, int unit,
struct module *module,
struct netlink_kernel_cfg *cfg);
static inline struct sock *
@@ -110,24 +110,33 @@ struct netlink_ext_ack {
} \
} while (0)
-extern void netlink_kernel_release(struct sock *sk);
-extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
-extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
-extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
-extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
- const struct netlink_ext_ack *extack);
-extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-
-extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
-extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
- __u32 group, gfp_t allocation);
-extern int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
- __u32 portid, __u32 group, gfp_t allocation,
- int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
- void *filter_data);
-extern int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
-extern int netlink_register_notifier(struct notifier_block *nb);
-extern int netlink_unregister_notifier(struct notifier_block *nb);
+static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
+ u64 cookie)
+{
+ u64 __cookie = cookie;
+
+ memcpy(extack->cookie, &__cookie, sizeof(__cookie));
+ extack->cookie_len = sizeof(__cookie);
+}
+
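Editor's note: a hedged example of the new helper — stash a 64-bit object identifier in the extack cookie alongside an error message so user space can act on it.

static void sketch_report_existing(struct netlink_ext_ack *extack, u64 obj_id)
{
	nl_set_extack_cookie_u64(extack, obj_id);
	NL_SET_ERR_MSG(extack, "object already exists");
}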
+void netlink_kernel_release(struct sock *sk);
+int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
+int netlink_change_ngroups(struct sock *sk, unsigned int groups);
+void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
+void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ const struct netlink_ext_ack *extack);
+int netlink_has_listeners(struct sock *sk, unsigned int group);
+
+int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
+ __u32 group, gfp_t allocation);
+int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb,
+ __u32 portid, __u32 group, gfp_t allocation,
+ int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
+ void *filter_data);
+int netlink_set_err(struct sock *ssk, __u32 portid, __u32 group, int code);
+int netlink_register_notifier(struct notifier_block *nb);
+int netlink_unregister_notifier(struct notifier_block *nb);
/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
@@ -203,7 +212,7 @@ struct netlink_dump_control {
u16 min_dump_alloc;
};
-extern int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct netlink_dump_control *control);
static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
@@ -222,8 +231,8 @@ struct netlink_tap {
struct list_head list;
};
-extern int netlink_add_tap(struct netlink_tap *nt);
-extern int netlink_remove_tap(struct netlink_tap *nt);
+int netlink_add_tap(struct netlink_tap *nt);
+int netlink_remove_tap(struct netlink_tap *nt);
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
struct user_namespace *ns, int cap);
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 496ff759f84c..91745cc3704c 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -403,7 +403,6 @@ struct nvme_fc_port_template {
void **handle);
void (*delete_queue)(struct nvme_fc_local_port *,
unsigned int qidx, void *handle);
- void (*poll_queue)(struct nvme_fc_local_port *, void *handle);
int (*ls_req)(struct nvme_fc_local_port *,
struct nvme_fc_remote_port *,
struct nvmefc_ls_req *);
@@ -649,22 +648,6 @@ enum {
* sequence in one LLDD operation. Errors during Data
* sequence transmit must not allow RSP sequence to be sent.
*/
- NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 1),
- /* Bit 2: When 0, the LLDD is calling the cmd rcv handler
- * in a non-isr context, allowing the transport to finish
- * op completion in the calling context. When 1, the LLDD
- * is calling the cmd rcv handler in an ISR context,
- * requiring the transport to transition to a workqueue
- * for op completion.
- */
- NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 2),
- /* Bit 3: When 0, the LLDD is calling the op done handler
- * in a non-isr context, allowing the transport to finish
- * op completion in the calling context. When 1, the LLDD
- * is calling the op done handler in an ISR context,
- * requiring the transport to transition to a workqueue
- * for op completion.
- */
};
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
new file mode 100644
index 000000000000..03d87c0550a9
--- /dev/null
+++ b/include/linux/nvme-tcp.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NVMe over Fabrics TCP protocol header.
+ * Copyright (c) 2018 Lightbits Labs. All rights reserved.
+ */
+
+#ifndef _LINUX_NVME_TCP_H
+#define _LINUX_NVME_TCP_H
+
+#include <linux/nvme.h>
+
+#define NVME_TCP_DISC_PORT 8009
+#define NVME_TCP_ADMIN_CCSZ SZ_8K
+#define NVME_TCP_DIGEST_LENGTH 4
+
+enum nvme_tcp_pfv {
+ NVME_TCP_PFV_1_0 = 0x0,
+};
+
+enum nvme_tcp_fatal_error_status {
+ NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
+ NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
+ NVME_TCP_FES_HDR_DIGEST_ERR = 0x03,
+ NVME_TCP_FES_DATA_OUT_OF_RANGE = 0x04,
+ NVME_TCP_FES_R2T_LIMIT_EXCEEDED = 0x05,
+ NVME_TCP_FES_DATA_LIMIT_EXCEEDED = 0x05,
+ NVME_TCP_FES_UNSUPPORTED_PARAM = 0x06,
+};
+
+enum nvme_tcp_digest_option {
+ NVME_TCP_HDR_DIGEST_ENABLE = (1 << 0),
+ NVME_TCP_DATA_DIGEST_ENABLE = (1 << 1),
+};
+
+enum nvme_tcp_pdu_type {
+ nvme_tcp_icreq = 0x0,
+ nvme_tcp_icresp = 0x1,
+ nvme_tcp_h2c_term = 0x2,
+ nvme_tcp_c2h_term = 0x3,
+ nvme_tcp_cmd = 0x4,
+ nvme_tcp_rsp = 0x5,
+ nvme_tcp_h2c_data = 0x6,
+ nvme_tcp_c2h_data = 0x7,
+ nvme_tcp_r2t = 0x9,
+};
+
+enum nvme_tcp_pdu_flags {
+ NVME_TCP_F_HDGST = (1 << 0),
+ NVME_TCP_F_DDGST = (1 << 1),
+ NVME_TCP_F_DATA_LAST = (1 << 2),
+ NVME_TCP_F_DATA_SUCCESS = (1 << 3),
+};
+
+/**
+ * struct nvme_tcp_hdr - nvme tcp pdu common header
+ *
+ * @type: pdu type
+ * @flags: pdu specific flags
+ * @hlen: pdu header length
+ * @pdo: pdu data offset
+ * @plen: pdu wire byte length
+ */
+struct nvme_tcp_hdr {
+ __u8 type;
+ __u8 flags;
+ __u8 hlen;
+ __u8 pdo;
+ __le32 plen;
+};
+
+/**
+ * struct nvme_tcp_icreq_pdu - nvme tcp initialize connection request pdu
+ *
+ * @hdr: pdu generic header
+ * @pfv: pdu version format
+ * @hpda: host pdu data alignment (dwords, 0's based)
+ * @digest: digest types enabled
+ * @maxr2t: maximum r2ts per request supported
+ */
+struct nvme_tcp_icreq_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 pfv;
+ __u8 hpda;
+ __u8 digest;
+ __le32 maxr2t;
+ __u8 rsvd2[112];
+};
+
+/**
+ * struct nvme_tcp_icresp_pdu - nvme tcp initialize connection response pdu
+ *
+ * @hdr: pdu common header
+ * @pfv: pdu version format
+ * @cpda:          controller pdu data alignment (dwords, 0's based)
+ * @digest: digest types enabled
+ * @maxdata: maximum data capsules per r2t supported
+ */
+struct nvme_tcp_icresp_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 pfv;
+ __u8 cpda;
+ __u8 digest;
+ __le32 maxdata;
+ __u8 rsvd[112];
+};
+
+/**
+ * struct nvme_tcp_term_pdu - nvme tcp terminate connection pdu
+ *
+ * @hdr: pdu common header
+ * @fes: fatal error status
+ * @fei: fatal error information
+ */
+struct nvme_tcp_term_pdu {
+ struct nvme_tcp_hdr hdr;
+ __le16 fes;
+ __le32 fei;
+ __u8 rsvd[8];
+};
+
+/**
+ * struct nvme_tcp_cmd_pdu - nvme tcp command capsule pdu
+ *
+ * @hdr: pdu common header
+ * @cmd: nvme command
+ */
+struct nvme_tcp_cmd_pdu {
+ struct nvme_tcp_hdr hdr;
+ struct nvme_command cmd;
+};
+
+/**
+ * struct nvme_tcp_rsp_pdu - nvme tcp response capsule pdu
+ *
+ * @hdr:           pdu common header
+ * @cqe: nvme completion queue entry
+ */
+struct nvme_tcp_rsp_pdu {
+ struct nvme_tcp_hdr hdr;
+ struct nvme_completion cqe;
+};
+
+/**
+ * struct nvme_tcp_r2t_pdu - nvme tcp ready-to-transfer pdu
+ *
+ * @hdr: pdu common header
+ * @command_id: nvme command identifier which this relates to
+ * @ttag: transfer tag (controller generated)
+ * @r2t_offset: offset from the start of the command data
+ * @r2t_length: length the host is allowed to send
+ */
+struct nvme_tcp_r2t_pdu {
+ struct nvme_tcp_hdr hdr;
+ __u16 command_id;
+ __u16 ttag;
+ __le32 r2t_offset;
+ __le32 r2t_length;
+ __u8 rsvd[4];
+};
+
+/**
+ * struct nvme_tcp_data_pdu - nvme tcp data pdu
+ *
+ * @hdr: pdu common header
+ * @command_id: nvme command identifier which this relates to
+ * @ttag: transfer tag (controller generated)
+ * @data_offset: offset from the start of the command data
+ * @data_length: length of the data stream
+ */
+struct nvme_tcp_data_pdu {
+ struct nvme_tcp_hdr hdr;
+ __u16 command_id;
+ __u16 ttag;
+ __le32 data_offset;
+ __le32 data_length;
+ __u8 rsvd[4];
+};
+
+union nvme_tcp_pdu {
+ struct nvme_tcp_icreq_pdu icreq;
+ struct nvme_tcp_icresp_pdu icresp;
+ struct nvme_tcp_cmd_pdu cmd;
+ struct nvme_tcp_rsp_pdu rsp;
+ struct nvme_tcp_r2t_pdu r2t;
+ struct nvme_tcp_data_pdu data;
+};
+
+#endif /* _LINUX_NVME_TCP_H */
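Editor's note: since every PDU begins with struct nvme_tcp_hdr as its first member, receive-side code can dispatch on the type byte through any union member's hdr. A sketch with hypothetical (stubbed) handlers:

static int sketch_handle_c2h_data(struct nvme_tcp_data_pdu *pdu)
{
	return 0;	/* stub */
}

static int sketch_handle_rsp(struct nvme_tcp_rsp_pdu *pdu)
{
	return 0;	/* stub */
}

static int sketch_handle_pdu(union nvme_tcp_pdu *pdu)
{
	struct nvme_tcp_hdr *hdr = &pdu->icreq.hdr; /* common first member */

	switch (hdr->type) {
	case nvme_tcp_c2h_data:
		return sketch_handle_c2h_data(&pdu->data);
	case nvme_tcp_rsp:
		return sketch_handle_rsp(&pdu->rsp);
	default:
		return -EINVAL;
	}
}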
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 818dbe9331be..bbcc83886899 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -52,15 +52,20 @@ enum {
enum {
NVMF_TRTYPE_RDMA = 1, /* RDMA */
NVMF_TRTYPE_FC = 2, /* Fibre Channel */
+ NVMF_TRTYPE_TCP = 3, /* TCP/IP */
NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */
NVMF_TRTYPE_MAX,
};
/* Transport Requirements codes for Discovery Log Page entry TREQ field */
enum {
- NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
- NVMF_TREQ_REQUIRED = 1, /* Required */
- NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
+ NVMF_TREQ_NOT_SPECIFIED = 0, /* Not specified */
+ NVMF_TREQ_REQUIRED = 1, /* Required */
+ NVMF_TREQ_NOT_REQUIRED = 2, /* Not Required */
+#define NVME_TREQ_SECURE_CHANNEL_MASK \
+ (NVMF_TREQ_REQUIRED | NVMF_TREQ_NOT_REQUIRED)
+
+ NVMF_TREQ_DISABLE_SQFLOW = (1 << 2), /* Supports SQ flow control disable */
};
/* RDMA QP Service Type codes for Discovery Log Page entry TSAS
@@ -198,6 +203,11 @@ enum {
NVME_PS_FLAGS_NON_OP_STATE = 1 << 1,
};
+enum nvme_ctrl_attr {
+ NVME_CTRL_ATTR_HID_128_BIT = (1 << 0),
+ NVME_CTRL_ATTR_TBKAS = (1 << 6),
+};
+
struct nvme_id_ctrl {
__le16 vid;
__le16 ssvid;
@@ -214,7 +224,11 @@ struct nvme_id_ctrl {
__le32 rtd3e;
__le32 oaes;
__le32 ctratt;
- __u8 rsvd100[156];
+ __u8 rsvd100[28];
+ __le16 crdt1;
+ __le16 crdt2;
+ __le16 crdt3;
+ __u8 rsvd134[122];
__le16 oacs;
__u8 acl;
__u8 aerl;
@@ -481,12 +495,21 @@ enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
NVME_AER_NOTICE_ANA = 0x03,
+ NVME_AER_NOTICE_DISC_CHANGED = 0xf0,
};
enum {
- NVME_AEN_CFG_NS_ATTR = 1 << 8,
- NVME_AEN_CFG_FW_ACT = 1 << 9,
- NVME_AEN_CFG_ANA_CHANGE = 1 << 11,
+ NVME_AEN_BIT_NS_ATTR = 8,
+ NVME_AEN_BIT_FW_ACT = 9,
+ NVME_AEN_BIT_ANA_CHANGE = 11,
+ NVME_AEN_BIT_DISC_CHANGE = 31,
+};
+
+enum {
+ NVME_AEN_CFG_NS_ATTR = 1 << NVME_AEN_BIT_NS_ATTR,
+ NVME_AEN_CFG_FW_ACT = 1 << NVME_AEN_BIT_FW_ACT,
+ NVME_AEN_CFG_ANA_CHANGE = 1 << NVME_AEN_BIT_ANA_CHANGE,
+ NVME_AEN_CFG_DISC_CHANGE = 1 << NVME_AEN_BIT_DISC_CHANGE,
};
struct nvme_lba_range_type {
@@ -639,7 +662,12 @@ struct nvme_common_command {
__le32 cdw2[2];
__le64 metadata;
union nvme_data_ptr dptr;
- __le32 cdw10[6];
+ __le32 cdw10;
+ __le32 cdw11;
+ __le32 cdw12;
+ __le32 cdw13;
+ __le32 cdw14;
+ __le32 cdw15;
};
struct nvme_rw_command {
@@ -738,6 +766,15 @@ enum {
NVME_HOST_MEM_RETURN = (1 << 1),
};
+struct nvme_feat_host_behavior {
+ __u8 acre;
+ __u8 resv1[511];
+};
+
+enum {
+ NVME_ENABLE_ACRE = 1,
+};
+
/* Admin commands */
enum nvme_admin_opcode {
@@ -792,6 +829,7 @@ enum {
NVME_FEAT_RRL = 0x12,
NVME_FEAT_PLM_CONFIG = 0x13,
NVME_FEAT_PLM_WINDOW = 0x14,
+ NVME_FEAT_HOST_BEHAVIOR = 0x16,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -1030,6 +1068,10 @@ struct nvmf_disc_rsp_page_hdr {
struct nvmf_disc_rsp_page_entry entries[0];
};
+enum {
+ NVME_CONNECT_DISABLE_SQFLOW = (1 << 2),
+};
+
struct nvmf_connect_command {
__u8 opcode;
__u8 resv1;
@@ -1126,6 +1168,20 @@ struct nvme_command {
};
};
+struct nvme_error_slot {
+ __le64 error_count;
+ __le16 sqid;
+ __le16 cmdid;
+ __le16 status_field;
+ __le16 param_error_location;
+ __le64 lba;
+ __le32 nsid;
+ __u8 vs;
+ __u8 resv[3];
+ __le64 cs;
+ __u8 resv2[24];
+};
+
static inline bool nvme_is_write(struct nvme_command *cmd)
{
/*
@@ -1243,6 +1299,7 @@ enum {
NVME_SC_ANA_TRANSITION = 0x303,
NVME_SC_HOST_PATH_ERROR = 0x370,
+ NVME_SC_CRD = 0x1800,
NVME_SC_DNR = 0x4000,
};
diff --git a/include/linux/objagg.h b/include/linux/objagg.h
new file mode 100644
index 000000000000..34f38c186ea0
--- /dev/null
+++ b/include/linux/objagg.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#ifndef _OBJAGG_H
+#define _OBJAGG_H
+
+struct objagg_ops {
+ size_t obj_size;
+ void * (*delta_create)(void *priv, void *parent_obj, void *obj);
+ void (*delta_destroy)(void *priv, void *delta_priv);
+ void * (*root_create)(void *priv, void *obj);
+ void (*root_destroy)(void *priv, void *root_priv);
+};
+
+struct objagg;
+struct objagg_obj;
+
+const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj);
+const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj);
+const void *objagg_obj_raw(const struct objagg_obj *objagg_obj);
+
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj);
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj);
+struct objagg *objagg_create(const struct objagg_ops *ops, void *priv);
+void objagg_destroy(struct objagg *objagg);
+
+struct objagg_obj_stats {
+ unsigned int user_count;
+ unsigned int delta_user_count; /* includes delta object users */
+};
+
+struct objagg_obj_stats_info {
+ struct objagg_obj_stats stats;
+ struct objagg_obj *objagg_obj; /* associated object */
+ bool is_root;
+};
+
+struct objagg_stats {
+ unsigned int stats_info_count;
+ struct objagg_obj_stats_info stats_info[];
+};
+
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg);
+void objagg_stats_put(const struct objagg_stats *objagg_stats);
+
+#endif
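Editor's note: a hypothetical user of the objagg API — fixed-size keys, roots hold a private copy of the object, deltas just remember their parent; error handling is elided.

#include <linux/slab.h>
#include <linux/objagg.h>

struct sketch_key {
	u32 prio;
	u32 mask;
};

static void *sketch_root_create(void *priv, void *obj)
{
	return kmemdup(obj, sizeof(struct sketch_key), GFP_KERNEL);
}

static void sketch_root_destroy(void *priv, void *root_priv)
{
	kfree(root_priv);
}

static void *sketch_delta_create(void *priv, void *parent_obj, void *obj)
{
	return parent_obj;	/* remember what we aggregate against */
}

static void sketch_delta_destroy(void *priv, void *delta_priv)
{
}

static const struct objagg_ops sketch_objagg_ops = {
	.obj_size = sizeof(struct sketch_key),
	.delta_create = sketch_delta_create,
	.delta_destroy = sketch_delta_destroy,
	.root_create = sketch_root_create,
	.root_destroy = sketch_root_destroy,
};

/* Usage: ag = objagg_create(&sketch_objagg_ops, NULL);
 *        item = objagg_obj_get(ag, &key);
 *        ...
 *        objagg_obj_put(ag, item);
 *        objagg_destroy(ag);
 */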
diff --git a/include/linux/of.h b/include/linux/of.h
index a5aee3c438ad..0fe5bef81a7e 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -66,7 +66,6 @@ struct device_node {
unsigned long _flags;
void *data;
#if defined(CONFIG_SPARC)
- const char *path_component_name;
unsigned int unique_id;
struct of_irq_controller *irq_trans;
#endif
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index b9cd9ebdf9b9..a713e5d156d8 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -76,6 +76,7 @@ extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
+extern void __init early_init_dt_scan_chosen_arch(unsigned long node);
extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size);
extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 90d81ee9e6a0..9cd72aab76fe 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -13,7 +13,6 @@
struct net_device;
extern int of_get_phy_mode(struct device_node *np);
extern const void *of_get_mac_address(struct device_node *np);
-extern int of_get_nvmem_mac_address(struct device_node *np, void *addr);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
static inline int of_get_phy_mode(struct device_node *np)
@@ -26,11 +25,6 @@ static inline const void *of_get_mac_address(struct device_node *np)
return NULL;
}
-static inline int of_get_nvmem_mac_address(struct device_node *np, void *addr)
-{
- return -ENODEV;
-}
-
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
{
return NULL;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 11c71c4ecf75..51a5a5217667 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1960,7 +1960,11 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state);
int pcibios_add_device(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
+#ifdef CONFIG_PCI
void pcibios_penalize_isa_irq(int irq, int active);
+#else
+static inline void pcibios_penalize_isa_irq(int irq, int active) {}
+#endif
int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
resource_size_t pcibios_default_alignment(void);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 69f0abe1ba1a..d86d5a2477fc 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -545,6 +545,9 @@
#define PCI_DEVICE_ID_AMD_16H_NB_F4 0x1534
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F3 0x1583
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F4 0x1584
+#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
+#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -2359,6 +2362,8 @@
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
+#define PCI_VENDOR_ID_USR 0x16ec
+
#define PCI_VENDOR_ID_VITESSE 0x1725
#define PCI_DEVICE_ID_VITESSE_VSC7174 0x7174
diff --git a/include/linux/pe.h b/include/linux/pe.h
index 143ce75be5f0..3482b18a48b5 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -166,7 +166,7 @@ struct mz_hdr {
uint16_t oem_info; /* oem specific */
uint16_t reserved1[10]; /* reserved */
uint32_t peaddr; /* address of pe header */
- char message[64]; /* message to print */
+ char message[]; /* message to print */
};
struct mz_reloc {
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 79b99d653e03..71b75643c432 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -41,7 +41,7 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
* cannot both change sem->state from readers_fast and start checking
* counters while we are here. So if we see !sem->state, we know that
* the writer won't be checking until we're past the preempt_enable()
- * and that one the synchronize_sched() is done, the writer will see
+ * and that once the synchronize_rcu() is done, the writer will see
* anything we did within this RCU-sched read-size critical section.
*/
__this_cpu_inc(*sem->read_count);
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index bf309ff6f244..4641e850b204 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -102,8 +102,10 @@ struct arm_pmu {
int (*filter_match)(struct perf_event *event);
int num_events;
bool secure_access; /* 32-bit ARM only */
-#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
+#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
+#define ARMV8_PMUV3_EXT_COMMON_EVENT_BASE 0x4000
+ DECLARE_BITMAP(pmceid_ext_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
struct platform_device *plat_device;
struct pmu_hw_events __percpu *hw_events;
struct hlist_node node;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 53c500f0ca79..1d5c551a5add 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -262,8 +262,8 @@ struct pmu {
*/
int capabilities;
- int * __percpu pmu_disable_count;
- struct perf_cpu_context * __percpu pmu_cpu_context;
+ int __percpu *pmu_disable_count;
+ struct perf_cpu_context __percpu *pmu_cpu_context;
atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */
int task_ctx_nr;
int hrtimer_interval_ms;
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3ea87f774a76..da039f211c22 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -58,6 +58,11 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
#define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features)
#define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features)
+extern const int phy_10_100_features_array[4];
+extern const int phy_basic_t1_features_array[2];
+extern const int phy_gbit_features_array[2];
+extern const int phy_10gbit_features_array[1];
+
/*
* Set phydev->irq to PHY_POLL if interrupts are not supported,
* or not desired for this PHY. Set to PHY_IGNORE_INTERRUPT if
@@ -66,9 +71,8 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini
#define PHY_POLL -1
#define PHY_IGNORE_INTERRUPT -2
-#define PHY_HAS_INTERRUPT 0x00000001
-#define PHY_IS_INTERNAL 0x00000002
-#define PHY_RST_AFTER_CLK_EN 0x00000004
+#define PHY_IS_INTERNAL 0x00000001
+#define PHY_RST_AFTER_CLK_EN 0x00000002
#define MDIO_DEVICE_IS_PHY 0x80000000
/* Interface Mode definitions */
@@ -178,7 +182,6 @@ static inline const char *phy_modes(phy_interface_t interface)
#define PHY_INIT_TIMEOUT 100000
#define PHY_STATE_TIME 1
#define PHY_FORCE_TIMEOUT 10
-#define PHY_AN_TIMEOUT 10
#define PHY_MAX_ADDR 32
@@ -264,57 +267,27 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
-#define PHY_INTERRUPT_DISABLED 0x0
-#define PHY_INTERRUPT_ENABLED 0x80000000
+#define PHY_INTERRUPT_DISABLED false
+#define PHY_INTERRUPT_ENABLED true
/* PHY state machine states:
*
* DOWN: PHY device and driver are not ready for anything. probe
* should be called if and only if the PHY is in this state,
* given that the PHY device exists.
- * - PHY driver probe function will, depending on the PHY, set
- * the state to STARTING or READY
- *
- * STARTING: PHY device is coming up, and the ethernet driver is
- * not ready. PHY drivers may set this in the probe function.
- * If they do, they are responsible for making sure the state is
- * eventually set to indicate whether the PHY is UP or READY,
- * depending on the state when the PHY is done starting up.
- * - PHY driver will set the state to READY
- * - start will set the state to PENDING
+ * - PHY driver probe function will set the state to READY
*
* READY: PHY is ready to send and receive packets, but the
* controller is not. By default, PHYs which do not implement
- * probe will be set to this state by phy_probe(). If the PHY
- * driver knows the PHY is ready, and the PHY state is STARTING,
- * then it sets this STATE.
+ * probe will be set to this state by phy_probe().
* - start will set the state to UP
*
- * PENDING: PHY device is coming up, but the ethernet driver is
- * ready. phy_start will set this state if the PHY state is
- * STARTING.
- * - PHY driver will set the state to UP when the PHY is ready
- *
* UP: The PHY and attached device are ready to do work.
* Interrupts should be started here.
- * - timer moves to AN
- *
- * AN: The PHY is currently negotiating the link state. Link is
- * therefore down for now. phy_timer will set this state when it
- * detects the state is UP. config_aneg will set this state
- * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
- * - If autonegotiation finishes, but there's no link, it sets
- * the state to NOLINK.
- * - If aneg finishes with link, it sets the state to RUNNING,
- * and calls adjust_link
- * - If autonegotiation did not finish after an arbitrary amount
- * of time, autonegotiation should be tried again if the PHY
- * supports "magic" autonegotiation (back to AN)
- * - If it didn't finish, and no magic_aneg, move to FORCING.
+ * - timer moves to NOLINK or RUNNING
*
* NOLINK: PHY is up, but not currently plugged in.
- * - If the timer notes that the link comes back, we move to RUNNING
- * - config_aneg moves to AN
+ * - irq or timer will set RUNNING if link comes back
* - phy_stop moves to HALTED
*
* FORCING: PHY is being configured with forced settings
@@ -325,11 +298,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
*
* RUNNING: PHY is currently up, running, and possibly sending
* and/or receiving packets
- * - timer will set CHANGELINK if we're polling (this ensures the
- * link state is polled every other cycle of this state machine,
- * which makes it every other second)
- * - irq will set CHANGELINK
- * - config_aneg will set AN
+ * - irq or timer will set NOLINK if link goes down
* - phy_stop moves to HALTED
*
* CHANGELINK: PHY experienced a change in link state
@@ -349,16 +318,13 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
*/
enum phy_state {
PHY_DOWN = 0,
- PHY_STARTING,
PHY_READY,
- PHY_PENDING,
+ PHY_HALTED,
PHY_UP,
- PHY_AN,
PHY_RUNNING,
PHY_NOLINK,
PHY_FORCING,
PHY_CHANGELINK,
- PHY_HALTED,
PHY_RESUMING
};
@@ -390,7 +356,6 @@ struct phy_c45_device_ids {
* giving up on the current attempt at acquiring a link
* irq: IRQ number of the PHY's interrupt (-1 if none)
* phy_timer: The timer for handling the state machine
- * phy_queue: A work_queue for the phy_mac_interrupt
* attached_dev: The attached enet driver's device instance ptr
* adjust_link: Callback for the enet controller to respond to
* changes in the link state.
@@ -427,6 +392,9 @@ struct phy_device {
/* The most recently read link state */
unsigned link:1;
+ /* Interrupts are enabled */
+ unsigned interrupts:1;
+
enum phy_state state;
u32 dev_flags;
@@ -442,14 +410,11 @@ struct phy_device {
int pause;
int asym_pause;
- /* Enabled Interrupts */
- u32 interrupts;
-
- /* Union of PHY and Attached devices' supported modes */
- /* See mii.h for more info */
- u32 supported;
- u32 advertising;
- u32 lp_advertising;
+ /* Union of PHY and Attached devices' supported link modes */
+ /* See ethtool.h for more info */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising);
/* Energy efficient ethernet modes which should be prohibited */
u32 eee_broken_modes;
@@ -475,7 +440,6 @@ struct phy_device {
void *priv;
/* Interrupt and Polling infrastructure */
- struct work_struct phy_queue;
struct delayed_work state_queue;
struct mutex lock;
@@ -674,6 +638,10 @@ struct phy_driver {
#define PHY_ANY_ID "MATCH ANY PHY"
#define PHY_ANY_UID 0xffffffff
+#define PHY_ID_MATCH_EXACT(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
+#define PHY_ID_MATCH_MODEL(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 4)
+#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)
+
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
@@ -697,9 +665,31 @@ struct phy_setting {
const struct phy_setting *
phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
- size_t maxbit, bool exact);
+ bool exact);
size_t phy_speeds(unsigned int *speeds, size_t size,
- unsigned long *mask, size_t maxbit);
+ unsigned long *mask);
+
+static inline bool __phy_is_started(struct phy_device *phydev)
+{
+ WARN_ON(!mutex_is_locked(&phydev->lock));
+
+ return phydev->state >= PHY_UP;
+}
+
+/**
+ * phy_is_started - Convenience function to check whether PHY is started
+ * @phydev: The phy_device struct
+ */
+static inline bool phy_is_started(struct phy_device *phydev)
+{
+ bool started;
+
+ mutex_lock(&phydev->lock);
+ started = __phy_is_started(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return started;
+}
void phy_resolve_aneg_linkmode(struct phy_device *phydev);
@@ -1050,11 +1040,9 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
int phy_drivers_register(struct phy_driver *new_driver, int n,
struct module *owner);
void phy_state_machine(struct work_struct *work);
-void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
-void phy_trigger_machine(struct phy_device *phydev);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd);
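
Two of the phy.h additions lend themselves to short sketches. The PHY_ID_MATCH_* macros collapse the usual phy_id/phy_id_mask pair into one line of a driver table, and phy_is_started() gives drivers a locked check for "the state machine is at PHY_UP or beyond". Both snippets use made-up IDs and names:

	static struct phy_driver example_phy_driver[] = {
		{
			PHY_ID_MATCH_MODEL(0x00221550),	/* any revision of this model */
			.name	= "Example Gigabit PHY",
		},
	};

	static void example_handle_event(struct phy_device *phydev)
	{
		if (!phy_is_started(phydev))	/* takes phydev->lock internally */
			return;
		/* safe to react to link changes here */
	}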
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index ee54453a40a0..9525567b1951 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -13,6 +13,7 @@ struct fixed_phy_status {
struct device_node;
#if IS_ENABLED(CONFIG_FIXED_PHY)
+extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
extern int fixed_phy_add(unsigned int irq, int phy_id,
struct fixed_phy_status *status,
int link_gpio);
@@ -47,6 +48,10 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev,
{
return -ENODEV;
}
+static inline int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_FIXED_PHY */
#endif /* __PHY_FIXED_H */
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index b37b05bfd1a6..4587ce362535 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -20,7 +20,7 @@ struct phy_device;
#include <linux/leds.h>
#include <linux/phy.h>
-#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
+#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 11
#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
FIELD_SIZEOF(struct mdio_device, addr)+\
diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h
index 85ad68f9206a..7fe80f1c7e08 100644
--- a/include/linux/platform_data/davinci_asp.h
+++ b/include/linux/platform_data/davinci_asp.h
@@ -79,6 +79,7 @@ struct davinci_mcasp_pdata {
/* McASP specific fields */
int tdm_slots;
u8 op_mode;
+ u8 dismod;
u8 num_serializer;
u8 *serial_dir;
u8 version;
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index f92a47e18034..a93841bfb9f7 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -17,6 +17,8 @@
#define __DAVINCI_GPIO_PLATFORM_H
struct davinci_gpio_platform_data {
+ bool no_auto_base;
+ u32 base;
u32 ngpio;
u32 gpio_unbanked;
};
diff --git a/include/linux/platform_data/mdio-gpio.h b/include/linux/platform_data/mdio-gpio.h
new file mode 100644
index 000000000000..13874fa6e767
--- /dev/null
+++ b/include/linux/platform_data/mdio-gpio.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MDIO-GPIO bus platform data structure
+ */
+
+#ifndef __LINUX_MDIO_GPIO_PDATA_H
+#define __LINUX_MDIO_GPIO_PDATA_H
+
+struct mdio_gpio_platform_data {
+ u32 phy_mask;
+ u32 phy_ignore_ta_mask;
+};
+
+#endif /* __LINUX_MDIO_GPIO_PDATA_H */
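
A sketch of board code feeding the new platform data to the mdio-gpio driver; the mask values are illustrative, and "mdio-gpio" is the driver's platform name:

	static struct mdio_gpio_platform_data example_mdio_pdata = {
		.phy_mask		= ~BIT(1),	/* probe only address 1 */
		.phy_ignore_ta_mask	= 0,
	};

	static struct platform_device example_mdio_dev = {
		.name	= "mdio-gpio",
		.id	= 0,
		.dev	= { .platform_data = &example_mdio_pdata, },
	};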
diff --git a/include/linux/pm.h b/include/linux/pm.h
index e723b78d8357..0bd9de116826 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -26,6 +26,7 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/timer.h>
+#include <linux/hrtimer.h>
#include <linux/completion.h>
/*
@@ -608,7 +609,7 @@ struct dev_pm_info {
unsigned int should_wakeup:1;
#endif
#ifdef CONFIG_PM
- struct timer_list suspend_timer;
+ struct hrtimer suspend_timer;
unsigned long timer_expires;
struct work_struct work;
wait_queue_head_t wait_queue;
@@ -631,7 +632,7 @@ struct dev_pm_info {
enum rpm_status runtime_status;
int runtime_error;
int autosuspend_delay;
- unsigned long last_busy;
+ u64 last_busy;
unsigned long active_jiffies;
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 3b5d7280e52e..dd364abb649a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -73,6 +73,7 @@ struct genpd_power_state {
struct genpd_lock_ops;
struct dev_pm_opp;
+struct opp_table;
struct generic_pm_domain {
struct device dev;
@@ -94,6 +95,7 @@ struct generic_pm_domain {
unsigned int performance_state; /* Aggregated max performance state */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
+ struct opp_table *opp_table; /* OPP table of the genpd */
unsigned int (*opp_to_performance_state)(struct generic_pm_domain *genpd,
struct dev_pm_opp *opp);
int (*set_performance_state)(struct generic_pm_domain *genpd,
@@ -134,6 +136,10 @@ struct gpd_link {
struct list_head master_node;
struct generic_pm_domain *slave;
struct list_head slave_node;
+
+ /* Sub-domain's per-master domain performance state */
+ unsigned int performance_state;
+ unsigned int prev_performance_state;
};
struct gpd_timing_data {
@@ -258,8 +264,8 @@ int of_genpd_add_subdomain(struct of_phandle_args *parent,
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
-unsigned int of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *np);
+unsigned int pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp);
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -300,8 +306,8 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
}
static inline unsigned int
-of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *np)
+pm_genpd_opp_to_performance_state(struct device *genpd_dev,
+ struct dev_pm_opp *opp)
{
return 0;
}
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 5d399eeef172..0a2a88e5a383 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -126,6 +126,9 @@ struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const char * name);
void dev_pm_opp_put_clkname(struct opp_table *opp_table);
struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, int (*set_opp)(struct dev_pm_set_opp_data *data));
void dev_pm_opp_unregister_set_opp_helper(struct opp_table *opp_table);
+struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index);
+void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev);
+int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate);
int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
@@ -272,6 +275,18 @@ static inline struct opp_table *dev_pm_opp_set_clkname(struct device *dev, const
static inline void dev_pm_opp_put_clkname(struct opp_table *opp_table) {}
+static inline struct opp_table *dev_pm_opp_set_genpd_virt_dev(struct device *dev, struct device *virt_dev, int index)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void dev_pm_opp_put_genpd_virt_dev(struct opp_table *opp_table, struct device *virt_dev) {}
+
+static inline int dev_pm_opp_xlate_performance_state(struct opp_table *src_table, struct opp_table *dst_table, unsigned int pstate)
+{
+ return -ENOTSUPP;
+}
+
static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
{
return -ENOTSUPP;
@@ -305,8 +320,8 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
-struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np);
struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp);
+int of_get_required_opp_performance_state(struct device_node *np, int index);
#else
static inline int dev_pm_opp_of_add_table(struct device *dev)
{
@@ -341,13 +356,13 @@ static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device
return NULL;
}
-static inline struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev, struct device_node *np)
+static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
{
return NULL;
}
-static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
+static inline int of_get_required_opp_performance_state(struct device_node *np, int index)
{
- return NULL;
+ return -ENOTSUPP;
}
#endif
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index f0fc4700b6ff..54af4eef169f 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -51,7 +51,7 @@ extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
-extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
+extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
- WRITE_ONCE(dev->power.last_busy, jiffies);
+ WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get()));
}
static inline bool pm_runtime_is_irq_safe(struct device *dev)
@@ -168,7 +168,7 @@ static inline void __pm_runtime_use_autosuspend(struct device *dev,
bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
int delay) {}
-static inline unsigned long pm_runtime_autosuspend_expiration(
+static inline u64 pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
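
The jiffies-to-ktime conversion is invisible at the call sites; the usual autosuspend pattern is unchanged, only the timestamp granularity improves to nanoseconds:

	static int example_io_done(struct device *dev)
	{
		pm_runtime_mark_last_busy(dev);	/* now records a ktime in ns */
		return pm_runtime_put_autosuspend(dev);
	}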
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index 7b81dad712de..d0b37e937037 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* OMAP Smartreflex Defines and Routines
*
@@ -11,10 +12,6 @@
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Lesly A M <x0080970@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __POWER_SMARTREFLEX_H
@@ -303,9 +300,6 @@ void omap_sr_enable(struct voltagedomain *voltdm);
void omap_sr_disable(struct voltagedomain *voltdm);
void omap_sr_disable_reset_volt(struct voltagedomain *voltdm);
-/* API to register the pmic specific data with the smartreflex driver. */
-void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data);
-
/* Smartreflex driver hooks to be called from Smartreflex class driver */
int sr_enable(struct omap_sr *sr, unsigned long volt);
void sr_disable(struct omap_sr *sr);
@@ -320,7 +314,5 @@ static inline void omap_sr_enable(struct voltagedomain *voltdm) {}
static inline void omap_sr_disable(struct voltagedomain *voltdm) {}
static inline void omap_sr_disable_reset_volt(
struct voltagedomain *voltdm) {}
-static inline void omap_sr_register_pmic(
- struct omap_sr_pmic_data *pmic_data) {}
#endif
#endif
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index c01813c3fbe9..dd92b1a93919 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -53,9 +53,6 @@
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-/* We use the MSB mostly because its available */
-#define PREEMPT_NEED_RESCHED 0x80000000
-
#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/*
diff --git a/include/linux/printk.h b/include/linux/printk.h
index cf3eccfe1543..55aa96975fa2 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -166,11 +166,6 @@ int vprintk_emit(int facility, int level,
asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
-asmlinkage __printf(5, 6) __cold
-int printk_emit(int facility, int level,
- const char *dict, size_t dictlen,
- const char *fmt, ...);
-
asmlinkage __printf(1, 2) __cold
int printk(const char *fmt, ...);
diff --git a/include/linux/property.h b/include/linux/property.h
index ac8a1ebc4c1b..3789ec755fb6 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -311,4 +311,16 @@ fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
+/* -------------------------------------------------------------------------- */
+/* Software fwnode support - when HW description is incomplete or missing */
+
+bool is_software_node(const struct fwnode_handle *fwnode);
+
+int software_node_notify(struct device *dev, unsigned long action);
+
+struct fwnode_handle *
+fwnode_create_software_node(const struct property_entry *properties,
+ const struct fwnode_handle *parent);
+void fwnode_remove_software_node(struct fwnode_handle *fwnode);
+
#endif /* _LINUX_PROPERTY_H_ */
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 8e0725aac0aa..7006008d5b72 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_PSI_H
#define _LINUX_PSI_H
+#include <linux/jump_label.h>
#include <linux/psi_types.h>
#include <linux/sched.h>
@@ -9,7 +10,7 @@ struct css_set;
#ifdef CONFIG_PSI
-extern bool psi_disabled;
+extern struct static_key_false psi_disabled;
void psi_init(void);
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index a15bc4d48752..b146181e8709 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -26,27 +26,38 @@
#include <linux/errno.h>
#include <linux/kmsg_dump.h>
#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/semaphore.h>
#include <linux/time.h>
#include <linux/types.h>
struct module;
-/* pstore record types (see fs/pstore/inode.c for filename templates) */
+/*
+ * pstore record types (see fs/pstore/platform.c for pstore_type_names[])
+ * These values may be written to storage (see EFI vars backend), so
+ * they are kind of an ABI. Be careful changing the mappings.
+ */
enum pstore_type_id {
+ /* Frontend storage types */
PSTORE_TYPE_DMESG = 0,
PSTORE_TYPE_MCE = 1,
PSTORE_TYPE_CONSOLE = 2,
PSTORE_TYPE_FTRACE = 3,
- /* PPC64 partition types */
+
+ /* PPC64-specific partition types */
PSTORE_TYPE_PPC_RTAS = 4,
PSTORE_TYPE_PPC_OF = 5,
PSTORE_TYPE_PPC_COMMON = 6,
PSTORE_TYPE_PMSG = 7,
PSTORE_TYPE_PPC_OPAL = 8,
- PSTORE_TYPE_UNKNOWN = 255
+
+ /* End of the list */
+ PSTORE_TYPE_MAX
};
+const char *pstore_type_to_name(enum pstore_type_id type);
+enum pstore_type_id pstore_name_to_type(const char *name);
+
struct pstore_info;
/**
* struct pstore_record - details of a pstore record entry
@@ -85,12 +96,15 @@ struct pstore_record {
/**
* struct pstore_info - backend pstore driver structure
*
- * @owner: module which is repsonsible for this backend driver
+ * @owner: module which is responsible for this backend driver
* @name: name of the backend driver
*
- * @buf_lock: spinlock to serialize access to @buf
+ * @buf_lock: semaphore to serialize access to @buf
* @buf: preallocated crash dump buffer
- * @bufsize: size of @buf available for crash dump writes
+ * @bufsize: size of @buf available for crash dump bytes (must match
+ * smallest number of bytes available for writing to a
+ * backend entry, since compressed bytes don't take kindly
+ * to being truncated)
*
* @read_mutex: serializes @open, @read, @close, and @erase callbacks
* @flags: bitfield of frontends the backend can accept writes for
@@ -170,7 +184,7 @@ struct pstore_info {
struct module *owner;
char *name;
- spinlock_t buf_lock;
+ struct semaphore buf_lock;
char *buf;
size_t bufsize;
@@ -189,14 +203,13 @@ struct pstore_info {
};
/* Supported frontends */
-#define PSTORE_FLAGS_DMESG (1 << 0)
-#define PSTORE_FLAGS_CONSOLE (1 << 1)
-#define PSTORE_FLAGS_FTRACE (1 << 2)
-#define PSTORE_FLAGS_PMSG (1 << 3)
+#define PSTORE_FLAGS_DMESG BIT(0)
+#define PSTORE_FLAGS_CONSOLE BIT(1)
+#define PSTORE_FLAGS_FTRACE BIT(2)
+#define PSTORE_FLAGS_PMSG BIT(3)
extern int pstore_register(struct pstore_info *);
extern void pstore_unregister(struct pstore_info *);
-extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
struct pstore_ftrace_record {
unsigned long ip;
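
A sketch of the new name/type mapping helpers; the messages are illustrative, and pstore_name_to_type() is assumed to return PSTORE_TYPE_MAX for an unknown name, which is why the enum gained an explicit end marker:

	static void example_show_type(enum pstore_type_id type)
	{
		pr_info("record type: %s\n", pstore_type_to_name(type));

		/* Assumed failure convention for the reverse lookup. */
		if (pstore_name_to_type("dmesg") == PSTORE_TYPE_MAX)
			pr_warn("dmesg frontend name not recognized\n");
	}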
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 602d64725222..337971c41980 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/pstore.h>
#include <linux/types.h>
/*
@@ -30,6 +31,11 @@
* PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
*/
#define PRZ_FLAG_NO_LOCK BIT(0)
+/*
+ * If a PRZ should only have a single-boot lifetime, this marks it as
+ * getting wiped after its contents get copied out after boot.
+ */
+#define PRZ_FLAG_ZAP_OLD BIT(1)
struct persistent_ram_buffer;
struct rs_control;
@@ -42,17 +48,55 @@ struct persistent_ram_ecc_info {
uint16_t *par;
};
+/**
+ * struct persistent_ram_zone - Details of a persistent RAM zone (PRZ)
+ * used as a pstore backend
+ *
+ * @paddr: physical address of the mapped RAM area
+ * @size: size of mapping
+ * @label: unique name of this PRZ
+ * @type: frontend type for this PRZ
+ * @flags: holds PRZ_FLAG_* bits
+ *
+ * @buffer_lock:
+ * locks access to @buffer "size" bytes and "start" offset
+ * @buffer:
+ * pointer to actual RAM area managed by this PRZ
+ * @buffer_size:
+ * bytes in @buffer->data (not including any trailing ECC bytes)
+ *
+ * @par_buffer:
+ * pointer into @buffer->data containing ECC bytes for @buffer->data
+ * @par_header:
+ * pointer into @buffer->data containing ECC bytes for @buffer header
+ * (i.e. all fields up to @data)
+ * @rs_decoder:
+ * RSLIB instance for doing ECC calculations
+ * @corrected_bytes:
+ * ECC corrected bytes accounting since boot
+ * @bad_blocks:
+ * ECC uncorrectable bytes accounting since boot
+ * @ecc_info:
+ * ECC configuration details
+ *
+ * @old_log:
+ * saved copy of @buffer->data prior to most recent wipe
+ * @old_log_size:
+ * bytes contained in @old_log
+ *
+ */
struct persistent_ram_zone {
phys_addr_t paddr;
size_t size;
void *vaddr;
char *label;
- struct persistent_ram_buffer *buffer;
- size_t buffer_size;
+ enum pstore_type_id type;
u32 flags;
+
raw_spinlock_t buffer_lock;
+ struct persistent_ram_buffer *buffer;
+ size_t buffer_size;
- /* ECC correction */
char *par_buffer;
char *par_header;
struct rs_control *rs_decoder;
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 51349d124ee5..7121bbe76979 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -39,6 +39,15 @@ struct ptp_clock_request {
};
struct system_device_crosststamp;
+
+/**
+ * struct ptp_system_timestamp - system time corresponding to a PHC timestamp
+ */
+struct ptp_system_timestamp {
+ struct timespec64 pre_ts;
+ struct timespec64 post_ts;
+};
+
/**
* struct ptp_clock_info - describes a PTP hardware clock
*
@@ -73,8 +82,18 @@ struct system_device_crosststamp;
* parameter delta: Desired change in nanoseconds.
*
* @gettime64: Reads the current time from the hardware clock.
+ * This method is deprecated. New drivers should implement
+ * the @gettimex64 method instead.
* parameter ts: Holds the result.
*
+ * @gettimex64: Reads the current time from the hardware clock and optionally
+ * also the system clock.
+ * parameter ts: Holds the PHC timestamp.
+ * parameter sts: If not NULL, it holds a pair of timestamps from
+ * the system clock. The first reading is made right before
+ * reading the lowest bits of the PHC timestamp and the second
+ * reading immediately follows that.
+ *
* @getcrosststamp: Reads the current time from the hardware clock and
* system clock simultaneously.
* parameter cts: Contains timestamp (device,system) pair,
@@ -124,6 +143,8 @@ struct ptp_clock_info {
int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta);
int (*adjtime)(struct ptp_clock_info *ptp, s64 delta);
int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts);
+ int (*gettimex64)(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts);
int (*getcrosststamp)(struct ptp_clock_info *ptp,
struct system_device_crosststamp *cts);
int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts);
@@ -247,4 +268,16 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp,
#endif
+static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts)
+{
+ if (sts)
+ ktime_get_real_ts64(&sts->pre_ts);
+}
+
+static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts)
+{
+ if (sts)
+ ktime_get_real_ts64(&sts->post_ts);
+}
+
#endif
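
A sketch of a driver-side gettimex64() built on the two new helpers: the system clock is sampled immediately before and after the device access that latches the PHC time, which is exactly the pre/post pair the documentation above describes. The register read is hypothetical:

	static int example_gettimex64(struct ptp_clock_info *ptp,
				      struct timespec64 *ts,
				      struct ptp_system_timestamp *sts)
	{
		ptp_read_system_prets(sts);	/* no-op when sts is NULL */
		example_read_phc_time(ts);	/* hypothetical register read */
		ptp_read_system_postts(sts);

		return 0;
	}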
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 6c2ffed907f5..edb9b040c94c 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -64,15 +64,12 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_NOAUDIT 0x04
#define PTRACE_MODE_FSCREDS 0x08
#define PTRACE_MODE_REALCREDS 0x10
-#define PTRACE_MODE_SCHED 0x20
-#define PTRACE_MODE_IBPB 0x40
/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
-#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
/**
* ptrace_may_access - check whether the caller is permitted to access
@@ -90,20 +87,6 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
*/
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
-/**
- * ptrace_may_access - check whether the caller is permitted to access
- * a target task.
- * @task: target task
- * @mode: selects type of access and caller credentials
- *
- * Returns true on success, false on denial.
- *
- * Similar to ptrace_may_access(). Only to be called from context switch
- * code. Does not call into audit and the regular LSM hooks due to locking
- * constraints.
- */
-extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
-
static inline int ptrace_reparented(struct task_struct *child)
{
return !same_thread_group(child->real_parent, child->parent);
@@ -428,4 +411,5 @@ extern int task_current_syscall(struct task_struct *target, long *callno,
unsigned long args[6], unsigned int maxargs,
unsigned long *sp, unsigned long *pc);
+extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
#endif
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 56518adc31dd..d5199b507d79 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -349,42 +349,6 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
}
/**
- * pwm_set_polarity() - configure the polarity of a PWM signal
- * @pwm: PWM device
- * @polarity: new polarity of the PWM signal
- *
- * Note that the polarity cannot be configured while the PWM device is
- * enabled.
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-static inline int pwm_set_polarity(struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct pwm_state state;
-
- if (!pwm)
- return -EINVAL;
-
- pwm_get_state(pwm, &state);
- if (state.polarity == polarity)
- return 0;
-
- /*
- * Changing the polarity of a running PWM without adjusting the
- * dutycycle/period value is a bit risky (can introduce glitches).
- * Return -EBUSY in this case.
- * Note that this is allowed when using pwm_apply_state() because
- * the user specifies all the parameters.
- */
- if (state.enabled)
- return -EBUSY;
-
- state.polarity = polarity;
- return pwm_apply_state(pwm, &state);
-}
-
-/**
* pwm_enable() - start a PWM output toggling
* @pwm: PWM device
*
@@ -483,12 +447,6 @@ static inline int pwm_capture(struct pwm_device *pwm,
return -EINVAL;
}
-static inline int pwm_set_polarity(struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- return -ENOTSUPP;
-}
-
static inline int pwm_enable(struct pwm_device *pwm)
{
return -EINVAL;
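
With pwm_set_polarity() removed, consumers that need a polarity flip go through the atomic API, restating the full state just as the deleted helper's body did internally:

	static int example_set_polarity(struct pwm_device *pwm,
					enum pwm_polarity polarity)
	{
		struct pwm_state state;

		pwm_get_state(pwm, &state);
		state.polarity = polarity;
		return pwm_apply_state(pwm, &state);	/* caller states every field */
	}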
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index a47321a0d572..91c536a01b56 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -47,6 +47,7 @@
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
@@ -448,11 +449,24 @@ struct qed_mfw_tlv_iscsi {
bool tx_bytes_set;
};
+enum qed_db_rec_width {
+ DB_REC_WIDTH_32B,
+ DB_REC_WIDTH_64B,
+};
+
+enum qed_db_rec_space {
+ DB_REC_KERNEL,
+ DB_REC_USER,
+};
+
#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
(void __iomem *)(reg_addr))
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
+#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \
+ (void __iomem *)(reg_addr))
+
#define QED_COALESCE_MAX 0x1FF
#define QED_DEFAULT_RX_USECS 12
#define QED_DEFAULT_TX_USECS 48
@@ -1015,6 +1029,33 @@ struct qed_common_ops {
*/
int (*set_led)(struct qed_dev *cdev,
enum qed_led_mode mode);
+/**
+ * @brief db_recovery_add - add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @param cdev
+ * @param db_addr - doorbell address
+ * @param db_data - address of where db_data is stored
+ * @param db_width - doorbell is 32b or 64b
+ * @param db_space - doorbell recovery addresses are user or kernel space
+ */
+ int (*db_recovery_add)(struct qed_dev *cdev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space);
+
+/**
+ * @brief db_recovery_del - remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+ * @param cdev
+ * @param db_addr - doorbell address
+ * @param db_data - address where db_data is stored. Serves as key for the
+ * entry to delete.
+ */
+ int (*db_recovery_del)(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data);
/**
* @brief update_drv_state - API to inform the change in the driver state.
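
A sketch of a qed client registering a kernel-space 32-bit doorbell with the new recovery hooks; the ops pointer is however the client reaches struct qed_common_ops, and db_data's address doubles as the deletion key later on:

	static int example_register_db(struct qed_common_ops *ops,
				       struct qed_dev *cdev,
				       void __iomem *db_addr, u32 *db_data)
	{
		return ops->db_recovery_add(cdev, db_addr, db_data,
					    DB_REC_WIDTH_32B, DB_REC_KERNEL);
	}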
diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h
index 8a16c3eb3dd0..c0578ba23c1a 100644
--- a/include/linux/rcupdate_wait.h
+++ b/include/linux/rcupdate_wait.h
@@ -31,21 +31,4 @@ do { \
#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__)
-/**
- * synchronize_rcu_mult - Wait concurrently for multiple grace periods
- * @...: List of call_rcu() functions for different grace periods to wait on
- *
- * This macro waits concurrently for multiple types of RCU grace periods.
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait
- * on concurrent RCU and RCU-tasks grace periods. Waiting on a given SRCU
- * domain requires you to write a wrapper function for that SRCU domain's
- * call_srcu() function, supplying the corresponding srcu_struct.
- *
- * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU,
- * given that anywhere synchronize_rcu_mult() can be called is automatically
- * a grace period.
- */
-#define synchronize_rcu_mult(...) \
- _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__)
-
#endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index a367d59c301d..1781b6cb793c 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -1089,27 +1089,48 @@ int regmap_fields_read(struct regmap_field *field, unsigned int id,
int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
unsigned int mask, unsigned int val,
bool *change, bool async, bool force);
+/**
+ * struct regmap_irq_type - IRQ type definitions.
+ *
+ * @type_reg_offset: Offset register for the irq type setting.
+ * @type_reg_mask: Mask of the bits in the type register controlled by this irq.
+ * @type_rising_val: Register value to configure RISING type irq.
+ * @type_falling_val: Register value to configure FALLING type irq.
+ * @type_level_low_val: Register value to configure LEVEL_LOW type irq.
+ * @type_level_high_val: Register value to configure LEVEL_HIGH type irq.
+ * @types_supported: logical OR of IRQ_TYPE_* flags indicating supported types.
+ */
+struct regmap_irq_type {
+ unsigned int type_reg_offset;
+ unsigned int type_reg_mask;
+ unsigned int type_rising_val;
+ unsigned int type_falling_val;
+ unsigned int type_level_low_val;
+ unsigned int type_level_high_val;
+ unsigned int types_supported;
+};
/**
* struct regmap_irq - Description of an IRQ for the generic regmap irq_chip.
*
* @reg_offset: Offset of the status/mask register within the bank
* @mask: Mask used to flag/control the register.
- * @type_reg_offset: Offset register for the irq type setting.
- * @type_rising_mask: Mask bit to configure RISING type irq.
- * @type_falling_mask: Mask bit to configure FALLING type irq.
+ * @type: IRQ trigger type setting details if supported.
*/
struct regmap_irq {
unsigned int reg_offset;
unsigned int mask;
- unsigned int type_reg_offset;
- unsigned int type_rising_mask;
- unsigned int type_falling_mask;
+ struct regmap_irq_type type;
};
#define REGMAP_IRQ_REG(_irq, _off, _mask) \
[_irq] = { .reg_offset = (_off), .mask = (_mask) }
+#define REGMAP_IRQ_REG_LINE(_id, _reg_bits) \
+ [_id] = { \
+ .mask = BIT((_id) % (_reg_bits)), \
+ .reg_offset = (_id) / (_reg_bits), \
+ }
+
/**
* struct regmap_irq_chip - Description of a generic regmap irq_chip.
*
@@ -1131,6 +1152,12 @@ struct regmap_irq {
* @ack_invert: Inverted ack register: cleared bits for ack.
* @wake_invert: Inverted wake register: cleared bits are wake enabled.
* @type_invert: Invert the type flags.
+ * @type_in_mask: Use the mask registers for controlling irq type. For
+ * interrupts defining type_rising/falling_mask use mask_base
+ * for edge configuration and never update bits in type_base.
+ * @clear_on_unmask: For chips with interrupts cleared on read: read the status
+ * registers before unmasking interrupts to clear any bits
+ * set when they were masked.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
*
* @num_regs: Number of registers in each control bank.
@@ -1169,6 +1196,8 @@ struct regmap_irq_chip {
bool wake_invert:1;
bool runtime_pm:1;
bool type_invert:1;
+ bool type_in_mask:1;
+ bool clear_on_unmask:1;
int num_regs;
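
A sketch of the new per-line helper: for a chip whose status registers carry 8 interrupt bits each, REGMAP_IRQ_REG_LINE() derives both the register offset and the mask from the interrupt index (values illustrative):

	static const struct regmap_irq example_irqs[] = {
		REGMAP_IRQ_REG_LINE(0, 8),	/* bit 0 of register 0 */
		REGMAP_IRQ_REG_LINE(1, 8),	/* bit 1 of register 0 */
		REGMAP_IRQ_REG_LINE(8, 8),	/* bit 0 of register 1 */
	};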
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 25602afd4844..f3f76051e8b0 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -508,7 +508,7 @@ static inline int regulator_get_error_flags(struct regulator *regulator,
static inline int regulator_set_load(struct regulator *regulator, int load_uA)
{
- return REGULATOR_MODE_NORMAL;
+ return 0;
}
static inline int regulator_allow_bypass(struct regulator *regulator,
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index a9c030192147..389bcaf7900f 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -15,11 +15,12 @@
#ifndef __LINUX_REGULATOR_DRIVER_H_
#define __LINUX_REGULATOR_DRIVER_H_
-#define MAX_COUPLED 4
+#define MAX_COUPLED 2
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/regulator/consumer.h>
+#include <linux/ww_mutex.h>
struct gpio_desc;
struct regmap;
@@ -462,7 +463,7 @@ struct regulator_dev {
struct coupling_desc coupling_desc;
struct blocking_notifier_head notifier;
- struct mutex mutex; /* consumer lock */
+ struct ww_mutex mutex; /* consumer lock */
struct task_struct *mutex_owner;
int ref_cnt;
struct module *owner;
@@ -473,7 +474,6 @@ struct regulator_dev {
struct regmap *regmap;
struct delayed_work disable_work;
- int deferred_disables;
void *reg_data; /* regulator_dev data */
@@ -545,4 +545,7 @@ int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
bool enable);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
+void regulator_lock(struct regulator_dev *rdev);
+void regulator_unlock(struct regulator_dev *rdev);
+
#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index a459a5e973a7..1d34a70ffda2 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -158,6 +158,9 @@ struct regulation_constraints {
/* used for coupled regulators */
int max_spread;
+ /* used for changing voltage in steps */
+ int max_uV_step;
+
/* valid regulator operating modes for this machine */
unsigned int valid_modes_mask;
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index cb5aecd40f07..331d7d940c7a 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -33,7 +33,8 @@
#define PFUZE100_VGEN4 12
#define PFUZE100_VGEN5 13
#define PFUZE100_VGEN6 14
-#define PFUZE100_MAX_REGULATOR 15
+#define PFUZE100_COIN 15
+#define PFUZE100_MAX_REGULATOR 16
#define PFUZE200_SW1AB 0
#define PFUZE200_SW2 1
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 02166e815afb..2f0ffca35780 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -68,7 +68,6 @@ struct reservation_object_list {
* @seq: sequence count for managing RCU read-side synchronization
* @fence_excl: the exclusive fence, if there is one currently
* @fence: list of current shared fences
- * @staged: staged copy of shared fences for RCU updates
*/
struct reservation_object {
struct ww_mutex lock;
@@ -76,7 +75,6 @@ struct reservation_object {
struct dma_fence __rcu *fence_excl;
struct reservation_object_list __rcu *fence;
- struct reservation_object_list *staged;
};
#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
@@ -95,7 +93,6 @@ reservation_object_init(struct reservation_object *obj)
__seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
RCU_INIT_POINTER(obj->fence, NULL);
RCU_INIT_POINTER(obj->fence_excl, NULL);
- obj->staged = NULL;
}
/**
@@ -124,7 +121,6 @@ reservation_object_fini(struct reservation_object *obj)
kfree(fobj);
}
- kfree(obj->staged);
ww_mutex_destroy(&obj->lock);
}
@@ -218,6 +214,11 @@ reservation_object_trylock(struct reservation_object *obj)
static inline void
reservation_object_unlock(struct reservation_object *obj)
{
+#ifdef CONFIG_DEBUG_MUTEXES
+ /* Test shared fence slot reservation */
+ if (obj->fence)
+ obj->fence->shared_max = obj->fence->shared_count;
+#endif
ww_mutex_unlock(&obj->lock);
}
@@ -265,7 +266,8 @@ reservation_object_get_excl_rcu(struct reservation_object *obj)
return fence;
}
-int reservation_object_reserve_shared(struct reservation_object *obj);
+int reservation_object_reserve_shared(struct reservation_object *obj,
+ unsigned int num_fences);
void reservation_object_add_shared_fence(struct reservation_object *obj,
struct dma_fence *fence);
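
The new num_fences argument lets callers reserve all the shared slots they will need in one go, so the add loop cannot fail or reallocate midway. A sketch, assuming the caller already holds obj->lock:

	static int example_add_fences(struct reservation_object *obj,
				      struct dma_fence **fences, unsigned int n)
	{
		unsigned int i;
		int ret;

		ret = reservation_object_reserve_shared(obj, n);
		if (ret)
			return ret;

		for (i = 0; i < n; i++)
			reservation_object_add_shared_fence(obj, fences[i]);
		return 0;
	}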
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index eb7111039247..20f9c6af7473 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -75,8 +75,19 @@ struct bucket_table {
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
+/*
+ * NULLS_MARKER() expects a hash value with the low
+ * bits mostly likely to be significant, and it discards
+ * the msb.
+ * We git it an address, in which the bottom 2 bits are
+ * always 0, and the msb might be significant.
+ * So we shift the address down one bit to align with
+ * expectations and avoid losing a significant bit.
+ */
+#define RHT_NULLS_MARKER(ptr) \
+ ((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr) \
- ((ptr) = (typeof(ptr)) NULLS_MARKER(0))
+ ((ptr) = RHT_NULLS_MARKER(&(ptr)))
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
@@ -471,6 +482,7 @@ static inline struct rhash_head *__rhashtable_lookup(
.ht = ht,
.key = key,
};
+ struct rhash_head __rcu * const *head;
struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
@@ -478,13 +490,19 @@ static inline struct rhash_head *__rhashtable_lookup(
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
- rht_for_each_rcu(he, tbl, hash) {
- if (params.obj_cmpfn ?
- params.obj_cmpfn(&arg, rht_obj(ht, he)) :
- rhashtable_compare(&arg, rht_obj(ht, he)))
- continue;
- return he;
- }
+ head = rht_bucket(tbl, hash);
+ do {
+ rht_for_each_rcu_continue(he, *head, tbl, hash) {
+ if (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, he)) :
+ rhashtable_compare(&arg, rht_obj(ht, he)))
+ continue;
+ return he;
+ }
+ /* An object might have been moved to a different hash chain,
+ * while we walk along it - better check and retry.
+ */
+ } while (he != RHT_NULLS_MARKER(head));
/* Ensure we see any new tables. */
smp_rmb();
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 804a50983ec5..14d558146aea 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -30,14 +30,24 @@ struct seq_file;
*/
struct sbitmap_word {
/**
- * @word: The bitmap word itself.
+ * @depth: Number of bits being used in @word/@cleared
*/
- unsigned long word;
+ unsigned long depth;
/**
- * @depth: Number of bits being used in @word.
+ * @word: word holding free bits
*/
- unsigned long depth;
+ unsigned long word ____cacheline_aligned_in_smp;
+
+ /**
+ * @cleared: word holding cleared bits
+ */
+ unsigned long cleared ____cacheline_aligned_in_smp;
+
+ /**
+ * @swap_lock: Held while swapping word <-> cleared
+ */
+ spinlock_t swap_lock;
} ____cacheline_aligned_in_smp;
/**
@@ -125,6 +135,11 @@ struct sbitmap_queue {
*/
struct sbq_wait_state *ws;
+ /*
+ * @ws_active: count of currently active ws waitqueues
+ */
+ atomic_t ws_active;
+
/**
* @round_robin: Allocate bits in strict round-robin order.
*/
@@ -250,12 +265,14 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb,
nr = SB_NR_TO_BIT(sb, start);
while (scanned < sb->depth) {
- struct sbitmap_word *word = &sb->map[index];
- unsigned int depth = min_t(unsigned int, word->depth - nr,
+ unsigned long word;
+ unsigned int depth = min_t(unsigned int,
+ sb->map[index].depth - nr,
sb->depth - scanned);
scanned += depth;
- if (!word->word)
+ word = sb->map[index].word & ~sb->map[index].cleared;
+ if (!word)
goto next;
/*
@@ -265,7 +282,7 @@ static inline void __sbitmap_for_each_set(struct sbitmap *sb,
*/
depth += nr;
while (1) {
- nr = find_next_bit(&word->word, depth, nr);
+ nr = find_next_bit(&word, depth, nr);
if (nr >= depth)
break;
if (!fn(sb, (index << sb->shift) + nr, data))
@@ -310,6 +327,19 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
+/*
+ * This one is special, since it doesn't actually clear the bit, rather it
+ * sets the corresponding bit in the ->cleared mask instead. Paired with
+ * the caller doing sbitmap_batch_clear() if a given index is full, which
+ * will clear the previously freed entries in the corresponding ->word.
+ */
+static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
+{
+ unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;
+
+ set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
+}
+
static inline void sbitmap_clear_bit_unlock(struct sbitmap *sb,
unsigned int bitnr)
{
@@ -321,8 +351,6 @@ static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}
-unsigned int sbitmap_weight(const struct sbitmap *sb);
-
/**
* sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
* @sb: Bitmap to show.
@@ -531,4 +559,45 @@ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq);
*/
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);
+struct sbq_wait {
+ struct sbitmap_queue *sbq; /* if set, sbq_wait is accounted */
+ struct wait_queue_entry wait;
+};
+
+#define DEFINE_SBQ_WAIT(name) \
+ struct sbq_wait name = { \
+ .sbq = NULL, \
+ .wait = { \
+ .private = current, \
+ .func = autoremove_wake_function, \
+ .entry = LIST_HEAD_INIT((name).wait.entry), \
+ } \
+ }
+
+/*
+ * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
+ * internal state.
+ */
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait, int state);
+
+/*
+ * Must be paired with sbitmap_prepare_to_wait().
+ */
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait);
+
+/*
+ * Wrapper around add_wait_queue(), which maintains some extra internal state
+ */
+void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait);
+
+/*
+ * Must be paired with sbitmap_add_wait_queue()
+ */
+void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);
+
#endif /* __LINUX_SCALE_BITMAP_H */
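
A sketch of the accounted wait helpers around a tag allocation loop; __sbitmap_queue_get() is the existing allocator, and the waitqueue choice is illustrative:

	static int example_get_tag(struct sbitmap_queue *sbq)
	{
		struct sbq_wait_state *ws = &sbq->ws[0];	/* illustrative pick */
		DEFINE_SBQ_WAIT(wait);
		int tag;

		for (;;) {
			sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
			tag = __sbitmap_queue_get(sbq);
			if (tag >= 0)
				break;
			io_schedule();
		}
		sbitmap_finish_wait(sbq, ws, &wait);	/* drops ws_active accounting */
		return tag;
	}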
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a51c13c2b1a0..89541d248893 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -176,7 +176,7 @@ struct task_group;
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
* However, with slightly different timing the wakeup TASK_RUNNING store can
- * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
* a problem either because that will result in one extra go around the loop
* and our @cond test will save the day.
*
@@ -515,7 +515,7 @@ struct sched_dl_entity {
/*
* Actual scheduling parameters. Initialized with the values above,
- * they are continously updated during task execution. Note that
+ * they are continuously updated during task execution. Note that
* the remaining runtime could be < 0 in case we are in overrun.
*/
s64 runtime; /* Remaining runtime for this instance */
@@ -572,8 +572,10 @@ union rcu_special {
struct {
u8 blocked;
u8 need_qs;
+ u8 exp_hint; /* Hint for performance. */
+ u8 pad; /* No garbage from compiler! */
} b; /* Bits. */
- u16 s; /* Set of bits. */
+ u32 s; /* Set of bits. */
};
enum perf_event_task_context {
@@ -993,7 +995,7 @@ struct task_struct {
/* cg_list protected by css_set_lock and tsk->alloc_lock: */
struct list_head cg_list;
#endif
-#ifdef CONFIG_INTEL_RDT
+#ifdef CONFIG_RESCTRL
u32 closid;
u32 rmid;
#endif
@@ -1116,6 +1118,7 @@ struct task_struct {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
int curr_ret_stack;
+ int curr_ret_depth;
/* Stack of return addresses for return function tracing: */
struct ftrace_ret_stack *ret_stack;
@@ -1453,6 +1456,8 @@ static inline bool is_percpu_thread(void)
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
#define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */
#define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/
+#define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */
+#define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
@@ -1484,6 +1489,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+
static inline void
current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index 59667444669f..afa940cd50dc 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -20,6 +20,12 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
void (*func)(struct update_util_data *data, u64 time,
unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
+
+static inline unsigned long map_util_freq(unsigned long util,
+ unsigned long freq, unsigned long cap)
+{
+ return (freq + (freq >> 2)) * util / cap;
+}
#endif /* CONFIG_CPU_FREQ */
#endif /* _LINUX_SCHED_CPUFREQ_H */
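
The helper bakes in 25% headroom: freq + (freq >> 2) is 1.25 * freq. A worked example with illustrative numbers:

	/*
	 * util = 614, cap = 1024, freq = 2000000 (kHz):
	 *   (2000000 + 500000) * 614 / 1024 ~= 1499023 kHz
	 * i.e. ~25% above the raw util/cap share of the max frequency,
	 * leaving room for the load to grow before the next update.
	 */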
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index 4a6582c27dea..b0fb1446fe04 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -16,7 +16,7 @@ enum hk_flags {
};
#ifdef CONFIG_CPU_ISOLATION
-DECLARE_STATIC_KEY_FALSE(housekeeping_overriden);
+DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
extern int housekeeping_any_cpu(enum hk_flags flags);
extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
@@ -43,7 +43,7 @@ static inline void housekeeping_init(void) { }
static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
{
#ifdef CONFIG_CPU_ISOLATION
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
return housekeeping_test_cpu(cpu, flags);
#endif
return true;
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index aebb370a0006..3bfa6a0cbba4 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -153,7 +153,7 @@ static inline gfp_t current_gfp_context(gfp_t flags)
{
/*
* NOIO implies both NOIO and NOFS and it is a weaker context
- * so always make sure it makes precendence
+ * so always make sure it makes precedence
*/
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
flags &= ~(__GFP_IO | __GFP_FS);
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 000000000000..59d3736c454c
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/static_key.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern struct static_key_false sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+ return static_branch_likely(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+void arch_smt_update(void);
+
+#endif
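
sched_smt_active() reads a static key, so hot paths can branch on SMT state without a memory load; when CONFIG_SCHED_SMT is off it folds to a constant false. A sketch of the intended call pattern (the caller is hypothetical):

	static void example_pick_mitigation(void)
	{
		if (sched_smt_active()) {
			/* siblings share the core: choose the stricter mode */
		}
	}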
diff --git a/include/linux/sched/stat.h b/include/linux/sched/stat.h
index f30954cc059d..568286411b43 100644
--- a/include/linux/sched/stat.h
+++ b/include/linux/sched/stat.h
@@ -8,7 +8,7 @@
* Various counters maintained by the scheduler and fork(),
* exposed via /proc, sys.c or used by drivers via these APIs.
*
- * ( Note that all these values are aquired without locking,
+ * ( Note that all these values are acquired without locking,
* so they can only be relied on in narrow circumstances. )
*/
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 6b9976180c1e..c31d3a47a47c 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -89,7 +89,6 @@ struct sched_domain {
unsigned int newidle_idx;
unsigned int wake_idx;
unsigned int forkexec_idx;
- unsigned int smt_gain;
int nohz_idle; /* NOHZ IDLE status */
int flags; /* See SD_* */
@@ -202,6 +201,14 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl);
# define SD_INIT_NAME(type)
#endif
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
+
#else /* CONFIG_SMP */
struct sched_domain_attr;
@@ -217,6 +224,14 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
return true;
}
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
+
#endif /* !CONFIG_SMP */
static inline int task_node(const struct task_struct *p)
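With the fallbacks above in place, generic code can scale by CPU capacity
unconditionally on both SMP and !SMP builds. A minimal consumer-side
sketch; scale_util() is illustrative, and passing NULL for the
sched_domain matches how generic callers invoke this hook:

#include <linux/sched.h>
#include <linux/sched/topology.h>

/* Normalize a utilization value against the CPU's relative capacity. */
static unsigned long scale_util(unsigned long util, int cpu)
{
	/* The default implementation returns SCHED_CAPACITY_SCALE (1024). */
	return util * SCHED_CAPACITY_SCALE / arch_scale_cpu_capacity(NULL, cpu);
}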
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index d37518e89db2..d9d9de3fcf8e 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -224,7 +224,7 @@ struct sfp_eeprom_ext {
*
* See the SFF-8472 specification and related documents for the definition
* of these structure members. This can be obtained from
- * ftp://ftp.seagate.com/sff
+ * https://www.snia.org/technology-communities/sff/specifications
*/
struct sfp_eeprom_id {
struct sfp_eeprom_base base;
diff --git a/include/linux/signal.h b/include/linux/signal.h
index f428e86f4800..cc7e2c1cd444 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -273,6 +273,10 @@ extern int group_send_sig_info(int sig, struct kernel_siginfo *info,
struct task_struct *p, enum pid_type type);
extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern int sigprocmask(int, sigset_t *, sigset_t *);
+extern int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
+ sigset_t *oldset, size_t sigsetsize);
+extern void restore_user_sigmask(const void __user *usigmask,
+ sigset_t *sigsaved);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
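A hedged sketch of the calling convention the two new helpers establish,
modelled on ppoll()-style syscalls; do_wait_for_events() stands in for
the real wait loop:

#include <linux/signal.h>

static long do_wait_for_events(void);	/* hypothetical wait loop */

static long poll_like_syscall(const sigset_t __user *sigmask,
			      size_t sigsetsize)
{
	sigset_t ksigmask, sigsaved;
	long ret;

	/* Copies in and applies the temporary mask (no-op if NULL). */
	ret = set_user_sigmask(sigmask, &ksigmask, &sigsaved, sigsetsize);
	if (ret)
		return ret;

	ret = do_wait_for_events();

	/* Undoes the temporary mask; again a no-op if sigmask was NULL. */
	restore_user_sigmask(sigmask, &sigsaved);
	return ret;
}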
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0d1b2c3f127b..93f56fddd92a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -245,6 +245,7 @@ struct iov_iter;
struct napi_struct;
struct bpf_prog;
union bpf_attr;
+struct skb_ext;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
@@ -254,7 +255,6 @@ struct nf_conntrack {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
- refcount_t use;
enum {
BRNF_PROTO_UNCHANGED,
BRNF_PROTO_8021Q,
@@ -481,10 +481,11 @@ static inline void sock_zerocopy_get(struct ubuf_info *uarg)
}
void sock_zerocopy_put(struct ubuf_info *uarg);
-void sock_zerocopy_put_abort(struct ubuf_info *uarg);
+void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref);
void sock_zerocopy_callback(struct ubuf_info *uarg, bool success);
+int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len);
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg);
@@ -615,6 +616,8 @@ typedef unsigned char *sk_buff_data_t;
* @pkt_type: Packet class
* @fclone: skbuff clone status
* @ipvs_property: skbuff is owned by ipvs
+ * @offload_fwd_mark: Packet was L2-forwarded in hardware
+ * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @tc_redirected: packet was redirected by a tc action
@@ -633,6 +636,7 @@ typedef unsigned char *sk_buff_data_t;
* @queue_mapping: Queue mapping for multiqueue devices
* @xmit_more: More SKBs are pending for this queue
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
+ * @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -662,6 +666,7 @@ typedef unsigned char *sk_buff_data_t;
* @data: Data head pointer
* @truesize: Buffer size
* @users: User count - see {datagram,tcp}.c
+ * @extensions: allocated extensions, valid if active_extensions is nonzero
*/
struct sk_buff {
@@ -709,15 +714,9 @@ struct sk_buff {
struct list_head tcp_tsorted_anchor;
};
-#ifdef CONFIG_XFRM
- struct sec_path *sp;
-#endif
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
unsigned long _nfct;
#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- struct nf_bridge_info *nf_bridge;
-#endif
unsigned int len,
data_len;
__u16 mac_len,
@@ -744,7 +743,9 @@ struct sk_buff {
head_frag:1,
xmit_more:1,
pfmemalloc:1;
-
+#ifdef CONFIG_SKB_EXTENSIONS
+ __u8 active_extensions;
+#endif
/* fields enclosed in headers_start/headers_end are copied
* using a single memcpy() in __copy_skb_header()
*/
@@ -777,6 +778,14 @@ struct sk_buff {
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_VLAN_PRESENT_BIT 7
+#else
+#define PKT_VLAN_PRESENT_BIT 0
+#endif
+#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
+ __u8 __pkt_vlan_present_offset[0];
+ __u8 vlan_present:1;
__u8 csum_complete_sw:1;
__u8 csum_level:2;
__u8 csum_not_inet:1;
@@ -784,13 +793,13 @@ struct sk_buff {
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
- __u8 ipvs_property:1;
+ __u8 ipvs_property:1;
__u8 inner_protocol_type:1;
__u8 remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
__u8 offload_fwd_mark:1;
- __u8 offload_mr_fwd_mark:1;
+ __u8 offload_l3_fwd_mark:1;
#endif
#ifdef CONFIG_NET_CLS_ACT
__u8 tc_skip_classify:1;
@@ -858,6 +867,11 @@ struct sk_buff {
*data;
unsigned int truesize;
refcount_t users;
+
+#ifdef CONFIG_SKB_EXTENSIONS
+ /* only usable after checking ->active_extensions != 0 */
+ struct skb_ext *extensions;
+#endif
};
#ifdef __KERNEL__
@@ -1317,10 +1331,14 @@ static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb)
return is_zcopy ? skb_uarg(skb) : NULL;
}
-static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
+static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg,
+ bool *have_ref)
{
if (skb && uarg && !skb_zcopy(skb)) {
- sock_zerocopy_get(uarg);
+ if (unlikely(have_ref && *have_ref))
+ *have_ref = false;
+ else
+ sock_zerocopy_get(uarg);
skb_shinfo(skb)->destructor_arg = uarg;
skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
}
@@ -1365,7 +1383,7 @@ static inline void skb_zcopy_abort(struct sk_buff *skb)
struct ubuf_info *uarg = skb_zcopy(skb);
if (uarg) {
- sock_zerocopy_put_abort(uarg);
+ sock_zerocopy_put_abort(uarg, false);
skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG;
}
}
@@ -1741,8 +1759,6 @@ static inline void skb_queue_head_init_class(struct sk_buff_head *list,
* The "__skb_xxxx()" functions are the non-atomic ones that
* can only be called with interrupts disabled.
*/
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk,
- struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
@@ -2524,10 +2540,8 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len);
static inline void __skb_set_length(struct sk_buff *skb, unsigned int len)
{
- if (unlikely(skb_is_nonlinear(skb))) {
- WARN_ON(1);
+ if (WARN_ON(skb_is_nonlinear(skb)))
return;
- }
skb->len = len;
skb_set_tail_pointer(skb, len);
}
@@ -3325,6 +3339,9 @@ static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
struct msghdr *msg);
+int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len,
+ struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
@@ -3345,7 +3362,6 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
unsigned int flags);
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
int len);
-int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
@@ -3886,18 +3902,97 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
atomic_inc(&nfct->use);
}
#endif
+
+#ifdef CONFIG_SKB_EXTENSIONS
+enum skb_ext_id {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
+ SKB_EXT_BRIDGE_NF,
+#endif
+#ifdef CONFIG_XFRM
+ SKB_EXT_SEC_PATH,
+#endif
+ SKB_EXT_NUM, /* must be last */
+};
+
+/**
+ * struct skb_ext - sk_buff extensions
+ * @refcnt: 1 on allocation, deallocated on 0
+ * @offset: offset to add to @data to obtain extension address
+ * @chunks: size currently allocated, stored in SKB_EXT_ALIGN_SHIFT units
+ * @data: start of extension data, variable sized
+ *
+ * Note: offsets/lengths are stored in chunks of 8 bytes; this allows the
+ * use of 'u8' types while supporting up to 2kb worth of extension data.
+ */
+struct skb_ext {
+ refcount_t refcnt;
+ u8 offset[SKB_EXT_NUM]; /* in chunks of 8 bytes */
+ u8 chunks; /* same */
+ char data[0] __aligned(8);
+};
+
+void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
+void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
+void __skb_ext_put(struct skb_ext *ext);
+
+static inline void skb_ext_put(struct sk_buff *skb)
{
- if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
- kfree(nf_bridge);
+ if (skb->active_extensions)
+ __skb_ext_put(skb->extensions);
}
-static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
+
+static inline void __skb_ext_copy(struct sk_buff *dst,
+ const struct sk_buff *src)
{
- if (nf_bridge)
- refcount_inc(&nf_bridge->use);
+ dst->active_extensions = src->active_extensions;
+
+ if (src->active_extensions) {
+ struct skb_ext *ext = src->extensions;
+
+ refcount_inc(&ext->refcnt);
+ dst->extensions = ext;
+ }
}
-#endif /* CONFIG_BRIDGE_NETFILTER */
+
+static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *src)
+{
+ skb_ext_put(dst);
+ __skb_ext_copy(dst, src);
+}
+
+static inline bool __skb_ext_exist(const struct skb_ext *ext, enum skb_ext_id i)
+{
+ return !!ext->offset[i];
+}
+
+static inline bool skb_ext_exist(const struct sk_buff *skb, enum skb_ext_id id)
+{
+ return skb->active_extensions & (1 << id);
+}
+
+static inline void skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
+{
+ if (skb_ext_exist(skb, id))
+ __skb_ext_del(skb, id);
+}
+
+static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
+{
+ if (skb_ext_exist(skb, id)) {
+ struct skb_ext *ext = skb->extensions;
+
+ return (void *)ext + (ext->offset[id] << 3);
+ }
+
+ return NULL;
+}
+#else
+static inline void skb_ext_put(struct sk_buff *skb) {}
+static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
+static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
+static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
+#endif /* CONFIG_SKB_EXTENSIONS */
+
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
@@ -3905,8 +4000,7 @@ static inline void nf_reset(struct sk_buff *skb)
skb->_nfct = 0;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- nf_bridge_put(skb->nf_bridge);
- skb->nf_bridge = NULL;
+ skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
#endif
}
@@ -3924,7 +4018,7 @@ static inline void ipvs_reset(struct sk_buff *skb)
#endif
}
-/* Note: This doesn't put any conntrack and bridge info in dst. */
+/* Note: This doesn't put any conntrack info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
bool copy)
{
@@ -3932,10 +4026,6 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
dst->_nfct = src->_nfct;
nf_conntrack_get(skb_nfct(src));
#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- dst->nf_bridge = src->nf_bridge;
- nf_bridge_get(src->nf_bridge);
-#endif
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
if (copy)
dst->nf_trace = src->nf_trace;
@@ -3947,9 +4037,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb_nfct(dst));
#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- nf_bridge_put(dst->nf_bridge);
-#endif
__nf_copy(dst, src, true);
}
@@ -3971,12 +4058,19 @@ static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif
+static inline int secpath_exists(const struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+ return skb_ext_exist(skb, SKB_EXT_SEC_PATH);
+#else
+ return 0;
+#endif
+}
+
static inline bool skb_irq_freeable(const struct sk_buff *skb)
{
return !skb->destructor &&
-#if IS_ENABLED(CONFIG_XFRM)
- !skb->sp &&
-#endif
+ !secpath_exists(skb) &&
!skb_nfct(skb) &&
!skb->_skb_refdst &&
!skb_has_frag_list(skb);
@@ -4022,10 +4116,10 @@ static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
return skb->dst_pending_confirm != 0;
}
-static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
+static inline struct sec_path *skb_sec_path(const struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
- return skb->sp;
+ return skb_ext_find(skb, SKB_EXT_SEC_PATH);
#else
return NULL;
#endif
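A hedged sketch of the extension life cycle introduced above, using the
secpath id; the surrounding function and its error handling are
illustrative:

#include <linux/skbuff.h>

static int attach_secpath_demo(struct sk_buff *skb)
{
	struct sec_path *sp;

	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);	/* allocates or COWs */
	if (!sp)
		return -ENOMEM;

	/* ... fill in *sp ... */

	if (skb_ext_exist(skb, SKB_EXT_SEC_PATH)) {
		sp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
		/* ... use sp ... */
	}

	skb_ext_del(skb, SKB_EXT_SEC_PATH);	/* drops only this id */
	return 0;
}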
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 2a11e9d91dfa..178a3933a71b 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -36,6 +36,7 @@ struct sk_msg_sg {
struct scatterlist data[MAX_MSG_FRAGS + 1];
};
+/* UAPI in filter.c depends on struct sk_msg_sg being the first element. */
struct sk_msg {
struct sk_msg_sg sg;
void *data;
@@ -416,6 +417,14 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
sk_psock_drop(sk, psock);
}
+static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
+{
+ if (psock->parser.enabled)
+ psock->parser.saved_data_ready(sk);
+ else
+ sk->sk_data_ready(sk);
+}
+
static inline void psock_set_prog(struct bpf_prog **pprog,
struct bpf_prog *prog)
{
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 8b571e9b9f76..ab2041a00e01 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -286,6 +286,7 @@ struct ucred {
#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */
#define MSG_MORE 0x8000 /* Sender will send more */
#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
+#define MSG_SENDPAGE_NOPOLICY 0x10000 /* sendpage() internal : do not apply policy */
#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */
#define MSG_EOF MSG_FIN
@@ -348,7 +349,8 @@ struct ucred {
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
-struct timespec64;
+struct __kernel_timespec;
+struct old_timespec32;
/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff
* forbid_cmsg_compat==false
@@ -357,8 +359,10 @@ extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg,
unsigned int flags, bool forbid_cmsg_compat);
extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg,
unsigned int flags, bool forbid_cmsg_compat);
-extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
- unsigned int flags, struct timespec64 *timeout);
+extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
+ unsigned int vlen, unsigned int flags,
+ struct __kernel_timespec __user *timeout,
+ struct old_timespec32 __user *timeout32);
extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
bool forbid_cmsg_compat);
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 9ec4c147abbc..b0674e330ef6 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -25,6 +25,7 @@ struct dma_chan;
struct pxa2xx_spi_master {
u16 num_chipselect;
u8 enable_dma;
+ bool is_slave;
/* DMA engine specific config */
bool (*dma_filter)(struct dma_chan *chan, void *param);
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 69ee30456864..3fe24500c5ee 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -57,10 +57,12 @@
/**
* enum spi_mem_data_dir - describes the direction of a SPI memory data
* transfer from the controller perspective
+ * @SPI_MEM_NO_DATA: no data transferred
* @SPI_MEM_DATA_IN: data coming from the SPI memory
- * @SPI_MEM_DATA_OUT: data sent the SPI memory
+ * @SPI_MEM_DATA_OUT: data sent to the SPI memory
*/
enum spi_mem_data_dir {
+ SPI_MEM_NO_DATA,
SPI_MEM_DATA_IN,
SPI_MEM_DATA_OUT,
};
@@ -123,6 +125,49 @@ struct spi_mem_op {
}
/**
+ * struct spi_mem_dirmap_info - Direct mapping information
+ * @op_tmpl: operation template that should be used by the direct mapping when
+ * the memory device is accessed
+ * @offset: absolute offset this direct mapping is pointing to
+ * @length: length in byte of this direct mapping
+ *
+ * This information is used by the controller-specific implementation to know
+ * the portion of memory that is directly mapped and the spi_mem_op that should
+ * be used to access the device.
+ * A direct mapping is only valid for one direction (read or write) and this
+ * direction is directly encoded in the ->op_tmpl.data.dir field.
+ */
+struct spi_mem_dirmap_info {
+ struct spi_mem_op op_tmpl;
+ u64 offset;
+ u64 length;
+};
+
+/**
+ * struct spi_mem_dirmap_desc - Direct mapping descriptor
+ * @mem: the SPI memory device this direct mapping is attached to
+ * @info: information passed at direct mapping creation time
+ * @nodirmap: set to 1 if the SPI controller does not implement
+ * ->mem_ops->dirmap_create() or when this function returned an
+ * error. If @nodirmap is true, all spi_mem_dirmap_{read,write}()
+ * calls will use spi_mem_exec_op() to access the memory. This is a
+ * degraded mode that allows spi_mem drivers to use the same code
+ * no matter whether the controller supports direct mapping or not
+ * @priv: field pointing to controller specific data
+ *
+ * Common part of a direct mapping descriptor. This object is created by
+ * spi_mem_dirmap_create(), and the controller's ->dirmap_create() hook can
+ * create/attach direct mapping resources to the descriptor via the ->priv
+ * field.
+ */
+struct spi_mem_dirmap_desc {
+ struct spi_mem *mem;
+ struct spi_mem_dirmap_info info;
+ unsigned int nodirmap;
+ void *priv;
+};
+
+/**
* struct spi_mem - describes a SPI memory device
* @spi: the underlying SPI device
* @drvpriv: spi_mem_driver private data
@@ -177,10 +222,32 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* Note that if the implementation of this function allocates memory
* dynamically, then it should do so with devm_xxx(), as we don't
* have a ->free_name() function.
+ * @dirmap_create: create a direct mapping descriptor that can later be used to
+ * access the memory device. This method is optional
+ * @dirmap_destroy: destroy a memory descriptor previous created by
+ * ->dirmap_create()
+ * @dirmap_read: read data from the memory device using the direct mapping
+ * created by ->dirmap_create(). The function can return less
+ * data than requested (for example when the request is crossing
+ * the currently mapped area), and the caller of
+ * spi_mem_dirmap_read() is responsible for calling it again in
+ * this case.
+ * @dirmap_write: write data to the memory device using the direct mapping
+ * created by ->dirmap_create(). The function can return less
+ * data than requested (for example when the request is crossing
+ * the currently mapped area), and the caller of
+ * spi_mem_dirmap_write() is responsible for calling it again in
+ * this case.
*
 * This interface should be implemented by SPI controllers providing a
 * high-level interface to execute SPI memory operations, which is usually the
* case for QSPI controllers.
+ *
+ * Note on ->dirmap_{read,write}(): drivers should avoid accessing the direct
+ * mapping from the CPU because doing that can stall the CPU waiting for the
+ * SPI mem transaction to finish, and this will make real-time maintainers
+ * unhappy and might make your system less responsive. Instead, drivers should
+ * use DMA to access this direct mapping.
*/
struct spi_controller_mem_ops {
int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
@@ -189,6 +256,12 @@ struct spi_controller_mem_ops {
int (*exec_op)(struct spi_mem *mem,
const struct spi_mem_op *op);
const char *(*get_name)(struct spi_mem *mem);
+ int (*dirmap_create)(struct spi_mem_dirmap_desc *desc);
+ void (*dirmap_destroy)(struct spi_mem_dirmap_desc *desc);
+ ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf);
+ ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf);
};
/**
@@ -249,6 +322,15 @@ int spi_mem_exec_op(struct spi_mem *mem,
const char *spi_mem_get_name(struct spi_mem *mem);
+struct spi_mem_dirmap_desc *
+spi_mem_dirmap_create(struct spi_mem *mem,
+ const struct spi_mem_dirmap_info *info);
+void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc);
+ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf);
+ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, const void *buf);
+
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
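A hedged sketch of the dirmap flow from a spi_mem driver's perspective;
the opcode, address width, and mapping size below are illustrative only:

#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

static ssize_t read_via_dirmap(struct spi_mem *mem, u64 offs,
			       void *buf, size_t len)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = SZ_16M,
	};
	struct spi_mem_dirmap_desc *desc;
	size_t done = 0;
	ssize_t ret;

	desc = spi_mem_dirmap_create(mem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Reads may be partial (e.g. at a mapping boundary), so loop. */
	while (done < len) {
		ret = spi_mem_dirmap_read(desc, offs + done, len - done,
					  buf + done);
		if (ret < 0)
			goto out;
		if (!ret) {
			ret = -EIO;
			goto out;
		}
		done += ret;
	}
	ret = done;
out:
	spi_mem_dirmap_destroy(desc);
	return ret;
}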
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 6be77fa5ab90..314d922ca607 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -154,7 +154,10 @@ struct spi_device {
#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
-#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
+#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
+#define SPI_TX_OCTAL 0x2000 /* transmit with 8 wires */
+#define SPI_RX_OCTAL 0x4000 /* receive with 8 wires */
+#define SPI_3WIRE_HIZ 0x8000 /* high impedance turnaround */
int irq;
void *controller_state;
void *controller_data;
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 67135d4a8a30..c614375cd264 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -38,20 +38,20 @@ struct srcu_struct;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key);
-#define init_srcu_struct(sp) \
+#define init_srcu_struct(ssp) \
({ \
static struct lock_class_key __srcu_key; \
\
- __init_srcu_struct((sp), #sp, &__srcu_key); \
+ __init_srcu_struct((ssp), #ssp, &__srcu_key); \
})
#define __SRCU_DEP_MAP_INIT(srcu_name) .dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-int init_srcu_struct(struct srcu_struct *sp);
+int init_srcu_struct(struct srcu_struct *ssp);
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
@@ -67,28 +67,28 @@ int init_srcu_struct(struct srcu_struct *sp);
struct srcu_struct { };
#endif
-void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
void (*func)(struct rcu_head *head));
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced);
-int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
-void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
-void synchronize_srcu(struct srcu_struct *sp);
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced);
+int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
+void synchronize_srcu(struct srcu_struct *ssp);
/**
* cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
*
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory.
*/
-static inline void cleanup_srcu_struct(struct srcu_struct *sp)
+static inline void cleanup_srcu_struct(struct srcu_struct *ssp)
{
- _cleanup_srcu_struct(sp, false);
+ _cleanup_srcu_struct(ssp, false);
}
/**
* cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
*
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory. Also,
@@ -103,16 +103,16 @@ static inline void cleanup_srcu_struct(struct srcu_struct *sp)
* (with high probability, anyway), and will also cause the srcu_struct
* to be leaked.
*/
-static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
+static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp)
{
- _cleanup_srcu_struct(sp, true);
+ _cleanup_srcu_struct(ssp, true);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
* srcu_read_lock_held - might we be in SRCU read-side critical section?
- * @sp: The srcu_struct structure to check
+ * @ssp: The srcu_struct structure to check
*
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
* read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
@@ -126,16 +126,16 @@ static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *sp)
* relies on normal RCU, it can be called from the CPU which
* is in the idle loop from an RCU point of view or offline.
*/
-static inline int srcu_read_lock_held(const struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
if (!debug_lockdep_rcu_enabled())
return 1;
- return lock_is_held(&sp->dep_map);
+ return lock_is_held(&ssp->dep_map);
}
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-static inline int srcu_read_lock_held(const struct srcu_struct *sp)
+static inline int srcu_read_lock_held(const struct srcu_struct *ssp)
{
return 1;
}
@@ -145,7 +145,7 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
/**
* srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing
- * @sp: pointer to the srcu_struct, which is used to check that we
+ * @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section.
* @c: condition to check for update-side use
*
@@ -154,29 +154,32 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
* to 1. The @c argument will normally be a logical expression containing
* lockdep_is_held() calls.
*/
-#define srcu_dereference_check(p, sp, c) \
- __rcu_dereference_check((p), (c) || srcu_read_lock_held(sp), __rcu)
+#define srcu_dereference_check(p, ssp, c) \
+ __rcu_dereference_check((p), (c) || srcu_read_lock_held(ssp), __rcu)
/**
* srcu_dereference - fetch SRCU-protected pointer for later dereferencing
* @p: the pointer to fetch and protect for later dereferencing
- * @sp: pointer to the srcu_struct, which is used to check that we
+ * @ssp: pointer to the srcu_struct, which is used to check that we
* really are in an SRCU read-side critical section.
*
* Makes rcu_dereference_check() do the dirty work. If PROVE_RCU
* is enabled, invoking this outside of an RCU read-side critical
* section will result in an RCU-lockdep splat.
*/
-#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)
+#define srcu_dereference(p, ssp) srcu_dereference_check((p), (ssp), 0)
/**
* srcu_dereference_notrace - no tracing and no lockdep calls from here
+ * @p: the pointer to fetch and protect for later dereferencing
+ * @ssp: pointer to the srcu_struct, which is used to check that we
+ * really are in an SRCU read-side critical section.
*/
-#define srcu_dereference_notrace(p, sp) srcu_dereference_check((p), (sp), 1)
+#define srcu_dereference_notrace(p, ssp) srcu_dereference_check((p), (ssp), 1)
/**
* srcu_read_lock - register a new reader for an SRCU-protected structure.
- * @sp: srcu_struct in which to register the new reader.
+ * @ssp: srcu_struct in which to register the new reader.
*
* Enter an SRCU read-side critical section. Note that SRCU read-side
* critical sections may be nested. However, it is illegal to
@@ -191,44 +194,44 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
* srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
* was invoked in process context.
*/
-static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
+static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- retval = __srcu_read_lock(sp);
- rcu_lock_acquire(&(sp)->dep_map);
+ retval = __srcu_read_lock(ssp);
+ rcu_lock_acquire(&(ssp)->dep_map);
return retval;
}
/* Used by tracing, cannot be traced and cannot invoke lockdep. */
static inline notrace int
-srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp)
{
int retval;
- retval = __srcu_read_lock(sp);
+ retval = __srcu_read_lock(ssp);
return retval;
}
/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
- * @sp: srcu_struct in which to unregister the old reader.
+ * @ssp: srcu_struct in which to unregister the old reader.
* @idx: return value from corresponding srcu_read_lock().
*
* Exit an SRCU read-side critical section.
*/
-static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
- __releases(sp)
+static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx)
+ __releases(ssp)
{
- rcu_lock_release(&(sp)->dep_map);
- __srcu_read_unlock(sp, idx);
+ rcu_lock_release(&(ssp)->dep_map);
+ __srcu_read_unlock(ssp, idx);
}
/* Used by tracing, cannot be traced and cannot call lockdep. */
static inline notrace void
-srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp)
{
- __srcu_read_unlock(sp, idx);
+ __srcu_read_unlock(ssp, idx);
}
/**
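For reference, a hedged sketch of the canonical SRCU pattern with the
new 'ssp' naming convention; struct my_data, my_srcu, and my_ptr are
illustrative:

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct my_data { int val; };

DEFINE_SRCU(my_srcu);
static struct my_data __rcu *my_ptr;

static void reader(void)
{
	struct my_data *p;
	int idx;

	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(my_ptr, &my_srcu);
	if (p)
		pr_info("val=%d\n", p->val);	/* may sleep in here */
	srcu_read_unlock(&my_srcu, idx);
}

/* Assumes callers serialize updates among themselves. */
static void updater(struct my_data *new)
{
	struct my_data *old;

	old = rcu_dereference_protected(my_ptr, 1);
	rcu_assign_pointer(my_ptr, new);
	synchronize_srcu(&my_srcu);	/* wait out readers of 'old' */
	kfree(old);
}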
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
index f41d2fb09f87..b19216aaaef2 100644
--- a/include/linux/srcutiny.h
+++ b/include/linux/srcutiny.h
@@ -60,7 +60,7 @@ void srcu_drive_gp(struct work_struct *wp);
#define DEFINE_STATIC_SRCU(name) \
static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name)
-void synchronize_srcu(struct srcu_struct *sp);
+void synchronize_srcu(struct srcu_struct *ssp);
/*
* Counts the new reader in the appropriate per-CPU element of the
@@ -68,36 +68,36 @@ void synchronize_srcu(struct srcu_struct *sp);
* __srcu_read_unlock() must be in the same handler instance. Returns an
* index that must be passed to the matching srcu_read_unlock().
*/
-static inline int __srcu_read_lock(struct srcu_struct *sp)
+static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
- idx = READ_ONCE(sp->srcu_idx);
- WRITE_ONCE(sp->srcu_lock_nesting[idx], sp->srcu_lock_nesting[idx] + 1);
+ idx = READ_ONCE(ssp->srcu_idx);
+ WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1);
return idx;
}
-static inline void synchronize_srcu_expedited(struct srcu_struct *sp)
+static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
- synchronize_srcu(sp);
+ synchronize_srcu(ssp);
}
-static inline void srcu_barrier(struct srcu_struct *sp)
+static inline void srcu_barrier(struct srcu_struct *ssp)
{
- synchronize_srcu(sp);
+ synchronize_srcu(ssp);
}
/* Defined here to avoid size increase for non-torture kernels. */
-static inline void srcu_torture_stats_print(struct srcu_struct *sp,
+static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
char *tt, char *tf)
{
int idx;
- idx = READ_ONCE(sp->srcu_idx) & 0x1;
+ idx = READ_ONCE(ssp->srcu_idx) & 0x1;
pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n",
tt, tf, idx,
- READ_ONCE(sp->srcu_lock_nesting[!idx]),
- READ_ONCE(sp->srcu_lock_nesting[idx]));
+ READ_ONCE(ssp->srcu_lock_nesting[!idx]),
+ READ_ONCE(ssp->srcu_lock_nesting[idx]));
}
#endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
index 0ae91b3a7406..6f292bd3e7db 100644
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -51,7 +51,7 @@ struct srcu_data {
unsigned long grpmask; /* Mask for leaf srcu_node */
/* ->srcu_data_have_cbs[]. */
int cpu;
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
};
/*
@@ -138,8 +138,8 @@ struct srcu_struct {
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
-void synchronize_srcu_expedited(struct srcu_struct *sp);
-void srcu_barrier(struct srcu_struct *sp);
-void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf);
+void synchronize_srcu_expedited(struct srcu_struct *ssp);
+void srcu_barrier(struct srcu_struct *ssp);
+void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf);
#endif
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 43106ffa6788..2ec128060239 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
buf->head[0].iov_base = start;
buf->head[0].iov_len = len;
buf->tail[0].iov_len = 0;
- buf->bvec = NULL;
buf->pages = NULL;
buf->page_len = 0;
buf->flags = 0;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d8a07a4f171d..a8f6d5d89524 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -18,6 +18,8 @@ struct notifier_block;
struct bio;
+struct pagevec;
+
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
@@ -369,7 +371,7 @@ static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,
#endif
extern int page_evictable(struct page *page);
-extern void check_move_unevictable_pages(struct page **, int nr_pages);
+extern void check_move_unevictable_pages(struct pagevec *pvec);
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 2ac3d13a915b..251979d2e709 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -296,12 +296,18 @@ asmlinkage long sys_io_getevents(aio_context_t ctx_id,
long min_nr,
long nr,
struct io_event __user *events,
- struct timespec __user *timeout);
+ struct __kernel_timespec __user *timeout);
asmlinkage long sys_io_pgetevents(aio_context_t ctx_id,
long min_nr,
long nr,
struct io_event __user *events,
- struct timespec __user *timeout,
+ struct __kernel_timespec __user *timeout,
+ const struct __aio_sigset *sig);
+asmlinkage long sys_io_pgetevents_time32(aio_context_t ctx_id,
+ long min_nr,
+ long nr,
+ struct io_event __user *events,
+ struct old_timespec32 __user *timeout,
const struct __aio_sigset *sig);
/* fs/xattr.c */
@@ -466,10 +472,16 @@ asmlinkage long sys_sendfile64(int out_fd, int in_fd,
/* fs/select.c */
asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
- fd_set __user *, struct timespec __user *,
+ fd_set __user *, struct __kernel_timespec __user *,
+ void __user *);
+asmlinkage long sys_pselect6_time32(int, fd_set __user *, fd_set __user *,
+ fd_set __user *, struct old_timespec32 __user *,
void __user *);
asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
- struct timespec __user *, const sigset_t __user *,
+ struct __kernel_timespec __user *, const sigset_t __user *,
+ size_t);
+asmlinkage long sys_ppoll_time32(struct pollfd __user *, unsigned int,
+ struct old_timespec32 __user *, const sigset_t __user *,
size_t);
/* fs/signalfd.c */
@@ -541,7 +553,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags);
/* kernel/futex.c */
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
- struct timespec __user *utime, u32 __user *uaddr2,
+ struct __kernel_timespec __user *utime, u32 __user *uaddr2,
u32 val3);
asmlinkage long sys_get_robust_list(int pid,
struct robust_list_head __user * __user *head_ptr,
@@ -637,6 +649,10 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
siginfo_t __user *uinfo,
const struct __kernel_timespec __user *uts,
size_t sigsetsize);
+asmlinkage long sys_rt_sigtimedwait_time32(const sigset_t __user *uthese,
+ siginfo_t __user *uinfo,
+ const struct old_timespec32 __user *uts,
+ size_t sigsetsize);
asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo);
/* kernel/sys.c */
@@ -831,6 +847,9 @@ asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags,
struct __kernel_timespec __user *timeout);
+asmlinkage long sys_recvmmsg_time32(int fd, struct mmsghdr __user *msg,
+ unsigned int vlen, unsigned flags,
+ struct old_timespec32 __user *timeout);
asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
int options, struct rusage __user *ru);
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 987cefa337de..786816cf4aa5 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -234,7 +234,7 @@ int __must_check sysfs_create_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns);
int __must_check sysfs_create_files(struct kobject *kobj,
- const struct attribute **attr);
+ const struct attribute * const *attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
@@ -243,7 +243,7 @@ void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
-void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr);
+void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *attr);
int __must_check sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr);
@@ -342,7 +342,7 @@ static inline int sysfs_create_file_ns(struct kobject *kobj,
}
static inline int sysfs_create_files(struct kobject *kobj,
- const struct attribute **attr)
+ const struct attribute * const *attr)
{
return 0;
}
@@ -377,7 +377,7 @@ static inline bool sysfs_remove_file_self(struct kobject *kobj,
}
static inline void sysfs_remove_files(struct kobject *kobj,
- const struct attribute **attr)
+ const struct attribute * const *attr)
{
}
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index b9626aa7e90c..3e2a80cc7b56 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -39,12 +39,13 @@ struct t10_pi_tuple {
static inline u32 t10_pi_ref_tag(struct request *rq)
{
+ unsigned int shift = ilog2(queue_logical_block_size(rq->q));
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
- return blk_rq_pos(rq) >>
- (rq->q->integrity.interval_exp - 9) & 0xffffffff;
-#else
- return -1U;
+ if (rq->q->integrity.interval_exp)
+ shift = rq->q->integrity.interval_exp;
#endif
+ return blk_rq_pos(rq) >> (shift - SECTOR_SHIFT) & 0xffffffff;
}
extern const struct blk_integrity_profile t10_pi_type1_crc;
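A standalone walk-through of the new shift logic (not kernel code): the
shift defaults to ilog2(logical block size) and is overridden by the
integrity interval exponent when one is set, so the reference tag now
comes out right for 4K-sector devices instead of assuming 512-byte
intervals:

#include <stdio.h>

#define SECTOR_SHIFT 9

/* Mirrors the tail of t10_pi_ref_tag(): pos is in 512-byte sectors. */
static unsigned int ref_tag(unsigned long long pos, unsigned int shift)
{
	return pos >> (shift - SECTOR_SHIFT) & 0xffffffff;
}

int main(void)
{
	printf("%u\n", ref_tag(1024, 9));	/* 512B blocks: tag 1024 */
	printf("%u\n", ref_tag(1024, 12));	/* 4KB blocks: tag 128  */
	return 0;
}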
diff --git a/include/linux/thinkpad_acpi.h b/include/linux/thinkpad_acpi.h
deleted file mode 100644
index 9fb317970c01..000000000000
--- a/include/linux/thinkpad_acpi.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __THINKPAD_ACPI_H__
-#define __THINKPAD_ACPI_H__
-
-/* These two functions return 0 if success, or negative error code
- (e g -ENODEV if no led present) */
-
-enum {
- TPACPI_LED_MUTE,
- TPACPI_LED_MICMUTE,
- TPACPI_LED_MAX,
-};
-
-int tpacpi_led_set(int whichled, bool on);
-
-#endif
diff --git a/include/linux/time32.h b/include/linux/time32.h
index 61904a6c098f..118b9977080c 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -96,31 +96,6 @@ static inline int timespec_compare(const struct timespec *lhs, const struct time
return lhs->tv_nsec - rhs->tv_nsec;
}
-extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
-
-static inline struct timespec timespec_add(struct timespec lhs,
- struct timespec rhs)
-{
- struct timespec ts_delta;
-
- set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec,
- lhs.tv_nsec + rhs.tv_nsec);
- return ts_delta;
-}
-
-/*
- * sub = lhs - rhs, in normalized form
- */
-static inline struct timespec timespec_sub(struct timespec lhs,
- struct timespec rhs)
-{
- struct timespec ts_delta;
-
- set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
- lhs.tv_nsec - rhs.tv_nsec);
- return ts_delta;
-}
-
/*
* Returns true if the timespec is norm, false if denorm:
*/
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 29975e93fcb8..a8ab0f143ac4 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -262,18 +262,4 @@ void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
struct timespec64 *boot_offset);
extern int update_persistent_clock64(struct timespec64 now);
-/*
- * deprecated aliases, don't use in new code
- */
-#define getnstimeofday64(ts) ktime_get_real_ts64(ts)
-
-static inline struct timespec64 current_kernel_time64(void)
-{
- struct timespec64 ts;
-
- ktime_get_coarse_real_ts64(&ts);
-
- return ts;
-}
-
#endif
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
index a502616f7e1c..cc59cc9e0e84 100644
--- a/include/linux/timekeeping32.h
+++ b/include/linux/timekeeping32.h
@@ -6,15 +6,6 @@
* over time so we can remove the file here.
*/
-static inline void do_gettimeofday(struct timeval *tv)
-{
- struct timespec64 now;
-
- ktime_get_real_ts64(&now);
- tv->tv_sec = now.tv_sec;
- tv->tv_usec = now.tv_nsec/1000;
-}
-
static inline unsigned long get_seconds(void)
{
return ktime_get_real_seconds();
@@ -52,10 +43,4 @@ static inline void getboottime(struct timespec *ts)
*ts = timespec64_to_timespec(ts64);
}
-/*
- * Persistent clock related interfaces
- */
-extern void read_persistent_clock(struct timespec *ts);
-extern int update_persistent_clock(struct timespec now);
-
#endif
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 4130a5497d40..8a62731673f7 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -471,7 +471,8 @@ void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
-struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name);
+struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
+void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
u64 *probe_offset, u64 *probe_addr);
@@ -502,10 +503,13 @@ static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf
{
return -EOPNOTSUPP;
}
-static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
+static inline struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
return NULL;
}
+static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
+{
+}
static inline int bpf_get_perf_event_info(const struct perf_event *event,
u32 *prog_id, u32 *fd_type,
const char **buf, u64 *probe_offset,
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 40b0b4c1bf7b..df20f8bdbfa3 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -83,8 +83,8 @@ static inline int ptrace_report_syscall(struct pt_regs *regs)
* tracehook_report_syscall_entry - task is about to attempt a system call
* @regs: user register state of current task
*
- * This will be called if %TIF_SYSCALL_TRACE has been set, when the
- * current task has just entered the kernel for a system call.
+ * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU has been set,
+ * when the current task has just entered the kernel for a system call.
* Full user register state is available here. Changing the values
* in @regs can affect the system call number and arguments to be tried.
* It is safe to block here, preventing the system call from beginning.
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 538ba1a58f5b..9c3186578ce0 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -82,7 +82,7 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
static inline void tracepoint_synchronize_unregister(void)
{
synchronize_srcu(&tracepoint_srcu);
- synchronize_sched();
+ synchronize_rcu();
}
#else
static inline void tracepoint_synchronize_unregister(void)
@@ -166,7 +166,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
struct tracepoint_func *it_func_ptr; \
void *it_func; \
void *__data; \
- int __maybe_unused idx = 0; \
+ int __maybe_unused __idx = 0; \
\
if (!(cond)) \
return; \
@@ -182,7 +182,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
* doesn't work from the idle path. \
*/ \
if (rcuidle) { \
- idx = srcu_read_lock_notrace(&tracepoint_srcu); \
+ __idx = srcu_read_lock_notrace(&tracepoint_srcu);\
rcu_irq_enter_irqson(); \
} \
\
@@ -198,7 +198,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
\
if (rcuidle) { \
rcu_irq_exit_irqson(); \
- srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
+ srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
} \
\
preempt_enable_notrace(); \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 414db2bce715..392138fe59b6 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -556,6 +556,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
extern void tty_release_struct(struct tty_struct *tty, int idx);
extern int tty_release(struct inode *inode, struct file *filp);
extern void tty_init_termios(struct tty_struct *tty);
+extern void tty_save_termios(struct tty_struct *tty);
extern int tty_standard_install(struct tty_driver *driver,
struct tty_struct *tty);
diff --git a/include/linux/types.h b/include/linux/types.h
index 9834e90aa010..c2615d6a019e 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -212,8 +212,8 @@ struct ustat {
* weird ABI and we need to ask it explicitly.
*
* The alignment is required to guarantee that bit 0 of @next will be
- * clear under normal conditions -- as long as we use call_rcu(),
- * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback.
+ * clear under normal conditions -- as long as we use call_rcu() or
+ * call_srcu() to queue the callback.
*
 * This guarantee is important for a few reasons:
* - future call_rcu_lazy() will make use of lower bits in the pointer;
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 320d49d85484..2725c83395bf 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -49,7 +49,13 @@ struct udp_sock {
unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */
+ no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
+ encap_enabled:1, /* This socket enabled encap
+ * processing; UDP tunnels and
+ * other encapsulation layers set
+ * this
+ */
+ gro_enabled:1; /* Can accept GRO packets */
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -71,6 +77,7 @@ struct udp_sock {
* For encapsulation sockets.
*/
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);
/* GRO functions for UDP socket */
@@ -115,6 +122,23 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
return udp_sk(sk)->no_check6_rx;
}
+static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
+{
+ int gso_size;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ gso_size = skb_shinfo(skb)->gso_size;
+ put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size);
+ }
+}
+
+static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
+{
+ return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
+ skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
+}
+
#define udp_portaddr_for_each_entry(__sk, list) \
hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 55ce99ddb912..ecf584f6b82d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/thread_info.h>
+#include <crypto/hash.h>
#include <uapi/linux/uio.h>
struct page;
@@ -266,9 +267,11 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
+size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+ struct iov_iter *i);
int import_iovec(int type, const struct iovec __user * uvector,
unsigned nr_segs, unsigned fast_segs,
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 4cdd515a4385..5e49e82c4368 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -407,11 +407,11 @@ struct usb_host_bos {
};
int __usb_get_extra_descriptor(char *buffer, unsigned size,
- unsigned char type, void **ptr);
+ unsigned char type, void **ptr, size_t min);
#define usb_get_extra_descriptor(ifpoint, type, ptr) \
__usb_get_extra_descriptor((ifpoint)->extra, \
(ifpoint)->extralen, \
- type, (void **)ptr)
+ type, (void **)ptr, sizeof(**(ptr)))
/* ----------------------------------------------------------------------- */
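A hedged sketch of why the new 'min' argument matters: callers cast the
returned descriptor, and the sizeof(**(ptr)) bound now rejects
descriptors shorter than the cast target. find_ss_companion() is
illustrative:

#include <linux/usb.h>
#include <linux/usb/ch9.h>

static int find_ss_companion(struct usb_host_endpoint *ep)
{
	struct usb_ss_ep_comp_descriptor *comp;

	if (usb_get_extra_descriptor(ep, USB_DT_SS_ENDPOINT_COMP, &comp) < 0)
		return -ENODEV;

	/* comp is now guaranteed to span at least sizeof(*comp) bytes. */
	return comp->bMaxBurst;
}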
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fdfd04e348f6..738a0c24874f 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -246,7 +246,8 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
*
* @bio is a part of the writeback in progress controlled by @wbc. Perform
* writeback specific initialization. This is used to apply the cgroup
- * writeback context.
+ * writeback context. Must be called after the bio has been associated with
+ * a device.
*/
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
@@ -257,7 +258,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
* regular writeback instead of writing things out itself.
*/
if (wbc->wb)
- bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+ bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}
#else /* CONFIG_CGROUP_WRITEBACK */
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 564892e19f8c..f492e21c4aa2 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -554,6 +554,60 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
}
/**
+ * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_cmpxchg() except it disables softirqs
+ * while holding the array lock.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_bh(xa);
+ curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+ xa_unlock_bh(xa);
+
+ return curr;
+}
+
+/**
+ * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_cmpxchg() except it disables interrupts
+ * while holding the array lock.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
+ void *old, void *entry, gfp_t gfp)
+{
+ void *curr;
+
+ xa_lock_irq(xa);
+ curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+ xa_unlock_irq(xa);
+
+ return curr;
+}
+
+/**
* xa_insert() - Store this entry in the XArray unless another entry is
* already present.
* @xa: XArray.
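A hedged usage sketch for the irq-disabling variant; my_slots and
claim_slot() are illustrative:

#include <linux/xarray.h>

static DEFINE_XARRAY(my_slots);

/* Claim a slot in an array that is also modified from hard irq context. */
static int claim_slot(unsigned long index, void *item)
{
	void *old;

	old = xa_cmpxchg_irq(&my_slots, index, NULL, item, GFP_KERNEL);
	if (xa_is_err(old))
		return xa_err(old);

	/* A non-NULL old value means another path got there first. */
	return old ? -EBUSY : 0;
}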
diff --git a/include/media/cec.h b/include/media/cec.h
index 3fe5e5d2bb7e..707411ef8ba2 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -155,6 +155,7 @@ struct cec_adapter {
unsigned int transmit_queue_sz;
struct list_head wait_queue;
struct cec_data *transmitting;
+ bool transmit_in_progress;
struct task_struct *kthread_config;
struct completion config_completion;
diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h
index 79a566d7defd..5c31a7682492 100644
--- a/include/media/davinci/vpbe.h
+++ b/include/media/davinci/vpbe.h
@@ -100,10 +100,6 @@ struct vpbe_config {
struct vpbe_device;
struct vpbe_device_ops {
- /* crop cap for the display */
- int (*g_cropcap)(struct vpbe_device *vpbe_dev,
- struct v4l2_cropcap *cropcap);
-
/* Enumerate the outputs */
int (*enum_outputs)(struct vpbe_device *vpbe_dev,
struct v4l2_output *output);
diff --git a/include/media/media-request.h b/include/media/media-request.h
index 0ce75c35131f..bd36d7431698 100644
--- a/include/media/media-request.h
+++ b/include/media/media-request.h
@@ -68,7 +68,7 @@ struct media_request {
unsigned int access_count;
struct list_head objects;
unsigned int num_incomplete_objects;
- struct wait_queue_head poll_wait;
+ wait_queue_head_t poll_wait;
spinlock_t lock;
};
diff --git a/include/media/mpeg2-ctrls.h b/include/media/mpeg2-ctrls.h
new file mode 100644
index 000000000000..d21f40edc09e
--- /dev/null
+++ b/include/media/mpeg2-ctrls.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * These are the MPEG2 state controls for use with stateless MPEG-2
+ * codec drivers.
+ *
+ * It turns out that these structs are not stable yet and will undergo
+ * more changes. So keep them private until they are stable and ready to
+ * become part of the official public API.
+ */
+
+#ifndef _MPEG2_CTRLS_H_
+#define _MPEG2_CTRLS_H_
+
+#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250)
+#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251)
+
+/* enum v4l2_ctrl_type type values */
+#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103
+#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104
+
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3
+#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4
+
+struct v4l2_mpeg2_sequence {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
+ __u16 horizontal_size;
+ __u16 vertical_size;
+ __u32 vbv_buffer_size;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
+ __u8 profile_and_level_indication;
+ __u8 progressive_sequence;
+ __u8 chroma_format;
+ __u8 pad;
+};
+
+struct v4l2_mpeg2_picture {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
+ __u8 picture_coding_type;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
+ __u8 f_code[2][2];
+ __u8 intra_dc_precision;
+ __u8 picture_structure;
+ __u8 top_field_first;
+ __u8 frame_pred_frame_dct;
+ __u8 concealment_motion_vectors;
+ __u8 q_scale_type;
+ __u8 intra_vlc_format;
+ __u8 alternate_scan;
+ __u8 repeat_first_field;
+ __u8 progressive_frame;
+ __u8 pad;
+};
+
+struct v4l2_ctrl_mpeg2_slice_params {
+ __u32 bit_size;
+ __u32 data_bit_offset;
+
+ struct v4l2_mpeg2_sequence sequence;
+ struct v4l2_mpeg2_picture picture;
+
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
+ __u8 quantiser_scale_code;
+
+ __u8 backward_ref_index;
+ __u8 forward_ref_index;
+ __u8 pad;
+};
+
+struct v4l2_ctrl_mpeg2_quantization {
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
+ __u8 load_intra_quantiser_matrix;
+ __u8 load_non_intra_quantiser_matrix;
+ __u8 load_chroma_intra_quantiser_matrix;
+ __u8 load_chroma_non_intra_quantiser_matrix;
+
+ __u8 intra_quantiser_matrix[64];
+ __u8 non_intra_quantiser_matrix[64];
+ __u8 chroma_intra_quantiser_matrix[64];
+ __u8 chroma_non_intra_quantiser_matrix[64];
+};
+
+#endif
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index bfa3017cecba..d621acadfbf3 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -277,6 +277,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_WINFAST "rc-winfast"
#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe"
#define RC_MAP_SU3000 "rc-su3000"
+#define RC_MAP_XBOX_DVD "rc-xbox-dvd"
#define RC_MAP_ZX_IRDEC "rc-zx-irdec"
/*
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 82715645617b..0c511ed8ffb0 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -396,4 +396,9 @@ int v4l2_g_parm_cap(struct video_device *vdev,
int v4l2_s_parm_cap(struct video_device *vdev,
struct v4l2_subdev *sd, struct v4l2_streamparm *a);
+/* Compare two v4l2_fract structs */
+#define V4L2_FRACT_COMPARE(a, OP, b) \
+ ((u64)(a).numerator * (b).denominator OP \
+ (u64)(b).numerator * (a).denominator)
+
#endif /* V4L2_COMMON_H_ */
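
A quick illustration of the new macro: both sides are widened to u64 before the cross-multiplication, so fractions compare without division and without overflow for typical frame intervals. A minimal sketch (the values are made up):

	struct v4l2_fract a = { .numerator = 1, .denominator = 30 }; /* 1/30 s */
	struct v4l2_fract b = { .numerator = 1, .denominator = 25 }; /* 1/25 s */

	/* Expands to (u64)1 * 25 < (u64)1 * 30, i.e. true: 1/30 < 1/25. */
	if (V4L2_FRACT_COMPARE(a, <, b))
		pr_debug("a is the smaller fraction\n");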
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 83ce0593b275..d63cf227b0ab 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -22,6 +22,12 @@
#include <linux/videodev2.h>
#include <media/media-request.h>
+/*
+ * Include the mpeg2 stateless codec compound control definitions.
+ * This will move to the public headers once this API is fully stable.
+ */
+#include <media/mpeg2-ctrls.h>
+
/* forward references */
struct file;
struct v4l2_ctrl_handler;
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 456ac13eca1d..48531e57cc5a 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -74,10 +74,19 @@ struct v4l2_ctrl_handler;
* indicates that file->private_data points to &struct v4l2_fh.
* This flag is set by the core when v4l2_fh_init() is called.
* All new drivers should use it.
+ * @V4L2_FL_QUIRK_INVERTED_CROP:
+ * some old M2M drivers use g/s_crop/cropcap incorrectly: crop and
+ * compose are swapped. If this flag is set, then the selection
+ * targets are swapped in the g/s_crop/cropcap functions in v4l2-ioctl.c.
+ * This allows those drivers to correctly implement the selection API,
+ * but the old crop API will still work as expected in order to preserve
+ * backwards compatibility.
+ * Never set this flag for new drivers.
*/
enum v4l2_video_device_flags {
- V4L2_FL_REGISTERED = 0,
- V4L2_FL_USES_V4L2_FH = 1,
+ V4L2_FL_REGISTERED = 0,
+ V4L2_FL_USES_V4L2_FH = 1,
+ V4L2_FL_QUIRK_INVERTED_CROP = 2,
};
/* Priority helper functions */
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 5848d92c30da..8533ece5026e 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -48,6 +48,9 @@ struct v4l2_fh;
* @vidioc_enum_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic
* for metadata capture
+ * @vidioc_enum_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic
+ * for metadata output
* @vidioc_g_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -80,6 +83,8 @@ struct v4l2_fh;
* Radio output
* @vidioc_g_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
+ * @vidioc_g_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for metadata output
* @vidioc_s_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -112,6 +117,8 @@ struct v4l2_fh;
* Radio output
* @vidioc_s_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
+ * @vidioc_s_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for metadata output
* @vidioc_try_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -146,6 +153,8 @@ struct v4l2_fh;
* Radio output
* @vidioc_try_fmt_meta_cap: pointer to the function that implements
* :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
+ * @vidioc_try_fmt_meta_out: pointer to the function that implements
+ * :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for metadata output
* @vidioc_reqbufs: pointer to the function that implements
* :ref:`VIDIOC_REQBUFS <vidioc_reqbufs>` ioctl
* @vidioc_querybuf: pointer to the function that implements
@@ -220,12 +229,8 @@ struct v4l2_fh;
* :ref:`VIDIOC_G_MODULATOR <vidioc_g_modulator>` ioctl
* @vidioc_s_modulator: pointer to the function that implements
* :ref:`VIDIOC_S_MODULATOR <vidioc_g_modulator>` ioctl
- * @vidioc_cropcap: pointer to the function that implements
- * :ref:`VIDIOC_CROPCAP <vidioc_cropcap>` ioctl
- * @vidioc_g_crop: pointer to the function that implements
- * :ref:`VIDIOC_G_CROP <vidioc_g_crop>` ioctl
- * @vidioc_s_crop: pointer to the function that implements
- * :ref:`VIDIOC_S_CROP <vidioc_g_crop>` ioctl
+ * @vidioc_g_pixelaspect: pointer to the function that implements
+ * the pixelaspect part of the :ref:`VIDIOC_CROPCAP <vidioc_cropcap>` ioctl
* @vidioc_g_selection: pointer to the function that implements
* :ref:`VIDIOC_G_SELECTION <vidioc_g_selection>` ioctl
* @vidioc_s_selection: pointer to the function that implements
@@ -318,6 +323,8 @@ struct v4l2_ioctl_ops {
struct v4l2_fmtdesc *f);
int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
/* VIDIOC_G_FMT handlers */
int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh,
@@ -346,6 +353,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_g_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_g_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_S_FMT handlers */
int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh,
@@ -374,6 +383,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_s_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_s_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_TRY_FMT handlers */
int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh,
@@ -402,6 +413,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_try_fmt_meta_cap)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_try_fmt_meta_out)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* Buffer handlers */
int (*vidioc_reqbufs)(struct file *file, void *fh,
@@ -491,12 +504,8 @@ struct v4l2_ioctl_ops {
int (*vidioc_s_modulator)(struct file *file, void *fh,
const struct v4l2_modulator *a);
/* Crop ioctls */
- int (*vidioc_cropcap)(struct file *file, void *fh,
- struct v4l2_cropcap *a);
- int (*vidioc_g_crop)(struct file *file, void *fh,
- struct v4l2_crop *a);
- int (*vidioc_s_crop)(struct file *file, void *fh,
- const struct v4l2_crop *a);
+ int (*vidioc_g_pixelaspect)(struct file *file, void *fh,
+ int buf_type, struct v4l2_fract *aspect);
int (*vidioc_g_selection)(struct file *file, void *fh,
struct v4l2_selection *s);
int (*vidioc_s_selection)(struct file *file, void *fh,
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 9102d6ca566e..47af609dc8f1 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -776,7 +776,11 @@ struct v4l2_subdev_internal_ops {
#define V4L2_SUBDEV_FL_IS_SPI (1U << 1)
/* Set this flag if this subdev needs a device node. */
#define V4L2_SUBDEV_FL_HAS_DEVNODE (1U << 2)
-/* Set this flag if this subdev generates events. */
+/*
+ * Set this flag if this subdev generates events.
+ * Note controls can send events, thus drivers exposing controls
+ * should set this flag.
+ */
#define V4L2_SUBDEV_FL_HAS_EVENTS (1U << 3)
struct regulator_bulk_data;
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index e86981d615ae..4a737b2c610b 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -239,6 +239,7 @@ struct vb2_queue;
* @num_planes: number of planes in the buffer
* on an internal driver queue.
* @timestamp: frame timestamp in ns.
+ * @request: the request this buffer is associated with.
* @req_obj: used to bind this buffer to a request. This
* request object has a refcount.
*/
@@ -249,6 +250,7 @@ struct vb2_buffer {
unsigned int memory;
unsigned int num_planes;
u64 timestamp;
+ struct media_request *request;
struct media_request_object req_obj;
/* private: internal use only
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 05c7df41d737..dbc795ec659e 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -194,35 +194,5 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
#endif
}
-#ifdef CONFIG_NET_CLS_ACT
-int tc_setup_cb_egdev_register(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv);
-void tc_setup_cb_egdev_unregister(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv);
-int tc_setup_cb_egdev_call(const struct net_device *dev,
- enum tc_setup_type type, void *type_data,
- bool err_stop);
-#else
-static inline
-int tc_setup_cb_egdev_register(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
- return 0;
-}
-
-static inline
-void tc_setup_cb_egdev_unregister(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
-}
-
-static inline
-int tc_setup_cb_egdev_call(const struct net_device *dev,
- enum tc_setup_type type, void *type_data,
- bool err_stop)
-{
- return 0;
-}
-#endif
#endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 1fa41b7a1be3..e0c41eb1c860 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -777,8 +777,10 @@ struct cfg80211_crypto_settings {
* @probe_resp: probe response template (AP mode only)
* @ftm_responder: enable FTM responder functionality; -1 for no change
* (which also implies no change in LCI/civic location data)
- * @lci: LCI subelement content
- * @civicloc: Civic location subelement content
+ * @lci: Measurement Report element content, starting with Measurement Token
+ * (measurement type 8)
+ * @civicloc: Measurement Report element content, starting with Measurement
+ * Token (measurement type 11)
* @lci_len: LCI data length
* @civicloc_len: Civic location data length
*/
@@ -1296,6 +1298,7 @@ struct cfg80211_tid_stats {
* @rx_beacon: number of beacons received from this peer
* @rx_beacon_signal_avg: signal strength average (in dBm) for beacons received
* from this peer
+ * @connected_to_gate: true if mesh STA has a path to mesh gate
* @rx_duration: aggregate PPDU duration(usecs) for all the frames from a peer
* @pertid: per-TID statistics, see &struct cfg80211_tid_stats, using the last
* (IEEE80211_NUM_TIDS) index for MSDUs not encapsulated in QoS-MPDUs.
@@ -1350,6 +1353,8 @@ struct station_info {
u64 rx_beacon;
u64 rx_duration;
u8 rx_beacon_signal_avg;
+ u8 connected_to_gate;
+
struct cfg80211_tid_stats *pertid;
s8 ack_signal;
s8 avg_ack_signal;
@@ -1559,6 +1564,10 @@ struct bss_parameters {
* @plink_timeout: If no tx activity is seen from a STA we've established
* peering with for longer than this time (in seconds), then remove it
* from the STA's list of peers. Default is 30 minutes.
+ * @dot11MeshConnectedToMeshGate: if set to true, advertise that this STA is
+ * connected to a mesh gate in mesh formation info. If false, the
+ * value in mesh formation is determined by the presence of root paths
+ * in the mesh path table
*/
struct mesh_config {
u16 dot11MeshRetryTimeout;
@@ -1578,6 +1587,7 @@ struct mesh_config {
u16 dot11MeshHWMPperrMinInterval;
u16 dot11MeshHWMPnetDiameterTraversalTime;
u8 dot11MeshHWMPRootMode;
+ bool dot11MeshConnectedToMeshGate;
u16 dot11MeshHWMPRannInterval;
bool dot11MeshGateAnnouncementProtocol;
bool dot11MeshForwarding;
@@ -2815,7 +2825,7 @@ struct cfg80211_external_auth_params {
};
/**
- * cfg80211_ftm_responder_stats - FTM responder statistics
+ * struct cfg80211_ftm_responder_stats - FTM responder statistics
*
* @filled: bitflag of flags using the bits of &enum nl80211_ftm_stats to
* indicate the relevant values in this struct for them
@@ -2849,6 +2859,190 @@ struct cfg80211_ftm_responder_stats {
};
/**
+ * struct cfg80211_pmsr_ftm_result - FTM result
+ * @failure_reason: if this measurement failed (PMSR status is
+ * %NL80211_PMSR_STATUS_FAILURE), this gives a more precise
+ * reason than just "failure"
+ * @burst_index: if reporting partial results, this is the index
+ * in [0 .. num_bursts-1] of the burst that's being reported
+ * @num_ftmr_attempts: number of FTM request frames transmitted
+ * @num_ftmr_successes: number of FTM request frames acked
+ * @busy_retry_time: if failure_reason is %NL80211_PMSR_FTM_FAILURE_PEER_BUSY,
+ * fill this to indicate in how many seconds a retry is deemed possible
+ * by the responder
+ * @num_bursts_exp: actual number of bursts exponent negotiated
+ * @burst_duration: actual burst duration negotiated
+ * @ftms_per_burst: actual FTMs per burst negotiated
+ * @lci_len: length of LCI information (if present)
+ * @civicloc_len: length of civic location information (if present)
+ * @lci: LCI data (may be %NULL)
+ * @civicloc: civic location data (may be %NULL)
+ * @rssi_avg: average RSSI over FTM action frames reported
+ * @rssi_spread: spread of the RSSI over FTM action frames reported
+ * @tx_rate: bitrate for transmitted FTM action frame response
+ * @rx_rate: bitrate of received FTM action frame
+ * @rtt_avg: average of RTTs measured (must have either this or @dist_avg)
+ * @rtt_variance: variance of RTTs measured (note that standard deviation is
+ * the square root of the variance)
+ * @rtt_spread: spread of the RTTs measured
+ * @dist_avg: average of distances (mm) measured
+ * (must have either this or @rtt_avg)
+ * @dist_variance: variance of distances measured (see also @rtt_variance)
+ * @dist_spread: spread of distances measured (see also @rtt_spread)
+ * @num_ftmr_attempts_valid: @num_ftmr_attempts is valid
+ * @num_ftmr_successes_valid: @num_ftmr_successes is valid
+ * @rssi_avg_valid: @rssi_avg is valid
+ * @rssi_spread_valid: @rssi_spread is valid
+ * @tx_rate_valid: @tx_rate is valid
+ * @rx_rate_valid: @rx_rate is valid
+ * @rtt_avg_valid: @rtt_avg is valid
+ * @rtt_variance_valid: @rtt_variance is valid
+ * @rtt_spread_valid: @rtt_spread is valid
+ * @dist_avg_valid: @dist_avg is valid
+ * @dist_variance_valid: @dist_variance is valid
+ * @dist_spread_valid: @dist_spread is valid
+ */
+struct cfg80211_pmsr_ftm_result {
+ const u8 *lci;
+ const u8 *civicloc;
+ unsigned int lci_len;
+ unsigned int civicloc_len;
+ enum nl80211_peer_measurement_ftm_failure_reasons failure_reason;
+ u32 num_ftmr_attempts, num_ftmr_successes;
+ s16 burst_index;
+ u8 busy_retry_time;
+ u8 num_bursts_exp;
+ u8 burst_duration;
+ u8 ftms_per_burst;
+ s32 rssi_avg;
+ s32 rssi_spread;
+ struct rate_info tx_rate, rx_rate;
+ s64 rtt_avg;
+ s64 rtt_variance;
+ s64 rtt_spread;
+ s64 dist_avg;
+ s64 dist_variance;
+ s64 dist_spread;
+
+ u16 num_ftmr_attempts_valid:1,
+ num_ftmr_successes_valid:1,
+ rssi_avg_valid:1,
+ rssi_spread_valid:1,
+ tx_rate_valid:1,
+ rx_rate_valid:1,
+ rtt_avg_valid:1,
+ rtt_variance_valid:1,
+ rtt_spread_valid:1,
+ dist_avg_valid:1,
+ dist_variance_valid:1,
+ dist_spread_valid:1;
+};
+
+/**
+ * struct cfg80211_pmsr_result - peer measurement result
+ * @addr: address of the peer
+ * @host_time: host time (use ktime_get_boottime() adjusted to the time when the
+ * measurement was made)
+ * @ap_tsf: AP's TSF at measurement time
+ * @status: status of the measurement
+ * @final: if reporting partial results, mark this as the last one; if not
+ * reporting partial results, always set this flag
+ * @ap_tsf_valid: indicates the @ap_tsf value is valid
+ * @type: type of the measurement reported; note that we only support reporting
+ * one type at a time, but you can report multiple results separately and
+ * they're all aggregated for userspace.
+ */
+struct cfg80211_pmsr_result {
+ u64 host_time, ap_tsf;
+ enum nl80211_peer_measurement_status status;
+
+ u8 addr[ETH_ALEN];
+
+ u8 final:1,
+ ap_tsf_valid:1;
+
+ enum nl80211_peer_measurement_type type;
+
+ union {
+ struct cfg80211_pmsr_ftm_result ftm;
+ };
+};
+
+/**
+ * struct cfg80211_pmsr_ftm_request_peer - FTM request data
+ * @requested: indicates FTM is requested
+ * @preamble: frame preamble to use
+ * @burst_period: burst period to use
+ * @asap: indicates to use ASAP mode
+ * @num_bursts_exp: number of bursts exponent
+ * @burst_duration: burst duration
+ * @ftms_per_burst: number of FTMs per burst
+ * @ftmr_retries: number of retries for FTM request
+ * @request_lci: request LCI information
+ * @request_civicloc: request civic location information
+ *
+ * See also nl80211 for the respective attribute documentation.
+ */
+struct cfg80211_pmsr_ftm_request_peer {
+ enum nl80211_preamble preamble;
+ u16 burst_period;
+ u8 requested:1,
+ asap:1,
+ request_lci:1,
+ request_civicloc:1;
+ u8 num_bursts_exp;
+ u8 burst_duration;
+ u8 ftms_per_burst;
+ u8 ftmr_retries;
+};
+
+/**
+ * struct cfg80211_pmsr_request_peer - peer data for a peer measurement request
+ * @addr: MAC address
+ * @chandef: channel to use
+ * @report_ap_tsf: report the associated AP's TSF
+ * @ftm: FTM data, see &struct cfg80211_pmsr_ftm_request_peer
+ */
+struct cfg80211_pmsr_request_peer {
+ u8 addr[ETH_ALEN];
+ struct cfg80211_chan_def chandef;
+ u8 report_ap_tsf:1;
+ struct cfg80211_pmsr_ftm_request_peer ftm;
+};
+
+/**
+ * struct cfg80211_pmsr_request - peer measurement request
+ * @cookie: cookie, set by cfg80211
+ * @nl_portid: netlink portid - used by cfg80211
+ * @drv_data: driver data for this request, if required for aborting;
+ * cfg80211 does not otherwise free or touch it
+ * @mac_addr: MAC address used for (randomised) request
+ * @mac_addr_mask: MAC address mask used for randomisation; bits that
+ * are 0 in the mask should be randomised, bits that are 1 should
+ * be taken from the @mac_addr
+ * @list: used by cfg80211 to hold on to the request
+ * @timeout: timeout (in milliseconds) for the whole operation, if
+ * zero it means there's no timeout
+ * @n_peers: number of peers to do measurements with
+ * @peers: per-peer measurement request data
+ */
+struct cfg80211_pmsr_request {
+ u64 cookie;
+ void *drv_data;
+ u32 n_peers;
+ u32 nl_portid;
+
+ u32 timeout;
+
+ u8 mac_addr[ETH_ALEN] __aligned(2);
+ u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+
+ struct list_head list;
+
+ struct cfg80211_pmsr_request_peer peers[];
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -3183,6 +3377,8 @@ struct cfg80211_ftm_responder_stats {
*
* @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
* Statistics should be cumulative, currently no way to reset is provided.
+ * @start_pmsr: start peer measurement (e.g. FTM)
+ * @abort_pmsr: abort peer measurement
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3492,6 +3688,11 @@ struct cfg80211_ops {
int (*get_ftm_responder_stats)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_ftm_responder_stats *ftm_stats);
+
+ int (*start_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *request);
+ void (*abort_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *request);
};
/*
@@ -3864,6 +4065,42 @@ struct wiphy_iftype_ext_capab {
};
/**
+ * struct cfg80211_pmsr_capabilities - cfg80211 peer measurement capabilities
+ * @max_peers: maximum number of peers in a single measurement
+ * @report_ap_tsf: can report assoc AP's TSF for radio resource measurement
+ * @randomize_mac_addr: can randomize MAC address for measurement
+ * @ftm.supported: FTM measurement is supported
+ * @ftm.asap: ASAP-mode is supported
+ * @ftm.non_asap: non-ASAP-mode is supported
+ * @ftm.request_lci: can request LCI data
+ * @ftm.request_civicloc: can request civic location data
+ * @ftm.preambles: bitmap of preambles supported (&enum nl80211_preamble)
+ * @ftm.bandwidths: bitmap of bandwidths supported (&enum nl80211_chan_width)
+ * @ftm.max_bursts_exponent: maximum burst exponent supported
+ * (set to -1 if not limited; note that setting this will necessarily
+ * forbid using the value 15 to let the responder pick)
+ * @ftm.max_ftms_per_burst: maximum FTMs per burst supported (set to 0 if
+ * not limited)
+ */
+struct cfg80211_pmsr_capabilities {
+ unsigned int max_peers;
+ u8 report_ap_tsf:1,
+ randomize_mac_addr:1;
+
+ struct {
+ u32 preambles;
+ u32 bandwidths;
+ s8 max_bursts_exponent;
+ u8 max_ftms_per_burst;
+ u8 supported:1,
+ asap:1,
+ non_asap:1,
+ request_lci:1,
+ request_civicloc:1;
+ } ftm;
+};
+
+/**
* struct wiphy - wireless hardware description
* @reg_notifier: the driver's regulatory notification callback,
* note that if your driver uses wiphy_apply_custom_regulatory()
@@ -4027,6 +4264,8 @@ struct wiphy_iftype_ext_capab {
* @txq_limit: configuration of internal TX queue frame limit
* @txq_memory_limit: configuration internal TX queue memory limit
* @txq_quantum: configuration of internal TX queue scheduler quantum
+ *
+ * @pmsr_capa: peer measurement capabilities
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -4163,6 +4402,8 @@ struct wiphy {
u32 txq_memory_limit;
u32 txq_quantum;
+ const struct cfg80211_pmsr_capabilities *pmsr_capa;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -4365,6 +4606,9 @@ struct cfg80211_cqm_config;
* @owner_nlportid: (private) owner socket port ID
* @nl_owner_dead: (private) owner socket went away
* @cqm_config: (private) nl80211 RSSI monitor state
+ * @pmsr_list: (private) peer measurement requests
+ * @pmsr_lock: (private) peer measurements requests/results lock
+ * @pmsr_free_wk: (private) peer measurements cleanup work
*/
struct wireless_dev {
struct wiphy *wiphy;
@@ -4436,6 +4680,10 @@ struct wireless_dev {
#endif
struct cfg80211_cqm_config *cqm_config;
+
+ struct list_head pmsr_list;
+ spinlock_t pmsr_lock;
+ struct work_struct pmsr_free_wk;
};
static inline u8 *wdev_address(struct wireless_dev *wdev)
@@ -5328,7 +5576,8 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
* cfg80211 then sends a notification to userspace.
*/
void cfg80211_notify_new_peer_candidate(struct net_device *dev,
- const u8 *macaddr, const u8 *ie, u8 ie_len, gfp_t gfp);
+ const u8 *macaddr, const u8 *ie, u8 ie_len,
+ int sig_dbm, gfp_t gfp);
/**
* DOC: RFkill integration
@@ -6630,6 +6879,31 @@ int cfg80211_external_auth_request(struct net_device *netdev,
struct cfg80211_external_auth_params *params,
gfp_t gfp);
+/**
+ * cfg80211_pmsr_report - report peer measurement result data
+ * @wdev: the wireless device reporting the measurement
+ * @req: the original measurement request
+ * @result: the result data
+ * @gfp: allocation flags
+ */
+void cfg80211_pmsr_report(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ struct cfg80211_pmsr_result *result,
+ gfp_t gfp);
+
+/**
+ * cfg80211_pmsr_complete - report peer measurement completed
+ * @wdev: the wireless device reporting the measurement
+ * @req: the original measurement request
+ * @gfp: allocation flags
+ *
+ * Report that the entire measurement completed, after this
+ * the request pointer will no longer be valid.
+ */
+void cfg80211_pmsr_complete(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ gfp_t gfp);
+
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
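
To tie the pmsr pieces together, a hedged driver-side sketch: advertise capabilities before wiphy registration, then report each result and the completion from the measurement machinery. my_wiphy, my_wdev and req are placeholders; the NL80211_PMSR_* and NL80211_PREAMBLE_* values are assumed to come from the matching nl80211 uapi changes elsewhere in this series.

	/* Illustrative only, not a complete driver. */
	static const struct cfg80211_pmsr_capabilities my_pmsr_capa = {
		.max_peers = 1,
		.randomize_mac_addr = 1,
		.ftm = {
			.supported = 1,
			.asap = 1,
			.preambles = BIT(NL80211_PREAMBLE_HT),
			.bandwidths = BIT(NL80211_CHAN_WIDTH_20),
			.max_bursts_exponent = -1,	/* not limited */
		},
	};

	/* before wiphy_register(): */
	my_wiphy->pmsr_capa = &my_pmsr_capa;

	/* later, when the measurement for one peer finishes: */
	struct cfg80211_pmsr_result res = {
		.status = NL80211_PMSR_STATUS_SUCCESS,
		.type = NL80211_PMSR_TYPE_FTM,
		.final = 1,
	};

	memcpy(res.addr, req->peers[0].addr, ETH_ALEN);
	cfg80211_pmsr_report(my_wdev, req, &res, GFP_KERNEL);
	cfg80211_pmsr_complete(my_wdev, req, GFP_KERNEL);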
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 45db0c79462d..67f4293bc970 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -365,6 +365,7 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI,
DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
/* add new param generic ids above here */
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -392,6 +393,9 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME "msix_vec_per_pf_min"
#define DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE DEVLINK_PARAM_TYPE_U32
+#define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME "fw_load_policy"
+#define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE DEVLINK_PARAM_TYPE_U8
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 23690c44e167..b3eefe8e18fd 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -36,7 +36,7 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_DSA,
DSA_TAG_PROTO_EDSA,
DSA_TAG_PROTO_GSWIP,
- DSA_TAG_PROTO_KSZ,
+ DSA_TAG_PROTO_KSZ9477,
DSA_TAG_PROTO_LAN9303,
DSA_TAG_PROTO_MTK,
DSA_TAG_PROTO_QCA,
@@ -113,6 +113,7 @@ struct dsa_device_ops {
struct packet_type *pt);
int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
int *offset);
+ unsigned int overhead;
};
struct dsa_switch_tree {
diff --git a/include/net/flow.h b/include/net/flow.h
index 8ce21793094e..93f2c9a0f098 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -38,8 +38,8 @@ struct flowi_common {
#define FLOWI_FLAG_KNOWN_NH 0x02
#define FLOWI_FLAG_SKIP_NH_OIF 0x04
__u32 flowic_secid;
- struct flowi_tunnel flowic_tun_key;
kuid_t flowic_uid;
+ struct flowi_tunnel flowic_tun_key;
};
union flowi_uli {
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 6a4586dcdede..2b26979efb48 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -209,8 +209,8 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */
FLOW_DISSECTOR_KEY_ARP, /* struct flow_dissector_key_arp */
- FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */
- FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
+ FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_vlan */
+ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_tags */
FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */
FLOW_DISSECTOR_KEY_ENC_KEYID, /* struct flow_dissector_key_keyid */
@@ -221,7 +221,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
- FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */
+ FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_vlan */
FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */
FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 946bd53a9f81..ca23860adbb9 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -10,7 +10,7 @@
struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats;
struct u64_stats_sync syncp;
-};
+} __aligned(2 * sizeof(u64));
struct net_rate_estimator;
diff --git a/include/net/geneve.h b/include/net/geneve.h
index a7600ed55ea3..fc6a7e0a874a 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -60,6 +60,12 @@ struct genevehdr {
struct geneve_opt options[];
};
+static inline bool netif_is_geneve(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "geneve");
+}
+
#ifdef CONFIG_INET
struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
u8 name_assign_type, u16 dst_port);
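
The new helper identifies a geneve netdev purely from its rtnl_link_ops kind string, so callers (for example, switch drivers deciding whether a tunnel can be offloaded) need no symbol dependency on the geneve module. A hedged usage sketch; my_can_offload_encap is a placeholder:

	/* Illustrative only: gate encap offload on the device type. */
	static bool my_can_offload_encap(const struct net_device *dev)
	{
		/* Works even without a direct dependency on the geneve
		 * module, since only the link ops kind string is inspected.
		 */
		return netif_is_geneve(dev);
	}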
diff --git a/include/net/gre.h b/include/net/gre.h
index 797142eee9cd..b60f212c16c6 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -37,8 +37,17 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err, __be16 proto, int nhs);
-bool is_gretap_dev(const struct net_device *dev);
-bool is_ip6gretap_dev(const struct net_device *dev);
+static inline bool netif_is_gretap(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "gretap");
+}
+
+static inline bool netif_is_ip6gretap(const struct net_device *dev)
+{
+ return dev->rtnl_link_ops &&
+ !strcmp(dev->rtnl_link_ops->kind, "ip6gretap");
+}
static inline int gre_calc_hlen(__be16 o_flags)
{
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 3ef2743a8eec..6ac3a5bd0117 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -41,7 +41,7 @@ struct net;
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
int icmp_rcv(struct sk_buff *skb);
-void icmp_err(struct sk_buff *skb, u32 info);
+int icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
void icmp_out_count(struct net *net, unsigned char type);
diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
index 6e91e38a31da..9db98af46985 100644
--- a/include/net/inet6_hashtables.h
+++ b/include/net/inet6_hashtables.h
@@ -115,9 +115,8 @@ int inet6_hash(struct sock *sk);
((__sk)->sk_family == AF_INET6) && \
ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
+ (((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#endif /* _INET6_HASHTABLES_H */
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 3ca969cbd161..975901a95c0f 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -2,6 +2,8 @@
#ifndef _INET_COMMON_H
#define _INET_COMMON_H
+#include <linux/indirect_call_wrapper.h>
+
extern const struct proto_ops inet_stream_ops;
extern const struct proto_ops inet_dgram_ops;
@@ -54,4 +56,11 @@ static inline void inet_ctl_sock_destroy(struct sock *sk)
sock_release(sk->sk_socket);
}
+#define indirect_call_gro_receive(f2, f1, cb, head, skb) \
+({ \
+ unlikely(gro_recursion_inc_test(skb)) ? \
+ NAPI_GRO_CB(skb)->flush |= 1, NULL : \
+ INDIRECT_CALL_2(cb, f2, f1, head, skb); \
+})
+
#endif
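
How the macro is meant to be used: when GRO recursion is within bounds, the indirect callback is resolved against two likely candidates via INDIRECT_CALL_2, avoiding a retpoline-protected indirect branch; on recursion overflow it sets the flush flag and bails out with NULL. A sketch along the lines of the intended ipv4 call site, assuming tcp4_gro_receive and udp4_gro_receive as the two candidates:

	/* Sketch, not the actual call site. */
	struct sk_buff *pp;

	pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
				       ops->callbacks.gro_receive, head, skb);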
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 9141e95529e7..babb14136705 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -79,6 +79,7 @@ struct inet_ehash_bucket {
struct inet_bind_bucket {
possible_net_t ib_net;
+ int l3mdev;
unsigned short port;
signed char fastreuse;
signed char fastreuseport;
@@ -188,10 +189,21 @@ static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo)
hashinfo->ehash_locks = NULL;
}
+static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept,
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
struct inet_bind_bucket *
inet_bind_bucket_create(struct kmem_cache *cachep, struct net *net,
struct inet_bind_hashbucket *head,
- const unsigned short snum);
+ const unsigned short snum, int l3mdev);
void inet_bind_bucket_destroy(struct kmem_cache *cachep,
struct inet_bind_bucket *tb);
@@ -225,6 +237,7 @@ void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
unsigned long numentries, int scale,
unsigned long low_limit,
unsigned long high_limit);
+int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
@@ -282,9 +295,8 @@ static inline struct sock *inet_lookup_listener(struct net *net,
#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \
(((__sk)->sk_portpair == (__ports)) && \
((__sk)->sk_addrpair == (__cookie)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
+ (((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#else /* 32-bit arch */
#define INET_ADDR_COOKIE(__name, __saddr, __daddr) \
@@ -294,9 +306,8 @@ static inline struct sock *inet_lookup_listener(struct net *net,
(((__sk)->sk_portpair == (__ports)) && \
((__sk)->sk_daddr == (__saddr)) && \
((__sk)->sk_rcv_saddr == (__daddr)) && \
- (!(__sk)->sk_bound_dev_if || \
- ((__sk)->sk_bound_dev_if == (__dif)) || \
- ((__sk)->sk_bound_dev_if == (__sdif))) && \
+ (((__sk)->sk_bound_dev_if == (__dif)) || \
+ ((__sk)->sk_bound_dev_if == (__sdif))) && \
net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a80fd0ac4563..e8eef85006aa 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -130,6 +130,27 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
return sk->sk_bound_dev_if;
}
+static inline int inet_sk_bound_l3mdev(const struct sock *sk)
+{
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ struct net *net = sock_net(sk);
+
+ if (!net->ipv4.sysctl_tcp_l3mdev_accept)
+ return l3mdev_master_ifindex_by_index(net,
+ sk->sk_bound_dev_if);
+#endif
+
+ return 0;
+}
+
+static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
+ int dif, int sdif)
+{
+ if (!bound_dev_if)
+ return !sdif || l3mdev_accept;
+ return bound_dev_if == dif || bound_dev_if == sdif;
+}
+
struct inet_cork {
unsigned int flags;
__be32 addr;
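
The semantics of inet_bound_dev_eq() are easiest to see as a case table, derived directly from the inline above:

	/* inet_bound_dev_eq() cases:
	 *
	 *   bound_dev_if == 0, sdif == 0   -> true (unbound socket, non-VRF skb)
	 *   bound_dev_if == 0, sdif != 0   -> l3mdev_accept (unbound socket sees
	 *                                     VRF traffic only if the l3mdev
	 *                                     sysctl allows it)
	 *   bound_dev_if == dif or == sdif -> true (bound to the arrival device
	 *                                     or to its VRF master)
	 *   otherwise                      -> false
	 */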
diff --git a/include/net/ip.h b/include/net/ip.h
index 72593e171d14..8866bfce6121 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -155,6 +155,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
+void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int proto);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -421,7 +422,8 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
}
struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
- int fc_mx_len);
+ int fc_mx_len,
+ struct netlink_ext_ack *extack);
static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics)
{
if (fib_metrics != &dst_default_metrics &&
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 236e40ba06bf..69b4bcf880c9 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -69,6 +69,8 @@ struct ip6_tnl_encap_ops {
size_t (*encap_hlen)(struct ip_tunnel_encap *e);
int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, struct flowi6 *fl6);
+ int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info);
};
#ifdef CONFIG_INET
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index b0d022ff6ea1..cbcf35ce1b14 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -144,25 +144,6 @@ struct ip_tunnel {
bool ignore_df;
};
-#define TUNNEL_CSUM __cpu_to_be16(0x01)
-#define TUNNEL_ROUTING __cpu_to_be16(0x02)
-#define TUNNEL_KEY __cpu_to_be16(0x04)
-#define TUNNEL_SEQ __cpu_to_be16(0x08)
-#define TUNNEL_STRICT __cpu_to_be16(0x10)
-#define TUNNEL_REC __cpu_to_be16(0x20)
-#define TUNNEL_VERSION __cpu_to_be16(0x40)
-#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
-#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
-#define TUNNEL_OAM __cpu_to_be16(0x0200)
-#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
-#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
-#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
-#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
-#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
-
-#define TUNNEL_OPTIONS_PRESENT \
- (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
-
struct tnl_ptk_info {
__be16 flags;
__be16 proto;
@@ -311,6 +292,7 @@ struct ip_tunnel_encap_ops {
size_t (*encap_hlen)(struct ip_tunnel_encap *e);
int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
u8 *protocol, struct flowi4 *fl4);
+ int (*err_handler)(struct sk_buff *skb, u32 info);
};
#define MAX_IPTUN_ENCAP_OPS 8
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 829650540780..daf80863d3a5 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -975,6 +975,8 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_forward(struct sk_buff *skb);
int ip6_input(struct sk_buff *skb);
int ip6_mc_input(struct sk_buff *skb);
+void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
+ bool have_final);
int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h
index 3832099289c5..78fa0ac4613c 100644
--- a/include/net/l3mdev.h
+++ b/include/net/l3mdev.h
@@ -101,6 +101,17 @@ struct net_device *l3mdev_master_dev_rcu(const struct net_device *_dev)
return master;
}
+int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex);
+static inline
+int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex)
+{
+ rcu_read_lock();
+ ifindex = l3mdev_master_upper_ifindex_by_index_rcu(net, ifindex);
+ rcu_read_unlock();
+
+ return ifindex;
+}
+
u32 l3mdev_fib_table_rcu(const struct net_device *dev);
u32 l3mdev_fib_table_by_index(struct net *net, int ifindex);
static inline u32 l3mdev_fib_table(const struct net_device *dev)
@@ -208,6 +219,17 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex)
}
static inline
+int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
+{
+ return 0;
+}
+static inline
+int l3mdev_master_upper_ifindex_by_index(struct net *net, int ifindex)
+{
+ return 0;
+}
+
+static inline
struct net_device *l3mdev_master_dev_rcu(const struct net_device *dev)
{
return NULL;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 71985e95d2d9..88219cc137c3 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -467,7 +467,7 @@ struct ieee80211_mu_group_data {
};
/**
- * ieee80211_ftm_responder_params - FTM responder parameters
+ * struct ieee80211_ftm_responder_params - FTM responder parameters
*
* @lci: LCI subelement content
* @civicloc: CIVIC location subelement content
@@ -496,6 +496,8 @@ struct ieee80211_ftm_responder_params {
* @uora_ocw_range: UORA element's OCW Range field
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @he_support: does this BSS support HE
+ * @twt_requester: does this BSS support TWT requester (relevant for managed
+ * mode only, set if the AP advertises TWT responder role)
* @assoc: association status
* @ibss_joined: indicates whether this station is part of an IBSS
* or not
@@ -594,6 +596,7 @@ struct ieee80211_bss_conf {
u8 uora_ocw_range;
u16 frame_time_rts_th;
bool he_support;
+ bool twt_requester;
/* association related data */
bool assoc, ibss_joined;
bool ibss_creator;
@@ -3239,6 +3242,11 @@ enum ieee80211_reconfig_type {
* When the scan finishes, ieee80211_scan_completed() must be called;
* note that it also must be called when the scan cannot finish due to
* any error unless this callback returned a negative error code.
* This callback is also allowed to return the special return value 1;
+ * this indicates that hardware scan isn't desirable right now and a
+ * software scan should be done instead. A driver wishing to use this
+ * capability must ensure its (hardware) scan capabilities aren't
+ * advertised as more capable than mac80211's software scan is.
* The callback can sleep.
*
* @cancel_hw_scan: Ask the low-level to cancel the active hw scan.
@@ -3623,6 +3631,9 @@ enum ieee80211_reconfig_type {
* skb is always a real frame, head may or may not be an A-MSDU.
* @get_ftm_responder_stats: Retrieve FTM responder statistics, if available.
* Statistics should be cumulative, currently no way to reset is provided.
+ *
+ * @start_pmsr: start peer measurement (e.g. FTM) (this call can sleep)
+ * @abort_pmsr: abort peer measurement (this call can sleep)
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -3911,6 +3922,10 @@ struct ieee80211_ops {
int (*get_ftm_responder_stats)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_ftm_responder_stats *ftm_stats);
+ int (*start_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
+ void (*abort_pmsr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *request);
};
/**
@@ -6091,6 +6106,14 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
* @txq: pointer obtained from station or virtual interface
*
* Returns the skb if successful, %NULL if no frame was available.
+ *
+ * Note that this must be called in an rcu_read_lock() critical section,
+ * which can only be released after the SKB was handled. Some pointers in
* skb->cb, e.g. the key pointer, are protected by RCU and thus the
+ * critical section must persist not just for the duration of this call
+ * but for the duration of the frame handling.
+ * However, also note that while in the wake_tx_queue() method,
+ * rcu_read_lock() is already held.
*/
struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_txq *txq);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index f58b384aa6c9..7c1ab9edba03 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -140,8 +140,8 @@ struct neighbour {
unsigned long updated;
rwlock_t lock;
refcount_t refcnt;
- struct sk_buff_head arp_queue;
unsigned int arp_queue_len_bytes;
+ struct sk_buff_head arp_queue;
struct timer_list timer;
unsigned long used;
atomic_t probes;
@@ -149,11 +149,13 @@ struct neighbour {
__u8 nud_state;
__u8 type;
__u8 dead;
+ u8 protocol;
seqlock_t ha_lock;
- unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+ unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))] __aligned(8);
struct hh_cache hh;
int (*output)(struct neighbour *, struct sk_buff *);
const struct neigh_ops *ops;
+ struct list_head gc_list;
struct rcu_head rcu;
struct net_device *dev;
u8 primary_key[0];
@@ -172,6 +174,7 @@ struct pneigh_entry {
possible_net_t net;
struct net_device *dev;
u8 flags;
+ u8 protocol;
u8 key[0];
};
@@ -214,6 +217,8 @@ struct neigh_table {
struct timer_list proxy_timer;
struct sk_buff_head proxy_queue;
atomic_t entries;
+ atomic_t gc_entries;
+ struct list_head gc_list;
rwlock_t lock;
unsigned long last_rand;
struct neigh_statistics __percpu *stats;
@@ -250,6 +255,7 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
+extern const struct nla_policy nda_policy[];
static inline bool neigh_key_eq16(const struct neighbour *n, const void *pkey)
{
@@ -454,6 +460,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
+ unsigned int hh_alen = 0;
unsigned int seq;
unsigned int hh_len;
@@ -461,16 +468,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
seq = read_seqbegin(&hh->hh_lock);
hh_len = hh->hh_len;
if (likely(hh_len <= HH_DATA_MOD)) {
- /* this is inlined by gcc */
- memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+ hh_alen = HH_DATA_MOD;
+
+ /* skb_push() would proceed silently if we have room for
+ * the unaligned size but not for the aligned size:
+ * check headroom explicitly.
+ */
+ if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+ /* this is inlined by gcc */
+ memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+ HH_DATA_MOD);
+ }
} else {
- unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
+ hh_alen = HH_DATA_ALIGN(hh_len);
- memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+ if (likely(skb_headroom(skb) >= hh_alen)) {
+ memcpy(skb->data - hh_alen, hh->hh_data,
+ hh_alen);
+ }
}
} while (read_seqretry(&hh->hh_lock, seq));
- skb_push(skb, hh_len);
+ if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ __skb_push(skb, hh_len);
return dev_queue_xmit(skb);
}
@@ -528,24 +552,6 @@ static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
} while (read_seqretry(&n->ha_lock, seq));
}
-static inline void neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
- int *notify)
-{
- u8 ndm_flags = 0;
-
- if (!(flags & NEIGH_UPDATE_F_ADMIN))
- return;
-
- ndm_flags |= (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
- if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
- if (ndm_flags & NTF_EXT_LEARNED)
- neigh->flags |= NTF_EXT_LEARNED;
- else
- neigh->flags &= ~NTF_EXT_LEARNED;
- *notify = 1;
- }
-}
-
static inline void neigh_update_is_router(struct neighbour *neigh, u32 flags,
int *notify)
{
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 74af19c3a8f7..4cd56808ac4e 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -6,12 +6,12 @@
static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
- skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
+ struct nf_bridge_info *b = skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
- if (likely(skb->nf_bridge))
- refcount_set(&(skb->nf_bridge->use), 1);
+ if (b)
+ memset(b, 0, sizeof(*b));
- return skb->nf_bridge;
+ return b;
}
void nf_bridge_update_protocol(struct sk_buff *skb);
@@ -22,12 +22,6 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net, struct sock *sk,
int (*okfn)(struct net *, struct sock *,
struct sk_buff *));
-static inline struct nf_bridge_info *
-nf_bridge_info_get(const struct sk_buff *skb)
-{
- return skb->nf_bridge;
-}
-
unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb);
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
index cd24be4c4a99..13d55206bb9f 100644
--- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv4/nf_nat_masquerade.h
@@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
const struct nf_nat_range2 *range,
const struct net_device *out);
-void nf_nat_masquerade_ipv4_register_notifier(void);
+int nf_nat_masquerade_ipv4_register_notifier(void);
void nf_nat_masquerade_ipv4_unregister_notifier(void);
#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
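
Since the notifier registration can now fail, callers have to propagate the error and unwind. A minimal sketch of the adjusted module init pattern, assuming an xt target module along the lines of ipt_MASQUERADE (masquerade_tg_reg is a placeholder here):

	static int __init masquerade_tg_init(void)
	{
		int ret;

		ret = xt_register_target(&masquerade_tg_reg); /* placeholder */
		if (ret)
			return ret;

		/* Unwind the target registration if the notifier fails. */
		ret = nf_nat_masquerade_ipv4_register_notifier();
		if (ret)
			xt_unregister_target(&masquerade_tg_reg);

		return ret;
	}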
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
index 0c3b5ebf0bb8..2917bf95c437 100644
--- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
+++ b/include/net/netfilter/ipv6/nf_nat_masquerade.h
@@ -5,7 +5,7 @@
unsigned int
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
const struct net_device *out);
-void nf_nat_masquerade_ipv6_register_notifier(void);
+int nf_nat_masquerade_ipv6_register_notifier(void);
void nf_nat_masquerade_ipv6_unregister_notifier(void);
#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 7e012312cd61..249d0a5b12b8 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -27,12 +27,17 @@
#include <net/netfilter/nf_conntrack_tuple.h>
+struct nf_ct_udp {
+ unsigned long stream_ts;
+};
+
/* per conntrack: protocol private data */
union nf_conntrack_proto {
/* insert conntrack proto private data here */
struct nf_ct_dccp dccp;
struct ip_ct_sctp sctp;
struct ip_ct_tcp tcp;
+ struct nf_ct_udp udp;
struct nf_ct_gre gre;
unsigned int tmpl_padto;
};
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index 79d8d16732b4..bc6745d3010e 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -46,9 +46,6 @@ struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
return acct;
};
-unsigned int seq_print_acct(struct seq_file *s, const struct nf_conn *ct,
- int dir);
-
/* Check if connection tracking accounting is enabled */
static inline bool nf_ct_acct_enabled(struct net *net)
{
@@ -61,8 +58,7 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
net->ct.sysctl_acct = enable;
}
-int nf_conntrack_acct_pernet_init(struct net *net);
-void nf_conntrack_acct_pernet_fini(struct net *net);
+void nf_conntrack_acct_pernet_init(struct net *net);
int nf_conntrack_acct_init(void);
void nf_conntrack_acct_fini(void);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 3f1ce9a8776e..52b44192b43f 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -142,7 +142,7 @@ void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
struct nf_conntrack_expect *exp,
u32 portid, int report);
-int nf_conntrack_ecache_pernet_init(struct net *net);
+void nf_conntrack_ecache_pernet_init(struct net *net);
void nf_conntrack_ecache_pernet_fini(struct net *net);
int nf_conntrack_ecache_init(void);
@@ -182,10 +182,7 @@ static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
u32 portid,
int report) {}
-static inline int nf_conntrack_ecache_pernet_init(struct net *net)
-{
- return 0;
-}
+static inline void nf_conntrack_ecache_pernet_init(struct net *net) {}
static inline void nf_conntrack_ecache_pernet_fini(struct net *net)
{
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 2492120b8097..ec52a8dc32fd 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -124,8 +124,7 @@ static inline void *nfct_help_data(const struct nf_conn *ct)
return (void *)help->data;
}
-int nf_conntrack_helper_pernet_init(struct net *net);
-void nf_conntrack_helper_pernet_fini(struct net *net);
+void nf_conntrack_helper_pernet_init(struct net *net);
int nf_conntrack_helper_init(void);
void nf_conntrack_helper_fini(void);
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
index 3b661986be8f..0ed617bf0a3d 100644
--- a/include/net/netfilter/nf_conntrack_timestamp.h
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -49,21 +49,12 @@ static inline void nf_ct_set_tstamp(struct net *net, bool enable)
}
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
-int nf_conntrack_tstamp_pernet_init(struct net *net);
-void nf_conntrack_tstamp_pernet_fini(struct net *net);
+void nf_conntrack_tstamp_pernet_init(struct net *net);
int nf_conntrack_tstamp_init(void);
void nf_conntrack_tstamp_fini(void);
#else
-static inline int nf_conntrack_tstamp_pernet_init(struct net *net)
-{
- return 0;
-}
-
-static inline void nf_conntrack_tstamp_pernet_fini(struct net *net)
-{
- return;
-}
+static inline void nf_conntrack_tstamp_pernet_init(struct net *net) {}
static inline int nf_conntrack_tstamp_init(void)
{
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 77e2761d4f2f..7d5cda7ce32a 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -95,10 +95,6 @@ void flow_offload_free(struct flow_offload *flow);
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
-int nf_flow_table_iterate(struct nf_flowtable *flow_table,
- void (*iter)(struct flow_offload *flow, void *data),
- void *data);
-
void nf_flow_table_cleanup(struct net_device *dev);
int nf_flow_table_init(struct nf_flowtable *flow_table);
diff --git a/include/net/netfilter/nf_nat_l3proto.h b/include/net/netfilter/nf_nat_l3proto.h
index d300b8f03972..d774ca0c4c5e 100644
--- a/include/net/netfilter/nf_nat_l3proto.h
+++ b/include/net/netfilter/nf_nat_l3proto.h
@@ -2,18 +2,11 @@
#ifndef _NF_NAT_L3PROTO_H
#define _NF_NAT_L3PROTO_H
-struct nf_nat_l4proto;
struct nf_nat_l3proto {
u8 l3proto;
- bool (*in_range)(const struct nf_conntrack_tuple *t,
- const struct nf_nat_range2 *range);
-
- u32 (*secure_port)(const struct nf_conntrack_tuple *t, __be16);
-
bool (*manip_pkt)(struct sk_buff *skb,
unsigned int iphdroff,
- const struct nf_nat_l4proto *l4proto,
const struct nf_conntrack_tuple *target,
enum nf_nat_manip_type maniptype);
diff --git a/include/net/netfilter/nf_nat_l4proto.h b/include/net/netfilter/nf_nat_l4proto.h
index b4d6b29bca62..95a4655bd1ad 100644
--- a/include/net/netfilter/nf_nat_l4proto.h
+++ b/include/net/netfilter/nf_nat_l4proto.h
@@ -5,78 +5,12 @@
#include <net/netfilter/nf_nat.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
-struct nf_nat_range;
struct nf_nat_l3proto;
-struct nf_nat_l4proto {
- /* Protocol number. */
- u8 l4proto;
-
- /* Translate a packet to the target according to manip type.
- * Return true if succeeded.
- */
- bool (*manip_pkt)(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype);
-
- /* Is the manipable part of the tuple between min and max incl? */
- bool (*in_range)(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max);
-
- /* Alter the per-proto part of the tuple (depending on
- * maniptype), to give a unique tuple in the given range if
- * possible. Per-protocol part of tuple is initialized to the
- * incoming packet.
- */
- void (*unique_tuple)(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct);
-
- int (*nlattr_to_range)(struct nlattr *tb[],
- struct nf_nat_range2 *range);
-};
-
-/* Protocol registration. */
-int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto);
-void nf_nat_l4proto_unregister(u8 l3proto,
- const struct nf_nat_l4proto *l4proto);
-
-const struct nf_nat_l4proto *__nf_nat_l4proto_find(u8 l3proto, u8 l4proto);
-
-/* Built-in protocols. */
-extern const struct nf_nat_l4proto nf_nat_l4proto_tcp;
-extern const struct nf_nat_l4proto nf_nat_l4proto_udp;
-extern const struct nf_nat_l4proto nf_nat_l4proto_icmp;
-extern const struct nf_nat_l4proto nf_nat_l4proto_icmpv6;
-extern const struct nf_nat_l4proto nf_nat_l4proto_unknown;
-#ifdef CONFIG_NF_NAT_PROTO_DCCP
-extern const struct nf_nat_l4proto nf_nat_l4proto_dccp;
-#endif
-#ifdef CONFIG_NF_NAT_PROTO_SCTP
-extern const struct nf_nat_l4proto nf_nat_l4proto_sctp;
-#endif
-#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
-extern const struct nf_nat_l4proto nf_nat_l4proto_udplite;
-#endif
-
-bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max);
-
-void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct, u16 *rover);
-
-int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
- struct nf_nat_range2 *range);
-
+/* Translate a packet to the target according to manip type. Returns true on success. */
+bool nf_nat_l4proto_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype);
#endif /*_NF_NAT_L4PROTO_H*/
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 9795d628a127..51cba0b8adf5 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -97,18 +97,14 @@ struct netns_ct {
struct delayed_work ecache_dwork;
bool ecache_dwork_pending;
#endif
+ bool auto_assign_helper_warned;
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_header;
- struct ctl_table_header *acct_sysctl_header;
- struct ctl_table_header *tstamp_sysctl_header;
- struct ctl_table_header *event_sysctl_header;
- struct ctl_table_header *helper_sysctl_header;
#endif
unsigned int sysctl_log_invalid; /* Log invalid packets */
int sysctl_events;
int sysctl_acct;
int sysctl_auto_assign_helper;
- bool auto_assign_helper_warned;
int sysctl_tstamp;
int sysctl_checksum;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index e47503b4e4d1..104a6669e344 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -103,6 +103,9 @@ struct netns_ipv4 {
/* Shall we try to damage output packets if routing dev changes? */
int sysctl_ip_dynaddr;
int sysctl_ip_early_demux;
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ int sysctl_raw_l3mdev_accept;
+#endif
int sysctl_tcp_early_demux;
int sysctl_udp_early_demux;
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 9991e5ef52cc..59f45b1e9dac 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/rhashtable-types.h>
#include <linux/xfrm.h>
#include <net/dst_ops.h>
@@ -53,6 +54,7 @@ struct netns_xfrm {
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
struct xfrm_policy_hthresh policy_hthresh;
+ struct list_head inexact_bins;
struct sock *nlsk;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 72ffb3120ced..40965fbbcd31 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -81,6 +81,14 @@ void __tcf_block_cb_unregister(struct tcf_block *block,
struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident);
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
+void tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode);
@@ -183,6 +191,32 @@ void tcf_block_cb_unregister(struct tcf_block *block,
{
}
+static inline
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ return 0;
+}
+
+static inline
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ return 0;
+}
+
+static inline
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+}
+
+static inline
+void tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+}
+
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
@@ -585,8 +619,8 @@ tcf_match_indev(struct sk_buff *skb, int ifindex)
}
#endif /* CONFIG_NET_CLS_IND */
-int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
- enum tc_setup_type type, void *type_data, bool err_stop);
+int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
+ void *type_data, bool err_stop);
enum tc_block_command {
TC_BLOCK_BIND,
@@ -609,6 +643,7 @@ struct tc_cls_common_offload {
struct tc_cls_u32_knode {
struct tcf_exts *exts;
+ struct tcf_result *res;
struct tc_u32_sel *sel;
u32 handle;
u32 val;
@@ -787,12 +822,21 @@ enum tc_mq_command {
TC_MQ_CREATE,
TC_MQ_DESTROY,
TC_MQ_STATS,
+ TC_MQ_GRAFT,
+};
+
+struct tc_mq_opt_offload_graft_params {
+ unsigned long queue;
+ u32 child_handle;
};
struct tc_mq_qopt_offload {
enum tc_mq_command command;
u32 handle;
- struct tc_qopt_offload_stats stats;
+ union {
+ struct tc_qopt_offload_stats stats;
+ struct tc_mq_opt_offload_graft_params graft_params;
+ };
};
enum tc_red_command {
@@ -800,13 +844,16 @@ enum tc_red_command {
TC_RED_DESTROY,
TC_RED_STATS,
TC_RED_XSTATS,
+ TC_RED_GRAFT,
};
struct tc_red_qopt_offload_params {
u32 min;
u32 max;
u32 probability;
+ u32 limit;
bool is_ecn;
+ bool is_harddrop;
struct gnet_stats_queue *qstats;
};
@@ -818,6 +865,51 @@ struct tc_red_qopt_offload {
struct tc_red_qopt_offload_params set;
struct tc_qopt_offload_stats stats;
struct red_stats *xstats;
+ u32 child_handle;
+ };
+};
+
+enum tc_gred_command {
+ TC_GRED_REPLACE,
+ TC_GRED_DESTROY,
+ TC_GRED_STATS,
+};
+
+struct tc_gred_vq_qopt_offload_params {
+ bool present;
+ u32 limit;
+ u32 prio;
+ u32 min;
+ u32 max;
+ bool is_ecn;
+ bool is_harddrop;
+ u32 probability;
+ /* Only need backlog, see struct tc_prio_qopt_offload_params */
+ u32 *backlog;
+};
+
+struct tc_gred_qopt_offload_params {
+ bool grio_on;
+ bool wred_on;
+ unsigned int dp_cnt;
+ unsigned int dp_def;
+ struct gnet_stats_queue *qstats;
+ struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
+};
+
+struct tc_gred_qopt_offload_stats {
+ struct gnet_stats_basic_packed bstats[MAX_DPs];
+ struct gnet_stats_queue qstats[MAX_DPs];
+ struct red_stats *xstats[MAX_DPs];
+};
+
+struct tc_gred_qopt_offload {
+ enum tc_gred_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_gred_qopt_offload_params set;
+ struct tc_gred_qopt_offload_stats stats;
};
};
@@ -854,4 +946,14 @@ struct tc_prio_qopt_offload {
};
};
+enum tc_root_command {
+ TC_ROOT_GRAFT,
+};
+
+struct tc_root_qopt_offload {
+ enum tc_root_command command;
+ u32 handle;
+ bool ingress;
+};
+
#endif
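The union added to struct tc_mq_qopt_offload means @command selects which member is valid. A hedged sketch of the driver side (the sketch_* helpers are illustrative, not a real driver API):

/* Hedged sketch of a driver handler for the new TC_MQ_GRAFT command */
static int sketch_setup_tc_mq(struct net_device *dev,
			      struct tc_mq_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_MQ_GRAFT:
		/* graft_params is only valid for TC_MQ_GRAFT */
		return sketch_graft_child(dev, opt->graft_params.queue,
					  opt->graft_params.child_handle);
	case TC_MQ_STATS:
		/* stats shares storage with graft_params via the union */
		return sketch_fill_stats(dev, &opt->stats);
	default:
		return -EOPNOTSUPP;
	}
}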
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 4fc75f7ae23b..92b3eaad6088 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -42,7 +42,10 @@ struct net_protocol {
int (*early_demux)(struct sk_buff *skb);
int (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
- void (*err_handler)(struct sk_buff *skb, u32 info);
+
+ /* Returns an error if the ICMP error could not be handled. */
+ int (*err_handler)(struct sk_buff *skb, u32 info);
+
unsigned int no_policy:1,
netns_ok:1,
/* does the protocol do more stringent
@@ -58,10 +61,12 @@ struct inet6_protocol {
void (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
- void (*err_handler)(struct sk_buff *skb,
+ /* Returns an error if the ICMP error could not be handled. */
+ int (*err_handler)(struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, int offset,
__be32 info);
+
unsigned int flags; /* INET6_PROTO_xxx */
};
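With err_handler returning int, a handler can now tell the caller whether the ICMP error found a consumer. A hedged sketch of the assumed contract; the -ENOENT convention and the sketch_* helpers are illustrations, not taken from this diff:

/* Hedged sketch: the int return lets the caller know when no matching
 * socket was found, so another resolver (e.g. a tunnel lookup) may run.
 */
static int sketch_err_handler(struct sk_buff *skb, u32 info)
{
	struct sock *sk = sketch_lookup_sock(skb);	/* illustrative */

	if (!sk)
		return -ENOENT;		/* could not handle the error */

	sketch_report_err(sk, info);	/* illustrative */
	return 0;			/* error delivered to a socket */
}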
diff --git a/include/net/raw.h b/include/net/raw.h
index 9c9fa98a91a4..821ff4887f77 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -17,7 +17,7 @@
#ifndef _RAW_H
#define _RAW_H
-
+#include <net/inet_sock.h>
#include <net/protocol.h>
#include <linux/icmp.h>
@@ -61,6 +61,7 @@ void raw_seq_stop(struct seq_file *seq, void *v);
int raw_hash_sk(struct sock *sk);
void raw_unhash_sk(struct sock *sk);
+void raw_init(void);
struct raw_sock {
/* inet_sock has to be the first member */
@@ -74,4 +75,15 @@ static inline struct raw_sock *raw_sk(const struct sock *sk)
return (struct raw_sock *)sk;
}
+static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept,
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
#endif /* _RAW_H */
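raw_sk_bound_dev_eq() delegates the actual matching to inet_bound_dev_eq(), which this diff does not show; the model below re-states its assumed semantics as a standalone, runnable program:

#include <stdbool.h>
#include <stdio.h>

/* Assumed model of inet_bound_dev_eq(): an unbound socket matches any
 * device unless the packet arrived through an l3mdev (sdif != 0) and
 * the sysctl does not allow that; a bound socket matches either the
 * receiving device or the l3mdev it is enslaved to.
 */
static bool model_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
			       int dif, int sdif)
{
	if (!bound_dev_if)
		return !sdif || l3mdev_accept;
	return bound_dev_if == dif || bound_dev_if == sdif;
}

int main(void)
{
	/* unbound raw socket, packet from a VRF slave: needs the sysctl */
	printf("%d\n", model_bound_dev_eq(false, 0, 3, 7)); /* 0 */
	printf("%d\n", model_bound_dev_eq(true,  0, 3, 7)); /* 1 */
	/* socket bound to the VRF master device (ifindex 7) */
	printf("%d\n", model_bound_dev_eq(false, 7, 3, 7)); /* 1 */
	return 0;
}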
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index cf26e5aacac4..e2091bb2b3a8 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -159,7 +159,8 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
struct net_device *rtnl_create_link(struct net *net, const char *ifname,
unsigned char name_assign_type,
const struct rtnl_link_ops *ops,
- struct nlattr *tb[]);
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
int rtnl_delete_link(struct net_device *dev);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4d736427a4cb..9481f2c142e2 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -24,6 +24,9 @@ struct bpf_flow_keys;
typedef int tc_setup_cb_t(enum tc_setup_type type,
void *type_data, void *cb_priv);
+typedef int tc_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
+ enum tc_setup_type type, void *type_data);
+
struct qdisc_rate_table {
struct tc_ratespec rate;
u32 data[256];
@@ -579,6 +582,30 @@ void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
unsigned int len);
+#ifdef CONFIG_NET_SCHED
+int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data);
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack);
+#else
+static inline int
+qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data)
+{
+ q->flags &= ~TCQ_F_OFFLOADED;
+ return 0;
+}
+
+static inline void
+qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack)
+{
+}
+#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
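qdisc_offload_dump_helper() centralizes the dump-time offload re-query; a hedged sketch of a qdisc calling it (the offload struct choice and stats wiring are illustrative, kernel context assumed):

/* Hedged sketch of a qdisc dump path using the new helper.  The
 * stats pointers are omitted for brevity; a real caller wires them
 * to the qdisc's own counters before the call.
 */
static int sketch_qdisc_dump_offload(struct Qdisc *sch)
{
	struct tc_mq_qopt_offload opt = {
		.command = TC_MQ_STATS,
		.handle	 = sch->handle,
	};

	/* Sets or clears TCQ_F_OFFLOADED in sch->flags based on the
	 * driver's answer; the !CONFIG_NET_SCHED stub just clears it.
	 */
	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}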
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 8dadc74c22e7..4588bdc2b8f0 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -71,7 +71,7 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM };
SCTP_NUM_AUTH_CHUNK_TYPES)
/* These are the different flavours of event. */
-enum sctp_event {
+enum sctp_event_type {
SCTP_EVENT_T_CHUNK = 1,
SCTP_EVENT_T_TIMEOUT,
SCTP_EVENT_T_OTHER,
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index ab9242e51d9e..1d13ec3f2707 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -151,8 +151,8 @@ int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc,
* sctp/input.c
*/
int sctp_rcv(struct sk_buff *skb);
-void sctp_v4_err(struct sk_buff *skb, u32 info);
-void sctp_hash_endpoint(struct sctp_endpoint *);
+int sctp_v4_err(struct sk_buff *skb, u32 info);
+int sctp_hash_endpoint(struct sctp_endpoint *ep);
void sctp_unhash_endpoint(struct sctp_endpoint *);
struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *,
struct sctphdr *, struct sctp_association **,
@@ -620,4 +620,9 @@ static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
return false;
}
+static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
+{
+ return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
+}
+
#endif /* __net_sctp_h__ */
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 9e3d32746430..24825a81829e 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -173,7 +173,7 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire;
__u8 sctp_get_chunk_type(struct sctp_chunk *chunk);
const struct sctp_sm_table_entry *sctp_sm_lookup_event(
struct net *net,
- enum sctp_event event_type,
+ enum sctp_event_type event_type,
enum sctp_state state,
union sctp_subtype event_subtype);
int sctp_chunk_iif(const struct sctp_chunk *);
@@ -313,7 +313,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
/* Prototypes for statetable processing. */
-int sctp_do_sm(struct net *net, enum sctp_event event_type,
+int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
union sctp_subtype subtype, enum sctp_state state,
struct sctp_endpoint *ep, struct sctp_association *asoc,
void *event_arg, gfp_t gfp);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index a11f93790476..003020eb6e66 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -96,7 +96,9 @@ struct sctp_stream;
struct sctp_bind_bucket {
unsigned short port;
- unsigned short fastreuse;
+ signed char fastreuse;
+ signed char fastreuseport;
+ kuid_t fastuid;
struct hlist_node node;
struct hlist_head owner;
struct net *net;
@@ -215,7 +217,7 @@ struct sctp_sock {
* These two structures must be grouped together for the usercopy
* whitelist region.
*/
- struct sctp_event_subscribe subscribe;
+ __u16 subscribe;
struct sctp_initmsg initmsg;
int user_frag;
@@ -1190,6 +1192,8 @@ int sctp_bind_addr_conflict(struct sctp_bind_addr *, const union sctp_addr *,
struct sctp_sock *, struct sctp_sock *);
int sctp_bind_addr_state(const struct sctp_bind_addr *bp,
const union sctp_addr *addr);
+int sctp_bind_addrs_check(struct sctp_sock *sp,
+ struct sctp_sock *sp2, int cnt2);
union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
const union sctp_addr *addrs,
int addrcnt,
@@ -2073,8 +2077,12 @@ struct sctp_association {
int sent_cnt_removable;
+ __u16 subscribe;
+
__u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
__u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+
+ struct rcu_head rcu;
};
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 51b4e0626c34..bd922a0fe914 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -164,30 +164,39 @@ void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
+static inline void sctp_ulpevent_type_set(__u16 *subscribe,
+ __u16 sn_type, __u8 on)
+{
+ if (sn_type > SCTP_SN_TYPE_MAX)
+ return;
+
+ if (on)
+ *subscribe |= (1 << (sn_type - SCTP_SN_TYPE_BASE));
+ else
+ *subscribe &= ~(1 << (sn_type - SCTP_SN_TYPE_BASE));
+}
+
/* Is this event type enabled? */
-static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
- struct sctp_event_subscribe *mask)
+static inline bool sctp_ulpevent_type_enabled(__u16 subscribe, __u16 sn_type)
{
- int offset = sn_type - SCTP_SN_TYPE_BASE;
- char *amask = (char *) mask;
+ if (sn_type > SCTP_SN_TYPE_MAX)
+ return false;
- if (offset >= sizeof(struct sctp_event_subscribe))
- return 0;
- return amask[offset];
+ return subscribe & (1 << (sn_type - SCTP_SN_TYPE_BASE));
}
/* Given an event subscription, is this event enabled? */
-static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
- struct sctp_event_subscribe *mask)
+static inline bool sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
+ __u16 subscribe)
{
__u16 sn_type;
- int enabled = 1;
- if (sctp_ulpevent_is_notification(event)) {
- sn_type = sctp_ulpevent_get_notification_type(event);
- enabled = sctp_ulpevent_type_enabled(sn_type, mask);
- }
- return enabled;
+ if (!sctp_ulpevent_is_notification(event))
+ return true;
+
+ sn_type = sctp_ulpevent_get_notification_type(event);
+
+ return sctp_ulpevent_type_enabled(subscribe, sn_type);
}
#endif /* __sctp_ulpevent_h__ */
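The event mask moves from struct sctp_event_subscribe to a plain __u16 bitmap, so set/test becomes two lines of bit arithmetic. A standalone model of the logic (the SCTP_SN_TYPE_BASE value of 1 << 15 is an assumption quoted from the UAPI header, not from this diff):

#include <stdio.h>

#define SN_TYPE_BASE (1 << 15)		/* assumed SCTP_SN_TYPE_BASE */
#define SN_TYPE_MAX  (SN_TYPE_BASE + 15)

static void type_set(unsigned short *subscribe,
		     unsigned short sn_type, int on)
{
	if (sn_type > SN_TYPE_MAX)
		return;
	if (on)
		*subscribe |= 1 << (sn_type - SN_TYPE_BASE);
	else
		*subscribe &= ~(1 << (sn_type - SN_TYPE_BASE));
}

static int type_enabled(unsigned short subscribe, unsigned short sn_type)
{
	if (sn_type > SN_TYPE_MAX)
		return 0;
	return !!(subscribe & (1 << (sn_type - SN_TYPE_BASE)));
}

int main(void)
{
	unsigned short subscribe = 0;

	type_set(&subscribe, SN_TYPE_BASE + 3, 1);
	printf("%d\n", type_enabled(subscribe, SN_TYPE_BASE + 3)); /* 1 */
	type_set(&subscribe, SN_TYPE_BASE + 3, 0);
	printf("%d\n", type_enabled(subscribe, SN_TYPE_BASE + 3)); /* 0 */
	return 0;
}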
diff --git a/include/net/seg6.h b/include/net/seg6.h
index 2567941a2f32..8b2dc6869fd1 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -16,7 +16,6 @@
#include <linux/net.h>
#include <linux/ipv6.h>
-#include <net/lwtunnel.h>
#include <linux/seg6.h>
#include <linux/rhashtable-types.h>
diff --git a/include/net/sock.h b/include/net/sock.h
index f665d74ae509..a6235c286ef9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1110,7 +1110,7 @@ struct proto {
unsigned int inuse_idx;
#endif
- bool (*stream_memory_free)(const struct sock *sk);
+ bool (*stream_memory_free)(const struct sock *sk, int wake);
bool (*stream_memory_read)(const struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
@@ -1192,19 +1192,29 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
-static inline bool sk_stream_memory_free(const struct sock *sk)
+static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
{
if (sk->sk_wmem_queued >= sk->sk_sndbuf)
return false;
return sk->sk_prot->stream_memory_free ?
- sk->sk_prot->stream_memory_free(sk) : true;
+ sk->sk_prot->stream_memory_free(sk, wake) : true;
}
-static inline bool sk_stream_is_writeable(const struct sock *sk)
+static inline bool sk_stream_memory_free(const struct sock *sk)
+{
+ return __sk_stream_memory_free(sk, 0);
+}
+
+static inline bool __sk_stream_is_writeable(const struct sock *sk, int wake)
{
return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
- sk_stream_memory_free(sk);
+ __sk_stream_memory_free(sk, wake);
+}
+
+static inline bool sk_stream_is_writeable(const struct sock *sk)
+{
+ return __sk_stream_is_writeable(sk, 0);
}
static inline int sk_under_cgroup_hierarchy(struct sock *sk,
@@ -2340,22 +2350,39 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
/**
- * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
+ * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
* @sk: socket sending this packet
* @tsflags: timestamping flags to use
* @tx_flags: completed with instructions for time stamping
+ * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno)
*
* Note: callers should take care of initial ``*tx_flags`` value (usually 0)
*/
-static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags,
- __u8 *tx_flags)
+static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+ __u8 *tx_flags, __u32 *tskey)
{
- if (unlikely(tsflags))
+ if (unlikely(tsflags)) {
__sock_tx_timestamp(tsflags, tx_flags);
+ if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey &&
+ tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
+ *tskey = sk->sk_tskey++;
+ }
if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
*tx_flags |= SKBTX_WIFI_STATUS;
}
+static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags,
+ __u8 *tx_flags)
+{
+ _sock_tx_timestamp(sk, tsflags, tx_flags, NULL);
+}
+
+static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
+{
+ _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags,
+ &skb_shinfo(skb)->tskey);
+}
+
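skb_setup_tx_timestamp() bundles what callers previously did by hand with sock_tx_timestamp() plus manual tskey bookkeeping. A hedged sketch of a packet-building path (kernel context assumed; the surrounding function is illustrative):

/* Hedged sketch: once the skb is owned by the socket, one call fills
 * skb_shinfo(skb)->tx_flags and, when SOF_TIMESTAMPING_OPT_ID is set,
 * skb_shinfo(skb)->tskey from sk->sk_tskey.
 */
static void sketch_finish_skb(struct sock *sk, struct sk_buff *skb,
			      __u16 tsflags)
{
	skb->sk = sk;			/* the helper reads skb->sk */
	skb_setup_tx_timestamp(skb, tsflags);
}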
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 881ecb1555bf..a7fdab5ee6c3 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -95,8 +95,8 @@ struct switchdev_obj_port_vlan {
u16 vid_end;
};
-#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
- container_of(obj, struct switchdev_obj_port_vlan, obj)
+#define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \
+ container_of((OBJ), struct switchdev_obj_port_vlan, obj)
/* SWITCHDEV_OBJ_ID_PORT_MDB */
struct switchdev_obj_port_mdb {
@@ -105,8 +105,8 @@ struct switchdev_obj_port_mdb {
u16 vid;
};
-#define SWITCHDEV_OBJ_PORT_MDB(obj) \
- container_of(obj, struct switchdev_obj_port_mdb, obj)
+#define SWITCHDEV_OBJ_PORT_MDB(OBJ) \
+ container_of((OBJ), struct switchdev_obj_port_mdb, obj)
void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
void *data, void (*destructor)(void const *),
@@ -121,10 +121,6 @@ typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
* @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
*
* @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
- *
- * @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*).
- *
- * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*).
*/
struct switchdev_ops {
int (*switchdev_port_attr_get)(struct net_device *dev,
@@ -132,11 +128,6 @@ struct switchdev_ops {
int (*switchdev_port_attr_set)(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans);
- int (*switchdev_port_obj_add)(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans);
- int (*switchdev_port_obj_del)(struct net_device *dev,
- const struct switchdev_obj *obj);
};
enum switchdev_notifier_type {
@@ -146,6 +137,11 @@ enum switchdev_notifier_type {
SWITCHDEV_FDB_DEL_TO_DEVICE,
SWITCHDEV_FDB_OFFLOADED,
+ SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
+ SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
+
+ SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE,
+ SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE,
SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE,
SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE,
SWITCHDEV_VXLAN_FDB_OFFLOADED,
@@ -153,6 +149,7 @@ enum switchdev_notifier_type {
struct switchdev_notifier_info {
struct net_device *dev;
+ struct netlink_ext_ack *extack;
};
struct switchdev_notifier_fdb_info {
@@ -163,12 +160,25 @@ struct switchdev_notifier_fdb_info {
offloaded:1;
};
+struct switchdev_notifier_port_obj_info {
+ struct switchdev_notifier_info info; /* must be first */
+ const struct switchdev_obj *obj;
+ struct switchdev_trans *trans;
+ bool handled;
+};
+
static inline struct net_device *
switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
{
return info->dev;
}
+static inline struct netlink_ext_ack *
+switchdev_notifier_info_to_extack(const struct switchdev_notifier_info *info)
+{
+ return info->extack;
+}
+
#ifdef CONFIG_NET_SWITCHDEV
void switchdev_deferred_process(void);
@@ -177,13 +187,22 @@ int switchdev_port_attr_get(struct net_device *dev,
int switchdev_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr);
int switchdev_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj);
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack);
int switchdev_port_obj_del(struct net_device *dev,
const struct switchdev_obj *obj);
+
int register_switchdev_notifier(struct notifier_block *nb);
int unregister_switchdev_notifier(struct notifier_block *nb);
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
struct switchdev_notifier_info *info);
+
+int register_switchdev_blocking_notifier(struct notifier_block *nb);
+int unregister_switchdev_blocking_notifier(struct notifier_block *nb);
+int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack);
+
void switchdev_port_fwd_mark_set(struct net_device *dev,
struct net_device *group_dev,
bool joining);
@@ -191,6 +210,19 @@ void switchdev_port_fwd_mark_set(struct net_device *dev,
bool switchdev_port_same_parent_id(struct net_device *a,
struct net_device *b);
+int switchdev_handle_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*add_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack));
+int switchdev_handle_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*del_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj));
+
#define SWITCHDEV_SET_OPS(netdev, ops) ((netdev)->switchdev_ops = (ops))
#else
@@ -211,7 +243,8 @@ static inline int switchdev_port_attr_set(struct net_device *dev,
}
static inline int switchdev_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj)
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -239,12 +272,55 @@ static inline int call_switchdev_notifiers(unsigned long val,
return NOTIFY_DONE;
}
+static inline int
+register_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int
+unregister_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int
+call_switchdev_blocking_notifiers(unsigned long val,
+ struct net_device *dev,
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack)
+{
+ return NOTIFY_DONE;
+}
+
static inline bool switchdev_port_same_parent_id(struct net_device *a,
struct net_device *b)
{
return false;
}
+static inline int
+switchdev_handle_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*add_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack))
+{
+ return 0;
+}
+
+static inline int
+switchdev_handle_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*del_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj))
+{
+ return 0;
+}
+
#define SWITCHDEV_SET_OPS(netdev, ops) do {} while (0)
#endif
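The new blocking notifiers and dispatch helpers suggest the following driver pattern; a hedged sketch with illustrative sketch_* callbacks (kernel context assumed):

/* Hedged sketch of a driver's blocking switchdev notifier using the
 * new helpers; the ops match and the VLAN programming are illustrative.
 */
static const struct net_device_ops sketch_netdev_ops;	/* illustrative */

static bool sketch_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &sketch_netdev_ops;
}

static int sketch_port_obj_add(struct net_device *dev,
			       const struct switchdev_obj *obj,
			       struct switchdev_trans *trans,
			       struct netlink_ext_ack *extack)
{
	if (obj->id != SWITCHDEV_OBJ_ID_PORT_VLAN)
		return -EOPNOTSUPP;
	/* program SWITCHDEV_OBJ_PORT_VLAN(obj) into hardware here */
	return 0;
}

static int sketch_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    sketch_port_dev_check,
						    sketch_port_obj_add);
		return notifier_from_errno(err);
	}
	return NOTIFY_DONE;
}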
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a18914d20486..e0a65c067662 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -313,7 +313,7 @@ extern struct proto tcp_prot;
void tcp_tasklet_init(void);
-void tcp_v4_err(struct sk_buff *skb, u32);
+int tcp_v4_err(struct sk_buff *skb, u32);
void tcp_shutdown(struct sock *sk, int how);
@@ -1124,7 +1124,7 @@ void tcp_rate_check_app_limited(struct sock *sk);
*/
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
- return tp->rx_opt.sack_ok;
+ return likely(tp->rx_opt.sack_ok);
}
static inline bool tcp_is_reno(const struct tcp_sock *tp)
@@ -1315,33 +1315,16 @@ static inline __sum16 tcp_v4_check(int len, __be32 saddr,
return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}
-static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
-{
- return __skb_checksum_complete(skb);
-}
-
static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
- __tcp_checksum_complete(skb);
+ __skb_checksum_complete(skb);
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);
-
-#undef STATE_TRACE
-
-#ifdef STATE_TRACE
-static const char *statename[]={
- "Unused","Established","Syn Sent","Syn Recv",
- "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
- "Close Wait","Last ACK","Listen","Closing"
-};
-#endif
void tcp_set_state(struct sock *sk, int state);
-
void tcp_done(struct sock *sk);
-
int tcp_abort(struct sock *sk, int err);
static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
@@ -1385,7 +1368,7 @@ static inline int tcp_win_from_space(const struct sock *sk, int space)
/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
- return tcp_win_from_space(sk, sk->sk_rcvbuf -
+ return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len -
atomic_read(&sk->sk_rmem_alloc));
}
@@ -1572,9 +1555,21 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
-struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
- const union tcp_md5_addr *addr,
- int family);
+#include <linux/jump_label.h>
+extern struct static_key tcp_md5_needed;
+struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family);
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup(const struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family)
+{
+ if (!static_key_false(&tcp_md5_needed))
+ return NULL;
+ return __tcp_md5_do_lookup(sk, addr, family);
+}
+
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
@@ -1875,12 +1870,16 @@ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
}
-static inline bool tcp_stream_memory_free(const struct sock *sk)
+/* @wake is one when sk_stream_write_space() calls us.
+ * This sends EPOLLOUT only if notsent_bytes is below half the limit.
+ * This mimics the strategy used in sock_def_write_space().
+ */
+static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
{
const struct tcp_sock *tp = tcp_sk(sk);
u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
- return notsent_bytes < tcp_notsent_lowat(tp);
+ return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
}
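The shift-by-@wake test above is easy to sanity-check outside the kernel: with wake = 1 the threshold is effectively halved, so a poller is only woken once at least half the notsent budget is free. A standalone model (the lowat value is arbitrary):

#include <stdio.h>

/* Model of the new test in tcp_stream_memory_free() */
static int stream_memory_free(unsigned int notsent_bytes,
			      unsigned int lowat, int wake)
{
	return (notsent_bytes << wake) < lowat;
}

int main(void)
{
	unsigned int lowat = 1u << 16;	/* e.g. 64 KiB notsent_lowat */

	/* 40 KiB unsent: writeable for plain checks (wake = 0)... */
	printf("%d\n", stream_memory_free(40960, lowat, 0)); /* 1 */
	/* ...but not enough to wake a poller: 40960 * 2 >= 65536 */
	printf("%d\n", stream_memory_free(40960, lowat, 1)); /* 0 */
	return 0;
}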
#ifdef CONFIG_PROC_FS
diff --git a/include/net/tls.h b/include/net/tls.h
index bab5627ff5e3..2a6ac8d642af 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -76,6 +76,10 @@
*
* void (*unhash)(struct tls_device *device, struct sock *sk);
* This function cleans listen state set by Inline TLS driver
+ *
+ * void (*release)(struct kref *kref);
+ * Release the registered device and allocated resources
+ * @kref: reference count of the tls_device
*/
struct tls_device {
char name[TLS_DEVICE_NAME_MAX];
@@ -83,6 +87,8 @@ struct tls_device {
int (*feature)(struct tls_device *device);
int (*hash)(struct tls_device *device, struct sock *sk);
void (*unhash)(struct tls_device *device, struct sock *sk);
+ void (*release)(struct kref *kref);
+ struct kref kref;
};
enum {
@@ -454,6 +460,15 @@ tls_offload_ctx_tx(const struct tls_context *tls_ctx)
return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
}
+static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
+{
+ struct tls_context *ctx = tls_get_ctx(sk);
+
+ if (!ctx)
+ return false;
+ return !!tls_sw_ctx_tx(ctx);
+}
+
static inline struct tls_offload_context_rx *
tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
diff --git a/include/net/udp.h b/include/net/udp.h
index 9e82cb391dea..fd6d948755c8 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -252,6 +252,17 @@ static inline int udp_rqueue_get(struct sock *sk)
return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
}
+static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if,
+ int dif, int sdif)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept,
+ bound_dev_if, dif, sdif);
+#else
+ return inet_bound_dev_eq(true, bound_dev_if, dif, sdif);
+#endif
+}
+
/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
@@ -272,7 +283,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
int udp_get_port(struct sock *sk, unsigned short snum,
int (*saddr_cmp)(const struct sock *,
const struct sock *));
-void udp_err(struct sk_buff *, u32);
+int udp_err(struct sk_buff *, u32);
int udp_abort(struct sock *sk, int err);
int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udp_push_pending_frames(struct sock *sk);
@@ -406,17 +417,24 @@ static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
} while(0)
#if IS_ENABLED(CONFIG_IPV6)
-#define __UDPX_INC_STATS(sk, field) \
-do { \
- if ((sk)->sk_family == AF_INET) \
- __UDP_INC_STATS(sock_net(sk), field, 0); \
- else \
- __UDP6_INC_STATS(sock_net(sk), field, 0); \
-} while (0)
+#define __UDPX_MIB(sk, ipv4) \
+({ \
+ ipv4 ? (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
+ sock_net(sk)->mib.udp_statistics) : \
+ (IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_stats_in6 : \
+ sock_net(sk)->mib.udp_stats_in6); \
+})
#else
-#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
+#define __UDPX_MIB(sk, ipv4) \
+({ \
+ IS_UDPLITE(sk) ? sock_net(sk)->mib.udplite_statistics : \
+ sock_net(sk)->mib.udp_statistics; \
+})
#endif
+#define __UDPX_INC_STATS(sk, field) \
+ __SNMP_INC_STATS(__UDPX_MIB(sk, (sk)->sk_family == AF_INET), field)
+
#ifdef CONFIG_PROC_FS
struct udp_seq_afinfo {
sa_family_t family;
@@ -450,4 +468,26 @@ DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void);
#endif
+static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+ struct sk_buff *skb, bool ipv4)
+{
+ struct sk_buff *segs;
+
+ /* the GSO CB lies after the UDP one, no need to save and restore any
+ * CB fragment
+ */
+ segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+ if (unlikely(IS_ERR_OR_NULL(segs))) {
+ int segs_nr = skb_shinfo(skb)->gso_segs;
+
+ atomic_add(segs_nr, &sk->sk_drops);
+ SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ consume_skb(skb);
+ return segs;
+}
+
#endif /* _UDP_H */
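A hedged sketch of how a receive path would consume the list returned by udp_rcv_segment(); sketch_queue_one() stands in for the real per-skb handler, and the list-walk idiom is an assumption about the caller:

/* Hedged sketch: segment once, then feed each resulting skb to the
 * normal single-skb receive path.  udp_rcv_segment() has already
 * consumed or freed the original skb, so only the list is touched.
 */
static int sketch_udp_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;

	segs = udp_rcv_segment(sk, skb, true);
	if (!segs)
		return 0;	/* segmentation failed, already accounted */

	for (skb = segs; skb; skb = next) {
		next = skb->next;
		skb_mark_not_on_list(skb);
		sketch_queue_one(sk, skb);	/* illustrative handler */
	}
	return 0;
}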
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index fe680ab6b15a..b8137953fea3 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -30,6 +30,7 @@ struct udp_port_cfg {
__be16 local_udp_port;
__be16 peer_udp_port;
+ int bind_ifindex;
unsigned int use_udp_checksums:1,
use_udp6_tx_checksums:1,
use_udp6_rx_checksums:1,
@@ -64,6 +65,8 @@ static inline int udp_sock_create(struct net *net,
}
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
+typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
+ struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
struct list_head *head,
@@ -76,6 +79,7 @@ struct udp_tunnel_sock_cfg {
/* Used for setting up udp_sock fields, see udp.h for details */
__u8 encap_type;
udp_tunnel_encap_rcv_t encap_rcv;
+ udp_tunnel_encap_err_lookup_t encap_err_lookup;
udp_tunnel_encap_destroy_t encap_destroy;
udp_tunnel_gro_receive_t gro_receive;
udp_tunnel_gro_complete_t gro_complete;
@@ -165,6 +169,12 @@ static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
static inline void udp_tunnel_encap_enable(struct socket *sock)
{
+ struct udp_sock *up = udp_sk(sock->sk);
+
+ if (up->encap_enabled)
+ return;
+
+ up->encap_enabled = 1;
#if IS_ENABLED(CONFIG_IPV6)
if (sock->sk->sk_family == PF_INET6)
ipv6_stub->udpv6_encap_enable();
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 03431c148e16..236403eb5ba6 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -216,6 +216,7 @@ struct vxlan_config {
unsigned long age_interval;
unsigned int addrmax;
bool no_share;
+ enum ifla_vxlan_df df;
};
struct vxlan_dev_node {
@@ -420,11 +421,16 @@ struct switchdev_notifier_vxlan_fdb_info {
u8 eth_addr[ETH_ALEN];
__be32 vni;
bool offloaded;
+ bool added_by_user;
};
#if IS_ENABLED(CONFIG_VXLAN)
int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
struct switchdev_notifier_vxlan_fdb_info *fdb_info);
+int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
+ struct notifier_block *nb);
+void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni);
+
#else
static inline int
vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
@@ -432,6 +438,17 @@ vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni,
{
return -ENOENT;
}
+
+static inline int vxlan_fdb_replay(const struct net_device *dev, __be32 vni,
+ struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni)
+{
+}
#endif
#endif
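A hedged sketch of the intended driver flow for the new replay/clear helpers (function names here are illustrative):

/* Hedged sketch: when an offload driver starts tracking a VXLAN
 * device it replays the existing FDB through its own notifier, and
 * on teardown it drops the offloaded marks for that VNI.
 */
static int sketch_vxlan_join(struct net_device *vxlan_dev, __be32 vni,
			     struct notifier_block *fdb_nb)
{
	/* re-sends SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE for current entries */
	return vxlan_fdb_replay(vxlan_dev, vni, fdb_nb);
}

static void sketch_vxlan_leave(struct net_device *vxlan_dev, __be32 vni)
{
	vxlan_fdb_clear_offload(vxlan_dev, vni);
}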
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 0eb390c205af..7298a53b9702 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -577,6 +577,7 @@ struct xfrm_policy {
/* This lock only affects elements except for entry. */
rwlock_t lock;
refcount_t refcnt;
+ u32 pos;
struct timer_list timer;
atomic_t genid;
@@ -589,6 +590,7 @@ struct xfrm_policy {
struct xfrm_lifetime_cur curlft;
struct xfrm_policy_walk_entry walk;
struct xfrm_policy_queue polq;
+ bool bydst_reinsert;
u8 type;
u8 action;
u8 flags;
@@ -596,6 +598,7 @@ struct xfrm_policy {
u16 family;
struct xfrm_sec_ctx *security;
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
+ struct hlist_node bydst_inexact_list;
struct rcu_head rcu;
};
@@ -1093,7 +1096,6 @@ struct xfrm_offload {
};
struct sec_path {
- refcount_t refcnt;
int len;
int olen;
@@ -1101,41 +1103,13 @@ struct sec_path {
struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH];
};
-static inline int secpath_exists(struct sk_buff *skb)
-{
-#ifdef CONFIG_XFRM
- return skb->sp != NULL;
-#else
- return 0;
-#endif
-}
-
-static inline struct sec_path *
-secpath_get(struct sec_path *sp)
-{
- if (sp)
- refcount_inc(&sp->refcnt);
- return sp;
-}
-
-void __secpath_destroy(struct sec_path *sp);
-
-static inline void
-secpath_put(struct sec_path *sp)
-{
- if (sp && refcount_dec_and_test(&sp->refcnt))
- __secpath_destroy(sp);
-}
-
-struct sec_path *secpath_dup(struct sec_path *src);
-int secpath_set(struct sk_buff *skb);
+struct sec_path *secpath_set(struct sk_buff *skb);
static inline void
secpath_reset(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
- secpath_put(skb->sp);
- skb->sp = NULL;
+ skb_ext_del(skb, SKB_EXT_SEC_PATH);
#endif
}
@@ -1191,7 +1165,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
if (sk && sk->sk_policy[XFRM_POLICY_IN])
return __xfrm_policy_check(sk, ndir, skb, family);
- return (!net->xfrm.policy_count[dir] && !skb->sp) ||
+ return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
(skb_dst(skb)->flags & DST_NOPOLICY) ||
__xfrm_policy_check(sk, ndir, skb, family);
}
@@ -1552,6 +1526,7 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
int (*func)(struct xfrm_state *, int, void*), void *);
void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
struct xfrm_state *xfrm_state_alloc(struct net *net);
+void xfrm_state_free(struct xfrm_state *x);
struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
const struct flowi *fl,
@@ -1902,14 +1877,16 @@ static inline void xfrm_states_delete(struct xfrm_state **states, int n)
#ifdef CONFIG_XFRM
static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
- return skb->sp->xvec[skb->sp->len - 1];
+ struct sec_path *sp = skb_sec_path(skb);
+
+ return sp->xvec[sp->len - 1];
}
#endif
static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
- struct sec_path *sp = skb->sp;
+ struct sec_path *sp = skb_sec_path(skb);
if (!sp || !sp->olen || sp->len != sp->olen)
return NULL;
@@ -1967,7 +1944,7 @@ static inline void xfrm_dev_state_delete(struct xfrm_state *x)
static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
struct xfrm_state_offload *xso = &x->xso;
- struct net_device *dev = xso->dev;
+ struct net_device *dev = xso->dev;
if (dev && dev->xfrmdev_ops) {
if (dev->xfrmdev_ops->xdo_dev_state_free)
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index c891ada3c5c2..d85e6befa26b 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -61,6 +61,9 @@ struct scsi_pointer {
/* flags preserved across unprep / reprep */
#define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
+/* for scmd->state */
+#define SCMD_STATE_COMPLETE 0
+
struct scsi_cmnd {
struct scsi_request req;
struct scsi_device *device;
@@ -145,6 +148,7 @@ struct scsi_cmnd {
int result; /* Status code from lower level driver */
int flags; /* Command flags */
+ unsigned long state; /* Command completion state */
unsigned char tag; /* SCSI-II queued command tag */
};
@@ -171,7 +175,7 @@ extern void *scsi_kmap_atomic_sg(struct scatterlist *sg, int sg_count,
size_t *offset, size_t *len);
extern void scsi_kunmap_atomic_sg(void *virt);
-extern int scsi_init_io(struct scsi_cmnd *cmd);
+extern blk_status_t scsi_init_io(struct scsi_cmnd *cmd);
#ifdef CONFIG_SCSI_DMA
extern int scsi_dma_map(struct scsi_cmnd *cmd);
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
index c7bba2b24849..a862dc23c68d 100644
--- a/include/scsi/scsi_dh.h
+++ b/include/scsi/scsi_dh.h
@@ -69,7 +69,7 @@ struct scsi_device_handler {
int (*attach)(struct scsi_device *);
void (*detach)(struct scsi_device *);
int (*activate)(struct scsi_device *, activate_complete, void *);
- int (*prep_fn)(struct scsi_device *, struct request *);
+ blk_status_t (*prep_fn)(struct scsi_device *, struct request *);
int (*set_params)(struct scsi_device *, const char *);
void (*rescan)(struct scsi_device *);
};
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index fae8b465233e..6dffa8555a39 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -2,6 +2,7 @@
#ifndef _SCSI_SCSI_DRIVER_H
#define _SCSI_SCSI_DRIVER_H
+#include <linux/blk_types.h>
#include <linux/device.h>
struct module;
@@ -13,7 +14,7 @@ struct scsi_driver {
struct device_driver gendrv;
void (*rescan)(struct device *);
- int (*init_command)(struct scsi_cmnd *);
+ blk_status_t (*init_command)(struct scsi_cmnd *);
void (*uninit_command)(struct scsi_cmnd *);
int (*done)(struct scsi_cmnd *);
int (*eh_action)(struct scsi_cmnd *, int);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 5ea06d310a25..aa760df8c6b3 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -11,7 +11,6 @@
#include <linux/blk-mq.h>
#include <scsi/scsi.h>
-struct request_queue;
struct block_device;
struct completion;
struct module;
@@ -22,7 +21,6 @@ struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;
-struct blk_queue_tags;
/*
@@ -547,14 +545,8 @@ struct Scsi_Host {
struct scsi_host_template *hostt;
struct scsi_transport_template *transportt;
- /*
- * Area to keep a shared tag map (if needed, will be
- * NULL if not).
- */
- union {
- struct blk_queue_tag *bqt;
- struct blk_mq_tag_set tag_set;
- };
+ /* Area to keep a shared tag map */
+ struct blk_mq_tag_set tag_set;
atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked;
@@ -648,7 +640,6 @@ struct Scsi_Host {
/* The controller does not support WRITE SAME */
unsigned no_write_same:1;
- unsigned use_blk_mq:1;
unsigned use_cmd_list:1;
/* Host responded with short (<36 bytes) INQUIRY result */
@@ -742,11 +733,6 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
shost->tmf_in_progress;
}
-static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
-{
- return shost->use_blk_mq;
-}
-
extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index e192a0caa850..6053d46e794e 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -23,19 +23,15 @@ static inline struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost,
int tag)
{
struct request *req = NULL;
+ u16 hwq;
if (tag == SCSI_NO_TAG)
return NULL;
- if (shost_use_blk_mq(shost)) {
- u16 hwq = blk_mq_unique_tag_to_hwq(tag);
-
- if (hwq < shost->tag_set.nr_hw_queues) {
- req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
- blk_mq_unique_tag_to_tag(tag));
- }
- } else {
- req = blk_map_queue_find_tag(shost->bqt, tag);
+ hwq = blk_mq_unique_tag_to_hwq(tag);
+ if (hwq < shost->tag_set.nr_hw_queues) {
+ req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],
+ blk_mq_unique_tag_to_tag(tag));
}
if (!req)
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
index 70997ab2146c..3fbd71c27ba3 100644
--- a/include/soc/fsl/dpaa2-io.h
+++ b/include/soc/fsl/dpaa2-io.h
@@ -116,4 +116,8 @@ struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
+int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
+ u32 *fcnt, u32 *bcnt);
+int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid,
+ u32 *num);
#endif /* __FSL_DPAA2_IO_H */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 56877660d5ba..5cc7af06c1ba 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -1205,8 +1205,10 @@ void qman_dqrr_get_ithresh(struct qman_portal *portal, u8 *ithresh);
* qman_dqrr_set_ithresh - Set coalesce interrupt threshold
* @portal: portal to set the new value on
* @ithresh: new threshold value
+ *
+ * Returns 0 on success, or a negative error code.
*/
-void qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh);
+int qman_dqrr_set_ithresh(struct qman_portal *portal, u8 ithresh);
/**
* qman_dqrr_get_iperiod - Get coalesce interrupt period
@@ -1219,7 +1221,9 @@ void qman_portal_get_iperiod(struct qman_portal *portal, u32 *iperiod);
* qman_dqrr_set_iperiod - Set coalesce interrupt period
* @portal: portal to set the new value on
* @ithresh: new period value
+ *
+ * Returns 0 on success, or a negative error code.
*/
-void qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod);
+int qman_portal_set_iperiod(struct qman_portal *portal, u32 iperiod);
#endif /* __FSL_QMAN_H */
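Since both coalescing setters now report status, callers should propagate it instead of assuming the writes succeeded; a minimal hedged sketch:

/* Hedged sketch: interrupt-coalescing setup with the new int returns */
static int sketch_set_coalesce(struct qman_portal *portal,
			       u8 ithresh, u32 iperiod)
{
	int err;

	err = qman_dqrr_set_ithresh(portal, ithresh);
	if (err)
		return err;

	return qman_portal_set_iperiod(portal, iperiod);
}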
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 562426812ab2..bf761e68a7c7 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -26,11 +26,9 @@
struct clk;
struct reset_control;
-#ifdef CONFIG_SMP
bool tegra_pmc_cpu_is_powered(unsigned int cpuid);
int tegra_pmc_cpu_power_on(unsigned int cpuid);
int tegra_pmc_cpu_remove_clamping(unsigned int cpuid);
-#endif /* CONFIG_SMP */
/*
* powergate and I/O rail APIs
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index ea8c93bbb0e0..0cdc3999ecfa 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -23,6 +23,7 @@ struct snd_compr_ops;
* struct snd_compr_runtime: runtime stream description
* @state: stream state
* @ops: pointer to DSP callbacks
+ * @dma_buffer_p: runtime dma buffer pointer
* @buffer: pointer to kernel buffer, valid only when not in mmap mode or
* DSP doesn't implement copy
* @buffer_size: size of the above buffer
@@ -37,6 +38,7 @@ struct snd_compr_ops;
struct snd_compr_runtime {
snd_pcm_state_t state;
struct snd_compr_ops *ops;
+ struct snd_dma_buffer *dma_buffer_p;
void *buffer;
u64 buffer_size;
u32 fragment_size;
@@ -175,6 +177,23 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
wake_up(&stream->runtime->sleep);
}
+/**
+ * snd_compr_set_runtime_buffer - Set the Compress runtime buffer
+ * @substream: compress substream to set
+ * @bufp: the buffer information, NULL to clear
+ *
+ * Copy the buffer information to the runtime buffer when @bufp is non-NULL.
+ * Otherwise it clears the current buffer information.
+ */
+static inline void snd_compr_set_runtime_buffer(
+ struct snd_compr_stream *substream,
+ struct snd_dma_buffer *bufp)
+{
+ struct snd_compr_runtime *runtime = substream->runtime;
+
+ runtime->dma_buffer_p = bufp;
+}
+
int snd_compr_stop_error(struct snd_compr_stream *stream,
snd_pcm_state_t state);
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 0d98bb9068b1..7fa48b100936 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -236,6 +236,7 @@ struct hda_codec {
/* misc flags */
unsigned int in_freeing:1; /* being released */
unsigned int registered:1; /* codec was registered */
+ unsigned int display_power_control:1; /* needs display power */
unsigned int spdif_status_reset :1; /* needs to toggle SPDIF for each
* status change
* (e.g. Realtek codecs)
diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h
index 78626cde7081..2ec31b358950 100644
--- a/include/sound/hda_component.h
+++ b/include/sound/hda_component.h
@@ -5,10 +5,15 @@
#define __SOUND_HDA_COMPONENT_H
#include <drm/drm_audio_component.h>
+#include <sound/hdaudio.h>
+
+/* virtual idx for controller */
+#define HDA_CODEC_IDX_CONTROLLER HDA_MAX_CODECS
#ifdef CONFIG_SND_HDA_COMPONENT
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
-int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
+void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx,
+ bool enable);
int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
int dev_id, int rate);
int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
@@ -25,9 +30,9 @@ static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
{
return 0;
}
-static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+static inline void snd_hdac_display_power(struct hdac_bus *bus,
+ unsigned int idx, bool enable)
{
- return 0;
}
static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
hda_nid_t nid, int dev_id, int rate)
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index cd1773d0e08f..b4fa1c775251 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -79,7 +79,6 @@ struct hdac_device {
/* misc flags */
atomic_t in_pm; /* suspend/resume being performed */
- bool link_power_control:1;
/* sysfs */
struct hdac_widget_tree *widgets;
@@ -99,6 +98,12 @@ enum {
HDA_DEV_ASOC,
};
+enum {
+ SND_SKL_PCI_BIND_AUTO, /* automatic selection based on pci class */
+ SND_SKL_PCI_BIND_LEGACY, /* bind only with legacy driver */
+ SND_SKL_PCI_BIND_ASOC /* bind only with ASoC driver */
+};
+
/* direction */
enum {
HDA_INPUT, HDA_OUTPUT
@@ -237,8 +242,6 @@ struct hdac_bus_ops {
/* get a response from the last command */
int (*get_response)(struct hdac_bus *bus, unsigned int addr,
unsigned int *res);
- /* control the link power */
- int (*link_power)(struct hdac_bus *bus, bool enable);
};
/*
@@ -363,7 +366,8 @@ struct hdac_bus {
/* DRM component interface */
struct drm_audio_component *audio_component;
- int drm_power_refcount;
+ long display_power_status;
+ bool display_power_active;
/* parameters required for enhanced capabilities */
int num_streams;
@@ -389,6 +393,7 @@ void snd_hdac_bus_queue_event(struct hdac_bus *bus, u32 res, u32 res_ex);
int snd_hdac_bus_add_device(struct hdac_bus *bus, struct hdac_device *codec);
void snd_hdac_bus_remove_device(struct hdac_bus *bus,
struct hdac_device *codec);
+void snd_hdac_bus_process_unsol_events(struct work_struct *work);
static inline void snd_hdac_codec_link_up(struct hdac_device *codec)
{
@@ -404,7 +409,6 @@ int snd_hdac_bus_send_cmd(struct hdac_bus *bus, unsigned int val);
int snd_hdac_bus_get_response(struct hdac_bus *bus, unsigned int addr,
unsigned int *res);
int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus);
-int snd_hdac_link_power(struct hdac_device *codec, bool enable);
bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset);
void snd_hdac_bus_stop_chip(struct hdac_bus *bus);
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index 2dd37cada7c0..888a833d3b00 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -254,11 +254,13 @@ static inline int snd_interval_empty(const struct snd_interval *i)
static inline int snd_interval_single(const struct snd_interval *i)
{
return (i->min == i->max ||
- (i->min + 1 == i->max && i->openmax));
+ (i->min + 1 == i->max && (i->openmin || i->openmax)));
}
static inline int snd_interval_value(const struct snd_interval *i)
{
+ if (i->openmin && !i->openmax)
+ return i->max;
return i->min;
}
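The two tweaks are easiest to see on a half-open interval: (x, x+1] contains exactly one integer, x+1, so single() must accept it and value() must return max rather than min. A standalone model:

#include <stdio.h>

struct interval {
	unsigned int min, max;
	unsigned int openmin:1, openmax:1;
};

static int single(const struct interval *i)
{
	return i->min == i->max ||
	       (i->min + 1 == i->max && (i->openmin || i->openmax));
}

static unsigned int value(const struct interval *i)
{
	if (i->openmin && !i->openmax)
		return i->max;	/* (min, max] -> only max is inside */
	return i->min;
}

int main(void)
{
	/* (43, 44]: one integer inside, namely 44 */
	struct interval i = { .min = 43, .max = 44, .openmin = 1 };

	printf("single=%d value=%u\n", single(&i), value(&i)); /* 1 44 */
	return 0;
}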
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index fb0318f9b10f..6d69ed2bd7b1 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -116,12 +116,12 @@ int asoc_simple_card_clean_reference(struct snd_soc_card *card);
void asoc_simple_card_convert_fixup(struct asoc_simple_card_data *data,
struct snd_pcm_hw_params *params);
-void asoc_simple_card_parse_convert(struct device *dev, char *prefix,
+void asoc_simple_card_parse_convert(struct device *dev,
+ struct device_node *np, char *prefix,
struct asoc_simple_card_data *data);
int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
- char *prefix,
- int optional);
+ char *prefix);
int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card,
char *prefix);
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index f48f59e5b7b0..bb5e1e4ce8bf 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -24,6 +24,7 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index e45b2330d16a..266e64e3c24c 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -38,6 +38,20 @@ struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines);
/**
+ * snd_soc_acpi_mach_params: interface for machine driver configuration
+ *
+ * @acpi_ipc_irq_index: used for BYT-CR detection
+ * @platform: string used for HDAudio codec support
+ * @codec_mask: used for HDAudio support
+ * @dmic_num: number of digital microphones
+ */
+struct snd_soc_acpi_mach_params {
+ u32 acpi_ipc_irq_index;
+ const char *platform;
+ u32 codec_mask;
+ u32 dmic_num;
+};
+
+/**
* snd_soc_acpi_mach: ACPI-based machine descriptor. Most of the fields are
* related to the hardware, except for the firmware and topology file names.
* A platform supported by legacy and Sound Open Firmware (SOF) would expose
@@ -68,6 +82,7 @@ struct snd_soc_acpi_mach {
struct snd_soc_acpi_mach * (*machine_quirk)(void *arg);
const void *quirk_data;
void *pdata;
+ struct snd_soc_acpi_mach_params mach_params;
const char *sof_fw_filename;
const char *sof_tplg_filename;
const char *asoc_plat_name;
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f1dab1f4b194..8ec1de856ee7 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -553,12 +553,12 @@ static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
}
#endif
-#ifdef CONFIG_SND_SOC_AC97_BUS
struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
unsigned int id, unsigned int id_mask);
void snd_soc_free_ac97_component(struct snd_ac97 *ac97);
+#ifdef CONFIG_SND_SOC_AC97_BUS
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
struct platform_device *pdev);
@@ -1192,7 +1192,7 @@ struct snd_soc_pcm_runtime {
((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
(i)++)
#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
- for (; ((i--) >= 0) && ((dai) = rtd->codec_dais[i]);)
+ for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);)
/* mixer control */
@@ -1477,10 +1477,20 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np,
unsigned int *rx_mask,
unsigned int *slots,
unsigned int *slot_width);
-void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+void snd_soc_of_parse_node_prefix(struct device_node *np,
struct snd_soc_codec_conf *codec_conf,
struct device_node *of_node,
const char *propname);
+static inline
+void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
+ struct snd_soc_codec_conf *codec_conf,
+ struct device_node *of_node,
+ const char *propname)
+{
+ snd_soc_of_parse_node_prefix(card->dev->of_node,
+ codec_conf, of_node, propname);
+}
+
int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname);
unsigned int snd_soc_of_parse_daifmt(struct device_node *np,
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index 2cbd6e42ad83..e4526f85c19d 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -221,9 +221,30 @@ DEFINE_EVENT(cache_set, bcache_journal_entry_full,
TP_ARGS(c)
);
-DEFINE_EVENT(bcache_bio, bcache_journal_write,
- TP_PROTO(struct bio *bio),
- TP_ARGS(bio)
+TRACE_EVENT(bcache_journal_write,
+ TP_PROTO(struct bio *bio, u32 keys),
+ TP_ARGS(bio, keys),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev )
+ __field(sector_t, sector )
+ __field(unsigned int, nr_sector )
+ __array(char, rwbs, 6 )
+ __field(u32, nr_keys )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio_dev(bio);
+ __entry->sector = bio->bi_iter.bi_sector;
+ __entry->nr_sector = bio->bi_iter.bi_size >> 9;
+ __entry->nr_keys = keys;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u keys %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector, __entry->nr_sector,
+ __entry->nr_keys)
);
/* Btree */
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 8568946f491d..2887503e4d12 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -92,7 +92,7 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
#define TP_STRUCT__entry_fsid __array(u8, fsid, BTRFS_FSID_SIZE)
#define TP_fast_assign_fsid(fs_info) \
- memcpy(__entry->fsid, fs_info->fsid, BTRFS_FSID_SIZE)
+ memcpy(__entry->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE)
#define TP_STRUCT__entry_btrfs(args...) \
TP_STRUCT__entry( \
@@ -1048,6 +1048,8 @@ TRACE_EVENT(btrfs_trigger_flush,
{ FLUSH_DELAYED_ITEMS, "FLUSH_DELAYED_ITEMS"}, \
{ FLUSH_DELALLOC, "FLUSH_DELALLOC"}, \
{ FLUSH_DELALLOC_WAIT, "FLUSH_DELALLOC_WAIT"}, \
+ { FLUSH_DELAYED_REFS_NR, "FLUSH_DELAYED_REFS_NR"}, \
+ { FLUSH_DELAYED_REFS, "FLUSH_DELAYED_REFS"}, \
{ ALLOC_CHUNK, "ALLOC_CHUNK"}, \
{ COMMIT_TRANS, "COMMIT_TRANS"})
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 698e0d8a5ca4..d68e9e536814 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -226,6 +226,26 @@ TRACE_EVENT(ext4_drop_inode,
(unsigned long) __entry->ino, __entry->drop)
);
+TRACE_EVENT(ext4_nfs_commit_metadata,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ ),
+
+ TP_printk("dev %d,%d ino %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino)
+);
+
TRACE_EVENT(ext4_mark_inode_dirty,
TP_PROTO(struct inode *inode, unsigned long IP),
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index 68b17c116907..fad7befa612d 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
__field(struct file_lock *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, fl_next)
+ __field(struct file_lock *, fl_blocker)
__field(fl_owner_t, fl_owner)
__field(unsigned int, fl_pid)
__field(unsigned int, fl_flags)
@@ -82,7 +82,7 @@ DECLARE_EVENT_CLASS(filelock_lock,
__entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_next = fl ? fl->fl_next : NULL;
+ __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
__entry->fl_owner = fl ? fl->fl_owner : NULL;
__entry->fl_pid = fl ? fl->fl_pid : 0;
__entry->fl_flags = fl ? fl->fl_flags : 0;
@@ -92,9 +92,9 @@ DECLARE_EVENT_CLASS(filelock_lock,
__entry->ret = ret;
),
- TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
+ TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_pid=%u fl_flags=%s fl_type=%s fl_start=%lld fl_end=%lld ret=%d",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
- __entry->i_ino, __entry->fl_next, __entry->fl_owner,
+ __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
__entry->fl_pid, show_fl_flags(__entry->fl_flags),
show_fl_type(__entry->fl_type),
__entry->fl_start, __entry->fl_end, __entry->ret)
@@ -125,7 +125,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
__field(struct file_lock *, fl)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
- __field(struct file_lock *, fl_next)
+ __field(struct file_lock *, fl_blocker)
__field(fl_owner_t, fl_owner)
__field(unsigned int, fl_flags)
__field(unsigned char, fl_type)
@@ -137,7 +137,7 @@ DECLARE_EVENT_CLASS(filelock_lease,
__entry->fl = fl ? fl : NULL;
__entry->s_dev = inode->i_sb->s_dev;
__entry->i_ino = inode->i_ino;
- __entry->fl_next = fl ? fl->fl_next : NULL;
+ __entry->fl_blocker = fl ? fl->fl_blocker : NULL;
__entry->fl_owner = fl ? fl->fl_owner : NULL;
__entry->fl_flags = fl ? fl->fl_flags : 0;
__entry->fl_type = fl ? fl->fl_type : 0;
@@ -145,9 +145,9 @@ DECLARE_EVENT_CLASS(filelock_lease,
__entry->fl_downgrade_time = fl ? fl->fl_downgrade_time : 0;
),
- TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_next=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
+ TP_printk("fl=0x%p dev=0x%x:0x%x ino=0x%lx fl_blocker=0x%p fl_owner=0x%p fl_flags=%s fl_type=%s fl_break_time=%lu fl_downgrade_time=%lu",
__entry->fl, MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
- __entry->i_ino, __entry->fl_next, __entry->fl_owner,
+ __entry->i_ino, __entry->fl_blocker, __entry->fl_owner,
show_fl_flags(__entry->fl_flags),
show_fl_type(__entry->fl_type),
__entry->fl_break_time, __entry->fl_downgrade_time)
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 00aa72ce0e7c..1efd7d9b25fe 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -244,6 +244,65 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
TP_ARGS(skb)
);
+DECLARE_EVENT_CLASS(net_dev_rx_exit_template,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret),
+
+ TP_STRUCT__entry(
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->ret = ret;
+ ),
+
+ TP_printk("ret=%d", __entry->ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_frags_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_receive_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_ni_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit,
+
+ TP_PROTO(int ret),
+
+ TP_ARGS(ret)
+);
+
#endif /* _TRACE_NET_H */
/* This part must be outside protection */
diff --git a/include/trace/events/objagg.h b/include/trace/events/objagg.h
new file mode 100644
index 000000000000..fcec0fc9eb0c
--- /dev/null
+++ b/include/trace/events/objagg.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM objagg
+
+#if !defined(__TRACE_OBJAGG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_OBJAGG_H
+
+#include <linux/tracepoint.h>
+
+struct objagg;
+struct objagg_obj;
+
+TRACE_EVENT(objagg_create,
+ TP_PROTO(const struct objagg *objagg),
+
+ TP_ARGS(objagg),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ ),
+
+ TP_printk("objagg %p", __entry->objagg)
+);
+
+TRACE_EVENT(objagg_destroy,
+ TP_PROTO(const struct objagg *objagg),
+
+ TP_ARGS(objagg),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ ),
+
+ TP_printk("objagg %p", __entry->objagg)
+);
+
+TRACE_EVENT(objagg_obj_create,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_destroy,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p", __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_get,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ unsigned int refcount),
+
+ TP_ARGS(objagg, obj, refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(unsigned int, refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->refcount = refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, refcount %u",
+ __entry->objagg, __entry->obj, __entry->refcount)
+);
+
+TRACE_EVENT(objagg_obj_put,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ unsigned int refcount),
+
+ TP_ARGS(objagg, obj, refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(unsigned int, refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->refcount = refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, refcount %u",
+ __entry->objagg, __entry->obj, __entry->refcount)
+);
+
+TRACE_EVENT(objagg_obj_parent_assign,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ const struct objagg_obj *parent,
+ unsigned int parent_refcount),
+
+ TP_ARGS(objagg, obj, parent, parent_refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(const void *, parent)
+ __field(unsigned int, parent_refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->parent = parent;
+ __entry->parent_refcount = parent_refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u",
+ __entry->objagg, __entry->obj,
+ __entry->parent, __entry->parent_refcount)
+);
+
+TRACE_EVENT(objagg_obj_parent_unassign,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj,
+ const struct objagg_obj *parent,
+ unsigned int parent_refcount),
+
+ TP_ARGS(objagg, obj, parent, parent_refcount),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ __field(const void *, parent)
+ __field(unsigned int, parent_refcount)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ __entry->parent = parent;
+ __entry->parent_refcount = parent_refcount;
+ ),
+
+ TP_printk("objagg %p, obj %p, parent %p, parent_refcount %u",
+ __entry->objagg, __entry->obj,
+ __entry->parent, __entry->parent_refcount)
+);
+
+TRACE_EVENT(objagg_obj_root_create,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p",
+ __entry->objagg, __entry->obj)
+);
+
+TRACE_EVENT(objagg_obj_root_destroy,
+ TP_PROTO(const struct objagg *objagg,
+ const struct objagg_obj *obj),
+
+ TP_ARGS(objagg, obj),
+
+ TP_STRUCT__entry(
+ __field(const void *, objagg)
+ __field(const void *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->objagg = objagg;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("objagg %p, obj %p",
+ __entry->objagg, __entry->obj)
+);
+
+#endif /* __TRACE_OBJAGG_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index f07b270d4fc4..9a4bdfadab07 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -107,6 +107,8 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
+ unsigned int state;
+
#ifdef CONFIG_SCHED_DEBUG
BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
@@ -118,7 +120,15 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
if (preempt)
return TASK_REPORT_MAX;
- return 1 << task_state_index(p);
+ /*
+ * task_state_index() uses fls() and returns a value in the 0-8 range.
+ * Decrement it by 1 (except for the TASK_RUNNING state, i.e. 0) before
+ * using it in the left-shift operation to get the correct task->state
+ * mapping.
+ */
+ state = task_state_index(p);
+
+ return state ? (1 << (state - 1)) : state;
}
#endif /* CREATE_TRACE_POINTS */
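As a worked example of the mapping above (illustrative, not part of the patch): task_state_index() yields 0 for TASK_RUNNING and fls()-style indices for the other states, so the reported bitmask is recovered like this:

    /*
     * index 0 -> 0x0 (TASK_RUNNING, no bit set)
     * index 1 -> 0x1 (TASK_INTERRUPTIBLE)
     * index 2 -> 0x2 (TASK_UNINTERRUPTIBLE)
     * index 3 -> 0x4, and so on up to index 8 -> 0x80.
     */
    unsigned int state = task_state_index(p);
    long reported = state ? (1 << (state - 1)) : state;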
diff --git a/include/uapi/asm-generic/Kbuild.asm b/include/uapi/asm-generic/Kbuild.asm
index 21381449d98a..355c4ac2c0b0 100644
--- a/include/uapi/asm-generic/Kbuild.asm
+++ b/include/uapi/asm-generic/Kbuild.asm
@@ -3,6 +3,7 @@
#
mandatory-y += auxvec.h
mandatory-y += bitsperlong.h
+mandatory-y += bpf_perf_event.h
mandatory-y += byteorder.h
mandatory-y += errno.h
mandatory-y += fcntl.h
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 538546edbfbd..d90127298f12 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -738,9 +738,11 @@ __SYSCALL(__NR_statx, sys_statx)
__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
#define __NR_rseq 293
__SYSCALL(__NR_rseq, sys_rseq)
+#define __NR_kexec_file_load 294
+__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
#undef __NR_syscalls
-#define __NR_syscalls 294
+#define __NR_syscalls 295
/*
* 32 bit systems traditionally used different
@@ -760,8 +762,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_ftruncate __NR3264_ftruncate
#define __NR_lseek __NR3264_lseek
#define __NR_sendfile __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_newfstatat __NR3264_fstatat
#define __NR_fstat __NR3264_fstat
+#endif
#define __NR_mmap __NR3264_mmap
#define __NR_fadvise64 __NR3264_fadvise64
#ifdef __NR3264_stat
@@ -776,8 +780,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_ftruncate64 __NR3264_ftruncate
#define __NR_llseek __NR3264_lseek
#define __NR_sendfile64 __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_fstatat64 __NR3264_fstatat
#define __NR_fstat64 __NR3264_fstat
+#endif
#define __NR_mmap2 __NR3264_mmap
#define __NR_fadvise64_64 __NR3264_fadvise64
#ifdef __NR3264_stat
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 370e9a5536ef..be84e43c1e19 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -326,6 +326,12 @@ struct drm_amdgpu_gem_userptr {
/* GFX9 and later: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
+#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
+#define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF
+#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29
+#define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
+#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 0cd40ebfa1b1..0b44260a5ee9 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -151,6 +151,21 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+
+/*
+ * packed YCbCr420 2x2 tiled formats
+ * first 64 bits will contain Y,Cb,Cr components for a 2x2 tile
+ */
+/* [63:0] A3:A2:Y3:0:Cr0:0:Y2:0:A1:A0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_Y0L0 fourcc_code('Y', '0', 'L', '0')
+/* [63:0] X3:X2:Y3:0:Cr0:0:Y2:0:X1:X0:Y1:0:Cb0:0:Y0:0 1:1:8:2:8:2:8:2:1:1:8:2:8:2:8:2 little endian */
+#define DRM_FORMAT_X0L0 fourcc_code('X', '0', 'L', '0')
+
+/* [63:0] A3:A2:Y3:Cr0:Y2:A1:A0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_Y0L2 fourcc_code('Y', '0', 'L', '2')
+/* [63:0] X3:X2:Y3:Cr0:Y2:X1:X0:Y1:Cb0:Y0 1:1:10:10:10:1:1:10:10:10 little endian */
+#define DRM_FORMAT_X0L2 fourcc_code('X', '0', 'L', '2')
/*
* 2 plane RGB + A
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index d3e0fe31efc5..a439c2e67896 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -888,6 +888,25 @@ struct drm_mode_revoke_lease {
__u32 lessee_id;
};
+/**
+ * struct drm_mode_rect - Two dimensional rectangle.
+ * @x1: Horizontal starting coordinate (inclusive).
+ * @y1: Vertical starting coordinate (inclusive).
+ * @x2: Horizontal ending coordinate (exclusive).
+ * @y2: Vertical ending coordinate (exclusive).
+ *
+ * Since the drm subsystem uses struct drm_rect to manage rectangular
+ * areas, this exports it to user-space.
+ *
+ * Currently used by drm_mode_atomic blob property FB_DAMAGE_CLIPS.
+ */
+struct drm_mode_rect {
+ __s32 x1;
+ __s32 y1;
+ __s32 x2;
+ __s32 y2;
+};
+
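A small sketch (not part of the header) showing why the inclusive/exclusive convention is convenient: the size needs no +1 adjustment, and an empty rectangle is simply x1 == x2:

    static inline int drm_mode_rect_width(const struct drm_mode_rect *r)
    {
            return r->x2 - r->x1;   /* exclusive end, so no +1 */
    }

    static inline int drm_mode_rect_height(const struct drm_mode_rect *r)
    {
            return r->y2 - r->y1;
    }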
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index a4446f452040..298b2e197744 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -412,6 +412,14 @@ typedef struct drm_i915_irq_wait {
int irq_seq;
} drm_i915_irq_wait_t;
+/*
+ * Different modes of per-process Graphics Translation Table,
+ * see I915_PARAM_HAS_ALIASING_PPGTT
+ */
+#define I915_GEM_PPGTT_NONE 0
+#define I915_GEM_PPGTT_ALIASING 1
+#define I915_GEM_PPGTT_FULL 2
+
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index c06d0a5bdd80..91a16b333c69 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -105,14 +105,24 @@ struct drm_msm_gem_new {
__u32 handle; /* out */
};
-#define MSM_INFO_IOVA 0x01
-
-#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
+/* Get or set GEM buffer info. The requested value can be passed
+ * directly in 'value', or for data larger than 64b 'value' is a
+ * pointer to userspace buffer, with 'len' specifying the number of
+ * bytes copied into that buffer. For info returned by pointer,
+ * calling the GEM_INFO ioctl with null 'value' will return the
+ * required buffer size in 'len'.
+ */
+#define MSM_INFO_GET_OFFSET 0x00 /* get mmap() offset, returned by value */
+#define MSM_INFO_GET_IOVA 0x01 /* get iova, returned by value */
+#define MSM_INFO_SET_NAME 0x02 /* set the debug name (by pointer) */
+#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
struct drm_msm_gem_info {
__u32 handle; /* in */
- __u32 flags; /* in - combination of MSM_INFO_* flags */
- __u64 offset; /* out, mmap() offset or iova */
+ __u32 info; /* in - one of MSM_INFO_* */
+ __u64 value; /* in or out */
+ __u32 len; /* in or out */
+ __u32 pad;
};
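A hedged userspace sketch of the two-step pattern described above for pointer-returned info, here MSM_INFO_GET_NAME (DRM_IOCTL_MSM_GEM_INFO is the existing ioctl wrapper for this struct; error handling elided):

    struct drm_msm_gem_info req = {
            .handle = handle,
            .info = MSM_INFO_GET_NAME,
            .value = 0,                     /* null: ask for size only */
    };

    if (ioctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req) == 0 && req.len) {
            char *name = malloc(req.len);

            req.value = (uintptr_t)name;    /* now pass the buffer */
            ioctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);
    }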
#define MSM_PREP_READ 0x01
@@ -188,8 +198,11 @@ struct drm_msm_gem_submit_cmd {
*/
#define MSM_SUBMIT_BO_READ 0x0001
#define MSM_SUBMIT_BO_WRITE 0x0002
+#define MSM_SUBMIT_BO_DUMP 0x0004
-#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+#define MSM_SUBMIT_BO_FLAGS (MSM_SUBMIT_BO_READ | \
+ MSM_SUBMIT_BO_WRITE | \
+ MSM_SUBMIT_BO_DUMP)
struct drm_msm_gem_submit_bo {
__u32 flags; /* in, mask of MSM_SUBMIT_BO_x */
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 7b6627783608..35c7d813c66e 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -36,6 +36,7 @@ extern "C" {
#define DRM_V3D_MMAP_BO 0x03
#define DRM_V3D_GET_PARAM 0x04
#define DRM_V3D_GET_BO_OFFSET 0x05
+#define DRM_V3D_SUBMIT_TFU 0x06
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -43,6 +44,7 @@ extern "C" {
#define DRM_IOCTL_V3D_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
#define DRM_IOCTL_V3D_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
#define DRM_IOCTL_V3D_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
+#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
@@ -58,10 +60,15 @@ struct drm_v3d_submit_cl {
* coordinate shader to determine where primitives land on the screen,
* then writes out the state updates and draw calls necessary per tile
* to the tile allocation BO.
+ *
+ * This BCL will block on any previous BCL submitted on the
+ * same FD, but not on any RCLs or BCLs submitted by other
+ * clients -- that is left up to the submitter to control
+ * using in_sync_bcl if necessary.
*/
__u32 bcl_start;
- /** End address of the BCL (first byte after the BCL) */
+ /** End address of the BCL (first byte after the BCL) */
__u32 bcl_end;
/* Offset of the render command list.
@@ -69,10 +76,15 @@ struct drm_v3d_submit_cl {
* This is the second set of commands executed, which will either
* execute the tiles that have been set up by the BCL, or a fixed set
* of tiles (in the case of RCL-only blits).
+ *
+ * This RCL will block on this submit's BCL, and any previous
+ * RCL submitted on the same FD, but not on any RCLs or BCLs
+ * submitted by other clients -- that is left up to the
+ * submitter to control using in_sync_rcl if necessary.
*/
__u32 rcl_start;
- /** End address of the RCL (first byte after the RCL) */
+ /** End address of the RCL (first byte after the RCL) */
__u32 rcl_end;
/** An optional sync object to wait on before starting the BCL. */
@@ -169,6 +181,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_V3D_CORE0_IDENT0,
DRM_V3D_PARAM_V3D_CORE0_IDENT1,
DRM_V3D_PARAM_V3D_CORE0_IDENT2,
+ DRM_V3D_PARAM_SUPPORTS_TFU,
};
struct drm_v3d_get_param {
@@ -187,6 +200,28 @@ struct drm_v3d_get_bo_offset {
__u32 offset;
};
+struct drm_v3d_submit_tfu {
+ __u32 icfg;
+ __u32 iia;
+ __u32 iis;
+ __u32 ica;
+ __u32 iua;
+ __u32 ioa;
+ __u32 ios;
+ __u32 coef[4];
+ /* First handle is the output BO, following are other inputs.
+ * 0 for unused.
+ */
+ __u32 bo_handles[4];
+ /* sync object to block on before running the TFU job. Each TFU
+ * job will execute in the order submitted to its FD. Synchronization
+ * against rendering jobs requires using sync objects.
+ */
+ __u32 in_sync;
+ /* Sync object to signal when the TFU job is done. */
+ __u32 out_sync;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 9a781f0611df..f06a789f34cd 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -47,6 +47,13 @@ extern "C" {
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
+#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
+#define VIRTGPU_EXECBUF_FLAGS (\
+ VIRTGPU_EXECBUF_FENCE_FD_IN |\
+ VIRTGPU_EXECBUF_FENCE_FD_OUT |\
+ 0)
+
struct drm_virtgpu_map {
__u64 offset; /* use for mmap system call */
__u32 handle;
@@ -54,12 +61,12 @@ struct drm_virtgpu_map {
};
struct drm_virtgpu_execbuffer {
- __u32 flags; /* for future use */
+ __u32 flags;
__u32 size;
__u64 command; /* void* */
__u64 bo_handles;
__u32 num_bo_handles;
- __u32 pad;
+ __s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
};
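A fragment sketch of how the repurposed field is meant to be used, inferred from the flag comments (the fence helper is hypothetical):

    struct drm_virtgpu_execbuffer eb = {
            .flags = VIRTGPU_EXECBUF_FENCE_FD_OUT,
            .command = (uintptr_t)cmds,
            .size = cmds_size,
            .fence_fd = -1,
    };

    if (ioctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb) == 0)
            poll_fence_fd(eb.fence_fd);     /* hypothetical helper */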
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
@@ -137,7 +144,7 @@ struct drm_virtgpu_get_caps {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
#define DRM_IOCTL_VIRTGPU_EXECBUFFER \
- DRM_IOW(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_EXECBUFFER,\
struct drm_virtgpu_execbuffer)
#define DRM_IOCTL_VIRTGPU_GETPARAM \
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index ce43d340f010..8387e0af0f76 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -50,6 +50,8 @@ enum {
*
* IOCB_FLAG_RESFD - Set if the "aio_resfd" member of the "struct iocb"
* is valid.
+ * IOCB_FLAG_IOPRIO - Set if the "aio_reqprio" member of the "struct iocb"
+ * is valid.
*/
#define IOCB_FLAG_RESFD (1 << 0)
#define IOCB_FLAG_IOPRIO (1 << 1)
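Wiring the new flag into a request then looks like this fragment sketch (raw struct iocb fields as defined in this header; prio is an ioprio encoding and is illustrative):

    struct iocb cb = { 0 };

    cb.aio_fildes = fd;
    cb.aio_lio_opcode = IOCB_CMD_PREAD;
    cb.aio_flags = IOCB_FLAG_IOPRIO;        /* aio_reqprio is valid */
    cb.aio_reqprio = prio;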
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 8f08ff9bdea0..6fa38d001d84 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -141,7 +141,7 @@ struct blk_zone_range {
*/
#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
-#define BLKGETZONESZ _IOW(0x12, 132, __u32)
-#define BLKGETNRZONES _IOW(0x12, 133, __u32)
+#define BLKGETZONESZ _IOR(0x12, 132, __u32)
+#define BLKGETNRZONES _IOR(0x12, 133, __u32)
#endif /* _UAPI_BLKZONED_H */
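With the direction corrected (_IOR: the kernel writes the value back to userspace), the natural read-style usage works; a minimal fragment sketch:

    __u32 zone_sectors = 0, nr_zones = 0;

    if (ioctl(fd, BLKGETZONESZ, &zone_sectors) == 0 &&
        ioctl(fd, BLKGETNRZONES, &nr_zones) == 0)
            printf("%u zones of %u sectors each\n",
                   nr_zones, zone_sectors);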
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 852dc17ab47a..91c43884f295 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -133,6 +133,14 @@ enum bpf_map_type {
BPF_MAP_TYPE_STACK,
};
+/* Note that tracing related programs such as
+ * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
+ * are not subject to a stable API since kernel internal data
+ * structures can change from release to release and may
+ * therefore break existing tracing BPF programs. Tracing BPF
+ * programs correspond to /a/ specific kernel which is to be
+ * analyzed, and not /a/ specific kernel /and/ all future ones.
+ */
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
BPF_PROG_TYPE_SOCKET_FILTER,
@@ -232,6 +240,20 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
+/* If BPF_F_ANY_ALIGNMENT is used in the BPF_PROG_LOAD command, the
+ * verifier will allow any alignment whatsoever. On platforms
+ * with strict alignment requirements for loads and stores (such
+ * as sparc and mips) the verifier validates that all loads and
+ * stores provably follow this requirement. This flag turns that
+ * checking and enforcement off.
+ *
+ * It is mostly used for testing when we want to validate the
+ * context and memory access aspects of the verifier, but because
+ * of an unaligned access the alignment check would trigger before
+ * the one we are interested in.
+ */
+#define BPF_F_ANY_ALIGNMENT (1U << 1)
+
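A sketch of passing the flag at load time via the raw bpf(2) syscall (insns/insn_cnt are assumed to hold a prepared test program):

    union bpf_attr attr = { 0 };

    attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
    attr.insns = (uintptr_t)insns;
    attr.insn_cnt = insn_cnt;
    attr.license = (uintptr_t)"GPL";
    attr.prog_flags = BPF_F_ANY_ALIGNMENT;  /* testing only */

    int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));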
/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
@@ -257,9 +279,6 @@ enum bpf_attach_type {
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE (1U << 2)
-/* flags for BPF_PROG_QUERY */
-#define BPF_F_QUERY_EFFECTIVE (1U << 0)
-
#define BPF_OBJ_NAME_LEN 16U
/* Flags for accessing BPF object */
@@ -269,6 +288,12 @@ enum bpf_attach_type {
/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID (1U << 5)
+/* Zero-initialize hash function seed. This should only be used for testing. */
+#define BPF_F_ZERO_SEED (1U << 6)
+
+/* flags for BPF_PROG_QUERY */
+#define BPF_F_QUERY_EFFECTIVE (1U << 0)
+
enum bpf_stack_build_id_status {
/* user space need an empty entry to identify end of a trace */
BPF_STACK_BUILD_ID_EMPTY = 0,
@@ -326,7 +351,7 @@ union bpf_attr {
__u32 log_level; /* verbosity level of verifier */
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
- __u32 kern_version; /* checked when prog_type=kprobe */
+ __u32 kern_version; /* not used */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_ifindex; /* ifindex of netdev to prep for */
@@ -335,6 +360,13 @@ union bpf_attr {
* (context accesses, allowed helpers, etc).
*/
__u32 expected_attach_type;
+ __u32 prog_btf_fd; /* fd pointing to BTF type data */
+ __u32 func_info_rec_size; /* userspace bpf_func_info size */
+ __aligned_u64 func_info; /* func info */
+ __u32 func_info_cnt; /* number of bpf_func_info records */
+ __u32 line_info_rec_size; /* userspace bpf_line_info size */
+ __aligned_u64 line_info; /* line info */
+ __u32 line_info_cnt; /* number of bpf_line_info records */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -353,8 +385,11 @@ union bpf_attr {
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
__u32 prog_fd;
__u32 retval;
- __u32 data_size_in;
- __u32 data_size_out;
+ __u32 data_size_in; /* input: len of data_in */
+ __u32 data_size_out; /* input/output: len of data_out
+ * returns ENOSPC if data_out
+ * is too small.
+ */
__aligned_u64 data_in;
__aligned_u64 data_out;
__u32 repeat;
@@ -475,18 +510,6 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_pop_elem(struct bpf_map *map, void *value)
- * Description
- * Pop an element from *map*.
- * Return
- * 0 on success, or a negative error in case of failure.
- *
- * int bpf_map_peek_elem(struct bpf_map *map, void *value)
- * Description
- * Get an element from *map* without removing it.
- * Return
- * 0 on success, or a negative error in case of failure.
- *
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
@@ -1910,9 +1933,9 @@ union bpf_attr {
* is set to metric from route (IPv4/IPv6 only), and ifindex
* is set to the device index of the nexthop from the FIB lookup.
*
- * *plen* argument is the size of the passed in struct.
- * *flags* argument can be a combination of one or more of the
- * following values:
+ * *plen* argument is the size of the passed in struct.
+ * *flags* argument can be a combination of one or more of the
+ * following values:
*
* **BPF_FIB_LOOKUP_DIRECT**
* Do a direct table lookup vs full lookup using FIB
@@ -1921,9 +1944,9 @@ union bpf_attr {
* Perform lookup from an egress perspective (default is
* ingress).
*
- * *ctx* is either **struct xdp_md** for XDP programs or
- * **struct sk_buff** tc cls_act programs.
- * Return
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** tc cls_act programs.
+ * Return
* * < 0 if any input argument is invalid
* * 0 on success (packet is forwarded, nexthop neighbor exists)
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
@@ -2068,8 +2091,8 @@ union bpf_attr {
* translated to a keycode using the rc keymap, and reported as
* an input key down event. After a period a key up event is
* generated. This period can be extended by calling either
- * **bpf_rc_keydown** () again with the same values, or calling
- * **bpf_rc_repeat** ().
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
*
* Some protocols include a toggle bit, in case the button was
* released and pressed again between consecutive scancodes.
@@ -2152,29 +2175,30 @@ union bpf_attr {
* The *flags* meaning is specific for each map type,
* and has to be 0 for cgroup local storage.
*
- * Depending on the bpf program type, a local storage area
- * can be shared between multiple instances of the bpf program,
+ * Depending on the BPF program type, a local storage area
+ * can be shared between multiple instances of the BPF program,
* running simultaneously.
*
* A user should care about the synchronization by himself.
- * For example, by using the BPF_STX_XADD instruction to alter
+ * For example, by using the **BPF_STX_XADD** instruction to alter
* the shared data.
* Return
- * Pointer to the local storage area.
+ * A pointer to the local storage area.
*
* int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
* Description
- * Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY map
- * It checks the selected sk is matching the incoming
- * request in the skb.
+ * Select a **SO_REUSEPORT** socket from a
+ * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
+ * It checks the selected socket is matching the incoming
+ * request in the socket buffer.
* Return
* 0 on success, or a negative error in case of failure.
*
- * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
- * and if non-NULL, released via **bpf_sk_release**\ ().
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
*
* The *ctx* should point to the context of the program, such as
* the skb or socket (depending on the hook in use). This is used
@@ -2187,12 +2211,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
- * If the *netns* is zero, then the socket lookup table in the
- * netns associated with the *ctx* will be used. For the TC hooks,
- * this in the netns of the device in the skb. For socket hooks,
- * this in the netns of the socket. If *netns* is non-zero, then
- * it specifies the ID of the netns relative to the netns
- * associated with the *ctx*.
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2200,13 +2226,15 @@ union bpf_attr {
* This helper is available only if the kernel was compiled with
* **CONFIG_NET** configuration option.
* Return
- * Pointer to *struct bpf_sock*, or NULL in case of failure.
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from **reuse->socks**\ [] using the hash of the tuple.
*
- * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for UDP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
- * and if non-NULL, released via **bpf_sk_release**\ ().
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
*
* The *ctx* should point to the context of the program, such as
* the skb or socket (depending on the hook in use). This is used
@@ -2219,12 +2247,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
- * If the *netns* is zero, then the socket lookup table in the
- * netns associated with the *ctx* will be used. For the TC hooks,
- * this in the netns of the device in the skb. For socket hooks,
- * this in the netns of the socket. If *netns* is non-zero, then
- * it specifies the ID of the netns relative to the netns
- * associated with the *ctx*.
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2232,31 +2262,71 @@ union bpf_attr {
* This helper is available only if the kernel was compiled with
* **CONFIG_NET** configuration option.
* Return
- * Pointer to *struct bpf_sock*, or NULL in case of failure.
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from **reuse->socks**\ [] using the hash of the tuple.
*
- * int bpf_sk_release(struct bpf_sock *sk)
+ * int bpf_sk_release(struct bpf_sock *sock)
* Description
- * Release the reference held by *sock*. *sock* must be a non-NULL
- * pointer that was returned from bpf_sk_lookup_xxx\ ().
+ * Release the reference held by *sock*. *sock* must be a
+ * non-**NULL** pointer that was returned from
+ * **bpf_sk_lookup_xxx**\ ().
* Return
* 0 on success, or a negative error in case of failure.
*
+ * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+ * Description
+ * Pop an element from *map*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+ * Description
+ * Get an element from *map* without removing it.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
* int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags)
* Description
- * For socket policies, insert *len* bytes into msg at offset
+ * For socket policies, insert *len* bytes into *msg* at offset
* *start*.
*
* If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
- * *msg* it may want to insert metadata or options into the msg.
+ * *msg* it may want to insert metadata or options into the *msg*.
* This can later be read and used by any of the lower layer BPF
* hooks.
*
* This helper may fail if under memory pressure (a malloc
* fails) in these cases BPF programs will get an appropriate
* error and BPF programs will need to handle them.
+ * Return
+ * 0 on success, or a negative error in case of failure.
*
+ * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags)
+ * Description
+ * Will remove *pop* bytes from a *msg* starting at byte *start*.
+ * This may result in **ENOMEM** errors in certain situations if
+ * an allocation and copy are required due to a full ring buffer.
+ * However, the helper will try to avoid doing the allocation
+ * if possible. Other errors can occur if input parameters are
+ * invalid, either due to the *start* byte not being a valid part of
+ * the *msg* payload and/or the *pop* value being too large.
* Return
* 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
+ * Description
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded pointer movement.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ * Return
+ * 0
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2349,7 +2419,9 @@ union bpf_attr {
FN(map_push_elem), \
FN(map_pop_elem), \
FN(map_peek_elem), \
- FN(msg_push_data),
+ FN(msg_push_data), \
+ FN(msg_pop_data), \
+ FN(rc_pointer_rel),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2405,6 +2477,9 @@ enum bpf_func_id {
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
+/* Current network namespace */
+#define BPF_F_CURRENT_NETNS (-1L)
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
@@ -2422,6 +2497,12 @@ enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6_INLINE
};
+#define __bpf_md_ptr(type, name) \
+union { \
+ type name; \
+ __u64 :64; \
+} __attribute__((aligned(8)))
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -2456,7 +2537,9 @@ struct __sk_buff {
/* ... here. */
__u32 data_meta;
- struct bpf_flow_keys *flow_keys;
+ __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
+ __u64 tstamp;
+ __u32 wire_len;
};
struct bpf_tunnel_key {
@@ -2572,8 +2655,8 @@ enum sk_action {
* be added to the end of this structure
*/
struct sk_msg_md {
- void *data;
- void *data_end;
+ __bpf_md_ptr(void *, data);
+ __bpf_md_ptr(void *, data_end);
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
@@ -2582,6 +2665,7 @@ struct sk_msg_md {
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
+ __u32 size; /* Total size of sk_msg */
};
struct sk_reuseport_md {
@@ -2589,8 +2673,9 @@ struct sk_reuseport_md {
* Start of directly accessible data. It begins from
* the tcp/udp header.
*/
- void *data;
- void *data_end; /* End of directly accessible data */
+ __bpf_md_ptr(void *, data);
+ /* End of directly accessible data */
+ __bpf_md_ptr(void *, data_end);
/*
* Total length of packet (starting from the tcp/udp header).
* Note that the directly accessible bytes (data_end - data)
@@ -2631,6 +2716,18 @@ struct bpf_prog_info {
__u32 nr_jited_func_lens;
__aligned_u64 jited_ksyms;
__aligned_u64 jited_func_lens;
+ __u32 btf_id;
+ __u32 func_info_rec_size;
+ __aligned_u64 func_info;
+ __u32 nr_func_info;
+ __u32 nr_line_info;
+ __aligned_u64 line_info;
+ __aligned_u64 jited_line_info;
+ __u32 nr_jited_line_info;
+ __u32 line_info_rec_size;
+ __u32 jited_line_info_rec_size;
+ __u32 nr_prog_tags;
+ __aligned_u64 prog_tags;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -2942,4 +3039,19 @@ struct bpf_flow_keys {
};
};
+struct bpf_func_info {
+ __u32 insn_off;
+ __u32 type_id;
+};
+
+#define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
+#define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
+
+struct bpf_line_info {
+ __u32 insn_off;
+ __u32 file_name_off;
+ __u32 line_off;
+ __u32 line_col;
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
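A sketch of decoding the packed line_col field with the new macros (strtab is assumed to point at the program's BTF string section):

    static void print_line_info(const struct bpf_line_info *li,
                                const char *strtab)
    {
            printf("%s:%u:%u\n",
                   strtab + li->file_name_off,
                   BPF_LINE_INFO_LINE_NUM(li->line_col),
                   BPF_LINE_INFO_LINE_COL(li->line_col));
    }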
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 972265f32871..7b7475ef2f17 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -34,13 +34,16 @@ struct btf_type {
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
* bits 24-27: kind (e.g. int, ptr, array...etc)
- * bits 28-31: unused
+ * bits 28-30: unused
+ * bit 31: kind_flag, currently used by
+ * struct, union and fwd
*/
__u32 info;
/* "size" is used by INT, ENUM, STRUCT and UNION.
* "size" tells the size of the type it is describing.
*
- * "type" is used by PTR, TYPEDEF, VOLATILE, CONST and RESTRICT.
+ * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
+ * FUNC and FUNC_PROTO.
* "type" is a type_id referring to another type.
*/
union {
@@ -51,6 +54,7 @@ struct btf_type {
#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
+#define BTF_INFO_KFLAG(info) ((info) >> 31)
#define BTF_KIND_UNKN 0 /* Unknown */
#define BTF_KIND_INT 1 /* Integer */
@@ -64,8 +68,10 @@ struct btf_type {
#define BTF_KIND_VOLATILE 9 /* Volatile */
#define BTF_KIND_CONST 10 /* Const */
#define BTF_KIND_RESTRICT 11 /* Restrict */
-#define BTF_KIND_MAX 11
-#define NR_BTF_KINDS 12
+#define BTF_KIND_FUNC 12 /* Function */
+#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
+#define BTF_KIND_MAX 13
+#define NR_BTF_KINDS 14
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
@@ -107,7 +113,29 @@ struct btf_array {
struct btf_member {
__u32 name_off;
__u32 type;
- __u32 offset; /* offset in bits */
+ /* If the type info kind_flag is set, the btf_member offset
+ * contains both member bitfield size and bit offset. The
+ * bitfield size is set for bitfield members. If the type
+ * info kind_flag is not set, the offset contains only bit
+ * offset.
+ */
+ __u32 offset;
+};
+
+/* If the struct/union type info kind_flag is set, the
+ * following two macros are used to access bitfield_size
+ * and bit_offset from btf_member.offset.
+ */
+#define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24)
+#define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff)
+
+/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param".
+ * The exact number of btf_param is stored in the vlen (of the
+ * info in "struct btf_type").
+ */
+struct btf_param {
+ __u32 name_off;
+ __u32 type;
};
#endif /* _UAPI__LINUX_BTF_H__ */
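A sketch of the member-offset decoding the comment above describes; whether offset is packed depends on the parent type's kind_flag:

    static void btf_member_bits(const struct btf_type *t,
                                const struct btf_member *m,
                                __u32 *bit_offset, __u32 *bitfield_size)
    {
            if (BTF_INFO_KFLAG(t->info)) {
                    *bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
                    *bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
            } else {
                    *bit_offset = m->offset;        /* plain bit offset */
                    *bitfield_size = 0;             /* not a bitfield */
            }
    }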
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 5ca1d21fc4a7..e0763bc4158e 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -269,6 +269,7 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7)
#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8)
#define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9)
+#define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index aff1356c2bb8..e974f4bb5378 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -458,6 +458,7 @@ struct btrfs_free_space_header {
#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33)
#define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34)
#define BTRFS_SUPER_FLAG_CHANGING_FSID (1ULL << 35)
+#define BTRFS_SUPER_FLAG_CHANGING_FSID_V2 (1ULL << 36)
/*
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 6dafbc3e4414..4dc1603919ce 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -76,45 +76,69 @@ struct crypto_user_alg {
__u32 cru_flags;
};
-struct crypto_stat {
- char type[CRYPTO_MAX_NAME];
- union {
- __u32 stat_encrypt_cnt;
- __u32 stat_compress_cnt;
- __u32 stat_generate_cnt;
- __u32 stat_hash_cnt;
- __u32 stat_setsecret_cnt;
- };
- union {
- __u64 stat_encrypt_tlen;
- __u64 stat_compress_tlen;
- __u64 stat_generate_tlen;
- __u64 stat_hash_tlen;
- };
- union {
- __u32 stat_akcipher_err_cnt;
- __u32 stat_cipher_err_cnt;
- __u32 stat_compress_err_cnt;
- __u32 stat_aead_err_cnt;
- __u32 stat_hash_err_cnt;
- __u32 stat_rng_err_cnt;
- __u32 stat_kpp_err_cnt;
- };
- union {
- __u32 stat_decrypt_cnt;
- __u32 stat_decompress_cnt;
- __u32 stat_seed_cnt;
- __u32 stat_generate_public_key_cnt;
- };
- union {
- __u64 stat_decrypt_tlen;
- __u64 stat_decompress_tlen;
- };
- union {
- __u32 stat_verify_cnt;
- __u32 stat_compute_shared_secret_cnt;
- };
- __u32 stat_sign_cnt;
+struct crypto_stat_aead {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_encrypt_cnt;
+ __u64 stat_encrypt_tlen;
+ __u64 stat_decrypt_cnt;
+ __u64 stat_decrypt_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_akcipher {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_encrypt_cnt;
+ __u64 stat_encrypt_tlen;
+ __u64 stat_decrypt_cnt;
+ __u64 stat_decrypt_tlen;
+ __u64 stat_verify_cnt;
+ __u64 stat_sign_cnt;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_cipher {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_encrypt_cnt;
+ __u64 stat_encrypt_tlen;
+ __u64 stat_decrypt_cnt;
+ __u64 stat_decrypt_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_compress {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_compress_cnt;
+ __u64 stat_compress_tlen;
+ __u64 stat_decompress_cnt;
+ __u64 stat_decompress_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_hash {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_hash_cnt;
+ __u64 stat_hash_tlen;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_kpp {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_setsecret_cnt;
+ __u64 stat_generate_public_key_cnt;
+ __u64 stat_compute_shared_secret_cnt;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_rng {
+ char type[CRYPTO_MAX_NAME];
+ __u64 stat_generate_cnt;
+ __u64 stat_generate_tlen;
+ __u64 stat_seed_cnt;
+ __u64 stat_err_cnt;
+};
+
+struct crypto_stat_larval {
+ char type[CRYPTO_MAX_NAME];
};
struct crypto_report_larval {
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 79407bbd296d..6e52d3660654 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -163,6 +163,11 @@ enum devlink_param_cmode {
DEVLINK_PARAM_CMODE_MAX = __DEVLINK_PARAM_CMODE_MAX - 1
};
+enum devlink_param_fw_load_policy_value {
+ DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER,
+ DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH,
+};
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index c5358e0ae7c5..e4d6ddd93567 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -420,10 +420,12 @@ typedef struct elf64_shdr {
#define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */
#define NT_ARM_SYSTEM_CALL 0x404 /* ARM system call number */
#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
+#define NT_ARM_PAC_MASK 0x406 /* ARM pointer authentication code masks */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
+#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index c8f8e2455bf3..17be76aeb468 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -882,7 +882,7 @@ struct ethtool_rx_flow_spec {
__u32 location;
};
-/* How rings are layed out when accessing virtual functions or
+/* How rings are laid out when accessing virtual functions or
* offloaded queues is device specific. To allow users to do flow
* steering and specify these queues the ring cookie is partitioned
* into a 32bit queue index with an 8 bit virtual function id.
@@ -891,7 +891,7 @@ struct ethtool_rx_flow_spec {
* devices start supporting PCIe w/ARI. However at the moment I
* do not know of any devices that support this so I do not reserve
* space for this at this time. If a future patch consumes the next
- * byte it should be aware of this possiblity.
+ * byte it should be aware of this possibility.
*/
#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFLL
#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000LL
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index b86740d1c50a..909c98fcace2 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -10,11 +10,13 @@
#define FAN_CLOSE_WRITE 0x00000008 /* Writtable file closed */
#define FAN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */
#define FAN_OPEN 0x00000020 /* File was opened */
+#define FAN_OPEN_EXEC 0x00001000 /* File was opened for exec */
#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
+#define FAN_OPEN_EXEC_PERM 0x00040000 /* File open/exec in perm check */
#define FAN_ONDIR 0x40000000 /* event occurred against dir */
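The new exec events are requested like any other fanotify mask bit; a minimal userspace sketch using the standard fanotify_init(2)/fanotify_mark(2) calls:

    int fan = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

    if (fan >= 0)
            fanotify_mark(fan, FAN_MARK_ADD | FAN_MARK_MOUNT,
                          FAN_OPEN_EXEC, AT_FDCWD, "/");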
diff --git a/include/uapi/linux/hash_info.h b/include/uapi/linux/hash_info.h
index eea5d02c58de..74a8609fcb4d 100644
--- a/include/uapi/linux/hash_info.h
+++ b/include/uapi/linux/hash_info.h
@@ -33,6 +33,8 @@ enum hash_algo {
HASH_ALGO_TGR_160,
HASH_ALGO_TGR_192,
HASH_ALGO_SM3_256,
+ HASH_ALGO_STREEBOG_256,
+ HASH_ALGO_STREEBOG_512,
HASH_ALGO__LAST
};
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index e41eda3c71f1..773e476a8e54 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -292,4 +292,25 @@ struct br_mcast_stats {
__u64 mcast_bytes[BR_MCAST_DIR_SIZE];
__u64 mcast_packets[BR_MCAST_DIR_SIZE];
};
+
+/* bridge boolean options
+ * BR_BOOLOPT_NO_LL_LEARN - disable learning from link-local packets
+ *
+ * IMPORTANT: if adding a new option do not forget to handle
+ * it in br_boolopt_toggle/get and bridge sysfs
+ */
+enum br_boolopt_id {
+ BR_BOOLOPT_NO_LL_LEARN,
+ BR_BOOLOPT_MAX
+};
+
+/* struct br_boolopt_multi - change multiple bridge boolean options
+ *
+ * @optval: new option values (bit per option)
+ * @optmask: options to change (bit per option)
+ */
+struct br_boolopt_multi {
+ __u32 optval;
+ __u32 optmask;
+};
#endif /* _UAPI_LINUX_IF_BRIDGE_H */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 1debfa42cba1..d6533828123a 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -288,6 +288,7 @@ enum {
IFLA_BR_MCAST_IGMP_VERSION,
IFLA_BR_MCAST_MLD_VERSION,
IFLA_BR_VLAN_STATS_PER_PORT,
+ IFLA_BR_MULTI_BOOLOPT,
__IFLA_BR_MAX,
};
@@ -533,6 +534,7 @@ enum {
IFLA_VXLAN_LABEL,
IFLA_VXLAN_GPE,
IFLA_VXLAN_TTL_INHERIT,
+ IFLA_VXLAN_DF,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
@@ -542,6 +544,14 @@ struct ifla_vxlan_port_range {
__be16 high;
};
+enum ifla_vxlan_df {
+ VXLAN_DF_UNSET = 0,
+ VXLAN_DF_SET,
+ VXLAN_DF_INHERIT,
+ __VXLAN_DF_END,
+ VXLAN_DF_MAX = __VXLAN_DF_END - 1,
+};
+
/* GENEVE section */
enum {
IFLA_GENEVE_UNSPEC,
@@ -557,10 +567,19 @@ enum {
IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
IFLA_GENEVE_LABEL,
IFLA_GENEVE_TTL_INHERIT,
+ IFLA_GENEVE_DF,
__IFLA_GENEVE_MAX
};
#define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1)
+enum ifla_geneve_df {
+ GENEVE_DF_UNSET = 0,
+ GENEVE_DF_SET,
+ GENEVE_DF_INHERIT,
+ __GENEVE_DF_END,
+ GENEVE_DF_MAX = __GENEVE_DF_END - 1,
+};
+
/* PPP section */
enum {
IFLA_PPP_UNSPEC,
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index ee432cd3018c..23a6753b37df 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -59,6 +59,7 @@
#define TUNGETVNETBE _IOR('T', 223, int)
#define TUNSETSTEERINGEBPF _IOR('T', 224, int)
#define TUNSETFILTEREBPF _IOR('T', 225, int)
+#define TUNSETCARRIER _IOW('T', 226, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 1b3d148c4560..7d9105533c7b 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -160,4 +160,24 @@ enum {
};
#define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1)
+
+#define TUNNEL_CSUM __cpu_to_be16(0x01)
+#define TUNNEL_ROUTING __cpu_to_be16(0x02)
+#define TUNNEL_KEY __cpu_to_be16(0x04)
+#define TUNNEL_SEQ __cpu_to_be16(0x08)
+#define TUNNEL_STRICT __cpu_to_be16(0x10)
+#define TUNNEL_REC __cpu_to_be16(0x20)
+#define TUNNEL_VERSION __cpu_to_be16(0x40)
+#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
+#define TUNNEL_DONT_FRAGMENT __cpu_to_be16(0x0100)
+#define TUNNEL_OAM __cpu_to_be16(0x0200)
+#define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400)
+#define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800)
+#define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000)
+#define TUNNEL_NOCACHE __cpu_to_be16(0x2000)
+#define TUNNEL_ERSPAN_OPT __cpu_to_be16(0x4000)
+
+#define TUNNEL_OPTIONS_PRESENT \
+ (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT | TUNNEL_ERSPAN_OPT)
+
#endif /* _UAPI_IF_TUNNEL_H_ */
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 48e8a225b985..f6052e70bf40 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -266,10 +266,14 @@ struct sockaddr_in {
#define IN_CLASSD(a) ((((long int) (a)) & 0xf0000000) == 0xe0000000)
#define IN_MULTICAST(a) IN_CLASSD(a)
-#define IN_MULTICAST_NET 0xF0000000
+#define IN_MULTICAST_NET 0xe0000000
-#define IN_EXPERIMENTAL(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
-#define IN_BADCLASS(a) IN_EXPERIMENTAL((a))
+#define IN_BADCLASS(a) (((long int) (a)) == (long int)0xffffffff)
+#define IN_EXPERIMENTAL(a) IN_BADCLASS((a))
+
+#define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000)
+#define IN_CLASSE_NET 0xffffffff
+#define IN_CLASSE_NSHIFT 0
/* Address to accept any incoming messages. */
#define INADDR_ANY ((unsigned long int) 0x00000000)
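With the corrected definitions, a quick sanity check reads as the names suggest (a fragment sketch; addr is a host-byte-order IPv4 address):

    if (IN_CLASSE(addr) && !IN_BADCLASS(addr))
            ; /* reserved class-E range, but not 255.255.255.255 */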
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 3eb5a4c3d60a..ae366b87426a 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -752,6 +752,15 @@
#define ABS_MISC 0x28
+/*
+ * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6 and userspace needs to detect if
+ * the next ABS_* event is correct or is just ABS_MISC + n.
+ * We define here ABS_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define ABS_RESERVED 0x2e
+
#define ABS_MT_SLOT 0x2f /* MT slot being modified */
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index b01eb502d49c..e622fd1fbd46 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -398,6 +398,24 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
__u32 n_success; /* to/from KFD */
};
+struct kfd_ioctl_get_dmabuf_info_args {
+ __u64 size; /* from KFD */
+ __u64 metadata_ptr; /* to KFD */
+ __u32 metadata_size; /* to KFD (space allocated by user)
+ * from KFD (actual metadata size)
+ */
+ __u32 gpu_id; /* from KFD */
+ __u32 flags; /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
+ __u32 dmabuf_fd; /* to KFD */
+};
+
+struct kfd_ioctl_import_dmabuf_args {
+ __u64 va_addr; /* to KFD */
+ __u64 handle; /* from KFD */
+ __u32 gpu_id; /* to KFD */
+ __u32 dmabuf_fd; /* to KFD */
+};
+
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
@@ -486,7 +504,13 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE \
AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)
+#define AMDKFD_IOC_GET_DMABUF_INFO \
+ AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)
+
+#define AMDKFD_IOC_IMPORT_DMABUF \
+ AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x1C
+#define AMDKFD_COMMAND_END 0x1E
#endif
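A sketch of the in/out metadata_size handshake described in the struct comments, assuming a zero-sized first call reports the required size:

    struct kfd_ioctl_get_dmabuf_info_args args = {
            .dmabuf_fd = dmabuf_fd,         /* to KFD */
    };

    if (ioctl(kfd_fd, AMDKFD_IOC_GET_DMABUF_INFO, &args) == 0) {
            void *meta = malloc(args.metadata_size);

            args.metadata_ptr = (uintptr_t)meta;
            /* metadata_size is in/out: tell KFD how much space we have */
            ioctl(kfd_fd, AMDKFD_IOC_GET_DMABUF_INFO, &args);
    }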
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 2b7a652c9fa4..6d4ea4b6c922 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -492,6 +492,17 @@ struct kvm_dirty_log {
};
};
+/* for KVM_CLEAR_DIRTY_LOG */
+struct kvm_clear_dirty_log {
+ __u32 slot;
+ __u32 num_pages;
+ __u64 first_page;
+ union {
+ void __user *dirty_bitmap; /* one bit per page */
+ __u64 padding2;
+ };
+};
+
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
@@ -975,6 +986,8 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
#define KVM_CAP_EXCEPTION_PAYLOAD 164
#define KVM_CAP_ARM_VM_IPA_SIZE 165
+#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166
+#define KVM_CAP_HYPERV_CPUID 167
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1421,6 +1434,12 @@ struct kvm_enc_region {
#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
+/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */
+#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
+
+/* Available with KVM_CAP_HYPERV_CPUID */
+#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
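
A hedged sketch of driving the new ioctl (vm_fd and the page range are placeholders; KVM_CAP_MANUAL_DIRTY_LOG_PROTECT must already have been enabled via KVM_ENABLE_CAP, and the bitmap is the one obtained from KVM_GET_DIRTY_LOG):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int clear_dirty(int vm_fd, uint32_t slot, uint64_t first_page,
                           uint32_t num_pages, void *bitmap)
    {
            struct kvm_clear_dirty_log clr = {
                    .slot = slot,
                    .num_pages = num_pages,
                    .first_page = first_page,
                    .dirty_bitmap = bitmap,  /* one bit per page in range */
            };

            /* Re-protects only pages whose bits are set in the range. */
            return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clr);
    }
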
diff --git a/include/uapi/linux/ncsi.h b/include/uapi/linux/ncsi.h
index 0a26a5576645..a3f87c54fdb3 100644
--- a/include/uapi/linux/ncsi.h
+++ b/include/uapi/linux/ncsi.h
@@ -26,6 +26,12 @@
* @NCSI_CMD_SEND_CMD: send NC-SI command to network card.
* Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID
* and NCSI_ATTR_CHANNEL_ID.
+ * @NCSI_CMD_SET_PACKAGE_MASK: set a whitelist of allowed packages.
+ * Requires NCSI_ATTR_IFINDEX and NCSI_ATTR_PACKAGE_MASK.
+ * @NCSI_CMD_SET_CHANNEL_MASK: set a whitelist of allowed channels.
+ * Requires NCSI_ATTR_IFINDEX, NCSI_ATTR_PACKAGE_ID, and
+ * NCSI_ATTR_CHANNEL_MASK. If NCSI_ATTR_CHANNEL_ID is present it sets
+ * the primary channel.
* @NCSI_CMD_MAX: highest command number
*/
enum ncsi_nl_commands {
@@ -34,6 +40,8 @@ enum ncsi_nl_commands {
NCSI_CMD_SET_INTERFACE,
NCSI_CMD_CLEAR_INTERFACE,
NCSI_CMD_SEND_CMD,
+ NCSI_CMD_SET_PACKAGE_MASK,
+ NCSI_CMD_SET_CHANNEL_MASK,
__NCSI_CMD_AFTER_LAST,
NCSI_CMD_MAX = __NCSI_CMD_AFTER_LAST - 1
@@ -48,6 +56,10 @@ enum ncsi_nl_commands {
* @NCSI_ATTR_PACKAGE_ID: package ID
* @NCSI_ATTR_CHANNEL_ID: channel ID
* @NCSI_ATTR_DATA: command payload
+ * @NCSI_ATTR_MULTI_FLAG: flag to signal that multi-mode should be enabled with
+ * NCSI_CMD_SET_PACKAGE_MASK or NCSI_CMD_SET_CHANNEL_MASK.
+ * @NCSI_ATTR_PACKAGE_MASK: 32-bit mask of allowed packages.
+ * @NCSI_ATTR_CHANNEL_MASK: 32-bit mask of allowed channels.
* @NCSI_ATTR_MAX: highest attribute number
*/
enum ncsi_nl_attrs {
@@ -57,6 +69,9 @@ enum ncsi_nl_attrs {
NCSI_ATTR_PACKAGE_ID,
NCSI_ATTR_CHANNEL_ID,
NCSI_ATTR_DATA,
+ NCSI_ATTR_MULTI_FLAG,
+ NCSI_ATTR_PACKAGE_MASK,
+ NCSI_ATTR_CHANNEL_MASK,
__NCSI_ATTR_AFTER_LAST,
NCSI_ATTR_MAX = __NCSI_ATTR_AFTER_LAST - 1
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index 998155444e0d..cd144e3099a3 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -28,6 +28,7 @@ enum {
NDA_MASTER,
NDA_LINK_NETNSID,
NDA_SRC_VNI,
+ NDA_PROTOCOL, /* Originator of entry */
__NDA_MAX
};
diff --git a/include/uapi/linux/net_namespace.h b/include/uapi/linux/net_namespace.h
index 0187c74d8889..9f9956809565 100644
--- a/include/uapi/linux/net_namespace.h
+++ b/include/uapi/linux/net_namespace.h
@@ -16,6 +16,8 @@ enum {
NETNSA_NSID,
NETNSA_PID,
NETNSA_FD,
+ NETNSA_TARGET_NSID,
+ NETNSA_CURRENT_NSID,
__NETNSA_MAX,
};
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 97ff3c17ec4d..e5b39721c6e4 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -155,8 +155,8 @@ enum txtime_flags {
};
struct sock_txtime {
- clockid_t clockid; /* reference clockid */
- __u32 flags; /* as defined by enum txtime_flags */
+ __kernel_clockid_t clockid;/* reference clockid */
+ __u32 flags; /* as defined by enum txtime_flags */
};
#endif /* _NET_TIMESTAMPING_H */
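
The type change above does not alter usage; a hedged sketch of opting a socket into SO_TXTIME (fd is a placeholder, and SO_TXTIME is assumed to be visible through <sys/socket.h>):

    #include <time.h>
    #include <sys/socket.h>
    #include <linux/net_tstamp.h>

    static int enable_txtime(int fd)
    {
            struct sock_txtime cfg = {
                    .clockid = CLOCK_TAI,   /* reference clock for txtime */
                    .flags = 0,             /* see enum txtime_flags */
            };

            return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
    }
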
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index cca10e767cd8..ca9e63d6e0e4 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -34,10 +34,6 @@
/* only for userspace compatibility */
#ifndef __KERNEL__
-/* Generic cache responses from hook functions.
- <= 0x2000 is used for protocol-flags. */
-#define NFC_UNKNOWN 0x4000
-#define NFC_ALTERED 0x8000
/* NF_VERDICT_BITS should be 8 now, but userspace might break if this changes */
#define NF_VERDICT_BITS 16
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index 60236f694143..ea69ca21ff23 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -13,8 +13,9 @@
#include <linux/types.h>
-/* The protocol version */
-#define IPSET_PROTOCOL 6
+/* The protocol versions */
+#define IPSET_PROTOCOL 7
+#define IPSET_PROTOCOL_MIN 6
/* The max length of strings including NUL: set and type identifiers */
#define IPSET_MAXNAMELEN 32
@@ -38,17 +39,19 @@ enum ipset_cmd {
IPSET_CMD_TEST, /* 11: Test an element in a set */
IPSET_CMD_HEADER, /* 12: Get set header data only */
IPSET_CMD_TYPE, /* 13: Get set type */
+ IPSET_CMD_GET_BYNAME, /* 14: Get set index by name */
+ IPSET_CMD_GET_BYINDEX, /* 15: Get set name by index */
IPSET_MSG_MAX, /* Netlink message commands */
/* Commands in userspace: */
- IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 14: Enter restore mode */
- IPSET_CMD_HELP, /* 15: Get help */
- IPSET_CMD_VERSION, /* 16: Get program version */
- IPSET_CMD_QUIT, /* 17: Quit from interactive mode */
+ IPSET_CMD_RESTORE = IPSET_MSG_MAX, /* 16: Enter restore mode */
+ IPSET_CMD_HELP, /* 17: Get help */
+ IPSET_CMD_VERSION, /* 18: Get program version */
+ IPSET_CMD_QUIT, /* 19: Quit from interactive mode */
IPSET_CMD_MAX,
- IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 18: Commit buffered commands */
+ IPSET_CMD_COMMIT = IPSET_CMD_MAX, /* 20: Commit buffered commands */
};
/* Attributes at command level */
@@ -66,6 +69,7 @@ enum {
IPSET_ATTR_LINENO, /* 9: Restore lineno */
IPSET_ATTR_PROTOCOL_MIN, /* 10: Minimal supported version number */
IPSET_ATTR_REVISION_MIN = IPSET_ATTR_PROTOCOL_MIN, /* type rev min */
+ IPSET_ATTR_INDEX, /* 11: Kernel index of set */
__IPSET_ATTR_CMD_MAX,
};
#define IPSET_ATTR_CMD_MAX (__IPSET_ATTR_CMD_MAX - 1)
@@ -223,6 +227,7 @@ enum ipset_adt {
/* Sets are identified by an index in kernel space. Tweak with ip_set_id_t
* and IPSET_INVALID_ID if you want to increase the max number of sets.
+ * Also, IPSET_ATTR_INDEX must be changed.
*/
typedef __u16 ip_set_id_t;
diff --git a/include/uapi/linux/netfilter_decnet.h b/include/uapi/linux/netfilter_decnet.h
index 61f1c7dfd033..3c77f54560f2 100644
--- a/include/uapi/linux/netfilter_decnet.h
+++ b/include/uapi/linux/netfilter_decnet.h
@@ -15,16 +15,6 @@
#include <limits.h> /* for INT_MIN, INT_MAX */
-/* IP Cache bits. */
-/* Src IP address. */
-#define NFC_DN_SRC 0x0001
-/* Dest IP address. */
-#define NFC_DN_DST 0x0002
-/* Input device. */
-#define NFC_DN_IF_IN 0x0004
-/* Output device. */
-#define NFC_DN_IF_OUT 0x0008
-
/* kernel define is in netfilter_defs.h */
#define NF_DN_NUMHOOKS 7
#endif /* ! __KERNEL__ */
diff --git a/include/uapi/linux/netfilter_ipv4.h b/include/uapi/linux/netfilter_ipv4.h
index c3b060775e13..155e77d6a42d 100644
--- a/include/uapi/linux/netfilter_ipv4.h
+++ b/include/uapi/linux/netfilter_ipv4.h
@@ -13,34 +13,6 @@
#include <limits.h> /* for INT_MIN, INT_MAX */
-/* IP Cache bits. */
-/* Src IP address. */
-#define NFC_IP_SRC 0x0001
-/* Dest IP address. */
-#define NFC_IP_DST 0x0002
-/* Input device. */
-#define NFC_IP_IF_IN 0x0004
-/* Output device. */
-#define NFC_IP_IF_OUT 0x0008
-/* TOS. */
-#define NFC_IP_TOS 0x0010
-/* Protocol. */
-#define NFC_IP_PROTO 0x0020
-/* IP options. */
-#define NFC_IP_OPTIONS 0x0040
-/* Frag & flags. */
-#define NFC_IP_FRAG 0x0080
-
-/* Per-protocol information: only matters if proto match. */
-/* TCP flags. */
-#define NFC_IP_TCPFLAGS 0x0100
-/* Source port. */
-#define NFC_IP_SRC_PT 0x0200
-/* Dest port. */
-#define NFC_IP_DST_PT 0x0400
-/* Something else about the proto */
-#define NFC_IP_PROTO_UNKNOWN 0x2000
-
/* IP Hooks */
/* After promisc drops, checksum checks. */
#define NF_IP_PRE_ROUTING 0
diff --git a/include/uapi/linux/netfilter_ipv6.h b/include/uapi/linux/netfilter_ipv6.h
index dc624fd24d25..80aa9b0799af 100644
--- a/include/uapi/linux/netfilter_ipv6.h
+++ b/include/uapi/linux/netfilter_ipv6.h
@@ -16,35 +16,6 @@
#include <limits.h> /* for INT_MIN, INT_MAX */
-/* IP Cache bits. */
-/* Src IP address. */
-#define NFC_IP6_SRC 0x0001
-/* Dest IP address. */
-#define NFC_IP6_DST 0x0002
-/* Input device. */
-#define NFC_IP6_IF_IN 0x0004
-/* Output device. */
-#define NFC_IP6_IF_OUT 0x0008
-/* TOS. */
-#define NFC_IP6_TOS 0x0010
-/* Protocol. */
-#define NFC_IP6_PROTO 0x0020
-/* IP options. */
-#define NFC_IP6_OPTIONS 0x0040
-/* Frag & flags. */
-#define NFC_IP6_FRAG 0x0080
-
-
-/* Per-protocol information: only matters if proto match. */
-/* TCP flags. */
-#define NFC_IP6_TCPFLAGS 0x0100
-/* Source port. */
-#define NFC_IP6_SRC_PT 0x0200
-/* Dest port. */
-#define NFC_IP6_DST_PT 0x0400
-/* Something else about the proto */
-#define NFC_IP6_PROTO_UNKNOWN 0x2000
-
/* IP6 Hooks */
/* After promisc drops, checksum checks. */
#define NF_IP6_PRE_ROUTING 0
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 486ed1f0c0bc..0a4d73317759 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -155,7 +155,7 @@ enum nlmsgerr_attrs {
#define NETLINK_LIST_MEMBERSHIPS 9
#define NETLINK_CAP_ACK 10
#define NETLINK_EXT_ACK 11
-#define NETLINK_DUMP_STRICT_CHK 12
+#define NETLINK_GET_STRICT_CHK 12
struct nl_pktinfo {
__u32 group;
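
Only the option name changes here; a hedged sketch of enabling it on a dump socket (nl_fd is a placeholder; SOL_NETLINK is assumed to come from <sys/socket.h> on a reasonably recent libc):

    #include <sys/socket.h>
    #include <linux/netlink.h>

    static int enable_strict_check(int nl_fd)
    {
            int one = 1;

            return setsockopt(nl_fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK,
                              &one, sizeof(one));
    }
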
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 6d610bae30a9..31ae5c7f10e3 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1036,6 +1036,35 @@
* @NL80211_CMD_GET_FTM_RESPONDER_STATS: Retrieve FTM responder statistics, in
* the %NL80211_ATTR_FTM_RESPONDER_STATS attribute.
*
+ * @NL80211_CMD_PEER_MEASUREMENT_START: start a (set of) peer measurement(s)
+ * with the given parameters, which are encapsulated in the nested
+ * %NL80211_ATTR_PEER_MEASUREMENTS attribute. Optionally, MAC address
+ * randomization may be enabled and configured by specifying the
+ * %NL80211_ATTR_MAC and %NL80211_ATTR_MAC_MASK attributes.
+ * If a timeout is requested, use the %NL80211_ATTR_TIMEOUT attribute.
+ * A u64 cookie for further %NL80211_ATTR_COOKIE use is returned in
+ * the netlink extended ack message.
+ *
+ * To cancel a measurement, close the socket that requested it.
+ *
+ * Measurement results are reported to the socket that requested the
+ * measurement using @NL80211_CMD_PEER_MEASUREMENT_RESULT when they
+ * become available, so applications must ensure a large enough socket
+ * buffer size.
+ *
+ * Depending on driver support it may or may not be possible to start
+ * multiple concurrent measurements.
+ * @NL80211_CMD_PEER_MEASUREMENT_RESULT: This command number is used for the
+ * result notification from the driver to the requesting socket.
+ * @NL80211_CMD_PEER_MEASUREMENT_COMPLETE: Notification only, indicating that
+ * the measurement completed, using the measurement cookie
+ * (%NL80211_ATTR_COOKIE).
+ *
+ * @NL80211_CMD_NOTIFY_RADAR: Notify the kernel that a radar signal was
+ * detected and reported by a neighboring device on the channel
+ * indicated by %NL80211_ATTR_WIPHY_FREQ and other attributes
+ * determining the width and type.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1250,6 +1279,12 @@ enum nl80211_commands {
NL80211_CMD_GET_FTM_RESPONDER_STATS,
+ NL80211_CMD_PEER_MEASUREMENT_START,
+ NL80211_CMD_PEER_MEASUREMENT_RESULT,
+ NL80211_CMD_PEER_MEASUREMENT_COMPLETE,
+
+ NL80211_CMD_NOTIFY_RADAR,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1706,7 +1741,7 @@ enum nl80211_commands {
* the values passed in @NL80211_ATTR_SCAN_SSIDS (eg. if an SSID
* is included in the probe request, but the match attributes
* will never let it go through), -EINVAL may be returned.
- * If ommited, no filtering is done.
+ * If omitted, no filtering is done.
*
* @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported
* interface combinations. In each nested item, it contains attributes
@@ -1811,7 +1846,7 @@ enum nl80211_commands {
*
* @NL80211_ATTR_INACTIVITY_TIMEOUT: timeout value in seconds, this can be
* used by the drivers which has MLME in firmware and does not have support
- * to report per station tx/rx activity to free up the staion entry from
+ * to report per station tx/rx activity to free up the station entry from
* the list. This needs to be used when the driver advertises the
* capability to timeout the stations.
*
@@ -2172,7 +2207,7 @@ enum nl80211_commands {
*
* @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present the RSSI level for BSSs in
* the specified band is to be adjusted before doing
- * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparision to figure out
+ * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparison to figure out
* better BSSs. The attribute value is a packed structure
* value as specified by &struct nl80211_bss_select_rssi_adjust.
*
@@ -2254,6 +2289,16 @@ enum nl80211_commands {
* @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder
* statistics, see &enum nl80211_ftm_responder_stats.
*
+ * @NL80211_ATTR_TIMEOUT: Timeout for the given operation in milliseconds (u32),
+ * if the attribute is not given no timeout is requested. Note that 0 is an
+ * invalid value.
+ *
+ * @NL80211_ATTR_PEER_MEASUREMENTS: peer measurements request (and result)
+ * data, uses nested attributes specified in
+ * &enum nl80211_peer_measurement_attrs.
+ * This is also used for capability advertisement in the wiphy information,
+ * with the appropriate sub-attributes.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2699,6 +2744,10 @@ enum nl80211_attrs {
NL80211_ATTR_FTM_RESPONDER_STATS,
+ NL80211_ATTR_TIMEOUT,
+
+ NL80211_ATTR_PEER_MEASUREMENTS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3074,6 +3123,8 @@ enum nl80211_sta_bss_param {
* with an FCS error (u32, from this station). This count may not include
* some packets with an FCS error due to TA corruption. Hence this counter
* might not be fully accurate.
+ * @NL80211_STA_INFO_CONNECTED_TO_GATE: set to true if STA has a path to a
+ * mesh gate (u8, 0 or 1)
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -3116,6 +3167,7 @@ enum nl80211_sta_info {
NL80211_STA_INFO_ACK_SIGNAL_AVG,
NL80211_STA_INFO_RX_MPDUS,
NL80211_STA_INFO_FCS_ERROR_COUNT,
+ NL80211_STA_INFO_CONNECTED_TO_GATE,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -3895,6 +3947,11 @@ enum nl80211_mesh_power_mode {
* remove it from the STA's list of peers. You may set this to 0 to disable
* the removal of the STA. Default is 30 minutes.
*
+ * @NL80211_MESHCONF_CONNECTED_TO_GATE: If set to true then this mesh STA
+ * will advertise that it is connected to a gate in the mesh formation
+ * field. If left unset then the mesh formation field will only
+ * advertise such if there is an active root mesh path.
+ *
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
*/
enum nl80211_meshconf_params {
@@ -3927,6 +3984,7 @@ enum nl80211_meshconf_params {
NL80211_MESHCONF_POWER_MODE,
NL80211_MESHCONF_AWAKE_WINDOW,
NL80211_MESHCONF_PLINK_TIMEOUT,
+ NL80211_MESHCONF_CONNECTED_TO_GATE,
/* keep last */
__NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -4859,7 +4917,7 @@ enum nl80211_iface_limit_attrs {
* numbers = [ #{STA} <= 1, #{P2P-client,P2P-GO} <= 3 ], max = 4
* => allows a STA plus three P2P interfaces
*
- * The list of these four possiblities could completely be contained
+ * The list of these four possibilities could completely be contained
* within the %NL80211_ATTR_INTERFACE_COMBINATIONS attribute to indicate
* that any of these groups must match.
*
@@ -4889,7 +4947,7 @@ enum nl80211_if_combination_attrs {
* enum nl80211_plink_state - state of a mesh peer link finite state machine
*
* @NL80211_PLINK_LISTEN: initial state, considered the implicit
- * state of non existant mesh peer links
+ * state of non existent mesh peer links
* @NL80211_PLINK_OPN_SNT: mesh plink open frame has been sent to
* this mesh peer
* @NL80211_PLINK_OPN_RCVD: mesh plink open frame has been received
@@ -5381,7 +5439,7 @@ enum nl80211_timeout_reason {
* request parameters IE in the probe request
* @NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP: accept broadcast probe responses
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE: send probe request frames at
- * rate of at least 5.5M. In case non OCE AP is dicovered in the channel,
+ * rate of at least 5.5M. In case non OCE AP is discovered in the channel,
* only the first probe req in the channel will be sent in high rate.
* @NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION: allow probe request
* tx deferral (dot11FILSProbeDelay shall be set to 15ms)
@@ -5842,9 +5900,11 @@ enum nl80211_external_auth_action {
* @__NL80211_FTM_RESP_ATTR_INVALID: Invalid
* @NL80211_FTM_RESP_ATTR_ENABLED: FTM responder is enabled
* @NL80211_FTM_RESP_ATTR_LCI: The content of Measurement Report Element
- * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10)
+ * (9.4.2.22 in 802.11-2016) with type 8 - LCI (9.4.2.22.10),
+ * i.e. starting with the measurement token
* @NL80211_FTM_RESP_ATTR_CIVIC: The content of Measurement Report Element
- * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13)
+ * (9.4.2.22 in 802.11-2016) with type 11 - Civic (Section 9.4.2.22.13),
+ * i.e. starting with the measurement token
* @__NL80211_FTM_RESP_ATTR_LAST: Internal
* @NL80211_FTM_RESP_ATTR_MAX: highest FTM responder attribute.
*/
@@ -5906,4 +5966,386 @@ enum nl80211_ftm_responder_stats {
NL80211_FTM_STATS_MAX = __NL80211_FTM_STATS_AFTER_LAST - 1
};
+/**
+ * enum nl80211_preamble - frame preamble types
+ * @NL80211_PREAMBLE_LEGACY: legacy (HR/DSSS, OFDM, ERP PHY) preamble
+ * @NL80211_PREAMBLE_HT: HT preamble
+ * @NL80211_PREAMBLE_VHT: VHT preamble
+ * @NL80211_PREAMBLE_DMG: DMG preamble
+ */
+enum nl80211_preamble {
+ NL80211_PREAMBLE_LEGACY,
+ NL80211_PREAMBLE_HT,
+ NL80211_PREAMBLE_VHT,
+ NL80211_PREAMBLE_DMG,
+};
+
+/**
+ * enum nl80211_peer_measurement_type - peer measurement types
+ * @NL80211_PMSR_TYPE_INVALID: invalid/unused, needed as we use
+ * these numbers also for attributes
+ *
+ * @NL80211_PMSR_TYPE_FTM: flight time measurement
+ *
+ * @NUM_NL80211_PMSR_TYPES: internal
+ * @NL80211_PMSR_TYPE_MAX: highest type number
+ */
+enum nl80211_peer_measurement_type {
+ NL80211_PMSR_TYPE_INVALID,
+
+ NL80211_PMSR_TYPE_FTM,
+
+ NUM_NL80211_PMSR_TYPES,
+ NL80211_PMSR_TYPE_MAX = NUM_NL80211_PMSR_TYPES - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_status - peer measurement status
+ * @NL80211_PMSR_STATUS_SUCCESS: measurement completed successfully
+ * @NL80211_PMSR_STATUS_REFUSED: measurement was locally refused
+ * @NL80211_PMSR_STATUS_TIMEOUT: measurement timed out
+ * @NL80211_PMSR_STATUS_FAILURE: measurement failed, a type-dependent
+ * reason may be available in the response data
+ */
+enum nl80211_peer_measurement_status {
+ NL80211_PMSR_STATUS_SUCCESS,
+ NL80211_PMSR_STATUS_REFUSED,
+ NL80211_PMSR_STATUS_TIMEOUT,
+ NL80211_PMSR_STATUS_FAILURE,
+};
+
+/**
+ * enum nl80211_peer_measurement_req - peer measurement request attributes
+ * @__NL80211_PMSR_REQ_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_REQ_ATTR_DATA: This is a nested attribute with measurement
+ * type-specific request data inside. The attributes used are from the
+ * enums named nl80211_peer_measurement_<type>_req.
+ * @NL80211_PMSR_REQ_ATTR_GET_AP_TSF: include AP TSF timestamp, if supported
+ * (flag attribute)
+ *
+ * @NUM_NL80211_PMSR_REQ_ATTRS: internal
+ * @NL80211_PMSR_REQ_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_req {
+ __NL80211_PMSR_REQ_ATTR_INVALID,
+
+ NL80211_PMSR_REQ_ATTR_DATA,
+ NL80211_PMSR_REQ_ATTR_GET_AP_TSF,
+
+ /* keep last */
+ NUM_NL80211_PMSR_REQ_ATTRS,
+ NL80211_PMSR_REQ_ATTR_MAX = NUM_NL80211_PMSR_REQ_ATTRS - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_resp - peer measurement response attributes
+ * @__NL80211_PMSR_RESP_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_RESP_ATTR_DATA: This is a nested attribute with measurement
+ * type-specific results inside. The attributes used are from the enums
+ * named nl80211_peer_measurement_<type>_resp.
+ * @NL80211_PMSR_RESP_ATTR_STATUS: u32 value with the measurement status
+ * (using values from &enum nl80211_peer_measurement_status.)
+ * @NL80211_PMSR_RESP_ATTR_HOST_TIME: host time (%CLOCK_BOOTTIME) when the
+ * result was measured; this value is not expected to be accurate to
+ * more than 20ms. (u64, nanoseconds)
+ * @NL80211_PMSR_RESP_ATTR_AP_TSF: TSF of the AP that the interface
+ * doing the measurement is connected to when the result was measured.
+ * This shall be accurately reported if supported and requested
+ * (u64, usec)
+ * @NL80211_PMSR_RESP_ATTR_FINAL: If results are sent to the host partially
+ * (e.g. with FTM per-burst data) this flag will be cleared on all but
+ * the last result; if all results are combined it's set on the single
+ * result.
+ * @NL80211_PMSR_RESP_ATTR_PAD: padding for 64-bit attributes, ignore
+ *
+ * @NUM_NL80211_PMSR_RESP_ATTRS: internal
+ * @NL80211_PMSR_RESP_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_resp {
+ __NL80211_PMSR_RESP_ATTR_INVALID,
+
+ NL80211_PMSR_RESP_ATTR_DATA,
+ NL80211_PMSR_RESP_ATTR_STATUS,
+ NL80211_PMSR_RESP_ATTR_HOST_TIME,
+ NL80211_PMSR_RESP_ATTR_AP_TSF,
+ NL80211_PMSR_RESP_ATTR_FINAL,
+ NL80211_PMSR_RESP_ATTR_PAD,
+
+ /* keep last */
+ NUM_NL80211_PMSR_RESP_ATTRS,
+ NL80211_PMSR_RESP_ATTR_MAX = NUM_NL80211_PMSR_RESP_ATTRS - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_peer_attrs - peer attributes for measurement
+ * @__NL80211_PMSR_PEER_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_PEER_ATTR_ADDR: peer's MAC address
+ * @NL80211_PMSR_PEER_ATTR_CHAN: channel definition, nested, using top-level
+ * attributes like %NL80211_ATTR_WIPHY_FREQ etc.
+ * @NL80211_PMSR_PEER_ATTR_REQ: This is a nested attribute indexed by
+ * measurement type, with attributes from the
+ * &enum nl80211_peer_measurement_req inside.
+ * @NL80211_PMSR_PEER_ATTR_RESP: This is a nested attribute indexed by
+ * measurement type, with attributes from the
+ * &enum nl80211_peer_measurement_resp inside.
+ *
+ * @NUM_NL80211_PMSR_PEER_ATTRS: internal
+ * @NL80211_PMSR_PEER_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_peer_attrs {
+ __NL80211_PMSR_PEER_ATTR_INVALID,
+
+ NL80211_PMSR_PEER_ATTR_ADDR,
+ NL80211_PMSR_PEER_ATTR_CHAN,
+ NL80211_PMSR_PEER_ATTR_REQ,
+ NL80211_PMSR_PEER_ATTR_RESP,
+
+ /* keep last */
+ NUM_NL80211_PMSR_PEER_ATTRS,
+ NL80211_PMSR_PEER_ATTR_MAX = NUM_NL80211_PMSR_PEER_ATTRS - 1,
+};
+
+/**
+ * enum nl80211_peer_measurement_attrs - peer measurement attributes
+ * @__NL80211_PMSR_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_ATTR_MAX_PEERS: u32 attribute used for capability
+ * advertisement only, indicates the maximum number of peers
+ * measurements can be done with in a single request
+ * @NL80211_PMSR_ATTR_REPORT_AP_TSF: flag attribute in capability
+ * indicating that the connected AP's TSF can be reported in
+ * measurement results
+ * @NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR: flag attribute in capability
+ * indicating that MAC address randomization is supported.
+ * @NL80211_PMSR_ATTR_TYPE_CAPA: capabilities reported by the device,
+ * this contains a nesting indexed by measurement type, and
+ * type-specific capabilities inside, which are from the enums
+ * named nl80211_peer_measurement_<type>_capa.
+ * @NL80211_PMSR_ATTR_PEERS: nested attribute, the nesting index is
+ * meaningless, just a list of peers to measure with, with the
+ * sub-attributes taken from
+ * &enum nl80211_peer_measurement_peer_attrs.
+ *
+ * @NUM_NL80211_PMSR_ATTR: internal
+ * @NL80211_PMSR_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_attrs {
+ __NL80211_PMSR_ATTR_INVALID,
+
+ NL80211_PMSR_ATTR_MAX_PEERS,
+ NL80211_PMSR_ATTR_REPORT_AP_TSF,
+ NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR,
+ NL80211_PMSR_ATTR_TYPE_CAPA,
+ NL80211_PMSR_ATTR_PEERS,
+
+ /* keep last */
+ NUM_NL80211_PMSR_ATTR,
+ NL80211_PMSR_ATTR_MAX = NUM_NL80211_PMSR_ATTR - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_capa - FTM capabilities
+ * @__NL80211_PMSR_FTM_CAPA_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_FTM_CAPA_ATTR_ASAP: flag attribute indicating ASAP mode
+ * is supported
+ * @NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP: flag attribute indicating non-ASAP
+ * mode is supported
+ * @NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI: flag attribute indicating if LCI
+ * data can be requested during the measurement
+ * @NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC: flag attribute indicating if civic
+ * location data can be requested during the measurement
+ * @NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES: u32 bitmap attribute of bits
+ * from &enum nl80211_preamble.
+ * @NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS: bitmap of values from
+ * &enum nl80211_chan_width indicating the supported channel
+ * bandwidths for FTM. Note that a higher channel bandwidth may be
+ * configured to allow for other measurement types with different
+ * bandwidth requirement in the same measurement.
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT: u32 attribute indicating
+ * the maximum bursts exponent that can be used (if not present anything
+ * is valid)
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST: u32 attribute indicating
+ * the maximum FTMs per burst (if not present anything is valid)
+ *
+ * @NUM_NL80211_PMSR_FTM_CAPA_ATTR: internal
+ * @NL80211_PMSR_FTM_CAPA_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_ftm_capa {
+ __NL80211_PMSR_FTM_CAPA_ATTR_INVALID,
+
+ NL80211_PMSR_FTM_CAPA_ATTR_ASAP,
+ NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP,
+ NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI,
+ NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC,
+ NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES,
+ NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST,
+
+ /* keep last */
+ NUM_NL80211_PMSR_FTM_CAPA_ATTR,
+ NL80211_PMSR_FTM_CAPA_ATTR_MAX = NUM_NL80211_PMSR_FTM_CAPA_ATTR - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_req - FTM request attributes
+ * @__NL80211_PMSR_FTM_REQ_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_FTM_REQ_ATTR_ASAP: ASAP mode requested (flag)
+ * @NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE: preamble type (see
+ * &enum nl80211_preamble), optional for DMG (u32)
+ * @NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP: number of bursts exponent as in
+ * 802.11-2016 9.4.2.168 "Fine Timing Measurement Parameters element"
+ * (u8, 0-15, optional with default 15 i.e. "no preference")
+ * @NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD: interval between bursts in units
+ * of 100ms (u16, optional with default 0)
+ * @NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION: burst duration, as in 802.11-2016
+ * Table 9-257 "Burst Duration field encoding" (u8, 0-15, optional with
+ * default 15 i.e. "no preference")
+ * @NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST: number of successful FTM frames
+ * requested per burst
+ * (u8, 0-31, optional with default 0 i.e. "no preference")
+ * @NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES: number of FTMR frame retries
+ * (u8, default 3)
+ * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI: request LCI data (flag)
+ * @NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC: request civic location data
+ * (flag)
+ *
+ * @NUM_NL80211_PMSR_FTM_REQ_ATTR: internal
+ * @NL80211_PMSR_FTM_REQ_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_ftm_req {
+ __NL80211_PMSR_FTM_REQ_ATTR_INVALID,
+
+ NL80211_PMSR_FTM_REQ_ATTR_ASAP,
+ NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE,
+ NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP,
+ NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD,
+ NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION,
+ NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST,
+ NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES,
+ NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI,
+ NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC,
+
+ /* keep last */
+ NUM_NL80211_PMSR_FTM_REQ_ATTR,
+ NL80211_PMSR_FTM_REQ_ATTR_MAX = NUM_NL80211_PMSR_FTM_REQ_ATTR - 1
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_failure_reasons - FTM failure reasons
+ * @NL80211_PMSR_FTM_FAILURE_UNSPECIFIED: unspecified failure, not used
+ * @NL80211_PMSR_FTM_FAILURE_NO_RESPONSE: no response from the FTM responder
+ * @NL80211_PMSR_FTM_FAILURE_REJECTED: FTM responder rejected measurement
+ * @NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL: we already know the peer is
+ * on a different channel, so can't measure (if we didn't know, we'd
+ * try and get no response)
+ * @NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE: peer can't actually do FTM
+ * @NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP: invalid T1/T4 timestamps
+ * received
+ * @NL80211_PMSR_FTM_FAILURE_PEER_BUSY: peer reports busy, you may retry
+ * later (see %NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME)
+ * @NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS: parameters were changed
+ * by the peer and are no longer supported
+ */
+enum nl80211_peer_measurement_ftm_failure_reasons {
+ NL80211_PMSR_FTM_FAILURE_UNSPECIFIED,
+ NL80211_PMSR_FTM_FAILURE_NO_RESPONSE,
+ NL80211_PMSR_FTM_FAILURE_REJECTED,
+ NL80211_PMSR_FTM_FAILURE_WRONG_CHANNEL,
+ NL80211_PMSR_FTM_FAILURE_PEER_NOT_CAPABLE,
+ NL80211_PMSR_FTM_FAILURE_INVALID_TIMESTAMP,
+ NL80211_PMSR_FTM_FAILURE_PEER_BUSY,
+ NL80211_PMSR_FTM_FAILURE_BAD_CHANGED_PARAMS,
+};
+
+/**
+ * enum nl80211_peer_measurement_ftm_resp - FTM response attributes
+ * @__NL80211_PMSR_FTM_RESP_ATTR_INVALID: invalid
+ *
+ * @NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON: FTM-specific failure reason
+ * (u32, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX: optional, if bursts are reported
+ * as separate results then it will be the burst index 0...(N-1) and
+ * the top level will indicate partial results (u32)
+ * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS: number of FTM Request frames
+ * transmitted (u32, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES: number of FTM Request frames
+ * that were acknowledged (u32, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME: retry time received from the
+ * busy peer (u32, seconds)
+ * @NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP: actual number of bursts exponent
+ * used by the responder (similar to request, u8)
+ * @NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION: actual burst duration used by
+ * the responder (similar to request, u8)
+ * @NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST: actual FTMs per burst used
+ * by the responder (similar to request, u8)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG: average RSSI across all FTM action
+ * frames (optional, s32, 1/2 dBm)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD: RSSI spread across all FTM action
+ * frames (optional, s32, 1/2 dBm)
+ * @NL80211_PMSR_FTM_RESP_ATTR_TX_RATE: bitrate we used for the response to the
+ * FTM action frame (optional, nested, using &enum nl80211_rate_info
+ * attributes)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RX_RATE: bitrate the responder used for the FTM
+ * action frame (optional, nested, using &enum nl80211_rate_info attrs)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG: average RTT (s64, picoseconds, optional
+ * but one of RTT/DIST must be present)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE: RTT variance (u64, ps^2, note that
+ * standard deviation is the square root of variance, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD: RTT spread (u64, picoseconds,
+ * optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG: average distance (s64, mm, optional
+ * but one of RTT/DIST must be present)
+ * @NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE: distance variance (u64, mm^2, note
+ * that standard deviation is the square root of variance, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD: distance spread (u64, mm, optional)
+ * @NL80211_PMSR_FTM_RESP_ATTR_LCI: LCI data from peer (binary, optional);
+ * this is the contents of the Measurement Report Element (802.11-2016
+ * 9.4.2.22.1) starting with the Measurement Token, with Measurement
+ * Type 8.
+ * @NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC: civic location data from peer
+ * (binary, optional);
+ * this is the contents of the Measurement Report Element (802.11-2016
+ * 9.4.2.22.1) starting with the Measurement Token, with Measurement
+ * Type 11.
+ * @NL80211_PMSR_FTM_RESP_ATTR_PAD: ignore, for u64/s64 padding only
+ *
+ * @NUM_NL80211_PMSR_FTM_RESP_ATTR: internal
+ * @NL80211_PMSR_FTM_RESP_ATTR_MAX: highest attribute number
+ */
+enum nl80211_peer_measurement_ftm_resp {
+ __NL80211_PMSR_FTM_RESP_ATTR_INVALID,
+
+ NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON,
+ NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX,
+ NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS,
+ NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES,
+ NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME,
+ NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP,
+ NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION,
+ NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST,
+ NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG,
+ NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD,
+ NL80211_PMSR_FTM_RESP_ATTR_TX_RATE,
+ NL80211_PMSR_FTM_RESP_ATTR_RX_RATE,
+ NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG,
+ NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE,
+ NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD,
+ NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG,
+ NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE,
+ NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD,
+ NL80211_PMSR_FTM_RESP_ATTR_LCI,
+ NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
+ NL80211_PMSR_FTM_RESP_ATTR_PAD,
+
+ /* keep last */
+ NUM_NL80211_PMSR_FTM_RESP_ATTR,
+ NL80211_PMSR_FTM_RESP_ATTR_MAX = NUM_NL80211_PMSR_FTM_RESP_ATTR - 1
+};
+
#endif /* __LINUX_NL80211_H */
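
To make the nesting of the new peer-measurement attributes concrete, a hedged libnl-genl sketch of a single-peer FTM start request (nl80211_id, ifindex and mac are placeholders; the NL80211_PMSR_PEER_ATTR_CHAN channel nest that a real request needs, and all error handling, are omitted for brevity):

    #include <netlink/genl/genl.h>
    #include <netlink/msg.h>
    #include <linux/nl80211.h>

    static struct nl_msg *build_pmsr_start(int nl80211_id, int ifindex,
                                           const unsigned char mac[6])
    {
            struct nl_msg *msg = nlmsg_alloc();
            struct nlattr *pmsr, *peers, *peer, *req, *ftm, *data;

            genlmsg_put(msg, 0, 0, nl80211_id, 0, 0,
                        NL80211_CMD_PEER_MEASUREMENT_START, 0);
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

            pmsr = nla_nest_start(msg, NL80211_ATTR_PEER_MEASUREMENTS);
            peers = nla_nest_start(msg, NL80211_PMSR_ATTR_PEERS);
            peer = nla_nest_start(msg, 1);  /* peer nesting index is meaningless */
            nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, 6, mac);

            req = nla_nest_start(msg, NL80211_PMSR_PEER_ATTR_REQ);
            ftm = nla_nest_start(msg, NL80211_PMSR_TYPE_FTM); /* keyed by type */
            data = nla_nest_start(msg, NL80211_PMSR_REQ_ATTR_DATA);
            nla_put_flag(msg, NL80211_PMSR_FTM_REQ_ATTR_ASAP);
            nla_nest_end(msg, data);
            nla_nest_end(msg, ftm);
            nla_nest_end(msg, req);

            nla_nest_end(msg, peer);
            nla_nest_end(msg, peers);
            nla_nest_end(msg, pmsr);
            return msg;
    }
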
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 401d0c1e612d..95d0db2a8350 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -485,6 +485,11 @@ enum {
TCA_FLOWER_IN_HW_COUNT,
+ TCA_FLOWER_KEY_PORT_SRC_MIN, /* be16 */
+ TCA_FLOWER_KEY_PORT_SRC_MAX, /* be16 */
+ TCA_FLOWER_KEY_PORT_DST_MIN, /* be16 */
+ TCA_FLOWER_KEY_PORT_DST_MAX, /* be16 */
+
__TCA_FLOWER_MAX,
};
@@ -518,6 +523,8 @@ enum {
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
+#define TCA_FLOWER_MASK_FLAGS_RANGE (1 << 0) /* Range-based match */
+
/* Match-all classifier */
enum {
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 89ee47c2f17d..0d18b1d1fbbc 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -291,11 +291,38 @@ enum {
TCA_GRED_DPS,
TCA_GRED_MAX_P,
TCA_GRED_LIMIT,
+ TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
__TCA_GRED_MAX,
};
#define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
+enum {
+ TCA_GRED_VQ_ENTRY_UNSPEC,
+ TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
+ __TCA_GRED_VQ_ENTRY_MAX,
+};
+#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
+
+enum {
+ TCA_GRED_VQ_UNSPEC,
+ TCA_GRED_VQ_PAD,
+ TCA_GRED_VQ_DP, /* u32 */
+ TCA_GRED_VQ_STAT_BYTES, /* u64 */
+ TCA_GRED_VQ_STAT_PACKETS, /* u32 */
+ TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
+ TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
+ TCA_GRED_VQ_STAT_PDROP, /* u32 */
+ TCA_GRED_VQ_STAT_OTHER, /* u32 */
+ TCA_GRED_VQ_FLAGS, /* u32 */
+ __TCA_GRED_VQ_MAX
+};
+
+#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
+
struct tc_gred_qopt {
__u32 limit; /* HARD maximal queue length (bytes) */
__u32 qth_min; /* Min average length threshold (bytes) */
@@ -864,6 +891,8 @@ enum {
TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+ TCA_FQ_CE_THRESHOLD, /* DCTCP-like CE-marking threshold */
+
__TCA_FQ_MAX
};
@@ -882,6 +911,7 @@ struct tc_fq_qd_stats {
__u32 inactive_flows;
__u32 throttled_flows;
__u32 unthrottle_latency_ns;
+ __u64 ce_mark; /* packets above ce_threshold */
};
/* Heavy-Hitter Filter */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index c0d7ea0bf5b6..b4875a93363a 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -212,6 +212,7 @@ struct prctl_mm_map {
#define PR_SET_SPECULATION_CTRL 53
/* Speculation control variants */
# define PR_SPEC_STORE_BYPASS 0
+# define PR_SPEC_INDIRECT_BRANCH 1
/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
# define PR_SPEC_NOT_AFFECTED 0
# define PR_SPEC_PRCTL (1UL << 0)
@@ -219,4 +220,12 @@ struct prctl_mm_map {
# define PR_SPEC_DISABLE (1UL << 2)
# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+/* Reset arm64 pointer authentication keys */
+#define PR_PAC_RESET_KEYS 54
+# define PR_PAC_APIAKEY (1UL << 0)
+# define PR_PAC_APIBKEY (1UL << 1)
+# define PR_PAC_APDAKEY (1UL << 2)
+# define PR_PAC_APDBKEY (1UL << 3)
+# define PR_PAC_APGAKEY (1UL << 4)
+
#endif /* _LINUX_PRCTL_H */
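
A hedged sketch of the new prctl from userspace, regenerating only the instruction keys (the glibc <sys/prctl.h> wrapper is assumed to pull in these uapi constants):

    #include <sys/prctl.h>

    static int reset_pac_instruction_keys(void)
    {
            /* arg3..arg5 must be zero; a zero key mask resets all keys. */
            return prctl(PR_PAC_RESET_KEYS,
                         PR_PAC_APIAKEY | PR_PAC_APIBKEY, 0, 0, 0);
    }
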
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 3039bf6a742e..d73d83950265 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -84,6 +84,16 @@ struct ptp_sys_offset {
struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
};
+struct ptp_sys_offset_extended {
+ unsigned int n_samples; /* Desired number of measurements. */
+ unsigned int rsv[3]; /* Reserved for future use. */
+ /*
+ * Array of [system, phc, system] time stamps. The kernel will provide
+ * 3*n_samples time stamps.
+ */
+ struct ptp_clock_time ts[PTP_MAX_SAMPLES][3];
+};
+
struct ptp_sys_offset_precise {
struct ptp_clock_time device;
struct ptp_clock_time sys_realtime;
@@ -136,6 +146,8 @@ struct ptp_pin_desc {
#define PTP_PIN_SETFUNC _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc)
#define PTP_SYS_OFFSET_PRECISE \
_IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
+#define PTP_SYS_OFFSET_EXTENDED \
+ _IOW(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)
struct ptp_extts_event {
struct ptp_clock_time t; /* Time event occurred. */
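
A hedged sketch of one extended offset sample: each triple brackets the PHC reading between two system clock reads, so the offset can be estimated from the midpoint (ptp_fd is a placeholder; error handling is minimal):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/ptp_clock.h>

    static int64_t phc_offset_ns(int ptp_fd)
    {
            struct ptp_sys_offset_extended sox;
            int64_t t1, phc, t2;

            memset(&sox, 0, sizeof(sox));
            sox.n_samples = 1;
            if (ioctl(ptp_fd, PTP_SYS_OFFSET_EXTENDED, &sox) < 0)
                    return 0;

            /* ts[i] holds [system, phc, system] for sample i. */
            t1  = sox.ts[0][0].sec * 1000000000LL + sox.ts[0][0].nsec;
            phc = sox.ts[0][1].sec * 1000000000LL + sox.ts[0][1].nsec;
            t2  = sox.ts[0][2].sec * 1000000000LL + sox.ts[0][2].nsec;

            return phc - (t1 + t2) / 2; /* PHC minus midpoint system time */
    }
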
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index c81feb373d3e..d584073532b8 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -129,6 +129,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_STREAM_SCHEDULER_VALUE 124
#define SCTP_INTERLEAVING_SUPPORTED 125
#define SCTP_SENDMSG_CONNECT 126
+#define SCTP_EVENT 127
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -632,7 +633,9 @@ union sctp_notification {
*/
enum sctp_sn_type {
- SCTP_SN_TYPE_BASE = (1<<15),
+ SCTP_SN_TYPE_BASE = (1<<15),
+ SCTP_DATA_IO_EVENT = SCTP_SN_TYPE_BASE,
+#define SCTP_DATA_IO_EVENT SCTP_DATA_IO_EVENT
SCTP_ASSOC_CHANGE,
#define SCTP_ASSOC_CHANGE SCTP_ASSOC_CHANGE
SCTP_PEER_ADDR_CHANGE,
@@ -657,6 +660,8 @@ enum sctp_sn_type {
#define SCTP_ASSOC_RESET_EVENT SCTP_ASSOC_RESET_EVENT
SCTP_STREAM_CHANGE_EVENT,
#define SCTP_STREAM_CHANGE_EVENT SCTP_STREAM_CHANGE_EVENT
+ SCTP_SN_TYPE_MAX = SCTP_STREAM_CHANGE_EVENT,
+#define SCTP_SN_TYPE_MAX SCTP_SN_TYPE_MAX
};
/* Notification error codes used to fill up the error fields in some
@@ -1150,6 +1155,12 @@ struct sctp_add_streams {
uint16_t sas_outstrms;
};
+struct sctp_event {
+ sctp_assoc_t se_assoc_id;
+ uint16_t se_type;
+ uint8_t se_on;
+};
+
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
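
A hedged sketch of the fine-grained subscription the new SCTP_EVENT option enables, turning on a single notification type for one association (fd and id are placeholders):

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/sctp.h>

    static int enable_assoc_change(int fd, sctp_assoc_t id)
    {
            struct sctp_event ev;

            memset(&ev, 0, sizeof(ev));
            ev.se_assoc_id = id;
            ev.se_type = SCTP_ASSOC_CHANGE; /* any type up to SCTP_SN_TYPE_MAX */
            ev.se_on = 1;

            return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENT, &ev, sizeof(ev));
    }
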
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index f80135e5feaa..86dc24a96c90 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -243,6 +243,7 @@ enum
LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */
LINUX_MIB_TCPRETRANSFAIL, /* TCPRetransFail */
LINUX_MIB_TCPRCVCOALESCE, /* TCPRcvCoalesce */
+ LINUX_MIB_TCPBACKLOGCOALESCE, /* TCPBacklogCoalesce */
LINUX_MIB_TCPOFOQUEUE, /* TCPOFOQueue */
LINUX_MIB_TCPOFODROP, /* TCPOFODrop */
LINUX_MIB_TCPOFOMERGE, /* TCPOFOMerge */
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index e02d31986ff9..8bb6cc5f3235 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -266,6 +266,7 @@ enum {
TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */
TCP_NLA_DSACK_DUPS, /* DSACK blocks received */
TCP_NLA_REORD_SEEN, /* reordering events seen */
+ TCP_NLA_SRTT, /* smoothed RTT in usecs */
};
/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 09502de447f5..30baccb6c9c4 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -33,6 +33,7 @@ struct udphdr {
#define UDP_NO_CHECK6_TX 101 /* Disable sending checksum for UDP6 */
#define UDP_NO_CHECK6_RX 102 /* Disable accepting checksum for UDP6 */
#define UDP_SEGMENT 103 /* Set GSO segmentation size */
+#define UDP_GRO 104 /* This socket can receive UDP GRO packets */
/* UDP encapsulation types */
#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
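
A hedged sketch of the new socket option in use (fd is a placeholder UDP socket); once enabled, the kernel may deliver aggregated datagrams, reporting the original segment size out of band:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/udp.h>

    static int enable_udp_gro(int fd)
    {
            int one = 1;

            return setsockopt(fd, IPPROTO_UDP, UDP_GRO, &one, sizeof(one));
    }
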
diff --git a/include/uapi/linux/v4l2-common.h b/include/uapi/linux/v4l2-common.h
index 4f7b892377cd..7d21c1634b4d 100644
--- a/include/uapi/linux/v4l2-common.h
+++ b/include/uapi/linux/v4l2-common.h
@@ -79,24 +79,11 @@
/* Current composing area plus all padding pixels */
#define V4L2_SEL_TGT_COMPOSE_PADDED 0x0103
-/* Backward compatibility target definitions --- to be removed. */
-#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
-#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
-#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP
-#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE
-#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS
-#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS
-
/* Selection flags */
#define V4L2_SEL_FLAG_GE (1 << 0)
#define V4L2_SEL_FLAG_LE (1 << 1)
#define V4L2_SEL_FLAG_KEEP_CONFIG (1 << 2)
-/* Backward compatibility flag definitions --- to be removed. */
-#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
-#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
-#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
-
struct v4l2_edid {
__u32 pad;
__u32 start_block;
@@ -105,4 +92,19 @@ struct v4l2_edid {
__u8 *edid;
};
+#ifndef __KERNEL__
+/* Backward compatibility target definitions --- to be removed. */
+#define V4L2_SEL_TGT_CROP_ACTIVE V4L2_SEL_TGT_CROP
+#define V4L2_SEL_TGT_COMPOSE_ACTIVE V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_ACTUAL V4L2_SEL_TGT_CROP
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_ACTUAL V4L2_SEL_TGT_COMPOSE
+#define V4L2_SUBDEV_SEL_TGT_CROP_BOUNDS V4L2_SEL_TGT_CROP_BOUNDS
+#define V4L2_SUBDEV_SEL_TGT_COMPOSE_BOUNDS V4L2_SEL_TGT_COMPOSE_BOUNDS
+
+/* Backward compatibility flag definitions --- to be removed. */
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_GE V4L2_SEL_FLAG_GE
+#define V4L2_SUBDEV_SEL_FLAG_SIZE_LE V4L2_SEL_FLAG_LE
+#define V4L2_SUBDEV_SEL_FLAG_KEEP_CONFIG V4L2_SEL_FLAG_KEEP_CONFIG
+#endif
+
#endif /* __V4L2_COMMON__ */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 998983a6e6b7..3dcfc6148f99 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -404,9 +404,6 @@ enum v4l2_mpeg_video_multi_slice_mode {
#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228)
#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229)
-#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250)
-#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251)
-
#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300)
#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301)
#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302)
@@ -1097,69 +1094,4 @@ enum v4l2_detect_md_mode {
#define V4L2_CID_DETECT_MD_THRESHOLD_GRID (V4L2_CID_DETECT_CLASS_BASE + 3)
#define V4L2_CID_DETECT_MD_REGION_GRID (V4L2_CID_DETECT_CLASS_BASE + 4)
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3
-#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4
-
-struct v4l2_mpeg2_sequence {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
- __u16 horizontal_size;
- __u16 vertical_size;
- __u32 vbv_buffer_size;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
- __u8 profile_and_level_indication;
- __u8 progressive_sequence;
- __u8 chroma_format;
- __u8 pad;
-};
-
-struct v4l2_mpeg2_picture {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
- __u8 picture_coding_type;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
- __u8 f_code[2][2];
- __u8 intra_dc_precision;
- __u8 picture_structure;
- __u8 top_field_first;
- __u8 frame_pred_frame_dct;
- __u8 concealment_motion_vectors;
- __u8 q_scale_type;
- __u8 intra_vlc_format;
- __u8 alternate_scan;
- __u8 repeat_first_field;
- __u8 progressive_frame;
- __u8 pad;
-};
-
-struct v4l2_ctrl_mpeg2_slice_params {
- __u32 bit_size;
- __u32 data_bit_offset;
-
- struct v4l2_mpeg2_sequence sequence;
- struct v4l2_mpeg2_picture picture;
-
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */
- __u8 quantiser_scale_code;
-
- __u8 backward_ref_index;
- __u8 forward_ref_index;
- __u8 pad;
-};
-
-struct v4l2_ctrl_mpeg2_quantization {
- /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */
- __u8 load_intra_quantiser_matrix;
- __u8 load_non_intra_quantiser_matrix;
- __u8 load_chroma_intra_quantiser_matrix;
- __u8 load_chroma_non_intra_quantiser_matrix;
-
- __u8 intra_quantiser_matrix[64];
- __u8 non_intra_quantiser_matrix[64];
- __u8 chroma_intra_quantiser_matrix[64];
- __u8 chroma_non_intra_quantiser_matrix[64];
-};
-
#endif
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 813102810f53..02bb7ad6e986 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -354,6 +354,21 @@ struct vfio_region_gfx_edid {
};
/*
+ * 10de vendor sub-type
+ *
+ * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space.
+ */
+#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1)
+
+/*
+ * 1014 vendor sub-type
+ *
+ * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU
+ * to do TLB invalidation on a GPU.
+ */
+#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1)
+
+/*
* The MSIX mappable capability informs that MSIX data of a BAR can be mmapped
* which allows direct access to non-MSIX registers which happened to be within
* the same system page.
@@ -363,6 +378,33 @@ struct vfio_region_gfx_edid {
*/
#define VFIO_REGION_INFO_CAP_MSIX_MAPPABLE 3
+/*
+ * Capability with compressed real address (aka SSA - small system address)
+ * where GPU RAM is mapped on a system bus. Used by a GPU for DMA routing
+ * and by the userspace to associate a NVLink bridge with a GPU.
+ */
+#define VFIO_REGION_INFO_CAP_NVLINK2_SSATGT 4
+
+struct vfio_region_info_cap_nvlink2_ssatgt {
+ struct vfio_info_cap_header header;
+ __u64 tgt;
+};
+
+/*
+ * Capability with an NVLink link speed. The value is read by
+ * the NVlink2 bridge driver from the bridge's "ibm,nvlink-speed"
+ * property in the device tree. The value is fixed in the hardware
+ * and failing to provide the correct value results in the link
+ * not working with no indication from the driver why.
+ */
+#define VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD 5
+
+struct vfio_region_info_cap_nvlink2_lnkspd {
+ struct vfio_info_cap_header header;
+ __u32 link_speed;
+ __u32 __pad;
+};
+
/**
* VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
* struct vfio_irq_info)
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index c8e8ff810190..b5671ce2724f 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -145,6 +145,7 @@ enum v4l2_buf_type {
V4L2_BUF_TYPE_SDR_CAPTURE = 11,
V4L2_BUF_TYPE_SDR_OUTPUT = 12,
V4L2_BUF_TYPE_META_CAPTURE = 13,
+ V4L2_BUF_TYPE_META_OUTPUT = 14,
/* Deprecated, do not use */
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
@@ -469,6 +470,7 @@ struct v4l2_capability {
#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
#define V4L2_CAP_STREAMING 0x04000000 /* streaming I/O ioctls */
+#define V4L2_CAP_META_OUTPUT 0x08000000 /* Is a metadata output device */
#define V4L2_CAP_TOUCH 0x10000000 /* Is a touch device */
@@ -689,6 +691,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
#define V4L2_PIX_FMT_SUNXI_TILED_NV12 v4l2_fourcc('S', 'T', '1', '2') /* Sunxi Tiled NV12 Format */
+#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
/* 10bit raw bayer packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
#define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
@@ -879,6 +882,7 @@ struct v4l2_requestbuffers {
#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
+#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
/**
* struct v4l2_plane - plane info for multi-planar buffers
@@ -1622,8 +1626,6 @@ struct v4l2_ext_control {
__u8 __user *p_u8;
__u16 __user *p_u16;
__u32 __user *p_u32;
- struct v4l2_ctrl_mpeg2_slice_params __user *p_mpeg2_slice_params;
- struct v4l2_ctrl_mpeg2_quantization __user *p_mpeg2_quantization;
void __user *ptr;
};
} __attribute__ ((packed));
@@ -1669,8 +1671,6 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_U8 = 0x0100,
V4L2_CTRL_TYPE_U16 = 0x0101,
V4L2_CTRL_TYPE_U32 = 0x0102,
- V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS = 0x0103,
- V4L2_CTRL_TYPE_MPEG2_QUANTIZATION = 0x0104,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h
index 449132c76b1c..1196e1c1d4f6 100644
--- a/include/uapi/linux/virtio_config.h
+++ b/include/uapi/linux/virtio_config.h
@@ -75,6 +75,9 @@
*/
#define VIRTIO_F_IOMMU_PLATFORM 33
+/* This feature indicates support for the packed virtqueue layout. */
+#define VIRTIO_F_RING_PACKED 34
+
/*
* Does the device support Single Root I/O Virtualization?
*/
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index f43c3c6171ff..8e88eba1fa7a 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -41,6 +41,7 @@
#include <linux/types.h>
#define VIRTIO_GPU_F_VIRGL 0
+#define VIRTIO_GPU_F_EDID 1
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -56,6 +57,7 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING,
VIRTIO_GPU_CMD_GET_CAPSET_INFO,
VIRTIO_GPU_CMD_GET_CAPSET,
+ VIRTIO_GPU_CMD_GET_EDID,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
@@ -76,6 +78,7 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_OK_DISPLAY_INFO,
VIRTIO_GPU_RESP_OK_CAPSET_INFO,
VIRTIO_GPU_RESP_OK_CAPSET,
+ VIRTIO_GPU_RESP_OK_EDID,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -291,6 +294,21 @@ struct virtio_gpu_resp_capset {
__u8 capset_data[];
};
+/* VIRTIO_GPU_CMD_GET_EDID */
+struct virtio_gpu_cmd_get_edid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 scanout;
+ __le32 padding;
+};
+
+/* VIRTIO_GPU_RESP_OK_EDID */
+struct virtio_gpu_resp_edid {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 size;
+ __le32 padding;
+ __u8 edid[1024];
+};
+
#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0)
struct virtio_gpu_config {
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 6d5d5faa989b..2414f8af26b3 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -44,6 +44,13 @@
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT 4
+/*
+ * Mark a descriptor as available or used in packed ring.
+ * Notice: they are defined as shifts instead of shifted values.
+ */
+#define VRING_PACKED_DESC_F_AVAIL 7
+#define VRING_PACKED_DESC_F_USED 15
+
/* The Host uses this in used->flags to advise the Guest: don't kick me when
* you add a buffer. It's unreliable, so it's simply an optimization. Guest
* will still kick if it's out of buffers. */
@@ -53,6 +60,23 @@
* optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
+/* Enable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_ENABLE 0x0
+/* Disable events in packed ring. */
+#define VRING_PACKED_EVENT_FLAG_DISABLE 0x1
+/*
+ * Enable events for a specific descriptor in packed ring.
+ * (as specified by Descriptor Ring Change Event Offset/Wrap Counter).
+ * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
+ */
+#define VRING_PACKED_EVENT_FLAG_DESC 0x2
+
+/*
+ * Wrap counter bit shift in event suppression structure
+ * of packed ring.
+ */
+#define VRING_PACKED_EVENT_F_WRAP_CTR 15
+
/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC 28
@@ -171,4 +195,32 @@ static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
+struct vring_packed_desc_event {
+ /* Descriptor Ring Change Event Offset/Wrap Counter. */
+ __le16 off_wrap;
+ /* Descriptor Ring Change Event Flags. */
+ __le16 flags;
+};
+
+struct vring_packed_desc {
+ /* Buffer Address. */
+ __le64 addr;
+ /* Buffer Length. */
+ __le32 len;
+ /* Buffer ID. */
+ __le16 id;
+ /* The flags depending on descriptor type. */
+ __le16 flags;
+};
+
+struct vring_packed {
+ unsigned int num;
+
+ struct vring_packed_desc *desc;
+
+ struct vring_packed_desc_event *driver;
+
+ struct vring_packed_desc_event *device;
+};
+
#endif /* _UAPI_LINUX_VIRTIO_RING_H */
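
Because the AVAIL/USED flags above are defined as bit shifts, a hedged sketch of how a driver-side poll might test whether the device has consumed a packed-ring descriptor (little-endian host assumed, so le16_to_cpu() conversion is elided):

    #include <stdbool.h>
    #include <linux/virtio_ring.h>

    static bool desc_is_used(const struct vring_packed_desc *desc,
                             bool wrap_counter)
    {
            bool avail = desc->flags & (1 << VRING_PACKED_DESC_F_AVAIL);
            bool used  = desc->flags & (1 << VRING_PACKED_DESC_F_USED);

            /* The device marks a descriptor used by making USED match
             * AVAIL, both equal to the driver's current wrap counter.
             */
            return avail == used && used == wrap_counter;
    }
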
diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h
index f0a547d86679..ae12826ed641 100644
--- a/include/uapi/sound/firewire.h
+++ b/include/uapi/sound/firewire.h
@@ -12,6 +12,7 @@
#define SNDRV_FIREWIRE_EVENT_EFW_RESPONSE 0x4e617475
#define SNDRV_FIREWIRE_EVENT_DIGI00X_MESSAGE 0x746e736c
#define SNDRV_FIREWIRE_EVENT_MOTU_NOTIFICATION 0x64776479
+#define SNDRV_FIREWIRE_EVENT_TASCAM_CONTROL 0x7473636d
struct snd_firewire_event_common {
unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */
@@ -53,12 +54,24 @@ struct snd_firewire_event_motu_notification {
__u32 message; /* MOTU-specific bits. */
};
+struct snd_firewire_tascam_change {
+ unsigned int index;
+ __be32 before;
+ __be32 after;
+};
+
+struct snd_firewire_event_tascam_control {
+ unsigned int type;
+ struct snd_firewire_tascam_change changes[0];
+};
+
union snd_firewire_event {
struct snd_firewire_event_common common;
struct snd_firewire_event_lock_status lock_status;
struct snd_firewire_event_dice_notification dice_notification;
struct snd_firewire_event_efw_response efw_response;
struct snd_firewire_event_digi00x_message digi00x_message;
+ struct snd_firewire_event_tascam_control tascam_control;
struct snd_firewire_event_motu_notification motu_notification;
};
@@ -66,6 +79,7 @@ union snd_firewire_event {
#define SNDRV_FIREWIRE_IOCTL_GET_INFO _IOR('H', 0xf8, struct snd_firewire_get_info)
#define SNDRV_FIREWIRE_IOCTL_LOCK _IO('H', 0xf9)
#define SNDRV_FIREWIRE_IOCTL_UNLOCK _IO('H', 0xfa)
+#define SNDRV_FIREWIRE_IOCTL_TASCAM_STATE _IOR('H', 0xfb, struct snd_firewire_tascam_state)
#define SNDRV_FIREWIRE_TYPE_DICE 1
#define SNDRV_FIREWIRE_TYPE_FIREWORKS 2
@@ -88,4 +102,10 @@ struct snd_firewire_get_info {
* Returns -EBUSY if the driver is already streaming.
*/
+#define SNDRV_FIREWIRE_TASCAM_STATE_COUNT 64
+
+struct snd_firewire_tascam_state {
+ __be32 data[SNDRV_FIREWIRE_TASCAM_STATE_COUNT];
+};
+
#endif /* _UAPI_SOUND_FIREWIRE_H_INCLUDED */
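
A hedged sketch of snapshotting the TASCAM controller state image through the new ioctl (fd is a placeholder for the opened hwdep character device):

    #include <sys/ioctl.h>
    #include <sound/firewire.h>

    static int read_tascam_state(int fd, struct snd_firewire_tascam_state *st)
    {
            /* Fills st->data with the current 64-quadlet state image. */
            return ioctl(fd, SNDRV_FIREWIRE_IOCTL_TASCAM_STATE, st);
    }
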
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index abbad94e14a1..e582e8e7527a 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -246,6 +246,9 @@ struct ipu_image {
struct v4l2_rect rect;
dma_addr_t phys0;
dma_addr_t phys1;
+ /* chroma plane offset overrides */
+ u32 u_offset;
+ u32 v_offset;
};
void ipu_cpmem_zero(struct ipuv3_channel *ch);
@@ -387,6 +390,12 @@ int ipu_ic_task_init(struct ipu_ic *ic,
int out_width, int out_height,
enum ipu_color_space in_cs,
enum ipu_color_space out_cs);
+int ipu_ic_task_init_rsc(struct ipu_ic *ic,
+ int in_width, int in_height,
+ int out_width, int out_height,
+ enum ipu_color_space in_cs,
+ enum ipu_color_space out_cs,
+ u32 rsc);
int ipu_ic_task_graphics_init(struct ipu_ic *ic,
enum ipu_color_space in_g_cs,
bool galpha_en, u32 galpha,
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 61f410fd74e4..4914b93a23f2 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
{
}
#endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-struct resource;
-void arch_xen_balloon_init(struct resource *hostmem_resource);
-#endif
diff --git a/include/xen/interface/hvm/start_info.h b/include/xen/interface/hvm/start_info.h
index 648415976ead..50af9ea2ff1e 100644
--- a/include/xen/interface/hvm/start_info.h
+++ b/include/xen/interface/hvm/start_info.h
@@ -33,7 +33,7 @@
* | magic | Contains the magic value XEN_HVM_START_MAGIC_VALUE
* | | ("xEn3" with the 0x80 bit of the "E" set).
* 4 +----------------+
- * | version | Version of this structure. Current version is 0. New
+ * | version | Version of this structure. Current version is 1. New
* | | versions are guaranteed to be backwards-compatible.
* 8 +----------------+
* | flags | SIF_xxx flags.
@@ -48,6 +48,15 @@
* 32 +----------------+
* | rsdp_paddr | Physical address of the RSDP ACPI data structure.
* 40 +----------------+
+ * | memmap_paddr | Physical address of the (optional) memory map. Only
+ * | | present in version 1 and newer of the structure.
+ * 48 +----------------+
+ * | memmap_entries | Number of entries in the memory map table. Zero
+ * | | if there is no memory map being provided. Only
+ * | | present in version 1 and newer of the structure.
+ * 52 +----------------+
+ * | reserved | Version 1 and newer only.
+ * 56 +----------------+
*
* The layout of each entry in the module structure is the following:
*
@@ -62,14 +71,52 @@
* | reserved |
* 32 +----------------+
*
+ * The layout of each entry in the memory map table is as follows:
+ *
+ * 0 +----------------+
+ * | addr | Base address
+ * 8 +----------------+
+ * | size | Size of mapping in bytes
+ * 16 +----------------+
+ * | type | Type of mapping as defined between the hypervisor
+ * | | and guest. See XEN_HVM_MEMMAP_TYPE_* values below.
+ * 20 +----------------+
+ * | reserved |
+ * 24 +----------------+
+ *
 * The addresses and sizes are always 64bit little endian unsigned integers.
*
* NB: Xen on x86 will always try to place all the data below the 4GiB
* boundary.
+ *
+ * Version numbers of the hvm_start_info structure have evolved like this:
+ *
+ * Version 0: Initial implementation.
+ *
+ * Version 1: Added the memmap_paddr/memmap_entries fields (plus 4 bytes of
+ * padding) to the end of the hvm_start_info struct. These new
+ * fields can be used to pass a memory map to the guest. The
+ * memory map is optional and so guests that understand version 1
+ * of the structure must check that memmap_entries is non-zero
+ * before trying to read the memory map.
*/
#define XEN_HVM_START_MAGIC_VALUE 0x336ec578
/*
+ * The values used in the type field of the memory map table entries are
+ * defined below and match the Address Range Types as defined in the "System
+ * Address Map Interfaces" section of the ACPI Specification. Please refer to
+ * section 15 in version 6.2 of the ACPI spec: http://uefi.org/specifications
+ */
+#define XEN_HVM_MEMMAP_TYPE_RAM 1
+#define XEN_HVM_MEMMAP_TYPE_RESERVED 2
+#define XEN_HVM_MEMMAP_TYPE_ACPI 3
+#define XEN_HVM_MEMMAP_TYPE_NVS 4
+#define XEN_HVM_MEMMAP_TYPE_UNUSABLE 5
+#define XEN_HVM_MEMMAP_TYPE_DISABLED 6
+#define XEN_HVM_MEMMAP_TYPE_PMEM 7
+
+/*
* C representation of the x86/HVM start info layout.
*
* The canonical definition of this layout is above, this is just a way to
@@ -86,6 +133,13 @@ struct hvm_start_info {
uint64_t cmdline_paddr; /* Physical address of the command line. */
uint64_t rsdp_paddr; /* Physical address of the RSDP ACPI data */
/* structure. */
+ /* All following fields only present in version 1 and newer */
+ uint64_t memmap_paddr; /* Physical address of an array of */
+ /* hvm_memmap_table_entry. */
+ uint32_t memmap_entries; /* Number of entries in the memmap table. */
+ /* Value will be zero if there is no memory */
+ /* map being provided. */
+ uint32_t reserved; /* Must be zero. */
};
struct hvm_modlist_entry {
@@ -95,4 +149,11 @@ struct hvm_modlist_entry {
uint64_t reserved;
};
+struct hvm_memmap_table_entry {
+ uint64_t addr; /* Base address of the memory region */
+ uint64_t size; /* Size of the memory region in bytes */
+ uint32_t type; /* Mapping type */
+ uint32_t reserved; /* Must be zero for Version 1. */
+};
+
#endif /* __XEN_PUBLIC_ARCH_X86_HVM_START_INFO_H__ */
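
A hedged sketch of how a version-1-aware guest might consume the new memory map; phys_to_virt_example() and register_usable_ram() are assumed helpers standing in for platform-specific setup, and everything else comes from the header above.

/*
 * Sketch: walk the optional memory map. phys_to_virt_example() and
 * register_usable_ram() are assumed helpers, not part of this header.
 */
static void example_walk_memmap(const struct hvm_start_info *si)
{
	const struct hvm_memmap_table_entry *e;
	uint32_t i;

	if (si->version < 1 || si->memmap_entries == 0)
		return;		/* no memory map provided */

	e = phys_to_virt_example(si->memmap_paddr);
	for (i = 0; i < si->memmap_entries; i++)
		if (e[i].type == XEN_HVM_MEMMAP_TYPE_RAM)
			register_usable_ram(e[i].addr, e[i].size);
}
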
diff --git a/include/xen/xen-front-pgdir-shbuf.h b/include/xen/xen-front-pgdir-shbuf.h
new file mode 100644
index 000000000000..150ef7ec51ec
--- /dev/null
+++ b/include/xen/xen-front-pgdir-shbuf.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Xen frontend/backend page directory based shared buffer
+ * helper module.
+ *
+ * Copyright (C) 2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_FRONT_PGDIR_SHBUF_H_
+#define __XEN_FRONT_PGDIR_SHBUF_H_
+
+#include <linux/kernel.h>
+
+#include <xen/grant_table.h>
+
+struct xen_front_pgdir_shbuf_ops;
+
+struct xen_front_pgdir_shbuf {
+ /*
+ * Number of references granted for the backend use:
+ *
+ * - for frontend allocated/imported buffers this holds the number
+ * of grant references for the page directory and the pages
+ * of the buffer
+ *
+ * - for the buffer provided by the backend this only holds the number
+ * of grant references for the page directory itself as grant
+ * references for the buffer will be provided by the backend.
+ */
+ int num_grefs;
+ grant_ref_t *grefs;
+ /* Page directory backing storage. */
+ u8 *directory;
+
+ /*
+ * Number of pages for the shared buffer itself (excluding the page
+ * directory).
+ */
+ int num_pages;
+ /*
+ * Backing storage of the shared buffer: these are the pages being
+ * shared.
+ */
+ struct page **pages;
+
+ struct xenbus_device *xb_dev;
+
+ /* These are the ops used internally depending on be_alloc mode. */
+ const struct xen_front_pgdir_shbuf_ops *ops;
+
+ /* Xen map handles for the buffer allocated by the backend. */
+ grant_handle_t *backend_map_handles;
+};
+
+struct xen_front_pgdir_shbuf_cfg {
+ struct xenbus_device *xb_dev;
+
+ /* Number of pages of the buffer backing storage. */
+ int num_pages;
+ /* Pages of the buffer to be shared. */
+ struct page **pages;
+
+ /*
+ * This is allocated by the caller because there are use-cases when
+ * the buffer structure is allocated as part of a bigger one.
+ */
+ struct xen_front_pgdir_shbuf *pgdir;
+ /*
+ * Mode of grant reference sharing: if set, then the backend will share
+ * grant references to the buffer with the frontend.
+ */
+ int be_alloc;
+};
+
+int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg);
+
+grant_ref_t
+xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf);
+
+int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf);
+
+int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf);
+
+void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf);
+
+#endif /* __XEN_FRONT_PGDIR_SHBUF_H_ */
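
A sketch of the intended frontend-side call sequence for a buffer whose pages the frontend provides (be_alloc == 0); error unwinding and the surrounding driver state are omitted, and only the API declared above is used.

/* Sketch: share frontend-allocated pages with the backend. */
static int example_share_pages(struct xenbus_device *xb_dev,
			       struct page **pages, int num_pages,
			       struct xen_front_pgdir_shbuf *shbuf)
{
	struct xen_front_pgdir_shbuf_cfg cfg = {
		.xb_dev = xb_dev,
		.num_pages = num_pages,
		.pages = pages,
		.pgdir = shbuf,
		.be_alloc = 0,	/* frontend provides the pages */
	};
	int ret = xen_front_pgdir_shbuf_alloc(&cfg);

	if (ret < 0)
		return ret;
	/*
	 * Hand the first grant reference of the page directory to the
	 * backend (e.g. via a xenstore entry); the backend walks the
	 * directory to reach the shared pages.
	 */
	return xen_front_pgdir_shbuf_get_dir_start(shbuf);
}
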
diff --git a/include/xen/xen.h b/include/xen/xen.h
index d7a2678da77f..0e2156786ad2 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -29,6 +29,9 @@ extern bool xen_pvh;
extern uint32_t xen_start_flags;
+#include <xen/interface/hvm/start_info.h>
+extern struct hvm_start_info pvh_start_info;
+
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>
#include <asm/xen/hypervisor.h>
diff --git a/init/Kconfig b/init/Kconfig
index a4112e95724a..3e6be1694766 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -335,15 +335,6 @@ config HAVE_ARCH_AUDITSYSCALL
config AUDITSYSCALL
def_bool y
depends on AUDIT && HAVE_ARCH_AUDITSYSCALL
-
-config AUDIT_WATCH
- def_bool y
- depends on AUDITSYSCALL
- select FSNOTIFY
-
-config AUDIT_TREE
- def_bool y
- depends on AUDITSYSCALL
select FSNOTIFY
source "kernel/irq/Kconfig"
@@ -509,6 +500,15 @@ config PSI
Say N if unsure.
+config PSI_DEFAULT_DISABLED
+ bool "Require boot parameter to enable pressure stall information tracking"
+ default n
+ depends on PSI
+ help
+	  If set, pressure stall information tracking will be disabled
+	  by default but can be enabled by passing psi=1 on the
+	  kernel command line during boot.
+
endmenu # "CPU/Task time and stats accounting"
config CPU_ISOLATION
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index d1a5d885ce13..73e02ea5d5d1 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -53,9 +53,6 @@ static void __init handle_initrd(void)
ksys_mkdir("/old", 0700);
ksys_chdir("/old");
- /* try loading default modules from initrd */
- load_default_modules();
-
/*
* In case that a resume from disk is carried out by linuxrc or one of
* its children, we need to tell the freezer not to wait for us.
diff --git a/init/initramfs.c b/init/initramfs.c
index 640557788026..fca899622937 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -291,16 +291,6 @@ static int __init do_reset(void)
return 1;
}
-static int __init maybe_link(void)
-{
- if (nlink >= 2) {
- char *old = find_link(major, minor, ino, mode, collected);
- if (old)
- return (ksys_link(old, collected) < 0) ? -1 : 1;
- }
- return 0;
-}
-
static void __init clean_path(char *path, umode_t fmode)
{
struct kstat st;
@@ -313,6 +303,18 @@ static void __init clean_path(char *path, umode_t fmode)
}
}
+static int __init maybe_link(void)
+{
+ if (nlink >= 2) {
+ char *old = find_link(major, minor, ino, mode, collected);
+ if (old) {
+ clean_path(collected, 0);
+ return (ksys_link(old, collected) < 0) ? -1 : 1;
+ }
+ }
+ return 0;
+}
+
static __initdata int wfd;
static int __init do_name(void)
@@ -644,12 +646,6 @@ static int __init populate_rootfs(void)
#endif
}
flush_delayed_fput();
- /*
- * Try loading default modules from initramfs. This gives
- * us a chance to load before device_initcalls.
- */
- load_default_modules();
-
return 0;
}
rootfs_initcall(populate_rootfs);
diff --git a/init/main.c b/init/main.c
index ee147103ba1b..0f8cc626e634 100644
--- a/init/main.c
+++ b/init/main.c
@@ -737,10 +737,6 @@ asmlinkage __visible void __init start_kernel(void)
arch_post_acpi_subsys_init();
sfi_init_late();
- if (efi_enabled(EFI_RUNTIME_SERVICES)) {
- efi_free_boot_services();
- }
-
/* Do the rest non-__init'ed, we're now alive */
arch_call_rest_init();
}
@@ -996,17 +992,6 @@ static void __init do_pre_smp_initcalls(void)
do_one_initcall(initcall_from_entry(fn));
}
-/*
- * This function requests modules which should be loaded by default and is
- * called twice right after initrd is mounted and right before init is
- * exec'd. If such modules are on either initrd or rootfs, they will be
- * loaded before control is passed to userland.
- */
-void __init load_default_modules(void)
-{
- load_default_elevator_module();
-}
-
static int run_init_process(const char *init_filename)
{
argv_init[0] = init_filename;
@@ -1046,12 +1031,12 @@ static void mark_readonly(void)
{
if (rodata_enabled) {
/*
- * load_module() results in W+X mappings, which are cleaned up
- * with call_rcu_sched(). Let's make sure that queued work is
+ * load_module() results in W+X mappings, which are cleaned
+ * up with call_rcu(). Let's make sure that queued work is
* flushed so that we don't hit false positives looking for
* insecure pages which are W+X.
*/
- rcu_barrier_sched();
+ rcu_barrier();
mark_rodata_ro();
rodata_test();
} else
@@ -1180,5 +1165,4 @@ static noinline void __init kernel_init_freeable(void)
*/
integrity_load_keys();
- load_default_modules();
}
diff --git a/kernel/Makefile b/kernel/Makefile
index 7343b3a9bff0..cde93d54c571 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -49,9 +49,6 @@ obj-$(CONFIG_PROFILING) += profile.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
obj-$(CONFIG_FUTEX) += futex.o
-ifeq ($(CONFIG_COMPAT),y)
-obj-$(CONFIG_FUTEX) += futex_compat.o
-endif
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += smp.o
ifneq ($(CONFIG_SMP),y)
@@ -76,9 +73,7 @@ obj-$(CONFIG_IKCONFIG) += configs.o
obj-$(CONFIG_SMP) += stop_machine.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
-obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
-obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o audit_fsnotify.o
-obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
+obj-$(CONFIG_AUDITSYSCALL) += auditsc.o audit_watch.o audit_fsnotify.o audit_tree.o
obj-$(CONFIG_GCOV_KERNEL) += gcov/
obj-$(CONFIG_KCOV) += kcov.o
obj-$(CONFIG_KPROBES) += kprobes.o
diff --git a/kernel/audit.c b/kernel/audit.c
index 2a8058764aa6..632d36059556 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -60,7 +60,6 @@
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/pid.h>
-#include <linux/slab.h>
#include <linux/audit.h>
@@ -400,7 +399,7 @@ static int audit_log_config_change(char *function_name, u32 new, u32 old,
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
if (unlikely(!ab))
return rc;
- audit_log_format(ab, "%s=%u old=%u", function_name, new, old);
+ audit_log_format(ab, "%s=%u old=%u ", function_name, new, old);
audit_log_session_info(ab);
rc = audit_log_task_context(ab);
if (rc)
@@ -1067,7 +1066,7 @@ static void audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
*ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
if (unlikely(!*ab))
return;
- audit_log_format(*ab, "pid=%d uid=%u", pid, uid);
+ audit_log_format(*ab, "pid=%d uid=%u ", pid, uid);
audit_log_session_info(*ab);
audit_log_task_context(*ab);
}
@@ -1096,10 +1095,11 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
if (audit_enabled == AUDIT_OFF)
return;
+
ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_FEATURE_CHANGE);
if (!ab)
return;
- audit_log_task_info(ab, current);
+ audit_log_task_info(ab);
audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
audit_feature_names[which], !!old_feature, !!new_feature,
!!old_lock, !!new_lock, res);
@@ -2042,7 +2042,7 @@ void audit_log_session_info(struct audit_buffer *ab)
unsigned int sessionid = audit_get_sessionid(current);
uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
- audit_log_format(ab, " auid=%u ses=%u", auid, sessionid);
+ audit_log_format(ab, "auid=%u ses=%u", auid, sessionid);
}
void audit_log_key(struct audit_buffer *ab, char *key)
@@ -2058,11 +2058,13 @@ void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
{
int i;
- audit_log_format(ab, " %s=", prefix);
- CAP_FOR_EACH_U32(i) {
- audit_log_format(ab, "%08x",
- cap->cap[CAP_LAST_U32 - i]);
+ if (cap_isclear(*cap)) {
+ audit_log_format(ab, " %s=0", prefix);
+ return;
}
+ audit_log_format(ab, " %s=", prefix);
+ CAP_FOR_EACH_U32(i)
+ audit_log_format(ab, "%08x", cap->cap[CAP_LAST_U32 - i]);
}
static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
@@ -2177,22 +2179,21 @@ void audit_log_name(struct audit_context *context, struct audit_names *n,
}
/* log the audit_names record type */
- audit_log_format(ab, " nametype=");
switch(n->type) {
case AUDIT_TYPE_NORMAL:
- audit_log_format(ab, "NORMAL");
+ audit_log_format(ab, " nametype=NORMAL");
break;
case AUDIT_TYPE_PARENT:
- audit_log_format(ab, "PARENT");
+ audit_log_format(ab, " nametype=PARENT");
break;
case AUDIT_TYPE_CHILD_DELETE:
- audit_log_format(ab, "DELETE");
+ audit_log_format(ab, " nametype=DELETE");
break;
case AUDIT_TYPE_CHILD_CREATE:
- audit_log_format(ab, "CREATE");
+ audit_log_format(ab, " nametype=CREATE");
break;
default:
- audit_log_format(ab, "UNKNOWN");
+ audit_log_format(ab, " nametype=UNKNOWN");
break;
}
@@ -2247,15 +2248,15 @@ out_null:
audit_log_format(ab, " exe=(null)");
}
-struct tty_struct *audit_get_tty(struct task_struct *tsk)
+struct tty_struct *audit_get_tty(void)
{
struct tty_struct *tty = NULL;
unsigned long flags;
- spin_lock_irqsave(&tsk->sighand->siglock, flags);
- if (tsk->signal)
- tty = tty_kref_get(tsk->signal->tty);
- spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+ spin_lock_irqsave(&current->sighand->siglock, flags);
+ if (current->signal)
+ tty = tty_kref_get(current->signal->tty);
+ spin_unlock_irqrestore(&current->sighand->siglock, flags);
return tty;
}
@@ -2264,25 +2265,24 @@ void audit_put_tty(struct tty_struct *tty)
tty_kref_put(tty);
}
-void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
+void audit_log_task_info(struct audit_buffer *ab)
{
const struct cred *cred;
- char comm[sizeof(tsk->comm)];
+ char comm[sizeof(current->comm)];
struct tty_struct *tty;
if (!ab)
return;
- /* tsk == current */
cred = current_cred();
- tty = audit_get_tty(tsk);
+ tty = audit_get_tty();
audit_log_format(ab,
" ppid=%d pid=%d auid=%u uid=%u gid=%u"
" euid=%u suid=%u fsuid=%u"
" egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
- task_ppid_nr(tsk),
- task_tgid_nr(tsk),
- from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
+ task_ppid_nr(current),
+ task_tgid_nr(current),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
from_kuid(&init_user_ns, cred->uid),
from_kgid(&init_user_ns, cred->gid),
from_kuid(&init_user_ns, cred->euid),
@@ -2292,11 +2292,11 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
from_kgid(&init_user_ns, cred->sgid),
from_kgid(&init_user_ns, cred->fsgid),
tty ? tty_name(tty) : "(none)",
- audit_get_sessionid(tsk));
+ audit_get_sessionid(current));
audit_put_tty(tty);
audit_log_format(ab, " comm=");
- audit_log_untrustedstring(ab, get_task_comm(comm, tsk));
- audit_log_d_path_exe(ab, tsk->mm);
+ audit_log_untrustedstring(ab, get_task_comm(comm, current));
+ audit_log_d_path_exe(ab, current->mm);
audit_log_task_context(ab);
}
EXPORT_SYMBOL(audit_log_task_info);
@@ -2317,7 +2317,7 @@ void audit_log_link_denied(const char *operation)
if (!ab)
return;
audit_log_format(ab, "op=%s", operation);
- audit_log_task_info(ab, current);
+ audit_log_task_info(ab);
audit_log_format(ab, " res=0");
audit_log_end(ab);
}
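
The spacing changes above all serve one convention: audit_log_session_info() no longer emits a leading space, so a caller that precedes it supplies the separator itself. The resulting pattern:

/* Caller supplies the field separator; the helper does not. */
audit_log_format(ab, "pid=%d uid=%u ", pid, uid);	/* note trailing space */
audit_log_session_info(ab);				/* emits "auid=%u ses=%u" */
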
diff --git a/kernel/audit.h b/kernel/audit.h
index 214e14948370..91421679a168 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -210,6 +210,8 @@ struct audit_context {
extern bool audit_ever_enabled;
+extern void audit_log_session_info(struct audit_buffer *ab);
+
extern void audit_copy_inode(struct audit_names *name,
const struct dentry *dentry,
struct inode *inode);
@@ -262,11 +264,11 @@ extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);
extern void audit_log_d_path_exe(struct audit_buffer *ab,
struct mm_struct *mm);
-extern struct tty_struct *audit_get_tty(struct task_struct *tsk);
+extern struct tty_struct *audit_get_tty(void);
extern void audit_put_tty(struct tty_struct *tty);
/* audit watch functions */
-#ifdef CONFIG_AUDIT_WATCH
+#ifdef CONFIG_AUDITSYSCALL
extern void audit_put_watch(struct audit_watch *watch);
extern void audit_get_watch(struct audit_watch *watch);
extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op);
@@ -299,9 +301,9 @@ extern int audit_exe_compare(struct task_struct *tsk, struct audit_fsnotify_mark
#define audit_mark_compare(m, i, d) 0
#define audit_exe_compare(t, m) (-EINVAL)
#define audit_dupe_exe(n, o) (-EINVAL)
-#endif /* CONFIG_AUDIT_WATCH */
+#endif /* CONFIG_AUDITSYSCALL */
-#ifdef CONFIG_AUDIT_TREE
+#ifdef CONFIG_AUDITSYSCALL
extern struct audit_chunk *audit_tree_lookup(const struct inode *inode);
extern void audit_put_chunk(struct audit_chunk *chunk);
extern bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree);
diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
index fba78047fb37..cf4512a33675 100644
--- a/kernel/audit_fsnotify.c
+++ b/kernel/audit_fsnotify.c
@@ -130,10 +130,8 @@ static void audit_mark_log_rule_change(struct audit_fsnotify_mark *audit_mark, c
ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
if (unlikely(!ab))
return;
- audit_log_format(ab, "auid=%u ses=%u op=%s",
- from_kuid(&init_user_ns, audit_get_loginuid(current)),
- audit_get_sessionid(current), op);
- audit_log_format(ab, " path=");
+ audit_log_session_info(ab);
+ audit_log_format(ab, " op=%s path=", op);
audit_log_untrustedstring(ab, audit_mark->path);
audit_log_key(ab, rule->filterkey);
audit_log_format(ab, " list=%d res=1", rule->listnr);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index ea43181cde4a..d4af4d97f847 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -24,9 +24,9 @@ struct audit_tree {
struct audit_chunk {
struct list_head hash;
- struct fsnotify_mark mark;
+ unsigned long key;
+ struct fsnotify_mark *mark;
struct list_head trees; /* with root here */
- int dead;
int count;
atomic_long_t refs;
struct rcu_head head;
@@ -37,13 +37,25 @@ struct audit_chunk {
} owners[];
};
+struct audit_tree_mark {
+ struct fsnotify_mark mark;
+ struct audit_chunk *chunk;
+};
+
static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;
/*
- * One struct chunk is attached to each inode of interest.
- * We replace struct chunk on tagging/untagging.
+ * One struct chunk is attached to each inode of interest through
+ * audit_tree_mark (fsnotify mark). We replace the struct chunk on tagging /
+ * untagging; the mark is stable as long as a chunk is attached to it. The
+ * association between mark and chunk is protected by hash_lock and
+ * audit_tree_group->mark_mutex. Thus as long as we hold
+ * audit_tree_group->mark_mutex and verify via the FSNOTIFY_MARK_FLAG_ATTACHED
+ * flag that the mark is alive, we are sure the mark points to
+ * the current chunk.
+ *
* Rules have pointer to struct audit_tree.
* Rules have struct list_head rlist forming a list of rules over
* the same tree.
@@ -62,8 +74,12 @@ static struct task_struct *prune_thread;
* tree is refcounted; one reference for "some rules on rules_list refer to
* it", one for each chunk with pointer to it.
*
- * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
- * of watch contributes 1 to .refs).
+ * chunk is refcounted by the embedded .refs. The mark associated with the
+ * chunk holds one chunk reference. This reference is dropped either when the
+ * mark is going to be freed (the corresponding inode goes away) or when the
+ * chunk attached to the mark gets replaced. This reference must be dropped via
+ * audit_mark_put_chunk() to make sure the reference is dropped only after RCU
+ * grace period as it protects RCU readers of the hash table.
*
* node.index allows to get from node.list to containing chunk.
* MSB of that sucker is stolen to mark taggings that we might have to
@@ -72,6 +88,7 @@ static struct task_struct *prune_thread;
*/
static struct fsnotify_group *audit_tree_group;
+static struct kmem_cache *audit_tree_mark_cachep __read_mostly;
static struct audit_tree *alloc_tree(const char *s)
{
@@ -131,12 +148,43 @@ static void __put_chunk(struct rcu_head *rcu)
audit_put_chunk(chunk);
}
-static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
+/*
+ * Drop the reference to the chunk that was held by the mark. This is the
+ * reference that gets dropped after we've removed the chunk from the hash
+ * table and we use it to make sure the chunk cannot be freed before the RCU
+ * grace period expires.
+ */
+static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
- struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
call_rcu(&chunk->head, __put_chunk);
}
+static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
+{
+ return container_of(mark, struct audit_tree_mark, mark);
+}
+
+static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
+{
+ return audit_mark(mark)->chunk;
+}
+
+static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
+{
+ kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
+}
+
+static struct fsnotify_mark *alloc_mark(void)
+{
+ struct audit_tree_mark *amark;
+
+ amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
+ if (!amark)
+ return NULL;
+ fsnotify_init_mark(&amark->mark, audit_tree_group);
+ amark->mark.mask = FS_IN_IGNORED;
+ return &amark->mark;
+}
+
static struct audit_chunk *alloc_chunk(int count)
{
struct audit_chunk *chunk;
@@ -156,8 +204,6 @@ static struct audit_chunk *alloc_chunk(int count)
INIT_LIST_HEAD(&chunk->owners[i].list);
chunk->owners[i].index = i;
}
- fsnotify_init_mark(&chunk->mark, audit_tree_group);
- chunk->mark.mask = FS_IN_IGNORED;
return chunk;
}
@@ -172,36 +218,25 @@ static unsigned long inode_to_key(const struct inode *inode)
return (unsigned long)&inode->i_fsnotify_marks;
}
-/*
- * Function to return search key in our hash from chunk. Key 0 is special and
- * should never be present in the hash.
- */
-static unsigned long chunk_to_key(struct audit_chunk *chunk)
-{
- /*
- * We have a reference to the mark so it should be attached to a
- * connector.
- */
- if (WARN_ON_ONCE(!chunk->mark.connector))
- return 0;
- return (unsigned long)chunk->mark.connector->obj;
-}
-
static inline struct list_head *chunk_hash(unsigned long key)
{
unsigned long n = key / L1_CACHE_BYTES;
return chunk_hash_heads + n % HASH_SIZE;
}
-/* hash_lock & entry->lock is held by caller */
+/* hash_lock & mark->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
- unsigned long key = chunk_to_key(chunk);
struct list_head *list;
- if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
- return;
- list = chunk_hash(key);
+ /*
+ * Make sure chunk is fully initialized before making it visible in the
+ * hash. Pairs with a data dependency barrier in READ_ONCE() in
+ * audit_tree_lookup().
+ */
+ smp_wmb();
+ WARN_ON_ONCE(!chunk->key);
+ list = chunk_hash(chunk->key);
list_add_rcu(&chunk->hash, list);
}
@@ -213,7 +248,11 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
struct audit_chunk *p;
list_for_each_entry_rcu(p, list, hash) {
- if (chunk_to_key(p) == key) {
+ /*
+ * We use a data dependency barrier in READ_ONCE() to make sure
+ * the chunk we see is fully initialized.
+ */
+ if (READ_ONCE(p->key) == key) {
atomic_long_inc(&p->refs);
return p;
}
@@ -239,137 +278,159 @@ static struct audit_chunk *find_chunk(struct node *p)
return container_of(p, struct audit_chunk, owners[0]);
}
-static void untag_chunk(struct node *p)
+static void replace_mark_chunk(struct fsnotify_mark *mark,
+ struct audit_chunk *chunk)
+{
+ struct audit_chunk *old;
+
+ assert_spin_locked(&hash_lock);
+ old = mark_chunk(mark);
+ audit_mark(mark)->chunk = chunk;
+ if (chunk)
+ chunk->mark = mark;
+ if (old)
+ old->mark = NULL;
+}
+
+static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{
- struct audit_chunk *chunk = find_chunk(p);
- struct fsnotify_mark *entry = &chunk->mark;
- struct audit_chunk *new = NULL;
struct audit_tree *owner;
- int size = chunk->count - 1;
int i, j;
- fsnotify_get_mark(entry);
+ new->key = old->key;
+ list_splice_init(&old->trees, &new->trees);
+ list_for_each_entry(owner, &new->trees, same_root)
+ owner->root = new;
+ for (i = j = 0; j < old->count; i++, j++) {
+ if (!old->owners[j].owner) {
+ i--;
+ continue;
+ }
+ owner = old->owners[j].owner;
+ new->owners[i].owner = owner;
+ new->owners[i].index = old->owners[j].index - j + i;
+ if (!owner) /* result of earlier fallback */
+ continue;
+ get_tree(owner);
+ list_replace_init(&old->owners[j].list, &new->owners[i].list);
+ }
+ replace_mark_chunk(old->mark, new);
+ /*
+ * Make sure chunk is fully initialized before making it visible in the
+ * hash. Pairs with a data dependency barrier in READ_ONCE() in
+ * audit_tree_lookup().
+ */
+ smp_wmb();
+ list_replace_rcu(&old->hash, &new->hash);
+}
- spin_unlock(&hash_lock);
+static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
+{
+ struct audit_tree *owner = p->owner;
+
+ if (owner->root == chunk) {
+ list_del_init(&owner->same_root);
+ owner->root = NULL;
+ }
+ list_del_init(&p->list);
+ p->owner = NULL;
+ put_tree(owner);
+}
- if (size)
- new = alloc_chunk(size);
+static int chunk_count_trees(struct audit_chunk *chunk)
+{
+ int i;
+ int ret = 0;
- mutex_lock(&entry->group->mark_mutex);
- spin_lock(&entry->lock);
+ for (i = 0; i < chunk->count; i++)
+ if (chunk->owners[i].owner)
+ ret++;
+ return ret;
+}
+
+static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
+{
+ struct audit_chunk *new;
+ int size;
+
+ mutex_lock(&audit_tree_group->mark_mutex);
/*
- * mark_mutex protects mark from getting detached and thus also from
- * mark->connector->obj getting NULL.
+	 * mark_mutex stabilizes the chunk attached to the mark, so we can check
+	 * whether it changed while we had hash_lock dropped.
*/
- if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
- spin_unlock(&entry->lock);
- mutex_unlock(&entry->group->mark_mutex);
- if (new)
- fsnotify_put_mark(&new->mark);
- goto out;
- }
-
- owner = p->owner;
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
+ mark_chunk(mark) != chunk)
+ goto out_mutex;
+ size = chunk_count_trees(chunk);
if (!size) {
- chunk->dead = 1;
spin_lock(&hash_lock);
list_del_init(&chunk->trees);
- if (owner->root == chunk)
- owner->root = NULL;
- list_del_init(&p->list);
list_del_rcu(&chunk->hash);
+ replace_mark_chunk(mark, NULL);
spin_unlock(&hash_lock);
- spin_unlock(&entry->lock);
- mutex_unlock(&entry->group->mark_mutex);
- fsnotify_destroy_mark(entry, audit_tree_group);
- goto out;
+ fsnotify_detach_mark(mark);
+ mutex_unlock(&audit_tree_group->mark_mutex);
+ audit_mark_put_chunk(chunk);
+ fsnotify_free_mark(mark);
+ return;
}
+ new = alloc_chunk(size);
if (!new)
- goto Fallback;
+ goto out_mutex;
- if (fsnotify_add_mark_locked(&new->mark, entry->connector->obj,
- FSNOTIFY_OBJ_TYPE_INODE, 1)) {
- fsnotify_put_mark(&new->mark);
- goto Fallback;
- }
-
- chunk->dead = 1;
spin_lock(&hash_lock);
- list_replace_init(&chunk->trees, &new->trees);
- if (owner->root == chunk) {
- list_del_init(&owner->same_root);
- owner->root = NULL;
- }
-
- for (i = j = 0; j <= size; i++, j++) {
- struct audit_tree *s;
- if (&chunk->owners[j] == p) {
- list_del_init(&p->list);
- i--;
- continue;
- }
- s = chunk->owners[j].owner;
- new->owners[i].owner = s;
- new->owners[i].index = chunk->owners[j].index - j + i;
- if (!s) /* result of earlier fallback */
- continue;
- get_tree(s);
- list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
- }
-
- list_replace_rcu(&chunk->hash, &new->hash);
- list_for_each_entry(owner, &new->trees, same_root)
- owner->root = new;
- spin_unlock(&hash_lock);
- spin_unlock(&entry->lock);
- mutex_unlock(&entry->group->mark_mutex);
- fsnotify_destroy_mark(entry, audit_tree_group);
- fsnotify_put_mark(&new->mark); /* drop initial reference */
- goto out;
-
-Fallback:
- // do the best we can
- spin_lock(&hash_lock);
- if (owner->root == chunk) {
- list_del_init(&owner->same_root);
- owner->root = NULL;
- }
- list_del_init(&p->list);
- p->owner = NULL;
- put_tree(owner);
+ /*
+ * This has to go last when updating chunk as once replace_chunk() is
+ * called, new RCU readers can see the new chunk.
+ */
+ replace_chunk(new, chunk);
spin_unlock(&hash_lock);
- spin_unlock(&entry->lock);
- mutex_unlock(&entry->group->mark_mutex);
-out:
- fsnotify_put_mark(entry);
- spin_lock(&hash_lock);
+ mutex_unlock(&audit_tree_group->mark_mutex);
+ audit_mark_put_chunk(chunk);
+ return;
+
+out_mutex:
+ mutex_unlock(&audit_tree_group->mark_mutex);
}
+/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
- struct fsnotify_mark *entry;
+ struct fsnotify_mark *mark;
struct audit_chunk *chunk = alloc_chunk(1);
- if (!chunk)
+
+ if (!chunk) {
+ mutex_unlock(&audit_tree_group->mark_mutex);
return -ENOMEM;
+ }
- entry = &chunk->mark;
- if (fsnotify_add_inode_mark(entry, inode, 0)) {
- fsnotify_put_mark(entry);
+ mark = alloc_mark();
+ if (!mark) {
+ mutex_unlock(&audit_tree_group->mark_mutex);
+ kfree(chunk);
+ return -ENOMEM;
+ }
+
+ if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
+ mutex_unlock(&audit_tree_group->mark_mutex);
+ fsnotify_put_mark(mark);
+ kfree(chunk);
return -ENOSPC;
}
- spin_lock(&entry->lock);
spin_lock(&hash_lock);
if (tree->goner) {
spin_unlock(&hash_lock);
- chunk->dead = 1;
- spin_unlock(&entry->lock);
- fsnotify_destroy_mark(entry, audit_tree_group);
- fsnotify_put_mark(entry);
+ fsnotify_detach_mark(mark);
+ mutex_unlock(&audit_tree_group->mark_mutex);
+ fsnotify_free_mark(mark);
+ fsnotify_put_mark(mark);
+ kfree(chunk);
return 0;
}
+ replace_mark_chunk(mark, chunk);
chunk->owners[0].index = (1U << 31);
chunk->owners[0].owner = tree;
get_tree(tree);
@@ -378,35 +439,49 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
tree->root = chunk;
list_add(&tree->same_root, &chunk->trees);
}
+ chunk->key = inode_to_key(inode);
+ /*
+ * Inserting into the hash table has to go last as once we do that RCU
+ * readers can see the chunk.
+ */
insert_hash(chunk);
spin_unlock(&hash_lock);
- spin_unlock(&entry->lock);
- fsnotify_put_mark(entry); /* drop initial reference */
+ mutex_unlock(&audit_tree_group->mark_mutex);
+ /*
+	 * Drop our initial reference. When the mark we point to is getting
+	 * freed, we get a notification through the ->freeing_mark callback and
+	 * clean up the chunk pointing to this mark.
+ */
+ fsnotify_put_mark(mark);
return 0;
}
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
- struct fsnotify_mark *old_entry, *chunk_entry;
- struct audit_tree *owner;
+ struct fsnotify_mark *mark;
struct audit_chunk *chunk, *old;
struct node *p;
int n;
- old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
- audit_tree_group);
- if (!old_entry)
+ mutex_lock(&audit_tree_group->mark_mutex);
+ mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
+ if (!mark)
return create_chunk(inode, tree);
- old = container_of(old_entry, struct audit_chunk, mark);
-
+ /*
+	 * The found mark is guaranteed to be attached, and mark_mutex protects
+	 * the mark from getting detached, thus making sure there is a chunk
+	 * attached to the mark.
+ */
/* are we already there? */
spin_lock(&hash_lock);
+ old = mark_chunk(mark);
for (n = 0; n < old->count; n++) {
if (old->owners[n].owner == tree) {
spin_unlock(&hash_lock);
- fsnotify_put_mark(old_entry);
+ mutex_unlock(&audit_tree_group->mark_mutex);
+ fsnotify_put_mark(mark);
return 0;
}
}
@@ -414,83 +489,38 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk = alloc_chunk(old->count + 1);
if (!chunk) {
- fsnotify_put_mark(old_entry);
+ mutex_unlock(&audit_tree_group->mark_mutex);
+ fsnotify_put_mark(mark);
return -ENOMEM;
}
- chunk_entry = &chunk->mark;
-
- mutex_lock(&old_entry->group->mark_mutex);
- spin_lock(&old_entry->lock);
- /*
- * mark_mutex protects mark from getting detached and thus also from
- * mark->connector->obj getting NULL.
- */
- if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
- /* old_entry is being shot, lets just lie */
- spin_unlock(&old_entry->lock);
- mutex_unlock(&old_entry->group->mark_mutex);
- fsnotify_put_mark(old_entry);
- fsnotify_put_mark(&chunk->mark);
- return -ENOENT;
- }
-
- if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
- FSNOTIFY_OBJ_TYPE_INODE, 1)) {
- spin_unlock(&old_entry->lock);
- mutex_unlock(&old_entry->group->mark_mutex);
- fsnotify_put_mark(chunk_entry);
- fsnotify_put_mark(old_entry);
- return -ENOSPC;
- }
-
- /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
- spin_lock(&chunk_entry->lock);
spin_lock(&hash_lock);
-
- /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
if (tree->goner) {
spin_unlock(&hash_lock);
- chunk->dead = 1;
- spin_unlock(&chunk_entry->lock);
- spin_unlock(&old_entry->lock);
- mutex_unlock(&old_entry->group->mark_mutex);
-
- fsnotify_destroy_mark(chunk_entry, audit_tree_group);
-
- fsnotify_put_mark(chunk_entry);
- fsnotify_put_mark(old_entry);
+ mutex_unlock(&audit_tree_group->mark_mutex);
+ fsnotify_put_mark(mark);
+ kfree(chunk);
return 0;
}
- list_replace_init(&old->trees, &chunk->trees);
- for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
- struct audit_tree *s = old->owners[n].owner;
- p->owner = s;
- p->index = old->owners[n].index;
- if (!s) /* result of fallback in untag */
- continue;
- get_tree(s);
- list_replace_init(&old->owners[n].list, &p->list);
- }
+ p = &chunk->owners[chunk->count - 1];
p->index = (chunk->count - 1) | (1U<<31);
p->owner = tree;
get_tree(tree);
list_add(&p->list, &tree->chunks);
- list_replace_rcu(&old->hash, &chunk->hash);
- list_for_each_entry(owner, &chunk->trees, same_root)
- owner->root = chunk;
- old->dead = 1;
if (!tree->root) {
tree->root = chunk;
list_add(&tree->same_root, &chunk->trees);
}
+ /*
+ * This has to go last when updating chunk as once replace_chunk() is
+ * called, new RCU readers can see the new chunk.
+ */
+ replace_chunk(chunk, old);
spin_unlock(&hash_lock);
- spin_unlock(&chunk_entry->lock);
- spin_unlock(&old_entry->lock);
- mutex_unlock(&old_entry->group->mark_mutex);
- fsnotify_destroy_mark(old_entry, audit_tree_group);
- fsnotify_put_mark(chunk_entry); /* drop initial reference */
- fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
+ mutex_unlock(&audit_tree_group->mark_mutex);
+ fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
+ audit_mark_put_chunk(old);
+
return 0;
}
@@ -503,8 +533,7 @@ static void audit_tree_log_remove_rule(struct audit_krule *rule)
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
if (unlikely(!ab))
return;
- audit_log_format(ab, "op=remove_rule");
- audit_log_format(ab, " dir=");
+ audit_log_format(ab, "op=remove_rule dir=");
audit_log_untrustedstring(ab, rule->tree->pathname);
audit_log_key(ab, rule->filterkey);
audit_log_format(ab, " list=%d res=1", rule->listnr);
@@ -534,22 +563,48 @@ static void kill_rules(struct audit_tree *tree)
}
/*
- * finish killing struct audit_tree
+ * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
+ * chunks. The function expects tagged chunks are all at the beginning of the
+ * chunks list.
*/
-static void prune_one(struct audit_tree *victim)
+static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
spin_lock(&hash_lock);
while (!list_empty(&victim->chunks)) {
struct node *p;
+ struct audit_chunk *chunk;
+ struct fsnotify_mark *mark;
+
+ p = list_first_entry(&victim->chunks, struct node, list);
+ /* have we run out of marked? */
+ if (tagged && !(p->index & (1U<<31)))
+ break;
+ chunk = find_chunk(p);
+ mark = chunk->mark;
+ remove_chunk_node(chunk, p);
+ /* Racing with audit_tree_freeing_mark()? */
+ if (!mark)
+ continue;
+ fsnotify_get_mark(mark);
+ spin_unlock(&hash_lock);
- p = list_entry(victim->chunks.next, struct node, list);
+ untag_chunk(chunk, mark);
+ fsnotify_put_mark(mark);
- untag_chunk(p);
+ spin_lock(&hash_lock);
}
spin_unlock(&hash_lock);
put_tree(victim);
}
+/*
+ * finish killing struct audit_tree
+ */
+static void prune_one(struct audit_tree *victim)
+{
+ prune_tree_chunks(victim, false);
+}
+
/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
@@ -569,18 +624,11 @@ static void trim_marked(struct audit_tree *tree)
list_add(p, &tree->chunks);
}
}
+ spin_unlock(&hash_lock);
- while (!list_empty(&tree->chunks)) {
- struct node *node;
-
- node = list_entry(tree->chunks.next, struct node, list);
-
- /* have we run out of marked? */
- if (!(node->index & (1U<<31)))
- break;
+ prune_tree_chunks(tree, true);
- untag_chunk(node);
- }
+ spin_lock(&hash_lock);
if (!tree->root && !tree->goner) {
tree->goner = 1;
spin_unlock(&hash_lock);
@@ -661,7 +709,7 @@ void audit_trim_trees(void)
 /* this could be NULL if the watch is dying elsewhere... */
node->index |= 1U<<31;
if (iterate_mounts(compare_root,
- (void *)chunk_to_key(chunk),
+ (void *)(chunk->key),
root_mnt))
node->index &= ~(1U<<31);
}
@@ -959,10 +1007,6 @@ static void evict_chunk(struct audit_chunk *chunk)
int need_prune = 0;
int n;
- if (chunk->dead)
- return;
-
- chunk->dead = 1;
mutex_lock(&audit_filter_mutex);
spin_lock(&hash_lock);
while (!list_empty(&chunk->trees)) {
@@ -999,17 +1043,27 @@ static int audit_tree_handle_event(struct fsnotify_group *group,
return 0;
}
-static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
+static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group)
{
- struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+ struct audit_chunk *chunk;
- evict_chunk(chunk);
+ mutex_lock(&mark->group->mark_mutex);
+ spin_lock(&hash_lock);
+ chunk = mark_chunk(mark);
+ replace_mark_chunk(mark, NULL);
+ spin_unlock(&hash_lock);
+ mutex_unlock(&mark->group->mark_mutex);
+ if (chunk) {
+ evict_chunk(chunk);
+ audit_mark_put_chunk(chunk);
+ }
/*
* We are guaranteed to have at least one reference to the mark from
* either the inode or the caller of fsnotify_destroy_mark().
*/
- BUG_ON(refcount_read(&entry->refcnt) < 1);
+ BUG_ON(refcount_read(&mark->refcnt) < 1);
}
static const struct fsnotify_ops audit_tree_ops = {
@@ -1022,6 +1076,8 @@ static int __init audit_tree_init(void)
{
int i;
+ audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);
+
audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
if (IS_ERR(audit_tree_group))
audit_panic("cannot initialize fsnotify group for rectree watches");
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 787c7afdf829..20ef9ba134b0 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -245,10 +245,8 @@ static void audit_watch_log_rule_change(struct audit_krule *r, struct audit_watc
ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE);
if (!ab)
return;
- audit_log_format(ab, "auid=%u ses=%u op=%s",
- from_kuid(&init_user_ns, audit_get_loginuid(current)),
- audit_get_sessionid(current), op);
- audit_log_format(ab, " path=");
+ audit_log_session_info(ab);
+ audit_log_format(ab, "op=%s path=", op);
audit_log_untrustedstring(ab, w->path);
audit_log_key(ab, r->filterkey);
audit_log_format(ab, " list=%d res=1", r->listnr);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index b2d1f043f17f..6593a5207fb0 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -200,7 +200,6 @@ static int audit_match_filetype(struct audit_context *ctx, int val)
* References in it _are_ dropped - at the same time we free/drop aux stuff.
*/
-#ifdef CONFIG_AUDIT_TREE
static void audit_set_auditable(struct audit_context *ctx)
{
if (!ctx->prio) {
@@ -245,12 +244,10 @@ static int grow_tree_refs(struct audit_context *ctx)
ctx->tree_count = 31;
return 1;
}
-#endif
static void unroll_tree_refs(struct audit_context *ctx,
struct audit_tree_refs *p, int count)
{
-#ifdef CONFIG_AUDIT_TREE
struct audit_tree_refs *q;
int n;
if (!p) {
@@ -274,7 +271,6 @@ static void unroll_tree_refs(struct audit_context *ctx,
}
ctx->trees = p;
ctx->tree_count = count;
-#endif
}
static void free_tree_refs(struct audit_context *ctx)
@@ -288,7 +284,6 @@ static void free_tree_refs(struct audit_context *ctx)
static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
{
-#ifdef CONFIG_AUDIT_TREE
struct audit_tree_refs *p;
int n;
if (!tree)
@@ -305,7 +300,6 @@ static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
if (audit_tree_match(p->c[n], tree))
return 1;
}
-#endif
return 0;
}
@@ -836,44 +830,6 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
rcu_read_unlock();
}
-/* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */
-static inline struct audit_context *audit_take_context(struct task_struct *tsk,
- int return_valid,
- long return_code)
-{
- struct audit_context *context = tsk->audit_context;
-
- if (!context)
- return NULL;
- context->return_valid = return_valid;
-
- /*
- * we need to fix up the return code in the audit logs if the actual
- * return codes are later going to be fixed up by the arch specific
- * signal handlers
- *
- * This is actually a test for:
- * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) ||
- * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK)
- *
- * but is faster than a bunch of ||
- */
- if (unlikely(return_code <= -ERESTARTSYS) &&
- (return_code >= -ERESTART_RESTARTBLOCK) &&
- (return_code != -ENOIOCTLCMD))
- context->return_code = -EINTR;
- else
- context->return_code = return_code;
-
- if (context->in_syscall && !context->dummy) {
- audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_EXIT]);
- audit_filter_inodes(tsk, context);
- }
-
- audit_set_context(tsk, NULL);
- return context;
-}
-
static inline void audit_proctitle_free(struct audit_context *context)
{
kfree(context->proctitle.value);
@@ -1107,7 +1063,7 @@ static void audit_log_execve_info(struct audit_context *context,
}
/* write as much as we can to the audit log */
- if (len_buf > 0) {
+ if (len_buf >= 0) {
/* NOTE: some magic numbers here - basically if we
* can't fit a reasonable amount of data into the
* existing audit buffer, flush it and start with
@@ -1302,15 +1258,18 @@ static inline int audit_proctitle_rtrim(char *proctitle, int len)
return len;
}
-static void audit_log_proctitle(struct task_struct *tsk,
- struct audit_context *context)
+static void audit_log_proctitle(void)
{
int res;
char *buf;
char *msg = "(null)";
int len = strlen(msg);
+ struct audit_context *context = audit_context();
struct audit_buffer *ab;
+ if (!context || context->dummy)
+ return;
+
ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
if (!ab)
return; /* audit_panic or being filtered */
@@ -1323,7 +1282,7 @@ static void audit_log_proctitle(struct task_struct *tsk,
if (!buf)
goto out;
/* Historically called this from procfs naming */
- res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN);
+ res = get_cmdline(current, buf, MAX_PROCTITLE_AUDIT_LEN);
if (res == 0) {
kfree(buf);
goto out;
@@ -1343,15 +1302,15 @@ out:
audit_log_end(ab);
}
-static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
+static void audit_log_exit(void)
{
int i, call_panic = 0;
+ struct audit_context *context = audit_context();
struct audit_buffer *ab;
struct audit_aux_data *aux;
struct audit_names *n;
- /* tsk == current */
- context->personality = tsk->personality;
+ context->personality = current->personality;
ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL);
if (!ab)
@@ -1373,7 +1332,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
context->argv[3],
context->name_count);
- audit_log_task_info(ab, tsk);
+ audit_log_task_info(ab);
audit_log_key(ab, context->filterkey);
audit_log_end(ab);
@@ -1462,7 +1421,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
audit_log_name(context, n, NULL, i++, &call_panic);
}
- audit_log_proctitle(tsk, context);
+ audit_log_proctitle();
/* Send end of event record to help user space know we are finished */
ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
@@ -1480,22 +1439,31 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
*/
void __audit_free(struct task_struct *tsk)
{
- struct audit_context *context;
+ struct audit_context *context = tsk->audit_context;
- context = audit_take_context(tsk, 0, 0);
if (!context)
return;
- /* Check for system calls that do not go through the exit
- * function (e.g., exit_group), then free context block.
- * We use GFP_ATOMIC here because we might be doing this
- * in the context of the idle thread */
- /* that can happen only if we are called from do_exit() */
- if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT)
- audit_log_exit(context, tsk);
+ /* We are called either by do_exit() or the fork() error handling code;
+ * in the former case tsk == current and in the latter tsk is a
+	 * random task_struct that doesn't have any meaningful data we
+ * need to log via audit_log_exit().
+ */
+ if (tsk == current && !context->dummy && context->in_syscall) {
+ context->return_valid = 0;
+ context->return_code = 0;
+
+ audit_filter_syscall(tsk, context,
+ &audit_filter_list[AUDIT_FILTER_EXIT]);
+ audit_filter_inodes(tsk, context);
+ if (context->current_state == AUDIT_RECORD_CONTEXT)
+ audit_log_exit();
+ }
+
if (!list_empty(&context->killed_trees))
audit_kill_trees(&context->killed_trees);
+ audit_set_context(tsk, NULL);
audit_free_context(context);
}
@@ -1565,17 +1533,40 @@ void __audit_syscall_exit(int success, long return_code)
{
struct audit_context *context;
- if (success)
- success = AUDITSC_SUCCESS;
- else
- success = AUDITSC_FAILURE;
-
- context = audit_take_context(current, success, return_code);
+ context = audit_context();
if (!context)
return;
- if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT)
- audit_log_exit(context, current);
+ if (!context->dummy && context->in_syscall) {
+ if (success)
+ context->return_valid = AUDITSC_SUCCESS;
+ else
+ context->return_valid = AUDITSC_FAILURE;
+
+ /*
+ * we need to fix up the return code in the audit logs if the
+ * actual return codes are later going to be fixed up by the
+ * arch specific signal handlers
+ *
+ * This is actually a test for:
+ * (rc == ERESTARTSYS ) || (rc == ERESTARTNOINTR) ||
+ * (rc == ERESTARTNOHAND) || (rc == ERESTART_RESTARTBLOCK)
+ *
+ * but is faster than a bunch of ||
+ */
+ if (unlikely(return_code <= -ERESTARTSYS) &&
+ (return_code >= -ERESTART_RESTARTBLOCK) &&
+ (return_code != -ENOIOCTLCMD))
+ context->return_code = -EINTR;
+ else
+ context->return_code = return_code;
+
+ audit_filter_syscall(current, context,
+ &audit_filter_list[AUDIT_FILTER_EXIT]);
+ audit_filter_inodes(current, context);
+ if (context->current_state == AUDIT_RECORD_CONTEXT)
+ audit_log_exit();
+ }
context->in_syscall = 0;
context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
@@ -1597,12 +1588,10 @@ void __audit_syscall_exit(int success, long return_code)
kfree(context->filterkey);
context->filterkey = NULL;
}
- audit_set_context(current, context);
}
static inline void handle_one(const struct inode *inode)
{
-#ifdef CONFIG_AUDIT_TREE
struct audit_context *context;
struct audit_tree_refs *p;
struct audit_chunk *chunk;
@@ -1627,12 +1616,10 @@ static inline void handle_one(const struct inode *inode)
return;
}
put_tree_ref(context, chunk);
-#endif
}
static void handle_path(const struct dentry *dentry)
{
-#ifdef CONFIG_AUDIT_TREE
struct audit_context *context;
struct audit_tree_refs *p;
const struct dentry *d, *parent;
@@ -1685,7 +1672,6 @@ retry:
return;
}
rcu_read_unlock();
-#endif
}
static struct audit_names *audit_alloc_name(struct audit_context *context,
@@ -2035,7 +2021,7 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
uid = from_kuid(&init_user_ns, task_uid(current));
oldloginuid = from_kuid(&init_user_ns, koldloginuid);
loginuid = from_kuid(&init_user_ns, kloginuid),
- tty = audit_get_tty(current);
+ tty = audit_get_tty();
audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid);
audit_log_task_context(ab);
@@ -2056,7 +2042,6 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
*/
int audit_set_loginuid(kuid_t loginuid)
{
- struct task_struct *task = current;
unsigned int oldsessionid, sessionid = AUDIT_SID_UNSET;
kuid_t oldloginuid;
int rc;
@@ -2075,8 +2060,8 @@ int audit_set_loginuid(kuid_t loginuid)
sessionid = (unsigned int)atomic_inc_return(&session_id);
}
- task->sessionid = sessionid;
- task->loginuid = loginuid;
+ current->sessionid = sessionid;
+ current->loginuid = loginuid;
out:
audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc);
return rc;
@@ -2513,10 +2498,9 @@ void audit_seccomp_actions_logged(const char *names, const char *old_names,
if (unlikely(!ab))
return;
- audit_log_format(ab, "op=seccomp-logging");
- audit_log_format(ab, " actions=%s", names);
- audit_log_format(ab, " old-actions=%s", old_names);
- audit_log_format(ab, " res=%d", res);
+ audit_log_format(ab,
+ "op=seccomp-logging actions=%s old-actions=%s res=%d",
+ names, old_names, res);
audit_log_end(ab);
}
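
For reference, the unlikely() range test restored in __audit_syscall_exit() above, including the -ENOIOCTLCMD carve-out, is a compact form of exactly this comparison:

/* Equivalent spelled-out form of the restart-code fixup. */
if (return_code == -ERESTARTSYS ||
    return_code == -ERESTARTNOINTR ||
    return_code == -ERESTARTNOHAND ||
    return_code == -ERESTART_RESTARTBLOCK)
	context->return_code = -EINTR;
else
	context->return_code = return_code;
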
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 24583da9ffd1..25632a75d630 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -382,6 +382,7 @@ static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
}
static int array_map_check_btf(const struct bpf_map *map,
+ const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)
{
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index ee4c82667d65..715f9fcf4712 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -5,6 +5,7 @@
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
+#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
@@ -163,7 +164,7 @@
#define BITS_ROUNDUP_BYTES(bits) \
(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
-#define BTF_INFO_MASK 0x0f00ffff
+#define BTF_INFO_MASK 0x8f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)
@@ -259,6 +260,8 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_VOLATILE] = "VOLATILE",
[BTF_KIND_CONST] = "CONST",
[BTF_KIND_RESTRICT] = "RESTRICT",
+ [BTF_KIND_FUNC] = "FUNC",
+ [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
};
struct btf_kind_operations {
@@ -271,6 +274,10 @@ struct btf_kind_operations {
const struct btf_type *struct_type,
const struct btf_member *member,
const struct btf_type *member_type);
+ int (*check_kflag_member)(struct btf_verifier_env *env,
+ const struct btf_type *struct_type,
+ const struct btf_member *member,
+ const struct btf_type *member_type);
void (*log_details)(struct btf_verifier_env *env,
const struct btf_type *t);
void (*seq_show)(const struct btf *btf, const struct btf_type *t,
@@ -281,6 +288,9 @@ struct btf_kind_operations {
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;
+static int btf_resolve(struct btf_verifier_env *env,
+ const struct btf_type *t, u32 type_id);
+
static bool btf_type_is_modifier(const struct btf_type *t)
{
/* Some of them is not strictly a C modifier
@@ -306,15 +316,33 @@ static bool btf_type_is_modifier(const struct btf_type *t)
static bool btf_type_is_void(const struct btf_type *t)
{
- /* void => no type and size info.
- * Hence, FWD is also treated as void.
- */
- return t == &btf_void || BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+ return t == &btf_void;
+}
+
+static bool btf_type_is_fwd(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+}
+
+static bool btf_type_is_func(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
+}
+
+static bool btf_type_is_func_proto(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
+}
+
+static bool btf_type_nosize(const struct btf_type *t)
+{
+ return btf_type_is_void(t) || btf_type_is_fwd(t) ||
+ btf_type_is_func(t) || btf_type_is_func_proto(t);
}
-static bool btf_type_is_void_or_null(const struct btf_type *t)
+static bool btf_type_nosize_or_null(const struct btf_type *t)
{
- return !t || btf_type_is_void(t);
+ return !t || btf_type_nosize(t);
}
/* union is only a special case of struct:
@@ -395,6 +423,25 @@ static u16 btf_type_vlen(const struct btf_type *t)
return BTF_INFO_VLEN(t->info);
}
+static bool btf_type_kflag(const struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info);
+}
+
+static u32 btf_member_bit_offset(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
+ : member->offset;
+}
+
+static u32 btf_member_bitfield_size(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
+ : 0;
+}
+
static u32 btf_type_int(const struct btf_type *t)
{
return *(u32 *)(t + 1);
@@ -420,13 +467,37 @@ static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
return kind_ops[BTF_INFO_KIND(t->info)];
}
-static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
+bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
return BTF_STR_OFFSET_VALID(offset) &&
offset < btf->hdr.str_len;
}
-static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
+/* Only a C-style identifier is permitted. This can be relaxed if
+ * necessary.
+ */
+static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
+{
+ /* offset must be valid */
+ const char *src = &btf->strings[offset];
+ const char *src_limit;
+
+ if (!isalpha(*src) && *src != '_')
+ return false;
+
+ /* set a limit on identifier length */
+ src_limit = src + KSYM_NAME_LEN;
+ src++;
+ while (*src && src < src_limit) {
+ if (!isalnum(*src) && *src != '_')
+ return false;
+ src++;
+ }
+
+ return !*src;
+}
+
+static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
{
if (!offset)
return "(anon)";
@@ -436,7 +507,15 @@ static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
return "(invalid-name-offset)";
}
-static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
+const char *btf_name_by_offset(const struct btf *btf, u32 offset)
+{
+ if (offset < btf->hdr.str_len)
+ return &btf->strings[offset];
+
+ return NULL;
+}
+
+const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
if (type_id > btf->nr_types)
return NULL;
@@ -466,6 +545,47 @@ static bool btf_type_int_is_regular(const struct btf_type *t)
return true;
}
+/*
+ * Check that a given struct member is a regular int with the expected
+ * offset and size.
+ */
+bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
+ const struct btf_member *m,
+ u32 expected_offset, u32 expected_size)
+{
+ const struct btf_type *t;
+ u32 id, int_data;
+ u8 nr_bits;
+
+ id = m->type;
+ t = btf_type_id_size(btf, &id, NULL);
+ if (!t || !btf_type_is_int(t))
+ return false;
+
+ int_data = btf_type_int(t);
+ nr_bits = BTF_INT_BITS(int_data);
+ if (btf_type_kflag(s)) {
+ u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
+ u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
+
+ /* if kflag set, int should be a regular int and
+ * bit offset should be at byte boundary.
+ */
+ return !bitfield_size &&
+ BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
+ BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
+ }
+
+ if (BTF_INT_OFFSET(int_data) ||
+ BITS_PER_BYTE_MASKED(m->offset) ||
+ BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
+ BITS_PER_BYTE_MASKED(nr_bits) ||
+ BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
+ return false;
+
+ return true;
+}
+
__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
const char *fmt, ...)
{
@@ -506,7 +626,7 @@ __printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
__btf_verifier_log(log, "[%u] %s %s%s",
env->log_type_id,
btf_kind_str[kind],
- btf_name_by_offset(btf, t->name_off),
+ __btf_name_by_offset(btf, t->name_off),
log_details ? " " : "");
if (log_details)
@@ -549,9 +669,17 @@ static void btf_verifier_log_member(struct btf_verifier_env *env,
if (env->phase != CHECK_META)
btf_verifier_log_type(env, struct_type, NULL);
- __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
- btf_name_by_offset(btf, member->name_off),
- member->type, member->offset);
+ if (btf_type_kflag(struct_type))
+ __btf_verifier_log(log,
+ "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
+ __btf_name_by_offset(btf, member->name_off),
+ member->type,
+ BTF_MEMBER_BITFIELD_SIZE(member->offset),
+ BTF_MEMBER_BIT_OFFSET(member->offset));
+ else
+ __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
+ __btf_name_by_offset(btf, member->name_off),
+ member->type, member->offset);
if (fmt && *fmt) {
__btf_verifier_log(log, " ");
@@ -740,11 +868,15 @@ static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
/* int, enum or void is a sink */
return !btf_type_needs_resolve(next_type);
case RESOLVE_PTR:
- /* int, enum, void, struct or array is a sink for ptr */
+ /* int, enum, void, struct, array, func or func_proto is a sink
+ * for ptr
+ */
return !btf_type_is_modifier(next_type) &&
!btf_type_is_ptr(next_type);
case RESOLVE_STRUCT_OR_ARRAY:
- /* int, enum, void or ptr is a sink for struct and array */
+ /* int, enum, void, ptr, func or func_proto is a sink
+ * for struct and array
+ */
return !btf_type_is_modifier(next_type) &&
!btf_type_is_array(next_type) &&
!btf_type_is_struct(next_type);
@@ -826,7 +958,7 @@ const struct btf_type *btf_type_id_size(const struct btf *btf,
u32 size = 0;
size_type = btf_type_by_id(btf, size_type_id);
- if (btf_type_is_void_or_null(size_type))
+ if (btf_type_nosize_or_null(size_type))
return NULL;
if (btf_type_has_size(size_type)) {
@@ -842,7 +974,7 @@ const struct btf_type *btf_type_id_size(const struct btf *btf,
size = btf->resolved_sizes[size_type_id];
size_type_id = btf->resolved_ids[size_type_id];
size_type = btf_type_by_id(btf, size_type_id);
- if (btf_type_is_void(size_type))
+ if (btf_type_nosize_or_null(size_type))
return NULL;
}
@@ -863,6 +995,38 @@ static int btf_df_check_member(struct btf_verifier_env *env,
return -EINVAL;
}
+static int btf_df_check_kflag_member(struct btf_verifier_env *env,
+ const struct btf_type *struct_type,
+ const struct btf_member *member,
+ const struct btf_type *member_type)
+{
+ btf_verifier_log_basic(env, struct_type,
+ "Unsupported check_kflag_member");
+ return -EINVAL;
+}
+
+/* Used for ptr, array and struct/union type members.
+ * int, enum and modifier types have their specific callback functions.
+ */
+static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
+ const struct btf_type *struct_type,
+ const struct btf_member *member,
+ const struct btf_type *member_type)
+{
+ if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
+ btf_verifier_log_member(env, struct_type, member,
+ "Invalid member bitfield_size");
+ return -EINVAL;
+ }
+
+ /* bitfield size is 0, so member->offset represents bit offset only.
+ * It is safe to call the non-kflag check_member variants.
+ */
+ return btf_type_ops(member_type)->check_member(env, struct_type,
+ member,
+ member_type);
+}
+
static int btf_df_resolve(struct btf_verifier_env *env,
const struct resolve_vertex *v)
{
@@ -915,6 +1079,62 @@ static int btf_int_check_member(struct btf_verifier_env *env,
return 0;
}
+static int btf_int_check_kflag_member(struct btf_verifier_env *env,
+ const struct btf_type *struct_type,
+ const struct btf_member *member,
+ const struct btf_type *member_type)
+{
+ u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
+ u32 int_data = btf_type_int(member_type);
+ u32 struct_size = struct_type->size;
+ u32 nr_copy_bits;
+
+ /* a regular int type is required for the kflag int member */
+ if (!btf_type_int_is_regular(member_type)) {
+ btf_verifier_log_member(env, struct_type, member,
+ "Invalid member base type");
+ return -EINVAL;
+ }
+
+ /* check sanity of bitfield size */
+ nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
+ struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
+ nr_int_data_bits = BTF_INT_BITS(int_data);
+ if (!nr_bits) {
+ /* Not a bitfield member; the member offset must be at a byte
+ * boundary.
+ */
+ if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
+ btf_verifier_log_member(env, struct_type, member,
+ "Invalid member offset");
+ return -EINVAL;
+ }
+
+ nr_bits = nr_int_data_bits;
+ } else if (nr_bits > nr_int_data_bits) {
+ btf_verifier_log_member(env, struct_type, member,
+ "Invalid member bitfield_size");
+ return -EINVAL;
+ }
+
+ bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
+ nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
+ if (nr_copy_bits > BITS_PER_U64) {
+ btf_verifier_log_member(env, struct_type, member,
+ "nr_copy_bits exceeds 64");
+ return -EINVAL;
+ }
+
+ if (struct_size < bytes_offset ||
+ struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
+ btf_verifier_log_member(env, struct_type, member,
+ "Member exceeds struct_size");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
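
A worked example of the bounds math in btf_int_check_kflag_member(), using the same BITS_* helpers btf.c defines: a 5-bit bitfield at struct bit offset 13 starts in byte 1 and, together with the 5 offset bits inside that starting byte, spans 10 bits, i.e. 2 copy bytes.

#include <stdio.h>

/* Mirrors the BITS_* macros in kernel/bpf/btf.c */
#define BITS_PER_BYTE_MASKED(b)	((b) & 7)
#define BITS_ROUNDDOWN_BYTES(b)	((b) >> 3)
#define BITS_ROUNDUP_BYTES(b)	(BITS_ROUNDDOWN_BYTES(b) + !!BITS_PER_BYTE_MASKED(b))

int main(void)
{
	unsigned int struct_bits_off = 13, nr_bits = 5;
	unsigned int bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	unsigned int nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);

	printf("bytes_offset=%u copy_bytes=%u\n",
	       bytes_offset, BITS_ROUNDUP_BYTES(nr_copy_bits));
	return 0;	/* prints: bytes_offset=1 copy_bytes=2 */
}
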
static s32 btf_int_check_meta(struct btf_verifier_env *env,
const struct btf_type *t,
u32 meta_left)
@@ -934,6 +1154,11 @@ static s32 btf_int_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ if (btf_type_kflag(t)) {
+ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
+ return -EINVAL;
+ }
+
int_data = btf_type_int(t);
if (int_data & ~BTF_INT_MASK) {
btf_verifier_log_basic(env, t, "Invalid int_data:%x",
@@ -986,26 +1211,16 @@ static void btf_int_log(struct btf_verifier_env *env,
btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
}
-static void btf_int_bits_seq_show(const struct btf *btf,
- const struct btf_type *t,
- void *data, u8 bits_offset,
- struct seq_file *m)
+static void btf_bitfield_seq_show(void *data, u8 bits_offset,
+ u8 nr_bits, struct seq_file *m)
{
u16 left_shift_bits, right_shift_bits;
- u32 int_data = btf_type_int(t);
- u8 nr_bits = BTF_INT_BITS(int_data);
- u8 total_bits_offset;
u8 nr_copy_bytes;
u8 nr_copy_bits;
u64 print_num;
- /*
- * bits_offset is at most 7.
- * BTF_INT_OFFSET() cannot exceed 64 bits.
- */
- total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
- data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
- bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
+ data += BITS_ROUNDDOWN_BYTES(bits_offset);
+ bits_offset = BITS_PER_BYTE_MASKED(bits_offset);
nr_copy_bits = nr_bits + bits_offset;
nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
@@ -1025,6 +1240,24 @@ static void btf_int_bits_seq_show(const struct btf *btf,
seq_printf(m, "0x%llx", print_num);
}
+
+static void btf_int_bits_seq_show(const struct btf *btf,
+ const struct btf_type *t,
+ void *data, u8 bits_offset,
+ struct seq_file *m)
+{
+ u32 int_data = btf_type_int(t);
+ u8 nr_bits = BTF_INT_BITS(int_data);
+ u8 total_bits_offset;
+
+ /*
+ * bits_offset is at most 7.
+ * BTF_INT_OFFSET() cannot exceed 64 bits.
+ */
+ total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
+ btf_bitfield_seq_show(data, total_bits_offset, nr_bits, m);
+}
+
static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
u32 type_id, void *data, u8 bits_offset,
struct seq_file *m)
@@ -1074,6 +1307,7 @@ static const struct btf_kind_operations int_ops = {
.check_meta = btf_int_check_meta,
.resolve = btf_df_resolve,
.check_member = btf_int_check_member,
+ .check_kflag_member = btf_int_check_kflag_member,
.log_details = btf_int_log,
.seq_show = btf_int_seq_show,
};
@@ -1103,6 +1337,31 @@ static int btf_modifier_check_member(struct btf_verifier_env *env,
resolved_type);
}
+static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
+ const struct btf_type *struct_type,
+ const struct btf_member *member,
+ const struct btf_type *member_type)
+{
+ const struct btf_type *resolved_type;
+ u32 resolved_type_id = member->type;
+ struct btf_member resolved_member;
+ struct btf *btf = env->btf;
+
+ resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
+ if (!resolved_type) {
+ btf_verifier_log_member(env, struct_type, member,
+ "Invalid member");
+ return -EINVAL;
+ }
+
+ resolved_member = *member;
+ resolved_member.type = resolved_type_id;
+
+ return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
+ &resolved_member,
+ resolved_type);
+}
+
static int btf_ptr_check_member(struct btf_verifier_env *env,
const struct btf_type *struct_type,
const struct btf_member *member,
@@ -1138,11 +1397,32 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ if (btf_type_kflag(t)) {
+ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
+ return -EINVAL;
+ }
+
if (!BTF_TYPE_ID_VALID(t->type)) {
btf_verifier_log_type(env, t, "Invalid type_id");
return -EINVAL;
}
+ /* typedef type must have a valid name, and the other ref types
+ * (volatile, const, restrict) must have a null name.
+ */
+ if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
+ if (!t->name_off ||
+ !btf_name_valid_identifier(env->btf, t->name_off)) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+ } else {
+ if (t->name_off) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+ }
+
btf_verifier_log_type(env, t, NULL);
return 0;
@@ -1163,10 +1443,6 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
return -EINVAL;
}
- /* "typedef void new_void", "const void"...etc */
- if (btf_type_is_void(next_type))
- goto resolved;
-
if (!env_type_is_resolve_sink(env, next_type) &&
!env_type_is_resolved(env, next_type_id))
return env_stack_push(env, next_type, next_type_id);
@@ -1177,13 +1453,18 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
* save us a few type-following when we use it later (e.g. in
* pretty print).
*/
- if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
- !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
- btf_verifier_log_type(env, v->t, "Invalid type_id");
- return -EINVAL;
+ if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) {
+ if (env_type_is_resolved(env, next_type_id))
+ next_type = btf_type_id_resolve(btf, &next_type_id);
+
+ /* "typedef void new_void", "const void"...etc */
+ if (!btf_type_is_void(next_type) &&
+ !btf_type_is_fwd(next_type)) {
+ btf_verifier_log_type(env, v->t, "Invalid type_id");
+ return -EINVAL;
+ }
}
-resolved:
env_stack_pop_resolved(env, next_type_id, next_type_size);
return 0;
@@ -1196,7 +1477,6 @@ static int btf_ptr_resolve(struct btf_verifier_env *env,
const struct btf_type *t = v->t;
u32 next_type_id = t->type;
struct btf *btf = env->btf;
- u32 next_type_size = 0;
next_type = btf_type_by_id(btf, next_type_id);
if (!next_type) {
@@ -1204,10 +1484,6 @@ static int btf_ptr_resolve(struct btf_verifier_env *env,
return -EINVAL;
}
- /* "void *" */
- if (btf_type_is_void(next_type))
- goto resolved;
-
if (!env_type_is_resolve_sink(env, next_type) &&
!env_type_is_resolved(env, next_type_id))
return env_stack_push(env, next_type, next_type_id);
@@ -1234,13 +1510,18 @@ static int btf_ptr_resolve(struct btf_verifier_env *env,
resolved_type_id);
}
- if (!btf_type_id_size(btf, &next_type_id, &next_type_size) &&
- !btf_type_is_void(btf_type_id_resolve(btf, &next_type_id))) {
- btf_verifier_log_type(env, v->t, "Invalid type_id");
- return -EINVAL;
+ if (!btf_type_id_size(btf, &next_type_id, NULL)) {
+ if (env_type_is_resolved(env, next_type_id))
+ next_type = btf_type_id_resolve(btf, &next_type_id);
+
+ if (!btf_type_is_void(next_type) &&
+ !btf_type_is_fwd(next_type) &&
+ !btf_type_is_func_proto(next_type)) {
+ btf_verifier_log_type(env, v->t, "Invalid type_id");
+ return -EINVAL;
+ }
}
-resolved:
env_stack_pop_resolved(env, next_type_id, 0);
return 0;
@@ -1274,6 +1555,7 @@ static struct btf_kind_operations modifier_ops = {
.check_meta = btf_ref_type_check_meta,
.resolve = btf_modifier_resolve,
.check_member = btf_modifier_check_member,
+ .check_kflag_member = btf_modifier_check_kflag_member,
.log_details = btf_ref_type_log,
.seq_show = btf_modifier_seq_show,
};
@@ -1282,6 +1564,7 @@ static struct btf_kind_operations ptr_ops = {
.check_meta = btf_ref_type_check_meta,
.resolve = btf_ptr_resolve,
.check_member = btf_ptr_check_member,
+ .check_kflag_member = btf_generic_check_kflag_member,
.log_details = btf_ref_type_log,
.seq_show = btf_ptr_seq_show,
};
@@ -1300,16 +1583,30 @@ static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* fwd type must have a valid name */
+ if (!t->name_off ||
+ !btf_name_valid_identifier(env->btf, t->name_off)) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
btf_verifier_log_type(env, t, NULL);
return 0;
}
+static void btf_fwd_type_log(struct btf_verifier_env *env,
+ const struct btf_type *t)
+{
+ btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
+}
+
static struct btf_kind_operations fwd_ops = {
.check_meta = btf_fwd_check_meta,
.resolve = btf_df_resolve,
.check_member = btf_df_check_member,
- .log_details = btf_ref_type_log,
+ .check_kflag_member = btf_df_check_kflag_member,
+ .log_details = btf_fwd_type_log,
.seq_show = btf_df_seq_show,
};
@@ -1356,11 +1653,22 @@ static s32 btf_array_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* array type should not have a name */
+ if (t->name_off) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
if (btf_type_vlen(t)) {
btf_verifier_log_type(env, t, "vlen != 0");
return -EINVAL;
}
+ if (btf_type_kflag(t)) {
+ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
+ return -EINVAL;
+ }
+
if (t->size) {
btf_verifier_log_type(env, t, "size != 0");
return -EINVAL;
@@ -1396,7 +1704,7 @@ static int btf_array_resolve(struct btf_verifier_env *env,
/* Check array->index_type */
index_type_id = array->index_type;
index_type = btf_type_by_id(btf, index_type_id);
- if (btf_type_is_void_or_null(index_type)) {
+ if (btf_type_nosize_or_null(index_type)) {
btf_verifier_log_type(env, v->t, "Invalid index");
return -EINVAL;
}
@@ -1415,7 +1723,7 @@ static int btf_array_resolve(struct btf_verifier_env *env,
/* Check array->type */
elem_type_id = array->type;
elem_type = btf_type_by_id(btf, elem_type_id);
- if (btf_type_is_void_or_null(elem_type)) {
+ if (btf_type_nosize_or_null(elem_type)) {
btf_verifier_log_type(env, v->t,
"Invalid elem");
return -EINVAL;
@@ -1484,6 +1792,7 @@ static struct btf_kind_operations array_ops = {
.check_meta = btf_array_check_meta,
.resolve = btf_array_resolve,
.check_member = btf_array_check_member,
+ .check_kflag_member = btf_generic_check_kflag_member,
.log_details = btf_array_log,
.seq_show = btf_array_seq_show,
};
@@ -1522,6 +1831,7 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
u32 meta_needed, last_offset;
struct btf *btf = env->btf;
u32 struct_size = t->size;
+ u32 offset;
u16 i;
meta_needed = btf_type_vlen(t) * sizeof(*member);
@@ -1532,6 +1842,13 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* struct type must either have no name or a valid one */
+ if (t->name_off &&
+ !btf_name_valid_identifier(env->btf, t->name_off)) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
btf_verifier_log_type(env, t, NULL);
last_offset = 0;
@@ -1543,6 +1860,12 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* struct member must either have no name or a valid one */
+ if (member->name_off &&
+ !btf_name_valid_identifier(btf, member->name_off)) {
+ btf_verifier_log_member(env, t, member, "Invalid name");
+ return -EINVAL;
+ }
/* A member cannot be in type void */
if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
btf_verifier_log_member(env, t, member,
@@ -1550,7 +1873,8 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
- if (is_union && member->offset) {
+ offset = btf_member_bit_offset(t, member);
+ if (is_union && offset) {
btf_verifier_log_member(env, t, member,
"Invalid member bits_offset");
return -EINVAL;
@@ -1560,20 +1884,20 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
* ">" instead of ">=" because the last member could be
* "char a[0];"
*/
- if (last_offset > member->offset) {
+ if (last_offset > offset) {
btf_verifier_log_member(env, t, member,
"Invalid member bits_offset");
return -EINVAL;
}
- if (BITS_ROUNDUP_BYTES(member->offset) > struct_size) {
+ if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
btf_verifier_log_member(env, t, member,
- "Memmber bits_offset exceeds its struct size");
+ "Member bits_offset exceeds its struct size");
return -EINVAL;
}
btf_verifier_log_member(env, t, member, NULL);
- last_offset = member->offset;
+ last_offset = offset;
}
return meta_needed;
@@ -1603,9 +1927,14 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
last_member_type = btf_type_by_id(env->btf,
last_member_type_id);
- err = btf_type_ops(last_member_type)->check_member(env, v->t,
- last_member,
- last_member_type);
+ if (btf_type_kflag(v->t))
+ err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
+ last_member,
+ last_member_type);
+ else
+ err = btf_type_ops(last_member_type)->check_member(env, v->t,
+ last_member,
+ last_member_type);
if (err)
return err;
}
@@ -1615,7 +1944,7 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
const struct btf_type *member_type = btf_type_by_id(env->btf,
member_type_id);
- if (btf_type_is_void_or_null(member_type)) {
+ if (btf_type_nosize_or_null(member_type)) {
btf_verifier_log_member(env, v->t, member,
"Invalid member");
return -EINVAL;
@@ -1627,9 +1956,14 @@ static int btf_struct_resolve(struct btf_verifier_env *env,
return env_stack_push(env, member_type, member_type_id);
}
- err = btf_type_ops(member_type)->check_member(env, v->t,
- member,
- member_type);
+ if (btf_type_kflag(v->t))
+ err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
+ member,
+ member_type);
+ else
+ err = btf_type_ops(member_type)->check_member(env, v->t,
+ member,
+ member_type);
if (err)
return err;
}
@@ -1657,17 +1991,26 @@ static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
for_each_member(i, t, member) {
const struct btf_type *member_type = btf_type_by_id(btf,
member->type);
- u32 member_offset = member->offset;
- u32 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
- u8 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
const struct btf_kind_operations *ops;
+ u32 member_offset, bitfield_size;
+ u32 bytes_offset;
+ u8 bits8_offset;
if (i)
seq_puts(m, seq);
- ops = btf_type_ops(member_type);
- ops->seq_show(btf, member_type, member->type,
- data + bytes_offset, bits8_offset, m);
+ member_offset = btf_member_bit_offset(t, member);
+ bitfield_size = btf_member_bitfield_size(t, member);
+ if (bitfield_size) {
+ btf_bitfield_seq_show(data, member_offset,
+ bitfield_size, m);
+ } else {
+ bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
+ bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
+ ops = btf_type_ops(member_type);
+ ops->seq_show(btf, member_type, member->type,
+ data + bytes_offset, bits8_offset, m);
+ }
}
seq_puts(m, "}");
}
@@ -1676,6 +2019,7 @@ static struct btf_kind_operations struct_ops = {
.check_meta = btf_struct_check_meta,
.resolve = btf_struct_resolve,
.check_member = btf_struct_check_member,
+ .check_kflag_member = btf_generic_check_kflag_member,
.log_details = btf_struct_log,
.seq_show = btf_struct_seq_show,
};
@@ -1705,6 +2049,41 @@ static int btf_enum_check_member(struct btf_verifier_env *env,
return 0;
}
+static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
+ const struct btf_type *struct_type,
+ const struct btf_member *member,
+ const struct btf_type *member_type)
+{
+ u32 struct_bits_off, nr_bits, bytes_end, struct_size;
+ u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
+
+ struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
+ nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
+ if (!nr_bits) {
+ if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
+ btf_verifier_log_member(env, struct_type, member,
+ "Member is not byte aligned");
+ return -EINVAL;
+ }
+
+ nr_bits = int_bitsize;
+ } else if (nr_bits > int_bitsize) {
+ btf_verifier_log_member(env, struct_type, member,
+ "Invalid member bitfield_size");
+ return -EINVAL;
+ }
+
+ struct_size = struct_type->size;
+ bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
+ if (struct_size < bytes_end) {
+ btf_verifier_log_member(env, struct_type, member,
+ "Member exceeds struct_size");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static s32 btf_enum_check_meta(struct btf_verifier_env *env,
const struct btf_type *t,
u32 meta_left)
@@ -1724,12 +2103,24 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ if (btf_type_kflag(t)) {
+ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
+ return -EINVAL;
+ }
+
if (t->size != sizeof(int)) {
btf_verifier_log_type(env, t, "Expected size:%zu",
sizeof(int));
return -EINVAL;
}
+ /* enum type must either have no name or a valid one */
+ if (t->name_off &&
+ !btf_name_valid_identifier(env->btf, t->name_off)) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
btf_verifier_log_type(env, t, NULL);
for (i = 0; i < nr_enums; i++) {
@@ -1739,8 +2130,16 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
+ /* enum member must have a valid name */
+ if (!enums[i].name_off ||
+ !btf_name_valid_identifier(btf, enums[i].name_off)) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
btf_verifier_log(env, "\t%s val=%d\n",
- btf_name_by_offset(btf, enums[i].name_off),
+ __btf_name_by_offset(btf, enums[i].name_off),
enums[i].val);
}
@@ -1764,7 +2163,8 @@ static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
for (i = 0; i < nr_enums; i++) {
if (v == enums[i].val) {
seq_printf(m, "%s",
- btf_name_by_offset(btf, enums[i].name_off));
+ __btf_name_by_offset(btf,
+ enums[i].name_off));
return;
}
}
@@ -1776,10 +2176,249 @@ static struct btf_kind_operations enum_ops = {
.check_meta = btf_enum_check_meta,
.resolve = btf_df_resolve,
.check_member = btf_enum_check_member,
+ .check_kflag_member = btf_enum_check_kflag_member,
.log_details = btf_enum_log,
.seq_show = btf_enum_seq_show,
};
+static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
+ const struct btf_type *t,
+ u32 meta_left)
+{
+ u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
+
+ if (meta_left < meta_needed) {
+ btf_verifier_log_basic(env, t,
+ "meta_left:%u meta_needed:%u",
+ meta_left, meta_needed);
+ return -EINVAL;
+ }
+
+ if (t->name_off) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
+ if (btf_type_kflag(t)) {
+ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
+ return -EINVAL;
+ }
+
+ btf_verifier_log_type(env, t, NULL);
+
+ return meta_needed;
+}
+
+static void btf_func_proto_log(struct btf_verifier_env *env,
+ const struct btf_type *t)
+{
+ const struct btf_param *args = (const struct btf_param *)(t + 1);
+ u16 nr_args = btf_type_vlen(t), i;
+
+ btf_verifier_log(env, "return=%u args=(", t->type);
+ if (!nr_args) {
+ btf_verifier_log(env, "void");
+ goto done;
+ }
+
+ if (nr_args == 1 && !args[0].type) {
+ /* Only one vararg */
+ btf_verifier_log(env, "vararg");
+ goto done;
+ }
+
+ btf_verifier_log(env, "%u %s", args[0].type,
+ __btf_name_by_offset(env->btf,
+ args[0].name_off));
+ for (i = 1; i < nr_args - 1; i++)
+ btf_verifier_log(env, ", %u %s", args[i].type,
+ __btf_name_by_offset(env->btf,
+ args[i].name_off));
+
+ if (nr_args > 1) {
+ const struct btf_param *last_arg = &args[nr_args - 1];
+
+ if (last_arg->type)
+ btf_verifier_log(env, ", %u %s", last_arg->type,
+ __btf_name_by_offset(env->btf,
+ last_arg->name_off));
+ else
+ btf_verifier_log(env, ", vararg");
+ }
+
+done:
+ btf_verifier_log(env, ")");
+}
+
+static struct btf_kind_operations func_proto_ops = {
+ .check_meta = btf_func_proto_check_meta,
+ .resolve = btf_df_resolve,
+ /*
+ * BTF_KIND_FUNC_PROTO cannot be directly referred to by
+ * a struct's member.
+ *
+ * It should be a function pointer instead.
+ * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
+ *
+ * Hence, there is no btf_func_check_member().
+ */
+ .check_member = btf_df_check_member,
+ .check_kflag_member = btf_df_check_kflag_member,
+ .log_details = btf_func_proto_log,
+ .seq_show = btf_df_seq_show,
+};
+
+static s32 btf_func_check_meta(struct btf_verifier_env *env,
+ const struct btf_type *t,
+ u32 meta_left)
+{
+ if (!t->name_off ||
+ !btf_name_valid_identifier(env->btf, t->name_off)) {
+ btf_verifier_log_type(env, t, "Invalid name");
+ return -EINVAL;
+ }
+
+ if (btf_type_vlen(t)) {
+ btf_verifier_log_type(env, t, "vlen != 0");
+ return -EINVAL;
+ }
+
+ if (btf_type_kflag(t)) {
+ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
+ return -EINVAL;
+ }
+
+ btf_verifier_log_type(env, t, NULL);
+
+ return 0;
+}
+
+static struct btf_kind_operations func_ops = {
+ .check_meta = btf_func_check_meta,
+ .resolve = btf_df_resolve,
+ .check_member = btf_df_check_member,
+ .check_kflag_member = btf_df_check_kflag_member,
+ .log_details = btf_ref_type_log,
+ .seq_show = btf_df_seq_show,
+};
+
+static int btf_func_proto_check(struct btf_verifier_env *env,
+ const struct btf_type *t)
+{
+ const struct btf_type *ret_type;
+ const struct btf_param *args;
+ const struct btf *btf;
+ u16 nr_args, i;
+ int err;
+
+ btf = env->btf;
+ args = (const struct btf_param *)(t + 1);
+ nr_args = btf_type_vlen(t);
+
+ /* Check func return type which could be "void" (t->type == 0) */
+ if (t->type) {
+ u32 ret_type_id = t->type;
+
+ ret_type = btf_type_by_id(btf, ret_type_id);
+ if (!ret_type) {
+ btf_verifier_log_type(env, t, "Invalid return type");
+ return -EINVAL;
+ }
+
+ if (btf_type_needs_resolve(ret_type) &&
+ !env_type_is_resolved(env, ret_type_id)) {
+ err = btf_resolve(env, ret_type, ret_type_id);
+ if (err)
+ return err;
+ }
+
+ /* Ensure the return type is a type that has a size */
+ if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
+ btf_verifier_log_type(env, t, "Invalid return type");
+ return -EINVAL;
+ }
+ }
+
+ if (!nr_args)
+ return 0;
+
+ /* Last func arg type_id could be 0 if it is a vararg */
+ if (!args[nr_args - 1].type) {
+ if (args[nr_args - 1].name_off) {
+ btf_verifier_log_type(env, t, "Invalid arg#%u",
+ nr_args);
+ return -EINVAL;
+ }
+ nr_args--;
+ }
+
+ err = 0;
+ for (i = 0; i < nr_args; i++) {
+ const struct btf_type *arg_type;
+ u32 arg_type_id;
+
+ arg_type_id = args[i].type;
+ arg_type = btf_type_by_id(btf, arg_type_id);
+ if (!arg_type) {
+ btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
+ err = -EINVAL;
+ break;
+ }
+
+ if (args[i].name_off &&
+ (!btf_name_offset_valid(btf, args[i].name_off) ||
+ !btf_name_valid_identifier(btf, args[i].name_off))) {
+ btf_verifier_log_type(env, t,
+ "Invalid arg#%u", i + 1);
+ err = -EINVAL;
+ break;
+ }
+
+ if (btf_type_needs_resolve(arg_type) &&
+ !env_type_is_resolved(env, arg_type_id)) {
+ err = btf_resolve(env, arg_type, arg_type_id);
+ if (err)
+ break;
+ }
+
+ if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
+ btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int btf_func_check(struct btf_verifier_env *env,
+ const struct btf_type *t)
+{
+ const struct btf_type *proto_type;
+ const struct btf_param *args;
+ const struct btf *btf;
+ u16 nr_args, i;
+
+ btf = env->btf;
+ proto_type = btf_type_by_id(btf, t->type);
+
+ if (!proto_type || !btf_type_is_func_proto(proto_type)) {
+ btf_verifier_log_type(env, t, "Invalid type_id");
+ return -EINVAL;
+ }
+
+ args = (const struct btf_param *)(proto_type + 1);
+ nr_args = btf_type_vlen(proto_type);
+ for (i = 0; i < nr_args; i++) {
+ if (!args[i].name_off && args[i].type) {
+ btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
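
To see how FUNC and FUNC_PROTO fit together: for a C function int add(int a, int b), the BTF section would carry an INT type, a FUNC_PROTO referencing it for the signature, and a FUNC naming the instance. The verifier log functions added above would render them roughly as follows (illustrative output; type IDs invented):

[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
[2] FUNC_PROTO '(anon)' return=1 args=(1 a, 1 b)
[3] FUNC 'add' type_id=2
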
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
[BTF_KIND_INT] = &int_ops,
[BTF_KIND_PTR] = &ptr_ops,
@@ -1792,6 +2431,8 @@ static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
[BTF_KIND_VOLATILE] = &modifier_ops,
[BTF_KIND_CONST] = &modifier_ops,
[BTF_KIND_RESTRICT] = &modifier_ops,
+ [BTF_KIND_FUNC] = &func_ops,
+ [BTF_KIND_FUNC_PROTO] = &func_proto_ops,
};
static s32 btf_check_meta(struct btf_verifier_env *env,
@@ -1863,30 +2504,6 @@ static int btf_check_all_metas(struct btf_verifier_env *env)
return 0;
}
-static int btf_resolve(struct btf_verifier_env *env,
- const struct btf_type *t, u32 type_id)
-{
- const struct resolve_vertex *v;
- int err = 0;
-
- env->resolve_mode = RESOLVE_TBD;
- env_stack_push(env, t, type_id);
- while (!err && (v = env_stack_peak(env))) {
- env->log_type_id = v->type_id;
- err = btf_type_ops(v->t)->resolve(env, v);
- }
-
- env->log_type_id = type_id;
- if (err == -E2BIG)
- btf_verifier_log_type(env, t,
- "Exceeded max resolving depth:%u",
- MAX_RESOLVE_DEPTH);
- else if (err == -EEXIST)
- btf_verifier_log_type(env, t, "Loop detected");
-
- return err;
-}
-
static bool btf_resolve_valid(struct btf_verifier_env *env,
const struct btf_type *t,
u32 type_id)
@@ -1920,6 +2537,39 @@ static bool btf_resolve_valid(struct btf_verifier_env *env,
return false;
}
+static int btf_resolve(struct btf_verifier_env *env,
+ const struct btf_type *t, u32 type_id)
+{
+ u32 save_log_type_id = env->log_type_id;
+ const struct resolve_vertex *v;
+ int err = 0;
+
+ env->resolve_mode = RESOLVE_TBD;
+ env_stack_push(env, t, type_id);
+ while (!err && (v = env_stack_peak(env))) {
+ env->log_type_id = v->type_id;
+ err = btf_type_ops(v->t)->resolve(env, v);
+ }
+
+ env->log_type_id = type_id;
+ if (err == -E2BIG) {
+ btf_verifier_log_type(env, t,
+ "Exceeded max resolving depth:%u",
+ MAX_RESOLVE_DEPTH);
+ } else if (err == -EEXIST) {
+ btf_verifier_log_type(env, t, "Loop detected");
+ }
+
+ /* Final sanity check */
+ if (!err && !btf_resolve_valid(env, t, type_id)) {
+ btf_verifier_log_type(env, t, "Invalid resolve state");
+ err = -EINVAL;
+ }
+
+ env->log_type_id = save_log_type_id;
+ return err;
+}
+
static int btf_check_all_types(struct btf_verifier_env *env)
{
struct btf *btf = env->btf;
@@ -1942,10 +2592,16 @@ static int btf_check_all_types(struct btf_verifier_env *env)
return err;
}
- if (btf_type_needs_resolve(t) &&
- !btf_resolve_valid(env, t, type_id)) {
- btf_verifier_log_type(env, t, "Invalid resolve state");
- return -EINVAL;
+ if (btf_type_is_func_proto(t)) {
+ err = btf_func_proto_check(env, t);
+ if (err)
+ return err;
+ }
+
+ if (btf_type_is_func(t)) {
+ err = btf_func_check(env, t);
+ if (err)
+ return err;
}
}
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1a796e0799ec..38de580abcc2 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -21,12 +21,14 @@
* Kris Katterjohn - Added many additional checks in bpf_check_classic()
*/
+#include <uapi/linux/btf.h>
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/moduleloader.h>
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/frame.h>
#include <linux/rbtree_latch.h>
#include <linux/kallsyms.h>
@@ -103,6 +105,91 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
}
EXPORT_SYMBOL_GPL(bpf_prog_alloc);
+int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog)
+{
+ if (!prog->aux->nr_linfo || !prog->jit_requested)
+ return 0;
+
+ prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo,
+ sizeof(*prog->aux->jited_linfo),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!prog->aux->jited_linfo)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void bpf_prog_free_jited_linfo(struct bpf_prog *prog)
+{
+ kfree(prog->aux->jited_linfo);
+ prog->aux->jited_linfo = NULL;
+}
+
+void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog)
+{
+ if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0])
+ bpf_prog_free_jited_linfo(prog);
+}
+
+/* The jit engine is responsible for providing an array
+ * for insn_off to the jited_off mapping (insn_to_jit_off).
+ *
+ * The idx to this array is the insn_off. Hence, the insn_off
+ * here is relative to the prog itself instead of the main prog.
+ * This array has one entry for each xlated bpf insn.
+ *
+ * jited_off is the byte off to the last byte of the jited insn.
+ *
+ * Hence, with
+ * insn_start:
+ * The first bpf insn off of the prog. The insn off
+ * here is relative to the main prog.
+ * e.g. if prog is a subprog, insn_start > 0
+ * linfo_idx:
+ * The prog's idx to prog->aux->linfo and jited_linfo
+ *
+ * jited_linfo[linfo_idx] = prog->bpf_func
+ *
+ * For i > linfo_idx,
+ *
+ * jited_linfo[i] = prog->bpf_func +
+ * insn_to_jit_off[linfo[i].insn_off - insn_start - 1]
+ */
+void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
+ const u32 *insn_to_jit_off)
+{
+ u32 linfo_idx, insn_start, insn_end, nr_linfo, i;
+ const struct bpf_line_info *linfo;
+ void **jited_linfo;
+
+ if (!prog->aux->jited_linfo)
+ /* Userspace did not provide linfo */
+ return;
+
+ linfo_idx = prog->aux->linfo_idx;
+ linfo = &prog->aux->linfo[linfo_idx];
+ insn_start = linfo[0].insn_off;
+ insn_end = insn_start + prog->len;
+
+ jited_linfo = &prog->aux->jited_linfo[linfo_idx];
+ jited_linfo[0] = prog->bpf_func;
+
+ nr_linfo = prog->aux->nr_linfo - linfo_idx;
+
+ for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++)
+ /* The verifier ensures that linfo[i].insn_off is
+ * strictly increasing
+ */
+ jited_linfo[i] = prog->bpf_func +
+ insn_to_jit_off[linfo[i].insn_off - insn_start - 1];
+}
+
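
A self-contained illustration of the index arithmetic documented above, with all numbers invented: a subprog whose first insn is main-prog insn 10, with line info at insns 10, 12 and 15.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uintptr_t bpf_func = 0x1000;			/* start of the jited subprog */
	uint32_t insn_start = 10;			/* first insn off of the subprog */
	uint32_t insn_off[] = { 10, 12, 15 };		/* linfo[i].insn_off */
	uint32_t insn_to_jit_off[] = { 4, 8, 12, 16, 24 }; /* end byte of each jited insn */

	printf("linfo 0 -> %#" PRIxPTR "\n", bpf_func);
	for (int i = 1; i < 3; i++)
		printf("linfo %d -> %#" PRIxPTR "\n", i,
		       bpf_func + insn_to_jit_off[insn_off[i] - insn_start - 1]);
	return 0;	/* prints 0x1000, 0x1008, 0x1018 */
}
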
+void bpf_prog_free_linfo(struct bpf_prog *prog)
+{
+ bpf_prog_free_jited_linfo(prog);
+ kvfree(prog->aux->linfo);
+}
+
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
gfp_t gfp_extra_flags)
{
@@ -292,6 +379,26 @@ static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
return ret;
}
+static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta)
+{
+ struct bpf_line_info *linfo;
+ u32 i, nr_linfo;
+
+ nr_linfo = prog->aux->nr_linfo;
+ if (!nr_linfo || !delta)
+ return;
+
+ linfo = prog->aux->linfo;
+
+ for (i = 0; i < nr_linfo; i++)
+ if (off < linfo[i].insn_off)
+ break;
+
+ /* Push all off < linfo[i].insn_off by delta */
+ for (; i < nr_linfo; i++)
+ linfo[i].insn_off += delta;
+}
+
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len)
{
@@ -347,6 +454,8 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
*/
BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
+ bpf_adj_linfo(prog_adj, off, insn_delta);
+
return prog_adj;
}
@@ -365,13 +474,11 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
}
#ifdef CONFIG_BPF_JIT
-# define BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000)
-
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
int bpf_jit_harden __read_mostly;
int bpf_jit_kallsyms __read_mostly;
-int bpf_jit_limit __read_mostly = BPF_JIT_LIMIT_DEFAULT;
+long bpf_jit_limit __read_mostly;
static __always_inline void
bpf_get_prog_addr_region(const struct bpf_prog *prog,
@@ -390,6 +497,8 @@ bpf_get_prog_addr_region(const struct bpf_prog *prog,
static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
const char *end = sym + KSYM_NAME_LEN;
+ const struct btf_type *type;
+ const char *func_name;
BUILD_BUG_ON(sizeof("bpf_prog_") +
sizeof(prog->tag) * 2 +
@@ -404,6 +513,16 @@ static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
sym = bin2hex(sym, prog->tag, sizeof(prog->tag));
+
+ /* prog->aux->name will be ignored if full btf name is available */
+ if (prog->aux->func_info_cnt) {
+ type = btf_type_by_id(prog->aux->btf,
+ prog->aux->func_info[prog->aux->func_idx].type_id);
+ func_name = btf_name_by_offset(prog->aux->btf, type->name_off);
+ snprintf(sym, (size_t)(end - sym), "_%s", func_name);
+ return;
+ }
+
if (prog->aux->name[0])
snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name);
else
@@ -580,16 +699,27 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
static atomic_long_t bpf_jit_current;
+/* Can be overridden by an arch's JIT compiler if it has a custom,
+ * dedicated BPF backend memory area, or if neither of the two
+ * below apply.
+ */
+u64 __weak bpf_jit_alloc_exec_limit(void)
+{
#if defined(MODULES_VADDR)
+ return MODULES_END - MODULES_VADDR;
+#else
+ return VMALLOC_END - VMALLOC_START;
+#endif
+}
+
static int __init bpf_jit_charge_init(void)
{
/* Only used as heuristic here to derive limit. */
- bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
- PAGE_SIZE), INT_MAX);
+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2,
+ PAGE_SIZE), LONG_MAX);
return 0;
}
pure_initcall(bpf_jit_charge_init);
-#endif
static int bpf_jit_charge_modmem(u32 pages)
{
@@ -609,6 +739,16 @@ static void bpf_jit_uncharge_modmem(u32 pages)
atomic_long_sub(pages, &bpf_jit_current);
}
+void *__weak bpf_jit_alloc_exec(unsigned long size)
+{
+ return module_alloc(size);
+}
+
+void __weak bpf_jit_free_exec(void *addr)
+{
+ module_memfree(addr);
+}
+
struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
unsigned int alignment,
@@ -626,7 +766,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
if (bpf_jit_charge_modmem(pages))
return NULL;
- hdr = module_alloc(size);
+ hdr = bpf_jit_alloc_exec(size);
if (!hdr) {
bpf_jit_uncharge_modmem(pages);
return NULL;
@@ -650,7 +790,7 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
u32 pages = hdr->pages;
- module_memfree(hdr);
+ bpf_jit_free_exec(hdr);
bpf_jit_uncharge_modmem(pages);
}
@@ -672,6 +812,40 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
bpf_prog_unlock_free(fp);
}
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+ const struct bpf_insn *insn, bool extra_pass,
+ u64 *func_addr, bool *func_addr_fixed)
+{
+ s16 off = insn->off;
+ s32 imm = insn->imm;
+ u8 *addr;
+
+ *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
+ if (!*func_addr_fixed) {
+ /* Placeholder address until the last pass has collected
+ * all addresses for JITed subprograms, at which point we
+ * can pick them up from prog->aux.
+ */
+ if (!extra_pass)
+ addr = NULL;
+ else if (prog->aux->func &&
+ off >= 0 && off < prog->aux->func_cnt)
+ addr = (u8 *)prog->aux->func[off]->bpf_func;
+ else
+ return -EINVAL;
+ } else {
+ /* Address of a BPF helper call. Since part of the core
+ * kernel, it's always at a fixed location. __bpf_call_base
+ * and the helper with imm relative to it are both in core
+ * kernel.
+ */
+ addr = (u8 *)__bpf_call_base + imm;
+ }
+
+ *func_addr = (unsigned long)addr;
+ return 0;
+}
+
static int bpf_jit_blind_insn(const struct bpf_insn *from,
const struct bpf_insn *aux,
struct bpf_insn *to_buff)
@@ -875,32 +1049,34 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
#define BPF_INSN_MAP(INSN_2, INSN_3) \
/* 32 bit ALU operations. */ \
/* Register based. */ \
- INSN_3(ALU, ADD, X), \
- INSN_3(ALU, SUB, X), \
- INSN_3(ALU, AND, X), \
- INSN_3(ALU, OR, X), \
- INSN_3(ALU, LSH, X), \
- INSN_3(ALU, RSH, X), \
- INSN_3(ALU, XOR, X), \
- INSN_3(ALU, MUL, X), \
- INSN_3(ALU, MOV, X), \
- INSN_3(ALU, DIV, X), \
- INSN_3(ALU, MOD, X), \
+ INSN_3(ALU, ADD, X), \
+ INSN_3(ALU, SUB, X), \
+ INSN_3(ALU, AND, X), \
+ INSN_3(ALU, OR, X), \
+ INSN_3(ALU, LSH, X), \
+ INSN_3(ALU, RSH, X), \
+ INSN_3(ALU, XOR, X), \
+ INSN_3(ALU, MUL, X), \
+ INSN_3(ALU, MOV, X), \
+ INSN_3(ALU, ARSH, X), \
+ INSN_3(ALU, DIV, X), \
+ INSN_3(ALU, MOD, X), \
INSN_2(ALU, NEG), \
INSN_3(ALU, END, TO_BE), \
INSN_3(ALU, END, TO_LE), \
/* Immediate based. */ \
- INSN_3(ALU, ADD, K), \
- INSN_3(ALU, SUB, K), \
- INSN_3(ALU, AND, K), \
- INSN_3(ALU, OR, K), \
- INSN_3(ALU, LSH, K), \
- INSN_3(ALU, RSH, K), \
- INSN_3(ALU, XOR, K), \
- INSN_3(ALU, MUL, K), \
- INSN_3(ALU, MOV, K), \
- INSN_3(ALU, DIV, K), \
- INSN_3(ALU, MOD, K), \
+ INSN_3(ALU, ADD, K), \
+ INSN_3(ALU, SUB, K), \
+ INSN_3(ALU, AND, K), \
+ INSN_3(ALU, OR, K), \
+ INSN_3(ALU, LSH, K), \
+ INSN_3(ALU, RSH, K), \
+ INSN_3(ALU, XOR, K), \
+ INSN_3(ALU, MUL, K), \
+ INSN_3(ALU, MOV, K), \
+ INSN_3(ALU, ARSH, K), \
+ INSN_3(ALU, DIV, K), \
+ INSN_3(ALU, MOD, K), \
/* 64 bit ALU operations. */ \
/* Register based. */ \
INSN_3(ALU64, ADD, X), \
@@ -1079,6 +1255,12 @@ select_insn:
DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32;
insn++;
CONT;
+ ALU_ARSH_X:
+ DST = (u64) (u32) ((*(s32 *) &DST) >> SRC);
+ CONT;
+ ALU_ARSH_K:
+ DST = (u64) (u32) ((*(s32 *) &DST) >> IMM);
+ CONT;
ALU64_ARSH_X:
(*(s64 *) &DST) >>= SRC;
CONT;
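
The two new 32-bit ARSH cases shift the low word as a signed value and zero-extend the result into the 64-bit register, unlike BPF_RSH which shifts in zeros. A userspace model of the semantics (note that ISO C leaves right-shifting a negative value implementation-defined; like the interpreter, this assumes the arithmetic-shift behavior of the compilers the kernel supports):

#include <stdio.h>
#include <stdint.h>

static uint64_t alu32_arsh(uint64_t dst, uint32_t shift)
{
	/* shift the low 32 bits as signed, then zero-extend to 64 bits */
	return (uint64_t)(uint32_t)((int32_t)dst >> shift);
}

int main(void)
{
	printf("%#llx\n",
	       (unsigned long long)alu32_arsh(0xffffffff80000000ull, 4));
	return 0;	/* prints 0xf8000000: sign-fill low word, clear high word */
}
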
@@ -1525,13 +1707,20 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
* be JITed, but falls back to the interpreter.
*/
if (!bpf_prog_is_dev_bound(fp->aux)) {
+ *err = bpf_prog_alloc_jited_linfo(fp);
+ if (*err)
+ return fp;
+
fp = bpf_int_jit_compile(fp);
-#ifdef CONFIG_BPF_JIT_ALWAYS_ON
if (!fp->jited) {
+ bpf_prog_free_jited_linfo(fp);
+#ifdef CONFIG_BPF_JIT_ALWAYS_ON
*err = -ENOTSUPP;
return fp;
- }
#endif
+ } else {
+ bpf_prog_free_unused_jited_linfo(fp);
+ }
} else {
*err = bpf_prog_offload_compile(fp);
if (*err)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 24aac0d0f412..8974b3755670 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -183,7 +183,7 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
* is not at a fixed memory location, with mixed length
* packets, which is bad for cache-line hotness.
*/
- frame_size = SKB_DATA_ALIGN(xdpf->len) + xdpf->headroom +
+ frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
pkt_data_start = xdpf->data - xdpf->headroom;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 2c1790288138..4b7c76765d9d 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -23,7 +23,7 @@
#define HTAB_CREATE_FLAG_MASK \
(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \
- BPF_F_RDONLY | BPF_F_WRONLY)
+ BPF_F_RDONLY | BPF_F_WRONLY | BPF_F_ZERO_SEED)
struct bucket {
struct hlist_nulls_head head;
@@ -244,6 +244,7 @@ static int htab_map_alloc_check(union bpf_attr *attr)
*/
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+ bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
int numa_node = bpf_map_attr_numa_node(attr);
BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
@@ -257,6 +258,10 @@ static int htab_map_alloc_check(union bpf_attr *attr)
*/
return -EPERM;
+ if (zero_seed && !capable(CAP_SYS_ADMIN))
+ /* Guard against local DoS, and discourage production use. */
+ return -EPERM;
+
if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
/* reserved bits should not be used */
return -EINVAL;
@@ -373,7 +378,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
if (!htab->buckets)
goto free_htab;
- htab->hashrnd = get_random_int();
+ if (htab->map.map_flags & BPF_F_ZERO_SEED)
+ htab->hashrnd = 0;
+ else
+ htab->hashrnd = get_random_int();
+
for (i = 0; i < htab->n_buckets; i++) {
INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
raw_spin_lock_init(&htab->buckets[i].lock);
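
As a usage sketch of the new flag (assuming uapi headers new enough to define BPF_F_ZERO_SEED), a map with a deterministic zero hash seed, e.g. for reproducible benchmarking, can be requested through the raw syscall; per the check above, this requires CAP_SYS_ADMIN:

#include <unistd.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <linux/bpf.h>

int main(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = 4;
	attr.value_size = 8;
	attr.max_entries = 1024;
	attr.map_flags = BPF_F_ZERO_SEED;	/* hashrnd fixed to 0 */

	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0)
		perror("BPF_MAP_CREATE");
	return fd < 0;
}
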
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index c97a8f968638..07a34ef562a0 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -1,14 +1,15 @@
//SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/bug.h>
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
+#include <uapi/linux/btf.h>
-DEFINE_PER_CPU(struct bpf_cgroup_storage*,
- bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
+DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
#ifdef CONFIG_CGROUP_BPF
@@ -139,7 +140,8 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
return -ENOENT;
new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
- map->value_size, __GFP_ZERO | GFP_USER,
+ map->value_size,
+ __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
map->numa_node);
if (!new)
return -ENOMEM;
@@ -308,6 +310,85 @@ static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
return -EINVAL;
}
+static int cgroup_storage_check_btf(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type)
+{
+ struct btf_member *m;
+ u32 offset, size;
+
+ /* Key is expected to be of struct bpf_cgroup_storage_key type,
+ * which is:
+ * struct bpf_cgroup_storage_key {
+ * __u64 cgroup_inode_id;
+ * __u32 attach_type;
+ * };
+ */
+
+ /*
+ * key_type must be a structure with two fields.
+ */
+ if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ||
+ BTF_INFO_VLEN(key_type->info) != 2)
+ return -EINVAL;
+
+ /*
+ * The first field must be a 64-bit integer at offset 0.
+ */
+ m = (struct btf_member *)(key_type + 1);
+ size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id);
+ if (!btf_member_is_reg_int(btf, key_type, m, 0, size))
+ return -EINVAL;
+
+ /*
+ * The second field must be a 32-bit integer at bit offset 64.
+ */
+ m++;
+ offset = offsetof(struct bpf_cgroup_storage_key, attach_type);
+ size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type);
+ if (!btf_member_is_reg_int(btf, key_type, m, offset, size))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
+ struct seq_file *m)
+{
+ enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
+ struct bpf_cgroup_storage_key *key = _key;
+ struct bpf_cgroup_storage *storage;
+ int cpu;
+
+ rcu_read_lock();
+ storage = cgroup_storage_lookup(map_to_storage(map), key, false);
+ if (!storage) {
+ rcu_read_unlock();
+ return;
+ }
+
+ btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
+ stype = cgroup_storage_type(map);
+ if (stype == BPF_CGROUP_STORAGE_SHARED) {
+ seq_puts(m, ": ");
+ btf_type_seq_show(map->btf, map->btf_value_type_id,
+ &READ_ONCE(storage->buf)->data[0], m);
+ seq_puts(m, "\n");
+ } else {
+ seq_puts(m, ": {\n");
+ for_each_possible_cpu(cpu) {
+ seq_printf(m, "\tcpu%d: ", cpu);
+ btf_type_seq_show(map->btf, map->btf_value_type_id,
+ per_cpu_ptr(storage->percpu_buf, cpu),
+ m);
+ seq_puts(m, "\n");
+ }
+ seq_puts(m, "}\n");
+ }
+ rcu_read_unlock();
+}
+
const struct bpf_map_ops cgroup_storage_map_ops = {
.map_alloc = cgroup_storage_map_alloc,
.map_free = cgroup_storage_map_free,
@@ -315,7 +396,8 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
.map_lookup_elem = cgroup_storage_lookup_elem,
.map_update_elem = cgroup_storage_update_elem,
.map_delete_elem = cgroup_storage_delete_elem,
- .map_check_btf = map_check_no_btf,
+ .map_check_btf = cgroup_storage_check_btf,
+ .map_seq_show_elem = cgroup_storage_seq_show_elem,
};
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 9058317ba9de..abf1002080df 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -168,20 +168,59 @@ static size_t longest_prefix_match(const struct lpm_trie *trie,
const struct lpm_trie_node *node,
const struct bpf_lpm_trie_key *key)
{
- size_t prefixlen = 0;
- size_t i;
+ u32 limit = min(node->prefixlen, key->prefixlen);
+ u32 prefixlen = 0, i = 0;
- for (i = 0; i < trie->data_size; i++) {
- size_t b;
+ BUILD_BUG_ON(offsetof(struct lpm_trie_node, data) % sizeof(u32));
+ BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key, data) % sizeof(u32));
- b = 8 - fls(node->data[i] ^ key->data[i]);
- prefixlen += b;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(CONFIG_64BIT)
- if (prefixlen >= node->prefixlen || prefixlen >= key->prefixlen)
- return min(node->prefixlen, key->prefixlen);
+ /* data_size >= 16 is very unlikely. We avoid a loop here so
+ * the compiler can generate optimal code.
+ */
+ if (trie->data_size >= 8) {
+ u64 diff = be64_to_cpu(*(__be64 *)node->data ^
+ *(__be64 *)key->data);
+
+ prefixlen = 64 - fls64(diff);
+ if (prefixlen >= limit)
+ return limit;
+ if (diff)
+ return prefixlen;
+ i = 8;
+ }
+#endif
+
+ while (trie->data_size >= i + 4) {
+ u32 diff = be32_to_cpu(*(__be32 *)&node->data[i] ^
+ *(__be32 *)&key->data[i]);
+
+ prefixlen += 32 - fls(diff);
+ if (prefixlen >= limit)
+ return limit;
+ if (diff)
+ return prefixlen;
+ i += 4;
+ }
- if (b < 8)
- break;
+ if (trie->data_size >= i + 2) {
+ u16 diff = be16_to_cpu(*(__be16 *)&node->data[i] ^
+ *(__be16 *)&key->data[i]);
+
+ prefixlen += 16 - fls(diff);
+ if (prefixlen >= limit)
+ return limit;
+ if (diff)
+ return prefixlen;
+ i += 2;
+ }
+
+ if (trie->data_size >= i + 1) {
+ prefixlen += 8 - fls(node->data[i] ^ key->data[i]);
+
+ if (prefixlen >= limit)
+ return limit;
}
return prefixlen;
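
The rewritten longest_prefix_match() rests on one identity: the number of equal leading bits of two big-endian-decoded words is 32 - fls(a ^ b). A short demonstration (fls() modeled here with a compiler builtin; the kernel has its own implementation):

#include <stdio.h>
#include <stdint.h>

static int fls32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;	/* 1-based MSB index, 0 for 0 */
}

int main(void)
{
	uint32_t a = 0x0a000001, b = 0x0a0000ff;	/* share their top 24 bits */
	uint32_t diff = a ^ b;

	printf("common prefix bits: %d\n", 32 - fls32(diff));
	return 0;	/* prints: common prefix bits: 24 */
}
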
@@ -689,6 +728,7 @@ free_stack:
}
static int trie_check_btf(const struct bpf_map *map,
+ const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)
{
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 8e93c47f0779..54cf2b9c44a4 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -33,6 +33,7 @@
static DECLARE_RWSEM(bpf_devs_lock);
struct bpf_offload_dev {
+ const struct bpf_prog_offload_ops *ops;
struct list_head netdevs;
};
@@ -106,6 +107,7 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
err = -EINVAL;
goto err_unlock;
}
+ offload->offdev = ondev->offdev;
prog->aux->offload = offload;
list_add_tail(&offload->offloads, &ondev->progs);
dev_put(offload->netdev);
@@ -121,40 +123,20 @@ err_maybe_put:
return err;
}
-static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
- struct netdev_bpf *data)
+int bpf_prog_offload_verifier_prep(struct bpf_prog *prog)
{
- struct bpf_prog_offload *offload = prog->aux->offload;
- struct net_device *netdev;
-
- ASSERT_RTNL();
-
- if (!offload)
- return -ENODEV;
- netdev = offload->netdev;
-
- data->command = cmd;
-
- return netdev->netdev_ops->ndo_bpf(netdev, data);
-}
-
-int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
-{
- struct netdev_bpf data = {};
- int err;
-
- data.verifier.prog = env->prog;
+ struct bpf_prog_offload *offload;
+ int ret = -ENODEV;
- rtnl_lock();
- err = __bpf_offload_ndo(env->prog, BPF_OFFLOAD_VERIFIER_PREP, &data);
- if (err)
- goto exit_unlock;
+ down_read(&bpf_devs_lock);
+ offload = prog->aux->offload;
+ if (offload) {
+ ret = offload->offdev->ops->prepare(prog);
+ offload->dev_state = !ret;
+ }
+ up_read(&bpf_devs_lock);
- env->prog->aux->offload->dev_ops = data.verifier.ops;
- env->prog->aux->offload->dev_state = true;
-exit_unlock:
- rtnl_unlock();
- return err;
+ return ret;
}
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
@@ -166,7 +148,8 @@ int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
down_read(&bpf_devs_lock);
offload = env->prog->aux->offload;
if (offload)
- ret = offload->dev_ops->insn_hook(env, insn_idx, prev_insn_idx);
+ ret = offload->offdev->ops->insn_hook(env, insn_idx,
+ prev_insn_idx);
up_read(&bpf_devs_lock);
return ret;
@@ -180,8 +163,8 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
down_read(&bpf_devs_lock);
offload = env->prog->aux->offload;
if (offload) {
- if (offload->dev_ops->finalize)
- ret = offload->dev_ops->finalize(env);
+ if (offload->offdev->ops->finalize)
+ ret = offload->offdev->ops->finalize(env);
else
ret = 0;
}
@@ -193,12 +176,9 @@ int bpf_prog_offload_finalize(struct bpf_verifier_env *env)
static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{
struct bpf_prog_offload *offload = prog->aux->offload;
- struct netdev_bpf data = {};
-
- data.offload.prog = prog;
if (offload->dev_state)
- WARN_ON(__bpf_offload_ndo(prog, BPF_OFFLOAD_DESTROY, &data));
+ offload->offdev->ops->destroy(prog);
/* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
bpf_prog_free_id(prog, true);
@@ -210,24 +190,22 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
void bpf_prog_offload_destroy(struct bpf_prog *prog)
{
- rtnl_lock();
down_write(&bpf_devs_lock);
if (prog->aux->offload)
__bpf_prog_offload_destroy(prog);
up_write(&bpf_devs_lock);
- rtnl_unlock();
}
static int bpf_prog_offload_translate(struct bpf_prog *prog)
{
- struct netdev_bpf data = {};
- int ret;
-
- data.offload.prog = prog;
+ struct bpf_prog_offload *offload;
+ int ret = -ENODEV;
- rtnl_lock();
- ret = __bpf_offload_ndo(prog, BPF_OFFLOAD_TRANSLATE, &data);
- rtnl_unlock();
+ down_read(&bpf_devs_lock);
+ offload = prog->aux->offload;
+ if (offload)
+ ret = offload->offdev->ops->translate(prog);
+ up_read(&bpf_devs_lock);
return ret;
}
@@ -655,7 +633,8 @@ unlock:
}
EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister);
-struct bpf_offload_dev *bpf_offload_dev_create(void)
+struct bpf_offload_dev *
+bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops)
{
struct bpf_offload_dev *offdev;
int err;
@@ -673,6 +652,7 @@ struct bpf_offload_dev *bpf_offload_dev_create(void)
if (!offdev)
return ERR_PTR(-ENOMEM);
+ offdev->ops = ops;
INIT_LIST_HEAD(&offdev->netdevs);
return offdev;
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 8bbd72d3a121..b384ea9f3254 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -7,6 +7,7 @@
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/capability.h>
#include "percpu_freelist.h"
#define QUEUE_STACK_CREATE_FLAG_MASK \
@@ -45,8 +46,12 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 0 ||
+ attr->value_size == 0 ||
attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
return -EINVAL;
@@ -63,15 +68,10 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
int ret, numa_node = bpf_map_attr_numa_node(attr);
struct bpf_queue_stack *qs;
- u32 size, value_size;
- u64 queue_size, cost;
-
- size = attr->max_entries + 1;
- value_size = attr->value_size;
-
- queue_size = sizeof(*qs) + (u64) value_size * size;
+ u64 size, queue_size, cost;
- cost = queue_size;
+ size = (u64) attr->max_entries + 1;
+ cost = queue_size = sizeof(*qs) + size * attr->value_size;
if (cost >= U32_MAX - PAGE_SIZE)
return ERR_PTR(-E2BIG);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index cf5040fd5434..0607db304def 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -456,6 +456,7 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
}
int map_check_no_btf(const struct bpf_map *map,
+ const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type)
{
@@ -478,7 +479,7 @@ static int map_check_btf(const struct bpf_map *map, const struct btf *btf,
return -EINVAL;
if (map->ops->map_check_btf)
- ret = map->ops->map_check_btf(map, key_type, value_type);
+ ret = map->ops->map_check_btf(map, btf, key_type, value_type);
return ret;
}
@@ -1213,6 +1214,9 @@ static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
/* bpf_prog_free_id() must be called first */
bpf_prog_free_id(prog, do_idr_lock);
bpf_prog_kallsyms_del_all(prog);
+ btf_put(prog->aux->btf);
+ kvfree(prog->aux->func_info);
+ bpf_prog_free_linfo(prog);
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
@@ -1437,9 +1441,9 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
}
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD expected_attach_type
+#define BPF_PROG_LOAD_LAST_FIELD line_info_cnt
-static int bpf_prog_load(union bpf_attr *attr)
+static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
{
enum bpf_prog_type type = attr->prog_type;
struct bpf_prog *prog;
@@ -1450,9 +1454,14 @@ static int bpf_prog_load(union bpf_attr *attr)
if (CHECK_ATTR(BPF_PROG_LOAD))
return -EINVAL;
- if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
+ if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT))
return -EINVAL;
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+ (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
+ !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
/* copy eBPF program license from user space */
if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
sizeof(license) - 1) < 0)
@@ -1464,11 +1473,6 @@ static int bpf_prog_load(union bpf_attr *attr)
if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
return -E2BIG;
-
- if (type == BPF_PROG_TYPE_KPROBE &&
- attr->kern_version != LINUX_VERSION_CODE)
- return -EINVAL;
-
if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
type != BPF_PROG_TYPE_CGROUP_SKB &&
!capable(CAP_SYS_ADMIN))
@@ -1525,7 +1529,7 @@ static int bpf_prog_load(union bpf_attr *attr)
goto free_prog;
/* run eBPF verifier */
- err = bpf_check(&prog, attr);
+ err = bpf_check(&prog, attr, uattr);
if (err < 0)
goto free_used_maps;
@@ -1553,6 +1557,9 @@ static int bpf_prog_load(union bpf_attr *attr)
return err;
free_used_maps:
+ bpf_prog_free_linfo(prog);
+ kvfree(prog->aux->func_info);
+ btf_put(prog->aux->btf);
bpf_prog_kallsyms_del_subprogs(prog);
free_used_maps(prog->aux);
free_prog:
@@ -1597,6 +1604,7 @@ static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
bpf_prog_put(raw_tp->prog);
}
+ bpf_put_raw_tracepoint(raw_tp->btp);
kfree(raw_tp);
return 0;
}
@@ -1622,13 +1630,15 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
return -EFAULT;
tp_name[sizeof(tp_name) - 1] = 0;
- btp = bpf_find_raw_tracepoint(tp_name);
+ btp = bpf_get_raw_tracepoint(tp_name);
if (!btp)
return -ENOENT;
raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
- if (!raw_tp)
- return -ENOMEM;
+ if (!raw_tp) {
+ err = -ENOMEM;
+ goto out_put_btp;
+ }
raw_tp->btp = btp;
prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
@@ -1656,6 +1666,8 @@ out_put_prog:
bpf_prog_put(prog);
out_free_tp:
kfree(raw_tp);
+out_put_btp:
+ bpf_put_raw_tracepoint(btp);
return err;
}
@@ -2020,18 +2032,42 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
insns[i + 1].imm = 0;
continue;
}
-
- if (!bpf_dump_raw_ok() &&
- imm == (unsigned long)prog->aux) {
- insns[i].imm = 0;
- insns[i + 1].imm = 0;
- continue;
- }
}
return insns;
}
+static int set_info_rec_size(struct bpf_prog_info *info)
+{
+	/*
+	 * Ensure info.*_rec_size is the same as the size the kernel expects,
+	 *
+	 * or
+	 *
+	 * only allow a zero *_rec_size when the matching _cnt is also zero.
+	 * In that case, the kernel will write the expected _rec_size back
+	 * into the info.
+	 */
+
+ if ((info->nr_func_info || info->func_info_rec_size) &&
+ info->func_info_rec_size != sizeof(struct bpf_func_info))
+ return -EINVAL;
+
+ if ((info->nr_line_info || info->line_info_rec_size) &&
+ info->line_info_rec_size != sizeof(struct bpf_line_info))
+ return -EINVAL;
+
+ if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
+ info->jited_line_info_rec_size != sizeof(__u64))
+ return -EINVAL;
+
+ info->func_info_rec_size = sizeof(struct bpf_func_info);
+ info->line_info_rec_size = sizeof(struct bpf_line_info);
+ info->jited_line_info_rec_size = sizeof(__u64);
+
+ return 0;
+}
+
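/* [Editor's illustration — not part of the patch] Userspace sketch of
 * the rule set_info_rec_size() enforces above: a record size is valid
 * only if it matches the kernel's size exactly, or if both the size
 * and the count are zero (query mode; the kernel then fills it in).
 * check_rec_size() is a hypothetical name.
 */
#include <errno.h>
#include <stdint.h>

static int check_rec_size(uint32_t cnt, uint32_t rec_size,
			  uint32_t kernel_size)
{
	if ((cnt || rec_size) && rec_size != kernel_size)
		return -EINVAL;
	return 0;	/* caller writes kernel_size back for query mode */
}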
static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
@@ -2074,11 +2110,18 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
return -EFAULT;
}
+ err = set_info_rec_size(&info);
+ if (err)
+ return err;
+
if (!capable(CAP_SYS_ADMIN)) {
info.jited_prog_len = 0;
info.xlated_prog_len = 0;
info.nr_jited_ksyms = 0;
info.nr_jited_func_lens = 0;
+ info.nr_func_info = 0;
+ info.nr_line_info = 0;
+ info.nr_jited_line_info = 0;
goto done;
}
@@ -2160,7 +2203,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
ulen = info.nr_jited_ksyms;
info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
- if (info.nr_jited_ksyms && ulen) {
+ if (ulen) {
if (bpf_dump_raw_ok()) {
unsigned long ksym_addr;
u64 __user *user_ksyms;
@@ -2191,7 +2234,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
ulen = info.nr_jited_func_lens;
info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
- if (info.nr_jited_func_lens && ulen) {
+ if (ulen) {
if (bpf_dump_raw_ok()) {
u32 __user *user_lens;
u32 func_len, i;
@@ -2216,6 +2259,77 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
}
}
+ if (prog->aux->btf)
+ info.btf_id = btf_id(prog->aux->btf);
+
+ ulen = info.nr_func_info;
+ info.nr_func_info = prog->aux->func_info_cnt;
+ if (info.nr_func_info && ulen) {
+ char __user *user_finfo;
+
+ user_finfo = u64_to_user_ptr(info.func_info);
+ ulen = min_t(u32, info.nr_func_info, ulen);
+ if (copy_to_user(user_finfo, prog->aux->func_info,
+ info.func_info_rec_size * ulen))
+ return -EFAULT;
+ }
+
+ ulen = info.nr_line_info;
+ info.nr_line_info = prog->aux->nr_linfo;
+ if (info.nr_line_info && ulen) {
+ __u8 __user *user_linfo;
+
+ user_linfo = u64_to_user_ptr(info.line_info);
+ ulen = min_t(u32, info.nr_line_info, ulen);
+ if (copy_to_user(user_linfo, prog->aux->linfo,
+ info.line_info_rec_size * ulen))
+ return -EFAULT;
+ }
+
+ ulen = info.nr_jited_line_info;
+ if (prog->aux->jited_linfo)
+ info.nr_jited_line_info = prog->aux->nr_linfo;
+ else
+ info.nr_jited_line_info = 0;
+ if (info.nr_jited_line_info && ulen) {
+ if (bpf_dump_raw_ok()) {
+ __u64 __user *user_linfo;
+ u32 i;
+
+ user_linfo = u64_to_user_ptr(info.jited_line_info);
+ ulen = min_t(u32, info.nr_jited_line_info, ulen);
+ for (i = 0; i < ulen; i++) {
+ if (put_user((__u64)(long)prog->aux->jited_linfo[i],
+ &user_linfo[i]))
+ return -EFAULT;
+ }
+ } else {
+ info.jited_line_info = 0;
+ }
+ }
+
+ ulen = info.nr_prog_tags;
+ info.nr_prog_tags = prog->aux->func_cnt ? : 1;
+ if (ulen) {
+ __u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
+ u32 i;
+
+ user_prog_tags = u64_to_user_ptr(info.prog_tags);
+ ulen = min_t(u32, info.nr_prog_tags, ulen);
+ if (prog->aux->func_cnt) {
+ for (i = 0; i < ulen; i++) {
+ if (copy_to_user(user_prog_tags[i],
+ prog->aux->func[i]->tag,
+ BPF_TAG_SIZE))
+ return -EFAULT;
+ }
+ } else {
+ if (copy_to_user(user_prog_tags[0],
+ prog->tag, BPF_TAG_SIZE))
+ return -EFAULT;
+ }
+ }
+
done:
if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))
@@ -2501,7 +2615,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = map_get_next_key(&attr);
break;
case BPF_PROG_LOAD:
- err = bpf_prog_load(&attr);
+ err = bpf_prog_load(&attr, uattr);
break;
case BPF_OBJ_PIN:
err = bpf_obj_pin(&attr);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 1971ca325fb4..71d86e3024ae 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11,10 +11,12 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
+#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
@@ -24,6 +26,7 @@
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>
+#include <linux/ctype.h>
#include "disasm.h"
@@ -175,6 +178,7 @@ struct bpf_verifier_stack_elem {
#define BPF_COMPLEXITY_LIMIT_INSNS 131072
#define BPF_COMPLEXITY_LIMIT_STACK 1024
+#define BPF_COMPLEXITY_LIMIT_STATES 64
#define BPF_MAP_PTR_UNPRIV 1UL
#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \
@@ -213,6 +217,27 @@ struct bpf_call_arg_meta {
static DEFINE_MUTEX(bpf_verifier_lock);
+static const struct bpf_line_info *
+find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
+{
+ const struct bpf_line_info *linfo;
+ const struct bpf_prog *prog;
+ u32 i, nr_linfo;
+
+ prog = env->prog;
+ nr_linfo = prog->aux->nr_linfo;
+
+ if (!nr_linfo || insn_off >= prog->len)
+ return NULL;
+
+ linfo = prog->aux->linfo;
+ for (i = 1; i < nr_linfo; i++)
+ if (insn_off < linfo[i].insn_off)
+ break;
+
+ return &linfo[i - 1];
+}
+
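/* [Editor's illustration — not part of the patch] Sketch of the lookup
 * that find_linfo() above performs: records are sorted by insn_off, so
 * the covering record is the last one with insn_off <= the target.
 * The struct layout here is simplified.
 */
#include <stddef.h>
#include <stdint.h>

struct ex_line_info { uint32_t insn_off; uint32_t line_off; };

static const struct ex_line_info *
ex_find_linfo(const struct ex_line_info *linfo, uint32_t nr_linfo,
	      uint32_t insn_off)
{
	uint32_t i;

	if (!nr_linfo)
		return NULL;
	/* linfo[0].insn_off is 0, so the scan starts at index 1. */
	for (i = 1; i < nr_linfo; i++)
		if (insn_off < linfo[i].insn_off)
			break;
	return &linfo[i - 1];
}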
void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
va_list args)
{
@@ -263,6 +288,42 @@ __printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
va_end(args);
}
+static const char *ltrim(const char *s)
+{
+ while (isspace(*s))
+ s++;
+
+ return s;
+}
+
+__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env,
+ u32 insn_off,
+ const char *prefix_fmt, ...)
+{
+ const struct bpf_line_info *linfo;
+
+ if (!bpf_verifier_log_needed(&env->log))
+ return;
+
+ linfo = find_linfo(env, insn_off);
+ if (!linfo || linfo == env->prev_linfo)
+ return;
+
+ if (prefix_fmt) {
+ va_list args;
+
+ va_start(args, prefix_fmt);
+ bpf_verifier_vlog(&env->log, prefix_fmt, args);
+ va_end(args);
+ }
+
+ verbose(env, "%s\n",
+ ltrim(btf_name_by_offset(env->prog->aux->btf,
+ linfo->line_off)));
+
+ env->prev_linfo = linfo;
+}
+
static bool type_is_pkt_pointer(enum bpf_reg_type type)
{
return type == PTR_TO_PACKET ||
@@ -336,12 +397,14 @@ static char slot_type_char[] = {
static void print_liveness(struct bpf_verifier_env *env,
enum bpf_reg_liveness live)
{
- if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN))
+ if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
verbose(env, "_");
if (live & REG_LIVE_READ)
verbose(env, "r");
if (live & REG_LIVE_WRITTEN)
verbose(env, "w");
+ if (live & REG_LIVE_DONE)
+ verbose(env, "D");
}
static struct bpf_func_state *func(struct bpf_verifier_env *env,
@@ -1071,6 +1134,12 @@ static int mark_reg_read(struct bpf_verifier_env *env,
/* if read wasn't screened by an earlier write ... */
if (writes && state->live & REG_LIVE_WRITTEN)
break;
+ if (parent->live & REG_LIVE_DONE) {
+ verbose(env, "verifier BUG type %s var_off %lld off %d\n",
+ reg_type_str[parent->type],
+ parent->var_off.value, parent->off);
+ return -EFAULT;
+ }
/* ... then we depend on parent's value */
parent->live |= REG_LIVE_READ;
state = parent;
@@ -1217,6 +1286,10 @@ static int check_stack_write(struct bpf_verifier_env *env,
/* regular write of data into stack destroys any spilled ptr */
state->stack[spi].spilled_ptr.type = NOT_INIT;
+ /* Mark slots as STACK_MISC if they belonged to spilled ptr. */
+ if (state->stack[spi].slot_type[0] == STACK_SPILL)
+ for (i = 0; i < BPF_REG_SIZE; i++)
+ state->stack[spi].slot_type[i] = STACK_MISC;
/* only mark the slot as written if all 8 bytes were written
* otherwise read propagation may incorrectly stop too soon
@@ -1234,6 +1307,7 @@ static int check_stack_write(struct bpf_verifier_env *env,
register_is_null(&cur->regs[value_regno]))
type = STACK_ZERO;
+ /* Mark slots affected by this stack write. */
for (i = 0; i < size; i++)
state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
type;
@@ -1455,6 +1529,17 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
verbose(env, "R%d offset is outside of the packet\n", regno);
return err;
}
+
+ /* __check_packet_access has made sure "off + size - 1" is within u16.
+	 * reg->umax_value can't be bigger than MAX_PACKET_OFF, which is 0xffff;
+	 * otherwise find_good_pkt_pointers would have refused to set range info
+	 * that __check_packet_access would then have rejected this pkt access.
+ * Therefore, "off + reg->umax_value + size - 1" won't overflow u32.
+ */
+ env->prog->aux->max_pkt_offset =
+ max_t(u32, env->prog->aux->max_pkt_offset,
+ off + reg->umax_value + size - 1);
+
return err;
}
@@ -3570,12 +3655,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return err;
if (BPF_SRC(insn->code) == BPF_X) {
+ struct bpf_reg_state *src_reg = regs + insn->src_reg;
+ struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
+
if (BPF_CLASS(insn->code) == BPF_ALU64) {
/* case: R1 = R2
* copy register state to dest reg
*/
- regs[insn->dst_reg] = regs[insn->src_reg];
- regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
+ *dst_reg = *src_reg;
+ dst_reg->live |= REG_LIVE_WRITTEN;
} else {
/* R1 = (u32) R2 */
if (is_pointer_value(env, insn->src_reg)) {
@@ -3583,9 +3671,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
"R%d partial copy of pointer\n",
insn->src_reg);
return -EACCES;
+ } else if (src_reg->type == SCALAR_VALUE) {
+ *dst_reg = *src_reg;
+ dst_reg->live |= REG_LIVE_WRITTEN;
+ } else {
+ mark_reg_unknown(env, regs,
+ insn->dst_reg);
}
- mark_reg_unknown(env, regs, insn->dst_reg);
- coerce_reg_to_size(&regs[insn->dst_reg], 4);
+ coerce_reg_to_size(dst_reg, 4);
}
} else {
/* case: R = imm
@@ -3636,11 +3729,6 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
- if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
- verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
- return -EINVAL;
- }
-
if ((opcode == BPF_LSH || opcode == BPF_RSH ||
opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
@@ -3751,6 +3839,85 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
}
}
+/* compute branch direction of the expression "if (reg opcode val) goto target;"
+ * and return:
+ * 1 - branch will be taken and "goto target" will be executed
+ * 0 - branch will not be taken and fall-through to next insn
+ * -1 - unknown. Example: "if (reg < 5)" is unknown when the register value range is [0,10]
+ */
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
+{
+ if (__is_pointer_value(false, reg))
+ return -1;
+
+ switch (opcode) {
+ case BPF_JEQ:
+ if (tnum_is_const(reg->var_off))
+ return !!tnum_equals_const(reg->var_off, val);
+ break;
+ case BPF_JNE:
+ if (tnum_is_const(reg->var_off))
+ return !tnum_equals_const(reg->var_off, val);
+ break;
+ case BPF_JSET:
+ if ((~reg->var_off.mask & reg->var_off.value) & val)
+ return 1;
+ if (!((reg->var_off.mask | reg->var_off.value) & val))
+ return 0;
+ break;
+ case BPF_JGT:
+ if (reg->umin_value > val)
+ return 1;
+ else if (reg->umax_value <= val)
+ return 0;
+ break;
+ case BPF_JSGT:
+ if (reg->smin_value > (s64)val)
+ return 1;
+ else if (reg->smax_value < (s64)val)
+ return 0;
+ break;
+ case BPF_JLT:
+ if (reg->umax_value < val)
+ return 1;
+ else if (reg->umin_value >= val)
+ return 0;
+ break;
+ case BPF_JSLT:
+ if (reg->smax_value < (s64)val)
+ return 1;
+ else if (reg->smin_value >= (s64)val)
+ return 0;
+ break;
+ case BPF_JGE:
+ if (reg->umin_value >= val)
+ return 1;
+ else if (reg->umax_value < val)
+ return 0;
+ break;
+ case BPF_JSGE:
+ if (reg->smin_value >= (s64)val)
+ return 1;
+ else if (reg->smax_value < (s64)val)
+ return 0;
+ break;
+ case BPF_JLE:
+ if (reg->umax_value <= val)
+ return 1;
+ else if (reg->umin_value > val)
+ return 0;
+ break;
+ case BPF_JSLE:
+ if (reg->smax_value <= (s64)val)
+ return 1;
+ else if (reg->smin_value > (s64)val)
+ return 0;
+ break;
+ }
+
+ return -1;
+}
+
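/* [Editor's illustration — not part of the patch] Userspace sketch of
 * the unsigned cases of is_branch_taken() above: with a tracked range
 * [umin, umax], "if (reg > val)" is decidable only when the whole
 * range lies on one side of val.
 */
#include <stdint.h>

static int ex_branch_jgt(uint64_t umin, uint64_t umax, uint64_t val)
{
	if (umin > val)
		return 1;	/* always taken */
	if (umax <= val)
		return 0;	/* never taken */
	return -1;		/* unknown: both outcomes possible */
}
/* e.g. ex_branch_jgt(0, 10, 5) == -1, matching the comment above. */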
/* Adjusts the register min/max values in the case that the dst_reg is the
* variable register that we are working on, and src_reg is a constant or we're
* simply doing a BPF_K check.
@@ -3782,6 +3949,13 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
*/
__mark_reg_known(false_reg, val);
break;
+ case BPF_JSET:
+ false_reg->var_off = tnum_and(false_reg->var_off,
+ tnum_const(~val));
+ if (is_power_of_2(val))
+ true_reg->var_off = tnum_or(true_reg->var_off,
+ tnum_const(val));
+ break;
case BPF_JGT:
false_reg->umax_value = min(false_reg->umax_value, val);
true_reg->umin_value = max(true_reg->umin_value, val + 1);
@@ -3854,6 +4028,13 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
*/
__mark_reg_known(false_reg, val);
break;
+ case BPF_JSET:
+ false_reg->var_off = tnum_and(false_reg->var_off,
+ tnum_const(~val));
+ if (is_power_of_2(val))
+ true_reg->var_off = tnum_or(true_reg->var_off,
+ tnum_const(val));
+ break;
case BPF_JGT:
true_reg->umax_value = min(true_reg->umax_value, val - 1);
false_reg->umin_value = max(false_reg->umin_value, val);
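/* [Editor's illustration — not part of the patch] A minimal tnum
 * (tracked number) sketch showing why the false branch of
 * "if (reg & val)" may AND var_off with ~val, as the JSET hunks above
 * do: 'value' holds known-one bits, 'mask' holds unknown bits, and the
 * helpers mirror the kernel's tnum_and()/tnum_or()/tnum_const().
 */
#include <stdint.h>

struct ex_tnum { uint64_t value; uint64_t mask; };

static struct ex_tnum ex_tnum_const(uint64_t v)
{
	return (struct ex_tnum){ v, 0 };
}

static struct ex_tnum ex_tnum_and(struct ex_tnum a, struct ex_tnum b)
{
	uint64_t alpha = a.value | a.mask, beta = b.value | b.mask;
	uint64_t v = a.value & b.value;

	return (struct ex_tnum){ v, alpha & beta & ~v };
}

static struct ex_tnum ex_tnum_or(struct ex_tnum a, struct ex_tnum b)
{
	uint64_t v = a.value | b.value;
	uint64_t mu = a.mask | b.mask;

	return (struct ex_tnum){ v, mu & ~v };
}

/* False branch of JSET: no bit of val can be set in reg. */
static struct ex_tnum ex_jset_false(struct ex_tnum reg, uint64_t val)
{
	return ex_tnum_and(reg, ex_tnum_const(~val));
}

/* True branch, power-of-two val: that single bit must be set. */
static struct ex_tnum ex_jset_true_pow2(struct ex_tnum reg, uint64_t val)
{
	return ex_tnum_or(reg, ex_tnum_const(val));
}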
@@ -4152,21 +4333,15 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
dst_reg = &regs[insn->dst_reg];
- /* detect if R == 0 where R was initialized to zero earlier */
- if (BPF_SRC(insn->code) == BPF_K &&
- (opcode == BPF_JEQ || opcode == BPF_JNE) &&
- dst_reg->type == SCALAR_VALUE &&
- tnum_is_const(dst_reg->var_off)) {
- if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
- (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
- /* if (imm == imm) goto pc+off;
- * only follow the goto, ignore fall-through
- */
+ if (BPF_SRC(insn->code) == BPF_K) {
+ int pred = is_branch_taken(dst_reg, insn->imm, opcode);
+
+ if (pred == 1) {
+ /* only follow the goto, ignore fall-through */
*insn_idx += insn->off;
return 0;
- } else {
- /* if (imm != imm) goto pc+off;
- * only follow fall-through branch, since
+ } else if (pred == 0) {
+ /* only follow fall-through branch, since
* that's where the program will go
*/
return 0;
@@ -4477,6 +4652,7 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
return 0;
if (w < 0 || w >= env->prog->len) {
+ verbose_linfo(env, t, "%d: ", t);
verbose(env, "jump out of range from insn %d to %d\n", t, w);
return -EINVAL;
}
@@ -4494,6 +4670,8 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env)
insn_stack[cur_stack++] = w;
return 1;
} else if ((insn_state[w] & 0xF0) == DISCOVERED) {
+ verbose_linfo(env, t, "%d: ", t);
+ verbose_linfo(env, w, "%d: ", w);
verbose(env, "back-edge from insn %d to %d\n", t, w);
return -EINVAL;
} else if (insn_state[w] == EXPLORED) {
@@ -4516,10 +4694,6 @@ static int check_cfg(struct bpf_verifier_env *env)
int ret = 0;
int i, t;
- ret = check_subprogs(env);
- if (ret < 0)
- return ret;
-
insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
if (!insn_state)
return -ENOMEM;
@@ -4628,6 +4802,277 @@ err_free:
return ret;
}
+/* The minimum supported BTF func info size */
+#define MIN_BPF_FUNCINFO_SIZE 8
+#define MAX_FUNCINFO_REC_SIZE 252
+
+static int check_btf_func(struct bpf_verifier_env *env,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ u32 i, nfuncs, urec_size, min_size, prev_offset;
+ u32 krec_size = sizeof(struct bpf_func_info);
+ struct bpf_func_info *krecord;
+ const struct btf_type *type;
+ struct bpf_prog *prog;
+ const struct btf *btf;
+ void __user *urecord;
+ int ret = 0;
+
+ nfuncs = attr->func_info_cnt;
+ if (!nfuncs)
+ return 0;
+
+ if (nfuncs != env->subprog_cnt) {
+ verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
+ return -EINVAL;
+ }
+
+ urec_size = attr->func_info_rec_size;
+ if (urec_size < MIN_BPF_FUNCINFO_SIZE ||
+ urec_size > MAX_FUNCINFO_REC_SIZE ||
+ urec_size % sizeof(u32)) {
+ verbose(env, "invalid func info rec size %u\n", urec_size);
+ return -EINVAL;
+ }
+
+ prog = env->prog;
+ btf = prog->aux->btf;
+
+ urecord = u64_to_user_ptr(attr->func_info);
+ min_size = min_t(u32, krec_size, urec_size);
+
+ krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
+ if (!krecord)
+ return -ENOMEM;
+
+ for (i = 0; i < nfuncs; i++) {
+ ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
+ if (ret) {
+ if (ret == -E2BIG) {
+ verbose(env, "nonzero tailing record in func info");
+ /* set the size kernel expects so loader can zero
+ * out the rest of the record.
+ */
+ if (put_user(min_size, &uattr->func_info_rec_size))
+ ret = -EFAULT;
+ }
+ goto err_free;
+ }
+
+ if (copy_from_user(&krecord[i], urecord, min_size)) {
+ ret = -EFAULT;
+ goto err_free;
+ }
+
+ /* check insn_off */
+ if (i == 0) {
+ if (krecord[i].insn_off) {
+ verbose(env,
+ "nonzero insn_off %u for the first func info record",
+ krecord[i].insn_off);
+ ret = -EINVAL;
+ goto err_free;
+ }
+ } else if (krecord[i].insn_off <= prev_offset) {
+ verbose(env,
+ "same or smaller insn offset (%u) than previous func info record (%u)",
+ krecord[i].insn_off, prev_offset);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ if (env->subprog_info[i].start != krecord[i].insn_off) {
+ verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ /* check type_id */
+ type = btf_type_by_id(btf, krecord[i].type_id);
+ if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) {
+ verbose(env, "invalid type id %d in func info",
+ krecord[i].type_id);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ prev_offset = krecord[i].insn_off;
+ urecord += urec_size;
+ }
+
+ prog->aux->func_info = krecord;
+ prog->aux->func_info_cnt = nfuncs;
+ return 0;
+
+err_free:
+ kvfree(krecord);
+ return ret;
+}
+
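/* [Editor's illustration — not part of the patch] Sketch of the
 * forward/backward-compatible record copy that check_btf_func() above
 * builds from copy_from_user() + bpf_check_uarg_tail_zero(): copy
 * min(kernel size, user size) bytes and insist that any user bytes
 * beyond the kernel's size are zero.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>

static int ex_copy_record(void *krec, uint32_t ksize,
			  const void *urec, uint32_t usize)
{
	const unsigned char *tail = (const unsigned char *)urec + ksize;
	uint32_t i;

	if (usize > ksize)
		for (i = 0; i < usize - ksize; i++)
			if (tail[i])
				return -E2BIG;	/* nonzero trailing bytes */

	memcpy(krec, urec, ksize < usize ? ksize : usize);
	return 0;
}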
+static void adjust_btf_func(struct bpf_verifier_env *env)
+{
+ int i;
+
+ if (!env->prog->aux->func_info)
+ return;
+
+ for (i = 0; i < env->subprog_cnt; i++)
+ env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start;
+}
+
+#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
+ sizeof(((struct bpf_line_info *)(0))->line_col))
+#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
+
+static int check_btf_line(struct bpf_verifier_env *env,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
+ struct bpf_subprog_info *sub;
+ struct bpf_line_info *linfo;
+ struct bpf_prog *prog;
+ const struct btf *btf;
+ void __user *ulinfo;
+ int err;
+
+ nr_linfo = attr->line_info_cnt;
+ if (!nr_linfo)
+ return 0;
+
+ rec_size = attr->line_info_rec_size;
+ if (rec_size < MIN_BPF_LINEINFO_SIZE ||
+ rec_size > MAX_LINEINFO_REC_SIZE ||
+ rec_size & (sizeof(u32) - 1))
+ return -EINVAL;
+
+	/* Need to zero it in case userspace passes in a
+	 * smaller bpf_line_info object.
+ */
+ linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!linfo)
+ return -ENOMEM;
+
+ prog = env->prog;
+ btf = prog->aux->btf;
+
+ s = 0;
+ sub = env->subprog_info;
+ ulinfo = u64_to_user_ptr(attr->line_info);
+ expected_size = sizeof(struct bpf_line_info);
+ ncopy = min_t(u32, expected_size, rec_size);
+ for (i = 0; i < nr_linfo; i++) {
+ err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
+ if (err) {
+ if (err == -E2BIG) {
+ verbose(env, "nonzero tailing record in line_info");
+ if (put_user(expected_size,
+ &uattr->line_info_rec_size))
+ err = -EFAULT;
+ }
+ goto err_free;
+ }
+
+ if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
+ err = -EFAULT;
+ goto err_free;
+ }
+
+ /*
+ * Check insn_off to ensure
+ * 1) strictly increasing AND
+ * 2) bounded by prog->len
+ *
+		 * The linfo[0].insn_off == 0 check logically falls into
+		 * the later "missing bpf_line_info for func..." case
+		 * because linfo[0].insn_off must also belong to the
+		 * first subprog, and the first subprog must have
+		 * subprog_info[0].start == 0.
+ */
+ if ((i && linfo[i].insn_off <= prev_offset) ||
+ linfo[i].insn_off >= prog->len) {
+ verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
+ i, linfo[i].insn_off, prev_offset,
+ prog->len);
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (!prog->insnsi[linfo[i].insn_off].code) {
+ verbose(env,
+ "Invalid insn code at line_info[%u].insn_off\n",
+ i);
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (!btf_name_by_offset(btf, linfo[i].line_off) ||
+ !btf_name_by_offset(btf, linfo[i].file_name_off)) {
+ verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ if (s != env->subprog_cnt) {
+ if (linfo[i].insn_off == sub[s].start) {
+ sub[s].linfo_idx = i;
+ s++;
+ } else if (sub[s].start < linfo[i].insn_off) {
+ verbose(env, "missing bpf_line_info for func#%u\n", s);
+ err = -EINVAL;
+ goto err_free;
+ }
+ }
+
+ prev_offset = linfo[i].insn_off;
+ ulinfo += rec_size;
+ }
+
+ if (s != env->subprog_cnt) {
+ verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
+ env->subprog_cnt - s, s);
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ prog->aux->linfo = linfo;
+ prog->aux->nr_linfo = nr_linfo;
+
+ return 0;
+
+err_free:
+ kvfree(linfo);
+ return err;
+}
+
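/* [Editor's illustration — not part of the patch] Sketch of the
 * insn_off validation performed by check_btf_line() above: offsets
 * must be strictly increasing and bounded by the program length.
 */
#include <errno.h>
#include <stdint.h>

static int ex_validate_line_offsets(const uint32_t *insn_off, uint32_t nr,
				    uint32_t prog_len)
{
	uint32_t i, prev = 0;

	for (i = 0; i < nr; i++) {
		if ((i && insn_off[i] <= prev) || insn_off[i] >= prog_len)
			return -EINVAL;
		prev = insn_off[i];
	}
	return 0;
}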
+static int check_btf_info(struct bpf_verifier_env *env,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ struct btf *btf;
+ int err;
+
+ if (!attr->func_info_cnt && !attr->line_info_cnt)
+ return 0;
+
+ btf = btf_get_by_fd(attr->prog_btf_fd);
+ if (IS_ERR(btf))
+ return PTR_ERR(btf);
+ env->prog->aux->btf = btf;
+
+ err = check_btf_func(env, attr, uattr);
+ if (err)
+ return err;
+
+ err = check_btf_line(env, attr, uattr);
+ if (err)
+ return err;
+
+ return 0;
+}
+
/* check %cur's range satisfies %old's */
static bool range_within(struct bpf_reg_state *old,
struct bpf_reg_state *cur)
@@ -4674,6 +5119,102 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
return false;
}
+static void clean_func_state(struct bpf_verifier_env *env,
+ struct bpf_func_state *st)
+{
+ enum bpf_reg_liveness live;
+ int i, j;
+
+ for (i = 0; i < BPF_REG_FP; i++) {
+ live = st->regs[i].live;
+ /* liveness must not touch this register anymore */
+ st->regs[i].live |= REG_LIVE_DONE;
+ if (!(live & REG_LIVE_READ))
+ /* since the register is unused, clear its state
+ * to make further comparison simpler
+ */
+ __mark_reg_not_init(&st->regs[i]);
+ }
+
+ for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) {
+ live = st->stack[i].spilled_ptr.live;
+ /* liveness must not touch this stack slot anymore */
+ st->stack[i].spilled_ptr.live |= REG_LIVE_DONE;
+ if (!(live & REG_LIVE_READ)) {
+ __mark_reg_not_init(&st->stack[i].spilled_ptr);
+ for (j = 0; j < BPF_REG_SIZE; j++)
+ st->stack[i].slot_type[j] = STACK_INVALID;
+ }
+ }
+}
+
+static void clean_verifier_state(struct bpf_verifier_env *env,
+ struct bpf_verifier_state *st)
+{
+ int i;
+
+ if (st->frame[0]->regs[0].live & REG_LIVE_DONE)
+ /* all regs in this state in all frames were already marked */
+ return;
+
+ for (i = 0; i <= st->curframe; i++)
+ clean_func_state(env, st->frame[i]);
+}
+
+/* The parentage chains form a tree.
+ * The verifier states are added to state lists at the given insn and
+ * pushed onto the state stack for future exploration.
+ * When the verifier reaches a bpf_exit insn, some of the verifier states
+ * stored in the state lists already have their final liveness state,
+ * but many states will be revised from a liveness point of view when
+ * the verifier explores other branches.
+ * Example:
+ * 1: r0 = 1
+ * 2: if r1 == 100 goto pc+1
+ * 3: r0 = 2
+ * 4: exit
+ * when the verifier reaches exit insn the register r0 in the state list of
+ * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch
+ * of insn 2 and goes exploring further. At the insn 4 it will walk the
+ * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ.
+ *
+ * Since the verifier pushes the branch states as it sees them while exploring
+ * the program, walking a branch instruction for the second time means that
+ * all states below this branch were already explored and their final
+ * liveness marks are already propagated.
+ * Hence, when the verifier completes the search of the state list in
+ * is_state_visited(), we can call this clean_live_states() function to mark
+ * all liveness states as REG_LIVE_DONE, indicating that the 'parent' pointers
+ * of 'struct bpf_reg_state' will not be used.
+ * This function also clears the registers and stack of states that were
+ * never read, to simplify state merging.
+ *
+ * An important note here: walking the same branch instruction in the callee
+ * doesn't mean that the states are DONE. The verifier still has to compare
+ * the callsites.
+ */
+static void clean_live_states(struct bpf_verifier_env *env, int insn,
+ struct bpf_verifier_state *cur)
+{
+ struct bpf_verifier_state_list *sl;
+ int i;
+
+ sl = env->explored_states[insn];
+ if (!sl)
+ return;
+
+ while (sl != STATE_LIST_MARK) {
+ if (sl->state.curframe != cur->curframe)
+ goto next;
+ for (i = 0; i <= cur->curframe; i++)
+ if (sl->state.frame[i]->callsite != cur->frame[i]->callsite)
+ goto next;
+ clean_verifier_state(env, &sl->state);
+next:
+ sl = sl->next;
+ }
+}
+
/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct idpair *idmap)
@@ -4787,12 +5328,6 @@ static bool stacksafe(struct bpf_func_state *old,
{
int i, spi;
- /* if explored stack has more populated slots than current stack
- * such stacks are not equivalent
- */
- if (old->allocated_stack > cur->allocated_stack)
- return false;
-
/* walk slots of the explored stack and ignore any additional
* slots in the current stack, since explored(safe) state
* didn't use them
@@ -4800,12 +5335,21 @@ static bool stacksafe(struct bpf_func_state *old,
for (i = 0; i < old->allocated_stack; i++) {
spi = i / BPF_REG_SIZE;
- if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
+ if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) {
+ i += BPF_REG_SIZE - 1;
/* explored state didn't use this */
continue;
+ }
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
+
+ /* explored stack has more populated slots than current stack
+ * and these slots were used
+ */
+ if (i >= cur->allocated_stack)
+ return false;
+
/* if old state was safe with misc data in the stack
* it will be safe with zero-initialized stack.
* The opposite is not true
@@ -4980,7 +5524,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
struct bpf_verifier_state_list *new_sl;
struct bpf_verifier_state_list *sl;
struct bpf_verifier_state *cur = env->cur_state, *new;
- int i, j, err;
+ int i, j, err, states_cnt = 0;
sl = env->explored_states[insn_idx];
if (!sl)
@@ -4989,6 +5533,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
*/
return 0;
+ clean_live_states(env, insn_idx, cur);
+
while (sl != STATE_LIST_MARK) {
if (states_equal(env, &sl->state, cur)) {
/* reached equivalent register/stack state,
@@ -5007,8 +5553,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
return 1;
}
sl = sl->next;
+ states_cnt++;
}
+ if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
+ return 0;
+
/* there were no equivalent states, remember current one.
* technically the current state is not proven to be safe yet,
* but it will either reach outer most bpf_exit (which means it's safe)
@@ -5030,9 +5580,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
}
new_sl->next = env->explored_states[insn_idx];
env->explored_states[insn_idx] = new_sl;
- /* connect new state to parentage chain */
- for (i = 0; i < BPF_REG_FP; i++)
- cur_regs(env)[i].parent = &new->frame[new->curframe]->regs[i];
+ /* connect new state to parentage chain. Current frame needs all
+ * registers connected. Only r6 - r9 of the callers are alive (pushed
+ * to the stack implicitly by JITs) so in callers' frames connect just
+ * r6 - r9 as an optimization. Callers will have r1 - r5 connected to
+ * the state of the call instruction (with WRITTEN set), and r0 comes
+ * from callee with its full parentage chain, anyway.
+ */
+ for (j = 0; j <= cur->curframe; j++)
+ for (i = j < cur->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++)
+ cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i];
/* clear write marks in current state: the writes we did are not writes
* our child did, so they don't screen off its reads from us.
* (There are no read marks in current state, because reads always mark
@@ -5097,6 +5654,8 @@ static int do_check(struct bpf_verifier_env *env)
int insn_processed = 0;
bool do_print_state = false;
+ env->prev_linfo = NULL;
+
state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
if (!state)
return -ENOMEM;
@@ -5148,6 +5707,9 @@ static int do_check(struct bpf_verifier_env *env)
goto process_bpf_exit;
}
+ if (signal_pending(current))
+ return -EAGAIN;
+
if (need_resched())
cond_resched();
@@ -5167,6 +5729,7 @@ static int do_check(struct bpf_verifier_env *env)
.private_data = env,
};
+ verbose_linfo(env, insn_idx, "; ");
verbose(env, "%d: ", insn_idx);
print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
}
@@ -5650,7 +6213,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
return;
/* NOTE: fake 'exit' subprog should be updated as well. */
for (i = 0; i <= env->subprog_cnt; i++) {
- if (env->subprog_info[i].start < off)
+ if (env->subprog_info[i].start <= off)
continue;
env->subprog_info[i].start += len - 1;
}
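/* [Editor's illustration — not part of the patch] Sketch of the
 * off-by-one the "<=" fix above addresses: when one insn at 'off' is
 * patched into 'len' insns, a subprog starting *at* off still starts
 * there and must not be shifted; only starts strictly after off move.
 */
#include <stdint.h>

static void ex_adjust_starts(uint32_t *start, int cnt,
			     uint32_t off, uint32_t len)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (start[i] <= off)	/* the old "<" wrongly moved == */
			continue;
		start[i] += len - 1;
	}
}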
@@ -5707,10 +6270,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
int i, cnt, size, ctx_field_size, delta = 0;
const int insn_cnt = env->prog->len;
struct bpf_insn insn_buf[16], *insn;
+ u32 target_size, size_default, off;
struct bpf_prog *new_prog;
enum bpf_access_type type;
bool is_narrower_load;
- u32 target_size;
if (ops->gen_prologue || env->seen_direct_write) {
if (!ops->gen_prologue) {
@@ -5803,9 +6366,9 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
* we will apply proper mask to the result.
*/
is_narrower_load = size < ctx_field_size;
+ size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
+ off = insn->off;
if (is_narrower_load) {
- u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size);
- u32 off = insn->off;
u8 size_code;
if (type == BPF_WRITE) {
@@ -5833,12 +6396,23 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
if (is_narrower_load && size < target_size) {
- if (ctx_field_size <= 4)
+ u8 shift = (off & (size_default - 1)) * 8;
+
+ if (ctx_field_size <= 4) {
+ if (shift)
+ insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
+ insn->dst_reg,
+ shift);
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
(1 << size * 8) - 1);
- else
+ } else {
+ if (shift)
+ insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
+ insn->dst_reg,
+ shift);
insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
(1 << size * 8) - 1);
+ }
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
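/* [Editor's illustration — not part of the patch] Userspace model of
 * the narrow-load fixup above: after loading the full field, a narrow
 * read at byte offset 'off' becomes a right shift then a mask, which
 * is what the emitted BPF_RSH/BPF_AND pair computes (little-endian
 * layout assumed).
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ex_narrow_load(uint64_t full, uint32_t off,
			       uint32_t size, uint32_t size_default)
{
	uint32_t shift = (off & (size_default - 1)) * 8;

	return (full >> shift) & ((1ULL << (size * 8)) - 1);
}

int main(void)
{
	/* Reading byte 2 of a 4-byte field holding 0xAABBCCDD yields 0xBB. */
	printf("0x%llx\n",
	       (unsigned long long)ex_narrow_load(0xAABBCCDD, 2, 1, 4));
	return 0;
}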
@@ -5861,7 +6435,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
int i, j, subprog_start, subprog_end = 0, len, subprog;
struct bpf_insn *insn;
void *old_bpf_func;
- int err = -ENOMEM;
+ int err;
if (env->subprog_cnt <= 1)
return 0;
@@ -5892,6 +6466,11 @@ static int jit_subprogs(struct bpf_verifier_env *env)
insn->imm = 1;
}
+ err = bpf_prog_alloc_jited_linfo(prog);
+ if (err)
+ goto out_undo_insn;
+
+ err = -ENOMEM;
func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
if (!func)
goto out_undo_insn;
@@ -5911,12 +6490,21 @@ static int jit_subprogs(struct bpf_verifier_env *env)
if (bpf_prog_calc_tag(func[i]))
goto out_free;
func[i]->is_func = 1;
+ func[i]->aux->func_idx = i;
+ /* the btf and func_info will be freed only at prog->aux */
+ func[i]->aux->btf = prog->aux->btf;
+ func[i]->aux->func_info = prog->aux->func_info;
+
/* Use bpf_prog_F_tag to indicate functions in stack traces.
* Long term would need debug info to populate names
*/
func[i]->aux->name[0] = 'F';
func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
func[i]->jit_requested = 1;
+ func[i]->aux->linfo = prog->aux->linfo;
+ func[i]->aux->nr_linfo = prog->aux->nr_linfo;
+ func[i]->aux->jited_linfo = prog->aux->jited_linfo;
+ func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
func[i] = bpf_int_jit_compile(func[i]);
if (!func[i]->jited) {
err = -ENOTSUPP;
@@ -5990,6 +6578,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
prog->bpf_func = func[0]->bpf_func;
prog->aux->func = func;
prog->aux->func_cnt = env->subprog_cnt;
+ bpf_prog_free_unused_jited_linfo(prog);
return 0;
out_free:
for (i = 0; i < env->subprog_cnt; i++)
@@ -6006,6 +6595,7 @@ out_undo_insn:
insn->off = 0;
insn->imm = env->insn_aux_data[i].call_imm;
}
+ bpf_prog_free_jited_linfo(prog);
return err;
}
@@ -6138,6 +6728,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
*/
prog->cb_access = 1;
env->prog->aux->stack_depth = MAX_BPF_STACK;
+ env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
/* mark bpf_tail_call as different opcode to avoid
* conditional branch in the interpreter for every normal
@@ -6302,7 +6893,8 @@ static void free_states(struct bpf_verifier_env *env)
kfree(env->explored_states);
}
-int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
+int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
+ union bpf_attr __user *uattr)
{
struct bpf_verifier_env *env;
struct bpf_verifier_log *log;
@@ -6350,13 +6942,15 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
env->strict_alignment = true;
+ if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
+ env->strict_alignment = false;
ret = replace_map_fd_with_map_ptr(env);
if (ret < 0)
goto skip_full_check;
if (bpf_prog_is_dev_bound(env->prog->aux)) {
- ret = bpf_prog_offload_verifier_prep(env);
+ ret = bpf_prog_offload_verifier_prep(env->prog);
if (ret)
goto skip_full_check;
}
@@ -6370,6 +6964,14 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
env->allow_ptr_leaks = capable(CAP_SYS_ADMIN);
+ ret = check_subprogs(env);
+ if (ret < 0)
+ goto skip_full_check;
+
+ ret = check_btf_info(env, attr, uattr);
+ if (ret < 0)
+ goto skip_full_check;
+
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
@@ -6388,10 +6990,11 @@ skip_full_check:
free_states(env);
if (ret == 0)
- sanitize_dead_code(env);
+ ret = check_max_stack_depth(env);
+ /* instruction rewrites happen after this point */
if (ret == 0)
- ret = check_max_stack_depth(env);
+ sanitize_dead_code(env);
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
@@ -6431,6 +7034,9 @@ skip_full_check:
convert_pseudo_ld_imm64(env);
}
+ if (ret == 0)
+ adjust_btf_func(env);
+
err_release_maps:
if (!env->prog->aux->used_maps)
/* if we didn't copy map pointers into bpf_prog_info, release
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 6aaf5dd5383b..39eb36ba36ad 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -493,7 +493,7 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
}
/**
- * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
+ * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest (%NULL returns @cgrp->self)
*
@@ -502,8 +502,8 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
* enabled. If @ss is associated with the hierarchy @cgrp is on, this
* function is guaranteed to return non-NULL css.
*/
-static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
- struct cgroup_subsys *ss)
+static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
{
lockdep_assert_held(&cgroup_mutex);
@@ -524,6 +524,35 @@ static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
}
/**
+ * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest
+ *
+ * Find and get the effective css of @cgrp for @ss. The effective css is
+ * defined as the matching css of the nearest ancestor including self which
+ * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
+ * the root css is returned, so this function always returns a valid css.
+ *
+ * The returned css is not guaranteed to be online, and therefore it is the
+ * caller's responsibility to tryget a reference for it.
+ */
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
+{
+ struct cgroup_subsys_state *css;
+
+ do {
+ css = cgroup_css(cgrp, ss);
+
+ if (css)
+ return css;
+ cgrp = cgroup_parent(cgrp);
+ } while (cgrp);
+
+ return init_css_set.subsys[ss->id];
+}
+
+/**
* cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest
@@ -605,10 +634,11 @@ EXPORT_SYMBOL_GPL(of_css);
*
* Should be called under cgroup_[tree_]mutex.
*/
-#define for_each_e_css(css, ssid, cgrp) \
- for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
- if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
- ; \
+#define for_each_e_css(css, ssid, cgrp) \
+ for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
+ if (!((css) = cgroup_e_css_by_mask(cgrp, \
+ cgroup_subsys[(ssid)]))) \
+ ; \
else
/**
@@ -1007,7 +1037,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
* @ss is in this hierarchy, so we want the
* effective css from @cgrp.
*/
- template[i] = cgroup_e_css(cgrp, ss);
+ template[i] = cgroup_e_css_by_mask(cgrp, ss);
} else {
/*
* @ss is not in this hierarchy, so we don't want
@@ -3024,7 +3054,7 @@ static int cgroup_apply_control(struct cgroup *cgrp)
return ret;
/*
- * At this point, cgroup_e_css() results reflect the new csses
+ * At this point, cgroup_e_css_by_mask() results reflect the new csses
* making the following cgroup_update_dfl_csses() properly update
* css associations of all tasks in the subtree.
*/
@@ -5343,7 +5373,7 @@ int __init cgroup_init(void)
cgroup_rstat_boot();
/*
- * The latency of the synchronize_sched() is too high for cgroups,
+ * The latency of the synchronize_rcu() is too high for cgroups,
* avoid it at the cost of forcing all readers into the slow path.
*/
rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3c7f3b4c453c..91d5c38eb7e5 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -10,6 +10,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
+#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
@@ -367,6 +368,12 @@ static void lockdep_release_cpus_lock(void)
#endif /* CONFIG_HOTPLUG_CPU */
+/*
+ * Architectures that need SMT-specific errata handling during SMT hotplug
+ * should override this.
+ */
+void __weak arch_smt_update(void) { }
+
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
EXPORT_SYMBOL_GPL(cpu_smt_control);
@@ -1011,6 +1018,7 @@ out:
* concurrent CPU hotplug via cpu_add_remove_lock.
*/
lockup_detector_cleanup();
+ arch_smt_update();
return ret;
}
@@ -1139,6 +1147,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
ret = cpuhp_up_callbacks(cpu, st, target);
out:
cpus_write_unlock();
+ arch_smt_update();
return ret;
}
@@ -2055,12 +2064,6 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
-/*
- * Architectures that need SMT-specific errata handling during SMT hotplug
- * should override this.
- */
-void __weak arch_smt_update(void) { };
-
static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
int cpu, ret = 0;
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 79da61b49fa4..355d16acee6d 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -373,5 +373,10 @@ int dma_direct_supported(struct device *dev, u64 mask)
min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
- return mask >= phys_to_dma(dev, min_mask);
+ /*
+ * This check needs to be against the actual bit mask value, so
+ * use __phys_to_dma() here so that the SME encryption mask isn't
+ * part of the check.
+ */
+ return mask >= __phys_to_dma(dev, min_mask);
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 84530ab358c3..67ecac337374 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5541,7 +5541,7 @@ out_put:
static const struct vm_operations_struct perf_mmap_vmops = {
.open = perf_mmap_open,
- .close = perf_mmap_close, /* non mergable */
+ .close = perf_mmap_close, /* non mergeable */
.fault = perf_mmap_fault,
.page_mkwrite = perf_mmap_fault,
};
@@ -9918,7 +9918,7 @@ static void account_event(struct perf_event *event)
* call the perf scheduling hooks before proceeding to
* install events that need them.
*/
- synchronize_sched();
+ synchronize_rcu();
}
/*
* Now that we have waited for the sync_sched(), allow further
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index d6b56180827c..5befb338a18d 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -238,7 +238,7 @@ __weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
}
/*
- * Contraints to check before allowing this new breakpoint counter:
+ * Constraints to check before allowing this new breakpoint counter:
*
* == Non-pinned counter == (Considered as pinned for now)
*
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 96d4bee83489..abbd8da9ac21 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -572,7 +572,9 @@ static void put_uprobe(struct uprobe *uprobe)
* gets called, we don't get a chance to remove uprobe from
* delayed_uprobe_list from remove_breakpoint(). Do it here.
*/
+ mutex_lock(&delayed_uprobe_lock);
delayed_uprobe_remove(uprobe, NULL);
+ mutex_unlock(&delayed_uprobe_lock);
kfree(uprobe);
}
}
@@ -829,7 +831,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
BUG_ON((uprobe->offset & ~PAGE_MASK) +
UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
- smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
+ smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
set_bit(UPROBE_COPY_INSN, &uprobe->flags);
out:
@@ -2178,10 +2180,18 @@ static void handle_swbp(struct pt_regs *regs)
* After we hit the bp, _unregister + _register can install the
* new and not-yet-analyzed uprobe at the same address, restart.
*/
- smp_rmb(); /* pairs with wmb() in install_breakpoint() */
if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
goto out;
+ /*
+ * Pairs with the smp_wmb() in prepare_uprobe().
+ *
+ * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
+ * we must also see the stores to &uprobe->arch performed by the
+ * prepare_uprobe() call.
+ */
+ smp_rmb();
+
/* Tracing handlers use ->utask to communicate with fetch methods */
if (!get_utask())
goto out;
diff --git a/kernel/fork.c b/kernel/fork.c
index 07cddff89c7b..e2a5156bc9c3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -240,8 +240,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
* free_thread_stack() can be called in interrupt context,
* so cache the vm_struct.
*/
- if (stack)
+ if (stack) {
tsk->stack_vm_area = find_vm_area(stack);
+ tsk->stack = stack;
+ }
return stack;
#else
struct page *page = alloc_pages_node(node, THREADINFO_GFP,
@@ -288,7 +290,10 @@ static struct kmem_cache *thread_stack_cache;
static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
- return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
+ unsigned long *stack;
+ stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
+ tsk->stack = stack;
+ return stack;
}
static void free_thread_stack(struct task_struct *tsk)
diff --git a/kernel/futex.c b/kernel/futex.c
index f423f9b6577e..054105854e0e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -44,6 +44,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
@@ -173,8 +174,10 @@
* double_lock_hb() and double_unlock_hb(), respectively.
*/
-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
-int __read_mostly futex_cmpxchg_enabled;
+#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
+#define futex_cmpxchg_enabled 1
+#else
+static int __read_mostly futex_cmpxchg_enabled;
#endif
/*
@@ -1148,11 +1151,65 @@ out_error:
return ret;
}
+static int handle_exit_race(u32 __user *uaddr, u32 uval,
+ struct task_struct *tsk)
+{
+ u32 uval2;
+
+ /*
+ * If PF_EXITPIDONE is not yet set, then try again.
+ */
+ if (tsk && !(tsk->flags & PF_EXITPIDONE))
+ return -EAGAIN;
+
+ /*
+ * Reread the user space value to handle the following situation:
+ *
+ * CPU0 CPU1
+ *
+ * sys_exit() sys_futex()
+ * do_exit() futex_lock_pi()
+ * futex_lock_pi_atomic()
+ * exit_signals(tsk) No waiters:
+ * tsk->flags |= PF_EXITING; *uaddr == 0x00000PID
+ * mm_release(tsk) Set waiter bit
+ * exit_robust_list(tsk) { *uaddr = 0x80000PID;
+ * Set owner died attach_to_pi_owner() {
+ * *uaddr = 0xC0000000; tsk = get_task(PID);
+ * } if (!tsk->flags & PF_EXITING) {
+ * ... attach();
+ * tsk->flags |= PF_EXITPIDONE; } else {
+ * if (!(tsk->flags & PF_EXITPIDONE))
+ * return -EAGAIN;
+ * return -ESRCH; <--- FAIL
+ * }
+ *
+ * Returning ESRCH unconditionally is wrong here because the
+ * user space value has been changed by the exiting task.
+ *
+ * The same logic applies to the case where the exiting task is
+ * already gone.
+ */
+ if (get_futex_value_locked(&uval2, uaddr))
+ return -EFAULT;
+
+ /* If the user space value has changed, try again. */
+ if (uval2 != uval)
+ return -EAGAIN;
+
+ /*
+ * The exiting task did not have a robust list, the robust list was
+ * corrupted or the user space value in *uaddr is simply bogus.
+ * Give up and tell user space.
+ */
+ return -ESRCH;
+}
+
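/* [Editor's illustration — not part of the patch] Userspace sketch of
 * the retry-vs-fail decision handle_exit_race() above makes once the
 * futex word has been reread: ex_classify_exit_race() is a
 * hypothetical name and 'owner_done' stands in for the PF_EXITPIDONE
 * test.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

static int ex_classify_exit_race(uint32_t uval, uint32_t uval2,
				 bool owner_done)
{
	if (!owner_done)
		return -EAGAIN;	/* owner still exiting: retry */
	if (uval2 != uval)
		return -EAGAIN;	/* value changed under us: retry */
	return -ESRCH;		/* stable but bogus: give up */
}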
/*
* Lookup the task for the TID provided from user space and attach to
* it after doing proper sanity checks.
*/
-static int attach_to_pi_owner(u32 uval, union futex_key *key,
+static int attach_to_pi_owner(u32 __user *uaddr, u32 uval, union futex_key *key,
struct futex_pi_state **ps)
{
pid_t pid = uval & FUTEX_TID_MASK;
@@ -1162,12 +1219,15 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
/*
* We are the first waiter - try to look up the real owner and attach
* the new pi_state to it, but bail out when TID = 0 [1]
+ *
+ * The !pid check is paranoid. None of the call sites should end up
+	 * with pid == 0, but better safe than sorry. Let the caller retry.
*/
if (!pid)
- return -ESRCH;
+ return -EAGAIN;
p = find_get_task_by_vpid(pid);
if (!p)
- return -ESRCH;
+ return handle_exit_race(uaddr, uval, NULL);
if (unlikely(p->flags & PF_KTHREAD)) {
put_task_struct(p);
@@ -1187,7 +1247,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
* set, we know that the task has finished the
* cleanup:
*/
- int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
+ int ret = handle_exit_race(uaddr, uval, p);
raw_spin_unlock_irq(&p->pi_lock);
put_task_struct(p);
@@ -1244,7 +1304,7 @@ static int lookup_pi_state(u32 __user *uaddr, u32 uval,
* We are the first waiter - try to look up the owner based on
* @uval and attach to it.
*/
- return attach_to_pi_owner(uval, key, ps);
+ return attach_to_pi_owner(uaddr, uval, key, ps);
}
static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
@@ -1352,7 +1412,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
* attach to the owner. If that fails, no harm done, we only
* set the FUTEX_WAITERS bit in the user space variable.
*/
- return attach_to_pi_owner(uval, key, ps);
+ return attach_to_pi_owner(uaddr, newval, key, ps);
}
/**
@@ -3360,7 +3420,7 @@ err_unlock:
* Process a futex-list entry, check whether it's owned by the
* dying task, and do notification if so:
*/
-int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
+static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
u32 uval, uninitialized_var(nval), mval;
@@ -3555,10 +3615,10 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
- struct timespec __user *, utime, u32 __user *, uaddr2,
+ struct __kernel_timespec __user *, utime, u32 __user *, uaddr2,
u32, val3)
{
- struct timespec ts;
+ struct timespec64 ts;
ktime_t t, *tp = NULL;
u32 val2 = 0;
int cmd = op & FUTEX_CMD_MASK;
@@ -3568,12 +3628,12 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
cmd == FUTEX_WAIT_REQUEUE_PI)) {
if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG))))
return -EFAULT;
- if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
+ if (get_timespec64(&ts, utime))
return -EFAULT;
- if (!timespec_valid(&ts))
+ if (!timespec64_valid(&ts))
return -EINVAL;
- t = timespec_to_ktime(ts);
+ t = timespec64_to_ktime(ts);
if (cmd == FUTEX_WAIT)
t = ktime_add_safe(ktime_get(), t);
tp = &t;
@@ -3589,6 +3649,194 @@ SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
}
+#ifdef CONFIG_COMPAT
+/*
+ * Fetch a robust-list pointer. Bit 0 signals PI futexes:
+ */
+static inline int
+compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
+ compat_uptr_t __user *head, unsigned int *pi)
+{
+ if (get_user(*uentry, head))
+ return -EFAULT;
+
+ *entry = compat_ptr((*uentry) & ~1);
+ *pi = (unsigned int)(*uentry) & 1;
+
+ return 0;
+}
+
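/* [Editor's illustration — not part of the patch] The tagged-pointer
 * decoding done by compat_fetch_robust_entry() above, in plain
 * userspace C: bit 0 of the stored pointer flags a PI futex and is
 * stripped before the pointer is dereferenced.
 */
#include <stdint.h>

static void ex_decode_robust_entry(uintptr_t uentry, uintptr_t *addr,
				   unsigned int *pi)
{
	*addr = uentry & ~(uintptr_t)1;		/* clear the tag bit */
	*pi = (unsigned int)(uentry & 1);	/* PI flag */
}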
+static void __user *futex_uaddr(struct robust_list __user *entry,
+ compat_long_t futex_offset)
+{
+ compat_uptr_t base = ptr_to_compat(entry);
+ void __user *uaddr = compat_ptr(base + futex_offset);
+
+ return uaddr;
+}
+
+/*
+ * Walk curr->robust_list (very carefully, it's a userspace list!)
+ * and mark any locks found there dead, and notify any waiters.
+ *
+ * We silently return on any sign of list-walking problem.
+ */
+void compat_exit_robust_list(struct task_struct *curr)
+{
+ struct compat_robust_list_head __user *head = curr->compat_robust_list;
+ struct robust_list __user *entry, *next_entry, *pending;
+ unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+ unsigned int uninitialized_var(next_pi);
+ compat_uptr_t uentry, next_uentry, upending;
+ compat_long_t futex_offset;
+ int rc;
+
+ if (!futex_cmpxchg_enabled)
+ return;
+
+ /*
+ * Fetch the list head (which was registered earlier, via
+ * sys_set_robust_list()):
+ */
+ if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
+ return;
+ /*
+ * Fetch the relative futex offset:
+ */
+ if (get_user(futex_offset, &head->futex_offset))
+ return;
+ /*
+ * Fetch any possibly pending lock-add first, and handle it
+ * if it exists:
+ */
+ if (compat_fetch_robust_entry(&upending, &pending,
+ &head->list_op_pending, &pip))
+ return;
+
+ next_entry = NULL; /* avoid warning with gcc */
+ while (entry != (struct robust_list __user *) &head->list) {
+ /*
+ * Fetch the next entry in the list before calling
+ * handle_futex_death:
+ */
+ rc = compat_fetch_robust_entry(&next_uentry, &next_entry,
+ (compat_uptr_t __user *)&entry->next, &next_pi);
+ /*
+ * A pending lock might already be on the list, so
+		 * don't process it twice:
+ */
+ if (entry != pending) {
+ void __user *uaddr = futex_uaddr(entry, futex_offset);
+
+ if (handle_futex_death(uaddr, curr, pi))
+ return;
+ }
+ if (rc)
+ return;
+ uentry = next_uentry;
+ entry = next_entry;
+ pi = next_pi;
+ /*
+ * Avoid excessively long or circular lists:
+ */
+ if (!--limit)
+ break;
+
+ cond_resched();
+ }
+ if (pending) {
+ void __user *uaddr = futex_uaddr(pending, futex_offset);
+
+ handle_futex_death(uaddr, curr, pip);
+ }
+}
+
+COMPAT_SYSCALL_DEFINE2(set_robust_list,
+ struct compat_robust_list_head __user *, head,
+ compat_size_t, len)
+{
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+
+ if (unlikely(len != sizeof(*head)))
+ return -EINVAL;
+
+ current->compat_robust_list = head;
+
+ return 0;
+}
+
+COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
+ compat_uptr_t __user *, head_ptr,
+ compat_size_t __user *, len_ptr)
+{
+ struct compat_robust_list_head __user *head;
+ unsigned long ret;
+ struct task_struct *p;
+
+ if (!futex_cmpxchg_enabled)
+ return -ENOSYS;
+
+ rcu_read_lock();
+
+ ret = -ESRCH;
+ if (!pid)
+ p = current;
+ else {
+ p = find_task_by_vpid(pid);
+ if (!p)
+ goto err_unlock;
+ }
+
+ ret = -EPERM;
+ if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
+ goto err_unlock;
+
+ head = p->compat_robust_list;
+ rcu_read_unlock();
+
+ if (put_user(sizeof(*head), len_ptr))
+ return -EFAULT;
+ return put_user(ptr_to_compat(head), head_ptr);
+
+err_unlock:
+ rcu_read_unlock();
+
+ return ret;
+}
+#endif /* CONFIG_COMPAT */
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+ struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
+ u32, val3)
+{
+ struct timespec64 ts;
+ ktime_t t, *tp = NULL;
+ int val2 = 0;
+ int cmd = op & FUTEX_CMD_MASK;
+
+ if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
+ cmd == FUTEX_WAIT_BITSET ||
+ cmd == FUTEX_WAIT_REQUEUE_PI)) {
+ if (get_old_timespec32(&ts, utime))
+ return -EFAULT;
+ if (!timespec64_valid(&ts))
+ return -EINVAL;
+
+ t = timespec64_to_ktime(ts);
+ if (cmd == FUTEX_WAIT)
+ t = ktime_add_safe(ktime_get(), t);
+ tp = &t;
+ }
+ if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
+ cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
+ val2 = (int) (unsigned long) utime;
+
+ return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
+}
+#endif /* CONFIG_COMPAT_32BIT_TIME */
+
static void __init futex_detect_cmpxchg(void)
{
#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
deleted file mode 100644
index 410a77a8f6e2..000000000000
--- a/kernel/futex_compat.c
+++ /dev/null
@@ -1,202 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/kernel/futex_compat.c
- *
- * Futex compatibililty routines.
- *
- * Copyright 2006, Red Hat, Inc., Ingo Molnar
- */
-
-#include <linux/linkage.h>
-#include <linux/compat.h>
-#include <linux/nsproxy.h>
-#include <linux/futex.h>
-#include <linux/ptrace.h>
-#include <linux/syscalls.h>
-
-#include <linux/uaccess.h>
-
-
-/*
- * Fetch a robust-list pointer. Bit 0 signals PI futexes:
- */
-static inline int
-fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
- compat_uptr_t __user *head, unsigned int *pi)
-{
- if (get_user(*uentry, head))
- return -EFAULT;
-
- *entry = compat_ptr((*uentry) & ~1);
- *pi = (unsigned int)(*uentry) & 1;
-
- return 0;
-}
-
-static void __user *futex_uaddr(struct robust_list __user *entry,
- compat_long_t futex_offset)
-{
- compat_uptr_t base = ptr_to_compat(entry);
- void __user *uaddr = compat_ptr(base + futex_offset);
-
- return uaddr;
-}
-
-/*
- * Walk curr->robust_list (very carefully, it's a userspace list!)
- * and mark any locks found there dead, and notify any waiters.
- *
- * We silently return on any sign of list-walking problem.
- */
-void compat_exit_robust_list(struct task_struct *curr)
-{
- struct compat_robust_list_head __user *head = curr->compat_robust_list;
- struct robust_list __user *entry, *next_entry, *pending;
- unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
- unsigned int uninitialized_var(next_pi);
- compat_uptr_t uentry, next_uentry, upending;
- compat_long_t futex_offset;
- int rc;
-
- if (!futex_cmpxchg_enabled)
- return;
-
- /*
- * Fetch the list head (which was registered earlier, via
- * sys_set_robust_list()):
- */
- if (fetch_robust_entry(&uentry, &entry, &head->list.next, &pi))
- return;
- /*
- * Fetch the relative futex offset:
- */
- if (get_user(futex_offset, &head->futex_offset))
- return;
- /*
- * Fetch any possibly pending lock-add first, and handle it
- * if it exists:
- */
- if (fetch_robust_entry(&upending, &pending,
- &head->list_op_pending, &pip))
- return;
-
- next_entry = NULL; /* avoid warning with gcc */
- while (entry != (struct robust_list __user *) &head->list) {
- /*
- * Fetch the next entry in the list before calling
- * handle_futex_death:
- */
- rc = fetch_robust_entry(&next_uentry, &next_entry,
- (compat_uptr_t __user *)&entry->next, &next_pi);
- /*
- * A pending lock might already be on the list, so
- * dont process it twice:
- */
- if (entry != pending) {
- void __user *uaddr = futex_uaddr(entry, futex_offset);
-
- if (handle_futex_death(uaddr, curr, pi))
- return;
- }
- if (rc)
- return;
- uentry = next_uentry;
- entry = next_entry;
- pi = next_pi;
- /*
- * Avoid excessively long or circular lists:
- */
- if (!--limit)
- break;
-
- cond_resched();
- }
- if (pending) {
- void __user *uaddr = futex_uaddr(pending, futex_offset);
-
- handle_futex_death(uaddr, curr, pip);
- }
-}
-
-COMPAT_SYSCALL_DEFINE2(set_robust_list,
- struct compat_robust_list_head __user *, head,
- compat_size_t, len)
-{
- if (!futex_cmpxchg_enabled)
- return -ENOSYS;
-
- if (unlikely(len != sizeof(*head)))
- return -EINVAL;
-
- current->compat_robust_list = head;
-
- return 0;
-}
-
-COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
- compat_uptr_t __user *, head_ptr,
- compat_size_t __user *, len_ptr)
-{
- struct compat_robust_list_head __user *head;
- unsigned long ret;
- struct task_struct *p;
-
- if (!futex_cmpxchg_enabled)
- return -ENOSYS;
-
- rcu_read_lock();
-
- ret = -ESRCH;
- if (!pid)
- p = current;
- else {
- p = find_task_by_vpid(pid);
- if (!p)
- goto err_unlock;
- }
-
- ret = -EPERM;
- if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
- goto err_unlock;
-
- head = p->compat_robust_list;
- rcu_read_unlock();
-
- if (put_user(sizeof(*head), len_ptr))
- return -EFAULT;
- return put_user(ptr_to_compat(head), head_ptr);
-
-err_unlock:
- rcu_read_unlock();
-
- return ret;
-}
-
-COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
- struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
- u32, val3)
-{
- struct timespec ts;
- ktime_t t, *tp = NULL;
- int val2 = 0;
- int cmd = op & FUTEX_CMD_MASK;
-
- if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
- cmd == FUTEX_WAIT_BITSET ||
- cmd == FUTEX_WAIT_REQUEUE_PI)) {
- if (compat_get_timespec(&ts, utime))
- return -EFAULT;
- if (!timespec_valid(&ts))
- return -EINVAL;
-
- t = timespec_to_ktime(ts);
- if (cmd == FUTEX_WAIT)
- t = ktime_add_safe(ktime_get(), t);
- tp = &t;
- }
- if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
- cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
- val2 = (int) (unsigned long) utime;
-
- return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
-}
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index f4f29b9d90ee..45b68b4ea48b 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -94,15 +94,15 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
return nodes;
}
-static int irq_build_affinity_masks(const struct irq_affinity *affd,
- int startvec, int numvecs,
- cpumask_var_t *node_to_cpumask,
- const struct cpumask *cpu_mask,
- struct cpumask *nmsk,
- struct cpumask *masks)
+static int __irq_build_affinity_masks(const struct irq_affinity *affd,
+ int startvec, int numvecs, int firstvec,
+ cpumask_var_t *node_to_cpumask,
+ const struct cpumask *cpu_mask,
+ struct cpumask *nmsk,
+ struct irq_affinity_desc *masks)
{
int n, nodes, cpus_per_vec, extra_vecs, done = 0;
- int last_affv = affd->pre_vectors + numvecs;
+ int last_affv = firstvec + numvecs;
int curvec = startvec;
nodemask_t nodemsk = NODE_MASK_NONE;
@@ -117,12 +117,13 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
*/
if (numvecs <= nodes) {
for_each_node_mask(n, nodemsk) {
- cpumask_copy(masks + curvec, node_to_cpumask[n]);
- if (++done == numvecs)
- break;
+ cpumask_or(&masks[curvec].mask,
+ &masks[curvec].mask,
+ node_to_cpumask[n]);
if (++curvec == last_affv)
- curvec = affd->pre_vectors;
+ curvec = firstvec;
}
+ done = numvecs;
goto out;
}
@@ -130,7 +131,7 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
int ncpus, v, vecs_to_assign, vecs_per_node;
/* Spread the vectors per node */
- vecs_per_node = (numvecs - (curvec - affd->pre_vectors)) / nodes;
+ vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
/* Get the cpus on this node which are in the mask */
cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
@@ -151,14 +152,15 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
cpus_per_vec++;
--extra_vecs;
}
- irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
+ irq_spread_init_one(&masks[curvec].mask, nmsk,
+ cpus_per_vec);
}
done += v;
if (done >= numvecs)
break;
if (curvec >= last_affv)
- curvec = affd->pre_vectors;
+ curvec = firstvec;
--nodes;
}
@@ -166,20 +168,77 @@ out:
return done;
}
+/*
+ * build affinity in two stages:
+ * 1) spread present CPUs on these vectors
+ * 2) spread other possible CPUs on these vectors
+ */
+static int irq_build_affinity_masks(const struct irq_affinity *affd,
+ int startvec, int numvecs, int firstvec,
+ cpumask_var_t *node_to_cpumask,
+ struct irq_affinity_desc *masks)
+{
+ int curvec = startvec, nr_present, nr_others;
+ int ret = -ENOMEM;
+ cpumask_var_t nmsk, npresmsk;
+
+ if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
+ return ret;
+
+ if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
+ goto fail;
+
+ ret = 0;
+ /* Stabilize the cpumasks */
+ get_online_cpus();
+ build_node_to_cpumask(node_to_cpumask);
+
+ /* Spread on present CPUs starting from affd->pre_vectors */
+ nr_present = __irq_build_affinity_masks(affd, curvec, numvecs,
+ firstvec, node_to_cpumask,
+ cpu_present_mask, nmsk, masks);
+
+ /*
+ * Spread on non present CPUs starting from the next vector to be
+ * handled. If the spreading of present CPUs already exhausted the
+ * vector space, assign the non present CPUs to the already spread
+ * out vectors.
+ */
+ if (nr_present >= numvecs)
+ curvec = firstvec;
+ else
+ curvec = firstvec + nr_present;
+ cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
+ nr_others = __irq_build_affinity_masks(affd, curvec, numvecs,
+ firstvec, node_to_cpumask,
+ npresmsk, nmsk, masks);
+ put_online_cpus();
+
+ if (nr_present < numvecs)
+ WARN_ON(nr_present + nr_others < numvecs);
+
+ free_cpumask_var(npresmsk);
+
+ fail:
+ free_cpumask_var(nmsk);
+ return ret;
+}
+
/**
* irq_create_affinity_masks - Create affinity masks for multiqueue spreading
* @nvecs: The total number of vectors
* @affd: Description of the affinity requirements
*
- * Returns the masks pointer or NULL if allocation failed.
+ * Returns the irq_affinity_desc pointer or NULL if allocation failed.
*/
-struct cpumask *
+struct irq_affinity_desc *
irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
{
int affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
int curvec, usedvecs;
- cpumask_var_t nmsk, npresmsk, *node_to_cpumask;
- struct cpumask *masks = NULL;
+ cpumask_var_t *node_to_cpumask;
+ struct irq_affinity_desc *masks = NULL;
+ int i, nr_sets;
/*
* If there aren't any vectors left after applying the pre/post
@@ -188,15 +247,9 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
if (nvecs == affd->pre_vectors + affd->post_vectors)
return NULL;
- if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
- return NULL;
-
- if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
- goto outcpumsk;
-
node_to_cpumask = alloc_node_to_cpumask();
if (!node_to_cpumask)
- goto outnpresmsk;
+ return NULL;
masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
if (!masks)
@@ -204,32 +257,29 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
/* Fill out vectors at the beginning that don't need affinity */
for (curvec = 0; curvec < affd->pre_vectors; curvec++)
- cpumask_copy(masks + curvec, irq_default_affinity);
-
- /* Stabilize the cpumasks */
- get_online_cpus();
- build_node_to_cpumask(node_to_cpumask);
-
- /* Spread on present CPUs starting from affd->pre_vectors */
- usedvecs = irq_build_affinity_masks(affd, curvec, affvecs,
- node_to_cpumask, cpu_present_mask,
- nmsk, masks);
-
+ cpumask_copy(&masks[curvec].mask, irq_default_affinity);
/*
- * Spread on non present CPUs starting from the next vector to be
- * handled. If the spreading of present CPUs already exhausted the
- * vector space, assign the non present CPUs to the already spread
- * out vectors.
+ * Spread on present CPUs starting from affd->pre_vectors. If we
+ * have multiple sets, build each set's affinity mask separately.
*/
- if (usedvecs >= affvecs)
- curvec = affd->pre_vectors;
- else
- curvec = affd->pre_vectors + usedvecs;
- cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
- usedvecs += irq_build_affinity_masks(affd, curvec, affvecs,
- node_to_cpumask, npresmsk,
- nmsk, masks);
- put_online_cpus();
+ nr_sets = affd->nr_sets;
+ if (!nr_sets)
+ nr_sets = 1;
+
+ for (i = 0, usedvecs = 0; i < nr_sets; i++) {
+ int this_vecs = affd->sets ? affd->sets[i] : affvecs;
+ int ret;
+
+ ret = irq_build_affinity_masks(affd, curvec, this_vecs,
+ curvec, node_to_cpumask, masks);
+ if (ret) {
+ kfree(masks);
+ masks = NULL;
+ goto outnodemsk;
+ }
+ curvec += this_vecs;
+ usedvecs += this_vecs;
+ }
/* Fill out vectors at the end that don't need affinity */
if (usedvecs >= affvecs)
@@ -237,14 +287,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
else
curvec = affd->pre_vectors + usedvecs;
for (; curvec < nvecs; curvec++)
- cpumask_copy(masks + curvec, irq_default_affinity);
+ cpumask_copy(&masks[curvec].mask, irq_default_affinity);
+
+ /* Mark the managed interrupts */
+ for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
+ masks[i].is_managed = 1;
outnodemsk:
free_node_to_cpumask(node_to_cpumask);
-outnpresmsk:
- free_cpumask_var(npresmsk);
-outcpumsk:
- free_cpumask_var(nmsk);
return masks;
}
@@ -258,13 +308,21 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
{
int resv = affd->pre_vectors + affd->post_vectors;
int vecs = maxvec - resv;
- int ret;
+ int set_vecs;
if (resv > minvec)
return 0;
- get_online_cpus();
- ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
- put_online_cpus();
- return ret;
+ if (affd->nr_sets) {
+ int i;
+
+ for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)
+ set_vecs += affd->sets[i];
+ } else {
+ get_online_cpus();
+ set_vecs = cpumask_weight(cpu_possible_mask);
+ put_online_cpus();
+ }
+
+ return resv + min(set_vecs, vecs);
}
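The reworked irq_calc_affinity_vectors() above reduces to simple arithmetic: the reserved pre/post vectors are always granted, and the spreadable remainder is capped by either the sum of the interrupt-set sizes or the possible-CPU count. A hedged stand-alone sketch of the same computation (parameter layout assumed, not the kernel API):

    /* Sketch of the vector budget: resv + min(set_vecs, maxvec - resv). */
    static int calc_affinity_vectors(int minvec, int maxvec, int pre, int post,
                                     const int *sets, int nr_sets,
                                     int nr_possible_cpus)
    {
            int resv = pre + post;
            int vecs = maxvec - resv;
            int set_vecs = 0, i;

            if (resv > minvec)
                    return 0;

            if (nr_sets) {
                    for (i = 0; i < nr_sets; i++)
                            set_vecs += sets[i];
            } else {
                    set_vecs = nr_possible_cpus;
            }

            return resv + (set_vecs < vecs ? set_vecs : vecs);
    }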
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a2b3d9de999c..34e969069488 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -929,7 +929,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
break;
/*
* Bail out if the outer chip is not set up
- * and the interrrupt supposed to be started
+ * and the interrupt supposed to be started
* right away.
*/
if (WARN_ON(is_chained))
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 6a682c229e10..5d5378ea0afe 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -169,7 +169,7 @@ static void devm_irq_desc_release(struct device *dev, void *res)
* @cnt: Number of consecutive irqs to allocate
* @node: Preferred node on which the irq descriptor should be allocated
* @owner: Owning module (can be NULL)
- * @affinity: Optional pointer to an affinity mask array of size @cnt
+ * @affinity: Optional pointer to an irq_affinity_desc array of size @cnt
* which hints where the irq descriptors should be allocated
* and which default affinities to use
*
@@ -179,7 +179,7 @@ static void devm_irq_desc_release(struct device *dev, void *res)
*/
int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from,
unsigned int cnt, int node, struct module *owner,
- const struct cpumask *affinity)
+ const struct irq_affinity_desc *affinity)
{
struct irq_desc_devres *dr;
int base;
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index 8b778e37dc6d..43e3d1be622c 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -56,7 +56,7 @@ int irq_reserve_ipi(struct irq_domain *domain,
unsigned int next;
/*
- * The IPI requires a seperate HW irq on each CPU. We require
+ * The IPI requires a separate HW irq on each CPU. We require
* that the destination mask is consecutive. If an
* implementation needs to support holes, it can reserve
* several IPI ranges.
@@ -172,7 +172,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
/*
* Get the real hardware irq number if the underlying implementation
- * uses a seperate irq per cpu. If the underlying implementation uses
+ * uses a separate irq per cpu. If the underlying implementation uses
* a single hardware irq for all cpus then the IPI send mechanism
* needs to take care of the cpu destinations.
*/
diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c
index dd20d0d528d4..98a20e1594ce 100644
--- a/kernel/irq/irq_sim.c
+++ b/kernel/irq/irq_sim.c
@@ -34,9 +34,20 @@ static struct irq_chip irq_sim_irqchip = {
static void irq_sim_handle_irq(struct irq_work *work)
{
struct irq_sim_work_ctx *work_ctx;
+ unsigned int offset = 0;
+ struct irq_sim *sim;
+ int irqnum;
work_ctx = container_of(work, struct irq_sim_work_ctx, work);
- handle_simple_irq(irq_to_desc(work_ctx->irq));
+ sim = container_of(work_ctx, struct irq_sim, work_ctx);
+
+ while (!bitmap_empty(work_ctx->pending, sim->irq_count)) {
+ offset = find_next_bit(work_ctx->pending,
+ sim->irq_count, offset);
+ clear_bit(offset, work_ctx->pending);
+ irqnum = irq_sim_irqnum(sim, offset);
+ handle_simple_irq(irq_to_desc(irqnum));
+ }
}
/**
@@ -63,6 +74,13 @@ int irq_sim_init(struct irq_sim *sim, unsigned int num_irqs)
return sim->irq_base;
}
+ sim->work_ctx.pending = bitmap_zalloc(num_irqs, GFP_KERNEL);
+ if (!sim->work_ctx.pending) {
+ kfree(sim->irqs);
+ irq_free_descs(sim->irq_base, num_irqs);
+ return -ENOMEM;
+ }
+
for (i = 0; i < num_irqs; i++) {
sim->irqs[i].irqnum = sim->irq_base + i;
sim->irqs[i].enabled = false;
@@ -89,6 +107,7 @@ EXPORT_SYMBOL_GPL(irq_sim_init);
void irq_sim_fini(struct irq_sim *sim)
{
irq_work_sync(&sim->work_ctx.work);
+ bitmap_free(sim->work_ctx.pending);
irq_free_descs(sim->irq_base, sim->irq_count);
kfree(sim->irqs);
}
@@ -143,7 +162,7 @@ EXPORT_SYMBOL_GPL(devm_irq_sim_init);
void irq_sim_fire(struct irq_sim *sim, unsigned int offset)
{
if (sim->irqs[offset].enabled) {
- sim->work_ctx.irq = irq_sim_irqnum(sim, offset);
+ set_bit(offset, sim->work_ctx.pending);
irq_work_queue(&sim->work_ctx.work);
}
}
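After this change irq_sim_fire() only records the interrupt in a pending bitmap and queues the work item; the handler then drains every set bit, so several fires arriving before the work runs are no longer collapsed into one. The producer/consumer shape, sketched outside the kernel (a single word instead of a bitmap, no atomics, GCC builtin for the bit scan):

    static unsigned long pending;   /* one bit per simulated irq */

    static void fire(unsigned int offset)
    {
            pending |= 1UL << offset;       /* kernel: set_bit() + irq_work_queue() */
    }

    static void drain(void (*handle)(unsigned int))
    {
            while (pending) {
                    unsigned int offset = __builtin_ctzl(pending);

                    pending &= ~(1UL << offset);    /* kernel: clear_bit() */
                    handle(offset);                 /* kernel: handle_simple_irq() */
            }
    }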
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 578d0e5f1b5b..ee062b7939d3 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -449,30 +449,34 @@ static void free_desc(unsigned int irq)
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
- const struct cpumask *affinity, struct module *owner)
+ const struct irq_affinity_desc *affinity,
+ struct module *owner)
{
- const struct cpumask *mask = NULL;
struct irq_desc *desc;
- unsigned int flags;
int i;
/* Validate affinity mask(s) */
if (affinity) {
- for (i = 0, mask = affinity; i < cnt; i++, mask++) {
- if (cpumask_empty(mask))
+ for (i = 0; i < cnt; i++) {
+ if (cpumask_empty(&affinity[i].mask))
return -EINVAL;
}
}
- flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
- mask = NULL;
-
for (i = 0; i < cnt; i++) {
+ const struct cpumask *mask = NULL;
+ unsigned int flags = 0;
+
if (affinity) {
- node = cpu_to_node(cpumask_first(affinity));
- mask = affinity;
+ if (affinity->is_managed) {
+ flags = IRQD_AFFINITY_MANAGED |
+ IRQD_MANAGED_SHUTDOWN;
+ }
+ mask = &affinity->mask;
+ node = cpu_to_node(cpumask_first(mask));
affinity++;
}
+
desc = alloc_desc(start + i, node, flags, mask, owner);
if (!desc)
goto err;
@@ -575,7 +579,7 @@ static void free_desc(unsigned int irq)
}
static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
- const struct cpumask *affinity,
+ const struct irq_affinity_desc *affinity,
struct module *owner)
{
u32 i;
@@ -705,7 +709,7 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
*/
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
- struct module *owner, const struct cpumask *affinity)
+ struct module *owner, const struct irq_affinity_desc *affinity)
{
int start, ret;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3366d11c3e02..8b0be4bd6565 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -969,7 +969,7 @@ const struct irq_domain_ops irq_domain_simple_ops = {
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
- int node, const struct cpumask *affinity)
+ int node, const struct irq_affinity_desc *affinity)
{
unsigned int hint;
@@ -1281,7 +1281,7 @@ int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
*/
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
unsigned int nr_irqs, int node, void *arg,
- bool realloc, const struct cpumask *affinity)
+ bool realloc, const struct irq_affinity_desc *affinity)
{
int i, ret, virq;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9dbdccab3b6a..a4888ce4667a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -915,7 +915,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif
/*
- * Interrupts which are not explicitely requested as threaded
+ * Interrupts which are not explicitly requested as threaded
* interrupts rely on the implicit bh/preempt disable of the hard irq
* context. So we need to disable bh here to avoid deadlocks and other
* side effects.
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 1f0985adf193..30cc217b8631 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -14,6 +14,7 @@ struct cpumap {
unsigned int available;
unsigned int allocated;
unsigned int managed;
+ unsigned int managed_allocated;
bool initialized;
bool online;
unsigned long alloc_map[IRQ_MATRIX_SIZE];
@@ -145,6 +146,27 @@ static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
return best_cpu;
}
+/* Find the best CPU which has the lowest number of managed IRQs allocated */
+static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
+ const struct cpumask *msk)
+{
+ unsigned int cpu, best_cpu, allocated = UINT_MAX;
+ struct cpumap *cm;
+
+ best_cpu = UINT_MAX;
+
+ for_each_cpu(cpu, msk) {
+ cm = per_cpu_ptr(m->maps, cpu);
+
+ if (!cm->online || cm->managed_allocated > allocated)
+ continue;
+
+ best_cpu = cpu;
+ allocated = cm->managed_allocated;
+ }
+ return best_cpu;
+}
+
/**
* irq_matrix_assign_system - Assign system wide entry in the matrix
* @m: Matrix pointer
@@ -269,7 +291,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
if (cpumask_empty(msk))
return -EINVAL;
- cpu = matrix_find_best_cpu(m, msk);
+ cpu = matrix_find_best_cpu_managed(m, msk);
if (cpu == UINT_MAX)
return -ENOSPC;
@@ -282,6 +304,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
return -ENOSPC;
set_bit(bit, cm->alloc_map);
cm->allocated++;
+ cm->managed_allocated++;
m->total_allocated++;
*mapped_cpu = cpu;
trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
@@ -395,6 +418,8 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
clear_bit(bit, cm->alloc_map);
cm->allocated--;
+ if (managed)
+ cm->managed_allocated--;
if (cm->online)
m->total_allocated--;
@@ -464,13 +489,14 @@ void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
m->system_map);
- seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
+ seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
cpus_read_lock();
for_each_online_cpu(cpu) {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
- seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ",
- cpu, cm->available, cm->managed, cm->allocated,
+ seq_printf(sf, "%*s %4d %4u %4u %4u %4u %*pbl\n", ind, " ",
+ cpu, cm->available, cm->managed,
+ cm->managed_allocated, cm->allocated,
m->matrix_bits, cm->alloc_map);
}
cpus_read_unlock();
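matrix_find_best_cpu_managed() is an arg-min over the online CPUs keyed on the new managed_allocated counter, so managed interrupts spread to the least-loaded CPU at allocation time instead of piling onto the first eligible one. The selection loop in isolation (struct fields are stand-ins):

    #include <limits.h>

    struct cpu_load {
            int online;
            unsigned int managed_allocated;
    };

    /* Pick the online CPU with the fewest managed IRQs allocated so far. */
    static unsigned int pick_cpu(const struct cpu_load *cpus, unsigned int n)
    {
            unsigned int cpu, best = UINT_MAX, lowest = UINT_MAX;

            for (cpu = 0; cpu < n; cpu++) {
                    if (!cpus[cpu].online || cpus[cpu].managed_allocated > lowest)
                            continue;
                    best = cpu;
                    lowest = cpus[cpu].managed_allocated;
            }
            return best;    /* UINT_MAX if no CPU is online */
    }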
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 4ca2fd46645d..ad26fbcfbfc8 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -23,11 +23,11 @@
* @nvec: The number of vectors used in this entry
* @affinity: Optional pointer to an affinity mask array size of @nvec
*
- * If @affinity is not NULL then a an affinity array[@nvec] is allocated
- * and the affinity masks from @affinity are copied.
+ * If @affinity is not NULL then an affinity array[@nvec] is allocated
+ * and the affinity masks and flags from @affinity are copied.
*/
-struct msi_desc *
-alloc_msi_entry(struct device *dev, int nvec, const struct cpumask *affinity)
+struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
+ const struct irq_affinity_desc *affinity)
{
struct msi_desc *desc;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index d867d6ddafdd..6d2fa6914b30 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -66,7 +66,7 @@ static int try_one_irq(struct irq_desc *desc, bool force)
raw_spin_lock(&desc->lock);
/*
- * PER_CPU, nested thread interrupts and interrupts explicitely
+ * PER_CPU, nested thread interrupts and interrupts explicitly
* marked polled are excluded from polling.
*/
if (irq_settings_is_per_cpu(desc) ||
@@ -76,7 +76,7 @@ static int try_one_irq(struct irq_desc *desc, bool force)
/*
* Do not poll disabled interrupts unless the spurious
- * disabled poller asks explicitely.
+ * disabled poller asks explicitly.
*/
if (irqd_irq_disabled(&desc->irq_data) && !force)
goto out;
@@ -292,7 +292,7 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
* So in case a thread is woken, we just note the fact and
* defer the analysis to the next hardware interrupt.
*
- * The threaded handlers store whether they sucessfully
+ * The threaded handlers store whether they successfully
* handled an interrupt and we check whether that number
* changed versus the last invocation.
*
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3ebd09efe72a..97959d7b77e2 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -56,7 +56,7 @@ struct kcov {
struct task_struct *t;
};
-static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
unsigned int mode;
@@ -78,7 +78,7 @@ static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
return mode == needed_mode;
}
-static unsigned long canonicalize_ip(unsigned long ip)
+static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
ip -= kaslr_offset();
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 35cf0ad29718..f1d0e00a3971 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -16,6 +16,7 @@
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
+#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
@@ -76,7 +77,7 @@ void * __weak arch_kexec_kernel_image_load(struct kimage *image)
return kexec_image_load_default(image);
}
-static int kexec_image_post_load_cleanup_default(struct kimage *image)
+int kexec_image_post_load_cleanup_default(struct kimage *image)
{
if (!image->fops || !image->fops->cleanup)
return 0;
@@ -499,8 +500,60 @@ static int locate_mem_hole_callback(struct resource *res, void *arg)
return locate_mem_hole_bottom_up(start, end, kbuf);
}
+#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+static int kexec_walk_memblock(struct kexec_buf *kbuf,
+ int (*func)(struct resource *, void *))
+{
+ return 0;
+}
+#else
+static int kexec_walk_memblock(struct kexec_buf *kbuf,
+ int (*func)(struct resource *, void *))
+{
+ int ret = 0;
+ u64 i;
+ phys_addr_t mstart, mend;
+ struct resource res = { };
+
+ if (kbuf->image->type == KEXEC_TYPE_CRASH)
+ return func(&crashk_res, kbuf);
+
+ if (kbuf->top_down) {
+ for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+ &mstart, &mend, NULL) {
+ /*
+ * In memblock, end points to the first byte after the
+ * range while in kexec, end points to the last byte
+ * in the range.
+ */
+ res.start = mstart;
+ res.end = mend - 1;
+ ret = func(&res, kbuf);
+ if (ret)
+ break;
+ }
+ } else {
+ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+ &mstart, &mend, NULL) {
+ /*
+ * In memblock, end points to the first byte after the
+ * range while in kexec, end points to the last byte
+ * in the range.
+ */
+ res.start = mstart;
+ res.end = mend - 1;
+ ret = func(&res, kbuf);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+#endif
+
/**
- * arch_kexec_walk_mem - call func(data) on free memory regions
+ * kexec_walk_resources - call func(data) on free memory regions
* @kbuf: Context info for the search. Also passed to @func.
* @func: Function to call for each memory region.
*
@@ -508,8 +561,8 @@ static int locate_mem_hole_callback(struct resource *res, void *arg)
* and that value will be returned. If all free regions are visited without
* func returning non-zero, then zero will be returned.
*/
-int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
- int (*func)(struct resource *, void *))
+static int kexec_walk_resources(struct kexec_buf *kbuf,
+ int (*func)(struct resource *, void *))
{
if (kbuf->image->type == KEXEC_TYPE_CRASH)
return walk_iomem_res_desc(crashk_res.desc,
@@ -532,7 +585,14 @@ int kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
int ret;
- ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback);
+ /* Arch knows where to place */
+ if (kbuf->mem != KEXEC_BUF_MEM_UNKNOWN)
+ return 0;
+
+ if (IS_ENABLED(CONFIG_ARCH_DISCARD_MEMBLOCK))
+ ret = kexec_walk_resources(kbuf, locate_mem_hole_callback);
+ else
+ ret = kexec_walk_memblock(kbuf, locate_mem_hole_callback);
return ret == 1 ? 0 : -EADDRNOTAVAIL;
}
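The repeated comment in kexec_walk_memblock() covers the one conversion that matters: memblock hands out half-open ranges while struct resource is inclusive at both ends. A tiny hedged example of the off-by-one (hypothetical addresses):

    #include <stdint.h>

    struct range { uint64_t start, end; };  /* inclusive, like struct resource */

    /* [mstart, mend) from memblock -> inclusive [start, end] for kexec. */
    static struct range memblock_to_range(uint64_t mstart, uint64_t mend)
    {
            struct range r = { .start = mstart, .end = mend - 1 };

            return r;       /* e.g. [0x1000, 0x2000) becomes 0x1000..0x1fff */
    }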
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 90e98e233647..f4ddfdd2d07e 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -229,7 +229,7 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
struct kprobe_insn_page *kip, *next;
/* Ensure no-one is interrupted on the garbages */
- synchronize_sched();
+ synchronize_rcu();
list_for_each_entry_safe(kip, next, &c->pages, list) {
int i;
@@ -1382,7 +1382,7 @@ out:
if (ret) {
ap->flags |= KPROBE_FLAG_DISABLED;
list_del_rcu(&p->list);
- synchronize_sched();
+ synchronize_rcu();
}
}
}
@@ -1597,7 +1597,7 @@ int register_kprobe(struct kprobe *p)
ret = arm_kprobe(p);
if (ret) {
hlist_del_rcu(&p->hlist);
- synchronize_sched();
+ synchronize_rcu();
goto out;
}
}
@@ -1776,7 +1776,7 @@ void unregister_kprobes(struct kprobe **kps, int num)
kps[i]->addr = NULL;
mutex_unlock(&kprobe_mutex);
- synchronize_sched();
+ synchronize_rcu();
for (i = 0; i < num; i++)
if (kps[i]->addr)
__unregister_kprobe_bottom(kps[i]);
@@ -1966,7 +1966,7 @@ void unregister_kretprobes(struct kretprobe **rps, int num)
rps[i]->kp.addr = NULL;
mutex_unlock(&kprobe_mutex);
- synchronize_sched();
+ synchronize_rcu();
for (i = 0; i < num; i++) {
if (rps[i]->kp.addr) {
__unregister_kprobe_bottom(&rps[i]->kp);
@@ -2093,6 +2093,47 @@ void dump_kprobe(struct kprobe *kp)
}
NOKPROBE_SYMBOL(dump_kprobe);
+int kprobe_add_ksym_blacklist(unsigned long entry)
+{
+ struct kprobe_blacklist_entry *ent;
+ unsigned long offset = 0, size = 0;
+
+ if (!kernel_text_address(entry) ||
+ !kallsyms_lookup_size_offset(entry, &size, &offset))
+ return -EINVAL;
+
+ ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+ if (!ent)
+ return -ENOMEM;
+ ent->start_addr = entry;
+ ent->end_addr = entry + size;
+ INIT_LIST_HEAD(&ent->list);
+ list_add_tail(&ent->list, &kprobe_blacklist);
+
+ return (int)size;
+}
+
+/* Add all symbols in given area into kprobe blacklist */
+int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
+{
+ unsigned long entry;
+ int ret = 0;
+
+ for (entry = start; entry < end; entry += ret) {
+ ret = kprobe_add_ksym_blacklist(entry);
+ if (ret < 0)
+ return ret;
+ if (ret == 0) /* In case of alias symbol */
+ ret = 1;
+ }
+ return 0;
+}
+
+int __init __weak arch_populate_kprobe_blacklist(void)
+{
+ return 0;
+}
+
/*
* Lookup and populate the kprobe_blacklist.
*
@@ -2104,26 +2145,24 @@ NOKPROBE_SYMBOL(dump_kprobe);
static int __init populate_kprobe_blacklist(unsigned long *start,
unsigned long *end)
{
+ unsigned long entry;
unsigned long *iter;
- struct kprobe_blacklist_entry *ent;
- unsigned long entry, offset = 0, size = 0;
+ int ret;
for (iter = start; iter < end; iter++) {
entry = arch_deref_entry_point((void *)*iter);
-
- if (!kernel_text_address(entry) ||
- !kallsyms_lookup_size_offset(entry, &size, &offset))
+ ret = kprobe_add_ksym_blacklist(entry);
+ if (ret == -EINVAL)
continue;
-
- ent = kmalloc(sizeof(*ent), GFP_KERNEL);
- if (!ent)
- return -ENOMEM;
- ent->start_addr = entry;
- ent->end_addr = entry + size;
- INIT_LIST_HEAD(&ent->list);
- list_add_tail(&ent->list, &kprobe_blacklist);
+ if (ret < 0)
+ return ret;
}
- return 0;
+
+ /* Symbols in __kprobes_text are blacklisted */
+ ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
+ (unsigned long)__kprobes_text_end);
+
+ return ret ? : arch_populate_kprobe_blacklist();
}
/* Module notifier call back, checking kprobes on the module */
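kprobe_add_area_blacklist() walks a text area symbol by symbol: each successful kprobe_add_ksym_blacklist() returns the symbol size, which becomes the stride, and a zero return (an alias symbol) degrades to a one-byte step so the loop still terminates. The control flow, restated with a stand-in callback:

    /* add_one() stands in for kprobe_add_ksym_blacklist(): it returns the
     * symbol size, 0 for an alias symbol, or a negative error code. */
    static int blacklist_area(unsigned long start, unsigned long end,
                              int (*add_one)(unsigned long))
    {
            unsigned long entry;
            int ret = 0;

            for (entry = start; entry < end; entry += ret) {
                    ret = add_one(entry);
                    if (ret < 0)
                            return ret;
                    if (ret == 0)   /* alias: advance minimally */
                            ret = 1;
            }
            return 0;
    }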
diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 82d584225dc6..7702cb4064fc 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -61,7 +61,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
ops = container_of(fops, struct klp_ops, fops);
/*
- * A variant of synchronize_sched() is used to allow patching functions
+ * A variant of synchronize_rcu() is used to allow patching functions
* where RCU is not watching, see klp_synchronize_transition().
*/
preempt_disable_notrace();
@@ -72,7 +72,7 @@ static void notrace klp_ftrace_handler(unsigned long ip,
/*
* func should never be NULL because preemption should be disabled here
* and unregister_ftrace_function() does the equivalent of a
- * synchronize_sched() before the func_stack removal.
+ * synchronize_rcu() before the func_stack removal.
*/
if (WARN_ON_ONCE(!func))
goto unlock;
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index 5bc349805e03..304d5eb8a98c 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -52,7 +52,7 @@ static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);
/*
* This function is just a stub to implement a hard force
- * of synchronize_sched(). This requires synchronizing
+ * of synchronize_rcu(). This requires synchronizing
* tasks even in userspace and idle.
*/
static void klp_sync(struct work_struct *work)
@@ -175,7 +175,7 @@ void klp_cancel_transition(void)
void klp_update_patch_state(struct task_struct *task)
{
/*
- * A variant of synchronize_sched() is used to allow patching functions
+ * A variant of synchronize_rcu() is used to allow patching functions
* where RCU is not watching, see klp_synchronize_transition().
*/
preempt_disable_notrace();
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 1efada2dd9dd..95932333a48b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -138,6 +138,9 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
* get freed - this significantly simplifies the debugging code.
*/
unsigned long nr_lock_classes;
+#ifndef CONFIG_DEBUG_LOCKDEP
+static
+#endif
struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
static inline struct lock_class *hlock_class(struct held_lock *hlock)
@@ -626,7 +629,8 @@ static int static_obj(void *obj)
/*
* To make lock name printouts unique, we calculate a unique
- * class->name_version generation counter:
+ * class->name_version generation counter. The caller must hold the graph
+ * lock.
*/
static int count_matching_names(struct lock_class *new_class)
{
@@ -636,7 +640,7 @@ static int count_matching_names(struct lock_class *new_class)
if (!new_class->name)
return 0;
- list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
+ list_for_each_entry(class, &all_lock_classes, lock_entry) {
if (new_class->key - new_class->subclass == class->key)
return class->name_version;
if (class->name && !strcmp(class->name, new_class->name))
@@ -789,7 +793,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
class->key = key;
class->name = lock->name;
class->subclass = subclass;
- INIT_LIST_HEAD(&class->lock_entry);
INIT_LIST_HEAD(&class->locks_before);
INIT_LIST_HEAD(&class->locks_after);
class->name_version = count_matching_names(class);
@@ -801,7 +804,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
/*
* Add it to the global list of classes:
*/
- list_add_tail_rcu(&class->lock_entry, &all_lock_classes);
+ list_add_tail(&class->lock_entry, &all_lock_classes);
if (verbose(class)) {
graph_unlock();
@@ -3088,7 +3091,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
/*
* Initialize a lock instance's lock-class mapping info:
*/
-static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
+void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
int i;
@@ -3144,12 +3147,6 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name,
raw_local_irq_restore(flags);
}
}
-
-void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass)
-{
- __lockdep_init_map(lock, name, key, subclass);
-}
EXPORT_SYMBOL_GPL(lockdep_init_map);
struct lock_class_key __lockdep_no_validate__;
@@ -4126,6 +4123,9 @@ void lockdep_reset(void)
raw_local_irq_restore(flags);
}
+/*
+ * Remove all references to a lock class. The caller must hold the graph lock.
+ */
static void zap_class(struct lock_class *class)
{
int i;
@@ -4142,7 +4142,7 @@ static void zap_class(struct lock_class *class)
* Unhash the class and remove it from the all_lock_classes list:
*/
hlist_del_rcu(&class->hash_entry);
- list_del_rcu(&class->lock_entry);
+ list_del(&class->lock_entry);
RCU_INIT_POINTER(class->key, NULL);
RCU_INIT_POINTER(class->name, NULL);
@@ -4195,7 +4195,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
*
* sync_sched() is sufficient because the read-side is IRQ disable.
*/
- synchronize_sched();
+ synchronize_rcu();
/*
* XXX at this point we could return the resources to the pool;
@@ -4204,15 +4204,36 @@ void lockdep_free_key_range(void *start, unsigned long size)
*/
}
-void lockdep_reset_lock(struct lockdep_map *lock)
+/*
+ * Check whether any element of the @lock->class_cache[] array refers to a
+ * registered lock class. The caller must hold either the graph lock or the
+ * RCU read lock.
+ */
+static bool lock_class_cache_is_registered(struct lockdep_map *lock)
{
struct lock_class *class;
struct hlist_head *head;
- unsigned long flags;
int i, j;
- int locked;
+
+ for (i = 0; i < CLASSHASH_SIZE; i++) {
+ head = classhash_table + i;
+ hlist_for_each_entry_rcu(class, head, hash_entry) {
+ for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
+ if (lock->class_cache[j] == class)
+ return true;
+ }
+ }
+ return false;
+}
+
+void lockdep_reset_lock(struct lockdep_map *lock)
+{
+ struct lock_class *class;
+ unsigned long flags;
+ int j, locked;
raw_local_irq_save(flags);
+ locked = graph_lock();
/*
* Remove all classes this lock might have:
@@ -4229,25 +4250,14 @@ void lockdep_reset_lock(struct lockdep_map *lock)
* Debug check: in the end all mapped classes should
* be gone.
*/
- locked = graph_lock();
- for (i = 0; i < CLASSHASH_SIZE; i++) {
- head = classhash_table + i;
- hlist_for_each_entry_rcu(class, head, hash_entry) {
- int match = 0;
-
- for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
- match |= class == lock->class_cache[j];
-
- if (unlikely(match)) {
- if (debug_locks_off_graph_unlock()) {
- /*
- * We all just reset everything, how did it match?
- */
- WARN_ON(1);
- }
- goto out_restore;
- }
+ if (unlikely(lock_class_cache_is_registered(lock))) {
+ if (debug_locks_off_graph_unlock()) {
+ /*
+ * We all just reset everything, how did it match?
+ */
+ WARN_ON(1);
}
+ goto out_restore;
}
if (locked)
graph_unlock();
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 9aa713629387..771d4ca96dda 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -36,7 +36,7 @@ void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
- SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+ lockdep_assert_held(&lock->wait_lock);
DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -51,7 +51,7 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
- SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+ lockdep_assert_held(&lock->wait_lock);
/* Mark the current thread as blocked on the lock: */
task->blocked_on = waiter;
diff --git a/kernel/module.c b/kernel/module.c
index 49a405891587..d46c7814a00e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -495,9 +495,9 @@ struct find_symbol_arg {
const struct kernel_symbol *sym;
};
-static bool check_symbol(const struct symsearch *syms,
- struct module *owner,
- unsigned int symnum, void *data)
+static bool check_exported_symbol(const struct symsearch *syms,
+ struct module *owner,
+ unsigned int symnum, void *data)
{
struct find_symbol_arg *fsa = data;
@@ -555,9 +555,9 @@ static int cmp_name(const void *va, const void *vb)
return strcmp(a, kernel_symbol_name(b));
}
-static bool find_symbol_in_section(const struct symsearch *syms,
- struct module *owner,
- void *data)
+static bool find_exported_symbol_in_section(const struct symsearch *syms,
+ struct module *owner,
+ void *data)
{
struct find_symbol_arg *fsa = data;
struct kernel_symbol *sym;
@@ -565,13 +565,14 @@ static bool find_symbol_in_section(const struct symsearch *syms,
sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
sizeof(struct kernel_symbol), cmp_name);
- if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
+ if (sym != NULL && check_exported_symbol(syms, owner,
+ sym - syms->start, data))
return true;
return false;
}
-/* Find a symbol and return it, along with, (optional) crc and
+/* Find an exported symbol and return it, along with (optional) crc and
* (optional) module which owns it. Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
struct module **owner,
@@ -585,7 +586,7 @@ const struct kernel_symbol *find_symbol(const char *name,
fsa.gplok = gplok;
fsa.warn = warn;
- if (each_symbol_section(find_symbol_in_section, &fsa)) {
+ if (each_symbol_section(find_exported_symbol_in_section, &fsa)) {
if (owner)
*owner = fsa.owner;
if (crc)
@@ -2159,7 +2160,7 @@ static void free_module(struct module *mod)
/* Remove this module from bug list, this uses list_del_rcu */
module_bug_cleanup(mod);
/* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
- synchronize_sched();
+ synchronize_rcu();
mutex_unlock(&module_mutex);
/* This may be empty, but that's OK */
@@ -2198,7 +2199,7 @@ EXPORT_SYMBOL_GPL(__symbol_get);
*
* You must hold the module_mutex.
*/
-static int verify_export_symbols(struct module *mod)
+static int verify_exported_symbols(struct module *mod)
{
unsigned int i;
struct module *owner;
@@ -2519,10 +2520,10 @@ static void free_modinfo(struct module *mod)
#ifdef CONFIG_KALLSYMS
-/* lookup symbol in given range of kernel_symbols */
-static const struct kernel_symbol *lookup_symbol(const char *name,
- const struct kernel_symbol *start,
- const struct kernel_symbol *stop)
+/* Lookup exported symbol in given range of kernel_symbols */
+static const struct kernel_symbol *lookup_exported_symbol(const char *name,
+ const struct kernel_symbol *start,
+ const struct kernel_symbol *stop)
{
return bsearch(name, start, stop - start,
sizeof(struct kernel_symbol), cmp_name);
@@ -2533,9 +2534,10 @@ static int is_exported(const char *name, unsigned long value,
{
const struct kernel_symbol *ks;
if (!mod)
- ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
+ ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab);
else
- ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
+ ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms);
+
return ks != NULL && kernel_symbol_value(ks) == value;
}
@@ -2682,7 +2684,7 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
/* Set types up while we still have access to sections. */
for (i = 0; i < mod->kallsyms->num_symtab; i++)
- mod->kallsyms->symtab[i].st_info
+ mod->kallsyms->symtab[i].st_size
= elf_type(&mod->kallsyms->symtab[i], info);
/* Now populate the cut down core kallsyms for after init. */
@@ -3093,6 +3095,11 @@ static int find_module_sections(struct module *mod, struct load_info *info)
sizeof(*mod->tracepoints_ptrs),
&mod->num_tracepoints);
#endif
+#ifdef CONFIG_BPF_EVENTS
+ mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
+ sizeof(*mod->bpf_raw_events),
+ &mod->num_bpf_raw_events);
+#endif
#ifdef HAVE_JUMP_LABEL
mod->jump_entries = section_objs(info, "__jump_table",
sizeof(*mod->jump_entries),
@@ -3507,15 +3514,15 @@ static noinline int do_init_module(struct module *mod)
/*
* We want to free module_init, but be aware that kallsyms may be
* walking this with preempt disabled. In all the failure paths, we
- * call synchronize_sched(), but we don't want to slow down the success
+ * call synchronize_rcu(), but we don't want to slow down the success
* path, so use actual RCU here.
* Note that module_alloc() on most architectures creates W+X page
* mappings which won't be cleaned up until do_free_init() runs. Any
* code such as mark_rodata_ro() which depends on those mappings to
* be cleaned up needs to sync with the queued work - ie
- * rcu_barrier_sched()
+ * rcu_barrier()
*/
- call_rcu_sched(&freeinit->rcu, do_free_init);
+ call_rcu(&freeinit->rcu, do_free_init);
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
@@ -3526,7 +3533,7 @@ fail_free_freeinit:
fail:
/* Try to protect us from buggy refcounters. */
mod->state = MODULE_STATE_GOING;
- synchronize_sched();
+ synchronize_rcu();
module_put(mod);
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_GOING, mod);
@@ -3592,7 +3599,7 @@ static int complete_formation(struct module *mod, struct load_info *info)
mutex_lock(&module_mutex);
/* Find duplicate symbols (must be called under lock). */
- err = verify_export_symbols(mod);
+ err = verify_exported_symbols(mod);
if (err < 0)
goto out;
@@ -3819,7 +3826,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
ddebug_cleanup:
ftrace_release_mod(mod);
dynamic_debug_remove(mod, info->debug);
- synchronize_sched();
+ synchronize_rcu();
kfree(mod->args);
free_arch_cleanup:
module_arch_cleanup(mod);
@@ -3834,7 +3841,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
mod_tree_remove(mod);
wake_up_all(&module_wq);
/* Wait for RCU-sched synchronizing before releasing mod->list. */
- synchronize_sched();
+ synchronize_rcu();
mutex_unlock(&module_mutex);
free_module:
/* Free lock-classes; relies on the preceding sync_rcu() */
@@ -3911,18 +3918,22 @@ static inline int is_arm_mapping_symbol(const char *str)
&& (str[2] == '\0' || str[2] == '.');
}
-static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
+static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum)
{
return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
}
-static const char *get_ksymbol(struct module *mod,
- unsigned long addr,
- unsigned long *size,
- unsigned long *offset)
+/*
+ * Given a module and address, find the corresponding symbol and return its name
+ * while providing its size and offset if needed.
+ */
+static const char *find_kallsyms_symbol(struct module *mod,
+ unsigned long addr,
+ unsigned long *size,
+ unsigned long *offset)
{
unsigned int i, best = 0;
- unsigned long nextval;
+ unsigned long nextval, bestval;
struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
/* At worse, next value is at end of module */
@@ -3931,34 +3942,40 @@ static const char *get_ksymbol(struct module *mod,
else
nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;
+ bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);
+
/* Scan for closest preceding symbol, and next symbol. (ELF
starts real symbols at 1). */
for (i = 1; i < kallsyms->num_symtab; i++) {
- if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+ const Elf_Sym *sym = &kallsyms->symtab[i];
+ unsigned long thisval = kallsyms_symbol_value(sym);
+
+ if (sym->st_shndx == SHN_UNDEF)
continue;
/* We ignore unnamed symbols: they're uninformative
* and inserted at a whim. */
- if (*symname(kallsyms, i) == '\0'
- || is_arm_mapping_symbol(symname(kallsyms, i)))
+ if (*kallsyms_symbol_name(kallsyms, i) == '\0'
+ || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i)))
continue;
- if (kallsyms->symtab[i].st_value <= addr
- && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
+ if (thisval <= addr && thisval > bestval) {
best = i;
- if (kallsyms->symtab[i].st_value > addr
- && kallsyms->symtab[i].st_value < nextval)
- nextval = kallsyms->symtab[i].st_value;
+ bestval = thisval;
+ }
+ if (thisval > addr && thisval < nextval)
+ nextval = thisval;
}
if (!best)
return NULL;
if (size)
- *size = nextval - kallsyms->symtab[best].st_value;
+ *size = nextval - bestval;
if (offset)
- *offset = addr - kallsyms->symtab[best].st_value;
- return symname(kallsyms, best);
+ *offset = addr - bestval;
+
+ return kallsyms_symbol_name(kallsyms, best);
}
void * __weak dereference_module_function_descriptor(struct module *mod,
@@ -3983,7 +4000,8 @@ const char *module_address_lookup(unsigned long addr,
if (mod) {
if (modname)
*modname = mod->name;
- ret = get_ksymbol(mod, addr, size, offset);
+
+ ret = find_kallsyms_symbol(mod, addr, size, offset);
}
/* Make a copy in here where it's safe */
if (ret) {
@@ -4006,9 +4024,10 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
if (within_module(addr, mod)) {
const char *sym;
- sym = get_ksymbol(mod, addr, NULL, NULL);
+ sym = find_kallsyms_symbol(mod, addr, NULL, NULL);
if (!sym)
goto out;
+
strlcpy(symname, sym, KSYM_NAME_LEN);
preempt_enable();
return 0;
@@ -4031,7 +4050,7 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
if (within_module(addr, mod)) {
const char *sym;
- sym = get_ksymbol(mod, addr, size, offset);
+ sym = find_kallsyms_symbol(mod, addr, size, offset);
if (!sym)
goto out;
if (modname)
@@ -4060,9 +4079,11 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
continue;
kallsyms = rcu_dereference_sched(mod->kallsyms);
if (symnum < kallsyms->num_symtab) {
- *value = kallsyms->symtab[symnum].st_value;
- *type = kallsyms->symtab[symnum].st_info;
- strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
+ const Elf_Sym *sym = &kallsyms->symtab[symnum];
+
+ *value = kallsyms_symbol_value(sym);
+ *type = sym->st_size;
+ strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN);
strlcpy(module_name, mod->name, MODULE_NAME_LEN);
*exported = is_exported(name, *value, mod);
preempt_enable();
@@ -4074,15 +4095,19 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
return -ERANGE;
}
-static unsigned long mod_find_symname(struct module *mod, const char *name)
+/* Given a module and name of symbol, find and return the symbol's value */
+static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
{
unsigned int i;
struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
- for (i = 0; i < kallsyms->num_symtab; i++)
- if (strcmp(name, symname(kallsyms, i)) == 0 &&
- kallsyms->symtab[i].st_shndx != SHN_UNDEF)
- return kallsyms->symtab[i].st_value;
+ for (i = 0; i < kallsyms->num_symtab; i++) {
+ const Elf_Sym *sym = &kallsyms->symtab[i];
+
+ if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 &&
+ sym->st_shndx != SHN_UNDEF)
+ return kallsyms_symbol_value(sym);
+ }
return 0;
}
@@ -4097,12 +4122,12 @@ unsigned long module_kallsyms_lookup_name(const char *name)
preempt_disable();
if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
if ((mod = find_module_all(name, colon - name, false)) != NULL)
- ret = mod_find_symname(mod, colon+1);
+ ret = find_kallsyms_symbol_value(mod, colon+1);
} else {
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
- if ((ret = mod_find_symname(mod, name)) != 0)
+ if ((ret = find_kallsyms_symbol_value(mod, name)) != 0)
break;
}
}
@@ -4127,12 +4152,13 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
if (mod->state == MODULE_STATE_UNFORMED)
continue;
for (i = 0; i < kallsyms->num_symtab; i++) {
+ const Elf_Sym *sym = &kallsyms->symtab[i];
- if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+ if (sym->st_shndx == SHN_UNDEF)
continue;
- ret = fn(data, symname(kallsyms, i),
- mod, kallsyms->symtab[i].st_value);
+ ret = fn(data, kallsyms_symbol_name(kallsyms, i),
+ mod, kallsyms_symbol_value(sym));
if (ret != 0)
return ret;
}
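The refactored find_kallsyms_symbol() is still a linear scan for the closest preceding symbol; it just caches the best value instead of re-reading it from the table on every comparison. Its core, lifted into self-contained form (minimal stand-in symbol type, no ELF details):

    #include <stddef.h>

    struct sym { const char *name; unsigned long value; };

    /* Return the named symbol with the largest value <= addr, or NULL. */
    static const struct sym *closest_sym(const struct sym *tab, size_t n,
                                         unsigned long addr)
    {
            const struct sym *best = NULL;
            size_t i;

            for (i = 0; i < n; i++) {
                    if (!tab[i].name || tab[i].name[0] == '\0')
                            continue;       /* unnamed symbols are skipped */
                    if (tab[i].value <= addr &&
                        (!best || tab[i].value > best->value))
                            best = &tab[i];
            }
            return best;
    }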
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index f2075ce8e4b3..6b9a926fd86b 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -83,6 +83,7 @@ int mod_verify_sig(const void *mod, struct load_info *info)
}
return verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len,
- NULL, VERIFYING_MODULE_SIGNATURE,
+ VERIFY_USE_SECONDARY_KEYRING,
+ VERIFYING_MODULE_SIGNATURE,
NULL, NULL);
}
diff --git a/kernel/padata.c b/kernel/padata.c
index d568cc56405f..3e2633ae3bca 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -720,7 +720,7 @@ int padata_start(struct padata_instance *pinst)
if (pinst->flags & PADATA_INVALID)
err = -EINVAL;
- __padata_start(pinst);
+ __padata_start(pinst);
mutex_unlock(&pinst->lock);
diff --git a/kernel/panic.c b/kernel/panic.c
index f6d549a29a5c..d10c340c43b0 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -14,6 +14,7 @@
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
+#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
@@ -237,7 +238,10 @@ void panic(const char *fmt, ...)
if (_crash_kexec_post_notifiers)
__crash_kexec(NULL);
- bust_spinlocks(0);
+#ifdef CONFIG_VT
+ unblank_screen();
+#endif
+ console_unblank();
/*
* We may have ended up stopping the CPU holding the lock (in
diff --git a/kernel/pid.c b/kernel/pid.c
index b2f6c506035d..20881598bdfa 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -233,8 +233,10 @@ out_unlock:
out_free:
spin_lock_irq(&pidmap_lock);
- while (++i <= ns->level)
- idr_remove(&ns->idr, (pid->numbers + i)->nr);
+ while (++i <= ns->level) {
+ upid = pid->numbers + i;
+ idr_remove(&upid->ns->idr, upid->nr);
+ }
/* On failure to allocate the first pid, reset the state */
if (ns->pid_allocated == PIDNS_ADDING)
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 3a6c2f87699e..f8fe57d1022e 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -298,3 +298,18 @@ config PM_GENERIC_DOMAINS_OF
config CPU_PM
bool
+
+config ENERGY_MODEL
+ bool "Energy Model for CPUs"
+ depends on SMP
+ depends on CPU_FREQ
+ default n
+ help
+ Several subsystems (thermal and/or the task scheduler for example)
+ can leverage information about the energy consumed by CPUs to make
+ smarter decisions. This config option enables the framework from
+ which subsystems can access the energy models.
+
+ The exact usage of the energy model is subsystem-dependent.
+
+ If in doubt, say N.
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index a3f79f0eef36..e7e47d9be1e5 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -15,3 +15,5 @@ obj-$(CONFIG_PM_AUTOSLEEP) += autosleep.o
obj-$(CONFIG_PM_WAKELOCKS) += wakelock.o
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
+
+obj-$(CONFIG_ENERGY_MODEL) += energy_model.o
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
new file mode 100644
index 000000000000..d9dc2c38764a
--- /dev/null
+++ b/kernel/power/energy_model.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Energy Model of CPUs
+ *
+ * Copyright (c) 2018, Arm ltd.
+ * Written by: Quentin Perret, Arm ltd.
+ */
+
+#define pr_fmt(fmt) "energy_model: " fmt
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/energy_model.h>
+#include <linux/sched/topology.h>
+#include <linux/slab.h>
+
+/* Mapping of each CPU to the performance domain to which it belongs. */
+static DEFINE_PER_CPU(struct em_perf_domain *, em_data);
+
+/*
+ * Mutex serializing the registrations of performance domains and letting
+ * callbacks defined by drivers sleep.
+ */
+static DEFINE_MUTEX(em_pd_mutex);
+
+static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
+ struct em_data_callback *cb)
+{
+ unsigned long opp_eff, prev_opp_eff = ULONG_MAX;
+ unsigned long power, freq, prev_freq = 0;
+ int i, ret, cpu = cpumask_first(span);
+ struct em_cap_state *table;
+ struct em_perf_domain *pd;
+ u64 fmax;
+
+ if (!cb->active_power)
+ return NULL;
+
+ pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
+ if (!pd)
+ return NULL;
+
+ table = kcalloc(nr_states, sizeof(*table), GFP_KERNEL);
+ if (!table)
+ goto free_pd;
+
+ /* Build the list of capacity states for this performance domain */
+ for (i = 0, freq = 0; i < nr_states; i++, freq++) {
+ /*
+ * active_power() is a driver callback which ceils 'freq' to
+ * lowest capacity state of 'cpu' above 'freq' and updates
+ * 'power' and 'freq' accordingly.
+ */
+ ret = cb->active_power(&power, &freq, cpu);
+ if (ret) {
+ pr_err("pd%d: invalid cap. state: %d\n", cpu, ret);
+ goto free_cs_table;
+ }
+
+ /*
+ * We expect the driver callback to increase the frequency for
+ * higher capacity states.
+ */
+ if (freq <= prev_freq) {
+ pr_err("pd%d: non-increasing freq: %lu\n", cpu, freq);
+ goto free_cs_table;
+ }
+
+ /*
+ * The power returned by active_power() is expected to be
+ * positive, in milli-watts and to fit into 16 bits.
+ */
+ if (!power || power > EM_CPU_MAX_POWER) {
+ pr_err("pd%d: invalid power: %lu\n", cpu, power);
+ goto free_cs_table;
+ }
+
+ table[i].power = power;
+ table[i].frequency = prev_freq = freq;
+
+ /*
+ * The hertz/watts efficiency ratio should decrease as the
+ * frequency grows on sane platforms. But this isn't always
+ * true in practice so warn the user if a higher OPP is more
+ * power efficient than a lower one.
+ */
+ opp_eff = freq / power;
+ if (opp_eff >= prev_opp_eff)
+ pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_cap_state %d >= em_cap_state%d\n",
+ cpu, i, i - 1);
+ prev_opp_eff = opp_eff;
+ }
+
+ /* Compute the cost of each capacity_state. */
+ fmax = (u64) table[nr_states - 1].frequency;
+ for (i = 0; i < nr_states; i++) {
+ table[i].cost = div64_u64(fmax * table[i].power,
+ table[i].frequency);
+ }
+
+ pd->table = table;
+ pd->nr_cap_states = nr_states;
+ cpumask_copy(to_cpumask(pd->cpus), span);
+
+ return pd;
+
+free_cs_table:
+ kfree(table);
+free_pd:
+ kfree(pd);
+
+ return NULL;
+}
+
+/**
+ * em_cpu_get() - Return the performance domain for a CPU
+ * @cpu : CPU to find the performance domain for
+ *
+ * Return: the performance domain to which 'cpu' belongs, or NULL if it doesn't
+ * exist.
+ */
+struct em_perf_domain *em_cpu_get(int cpu)
+{
+ return READ_ONCE(per_cpu(em_data, cpu));
+}
+EXPORT_SYMBOL_GPL(em_cpu_get);
+
+/**
+ * em_register_perf_domain() - Register the Energy Model of a performance domain
+ * @span : Mask of CPUs in the performance domain
+ * @nr_states : Number of capacity states to register
+ * @cb : Callback functions providing the data of the Energy Model
+ *
+ * Create Energy Model tables for a performance domain using the callbacks
+ * defined in cb.
+ *
+ * If multiple clients register the same performance domain, all but the
+ * first registration will fail with -EEXIST.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
+ struct em_data_callback *cb)
+{
+ unsigned long cap, prev_cap = 0;
+ struct em_perf_domain *pd;
+ int cpu, ret = 0;
+
+ if (!span || !nr_states || !cb)
+ return -EINVAL;
+
+ /*
+ * Use a mutex to serialize the registration of performance domains and
+ * let the driver-defined callback functions sleep.
+ */
+ mutex_lock(&em_pd_mutex);
+
+ for_each_cpu(cpu, span) {
+		/* Make sure the domain has not already been registered. */
+ if (READ_ONCE(per_cpu(em_data, cpu))) {
+ ret = -EEXIST;
+ goto unlock;
+ }
+
+ /*
+ * All CPUs of a domain must have the same micro-architecture
+ * since they all share the same table.
+ */
+ cap = arch_scale_cpu_capacity(NULL, cpu);
+ if (prev_cap && prev_cap != cap) {
+ pr_err("CPUs of %*pbl must have the same capacity\n",
+ cpumask_pr_args(span));
+ ret = -EINVAL;
+ goto unlock;
+ }
+ prev_cap = cap;
+ }
+
+ /* Create the performance domain and add it to the Energy Model. */
+ pd = em_create_pd(span, nr_states, cb);
+ if (!pd) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ for_each_cpu(cpu, span) {
+ /*
+ * The per-cpu array can be read concurrently from em_cpu_get().
+ * The barrier enforces the ordering needed to make sure readers
+		 * can only access well-formed em_perf_domain structs.
+ */
+ smp_store_release(per_cpu_ptr(&em_data, cpu), pd);
+ }
+
+ pr_debug("Created perf domain %*pbl\n", cpumask_pr_args(span));
+unlock:
+ mutex_unlock(&em_pd_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(em_register_perf_domain);
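
On the driver side, registration reduces to providing an active_power() callback that honors the contract documented in em_create_pd(): round 'freq' up to the next supported state, fill in both outputs, and return 0. A minimal sketch follows; the OPP table, its values, and the my_cpus mask are hypothetical, and only em_register_perf_domain(), struct em_data_callback, and the callback contract come from the code above.

#include <linux/cpumask.h>
#include <linux/energy_model.h>
#include <linux/kernel.h>

/* Hypothetical OPP table: frequency in kHz, power in mW. */
static const struct { unsigned long freq, power; } my_opps[] = {
	{  500000,  80 },
	{ 1000000, 200 },
	{ 1500000, 450 },
};

static cpumask_t my_cpus;	/* Filled in by the driver, e.g. one cluster. */

static int my_active_power(unsigned long *power, unsigned long *freq, int cpu)
{
	int i;

	/* Round 'freq' up to the lowest supported frequency at or above it. */
	for (i = 0; i < ARRAY_SIZE(my_opps); i++) {
		if (my_opps[i].freq >= *freq) {
			*freq = my_opps[i].freq;
			*power = my_opps[i].power;
			return 0;
		}
	}
	return -EINVAL;		/* Requested frequency above the highest OPP. */
}

static int my_driver_register_em(void)
{
	struct em_data_callback cb = { .active_power = my_active_power };

	return em_register_perf_domain(&my_cpus, ARRAY_SIZE(my_opps), &cb);
}

em_create_pd() calls the callback with monotonically increasing 'freq' values, so each invocation lands on the next OPP, and then derives table[i].cost = fmax * power / freq; with the hypothetical values above and fmax = 1500000 kHz, the costs come out to 240, 300, and 450.
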
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 35b50823d83b..98e76cad128b 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -318,23 +318,12 @@ static int suspend_stats_show(struct seq_file *s, void *unused)
return 0;
}
-
-static int suspend_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, suspend_stats_show, NULL);
-}
-
-static const struct file_operations suspend_stats_operations = {
- .open = suspend_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(suspend_stats);
static int __init pm_debugfs_init(void)
{
debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
- NULL, NULL, &suspend_stats_operations);
+ NULL, NULL, &suspend_stats_fops);
return 0;
}
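
The two-line replacement works because DEFINE_SHOW_ATTRIBUTE(name) generates the very boilerplate being deleted, given a name##_show() routine. Roughly, paraphrasing its definition in include/linux/seq_file.h:

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}

The only functional delta here is single_open() receiving inode->i_private instead of NULL; since debugfs_create_file() above passes NULL as the data pointer, i_private is NULL anyway, and suspend_stats_show() ignores its argument.
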
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 86d72ffb811b..b7a82502857a 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -184,7 +184,7 @@ static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
c->target_value = value;
}
-static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
+static int pm_qos_debug_show(struct seq_file *s, void *unused)
{
struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
struct pm_qos_constraints *c;
@@ -245,18 +245,7 @@ out:
return 0;
}
-static int pm_qos_dbg_open(struct inode *inode, struct file *file)
-{
- return single_open(file, pm_qos_dbg_show_requests,
- inode->i_private);
-}
-
-static const struct file_operations pm_qos_debug_fops = {
- .open = pm_qos_dbg_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pm_qos_debug);
/**
* pm_qos_update_target - manages the constraints list and calls the notifiers
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 1b2a029360b7..1306fe0c1dc6 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -403,6 +403,7 @@ DECLARE_WAIT_QUEUE_HEAD(log_wait);
static u64 syslog_seq;
static u32 syslog_idx;
static size_t syslog_partial;
+static bool syslog_time;
/* index and sequence number of the first record stored in the buffer */
static u64 log_first_seq;
@@ -752,6 +753,19 @@ struct devkmsg_user {
char buf[CONSOLE_EXT_LOG_MAX];
};
+static __printf(3, 4) __cold
+int devkmsg_emit(int facility, int level, const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+ r = vprintk_emit(facility, level, NULL, 0, fmt, args);
+ va_end(args);
+
+ return r;
+}
+
static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
char *buf, *line;
@@ -810,7 +824,7 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
}
}
- printk_emit(facility, level, NULL, 0, "%s", line);
+ devkmsg_emit(facility, level, "%s", line);
kfree(buf);
return ret;
}
@@ -1213,50 +1227,39 @@ static inline void boot_delay_msec(int level)
static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
-static size_t print_time(u64 ts, char *buf)
+static size_t print_syslog(unsigned int level, char *buf)
{
- unsigned long rem_nsec;
-
- if (!printk_time)
- return 0;
-
- rem_nsec = do_div(ts, 1000000000);
+ return sprintf(buf, "<%u>", level);
+}
- if (!buf)
- return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts);
+static size_t print_time(u64 ts, char *buf)
+{
+ unsigned long rem_nsec = do_div(ts, 1000000000);
return sprintf(buf, "[%5lu.%06lu] ",
(unsigned long)ts, rem_nsec / 1000);
}
-static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
+static size_t print_prefix(const struct printk_log *msg, bool syslog,
+ bool time, char *buf)
{
size_t len = 0;
- unsigned int prefix = (msg->facility << 3) | msg->level;
-
- if (syslog) {
- if (buf) {
- len += sprintf(buf, "<%u>", prefix);
- } else {
- len += 3;
- if (prefix > 999)
- len += 3;
- else if (prefix > 99)
- len += 2;
- else if (prefix > 9)
- len++;
- }
- }
- len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
+ if (syslog)
+ len = print_syslog((msg->facility << 3) | msg->level, buf);
+ if (time)
+ len += print_time(msg->ts_nsec, buf + len);
return len;
}
-static size_t msg_print_text(const struct printk_log *msg, bool syslog, char *buf, size_t size)
+static size_t msg_print_text(const struct printk_log *msg, bool syslog,
+ bool time, char *buf, size_t size)
{
const char *text = log_text(msg);
size_t text_size = msg->text_len;
size_t len = 0;
+ char prefix[PREFIX_MAX];
+ const size_t prefix_len = print_prefix(msg, syslog, time, prefix);
do {
const char *next = memchr(text, '\n', text_size);
@@ -1271,19 +1274,17 @@ static size_t msg_print_text(const struct printk_log *msg, bool syslog, char *bu
}
if (buf) {
- if (print_prefix(msg, syslog, NULL) +
- text_len + 1 >= size - len)
+ if (prefix_len + text_len + 1 >= size - len)
break;
- len += print_prefix(msg, syslog, buf + len);
+ memcpy(buf + len, prefix, prefix_len);
+ len += prefix_len;
memcpy(buf + len, text, text_len);
len += text_len;
buf[len++] = '\n';
} else {
/* SYSLOG_ACTION_* buffer size only calculation */
- len += print_prefix(msg, syslog, NULL);
- len += text_len;
- len++;
+ len += prefix_len + text_len + 1;
}
text = next;
@@ -1318,9 +1319,17 @@ static int syslog_print(char __user *buf, int size)
break;
}
+ /*
+	 * To keep the reading/counting of a partial line consistent,
+	 * use the printk_time value as of the beginning of the line.
+ */
+ if (!syslog_partial)
+ syslog_time = printk_time;
+
skip = syslog_partial;
msg = log_from_idx(syslog_idx);
- n = msg_print_text(msg, true, text, LOG_LINE_MAX + PREFIX_MAX);
+ n = msg_print_text(msg, true, syslog_time, text,
+ LOG_LINE_MAX + PREFIX_MAX);
if (n - syslog_partial <= size) {
/* message fits into buffer, move forward */
syslog_idx = log_next(syslog_idx);
@@ -1360,11 +1369,13 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
u64 next_seq;
u64 seq;
u32 idx;
+ bool time;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
return -ENOMEM;
+ time = printk_time;
logbuf_lock_irq();
/*
* Find first record that fits, including all following records,
@@ -1375,7 +1386,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
while (seq < log_next_seq) {
struct printk_log *msg = log_from_idx(idx);
- len += msg_print_text(msg, true, NULL, 0);
+ len += msg_print_text(msg, true, time, NULL, 0);
idx = log_next(idx);
seq++;
}
@@ -1386,7 +1397,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
while (len > size && seq < log_next_seq) {
struct printk_log *msg = log_from_idx(idx);
- len -= msg_print_text(msg, true, NULL, 0);
+ len -= msg_print_text(msg, true, time, NULL, 0);
idx = log_next(idx);
seq++;
}
@@ -1397,14 +1408,9 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
len = 0;
while (len >= 0 && seq < next_seq) {
struct printk_log *msg = log_from_idx(idx);
- int textlen;
+ int textlen = msg_print_text(msg, true, time, text,
+ LOG_LINE_MAX + PREFIX_MAX);
- textlen = msg_print_text(msg, true, text,
- LOG_LINE_MAX + PREFIX_MAX);
- if (textlen < 0) {
- len = textlen;
- break;
- }
idx = log_next(idx);
seq++;
@@ -1528,11 +1534,14 @@ int do_syslog(int type, char __user *buf, int len, int source)
} else {
u64 seq = syslog_seq;
u32 idx = syslog_idx;
+ bool time = syslog_partial ? syslog_time : printk_time;
while (seq < log_next_seq) {
struct printk_log *msg = log_from_idx(idx);
- error += msg_print_text(msg, true, NULL, 0);
+ error += msg_print_text(msg, true, time, NULL,
+ 0);
+ time = printk_time;
idx = log_next(idx);
seq++;
}
@@ -1935,21 +1944,6 @@ asmlinkage int vprintk(const char *fmt, va_list args)
}
EXPORT_SYMBOL(vprintk);
-asmlinkage int printk_emit(int facility, int level,
- const char *dict, size_t dictlen,
- const char *fmt, ...)
-{
- va_list args;
- int r;
-
- va_start(args, fmt);
- r = vprintk_emit(facility, level, dict, dictlen, fmt, args);
- va_end(args);
-
- return r;
-}
-EXPORT_SYMBOL(printk_emit);
-
int vprintk_default(const char *fmt, va_list args)
{
int r;
@@ -2005,6 +1999,7 @@ EXPORT_SYMBOL(printk);
#define LOG_LINE_MAX 0
#define PREFIX_MAX 0
+#define printk_time false
static u64 syslog_seq;
static u32 syslog_idx;
@@ -2028,8 +2023,8 @@ static void console_lock_spinning_enable(void) { }
static int console_lock_spinning_disable_and_check(void) { return 0; }
static void call_console_drivers(const char *ext_text, size_t ext_len,
const char *text, size_t len) {}
-static size_t msg_print_text(const struct printk_log *msg,
- bool syslog, char *buf, size_t size) { return 0; }
+static size_t msg_print_text(const struct printk_log *msg, bool syslog,
+ bool time, char *buf, size_t size) { return 0; }
static bool suppress_message_printing(int level) { return false; }
#endif /* CONFIG_PRINTK */
@@ -2387,8 +2382,7 @@ skip:
len += msg_print_text(msg,
console_msg_format & MSG_FORMAT_SYSLOG,
- text + len,
- sizeof(text) - len);
+ printk_time, text + len, sizeof(text) - len);
if (nr_ext_console_drivers) {
ext_len = msg_print_ext_header(ext_text,
sizeof(ext_text),
@@ -3112,7 +3106,7 @@ bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
goto out;
msg = log_from_idx(dumper->cur_idx);
- l = msg_print_text(msg, syslog, line, size);
+ l = msg_print_text(msg, syslog, printk_time, line, size);
dumper->cur_idx = log_next(dumper->cur_idx);
dumper->cur_seq++;
@@ -3183,6 +3177,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
u32 next_idx;
size_t l = 0;
bool ret = false;
+ bool time = printk_time;
if (!dumper->active)
goto out;
@@ -3206,7 +3201,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
while (seq < dumper->next_seq) {
struct printk_log *msg = log_from_idx(idx);
- l += msg_print_text(msg, true, NULL, 0);
+ l += msg_print_text(msg, true, time, NULL, 0);
idx = log_next(idx);
seq++;
}
@@ -3217,7 +3212,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
while (l > size && seq < dumper->next_seq) {
struct printk_log *msg = log_from_idx(idx);
- l -= msg_print_text(msg, true, NULL, 0);
+ l -= msg_print_text(msg, true, time, NULL, 0);
idx = log_next(idx);
seq++;
}
@@ -3230,7 +3225,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
while (seq < dumper->next_seq) {
struct printk_log *msg = log_from_idx(idx);
- l += msg_print_text(msg, syslog, buf + l, size - l);
+ l += msg_print_text(msg, syslog, time, buf + l, size - l);
idx = log_next(idx);
seq++;
}
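
The net effect of the print_prefix() rework is that the prefix is rendered once per message into a stack buffer, instead of being re-derived (or re-measured via a NULL buffer) for every line of the message. A standalone userspace sketch of the two sprintf() building blocks composed above, with made-up sample values:

#include <stdio.h>

/* Mirrors the print_syslog() + print_time() composition in print_prefix(). */
int main(void)
{
	char buf[64];
	unsigned int facility = 0, level = 6;		/* KERN_INFO */
	unsigned long long ts = 12345678901ULL;		/* nanoseconds */
	unsigned long rem_nsec = ts % 1000000000;
	int len = 0;

	len += sprintf(buf + len, "<%u>", (facility << 3) | level);
	len += sprintf(buf + len, "[%5lu.%06lu] ",
		       (unsigned long)(ts / 1000000000), rem_nsec / 1000);
	printf("%s(len=%d)\n", buf, len);	/* "<6>[   12.345678] (len=18)" */
	return 0;
}
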
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 80b34dffdfb9..c2cee9db5204 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -261,9 +261,6 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
{
- if (mode & PTRACE_MODE_SCHED)
- return false;
-
if (mode & PTRACE_MODE_NOAUDIT)
return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
else
@@ -331,16 +328,9 @@ ok:
!ptrace_has_cap(mm->user_ns, mode)))
return -EPERM;
- if (mode & PTRACE_MODE_SCHED)
- return 0;
return security_ptrace_access_check(task, mode);
}
-bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
-{
- return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
-}
-
bool ptrace_may_access(struct task_struct *task, unsigned int mode)
{
int err;
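
With the PTRACE_MODE_SCHED shortcut gone, every caller of ptrace_may_access() takes the full path, security_ptrace_access_check() included. A hedged sketch of the standard caller pattern; the surrounding helper is hypothetical, while the mode flag is the stock one from <linux/ptrace.h>:

#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/sched.h>

/* Let 'current' read state of 'task' only if the ptrace rules permit it. */
static int may_inspect(struct task_struct *task)
{
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		return -EACCES;
	return 0;
}
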
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 2866166863f0..a393e24a9195 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -526,12 +526,14 @@ srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
+static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
+void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
@@ -539,8 +541,10 @@ extern struct workqueue_struct *rcu_par_gp_wq;
#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
+void rcu_bind_current_to_nocb(void);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
+static inline void rcu_bind_current_to_nocb(void) { }
#endif
#endif /* __LINUX_RCU_H */
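
The rcu.h hunks follow the usual header pattern for config-dependent helpers: a real prototype under the ifdef and a static inline no-op otherwise, so call sites such as the rcutorture change below need no #ifdef of their own. Schematically, with a hypothetical config symbol and function:

#ifdef CONFIG_MY_FEATURE
void my_feature_hook(void);			/* Defined in my_feature.c. */
#else
static inline void my_feature_hook(void) { }	/* Optimized away entirely. */
#endif
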
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 210c77460365..f6e85faa4ff4 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -56,6 +56,7 @@
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
+#include <linux/oom.h>
#include "rcu.h"
@@ -80,13 +81,6 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@jos
/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
-torture_param(int, cbflood_inter_holdoff, HZ,
- "Holdoff between floods (jiffies)");
-torture_param(int, cbflood_intra_holdoff, 1,
- "Holdoff between bursts (jiffies)");
-torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
-torture_param(int, cbflood_n_per_burst, 20000,
- "# callbacks per burst in flood");
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
"Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
@@ -138,12 +132,10 @@ module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
static int nrealreaders;
-static int ncbflooders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
-static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
@@ -181,7 +173,6 @@ static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
-static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;
static int rcu_torture_writer_state;
@@ -259,6 +250,8 @@ static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
+static bool rcu_fwd_cb_nodelay; /* Shorten rcu_read_delay() delays. */
+
/*
* Allocate an element from the rcu_tortures pool.
*/
@@ -348,7 +341,8 @@ rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
* period, and we want a long delay occasionally to trigger
* force_quiescent_state. */
- if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
+ if (!rcu_fwd_cb_nodelay &&
+ !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
started = cur_ops->get_gp_seq();
ts = rcu_trace_clock_local();
if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
@@ -870,59 +864,6 @@ checkwait: stutter_wait("rcu_torture_boost");
return 0;
}
-static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
-{
-}
-
-/*
- * RCU torture callback-flood kthread. Repeatedly induces bursts of calls
- * to call_rcu() or analogous, increasing the probability of occurrence
- * of callback-overflow corner cases.
- */
-static int
-rcu_torture_cbflood(void *arg)
-{
- int err = 1;
- int i;
- int j;
- struct rcu_head *rhp;
-
- if (cbflood_n_per_burst > 0 &&
- cbflood_inter_holdoff > 0 &&
- cbflood_intra_holdoff > 0 &&
- cur_ops->call &&
- cur_ops->cb_barrier) {
- rhp = vmalloc(array3_size(cbflood_n_burst,
- cbflood_n_per_burst,
- sizeof(*rhp)));
- err = !rhp;
- }
- if (err) {
- VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
- goto wait_for_stop;
- }
- VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
- do {
- schedule_timeout_interruptible(cbflood_inter_holdoff);
- atomic_long_inc(&n_cbfloods);
- WARN_ON(signal_pending(current));
- for (i = 0; i < cbflood_n_burst; i++) {
- for (j = 0; j < cbflood_n_per_burst; j++) {
- cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
- rcu_torture_cbflood_cb);
- }
- schedule_timeout_interruptible(cbflood_intra_holdoff);
- WARN_ON(signal_pending(current));
- }
- cur_ops->cb_barrier();
- stutter_wait("rcu_torture_cbflood");
- } while (!torture_must_stop());
- vfree(rhp);
-wait_for_stop:
- torture_kthread_stopping("rcu_torture_cbflood");
- return 0;
-}
-
/*
* RCU torture force-quiescent-state kthread. Repeatedly induces
* bursts of calls to force_quiescent_state(), increasing the probability
@@ -1457,11 +1398,10 @@ rcu_torture_stats_print(void)
n_rcu_torture_boosts,
atomic_long_read(&n_rcu_torture_timers));
torture_onoff_stats();
- pr_cont("barrier: %ld/%ld:%ld ",
+ pr_cont("barrier: %ld/%ld:%ld\n",
n_barrier_successes,
n_barrier_attempts,
n_rcu_torture_barrier_error);
- pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
pr_alert("%s%s ", torture_type, TORTURE_FLAG);
if (atomic_read(&n_rcu_torture_mberror) != 0 ||
@@ -1674,8 +1614,90 @@ static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}
-/* Carry out grace-period forward-progress testing. */
-static int rcu_torture_fwd_prog(void *args)
+/* State for continuous-flood RCU callbacks. */
+struct rcu_fwd_cb {
+ struct rcu_head rh;
+ struct rcu_fwd_cb *rfc_next;
+ int rfc_gps;
+};
+static DEFINE_SPINLOCK(rcu_fwd_lock);
+static struct rcu_fwd_cb *rcu_fwd_cb_head;
+static struct rcu_fwd_cb **rcu_fwd_cb_tail = &rcu_fwd_cb_head;
+static long n_launders_cb;
+static unsigned long rcu_fwd_startat;
+static bool rcu_fwd_emergency_stop;
+#define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
+#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
+#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
+#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
+static long n_launders_hist[2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)];
+
+static void rcu_torture_fwd_cb_hist(void)
+{
+ int i;
+ int j;
+
+ for (i = ARRAY_SIZE(n_launders_hist) - 1; i > 0; i--)
+ if (n_launders_hist[i] > 0)
+ break;
+ pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
+ __func__, jiffies - rcu_fwd_startat);
+ for (j = 0; j <= i; j++)
+ pr_cont(" %ds/%d: %ld",
+ j + 1, FWD_CBS_HIST_DIV, n_launders_hist[j]);
+ pr_cont("\n");
+}
+
+/* Callback function for continuous-flood RCU callbacks. */
+static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
+{
+ unsigned long flags;
+ int i;
+ struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
+ struct rcu_fwd_cb **rfcpp;
+
+ rfcp->rfc_next = NULL;
+ rfcp->rfc_gps++;
+ spin_lock_irqsave(&rcu_fwd_lock, flags);
+ rfcpp = rcu_fwd_cb_tail;
+ rcu_fwd_cb_tail = &rfcp->rfc_next;
+ WRITE_ONCE(*rfcpp, rfcp);
+ WRITE_ONCE(n_launders_cb, n_launders_cb + 1);
+ i = ((jiffies - rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
+ if (i >= ARRAY_SIZE(n_launders_hist))
+ i = ARRAY_SIZE(n_launders_hist) - 1;
+ n_launders_hist[i]++;
+ spin_unlock_irqrestore(&rcu_fwd_lock, flags);
+}
+
+/*
+ * Free all callbacks on the rcu_fwd_cb_head list, either because the
+ * test is over or because we hit an OOM event.
+ */
+static unsigned long rcu_torture_fwd_prog_cbfree(void)
+{
+ unsigned long flags;
+ unsigned long freed = 0;
+ struct rcu_fwd_cb *rfcp;
+
+ for (;;) {
+ spin_lock_irqsave(&rcu_fwd_lock, flags);
+ rfcp = rcu_fwd_cb_head;
+ if (!rfcp)
+ break;
+ rcu_fwd_cb_head = rfcp->rfc_next;
+ if (!rcu_fwd_cb_head)
+ rcu_fwd_cb_tail = &rcu_fwd_cb_head;
+ spin_unlock_irqrestore(&rcu_fwd_lock, flags);
+ kfree(rfcp);
+ freed++;
+ }
+ spin_unlock_irqrestore(&rcu_fwd_lock, flags);
+ return freed;
+}
+
+/* Carry out need_resched()/cond_resched() forward-progress testing. */
+static void rcu_torture_fwd_prog_nr(int *tested, int *tested_tries)
{
unsigned long cver;
unsigned long dur;
@@ -1686,56 +1708,186 @@ static int rcu_torture_fwd_prog(void *args)
int sd4;
bool selfpropcb = false;
unsigned long stopat;
- int tested = 0;
- int tested_tries = 0;
static DEFINE_TORTURE_RANDOM(trs);
- VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
- if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
- set_user_nice(current, MAX_NICE);
if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
init_rcu_head_on_stack(&fcs.rh);
selfpropcb = true;
}
+
+ /* Tight loop containing cond_resched(). */
+ if (selfpropcb) {
+ WRITE_ONCE(fcs.stop, 0);
+ cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
+ }
+ cver = READ_ONCE(rcu_torture_current_version);
+ gps = cur_ops->get_gp_seq();
+ sd = cur_ops->stall_dur() + 1;
+ sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
+ dur = sd4 + torture_random(&trs) % (sd - sd4);
+ WRITE_ONCE(rcu_fwd_startat, jiffies);
+ stopat = rcu_fwd_startat + dur;
+ while (time_before(jiffies, stopat) &&
+ !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
+ idx = cur_ops->readlock();
+ udelay(10);
+ cur_ops->readunlock(idx);
+ if (!fwd_progress_need_resched || need_resched())
+ cond_resched();
+ }
+ (*tested_tries)++;
+ if (!time_before(jiffies, stopat) &&
+ !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
+ (*tested)++;
+ cver = READ_ONCE(rcu_torture_current_version) - cver;
+ gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
+ WARN_ON(!cver && gps < 2);
+ pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
+ }
+ if (selfpropcb) {
+ WRITE_ONCE(fcs.stop, 1);
+ cur_ops->sync(); /* Wait for running CB to complete. */
+ cur_ops->cb_barrier(); /* Wait for queued callbacks. */
+ }
+
+ if (selfpropcb) {
+ WARN_ON(READ_ONCE(fcs.stop) != 2);
+ destroy_rcu_head_on_stack(&fcs.rh);
+ }
+}
+
+/* Carry out call_rcu() forward-progress testing. */
+static void rcu_torture_fwd_prog_cr(void)
+{
+ unsigned long cver;
+ unsigned long gps;
+ int i;
+ long n_launders;
+ long n_launders_cb_snap;
+ long n_launders_sa;
+ long n_max_cbs;
+ long n_max_gps;
+ struct rcu_fwd_cb *rfcp;
+ struct rcu_fwd_cb *rfcpn;
+ unsigned long stopat;
+ unsigned long stoppedat;
+
+ if (READ_ONCE(rcu_fwd_emergency_stop))
+ return; /* Get out of the way quickly, no GP wait! */
+
+ /* Loop continuously posting RCU callbacks. */
+ WRITE_ONCE(rcu_fwd_cb_nodelay, true);
+ cur_ops->sync(); /* Later readers see above write. */
+ WRITE_ONCE(rcu_fwd_startat, jiffies);
+ stopat = rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
+ n_launders = 0;
+ n_launders_cb = 0;
+ n_launders_sa = 0;
+ n_max_cbs = 0;
+ n_max_gps = 0;
+ for (i = 0; i < ARRAY_SIZE(n_launders_hist); i++)
+ n_launders_hist[i] = 0;
+ cver = READ_ONCE(rcu_torture_current_version);
+ gps = cur_ops->get_gp_seq();
+ while (time_before(jiffies, stopat) &&
+ !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
+ rfcp = READ_ONCE(rcu_fwd_cb_head);
+ rfcpn = NULL;
+ if (rfcp)
+ rfcpn = READ_ONCE(rfcp->rfc_next);
+ if (rfcpn) {
+ if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
+ ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
+ break;
+ rcu_fwd_cb_head = rfcpn;
+ n_launders++;
+ n_launders_sa++;
+ } else {
+ rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
+ if (WARN_ON_ONCE(!rfcp)) {
+ schedule_timeout_interruptible(1);
+ continue;
+ }
+ n_max_cbs++;
+ n_launders_sa = 0;
+ rfcp->rfc_gps = 0;
+ }
+ cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
+ cond_resched();
+ }
+ stoppedat = jiffies;
+ n_launders_cb_snap = READ_ONCE(n_launders_cb);
+ cver = READ_ONCE(rcu_torture_current_version) - cver;
+ gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
+ cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
+ (void)rcu_torture_fwd_prog_cbfree();
+
+ WRITE_ONCE(rcu_fwd_cb_nodelay, false);
+ if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop)) {
+ WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
+ pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
+ __func__,
+ stoppedat - rcu_fwd_startat, jiffies - stoppedat,
+ n_launders + n_max_cbs - n_launders_cb_snap,
+ n_launders, n_launders_sa,
+ n_max_gps, n_max_cbs, cver, gps);
+ rcu_torture_fwd_cb_hist();
+ }
+}
+
+/*
+ * OOM notifier; all it does is print diagnostic information about the
+ * current forward-progress test.
+ */
+static int rcutorture_oom_notify(struct notifier_block *self,
+ unsigned long notused, void *nfreed)
+{
+ WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
+ __func__);
+ rcu_torture_fwd_cb_hist();
+	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rcu_fwd_startat)) / 2);
+ WRITE_ONCE(rcu_fwd_emergency_stop, true);
+ smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
+ pr_info("%s: Freed %lu RCU callbacks.\n",
+ __func__, rcu_torture_fwd_prog_cbfree());
+ rcu_barrier();
+ pr_info("%s: Freed %lu RCU callbacks.\n",
+ __func__, rcu_torture_fwd_prog_cbfree());
+ rcu_barrier();
+ pr_info("%s: Freed %lu RCU callbacks.\n",
+ __func__, rcu_torture_fwd_prog_cbfree());
+ smp_mb(); /* Frees before return to avoid redoing OOM. */
+ (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
+ pr_info("%s returning after OOM processing.\n", __func__);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rcutorture_oom_nb = {
+ .notifier_call = rcutorture_oom_notify
+};
+
+/* Carry out grace-period forward-progress testing. */
+static int rcu_torture_fwd_prog(void *args)
+{
+ int tested = 0;
+ int tested_tries = 0;
+
+ VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
+ rcu_bind_current_to_nocb();
+ if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
+ set_user_nice(current, MAX_NICE);
do {
schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
- if (selfpropcb) {
- WRITE_ONCE(fcs.stop, 0);
- cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
- }
- cver = READ_ONCE(rcu_torture_current_version);
- gps = cur_ops->get_gp_seq();
- sd = cur_ops->stall_dur() + 1;
- sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
- dur = sd4 + torture_random(&trs) % (sd - sd4);
- stopat = jiffies + dur;
- while (time_before(jiffies, stopat) && !torture_must_stop()) {
- idx = cur_ops->readlock();
- udelay(10);
- cur_ops->readunlock(idx);
- if (!fwd_progress_need_resched || need_resched())
- cond_resched();
- }
- tested_tries++;
- if (!time_before(jiffies, stopat) && !torture_must_stop()) {
- tested++;
- cver = READ_ONCE(rcu_torture_current_version) - cver;
- gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
- WARN_ON(!cver && gps < 2);
- pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
- }
- if (selfpropcb) {
- WRITE_ONCE(fcs.stop, 1);
- cur_ops->sync(); /* Wait for running CB to complete. */
- cur_ops->cb_barrier(); /* Wait for queued callbacks. */
- }
+ WRITE_ONCE(rcu_fwd_emergency_stop, false);
+ register_oom_notifier(&rcutorture_oom_nb);
+ rcu_torture_fwd_prog_nr(&tested, &tested_tries);
+ rcu_torture_fwd_prog_cr();
+ unregister_oom_notifier(&rcutorture_oom_nb);
+
/* Avoid slow periods, better to test when busy. */
stutter_wait("rcu_torture_fwd_prog");
} while (!torture_must_stop());
- if (selfpropcb) {
- WARN_ON(READ_ONCE(fcs.stop) != 2);
- destroy_rcu_head_on_stack(&fcs.rh);
- }
/* Short runs might not contain a valid forward-progress attempt. */
WARN_ON(!tested && tested_tries >= 5);
pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
@@ -1748,7 +1900,8 @@ static int __init rcu_torture_fwd_prog_init(void)
{
if (!fwd_progress)
return 0; /* Not requested, so don't do it. */
- if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0) {
+ if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 ||
+ cur_ops == &rcu_busted_ops) {
VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
return 0;
}
@@ -1968,8 +2121,6 @@ rcu_torture_cleanup(void)
cur_ops->name, gp_seq, flags);
torture_stop_kthread(rcu_torture_stats, stats_task);
torture_stop_kthread(rcu_torture_fqs, fqs_task);
- for (i = 0; i < ncbflooders; i++)
- torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
if (rcu_torture_can_boost())
cpuhp_remove_state(rcutor_hp);
@@ -2252,24 +2403,6 @@ rcu_torture_init(void)
goto unwind;
if (object_debug)
rcu_test_debug_objects();
- if (cbflood_n_burst > 0) {
- /* Create the cbflood threads */
- ncbflooders = (num_online_cpus() + 3) / 4;
- cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
- GFP_KERNEL);
- if (!cbflood_task) {
- VERBOSE_TOROUT_ERRSTRING("out of memory");
- firsterr = -ENOMEM;
- goto unwind;
- }
- for (i = 0; i < ncbflooders; i++) {
- firsterr = torture_create_kthread(rcu_torture_cbflood,
- NULL,
- cbflood_task[i]);
- if (firsterr)
- goto unwind;
- }
- }
torture_init_end();
return 0;
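
The flood machinery above keeps its callbacks on a singly linked queue with an indirect tail pointer: rcu_fwd_cb_tail normally points at the last element's ->rfc_next field and falls back to &rcu_fwd_cb_head when the queue drains. A minimal sketch of that idiom, with the locking and RCU specifics stripped out and illustrative names:

#include <stddef.h>

struct node {
	struct node *next;
};

static struct node *head;
static struct node **tail = &head;

/* Enqueue at the tail in O(1): write through the tail slot, advance it. */
static void enqueue(struct node *n)
{
	n->next = NULL;
	*tail = n;
	tail = &n->next;
}

/* Dequeue from the head; reset the tail slot when the queue empties. */
static struct node *dequeue(void)
{
	struct node *n = head;

	if (!n)
		return NULL;
	head = n->next;
	if (!head)
		tail = &head;
	return n;
}
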
diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c
index b46e6683f8c9..32dfd6522548 100644
--- a/kernel/rcu/srcutiny.c
+++ b/kernel/rcu/srcutiny.c
@@ -37,30 +37,30 @@ int rcu_scheduler_active __read_mostly;
static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done;
-static int init_srcu_struct_fields(struct srcu_struct *sp)
+static int init_srcu_struct_fields(struct srcu_struct *ssp)
{
- sp->srcu_lock_nesting[0] = 0;
- sp->srcu_lock_nesting[1] = 0;
- init_swait_queue_head(&sp->srcu_wq);
- sp->srcu_cb_head = NULL;
- sp->srcu_cb_tail = &sp->srcu_cb_head;
- sp->srcu_gp_running = false;
- sp->srcu_gp_waiting = false;
- sp->srcu_idx = 0;
- INIT_WORK(&sp->srcu_work, srcu_drive_gp);
- INIT_LIST_HEAD(&sp->srcu_work.entry);
+ ssp->srcu_lock_nesting[0] = 0;
+ ssp->srcu_lock_nesting[1] = 0;
+ init_swait_queue_head(&ssp->srcu_wq);
+ ssp->srcu_cb_head = NULL;
+ ssp->srcu_cb_tail = &ssp->srcu_cb_head;
+ ssp->srcu_gp_running = false;
+ ssp->srcu_gp_waiting = false;
+ ssp->srcu_idx = 0;
+ INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
+ INIT_LIST_HEAD(&ssp->srcu_work.entry);
return 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key)
{
/* Don't re-initialize a lock while it is held. */
- debug_check_no_locks_freed((void *)sp, sizeof(*sp));
- lockdep_init_map(&sp->dep_map, name, key, 0);
- return init_srcu_struct_fields(sp);
+ debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
+ lockdep_init_map(&ssp->dep_map, name, key, 0);
+ return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -68,15 +68,15 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
/*
* init_srcu_struct - initialize a sleep-RCU structure
- * @sp: structure to initialize.
+ * @ssp: structure to initialize.
*
* Must invoke this on a given srcu_struct before passing that srcu_struct
* to any other function. Each srcu_struct represents a separate domain
* of SRCU protection.
*/
-int init_srcu_struct(struct srcu_struct *sp)
+int init_srcu_struct(struct srcu_struct *ssp)
{
- return init_srcu_struct_fields(sp);
+ return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -84,22 +84,22 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
/*
* cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @sp: structure to clean up.
+ * @ssp: structure to clean up.
*
* Must invoke this after you are finished using a given srcu_struct that
* was initialized via init_srcu_struct(), else you leak memory.
*/
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
{
- WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]);
+ WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
if (quiesced)
- WARN_ON(work_pending(&sp->srcu_work));
+ WARN_ON(work_pending(&ssp->srcu_work));
else
- flush_work(&sp->srcu_work);
- WARN_ON(sp->srcu_gp_running);
- WARN_ON(sp->srcu_gp_waiting);
- WARN_ON(sp->srcu_cb_head);
- WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail);
+ flush_work(&ssp->srcu_work);
+ WARN_ON(ssp->srcu_gp_running);
+ WARN_ON(ssp->srcu_gp_waiting);
+ WARN_ON(ssp->srcu_cb_head);
+ WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
@@ -107,13 +107,13 @@ EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
* Removes the count for the old reader from the appropriate element of
* the srcu_struct.
*/
-void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
- int newval = sp->srcu_lock_nesting[idx] - 1;
+ int newval = ssp->srcu_lock_nesting[idx] - 1;
- WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
- if (!newval && READ_ONCE(sp->srcu_gp_waiting))
- swake_up_one(&sp->srcu_wq);
+ WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
+ if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
+ swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@@ -127,24 +127,24 @@ void srcu_drive_gp(struct work_struct *wp)
int idx;
struct rcu_head *lh;
struct rcu_head *rhp;
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
- sp = container_of(wp, struct srcu_struct, srcu_work);
- if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
+ ssp = container_of(wp, struct srcu_struct, srcu_work);
+ if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head))
return; /* Already running or nothing to do. */
/* Remove recently arrived callbacks and wait for readers. */
- WRITE_ONCE(sp->srcu_gp_running, true);
+ WRITE_ONCE(ssp->srcu_gp_running, true);
local_irq_disable();
- lh = sp->srcu_cb_head;
- sp->srcu_cb_head = NULL;
- sp->srcu_cb_tail = &sp->srcu_cb_head;
+ lh = ssp->srcu_cb_head;
+ ssp->srcu_cb_head = NULL;
+ ssp->srcu_cb_tail = &ssp->srcu_cb_head;
local_irq_enable();
- idx = sp->srcu_idx;
- WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
- WRITE_ONCE(sp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
- swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
- WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
+ idx = ssp->srcu_idx;
+ WRITE_ONCE(ssp->srcu_idx, !ssp->srcu_idx);
+ WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */
+ swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
+ WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
/* Invoke the callbacks we removed above. */
while (lh) {
@@ -161,9 +161,9 @@ void srcu_drive_gp(struct work_struct *wp)
* at interrupt level, but the ->srcu_gp_running checks will
* straighten that out.
*/
- WRITE_ONCE(sp->srcu_gp_running, false);
- if (READ_ONCE(sp->srcu_cb_head))
- schedule_work(&sp->srcu_work);
+ WRITE_ONCE(ssp->srcu_gp_running, false);
+ if (READ_ONCE(ssp->srcu_cb_head))
+ schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
@@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(srcu_drive_gp);
* Enqueue an SRCU callback on the specified srcu_struct structure,
* initiating grace-period processing if it is not already running.
*/
-void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func)
{
unsigned long flags;
@@ -179,14 +179,14 @@ void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
rhp->func = func;
rhp->next = NULL;
local_irq_save(flags);
- *sp->srcu_cb_tail = rhp;
- sp->srcu_cb_tail = &rhp->next;
+ *ssp->srcu_cb_tail = rhp;
+ ssp->srcu_cb_tail = &rhp->next;
local_irq_restore(flags);
- if (!READ_ONCE(sp->srcu_gp_running)) {
+ if (!READ_ONCE(ssp->srcu_gp_running)) {
if (likely(srcu_init_done))
- schedule_work(&sp->srcu_work);
- else if (list_empty(&sp->srcu_work.entry))
- list_add(&sp->srcu_work.entry, &srcu_boot_list);
+ schedule_work(&ssp->srcu_work);
+ else if (list_empty(&ssp->srcu_work.entry))
+ list_add(&ssp->srcu_work.entry, &srcu_boot_list);
}
}
EXPORT_SYMBOL_GPL(call_srcu);
@@ -194,13 +194,13 @@ EXPORT_SYMBOL_GPL(call_srcu);
/*
* synchronize_srcu - wait for prior SRCU read-side critical-section completion
*/
-void synchronize_srcu(struct srcu_struct *sp)
+void synchronize_srcu(struct srcu_struct *ssp)
{
struct rcu_synchronize rs;
init_rcu_head_on_stack(&rs.head);
init_completion(&rs.completion);
- call_srcu(sp, &rs.head, wakeme_after_rcu);
+ call_srcu(ssp, &rs.head, wakeme_after_rcu);
wait_for_completion(&rs.completion);
destroy_rcu_head_on_stack(&rs.head);
}
@@ -219,13 +219,13 @@ void __init rcu_scheduler_starting(void)
*/
void __init srcu_init(void)
{
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
srcu_init_done = true;
while (!list_empty(&srcu_boot_list)) {
- sp = list_first_entry(&srcu_boot_list,
+ ssp = list_first_entry(&srcu_boot_list,
struct srcu_struct, srcu_work.entry);
- list_del_init(&sp->srcu_work.entry);
- schedule_work(&sp->srcu_work);
+ list_del_init(&ssp->srcu_work.entry);
+ schedule_work(&ssp->srcu_work);
}
}
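
Every entry point renamed above implements one side of the usual SRCU pattern. For reference, a hedged sketch of typical usage; the shared pointer and update policy are hypothetical, and init_srcu_struct(&my_srcu) must have run first:

#include <linux/slab.h>
#include <linux/srcu.h>

static struct srcu_struct my_srcu;	/* init_srcu_struct(&my_srcu) at init. */
static int __rcu *shared;

static int reader(void)
{
	int idx, val = 0;
	int *p;

	idx = srcu_read_lock(&my_srcu);	/* SRCU readers may even sleep here. */
	p = srcu_dereference(shared, &my_srcu);
	if (p)
		val = *p;
	srcu_read_unlock(&my_srcu, idx);
	return val;
}

static void update(int v)
{
	int *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	int *oldp = rcu_dereference_protected(shared, 1);

	if (!newp)
		return;
	*newp = v;
	rcu_assign_pointer(shared, newp);
	synchronize_srcu(&my_srcu);	/* Wait out pre-existing readers. */
	kfree(oldp);
}
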
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index a8846ed7f352..3600d88d8956 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -56,7 +56,7 @@ static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;
static void srcu_invoke_callbacks(struct work_struct *work);
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
@@ -92,7 +92,7 @@ do { \
* srcu_read_unlock() running against them. So if the is_static parameter
* is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
*/
-static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
+static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
{
int cpu;
int i;
@@ -103,13 +103,13 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
struct srcu_node *snp_first;
/* Work out the overall tree geometry. */
- sp->level[0] = &sp->node[0];
+ ssp->level[0] = &ssp->node[0];
for (i = 1; i < rcu_num_lvls; i++)
- sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
+ ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
rcu_init_levelspread(levelspread, num_rcu_lvl);
/* Each pass through this loop initializes one srcu_node structure. */
- srcu_for_each_node_breadth_first(sp, snp) {
+ srcu_for_each_node_breadth_first(ssp, snp) {
spin_lock_init(&ACCESS_PRIVATE(snp, lock));
WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
ARRAY_SIZE(snp->srcu_data_have_cbs));
@@ -120,17 +120,17 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
snp->srcu_gp_seq_needed_exp = 0;
snp->grplo = -1;
snp->grphi = -1;
- if (snp == &sp->node[0]) {
+ if (snp == &ssp->node[0]) {
/* Root node, special case. */
snp->srcu_parent = NULL;
continue;
}
/* Non-root node. */
- if (snp == sp->level[level + 1])
+ if (snp == ssp->level[level + 1])
level++;
- snp->srcu_parent = sp->level[level - 1] +
- (snp - sp->level[level]) /
+ snp->srcu_parent = ssp->level[level - 1] +
+ (snp - ssp->level[level]) /
levelspread[level - 1];
}
@@ -141,14 +141,14 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
ARRAY_SIZE(sdp->srcu_unlock_count));
level = rcu_num_lvls - 1;
- snp_first = sp->level[level];
+ snp_first = ssp->level[level];
for_each_possible_cpu(cpu) {
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
rcu_segcblist_init(&sdp->srcu_cblist);
sdp->srcu_cblist_invoking = false;
- sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
- sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
+ sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
+ sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
sdp->mynode = &snp_first[cpu / levelspread[level]];
for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
if (snp->grplo < 0)
@@ -157,7 +157,7 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
}
sdp->cpu = cpu;
INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
- sdp->sp = sp;
+ sdp->ssp = ssp;
sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
if (is_static)
continue;
@@ -176,35 +176,35 @@ static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
* parameter is passed through to init_srcu_struct_nodes(), and
* also tells us that ->sda has already been wired up to srcu_data.
*/
-static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
+static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
- mutex_init(&sp->srcu_cb_mutex);
- mutex_init(&sp->srcu_gp_mutex);
- sp->srcu_idx = 0;
- sp->srcu_gp_seq = 0;
- sp->srcu_barrier_seq = 0;
- mutex_init(&sp->srcu_barrier_mutex);
- atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
- INIT_DELAYED_WORK(&sp->work, process_srcu);
+ mutex_init(&ssp->srcu_cb_mutex);
+ mutex_init(&ssp->srcu_gp_mutex);
+ ssp->srcu_idx = 0;
+ ssp->srcu_gp_seq = 0;
+ ssp->srcu_barrier_seq = 0;
+ mutex_init(&ssp->srcu_barrier_mutex);
+ atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
+ INIT_DELAYED_WORK(&ssp->work, process_srcu);
if (!is_static)
- sp->sda = alloc_percpu(struct srcu_data);
- init_srcu_struct_nodes(sp, is_static);
- sp->srcu_gp_seq_needed_exp = 0;
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
- smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
- return sp->sda ? 0 : -ENOMEM;
+ ssp->sda = alloc_percpu(struct srcu_data);
+ init_srcu_struct_nodes(ssp, is_static);
+ ssp->srcu_gp_seq_needed_exp = 0;
+ ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+ smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
+ return ssp->sda ? 0 : -ENOMEM;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
struct lock_class_key *key)
{
/* Don't re-initialize a lock while it is held. */
- debug_check_no_locks_freed((void *)sp, sizeof(*sp));
- lockdep_init_map(&sp->dep_map, name, key, 0);
- spin_lock_init(&ACCESS_PRIVATE(sp, lock));
- return init_srcu_struct_fields(sp, false);
+ debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
+ lockdep_init_map(&ssp->dep_map, name, key, 0);
+ spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+ return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
@@ -212,16 +212,16 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
/**
* init_srcu_struct - initialize a sleep-RCU structure
- * @sp: structure to initialize.
+ * @ssp: structure to initialize.
*
* Must invoke this on a given srcu_struct before passing that srcu_struct
* to any other function. Each srcu_struct represents a separate domain
* of SRCU protection.
*/
-int init_srcu_struct(struct srcu_struct *sp)
+int init_srcu_struct(struct srcu_struct *ssp)
{
- spin_lock_init(&ACCESS_PRIVATE(sp, lock));
- return init_srcu_struct_fields(sp, false);
+ spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+ return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
@@ -231,37 +231,37 @@ EXPORT_SYMBOL_GPL(init_srcu_struct);
* First-use initialization of statically allocated srcu_struct
* structure. Wiring up the combining tree is more than can be
* done with compile-time initialization, so this check is added
- * to each update-side SRCU primitive. Use sp->lock, which -is-
+ * to each update-side SRCU primitive. Use ssp->lock, which -is-
* compile-time initialized, to resolve races involving multiple
* CPUs trying to garner first-use privileges.
*/
-static void check_init_srcu_struct(struct srcu_struct *sp)
+static void check_init_srcu_struct(struct srcu_struct *ssp)
{
unsigned long flags;
/* The smp_load_acquire() pairs with the smp_store_release(). */
- if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
+ if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
return; /* Already initialized. */
- spin_lock_irqsave_rcu_node(sp, flags);
- if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(ssp, flags);
+ if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
return;
}
- init_srcu_struct_fields(sp, true);
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ init_srcu_struct_fields(ssp, true);
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
* Returns approximate total of the readers' ->srcu_lock_count[] values
* for the rank of per-CPU counters specified by idx.
*/
-static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
int cpu;
unsigned long sum = 0;
for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
}
@@ -272,13 +272,13 @@ static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
* Returns approximate total of the readers' ->srcu_unlock_count[] values
* for the rank of per-CPU counters specified by idx.
*/
-static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
int cpu;
unsigned long sum = 0;
for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
}
@@ -289,11 +289,11 @@ static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
* Return true if the number of pre-existing readers is determined to
* be zero.
*/
-static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
+static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
unsigned long unlocks;
- unlocks = srcu_readers_unlock_idx(sp, idx);
+ unlocks = srcu_readers_unlock_idx(ssp, idx);
/*
* Make sure that a lock is always counted if the corresponding
@@ -329,25 +329,25 @@ static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
* of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
* especially on 64-bit systems.
*/
- return srcu_readers_lock_idx(sp, idx) == unlocks;
+ return srcu_readers_lock_idx(ssp, idx) == unlocks;
}
/**
* srcu_readers_active - returns true if there are readers, and false
* otherwise
- * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
+ * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
*
* Note that this is not an atomic primitive, and can therefore suffer
* severe errors when invoked on an active srcu_struct. That said, it
* can be useful as an error check at cleanup time.
*/
-static bool srcu_readers_active(struct srcu_struct *sp)
+static bool srcu_readers_active(struct srcu_struct *ssp)
{
int cpu;
unsigned long sum = 0;
for_each_possible_cpu(cpu) {
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
sum += READ_ONCE(cpuc->srcu_lock_count[0]);
sum += READ_ONCE(cpuc->srcu_lock_count[1]);
@@ -363,44 +363,44 @@ static bool srcu_readers_active(struct srcu_struct *sp)
* Return grace-period delay, zero if there are expedited grace
* periods pending, SRCU_INTERVAL otherwise.
*/
-static unsigned long srcu_get_delay(struct srcu_struct *sp)
+static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
- if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
- READ_ONCE(sp->srcu_gp_seq_needed_exp)))
+ if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
+ READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
return 0;
return SRCU_INTERVAL;
}
/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
+void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced)
{
int cpu;
- if (WARN_ON(!srcu_get_delay(sp)))
+ if (WARN_ON(!srcu_get_delay(ssp)))
return; /* Just leak it! */
- if (WARN_ON(srcu_readers_active(sp)))
+ if (WARN_ON(srcu_readers_active(ssp)))
return; /* Just leak it! */
if (quiesced) {
- if (WARN_ON(delayed_work_pending(&sp->work)))
+ if (WARN_ON(delayed_work_pending(&ssp->work)))
return; /* Just leak it! */
} else {
- flush_delayed_work(&sp->work);
+ flush_delayed_work(&ssp->work);
}
for_each_possible_cpu(cpu)
if (quiesced) {
- if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))
+ if (WARN_ON(delayed_work_pending(&per_cpu_ptr(ssp->sda, cpu)->work)))
return; /* Just leak it! */
} else {
- flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
+ flush_delayed_work(&per_cpu_ptr(ssp->sda, cpu)->work);
}
- if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
- WARN_ON(srcu_readers_active(sp))) {
+ if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
+ WARN_ON(srcu_readers_active(ssp))) {
pr_info("%s: Active srcu_struct %p state: %d\n",
- __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+ __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
return; /* Caller forgot to stop doing call_srcu()? */
}
- free_percpu(sp->sda);
- sp->sda = NULL;
+ free_percpu(ssp->sda);
+ ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
@@ -409,12 +409,12 @@ EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
* srcu_struct.
* Returns an index that must be passed to the matching srcu_read_unlock().
*/
-int __srcu_read_lock(struct srcu_struct *sp)
+int __srcu_read_lock(struct srcu_struct *ssp)
{
int idx;
- idx = READ_ONCE(sp->srcu_idx) & 0x1;
- this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+ idx = READ_ONCE(ssp->srcu_idx) & 0x1;
+ this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
smp_mb(); /* B */ /* Avoid leaking the critical section. */
return idx;
}
@@ -425,10 +425,10 @@ EXPORT_SYMBOL_GPL(__srcu_read_lock);
* element of the srcu_struct. Note that this may well be a different
* CPU than that which was incremented by the corresponding srcu_read_lock().
*/
-void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
smp_mb(); /* C */ /* Avoid leaking the critical section. */
- this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
+ this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
@@ -444,20 +444,22 @@ EXPORT_SYMBOL_GPL(__srcu_read_unlock);
/*
* Start an SRCU grace period.
*/
-static void srcu_gp_start(struct srcu_struct *sp)
+static void srcu_gp_start(struct srcu_struct *ssp)
{
- struct srcu_data *sdp = this_cpu_ptr(sp->sda);
+ struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
int state;
- lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
- WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
+ lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
+ WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
+ spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&sp->srcu_gp_seq));
+ rcu_seq_current(&ssp->srcu_gp_seq));
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
- rcu_seq_snap(&sp->srcu_gp_seq));
+ rcu_seq_snap(&ssp->srcu_gp_seq));
+ spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
- rcu_seq_start(&sp->srcu_gp_seq);
- state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+ rcu_seq_start(&ssp->srcu_gp_seq);
+ state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}
@@ -511,7 +513,7 @@ static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
* just-completed grace period, the one corresponding to idx. If possible,
* schedule this invocation on the corresponding CPUs.
*/
-static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
unsigned long mask, unsigned long delay)
{
int cpu;
@@ -519,7 +521,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
if (!(mask & (1 << (cpu - snp->grplo))))
continue;
- srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
+ srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
}
}
@@ -532,7 +534,7 @@ static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
* are initiating callback invocation. This allows the ->srcu_have_cbs[]
* array to have a finite number of elements.
*/
-static void srcu_gp_end(struct srcu_struct *sp)
+static void srcu_gp_end(struct srcu_struct *ssp)
{
unsigned long cbdelay;
bool cbs;
@@ -546,28 +548,28 @@ static void srcu_gp_end(struct srcu_struct *sp)
struct srcu_node *snp;
/* Prevent more than one additional grace period. */
- mutex_lock(&sp->srcu_cb_mutex);
+ mutex_lock(&ssp->srcu_cb_mutex);
/* End the current grace period. */
- spin_lock_irq_rcu_node(sp);
- idx = rcu_seq_state(sp->srcu_gp_seq);
+ spin_lock_irq_rcu_node(ssp);
+ idx = rcu_seq_state(ssp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
- cbdelay = srcu_get_delay(sp);
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
- rcu_seq_end(&sp->srcu_gp_seq);
- gpseq = rcu_seq_current(&sp->srcu_gp_seq);
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
- sp->srcu_gp_seq_needed_exp = gpseq;
- spin_unlock_irq_rcu_node(sp);
- mutex_unlock(&sp->srcu_gp_mutex);
+ cbdelay = srcu_get_delay(ssp);
+ ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+ rcu_seq_end(&ssp->srcu_gp_seq);
+ gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
+ ssp->srcu_gp_seq_needed_exp = gpseq;
+ spin_unlock_irq_rcu_node(ssp);
+ mutex_unlock(&ssp->srcu_gp_mutex);
/* A new grace period can start at this point. But only one. */
/* Initiate callback invocation as needed. */
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
- srcu_for_each_node_breadth_first(sp, snp) {
+ srcu_for_each_node_breadth_first(ssp, snp) {
spin_lock_irq_rcu_node(snp);
cbs = false;
- last_lvl = snp >= sp->level[rcu_num_lvls - 1];
+ last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
if (last_lvl)
cbs = snp->srcu_have_cbs[idx] == gpseq;
snp->srcu_have_cbs[idx] = gpseq;
@@ -578,12 +580,12 @@ static void srcu_gp_end(struct srcu_struct *sp)
snp->srcu_data_have_cbs[idx] = 0;
spin_unlock_irq_rcu_node(snp);
if (cbs)
- srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
+ srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
/* Occasionally prevent srcu_data counter wrap. */
if (!(gpseq & counter_wrap_check) && last_lvl)
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
spin_lock_irqsave_rcu_node(sdp, flags);
if (ULONG_CMP_GE(gpseq,
sdp->srcu_gp_seq_needed + 100))
@@ -596,18 +598,18 @@ static void srcu_gp_end(struct srcu_struct *sp)
}
/* Callback initiation done, allow grace periods after next. */
- mutex_unlock(&sp->srcu_cb_mutex);
+ mutex_unlock(&ssp->srcu_cb_mutex);
/* Start a new grace period if needed. */
- spin_lock_irq_rcu_node(sp);
- gpseq = rcu_seq_current(&sp->srcu_gp_seq);
+ spin_lock_irq_rcu_node(ssp);
+ gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
if (!rcu_seq_state(gpseq) &&
- ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
- srcu_gp_start(sp);
- spin_unlock_irq_rcu_node(sp);
- srcu_reschedule(sp, 0);
+ ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
+ srcu_gp_start(ssp);
+ spin_unlock_irq_rcu_node(ssp);
+ srcu_reschedule(ssp, 0);
} else {
- spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(ssp);
}
}
@@ -618,13 +620,13 @@ static void srcu_gp_end(struct srcu_struct *sp)
* but without expediting. To start a completely new grace period,
* whether expedited or not, use srcu_funnel_gp_start() instead.
*/
-static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
unsigned long s)
{
unsigned long flags;
for (; snp != NULL; snp = snp->srcu_parent) {
- if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
+ if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
return;
spin_lock_irqsave_rcu_node(snp, flags);
@@ -635,10 +637,10 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
spin_unlock_irqrestore_rcu_node(snp, flags);
}
- spin_lock_irqsave_rcu_node(sp, flags);
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
- sp->srcu_gp_seq_needed_exp = s;
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_lock_irqsave_rcu_node(ssp, flags);
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+ ssp->srcu_gp_seq_needed_exp = s;
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
@@ -651,7 +653,7 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
* Note that this function also does the work of srcu_funnel_exp_start(),
* in some cases by directly invoking it.
*/
-static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
+static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
unsigned long s, bool do_norm)
{
unsigned long flags;
@@ -661,7 +663,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
/* Each pass through the loop does one level of the srcu_node tree. */
for (; snp != NULL; snp = snp->srcu_parent) {
- if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
+ if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
return; /* GP already done and CBs recorded. */
spin_lock_irqsave_rcu_node(snp, flags);
if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
@@ -676,7 +678,7 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
return;
}
if (!do_norm)
- srcu_funnel_exp_start(sp, snp, s);
+ srcu_funnel_exp_start(ssp, snp, s);
return;
}
snp->srcu_have_cbs[idx] = s;
@@ -688,29 +690,29 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
}
/* Top of tree, must ensure the grace period will be started. */
- spin_lock_irqsave_rcu_node(sp, flags);
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
+ spin_lock_irqsave_rcu_node(ssp, flags);
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
/*
* Record need for grace period s. Pair with load
* acquire setting up for initialization.
*/
- smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
+ smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
}
- if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
- sp->srcu_gp_seq_needed_exp = s;
+ if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+ ssp->srcu_gp_seq_needed_exp = s;
/* If grace period not already done and none in progress, start it. */
- if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
- rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
- WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
- srcu_gp_start(sp);
+ if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
+ rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
+ WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
+ srcu_gp_start(ssp);
if (likely(srcu_init_done))
- queue_delayed_work(rcu_gp_wq, &sp->work,
- srcu_get_delay(sp));
- else if (list_empty(&sp->work.work.entry))
- list_add(&sp->work.work.entry, &srcu_boot_list);
+ queue_delayed_work(rcu_gp_wq, &ssp->work,
+ srcu_get_delay(ssp));
+ else if (list_empty(&ssp->work.work.entry))
+ list_add(&ssp->work.work.entry, &srcu_boot_list);
}
- spin_unlock_irqrestore_rcu_node(sp, flags);
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
}
/*
@@ -718,12 +720,12 @@ static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
* loop an additional time if there is an expedited grace period pending.
* The caller must ensure that ->srcu_idx is not changed while checking.
*/
-static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
+static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
for (;;) {
- if (srcu_readers_active_idx_check(sp, idx))
+ if (srcu_readers_active_idx_check(ssp, idx))
return true;
- if (--trycount + !srcu_get_delay(sp) <= 0)
+ if (--trycount + !srcu_get_delay(ssp) <= 0)
return false;
udelay(SRCU_RETRY_CHECK_DELAY);
}
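try_check_zero() above is a bounded poll: recheck the readers, spend one retry per pass (srcu_get_delay() returning zero grants one extra pass for the expedited case), and delay briefly between passes. A minimal user-space sketch of the same shape, with poll_fn and RETRY_DELAY_US as illustrative names rather than kernel APIs:

#include <stdbool.h>
#include <unistd.h>

#define RETRY_DELAY_US 5	/* Stand-in for SRCU_RETRY_CHECK_DELAY. */

static bool poll_until(bool (*poll_fn)(void *), void *arg, int trycount)
{
	for (;;) {
		if (poll_fn(arg))
			return true;	/* Condition observed. */
		if (--trycount <= 0)
			return false;	/* Budget spent; caller retries later. */
		usleep(RETRY_DELAY_US);	/* Brief back-off between polls. */
	}
}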
@@ -734,7 +736,7 @@ static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
* use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
* us to wait for pre-existing readers in a starvation-free manner.
*/
-static void srcu_flip(struct srcu_struct *sp)
+static void srcu_flip(struct srcu_struct *ssp)
{
/*
* Ensure that if this updater saw a given reader's increment
@@ -746,7 +748,7 @@ static void srcu_flip(struct srcu_struct *sp)
*/
smp_mb(); /* E */ /* Pairs with B and C. */
- WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);
+ WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
/*
* Ensure that if the updater misses an __srcu_read_unlock()
@@ -779,7 +781,7 @@ static void srcu_flip(struct srcu_struct *sp)
* negligible when amortized over that time period, and the extra latency
* of a needlessly non-expedited grace period is similarly negligible.
*/
-static bool srcu_might_be_idle(struct srcu_struct *sp)
+static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
unsigned long curseq;
unsigned long flags;
@@ -788,7 +790,7 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
/* If the local srcu_data structure has callbacks, not idle. */
local_irq_save(flags);
- sdp = this_cpu_ptr(sp->sda);
+ sdp = this_cpu_ptr(ssp->sda);
if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
local_irq_restore(flags);
return false; /* Callbacks already present, so not idle. */
@@ -804,17 +806,17 @@ static bool srcu_might_be_idle(struct srcu_struct *sp)
/* First, see if enough time has passed since the last GP. */
t = ktime_get_mono_fast_ns();
if (exp_holdoff == 0 ||
- time_in_range_open(t, sp->srcu_last_gp_end,
- sp->srcu_last_gp_end + exp_holdoff))
+ time_in_range_open(t, ssp->srcu_last_gp_end,
+ ssp->srcu_last_gp_end + exp_holdoff))
return false; /* Too soon after last GP. */
/* Next, check for probable idleness. */
- curseq = rcu_seq_current(&sp->srcu_gp_seq);
+ curseq = rcu_seq_current(&ssp->srcu_gp_seq);
smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
- if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
+ if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
return false; /* Grace period in progress, so not idle. */
smp_mb(); /* Order ->srcu_gp_seq with prior access. */
- if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
+ if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
return false; /* GP # changed, so not idle. */
return true; /* With reasonable probability, idle! */
}
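srcu_might_be_idle() treats a grace period that ended within the last exp_holdoff nanoseconds as evidence of activity. Reduced to plain C with monotonic timestamps (the boundary handling below is an assumption; the kernel path uses time_in_range_open()):

#include <stdbool.h>
#include <stdint.h>

static bool within_holdoff(uint64_t now_ns, uint64_t last_gp_end_ns,
			   uint64_t holdoff_ns)
{
	/* Assumed half-open window [last_gp_end, last_gp_end + holdoff). */
	return now_ns >= last_gp_end_ns &&
	       now_ns < last_gp_end_ns + holdoff_ns;
}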
@@ -854,16 +856,17 @@ static void srcu_leak_callback(struct rcu_head *rhp)
* srcu_read_lock(), and srcu_read_unlock() that are all passed the same
* srcu_struct structure.
*/
-void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func, bool do_norm)
{
unsigned long flags;
+ int idx;
bool needexp = false;
bool needgp = false;
unsigned long s;
struct srcu_data *sdp;
- check_init_srcu_struct(sp);
+ check_init_srcu_struct(ssp);
if (debug_rcu_head_queue(rhp)) {
/* Probable double call_srcu(), so leak the callback. */
WRITE_ONCE(rhp->func, srcu_leak_callback);
@@ -871,13 +874,14 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
return;
}
rhp->func = func;
+ idx = srcu_read_lock(ssp);
local_irq_save(flags);
- sdp = this_cpu_ptr(sp->sda);
+ sdp = this_cpu_ptr(ssp->sda);
spin_lock_rcu_node(sdp);
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&sp->srcu_gp_seq));
- s = rcu_seq_snap(&sp->srcu_gp_seq);
+ rcu_seq_current(&ssp->srcu_gp_seq));
+ s = rcu_seq_snap(&ssp->srcu_gp_seq);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
sdp->srcu_gp_seq_needed = s;
@@ -889,14 +893,15 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
}
spin_unlock_irqrestore_rcu_node(sdp, flags);
if (needgp)
- srcu_funnel_gp_start(sp, sdp, s, do_norm);
+ srcu_funnel_gp_start(ssp, sdp, s, do_norm);
else if (needexp)
- srcu_funnel_exp_start(sp, sdp->mynode, s);
+ srcu_funnel_exp_start(ssp, sdp->mynode, s);
+ srcu_read_unlock(ssp, idx);
}
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
- * @sp: srcu_struct in queue the callback
+ * @ssp: srcu_struct on which to queue the callback
+ * @ssp: srcu_struct in queue the callback
* @rhp: structure to be used for queueing the SRCU callback.
* @func: function to be invoked after the SRCU grace period
*
@@ -911,21 +916,21 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
* The callback will be invoked from process context, but must nevertheless
* be fast and must not block.
*/
-void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
rcu_callback_t func)
{
- __call_srcu(sp, rhp, func, true);
+ __call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
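For reference, a typical caller pairs call_srcu() with a container_of()-based reclaim callback; struct foo, foo_reclaim(), and my_srcu below are hypothetical, only call_srcu() itself is from this file:

struct foo {
	struct rcu_head rh;
	int data;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rh);

	kfree(fp);	/* Safe: all pre-existing SRCU readers are done. */
}

static void foo_retire(struct srcu_struct *my_srcu, struct foo *fp)
{
	call_srcu(my_srcu, &fp->rh, foo_reclaim);	/* Does not block. */
}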
/*
* Helper function for synchronize_srcu() and synchronize_srcu_expedited().
*/
-static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
+static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
struct rcu_synchronize rcu;
- RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
+ RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
lock_is_held(&rcu_bh_lock_map) ||
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
@@ -934,10 +939,10 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
return;
might_sleep();
- check_init_srcu_struct(sp);
+ check_init_srcu_struct(ssp);
init_completion(&rcu.completion);
init_rcu_head_on_stack(&rcu.head);
- __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
+ __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
wait_for_completion(&rcu.completion);
destroy_rcu_head_on_stack(&rcu.head);
@@ -953,7 +958,7 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
/**
* synchronize_srcu_expedited - Brute-force SRCU grace period
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
*
* Wait for an SRCU grace period to elapse, but be more aggressive about
* spinning rather than blocking when waiting.
@@ -961,15 +966,15 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
* Note that synchronize_srcu_expedited() has the same deadlock and
* memory-ordering properties as does synchronize_srcu().
*/
-void synchronize_srcu_expedited(struct srcu_struct *sp)
+void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
- __synchronize_srcu(sp, rcu_gp_is_normal());
+ __synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
/**
* synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
*
* Wait for the counts of both indexes to drain to zero. To avoid the
* possible starvation of synchronize_srcu(), it waits for the count of
@@ -1011,12 +1016,12 @@ EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
* SRCU must also provide it. Note that detecting idleness is heuristic
* and subject to both false positives and negatives.
*/
-void synchronize_srcu(struct srcu_struct *sp)
+void synchronize_srcu(struct srcu_struct *ssp)
{
- if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
- synchronize_srcu_expedited(sp);
+ if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
+ synchronize_srcu_expedited(ssp);
else
- __synchronize_srcu(sp, true);
+ __synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
@@ -1026,36 +1031,36 @@ EXPORT_SYMBOL_GPL(synchronize_srcu);
static void srcu_barrier_cb(struct rcu_head *rhp)
{
struct srcu_data *sdp;
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
- sp = sdp->sp;
- if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
- complete(&sp->srcu_barrier_completion);
+ ssp = sdp->ssp;
+ if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
+ complete(&ssp->srcu_barrier_completion);
}
/**
* srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
- * @sp: srcu_struct on which to wait for in-flight callbacks.
+ * @ssp: srcu_struct on which to wait for in-flight callbacks.
*/
-void srcu_barrier(struct srcu_struct *sp)
+void srcu_barrier(struct srcu_struct *ssp)
{
int cpu;
struct srcu_data *sdp;
- unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);
+ unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
- check_init_srcu_struct(sp);
- mutex_lock(&sp->srcu_barrier_mutex);
- if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
+ check_init_srcu_struct(ssp);
+ mutex_lock(&ssp->srcu_barrier_mutex);
+ if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
smp_mb(); /* Force ordering following return. */
- mutex_unlock(&sp->srcu_barrier_mutex);
+ mutex_unlock(&ssp->srcu_barrier_mutex);
return; /* Someone else did our work for us. */
}
- rcu_seq_start(&sp->srcu_barrier_seq);
- init_completion(&sp->srcu_barrier_completion);
+ rcu_seq_start(&ssp->srcu_barrier_seq);
+ init_completion(&ssp->srcu_barrier_completion);
/* Initial count prevents reaching zero until all CBs are posted. */
- atomic_set(&sp->srcu_barrier_cpu_cnt, 1);
+ atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
/*
* Each pass through this loop enqueues a callback, but only
@@ -1066,39 +1071,39 @@ void srcu_barrier(struct srcu_struct *sp)
* grace period as the last callback already in the queue.
*/
for_each_possible_cpu(cpu) {
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
spin_lock_irq_rcu_node(sdp);
- atomic_inc(&sp->srcu_barrier_cpu_cnt);
+ atomic_inc(&ssp->srcu_barrier_cpu_cnt);
sdp->srcu_barrier_head.func = srcu_barrier_cb;
debug_rcu_head_queue(&sdp->srcu_barrier_head);
if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
&sdp->srcu_barrier_head, 0)) {
debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
- atomic_dec(&sp->srcu_barrier_cpu_cnt);
+ atomic_dec(&ssp->srcu_barrier_cpu_cnt);
}
spin_unlock_irq_rcu_node(sdp);
}
/* Remove the initial count, at which point reaching zero can happen. */
- if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
- complete(&sp->srcu_barrier_completion);
- wait_for_completion(&sp->srcu_barrier_completion);
+ if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
+ complete(&ssp->srcu_barrier_completion);
+ wait_for_completion(&ssp->srcu_barrier_completion);
- rcu_seq_end(&sp->srcu_barrier_seq);
- mutex_unlock(&sp->srcu_barrier_mutex);
+ rcu_seq_end(&ssp->srcu_barrier_seq);
+ mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
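srcu_barrier() relies on the count-biased completion idiom: the count starts at one so the completion cannot fire while callbacks are still being posted, and the bias is dropped only after the posting loop. The same pattern in a user-space sketch, where post_item() is a hypothetical enqueue whose completion path calls item_done():

#include <stdatomic.h>
#include <semaphore.h>

static atomic_int pending;
static sem_t all_done;

static void item_done(void)	/* Invoked as each posted item completes. */
{
	if (atomic_fetch_sub(&pending, 1) == 1)
		sem_post(&all_done);	/* Last reference: release the waiter. */
}

static void barrier_wait(int nitems)
{
	atomic_store(&pending, 1);	/* Bias: hold the gate shut while posting. */
	sem_init(&all_done, 0, 0);
	for (int i = 0; i < nitems; i++) {
		atomic_fetch_add(&pending, 1);
		/* post_item(i); its completion path calls item_done(). */
	}
	item_done();		/* Drop the initial bias. */
	sem_wait(&all_done);	/* Sleep until every item has finished. */
}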
/**
* srcu_batches_completed - return batches completed.
- * @sp: srcu_struct on which to report batch completion.
+ * @ssp: srcu_struct on which to report batch completion.
*
* Report the number of batches, correlated with, but not necessarily
* precisely the same as, the number of grace periods that have elapsed.
*/
-unsigned long srcu_batches_completed(struct srcu_struct *sp)
+unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
- return sp->srcu_idx;
+ return ssp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
@@ -1107,11 +1112,11 @@ EXPORT_SYMBOL_GPL(srcu_batches_completed);
* to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
* completed in that state.
*/
-static void srcu_advance_state(struct srcu_struct *sp)
+static void srcu_advance_state(struct srcu_struct *ssp)
{
int idx;
- mutex_lock(&sp->srcu_gp_mutex);
+ mutex_lock(&ssp->srcu_gp_mutex);
/*
* Because readers might be delayed for an extended period after
@@ -1123,47 +1128,47 @@ static void srcu_advance_state(struct srcu_struct *sp)
* The load-acquire ensures that we see the accesses performed
* by the prior grace period.
*/
- idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
+ idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
if (idx == SRCU_STATE_IDLE) {
- spin_lock_irq_rcu_node(sp);
- if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
- WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
- spin_unlock_irq_rcu_node(sp);
- mutex_unlock(&sp->srcu_gp_mutex);
+ spin_lock_irq_rcu_node(ssp);
+ if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
+ WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
+ spin_unlock_irq_rcu_node(ssp);
+ mutex_unlock(&ssp->srcu_gp_mutex);
return;
}
- idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+ idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
if (idx == SRCU_STATE_IDLE)
- srcu_gp_start(sp);
- spin_unlock_irq_rcu_node(sp);
+ srcu_gp_start(ssp);
+ spin_unlock_irq_rcu_node(ssp);
if (idx != SRCU_STATE_IDLE) {
- mutex_unlock(&sp->srcu_gp_mutex);
+ mutex_unlock(&ssp->srcu_gp_mutex);
return; /* Someone else started the grace period. */
}
}
- if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
- idx = 1 ^ (sp->srcu_idx & 1);
- if (!try_check_zero(sp, idx, 1)) {
- mutex_unlock(&sp->srcu_gp_mutex);
+ if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
+ idx = 1 ^ (ssp->srcu_idx & 1);
+ if (!try_check_zero(ssp, idx, 1)) {
+ mutex_unlock(&ssp->srcu_gp_mutex);
return; /* readers present, retry later. */
}
- srcu_flip(sp);
- rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
+ srcu_flip(ssp);
+ rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
}
- if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
+ if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
/*
* SRCU read-side critical sections are normally short,
* so check at least twice in quick succession after a flip.
*/
- idx = 1 ^ (sp->srcu_idx & 1);
- if (!try_check_zero(sp, idx, 2)) {
- mutex_unlock(&sp->srcu_gp_mutex);
+ idx = 1 ^ (ssp->srcu_idx & 1);
+ if (!try_check_zero(ssp, idx, 2)) {
+ mutex_unlock(&ssp->srcu_gp_mutex);
return; /* readers present, retry later. */
}
- srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */
+ srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
}
}
@@ -1179,14 +1184,14 @@ static void srcu_invoke_callbacks(struct work_struct *work)
struct rcu_cblist ready_cbs;
struct rcu_head *rhp;
struct srcu_data *sdp;
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
sdp = container_of(work, struct srcu_data, work.work);
- sp = sdp->sp;
+ ssp = sdp->ssp;
rcu_cblist_init(&ready_cbs);
spin_lock_irq_rcu_node(sdp);
rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&sp->srcu_gp_seq));
+ rcu_seq_current(&ssp->srcu_gp_seq));
if (sdp->srcu_cblist_invoking ||
!rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
spin_unlock_irq_rcu_node(sdp);
@@ -1212,7 +1217,7 @@ static void srcu_invoke_callbacks(struct work_struct *work)
spin_lock_irq_rcu_node(sdp);
rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
- rcu_seq_snap(&sp->srcu_gp_seq));
+ rcu_seq_snap(&ssp->srcu_gp_seq));
sdp->srcu_cblist_invoking = false;
more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
spin_unlock_irq_rcu_node(sdp);
@@ -1224,24 +1229,24 @@ static void srcu_invoke_callbacks(struct work_struct *work)
* Finished one round of SRCU grace-period processing. Start another if there are
* more SRCU callbacks queued, otherwise put SRCU into not-running state.
*/
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
bool pushgp = true;
- spin_lock_irq_rcu_node(sp);
- if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
- if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
+ spin_lock_irq_rcu_node(ssp);
+ if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
+ if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
/* All requests fulfilled, time to go idle. */
pushgp = false;
}
- } else if (!rcu_seq_state(sp->srcu_gp_seq)) {
+ } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
/* Outstanding request and no GP. Start one. */
- srcu_gp_start(sp);
+ srcu_gp_start(ssp);
}
- spin_unlock_irq_rcu_node(sp);
+ spin_unlock_irq_rcu_node(ssp);
if (pushgp)
- queue_delayed_work(rcu_gp_wq, &sp->work, delay);
+ queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}
/*
@@ -1249,41 +1254,41 @@ static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
*/
static void process_srcu(struct work_struct *work)
{
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
- sp = container_of(work, struct srcu_struct, work.work);
+ ssp = container_of(work, struct srcu_struct, work.work);
- srcu_advance_state(sp);
- srcu_reschedule(sp, srcu_get_delay(sp));
+ srcu_advance_state(ssp);
+ srcu_reschedule(ssp, srcu_get_delay(ssp));
}
void srcutorture_get_gp_data(enum rcutorture_type test_type,
- struct srcu_struct *sp, int *flags,
+ struct srcu_struct *ssp, int *flags,
unsigned long *gp_seq)
{
if (test_type != SRCU_FLAVOR)
return;
*flags = 0;
- *gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
+ *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
-void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
+void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
int cpu;
int idx;
unsigned long s0 = 0, s1 = 0;
- idx = sp->srcu_idx & 0x1;
+ idx = ssp->srcu_idx & 0x1;
pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
- tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
+ tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
for_each_possible_cpu(cpu) {
unsigned long l0, l1;
unsigned long u0, u1;
long c0, c1;
struct srcu_data *sdp;
- sdp = per_cpu_ptr(sp->sda, cpu);
+ sdp = per_cpu_ptr(ssp->sda, cpu);
u0 = sdp->srcu_unlock_count[!idx];
u1 = sdp->srcu_unlock_count[idx];
@@ -1318,14 +1323,14 @@ early_initcall(srcu_bootup_announce);
void __init srcu_init(void)
{
- struct srcu_struct *sp;
+ struct srcu_struct *ssp;
srcu_init_done = true;
while (!list_empty(&srcu_boot_list)) {
- sp = list_first_entry(&srcu_boot_list, struct srcu_struct,
+ ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
work.work.entry);
- check_init_srcu_struct(sp);
- list_del_init(&sp->work.work.entry);
- queue_work(rcu_gp_wq, &sp->work.work);
+ check_init_srcu_struct(ssp);
+ list_del_init(&ssp->work.work.entry);
+ queue_work(rcu_gp_wq, &ssp->work.work);
}
}
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 3f943efcf61c..be10036fa621 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -44,15 +44,15 @@ static const struct {
__INIT_HELD(rcu_read_lock_held)
},
[RCU_SCHED_SYNC] = {
- .sync = synchronize_sched,
- .call = call_rcu_sched,
- .wait = rcu_barrier_sched,
+ .sync = synchronize_rcu,
+ .call = call_rcu,
+ .wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_sched_held)
},
[RCU_BH_SYNC] = {
- .sync = synchronize_rcu_bh,
- .call = call_rcu_bh,
- .wait = rcu_barrier_bh,
+ .sync = synchronize_rcu,
+ .call = call_rcu,
+ .wait = rcu_barrier,
__INIT_HELD(rcu_read_lock_bh_held)
},
};
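The table above is dispatched by flavor index, as in gp_ops[rsp->gp_type].sync(); after this change the SCHED and BH slots simply alias the consolidated RCU primitives. The dispatch shape in miniature (names hypothetical):

struct gp_ops_sketch {
	void (*sync)(void);	/* Block for a grace period. */
	void (*wait)(void);	/* Wait for queued callbacks. */
};

static void flavor_sync(const struct gp_ops_sketch *ops, int gp_type)
{
	ops[gp_type].sync();	/* One indirect call, no switch statement. */
}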
@@ -125,8 +125,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
rsp->gp_state = GP_PENDING;
spin_unlock_irq(&rsp->rss_lock);
- BUG_ON(need_wait && need_sync);
-
+ WARN_ON_ONCE(need_wait && need_sync);
if (need_sync) {
gp_ops[rsp->gp_type].sync();
rsp->gp_state = GP_PASSED;
@@ -139,7 +138,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
* Nobody has yet been allowed the 'fast' path and thus we can
* avoid doing any sync(). The callback will get 'dropped'.
*/
- BUG_ON(rsp->gp_state != GP_PASSED);
+ WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
}
}
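The BUG_ON() to WARN_ON_ONCE() conversions here trade a guaranteed halt for a one-time splat plus best-effort continuation. Where the failure is recoverable, the idiom pairs the warning with an early return, as the kthread-spawn conversions later in this patch do:

	if (WARN_ON_ONCE(IS_ERR(t)))
		return 0;	/* Warn once, then run degraded instead of panicking. */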
@@ -166,8 +165,8 @@ static void rcu_sync_func(struct rcu_head *rhp)
struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
unsigned long flags;
- BUG_ON(rsp->gp_state != GP_PASSED);
- BUG_ON(rsp->cb_state == CB_IDLE);
+ WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
+ WARN_ON_ONCE(rsp->cb_state == CB_IDLE);
spin_lock_irqsave(&rsp->rss_lock, flags);
if (rsp->gp_count) {
@@ -225,7 +224,7 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
{
int cb_state;
- BUG_ON(rsp->gp_count);
+ WARN_ON_ONCE(rsp->gp_count);
spin_lock_irq(&rsp->rss_lock);
if (rsp->cb_state == CB_REPLAY)
@@ -235,6 +234,6 @@ void rcu_sync_dtor(struct rcu_sync *rsp)
if (cb_state != CB_IDLE) {
gp_ops[rsp->gp_type].wait();
- BUG_ON(rsp->cb_state != CB_IDLE);
+ WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
}
}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 121f833acd04..9180158756d2 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -207,6 +207,19 @@ static int rcu_gp_in_progress(void)
return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
}
+/*
+ * Return the number of callbacks queued on the specified CPU.
+ * Handles both the nocbs and normal cases.
+ */
+static long rcu_get_n_cbs_cpu(int cpu)
+{
+ struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+ if (rcu_segcblist_is_enabled(&rdp->cblist)) /* Online normal CPU? */
+ return rcu_segcblist_n_cbs(&rdp->cblist);
+ return rcu_get_n_cbs_nocb_cpu(rdp); /* Works for offline, too. */
+}
+
void rcu_softirq_qs(void)
{
rcu_qs();
@@ -500,16 +513,29 @@ void rcu_force_quiescent_state(void)
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
+ * Convert a ->gp_state value to a character string.
+ */
+static const char *gp_state_getname(short gs)
+{
+ if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
+ return "???";
+ return gp_state_names[gs];
+}
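This is the usual bounds-checked enum-to-string lookup; the same guard in generic, self-contained form (table contents hypothetical):

static const char * const state_names[] = { "IDLE", "WAIT_GPS", "DONE_GPS" };

static const char *state_name(int s)
{
	if (s < 0 || s >= (int)(sizeof(state_names) / sizeof(state_names[0])))
		return "???";	/* Never index outside the table. */
	return state_names[s];
}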
+
+/*
* Show the state of the grace-period kthreads.
*/
void show_rcu_gp_kthreads(void)
{
int cpu;
+ unsigned long j;
struct rcu_data *rdp;
struct rcu_node *rnp;
- pr_info("%s: wait state: %d ->state: %#lx\n", rcu_state.name,
- rcu_state.gp_state, rcu_state.gp_kthread->state);
+ j = jiffies - READ_ONCE(rcu_state.gp_activity);
+ pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %ld\n",
+ rcu_state.name, gp_state_getname(rcu_state.gp_state),
+ rcu_state.gp_state, rcu_state.gp_kthread->state, j);
rcu_for_each_node_breadth_first(rnp) {
if (ULONG_CMP_GE(rcu_state.gp_seq, rnp->gp_seq_needed))
continue;
@@ -891,12 +917,12 @@ void rcu_irq_enter_irqson(void)
}
/**
- * rcu_is_watching - see if RCU thinks that the current CPU is idle
+ * rcu_is_watching - see if RCU thinks that the current CPU is not idle
*
* Return true if RCU is watching the running CPU, which means that this
* CPU can safely enter RCU read-side critical sections. In other words,
- * if the current CPU is in its idle loop and is neither in an interrupt
- * or NMI handler, return true.
+ * if the current CPU is not in its idle loop or is in an interrupt or
+ * NMI handler, return true.
*/
bool notrace rcu_is_watching(void)
{
@@ -1143,16 +1169,6 @@ static void record_gp_stall_check_time(void)
}
/*
- * Convert a ->gp_state value to a character string.
- */
-static const char *gp_state_getname(short gs)
-{
- if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
- return "???";
- return gp_state_names[gs];
-}
-
-/*
* Complain about starvation of grace-period kthread.
*/
static void rcu_check_gp_kthread_starvation(void)
@@ -1262,8 +1278,7 @@ static void print_other_cpu_stall(unsigned long gp_seq)
print_cpu_stall_info_end();
for_each_possible_cpu(cpu)
- totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
- cpu)->cblist);
+ totqlen += rcu_get_n_cbs_cpu(cpu);
pr_cont("(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
smp_processor_id(), (long)(jiffies - rcu_state.gp_start),
(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
@@ -1323,8 +1338,7 @@ static void print_cpu_stall(void)
raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
print_cpu_stall_info_end();
for_each_possible_cpu(cpu)
- totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(&rcu_data,
- cpu)->cblist);
+ totqlen += rcu_get_n_cbs_cpu(cpu);
pr_cont(" (t=%lu jiffies g=%ld q=%lu)\n",
jiffies - rcu_state.gp_start,
(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
@@ -1986,7 +2000,8 @@ static void rcu_gp_cleanup(void)
WRITE_ONCE(rcu_state.gp_activity, jiffies);
raw_spin_lock_irq_rcu_node(rnp);
- gp_duration = jiffies - rcu_state.gp_start;
+ rcu_state.gp_end = jiffies;
+ gp_duration = rcu_state.gp_end - rcu_state.gp_start;
if (gp_duration > rcu_state.gp_max)
rcu_state.gp_max = gp_duration;
@@ -2032,9 +2047,9 @@ static void rcu_gp_cleanup(void)
rnp = rcu_get_root();
raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
- /* Declare grace period done. */
- rcu_seq_end(&rcu_state.gp_seq);
+ /* Declare grace period done, trace first to use old GP number. */
trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
+ rcu_seq_end(&rcu_state.gp_seq);
rcu_state.gp_state = RCU_GP_IDLE;
/* Check for GP requests since above loop. */
rdp = this_cpu_ptr(&rcu_data);
@@ -2600,10 +2615,10 @@ static void force_quiescent_state(void)
* This function checks for grace-period requests that fail to motivate
* RCU to come out of its idle mode.
*/
-static void
-rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
+void
+rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
+ const unsigned long gpssdelay)
{
- const unsigned long gpssdelay = rcu_jiffies_till_stall_check() * HZ;
unsigned long flags;
unsigned long j;
struct rcu_node *rnp_root = rcu_get_root();
@@ -2655,6 +2670,48 @@ rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp)
}
/*
+ * Do a forward-progress check for rcutorture. This is normally invoked
+ * due to an OOM event. The argument "j" gives the time period during
+ * which rcutorture would like progress to have been made.
+ */
+void rcu_fwd_progress_check(unsigned long j)
+{
+ unsigned long cbs;
+ int cpu;
+ unsigned long max_cbs = 0;
+ int max_cpu = -1;
+ struct rcu_data *rdp;
+
+ if (rcu_gp_in_progress()) {
+ pr_info("%s: GP age %lu jiffies\n",
+ __func__, jiffies - rcu_state.gp_start);
+ show_rcu_gp_kthreads();
+ } else {
+ pr_info("%s: Last GP end %lu jiffies ago\n",
+ __func__, jiffies - rcu_state.gp_end);
+ preempt_disable();
+ rdp = this_cpu_ptr(&rcu_data);
+ rcu_check_gp_start_stall(rdp->mynode, rdp, j);
+ preempt_enable();
+ }
+ for_each_possible_cpu(cpu) {
+ cbs = rcu_get_n_cbs_cpu(cpu);
+ if (!cbs)
+ continue;
+ if (max_cpu < 0)
+ pr_info("%s: callbacks", __func__);
+ pr_cont(" %d: %lu", cpu, cbs);
+ if (cbs <= max_cbs)
+ continue;
+ max_cbs = cbs;
+ max_cpu = cpu;
+ }
+ if (max_cpu >= 0)
+ pr_cont("\n");
+}
+EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);
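rcu_fwd_progress_check() is exported for rcutorture's OOM path; the wiring presumably resembles the following sketch (my_oom_nb and the 30-second budget are illustrative, not part of this patch):

#include <linux/notifier.h>
#include <linux/oom.h>

static int my_oom_notify(struct notifier_block *nb, unsigned long unused,
			 void *arg)
{
	rcu_fwd_progress_check(30 * HZ);	/* Expect progress within ~30s. */
	return NOTIFY_OK;
}

static struct notifier_block my_oom_nb = {
	.notifier_call = my_oom_notify,
};

/* register_oom_notifier(&my_oom_nb) would be called during init. */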
+
+/*
* This does the RCU core processing work for the specified rcu_data
* structures. This may be called only from the CPU to whom the rdp
* belongs.
@@ -2690,7 +2747,7 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
local_irq_restore(flags);
}
- rcu_check_gp_start_stall(rnp, rdp);
+ rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
/* If there are callbacks ready, invoke them. */
if (rcu_segcblist_ready_cbs(&rdp->cblist))
@@ -2826,7 +2883,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, int cpu, bool lazy)
* Very early boot, before rcu_init(). Initialize if needed
* and then drop through to queue the callback.
*/
- BUG_ON(cpu != -1);
+ WARN_ON_ONCE(cpu != -1);
WARN_ON_ONCE(!rcu_is_watching());
if (rcu_segcblist_empty(&rdp->cblist))
rcu_segcblist_init(&rdp->cblist);
@@ -3485,7 +3542,8 @@ static int __init rcu_spawn_gp_kthread(void)
rcu_scheduler_fully_active = 1;
t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
- BUG_ON(IS_ERR(t));
+ if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
+ return 0;
rnp = rcu_get_root();
raw_spin_lock_irqsave_rcu_node(rnp, flags);
rcu_state.gp_kthread = t;
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index 703e19ff532d..d90b02b53c0e 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -57,7 +57,7 @@ struct rcu_node {
/* some rcu_state fields as well as */
/* following. */
unsigned long gp_seq; /* Track rsp->rcu_gp_seq. */
- unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */
+ unsigned long gp_seq_needed; /* Track furthest future GP request. */
unsigned long completedqs; /* All QSes done for this node. */
unsigned long qsmask; /* CPUs or groups that need to switch in */
/* order for current grace period to proceed.*/
@@ -163,7 +163,7 @@ union rcu_noqs {
struct rcu_data {
/* 1) quiescent-state and grace-period handling : */
unsigned long gp_seq; /* Track rsp->rcu_gp_seq counter. */
- unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed ctr. */
+ unsigned long gp_seq_needed; /* Track furthest future GP request. */
union rcu_noqs cpu_no_qs; /* No QSes yet for this CPU. */
bool core_needs_qs; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */
@@ -328,6 +328,8 @@ struct rcu_state {
/* force_quiescent_state(). */
unsigned long gp_start; /* Time at which GP started, */
/* but in jiffies. */
+ unsigned long gp_end; /* Time last GP ended, again */
+ /* in jiffies. */
unsigned long gp_activity; /* Time of last GP kthread */
/* activity in jiffies. */
unsigned long gp_req_activity; /* Time of last GP request */
@@ -398,17 +400,6 @@ static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
#define RCU_NAME rcu_name
#endif /* #else #ifdef CONFIG_TRACING */
-/*
- * RCU implementation internal declarations:
- */
-extern struct rcu_state rcu_sched_state;
-
-extern struct rcu_state rcu_bh_state;
-
-#ifdef CONFIG_PREEMPT_RCU
-extern struct rcu_state rcu_preempt_state;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
-
int rcu_dynticks_snap(struct rcu_data *rdp);
#ifdef CONFIG_RCU_BOOST
@@ -466,6 +457,7 @@ static void __init rcu_spawn_nocb_kthreads(void);
static void __init rcu_organize_nocb_kthreads(void);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static bool init_nocb_callback_list(struct rcu_data *rdp);
+static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp);
static void rcu_bind_gp_kthread(void);
static bool rcu_nohz_full_cpu(void);
static void rcu_dynticks_task_enter(void);
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index 8d18c1014e2b..928fe5893a57 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -450,10 +450,12 @@ static void sync_rcu_exp_select_cpus(smp_call_func_t func)
}
INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
preempt_disable();
- cpu = cpumask_next(rnp->grplo - 1, cpu_online_mask);
+ cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, 0);
/* If all offline, queue the work on an unbound CPU. */
- if (unlikely(cpu > rnp->grphi))
+ if (unlikely(cpu > rnp->grphi - rnp->grplo))
cpu = WORK_CPU_UNBOUND;
+ else
+ cpu += rnp->grplo;
queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
preempt_enable();
rnp->exp_need_flush = true;
@@ -690,8 +692,10 @@ static void sync_rcu_exp_handler(void *unused)
*/
if (t->rcu_read_lock_nesting > 0) {
raw_spin_lock_irqsave_rcu_node(rnp, flags);
- if (rnp->expmask & rdp->grpmask)
+ if (rnp->expmask & rdp->grpmask) {
rdp->deferred_qs = true;
+ WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
+ }
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 05915e536336..1b3dd2fc0cd6 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -397,6 +397,11 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
return rnp->gp_tasks != NULL;
}
+/* Bias and limit values for ->rcu_read_lock_nesting. */
+#define RCU_NEST_BIAS INT_MAX
+#define RCU_NEST_NMAX (-INT_MAX / 2)
+#define RCU_NEST_PMAX (INT_MAX / 2)
+
/*
* Preemptible RCU implementation for rcu_read_lock().
* Just increment ->rcu_read_lock_nesting, shared state will be updated
@@ -405,6 +410,8 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
void __rcu_read_lock(void)
{
current->rcu_read_lock_nesting++;
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+ WARN_ON_ONCE(current->rcu_read_lock_nesting > RCU_NEST_PMAX);
barrier(); /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
@@ -424,20 +431,18 @@ void __rcu_read_unlock(void)
--t->rcu_read_lock_nesting;
} else {
barrier(); /* critical section before exit code. */
- t->rcu_read_lock_nesting = INT_MIN;
+ t->rcu_read_lock_nesting = -RCU_NEST_BIAS;
barrier(); /* assign before ->rcu_read_unlock_special load */
if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
rcu_read_unlock_special(t);
barrier(); /* ->rcu_read_unlock_special load before assign */
t->rcu_read_lock_nesting = 0;
}
-#ifdef CONFIG_PROVE_LOCKING
- {
- int rrln = READ_ONCE(t->rcu_read_lock_nesting);
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
+ int rrln = t->rcu_read_lock_nesting;
- WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+ WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
}
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
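The RCU_NEST_BIAS dance parks ->rcu_read_lock_nesting at a large negative value while the outermost unlock runs its special processing, so an rcu_read_lock()/rcu_read_unlock() pair arriving from an interrupt in that window merely increments and decrements the parked value instead of re-entering the outermost path. A toy model of the counter math (user-space, single-threaded, purely illustrative):

#include <assert.h>
#include <limits.h>

#define NEST_BIAS INT_MAX

static int nesting;	/* Models ->rcu_read_lock_nesting. */

static void toy_lock(void) { nesting++; }

static void toy_unlock(void)
{
	if (nesting != 1) {
		nesting--;		/* Ordinary nested exit. */
		return;
	}
	nesting = -NEST_BIAS;		/* Park: outermost exit in progress. */
	toy_lock();			/* An "interrupt-time" pair now just */
	toy_unlock();			/* nudges the bias up and back down. */
	assert(nesting == -NEST_BIAS);
	nesting = 0;			/* Fully unlocked again. */
}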
@@ -597,7 +602,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
*/
static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
- return (this_cpu_ptr(&rcu_data)->deferred_qs ||
+ return (__this_cpu_read(rcu_data.deferred_qs) ||
READ_ONCE(t->rcu_read_unlock_special.s)) &&
t->rcu_read_lock_nesting <= 0;
}
@@ -617,11 +622,11 @@ static void rcu_preempt_deferred_qs(struct task_struct *t)
if (!rcu_preempt_need_deferred_qs(t))
return;
if (couldrecurse)
- t->rcu_read_lock_nesting -= INT_MIN;
+ t->rcu_read_lock_nesting -= RCU_NEST_BIAS;
local_irq_save(flags);
rcu_preempt_deferred_qs_irqrestore(t, flags);
if (couldrecurse)
- t->rcu_read_lock_nesting += INT_MIN;
+ t->rcu_read_lock_nesting += RCU_NEST_BIAS;
}
/*
@@ -642,13 +647,21 @@ static void rcu_read_unlock_special(struct task_struct *t)
local_irq_save(flags);
irqs_were_disabled = irqs_disabled_flags(flags);
- if ((preempt_bh_were_disabled || irqs_were_disabled) &&
- t->rcu_read_unlock_special.b.blocked) {
+ if (preempt_bh_were_disabled || irqs_were_disabled) {
+ WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
/* Need to defer quiescent state until everything is enabled. */
- raise_softirq_irqoff(RCU_SOFTIRQ);
+ if (irqs_were_disabled) {
+ /* Enabling irqs does not reschedule, so... */
+ raise_softirq_irqoff(RCU_SOFTIRQ);
+ } else {
+ /* Enabling BH or preempt does reschedule, so... */
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+ }
local_irq_restore(flags);
return;
}
+ WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, false);
rcu_preempt_deferred_qs_irqrestore(t, flags);
}
@@ -1464,7 +1477,8 @@ static void __init rcu_spawn_boost_kthreads(void)
for_each_possible_cpu(cpu)
per_cpu(rcu_cpu_has_work, cpu) = 0;
- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
+ if (WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec), "%s: Could not start rcub kthread, OOM is now expected behavior\n", __func__))
+ return;
rcu_for_each_leaf_node(rnp)
(void)rcu_spawn_one_boost_kthread(rnp);
}
@@ -1997,7 +2011,7 @@ static bool rcu_nocb_cpu_needs_barrier(int cpu)
* (if a callback is in fact needed). This is associated with an
* atomic_inc() in the caller.
*/
- ret = atomic_long_read(&rdp->nocb_q_count);
+ ret = rcu_get_n_cbs_nocb_cpu(rdp);
#ifdef CONFIG_PROVE_RCU
rhp = READ_ONCE(rdp->nocb_head);
@@ -2052,7 +2066,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
TPS("WakeNotPoll"));
return;
}
- len = atomic_long_read(&rdp->nocb_q_count);
+ len = rcu_get_n_cbs_nocb_cpu(rdp);
if (old_rhpp == &rdp->nocb_head) {
if (!irqs_disabled_flags(flags)) {
/* ... if queue was empty ... */
@@ -2101,11 +2115,11 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
trace_rcu_kfree_callback(rcu_state.name, rhp,
(unsigned long)rhp->func,
-atomic_long_read(&rdp->nocb_q_count_lazy),
- -atomic_long_read(&rdp->nocb_q_count));
+ -rcu_get_n_cbs_nocb_cpu(rdp));
else
trace_rcu_callback(rcu_state.name, rhp,
-atomic_long_read(&rdp->nocb_q_count_lazy),
- -atomic_long_read(&rdp->nocb_q_count));
+ -rcu_get_n_cbs_nocb_cpu(rdp));
/*
* If called from an extended quiescent state with interrupts
@@ -2322,13 +2336,14 @@ static int rcu_nocb_kthread(void *arg)
tail = rdp->nocb_follower_tail;
rdp->nocb_follower_tail = &rdp->nocb_follower_head;
raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
- BUG_ON(!list);
+ if (WARN_ON_ONCE(!list))
+ continue;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeNonEmpty"));
/* Each pass through the following loop invokes a callback. */
trace_rcu_batch_start(rcu_state.name,
atomic_long_read(&rdp->nocb_q_count_lazy),
- atomic_long_read(&rdp->nocb_q_count), -1);
+ rcu_get_n_cbs_nocb_cpu(rdp), -1);
c = cl = 0;
while (list) {
next = list->next;
@@ -2495,7 +2510,8 @@ static void rcu_spawn_one_nocb_kthread(int cpu)
/* Spawn the kthread for this CPU. */
t = kthread_run(rcu_nocb_kthread, rdp_spawn,
"rcuo%c/%d", rcu_state.abbr, cpu);
- BUG_ON(IS_ERR(t));
+ if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo kthread, OOM is now expected behavior\n", __func__))
+ return;
WRITE_ONCE(rdp_spawn->nocb_kthread, t);
}
@@ -2587,6 +2603,26 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
return true;
}
+/*
+ * Bind the current task to the offloaded CPUs. If there are no offloaded
+ * CPUs, leave the task unbound. Splat if the bind attempt fails.
+ */
+void rcu_bind_current_to_nocb(void)
+{
+ if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
+ WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
+}
+EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
+
+/*
+ * Return the number of RCU callbacks still queued from the specified
+ * CPU, which must be a nocbs CPU.
+ */
+static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
+{
+ return atomic_long_read(&rdp->nocb_q_count);
+}
+
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static bool rcu_nocb_cpu_needs_barrier(int cpu)
@@ -2647,6 +2683,11 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
return false;
}
+static unsigned long rcu_get_n_cbs_nocb_cpu(struct rcu_data *rdp)
+{
+ return 0;
+}
+
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
/*
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index f203b94f6b5b..1971869c4072 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -335,8 +335,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
/* Initialize and register callbacks for each crcu_array element. */
for (i = 0; i < n; i++) {
if (checktiny &&
- (crcu_array[i] == call_rcu ||
- crcu_array[i] == call_rcu_bh)) {
+ (crcu_array[i] == call_rcu)) {
might_sleep();
continue;
}
@@ -352,8 +351,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
/* Wait for all callbacks to be invoked. */
for (i = 0; i < n; i++) {
if (checktiny &&
- (crcu_array[i] == call_rcu ||
- crcu_array[i] == call_rcu_bh))
+ (crcu_array[i] == call_rcu))
continue;
for (j = 0; j < i; j++)
if (crcu_array[j] == crcu_array[i])
@@ -822,7 +820,8 @@ static int __init rcu_spawn_tasks_kthread(void)
struct task_struct *t;
t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
- BUG_ON(IS_ERR(t));
+ if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
+ return 0;
smp_mb(); /* Ensure others see full kthread. */
WRITE_ONCE(rcu_tasks_kthread_ptr, t);
return 0;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 091e089063be..f66920173370 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -697,7 +697,7 @@ static void set_load_weight(struct task_struct *p, bool update_load)
/*
* SCHED_IDLE tasks get minimal weight:
*/
- if (idle_policy(p->policy)) {
+ if (task_has_idle_policy(p)) {
load->weight = scale_load(WEIGHT_IDLEPRIO);
load->inv_weight = WMULT_IDLEPRIO;
p->se.runnable_weight = load->weight;
@@ -2857,7 +2857,7 @@ unsigned long nr_running(void)
* preemption, thus the result might have a time-of-check-to-time-of-use
* race. The caller is responsible to use it correctly, for example:
*
- * - from a non-preemptable section (of course)
+ * - from a non-preemptible section (of course)
*
* - from a thread that is bound to a single CPU
*
@@ -4191,7 +4191,7 @@ recheck:
* Treat SCHED_IDLE as nice 20. Only allow a switch to
* SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
*/
- if (idle_policy(p->policy) && !idle_policy(policy)) {
+ if (task_has_idle_policy(p) && !idle_policy(policy)) {
if (!can_nice(p, task_nice(p)))
return -EPERM;
}
@@ -5738,15 +5738,10 @@ int sched_cpu_activate(unsigned int cpu)
#ifdef CONFIG_SCHED_SMT
/*
- * The sched_smt_present static key needs to be evaluated on every
- * hotplug event because at boot time SMT might be disabled when
- * the number of booted CPUs is limited.
- *
- * If then later a sibling gets hotplugged, then the key would stay
- * off and SMT scheduling would never be functional.
+ * When going up, increment the number of cores with SMT present.
*/
- if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
- static_branch_enable_cpuslocked(&sched_smt_present);
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ static_branch_inc_cpuslocked(&sched_smt_present);
#endif
set_cpu_active(cpu, true);
@@ -5788,7 +5783,15 @@ int sched_cpu_deactivate(unsigned int cpu)
*
* Do sync before parking smpboot threads to take care of the RCU boost case.
*/
- synchronize_rcu_mult(call_rcu, call_rcu_sched);
+ synchronize_rcu();
+
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * When going down, decrement the number of cores with SMT present.
+ */
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
if (!sched_smp_initialized)
return 0;
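Switching from static_branch_enable() to matched inc/dec makes sched_smt_present a reference count over cores that currently have at least two SMT siblings online; the weight-equals-2 test fires exactly on each core's 1-to-2 and 2-to-1 sibling transitions. The counting logic modeled in plain C (names hypothetical):

static int smt_cores;	/* Cores with more than one sibling online. */
static int smt_present;	/* Models the static key's enabled state. */

static void sibling_came_up(int siblings_now_online)
{
	if (siblings_now_online == 2 && smt_cores++ == 0)
		smt_present = 1;	/* First SMT-active core: key on. */
}

static void sibling_went_down(int siblings_still_online)
{
	if (siblings_still_online == 2 && --smt_cores == 0)
		smt_present = 0;	/* Last SMT-active core: key off. */
}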
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 5e54cbcae673..22bd8980f32f 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Scheduler code and data structures related to cpufreq.
*
* Copyright (C) 2016, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include "sched.h"
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3fffad3bc8a8..033ec7c45f13 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -1,18 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CPUFreq governor based on scheduler-provided CPU utilization data.
*
* Copyright (C) 2016, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "sched.h"
+#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>
struct sugov_tunables {
@@ -167,7 +165,7 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
unsigned int freq = arch_scale_freq_invariant() ?
policy->cpuinfo.max_freq : policy->cur;
- freq = (freq + (freq >> 2)) * util / max;
+ freq = map_util_freq(util, freq, max);
if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
return sg_policy->next_freq;
@@ -197,15 +195,13 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
* based on the task model parameters and gives the minimal utilization
* required to meet deadlines.
*/
-static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
+unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
+ unsigned long max, enum schedutil_type type)
{
- struct rq *rq = cpu_rq(sg_cpu->cpu);
- unsigned long util, irq, max;
-
- sg_cpu->max = max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
- sg_cpu->bw_dl = cpu_bw_dl(rq);
+ unsigned long dl_util, util, irq;
+ struct rq *rq = cpu_rq(cpu);
- if (rt_rq_is_runnable(&rq->rt))
+ if (type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt))
return max;
/*
@@ -223,22 +219,31 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
* utilization (PELT windows are synchronized) we can directly add them
* to obtain the CPU's actual utilization.
*/
- util = cpu_util_cfs(rq);
+ util = util_cfs;
util += cpu_util_rt(rq);
+ dl_util = cpu_util_dl(rq);
+
/*
- * We do not make cpu_util_dl() a permanent part of this sum because we
- * want to use cpu_bw_dl() later on, but we need to check if the
- * CFS+RT+DL sum is saturated (ie. no idle time) such that we select
- * f_max when there is no idle time.
+ * For frequency selection we do not make cpu_util_dl() a permanent part
+ * of this sum because we want to use cpu_bw_dl() later on, but we need
+ * to check if the CFS+RT+DL sum is saturated (ie. no idle time) such
+ * that we select f_max when there is no idle time.
*
* NOTE: numerical errors or stop class might cause us to not quite hit
* saturation when we should -- something for later.
*/
- if ((util + cpu_util_dl(rq)) >= max)
+ if (util + dl_util >= max)
return max;
/*
+ * OTOH, for energy computation we need the estimated running time, so
+ * include util_dl and ignore dl_bw.
+ */
+ if (type == ENERGY_UTIL)
+ util += dl_util;
+
+ /*
* There is still idle time; further improve the number by using the
* irq metric. Because IRQ/steal time is hidden from the task clock we
* need to scale the task numbers:
@@ -260,7 +265,22 @@ static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
* bw_dl as requested freq. However, cpufreq is not yet ready for such
* an interface. So, we only do the latter for now.
*/
- return min(max, util + sg_cpu->bw_dl);
+ if (type == FREQUENCY_UTIL)
+ util += cpu_bw_dl(rq);
+
+ return min(max, util);
+}
+
+static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
+{
+ struct rq *rq = cpu_rq(sg_cpu->cpu);
+ unsigned long util = cpu_util_cfs(rq);
+ unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+
+ sg_cpu->max = max;
+ sg_cpu->bw_dl = cpu_bw_dl(rq);
+
+ return schedutil_freq_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL);
}
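schedutil_freq_util() now serves two consumers: frequency selection adds the reserved deadline bandwidth, while energy estimation adds actual deadline running time. With the irq scaling stripped out, the aggregation reduces to this plain-C sketch (names hypothetical):

static unsigned long aggregate_util(unsigned long cfs, unsigned long rt,
				    unsigned long dl_running,
				    unsigned long dl_bandwidth,
				    unsigned long max, int for_frequency)
{
	unsigned long util = cfs + rt;

	if (util + dl_running >= max)
		return max;		/* Saturated, no idle time: pick f_max. */
	if (for_frequency)
		util += dl_bandwidth;	/* FREQUENCY_UTIL: honor DL reservations. */
	else
		util += dl_running;	/* ENERGY_UTIL: estimated running time. */
	return util < max ? util : max;
}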
/**
@@ -601,7 +621,7 @@ static struct kobj_type sugov_tunables_ktype = {
/********************** cpufreq governor interface *********************/
-static struct cpufreq_governor schedutil_gov;
+struct cpufreq_governor schedutil_gov;
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
@@ -860,7 +880,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
sg_policy->need_freq_update = true;
}
-static struct cpufreq_governor schedutil_gov = {
+struct cpufreq_governor schedutil_gov = {
.name = "schedutil",
.owner = THIS_MODULE,
.dynamic_switching = true,
@@ -883,3 +903,36 @@ static int __init sugov_register(void)
return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);
+
+#ifdef CONFIG_ENERGY_MODEL
+extern bool sched_energy_update;
+extern struct mutex sched_energy_mutex;
+
+static void rebuild_sd_workfn(struct work_struct *work)
+{
+ mutex_lock(&sched_energy_mutex);
+ sched_energy_update = true;
+ rebuild_sched_domains();
+ sched_energy_update = false;
+ mutex_unlock(&sched_energy_mutex);
+}
+static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
+
+/*
+ * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
+ * on governor changes to make sure the scheduler knows about it.
+ */
+void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
+ struct cpufreq_governor *old_gov)
+{
+ if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
+ /*
+ * When called from the cpufreq_register_driver() path, the
+ * cpu_hotplug_lock is already held, so use a work item to
+ * avoid nested locking in rebuild_sched_domains().
+ */
+ schedule_work(&rebuild_sd_work);
+ }
+}
+#endif
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 0796f938c4f0..ba4a143bdcf3 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -525,7 +525,7 @@ void account_idle_ticks(unsigned long ticks)
/*
* Perform (stime * rtime) / total, but avoid multiplication overflow by
- * loosing precision when the numbers are big.
+ * losing precision when the numbers are big.
*/
static u64 scale_stime(u64 stime, u64 rtime, u64 total)
{
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 91e4202b0634..fb8b7b5d745d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -727,7 +727,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
* refill the runtime and set the deadline a period in the future,
* because keeping the current (absolute) deadline of the task would
* result in breaking guarantees promised to other tasks (refer to
- * Documentation/scheduler/sched-deadline.txt for more informations).
+ * Documentation/scheduler/sched-deadline.txt for more information).
*
* This function returns true if:
*
@@ -1695,6 +1695,14 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
}
#endif
+static inline void set_next_task(struct rq *rq, struct task_struct *p)
+{
+ p->se.exec_start = rq_clock_task(rq);
+
+ /* You can't push away the running task */
+ dequeue_pushable_dl_task(rq, p);
+}
+
static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
struct dl_rq *dl_rq)
{
@@ -1750,10 +1758,8 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
BUG_ON(!dl_se);
p = dl_task_of(dl_se);
- p->se.exec_start = rq_clock_task(rq);
- /* Running task will never be pushed. */
- dequeue_pushable_dl_task(rq, p);
+ set_next_task(rq, p);
if (hrtick_enabled(rq))
start_hrtick_dl(rq, p);
@@ -1808,12 +1814,7 @@ static void task_fork_dl(struct task_struct *p)
static void set_curr_task_dl(struct rq *rq)
{
- struct task_struct *p = rq->curr;
-
- p->se.exec_start = rq_clock_task(rq);
-
- /* You can't push away the running task */
- dequeue_pushable_dl_task(rq, p);
+ set_next_task(rq, rq->curr);
}
#ifdef CONFIG_SMP
@@ -2041,10 +2042,8 @@ static int push_dl_task(struct rq *rq)
return 0;
retry:
- if (unlikely(next_task == rq->curr)) {
- WARN_ON(1);
+ if (WARN_ON(next_task == rq->curr))
return 0;
- }
/*
* If next_task preempts rq->curr, and rq->curr
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 6383aa6a60ca..02bd5f969b21 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -974,7 +974,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
#endif
P(policy);
P(prio);
- if (p->policy == SCHED_DEADLINE) {
+ if (task_has_dl_policy(p)) {
P(dl.runtime);
P(dl.deadline);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ac855b2f4774..d1907506318a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -38,7 +38,7 @@
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
*/
unsigned int sysctl_sched_latency = 6000000ULL;
-unsigned int normalized_sysctl_sched_latency = 6000000ULL;
+static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
/*
* The initial- and re-scaling of tunables is configurable
@@ -58,8 +58,8 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_L
*
* (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_min_granularity = 750000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
+unsigned int sysctl_sched_min_granularity = 750000ULL;
+static unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
/*
* This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
@@ -81,8 +81,8 @@ unsigned int sysctl_sched_child_runs_first __read_mostly;
*
* (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
-unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
+unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
+static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
@@ -94,6 +94,14 @@ int __weak arch_asym_cpu_priority(int cpu)
{
return -cpu;
}
+
+/*
+ * The margin used when comparing utilization with CPU capacity:
+ * util * margin < capacity * 1024
+ *
+ * (default: ~20%)
+ */
+static unsigned int capacity_margin = 1280;
#endif
#ifdef CONFIG_CFS_BANDWIDTH
@@ -110,14 +118,6 @@ int __weak arch_asym_cpu_priority(int cpu)
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif
-/*
- * The margin used when comparing utilization with CPU capacity:
- * util * margin < capacity * 1024
- *
- * (default: ~20%)
- */
-unsigned int capacity_margin = 1280;
-
static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
lw->weight += inc;
@@ -703,9 +703,9 @@ void init_entity_runnable_average(struct sched_entity *se)
memset(sa, 0, sizeof(*sa));
/*
- * Tasks are intialized with full load to be seen as heavy tasks until
+ * Tasks are initialized with full load to be seen as heavy tasks until
* they get a chance to stabilize to their real load level.
- * Group entities are intialized with zero load to reflect the fact that
+ * Group entities are initialized with zero load to reflect the fact that
* nothing has been attached to the task group yet.
*/
if (entity_is_task(se))
@@ -2734,6 +2734,17 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
WRITE_ONCE(*ptr, res); \
} while (0)
+/*
+ * Remove a value from a local variable, clamping the result at zero.
+ *
+ * A variant of sub_positive(), which does not use explicit load-store
+ * and is thus optimized for local variable updates.
+ */
+#define lsub_positive(_ptr, _val) do { \
+ typeof(_ptr) ptr = (_ptr); \
+ *ptr -= min_t(typeof(*ptr), *ptr, _val); \
+} while (0)
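/*
 * Illustrative demo (userspace, not kernel code) of why the clamp matters:
 * on unsigned types a plain subtraction would wrap around on underflow and
 * report an enormous value instead of zero. min_t() is kernel-only, so the
 * sketch open-codes it with a ternary.
 */
#include <stdio.h>

#define lsub_positive_sketch(_ptr, _val) do {                   \
        typeof(_ptr) ptr = (_ptr);                              \
        *ptr -= (*ptr < (_val) ? *ptr : (_val));                \
} while (0)

int main(void)
{
        unsigned long runtime = 100;

        lsub_positive_sketch(&runtime, 250);
        printf("%lu\n", runtime);       /* 0, not a wrapped-around huge value */
        return 0;
}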
+
#ifdef CONFIG_SMP
static inline void
enqueue_runnable_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -3604,7 +3615,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
{
struct util_est ue = READ_ONCE(p->se.avg.util_est);
- return max(ue.ewma, ue.enqueued);
+ return (max(ue.ewma, ue.enqueued) | UTIL_AVG_UNCHANGED);
}
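/*
 * Note on the change above (illustrative, based on the flag's use in
 * pelt.h): UTIL_AVG_UNCHANGED is the low-order bit of the util_est sample,
 * reused as a "signal unchanged since last update" marker. Setting it
 * centrally in _task_util_est() lets the enqueue/dequeue call sites below
 * drop their own OR-ing of the flag, so every reader sees the same,
 * consistently flagged value.
 */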
static inline unsigned long task_util_est(struct task_struct *p)
@@ -3622,7 +3633,7 @@ static inline void util_est_enqueue(struct cfs_rq *cfs_rq,
/* Update root cfs_rq's estimated utilization */
enqueued = cfs_rq->avg.util_est.enqueued;
- enqueued += (_task_util_est(p) | UTIL_AVG_UNCHANGED);
+ enqueued += _task_util_est(p);
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, enqueued);
}
@@ -3650,8 +3661,7 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
/* Update root cfs_rq's estimated utilization */
ue.enqueued = cfs_rq->avg.util_est.enqueued;
- ue.enqueued -= min_t(unsigned int, ue.enqueued,
- (_task_util_est(p) | UTIL_AVG_UNCHANGED));
+ ue.enqueued -= min_t(unsigned int, ue.enqueued, _task_util_est(p));
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
/*
@@ -3966,8 +3976,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
/*
* When dequeuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
- * - Substract its load from the cfs_rq->runnable_avg.
- * - Substract its previous weight from cfs_rq->load.weight.
+ * - Subtract its load from the cfs_rq->runnable_avg.
+ * - Subtract its previous weight from cfs_rq->load.weight.
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
*/
@@ -4640,7 +4650,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
cfs_b->distribute_running = 0;
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
- cfs_b->runtime -= min(runtime, cfs_b->runtime);
+ lsub_positive(&cfs_b->runtime, runtime);
}
/*
@@ -4774,7 +4784,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
raw_spin_lock(&cfs_b->lock);
if (expires == cfs_b->runtime_expires)
- cfs_b->runtime -= min(runtime, cfs_b->runtime);
+ lsub_positive(&cfs_b->runtime, runtime);
cfs_b->distribute_running = 0;
raw_spin_unlock(&cfs_b->lock);
}
@@ -5072,6 +5082,24 @@ static inline void hrtick_update(struct rq *rq)
}
#endif
+#ifdef CONFIG_SMP
+static inline unsigned long cpu_util(int cpu);
+static unsigned long capacity_of(int cpu);
+
+static inline bool cpu_overutilized(int cpu)
+{
+ return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+}
+
+static inline void update_overutilized_status(struct rq *rq)
+{
+ if (!READ_ONCE(rq->rd->overutilized) && cpu_overutilized(rq->cpu))
+ WRITE_ONCE(rq->rd->overutilized, SG_OVERUTILIZED);
+}
+#else
+static inline void update_overutilized_status(struct rq *rq) { }
+#endif
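/*
 * Illustrative sketch (userspace, hypothetical helper) of the threshold
 * cpu_overutilized() encodes: with capacity_margin = 1280, a CPU trips the
 * check once utilization exceeds 1024/1280, i.e. 80% of its capacity.
 */
#include <assert.h>
#include <stdbool.h>

static bool overutilized_sketch(unsigned long capacity, unsigned long util)
{
        return capacity * 1024 < util * 1280;   /* same test, explicit margin */
}

int main(void)
{
        assert( overutilized_sketch(1024, 900));        /* 900 > 819 (80% of 1024) */
        assert(!overutilized_sketch(1024, 800));        /* 800 < 819: within margin */
        return 0;
}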
+
/*
* The enqueue_task method is called before nr_running is
* increased. Here we update the fair scheduling stats and
@@ -5129,8 +5157,26 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
update_cfs_group(se);
}
- if (!se)
+ if (!se) {
add_nr_running(rq, 1);
+ /*
+ * Since new tasks are assigned an initial util_avg equal to
+ * half of the spare capacity of their CPU, tiny tasks have the
+ * ability to cross the overutilized threshold, which will
+ * result in the load balancer ruining all the task placement
+ * done by EAS. As a way to mitigate that effect, do not account
+ * for the first enqueue operation of new tasks during the
+ * overutilized flag detection.
+ *
+ * A better way of solving this problem would be to wait for
+ * the PELT signals of tasks to converge before taking them
+ * into account, but that is not straightforward to implement,
+ * and the following generally works well enough in practice.
+ */
+ if (flags & ENQUEUE_WAKEUP)
+ update_overutilized_status(rq);
+
+ }
hrtick_update(rq);
}
@@ -6241,7 +6287,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
util = READ_ONCE(cfs_rq->avg.util_avg);
/* Discount task's util from CPU's util */
- util -= min_t(unsigned int, util, task_util(p));
+ lsub_positive(&util, task_util(p));
/*
* Covered cases:
@@ -6290,10 +6336,9 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
* properly fix the execl regression and it helps in further
* reducing the chances for the above race.
*/
- if (unlikely(task_on_rq_queued(p) || current == p)) {
- estimated -= min_t(unsigned int, estimated,
- (_task_util_est(p) | UTIL_AVG_UNCHANGED));
- }
+ if (unlikely(task_on_rq_queued(p) || current == p))
+ lsub_positive(&estimated, _task_util_est(p));
+
util = max(util, estimated);
}
@@ -6333,6 +6378,213 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
}
/*
+ * Predicts what cpu_util(@cpu) would return if @p was migrated (and enqueued)
+ * to @dst_cpu.
+ */
+static unsigned long cpu_util_next(int cpu, struct task_struct *p, int dst_cpu)
+{
+ struct cfs_rq *cfs_rq = &cpu_rq(cpu)->cfs;
+ unsigned long util_est, util = READ_ONCE(cfs_rq->avg.util_avg);
+
+ /*
+ * If @p migrates from @cpu to another, remove its contribution. Or,
+ * if @p migrates from another CPU to @cpu, add its contribution. In
+ * the other cases, @cpu is not impacted by the migration, so the
+ * util_avg should already be correct.
+ */
+ if (task_cpu(p) == cpu && dst_cpu != cpu)
+ sub_positive(&util, task_util(p));
+ else if (task_cpu(p) != cpu && dst_cpu == cpu)
+ util += task_util(p);
+
+ if (sched_feat(UTIL_EST)) {
+ util_est = READ_ONCE(cfs_rq->avg.util_est.enqueued);
+
+ /*
+ * During wake-up, the task isn't enqueued yet and doesn't
+ * appear in the cfs_rq->avg.util_est.enqueued of any rq,
+ * so just add it (if needed) to "simulate" what will be
+ * cpu_util() after the task has been enqueued.
+ */
+ if (dst_cpu == cpu)
+ util_est += _task_util_est(p);
+
+ util = max(util, util_est);
+ }
+
+ return min(util, capacity_orig_of(cpu));
+}
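/*
 * Worked example of the cases above, with hypothetical numbers:
 * task_util(p) = 100 and cfs util_avg = 300 on CPU2. Then
 *   cpu_util_next(2, p, 5) with task_cpu(p) == 2 -> 300 - 100 = 200 (p leaves),
 *   cpu_util_next(2, p, 2) with task_cpu(p) == 5 -> 300 + 100 = 400 (p arrives),
 *   cpu_util_next(2, p, 5) with task_cpu(p) == 7 -> 300 (CPU2 not involved),
 * each result finally clamped to capacity_orig_of(2).
 */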
+
+/*
+ * compute_energy(): Estimates the energy that would be consumed if @p was
+ * migrated to @dst_cpu. compute_energy() predicts what will be the utilization
+ * landscape of the CPUs after the task migration, and uses the Energy Model
+ * to compute what would be the energy if we decided to actually migrate that
+ * task.
+ */
+static long
+compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
+{
+ long util, max_util, sum_util, energy = 0;
+ int cpu;
+
+ for (; pd; pd = pd->next) {
+ max_util = sum_util = 0;
+ /*
+ * The capacity state of CPUs of the current rd can be driven by
+ * CPUs of another rd if they belong to the same performance
+ * domain. So, account for the utilization of these CPUs too
+ * by masking pd with cpu_online_mask instead of the rd span.
+ *
+ * If an entire performance domain is outside of the current rd,
+ * it will not appear in its pd list and will not be accounted
+ * by compute_energy().
+ */
+ for_each_cpu_and(cpu, perf_domain_span(pd), cpu_online_mask) {
+ util = cpu_util_next(cpu, p, dst_cpu);
+ util = schedutil_energy_util(cpu, util);
+ max_util = max(util, max_util);
+ sum_util += util;
+ }
+
+ energy += em_pd_energy(pd->em_pd, max_util, sum_util);
+ }
+
+ return energy;
+}
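/*
 * Illustrative sketch of the estimate em_pd_energy() makes (simplified;
 * the real helper lives in the Energy Model framework and the cost table
 * here is hypothetical). CPUs of a performance domain share one frequency,
 * so the busiest CPU picks the capacity state, and energy then scales with
 * the summed utilization of the domain.
 */
struct cs_sketch { unsigned long cap; unsigned long cost; };

/* 'table' must be sorted by ascending capacity */
static unsigned long pd_energy_sketch(const struct cs_sketch *table, int nr,
                                      unsigned long max_util,
                                      unsigned long sum_util,
                                      unsigned long scale_cpu)
{
        int i;

        /* Lowest capacity state able to serve the busiest CPU */
        for (i = 0; i < nr - 1; i++)
                if (table[i].cap >= max_util)
                        break;

        return table[i].cost * sum_util / scale_cpu;
}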
+
+/*
+ * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
+ * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
+ * spare capacity in each performance domain and uses it as a potential
+ * candidate to execute the task. Then, it uses the Energy Model to figure
+ * out which of the CPU candidates is the most energy-efficient.
+ *
+ * The rationale for this heuristic is as follows. In a performance domain,
+ * all the most energy-efficient CPU candidates (according to the Energy
+ * Model) are those for which we'll request a low frequency. When there are
+ * several CPUs for which the frequency request will be the same, we don't
+ * have enough data to break the tie between them, because the Energy Model
+ * only includes active power costs. With this model, if we assume that
+ * frequency requests follow utilization (e.g. using schedutil), the CPU with
+ * the maximum spare capacity in a performance domain is guaranteed to be among
+ * the best candidates of the performance domain.
+ *
+ * In practice, it could be preferable from an energy standpoint to pack
+ * small tasks on a CPU in order to let other CPUs go in deeper idle states,
+ * but that could also hurt our chances to go cluster idle, and we have no
+ * ways to tell with the current Energy Model if this is actually a good
+ * idea or not. So, find_energy_efficient_cpu() basically favors
+ * cluster-packing, and spreading inside a cluster. That should at least be
+ * a good thing for latency, and this is consistent with the idea that most
+ * of the energy savings of EAS come from the asymmetry of the system, and
+ * not so much from breaking the tie between identical CPUs. That's also the
+ * reason why EAS is enabled in the topology code only for systems where
+ * SD_ASYM_CPUCAPACITY is set.
+ *
+ * NOTE: Forkees are not accepted in the energy-aware wake-up path because
+ * they don't have any useful utilization data yet and it's not possible to
+ * forecast their impact on energy consumption. Consequently, they will be
+ * placed by find_idlest_cpu() on the least loaded CPU, which might turn out
+ * to be energy-inefficient in some use-cases. The alternative would be to
+ * bias new tasks towards specific types of CPUs first, or to try to infer
+ * their util_avg from the parent task, but those heuristics could hurt
+ * other use-cases too. So, until someone finds a better way to solve this,
+ * let's keep things simple by re-using the existing slow path.
+ */
+static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+{
+ unsigned long prev_energy = ULONG_MAX, best_energy = ULONG_MAX;
+ struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+ int cpu, best_energy_cpu = prev_cpu;
+ struct perf_domain *head, *pd;
+ unsigned long cpu_cap, util;
+ struct sched_domain *sd;
+
+ rcu_read_lock();
+ pd = rcu_dereference(rd->pd);
+ if (!pd || READ_ONCE(rd->overutilized))
+ goto fail;
+ head = pd;
+
+ /*
+ * Energy-aware wake-up happens on the lowest sched_domain starting
+ * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
+ */
+ sd = rcu_dereference(*this_cpu_ptr(&sd_asym_cpucapacity));
+ while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
+ sd = sd->parent;
+ if (!sd)
+ goto fail;
+
+ sync_entity_load_avg(&p->se);
+ if (!task_util_est(p))
+ goto unlock;
+
+ for (; pd; pd = pd->next) {
+ unsigned long cur_energy, spare_cap, max_spare_cap = 0;
+ int max_spare_cap_cpu = -1;
+
+ for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
+ if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+ continue;
+
+ /* Skip CPUs that will be overutilized. */
+ util = cpu_util_next(cpu, p, cpu);
+ cpu_cap = capacity_of(cpu);
+ if (cpu_cap * 1024 < util * capacity_margin)
+ continue;
+
+ /* Always use prev_cpu as a candidate. */
+ if (cpu == prev_cpu) {
+ prev_energy = compute_energy(p, prev_cpu, head);
+ best_energy = min(best_energy, prev_energy);
+ continue;
+ }
+
+ /*
+ * Find the CPU with the maximum spare capacity in
+ * the performance domain.
+ */
+ spare_cap = cpu_cap - util;
+ if (spare_cap > max_spare_cap) {
+ max_spare_cap = spare_cap;
+ max_spare_cap_cpu = cpu;
+ }
+ }
+
+ /* Evaluate the energy impact of using this CPU. */
+ if (max_spare_cap_cpu >= 0) {
+ cur_energy = compute_energy(p, max_spare_cap_cpu, head);
+ if (cur_energy < best_energy) {
+ best_energy = cur_energy;
+ best_energy_cpu = max_spare_cap_cpu;
+ }
+ }
+ }
+unlock:
+ rcu_read_unlock();
+
+ /*
+ * Pick the best CPU if prev_cpu cannot be used, or if it saves at
+ * least 6% of the energy used by prev_cpu.
+ */
+ if (prev_energy == ULONG_MAX)
+ return best_energy_cpu;
+
+ if ((prev_energy - best_energy) > (prev_energy >> 4))
+ return best_energy_cpu;
+
+ return prev_cpu;
+
+fail:
+ rcu_read_unlock();
+
+ return -1;
+}
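/*
 * Note on the threshold above: prev_energy >> 4 is prev_energy/16, i.e. a
 * 6.25% margin (the "6%" in the comment). With hypothetical figures
 * prev_energy = 1600 and best_energy = 1480, the saving of 120 exceeds
 * 1600/16 = 100 and the task migrates; a saving of only 90 would not,
 * keeping the task on prev_cpu rather than migrating for marginal gains.
 */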
+
+/*
* select_task_rq_fair: Select target runqueue for the waking task in domains
* that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
* SD_BALANCE_FORK, or SD_BALANCE_EXEC.
@@ -6355,8 +6607,16 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
- want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
- && cpumask_test_cpu(cpu, &p->cpus_allowed);
+
+ if (static_branch_unlikely(&sched_energy_present)) {
+ new_cpu = find_energy_efficient_cpu(p, prev_cpu);
+ if (new_cpu >= 0)
+ return new_cpu;
+ new_cpu = prev_cpu;
+ }
+
+ want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
+ cpumask_test_cpu(cpu, &p->cpus_allowed);
}
rcu_read_lock();
@@ -6520,7 +6780,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
static void set_last_buddy(struct sched_entity *se)
{
- if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
+ if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
return;
for_each_sched_entity(se) {
@@ -6532,7 +6792,7 @@ static void set_last_buddy(struct sched_entity *se)
static void set_next_buddy(struct sched_entity *se)
{
- if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
+ if (entity_is_task(se) && unlikely(task_has_idle_policy(task_of(se))))
return;
for_each_sched_entity(se) {
@@ -6590,8 +6850,8 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;
/* Idle tasks are by definition preempted by non-idle tasks. */
- if (unlikely(curr->policy == SCHED_IDLE) &&
- likely(p->policy != SCHED_IDLE))
+ if (unlikely(task_has_idle_policy(curr)) &&
+ likely(!task_has_idle_policy(p)))
goto preempt;
/*
@@ -7012,7 +7272,7 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
if (p->sched_class != &fair_sched_class)
return 0;
- if (unlikely(p->policy == SCHED_IDLE))
+ if (unlikely(task_has_idle_policy(p)))
return 0;
/*
@@ -7896,16 +8156,16 @@ static bool update_nohz_stats(struct rq *rq, bool force)
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @env: The load balancing environment.
* @group: sched_group whose statistics are to be updated.
- * @load_idx: Load index of sched_domain of this_cpu for load calc.
- * @local_group: Does group contain this_cpu.
* @sgs: variable to hold the statistics for this group.
- * @overload: Indicate pullable load (e.g. >1 runnable task).
+ * @sg_status: Holds flags indicating the status of the sched_group.
*/
static inline void update_sg_lb_stats(struct lb_env *env,
- struct sched_group *group, int load_idx,
- int local_group, struct sg_lb_stats *sgs,
- bool *overload)
+ struct sched_group *group,
+ struct sg_lb_stats *sgs,
+ int *sg_status)
{
+ int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
+ int load_idx = get_sd_load_idx(env->sd, env->idle);
unsigned long load;
int i, nr_running;
@@ -7929,7 +8189,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
nr_running = rq->nr_running;
if (nr_running > 1)
- *overload = true;
+ *sg_status |= SG_OVERLOAD;
+
+ if (cpu_overutilized(i))
+ *sg_status |= SG_OVERUTILIZED;
#ifdef CONFIG_NUMA_BALANCING
sgs->nr_numa_running += rq->nr_numa_running;
@@ -7945,7 +8208,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if (env->sd->flags & SD_ASYM_CPUCAPACITY &&
sgs->group_misfit_task_load < rq->misfit_task_load) {
sgs->group_misfit_task_load = rq->misfit_task_load;
- *overload = 1;
+ *sg_status |= SG_OVERLOAD;
}
}
@@ -8090,17 +8353,14 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
struct sched_group *sg = env->sd->groups;
struct sg_lb_stats *local = &sds->local_stat;
struct sg_lb_stats tmp_sgs;
- int load_idx;
- bool overload = false;
bool prefer_sibling = child && child->flags & SD_PREFER_SIBLING;
+ int sg_status = 0;
#ifdef CONFIG_NO_HZ_COMMON
if (env->idle == CPU_NEWLY_IDLE && READ_ONCE(nohz.has_blocked))
env->flags |= LBF_NOHZ_STATS;
#endif
- load_idx = get_sd_load_idx(env->sd, env->idle);
-
do {
struct sg_lb_stats *sgs = &tmp_sgs;
int local_group;
@@ -8115,8 +8375,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
update_group_capacity(env->sd, env->dst_cpu);
}
- update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
- &overload);
+ update_sg_lb_stats(env, sg, sgs, &sg_status);
if (local_group)
goto next_group;
@@ -8165,9 +8424,15 @@ next_group:
env->fbq_type = fbq_classify_group(&sds->busiest_stat);
if (!env->sd->parent) {
+ struct root_domain *rd = env->dst_rq->rd;
+
/* update overload indicator if we are at root domain */
- if (READ_ONCE(env->dst_rq->rd->overload) != overload)
- WRITE_ONCE(env->dst_rq->rd->overload, overload);
+ WRITE_ONCE(rd->overload, sg_status & SG_OVERLOAD);
+
+ /* Update over-utilization (tipping point, U >= 0) indicator */
+ WRITE_ONCE(rd->overutilized, sg_status & SG_OVERUTILIZED);
+ } else if (sg_status & SG_OVERUTILIZED) {
+ WRITE_ONCE(env->dst_rq->rd->overutilized, SG_OVERUTILIZED);
}
}
@@ -8394,6 +8659,14 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
* this level.
*/
update_sd_lb_stats(env, &sds);
+
+ if (static_branch_unlikely(&sched_energy_present)) {
+ struct root_domain *rd = env->dst_rq->rd;
+
+ if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
+ goto out_balanced;
+ }
+
local = &sds.local_stat;
busiest = &sds.busiest_stat;
@@ -8910,13 +9183,22 @@ out_all_pinned:
sd->nr_balance_failed = 0;
out_one_pinned:
+ ld_moved = 0;
+
+ /*
+ * idle_balance() disregards balance intervals, so we could repeatedly
+ * reach this code, which would lead to balance_interval skyrocketing
+ * in a short amount of time. Skip the balance_interval increase logic
+ * to avoid that.
+ */
+ if (env.idle == CPU_NEWLY_IDLE)
+ goto out;
+
/* tune up the balancing interval */
- if (((env.flags & LBF_ALL_PINNED) &&
- sd->balance_interval < MAX_PINNED_INTERVAL) ||
- (sd->balance_interval < sd->max_interval))
+ if ((env.flags & LBF_ALL_PINNED &&
+ sd->balance_interval < MAX_PINNED_INTERVAL) ||
+ sd->balance_interval < sd->max_interval)
sd->balance_interval *= 2;
-
- ld_moved = 0;
out:
return ld_moved;
}
@@ -9281,7 +9563,7 @@ static void nohz_balancer_kick(struct rq *rq)
}
}
- sd = rcu_dereference(per_cpu(sd_asym, cpu));
+ sd = rcu_dereference(per_cpu(sd_asym_packing, cpu));
if (sd) {
for_each_cpu(i, sched_domain_span(sd)) {
if (i == cpu ||
@@ -9533,9 +9815,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
return false;
}
- /*
- * barrier, pairs with nohz_balance_enter_idle(), ensures ...
- */
+ /* could be _relaxed() */
flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
if (!(flags & NOHZ_KICK_MASK))
return false;
@@ -9785,6 +10065,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
task_tick_numa(rq, curr);
update_misfit_status(curr, rq);
+ update_overutilized_status(task_rq(curr));
}
/*
diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c
index e6802181900f..81faddba9e20 100644
--- a/kernel/sched/isolation.c
+++ b/kernel/sched/isolation.c
@@ -8,14 +8,14 @@
*/
#include "sched.h"
-DEFINE_STATIC_KEY_FALSE(housekeeping_overriden);
-EXPORT_SYMBOL_GPL(housekeeping_overriden);
+DEFINE_STATIC_KEY_FALSE(housekeeping_overridden);
+EXPORT_SYMBOL_GPL(housekeeping_overridden);
static cpumask_var_t housekeeping_mask;
static unsigned int housekeeping_flags;
int housekeeping_any_cpu(enum hk_flags flags)
{
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping_flags & flags)
return cpumask_any_and(housekeeping_mask, cpu_online_mask);
return smp_processor_id();
@@ -24,7 +24,7 @@ EXPORT_SYMBOL_GPL(housekeeping_any_cpu);
const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
{
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping_flags & flags)
return housekeeping_mask;
return cpu_possible_mask;
@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(housekeeping_cpumask);
void housekeeping_affine(struct task_struct *t, enum hk_flags flags)
{
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping_flags & flags)
set_cpus_allowed_ptr(t, housekeeping_mask);
}
@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(housekeeping_affine);
bool housekeeping_test_cpu(int cpu, enum hk_flags flags)
{
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping_flags & flags)
return cpumask_test_cpu(cpu, housekeeping_mask);
return true;
@@ -53,7 +53,7 @@ void __init housekeeping_init(void)
if (!housekeeping_flags)
return;
- static_branch_enable(&housekeeping_overriden);
+ static_branch_enable(&housekeeping_overridden);
if (housekeeping_flags & HK_FLAG_TICK)
sched_tick_offload_init();
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 76e0eaf4654e..3cd8a3a795d2 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -210,7 +210,7 @@ static int membarrier_register_global_expedited(void)
* future scheduler executions will observe the new
* thread flag state for this mm.
*/
- synchronize_sched();
+ synchronize_rcu();
}
atomic_or(MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY,
&mm->membarrier_state);
@@ -246,7 +246,7 @@ static int membarrier_register_private_expedited(int flags)
* Ensure all future scheduler executions will observe the
* new thread flag state for this process.
*/
- synchronize_sched();
+ synchronize_rcu();
}
atomic_or(state, &mm->membarrier_state);
@@ -298,7 +298,7 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
if (tick_nohz_full_enabled())
return -EINVAL;
if (num_online_cpus() > 1)
- synchronize_sched();
+ synchronize_rcu();
return 0;
case MEMBARRIER_CMD_GLOBAL_EXPEDITED:
return membarrier_global_expedited();
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 3d7355d7c3e3..fe24de3fbc93 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -136,8 +136,18 @@
static int psi_bug __read_mostly;
-bool psi_disabled __read_mostly;
-core_param(psi_disabled, psi_disabled, bool, 0644);
+DEFINE_STATIC_KEY_FALSE(psi_disabled);
+
+#ifdef CONFIG_PSI_DEFAULT_DISABLED
+bool psi_enable;
+#else
+bool psi_enable = true;
+#endif
+static int __init setup_psi(char *str)
+{
+ return kstrtobool(str, &psi_enable) == 0;
+}
+__setup("psi=", setup_psi);
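/*
 * Usage note: PSI is now a boot-time switch instead of a runtime module
 * parameter. For example, "psi=1" on the kernel command line enables it
 * when CONFIG_PSI_DEFAULT_DISABLED is set, and "psi=0" disables it
 * otherwise; kstrtobool() also accepts y/n and Y/N spellings.
 */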
/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
@@ -169,8 +179,10 @@ static void group_init(struct psi_group *group)
void __init psi_init(void)
{
- if (psi_disabled)
+ if (!psi_enable) {
+ static_branch_enable(&psi_disabled);
return;
+ }
psi_period = jiffies_to_nsecs(PSI_FREQ);
group_init(&psi_system);
@@ -549,7 +561,7 @@ void psi_memstall_enter(unsigned long *flags)
struct rq_flags rf;
struct rq *rq;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
*flags = current->flags & PF_MEMSTALL;
@@ -579,7 +591,7 @@ void psi_memstall_leave(unsigned long *flags)
struct rq_flags rf;
struct rq *rq;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (*flags)
@@ -600,7 +612,7 @@ void psi_memstall_leave(unsigned long *flags)
#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return 0;
cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
@@ -612,7 +624,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
void psi_cgroup_free(struct cgroup *cgroup)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
cancel_delayed_work_sync(&cgroup->psi.clock_work);
@@ -637,7 +649,7 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to)
struct rq_flags rf;
struct rq *rq;
- if (psi_disabled) {
+ if (static_branch_likely(&psi_disabled)) {
/*
* Lame to do this here, but the scheduler cannot be locked
* from the outside, so we move cgroups from inside sched/.
@@ -673,7 +685,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
int full;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return -EOPNOTSUPP;
update_stats(group);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a21ea6021929..e4f398ad9e73 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1498,6 +1498,14 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
#endif
}
+static inline void set_next_task(struct rq *rq, struct task_struct *p)
+{
+ p->se.exec_start = rq_clock_task(rq);
+
+ /* The running task is never eligible for pushing */
+ dequeue_pushable_task(rq, p);
+}
+
static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
struct rt_rq *rt_rq)
{
@@ -1518,7 +1526,6 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
struct sched_rt_entity *rt_se;
- struct task_struct *p;
struct rt_rq *rt_rq = &rq->rt;
do {
@@ -1527,10 +1534,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
rt_rq = group_rt_rq(rt_se);
} while (rt_rq);
- p = rt_task_of(rt_se);
- p->se.exec_start = rq_clock_task(rq);
-
- return p;
+ return rt_task_of(rt_se);
}
static struct task_struct *
@@ -1573,8 +1577,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
p = _pick_next_task_rt(rq);
- /* The running task is never eligible for pushing */
- dequeue_pushable_task(rq, p);
+ set_next_task(rq, p);
rt_queue_push_tasks(rq);
@@ -1810,10 +1813,8 @@ static int push_rt_task(struct rq *rq)
return 0;
retry:
- if (unlikely(next_task == rq->curr)) {
- WARN_ON(1);
+ if (WARN_ON(next_task == rq->curr))
return 0;
- }
/*
* It's possible that the next_task slipped in of
@@ -2355,12 +2356,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
static void set_curr_task_rt(struct rq *rq)
{
- struct task_struct *p = rq->curr;
-
- p->se.exec_start = rq_clock_task(rq);
-
- /* The running task is never eligible for pushing */
- dequeue_pushable_task(rq, p);
+ set_next_task(rq, rq->curr);
}
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 618577fc9aa8..0ba08924e017 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -23,6 +23,7 @@
#include <linux/sched/prio.h>
#include <linux/sched/rt.h>
#include <linux/sched/signal.h>
+#include <linux/sched/smt.h>
#include <linux/sched/stat.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/task.h>
@@ -44,6 +45,7 @@
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delayacct.h>
+#include <linux/energy_model.h>
#include <linux/init_task.h>
#include <linux/kprobes.h>
#include <linux/kthread.h>
@@ -176,6 +178,11 @@ static inline bool valid_policy(int policy)
rt_policy(policy) || dl_policy(policy);
}
+static inline int task_has_idle_policy(struct task_struct *p)
+{
+ return idle_policy(p->policy);
+}
+
static inline int task_has_rt_policy(struct task_struct *p)
{
return rt_policy(p->policy);
@@ -631,7 +638,7 @@ struct dl_rq {
/*
* Deadline values of the currently executing and the
* earliest ready task on this rq. Caching these facilitates
- * the decision wether or not a ready but not running task
+ * the decision whether or not a ready but not running task
* should migrate somewhere else.
*/
struct {
@@ -703,6 +710,16 @@ static inline bool sched_asym_prefer(int a, int b)
return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
}
+struct perf_domain {
+ struct em_perf_domain *em_pd;
+ struct perf_domain *next;
+ struct rcu_head rcu;
+};
+
+/* Scheduling group status flags */
+#define SG_OVERLOAD 0x1 /* More than one runnable task on a CPU. */
+#define SG_OVERUTILIZED 0x2 /* One or more CPUs are over-utilized. */
+
/*
* We add the notion of a root-domain which will be used to define per-domain
* variables. Each exclusive cpuset essentially defines an island domain by
@@ -725,6 +742,9 @@ struct root_domain {
*/
int overload;
+ /* Indicate one or more cpus over-utilized (tipping point) */
+ int overutilized;
+
/*
* The bit corresponding to a CPU gets set here if such CPU has more
* than one runnable -deadline task (as it is below for RT tasks).
@@ -755,6 +775,12 @@ struct root_domain {
struct cpupri cpupri;
unsigned long max_cpu_capacity;
+
+ /*
+ * NULL-terminated list of performance domains intersecting with the
+ * CPUs of the rd. Protected by RCU.
+ */
+ struct perf_domain *pd;
};
extern struct root_domain def_root_domain;
@@ -936,9 +962,6 @@ static inline int cpu_of(struct rq *rq)
#ifdef CONFIG_SCHED_SMT
-
-extern struct static_key_false sched_smt_present;
-
extern void __update_idle_core(struct rq *rq);
static inline void update_idle_core(struct rq *rq)
@@ -1287,7 +1310,8 @@ DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DECLARE_PER_CPU(struct sched_domain *, sd_numa);
-DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+DECLARE_PER_CPU(struct sched_domain *, sd_asym_packing);
+DECLARE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
extern struct static_key_false sched_asym_cpucapacity;
struct sched_group_capacity {
@@ -1431,7 +1455,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
#ifdef CONFIG_SMP
/*
* After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
- * successfuly executed on another CPU. We must ensure that updates of
+ * successfully executed on another CPU. We must ensure that updates of
* per-task data have been completed by this moment.
*/
smp_wmb();
@@ -1796,12 +1820,12 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
rq->nr_running = prev_nr + count;
- if (prev_nr < 2 && rq->nr_running >= 2) {
#ifdef CONFIG_SMP
+ if (prev_nr < 2 && rq->nr_running >= 2) {
if (!READ_ONCE(rq->rd->overload))
WRITE_ONCE(rq->rd->overload, 1);
-#endif
}
+#endif
sched_update_tick_dependency(rq);
}
@@ -1857,27 +1881,6 @@ unsigned long arch_scale_freq_capacity(int cpu)
#endif
#ifdef CONFIG_SMP
-#ifndef arch_scale_cpu_capacity
-static __always_inline
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
- if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
- return sd->smt_gain / sd->span_weight;
-
- return SCHED_CAPACITY_SCALE;
-}
-#endif
-#else
-#ifndef arch_scale_cpu_capacity
-static __always_inline
-unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
-{
- return SCHED_CAPACITY_SCALE;
-}
-#endif
-#endif
-
-#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
@@ -2209,6 +2212,31 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
#endif
#ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
+/**
+ * enum schedutil_type - CPU utilization type
+ * @FREQUENCY_UTIL: Utilization used to select frequency
+ * @ENERGY_UTIL: Utilization used during energy calculation
+ *
+ * The utilization signals of all scheduling classes (CFS/RT/DL) and IRQ time
+ * need to be aggregated differently depending on the usage made of them. This
+ * enum is used within schedutil_freq_util() to differentiate the types of
+ * utilization expected by the callers, and adjust the aggregation accordingly.
+ */
+enum schedutil_type {
+ FREQUENCY_UTIL,
+ ENERGY_UTIL,
+};
+
+unsigned long schedutil_freq_util(int cpu, unsigned long util_cfs,
+ unsigned long max, enum schedutil_type type);
+
+static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
+{
+ unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+
+ return schedutil_freq_util(cpu, cfs, max, ENERGY_UTIL);
+}
+
static inline unsigned long cpu_bw_dl(struct rq *rq)
{
return (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> BW_SHIFT;
@@ -2235,6 +2263,11 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
{
return READ_ONCE(rq->avg_rt.util_avg);
}
+#else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
+static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
+{
+ return cfs;
+}
#endif
#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
@@ -2264,3 +2297,13 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
return util;
}
#endif
+
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
+#else
+#define perf_domain_span(pd) NULL
+#endif
+
+#ifdef CONFIG_SMP
+extern struct static_key_false sched_energy_present;
+#endif
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 4904c4677000..aa0de240fb41 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -66,7 +66,7 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
int clear = 0, set = TSK_RUNNING;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (!wakeup || p->sched_psi_wake_requeue) {
@@ -86,7 +86,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
int clear = TSK_RUNNING, set = 0;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (!sleep) {
@@ -102,7 +102,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
static inline void psi_ttwu_dequeue(struct task_struct *p)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
/*
* Is the task being migrated during a wakeup? Make sure to
@@ -128,7 +128,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
static inline void psi_task_tick(struct rq *rq)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (unlikely(rq->curr->flags & PF_MEMSTALL))
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 8d7f15ba5916..3f35ba1d8fde 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -201,6 +201,199 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
return 1;
}
+DEFINE_STATIC_KEY_FALSE(sched_energy_present);
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+DEFINE_MUTEX(sched_energy_mutex);
+bool sched_energy_update;
+
+static void free_pd(struct perf_domain *pd)
+{
+ struct perf_domain *tmp;
+
+ while (pd) {
+ tmp = pd->next;
+ kfree(pd);
+ pd = tmp;
+ }
+}
+
+static struct perf_domain *find_pd(struct perf_domain *pd, int cpu)
+{
+ while (pd) {
+ if (cpumask_test_cpu(cpu, perf_domain_span(pd)))
+ return pd;
+ pd = pd->next;
+ }
+
+ return NULL;
+}
+
+static struct perf_domain *pd_init(int cpu)
+{
+ struct em_perf_domain *obj = em_cpu_get(cpu);
+ struct perf_domain *pd;
+
+ if (!obj) {
+ if (sched_debug())
+ pr_info("%s: no EM found for CPU%d\n", __func__, cpu);
+ return NULL;
+ }
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return NULL;
+ pd->em_pd = obj;
+
+ return pd;
+}
+
+static void perf_domain_debug(const struct cpumask *cpu_map,
+ struct perf_domain *pd)
+{
+ if (!sched_debug() || !pd)
+ return;
+
+ printk(KERN_DEBUG "root_domain %*pbl:", cpumask_pr_args(cpu_map));
+
+ while (pd) {
+ printk(KERN_CONT " pd%d:{ cpus=%*pbl nr_cstate=%d }",
+ cpumask_first(perf_domain_span(pd)),
+ cpumask_pr_args(perf_domain_span(pd)),
+ em_pd_nr_cap_states(pd->em_pd));
+ pd = pd->next;
+ }
+
+ printk(KERN_CONT "\n");
+}
+
+static void destroy_perf_domain_rcu(struct rcu_head *rp)
+{
+ struct perf_domain *pd;
+
+ pd = container_of(rp, struct perf_domain, rcu);
+ free_pd(pd);
+}
+
+static void sched_energy_set(bool has_eas)
+{
+ if (!has_eas && static_branch_unlikely(&sched_energy_present)) {
+ if (sched_debug())
+ pr_info("%s: stopping EAS\n", __func__);
+ static_branch_disable_cpuslocked(&sched_energy_present);
+ } else if (has_eas && !static_branch_unlikely(&sched_energy_present)) {
+ if (sched_debug())
+ pr_info("%s: starting EAS\n", __func__);
+ static_branch_enable_cpuslocked(&sched_energy_present);
+ }
+}
+
+/*
+ * EAS can be used on a root domain if it meets all the following conditions:
+ * 1. an Energy Model (EM) is available;
+ * 2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy;
+ * 3. the EM complexity is low enough to keep scheduling overheads low;
+ * 4. schedutil is driving the frequency of all CPUs of the rd.
+ *
+ * The complexity of the Energy Model is defined as:
+ *
+ * C = nr_pd * (nr_cpus + nr_cs)
+ *
+ * with parameters defined as:
+ * - nr_pd: the number of performance domains
+ * - nr_cpus: the number of CPUs
+ * - nr_cs: the sum of the number of capacity states of all performance
+ * domains (for example, on a system with 2 performance domains,
+ * with 10 capacity states each, nr_cs = 2 * 10 = 20).
+ *
+ * It is generally not a good idea to use such a model in the wake-up path on
+ * very complex platforms because of the associated scheduling overheads. The
+ * arbitrary constraint below prevents that. It makes EAS usable up to 16 CPUs
+ * with per-CPU DVFS and less than 8 capacity states each, for example.
+ */
+#define EM_MAX_COMPLEXITY 2048
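/*
 * Worked example of the bound above: 16 CPUs with per-CPU DVFS
 * (nr_pd = 16, nr_cpus = 16) and 8 capacity states per domain
 * (nr_cs = 128) give C = 16 * (16 + 128) = 2304 > 2048, so EAS is
 * refused; with 7 capacity states per domain, C = 16 * (16 + 112) = 2048
 * and the check passes.
 */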
+
+extern struct cpufreq_governor schedutil_gov;
+static bool build_perf_domains(const struct cpumask *cpu_map)
+{
+ int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
+ struct perf_domain *pd = NULL, *tmp;
+ int cpu = cpumask_first(cpu_map);
+ struct root_domain *rd = cpu_rq(cpu)->rd;
+ struct cpufreq_policy *policy;
+ struct cpufreq_governor *gov;
+
+ /* EAS is enabled for asymmetric CPU capacity topologies. */
+ if (!per_cpu(sd_asym_cpucapacity, cpu)) {
+ if (sched_debug()) {
+ pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
+ cpumask_pr_args(cpu_map));
+ }
+ goto free;
+ }
+
+ for_each_cpu(i, cpu_map) {
+ /* Skip already covered CPUs. */
+ if (find_pd(pd, i))
+ continue;
+
+ /* Do not attempt EAS if schedutil is not being used. */
+ policy = cpufreq_cpu_get(i);
+ if (!policy)
+ goto free;
+ gov = policy->governor;
+ cpufreq_cpu_put(policy);
+ if (gov != &schedutil_gov) {
+ if (rd->pd)
+ pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
+ cpumask_pr_args(cpu_map));
+ goto free;
+ }
+
+ /* Create the new pd and add it to the local list. */
+ tmp = pd_init(i);
+ if (!tmp)
+ goto free;
+ tmp->next = pd;
+ pd = tmp;
+
+ /*
+ * Count performance domains and capacity states for the
+ * complexity check.
+ */
+ nr_pd++;
+ nr_cs += em_pd_nr_cap_states(pd->em_pd);
+ }
+
+ /* Bail out if the Energy Model complexity is too high. */
+ if (nr_pd * (nr_cs + nr_cpus) > EM_MAX_COMPLEXITY) {
+ WARN(1, "rd %*pbl: Failed to start EAS, EM complexity is too high\n",
+ cpumask_pr_args(cpu_map));
+ goto free;
+ }
+
+ perf_domain_debug(cpu_map, pd);
+
+ /* Attach the new list of performance domains to the root domain. */
+ tmp = rd->pd;
+ rcu_assign_pointer(rd->pd, pd);
+ if (tmp)
+ call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
+
+ return !!pd;
+
+free:
+ free_pd(pd);
+ tmp = rd->pd;
+ rcu_assign_pointer(rd->pd, NULL);
+ if (tmp)
+ call_rcu(&tmp->rcu, destroy_perf_domain_rcu);
+
+ return false;
+}
+#else
+static void free_pd(struct perf_domain *pd) { }
+#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
+
static void free_rootdomain(struct rcu_head *rcu)
{
struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
@@ -211,6 +404,7 @@ static void free_rootdomain(struct rcu_head *rcu)
free_cpumask_var(rd->rto_mask);
free_cpumask_var(rd->online);
free_cpumask_var(rd->span);
+ free_pd(rd->pd);
kfree(rd);
}
@@ -397,7 +591,8 @@ DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
DEFINE_PER_CPU(struct sched_domain *, sd_numa);
-DEFINE_PER_CPU(struct sched_domain *, sd_asym);
+DEFINE_PER_CPU(struct sched_domain *, sd_asym_packing);
+DEFINE_PER_CPU(struct sched_domain *, sd_asym_cpucapacity);
DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
static void update_top_cache_domain(int cpu)
@@ -423,7 +618,10 @@ static void update_top_cache_domain(int cpu)
rcu_assign_pointer(per_cpu(sd_numa, cpu), sd);
sd = highest_flag_domain(cpu, SD_ASYM_PACKING);
- rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
+ rcu_assign_pointer(per_cpu(sd_asym_packing, cpu), sd);
+
+ sd = lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY);
+ rcu_assign_pointer(per_cpu(sd_asym_cpucapacity, cpu), sd);
}
/*
@@ -1133,7 +1331,6 @@ sd_init(struct sched_domain_topology_level *tl,
.last_balance = jiffies,
.balance_interval = sd_weight,
- .smt_gain = 0,
.max_newidle_lb_cost = 0,
.next_decay_max_lb_cost = jiffies,
.child = child,
@@ -1164,7 +1361,6 @@ sd_init(struct sched_domain_topology_level *tl,
if (sd->flags & SD_SHARE_CPUCAPACITY) {
sd->imbalance_pct = 110;
- sd->smt_gain = 1178; /* ~15% */
} else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
sd->imbalance_pct = 117;
@@ -1934,6 +2130,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
+ bool __maybe_unused has_eas = false;
int i, j, n;
int new_topology;
@@ -1961,8 +2158,8 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
/* Destroy deleted domains: */
for (i = 0; i < ndoms_cur; i++) {
for (j = 0; j < n && !new_topology; j++) {
- if (cpumask_equal(doms_cur[i], doms_new[j])
- && dattrs_equal(dattr_cur, i, dattr_new, j))
+ if (cpumask_equal(doms_cur[i], doms_new[j]) &&
+ dattrs_equal(dattr_cur, i, dattr_new, j))
goto match1;
}
/* No match - a current sched domain not in new doms_new[] */
@@ -1982,8 +2179,8 @@ match1:
/* Build new domains: */
for (i = 0; i < ndoms_new; i++) {
for (j = 0; j < n && !new_topology; j++) {
- if (cpumask_equal(doms_new[i], doms_cur[j])
- && dattrs_equal(dattr_new, i, dattr_cur, j))
+ if (cpumask_equal(doms_new[i], doms_cur[j]) &&
+ dattrs_equal(dattr_new, i, dattr_cur, j))
goto match2;
}
/* No match - add a new doms_new */
@@ -1992,6 +2189,24 @@ match2:
;
}
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+ /* Build perf. domains: */
+ for (i = 0; i < ndoms_new; i++) {
+ for (j = 0; j < n && !sched_energy_update; j++) {
+ if (cpumask_equal(doms_new[i], doms_cur[j]) &&
+ cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
+ has_eas = true;
+ goto match3;
+ }
+ }
+ /* No match - add perf. domains for a new rd */
+ has_eas |= build_perf_domains(doms_new[i]);
+match3:
+ ;
+ }
+ sched_energy_set(has_eas);
+#endif
+
/* Remember the new sched domains: */
if (doms_cur != &fallback_doms)
free_sched_domains(doms_cur, ndoms_cur);
diff --git a/kernel/signal.c b/kernel/signal.c
index 9a32bc2088c9..53e07d97ffe0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2735,6 +2735,84 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
}
EXPORT_SYMBOL(sigprocmask);
+/*
+ * This API helps set app-provided sigmasks.
+ *
+ * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
+ * epoll_pwait, where a new sigmask is passed from userland for the
+ * duration of the syscall.
+ */
+int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
+ sigset_t *oldset, size_t sigsetsize)
+{
+ if (!usigmask)
+ return 0;
+
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+ if (copy_from_user(set, usigmask, sizeof(sigset_t)))
+ return -EFAULT;
+
+ *oldset = current->blocked;
+ set_current_blocked(set);
+
+ return 0;
+}
+EXPORT_SYMBOL(set_user_sigmask);
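/*
 * Userspace sketch of the pattern this helper serves: ppoll() atomically
 * installs a caller-supplied sigmask for the duration of the wait, which
 * is exactly the save/set/restore dance set_user_sigmask() and
 * restore_user_sigmask() implement on the kernel side.
 */
#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <stddef.h>

int wait_with_sigint_blocked(struct pollfd *fds, nfds_t nfds)
{
        sigset_t mask;

        sigemptyset(&mask);
        sigaddset(&mask, SIGINT);       /* mask used only while ppoll() sleeps */

        /* The kernel swaps 'mask' in, sleeps, then restores the old mask */
        return ppoll(fds, nfds, NULL, &mask);
}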
+
+#ifdef CONFIG_COMPAT
+int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
+ sigset_t *set, sigset_t *oldset,
+ size_t sigsetsize)
+{
+ if (!usigmask)
+ return 0;
+
+ if (sigsetsize != sizeof(compat_sigset_t))
+ return -EINVAL;
+ if (get_compat_sigset(set, usigmask))
+ return -EFAULT;
+
+ *oldset = current->blocked;
+ set_current_blocked(set);
+
+ return 0;
+}
+EXPORT_SYMBOL(set_compat_user_sigmask);
+#endif
+
+/*
+ * restore_user_sigmask:
+ * usigmask: sigmask passed in from userland.
+ * sigsaved: saved sigmask when the syscall started and changed the sigmask to
+ * usigmask.
+ *
+ * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
+ * epoll_pwait where a new sigmask is passed in from userland for the syscalls.
+ */
+void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved)
+{
+ if (!usigmask)
+ return;
+ /*
+ * When signals are pending, do not restore the saved sigmask here.
+ * Restoring it at this point could deliver signals that the above
+ * syscalls are intended to block because of the sigmask passed in.
+ */
+ if (signal_pending(current)) {
+ current->saved_sigmask = *sigsaved;
+ set_restore_sigmask();
+ return;
+ }
+
+ /*
+ * This is needed because the fast syscall return path does not restore
+ * saved_sigmask when signals are not pending.
+ */
+ set_current_blocked(sigsaved);
+}
+EXPORT_SYMBOL(restore_user_sigmask);
+
/**
* sys_rt_sigprocmask - change the list of currently blocked signals
* @how: whether to add, remove, or set signals
@@ -3254,7 +3332,71 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
return ret;
}
+#ifdef CONFIG_COMPAT_32BIT_TIME
+SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
+ siginfo_t __user *, uinfo,
+ const struct old_timespec32 __user *, uts,
+ size_t, sigsetsize)
+{
+ sigset_t these;
+ struct timespec64 ts;
+ kernel_siginfo_t info;
+ int ret;
+
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&these, uthese, sizeof(these)))
+ return -EFAULT;
+
+ if (uts) {
+ if (get_old_timespec32(&ts, uts))
+ return -EFAULT;
+ }
+
+ ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
+
+ if (ret > 0 && uinfo) {
+ if (copy_siginfo_to_user(uinfo, &info))
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+#endif
+
#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
+ struct compat_siginfo __user *, uinfo,
+ struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
+{
+ sigset_t s;
+ struct timespec64 t;
+ kernel_siginfo_t info;
+ long ret;
+
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (get_compat_sigset(&s, uthese))
+ return -EFAULT;
+
+ if (uts) {
+ if (get_timespec64(&t, uts))
+ return -EFAULT;
+ }
+
+ ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
+
+ if (ret > 0 && uinfo) {
+ if (copy_siginfo_to_user32(uinfo, &info))
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
struct compat_siginfo __user *, uinfo,
struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
@@ -3285,6 +3427,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
return ret;
}
#endif
+#endif
/**
* sys_kill - send a signal to a process
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
index e42892926244..b193a59fc05b 100644
--- a/kernel/stackleak.c
+++ b/kernel/stackleak.c
@@ -11,6 +11,7 @@
*/
#include <linux/stackleak.h>
+#include <linux/kprobes.h>
#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
#include <linux/jump_label.h>
@@ -47,7 +48,7 @@ int stack_erasing_sysctl(struct ctl_table *table, int write,
#define skip_erasing() false
#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
-asmlinkage void stackleak_erase(void)
+asmlinkage void notrace stackleak_erase(void)
{
/* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
unsigned long kstack_ptr = current->lowest_stack;
@@ -101,8 +102,9 @@ asmlinkage void stackleak_erase(void)
/* Reset the 'lowest_stack' value for the next syscall */
current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
}
+NOKPROBE_SYMBOL(stackleak_erase);
-void __used stackleak_track_stack(void)
+void __used notrace stackleak_track_stack(void)
{
/*
* N.B. stackleak_erase() fills the kernel stack with the poison value,
diff --git a/kernel/sys.c b/kernel/sys.c
index 123bd73046ec..64b5a230f38d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -121,6 +121,9 @@
#ifndef SVE_GET_VL
# define SVE_GET_VL() (-EINVAL)
#endif
+#ifndef PAC_RESET_KEYS
+# define PAC_RESET_KEYS(a, b) (-EINVAL)
+#endif
/*
* this is where the system-wide overflow UID and GID are defined, for
@@ -2476,6 +2479,11 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
return -EINVAL;
error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
break;
+ case PR_PAC_RESET_KEYS:
+ if (arg3 || arg4 || arg5)
+ return -EINVAL;
+ error = PAC_RESET_KEYS(me, arg2);
+ break;
default:
error = -EINVAL;
break;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index df556175be50..ab9d0e3c6d50 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -284,7 +284,9 @@ COND_SYSCALL_COMPAT(move_pages);
COND_SYSCALL(perf_event_open);
COND_SYSCALL(accept4);
COND_SYSCALL(recvmmsg);
+COND_SYSCALL(recvmmsg_time32);
COND_SYSCALL_COMPAT(recvmmsg);
+COND_SYSCALL_COMPAT(recvmmsg_time64);
/*
* Architecture specific syscalls: see further below
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index fa5de5e8de61..2c97e8c2d29f 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Alarmtimer interface
*
@@ -10,10 +11,6 @@
* Copyright (C) 2010 IBM Corporation
*
* Author: John Stultz <john.stultz@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/time.h>
#include <linux/hrtimer.h>
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 8c0e4092f661..5e77662dd2d9 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/clockevents.c
- *
* This file contains functions which manage clock event devices.
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
- *
- * This code is licenced under the GPL version 2. For details see
- * kernel-base/COPYING.
*/
#include <linux/clockchips.h>
@@ -39,10 +35,8 @@ static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
u64 clc = (u64) latch << evt->shift;
u64 rnd;
- if (unlikely(!evt->mult)) {
+ if (WARN_ON(!evt->mult))
evt->mult = 1;
- WARN_ON(1);
- }
rnd = (u64) evt->mult - 1;
/*
@@ -164,10 +158,8 @@ void clockevents_switch_state(struct clock_event_device *dev,
* on it, so fix it up and emit a warning:
*/
if (clockevent_state_oneshot(dev)) {
- if (unlikely(!dev->mult)) {
+ if (WARN_ON(!dev->mult))
dev->mult = 1;
- WARN_ON(1);
- }
}
}
}
@@ -315,10 +307,8 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
int64_t delta;
int rc;
- if (unlikely(expires < 0)) {
- WARN_ON_ONCE(1);
+ if (WARN_ON_ONCE(expires < 0))
return -ETIME;
- }
dev->next_event = expires;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ffe081623aec..3bcc19ceb073 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -1,26 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * linux/kernel/time/clocksource.c
- *
* This file contains the functions which manage clocksource drivers.
*
* Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * TODO WishList:
- * o Allow clocksource drivers to be unregistered
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 9cdd74bd2d27..f5cfa1b73d6f 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1,34 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/hrtimer.c
- *
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
*
* High-resolution kernel timers
*
- * In contrast to the low-resolution timeout API implemented in
- * kernel/timer.c, hrtimers provide finer resolution and accuracy
- * depending on system configuration and capabilities.
- *
- * These timers are currently used for:
- * - itimers
- * - POSIX timers
- * - nanosleep
- * - precise in-kernel timing
+ * In contrast to the low-resolution timeout API, aka timer wheel,
+ * hrtimers provide finer resolution and accuracy depending on system
+ * configuration and capabilities.
*
* Started by: Thomas Gleixner and Ingo Molnar
*
* Credits:
- * based on kernel/timer.c
+ * Based on the original timer wheel code
*
* Help, testing, suggestions, bugfixes, improvements were
* provided by:
*
* George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
* et al.
- *
- * For licencing details see kernel-base/COPYING
*/
#include <linux/cpu.h>
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 9a65713c8309..02068b2d5862 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/itimer.c
- *
* Copyright (C) 1992 Darren Senn
*/
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 497719127bf9..dc1b6f1929f9 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -1,25 +1,9 @@
-/***********************************************************************
-* linux/kernel/time/jiffies.c
-*
-* This file contains the jiffies based clocksource.
-*
-* Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 2 of the License, or
-* (at your option) any later version.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*
-************************************************************************/
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This file contains the jiffies based clocksource.
+ *
+ * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
+ */
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/module.h>
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index c5e0cba3b39c..36a2bef00125 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtc.h>
-#include <linux/math64.h>
#include "ntp_internal.h"
#include "timekeeping_internal.h"
@@ -555,17 +554,9 @@ static void sync_rtc_clock(void)
}
#ifdef CONFIG_GENERIC_CMOS_UPDATE
-int __weak update_persistent_clock(struct timespec now)
-{
- return -ENODEV;
-}
-
int __weak update_persistent_clock64(struct timespec64 now64)
{
- struct timespec now;
-
- now = timespec64_to_timespec(now64);
- return update_persistent_clock(now);
+ return -ENODEV;
}
#endif
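
With the timespec-based fallback removed, update_persistent_clock64() is the only remaining hook. An architecture that can write its battery-backed clock overrides the weak stub directly; a minimal sketch, assuming a hypothetical my_platform_rtc_set() helper:

int update_persistent_clock64(struct timespec64 now)
{
	/* my_platform_rtc_set() is an assumed helper, not a real API */
	return my_platform_rtc_set(now.tv_sec, now.tv_nsec);
}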
diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
index fe56c4e06c51..425bbfce6819 100644
--- a/kernel/time/posix-clock.c
+++ b/kernel/time/posix-clock.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * posix-clock.c - support for dynamic clock devices
+ * Support for dynamic clock devices
*
* Copyright (C) 2010 OMICRON electronics GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/device.h>
#include <linux/export.h>
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index 989ccf028bde..a51895486e5e 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Dummy stubs used when CONFIG_POSIX_TIMERS=n
*
* Created by: Nicolas Pitre, July 2016
* Copyright: (C) 2016 Linaro Limited
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/linkage.h>
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index bd62b5eeb5a0..0e84bb72a3da 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -1,34 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * linux/kernel/posix-timers.c
- *
- *
* 2002-10-15 Posix Clocks & timers
* by George Anzinger george@mvista.com
- *
* Copyright (C) 2002 2003 by MontaVista Software.
*
* 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
* Copyright (C) 2004 Boris Hu
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
- */
-
-/* These are all the functions necessary to implement
- * POSIX clocks & timers
+ * These are all the functions necessary to implement POSIX clocks & timers
*/
#include <linux/mm.h>
#include <linux/interrupt.h>
@@ -289,9 +268,6 @@ static void common_hrtimer_rearm(struct k_itimer *timr)
{
struct hrtimer *timer = &timr->it.real.timer;
- if (!timr->it_interval)
- return;
-
timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
timr->it_interval);
hrtimer_restart(timer);
@@ -317,7 +293,7 @@ void posixtimer_rearm(struct kernel_siginfo *info)
if (!timr)
return;
- if (timr->it_requeue_pending == info->si_sys_private) {
+ if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
timr->kclock->timer_rearm(timr);
timr->it_active = 1;
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index cbc72c2c1fca..094b82ca95e5 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * sched_clock.c: Generic sched_clock() support, to extend low level
- * hardware time counters to full 64-bit ns values.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Generic sched_clock() support, to extend low level hardware time
+ * counters to full 64-bit ns values.
*/
#include <linux/clocksource.h>
#include <linux/init.h>
diff --git a/kernel/time/test_udelay.c b/kernel/time/test_udelay.c
index b0928ab3270f..77c63005dc4e 100644
--- a/kernel/time/test_udelay.c
+++ b/kernel/time/test_udelay.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* udelay() test kernel module
*
@@ -7,15 +8,6 @@
* Specifying usecs of 0 or negative values will run multiple tests.
*
* Copyright (C) 2014 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/debugfs.h>
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
index a59641fb88b6..5be6154e2fd2 100644
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/tick-broadcast-hrtimer.c
- * This file emulates a local clock event device
- * via a pseudo clock device.
+ * Emulate a local clock event device via a pseudo clock device.
*/
#include <linux/cpu.h>
#include <linux/err.h>
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index be0aac2b4300..803fa67aace9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/tick-broadcast.c
- *
* This file contains functions which emulate a local clock-event
* device via a broadcast event source.
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
- *
- * This code is licenced under the GPL version 2. For details see
- * kernel-base/COPYING.
*/
#include <linux/cpu.h>
#include <linux/err.h>
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 14de3727b18e..529143b4c8d2 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/tick-common.c
- *
* This file contains the base functions to manage periodic tick
* related events.
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
- *
- * This code is licenced under the GPL version 2. For details see
- * kernel-base/COPYING.
*/
#include <linux/cpu.h>
#include <linux/err.h>
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 6fe615d57ebb..f9745d47425a 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/tick-oneshot.c
- *
* This file contains functions which manage high resolution tick
* related events.
*
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
- *
- * This code is licenced under the GPL version 2. For details see
- * kernel-base/COPYING.
*/
#include <linux/cpu.h>
#include <linux/err.h>
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 69e673b88474..6fa52cd6df0b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/tick-sched.c
- *
* Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
* Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
* Copyright(C) 2006-2007 Timesys Corp., Thomas Gleixner
@@ -8,8 +7,6 @@
* No idle tick implementation for low and high resolution timers
*
* Started by: Thomas Gleixner and Ingo Molnar
- *
- * Distribute under GPLv2.
*/
#include <linux/cpu.h>
#include <linux/err.h>
diff --git a/kernel/time/time.c b/kernel/time/time.c
index ad204cf6d001..2edb5088a70b 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -1,14 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time.c
- *
* Copyright (C) 1991, 1992 Linus Torvalds
*
- * This file contains the interface functions for the various
- * time related system calls: time, stime, gettimeofday, settimeofday,
- * adjtime
- */
-/*
- * Modification history kernel/time.c
+ * This file contains the interface functions for the various time related
+ * system calls: time, stime, gettimeofday, settimeofday, adjtime
+ *
+ * Modification history:
*
* 1993-09-02 Philip Gladstone
* Created file with time related functions from sched/core.c and adjtimex()
@@ -387,42 +384,6 @@ time64_t mktime64(const unsigned int year0, const unsigned int mon0,
EXPORT_SYMBOL(mktime64);
/**
- * set_normalized_timespec - set timespec sec and nsec parts and normalize
- *
- * @ts: pointer to timespec variable to be set
- * @sec: seconds to set
- * @nsec: nanoseconds to set
- *
- * Set seconds and nanoseconds field of a timespec variable and
- * normalize to the timespec storage format
- *
- * Note: The tv_nsec part is always in the range of
- * 0 <= tv_nsec < NSEC_PER_SEC
- * For negative values only the tv_sec field is negative !
- */
-void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
-{
- while (nsec >= NSEC_PER_SEC) {
- /*
- * The following asm() prevents the compiler from
- * optimising this loop into a modulo operation. See
- * also __iter_div_u64_rem() in include/linux/time.h
- */
- asm("" : "+rm"(nsec));
- nsec -= NSEC_PER_SEC;
- ++sec;
- }
- while (nsec < 0) {
- asm("" : "+rm"(nsec));
- nsec += NSEC_PER_SEC;
- --sec;
- }
- ts->tv_sec = sec;
- ts->tv_nsec = nsec;
-}
-EXPORT_SYMBOL(set_normalized_timespec);
-
-/**
* ns_to_timespec - Convert nanoseconds to timespec
* @nsec: the nanoseconds value to be converted
*
diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc
index f83bbb81600b..7ed0e0fb5831 100644
--- a/kernel/time/timeconst.bc
+++ b/kernel/time/timeconst.bc
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
scale=0
define gcd(a,b) {
diff --git a/kernel/time/timeconv.c b/kernel/time/timeconv.c
index 7142580ad94f..589e0a552129 100644
--- a/kernel/time/timeconv.c
+++ b/kernel/time/timeconv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.0+
/*
* Copyright (C) 1993, 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
* This file is part of the GNU C Library.
diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c
index 8afd78932bdf..85b98e727306 100644
--- a/kernel/time/timecounter.c
+++ b/kernel/time/timecounter.c
@@ -1,20 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * linux/kernel/time/timecounter.c
- *
- * based on code that migrated away from
- * linux/kernel/time/clocksource.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Based on clocksource code. See commit 74d23cc704d1
*/
-
#include <linux/export.h>
#include <linux/timecounter.h>
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2d110c948805..ac5dbf2cd4a2 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/time/timekeeping.c
- *
- * Kernel timekeeping code and accessor functions
- *
- * This code was moved from linux/kernel/timer.c.
- * Please see that file for copyright and history logs.
- *
+ * Kernel timekeeping code and accessor functions. Based on code from
+ * timer.c, moved in commit 8524070b7982.
*/
-
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -50,7 +45,9 @@ enum timekeeping_adv_mode {
static struct {
seqcount_t seq;
struct timekeeper timekeeper;
-} tk_core ____cacheline_aligned;
+} tk_core ____cacheline_aligned = {
+ .seq = SEQCNT_ZERO(tk_core.seq),
+};
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;
@@ -1467,7 +1464,7 @@ u64 timekeeping_max_deferment(void)
}
/**
- * read_persistent_clock - Return time from the persistent clock.
+ * read_persistent_clock64 - Return time from the persistent clock.
*
* Weak dummy function for arches that do not yet support it.
* Reads the time from the battery backed persistent clock.
@@ -1475,20 +1472,12 @@ u64 timekeeping_max_deferment(void)
*
* XXX - Do be sure to remove it once all arches implement it.
*/
-void __weak read_persistent_clock(struct timespec *ts)
+void __weak read_persistent_clock64(struct timespec64 *ts)
{
ts->tv_sec = 0;
ts->tv_nsec = 0;
}
-void __weak read_persistent_clock64(struct timespec64 *ts64)
-{
- struct timespec ts;
-
- read_persistent_clock(&ts);
- *ts64 = timespec_to_timespec64(ts);
-}
-
/**
* read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
* from the boot.
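
The SEQCNT_ZERO() initializer above makes tk_core.seq usable before timekeeping_init() runs. A sketch of the read side it protects, using the standard seqcount retry loop (the sampled field is just illustrative):

	u64 secs;
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		secs = tk_core.timekeeper.xtime_sec;	/* sample under seq */
	} while (read_seqcount_retry(&tk_core.seq, seq));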
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
index 238e4be60229..86489950d690 100644
--- a/kernel/time/timekeeping_debug.c
+++ b/kernel/time/timekeeping_debug.c
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* debugfs file to track time spent in suspend
*
* Copyright (c) 2011, Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/debugfs.h>
@@ -28,7 +19,7 @@
static unsigned int sleep_time_bin[NUM_BINS] = {0};
-static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
+static int tk_debug_sleep_time_show(struct seq_file *s, void *data)
{
unsigned int bin;
seq_puts(s, " time (secs) count\n");
@@ -42,18 +33,7 @@ static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
}
return 0;
}
-
-static int tk_debug_sleep_time_open(struct inode *inode, struct file *file)
-{
- return single_open(file, tk_debug_show_sleep_time, NULL);
-}
-
-static const struct file_operations tk_debug_sleep_time_fops = {
- .open = tk_debug_sleep_time_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(tk_debug_sleep_time);
static int __init tk_debug_sleep_time_init(void)
{
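
DEFINE_SHOW_ATTRIBUTE() is why the show function was renamed to carry the _show suffix: the macro pastes that prefix onto the names it generates. Roughly, the single line above expands to the boilerplate it replaced:

static int tk_debug_sleep_time_open(struct inode *inode, struct file *file)
{
	return single_open(file, tk_debug_sleep_time_show, inode->i_private);
}

static const struct file_operations tk_debug_sleep_time_fops = {
	.owner		= THIS_MODULE,
	.open		= tk_debug_sleep_time_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};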
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index fa49cd753dea..444156debfa0 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * linux/kernel/timer.c
- *
* Kernel internal timers
*
* Copyright (C) 1991, 1992 Linus Torvalds
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index d647dabdac97..98ba50dcb1b2 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -1,13 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * kernel/time/timer_list.c
- *
* List pending timers
*
* Copyright(C) 2006, Red Hat, Inc., Ingo Molnar
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/proc_fs.h>
diff --git a/kernel/torture.c b/kernel/torture.c
index 17d91f5fba2a..bbf6d473e50c 100644
--- a/kernel/torture.c
+++ b/kernel/torture.c
@@ -194,11 +194,23 @@ torture_onoff(void *arg)
int cpu;
int maxcpu = -1;
DEFINE_TORTURE_RANDOM(rand);
+ int ret;
VERBOSE_TOROUT_STRING("torture_onoff task started");
for_each_online_cpu(cpu)
maxcpu = cpu;
WARN_ON(maxcpu < 0);
+ if (!IS_MODULE(CONFIG_TORTURE_TEST))
+ for_each_possible_cpu(cpu) {
+ if (cpu_online(cpu))
+ continue;
+ ret = cpu_up(cpu);
+ if (ret && verbose) {
+ pr_alert("%s" TORTURE_FLAG
+ "%s: Initial online %d: errno %d\n",
+ __func__, torture_type, cpu, ret);
+ }
+ }
if (maxcpu == 0) {
VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
@@ -233,16 +245,15 @@ stop:
*/
int torture_onoff_init(long ooholdoff, long oointerval)
{
- int ret = 0;
-
#ifdef CONFIG_HOTPLUG_CPU
onoff_holdoff = ooholdoff;
onoff_interval = oointerval;
if (onoff_interval <= 0)
return 0;
- ret = torture_create_kthread(torture_onoff, NULL, onoff_task);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
- return ret;
+ return torture_create_kthread(torture_onoff, NULL, onoff_task);
+#else /* #ifdef CONFIG_HOTPLUG_CPU */
+ return 0;
+#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_init);
@@ -513,15 +524,13 @@ static int torture_shutdown(void *arg)
*/
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{
- int ret = 0;
-
torture_shutdown_hook = cleanup;
if (ssecs > 0) {
shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0));
- ret = torture_create_kthread(torture_shutdown, NULL,
+ return torture_create_kthread(torture_shutdown, NULL,
shutdown_task);
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(torture_shutdown_init);
@@ -620,13 +629,10 @@ static int torture_stutter(void *arg)
/*
* Initialize and kick off the torture_stutter kthread.
*/
-int torture_stutter_init(int s)
+int torture_stutter_init(const int s)
{
- int ret;
-
stutter = s;
- ret = torture_create_kthread(torture_stutter, NULL, stutter_task);
- return ret;
+ return torture_create_kthread(torture_stutter, NULL, stutter_task);
}
EXPORT_SYMBOL_GPL(torture_stutter_init);
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 2868d85f1fb1..fac0ddf8a8e2 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -764,9 +764,9 @@ blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
return NULL;
- if (!bio->bi_css)
+ if (!bio->bi_blkg)
return NULL;
- return cgroup_get_kernfs_id(bio->bi_css->cgroup);
+ return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
}
#else
static union kernfs_node_id *
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 08fcfe440c63..9ddb6fddb4e0 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -17,6 +17,43 @@
#include "trace_probe.h"
#include "trace.h"
+#ifdef CONFIG_MODULES
+struct bpf_trace_module {
+ struct module *module;
+ struct list_head list;
+};
+
+static LIST_HEAD(bpf_trace_modules);
+static DEFINE_MUTEX(bpf_module_mutex);
+
+static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
+{
+ struct bpf_raw_event_map *btp, *ret = NULL;
+ struct bpf_trace_module *btm;
+ unsigned int i;
+
+ mutex_lock(&bpf_module_mutex);
+ list_for_each_entry(btm, &bpf_trace_modules, list) {
+ for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
+ btp = &btm->module->bpf_raw_events[i];
+ if (!strcmp(btp->tp->name, name)) {
+ if (try_module_get(btm->module))
+ ret = btp;
+ goto out;
+ }
+ }
+ }
+out:
+ mutex_unlock(&bpf_module_mutex);
+ return ret;
+}
+#else
+static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
+{
+ return NULL;
+}
+#endif /* CONFIG_MODULES */
+
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
@@ -196,11 +233,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
i++;
} else if (fmt[i] == 'p' || fmt[i] == 's') {
mod[fmt_cnt]++;
- i++;
- if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
+ /* disallow any further format extensions */
+ if (fmt[i + 1] != 0 &&
+ !isspace(fmt[i + 1]) &&
+ !ispunct(fmt[i + 1]))
return -EINVAL;
fmt_cnt++;
- if (fmt[i - 1] == 's') {
+ if (fmt[i] == 's') {
if (str_seen)
/* allow only one '%s' per fmt string */
return -EINVAL;
@@ -1074,7 +1113,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];
-struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
+struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
struct bpf_raw_event_map *btp = __start__bpf_raw_tp;
@@ -1082,7 +1121,16 @@ struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
if (!strcmp(btp->tp->name, name))
return btp;
}
- return NULL;
+
+ return bpf_get_raw_tracepoint_module(name);
+}
+
+void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
+{
+ struct module *mod = __module_address((unsigned long)btp);
+
+ if (mod)
+ module_put(mod);
}
static __always_inline
@@ -1220,3 +1268,52 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
return err;
}
+
+#ifdef CONFIG_MODULES
+int bpf_event_notify(struct notifier_block *nb, unsigned long op, void *module)
+{
+ struct bpf_trace_module *btm, *tmp;
+ struct module *mod = module;
+
+ if (mod->num_bpf_raw_events == 0 ||
+ (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
+ return 0;
+
+ mutex_lock(&bpf_module_mutex);
+
+ switch (op) {
+ case MODULE_STATE_COMING:
+ btm = kzalloc(sizeof(*btm), GFP_KERNEL);
+ if (btm) {
+ btm->module = module;
+ list_add(&btm->list, &bpf_trace_modules);
+ }
+ break;
+ case MODULE_STATE_GOING:
+ list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
+ if (btm->module == module) {
+ list_del(&btm->list);
+ kfree(btm);
+ break;
+ }
+ }
+ break;
+ }
+
+ mutex_unlock(&bpf_module_mutex);
+
+ return 0;
+}
+
+static struct notifier_block bpf_module_nb = {
+ .notifier_call = bpf_event_notify,
+};
+
+int __init bpf_event_init(void)
+{
+ register_module_notifier(&bpf_module_nb);
+ return 0;
+}
+
+fs_initcall(bpf_event_init);
+#endif /* CONFIG_MODULES */
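
bpf_event_notify() hooks the generic module notifier chain; the registration pattern in isolation, as a hedged sketch with hypothetical names:

static int my_notify(struct notifier_block *nb, unsigned long op, void *data)
{
	struct module *mod = data;

	if (op == MODULE_STATE_COMING)
		pr_info("%s is loading\n", mod->name);
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_notify,
};

static int __init my_init(void)
{
	return register_module_notifier(&my_nb);
}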
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f536f601bd46..f0ff24173a0b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -173,7 +173,7 @@ static void ftrace_sync(struct work_struct *work)
{
/*
* This function is just a stub to implement a hard force
- * of synchronize_sched(). This requires synchronizing
+ * of synchronize_rcu(). This requires synchronizing
* tasks even in userspace and idle.
*
* Yes, function tracing is rude.
@@ -817,7 +817,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
- int index = trace->depth;
+ int index = current->curr_ret_stack;
function_profile_call(trace->func, 0, NULL, NULL);
@@ -852,7 +852,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
if (!fgraph_graph_time) {
int index;
- index = trace->depth;
+ index = current->curr_ret_stack;
/* Append this call time to the parent time to subtract */
if (index)
@@ -934,7 +934,7 @@ ftrace_profile_write(struct file *filp, const char __user *ubuf,
ftrace_profile_enabled = 0;
/*
* unregister_ftrace_profiler calls stop_machine
- * so this acts like an synchronize_sched.
+ * so this acts like a synchronize_rcu.
*/
unregister_ftrace_profiler();
}
@@ -1086,7 +1086,7 @@ struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
/*
* Some of the ops may be dynamically allocated,
- * they are freed after a synchronize_sched().
+ * they are freed after a synchronize_rcu().
*/
preempt_disable_notrace();
@@ -1286,7 +1286,7 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
if (!hash || hash == EMPTY_HASH)
return;
- call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
+ call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}
void ftrace_free_filter(struct ftrace_ops *ops)
@@ -1501,7 +1501,7 @@ static bool hash_contains_ip(unsigned long ip,
* the ip is not in the ops->notrace_hash.
*
* This needs to be called with preemption disabled as
- * the hashes are freed with call_rcu_sched().
+ * the hashes are freed with call_rcu().
*/
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
@@ -4496,7 +4496,7 @@ unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
if (ftrace_enabled && !ftrace_hash_empty(hash))
ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
&old_hash_ops);
- synchronize_sched();
+ synchronize_rcu();
hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
hlist_del(&entry->hlist);
@@ -5314,7 +5314,7 @@ ftrace_graph_release(struct inode *inode, struct file *file)
mutex_unlock(&graph_lock);
/* Wait till all users are no longer using the old hash */
- synchronize_sched();
+ synchronize_rcu();
free_ftrace_hash(old_hash);
}
@@ -5460,6 +5460,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
if (ops->flags & FTRACE_OPS_FL_ENABLED)
ftrace_shutdown(ops, 0);
ops->flags |= FTRACE_OPS_FL_DELETED;
+ ftrace_free_filter(ops);
mutex_unlock(&ftrace_lock);
}
@@ -5707,7 +5708,7 @@ void ftrace_release_mod(struct module *mod)
list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
if (mod_map->mod == mod) {
list_del_rcu(&mod_map->list);
- call_rcu_sched(&mod_map->rcu, ftrace_free_mod_map);
+ call_rcu(&mod_map->rcu, ftrace_free_mod_map);
break;
}
}
@@ -5927,7 +5928,7 @@ ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
struct ftrace_mod_map *mod_map;
const char *ret = NULL;
- /* mod_map is freed via call_rcu_sched() */
+ /* mod_map is freed via call_rcu() */
preempt_disable();
list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
@@ -6262,7 +6263,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
/*
* Some of the ops may be dynamically allocated,
- * they must be freed after a synchronize_sched().
+ * they must be freed after a synchronize_rcu().
*/
preempt_disable_notrace();
@@ -6433,7 +6434,7 @@ static void clear_ftrace_pids(struct trace_array *tr)
rcu_assign_pointer(tr->function_pids, NULL);
/* Wait till all users are no longer using pid filtering */
- synchronize_sched();
+ synchronize_rcu();
trace_free_pid_list(pid_list);
}
@@ -6580,7 +6581,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf,
rcu_assign_pointer(tr->function_pids, pid_list);
if (filtered_pids) {
- synchronize_sched();
+ synchronize_rcu();
trace_free_pid_list(filtered_pids);
} else if (pid_list) {
/* Register a probe to set whether to ignore the tracing of a task */
@@ -6814,6 +6815,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
atomic_set(&t->tracing_graph_pause, 0);
atomic_set(&t->trace_overrun, 0);
t->curr_ret_stack = -1;
+ t->curr_ret_depth = -1;
/* Make sure the tasks see the -1 first: */
smp_wmb();
t->ret_stack = ret_stack_list[start++];
@@ -7038,6 +7040,7 @@ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
t->curr_ret_stack = -1;
+ t->curr_ret_depth = -1;
/*
* The idle task has no parent, it either has its own
* stack or no stack at all.
@@ -7068,6 +7071,7 @@ void ftrace_graph_init_task(struct task_struct *t)
/* Make sure we do not use the parent ret_stack */
t->ret_stack = NULL;
t->curr_ret_stack = -1;
+ t->curr_ret_depth = -1;
if (ftrace_graph_active) {
struct ftrace_ret_stack *ret_stack;
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 65bd4616220d..4f3247a53259 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1834,7 +1834,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
* There could have been a race between checking
* record_disable and incrementing it.
*/
- synchronize_sched();
+ synchronize_rcu();
for_each_buffer_cpu(buffer, cpu) {
cpu_buffer = buffer->buffers[cpu];
rb_check_pages(cpu_buffer);
@@ -3151,7 +3151,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
* This prevents all writes to the buffer. Any attempt to write
* to the buffer after this will fail and return NULL.
*
- * The caller should call synchronize_sched() after this.
+ * The caller should call synchronize_rcu() after this.
*/
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
@@ -3253,7 +3253,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
* This prevents all writes to the buffer. Any attempt to write
* to the buffer after this will fail and return NULL.
*
- * The caller should call synchronize_sched() after this.
+ * The caller should call synchronize_rcu() after this.
*/
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
{
@@ -4191,7 +4191,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
void
ring_buffer_read_prepare_sync(void)
{
- synchronize_sched();
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
@@ -4363,7 +4363,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
atomic_inc(&cpu_buffer->record_disabled);
/* Make sure all commits have finished */
- synchronize_sched();
+ synchronize_rcu();
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -4496,7 +4496,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
goto out;
/*
- * We can't do a synchronize_sched here because this
+ * We can't do a synchronize_rcu here because this
* function can be called in atomic context.
* Normally this will be called from the same CPU as cpu.
* If not it's up to the caller to protect this.
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ff1c4b20cd0a..51612b4a603f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1681,7 +1681,7 @@ void tracing_reset(struct trace_buffer *buf, int cpu)
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
- synchronize_sched();
+ synchronize_rcu();
ring_buffer_reset_cpu(buffer, cpu);
ring_buffer_record_enable(buffer);
@@ -1698,7 +1698,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
ring_buffer_record_disable(buffer);
/* Make sure all commits have finished */
- synchronize_sched();
+ synchronize_rcu();
buf->time_start = buffer_ftrace_now(buf, buf->cpu);
@@ -2250,7 +2250,7 @@ void trace_buffered_event_disable(void)
preempt_enable();
/* Wait for all current users to finish */
- synchronize_sched();
+ synchronize_rcu();
for_each_tracing_cpu(cpu) {
free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
@@ -5398,7 +5398,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
if (tr->current_trace->reset)
tr->current_trace->reset(tr);
- /* Current trace needs to be nop_trace before synchronize_sched */
+ /* Current trace needs to be nop_trace before synchronize_rcu */
tr->current_trace = &nop_trace;
#ifdef CONFIG_TRACER_MAX_TRACE
@@ -5412,7 +5412,7 @@ static int tracing_set_tracer(struct trace_array *tr, const char *buf)
* The update_max_tr is called from interrupts disabled
* so a synchronized_sched() is sufficient.
*/
- synchronize_sched();
+ synchronize_rcu();
free_snapshot(tr);
}
#endif
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3b8c0e24ab30..447bd96ee658 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -512,12 +512,44 @@ enum {
* can only be modified by current, we can reuse trace_recursion.
*/
TRACE_IRQ_BIT,
+
+ /* Set if the function is in the set_graph_function file */
+ TRACE_GRAPH_BIT,
+
+ /*
+ * In the very unlikely case that an interrupt came in
+ * at a start of graph tracing, and we want to trace
+ * the function in that interrupt, the depth can be greater
+ * than zero, because of the preempted start of a previous
+ * trace. In an even more unlikely case, depth could be 2
+ * if a softirq interrupted the start of graph tracing,
+ * followed by an interrupt preempting a start of graph
+ * tracing in the softirq, and depth can even be 3
+ * if an NMI came in at the start of an interrupt function
+ * that preempted a softirq start of a function that
+ * preempted normal context!!!! Luckily, it can't be
+ * greater than 3, so the next two bits are a mask
+ * of what the depth is when we set TRACE_GRAPH_BIT
+ */
+
+ TRACE_GRAPH_DEPTH_START_BIT,
+ TRACE_GRAPH_DEPTH_END_BIT,
};
#define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit)))
+#define trace_recursion_depth() \
+ (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
+#define trace_recursion_set_depth(depth) \
+ do { \
+ current->trace_recursion &= \
+ ~(3 << TRACE_GRAPH_DEPTH_START_BIT); \
+ current->trace_recursion |= \
+ ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT; \
+ } while (0)
+
#define TRACE_CONTEXT_BITS 4
#define TRACE_FTRACE_START TRACE_FTRACE_BIT
@@ -843,8 +875,9 @@ extern void __trace_graph_return(struct trace_array *tr,
extern struct ftrace_hash *ftrace_graph_hash;
extern struct ftrace_hash *ftrace_graph_notrace_hash;
-static inline int ftrace_graph_addr(unsigned long addr)
+static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
+ unsigned long addr = trace->func;
int ret = 0;
preempt_disable_notrace();
@@ -855,6 +888,14 @@ static inline int ftrace_graph_addr(unsigned long addr)
}
if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
+
+ /*
+ * This needs to be cleared on the return functions
+ * when the depth is zero.
+ */
+ trace_recursion_set(TRACE_GRAPH_BIT);
+ trace_recursion_set_depth(trace->depth);
+
/*
* If no irqs are to be traced, but a set_graph_function
* is set, and called by an interrupt handler, we still
@@ -872,6 +913,13 @@ out:
return ret;
}
+static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
+{
+ if (trace_recursion_test(TRACE_GRAPH_BIT) &&
+ trace->depth == trace_recursion_depth())
+ trace_recursion_clear(TRACE_GRAPH_BIT);
+}
+
static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
int ret = 0;
@@ -885,7 +933,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
return ret;
}
#else
-static inline int ftrace_graph_addr(unsigned long addr)
+static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
{
return 1;
}
@@ -894,6 +942,8 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
return 0;
}
+static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
+{ }
#endif /* CONFIG_DYNAMIC_FTRACE */
extern unsigned int fgraph_max_depth;
@@ -901,7 +951,8 @@ extern unsigned int fgraph_max_depth;
static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
{
/* trace it when it is-nested-in or is a function enabled. */
- return !(trace->depth || ftrace_graph_addr(trace->func)) ||
+ return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
+ ftrace_graph_addr(trace)) ||
(trace->depth < 0) ||
(fgraph_max_depth && trace->depth >= fgraph_max_depth);
}
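
The two depth bits only need to represent values 0-3, which is all the mask arithmetic above encodes. A worked example on a plain variable (TRACE_GRAPH_DEPTH_START_BIT stands in for its enum value):

	unsigned long rec = 0;

	rec &= ~(3 << TRACE_GRAPH_DEPTH_START_BIT);	/* clear old depth */
	rec |= (2 & 3) << TRACE_GRAPH_DEPTH_START_BIT;	/* store depth 2 */
	/* ((rec >> TRACE_GRAPH_DEPTH_START_BIT) & 3) now reads back 2 */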
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 84a65173b1e9..27821480105e 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -570,11 +570,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
}
}
+ kfree(op_stack);
+ kfree(inverts);
return prog;
out_free:
kfree(op_stack);
- kfree(prog_stack);
kfree(inverts);
+ kfree(prog_stack);
return ERR_PTR(ret);
}
@@ -1614,7 +1616,7 @@ static int process_system_preds(struct trace_subsystem_dir *dir,
/*
* The calls can still be using the old filters.
- * Do a synchronize_sched() and to ensure all calls are
+ * Do a synchronize_rcu() to ensure all calls are
* done with them before we free them.
*/
tracepoint_synchronize_unregister();
@@ -1718,6 +1720,7 @@ static int create_filter(struct trace_event_call *call,
err = process_preds(call, filter_string, *filterp, pe);
if (err && set_str)
append_filter_err(pe, *filterp);
+ create_filter_finish(pe);
return err;
}
@@ -1845,7 +1848,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
if (filter) {
/*
* No event actually uses the system filter
- * we can free it without synchronize_sched().
+ * we can free it without synchronize_rcu().
*/
__free_filter(system->filter);
system->filter = filter;
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 2152d1e530cb..cd12ecb66eb9 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -732,8 +732,10 @@ int set_trigger_filter(char *filter_str,
/* The filter is for the 'trigger' event, not the triggered event */
ret = create_event_filter(file->event_call, filter_str, false, &filter);
- if (ret)
- goto out;
+ /*
+ * If create_event_filter() fails, the filter still needs to be
+ * freed, which the calling code will do with data->filter.
+ */
assign:
tmp = rcu_access_pointer(data->filter);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 169b3c44ee97..086af4f5c3e8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -118,8 +118,8 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
struct trace_seq *s, u32 flags);
/* Add a function return address to the trace stack on thread info.*/
-int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+static int
+ftrace_push_return_trace(unsigned long ret, unsigned long func,
unsigned long frame_pointer, unsigned long *retp)
{
unsigned long long calltime;
@@ -177,9 +177,31 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
current->ret_stack[index].retp = retp;
#endif
- *depth = current->curr_ret_stack;
+ return 0;
+}
+
+int function_graph_enter(unsigned long ret, unsigned long func,
+ unsigned long frame_pointer, unsigned long *retp)
+{
+ struct ftrace_graph_ent trace;
+
+ trace.func = func;
+ trace.depth = ++current->curr_ret_depth;
+
+ if (ftrace_push_return_trace(ret, func,
+ frame_pointer, retp))
+ goto out;
+
+ /* Only trace if the calling function expects to */
+ if (!ftrace_graph_entry(&trace))
+ goto out_ret;
return 0;
+ out_ret:
+ current->curr_ret_stack--;
+ out:
+ current->curr_ret_depth--;
+ return -EBUSY;
}
/* Retrieve a function return address to the trace stack on thread info.*/
@@ -241,7 +263,13 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
trace->func = current->ret_stack[index].func;
trace->calltime = current->ret_stack[index].calltime;
trace->overrun = atomic_read(&current->trace_overrun);
- trace->depth = index;
+ trace->depth = current->curr_ret_depth--;
+ /*
+ * We still want to trace interrupts coming in if
+ * max_depth is set to 1. Make sure the decrement is
+ * seen before ftrace_graph_return.
+ */
+ barrier();
}
/*
@@ -255,6 +283,12 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
ftrace_pop_return_trace(&trace, &ret, frame_pointer);
trace.rettime = trace_clock_local();
+ ftrace_graph_return(&trace);
+ /*
+ * ftrace_graph_return() may still access the current
+ * ret_stack structure; we need to make sure the update of
+ * curr_ret_stack happens after it.
+ */
barrier();
current->curr_ret_stack--;
/*
@@ -267,13 +301,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
return ret;
}
- /*
- * The trace should run after decrementing the ret counter
- * in case an interrupt were to come in. We don't want to
- * lose the interrupt if max_depth is set.
- */
- ftrace_graph_return(&trace);
-
if (unlikely(!ret)) {
ftrace_graph_stop();
WARN_ON(1);
@@ -482,6 +509,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
int cpu;
int pc;
+ ftrace_graph_addr_finish(trace);
+
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -505,6 +534,8 @@ void set_graph_array(struct trace_array *tr)
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
+ ftrace_graph_addr_finish(trace);
+
if (tracing_thresh &&
(trace->rettime - trace->calltime < tracing_thresh))
return;
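
function_graph_enter() lets every architecture's graph-entry hook shrink to a single call. A hedged sketch of the arch side after this change, modeled on the x86 variant (error paths elided):

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	/* On success, divert the return address through the tracer */
	if (!function_graph_enter(*parent, self_addr, frame_pointer, parent))
		*parent = return_hooker;
}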
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index b7357f9f82a3..98ea6d28df15 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -208,6 +208,8 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
unsigned long flags;
int pc;
+ ftrace_graph_addr_finish(trace);
+
if (!func_prolog_dec(tr, &data, &flags))
return;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index fec67188c4d2..adc153ab51c0 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -333,7 +333,7 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
* event_call related objects, which will be accessed in
* the kprobe_trace_func/kretprobe_trace_func.
*/
- synchronize_sched();
+ synchronize_rcu();
kfree(link); /* Ignored if link == NULL */
}
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index a86b303e6c67..7d04b9890755 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -270,6 +270,8 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
unsigned long flags;
int pc;
+ ftrace_graph_addr_finish(trace);
+
if (!func_prolog_preempt_disable(tr, &data, &pc))
return;
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index a3be42304485..46f2ab1e08a9 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -92,7 +92,7 @@ static __init int release_early_probes(void)
while (early_probes) {
tmp = early_probes;
early_probes = tmp->next;
- call_rcu_sched(tmp, rcu_free_old_probes);
+ call_rcu(tmp, rcu_free_old_probes);
}
return 0;
@@ -123,7 +123,7 @@ static inline void release_probes(struct tracepoint_func *old)
* cover both cases. So let us chain the SRCU and sched RCU
* callbacks to wait for both grace periods.
*/
- call_rcu_sched(&tp_probes->rcu, rcu_free_old_probes);
+ call_rcu(&tp_probes->rcu, rcu_free_old_probes);
}
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0280deac392e..392be4b252f6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3396,7 +3396,7 @@ static void put_unbound_pool(struct worker_pool *pool)
del_timer_sync(&pool->mayday_timer);
/* sched-RCU protected to allow dereferences from get_work_pool() */
- call_rcu_sched(&pool->rcu, rcu_free_pool);
+ call_rcu(&pool->rcu, rcu_free_pool);
}
/**
@@ -3503,14 +3503,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
- call_rcu_sched(&pwq->rcu, rcu_free_pwq);
+ call_rcu(&pwq->rcu, rcu_free_pwq);
/*
* If we're the last pwq going away, @wq is already dead and no one
* is gonna access it anymore. Schedule RCU free.
*/
if (is_last)
- call_rcu_sched(&wq->rcu, rcu_free_wq);
+ call_rcu(&wq->rcu, rcu_free_wq);
}
/**
@@ -4195,7 +4195,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
- call_rcu_sched(&wq->rcu, rcu_free_wq);
+ call_rcu(&wq->rcu, rcu_free_wq);
} else {
/*
* We're the sole accessor of @wq at this point. Directly
diff --git a/lib/Kconfig b/lib/Kconfig
index d5a5e2ebf286..79bc2eef9c14 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -624,3 +624,6 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
+
+config OBJAGG
+ tristate "objagg" if COMPILE_TEST
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1af29b8224fd..b3c91b9e32f8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1976,6 +1976,16 @@ config TEST_MEMCAT_P
If unsure, say N.
+config TEST_OBJAGG
+ tristate "Perform selftest on object aggreration manager"
+ default n
+ depends on OBJAGG
+ help
+ Enable this option to test the object aggregation manager on boot
+ (or module load).
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index db06d1237898..e1b59da71418 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -20,7 +20,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
idr.o int_sqrt.o extable.o \
- sha1.o chacha20.o irq_regs.o argv_split.o \
+ sha1.o chacha.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
@@ -75,6 +75,7 @@ obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
+obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -274,3 +275,4 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_OBJAGG) += objagg.o
diff --git a/lib/chacha20.c b/lib/chacha.c
index d907fec6a9ed..a46d2832dbab 100644
--- a/lib/chacha20.c
+++ b/lib/chacha.c
@@ -1,5 +1,5 @@
/*
- * ChaCha20 256-bit cipher algorithm, RFC7539
+ * The "hash function" used as the core of the ChaCha stream cipher (RFC7539)
*
* Copyright (C) 2015 Martin Willi
*
@@ -14,17 +14,16 @@
#include <linux/bitops.h>
#include <linux/cryptohash.h>
#include <asm/unaligned.h>
-#include <crypto/chacha20.h>
+#include <crypto/chacha.h>
-void chacha20_block(u32 *state, u8 *stream)
+static void chacha_permute(u32 *x, int nrounds)
{
- u32 x[16];
int i;
- for (i = 0; i < ARRAY_SIZE(x); i++)
- x[i] = state[i];
+ /* whitelist the allowed round counts */
+ WARN_ON_ONCE(nrounds != 20 && nrounds != 12);
- for (i = 0; i < 20; i += 2) {
+ for (i = 0; i < nrounds; i += 2) {
x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16);
x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16);
@@ -65,10 +64,54 @@ void chacha20_block(u32 *state, u8 *stream)
x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7);
x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7);
}
+}
+
+/**
+ * chacha_block - generate one keystream block and increment block counter
+ * @state: input state matrix (16 32-bit words)
+ * @stream: output keystream block (64 bytes)
+ * @nrounds: number of rounds (20 or 12; 20 is recommended)
+ *
+ * This is the ChaCha core, a function from 64-byte strings to 64-byte strings.
+ * The caller has already converted the endianness of the input. This function
+ * also handles incrementing the block counter in the input matrix.
+ */
+void chacha_block(u32 *state, u8 *stream, int nrounds)
+{
+ u32 x[16];
+ int i;
+
+ memcpy(x, state, 64);
+
+ chacha_permute(x, nrounds);
for (i = 0; i < ARRAY_SIZE(x); i++)
put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
state[12]++;
}
-EXPORT_SYMBOL(chacha20_block);
+EXPORT_SYMBOL(chacha_block);
+
+/**
+ * hchacha_block - abbreviated ChaCha core, for XChaCha
+ * @in: input state matrix (16 32-bit words)
+ * @out: output (8 32-bit words)
+ * @nrounds: number of rounds (20 or 12; 20 is recommended)
+ *
+ * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
+ * towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). HChaCha
+ * skips the final addition of the initial state, and outputs only certain words
+ * of the state. It should not be used for streaming directly.
+ */
+void hchacha_block(const u32 *in, u32 *out, int nrounds)
+{
+ u32 x[16];
+
+ memcpy(x, in, 64);
+
+ chacha_permute(x, nrounds);
+
+ memcpy(&out[0], &x[0], 16);
+ memcpy(&out[4], &x[12], 16);
+}
+EXPORT_SYMBOL(hchacha_block);
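
A hedged usage sketch of the renamed core: the caller owns the 16-word state in RFC 7539 layout (four constants, eight key words, a block counter, three nonce words), and chacha_block() emits one keystream block while bumping the counter:

	u32 state[16];
	u8 block[CHACHA_BLOCK_SIZE];	/* 64 bytes */

	state[0] = 0x61707865;		/* "expa" */
	state[1] = 0x3320646e;		/* "nd 3" */
	state[2] = 0x79622d32;		/* "2-by" */
	state[3] = 0x6b206574;		/* "te k" */
	/* state[4..11] = key, state[12] = counter, state[13..15] = nonce,
	 * all assumed to be filled in by the caller */

	chacha_block(state, block, 20);	/* state[12] is now incremented */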
diff --git a/lib/cordic.c b/lib/cordic.c
index 6cf477839ebd..8ef27c12956f 100644
--- a/lib/cordic.c
+++ b/lib/cordic.c
@@ -16,15 +16,6 @@
#include <linux/module.h>
#include <linux/cordic.h>
-#define CORDIC_ANGLE_GEN 39797
-#define CORDIC_PRECISION_SHIFT 16
-#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
-
-#define FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
-#define FLOAT(X) (((X) >= 0) \
- ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
- : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
-
static const s32 arctan_table[] = {
2949120,
1740967,
@@ -64,16 +55,16 @@ struct cordic_iq cordic_calc_iq(s32 theta)
coord.q = 0;
angle = 0;
- theta = FIXED(theta);
+ theta = CORDIC_FIXED(theta);
signtheta = (theta < 0) ? -1 : 1;
- theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) -
- FIXED(180) * signtheta;
+ theta = ((theta + CORDIC_FIXED(180) * signtheta) % CORDIC_FIXED(360)) -
+ CORDIC_FIXED(180) * signtheta;
- if (FLOAT(theta) > 90) {
- theta -= FIXED(180);
+ if (CORDIC_FLOAT(theta) > 90) {
+ theta -= CORDIC_FIXED(180);
signx = -1;
- } else if (FLOAT(theta) < -90) {
- theta += FIXED(180);
+ } else if (CORDIC_FLOAT(theta) < -90) {
+ theta += CORDIC_FIXED(180);
signx = -1;
}
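
The removed FIXED()/FLOAT() macros now live in include/linux/cordic.h under the CORDIC_ prefix; the representation is unchanged Q16 fixed point. For illustration:

	s32 f = CORDIC_FIXED(45);	/* 45 << 16 == 2949120, the first arctan_table entry */
	s32 d = CORDIC_FLOAT(f);	/* rounds back to 45 */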
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 70935ed91125..14afeeb7d6ef 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -135,7 +135,6 @@ static void fill_pool(void)
if (!new)
return;
- kmemleak_ignore(new);
raw_spin_lock_irqsave(&pool_lock, flags);
hlist_add_head(&new->node, &obj_pool);
debug_objects_allocated++;
@@ -1128,7 +1127,6 @@ static int __init debug_objects_replace_static_objects(void)
obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
if (!obj)
goto free;
- kmemleak_ignore(obj);
hlist_add_head(&obj->node, &objects);
}
@@ -1184,7 +1182,8 @@ void __init debug_objects_mem_init(void)
obj_cache = kmem_cache_create("debug_objects_cache",
sizeof (struct debug_obj), 0,
- SLAB_DEBUG_OBJECTS, NULL);
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
+ NULL);
if (!obj_cache || debug_objects_replace_static_objects()) {
debug_objects_enabled = 0;
diff --git a/lib/gcd.c b/lib/gcd.c
index 227dea924425..7948ab27f0a4 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -10,7 +10,7 @@
* has decent hardware division.
*/
-#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) && !defined(CPU_NO_EFFICIENT_FFS)
+#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS)
/* If __ffs is available, the even/odd algorithm benchmarks slower. */
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 7ebccb5c1637..1928009f506e 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -6,6 +6,7 @@
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>
+#include <linux/scatterlist.h>
#define PIPE_PARANOIA /* for now */
@@ -560,6 +561,38 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
return bytes;
}
+static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
+ __wsum *csum, struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t n, r;
+ size_t off = 0;
+ __wsum sum = *csum, next;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ bytes = n = push_pipe(i, bytes, &idx, &r);
+ if (unlikely(!n))
+ return 0;
+ for ( ; n; idx = next_idx(idx, pipe), r = 0) {
+ size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
+ char *p = kmap_atomic(pipe->bufs[idx].page);
+ next = csum_partial_copy_nocheck(addr, p + r, chunk, 0);
+ sum = csum_block_add(sum, next, off);
+ kunmap_atomic(p);
+ i->idx = idx;
+ i->iov_offset = r + chunk;
+ n -= chunk;
+ off += chunk;
+ addr += chunk;
+ }
+ i->count -= bytes;
+ *csum = sum;
+ return bytes;
+}
+
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
@@ -1432,14 +1465,19 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
struct iov_iter *i)
{
const char *from = addr;
+ __wsum *csum = csump;
__wsum sum, next;
size_t off = 0;
+
+ if (unlikely(iov_iter_is_pipe(i)))
+ return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+
sum = *csum;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
+ if (unlikely(iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
}
@@ -1474,6 +1512,21 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
+size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
+ struct iov_iter *i)
+{
+ struct ahash_request *hash = hashp;
+ struct scatterlist sg;
+ size_t copied;
+
+ copied = copy_to_iter(addr, bytes, i);
+ sg_init_one(&sg, addr, copied);
+ ahash_request_set_crypt(hash, &sg, NULL, copied);
+ crypto_ahash_update(hash);
+ return copied;
+}
+EXPORT_SYMBOL(hash_and_copy_to_iter);
+
int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
size_t size = i->count;
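
Caller-side shape of the checksumming copy, as a hedged sketch (buf, len and an initialized iov_iter are assumed); with this patch a pipe-backed iterator is handled too, instead of hitting the WARN_ON:

	__wsum csum = 0;	/* running checksum, folded by the caller later */
	size_t copied;

	copied = csum_and_copy_to_iter(buf, len, &csum, &iter);
	if (copied != len)
		return -EFAULT;	/* short copy */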
diff --git a/lib/objagg.c b/lib/objagg.c
new file mode 100644
index 000000000000..c9b457a91153
--- /dev/null
+++ b/lib/objagg.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rhashtable.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <linux/objagg.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/objagg.h>
+
+struct objagg {
+ const struct objagg_ops *ops;
+ void *priv;
+ struct rhashtable obj_ht;
+ struct rhashtable_params ht_params;
+ struct list_head obj_list;
+ unsigned int obj_count;
+};
+
+struct objagg_obj {
+ struct rhash_head ht_node; /* member of objagg->obj_ht */
+ struct list_head list; /* member of objagg->obj_list */
+ struct objagg_obj *parent; /* if the object is nested, this
+ * holds pointer to parent, otherwise NULL
+ */
+ union {
+ void *delta_priv; /* user delta private */
+ void *root_priv; /* user root private */
+ };
+ unsigned int refcount; /* counts number of users of this object
+ * including nested objects
+ */
+ struct objagg_obj_stats stats;
+ unsigned long obj[0];
+};
+
+static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
+{
+ return ++objagg_obj->refcount;
+}
+
+static unsigned int objagg_obj_ref_dec(struct objagg_obj *objagg_obj)
+{
+ return --objagg_obj->refcount;
+}
+
+static void objagg_obj_stats_inc(struct objagg_obj *objagg_obj)
+{
+ objagg_obj->stats.user_count++;
+ objagg_obj->stats.delta_user_count++;
+ if (objagg_obj->parent)
+ objagg_obj->parent->stats.delta_user_count++;
+}
+
+static void objagg_obj_stats_dec(struct objagg_obj *objagg_obj)
+{
+ objagg_obj->stats.user_count--;
+ objagg_obj->stats.delta_user_count--;
+ if (objagg_obj->parent)
+ objagg_obj->parent->stats.delta_user_count--;
+}
+
+static bool objagg_obj_is_root(const struct objagg_obj *objagg_obj)
+{
+ /* Nesting is not supported, so we can use ->parent
+ * to figure out if the object is root.
+ */
+ return !objagg_obj->parent;
+}
+
+/**
+ * objagg_obj_root_priv - obtains root private for an object
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * If the object is itself a root, its private is returned
+ * directly; otherwise the parent is the root and the parent's
+ * private is returned instead.
+ *
+ * Returns a user private root pointer.
+ */
+const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj)
+{
+ if (objagg_obj_is_root(objagg_obj))
+ return objagg_obj->root_priv;
+ WARN_ON(!objagg_obj_is_root(objagg_obj->parent));
+ return objagg_obj->parent->root_priv;
+}
+EXPORT_SYMBOL(objagg_obj_root_priv);
+
+/**
+ * objagg_obj_delta_priv - obtains delta private for an object
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns the user private delta pointer, or NULL in case the passed
+ * object is a root.
+ */
+const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj)
+{
+ if (objagg_obj_is_root(objagg_obj))
+ return NULL;
+ return objagg_obj->delta_priv;
+}
+EXPORT_SYMBOL(objagg_obj_delta_priv);
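+
+/*
+ * Usage sketch (illustrative only): for an object obtained from
+ * objagg_obj_get(), the two accessors above recover whatever the user
+ * ops callbacks returned:
+ *
+ *	root = objagg_obj_root_priv(objagg_obj);
+ *	delta = objagg_obj_delta_priv(objagg_obj);
+ *	if (!delta)
+ *		pr_debug("object is a root\n");
+ */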
+
+/**
+ * objagg_obj_raw - obtains object user private pointer
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns the user private pointer as it was passed to objagg_obj_get() in the "obj" arg.
+ */
+const void *objagg_obj_raw(const struct objagg_obj *objagg_obj)
+{
+ return objagg_obj->obj;
+}
+EXPORT_SYMBOL(objagg_obj_raw);
+
+static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj)
+{
+ return rhashtable_lookup_fast(&objagg->obj_ht, obj, objagg->ht_params);
+}
+
+static int objagg_obj_parent_assign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj,
+ struct objagg_obj *parent)
+{
+ void *delta_priv;
+
+ delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
+ objagg_obj->obj);
+ if (IS_ERR(delta_priv))
+ return PTR_ERR(delta_priv);
+
+ /* The user returned a delta private; that means our
+ * object can be aggregated into the parent.
+ */
+ objagg_obj->parent = parent;
+ objagg_obj->delta_priv = delta_priv;
+ objagg_obj_ref_inc(objagg_obj->parent);
+ trace_objagg_obj_parent_assign(objagg, objagg_obj,
+ parent,
+ parent->refcount);
+ return 0;
+}
+
+static int objagg_obj_parent_lookup_assign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ struct objagg_obj *objagg_obj_cur;
+ int err;
+
+ list_for_each_entry(objagg_obj_cur, &objagg->obj_list, list) {
+ /* Nesting is not supported. In case the object
+ * is not root, it cannot be assigned as parent.
+ */
+ if (!objagg_obj_is_root(objagg_obj_cur))
+ continue;
+ err = objagg_obj_parent_assign(objagg, objagg_obj,
+ objagg_obj_cur);
+ if (!err)
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+ struct objagg_obj *objagg_obj);
+
+static void objagg_obj_parent_unassign(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_parent_unassign(objagg, objagg_obj,
+ objagg_obj->parent,
+ objagg_obj->parent->refcount);
+ objagg->ops->delta_destroy(objagg->priv, objagg_obj->delta_priv);
+ __objagg_obj_put(objagg, objagg_obj->parent);
+}
+
+static int objagg_obj_root_create(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ objagg_obj->root_priv = objagg->ops->root_create(objagg->priv,
+ objagg_obj->obj);
+ if (IS_ERR(objagg_obj->root_priv))
+ return PTR_ERR(objagg_obj->root_priv);
+
+ trace_objagg_obj_root_create(objagg, objagg_obj);
+ return 0;
+}
+
+static void objagg_obj_root_destroy(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_root_destroy(objagg, objagg_obj);
+ objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv);
+}
+
+static int objagg_obj_init(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ int err;
+
+ /* See if the object can be aggregated under an existing one. */
+ err = objagg_obj_parent_lookup_assign(objagg, objagg_obj);
+ if (!err)
+ return 0;
+ /* If aggregation is not possible, make the object a root. */
+ return objagg_obj_root_create(objagg, objagg_obj);
+}
+
+static void objagg_obj_fini(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ if (!objagg_obj_is_root(objagg_obj))
+ objagg_obj_parent_unassign(objagg, objagg_obj);
+ else
+ objagg_obj_root_destroy(objagg, objagg_obj);
+}
+
+static struct objagg_obj *objagg_obj_create(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+ int err;
+
+ objagg_obj = kzalloc(sizeof(*objagg_obj) + objagg->ops->obj_size,
+ GFP_KERNEL);
+ if (!objagg_obj)
+ return ERR_PTR(-ENOMEM);
+ objagg_obj_ref_inc(objagg_obj);
+ memcpy(objagg_obj->obj, obj, objagg->ops->obj_size);
+
+ err = objagg_obj_init(objagg, objagg_obj);
+ if (err)
+ goto err_obj_init;
+
+ err = rhashtable_insert_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+ objagg->ht_params);
+ if (err)
+ goto err_ht_insert;
+ list_add(&objagg_obj->list, &objagg->obj_list);
+ objagg->obj_count++;
+ trace_objagg_obj_create(objagg, objagg_obj);
+
+ return objagg_obj;
+
+err_ht_insert:
+ objagg_obj_fini(objagg, objagg_obj);
+err_obj_init:
+ kfree(objagg_obj);
+ return ERR_PTR(err);
+}
+
+static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+
+ /* First, try to find the object exactly as the user passed it;
+ * perhaps it is already in use.
+ */
+ objagg_obj = objagg_obj_lookup(objagg, obj);
+ if (objagg_obj) {
+ objagg_obj_ref_inc(objagg_obj);
+ return objagg_obj;
+ }
+
+ return objagg_obj_create(objagg, obj);
+}
+
+/**
+ * objagg_obj_get - gets an object within objagg instance
+ * @objagg: objagg instance
+ * @obj: user-specific private object pointer
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Size of the "obj" memory is specified in "objagg->ops".
+ *
+ * There are 3 main options this function wraps:
+ * 1) The object according to "obj" already exists. In that case
+ *    the reference counter is incremented and the object is returned.
+ * 2) The object does not exist, but it can be aggregated within
+ * another object. In that case, user ops->delta_create() is called
+ * to obtain delta data and a new object is created with returned
+ * user-delta private pointer.
+ * 3) The object does not exist and cannot be aggregated into
+ * any of the existing objects. In that case, user ops->root_create()
+ * is called to create the root and a new object is created with
+ * returned user-root private pointer.
+ *
+ * Returns a pointer to an objagg object instance in case of success,
+ * otherwise it returns a pointer error using the ERR_PTR() macro.
+ */
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj)
+{
+ struct objagg_obj *objagg_obj;
+
+ objagg_obj = __objagg_obj_get(objagg, obj);
+ if (IS_ERR(objagg_obj))
+ return objagg_obj;
+ objagg_obj_stats_inc(objagg_obj);
+ trace_objagg_obj_get(objagg, objagg_obj, objagg_obj->refcount);
+ return objagg_obj;
+}
+EXPORT_SYMBOL(objagg_obj_get);
+
+static void objagg_obj_destroy(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_destroy(objagg, objagg_obj);
+ --objagg->obj_count;
+ list_del(&objagg_obj->list);
+ rhashtable_remove_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+ objagg->ht_params);
+ objagg_obj_fini(objagg, objagg_obj);
+ kfree(objagg_obj);
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+ struct objagg_obj *objagg_obj)
+{
+ if (!objagg_obj_ref_dec(objagg_obj))
+ objagg_obj_destroy(objagg, objagg_obj);
+}
+
+/**
+ * objagg_obj_put - puts an object within objagg instance
+ * @objagg: objagg instance
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Symmetric to objagg_obj_get().
+ */
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj)
+{
+ trace_objagg_obj_put(objagg, objagg_obj, objagg_obj->refcount);
+ objagg_obj_stats_dec(objagg_obj);
+ __objagg_obj_put(objagg, objagg_obj);
+}
+EXPORT_SYMBOL(objagg_obj_put);
+
+/**
+ * objagg_create - creates a new objagg instance
+ * @ops: user-specific callbacks
+ * @priv: pointer to a private data passed to the ops
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The purpose of the library is to provide an infrastructure to
+ * aggregate user-specified objects. The library does not care about the
+ * type of the object. The user fills in ops, which take care of the
+ * specific user object manipulation.
+ *
+ * As a simple example, consider integer numbers. Take number 8
+ * as a root object. It can aggregate number 9 with delta 1,
+ * number 10 with delta 2, etc. This example is implemented as
+ * part of a testing module in the test_objagg.c file.
+ *
+ * Each objagg instance contains multiple trees. Each tree node is
+ * represented by "an object". In the current implementation there can be
+ * only root and leaf nodes. Leaf nodes are called deltas.
+ * But in general, this can be easily extended for intermediate nodes.
+ * In that extension, a delta would be associated with all non-root
+ * nodes.
+ *
+ * Returns a pointer to a newly created objagg instance in case of success,
+ * otherwise it returns a pointer error using the ERR_PTR() macro.
+ */
+struct objagg *objagg_create(const struct objagg_ops *ops, void *priv)
+{
+ struct objagg *objagg;
+ int err;
+
+ if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy ||
+ !ops->delta_create || !ops->delta_destroy))
+ return ERR_PTR(-EINVAL);
+ objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
+ if (!objagg)
+ return ERR_PTR(-ENOMEM);
+ objagg->ops = ops;
+ objagg->priv = priv;
+ INIT_LIST_HEAD(&objagg->obj_list);
+
+ objagg->ht_params.key_len = ops->obj_size;
+ objagg->ht_params.key_offset = offsetof(struct objagg_obj, obj);
+ objagg->ht_params.head_offset = offsetof(struct objagg_obj, ht_node);
+
+ err = rhashtable_init(&objagg->obj_ht, &objagg->ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ trace_objagg_create(objagg);
+ return objagg;
+
+err_rhashtable_init:
+ kfree(objagg);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(objagg_create);
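+
+/*
+ * Usage sketch (illustrative only; the "my_*" names are hypothetical
+ * placeholders, see test_objagg.c below for a complete implementation):
+ *
+ *	static const struct objagg_ops my_ops = {
+ *		.obj_size = sizeof(struct my_key),
+ *		.delta_create = my_delta_create,
+ *		.delta_destroy = my_delta_destroy,
+ *		.root_create = my_root_create,
+ *		.root_destroy = my_root_destroy,
+ *	};
+ *
+ *	objagg = objagg_create(&my_ops, priv);
+ *	objagg_obj = objagg_obj_get(objagg, &key);
+ *	...
+ *	objagg_obj_put(objagg, objagg_obj);
+ *	objagg_destroy(objagg);
+ */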
+
+/**
+ * objagg_destroy - destroys an objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_destroy(struct objagg *objagg)
+{
+ trace_objagg_destroy(objagg);
+ WARN_ON(!list_empty(&objagg->obj_list));
+ rhashtable_destroy(&objagg->obj_ht);
+ kfree(objagg);
+}
+EXPORT_SYMBOL(objagg_destroy);
+
+static int objagg_stats_info_sort_cmp_func(const void *a, const void *b)
+{
+ const struct objagg_obj_stats_info *stats_info1 = a;
+ const struct objagg_obj_stats_info *stats_info2 = b;
+
+ if (stats_info1->is_root != stats_info2->is_root)
+ return stats_info2->is_root - stats_info1->is_root;
+ if (stats_info1->stats.delta_user_count !=
+ stats_info2->stats.delta_user_count)
+ return stats_info2->stats.delta_user_count -
+ stats_info1->stats.delta_user_count;
+ return stats_info2->stats.user_count - stats_info1->stats.user_count;
+}
+
+/**
+ * objagg_stats_get - obtains stats of the objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The returned structure contains statistics of all objects
+ * currently in use, ordered by the following rules:
+ * 1) Root objects are always on lower indexes than the rest.
+ * 2) Objects with higher delta user count are always on lower
+ * indexes.
+ * 3) In case multiple objects have the same delta user count,
+ * the objects are ordered by user count.
+ *
+ * Returns a pointer to a stats instance in case of success,
+ * otherwise it returns a pointer error using the ERR_PTR() macro.
+ */
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
+{
+ struct objagg_stats *objagg_stats;
+ struct objagg_obj *objagg_obj;
+ size_t alloc_size;
+ int i;
+
+ alloc_size = sizeof(*objagg_stats) +
+ sizeof(objagg_stats->stats_info[0]) * objagg->obj_count;
+ objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
+ if (!objagg_stats)
+ return ERR_PTR(-ENOMEM);
+
+ i = 0;
+ list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
+ memcpy(&objagg_stats->stats_info[i].stats, &objagg_obj->stats,
+ sizeof(objagg_stats->stats_info[0].stats));
+ objagg_stats->stats_info[i].objagg_obj = objagg_obj;
+ objagg_stats->stats_info[i].is_root =
+ objagg_obj_is_root(objagg_obj);
+ i++;
+ }
+ objagg_stats->stats_info_count = i;
+
+ sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
+ sizeof(struct objagg_obj_stats_info),
+ objagg_stats_info_sort_cmp_func, NULL);
+
+ return objagg_stats;
+}
+EXPORT_SYMBOL(objagg_stats_get);
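+
+/*
+ * Usage sketch (illustrative only):
+ *
+ *	stats = objagg_stats_get(objagg);
+ *	if (IS_ERR(stats))
+ *		return PTR_ERR(stats);
+ *	for (i = 0; i < stats->stats_info_count; i++)
+ *		pr_debug("is_root=%d users=%u delta_users=%u\n",
+ *			 stats->stats_info[i].is_root,
+ *			 stats->stats_info[i].stats.user_count,
+ *			 stats->stats_info[i].stats.delta_user_count);
+ *	objagg_stats_put(stats);
+ */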
+
+/**
+ * objagg_stats_put - puts stats of the objagg instance
+ * @objagg_stats: objagg instance stats
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_stats_put(const struct objagg_stats *objagg_stats)
+{
+ kfree(objagg_stats);
+}
+EXPORT_SYMBOL(objagg_stats_put);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Object aggregation manager");
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index de10b8c0bff6..9877682e49c7 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -181,7 +181,7 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
percpu_ref_get(ref); /* put after confirmation */
- call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+ call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1106bb6aa01e..14d51548bea6 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -784,11 +784,11 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
while (radix_tree_is_internal_node(node)) {
unsigned offset;
- if (node == RADIX_TREE_RETRY)
- goto restart;
parent = entry_to_node(node);
offset = radix_tree_descend(parent, &node, index);
slot = parent->slots + offset;
+ if (node == RADIX_TREE_RETRY)
+ goto restart;
if (parent->shift == 0)
break;
}
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 2f8b61dfd9b0..7ed43eaa02ef 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -18,6 +18,21 @@ quiet_cmd_unroll = UNROLL $@
ifeq ($(CONFIG_ALTIVEC),y)
altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
+
+ifdef CONFIG_CC_IS_CLANG
+# clang ppc port does not yet support -maltivec when -msoft-float is
+# enabled. A future release of clang will resolve this:
+# https://bugs.llvm.org/show_bug.cgi?id=31177
+CFLAGS_REMOVE_altivec1.o += -msoft-float
+CFLAGS_REMOVE_altivec2.o += -msoft-float
+CFLAGS_REMOVE_altivec4.o += -msoft-float
+CFLAGS_REMOVE_altivec8.o += -msoft-float
+CFLAGS_REMOVE_vpermxor1.o += -msoft-float
+CFLAGS_REMOVE_vpermxor2.o += -msoft-float
+CFLAGS_REMOVE_vpermxor4.o += -msoft-float
+CFLAGS_REMOVE_vpermxor8.o += -msoft-float
+endif
endif
# The GCC option -ffreestanding is required in order to compile code containing
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 30526afa8343..852ffa5160f1 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1179,8 +1179,7 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
unsigned int hash)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
- static struct rhash_head __rcu *rhnull =
- (struct rhash_head __rcu *)NULLS_MARKER(0);
+ static struct rhash_head __rcu *rhnull;
unsigned int index = hash & ((1 << tbl->nest) - 1);
unsigned int size = tbl->size >> tbl->nest;
unsigned int subhash = hash;
@@ -1198,8 +1197,11 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
subhash >>= shift;
}
- if (!ntbl)
+ if (!ntbl) {
+ if (!rhnull)
+ INIT_RHT_NULLS_HEAD(rhnull);
return &rhnull;
+ }
return &ntbl[subhash].bucket;
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index fdd1b8aa8ac6..65c2d06250a6 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -20,6 +20,47 @@
#include <linux/sbitmap.h>
#include <linux/seq_file.h>
+/*
+ * See if we have deferred clears that we can batch move
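+ *
+ * Bits freed via sbitmap_deferred_clear_bit() accumulate in ->cleared;
+ * this helper folds them back into ->word. For example, with
+ * word = 0b1111 and cleared = 0b0101, a successful pass leaves
+ * word = 0b1010 and cleared = 0, so the two lazily freed bits can be
+ * allocated again.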
+ */
+static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
+{
+ unsigned long mask, val;
+ unsigned long __maybe_unused flags;
+ bool ret = false;
+
+ /* Silence bogus lockdep warning */
+#if defined(CONFIG_LOCKDEP)
+ local_irq_save(flags);
+#endif
+ spin_lock(&sb->map[index].swap_lock);
+
+ if (!sb->map[index].cleared)
+ goto out_unlock;
+
+ /*
+ * First get a stable cleared mask, setting the old mask to 0.
+ */
+ do {
+ mask = sb->map[index].cleared;
+ } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);
+
+ /*
+ * Now clear the masked bits in our free word
+ */
+ do {
+ val = sb->map[index].word;
+ } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);
+
+ ret = true;
+out_unlock:
+ spin_unlock(&sb->map[index].swap_lock);
+#if defined(CONFIG_LOCKDEP)
+ local_irq_restore(flags);
+#endif
+ return ret;
+}
+
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
gfp_t flags, int node)
{
@@ -59,6 +100,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
for (i = 0; i < sb->map_nr; i++) {
sb->map[i].depth = min(depth, bits_per_word);
depth -= sb->map[i].depth;
+ spin_lock_init(&sb->map[i].swap_lock);
}
return 0;
}
@@ -69,6 +111,9 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
unsigned int bits_per_word = 1U << sb->shift;
unsigned int i;
+ for (i = 0; i < sb->map_nr; i++)
+ sbitmap_deferred_clear(sb, i);
+
sb->depth = depth;
sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
@@ -111,6 +156,24 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
return nr;
}
+static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
+ unsigned int alloc_hint, bool round_robin)
+{
+ int nr;
+
+ do {
+ nr = __sbitmap_get_word(&sb->map[index].word,
+ sb->map[index].depth, alloc_hint,
+ !round_robin);
+ if (nr != -1)
+ break;
+ if (!sbitmap_deferred_clear(sb, index))
+ break;
+ } while (1);
+
+ return nr;
+}
+
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
unsigned int i, index;
@@ -118,24 +181,28 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
index = SB_NR_TO_INDEX(sb, alloc_hint);
+ /*
+ * Unless we're doing round robin tag allocation, just use the
+ * alloc_hint to find the right word index. No point in looping
+ * twice in find_next_zero_bit() for that case.
+ */
+ if (round_robin)
+ alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
+ else
+ alloc_hint = 0;
+
for (i = 0; i < sb->map_nr; i++) {
- nr = __sbitmap_get_word(&sb->map[index].word,
- sb->map[index].depth,
- SB_NR_TO_BIT(sb, alloc_hint),
- !round_robin);
+ nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
+ round_robin);
if (nr != -1) {
nr += index << sb->shift;
break;
}
/* Jump to next index. */
- index++;
- alloc_hint = index << sb->shift;
-
- if (index >= sb->map_nr) {
+ alloc_hint = 0;
+ if (++index >= sb->map_nr)
index = 0;
- alloc_hint = 0;
- }
}
return nr;
@@ -151,6 +218,7 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
index = SB_NR_TO_INDEX(sb, alloc_hint);
for (i = 0; i < sb->map_nr; i++) {
+again:
nr = __sbitmap_get_word(&sb->map[index].word,
min(sb->map[index].depth, shallow_depth),
SB_NR_TO_BIT(sb, alloc_hint), true);
@@ -159,6 +227,9 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
break;
}
+ if (sbitmap_deferred_clear(sb, index))
+ goto again;
+
/* Jump to next index. */
index++;
alloc_hint = index << sb->shift;
@@ -178,7 +249,7 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb)
unsigned int i;
for (i = 0; i < sb->map_nr; i++) {
- if (sb->map[i].word)
+ if (sb->map[i].word & ~sb->map[i].cleared)
return true;
}
return false;
@@ -191,9 +262,10 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb)
for (i = 0; i < sb->map_nr; i++) {
const struct sbitmap_word *word = &sb->map[i];
+ unsigned long mask = word->word & ~word->cleared;
unsigned long ret;
- ret = find_first_zero_bit(&word->word, word->depth);
+ ret = find_first_zero_bit(&mask, word->depth);
if (ret < word->depth)
return true;
}
@@ -201,23 +273,36 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb)
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
-unsigned int sbitmap_weight(const struct sbitmap *sb)
+static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
unsigned int i, weight = 0;
for (i = 0; i < sb->map_nr; i++) {
const struct sbitmap_word *word = &sb->map[i];
- weight += bitmap_weight(&word->word, word->depth);
+ if (set)
+ weight += bitmap_weight(&word->word, word->depth);
+ else
+ weight += bitmap_weight(&word->cleared, word->depth);
}
return weight;
}
-EXPORT_SYMBOL_GPL(sbitmap_weight);
+
+static unsigned int sbitmap_weight(const struct sbitmap *sb)
+{
+ return __sbitmap_weight(sb, true);
+}
+
+static unsigned int sbitmap_cleared(const struct sbitmap *sb)
+{
+ return __sbitmap_weight(sb, false);
+}
void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
seq_printf(m, "depth=%u\n", sb->depth);
- seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
+ seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
+ seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
@@ -325,6 +410,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
sbq->min_shallow_depth = UINT_MAX;
sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
atomic_set(&sbq->wake_index, 0);
+ atomic_set(&sbq->ws_active, 0);
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
if (!sbq->ws) {
@@ -440,6 +526,9 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
int i, wake_index;
+ if (!atomic_read(&sbq->ws_active))
+ return NULL;
+
wake_index = atomic_read(&sbq->wake_index);
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[wake_index];
@@ -509,7 +598,8 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
unsigned int cpu)
{
- sbitmap_clear_bit_unlock(&sbq->sb, nr);
+ sbitmap_deferred_clear_bit(&sbq->sb, nr);
+
/*
* Pairs with the memory barrier in set_current_state() to ensure the
* proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
@@ -564,6 +654,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
+ seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
seq_puts(m, "ws={\n");
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
@@ -579,3 +670,48 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
+
+void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait)
+{
+ if (!sbq_wait->sbq) {
+ sbq_wait->sbq = sbq;
+ atomic_inc(&sbq->ws_active);
+ }
+ add_wait_queue(&ws->wait, &sbq_wait->wait);
+}
+EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
+
+void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
+{
+ list_del_init(&sbq_wait->wait.entry);
+ if (sbq_wait->sbq) {
+ atomic_dec(&sbq_wait->sbq->ws_active);
+ sbq_wait->sbq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);
+
+void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
+ struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait, int state)
+{
+ if (!sbq_wait->sbq) {
+ atomic_inc(&sbq->ws_active);
+ sbq_wait->sbq = sbq;
+ }
+ prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
+}
+EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
+
+void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
+ struct sbq_wait *sbq_wait)
+{
+ finish_wait(&ws->wait, &sbq_wait->wait);
+ if (sbq_wait->sbq) {
+ atomic_dec(&sbq->ws_active);
+ sbq_wait->sbq = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
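+
+/*
+ * Usage sketch (illustrative only, loosely modeled on how a tag allocator
+ * such as blk-mq waits for a free bit; DEFINE_SBQ_WAIT() and
+ * __sbitmap_queue_get() are assumed to come from the sbitmap header):
+ *
+ *	DEFINE_SBQ_WAIT(wait);
+ *	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, &wait_index);
+ *
+ *	do {
+ *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
+ *		nr = __sbitmap_queue_get(sbq);
+ *		if (nr >= 0)
+ *			break;
+ *		io_schedule();
+ *	} while (1);
+ *	sbitmap_finish_wait(sbq, ws, &wait);
+ */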
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index aa22bcaec1dc..f3e570722a7e 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -39,6 +39,7 @@
#define SKB_HASH 0x1234aaab
#define SKB_QUEUE_MAP 123
#define SKB_VLAN_TCI 0xffff
+#define SKB_VLAN_PRESENT 1
#define SKB_DEV_IFINDEX 577
#define SKB_DEV_TYPE 588
@@ -725,8 +726,8 @@ static struct bpf_test tests[] = {
CLASSIC,
{ },
{
- { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
- { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
+ { 1, SKB_VLAN_TCI },
+ { 10, SKB_VLAN_TCI }
},
},
{
@@ -739,8 +740,8 @@ static struct bpf_test tests[] = {
CLASSIC,
{ },
{
- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+ { 1, SKB_VLAN_PRESENT },
+ { 10, SKB_VLAN_PRESENT }
},
},
{
@@ -5289,8 +5290,8 @@ static struct bpf_test tests[] = {
#endif
{ },
{
- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+ { 1, SKB_VLAN_PRESENT },
+ { 10, SKB_VLAN_PRESENT }
},
.fill_helper = bpf_fill_maxinsns6,
.expected_errcode = -ENOTSUPP,
@@ -6493,6 +6494,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
skb->hash = SKB_HASH;
skb->queue_mapping = SKB_QUEUE_MAP;
skb->vlan_tci = SKB_VLAN_TCI;
+ skb->vlan_present = SKB_VLAN_PRESENT;
skb->vlan_proto = htons(ETH_P_IP);
dev_net_set(&dev, &init_net);
skb->dev = &dev;
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
index d5a06addeb27..bf864c73e462 100644
--- a/lib/test_debug_virtual.c
+++ b/lib/test_debug_virtual.c
@@ -5,6 +5,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sizes.h>
+#include <linux/io.h>
#include <asm/page.h>
#ifdef CONFIG_MIPS
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 626f580b4ff7..5144899d3c6b 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -99,7 +99,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
const char *q = *result++;
size_t amount = strlen(q);
- strncpy(p, q, amount);
+ memcpy(p, q, amount);
p += amount;
*p++ = ' ';
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e3ddd836491f..d82d022111e0 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1214,7 +1214,6 @@ void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
dev_info(test_dev->dev, "removing interface\n");
misc_deregister(&test_dev->misc_dev);
- kfree(&test_dev->misc_dev.name);
mutex_unlock(&test_dev->config_mutex);
mutex_unlock(&test_dev->trigger_mutex);
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
new file mode 100644
index 000000000000..ab57144bb0cd
--- /dev/null
+++ b/lib/test_objagg.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/objagg.h>
+
+struct tokey {
+ unsigned int id;
+};
+
+#define NUM_KEYS 32
+
+static int key_id_index(unsigned int key_id)
+{
+ if (key_id >= NUM_KEYS) {
+ WARN_ON(1);
+ return 0;
+ }
+ return key_id;
+}
+
+#define BUF_LEN 128
+
+struct world {
+ unsigned int root_count;
+ unsigned int delta_count;
+ char next_root_buf[BUF_LEN];
+ struct objagg_obj *objagg_objs[NUM_KEYS];
+ unsigned int key_refs[NUM_KEYS];
+};
+
+struct root {
+ struct tokey key;
+ char buf[BUF_LEN];
+};
+
+struct delta {
+ unsigned int key_id_diff;
+};
+
+static struct objagg_obj *world_obj_get(struct world *world,
+ struct objagg *objagg,
+ unsigned int key_id)
+{
+ struct objagg_obj *objagg_obj;
+ struct tokey key;
+ int err;
+
+ key.id = key_id;
+ objagg_obj = objagg_obj_get(objagg, &key);
+ if (IS_ERR(objagg_obj)) {
+ pr_err("Key %u: Failed to get object.\n", key_id);
+ return objagg_obj;
+ }
+ if (!world->key_refs[key_id_index(key_id)]) {
+ world->objagg_objs[key_id_index(key_id)] = objagg_obj;
+ } else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
+ pr_err("Key %u: God another object for the same key.\n",
+ key_id);
+ err = -EINVAL;
+ goto err_key_id_check;
+ }
+ world->key_refs[key_id_index(key_id)]++;
+ return objagg_obj;
+
+err_key_id_check:
+ objagg_obj_put(objagg, objagg_obj);
+ return ERR_PTR(err);
+}
+
+static void world_obj_put(struct world *world, struct objagg *objagg,
+ unsigned int key_id)
+{
+ struct objagg_obj *objagg_obj;
+
+ if (!world->key_refs[key_id_index(key_id)])
+ return;
+ objagg_obj = world->objagg_objs[key_id_index(key_id)];
+ objagg_obj_put(objagg, objagg_obj);
+ world->key_refs[key_id_index(key_id)]--;
+}
+
+#define MAX_KEY_ID_DIFF 5
+
+static void *delta_create(void *priv, void *parent_obj, void *obj)
+{
+ struct tokey *parent_key = parent_obj;
+ struct world *world = priv;
+ struct tokey *key = obj;
+ int diff = key->id - parent_key->id;
+ struct delta *delta;
+
+ if (diff < 0 || diff > MAX_KEY_ID_DIFF)
+ return ERR_PTR(-EINVAL);
+
+ delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+ if (!delta)
+ return ERR_PTR(-ENOMEM);
+ delta->key_id_diff = diff;
+ world->delta_count++;
+ return delta;
+}
+
+static void delta_destroy(void *priv, void *delta_priv)
+{
+ struct delta *delta = delta_priv;
+ struct world *world = priv;
+
+ world->delta_count--;
+ kfree(delta);
+}
+
+static void *root_create(void *priv, void *obj)
+{
+ struct world *world = priv;
+ struct tokey *key = obj;
+ struct root *root;
+
+ root = kzalloc(sizeof(*root), GFP_KERNEL);
+ if (!root)
+ return ERR_PTR(-ENOMEM);
+ memcpy(&root->key, key, sizeof(root->key));
+ memcpy(root->buf, world->next_root_buf, sizeof(root->buf));
+ world->root_count++;
+ return root;
+}
+
+static void root_destroy(void *priv, void *root_priv)
+{
+ struct root *root = root_priv;
+ struct world *world = priv;
+
+ world->root_count--;
+ kfree(root);
+}
+
+static int test_nodelta_obj_get(struct world *world, struct objagg *objagg,
+ unsigned int key_id, bool should_create_root)
+{
+ unsigned int orig_root_count = world->root_count;
+ struct objagg_obj *objagg_obj;
+ const struct root *root;
+ int err;
+
+ if (should_create_root)
+ prandom_bytes(world->next_root_buf,
+ sizeof(world->next_root_buf));
+
+ objagg_obj = world_obj_get(world, objagg, key_id);
+ if (IS_ERR(objagg_obj)) {
+ pr_err("Key %u: Failed to get object.\n", key_id);
+ return PTR_ERR(objagg_obj);
+ }
+ if (should_create_root) {
+ if (world->root_count != orig_root_count + 1) {
+ pr_err("Key %u: Root was not created\n", key_id);
+ err = -EINVAL;
+ goto err_check_root_count;
+ }
+ } else {
+ if (world->root_count != orig_root_count) {
+ pr_err("Key %u: Root was incorrectly created\n",
+ key_id);
+ err = -EINVAL;
+ goto err_check_root_count;
+ }
+ }
+ root = objagg_obj_root_priv(objagg_obj);
+ if (root->key.id != key_id) {
+ pr_err("Key %u: Root has unexpected key id\n", key_id);
+ err = -EINVAL;
+ goto err_check_key_id;
+ }
+ if (should_create_root &&
+ memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) {
+ pr_err("Key %u: Buffer does not match the expected content\n",
+ key_id);
+ err = -EINVAL;
+ goto err_check_buf;
+ }
+ return 0;
+
+err_check_buf:
+err_check_key_id:
+err_check_root_count:
+ objagg_obj_put(objagg, objagg_obj);
+ return err;
+}
+
+static int test_nodelta_obj_put(struct world *world, struct objagg *objagg,
+ unsigned int key_id, bool should_destroy_root)
+{
+ unsigned int orig_root_count = world->root_count;
+
+ world_obj_put(world, objagg, key_id);
+
+ if (should_destroy_root) {
+ if (world->root_count != orig_root_count - 1) {
+ pr_err("Key %u: Root was not destroyed\n", key_id);
+ return -EINVAL;
+ }
+ } else {
+ if (world->root_count != orig_root_count) {
+ pr_err("Key %u: Root was incorrectly destroyed\n",
+ key_id);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int check_stats_zero(struct objagg *objagg)
+{
+ const struct objagg_stats *stats;
+ int err = 0;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+
+ if (stats->stats_info_count != 0) {
+ pr_err("Stats: Object count is not zero while it should be\n");
+ err = -EINVAL;
+ }
+
+ objagg_stats_put(stats);
+ return err;
+}
+
+static int check_stats_nodelta(struct objagg *objagg)
+{
+ const struct objagg_stats *stats;
+ int i;
+ int err;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+
+ if (stats->stats_info_count != NUM_KEYS) {
+ pr_err("Stats: Unexpected object count (%u expected, %u returned)\n",
+ NUM_KEYS, stats->stats_info_count);
+ err = -EINVAL;
+ goto stats_put;
+ }
+
+ for (i = 0; i < stats->stats_info_count; i++) {
+ if (stats->stats_info[i].stats.user_count != 2) {
+ pr_err("Stats: incorrect user count\n");
+ err = -EINVAL;
+ goto stats_put;
+ }
+ if (stats->stats_info[i].stats.delta_user_count != 2) {
+ pr_err("Stats: incorrect delta user count\n");
+ err = -EINVAL;
+ goto stats_put;
+ }
+ }
+ err = 0;
+
+stats_put:
+ objagg_stats_put(stats);
+ return err;
+}
+
+static void *delta_create_dummy(void *priv, void *parent_obj, void *obj)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static void delta_destroy_dummy(void *priv, void *delta_priv)
+{
+}
+
+static const struct objagg_ops nodelta_ops = {
+ .obj_size = sizeof(struct tokey),
+ .delta_create = delta_create_dummy,
+ .delta_destroy = delta_destroy_dummy,
+ .root_create = root_create,
+ .root_destroy = root_destroy,
+};
+
+static int test_nodelta(void)
+{
+ struct world world = {};
+ struct objagg *objagg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&nodelta_ops, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ err = check_stats_zero(objagg);
+ if (err)
+ goto err_stats_first_zero;
+
+ /* First round of gets, the root objects should be created */
+ for (i = 0; i < NUM_KEYS; i++) {
+ err = test_nodelta_obj_get(&world, objagg, i, true);
+ if (err)
+ goto err_obj_first_get;
+ }
+
+ /* Do the second round of gets; all roots are already created, so
+ * make sure that no new root is created.
+ */
+ for (i = 0; i < NUM_KEYS; i++) {
+ err = test_nodelta_obj_get(&world, objagg, i, false);
+ if (err)
+ goto err_obj_second_get;
+ }
+
+ err = check_stats_nodelta(objagg);
+ if (err)
+ goto err_stats_nodelta;
+
+ for (i = NUM_KEYS - 1; i >= 0; i--) {
+ err = test_nodelta_obj_put(&world, objagg, i, false);
+ if (err)
+ goto err_obj_first_put;
+ }
+ for (i = NUM_KEYS - 1; i >= 0; i--) {
+ err = test_nodelta_obj_put(&world, objagg, i, true);
+ if (err)
+ goto err_obj_second_put;
+ }
+
+ err = check_stats_zero(objagg);
+ if (err)
+ goto err_stats_second_zero;
+
+ objagg_destroy(objagg);
+ return 0;
+
+err_stats_nodelta:
+err_obj_first_put:
+err_obj_second_get:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world, objagg, i);
+
+ i = NUM_KEYS;
+err_obj_first_get:
+err_obj_second_put:
+ for (i--; i >= 0; i--)
+ world_obj_put(&world, objagg, i);
+err_stats_first_zero:
+err_stats_second_zero:
+ objagg_destroy(objagg);
+ return err;
+}
+
+static const struct objagg_ops delta_ops = {
+ .obj_size = sizeof(struct tokey),
+ .delta_create = delta_create,
+ .delta_destroy = delta_destroy,
+ .root_create = root_create,
+ .root_destroy = root_destroy,
+};
+
+enum action {
+ ACTION_GET,
+ ACTION_PUT,
+};
+
+enum expect_delta {
+ EXPECT_DELTA_SAME,
+ EXPECT_DELTA_INC,
+ EXPECT_DELTA_DEC,
+};
+
+enum expect_root {
+ EXPECT_ROOT_SAME,
+ EXPECT_ROOT_INC,
+ EXPECT_ROOT_DEC,
+};
+
+struct expect_stats_info {
+ struct objagg_obj_stats stats;
+ bool is_root;
+ unsigned int key_id;
+};
+
+struct expect_stats {
+ unsigned int info_count;
+ struct expect_stats_info info[NUM_KEYS];
+};
+
+struct action_item {
+ unsigned int key_id;
+ enum action action;
+ enum expect_delta expect_delta;
+ enum expect_root expect_root;
+ struct expect_stats expect_stats;
+};
+
+#define EXPECT_STATS(count, ...) \
+{ \
+ .info_count = count, \
+ .info = { __VA_ARGS__ } \
+}
+
+#define ROOT(key_id, user_count, delta_user_count) \
+ {{user_count, delta_user_count}, true, key_id}
+
+#define DELTA(key_id, user_count) \
+ {{user_count, user_count}, false, key_id}
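+
+/* For instance, ROOT(1, 1, 1) expects key 1 to be a root object with one
+ * user and a delta user count of 1, while DELTA(3, 2) expects key 3 to be
+ * a delta object with two users.
+ */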
+
+static const struct action_item action_items[] = {
+ {
+ 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(1, ROOT(1, 1, 1)),
+ }, /* r: 1 d: */
+ {
+ 7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)),
+ }, /* r: 1, 7 d: */
+ {
+ 3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1),
+ DELTA(3, 1)),
+ }, /* r: 1, 7 d: 3^1 */
+ {
+ 5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1),
+ DELTA(3, 1), DELTA(5, 1)),
+ }, /* r: 1, 7 d: 3^1, 5^1 */
+ {
+ 3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 7 d: 3^1, 3^1, 5^1 */
+ {
+ 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7 d: 3^1, 3^1, 5^1 */
+ {
+ 30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7, 8^7 */
+ {
+ 3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 3^1, 5^1, 8^7, 8^7 */
+ {
+ 3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 1, 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 1, 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1),
+ DELTA(8, 2), DELTA(5, 1)),
+ }, /* r: 7, 30 d: 5^1, 8^7, 8^7 */
+ {
+ 5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
+ EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1),
+ DELTA(8, 2)),
+ }, /* r: 7, 30 d: 8^7, 8^7 */
+ {
+ 5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
+ EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1),
+ DELTA(8, 2)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7 */
+ {
+ 6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 3), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 2), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1),
+ DELTA(8, 1), DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 8^7, 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(6, 1)),
+ }, /* r: 7, 30, 5 d: 6^5 */
+ {
+ 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 7, 30, 5 d: 6^5, 8^5 */
+ {
+ 7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
+ EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 30, 5 d: 6^5, 8^5 */
+ {
+ 30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
+ EXPECT_STATS(3, ROOT(5, 1, 3),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: 5 d: 6^5, 8^5 */
+ {
+ 5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
+ EXPECT_STATS(3, ROOT(5, 0, 2),
+ DELTA(6, 1), DELTA(8, 1)),
+ }, /* r: d: 6^5, 8^5 */
+ {
+ 6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
+ EXPECT_STATS(2, ROOT(5, 0, 1),
+ DELTA(8, 1)),
+ }, /* r: d: 6^5 */
+ {
+ 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
+ EXPECT_STATS(0, ),
+ }, /* r: d: */
+};
+
+static int check_expect(struct world *world,
+ const struct action_item *action_item,
+ unsigned int orig_delta_count,
+ unsigned int orig_root_count)
+{
+ unsigned int key_id = action_item->key_id;
+
+ switch (action_item->expect_delta) {
+ case EXPECT_DELTA_SAME:
+ if (orig_delta_count != world->delta_count) {
+ pr_err("Key %u: Delta count changed while expected to remain the same.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_DELTA_INC:
+ if (WARN_ON(action_item->action == ACTION_PUT))
+ return -EINVAL;
+ if (orig_delta_count + 1 != world->delta_count) {
+ pr_err("Key %u: Delta count was not incremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_DELTA_DEC:
+ if (WARN_ON(action_item->action == ACTION_GET))
+ return -EINVAL;
+ if (orig_delta_count - 1 != world->delta_count) {
+ pr_err("Key %u: Delta count was not decremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ }
+
+ switch (action_item->expect_root) {
+ case EXPECT_ROOT_SAME:
+ if (orig_root_count != world->root_count) {
+ pr_err("Key %u: Root count changed while expected to remain the same.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_ROOT_INC:
+ if (WARN_ON(action_item->action == ACTION_PUT))
+ return -EINVAL;
+ if (orig_root_count + 1 != world->root_count) {
+ pr_err("Key %u: Root count was not incremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ break;
+ case EXPECT_ROOT_DEC:
+ if (WARN_ON(action_item->action == ACTION_GET))
+ return -EINVAL;
+ if (orig_root_count - 1 != world->root_count) {
+ pr_err("Key %u: Root count was not decremented.\n",
+ key_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj)
+{
+ const struct tokey *root_key;
+ const struct delta *delta;
+ unsigned int key_id;
+
+ root_key = objagg_obj_root_priv(objagg_obj);
+ key_id = root_key->id;
+ delta = objagg_obj_delta_priv(objagg_obj);
+ if (delta)
+ key_id += delta->key_id_diff;
+ return key_id;
+}
+
+static int
+check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info,
+ const struct expect_stats_info *expect_stats_info,
+ const char **errmsg)
+{
+ if (stats_info->is_root != expect_stats_info->is_root) {
+ if (errmsg)
+ *errmsg = "Incorrect root/delta indication";
+ return -EINVAL;
+ }
+ if (stats_info->stats.user_count !=
+ expect_stats_info->stats.user_count) {
+ if (errmsg)
+ *errmsg = "Incorrect user count";
+ return -EINVAL;
+ }
+ if (stats_info->stats.delta_user_count !=
+ expect_stats_info->stats.delta_user_count) {
+ if (errmsg)
+ *errmsg = "Incorrect delta user count";
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info,
+ const struct expect_stats_info *expect_stats_info,
+ const char **errmsg)
+{
+ if (obj_to_key_id(stats_info->objagg_obj) !=
+ expect_stats_info->key_id) {
+ if (errmsg)
+ *errmsg = "incorrect key id";
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int check_expect_stats_neigh(const struct objagg_stats *stats,
+ const struct expect_stats *expect_stats,
+ int pos)
+{
+ int i;
+ int err;
+
+ for (i = pos - 1; i >= 0; i--) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (err)
+ break;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (!err)
+ return 0;
+ }
+ for (i = pos + 1; i < stats->stats_info_count; i++) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (err)
+ break;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[pos], NULL);
+ if (!err)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int __check_expect_stats(const struct objagg_stats *stats,
+ const struct expect_stats *expect_stats,
+ const char **errmsg)
+{
+ int i;
+ int err;
+
+ if (stats->stats_info_count != expect_stats->info_count) {
+ *errmsg = "Unexpected object count";
+ return -EINVAL;
+ }
+
+ for (i = 0; i < stats->stats_info_count; i++) {
+ err = check_expect_stats_nums(&stats->stats_info[i],
+ &expect_stats->info[i], errmsg);
+ if (err)
+ return err;
+ err = check_expect_stats_key_id(&stats->stats_info[i],
+ &expect_stats->info[i], errmsg);
+ if (err) {
+ /* It is possible that one of the neighboring stats with
+ * the same numbers has the correct key id, so check it.
+ */
+ err = check_expect_stats_neigh(stats, expect_stats, i);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+static int check_expect_stats(struct objagg *objagg,
+ const struct expect_stats *expect_stats,
+ const char **errmsg)
+{
+ const struct objagg_stats *stats;
+ int err;
+
+ stats = objagg_stats_get(objagg);
+ if (IS_ERR(stats))
+ return PTR_ERR(stats);
+ err = __check_expect_stats(stats, expect_stats, errmsg);
+ objagg_stats_put(stats);
+ return err;
+}
+
+static int test_delta_action_item(struct world *world,
+ struct objagg *objagg,
+ const struct action_item *action_item,
+ bool inverse)
+{
+ unsigned int orig_delta_count = world->delta_count;
+ unsigned int orig_root_count = world->root_count;
+ unsigned int key_id = action_item->key_id;
+ enum action action = action_item->action;
+ struct objagg_obj *objagg_obj;
+ const char *errmsg;
+ int err;
+
+ if (inverse)
+ action = action == ACTION_GET ? ACTION_PUT : ACTION_GET;
+
+ switch (action) {
+ case ACTION_GET:
+ objagg_obj = world_obj_get(world, objagg, key_id);
+ if (IS_ERR(objagg_obj))
+ return PTR_ERR(objagg_obj);
+ break;
+ case ACTION_PUT:
+ world_obj_put(world, objagg, key_id);
+ break;
+ }
+
+ if (inverse)
+ return 0;
+ err = check_expect(world, action_item,
+ orig_delta_count, orig_root_count);
+ if (err)
+ goto errout;
+
+ errmsg = NULL;
+ err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg);
+ if (err) {
+ pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg);
+ goto errout;
+ }
+
+ return 0;
+
+errout:
+ /* This can only happen when the action is not inverted.
+ * So in case of an error, clean up by doing the inverse action.
+ */
+ test_delta_action_item(world, objagg, action_item, true);
+ return err;
+}
+
+static int test_delta(void)
+{
+ struct world world = {};
+ struct objagg *objagg;
+ int i;
+ int err;
+
+ objagg = objagg_create(&delta_ops, &world);
+ if (IS_ERR(objagg))
+ return PTR_ERR(objagg);
+
+ for (i = 0; i < ARRAY_SIZE(action_items); i++) {
+ err = test_delta_action_item(&world, objagg,
+ &action_items[i], false);
+ if (err)
+ goto err_do_action_item;
+ }
+
+ objagg_destroy(objagg);
+ return 0;
+
+err_do_action_item:
+ for (i--; i >= 0; i--)
+ test_delta_action_item(&world, objagg, &action_items[i], true);
+
+ objagg_destroy(objagg);
+ return err;
+}
+
+static int __init test_objagg_init(void)
+{
+ int err;
+
+ err = test_nodelta();
+ if (err)
+ return err;
+ return test_delta();
+}
+
+static void __exit test_objagg_exit(void)
+{
+}
+
+module_init(test_objagg_init);
+module_exit(test_objagg_exit);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Test module for objagg");
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 82ac39ce5310..6a8ac7626797 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -20,11 +20,11 @@
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/rhashtable.h>
-#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
+#include <linux/wait.h>
#define MAX_ENTRIES 1000000
#define TEST_INSERT_FAIL INT_MAX
@@ -112,8 +112,8 @@ static struct rhashtable_params test_rht_params_dup = {
.automatic_shrinking = false,
};
-static struct semaphore prestart_sem;
-static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
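+/* Worker threads decrement startup_count as they come up; once it reaches
+ * zero, the init thread drops it to -1 and wakes everyone so that all
+ * workers start at the same time.
+ */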
+static atomic_t startup_count;
+static DECLARE_WAIT_QUEUE_HEAD(startup_wait);
static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
const struct rhashtable_params params)
@@ -634,9 +634,12 @@ static int threadfunc(void *data)
int i, step, err = 0, insert_retries = 0;
struct thread_data *tdata = data;
- up(&prestart_sem);
- if (down_interruptible(&startup_sem))
- pr_err(" thread[%d]: down_interruptible failed\n", tdata->id);
+ if (atomic_dec_and_test(&startup_count))
+ wake_up(&startup_wait);
+ if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) {
+ pr_err(" thread[%d]: interrupted\n", tdata->id);
+ goto out;
+ }
for (i = 0; i < tdata->entries; i++) {
tdata->objs[i].value.id = i;
@@ -755,7 +758,7 @@ static int __init test_rht_init(void)
pr_info("Testing concurrent rhashtable access from %d threads\n",
tcount);
- sema_init(&prestart_sem, 1 - tcount);
+ atomic_set(&startup_count, tcount);
tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
if (!tdata)
return -ENOMEM;
@@ -781,15 +784,18 @@ static int __init test_rht_init(void)
tdata[i].objs = objs + i * entries;
tdata[i].task = kthread_run(threadfunc, &tdata[i],
"rhashtable_thrad[%d]", i);
- if (IS_ERR(tdata[i].task))
+ if (IS_ERR(tdata[i].task)) {
pr_err(" kthread_run failed for thread %d\n", i);
- else
+ atomic_dec(&startup_count);
+ } else {
started_threads++;
+ }
}
- if (down_interruptible(&prestart_sem))
- pr_err(" down interruptible failed\n");
- for (i = 0; i < tcount; i++)
- up(&startup_sem);
+ if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0))
+ pr_err(" wait_event interruptible failed\n");
+ /* count is 0 now, set it to -1 and wake up all threads together */
+ atomic_dec(&startup_count);
+ wake_up_all(&startup_wait);
for (i = 0; i < tcount; i++) {
if (IS_ERR(tdata[i].task))
continue;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 0598e86af8fc..4676c0a1eeca 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -28,23 +28,28 @@ void xa_dump(const struct xarray *xa) { }
} while (0)
#endif
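+/* Value entries can only carry a long's worth of payload, hence the
+ * LONG_MAX mask when turning an index into an entry.
+ */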
+static void *xa_mk_index(unsigned long index)
+{
+ return xa_mk_value(index & LONG_MAX);
+}
+
static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
- return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
+ return xa_store(xa, index, xa_mk_index(index), gfp);
}
static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
{
u32 id = 0;
- XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX),
+ XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
gfp) != 0);
XA_BUG_ON(xa, id != index);
}
static void xa_erase_index(struct xarray *xa, unsigned long index)
{
- XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
+ XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
XA_BUG_ON(xa, xa_load(xa, index) != NULL);
}
@@ -118,7 +123,7 @@ static noinline void check_xas_retry(struct xarray *xa)
xas_set(&xas, 0);
xas_for_each(&xas, entry, ULONG_MAX) {
- xas_store(&xas, xa_mk_value(xas.xa_index));
+ xas_store(&xas, xa_mk_index(xas.xa_index));
}
xas_unlock(&xas);
@@ -196,7 +201,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
xa_set_mark(xa, index + 2, XA_MARK_1);
XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
- xa_store_order(xa, index, order, xa_mk_value(index),
+ xa_store_order(xa, index, order, xa_mk_index(index),
GFP_KERNEL);
for (i = base; i < next; i++) {
XA_STATE(xas, xa, i);
@@ -405,7 +410,7 @@ static noinline void check_xas_erase(struct xarray *xa)
xas_set(&xas, j);
do {
xas_lock(&xas);
- xas_store(&xas, xa_mk_value(j));
+ xas_store(&xas, xa_mk_index(j));
xas_unlock(&xas);
} while (xas_nomem(&xas, GFP_KERNEL));
}
@@ -423,7 +428,7 @@ static noinline void check_xas_erase(struct xarray *xa)
xas_set(&xas, 0);
j = i;
xas_for_each(&xas, entry, ULONG_MAX) {
- XA_BUG_ON(xa, entry != xa_mk_value(j));
+ XA_BUG_ON(xa, entry != xa_mk_index(j));
xas_store(&xas, NULL);
j++;
}
@@ -440,17 +445,17 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
unsigned long min = index & ~((1UL << order) - 1);
unsigned long max = min + (1UL << order);
- xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL);
- XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index));
- XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index));
+ xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+ XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
+ XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
XA_BUG_ON(xa, xa_load(xa, max) != NULL);
XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
xas_lock(&xas);
- XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+ XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
xas_unlock(&xas);
- XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
- XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
+ XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
+ XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
XA_BUG_ON(xa, xa_load(xa, max) != NULL);
XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
@@ -471,6 +476,32 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
xas_unlock(&xas);
XA_BUG_ON(xa, !xa_empty(xa));
}
+
+static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
+ unsigned int order)
+{
+ XA_STATE(xas, xa, 0);
+ void *entry;
+ int n = 0;
+
+ xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+
+ xas_lock(&xas);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(index));
+ n++;
+ }
+ XA_BUG_ON(xa, n != 1);
+ xas_set(&xas, index + 1);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+ XA_BUG_ON(xa, entry != xa_mk_index(index));
+ n++;
+ }
+ XA_BUG_ON(xa, n != 2);
+ xas_unlock(&xas);
+
+ xa_destroy(xa);
+}
#endif
static noinline void check_multi_store(struct xarray *xa)
@@ -523,15 +554,15 @@ static noinline void check_multi_store(struct xarray *xa)
for (i = 0; i < max_order; i++) {
for (j = 0; j < max_order; j++) {
- xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL);
- xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL);
+ xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
+ xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
for (k = 0; k < max_order; k++) {
void *entry = xa_load(xa, (1UL << k) - 1);
if ((i < k) && (j < k))
XA_BUG_ON(xa, entry != NULL);
else
- XA_BUG_ON(xa, entry != xa_mk_value(j));
+ XA_BUG_ON(xa, entry != xa_mk_index(j));
}
xa_erase(xa, 0);
@@ -545,6 +576,11 @@ static noinline void check_multi_store(struct xarray *xa)
check_multi_store_1(xa, (1UL << i) + 1, i);
}
check_multi_store_2(xa, 4095, 9);
+
+ for (i = 1; i < 20; i++) {
+ check_multi_store_3(xa, 0, i);
+ check_multi_store_3(xa, 1UL << i, i);
+ }
#endif
}
@@ -587,16 +623,25 @@ static noinline void check_xa_alloc(void)
xa_destroy(&xa0);
id = 0xfffffffeU;
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+ XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
GFP_KERNEL) != 0);
XA_BUG_ON(&xa0, id != 0xfffffffeU);
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+ XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
GFP_KERNEL) != 0);
XA_BUG_ON(&xa0, id != 0xffffffffU);
- XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0),
+ XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
GFP_KERNEL) != -ENOSPC);
XA_BUG_ON(&xa0, id != 0xffffffffU);
xa_destroy(&xa0);
+
+ id = 10;
+ XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+ GFP_KERNEL) != -ENOSPC);
+ XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
+ XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+ GFP_KERNEL) != -ENOSPC);
+ xa_erase_index(&xa0, 3);
+ XA_BUG_ON(&xa0, !xa_empty(&xa0));
}
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
@@ -610,11 +655,11 @@ retry:
xas_lock(&xas);
xas_for_each_conflict(&xas, entry) {
XA_BUG_ON(xa, !xa_is_value(entry));
- XA_BUG_ON(xa, entry < xa_mk_value(start));
- XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1));
+ XA_BUG_ON(xa, entry < xa_mk_index(start));
+ XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
count++;
}
- xas_store(&xas, xa_mk_value(start));
+ xas_store(&xas, xa_mk_index(start));
xas_unlock(&xas);
if (xas_nomem(&xas, GFP_KERNEL)) {
count = 0;
@@ -622,9 +667,9 @@ retry:
}
XA_BUG_ON(xa, xas_error(&xas));
XA_BUG_ON(xa, count != present);
- XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start));
+ XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
- xa_mk_value(start));
+ xa_mk_index(start));
xa_erase_index(xa, start);
}
@@ -703,7 +748,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
for (j = 0; j < index; j++) {
XA_STATE(xas, xa, j + index);
xa_store_index(xa, index - 1, GFP_KERNEL);
- xa_store_order(xa, index, i, xa_mk_value(index),
+ xa_store_order(xa, index, i, xa_mk_index(index),
GFP_KERNEL);
rcu_read_lock();
xas_for_each(&xas, entry, ULONG_MAX) {
@@ -778,7 +823,7 @@ static noinline void check_find_2(struct xarray *xa)
j = 0;
index = 0;
xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
- XA_BUG_ON(xa, xa_mk_value(index) != entry);
+ XA_BUG_ON(xa, xa_mk_index(index) != entry);
XA_BUG_ON(xa, index != j++);
}
}
@@ -786,10 +831,34 @@ static noinline void check_find_2(struct xarray *xa)
xa_destroy(xa);
}
+static noinline void check_find_3(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+ unsigned long i, j, k;
+ void *entry;
+
+ for (i = 0; i < 100; i++) {
+ for (j = 0; j < 100; j++) {
+ for (k = 0; k < 100; k++) {
+ xas_set(&xas, j);
+ xas_for_each_marked(&xas, entry, k, XA_MARK_0)
+ ;
+ if (j > k)
+ XA_BUG_ON(xa,
+ xas.xa_node != XAS_RESTART);
+ }
+ }
+ xa_store_index(xa, i, GFP_KERNEL);
+ xa_set_mark(xa, i, XA_MARK_0);
+ }
+ xa_destroy(xa);
+}
+
static noinline void check_find(struct xarray *xa)
{
check_find_1(xa);
check_find_2(xa);
+ check_find_3(xa);
check_multi_find(xa);
check_multi_find_2(xa);
}
@@ -829,11 +898,11 @@ static noinline void check_find_entry(struct xarray *xa)
for (index = 0; index < (1UL << (order + 5));
index += (1UL << order)) {
xa_store_order(xa, index, order,
- xa_mk_value(index), GFP_KERNEL);
+ xa_mk_index(index), GFP_KERNEL);
XA_BUG_ON(xa, xa_load(xa, index) !=
- xa_mk_value(index));
+ xa_mk_index(index));
XA_BUG_ON(xa, xa_find_entry(xa,
- xa_mk_value(index)) != index);
+ xa_mk_index(index)) != index);
}
XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
xa_destroy(xa);
@@ -844,7 +913,7 @@ static noinline void check_find_entry(struct xarray *xa)
XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
- XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1);
+ XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
xa_erase_index(xa, ULONG_MAX);
XA_BUG_ON(xa, !xa_empty(xa));
}
@@ -864,7 +933,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
XA_BUG_ON(xa, xas.xa_index != i);
if (i == 0 || i == idx)
- XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
else
XA_BUG_ON(xa, entry != NULL);
}
@@ -878,7 +947,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
XA_BUG_ON(xa, xas.xa_index != i);
if (i == 0 || i == idx)
- XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
else
XA_BUG_ON(xa, entry != NULL);
} while (i > 0);
@@ -909,7 +978,7 @@ static noinline void check_move(struct xarray *xa)
do {
void *entry = xas_prev(&xas);
i--;
- XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
XA_BUG_ON(xa, i != xas.xa_index);
} while (i != 0);
@@ -918,7 +987,7 @@ static noinline void check_move(struct xarray *xa)
do {
void *entry = xas_next(&xas);
- XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
XA_BUG_ON(xa, i != xas.xa_index);
i++;
} while (i < (1 << 16));
@@ -934,7 +1003,7 @@ static noinline void check_move(struct xarray *xa)
void *entry = xas_prev(&xas);
i--;
if ((i < (1 << 8)) || (i >= (1 << 15)))
- XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
else
XA_BUG_ON(xa, entry != NULL);
XA_BUG_ON(xa, i != xas.xa_index);
@@ -946,7 +1015,7 @@ static noinline void check_move(struct xarray *xa)
do {
void *entry = xas_next(&xas);
if ((i < (1 << 8)) || (i >= (1 << 15)))
- XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, entry != xa_mk_index(i));
else
XA_BUG_ON(xa, entry != NULL);
XA_BUG_ON(xa, i != xas.xa_index);
@@ -976,7 +1045,7 @@ static noinline void xa_store_many_order(struct xarray *xa,
if (xas_error(&xas))
goto unlock;
for (i = 0; i < (1U << order); i++) {
- XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i)));
+ XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
xas_next(&xas);
}
unlock:
@@ -1031,9 +1100,9 @@ static noinline void check_create_range_4(struct xarray *xa,
if (xas_error(&xas))
goto unlock;
for (i = 0; i < (1UL << order); i++) {
- void *old = xas_store(&xas, xa_mk_value(base + i));
+ void *old = xas_store(&xas, xa_mk_index(base + i));
if (xas.xa_index == index)
- XA_BUG_ON(xa, old != xa_mk_value(base + i));
+ XA_BUG_ON(xa, old != xa_mk_index(base + i));
else
XA_BUG_ON(xa, old != NULL);
xas_next(&xas);
@@ -1085,10 +1154,10 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first,
unsigned long last)
{
#ifdef CONFIG_XARRAY_MULTI
- xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL);
+ xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
- XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first));
- XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first));
+ XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
+ XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
@@ -1195,7 +1264,7 @@ static noinline void check_account(struct xarray *xa)
XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
rcu_read_unlock();
- xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
+ xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
GFP_KERNEL);
XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);
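
A note on the lib/test_xarray.c hunks above: replacing xa_mk_value(i) with
xa_mk_index(i) makes each stored test entry encode the index actually under
test, so truncation bugs at large indices now trip the assertions. A minimal
sketch of the helper these hunks rely on (the name comes from the hunks; the
body is an assumption, based on value entries carrying one bit less than an
unsigned long):

static void *xa_mk_index(unsigned long index)
{
	return xa_mk_value(index & LONG_MAX);
}
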
diff --git a/lib/xarray.c b/lib/xarray.c
index bbacca576593..5f3f9311de89 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1131,7 +1131,7 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
entry = xa_head(xas->xa);
xas->xa_node = NULL;
if (xas->xa_index > max_index(entry))
- goto bounds;
+ goto out;
if (!xa_is_node(entry)) {
if (xa_marked(xas->xa, mark))
return entry;
@@ -1180,11 +1180,9 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
}
out:
- if (!max)
+ if (xas->xa_index > max)
goto max;
-bounds:
- xas->xa_node = XAS_BOUNDS;
- return NULL;
+ return set_bounds(xas);
max:
xas->xa_node = XAS_RESTART;
return NULL;
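
The xas_find_marked() hunk above folds the open-coded bounds handling into a
set_bounds() helper shared with other exit paths. Presumably the helper is no
more than this (the name is from the hunk; the body is an assumption):

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}
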
diff --git a/mm/gup.c b/mm/gup.c
index aa43620a3270..8cb68a50dbdf 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -702,12 +702,11 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
if (!vma || start >= vma->vm_end) {
vma = find_extend_vma(mm, start);
if (!vma && in_gate_area(mm, start)) {
- int ret;
ret = get_gate_page(mm, start & PAGE_MASK,
gup_flags, &vma,
pages ? &pages[i] : NULL);
if (ret)
- return i ? : ret;
+ goto out;
ctx.page_mask = 0;
goto next_page;
}
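
The mm/gup.c hunk replaces an early return with a jump to the function's
common exit, so the get_gate_page() failure path reports partial progress the
same way the other error paths do. The idiom at the exit label is roughly as
follows (a sketch; only the label name appears in this hunk):

out:
	/*
	 * Some pages were pinned before the failure: report the count,
	 * otherwise propagate the error.
	 */
	return i ? i : ret;
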
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 55478ab3c83b..e84a10b0d310 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -629,40 +629,30 @@ release:
* available
* never: never stall for any thp allocation
*/
-static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma, unsigned long addr)
+static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
{
const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
- gfp_t this_node = 0;
-
-#ifdef CONFIG_NUMA
- struct mempolicy *pol;
- /*
- * __GFP_THISNODE is used only when __GFP_DIRECT_RECLAIM is not
- * specified, to express a general desire to stay on the current
- * node for optimistic allocation attempts. If the defrag mode
- * and/or madvise hint requires the direct reclaim then we prefer
- * to fallback to other node rather than node reclaim because that
- * can lead to excessive reclaim even though there is free memory
- * on other nodes. We expect that NUMA preferences are specified
- * by memory policies.
- */
- pol = get_vma_policy(vma, addr);
- if (pol->mode != MPOL_BIND)
- this_node = __GFP_THISNODE;
- mpol_cond_put(pol);
-#endif
+ /* Always do synchronous compaction */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
+
+ /* Kick kcompactd and fail quickly */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
- return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM | this_node;
+ return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
+
+ /* Synchronous compaction if madvised, otherwise kick kcompactd */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
- return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
- __GFP_KSWAPD_RECLAIM | this_node);
+ return GFP_TRANSHUGE_LIGHT |
+ (vma_madvised ? __GFP_DIRECT_RECLAIM :
+ __GFP_KSWAPD_RECLAIM);
+
+ /* Only do synchronous compaction if madvised */
if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
- return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
- this_node);
- return GFP_TRANSHUGE_LIGHT | this_node;
+ return GFP_TRANSHUGE_LIGHT |
+ (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
+
+ return GFP_TRANSHUGE_LIGHT;
}
/* Caller must hold page table lock. */
@@ -734,8 +724,8 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
pte_free(vma->vm_mm, pgtable);
return ret;
}
- gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
- page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, vma, haddr, numa_node_id());
+ gfp = alloc_hugepage_direct_gfpmask(vma);
+ page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
if (unlikely(!page)) {
count_vm_event(THP_FAULT_FALLBACK);
return VM_FAULT_FALLBACK;
@@ -1305,9 +1295,8 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
alloc:
if (transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow()) {
- huge_gfp = alloc_hugepage_direct_gfpmask(vma, haddr);
- new_page = alloc_pages_vma(huge_gfp, HPAGE_PMD_ORDER, vma,
- haddr, numa_node_id());
+ huge_gfp = alloc_hugepage_direct_gfpmask(vma);
+ new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
} else
new_page = NULL;
@@ -2155,23 +2144,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
*/
old_pmd = pmdp_invalidate(vma, haddr, pmd);
-#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
pmd_migration = is_pmd_migration_entry(old_pmd);
- if (pmd_migration) {
+ if (unlikely(pmd_migration)) {
swp_entry_t entry;
entry = pmd_to_swp_entry(old_pmd);
page = pfn_to_page(swp_offset(entry));
- } else
-#endif
+ write = is_write_migration_entry(entry);
+ young = false;
+ soft_dirty = pmd_swp_soft_dirty(old_pmd);
+ } else {
page = pmd_page(old_pmd);
+ if (pmd_dirty(old_pmd))
+ SetPageDirty(page);
+ write = pmd_write(old_pmd);
+ young = pmd_young(old_pmd);
+ soft_dirty = pmd_soft_dirty(old_pmd);
+ }
VM_BUG_ON_PAGE(!page_count(page), page);
page_ref_add(page, HPAGE_PMD_NR - 1);
- if (pmd_dirty(old_pmd))
- SetPageDirty(page);
- write = pmd_write(old_pmd);
- young = pmd_young(old_pmd);
- soft_dirty = pmd_soft_dirty(old_pmd);
/*
* Withdraw the table only after we mark the pmd entry invalid.
@@ -2350,7 +2341,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
}
}
-static void freeze_page(struct page *page)
+static void unmap_page(struct page *page)
{
enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
@@ -2365,7 +2356,7 @@ static void freeze_page(struct page *page)
VM_BUG_ON_PAGE(!unmap_success, page);
}
-static void unfreeze_page(struct page *page)
+static void remap_page(struct page *page)
{
int i;
if (PageTransHuge(page)) {
@@ -2402,6 +2393,12 @@ static void __split_huge_page_tail(struct page *head, int tail,
(1L << PG_unevictable) |
(1L << PG_dirty)));
+ /* ->mapping in first tail page is compound_mapcount */
+ VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
+ page_tail);
+ page_tail->mapping = head->mapping;
+ page_tail->index = head->index + tail;
+
/* Page flags must be visible before we make the page non-compound. */
smp_wmb();
@@ -2422,12 +2419,6 @@ static void __split_huge_page_tail(struct page *head, int tail,
if (page_is_idle(head))
set_page_idle(page_tail);
- /* ->mapping in first tail page is compound_mapcount */
- VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
- page_tail);
- page_tail->mapping = head->mapping;
-
- page_tail->index = head->index + tail;
page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
/*
@@ -2439,12 +2430,11 @@ static void __split_huge_page_tail(struct page *head, int tail,
}
static void __split_huge_page(struct page *page, struct list_head *list,
- unsigned long flags)
+ pgoff_t end, unsigned long flags)
{
struct page *head = compound_head(page);
struct zone *zone = page_zone(head);
struct lruvec *lruvec;
- pgoff_t end = -1;
int i;
lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
@@ -2452,9 +2442,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
/* complete memcg works before add pages to LRU */
mem_cgroup_split_huge_fixup(head);
- if (!PageAnon(page))
- end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
-
for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
__split_huge_page_tail(head, i, lruvec, list);
/* Some pages can be beyond i_size: drop them from page cache */
@@ -2483,7 +2470,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
- unfreeze_page(head);
+ remap_page(head);
for (i = 0; i < HPAGE_PMD_NR; i++) {
struct page *subpage = head + i;
@@ -2626,6 +2613,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
int count, mapcount, extra_pins, ret;
bool mlocked;
unsigned long flags;
+ pgoff_t end;
VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -2648,6 +2636,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
ret = -EBUSY;
goto out;
}
+ end = -1;
mapping = NULL;
anon_vma_lock_write(anon_vma);
} else {
@@ -2661,10 +2650,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
anon_vma = NULL;
i_mmap_lock_read(mapping);
+
+ /*
+	 * __split_huge_page() may need to trim off pages beyond EOF:
+ * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
+ * which cannot be nested inside the page tree lock. So note
+ * end now: i_size itself may be changed at any moment, but
+ * head page lock is good enough to serialize the trimming.
+ */
+ end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
}
/*
- * Racy check if we can split the page, before freeze_page() will
+ * Racy check if we can split the page, before unmap_page() will
* split PMDs
*/
if (!can_split_huge_page(head, &extra_pins)) {
@@ -2673,7 +2671,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
}
mlocked = PageMlocked(page);
- freeze_page(head);
+ unmap_page(head);
VM_BUG_ON_PAGE(compound_mapcount(head), head);
/* Make sure the page is not on per-CPU pagevec as it takes pin */
@@ -2707,7 +2705,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
if (mapping)
__dec_node_page_state(page, NR_SHMEM_THPS);
spin_unlock(&pgdata->split_queue_lock);
- __split_huge_page(page, list, flags);
+ __split_huge_page(page, list, end, flags);
if (PageSwapCache(head)) {
swp_entry_t entry = { .val = page_private(head) };
@@ -2727,7 +2725,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
fail: if (mapping)
xa_unlock(&mapping->i_pages);
spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
- unfreeze_page(head);
+ remap_page(head);
ret = -EBUSY;
}
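
The mm/huge_memory.c call sites above drop their explicit numa_node_id()
argument in favour of alloc_hugepage_vma(). A plausible definition of that
wrapper, assuming it simply routes into the extended alloc_pages_vma() whose
new hugepage parameter appears in the mm/mempolicy.c hunk further down:

#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
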
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7f2a28ab46d5..a80832487981 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1248,10 +1248,11 @@ void free_huge_page(struct page *page)
(struct hugepage_subpool *)page_private(page);
bool restore_reserve;
- set_page_private(page, 0);
- page->mapping = NULL;
VM_BUG_ON_PAGE(page_count(page), page);
VM_BUG_ON_PAGE(page_mapcount(page), page);
+
+ set_page_private(page, 0);
+ page->mapping = NULL;
restore_reserve = PagePrivate(page);
ClearPagePrivate(page);
@@ -4080,7 +4081,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
/* fallback to copy_from_user outside mmap_sem */
if (unlikely(ret)) {
- ret = -EFAULT;
+ ret = -ENOENT;
*pagep = page;
/* don't free the page */
goto out;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index c13625c1ad5e..43ce2f4d2551 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1225,7 +1225,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
{
struct mm_struct *mm = mm_slot->mm;
- VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+ lockdep_assert_held(&khugepaged_mm_lock);
if (khugepaged_test_exit(mm)) {
/* free mm_slot */
@@ -1287,7 +1287,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* collapse_shmem - collapse small tmpfs/shmem pages into huge one.
*
* Basic scheme is simple, details are more complex:
- * - allocate and freeze a new huge page;
+ * - allocate and lock a new huge page;
* - scan page cache replacing old pages with the new one
* + swap in pages if necessary;
* + fill in gaps;
@@ -1295,11 +1295,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
* - if replacing succeeds:
* + copy data over;
* + free old pages;
- * + unfreeze huge page;
+ * + unlock huge page;
* - if replacing failed;
* + put all pages back and unfreeze them;
* + restore gaps in the page cache;
- * + free huge page;
+ * + unlock and free huge page;
*/
static void collapse_shmem(struct mm_struct *mm,
struct address_space *mapping, pgoff_t start,
@@ -1329,19 +1329,6 @@ static void collapse_shmem(struct mm_struct *mm,
goto out;
}
- new_page->index = start;
- new_page->mapping = mapping;
- __SetPageSwapBacked(new_page);
- __SetPageLocked(new_page);
- BUG_ON(!page_ref_freeze(new_page, 1));
-
- /*
- * At this point the new_page is 'frozen' (page_count() is zero),
- * locked and not up-to-date. It's safe to insert it into the page
- * cache, because nobody would be able to map it or use it in other
- * way until we unfreeze it.
- */
-
/* This will be less messy when we use multi-index entries */
do {
xas_lock_irq(&xas);
@@ -1349,19 +1336,44 @@ static void collapse_shmem(struct mm_struct *mm,
if (!xas_error(&xas))
break;
xas_unlock_irq(&xas);
- if (!xas_nomem(&xas, GFP_KERNEL))
+ if (!xas_nomem(&xas, GFP_KERNEL)) {
+ mem_cgroup_cancel_charge(new_page, memcg, true);
+ result = SCAN_FAIL;
goto out;
+ }
} while (1);
+ __SetPageLocked(new_page);
+ __SetPageSwapBacked(new_page);
+ new_page->index = start;
+ new_page->mapping = mapping;
+
+ /*
+ * At this point the new_page is locked and not up-to-date.
+ * It's safe to insert it into the page cache, because nobody would
+ * be able to map it or use it in another way until we unlock it.
+ */
+
xas_set(&xas, start);
for (index = start; index < end; index++) {
struct page *page = xas_next(&xas);
VM_BUG_ON(index != xas.xa_index);
if (!page) {
+ /*
+ * Stop if extent has been truncated or hole-punched,
+ * and is now completely empty.
+ */
+ if (index == start) {
+ if (!xas_next_entry(&xas, end - 1)) {
+ result = SCAN_TRUNCATED;
+ goto xa_locked;
+ }
+ xas_set(&xas, index);
+ }
if (!shmem_charge(mapping->host, 1)) {
result = SCAN_FAIL;
- break;
+ goto xa_locked;
}
xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
nr_none++;
@@ -1376,13 +1388,12 @@ static void collapse_shmem(struct mm_struct *mm,
result = SCAN_FAIL;
goto xa_unlocked;
}
- xas_lock_irq(&xas);
- xas_set(&xas, index);
} else if (trylock_page(page)) {
get_page(page);
+ xas_unlock_irq(&xas);
} else {
result = SCAN_PAGE_LOCK;
- break;
+ goto xa_locked;
}
/*
@@ -1391,17 +1402,24 @@ static void collapse_shmem(struct mm_struct *mm,
*/
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageUptodate(page), page);
- VM_BUG_ON_PAGE(PageTransCompound(page), page);
+
+ /*
+ * If file was truncated then extended, or hole-punched, before
+ * we locked the first page, then a THP might be there already.
+ */
+ if (PageTransCompound(page)) {
+ result = SCAN_PAGE_COMPOUND;
+ goto out_unlock;
+ }
if (page_mapping(page) != mapping) {
result = SCAN_TRUNCATED;
goto out_unlock;
}
- xas_unlock_irq(&xas);
if (isolate_lru_page(page)) {
result = SCAN_DEL_PAGE_LRU;
- goto out_isolate_failed;
+ goto out_unlock;
}
if (page_mapped(page))
@@ -1421,7 +1439,9 @@ static void collapse_shmem(struct mm_struct *mm,
*/
if (!page_ref_freeze(page, 3)) {
result = SCAN_PAGE_COUNT;
- goto out_lru;
+ xas_unlock_irq(&xas);
+ putback_lru_page(page);
+ goto out_unlock;
}
/*
@@ -1433,71 +1453,74 @@ static void collapse_shmem(struct mm_struct *mm,
/* Finally, replace with the new page. */
xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
continue;
-out_lru:
- xas_unlock_irq(&xas);
- putback_lru_page(page);
-out_isolate_failed:
- unlock_page(page);
- put_page(page);
- goto xa_unlocked;
out_unlock:
unlock_page(page);
put_page(page);
- break;
+ goto xa_unlocked;
}
- xas_unlock_irq(&xas);
+ __inc_node_page_state(new_page, NR_SHMEM_THPS);
+ if (nr_none) {
+ struct zone *zone = page_zone(new_page);
+
+ __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
+ __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+ }
+
+xa_locked:
+ xas_unlock_irq(&xas);
xa_unlocked:
+
if (result == SCAN_SUCCEED) {
struct page *page, *tmp;
- struct zone *zone = page_zone(new_page);
/*
* Replacing old pages with new one has succeeded, now we
* need to copy the content and free the old pages.
*/
+ index = start;
list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+ while (index < page->index) {
+ clear_highpage(new_page + (index % HPAGE_PMD_NR));
+ index++;
+ }
copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
page);
list_del(&page->lru);
- unlock_page(page);
- page_ref_unfreeze(page, 1);
page->mapping = NULL;
+ page_ref_unfreeze(page, 1);
ClearPageActive(page);
ClearPageUnevictable(page);
+ unlock_page(page);
put_page(page);
+ index++;
}
-
- local_irq_disable();
- __inc_node_page_state(new_page, NR_SHMEM_THPS);
- if (nr_none) {
- __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
- __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+ while (index < end) {
+ clear_highpage(new_page + (index % HPAGE_PMD_NR));
+ index++;
}
- local_irq_enable();
- /*
- * Remove pte page tables, so we can re-fault
- * the page as huge.
- */
- retract_page_tables(mapping, start);
-
- /* Everything is ready, let's unfreeze the new_page */
- set_page_dirty(new_page);
SetPageUptodate(new_page);
- page_ref_unfreeze(new_page, HPAGE_PMD_NR);
+ page_ref_add(new_page, HPAGE_PMD_NR - 1);
+ set_page_dirty(new_page);
mem_cgroup_commit_charge(new_page, memcg, false, true);
lru_cache_add_anon(new_page);
- unlock_page(new_page);
+ /*
+ * Remove pte page tables, so we can re-fault the page as huge.
+ */
+ retract_page_tables(mapping, start);
*hpage = NULL;
khugepaged_pages_collapsed++;
} else {
struct page *page;
+
/* Something went wrong: roll back page cache changes */
- shmem_uncharge(mapping->host, nr_none);
xas_lock_irq(&xas);
+ mapping->nrpages -= nr_none;
+ shmem_uncharge(mapping->host, nr_none);
+
xas_set(&xas, start);
xas_for_each(&xas, page, end - 1) {
page = list_first_entry_or_null(&pagelist,
@@ -1519,19 +1542,18 @@ xa_unlocked:
xas_store(&xas, page);
xas_pause(&xas);
xas_unlock_irq(&xas);
- putback_lru_page(page);
unlock_page(page);
+ putback_lru_page(page);
xas_lock_irq(&xas);
}
VM_BUG_ON(nr_none);
xas_unlock_irq(&xas);
- /* Unfreeze new_page, caller would take care about freeing it */
- page_ref_unfreeze(new_page, 1);
mem_cgroup_cancel_charge(new_page, memcg, true);
- unlock_page(new_page);
new_page->mapping = NULL;
}
+
+ unlock_page(new_page);
out:
VM_BUG_ON(!list_empty(&pagelist));
/* TODO: tracepoints */
@@ -1631,7 +1653,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
int progress = 0;
VM_BUG_ON(!pages);
- VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+ lockdep_assert_held(&khugepaged_mm_lock);
if (khugepaged_scan.mm_slot)
mm_slot = khugepaged_scan.mm_slot;
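
Both khugepaged assertions above (and the mm/swap.c hunk further down) trade
VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(...)) for lockdep_assert_held().
On uniprocessor builds spin_is_locked() is constant false, which is why the
old form needed the NR_CPUS escape hatch; the lockdep form states the locking
contract directly and is verified whenever lockdep is enabled. The general
shape of the pattern (the function name here is illustrative only):

static void khugepaged_locked_work(void)
{
	lockdep_assert_held(&khugepaged_mm_lock);	/* caller holds the lock */
	/* ... work that relies on khugepaged_mm_lock ... */
}
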
diff --git a/mm/memblock.c b/mm/memblock.c
index 9a2d5ae81ae1..81ae63ca78d0 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1727,7 +1727,7 @@ static int __init_memblock memblock_search(struct memblock_type *type, phys_addr
return -1;
}
-bool __init memblock_is_reserved(phys_addr_t addr)
+bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
return memblock_search(&memblock.reserved, addr) != -1;
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 0cd3de3550f0..7c72f2a95785 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1161,6 +1161,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
LIST_HEAD(tokill);
int rc = -EBUSY;
loff_t start;
+ dax_entry_t cookie;
/*
* Prevent the inode from being freed while we are interrogating
@@ -1169,7 +1170,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
* also prevents changes to the mapping of this pfn until
* poison signaling is complete.
*/
- if (!dax_lock_mapping_entry(page))
+ cookie = dax_lock_page(page);
+ if (!cookie)
goto out;
if (hwpoison_filter(page)) {
@@ -1220,7 +1222,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
rc = 0;
unlock:
- dax_unlock_mapping_entry(page);
+ dax_unlock_page(page, cookie);
out:
/* drop pgmap ref acquired in caller */
put_dev_pagemap(pgmap);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 5837a067124d..d4496d9d34f5 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1116,8 +1116,8 @@ static struct page *new_page(struct page *page, unsigned long start)
} else if (PageTransHuge(page)) {
struct page *thp;
- thp = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER, vma,
- address, numa_node_id());
+ thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
+ HPAGE_PMD_ORDER);
if (!thp)
return NULL;
prep_transhuge_page(thp);
@@ -1662,7 +1662,7 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
* freeing by another task. It is the caller's responsibility to free the
* extra reference for shared policies.
*/
-struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct mempolicy *pol = __get_vma_policy(vma, addr);
@@ -2011,6 +2011,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
* @vma: Pointer to VMA or NULL if not available.
* @addr: Virtual Address of the allocation. Must be inside the VMA.
* @node: Which node to prefer for allocation (modulo policy).
+ * @hugepage: for hugepages try only the preferred node if possible
*
* This function allocates a page from the kernel page pool and applies
* a NUMA policy associated with the VMA or the current process.
@@ -2021,7 +2022,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
*/
struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
- unsigned long addr, int node)
+ unsigned long addr, int node, bool hugepage)
{
struct mempolicy *pol;
struct page *page;
@@ -2039,6 +2040,31 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
goto out;
}
+ if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
+ int hpage_node = node;
+
+ /*
+ * For hugepage allocation and non-interleave policy which
+ * allows the current node (or other explicitly preferred
+ * node) we only try to allocate from the current/preferred
+ * node and don't fall back to other nodes, as the cost of
+ * remote accesses would likely offset THP benefits.
+ *
+ * If the policy is interleave, or does not allow the current
+ * node in its nodemask, we allocate the standard way.
+ */
+ if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
+ hpage_node = pol->v.preferred_node;
+
+ nmask = policy_nodemask(gfp, pol);
+ if (!nmask || node_isset(hpage_node, *nmask)) {
+ mpol_cond_put(pol);
+ page = __alloc_pages_node(hpage_node,
+ gfp | __GFP_THISNODE, order);
+ goto out;
+ }
+ }
+
nmask = policy_nodemask(gfp, pol);
preferred_nid = policy_node(gfp, pol, node);
page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
diff --git a/mm/mmap.c b/mm/mmap.c
index 6c04292e16a7..7bb64381e77c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2066,6 +2066,15 @@ found_highest:
return gap_end;
}
+
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr) (TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
+
/* Get an address range which is currently unmapped.
* For shmat() with addr=0.
*
@@ -2085,8 +2094,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
struct vm_unmapped_area_info info;
+ const unsigned long mmap_end = arch_get_mmap_end(addr);
- if (len > TASK_SIZE - mmap_min_addr)
+ if (len > mmap_end - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
@@ -2095,7 +2105,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev);
- if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ if (mmap_end - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
return addr;
@@ -2104,7 +2114,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
- info.high_limit = TASK_SIZE;
+ info.high_limit = mmap_end;
info.align_mask = 0;
return vm_unmapped_area(&info);
}
@@ -2124,9 +2134,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
+ const unsigned long mmap_end = arch_get_mmap_end(addr);
/* requested length too big for entire address space */
- if (len > TASK_SIZE - mmap_min_addr)
+ if (len > mmap_end - mmap_min_addr)
return -ENOMEM;
if (flags & MAP_FIXED)
@@ -2136,7 +2147,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (addr) {
addr = PAGE_ALIGN(addr);
vma = find_vma_prev(mm, addr, &prev);
- if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ if (mmap_end - len >= addr && addr >= mmap_min_addr &&
(!vma || addr + len <= vm_start_gap(vma)) &&
(!prev || addr >= vm_end_gap(prev)))
return addr;
@@ -2145,7 +2156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
- info.high_limit = mm->mmap_base;
+ info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
info.align_mask = 0;
addr = vm_unmapped_area(&info);
@@ -2159,7 +2170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
VM_BUG_ON(addr != -ENOMEM);
info.flags = 0;
info.low_limit = TASK_UNMAPPED_BASE;
- info.high_limit = TASK_SIZE;
+ info.high_limit = mmap_end;
addr = vm_unmapped_area(&info);
}
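
The arch_get_mmap_end()/arch_get_mmap_base() hooks introduced above default
to the historical TASK_SIZE and mmap_base limits; an architecture with an
extended virtual address space can override them per request, keeping the
larger span opt-in for callers that pass a high hint address. A hypothetical
override in the style these hooks anticipate (the macro bodies below are
illustrative, not part of this diff):

#define arch_get_mmap_end(addr) \
	((addr) > DEFAULT_MAP_WINDOW ? TASK_SIZE : DEFAULT_MAP_WINDOW)
#define arch_get_mmap_base(addr, base) \
	((addr) > DEFAULT_MAP_WINDOW ? \
		(base) + TASK_SIZE - DEFAULT_MAP_WINDOW : (base))
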
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 2a9fbc4a37d5..f2f03c655807 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -199,7 +199,7 @@ void tlb_table_flush(struct mmu_gather *tlb)
if (*batch) {
tlb_table_invalidate(tlb);
- call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+ call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
}
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6847177dc4a1..e95b5b7c9c3d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5542,6 +5542,18 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
cond_resched();
}
}
+#ifdef CONFIG_SPARSEMEM
+ /*
+ * If the zone does not span the rest of the section then
+ * we should at least initialize those pages. Otherwise we
+ * could blow up on a poisoned page in some paths which depend
+ * on full sections being initialized (e.g. memory hotplug).
+ */
+ while (end_pfn % PAGES_PER_SECTION) {
+ __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid);
+ end_pfn++;
+ }
+#endif
}
#ifdef CONFIG_ZONE_DEVICE
@@ -5813,8 +5825,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
unsigned long size)
{
struct pglist_data *pgdat = zone->zone_pgdat;
+ int zone_idx = zone_idx(zone) + 1;
- pgdat->nr_zones = zone_idx(zone) + 1;
+ if (zone_idx > pgdat->nr_zones)
+ pgdat->nr_zones = zone_idx;
zone->zone_start_pfn = zone_start_pfn;
@@ -7800,11 +7814,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
* handle each tail page individually in migration.
*/
if (PageHuge(page)) {
+ struct page *head = compound_head(page);
+ unsigned int skip_pages;
- if (!hugepage_migration_supported(page_hstate(page)))
+ if (!hugepage_migration_supported(page_hstate(head)))
goto unmovable;
- iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
+ skip_pages = (1 << compound_order(head)) - (page - head);
+ iter += skip_pages - 1;
continue;
}
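
A worked example for the has_unmovable_pages() fix above: the old code
rounded iter itself up to a compound-order boundary, which is only correct
when the scan starts at a hugepage head. With a 512-page hugepage
(compound_order(head) == 9) whose head lies 100 pages before the current
position:

	skip_pages = (1 << compound_order(head)) - (page - head)
		   = 512 - 100
		   = 412

so iter advances past exactly the remaining tail pages and, after the loop's
own increment, lands on the first page beyond the hugepage, wherever the scan
window happened to begin.
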
diff --git a/mm/page_io.c b/mm/page_io.c
index d4d1c89bcddd..3475733b1926 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -140,7 +140,7 @@ out:
unlock_page(page);
WRITE_ONCE(bio->bi_private, NULL);
bio_put(bio);
- wake_up_process(waiter);
+ blk_wake_io_task(waiter);
put_task_struct(waiter);
}
@@ -339,7 +339,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
goto out;
}
bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
- bio_associate_blkcg_from_page(bio, page);
+ bio_associate_blkg_from_page(bio, page);
count_swpout_vm_event(page);
set_page_writeback(page);
unlock_page(page);
@@ -405,11 +405,12 @@ int swap_readpage(struct page *page, bool synchronous)
bio_get(bio);
qc = submit_bio(bio);
while (synchronous) {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+
if (!READ_ONCE(bio->bi_private))
break;
- if (!blk_poll(disk->queue, qc))
+ if (!blk_poll(disk->queue, qc, true))
break;
}
__set_current_state(TASK_RUNNING);
diff --git a/mm/rmap.c b/mm/rmap.c
index 1e79fac3186b..85b7f9423352 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1627,16 +1627,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
address + PAGE_SIZE);
} else {
/*
- * We should not need to notify here as we reach this
- * case only from freeze_page() itself only call from
- * split_huge_page_to_list() so everything below must
- * be true:
- * - page is not anonymous
- * - page is locked
- *
- * So as it is a locked file back page thus it can not
- * be remove from the page cache and replace by a new
- * page before mmu_notifier_invalidate_range_end so no
+ * This is a locked file-backed page, thus it cannot
+ * be removed from the page cache and replaced by a new
+ * page before mmu_notifier_invalidate_range_end, so no
* concurrent thread might update its page table to
* point at new page while a device still is using this
* page.
diff --git a/mm/shmem.c b/mm/shmem.c
index d44991ea5ed4..375f3ac19bb8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -297,12 +297,14 @@ bool shmem_charge(struct inode *inode, long pages)
if (!shmem_inode_acct_block(inode, pages))
return false;
+ /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
+ inode->i_mapping->nrpages += pages;
+
spin_lock_irqsave(&info->lock, flags);
info->alloced += pages;
inode->i_blocks += pages * BLOCKS_PER_PAGE;
shmem_recalc_inode(inode);
spin_unlock_irqrestore(&info->lock, flags);
- inode->i_mapping->nrpages += pages;
return true;
}
@@ -312,6 +314,8 @@ void shmem_uncharge(struct inode *inode, long pages)
struct shmem_inode_info *info = SHMEM_I(inode);
unsigned long flags;
+ /* nrpages adjustment done by __delete_from_page_cache() or caller */
+
spin_lock_irqsave(&info->lock, flags);
info->alloced -= pages;
inode->i_blocks -= pages * BLOCKS_PER_PAGE;
@@ -657,9 +661,7 @@ static int shmem_free_swap(struct address_space *mapping,
{
void *old;
- xa_lock_irq(&mapping->i_pages);
- old = __xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0);
- xa_unlock_irq(&mapping->i_pages);
+ old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
if (old != radswap)
return -ENOENT;
free_swap_and_cache(radix_to_swp_entry(radswap));
@@ -756,7 +758,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
break;
index = indices[pvec.nr - 1] + 1;
pagevec_remove_exceptionals(&pvec);
- check_move_unevictable_pages(pvec.pages, pvec.nr);
+ check_move_unevictable_pages(&pvec);
pagevec_release(&pvec);
cond_resched();
}
@@ -1435,7 +1437,7 @@ static struct page *shmem_alloc_hugepage(gfp_t gfp,
shmem_pseudo_vma_init(&pvma, info, hindex);
page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
- HPAGE_PMD_ORDER, &pvma, 0, numa_node_id());
+ HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
shmem_pseudo_vma_destroy(&pvma);
if (page)
prep_transhuge_page(page);
@@ -1509,11 +1511,13 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
{
struct page *oldpage, *newpage;
struct address_space *swap_mapping;
+ swp_entry_t entry;
pgoff_t swap_index;
int error;
oldpage = *pagep;
- swap_index = page_private(oldpage);
+ entry.val = page_private(oldpage);
+ swap_index = swp_offset(entry);
swap_mapping = page_mapping(oldpage);
/*
@@ -1532,7 +1536,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
__SetPageLocked(newpage);
__SetPageSwapBacked(newpage);
SetPageUptodate(newpage);
- set_page_private(newpage, swap_index);
+ set_page_private(newpage, entry.val);
SetPageSwapCache(newpage);
/*
@@ -2214,6 +2218,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
struct page *page;
pte_t _dst_pte, *dst_pte;
int ret;
+ pgoff_t offset, max_off;
ret = -ENOMEM;
if (!shmem_inode_acct_block(inode, 1))
@@ -2236,7 +2241,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
*pagep = page;
shmem_inode_unacct_blocks(inode, 1);
/* don't free the page */
- return -EFAULT;
+ return -ENOENT;
}
} else { /* mfill_zeropage_atomic */
clear_highpage(page);
@@ -2251,6 +2256,12 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
__SetPageSwapBacked(page);
__SetPageUptodate(page);
+ ret = -EFAULT;
+ offset = linear_page_index(dst_vma, dst_addr);
+ max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ if (unlikely(offset >= max_off))
+ goto out_release;
+
ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
if (ret)
goto out_release;
@@ -2265,9 +2276,25 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
if (dst_vma->vm_flags & VM_WRITE)
_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
+ else {
+ /*
+ * We don't set the pte dirty if the vma has no
+ * VM_WRITE permission, so mark the page dirty or it
+ * could be freed from under us. We could do it
+ * unconditionally before unlock_page(), but doing it
+ * only if VM_WRITE is not set is faster.
+ */
+ set_page_dirty(page);
+ }
- ret = -EEXIST;
dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+
+ ret = -EFAULT;
+ max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ if (unlikely(offset >= max_off))
+ goto out_release_uncharge_unlock;
+
+ ret = -EEXIST;
if (!pte_none(*dst_pte))
goto out_release_uncharge_unlock;
@@ -2285,13 +2312,15 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
/* No need to invalidate - it was non-present before */
update_mmu_cache(dst_vma, dst_addr, dst_pte);
- unlock_page(page);
pte_unmap_unlock(dst_pte, ptl);
+ unlock_page(page);
ret = 0;
out:
return ret;
out_release_uncharge_unlock:
pte_unmap_unlock(dst_pte, ptl);
+ ClearPageDirty(page);
+ delete_from_page_cache(page);
out_release_uncharge:
mem_cgroup_cancel_charge(page, memcg, false);
out_release:
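
The shmem_mfill_atomic_pte() hunks above add the same i_size guard twice:
once before charging the page and again after taking the page-table lock,
because a truncate or hole-punch can race in between. Condensed, the pattern
reads (identifiers are taken from the hunks):

offset = linear_page_index(dst_vma, dst_addr);
max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
if (unlikely(offset >= max_off))
	goto out_release;		/* already beyond EOF */

dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
if (unlikely(offset >= max_off))	/* re-check under the lock */
	goto out_release_uncharge_unlock;
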
diff --git a/mm/slab.c b/mm/slab.c
index 2a5654bb3b3f..3abb9feb3818 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -962,10 +962,10 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
* To protect lockless access to n->shared during irq disabled context.
* If n->shared isn't NULL in irq disabled context, accessing to it is
* guaranteed to be valid until irq is re-enabled, because it will be
- * freed after synchronize_sched().
+ * freed after synchronize_rcu().
*/
if (old_shared && force_change)
- synchronize_sched();
+ synchronize_rcu();
fail:
kfree(old_shared);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7eb8dc136c1c..9c11e8a937d2 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -724,7 +724,7 @@ void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
css_get(&s->memcg_params.memcg->css);
s->memcg_params.deact_fn = deact_fn;
- call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
+ call_rcu(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
}
void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
@@ -839,11 +839,11 @@ static void flush_memcg_workqueue(struct kmem_cache *s)
mutex_unlock(&slab_mutex);
/*
- * SLUB deactivates the kmem_caches through call_rcu_sched. Make
+ * SLUB deactivates the kmem_caches through call_rcu. Make
* sure all registered rcu callbacks have been invoked.
*/
if (IS_ENABLED(CONFIG_SLUB))
- rcu_barrier_sched();
+ rcu_barrier();
/*
* SLAB and SLUB create memcg kmem_caches through workqueue and SLUB
diff --git a/mm/sparse.c b/mm/sparse.c
index 33307fc05c4d..3abc8cc50201 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -240,6 +240,22 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
}
/*
+ * Mark all memblocks as present using memory_present(). This is a
+ * convenience function that is useful for a number of arches
+ * to mark all of the system's memory as present during initialization.
+ */
+void __init memblocks_present(void)
+{
+ struct memblock_region *reg;
+
+ for_each_memblock(memory, reg) {
+ memory_present(memblock_get_region_node(reg),
+ memblock_region_memory_base_pfn(reg),
+ memblock_region_memory_end_pfn(reg));
+ }
+}
+
+/*
* Subtle, we encode the real pfn into the mem_map such that
* the identity pfn - section_mem_map will return the actual
* physical page frame number.
diff --git a/mm/swap.c b/mm/swap.c
index aa483719922e..5d786019eab9 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -823,8 +823,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
VM_BUG_ON_PAGE(!PageHead(page), page);
VM_BUG_ON_PAGE(PageCompound(page_tail), page);
VM_BUG_ON_PAGE(PageLRU(page_tail), page);
- VM_BUG_ON(NR_CPUS != 1 &&
- !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));
+ lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
if (!list)
SetPageLRU(page_tail);
diff --git a/mm/truncate.c b/mm/truncate.c
index 45d68e90b703..798e7ccfb030 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -517,9 +517,13 @@ void truncate_inode_pages_final(struct address_space *mapping)
*/
xa_lock_irq(&mapping->i_pages);
xa_unlock_irq(&mapping->i_pages);
-
- truncate_inode_pages(mapping, 0);
}
+
+ /*
+ * Cleancache needs notification even if there are no pages or shadow
+ * entries.
+ */
+ truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 5029f241908f..458acda96f20 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -33,6 +33,8 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
void *page_kaddr;
int ret;
struct page *page;
+ pgoff_t offset, max_off;
+ struct inode *inode;
if (!*pagep) {
ret = -ENOMEM;
@@ -48,7 +50,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
/* fallback to copy_from_user outside mmap_sem */
if (unlikely(ret)) {
- ret = -EFAULT;
+ ret = -ENOENT;
*pagep = page;
/* don't free the page */
goto out;
@@ -73,8 +75,17 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
if (dst_vma->vm_flags & VM_WRITE)
_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
- ret = -EEXIST;
dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+ if (dst_vma->vm_file) {
+ /* the shmem MAP_PRIVATE case requires checking the i_size */
+ inode = dst_vma->vm_file->f_inode;
+ offset = linear_page_index(dst_vma, dst_addr);
+ max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ ret = -EFAULT;
+ if (unlikely(offset >= max_off))
+ goto out_release_uncharge_unlock;
+ }
+ ret = -EEXIST;
if (!pte_none(*dst_pte))
goto out_release_uncharge_unlock;
@@ -108,11 +119,22 @@ static int mfill_zeropage_pte(struct mm_struct *dst_mm,
pte_t _dst_pte, *dst_pte;
spinlock_t *ptl;
int ret;
+ pgoff_t offset, max_off;
+ struct inode *inode;
_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
dst_vma->vm_page_prot));
- ret = -EEXIST;
dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+ if (dst_vma->vm_file) {
+ /* the shmem MAP_PRIVATE case requires checking the i_size */
+ inode = dst_vma->vm_file->f_inode;
+ offset = linear_page_index(dst_vma, dst_addr);
+ max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+ ret = -EFAULT;
+ if (unlikely(offset >= max_off))
+ goto out_unlock;
+ }
+ ret = -EEXIST;
if (!pte_none(*dst_pte))
goto out_unlock;
set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
@@ -205,8 +227,9 @@ retry:
if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
goto out_unlock;
/*
- * Only allow __mcopy_atomic_hugetlb on userfaultfd
- * registered ranges.
+	 * Check that the vma is registered in uffd; this is
+	 * required to enforce the VM_MAYWRITE check done at
+	 * uffd registration time.
*/
if (!dst_vma->vm_userfaultfd_ctx.ctx)
goto out_unlock;
@@ -274,7 +297,7 @@ retry:
cond_resched();
- if (unlikely(err == -EFAULT)) {
+ if (unlikely(err == -ENOENT)) {
up_read(&dst_mm->mmap_sem);
BUG_ON(!page);
@@ -380,7 +403,17 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
{
ssize_t err;
- if (vma_is_anonymous(dst_vma)) {
+ /*
+ * The normal page fault path for a shmem will invoke the
+ * fault, fill the hole in the file and COW it right away. The
+ * result generates plain anonymous memory. So when we are
+	 * asked to fill a hole in a MAP_PRIVATE shmem mapping, we'll
+ * generate anonymous memory directly without actually filling
+ * the hole. For the MAP_PRIVATE case the robustness check
+ * only happens in the pagetable (to verify it's still none)
+ * and not in the radix tree.
+ */
+ if (!(dst_vma->vm_flags & VM_SHARED)) {
if (!zeropage)
err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
dst_addr, src_addr, page);
@@ -449,13 +482,9 @@ retry:
if (!dst_vma)
goto out_unlock;
/*
- * Be strict and only allow __mcopy_atomic on userfaultfd
- * registered ranges to prevent userland errors going
- * unnoticed. As far as the VM consistency is concerned, it
- * would be perfectly safe to remove this check, but there's
- * no useful usage for __mcopy_atomic ouside of userfaultfd
- * registered ranges. This is after all why these are ioctls
- * belonging to the userfaultfd and not syscalls.
+	 * Check that the vma is registered in uffd; this is required to
+	 * enforce the VM_MAYWRITE check done at uffd registration
+	 * time.
*/
if (!dst_vma->vm_userfaultfd_ctx.ctx)
goto out_unlock;
@@ -489,7 +518,8 @@ retry:
* dst_vma.
*/
err = -ENOMEM;
- if (vma_is_anonymous(dst_vma) && unlikely(anon_vma_prepare(dst_vma)))
+ if (!(dst_vma->vm_flags & VM_SHARED) &&
+ unlikely(anon_vma_prepare(dst_vma)))
goto out_unlock;
while (src_addr < src_start + len) {
@@ -530,7 +560,7 @@ retry:
src_addr, &page, zeropage);
cond_resched();
- if (unlikely(err == -EFAULT)) {
+ if (unlikely(err == -ENOENT)) {
void *page_kaddr;
up_read(&dst_mm->mmap_sem);
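
The -EFAULT to -ENOENT conversions in this file (and in the mm/shmem.c and
mm/hugetlb.c hunks above) reserve -ENOENT as the internal "retry with the
page pre-copied" signal, freeing -EFAULT to mean a genuine failure such as an
offset past i_size. The retry loop in the caller then takes roughly this
shape (a sketch of the flow already visible in the surrounding hunks):

err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
		       src_addr, &page, zeropage);
if (unlikely(err == -ENOENT)) {
	/* copy_from_user may sleep and fault: drop mmap_sem first */
	up_read(&dst_mm->mmap_sem);
	/* fill the page from userspace, retake mmap_sem, retry */
}
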
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 62ac0c488624..24ab1f7394ab 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -46,6 +46,7 @@
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
+#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
#include <linux/dax.h>
@@ -4182,17 +4183,16 @@ int page_evictable(struct page *page)
return ret;
}
-#ifdef CONFIG_SHMEM
/**
- * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
- * @pages: array of pages to check
- * @nr_pages: number of pages to check
+ * check_move_unevictable_pages - check pages for evictability and move to
+ * appropriate zone lru list
+ * @pvec: pagevec with lru pages to check
*
- * Checks pages for evictability and moves them to the appropriate lru list.
- *
- * This function is only used for SysV IPC SHM_UNLOCK.
+ * Checks pages for evictability; if an evictable page sits on the
+ * unevictable lru list, it is moved to the appropriate evictable lru list.
+ * This function should only be used for lru pages.
*/
-void check_move_unevictable_pages(struct page **pages, int nr_pages)
+void check_move_unevictable_pages(struct pagevec *pvec)
{
struct lruvec *lruvec;
struct pglist_data *pgdat = NULL;
@@ -4200,8 +4200,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
int pgrescued = 0;
int i;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pages[i];
+ for (i = 0; i < pvec->nr; i++) {
+ struct page *page = pvec->pages[i];
struct pglist_data *pagepgdat = page_pgdat(page);
pgscanned++;
@@ -4233,4 +4233,4 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
spin_unlock_irq(&pgdat->lru_lock);
}
}
-#endif /* CONFIG_SHMEM */
+EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
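
check_move_unevictable_pages() now takes a pagevec and is exported, so
callers outside shmem can reuse it. The updated calling convention, following
the shmem_unlock_mapping() hunk above (a sketch of a typical loop):

struct pagevec pvec;
pgoff_t index = 0;

pagevec_init(&pvec);
while (pagevec_lookup(&pvec, mapping, &index)) {
	check_move_unevictable_pages(&pvec);
	pagevec_release(&pvec);
	cond_resched();
}
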
diff --git a/net/6lowpan/debugfs.c b/net/6lowpan/debugfs.c
index 24915e0bb9ea..6c152f9ea26e 100644
--- a/net/6lowpan/debugfs.c
+++ b/net/6lowpan/debugfs.c
@@ -232,18 +232,7 @@ static int lowpan_context_show(struct seq_file *file, void *offset)
return 0;
}
-
-static int lowpan_context_open(struct inode *inode, struct file *file)
-{
- return single_open(file, lowpan_context_show, inode->i_private);
-}
-
-static const struct file_operations lowpan_context_fops = {
- .open = lowpan_context_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(lowpan_context);
static int lowpan_short_addr_get(void *data, u64 *val)
{
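
DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the open handler
and file_operations boilerplate that this hunk deletes by hand. For this user
it expands to approximately the following (the macro also sets .owner, which
the removed code did not):

static int lowpan_context_open(struct inode *inode, struct file *file)
{
	return single_open(file, lowpan_context_show, inode->i_private);
}

static const struct file_operations lowpan_context_fops = {
	.owner		= THIS_MODULE,
	.open		= lowpan_context_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
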
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 5e9950453955..dc4411165e43 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -330,6 +330,7 @@ static void vlan_transfer_features(struct net_device *dev,
vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
+ vlandev->hw_enc_features = vlan_tnl_features(vlan->real_dev);
netdev_update_features(vlandev);
}
@@ -357,6 +358,7 @@ static int __vlan_device_event(struct net_device *dev, unsigned long event)
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
void *ptr)
{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct vlan_group *grp;
struct vlan_info *vlan_info;
@@ -459,7 +461,8 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
vlan = vlan_dev_priv(vlandev);
if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
- dev_change_flags(vlandev, flgs | IFF_UP);
+ dev_change_flags(vlandev, flgs | IFF_UP,
+ extack);
netif_stacked_transfer_operstate(dev, vlandev);
}
break;
@@ -647,93 +650,6 @@ out:
return err;
}
-static struct sk_buff *vlan_gro_receive(struct list_head *head,
- struct sk_buff *skb)
-{
- const struct packet_offload *ptype;
- unsigned int hlen, off_vlan;
- struct sk_buff *pp = NULL;
- struct vlan_hdr *vhdr;
- struct sk_buff *p;
- __be16 type;
- int flush = 1;
-
- off_vlan = skb_gro_offset(skb);
- hlen = off_vlan + sizeof(*vhdr);
- vhdr = skb_gro_header_fast(skb, off_vlan);
- if (skb_gro_header_hard(skb, hlen)) {
- vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
- if (unlikely(!vhdr))
- goto out;
- }
-
- type = vhdr->h_vlan_encapsulated_proto;
-
- rcu_read_lock();
- ptype = gro_find_receive_by_type(type);
- if (!ptype)
- goto out_unlock;
-
- flush = 0;
-
- list_for_each_entry(p, head, list) {
- struct vlan_hdr *vhdr2;
-
- if (!NAPI_GRO_CB(p)->same_flow)
- continue;
-
- vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
- if (compare_vlan_header(vhdr, vhdr2))
- NAPI_GRO_CB(p)->same_flow = 0;
- }
-
- skb_gro_pull(skb, sizeof(*vhdr));
- skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
- pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
-
-out_unlock:
- rcu_read_unlock();
-out:
- skb_gro_flush_final(skb, pp, flush);
-
- return pp;
-}
-
-static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
-{
- struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
- __be16 type = vhdr->h_vlan_encapsulated_proto;
- struct packet_offload *ptype;
- int err = -ENOENT;
-
- rcu_read_lock();
- ptype = gro_find_complete_by_type(type);
- if (ptype)
- err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
-
- rcu_read_unlock();
- return err;
-}
-
-static struct packet_offload vlan_packet_offloads[] __read_mostly = {
- {
- .type = cpu_to_be16(ETH_P_8021Q),
- .priority = 10,
- .callbacks = {
- .gro_receive = vlan_gro_receive,
- .gro_complete = vlan_gro_complete,
- },
- },
- {
- .type = cpu_to_be16(ETH_P_8021AD),
- .priority = 10,
- .callbacks = {
- .gro_receive = vlan_gro_receive,
- .gro_complete = vlan_gro_complete,
- },
- },
-};
-
static int __net_init vlan_init_net(struct net *net)
{
struct vlan_net *vn = net_generic(net, vlan_net_id);
@@ -761,7 +677,6 @@ static struct pernet_operations vlan_net_ops = {
static int __init vlan_proto_init(void)
{
int err;
- unsigned int i;
pr_info("%s v%s\n", vlan_fullname, vlan_version);
@@ -785,9 +700,6 @@ static int __init vlan_proto_init(void)
if (err < 0)
goto err5;
- for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
- dev_add_offload(&vlan_packet_offloads[i]);
-
vlan_ioctl_set(vlan_ioctl_handler);
return 0;
@@ -805,13 +717,8 @@ err0:
static void __exit vlan_cleanup_module(void)
{
- unsigned int i;
-
vlan_ioctl_set(NULL);
- for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
- dev_remove_offload(&vlan_packet_offloads[i]);
-
vlan_netlink_fini();
unregister_netdevice_notifier(&vlan_notifier_block);
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 44df1c3df02d..c46daf09a501 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -92,6 +92,18 @@ static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
return NULL;
}
+static inline netdev_features_t vlan_tnl_features(struct net_device *real_dev)
+{
+ netdev_features_t ret;
+
+ ret = real_dev->hw_enc_features &
+ (NETIF_F_CSUM_MASK | NETIF_F_ALL_TSO | NETIF_F_GSO_ENCAP_ALL);
+
+ if ((ret & NETIF_F_GSO_ENCAP_ALL) && (ret & NETIF_F_CSUM_MASK))
+ return (ret & ~NETIF_F_CSUM_MASK) | NETIF_F_HW_CSUM;
+ return 0;
+}
+
#define vlan_group_for_each_dev(grp, i, dev) \
for ((i) = 0; i < VLAN_PROTO_NUM * VLAN_N_VID; i++) \
if (((dev) = __vlan_group_get_device((grp), (i) / VLAN_N_VID, \
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4f60e86f4b8d..a313165e7a67 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -57,7 +57,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
}
skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
@@ -223,6 +223,33 @@ static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vi
return -ENODEV;
}
+int vlan_for_each(struct net_device *dev,
+ int (*action)(struct net_device *dev, int vid, void *arg),
+ void *arg)
+{
+ struct vlan_vid_info *vid_info;
+ struct vlan_info *vlan_info;
+ struct net_device *vdev;
+ int ret;
+
+ ASSERT_RTNL();
+
+ vlan_info = rtnl_dereference(dev->vlan_info);
+ if (!vlan_info)
+ return 0;
+
+ list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
+ vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
+ vid_info->vid);
+ ret = action(vdev, vid_info->vid, arg);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(vlan_for_each);
+
int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
struct net_device *real_dev = vlan_info->real_dev;
@@ -426,3 +453,102 @@ bool vlan_uses_dev(const struct net_device *dev)
return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);
+
+static struct sk_buff *vlan_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
+{
+ const struct packet_offload *ptype;
+ unsigned int hlen, off_vlan;
+ struct sk_buff *pp = NULL;
+ struct vlan_hdr *vhdr;
+ struct sk_buff *p;
+ __be16 type;
+ int flush = 1;
+
+ off_vlan = skb_gro_offset(skb);
+ hlen = off_vlan + sizeof(*vhdr);
+ vhdr = skb_gro_header_fast(skb, off_vlan);
+ if (skb_gro_header_hard(skb, hlen)) {
+ vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
+ if (unlikely(!vhdr))
+ goto out;
+ }
+
+ type = vhdr->h_vlan_encapsulated_proto;
+
+ rcu_read_lock();
+ ptype = gro_find_receive_by_type(type);
+ if (!ptype)
+ goto out_unlock;
+
+ flush = 0;
+
+ list_for_each_entry(p, head, list) {
+ struct vlan_hdr *vhdr2;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
+ if (compare_vlan_header(vhdr, vhdr2))
+ NAPI_GRO_CB(p)->same_flow = 0;
+ }
+
+ skb_gro_pull(skb, sizeof(*vhdr));
+ skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
+ pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ skb_gro_flush_final(skb, pp, flush);
+
+ return pp;
+}
+
+static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
+ __be16 type = vhdr->h_vlan_encapsulated_proto;
+ struct packet_offload *ptype;
+ int err = -ENOENT;
+
+ rcu_read_lock();
+ ptype = gro_find_complete_by_type(type);
+ if (ptype)
+ err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
+
+ rcu_read_unlock();
+ return err;
+}
+
+static struct packet_offload vlan_packet_offloads[] __read_mostly = {
+ {
+ .type = cpu_to_be16(ETH_P_8021Q),
+ .priority = 10,
+ .callbacks = {
+ .gro_receive = vlan_gro_receive,
+ .gro_complete = vlan_gro_complete,
+ },
+ },
+ {
+ .type = cpu_to_be16(ETH_P_8021AD),
+ .priority = 10,
+ .callbacks = {
+ .gro_receive = vlan_gro_receive,
+ .gro_complete = vlan_gro_complete,
+ },
+ },
+};
+
+static int __init vlan_offload_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
+ dev_add_offload(&vlan_packet_offloads[i]);
+
+ return 0;
+}
+
+fs_initcall(vlan_offload_init);
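The new vlan_for_each() gives drivers an RTNL-protected walk over every
VLAN device stacked on a real device. A hypothetical caller might use it
to re-program hardware VLAN filters after a reset (sketch only; my_hw and
my_hw_add_vlan are illustrative names, not part of this patch):

    /* Callback invoked once per VLAN device, under RTNL. */
    static int sketch_restore_vlan(struct net_device *vdev, int vid, void *arg)
    {
        struct my_hw *hw = arg;              /* hypothetical driver state */

        return my_hw_add_vlan(hw, vid);      /* hypothetical helper */
    }

    static void sketch_restore_all_vlans(struct net_device *real_dev,
                                         struct my_hw *hw)
    {
        ASSERT_RTNL();                       /* vlan_for_each() requires it */
        vlan_for_each(real_dev, sketch_restore_vlan, hw);
    }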
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ff720f1ebf73..b2d9c8f27cd7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -562,6 +562,7 @@ static int vlan_dev_init(struct net_device *dev)
dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE |
+ NETIF_F_GSO_ENCAP_ALL |
NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC |
NETIF_F_ALL_FCOE;
@@ -572,6 +573,7 @@ static int vlan_dev_init(struct net_device *dev)
netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
dev->vlan_features = real_dev->vlan_features & ~NETIF_F_ALL_FCOE;
+ dev->hw_enc_features = vlan_tnl_features(real_dev);
/* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id;
diff --git a/net/Kconfig b/net/Kconfig
index f235edb593ba..5cb9de1aaf88 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -51,6 +51,9 @@ config NET_INGRESS
config NET_EGRESS
bool
+config SKB_EXTENSIONS
+ bool
+
menu "Networking options"
source "net/packet/Kconfig"
@@ -184,6 +187,7 @@ config BRIDGE_NETFILTER
depends on NETFILTER && INET
depends on NETFILTER_ADVANCED
select NETFILTER_FAMILY_BRIDGE
+ select SKB_EXTENSIONS
default m
---help---
Enabling this option will let arptables resp. iptables see bridged
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index f75816f58107..c386e6981416 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -22,7 +22,6 @@
config BATMAN_ADV
tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
depends on NET
- select CRC16
select LIBCRC32C
help
B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
@@ -48,6 +47,7 @@ config BATMAN_ADV_BATMAN_V
config BATMAN_ADV_BLA
bool "Bridge Loop Avoidance"
depends on BATMAN_ADV && INET
+ select CRC16
default y
help
This option enables BLA (Bridge Loop Avoidance), a mechanism
@@ -82,6 +82,7 @@ config BATMAN_ADV_NC
config BATMAN_ADV_MCAST
bool "Multicast optimisation"
depends on BATMAN_ADV && INET && !(BRIDGE=m && BATMAN_ADV=y)
+ default y
help
This option enables the multicast optimisation which aims to
reduce the air overhead while improving the reliability of
@@ -100,12 +101,13 @@ config BATMAN_ADV_DEBUGFS
config BATMAN_ADV_DEBUG
bool "B.A.T.M.A.N. debugging"
- depends on BATMAN_ADV_DEBUGFS
+ depends on BATMAN_ADV
help
This is an option for use by developers; most people should
say N here. This enables compilation of support for
- outputting debugging information to the kernel log. The
- output is controlled via the module parameter debug.
+ outputting debugging information to the debugfs log or tracing
+ buffer. The output is controlled via the batadv netdev specific
+ log_level setting.
config BATMAN_ADV_TRACING
bool "B.A.T.M.A.N. tracing support"
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index d2227091029f..f97e566f0402 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -34,7 +34,6 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
-#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/pkt_sched.h>
@@ -2585,13 +2584,14 @@ static void batadv_iv_gw_print(struct batadv_priv *bat_priv,
* batadv_iv_gw_dump_entry() - Dump a gateway into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @gw_node: Gateway to be dumped
*
* Return: Error code, or 0 on success
*/
-static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_gw_node *gw_node)
{
@@ -2611,13 +2611,16 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
- hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
- NLM_F_MULTI, BATADV_CMD_GET_GATEWAYS);
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_GATEWAYS);
if (!hdr) {
ret = -ENOBUFS;
goto out;
}
+ genl_dump_check_consistent(cb, hdr);
+
ret = -EMSGSIZE;
if (curr_gw == gw_node)
@@ -2668,13 +2671,15 @@ static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
int idx_skip = cb->args[0];
int idx = 0;
- rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) {
+ spin_lock_bh(&bat_priv->gw.list_lock);
+ cb->seq = bat_priv->gw.generation << 1 | 1;
+
+ hlist_for_each_entry(gw_node, &bat_priv->gw.gateway_list, list) {
if (idx++ < idx_skip)
continue;
- if (batadv_iv_gw_dump_entry(msg, portid, cb->nlh->nlmsg_seq,
- bat_priv, gw_node)) {
+ if (batadv_iv_gw_dump_entry(msg, portid, cb, bat_priv,
+ gw_node)) {
idx_skip = idx - 1;
goto unlock;
}
@@ -2682,7 +2687,7 @@ static void batadv_iv_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
idx_skip = idx;
unlock:
- rcu_read_unlock();
+ spin_unlock_bh(&bat_priv->gw.list_lock);
cb->args[0] = idx_skip;
}
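The locking change above pairs with a consistency handshake that recurs
throughout this series. Roughly (a sketch; "generation" stands for
whichever counter the writers of the dumped list bump):

    /* Under the same lock the writers take, derive a non-zero sequence: */
    cb->seq = generation << 1 | 1;  /* always odd, so never 0 */

    /* Stamp each emitted message with that sequence: */
    genl_dump_check_consistent(cb, hdr);

If the generation changes between two passes of a multi-part dump, netlink
marks the affected messages with NLM_F_DUMP_INTR, so userspace knows the
dump may be inconsistent and can restart it.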
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 6baec4e68898..90e33f84d37a 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -27,11 +27,13 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
+#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
+#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -915,13 +917,14 @@ static void batadv_v_gw_print(struct batadv_priv *bat_priv,
* batadv_v_gw_dump_entry() - Dump a gateway into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @gw_node: Gateway to be dumped
*
* Return: Error code, or 0 on success
*/
-static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_gw_node *gw_node)
{
@@ -941,13 +944,16 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
- hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
- NLM_F_MULTI, BATADV_CMD_GET_GATEWAYS);
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_GATEWAYS);
if (!hdr) {
ret = -ENOBUFS;
goto out;
}
+ genl_dump_check_consistent(cb, hdr);
+
ret = -EMSGSIZE;
if (curr_gw == gw_node) {
@@ -1018,13 +1024,15 @@ static void batadv_v_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
int idx_skip = cb->args[0];
int idx = 0;
- rcu_read_lock();
- hlist_for_each_entry_rcu(gw_node, &bat_priv->gw.gateway_list, list) {
+ spin_lock_bh(&bat_priv->gw.list_lock);
+ cb->seq = bat_priv->gw.generation << 1 | 1;
+
+ hlist_for_each_entry(gw_node, &bat_priv->gw.gateway_list, list) {
if (idx++ < idx_skip)
continue;
- if (batadv_v_gw_dump_entry(msg, portid, cb->nlh->nlmsg_seq,
- bat_priv, gw_node)) {
+ if (batadv_v_gw_dump_entry(msg, portid, cb, bat_priv,
+ gw_node)) {
idx_skip = idx - 1;
goto unlock;
}
@@ -1032,7 +1040,7 @@ static void batadv_v_gw_dump(struct sk_buff *msg, struct netlink_callback *cb,
idx_skip = idx;
unlock:
- rcu_read_unlock();
+ spin_unlock_bh(&bat_priv->gw.list_lock);
cb->args[0] = idx_skip;
}
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5f1aeeded0e3..5fdde2947802 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -2094,14 +2094,15 @@ out:
* to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @primary_if: primary interface
* @claim: entry to dump
*
* Return: 0 or error code.
*/
static int
-batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_hard_iface *primary_if,
struct batadv_bla_claim *claim)
{
@@ -2111,13 +2112,16 @@ batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
void *hdr;
int ret = -EINVAL;
- hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
- NLM_F_MULTI, BATADV_CMD_GET_BLA_CLAIM);
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_BLA_CLAIM);
if (!hdr) {
ret = -ENOBUFS;
goto out;
}
+ genl_dump_check_consistent(cb, hdr);
+
is_own = batadv_compare_eth(claim->backbone_gw->orig,
primary_addr);
@@ -2153,28 +2157,33 @@ out:
* to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @primary_if: primary interface
- * @head: bucket to dump
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
* @idx_skip: How many entries to skip
*
* Return: always 0.
*/
static int
-batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_hard_iface *primary_if,
- struct hlist_head *head, int *idx_skip)
+ struct batadv_hashtable *hash, unsigned int bucket,
+ int *idx_skip)
{
struct batadv_bla_claim *claim;
int idx = 0;
int ret = 0;
- rcu_read_lock();
- hlist_for_each_entry_rcu(claim, head, hash_entry) {
+ spin_lock_bh(&hash->list_locks[bucket]);
+ cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+ hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
if (idx++ < *idx_skip)
continue;
- ret = batadv_bla_claim_dump_entry(msg, portid, seq,
+ ret = batadv_bla_claim_dump_entry(msg, portid, cb,
primary_if, claim);
if (ret) {
*idx_skip = idx - 1;
@@ -2184,7 +2193,7 @@ batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
*idx_skip = 0;
unlock:
- rcu_read_unlock();
+ spin_unlock_bh(&hash->list_locks[bucket]);
return ret;
}
@@ -2204,7 +2213,6 @@ int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
struct batadv_hashtable *hash;
struct batadv_priv *bat_priv;
int bucket = cb->args[0];
- struct hlist_head *head;
int idx = cb->args[1];
int ifindex;
int ret = 0;
@@ -2230,11 +2238,8 @@ int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
}
while (bucket < hash->size) {
- head = &hash->table[bucket];
-
- if (batadv_bla_claim_dump_bucket(msg, portid,
- cb->nlh->nlmsg_seq,
- primary_if, head, &idx))
+ if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
+ hash, bucket, &idx))
break;
bucket++;
}
@@ -2325,14 +2330,15 @@ out:
* netlink socket
* @msg: buffer for the message
* @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @primary_if: primary interface
* @backbone_gw: entry to dump
*
* Return: 0 or error code.
*/
static int
-batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_hard_iface *primary_if,
struct batadv_bla_backbone_gw *backbone_gw)
{
@@ -2343,13 +2349,16 @@ batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
void *hdr;
int ret = -EINVAL;
- hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
- NLM_F_MULTI, BATADV_CMD_GET_BLA_BACKBONE);
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_BLA_BACKBONE);
if (!hdr) {
ret = -ENOBUFS;
goto out;
}
+ genl_dump_check_consistent(cb, hdr);
+
is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
spin_lock_bh(&backbone_gw->crc_lock);
@@ -2386,28 +2395,33 @@ out:
* a netlink socket
* @msg: buffer for the message
* @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @primary_if: primary interface
- * @head: bucket to dump
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
* @idx_skip: How many entries to skip
*
* Return: always 0.
*/
static int
-batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_hard_iface *primary_if,
- struct hlist_head *head, int *idx_skip)
+ struct batadv_hashtable *hash,
+ unsigned int bucket, int *idx_skip)
{
struct batadv_bla_backbone_gw *backbone_gw;
int idx = 0;
int ret = 0;
- rcu_read_lock();
- hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
+ spin_lock_bh(&hash->list_locks[bucket]);
+ cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+ hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
if (idx++ < *idx_skip)
continue;
- ret = batadv_bla_backbone_dump_entry(msg, portid, seq,
+ ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
primary_if, backbone_gw);
if (ret) {
*idx_skip = idx - 1;
@@ -2417,7 +2431,7 @@ batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
*idx_skip = 0;
unlock:
- rcu_read_unlock();
+ spin_unlock_bh(&hash->list_locks[bucket]);
return ret;
}
@@ -2437,7 +2451,6 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
struct batadv_hashtable *hash;
struct batadv_priv *bat_priv;
int bucket = cb->args[0];
- struct hlist_head *head;
int idx = cb->args[1];
int ifindex;
int ret = 0;
@@ -2463,11 +2476,8 @@ int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
}
while (bucket < hash->size) {
- head = &hash->table[bucket];
-
- if (batadv_bla_backbone_dump_bucket(msg, portid,
- cb->nlh->nlmsg_seq,
- primary_if, head, &idx))
+ if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
+ hash, bucket, &idx))
break;
bucket++;
}
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 8b608a2e2653..d4a7702e48d8 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -19,6 +19,7 @@
#include "debugfs.h"
#include "main.h"
+#include <asm/current.h>
#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/err.h>
@@ -27,6 +28,7 @@
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/stddef.h>
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index a60bacf7120b..b9ffe1826527 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -863,23 +863,27 @@ out:
* netlink socket
* @msg: buffer for the message
* @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @dat_entry: entry to dump
*
* Return: 0 or error code.
*/
static int
-batadv_dat_cache_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_dat_cache_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_dat_entry *dat_entry)
{
int msecs;
void *hdr;
- hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
- NLM_F_MULTI, BATADV_CMD_GET_DAT_CACHE);
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_DAT_CACHE);
if (!hdr)
return -ENOBUFS;
+ genl_dump_check_consistent(cb, hdr);
+
msecs = jiffies_to_msecs(jiffies - dat_entry->last_update);
if (nla_put_in_addr(msg, BATADV_ATTR_DAT_CACHE_IP4ADDRESS,
@@ -901,27 +905,31 @@ batadv_dat_cache_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
* a netlink socket
* @msg: buffer for the message
* @portid: netlink port
- * @seq: Sequence number of netlink message
- * @head: bucket to dump
+ * @cb: Control block containing additional options
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
* @idx_skip: How many entries to skip
*
* Return: 0 or error code.
*/
static int
-batadv_dat_cache_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
- struct hlist_head *head, int *idx_skip)
+batadv_dat_cache_dump_bucket(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_hashtable *hash, unsigned int bucket,
+ int *idx_skip)
{
struct batadv_dat_entry *dat_entry;
int idx = 0;
- rcu_read_lock();
- hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
+ spin_lock_bh(&hash->list_locks[bucket]);
+ cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+ hlist_for_each_entry(dat_entry, &hash->table[bucket], hash_entry) {
if (idx < *idx_skip)
goto skip;
- if (batadv_dat_cache_dump_entry(msg, portid, seq,
- dat_entry)) {
- rcu_read_unlock();
+ if (batadv_dat_cache_dump_entry(msg, portid, cb, dat_entry)) {
+ spin_unlock_bh(&hash->list_locks[bucket]);
*idx_skip = idx;
return -EMSGSIZE;
@@ -930,7 +938,7 @@ batadv_dat_cache_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
skip:
idx++;
}
- rcu_read_unlock();
+ spin_unlock_bh(&hash->list_locks[bucket]);
return 0;
}
@@ -951,7 +959,6 @@ int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb)
struct batadv_hashtable *hash;
struct batadv_priv *bat_priv;
int bucket = cb->args[0];
- struct hlist_head *head;
int idx = cb->args[1];
int ifindex;
int ret = 0;
@@ -977,10 +984,7 @@ int batadv_dat_cache_dump(struct sk_buff *msg, struct netlink_callback *cb)
}
while (bucket < hash->size) {
- head = &hash->table[bucket];
-
- if (batadv_dat_cache_dump_bucket(msg, portid,
- cb->nlh->nlmsg_seq, head,
+ if (batadv_dat_cache_dump_bucket(msg, portid, cb, hash, bucket,
&idx))
break;
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 140c61a3f1ec..9d8e5eda2314 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -377,6 +377,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
kref_get(&gw_node->refcount);
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
+ bat_priv->gw.generation++;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@@ -472,6 +473,7 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
if (!hlist_unhashed(&gw_node->list)) {
hlist_del_init_rcu(&gw_node->list);
batadv_gw_node_put(gw_node);
+ bat_priv->gw.generation++;
}
spin_unlock_bh(&bat_priv->gw.list_lock);
@@ -518,6 +520,7 @@ void batadv_gw_node_free(struct batadv_priv *bat_priv)
&bat_priv->gw.gateway_list, list) {
hlist_del_init_rcu(&gw_node->list);
batadv_gw_node_put(gw_node);
+ bat_priv->gw.generation++;
}
spin_unlock_bh(&bat_priv->gw.list_lock);
}
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 781c5b6e6e8e..508f4416dfc9 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -951,6 +951,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
batadv_check_known_mac_addr(hard_iface->net_dev);
kref_get(&hard_iface->refcount);
list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
+ batadv_hardif_generation++;
return hard_iface;
@@ -993,6 +994,7 @@ void batadv_hardif_remove_interfaces(void)
list_for_each_entry_safe(hard_iface, hard_iface_tmp,
&batadv_hardif_list, list) {
list_del_rcu(&hard_iface->list);
+ batadv_hardif_generation++;
batadv_hardif_remove_interface(hard_iface);
}
rtnl_unlock();
@@ -1054,6 +1056,7 @@ static int batadv_hard_if_event(struct notifier_block *this,
case NETDEV_UNREGISTER:
case NETDEV_PRE_TYPE_CHANGE:
list_del_rcu(&hard_iface->list);
+ batadv_hardif_generation++;
batadv_hardif_remove_interface(hard_iface);
break;
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 7b49e4001778..9194f4d891b1 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -32,6 +32,8 @@ static void batadv_hash_init(struct batadv_hashtable *hash)
INIT_HLIST_HEAD(&hash->table[i]);
spin_lock_init(&hash->list_locks[i]);
}
+
+ atomic_set(&hash->generation, 0);
}
/**
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 9490a7ca2ba6..0e36fa1c7c3e 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -21,6 +21,7 @@
#include "main.h"
+#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/rculist.h>
@@ -58,6 +59,9 @@ struct batadv_hashtable {
/** @size: size of hashtable */
u32 size;
+
+ /** @generation: current (generation) sequence number */
+ atomic_t generation;
};
/* allocates and clears the hash */
@@ -112,6 +116,7 @@ static inline int batadv_hash_add(struct batadv_hashtable *hash,
/* no duplicate found in list, add new element */
hlist_add_head_rcu(data_node, head);
+ atomic_inc(&hash->generation);
ret = 0;
@@ -154,6 +159,7 @@ static inline void *batadv_hash_remove(struct batadv_hashtable *hash,
data_save = node;
hlist_del_rcu(node);
+ atomic_inc(&hash->generation);
break;
}
spin_unlock_bh(&hash->list_locks[index]);
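The counter is an atomic_t because writers only ever hold one per-bucket
lock, so additions and removals in different buckets can bump it
concurrently; the dump side reads it under the bucket lock it is walking.
Both halves in brief (sketch):

    /* writer, under hash->list_locks[index]: */
    hlist_add_head_rcu(data_node, head);
    atomic_inc(&hash->generation);

    /* dumper, under hash->list_locks[bucket]: */
    cb->seq = atomic_read(&hash->generation) << 1 | 1;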
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index 6beb5f067810..02e55b78132f 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -43,6 +43,8 @@
#include "debugfs.h"
#include "trace.h"
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+
#define BATADV_LOG_BUFF_MASK (batadv_log_buff_len - 1)
static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
@@ -92,33 +94,6 @@ static int batadv_fdebug_log(struct batadv_priv_debug_log *debug_log,
return 0;
}
-/**
- * batadv_debug_log() - Add debug log entry
- * @bat_priv: the bat priv with all the soft interface information
- * @fmt: format string
- *
- * Return: 0 on success or negative error number in case of failure
- */
-int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- batadv_fdebug_log(bat_priv->debug_log, "[%10u] %pV",
- jiffies_to_msecs(jiffies), &vaf);
-
- trace_batadv_dbg(bat_priv, &vaf);
-
- va_end(args);
-
- return 0;
-}
-
static int batadv_log_open(struct inode *inode, struct file *file)
{
if (!try_module_get(THIS_MODULE))
@@ -259,3 +234,34 @@ void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
kfree(bat_priv->debug_log);
bat_priv->debug_log = NULL;
}
+
+#endif /* CONFIG_BATMAN_ADV_DEBUGFS */
+
+/**
+ * batadv_debug_log() - Add debug log entry
+ * @bat_priv: the bat priv with all the soft interface information
+ * @fmt: format string
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+#ifdef CONFIG_BATMAN_ADV_DEBUGFS
+ batadv_fdebug_log(bat_priv->debug_log, "[%10u] %pV",
+ jiffies_to_msecs(jiffies), &vaf);
+#endif
+
+ trace_batadv_dbg(bat_priv, &vaf);
+
+ va_end(args);
+
+ return 0;
+}
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 69c0d85bceb3..d1ed839fd32b 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -74,6 +74,7 @@
* list traversals just rcu-locked
*/
struct list_head batadv_hardif_list;
+unsigned int batadv_hardif_generation;
static int (*batadv_rx_handler[256])(struct sk_buff *skb,
struct batadv_hard_iface *recv_if);
@@ -186,6 +187,8 @@ int batadv_mesh_init(struct net_device *soft_iface)
INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
INIT_HLIST_HEAD(&bat_priv->tp_list);
+ bat_priv->gw.generation = 0;
+
ret = batadv_v_mesh_init(bat_priv);
if (ret < 0)
goto err;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 2002b70e18db..b572066325e4 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -25,7 +25,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2018.4"
+#define BATADV_SOURCE_VERSION "2019.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -247,6 +247,7 @@ static inline int batadv_print_vid(unsigned short vid)
}
extern struct list_head batadv_hardif_list;
+extern unsigned int batadv_hardif_generation;
extern unsigned char batadv_broadcast_addr[];
extern struct workqueue_struct *batadv_event_workqueue;
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 86725d792e15..69244e4598f5 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1365,22 +1365,26 @@ int batadv_mcast_mesh_info_put(struct sk_buff *msg,
* to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @orig_node: originator to dump the multicast flags of
*
* Return: 0 or error code.
*/
static int
-batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_orig_node *orig_node)
{
void *hdr;
- hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
- NLM_F_MULTI, BATADV_CMD_GET_MCAST_FLAGS);
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
+ BATADV_CMD_GET_MCAST_FLAGS);
if (!hdr)
return -ENOBUFS;
+ genl_dump_check_consistent(cb, hdr);
+
if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
orig_node->orig)) {
genlmsg_cancel(msg, hdr);
@@ -1405,21 +1409,26 @@ batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
* table to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
- * @seq: Sequence number of netlink message
- * @head: bucket to dump
+ * @cb: Control block containing additional options
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
* @idx_skip: How many entries to skip
*
* Return: 0 or error code.
*/
static int
-batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
- struct hlist_head *head, long *idx_skip)
+batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
+ struct batadv_hashtable *hash,
+ unsigned int bucket, long *idx_skip)
{
struct batadv_orig_node *orig_node;
long idx = 0;
- rcu_read_lock();
- hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
+ spin_lock_bh(&hash->list_locks[bucket]);
+ cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+ hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
&orig_node->capa_initialized))
continue;
@@ -1427,9 +1436,8 @@ batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
if (idx < *idx_skip)
goto skip;
- if (batadv_mcast_flags_dump_entry(msg, portid, seq,
- orig_node)) {
- rcu_read_unlock();
+ if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
+ spin_unlock_bh(&hash->list_locks[bucket]);
*idx_skip = idx;
return -EMSGSIZE;
@@ -1438,7 +1446,7 @@ batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
skip:
idx++;
}
- rcu_read_unlock();
+ spin_unlock_bh(&hash->list_locks[bucket]);
return 0;
}
@@ -1447,7 +1455,7 @@ skip:
* __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
* @msg: buffer for the message
* @portid: netlink port
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @bat_priv: the bat priv with all the soft interface information
* @bucket: current bucket to dump
* @idx: index in current bucket to the next entry to dump
@@ -1455,19 +1463,17 @@ skip:
* Return: 0 or error code.
*/
static int
-__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid, u32 seq,
+__batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_priv *bat_priv, long *bucket, long *idx)
{
struct batadv_hashtable *hash = bat_priv->orig_hash;
long bucket_tmp = *bucket;
- struct hlist_head *head;
long idx_tmp = *idx;
while (bucket_tmp < hash->size) {
- head = &hash->table[bucket_tmp];
-
- if (batadv_mcast_flags_dump_bucket(msg, portid, seq, head,
- &idx_tmp))
+ if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
+ bucket_tmp, &idx_tmp))
break;
bucket_tmp++;
@@ -1550,8 +1556,7 @@ int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
return ret;
bat_priv = netdev_priv(primary_if->soft_iface);
- ret = __batadv_mcast_flags_dump(msg, portid, cb->nlh->nlmsg_seq,
- bat_priv, bucket, idx);
+ ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
batadv_hardif_put(primary_if);
return ret;
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 0d9459b69bdb..2dc3304cee54 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -29,11 +29,11 @@
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/printk.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/stddef.h>
#include <linux/types.h>
@@ -445,23 +445,27 @@ out:
* batadv_netlink_dump_hardif_entry() - Dump one hard interface into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @hard_iface: Hard interface to dump
*
* Return: error code, or 0 on success
*/
static int
-batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_netlink_dump_hardif_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_hard_iface *hard_iface)
{
struct net_device *net_dev = hard_iface->net_dev;
void *hdr;
- hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, NLM_F_MULTI,
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_HARDIFS);
if (!hdr)
return -EMSGSIZE;
+ genl_dump_check_consistent(cb, hdr);
+
if (nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
net_dev->ifindex) ||
nla_put_string(msg, BATADV_ATTR_HARD_IFNAME,
@@ -498,7 +502,6 @@ batadv_netlink_dump_hardifs(struct sk_buff *msg, struct netlink_callback *cb)
struct batadv_hard_iface *hard_iface;
int ifindex;
int portid = NETLINK_CB(cb->skb).portid;
- int seq = cb->nlh->nlmsg_seq;
int skip = cb->args[0];
int i = 0;
@@ -516,23 +519,24 @@ batadv_netlink_dump_hardifs(struct sk_buff *msg, struct netlink_callback *cb)
return -ENODEV;
}
- rcu_read_lock();
+ rtnl_lock();
+ cb->seq = batadv_hardif_generation << 1 | 1;
- list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;
if (i++ < skip)
continue;
- if (batadv_netlink_dump_hardif_entry(msg, portid, seq,
+ if (batadv_netlink_dump_hardif_entry(msg, portid, cb,
hard_iface)) {
i--;
break;
}
}
- rcu_read_unlock();
+ rtnl_unlock();
dev_put(soft_iface);
diff --git a/net/batman-adv/trace.c b/net/batman-adv/trace.c
index 3d57f9981f25..8e1024217cff 100644
--- a/net/batman-adv/trace.c
+++ b/net/batman-adv/trace.c
@@ -16,7 +16,5 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/module.h>
-
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/net/batman-adv/trace.h b/net/batman-adv/trace.h
index 3acda26a30ca..104784be94d7 100644
--- a/net/batman-adv/trace.h
+++ b/net/batman-adv/trace.h
@@ -21,7 +21,13 @@
#include "main.h"
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
#include <linux/tracepoint.h>
+#include <linux/types.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM batadv
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index d21624c44665..8dcd4968cde7 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1145,14 +1145,15 @@ out:
* batadv_tt_local_dump_entry() - Dump one TT local entry into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
* @common: tt local & tt global common data
*
* Return: Error code, or 0 on success
*/
static int
-batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_priv *bat_priv,
struct batadv_tt_common_entry *common)
{
@@ -1173,12 +1174,14 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
batadv_softif_vlan_put(vlan);
- hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family,
- NLM_F_MULTI,
+ hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
+ &batadv_netlink_family, NLM_F_MULTI,
BATADV_CMD_GET_TRANSTABLE_LOCAL);
if (!hdr)
return -ENOBUFS;
+ genl_dump_check_consistent(cb, hdr);
+
if (nla_put(msg, BATADV_ATTR_TT_ADDRESS, ETH_ALEN, common->addr) ||
nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
@@ -1201,34 +1204,39 @@ batadv_tt_local_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
* batadv_tt_local_dump_bucket() - Dump one TT local bucket into a message
* @msg: Netlink message to dump into
* @portid: Port making netlink request
- * @seq: Sequence number of netlink message
+ * @cb: Control block containing additional options
* @bat_priv: The bat priv with all the soft interface information
- * @head: Pointer to the list containing the local tt entries
+ * @hash: hash to dump
+ * @bucket: bucket index to dump
* @idx_s: Number of entries to skip
*
* Return: Error code, or 0 on success
*/
static int
-batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid, u32 seq,
+batadv_tt_local_dump_bucket(struct sk_buff *msg, u32 portid,
+ struct netlink_callback *cb,
struct batadv_priv *bat_priv,
- struct hlist_head *head, int *idx_s)
+ struct batadv_hashtable *hash, unsigned int bucket,
+ int *idx_s)
{
struct batadv_tt_common_entry *common;
int idx = 0;
- rcu_read_lock();
- hlist_for_each_entry_rcu(common, head, hash_entry) {
+ spin_lock_bh(&hash->list_locks[bucket]);
+ cb->seq = atomic_read(&hash->generation) << 1 | 1;
+
+ hlist_for_each_entry(common, &hash->table[bucket], hash_entry) {
if (idx++ < *idx_s)
continue;
- if (batadv_tt_local_dump_entry(msg, portid, seq, bat_priv,
+ if (batadv_tt_local_dump_entry(msg, portid, cb, bat_priv,
common)) {
- rcu_read_unlock();
+ spin_unlock_bh(&hash->list_locks[bucket]);
*idx_s = idx - 1;
return -EMSGSIZE;
}
}
- rcu_read_unlock();
+ spin_unlock_bh(&hash->list_locks[bucket]);
*idx_s = 0;
return 0;
@@ -1248,7 +1256,6 @@ int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb)
struct batadv_priv *bat_priv;
struct batadv_hard_iface *primary_if = NULL;
struct batadv_hashtable *hash;
- struct hlist_head *head;
int ret;
int ifindex;
int bucket = cb->args[0];
@@ -1276,10 +1283,8 @@ int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb)
hash = bat_priv->tt.local_hash;
while (bucket < hash->size) {
- head = &hash->table[bucket];
-
- if (batadv_tt_local_dump_bucket(msg, portid, cb->nlh->nlmsg_seq,
- bat_priv, head, &idx))
+ if (batadv_tt_local_dump_bucket(msg, portid, cb, bat_priv,
+ hash, bucket, &idx))
break;
bucket++;
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 45b5592de816..cbe17da36fcb 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1096,12 +1096,15 @@ struct batadv_priv_gw {
/** @gateway_list: list of available gateway nodes */
struct hlist_head gateway_list;
- /** @list_lock: lock protecting gateway_list & curr_gw */
+ /** @list_lock: lock protecting gateway_list, curr_gw, generation */
spinlock_t list_lock;
/** @curr_gw: pointer to currently selected gateway node */
struct batadv_gw_node __rcu *curr_gw;
+ /** @generation: current (generation) sequence number */
+ unsigned int generation;
+
/**
* @mode: gateway operation: off, client or server (see batadv_gw_modes)
*/
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 828e87fe8027..9d79c7de234a 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -607,7 +607,7 @@ static void ifup(struct net_device *netdev)
int err;
rtnl_lock();
- err = dev_open(netdev);
+ err = dev_open(netdev, NULL);
if (err < 0)
BT_INFO("iface %s cannot be opened (%d)", netdev->name, err);
rtnl_unlock();
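dev_open() gained a struct netlink_ext_ack * parameter in this series;
callers without a netlink request context, like the one above, pass NULL.
The new signature, for reference:

    int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);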
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index ef9928d7b4fb..ac2826ce162b 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -5711,6 +5711,12 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
return true;
}
+ /* Check if request ended in Command Status - no way to retrieve
+ * any extra parameters in this case.
+ */
+ if (hdr->evt == HCI_EV_CMD_STATUS)
+ return false;
+
if (hdr->evt != HCI_EV_CMD_COMPLETE) {
bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
hdr->evt);
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e8c9ef1e1922..ca73d36cc149 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -1556,7 +1556,7 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
mgmt_get_connectable(hdev);
- if (!is_advertising_allowed(hdev, connectable))
+ if (!is_advertising_allowed(hdev, connectable))
return -EPERM;
/* Set require_privacy to true only when non-connectable
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2146e0f3b6f8..2a7fb517d460 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7650,17 +7650,7 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
return 0;
}
-static int l2cap_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, l2cap_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations l2cap_debugfs_fops = {
- .open = l2cap_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
static struct dentry *l2cap_debugfs;
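DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates essentially the
boilerplate deleted here, keyed off the *_show() naming convention. A
simplified sketch of the macro:

    #define DEFINE_SHOW_ATTRIBUTE(__name)                                \
    static int __name ## _open(struct inode *inode, struct file *file)   \
    {                                                                    \
        return single_open(file, __name ## _show, inode->i_private);     \
    }                                                                    \
                                                                         \
    static const struct file_operations __name ## _fops = {             \
        .owner   = THIS_MODULE,                                          \
        .open    = __name ## _open,                                      \
        .read    = seq_read,                                             \
        .llseek  = seq_lseek,                                            \
        .release = single_release,                                       \
    }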
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index b98225d65e87..1a635df80643 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -2166,17 +2166,7 @@ static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x)
return 0;
}
-static int rfcomm_dlc_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rfcomm_dlc_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations rfcomm_dlc_debugfs_fops = {
- .open = rfcomm_dlc_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(rfcomm_dlc_debugfs);
static struct dentry *rfcomm_dlc_debugfs;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index d606e9212291..aa0db1d1bd9b 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -1020,17 +1020,7 @@ static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
return 0;
}
-static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rfcomm_sock_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations rfcomm_sock_debugfs_fops = {
- .open = rfcomm_sock_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(rfcomm_sock_debugfs);
static struct dentry *rfcomm_sock_debugfs;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8f0f9279eac9..529b38996d8b 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1173,17 +1173,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
return 0;
}
-static int sco_debugfs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sco_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations sco_debugfs_fops = {
- .open = sco_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sco_debugfs);
static struct dentry *sco_debugfs;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index c822e626761b..621146d04c03 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -1390,7 +1390,7 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
if (!smp)
return NULL;
- smp->tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+ smp->tfm_aes = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(smp->tfm_aes)) {
BT_ERR("Unable to create AES crypto context");
goto zfree_smp;
@@ -3233,7 +3233,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
if (!smp)
return ERR_PTR(-ENOMEM);
- tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+ tfm_aes = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(tfm_aes)) {
BT_ERR("Unable to create AES crypto context");
kzfree(smp);
@@ -3906,13 +3906,13 @@ int __init bt_selftest_smp(void)
struct crypto_kpp *tfm_ecdh;
int err;
- tfm_aes = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+ tfm_aes = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(tfm_aes)) {
BT_ERR("Unable to create AES crypto context");
return PTR_ERR(tfm_aes);
}
- tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0);
if (IS_ERR(tfm_cmac)) {
BT_ERR("Unable to create CMAC crypto context");
crypto_free_cipher(tfm_aes);
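On the crypto_alloc_cipher() changes above: the last two arguments are a
(type, mask) pair matched against algorithm flags, and a mask of
CRYPTO_ALG_ASYNC only demands a synchronous implementation, which
single-block "cipher" tfms always are, so the mask was a no-op. Plain
allocation and single-block use, for reference (sketch):

    struct crypto_cipher *tfm;
    u8 key[16], in[16], out[16];

    tfm = crypto_alloc_cipher("aes", 0, 0);     /* type = 0, mask = 0 */
    if (IS_ERR(tfm))
        return PTR_ERR(tfm);

    if (crypto_cipher_setkey(tfm, key, sizeof(key)) == 0)
        crypto_cipher_encrypt_one(tfm, out, in); /* one 16-byte block */
    crypto_free_cipher(tfm);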
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index c89c22c49015..fa2644d276ef 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -28,12 +28,13 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
return ret;
}
-static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
+ u32 *time)
{
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
enum bpf_cgroup_storage_type stype;
u64 time_start, time_spent = 0;
- u32 ret = 0, i;
+ u32 i;
for_each_cgroup_storage_type(stype) {
storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
@@ -49,7 +50,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
repeat = 1;
time_start = ktime_get_ns();
for (i = 0; i < repeat; i++) {
- ret = bpf_test_run_one(prog, ctx, storage);
+ *ret = bpf_test_run_one(prog, ctx, storage);
if (need_resched()) {
if (signal_pending(current))
break;
@@ -65,7 +66,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
for_each_cgroup_storage_type(stype)
bpf_cgroup_storage_free(storage[stype]);
- return ret;
+ return 0;
}
static int bpf_test_finish(const union bpf_attr *kattr,
@@ -74,8 +75,18 @@ static int bpf_test_finish(const union bpf_attr *kattr,
{
void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
int err = -EFAULT;
+ u32 copy_size = size;
+
+ /* Clamp copy if the user has provided a size hint, but copy the full
+ * buffer if not to retain old behaviour.
+ */
+ if (kattr->test.data_size_out &&
+ copy_size > kattr->test.data_size_out) {
+ copy_size = kattr->test.data_size_out;
+ err = -ENOSPC;
+ }
- if (data_out && copy_to_user(data_out, data, size))
+ if (data_out && copy_to_user(data_out, data, copy_size))
goto out;
if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
goto out;
@@ -83,7 +94,8 @@ static int bpf_test_finish(const union bpf_attr *kattr,
goto out;
if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
goto out;
- err = 0;
+ if (err != -ENOSPC)
+ err = 0;
out:
return err;
}
@@ -165,7 +177,12 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
__skb_push(skb, hh_len);
if (is_direct_pkt_access)
bpf_compute_data_pointers(skb);
- retval = bpf_test_run(prog, skb, repeat, &duration);
+ ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
+ if (ret) {
+ kfree_skb(skb);
+ kfree(sk);
+ return ret;
+ }
if (!is_l2) {
if (skb_headroom(skb) < hh_len) {
int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@ -212,11 +229,14 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
xdp.rxq = &rxqueue->xdp_rxq;
- retval = bpf_test_run(prog, &xdp, repeat, &duration);
+ ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
+ if (ret)
+ goto out;
if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
xdp.data_end != xdp.data + size)
size = xdp.data_end - xdp.data;
ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
+out:
kfree(data);
return ret;
}
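The bpf_test_finish() change above turns test.data_size_out into an input
capacity hint as well: when the (possibly grown) output packet exceeds it,
the copy is clamped and the command fails with ENOSPC while still
reporting the full size. A hypothetical userspace sketch (prog_fd, pkt and
pkt_len assumed to be set up elsewhere):

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static int sketch_test_run(int prog_fd, const void *pkt, uint32_t pkt_len)
    {
        union bpf_attr attr;
        char out[256];

        memset(&attr, 0, sizeof(attr));
        attr.test.prog_fd       = prog_fd;
        attr.test.data_in       = (uintptr_t)pkt;
        attr.test.data_size_in  = pkt_len;
        attr.test.data_out      = (uintptr_t)out;
        attr.test.data_size_out = sizeof(out);  /* capacity hint */

        if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) < 0) {
            /* On ENOSPC the copy was truncated to sizeof(out) and
             * attr.test.data_size_out reports the full size. */
            return -errno;
        }
        return (int)attr.test.retval;
    }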
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 360ad66c21e9..a5174e5001d8 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -31,6 +31,8 @@
*/
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
+ struct netdev_notifier_pre_changeaddr_info *prechaddr_info;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net_bridge_port *p;
struct net_bridge *br;
@@ -56,6 +58,17 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
br_mtu_auto_adjust(br);
break;
+ case NETDEV_PRE_CHANGEADDR:
+ if (br->dev->addr_assign_type == NET_ADDR_SET)
+ break;
+ prechaddr_info = ptr;
+ err = dev_pre_changeaddr_notify(br->dev,
+ prechaddr_info->dev_addr,
+ extack);
+ if (err)
+ return notifier_from_errno(err);
+ break;
+
case NETDEV_CHANGEADDR:
spin_lock_bh(&br->lock);
br_fdb_changeaddr(p, dev->dev_addr);
@@ -175,6 +188,82 @@ static struct notifier_block br_switchdev_notifier = {
.notifier_call = br_switchdev_event,
};
+/* br_boolopt_toggle - change user-controlled boolean option
+ *
+ * @br: bridge device
+ * @opt: id of the option to change
+ * @on: new option value
+ * @extack: extack for error messages
+ *
+ * Changes the value of the respective boolean option to @on taking care of
+ * any internal option value mapping and configuration.
+ */
+int br_boolopt_toggle(struct net_bridge *br, enum br_boolopt_id opt, bool on,
+ struct netlink_ext_ack *extack)
+{
+ switch (opt) {
+ case BR_BOOLOPT_NO_LL_LEARN:
+ br_opt_toggle(br, BROPT_NO_LL_LEARN, on);
+ break;
+ default:
+ /* shouldn't be called with unsupported options */
+ WARN_ON(1);
+ break;
+ }
+
+ return 0;
+}
+
+int br_boolopt_get(const struct net_bridge *br, enum br_boolopt_id opt)
+{
+ switch (opt) {
+ case BR_BOOLOPT_NO_LL_LEARN:
+ return br_opt_get(br, BROPT_NO_LL_LEARN);
+ default:
+ /* shouldn't be called with unsupported options */
+ WARN_ON(1);
+ break;
+ }
+
+ return 0;
+}
+
+int br_boolopt_multi_toggle(struct net_bridge *br,
+ struct br_boolopt_multi *bm,
+ struct netlink_ext_ack *extack)
+{
+ unsigned long bitmap = bm->optmask;
+ int err = 0;
+ int opt_id;
+
+ for_each_set_bit(opt_id, &bitmap, BR_BOOLOPT_MAX) {
+ bool on = !!(bm->optval & BIT(opt_id));
+
+ err = br_boolopt_toggle(br, opt_id, on, extack);
+ if (err) {
+ br_debug(br, "boolopt multi-toggle error: option: %d current: %d new: %d error: %d\n",
+ opt_id, br_boolopt_get(br, opt_id), on, err);
+ break;
+ }
+ }
+
+ return err;
+}
+
+void br_boolopt_multi_get(const struct net_bridge *br,
+ struct br_boolopt_multi *bm)
+{
+ u32 optval = 0;
+ int opt_id;
+
+ for (opt_id = 0; opt_id < BR_BOOLOPT_MAX; opt_id++)
+ optval |= (br_boolopt_get(br, opt_id) << opt_id);
+
+ bm->optval = optval;
+ bm->optmask = GENMASK((BR_BOOLOPT_MAX - 1), 0);
+}
+
+/* private bridge options, controlled by the kernel */
void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on)
{
bool cur = !!br_opt_get(br, opt);
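The multi-option helpers above use a simple bitmap pairing: bit N of
optmask selects option N for change, and bit N of optval carries its new
value, so several boolean options can be applied in one call. Sketch of a
caller (br and extack assumed to be in scope):

    struct br_boolopt_multi bm = {
        .optmask = BIT(BR_BOOLOPT_NO_LL_LEARN), /* change only this option */
        .optval  = BIT(BR_BOOLOPT_NO_LL_LEARN), /* ...and switch it on */
    };
    int err = br_boolopt_multi_toggle(br, &bm, extack);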
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index c6abf927f0c9..013323b6dbe4 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -131,9 +131,17 @@ static int br_dev_init(struct net_device *dev)
return err;
}
+ err = br_mdb_hash_init(br);
+ if (err) {
+ free_percpu(br->stats);
+ br_fdb_hash_fini(br);
+ return err;
+ }
+
err = br_vlan_init(br);
if (err) {
free_percpu(br->stats);
+ br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
return err;
}
@@ -142,6 +150,7 @@ static int br_dev_init(struct net_device *dev)
if (err) {
free_percpu(br->stats);
br_vlan_flush(br);
+ br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
}
br_set_lockdep_class(dev);
@@ -156,6 +165,7 @@ static void br_dev_uninit(struct net_device *dev)
br_multicast_dev_del(br);
br_multicast_uninit_stats(br);
br_vlan_flush(br);
+ br_mdb_hash_fini(br);
br_fdb_hash_fini(br);
free_percpu(br->stats);
}
@@ -393,6 +403,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_fdb_add = br_fdb_add,
.ndo_fdb_del = br_fdb_delete,
.ndo_fdb_dump = br_fdb_dump,
+ .ndo_fdb_get = br_fdb_get,
.ndo_bridge_getlink = br_getlink,
.ndo_bridge_setlink = br_setlink,
.ndo_bridge_dellink = br_dellink,
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index e56ba3912a90..fe3c758791ca 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -773,6 +773,32 @@ skip:
return err;
}
+int br_fdb_get(struct sk_buff *skb,
+ struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 vid, u32 portid, u32 seq,
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge *br = netdev_priv(dev);
+ struct net_bridge_fdb_entry *f;
+ int err = 0;
+
+ rcu_read_lock();
+ f = br_fdb_find_rcu(br, addr, vid);
+ if (!f) {
+ NL_SET_ERR_MSG(extack, "Fdb entry not found");
+ err = -ENOENT;
+ goto errout;
+ }
+
+ err = fdb_fill_info(skb, br, f, portid, seq,
+ RTM_NEWNEIGH, 0);
+errout:
+ rcu_read_unlock();
+ return err;
+}
+
/* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
const u8 *addr, u16 state, u16 flags, u16 vid,
@@ -1164,3 +1190,23 @@ void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
spin_unlock_bh(&br->hash_lock);
}
+
+void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
+{
+ struct net_bridge_fdb_entry *f;
+ struct net_bridge_port *p;
+
+ ASSERT_RTNL();
+
+ p = br_port_get_rtnl(dev);
+ if (!p)
+ return;
+
+ spin_lock_bh(&p->br->hash_lock);
+ hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
+ if (f->dst == p && f->key.vlan_id == vid)
+ f->offloaded = 0;
+ }
+ spin_unlock_bh(&p->br->hash_lock);
+}
+EXPORT_SYMBOL_GPL(br_fdb_clear_offload);
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 9b46d2dc4c22..41f0a696a65f 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -650,7 +650,16 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
if (br_fdb_insert(br, p, dev->dev_addr, 0))
netdev_err(dev, "failed insert local address bridge forwarding table\n");
- err = nbp_vlan_init(p);
+ if (br->dev->addr_assign_type != NET_ADDR_SET) {
+ /* Ask for permission to use this MAC address now, even if we
+ * don't end up choosing it below.
+ */
+ err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack);
+ if (err)
+ goto err7;
+ }
+
+ err = nbp_vlan_init(p, extack);
if (err) {
netdev_err(dev, "failed to initialize vlan filtering on this port\n");
goto err7;
@@ -741,3 +750,15 @@ void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
if (mask & BR_NEIGH_SUPPRESS)
br_recalculate_neigh_suppress_enabled(br);
}
+
+bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
+{
+ struct net_bridge_port *p;
+
+ p = br_port_get_rtnl_rcu(dev);
+ if (!p)
+ return false;
+
+ return p->flags & flag;
+}
+EXPORT_SYMBOL_GPL(br_port_flag_is_set);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 3ddca11f44c2..5ea7e56119c1 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -188,7 +188,9 @@ static void __br_handle_local_finish(struct sk_buff *skb)
u16 vid = 0;
/* check if vlan is allowed, to avoid spoofing */
- if (p->flags & BR_LEARNING && br_should_learn(p, skb, &vid))
+ if ((p->flags & BR_LEARNING) &&
+ !br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
+ br_should_learn(p, skb, &vid))
br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
}
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index a7ea2d431714..f69c8d91dc81 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -78,82 +78,72 @@ static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev)
{
+ int idx = 0, s_idx = cb->args[1], err = 0;
struct net_bridge *br = netdev_priv(dev);
- struct net_bridge_mdb_htable *mdb;
+ struct net_bridge_mdb_entry *mp;
struct nlattr *nest, *nest2;
- int i, err = 0;
- int idx = 0, s_idx = cb->args[1];
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
return 0;
- mdb = rcu_dereference(br->mdb);
- if (!mdb)
- return 0;
-
nest = nla_nest_start(skb, MDBA_MDB);
if (nest == NULL)
return -EMSGSIZE;
- for (i = 0; i < mdb->max; i++) {
- struct net_bridge_mdb_entry *mp;
+ hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct net_bridge_port *port;
- hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
- if (idx < s_idx)
- goto skip;
+ if (idx < s_idx)
+ goto skip;
- nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
- if (nest2 == NULL) {
- err = -EMSGSIZE;
- goto out;
- }
+ nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
+ if (!nest2) {
+ err = -EMSGSIZE;
+ break;
+ }
- for (pp = &mp->ports;
- (p = rcu_dereference(*pp)) != NULL;
- pp = &p->next) {
- struct nlattr *nest_ent;
- struct br_mdb_entry e;
-
- port = p->port;
- if (!port)
- continue;
-
- memset(&e, 0, sizeof(e));
- e.ifindex = port->dev->ifindex;
- e.vid = p->addr.vid;
- __mdb_entry_fill_flags(&e, p->flags);
- if (p->addr.proto == htons(ETH_P_IP))
- e.addr.u.ip4 = p->addr.u.ip4;
+ for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
+ pp = &p->next) {
+ struct nlattr *nest_ent;
+ struct br_mdb_entry e;
+
+ port = p->port;
+ if (!port)
+ continue;
+
+ memset(&e, 0, sizeof(e));
+ e.ifindex = port->dev->ifindex;
+ e.vid = p->addr.vid;
+ __mdb_entry_fill_flags(&e, p->flags);
+ if (p->addr.proto == htons(ETH_P_IP))
+ e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
- if (p->addr.proto == htons(ETH_P_IPV6))
- e.addr.u.ip6 = p->addr.u.ip6;
+ if (p->addr.proto == htons(ETH_P_IPV6))
+ e.addr.u.ip6 = p->addr.u.ip6;
#endif
- e.addr.proto = p->addr.proto;
- nest_ent = nla_nest_start(skb,
- MDBA_MDB_ENTRY_INFO);
- if (!nest_ent) {
- nla_nest_cancel(skb, nest2);
- err = -EMSGSIZE;
- goto out;
- }
- if (nla_put_nohdr(skb, sizeof(e), &e) ||
- nla_put_u32(skb,
- MDBA_MDB_EATTR_TIMER,
- br_timer_value(&p->timer))) {
- nla_nest_cancel(skb, nest_ent);
- nla_nest_cancel(skb, nest2);
- err = -EMSGSIZE;
- goto out;
- }
- nla_nest_end(skb, nest_ent);
+ e.addr.proto = p->addr.proto;
+ nest_ent = nla_nest_start(skb, MDBA_MDB_ENTRY_INFO);
+ if (!nest_ent) {
+ nla_nest_cancel(skb, nest2);
+ err = -EMSGSIZE;
+ goto out;
}
- nla_nest_end(skb, nest2);
- skip:
- idx++;
+ if (nla_put_nohdr(skb, sizeof(e), &e) ||
+ nla_put_u32(skb,
+ MDBA_MDB_EATTR_TIMER,
+ br_timer_value(&p->timer))) {
+ nla_nest_cancel(skb, nest_ent);
+ nla_nest_cancel(skb, nest2);
+ err = -EMSGSIZE;
+ goto out;
+ }
+ nla_nest_end(skb, nest_ent);
}
+ nla_nest_end(skb, nest2);
+skip:
+ idx++;
}
out:
@@ -203,8 +193,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
- /* In theory this could be wrapped to 0... */
- cb->seq = net->dev_base_seq + br_mdb_rehash_seq;
+ cb->seq = net->dev_base_seq;
for_each_netdev_rcu(net, dev) {
if (dev->priv_flags & IFF_EBRIDGE) {
@@ -297,7 +286,6 @@ static void br_mdb_complete(struct net_device *dev, int err, void *priv)
struct br_mdb_complete_info *data = priv;
struct net_bridge_port_group __rcu **pp;
struct net_bridge_port_group *p;
- struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port *port = data->port;
struct net_bridge *br = port->br;
@@ -306,8 +294,7 @@ static void br_mdb_complete(struct net_device *dev, int err, void *priv)
goto err;
spin_lock_bh(&br->multicast_lock);
- mdb = mlock_dereference(br->mdb, br);
- mp = br_mdb_ip_get(mdb, &data->ip);
+ mp = br_mdb_ip_get(br, &data->ip);
if (!mp)
goto out;
for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
@@ -344,7 +331,7 @@ static void br_mdb_switchdev_host_port(struct net_device *dev,
mdb.obj.orig_dev = dev;
switch (type) {
case RTM_NEWMDB:
- switchdev_port_obj_add(lower_dev, &mdb.obj);
+ switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
break;
case RTM_DELMDB:
switchdev_port_obj_del(lower_dev, &mdb.obj);
@@ -394,7 +381,7 @@ static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
__mdb_entry_to_br_ip(entry, &complete_info->ip);
mdb.obj.complete_priv = complete_info;
mdb.obj.complete = br_mdb_complete;
- if (switchdev_port_obj_add(port_dev, &mdb.obj))
+ if (switchdev_port_obj_add(port_dev, &mdb.obj, NULL))
kfree(complete_info);
}
} else if (p && port_dev && type == RTM_DELMDB) {
@@ -588,14 +575,12 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
- struct net_bridge_mdb_htable *mdb;
unsigned long now = jiffies;
int err;
- mdb = mlock_dereference(br->mdb, br);
- mp = br_mdb_ip_get(mdb, group);
+ mp = br_mdb_ip_get(br, group);
if (!mp) {
- mp = br_multicast_new_group(br, port, group);
+ mp = br_multicast_new_group(br, group);
err = PTR_ERR_OR_ZERO(mp);
if (err)
return err;
@@ -696,7 +681,6 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
- struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
@@ -709,9 +693,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
__mdb_entry_to_br_ip(entry, &ip);
spin_lock_bh(&br->multicast_lock);
- mdb = mlock_dereference(br->mdb, br);
-
- mp = br_mdb_ip_get(mdb, &ip);
+ mp = br_mdb_ip_get(br, &ip);
if (!mp)
goto unlock;
@@ -728,7 +710,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
- call_rcu_bh(&p->rcu, br_multicast_free_pg);
+ kfree_rcu(p, rcu);
err = 0;
if (!mp->ports && !mp->host_joined &&
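
A condensed sketch of the dump walk these hunks converge on, paraphrased from the changes above (br->mdb_list and the mdb_node member are fields this patch introduces): one flat RCU-protected list replaces the two-version bucket array, so a dump can resume from a plain index and no longer needs a rehash sequence number folded into cb->seq.

	/* sketch only - the shape of the converted dump loop */
	static int mdb_dump_walk(struct net_bridge *br, int s_idx)
	{
		struct net_bridge_mdb_entry *mp;
		int idx = 0;

		hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
			if (idx++ < s_idx)
				continue;	/* resume point of a partial dump */
			/* emit one MDBA_MDB_ENTRY nest for mp here */
		}
		return idx;
	}
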
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 6bac0d6b7b94..3aeff0895669 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -37,6 +37,14 @@
#include "br_private.h"
+static const struct rhashtable_params br_mdb_rht_params = {
+ .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
+ .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
+ .key_len = sizeof(struct br_ip),
+ .automatic_shrinking = true,
+ .locks_mul = 1,
+};
+
static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
@@ -54,7 +62,6 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
const struct in6_addr *group,
__u16 vid, const unsigned char *src);
#endif
-unsigned int br_mdb_rehash_seq;
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
{
@@ -73,89 +80,58 @@ static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
return 0;
}
-static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip,
- __u16 vid)
-{
- return jhash_2words((__force u32)ip, vid, mdb->secret) & (mdb->max - 1);
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
- const struct in6_addr *ip,
- __u16 vid)
+static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
+ struct br_ip *dst)
{
- return jhash_2words(ipv6_addr_hash(ip), vid,
- mdb->secret) & (mdb->max - 1);
+ return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
-#endif
-static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
- struct br_ip *ip)
-{
- switch (ip->proto) {
- case htons(ETH_P_IP):
- return __br_ip4_hash(mdb, ip->u.ip4, ip->vid);
-#if IS_ENABLED(CONFIG_IPV6)
- case htons(ETH_P_IPV6):
- return __br_ip6_hash(mdb, &ip->u.ip6, ip->vid);
-#endif
- }
- return 0;
-}
-
-static struct net_bridge_mdb_entry *__br_mdb_ip_get(
- struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
+struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
+ struct br_ip *dst)
{
- struct net_bridge_mdb_entry *mp;
-
- hlist_for_each_entry_rcu(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
- if (br_ip_equal(&mp->addr, dst))
- return mp;
- }
+ struct net_bridge_mdb_entry *ent;
- return NULL;
-}
+ lockdep_assert_held_once(&br->multicast_lock);
-struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge_mdb_htable *mdb,
- struct br_ip *dst)
-{
- if (!mdb)
- return NULL;
+ rcu_read_lock();
+ ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
+ rcu_read_unlock();
- return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
+ return ent;
}
-static struct net_bridge_mdb_entry *br_mdb_ip4_get(
- struct net_bridge_mdb_htable *mdb, __be32 dst, __u16 vid)
+static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
+ __be32 dst, __u16 vid)
{
struct br_ip br_dst;
+ memset(&br_dst, 0, sizeof(br_dst));
br_dst.u.ip4 = dst;
br_dst.proto = htons(ETH_P_IP);
br_dst.vid = vid;
- return br_mdb_ip_get(mdb, &br_dst);
+ return br_mdb_ip_get(br, &br_dst);
}
#if IS_ENABLED(CONFIG_IPV6)
-static struct net_bridge_mdb_entry *br_mdb_ip6_get(
- struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst,
- __u16 vid)
+static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
+ const struct in6_addr *dst,
+ __u16 vid)
{
struct br_ip br_dst;
+ memset(&br_dst, 0, sizeof(br_dst));
br_dst.u.ip6 = *dst;
br_dst.proto = htons(ETH_P_IPV6);
br_dst.vid = vid;
- return br_mdb_ip_get(mdb, &br_dst);
+ return br_mdb_ip_get(br, &br_dst);
}
#endif
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
struct sk_buff *skb, u16 vid)
{
- struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
struct br_ip ip;
if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
@@ -164,6 +140,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
if (BR_INPUT_SKB_CB(skb)->igmp)
return NULL;
+ memset(&ip, 0, sizeof(ip));
ip.proto = skb->protocol;
ip.vid = vid;
@@ -180,70 +157,13 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
return NULL;
}
- return br_mdb_ip_get(mdb, &ip);
-}
-
-static void br_mdb_free(struct rcu_head *head)
-{
- struct net_bridge_mdb_htable *mdb =
- container_of(head, struct net_bridge_mdb_htable, rcu);
- struct net_bridge_mdb_htable *old = mdb->old;
-
- mdb->old = NULL;
- kfree(old->mhash);
- kfree(old);
-}
-
-static int br_mdb_copy(struct net_bridge_mdb_htable *new,
- struct net_bridge_mdb_htable *old,
- int elasticity)
-{
- struct net_bridge_mdb_entry *mp;
- int maxlen;
- int len;
- int i;
-
- for (i = 0; i < old->max; i++)
- hlist_for_each_entry(mp, &old->mhash[i], hlist[old->ver])
- hlist_add_head(&mp->hlist[new->ver],
- &new->mhash[br_ip_hash(new, &mp->addr)]);
-
- if (!elasticity)
- return 0;
-
- maxlen = 0;
- for (i = 0; i < new->max; i++) {
- len = 0;
- hlist_for_each_entry(mp, &new->mhash[i], hlist[new->ver])
- len++;
- if (len > maxlen)
- maxlen = len;
- }
-
- return maxlen > elasticity ? -EINVAL : 0;
-}
-
-void br_multicast_free_pg(struct rcu_head *head)
-{
- struct net_bridge_port_group *p =
- container_of(head, struct net_bridge_port_group, rcu);
-
- kfree(p);
-}
-
-static void br_multicast_free_group(struct rcu_head *head)
-{
- struct net_bridge_mdb_entry *mp =
- container_of(head, struct net_bridge_mdb_entry, rcu);
-
- kfree(mp);
+ return br_mdb_ip_get_rcu(br, &ip);
}
static void br_multicast_group_expired(struct timer_list *t)
{
struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
struct net_bridge *br = mp->br;
- struct net_bridge_mdb_htable *mdb;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || timer_pending(&mp->timer))
@@ -255,12 +175,11 @@ static void br_multicast_group_expired(struct timer_list *t)
if (mp->ports)
goto out;
- mdb = mlock_dereference(br->mdb, br);
-
- hlist_del_rcu(&mp->hlist[mdb->ver]);
- mdb->size--;
+ rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
+ br_mdb_rht_params);
+ hlist_del_rcu(&mp->mdb_node);
- call_rcu_bh(&mp->rcu, br_multicast_free_group);
+ kfree_rcu(mp, rcu);
out:
spin_unlock(&br->multicast_lock);
@@ -269,14 +188,11 @@ out:
static void br_multicast_del_pg(struct net_bridge *br,
struct net_bridge_port_group *pg)
{
- struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
- mdb = mlock_dereference(br->mdb, br);
-
- mp = br_mdb_ip_get(mdb, &pg->addr);
+ mp = br_mdb_ip_get(br, &pg->addr);
if (WARN_ON(!mp))
return;
@@ -291,7 +207,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
del_timer(&p->timer);
br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
p->flags);
- call_rcu_bh(&p->rcu, br_multicast_free_pg);
+ kfree_rcu(p, rcu);
if (!mp->ports && !mp->host_joined &&
netif_running(br->dev))
@@ -319,53 +235,6 @@ out:
spin_unlock(&br->multicast_lock);
}
-static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
- int elasticity)
-{
- struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
- struct net_bridge_mdb_htable *mdb;
- int err;
-
- mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
- if (!mdb)
- return -ENOMEM;
-
- mdb->max = max;
- mdb->old = old;
-
- mdb->mhash = kcalloc(max, sizeof(*mdb->mhash), GFP_ATOMIC);
- if (!mdb->mhash) {
- kfree(mdb);
- return -ENOMEM;
- }
-
- mdb->size = old ? old->size : 0;
- mdb->ver = old ? old->ver ^ 1 : 0;
-
- if (!old || elasticity)
- get_random_bytes(&mdb->secret, sizeof(mdb->secret));
- else
- mdb->secret = old->secret;
-
- if (!old)
- goto out;
-
- err = br_mdb_copy(mdb, old, elasticity);
- if (err) {
- kfree(mdb->mhash);
- kfree(mdb);
- return err;
- }
-
- br_mdb_rehash_seq++;
- call_rcu_bh(&mdb->rcu, br_mdb_free);
-
-out:
- rcu_assign_pointer(*mdbp, mdb);
-
- return 0;
-}
-
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
__be32 group,
u8 *igmp_type)
@@ -589,111 +458,19 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
return NULL;
}
-static struct net_bridge_mdb_entry *br_multicast_get_group(
- struct net_bridge *br, struct net_bridge_port *port,
- struct br_ip *group, int hash)
-{
- struct net_bridge_mdb_htable *mdb;
- struct net_bridge_mdb_entry *mp;
- unsigned int count = 0;
- unsigned int max;
- int elasticity;
- int err;
-
- mdb = rcu_dereference_protected(br->mdb, 1);
- hlist_for_each_entry(mp, &mdb->mhash[hash], hlist[mdb->ver]) {
- count++;
- if (unlikely(br_ip_equal(group, &mp->addr)))
- return mp;
- }
-
- elasticity = 0;
- max = mdb->max;
-
- if (unlikely(count > br->hash_elasticity && count)) {
- if (net_ratelimit())
- br_info(br, "Multicast hash table "
- "chain limit reached: %s\n",
- port ? port->dev->name : br->dev->name);
-
- elasticity = br->hash_elasticity;
- }
-
- if (mdb->size >= max) {
- max *= 2;
- if (unlikely(max > br->hash_max)) {
- br_warn(br, "Multicast hash table maximum of %d "
- "reached, disabling snooping: %s\n",
- br->hash_max,
- port ? port->dev->name : br->dev->name);
- err = -E2BIG;
-disable:
- br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
- goto err;
- }
- }
-
- if (max > mdb->max || elasticity) {
- if (mdb->old) {
- if (net_ratelimit())
- br_info(br, "Multicast hash table "
- "on fire: %s\n",
- port ? port->dev->name : br->dev->name);
- err = -EEXIST;
- goto err;
- }
-
- err = br_mdb_rehash(&br->mdb, max, elasticity);
- if (err) {
- br_warn(br, "Cannot rehash multicast "
- "hash table, disabling snooping: %s, %d, %d\n",
- port ? port->dev->name : br->dev->name,
- mdb->size, err);
- goto disable;
- }
-
- err = -EAGAIN;
- goto err;
- }
-
- return NULL;
-
-err:
- mp = ERR_PTR(err);
- return mp;
-}
-
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
- struct net_bridge_port *p,
struct br_ip *group)
{
- struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
- int hash;
int err;
- mdb = rcu_dereference_protected(br->mdb, 1);
- if (!mdb) {
- err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
- if (err)
- return ERR_PTR(err);
- goto rehash;
- }
-
- hash = br_ip_hash(mdb, group);
- mp = br_multicast_get_group(br, p, group, hash);
- switch (PTR_ERR(mp)) {
- case 0:
- break;
-
- case -EAGAIN:
-rehash:
- mdb = rcu_dereference_protected(br->mdb, 1);
- hash = br_ip_hash(mdb, group);
- break;
+ mp = br_mdb_ip_get(br, group);
+ if (mp)
+ return mp;
- default:
- goto out;
+ if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
+ br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
+ return ERR_PTR(-E2BIG);
}
mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
@@ -703,11 +480,15 @@ rehash:
mp->br = br;
mp->addr = *group;
timer_setup(&mp->timer, br_multicast_group_expired, 0);
+ err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
+ br_mdb_rht_params);
+ if (err) {
+ kfree(mp);
+ mp = ERR_PTR(err);
+ } else {
+ hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
+ }
- hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
- mdb->size++;
-
-out:
return mp;
}
@@ -768,7 +549,7 @@ static int br_multicast_add_group(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- mp = br_multicast_new_group(br, port, group);
+ mp = br_multicast_new_group(br, group);
err = PTR_ERR(mp);
if (IS_ERR(mp))
goto err;
@@ -837,6 +618,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
if (ipv6_addr_is_ll_all_nodes(group))
return 0;
+ memset(&br_group, 0, sizeof(br_group));
br_group.u.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
@@ -1483,7 +1265,7 @@ static void br_ip4_multicast_query(struct net_bridge *br,
goto out;
}
- mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
+ mp = br_mdb_ip4_get(br, group, vid);
if (!mp)
goto out;
@@ -1567,7 +1349,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
goto out;
}
- mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
+ mp = br_mdb_ip6_get(br, group, vid);
if (!mp)
goto out;
@@ -1601,7 +1383,6 @@ br_multicast_leave_group(struct net_bridge *br,
struct bridge_mcast_own_query *own_query,
const unsigned char *src)
{
- struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
unsigned long now;
@@ -1612,8 +1393,7 @@ br_multicast_leave_group(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- mdb = mlock_dereference(br->mdb, br);
- mp = br_mdb_ip_get(mdb, group);
+ mp = br_mdb_ip_get(br, group);
if (!mp)
goto out;
@@ -1629,7 +1409,7 @@ br_multicast_leave_group(struct net_bridge *br,
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
- call_rcu_bh(&p->rcu, br_multicast_free_pg);
+ kfree_rcu(p, rcu);
br_mdb_notify(br->dev, port, group, RTM_DELMDB,
p->flags);
@@ -1961,8 +1741,7 @@ static void br_ip6_multicast_query_expired(struct timer_list *t)
void br_multicast_init(struct net_bridge *br)
{
- br->hash_elasticity = 4;
- br->hash_max = 512;
+ br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
br->multicast_last_member_count = 2;
@@ -1999,6 +1778,7 @@ void br_multicast_init(struct net_bridge *br)
timer_setup(&br->ip6_own_query.timer,
br_ip6_multicast_query_expired, 0);
#endif
+ INIT_HLIST_HEAD(&br->mdb_list);
}
static void __br_multicast_open(struct net_bridge *br,
@@ -2033,40 +1813,20 @@ void br_multicast_stop(struct net_bridge *br)
void br_multicast_dev_del(struct net_bridge *br)
{
- struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
- struct hlist_node *n;
- u32 ver;
- int i;
+ struct hlist_node *tmp;
spin_lock_bh(&br->multicast_lock);
- mdb = mlock_dereference(br->mdb, br);
- if (!mdb)
- goto out;
-
- br->mdb = NULL;
-
- ver = mdb->ver;
- for (i = 0; i < mdb->max; i++) {
- hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
- hlist[ver]) {
- del_timer(&mp->timer);
- call_rcu_bh(&mp->rcu, br_multicast_free_group);
- }
- }
-
- if (mdb->old) {
- spin_unlock_bh(&br->multicast_lock);
- rcu_barrier_bh();
- spin_lock_bh(&br->multicast_lock);
- WARN_ON(mdb->old);
+ hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
+ del_timer(&mp->timer);
+ rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
+ br_mdb_rht_params);
+ hlist_del_rcu(&mp->mdb_node);
+ kfree_rcu(mp, rcu);
}
-
- mdb->old = mdb;
- call_rcu_bh(&mdb->rcu, br_mdb_free);
-
-out:
spin_unlock_bh(&br->multicast_lock);
+
+ rcu_barrier();
}
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
@@ -2176,9 +1936,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
- struct net_bridge_mdb_htable *mdb;
struct net_bridge_port *port;
- int err = 0;
spin_lock_bh(&br->multicast_lock);
if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
@@ -2192,21 +1950,6 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
if (!netif_running(br->dev))
goto unlock;
- mdb = mlock_dereference(br->mdb, br);
- if (mdb) {
- if (mdb->old) {
- err = -EEXIST;
-rollback:
- br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
- goto unlock;
- }
-
- err = br_mdb_rehash(&br->mdb, mdb->max,
- br->hash_elasticity);
- if (err)
- goto rollback;
- }
-
br_multicast_open(br);
list_for_each_entry(port, &br->port_list, list)
__br_multicast_enable_port(port);
@@ -2214,7 +1957,7 @@ rollback:
unlock:
spin_unlock_bh(&br->multicast_lock);
- return err;
+ return 0;
}
bool br_multicast_enabled(const struct net_device *dev)
@@ -2271,45 +2014,6 @@ unlock:
return 0;
}
-int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
-{
- int err = -EINVAL;
- u32 old;
- struct net_bridge_mdb_htable *mdb;
-
- spin_lock_bh(&br->multicast_lock);
- if (!is_power_of_2(val))
- goto unlock;
-
- mdb = mlock_dereference(br->mdb, br);
- if (mdb && val < mdb->size)
- goto unlock;
-
- err = 0;
-
- old = br->hash_max;
- br->hash_max = val;
-
- if (mdb) {
- if (mdb->old) {
- err = -EEXIST;
-rollback:
- br->hash_max = old;
- goto unlock;
- }
-
- err = br_mdb_rehash(&br->mdb, br->hash_max,
- br->hash_elasticity);
- if (err)
- goto rollback;
- }
-
-unlock:
- spin_unlock_bh(&br->multicast_lock);
-
- return err;
-}
-
int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
{
/* Currently we support only version 2 and 3 */
@@ -2646,3 +2350,13 @@ void br_multicast_get_stats(const struct net_bridge *br,
}
memcpy(dest, &tdst, sizeof(*dest));
}
+
+int br_mdb_hash_init(struct net_bridge *br)
+{
+ return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
+}
+
+void br_mdb_hash_fini(struct net_bridge *br)
+{
+ rhashtable_destroy(&br->mdb_hash_tbl);
+}
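
The conversion above centres on one rhashtable per bridge, keyed directly on struct br_ip. A condensed call sequence using only operations that appear in the hunks; note that key_len covers the whole br_ip, so the key is hashed and compared as raw bytes, which is why the patch adds memset() before filling the on-stack keys in br_mdb_ip4_get(), br_mdb_ip6_get() and br_mdb_get():

	#include <linux/rhashtable.h>

	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);  /* br_mdb_hash_init() */

	/* RCU read-side lookup on the fast path */
	ent = rhashtable_lookup(&br->mdb_hash_tbl, &dst, br_mdb_rht_params);

	/* insert refuses duplicates, replacing the old manual chain scan
	 * plus the elasticity/rehash bookkeeping */
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode, br_mdb_rht_params);
	rhashtable_destroy(&br->mdb_hash_tbl);                         /* br_mdb_hash_fini() */
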
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index b1b5e8516724..d21a23698410 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -132,10 +132,7 @@ static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage);
static void nf_bridge_info_free(struct sk_buff *skb)
{
- if (skb->nf_bridge) {
- nf_bridge_put(skb->nf_bridge);
- skb->nf_bridge = NULL;
- }
+ skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
}
static inline struct net_device *bridge_parent(const struct net_device *dev)
@@ -148,19 +145,7 @@ static inline struct net_device *bridge_parent(const struct net_device *dev)
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
- struct nf_bridge_info *nf_bridge = skb->nf_bridge;
-
- if (refcount_read(&nf_bridge->use) > 1) {
- struct nf_bridge_info *tmp = nf_bridge_alloc(skb);
-
- if (tmp) {
- memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
- refcount_set(&tmp->use, 1);
- }
- nf_bridge_put(nf_bridge);
- nf_bridge = tmp;
- }
- return nf_bridge;
+ return skb_ext_add(skb, SKB_EXT_BRIDGE_NF);
}
unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
@@ -247,7 +232,9 @@ drop:
void nf_bridge_update_protocol(struct sk_buff *skb)
{
- switch (skb->nf_bridge->orig_proto) {
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ switch (nf_bridge->orig_proto) {
case BRNF_PROTO_8021Q:
skb->protocol = htons(ETH_P_8021Q);
break;
@@ -506,7 +493,6 @@ static unsigned int br_nf_pre_routing(void *priv,
if (br_validate_ipv4(state->net, skb))
return NF_DROP;
- nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb))
return NF_DROP;
if (!setup_pre_routing(skb))
@@ -569,7 +555,8 @@ static unsigned int br_nf_forward_ip(void *priv,
struct net_device *parent;
u_int8_t pf;
- if (!skb->nf_bridge)
+ nf_bridge = nf_bridge_info_get(skb);
+ if (!nf_bridge)
return NF_ACCEPT;
/* Need exclusive nf_bridge_info since we might have multiple
@@ -671,10 +658,8 @@ static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff
return 0;
}
- if (data->vlan_tci) {
- skb->vlan_tci = data->vlan_tci;
- skb->vlan_proto = data->vlan_proto;
- }
+ if (data->vlan_proto)
+ __vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci);
skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
__skb_push(skb, data->encap_size);
@@ -703,7 +688,9 @@ br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
{
- if (skb->nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ if (nf_bridge->orig_proto == BRNF_PROTO_PPPOE)
return PPPOE_SES_HLEN;
return 0;
}
@@ -740,8 +727,13 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
data = this_cpu_ptr(&brnf_frag_data_storage);
- data->vlan_tci = skb->vlan_tci;
- data->vlan_proto = skb->vlan_proto;
+ if (skb_vlan_tag_present(skb)) {
+ data->vlan_tci = skb->vlan_tci;
+ data->vlan_proto = skb->vlan_proto;
+ } else {
+ data->vlan_proto = 0;
+ }
+
data->encap_size = nf_bridge_encap_header_len(skb);
data->size = ETH_HLEN + data->encap_size;
@@ -836,7 +828,9 @@ static unsigned int ip_sabotage_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ if (nf_bridge && !nf_bridge->in_prerouting &&
!netif_is_l3_master(skb->dev)) {
state->okfn(state->net, state->sk, skb);
return NF_STOLEN;
@@ -874,7 +868,9 @@ static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
static int br_nf_dev_xmit(struct sk_buff *skb)
{
- if (skb->nf_bridge && skb->nf_bridge->bridged_dnat) {
+ const struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ if (nf_bridge && nf_bridge->bridged_dnat) {
br_nf_pre_routing_finish_bridge_slow(skb);
return 1;
}
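
The nf_bridge state handled above no longer lives behind a dedicated skb->nf_bridge pointer; it is stored in the generic skb extension area. A minimal sketch of the accessor this file relies on, assuming nf_bridge_info_get() is a thin wrapper over the generic extension lookup introduced in this series:

	#include <linux/skbuff.h>

	static inline struct nf_bridge_info *nf_bridge_info_get(const struct sk_buff *skb)
	{
		/* returns NULL when no bridge-netfilter state is attached */
		return skb_ext_find(skb, SKB_EXT_BRIDGE_NF);
	}

With extensions, allocation and unsharing collapse into skb_ext_add(), which hands back an already-private copy, and release becomes skb_ext_del(), matching the three hunks at the top of this file.
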
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index 96c072e71ea2..94039f588f1d 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -224,8 +224,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv,
if (br_validate_ipv6(state->net, skb))
return NF_DROP;
- nf_bridge_put(skb->nf_bridge);
- if (!nf_bridge_alloc(skb))
+ nf_bridge = nf_bridge_alloc(skb);
+ if (!nf_bridge)
return NF_DROP;
if (!setup_pre_routing(skb))
return NF_DROP;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 3345f1984542..9c07591b0232 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -525,7 +525,8 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
}
static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
- int cmd, struct bridge_vlan_info *vinfo, bool *changed)
+ int cmd, struct bridge_vlan_info *vinfo, bool *changed,
+ struct netlink_ext_ack *extack)
{
bool curr_change;
int err = 0;
@@ -537,11 +538,11 @@ static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
* per-VLAN entry as well
*/
err = nbp_vlan_add(p, vinfo->vid, vinfo->flags,
- &curr_change);
+ &curr_change, extack);
} else {
vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
err = br_vlan_add(br, vinfo->vid, vinfo->flags,
- &curr_change);
+ &curr_change, extack);
}
if (curr_change)
*changed = true;
@@ -568,7 +569,8 @@ static int br_process_vlan_info(struct net_bridge *br,
struct net_bridge_port *p, int cmd,
struct bridge_vlan_info *vinfo_curr,
struct bridge_vlan_info **vinfo_last,
- bool *changed)
+ bool *changed,
+ struct netlink_ext_ack *extack)
{
if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
return -EINVAL;
@@ -598,7 +600,8 @@ static int br_process_vlan_info(struct net_bridge *br,
sizeof(struct bridge_vlan_info));
for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
tmp_vinfo.vid = v;
- err = br_vlan_info(br, p, cmd, &tmp_vinfo, changed);
+ err = br_vlan_info(br, p, cmd, &tmp_vinfo, changed,
+ extack);
if (err)
break;
}
@@ -607,13 +610,14 @@ static int br_process_vlan_info(struct net_bridge *br,
return err;
}
- return br_vlan_info(br, p, cmd, vinfo_curr, changed);
+ return br_vlan_info(br, p, cmd, vinfo_curr, changed, extack);
}
static int br_afspec(struct net_bridge *br,
struct net_bridge_port *p,
struct nlattr *af_spec,
- int cmd, bool *changed)
+ int cmd, bool *changed,
+ struct netlink_ext_ack *extack)
{
struct bridge_vlan_info *vinfo_curr = NULL;
struct bridge_vlan_info *vinfo_last = NULL;
@@ -643,7 +647,8 @@ static int br_afspec(struct net_bridge *br,
return -EINVAL;
vinfo_curr = nla_data(attr);
err = br_process_vlan_info(br, p, cmd, vinfo_curr,
- &vinfo_last, changed);
+ &vinfo_last, changed,
+ extack);
if (err)
return err;
break;
@@ -850,7 +855,8 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
}
/* Change state and parameters on port. */
-int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
+int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags,
+ struct netlink_ext_ack *extack)
{
struct net_bridge *br = (struct net_bridge *)netdev_priv(dev);
struct nlattr *tb[IFLA_BRPORT_MAX + 1];
@@ -897,7 +903,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
}
if (afspec)
- err = br_afspec(br, p, afspec, RTM_SETLINK, &changed);
+ err = br_afspec(br, p, afspec, RTM_SETLINK, &changed, extack);
if (changed)
br_ifinfo_notify(RTM_NEWLINK, br, p);
@@ -923,7 +929,7 @@ int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
if (!p && !(dev->priv_flags & IFF_EBRIDGE))
return -EINVAL;
- err = br_afspec(br, p, afspec, RTM_DELLINK, &changed);
+ err = br_afspec(br, p, afspec, RTM_DELLINK, &changed, NULL);
if (changed)
/* Send RTM_NEWLINK because userspace
* expects RTM_NEWLINK for vlan dels
@@ -1035,6 +1041,8 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
[IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
+ [IFLA_BR_MULTI_BOOLOPT] = { .type = NLA_EXACT_LEN,
+ .len = sizeof(struct br_boolopt_multi) },
};
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
@@ -1103,7 +1111,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
- err = __br_vlan_set_default_pvid(br, defpvid);
+ err = __br_vlan_set_default_pvid(br, defpvid, extack);
if (err)
return err;
}
@@ -1167,9 +1175,7 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
if (data[IFLA_BR_MCAST_SNOOPING]) {
u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);
- err = br_multicast_toggle(br, mcast_snooping);
- if (err)
- return err;
+ br_multicast_toggle(br, mcast_snooping);
}
if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
@@ -1187,19 +1193,12 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
return err;
}
- if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
- u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);
-
- br->hash_elasticity = val;
- }
+ if (data[IFLA_BR_MCAST_HASH_ELASTICITY])
+ br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
+ RHT_ELASTICITY);
- if (data[IFLA_BR_MCAST_HASH_MAX]) {
- u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
-
- err = br_multicast_set_hash_max(br, hash_max);
- if (err)
- return err;
- }
+ if (data[IFLA_BR_MCAST_HASH_MAX])
+ br->hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);
if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);
@@ -1296,6 +1295,15 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
}
#endif
+ if (data[IFLA_BR_MULTI_BOOLOPT]) {
+ struct br_boolopt_multi *bm;
+
+ bm = nla_data(data[IFLA_BR_MULTI_BOOLOPT]);
+ err = br_boolopt_multi_toggle(br, bm, extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -1374,6 +1382,7 @@ static size_t br_get_size(const struct net_device *brdev)
nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_IP6TABLES */
nla_total_size(sizeof(u8)) + /* IFLA_BR_NF_CALL_ARPTABLES */
#endif
+ nla_total_size(sizeof(struct br_boolopt_multi)) + /* IFLA_BR_MULTI_BOOLOPT */
0;
}
@@ -1387,6 +1396,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
u32 stp_enabled = br->stp_enabled;
u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
u8 vlan_enabled = br_vlan_enabled(br->dev);
+ struct br_boolopt_multi bm;
u64 clockval;
clockval = br_timer_value(&br->hello_timer);
@@ -1403,6 +1413,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
return -EMSGSIZE;
+ br_boolopt_multi_get(br, &bm);
if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
@@ -1420,7 +1431,8 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
br->topology_change_detected) ||
- nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
+ nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr) ||
+ nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm))
return -EMSGSIZE;
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
@@ -1442,8 +1454,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
br_opt_get(br, BROPT_MULTICAST_QUERIER)) ||
nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED)) ||
- nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
- br->hash_elasticity) ||
+ nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY, RHT_ELASTICITY) ||
nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
br->multicast_last_member_count) ||
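
IFLA_BR_MULTI_BOOLOPT carries a mask/value pair so userspace can flip several boolean options in one atomic request. A sketch of the caller side, assuming the uapi layout that accompanies this patch (struct br_boolopt_multi with optval/optmask bit fields indexed by enum br_boolopt_id):

	#include <linux/if_bridge.h>

	/* enable NO_LL_LEARN, leave every other boolean option untouched */
	struct br_boolopt_multi bm = {
		.optmask = 1u << BR_BOOLOPT_NO_LL_LEARN,  /* which bits apply */
		.optval  = 1u << BR_BOOLOPT_NO_LL_LEARN,  /* their new state  */
	};
	/* then: nla_put(skb, IFLA_BR_MULTI_BOOLOPT, sizeof(bm), &bm),
	 * matching the NLA_EXACT_LEN policy entry added above */
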
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 04c19a37e500..d240b3e7919f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -31,6 +31,8 @@
#define BR_PORT_BITS 10
#define BR_MAX_PORTS (1<<BR_PORT_BITS)
+#define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+
#define BR_VERSION "2.3"
/* Control of forwarding link local multicast */
@@ -213,23 +215,14 @@ struct net_bridge_port_group {
};
struct net_bridge_mdb_entry {
- struct hlist_node hlist[2];
+ struct rhash_head rhnode;
struct net_bridge *br;
struct net_bridge_port_group __rcu *ports;
struct rcu_head rcu;
struct timer_list timer;
struct br_ip addr;
bool host_joined;
-};
-
-struct net_bridge_mdb_htable {
- struct hlist_head *mhash;
- struct rcu_head rcu;
- struct net_bridge_mdb_htable *old;
- u32 size;
- u32 max;
- u32 secret;
- u32 ver;
+ struct hlist_node mdb_node;
};
struct net_bridge_port {
@@ -328,6 +321,7 @@ enum net_bridge_opts {
BROPT_NEIGH_SUPPRESS_ENABLED,
BROPT_MTU_SET_BY_USER,
BROPT_VLAN_STATS_PER_PORT,
+ BROPT_NO_LL_LEARN,
};
struct net_bridge {
@@ -380,7 +374,6 @@ struct net_bridge {
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
- u32 hash_elasticity;
u32 hash_max;
u32 multicast_last_member_count;
@@ -399,7 +392,9 @@ struct net_bridge {
unsigned long multicast_query_response_interval;
unsigned long multicast_startup_query_interval;
- struct net_bridge_mdb_htable __rcu *mdb;
+ struct rhashtable mdb_hash_tbl;
+
+ struct hlist_head mdb_list;
struct hlist_head router_list;
struct timer_list multicast_router_timer;
@@ -507,6 +502,14 @@ static inline int br_opt_get(const struct net_bridge *br,
return test_bit(opt, &br->options);
}
+int br_boolopt_toggle(struct net_bridge *br, enum br_boolopt_id opt, bool on,
+ struct netlink_ext_ack *extack);
+int br_boolopt_get(const struct net_bridge *br, enum br_boolopt_id opt);
+int br_boolopt_multi_toggle(struct net_bridge *br,
+ struct br_boolopt_multi *bm,
+ struct netlink_ext_ack *extack);
+void br_boolopt_multi_get(const struct net_bridge *br,
+ struct br_boolopt_multi *bm);
void br_opt_toggle(struct net_bridge *br, enum net_bridge_opts opt, bool on);
/* br_device.c */
@@ -572,6 +575,9 @@ int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
const unsigned char *addr, u16 vid, u16 nlh_flags);
int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev, struct net_device *fdev, int *idx);
+int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev,
+ const unsigned char *addr, u16 vid, u32 portid, u32 seq,
+ struct netlink_ext_ack *extack);
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
@@ -650,7 +656,6 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
/* br_multicast.c */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
-extern unsigned int br_mdb_rehash_seq;
int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
struct sk_buff *skb, u16 vid);
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
@@ -675,17 +680,15 @@ int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val);
int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val);
#endif
struct net_bridge_mdb_entry *
-br_mdb_ip_get(struct net_bridge_mdb_htable *mdb, struct br_ip *dst);
+br_mdb_ip_get(struct net_bridge *br, struct br_ip *dst);
struct net_bridge_mdb_entry *
-br_multicast_new_group(struct net_bridge *br, struct net_bridge_port *port,
- struct br_ip *group);
-void br_multicast_free_pg(struct rcu_head *head);
+br_multicast_new_group(struct net_bridge *br, struct br_ip *group);
struct net_bridge_port_group *
br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
struct net_bridge_port_group __rcu *next,
unsigned char flags, const unsigned char *src);
-void br_mdb_init(void);
-void br_mdb_uninit(void);
+int br_mdb_hash_init(struct net_bridge *br);
+void br_mdb_hash_fini(struct net_bridge *br);
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
struct br_ip *group, int type, u8 flags);
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
@@ -697,6 +700,8 @@ void br_multicast_uninit_stats(struct net_bridge *br);
void br_multicast_get_stats(const struct net_bridge *br,
const struct net_bridge_port *p,
struct br_mcast_stats *dest);
+void br_mdb_init(void);
+void br_mdb_uninit(void);
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -822,6 +827,15 @@ static inline void br_mdb_uninit(void)
{
}
+static inline int br_mdb_hash_init(struct net_bridge *br)
+{
+ return 0;
+}
+
+static inline void br_mdb_hash_fini(struct net_bridge *br)
+{
+}
+
static inline void br_multicast_count(struct net_bridge *br,
const struct net_bridge_port *p,
const struct sk_buff *skb,
@@ -857,7 +871,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
struct net_bridge_vlan_group *vg,
struct sk_buff *skb);
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags,
- bool *changed);
+ bool *changed, struct netlink_ext_ack *extack);
int br_vlan_delete(struct net_bridge *br, u16 vid);
void br_vlan_flush(struct net_bridge *br);
struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid);
@@ -870,12 +884,13 @@ int br_vlan_set_stats(struct net_bridge *br, unsigned long val);
int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val);
int br_vlan_init(struct net_bridge *br);
int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
-int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid);
+int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
+ struct netlink_ext_ack *extack);
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
- bool *changed);
+ bool *changed, struct netlink_ext_ack *extack);
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
void nbp_vlan_flush(struct net_bridge_port *port);
-int nbp_vlan_init(struct net_bridge_port *port);
+int nbp_vlan_init(struct net_bridge_port *port, struct netlink_ext_ack *extack);
int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
void br_vlan_get_stats(const struct net_bridge_vlan *v,
struct br_vlan_stats *stats);
@@ -912,7 +927,7 @@ static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
int err = 0;
if (skb_vlan_tag_present(skb)) {
- *vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
+ *vid = skb_vlan_tag_get_id(skb);
} else {
*vid = 0;
err = -EINVAL;
@@ -960,7 +975,7 @@ static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
}
static inline int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags,
- bool *changed)
+ bool *changed, struct netlink_ext_ack *extack)
{
*changed = false;
return -EOPNOTSUPP;
@@ -985,7 +1000,7 @@ static inline int br_vlan_init(struct net_bridge *br)
}
static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
- bool *changed)
+ bool *changed, struct netlink_ext_ack *extack)
{
*changed = false;
return -EOPNOTSUPP;
@@ -1006,7 +1021,8 @@ static inline struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group
return NULL;
}
-static inline int nbp_vlan_init(struct net_bridge_port *port)
+static inline int nbp_vlan_init(struct net_bridge_port *port,
+ struct netlink_ext_ack *extack)
{
return 0;
}
@@ -1127,7 +1143,8 @@ int br_netlink_init(void);
void br_netlink_fini(void);
void br_ifinfo_notify(int event, const struct net_bridge *br,
const struct net_bridge_port *port);
-int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
+int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags,
+ struct netlink_ext_ack *extack);
int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
u32 filter_mask, int nlflags);
@@ -1162,7 +1179,8 @@ int br_switchdev_set_port_flag(struct net_bridge_port *p,
unsigned long mask);
void br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb,
int type);
-int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags);
+int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
+ struct netlink_ext_ack *extack);
int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid);
static inline void br_switchdev_frame_unmark(struct sk_buff *skb)
@@ -1194,7 +1212,8 @@ static inline int br_switchdev_set_port_flag(struct net_bridge_port *p,
}
static inline int br_switchdev_port_vlan_add(struct net_device *dev,
- u16 vid, u16 flags)
+ u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
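
Most of the header churn above threads struct netlink_ext_ack through the VLAN add paths. The payoff, sketched with a hypothetical failure site (the function and message are illustrative, not taken from this patch):

	#include <linux/netlink.h>
	#include <linux/if_vlan.h>

	/* illustrative only - the reporting pattern extack enables */
	static int example_vlan_add(u16 vid, struct netlink_ext_ack *extack)
	{
		if (vid >= VLAN_VID_MASK) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN id");
			return -EINVAL;	/* userspace gets errno plus the string */
		}
		return 0;
	}
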
diff --git a/net/bridge/br_switchdev.c b/net/bridge/br_switchdev.c
index b993df770675..035ff59d9cbd 100644
--- a/net/bridge/br_switchdev.c
+++ b/net/bridge/br_switchdev.c
@@ -140,7 +140,8 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
}
}
-int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags)
+int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
{
struct switchdev_obj_port_vlan v = {
.obj.orig_dev = dev,
@@ -150,7 +151,7 @@ int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags)
.vid_end = vid,
};
- return switchdev_port_obj_add(dev, &v.obj);
+ return switchdev_port_obj_add(dev, &v.obj, extack);
}
int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 60182bef6341..b05b94e9c595 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -328,6 +328,27 @@ static ssize_t flush_store(struct device *d,
}
static DEVICE_ATTR_WO(flush);
+static ssize_t no_linklocal_learn_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%d\n", br_boolopt_get(br, BR_BOOLOPT_NO_LL_LEARN));
+}
+
+static int set_no_linklocal_learn(struct net_bridge *br, unsigned long val)
+{
+ return br_boolopt_toggle(br, BR_BOOLOPT_NO_LL_LEARN, !!val, NULL);
+}
+
+static ssize_t no_linklocal_learn_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, set_no_linklocal_learn);
+}
+static DEVICE_ATTR_RW(no_linklocal_learn);
+
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
static ssize_t multicast_router_show(struct device *d,
struct device_attribute *attr, char *buf)
@@ -403,13 +424,13 @@ static DEVICE_ATTR_RW(multicast_querier);
static ssize_t hash_elasticity_show(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct net_bridge *br = to_bridge(d);
- return sprintf(buf, "%u\n", br->hash_elasticity);
+ return sprintf(buf, "%u\n", RHT_ELASTICITY);
}
static int set_elasticity(struct net_bridge *br, unsigned long val)
{
- br->hash_elasticity = val;
+ br_warn(br, "the hash_elasticity option has been deprecated and is always %u\n",
+ RHT_ELASTICITY);
return 0;
}
@@ -428,10 +449,16 @@ static ssize_t hash_max_show(struct device *d, struct device_attribute *attr,
return sprintf(buf, "%u\n", br->hash_max);
}
+static int set_hash_max(struct net_bridge *br, unsigned long val)
+{
+ br->hash_max = val;
+ return 0;
+}
+
static ssize_t hash_max_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
- return store_bridge_parm(d, buf, len, br_multicast_set_hash_max);
+ return store_bridge_parm(d, buf, len, set_hash_max);
}
static DEVICE_ATTR_RW(hash_max);
@@ -841,6 +868,7 @@ static struct attribute *bridge_attrs[] = {
&dev_attr_gc_timer.attr,
&dev_attr_group_addr.attr,
&dev_attr_flush.attr,
+ &dev_attr_no_linklocal_learn.attr,
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
&dev_attr_multicast_router.attr,
&dev_attr_multicast_snooping.attr,
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 7c87a2fe5248..88715edb119a 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -320,9 +320,6 @@ static ssize_t brport_store(struct kobject *kobj,
if (!rtnl_trylock())
return restart_syscall();
- if (!p->dev || !p->br)
- goto out_unlock;
-
if (brport_attr->store_raw) {
char *buf_copy;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index e84be08b8285..4a2f31157ef5 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -80,14 +80,14 @@ static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
}
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
- u16 vid, u16 flags)
+ u16 vid, u16 flags, struct netlink_ext_ack *extack)
{
int err;
/* Try switchdev op first. In case it is not supported, fallback to
* 8021q add.
*/
- err = br_switchdev_port_vlan_add(dev, vid, flags);
+ err = br_switchdev_port_vlan_add(dev, vid, flags, extack);
if (err == -EOPNOTSUPP)
return vlan_vid_add(dev, br->vlan_proto, vid);
return err;
@@ -139,7 +139,9 @@ static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
/* Returns a master vlan, if it didn't exist it gets created. In all cases a
 * reference is taken to the master vlan before returning.
*/
-static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
+static struct net_bridge_vlan *
+br_vlan_get_master(struct net_bridge *br, u16 vid,
+ struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *masterv;
@@ -150,7 +152,7 @@ static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid
bool changed;
/* missing global ctx, create it now */
- if (br_vlan_add(br, vid, 0, &changed))
+ if (br_vlan_add(br, vid, 0, &changed, extack))
return NULL;
masterv = br_vlan_find(vg, vid);
if (WARN_ON(!masterv))
@@ -214,7 +216,8 @@ static void nbp_vlan_rcu_free(struct rcu_head *rcu)
* 4. same as 3 but with both master and brentry flags set so the entry
* will be used for filtering in both the port and the bridge
*/
-static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
+static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
+ struct netlink_ext_ack *extack)
{
struct net_bridge_vlan *masterv = NULL;
struct net_bridge_port *p = NULL;
@@ -239,7 +242,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
* This ensures tagged traffic enters the bridge when
* promiscuous mode is disabled by br_manage_promisc().
*/
- err = __vlan_vid_add(dev, br, v->vid, flags);
+ err = __vlan_vid_add(dev, br, v->vid, flags, extack);
if (err)
goto out;
@@ -249,12 +252,12 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
err = br_vlan_add(br, v->vid,
flags | BRIDGE_VLAN_INFO_BRENTRY,
- &changed);
+ &changed, extack);
if (err)
goto out_filt;
}
- masterv = br_vlan_get_master(br, v->vid);
+ masterv = br_vlan_get_master(br, v->vid, extack);
if (!masterv)
goto out_filt;
v->brvlan = masterv;
@@ -269,7 +272,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
v->stats = masterv->stats;
}
} else {
- err = br_switchdev_port_vlan_add(dev, v->vid, flags);
+ err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
if (err && err != -EOPNOTSUPP)
goto out;
}
@@ -421,7 +424,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
}
if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
if (p && (p->flags & BR_VLAN_TUNNEL) &&
br_handle_egress_vlan_tunnel(skb, v)) {
@@ -494,8 +497,8 @@ static bool __allowed_ingress(const struct net_bridge *br,
__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
else
/* Priority-tagged Frame.
- * At this point, We know that skb->vlan_tci had
- * VLAN_TAG_PRESENT bit and its VID field was 0x000.
+ * At this point, we know that skb->vlan_tci VID
+ * field was 0.
* We update only VID field and preserve PCP field.
*/
skb->vlan_tci |= pvid;
@@ -591,11 +594,12 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
static int br_vlan_add_existing(struct net_bridge *br,
struct net_bridge_vlan_group *vg,
struct net_bridge_vlan *vlan,
- u16 flags, bool *changed)
+ u16 flags, bool *changed,
+ struct netlink_ext_ack *extack)
{
int err;
- err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags);
+ err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
if (err && err != -EOPNOTSUPP)
return err;
@@ -634,7 +638,8 @@ err_flags:
* Must be called with vid in range from 1 to 4094 inclusive.
* changed must be true only if the vlan was created or updated
*/
-int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed)
+int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
+ struct netlink_ext_ack *extack)
{
struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *vlan;
@@ -646,7 +651,8 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed)
vg = br_vlan_group(br);
vlan = br_vlan_find(vg, vid);
if (vlan)
- return br_vlan_add_existing(br, vg, vlan, flags, changed);
+ return br_vlan_add_existing(br, vg, vlan, flags, changed,
+ extack);
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
if (!vlan)
@@ -663,7 +669,7 @@ int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed)
vlan->br = br;
if (flags & BRIDGE_VLAN_INFO_BRENTRY)
refcount_set(&vlan->refcnt, 1);
- ret = __vlan_add(vlan, flags);
+ ret = __vlan_add(vlan, flags, extack);
if (ret) {
free_percpu(vlan->stats);
kfree(vlan);
@@ -914,7 +920,8 @@ static void br_vlan_disable_default_pvid(struct net_bridge *br)
br->default_pvid = 0;
}
-int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
+int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
+ struct netlink_ext_ack *extack)
{
const struct net_bridge_vlan *pvent;
struct net_bridge_vlan_group *vg;
@@ -946,7 +953,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED |
BRIDGE_VLAN_INFO_BRENTRY,
- &vlchange);
+ &vlchange, extack);
if (err)
goto out;
br_vlan_delete(br, old_pvid);
@@ -966,7 +973,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
err = nbp_vlan_add(p, pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED,
- &vlchange);
+ &vlchange, extack);
if (err)
goto err_port;
nbp_vlan_delete(p, old_pvid);
@@ -988,7 +995,7 @@ err_port:
nbp_vlan_add(p, old_pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED,
- &vlchange);
+ &vlchange, NULL);
nbp_vlan_delete(p, pvid);
}
@@ -998,7 +1005,7 @@ err_port:
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED |
BRIDGE_VLAN_INFO_BRENTRY,
- &vlchange);
+ &vlchange, NULL);
br_vlan_delete(br, pvid);
}
goto out;
@@ -1021,7 +1028,7 @@ int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
err = -EPERM;
goto out;
}
- err = __br_vlan_set_default_pvid(br, pvid);
+ err = __br_vlan_set_default_pvid(br, pvid, NULL);
out:
return err;
}
@@ -1047,7 +1054,7 @@ int br_vlan_init(struct net_bridge *br)
rcu_assign_pointer(br->vlgrp, vg);
ret = br_vlan_add(br, 1,
BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
- BRIDGE_VLAN_INFO_BRENTRY, &changed);
+ BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
if (ret)
goto err_vlan_add;
@@ -1064,7 +1071,7 @@ err_rhtbl:
goto out;
}
-int nbp_vlan_init(struct net_bridge_port *p)
+int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
{
struct switchdev_attr attr = {
.orig_dev = p->br->dev,
@@ -1097,7 +1104,7 @@ int nbp_vlan_init(struct net_bridge_port *p)
ret = nbp_vlan_add(p, p->br->default_pvid,
BRIDGE_VLAN_INFO_PVID |
BRIDGE_VLAN_INFO_UNTAGGED,
- &changed);
+ &changed, extack);
if (ret)
goto err_vlan_add;
}
@@ -1122,7 +1129,7 @@ err_vlan_enabled:
* changed must be true only if the vlan was created or updated
*/
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
- bool *changed)
+ bool *changed, struct netlink_ext_ack *extack)
{
struct net_bridge_vlan *vlan;
int ret;
@@ -1133,7 +1140,7 @@ int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
vlan = br_vlan_find(nbp_vlan_group(port), vid);
if (vlan) {
/* Pass the flags to the hardware bridge */
- ret = br_switchdev_port_vlan_add(port->dev, vid, flags);
+ ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
if (ret && ret != -EOPNOTSUPP)
return ret;
*changed = __vlan_add_flags(vlan, flags);
@@ -1147,7 +1154,7 @@ int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
vlan->vid = vid;
vlan->port = port;
- ret = __vlan_add(vlan, flags);
+ ret = __vlan_add(vlan, flags, extack);
if (ret)
kfree(vlan);
else
@@ -1217,9 +1224,13 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v,
int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
struct net_bridge_vlan_group *vg;
+ struct net_bridge_port *p;
ASSERT_RTNL();
- if (netif_is_bridge_master(dev))
+ p = br_port_get_check_rtnl(dev);
+ if (p)
+ vg = nbp_vlan_group(p);
+ else if (netif_is_bridge_master(dev))
vg = br_vlan_group(netdev_priv(dev));
else
return -EINVAL;
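
The vlan_tci edits above follow the removal of the VLAN_TAG_PRESENT bit from vlan_tci: tag presence is now tracked separately, so code must go through the accessors instead of poking the field. A condensed before/after, assuming the helpers of this kernel generation:

	#include <linux/if_vlan.h>

	/* old code cleared tag and presence by zeroing vlan_tci;
	 * now the helper clears both explicitly */
	if (skb_vlan_tag_present(skb))
		__vlan_hwaccel_clear_tag(skb);

	/* setting a tag stores proto + tci and marks presence in one call */
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
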
diff --git a/net/can/raw.c b/net/can/raw.c
index 3aab7664933f..c70207537488 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -771,7 +771,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
if (err < 0)
goto free_skb;
- sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);
+ skb_setup_tx_timestamp(skb, sk->sk_tsflags);
skb->dev = dev;
skb->sk = sk;
diff --git a/net/compat.c b/net/compat.c
index 47a614b370cd..f7084780a8f8 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -810,34 +810,23 @@ COMPAT_SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, buf, compat_size_t, len
return __compat_sys_recvfrom(fd, buf, len, flags, addr, addrlen);
}
-static int __compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
- unsigned int vlen, unsigned int flags,
- struct old_timespec32 __user *timeout)
+COMPAT_SYSCALL_DEFINE5(recvmmsg_time64, int, fd, struct compat_mmsghdr __user *, mmsg,
+ unsigned int, vlen, unsigned int, flags,
+ struct __kernel_timespec __user *, timeout)
{
- int datagrams;
- struct timespec64 ktspec;
-
- if (timeout == NULL)
- return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
- flags | MSG_CMSG_COMPAT, NULL);
-
- if (compat_get_timespec64(&ktspec, timeout))
- return -EFAULT;
-
- datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
- flags | MSG_CMSG_COMPAT, &ktspec);
- if (datagrams > 0 && compat_put_timespec64(&ktspec, timeout))
- datagrams = -EFAULT;
-
- return datagrams;
+ return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT, timeout, NULL);
}
+#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags,
struct old_timespec32 __user *, timeout)
{
- return __compat_sys_recvmmsg(fd, mmsg, vlen, flags, timeout);
+ return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT, NULL, timeout);
}
+#endif
COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
{
@@ -925,8 +914,9 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
ret = __compat_sys_recvmsg(a0, compat_ptr(a1), a[2]);
break;
case SYS_RECVMMSG:
- ret = __compat_sys_recvmmsg(a0, compat_ptr(a1), a[2], a[3],
- compat_ptr(a[4]));
+ ret = __sys_recvmmsg(a0, compat_ptr(a1), a[2],
+ a[3] | MSG_CMSG_COMPAT, NULL,
+ compat_ptr(a[4]));
break;
case SYS_ACCEPT4:
ret = __sys_accept4(a0, compat_ptr(a1), compat_ptr(a[2]), a[3]);
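
The recvmmsg rework gives __sys_recvmmsg() one parameter per timespec flavour; each entry point above passes exactly one of them and NULL for the other. The resulting prototype, inferred from those call sites (the declaration itself lands outside this section):

	/* inferred from the three call sites above */
	int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
			   unsigned int vlen, unsigned int flags,
			   struct __kernel_timespec __user *timeout,  /* 64-bit time */
			   struct old_timespec32 __user *timeout32);  /* y2038 legacy */
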
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 57f3a6fcfc1e..b2651bb6d2a3 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -408,27 +408,20 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
}
EXPORT_SYMBOL(skb_kill_datagram);
-/**
- * skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
- * @skb: buffer to copy
- * @offset: offset in the buffer to start copying from
- * @to: iovec iterator to copy to
- * @len: amount of data to copy from buffer to iovec
- */
-int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
- struct iov_iter *to, int len)
+int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len, bool fault_short,
+ size_t (*cb)(const void *, size_t, void *, struct iov_iter *),
+ void *data)
{
int start = skb_headlen(skb);
int i, copy = start - offset, start_off = offset, n;
struct sk_buff *frag_iter;
- trace_skb_copy_datagram_iovec(skb, len);
-
/* Copy header. */
if (copy > 0) {
if (copy > len)
copy = len;
- n = copy_to_iter(skb->data + offset, copy, to);
+ n = cb(skb->data + offset, copy, data, to);
offset += n;
if (n != copy)
goto short_copy;
@@ -445,11 +438,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
+ struct page *page = skb_frag_page(frag);
+ u8 *vaddr = kmap(page);
+
if (copy > len)
copy = len;
- n = copy_page_to_iter(skb_frag_page(frag),
- frag->page_offset + offset -
- start, copy, to);
+ n = cb(vaddr + frag->page_offset +
+ offset - start, copy, data, to);
+ kunmap(page);
offset += n;
if (n != copy)
goto short_copy;
@@ -468,8 +464,8 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
- if (skb_copy_datagram_iter(frag_iter, offset - start,
- to, copy))
+ if (__skb_datagram_iter(frag_iter, offset - start,
+ to, copy, fault_short, cb, data))
goto fault;
if ((len -= copy) == 0)
return 0;
@@ -490,11 +486,50 @@ fault:
return -EFAULT;
short_copy:
- if (iov_iter_count(to))
+ if (fault_short || iov_iter_count(to))
goto fault;
return 0;
}
+
+/**
+ * skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
+ * and update a hash.
+ * @skb: buffer to copy
+ * @offset: offset in the buffer to start copying from
+ * @to: iovec iterator to copy to
+ * @len: amount of data to copy from buffer to iovec
+ * @hash: hash request to update
+ */
+int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len,
+ struct ahash_request *hash)
+{
+ return __skb_datagram_iter(skb, offset, to, len, true,
+ hash_and_copy_to_iter, hash);
+}
+EXPORT_SYMBOL(skb_copy_and_hash_datagram_iter);
+
+static size_t simple_copy_to_iter(const void *addr, size_t bytes,
+ void *data __always_unused, struct iov_iter *i)
+{
+ return copy_to_iter(addr, bytes, i);
+}
+
+/**
+ * skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
+ * @skb: buffer to copy
+ * @offset: offset in the buffer to start copying from
+ * @to: iovec iterator to copy to
+ * @len: amount of data to copy from buffer to iovec
+ */
+int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
+ struct iov_iter *to, int len)
+{
+ trace_skb_copy_datagram_iovec(skb, len);
+ return __skb_datagram_iter(skb, offset, to, len, false,
+ simple_copy_to_iter, NULL);
+}
EXPORT_SYMBOL(skb_copy_datagram_iter);
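All three datagram-copy variants above are now thin wrappers around
__skb_datagram_iter(); only the per-chunk callback and the fault_short
policy differ. As a rough illustration of the pattern (hypothetical, not
part of this patch), a CRC-accumulating variant inside net/core/datagram.c
could look like this, assuming crc32_le() from <linux/crc32.h>:

	/* hypothetical sketch only -- not in this series */
	static size_t crc32_and_copy_to_iter(const void *addr, size_t bytes,
					     void *data, struct iov_iter *i)
	{
		u32 *crcp = data;
		size_t copied = copy_to_iter(addr, bytes, i);

		/* fold in only what was actually copied */
		*crcp = crc32_le(*crcp, addr, copied);
		return copied;
	}

	static int skb_copy_and_crc_datagram_iter(const struct sk_buff *skb,
						  int offset, struct iov_iter *to,
						  int len, u32 *crcp)
	{
		/* fault_short=true: a short copy would leave a bogus CRC */
		return __skb_datagram_iter(skb, offset, to, len, true,
					   crc32_and_copy_to_iter, crcp);
	}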
/**
@@ -645,131 +680,22 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
+/**
+ * skb_copy_and_csum_datagram_iter - Copy datagram to an iovec iterator
+ * and update a checksum.
+ * @skb: buffer to copy
+ * @offset: offset in the buffer to start copying from
+ * @to: iovec iterator to copy to
+ * @len: amount of data to copy from buffer to iovec
+ * @csump: checksum pointer
+ */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len,
__wsum *csump)
{
- int start = skb_headlen(skb);
- int i, copy = start - offset, start_off = offset;
- struct sk_buff *frag_iter;
- int pos = 0;
- int n;
-
- /* Copy header. */
- if (copy > 0) {
- if (copy > len)
- copy = len;
- n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
- offset += n;
- if (n != copy)
- goto fault;
- if ((len -= copy) == 0)
- return 0;
- pos = copy;
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- int end;
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- WARN_ON(start > offset + len);
-
- end = start + skb_frag_size(frag);
- if ((copy = end - offset) > 0) {
- __wsum csum2 = 0;
- struct page *page = skb_frag_page(frag);
- u8 *vaddr = kmap(page);
-
- if (copy > len)
- copy = len;
- n = csum_and_copy_to_iter(vaddr + frag->page_offset +
- offset - start, copy,
- &csum2, to);
- kunmap(page);
- offset += n;
- if (n != copy)
- goto fault;
- *csump = csum_block_add(*csump, csum2, pos);
- if (!(len -= copy))
- return 0;
- pos += copy;
- }
- start = end;
- }
-
- skb_walk_frags(skb, frag_iter) {
- int end;
-
- WARN_ON(start > offset + len);
-
- end = start + frag_iter->len;
- if ((copy = end - offset) > 0) {
- __wsum csum2 = 0;
- if (copy > len)
- copy = len;
- if (skb_copy_and_csum_datagram(frag_iter,
- offset - start,
- to, copy,
- &csum2))
- goto fault;
- *csump = csum_block_add(*csump, csum2, pos);
- if ((len -= copy) == 0)
- return 0;
- offset += copy;
- pos += copy;
- }
- start = end;
- }
- if (!len)
- return 0;
-
-fault:
- iov_iter_revert(to, offset - start_off);
- return -EFAULT;
-}
-
-__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
-{
- __sum16 sum;
-
- sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
- if (likely(!sum)) {
- if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
- !skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev);
- }
- if (!skb_shared(skb))
- skb->csum_valid = !sum;
- return sum;
-}
-EXPORT_SYMBOL(__skb_checksum_complete_head);
-
-__sum16 __skb_checksum_complete(struct sk_buff *skb)
-{
- __wsum csum;
- __sum16 sum;
-
- csum = skb_checksum(skb, 0, skb->len, 0);
-
- /* skb->csum holds pseudo checksum */
- sum = csum_fold(csum_add(skb->csum, csum));
- if (likely(!sum)) {
- if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
- !skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev);
- }
-
- if (!skb_shared(skb)) {
- /* Save full packet checksum */
- skb->csum = csum;
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum_complete_sw = 1;
- skb->csum_valid = !sum;
- }
-
- return sum;
+ return __skb_datagram_iter(skb, offset, to, len, true,
+ csum_and_copy_to_iter, csump);
}
-EXPORT_SYMBOL(__skb_checksum_complete);
/**
* skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
@@ -810,7 +736,7 @@ int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
!skb->csum_complete_sw)
- netdev_rx_csum_fault(NULL);
+ netdev_rx_csum_fault(NULL, skb);
}
return 0;
fault:
diff --git a/net/core/dev.c b/net/core/dev.c
index ddc551f24ba2..1b5a4410be0e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -145,6 +145,7 @@
#include <linux/sctp.h>
#include <net/udp_tunnel.h>
#include <linux/net_namespace.h>
+#include <linux/indirect_call_wrapper.h>
#include "net-sysfs.h"
@@ -162,6 +163,9 @@ static struct list_head offload_base __read_mostly;
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
struct netdev_notifier_info *info);
+static int call_netdevice_notifiers_extack(unsigned long val,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack);
static struct napi_struct *napi_by_id(unsigned int napi_id);
/*
@@ -1361,7 +1365,7 @@ void netdev_notify_peers(struct net_device *dev)
}
EXPORT_SYMBOL(netdev_notify_peers);
-static int __dev_open(struct net_device *dev)
+static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
int ret;
@@ -1377,7 +1381,7 @@ static int __dev_open(struct net_device *dev)
*/
netpoll_poll_disable(dev);
- ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
+ ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack);
ret = notifier_to_errno(ret);
if (ret)
return ret;
@@ -1406,7 +1410,8 @@ static int __dev_open(struct net_device *dev)
/**
* dev_open - prepare an interface for use.
- * @dev: device to open
+ * @dev: device to open
+ * @extack: netlink extended ack
*
* Takes a device from down to up state. The device's private open
* function is invoked and then the multicast lists are loaded. Finally
@@ -1416,14 +1421,14 @@ static int __dev_open(struct net_device *dev)
* Calling this function on an active interface is a nop. On a failure
* a negative errno code is returned.
*/
-int dev_open(struct net_device *dev)
+int dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
{
int ret;
if (dev->flags & IFF_UP)
return 0;
- ret = __dev_open(dev);
+ ret = __dev_open(dev, extack);
if (ret < 0)
return ret;
@@ -1585,6 +1590,7 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd)
N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN)
N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO)
N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
+ N(PRE_CHANGEADDR)
}
#undef N
return "UNKNOWN_NETDEV_EVENT";
@@ -1733,6 +1739,18 @@ static int call_netdevice_notifiers_info(unsigned long val,
return raw_notifier_call_chain(&netdev_chain, val, info);
}
+static int call_netdevice_notifiers_extack(unsigned long val,
+ struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ struct netdev_notifier_info info = {
+ .dev = dev,
+ .extack = extack,
+ };
+
+ return call_netdevice_notifiers_info(val, &info);
+}
+
/**
* call_netdevice_notifiers - call all network notifier blocks
* @val: value passed unmodified to notifier function
@@ -1744,11 +1762,7 @@ static int call_netdevice_notifiers_info(unsigned long val,
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
- struct netdev_notifier_info info = {
- .dev = dev,
- };
-
- return call_netdevice_notifiers_info(val, &info);
+ return call_netdevice_notifiers_extack(val, dev, NULL);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
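With the extack now threaded through NETDEV_PRE_UP, a listener can attach a
human-readable reason when it vetoes an interface coming up. A rough
consumer-side sketch (the "foo" driver and foo_busy() check are
hypothetical; the accessors and NL_SET_ERR_MSG() are existing kernel API):

	static int foo_netdev_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
		struct netlink_ext_ack *extack =
			netdev_notifier_info_to_extack(ptr);

		if (event == NETDEV_PRE_UP && foo_busy(dev)) {
			/* NL_SET_ERR_MSG() tolerates a NULL extack */
			NL_SET_ERR_MSG(extack, "foo: device is busy");
			return notifier_from_errno(-EBUSY);
		}
		return NOTIFY_DONE;
	}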
@@ -2175,6 +2189,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
return active;
}
+static void reset_xps_maps(struct net_device *dev,
+ struct xps_dev_maps *dev_maps,
+ bool is_rxqs_map)
+{
+ if (is_rxqs_map) {
+ static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
+ RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+ } else {
+ RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+ }
+ static_key_slow_dec_cpuslocked(&xps_needed);
+ kfree_rcu(dev_maps, rcu);
+}
+
static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
struct xps_dev_maps *dev_maps, unsigned int nr_ids,
u16 offset, u16 count, bool is_rxqs_map)
@@ -2186,18 +2214,15 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
j < nr_ids;)
active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
count);
- if (!active) {
- if (is_rxqs_map) {
- RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
- } else {
- RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+ if (!active)
+ reset_xps_maps(dev, dev_maps, is_rxqs_map);
- for (i = offset + (count - 1); count--; i--)
- netdev_queue_numa_node_write(
- netdev_get_tx_queue(dev, i),
- NUMA_NO_NODE);
+ if (!is_rxqs_map) {
+ for (i = offset + (count - 1); count--; i--) {
+ netdev_queue_numa_node_write(
+ netdev_get_tx_queue(dev, i),
+ NUMA_NO_NODE);
}
- kfree_rcu(dev_maps, rcu);
}
}
@@ -2234,10 +2259,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
false);
out_no_maps:
- if (static_key_enabled(&xps_rxqs_needed))
- static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
-
- static_key_slow_dec_cpuslocked(&xps_needed);
mutex_unlock(&xps_map_mutex);
cpus_read_unlock();
}
@@ -2355,9 +2376,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
if (!new_dev_maps)
goto out_no_new_maps;
- static_key_slow_inc_cpuslocked(&xps_needed);
- if (is_rxqs_map)
- static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+ if (!dev_maps) {
+ /* Increment static keys at most once per type */
+ static_key_slow_inc_cpuslocked(&xps_needed);
+ if (is_rxqs_map)
+ static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+ }
for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
j < nr_ids;) {
@@ -2455,13 +2479,8 @@ out_no_new_maps:
}
/* free map if not active */
- if (!active) {
- if (is_rxqs_map)
- RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
- else
- RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
- kfree_rcu(dev_maps, rcu);
- }
+ if (!active)
+ reset_xps_maps(dev, dev_maps, is_rxqs_map);
out_no_maps:
mutex_unlock(&xps_map_mutex);
@@ -3091,10 +3110,17 @@ EXPORT_SYMBOL(__skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
-void netdev_rx_csum_fault(struct net_device *dev)
+void netdev_rx_csum_fault(struct net_device *dev, struct sk_buff *skb)
{
if (net_ratelimit()) {
pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
+ if (dev)
+ pr_err("dev features: %pNF\n", &dev->features);
+ pr_err("skb len=%u data_len=%u pkt_type=%u gso_size=%u gso_type=%u nr_frags=%u ip_summed=%u csum=%x csum_complete_sw=%d csum_valid=%d csum_level=%u\n",
+ skb->len, skb->data_len, skb->pkt_type,
+ skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_type,
+ skb_shinfo(skb)->nr_frags, skb->ip_summed, skb->csum,
+ skb->csum_complete_sw, skb->csum_valid, skb->csum_level);
dump_stack();
}
}
@@ -4520,9 +4546,14 @@ static int netif_rx_internal(struct sk_buff *skb)
int netif_rx(struct sk_buff *skb)
{
+ int ret;
+
trace_netif_rx_entry(skb);
- return netif_rx_internal(skb);
+ ret = netif_rx_internal(skb);
+ trace_netif_rx_exit(ret);
+
+ return ret;
}
EXPORT_SYMBOL(netif_rx);
@@ -4537,6 +4568,7 @@ int netif_rx_ni(struct sk_buff *skb)
if (local_softirq_pending())
do_softirq();
preempt_enable();
+ trace_netif_rx_ni_exit(err);
return err;
}
@@ -4889,7 +4921,7 @@ skip_classify:
* and set skb->priority like in vlan_do_receive()
* For the time being, just ignore Priority Code Point
*/
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
}
type = skb->protocol;
@@ -5009,7 +5041,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
struct net_device *orig_dev = skb->dev;
struct packet_type *pt_prev = NULL;
- list_del(&skb->list);
+ skb_list_del_init(skb);
__netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
if (!pt_prev)
continue;
@@ -5165,7 +5197,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
net_timestamp_check(netdev_tstamp_prequeue, skb);
- list_del(&skb->list);
+ skb_list_del_init(skb);
if (!skb_defer_rx_timestamp(skb))
list_add_tail(&skb->list, &sublist);
}
@@ -5176,7 +5208,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
rcu_read_lock();
list_for_each_entry_safe(skb, next, head, list) {
xdp_prog = rcu_dereference(skb->dev->xdp_prog);
- list_del(&skb->list);
+ skb_list_del_init(skb);
if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
list_add_tail(&skb->list, &sublist);
}
@@ -5195,7 +5227,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
if (cpu >= 0) {
/* Will be handled, remove from list */
- list_del(&skb->list);
+ skb_list_del_init(skb);
enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
}
}
@@ -5222,9 +5254,14 @@ static void netif_receive_skb_list_internal(struct list_head *head)
*/
int netif_receive_skb(struct sk_buff *skb)
{
+ int ret;
+
trace_netif_receive_skb_entry(skb);
- return netif_receive_skb_internal(skb);
+ ret = netif_receive_skb_internal(skb);
+ trace_netif_receive_skb_exit(ret);
+
+ return ret;
}
EXPORT_SYMBOL(netif_receive_skb);
@@ -5244,9 +5281,12 @@ void netif_receive_skb_list(struct list_head *head)
if (list_empty(head))
return;
- list_for_each_entry(skb, head, list)
- trace_netif_receive_skb_list_entry(skb);
+ if (trace_netif_receive_skb_list_entry_enabled()) {
+ list_for_each_entry(skb, head, list)
+ trace_netif_receive_skb_list_entry(skb);
+ }
netif_receive_skb_list_internal(head);
+ trace_netif_receive_skb_list_exit(0);
}
EXPORT_SYMBOL(netif_receive_skb_list);
@@ -5299,6 +5339,8 @@ static void flush_all_backlogs(void)
put_online_cpus();
}
+INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
static int napi_gro_complete(struct sk_buff *skb)
{
struct packet_offload *ptype;
@@ -5318,7 +5360,9 @@ static int napi_gro_complete(struct sk_buff *skb)
if (ptype->type != type || !ptype->callbacks.gro_complete)
continue;
- err = ptype->callbacks.gro_complete(skb, 0);
+ err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
+ ipv6_gro_complete, inet_gro_complete,
+ skb, 0);
break;
}
rcu_read_unlock();
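INDIRECT_CALL_INET() comes from the include/linux/indirect_call_wrapper.h
header added earlier in this series. It trades a retpoline-penalized
indirect call for direct-call candidates guarded by pointer comparisons;
paraphrased (the IPv6 candidate drops out when CONFIG_IPV6 is disabled):

	#define INDIRECT_CALL_1(f, f1, ...)				\
		(likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__))

	#define INDIRECT_CALL_2(f, f2, f1, ...)				\
		(likely(f == f2) ? f2(__VA_ARGS__)			\
				 : INDIRECT_CALL_1(f, f1, __VA_ARGS__))

So the gro_complete invocation above becomes "call ipv6_gro_complete()
directly if that is the callback, else inet_gro_complete(), else fall back
to the indirect call".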
@@ -5357,11 +5401,13 @@ static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
*/
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
- u32 i;
+ unsigned long bitmask = napi->gro_bitmask;
+ unsigned int i, base = ~0U;
- for (i = 0; i < GRO_HASH_BUCKETS; i++) {
- if (test_bit(i, &napi->gro_bitmask))
- __napi_gro_flush_chain(napi, i, flush_old);
+ while ((i = ffs(bitmask)) != 0) {
+ bitmask >>= i;
+ base += i;
+ __napi_gro_flush_chain(napi, base, flush_old);
}
}
EXPORT_SYMBOL(napi_gro_flush);
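The rewritten loop visits only the set bits instead of probing all
GRO_HASH_BUCKETS. A self-contained user-space demo of the same ffs()-driven
walk (using the GCC builtin in place of the kernel's ffs()):

	#include <stdio.h>

	int main(void)
	{
		unsigned long bitmask = 0x92;	/* buckets 1, 4 and 7 set */
		unsigned int i, base = ~0U;	/* unsigned wrap: "bucket -1" */

		while ((i = __builtin_ffsl(bitmask)) != 0) {
			bitmask >>= i;
			base += i;		/* index of the consumed bit */
			printf("flush bucket %u\n", base);	/* 1, 4, 7 */
		}
		return 0;
	}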
@@ -5386,7 +5432,9 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
}
diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
- diffs |= p->vlan_tci ^ skb->vlan_tci;
+ diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
+ if (skb_vlan_tag_present(p))
+ diffs |= p->vlan_tci ^ skb->vlan_tci;
diffs |= skb_metadata_dst_cmp(p, skb);
diffs |= skb_metadata_differs(p, skb);
if (maclen == ETH_HLEN)
@@ -5461,6 +5509,10 @@ static void gro_flush_oldest(struct list_head *head)
napi_gro_complete(oldest);
}
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
+ struct sk_buff *));
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
u32 hash = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
@@ -5510,7 +5562,9 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->csum_valid = 0;
}
- pp = ptype->callbacks.gro_receive(gro_head, skb);
+ pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
+ ipv6_gro_receive, inet_gro_receive,
+ gro_head, skb);
break;
}
rcu_read_unlock();
@@ -5634,12 +5688,17 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
+ gro_result_t ret;
+
skb_mark_napi_id(skb, napi);
trace_napi_gro_receive_entry(skb);
skb_gro_reset_offset(skb);
- return napi_skb_finish(dev_gro_receive(napi, skb), skb);
+ ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
+ trace_napi_gro_receive_exit(ret);
+
+ return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
@@ -5652,7 +5711,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
__skb_pull(skb, skb_headlen(skb));
/* restore the reserve we had after netdev_alloc_skb_ip_align() */
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
skb->dev = napi->dev;
skb->skb_iif = 0;
@@ -5757,6 +5816,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
gro_result_t napi_gro_frags(struct napi_struct *napi)
{
+ gro_result_t ret;
struct sk_buff *skb = napi_frags_skb(napi);
if (!skb)
@@ -5764,7 +5824,10 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
trace_napi_gro_frags_entry(skb);
- return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
+ ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
+ trace_napi_gro_frags_exit(ret);
+
+ return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
@@ -5780,10 +5843,11 @@ __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
+ /* See comments in __skb_checksum_complete(). */
if (likely(!sum)) {
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
!skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev);
+ netdev_rx_csum_fault(skb->dev, skb);
}
NAPI_GRO_CB(skb)->csum = wsum;
@@ -6204,8 +6268,8 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
napi->skb = NULL;
napi->poll = poll;
if (weight > NAPI_POLL_WEIGHT)
- pr_err_once("netif_napi_add() called with weight %d on device %s\n",
- weight, dev->name);
+ netdev_err_once(dev, "%s() called with weight %d\n", __func__,
+ weight);
napi->weight = weight;
list_add(&napi->dev_list, &dev->napi_list);
napi->dev = dev;
@@ -7462,7 +7526,8 @@ unsigned int dev_get_flags(const struct net_device *dev)
}
EXPORT_SYMBOL(dev_get_flags);
-int __dev_change_flags(struct net_device *dev, unsigned int flags)
+int __dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack)
{
unsigned int old_flags = dev->flags;
int ret;
@@ -7499,7 +7564,7 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
if (old_flags & IFF_UP)
__dev_close(dev);
else
- ret = __dev_open(dev);
+ ret = __dev_open(dev, extack);
}
if ((flags ^ dev->gflags) & IFF_PROMISC) {
@@ -7559,16 +7624,18 @@ void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
* dev_change_flags - change device settings
* @dev: device
* @flags: device state flags
+ * @extack: netlink extended ack
*
* Change settings on device based state flags. The flags are
* in the userspace exported format.
*/
-int dev_change_flags(struct net_device *dev, unsigned int flags)
+int dev_change_flags(struct net_device *dev, unsigned int flags,
+ struct netlink_ext_ack *extack)
{
int ret;
unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
- ret = __dev_change_flags(dev, flags);
+ ret = __dev_change_flags(dev, flags, extack);
if (ret < 0)
return ret;
@@ -7701,13 +7768,36 @@ void dev_set_group(struct net_device *dev, int new_group)
EXPORT_SYMBOL(dev_set_group);
/**
+ * dev_pre_changeaddr_notify - Call NETDEV_PRE_CHANGEADDR.
+ * @dev: device
+ * @addr: new address
+ * @extack: netlink extended ack
+ */
+int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
+ struct netlink_ext_ack *extack)
+{
+ struct netdev_notifier_pre_changeaddr_info info = {
+ .info.dev = dev,
+ .info.extack = extack,
+ .dev_addr = addr,
+ };
+ int rc;
+
+ rc = call_netdevice_notifiers_info(NETDEV_PRE_CHANGEADDR, &info.info);
+ return notifier_to_errno(rc);
+}
+EXPORT_SYMBOL(dev_pre_changeaddr_notify);
+
+/**
* dev_set_mac_address - Change Media Access Control Address
* @dev: device
* @sa: new address
+ * @extack: netlink extended ack
*
* Change the hardware (MAC) address of the device
*/
-int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
+ struct netlink_ext_ack *extack)
{
const struct net_device_ops *ops = dev->netdev_ops;
int err;
@@ -7718,6 +7808,9 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
return -EINVAL;
if (!netif_device_present(dev))
return -ENODEV;
+ err = dev_pre_changeaddr_notify(dev, sa->sa_data, extack);
+ if (err)
+ return err;
err = ops->ndo_set_mac_address(dev, sa);
if (err)
return err;
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index d884d8f5f0e5..a6723b306717 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -278,6 +278,103 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list,
EXPORT_SYMBOL(__hw_addr_sync_dev);
/**
+ * __hw_addr_ref_sync_dev - Synchronize device's multicast address list taking
+ * into account references
+ * @list: address list to synchronize
+ * @dev: device to sync
+ * @sync: function to call if address or reference on it should be added
+ * @unsync: function to call if an address or some reference on it should
+ *	be removed
+ *
+ * This function is intended to be called from the ndo_set_rx_mode
+ * function of devices that require explicit add/remove notifications for
+ * addresses or the references held on them. The unsync function may be
+ * NULL, in which case the addresses or references requiring removal are
+ * simply dropped without any notification to the device. It is then the
+ * driver's responsibility to identify and distribute addresses (and the
+ * references on them) between its internal address tables.
+ **/
+int __hw_addr_ref_sync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*sync)(struct net_device *,
+ const unsigned char *, int),
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int))
+{
+ struct netdev_hw_addr *ha, *tmp;
+ int err, ref_cnt;
+
+ /* first go through and flush out any unsynced/stale entries */
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ /* skip entries that do not need to be unsynced */
+ if ((ha->sync_cnt << 1) <= ha->refcount)
+ continue;
+
+ /* if unsync fails, defer unsyncing the address */
+ ref_cnt = ha->refcount - ha->sync_cnt;
+ if (unsync && unsync(dev, ha->addr, ref_cnt))
+ continue;
+
+ ha->refcount = (ref_cnt << 1) + 1;
+ ha->sync_cnt = ref_cnt;
+ __hw_addr_del_entry(list, ha, false, false);
+ }
+
+ /* go through and sync updated/new entries to the list */
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ /* sync if address added or reused */
+ if ((ha->sync_cnt << 1) >= ha->refcount)
+ continue;
+
+ ref_cnt = ha->refcount - ha->sync_cnt;
+ err = sync(dev, ha->addr, ref_cnt);
+ if (err)
+ return err;
+
+ ha->refcount = ref_cnt << 1;
+ ha->sync_cnt = ref_cnt;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(__hw_addr_ref_sync_dev);
+
+/**
+ * __hw_addr_ref_unsync_dev - Remove synchronized addresses, and the
+ *	references on them, from a device
+ * @list: address list to remove synchronized addresses (and references) from
+ * @dev: device to sync
+ * @unsync: function to call if an address and the references on it should be
+ *	removed
+ *
+ * Remove all addresses that were added to the device by
+ * __hw_addr_ref_sync_dev(). This function is intended to be called from the
+ * ndo_stop or ndo_open functions of devices that require explicit add/remove
+ * notifications for addresses or the references held on them. If the unsync
+ * function pointer is NULL, this function can be used to just reset the
+ * sync_cnt for the addresses in the list.
+ **/
+void __hw_addr_ref_unsync_dev(struct netdev_hw_addr_list *list,
+ struct net_device *dev,
+ int (*unsync)(struct net_device *,
+ const unsigned char *, int))
+{
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ if (!ha->sync_cnt)
+ continue;
+
+ /* if unsync fails, defer unsyncing the address */
+ if (unsync && unsync(dev, ha->addr, ha->sync_cnt))
+ continue;
+
+ ha->refcount -= ha->sync_cnt - 1;
+ ha->sync_cnt = 0;
+ __hw_addr_del_entry(list, ha, false, false);
+ }
+}
+EXPORT_SYMBOL(__hw_addr_ref_unsync_dev);
+
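A minimal sketch of how a driver might consume the two new helpers from its
net_device_ops; the "foo" names and the notion of per-reference hardware
tables are hypothetical, only the helper signatures come from this patch:

	static int foo_mc_sync(struct net_device *dev,
			       const unsigned char *addr, int ref_cnt)
	{
		/* program 'addr', using ref_cnt to pick a HW table */
		return 0;	/* nonzero would leave the entry unsynced */
	}

	static int foo_mc_unsync(struct net_device *dev,
				 const unsigned char *addr, int ref_cnt)
	{
		/* drop ref_cnt references; nonzero return defers removal */
		return 0;
	}

	static void foo_set_rx_mode(struct net_device *dev)
	{
		__hw_addr_ref_sync_dev(&dev->mc, dev,
				       foo_mc_sync, foo_mc_unsync);
	}

	static int foo_stop(struct net_device *dev)
	{
		__hw_addr_ref_unsync_dev(&dev->mc, dev, foo_mc_unsync);
		return 0;
	}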
+/**
* __hw_addr_unsync_dev - Remove synchronized addresses from device
* @list: address list to remove synchronized addresses from
* @dev: device to sync
@@ -401,6 +498,9 @@ int dev_addr_add(struct net_device *dev, const unsigned char *addr,
ASSERT_RTNL();
+ err = dev_pre_changeaddr_notify(dev, addr, NULL);
+ if (err)
+ return err;
err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
if (!err)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 90e8aa36881e..31380fd5a4e2 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -234,7 +234,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
switch (cmd) {
case SIOCSIFFLAGS: /* Set interface flags */
- return dev_change_flags(dev, ifr->ifr_flags);
+ return dev_change_flags(dev, ifr->ifr_flags, NULL);
case SIOCSIFMETRIC: /* Set the metric on the interface
(currently unused) */
@@ -246,7 +246,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
case SIOCSIFHWADDR:
if (dev->addr_len > sizeof(struct sockaddr))
return -EINVAL;
- return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
+ return dev_set_mac_address(dev, &ifr->ifr_hwaddr, NULL);
case SIOCSIFHWBROADCAST:
if (ifr->ifr_hwaddr.sa_family != dev->type)
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 3a4b29a13d31..abb0da9d7b4b 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -2692,6 +2692,11 @@ static const struct devlink_param devlink_param_generic[] = {
.name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME,
.type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE,
},
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ .name = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME,
+ .type = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE,
+ },
};
static int devlink_param_generic_verify(const struct devlink_param *param)
diff --git a/net/core/filter.c b/net/core/filter.c
index e521c5ebc7d1..447dd1bad31f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -296,22 +296,18 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
break;
case SKF_AD_VLAN_TAG:
- case SKF_AD_VLAN_TAG_PRESENT:
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
offsetof(struct sk_buff, vlan_tci));
- if (skb_field == SKF_AD_VLAN_TAG) {
- *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
- ~VLAN_TAG_PRESENT);
- } else {
- /* dst_reg >>= 12 */
- *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
- /* dst_reg &= 1 */
+ break;
+ case SKF_AD_VLAN_TAG_PRESENT:
+ *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
+ if (PKT_VLAN_PRESENT_BIT)
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
+ if (PKT_VLAN_PRESENT_BIT < 7)
*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
- }
break;
}
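PKT_VLAN_PRESENT_BIT/PKT_VLAN_PRESENT_OFFSET() are defined on the skbuff
side of this series; paraphrased, they pin down where the vlan_present
bitfield lives so BPF can fetch it with a single byte load:

	/* paraphrased from include/linux/skbuff.h in this series */
	#ifdef __BIG_ENDIAN_BITFIELD
	#define PKT_VLAN_PRESENT_BIT	7	/* first bit in its byte */
	#else
	#define PKT_VLAN_PRESENT_BIT	0
	#endif
	#define PKT_VLAN_PRESENT_OFFSET() \
		offsetof(struct sk_buff, __pkt_vlan_present_offset)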
@@ -467,7 +463,8 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
bool ldx_off_ok = offset <= S16_MAX;
*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
- *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
+ if (offset)
+ *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
size, 2 + endian + (!ldx_off_ok * 2));
if (ldx_off_ok) {
@@ -2428,6 +2425,174 @@ static const struct bpf_func_proto bpf_msg_push_data_proto = {
.arg4_type = ARG_ANYTHING,
};
+static void sk_msg_shift_left(struct sk_msg *msg, int i)
+{
+ int prev;
+
+ do {
+ prev = i;
+ sk_msg_iter_var_next(i);
+ msg->sg.data[prev] = msg->sg.data[i];
+ } while (i != msg->sg.end);
+
+ sk_msg_iter_prev(msg, end);
+}
+
+static void sk_msg_shift_right(struct sk_msg *msg, int i)
+{
+ struct scatterlist tmp, sge;
+
+ sk_msg_iter_next(msg, end);
+ sge = sk_msg_elem_cpy(msg, i);
+ sk_msg_iter_var_next(i);
+ tmp = sk_msg_elem_cpy(msg, i);
+
+ while (i != msg->sg.end) {
+ msg->sg.data[i] = sge;
+ sk_msg_iter_var_next(i);
+ sge = tmp;
+ tmp = sk_msg_elem_cpy(msg, i);
+ }
+}
+
+BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+ u32, len, u64, flags)
+{
+ u32 i = 0, l, space, offset = 0;
+ u64 last = start + len;
+ int pop;
+
+ if (unlikely(flags))
+ return -EINVAL;
+
+ /* First find the starting scatterlist element */
+ i = msg->sg.start;
+ do {
+ l = sk_msg_elem(msg, i)->length;
+
+ if (start < offset + l)
+ break;
+ offset += l;
+ sk_msg_iter_var_next(i);
+ } while (i != msg->sg.end);
+
+ /* Bounds checks: start and pop must be inside message */
+ if (start >= offset + l || last >= msg->sg.size)
+ return -EINVAL;
+
+ space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
+
+ pop = len;
+ /* --------------| offset
+ * -| start |-------- len -------|
+ *
+ * |----- a ----|-------- pop -------|----- b ----|
+ * |______________________________________________| length
+ *
+ *
+ * a: region at front of scatter element to save
+ * b: region at back of scatter element to save when length > a + pop
+ * pop: region to pop from element; same as the input 'pop', but
+ *      decremented below per iteration.
+ *
+ * Two top-level cases to handle when start != offset: first, b is
+ * non-zero, and second, b is zero, corresponding to a pop that spans
+ * more than one element.
+ *
+ * If b is non-zero and there is no space, allocate space and compact
+ * the a and b regions into a single page. If there is space, shift the
+ * ring to the right, freeing the next element in the ring to make room
+ * for b, leaving a untouched except for reducing its length.
+ */
+ if (start != offset) {
+ struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
+ int a = start;
+ int b = sge->length - pop - a;
+
+ sk_msg_iter_var_next(i);
+
+ if (pop < sge->length - a) {
+ if (space) {
+ sge->length = a;
+ sk_msg_shift_right(msg, i);
+ nsge = sk_msg_elem(msg, i);
+ get_page(sg_page(sge));
+ sg_set_page(nsge,
+ sg_page(sge),
+ b, sge->offset + pop + a);
+ } else {
+ struct page *page, *orig;
+ u8 *to, *from;
+
+ page = alloc_pages(__GFP_NOWARN |
+ __GFP_COMP | GFP_ATOMIC,
+ get_order(a + b));
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ sge->length = a;
+ orig = sg_page(sge);
+ from = sg_virt(sge);
+ to = page_address(page);
+ memcpy(to, from, a);
+ memcpy(to + a, from + a + pop, b);
+ sg_set_page(sge, page, a + b, 0);
+ put_page(orig);
+ }
+ pop = 0;
+ } else if (pop >= sge->length - a) {
+ /* decrement pop by the consumed tail before truncating the element */
+ pop -= (sge->length - a);
+ sge->length = a;
+ }
+ }
+
+ /* From above the current layout _must_ be as follows,
+ *
+ * -| offset
+ * -| start
+ *
+ * |---- pop ---|---------------- b ------------|
+ * |____________________________________________| length
+ *
+ * Offset and start of the current msg elem are equal because in the
+ * previous case we handled offset != start and either consumed the
+ * entire element and advanced to the next element OR pop == 0.
+ *
+ * Two cases to handle here. First, pop is less than the length,
+ * leaving some remainder b above; simply adjust the element's layout
+ * in this case. Second, pop >= the length of the element, so that
+ * b = 0; in this case advance to the next element, decrementing pop.
+ */
+ while (pop) {
+ struct scatterlist *sge = sk_msg_elem(msg, i);
+
+ if (pop < sge->length) {
+ sge->length -= pop;
+ sge->offset += pop;
+ pop = 0;
+ } else {
+ pop -= sge->length;
+ sk_msg_shift_left(msg, i);
+ }
+ sk_msg_iter_var_next(i);
+ }
+
+ sk_mem_uncharge(msg->sk, len - pop);
+ msg->sg.size -= (len - pop);
+ sk_msg_compute_data_pointers(msg);
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_msg_pop_data_proto = {
+ .func = bpf_msg_pop_data,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_ANYTHING,
+};
+
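From BPF the helper is reached through its UAPI wrapper; a minimal
(hypothetical) SK_MSG program that strips an assumed 4-byte application
header from every message, mirroring the bounds rules enforced above:

	SEC("sk_msg")
	int strip_app_hdr(struct sk_msg_md *msg)
	{
		/* pop 4 bytes at offset 0; flags must currently be 0 */
		if (bpf_msg_pop_data(msg, 0, 4, 0))
			return SK_DROP;
		return SK_PASS;
	}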
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
return task_get_classid(skb);
@@ -3908,6 +4073,26 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
+BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock,
+ struct bpf_map *, map, u64, flags, void *, data, u64, size)
+{
+ if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+ return -EINVAL;
+
+ return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
+}
+
+static const struct bpf_func_proto bpf_sockopt_event_output_proto = {
+ .func = bpf_sockopt_event_output,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_PTR_TO_MEM,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
+};
+
BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{
@@ -4825,47 +5010,40 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
#ifdef CONFIG_INET
static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
- struct sk_buff *skb, u8 family, u8 proto)
+ int dif, int sdif, u8 family, u8 proto)
{
bool refcounted = false;
struct sock *sk = NULL;
- int dif = 0;
-
- if (skb->dev)
- dif = skb->dev->ifindex;
if (family == AF_INET) {
__be32 src4 = tuple->ipv4.saddr;
__be32 dst4 = tuple->ipv4.daddr;
- int sdif = inet_sdif(skb);
if (proto == IPPROTO_TCP)
- sk = __inet_lookup(net, &tcp_hashinfo, skb, 0,
+ sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
src4, tuple->ipv4.sport,
dst4, tuple->ipv4.dport,
dif, sdif, &refcounted);
else
sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
dst4, tuple->ipv4.dport,
- dif, sdif, &udp_table, skb);
+ dif, sdif, &udp_table, NULL);
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
- u16 hnum = ntohs(tuple->ipv6.dport);
- int sdif = inet6_sdif(skb);
if (proto == IPPROTO_TCP)
- sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
+ sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
src6, tuple->ipv6.sport,
- dst6, hnum,
+ dst6, ntohs(tuple->ipv6.dport),
dif, sdif, &refcounted);
else if (likely(ipv6_bpf_stub))
sk = ipv6_bpf_stub->udp6_lib_lookup(net,
src6, tuple->ipv6.sport,
- dst6, hnum,
+ dst6, tuple->ipv6.dport,
dif, sdif,
- &udp_table, skb);
+ &udp_table, NULL);
#endif
}
@@ -4882,31 +5060,34 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
* callers to satisfy BPF_CALL declarations.
*/
static unsigned long
-bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
- u8 proto, u64 netns_id, u64 flags)
+__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+ u64 flags)
{
- struct net *caller_net;
struct sock *sk = NULL;
u8 family = AF_UNSPEC;
struct net *net;
+ int sdif;
family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
- if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
+ if (unlikely(family == AF_UNSPEC || flags ||
+ !((s32)netns_id < 0 || netns_id <= S32_MAX)))
goto out;
- if (skb->dev)
- caller_net = dev_net(skb->dev);
+ if (family == AF_INET)
+ sdif = inet_sdif(skb);
else
- caller_net = sock_net(skb->sk);
- if (netns_id) {
+ sdif = inet6_sdif(skb);
+
+ if ((s32)netns_id < 0) {
+ net = caller_net;
+ sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
+ } else {
net = get_net_ns_by_id(caller_net, netns_id);
if (unlikely(!net))
goto out;
- sk = sk_lookup(net, tuple, skb, family, proto);
+ sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
put_net(net);
- } else {
- net = caller_net;
- sk = sk_lookup(net, tuple, skb, family, proto);
}
if (sk)
@@ -4915,6 +5096,25 @@ out:
return (unsigned long) sk;
}
+static unsigned long
+bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+ u8 proto, u64 netns_id, u64 flags)
+{
+ struct net *caller_net;
+ int ifindex;
+
+ if (skb->dev) {
+ caller_net = dev_net(skb->dev);
+ ifindex = skb->dev->ifindex;
+ } else {
+ caller_net = sock_net(skb->sk);
+ ifindex = 0;
+ }
+
+ return __bpf_sk_lookup(skb, tuple, len, caller_net, ifindex,
+ proto, netns_id, flags);
+}
+
BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
@@ -4964,6 +5164,87 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_SOCKET,
};
+
+BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+ struct net *caller_net = dev_net(ctx->rxq->dev);
+ int ifindex = ctx->rxq->dev->ifindex;
+
+ return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
+ IPPROTO_UDP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
+ .func = bpf_xdp_sk_lookup_udp,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+ struct net *caller_net = dev_net(ctx->rxq->dev);
+ int ifindex = ctx->rxq->dev->ifindex;
+
+ return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
+ IPPROTO_TCP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
+ .func = bpf_xdp_sk_lookup_tcp,
+ .gpl_only = false,
+ .pkt_access = true,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+ return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
+ IPPROTO_TCP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
+ .func = bpf_sock_addr_sk_lookup_tcp,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
+ struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+ return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
+ IPPROTO_UDP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
+ .func = bpf_sock_addr_sk_lookup_udp,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+};
+
#endif /* CONFIG_INET */
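A rough XDP-side usage sketch for the new lookup helpers; tuple parsing
from the packet is elided, and the BPF_F_CURRENT_NETNS spelling of the
"negative netns_id means caller's netns" convention is assumed:

	SEC("xdp")
	int xdp_known_flows(struct xdp_md *ctx)
	{
		struct bpf_sock_tuple tuple = {};	/* fill from headers */
		struct bpf_sock *sk;

		sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
				       BPF_F_CURRENT_NETNS, 0);
		if (!sk)
			return XDP_DROP;	/* no established socket */
		bpf_sk_release(sk);		/* mandatory release */
		return XDP_PASS;
	}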
bool bpf_helper_changes_pkt_data(void *func)
@@ -4986,6 +5267,7 @@ bool bpf_helper_changes_pkt_data(void *func)
func == bpf_xdp_adjust_meta ||
func == bpf_msg_pull_data ||
func == bpf_msg_push_data ||
+ func == bpf_msg_pop_data ||
func == bpf_xdp_adjust_tail ||
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
func == bpf_lwt_seg6_store_bytes ||
@@ -5070,6 +5352,14 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_socket_cookie_sock_addr_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
+#ifdef CONFIG_INET
+ case BPF_FUNC_sk_lookup_tcp:
+ return &bpf_sock_addr_sk_lookup_tcp_proto;
+ case BPF_FUNC_sk_lookup_udp:
+ return &bpf_sock_addr_sk_lookup_udp_proto;
+ case BPF_FUNC_sk_release:
+ return &bpf_sk_release_proto;
+#endif /* CONFIG_INET */
default:
return bpf_base_func_proto(func_id);
}
@@ -5214,6 +5504,14 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_xdp_adjust_tail_proto;
case BPF_FUNC_fib_lookup:
return &bpf_xdp_fib_lookup_proto;
+#ifdef CONFIG_INET
+ case BPF_FUNC_sk_lookup_udp:
+ return &bpf_xdp_sk_lookup_udp_proto;
+ case BPF_FUNC_sk_lookup_tcp:
+ return &bpf_xdp_sk_lookup_tcp_proto;
+ case BPF_FUNC_sk_release:
+ return &bpf_sk_release_proto;
+#endif
default:
return bpf_base_func_proto(func_id);
}
@@ -5240,6 +5538,8 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_socket_cookie_sock_ops_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
+ case BPF_FUNC_perf_event_output:
+ return &bpf_sockopt_event_output_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -5264,6 +5564,8 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_msg_pull_data_proto;
case BPF_FUNC_msg_push_data:
return &bpf_msg_push_data_proto;
+ case BPF_FUNC_msg_pop_data:
+ return &bpf_msg_pop_data_proto;
default:
return bpf_base_func_proto(func_id);
}
@@ -5436,8 +5738,12 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
if (size != size_default)
return false;
break;
- case bpf_ctx_range(struct __sk_buff, flow_keys):
- if (size != sizeof(struct bpf_flow_keys *))
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+ if (size != sizeof(__u64))
+ return false;
+ break;
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ if (size != sizeof(__u64))
return false;
break;
default:
@@ -5465,8 +5771,10 @@ static bool sk_filter_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range(struct __sk_buff, data_end):
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
@@ -5490,7 +5798,8 @@ static bool cg_skb_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+ case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
case bpf_ctx_range(struct __sk_buff, data):
case bpf_ctx_range(struct __sk_buff, data_end):
@@ -5505,6 +5814,10 @@ static bool cg_skb_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, priority):
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
break;
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ if (!capable(CAP_SYS_ADMIN))
+ return false;
+ break;
default:
return false;
}
@@ -5531,7 +5844,9 @@ static bool lwt_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
@@ -5741,6 +6056,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, priority):
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
+ case bpf_ctx_range(struct __sk_buff, tstamp):
break;
default:
return false;
@@ -5757,7 +6073,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_end):
info->reg_type = PTR_TO_PACKET_END;
break;
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
return false;
}
@@ -5959,7 +6275,9 @@ static bool sk_skb_is_valid_access(int off, int size,
switch (off) {
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
@@ -5995,6 +6313,9 @@ static bool sk_msg_is_valid_access(int off, int size,
if (type == BPF_WRITE)
return false;
+ if (off % size != 0)
+ return false;
+
switch (off) {
case offsetof(struct sk_msg_md, data):
info->reg_type = PTR_TO_PACKET;
@@ -6006,16 +6327,20 @@ static bool sk_msg_is_valid_access(int off, int size,
if (size != sizeof(__u64))
return false;
break;
- default:
+ case bpf_ctx_range(struct sk_msg_md, family):
+ case bpf_ctx_range(struct sk_msg_md, remote_ip4):
+ case bpf_ctx_range(struct sk_msg_md, local_ip4):
+ case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
+ case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
+ case bpf_ctx_range(struct sk_msg_md, remote_port):
+ case bpf_ctx_range(struct sk_msg_md, local_port):
+ case bpf_ctx_range(struct sk_msg_md, size):
if (size != sizeof(__u32))
return false;
- }
-
- if (off < 0 || off >= sizeof(struct sk_msg_md))
- return false;
- if (off % size != 0)
+ break;
+ default:
return false;
-
+ }
return true;
}
@@ -6040,12 +6365,14 @@ static bool flow_dissector_is_valid_access(int off, int size,
case bpf_ctx_range(struct __sk_buff, data_end):
info->reg_type = PTR_TO_PACKET_END;
break;
- case bpf_ctx_range(struct __sk_buff, flow_keys):
+ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
info->reg_type = PTR_TO_FLOW_KEYS;
break;
case bpf_ctx_range(struct __sk_buff, tc_classid):
case bpf_ctx_range(struct __sk_buff, data_meta):
case bpf_ctx_range_till(struct __sk_buff, family, local_port):
+ case bpf_ctx_range(struct __sk_buff, tstamp):
+ case bpf_ctx_range(struct __sk_buff, wire_len):
return false;
}
@@ -6140,19 +6467,19 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
break;
case offsetof(struct __sk_buff, vlan_present):
- case offsetof(struct __sk_buff, vlan_tci):
- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+ *target_size = 1;
+ *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
+ PKT_VLAN_PRESENT_OFFSET());
+ if (PKT_VLAN_PRESENT_BIT)
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
+ if (PKT_VLAN_PRESENT_BIT < 7)
+ *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
+ break;
+ case offsetof(struct __sk_buff, vlan_tci):
*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
bpf_target_off(struct sk_buff, vlan_tci, 2,
target_size));
- if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
- *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
- ~VLAN_TAG_PRESENT);
- } else {
- *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
- *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
- }
break;
case offsetof(struct __sk_buff, cb[0]) ...
@@ -6355,6 +6682,33 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
si->src_reg, off);
break;
+
+ case offsetof(struct __sk_buff, tstamp):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8);
+
+ if (type == BPF_WRITE)
+ *insn++ = BPF_STX_MEM(BPF_DW,
+ si->dst_reg, si->src_reg,
+ bpf_target_off(struct sk_buff,
+ tstamp, 8,
+ target_size));
+ else
+ *insn++ = BPF_LDX_MEM(BPF_DW,
+ si->dst_reg, si->src_reg,
+ bpf_target_off(struct sk_buff,
+ tstamp, 8,
+ target_size));
+ break;
+
+ case offsetof(struct __sk_buff, wire_len):
+ BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
+
+ off = si->off;
+ off -= offsetof(struct __sk_buff, wire_len);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct qdisc_skb_cb, pkt_len);
+ *target_size = 4;
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
}
return insn - insn_buf;
@@ -7071,6 +7425,9 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
int off;
#endif
+ /* ctx conversion relies on 'sg' being the first member of struct sk_msg */
+ BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);
+
switch (si->off) {
case offsetof(struct sk_msg_md, data):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
@@ -7183,6 +7540,12 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
offsetof(struct sock_common, skc_num));
break;
+
+ case offsetof(struct sk_msg_md, size):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_msg_sg, size));
+ break;
}
return insn - insn_buf;
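With the converter above, the new field reads like any other sk_msg_md
member from program context; a minimal (hypothetical) use:

	SEC("sk_msg")
	int drop_runts(struct sk_msg_md *msg)
	{
		return msg->size < 16 ? SK_DROP : SK_PASS;
	}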
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 588f475019d4..9f2840510e63 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -783,6 +783,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
/* Pass parameters to the BPF program */
cb->qdisc_cb.flow_keys = &flow_keys;
flow_keys.nhoff = nhoff;
+ flow_keys.thoff = nhoff;
bpf_compute_data_pointers((struct sk_buff *)skb);
result = BPF_PROG_RUN(attached, skb);
@@ -790,9 +791,12 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
/* Restore state */
memcpy(cb, &cb_saved, sizeof(cb_saved));
+ flow_keys.nhoff = clamp_t(u16, flow_keys.nhoff, 0, skb->len);
+ flow_keys.thoff = clamp_t(u16, flow_keys.thoff,
+ flow_keys.nhoff, skb->len);
+
__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
target_container);
- key_control->thoff = min_t(u16, key_control->thoff, skb->len);
rcu_read_unlock();
return result == BPF_OK;
}
@@ -952,8 +956,7 @@ proto_again:
if (!vlan) {
key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
- key_vlan->vlan_priority =
- (skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
+ key_vlan->vlan_priority = skb_vlan_tag_get_prio(skb);
} else {
key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
VLAN_VID_MASK;
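The shift removed above was a double shift: skb_vlan_tag_get_prio() already
extracts the PCP field, roughly (paraphrased from include/linux/if_vlan.h):

	#define skb_vlan_tag_get_prio(__skb) \
		(((__skb)->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT)

so shifting its result by VLAN_PRIO_SHIFT again always yielded 0.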
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index 4b54e5f107c6..acf45ddbe924 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -84,6 +84,7 @@ void gro_cells_destroy(struct gro_cells *gcells)
for_each_possible_cpu(i) {
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+ napi_disable(&cell->napi);
netif_napi_del(&cell->napi);
__skb_queue_purge(&cell->napi_skbs);
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 41954e42a2de..763a7b08df67 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -118,21 +118,77 @@ unsigned long neigh_rand_reach_time(unsigned long base)
}
EXPORT_SYMBOL(neigh_rand_reach_time);
+static void neigh_mark_dead(struct neighbour *n)
+{
+ n->dead = 1;
+ if (!list_empty(&n->gc_list)) {
+ list_del_init(&n->gc_list);
+ atomic_dec(&n->tbl->gc_entries);
+ }
+}
+
+static void neigh_update_gc_list(struct neighbour *n)
+{
+ bool on_gc_list, exempt_from_gc;
+
+ write_lock_bh(&n->tbl->lock);
+ write_lock(&n->lock);
+
+ /* remove from the gc list if new state is permanent or if neighbor
+ * is externally learned; otherwise entry should be on the gc list
+ */
+ exempt_from_gc = n->nud_state & NUD_PERMANENT ||
+ n->flags & NTF_EXT_LEARNED;
+ on_gc_list = !list_empty(&n->gc_list);
+
+ if (exempt_from_gc && on_gc_list) {
+ list_del_init(&n->gc_list);
+ atomic_dec(&n->tbl->gc_entries);
+ } else if (!exempt_from_gc && !on_gc_list) {
+ /* add entries to the tail; cleaning removes from the front */
+ list_add_tail(&n->gc_list, &n->tbl->gc_list);
+ atomic_inc(&n->tbl->gc_entries);
+ }
+
+ write_unlock(&n->lock);
+ write_unlock_bh(&n->tbl->lock);
+}
-static bool neigh_del(struct neighbour *n, __u8 state, __u8 flags,
- struct neighbour __rcu **np, struct neigh_table *tbl)
+static bool neigh_update_ext_learned(struct neighbour *neigh, u32 flags,
+ int *notify)
+{
+ bool rc = false;
+ u8 ndm_flags;
+
+ if (!(flags & NEIGH_UPDATE_F_ADMIN))
+ return rc;
+
+ ndm_flags = (flags & NEIGH_UPDATE_F_EXT_LEARNED) ? NTF_EXT_LEARNED : 0;
+ if ((neigh->flags ^ ndm_flags) & NTF_EXT_LEARNED) {
+ if (ndm_flags & NTF_EXT_LEARNED)
+ neigh->flags |= NTF_EXT_LEARNED;
+ else
+ neigh->flags &= ~NTF_EXT_LEARNED;
+ rc = true;
+ *notify = 1;
+ }
+
+ return rc;
+}
+
+static bool neigh_del(struct neighbour *n, struct neighbour __rcu **np,
+ struct neigh_table *tbl)
{
bool retval = false;
write_lock(&n->lock);
- if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state) &&
- !(n->flags & flags)) {
+ if (refcount_read(&n->refcnt) == 1) {
struct neighbour *neigh;
neigh = rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock));
rcu_assign_pointer(*np, neigh);
- n->dead = 1;
+ neigh_mark_dead(n);
retval = true;
}
write_unlock(&n->lock);
@@ -158,7 +214,7 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
while ((n = rcu_dereference_protected(*np,
lockdep_is_held(&tbl->lock)))) {
if (n == ndel)
- return neigh_del(n, 0, 0, np, tbl);
+ return neigh_del(n, np, tbl);
np = &n->next;
}
return false;
@@ -166,32 +222,29 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
static int neigh_forced_gc(struct neigh_table *tbl)
{
+ int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
+ unsigned long tref = jiffies - 5 * HZ;
+ struct neighbour *n, *tmp;
int shrunk = 0;
- int i;
- struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
write_lock_bh(&tbl->lock);
- nht = rcu_dereference_protected(tbl->nht,
- lockdep_is_held(&tbl->lock));
- for (i = 0; i < (1 << nht->hash_shift); i++) {
- struct neighbour *n;
- struct neighbour __rcu **np;
- np = &nht->hash_buckets[i];
- while ((n = rcu_dereference_protected(*np,
- lockdep_is_held(&tbl->lock))) != NULL) {
- /* Neighbour record may be discarded if:
- * - nobody refers to it.
- * - it is not permanent
- */
- if (neigh_del(n, NUD_PERMANENT, NTF_EXT_LEARNED, np,
- tbl)) {
- shrunk = 1;
- continue;
- }
- np = &n->next;
+ list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
+ if (refcount_read(&n->refcnt) == 1) {
+ bool remove = false;
+
+ write_lock(&n->lock);
+ if ((n->nud_state == NUD_FAILED) ||
+ time_after(tref, n->updated))
+ remove = true;
+ write_unlock(&n->lock);
+
+ if (remove && neigh_remove_one(n, tbl))
+ shrunk++;
+ if (shrunk >= max_clean)
+ break;
}
}
@@ -260,8 +313,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
lockdep_is_held(&tbl->lock)));
write_lock(&n->lock);
neigh_del_timer(n);
- n->dead = 1;
-
+ neigh_mark_dead(n);
if (refcount_read(&n->refcnt) != 1) {
/* The most unpleasant situation.
We must destroy neighbour entry,
@@ -321,13 +373,18 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
}
EXPORT_SYMBOL(neigh_ifdown);
-static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
+static struct neighbour *neigh_alloc(struct neigh_table *tbl,
+ struct net_device *dev,
+ bool exempt_from_gc)
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
int entries;
- entries = atomic_inc_return(&tbl->entries) - 1;
+ if (exempt_from_gc)
+ goto do_alloc;
+
+ entries = atomic_inc_return(&tbl->gc_entries) - 1;
if (entries >= tbl->gc_thresh3 ||
(entries >= tbl->gc_thresh2 &&
time_after(now, tbl->last_flush + 5 * HZ))) {
@@ -340,6 +397,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
}
}
+do_alloc:
n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
if (!n)
goto out_entries;
@@ -358,11 +416,15 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
n->tbl = tbl;
refcount_set(&n->refcnt, 1);
n->dead = 1;
+ INIT_LIST_HEAD(&n->gc_list);
+
+ atomic_inc(&tbl->entries);
out:
return n;
out_entries:
- atomic_dec(&tbl->entries);
+ if (!exempt_from_gc)
+ atomic_dec(&tbl->gc_entries);
goto out;
}
@@ -505,13 +567,15 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
}
EXPORT_SYMBOL(neigh_lookup_nodev);
-struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
- struct net_device *dev, bool want_ref)
+static struct neighbour *___neigh_create(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *dev,
+ bool exempt_from_gc, bool want_ref)
{
+ struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev, exempt_from_gc);
u32 hash_val;
unsigned int key_len = tbl->key_len;
int error;
- struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
struct neigh_hash_table *nht;
if (!n) {
@@ -574,6 +638,9 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
}
n->dead = 0;
+ if (!exempt_from_gc)
+ list_add_tail(&n->gc_list, &n->tbl->gc_list);
+
if (want_ref)
neigh_hold(n);
rcu_assign_pointer(n->next,
@@ -591,6 +658,12 @@ out_neigh_release:
neigh_release(n);
goto out;
}
+
+struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev, bool want_ref)
+{
+ return ___neigh_create(tbl, pkey, dev, false, want_ref);
+}
EXPORT_SYMBOL(__neigh_create);
static u32 pneigh_hash(const void *pkey, unsigned int key_len)
@@ -652,6 +725,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
if (!n)
goto out;
+ n->protocol = 0;
write_pnet(&n->net, net);
memcpy(n->key, pkey, key_len);
n->dev = dev;
@@ -854,7 +928,7 @@ static void neigh_periodic_work(struct work_struct *work)
(state == NUD_FAILED ||
time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
*np = n->next;
- n->dead = 1;
+ neigh_mark_dead(n);
write_unlock(&n->lock);
neigh_cleanup_and_release(n);
continue;
@@ -1137,9 +1211,11 @@ static void neigh_update_hhs(struct neighbour *neigh)
Caller MUST hold reference count on the entry.
*/
-int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
- u32 flags, u32 nlmsg_pid)
+static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
+ u8 new, u32 flags, u32 nlmsg_pid,
+ struct netlink_ext_ack *extack)
{
+ bool ext_learn_change = false;
u8 old;
int err;
int notify = 0;
@@ -1155,10 +1231,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
(old & (NUD_NOARP | NUD_PERMANENT)))
goto out;
- if (neigh->dead)
+ if (neigh->dead) {
+ NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
goto out;
+ }
- neigh_update_ext_learned(neigh, flags, &notify);
+ ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
if (!(new & NUD_VALID)) {
neigh_del_timer(neigh);
@@ -1193,8 +1271,10 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
use it, otherwise discard the request.
*/
err = -EINVAL;
- if (!(old & NUD_VALID))
+ if (!(old & NUD_VALID)) {
+ NL_SET_ERR_MSG(extack, "No link layer address given");
goto out;
+ }
lladdr = neigh->ha;
}
@@ -1302,11 +1382,20 @@ out:
neigh_update_is_router(neigh, flags, &notify);
write_unlock_bh(&neigh->lock);
+ if (((new ^ old) & NUD_PERMANENT) || ext_learn_change)
+ neigh_update_gc_list(neigh);
+
if (notify)
neigh_update_notify(neigh, nlmsg_pid);
return err;
}
+
+int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ u32 flags, u32 nlmsg_pid)
+{
+ return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
+}
EXPORT_SYMBOL(neigh_update);
/* Update the neigh to listen temporarily for probe responses, even if it is
@@ -1571,6 +1660,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
unsigned long phsize;
INIT_LIST_HEAD(&tbl->parms_list);
+ INIT_LIST_HEAD(&tbl->gc_list);
list_add(&tbl->parms.list, &tbl->parms_list);
write_pnet(&tbl->parms.net, &init_net);
refcount_set(&tbl->parms.refcnt, 1);
@@ -1662,6 +1752,19 @@ static struct neigh_table *neigh_find_table(int family)
return tbl;
}
+const struct nla_policy nda_policy[NDA_MAX+1] = {
+ [NDA_DST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+ [NDA_LLADDR] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+ [NDA_CACHEINFO] = { .len = sizeof(struct nda_cacheinfo) },
+ [NDA_PROBES] = { .type = NLA_U32 },
+ [NDA_VLAN] = { .type = NLA_U16 },
+ [NDA_PORT] = { .type = NLA_U16 },
+ [NDA_VNI] = { .type = NLA_U32 },
+ [NDA_IFINDEX] = { .type = NLA_U32 },
+ [NDA_MASTER] = { .type = NLA_U32 },
+ [NDA_PROTOCOL] = { .type = NLA_U8 },
+};
+
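
The shared nda_policy table centralizes attribute validation; once nlmsg_parse() is handed the policy, open-coded length checks (like the NDA_IFINDEX/NDA_MASTER ones deleted further down in neigh_valid_dump_req()) become redundant. A minimal sketch of the pattern, assuming a handler that already has the nlmsghdr:

    struct nlattr *tb[NDA_MAX + 1];
    int err;

    /* Malformed attributes (e.g. an NDA_VLAN that is not a u16) are
     * rejected here, with an extack message set for userspace. */
    err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
                      nda_policy, extack);
    if (err < 0)
            return err;

    if (tb[NDA_IFINDEX])    /* safe: policy guarantees u32 length */
            filter->dev_idx = nla_get_u32(tb[NDA_IFINDEX]);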
static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -1678,8 +1781,10 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
- if (dst_attr == NULL)
+ if (!dst_attr) {
+ NL_SET_ERR_MSG(extack, "Network address not specified");
goto out;
+ }
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex) {
@@ -1694,8 +1799,10 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tbl == NULL)
return -EAFNOSUPPORT;
- if (nla_len(dst_attr) < (int)tbl->key_len)
+ if (nla_len(dst_attr) < (int)tbl->key_len) {
+ NL_SET_ERR_MSG(extack, "Invalid network address");
goto out;
+ }
if (ndm->ndm_flags & NTF_PROXY) {
err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
@@ -1711,10 +1818,9 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
}
- err = neigh_update(neigh, NULL, NUD_FAILED,
- NEIGH_UPDATE_F_OVERRIDE |
- NEIGH_UPDATE_F_ADMIN,
- NETLINK_CB(skb).portid);
+ err = __neigh_update(neigh, NULL, NUD_FAILED,
+ NEIGH_UPDATE_F_OVERRIDE | NEIGH_UPDATE_F_ADMIN,
+ NETLINK_CB(skb).portid, extack);
write_lock_bh(&tbl->lock);
neigh_release(neigh);
neigh_remove_one(neigh, tbl);
@@ -1736,16 +1842,19 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_device *dev = NULL;
struct neighbour *neigh;
void *dst, *lladdr;
+ u8 protocol = 0;
int err;
ASSERT_RTNL();
- err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL, extack);
+ err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, nda_policy, extack);
if (err < 0)
goto out;
err = -EINVAL;
- if (tb[NDA_DST] == NULL)
+ if (!tb[NDA_DST]) {
+ NL_SET_ERR_MSG(extack, "Network address not specified");
goto out;
+ }
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex) {
@@ -1755,19 +1864,27 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
}
- if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
+ if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len) {
+ NL_SET_ERR_MSG(extack, "Invalid link address");
goto out;
+ }
}
tbl = neigh_find_table(ndm->ndm_family);
if (tbl == NULL)
return -EAFNOSUPPORT;
- if (nla_len(tb[NDA_DST]) < (int)tbl->key_len)
+ if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
+ NL_SET_ERR_MSG(extack, "Invalid network address");
goto out;
+ }
+
dst = nla_data(tb[NDA_DST]);
lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
+ if (tb[NDA_PROTOCOL])
+ protocol = nla_get_u8(tb[NDA_PROTOCOL]);
+
if (ndm->ndm_flags & NTF_PROXY) {
struct pneigh_entry *pn;
@@ -1775,22 +1892,30 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
pn = pneigh_lookup(tbl, net, dst, dev, 1);
if (pn) {
pn->flags = ndm->ndm_flags;
+ if (protocol)
+ pn->protocol = protocol;
err = 0;
}
goto out;
}
- if (dev == NULL)
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "Device not specified");
goto out;
+ }
neigh = neigh_lookup(tbl, dst, dev);
if (neigh == NULL) {
+ bool exempt_from_gc;
+
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
err = -ENOENT;
goto out;
}
- neigh = __neigh_lookup_errno(tbl, dst, dev);
+ exempt_from_gc = ndm->ndm_state & NUD_PERMANENT ||
+ ndm->ndm_flags & NTF_EXT_LEARNED;
+ neigh = ___neigh_create(tbl, dst, dev, exempt_from_gc, true);
if (IS_ERR(neigh)) {
err = PTR_ERR(neigh);
goto out;
@@ -1817,8 +1942,12 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh,
neigh_event_send(neigh, NULL);
err = 0;
} else
- err = neigh_update(neigh, lladdr, ndm->ndm_state, flags,
- NETLINK_CB(skb).portid);
+ err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
+ NETLINK_CB(skb).portid, extack);
+
+ if (protocol)
+ neigh->protocol = protocol;
+
neigh_release(neigh);
out:
@@ -2312,6 +2441,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
goto nla_put_failure;
+ if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
@@ -2343,6 +2475,9 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
goto nla_put_failure;
+ if (pn->protocol && nla_put_u8(skb, NDA_PROTOCOL, pn->protocol))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
@@ -2494,16 +2629,21 @@ static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
ndm = nlmsg_data(nlh);
if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_ifindex ||
- ndm->ndm_state || ndm->ndm_flags || ndm->ndm_type) {
+ ndm->ndm_state || ndm->ndm_type) {
NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor dump request");
return -EINVAL;
}
+ if (ndm->ndm_flags & ~NTF_PROXY) {
+ NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor dump request");
+ return -EINVAL;
+ }
+
err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
- NULL, extack);
+ nda_policy, extack);
} else {
err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
- NULL, extack);
+ nda_policy, extack);
}
if (err < 0)
return err;
@@ -2515,17 +2655,9 @@ static int neigh_valid_dump_req(const struct nlmsghdr *nlh,
/* all new attributes should require strict_check */
switch (i) {
case NDA_IFINDEX:
- if (nla_len(tb[i]) != sizeof(u32)) {
- NL_SET_ERR_MSG(extack, "Invalid IFINDEX attribute in neighbor dump request");
- return -EINVAL;
- }
filter->dev_idx = nla_get_u32(tb[i]);
break;
case NDA_MASTER:
- if (nla_len(tb[i]) != sizeof(u32)) {
- NL_SET_ERR_MSG(extack, "Invalid MASTER attribute in neighbor dump request");
- return -EINVAL;
- }
filter->master_idx = nla_get_u32(tb[i]);
break;
default:
@@ -2585,6 +2717,186 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
+static int neigh_valid_get_req(const struct nlmsghdr *nlh,
+ struct neigh_table **tbl,
+ void **dst, int *dev_idx, u8 *ndm_flags,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[NDA_MAX + 1];
+ struct ndmsg *ndm;
+ int err, i;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for neighbor get request");
+ return -EINVAL;
+ }
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
+ ndm->ndm_type) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for neighbor get request");
+ return -EINVAL;
+ }
+
+ if (ndm->ndm_flags & ~NTF_PROXY) {
+ NL_SET_ERR_MSG(extack, "Invalid flags in header for neighbor get request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
+ nda_policy, extack);
+ if (err < 0)
+ return err;
+
+ *ndm_flags = ndm->ndm_flags;
+ *dev_idx = ndm->ndm_ifindex;
+ *tbl = neigh_find_table(ndm->ndm_family);
+ if (*tbl == NULL) {
+ NL_SET_ERR_MSG(extack, "Unsupported family in header for neighbor get request");
+ return -EAFNOSUPPORT;
+ }
+
+ for (i = 0; i <= NDA_MAX; ++i) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NDA_DST:
+ if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
+ NL_SET_ERR_MSG(extack, "Invalid network address in neighbor get request");
+ return -EINVAL;
+ }
+ *dst = nla_data(tb[i]);
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in neighbor get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static inline size_t neigh_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct ndmsg))
+ + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
+ + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
+ + nla_total_size(sizeof(struct nda_cacheinfo))
+ + nla_total_size(4) /* NDA_PROBES */
+ + nla_total_size(1); /* NDA_PROTOCOL */
+}
+
+static int neigh_get_reply(struct net *net, struct neighbour *neigh,
+ u32 pid, u32 seq)
+{
+ struct sk_buff *skb;
+ int err = 0;
+
+ skb = nlmsg_new(neigh_nlmsg_size(), GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
+ if (err) {
+ kfree_skb(skb);
+ goto errout;
+ }
+
+ err = rtnl_unicast(skb, net, pid);
+errout:
+ return err;
+}
+
+static inline size_t pneigh_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct ndmsg))
+ + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
+ + nla_total_size(1); /* NDA_PROTOCOL */
+}
+
+static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
+ u32 pid, u32 seq, struct neigh_table *tbl)
+{
+ struct sk_buff *skb;
+ int err = 0;
+
+ skb = nlmsg_new(pneigh_nlmsg_size(), GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
+ if (err) {
+ kfree_skb(skb);
+ goto errout;
+ }
+
+ err = rtnl_unicast(skb, net, pid);
+errout:
+ return err;
+}
+
+static int neigh_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(in_skb->sk);
+ struct net_device *dev = NULL;
+ struct neigh_table *tbl = NULL;
+ struct neighbour *neigh;
+ void *dst = NULL;
+ u8 ndm_flags = 0;
+ int dev_idx = 0;
+ int err;
+
+ err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
+ extack);
+ if (err < 0)
+ return err;
+
+ if (dev_idx) {
+ dev = __dev_get_by_index(net, dev_idx);
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "Unknown device ifindex");
+ return -ENODEV;
+ }
+ }
+
+ if (!dst) {
+ NL_SET_ERR_MSG(extack, "Network address not specified");
+ return -EINVAL;
+ }
+
+ if (ndm_flags & NTF_PROXY) {
+ struct pneigh_entry *pn;
+
+ pn = pneigh_lookup(tbl, net, dst, dev, 0);
+ if (!pn) {
+ NL_SET_ERR_MSG(extack, "Proxy neighbour entry not found");
+ return -ENOENT;
+ }
+ return pneigh_get_reply(net, pn, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, tbl);
+ }
+
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "No device specified");
+ return -EINVAL;
+ }
+
+ neigh = neigh_lookup(tbl, dst, dev);
+ if (!neigh) {
+ NL_SET_ERR_MSG(extack, "Neighbour entry not found");
+ return -ENOENT;
+ }
+
+ err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq);
+
+ neigh_release(neigh);
+
+ return err;
+}
+
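With neigh_get() wired up as the RTM_GETNEIGH doit handler (see the rtnl_register() change below), userspace can fetch one neighbour entry directly instead of filtering a full dump. A minimal userspace sketch under stated assumptions (AF_INET neighbour; ifindex 2 and 192.0.2.1 are hypothetical; error handling and reply parsing elided):

    #include <arpa/inet.h>
    #include <linux/neighbour.h>
    #include <linux/rtnetlink.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct {
                    struct nlmsghdr nlh;
                    struct ndmsg ndm;
                    char attrs[64];
            } req;
            struct rtattr *rta;
            struct in_addr dst;
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

            memset(&req, 0, sizeof(req));
            req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ndm));
            req.nlh.nlmsg_type = RTM_GETNEIGH;
            /* NLM_F_REQUEST without NLM_F_DUMP selects the new doit path */
            req.nlh.nlmsg_flags = NLM_F_REQUEST;
            req.ndm.ndm_family = AF_INET;
            req.ndm.ndm_ifindex = 2;        /* assumption: eth0 */

            inet_pton(AF_INET, "192.0.2.1", &dst);
            rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
            rta->rta_type = NDA_DST;
            rta->rta_len = RTA_LENGTH(sizeof(dst));
            memcpy(RTA_DATA(rta), &dst, sizeof(dst));
            req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
                                RTA_LENGTH(sizeof(dst));

            send(fd, &req, req.nlh.nlmsg_len, 0);
            /* recv() would return the RTM_NEWNEIGH reply built by
             * neigh_get_reply() above */
            close(fd);
            return 0;
    }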
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
int chain;
@@ -2631,7 +2943,7 @@ void __neigh_for_each_release(struct neigh_table *tbl,
rcu_assign_pointer(*np,
rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock)));
- n->dead = 1;
+ neigh_mark_dead(n);
} else
np = &n->next;
write_unlock(&n->lock);
@@ -2992,15 +3304,6 @@ static const struct seq_operations neigh_stat_seq_ops = {
};
#endif /* CONFIG_PROC_FS */
-static inline size_t neigh_nlmsg_size(void)
-{
- return NLMSG_ALIGN(sizeof(struct ndmsg))
- + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
- + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
- + nla_total_size(sizeof(struct nda_cacheinfo))
- + nla_total_size(4); /* NDA_PROBES */
-}
-
static void __neigh_notify(struct neighbour *n, int type, int flags,
u32 pid)
{
@@ -3384,7 +3687,7 @@ static int __init neigh_init(void)
{
rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, 0);
+ rtnl_register(PF_UNSPEC, RTM_GETNEIGH, neigh_get, neigh_dump_info, 0);
rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
0);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index bd67c4d0fcfd..ff9fd2bb4ce4 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -337,7 +337,7 @@ NETDEVICE_SHOW_RW(mtu, fmt_dec);
static int change_flags(struct net_device *dev, unsigned long new_flags)
{
- return dev_change_flags(dev, (unsigned int)new_flags);
+ return dev_change_flags(dev, (unsigned int)new_flags, NULL);
}
static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index fefe72774aeb..b02fb19df2cc 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -669,6 +669,7 @@ static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = {
[NETNSA_NSID] = { .type = NLA_S32 },
[NETNSA_PID] = { .type = NLA_U32 },
[NETNSA_FD] = { .type = NLA_U32 },
+ [NETNSA_TARGET_NSID] = { .type = NLA_S32 },
};
static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -735,23 +736,38 @@ static int rtnl_net_get_size(void)
{
return NLMSG_ALIGN(sizeof(struct rtgenmsg))
+ nla_total_size(sizeof(s32)) /* NETNSA_NSID */
+ + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */
;
}
-static int rtnl_net_fill(struct sk_buff *skb, u32 portid, u32 seq, int flags,
- int cmd, struct net *net, int nsid)
+struct net_fill_args {
+ u32 portid;
+ u32 seq;
+ int flags;
+ int cmd;
+ int nsid;
+ bool add_ref;
+ int ref_nsid;
+};
+
+static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args)
{
struct nlmsghdr *nlh;
struct rtgenmsg *rth;
- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rth), flags);
+ nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth),
+ args->flags);
if (!nlh)
return -EMSGSIZE;
rth = nlmsg_data(nlh);
rth->rtgen_family = AF_UNSPEC;
- if (nla_put_s32(skb, NETNSA_NSID, nsid))
+ if (nla_put_s32(skb, NETNSA_NSID, args->nsid))
+ goto nla_put_failure;
+
+ if (args->add_ref &&
+ nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid))
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -767,10 +783,15 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[NETNSA_MAX + 1];
+ struct net_fill_args fillargs = {
+ .portid = NETLINK_CB(skb).portid,
+ .seq = nlh->nlmsg_seq,
+ .cmd = RTM_NEWNSID,
+ };
+ struct net *peer, *target = net;
struct nlattr *nla;
struct sk_buff *msg;
- struct net *peer;
- int err, id;
+ int err;
err = nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
rtnl_net_policy, extack);
@@ -782,6 +803,11 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
} else if (tb[NETNSA_FD]) {
peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
nla = tb[NETNSA_FD];
+ } else if (tb[NETNSA_NSID]) {
+ peer = get_net_ns_by_id(net, nla_get_u32(tb[NETNSA_NSID]));
+ if (!peer)
+ peer = ERR_PTR(-ENOENT);
+ nla = tb[NETNSA_NSID];
} else {
NL_SET_ERR_MSG(extack, "Peer netns reference is missing");
return -EINVAL;
@@ -793,15 +819,29 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
return PTR_ERR(peer);
}
+ if (tb[NETNSA_TARGET_NSID]) {
+ int id = nla_get_s32(tb[NETNSA_TARGET_NSID]);
+
+ target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id);
+ if (IS_ERR(target)) {
+ NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]);
+ NL_SET_ERR_MSG(extack,
+ "Target netns reference is invalid");
+ err = PTR_ERR(target);
+ goto out;
+ }
+ fillargs.add_ref = true;
+ fillargs.ref_nsid = peernet2id(net, peer);
+ }
+
msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
if (!msg) {
err = -ENOMEM;
goto out;
}
- id = peernet2id(net, peer);
- err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
- RTM_NEWNSID, net, id);
+ fillargs.nsid = peernet2id(target, peer);
+ err = rtnl_net_fill(msg, &fillargs);
if (err < 0)
goto err_out;
@@ -811,14 +851,17 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh,
err_out:
nlmsg_free(msg);
out:
+ if (fillargs.add_ref)
+ put_net(target);
put_net(peer);
return err;
}
struct rtnl_net_dump_cb {
- struct net *net;
+ struct net *tgt_net;
+ struct net *ref_net;
struct sk_buff *skb;
- struct netlink_callback *cb;
+ struct net_fill_args fillargs;
int idx;
int s_idx;
};
@@ -831,9 +874,10 @@ static int rtnl_net_dumpid_one(int id, void *peer, void *data)
if (net_cb->idx < net_cb->s_idx)
goto cont;
- ret = rtnl_net_fill(net_cb->skb, NETLINK_CB(net_cb->cb->skb).portid,
- net_cb->cb->nlh->nlmsg_seq, NLM_F_MULTI,
- RTM_NEWNSID, net_cb->net, id);
+ net_cb->fillargs.nsid = id;
+ if (net_cb->fillargs.add_ref)
+ net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
+ ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs);
if (ret < 0)
return ret;
@@ -842,33 +886,96 @@ cont:
return 0;
}
+static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk,
+ struct rtnl_net_dump_cb *net_cb,
+ struct netlink_callback *cb)
+{
+ struct netlink_ext_ack *extack = cb->extack;
+ struct nlattr *tb[NETNSA_MAX + 1];
+ int err, i;
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX,
+ rtnl_net_policy, extack);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i <= NETNSA_MAX; i++) {
+ if (!tb[i])
+ continue;
+
+ if (i == NETNSA_TARGET_NSID) {
+ struct net *net;
+
+ net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i]));
+ if (IS_ERR(net)) {
+ NL_SET_BAD_ATTR(extack, tb[i]);
+ NL_SET_ERR_MSG(extack,
+ "Invalid target network namespace id");
+ return PTR_ERR(net);
+ }
+ net_cb->fillargs.add_ref = true;
+ net_cb->ref_net = net_cb->tgt_net;
+ net_cb->tgt_net = net;
+ } else {
+ NL_SET_BAD_ATTR(extack, tb[i]);
+ NL_SET_ERR_MSG(extack,
+ "Unsupported attribute in dump request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct net *net = sock_net(skb->sk);
struct rtnl_net_dump_cb net_cb = {
- .net = net,
+ .tgt_net = sock_net(skb->sk),
.skb = skb,
- .cb = cb,
+ .fillargs = {
+ .portid = NETLINK_CB(cb->skb).portid,
+ .seq = cb->nlh->nlmsg_seq,
+ .flags = NLM_F_MULTI,
+ .cmd = RTM_NEWNSID,
+ },
.idx = 0,
.s_idx = cb->args[0],
};
+ int err = 0;
- if (cb->strict_check &&
- nlmsg_attrlen(cb->nlh, sizeof(struct rtgenmsg))) {
- NL_SET_ERR_MSG(cb->extack, "Unknown data in network namespace id dump request");
- return -EINVAL;
+ if (cb->strict_check) {
+ err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb);
+ if (err < 0)
+ goto end;
}
- spin_lock_bh(&net->nsid_lock);
- idr_for_each(&net->netns_ids, rtnl_net_dumpid_one, &net_cb);
- spin_unlock_bh(&net->nsid_lock);
+ spin_lock_bh(&net_cb.tgt_net->nsid_lock);
+ if (net_cb.fillargs.add_ref &&
+ !net_eq(net_cb.ref_net, net_cb.tgt_net) &&
+ !spin_trylock_bh(&net_cb.ref_net->nsid_lock)) {
+ spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
+ err = -EAGAIN;
+ goto end;
+ }
+ idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb);
+ if (net_cb.fillargs.add_ref &&
+ !net_eq(net_cb.ref_net, net_cb.tgt_net))
+ spin_unlock_bh(&net_cb.ref_net->nsid_lock);
+ spin_unlock_bh(&net_cb.tgt_net->nsid_lock);
cb->args[0] = net_cb.idx;
- return skb->len;
+end:
+ if (net_cb.fillargs.add_ref)
+ put_net(net_cb.tgt_net);
+ return err < 0 ? err : skb->len;
}
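
For reference, the new NETNSA_TARGET_NSID attribute shifts whose numbering the answer uses (layout sketch with hypothetical ids, following the peernet2id() calls above):

    /* RTM_GETNSID request:
     *   NETNSA_NSID        = 3   - identifies the peer, caller's numbering
     *   NETNSA_TARGET_NSID = 7   - answer in this netns's numbering
     * RTM_NEWNSID reply:
     *   NETNSA_NSID              - peer's id as seen from the target netns
     *   NETNSA_CURRENT_NSID      - peer's id as seen from the caller's netns
     */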
static void rtnl_net_notifyid(struct net *net, int cmd, int id)
{
+ struct net_fill_args fillargs = {
+ .cmd = cmd,
+ .nsid = id,
+ };
struct sk_buff *msg;
int err = -ENOMEM;
@@ -876,7 +983,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id)
if (!msg)
goto out;
- err = rtnl_net_fill(msg, 0, 0, 0, cmd, net, id);
+ err = rtnl_net_fill(msg, &fillargs);
if (err < 0)
goto err_out;
@@ -917,7 +1024,8 @@ static int __init net_ns_init(void)
init_net_initialized = true;
up_write(&pernet_ops_rwsem);
- register_pernet_subsys(&net_ns_ops);
+ if (register_pernet_subsys(&net_ns_ops))
+ panic("Could not register network namespace subsystems");
rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL,
RTNL_FLAG_DOIT_UNLOCKED);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2b9fdbc43205..361aabffb8c0 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -663,7 +663,7 @@ int netpoll_setup(struct netpoll *np)
np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
- err = dev_open(ndev);
+ err = dev_open(ndev, NULL);
if (err) {
np_err(np, "failed to open %s\n", ndev->name);
@@ -801,7 +801,7 @@ void __netpoll_cleanup(struct netpoll *np)
ops->ndo_netpoll_cleanup(np->dev);
RCU_INIT_POINTER(np->dev->npinfo, NULL);
- call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+ call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
} else
RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
@@ -812,7 +812,7 @@ void __netpoll_free(struct netpoll *np)
ASSERT_RTNL();
/* Wait for transmitting packets to finish before freeing. */
- synchronize_rcu_bh();
+ synchronize_rcu();
__netpoll_cleanup(np);
kfree(np);
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 33d9227a8b80..48f61885fd6f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -59,7 +59,7 @@
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
-#define RTNL_MAX_TYPE 49
+#define RTNL_MAX_TYPE 50
#define RTNL_SLAVE_MAX_TYPE 36
struct rtnl_link {
@@ -2444,7 +2444,7 @@ static int do_setlink(const struct sk_buff *skb,
sa->sa_family = dev->type;
memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
dev->addr_len);
- err = dev_set_mac_address(dev, sa);
+ err = dev_set_mac_address(dev, sa, extack);
kfree(sa);
if (err)
goto errout;
@@ -2489,7 +2489,8 @@ static int do_setlink(const struct sk_buff *skb,
}
if (ifm->ifi_flags || ifm->ifi_change) {
- err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
+ err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
+ extack);
if (err < 0)
goto errout;
}
@@ -2870,7 +2871,8 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
old_flags = dev->flags;
if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
- err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
+ err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm),
+ NULL);
if (err < 0)
return err;
}
@@ -2885,9 +2887,11 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
}
EXPORT_SYMBOL(rtnl_configure_link);
-struct net_device *rtnl_create_link(struct net *net,
- const char *ifname, unsigned char name_assign_type,
- const struct rtnl_link_ops *ops, struct nlattr *tb[])
+struct net_device *rtnl_create_link(struct net *net, const char *ifname,
+ unsigned char name_assign_type,
+ const struct rtnl_link_ops *ops,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack)
{
struct net_device *dev;
unsigned int num_tx_queues = 1;
@@ -2903,11 +2907,15 @@ struct net_device *rtnl_create_link(struct net *net,
else if (ops->get_num_rx_queues)
num_rx_queues = ops->get_num_rx_queues();
- if (num_tx_queues < 1 || num_tx_queues > 4096)
+ if (num_tx_queues < 1 || num_tx_queues > 4096) {
+ NL_SET_ERR_MSG(extack, "Invalid number of transmit queues");
return ERR_PTR(-EINVAL);
+ }
- if (num_rx_queues < 1 || num_rx_queues > 4096)
+ if (num_rx_queues < 1 || num_rx_queues > 4096) {
+ NL_SET_ERR_MSG(extack, "Invalid number of receive queues");
return ERR_PTR(-EINVAL);
+ }
dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
ops->setup, num_tx_queues, num_rx_queues);
@@ -2965,20 +2973,24 @@ static int rtnl_group_changelink(const struct sk_buff *skb,
return 0;
}
-static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct netlink_ext_ack *extack)
+static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct nlattr **attr, struct netlink_ext_ack *extack)
{
+ struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
+ unsigned char name_assign_type = NET_NAME_USER;
+ struct nlattr *linkinfo[IFLA_INFO_MAX + 1];
+ const struct rtnl_link_ops *m_ops = NULL;
+ struct net_device *master_dev = NULL;
struct net *net = sock_net(skb->sk);
const struct rtnl_link_ops *ops;
- const struct rtnl_link_ops *m_ops = NULL;
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net *dest_net, *link_net;
+ struct nlattr **slave_data;
+ char kind[MODULE_NAME_LEN];
struct net_device *dev;
- struct net_device *master_dev = NULL;
struct ifinfomsg *ifm;
- char kind[MODULE_NAME_LEN];
char ifname[IFNAMSIZ];
- struct nlattr *tb[IFLA_MAX+1];
- struct nlattr *linkinfo[IFLA_INFO_MAX+1];
- unsigned char name_assign_type = NET_NAME_USER;
+ struct nlattr **data;
int err;
#ifdef CONFIG_MODULES
@@ -3034,193 +3046,200 @@ replay:
ops = NULL;
}
- if (1) {
- struct nlattr *attr[RTNL_MAX_TYPE + 1];
- struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1];
- struct nlattr **data = NULL;
- struct nlattr **slave_data = NULL;
- struct net *dest_net, *link_net = NULL;
-
- if (ops) {
- if (ops->maxtype > RTNL_MAX_TYPE)
- return -EINVAL;
+ data = NULL;
+ if (ops) {
+ if (ops->maxtype > RTNL_MAX_TYPE)
+ return -EINVAL;
- if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
- err = nla_parse_nested(attr, ops->maxtype,
- linkinfo[IFLA_INFO_DATA],
- ops->policy, NULL);
- if (err < 0)
- return err;
- data = attr;
- }
- if (ops->validate) {
- err = ops->validate(tb, data, extack);
- if (err < 0)
- return err;
- }
+ if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
+ err = nla_parse_nested(attr, ops->maxtype,
+ linkinfo[IFLA_INFO_DATA],
+ ops->policy, extack);
+ if (err < 0)
+ return err;
+ data = attr;
+ }
+ if (ops->validate) {
+ err = ops->validate(tb, data, extack);
+ if (err < 0)
+ return err;
}
+ }
- if (m_ops) {
- if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
- return -EINVAL;
+ slave_data = NULL;
+ if (m_ops) {
+ if (m_ops->slave_maxtype > RTNL_SLAVE_MAX_TYPE)
+ return -EINVAL;
- if (m_ops->slave_maxtype &&
- linkinfo[IFLA_INFO_SLAVE_DATA]) {
- err = nla_parse_nested(slave_attr,
- m_ops->slave_maxtype,
- linkinfo[IFLA_INFO_SLAVE_DATA],
- m_ops->slave_policy,
- NULL);
- if (err < 0)
- return err;
- slave_data = slave_attr;
- }
+ if (m_ops->slave_maxtype &&
+ linkinfo[IFLA_INFO_SLAVE_DATA]) {
+ err = nla_parse_nested(slave_attr, m_ops->slave_maxtype,
+ linkinfo[IFLA_INFO_SLAVE_DATA],
+ m_ops->slave_policy, extack);
+ if (err < 0)
+ return err;
+ slave_data = slave_attr;
}
+ }
- if (dev) {
- int status = 0;
-
- if (nlh->nlmsg_flags & NLM_F_EXCL)
- return -EEXIST;
- if (nlh->nlmsg_flags & NLM_F_REPLACE)
- return -EOPNOTSUPP;
+ if (dev) {
+ int status = 0;
- if (linkinfo[IFLA_INFO_DATA]) {
- if (!ops || ops != dev->rtnl_link_ops ||
- !ops->changelink)
- return -EOPNOTSUPP;
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+ if (nlh->nlmsg_flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
- err = ops->changelink(dev, tb, data, extack);
- if (err < 0)
- return err;
- status |= DO_SETLINK_NOTIFY;
- }
+ if (linkinfo[IFLA_INFO_DATA]) {
+ if (!ops || ops != dev->rtnl_link_ops ||
+ !ops->changelink)
+ return -EOPNOTSUPP;
- if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
- if (!m_ops || !m_ops->slave_changelink)
- return -EOPNOTSUPP;
+ err = ops->changelink(dev, tb, data, extack);
+ if (err < 0)
+ return err;
+ status |= DO_SETLINK_NOTIFY;
+ }
- err = m_ops->slave_changelink(master_dev, dev,
- tb, slave_data,
- extack);
- if (err < 0)
- return err;
- status |= DO_SETLINK_NOTIFY;
- }
+ if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
+ if (!m_ops || !m_ops->slave_changelink)
+ return -EOPNOTSUPP;
- return do_setlink(skb, dev, ifm, extack, tb, ifname,
- status);
+ err = m_ops->slave_changelink(master_dev, dev, tb,
+ slave_data, extack);
+ if (err < 0)
+ return err;
+ status |= DO_SETLINK_NOTIFY;
}
- if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
- if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
- return rtnl_group_changelink(skb, net,
+ return do_setlink(skb, dev, ifm, extack, tb, ifname, status);
+ }
+
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+ if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
+ return rtnl_group_changelink(skb, net,
nla_get_u32(tb[IFLA_GROUP]),
ifm, extack, tb);
- return -ENODEV;
- }
+ return -ENODEV;
+ }
- if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
- return -EOPNOTSUPP;
+ if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
+ return -EOPNOTSUPP;
- if (!ops) {
+ if (!ops) {
#ifdef CONFIG_MODULES
- if (kind[0]) {
- __rtnl_unlock();
- request_module("rtnl-link-%s", kind);
- rtnl_lock();
- ops = rtnl_link_ops_get(kind);
- if (ops)
- goto replay;
- }
-#endif
- return -EOPNOTSUPP;
+ if (kind[0]) {
+ __rtnl_unlock();
+ request_module("rtnl-link-%s", kind);
+ rtnl_lock();
+ ops = rtnl_link_ops_get(kind);
+ if (ops)
+ goto replay;
}
+#endif
+ NL_SET_ERR_MSG(extack, "Unknown device type");
+ return -EOPNOTSUPP;
+ }
- if (!ops->setup)
- return -EOPNOTSUPP;
+ if (!ops->setup)
+ return -EOPNOTSUPP;
- if (!ifname[0]) {
- snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
- name_assign_type = NET_NAME_ENUM;
- }
+ if (!ifname[0]) {
+ snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
+ name_assign_type = NET_NAME_ENUM;
+ }
- dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
- if (IS_ERR(dest_net))
- return PTR_ERR(dest_net);
+ dest_net = rtnl_link_get_net_capable(skb, net, tb, CAP_NET_ADMIN);
+ if (IS_ERR(dest_net))
+ return PTR_ERR(dest_net);
- if (tb[IFLA_LINK_NETNSID]) {
- int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
+ if (tb[IFLA_LINK_NETNSID]) {
+ int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
- link_net = get_net_ns_by_id(dest_net, id);
- if (!link_net) {
- err = -EINVAL;
- goto out;
- }
- err = -EPERM;
- if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
- goto out;
- }
-
- dev = rtnl_create_link(link_net ? : dest_net, ifname,
- name_assign_type, ops, tb);
- if (IS_ERR(dev)) {
- err = PTR_ERR(dev);
+ link_net = get_net_ns_by_id(dest_net, id);
+ if (!link_net) {
+ NL_SET_ERR_MSG(extack, "Unknown network namespace id");
+ err = -EINVAL;
goto out;
}
+ err = -EPERM;
+ if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
+ goto out;
+ } else {
+ link_net = NULL;
+ }
- dev->ifindex = ifm->ifi_index;
+ dev = rtnl_create_link(link_net ? : dest_net, ifname,
+ name_assign_type, ops, tb, extack);
+ if (IS_ERR(dev)) {
+ err = PTR_ERR(dev);
+ goto out;
+ }
- if (ops->newlink) {
- err = ops->newlink(link_net ? : net, dev, tb, data,
- extack);
- /* Drivers should call free_netdev() in ->destructor
- * and unregister it on failure after registration
- * so that device could be finally freed in rtnl_unlock.
- */
- if (err < 0) {
- /* If device is not registered at all, free it now */
- if (dev->reg_state == NETREG_UNINITIALIZED)
- free_netdev(dev);
- goto out;
- }
- } else {
- err = register_netdevice(dev);
- if (err < 0) {
+ dev->ifindex = ifm->ifi_index;
+
+ if (ops->newlink) {
+ err = ops->newlink(link_net ? : net, dev, tb, data, extack);
+ /* Drivers should call free_netdev() in ->destructor
+ * and unregister it on failure after registration
+ * so that device could be finally freed in rtnl_unlock.
+ */
+ if (err < 0) {
+ /* If device is not registered at all, free it now */
+ if (dev->reg_state == NETREG_UNINITIALIZED)
free_netdev(dev);
- goto out;
- }
+ goto out;
+ }
+ } else {
+ err = register_netdevice(dev);
+ if (err < 0) {
+ free_netdev(dev);
+ goto out;
}
- err = rtnl_configure_link(dev, ifm);
+ }
+ err = rtnl_configure_link(dev, ifm);
+ if (err < 0)
+ goto out_unregister;
+ if (link_net) {
+ err = dev_change_net_namespace(dev, dest_net, ifname);
if (err < 0)
goto out_unregister;
- if (link_net) {
- err = dev_change_net_namespace(dev, dest_net, ifname);
- if (err < 0)
- goto out_unregister;
- }
- if (tb[IFLA_MASTER]) {
- err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]),
- extack);
- if (err)
- goto out_unregister;
- }
+ }
+ if (tb[IFLA_MASTER]) {
+ err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]), extack);
+ if (err)
+ goto out_unregister;
+ }
out:
- if (link_net)
- put_net(link_net);
- put_net(dest_net);
- return err;
+ if (link_net)
+ put_net(link_net);
+ put_net(dest_net);
+ return err;
out_unregister:
- if (ops->newlink) {
- LIST_HEAD(list_kill);
+ if (ops->newlink) {
+ LIST_HEAD(list_kill);
- ops->dellink(dev, &list_kill);
- unregister_netdevice_many(&list_kill);
- } else {
- unregister_netdevice(dev);
- }
- goto out;
+ ops->dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ } else {
+ unregister_netdevice(dev);
}
+ goto out;
+}
+
+static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr **attr;
+ int ret;
+
+ attr = kmalloc_array(RTNL_MAX_TYPE + 1, sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ ret = __rtnl_newlink(skb, nlh, attr, extack);
+ kfree(attr);
+ return ret;
}
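
A sketch of the arithmetic that likely motivates the heap allocation (the kernel warns on large stack frames; the exact trigger is an assumption):

    /* (RTNL_MAX_TYPE + 1) * sizeof(struct nlattr *)
     *   = 51 * 8 = 408 bytes on 64-bit, formerly on __rtnl_newlink()'s
     * stack next to tb[], slave_attr[] and linkinfo[] - hence the
     * kmalloc_array() in the wrapper above. */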
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -3800,6 +3819,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
{
int err;
+ if (dev->type != ARPHRD_ETHER)
+ return -EINVAL;
+
netif_addr_lock_bh(dev);
err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
if (err)
@@ -3999,6 +4021,160 @@ out:
return skb->len;
}
+static int valid_fdb_get_strict(const struct nlmsghdr *nlh,
+ struct nlattr **tb, u8 *ndm_flags,
+ int *br_idx, int *brport_idx, u8 **addr,
+ u16 *vid, struct netlink_ext_ack *extack)
+{
+ struct ndmsg *ndm;
+ int err, i;
+
+ if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ndm))) {
+ NL_SET_ERR_MSG(extack, "Invalid header for fdb get request");
+ return -EINVAL;
+ }
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_pad1 || ndm->ndm_pad2 || ndm->ndm_state ||
+ ndm->ndm_type) {
+ NL_SET_ERR_MSG(extack, "Invalid values in header for fdb get request");
+ return -EINVAL;
+ }
+
+ if (ndm->ndm_flags & ~(NTF_MASTER | NTF_SELF)) {
+ NL_SET_ERR_MSG(extack, "Invalid flags in header for fdb get request");
+ return -EINVAL;
+ }
+
+ err = nlmsg_parse_strict(nlh, sizeof(struct ndmsg), tb, NDA_MAX,
+ nda_policy, extack);
+ if (err < 0)
+ return err;
+
+ *ndm_flags = ndm->ndm_flags;
+ *brport_idx = ndm->ndm_ifindex;
+ for (i = 0; i <= NDA_MAX; ++i) {
+ if (!tb[i])
+ continue;
+
+ switch (i) {
+ case NDA_MASTER:
+ *br_idx = nla_get_u32(tb[i]);
+ break;
+ case NDA_LLADDR:
+ if (nla_len(tb[i]) != ETH_ALEN) {
+ NL_SET_ERR_MSG(extack, "Invalid address in fdb get request");
+ return -EINVAL;
+ }
+ *addr = nla_data(tb[i]);
+ break;
+ case NDA_VLAN:
+ err = fdb_vid_parse(tb[i], vid, extack);
+ if (err)
+ return err;
+ break;
+ case NDA_VNI:
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported attribute in fdb get request");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rtnl_fdb_get(struct sk_buff *in_skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = NULL, *br_dev = NULL;
+ const struct net_device_ops *ops = NULL;
+ struct net *net = sock_net(in_skb->sk);
+ struct nlattr *tb[NDA_MAX + 1];
+ struct sk_buff *skb;
+ int brport_idx = 0;
+ u8 ndm_flags = 0;
+ int br_idx = 0;
+ u8 *addr = NULL;
+ u16 vid = 0;
+ int err;
+
+ err = valid_fdb_get_strict(nlh, tb, &ndm_flags, &br_idx,
+ &brport_idx, &addr, &vid, extack);
+ if (err < 0)
+ return err;
+
+ if (brport_idx) {
+ dev = __dev_get_by_index(net, brport_idx);
+ if (!dev) {
+ NL_SET_ERR_MSG(extack, "Unknown device ifindex");
+ return -ENODEV;
+ }
+ }
+
+ if (br_idx) {
+ if (dev) {
+ NL_SET_ERR_MSG(extack, "Master and device are mutually exclusive");
+ return -EINVAL;
+ }
+
+ br_dev = __dev_get_by_index(net, br_idx);
+ if (!br_dev) {
+ NL_SET_ERR_MSG(extack, "Invalid master ifindex");
+ return -EINVAL;
+ }
+ ops = br_dev->netdev_ops;
+ }
+
+ if (dev) {
+ if (!ndm_flags || (ndm_flags & NTF_MASTER)) {
+ if (!(dev->priv_flags & IFF_BRIDGE_PORT)) {
+ NL_SET_ERR_MSG(extack, "Device is not a bridge port");
+ return -EINVAL;
+ }
+ br_dev = netdev_master_upper_dev_get(dev);
+ if (!br_dev) {
+ NL_SET_ERR_MSG(extack, "Master of device not found");
+ return -EINVAL;
+ }
+ ops = br_dev->netdev_ops;
+ } else {
+ if (!(ndm_flags & NTF_SELF)) {
+ NL_SET_ERR_MSG(extack, "Missing NTF_SELF");
+ return -EINVAL;
+ }
+ ops = dev->netdev_ops;
+ }
+ }
+
+ if (!br_dev && !dev) {
+ NL_SET_ERR_MSG(extack, "No device specified");
+ return -ENODEV;
+ }
+
+ if (!ops || !ops->ndo_fdb_get) {
+ NL_SET_ERR_MSG(extack, "Fdb get operation not supported by device");
+ return -EOPNOTSUPP;
+ }
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ if (br_dev)
+ dev = br_dev;
+ err = ops->ndo_fdb_get(skb, tb, dev, addr, vid,
+ NETLINK_CB(in_skb).portid,
+ nlh->nlmsg_seq, extack);
+ if (err)
+ goto out;
+
+ return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
+out:
+ kfree_skb(skb);
+ return err;
+}
+
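As with RTM_GETNEIGH above, the header flags pick which FDB answers a PF_BRIDGE get request. Illustrative fill code (hypothetical request struct, mirroring valid_fdb_get_strict()):

    /* No flag or NTF_MASTER: forward to the bridge master's ndo_fdb_get.
     * NTF_SELF: ask the port device itself (e.g. a VXLAN netdev). */
    req.ndm.ndm_family  = PF_BRIDGE;
    req.ndm.ndm_ifindex = port_ifindex;   /* assumption: a bridge port */
    req.ndm.ndm_flags   = NTF_SELF;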
static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
unsigned int attrnum, unsigned int flag)
{
@@ -4310,7 +4486,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
}
- err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags);
+ err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh, flags,
+ extack);
if (err)
goto out;
@@ -4322,7 +4499,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
err = -EOPNOTSUPP;
else
err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh,
- flags);
+ flags,
+ extack);
if (!err) {
flags &= ~BRIDGE_FLAGS_SELF;
@@ -5057,7 +5235,7 @@ void __init rtnetlink_init(void)
rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0);
rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0);
- rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, 0);
+ rtnl_register(PF_BRIDGE, RTM_GETNEIGH, rtnl_fdb_get, rtnl_fdb_dump, 0);
rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0);
rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a8217e221e19..37317ffec146 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -79,6 +79,9 @@
struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
+#ifdef CONFIG_SKB_EXTENSIONS
+static struct kmem_cache *skbuff_ext_cache __ro_after_init;
+#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
@@ -606,7 +609,6 @@ fastpath:
void skb_release_head_state(struct sk_buff *skb)
{
skb_dst_drop(skb);
- secpath_reset(skb);
if (skb->destructor) {
WARN_ON(in_irq());
skb->destructor(skb);
@@ -614,9 +616,7 @@ void skb_release_head_state(struct sk_buff *skb)
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
nf_conntrack_put(skb_nfct(skb));
#endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- nf_bridge_put(skb->nf_bridge);
-#endif
+ skb_ext_put(skb);
}
/* Free everything but the sk_buff shell. */
@@ -796,9 +796,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->dev = old->dev;
memcpy(new->cb, old->cb, sizeof(old->cb));
skb_dst_copy(new, old);
-#ifdef CONFIG_XFRM
- new->sp = secpath_get(old->sp);
-#endif
+ __skb_ext_copy(new, old);
__nf_copy(new, old, false);
/* Note : this field could be in headers_start/headers_end section
@@ -1089,7 +1087,7 @@ void sock_zerocopy_put(struct ubuf_info *uarg)
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put);
-void sock_zerocopy_put_abort(struct ubuf_info *uarg)
+void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
if (uarg) {
struct sock *sk = skb_from_uarg(uarg)->sk;
@@ -1097,7 +1095,8 @@ void sock_zerocopy_put_abort(struct ubuf_info *uarg)
atomic_dec(&sk->sk_zckey);
uarg->len--;
- sock_zerocopy_put(uarg);
+ if (have_uref)
+ sock_zerocopy_put(uarg);
}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
@@ -1105,6 +1104,12 @@ EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
struct iov_iter *from, size_t length);
+int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
+{
+ return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
+}
+EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
+
int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
struct msghdr *msg, int len,
struct ubuf_info *uarg)
@@ -1131,7 +1136,7 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
return err;
}
- skb_zcopy_set(skb, uarg);
+ skb_zcopy_set(skb, uarg, NULL);
return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
@@ -1151,7 +1156,7 @@ static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
if (skb_copy_ubufs(nskb, GFP_ATOMIC))
return -EIO;
}
- skb_zcopy_set(nskb, skb_uarg(orig));
+ skb_zcopy_set(nskb, skb_uarg(orig), NULL);
}
return 0;
}
@@ -1925,8 +1930,6 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
struct sk_buff *insp = NULL;
do {
- BUG_ON(!list);
-
if (list->len <= eat) {
/* Eaten as whole. */
eat -= list->len;
@@ -2366,19 +2369,6 @@ error:
}
EXPORT_SYMBOL_GPL(skb_send_sock_locked);
-/* Send skb data on a socket. */
-int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
-{
- int ret = 0;
-
- lock_sock(sk);
- ret = skb_send_sock_locked(sk, skb, offset, len);
- release_sock(sk);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(skb_send_sock);
-
/**
* skb_store_bits - store bits from kernel buffer to skb
* @skb: destination buffer
@@ -2645,6 +2635,65 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);
+__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
+{
+ __sum16 sum;
+
+ sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
+ /* See comments in __skb_checksum_complete(). */
+ if (likely(!sum)) {
+ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !skb->csum_complete_sw)
+ netdev_rx_csum_fault(skb->dev, skb);
+ }
+ if (!skb_shared(skb))
+ skb->csum_valid = !sum;
+ return sum;
+}
+EXPORT_SYMBOL(__skb_checksum_complete_head);
+
+/* This function assumes skb->csum already holds the pseudo header's checksum,
+ * which has been changed from the hardware checksum, for example, by
+ * __skb_checksum_validate_complete(). The original skb->csum must also
+ * have failed validation for the CHECKSUM_COMPLETE case.
+ *
+ * It returns non-zero if the recomputed checksum is still invalid, otherwise
+ * zero. The new checksum is stored back into skb->csum unless the skb is
+ * shared.
+ */
+__sum16 __skb_checksum_complete(struct sk_buff *skb)
+{
+ __wsum csum;
+ __sum16 sum;
+
+ csum = skb_checksum(skb, 0, skb->len, 0);
+
+ sum = csum_fold(csum_add(skb->csum, csum));
+ /* This check is inverted, because we already knew the hardware
+ * checksum is invalid before calling this function. So, if the
+ * re-computed checksum is valid instead, then we have a mismatch
+ * between the original skb->csum and skb_checksum(). This means either
+ * the original hardware checksum is incorrect or we screwed up skb->csum
+ * when moving skb->data around.
+ */
+ if (likely(!sum)) {
+ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !skb->csum_complete_sw)
+ netdev_rx_csum_fault(skb->dev, skb);
+ }
+
+ if (!skb_shared(skb)) {
+ /* Save full packet checksum */
+ skb->csum = csum;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum_complete_sw = 1;
+ skb->csum_valid = !sum;
+ }
+
+ return sum;
+}
+EXPORT_SYMBOL(__skb_checksum_complete);
+
static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
{
net_warn_ratelimited(
@@ -2962,28 +3011,6 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head
}
EXPORT_SYMBOL(skb_append);
-/**
- * skb_insert - insert a buffer
- * @old: buffer to insert before
- * @newsk: buffer to insert
- * @list: list to use
- *
- * Place a packet before a given packet in a list. The list locks are
- * taken and this function is atomic with respect to other list locked
- * calls.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
- __skb_insert(newsk, old->prev, old, list);
- spin_unlock_irqrestore(&list->lock, flags);
-}
-EXPORT_SYMBOL(skb_insert);
-
static inline void skb_split_inside_header(struct sk_buff *skb,
struct sk_buff* skb1,
const u32 len, const int pos)
@@ -3873,6 +3900,46 @@ done:
}
EXPORT_SYMBOL_GPL(skb_gro_receive);
+#ifdef CONFIG_SKB_EXTENSIONS
+#define SKB_EXT_ALIGN_VALUE 8
+#define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
+
+static const u8 skb_ext_type_len[] = {
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
+#endif
+#ifdef CONFIG_XFRM
+ [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
+#endif
+};
+
+static __always_inline unsigned int skb_ext_total_length(void)
+{
+ return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
+#endif
+#ifdef CONFIG_XFRM
+ skb_ext_type_len[SKB_EXT_SEC_PATH] +
+#endif
+ 0;
+}
+
+static void skb_extensions_init(void)
+{
+ BUILD_BUG_ON(SKB_EXT_NUM >= 8);
+ BUILD_BUG_ON(skb_ext_total_length() > 255);
+
+ skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
+ SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
+ 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ NULL);
+}
+#else
+static void skb_extensions_init(void) {}
+#endif
+
void __init skb_init(void)
{
skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
@@ -3887,6 +3954,7 @@ void __init skb_init(void)
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
+ skb_extensions_init();
}
static int
@@ -4856,7 +4924,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
#ifdef CONFIG_NET_SWITCHDEV
skb->offload_fwd_mark = 0;
- skb->offload_mr_fwd_mark = 0;
+ skb->offload_l3_fwd_mark = 0;
#endif
if (!xnet)
@@ -5128,7 +5196,7 @@ int skb_vlan_pop(struct sk_buff *skb)
int err;
if (likely(skb_vlan_tag_present(skb))) {
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
} else {
if (unlikely(!eth_type_vlan(skb->protocol)))
return 0;
@@ -5525,3 +5593,148 @@ void skb_condense(struct sk_buff *skb)
*/
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}
+
+#ifdef CONFIG_SKB_EXTENSIONS
+static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
+{
+ return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
+}
+
+static struct skb_ext *skb_ext_alloc(void)
+{
+ struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
+
+ if (new) {
+ memset(new->offset, 0, sizeof(new->offset));
+ refcount_set(&new->refcnt, 1);
+ }
+
+ return new;
+}
+
+static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
+ unsigned int old_active)
+{
+ struct skb_ext *new;
+
+ if (refcount_read(&old->refcnt) == 1)
+ return old;
+
+ new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
+ if (!new)
+ return NULL;
+
+ memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
+ refcount_set(&new->refcnt, 1);
+
+#ifdef CONFIG_XFRM
+ if (old_active & (1 << SKB_EXT_SEC_PATH)) {
+ struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
+ unsigned int i;
+
+ for (i = 0; i < sp->len; i++)
+ xfrm_state_hold(sp->xvec[i]);
+ }
+#endif
+ __skb_ext_put(old);
+ return new;
+}
+
+/**
+ * skb_ext_add - allocate space for given extension, COW if needed
+ * @skb: buffer
+ * @id: extension to allocate space for
+ *
+ * Allocates enough space for the given extension.
+ * If the extension is already present, a pointer to that extension
+ * is returned.
+ *
+ * If the skb was cloned, COW applies and the returned memory can be
+ * modified without changing the extension space of cloned buffers.
+ *
+ * Returns pointer to the extension or NULL on allocation failure.
+ */
+void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
+{
+ struct skb_ext *new, *old = NULL;
+ unsigned int newlen, newoff;
+
+ if (skb->active_extensions) {
+ old = skb->extensions;
+
+ new = skb_ext_maybe_cow(old, skb->active_extensions);
+ if (!new)
+ return NULL;
+
+ if (__skb_ext_exist(new, id))
+ goto set_active;
+
+ newoff = new->chunks;
+ } else {
+ newoff = SKB_EXT_CHUNKSIZEOF(*new);
+
+ new = skb_ext_alloc();
+ if (!new)
+ return NULL;
+ }
+
+ newlen = newoff + skb_ext_type_len[id];
+ new->chunks = newlen;
+ new->offset[id] = newoff;
+set_active:
+ skb->extensions = new;
+ skb->active_extensions |= 1 << id;
+ return skb_ext_get_ptr(new, id);
+}
+EXPORT_SYMBOL(skb_ext_add);
+
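A usage sketch (hypothetical caller, shaped after the secpath conversion visible elsewhere in this diff):

    /* Attach a sec_path extension; skb_ext_add() performs the clone
     * COW above, so the returned memory is safe to modify. Freshly
     * added space is not zeroed - the caller initializes it. */
    static struct sec_path *attach_secpath(struct sk_buff *skb)
    {
            struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);

            if (!sp)
                    return NULL;    /* allocation failure, skb untouched */
            sp->len = 0;
            return sp;
    }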
+#ifdef CONFIG_XFRM
+static void skb_ext_put_sp(struct sec_path *sp)
+{
+ unsigned int i;
+
+ for (i = 0; i < sp->len; i++)
+ xfrm_state_put(sp->xvec[i]);
+}
+#endif
+
+void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
+{
+ struct skb_ext *ext = skb->extensions;
+
+ skb->active_extensions &= ~(1 << id);
+ if (skb->active_extensions == 0) {
+ skb->extensions = NULL;
+ __skb_ext_put(ext);
+#ifdef CONFIG_XFRM
+ } else if (id == SKB_EXT_SEC_PATH &&
+ refcount_read(&ext->refcnt) == 1) {
+ struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
+
+ skb_ext_put_sp(sp);
+ sp->len = 0;
+#endif
+ }
+}
+EXPORT_SYMBOL(__skb_ext_del);
+
+void __skb_ext_put(struct skb_ext *ext)
+{
+ /* If this is the last clone, nothing can increment
+ * it after the check passes. Avoids one atomic op.
+ */
+ if (refcount_read(&ext->refcnt) == 1)
+ goto free_now;
+
+ if (!refcount_dec_and_test(&ext->refcnt))
+ return;
+free_now:
+#ifdef CONFIG_XFRM
+ if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
+ skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
+#endif
+
+ kmem_cache_free(skbuff_ext_cache, ext);
+}
+EXPORT_SYMBOL(__skb_ext_put);
+#endif /* CONFIG_SKB_EXTENSIONS */
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 56a99d0c9aa0..d6d5c20d7044 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -94,6 +94,9 @@ int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
}
while (len) {
+ if (sk_msg_full(dst))
+ return -ENOSPC;
+
sge_len = sge->length - off;
sge_off = sge->offset + off;
if (sge_len > len)
@@ -403,7 +406,7 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
msg->skb = skb;
sk_psock_queue_msg(psock, msg);
- sk->sk_data_ready(sk);
+ sk_psock_data_ready(sk, psock);
return copied;
}
@@ -572,6 +575,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
rcu_assign_sk_user_data(sk, NULL);
sk_psock_cork_free(psock);
+ sk_psock_zap_ingress(psock);
sk_psock_restore_proto(sk, psock);
write_lock_bh(&sk->sk_callback_lock);
@@ -580,7 +584,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
write_unlock_bh(&sk->sk_callback_lock);
sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
- call_rcu_sched(&psock->rcu, sk_psock_destroy);
+ call_rcu(&psock->rcu, sk_psock_destroy);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);
@@ -669,6 +673,22 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
bool ingress;
switch (verdict) {
+ case __SK_PASS:
+ sk_other = psock->sk;
+ if (sock_flag(sk_other, SOCK_DEAD) ||
+ !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
+ goto out_free;
+ }
+ if (atomic_read(&sk_other->sk_rmem_alloc) <=
+ sk_other->sk_rcvbuf) {
+ struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);
+
+ tcp->bpf.flags |= BPF_F_INGRESS;
+ skb_queue_tail(&psock->ingress_skb, skb);
+ schedule_work(&psock->work);
+ break;
+ }
+ goto out_free;
case __SK_REDIRECT:
sk_other = tcp_skb_bpf_redirect_fetch(skb);
if (unlikely(!sk_other))
@@ -735,7 +755,7 @@ static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
}
/* Called with socket lock held. */
-static void sk_psock_data_ready(struct sock *sk)
+static void sk_psock_strp_data_ready(struct sock *sk)
{
struct sk_psock *psock;
@@ -783,7 +803,7 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
return;
parser->saved_data_ready = sk->sk_data_ready;
- sk->sk_data_ready = sk_psock_data_ready;
+ sk->sk_data_ready = sk_psock_strp_data_ready;
sk->sk_write_space = sk_psock_write_space;
parser->enabled = true;
}
diff --git a/net/core/sock.c b/net/core/sock.c
index 080a880a1761..f00902c532cc 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -567,6 +567,8 @@ static int sock_setbindtodevice(struct sock *sk, char __user *optval,
lock_sock(sk);
sk->sk_bound_dev_if = index;
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
sk_dst_reset(sk);
release_sock(sk);
@@ -698,6 +700,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
+ sk_dst_reset(sk);
break;
case SO_BROADCAST:
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
@@ -950,10 +953,12 @@ set_rcvbuf:
clear_bit(SOCK_PASSSEC, &sock->flags);
break;
case SO_MARK:
- if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
ret = -EPERM;
- else
+ } else if (val != sk->sk_mark) {
sk->sk_mark = val;
+ sk_dst_reset(sk);
+ }
break;
case SO_RXQ_OVFL:
@@ -1014,7 +1019,10 @@ set_rcvbuf:
case SO_ZEROCOPY:
if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
- if (sk->sk_protocol != IPPROTO_TCP)
+ if (!((sk->sk_type == SOCK_STREAM &&
+ sk->sk_protocol == IPPROTO_TCP) ||
+ (sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP)))
ret = -ENOTSUPP;
} else if (sk->sk_family != PF_RDS) {
ret = -ENOTSUPP;
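
With the condition above widened to SOCK_DGRAM/IPPROTO_UDP, UDP sockets can opt in to zerocopy transmission the same way TCP already does (userspace sketch; completion handling elided):

    int one = 1;

    setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
    send(fd, buf, len, MSG_ZEROCOPY);     /* pages pinned, not copied */
    /* the completion notification arrives on the error queue: */
    recvmsg(fd, &msg, MSG_ERRQUEUE);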
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index ba5cba56f574..d8fe3e549373 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -187,6 +187,7 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
call_rcu(&old_reuse->rcu, reuseport_free_rcu);
return 0;
}
+EXPORT_SYMBOL(reuseport_add_sock);
void reuseport_detach_sock(struct sock *sk)
{
diff --git a/net/core/stream.c b/net/core/stream.c
index 7d329fb1f553..e94bb02a5629 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -32,7 +32,7 @@ void sk_stream_write_space(struct sock *sk)
struct socket *sock = sk->sk_socket;
struct socket_wq *wq;
- if (sk_stream_is_writeable(sk) && sock) {
+ if (__sk_stream_is_writeable(sk, 1) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
rcu_read_lock();
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 37b4667128a3..d67ec17f2cc8 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -28,6 +28,8 @@ static int two __maybe_unused = 2;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
static int max_skb_frags = MAX_SKB_FRAGS;
+static long long_one __maybe_unused = 1;
+static long long_max __maybe_unused = LONG_MAX;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -289,6 +291,17 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
+
+static int
+proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
+}
#endif
static struct ctl_table net_core_table[] = {
@@ -398,10 +411,11 @@ static struct ctl_table net_core_table[] = {
{
.procname = "bpf_jit_limit",
.data = &bpf_jit_limit,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(long),
.mode = 0600,
- .proc_handler = proc_dointvec_minmax_bpf_restricted,
- .extra1 = &one,
+ .proc_handler = proc_dolongvec_minmax_bpf_restricted,
+ .extra1 = &long_one,
+ .extra2 = &long_max,
},
#endif
{
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 8e08cea6f178..26a21d97b6b0 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -231,7 +231,7 @@ EXPORT_SYMBOL(dccp_req_err);
* check at all. A more general error queue to queue errors for later handling
* is probably better.
*/
-static void dccp_v4_err(struct sk_buff *skb, u32 info)
+static int dccp_v4_err(struct sk_buff *skb, u32 info)
{
const struct iphdr *iph = (struct iphdr *)skb->data;
const u8 offset = iph->ihl << 2;
@@ -259,16 +259,18 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
inet_iif(skb), 0);
if (!sk) {
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
- return;
+ return -ENOENT;
}
if (sk->sk_state == DCCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
- return;
+ return 0;
}
seq = dccp_hdr_seq(dh);
- if (sk->sk_state == DCCP_NEW_SYN_RECV)
- return dccp_req_err(sk, seq);
+ if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+ dccp_req_err(sk, seq);
+ return 0;
+ }
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
@@ -357,6 +359,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
out:
bh_unlock_sock(sk);
sock_put(sk);
+ return 0;
}
static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6344f1b18a6a..d5740bad5b18 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -68,7 +68,7 @@ static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
}
-static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
@@ -96,16 +96,18 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!sk) {
__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
- return;
+ return -ENOENT;
}
if (sk->sk_state == DCCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
- return;
+ return 0;
}
seq = dccp_hdr_seq(dh);
- if (sk->sk_state == DCCP_NEW_SYN_RECV)
- return dccp_req_err(sk, seq);
+ if (sk->sk_state == DCCP_NEW_SYN_RECV) {
+ dccp_req_err(sk, seq);
+ return 0;
+ }
bh_lock_sock(sk);
if (sock_owned_by_user(sk))
@@ -183,6 +185,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
out:
bh_unlock_sock(sk);
sock_put(sk);
+ return 0;
}
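
Note: both DCCP hunks follow the same tree-wide conversion: ICMP(v6) error handlers now return an int, so a caller can distinguish "no matching socket" (-ENOENT) from "handled" and try a fallback. A hedged, illustrative sketch of that calling pattern — the dispatcher and fallback names here are hypothetical, not the kernel's:

/* Illustrative only: dispatch an ICMP error to a protocol handler and
 * fall back to a tunnel lookup when no matching socket exists.
 */
static int demux_icmp_err(const struct net_protocol *ipprot,
			  struct sk_buff *skb, u32 info)
{
	int err = ipprot->err_handler(skb, info);

	if (err == -ENOENT)			/* no socket: maybe tunneled */
		err = tunnel_err_fallback(skb, info);	/* hypothetical */
	return err;
}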
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 43733accf58e..2cc5fbb1b29e 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -948,6 +948,7 @@ int inet_dccp_listen(struct socket *sock, int backlog)
if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
goto out;
+ sk->sk_max_ack_backlog = backlog;
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
@@ -960,7 +961,6 @@ int inet_dccp_listen(struct socket *sock, int backlog)
if (err)
goto out;
}
- sk->sk_max_ack_backlog = backlog;
err = 0;
out:
@@ -1139,8 +1139,11 @@ static int __init dccp_init(void)
rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL);
if (rc)
goto out_fail;
- rc = -ENOBUFS;
inet_hashinfo_init(&dccp_hashinfo);
+ rc = inet_hashinfo2_init_mod(&dccp_hashinfo);
+ if (rc)
+ goto out_fail;
+ rc = -ENOBUFS;
dccp_hashinfo.bind_bucket_cachep =
kmem_cache_create("dccp_bind_bucket",
sizeof(struct inet_bind_bucket), 0,
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 7d6ff983ba2c..bdccc46a2921 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -192,7 +192,7 @@ static int check_port(__le16 port)
static unsigned short port_alloc(struct sock *sk)
{
struct dn_scp *scp = DN_SK(sk);
-static unsigned short port = 0x2000;
+ static unsigned short port = 0x2000;
unsigned short i_port = port;
while(check_port(cpu_to_le16(++port)) != 0) {
@@ -2405,7 +2405,7 @@ static void __exit decnet_exit(void)
proto_unregister(&dn_proto);
- rcu_barrier_bh(); /* Wait for completion of call_rcu_bh()'s */
+ rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
module_exit(decnet_exit);
#endif
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 48c41918fb35..91e52973ee13 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -44,6 +44,10 @@ config NET_DSA_TAG_GSWIP
config NET_DSA_TAG_KSZ
bool
+config NET_DSA_TAG_KSZ9477
+ bool
+ select NET_DSA_TAG_KSZ
+
config NET_DSA_TAG_LAN9303
bool
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index a69c1790bbfc..aee909bcddc4 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -55,8 +55,8 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
#ifdef CONFIG_NET_DSA_TAG_GSWIP
[DSA_TAG_PROTO_GSWIP] = &gswip_netdev_ops,
#endif
-#ifdef CONFIG_NET_DSA_TAG_KSZ
- [DSA_TAG_PROTO_KSZ] = &ksz_netdev_ops,
+#ifdef CONFIG_NET_DSA_TAG_KSZ9477
+ [DSA_TAG_PROTO_KSZ9477] = &ksz9477_netdev_ops,
#endif
#ifdef CONFIG_NET_DSA_TAG_LAN9303
[DSA_TAG_PROTO_LAN9303] = &lan9303_netdev_ops,
@@ -91,8 +91,8 @@ const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
#ifdef CONFIG_NET_DSA_TAG_GSWIP
[DSA_TAG_PROTO_GSWIP] = "gswip",
#endif
-#ifdef CONFIG_NET_DSA_TAG_KSZ
- [DSA_TAG_PROTO_KSZ] = "ksz",
+#ifdef CONFIG_NET_DSA_TAG_KSZ9477
+ [DSA_TAG_PROTO_KSZ9477] = "ksz9477",
#endif
#ifdef CONFIG_NET_DSA_TAG_LAN9303
[DSA_TAG_PROTO_LAN9303] = "lan9303",
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 9e4fd04ab53c..026a05774bf7 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -210,7 +210,7 @@ extern const struct dsa_device_ops edsa_netdev_ops;
extern const struct dsa_device_ops gswip_netdev_ops;
/* tag_ksz.c */
-extern const struct dsa_device_ops ksz_netdev_ops;
+extern const struct dsa_device_ops ksz9477_netdev_ops;
/* tag_lan9303.c */
extern const struct dsa_device_ops lan9303_netdev_ops;
diff --git a/net/dsa/master.c b/net/dsa/master.c
index c90ee3227dea..71bb15f491c8 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -158,8 +158,59 @@ static void dsa_master_ethtool_teardown(struct net_device *dev)
cpu_dp->orig_ethtool_ops = NULL;
}
+static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *dev = to_net_dev(d);
+ struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+ return sprintf(buf, "%s\n",
+ dsa_tag_protocol_to_str(cpu_dp->tag_ops));
+}
+static DEVICE_ATTR_RO(tagging);
+
+static struct attribute *dsa_slave_attrs[] = {
+ &dev_attr_tagging.attr,
+ NULL
+};
+
+static const struct attribute_group dsa_group = {
+ .name = "dsa",
+ .attrs = dsa_slave_attrs,
+};
+
+static void dsa_master_set_mtu(struct net_device *dev, struct dsa_port *cpu_dp)
+{
+ unsigned int mtu = ETH_DATA_LEN + cpu_dp->tag_ops->overhead;
+ int err;
+
+ rtnl_lock();
+ if (mtu <= dev->max_mtu) {
+ err = dev_set_mtu(dev, mtu);
+ if (err)
+ netdev_dbg(dev, "Unable to set MTU to include for DSA overheads\n");
+ }
+ rtnl_unlock();
+}
+
+static void dsa_master_reset_mtu(struct net_device *dev)
+{
+ int err;
+
+ rtnl_lock();
+ err = dev_set_mtu(dev, ETH_DATA_LEN);
+ if (err)
+ netdev_dbg(dev,
+ "Unable to reset MTU to exclude DSA overheads\n");
+ rtnl_unlock();
+}
+
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
+ int ret;
+
+ dsa_master_set_mtu(dev, cpu_dp);
+
/* If we use a tagging format that doesn't have an ethertype
* field, make sure that all packets from this point on get
* sent to the tag format's receive function.
@@ -168,12 +219,22 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
dev->dsa_ptr = cpu_dp;
- return dsa_master_ethtool_setup(dev);
+ ret = dsa_master_ethtool_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
+ if (ret)
+ dsa_master_ethtool_teardown(dev);
+
+ return ret;
}
void dsa_master_teardown(struct net_device *dev)
{
+ sysfs_remove_group(&dev->dev.kobj, &dsa_group);
dsa_master_ethtool_teardown(dev);
+ dsa_master_reset_mtu(dev);
dev->dsa_ptr = NULL;
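
Note: with the master.c hunk, the "tagging" sysfs attribute moves from each slave to the DSA master interface, and the master's MTU is raised by the tagger's overhead at setup (and reset at teardown). A small sketch that reads the relocated attribute — "eth0" is assumed to be the master device:

#include <stdio.h>

int main(void)
{
	/* "eth0" is assumed to be the DSA master (CPU port) device. */
	FILE *f = fopen("/sys/class/net/eth0/dsa/tagging", "r");
	char proto[32];

	if (f && fgets(proto, sizeof(proto), f))
		printf("tagging protocol: %s", proto);
	if (f)
		fclose(f);
	return 0;
}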
diff --git a/net/dsa/port.c b/net/dsa/port.c
index ed0595459df1..2d7e01b23572 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -252,9 +252,6 @@ int dsa_port_vlan_add(struct dsa_port *dp,
.vlan = vlan,
};
- if (netif_is_bridge_master(vlan->obj.orig_dev))
- return -EOPNOTSUPP;
-
if (br_vlan_enabled(dp->bridge_dev))
return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_ADD, &info);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 7d0c19e7edcf..a3fcc1d01615 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1050,35 +1050,12 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
static const struct switchdev_ops dsa_slave_switchdev_ops = {
.switchdev_port_attr_get = dsa_slave_port_attr_get,
.switchdev_port_attr_set = dsa_slave_port_attr_set,
- .switchdev_port_obj_add = dsa_slave_port_obj_add,
- .switchdev_port_obj_del = dsa_slave_port_obj_del,
};
static struct device_type dsa_type = {
.name = "dsa",
};
-static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
- char *buf)
-{
- struct net_device *dev = to_net_dev(d);
- struct dsa_port *dp = dsa_slave_to_port(dev);
-
- return sprintf(buf, "%s\n",
- dsa_tag_protocol_to_str(dp->cpu_dp->tag_ops));
-}
-static DEVICE_ATTR_RO(tagging);
-
-static struct attribute *dsa_slave_attrs[] = {
- &dev_attr_tagging.attr,
- NULL
-};
-
-static const struct attribute_group dsa_group = {
- .name = "dsa",
- .attrs = dsa_slave_attrs,
-};
-
static void dsa_slave_phylink_validate(struct net_device *dev,
unsigned long *supported,
struct phylink_link_state *state)
@@ -1374,14 +1351,8 @@ int dsa_slave_create(struct dsa_port *port)
goto out_phy;
}
- ret = sysfs_create_group(&slave_dev->dev.kobj, &dsa_group);
- if (ret)
- goto out_unreg;
-
return 0;
-out_unreg:
- unregister_netdev(slave_dev);
out_phy:
rtnl_lock();
phylink_disconnect_phy(p->dp->pl);
@@ -1405,7 +1376,6 @@ void dsa_slave_destroy(struct net_device *slave_dev)
rtnl_unlock();
dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
- sysfs_remove_group(&slave_dev->dev.kobj, &dsa_group);
unregister_netdev(slave_dev);
phylink_destroy(dp->pl);
free_percpu(p->stats64);
@@ -1557,6 +1527,44 @@ err_fdb_work_init:
return NOTIFY_BAD;
}
+static int
+dsa_slave_switchdev_port_obj_event(unsigned long event,
+ struct net_device *netdev,
+ struct switchdev_notifier_port_obj_info *port_obj_info)
+{
+ int err = -EOPNOTSUPP;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = dsa_slave_port_obj_add(netdev, port_obj_info->obj,
+ port_obj_info->trans);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = dsa_slave_port_obj_del(netdev, port_obj_info->obj);
+ break;
+ }
+
+ port_obj_info->handled = true;
+ return notifier_from_errno(err);
+}
+
+static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ if (!dsa_slave_dev_check(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD: /* fall through */
+ case SWITCHDEV_PORT_OBJ_DEL:
+ return dsa_slave_switchdev_port_obj_event(event, dev, ptr);
+ }
+
+ return NOTIFY_DONE;
+}
+
static struct notifier_block dsa_slave_nb __read_mostly = {
.notifier_call = dsa_slave_netdevice_event,
};
@@ -1565,8 +1573,13 @@ static struct notifier_block dsa_slave_switchdev_notifier = {
.notifier_call = dsa_slave_switchdev_event,
};
+static struct notifier_block dsa_slave_switchdev_blocking_notifier = {
+ .notifier_call = dsa_slave_switchdev_blocking_event,
+};
+
int dsa_slave_register_notifier(void)
{
+ struct notifier_block *nb;
int err;
err = register_netdevice_notifier(&dsa_slave_nb);
@@ -1577,8 +1590,15 @@ int dsa_slave_register_notifier(void)
if (err)
goto err_switchdev_nb;
+ nb = &dsa_slave_switchdev_blocking_notifier;
+ err = register_switchdev_blocking_notifier(nb);
+ if (err)
+ goto err_switchdev_blocking_nb;
+
return 0;
+err_switchdev_blocking_nb:
+ unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
err_switchdev_nb:
unregister_netdevice_notifier(&dsa_slave_nb);
return err;
@@ -1586,8 +1606,14 @@ err_switchdev_nb:
void dsa_slave_unregister_notifier(void)
{
+ struct notifier_block *nb;
int err;
+ nb = &dsa_slave_switchdev_blocking_notifier;
+ err = unregister_switchdev_blocking_notifier(nb);
+ if (err)
+ pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err);
+
err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier);
if (err)
pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err);
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 2b06bb91318b..4aa1d368a5ae 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -174,6 +174,7 @@ static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
const struct dsa_device_ops brcm_netdev_ops = {
.xmit = brcm_tag_xmit,
.rcv = brcm_tag_rcv,
+ .overhead = BRCM_TAG_LEN,
};
#endif
@@ -196,5 +197,6 @@ static struct sk_buff *brcm_tag_rcv_prepend(struct sk_buff *skb,
const struct dsa_device_ops brcm_prepend_netdev_ops = {
.xmit = brcm_tag_xmit_prepend,
.rcv = brcm_tag_rcv_prepend,
+ .overhead = BRCM_TAG_LEN,
};
#endif
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index cd13cfc542ce..8b2f92e3f3a2 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -149,4 +149,5 @@ static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
const struct dsa_device_ops dsa_netdev_ops = {
.xmit = dsa_xmit,
.rcv = dsa_rcv,
+ .overhead = DSA_HLEN,
};
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 4083326b806e..f5b87ee5c94e 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -168,4 +168,5 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
const struct dsa_device_ops edsa_netdev_ops = {
.xmit = edsa_xmit,
.rcv = edsa_rcv,
+ .overhead = EDSA_HLEN,
};
diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c
index 49e9b73f1be3..cb6f82ffe5eb 100644
--- a/net/dsa/tag_gswip.c
+++ b/net/dsa/tag_gswip.c
@@ -106,4 +106,5 @@ static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb,
const struct dsa_device_ops gswip_netdev_ops = {
.xmit = gswip_tag_xmit,
.rcv = gswip_tag_rcv,
+ .overhead = GSWIP_RX_HEADER_LEN,
};
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index 0f62effad88f..da71b9e2af52 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -14,34 +14,18 @@
#include <net/dsa.h>
#include "dsa_priv.h"
-/* For Ingress (Host -> KSZ), 2 bytes are added before FCS.
- * ---------------------------------------------------------------------------
- * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|tag1(1byte)|FCS(4bytes)
- * ---------------------------------------------------------------------------
- * tag0 : Prioritization (not used now)
- * tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x10=port5)
- *
- * For Egress (KSZ -> Host), 1 byte is added before FCS.
- * ---------------------------------------------------------------------------
- * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
- * ---------------------------------------------------------------------------
- * tag0 : zero-based value represents port
- * (eg, 0x00=port1, 0x02=port3, 0x06=port7)
- */
-
-#define KSZ_INGRESS_TAG_LEN 2
-#define KSZ_EGRESS_TAG_LEN 1
+/* Typically only one byte is used for the tail tag. */
+#define KSZ_EGRESS_TAG_LEN 1
-static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
+static struct sk_buff *ksz_common_xmit(struct sk_buff *skb,
+ struct net_device *dev, int len)
{
- struct dsa_port *dp = dsa_slave_to_port(dev);
struct sk_buff *nskb;
int padlen;
- u8 *tag;
padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
- if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) {
+ if (skb_tailroom(skb) >= padlen + len) {
/* Let dsa_slave_xmit() free skb */
if (__skb_put_padto(skb, skb->len + padlen, false))
return NULL;
@@ -49,7 +33,7 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
nskb = skb;
} else {
nskb = alloc_skb(NET_IP_ALIGN + skb->len +
- padlen + KSZ_INGRESS_TAG_LEN, GFP_ATOMIC);
+ padlen + len, GFP_ATOMIC);
if (!nskb)
return NULL;
skb_reserve(nskb, NET_IP_ALIGN);
@@ -70,33 +54,88 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
consume_skb(skb);
}
- tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
- tag[0] = 0;
- tag[1] = 1 << dp->index; /* destination port */
-
return nskb;
}
-static struct sk_buff *ksz_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt)
+static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned int port, unsigned int len)
{
- u8 *tag;
- int source_port;
+ skb->dev = dsa_master_find_slave(dev, 0, port);
+ if (!skb->dev)
+ return NULL;
- tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+ pskb_trim_rcsum(skb, skb->len - len);
- source_port = tag[0] & 7;
+ return skb;
+}
- skb->dev = dsa_master_find_slave(dev, 0, source_port);
- if (!skb->dev)
+/* For Ingress (Host -> KSZ9477), 2 bytes are added before FCS.
+ * ---------------------------------------------------------------------------
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|tag1(1byte)|FCS(4bytes)
+ * ---------------------------------------------------------------------------
+ * tag0 : Prioritization (not used now)
+ * tag1 : each bit represents port (eg, 0x01=port1, 0x02=port2, 0x10=port5)
+ *
+ * For Egress (KSZ9477 -> Host), 1 byte is added before FCS.
+ * ---------------------------------------------------------------------------
+ * DA(6bytes)|SA(6bytes)|....|Data(nbytes)|tag0(1byte)|FCS(4bytes)
+ * ---------------------------------------------------------------------------
+ * tag0 : zero-based value represents port
+ * (eg, 0x00=port1, 0x02=port3, 0x06=port7)
+ */
+
+#define KSZ9477_INGRESS_TAG_LEN 2
+#define KSZ9477_PTP_TAG_LEN 4
+#define KSZ9477_PTP_TAG_INDICATION 0x80
+
+#define KSZ9477_TAIL_TAG_OVERRIDE BIT(9)
+#define KSZ9477_TAIL_TAG_LOOKUP BIT(10)
+
+static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct sk_buff *nskb;
+ u16 *tag;
+ u8 *addr;
+
+ nskb = ksz_common_xmit(skb, dev, KSZ9477_INGRESS_TAG_LEN);
+ if (!nskb)
return NULL;
- pskb_trim_rcsum(skb, skb->len - KSZ_EGRESS_TAG_LEN);
+ /* Tag encoding */
+ tag = skb_put(nskb, KSZ9477_INGRESS_TAG_LEN);
+ addr = skb_mac_header(nskb);
- return skb;
+ *tag = BIT(dp->index);
+
+ if (is_link_local_ether_addr(addr))
+ *tag |= KSZ9477_TAIL_TAG_OVERRIDE;
+
+ *tag = cpu_to_be16(*tag);
+
+ return nskb;
+}
+
+static struct sk_buff *ksz9477_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt)
+{
+ /* Tag decoding */
+ u8 *tag = skb_tail_pointer(skb) - KSZ_EGRESS_TAG_LEN;
+ unsigned int port = tag[0] & 7;
+ unsigned int len = KSZ_EGRESS_TAG_LEN;
+
+ /* Extra 4-bytes PTP timestamp */
+ if (tag[0] & KSZ9477_PTP_TAG_INDICATION)
+ len += KSZ9477_PTP_TAG_LEN;
+
+ return ksz_common_rcv(skb, dev, port, len);
}
-const struct dsa_device_ops ksz_netdev_ops = {
- .xmit = ksz_xmit,
- .rcv = ksz_rcv,
+const struct dsa_device_ops ksz9477_netdev_ops = {
+ .xmit = ksz9477_xmit,
+ .rcv = ksz9477_rcv,
+ .overhead = KSZ9477_INGRESS_TAG_LEN,
};
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
index 548c00254c07..f48889e46ff7 100644
--- a/net/dsa/tag_lan9303.c
+++ b/net/dsa/tag_lan9303.c
@@ -140,4 +140,5 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
const struct dsa_device_ops lan9303_netdev_ops = {
.xmit = lan9303_xmit,
.rcv = lan9303_rcv,
+ .overhead = LAN9303_TAG_LEN,
};
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index 11535bc70743..f39f4dfeda34 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -109,4 +109,5 @@ const struct dsa_device_ops mtk_netdev_ops = {
.xmit = mtk_tag_xmit,
.rcv = mtk_tag_rcv,
.flow_dissect = mtk_tag_flow_dissect,
+ .overhead = MTK_HDR_LEN,
};
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index 613f4ee97771..ed4f6dc26365 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -101,4 +101,5 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
const struct dsa_device_ops qca_netdev_ops = {
.xmit = qca_tag_xmit,
.rcv = qca_tag_rcv,
+ .overhead = QCA_HDR_LEN,
};
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 56197f0d9608..b40756ed6e57 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -84,4 +84,5 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
const struct dsa_device_ops trailer_netdev_ops = {
.xmit = trailer_xmit,
.rcv = trailer_rcv,
+ .overhead = 4,
};
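
Note: every tagger in the hunks above now advertises its header/trailer length in the new .overhead field, and dsa_master_set_mtu() (earlier in this section) sizes the master MTU as ETH_DATA_LEN plus that overhead. A self-contained model of the arithmetic; the ksz9477 and trailer values mirror the hunks, and EDSA_HLEN being 8 is an assumption about the upstream define:

#include <stdio.h>

#define ETH_DATA_LEN 1500	/* standard Ethernet payload */

struct tagger_model {
	const char *name;
	int overhead;		/* bytes added by the switch tag */
};

int main(void)
{
	struct tagger_model taggers[] = {
		{ "ksz9477", 2 }, { "trailer", 4 }, { "edsa", 8 },
	};

	for (int i = 0; i < 3; i++)
		printf("%s: master MTU -> %d\n", taggers[i].name,
		       ETH_DATA_LEN + taggers[i].overhead);
	return 0;
}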
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index fd8faa0dfa61..4c520110b04f 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -47,6 +47,7 @@
#include <linux/inet.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
+#include <linux/nvmem-consumer.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
@@ -165,15 +166,17 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
eth = (struct ethhdr *)skb->data;
skb_pull_inline(skb, ETH_HLEN);
- if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
- if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
- skb->pkt_type = PACKET_BROADCAST;
- else
- skb->pkt_type = PACKET_MULTICAST;
+ if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
+ dev->dev_addr))) {
+ if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+ if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ } else {
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
}
- else if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
- dev->dev_addr)))
- skb->pkt_type = PACKET_OTHERHOST;
/*
* Some variants of DSA tagging don't have an ethertype field
@@ -548,3 +551,40 @@ int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr)
return 0;
}
EXPORT_SYMBOL(eth_platform_get_mac_address);
+
+/**
+ * Obtain the MAC address from an nvmem cell named 'mac-address' associated
+ * with the given device.
+ *
+ * @dev: Device with which the mac-address cell is associated.
+ * @addrbuf: Buffer to which the MAC address will be copied on success.
+ *
+ * Returns 0 on success or a negative error number on failure.
+ */
+int nvmem_get_mac_address(struct device *dev, void *addrbuf)
+{
+ struct nvmem_cell *cell;
+ const void *mac;
+ size_t len;
+
+ cell = nvmem_cell_get(dev, "mac-address");
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ mac = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(mac))
+ return PTR_ERR(mac);
+
+ if (len != ETH_ALEN || !is_valid_ether_addr(mac)) {
+ kfree(mac);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(addrbuf, mac);
+ kfree(mac);
+
+ return 0;
+}
+EXPORT_SYMBOL(nvmem_get_mac_address);
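
Note: the new nvmem_get_mac_address() helper gives Ethernet drivers one common way to pull a MAC address from an nvmem cell named "mac-address". A hedged sketch of driver-probe usage — the surrounding driver, includes, and registration are assumed and omitted:

/* Sketch only: error handling and netdev registration trimmed. */
static int example_probe(struct platform_device *pdev)
{
	struct net_device *ndev = devm_alloc_etherdev(&pdev->dev, 0);
	int ret;

	ret = nvmem_get_mac_address(&pdev->dev, ndev->dev_addr);
	if (ret)			/* missing or invalid cell */
		eth_hw_addr_random(ndev);
	return 0;
}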
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index ca53efa17be1..8bec827081cd 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -48,6 +48,9 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
const struct ipv6hdr *hdr = ipv6_hdr(skb);
struct neighbour *n;
+ if (!daddr)
+ return -EINVAL;
+
/* TODO:
* if this package isn't ipv6 one, where should it be routed?
*/
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index b231e40f006a..0c25c0bcc4da 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -242,7 +242,7 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
* dev_set_mac_address require RTNL_LOCK
*/
rtnl_lock();
- rc = dev_set_mac_address(dev, &addr);
+ rc = dev_set_mac_address(dev, &addr, NULL);
rtnl_unlock();
if (rc)
goto dev_unregister;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1fbe2f815474..0dfb72c46671 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -208,6 +208,7 @@ int inet_listen(struct socket *sock, int backlog)
if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN)))
goto out;
+ sk->sk_max_ack_backlog = backlog;
/* Really, if the socket is already in listen state
* we can only allow the backlog to be adjusted.
*/
@@ -231,7 +232,6 @@ int inet_listen(struct socket *sock, int backlog)
goto out;
tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
}
- sk->sk_max_ack_backlog = backlog;
err = 0;
out:
@@ -1385,6 +1385,10 @@ out:
}
EXPORT_SYMBOL(inet_gso_segment);
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
+ struct sk_buff *));
struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
{
const struct net_offload *ops;
@@ -1494,7 +1498,8 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
- pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
+ ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
@@ -1556,6 +1561,8 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
return -EINVAL;
}
+INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
int inet_gro_complete(struct sk_buff *skb, int nhoff)
{
__be16 newlen = htons(skb->len - nhoff);
@@ -1581,7 +1588,9 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
* because any hdr with option will have been flushed in
* inet_gro_receive().
*/
- err = ops->callbacks.gro_complete(skb, nhoff + sizeof(*iph));
+ err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
+ tcp4_gro_complete, udp4_gro_complete,
+ skb, nhoff + sizeof(*iph));
out_unlock:
rcu_read_unlock();
@@ -1964,6 +1973,8 @@ static int __init inet_init(void)
/* Add UDP-Lite (RFC 3828) */
udplite4_register();
+ raw_init();
+
ping_init();
/*
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index a34602ae27de..04ba321ae5ce 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -952,17 +952,18 @@ static int inet_abc_len(__be32 addr)
{
int rc = -1; /* Something else, probably a multicast. */
- if (ipv4_is_zeronet(addr))
+ if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
rc = 0;
else {
__u32 haddr = ntohl(addr);
-
if (IN_CLASSA(haddr))
rc = 8;
else if (IN_CLASSB(haddr))
rc = 16;
else if (IN_CLASSC(haddr))
rc = 24;
+ else if (IN_CLASSE(haddr))
+ rc = 32;
}
return rc;
@@ -1100,7 +1101,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr)
inet_del_ifa(in_dev, ifap, 1);
break;
}
- ret = dev_change_flags(dev, ifr->ifr_flags);
+ ret = dev_change_flags(dev, ifr->ifr_flags, NULL);
break;
case SIOCSIFADDR: /* Set interface address (and family) */
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 9e1c840596c5..5459f41fc26f 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -125,10 +125,13 @@ static void esp_output_done(struct crypto_async_request *base, int err)
void *tmp;
struct xfrm_state *x;
- if (xo && (xo->flags & XFRM_DEV_RESUME))
- x = skb->sp->xvec[skb->sp->len - 1];
- else
+ if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+ struct sec_path *sp = skb_sec_path(skb);
+
+ x = sp->xvec[sp->len - 1];
+ } else {
x = skb_dst(skb)->xfrm;
+ }
tmp = ESP_SKB_CB(skb)->tmp;
esp_ssg_unref(x, tmp);
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 58834a10c0be..8756e0e790d2 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -46,11 +46,12 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
xo = xfrm_offload(skb);
if (!xo || !(xo->flags & CRYPTO_DONE)) {
- err = secpath_set(skb);
- if (err)
+ struct sec_path *sp = secpath_set(skb);
+
+ if (!sp)
goto out;
- if (skb->sp->len == XFRM_MAX_DEPTH)
+ if (sp->len == XFRM_MAX_DEPTH)
goto out;
x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
@@ -59,8 +60,8 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head,
if (!x)
goto out;
- skb->sp->xvec[skb->sp->len++] = x;
- skb->sp->olen++;
+ sp->xvec[sp->len++] = x;
+ sp->olen++;
xo = xfrm_offload(skb);
if (!xo) {
@@ -114,6 +115,7 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
struct crypto_aead *aead;
netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
+ struct sec_path *sp;
if (!xo)
return ERR_PTR(-EINVAL);
@@ -121,7 +123,8 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
return ERR_PTR(-EINVAL);
- x = skb->sp->xvec[skb->sp->len - 1];
+ sp = skb_sec_path(skb);
+ x = sp->xvec[sp->len - 1];
aead = x->data;
esph = ip_esp_hdr(skb);
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index b5c3937ca6ec..5022bc63863a 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1076,7 +1076,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
if (!fi)
goto failure;
fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
- cfg->fc_mx_len);
+ cfg->fc_mx_len, extack);
if (unlikely(IS_ERR(fi->fib_metrics))) {
err = PTR_ERR(fi->fib_metrics);
kfree(fi);
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 500a59906b87..0c9f171fb085 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -3,6 +3,7 @@
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
+#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/types.h>
#include <linux/kernel.h>
@@ -1003,15 +1004,89 @@ static int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
return 0;
}
+static int gue_err_proto_handler(int proto, struct sk_buff *skb, u32 info)
+{
+ const struct net_protocol *ipprot = rcu_dereference(inet_protos[proto]);
+
+ if (ipprot && ipprot->err_handler) {
+ if (!ipprot->err_handler(skb, info))
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int gue_err(struct sk_buff *skb, u32 info)
+{
+ int transport_offset = skb_transport_offset(skb);
+ struct guehdr *guehdr;
+ size_t optlen;
+ int ret;
+
+ if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+
+ switch (guehdr->version) {
+ case 0: /* Full GUE header present */
+ break;
+ case 1: {
+ /* Direct encapsulation of IPv4 or IPv6 */
+ skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
+
+ switch (((struct iphdr *)guehdr)->version) {
+ case 4:
+ ret = gue_err_proto_handler(IPPROTO_IPIP, skb, info);
+ goto out;
+#if IS_ENABLED(CONFIG_IPV6)
+ case 6:
+ ret = gue_err_proto_handler(IPPROTO_IPV6, skb, info);
+ goto out;
+#endif
+ default:
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+ default: /* Undefined version */
+ return -EOPNOTSUPP;
+ }
+
+ if (guehdr->control)
+ return -ENOENT;
+
+ optlen = guehdr->hlen << 2;
+
+ if (validate_gue_flags(guehdr, optlen))
+ return -EINVAL;
+
+ /* Handling exceptions for direct UDP encapsulation in GUE would lead to
+ * recursion. Besides, this kind of encapsulation can't even be
+ * configured currently. Discard this.
+ */
+ if (guehdr->proto_ctype == IPPROTO_UDP)
+ return -EOPNOTSUPP;
+
+ skb_set_transport_header(skb, -(int)sizeof(struct icmphdr));
+ ret = gue_err_proto_handler(guehdr->proto_ctype, skb, info);
+
+out:
+ skb_set_transport_header(skb, transport_offset);
+ return ret;
+}
+
static const struct ip_tunnel_encap_ops fou_iptun_ops = {
.encap_hlen = fou_encap_hlen,
.build_header = fou_build_header,
+ .err_handler = gue_err,
};
static const struct ip_tunnel_encap_ops gue_iptun_ops = {
.encap_hlen = gue_encap_hlen,
.build_header = gue_build_header,
+ .err_handler = gue_err,
};
static int ip_tunnel_encap_add_fou_ops(void)
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 7efe740c06eb..a4bf22ee3aed 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -151,20 +151,25 @@ drop:
return NET_RX_DROP;
}
-static void gre_err(struct sk_buff *skb, u32 info)
+static int gre_err(struct sk_buff *skb, u32 info)
{
const struct gre_protocol *proto;
const struct iphdr *iph = (const struct iphdr *)skb->data;
u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f;
+ int err = 0;
if (ver >= GREPROTO_MAX)
- return;
+ return -EINVAL;
rcu_read_lock();
proto = rcu_dereference(gre_proto[ver]);
if (proto && proto->err_handler)
proto->err_handler(skb, info);
+ else
+ err = -EPROTONOSUPPORT;
rcu_read_unlock();
+
+ return err;
}
static const struct net_protocol net_gre_protocol = {
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index d832beed6e3a..065997f414e6 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1079,7 +1079,7 @@ error:
goto drop;
}
-void icmp_err(struct sk_buff *skb, u32 info)
+int icmp_err(struct sk_buff *skb, u32 info)
{
struct iphdr *iph = (struct iphdr *)skb->data;
int offset = iph->ihl<<2;
@@ -1094,13 +1094,15 @@ void icmp_err(struct sk_buff *skb, u32 info)
*/
if (icmph->type != ICMP_ECHOREPLY) {
ping_err(skb, offset, info);
- return;
+ return 0;
}
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP);
else if (type == ICMP_REDIRECT)
ipv4_redirect(skb, net, 0, IPPROTO_ICMP);
+
+ return 0;
}
/*
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 15e7f7915a21..6ea523d71947 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -183,7 +183,9 @@ inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *
int i, low, high, attempt_half;
struct inet_bind_bucket *tb;
u32 remaining, offset;
+ int l3mdev;
+ l3mdev = inet_sk_bound_l3mdev(sk);
attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
inet_get_local_port_range(net, &low, &high);
@@ -219,7 +221,8 @@ other_parity_scan:
hinfo->bhash_size)];
spin_lock_bh(&head->lock);
inet_bind_bucket_for_each(tb, &head->chain)
- if (net_eq(ib_net(tb), net) && tb->port == port) {
+ if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+ tb->port == port) {
if (!inet_csk_bind_conflict(sk, tb, false, false))
goto success;
goto next_port;
@@ -293,6 +296,9 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
struct net *net = sock_net(sk);
struct inet_bind_bucket *tb = NULL;
kuid_t uid = sock_i_uid(sk);
+ int l3mdev;
+
+ l3mdev = inet_sk_bound_l3mdev(sk);
if (!port) {
head = inet_csk_find_open_port(sk, &tb, &port);
@@ -306,11 +312,12 @@ int inet_csk_get_port(struct sock *sk, unsigned short snum)
hinfo->bhash_size)];
spin_lock_bh(&head->lock);
inet_bind_bucket_for_each(tb, &head->chain)
- if (net_eq(ib_net(tb), net) && tb->port == port)
+ if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+ tb->port == port)
goto tb_found;
tb_not_found:
tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
- net, head, port);
+ net, head, port, l3mdev);
if (!tb)
goto fail_unlock;
tb_found:
@@ -874,7 +881,6 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
reqsk_queue_alloc(&icsk->icsk_accept_queue);
- sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
inet_csk_delack_init(sk);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 4e5bc4b2f14e..1a4e9ff02762 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -998,7 +998,9 @@ next_chunk:
if (!inet_diag_bc_sk(bc, sk))
goto next_normal;
- sock_hold(sk);
+ if (!refcount_inc_not_zero(&sk->sk_refcnt))
+ goto next_normal;
+
num_arr[accum] = num;
sk_arr[accum] = sk;
if (++accum == SKARR_SZ)
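
Note: the inet_diag hunk replaces an unconditional sock_hold() with refcount_inc_not_zero(). Under RCU a socket can still be found in the hash while its last reference is being dropped; taking the reference conditionally skips such dying sockets instead of resurrecting a zero refcount. The pattern, as a fragment:

	/* Only 0 -> 1 transitions are forbidden; take the ref conditionally. */
	if (!refcount_inc_not_zero(&sk->sk_refcnt))
		goto next_normal;	/* being freed: pretend we never saw it */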
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 411dd7a90046..942265d65eb3 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -65,12 +65,14 @@ static u32 sk_ehashfn(const struct sock *sk)
struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
struct net *net,
struct inet_bind_hashbucket *head,
- const unsigned short snum)
+ const unsigned short snum,
+ int l3mdev)
{
struct inet_bind_bucket *tb = kmem_cache_alloc(cachep, GFP_ATOMIC);
if (tb) {
write_pnet(&tb->ib_net, net);
+ tb->l3mdev = l3mdev;
tb->port = snum;
tb->fastreuse = 0;
tb->fastreuseport = 0;
@@ -135,6 +137,7 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
table->bhash_size);
struct inet_bind_hashbucket *head = &table->bhash[bhash];
struct inet_bind_bucket *tb;
+ int l3mdev;
spin_lock(&head->lock);
tb = inet_csk(sk)->icsk_bind_hash;
@@ -143,6 +146,8 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
return -ENOENT;
}
if (tb->port != port) {
+ l3mdev = inet_sk_bound_l3mdev(sk);
+
/* NOTE: using tproxy and redirecting skbs to a proxy
* on a different listener port breaks the assumption
* that the listener socket's icsk_bind_hash is the same
@@ -150,12 +155,13 @@ int __inet_inherit_port(const struct sock *sk, struct sock *child)
* create a new bind bucket for the child here. */
inet_bind_bucket_for_each(tb, &head->chain) {
if (net_eq(ib_net(tb), sock_net(sk)) &&
- tb->port == port)
+ tb->l3mdev == l3mdev && tb->port == port)
break;
}
if (!tb) {
tb = inet_bind_bucket_create(table->bind_bucket_cachep,
- sock_net(sk), head, port);
+ sock_net(sk), head, port,
+ l3mdev);
if (!tb) {
spin_unlock(&head->lock);
return -ENOMEM;
@@ -228,26 +234,16 @@ static inline int compute_score(struct sock *sk, struct net *net,
const int dif, const int sdif, bool exact_dif)
{
int score = -1;
- struct inet_sock *inet = inet_sk(sk);
- if (net_eq(sock_net(sk), net) && inet->inet_num == hnum &&
+ if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
!ipv6_only_sock(sk)) {
- __be32 rcv_saddr = inet->inet_rcv_saddr;
+ if (sk->sk_rcv_saddr != daddr)
+ return -1;
+
+ if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
+ return -1;
+
score = sk->sk_family == PF_INET ? 2 : 1;
- if (rcv_saddr) {
- if (rcv_saddr != daddr)
- return -1;
- score += 4;
- }
- if (sk->sk_bound_dev_if || exact_dif) {
- bool dev_match = (sk->sk_bound_dev_if == dif ||
- sk->sk_bound_dev_if == sdif);
-
- if (!dev_match)
- return -1;
- if (sk->sk_bound_dev_if)
- score += 4;
- }
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
}
@@ -303,26 +299,12 @@ struct sock *__inet_lookup_listener(struct net *net,
const __be32 daddr, const unsigned short hnum,
const int dif, const int sdif)
{
- unsigned int hash = inet_lhashfn(net, hnum);
- struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
- bool exact_dif = inet_exact_dif_match(net, skb);
struct inet_listen_hashbucket *ilb2;
- struct sock *sk, *result = NULL;
- int score, hiscore = 0;
+ struct sock *result = NULL;
unsigned int hash2;
- u32 phash = 0;
-
- if (ilb->count <= 10 || !hashinfo->lhash2)
- goto port_lookup;
-
- /* Too many sk in the ilb bucket (which is hashed by port alone).
- * Try lhash2 (which is hashed by port and addr) instead.
- */
hash2 = ipv4_portaddr_hash(net, daddr, hnum);
ilb2 = inet_lhash2_bucket(hashinfo, hash2);
- if (ilb2->count > ilb->count)
- goto port_lookup;
result = inet_lhash2_lookup(net, ilb2, skb, doff,
saddr, sport, daddr, hnum,
@@ -331,34 +313,12 @@ struct sock *__inet_lookup_listener(struct net *net,
goto done;
/* Lookup lhash2 with INADDR_ANY */
-
hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
ilb2 = inet_lhash2_bucket(hashinfo, hash2);
- if (ilb2->count > ilb->count)
- goto port_lookup;
result = inet_lhash2_lookup(net, ilb2, skb, doff,
- saddr, sport, daddr, hnum,
+ saddr, sport, htonl(INADDR_ANY), hnum,
dif, sdif);
- goto done;
-
-port_lookup:
- sk_for_each_rcu(sk, &ilb->head) {
- score = compute_score(sk, net, hnum, daddr,
- dif, sdif, exact_dif);
- if (score > hiscore) {
- if (sk->sk_reuseport) {
- phash = inet_ehashfn(net, daddr, hnum,
- saddr, sport);
- result = reuseport_select_sock(sk, phash,
- skb, doff);
- if (result)
- goto done;
- }
- result = sk;
- hiscore = score;
- }
- }
done:
if (unlikely(IS_ERR(result)))
return NULL;
@@ -675,6 +635,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
u32 remaining, offset;
int ret, i, low, high;
static u32 hint;
+ int l3mdev;
if (port) {
head = &hinfo->bhash[inet_bhashfn(net, port,
@@ -693,6 +654,8 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
return ret;
}
+ l3mdev = inet_sk_bound_l3mdev(sk);
+
inet_get_local_port_range(net, &low, &high);
high++; /* [32768, 60999] -> [32768, 61000[ */
remaining = high - low;
@@ -719,7 +682,8 @@ other_parity_scan:
* the established check is already unique enough.
*/
inet_bind_bucket_for_each(tb, &head->chain) {
- if (net_eq(ib_net(tb), net) && tb->port == port) {
+ if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
+ tb->port == port) {
if (tb->fastreuse >= 0 ||
tb->fastreuseport >= 0)
goto next_port;
@@ -732,7 +696,7 @@ other_parity_scan:
}
tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
- net, head, port);
+ net, head, port, l3mdev);
if (!tb) {
spin_unlock_bh(&head->lock);
return -ENOMEM;
@@ -798,13 +762,22 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
+static void init_hashinfo_lhash2(struct inet_hashinfo *h)
+{
+ int i;
+
+ for (i = 0; i <= h->lhash2_mask; i++) {
+ spin_lock_init(&h->lhash2[i].lock);
+ INIT_HLIST_HEAD(&h->lhash2[i].head);
+ h->lhash2[i].count = 0;
+ }
+}
+
void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
unsigned long numentries, int scale,
unsigned long low_limit,
unsigned long high_limit)
{
- unsigned int i;
-
h->lhash2 = alloc_large_system_hash(name,
sizeof(*h->lhash2),
numentries,
@@ -814,13 +787,23 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
&h->lhash2_mask,
low_limit,
high_limit);
+ init_hashinfo_lhash2(h);
+}
- for (i = 0; i <= h->lhash2_mask; i++) {
- spin_lock_init(&h->lhash2[i].lock);
- INIT_HLIST_HEAD(&h->lhash2[i].head);
- h->lhash2[i].count = 0;
- }
+int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
+{
+ h->lhash2 = kmalloc_array(INET_LHTABLE_SIZE, sizeof(*h->lhash2), GFP_KERNEL);
+ if (!h->lhash2)
+ return -ENOMEM;
+
+ h->lhash2_mask = INET_LHTABLE_SIZE - 1;
+ /* INET_LHTABLE_SIZE must be a power of 2 */
+ BUG_ON(INET_LHTABLE_SIZE & h->lhash2_mask);
+
+ init_hashinfo_lhash2(h);
+ return 0;
}
+EXPORT_SYMBOL_GPL(inet_hashinfo2_init_mod);
int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
{
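
Note: the inet_hashtables.c hunks drop the old port-only listener walk — __inet_lookup_listener() now always consults lhash2, first hashed by (port, daddr) and then by (port, INADDR_ANY) — and bind buckets gain an l3mdev key so the same port can be bound independently per VRF. A sketch of the widened bucket key, using the helpers the hunk itself uses:

/* Sketch: a bind bucket now matches on (net, l3mdev, port). */
static bool bucket_matches(const struct inet_bind_bucket *tb,
			   const struct net *net, int l3mdev,
			   unsigned short port)
{
	return net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
	       tb->port == port;
}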
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 32662e9e5d21..00ec819f949b 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -69,9 +69,17 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s
__IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
__IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
+#ifdef CONFIG_NET_SWITCHDEV
+ if (skb->offload_l3_fwd_mark) {
+ consume_skb(skb);
+ return 0;
+ }
+#endif
+
if (unlikely(opt->optlen))
ip_forward_options(skb);
+ skb->tstamp = 0;
return dst_output(net, sk, skb);
}
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index d6ee343fdb86..867be8f7f1fa 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -346,10 +346,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
struct rb_node **rbn, *parent;
struct sk_buff *skb1, *prev_tail;
+ int ihl, end, skb1_run_end;
struct net_device *dev;
unsigned int fragsize;
int flags, offset;
- int ihl, end;
int err = -ENOENT;
u8 ecn;
@@ -419,7 +419,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
* overlapping fragment, the entire datagram (and any constituent
* fragments) MUST be silently discarded.
*
- * We do the same here for IPv4 (and increment an snmp counter).
+ * We do the same here for IPv4 (and increment an snmp counter) but
+ * we do not want to drop the whole queue in response to a duplicate
+ * fragment.
*/
err = -EINVAL;
@@ -444,13 +446,17 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
do {
parent = *rbn;
skb1 = rb_to_skb(parent);
+ skb1_run_end = skb1->ip_defrag_offset +
+ FRAG_CB(skb1)->frag_run_len;
if (end <= skb1->ip_defrag_offset)
rbn = &parent->rb_left;
- else if (offset >= skb1->ip_defrag_offset +
- FRAG_CB(skb1)->frag_run_len)
+ else if (offset >= skb1_run_end)
rbn = &parent->rb_right;
- else /* Found an overlap with skb1. */
- goto overlap;
+ else if (offset >= skb1->ip_defrag_offset &&
+ end <= skb1_run_end)
+ goto err; /* No new data, potential duplicate */
+ else
+ goto overlap; /* Found an overlap */
} while (*rbn);
/* Here we have parent properly set, and rbn pointing to
* one of its NULL left/right children. Insert skb.
@@ -515,6 +521,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
struct rb_node *rbn;
int len;
int ihlen;
+ int delta;
int err;
u8 ecn;
@@ -556,10 +563,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
if (len > 65535)
goto out_oversize;
+ delta = -head->truesize;
+
/* Head of list must not be cloned. */
if (skb_unclone(head, GFP_ATOMIC))
goto out_nomem;
+ delta += head->truesize;
+ if (delta)
+ add_frag_mem_limit(qp->q.net, delta);
+
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
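
Note: two fixes land in the ip_fragment.c hunks: a fragment wholly contained in an existing run is treated as a duplicate and dropped on its own (instead of tearing down the whole queue), and since skb_unclone() on the queue head may reallocate and grow head->truesize, the per-netns frag memory accounting is adjusted by the difference. The accounting idiom, as a fragment:

	/* skb_unclone() can change head->truesize; charge the delta. */
	delta = -head->truesize;
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;
	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(qp->q.net, delta);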
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 38befe829caf..c7a7bd58a23c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -121,8 +121,8 @@ static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;
-static void ipgre_err(struct sk_buff *skb, u32 info,
- const struct tnl_ptk_info *tpi)
+static int ipgre_err(struct sk_buff *skb, u32 info,
+ const struct tnl_ptk_info *tpi)
{
/* All the routers (except for Linux) return only
@@ -146,17 +146,32 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
unsigned int data_len = 0;
struct ip_tunnel *t;
+ if (tpi->proto == htons(ETH_P_TEB))
+ itn = net_generic(net, gre_tap_net_id);
+ else if (tpi->proto == htons(ETH_P_ERSPAN) ||
+ tpi->proto == htons(ETH_P_ERSPAN2))
+ itn = net_generic(net, erspan_net_id);
+ else
+ itn = net_generic(net, ipgre_net_id);
+
+ iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
+ t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
+ iph->daddr, iph->saddr, tpi->key);
+
+ if (!t)
+ return -ENOENT;
+
switch (type) {
default:
case ICMP_PARAMETERPROB:
- return;
+ return 0;
case ICMP_DEST_UNREACH:
switch (code) {
case ICMP_SR_FAILED:
case ICMP_PORT_UNREACH:
/* Impossible event. */
- return;
+ return 0;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -168,7 +183,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
case ICMP_TIME_EXCEEDED:
if (code != ICMP_EXC_TTL)
- return;
+ return 0;
data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
break;
@@ -176,40 +191,27 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
break;
}
- if (tpi->proto == htons(ETH_P_TEB))
- itn = net_generic(net, gre_tap_net_id);
- else if (tpi->proto == htons(ETH_P_ERSPAN) ||
- tpi->proto == htons(ETH_P_ERSPAN2))
- itn = net_generic(net, erspan_net_id);
- else
- itn = net_generic(net, ipgre_net_id);
-
- iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
- t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
- iph->daddr, iph->saddr, tpi->key);
-
- if (!t)
- return;
-
#if IS_ENABLED(CONFIG_IPV6)
if (tpi->proto == htons(ETH_P_IPV6) &&
!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
type, data_len))
- return;
+ return 0;
#endif
if (t->parms.iph.daddr == 0 ||
ipv4_is_multicast(t->parms.iph.daddr))
- return;
+ return 0;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
- return;
+ return 0;
if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
t->err_count++;
else
t->err_count = 1;
t->err_time = jiffies;
+
+ return 0;
}
static void gre_err(struct sk_buff *skb, u32 info)
@@ -1339,12 +1341,6 @@ static void ipgre_tap_setup(struct net_device *dev)
ip_tunnel_setup(dev, gre_tap_net_id);
}
-bool is_gretap_dev(const struct net_device *dev)
-{
- return dev->netdev_ops == &gre_tap_netdev_ops;
-}
-EXPORT_SYMBOL_GPL(is_gretap_dev);
-
static int ipgre_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -1601,7 +1597,7 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
memset(&tb, 0, sizeof(tb));
dev = rtnl_create_link(net, name, name_assign_type,
- &ipgre_tap_ops, tb);
+ &ipgre_tap_ops, tb, NULL);
if (IS_ERR(dev))
return dev;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 35a786c0aaa0..26921f6b3b92 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -188,51 +188,50 @@ bool ip_call_ra_chain(struct sk_buff *skb)
return false;
}
-static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
{
- __skb_pull(skb, skb_network_header_len(skb));
-
- rcu_read_lock();
- {
- int protocol = ip_hdr(skb)->protocol;
- const struct net_protocol *ipprot;
- int raw;
+ const struct net_protocol *ipprot;
+ int raw, ret;
- resubmit:
- raw = raw_local_deliver(skb, protocol);
+resubmit:
+ raw = raw_local_deliver(skb, protocol);
- ipprot = rcu_dereference(inet_protos[protocol]);
- if (ipprot) {
- int ret;
-
- if (!ipprot->no_policy) {
- if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- kfree_skb(skb);
- goto out;
- }
- nf_reset(skb);
+ ipprot = rcu_dereference(inet_protos[protocol]);
+ if (ipprot) {
+ if (!ipprot->no_policy) {
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+ kfree_skb(skb);
+ return;
}
- ret = ipprot->handler(skb);
- if (ret < 0) {
- protocol = -ret;
- goto resubmit;
+ nf_reset(skb);
+ }
+ ret = ipprot->handler(skb);
+ if (ret < 0) {
+ protocol = -ret;
+ goto resubmit;
+ }
+ __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
+ } else {
+ if (!raw) {
+ if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+ __IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
+ icmp_send(skb, ICMP_DEST_UNREACH,
+ ICMP_PROT_UNREACH, 0);
}
- __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
+ kfree_skb(skb);
} else {
- if (!raw) {
- if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
- __IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
- icmp_send(skb, ICMP_DEST_UNREACH,
- ICMP_PROT_UNREACH, 0);
- }
- kfree_skb(skb);
- } else {
- __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
- consume_skb(skb);
- }
+ __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
+ consume_skb(skb);
}
}
- out:
+}
+
+static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ __skb_pull(skb, skb_network_header_len(skb));
+
+ rcu_read_lock();
+ ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
rcu_read_unlock();
return 0;
@@ -547,7 +546,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
list_for_each_entry_safe(skb, next, head, list) {
struct dst_entry *dst;
- list_del(&skb->list);
+ skb_list_del_init(skb);
/* if ingress device is enslaved to an L3 master device pass the
* skb to its handler for processing
*/
@@ -594,7 +593,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
- list_del(&skb->list);
+ skb_list_del_init(skb);
skb = ip_rcv_core(skb, net);
if (skb == NULL)
continue;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c09219e7f230..c80188875f39 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -533,6 +533,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
+ skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
to->ipvs_property = from->ipvs_property;
#endif
@@ -867,6 +868,7 @@ static int __ip_append_data(struct sock *sk,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
+ struct ubuf_info *uarg = NULL;
struct sk_buff *skb;
struct ip_options *opt = cork->opt;
@@ -880,8 +882,8 @@ static int __ip_append_data(struct sock *sk,
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
unsigned int wmem_alloc_delta = 0;
+ bool paged, extra_uref;
u32 tskey = 0;
- bool paged;
skb = skb_peek_tail(queue);
@@ -916,6 +918,20 @@ static int __ip_append_data(struct sock *sk,
(!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
csummode = CHECKSUM_PARTIAL;
+ if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
+ uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
+ if (!uarg)
+ return -ENOBUFS;
+ extra_uref = true;
+ if (rt->dst.dev->features & NETIF_F_SG &&
+ csummode == CHECKSUM_PARTIAL) {
+ paged = true;
+ } else {
+ uarg->zerocopy = 0;
+ skb_zcopy_set(skb, uarg, &extra_uref);
+ }
+ }
+
cork->length += length;
/* So, what's going on in the loop below?
@@ -939,7 +955,7 @@ static int __ip_append_data(struct sock *sk,
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen;
- unsigned int pagedlen = 0;
+ unsigned int pagedlen;
struct sk_buff *skb_prev;
alloc_new_skb:
skb_prev = skb;
@@ -956,6 +972,7 @@ alloc_new_skb:
if (datalen > mtu - fragheaderlen)
datalen = maxfraglen - fragheaderlen;
fraglen = datalen + fragheaderlen;
+ pagedlen = 0;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
@@ -1000,12 +1017,6 @@ alloc_new_skb:
skb->csum = 0;
skb_reserve(skb, hh_len);
- /* only the initial fragment is time stamped */
- skb_shinfo(skb)->tx_flags = cork->tx_flags;
- cork->tx_flags = 0;
- skb_shinfo(skb)->tskey = tskey;
- tskey = 0;
-
/*
* Find where to start putting bytes.
*/
@@ -1038,6 +1049,13 @@ alloc_new_skb:
exthdrlen = 0;
csummode = CHECKSUM_NONE;
+ /* only the initial fragment is time stamped */
+ skb_shinfo(skb)->tx_flags = cork->tx_flags;
+ cork->tx_flags = 0;
+ skb_shinfo(skb)->tskey = tskey;
+ tskey = 0;
+ skb_zcopy_set(skb, uarg, &extra_uref);
+
if ((flags & MSG_CONFIRM) && !skb_prev)
skb_set_dst_pending_confirm(skb, 1);
@@ -1067,7 +1085,7 @@ alloc_new_skb:
err = -EFAULT;
goto error;
}
- } else {
+ } else if (!uarg || !uarg->zerocopy) {
int i = skb_shinfo(skb)->nr_frags;
err = -ENOMEM;
@@ -1097,6 +1115,10 @@ alloc_new_skb:
skb->data_len += copy;
skb->truesize += copy;
wmem_alloc_delta += copy;
+ } else {
+ err = skb_zerocopy_iter_dgram(skb, from, copy);
+ if (err < 0)
+ goto error;
}
offset += copy;
length -= copy;
@@ -1109,6 +1131,8 @@ alloc_new_skb:
error_efault:
err = -EFAULT;
error:
+ if (uarg)
+ sock_zerocopy_put_abort(uarg, extra_uref);
cork->length -= length;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index c248e0dccbe1..9a0e67b52a4e 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -120,7 +120,7 @@ int __iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
}
skb_clear_hash_if_not_l4(skb);
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
skb_set_queue_mapping(skb, 0);
skb_scrub_packet(skb, xnet);
@@ -151,6 +151,7 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
sizeof(struct in6_addr));
else
dst->key.u.ipv4.dst = src->key.u.ipv4.src;
+ dst->key.tun_flags = src->key.tun_flags;
dst->mode = src->mode | IP_TUNNEL_INFO_TX;
return res;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 88212615bf4c..b9a9873c25c6 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -220,7 +220,7 @@ static int __init ic_open_devs(void)
for_each_netdev(&init_net, dev) {
if (!(dev->flags & IFF_LOOPBACK) && !netdev_uses_dsa(dev))
continue;
- if (dev_change_flags(dev, dev->flags | IFF_UP) < 0)
+ if (dev_change_flags(dev, dev->flags | IFF_UP, NULL) < 0)
pr_err("IP-Config: Failed to open %s\n", dev->name);
}
@@ -238,7 +238,7 @@ static int __init ic_open_devs(void)
if (ic_proto_enabled && !able)
continue;
oflags = dev->flags;
- if (dev_change_flags(dev, oflags | IFF_UP) < 0) {
+ if (dev_change_flags(dev, oflags | IFF_UP, NULL) < 0) {
pr_err("IP-Config: Failed to open %s\n",
dev->name);
continue;
@@ -315,7 +315,7 @@ static void __init ic_close_devs(void)
dev = d->dev;
if (d != ic_dev && !netdev_uses_dsa(dev)) {
pr_debug("IP-Config: Downing %s\n", dev->name);
- dev_change_flags(dev, d->flags);
+ dev_change_flags(dev, d->flags, NULL);
}
kfree(d);
}
@@ -429,6 +429,8 @@ static int __init ic_defaults(void)
ic_netmask = htonl(IN_CLASSB_NET);
else if (IN_CLASSC(ntohl(ic_myaddr)))
ic_netmask = htonl(IN_CLASSC_NET);
+ else if (IN_CLASSE(ntohl(ic_myaddr)))
+ ic_netmask = htonl(IN_CLASSE_NET);
else {
pr_err("IP-Config: Unable to guess netmask for address %pI4\n",
&ic_myaddr);
@@ -1361,18 +1363,7 @@ static int ntp_servers_seq_show(struct seq_file *seq, void *v)
}
return 0;
}
-
-static int ntp_servers_seq_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ntp_servers_seq_show, NULL);
-}
-
-static const struct file_operations ntp_servers_seq_fops = {
- .open = ntp_servers_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ntp_servers_seq);
#endif /* CONFIG_PROC_FS */
/*
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index e65287c27e3d..57c5dd283a2c 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -140,6 +140,13 @@ static int ipip_err(struct sk_buff *skb, u32 info)
struct ip_tunnel *t;
int err = 0;
+ t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->daddr, iph->saddr, 0);
+ if (!t) {
+ err = -ENOENT;
+ goto out;
+ }
+
switch (type) {
case ICMP_DEST_UNREACH:
switch (code) {
@@ -167,13 +174,6 @@ static int ipip_err(struct sk_buff *skb, u32 info)
goto out;
}
- t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
- iph->daddr, iph->saddr, 0);
- if (!t) {
- err = -ENOENT;
- goto out;
- }
-
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
ipv4_update_pmtu(skb, net, info, t->parms.link, iph->protocol);
goto out;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a6defbec4f1b..ddbf8c9a1abb 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -69,6 +69,8 @@
#include <net/nexthop.h>
#include <net/switchdev.h>
+#include <linux/nospec.h>
+
struct ipmr_rule {
struct fib_rule common;
};
@@ -506,7 +508,7 @@ static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
dev->flags |= IFF_MULTICAST;
if (!ipmr_init_vif_indev(dev))
goto failure;
- if (dev_open(dev))
+ if (dev_open(dev, NULL))
goto failure;
dev_hold(dev);
}
@@ -589,7 +591,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
if (!ipmr_init_vif_indev(dev))
goto failure;
- if (dev_open(dev))
+ if (dev_open(dev, NULL))
goto failure;
dev_hold(dev);
@@ -1612,6 +1614,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
return -EFAULT;
if (vr.vifi >= mrt->maxvif)
return -EINVAL;
+ vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
read_lock(&mrt_lock);
vif = &mrt->vif_table[vr.vifi];
if (VIF_EXISTS(mrt, vr.vifi)) {
@@ -1686,6 +1689,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
return -EFAULT;
if (vr.vifi >= mrt->maxvif)
return -EINVAL;
+ vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
read_lock(&mrt_lock);
vif = &mrt->vif_table[vr.vifi];
if (VIF_EXISTS(mrt, vr.vifi)) {
@@ -1802,7 +1806,7 @@ static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
struct vif_device *out_vif = &mrt->vif_table[out_vifi];
struct vif_device *in_vif = &mrt->vif_table[in_vifi];
- if (!skb->offload_mr_fwd_mark)
+ if (!skb->offload_l3_fwd_mark)
return false;
if (!out_vif->dev_parent_id.id_len || !in_vif->dev_parent_id.id_len)
return false;
@@ -1820,8 +1824,7 @@ static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
/* Processing handlers for ipmr_forward */
static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
- int in_vifi, struct sk_buff *skb,
- struct mfc_cache *c, int vifi)
+ int in_vifi, struct sk_buff *skb, int vifi)
{
const struct iphdr *iph = ip_hdr(skb);
struct vif_device *vif = &mrt->vif_table[vifi];
@@ -2027,7 +2030,7 @@ forward:
if (skb2)
ipmr_queue_xmit(net, mrt, true_vifi,
- skb2, c, psend);
+ skb2, psend);
}
psend = ct;
}
@@ -2039,9 +2042,9 @@ last_forward:
if (skb2)
ipmr_queue_xmit(net, mrt, true_vifi, skb2,
- c, psend);
+ psend);
} else {
- ipmr_queue_xmit(net, mrt, true_vifi, skb, c, psend);
+ ipmr_queue_xmit(net, mrt, true_vifi, skb, psend);
return;
}
}
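
The array_index_nospec() additions in the two ioctl hunks above are the standard Spectre-v1 hardening: the vr.vifi bounds check alone can be speculatively bypassed, so the index is clamped before the table lookup. Condensed sketch of the pattern, using the fields from the diff:

#include <linux/nospec.h>

	if (vr.vifi >= mrt->maxvif)
		return -EINVAL;
	/* Clamp to [0, maxvif) even under misspeculation; this compiles to
	 * arithmetic masking, not a branch the CPU could predict around. */
	vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
	vif = &mrt->vif_table[vr.vifi];
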
diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c
index 6d218f5a2e71..ca9a5fefdefa 100644
--- a/net/ipv4/metrics.c
+++ b/net/ipv4/metrics.c
@@ -6,7 +6,8 @@
#include <net/tcp.h>
static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
- int fc_mx_len, u32 *metrics)
+ int fc_mx_len, u32 *metrics,
+ struct netlink_ext_ack *extack)
{
bool ecn_ca = false;
struct nlattr *nla;
@@ -21,19 +22,26 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
if (!type)
continue;
- if (type > RTAX_MAX)
+ if (type > RTAX_MAX) {
+ NL_SET_ERR_MSG(extack, "Invalid metric type");
return -EINVAL;
+ }
if (type == RTAX_CC_ALGO) {
char tmp[TCP_CA_NAME_MAX];
nla_strlcpy(tmp, nla, sizeof(tmp));
val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
- if (val == TCP_CA_UNSPEC)
+ if (val == TCP_CA_UNSPEC) {
+ NL_SET_ERR_MSG(extack, "Unknown tcp congestion algorithm");
return -EINVAL;
+ }
} else {
- if (nla_len(nla) != sizeof(u32))
+ if (nla_len(nla) != sizeof(u32)) {
+ NL_SET_ERR_MSG_ATTR(extack, nla,
+ "Invalid attribute in metrics");
return -EINVAL;
+ }
val = nla_get_u32(nla);
}
if (type == RTAX_ADVMSS && val > 65535 - 40)
@@ -42,8 +50,10 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
val = 65535 - 15;
if (type == RTAX_HOPLIMIT && val > 255)
val = 255;
- if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
+ if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) {
+ NL_SET_ERR_MSG(extack, "Unknown flag set in feature mask in metrics attribute");
return -EINVAL;
+ }
metrics[type - 1] = val;
}
@@ -54,7 +64,8 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx,
}
struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
- int fc_mx_len)
+ int fc_mx_len,
+ struct netlink_ext_ack *extack)
{
struct dst_metrics *fib_metrics;
int err;
@@ -66,7 +77,8 @@ struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
if (unlikely(!fib_metrics))
return ERR_PTR(-ENOMEM);
- err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics);
+ err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics,
+ extack);
if (!err) {
refcount_set(&fib_metrics->refcnt, 1);
} else {
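
The extack plumbing added above lets route-metric validation failures carry a human-readable reason back in the netlink extended ack instead of a bare -EINVAL. A hedged sketch of the pattern in isolation (validate_metric is a hypothetical helper, not from this diff):

static int validate_metric(struct nlattr *nla, struct netlink_ext_ack *extack)
{
	if (nla_len(nla) != sizeof(u32)) {
		/* The string travels back in the NLMSGERR_ATTR_MSG TLV; the
		 * _ATTR variant also records the offending attribute offset. */
		NL_SET_ERR_MSG_ATTR(extack, nla, "Invalid attribute in metrics");
		return -EINVAL;
	}
	return 0;
}
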
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 184bf2e0a1ed..80f72cc5ca8d 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -156,15 +156,10 @@ config NF_NAT_SNMP_BASIC
To compile it as a module, choose M here. If unsure, say N.
-config NF_NAT_PROTO_GRE
- tristate
- depends on NF_CT_PROTO_GRE
-
config NF_NAT_PPTP
tristate
depends on NF_CONNTRACK
default NF_CONNTRACK_PPTP
- select NF_NAT_PROTO_GRE
config NF_NAT_H323
tristate
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 367993adf4d3..fd7122e0e2c9 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -3,7 +3,7 @@
# Makefile for the netfilter modules on top of IPv4.
#
-nf_nat_ipv4-y := nf_nat_l3proto_ipv4.o nf_nat_proto_icmp.o
+nf_nat_ipv4-y := nf_nat_l3proto_ipv4.o
nf_nat_ipv4-$(CONFIG_NF_NAT_MASQUERADE_IPV4) += nf_nat_masquerade_ipv4.o
obj-$(CONFIG_NF_NAT_IPV4) += nf_nat_ipv4.o
@@ -28,9 +28,6 @@ nf_nat_snmp_basic-y := nf_nat_snmp_basic.asn1.o nf_nat_snmp_basic_main.o
$(obj)/nf_nat_snmp_basic_main.o: $(obj)/nf_nat_snmp_basic.asn1.h
obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
-# NAT protocols (nf_nat)
-obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
-
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 2c8d313ae216..b61977db9b7f 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -56,18 +56,15 @@ struct clusterip_config {
#endif
enum clusterip_hashmode hash_mode; /* which hashing mode */
u_int32_t hash_initval; /* hash initialization */
- struct rcu_head rcu;
-
+ struct rcu_head rcu; /* for call_rcu */
+ struct net *net; /* netns for pernet list */
char ifname[IFNAMSIZ]; /* device ifname */
- struct notifier_block notifier; /* refresh c->ifindex in it */
};
#ifdef CONFIG_PROC_FS
static const struct file_operations clusterip_proc_fops;
#endif
-static unsigned int clusterip_net_id __read_mostly;
-
struct clusterip_net {
struct list_head configs;
/* lock protects the configs list */
@@ -75,51 +72,66 @@ struct clusterip_net {
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *procdir;
+ /* mutex protects the config->pde */
+ struct mutex mutex;
#endif
};
+static unsigned int clusterip_net_id __read_mostly;
+static inline struct clusterip_net *clusterip_pernet(struct net *net)
+{
+ return net_generic(net, clusterip_net_id);
+}
+
static inline void
clusterip_config_get(struct clusterip_config *c)
{
refcount_inc(&c->refcount);
}
-
static void clusterip_config_rcu_free(struct rcu_head *head)
{
- kfree(container_of(head, struct clusterip_config, rcu));
+ struct clusterip_config *config;
+ struct net_device *dev;
+
+ config = container_of(head, struct clusterip_config, rcu);
+ dev = dev_get_by_name(config->net, config->ifname);
+ if (dev) {
+ dev_mc_del(dev, config->clustermac);
+ dev_put(dev);
+ }
+ kfree(config);
}
static inline void
clusterip_config_put(struct clusterip_config *c)
{
if (refcount_dec_and_test(&c->refcount))
- call_rcu_bh(&c->rcu, clusterip_config_rcu_free);
+ call_rcu(&c->rcu, clusterip_config_rcu_free);
}
/* decrease the count of entries using/referencing this config. If last
* entry(rule) is removed, remove the config from lists, but don't free it
* yet, since proc-files could still be holding references */
static inline void
-clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
+clusterip_config_entry_put(struct clusterip_config *c)
{
- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+ struct clusterip_net *cn = clusterip_pernet(c->net);
local_bh_disable();
if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
+ list_del_rcu(&c->list);
+ spin_unlock(&cn->lock);
+ local_bh_enable();
/* In case anyone still accesses the file, the open/close
* functions are also incrementing the refcount on their own,
* so it's safe to remove the entry even if it's in use. */
#ifdef CONFIG_PROC_FS
+ mutex_lock(&cn->mutex);
if (cn->procdir)
proc_remove(c->pde);
+ mutex_unlock(&cn->mutex);
#endif
- list_del_rcu(&c->list);
- spin_unlock(&cn->lock);
- local_bh_enable();
-
- unregister_netdevice_notifier(&c->notifier);
-
return;
}
local_bh_enable();
@@ -129,7 +141,7 @@ static struct clusterip_config *
__clusterip_config_find(struct net *net, __be32 clusterip)
{
struct clusterip_config *c;
- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+ struct clusterip_net *cn = clusterip_pernet(net);
list_for_each_entry_rcu(c, &cn->configs, list) {
if (c->clusterip == clusterip)
@@ -181,32 +193,37 @@ clusterip_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net *net = dev_net(dev);
+ struct clusterip_net *cn = clusterip_pernet(net);
struct clusterip_config *c;
- c = container_of(this, struct clusterip_config, notifier);
- switch (event) {
- case NETDEV_REGISTER:
- if (!strcmp(dev->name, c->ifname)) {
- c->ifindex = dev->ifindex;
- dev_mc_add(dev, c->clustermac);
- }
- break;
- case NETDEV_UNREGISTER:
- if (dev->ifindex == c->ifindex) {
- dev_mc_del(dev, c->clustermac);
- c->ifindex = -1;
- }
- break;
- case NETDEV_CHANGENAME:
- if (!strcmp(dev->name, c->ifname)) {
- c->ifindex = dev->ifindex;
- dev_mc_add(dev, c->clustermac);
- } else if (dev->ifindex == c->ifindex) {
- dev_mc_del(dev, c->clustermac);
- c->ifindex = -1;
+ spin_lock_bh(&cn->lock);
+ list_for_each_entry_rcu(c, &cn->configs, list) {
+ switch (event) {
+ case NETDEV_REGISTER:
+ if (!strcmp(dev->name, c->ifname)) {
+ c->ifindex = dev->ifindex;
+ dev_mc_add(dev, c->clustermac);
+ }
+ break;
+ case NETDEV_UNREGISTER:
+ if (dev->ifindex == c->ifindex) {
+ dev_mc_del(dev, c->clustermac);
+ c->ifindex = -1;
+ }
+ break;
+ case NETDEV_CHANGENAME:
+ if (!strcmp(dev->name, c->ifname)) {
+ c->ifindex = dev->ifindex;
+ dev_mc_add(dev, c->clustermac);
+ } else if (dev->ifindex == c->ifindex) {
+ dev_mc_del(dev, c->clustermac);
+ c->ifindex = -1;
+ }
+ break;
}
- break;
}
+ spin_unlock_bh(&cn->lock);
return NOTIFY_DONE;
}
@@ -215,30 +232,44 @@ static struct clusterip_config *
clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
__be32 ip, const char *iniface)
{
- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+ struct clusterip_net *cn = clusterip_pernet(net);
struct clusterip_config *c;
+ struct net_device *dev;
int err;
+ if (iniface[0] == '\0') {
+ pr_info("Please specify an interface name\n");
+ return ERR_PTR(-EINVAL);
+ }
+
c = kzalloc(sizeof(*c), GFP_ATOMIC);
if (!c)
return ERR_PTR(-ENOMEM);
- strcpy(c->ifname, iniface);
- c->ifindex = -1;
- c->clusterip = ip;
+ dev = dev_get_by_name(net, iniface);
+ if (!dev) {
+ pr_info("no such interface %s\n", iniface);
+ kfree(c);
+ return ERR_PTR(-ENOENT);
+ }
+ c->ifindex = dev->ifindex;
+ strcpy(c->ifname, dev->name);
memcpy(&c->clustermac, &i->clustermac, ETH_ALEN);
+ dev_mc_add(dev, c->clustermac);
+ dev_put(dev);
+
+ c->clusterip = ip;
c->num_total_nodes = i->num_total_nodes;
clusterip_config_init_nodelist(c, i);
c->hash_mode = i->hash_mode;
c->hash_initval = i->hash_initval;
+ c->net = net;
refcount_set(&c->refcount, 1);
spin_lock_bh(&cn->lock);
if (__clusterip_config_find(net, ip)) {
- spin_unlock_bh(&cn->lock);
- kfree(c);
-
- return ERR_PTR(-EBUSY);
+ err = -EBUSY;
+ goto out_config_put;
}
list_add_rcu(&c->list, &cn->configs);
@@ -250,9 +281,11 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
/* create proc dir entry */
sprintf(buffer, "%pI4", &ip);
+ mutex_lock(&cn->mutex);
c->pde = proc_create_data(buffer, 0600,
cn->procdir,
&clusterip_proc_fops, c);
+ mutex_unlock(&cn->mutex);
if (!c->pde) {
err = -ENOMEM;
goto err;
@@ -260,22 +293,17 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
}
#endif
- c->notifier.notifier_call = clusterip_netdev_event;
- err = register_netdevice_notifier(&c->notifier);
- if (!err) {
- refcount_set(&c->entries, 1);
- return c;
- }
+ refcount_set(&c->entries, 1);
+ return c;
#ifdef CONFIG_PROC_FS
- proc_remove(c->pde);
err:
#endif
spin_lock_bh(&cn->lock);
list_del_rcu(&c->list);
+out_config_put:
spin_unlock_bh(&cn->lock);
clusterip_config_put(c);
-
return ERR_PTR(err);
}
@@ -475,34 +503,20 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
&e->ip.dst.s_addr);
return -EINVAL;
} else {
- struct net_device *dev;
-
- if (e->ip.iniface[0] == '\0') {
- pr_info("Please specify an interface name\n");
- return -EINVAL;
- }
-
- dev = dev_get_by_name(par->net, e->ip.iniface);
- if (!dev) {
- pr_info("no such interface %s\n",
- e->ip.iniface);
- return -ENOENT;
- }
- dev_put(dev);
-
config = clusterip_config_init(par->net, cipinfo,
e->ip.dst.s_addr,
e->ip.iniface);
if (IS_ERR(config))
return PTR_ERR(config);
}
- }
+ } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN))
+ return -EINVAL;
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0) {
pr_info("cannot load conntrack support for proto=%u\n",
par->family);
- clusterip_config_entry_put(par->net, config);
+ clusterip_config_entry_put(config);
clusterip_config_put(config);
return ret;
}
@@ -524,7 +538,7 @@ static void clusterip_tg_destroy(const struct xt_tgdtor_param *par)
/* if no more entries are referencing the config, remove it
* from the list and destroy the proc entry */
- clusterip_config_entry_put(par->net, cipinfo->config);
+ clusterip_config_entry_put(cipinfo->config);
clusterip_config_put(cipinfo->config);
@@ -806,7 +820,7 @@ static const struct file_operations clusterip_proc_fops = {
static int clusterip_net_init(struct net *net)
{
- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+ struct clusterip_net *cn = clusterip_pernet(net);
int ret;
INIT_LIST_HEAD(&cn->configs);
@@ -824,6 +838,7 @@ static int clusterip_net_init(struct net *net)
pr_err("Unable to proc dir entry\n");
return -ENOMEM;
}
+ mutex_init(&cn->mutex);
#endif /* CONFIG_PROC_FS */
return 0;
@@ -831,13 +846,15 @@ static int clusterip_net_init(struct net *net)
static void clusterip_net_exit(struct net *net)
{
- struct clusterip_net *cn = net_generic(net, clusterip_net_id);
+ struct clusterip_net *cn = clusterip_pernet(net);
+
#ifdef CONFIG_PROC_FS
+ mutex_lock(&cn->mutex);
proc_remove(cn->procdir);
cn->procdir = NULL;
+ mutex_unlock(&cn->mutex);
#endif
nf_unregister_net_hook(net, &cip_arp_ops);
- WARN_ON_ONCE(!list_empty(&cn->configs));
}
static struct pernet_operations clusterip_net_ops = {
@@ -847,6 +864,10 @@ static struct pernet_operations clusterip_net_ops = {
.size = sizeof(struct clusterip_net),
};
+struct notifier_block cip_netdev_notifier = {
+ .notifier_call = clusterip_netdev_event
+};
+
static int __init clusterip_tg_init(void)
{
int ret;
@@ -859,11 +880,17 @@ static int __init clusterip_tg_init(void)
if (ret < 0)
goto cleanup_subsys;
+ ret = register_netdevice_notifier(&cip_netdev_notifier);
+ if (ret < 0)
+ goto unregister_target;
+
pr_info("ClusterIP Version %s loaded successfully\n",
CLUSTERIP_VERSION);
return 0;
+unregister_target:
+ xt_unregister_target(&clusterip_tg_reg);
cleanup_subsys:
unregister_pernet_subsys(&clusterip_net_ops);
return ret;
@@ -873,11 +900,12 @@ static void __exit clusterip_tg_exit(void)
{
pr_info("ClusterIP Version %s unloading\n", CLUSTERIP_VERSION);
+ unregister_netdevice_notifier(&cip_netdev_notifier);
xt_unregister_target(&clusterip_tg_reg);
unregister_pernet_subsys(&clusterip_net_ops);
- /* Wait for completion of call_rcu_bh()'s (clusterip_config_rcu_free) */
- rcu_barrier_bh();
+ /* Wait for completion of call_rcu()'s (clusterip_config_rcu_free) */
+ rcu_barrier();
}
module_init(clusterip_tg_init);
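
One idiom worth calling out in the rework above: clusterip_config_entry_put() relies on refcount_dec_and_lock(), so the drop to zero and the list_del_rcu() happen atomically with respect to __clusterip_config_find(). A hedged sketch of why that matters:

	/* Only the caller that takes the count to zero unlinks the object,
	 * and it does so while still holding the lock that lookups take,
	 * closing the find-then-free race. */
	if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
		list_del_rcu(&c->list);
		spin_unlock(&cn->lock);
		/* RCU readers may still hold the pointer; the actual kfree()
		 * is deferred via call_rcu(), per clusterip_config_put(). */
	}
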
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index ce1512b02cb2..fd3f9e8a74da 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -81,9 +81,12 @@ static int __init masquerade_tg_init(void)
int ret;
ret = xt_register_target(&masquerade_tg_reg);
+ if (ret)
+ return ret;
- if (ret == 0)
- nf_nat_masquerade_ipv4_register_notifier();
+ ret = nf_nat_masquerade_ipv4_register_notifier();
+ if (ret)
+ xt_unregister_target(&masquerade_tg_reg);
return ret;
}
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 78a67f961d86..2687db015b6f 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -62,22 +62,8 @@ static void nf_nat_ipv4_decode_session(struct sk_buff *skb,
}
#endif /* CONFIG_XFRM */
-static bool nf_nat_ipv4_in_range(const struct nf_conntrack_tuple *t,
- const struct nf_nat_range2 *range)
-{
- return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
- ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
-}
-
-static u32 nf_nat_ipv4_secure_port(const struct nf_conntrack_tuple *t,
- __be16 dport)
-{
- return secure_ipv4_port_ephemeral(t->src.u3.ip, t->dst.u3.ip, dport);
-}
-
static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
- const struct nf_nat_l4proto *l4proto,
const struct nf_conntrack_tuple *target,
enum nf_nat_manip_type maniptype)
{
@@ -90,8 +76,8 @@ static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb,
iph = (void *)skb->data + iphdroff;
hdroff = iphdroff + iph->ihl * 4;
- if (!l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff, hdroff,
- target, maniptype))
+ if (!nf_nat_l4proto_manip_pkt(skb, &nf_nat_l3proto_ipv4, iphdroff,
+ hdroff, target, maniptype))
return false;
iph = (void *)skb->data + iphdroff;
@@ -161,8 +147,6 @@ static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
.l3proto = NFPROTO_IPV4,
- .in_range = nf_nat_ipv4_in_range,
- .secure_port = nf_nat_ipv4_secure_port,
.manip_pkt = nf_nat_ipv4_manip_pkt,
.csum_update = nf_nat_ipv4_csum_update,
.csum_recalc = nf_nat_ipv4_csum_recalc,
@@ -186,7 +170,6 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
unsigned int hdrlen = ip_hdrlen(skb);
- const struct nf_nat_l4proto *l4proto;
struct nf_conntrack_tuple target;
unsigned long statusbit;
@@ -217,9 +200,8 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
if (!(ct->status & statusbit))
return 1;
- l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, inside->ip.protocol);
if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp),
- l4proto, &ct->tuplehash[!dir].tuple, !manip))
+ &ct->tuplehash[!dir].tuple, !manip))
return 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -233,8 +215,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
/* Change outer to look like the reply to an incoming packet */
nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
- l4proto = __nf_nat_l4proto_find(NFPROTO_IPV4, 0);
- if (!nf_nat_ipv4_manip_pkt(skb, 0, l4proto, &target, manip))
+ if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip))
return 0;
return 1;
@@ -391,26 +372,12 @@ EXPORT_SYMBOL_GPL(nf_nat_l3proto_ipv4_unregister_fn);
static int __init nf_nat_l3proto_ipv4_init(void)
{
- int err;
-
- err = nf_nat_l4proto_register(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
- if (err < 0)
- goto err1;
- err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv4);
- if (err < 0)
- goto err2;
- return err;
-
-err2:
- nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
-err1:
- return err;
+ return nf_nat_l3proto_register(&nf_nat_l3proto_ipv4);
}
static void __exit nf_nat_l3proto_ipv4_exit(void)
{
nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv4);
- nf_nat_l4proto_unregister(NFPROTO_IPV4, &nf_nat_l4proto_icmp);
}
MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
index a9d5e013e555..41327bb99093 100644
--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
@@ -147,28 +147,50 @@ static struct notifier_block masq_inet_notifier = {
.notifier_call = masq_inet_event,
};
-static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
+static int masq_refcnt;
+static DEFINE_MUTEX(masq_mutex);
-void nf_nat_masquerade_ipv4_register_notifier(void)
+int nf_nat_masquerade_ipv4_register_notifier(void)
{
+ int ret = 0;
+
+ mutex_lock(&masq_mutex);
/* check if the notifier was already set */
- if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
- return;
+ if (++masq_refcnt > 1)
+ goto out_unlock;
/* Register for device down reports */
- register_netdevice_notifier(&masq_dev_notifier);
+ ret = register_netdevice_notifier(&masq_dev_notifier);
+ if (ret)
+ goto err_dec;
/* Register IP address change reports */
- register_inetaddr_notifier(&masq_inet_notifier);
+ ret = register_inetaddr_notifier(&masq_inet_notifier);
+ if (ret)
+ goto err_unregister;
+
+ mutex_unlock(&masq_mutex);
+ return ret;
+
+err_unregister:
+ unregister_netdevice_notifier(&masq_dev_notifier);
+err_dec:
+ masq_refcnt--;
+out_unlock:
+ mutex_unlock(&masq_mutex);
+ return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
void nf_nat_masquerade_ipv4_unregister_notifier(void)
{
+ mutex_lock(&masq_mutex);
/* check if the notifier still has clients */
- if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
- return;
+ if (--masq_refcnt > 0)
+ goto out_unlock;
unregister_netdevice_notifier(&masq_dev_notifier);
unregister_inetaddr_notifier(&masq_inet_notifier);
+out_unlock:
+ mutex_unlock(&masq_mutex);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
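
The switch from atomic_t to a plain int under masq_mutex is what makes failure handling possible here: two notifiers must be registered as a unit, and the unwind has to be serialized against a concurrent second caller, which a lone atomic counter cannot do. The core of the pattern, as a hedged sketch rather than the diff code itself:

	mutex_lock(&masq_mutex);
	if (++masq_refcnt > 1)		/* someone already registered */
		goto out_unlock;
	ret = register_netdevice_notifier(&masq_dev_notifier);
	if (ret)
		goto err_dec;		/* leave state as if never called */
	ret = register_inetaddr_notifier(&masq_inet_notifier);
	if (ret)
		goto err_unregister;	/* roll back the first notifier */

Callers such as masquerade_tg_init() and nft_masq_ipv4_module_init(), elsewhere in this diff, must in turn check the return value and roll back their own registrations on failure.
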
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 5d259a12e25f..68b4d450391b 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -299,8 +299,6 @@ pptp_inbound_pkt(struct sk_buff *skb,
static int __init nf_nat_helper_pptp_init(void)
{
- nf_nat_need_gre();
-
BUG_ON(nf_nat_pptp_hook_outbound != NULL);
RCU_INIT_POINTER(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
deleted file mode 100644
index 00fda6331ce5..000000000000
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * nf_nat_proto_gre.c
- *
- * NAT protocol helper module for GRE.
- *
- * GRE is a generic encapsulation protocol, which is generally not very
- * suited for NAT, as it has no protocol-specific part as port numbers.
- *
- * It has an optional key field, which may help us distinguishing two
- * connections between the same two hosts.
- *
- * GRE is defined in RFC 1701 and RFC 1702, as well as RFC 2784
- *
- * PPTP is built on top of a modified version of GRE, and has a mandatory
- * field called "CallID", which serves us for the same purpose as the key
- * field in plain GRE.
- *
- * Documentation about PPTP can be found in RFC 2637
- *
- * (C) 2000-2005 by Harald Welte <laforge@gnumonks.org>
- *
- * Development of this code funded by Astaro AG (http://www.astaro.com/)
- *
- * (C) 2006-2012 Patrick McHardy <kaber@trash.net>
- *
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-#include <linux/netfilter/nf_conntrack_proto_gre.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
-
-/* generate unique tuple ... */
-static void
-gre_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct)
-{
- static u_int16_t key;
- __be16 *keyptr;
- unsigned int min, i, range_size;
-
- /* If there is no master conntrack we are not PPTP,
- do not change tuples */
- if (!ct->master)
- return;
-
- if (maniptype == NF_NAT_MANIP_SRC)
- keyptr = &tuple->src.u.gre.key;
- else
- keyptr = &tuple->dst.u.gre.key;
-
- if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
- pr_debug("%p: NATing GRE PPTP\n", ct);
- min = 1;
- range_size = 0xffff;
- } else {
- min = ntohs(range->min_proto.gre.key);
- range_size = ntohs(range->max_proto.gre.key) - min + 1;
- }
-
- pr_debug("min = %u, range_size = %u\n", min, range_size);
-
- for (i = 0; ; ++key) {
- *keyptr = htons(min + key % range_size);
- if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
- return;
- }
-
- pr_debug("%p: no NAT mapping\n", ct);
- return;
-}
-
-/* manipulate a GRE packet according to maniptype */
-static bool
-gre_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype)
-{
- const struct gre_base_hdr *greh;
- struct pptp_gre_header *pgreh;
-
- /* pgreh includes two optional 32bit fields which are not required
- * to be there. That's where the magic '8' comes from */
- if (!skb_make_writable(skb, hdroff + sizeof(*pgreh) - 8))
- return false;
-
- greh = (void *)skb->data + hdroff;
- pgreh = (struct pptp_gre_header *)greh;
-
- /* we only have destination manip of a packet, since 'source key'
- * is not present in the packet itself */
- if (maniptype != NF_NAT_MANIP_DST)
- return true;
-
- switch (greh->flags & GRE_VERSION) {
- case GRE_VERSION_0:
- /* We do not currently NAT any GREv0 packets.
- * Try to behave like "nf_nat_proto_unknown" */
- break;
- case GRE_VERSION_1:
- pr_debug("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key));
- pgreh->call_id = tuple->dst.u.gre.key;
- break;
- default:
- pr_debug("can't nat unknown GRE version\n");
- return false;
- }
- return true;
-}
-
-static const struct nf_nat_l4proto gre = {
- .l4proto = IPPROTO_GRE,
- .manip_pkt = gre_manip_pkt,
- .in_range = nf_nat_l4proto_in_range,
- .unique_tuple = gre_unique_tuple,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
-#endif
-};
-
-static int __init nf_nat_proto_gre_init(void)
-{
- return nf_nat_l4proto_register(NFPROTO_IPV4, &gre);
-}
-
-static void __exit nf_nat_proto_gre_fini(void)
-{
- nf_nat_l4proto_unregister(NFPROTO_IPV4, &gre);
-}
-
-module_init(nf_nat_proto_gre_init);
-module_exit(nf_nat_proto_gre_fini);
-
-void nf_nat_need_gre(void)
-{
- return;
-}
-EXPORT_SYMBOL_GPL(nf_nat_need_gre);
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
deleted file mode 100644
index 6d7cf1d79baf..000000000000
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/ip.h>
-#include <linux/icmp.h>
-
-#include <linux/netfilter.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-
-static bool
-icmp_in_range(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max)
-{
- return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
- ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
-}
-
-static void
-icmp_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct)
-{
- static u_int16_t id;
- unsigned int range_size;
- unsigned int i;
-
- range_size = ntohs(range->max_proto.icmp.id) -
- ntohs(range->min_proto.icmp.id) + 1;
- /* If no range specified... */
- if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
- range_size = 0xFFFF;
-
- for (i = 0; ; ++id) {
- tuple->src.u.icmp.id = htons(ntohs(range->min_proto.icmp.id) +
- (id % range_size));
- if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
- return;
- }
- return;
-}
-
-static bool
-icmp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype)
-{
- struct icmphdr *hdr;
-
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
- return false;
-
- hdr = (struct icmphdr *)(skb->data + hdroff);
- inet_proto_csum_replace2(&hdr->checksum, skb,
- hdr->un.echo.id, tuple->src.u.icmp.id, false);
- hdr->un.echo.id = tuple->src.u.icmp.id;
- return true;
-}
-
-const struct nf_nat_l4proto nf_nat_l4proto_icmp = {
- .l4proto = IPPROTO_ICMP,
- .manip_pkt = icmp_manip_pkt,
- .in_range = icmp_in_range,
- .unique_tuple = icmp_unique_tuple,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
-#endif
-};
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 5cd06ba3535d..aa8304c618b8 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -102,6 +102,7 @@ EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
/* Send RST reply */
void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
{
+ struct net_device *br_indev __maybe_unused;
struct sk_buff *nskb;
struct iphdr *niph;
const struct tcphdr *oth;
@@ -147,10 +148,11 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
* build the eth header using the original destination's MAC as the
* source, and send the RST packet directly.
*/
- if (oldskb->nf_bridge) {
+ br_indev = nf_bridge_get_physindev(oldskb);
+ if (br_indev) {
struct ethhdr *oeth = eth_hdr(oldskb);
- nskb->dev = nf_bridge_get_physindev(oldskb);
+ nskb->dev = br_indev;
niph->tot_len = htons(nskb->len);
ip_send_check(niph);
if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index f1193e1e928a..6847de1d1db8 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -69,7 +69,9 @@ static int __init nft_masq_ipv4_module_init(void)
if (ret < 0)
return ret;
- nf_nat_masquerade_ipv4_register_notifier();
+ ret = nf_nat_masquerade_ipv4_register_notifier();
+ if (ret)
+ nft_unregister_expr(&nft_masq_ipv4_type);
return ret;
}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 70289682a670..c3610b37bb4c 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -219,6 +219,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL),
SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL),
SNMP_MIB_ITEM("TCPRcvCollapsed", LINUX_MIB_TCPRCVCOLLAPSED),
+ SNMP_MIB_ITEM("TCPBacklogCoalesce", LINUX_MIB_TCPBACKLOGCOALESCE),
SNMP_MIB_ITEM("TCPDSACKOldSent", LINUX_MIB_TCPDSACKOLDSENT),
SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT),
SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV),
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 32a691b7ce2c..92d249e053be 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -29,6 +29,7 @@
#include <net/protocol.h>
struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
+EXPORT_SYMBOL(inet_protos);
const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
EXPORT_SYMBOL(inet_offloads);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8ca3eb06ba04..c55a5432cf37 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -131,8 +131,7 @@ struct sock *__raw_v4_lookup(struct net *net, struct sock *sk,
if (net_eq(sock_net(sk), net) && inet->inet_num == num &&
!(inet->inet_daddr && inet->inet_daddr != raddr) &&
!(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
- !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
- sk->sk_bound_dev_if != sdif))
+ raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
goto found; /* gotcha */
}
sk = NULL;
@@ -391,7 +390,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
skb->ip_summed = CHECKSUM_NONE;
- sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
+ skb_setup_tx_timestamp(skb, sockc->tsflags);
if (flags & MSG_CONFIRM)
skb_set_dst_pending_confirm(skb, 1);
@@ -805,7 +804,7 @@ out:
return copied;
}
-static int raw_init(struct sock *sk)
+static int raw_sk_init(struct sock *sk)
{
struct raw_sock *rp = raw_sk(sk);
@@ -970,7 +969,7 @@ struct proto raw_prot = {
.connect = ip4_datagram_connect,
.disconnect = __udp_disconnect,
.ioctl = raw_ioctl,
- .init = raw_init,
+ .init = raw_sk_init,
.setsockopt = raw_setsockopt,
.getsockopt = raw_getsockopt,
.sendmsg = raw_sendmsg,
@@ -1134,3 +1133,27 @@ void __init raw_proc_exit(void)
unregister_pernet_subsys(&raw_net_ops);
}
#endif /* CONFIG_PROC_FS */
+
+static void raw_sysctl_init_net(struct net *net)
+{
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ net->ipv4.sysctl_raw_l3mdev_accept = 1;
+#endif
+}
+
+static int __net_init raw_sysctl_init(struct net *net)
+{
+ raw_sysctl_init_net(net);
+ return 0;
+}
+
+static struct pernet_operations __net_initdata raw_sysctl_ops = {
+ .init = raw_sysctl_init,
+};
+
+void __init raw_init(void)
+{
+ raw_sysctl_init_net(&init_net);
+ if (register_pernet_subsys(&raw_sysctl_ops))
+ panic("RAW: failed to init sysctl parameters.\n");
+}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c0a9d26c06ce..ce92f73cf104 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1677,7 +1677,7 @@ static void ip_handle_martian_source(struct net_device *dev,
print_hex_dump(KERN_WARNING, "ll header: ",
DUMP_PREFIX_OFFSET, 16, 1,
skb_mac_header(skb),
- dev->hard_header_len, true);
+ dev->hard_header_len, false);
}
}
#endif
@@ -2849,6 +2849,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
err = -rt->dst.error;
} else {
fl4.flowi4_iif = LOOPBACK_IFINDEX;
+ skb->dev = net->loopback_dev;
rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
err = 0;
if (IS_ERR(rt))
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 891ed2f91467..ba0fc4b18465 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -602,6 +602,17 @@ static struct ctl_table ipv4_net_table[] = {
.mode = 0644,
.proc_handler = ipv4_ping_group_range,
},
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ {
+ .procname = "raw_l3mdev_accept",
+ .data = &init_net.ipv4.sysctl_raw_l3mdev_accept,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
{
.procname = "tcp_ecn",
.data = &init_net.ipv4.sysctl_tcp_ecn,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 9e6bc4d6daa7..27e2f6837062 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1423,7 +1423,7 @@ do_error:
if (copied + copied_syn)
goto out;
out_err:
- sock_zerocopy_put_abort(uarg);
+ sock_zerocopy_put_abort(uarg, true);
err = sk_stream_error(sk, flags, err);
/* make sure we wake any epoll edge trigger waiter */
if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
@@ -2088,7 +2088,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
}
continue;
- found_ok_skb:
+found_ok_skb:
/* Ok so how much can we use? */
used = skb->len - offset;
if (len < used)
@@ -2147,7 +2147,7 @@ skip_copy:
sk_eat_skb(sk, skb);
continue;
- found_fin_ok:
+found_fin_ok:
/* Process the FIN. */
++*seq;
if (!(flags & MSG_PEEK))
@@ -2241,10 +2241,6 @@ void tcp_set_state(struct sock *sk, int state)
* socket sitting in hash tables.
*/
inet_sk_state_store(sk, state);
-
-#ifdef STATE_TRACE
- SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
-#endif
}
EXPORT_SYMBOL_GPL(tcp_set_state);
@@ -3246,6 +3242,7 @@ static size_t tcp_opt_stats_get_size(void)
nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
+ nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
0;
}
@@ -3299,6 +3296,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
TCP_NLA_PAD);
nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
+ nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
return stats;
}
@@ -3658,8 +3656,11 @@ bool tcp_alloc_md5sig_pool(void)
if (unlikely(!tcp_md5sig_pool_populated)) {
mutex_lock(&tcp_md5sig_mutex);
- if (!tcp_md5sig_pool_populated)
+ if (!tcp_md5sig_pool_populated) {
__tcp_alloc_md5sig_pool();
+ if (tcp_md5sig_pool_populated)
+ static_key_slow_inc(&tcp_md5_needed);
+ }
mutex_unlock(&tcp_md5sig_mutex);
}
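
A note on the TCP_NLA_SRTT line above: tp->srtt_us stores the smoothed RTT in 1/8 µs fixed point (kept pre-scaled so the RFC 6298 EWMA update avoids a division), hence the >> 3 to export plain microseconds:

	/* hedged arithmetic sketch */
	u32 srtt_scaled = tp->srtt_us;		/* smoothed RTT << 3 */
	u32 srtt_usec   = srtt_scaled >> 3;	/* e.g. 8000 -> 1000 us = 1 ms */
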
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 9277abdd822a..0f497fc49c3f 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -128,7 +128,12 @@ static const u32 bbr_probe_rtt_mode_ms = 200;
/* Skip TSO below the following bandwidth (bits/sec): */
static const int bbr_min_tso_rate = 1200000;
-/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck. */
+/* Pace at ~1% below estimated bw, on average, to reduce queue at bottleneck.
+ * In order to help drive the network toward lower queues and low latency while
+ * maintaining high utilization, the average pacing rate aims to be slightly
+ * lower than the estimated bandwidth. This is an important aspect of the
+ * design.
+ */
static const int bbr_pacing_margin_percent = 1;
/* We use a high_gain value of 2/ln(2) because it's the smallest pacing gain
@@ -247,13 +252,7 @@ static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
}
-/* Pace using current bw estimate and a gain factor. In order to help drive the
- * network toward lower queues while maintaining high utilization and low
- * latency, the average pacing rate aims to be slightly (~1%) lower than the
- * estimated bandwidth. This is an important aspect of the design. In this
- * implementation this slightly lower pacing rate is achieved implicitly by not
- * including link-layer headers in the packet size used for the pacing rate.
- */
+/* Pace using current bw estimate and a gain factor. */
static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
struct tcp_sock *tp = tcp_sk(sk);
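
The relocated comment above is the whole story of bbr_pacing_margin_percent: derate the pacing rate explicitly rather than via the old implicit trick of excluding link-layer headers from the rate computation. Back-of-envelope, as a hedged simplification of bbr_rate_bytes_per_sec() (fixed-point gain scaling omitted):

	/* rate = bw * (100 - margin) / 100
	 * e.g. bw = 100 Mbit/s, margin = 1  ->  pace at 99 Mbit/s, so the
	 * bottleneck queue tends to drain rather than build. */
	u64 rate = div_u64(bw * (100 - bbr_pacing_margin_percent), 100);
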
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 3b45fe530f91..1bb7321a256d 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -8,6 +8,7 @@
#include <linux/wait.h>
#include <net/inet_common.h>
+#include <net/tls.h>
static bool tcp_bpf_stream_read(const struct sock *sk)
{
@@ -198,7 +199,7 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
msg->sg.start = i;
msg->sg.size -= apply_bytes;
sk_psock_queue_msg(psock, tmp);
- sk->sk_data_ready(sk);
+ sk_psock_data_ready(sk, psock);
} else {
sk_msg_free(sk, tmp);
kfree(tmp);
@@ -218,6 +219,8 @@ static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
u32 off;
while (1) {
+ bool has_tx_ulp;
+
sge = sk_msg_elem(msg, msg->sg.start);
size = (apply && apply_bytes < sge->length) ?
apply_bytes : sge->length;
@@ -226,7 +229,15 @@ static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
tcp_rate_check_app_limited(sk);
retry:
- ret = do_tcp_sendpages(sk, page, off, size, flags);
+ has_tx_ulp = tls_sw_has_ctx_tx(sk);
+ if (has_tx_ulp) {
+ flags |= MSG_SENDPAGE_NOPOLICY;
+ ret = kernel_sendpage_locked(sk,
+ page, off, size, flags);
+ } else {
+ ret = do_tcp_sendpages(sk, page, off, size, flags);
+ }
+
if (ret <= 0)
return ret;
if (apply)
@@ -289,12 +300,23 @@ static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
{
bool cork = false, enospc = msg->sg.start == msg->sg.end;
struct sock *sk_redir;
- u32 tosend;
+ u32 tosend, delta = 0;
int ret;
more_data:
- if (psock->eval == __SK_NONE)
+ if (psock->eval == __SK_NONE) {
+ /* Track the delta in msg size so it can be added/subtracted on
+ * SK_DROP from the copied size returned to the user. This ensures
+ * the user doesn't get a positive return code with msg_cut_data
+ * and an SK_DROP verdict.
+ */
+ delta = msg->sg.size;
psock->eval = sk_psock_msg_verdict(sk, psock, msg);
+ if (msg->sg.size < delta)
+ delta -= msg->sg.size;
+ else
+ delta = 0;
+ }
if (msg->cork_bytes &&
msg->cork_bytes > msg->sg.size && !enospc) {
@@ -350,7 +372,7 @@ more_data:
default:
sk_msg_free_partial(sk, msg, tosend);
sk_msg_apply_bytes(psock, tosend);
- *copied -= tosend;
+ *copied -= (tosend + delta);
return -EACCES;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1e37c1388189..76858b14ebe9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -579,10 +579,12 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
u32 delta_us;
- if (!delta)
- delta = 1;
- delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
- tcp_rcv_rtt_update(tp, delta_us, 0);
+ if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
+ if (!delta)
+ delta = 1;
+ delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+ tcp_rcv_rtt_update(tp, delta_us, 0);
+ }
}
}
@@ -1863,16 +1865,20 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
/* Emulate SACKs for SACKless connection: account for a new dupack. */
-static void tcp_add_reno_sack(struct sock *sk)
+static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
{
- struct tcp_sock *tp = tcp_sk(sk);
- u32 prior_sacked = tp->sacked_out;
+ if (num_dupack) {
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 prior_sacked = tp->sacked_out;
+ s32 delivered;
- tp->sacked_out++;
- tcp_check_reno_reordering(sk, 0);
- if (tp->sacked_out > prior_sacked)
- tp->delivered++; /* Some out-of-order packet is delivered */
- tcp_verify_left_out(tp);
+ tp->sacked_out += num_dupack;
+ tcp_check_reno_reordering(sk, 0);
+ delivered = tp->sacked_out - prior_sacked;
+ if (delivered > 0)
+ tp->delivered += delivered;
+ tcp_verify_left_out(tp);
+ }
}
/* Account for ACK, ACKing some data in Reno Recovery phase. */
@@ -2457,8 +2463,8 @@ void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag)
u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
tp->prior_cwnd - 1;
sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
- } else if ((flag & FLAG_RETRANS_DATA_ACKED) &&
- !(flag & FLAG_LOST_RETRANS)) {
+ } else if ((flag & (FLAG_RETRANS_DATA_ACKED | FLAG_LOST_RETRANS)) ==
+ FLAG_RETRANS_DATA_ACKED) {
sndcnt = min_t(int, delta,
max_t(int, tp->prr_delivered - tp->prr_out,
newly_acked_sacked) + 1);
@@ -2634,7 +2640,7 @@ void tcp_enter_recovery(struct sock *sk, bool ece_ack)
/* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
* recovered or spurious. Otherwise retransmits more on partial ACKs.
*/
-static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
+static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
int *rexmit)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2653,7 +2659,7 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
return;
if (after(tp->snd_nxt, tp->high_seq)) {
- if (flag & FLAG_DATA_SACKED || is_dupack)
+ if (flag & FLAG_DATA_SACKED || num_dupack)
tp->frto = 0; /* Step 3.a. loss was real */
} else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
tp->high_seq = tp->snd_nxt;
@@ -2679,8 +2685,8 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
/* A Reno DUPACK means new data in F-RTO step 2.b above are
* delivered. Lower inflight to clock out (re)transmissions.
*/
- if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
- tcp_add_reno_sack(sk);
+ if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
+ tcp_add_reno_sack(sk, num_dupack);
else if (flag & FLAG_SND_UNA_ADVANCED)
tcp_reset_reno_sack(tp);
}
@@ -2757,13 +2763,13 @@ static bool tcp_force_fast_retransmit(struct sock *sk)
* tcp_xmit_retransmit_queue().
*/
static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
- bool is_dupack, int *ack_flag, int *rexmit)
+ int num_dupack, int *ack_flag, int *rexmit)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int fast_rexmit = 0, flag = *ack_flag;
- bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
- tcp_force_fast_retransmit(sk));
+ bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
+ tcp_force_fast_retransmit(sk));
if (!tp->packets_out && tp->sacked_out)
tp->sacked_out = 0;
@@ -2810,8 +2816,8 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
switch (icsk->icsk_ca_state) {
case TCP_CA_Recovery:
if (!(flag & FLAG_SND_UNA_ADVANCED)) {
- if (tcp_is_reno(tp) && is_dupack)
- tcp_add_reno_sack(sk);
+ if (tcp_is_reno(tp))
+ tcp_add_reno_sack(sk, num_dupack);
} else {
if (tcp_try_undo_partial(sk, prior_snd_una))
return;
@@ -2826,7 +2832,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tcp_identify_packet_loss(sk, ack_flag);
break;
case TCP_CA_Loss:
- tcp_process_loss(sk, flag, is_dupack, rexmit);
+ tcp_process_loss(sk, flag, num_dupack, rexmit);
tcp_identify_packet_loss(sk, ack_flag);
if (!(icsk->icsk_ca_state == TCP_CA_Open ||
(*ack_flag & FLAG_LOST_RETRANS)))
@@ -2837,8 +2843,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
if (tcp_is_reno(tp)) {
if (flag & FLAG_SND_UNA_ADVANCED)
tcp_reset_reno_sack(tp);
- if (is_dupack)
- tcp_add_reno_sack(sk);
+ tcp_add_reno_sack(sk, num_dupack);
}
if (icsk->icsk_ca_state <= TCP_CA_Disorder)
@@ -2910,9 +2915,11 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
flag & FLAG_ACKED) {
u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
- u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
- seq_rtt_us = ca_rtt_us = delta_us;
+ if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
+ seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+ ca_rtt_us = seq_rtt_us;
+ }
}
rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
if (seq_rtt_us < 0)
@@ -3558,7 +3565,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
bool is_sack_reneg = tp->is_sack_reneg;
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
- bool is_dupack = false;
+ int num_dupack = 0;
int prior_packets = tp->packets_out;
u32 delivered = tp->delivered;
u32 lost = tp->lost;
@@ -3610,7 +3617,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (flag & FLAG_UPDATE_TS_RECENT)
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
- if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
+ if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) ==
+ FLAG_SND_UNA_ADVANCED) {
/* Window is constant, pure forward advance.
* No more checks are required.
* Note, we use the fact that SND.UNA>=SND.WL2.
@@ -3668,8 +3676,13 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_set_xmit_timer(sk);
if (tcp_ack_is_dubious(sk, flag)) {
- is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
- tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+ if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
+ num_dupack = 1;
+ /* Consider if pure acks were aggregated in tcp_add_backlog() */
+ if (!(flag & FLAG_DATA))
+ num_dupack = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+ }
+ tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
&rexmit);
}
@@ -3687,7 +3700,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
no_queue:
/* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK) {
- tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+ tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
&rexmit);
tcp_newly_delivered(sk, delivered, flag);
}
@@ -3712,7 +3725,7 @@ old_ack:
if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
&sack_state);
- tcp_fastretrans_alert(sk, prior_snd_una, is_dupack, &flag,
+ tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
&rexmit);
tcp_newly_delivered(sk, delivered, flag);
tcp_xmit_recovery(sk, rexmit);
@@ -4602,13 +4615,12 @@ end:
}
}
-static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
- bool *fragstolen)
+static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
+ bool *fragstolen)
{
int eaten;
struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
- __skb_pull(skb, hdrlen);
eaten = (tail &&
tcp_try_coalesce(sk, tail,
skb, fragstolen)) ? 1 : 0;
@@ -4659,7 +4671,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
- if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) {
+ if (tcp_queue_rcv(sk, skb, &fragstolen)) {
WARN_ON_ONCE(fragstolen); /* should not happen */
__kfree_skb(skb);
}
@@ -4719,7 +4731,7 @@ queue_and_out:
goto drop;
}
- eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
+ eaten = tcp_queue_rcv(sk, skb, &fragstolen);
if (skb->len)
tcp_event_data_recv(sk, skb);
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
@@ -5595,8 +5607,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
/* Bulk data transfer: receiver */
- eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
- &fragstolen);
+ __skb_pull(skb, tcp_header_len);
+ eaten = tcp_queue_rcv(sk, skb, &fragstolen);
tcp_event_data_recv(sk, skb);
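
The two delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ) guards added above protect the tick-to-µs conversion from 32-bit overflow. With TCP_TS_HZ = 1000 the multiplier is 1000, so (hedged arithmetic):

	/* delta is in 1 ms timestamp ticks; multiplying by 1000 wraps a
	 * 32-bit value once delta > INT_MAX / 1000 ~= 2.1M ticks (~35 min).
	 * Such deltas come from bogus or ancient timestamps, so the sample
	 * is skipped rather than fed to the RTT estimator as a tiny
	 * wrapped value. */
	if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ)))
		delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
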
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index de47038afdf0..efc6fef692ff 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -423,7 +423,7 @@ EXPORT_SYMBOL(tcp_req_err);
*
*/
-void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
@@ -446,20 +446,21 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
inet_iif(icmp_skb), 0);
if (!sk) {
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
- return;
+ return -ENOENT;
}
if (sk->sk_state == TCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
- return;
+ return 0;
}
seq = ntohl(th->seq);
- if (sk->sk_state == TCP_NEW_SYN_RECV)
- return tcp_req_err(sk, seq,
- type == ICMP_PARAMETERPROB ||
- type == ICMP_TIME_EXCEEDED ||
- (type == ICMP_DEST_UNREACH &&
- (code == ICMP_NET_UNREACH ||
- code == ICMP_HOST_UNREACH)));
+ if (sk->sk_state == TCP_NEW_SYN_RECV) {
+ tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
+ type == ICMP_TIME_EXCEEDED ||
+ (type == ICMP_DEST_UNREACH &&
+ (code == ICMP_NET_UNREACH ||
+ code == ICMP_HOST_UNREACH)));
+ return 0;
+ }
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
@@ -541,7 +542,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
skb = tcp_rtx_queue_head(sk);
- BUG_ON(!skb);
tcp_mstamp_refresh(tp);
delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
@@ -613,6 +613,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
out:
bh_unlock_sock(sk);
sock_put(sk);
+ return 0;
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
@@ -969,10 +970,13 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
* We need to maintain these in the sk structure.
*/
+struct static_key tcp_md5_needed __read_mostly;
+EXPORT_SYMBOL(tcp_md5_needed);
+
/* Find the Key structure for an address. */
-struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
- const union tcp_md5_addr *addr,
- int family)
+struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
+ const union tcp_md5_addr *addr,
+ int family)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
@@ -1010,7 +1014,7 @@ struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
}
return best_match;
}
-EXPORT_SYMBOL(tcp_md5_do_lookup);
+EXPORT_SYMBOL(__tcp_md5_do_lookup);
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
const union tcp_md5_addr *addr,
@@ -1618,12 +1622,14 @@ int tcp_v4_early_demux(struct sk_buff *skb)
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
-
- /* Only socket owner can try to collapse/prune rx queues
- * to reduce memory overhead, so add a little headroom here.
- * Few sockets backlog are possibly concurrently non empty.
- */
- limit += 64*1024;
+ struct skb_shared_info *shinfo;
+ const struct tcphdr *th;
+ struct tcphdr *thtail;
+ struct sk_buff *tail;
+ unsigned int hdrlen;
+ bool fragstolen;
+ u32 gso_segs;
+ int delta;
/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
* we can fix skb->truesize to its real value to avoid future drops.
@@ -1633,6 +1639,86 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
*/
skb_condense(skb);
+ skb_dst_drop(skb);
+
+ if (unlikely(tcp_checksum_complete(skb))) {
+ bh_unlock_sock(sk);
+ __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+ __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+ return true;
+ }
+
+ /* Attempt coalescing to last skb in backlog, even if we are
+ * above the limits.
+ * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
+ */
+ th = (const struct tcphdr *)skb->data;
+ hdrlen = th->doff * 4;
+ shinfo = skb_shinfo(skb);
+
+ if (!shinfo->gso_size)
+ shinfo->gso_size = skb->len - hdrlen;
+
+ if (!shinfo->gso_segs)
+ shinfo->gso_segs = 1;
+
+ tail = sk->sk_backlog.tail;
+ if (!tail)
+ goto no_coalesce;
+ thtail = (struct tcphdr *)tail->data;
+
+ if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
+ TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
+ ((TCP_SKB_CB(tail)->tcp_flags |
+ TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_URG) ||
+ ((TCP_SKB_CB(tail)->tcp_flags ^
+ TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
+#ifdef CONFIG_TLS_DEVICE
+ tail->decrypted != skb->decrypted ||
+#endif
+ thtail->doff != th->doff ||
+ memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
+ goto no_coalesce;
+
+ __skb_pull(skb, hdrlen);
+ if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
+ thtail->window = th->window;
+
+ TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
+
+ if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
+ TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
+
+ TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+
+ if (TCP_SKB_CB(skb)->has_rxtstamp) {
+ TCP_SKB_CB(tail)->has_rxtstamp = true;
+ tail->tstamp = skb->tstamp;
+ skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
+ }
+
+ /* Not as strict as GRO. We only need to carry mss max value */
+ skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
+ skb_shinfo(tail)->gso_size);
+
+ gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
+ skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+
+ sk->sk_backlog.len += delta;
+ __NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPBACKLOGCOALESCE);
+ kfree_skb_partial(skb, fragstolen);
+ return false;
+ }
+ __skb_push(skb, hdrlen);
+
+no_coalesce:
+ /* Only socket owner can try to collapse/prune rx queues
+ * to reduce memory overhead, so add a little headroom here.
+ * Only a few socket backlogs are likely to be non-empty concurrently.
+ */
+ limit += 64*1024;
+
if (unlikely(sk_add_backlog(sk, skb, limit))) {
bh_unlock_sock(sk);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
@@ -2573,8 +2659,8 @@ static int __net_init tcp_sk_init(struct net *net)
* which are too large can cause TCP streams to be bursty.
*/
net->ipv4.sysctl_tcp_tso_win_divisor = 3;
- /* Default TSQ limit of four TSO segments */
- net->ipv4.sysctl_tcp_limit_output_bytes = 262144;
+ /* Default TSQ limit of 16 TSO segments */
+ net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
/* rfc5961 challenge ack rate limiting */
net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
net->ipv4.sysctl_tcp_min_tso_segs = 2;
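
tcp_md5_needed turns MD5 handling into a static branch: static_key_slow_inc() fires the first time the key pool is populated (see the tcp_alloc_md5sig_pool() hunk earlier), and until then static_key_false() compiles to a fall-through no-op, so sockets with no MD5 state skip the md5sig_info dereference entirely. The lookup wrapper presumably ends up shaped like this (hedged sketch):

	static inline struct tcp_md5sig_key *
	tcp_md5_do_lookup(const struct sock *sk, const union tcp_md5_addr *addr,
			  int family)
	{
		if (static_key_false(&tcp_md5_needed))	/* nop until first key */
			return __tcp_md5_do_lookup(sk, addr, family);
		return NULL;
	}
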
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 870b0a335061..0fbf7d4df9da 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -10,6 +10,7 @@
* TCPv4 GSO/GRO support
*/
+#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>
@@ -305,7 +306,8 @@ int tcp_gro_complete(struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_gro_complete);
-static struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
/* Don't bother verifying checksum if we're going to flush anyway. */
if (!NAPI_GRO_CB(skb)->flush &&
@@ -318,7 +320,7 @@ static struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *
return tcp_gro_receive(head, skb);
}
-static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
+INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
const struct iphdr *iph = ip_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f510cad0b3e..730bc44dbad9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -233,16 +233,14 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
if (init_rcv_wnd)
*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
- (*rcv_wscale) = 0;
+ *rcv_wscale = 0;
if (wscale_ok) {
/* Set window scaling on max possible window */
space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
space = max_t(u32, space, sysctl_rmem_max);
space = min_t(u32, space, *window_clamp);
- while (space > U16_MAX && (*rcv_wscale) < TCP_MAX_WSCALE) {
- space >>= 1;
- (*rcv_wscale)++;
- }
+ *rcv_wscale = clamp_t(int, ilog2(space) - 15,
+ 0, TCP_MAX_WSCALE);
}
/* Set the clamp no higher than max representable value */
(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
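/* Review sketch (not part of the patch): the clamp_t(ilog2(space) - 15)
 * form computes the same wscale as the removed shift loop, since the
 * smallest w with (space >> w) <= U16_MAX is max(0, ilog2(space) - 15),
 * capped at TCP_MAX_WSCALE (14). Stand-alone check, assuming space > 0:
 */
static int wscale_by_loop(unsigned int space)
{
	int w = 0;

	while (space > 0xFFFF && w < 14) {	/* U16_MAX, TCP_MAX_WSCALE */
		space >>= 1;
		w++;
	}
	return w;
}

static int wscale_by_ilog2(unsigned int space)
{
	int w = (31 - __builtin_clz(space)) - 15;	/* ilog2(space) - 15 */

	return w < 0 ? 0 : w > 14 ? 14 : w;
}
/* e.g. space = 1 << 20: both return 5, and (1 << 20) >> 5 = 32768 <= 65535 */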
@@ -596,7 +594,8 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
- if (unlikely(rcu_access_pointer(tp->md5sig_info))) {
+ if (static_key_false(&tcp_md5_needed) &&
+ rcu_access_pointer(tp->md5sig_info)) {
*md5 = tp->af_specific->md5_lookup(sk, sk);
if (*md5) {
opts->options |= OPTION_MD5;
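/* Review note: tcp_md5_needed (added elsewhere in this series) is a static
 * key that stays false until the first MD5 key is installed, so the guard
 * above compiles to a patched-out branch on the common no-MD5 path. The
 * generic shape of the pattern, shown with the modern static-branch API
 * (the patch itself uses the older static_key_false() helper):
 *
 *	DEFINE_STATIC_KEY_FALSE(feature_key);
 *
 *	if (static_branch_unlikely(&feature_key))
 *		slow_path();	// taken only after static_branch_inc()
 */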
@@ -732,7 +731,8 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
- if (unlikely(rcu_access_pointer(tp->md5sig_info))) {
+ if (static_key_false(&tcp_md5_needed) &&
+ rcu_access_pointer(tp->md5sig_info)) {
*md5 = tp->af_specific->md5_lookup(sk, sk);
if (*md5) {
opts->options |= OPTION_MD5;
@@ -1904,24 +1904,27 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
* This algorithm is from John Heffner.
*/
static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
- bool *is_cwnd_limited, u32 max_segs)
+ bool *is_cwnd_limited,
+ bool *is_rwnd_limited,
+ u32 max_segs)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- u32 age, send_win, cong_win, limit, in_flight;
+ u32 send_win, cong_win, limit, in_flight;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *head;
int win_divisor;
-
- if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
- goto send_now;
+ s64 delta;
if (icsk->icsk_ca_state >= TCP_CA_Recovery)
goto send_now;
/* Avoid bursty behavior by allowing defer
- * only if the last write was recent.
+ * only if the last write was recent (1 ms).
+ * Note that tp->tcp_wstamp_ns can be in the future if we have
+ * packets waiting in a qdisc or device for EDT delivery.
*/
- if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
+ delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
+ if (delta > 0)
goto send_now;
in_flight = tcp_packets_in_flight(tp);
@@ -1968,15 +1971,33 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
head = tcp_rtx_queue_head(sk);
if (!head)
goto send_now;
- age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
+ delta = tp->tcp_clock_cache - head->tstamp;
/* If next ACK is likely to come too late (half srtt), do not defer */
- if (age < (tp->srtt_us >> 4))
+ if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
goto send_now;
- /* Ok, it looks like it is advisable to defer. */
+ /* Ok, it looks like it is advisable to defer.
+ * Three cases are tracked:
+ * 1) We are cwnd-limited
+ * 2) We are rwnd-limited
+ * 3) We are application limited.
+ */
+ if (cong_win < send_win) {
+ if (cong_win <= skb->len) {
+ *is_cwnd_limited = true;
+ return true;
+ }
+ } else {
+ if (send_win <= skb->len) {
+ *is_rwnd_limited = true;
+ return true;
+ }
+ }
- if (cong_win < send_win && cong_win <= skb->len)
- *is_cwnd_limited = true;
+ /* If this packet won't get more data, do not wait. */
+ if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
+ TCP_SKB_CB(skb)->eor)
+ goto send_now;
return true;
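/* Review note on the 1 ms recency check above, with hypothetical values:
 * tp->tcp_wstamp_ns is the earliest departure time stamped on the last
 * queued packet, so under EDT pacing it may run ahead of tcp_clock_cache.
 *
 *	clock = 100.0 ms, wstamp =  98.0 ms: delta = +1.0 ms -> send now
 *	clock = 100.0 ms, wstamp = 100.4 ms: delta = -1.4 ms -> may defer
 *
 * i.e. deferral is only considered while the last write is under 1 ms old,
 * or still pending in a qdisc/device queue.
 */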
@@ -2212,8 +2233,9 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
limit = max_t(unsigned long,
2 * skb->truesize,
sk->sk_pacing_rate >> sk->sk_pacing_shift);
- limit = min_t(unsigned long, limit,
- sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
+ if (sk->sk_pacing_status == SK_PACING_NONE)
+ limit = min_t(unsigned long, limit,
+ sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
limit <<= factor;
if (refcount_read(&sk->sk_wmem_alloc) > limit) {
@@ -2356,7 +2378,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
} else {
if (!push_one &&
tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
- max_segs))
+ &is_rwnd_limited, max_segs))
break;
}
@@ -2494,15 +2516,18 @@ void tcp_send_loss_probe(struct sock *sk)
goto rearm_timer;
}
skb = skb_rb_last(&sk->tcp_rtx_queue);
+ if (unlikely(!skb)) {
+ WARN_ONCE(tp->packets_out,
+ "invalid inflight: %u state %u cwnd %u mss %d\n",
+ tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+ inet_csk(sk)->icsk_pending = 0;
+ return;
+ }
/* At most one outstanding TLP retransmission. */
if (tp->tlp_high_seq)
goto rearm_timer;
- /* Retransmit last segment. */
- if (WARN_ON(!skb))
- goto rearm_timer;
-
if (skb_still_in_host_queue(sk, skb))
goto rearm_timer;
@@ -2920,7 +2945,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
trace_tcp_retransmit_skb(sk, skb);
} else if (err != -EBUSY) {
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+ NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
}
return err;
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 5f8b6d3cd855..f87dbc78b6bc 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -40,15 +40,17 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
u32 elapsed, start_ts;
+ s32 remaining;
start_ts = tcp_retransmit_stamp(sk);
if (!icsk->icsk_user_timeout || !start_ts)
return icsk->icsk_rto;
elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
- if (elapsed >= icsk->icsk_user_timeout)
+ remaining = icsk->icsk_user_timeout - elapsed;
+ if (remaining <= 0)
return 1; /* user timeout has passed; fire ASAP */
- else
- return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(icsk->icsk_user_timeout - elapsed));
+
+ return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
/**
@@ -209,7 +211,7 @@ static bool retransmits_timed_out(struct sock *sk,
(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
timeout = jiffies_to_msecs(timeout);
}
- return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= timeout;
+ return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}
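/* Review sketch (stand-alone, not patch code): why the (s32) cast above is
 * safer than the old unsigned compare. Under EDT, start_ts can be stamped
 * slightly ahead of the current tcp_time_stamp() reading; the unsigned
 * difference then wraps to a huge value and the old check fires at once,
 * while the signed form just sees a negative distance.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t now = 990, start_ts = 1000, timeout = 100;

	/* old style: wraps to ~4.3e9 >= 100, reports a spurious timeout */
	printf("unsigned: %d\n", now - start_ts >= timeout);
	/* patched style: -110 < 0, correctly reports "not yet" */
	printf("signed:   %d\n", (int32_t)(now - start_ts - timeout) >= 0);
	return 0;
}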
/* A write timeout has occurred. Process the after effects. */
@@ -376,7 +378,7 @@ static void tcp_probe_timer(struct sock *sk)
return;
}
- if (icsk->icsk_probes_out > max_probes) {
+ if (icsk->icsk_probes_out >= max_probes) {
abort: tcp_write_err(sk);
} else {
/* Only send another probe if we didn't close things up. */
@@ -482,11 +484,12 @@ void tcp_retransmit_timer(struct sock *sk)
goto out_reset_timer;
}
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
if (tcp_write_timeout(sk))
goto out;
if (icsk->icsk_retransmits == 0) {
- int mib_idx;
+ int mib_idx = 0;
if (icsk->icsk_ca_state == TCP_CA_Recovery) {
if (tcp_is_sack(tp))
@@ -501,10 +504,9 @@ void tcp_retransmit_timer(struct sock *sk)
mib_idx = LINUX_MIB_TCPSACKFAILURES;
else
mib_idx = LINUX_MIB_TCPRENOFAILURES;
- } else {
- mib_idx = LINUX_MIB_TCPTIMEOUTS;
}
- __NET_INC_STATS(sock_net(sk), mib_idx);
+ if (mib_idx)
+ __NET_INC_STATS(sock_net(sk), mib_idx);
}
tcp_enter_loss(sk);
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index c0630013c1ae..33bf8e9c8663 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -149,34 +149,40 @@ drop:
}
#endif
-static void tunnel4_err(struct sk_buff *skb, u32 info)
+static int tunnel4_err(struct sk_buff *skb, u32 info)
{
struct xfrm_tunnel *handler;
for_each_tunnel_rcu(tunnel4_handlers, handler)
if (!handler->err_handler(skb, info))
- break;
+ return 0;
+
+ return -ENOENT;
}
#if IS_ENABLED(CONFIG_IPV6)
-static void tunnel64_err(struct sk_buff *skb, u32 info)
+static int tunnel64_err(struct sk_buff *skb, u32 info)
{
struct xfrm_tunnel *handler;
for_each_tunnel_rcu(tunnel64_handlers, handler)
if (!handler->err_handler(skb, info))
- break;
+ return 0;
+
+ return -ENOENT;
}
#endif
#if IS_ENABLED(CONFIG_MPLS)
-static void tunnelmpls4_err(struct sk_buff *skb, u32 info)
+static int tunnelmpls4_err(struct sk_buff *skb, u32 info)
{
struct xfrm_tunnel *handler;
for_each_tunnel_rcu(tunnelmpls4_handlers, handler)
if (!handler->err_handler(skb, info))
- break;
+ return 0;
+
+ return -ENOENT;
}
#endif
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1976fddb9e00..3fb0ed5e4789 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -105,6 +105,7 @@
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
+#include <net/ip_tunnels.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
@@ -115,6 +116,7 @@
#include "udp_impl.h"
#include <net/sock_reuseport.h>
#include <net/addrconf.h>
+#include <net/udp_tunnel.h>
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);
@@ -371,21 +373,19 @@ static int compute_score(struct sock *sk, struct net *net,
{
int score;
struct inet_sock *inet;
+ bool dev_match;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
ipv6_only_sock(sk))
return -1;
- score = (sk->sk_family == PF_INET) ? 2 : 1;
- inet = inet_sk(sk);
+ if (sk->sk_rcv_saddr != daddr)
+ return -1;
- if (inet->inet_rcv_saddr) {
- if (inet->inet_rcv_saddr != daddr)
- return -1;
- score += 4;
- }
+ score = (sk->sk_family == PF_INET) ? 2 : 1;
+ inet = inet_sk(sk);
if (inet->inet_daddr) {
if (inet->inet_daddr != saddr)
return -1;
@@ -398,15 +398,11 @@ static int compute_score(struct sock *sk, struct net *net,
score += 4;
}
- if (sk->sk_bound_dev_if || exact_dif) {
- bool dev_match = (sk->sk_bound_dev_if == dif ||
- sk->sk_bound_dev_if == sdif);
-
- if (!dev_match)
- return -1;
- if (sk->sk_bound_dev_if)
- score += 4;
- }
+ dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
+ dif, sdif);
+ if (!dev_match)
+ return -1;
+ score += 4;
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
@@ -465,65 +461,30 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
__be16 sport, __be32 daddr, __be16 dport, int dif,
int sdif, struct udp_table *udptable, struct sk_buff *skb)
{
- struct sock *sk, *result;
+ struct sock *result;
unsigned short hnum = ntohs(dport);
- unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
- struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
+ unsigned int hash2, slot2;
+ struct udp_hslot *hslot2;
bool exact_dif = udp_lib_exact_dif_match(net, skb);
- int score, badness;
- u32 hash = 0;
- if (hslot->count > 10) {
- hash2 = ipv4_portaddr_hash(net, daddr, hnum);
+ hash2 = ipv4_portaddr_hash(net, daddr, hnum);
+ slot2 = hash2 & udptable->mask;
+ hslot2 = &udptable->hash2[slot2];
+
+ result = udp4_lib_lookup2(net, saddr, sport,
+ daddr, hnum, dif, sdif,
+ exact_dif, hslot2, skb);
+ if (!result) {
+ hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
- if (hslot->count < hslot2->count)
- goto begin;
result = udp4_lib_lookup2(net, saddr, sport,
- daddr, hnum, dif, sdif,
+ htonl(INADDR_ANY), hnum, dif, sdif,
exact_dif, hslot2, skb);
- if (!result) {
- unsigned int old_slot2 = slot2;
- hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
- slot2 = hash2 & udptable->mask;
- /* avoid searching the same slot again. */
- if (unlikely(slot2 == old_slot2))
- return result;
-
- hslot2 = &udptable->hash2[slot2];
- if (hslot->count < hslot2->count)
- goto begin;
-
- result = udp4_lib_lookup2(net, saddr, sport,
- daddr, hnum, dif, sdif,
- exact_dif, hslot2, skb);
- }
- if (unlikely(IS_ERR(result)))
- return NULL;
- return result;
- }
-begin:
- result = NULL;
- badness = 0;
- sk_for_each_rcu(sk, &hslot->head) {
- score = compute_score(sk, net, saddr, sport,
- daddr, hnum, dif, sdif, exact_dif);
- if (score > badness) {
- if (sk->sk_reuseport) {
- hash = udp_ehashfn(net, daddr, hnum,
- saddr, sport);
- result = reuseport_select_sock(sk, hash, skb,
- sizeof(struct udphdr));
- if (unlikely(IS_ERR(result)))
- return NULL;
- if (result)
- return result;
- }
- result = sk;
- badness = score;
- }
}
+ if (unlikely(IS_ERR(result)))
+ return NULL;
return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
@@ -585,6 +546,89 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
return true;
}
+DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
+void udp_encap_enable(void)
+{
+ static_branch_inc(&udp_encap_needed_key);
+}
+EXPORT_SYMBOL(udp_encap_enable);
+
+/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
+ * through error handlers in encapsulations looking for a match.
+ */
+static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info)
+{
+ int i;
+
+ for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
+ int (*handler)(struct sk_buff *skb, u32 info);
+
+ if (!iptun_encaps[i])
+ continue;
+ handler = rcu_dereference(iptun_encaps[i]->err_handler);
+ if (handler && !handler(skb, info))
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/* Try to match ICMP errors to UDP tunnels by looking up a socket without
+ * reversing source and destination port: this will match tunnels that force the
+ * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
+ * lwtunnels might actually break this assumption by being configured with
+ * different destination ports on endpoints; in that case we won't be able to
+ * trace ICMP messages back to them.
+ *
+ * If this doesn't match any socket, probe tunnels with arbitrary destination
+ * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
+ * we've sent packets to won't necessarily match the local destination port.
+ *
+ * Then ask the tunnel implementation to match the error against a valid
+ * association.
+ *
+ * Return an error if we can't find a match, the socket if we need further
+ * processing, zero otherwise.
+ */
+static struct sock *__udp4_lib_err_encap(struct net *net,
+ const struct iphdr *iph,
+ struct udphdr *uh,
+ struct udp_table *udptable,
+ struct sk_buff *skb, u32 info)
+{
+ int network_offset, transport_offset;
+ struct sock *sk;
+
+ network_offset = skb_network_offset(skb);
+ transport_offset = skb_transport_offset(skb);
+
+ /* Network header needs to point to the outer IPv4 header inside ICMP */
+ skb_reset_network_header(skb);
+
+ /* Transport header needs to point to the UDP header */
+ skb_set_transport_header(skb, iph->ihl << 2);
+
+ sk = __udp4_lib_lookup(net, iph->daddr, uh->source,
+ iph->saddr, uh->dest, skb->dev->ifindex, 0,
+ udptable, NULL);
+ if (sk) {
+ int (*lookup)(struct sock *sk, struct sk_buff *skb);
+ struct udp_sock *up = udp_sk(sk);
+
+ lookup = READ_ONCE(up->encap_err_lookup);
+ if (!lookup || lookup(sk, skb))
+ sk = NULL;
+ }
+
+ if (!sk)
+ sk = ERR_PTR(__udp4_lib_err_encap_no_sk(skb, info));
+
+ skb_set_transport_header(skb, transport_offset);
+ skb_set_network_header(skb, network_offset);
+
+ return sk;
+}
+
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
@@ -596,13 +640,14 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
* to find the appropriate port.
*/
-void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
+int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
struct inet_sock *inet;
const struct iphdr *iph = (const struct iphdr *)skb->data;
struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
+ bool tunnel = false;
struct sock *sk;
int harderr;
int err;
@@ -612,8 +657,21 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
iph->saddr, uh->source, skb->dev->ifindex,
inet_sdif(skb), udptable, NULL);
if (!sk) {
- __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
- return; /* No socket for error */
+ /* No socket for error: try tunnels before discarding */
+ sk = ERR_PTR(-ENOENT);
+ if (static_branch_unlikely(&udp_encap_needed_key)) {
+ sk = __udp4_lib_err_encap(net, iph, uh, udptable, skb,
+ info);
+ if (!sk)
+ return 0;
+ }
+
+ if (IS_ERR(sk)) {
+ __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
+ return PTR_ERR(sk);
+ }
+
+ tunnel = true;
}
err = 0;
@@ -656,6 +714,10 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
* RFC1122: OK. Passes ICMP errors back to application, as per
* 4.1.3.3.
*/
+ if (tunnel) {
+ /* ...not for tunnels though: we don't have a sending socket */
+ goto out;
+ }
if (!inet->recverr) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
@@ -665,12 +727,12 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
sk->sk_err = err;
sk->sk_error_report(sk);
out:
- return;
+ return 0;
}
-void udp_err(struct sk_buff *skb, u32 info)
+int udp_err(struct sk_buff *skb, u32 info)
{
- __udp4_lib_err(skb, info, &udp_table);
+ return __udp4_lib_err(skb, info, &udp_table);
}
/*
@@ -1713,6 +1775,10 @@ try_again:
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
*addr_len = sizeof(*sin);
}
+
+ if (udp_sk(sk)->gro_enabled)
+ udp_cmsg_recv(msg, sk, skb);
+
if (inet->cmsg_flags)
ip_cmsg_recv_offset(msg, sk, skb, sizeof(struct udphdr), off);
@@ -1889,13 +1955,6 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return 0;
}
-DEFINE_STATIC_KEY_FALSE(udp_encap_needed_key);
-void udp_encap_enable(void)
-{
- static_branch_enable(&udp_encap_needed_key);
-}
-EXPORT_SYMBOL(udp_encap_enable);
-
/* returns:
* -1: error
* 0: success
@@ -1904,7 +1963,7 @@ EXPORT_SYMBOL(udp_encap_enable);
* Note that in the success and error cases, the skb is assumed to
* have either been requeued or freed.
*/
-static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int is_udplite = IS_UDPLITE(sk);
@@ -2007,6 +2066,27 @@ drop:
return -1;
}
+static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff *next, *segs;
+ int ret;
+
+ if (likely(!udp_unexpected_gso(sk, skb)))
+ return udp_queue_rcv_one_skb(sk, skb);
+
+ BUILD_BUG_ON(sizeof(struct udp_skb_cb) > SKB_SGO_CB_OFFSET);
+ __skb_push(skb, -skb_mac_offset(skb));
+ segs = udp_rcv_segment(sk, skb, true);
+ for (skb = segs; skb; skb = next) {
+ next = skb->next;
+ __skb_pull(skb, skb_transport_offset(skb));
+ ret = udp_queue_rcv_one_skb(sk, skb);
+ if (ret > 0)
+ ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
+ }
+ return 0;
+}
+
/* For TCP sockets, sk_rx_dst is protected by socket lock
* For UDP, we use xchg() to guard against concurrent changes.
*/
@@ -2398,11 +2478,15 @@ void udp_destroy_sock(struct sock *sk)
bool slow = lock_sock_fast(sk);
udp_flush_pending_frames(sk);
unlock_sock_fast(sk, slow);
- if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
- void (*encap_destroy)(struct sock *sk);
- encap_destroy = READ_ONCE(up->encap_destroy);
- if (encap_destroy)
- encap_destroy(sk);
+ if (static_branch_unlikely(&udp_encap_needed_key)) {
+ if (up->encap_type) {
+ void (*encap_destroy)(struct sock *sk);
+ encap_destroy = READ_ONCE(up->encap_destroy);
+ if (encap_destroy)
+ encap_destroy(sk);
+ }
+ if (up->encap_enabled)
+ static_branch_dec(&udp_encap_needed_key);
}
}
@@ -2447,7 +2531,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
/* FALLTHROUGH */
case UDP_ENCAP_L2TPINUDP:
up->encap_type = val;
- udp_encap_enable();
+ lock_sock(sk);
+ udp_tunnel_encap_enable(sk->sk_socket);
+ release_sock(sk);
break;
default:
err = -ENOPROTOOPT;
@@ -2469,6 +2555,14 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
up->gso_size = val;
break;
+ case UDP_GRO:
+ lock_sock(sk);
+ if (valbool)
+ udp_tunnel_encap_enable(sk->sk_socket);
+ up->gro_enabled = valbool;
+ release_sock(sk);
+ break;
+
/*
* UDP-Lite's partial checksum coverage (RFC 3828).
*/
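/* Review sketch: user-space opt-in for the UDP_GRO case added above.
 * Illustrative only; UDP_GRO comes from <linux/udp.h> on kernels carrying
 * this series, and the fallback value below is an assumption for older
 * headers.
 */
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef UDP_GRO
#define UDP_GRO 104
#endif

static int enable_udp_gro(int fd)
{
	int one = 1;

	/* afterwards recvmsg() may return coalesced datagrams; the original
	 * segment size comes back in a cmsg via the udp_cmsg_recv() hook
	 * added earlier in this file's diff */
	return setsockopt(fd, IPPROTO_UDP, UDP_GRO, &one, sizeof(one));
}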
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index e7d18b140287..322672655419 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -7,7 +7,7 @@
#include <net/inet_common.h>
int __udp4_lib_rcv(struct sk_buff *, struct udp_table *, int);
-void __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
+int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
int udp_v4_get_port(struct sock *sk, unsigned short snum);
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 802f2bc00d69..64f9715173ac 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -13,6 +13,7 @@
#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>
+#include <net/inet_common.h>
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
@@ -343,6 +344,56 @@ out:
return segs;
}
+#define UDP_GRO_CNT_MAX 64
+static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
+ struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ struct sk_buff *pp = NULL;
+ struct udphdr *uh2;
+ struct sk_buff *p;
+
+ /* requires a non-zero csum, for symmetry with GSO */
+ if (!uh->check) {
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+ }
+
+ /* pull encapsulating udp header */
+ skb_gro_pull(skb, sizeof(struct udphdr));
+ skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
+
+ list_for_each_entry(p, head, list) {
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ uh2 = udp_hdr(p);
+
+ /* Match ports only, as csum is always non-zero */
+ if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ /* Terminate the flow on len mismatch or if it grows "too much".
+ * Under a small-packet flood the GRO count could otherwise grow a lot,
+ * leading to excessive truesize values.
+ */
+ if (!skb_gro_receive(p, skb) &&
+ NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
+ pp = p;
+ else if (uh->len != uh2->len)
+ pp = p;
+
+ return pp;
+ }
+
+ /* mismatch, but we never need to flush */
+ return NULL;
+}
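/* Review note (generic GRO contract, not specific to this patch): a
 * non-NULL return from a gro_receive callback tells dev_gro_receive() to
 * flush that held skb up the stack. The two "pp = p" paths above therefore
 * cap per-flow growth at UDP_GRO_CNT_MAX and kick out flows whose trailing
 * segment length changed, while "return NULL" keeps the packet merged.
 */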
+
+INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+ __be16 sport, __be16 dport));
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
struct udphdr *uh, udp_lookup_t lookup)
{
@@ -353,23 +404,28 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
int flush = 1;
struct sock *sk;
+ rcu_read_lock();
+ sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
+ udp4_lib_lookup_skb, skb, uh->source, uh->dest);
+ if (!sk)
+ goto out_unlock;
+
+ if (udp_sk(sk)->gro_enabled) {
+ pp = call_gro_receive(udp_gro_receive_segment, head, skb);
+ rcu_read_unlock();
+ return pp;
+ }
+
if (NAPI_GRO_CB(skb)->encap_mark ||
(skb->ip_summed != CHECKSUM_PARTIAL &&
NAPI_GRO_CB(skb)->csum_cnt == 0 &&
- !NAPI_GRO_CB(skb)->csum_valid))
- goto out;
+ !NAPI_GRO_CB(skb)->csum_valid) ||
+ !udp_sk(sk)->gro_receive)
+ goto out_unlock;
/* mark that this skb passed once through the tunnel gro layer */
NAPI_GRO_CB(skb)->encap_mark = 1;
- rcu_read_lock();
- sk = (*lookup)(skb, uh->source, uh->dest);
-
- if (sk && udp_sk(sk)->gro_receive)
- goto unflush;
- goto out_unlock;
-
-unflush:
flush = 0;
list_for_each_entry(p, head, list) {
@@ -394,14 +450,13 @@ unflush:
out_unlock:
rcu_read_unlock();
-out:
skb_gro_flush_final(skb, pp, flush);
return pp;
}
EXPORT_SYMBOL(udp_gro_receive);
-static struct sk_buff *udp4_gro_receive(struct list_head *head,
- struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
struct udphdr *uh = udp_gro_udphdr(skb);
@@ -427,6 +482,19 @@ flush:
return NULL;
}
+static int udp_gro_complete_segment(struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+
+ skb->csum_start = (unsigned char *)uh - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+ return 0;
+}
+
int udp_gro_complete(struct sk_buff *skb, int nhoff,
udp_lookup_t lookup)
{
@@ -437,16 +505,22 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
uh->len = newlen;
- /* Set encapsulation before calling into inner gro_complete() functions
- * to make them set up the inner offsets.
- */
- skb->encapsulation = 1;
-
rcu_read_lock();
- sk = (*lookup)(skb, uh->source, uh->dest);
- if (sk && udp_sk(sk)->gro_complete)
+ sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
+ udp4_lib_lookup_skb, skb, uh->source, uh->dest);
+ if (sk && udp_sk(sk)->gro_enabled) {
+ err = udp_gro_complete_segment(skb);
+ } else if (sk && udp_sk(sk)->gro_complete) {
+ skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
+ : SKB_GSO_UDP_TUNNEL;
+
+ /* Set encapsulation before calling into inner gro_complete()
+ * functions to make them set up the inner offsets.
+ */
+ skb->encapsulation = 1;
err = udp_sk(sk)->gro_complete(sk, skb,
nhoff + sizeof(struct udphdr));
+ }
rcu_read_unlock();
if (skb->remcsum_offload)
@@ -456,18 +530,14 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
}
EXPORT_SYMBOL(udp_gro_complete);
-static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
+INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
const struct iphdr *iph = ip_hdr(skb);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
- if (uh->check) {
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ if (uh->check)
uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
iph->daddr, 0);
- } else {
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- }
return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 6539ff15e9a3..be8b5b2157d8 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -20,6 +20,23 @@ int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
if (err < 0)
goto error;
+ if (cfg->bind_ifindex) {
+ struct net_device *dev;
+
+ dev = dev_get_by_index(net, cfg->bind_ifindex);
+ if (!dev) {
+ err = -ENODEV;
+ goto error;
+ }
+
+ err = kernel_setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE,
+ dev->name, strlen(dev->name) + 1);
+ dev_put(dev);
+
+ if (err < 0)
+ goto error;
+ }
+
udp_addr.sin_family = AF_INET;
udp_addr.sin_addr = cfg->local_ip;
udp_addr.sin_port = cfg->local_udp_port;
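/* Review sketch: a hypothetical tunnel driver using the new bind_ifindex
 * field (port value illustrative; udp_sock_create() is the existing
 * family-dispatching helper from <net/udp_tunnel.h>):
 */
static int open_bound_tunnel_sock(struct net *net, int ifindex,
				  struct socket **sockp)
{
	struct udp_port_cfg cfg = {
		.family		= AF_INET,
		.local_udp_port	= htons(4789),
		/* resolved to a SO_BINDTODEVICE call by the hunk above */
		.bind_ifindex	= ifindex,
	};

	return udp_sock_create(net, &cfg, sockp);
}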
@@ -68,6 +85,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
udp_sk(sk)->encap_type = cfg->encap_type;
udp_sk(sk)->encap_rcv = cfg->encap_rcv;
+ udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
udp_sk(sk)->encap_destroy = cfg->encap_destroy;
udp_sk(sk)->gro_receive = cfg->gro_receive;
udp_sk(sk)->gro_complete = cfg->gro_complete;
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 8545457752fb..39c7f17d916f 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -25,9 +25,9 @@ static int udplite_rcv(struct sk_buff *skb)
return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
}
-static void udplite_err(struct sk_buff *skb, u32 info)
+static int udplite_err(struct sk_buff *skb, u32 info)
{
- __udp4_lib_err(skb, info, &udplite_table);
+ return __udp4_lib_err(skb, info, &udplite_table);
}
static const struct net_protocol udplite_protocol = {
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
index 8dd0e6ab8606..35c54865dc42 100644
--- a/net/ipv4/xfrm4_protocol.c
+++ b/net/ipv4/xfrm4_protocol.c
@@ -106,13 +106,15 @@ static int xfrm4_esp_rcv(struct sk_buff *skb)
return 0;
}
-static void xfrm4_esp_err(struct sk_buff *skb, u32 info)
+static int xfrm4_esp_err(struct sk_buff *skb, u32 info)
{
struct xfrm4_protocol *handler;
for_each_protocol_rcu(esp4_handlers, handler)
if (!handler->err_handler(skb, info))
- break;
+ return 0;
+
+ return -ENOENT;
}
static int xfrm4_ah_rcv(struct sk_buff *skb)
@@ -132,13 +134,15 @@ static int xfrm4_ah_rcv(struct sk_buff *skb)
return 0;
}
-static void xfrm4_ah_err(struct sk_buff *skb, u32 info)
+static int xfrm4_ah_err(struct sk_buff *skb, u32 info)
{
struct xfrm4_protocol *handler;
for_each_protocol_rcu(ah4_handlers, handler)
if (!handler->err_handler(skb, info))
- break;
+ return 0;
+
+ return -ENOENT;
}
static int xfrm4_ipcomp_rcv(struct sk_buff *skb)
@@ -158,13 +162,15 @@ static int xfrm4_ipcomp_rcv(struct sk_buff *skb)
return 0;
}
-static void xfrm4_ipcomp_err(struct sk_buff *skb, u32 info)
+static int xfrm4_ipcomp_err(struct sk_buff *skb, u32 info)
{
struct xfrm4_protocol *handler;
for_each_protocol_rcu(ipcomp4_handlers, handler)
if (!handler->err_handler(skb, info))
- break;
+ return 0;
+
+ return -ENOENT;
}
static const struct net_protocol esp4_protocol = {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 045597b9a7c0..521e471f1cf9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2820,7 +2820,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
dev = __dev_get_by_name(net, p.name);
if (!dev)
goto err_exit;
- err = dev_open(dev);
+ err = dev_open(dev, NULL);
}
}
#endif
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 94999058e110..cca3b3603c42 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -433,7 +433,6 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr)
{
- unsigned int hash = inet6_acaddr_hash(net, addr);
struct net_device *nh_dev;
struct ifacaddr6 *aca;
bool found = false;
@@ -441,7 +440,9 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
rcu_read_lock();
if (dev)
found = ipv6_chk_acast_dev(dev, addr);
- else
+ else {
+ unsigned int hash = inet6_acaddr_hash(net, addr);
+
hlist_for_each_entry_rcu(aca, &inet6_acaddr_lst[hash],
aca_addr_lst) {
nh_dev = fib6_info_nh_dev(aca->aca_rt);
@@ -452,6 +453,7 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
break;
}
}
+ }
rcu_read_unlock();
return found;
}
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 1ede7a16a0be..bde08aa549f3 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -772,6 +772,7 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
case IPV6_2292PKTINFO:
{
struct net_device *dev = NULL;
+ int src_idx;
if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct in6_pktinfo))) {
err = -EINVAL;
@@ -779,12 +780,15 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
}
src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+ src_idx = src_info->ipi6_ifindex;
- if (src_info->ipi6_ifindex) {
+ if (src_idx) {
if (fl6->flowi6_oif &&
- src_info->ipi6_ifindex != fl6->flowi6_oif)
+ src_idx != fl6->flowi6_oif &&
+ (sk->sk_bound_dev_if != fl6->flowi6_oif ||
+ !sk_dev_equal_l3scope(sk, src_idx)))
return -EINVAL;
- fl6->flowi6_oif = src_info->ipi6_ifindex;
+ fl6->flowi6_oif = src_idx;
}
addr_type = __ipv6_addr_type(&src_info->ipi6_addr);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 63b2b66f9dfa..5afe9f83374d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -145,10 +145,13 @@ static void esp_output_done(struct crypto_async_request *base, int err)
void *tmp;
struct xfrm_state *x;
- if (xo && (xo->flags & XFRM_DEV_RESUME))
- x = skb->sp->xvec[skb->sp->len - 1];
- else
+ if (xo && (xo->flags & XFRM_DEV_RESUME)) {
+ struct sec_path *sp = skb_sec_path(skb);
+
+ x = sp->xvec[sp->len - 1];
+ } else {
x = skb_dst(skb)->xfrm;
+ }
tmp = ESP_SKB_CB(skb)->tmp;
esp_ssg_unref(x, tmp);
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 6177e2171171..d46b4eb645c2 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -68,11 +68,12 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
xo = xfrm_offload(skb);
if (!xo || !(xo->flags & CRYPTO_DONE)) {
- err = secpath_set(skb);
- if (err)
+ struct sec_path *sp = secpath_set(skb);
+
+ if (!sp)
goto out;
- if (skb->sp->len == XFRM_MAX_DEPTH)
+ if (sp->len == XFRM_MAX_DEPTH)
goto out;
x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
@@ -81,8 +82,8 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head,
if (!x)
goto out;
- skb->sp->xvec[skb->sp->len++] = x;
- skb->sp->olen++;
+ sp->xvec[sp->len++] = x;
+ sp->olen++;
xo = xfrm_offload(skb);
if (!xo) {
@@ -141,6 +142,7 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
struct crypto_aead *aead;
netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
+ struct sec_path *sp;
if (!xo)
return ERR_PTR(-EINVAL);
@@ -148,7 +150,8 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
return ERR_PTR(-EINVAL);
- x = skb->sp->xvec[skb->sp->len - 1];
+ sp = skb_sec_path(skb);
+ x = sp->xvec[sp->len - 1];
aead = x->data;
esph = ip_esp_hdr(skb);
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index 6de3c04b0f30..bd675c61deb1 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -4,6 +4,7 @@
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/udp.h>
+#include <linux/icmpv6.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/fou.h>
@@ -69,14 +70,87 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
return 0;
}
+static int gue6_err_proto_handler(int proto, struct sk_buff *skb,
+ struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, u32 info)
+{
+ const struct inet6_protocol *ipprot;
+
+ ipprot = rcu_dereference(inet6_protos[proto]);
+ if (ipprot && ipprot->err_handler) {
+ if (!ipprot->err_handler(skb, opt, type, code, offset, info))
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int gue6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info)
+{
+ int transport_offset = skb_transport_offset(skb);
+ struct guehdr *guehdr;
+ size_t optlen;
+ int ret;
+
+ if (skb->len < sizeof(struct udphdr) + sizeof(struct guehdr))
+ return -EINVAL;
+
+ guehdr = (struct guehdr *)&udp_hdr(skb)[1];
+
+ switch (guehdr->version) {
+ case 0: /* Full GUE header present */
+ break;
+ case 1: {
+ /* Direct encapsulation of IPv4 or IPv6 */
+ skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr));
+
+ switch (((struct iphdr *)guehdr)->version) {
+ case 4:
+ ret = gue6_err_proto_handler(IPPROTO_IPIP, skb, opt,
+ type, code, offset, info);
+ goto out;
+ case 6:
+ ret = gue6_err_proto_handler(IPPROTO_IPV6, skb, opt,
+ type, code, offset, info);
+ goto out;
+ default:
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+ default: /* Undefined version */
+ return -EOPNOTSUPP;
+ }
+
+ if (guehdr->control)
+ return -ENOENT;
+
+ optlen = guehdr->hlen << 2;
+
+ if (validate_gue_flags(guehdr, optlen))
+ return -EINVAL;
+
+ skb_set_transport_header(skb, -(int)sizeof(struct icmp6hdr));
+ ret = gue6_err_proto_handler(guehdr->proto_ctype, skb,
+ opt, type, code, offset, info);
+
+out:
+ skb_set_transport_header(skb, transport_offset);
+ return ret;
+}
+
+
static const struct ip6_tnl_encap_ops fou_ip6tun_ops = {
.encap_hlen = fou_encap_hlen,
.build_header = fou6_build_header,
+ .err_handler = gue6_err,
};
static const struct ip6_tnl_encap_ops gue_ip6tun_ops = {
.encap_hlen = gue_encap_hlen,
.build_header = gue6_build_header,
+ .err_handler = gue6_err,
};
static int ip6_tnl_encap_add_fou_ops(void)
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index c9c53ade55c3..5d7aa2c2770c 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -84,7 +84,7 @@ static inline struct sock *icmpv6_sk(struct net *net)
return net->ipv6.icmp_sk[smp_processor_id()];
}
-static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
/* icmpv6_notify checks 8 bytes can be pulled, icmp6hdr is 8 bytes */
@@ -100,6 +100,8 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!(type & ICMPV6_INFOMSG_MASK))
if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
ping_err(skb, offset, ntohl(info));
+
+ return 0;
}
static int icmpv6_rcv(struct sk_buff *skb);
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 3d7c7460a0c5..f3515ebe9b3a 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -102,22 +102,13 @@ static inline int compute_score(struct sock *sk, struct net *net,
if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
sk->sk_family == PF_INET6) {
+ if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return -1;
+
+ if (!inet_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
+ return -1;
score = 1;
- if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
- if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
- return -1;
- score++;
- }
- if (sk->sk_bound_dev_if || exact_dif) {
- bool dev_match = (sk->sk_bound_dev_if == dif ||
- sk->sk_bound_dev_if == sdif);
-
- if (!dev_match)
- return -1;
- if (sk->sk_bound_dev_if)
- score++;
- }
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
}
@@ -166,26 +157,12 @@ struct sock *inet6_lookup_listener(struct net *net,
const __be16 sport, const struct in6_addr *daddr,
const unsigned short hnum, const int dif, const int sdif)
{
- unsigned int hash = inet_lhashfn(net, hnum);
- struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash];
- bool exact_dif = inet6_exact_dif_match(net, skb);
struct inet_listen_hashbucket *ilb2;
- struct sock *sk, *result = NULL;
- int score, hiscore = 0;
+ struct sock *result = NULL;
unsigned int hash2;
- u32 phash = 0;
-
- if (ilb->count <= 10 || !hashinfo->lhash2)
- goto port_lookup;
-
- /* Too many sk in the ilb bucket (which is hashed by port alone).
- * Try lhash2 (which is hashed by port and addr) instead.
- */
hash2 = ipv6_portaddr_hash(net, daddr, hnum);
ilb2 = inet_lhash2_bucket(hashinfo, hash2);
- if (ilb2->count > ilb->count)
- goto port_lookup;
result = inet6_lhash2_lookup(net, ilb2, skb, doff,
saddr, sport, daddr, hnum,
@@ -194,33 +171,12 @@ struct sock *inet6_lookup_listener(struct net *net,
goto done;
/* Lookup lhash2 with in6addr_any */
-
hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
ilb2 = inet_lhash2_bucket(hashinfo, hash2);
- if (ilb2->count > ilb->count)
- goto port_lookup;
result = inet6_lhash2_lookup(net, ilb2, skb, doff,
- saddr, sport, daddr, hnum,
+ saddr, sport, &in6addr_any, hnum,
dif, sdif);
- goto done;
-
-port_lookup:
- sk_for_each(sk, &ilb->head) {
- score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif);
- if (score > hiscore) {
- if (sk->sk_reuseport) {
- phash = inet6_ehashfn(net, daddr, hnum,
- saddr, sport);
- result = reuseport_select_sock(sk, phash,
- skb, doff);
- if (result)
- goto done;
- }
- result = sk;
- hiscore = score;
- }
- }
done:
if (unlikely(IS_ERR(result)))
return NULL;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 515adbdba1d2..229e55c99021 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -423,7 +423,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
}
-static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct net *net = dev_net(skb->dev);
@@ -433,13 +433,13 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IPV6),
offset) < 0)
- return;
+ return -EINVAL;
ipv6h = (const struct ipv6hdr *)skb->data;
t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
tpi.key, tpi.proto);
if (!t)
- return;
+ return -ENOENT;
switch (type) {
struct ipv6_tlv_tnl_enc_lim *tel;
@@ -449,14 +449,14 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
t->parms.name);
if (code != ICMPV6_PORT_UNREACH)
break;
- return;
+ return 0;
case ICMPV6_TIME_EXCEED:
if (code == ICMPV6_EXC_HOPLIMIT) {
net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
t->parms.name);
break;
}
- return;
+ return 0;
case ICMPV6_PARAMPROB:
teli = 0;
if (code == ICMPV6_HDR_FIELD)
@@ -472,14 +472,14 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
t->parms.name);
}
- return;
+ return 0;
case ICMPV6_PKT_TOOBIG:
ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
- return;
+ return 0;
case NDISC_REDIRECT:
ip6_redirect(skb, net, skb->dev->ifindex, 0,
sock_net_uid(net, NULL));
- return;
+ return 0;
}
if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
@@ -487,6 +487,8 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
else
t->err_count = 1;
t->err_time = jiffies;
+
+ return 0;
}
static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
@@ -1883,12 +1885,6 @@ static void ip6gre_tap_setup(struct net_device *dev)
netif_keep_dst(dev);
}
-bool is_ip6gretap_dev(const struct net_device *dev)
-{
- return dev->netdev_ops == &ip6gre_tap_netdev_ops;
-}
-EXPORT_SYMBOL_GPL(is_ip6gretap_dev);
-
static bool ip6gre_netlink_encap_parms(struct nlattr *data[],
struct ip_tunnel_encap *ipencap)
{
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 96577e742afd..c7ed2b6d5a1d 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -95,7 +95,7 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
list_for_each_entry_safe(skb, next, head, list) {
struct dst_entry *dst;
- list_del(&skb->list);
+ skb_list_del_init(skb);
/* if ingress device is enslaved to an L3 master device pass the
* skb to its handler for processing
*/
@@ -296,7 +296,7 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
- list_del(&skb->list);
+ skb_list_del_init(skb);
skb = ip6_rcv_core(skb, dev, net);
if (skb == NULL)
continue;
@@ -319,28 +319,26 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
/*
* Deliver the packet to the host
*/
-
-
-static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
+ bool have_final)
{
const struct inet6_protocol *ipprot;
struct inet6_dev *idev;
unsigned int nhoff;
- int nexthdr;
bool raw;
- bool have_final = false;
/*
* Parse extension headers
*/
- rcu_read_lock();
resubmit:
idev = ip6_dst_idev(skb_dst(skb));
- if (!pskb_pull(skb, skb_transport_offset(skb)))
- goto discard;
nhoff = IP6CB(skb)->nhoff;
- nexthdr = skb_network_header(skb)[nhoff];
+ if (!have_final) {
+ if (!pskb_pull(skb, skb_transport_offset(skb)))
+ goto discard;
+ nexthdr = skb_network_header(skb)[nhoff];
+ }
resubmit_final:
raw = raw6_local_deliver(skb, nexthdr);
@@ -359,6 +357,8 @@ resubmit_final:
}
} else if (ipprot->flags & INET6_PROTO_FINAL) {
const struct ipv6hdr *hdr;
+ int sdif = inet6_sdif(skb);
+ struct net_device *dev;
/* Only do this once for first final protocol */
have_final = true;
@@ -371,9 +371,19 @@ resubmit_final:
skb_postpull_rcsum(skb, skb_network_header(skb),
skb_network_header_len(skb));
hdr = ipv6_hdr(skb);
+
+ /* skb->dev passed may be master dev for vrfs. */
+ if (sdif) {
+ dev = dev_get_by_index_rcu(net, sdif);
+ if (!dev)
+ goto discard;
+ } else {
+ dev = skb->dev;
+ }
+
if (ipv6_addr_is_multicast(&hdr->daddr) &&
- !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
- &hdr->saddr) &&
+ !ipv6_chk_mcast_addr(dev, &hdr->daddr,
+ &hdr->saddr) &&
!ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb)))
goto discard;
}
@@ -411,13 +421,19 @@ resubmit_final:
consume_skb(skb);
}
}
- rcu_read_unlock();
- return 0;
+ return;
discard:
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
- rcu_read_unlock();
kfree_skb(skb);
+}
+
+static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ rcu_read_lock();
+ ip6_protocol_deliver_rcu(net, skb, 0, false);
+ rcu_read_unlock();
+
return 0;
}
@@ -432,15 +448,32 @@ EXPORT_SYMBOL_GPL(ip6_input);
int ip6_mc_input(struct sk_buff *skb)
{
+ int sdif = inet6_sdif(skb);
const struct ipv6hdr *hdr;
+ struct net_device *dev;
bool deliver;
__IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
__in6_dev_get_safely(skb->dev), IPSTATS_MIB_INMCAST,
skb->len);
+ /* skb->dev passed may be master dev for vrfs. */
+ if (sdif) {
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(dev_net(skb->dev), sdif);
+ if (!dev) {
+ rcu_read_unlock();
+ kfree_skb(skb);
+ return -ENODEV;
+ }
+ } else {
+ dev = skb->dev;
+ }
+
hdr = ipv6_hdr(skb);
- deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
+ deliver = ipv6_chk_mcast_addr(dev, &hdr->daddr, NULL);
+ if (sdif)
+ rcu_read_unlock();
#ifdef CONFIG_IPV6_MROUTE
/*
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index c7e495f12011..5c045691c302 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -20,6 +20,23 @@
#include "ip6_offload.h"
+/* All GRO functions are always builtin, except UDP over IPv6, which lives in
+ * the ipv6 module, as it depends on the UDPv6 lookup function, so we need
+ * special care when ipv6 is built as a module.
+ */
+#if IS_BUILTIN(CONFIG_IPV6)
+#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
+#else
+#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
+#endif
+
+#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb) \
+({ \
+ unlikely(gro_recursion_inc_test(skb)) ? \
+ NAPI_GRO_CB(skb)->flush |= 1, NULL : \
+ INDIRECT_CALL_L4(cb, f2, f1, head, skb); \
+})
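/* Review note: INDIRECT_CALL_2()/INDIRECT_CALL_1() from
 * <linux/indirect_call_wrapper.h> expand to roughly
 *
 *	f == f2 ? f2(args) : (f == f1 ? f1(args) : f(args))
 *
 * trading the indirect call for one or two predictable compares, which is
 * cheaper under retpolines. INDIRECT_CALL_L4 keeps only the always-builtin
 * candidate (f2) when ipv6 is modular, since udp6_gro_receive's address is
 * not known at build time then. (Sketch of the expansion, not the exact
 * macro text.)
 */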
+
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
const struct net_offload *ops = NULL;
@@ -164,8 +181,12 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph,
return len;
}
-static struct sk_buff *ipv6_gro_receive(struct list_head *head,
- struct sk_buff *skb)
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
+ struct sk_buff *));
+INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
+ struct sk_buff *skb)
{
const struct net_offload *ops;
struct sk_buff *pp = NULL;
@@ -229,14 +250,21 @@ static struct sk_buff *ipv6_gro_receive(struct list_head *head,
* XXX skbs on the gro_list have all been parsed and pulled
* already so we don't need to compare nlen
* (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
- * memcmp() alone below is suffcient, right?
+ * memcmp() alone below is sufficient, right?
*/
if ((first_word & htonl(0xF00FFFFF)) ||
- memcmp(&iph->nexthdr, &iph2->nexthdr,
- nlen - offsetof(struct ipv6hdr, nexthdr))) {
+ !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
+ !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
+ *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
+not_same_flow:
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
+ if (unlikely(nlen > sizeof(struct ipv6hdr))) {
+ if (memcmp(iph + 1, iph2 + 1,
+ nlen - sizeof(struct ipv6hdr)))
+ goto not_same_flow;
+ }
/* flush if Traffic Class fields are different */
NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
NAPI_GRO_CB(p)->flush |= flush;
@@ -253,7 +281,8 @@ static struct sk_buff *ipv6_gro_receive(struct list_head *head,
skb_gro_postpull_rcsum(skb, iph, nlen);
- pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
+ ops->callbacks.gro_receive, head, skb);
out_unlock:
rcu_read_unlock();
@@ -294,7 +323,9 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
return inet_gro_receive(head, skb);
}
-static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
+INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
const struct net_offload *ops;
struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
@@ -313,7 +344,8 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
goto out_unlock;
- err = ops->callbacks.gro_complete(skb, nhoff);
+ err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
+ udp6_gro_complete, skb, nhoff);
out_unlock:
rcu_read_unlock();
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 89e0d5118afe..5f9fa0302b5a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -195,37 +195,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
const struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *first_hop = &fl6->daddr;
struct dst_entry *dst = skb_dst(skb);
+ unsigned int head_room;
struct ipv6hdr *hdr;
u8 proto = fl6->flowi6_proto;
int seg_len = skb->len;
int hlimit = -1;
u32 mtu;
- if (opt) {
- unsigned int head_room;
+ head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
+ if (opt)
+ head_room += opt->opt_nflen + opt->opt_flen;
- /* First: exthdrs may take lots of space (~8K for now)
- MAX_HEADER is not enough.
- */
- head_room = opt->opt_nflen + opt->opt_flen;
- seg_len += head_room;
- head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
-
- if (skb_headroom(skb) < head_room) {
- struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
- if (!skb2) {
- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_OUTDISCARDS);
- kfree_skb(skb);
- return -ENOBUFS;
- }
- if (skb->sk)
- skb_set_owner_w(skb2, skb->sk);
- consume_skb(skb);
- skb = skb2;
+ if (unlikely(skb_headroom(skb) < head_room)) {
+ struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
+ if (!skb2) {
+ IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_OUTDISCARDS);
+ kfree_skb(skb);
+ return -ENOBUFS;
}
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
+ consume_skb(skb);
+ skb = skb2;
+ }
+
+ if (opt) {
+ seg_len += opt->opt_nflen + opt->opt_flen;
+
if (opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
+
if (opt->opt_nflen)
ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
&fl6->saddr);
@@ -378,6 +378,14 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+#ifdef CONFIG_NET_SWITCHDEV
+ if (skb->offload_l3_fwd_mark) {
+ consume_skb(skb);
+ return 0;
+ }
+#endif
+
+ skb->tstamp = 0;
return dst_output(net, sk, skb);
}
@@ -574,6 +582,7 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
+ skb_ext_copy(to, from);
skb_copy_secmark(to, from);
}
@@ -1245,6 +1254,7 @@ static int __ip6_append_data(struct sock *sk,
{
struct sk_buff *skb, *skb_prev = NULL;
unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
+ struct ubuf_info *uarg = NULL;
int exthdrlen = 0;
int dst_exthdrlen = 0;
int hh_len;
@@ -1257,7 +1267,7 @@ static int __ip6_append_data(struct sock *sk,
int csummode = CHECKSUM_NONE;
unsigned int maxnonfragsize, headersize;
unsigned int wmem_alloc_delta = 0;
- bool paged;
+ bool paged, extra_uref;
skb = skb_peek_tail(queue);
if (!skb) {
@@ -1322,6 +1332,20 @@ emsgsize:
rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
csummode = CHECKSUM_PARTIAL;
+ if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
+ uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
+ if (!uarg)
+ return -ENOBUFS;
+ extra_uref = true;
+ if (rt->dst.dev->features & NETIF_F_SG &&
+ csummode == CHECKSUM_PARTIAL) {
+ paged = true;
+ } else {
+ uarg->zerocopy = 0;
+ skb_zcopy_set(skb, uarg, &extra_uref);
+ }
+ }
+
/*
* Let's try using as much space as possible.
* Use MTU if total length of the message fits into the MTU.
@@ -1354,7 +1378,7 @@ emsgsize:
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen;
- unsigned int pagedlen = 0;
+ unsigned int pagedlen;
alloc_new_skb:
/* There's no room in the current skb */
if (skb)
@@ -1378,6 +1402,7 @@ alloc_new_skb:
if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
fraglen = datalen + fragheaderlen;
+ pagedlen = 0;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
@@ -1439,12 +1464,6 @@ alloc_new_skb:
skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
dst_exthdrlen);
- /* Only the initial fragment is time stamped */
- skb_shinfo(skb)->tx_flags = cork->tx_flags;
- cork->tx_flags = 0;
- skb_shinfo(skb)->tskey = tskey;
- tskey = 0;
-
/*
* Find where to start putting bytes
*/
@@ -1476,6 +1495,13 @@ alloc_new_skb:
exthdrlen = 0;
dst_exthdrlen = 0;
+ /* Only the initial fragment is time stamped */
+ skb_shinfo(skb)->tx_flags = cork->tx_flags;
+ cork->tx_flags = 0;
+ skb_shinfo(skb)->tskey = tskey;
+ tskey = 0;
+ skb_zcopy_set(skb, uarg, &extra_uref);
+
if ((flags & MSG_CONFIRM) && !skb_prev)
skb_set_dst_pending_confirm(skb, 1);
@@ -1505,7 +1531,7 @@ alloc_new_skb:
err = -EFAULT;
goto error;
}
- } else {
+ } else if (!uarg || !uarg->zerocopy) {
int i = skb_shinfo(skb)->nr_frags;
err = -ENOMEM;
@@ -1535,6 +1561,10 @@ alloc_new_skb:
skb->data_len += copy;
skb->truesize += copy;
wmem_alloc_delta += copy;
+ } else {
+ err = skb_zerocopy_iter_dgram(skb, from, copy);
+ if (err < 0)
+ goto error;
}
offset += copy;
length -= copy;
@@ -1547,6 +1577,8 @@ alloc_new_skb:
error_efault:
err = -EFAULT;
error:
+ if (uarg)
+ sock_zerocopy_put_abort(uarg, extra_uref);
cork->length -= length;
IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
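/* Review sketch: the user-space side of the MSG_ZEROCOPY support wired into
 * __ip6_append_data() above, following the pre-existing TCP flow: enable
 * SO_ZEROCOPY once, pass MSG_ZEROCOPY per call, reap completions from the
 * error queue. Illustrative only; the fallback defines are assumptions for
 * older headers.
 */
#include <sys/socket.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

static ssize_t send_zc(int fd, const void *buf, size_t len)
{
	int one = 1;

	/* a real caller would do this once at socket setup */
	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
		return -1;

	/* buf's pages stay pinned until a completion shows up on MSG_ERRQUEUE */
	return send(fd, buf, len, MSG_ZEROCOPY);
}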
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a9d06d4dd057..99179b9c8384 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -901,6 +901,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
goto drop;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
+ ipv6h = ipv6_hdr(skb);
if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
goto drop;
if (iptunnel_pull_header(skb, 0, tpi->proto, false))
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index b283f293ee4a..ad1a9ccd4b44 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -15,7 +15,7 @@
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
struct socket **sockp)
{
- struct sockaddr_in6 udp6_addr;
+ struct sockaddr_in6 udp6_addr = {};
int err;
struct socket *sock = NULL;
@@ -31,6 +31,22 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
if (err < 0)
goto error;
}
+ if (cfg->bind_ifindex) {
+ struct net_device *dev;
+
+ dev = dev_get_by_index(net, cfg->bind_ifindex);
+ if (!dev) {
+ err = -ENODEV;
+ goto error;
+ }
+
+ err = kernel_setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE,
+ dev->name, strlen(dev->name) + 1);
+ dev_put(dev);
+
+ if (err < 0)
+ goto error;
+ }
udp6_addr.sin6_family = AF_INET6;
memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
@@ -42,6 +58,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
goto error;
if (cfg->peer_udp_port) {
+ memset(&udp6_addr, 0, sizeof(udp6_addr));
udp6_addr.sin6_family = AF_INET6;
memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
sizeof(udp6_addr.sin6_addr));
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index eeaf7455d51e..706fe42e4928 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -318,6 +318,7 @@ static int vti6_rcv(struct sk_buff *skb)
return 0;
}
+ ipv6h = ipv6_hdr(skb);
if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
t->dev->stats.rx_dropped++;
rcu_read_unlock();
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index e2ea691e42c6..8276f1224f16 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -52,6 +52,8 @@
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
+#include <linux/nospec.h>
+
struct ip6mr_rule {
struct fib_rule common;
};
@@ -655,7 +657,7 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
return NULL;
}
- if (dev_open(dev))
+ if (dev_open(dev, NULL))
goto failure;
dev_hold(dev);
@@ -1841,6 +1843,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
return -EFAULT;
if (vr.mifi >= mrt->maxvif)
return -EINVAL;
+ vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
read_lock(&mrt_lock);
vif = &mrt->vif_table[vr.mifi];
if (VIF_EXISTS(mrt, vr.mifi)) {
@@ -1915,6 +1918,7 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
return -EFAULT;
if (vr.mifi >= mrt->maxvif)
return -EINVAL;
+ vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
read_lock(&mrt_lock);
vif = &mrt->vif_table[vr.mifi];
if (VIF_EXISTS(mrt, vr.mifi)) {
@@ -1968,7 +1972,7 @@ static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct
*/
static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, struct mfc6_cache *c, int vifi)
+ struct sk_buff *skb, int vifi)
{
struct ipv6hdr *ipv6h;
struct vif_device *vif = &mrt->vif_table[vifi];
@@ -2134,15 +2138,14 @@ forward:
if (psend != -1) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
- ip6mr_forward2(net, mrt, skb2,
- c, psend);
+ ip6mr_forward2(net, mrt, skb2, psend);
}
psend = ct;
}
}
last_forward:
if (psend != -1) {
- ip6mr_forward2(net, mrt, skb, c, psend);
+ ip6mr_forward2(net, mrt, skb, psend);
return;
}
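
Both ioctl hunks apply the standard Spectre-v1 hardening: after the bounds check, the index is clamped with array_index_nospec() so a mispredicted branch cannot speculatively read beyond vif_table[]. The pattern in isolation (struct and field names follow the surrounding code; the helper itself is illustrative):

#include <linux/mroute_base.h>
#include <linux/nospec.h>

static struct vif_device *vif_lookup(struct mr_table *mrt, unsigned int mifi)
{
    if (mifi >= mrt->maxvif)
        return NULL;
    /* Clamped even under misprediction: never indexes past maxvif. */
    mifi = array_index_nospec(mifi, mrt->maxvif);
    return &mrt->vif_table[mifi];
}
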
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 381ce38940ae..973e215c3114 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -486,7 +486,7 @@ sticky_done:
retv = -EFAULT;
break;
}
- if (sk->sk_bound_dev_if && pkt.ipi6_ifindex != sk->sk_bound_dev_if)
+ if (!sk_dev_equal_l3scope(sk, pkt.ipi6_ifindex))
goto e_inval;
np->sticky_pktinfo.ipi6_ifindex = pkt.ipi6_ifindex;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 5ae8e1c51079..8b075f0bc351 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -24,7 +24,8 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
unsigned int hh_len;
struct dst_entry *dst;
struct flowi6 fl6 = {
- .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
+ .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
+ rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
.flowi6_mark = skb->mark,
.flowi6_uid = sock_net_uid(net, sk),
.daddr = iph->daddr,
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 200c0c235565..9ea43d5256e0 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
obj-$(CONFIG_IP6_NF_NAT) += ip6table_nat.o
-nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o
+nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o
nf_nat_ipv6-$(CONFIG_NF_NAT_MASQUERADE_IPV6) += nf_nat_masquerade_ipv6.o
obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 491f808e356a..29c7f1915a96 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -58,8 +58,12 @@ static int __init masquerade_tg6_init(void)
int err;
err = xt_register_target(&masquerade_tg6_reg);
- if (err == 0)
- nf_nat_masquerade_ipv6_register_notifier();
+ if (err)
+ return err;
+
+ err = nf_nat_masquerade_ipv6_register_notifier();
+ if (err)
+ xt_unregister_target(&masquerade_tg6_reg);
return err;
}
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index d219979c3e52..181da2c40f9a 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -341,7 +341,7 @@ static bool
nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_device *dev)
{
struct sk_buff *fp, *head = fq->q.fragments;
- int payload_len;
+ int payload_len, delta;
u8 ecn;
inet_frag_kill(&fq->q);
@@ -363,10 +363,16 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
return false;
}
+ delta = - head->truesize;
+
/* Head of list must not be cloned. */
if (skb_unclone(head, GFP_ATOMIC))
return false;
+ delta += head->truesize;
+ if (delta)
+ add_frag_mem_limit(fq->q.net, delta);
+
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
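
The delta bookkeeping exists because skb_unclone() may have to reallocate a cloned head, growing head->truesize; without charging the difference, the per-netns fragment memory accounting drifts and can underflow when the queue is freed. The identical fix appears in net/ipv6/reassembly.c further down. As a sketch (names follow the surrounding code; the helper is illustrative):

static int unclone_head_charged(struct frag_queue *fq, struct sk_buff *head)
{
    int delta = -head->truesize;

    if (skb_unclone(head, GFP_ATOMIC))
        return -ENOMEM;

    delta += head->truesize;    /* growth, if the data was reallocated */
    if (delta)
        add_frag_mem_limit(fq->q.net, delta);
    return 0;
}
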
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index ca6d38698b1a..23022447eb49 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -61,22 +61,8 @@ static void nf_nat_ipv6_decode_session(struct sk_buff *skb,
}
#endif
-static bool nf_nat_ipv6_in_range(const struct nf_conntrack_tuple *t,
- const struct nf_nat_range2 *range)
-{
- return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
- ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
-}
-
-static u32 nf_nat_ipv6_secure_port(const struct nf_conntrack_tuple *t,
- __be16 dport)
-{
- return secure_ipv6_port_ephemeral(t->src.u3.ip6, t->dst.u3.ip6, dport);
-}
-
static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
unsigned int iphdroff,
- const struct nf_nat_l4proto *l4proto,
const struct nf_conntrack_tuple *target,
enum nf_nat_manip_type maniptype)
{
@@ -96,8 +82,8 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
goto manip_addr;
if ((frag_off & htons(~0x7)) == 0 &&
- !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
- target, maniptype))
+ !nf_nat_l4proto_manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
+ target, maniptype))
return false;
/* must reload, offset might have changed */
@@ -171,8 +157,6 @@ static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
.l3proto = NFPROTO_IPV6,
- .secure_port = nf_nat_ipv6_secure_port,
- .in_range = nf_nat_ipv6_in_range,
.manip_pkt = nf_nat_ipv6_manip_pkt,
.csum_update = nf_nat_ipv6_csum_update,
.csum_recalc = nf_nat_ipv6_csum_recalc,
@@ -196,7 +180,6 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
} *inside;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);
- const struct nf_nat_l4proto *l4proto;
struct nf_conntrack_tuple target;
unsigned long statusbit;
@@ -227,9 +210,8 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
if (!(ct->status & statusbit))
return 1;
- l4proto = __nf_nat_l4proto_find(NFPROTO_IPV6, inside->ip6.nexthdr);
if (!nf_nat_ipv6_manip_pkt(skb, hdrlen + sizeof(inside->icmp6),
- l4proto, &ct->tuplehash[!dir].tuple, !manip))
+ &ct->tuplehash[!dir].tuple, !manip))
return 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -244,8 +226,7 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
}
nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
- l4proto = __nf_nat_l4proto_find(NFPROTO_IPV6, IPPROTO_ICMPV6);
- if (!nf_nat_ipv6_manip_pkt(skb, 0, l4proto, &target, manip))
+ if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip))
return 0;
return 1;
@@ -415,26 +396,12 @@ EXPORT_SYMBOL_GPL(nf_nat_l3proto_ipv6_unregister_fn);
static int __init nf_nat_l3proto_ipv6_init(void)
{
- int err;
-
- err = nf_nat_l4proto_register(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
- if (err < 0)
- goto err1;
- err = nf_nat_l3proto_register(&nf_nat_l3proto_ipv6);
- if (err < 0)
- goto err2;
- return err;
-
-err2:
- nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
-err1:
- return err;
+ return nf_nat_l3proto_register(&nf_nat_l3proto_ipv6);
}
static void __exit nf_nat_l3proto_ipv6_exit(void)
{
nf_nat_l3proto_unregister(&nf_nat_l3proto_ipv6);
- nf_nat_l4proto_unregister(NFPROTO_IPV6, &nf_nat_l4proto_icmpv6);
}
MODULE_LICENSE("GPL");
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index 3e4bf2286abe..0ad0da5a2600 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -132,8 +132,8 @@ static void iterate_cleanup_work(struct work_struct *work)
* of ipv6 addresses being deleted), we also need to add an upper
* limit to the number of queued work items.
*/
-static int masq_inet_event(struct notifier_block *this,
- unsigned long event, void *ptr)
+static int masq_inet6_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
{
struct inet6_ifaddr *ifa = ptr;
const struct net_device *dev;
@@ -171,30 +171,53 @@ static int masq_inet_event(struct notifier_block *this,
return NOTIFY_DONE;
}
-static struct notifier_block masq_inet_notifier = {
- .notifier_call = masq_inet_event,
+static struct notifier_block masq_inet6_notifier = {
+ .notifier_call = masq_inet6_event,
};
-static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
+static int masq_refcnt;
+static DEFINE_MUTEX(masq_mutex);
-void nf_nat_masquerade_ipv6_register_notifier(void)
+int nf_nat_masquerade_ipv6_register_notifier(void)
{
+ int ret = 0;
+
+ mutex_lock(&masq_mutex);
/* check if the notifier is already set */
- if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
- return;
+ if (++masq_refcnt > 1)
+ goto out_unlock;
+
+ ret = register_netdevice_notifier(&masq_dev_notifier);
+ if (ret)
+ goto err_dec;
+
+ ret = register_inet6addr_notifier(&masq_inet6_notifier);
+ if (ret)
+ goto err_unregister;
- register_netdevice_notifier(&masq_dev_notifier);
- register_inet6addr_notifier(&masq_inet_notifier);
+ mutex_unlock(&masq_mutex);
+ return ret;
+
+err_unregister:
+ unregister_netdevice_notifier(&masq_dev_notifier);
+err_dec:
+ masq_refcnt--;
+out_unlock:
+ mutex_unlock(&masq_mutex);
+ return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
void nf_nat_masquerade_ipv6_unregister_notifier(void)
{
+ mutex_lock(&masq_mutex);
/* check if the notifier still has clients */
- if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
- return;
+ if (--masq_refcnt > 0)
+ goto out_unlock;
- unregister_inet6addr_notifier(&masq_inet_notifier);
+ unregister_inet6addr_notifier(&masq_inet6_notifier);
unregister_netdevice_notifier(&masq_dev_notifier);
+out_unlock:
+ mutex_unlock(&masq_mutex);
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
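
The switch from atomic_t to a mutex-guarded integer is what makes error unwinding possible here: registering the two notifiers can now fail partway, and the refcount is only left incremented once both succeeded. The shape of the pattern, reduced to a single notifier (masq_dev_notifier is the one from this file):

#include <linux/mutex.h>
#include <linux/netdevice.h>

static int masq_refcnt;             /* guarded by masq_mutex */
static DEFINE_MUTEX(masq_mutex);

static int masq_register_once(void)
{
    int ret = 0;

    mutex_lock(&masq_mutex);
    if (++masq_refcnt > 1)          /* someone already registered */
        goto out_unlock;

    ret = register_netdevice_notifier(&masq_dev_notifier);
    if (ret)
        masq_refcnt--;              /* undo: registration failed */
out_unlock:
    mutex_unlock(&masq_mutex);
    return ret;
}
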
diff --git a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c b/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
deleted file mode 100644
index d9bf42ba44fa..000000000000
--- a/net/ipv6/netfilter/nf_nat_proto_icmpv6.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Based on Rusty Russell's IPv4 ICMP NAT code. Development of IPv6
- * NAT funded by Astaro.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/icmpv6.h>
-
-#include <linux/netfilter.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-
-static bool
-icmpv6_in_range(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max)
-{
- return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
- ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
-}
-
-static void
-icmpv6_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct)
-{
- static u16 id;
- unsigned int range_size;
- unsigned int i;
-
- range_size = ntohs(range->max_proto.icmp.id) -
- ntohs(range->min_proto.icmp.id) + 1;
-
- if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
- range_size = 0xffff;
-
- for (i = 0; ; ++id) {
- tuple->src.u.icmp.id = htons(ntohs(range->min_proto.icmp.id) +
- (id % range_size));
- if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
- return;
- }
-}
-
-static bool
-icmpv6_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype)
-{
- struct icmp6hdr *hdr;
-
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
- return false;
-
- hdr = (struct icmp6hdr *)(skb->data + hdroff);
- l3proto->csum_update(skb, iphdroff, &hdr->icmp6_cksum,
- tuple, maniptype);
- if (hdr->icmp6_type == ICMPV6_ECHO_REQUEST ||
- hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
- inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
- hdr->icmp6_identifier,
- tuple->src.u.icmp.id, false);
- hdr->icmp6_identifier = tuple->src.u.icmp.id;
- }
- return true;
-}
-
-const struct nf_nat_l4proto nf_nat_l4proto_icmpv6 = {
- .l4proto = IPPROTO_ICMPV6,
- .manip_pkt = icmpv6_manip_pkt,
- .in_range = icmpv6_in_range,
- .unique_tuple = icmpv6_unique_tuple,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
-#endif
-};
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 24858402e374..b9c8a763c863 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -131,6 +131,7 @@ EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put);
void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
{
+ struct net_device *br_indev __maybe_unused;
struct sk_buff *nskb;
struct tcphdr _otcph;
const struct tcphdr *otcph;
@@ -197,15 +198,18 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
* build the eth header using the original destination's MAC as the
* source, and send the RST packet directly.
*/
- if (oldskb->nf_bridge) {
+ br_indev = nf_bridge_get_physindev(oldskb);
+ if (br_indev) {
struct ethhdr *oeth = eth_hdr(oldskb);
- nskb->dev = nf_bridge_get_physindev(oldskb);
+ nskb->dev = br_indev;
nskb->protocol = htons(ETH_P_IPV6);
ip6h->payload_len = htons(sizeof(struct tcphdr));
if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
- oeth->h_source, oeth->h_dest, nskb->len) < 0)
+ oeth->h_source, oeth->h_dest, nskb->len) < 0) {
+ kfree_skb(nskb);
return;
+ }
dev_queue_xmit(nskb);
} else
#endif
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index dd0122f3cffe..e06c82e9dfcd 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -70,7 +70,9 @@ static int __init nft_masq_ipv6_module_init(void)
if (ret < 0)
return ret;
- nf_nat_masquerade_ipv6_register_notifier();
+ ret = nf_nat_masquerade_ipv6_register_notifier();
+ if (ret)
+ nft_unregister_expr(&nft_masq_ipv6_type);
return ret;
}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 5e0efd3954e9..5a426226c762 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -86,9 +86,8 @@ struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
!ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
continue;
- if (sk->sk_bound_dev_if &&
- sk->sk_bound_dev_if != dif &&
- sk->sk_bound_dev_if != sdif)
+ if (!raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
+ dif, sdif))
continue;
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
@@ -658,6 +657,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->ip_summed = CHECKSUM_NONE;
+ skb_setup_tx_timestamp(skb, sockc->tsflags);
+
if (flags & MSG_CONFIRM)
skb_set_dst_pending_confirm(skb, 1);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 5c3c92713096..a5bb59ee50ac 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -281,7 +281,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
{
struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
struct sk_buff *fp, *head = fq->q.fragments;
- int payload_len;
+ int payload_len, delta;
unsigned int nhoff;
int sum_truesize;
u8 ecn;
@@ -322,10 +322,16 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
if (payload_len > IPV6_MAXPLEN)
goto out_oversize;
+ delta = - head->truesize;
+
/* Head of list must not be cloned. */
if (skb_unclone(head, GFP_ATOMIC))
goto out_oom;
+ delta += head->truesize;
+ if (delta)
+ add_frag_mem_limit(fq->q.net, delta);
+
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
@@ -378,6 +384,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
kfree_skb_partial(fp, headstolen);
} else {
+ fp->sk = NULL;
if (!skb_shinfo(head)->frag_list)
skb_shinfo(head)->frag_list = fp;
head->data_len += fp->len;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 059f0531f7c1..194bc162866d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2977,7 +2977,8 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
if (!rt)
goto out;
- rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len);
+ rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
+ extack);
if (IS_ERR(rt->fib6_metrics)) {
err = PTR_ERR(rt->fib6_metrics);
/* Do not leave garbage there. */
@@ -3710,7 +3711,7 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
if (!f6i)
return ERR_PTR(-ENOMEM);
- f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0);
+ f6i->fib6_metrics = ip_fib_metrics_init(net, NULL, 0, NULL);
f6i->dst_nocount = true;
f6i->dst_host = true;
f6i->fib6_protocol = RTPROT_KERNEL;
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index a8854dd3e9c5..8181ee7e1e27 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -347,6 +347,7 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct flowi6 fl6;
+ memset(&fl6, 0, sizeof(fl6));
fl6.daddr = hdr->daddr;
fl6.saddr = hdr->saddr;
fl6.flowlabel = ip6_flowinfo(hdr);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 03e6b7a2bc53..b81eb7cb815e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -349,7 +349,7 @@ static void tcp_v6_mtu_reduced(struct sock *sk)
}
}
-static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
@@ -371,17 +371,19 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!sk) {
__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
- return;
+ return -ENOENT;
}
if (sk->sk_state == TCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
- return;
+ return 0;
}
seq = ntohl(th->seq);
fatal = icmpv6_err_convert(type, code, &err);
- if (sk->sk_state == TCP_NEW_SYN_RECV)
- return tcp_req_err(sk, seq, fatal);
+ if (sk->sk_state == TCP_NEW_SYN_RECV) {
+ tcp_req_err(sk, seq, fatal);
+ return 0;
+ }
bh_lock_sock(sk);
if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -467,6 +469,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
out:
bh_unlock_sock(sk);
sock_put(sk);
+ return 0;
}
@@ -734,6 +737,7 @@ static void tcp_v6_init_req(struct request_sock *req,
const struct sock *sk_listener,
struct sk_buff *skb)
{
+ bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
struct inet_request_sock *ireq = inet_rsk(req);
const struct ipv6_pinfo *np = inet6_sk(sk_listener);
@@ -741,7 +745,7 @@ static void tcp_v6_init_req(struct request_sock *req,
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
/* So that link locals have meaning */
- if (!sk_listener->sk_bound_dev_if &&
+ if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
ireq->ir_iif = tcp_v6_iif(skb);
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index e72947c99454..3179c425d7ff 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -9,14 +9,15 @@
*
* TCPv6 GSO/GRO support
*/
+#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
-static struct sk_buff *tcp6_gro_receive(struct list_head *head,
- struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
/* Don't bother verifying checksum if we're going to flush anyway. */
if (!NAPI_GRO_CB(skb)->flush &&
@@ -29,7 +30,7 @@ static struct sk_buff *tcp6_gro_receive(struct list_head *head,
return tcp_gro_receive(head, skb);
}
-static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
+INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
struct tcphdr *th = tcp_hdr(skb);
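
Dropping "static" and annotating the GRO hooks with INDIRECT_CALLABLE_SCOPE lets callers name them in the indirect-call wrappers, turning a retpoline-guarded indirect call into a compare-and-direct-call on the hot path. A caller-side sketch, assuming the macros from <linux/indirect_call_wrapper.h> introduced in this series (call_gro() is illustrative):

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>

INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *,
                                                           struct sk_buff *));

static struct sk_buff *call_gro(struct sk_buff *(*cb)(struct list_head *,
                                                      struct sk_buff *),
                                struct list_head *head, struct sk_buff *skb)
{
    /* Expands to: if (cb == tcp6_gro_receive) direct call, else a
     * (possibly retpolined) indirect call through cb. */
    return INDIRECT_CALL_1(cb, tcp6_gro_receive, head, skb);
}
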
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index dae25cad05cd..1991dede7367 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -134,24 +134,28 @@ drop:
return 0;
}
-static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct xfrm6_tunnel *handler;
for_each_tunnel_rcu(tunnel6_handlers, handler)
if (!handler->err_handler(skb, opt, type, code, offset, info))
- break;
+ return 0;
+
+ return -ENOENT;
}
-static void tunnel46_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int tunnel46_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct xfrm6_tunnel *handler;
for_each_tunnel_rcu(tunnel46_handlers, handler)
if (!handler->err_handler(skb, opt, type, code, offset, info))
- break;
+ return 0;
+
+ return -ENOENT;
}
static const struct inet6_protocol tunnel6_protocol = {
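
Here, and in the xfrm6_protocol.c, udp.c, udplite.c and tcp_ipv6.c hunks nearby, ICMPv6 error handlers change from void to int for one shared reason: 0 now means the error was consumed, while -ENOENT tells the ICMPv6 layer that no handler claimed it, which is what lets fallbacks such as the UDP tunnel lookup below run. A caller-side sketch (deliver_icmp_err() is illustrative; inet6_protocol is the real ops struct):

#include <linux/printk.h>
#include <net/protocol.h>

static void deliver_icmp_err(const struct inet6_protocol *ipprot,
                             struct sk_buff *skb, struct inet6_skb_parm *opt,
                             u8 type, u8 code, int offset, __be32 info)
{
    if (!ipprot->err_handler)
        return;

    if (ipprot->err_handler(skb, opt, type, code, offset, info))
        pr_debug("ICMPv6 error unclaimed, fallbacks may run\n");
}
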
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d2d97d07ef27..9cbf363172bd 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -45,6 +45,7 @@
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
+#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
@@ -117,12 +118,16 @@ static int compute_score(struct sock *sk, struct net *net,
{
int score;
struct inet_sock *inet;
+ bool dev_match;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
sk->sk_family != PF_INET6)
return -1;
+ if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return -1;
+
score = 0;
inet = inet_sk(sk);
@@ -132,27 +137,16 @@ static int compute_score(struct sock *sk, struct net *net,
score++;
}
- if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
- if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
- return -1;
- score++;
- }
-
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
return -1;
score++;
}
- if (sk->sk_bound_dev_if || exact_dif) {
- bool dev_match = (sk->sk_bound_dev_if == dif ||
- sk->sk_bound_dev_if == sdif);
-
- if (!dev_match)
- return -1;
- if (sk->sk_bound_dev_if)
- score++;
- }
+ dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
+ if (!dev_match)
+ return -1;
+ score++;
if (sk->sk_incoming_cpu == raw_smp_processor_id())
score++;
@@ -200,66 +194,32 @@ struct sock *__udp6_lib_lookup(struct net *net,
int dif, int sdif, struct udp_table *udptable,
struct sk_buff *skb)
{
- struct sock *sk, *result;
unsigned short hnum = ntohs(dport);
- unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
- struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
+ unsigned int hash2, slot2;
+ struct udp_hslot *hslot2;
+ struct sock *result;
bool exact_dif = udp6_lib_exact_dif_match(net, skb);
- int score, badness;
- u32 hash = 0;
- if (hslot->count > 10) {
- hash2 = ipv6_portaddr_hash(net, daddr, hnum);
+ hash2 = ipv6_portaddr_hash(net, daddr, hnum);
+ slot2 = hash2 & udptable->mask;
+ hslot2 = &udptable->hash2[slot2];
+
+ result = udp6_lib_lookup2(net, saddr, sport,
+ daddr, hnum, dif, sdif, exact_dif,
+ hslot2, skb);
+ if (!result) {
+ hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
slot2 = hash2 & udptable->mask;
+
hslot2 = &udptable->hash2[slot2];
- if (hslot->count < hslot2->count)
- goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
- daddr, hnum, dif, sdif, exact_dif,
- hslot2, skb);
- if (!result) {
- unsigned int old_slot2 = slot2;
- hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
- slot2 = hash2 & udptable->mask;
- /* avoid searching the same slot again. */
- if (unlikely(slot2 == old_slot2))
- return result;
-
- hslot2 = &udptable->hash2[slot2];
- if (hslot->count < hslot2->count)
- goto begin;
-
- result = udp6_lib_lookup2(net, saddr, sport,
- daddr, hnum, dif, sdif,
- exact_dif, hslot2,
- skb);
- }
- if (unlikely(IS_ERR(result)))
- return NULL;
- return result;
- }
-begin:
- result = NULL;
- badness = -1;
- sk_for_each_rcu(sk, &hslot->head) {
- score = compute_score(sk, net, saddr, sport, daddr, hnum, dif,
- sdif, exact_dif);
- if (score > badness) {
- if (sk->sk_reuseport) {
- hash = udp6_ehashfn(net, daddr, hnum,
- saddr, sport);
- result = reuseport_select_sock(sk, hash, skb,
- sizeof(struct udphdr));
- if (unlikely(IS_ERR(result)))
- return NULL;
- if (result)
- return result;
- }
- result = sk;
- badness = score;
- }
+ &in6addr_any, hnum, dif, sdif,
+ exact_dif, hslot2,
+ skb);
}
+ if (unlikely(IS_ERR(result)))
+ return NULL;
return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
@@ -329,6 +289,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int err;
int is_udplite = IS_UDPLITE(sk);
bool checksum_valid = false;
+ struct udp_mib *mib;
int is_udp4;
if (flags & MSG_ERRQUEUE)
@@ -352,6 +313,7 @@ try_again:
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
+ mib = __UDPX_MIB(sk, is_udp4);
/*
* If checksum is needed at all, try to do it while copying the
@@ -380,24 +342,13 @@ try_again:
if (unlikely(err)) {
if (!peeked) {
atomic_inc(&sk->sk_drops);
- if (is_udp4)
- UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
- is_udplite);
- else
- UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
- is_udplite);
+ SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
}
kfree_skb(skb);
return err;
}
- if (!peeked) {
- if (is_udp4)
- UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
- is_udplite);
- else
- UDP6_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
- is_udplite);
- }
+ if (!peeked)
+ SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
sock_recv_ts_and_drops(msg, sk, skb);
@@ -421,6 +372,9 @@ try_again:
*addr_len = sizeof(*sin6);
}
+ if (udp_sk(sk)->gro_enabled)
+ udp_cmsg_recv(msg, sk, skb);
+
if (np->rxopt.all)
ip6_datagram_recv_common_ctl(sk, msg, skb);
@@ -443,17 +397,8 @@ try_again:
csum_copy_err:
if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
udp_skb_destructor)) {
- if (is_udp4) {
- UDP_INC_STATS(sock_net(sk),
- UDP_MIB_CSUMERRORS, is_udplite);
- UDP_INC_STATS(sock_net(sk),
- UDP_MIB_INERRORS, is_udplite);
- } else {
- UDP6_INC_STATS(sock_net(sk),
- UDP_MIB_CSUMERRORS, is_udplite);
- UDP6_INC_STATS(sock_net(sk),
- UDP_MIB_INERRORS, is_udplite);
- }
+ SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
+ SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
}
kfree_skb(skb);
@@ -463,15 +408,106 @@ csum_copy_err:
goto try_again;
}
-void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
- u8 type, u8 code, int offset, __be32 info,
- struct udp_table *udptable)
+DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
+void udpv6_encap_enable(void)
+{
+ static_branch_inc(&udpv6_encap_needed_key);
+}
+EXPORT_SYMBOL(udpv6_encap_enable);
+
+/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
+ * through error handlers in encapsulations looking for a match.
+ */
+static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
+ struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, u32 info)
+{
+ int i;
+
+ for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
+ int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, u32 info);
+
+ if (!ip6tun_encaps[i])
+ continue;
+ handler = rcu_dereference(ip6tun_encaps[i]->err_handler);
+ if (handler && !handler(skb, opt, type, code, offset, info))
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/* Try to match ICMP errors to UDP tunnels by looking up a socket without
+ * reversing source and destination port: this will match tunnels that force the
+ * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
+ * lwtunnels might actually break this assumption by being configured with
+ * different destination ports on endpoints, in this case we won't be able to
+ * trace ICMP messages back to them.
+ *
+ * If this doesn't match any socket, probe tunnels with arbitrary destination
+ * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
+ * we've sent packets to won't necessarily match the local destination port.
+ *
+ * Then ask the tunnel implementation to match the error against a valid
+ * association.
+ *
+ * Return an error if we can't find a match, the socket if we need further
+ * processing, zero otherwise.
+ */
+static struct sock *__udp6_lib_err_encap(struct net *net,
+ const struct ipv6hdr *hdr, int offset,
+ struct udphdr *uh,
+ struct udp_table *udptable,
+ struct sk_buff *skb,
+ struct inet6_skb_parm *opt,
+ u8 type, u8 code, __be32 info)
+{
+ int network_offset, transport_offset;
+ struct sock *sk;
+
+ network_offset = skb_network_offset(skb);
+ transport_offset = skb_transport_offset(skb);
+
+ /* Network header needs to point to the outer IPv6 header inside ICMP */
+ skb_reset_network_header(skb);
+
+ /* Transport header needs to point to the UDP header */
+ skb_set_transport_header(skb, offset);
+
+ sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
+ &hdr->saddr, uh->dest,
+ inet6_iif(skb), 0, udptable, skb);
+ if (sk) {
+ int (*lookup)(struct sock *sk, struct sk_buff *skb);
+ struct udp_sock *up = udp_sk(sk);
+
+ lookup = READ_ONCE(up->encap_err_lookup);
+ if (!lookup || lookup(sk, skb))
+ sk = NULL;
+ }
+
+ if (!sk) {
+ sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
+ offset, info));
+ }
+
+ skb_set_transport_header(skb, transport_offset);
+ skb_set_network_header(skb, network_offset);
+
+ return sk;
+}
+
+int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ u8 type, u8 code, int offset, __be32 info,
+ struct udp_table *udptable)
{
struct ipv6_pinfo *np;
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct in6_addr *saddr = &hdr->saddr;
const struct in6_addr *daddr = &hdr->daddr;
struct udphdr *uh = (struct udphdr *)(skb->data+offset);
+ bool tunnel = false;
struct sock *sk;
int harderr;
int err;
@@ -480,9 +516,23 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
inet6_iif(skb), inet6_sdif(skb), udptable, skb);
if (!sk) {
- __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
- ICMP6_MIB_INERRORS);
- return;
+ /* No socket for error: try tunnels before discarding */
+ sk = ERR_PTR(-ENOENT);
+ if (static_branch_unlikely(&udpv6_encap_needed_key)) {
+ sk = __udp6_lib_err_encap(net, hdr, offset, uh,
+ udptable, skb,
+ opt, type, code, info);
+ if (!sk)
+ return 0;
+ }
+
+ if (IS_ERR(sk)) {
+ __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
+ ICMP6_MIB_INERRORS);
+ return PTR_ERR(sk);
+ }
+
+ tunnel = true;
}
harderr = icmpv6_err_convert(type, code, &err);
@@ -496,10 +546,19 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
harderr = 1;
}
if (type == NDISC_REDIRECT) {
- ip6_sk_redirect(skb, sk);
+ if (tunnel) {
+ ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
+ sk->sk_mark, sk->sk_uid);
+ } else {
+ ip6_sk_redirect(skb, sk);
+ }
goto out;
}
+ /* Tunnels don't have an application socket: don't pass errors back */
+ if (tunnel)
+ goto out;
+
if (!np->recverr) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
@@ -510,7 +569,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
sk->sk_err = err;
sk->sk_error_report(sk);
out:
- return;
+ return 0;
}
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
@@ -541,21 +600,14 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return 0;
}
-static __inline__ void udpv6_err(struct sk_buff *skb,
- struct inet6_skb_parm *opt, u8 type,
- u8 code, int offset, __be32 info)
+static __inline__ int udpv6_err(struct sk_buff *skb,
+ struct inet6_skb_parm *opt, u8 type,
+ u8 code, int offset, __be32 info)
{
- __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
+ return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
-DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
-void udpv6_encap_enable(void)
-{
- static_branch_enable(&udpv6_encap_needed_key);
-}
-EXPORT_SYMBOL(udpv6_encap_enable);
-
-static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int is_udplite = IS_UDPLITE(sk);
@@ -638,10 +690,32 @@ drop:
return -1;
}
+static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff *next, *segs;
+ int ret;
+
+ if (likely(!udp_unexpected_gso(sk, skb)))
+ return udpv6_queue_rcv_one_skb(sk, skb);
+
+ __skb_push(skb, -skb_mac_offset(skb));
+ segs = udp_rcv_segment(sk, skb, false);
+ for (skb = segs; skb; skb = next) {
+ next = skb->next;
+ __skb_pull(skb, skb_transport_offset(skb));
+
+ ret = udpv6_queue_rcv_one_skb(sk, skb);
+ if (ret > 0)
+ ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
+ true);
+ }
+ return 0;
+}
+
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
__be16 loc_port, const struct in6_addr *loc_addr,
__be16 rmt_port, const struct in6_addr *rmt_addr,
- int dif, unsigned short hnum)
+ int dif, int sdif, unsigned short hnum)
{
struct inet_sock *inet = inet_sk(sk);
@@ -653,7 +727,7 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
(inet->inet_dport && inet->inet_dport != rmt_port) ||
(!ipv6_addr_any(&sk->sk_v6_daddr) &&
!ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
- (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
+ !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
(!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
return false;
@@ -687,6 +761,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
unsigned int offset = offsetof(typeof(*sk), sk_node);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
int dif = inet6_iif(skb);
+ int sdif = inet6_sdif(skb);
struct hlist_node *node;
struct sk_buff *nskb;
@@ -701,7 +776,8 @@ start_lookup:
sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
- uh->source, saddr, dif, hnum))
+ uh->source, saddr, dif, sdif,
+ hnum))
continue;
/* If zero checksum and no_check is not on for
* the socket then skip it.
@@ -1458,11 +1534,15 @@ void udpv6_destroy_sock(struct sock *sk)
udp_v6_flush_pending_frames(sk);
release_sock(sk);
- if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
- void (*encap_destroy)(struct sock *sk);
- encap_destroy = READ_ONCE(up->encap_destroy);
- if (encap_destroy)
- encap_destroy(sk);
+ if (static_branch_unlikely(&udpv6_encap_needed_key)) {
+ if (up->encap_type) {
+ void (*encap_destroy)(struct sock *sk);
+ encap_destroy = READ_ONCE(up->encap_destroy);
+ if (encap_destroy)
+ encap_destroy(sk);
+ }
+ if (up->encap_enabled)
+ static_branch_dec(&udpv6_encap_needed_key);
}
inet6_destroy_sock(sk);
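
For the fallback to actually reach a tunnel, two things must hold: the tunnel socket provides an encap_err_lookup callback, and the udpv6_encap_needed_key static key is enabled; otherwise __udp6_lib_err() keeps the old behaviour of bumping ICMP6_MIB_INERRORS and bailing out. A minimal opt-in sketch (helper names are illustrative; real drivers would normally go through setup_udp_tunnel_sock()):

#include <linux/udp.h>
#include <net/udp.h>

/* Return 0 if the ICMP error embedded in skb matches one of this
 * tunnel's sessions, nonzero otherwise.  Illustrative stub. */
static int my_tnl_err_lookup(struct sock *sk, struct sk_buff *skb)
{
    return -ENOENT;
}

static void my_tnl_enable_err_path(struct socket *sock)
{
    udp_sk(sock->sk)->encap_err_lookup = my_tnl_err_lookup;
    udpv6_encap_enable();   /* bumps udpv6_encap_needed_key */
}
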
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 7903e21c178b..5730e6503cb4 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -9,8 +9,8 @@
#include <net/transp_v6.h>
int __udp6_lib_rcv(struct sk_buff *, struct udp_table *, int);
-void __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
- __be32, struct udp_table *);
+int __udp6_lib_err(struct sk_buff *, struct inet6_skb_parm *, u8, u8, int,
+ __be32, struct udp_table *);
int udp_v6_get_port(struct sock *sk, unsigned short snum);
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 1b8e161ac527..83b11d0ac091 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -11,6 +11,7 @@
*/
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/udp.h>
@@ -114,8 +115,8 @@ out:
return segs;
}
-static struct sk_buff *udp6_gro_receive(struct list_head *head,
- struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
struct udphdr *uh = udp_gro_udphdr(skb);
@@ -142,18 +143,14 @@ flush:
return NULL;
}
-static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
+INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
{
const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
- if (uh->check) {
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
+ if (uh->check)
uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr,
&ipv6h->daddr, 0);
- } else {
- skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
- }
return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
}
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 5000ad6878e6..a125aebc29e5 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -20,11 +20,12 @@ static int udplitev6_rcv(struct sk_buff *skb)
return __udp6_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE);
}
-static void udplitev6_err(struct sk_buff *skb,
+static int udplitev6_err(struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
- __udp6_lib_err(skb, opt, type, code, offset, info, &udplite_table);
+ return __udp6_lib_err(skb, opt, type, code, offset, info,
+ &udplite_table);
}
static const struct inet6_protocol udplitev6_protocol = {
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 9ef490dddcea..a52cb3fc6df5 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -86,14 +86,16 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
{
struct net *net = dev_net(skb->dev);
struct xfrm_state *x = NULL;
+ struct sec_path *sp;
int i = 0;
- if (secpath_set(skb)) {
+ sp = secpath_set(skb);
+ if (!sp) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
goto drop;
}
- if (1 + skb->sp->len == XFRM_MAX_DEPTH) {
+ if (1 + sp->len == XFRM_MAX_DEPTH) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
goto drop;
}
@@ -145,7 +147,7 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
goto drop;
}
- skb->sp->xvec[skb->sp->len++] = x;
+ sp->xvec[sp->len++] = x;
spin_lock(&x->lock);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index d35bcf92969c..769f8f78d3b8 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -262,7 +262,6 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
if (xdst->u.rt6.rt6i_idev->dev == dev) {
struct inet6_dev *loopback_idev =
in6_dev_get(dev_net(dev)->loopback_dev);
- BUG_ON(!loopback_idev);
do {
in6_dev_put(xdst->u.rt6.rt6i_idev);
diff --git a/net/ipv6/xfrm6_protocol.c b/net/ipv6/xfrm6_protocol.c
index b2dc8ce49378..cc979b702c89 100644
--- a/net/ipv6/xfrm6_protocol.c
+++ b/net/ipv6/xfrm6_protocol.c
@@ -80,14 +80,16 @@ static int xfrm6_esp_rcv(struct sk_buff *skb)
return 0;
}
-static void xfrm6_esp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int xfrm6_esp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct xfrm6_protocol *handler;
for_each_protocol_rcu(esp6_handlers, handler)
if (!handler->err_handler(skb, opt, type, code, offset, info))
- break;
+ return 0;
+
+ return -ENOENT;
}
static int xfrm6_ah_rcv(struct sk_buff *skb)
@@ -107,14 +109,16 @@ static int xfrm6_ah_rcv(struct sk_buff *skb)
return 0;
}
-static void xfrm6_ah_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int xfrm6_ah_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct xfrm6_protocol *handler;
for_each_protocol_rcu(ah6_handlers, handler)
if (!handler->err_handler(skb, opt, type, code, offset, info))
- break;
+ return 0;
+
+ return -ENOENT;
}
static int xfrm6_ipcomp_rcv(struct sk_buff *skb)
@@ -134,14 +138,16 @@ static int xfrm6_ipcomp_rcv(struct sk_buff *skb)
return 0;
}
-static void xfrm6_ipcomp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int xfrm6_ipcomp_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct xfrm6_protocol *handler;
for_each_protocol_rcu(ipcomp6_handlers, handler)
if (!handler->err_handler(skb, opt, type, code, offset, info))
- break;
+ return 0;
+
+ return -ENOENT;
}
static const struct inet6_protocol esp6_protocol = {
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 4a46df8441c9..f5b4febeaa25 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -144,6 +144,9 @@ static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
index = __xfrm6_tunnel_spi_check(net, spi);
if (index >= 0)
goto alloc_spi;
+
+ if (spi == XFRM6_TUNNEL_SPI_MAX)
+ break;
}
for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
index = __xfrm6_tunnel_spi_check(net, spi);
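
The added break matters because XFRM6_TUNNEL_SPI_MAX is 0xffffffff: with spi being a u32, spi++ at the top of the range wraps to 0 and the loop condition never becomes false, so the scan would spin forever once the upper range is exhausted. The loop in isolation (spi_is_free() is an illustrative stand-in for __xfrm6_tunnel_spi_check()):

static u32 scan_spi_range(u32 start)
{
    u32 spi;

    for (spi = start; spi <= XFRM6_TUNNEL_SPI_MAX; spi++) {
        if (spi_is_free(spi))
            return spi;
        if (spi == XFRM6_TUNNEL_SPI_MAX)
            break;      /* u32 wraparound would loop forever */
    }
    return 0;           /* nothing free in [start, MAX] */
}
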
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 0bed4cc20603..78ea5a739d10 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1873,30 +1873,26 @@ static void iucv_callback_txdone(struct iucv_path *path,
struct sock *sk = path->private;
struct sk_buff *this = NULL;
struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
- struct sk_buff *list_skb = list->next;
+ struct sk_buff *list_skb;
unsigned long flags;
bh_lock_sock(sk);
- if (!skb_queue_empty(list)) {
- spin_lock_irqsave(&list->lock, flags);
- while (list_skb != (struct sk_buff *)list) {
- if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
- this = list_skb;
- break;
- }
- list_skb = list_skb->next;
+ spin_lock_irqsave(&list->lock, flags);
+ skb_queue_walk(list, list_skb) {
+ if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
+ this = list_skb;
+ break;
}
- if (this)
- __skb_unlink(this, list);
-
- spin_unlock_irqrestore(&list->lock, flags);
+ }
+ if (this)
+ __skb_unlink(this, list);
+ spin_unlock_irqrestore(&list->lock, flags);
- if (this) {
- kfree_skb(this);
- /* wake up any process waiting for sending */
- iucv_sock_wake_msglim(sk);
- }
+ if (this) {
+ kfree_skb(this);
+ /* wake up any process waiting for sending */
+ iucv_sock_wake_msglim(sk);
}
if (sk->sk_state == IUCV_CLOSING) {
@@ -2284,11 +2280,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
list = &iucv->send_skb_q;
spin_lock_irqsave(&list->lock, flags);
- if (skb_queue_empty(list))
- goto out_unlock;
- list_skb = list->next;
- nskb = list_skb->next;
- while (list_skb != (struct sk_buff *)list) {
+ skb_queue_walk_safe(list, list_skb, nskb) {
if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
switch (n) {
case TX_NOTIFY_OK:
@@ -2321,10 +2313,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
}
break;
}
- list_skb = nskb;
- nskb = nskb->next;
}
-out_unlock:
spin_unlock_irqrestore(&list->lock, flags);
if (sk->sk_state == IUCV_CLOSING) {
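
Both iucv hunks replace open-coded sk_buff list walking with the standard helpers, which handle the empty-queue case and the head sentinel implicitly: skb_queue_walk() must not unlink its cursor, while skb_queue_walk_safe() caches the next pointer so the current skb may be unlinked mid-walk (as in the txnotify hunk). The lookup-and-steal shape as a sketch (match() is an illustrative predicate; the real code compares IUCV message tags):

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static struct sk_buff *steal_matching(struct sk_buff_head *list,
                                      bool (*match)(struct sk_buff *))
{
    struct sk_buff *skb, *found = NULL;
    unsigned long flags;

    spin_lock_irqsave(&list->lock, flags);
    skb_queue_walk(list, skb) {
        if (match(skb)) {
            found = skb;
            break;
        }
    }
    if (found)
        __skb_unlink(found, list);
    spin_unlock_irqrestore(&list->lock, flags);

    return found;
}
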
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9d61266526e7..655c787f9d54 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2020,7 +2020,7 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
static inline int pfkey_xfrm_policy2sec_ctx_size(const struct xfrm_policy *xp)
{
- struct xfrm_sec_ctx *xfrm_ctx = xp->security;
+ struct xfrm_sec_ctx *xfrm_ctx = xp->security;
if (xfrm_ctx) {
int len = sizeof(struct sadb_x_sec_ctx);
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index 8da86ceca33d..309dee76724e 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -47,6 +47,24 @@ int l3mdev_master_ifindex_rcu(const struct net_device *dev)
EXPORT_SYMBOL_GPL(l3mdev_master_ifindex_rcu);
/**
+ * l3mdev_master_upper_ifindex_by_index_rcu - get index of upper l3 master
+ * device
+ * @net: network namespace for device index lookup
+ * @ifindex: targeted interface
+ */
+int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
+{
+ struct net_device *dev;
+
+ dev = dev_get_by_index_rcu(net, ifindex);
+ while (dev && !netif_is_l3_master(dev))
+ dev = netdev_master_upper_dev_get(dev);
+
+ return dev ? dev->ifindex : 0;
+}
+EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu);
+
+/**
* l3mdev_fib_table - get FIB table id associated with an L3
* master interface
* @dev: targeted interface
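
Callers must hold rcu_read_lock() around the new helper, since it uses dev_get_by_index_rcu() and walks the upper-device chain without taking references. Usage sketch (report_l3_master() is illustrative):

#include <linux/rcupdate.h>
#include <net/l3mdev.h>

static int report_l3_master(struct net *net, int ifindex)
{
    int master;

    rcu_read_lock();
    master = l3mdev_master_upper_ifindex_by_index_rcu(net, ifindex);
    rcu_read_unlock();

    return master;  /* 0 when ifindex has no L3 master above it */
}
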
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index f869e35d0974..be471fe95048 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -57,14 +57,13 @@ comment "Some wireless drivers require a rate control algorithm"
depends on MAC80211 && MAC80211_HAS_RC=n
config MAC80211_MESH
- bool "Enable mac80211 mesh networking (pre-802.11s) support"
+ bool "Enable mac80211 mesh networking support"
depends on MAC80211
---help---
- This options enables support of Draft 802.11s mesh networking.
- The implementation is based on Draft 2.08 of the Mesh Networking
- amendment. However, no compliance with that draft is claimed or even
- possible, as drafts leave a number of identifiers to be defined after
- ratification. For more information visit http://o11s.org/.
+ Select this option to enable 802.11 mesh operation in mac80211
+ drivers that support it. 802.11 mesh connects multiple stations
+ over (possibly multi-hop) wireless links to form a single logical
+ LAN.
config MAC80211_LEDS
bool "Enable LED triggers"
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 51622333d460..de65fe3ed9cc 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -800,8 +800,8 @@ static int ieee80211_set_ftm_responder_params(
u8 *pos;
int len;
- if ((!lci || !lci_len) && (!civicloc || !civicloc_len))
- return 1;
+ if (!lci_len && !civicloc_len)
+ return 0;
bss_conf = &sdata->vif.bss_conf;
old = bss_conf->ftmr_params;
@@ -2028,6 +2028,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
nconf->dot11MeshAwakeWindowDuration;
if (_chg_mesh_attr(NL80211_MESHCONF_PLINK_TIMEOUT, mask))
conf->plink_timeout = nconf->plink_timeout;
+ if (_chg_mesh_attr(NL80211_MESHCONF_CONNECTED_TO_GATE, mask))
+ conf->dot11MeshConnectedToMeshGate =
+ nconf->dot11MeshConnectedToMeshGate;
ieee80211_mbss_info_change_notify(sdata, BSS_CHANGED_BEACON);
return 0;
}
@@ -2891,7 +2894,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len +
beacon->proberesp_ies_len + beacon->assocresp_ies_len +
- beacon->probe_resp_len;
+ beacon->probe_resp_len + beacon->lci_len + beacon->civicloc_len;
new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL);
if (!new_beacon)
@@ -2934,8 +2937,9 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
pos += beacon->probe_resp_len;
}
- if (beacon->ftm_responder)
- new_beacon->ftm_responder = beacon->ftm_responder;
+
+ /* might copy -1, meaning no changes requested */
+ new_beacon->ftm_responder = beacon->ftm_responder;
if (beacon->lci) {
new_beacon->lci_len = beacon->lci_len;
new_beacon->lci = pos;
@@ -3849,6 +3853,26 @@ ieee80211_get_ftm_responder_stats(struct wiphy *wiphy,
return drv_get_ftm_responder_stats(local, sdata, ftm_stats);
}
+static int
+ieee80211_start_pmsr(struct wiphy *wiphy, struct wireless_dev *dev,
+ struct cfg80211_pmsr_request *request)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(dev);
+
+ return drv_start_pmsr(local, sdata, request);
+}
+
+static void
+ieee80211_abort_pmsr(struct wiphy *wiphy, struct wireless_dev *dev,
+ struct cfg80211_pmsr_request *request)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(dev);
+
+ return drv_abort_pmsr(local, sdata, request);
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -3944,4 +3968,6 @@ const struct cfg80211_ops mac80211_config_ops = {
.tx_control_port = ieee80211_tx_control_port,
.get_txq_stats = ieee80211_get_txq_stats,
.get_ftm_responder_stats = ieee80211_get_ftm_responder_stats,
+ .start_pmsr = ieee80211_start_pmsr,
+ .abort_pmsr = ieee80211_abort_pmsr,
};
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index c813207bb123..cff0fb3578c9 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -641,6 +641,8 @@ IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval,
IEEE80211_IF_FILE(power_mode, u.mesh.mshcfg.power_mode, DEC);
IEEE80211_IF_FILE(dot11MeshAwakeWindowDuration,
u.mesh.mshcfg.dot11MeshAwakeWindowDuration, DEC);
+IEEE80211_IF_FILE(dot11MeshConnectedToMeshGate,
+ u.mesh.mshcfg.dot11MeshConnectedToMeshGate, DEC);
#endif
#define DEBUGFS_ADD_MODE(name, mode) \
@@ -762,6 +764,7 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval);
MESHPARAMS_ADD(power_mode);
MESHPARAMS_ADD(dot11MeshAwakeWindowDuration);
+ MESHPARAMS_ADD(dot11MeshConnectedToMeshGate);
#undef MESHPARAMS_ADD
}
#endif
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index af5185a836e5..b753194710ad 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -795,22 +795,22 @@ static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf,
#define PRINT_NSS_SUPP(f, n) \
do { \
- int i; \
+ int _i; \
u16 v = le16_to_cpu(nss->f); \
p += scnprintf(p, buf_sz + buf - p, n ": %#.4x\n", v); \
- for (i = 0; i < 8; i += 2) { \
- switch ((v >> i) & 0x3) { \
+ for (_i = 0; _i < 8; _i += 2) { \
+ switch ((v >> _i) & 0x3) { \
case 0: \
- PRINT(n "-%d-SUPPORT-0-7", i / 2); \
+ PRINT(n "-%d-SUPPORT-0-7", _i / 2); \
break; \
case 1: \
- PRINT(n "-%d-SUPPORT-0-9", i / 2); \
+ PRINT(n "-%d-SUPPORT-0-9", _i / 2); \
break; \
case 2: \
- PRINT(n "-%d-SUPPORT-0-11", i / 2); \
+ PRINT(n "-%d-SUPPORT-0-11", _i / 2); \
break; \
case 3: \
- PRINT(n "-%d-NOT-SUPPORTED", i / 2); \
+ PRINT(n "-%d-NOT-SUPPORTED", _i / 2); \
break; \
} \
} \
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 0b1747a2313d..3e0d5922a440 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1199,6 +1199,40 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
return ret;
}
+static inline int drv_start_pmsr(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_pmsr_request *request)
+{
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_start_pmsr(local, sdata);
+
+ if (local->ops->start_pmsr)
+ ret = local->ops->start_pmsr(&local->hw, &sdata->vif, request);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
+static inline void drv_abort_pmsr(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_pmsr_request *request)
+{
+ trace_drv_abort_pmsr(local, sdata);
+
+ might_sleep();
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ if (local->ops->abort_pmsr)
+ local->ops->abort_pmsr(&local->hw, &sdata->vif, request);
+ trace_drv_return_void(local);
+}
+
static inline int drv_start_nan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct cfg80211_nan_conf *conf)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 10a05062e4a0..7dfb4e2f98b2 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -500,6 +500,7 @@ struct ieee80211_if_managed {
unsigned int uapsd_max_sp_len;
int wmm_last_param_set;
+ int mu_edca_last_param_set;
u8 use_4addr;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5836ddeac9e3..4a6ff1482a9f 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -7,6 +7,7 @@
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (c) 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1015,6 +1016,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
if (local->open_count == 0)
ieee80211_clear_tx_pending(local);
+ sdata->vif.bss_conf.beacon_int = 0;
+
/*
* If the interface goes down while suspended, presumably because
* the device was unplugged and that happens before our resume,
@@ -1799,7 +1802,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
}
ieee80211_assign_perm_addr(local, ndev->perm_addr, type);
- if (params && is_valid_ether_addr(params->macaddr))
+ if (is_valid_ether_addr(params->macaddr))
memcpy(ndev->dev_addr, params->macaddr, ETH_ALEN);
else
memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
@@ -1868,11 +1871,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ieee80211_setup_sdata(sdata, type);
if (ndev) {
- if (params) {
- ndev->ieee80211_ptr->use_4addr = params->use_4addr;
- if (type == NL80211_IFTYPE_STATION)
- sdata->u.mgd.use_4addr = params->use_4addr;
- }
+ ndev->ieee80211_ptr->use_4addr = params->use_4addr;
+ if (type == NL80211_IFTYPE_STATION)
+ sdata->u.mgd.use_4addr = params->use_4addr;
ndev->features |= local->hw.netdev_features;
@@ -1949,6 +1950,8 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
WARN(local->open_count, "%s: open count remains %d\n",
wiphy_name(local->hw.wiphy), local->open_count);
+ ieee80211_txq_teardown_flows(local);
+
mutex_lock(&local->iflist_mtx);
list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
list_del(&sdata->list);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 83e71e6b2ebe..87a729926734 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1221,8 +1221,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
/* add one default STA interface if supported */
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION) &&
!ieee80211_hw_check(hw, NO_AUTO_VIF)) {
+ struct vif_params params = {0};
+
result = ieee80211_if_add(local, "wlan%d", NET_NAME_ENUM, NULL,
- NL80211_IFTYPE_STATION, NULL);
+ NL80211_IFTYPE_STATION, &params);
if (result)
wiphy_warn(local->hw.wiphy,
"Failed to add default virtual iface\n");
@@ -1262,7 +1264,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
rtnl_unlock();
ieee80211_led_exit(local);
ieee80211_wep_free(local);
- ieee80211_txq_teardown_flows(local);
fail_flows:
destroy_workqueue(local->workqueue);
fail_workqueue:
@@ -1288,7 +1289,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&local->ifa6_notifier);
#endif
- ieee80211_txq_teardown_flows(local);
rtnl_lock();
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 8bad414c52ad..c90452aa0c42 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -254,6 +254,9 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u8 *pos, neighbors;
u8 meshconf_len = sizeof(struct ieee80211_meshconf_ie);
+ bool is_connected_to_gate = ifmsh->num_gates > 0 ||
+ ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol ||
+ ifmsh->mshcfg.dot11MeshConnectedToMeshGate;
if (skb_tailroom(skb) < 2 + meshconf_len)
return -ENOMEM;
@@ -278,7 +281,7 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
/* Mesh Formation Info - number of neighbors */
neighbors = atomic_read(&ifmsh->estab_plinks);
neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS);
- *pos++ = neighbors << 1;
+ *pos++ = (neighbors << 1) | is_connected_to_gate;
/* Mesh capability */
*pos = 0x00;
*pos |= ifmsh->mshcfg.dot11MeshForwarding ?
@@ -1191,7 +1194,8 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
if (!sdata->u.mesh.user_mpm ||
sdata->u.mesh.mshcfg.rssi_threshold == 0 ||
sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
- mesh_neighbour_update(sdata, mgmt->sa, &elems);
+ mesh_neighbour_update(sdata, mgmt->sa, &elems,
+ rx_status);
}
if (ifmsh->sync_ops)
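
With this change, bit 0 of the Mesh Formation Info octet advertises "connected to mesh gate" while bits 1..6 keep carrying the clamped peering count. The encoding as an illustrative helper (constants are the ones used in the hunk above):

#include <linux/kernel.h>

static u8 mesh_formation_info(int neighbors, bool connected_to_gate)
{
    neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS);
    return (neighbors << 1) | (connected_to_gate ? 0x1 : 0x0);
}
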
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 21526630bf65..cad6592c52a1 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -273,7 +273,8 @@ int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
/* Mesh plinks */
void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
- u8 *hw_addr, struct ieee802_11_elems *ie);
+ u8 *hw_addr, struct ieee802_11_elems *ie,
+ struct ieee80211_rx_status *rx_status);
bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
void mesh_plink_timer(struct timer_list *t);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 5b5b0f95ffd1..33055c8ed37e 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -513,7 +513,8 @@ __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
static struct sta_info *
mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
- struct ieee802_11_elems *elems)
+ struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status)
{
struct sta_info *sta = NULL;
@@ -521,11 +522,17 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
if (sdata->u.mesh.user_mpm ||
sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
if (mesh_peer_accepts_plinks(elems) &&
- mesh_plink_availables(sdata))
+ mesh_plink_availables(sdata)) {
+ int sig = 0;
+
+ if (ieee80211_hw_check(&sdata->local->hw, SIGNAL_DBM))
+ sig = rx_status->signal;
+
cfg80211_notify_new_peer_candidate(sdata->dev, addr,
elems->ie_start,
elems->total_len,
- GFP_KERNEL);
+ sig, GFP_KERNEL);
+ }
} else
sta = __mesh_sta_info_alloc(sdata, addr);
@@ -538,13 +545,15 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
* @sdata: local meshif
* @addr: peer's address
* @elems: IEs from beacon or mesh peering frame.
+ * @rx_status: rx status of the frame, used for signal reporting
*
* Return existing or newly allocated sta_info under RCU read lock.
* (re)initialize with given IEs.
*/
static struct sta_info *
mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
- u8 *addr, struct ieee802_11_elems *elems) __acquires(RCU)
+ u8 *addr, struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status) __acquires(RCU)
{
struct sta_info *sta = NULL;
@@ -555,7 +564,7 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
} else {
rcu_read_unlock();
/* can't run atomic */
- sta = mesh_sta_info_alloc(sdata, addr, elems);
+ sta = mesh_sta_info_alloc(sdata, addr, elems, rx_status);
if (!sta) {
rcu_read_lock();
return NULL;
@@ -576,20 +585,25 @@ mesh_sta_info_get(struct ieee80211_sub_if_data *sdata,
* @sdata: local meshif
* @addr: peer's address
* @elems: IEs from beacon or mesh peering frame
+ * @rx_status: rx status of the frame, used for signal reporting
*
* Initiates peering if appropriate.
*/
void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
u8 *hw_addr,
- struct ieee802_11_elems *elems)
+ struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status)
{
struct sta_info *sta;
u32 changed = 0;
- sta = mesh_sta_info_get(sdata, hw_addr, elems);
+ sta = mesh_sta_info_get(sdata, hw_addr, elems, rx_status);
if (!sta)
goto out;
+ sta->mesh->connected_to_gate = elems->mesh_config->meshconf_form &
+ IEEE80211_MESHCONF_FORM_CONNECTED_TO_GATE;
+
if (mesh_peer_accepts_plinks(elems) &&
sta->mesh->plink_state == NL80211_PLINK_LISTEN &&
sdata->u.mesh.accepting_plinks &&
@@ -1069,7 +1083,8 @@ out:
static void
mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
- struct ieee802_11_elems *elems)
+ struct ieee802_11_elems *elems,
+ struct ieee80211_rx_status *rx_status)
{
struct sta_info *sta;
@@ -1134,7 +1149,7 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
if (event == OPN_ACPT) {
rcu_read_unlock();
/* allocate sta entry if necessary and update info */
- sta = mesh_sta_info_get(sdata, mgmt->sa, elems);
+ sta = mesh_sta_info_get(sdata, mgmt->sa, elems, rx_status);
if (!sta) {
mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
goto unlock_rcu;
@@ -1200,5 +1215,5 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
return;
}
ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems);
- mesh_process_plink_frame(sdata, mgmt, &elems);
+ mesh_process_plink_frame(sdata, mgmt, &elems, rx_status);
}
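A minimal standalone sketch (not part of the patch) of the signal-reporting gate threaded through mesh_plink.c above: a dBm signal is only passed on when the hardware flags it as meaningful, otherwise a neutral 0 is used. The struct and helper names are stand-ins for the kernel types.

#include <stdbool.h>
#include <stdio.h>

struct rx_status { int signal; };

static int reportable_signal(bool hw_signal_dbm, const struct rx_status *rx)
{
	/* a raw, unitless signal value would be misleading to userspace */
	return hw_signal_dbm ? rx->signal : 0;
}

int main(void)
{
	struct rx_status rx = { .signal = -42 };

	printf("%d %d\n", reportable_signal(true, &rx),
	       reportable_signal(false, &rx)); /* -42 0 */
	return 0;
}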
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d2bc8d57c87e..687821567287 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -916,6 +916,15 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
ieee80211_add_vht_ie(sdata, skb, sband,
&assoc_data->ap_vht_cap);
+ /*
+ * If the AP doesn't support HT, mark HE as disabled.
+ * If on the 5 GHz band, make sure it supports VHT.
+ */
+ if (ifmgd->flags & IEEE80211_STA_DISABLE_HT ||
+ (sband->band == NL80211_BAND_5GHZ &&
+ ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
ieee80211_add_he_ie(sdata, skb, sband);
@@ -1869,7 +1878,7 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
struct ieee80211_tx_queue_params params[IEEE80211_NUM_ACS];
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
size_t left;
- int count, ac;
+ int count, mu_edca_count, ac;
const u8 *pos;
u8 uapsd_queues = 0;
@@ -1889,9 +1898,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
uapsd_queues = ifmgd->uapsd_queues;
count = wmm_param[6] & 0x0f;
- if (count == ifmgd->wmm_last_param_set)
+ /* -1 is the initial value of ifmgd->mu_edca_last_param_set.
+ * If mu_edca was present before and has now disappeared, tell
+ * the driver about it.
+ */
+ mu_edca_count = mu_edca ? mu_edca->mu_qos_info & 0x0f : -1;
+ if (count == ifmgd->wmm_last_param_set &&
+ mu_edca_count == ifmgd->mu_edca_last_param_set)
return false;
ifmgd->wmm_last_param_set = count;
+ ifmgd->mu_edca_last_param_set = mu_edca_count;
pos = wmm_param + 8;
left = wmm_param_len - 8;
@@ -2766,6 +2782,7 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct sta_info *sta;
+ bool result = true;
sdata_info(sdata, "authenticated\n");
ifmgd->auth_data->done = true;
@@ -2778,15 +2795,18 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata,
sta = sta_info_get(sdata, bssid);
if (!sta) {
WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
- return false;
+ result = false;
+ goto out;
}
if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
sdata_info(sdata, "failed moving %pM to auth\n", bssid);
- return false;
+ result = false;
+ goto out;
}
- mutex_unlock(&sdata->local->sta_mtx);
- return true;
+out:
+ mutex_unlock(&sdata->local->sta_mtx);
+ return result;
}
static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
@@ -3058,6 +3078,19 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
}
}
+static bool ieee80211_twt_req_supported(const struct sta_info *sta,
+ const struct ieee802_11_elems *elems)
+{
+ if (elems->ext_capab_len < 10)
+ return false;
+
+ if (!(elems->ext_capab[9] & WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT))
+ return false;
+
+ return sta->sta.he_cap.he_cap_elem.mac_cap_info[0] &
+ IEEE80211_HE_MAC_CAP0_TWT_RES;
+}
+
static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss,
struct ieee80211_mgmt *mgmt, size_t len)
@@ -3211,16 +3244,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
goto out;
}
- /*
- * If AP doesn't support HT, or it doesn't have HE mandatory IEs, mark
- * HE as disabled. If on the 5GHz band, make sure it supports VHT.
- */
- if (ifmgd->flags & IEEE80211_STA_DISABLE_HT ||
- (sband->band == NL80211_BAND_5GHZ &&
- ifmgd->flags & IEEE80211_STA_DISABLE_VHT) ||
- (!elems.he_cap && !elems.he_operation))
- ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
-
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
(!elems.he_cap || !elems.he_operation)) {
mutex_unlock(&sdata->local->sta_mtx);
@@ -3247,8 +3270,11 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
sta);
bss_conf->he_support = sta->sta.he_cap.has_he;
+ bss_conf->twt_requester =
+ ieee80211_twt_req_supported(sta, &elems);
} else {
bss_conf->he_support = false;
+ bss_conf->twt_requester = false;
}
if (bss_conf->he_support) {
@@ -3333,6 +3359,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* 4-bit value.
*/
ifmgd->wmm_last_param_set = -1;
+ ifmgd->mu_edca_last_param_set = -1;
if (ifmgd->flags & IEEE80211_STA_DISABLE_WMM) {
ieee80211_set_wmm_default(sdata, false, false);
@@ -4656,8 +4683,10 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
}
- if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE) &&
- ieee80211_get_he_sta_cap(sband)) {
+ if (!ieee80211_get_he_sta_cap(sband))
+ ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE)) {
const struct cfg80211_bss_ies *ies;
const u8 *he_oper_ie;
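A minimal standalone sketch (not part of the patch) of the update-detection logic in the WMM hunk above, keyed on the 4-bit parameter set counts. -1 is the "never seen" sentinel, so a MU EDCA set that disappears after being present is treated as a change. Names are illustrative.

#include <stdbool.h>
#include <stdio.h>

static bool wmm_params_changed(int count, int mu_edca_count,
			       int *last_count, int *last_mu_edca_count)
{
	if (count == *last_count && mu_edca_count == *last_mu_edca_count)
		return false;	/* nothing new, skip reprogramming */
	*last_count = count;
	*last_mu_edca_count = mu_edca_count;
	return true;
}

int main(void)
{
	int last = -1, last_mu = -1;

	printf("%d\n", wmm_params_changed(3, 1, &last, &last_mu));  /* 1 */
	printf("%d\n", wmm_params_changed(3, 1, &last, &last_mu));  /* 0 */
	printf("%d\n", wmm_params_changed(3, -1, &last, &last_mu)); /* 1: MU EDCA gone */
	return 0;
}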
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3bd3b5769797..45aad3d3108c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -143,6 +143,9 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
/* allocate extra bitmaps */
if (status->chains)
len += 4 * hweight8(status->chains);
+ /* vendor presence bitmap */
+ if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
+ len += 4;
if (ieee80211_have_rx_timestamp(status)) {
len = ALIGN(len, 8);
@@ -207,8 +210,6 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
- /* vendor presence bitmap */
- len += 4;
/* alignment for fixed 6-byte vendor data header */
len = ALIGN(len, 2);
/* vendor data header */
@@ -753,6 +754,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
struct ieee80211_sub_if_data *monitor_sdata =
rcu_dereference(local->monitor_sdata);
bool only_monitor = false;
+ unsigned int min_head_len;
if (status->flag & RX_FLAG_RADIOTAP_HE)
rtap_space += sizeof(struct ieee80211_radiotap_he);
@@ -760,12 +762,18 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
rtap_space += sizeof(struct ieee80211_radiotap_he_mu);
+ if (status->flag & RX_FLAG_RADIOTAP_LSIG)
+ rtap_space += sizeof(struct ieee80211_radiotap_lsig);
+
if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
- struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;
+ struct ieee80211_vendor_radiotap *rtap =
+ (void *)(origskb->data + rtap_space);
rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
}
+ min_head_len = rtap_space;
+
/*
* First, we may need to make a copy of the skb because
* (1) we need to modify it for radiotap (if not present), and
@@ -775,18 +783,23 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
* the SKB because it has a bad FCS/PLCP checksum.
*/
- if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
- if (unlikely(origskb->len <= FCS_LEN)) {
- /* driver bug */
- WARN_ON(1);
- dev_kfree_skb(origskb);
- return NULL;
+ if (!(status->flag & RX_FLAG_NO_PSDU)) {
+ if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
+ if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
+ /* driver bug */
+ WARN_ON(1);
+ dev_kfree_skb(origskb);
+ return NULL;
+ }
+ present_fcs_len = FCS_LEN;
}
- present_fcs_len = FCS_LEN;
+
+ /* also consider the hdr->frame_control */
+ min_head_len += 2;
}
- /* ensure hdr->frame_control and vendor radiotap data are in skb head */
- if (!pskb_may_pull(origskb, 2 + rtap_space)) {
+ /* ensure that the expected data elements are in skb head */
+ if (!pskb_may_pull(origskb, min_head_len)) {
dev_kfree_skb(origskb);
return NULL;
}
@@ -1403,6 +1416,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
if (ieee80211_is_ctl(hdr->frame_control) ||
+ ieee80211_is_nullfunc(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1))
return RX_CONTINUE;
@@ -3063,7 +3077,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
cfg80211_sta_opmode_change_notify(sdata->dev,
rx->sta->addr,
&sta_opmode,
- GFP_KERNEL);
+ GFP_ATOMIC);
goto handled;
}
case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
@@ -3100,7 +3114,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
cfg80211_sta_opmode_change_notify(sdata->dev,
rx->sta->addr,
&sta_opmode,
- GFP_KERNEL);
+ GFP_ATOMIC);
goto handled;
}
default:
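A minimal standalone sketch (not part of the patch) of how the monitor path above derives the minimum number of bytes that must sit in the skb head before pulling. The lengths are stand-ins (the real code uses the radiotap struct sizes and also accounts for vendor data), but the shape of the computation matches the hunk.

#include <stdbool.h>
#include <stdio.h>

#define RTAP_HE_LEN	12
#define RTAP_HE_MU_LEN	12
#define RTAP_LSIG_LEN	4

static unsigned int monitor_min_head_len(bool he, bool he_mu, bool lsig,
					 bool no_psdu)
{
	unsigned int len = 0;

	if (he)
		len += RTAP_HE_LEN;
	if (he_mu)
		len += RTAP_HE_MU_LEN;
	if (lsig)
		len += RTAP_LSIG_LEN;
	/* with a PSDU present we also need hdr->frame_control (2 bytes) */
	if (!no_psdu)
		len += 2;
	return len;
}

int main(void)
{
	printf("%u\n", monitor_min_head_len(true, false, true, false)); /* 18 */
	return 0;
}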
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 5d2a11777718..95413413f98c 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -356,7 +356,7 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
{
struct ieee80211_local *local = hw_to_local(hw);
- bool hw_scan = local->ops->hw_scan;
+ bool hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning);
bool was_scanning = local->scanning;
struct cfg80211_scan_request *scan_req;
struct ieee80211_sub_if_data *scan_sdata;
@@ -606,6 +606,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req)
{
struct ieee80211_local *local = sdata->local;
+ bool hw_scan = local->ops->hw_scan;
int rc;
lockdep_assert_held(&local->mtx);
@@ -620,7 +621,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
return 0;
}
- if (local->ops->hw_scan) {
+ again:
+ if (hw_scan) {
u8 *ies;
local->hw_scan_ies_bufsize = local->scan_ies_len + req->ie_len;
@@ -679,7 +681,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
else
memcpy(local->scan_addr, sdata->vif.addr, ETH_ALEN);
- if (local->ops->hw_scan) {
+ if (hw_scan) {
__set_bit(SCAN_HW_SCANNING, &local->scanning);
} else if ((req->n_channels == 1) &&
(req->channels[0] == local->_oper_chandef.chan)) {
@@ -722,7 +724,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_idle(local);
- if (local->ops->hw_scan) {
+ if (hw_scan) {
WARN_ON(!ieee80211_prep_hw_scan(local));
rc = drv_hw_scan(local, sdata, local->hw_scan_req);
} else {
@@ -740,6 +742,18 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
RCU_INIT_POINTER(local->scan_sdata, NULL);
}
+ if (hw_scan && rc == 1) {
+ /*
+ * We can't fall back to software scan for P2P-GO,
+ * as it must keep updating NoA etc.
+ */
+ if (ieee80211_vif_type_p2p(&sdata->vif) ==
+ NL80211_IFTYPE_P2P_GO)
+ return -EOPNOTSUPP;
+ hw_scan = false;
+ goto again;
+ }
+
return rc;
}
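A minimal standalone sketch (not part of the patch) of the fallback pattern added to __ieee80211_start_scan above: a driver return of 1 from the HW scan hook means "retry as a software scan", except for a P2P GO, which must stay on the HW scan path. The stub hooks are invented for illustration.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int drv_hw_scan_stub(void) { return 1; } /* stand-in driver hook */
static int sw_scan_stub(void) { return 0; }

static int start_scan(bool have_hw_scan, bool is_p2p_go)
{
	bool hw_scan = have_hw_scan;
	int rc;

again:
	rc = hw_scan ? drv_hw_scan_stub() : sw_scan_stub();
	if (hw_scan && rc == 1) {
		if (is_p2p_go)
			return -EOPNOTSUPP; /* no software fallback for P2P GO */
		hw_scan = false;
		goto again;
	}
	return rc;
}

int main(void)
{
	printf("%d %d\n", start_scan(true, false), start_scan(true, true));
	return 0;
}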
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index fb8c2252ac0e..c4a8f115ed33 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2253,11 +2253,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
}
if (tidstats && !cfg80211_sinfo_alloc_tid_stats(sinfo, GFP_KERNEL)) {
- for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
- struct cfg80211_tid_stats *tidstats = &sinfo->pertid[i];
-
- sta_set_tidstats(sta, tidstats, i);
- }
+ for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++)
+ sta_set_tidstats(sta, &sinfo->pertid[i], i);
}
if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -2267,7 +2264,8 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
BIT_ULL(NL80211_STA_INFO_PLINK_STATE) |
BIT_ULL(NL80211_STA_INFO_LOCAL_PM) |
BIT_ULL(NL80211_STA_INFO_PEER_PM) |
- BIT_ULL(NL80211_STA_INFO_NONPEER_PM);
+ BIT_ULL(NL80211_STA_INFO_NONPEER_PM) |
+ BIT_ULL(NL80211_STA_INFO_CONNECTED_TO_GATE);
sinfo->llid = sta->mesh->llid;
sinfo->plid = sta->mesh->plid;
@@ -2279,6 +2277,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo,
sinfo->local_pm = sta->mesh->local_pm;
sinfo->peer_pm = sta->mesh->peer_pm;
sinfo->nonpeer_pm = sta->mesh->nonpeer_pm;
+ sinfo->connected_to_gate = sta->mesh->connected_to_gate;
#endif
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 9a04327d71d1..8eb29041be54 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -364,6 +364,7 @@ DECLARE_EWMA(mesh_fail_avg, 20, 8)
* @nonpeer_pm: STA power save mode towards non-peer neighbors
* @processed_beacon: set to true after peer rates and capabilities are
* processed
+ * @connected_to_gate: true if mesh STA has a path to a mesh gate
* @fail_avg: moving percentage of failed MSDUs
*/
struct mesh_sta {
@@ -381,6 +382,7 @@ struct mesh_sta {
u8 plink_retries;
bool processed_beacon;
+ bool connected_to_gate;
enum nl80211_plink_state plink_state;
u32 plink_timeout;
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index aa4afbf0abaf..3f0b96e1e02f 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -556,6 +556,11 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
}
ieee80211_led_tx(local);
+
+ if (skb_has_frag_list(skb)) {
+ kfree_skb_list(skb_shinfo(skb)->frag_list);
+ skb_shinfo(skb)->frag_list = NULL;
+ }
}
/*
@@ -964,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
/* Track when last TDLS packet was ACKed */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
sta->status_stats.last_tdls_pkt_time = jiffies;
+ } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+ return;
} else {
ieee80211_lost_packet(sta, info);
}
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 588c51a67c89..35ea0dcb55e6 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -1052,10 +1052,10 @@ TRACE_EVENT(drv_ampdu_action,
);
TRACE_EVENT(drv_get_survey,
- TP_PROTO(struct ieee80211_local *local, int idx,
+ TP_PROTO(struct ieee80211_local *local, int _idx,
struct survey_info *survey),
- TP_ARGS(local, idx, survey),
+ TP_ARGS(local, _idx, survey),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -1064,7 +1064,7 @@ TRACE_EVENT(drv_get_survey,
TP_fast_assign(
LOCAL_ASSIGN;
- __entry->idx = idx;
+ __entry->idx = _idx;
),
TP_printk(
@@ -1882,6 +1882,18 @@ TRACE_EVENT(drv_del_nan_func,
)
);
+DEFINE_EVENT(local_sdata_evt, drv_start_pmsr,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
+
+DEFINE_EVENT(local_sdata_evt, drv_abort_pmsr,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
+
/*
* Tracing for API calls that drivers call.
*/
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e0ccee23fbcd..f170d6c6629a 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -439,8 +439,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
info->hw_queue = tx->sdata->vif.cab_queue;
- /* no stations in PS mode */
- if (!atomic_read(&ps->num_sta_ps))
+ /* no stations in PS mode and no buffered packets */
+ if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
return TX_CONTINUE;
info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
@@ -3218,6 +3218,9 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
return false;
+ if (skb_is_gso(skb))
+ return false;
+
if (!txq)
return false;
@@ -3242,7 +3245,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
tin = &txqi->tin;
flow = fq_flow_classify(fq, tin, skb, fq_flow_get_default_func);
head = skb_peek_tail(&flow->queue);
- if (!head)
+ if (!head || skb_is_gso(head))
goto out;
orig_len = head->len;
@@ -3583,7 +3586,7 @@ begin:
skb_queue_splice_tail(&tx.skbs, &txqi->frags);
}
- if (skb && skb_has_frag_list(skb) &&
+ if (skb_has_frag_list(skb) &&
!ieee80211_hw_check(&local->hw, TX_FRAG_LIST)) {
if (skb_linearize(skb)) {
ieee80211_free_txskb(&local->hw, skb);
@@ -4579,7 +4582,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_TODS);
if (qos) {
- __le16 qos = cpu_to_le16(7);
+ __le16 qoshdr = cpu_to_le16(7);
BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
IEEE80211_STYPE_NULLFUNC) !=
@@ -4588,7 +4591,7 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
skb->priority = 7;
skb_set_queue_mapping(skb, IEEE80211_AC_VO);
- skb_put_data(skb, &qos, sizeof(qos));
+ skb_put_data(skb, &qoshdr, sizeof(qoshdr));
}
memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
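A minimal standalone sketch (not part of the patch) of the new A-MSDU guards in tx.c above: neither a GSO candidate frame nor a queue whose head is already GSO may take part in aggregation. The fake skb struct is illustrative only.

#include <stdbool.h>
#include <stdio.h>

struct fake_skb { bool gso; };

static bool can_amsdu_aggregate(const struct fake_skb *skb,
				const struct fake_skb *head)
{
	if (skb->gso)
		return false;	/* candidate is GSO */
	if (!head || head->gso)
		return false;	/* nothing queued, or queued head is GSO */
	return true;
}

int main(void)
{
	struct fake_skb a = { false }, g = { true };

	printf("%d %d %d\n", can_amsdu_aggregate(&a, &a),
	       can_amsdu_aggregate(&g, &a), can_amsdu_aggregate(&a, &g));
	return 0;
}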
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index bec424316ea4..d0eb38b890aa 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -299,16 +299,16 @@ out:
spin_unlock_bh(&fq->lock);
}
-void ieee80211_wake_txqs(unsigned long data)
+static void
+__releases(&local->queue_stop_reason_lock)
+__acquires(&local->queue_stop_reason_lock)
+_ieee80211_wake_txqs(struct ieee80211_local *local, unsigned long *flags)
{
- struct ieee80211_local *local = (struct ieee80211_local *)data;
struct ieee80211_sub_if_data *sdata;
int n_acs = IEEE80211_NUM_ACS;
- unsigned long flags;
int i;
rcu_read_lock();
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
if (local->hw.queues < IEEE80211_NUM_ACS)
n_acs = 1;
@@ -317,7 +317,7 @@ void ieee80211_wake_txqs(unsigned long data)
if (local->queue_stop_reasons[i])
continue;
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, *flags);
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
int ac;
@@ -329,13 +329,22 @@ void ieee80211_wake_txqs(unsigned long data)
__ieee80211_wake_txqs(sdata, ac);
}
}
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ spin_lock_irqsave(&local->queue_stop_reason_lock, *flags);
}
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
rcu_read_unlock();
}
+void ieee80211_wake_txqs(unsigned long data)
+{
+ struct ieee80211_local *local = (struct ieee80211_local *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ _ieee80211_wake_txqs(local, &flags);
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+}
+
void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
{
struct ieee80211_sub_if_data *sdata;
@@ -371,7 +380,8 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason,
- bool refcounted)
+ bool refcounted,
+ unsigned long *flags)
{
struct ieee80211_local *local = hw_to_local(hw);
@@ -405,8 +415,19 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
} else
tasklet_schedule(&local->tx_pending_tasklet);
- if (local->ops->wake_tx_queue)
- tasklet_schedule(&local->wake_txqs_tasklet);
+ /*
+	 * Calling _ieee80211_wake_txqs here can be a problem because it may
+	 * release queue_stop_reason_lock, which has been taken by
+	 * __ieee80211_wake_queue's caller. Releasing someone else's lock is
+	 * not ideal, but it is safe here because every caller of
+	 * __ieee80211_wake_queue invokes it right before releasing the lock.
+ */
+ if (local->ops->wake_tx_queue) {
+ if (reason == IEEE80211_QUEUE_STOP_REASON_DRIVER)
+ tasklet_schedule(&local->wake_txqs_tasklet);
+ else
+ _ieee80211_wake_txqs(local, flags);
+ }
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -417,7 +438,7 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
unsigned long flags;
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- __ieee80211_wake_queue(hw, queue, reason, refcounted);
+ __ieee80211_wake_queue(hw, queue, reason, refcounted, &flags);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
@@ -514,7 +535,7 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
false);
__skb_queue_tail(&local->pending[queue], skb);
__ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
- false);
+ false, &flags);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
@@ -547,7 +568,7 @@ void ieee80211_add_pending_skbs(struct ieee80211_local *local,
for (i = 0; i < hw->queues; i++)
__ieee80211_wake_queue(hw, i,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
- false);
+ false, &flags);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
@@ -605,7 +626,7 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for_each_set_bit(i, &queues, hw->queues)
- __ieee80211_wake_queue(hw, i, reason, refcounted);
+ __ieee80211_wake_queue(hw, i, reason, refcounted, &flags);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
@@ -1202,6 +1223,8 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
if (pos[0] == WLAN_EID_EXT_HE_MU_EDCA &&
elen >= (sizeof(*elems->mu_edca_param_set) + 1)) {
elems->mu_edca_param_set = (void *)&pos[1];
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
} else if (pos[0] == WLAN_EID_EXT_HE_CAPABILITY) {
elems->he_cap = (void *)&pos[1];
elems->he_cap_len = elen - 1;
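A minimal standalone sketch (not part of the patch) of the lock-handoff pattern in the util.c hunks above, where a helper temporarily releases a lock its caller holds and re-acquires it before returning. Shown here with a pthread mutex instead of a kernel spinlock with irq flags; the contract is the same.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void wake_queues_locked(void)
{
	/* caller holds 'lock'; drop it across the potentially long walk */
	pthread_mutex_unlock(&lock);
	puts("waking queues without the lock held");
	pthread_mutex_lock(&lock);	/* restore the caller's invariant */
}

int main(void)
{
	pthread_mutex_lock(&lock);
	wake_queues_locked();
	pthread_mutex_unlock(&lock);
	return 0;
}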
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 73e8f347802e..bfe9ed9f4c48 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -30,13 +30,13 @@ int ieee80211_wep_init(struct ieee80211_local *local)
/* start WEP IV from a random value */
get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN);
- local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
+ local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, 0);
if (IS_ERR(local->wep_tx_tfm)) {
local->wep_rx_tfm = ERR_PTR(-EINVAL);
return PTR_ERR(local->wep_tx_tfm);
}
- local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
+ local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, 0);
if (IS_ERR(local->wep_rx_tfm)) {
crypto_free_cipher(local->wep_tx_tfm);
local->wep_tx_tfm = ERR_PTR(-EINVAL);
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index 1dae77c54009..87505600dbb2 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -73,10 +73,15 @@ enum {
#define NCSI_OEM_MFR_BCM_ID 0x113d
/* Broadcom specific OEM Command */
#define NCSI_OEM_BCM_CMD_GMA 0x01 /* CMD ID for Get MAC */
+/* Mellanox specific OEM Command */
+#define NCSI_OEM_MLX_CMD_GMA 0x00 /* CMD ID for Get MAC */
+#define NCSI_OEM_MLX_CMD_GMA_PARAM 0x1b /* Parameter for GMA */
/* OEM command payload lengths */
#define NCSI_OEM_BCM_CMD_GMA_LEN 12
+#define NCSI_OEM_MLX_CMD_GMA_LEN 8
/* Mac address offset in OEM response */
#define BCM_MAC_ADDR_OFFSET 28
+#define MLX_MAC_ADDR_OFFSET 8
struct ncsi_channel_version {
@@ -222,6 +227,10 @@ struct ncsi_package {
unsigned int channel_num; /* Number of channels */
struct list_head channels; /* List of channels */
struct list_head node; /* Form list of packages */
+
+ bool multi_channel; /* Enable multiple channels */
+ u32 channel_whitelist; /* Channels to configure */
+ struct ncsi_channel *preferred_channel; /* Primary channel */
};
struct ncsi_request {
@@ -287,16 +296,16 @@ struct ncsi_dev_priv {
#define NCSI_DEV_PROBED 1 /* Finalized NCSI topology */
#define NCSI_DEV_HWA 2 /* Enabled HW arbitration */
#define NCSI_DEV_RESHUFFLE 4
+#define NCSI_DEV_RESET 8 /* Reset state of NC */
unsigned int gma_flag; /* OEM GMA flag */
spinlock_t lock; /* Protect the NCSI device */
#if IS_ENABLED(CONFIG_IPV6)
unsigned int inet6_addr_num; /* Number of IPv6 addresses */
#endif
+ unsigned int package_probe_id;/* Current ID during probe */
unsigned int package_num; /* Number of packages */
struct list_head packages; /* List of packages */
struct ncsi_channel *hot_channel; /* Channel was ever active */
- struct ncsi_package *force_package; /* Force a specific package */
- struct ncsi_channel *force_channel; /* Force a specific channel */
struct ncsi_request requests[256]; /* Request table */
unsigned int request_id; /* Last used request ID */
#define NCSI_REQ_START_IDX 1
@@ -309,6 +318,9 @@ struct ncsi_dev_priv {
struct list_head node; /* Form NCSI device list */
#define NCSI_MAX_VLAN_VIDS 15
struct list_head vlan_vids; /* List of active VLAN IDs */
+
+ bool multi_package; /* Enable multiple packages */
+ u32 package_whitelist; /* Packages to configure */
};
struct ncsi_cmd_arg {
@@ -341,6 +353,7 @@ extern spinlock_t ncsi_dev_lock;
list_for_each_entry_rcu(nc, &np->channels, node)
/* Resources */
+int ncsi_reset_dev(struct ncsi_dev *nd);
void ncsi_start_channel_monitor(struct ncsi_channel *nc);
void ncsi_stop_channel_monitor(struct ncsi_channel *nc);
struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
@@ -361,6 +374,13 @@ struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
void ncsi_free_request(struct ncsi_request *nr);
struct ncsi_dev *ncsi_find_dev(struct net_device *dev);
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp);
+bool ncsi_channel_has_link(struct ncsi_channel *channel);
+bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
+ struct ncsi_channel *channel);
+int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
+ struct ncsi_package *np,
+ struct ncsi_channel *disable,
+ struct ncsi_channel *enable);
/* Packet handlers */
u32 ncsi_calculate_checksum(unsigned char *data, int len);
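A minimal standalone sketch (not part of the patch) of how the new whitelist fields in internal.h above are meant to be tested: one bit per package/channel ID, with UINT_MAX meaning "everything allowed". The helper name is illustrative.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

static bool id_whitelisted(unsigned int whitelist, unsigned int id)
{
	return whitelist & (0x1U << id);
}

int main(void)
{
	unsigned int all = UINT_MAX;	/* default: no restriction */
	unsigned int only3 = 0x1U << 3;	/* user pinned package 3 */

	printf("%d %d %d\n", id_whitelisted(all, 5),
	       id_whitelisted(only3, 3), id_whitelisted(only3, 5)); /* 1 1 0 */
	return 0;
}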
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index 25e483e8278b..26d67e27551f 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -50,13 +50,15 @@ static int ncsi_validate_aen_pkt(struct ncsi_aen_pkt_hdr *h,
static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
struct ncsi_aen_pkt_hdr *h)
{
- struct ncsi_aen_lsc_pkt *lsc;
- struct ncsi_channel *nc;
+ struct ncsi_channel *nc, *tmp;
struct ncsi_channel_mode *ncm;
- bool chained;
- int state;
unsigned long old_data, data;
+ struct ncsi_aen_lsc_pkt *lsc;
+ struct ncsi_package *np;
+ bool had_link, has_link;
unsigned long flags;
+ bool chained;
+ int state;
/* Find the NCSI channel */
ncsi_find_package_and_channel(ndp, h->common.channel, NULL, &nc);
@@ -73,6 +75,9 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
ncm->data[2] = data;
ncm->data[4] = ntohl(lsc->oem_status);
+ had_link = !!(old_data & 0x1);
+ has_link = !!(data & 0x1);
+
netdev_dbg(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
nc->id, data & 0x1 ? "up" : "down");
@@ -80,22 +85,60 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
state = nc->state;
spin_unlock_irqrestore(&nc->lock, flags);
- if (!((old_data ^ data) & 0x1) || chained)
- return 0;
- if (!(state == NCSI_CHANNEL_INACTIVE && (data & 0x1)) &&
- !(state == NCSI_CHANNEL_ACTIVE && !(data & 0x1)))
+ if (state == NCSI_CHANNEL_INACTIVE)
+ netdev_warn(ndp->ndev.dev,
+ "NCSI: Inactive channel %u received AEN!\n",
+ nc->id);
+
+ if ((had_link == has_link) || chained)
return 0;
- if (!(ndp->flags & NCSI_DEV_HWA) &&
- state == NCSI_CHANNEL_ACTIVE)
- ndp->flags |= NCSI_DEV_RESHUFFLE;
+ if (!ndp->multi_package && !nc->package->multi_channel) {
+ if (had_link) {
+ ndp->flags |= NCSI_DEV_RESHUFFLE;
+ ncsi_stop_channel_monitor(nc);
+ spin_lock_irqsave(&ndp->lock, flags);
+ list_add_tail_rcu(&nc->link, &ndp->channel_queue);
+ spin_unlock_irqrestore(&ndp->lock, flags);
+ return ncsi_process_next_channel(ndp);
+ }
+ /* Configured channel came up */
+ return 0;
+ }
- ncsi_stop_channel_monitor(nc);
- spin_lock_irqsave(&ndp->lock, flags);
- list_add_tail_rcu(&nc->link, &ndp->channel_queue);
- spin_unlock_irqrestore(&ndp->lock, flags);
+ if (had_link) {
+ ncm = &nc->modes[NCSI_MODE_TX_ENABLE];
+ if (ncsi_channel_is_last(ndp, nc)) {
+ /* No channels left, reconfigure */
+ return ncsi_reset_dev(&ndp->ndev);
+ } else if (ncm->enable) {
+ /* Need to failover Tx channel */
+ ncsi_update_tx_channel(ndp, nc->package, nc, NULL);
+ }
+ } else if (has_link && nc->package->preferred_channel == nc) {
+ /* Return Tx to preferred channel */
+ ncsi_update_tx_channel(ndp, nc->package, NULL, nc);
+ } else if (has_link) {
+ NCSI_FOR_EACH_PACKAGE(ndp, np) {
+ NCSI_FOR_EACH_CHANNEL(np, tmp) {
+ /* Enable Tx on this channel if the current Tx
+ * channel is down.
+ */
+ ncm = &tmp->modes[NCSI_MODE_TX_ENABLE];
+ if (ncm->enable &&
+ !ncsi_channel_has_link(tmp)) {
+ ncsi_update_tx_channel(ndp, nc->package,
+ tmp, nc);
+ break;
+ }
+ }
+ }
+ }
- return ncsi_process_next_channel(ndp);
+ /* Leave configured channels active in a multi-channel scenario so
+ * AEN events are still received.
+ */
+ return 0;
}
static int ncsi_aen_handler_cr(struct ncsi_dev_priv *ndp,
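A minimal standalone sketch (not part of the patch) of the edge detection introduced in the LSC handler above: only a transition of the link bit matters, and the action taken depends on its direction. The enum names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

enum lsc_action { LSC_NONE, LSC_FAILOVER, LSC_FAILBACK };

static enum lsc_action classify_lsc(unsigned long old_data,
				    unsigned long new_data)
{
	bool had_link = old_data & 0x1;
	bool has_link = new_data & 0x1;

	if (had_link == has_link)
		return LSC_NONE;	/* no edge, nothing to do */
	return had_link ? LSC_FAILOVER : LSC_FAILBACK;
}

int main(void)
{
	printf("%d %d %d\n", classify_lsc(0x1, 0x1),
	       classify_lsc(0x1, 0x0), classify_lsc(0x0, 0x1)); /* 0 1 2 */
	return 0;
}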
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index bfc43b28c7a6..31359d5e14ad 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -28,6 +28,29 @@
LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);
+bool ncsi_channel_has_link(struct ncsi_channel *channel)
+{
+ return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
+}
+
+bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
+ struct ncsi_channel *channel)
+{
+ struct ncsi_package *np;
+ struct ncsi_channel *nc;
+
+ NCSI_FOR_EACH_PACKAGE(ndp, np)
+ NCSI_FOR_EACH_CHANNEL(np, nc) {
+ if (nc == channel)
+ continue;
+ if (nc->state == NCSI_CHANNEL_ACTIVE &&
+ ncsi_channel_has_link(nc))
+ return false;
+ }
+
+ return true;
+}
+
static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
struct ncsi_dev *nd = &ndp->ndev;
@@ -52,7 +75,7 @@ static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
continue;
}
- if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
+ if (ncsi_channel_has_link(nc)) {
spin_unlock_irqrestore(&nc->lock, flags);
nd->link_up = 1;
goto report;
@@ -113,10 +136,8 @@ static void ncsi_channel_monitor(struct timer_list *t)
default:
netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
nc->id);
- if (!(ndp->flags & NCSI_DEV_HWA)) {
- ncsi_report_link(ndp, true);
- ndp->flags |= NCSI_DEV_RESHUFFLE;
- }
+ ncsi_report_link(ndp, true);
+ ndp->flags |= NCSI_DEV_RESHUFFLE;
ncsi_stop_channel_monitor(nc);
@@ -269,6 +290,7 @@ struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
np->ndp = ndp;
spin_lock_init(&np->lock);
INIT_LIST_HEAD(&np->channels);
+ np->channel_whitelist = UINT_MAX;
spin_lock_irqsave(&ndp->lock, flags);
tmp = ncsi_find_package(ndp, id);
@@ -442,12 +464,14 @@ static void ncsi_request_timeout(struct timer_list *t)
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_dev *nd = &ndp->ndev;
- struct ncsi_package *np = ndp->active_package;
- struct ncsi_channel *nc = ndp->active_channel;
+ struct ncsi_package *np;
+ struct ncsi_channel *nc, *tmp;
struct ncsi_cmd_arg nca;
unsigned long flags;
int ret;
+ np = ndp->active_package;
+ nc = ndp->active_channel;
nca.ndp = ndp;
nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
switch (nd->state) {
@@ -523,6 +547,15 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
if (ret)
goto error;
+ NCSI_FOR_EACH_CHANNEL(np, tmp) {
+			/* If there is another channel active on this package,
+			 * do not deselect the package.
+ */
+ if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
+ nd->state = ncsi_dev_state_suspend_done;
+ break;
+ }
+ }
break;
case ncsi_dev_state_suspend_deselect:
ndp->pending_req_num = 1;
@@ -541,8 +574,10 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
spin_lock_irqsave(&nc->lock, flags);
nc->state = NCSI_CHANNEL_INACTIVE;
spin_unlock_irqrestore(&nc->lock, flags);
- ncsi_process_next_channel(ndp);
-
+ if (ndp->flags & NCSI_DEV_RESET)
+ ncsi_reset_dev(nd);
+ else
+ ncsi_process_next_channel(ndp);
break;
default:
netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
@@ -675,12 +710,38 @@ static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
return ret;
}
+static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
+{
+ union {
+ u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
+ u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
+ } u;
+ int ret = 0;
+
+ nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
+
+ memset(&u, 0, sizeof(u));
+ u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
+ u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
+ u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
+
+ nca->data = u.data_u8;
+
+ ret = ncsi_xmit_cmd(nca);
+ if (ret)
+ netdev_err(nca->ndp->ndev.dev,
+ "NCSI: Failed to transmit cmd 0x%x during configure\n",
+ nca->type);
+ return ret;
+}
+
/* OEM Command handlers initialization */
static struct ncsi_oem_gma_handler {
unsigned int mfr_id;
int (*handler)(struct ncsi_cmd_arg *nca);
} ncsi_oem_gma_handlers[] = {
- { NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm }
+ { NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
+ { NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx }
};
static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
@@ -717,13 +778,144 @@ static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
+/* Determine if a given channel from the channel_queue should be used for Tx */
+static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
+ struct ncsi_channel *nc)
+{
+ struct ncsi_channel_mode *ncm;
+ struct ncsi_channel *channel;
+ struct ncsi_package *np;
+
+ /* Check if any other channel has Tx enabled; a channel may have already
+ * been configured and removed from the channel queue.
+ */
+ NCSI_FOR_EACH_PACKAGE(ndp, np) {
+ if (!ndp->multi_package && np != nc->package)
+ continue;
+ NCSI_FOR_EACH_CHANNEL(np, channel) {
+ ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
+ if (ncm->enable)
+ return false;
+ }
+ }
+
+ /* This channel is the preferred channel and has link */
+ list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
+ np = channel->package;
+ if (np->preferred_channel &&
+ ncsi_channel_has_link(np->preferred_channel)) {
+ return np->preferred_channel == nc;
+ }
+ }
+
+ /* This channel has link */
+ if (ncsi_channel_has_link(nc))
+ return true;
+
+ list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
+ if (ncsi_channel_has_link(channel))
+ return false;
+
+ /* No other channel has link; default to this one */
+ return true;
+}
+
+/* Change the active Tx channel in a multi-channel setup */
+int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
+ struct ncsi_package *package,
+ struct ncsi_channel *disable,
+ struct ncsi_channel *enable)
+{
+ struct ncsi_cmd_arg nca;
+ struct ncsi_channel *nc;
+ struct ncsi_package *np;
+ int ret = 0;
+
+ if (!package->multi_channel && !ndp->multi_package)
+ netdev_warn(ndp->ndev.dev,
+ "NCSI: Trying to update Tx channel in single-channel mode\n");
+ nca.ndp = ndp;
+ nca.req_flags = 0;
+
+ /* Find current channel with Tx enabled */
+ NCSI_FOR_EACH_PACKAGE(ndp, np) {
+ if (disable)
+ break;
+ if (!ndp->multi_package && np != package)
+ continue;
+
+ NCSI_FOR_EACH_CHANNEL(np, nc)
+ if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
+ disable = nc;
+ break;
+ }
+ }
+
+ /* Find a suitable channel for Tx */
+ NCSI_FOR_EACH_PACKAGE(ndp, np) {
+ if (enable)
+ break;
+ if (!ndp->multi_package && np != package)
+ continue;
+ if (!(ndp->package_whitelist & (0x1 << np->id)))
+ continue;
+
+ if (np->preferred_channel &&
+ ncsi_channel_has_link(np->preferred_channel)) {
+ enable = np->preferred_channel;
+ break;
+ }
+
+ NCSI_FOR_EACH_CHANNEL(np, nc) {
+ if (!(np->channel_whitelist & 0x1 << nc->id))
+ continue;
+ if (nc->state != NCSI_CHANNEL_ACTIVE)
+ continue;
+ if (ncsi_channel_has_link(nc)) {
+ enable = nc;
+ break;
+ }
+ }
+ }
+
+ if (disable == enable)
+ return -1;
+
+ if (!enable)
+ return -1;
+
+ if (disable) {
+ nca.channel = disable->id;
+ nca.package = disable->package->id;
+ nca.type = NCSI_PKT_CMD_DCNT;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ netdev_err(ndp->ndev.dev,
+ "Error %d sending DCNT\n",
+ ret);
+ }
+
+ netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
+
+ nca.channel = enable->id;
+ nca.package = enable->package->id;
+ nca.type = NCSI_PKT_CMD_ECNT;
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ netdev_err(ndp->ndev.dev,
+ "Error %d sending ECNT\n",
+ ret);
+
+ return ret;
+}
+
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
- struct ncsi_dev *nd = &ndp->ndev;
- struct net_device *dev = nd->dev;
struct ncsi_package *np = ndp->active_package;
struct ncsi_channel *nc = ndp->active_channel;
struct ncsi_channel *hot_nc = NULL;
+ struct ncsi_dev *nd = &ndp->ndev;
+ struct net_device *dev = nd->dev;
struct ncsi_cmd_arg nca;
unsigned char index;
unsigned long flags;
@@ -845,20 +1037,29 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
} else if (nd->state == ncsi_dev_state_config_ebf) {
nca.type = NCSI_PKT_CMD_EBF;
nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
- nd->state = ncsi_dev_state_config_ecnt;
+ if (ncsi_channel_is_tx(ndp, nc))
+ nd->state = ncsi_dev_state_config_ecnt;
+ else
+ nd->state = ncsi_dev_state_config_ec;
#if IS_ENABLED(CONFIG_IPV6)
if (ndp->inet6_addr_num > 0 &&
(nc->caps[NCSI_CAP_GENERIC].cap &
NCSI_CAP_GENERIC_MC))
nd->state = ncsi_dev_state_config_egmf;
- else
- nd->state = ncsi_dev_state_config_ecnt;
} else if (nd->state == ncsi_dev_state_config_egmf) {
nca.type = NCSI_PKT_CMD_EGMF;
nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
- nd->state = ncsi_dev_state_config_ecnt;
+ if (ncsi_channel_is_tx(ndp, nc))
+ nd->state = ncsi_dev_state_config_ecnt;
+ else
+ nd->state = ncsi_dev_state_config_ec;
#endif /* CONFIG_IPV6 */
} else if (nd->state == ncsi_dev_state_config_ecnt) {
+ if (np->preferred_channel &&
+ nc != np->preferred_channel)
+ netdev_info(ndp->ndev.dev,
+ "NCSI: Tx failed over to channel %u\n",
+ nc->id);
nca.type = NCSI_PKT_CMD_ECNT;
nd->state = ncsi_dev_state_config_ec;
} else if (nd->state == ncsi_dev_state_config_ec) {
@@ -889,6 +1090,16 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
nc->id);
spin_lock_irqsave(&nc->lock, flags);
+ nc->state = NCSI_CHANNEL_ACTIVE;
+
+ if (ndp->flags & NCSI_DEV_RESET) {
+ /* A reset event happened during config, start it now */
+ nc->reconfigure_needed = false;
+ spin_unlock_irqrestore(&nc->lock, flags);
+ ncsi_reset_dev(nd);
+ break;
+ }
+
if (nc->reconfigure_needed) {
/* This channel's configuration has been updated
* part-way during the config state - start the
@@ -909,10 +1120,8 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
hot_nc = nc;
- nc->state = NCSI_CHANNEL_ACTIVE;
} else {
hot_nc = NULL;
- nc->state = NCSI_CHANNEL_INACTIVE;
netdev_dbg(ndp->ndev.dev,
"NCSI: channel %u link down after config\n",
nc->id);
@@ -940,43 +1149,35 @@ error:
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
- struct ncsi_package *np, *force_package;
- struct ncsi_channel *nc, *found, *hot_nc, *force_channel;
+ struct ncsi_channel *nc, *found, *hot_nc;
struct ncsi_channel_mode *ncm;
- unsigned long flags;
+ unsigned long flags, cflags;
+ struct ncsi_package *np;
+ bool with_link;
spin_lock_irqsave(&ndp->lock, flags);
hot_nc = ndp->hot_channel;
- force_channel = ndp->force_channel;
- force_package = ndp->force_package;
spin_unlock_irqrestore(&ndp->lock, flags);
- /* Force a specific channel whether or not it has link if we have been
- * configured to do so
- */
- if (force_package && force_channel) {
- found = force_channel;
- ncm = &found->modes[NCSI_MODE_LINK];
- if (!(ncm->data[2] & 0x1))
- netdev_info(ndp->ndev.dev,
- "NCSI: Channel %u forced, but it is link down\n",
- found->id);
- goto out;
- }
-
- /* The search is done once an inactive channel with up
- * link is found.
+	/* By default the search stops once an inactive channel with link
+	 * up is found, unless a preferred channel is set.
+	 * If multi_package or multi_channel is configured, all channels in
+	 * the whitelist are added to the channel queue.
*/
found = NULL;
+ with_link = false;
NCSI_FOR_EACH_PACKAGE(ndp, np) {
- if (ndp->force_package && np != ndp->force_package)
+ if (!(ndp->package_whitelist & (0x1 << np->id)))
continue;
NCSI_FOR_EACH_CHANNEL(np, nc) {
- spin_lock_irqsave(&nc->lock, flags);
+ if (!(np->channel_whitelist & (0x1 << nc->id)))
+ continue;
+
+ spin_lock_irqsave(&nc->lock, cflags);
if (!list_empty(&nc->link) ||
nc->state != NCSI_CHANNEL_INACTIVE) {
- spin_unlock_irqrestore(&nc->lock, flags);
+ spin_unlock_irqrestore(&nc->lock, cflags);
continue;
}
@@ -988,32 +1189,49 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
ncm = &nc->modes[NCSI_MODE_LINK];
if (ncm->data[2] & 0x1) {
- spin_unlock_irqrestore(&nc->lock, flags);
found = nc;
- goto out;
+ with_link = true;
}
- spin_unlock_irqrestore(&nc->lock, flags);
+			/* If multi_channel is enabled, configure all valid
+			 * channels whether or not they currently have link,
+			 * so they will have AENs enabled.
+ */
+ if (with_link || np->multi_channel) {
+ spin_lock_irqsave(&ndp->lock, flags);
+ list_add_tail_rcu(&nc->link,
+ &ndp->channel_queue);
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
+ netdev_dbg(ndp->ndev.dev,
+ "NCSI: Channel %u added to queue (link %s)\n",
+ nc->id,
+ ncm->data[2] & 0x1 ? "up" : "down");
+ }
+
+ spin_unlock_irqrestore(&nc->lock, cflags);
+
+ if (with_link && !np->multi_channel)
+ break;
}
+ if (with_link && !ndp->multi_package)
+ break;
}
- if (!found) {
+ if (list_empty(&ndp->channel_queue) && found) {
+ netdev_info(ndp->ndev.dev,
+ "NCSI: No channel with link found, configuring channel %u\n",
+ found->id);
+ spin_lock_irqsave(&ndp->lock, flags);
+ list_add_tail_rcu(&found->link, &ndp->channel_queue);
+ spin_unlock_irqrestore(&ndp->lock, flags);
+ } else if (!found) {
netdev_warn(ndp->ndev.dev,
- "NCSI: No channel found with link\n");
+ "NCSI: No channel found to configure!\n");
ncsi_report_link(ndp, true);
return -ENODEV;
}
- ncm = &found->modes[NCSI_MODE_LINK];
- netdev_dbg(ndp->ndev.dev,
- "NCSI: Channel %u added to queue (link %s)\n",
- found->id, ncm->data[2] & 0x1 ? "up" : "down");
-
-out:
- spin_lock_irqsave(&ndp->lock, flags);
- list_add_tail_rcu(&found->link, &ndp->channel_queue);
- spin_unlock_irqrestore(&ndp->lock, flags);
-
return ncsi_process_next_channel(ndp);
}
@@ -1050,35 +1268,6 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
return false;
}
-static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
-{
- struct ncsi_package *np;
- struct ncsi_channel *nc;
- unsigned long flags;
-
- /* Move all available channels to processing queue */
- spin_lock_irqsave(&ndp->lock, flags);
- NCSI_FOR_EACH_PACKAGE(ndp, np) {
- NCSI_FOR_EACH_CHANNEL(np, nc) {
- WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
- !list_empty(&nc->link));
- ncsi_stop_channel_monitor(nc);
- list_add_tail_rcu(&nc->link, &ndp->channel_queue);
- }
- }
- spin_unlock_irqrestore(&ndp->lock, flags);
-
- /* We can have no channels in extremely case */
- if (list_empty(&ndp->channel_queue)) {
- netdev_err(ndp->ndev.dev,
- "NCSI: No available channels for HWA\n");
- ncsi_report_link(ndp, false);
- return -ENOENT;
- }
-
- return ncsi_process_next_channel(ndp);
-}
-
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
struct ncsi_dev *nd = &ndp->ndev;
@@ -1110,70 +1299,28 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
nd->state = ncsi_dev_state_probe_package;
break;
case ncsi_dev_state_probe_package:
- ndp->pending_req_num = 16;
+ ndp->pending_req_num = 1;
- /* Select all possible packages */
nca.type = NCSI_PKT_CMD_SP;
nca.bytes[0] = 1;
+ nca.package = ndp->package_probe_id;
nca.channel = NCSI_RESERVED_CHANNEL;
- for (index = 0; index < 8; index++) {
- nca.package = index;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
- }
-
- /* Disable all possible packages */
- nca.type = NCSI_PKT_CMD_DP;
- for (index = 0; index < 8; index++) {
- nca.package = index;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
- }
-
+ ret = ncsi_xmit_cmd(&nca);
+ if (ret)
+ goto error;
nd->state = ncsi_dev_state_probe_channel;
break;
case ncsi_dev_state_probe_channel:
- if (!ndp->active_package)
- ndp->active_package = list_first_or_null_rcu(
- &ndp->packages, struct ncsi_package, node);
- else if (list_is_last(&ndp->active_package->node,
- &ndp->packages))
- ndp->active_package = NULL;
- else
- ndp->active_package = list_next_entry(
- ndp->active_package, node);
-
- /* All available packages and channels are enumerated. The
- * enumeration happens for once when the NCSI interface is
- * started. So we need continue to start the interface after
- * the enumeration.
- *
- * We have to choose an active channel before configuring it.
- * Note that we possibly don't have active channel in extreme
- * situation.
- */
+ ndp->active_package = ncsi_find_package(ndp,
+ ndp->package_probe_id);
if (!ndp->active_package) {
- ndp->flags |= NCSI_DEV_PROBED;
- if (ncsi_check_hwa(ndp))
- ncsi_enable_hwa(ndp);
- else
- ncsi_choose_active_channel(ndp);
- return;
+ /* No response */
+ nd->state = ncsi_dev_state_probe_dp;
+ schedule_work(&ndp->work);
+ break;
}
-
- /* Select the active package */
- ndp->pending_req_num = 1;
- nca.type = NCSI_PKT_CMD_SP;
- nca.bytes[0] = 1;
- nca.package = ndp->active_package->id;
- nca.channel = NCSI_RESERVED_CHANNEL;
- ret = ncsi_xmit_cmd(&nca);
- if (ret)
- goto error;
-
nd->state = ncsi_dev_state_probe_cis;
+ schedule_work(&ndp->work);
break;
case ncsi_dev_state_probe_cis:
ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
@@ -1222,22 +1369,35 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
case ncsi_dev_state_probe_dp:
ndp->pending_req_num = 1;
- /* Deselect the active package */
+ /* Deselect the current package */
nca.type = NCSI_PKT_CMD_DP;
- nca.package = ndp->active_package->id;
+ nca.package = ndp->package_probe_id;
nca.channel = NCSI_RESERVED_CHANNEL;
ret = ncsi_xmit_cmd(&nca);
if (ret)
goto error;
- /* Scan channels in next package */
- nd->state = ncsi_dev_state_probe_channel;
+ /* Probe next package */
+ ndp->package_probe_id++;
+ if (ndp->package_probe_id >= 8) {
+ /* Probe finished */
+ ndp->flags |= NCSI_DEV_PROBED;
+ break;
+ }
+ nd->state = ncsi_dev_state_probe_package;
+ ndp->active_package = NULL;
break;
default:
netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
nd->state);
}
+ if (ndp->flags & NCSI_DEV_PROBED) {
+ /* Check if all packages have HWA support */
+ ncsi_check_hwa(ndp);
+ ncsi_choose_active_channel(ndp);
+ }
+
return;
error:
netdev_err(ndp->ndev.dev,
@@ -1556,6 +1716,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
INIT_LIST_HEAD(&ndp->channel_queue);
INIT_LIST_HEAD(&ndp->vlan_vids);
INIT_WORK(&ndp->work, ncsi_dev_work);
+ ndp->package_whitelist = UINT_MAX;
/* Initialize private NCSI device */
spin_lock_init(&ndp->lock);
@@ -1592,26 +1753,19 @@ EXPORT_SYMBOL_GPL(ncsi_register_dev);
int ncsi_start_dev(struct ncsi_dev *nd)
{
struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
- int ret;
if (nd->state != ncsi_dev_state_registered &&
nd->state != ncsi_dev_state_functional)
return -ENOTTY;
if (!(ndp->flags & NCSI_DEV_PROBED)) {
+ ndp->package_probe_id = 0;
nd->state = ncsi_dev_state_probe;
schedule_work(&ndp->work);
return 0;
}
- if (ndp->flags & NCSI_DEV_HWA) {
- netdev_info(ndp->ndev.dev, "NCSI: Enabling HWA mode\n");
- ret = ncsi_enable_hwa(ndp);
- } else {
- ret = ncsi_choose_active_channel(ndp);
- }
-
- return ret;
+ return ncsi_reset_dev(nd);
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);
@@ -1624,7 +1778,10 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
int old_state;
unsigned long flags;
- /* Stop the channel monitor and reset channel's state */
+ /* Stop the channel monitor on any active channels. Don't reset the
+ * channel state so we know which were active when ncsi_start_dev()
+ * is next called.
+ */
NCSI_FOR_EACH_PACKAGE(ndp, np) {
NCSI_FOR_EACH_CHANNEL(np, nc) {
ncsi_stop_channel_monitor(nc);
@@ -1632,7 +1789,6 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
spin_lock_irqsave(&nc->lock, flags);
chained = !list_empty(&nc->link);
old_state = nc->state;
- nc->state = NCSI_CHANNEL_INACTIVE;
spin_unlock_irqrestore(&nc->lock, flags);
WARN_ON_ONCE(chained ||
@@ -1645,6 +1801,92 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);
+int ncsi_reset_dev(struct ncsi_dev *nd)
+{
+ struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
+ struct ncsi_channel *nc, *active, *tmp;
+ struct ncsi_package *np;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ndp->lock, flags);
+
+ if (!(ndp->flags & NCSI_DEV_RESET)) {
+ /* Haven't been called yet, check states */
+ switch (nd->state & ncsi_dev_state_major) {
+ case ncsi_dev_state_registered:
+ case ncsi_dev_state_probe:
+ /* Not even probed yet - do nothing */
+ spin_unlock_irqrestore(&ndp->lock, flags);
+ return 0;
+ case ncsi_dev_state_suspend:
+ case ncsi_dev_state_config:
+ /* Wait for the channel to finish its suspend/config
+ * operation; once it finishes it will check for
+ * NCSI_DEV_RESET and reset the state.
+ */
+ ndp->flags |= NCSI_DEV_RESET;
+ spin_unlock_irqrestore(&ndp->lock, flags);
+ return 0;
+ }
+ } else {
+ switch (nd->state) {
+ case ncsi_dev_state_suspend_done:
+ case ncsi_dev_state_config_done:
+ case ncsi_dev_state_functional:
+ /* Ok */
+ break;
+ default:
+ /* Current reset operation happening */
+ spin_unlock_irqrestore(&ndp->lock, flags);
+ return 0;
+ }
+ }
+
+ if (!list_empty(&ndp->channel_queue)) {
+ /* Clear any channel queue we may have interrupted */
+ list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
+ list_del_init(&nc->link);
+ }
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
+ active = NULL;
+ NCSI_FOR_EACH_PACKAGE(ndp, np) {
+ NCSI_FOR_EACH_CHANNEL(np, nc) {
+ spin_lock_irqsave(&nc->lock, flags);
+
+ if (nc->state == NCSI_CHANNEL_ACTIVE) {
+ active = nc;
+ nc->state = NCSI_CHANNEL_INVISIBLE;
+ spin_unlock_irqrestore(&nc->lock, flags);
+ ncsi_stop_channel_monitor(nc);
+ break;
+ }
+
+ spin_unlock_irqrestore(&nc->lock, flags);
+ }
+ if (active)
+ break;
+ }
+
+ if (!active) {
+ /* Done */
+ spin_lock_irqsave(&ndp->lock, flags);
+ ndp->flags &= ~NCSI_DEV_RESET;
+ spin_unlock_irqrestore(&ndp->lock, flags);
+ return ncsi_choose_active_channel(ndp);
+ }
+
+ spin_lock_irqsave(&ndp->lock, flags);
+ ndp->flags |= NCSI_DEV_RESET;
+ ndp->active_channel = active;
+ ndp->active_package = active->package;
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
+ nd->state = ncsi_dev_state_suspend;
+ schedule_work(&ndp->work);
+ return 0;
+}
+
void ncsi_unregister_dev(struct ncsi_dev *nd)
{
struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
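A minimal standalone sketch (not part of the patch) of the reworked probe flow in ncsi-manage.c above: packages are now probed one at a time, advancing package_probe_id until every slot has been tried, instead of selecting all eight up front. The responder stub is invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define NCSI_MAX_PACKAGES 8

static bool package_responds(unsigned int id)
{
	return id == 2;	/* pretend only package 2 exists */
}

int main(void)
{
	unsigned int id;

	for (id = 0; id < NCSI_MAX_PACKAGES; id++) {
		if (package_responds(id))
			printf("probing channels of package %u\n", id);
		else
			printf("package %u: no response, deselect\n", id);
	}
	puts("probe finished");
	return 0;
}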
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 33314381b4f5..5d782445d2fc 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -30,6 +30,9 @@ static const struct nla_policy ncsi_genl_policy[NCSI_ATTR_MAX + 1] = {
[NCSI_ATTR_PACKAGE_ID] = { .type = NLA_U32 },
[NCSI_ATTR_CHANNEL_ID] = { .type = NLA_U32 },
[NCSI_ATTR_DATA] = { .type = NLA_BINARY, .len = 2048 },
+ [NCSI_ATTR_MULTI_FLAG] = { .type = NLA_FLAG },
+ [NCSI_ATTR_PACKAGE_MASK] = { .type = NLA_U32 },
+ [NCSI_ATTR_CHANNEL_MASK] = { .type = NLA_U32 },
};
static struct ncsi_dev_priv *ndp_from_ifindex(struct net *net, u32 ifindex)
@@ -69,7 +72,7 @@ static int ncsi_write_channel_info(struct sk_buff *skb,
nla_put_u32(skb, NCSI_CHANNEL_ATTR_LINK_STATE, m->data[2]);
if (nc->state == NCSI_CHANNEL_ACTIVE)
nla_put_flag(skb, NCSI_CHANNEL_ATTR_ACTIVE);
- if (ndp->force_channel == nc)
+ if (nc == nc->package->preferred_channel)
nla_put_flag(skb, NCSI_CHANNEL_ATTR_FORCED);
nla_put_u32(skb, NCSI_CHANNEL_ATTR_VERSION_MAJOR, nc->version.version);
@@ -114,7 +117,7 @@ static int ncsi_write_package_info(struct sk_buff *skb,
if (!pnest)
return -ENOMEM;
nla_put_u32(skb, NCSI_PKG_ATTR_ID, np->id);
- if (ndp->force_package == np)
+ if ((0x1 << np->id) == ndp->package_whitelist)
nla_put_flag(skb, NCSI_PKG_ATTR_FORCED);
cnest = nla_nest_start(skb, NCSI_PKG_ATTR_CHANNEL_LIST);
if (!cnest) {
@@ -290,49 +293,58 @@ static int ncsi_set_interface_nl(struct sk_buff *msg, struct genl_info *info)
package_id = nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_ID]);
package = NULL;
- spin_lock_irqsave(&ndp->lock, flags);
-
NCSI_FOR_EACH_PACKAGE(ndp, np)
if (np->id == package_id)
package = np;
if (!package) {
/* The user has set a package that does not exist */
- spin_unlock_irqrestore(&ndp->lock, flags);
return -ERANGE;
}
channel = NULL;
- if (!info->attrs[NCSI_ATTR_CHANNEL_ID]) {
- /* Allow any channel */
- channel_id = NCSI_RESERVED_CHANNEL;
- } else {
+ if (info->attrs[NCSI_ATTR_CHANNEL_ID]) {
channel_id = nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_ID]);
NCSI_FOR_EACH_CHANNEL(package, nc)
- if (nc->id == channel_id)
+ if (nc->id == channel_id) {
channel = nc;
+ break;
+ }
+ if (!channel) {
+ netdev_info(ndp->ndev.dev,
+ "NCSI: Channel %u does not exist!\n",
+ channel_id);
+ return -ERANGE;
+ }
}
- if (channel_id != NCSI_RESERVED_CHANNEL && !channel) {
- /* The user has set a channel that does not exist on this
- * package
- */
- spin_unlock_irqrestore(&ndp->lock, flags);
- netdev_info(ndp->ndev.dev, "NCSI: Channel %u does not exist!\n",
- channel_id);
- return -ERANGE;
- }
-
- ndp->force_package = package;
- ndp->force_channel = channel;
+ spin_lock_irqsave(&ndp->lock, flags);
+ ndp->package_whitelist = 0x1 << package->id;
+ ndp->multi_package = false;
spin_unlock_irqrestore(&ndp->lock, flags);
- netdev_info(ndp->ndev.dev, "Set package 0x%x, channel 0x%x%s as preferred\n",
- package_id, channel_id,
- channel_id == NCSI_RESERVED_CHANNEL ? " (any)" : "");
+ spin_lock_irqsave(&package->lock, flags);
+ package->multi_channel = false;
+ if (channel) {
+ package->channel_whitelist = 0x1 << channel->id;
+ package->preferred_channel = channel;
+ } else {
+ /* Allow any channel */
+ package->channel_whitelist = UINT_MAX;
+ package->preferred_channel = NULL;
+ }
+ spin_unlock_irqrestore(&package->lock, flags);
+
+ if (channel)
+ netdev_info(ndp->ndev.dev,
+ "Set package 0x%x, channel 0x%x as preferred\n",
+ package_id, channel_id);
+ else
+ netdev_info(ndp->ndev.dev, "Set package 0x%x as preferred\n",
+ package_id);
- /* Bounce the NCSI channel to set changes */
- ncsi_stop_dev(&ndp->ndev);
- ncsi_start_dev(&ndp->ndev);
+ /* Update channel configuration */
+ if (!(ndp->flags & NCSI_DEV_RESET))
+ ncsi_reset_dev(&ndp->ndev);
return 0;
}
@@ -340,6 +352,7 @@ static int ncsi_set_interface_nl(struct sk_buff *msg, struct genl_info *info)
static int ncsi_clear_interface_nl(struct sk_buff *msg, struct genl_info *info)
{
struct ncsi_dev_priv *ndp;
+ struct ncsi_package *np;
unsigned long flags;
if (!info || !info->attrs)
@@ -353,16 +366,24 @@ static int ncsi_clear_interface_nl(struct sk_buff *msg, struct genl_info *info)
if (!ndp)
return -ENODEV;
- /* Clear any override */
+ /* Reset any whitelists and disable multi mode */
spin_lock_irqsave(&ndp->lock, flags);
- ndp->force_package = NULL;
- ndp->force_channel = NULL;
+ ndp->package_whitelist = UINT_MAX;
+ ndp->multi_package = false;
spin_unlock_irqrestore(&ndp->lock, flags);
+
+ NCSI_FOR_EACH_PACKAGE(ndp, np) {
+ spin_lock_irqsave(&np->lock, flags);
+ np->multi_channel = false;
+ np->channel_whitelist = UINT_MAX;
+ np->preferred_channel = NULL;
+ spin_unlock_irqrestore(&np->lock, flags);
+ }
netdev_info(ndp->ndev.dev, "NCSI: Cleared preferred package/channel\n");
- /* Bounce the NCSI channel to set changes */
- ncsi_stop_dev(&ndp->ndev);
- ncsi_start_dev(&ndp->ndev);
+ /* Update channel configuration */
+ if (!(ndp->flags & NCSI_DEV_RESET))
+ ncsi_reset_dev(&ndp->ndev);
return 0;
}
@@ -563,6 +584,138 @@ int ncsi_send_netlink_err(struct net_device *dev,
return nlmsg_unicast(net->genl_sock, skb, snd_portid);
}
+static int ncsi_set_package_mask_nl(struct sk_buff *msg,
+ struct genl_info *info)
+{
+ struct ncsi_dev_priv *ndp;
+ unsigned long flags;
+ int rc;
+
+ if (!info || !info->attrs)
+ return -EINVAL;
+
+ if (!info->attrs[NCSI_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (!info->attrs[NCSI_ATTR_PACKAGE_MASK])
+ return -EINVAL;
+
+ ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
+ nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
+ if (!ndp)
+ return -ENODEV;
+
+ spin_lock_irqsave(&ndp->lock, flags);
+ if (nla_get_flag(info->attrs[NCSI_ATTR_MULTI_FLAG])) {
+ if (ndp->flags & NCSI_DEV_HWA) {
+ ndp->multi_package = true;
+ rc = 0;
+ } else {
+ netdev_err(ndp->ndev.dev,
+ "NCSI: Can't use multiple packages without HWA\n");
+ rc = -EPERM;
+ }
+ } else {
+ ndp->multi_package = false;
+ rc = 0;
+ }
+
+ if (!rc)
+ ndp->package_whitelist =
+ nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_MASK]);
+ spin_unlock_irqrestore(&ndp->lock, flags);
+
+ if (!rc) {
+ /* Update channel configuration */
+ if (!(ndp->flags & NCSI_DEV_RESET))
+ ncsi_reset_dev(&ndp->ndev);
+ }
+
+ return rc;
+}
+
+static int ncsi_set_channel_mask_nl(struct sk_buff *msg,
+ struct genl_info *info)
+{
+ struct ncsi_package *np, *package;
+ struct ncsi_channel *nc, *channel;
+ u32 package_id, channel_id;
+ struct ncsi_dev_priv *ndp;
+ unsigned long flags;
+
+ if (!info || !info->attrs)
+ return -EINVAL;
+
+ if (!info->attrs[NCSI_ATTR_IFINDEX])
+ return -EINVAL;
+
+ if (!info->attrs[NCSI_ATTR_PACKAGE_ID])
+ return -EINVAL;
+
+ if (!info->attrs[NCSI_ATTR_CHANNEL_MASK])
+ return -EINVAL;
+
+ ndp = ndp_from_ifindex(get_net(sock_net(msg->sk)),
+ nla_get_u32(info->attrs[NCSI_ATTR_IFINDEX]));
+ if (!ndp)
+ return -ENODEV;
+
+ package_id = nla_get_u32(info->attrs[NCSI_ATTR_PACKAGE_ID]);
+ package = NULL;
+ NCSI_FOR_EACH_PACKAGE(ndp, np)
+ if (np->id == package_id) {
+ package = np;
+ break;
+ }
+ if (!package)
+ return -ERANGE;
+
+ spin_lock_irqsave(&package->lock, flags);
+
+ channel = NULL;
+ if (info->attrs[NCSI_ATTR_CHANNEL_ID]) {
+ channel_id = nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_ID]);
+ NCSI_FOR_EACH_CHANNEL(np, nc)
+ if (nc->id == channel_id) {
+ channel = nc;
+ break;
+ }
+ if (!channel) {
+ spin_unlock_irqrestore(&package->lock, flags);
+ return -ERANGE;
+ }
+ netdev_dbg(ndp->ndev.dev,
+ "NCSI: Channel %u set as preferred channel\n",
+ channel->id);
+ }
+
+ package->channel_whitelist =
+ nla_get_u32(info->attrs[NCSI_ATTR_CHANNEL_MASK]);
+ if (package->channel_whitelist == 0)
+ netdev_dbg(ndp->ndev.dev,
+ "NCSI: Package %u set to all channels disabled\n",
+ package->id);
+
+ package->preferred_channel = channel;
+
+ if (nla_get_flag(info->attrs[NCSI_ATTR_MULTI_FLAG])) {
+ package->multi_channel = true;
+ netdev_info(ndp->ndev.dev,
+ "NCSI: Multi-channel enabled on package %u\n",
+ package_id);
+ } else {
+ package->multi_channel = false;
+ }
+
+ spin_unlock_irqrestore(&package->lock, flags);
+
+ /* Update channel configuration */
+ if (!(ndp->flags & NCSI_DEV_RESET))
+ ncsi_reset_dev(&ndp->ndev);
+
+ return 0;
+}
+
static const struct genl_ops ncsi_ops[] = {
{
.cmd = NCSI_CMD_PKG_INFO,
@@ -589,6 +742,18 @@ static const struct genl_ops ncsi_ops[] = {
.doit = ncsi_send_cmd_nl,
.flags = GENL_ADMIN_PERM,
},
+ {
+ .cmd = NCSI_CMD_SET_PACKAGE_MASK,
+ .policy = ncsi_genl_policy,
+ .doit = ncsi_set_package_mask_nl,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NCSI_CMD_SET_CHANNEL_MASK,
+ .policy = ncsi_genl_policy,
+ .doit = ncsi_set_channel_mask_nl,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_family ncsi_genl_family __ro_after_init = {
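Taken together, the hunks above replace the single force_package/force_channel pointers with bitmask whitelists: bit N of package_whitelist (or a package's channel_whitelist) enables the entity with ID N, and UINT_MAX lifts the restriction. A minimal standalone sketch of that mask test (names invented for illustration, not kernel API):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: bit N of the whitelist enables the package or
 * channel with ID N; UINT_MAX means "no restriction". */
static bool id_allowed(unsigned int whitelist, unsigned int id)
{
	return whitelist & (0x1U << id);
}

int main(void)
{
	unsigned int whitelist = 0x1U << 3;	/* only ID 3 enabled */

	printf("id 3 allowed: %d\n", id_allowed(whitelist, 3));	/* 1 */
	printf("id 0 allowed: %d\n", id_allowed(whitelist, 0));	/* 0 */

	whitelist = UINT_MAX;			/* allow everything */
	printf("id 7 allowed: %d\n", id_allowed(whitelist, 7));	/* 1 */
	return 0;
}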
diff --git a/net/ncsi/ncsi-pkt.h b/net/ncsi/ncsi-pkt.h
index 4d3f06be38bd..2a6d83a596c9 100644
--- a/net/ncsi/ncsi-pkt.h
+++ b/net/ncsi/ncsi-pkt.h
@@ -165,6 +165,15 @@ struct ncsi_rsp_oem_pkt {
unsigned char data[]; /* Payload data */
};
+/* Mellanox Response Data */
+struct ncsi_rsp_oem_mlx_pkt {
+ unsigned char cmd_rev; /* Command Revision */
+ unsigned char cmd; /* Command ID */
+ unsigned char param; /* Parameter */
+ unsigned char optional; /* Optional data */
+ unsigned char data[]; /* Data */
+};
+
/* Broadcom Response Data */
struct ncsi_rsp_oem_bcm_pkt {
unsigned char ver; /* Payload Version */
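The new Mellanox struct overlays the OEM payload that follows the generic response header: a four-byte command header, then variable-length data. A standalone sketch of the same overlay (the byte values are invented):

#include <stdio.h>

/* Mirrors the layout of ncsi_rsp_oem_mlx_pkt above: fixed 4-byte
 * header, flexible data array after it. */
struct oem_mlx_hdr {
	unsigned char cmd_rev;
	unsigned char cmd;
	unsigned char param;
	unsigned char optional;
	unsigned char data[];	/* payload follows the 4-byte header */
};

int main(void)
{
	unsigned char raw[] = { 0x01, 0x00, 0x1b, 0x00, 0xde, 0xad };
	const struct oem_mlx_hdr *hdr = (const struct oem_mlx_hdr *)raw;

	printf("cmd=0x%02x param=0x%02x data[0]=0x%02x\n",
	       hdr->cmd, hdr->param, hdr->data[0]);
	return 0;
}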
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 77e07ba3f493..dc07fcc7938e 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -256,7 +256,7 @@ static int ncsi_rsp_handler_dcnt(struct ncsi_request *nr)
if (!ncm->enable)
return 0;
- ncm->enable = 1;
+ ncm->enable = 0;
return 0;
}
@@ -611,6 +611,45 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr)
return 0;
}
+/* Response handler for Mellanox command Get Mac Address */
+static int ncsi_rsp_handler_oem_mlx_gma(struct ncsi_request *nr)
+{
+ struct ncsi_dev_priv *ndp = nr->ndp;
+ struct net_device *ndev = ndp->ndev.dev;
+ const struct net_device_ops *ops = ndev->netdev_ops;
+ struct ncsi_rsp_oem_pkt *rsp;
+ struct sockaddr saddr;
+ int ret = 0;
+
+ /* Get the response header */
+ rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
+
+ saddr.sa_family = ndev->type;
+ ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ memcpy(saddr.sa_data, &rsp->data[MLX_MAC_ADDR_OFFSET], ETH_ALEN);
+ ret = ops->ndo_set_mac_address(ndev, &saddr);
+ if (ret < 0)
+ netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
+
+ return ret;
+}
+
+/* Response handler for Mellanox card */
+static int ncsi_rsp_handler_oem_mlx(struct ncsi_request *nr)
+{
+ struct ncsi_rsp_oem_mlx_pkt *mlx;
+ struct ncsi_rsp_oem_pkt *rsp;
+
+ /* Get the response header */
+ rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp);
+ mlx = (struct ncsi_rsp_oem_mlx_pkt *)(rsp->data);
+
+ if (mlx->cmd == NCSI_OEM_MLX_CMD_GMA &&
+ mlx->param == NCSI_OEM_MLX_CMD_GMA_PARAM)
+ return ncsi_rsp_handler_oem_mlx_gma(nr);
+ return 0;
+}
+
/* Response handler for Broadcom command Get Mac Address */
static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
{
@@ -655,7 +694,7 @@ static struct ncsi_rsp_oem_handler {
unsigned int mfr_id;
int (*handler)(struct ncsi_request *nr);
} ncsi_rsp_oem_handlers[] = {
- { NCSI_OEM_MFR_MLX_ID, NULL },
+ { NCSI_OEM_MFR_MLX_ID, ncsi_rsp_handler_oem_mlx },
{ NCSI_OEM_MFR_BCM_ID, ncsi_rsp_handler_oem_bcm }
};
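With the MLX slot filled in, ncsi_rsp_oem_handlers[] now dispatches on the manufacturer ID carried in the response. A standalone model of that table-driven dispatch (the IDs and handlers here are illustrative, not the kernel's definitions):

#include <stdio.h>

struct oem_handler {
	unsigned int mfr_id;
	int (*handler)(void);
};

static int handle_mlx(void) { puts("mlx"); return 0; }
static int handle_bcm(void) { puts("bcm"); return 0; }

/* Dispatch table in the style of ncsi_rsp_oem_handlers[]. */
static const struct oem_handler handlers[] = {
	{ 0x8119, handle_mlx },
	{ 0x113d, handle_bcm },
};

static int dispatch(unsigned int mfr_id)
{
	unsigned int i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
		if (handlers[i].mfr_id == mfr_id && handlers[i].handler)
			return handlers[i].handler();
	return -1;	/* unknown manufacturer */
}

int main(void)
{
	return dispatch(0x8119);
}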
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 2ab870ef233a..beb3a69ce1d4 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -403,21 +403,6 @@ config NF_NAT_NEEDED
depends on NF_NAT
default y
-config NF_NAT_PROTO_DCCP
- bool
- depends on NF_NAT && NF_CT_PROTO_DCCP
- default NF_NAT && NF_CT_PROTO_DCCP
-
-config NF_NAT_PROTO_UDPLITE
- bool
- depends on NF_NAT && NF_CT_PROTO_UDPLITE
- default NF_NAT && NF_CT_PROTO_UDPLITE
-
-config NF_NAT_PROTO_SCTP
- bool
- default NF_NAT && NF_CT_PROTO_SCTP
- depends on NF_NAT && NF_CT_PROTO_SCTP
-
config NF_NAT_AMANDA
tristate
depends on NF_CONNTRACK && NF_NAT
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 4ddf3ef51ece..1ae65a314d7a 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -47,12 +47,7 @@ obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
-nf_nat-y := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
- nf_nat_proto_udp.o nf_nat_proto_tcp.o nf_nat_helper.o
-
-# NAT protocols (nf_nat)
-nf_nat-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
-nf_nat-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
+nf_nat-y := nf_nat_core.o nf_nat_proto.o nf_nat_helper.o
# generic transport layer logging
obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index c00b6a2e8e3c..980000fc3b50 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -219,10 +219,6 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
u32 ip;
- /* MAC can be src only */
- if (!(opt->flags & IPSET_DIM_TWO_SRC))
- return 0;
-
ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
if (ip < map->first_ip || ip > map->last_ip)
return -IPSET_ERR_BITMAP_RANGE;
@@ -233,7 +229,14 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
return -EINVAL;
e.id = ip_to_id(map, ip);
- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
+
+ if (opt->flags & IPSET_DIM_ONE_SRC)
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+ else
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+
+ if (is_zero_ether_addr(e.ether))
+ return -EINVAL;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}
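The kadt hunk above drops the src-only restriction: the element's MAC now comes from the source or destination header depending on IPSET_DIM_ONE_SRC, and all-zero addresses are rejected. A standalone model of that selection (ETH_ALEN and all names are local to the example):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool is_zero_ether(const unsigned char *a)
{
	static const unsigned char zero[ETH_ALEN];

	return memcmp(a, zero, ETH_ALEN) == 0;
}

/* Copy from src or dst depending on the flag, reject all-zero MACs. */
static int pick_mac(unsigned char *out, const unsigned char *src,
		    const unsigned char *dst, bool want_src)
{
	memcpy(out, want_src ? src : dst, ETH_ALEN);
	return is_zero_ether(out) ? -1 : 0;
}

int main(void)
{
	unsigned char src[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	unsigned char dst[ETH_ALEN] = { 0 };
	unsigned char e[ETH_ALEN];

	printf("src pick: %d\n", pick_mac(e, src, dst, true));	/* 0  */
	printf("dst pick: %d\n", pick_mac(e, src, dst, false));	/* -1 */
	return 0;
}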
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 1577f2f76060..45a257695bef 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -771,11 +771,21 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
* The commands are serialized by the nfnl mutex.
*/
+static inline u8 protocol(const struct nlattr * const tb[])
+{
+ return nla_get_u8(tb[IPSET_ATTR_PROTOCOL]);
+}
+
static inline bool
protocol_failed(const struct nlattr * const tb[])
{
- return !tb[IPSET_ATTR_PROTOCOL] ||
- nla_get_u8(tb[IPSET_ATTR_PROTOCOL]) != IPSET_PROTOCOL;
+ return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) != IPSET_PROTOCOL;
+}
+
+static inline bool
+protocol_min_failed(const struct nlattr * const tb[])
+{
+ return !tb[IPSET_ATTR_PROTOCOL] || protocol(tb) < IPSET_PROTOCOL_MIN;
}
static inline u32
@@ -889,7 +899,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
u32 flags = flag_exist(nlh);
int ret = 0;
- if (unlikely(protocol_failed(attr) ||
+ if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!attr[IPSET_ATTR_TYPENAME] ||
!attr[IPSET_ATTR_REVISION] ||
@@ -1027,7 +1037,7 @@ static int ip_set_destroy(struct net *net, struct sock *ctnl,
ip_set_id_t i;
int ret = 0;
- if (unlikely(protocol_failed(attr)))
+ if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
/* Must wait for flush to be really finished in list:set */
@@ -1105,7 +1115,7 @@ static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
struct ip_set *s;
ip_set_id_t i;
- if (unlikely(protocol_failed(attr)))
+ if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
if (!attr[IPSET_ATTR_SETNAME]) {
@@ -1147,7 +1157,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
ip_set_id_t i;
int ret = 0;
- if (unlikely(protocol_failed(attr) ||
+ if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!attr[IPSET_ATTR_SETNAME2]))
return -IPSET_ERR_PROTOCOL;
@@ -1196,7 +1206,7 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb,
ip_set_id_t from_id, to_id;
char from_name[IPSET_MAXNAMELEN];
- if (unlikely(protocol_failed(attr) ||
+ if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!attr[IPSET_ATTR_SETNAME2]))
return -IPSET_ERR_PROTOCOL;
@@ -1291,6 +1301,7 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
nla_parse(cda, IPSET_ATTR_CMD_MAX, attr, nlh->nlmsg_len - min_len,
ip_set_setname_policy, NULL);
+ cb->args[IPSET_CB_PROTO] = nla_get_u8(cda[IPSET_ATTR_PROTOCOL]);
if (cda[IPSET_ATTR_SETNAME]) {
struct ip_set *set;
@@ -1392,7 +1403,8 @@ dump_last:
ret = -EMSGSIZE;
goto release_refcount;
}
- if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+ if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL,
+ cb->args[IPSET_CB_PROTO]) ||
nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
goto nla_put_failure;
if (dump_flags & IPSET_FLAG_LIST_SETNAME)
@@ -1407,6 +1419,9 @@ dump_last:
nla_put_u8(skb, IPSET_ATTR_REVISION,
set->revision))
goto nla_put_failure;
+ if (cb->args[IPSET_CB_PROTO] > IPSET_PROTOCOL_MIN &&
+ nla_put_net16(skb, IPSET_ATTR_INDEX, htons(index)))
+ goto nla_put_failure;
ret = set->variant->head(set, skb);
if (ret < 0)
goto release_refcount;
@@ -1466,7 +1481,7 @@ static int ip_set_dump(struct net *net, struct sock *ctnl, struct sk_buff *skb,
const struct nlattr * const attr[],
struct netlink_ext_ack *extack)
{
- if (unlikely(protocol_failed(attr)))
+ if (unlikely(protocol_min_failed(attr)))
return -IPSET_ERR_PROTOCOL;
{
@@ -1560,7 +1575,7 @@ static int ip_set_uadd(struct net *net, struct sock *ctnl, struct sk_buff *skb,
bool use_lineno;
int ret = 0;
- if (unlikely(protocol_failed(attr) ||
+ if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!((attr[IPSET_ATTR_DATA] != NULL) ^
(attr[IPSET_ATTR_ADT] != NULL)) ||
@@ -1615,7 +1630,7 @@ static int ip_set_udel(struct net *net, struct sock *ctnl, struct sk_buff *skb,
bool use_lineno;
int ret = 0;
- if (unlikely(protocol_failed(attr) ||
+ if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!((attr[IPSET_ATTR_DATA] != NULL) ^
(attr[IPSET_ATTR_ADT] != NULL)) ||
@@ -1667,7 +1682,7 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {};
int ret = 0;
- if (unlikely(protocol_failed(attr) ||
+ if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME] ||
!attr[IPSET_ATTR_DATA] ||
!flag_nested(attr[IPSET_ATTR_DATA])))
@@ -1704,7 +1719,7 @@ static int ip_set_header(struct net *net, struct sock *ctnl,
struct nlmsghdr *nlh2;
int ret = 0;
- if (unlikely(protocol_failed(attr) ||
+ if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_SETNAME]))
return -IPSET_ERR_PROTOCOL;
@@ -1720,7 +1735,7 @@ static int ip_set_header(struct net *net, struct sock *ctnl,
IPSET_CMD_HEADER);
if (!nlh2)
goto nlmsg_failure;
- if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+ if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) ||
nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) ||
nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
@@ -1761,7 +1776,7 @@ static int ip_set_type(struct net *net, struct sock *ctnl, struct sk_buff *skb,
const char *typename;
int ret = 0;
- if (unlikely(protocol_failed(attr) ||
+ if (unlikely(protocol_min_failed(attr) ||
!attr[IPSET_ATTR_TYPENAME] ||
!attr[IPSET_ATTR_FAMILY]))
return -IPSET_ERR_PROTOCOL;
@@ -1780,7 +1795,7 @@ static int ip_set_type(struct net *net, struct sock *ctnl, struct sk_buff *skb,
IPSET_CMD_TYPE);
if (!nlh2)
goto nlmsg_failure;
- if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
+ if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) ||
nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) ||
nla_put_u8(skb2, IPSET_ATTR_REVISION, max) ||
@@ -1831,6 +1846,111 @@ static int ip_set_protocol(struct net *net, struct sock *ctnl,
goto nlmsg_failure;
if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL))
goto nla_put_failure;
+ if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL_MIN, IPSET_PROTOCOL_MIN))
+ goto nla_put_failure;
+ nlmsg_end(skb2, nlh2);
+
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+ kfree_skb(skb2);
+ return -EMSGSIZE;
+}
+
+/* Get set by name or index, from userspace */
+
+static int ip_set_byname(struct net *net, struct sock *ctnl,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[],
+ struct netlink_ext_ack *extack)
+{
+ struct ip_set_net *inst = ip_set_pernet(net);
+ struct sk_buff *skb2;
+ struct nlmsghdr *nlh2;
+ ip_set_id_t id = IPSET_INVALID_ID;
+ const struct ip_set *set;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ !attr[IPSET_ATTR_SETNAME]))
+ return -IPSET_ERR_PROTOCOL;
+
+ set = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), &id);
+ if (id == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb2)
+ return -ENOMEM;
+
+ nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
+ IPSET_CMD_GET_BYNAME);
+ if (!nlh2)
+ goto nlmsg_failure;
+ if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
+ nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
+ nla_put_net16(skb2, IPSET_ATTR_INDEX, htons(id)))
+ goto nla_put_failure;
+ nlmsg_end(skb2, nlh2);
+
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb2, nlh2);
+nlmsg_failure:
+ kfree_skb(skb2);
+ return -EMSGSIZE;
+}
+
+static const struct nla_policy ip_set_index_policy[IPSET_ATTR_CMD_MAX + 1] = {
+ [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
+ [IPSET_ATTR_INDEX] = { .type = NLA_U16 },
+};
+
+static int ip_set_byindex(struct net *net, struct sock *ctnl,
+ struct sk_buff *skb, const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[],
+ struct netlink_ext_ack *extack)
+{
+ struct ip_set_net *inst = ip_set_pernet(net);
+ struct sk_buff *skb2;
+ struct nlmsghdr *nlh2;
+ ip_set_id_t id = IPSET_INVALID_ID;
+ const struct ip_set *set;
+ int ret = 0;
+
+ if (unlikely(protocol_failed(attr) ||
+ !attr[IPSET_ATTR_INDEX]))
+ return -IPSET_ERR_PROTOCOL;
+
+ id = ip_set_get_h16(attr[IPSET_ATTR_INDEX]);
+ if (id >= inst->ip_set_max)
+ return -ENOENT;
+ set = ip_set(inst, id);
+ if (set == NULL)
+ return -ENOENT;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb2)
+ return -ENOMEM;
+
+ nlh2 = start_msg(skb2, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
+ IPSET_CMD_GET_BYINDEX);
+ if (!nlh2)
+ goto nlmsg_failure;
+ if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, protocol(attr)) ||
+ nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name))
+ goto nla_put_failure;
nlmsg_end(skb2, nlh2);
ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).portid, MSG_DONTWAIT);
@@ -1916,6 +2036,16 @@ static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
.attr_count = IPSET_ATTR_CMD_MAX,
.policy = ip_set_protocol_policy,
},
+ [IPSET_CMD_GET_BYNAME] = {
+ .call = ip_set_byname,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_setname_policy,
+ },
+ [IPSET_CMD_GET_BYINDEX] = {
+ .call = ip_set_byindex,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ .policy = ip_set_index_policy,
+ },
};
static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
@@ -1961,7 +2091,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
goto done;
}
- if (req_version->version != IPSET_PROTOCOL) {
+ if (req_version->version < IPSET_PROTOCOL_MIN) {
ret = -EPROTO;
goto done;
}
@@ -2024,9 +2154,11 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
}
nfnl_lock(NFNL_SUBSYS_IPSET);
set = ip_set(inst, req_get->set.index);
- strncpy(req_get->set.name, set ? set->name : "",
- IPSET_MAXNAMELEN);
+ ret = strscpy(req_get->set.name, set ? set->name : "",
+ IPSET_MAXNAMELEN);
nfnl_unlock(NFNL_SUBSYS_IPSET);
+ if (ret < 0)
+ goto done;
goto copy;
}
default:
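protocol_min_failed() is the backward-compatibility half of the new checks: existing commands keep working for any userspace speaking at least IPSET_PROTOCOL_MIN, while the exact-match protocol_failed() guards the new GET_BYNAME/GET_BYINDEX commands. A standalone model (the version numbers are assumptions for illustration):

#include <stdbool.h>
#include <stdio.h>

#define IPSET_PROTOCOL		7
#define IPSET_PROTOCOL_MIN	6

static bool protocol_failed(unsigned char proto)
{
	return proto != IPSET_PROTOCOL;		/* exact match required */
}

static bool protocol_min_failed(unsigned char proto)
{
	return proto < IPSET_PROTOCOL_MIN;	/* anything recent enough */
}

int main(void)
{
	printf("v6 strict fails: %d, min fails: %d\n",
	       protocol_failed(6), protocol_min_failed(6));	/* 1, 0 */
	printf("v7 strict fails: %d, min fails: %d\n",
	       protocol_failed(7), protocol_min_failed(7));	/* 0, 0 */
	return 0;
}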
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index e287da68d5fa..2c9609929c71 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -67,7 +67,7 @@ tune_ahash_max(u8 curr, u32 multi)
/* A hash bucket */
struct hbucket {
- struct rcu_head rcu; /* for call_rcu_bh */
+ struct rcu_head rcu; /* for call_rcu */
/* Which positions are used in the array */
DECLARE_BITMAP(used, AHASH_MAX_TUNED);
u8 size; /* size of the array */
@@ -664,7 +664,7 @@ retry:
spin_unlock_bh(&set->lock);
/* Give time to other readers of the set */
- synchronize_rcu_bh();
+ synchronize_rcu();
pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
orig->htable_bits, orig, t->htable_bits, t);
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index 1ab5ed2f6839..c830c68142ff 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -36,9 +36,6 @@ MODULE_ALIAS("ip_set_hash:ip,mac");
/* Type specific function prefix */
#define HTYPE hash_ipmac
-/* Zero valued element is not supported */
-static const unsigned char invalid_ether[ETH_ALEN] = { 0 };
-
/* IPv4 variant */
/* Member elements */
@@ -103,8 +100,12 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL;
- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
- if (ether_addr_equal(e.ether, invalid_ether))
+ if (opt->flags & IPSET_DIM_ONE_SRC)
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+ else
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+
+ if (is_zero_ether_addr(e.ether))
return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip);
@@ -140,7 +141,7 @@ hash_ipmac4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret)
return ret;
memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
- if (ether_addr_equal(e.ether, invalid_ether))
+ if (is_zero_ether_addr(e.ether))
return -IPSET_ERR_HASH_ELEM;
return adtfn(set, &e, &ext, &ext, flags);
@@ -211,16 +212,16 @@ hash_ipmac6_kadt(struct ip_set *set, const struct sk_buff *skb,
};
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
- /* MAC can be src only */
- if (!(opt->flags & IPSET_DIM_TWO_SRC))
- return 0;
-
if (skb_mac_header(skb) < skb->head ||
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL;
- memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
- if (ether_addr_equal(e.ether, invalid_ether))
+ if (opt->flags & IPSET_DIM_ONE_SRC)
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+ else
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+
+ if (is_zero_ether_addr(e.ether))
return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
@@ -260,7 +261,7 @@ hash_ipmac6_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
- if (ether_addr_equal(e.ether, invalid_ether))
+ if (is_zero_ether_addr(e.ether))
return -IPSET_ERR_HASH_ELEM;
return adtfn(set, &e, &ext, &ext, flags);
diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
index f9d5a2a1e3d0..4fe5f243d0a3 100644
--- a/net/netfilter/ipset/ip_set_hash_mac.c
+++ b/net/netfilter/ipset/ip_set_hash_mac.c
@@ -81,15 +81,15 @@ hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb,
struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
- /* MAC can be src only */
- if (!(opt->flags & IPSET_DIM_ONE_SRC))
- return 0;
-
if (skb_mac_header(skb) < skb->head ||
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL;
- ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+ if (opt->flags & IPSET_DIM_ONE_SRC)
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
+ else
+ ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
+
if (is_zero_ether_addr(e.ether))
return -EINVAL;
return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 4eef55da0878..8da228da53ae 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -531,8 +531,8 @@ nla_put_failure:
ret = -EMSGSIZE;
} else {
cb->args[IPSET_CB_ARG0] = i;
+ ipset_nest_end(skb, atd);
}
- ipset_nest_end(skb, atd);
out:
rcu_read_unlock();
return ret;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 83395bf6dc35..432141f04af3 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3980,6 +3980,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)
static struct notifier_block ip_vs_dst_notifier = {
.notifier_call = ip_vs_dst_event,
+#ifdef CONFIG_IP_VS_IPV6
+ .priority = ADDRCONF_NOTIFY_PRIORITY + 5,
+#endif
};
int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index 02ca7df793f5..9cd180bda092 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -49,6 +49,7 @@ struct nf_conncount_tuple {
struct nf_conntrack_zone zone;
int cpu;
u32 jiffies32;
+ bool dead;
struct rcu_head rcu_head;
};
@@ -106,15 +107,16 @@ nf_conncount_add(struct nf_conncount_list *list,
conn->zone = *zone;
conn->cpu = raw_smp_processor_id();
conn->jiffies32 = (u32)jiffies;
- spin_lock(&list->list_lock);
+ conn->dead = false;
+ spin_lock_bh(&list->list_lock);
if (list->dead == true) {
kmem_cache_free(conncount_conn_cachep, conn);
- spin_unlock(&list->list_lock);
+ spin_unlock_bh(&list->list_lock);
return NF_CONNCOUNT_SKIP;
}
list_add_tail(&conn->node, &list->head);
list->count++;
- spin_unlock(&list->list_lock);
+ spin_unlock_bh(&list->list_lock);
return NF_CONNCOUNT_ADDED;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);
@@ -132,19 +134,22 @@ static bool conn_free(struct nf_conncount_list *list,
{
bool free_entry = false;
- spin_lock(&list->list_lock);
+ spin_lock_bh(&list->list_lock);
- if (list->count == 0) {
- spin_unlock(&list->list_lock);
- return free_entry;
+ if (conn->dead) {
+ spin_unlock_bh(&list->list_lock);
+ return free_entry;
}
list->count--;
+ conn->dead = true;
list_del_rcu(&conn->node);
- if (list->count == 0)
+ if (list->count == 0) {
+ list->dead = true;
free_entry = true;
+ }
- spin_unlock(&list->list_lock);
+ spin_unlock_bh(&list->list_lock);
call_rcu(&conn->rcu_head, __conn_free);
return free_entry;
}
@@ -245,7 +250,7 @@ void nf_conncount_list_init(struct nf_conncount_list *list)
{
spin_lock_init(&list->list_lock);
INIT_LIST_HEAD(&list->head);
- list->count = 1;
+ list->count = 0;
list->dead = false;
}
EXPORT_SYMBOL_GPL(nf_conncount_list_init);
@@ -259,6 +264,7 @@ bool nf_conncount_gc_list(struct net *net,
struct nf_conn *found_ct;
unsigned int collected = 0;
bool free_entry = false;
+ bool ret = false;
list_for_each_entry_safe(conn, conn_n, &list->head, node) {
found = find_or_evict(net, list, conn, &free_entry);
@@ -288,7 +294,15 @@ bool nf_conncount_gc_list(struct net *net,
if (collected > CONNCOUNT_GC_MAX_NODES)
return false;
}
- return false;
+
+ spin_lock_bh(&list->list_lock);
+ if (!list->count) {
+ list->dead = true;
+ ret = true;
+ }
+ spin_unlock_bh(&list->list_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
@@ -309,11 +323,8 @@ static void tree_nodes_free(struct rb_root *root,
while (gc_count) {
rbconn = gc_nodes[--gc_count];
spin_lock(&rbconn->list.list_lock);
- if (rbconn->list.count == 0 && rbconn->list.dead == false) {
- rbconn->list.dead = true;
- rb_erase(&rbconn->node, root);
- call_rcu(&rbconn->rcu_head, __tree_nodes_free);
- }
+ rb_erase(&rbconn->node, root);
+ call_rcu(&rbconn->rcu_head, __tree_nodes_free);
spin_unlock(&rbconn->list.list_lock);
}
}
@@ -414,8 +425,9 @@ insert_tree(struct net *net,
nf_conncount_list_init(&rbconn->list);
list_add(&conn->node, &rbconn->list.head);
count = 1;
+ rbconn->list.count = count;
- rb_link_node(&rbconn->node, parent, rbnode);
+ rb_link_node_rcu(&rbconn->node, parent, rbnode);
rb_insert_color(&rbconn->node, root);
out_unlock:
spin_unlock_bh(&nf_conncount_locks[hash % CONNCOUNT_LOCK_SLOTS]);
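The dead flag added to nf_conncount_tuple makes a second, racing free a no-op: only the caller that flips dead under the list lock unlinks the node and drops the count. A simplified standalone model of that first-caller-wins guard (a pthread mutex stands in for the spinlock; all names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	bool dead;
};

struct list {
	pthread_mutex_t lock;
	int count;
};

static bool node_free(struct list *l, struct node *n)
{
	bool freed = false;

	pthread_mutex_lock(&l->lock);
	if (!n->dead) {		/* only the first caller wins */
		n->dead = true;
		l->count--;
		freed = true;
	}
	pthread_mutex_unlock(&l->lock);
	return freed;
}

int main(void)
{
	struct list l = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct node n = { .dead = false };

	printf("first free: %d\n", node_free(&l, &n));	/* 1 */
	printf("second free: %d\n", node_free(&l, &n));	/* 0 */
	return 0;
}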
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index 1d66de5151b2..49e523cc49d0 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -25,102 +25,15 @@ static bool nf_ct_acct __read_mostly;
module_param_named(acct, nf_ct_acct, bool, 0644);
MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting.");
-#ifdef CONFIG_SYSCTL
-static struct ctl_table acct_sysctl_table[] = {
- {
- .procname = "nf_conntrack_acct",
- .data = &init_net.ct.sysctl_acct,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {}
-};
-#endif /* CONFIG_SYSCTL */
-
-unsigned int
-seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
-{
- struct nf_conn_acct *acct;
- struct nf_conn_counter *counter;
-
- acct = nf_conn_acct_find(ct);
- if (!acct)
- return 0;
-
- counter = acct->counter;
- seq_printf(s, "packets=%llu bytes=%llu ",
- (unsigned long long)atomic64_read(&counter[dir].packets),
- (unsigned long long)atomic64_read(&counter[dir].bytes));
-
- return 0;
-};
-EXPORT_SYMBOL_GPL(seq_print_acct);
-
static const struct nf_ct_ext_type acct_extend = {
.len = sizeof(struct nf_conn_acct),
.align = __alignof__(struct nf_conn_acct),
.id = NF_CT_EXT_ACCT,
};
-#ifdef CONFIG_SYSCTL
-static int nf_conntrack_acct_init_sysctl(struct net *net)
-{
- struct ctl_table *table;
-
- table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
- GFP_KERNEL);
- if (!table)
- goto out;
-
- table[0].data = &net->ct.sysctl_acct;
-
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- table[0].procname = NULL;
-
- net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter",
- table);
- if (!net->ct.acct_sysctl_header) {
- pr_err("can't register to sysctl\n");
- goto out_register;
- }
- return 0;
-
-out_register:
- kfree(table);
-out:
- return -ENOMEM;
-}
-
-static void nf_conntrack_acct_fini_sysctl(struct net *net)
-{
- struct ctl_table *table;
-
- table = net->ct.acct_sysctl_header->ctl_table_arg;
- unregister_net_sysctl_table(net->ct.acct_sysctl_header);
- kfree(table);
-}
-#else
-static int nf_conntrack_acct_init_sysctl(struct net *net)
-{
- return 0;
-}
-
-static void nf_conntrack_acct_fini_sysctl(struct net *net)
-{
-}
-#endif
-
-int nf_conntrack_acct_pernet_init(struct net *net)
+void nf_conntrack_acct_pernet_init(struct net *net)
{
net->ct.sysctl_acct = nf_ct_acct;
- return nf_conntrack_acct_init_sysctl(net);
-}
-
-void nf_conntrack_acct_pernet_fini(struct net *net)
-{
- nf_conntrack_acct_fini_sysctl(net);
}
int nf_conntrack_acct_init(void)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index e92e749aff53..e87c21e47efe 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2110,10 +2110,7 @@ i_see_dead_people:
list_for_each_entry(net, net_exit_list, exit_list) {
nf_conntrack_proto_pernet_fini(net);
- nf_conntrack_helper_pernet_fini(net);
nf_conntrack_ecache_pernet_fini(net);
- nf_conntrack_tstamp_pernet_fini(net);
- nf_conntrack_acct_pernet_fini(net);
nf_conntrack_expect_pernet_fini(net);
free_percpu(net->ct.stat);
free_percpu(net->ct.pcpu_lists);
@@ -2410,32 +2407,19 @@ int nf_conntrack_init_net(struct net *net)
ret = nf_conntrack_expect_pernet_init(net);
if (ret < 0)
goto err_expect;
- ret = nf_conntrack_acct_pernet_init(net);
- if (ret < 0)
- goto err_acct;
- ret = nf_conntrack_tstamp_pernet_init(net);
- if (ret < 0)
- goto err_tstamp;
- ret = nf_conntrack_ecache_pernet_init(net);
- if (ret < 0)
- goto err_ecache;
- ret = nf_conntrack_helper_pernet_init(net);
- if (ret < 0)
- goto err_helper;
+
+ nf_conntrack_acct_pernet_init(net);
+ nf_conntrack_tstamp_pernet_init(net);
+ nf_conntrack_ecache_pernet_init(net);
+ nf_conntrack_helper_pernet_init(net);
+
ret = nf_conntrack_proto_pernet_init(net);
if (ret < 0)
goto err_proto;
return 0;
err_proto:
- nf_conntrack_helper_pernet_fini(net);
-err_helper:
nf_conntrack_ecache_pernet_fini(net);
-err_ecache:
- nf_conntrack_tstamp_pernet_fini(net);
-err_tstamp:
- nf_conntrack_acct_pernet_fini(net);
-err_acct:
nf_conntrack_expect_pernet_fini(net);
err_expect:
free_percpu(net->ct.stat);
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index c11822a7d2bf..3d042f8ff183 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -336,85 +336,21 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
-#ifdef CONFIG_SYSCTL
-static struct ctl_table event_sysctl_table[] = {
- {
- .procname = "nf_conntrack_events",
- .data = &init_net.ct.sysctl_events,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {}
-};
-#endif /* CONFIG_SYSCTL */
-
static const struct nf_ct_ext_type event_extend = {
.len = sizeof(struct nf_conntrack_ecache),
.align = __alignof__(struct nf_conntrack_ecache),
.id = NF_CT_EXT_ECACHE,
};
-#ifdef CONFIG_SYSCTL
-static int nf_conntrack_event_init_sysctl(struct net *net)
-{
- struct ctl_table *table;
-
- table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
- GFP_KERNEL);
- if (!table)
- goto out;
-
- table[0].data = &net->ct.sysctl_events;
-
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- table[0].procname = NULL;
-
- net->ct.event_sysctl_header =
- register_net_sysctl(net, "net/netfilter", table);
- if (!net->ct.event_sysctl_header) {
- pr_err("can't register to sysctl\n");
- goto out_register;
- }
- return 0;
-
-out_register:
- kfree(table);
-out:
- return -ENOMEM;
-}
-
-static void nf_conntrack_event_fini_sysctl(struct net *net)
-{
- struct ctl_table *table;
-
- table = net->ct.event_sysctl_header->ctl_table_arg;
- unregister_net_sysctl_table(net->ct.event_sysctl_header);
- kfree(table);
-}
-#else
-static int nf_conntrack_event_init_sysctl(struct net *net)
-{
- return 0;
-}
-
-static void nf_conntrack_event_fini_sysctl(struct net *net)
-{
-}
-#endif /* CONFIG_SYSCTL */
-
-int nf_conntrack_ecache_pernet_init(struct net *net)
+void nf_conntrack_ecache_pernet_init(struct net *net)
{
net->ct.sysctl_events = nf_ct_events;
INIT_DELAYED_WORK(&net->ct.ecache_dwork, ecache_work);
- return nf_conntrack_event_init_sysctl(net);
}
void nf_conntrack_ecache_pernet_fini(struct net *net)
{
cancel_delayed_work_sync(&net->ct.ecache_dwork);
- nf_conntrack_event_fini_sysctl(net);
}
int nf_conntrack_ecache_init(void)
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index e24b762ffa1d..274baf1dab87 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -42,67 +42,6 @@ module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644);
MODULE_PARM_DESC(nf_conntrack_helper,
"Enable automatic conntrack helper assignment (default 0)");
-#ifdef CONFIG_SYSCTL
-static struct ctl_table helper_sysctl_table[] = {
- {
- .procname = "nf_conntrack_helper",
- .data = &init_net.ct.sysctl_auto_assign_helper,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {}
-};
-
-static int nf_conntrack_helper_init_sysctl(struct net *net)
-{
- struct ctl_table *table;
-
- table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
- GFP_KERNEL);
- if (!table)
- goto out;
-
- table[0].data = &net->ct.sysctl_auto_assign_helper;
-
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- table[0].procname = NULL;
-
- net->ct.helper_sysctl_header =
- register_net_sysctl(net, "net/netfilter", table);
-
- if (!net->ct.helper_sysctl_header) {
- pr_err("nf_conntrack_helper: can't register to sysctl.\n");
- goto out_register;
- }
- return 0;
-
-out_register:
- kfree(table);
-out:
- return -ENOMEM;
-}
-
-static void nf_conntrack_helper_fini_sysctl(struct net *net)
-{
- struct ctl_table *table;
-
- table = net->ct.helper_sysctl_header->ctl_table_arg;
- unregister_net_sysctl_table(net->ct.helper_sysctl_header);
- kfree(table);
-}
-#else
-static int nf_conntrack_helper_init_sysctl(struct net *net)
-{
- return 0;
-}
-
-static void nf_conntrack_helper_fini_sysctl(struct net *net)
-{
-}
-#endif /* CONFIG_SYSCTL */
-
/* Stupid hash, but collision free for the default registrations of the
* helpers currently in the kernel. */
static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple)
@@ -533,16 +472,10 @@ static const struct nf_ct_ext_type helper_extend = {
.id = NF_CT_EXT_HELPER,
};
-int nf_conntrack_helper_pernet_init(struct net *net)
+void nf_conntrack_helper_pernet_init(struct net *net)
{
net->ct.auto_assign_helper_warned = false;
net->ct.sysctl_auto_assign_helper = nf_ct_auto_assign_helper;
- return nf_conntrack_helper_init_sysctl(net);
-}
-
-void nf_conntrack_helper_pernet_fini(struct net *net)
-{
- nf_conntrack_helper_fini_sysctl(net);
}
int nf_conntrack_helper_init(void)
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 4ae8e528943a..1213beb5a714 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -47,7 +47,6 @@
#include <net/netfilter/nf_conntrack_synproxy.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_helper.h>
#endif
@@ -1688,6 +1687,22 @@ static int ctnetlink_change_timeout(struct nf_conn *ct,
return 0;
}
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+static void ctnetlink_change_mark(struct nf_conn *ct,
+ const struct nlattr * const cda[])
+{
+ u32 mark, newmark, mask = 0;
+
+ if (cda[CTA_MARK_MASK])
+ mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
+
+ mark = ntohl(nla_get_be32(cda[CTA_MARK]));
+ newmark = (ct->mark & mask) ^ mark;
+ if (newmark != ct->mark)
+ ct->mark = newmark;
+}
+#endif
+
static const struct nla_policy protoinfo_policy[CTA_PROTOINFO_MAX+1] = {
[CTA_PROTOINFO_TCP] = { .type = NLA_NESTED },
[CTA_PROTOINFO_DCCP] = { .type = NLA_NESTED },
@@ -1883,7 +1898,7 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK])
- ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
+ ctnetlink_change_mark(ct, cda);
#endif
if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
@@ -2027,7 +2042,7 @@ ctnetlink_create_conntrack(struct net *net,
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK])
- ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
+ ctnetlink_change_mark(ct, cda);
#endif
/* setup master conntrack: this is a confirmed expectation */
@@ -2524,14 +2539,7 @@ ctnetlink_glue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
}
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (cda[CTA_MARK]) {
- u32 mask = 0, mark, newmark;
- if (cda[CTA_MARK_MASK])
- mask = ~ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
-
- mark = ntohl(nla_get_be32(cda[CTA_MARK]));
- newmark = (ct->mark & mask) ^ mark;
- if (newmark != ct->mark)
- ct->mark = newmark;
+ ctnetlink_change_mark(ct, cda);
}
#endif
return 0;
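ctnetlink_change_mark centralizes the mark update used in all three call sites above: the bits covered by CTA_MARK_MASK are cleared from the old mark and the new value is XORed in, i.e. (ct->mark & ~mask) ^ mark. A standalone check of that arithmetic (values invented):

#include <stdio.h>

/* mark_mask plays the role of ntohl(CTA_MARK_MASK); bits it covers
 * are replaced, everything else is kept. */
static unsigned int change_mark(unsigned int old, unsigned int mark,
				unsigned int mark_mask)
{
	return (old & ~mark_mask) ^ mark;
}

int main(void)
{
	/* replace only the low byte: mask covers 0xff */
	printf("0x%08x\n", change_mark(0xaabbccdd, 0x11, 0xff)); /* 0xaabbcc11 */
	return 0;
}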
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 40643af7137e..859f5d07a915 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -175,8 +175,7 @@ static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
static
int nf_ct_l4proto_register_sysctl(struct net *net,
- struct nf_proto_net *pn,
- const struct nf_conntrack_l4proto *l4proto)
+ struct nf_proto_net *pn)
{
int err = 0;
@@ -198,9 +197,7 @@ int nf_ct_l4proto_register_sysctl(struct net *net,
}
static
-void nf_ct_l4proto_unregister_sysctl(struct net *net,
- struct nf_proto_net *pn,
- const struct nf_conntrack_l4proto *l4proto)
+void nf_ct_l4proto_unregister_sysctl(struct nf_proto_net *pn)
{
#ifdef CONFIG_SYSCTL
if (pn->ctl_table_header != NULL)
@@ -252,7 +249,7 @@ int nf_ct_l4proto_pernet_register_one(struct net *net,
if (pn == NULL)
goto out;
- ret = nf_ct_l4proto_register_sysctl(net, pn, l4proto);
+ ret = nf_ct_l4proto_register_sysctl(net, pn);
if (ret < 0)
goto out;
@@ -296,7 +293,7 @@ void nf_ct_l4proto_pernet_unregister_one(struct net *net,
return;
pn->users--;
- nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
+ nf_ct_l4proto_unregister_sysctl(pn);
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one);
@@ -946,16 +943,14 @@ int nf_conntrack_proto_pernet_init(struct net *net)
if (err < 0)
return err;
err = nf_ct_l4proto_register_sysctl(net,
- pn,
- &nf_conntrack_l4proto_generic);
+ pn);
if (err < 0)
return err;
err = nf_ct_l4proto_pernet_register(net, builtin_l4proto,
ARRAY_SIZE(builtin_l4proto));
if (err < 0) {
- nf_ct_l4proto_unregister_sysctl(net, pn,
- &nf_conntrack_l4proto_generic);
+ nf_ct_l4proto_unregister_sysctl(pn);
return err;
}
@@ -971,9 +966,7 @@ void nf_conntrack_proto_pernet_fini(struct net *net)
nf_ct_l4proto_pernet_unregister(net, builtin_l4proto,
ARRAY_SIZE(builtin_l4proto));
pn->users--;
- nf_ct_l4proto_unregister_sysctl(net,
- pn,
- &nf_conntrack_l4proto_generic);
+ nf_ct_l4proto_unregister_sysctl(pn);
}
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 9b48dc8b4b88..8899b51aad44 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -43,24 +43,12 @@
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
-enum grep_conntrack {
- GRE_CT_UNREPLIED,
- GRE_CT_REPLIED,
- GRE_CT_MAX
-};
-
static const unsigned int gre_timeouts[GRE_CT_MAX] = {
[GRE_CT_UNREPLIED] = 30*HZ,
[GRE_CT_REPLIED] = 180*HZ,
};
static unsigned int proto_gre_net_id __read_mostly;
-struct netns_proto_gre {
- struct nf_proto_net nf;
- rwlock_t keymap_lock;
- struct list_head keymap_list;
- unsigned int gre_timeouts[GRE_CT_MAX];
-};
static inline struct netns_proto_gre *gre_pernet(struct net *net)
{
@@ -332,9 +320,49 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
};
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+#ifdef CONFIG_SYSCTL
+static struct ctl_table gre_sysctl_table[] = {
+ {
+ .procname = "nf_conntrack_gre_timeout",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
+ .procname = "nf_conntrack_gre_timeout_stream",
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {}
+};
+#endif
+
+static int gre_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *nf,
+ struct netns_proto_gre *net_gre)
+{
+#ifdef CONFIG_SYSCTL
+ int i;
+
+ if (nf->ctl_table)
+ return 0;
+
+ nf->ctl_table = kmemdup(gre_sysctl_table,
+ sizeof(gre_sysctl_table),
+ GFP_KERNEL);
+ if (!nf->ctl_table)
+ return -ENOMEM;
+
+ for (i = 0; i < GRE_CT_MAX; i++)
+ nf->ctl_table[i].data = &net_gre->gre_timeouts[i];
+#endif
+ return 0;
+}
+
static int gre_init_net(struct net *net)
{
struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_proto_net *nf = &net_gre->nf;
int i;
rwlock_init(&net_gre->keymap_lock);
@@ -342,7 +370,7 @@ static int gre_init_net(struct net *net)
for (i = 0; i < GRE_CT_MAX; i++)
net_gre->gre_timeouts[i] = gre_timeouts[i];
- return 0;
+ return gre_kmemdup_sysctl_table(net, nf, net_gre);
}
/* protocol helper struct */
@@ -402,6 +430,8 @@ static int __init nf_ct_proto_gre_init(void)
{
int ret;
+ BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0);
+
ret = register_pernet_subsys(&proto_gre_net_ops);
if (ret < 0)
goto out_pernet;
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index c879d8d78cfd..b4f5d5e82031 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -29,7 +29,7 @@
static const unsigned int udp_timeouts[UDP_CT_MAX] = {
[UDP_CT_UNREPLIED] = 30*HZ,
- [UDP_CT_REPLIED] = 180*HZ,
+ [UDP_CT_REPLIED] = 120*HZ,
};
static unsigned int *udp_get_timeouts(struct net *net)
@@ -100,11 +100,21 @@ static int udp_packet(struct nf_conn *ct,
if (!timeouts)
timeouts = udp_get_timeouts(nf_ct_net(ct));
+ if (!nf_ct_is_confirmed(ct))
+ ct->proto.udp.stream_ts = 2 * HZ + jiffies;
+
/* If we've seen traffic both ways, this is some kind of UDP
- stream. Extend timeout. */
+ * stream. Set Assured.
+ */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
- nf_ct_refresh_acct(ct, ctinfo, skb,
- timeouts[UDP_CT_REPLIED]);
+ unsigned long extra = timeouts[UDP_CT_UNREPLIED];
+
+ /* Still active after two seconds? Extend timeout. */
+ if (time_after(jiffies, ct->proto.udp.stream_ts))
+ extra = timeouts[UDP_CT_REPLIED];
+
+ nf_ct_refresh_acct(ct, ctinfo, skb, extra);
+
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
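The UDP change only grants the long "replied" timeout to flows still exchanging packets two seconds after creation; early bidirectional bursts keep the short timeout. A standalone model of the deadline test (tick values stand in for jiffies; the constants come from the hunk above):

#include <stdio.h>

#define UDP_UNREPLIED_TIMEOUT	30	/* from the hunk: 30*HZ  */
#define UDP_REPLIED_TIMEOUT	120	/* from the hunk: 120*HZ */

/* Equivalent of: extra = unreplied; if (time_after(now, stream_ts))
 * extra = replied; */
static int pick_timeout(long now, long stream_deadline)
{
	return (now - stream_deadline > 0) ? UDP_REPLIED_TIMEOUT
					   : UDP_UNREPLIED_TIMEOUT;
}

int main(void)
{
	long created = 100, deadline = created + 2;

	printf("reply at +1: timeout %d\n", pick_timeout(created + 1, deadline));
	printf("reply at +5: timeout %d\n", pick_timeout(created + 5, deadline));
	return 0;
}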
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index a975efd6b8c3..9da303461069 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -115,12 +115,12 @@ static void nf_ct_sack_block_adjust(struct sk_buff *skb,
/* TCP SACK sequence number adjustment */
static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
unsigned int protoff,
- struct tcphdr *tcph,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
- unsigned int dir, optoff, optend;
+ struct tcphdr *tcph = (void *)skb->data + protoff;
struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+ unsigned int dir, optoff, optend;
optoff = protoff + sizeof(struct tcphdr);
optend = protoff + tcph->doff * 4;
@@ -128,6 +128,7 @@ static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
if (!skb_make_writable(skb, optend))
return 0;
+ tcph = (void *)skb->data + protoff;
dir = CTINFO2DIR(ctinfo);
while (optoff < optend) {
@@ -207,7 +208,7 @@ int nf_ct_seq_adjust(struct sk_buff *skb,
ntohl(newack));
tcph->ack_seq = newack;
- res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+ res = nf_ct_sack_adjust(skb, protoff, ct, ctinfo);
out:
spin_unlock_bh(&ct->lock);
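nf_ct_sack_adjust now takes protoff instead of a tcph pointer and re-reads tcph after skb_make_writable(), because making the skb writable can reallocate its data and leave the old header pointer dangling. A standalone model of the same pattern with realloc (all names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	unsigned char *data;
	size_t len;
};

static int make_writable(struct buf *b, size_t need)
{
	if (need > b->len) {
		unsigned char *n = realloc(b->data, need); /* may move data */

		if (!n)
			return 0;
		memset(n + b->len, 0, need - b->len);
		b->data = n;
		b->len = need;
	}
	return 1;
}

int main(void)
{
	struct buf b = { calloc(16, 1), 16 };
	size_t off = 4;
	unsigned char *hdr = b.data + off;	/* pointer into old buffer */

	if (!make_writable(&b, 4096))
		return 1;
	hdr = b.data + off;			/* re-derive after realloc */
	hdr[0] = 0xff;

	printf("ok, byte=0x%02x\n", b.data[off]);
	free(b.data);
	return 0;
}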
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 463d17d349c1..b6177fd73304 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -267,6 +267,24 @@ static const char* l4proto_name(u16 proto)
return "unknown";
}
+static unsigned int
+seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir)
+{
+ struct nf_conn_acct *acct;
+ struct nf_conn_counter *counter;
+
+ acct = nf_conn_acct_find(ct);
+ if (!acct)
+ return 0;
+
+ counter = acct->counter;
+ seq_printf(s, "packets=%llu bytes=%llu ",
+ (unsigned long long)atomic64_read(&counter[dir].packets),
+ (unsigned long long)atomic64_read(&counter[dir].bytes));
+
+ return 0;
+}
+
/* return 0 on success, 1 in case of error */
static int ct_seq_show(struct seq_file *s, void *v)
{
@@ -514,36 +532,53 @@ nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
static struct ctl_table_header *nf_ct_netfilter_header;
+enum nf_ct_sysctl_index {
+ NF_SYSCTL_CT_MAX,
+ NF_SYSCTL_CT_COUNT,
+ NF_SYSCTL_CT_BUCKETS,
+ NF_SYSCTL_CT_CHECKSUM,
+ NF_SYSCTL_CT_LOG_INVALID,
+ NF_SYSCTL_CT_EXPECT_MAX,
+ NF_SYSCTL_CT_ACCT,
+ NF_SYSCTL_CT_HELPER,
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ NF_SYSCTL_CT_EVENTS,
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ NF_SYSCTL_CT_TIMESTAMP,
+#endif
+};
+
static struct ctl_table nf_ct_sysctl_table[] = {
- {
+ [NF_SYSCTL_CT_MAX] = {
.procname = "nf_conntrack_max",
.data = &nf_conntrack_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NF_SYSCTL_CT_COUNT] = {
.procname = "nf_conntrack_count",
.data = &init_net.ct.count,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
- {
+ [NF_SYSCTL_CT_BUCKETS] = {
.procname = "nf_conntrack_buckets",
.data = &nf_conntrack_htable_size_user,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = nf_conntrack_hash_sysctl,
},
- {
+ [NF_SYSCTL_CT_CHECKSUM] = {
.procname = "nf_conntrack_checksum",
.data = &init_net.ct.sysctl_checksum,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
- {
+ [NF_SYSCTL_CT_LOG_INVALID] = {
.procname = "nf_conntrack_log_invalid",
.data = &init_net.ct.sysctl_log_invalid,
.maxlen = sizeof(unsigned int),
@@ -552,13 +587,45 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.extra1 = &log_invalid_proto_min,
.extra2 = &log_invalid_proto_max,
},
- {
+ [NF_SYSCTL_CT_EXPECT_MAX] = {
.procname = "nf_conntrack_expect_max",
.data = &nf_ct_expect_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ [NF_SYSCTL_CT_ACCT] = {
+ .procname = "nf_conntrack_acct",
+ .data = &init_net.ct.sysctl_acct,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ [NF_SYSCTL_CT_HELPER] = {
+ .procname = "nf_conntrack_helper",
+ .data = &init_net.ct.sysctl_auto_assign_helper,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ [NF_SYSCTL_CT_EVENTS] = {
+ .procname = "nf_conntrack_events",
+ .data = &init_net.ct.sysctl_events,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ [NF_SYSCTL_CT_TIMESTAMP] = {
+ .procname = "nf_conntrack_timestamp",
+ .data = &init_net.ct.sysctl_tstamp,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
{ }
};
@@ -582,16 +649,28 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
if (!table)
goto out_kmemdup;
- table[1].data = &net->ct.count;
- table[3].data = &net->ct.sysctl_checksum;
- table[4].data = &net->ct.sysctl_log_invalid;
+ table[NF_SYSCTL_CT_COUNT].data = &net->ct.count;
+ table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
+ table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
+#endif
/* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- table[0].procname = NULL;
+ if (net->user_ns != &init_user_ns) {
+ table[NF_SYSCTL_CT_MAX].procname = NULL;
+ table[NF_SYSCTL_CT_ACCT].procname = NULL;
+ table[NF_SYSCTL_CT_HELPER].procname = NULL;
+#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
+ table[NF_SYSCTL_CT_TIMESTAMP].procname = NULL;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ table[NF_SYSCTL_CT_EVENTS].procname = NULL;
+#endif
+ }
if (!net_eq(&init_net, net))
- table[2].mode = 0444;
+ table[NF_SYSCTL_CT_BUCKETS].mode = 0444;
net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
if (!net->ct.sysctl_header)
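The enum nf_ct_sysctl_index change swaps fragile numeric offsets (table[1], table[3], ...) for designated initializers, so the per-net pointer fixups name the entry they touch and survive reordering. A standalone sketch of the idiom (the entries here are invented):

#include <stdio.h>

enum ct_sysctl_index {
	CT_MAX,
	CT_COUNT,
	CT_BUCKETS,
	CT_LAST,
};

struct entry {
	const char *name;
	int value;
};

static struct entry table[] = {
	[CT_MAX]     = { "nf_conntrack_max",     0 },
	[CT_COUNT]   = { "nf_conntrack_count",   0 },
	[CT_BUCKETS] = { "nf_conntrack_buckets", 0 },
};

int main(void)
{
	table[CT_COUNT].value = 42;	/* no magic "table[1]" needed */

	for (int i = 0; i < CT_LAST; i++)
		printf("%s = %d\n", table[i].name, table[i].value);
	return 0;
}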
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index 56766cb26e40..705b912bd91f 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -22,83 +22,15 @@ static bool nf_ct_tstamp __read_mostly;
module_param_named(tstamp, nf_ct_tstamp, bool, 0644);
MODULE_PARM_DESC(tstamp, "Enable connection tracking flow timestamping.");
-#ifdef CONFIG_SYSCTL
-static struct ctl_table tstamp_sysctl_table[] = {
- {
- .procname = "nf_conntrack_timestamp",
- .data = &init_net.ct.sysctl_tstamp,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {}
-};
-#endif /* CONFIG_SYSCTL */
-
static const struct nf_ct_ext_type tstamp_extend = {
.len = sizeof(struct nf_conn_tstamp),
.align = __alignof__(struct nf_conn_tstamp),
.id = NF_CT_EXT_TSTAMP,
};
-#ifdef CONFIG_SYSCTL
-static int nf_conntrack_tstamp_init_sysctl(struct net *net)
-{
- struct ctl_table *table;
-
- table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
- GFP_KERNEL);
- if (!table)
- goto out;
-
- table[0].data = &net->ct.sysctl_tstamp;
-
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- table[0].procname = NULL;
-
- net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter",
- table);
- if (!net->ct.tstamp_sysctl_header) {
- pr_err("can't register to sysctl\n");
- goto out_register;
- }
- return 0;
-
-out_register:
- kfree(table);
-out:
- return -ENOMEM;
-}
-
-static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
-{
- struct ctl_table *table;
-
- table = net->ct.tstamp_sysctl_header->ctl_table_arg;
- unregister_net_sysctl_table(net->ct.tstamp_sysctl_header);
- kfree(table);
-}
-#else
-static int nf_conntrack_tstamp_init_sysctl(struct net *net)
-{
- return 0;
-}
-
-static void nf_conntrack_tstamp_fini_sysctl(struct net *net)
-{
-}
-#endif
-
-int nf_conntrack_tstamp_pernet_init(struct net *net)
+void nf_conntrack_tstamp_pernet_init(struct net *net)
{
net->ct.sysctl_tstamp = nf_ct_tstamp;
- return nf_conntrack_tstamp_init_sysctl(net);
-}
-
-void nf_conntrack_tstamp_pernet_fini(struct net *net)
-{
- nf_conntrack_tstamp_fini_sysctl(net);
}
int nf_conntrack_tstamp_init(void)
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index b7a4816add76..fa0844e2a68d 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -247,9 +247,10 @@ flow_offload_lookup(struct nf_flowtable *flow_table,
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
-int nf_flow_table_iterate(struct nf_flowtable *flow_table,
- void (*iter)(struct flow_offload *flow, void *data),
- void *data)
+static int
+nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ void (*iter)(struct flow_offload *flow, void *data),
+ void *data)
{
struct flow_offload_tuple_rhash *tuplehash;
struct rhashtable_iter hti;
@@ -279,40 +280,19 @@ int nf_flow_table_iterate(struct nf_flowtable *flow_table,
return err;
}
-EXPORT_SYMBOL_GPL(nf_flow_table_iterate);
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
return (__s32)(flow->timeout - (u32)jiffies) <= 0;
}
-static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
+static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
- struct flow_offload_tuple_rhash *tuplehash;
- struct rhashtable_iter hti;
- struct flow_offload *flow;
-
- rhashtable_walk_enter(&flow_table->rhashtable, &hti);
- rhashtable_walk_start(&hti);
+ struct nf_flowtable *flow_table = data;
- while ((tuplehash = rhashtable_walk_next(&hti))) {
- if (IS_ERR(tuplehash)) {
- if (PTR_ERR(tuplehash) != -EAGAIN)
- break;
- continue;
- }
- if (tuplehash->tuple.dir)
- continue;
-
- flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
-
- if (nf_flow_has_expired(flow) ||
- (flow->flags & (FLOW_OFFLOAD_DYING |
- FLOW_OFFLOAD_TEARDOWN)))
- flow_offload_del(flow_table, flow);
- }
- rhashtable_walk_stop(&hti);
- rhashtable_walk_exit(&hti);
+ if (nf_flow_has_expired(flow) ||
+ (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
+ flow_offload_del(flow_table, flow);
}
static void nf_flow_offload_work_gc(struct work_struct *work)
@@ -320,7 +300,7 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
struct nf_flowtable *flow_table;
flow_table = container_of(work, struct nf_flowtable, gc_work.work);
- nf_flow_offload_gc_step(flow_table);
+ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
@@ -504,7 +484,7 @@ void nf_flow_table_free(struct nf_flowtable *flow_table)
mutex_unlock(&flowtable_lock);
cancel_delayed_work_sync(&flow_table->gc_work);
nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
- nf_flow_offload_gc_step(flow_table);
+ nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
rhashtable_destroy(&flow_table->rhashtable);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
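With nf_flow_table_iterate made static, garbage collection becomes an ordinary callback driven by the shared walker instead of a private rhashtable walk. A simplified model of that callback-iterator split (a plain array stands in for the rhashtable; all names invented):

#include <stdio.h>

struct flow {
	int expired;
};

static void iterate(struct flow *flows, int n,
		    void (*iter)(struct flow *flow, void *data), void *data)
{
	for (int i = 0; i < n; i++)
		iter(&flows[i], data);
}

static void gc_step(struct flow *flow, void *data)
{
	int *reaped = data;

	if (flow->expired) {
		flow->expired = 0;	/* stand-in for flow_offload_del() */
		(*reaped)++;
	}
}

int main(void)
{
	struct flow flows[] = { { 1 }, { 0 }, { 1 } };
	int reaped = 0;

	iterate(flows, 3, gc_step, &reaped);
	printf("reaped %d flows\n", reaped);	/* 2 */
	return 0;
}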
diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
index a8c5c846aec1..3a0d6880b7c9 100644
--- a/net/netfilter/nf_log_common.c
+++ b/net/netfilter/nf_log_common.c
@@ -156,22 +156,20 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
const struct net_device *out,
const struct nf_loginfo *loginfo, const char *prefix)
{
+ const struct net_device *physoutdev __maybe_unused;
+ const struct net_device *physindev __maybe_unused;
+
nf_log_buf_add(m, KERN_SOH "%c%sIN=%s OUT=%s ",
'0' + loginfo->u.log.level, prefix,
in ? in->name : "",
out ? out->name : "");
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- if (skb->nf_bridge) {
- const struct net_device *physindev;
- const struct net_device *physoutdev;
-
- physindev = nf_bridge_get_physindev(skb);
- if (physindev && in != physindev)
- nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
- physoutdev = nf_bridge_get_physoutdev(skb);
- if (physoutdev && out != physoutdev)
- nf_log_buf_add(m, "PHYSOUT=%s ", physoutdev->name);
- }
+ physindev = nf_bridge_get_physindev(skb);
+ if (physindev && in != physindev)
+ nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
+ physoutdev = nf_bridge_get_physoutdev(skb);
+ if (physoutdev && out != physoutdev)
+ nf_log_buf_add(m, "PHYSOUT=%s ", physoutdev->name);
#endif
}
EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index e2b196054dfc..d159e9e7835b 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -23,7 +23,6 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
@@ -38,8 +37,6 @@ static spinlock_t nf_nat_locks[CONNTRACK_LOCKS];
static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
__read_mostly;
-static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
- __read_mostly;
static unsigned int nat_net_id __read_mostly;
static struct hlist_head *nf_nat_bysource __read_mostly;
@@ -67,13 +64,6 @@ __nf_nat_l3proto_find(u8 family)
return rcu_dereference(nf_nat_l3protos[family]);
}
-inline const struct nf_nat_l4proto *
-__nf_nat_l4proto_find(u8 family, u8 protonum)
-{
- return rcu_dereference(nf_nat_l4protos[family][protonum]);
-}
-EXPORT_SYMBOL_GPL(__nf_nat_l4proto_find);
-
#ifdef CONFIG_XFRM
static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
@@ -117,7 +107,8 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
dst = skb_dst(skb);
if (dst->xfrm)
dst = ((struct xfrm_dst *)dst)->route;
- dst_hold(dst);
+ if (!dst_hold_safe(dst))
+ return -EHOSTUNREACH;
if (sk && !net_eq(net, sock_net(sk)))
sk = NULL;
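
The switch from dst_hold() to dst_hold_safe() closes a race: the reference is taken only if the entry's refcount is still non-zero, and the caller bails out with -EHOSTUNREACH otherwise. The same "increment-unless-zero" idiom in portable C11 atomics (a sketch of the pattern, not the kernel's implementation):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only while the object is still live: a CAS loop
 * that refuses to resurrect a refcount that has already hit zero. */
static bool ref_get_unless_zero(atomic_uint *refs)
{
	unsigned int cur = atomic_load(refs);

	while (cur != 0) {
		if (atomic_compare_exchange_weak(refs, &cur, cur + 1))
			return true;
		/* cur was reloaded by the failed CAS; retry */
	}
	return false;
}

int main(void)
{
	atomic_uint live = 2, dead = 0;

	printf("live: %d\n", ref_get_unless_zero(&live));	/* 1: ref taken */
	printf("dead: %d\n", ref_get_unless_zero(&dead));	/* 0: object going away */
	return 0;
}
```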
@@ -172,27 +163,66 @@ nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
}
EXPORT_SYMBOL(nf_nat_used_tuple);
+static bool nf_nat_inet_in_range(const struct nf_conntrack_tuple *t,
+ const struct nf_nat_range2 *range)
+{
+ if (t->src.l3num == NFPROTO_IPV4)
+ return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
+ ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
+
+ return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
+ ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
+}
+
+/* Is the manipulable part of the tuple between min and max, inclusive? */
+static bool l4proto_in_range(const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype,
+ const union nf_conntrack_man_proto *min,
+ const union nf_conntrack_man_proto *max)
+{
+ __be16 port;
+
+ switch (tuple->dst.protonum) {
+ case IPPROTO_ICMP: /* fallthrough */
+ case IPPROTO_ICMPV6:
+ return ntohs(tuple->src.u.icmp.id) >= ntohs(min->icmp.id) &&
+ ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
+ case IPPROTO_GRE: /* all fall though */
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ case IPPROTO_DCCP:
+ case IPPROTO_SCTP:
+ if (maniptype == NF_NAT_MANIP_SRC)
+ port = tuple->src.u.all;
+ else
+ port = tuple->dst.u.all;
+
+ return ntohs(port) >= ntohs(min->all) &&
+ ntohs(port) <= ntohs(max->all);
+ default:
+ return true;
+ }
+}
+
/* If we source map this tuple so reply looks like reply_tuple, will
* that meet the constraints of range?
*/
-static int in_range(const struct nf_nat_l3proto *l3proto,
- const struct nf_nat_l4proto *l4proto,
- const struct nf_conntrack_tuple *tuple,
+static int in_range(const struct nf_conntrack_tuple *tuple,
const struct nf_nat_range2 *range)
{
/* If we are supposed to map IPs, then we must be in the
* range specified, otherwise let this drag us onto a new src IP.
*/
if (range->flags & NF_NAT_RANGE_MAP_IPS &&
- !l3proto->in_range(tuple, range))
+ !nf_nat_inet_in_range(tuple, range))
return 0;
- if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) ||
- l4proto->in_range(tuple, NF_NAT_MANIP_SRC,
- &range->min_proto, &range->max_proto))
+ if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))
return 1;
- return 0;
+ return l4proto_in_range(tuple, NF_NAT_MANIP_SRC,
+ &range->min_proto, &range->max_proto);
}
static inline int
@@ -211,8 +241,6 @@ same_src(const struct nf_conn *ct,
static int
find_appropriate_src(struct net *net,
const struct nf_conntrack_zone *zone,
- const struct nf_nat_l3proto *l3proto,
- const struct nf_nat_l4proto *l4proto,
const struct nf_conntrack_tuple *tuple,
struct nf_conntrack_tuple *result,
const struct nf_nat_range2 *range)
@@ -229,7 +257,7 @@ find_appropriate_src(struct net *net,
&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
result->dst = tuple->dst;
- if (in_range(l3proto, l4proto, result, range))
+ if (in_range(result, range))
return 1;
}
}
@@ -310,6 +338,123 @@ find_best_ips_proto(const struct nf_conntrack_zone *zone,
}
}
+/* Alter the per-proto part of the tuple (depending on maniptype), to
+ * give a unique tuple in the given range if possible.
+ *
+ * Per-protocol part of tuple is initialized to the incoming packet.
+ */
+static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_nat_range2 *range,
+ enum nf_nat_manip_type maniptype,
+ const struct nf_conn *ct)
+{
+ unsigned int range_size, min, max, i, attempts;
+ __be16 *keyptr;
+ u16 off;
+ static const unsigned int max_attempts = 128;
+
+ switch (tuple->dst.protonum) {
+ case IPPROTO_ICMP: /* fallthrough */
+ case IPPROTO_ICMPV6:
+ /* id is same for either direction... */
+ keyptr = &tuple->src.u.icmp.id;
+ min = range->min_proto.icmp.id;
+ range_size = ntohs(range->max_proto.icmp.id) -
+ ntohs(range->min_proto.icmp.id) + 1;
+ goto find_free_id;
+#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
+ case IPPROTO_GRE:
+ /* If there is no master conntrack we are not PPTP;
+ do not change tuples */
+ if (!ct->master)
+ return;
+
+ if (maniptype == NF_NAT_MANIP_SRC)
+ keyptr = &tuple->src.u.gre.key;
+ else
+ keyptr = &tuple->dst.u.gre.key;
+
+ if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
+ min = 1;
+ range_size = 65535;
+ } else {
+ min = ntohs(range->min_proto.gre.key);
+ range_size = ntohs(range->max_proto.gre.key) - min + 1;
+ }
+ goto find_free_id;
+#endif
+ case IPPROTO_UDP: /* fallthrough */
+ case IPPROTO_UDPLITE: /* fallthrough */
+ case IPPROTO_TCP: /* fallthrough */
+ case IPPROTO_SCTP: /* fallthrough */
+ case IPPROTO_DCCP: /* fallthrough */
+ if (maniptype == NF_NAT_MANIP_SRC)
+ keyptr = &tuple->src.u.all;
+ else
+ keyptr = &tuple->dst.u.all;
+
+ break;
+ default:
+ return;
+ }
+
+ /* If no range specified... */
+ if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
+ /* If it's dst rewrite, can't change port */
+ if (maniptype == NF_NAT_MANIP_DST)
+ return;
+
+ if (ntohs(*keyptr) < 1024) {
+ /* Loose convention: >> 512 is credential passing */
+ if (ntohs(*keyptr) < 512) {
+ min = 1;
+ range_size = 511 - min + 1;
+ } else {
+ min = 600;
+ range_size = 1023 - min + 1;
+ }
+ } else {
+ min = 1024;
+ range_size = 65535 - 1024 + 1;
+ }
+ } else {
+ min = ntohs(range->min_proto.all);
+ max = ntohs(range->max_proto.all);
+ if (unlikely(max < min))
+ swap(max, min);
+ range_size = max - min + 1;
+ }
+
+find_free_id:
+ if (range->flags & NF_NAT_RANGE_PROTO_OFFSET)
+ off = (ntohs(*keyptr) - ntohs(range->base_proto.all));
+ else
+ off = prandom_u32();
+
+ attempts = range_size;
+ if (attempts > max_attempts)
+ attempts = max_attempts;
+
+ /* We are in softirq; doing a search of the entire range risks
+ * soft lockup when all tuples are already used.
+ *
+ * If we can't find any free port from first offset, pick a new
+ * one and try again, with ever smaller search window.
+ */
+another_round:
+ for (i = 0; i < attempts; i++, off++) {
+ *keyptr = htons(min + off % range_size);
+ if (!nf_nat_used_tuple(tuple, ct))
+ return;
+ }
+
+ if (attempts >= range_size || attempts < 16)
+ return;
+ attempts /= 2;
+ off = prandom_u32();
+ goto another_round;
+}
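
The comment inside nf_nat_l4proto_unique_tuple() spells out the policy: running in softirq context, the loop probes at most 128 slots from an offset, then halves the window and re-seeds rather than scanning the whole range. A rough userspace model of that shrinking-window search (toy slot_in_use() predicate standing in for nf_nat_used_tuple()):

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ATTEMPTS 128

/* Toy stand-in for nf_nat_used_tuple(): pretend every port below
 * 1100 is already taken. */
static bool slot_in_use(unsigned int port)
{
	return port < 1100;
}

/* Shrinking-window search mirroring the loop above: probe at most
 * MAX_ATTEMPTS slots per pass, then halve the window and re-seed
 * the offset; give up once the window drops below 16. Returns 0
 * when no free slot was found. */
static unsigned int pick_free_port(unsigned int min, unsigned int range_size)
{
	unsigned int attempts = range_size < MAX_ATTEMPTS ? range_size : MAX_ATTEMPTS;
	unsigned int off = (unsigned int)rand();

	for (;;) {
		for (unsigned int i = 0; i < attempts; i++, off++) {
			unsigned int port = min + off % range_size;

			if (!slot_in_use(port))
				return port;
		}
		if (attempts >= range_size || attempts < 16)
			return 0;
		attempts /= 2;
		off = (unsigned int)rand();
	}
}

int main(void)
{
	printf("picked %u\n", pick_free_port(1024, 65535 - 1024 + 1));
	return 0;
}
```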
+
/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
* we change the source to map into the range. For NF_INET_PRE_ROUTING
* and NF_INET_LOCAL_OUT, we change the destination to map into the
@@ -324,17 +469,10 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
const struct nf_conntrack_zone *zone;
- const struct nf_nat_l3proto *l3proto;
- const struct nf_nat_l4proto *l4proto;
struct net *net = nf_ct_net(ct);
zone = nf_ct_zone(ct);
- rcu_read_lock();
- l3proto = __nf_nat_l3proto_find(orig_tuple->src.l3num);
- l4proto = __nf_nat_l4proto_find(orig_tuple->src.l3num,
- orig_tuple->dst.protonum);
-
/* 1) If this srcip/proto/src-proto-part is currently mapped,
* and that same mapping gives a unique tuple within the given
* range, use that.
@@ -346,16 +484,16 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
if (maniptype == NF_NAT_MANIP_SRC &&
!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
/* try the original tuple first */
- if (in_range(l3proto, l4proto, orig_tuple, range)) {
+ if (in_range(orig_tuple, range)) {
if (!nf_nat_used_tuple(orig_tuple, ct)) {
*tuple = *orig_tuple;
- goto out;
+ return;
}
- } else if (find_appropriate_src(net, zone, l3proto, l4proto,
+ } else if (find_appropriate_src(net, zone,
orig_tuple, tuple, range)) {
pr_debug("get_unique_tuple: Found current src map\n");
if (!nf_nat_used_tuple(tuple, ct))
- goto out;
+ return;
}
}
@@ -371,21 +509,19 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
- l4proto->in_range(tuple, maniptype,
+ l4proto_in_range(tuple, maniptype,
&range->min_proto,
&range->max_proto) &&
(range->min_proto.all == range->max_proto.all ||
!nf_nat_used_tuple(tuple, ct)))
- goto out;
+ return;
} else if (!nf_nat_used_tuple(tuple, ct)) {
- goto out;
+ return;
}
}
/* Last chance: get protocol to try to obtain unique tuple. */
- l4proto->unique_tuple(l3proto, tuple, range, maniptype, ct);
-out:
- rcu_read_unlock();
+ nf_nat_l4proto_unique_tuple(tuple, range, maniptype, ct);
}
struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct)
@@ -501,16 +637,13 @@ static unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_dir dir)
{
const struct nf_nat_l3proto *l3proto;
- const struct nf_nat_l4proto *l4proto;
struct nf_conntrack_tuple target;
/* We are aiming to look like inverse of other direction. */
nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
l3proto = __nf_nat_l3proto_find(target.src.l3num);
- l4proto = __nf_nat_l4proto_find(target.src.l3num,
- target.dst.protonum);
- if (!l3proto->manip_pkt(skb, 0, l4proto, &target, mtype))
+ if (!l3proto->manip_pkt(skb, 0, &target, mtype))
return NF_DROP;
return NF_ACCEPT;
@@ -666,16 +799,6 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
return 0;
}
-static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
-{
- struct nf_nat_proto_clean clean = {
- .l3proto = l3proto,
- .l4proto = l4proto,
- };
-
- nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
-}
-
static void nf_nat_l3proto_clean(u8 l3proto)
{
struct nf_nat_proto_clean clean = {
@@ -685,82 +808,8 @@ static void nf_nat_l3proto_clean(u8 l3proto)
nf_ct_iterate_destroy(nf_nat_proto_remove, &clean);
}
-/* Protocol registration. */
-int nf_nat_l4proto_register(u8 l3proto, const struct nf_nat_l4proto *l4proto)
-{
- const struct nf_nat_l4proto **l4protos;
- unsigned int i;
- int ret = 0;
-
- mutex_lock(&nf_nat_proto_mutex);
- if (nf_nat_l4protos[l3proto] == NULL) {
- l4protos = kmalloc_array(IPPROTO_MAX,
- sizeof(struct nf_nat_l4proto *),
- GFP_KERNEL);
- if (l4protos == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < IPPROTO_MAX; i++)
- RCU_INIT_POINTER(l4protos[i], &nf_nat_l4proto_unknown);
-
- /* Before making proto_array visible to lockless readers,
- * we must make sure its content is committed to memory.
- */
- smp_wmb();
-
- nf_nat_l4protos[l3proto] = l4protos;
- }
-
- if (rcu_dereference_protected(
- nf_nat_l4protos[l3proto][l4proto->l4proto],
- lockdep_is_held(&nf_nat_proto_mutex)
- ) != &nf_nat_l4proto_unknown) {
- ret = -EBUSY;
- goto out;
- }
- RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto], l4proto);
- out:
- mutex_unlock(&nf_nat_proto_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(nf_nat_l4proto_register);
-
-/* No one stores the protocol anywhere; simply delete it. */
-void nf_nat_l4proto_unregister(u8 l3proto, const struct nf_nat_l4proto *l4proto)
-{
- mutex_lock(&nf_nat_proto_mutex);
- RCU_INIT_POINTER(nf_nat_l4protos[l3proto][l4proto->l4proto],
- &nf_nat_l4proto_unknown);
- mutex_unlock(&nf_nat_proto_mutex);
- synchronize_rcu();
-
- nf_nat_l4proto_clean(l3proto, l4proto->l4proto);
-}
-EXPORT_SYMBOL_GPL(nf_nat_l4proto_unregister);
-
int nf_nat_l3proto_register(const struct nf_nat_l3proto *l3proto)
{
- mutex_lock(&nf_nat_proto_mutex);
- RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_TCP],
- &nf_nat_l4proto_tcp);
- RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDP],
- &nf_nat_l4proto_udp);
-#ifdef CONFIG_NF_NAT_PROTO_DCCP
- RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_DCCP],
- &nf_nat_l4proto_dccp);
-#endif
-#ifdef CONFIG_NF_NAT_PROTO_SCTP
- RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_SCTP],
- &nf_nat_l4proto_sctp);
-#endif
-#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
- RCU_INIT_POINTER(nf_nat_l4protos[l3proto->l3proto][IPPROTO_UDPLITE],
- &nf_nat_l4proto_udplite);
-#endif
- mutex_unlock(&nf_nat_proto_mutex);
-
RCU_INIT_POINTER(nf_nat_l3protos[l3proto->l3proto], l3proto);
return 0;
}
@@ -801,12 +850,26 @@ static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
[CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 },
};
+static int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
+ struct nf_nat_range2 *range)
+{
+ if (tb[CTA_PROTONAT_PORT_MIN]) {
+ range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
+ range->max_proto.all = range->min_proto.all;
+ range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+ }
+ if (tb[CTA_PROTONAT_PORT_MAX]) {
+ range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
+ range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+ }
+ return 0;
+}
+
static int nfnetlink_parse_nat_proto(struct nlattr *attr,
const struct nf_conn *ct,
struct nf_nat_range2 *range)
{
struct nlattr *tb[CTA_PROTONAT_MAX+1];
- const struct nf_nat_l4proto *l4proto;
int err;
err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr,
@@ -814,11 +877,7 @@ static int nfnetlink_parse_nat_proto(struct nlattr *attr,
if (err < 0)
return err;
- l4proto = __nf_nat_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
- if (l4proto->nlattr_to_range)
- err = l4proto->nlattr_to_range(tb, range);
-
- return err;
+ return nf_nat_l4proto_nlattr_to_range(tb, range);
}
static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
@@ -1081,7 +1140,6 @@ static int __init nf_nat_init(void)
static void __exit nf_nat_cleanup(void)
{
struct nf_nat_proto_clean clean = {};
- unsigned int i;
nf_ct_iterate_destroy(nf_nat_proto_clean, &clean);
@@ -1089,10 +1147,6 @@ static void __exit nf_nat_cleanup(void)
nf_ct_helper_expectfn_unregister(&follow_master_nat);
RCU_INIT_POINTER(nf_nat_hook, NULL);
- synchronize_rcu();
-
- for (i = 0; i < NFPROTO_NUMPROTO; i++)
- kfree(nf_nat_l4protos[i]);
synchronize_net();
kvfree(nf_nat_bysource);
unregister_pernet_subsys(&nat_net_ops);
diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
new file mode 100644
index 000000000000..f83bf9d8c9f5
--- /dev/null
+++ b/net/netfilter/nf_nat_proto.c
@@ -0,0 +1,343 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+
+#include <linux/dccp.h>
+#include <linux/sctp.h>
+#include <net/sctp/checksum.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_core.h>
+#include <net/netfilter/nf_nat_l3proto.h>
+#include <net/netfilter/nf_nat_l4proto.h>
+
+static void
+__udp_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, struct udphdr *hdr,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype, bool do_csum)
+{
+ __be16 *portptr, newport;
+
+ if (maniptype == NF_NAT_MANIP_SRC) {
+ /* Get rid of src port */
+ newport = tuple->src.u.udp.port;
+ portptr = &hdr->source;
+ } else {
+ /* Get rid of dst port */
+ newport = tuple->dst.u.udp.port;
+ portptr = &hdr->dest;
+ }
+ if (do_csum) {
+ l3proto->csum_update(skb, iphdroff, &hdr->check,
+ tuple, maniptype);
+ inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
+ false);
+ if (!hdr->check)
+ hdr->check = CSUM_MANGLED_0;
+ }
+ *portptr = newport;
+}
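
__udp_manip_pkt() patches the checksum incrementally rather than recomputing it, and folds an all-zero result to CSUM_MANGLED_0 because a transmitted UDP checksum of zero means "no checksum" (0x0000 and 0xffff are equivalent in one's-complement arithmetic). A self-contained sketch of that incremental update following RFC 1624, with hypothetical field values:

```c
#include <stdint.h>
#include <stdio.h>

/* Incrementally patch a one's-complement checksum when a 16-bit
 * field changes from old_val to new_val (RFC 1624, eqn. 3):
 *   HC' = ~(~HC + ~m + m')
 */
static uint16_t csum_replace16(uint16_t check, uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;
	sum += new_val;
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* rewrite port 80 -> 8080 under an arbitrary starting checksum */
	uint16_t check = csum_replace16(0x1c46, 0x0050, 0x1f90);

	/* UDP only: zero on the wire means "no checksum", so an
	 * all-zero result must be sent as 0xffff instead (the
	 * CSUM_MANGLED_0 fold in the function above). */
	if (check == 0)
		check = 0xffff;

	printf("new checksum: 0x%04x\n", check);
	return 0;
}
```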
+
+static bool udp_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype)
+{
+ struct udphdr *hdr;
+ bool do_csum;
+
+ if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+ return false;
+
+ hdr = (struct udphdr *)(skb->data + hdroff);
+ do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
+
+ __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, do_csum);
+ return true;
+}
+
+static bool udplite_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype)
+{
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+ struct udphdr *hdr;
+
+ if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+ return false;
+
+ hdr = (struct udphdr *)(skb->data + hdroff);
+ __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, true);
+#endif
+ return true;
+}
+
+static bool
+sctp_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype)
+{
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ struct sctphdr *hdr;
+ int hdrsize = 8;
+
+ /* This could be an inner header returned in icmp packet; in such
+ * cases we cannot update the checksum field since it is outside
+ * of the 8 bytes of transport layer headers we are guaranteed.
+ */
+ if (skb->len >= hdroff + sizeof(*hdr))
+ hdrsize = sizeof(*hdr);
+
+ if (!skb_make_writable(skb, hdroff + hdrsize))
+ return false;
+
+ hdr = (struct sctphdr *)(skb->data + hdroff);
+
+ if (maniptype == NF_NAT_MANIP_SRC) {
+ /* Get rid of src port */
+ hdr->source = tuple->src.u.sctp.port;
+ } else {
+ /* Get rid of dst port */
+ hdr->dest = tuple->dst.u.sctp.port;
+ }
+
+ if (hdrsize < sizeof(*hdr))
+ return true;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ hdr->checksum = sctp_compute_cksum(skb, hdroff);
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+#endif
+ return true;
+}
+
+static bool
+tcp_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype)
+{
+ struct tcphdr *hdr;
+ __be16 *portptr, newport, oldport;
+ int hdrsize = 8; /* TCP connection tracking guarantees this much */
+
+ /* this could be an inner header returned in icmp packet; in such
+ cases we cannot update the checksum field since it is outside of
+ the 8 bytes of transport layer headers we are guaranteed */
+ if (skb->len >= hdroff + sizeof(struct tcphdr))
+ hdrsize = sizeof(struct tcphdr);
+
+ if (!skb_make_writable(skb, hdroff + hdrsize))
+ return false;
+
+ hdr = (struct tcphdr *)(skb->data + hdroff);
+
+ if (maniptype == NF_NAT_MANIP_SRC) {
+ /* Get rid of src port */
+ newport = tuple->src.u.tcp.port;
+ portptr = &hdr->source;
+ } else {
+ /* Get rid of dst port */
+ newport = tuple->dst.u.tcp.port;
+ portptr = &hdr->dest;
+ }
+
+ oldport = *portptr;
+ *portptr = newport;
+
+ if (hdrsize < sizeof(*hdr))
+ return true;
+
+ l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
+ inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
+ return true;
+}
+
+static bool
+dccp_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype)
+{
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ struct dccp_hdr *hdr;
+ __be16 *portptr, oldport, newport;
+ int hdrsize = 8; /* DCCP connection tracking guarantees this much */
+
+ if (skb->len >= hdroff + sizeof(struct dccp_hdr))
+ hdrsize = sizeof(struct dccp_hdr);
+
+ if (!skb_make_writable(skb, hdroff + hdrsize))
+ return false;
+
+ hdr = (struct dccp_hdr *)(skb->data + hdroff);
+
+ if (maniptype == NF_NAT_MANIP_SRC) {
+ newport = tuple->src.u.dccp.port;
+ portptr = &hdr->dccph_sport;
+ } else {
+ newport = tuple->dst.u.dccp.port;
+ portptr = &hdr->dccph_dport;
+ }
+
+ oldport = *portptr;
+ *portptr = newport;
+
+ if (hdrsize < sizeof(*hdr))
+ return true;
+
+ l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
+ tuple, maniptype);
+ inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
+ false);
+#endif
+ return true;
+}
+
+static bool
+icmp_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype)
+{
+ struct icmphdr *hdr;
+
+ if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+ return false;
+
+ hdr = (struct icmphdr *)(skb->data + hdroff);
+ inet_proto_csum_replace2(&hdr->checksum, skb,
+ hdr->un.echo.id, tuple->src.u.icmp.id, false);
+ hdr->un.echo.id = tuple->src.u.icmp.id;
+ return true;
+}
+
+static bool
+icmpv6_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype)
+{
+ struct icmp6hdr *hdr;
+
+ if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+ return false;
+
+ hdr = (struct icmp6hdr *)(skb->data + hdroff);
+ l3proto->csum_update(skb, iphdroff, &hdr->icmp6_cksum,
+ tuple, maniptype);
+ if (hdr->icmp6_type == ICMPV6_ECHO_REQUEST ||
+ hdr->icmp6_type == ICMPV6_ECHO_REPLY) {
+ inet_proto_csum_replace2(&hdr->icmp6_cksum, skb,
+ hdr->icmp6_identifier,
+ tuple->src.u.icmp.id, false);
+ hdr->icmp6_identifier = tuple->src.u.icmp.id;
+ }
+ return true;
+}
+
+/* manipulate a GRE packet according to maniptype */
+static bool
+gre_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype)
+{
+#if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE)
+ const struct gre_base_hdr *greh;
+ struct pptp_gre_header *pgreh;
+
+ /* pgreh includes two optional 32bit fields which are not required
+ * to be there. That's where the magic '8' comes from */
+ if (!skb_make_writable(skb, hdroff + sizeof(*pgreh) - 8))
+ return false;
+
+ greh = (void *)skb->data + hdroff;
+ pgreh = (struct pptp_gre_header *)greh;
+
+ /* we only have destination manip of a packet, since 'source key'
+ * is not present in the packet itself */
+ if (maniptype != NF_NAT_MANIP_DST)
+ return true;
+
+ switch (greh->flags & GRE_VERSION) {
+ case GRE_VERSION_0:
+ /* We do not currently NAT any GREv0 packets.
+ * Try to behave like "nf_nat_proto_unknown" */
+ break;
+ case GRE_VERSION_1:
+ pr_debug("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key));
+ pgreh->call_id = tuple->dst.u.gre.key;
+ break;
+ default:
+ pr_debug("can't nat unknown GRE version\n");
+ return false;
+ }
+#endif
+ return true;
+}
+
+bool nf_nat_l4proto_manip_pkt(struct sk_buff *skb,
+ const struct nf_nat_l3proto *l3proto,
+ unsigned int iphdroff, unsigned int hdroff,
+ const struct nf_conntrack_tuple *tuple,
+ enum nf_nat_manip_type maniptype)
+{
+ switch (tuple->dst.protonum) {
+ case IPPROTO_TCP:
+ return tcp_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ tuple, maniptype);
+ case IPPROTO_UDP:
+ return udp_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ tuple, maniptype);
+ case IPPROTO_UDPLITE:
+ return udplite_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ tuple, maniptype);
+ case IPPROTO_SCTP:
+ return sctp_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ tuple, maniptype);
+ case IPPROTO_ICMP:
+ return icmp_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ tuple, maniptype);
+ case IPPROTO_ICMPV6:
+ return icmpv6_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ tuple, maniptype);
+ case IPPROTO_DCCP:
+ return dccp_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ tuple, maniptype);
+ case IPPROTO_GRE:
+ return gre_manip_pkt(skb, l3proto, iphdroff, hdroff,
+ tuple, maniptype);
+ }
+
+ /* If we don't know protocol -- no error, pass it unmodified. */
+ return true;
+}
+EXPORT_SYMBOL_GPL(nf_nat_l4proto_manip_pkt);
diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
deleted file mode 100644
index 5d849d835561..000000000000
--- a/net/netfilter/nf_nat_proto_common.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/random.h>
-#include <linux/netfilter.h>
-#include <linux/export.h>
-
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-
-bool nf_nat_l4proto_in_range(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max)
-{
- __be16 port;
-
- if (maniptype == NF_NAT_MANIP_SRC)
- port = tuple->src.u.all;
- else
- port = tuple->dst.u.all;
-
- return ntohs(port) >= ntohs(min->all) &&
- ntohs(port) <= ntohs(max->all);
-}
-EXPORT_SYMBOL_GPL(nf_nat_l4proto_in_range);
-
-void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct,
- u16 *rover)
-{
- unsigned int range_size, min, max, i;
- __be16 *portptr;
- u_int16_t off;
-
- if (maniptype == NF_NAT_MANIP_SRC)
- portptr = &tuple->src.u.all;
- else
- portptr = &tuple->dst.u.all;
-
- /* If no range specified... */
- if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED)) {
- /* If it's dst rewrite, can't change port */
- if (maniptype == NF_NAT_MANIP_DST)
- return;
-
- if (ntohs(*portptr) < 1024) {
- /* Loose convention: >> 512 is credential passing */
- if (ntohs(*portptr) < 512) {
- min = 1;
- range_size = 511 - min + 1;
- } else {
- min = 600;
- range_size = 1023 - min + 1;
- }
- } else {
- min = 1024;
- range_size = 65535 - 1024 + 1;
- }
- } else {
- min = ntohs(range->min_proto.all);
- max = ntohs(range->max_proto.all);
- if (unlikely(max < min))
- swap(max, min);
- range_size = max - min + 1;
- }
-
- if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
- off = l3proto->secure_port(tuple, maniptype == NF_NAT_MANIP_SRC
- ? tuple->dst.u.all
- : tuple->src.u.all);
- } else if (range->flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY) {
- off = prandom_u32();
- } else if (range->flags & NF_NAT_RANGE_PROTO_OFFSET) {
- off = (ntohs(*portptr) - ntohs(range->base_proto.all));
- } else {
- off = *rover;
- }
-
- for (i = 0; ; ++off) {
- *portptr = htons(min + off % range_size);
- if (++i != range_size && nf_nat_used_tuple(tuple, ct))
- continue;
- if (!(range->flags & (NF_NAT_RANGE_PROTO_RANDOM_ALL|
- NF_NAT_RANGE_PROTO_OFFSET)))
- *rover = off;
- return;
- }
-}
-EXPORT_SYMBOL_GPL(nf_nat_l4proto_unique_tuple);
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
- struct nf_nat_range2 *range)
-{
- if (tb[CTA_PROTONAT_PORT_MIN]) {
- range->min_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
- range->max_proto.all = range->min_proto.all;
- range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
- }
- if (tb[CTA_PROTONAT_PORT_MAX]) {
- range->max_proto.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
- range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(nf_nat_l4proto_nlattr_to_range);
-#endif
diff --git a/net/netfilter/nf_nat_proto_dccp.c b/net/netfilter/nf_nat_proto_dccp.c
deleted file mode 100644
index 67ea0d83aa5a..000000000000
--- a/net/netfilter/nf_nat_proto_dccp.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * DCCP NAT protocol helper
- *
- * Copyright (c) 2005, 2006, 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/dccp.h>
-
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-
-static u_int16_t dccp_port_rover;
-
-static void
-dccp_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct)
-{
- nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
- &dccp_port_rover);
-}
-
-static bool
-dccp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype)
-{
- struct dccp_hdr *hdr;
- __be16 *portptr, oldport, newport;
- int hdrsize = 8; /* DCCP connection tracking guarantees this much */
-
- if (skb->len >= hdroff + sizeof(struct dccp_hdr))
- hdrsize = sizeof(struct dccp_hdr);
-
- if (!skb_make_writable(skb, hdroff + hdrsize))
- return false;
-
- hdr = (struct dccp_hdr *)(skb->data + hdroff);
-
- if (maniptype == NF_NAT_MANIP_SRC) {
- newport = tuple->src.u.dccp.port;
- portptr = &hdr->dccph_sport;
- } else {
- newport = tuple->dst.u.dccp.port;
- portptr = &hdr->dccph_dport;
- }
-
- oldport = *portptr;
- *portptr = newport;
-
- if (hdrsize < sizeof(*hdr))
- return true;
-
- l3proto->csum_update(skb, iphdroff, &hdr->dccph_checksum,
- tuple, maniptype);
- inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport,
- false);
- return true;
-}
-
-const struct nf_nat_l4proto nf_nat_l4proto_dccp = {
- .l4proto = IPPROTO_DCCP,
- .manip_pkt = dccp_manip_pkt,
- .in_range = nf_nat_l4proto_in_range,
- .unique_tuple = dccp_unique_tuple,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
-#endif
-};
diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
deleted file mode 100644
index 1c5d9b65fbba..000000000000
--- a/net/netfilter/nf_nat_proto_sctp.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/sctp.h>
-#include <net/sctp/checksum.h>
-
-#include <net/netfilter/nf_nat_l4proto.h>
-
-static u_int16_t nf_sctp_port_rover;
-
-static void
-sctp_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct)
-{
- nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
- &nf_sctp_port_rover);
-}
-
-static bool
-sctp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype)
-{
- struct sctphdr *hdr;
- int hdrsize = 8;
-
- /* This could be an inner header returned in imcp packet; in such
- * cases we cannot update the checksum field since it is outside
- * of the 8 bytes of transport layer headers we are guaranteed.
- */
- if (skb->len >= hdroff + sizeof(*hdr))
- hdrsize = sizeof(*hdr);
-
- if (!skb_make_writable(skb, hdroff + hdrsize))
- return false;
-
- hdr = (struct sctphdr *)(skb->data + hdroff);
-
- if (maniptype == NF_NAT_MANIP_SRC) {
- /* Get rid of src port */
- hdr->source = tuple->src.u.sctp.port;
- } else {
- /* Get rid of dst port */
- hdr->dest = tuple->dst.u.sctp.port;
- }
-
- if (hdrsize < sizeof(*hdr))
- return true;
-
- if (skb->ip_summed != CHECKSUM_PARTIAL) {
- hdr->checksum = sctp_compute_cksum(skb, hdroff);
- skb->ip_summed = CHECKSUM_NONE;
- }
-
- return true;
-}
-
-const struct nf_nat_l4proto nf_nat_l4proto_sctp = {
- .l4proto = IPPROTO_SCTP,
- .manip_pkt = sctp_manip_pkt,
- .in_range = nf_nat_l4proto_in_range,
- .unique_tuple = sctp_unique_tuple,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
-#endif
-};
diff --git a/net/netfilter/nf_nat_proto_tcp.c b/net/netfilter/nf_nat_proto_tcp.c
deleted file mode 100644
index f15fcd475f98..000000000000
--- a/net/netfilter/nf_nat_proto_tcp.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/tcp.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter/nfnetlink_conntrack.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-#include <net/netfilter/nf_nat_core.h>
-
-static u16 tcp_port_rover;
-
-static void
-tcp_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct)
-{
- nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
- &tcp_port_rover);
-}
-
-static bool
-tcp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype)
-{
- struct tcphdr *hdr;
- __be16 *portptr, newport, oldport;
- int hdrsize = 8; /* TCP connection tracking guarantees this much */
-
- /* this could be a inner header returned in icmp packet; in such
- cases we cannot update the checksum field since it is outside of
- the 8 bytes of transport layer headers we are guaranteed */
- if (skb->len >= hdroff + sizeof(struct tcphdr))
- hdrsize = sizeof(struct tcphdr);
-
- if (!skb_make_writable(skb, hdroff + hdrsize))
- return false;
-
- hdr = (struct tcphdr *)(skb->data + hdroff);
-
- if (maniptype == NF_NAT_MANIP_SRC) {
- /* Get rid of src port */
- newport = tuple->src.u.tcp.port;
- portptr = &hdr->source;
- } else {
- /* Get rid of dst port */
- newport = tuple->dst.u.tcp.port;
- portptr = &hdr->dest;
- }
-
- oldport = *portptr;
- *portptr = newport;
-
- if (hdrsize < sizeof(*hdr))
- return true;
-
- l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
- inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false);
- return true;
-}
-
-const struct nf_nat_l4proto nf_nat_l4proto_tcp = {
- .l4proto = IPPROTO_TCP,
- .manip_pkt = tcp_manip_pkt,
- .in_range = nf_nat_l4proto_in_range,
- .unique_tuple = tcp_unique_tuple,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
-#endif
-};
diff --git a/net/netfilter/nf_nat_proto_udp.c b/net/netfilter/nf_nat_proto_udp.c
deleted file mode 100644
index 5790f70a83b2..000000000000
--- a/net/netfilter/nf_nat_proto_udp.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/udp.h>
-
-#include <linux/netfilter.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_core.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-
-static u16 udp_port_rover;
-
-static void
-udp_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct)
-{
- nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
- &udp_port_rover);
-}
-
-static void
-__udp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, struct udphdr *hdr,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype, bool do_csum)
-{
- __be16 *portptr, newport;
-
- if (maniptype == NF_NAT_MANIP_SRC) {
- /* Get rid of src port */
- newport = tuple->src.u.udp.port;
- portptr = &hdr->source;
- } else {
- /* Get rid of dst port */
- newport = tuple->dst.u.udp.port;
- portptr = &hdr->dest;
- }
- if (do_csum) {
- l3proto->csum_update(skb, iphdroff, &hdr->check,
- tuple, maniptype);
- inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
- false);
- if (!hdr->check)
- hdr->check = CSUM_MANGLED_0;
- }
- *portptr = newport;
-}
-
-static bool udp_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype)
-{
- struct udphdr *hdr;
- bool do_csum;
-
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
- return false;
-
- hdr = (struct udphdr *)(skb->data + hdroff);
- do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
-
- __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, do_csum);
- return true;
-}
-
-#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
-static u16 udplite_port_rover;
-
-static bool udplite_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype)
-{
- struct udphdr *hdr;
-
- if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
- return false;
-
- hdr = (struct udphdr *)(skb->data + hdroff);
- __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, true);
- return true;
-}
-
-static void
-udplite_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct)
-{
- nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
- &udplite_port_rover);
-}
-
-const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
- .l4proto = IPPROTO_UDPLITE,
- .manip_pkt = udplite_manip_pkt,
- .in_range = nf_nat_l4proto_in_range,
- .unique_tuple = udplite_unique_tuple,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
-#endif
-};
-#endif /* CONFIG_NF_NAT_PROTO_UDPLITE */
-
-const struct nf_nat_l4proto nf_nat_l4proto_udp = {
- .l4proto = IPPROTO_UDP,
- .manip_pkt = udp_manip_pkt,
- .in_range = nf_nat_l4proto_in_range,
- .unique_tuple = udp_unique_tuple,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- .nlattr_to_range = nf_nat_l4proto_nlattr_to_range,
-#endif
-};
diff --git a/net/netfilter/nf_nat_proto_unknown.c b/net/netfilter/nf_nat_proto_unknown.c
deleted file mode 100644
index c5db3e251232..000000000000
--- a/net/netfilter/nf_nat_proto_unknown.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/* The "unknown" protocol. This is what is used for protocols we
- * don't understand. It's returned by ip_ct_find_proto().
- */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/init.h>
-
-#include <linux/netfilter.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-
-static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type manip_type,
- const union nf_conntrack_man_proto *min,
- const union nf_conntrack_man_proto *max)
-{
- return true;
-}
-
-static void unknown_unique_tuple(const struct nf_nat_l3proto *l3proto,
- struct nf_conntrack_tuple *tuple,
- const struct nf_nat_range2 *range,
- enum nf_nat_manip_type maniptype,
- const struct nf_conn *ct)
-{
- /* Sorry: we can't help you; if it's not unique, we can't frob
- * anything.
- */
- return;
-}
-
-static bool
-unknown_manip_pkt(struct sk_buff *skb,
- const struct nf_nat_l3proto *l3proto,
- unsigned int iphdroff, unsigned int hdroff,
- const struct nf_conntrack_tuple *tuple,
- enum nf_nat_manip_type maniptype)
-{
- return true;
-}
-
-const struct nf_nat_l4proto nf_nat_l4proto_unknown = {
- .manip_pkt = unknown_manip_pkt,
- .in_range = unknown_in_range,
- .unique_tuple = unknown_unique_tuple,
-};
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index 1f3086074981..aa1be643d7a0 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -18,6 +18,7 @@
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
@@ -316,6 +317,9 @@ static void nf_nat_sip_seq_adjust(struct sk_buff *skb, unsigned int protoff,
static void nf_nat_sip_expected(struct nf_conn *ct,
struct nf_conntrack_expect *exp)
{
+ struct nf_conn_help *help = nfct_help(ct->master);
+ struct nf_conntrack_expect *pair_exp;
+ int range_set_for_snat = 0;
struct nf_nat_range2 range;
/* This must be a fresh one. */
@@ -327,15 +331,42 @@ static void nf_nat_sip_expected(struct nf_conn *ct,
range.min_addr = range.max_addr = exp->saved_addr;
nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
- /* Change src to where master sends to, but only if the connection
- * actually came from the same source. */
- if (nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
+ /* Do media streams SRC manip according to the parameters
+ * found in the paired expectation.
+ */
+ if (exp->class != SIP_EXPECT_SIGNALLING) {
+ spin_lock_bh(&nf_conntrack_expect_lock);
+ hlist_for_each_entry(pair_exp, &help->expectations, lnode) {
+ if (pair_exp->tuple.src.l3num == nf_ct_l3num(ct) &&
+ pair_exp->tuple.dst.protonum == ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum &&
+ nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3, &pair_exp->saved_addr) &&
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.all == pair_exp->saved_proto.all) {
+ range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
+ range.min_proto.all = range.max_proto.all = pair_exp->tuple.dst.u.all;
+ range.min_addr = range.max_addr = pair_exp->tuple.dst.u3;
+ range_set_for_snat = 1;
+ break;
+ }
+ }
+ spin_unlock_bh(&nf_conntrack_expect_lock);
+ }
+
+ /* When no paired expectation has been found, change src to
+ * where master sends to, but only if the connection actually came
+ * from the same source.
+ */
+ if (!range_set_for_snat &&
+ nf_inet_addr_cmp(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3,
&ct->master->tuplehash[exp->dir].tuple.src.u3)) {
range.flags = NF_NAT_RANGE_MAP_IPS;
range.min_addr = range.max_addr
= ct->master->tuplehash[!exp->dir].tuple.dst.u3;
- nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
+ range_set_for_snat = 1;
}
+
+ /* Perform SRC manip. */
+ if (range_set_for_snat)
+ nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
}
static unsigned int nf_nat_sip_expect(struct sk_buff *skb, unsigned int protoff,
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index d67a96a25a68..a36a77bae1d6 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -46,6 +46,24 @@ void nf_unregister_queue_handler(struct net *net)
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
+static void nf_queue_entry_release_br_nf_refs(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ if (nf_bridge) {
+ struct net_device *physdev;
+
+ physdev = nf_bridge_get_physindev(skb);
+ if (physdev)
+ dev_put(physdev);
+ physdev = nf_bridge_get_physoutdev(skb);
+ if (physdev)
+ dev_put(physdev);
+ }
+#endif
+}
+
void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
struct nf_hook_state *state = &entry->state;
@@ -57,20 +75,28 @@ void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
dev_put(state->out);
if (state->sk)
sock_put(state->sk);
+
+ nf_queue_entry_release_br_nf_refs(entry->skb);
+}
+EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
+
+static void nf_queue_entry_get_br_nf_refs(struct sk_buff *skb)
+{
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- if (entry->skb->nf_bridge) {
+ struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
+
+ if (nf_bridge) {
struct net_device *physdev;
- physdev = nf_bridge_get_physindev(entry->skb);
+ physdev = nf_bridge_get_physindev(skb);
if (physdev)
- dev_put(physdev);
- physdev = nf_bridge_get_physoutdev(entry->skb);
+ dev_hold(physdev);
+ physdev = nf_bridge_get_physoutdev(skb);
if (physdev)
- dev_put(physdev);
+ dev_hold(physdev);
}
#endif
}
-EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);
/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
@@ -83,18 +109,8 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
dev_hold(state->out);
if (state->sk)
sock_hold(state->sk);
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- if (entry->skb->nf_bridge) {
- struct net_device *physdev;
- physdev = nf_bridge_get_physindev(entry->skb);
- if (physdev)
- dev_hold(physdev);
- physdev = nf_bridge_get_physoutdev(entry->skb);
- if (physdev)
- dev_hold(physdev);
- }
-#endif
+ nf_queue_entry_get_br_nf_refs(entry->skb);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 42487d01a3ed..fec814dace5a 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1216,7 +1216,8 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name))
goto nla_put_failure;
- if (basechain->stats && nft_dump_stats(skb, basechain->stats))
+ if (rcu_access_pointer(basechain->stats) &&
+ nft_dump_stats(skb, rcu_dereference(basechain->stats)))
goto nla_put_failure;
}
@@ -1392,7 +1393,8 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
return newstats;
}
-static void nft_chain_stats_replace(struct nft_base_chain *chain,
+static void nft_chain_stats_replace(struct net *net,
+ struct nft_base_chain *chain,
struct nft_stats __percpu *newstats)
{
struct nft_stats __percpu *oldstats;
@@ -1400,8 +1402,9 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
if (newstats == NULL)
return;
- if (chain->stats) {
- oldstats = nfnl_dereference(chain->stats, NFNL_SUBSYS_NFTABLES);
+ if (rcu_access_pointer(chain->stats)) {
+ oldstats = rcu_dereference_protected(chain->stats,
+ lockdep_commit_lock_is_held(net));
rcu_assign_pointer(chain->stats, newstats);
synchronize_rcu();
free_percpu(oldstats);
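
nft_chain_stats_replace() now keeps the stats pointer under full RCU discipline: publish the replacement with rcu_assign_pointer(), wait a grace period with synchronize_rcu(), and only then free the block that no reader can still observe. The same publish/wait/free protocol in userspace, assuming liburcu (the userspace RCU library, linked with -lurcu) is available; a sketch, not the kernel code:

```c
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>

struct stats {
	unsigned long pkts;
};

static struct stats *chain_stats;	/* RCU-protected pointer */

/* Writer side: swap in the new block, then wait until every reader
 * that might still hold the old pointer has finished. */
static void replace_stats(struct stats *newstats)
{
	struct stats *oldstats = chain_stats;	/* single writer, no race */

	rcu_assign_pointer(chain_stats, newstats);
	synchronize_rcu();			/* grace period */
	free(oldstats);
}

int main(void)
{
	rcu_register_thread();

	chain_stats = calloc(1, sizeof(*chain_stats));
	replace_stats(calloc(1, sizeof(*chain_stats)));

	/* Reader side: dereference only inside a read-side section. */
	rcu_read_lock();
	printf("pkts=%lu\n", rcu_dereference(chain_stats)->pkts);
	rcu_read_unlock();

	rcu_unregister_thread();
	return 0;
}
```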
@@ -1439,9 +1442,10 @@ static void nf_tables_chain_destroy(struct nft_ctx *ctx)
struct nft_base_chain *basechain = nft_base_chain(chain);
module_put(basechain->type->owner);
- free_percpu(basechain->stats);
- if (basechain->stats)
+ if (rcu_access_pointer(basechain->stats)) {
static_branch_dec(&nft_counters_enabled);
+ free_percpu(rcu_dereference_raw(basechain->stats));
+ }
kfree(chain->name);
kfree(basechain);
} else {
@@ -1590,7 +1594,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
kfree(basechain);
return PTR_ERR(stats);
}
- basechain->stats = stats;
+ rcu_assign_pointer(basechain->stats, stats);
static_branch_inc(&nft_counters_enabled);
}
@@ -2291,15 +2295,52 @@ struct nft_rule_dump_ctx {
char *chain;
};
+static int __nf_tables_dump_rules(struct sk_buff *skb,
+ unsigned int *idx,
+ struct netlink_callback *cb,
+ const struct nft_table *table,
+ const struct nft_chain *chain)
+{
+ struct net *net = sock_net(skb->sk);
+ unsigned int s_idx = cb->args[0];
+ const struct nft_rule *rule;
+ int rc = 1;
+
+ list_for_each_entry_rcu(rule, &chain->rules, list) {
+ if (!nft_is_active(net, rule))
+ goto cont;
+ if (*idx < s_idx)
+ goto cont;
+ if (*idx > s_idx) {
+ memset(&cb->args[1], 0,
+ sizeof(cb->args) - sizeof(cb->args[0]));
+ }
+ if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NFT_MSG_NEWRULE,
+ NLM_F_MULTI | NLM_F_APPEND,
+ table->family,
+ table, chain, rule) < 0)
+ goto out_unfinished;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+cont:
+ (*idx)++;
+ }
+ rc = 0;
+out_unfinished:
+ cb->args[0] = *idx;
+ return rc;
+}
+
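
__nf_tables_dump_rules() follows the standard netlink dump-resumption contract: cb->args[0] is a cursor counting entries already emitted, so when the destination skb fills up the dump returns early and the next callback invocation skips straight past them. A toy model of that cursor protocol, with a fixed-size "buffer" standing in for the skb:

```c
#include <stdbool.h>
#include <stdio.h>

#define N_ITEMS   10
#define PER_BATCH 4	/* pretend the skb holds four records */

/* One dump pass: resume at *cursor, stop when the buffer fills.
 * Returns true if more remains, mirroring rc = 1 above. */
static bool dump_batch(unsigned int *cursor)
{
	unsigned int emitted = 0;

	for (unsigned int idx = *cursor; idx < N_ITEMS; idx++) {
		if (emitted == PER_BATCH) {
			*cursor = idx;	/* like cb->args[0] = *idx */
			return true;
		}
		printf("record %u\n", idx);
		emitted++;
	}
	*cursor = N_ITEMS;
	return false;
}

int main(void)
{
	unsigned int cursor = 0;

	while (dump_batch(&cursor))
		printf("-- buffer full, userspace issues the next read --\n");
	return 0;
}
```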
static int nf_tables_dump_rules(struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
const struct nft_rule_dump_ctx *ctx = cb->data;
- const struct nft_table *table;
+ struct nft_table *table;
const struct nft_chain *chain;
- const struct nft_rule *rule;
- unsigned int idx = 0, s_idx = cb->args[0];
+ unsigned int idx = 0;
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
@@ -2313,37 +2354,34 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0)
continue;
- list_for_each_entry_rcu(chain, &table->chains, list) {
- if (ctx && ctx->chain &&
- strcmp(ctx->chain, chain->name) != 0)
- continue;
+ if (ctx && ctx->chain) {
+ struct rhlist_head *list, *tmp;
- list_for_each_entry_rcu(rule, &chain->rules, list) {
- if (!nft_is_active(net, rule))
- goto cont;
- if (idx < s_idx)
- goto cont;
- if (idx > s_idx)
- memset(&cb->args[1], 0,
- sizeof(cb->args) - sizeof(cb->args[0]));
- if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NFT_MSG_NEWRULE,
- NLM_F_MULTI | NLM_F_APPEND,
- table->family,
- table, chain, rule) < 0)
- goto done;
-
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
+ list = rhltable_lookup(&table->chains_ht, ctx->chain,
+ nft_chain_ht_params);
+ if (!list)
+ goto done;
+
+ rhl_for_each_entry_rcu(chain, tmp, list, rhlhead) {
+ if (!nft_is_active(net, chain))
+ continue;
+ __nf_tables_dump_rules(skb, &idx,
+ cb, table, chain);
+ break;
}
+ goto done;
}
+
+ list_for_each_entry_rcu(chain, &table->chains, list) {
+ if (__nf_tables_dump_rules(skb, &idx, cb, table, chain))
+ goto done;
+ }
+
+ if (ctx && ctx->table)
+ break;
}
done:
rcu_read_unlock();
-
- cb->args[0] = idx;
return skb->len;
}
@@ -2457,7 +2495,7 @@ err:
static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
struct nft_rule *rule)
{
- struct nft_expr *expr;
+ struct nft_expr *expr, *next;
/*
* Careful: some expressions might not be initialized in case this
@@ -2465,8 +2503,9 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
*/
expr = nft_expr_first(rule);
while (expr != nft_expr_last(rule) && expr->ops) {
+ next = nft_expr_next(expr);
nf_tables_expr_destroy(ctx, expr);
- expr = nft_expr_next(expr);
+ expr = next;
}
kfree(rule);
}
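
This nf_tables_rule_destroy() change fixes a use-after-free: the old loop destroyed an expression and then called nft_expr_next() on the freed memory. Saving the successor before destroying the current node is the standard safe-iteration pattern; a tiny linked-list sketch of the same fix:

```c
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int val;
};

/* Free a list safely: grab ->next *before* freeing the current
 * node; never dereference a node after it has been destroyed. */
static void destroy_list(struct node *n)
{
	while (n) {
		struct node *next = n->next;	/* save successor first */

		free(n);
		n = next;
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->val = i;
		n->next = head;
		head = n;
	}
	destroy_list(head);
	printf("done\n");
	return 0;
}
```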
@@ -2589,17 +2628,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
if (chain->use == UINT_MAX)
return -EOVERFLOW;
- }
-
- if (nla[NFTA_RULE_POSITION]) {
- if (!(nlh->nlmsg_flags & NLM_F_CREATE))
- return -EOPNOTSUPP;
- pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
- old_rule = __nft_rule_lookup(chain, pos_handle);
- if (IS_ERR(old_rule)) {
- NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
- return PTR_ERR(old_rule);
+ if (nla[NFTA_RULE_POSITION]) {
+ pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
+ old_rule = __nft_rule_lookup(chain, pos_handle);
+ if (IS_ERR(old_rule)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
+ return PTR_ERR(old_rule);
+ }
}
}
@@ -2669,21 +2705,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
}
if (nlh->nlmsg_flags & NLM_F_REPLACE) {
- if (!nft_is_active_next(net, old_rule)) {
- err = -ENOENT;
- goto err2;
- }
- trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
- old_rule);
+ trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
if (trans == NULL) {
err = -ENOMEM;
goto err2;
}
- nft_deactivate_next(net, old_rule);
- chain->use--;
-
- if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
- err = -ENOMEM;
+ err = nft_delrule(&ctx, old_rule);
+ if (err < 0) {
+ nft_trans_destroy(trans);
goto err2;
}
@@ -6189,7 +6218,8 @@ static void nft_chain_commit_update(struct nft_trans *trans)
return;
basechain = nft_base_chain(trans->ctx.chain);
- nft_chain_stats_replace(basechain, nft_trans_chain_stats(trans));
+ nft_chain_stats_replace(trans->ctx.net, basechain,
+ nft_trans_chain_stats(trans));
switch (nft_trans_chain_policy(trans)) {
case NF_DROP:
@@ -6324,7 +6354,7 @@ static void nf_tables_commit_chain_free_rules_old(struct nft_rule **rules)
call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old);
}
-static void nf_tables_commit_chain_active(struct net *net, struct nft_chain *chain)
+static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
{
struct nft_rule **g0, **g1;
bool next_genbit;
@@ -6441,11 +6471,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
/* step 2. Make rules_gen_X visible to packet path */
list_for_each_entry(table, &net->nft.tables, list) {
- list_for_each_entry(chain, &table->chains, list) {
- if (!nft_is_active_next(net, chain))
- continue;
- nf_tables_commit_chain_active(net, chain);
- }
+ list_for_each_entry(chain, &table->chains, list)
+ nf_tables_commit_chain(net, chain);
}
/*
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 3fbce3b9c5ec..a50500232b0a 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -101,7 +101,7 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
struct nft_stats *stats;
base_chain = nft_base_chain(chain);
- if (!base_chain->stats)
+ if (!rcu_access_pointer(base_chain->stats))
return;
local_bh_disable();
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index a518eb162344..109b0d27345a 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -455,7 +455,8 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
case IPPROTO_TCP:
timeouts = nf_tcp_pernet(net)->timeouts;
break;
- case IPPROTO_UDP:
+ case IPPROTO_UDP: /* fallthrough */
+ case IPPROTO_UDPLITE:
timeouts = nf_udp_pernet(net)->timeouts;
break;
case IPPROTO_DCCP:
@@ -471,11 +472,21 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
timeouts = nf_sctp_pernet(net)->timeouts;
#endif
break;
+ case IPPROTO_GRE:
+#ifdef CONFIG_NF_CT_PROTO_GRE
+ if (l4proto->net_id) {
+ struct netns_proto_gre *net_gre;
+
+ net_gre = net_generic(net, *l4proto->net_id);
+ timeouts = net_gre->gre_timeouts;
+ }
+#endif
+ break;
case 255:
timeouts = &nf_generic_pernet(net)->timeout;
break;
default:
- WARN_ON_ONCE(1);
+ WARN_ONCE(1, "Missing timeouts for proto %d", l4proto->l4proto);
break;
}
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 332c69d27b47..b1f9c5303f02 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -148,7 +148,7 @@ static void
instance_put(struct nfulnl_instance *inst)
{
if (inst && refcount_dec_and_test(&inst->use))
- call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
+ call_rcu(&inst->rcu, nfulnl_instance_free_rcu);
}
static void nfulnl_timer(struct timer_list *t);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 43041f087eb3..0dcc3592d053 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -727,13 +727,13 @@ nf_queue_entry_dup(struct nf_queue_entry *e)
*/
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
- if (skb->nf_bridge)
+ if (nf_bridge_info_get(skb))
__skb_push(skb, skb->network_header - skb->mac_header);
}
static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
- if (skb->nf_bridge)
+ if (nf_bridge_info_get(skb))
__skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
@@ -904,23 +904,22 @@ nfqnl_set_mode(struct nfqnl_instance *queue,
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ int physinif, physoutif;
+
+ physinif = nf_bridge_get_physinif(entry->skb);
+ physoutif = nf_bridge_get_physoutif(entry->skb);
+
+ if (physinif == ifindex || physoutif == ifindex)
+ return 1;
+#endif
if (entry->state.in)
if (entry->state.in->ifindex == ifindex)
return 1;
if (entry->state.out)
if (entry->state.out->ifindex == ifindex)
return 1;
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
- if (entry->skb->nf_bridge) {
- int physinif, physoutif;
- physinif = nf_bridge_get_physinif(entry->skb);
- physoutif = nf_bridge_get_physoutif(entry->skb);
-
- if (physinif == ifindex || physoutif == ifindex)
- return 1;
- }
-#endif
return 0;
}
@@ -1148,8 +1147,9 @@ static int nfqa_parse_bridge(struct nf_queue_entry *entry,
if (!tb[NFQA_VLAN_TCI] || !tb[NFQA_VLAN_PROTO])
return -EINVAL;
- entry->skb->vlan_tci = ntohs(nla_get_be16(tb[NFQA_VLAN_TCI]));
- entry->skb->vlan_proto = nla_get_be16(tb[NFQA_VLAN_PROTO]);
+ __vlan_hwaccel_put_tag(entry->skb,
+ nla_get_be16(tb[NFQA_VLAN_PROTO]),
+ ntohs(nla_get_be16(tb[NFQA_VLAN_TCI])));
}
if (nfqa[NFQA_L2HDR]) {
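
Writing skb->vlan_tci without also setting the separate 'tag present' flag would now leave the skb in an inconsistent state, so the hunk above goes through the accessor instead. A minimal sketch, assuming an 802.1Q tag:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static void example_set_vlan(struct sk_buff *skb, u16 tci)
{
        /* Sets vlan_proto, vlan_tci and the present flag together. */
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);

        if (skb_vlan_tag_present(skb))          /* now true */
                pr_debug("tci=%u\n", skb_vlan_tag_get(skb));
}
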
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 9d0ede474224..7334e0b80a5e 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -520,6 +520,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
void *info)
{
struct xt_match *match = expr->ops->data;
+ struct module *me = match->me;
struct xt_mtdtor_param par;
par.net = ctx->net;
@@ -530,7 +531,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
par.match->destroy(&par);
if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
- module_put(match->me);
+ module_put(me);
}
static void
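
Caching match->me up front matters because the ->destroy() callback may free the expression that owns the match pointer, so the module reference has to be dropped through the saved copy. A sketch of the save-before-teardown pattern with hypothetical names:

#include <linux/module.h>

struct example_obj {
        struct module *owner;
        void (*destroy)(struct example_obj *obj);       /* may free obj */
};

static void example_teardown(struct example_obj *obj)
{
        struct module *me = obj->owner; /* save before obj can go away */

        obj->destroy(obj);              /* obj must not be touched past here */
        module_put(me);
}
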
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index e82d9a966c45..974525eb92df 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -214,7 +214,9 @@ static int __init nft_flow_offload_module_init(void)
{
int err;
- register_netdevice_notifier(&flow_offload_netdev_notifier);
+ err = register_netdevice_notifier(&flow_offload_netdev_notifier);
+ if (err)
+ goto err;
err = nft_register_expr(&nft_flow_offload_type);
if (err < 0)
@@ -224,6 +226,7 @@ static int __init nft_flow_offload_module_init(void)
register_expr:
unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+err:
return err;
}
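
register_netdevice_notifier() can fail, so its result has to join the module's unwind ladder like any other init step. A sketch of the resulting shape; example_register_expr() is a hypothetical stand-in for the second registration:

#include <linux/netdevice.h>

static struct notifier_block example_notifier;  /* .notifier_call set elsewhere */

static int example_register_expr(void)          /* hypothetical second step */
{
        return 0;
}

static int __init example_init(void)
{
        int err;

        err = register_netdevice_notifier(&example_notifier);
        if (err)
                return err;

        err = example_register_expr();
        if (err)
                goto err_expr;
        return 0;

err_expr:
        unregister_netdevice_notifier(&example_notifier);
        return err;
}
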
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 6180626c3f80..6df486c5ebd3 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -229,7 +229,7 @@ void nft_meta_get_eval(const struct nft_expr *expr,
}
#ifdef CONFIG_XFRM
case NFT_META_SECPATH:
- nft_reg_store8(dest, !!skb->sp);
+ nft_reg_store8(dest, secpath_exists(skb));
break;
#endif
#ifdef CONFIG_NF_TABLES_BRIDGE
diff --git a/net/netfilter/nft_xfrm.c b/net/netfilter/nft_xfrm.c
index 5322609f7662..b08865ec5ed3 100644
--- a/net/netfilter/nft_xfrm.c
+++ b/net/netfilter/nft_xfrm.c
@@ -161,7 +161,7 @@ static void nft_xfrm_get_eval_in(const struct nft_xfrm *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
- const struct sec_path *sp = pkt->skb->sp;
+ const struct sec_path *sp = skb_sec_path(pkt->skb);
const struct xfrm_state *state;
if (sp == NULL || sp->len <= priv->spnum) {
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index dec843cadf46..9e05c86ba5c4 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -201,18 +201,8 @@ static __net_init int xt_rateest_net_init(struct net *net)
return 0;
}
-static void __net_exit xt_rateest_net_exit(struct net *net)
-{
- struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(xn->hash); i++)
- WARN_ON_ONCE(!hlist_empty(&xn->hash[i]));
-}
-
static struct pernet_operations xt_rateest_net_ops = {
.init = xt_rateest_net_init,
- .exit = xt_rateest_net_exit,
.id = &xt_rateest_id,
.size = sizeof(struct xt_rateest_net),
};
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 3e7d259e5d8d..28e27a32d9b9 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -260,7 +260,7 @@ static inline void
dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
hlist_del_rcu(&ent->node);
- call_rcu_bh(&ent->rcu, dsthash_free_rcu);
+ call_rcu(&ent->rcu, dsthash_free_rcu);
ht->count--;
}
static void htable_gc(struct work_struct *work);
@@ -295,9 +295,10 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
/* copy match config into hashtable config */
ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3);
-
- if (ret)
+ if (ret) {
+ vfree(hinfo);
return ret;
+ }
hinfo->cfg.size = size;
if (hinfo->cfg.max == 0)
@@ -814,7 +815,6 @@ hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
int ret;
ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
-
if (ret)
return ret;
@@ -830,7 +830,6 @@ hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
int ret;
ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
-
if (ret)
return ret;
@@ -921,7 +920,6 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
return ret;
ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
-
if (ret)
return ret;
@@ -940,7 +938,6 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
return ret;
ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
-
if (ret)
return ret;
@@ -1329,7 +1326,7 @@ static void __exit hashlimit_mt_exit(void)
xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
unregister_pernet_subsys(&hashlimit_net_ops);
- rcu_barrier_bh();
+ rcu_barrier();
kmem_cache_destroy(hashlimit_cachep);
}
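
Besides plugging the hinfo leak on the cfg_copy() failure path, the exit hunk swaps rcu_barrier_bh() for rcu_barrier(), which after the flavor consolidation is the one barrier that waits for every pending call_rcu() callback before the backing cache goes away. A sketch of that ordering:

#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct kmem_cache *example_cachep;

static void __exit example_exit(void)
{
        /* Wait for in-flight call_rcu() frees into example_cachep... */
        rcu_barrier();
        /* ...only then is it safe to tear the cache down. */
        kmem_cache_destroy(example_cachep);
}
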
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index 9d6d67b953ac..4034d70bff39 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -33,7 +33,7 @@ physdev_mt(const struct sk_buff *skb, struct xt_action_param *par)
/* Not a bridged IP packet or no info available yet:
* LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if
* the destination device will be a bridge. */
- if (!skb->nf_bridge) {
+ if (!nf_bridge_info_exists(skb)) {
/* Return MATCH if the invert flags of the used options are on */
if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
!(info->invert & XT_PHYSDEV_OP_BRIDGED))
diff --git a/net/netfilter/xt_policy.c b/net/netfilter/xt_policy.c
index 13f8ccf946d6..aa84e8121c93 100644
--- a/net/netfilter/xt_policy.c
+++ b/net/netfilter/xt_policy.c
@@ -56,7 +56,7 @@ match_policy_in(const struct sk_buff *skb, const struct xt_policy_info *info,
unsigned short family)
{
const struct xt_policy_elem *e;
- const struct sec_path *sp = skb->sp;
+ const struct sec_path *sp = skb_sec_path(skb);
int strict = info->flags & XT_POLICY_MATCH_STRICT;
int i, pos;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6bb9f3cde0b0..3c023d6120f6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1706,7 +1706,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
nlk->flags &= ~NETLINK_F_EXT_ACK;
err = 0;
break;
- case NETLINK_DUMP_STRICT_CHK:
+ case NETLINK_GET_STRICT_CHK:
if (val)
nlk->flags |= NETLINK_F_STRICT_CHK;
else
@@ -1806,7 +1806,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
return -EFAULT;
err = 0;
break;
- case NETLINK_DUMP_STRICT_CHK:
+ case NETLINK_GET_STRICT_CHK:
if (len < sizeof(int))
return -EINVAL;
len = sizeof(int);
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 85ae53d8fd09..e47ebbbe71b8 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -301,7 +301,7 @@ static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
key->eth.vlan.tpid = vlan->vlan_tpid;
}
return skb_vlan_push(skb, vlan->vlan_tpid,
- ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
+ ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}
/* 'src' is already properly masked. */
@@ -822,8 +822,10 @@ static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *sk
__skb_dst_copy(skb, data->dst);
*OVS_CB(skb) = data->cb;
skb->inner_protocol = data->inner_protocol;
- skb->vlan_tci = data->vlan_tci;
- skb->vlan_proto = data->vlan_proto;
+ if (data->vlan_tci & VLAN_CFI_MASK)
+ __vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
+ else
+ __vlan_hwaccel_clear_tag(skb);
/* Reconstruct the MAC header. */
skb_push(skb, data->l2_len);
@@ -867,7 +869,10 @@ static void prepare_frag(struct vport *vport, struct sk_buff *skb,
data->cb = *OVS_CB(skb);
data->inner_protocol = skb->inner_protocol;
data->network_offset = orig_network_offset;
- data->vlan_tci = skb->vlan_tci;
+ if (skb_vlan_tag_present(skb))
+ data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
+ else
+ data->vlan_tci = 0;
data->vlan_proto = skb->vlan_proto;
data->mac_proto = mac_proto;
data->l2_len = hlen;
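
With the present flag gone from vlan_tci, code that stashes a TCI for later replay must encode 'tag present' on its own; prepare_frag()/ovs_vport_output() above reuse the otherwise-zero CFI bit for that. A sketch of the save/restore pair with a hypothetical holder struct:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

struct example_frag_data {
        __be16 vlan_proto;
        u16 vlan_tci;           /* CFI bit doubles as 'tag present' */
};

static void example_save(const struct sk_buff *skb, struct example_frag_data *d)
{
        d->vlan_proto = skb->vlan_proto;
        d->vlan_tci = skb_vlan_tag_present(skb) ?
                      skb_vlan_tag_get(skb) | VLAN_CFI_MASK : 0;
}

static void example_restore(struct sk_buff *skb,
                            const struct example_frag_data *d)
{
        if (d->vlan_tci & VLAN_CFI_MASK)
                __vlan_hwaccel_put_tag(skb, d->vlan_proto,
                                       d->vlan_tci & ~VLAN_CFI_MASK);
        else
                __vlan_hwaccel_clear_tag(skb);
}
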
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index a4660c48ff01..cd94f925495a 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1166,7 +1166,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
if (err) {
net_warn_ratelimited("openvswitch: zone: %u "
- "execeeds conntrack limit\n",
+ "exceeds conntrack limit\n",
info->zone.id);
return err;
}
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 35966da84769..57e07768c9d1 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -325,7 +325,7 @@ static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh,
return -ENOMEM;
vh = (struct vlan_head *)skb->data;
- key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT);
+ key_vh->tci = vh->tci | htons(VLAN_CFI_MASK);
key_vh->tpid = vh->tpid;
if (unlikely(untag_vlan)) {
@@ -358,7 +358,7 @@ static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
int res;
if (skb_vlan_tag_present(skb)) {
- key->eth.vlan.tci = htons(skb->vlan_tci);
+ key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK);
key->eth.vlan.tpid = skb->vlan_proto;
} else {
/* Parse outer vlan tag in the non-accelerated case. */
@@ -597,7 +597,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
* skb_vlan_pop(), which will later shift the ethertype into
* skb->protocol.
*/
- if (key->eth.cvlan.tci & htons(VLAN_TAG_PRESENT))
+ if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK))
skb->protocol = key->eth.cvlan.tpid;
else
skb->protocol = key->eth.type;
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index c670dd24b8b7..ba01fc4270bd 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -60,7 +60,7 @@ struct ovs_tunnel_info {
struct vlan_head {
__be16 tpid; /* Vlan type. Generally 802.1q or 802.1ad.*/
- __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */
+ __be16 tci; /* 0 if no VLAN, VLAN_CFI_MASK set otherwise. */
};
#define OVS_SW_FLOW_KEY_METADATA_SIZE \
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 865ecef68196..435a4bdf8f89 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -990,9 +990,9 @@ static int validate_vlan_from_nlattrs(const struct sw_flow_match *match,
if (a[OVS_KEY_ATTR_VLAN])
tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
- if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ if (!(tci & htons(VLAN_CFI_MASK))) {
if (tci) {
- OVS_NLERR(log, "%s TCI does not have VLAN_TAG_PRESENT bit set.",
+ OVS_NLERR(log, "%s TCI does not have VLAN_CFI_MASK bit set.",
(inner) ? "C-VLAN" : "VLAN");
return -EINVAL;
} else if (nla_len(a[OVS_KEY_ATTR_ENCAP])) {
@@ -1013,9 +1013,9 @@ static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
__be16 tci = 0;
__be16 tpid = 0;
bool encap_valid = !!(match->key->eth.vlan.tci &
- htons(VLAN_TAG_PRESENT));
+ htons(VLAN_CFI_MASK));
bool i_encap_valid = !!(match->key->eth.cvlan.tci &
- htons(VLAN_TAG_PRESENT));
+ htons(VLAN_CFI_MASK));
if (!(key_attrs & (1 << OVS_KEY_ATTR_ENCAP))) {
/* Not a VLAN. */
@@ -1039,8 +1039,8 @@ static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
(inner) ? "C-VLAN" : "VLAN", ntohs(tpid));
return -EINVAL;
}
- if (!(tci & htons(VLAN_TAG_PRESENT))) {
- OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_TAG_PRESENT bit.",
+ if (!(tci & htons(VLAN_CFI_MASK))) {
+ OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_CFI_MASK bit.",
(inner) ? "C-VLAN" : "VLAN");
return -EINVAL;
}
@@ -1095,7 +1095,7 @@ static int parse_vlan_from_nlattrs(struct sw_flow_match *match,
if (err)
return err;
- encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_TAG_PRESENT));
+ encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_CFI_MASK));
if (encap_valid) {
err = __parse_vlan_from_nlattrs(match, key_attrs, true, a,
is_mask, log);
@@ -2943,7 +2943,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
vlan = nla_data(a);
if (!eth_type_vlan(vlan->vlan_tpid))
return -EINVAL;
- if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
+ if (!(vlan->vlan_tci & htons(VLAN_CFI_MASK)))
return -EINVAL;
vlan_tci = vlan->vlan_tci;
break;
@@ -2959,7 +2959,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
/* Prohibit push MPLS other than to a white list
* for packets that have a known tag order.
*/
- if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
+ if (vlan_tci & htons(VLAN_CFI_MASK) ||
(eth_type != htons(ETH_P_IP) &&
eth_type != htons(ETH_P_IPV6) &&
eth_type != htons(ETH_P_ARP) &&
@@ -2971,7 +2971,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
}
case OVS_ACTION_ATTR_POP_MPLS:
- if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
+ if (vlan_tci & htons(VLAN_CFI_MASK) ||
!eth_p_mpls(eth_type))
return -EINVAL;
@@ -3036,7 +3036,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
case OVS_ACTION_ATTR_POP_ETH:
if (mac_proto != MAC_PROTO_ETHERNET)
return -EINVAL;
- if (vlan_tci & htons(VLAN_TAG_PRESENT))
+ if (vlan_tci & htons(VLAN_CFI_MASK))
return -EINVAL;
mac_proto = MAC_PROTO_NONE;
break;
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 5aaf3babfc3f..acb6077b7478 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -93,7 +93,7 @@ static struct vport *geneve_tnl_create(const struct vport_parms *parms)
return ERR_CAST(dev);
}
- err = dev_change_flags(dev, dev->flags | IFF_UP);
+ err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
rtnl_delete_link(dev);
rtnl_unlock();
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 0e72d95b0e8f..c38a62464b85 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -68,7 +68,7 @@ static struct vport *gre_tnl_create(const struct vport_parms *parms)
return ERR_CAST(dev);
}
- err = dev_change_flags(dev, dev->flags | IFF_UP);
+ err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
rtnl_delete_link(dev);
rtnl_unlock();
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 2e5e7a41d8ef..9bec22e3e9e8 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -84,7 +84,6 @@ static struct net_device *get_dpdev(const struct datapath *dp)
struct vport *local;
local = ovs_vport_ovsl(dp, OVSP_LOCAL);
- BUG_ON(!local);
return local->dev;
}
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 7e6301b2ec4d..8f16f11f7ad3 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -131,7 +131,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
return ERR_CAST(dev);
}
- err = dev_change_flags(dev, dev->flags | IFF_UP);
+ err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
rtnl_delete_link(dev);
rtnl_unlock();
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index a74650e98f42..eedacdebcd4c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1965,7 +1965,7 @@ retry:
skb->mark = sk->sk_mark;
skb->tstamp = sockc.transmit_time;
- sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
+ skb_setup_tx_timestamp(skb, sockc.tsflags);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
@@ -2460,7 +2460,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb->priority = po->sk.sk_priority;
skb->mark = po->sk.sk_mark;
skb->tstamp = sockc->transmit_time;
- sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
+ skb_setup_tx_timestamp(skb, sockc->tsflags);
skb_zcopy_set_nouarg(skb, ph.raw);
skb_reserve(skb, hlen);
@@ -2625,8 +2625,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_addr;
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+ if (addr && dev && saddr->sll_halen < dev->addr_len)
+ goto out;
}
err = -ENXIO;
@@ -2823,8 +2825,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_addr;
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+ if (addr && dev && saddr->sll_halen < dev->addr_len)
+ goto out;
}
err = -ENXIO;
@@ -2898,7 +2902,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
goto out_free;
}
- sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
+ skb_setup_tx_timestamp(skb, sockc.tsflags);
if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
!packet_extra_vlan_len_allowed(dev, skb)) {
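
Both send paths above now treat a zero sll_halen as 'no address supplied' and otherwise require at least dev->addr_len bytes, so the device's header builder can never read past the end of the sockaddr. A condensed sketch of the check:

#include <linux/err.h>
#include <linux/if_packet.h>
#include <linux/netdevice.h>

/* Returns the link-layer address to use, NULL, or an ERR_PTR. Sketch only. */
static const unsigned char *example_ll_addr(const struct sockaddr_ll *saddr,
                                            const struct net_device *dev)
{
        const unsigned char *addr;

        addr = saddr->sll_halen ? saddr->sll_addr : NULL;
        if (addr && saddr->sll_halen < dev->addr_len)
                return ERR_PTR(-EINVAL);        /* too short for this device */
        return addr;
}
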
diff --git a/net/rds/message.c b/net/rds/message.c
index 4b00b1152a5f..f139420ba1f6 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -308,16 +308,27 @@ out:
/*
* RDS ops use this to grab SG entries from the rm's sg pool.
*/
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+ int *ret)
{
struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
struct scatterlist *sg_ret;
- WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
- WARN_ON(!nents);
+ if (WARN_ON(!ret))
+ return NULL;
- if (rm->m_used_sgs + nents > rm->m_total_sgs)
+ if (nents <= 0) {
+ pr_warn("rds: alloc sgs failed! nents <= 0\n");
+ *ret = -EINVAL;
return NULL;
+ }
+
+ if (rm->m_used_sgs + nents > rm->m_total_sgs) {
+ pr_warn("rds: alloc sgs failed! total %d used %d nents %d\n",
+ rm->m_total_sgs, rm->m_used_sgs, nents);
+ *ret = -ENOMEM;
+ return NULL;
+ }
sg_ret = &sg_first[rm->m_used_sgs];
sg_init_table(sg_ret, nents);
@@ -332,6 +343,7 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
unsigned int i;
int num_sgs = ceil(total_len, PAGE_SIZE);
int extra_bytes = num_sgs * sizeof(struct scatterlist);
+ int ret;
rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
if (!rm)
@@ -340,10 +352,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
rm->data.op_nents = ceil(total_len, PAGE_SIZE);
- rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
+ rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
if (!rm->data.op_sg) {
rds_message_put(rm);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
for (i = 0; i < rm->data.op_nents; ++i) {
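
rds_message_alloc_sgs() keeps its NULL-on-failure contract but now says why through an out-parameter, letting callers forward -EINVAL versus -ENOMEM instead of guessing. A stripped-down sketch of the convention against a hypothetical pool:

#include <linux/errno.h>
#include <linux/types.h>

struct example_pool { int used, total; };       /* hypothetical SG pool */

static void *example_alloc(struct example_pool *p, int nents, int *ret)
{
        if (nents <= 0) {
                *ret = -EINVAL;                 /* nonsensical request */
                return NULL;
        }
        if (p->used + nents > p->total) {
                *ret = -ENOMEM;                 /* pool exhausted */
                return NULL;
        }
        p->used += nents;
        return p;       /* stand-in for the real scatterlist slice */
}

/* Caller pattern, as in rds_message_map_pages() above:
 *      sg = example_alloc(pool, n, &ret);
 *      if (!sg)
 *              return ERR_PTR(ret);
 */
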
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 98237feb607a..182ab8430594 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -517,9 +517,10 @@ static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
return tot_pages;
}
-int rds_rdma_extra_size(struct rds_rdma_args *args)
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+ struct rds_iov_vector *iov)
{
- struct rds_iovec vec;
+ struct rds_iovec *vec;
struct rds_iovec __user *local_vec;
int tot_pages = 0;
unsigned int nr_pages;
@@ -530,13 +531,23 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
if (args->nr_local == 0)
return -EINVAL;
+ iov->iov = kcalloc(args->nr_local,
+ sizeof(struct rds_iovec),
+ GFP_KERNEL);
+ if (!iov->iov)
+ return -ENOMEM;
+
+ vec = &iov->iov[0];
+
+ if (copy_from_user(vec, local_vec, args->nr_local *
+ sizeof(struct rds_iovec)))
+ return -EFAULT;
+ iov->len = args->nr_local;
+
/* figure out the number of pages in the vector */
- for (i = 0; i < args->nr_local; i++) {
- if (copy_from_user(&vec, &local_vec[i],
- sizeof(struct rds_iovec)))
- return -EFAULT;
+ for (i = 0; i < args->nr_local; i++, vec++) {
- nr_pages = rds_pages_in_vec(&vec);
+ nr_pages = rds_pages_in_vec(vec);
if (nr_pages == 0)
return -EINVAL;
@@ -558,15 +569,15 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
* Extract all arguments and set up the rdma_op
*/
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg)
+ struct cmsghdr *cmsg,
+ struct rds_iov_vector *vec)
{
struct rds_rdma_args *args;
struct rm_rdma_op *op = &rm->rdma;
int nr_pages;
unsigned int nr_bytes;
struct page **pages = NULL;
- struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
- int iov_size;
+ struct rds_iovec *iovs;
unsigned int i, j;
int ret = 0;
@@ -586,31 +597,23 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
goto out_ret;
}
- /* Check whether to allocate the iovec area */
- iov_size = args->nr_local * sizeof(struct rds_iovec);
- if (args->nr_local > UIO_FASTIOV) {
- iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
- if (!iovs) {
- ret = -ENOMEM;
- goto out_ret;
- }
+ if (vec->len != args->nr_local) {
+ ret = -EINVAL;
+ goto out_ret;
}
- if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
- ret = -EFAULT;
- goto out;
- }
+ iovs = vec->iov;
nr_pages = rds_rdma_pages(iovs, args->nr_local);
if (nr_pages < 0) {
ret = -EINVAL;
- goto out;
+ goto out_ret;
}
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
- goto out;
+ goto out_ret;
}
op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
@@ -620,11 +623,9 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
op->op_active = 1;
op->op_recverr = rs->rs_recverr;
WARN_ON(!nr_pages);
- op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
- if (!op->op_sg) {
- ret = -ENOMEM;
- goto out;
- }
+ op->op_sg = rds_message_alloc_sgs(rm, nr_pages, &ret);
+ if (!op->op_sg)
+ goto out_pages;
if (op->op_notify || op->op_recverr) {
/* We allocate an uninitialized notifier here, because
@@ -635,7 +636,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
if (!op->op_notifier) {
ret = -ENOMEM;
- goto out;
+ goto out_pages;
}
op->op_notifier->n_user_token = args->user_token;
op->op_notifier->n_status = RDS_RDMA_SUCCESS;
@@ -681,7 +682,7 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
*/
ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
if (ret < 0)
- goto out;
+ goto out_pages;
else
ret = 0;
@@ -714,13 +715,11 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
nr_bytes,
(unsigned int) args->remote_vec.bytes);
ret = -EINVAL;
- goto out;
+ goto out_pages;
}
op->op_bytes = nr_bytes;
-out:
- if (iovs != iovstack)
- sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
+out_pages:
kfree(pages);
out_ret:
if (ret)
@@ -838,11 +837,9 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
rm->atomic.op_active = 1;
rm->atomic.op_recverr = rs->rs_recverr;
- rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
- if (!rm->atomic.op_sg) {
- ret = -ENOMEM;
+ rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1, &ret);
+ if (!rm->atomic.op_sg)
goto err;
- }
/* verify 8 byte-aligned */
if (args->local_addr & 0x7) {
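
Copying the whole user iovec once, up front, into rds_iov_vector and reusing that snapshot in rds_cmsg_rdma_args() closes a double-fetch window: previously the sizing pass and the pinning pass each ran their own copy_from_user(), so user space could swap the vector between the two reads. A hedged sketch of the single-fetch shape:

#include <linux/slab.h>
#include <linux/uaccess.h>

struct example_iovec { u64 addr, bytes; };      /* hypothetical layout */
struct example_iov_vector { struct example_iovec *iov; int len; };

static int example_fetch_iov(struct example_iov_vector *vec,
                             const void __user *uptr, int nr)
{
        vec->iov = kcalloc(nr, sizeof(*vec->iov), GFP_KERNEL);
        if (!vec->iov)
                return -ENOMEM;
        /* One fetch; every later pass works on this kernel copy. */
        if (copy_from_user(vec->iov, uptr, nr * sizeof(*vec->iov)))
                return -EFAULT;
        vec->len = nr;
        return 0;
}
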
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 6bfaf05b63b2..02ec4a3b2799 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -386,6 +386,18 @@ static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
INIT_LIST_HEAD(&q->zcookie_head);
}
+struct rds_iov_vector {
+ struct rds_iovec *iov;
+ int len;
+};
+
+struct rds_iov_vector_arr {
+ struct rds_iov_vector *vec;
+ int len;
+ int indx;
+ int incr;
+};
+
struct rds_message {
refcount_t m_refcount;
struct list_head m_sock_item;
@@ -827,7 +839,8 @@ rds_conn_connecting(struct rds_connection *conn)
/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
-struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents,
+ int *ret);
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from,
bool zcopy);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
@@ -904,13 +917,13 @@ int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
-int rds_rdma_extra_size(struct rds_rdma_args *args);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
+int rds_rdma_extra_size(struct rds_rdma_args *args,
+ struct rds_iov_vector *iov);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
+ struct cmsghdr *cmsg,
+ struct rds_iov_vector *vec);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
diff --git a/net/rds/send.c b/net/rds/send.c
index fe785ee819dd..3d822bad7de9 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -876,13 +876,18 @@ out:
* rds_message is getting to be quite complicated, and we'd like to allocate
* it all in one go. This figures out how big it needs to be up front.
*/
-static int rds_rm_size(struct msghdr *msg, int num_sgs)
+static int rds_rm_size(struct msghdr *msg, int num_sgs,
+ struct rds_iov_vector_arr *vct)
{
struct cmsghdr *cmsg;
int size = 0;
int cmsg_groups = 0;
int retval;
bool zcopy_cookie = false;
+ struct rds_iov_vector *iov, *tmp_iov;
+
+ if (num_sgs < 0)
+ return -EINVAL;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
@@ -893,8 +898,24 @@ static int rds_rm_size(struct msghdr *msg, int num_sgs)
switch (cmsg->cmsg_type) {
case RDS_CMSG_RDMA_ARGS:
+ if (vct->indx >= vct->len) {
+ vct->len += vct->incr;
+ tmp_iov =
+ krealloc(vct->vec,
+ vct->len *
+ sizeof(struct rds_iov_vector),
+ GFP_KERNEL);
+ if (!tmp_iov) {
+ vct->len -= vct->incr;
+ return -ENOMEM;
+ }
+ vct->vec = tmp_iov;
+ }
+ iov = &vct->vec[vct->indx];
+ memset(iov, 0, sizeof(struct rds_iov_vector));
+ vct->indx++;
cmsg_groups |= 1;
- retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
+ retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov);
if (retval < 0)
return retval;
size += retval;
@@ -951,10 +972,11 @@ static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm,
}
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
- struct msghdr *msg, int *allocated_mr)
+ struct msghdr *msg, int *allocated_mr,
+ struct rds_iov_vector_arr *vct)
{
struct cmsghdr *cmsg;
- int ret = 0;
+ int ret = 0, ind = 0;
for_each_cmsghdr(cmsg, msg) {
if (!CMSG_OK(msg, cmsg))
@@ -968,7 +990,10 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
*/
switch (cmsg->cmsg_type) {
case RDS_CMSG_RDMA_ARGS:
- ret = rds_cmsg_rdma_args(rs, rm, cmsg);
+ if (ind >= vct->indx)
+ return -ENOMEM;
+ ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]);
+ ind++;
break;
case RDS_CMSG_RDMA_DEST:
@@ -1084,6 +1109,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY));
int num_sgs = ceil(payload_len, PAGE_SIZE);
int namelen;
+ struct rds_iov_vector_arr vct;
+ int ind;
+
+ memset(&vct, 0, sizeof(vct));
+
+ /* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */
+ vct.incr = 1;
/* Mirror Linux UDP mirror of BSD error message compatibility */
/* XXX: Perhaps MSG_MORE someday */
@@ -1220,7 +1252,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX);
}
/* size of rm including all sgs */
- ret = rds_rm_size(msg, num_sgs);
+ ret = rds_rm_size(msg, num_sgs, &vct);
if (ret < 0)
goto out;
@@ -1232,11 +1264,9 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
/* Attach data to the rm */
if (payload_len) {
- rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
- if (!rm->data.op_sg) {
- ret = -ENOMEM;
+ rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs, &ret);
+ if (!rm->data.op_sg)
goto out;
- }
ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy);
if (ret)
goto out;
@@ -1270,7 +1300,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
rm->m_conn_path = cpath;
/* Parse any control messages the user may have included. */
- ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
+ ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct);
if (ret) {
/* Trigger connection so that its ready for the next retry */
if (ret == -EAGAIN)
@@ -1348,9 +1378,18 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
if (ret)
goto out;
rds_message_put(rm);
+
+ for (ind = 0; ind < vct.indx; ind++)
+ kfree(vct.vec[ind].iov);
+ kfree(vct.vec);
+
return payload_len;
out:
+ for (ind = 0; ind < vct.indx; ind++)
+ kfree(vct.vec[ind].iov);
+ kfree(vct.vec);
+
/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
* If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
* or in any other way, we need to destroy the MR again */
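
rds_rm_size() grows the vector array on demand with krealloc(), and rds_sendmsg() releases it identically on the success and error exits. A sketch of the grow step; the element bookkeeping is simplified:

#include <linux/slab.h>

struct example_arr {
        void *vec;
        int len, indx, incr;
        size_t elem;    /* element size */
};

static void *example_next_slot(struct example_arr *a)
{
        void *tmp;

        if (a->indx >= a->len) {
                tmp = krealloc(a->vec, (a->len + a->incr) * a->elem,
                               GFP_KERNEL);
                if (!tmp)
                        return NULL;    /* old buffer is still valid */
                a->vec = tmp;
                a->len += a->incr;
        }
        return (char *)a->vec + a->indx++ * a->elem;
}
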
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 0f8465852254..41a5cd4b5c0e 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -16,7 +16,6 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
-#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9c1b0729aebf..d4b8355737d8 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -21,8 +21,6 @@
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/rhashtable.h>
-#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
@@ -1522,227 +1520,8 @@ out_module_put:
return skb->len;
}
-struct tcf_action_net {
- struct rhashtable egdev_ht;
-};
-
-static unsigned int tcf_action_net_id;
-
-struct tcf_action_egdev_cb {
- struct list_head list;
- tc_setup_cb_t *cb;
- void *cb_priv;
-};
-
-struct tcf_action_egdev {
- struct rhash_head ht_node;
- const struct net_device *dev;
- unsigned int refcnt;
- struct list_head cb_list;
-};
-
-static const struct rhashtable_params tcf_action_egdev_ht_params = {
- .key_offset = offsetof(struct tcf_action_egdev, dev),
- .head_offset = offsetof(struct tcf_action_egdev, ht_node),
- .key_len = sizeof(const struct net_device *),
-};
-
-static struct tcf_action_egdev *
-tcf_action_egdev_lookup(const struct net_device *dev)
-{
- struct net *net = dev_net(dev);
- struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);
-
- return rhashtable_lookup_fast(&tan->egdev_ht, &dev,
- tcf_action_egdev_ht_params);
-}
-
-static struct tcf_action_egdev *
-tcf_action_egdev_get(const struct net_device *dev)
-{
- struct tcf_action_egdev *egdev;
- struct tcf_action_net *tan;
-
- egdev = tcf_action_egdev_lookup(dev);
- if (egdev)
- goto inc_ref;
-
- egdev = kzalloc(sizeof(*egdev), GFP_KERNEL);
- if (!egdev)
- return NULL;
- INIT_LIST_HEAD(&egdev->cb_list);
- egdev->dev = dev;
- tan = net_generic(dev_net(dev), tcf_action_net_id);
- rhashtable_insert_fast(&tan->egdev_ht, &egdev->ht_node,
- tcf_action_egdev_ht_params);
-
-inc_ref:
- egdev->refcnt++;
- return egdev;
-}
-
-static void tcf_action_egdev_put(struct tcf_action_egdev *egdev)
-{
- struct tcf_action_net *tan;
-
- if (--egdev->refcnt)
- return;
- tan = net_generic(dev_net(egdev->dev), tcf_action_net_id);
- rhashtable_remove_fast(&tan->egdev_ht, &egdev->ht_node,
- tcf_action_egdev_ht_params);
- kfree(egdev);
-}
-
-static struct tcf_action_egdev_cb *
-tcf_action_egdev_cb_lookup(struct tcf_action_egdev *egdev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
- struct tcf_action_egdev_cb *egdev_cb;
-
- list_for_each_entry(egdev_cb, &egdev->cb_list, list)
- if (egdev_cb->cb == cb && egdev_cb->cb_priv == cb_priv)
- return egdev_cb;
- return NULL;
-}
-
-static int tcf_action_egdev_cb_call(struct tcf_action_egdev *egdev,
- enum tc_setup_type type,
- void *type_data, bool err_stop)
-{
- struct tcf_action_egdev_cb *egdev_cb;
- int ok_count = 0;
- int err;
-
- list_for_each_entry(egdev_cb, &egdev->cb_list, list) {
- err = egdev_cb->cb(type, type_data, egdev_cb->cb_priv);
- if (err) {
- if (err_stop)
- return err;
- } else {
- ok_count++;
- }
- }
- return ok_count;
-}
-
-static int tcf_action_egdev_cb_add(struct tcf_action_egdev *egdev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
- struct tcf_action_egdev_cb *egdev_cb;
-
- egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
- if (WARN_ON(egdev_cb))
- return -EEXIST;
- egdev_cb = kzalloc(sizeof(*egdev_cb), GFP_KERNEL);
- if (!egdev_cb)
- return -ENOMEM;
- egdev_cb->cb = cb;
- egdev_cb->cb_priv = cb_priv;
- list_add(&egdev_cb->list, &egdev->cb_list);
- return 0;
-}
-
-static void tcf_action_egdev_cb_del(struct tcf_action_egdev *egdev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
- struct tcf_action_egdev_cb *egdev_cb;
-
- egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
- if (WARN_ON(!egdev_cb))
- return;
- list_del(&egdev_cb->list);
- kfree(egdev_cb);
-}
-
-static int __tc_setup_cb_egdev_register(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
- struct tcf_action_egdev *egdev = tcf_action_egdev_get(dev);
- int err;
-
- if (!egdev)
- return -ENOMEM;
- err = tcf_action_egdev_cb_add(egdev, cb, cb_priv);
- if (err)
- goto err_cb_add;
- return 0;
-
-err_cb_add:
- tcf_action_egdev_put(egdev);
- return err;
-}
-int tc_setup_cb_egdev_register(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
- int err;
-
- rtnl_lock();
- err = __tc_setup_cb_egdev_register(dev, cb, cb_priv);
- rtnl_unlock();
- return err;
-}
-EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_register);
-
-static void __tc_setup_cb_egdev_unregister(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
- struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);
-
- if (WARN_ON(!egdev))
- return;
- tcf_action_egdev_cb_del(egdev, cb, cb_priv);
- tcf_action_egdev_put(egdev);
-}
-void tc_setup_cb_egdev_unregister(const struct net_device *dev,
- tc_setup_cb_t *cb, void *cb_priv)
-{
- rtnl_lock();
- __tc_setup_cb_egdev_unregister(dev, cb, cb_priv);
- rtnl_unlock();
-}
-EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_unregister);
-
-int tc_setup_cb_egdev_call(const struct net_device *dev,
- enum tc_setup_type type, void *type_data,
- bool err_stop)
-{
- struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);
-
- if (!egdev)
- return 0;
- return tcf_action_egdev_cb_call(egdev, type, type_data, err_stop);
-}
-EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_call);
-
-static __net_init int tcf_action_net_init(struct net *net)
-{
- struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);
-
- return rhashtable_init(&tan->egdev_ht, &tcf_action_egdev_ht_params);
-}
-
-static void __net_exit tcf_action_net_exit(struct net *net)
-{
- struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);
-
- rhashtable_destroy(&tan->egdev_ht);
-}
-
-static struct pernet_operations tcf_action_net_ops = {
- .init = tcf_action_net_init,
- .exit = tcf_action_net_exit,
- .id = &tcf_action_net_id,
- .size = sizeof(struct tcf_action_net),
-};
-
static int __init tc_action_init(void)
{
- int err;
-
- err = register_pernet_subsys(&tcf_action_net_ops);
- if (err)
- return err;
-
rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 37c9b8f0e10f..ec8ec55e0fe8 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -85,7 +85,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
int ovr, int bind, bool rtnl_held,
struct netlink_ext_ack *extack)
{
- int ret = 0, err;
+ int ret = 0, tcfp_result = TC_ACT_OK, err, size;
struct nlattr *tb[TCA_POLICE_MAX + 1];
struct tc_police *parm;
struct tcf_police *police;
@@ -93,7 +93,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
struct tc_action_net *tn = net_generic(net, police_net_id);
struct tcf_police_params *new;
bool exists = false;
- int size;
if (nla == NULL)
return -EINVAL;
@@ -160,6 +159,16 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
goto failure;
}
+ if (tb[TCA_POLICE_RESULT]) {
+ tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
+ if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
+ NL_SET_ERR_MSG(extack,
+ "goto chain not allowed on fallback");
+ err = -EINVAL;
+ goto failure;
+ }
+ }
+
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (unlikely(!new)) {
err = -ENOMEM;
@@ -167,6 +176,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
/* No failure allowed after this point */
+ new->tcfp_result = tcfp_result;
new->tcfp_mtu = parm->mtu;
if (!new->tcfp_mtu) {
new->tcfp_mtu = ~0;
@@ -196,16 +206,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
if (tb[TCA_POLICE_AVRATE])
new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
- if (tb[TCA_POLICE_RESULT]) {
- new->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
- if (TC_ACT_EXT_CMP(new->tcfp_result, TC_ACT_GOTO_CHAIN)) {
- NL_SET_ERR_MSG(extack,
- "goto chain not allowed on fallback");
- err = -EINVAL;
- goto failure;
- }
- }
-
spin_lock_bh(&police->tcf_lock);
spin_lock_bh(&police->tcfp_lock);
police->tcfp_t_c = ktime_get_ns();
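
Hoisting the TCA_POLICE_RESULT check above the kzalloc() keeps every possible failure before the '/* No failure allowed after this point */' marker, so a half-built params block never needs unwinding. The ordering, in a hedged sketch with hypothetical types:

#include <linux/slab.h>
#include <linux/types.h>

struct example_params { int result; };          /* hypothetical */

static int example_init(bool has_result, int result_attr)
{
        struct example_params *new;
        int result = 0;                         /* default verdict */

        /* 1. Validate everything that can fail first. */
        if (has_result) {
                if (result_attr < 0)            /* stand-in validity test */
                        return -EINVAL;
                result = result_attr;
        }

        /* 2. Allocate and commit; no failure allowed after this point. */
        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
        new->result = result;
        /* ...publish 'new' under the appropriate locks... */
        return 0;
}
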
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 4cca8f274662..c3b90fadaff6 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -210,9 +210,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
struct tcf_tunnel_key *t;
bool exists = false;
__be16 dst_port = 0;
+ __be64 key_id = 0;
int opts_len = 0;
- __be64 key_id;
- __be16 flags;
+ __be16 flags = 0;
u8 tos, ttl;
int ret = 0;
int err;
@@ -246,15 +246,15 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
case TCA_TUNNEL_KEY_ACT_RELEASE:
break;
case TCA_TUNNEL_KEY_ACT_SET:
- if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
- NL_SET_ERR_MSG(extack, "Missing tunnel key id");
- ret = -EINVAL;
- goto err_out;
- }
+ if (tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
+ __be32 key32;
- key_id = key32_to_tunnel_id(nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]));
+ key32 = nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]);
+ key_id = key32_to_tunnel_id(key32);
+ flags = TUNNEL_KEY;
+ }
- flags = TUNNEL_KEY | TUNNEL_CSUM;
+ flags |= TUNNEL_CSUM;
if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
flags &= ~TUNNEL_CSUM;
@@ -508,10 +508,13 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
struct ip_tunnel_key *key = &info->key;
__be32 key_id = tunnel_id_to_key32(key->tun_id);
- if (nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id) ||
+ if (((key->tun_flags & TUNNEL_KEY) &&
+ nla_put_be32(skb, TCA_TUNNEL_KEY_ENC_KEY_ID, key_id)) ||
tunnel_key_dump_addresses(skb,
&params->tcft_enc_metadata->u.tun_info) ||
- nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT, key->tp_dst) ||
+ (key->tp_dst &&
+ nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_DST_PORT,
+ key->tp_dst)) ||
nla_put_u8(skb, TCA_TUNNEL_KEY_NO_CSUM,
!(key->tun_flags & TUNNEL_CSUM)) ||
tunnel_key_opts_dump(skb, info))
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index ba677d54a7af..93fdaf707313 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -63,7 +63,7 @@ static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
/* extract existing tag (and guarantee no hw-accel tag) */
if (skb_vlan_tag_present(skb)) {
tci = skb_vlan_tag_get(skb);
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
} else {
/* in-payload vlan tag, pop it */
err = __skb_vlan_pop(skb, &tci);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f427a1e00e7e..8ce2a0507970 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -25,6 +25,7 @@
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
+#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
@@ -365,6 +366,245 @@ static void tcf_chain_flush(struct tcf_chain *chain)
}
}
+static struct tcf_block *tc_dev_ingress_block(struct net_device *dev)
+{
+ const struct Qdisc_class_ops *cops;
+ struct Qdisc *qdisc;
+
+ if (!dev_ingress_queue(dev))
+ return NULL;
+
+ qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
+ if (!qdisc)
+ return NULL;
+
+ cops = qdisc->ops->cl_ops;
+ if (!cops)
+ return NULL;
+
+ if (!cops->tcf_block)
+ return NULL;
+
+ return cops->tcf_block(qdisc, TC_H_MIN_INGRESS, NULL);
+}
+
+static struct rhashtable indr_setup_block_ht;
+
+struct tc_indr_block_dev {
+ struct rhash_head ht_node;
+ struct net_device *dev;
+ unsigned int refcnt;
+ struct list_head cb_list;
+ struct tcf_block *block;
+};
+
+struct tc_indr_block_cb {
+ struct list_head list;
+ void *cb_priv;
+ tc_indr_block_bind_cb_t *cb;
+ void *cb_ident;
+};
+
+static const struct rhashtable_params tc_indr_setup_block_ht_params = {
+ .key_offset = offsetof(struct tc_indr_block_dev, dev),
+ .head_offset = offsetof(struct tc_indr_block_dev, ht_node),
+ .key_len = sizeof(struct net_device *),
+};
+
+static struct tc_indr_block_dev *
+tc_indr_block_dev_lookup(struct net_device *dev)
+{
+ return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
+ tc_indr_setup_block_ht_params);
+}
+
+static struct tc_indr_block_dev *tc_indr_block_dev_get(struct net_device *dev)
+{
+ struct tc_indr_block_dev *indr_dev;
+
+ indr_dev = tc_indr_block_dev_lookup(dev);
+ if (indr_dev)
+ goto inc_ref;
+
+ indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
+ if (!indr_dev)
+ return NULL;
+
+ INIT_LIST_HEAD(&indr_dev->cb_list);
+ indr_dev->dev = dev;
+ indr_dev->block = tc_dev_ingress_block(dev);
+ if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
+ tc_indr_setup_block_ht_params)) {
+ kfree(indr_dev);
+ return NULL;
+ }
+
+inc_ref:
+ indr_dev->refcnt++;
+ return indr_dev;
+}
+
+static void tc_indr_block_dev_put(struct tc_indr_block_dev *indr_dev)
+{
+ if (--indr_dev->refcnt)
+ return;
+
+ rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
+ tc_indr_setup_block_ht_params);
+ kfree(indr_dev);
+}
+
+static struct tc_indr_block_cb *
+tc_indr_block_cb_lookup(struct tc_indr_block_dev *indr_dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ struct tc_indr_block_cb *indr_block_cb;
+
+ list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+ if (indr_block_cb->cb == cb &&
+ indr_block_cb->cb_ident == cb_ident)
+ return indr_block_cb;
+ return NULL;
+}
+
+static struct tc_indr_block_cb *
+tc_indr_block_cb_add(struct tc_indr_block_dev *indr_dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ struct tc_indr_block_cb *indr_block_cb;
+
+ indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
+ if (indr_block_cb)
+ return ERR_PTR(-EEXIST);
+
+ indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
+ if (!indr_block_cb)
+ return ERR_PTR(-ENOMEM);
+
+ indr_block_cb->cb_priv = cb_priv;
+ indr_block_cb->cb = cb;
+ indr_block_cb->cb_ident = cb_ident;
+ list_add(&indr_block_cb->list, &indr_dev->cb_list);
+
+ return indr_block_cb;
+}
+
+static void tc_indr_block_cb_del(struct tc_indr_block_cb *indr_block_cb)
+{
+ list_del(&indr_block_cb->list);
+ kfree(indr_block_cb);
+}
+
+static void tc_indr_block_ing_cmd(struct tc_indr_block_dev *indr_dev,
+ struct tc_indr_block_cb *indr_block_cb,
+ enum tc_block_command command)
+{
+ struct tc_block_offload bo = {
+ .command = command,
+ .binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
+ .block = indr_dev->block,
+ };
+
+ if (!indr_dev->block)
+ return;
+
+ indr_block_cb->cb(indr_dev->dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
+ &bo);
+}
+
+int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ struct tc_indr_block_cb *indr_block_cb;
+ struct tc_indr_block_dev *indr_dev;
+ int err;
+
+ indr_dev = tc_indr_block_dev_get(dev);
+ if (!indr_dev)
+ return -ENOMEM;
+
+ indr_block_cb = tc_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
+ err = PTR_ERR_OR_ZERO(indr_block_cb);
+ if (err)
+ goto err_dev_put;
+
+ tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_BIND);
+ return 0;
+
+err_dev_put:
+ tc_indr_block_dev_put(indr_dev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(__tc_indr_block_cb_register);
+
+int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ int err;
+
+ rtnl_lock();
+ err = __tc_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
+ rtnl_unlock();
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(tc_indr_block_cb_register);
+
+void __tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ struct tc_indr_block_cb *indr_block_cb;
+ struct tc_indr_block_dev *indr_dev;
+
+ indr_dev = tc_indr_block_dev_lookup(dev);
+ if (!indr_dev)
+ return;
+
+ indr_block_cb = tc_indr_block_cb_lookup(indr_dev, cb, cb_ident);
+ if (!indr_block_cb)
+ return;
+
+ /* Send unbind message if required to free any block cbs. */
+ tc_indr_block_ing_cmd(indr_dev, indr_block_cb, TC_BLOCK_UNBIND);
+ tc_indr_block_cb_del(indr_block_cb);
+ tc_indr_block_dev_put(indr_dev);
+}
+EXPORT_SYMBOL_GPL(__tc_indr_block_cb_unregister);
+
+void tc_indr_block_cb_unregister(struct net_device *dev,
+ tc_indr_block_bind_cb_t *cb, void *cb_ident)
+{
+ rtnl_lock();
+ __tc_indr_block_cb_unregister(dev, cb, cb_ident);
+ rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(tc_indr_block_cb_unregister);
+
+static void tc_indr_block_call(struct tcf_block *block, struct net_device *dev,
+ struct tcf_block_ext_info *ei,
+ enum tc_block_command command,
+ struct netlink_ext_ack *extack)
+{
+ struct tc_indr_block_cb *indr_block_cb;
+ struct tc_indr_block_dev *indr_dev;
+ struct tc_block_offload bo = {
+ .command = command,
+ .binder_type = ei->binder_type,
+ .block = block,
+ .extack = extack,
+ };
+
+ indr_dev = tc_indr_block_dev_lookup(dev);
+ if (!indr_dev)
+ return;
+
+ indr_dev->block = command == TC_BLOCK_BIND ? block : NULL;
+
+ list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+ indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
+ &bo);
+}
+
static bool tcf_block_offload_in_use(struct tcf_block *block)
{
return block->offloadcnt;
@@ -406,12 +646,17 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND, extack);
if (err == -EOPNOTSUPP)
goto no_offload_dev_inc;
- return err;
+ if (err)
+ return err;
+
+ tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
+ return 0;
no_offload_dev_inc:
if (tcf_block_offload_in_use(block))
return -EOPNOTSUPP;
block->nooffloaddevcnt++;
+ tc_indr_block_call(block, dev, ei, TC_BLOCK_BIND, extack);
return 0;
}
@@ -421,6 +666,8 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
struct net_device *dev = q->dev_queue->dev;
int err;
+ tc_indr_block_call(block, dev, ei, TC_BLOCK_UNBIND, NULL);
+
if (!dev->netdev_ops->ndo_setup_tc)
goto no_offload_dev_dec;
err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND, NULL);
@@ -1023,29 +1270,6 @@ void tcf_block_cb_unregister(struct tcf_block *block,
}
EXPORT_SYMBOL(tcf_block_cb_unregister);
-static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
- void *type_data, bool err_stop)
-{
- struct tcf_block_cb *block_cb;
- int ok_count = 0;
- int err;
-
- /* Make sure all netdevs sharing this block are offload-capable. */
- if (block->nooffloaddevcnt && err_stop)
- return -EOPNOTSUPP;
-
- list_for_each_entry(block_cb, &block->cb_list, list) {
- err = block_cb->cb(type, type_data, block_cb->cb_priv);
- if (err) {
- if (err_stop)
- return err;
- } else {
- ok_count++;
- }
- }
- return ok_count;
-}
-
/* Main classifier routine: scans classifier chain attached
* to this qdisc, (optionally) tests for protocol and asks
* specific classifiers.
@@ -2268,54 +2492,26 @@ int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
-static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
- enum tc_setup_type type,
- void *type_data, bool err_stop)
+int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
+ void *type_data, bool err_stop)
{
+ struct tcf_block_cb *block_cb;
int ok_count = 0;
-#ifdef CONFIG_NET_CLS_ACT
- const struct tc_action *a;
- struct net_device *dev;
- int i, ret;
+ int err;
- if (!tcf_exts_has_actions(exts))
- return 0;
+ /* Make sure all netdevs sharing this block are offload-capable. */
+ if (block->nooffloaddevcnt && err_stop)
+ return -EOPNOTSUPP;
- for (i = 0; i < exts->nr_actions; i++) {
- a = exts->actions[i];
- if (!a->ops->get_dev)
- continue;
- dev = a->ops->get_dev(a);
- if (!dev)
- continue;
- ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
- a->ops->put_dev(dev);
- if (ret < 0)
- return ret;
- ok_count += ret;
+ list_for_each_entry(block_cb, &block->cb_list, list) {
+ err = block_cb->cb(type, type_data, block_cb->cb_priv);
+ if (err) {
+ if (err_stop)
+ return err;
+ } else {
+ ok_count++;
+ }
}
-#endif
- return ok_count;
-}
-
-int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
- enum tc_setup_type type, void *type_data, bool err_stop)
-{
- int ok_count;
- int ret;
-
- ret = tcf_block_cb_call(block, type, type_data, err_stop);
- if (ret < 0)
- return ret;
- ok_count = ret;
-
- if (!exts || ok_count)
- return ok_count;
- ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
- if (ret < 0)
- return ret;
- ok_count += ret;
-
return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
@@ -2355,6 +2551,11 @@ static int __init tc_filter_init(void)
if (err)
goto err_register_pernet_subsys;
+ err = rhashtable_init(&indr_setup_block_ht,
+ &tc_indr_setup_block_ht_params);
+ if (err)
+ goto err_rhash_setup_block_ht;
+
rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
@@ -2366,6 +2567,8 @@ static int __init tc_filter_init(void)
return 0;
+err_rhash_setup_block_ht:
+ unregister_pernet_subsys(&tcf_net_ops);
err_register_pernet_subsys:
destroy_workqueue(tc_filter_wq);
return err;
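
tc_indr_block_cb_register() lets a driver subscribe to the ingress block of a device it does not implement — a tunnel netdev, say — and receive TC_SETUP_BLOCK bind/unbind commands as that block comes and goes. A hedged sketch of a driver-side registration; the callback body is elided:

#include <net/pkt_cls.h>

static int example_indr_setup(struct net_device *netdev, void *cb_priv,
                              enum tc_setup_type type, void *type_data)
{
        if (type != TC_SETUP_BLOCK)
                return -EOPNOTSUPP;
        /* Inspect the struct tc_block_offload command here (bind/unbind)
         * and (un)register per-block callbacks accordingly. */
        return 0;
}

static int example_watch_dev(struct net_device *netdev, void *priv)
{
        /* priv doubles as cb_ident in this hypothetical caller. */
        return tc_indr_block_cb_register(netdev, priv,
                                         example_indr_setup, priv);
}
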
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index fa6fe2fe0f32..a95cb240a606 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -169,7 +169,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
if (oldprog)
tcf_block_offload_dec(block, &oldprog->gen_flags);
- err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
+ err = tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
if (prog) {
if (err < 0) {
cls_bpf_offload_cmd(tp, oldprog, prog, extack);
@@ -234,7 +234,7 @@ static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
cls_bpf.name = prog->bpf_name;
cls_bpf.exts_integrated = prog->exts_integrated;
- tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
+ tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false);
}
static int cls_bpf_init(struct tcf_proto *tp)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index c6c327874abc..dad04e710493 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -55,6 +55,8 @@ struct fl_flow_key {
struct flow_dissector_key_ip ip;
struct flow_dissector_key_ip enc_ip;
struct flow_dissector_key_enc_opts enc_opts;
+ struct flow_dissector_key_ports tp_min;
+ struct flow_dissector_key_ports tp_max;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct fl_flow_mask_range {
@@ -65,6 +67,7 @@ struct fl_flow_mask_range {
struct fl_flow_mask {
struct fl_flow_key key;
struct fl_flow_mask_range range;
+ u32 flags;
struct rhash_head ht_node;
struct rhashtable ht;
struct rhashtable_params filter_ht_params;
@@ -179,13 +182,89 @@ static void fl_clear_masked_range(struct fl_flow_key *key,
memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
-static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
- struct fl_flow_key *mkey)
+static bool fl_range_port_dst_cmp(struct cls_fl_filter *filter,
+ struct fl_flow_key *key,
+ struct fl_flow_key *mkey)
+{
+ __be16 min_mask, max_mask, min_val, max_val;
+
+ min_mask = htons(filter->mask->key.tp_min.dst);
+ max_mask = htons(filter->mask->key.tp_max.dst);
+ min_val = htons(filter->key.tp_min.dst);
+ max_val = htons(filter->key.tp_max.dst);
+
+ if (min_mask && max_mask) {
+ if (htons(key->tp.dst) < min_val ||
+ htons(key->tp.dst) > max_val)
+ return false;
+
+ /* skb does not have min and max values */
+ mkey->tp_min.dst = filter->mkey.tp_min.dst;
+ mkey->tp_max.dst = filter->mkey.tp_max.dst;
+ }
+ return true;
+}
+
+static bool fl_range_port_src_cmp(struct cls_fl_filter *filter,
+ struct fl_flow_key *key,
+ struct fl_flow_key *mkey)
+{
+ __be16 min_mask, max_mask, min_val, max_val;
+
+ min_mask = htons(filter->mask->key.tp_min.src);
+ max_mask = htons(filter->mask->key.tp_max.src);
+ min_val = htons(filter->key.tp_min.src);
+ max_val = htons(filter->key.tp_max.src);
+
+ if (min_mask && max_mask) {
+ if (htons(key->tp.src) < min_val ||
+ htons(key->tp.src) > max_val)
+ return false;
+
+ /* skb does not have min and max values */
+ mkey->tp_min.src = filter->mkey.tp_min.src;
+ mkey->tp_max.src = filter->mkey.tp_max.src;
+ }
+ return true;
+}
+
+static struct cls_fl_filter *__fl_lookup(struct fl_flow_mask *mask,
+ struct fl_flow_key *mkey)
{
return rhashtable_lookup_fast(&mask->ht, fl_key_get_start(mkey, mask),
mask->filter_ht_params);
}
+static struct cls_fl_filter *fl_lookup_range(struct fl_flow_mask *mask,
+ struct fl_flow_key *mkey,
+ struct fl_flow_key *key)
+{
+ struct cls_fl_filter *filter, *f;
+
+ list_for_each_entry_rcu(filter, &mask->filters, list) {
+ if (!fl_range_port_dst_cmp(filter, key, mkey))
+ continue;
+
+ if (!fl_range_port_src_cmp(filter, key, mkey))
+ continue;
+
+ f = __fl_lookup(mask, mkey);
+ if (f)
+ return f;
+ }
+ return NULL;
+}
+
+static struct cls_fl_filter *fl_lookup(struct fl_flow_mask *mask,
+ struct fl_flow_key *mkey,
+ struct fl_flow_key *key)
+{
+ if ((mask->flags & TCA_FLOWER_MASK_FLAGS_RANGE))
+ return fl_lookup_range(mask, mkey, key);
+
+ return __fl_lookup(mask, mkey);
+}
+
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
@@ -208,7 +287,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
fl_set_masked_key(&skb_mkey, &skb_key, mask);
- f = fl_lookup(mask, &skb_mkey);
+ f = fl_lookup(mask, &skb_mkey, &skb_key);
if (f && !tc_skip_sw(f->flags)) {
*res = f->res;
return tcf_exts_exec(skb, &f->exts, res);
@@ -289,8 +368,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
cls_flower.command = TC_CLSFLOWER_DESTROY;
cls_flower.cookie = (unsigned long) f;
- tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
- &cls_flower, false);
+ tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
tcf_block_offload_dec(block, &f->flags);
}
@@ -312,8 +390,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
cls_flower.exts = &f->exts;
cls_flower.classid = f->res.classid;
- err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
- &cls_flower, skip_sw);
+ err = tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw);
if (err < 0) {
fl_hw_destroy_filter(tp, f, NULL);
return err;
@@ -339,8 +416,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
cls_flower.exts = &f->exts;
cls_flower.classid = f->res.classid;
- tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
- &cls_flower, false);
+ tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
}
static bool __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
@@ -514,6 +590,31 @@ static void fl_set_key_val(struct nlattr **tb,
memcpy(mask, nla_data(tb[mask_type]), len);
}
+static int fl_set_key_port_range(struct nlattr **tb, struct fl_flow_key *key,
+ struct fl_flow_key *mask)
+{
+ fl_set_key_val(tb, &key->tp_min.dst,
+ TCA_FLOWER_KEY_PORT_DST_MIN, &mask->tp_min.dst,
+ TCA_FLOWER_UNSPEC, sizeof(key->tp_min.dst));
+ fl_set_key_val(tb, &key->tp_max.dst,
+ TCA_FLOWER_KEY_PORT_DST_MAX, &mask->tp_max.dst,
+ TCA_FLOWER_UNSPEC, sizeof(key->tp_max.dst));
+ fl_set_key_val(tb, &key->tp_min.src,
+ TCA_FLOWER_KEY_PORT_SRC_MIN, &mask->tp_min.src,
+ TCA_FLOWER_UNSPEC, sizeof(key->tp_min.src));
+ fl_set_key_val(tb, &key->tp_max.src,
+ TCA_FLOWER_KEY_PORT_SRC_MAX, &mask->tp_max.src,
+ TCA_FLOWER_UNSPEC, sizeof(key->tp_max.src));
+
+ if ((mask->tp_min.dst && mask->tp_max.dst &&
+ htons(key->tp_max.dst) <= htons(key->tp_min.dst)) ||
+ (mask->tp_min.src && mask->tp_max.src &&
+ htons(key->tp_max.src) <= htons(key->tp_min.src)))
+ return -EINVAL;
+
+ return 0;
+}
+
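The -EINVAL above rejects empty ranges: once both bounds of a direction are masked in, max must be strictly greater than min. The same predicate in isolation (a sketch, host byte order for brevity):

    #include <stdbool.h>
    #include <stdint.h>

    /* True when a fully specified range is degenerate and must be rejected. */
    static bool range_is_invalid(uint16_t min_mask, uint16_t max_mask,
                                 uint16_t min_val, uint16_t max_val)
    {
            return min_mask && max_mask && max_val <= min_val;
    }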
static int fl_set_key_mpls(struct nlattr **tb,
struct flow_dissector_key_mpls *key_val,
struct flow_dissector_key_mpls *key_mask)
@@ -921,6 +1022,14 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
sizeof(key->arp.tha));
}
+ if (key->basic.ip_proto == IPPROTO_TCP ||
+ key->basic.ip_proto == IPPROTO_UDP ||
+ key->basic.ip_proto == IPPROTO_SCTP) {
+ ret = fl_set_key_port_range(tb, key, mask);
+ if (ret)
+ return ret;
+ }
+
if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
tb[TCA_FLOWER_KEY_ENC_IPV4_DST]) {
key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
@@ -1038,8 +1147,9 @@ static void fl_init_dissector(struct flow_dissector *dissector,
FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
- FL_KEY_SET_IF_MASKED(mask, keys, cnt,
- FLOW_DISSECTOR_KEY_PORTS, tp);
+ if (FL_KEY_IS_MASKED(mask, tp) ||
+ FL_KEY_IS_MASKED(mask, tp_min) || FL_KEY_IS_MASKED(mask, tp_max))
+ FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_PORTS, tp);
FL_KEY_SET_IF_MASKED(mask, keys, cnt,
FLOW_DISSECTOR_KEY_IP, ip);
FL_KEY_SET_IF_MASKED(mask, keys, cnt,
@@ -1086,6 +1196,10 @@ static struct fl_flow_mask *fl_create_new_mask(struct cls_fl_head *head,
fl_mask_copy(newmask, mask);
+ if ((newmask->key.tp_min.dst && newmask->key.tp_max.dst) ||
+ (newmask->key.tp_min.src && newmask->key.tp_max.src))
+ newmask->flags |= TCA_FLOWER_MASK_FLAGS_RANGE;
+
err = fl_init_mask_hashtable(newmask);
if (err)
goto errout_free;
@@ -1238,18 +1352,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (err)
goto errout_idr;
- if (!tc_skip_sw(fnew->flags)) {
- if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
- err = -EEXIST;
- goto errout_mask;
- }
-
- err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
- fnew->mask->filter_ht_params);
- if (err)
- goto errout_mask;
+ if (!fold && __fl_lookup(fnew->mask, &fnew->mkey)) {
+ err = -EEXIST;
+ goto errout_mask;
}
+ err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
+ fnew->mask->filter_ht_params);
+ if (err)
+ goto errout_mask;
+
if (!tc_skip_hw(fnew->flags)) {
err = fl_hw_replace_filter(tp, fnew, extack);
if (err)
@@ -1260,10 +1372,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
if (fold) {
- if (!tc_skip_sw(fold->flags))
- rhashtable_remove_fast(&fold->mask->ht,
- &fold->ht_node,
- fold->mask->filter_ht_params);
+ rhashtable_remove_fast(&fold->mask->ht,
+ &fold->ht_node,
+ fold->mask->filter_ht_params);
if (!tc_skip_hw(fold->flags))
fl_hw_destroy_filter(tp, fold, NULL);
}
@@ -1303,9 +1414,8 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *f = arg;
- if (!tc_skip_sw(f->flags))
- rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
- f->mask->filter_ht_params);
+ rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
+ f->mask->filter_ht_params);
__fl_delete(tp, f, extack);
*last = list_empty(&head->masks);
return 0;
@@ -1388,8 +1498,7 @@ static void fl_hw_create_tmplt(struct tcf_chain *chain,
/* We don't care if any of the drivers fails to handle this
* call. It serves just as a hint for them.
*/
- tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER,
- &cls_flower, false);
+ tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
}
static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
@@ -1402,8 +1511,7 @@ static void fl_hw_destroy_tmplt(struct tcf_chain *chain,
cls_flower.command = TC_CLSFLOWER_TMPLT_DESTROY;
cls_flower.cookie = (unsigned long) tmplt;
- tc_setup_cb_call(block, NULL, TC_SETUP_CLSFLOWER,
- &cls_flower, false);
+ tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
}
static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain,
@@ -1476,6 +1584,26 @@ static int fl_dump_key_val(struct sk_buff *skb,
return 0;
}
+static int fl_dump_key_port_range(struct sk_buff *skb, struct fl_flow_key *key,
+ struct fl_flow_key *mask)
+{
+ if (fl_dump_key_val(skb, &key->tp_min.dst, TCA_FLOWER_KEY_PORT_DST_MIN,
+ &mask->tp_min.dst, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp_min.dst)) ||
+ fl_dump_key_val(skb, &key->tp_max.dst, TCA_FLOWER_KEY_PORT_DST_MAX,
+ &mask->tp_max.dst, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp_max.dst)) ||
+ fl_dump_key_val(skb, &key->tp_min.src, TCA_FLOWER_KEY_PORT_SRC_MIN,
+ &mask->tp_min.src, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp_min.src)) ||
+ fl_dump_key_val(skb, &key->tp_max.src, TCA_FLOWER_KEY_PORT_SRC_MAX,
+ &mask->tp_max.src, TCA_FLOWER_UNSPEC,
+ sizeof(key->tp_max.src)))
+ return -1;
+
+ return 0;
+}
+
static int fl_dump_key_mpls(struct sk_buff *skb,
struct flow_dissector_key_mpls *mpls_key,
struct flow_dissector_key_mpls *mpls_mask)
@@ -1812,6 +1940,12 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
sizeof(key->arp.tha))))
goto nla_put_failure;
+ if ((key->basic.ip_proto == IPPROTO_TCP ||
+ key->basic.ip_proto == IPPROTO_UDP ||
+ key->basic.ip_proto == IPPROTO_SCTP) &&
+ fl_dump_key_port_range(skb, key, mask))
+ goto nla_put_failure;
+
if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
(fl_dump_key_val(skb, &key->enc_ipv4.src,
TCA_FLOWER_KEY_ENC_IPV4_SRC, &mask->enc_ipv4.src,
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 856fa79d4ffd..0e408ee9dcec 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -71,7 +71,7 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp,
cls_mall.command = TC_CLSMATCHALL_DESTROY;
cls_mall.cookie = cookie;
- tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+ tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);
tcf_block_offload_dec(block, &head->flags);
}
@@ -90,8 +90,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
cls_mall.exts = &head->exts;
cls_mall.cookie = cookie;
- err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL,
- &cls_mall, skip_sw);
+ err = tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw);
if (err < 0) {
mall_destroy_hw_filter(tp, head, cookie, NULL);
return err;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 4b28fd44576d..dcea21004604 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -491,7 +491,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
cls_u32.hnode.handle = h->handle;
cls_u32.hnode.prio = h->prio;
- tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
+ tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false);
}
static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
@@ -509,7 +509,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
cls_u32.hnode.handle = h->handle;
cls_u32.hnode.prio = h->prio;
- err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
+ err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw);
if (err < 0) {
u32_clear_hw_hnode(tp, h, NULL);
return err;
@@ -533,7 +533,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
cls_u32.command = TC_CLSU32_DELETE_KNODE;
cls_u32.knode.handle = n->handle;
- tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false);
+ tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false);
tcf_block_offload_dec(block, &n->flags);
}
@@ -558,11 +558,12 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
cls_u32.knode.mask = 0;
#endif
cls_u32.knode.sel = &n->sel;
+ cls_u32.knode.res = &n->res;
cls_u32.knode.exts = &n->exts;
if (n->ht_down)
cls_u32.knode.link_handle = ht->handle;
- err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
+ err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw);
if (err < 0) {
u32_remove_hw_knode(tp, n, NULL);
return err;
@@ -1206,6 +1207,7 @@ static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
cls_u32.knode.mask = 0;
#endif
cls_u32.knode.sel = &n->sel;
+ cls_u32.knode.res = &n->res;
cls_u32.knode.exts = &n->exts;
if (n->ht_down)
cls_u32.knode.link_handle = ht->handle;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ca3b0f46de53..7e4d1ccf4c87 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -335,7 +335,6 @@ out:
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
unsigned long cl;
- struct Qdisc *leaf;
const struct Qdisc_class_ops *cops = p->ops->cl_ops;
if (cops == NULL)
@@ -344,8 +343,7 @@ static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
if (cl == 0)
return NULL;
- leaf = cops->leaf(p, cl);
- return leaf;
+ return cops->leaf(p, cl);
}
/* Find queueing discipline by name */
@@ -540,7 +538,7 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
if (--tab->refcnt == 0) {
list_del(&tab->list);
- call_rcu_bh(&tab->rcu, stab_kfree_rcu);
+ call_rcu(&tab->rcu, stab_kfree_rcu);
}
}
EXPORT_SYMBOL(qdisc_put_stab);
@@ -810,6 +808,71 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
+int qdisc_offload_dump_helper(struct Qdisc *sch, enum tc_setup_type type,
+ void *type_data)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ int err;
+
+ sch->flags &= ~TCQ_F_OFFLOADED;
+ if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+ return 0;
+
+ err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
+ if (err == -EOPNOTSUPP)
+ return 0;
+
+ if (!err)
+ sch->flags |= TCQ_F_OFFLOADED;
+
+ return err;
+}
+EXPORT_SYMBOL(qdisc_offload_dump_helper);
+
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack)
+{
+ bool any_qdisc_is_offloaded;
+ int err;
+
+ if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+ return;
+
+ err = dev->netdev_ops->ndo_setup_tc(dev, type, type_data);
+
+ /* Don't report an error if the graft is part of a destroy operation. */
+ if (!err || !new || new == &noop_qdisc)
+ return;
+
+ /* Don't report an error if neither the parent, the old child,
+ * nor the new one is offloaded.
+ */
+ any_qdisc_is_offloaded = new->flags & TCQ_F_OFFLOADED;
+ any_qdisc_is_offloaded |= sch && sch->flags & TCQ_F_OFFLOADED;
+ any_qdisc_is_offloaded |= old && old->flags & TCQ_F_OFFLOADED;
+
+ if (any_qdisc_is_offloaded)
+ NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
+}
+EXPORT_SYMBOL(qdisc_offload_graft_helper);
+
+static void qdisc_offload_graft_root(struct net_device *dev,
+ struct Qdisc *new, struct Qdisc *old,
+ struct netlink_ext_ack *extack)
+{
+ struct tc_root_qopt_offload graft_offload = {
+ .command = TC_ROOT_GRAFT,
+ .handle = new ? new->handle : 0,
+ .ingress = (new && new->flags & TCQ_F_INGRESS) ||
+ (old && old->flags & TCQ_F_INGRESS),
+ };
+
+ qdisc_offload_graft_helper(dev, NULL, new, old,
+ TC_SETUP_ROOT_QDISC, &graft_offload, extack);
+}
+
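The two helpers above centralize boilerplate that several qdiscs open-coded (prio, red, and mq are converted to them further down): the dump helper treats -EOPNOTSUPP as "no offload, no error" and mirrors the outcome in TCQ_F_OFFLOADED, while the graft helper reports a failure only when at least one of the involved qdiscs was actually offloaded. A user-space sketch of the dump helper's contract, with driver_cb as a hypothetical stand-in for ndo_setup_tc:

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    static int offload_dump(int (*driver_cb)(void), bool *offloaded)
    {
            int err;

            *offloaded = false;                /* mirrors clearing TCQ_F_OFFLOADED */
            if (!driver_cb)
                    return 0;                  /* device cannot offload at all */

            err = driver_cb();
            if (err == -EOPNOTSUPP)
                    return 0;                  /* driver opted out: not an error */
            if (!err)
                    *offloaded = true;         /* mirrors setting TCQ_F_OFFLOADED */
            return err;
    }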
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
u32 portid, u32 seq, u16 flags, int event)
{
@@ -957,7 +1020,6 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
{
struct Qdisc *q = old;
struct net *net = dev_net(dev);
- int err = 0;
if (parent == NULL) {
unsigned int i, num_q, ingress;
@@ -977,6 +1039,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
if (dev->flags & IFF_UP)
dev_deactivate(dev);
+ qdisc_offload_graft_root(dev, new, old, extack);
+
if (new && new->ops->attach)
goto skip;
@@ -1012,28 +1076,29 @@ skip:
dev_activate(dev);
} else {
const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
+ unsigned long cl;
+ int err;
/* Only support running class lockless if parent is lockless */
if (new && (new->flags & TCQ_F_NOLOCK) &&
parent && !(parent->flags & TCQ_F_NOLOCK))
new->flags &= ~TCQ_F_NOLOCK;
- err = -EOPNOTSUPP;
- if (cops && cops->graft) {
- unsigned long cl = cops->find(parent, classid);
+ if (!cops || !cops->graft)
+ return -EOPNOTSUPP;
- if (cl) {
- err = cops->graft(parent, cl, new, &old,
- extack);
- } else {
- NL_SET_ERR_MSG(extack, "Specified class not found");
- err = -ENOENT;
- }
+ cl = cops->find(parent, classid);
+ if (!cl) {
+ NL_SET_ERR_MSG(extack, "Specified class not found");
+ return -ENOENT;
}
- if (!err)
- notify_and_destroy(net, skb, n, classid, old, new);
+
+ err = cops->graft(parent, cl, new, &old, extack);
+ if (err)
+ return err;
+ notify_and_destroy(net, skb, n, classid, old, new);
}
- return err;
+ return 0;
}
static int qdisc_block_indexes_set(struct Qdisc *sch, struct nlattr **tca,
diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
index 1538d6fa8165..1150f22983df 100644
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -30,7 +30,7 @@ struct etf_sched_data {
int queue;
s32 delta; /* in ns */
ktime_t last; /* The txtime of the last skb sent to the netdevice. */
- struct rb_root head;
+ struct rb_root_cached head;
struct qdisc_watchdog watchdog;
ktime_t (*get_time)(void);
};
@@ -104,7 +104,7 @@ static struct sk_buff *etf_peek_timesortedlist(struct Qdisc *sch)
struct etf_sched_data *q = qdisc_priv(sch);
struct rb_node *p;
- p = rb_first(&q->head);
+ p = rb_first_cached(&q->head);
if (!p)
return NULL;
@@ -117,8 +117,10 @@ static void reset_watchdog(struct Qdisc *sch)
struct sk_buff *skb = etf_peek_timesortedlist(sch);
ktime_t next;
- if (!skb)
+ if (!skb) {
+ qdisc_watchdog_cancel(&q->watchdog);
return;
+ }
next = ktime_sub_ns(skb->tstamp, q->delta);
qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next));
@@ -154,8 +156,9 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
struct sk_buff **to_free)
{
struct etf_sched_data *q = qdisc_priv(sch);
- struct rb_node **p = &q->head.rb_node, *parent = NULL;
+ struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL;
ktime_t txtime = nskb->tstamp;
+ bool leftmost = true;
if (!is_packet_valid(sch, nskb)) {
report_sock_error(nskb, EINVAL,
@@ -168,13 +171,15 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
parent = *p;
skb = rb_to_skb(parent);
- if (ktime_after(txtime, skb->tstamp))
+ if (ktime_after(txtime, skb->tstamp)) {
p = &parent->rb_right;
- else
+ leftmost = false;
+ } else {
p = &parent->rb_left;
+ }
}
rb_link_node(&nskb->rbnode, parent, p);
- rb_insert_color(&nskb->rbnode, &q->head);
+ rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost);
qdisc_qstats_backlog_inc(sch, nskb);
sch->q.qlen++;
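The switch from rb_root to rb_root_cached matters because etf peeks at the earliest-txtime skb on every watchdog reset and dequeue: the cached variant keeps a pointer to the leftmost node, turning that peek from O(log n) into O(1). All the enqueue path has to do is note whether the new node ever descended to the right. A toy binary search tree showing the same bookkeeping (illustration only, not the kernel rbtree API):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct node { uint64_t key; struct node *l, *r; };
    struct tree { struct node *root; struct node *leftmost; };

    static void insert(struct tree *t, struct node *n)
    {
            struct node **p = &t->root;
            bool leftmost = true;

            n->l = n->r = NULL;
            while (*p) {
                    if (n->key >= (*p)->key) {
                            p = &(*p)->r;
                            leftmost = false;  /* went right at least once */
                    } else {
                            p = &(*p)->l;
                    }
            }
            *p = n;
            if (leftmost)
                    t->leftmost = n;           /* O(1) peek of the minimum */
    }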
@@ -185,12 +190,42 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
return NET_XMIT_SUCCESS;
}
-static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb,
- bool drop)
+static void timesortedlist_drop(struct Qdisc *sch, struct sk_buff *skb,
+ ktime_t now)
+{
+ struct etf_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *to_free = NULL;
+ struct sk_buff *tmp = NULL;
+
+ skb_rbtree_walk_from_safe(skb, tmp) {
+ if (ktime_after(skb->tstamp, now))
+ break;
+
+ rb_erase_cached(&skb->rbnode, &q->head);
+
+ /* The rbnode field in the skb re-uses these fields; now that
+ * we are done with the rbnode, reset them.
+ */
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->dev = qdisc_dev(sch);
+
+ report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);
+
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_drop(skb, sch, &to_free);
+ qdisc_qstats_overlimit(sch);
+ sch->q.qlen--;
+ }
+
+ kfree_skb_list(to_free);
+}
+
+static void timesortedlist_remove(struct Qdisc *sch, struct sk_buff *skb)
{
struct etf_sched_data *q = qdisc_priv(sch);
- rb_erase(&skb->rbnode, &q->head);
+ rb_erase_cached(&skb->rbnode, &q->head);
/* The rbnode field in the skb re-uses these fields; now that
* we are done with the rbnode, reset them.
@@ -201,19 +236,9 @@ static void timesortedlist_erase(struct Qdisc *sch, struct sk_buff *skb,
qdisc_qstats_backlog_dec(sch, skb);
- if (drop) {
- struct sk_buff *to_free = NULL;
+ qdisc_bstats_update(sch, skb);
- report_sock_error(skb, ECANCELED, SO_EE_CODE_TXTIME_MISSED);
-
- qdisc_drop(skb, sch, &to_free);
- kfree_skb_list(to_free);
- qdisc_qstats_overlimit(sch);
- } else {
- qdisc_bstats_update(sch, skb);
-
- q->last = skb->tstamp;
- }
+ q->last = skb->tstamp;
sch->q.qlen--;
}
@@ -232,7 +257,7 @@ static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch)
/* Drop if packet has expired while in queue. */
if (ktime_before(skb->tstamp, now)) {
- timesortedlist_erase(sch, skb, true);
+ timesortedlist_drop(sch, skb, now);
skb = NULL;
goto out;
}
@@ -241,7 +266,7 @@ static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch)
* txtime from deadline to (now + delta).
*/
if (q->deadline_mode) {
- timesortedlist_erase(sch, skb, false);
+ timesortedlist_remove(sch, skb);
skb->tstamp = now;
goto out;
}
@@ -250,7 +275,7 @@ static struct sk_buff *etf_dequeue_timesortedlist(struct Qdisc *sch)
/* Dequeue only if now is within the [txtime - delta, txtime] range. */
if (ktime_after(now, next))
- timesortedlist_erase(sch, skb, false);
+ timesortedlist_remove(sch, skb);
else
skb = NULL;
@@ -386,14 +411,14 @@ static int etf_init(struct Qdisc *sch, struct nlattr *opt,
static void timesortedlist_clear(struct Qdisc *sch)
{
struct etf_sched_data *q = qdisc_priv(sch);
- struct rb_node *p = rb_first(&q->head);
+ struct rb_node *p = rb_first_cached(&q->head);
while (p) {
struct sk_buff *skb = rb_to_skb(p);
p = rb_next(p);
- rb_erase(&skb->rbnode, &q->head);
+ rb_erase_cached(&skb->rbnode, &q->head);
rtnl_kfree_skbs(skb, skb);
sch->q.qlen--;
}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 25a7cf6d380f..1a662f2bb7bb 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -94,6 +94,7 @@ struct fq_sched_data {
u32 flow_refill_delay;
u32 flow_plimit; /* max packets per flow */
unsigned long flow_max_rate; /* optional max rate per flow */
+ u64 ce_threshold;
u32 orphan_mask; /* mask for orphaned skb */
u32 low_rate_threshold;
struct rb_root *fq_root;
@@ -107,6 +108,7 @@ struct fq_sched_data {
u64 stat_gc_flows;
u64 stat_internal_packets;
u64 stat_throttled;
+ u64 stat_ce_mark;
u64 stat_flows_plimit;
u64 stat_pkts_too_long;
u64 stat_allocation_errors;
@@ -412,16 +414,21 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
struct fq_sched_data *q = qdisc_priv(sch);
- u64 now = ktime_get_ns();
struct fq_flow_head *head;
struct sk_buff *skb;
struct fq_flow *f;
unsigned long rate;
u32 plen;
+ u64 now;
+
+ if (!sch->q.qlen)
+ return NULL;
skb = fq_dequeue_head(sch, &q->internal);
if (skb)
goto out;
+
+ now = ktime_get_ns();
fq_check_throttled(q, now);
begin:
head = &q->new_flows;
@@ -454,6 +461,11 @@ begin:
fq_flow_set_throttled(q, f);
goto begin;
}
+ if (time_next_packet &&
+ (s64)(now - time_next_packet - q->ce_threshold) > 0) {
+ INET_ECN_set_ce(skb);
+ q->stat_ce_mark++;
+ }
}
skb = fq_dequeue_head(sch, f);
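The new CE-marking branch flags packets whose scheduled departure time lies more than ce_threshold in the past, giving fq a coarse ECN-based latency signal. Note the comparison style: the difference is computed in u64 and cast to s64, which stays correct even when the operands wrap relative to each other. The predicate in isolation (a sketch, not kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    /* True when 't' lies more than 'thresh' nanoseconds before 'now'. */
    static bool past_threshold(uint64_t now, uint64_t t, uint64_t thresh)
    {
            return (int64_t)(now - t - thresh) > 0;
    }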
@@ -657,6 +669,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
[TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
[TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
+ [TCA_FQ_CE_THRESHOLD] = { .type = NLA_U32 },
};
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
@@ -736,6 +749,10 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
if (tb[TCA_FQ_ORPHAN_MASK])
q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
+ if (tb[TCA_FQ_CE_THRESHOLD])
+ q->ce_threshold = (u64)NSEC_PER_USEC *
+ nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);
+
if (!err) {
sch_tree_unlock(sch);
err = fq_resize(sch, fq_log);
@@ -786,6 +803,10 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
q->fq_trees_log = ilog2(1024);
q->orphan_mask = 1024 - 1;
q->low_rate_threshold = 550000 / 8;
+
+ /* Default ce_threshold of 4294 seconds */
+ q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;
+
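The default is the largest value expressible through the netlink attribute: TCA_FQ_CE_THRESHOLD is a u32 in microseconds, so ~0U * NSEC_PER_USEC is 4294967295 us, roughly 4294.97 s, which effectively disables CE marking until the user configures something smaller. A quick user-space check of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t ns = 1000ULL * UINT32_MAX;   /* NSEC_PER_USEC * ~0U */
            printf("%llu ns = %.2f s\n",
                   (unsigned long long)ns, ns / 1e9);  /* ~4294.97 s */
            return 0;
    }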
qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
if (opt)
@@ -799,6 +820,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct fq_sched_data *q = qdisc_priv(sch);
+ u64 ce_threshold = q->ce_threshold;
struct nlattr *opts;
opts = nla_nest_start(skb, TCA_OPTIONS);
@@ -807,6 +829,8 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
/* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */
+ do_div(ce_threshold, NSEC_PER_USEC);
+
if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
@@ -819,6 +843,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
q->low_rate_threshold) ||
+ nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
goto nla_put_failure;
@@ -848,6 +873,7 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
st.throttled_flows = q->throttled_flows;
st.unthrottle_latency_ns = min_t(unsigned long,
q->unthrottle_latency_ns, ~0U);
+ st.ce_mark = q->stat_ce_mark;
sch_tree_unlock(sch);
return gnet_stats_copy_app(d, &st, sizeof(st));
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index de1663f7d3ad..66ba2ce2320f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1372,7 +1372,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
if (!tp_head) {
RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
/* Wait for flying RCU callback before it is freed. */
- rcu_barrier_bh();
+ rcu_barrier();
return;
}
@@ -1380,10 +1380,10 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
&miniqp->miniq1 : &miniqp->miniq2;
/* We need to make sure that readers won't see the miniq
- * we are about to modify. So wait until previous call_rcu_bh callback
+ * we are about to modify. So wait until previous call_rcu callback
* is done.
*/
- rcu_barrier_bh();
+ rcu_barrier();
miniq->filter_list = tp_head;
rcu_assign_pointer(*miniqp->p_miniq, miniq);
@@ -1392,7 +1392,7 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
* block potential new user of miniq_old until all readers
* are not seeing it.
*/
- call_rcu_bh(&miniq_old->rcu, mini_qdisc_rcu_func);
+ call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func);
}
EXPORT_SYMBOL(mini_qdisc_pair_swap);
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 4a042abf844c..234afbf9115b 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -23,19 +23,23 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
+#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>
#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)
+#define GRED_VQ_RED_FLAGS (TC_RED_ECN | TC_RED_HARDDROP)
+
struct gred_sched_data;
struct gred_sched;
struct gred_sched_data {
u32 limit; /* HARD maximal queue length */
u32 DP; /* the drop parameters */
- u32 bytesin; /* bytes seen on virtualQ so far*/
+ u32 red_flags; /* virtualQ version of red_flags */
+ u64 bytesin; /* bytes seen on virtualQ so far*/
u32 packetsin; /* packets seen on virtualQ so far*/
u32 backlog; /* bytes on the virtualQ */
u8 prio; /* the prio of this vq */
@@ -139,14 +143,27 @@ static inline void gred_store_wred_set(struct gred_sched *table,
table->wred_set.qidlestart = q->vars.qidlestart;
}
-static inline int gred_use_ecn(struct gred_sched *t)
+static int gred_use_ecn(struct gred_sched_data *q)
+{
+ return q->red_flags & TC_RED_ECN;
+}
+
+static int gred_use_harddrop(struct gred_sched_data *q)
{
- return t->red_flags & TC_RED_ECN;
+ return q->red_flags & TC_RED_HARDDROP;
}
-static inline int gred_use_harddrop(struct gred_sched *t)
+static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
- return t->red_flags & TC_RED_HARDDROP;
+ unsigned int i;
+
+ /* Local per-vq flags couldn't have been set unless the global ones are 0 */
+ if (table->red_flags)
+ return false;
+ for (i = 0; i < MAX_DPs; i++)
+ if (table->tab[i] && table->tab[i]->red_flags)
+ return true;
+ return false;
}
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -212,7 +229,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
case RED_PROB_MARK:
qdisc_qstats_overlimit(sch);
- if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+ if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++;
goto congestion_drop;
}
@@ -222,7 +239,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
case RED_HARD_MARK:
qdisc_qstats_overlimit(sch);
- if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+ if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
!INET_ECN_set_ce(skb)) {
q->stats.forced_drop++;
goto congestion_drop;
@@ -295,15 +312,103 @@ static void gred_reset(struct Qdisc *sch)
}
}
+static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
+{
+ struct gred_sched *table = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ struct tc_gred_qopt_offload opt = {
+ .command = command,
+ .handle = sch->handle,
+ .parent = sch->parent,
+ };
+
+ if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+ return;
+
+ if (command == TC_GRED_REPLACE) {
+ unsigned int i;
+
+ opt.set.grio_on = gred_rio_mode(table);
+ opt.set.wred_on = gred_wred_mode(table);
+ opt.set.dp_cnt = table->DPs;
+ opt.set.dp_def = table->def;
+
+ for (i = 0; i < table->DPs; i++) {
+ struct gred_sched_data *q = table->tab[i];
+
+ if (!q)
+ continue;
+ opt.set.tab[i].present = true;
+ opt.set.tab[i].limit = q->limit;
+ opt.set.tab[i].prio = q->prio;
+ opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
+ opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
+ opt.set.tab[i].is_ecn = gred_use_ecn(q);
+ opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
+ opt.set.tab[i].probability = q->parms.max_P;
+ opt.set.tab[i].backlog = &q->backlog;
+ }
+ opt.set.qstats = &sch->qstats;
+ }
+
+ dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
+}
+
+static int gred_offload_dump_stats(struct Qdisc *sch)
+{
+ struct gred_sched *table = qdisc_priv(sch);
+ struct tc_gred_qopt_offload *hw_stats;
+ unsigned int i;
+ int ret;
+
+ hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
+ if (!hw_stats)
+ return -ENOMEM;
+
+ hw_stats->command = TC_GRED_STATS;
+ hw_stats->handle = sch->handle;
+ hw_stats->parent = sch->parent;
+
+ for (i = 0; i < MAX_DPs; i++)
+ if (table->tab[i])
+ hw_stats->stats.xstats[i] = &table->tab[i]->stats;
+
+ ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
+ /* Even if the driver returns a failure, adjust the stats - in case
+ * offload ended but the driver still wants to adjust the values.
+ */
+ for (i = 0; i < MAX_DPs; i++) {
+ if (!table->tab[i])
+ continue;
+ table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
+ table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
+ table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
+
+ _bstats_update(&sch->bstats,
+ hw_stats->stats.bstats[i].bytes,
+ hw_stats->stats.bstats[i].packets);
+ sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
+ sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
+ sch->qstats.drops += hw_stats->stats.qstats[i].drops;
+ sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
+ sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
+ }
+
+ kfree(hw_stats);
+ return ret;
+}
+
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
kfree(q);
}
-static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
+static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
+ struct netlink_ext_ack *extack)
{
struct gred_sched *table = qdisc_priv(sch);
struct tc_gred_sopt *sopt;
+ bool red_flags_changed;
int i;
if (!dps)
@@ -311,13 +416,28 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
sopt = nla_data(dps);
- if (sopt->DPs > MAX_DPs || sopt->DPs == 0 ||
- sopt->def_DP >= sopt->DPs)
+ if (sopt->DPs > MAX_DPs) {
+ NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
+ return -EINVAL;
+ }
+ if (sopt->DPs == 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "number of virtual queues can't be 0");
+ return -EINVAL;
+ }
+ if (sopt->def_DP >= sopt->DPs) {
+ NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
return -EINVAL;
+ }
+ if (sopt->flags && gred_per_vq_red_flags_used(table)) {
+ NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
+ return -EINVAL;
+ }
sch_tree_lock(sch);
table->DPs = sopt->DPs;
table->def = sopt->def_DP;
+ red_flags_changed = table->red_flags != sopt->flags;
table->red_flags = sopt->flags;
/*
@@ -337,6 +457,12 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
gred_disable_wred_mode(table);
}
+ if (red_flags_changed)
+ for (i = 0; i < table->DPs; i++)
+ if (table->tab[i])
+ table->tab[i]->red_flags =
+ table->red_flags & GRED_VQ_RED_FLAGS;
+
for (i = table->DPs; i < MAX_DPs; i++) {
if (table->tab[i]) {
pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
@@ -346,25 +472,30 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
}
}
+ gred_offload(sch, TC_GRED_REPLACE);
return 0;
}
static inline int gred_change_vq(struct Qdisc *sch, int dp,
struct tc_gred_qopt *ctl, int prio,
u8 *stab, u32 max_P,
- struct gred_sched_data **prealloc)
+ struct gred_sched_data **prealloc,
+ struct netlink_ext_ack *extack)
{
struct gred_sched *table = qdisc_priv(sch);
struct gred_sched_data *q = table->tab[dp];
- if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
+ if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
return -EINVAL;
+ }
if (!q) {
table->tab[dp] = q = *prealloc;
*prealloc = NULL;
if (!q)
return -ENOMEM;
+ q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
}
q->DP = dp;
@@ -384,14 +515,127 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
return 0;
}
+static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
+ [TCA_GRED_VQ_DP] = { .type = NLA_U32 },
+ [TCA_GRED_VQ_FLAGS] = { .type = NLA_U32 },
+};
+
+static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
+ [TCA_GRED_VQ_ENTRY] = { .type = NLA_NESTED },
+};
+
static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
[TCA_GRED_PARMS] = { .len = sizeof(struct tc_gred_qopt) },
[TCA_GRED_STAB] = { .len = 256 },
[TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) },
[TCA_GRED_MAX_P] = { .type = NLA_U32 },
[TCA_GRED_LIMIT] = { .type = NLA_U32 },
+ [TCA_GRED_VQ_LIST] = { .type = NLA_NESTED },
};
+static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
+{
+ struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
+ u32 dp;
+
+ nla_parse_nested(tb, TCA_GRED_VQ_MAX, entry, gred_vq_policy, NULL);
+
+ dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
+
+ if (tb[TCA_GRED_VQ_FLAGS])
+ table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
+}
+
+static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
+{
+ const struct nlattr *attr;
+ int rem;
+
+ nla_for_each_nested(attr, vqs, rem) {
+ switch (nla_type(attr)) {
+ case TCA_GRED_VQ_ENTRY:
+ gred_vq_apply(table, attr);
+ break;
+ }
+ }
+}
+
+static int gred_vq_validate(struct gred_sched *table, u32 cdp,
+ const struct nlattr *entry,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
+ int err;
+ u32 dp;
+
+ err = nla_parse_nested(tb, TCA_GRED_VQ_MAX, entry, gred_vq_policy,
+ extack);
+ if (err < 0)
+ return err;
+
+ if (!tb[TCA_GRED_VQ_DP]) {
+ NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
+ return -EINVAL;
+ }
+ dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
+ if (dp >= table->DPs) {
+ NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
+ return -EINVAL;
+ }
+ if (dp != cdp && !table->tab[dp]) {
+ NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
+ return -EINVAL;
+ }
+
+ if (tb[TCA_GRED_VQ_FLAGS]) {
+ u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
+
+ if (table->red_flags && table->red_flags != red_flags) {
+ NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
+ return -EINVAL;
+ }
+ if (red_flags & ~GRED_VQ_RED_FLAGS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "invalid RED flags specified");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
+ struct nlattr *vqs, struct netlink_ext_ack *extack)
+{
+ const struct nlattr *attr;
+ int rem, err;
+
+ err = nla_validate_nested(vqs, TCA_GRED_VQ_ENTRY_MAX,
+ gred_vqe_policy, extack);
+ if (err < 0)
+ return err;
+
+ nla_for_each_nested(attr, vqs, rem) {
+ switch (nla_type(attr)) {
+ case TCA_GRED_VQ_ENTRY:
+ err = gred_vq_validate(table, cdp, attr, extack);
+ if (err)
+ return err;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
+ return -EINVAL;
+ }
+ }
+
+ if (rem > 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@@ -406,29 +650,39 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
if (opt == NULL)
return -EINVAL;
- err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
+ err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, extack);
if (err < 0)
return err;
if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
if (tb[TCA_GRED_LIMIT] != NULL)
sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
- return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
+ return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}
if (tb[TCA_GRED_PARMS] == NULL ||
tb[TCA_GRED_STAB] == NULL ||
- tb[TCA_GRED_LIMIT] != NULL)
+ tb[TCA_GRED_LIMIT] != NULL) {
+ NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
return -EINVAL;
+ }
max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
- err = -EINVAL;
ctl = nla_data(tb[TCA_GRED_PARMS]);
stab = nla_data(tb[TCA_GRED_STAB]);
- if (ctl->DP >= table->DPs)
- goto errout;
+ if (ctl->DP >= table->DPs) {
+ NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
+ return -EINVAL;
+ }
+
+ if (tb[TCA_GRED_VQ_LIST]) {
+ err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
+ extack);
+ if (err)
+ return err;
+ }
if (gred_rio_mode(table)) {
if (ctl->prio == 0) {
@@ -448,9 +702,13 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
sch_tree_lock(sch);
- err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
+ err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
+ extack);
if (err < 0)
- goto errout_locked;
+ goto err_unlock_free;
+
+ if (tb[TCA_GRED_VQ_LIST])
+ gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);
if (gred_rio_mode(table)) {
gred_disable_wred_mode(table);
@@ -458,12 +716,15 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
gred_enable_wred_mode(table);
}
- err = 0;
+ sch_tree_unlock(sch);
+ kfree(prealloc);
+
+ gred_offload(sch, TC_GRED_REPLACE);
+ return 0;
-errout_locked:
+err_unlock_free:
sch_tree_unlock(sch);
kfree(prealloc);
-errout:
return err;
}
@@ -476,12 +737,15 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt,
if (!opt)
return -EINVAL;
- err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
+ err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, extack);
if (err < 0)
return err;
- if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
+ if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "virtual queue configuration can't be specified at initialization time");
return -EINVAL;
+ }
if (tb[TCA_GRED_LIMIT])
sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
@@ -489,13 +753,13 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt,
sch->limit = qdisc_dev(sch)->tx_queue_len
* psched_mtu(qdisc_dev(sch));
- return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
+ return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct gred_sched *table = qdisc_priv(sch);
- struct nlattr *parms, *opts = NULL;
+ struct nlattr *parms, *vqs, *opts = NULL;
int i;
u32 max_p[MAX_DPs];
struct tc_gred_sopt sopt = {
@@ -505,6 +769,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
.flags = table->red_flags,
};
+ if (gred_offload_dump_stats(sch))
+ goto nla_put_failure;
+
opts = nla_nest_start(skb, TCA_OPTIONS);
if (opts == NULL)
goto nla_put_failure;
@@ -522,6 +789,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
goto nla_put_failure;
+ /* Old style all-in-one dump of VQs */
parms = nla_nest_start(skb, TCA_GRED_PARMS);
if (parms == NULL)
goto nla_put_failure;
@@ -572,6 +840,58 @@ append_opt:
nla_nest_end(skb, parms);
+ /* Dump the VQs again, in a more structured way */
+ vqs = nla_nest_start(skb, TCA_GRED_VQ_LIST);
+ if (!vqs)
+ goto nla_put_failure;
+
+ for (i = 0; i < MAX_DPs; i++) {
+ struct gred_sched_data *q = table->tab[i];
+ struct nlattr *vq;
+
+ if (!q)
+ continue;
+
+ vq = nla_nest_start(skb, TCA_GRED_VQ_ENTRY);
+ if (!vq)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
+ goto nla_put_failure;
+
+ /* Stats */
+ if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
+ TCA_GRED_VQ_PAD))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
+ gred_backlog(table, q, sch)))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
+ q->stats.prob_drop))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
+ q->stats.prob_mark))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
+ q->stats.forced_drop))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
+ q->stats.forced_mark))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, vq);
+ }
+ nla_nest_end(skb, vqs);
+
return nla_nest_end(skb, opts);
nla_put_failure:
@@ -588,6 +908,7 @@ static void gred_destroy(struct Qdisc *sch)
if (table->tab[i])
gred_destroy_vq(table->tab[i]);
}
+ gred_offload(sch, TC_GRED_DESTROY);
}
static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index f20f3a0f8424..203659bc3906 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -38,9 +38,8 @@ static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}
-static void mq_offload_stats(struct Qdisc *sch)
+static int mq_offload_stats(struct Qdisc *sch)
{
- struct net_device *dev = qdisc_dev(sch);
struct tc_mq_qopt_offload opt = {
.command = TC_MQ_STATS,
.handle = sch->handle,
@@ -50,8 +49,7 @@ static void mq_offload_stats(struct Qdisc *sch)
},
};
- if (tc_can_offload(dev) && dev->netdev_ops->ndo_setup_tc)
- dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
+ return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}
static void mq_destroy(struct Qdisc *sch)
@@ -171,9 +169,8 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
spin_unlock_bh(qdisc_lock(qdisc));
}
- mq_offload_stats(sch);
- return 0;
+ return mq_offload_stats(sch);
}
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
@@ -196,6 +193,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
struct Qdisc **old, struct netlink_ext_ack *extack)
{
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+ struct tc_mq_qopt_offload graft_offload;
struct net_device *dev = qdisc_dev(sch);
if (dev->flags & IFF_UP)
@@ -206,6 +204,14 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
if (dev->flags & IFF_UP)
dev_activate(dev);
+
+ graft_offload.handle = sch->handle;
+ graft_offload.graft_params.queue = cl - 1;
+ graft_offload.graft_params.child_handle = new ? new->handle : 0;
+ graft_offload.command = TC_MQ_GRAFT;
+
+ qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
+ TC_SETUP_QDISC_MQ, &graft_offload, extack);
return 0;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 2c38e3d07924..75046ec72144 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -77,6 +77,10 @@ struct netem_sched_data {
/* internal t(ime)fifo qdisc uses t_root and sch->limit */
struct rb_root t_root;
+ /* a linear queue; reduces rbtree rebalancing when jitter is low */
+ struct sk_buff *t_head;
+ struct sk_buff *t_tail;
+
/* optional qdisc for classful handling (NULL at netem init) */
struct Qdisc *qdisc;
@@ -369,26 +373,39 @@ static void tfifo_reset(struct Qdisc *sch)
rb_erase(&skb->rbnode, &q->t_root);
rtnl_kfree_skbs(skb, skb);
}
+
+ rtnl_kfree_skbs(q->t_head, q->t_tail);
+ q->t_head = NULL;
+ q->t_tail = NULL;
}
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
u64 tnext = netem_skb_cb(nskb)->time_to_send;
- struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
- while (*p) {
- struct sk_buff *skb;
-
- parent = *p;
- skb = rb_to_skb(parent);
- if (tnext >= netem_skb_cb(skb)->time_to_send)
- p = &parent->rb_right;
+ if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
+ if (q->t_tail)
+ q->t_tail->next = nskb;
else
- p = &parent->rb_left;
+ q->t_head = nskb;
+ q->t_tail = nskb;
+ } else {
+ struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
+
+ while (*p) {
+ struct sk_buff *skb;
+
+ parent = *p;
+ skb = rb_to_skb(parent);
+ if (tnext >= netem_skb_cb(skb)->time_to_send)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&nskb->rbnode, parent, p);
+ rb_insert_color(&nskb->rbnode, &q->t_root);
}
- rb_link_node(&nskb->rbnode, parent, p);
- rb_insert_color(&nskb->rbnode, &q->t_root);
sch->q.qlen++;
}
@@ -431,6 +448,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
int count = 1;
int rc = NET_XMIT_SUCCESS;
+ /* Do not fool qdisc_drop_all() */
+ skb->prev = NULL;
+
/* Random duplication */
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
++count;
@@ -530,9 +550,16 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
t_skb = skb_rb_last(&q->t_root);
t_last = netem_skb_cb(t_skb);
if (!last ||
- t_last->time_to_send > last->time_to_send) {
+ t_last->time_to_send > last->time_to_send)
+ last = t_last;
+ }
+ if (q->t_tail) {
+ struct netem_skb_cb *t_last =
+ netem_skb_cb(q->t_tail);
+
+ if (!last ||
+ t_last->time_to_send > last->time_to_send)
last = t_last;
- }
}
if (last) {
@@ -611,11 +638,38 @@ static void get_slot_next(struct netem_sched_data *q, u64 now)
q->slot.bytes_left = q->slot_config.max_bytes;
}
+static struct sk_buff *netem_peek(struct netem_sched_data *q)
+{
+ struct sk_buff *skb = skb_rb_first(&q->t_root);
+ u64 t1, t2;
+
+ if (!skb)
+ return q->t_head;
+ if (!q->t_head)
+ return skb;
+
+ t1 = netem_skb_cb(skb)->time_to_send;
+ t2 = netem_skb_cb(q->t_head)->time_to_send;
+ if (t1 < t2)
+ return skb;
+ return q->t_head;
+}
+
+static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
+{
+ if (skb == q->t_head) {
+ q->t_head = skb->next;
+ if (!q->t_head)
+ q->t_tail = NULL;
+ } else {
+ rb_erase(&skb->rbnode, &q->t_root);
+ }
+}
+
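netem now keeps two structures: a plain FIFO (t_head/t_tail) for skbs whose send times already arrive in order, the common case when jitter is low, and the rbtree only for out-of-order arrivals. Peeking takes the earlier of the two heads, as netem_peek() does above. The selection logic in miniature (toy types, illustration only):

    #include <stddef.h>
    #include <stdint.h>

    struct pkt { uint64_t time_to_send; struct pkt *next; };

    /* Return whichever candidate departs first; either may be NULL. */
    static struct pkt *peek_earliest(struct pkt *tree_first, struct pkt *list_head)
    {
            if (!tree_first)
                    return list_head;
            if (!list_head)
                    return tree_first;
            return tree_first->time_to_send < list_head->time_to_send ?
                   tree_first : list_head;
    }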
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
struct netem_sched_data *q = qdisc_priv(sch);
struct sk_buff *skb;
- struct rb_node *p;
tfifo_dequeue:
skb = __qdisc_dequeue_head(&sch->q);
@@ -625,20 +679,18 @@ deliver:
qdisc_bstats_update(sch, skb);
return skb;
}
- p = rb_first(&q->t_root);
- if (p) {
+ skb = netem_peek(q);
+ if (skb) {
u64 time_to_send;
u64 now = ktime_get_ns();
- skb = rb_to_skb(p);
-
/* is more time remaining? */
time_to_send = netem_skb_cb(skb)->time_to_send;
if (q->slot.slot_next && q->slot.slot_next < time_to_send)
get_slot_next(q, now);
- if (time_to_send <= now && q->slot.slot_next <= now) {
- rb_erase(p, &q->t_root);
+ if (time_to_send <= now && q->slot.slot_next <= now) {
+ netem_erase_head(q, skb);
sch->q.qlen--;
qdisc_qstats_backlog_dec(sch, skb);
skb->next = NULL;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f8af98621179..cdf68706e40f 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -220,7 +220,6 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
qdisc_tree_reduce_backlog(child, child->q.qlen,
child->qstats.backlog);
- qdisc_put(child);
}
for (i = oldbands; i < q->bands; i++) {
@@ -230,6 +229,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
}
sch_tree_unlock(sch);
+
+ for (i = q->bands; i < oldbands; i++)
+ qdisc_put(q->queues[i]);
return 0;
}
@@ -251,7 +253,6 @@ static int prio_init(struct Qdisc *sch, struct nlattr *opt,
static int prio_dump_offload(struct Qdisc *sch)
{
- struct net_device *dev = qdisc_dev(sch);
struct tc_prio_qopt_offload hw_stats = {
.command = TC_PRIO_STATS,
.handle = sch->handle,
@@ -263,21 +264,8 @@ static int prio_dump_offload(struct Qdisc *sch)
},
},
};
- int err;
-
- sch->flags &= ~TCQ_F_OFFLOADED;
- if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
- return 0;
-
- err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO,
- &hw_stats);
- if (err == -EOPNOTSUPP)
- return 0;
- if (!err)
- sch->flags |= TCQ_F_OFFLOADED;
-
- return err;
+ return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_PRIO, &hw_stats);
}
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -309,43 +297,22 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
{
struct prio_sched_data *q = qdisc_priv(sch);
struct tc_prio_qopt_offload graft_offload;
- struct net_device *dev = qdisc_dev(sch);
unsigned long band = arg - 1;
- bool any_qdisc_is_offloaded;
- int err;
if (new == NULL)
new = &noop_qdisc;
*old = qdisc_replace(sch, new, &q->queues[band]);
- if (!tc_can_offload(dev))
- return 0;
-
graft_offload.handle = sch->handle;
graft_offload.parent = sch->parent;
graft_offload.graft_params.band = band;
graft_offload.graft_params.child_handle = new->handle;
graft_offload.command = TC_PRIO_GRAFT;
- err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO,
- &graft_offload);
-
- /* Don't report error if the graft is part of destroy operation. */
- if (err && new != &noop_qdisc) {
- /* Don't report error if the parent, the old child and the new
- * one are not offloaded.
- */
- any_qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
- any_qdisc_is_offloaded |= new->flags & TCQ_F_OFFLOADED;
- if (*old)
- any_qdisc_is_offloaded |= (*old)->flags &
- TCQ_F_OFFLOADED;
-
- if (any_qdisc_is_offloaded)
- NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
- }
-
+ qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
+ TC_SETUP_QDISC_PRIO, &graft_offload,
+ extack);
return 0;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3ce6c0a2c493..9df9942340ea 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -166,7 +166,9 @@ static int red_offload(struct Qdisc *sch, bool enable)
opt.set.min = q->parms.qth_min >> q->parms.Wlog;
opt.set.max = q->parms.qth_max >> q->parms.Wlog;
opt.set.probability = q->parms.max_P;
+ opt.set.limit = q->limit;
opt.set.is_ecn = red_use_ecn(q);
+ opt.set.is_harddrop = red_use_harddrop(q);
opt.set.qstats = &sch->qstats;
} else {
opt.command = TC_RED_DESTROY;
@@ -193,10 +195,10 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
static int red_change(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
+ struct Qdisc *old_child = NULL, *child = NULL;
struct red_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_RED_MAX + 1];
struct tc_red_qopt *ctl;
- struct Qdisc *child = NULL;
int err;
u32 max_P;
@@ -233,7 +235,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
if (child) {
qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
q->qdisc->qstats.backlog);
- qdisc_put(q->qdisc);
+ old_child = q->qdisc;
q->qdisc = child;
}
@@ -252,7 +254,11 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
red_start_of_idle_period(&q->vars);
sch_tree_unlock(sch);
+
red_offload(sch, true);
+
+ if (old_child)
+ qdisc_put(old_child);
return 0;
}
@@ -279,9 +285,8 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt,
return red_change(sch, opt, extack);
}
-static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
+static int red_dump_offload_stats(struct Qdisc *sch)
{
- struct net_device *dev = qdisc_dev(sch);
struct tc_red_qopt_offload hw_stats = {
.command = TC_RED_STATS,
.handle = sch->handle,
@@ -291,22 +296,8 @@ static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
.stats.qstats = &sch->qstats,
},
};
- int err;
-
- sch->flags &= ~TCQ_F_OFFLOADED;
-
- if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
- return 0;
-
- err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
- &hw_stats);
- if (err == -EOPNOTSUPP)
- return 0;
- if (!err)
- sch->flags |= TCQ_F_OFFLOADED;
-
- return err;
+ return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
@@ -324,7 +315,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
};
int err;
- err = red_dump_offload_stats(sch, &opt);
+ err = red_dump_offload_stats(sch);
if (err)
goto nla_put_failure;
@@ -377,6 +368,21 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl,
return 0;
}
+static void red_graft_offload(struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ struct netlink_ext_ack *extack)
+{
+ struct tc_red_qopt_offload graft_offload = {
+ .handle = sch->handle,
+ .parent = sch->parent,
+ .child_handle = new->handle,
+ .command = TC_RED_GRAFT,
+ };
+
+ qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
+ TC_SETUP_QDISC_RED, &graft_offload, extack);
+}
+
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct Qdisc **old, struct netlink_ext_ack *extack)
{
@@ -386,6 +392,8 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
new = &noop_qdisc;
*old = qdisc_replace(sch, new, &q->qdisc);
+
+ red_graft_offload(sch, new, *old, extack);
return 0;
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 6a28b96e779e..201c888604e4 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -118,9 +118,6 @@ static struct sctp_association *sctp_association_init(
asoc->flowlabel = sp->flowlabel;
asoc->dscp = sp->dscp;
- /* Initialize default path MTU. */
- asoc->pathmtu = sp->pathmtu;
-
/* Set association default SACK delay */
asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
asoc->sackfreq = sp->sackfreq;
@@ -135,6 +132,8 @@ static struct sctp_association *sctp_association_init(
*/
asoc->max_burst = sp->max_burst;
+ asoc->subscribe = sp->subscribe;
+
/* initialize association timers */
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
@@ -252,6 +251,10 @@ static struct sctp_association *sctp_association_init(
0, gfp))
goto fail_init;
+ /* Initialize default path MTU. */
+ asoc->pathmtu = sp->pathmtu;
+ sctp_assoc_update_frag_point(asoc);
+
/* Assume that the peer would support both address types unless we are
* told otherwise.
*/
@@ -434,7 +437,7 @@ static void sctp_association_destroy(struct sctp_association *asoc)
WARN_ON(atomic_read(&asoc->rmem_alloc));
- kfree(asoc);
+ kfree_rcu(asoc, rcu);
SCTP_DBG_OBJCNT_DEC(assoc);
}
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 7df3704982f5..ebf28adba789 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -337,6 +337,34 @@ int sctp_bind_addr_match(struct sctp_bind_addr *bp,
return match;
}
+int sctp_bind_addrs_check(struct sctp_sock *sp,
+ struct sctp_sock *sp2, int cnt2)
+{
+ struct sctp_bind_addr *bp2 = &sp2->ep->base.bind_addr;
+ struct sctp_bind_addr *bp = &sp->ep->base.bind_addr;
+ struct sctp_sockaddr_entry *laddr, *laddr2;
+ bool exist = false;
+ int cnt = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+ list_for_each_entry_rcu(laddr2, &bp2->address_list, list) {
+ if (sp->pf->af->cmp_addr(&laddr->a, &laddr2->a) &&
+ laddr->valid && laddr2->valid) {
+ exist = true;
+ goto next;
+ }
+ }
+ cnt = 0;
+ break;
+next:
+ cnt++;
+ }
+ rcu_read_unlock();
+
+ return (cnt == cnt2) ? 0 : (exist ? -EEXIST : 1);
+}
+
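sctp_bind_addrs_check() returns a tri-state that the reuseport hashing code below relies on: 0 when the two sockets bind exactly the same address set, -EEXIST when the sets differ but overlap, and 1 when they are fully disjoint. A toy model of the same walk, with ints standing in for addresses (illustrative only; the kernel compares union sctp_addr entries under RCU and takes the second set's count from the caller):

    #include <errno.h>

    static int addrs_check(const int *a, int na, const int *b, int nb)
    {
            int exist = 0, cnt = 0, i, j;

            for (i = 0; i < na; i++) {
                    int found = 0;

                    for (j = 0; j < nb; j++) {
                            if (a[i] == b[j]) {
                                    found = exist = 1;
                                    break;
                            }
                    }
                    if (!found) {
                            cnt = 0;        /* any miss voids the "same set" case */
                            break;
                    }
                    cnt++;
            }
            return cnt == nb ? 0 : (exist ? -EEXIST : 1);
    }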
/* Does the address 'addr' conflict with any addresses in
* the bp.
*/
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index ce8087846f05..64bef313d436 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -86,11 +86,10 @@ void sctp_datamsg_free(struct sctp_datamsg *msg)
/* Final destruction of datamsg memory. */
static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
{
+ struct sctp_association *asoc = NULL;
struct list_head *pos, *temp;
struct sctp_chunk *chunk;
- struct sctp_sock *sp;
struct sctp_ulpevent *ev;
- struct sctp_association *asoc = NULL;
int error = 0, notify;
/* If we failed, we may need to notify. */
@@ -108,9 +107,8 @@ static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
else
error = asoc->outqueue.error;
- sp = sctp_sk(asoc->base.sk);
- notify = sctp_ulpevent_type_enabled(SCTP_SEND_FAILED,
- &sp->subscribe);
+ notify = sctp_ulpevent_type_enabled(asoc->subscribe,
+ SCTP_SEND_FAILED);
}
/* Generate a SEND FAILED event only if enabled. */
@@ -191,6 +189,12 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
* the packet
*/
max_data = asoc->frag_point;
+ if (unlikely(!max_data)) {
+ max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),
+ sctp_datachk_len(&asoc->stream));
+ pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%zu)",
+ __func__, asoc, max_data);
+ }
/* If the peer requested that we authenticate DATA chunks
* we need to account for bundling of the AUTH chunks along with
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5c36a99882ed..d7a649d240e5 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -57,6 +57,7 @@
#include <net/sctp/checksum.h>
#include <net/net_namespace.h>
#include <linux/rhashtable.h>
+#include <net/sock_reuseport.h>
/* Forward declarations for internal helpers. */
static int sctp_rcv_ootb(struct sk_buff *);
@@ -65,8 +66,10 @@ static struct sctp_association *__sctp_rcv_lookup(struct net *net,
const union sctp_addr *paddr,
const union sctp_addr *laddr,
struct sctp_transport **transportp);
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
- const union sctp_addr *laddr);
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
+ struct net *net, struct sk_buff *skb,
+ const union sctp_addr *laddr,
+ const union sctp_addr *daddr);
static struct sctp_association *__sctp_lookup_association(
struct net *net,
const union sctp_addr *local,
@@ -171,7 +174,7 @@ int sctp_rcv(struct sk_buff *skb)
asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
if (!asoc)
- ep = __sctp_rcv_lookup_endpoint(net, &dest);
+ ep = __sctp_rcv_lookup_endpoint(net, skb, &dest, &src);
/* Retrieve the common input handling substructure. */
rcvr = asoc ? &asoc->base : &ep->base;
@@ -574,7 +577,7 @@ void sctp_err_finish(struct sock *sk, struct sctp_transport *t)
* is probably better.
*
*/
-void sctp_v4_err(struct sk_buff *skb, __u32 info)
+int sctp_v4_err(struct sk_buff *skb, __u32 info)
{
const struct iphdr *iph = (const struct iphdr *)skb->data;
const int ihlen = iph->ihl * 4;
@@ -599,7 +602,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
skb->transport_header = savesctp;
if (!sk) {
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
- return;
+ return -ENOENT;
}
/* Warning: The sock lock is held. Remember to call
* sctp_err_finish!
@@ -653,6 +656,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
out_unlock:
sctp_err_finish(sk, transport);
+ return 0;
}
/*
@@ -720,43 +724,87 @@ discard:
}
/* Insert endpoint into the hash table. */
-static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
+static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
{
- struct net *net = sock_net(ep->base.sk);
- struct sctp_ep_common *epb;
+ struct sock *sk = ep->base.sk;
+ struct net *net = sock_net(sk);
struct sctp_hashbucket *head;
+ struct sctp_ep_common *epb;
epb = &ep->base;
-
epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
head = &sctp_ep_hashtable[epb->hashent];
+ if (sk->sk_reuseport) {
+ bool any = sctp_is_ep_boundall(sk);
+ struct sctp_ep_common *epb2;
+ struct list_head *list;
+ int cnt = 0, err = 1;
+
+ list_for_each(list, &ep->base.bind_addr.address_list)
+ cnt++;
+
+ sctp_for_each_hentry(epb2, &head->chain) {
+ struct sock *sk2 = epb2->sk;
+
+ if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
+ !uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
+ !sk2->sk_reuseport)
+ continue;
+
+ err = sctp_bind_addrs_check(sctp_sk(sk2),
+ sctp_sk(sk), cnt);
+ if (!err) {
+ err = reuseport_add_sock(sk, sk2, any);
+ if (err)
+ return err;
+ break;
+ } else if (err < 0) {
+ return err;
+ }
+ }
+
+ if (err) {
+ err = reuseport_alloc(sk, any);
+ if (err)
+ return err;
+ }
+ }
+
write_lock(&head->lock);
hlist_add_head(&epb->node, &head->chain);
write_unlock(&head->lock);
+ return 0;
}
/* Add an endpoint to the hash. Local BH-safe. */
-void sctp_hash_endpoint(struct sctp_endpoint *ep)
+int sctp_hash_endpoint(struct sctp_endpoint *ep)
{
+ int err;
+
local_bh_disable();
- __sctp_hash_endpoint(ep);
+ err = __sctp_hash_endpoint(ep);
local_bh_enable();
+
+ return err;
}
/* Remove endpoint from the hash table. */
static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
- struct net *net = sock_net(ep->base.sk);
+ struct sock *sk = ep->base.sk;
struct sctp_hashbucket *head;
struct sctp_ep_common *epb;
epb = &ep->base;
- epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
+ epb->hashent = sctp_ep_hashfn(sock_net(sk), epb->bind_addr.port);
head = &sctp_ep_hashtable[epb->hashent];
+ if (rcu_access_pointer(sk->sk_reuseport_cb))
+ reuseport_detach_sock(sk);
+
write_lock(&head->lock);
hlist_del_init(&epb->node);
write_unlock(&head->lock);
@@ -770,16 +818,35 @@ void sctp_unhash_endpoint(struct sctp_endpoint *ep)
local_bh_enable();
}
+static inline __u32 sctp_hashfn(const struct net *net, __be16 lport,
+ const union sctp_addr *paddr, __u32 seed)
+{
+ __u32 addr;
+
+ if (paddr->sa.sa_family == AF_INET6)
+ addr = jhash(&paddr->v6.sin6_addr, 16, seed);
+ else
+ addr = (__force __u32)paddr->v4.sin_addr.s_addr;
+
+ return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
+ (__force __u32)lport, net_hash_mix(net), seed);
+}
+
/* Look up an endpoint. */
-static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
- const union sctp_addr *laddr)
+static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
+ struct net *net, struct sk_buff *skb,
+ const union sctp_addr *laddr,
+ const union sctp_addr *paddr)
{
struct sctp_hashbucket *head;
struct sctp_ep_common *epb;
struct sctp_endpoint *ep;
+ struct sock *sk;
+ __be16 lport;
int hash;
- hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port));
+ lport = laddr->v4.sin_port;
+ hash = sctp_ep_hashfn(net, ntohs(lport));
head = &sctp_ep_hashtable[hash];
read_lock(&head->lock);
sctp_for_each_hentry(epb, &head->chain) {
@@ -791,6 +858,15 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net,
ep = sctp_sk(net->sctp.ctl_sock)->ep;
hit:
+ sk = ep->base.sk;
+ if (sk->sk_reuseport) {
+ __u32 phash = sctp_hashfn(net, lport, paddr, 0);
+
+ sk = reuseport_select_sock(sk, phash, skb,
+ sizeof(struct sctphdr));
+ if (sk)
+ ep = sctp_sk(sk)->ep;
+ }
sctp_endpoint_hold(ep);
read_unlock(&head->lock);
return ep;
@@ -829,35 +905,17 @@ out:
static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
{
const struct sctp_transport *t = data;
- const union sctp_addr *paddr = &t->ipaddr;
- const struct net *net = sock_net(t->asoc->base.sk);
- __be16 lport = htons(t->asoc->base.bind_addr.port);
- __u32 addr;
-
- if (paddr->sa.sa_family == AF_INET6)
- addr = jhash(&paddr->v6.sin6_addr, 16, seed);
- else
- addr = (__force __u32)paddr->v4.sin_addr.s_addr;
- return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
- (__force __u32)lport, net_hash_mix(net), seed);
+ return sctp_hashfn(sock_net(t->asoc->base.sk),
+ htons(t->asoc->base.bind_addr.port),
+ &t->ipaddr, seed);
}
static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
{
const struct sctp_hash_cmp_arg *x = data;
- const union sctp_addr *paddr = x->paddr;
- const struct net *net = x->net;
- __be16 lport = x->lport;
- __u32 addr;
- if (paddr->sa.sa_family == AF_INET6)
- addr = jhash(&paddr->v6.sin6_addr, 16, seed);
- else
- addr = (__force __u32)paddr->v4.sin_addr.s_addr;
-
- return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
- (__force __u32)lport, net_hash_mix(net), seed);
+ return sctp_hashfn(x->net, x->lport, x->paddr, seed);
}
static const struct rhashtable_params sctp_hash_params = {
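
Both rhashtable callbacks above collapse into the shared sctp_hashfn() helper rather than duplicating the jhash_3words() expression. A loose user-space model of that consolidation; the mixer below is an arbitrary stand-in, not the kernel's jhash:

#include <stdint.h>
#include <stdio.h>

/* Arbitrary 3-word mixer so the sketch runs; the kernel uses
 * Bob Jenkins' jhash_3words() here.
 */
static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
	uint32_t h = seed ^ 0x9e3779b9u;

	h = (h ^ a) * 0x85ebca6bu;
	h = (h ^ b) * 0xc2b2ae35u;
	h = (h ^ c) * 0x27d4eb2fu;
	return h ^ (h >> 16);
}

/* The refactor in the patch: one hash helper shared by the
 * rhashtable object/key hash callbacks and the new reuseport
 * lookup, instead of open-coded copies of the same expression.
 */
static uint32_t sctp_hashfn_model(uint32_t addr, uint16_t lport,
				  uint16_t pport, uint32_t seed)
{
	return mix3(addr, ((uint32_t)pport << 16) | lport, 0, seed);
}

int main(void)
{
	printf("%08x\n", (unsigned)sctp_hashfn_model(0x7f000001, 80, 5000, 0));
	return 0;
}
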
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index fc6c5e4bffa5..b9ed271b7ef7 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -101,6 +101,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
addr->a.v6.sin6_port = 0;
+ addr->a.v6.sin6_flowinfo = 0;
addr->a.v6.sin6_addr = ifa->addr;
addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
addr->valid = 1;
@@ -138,7 +139,7 @@ static struct notifier_block sctp_inet6addr_notifier = {
};
/* ICMP error handler. */
-static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct inet6_dev *idev;
@@ -147,7 +148,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct sctp_transport *transport;
struct ipv6_pinfo *np;
__u16 saveip, savesctp;
- int err;
+ int err, ret = 0;
struct net *net = dev_net(skb->dev);
idev = in6_dev_get(skb->dev);
@@ -163,6 +164,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
skb->transport_header = savesctp;
if (!sk) {
__ICMP6_INC_STATS(net, idev, ICMP6_MIB_INERRORS);
+ ret = -ENOENT;
goto out;
}
@@ -202,6 +204,8 @@ out_unlock:
out:
if (likely(idev != NULL))
in6_dev_put(idev);
+
+ return ret;
}
static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
diff --git a/net/sctp/output.c b/net/sctp/output.c
index b0e74a3e77ec..025f48e14a91 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -410,6 +410,7 @@ static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
head->truesize += skb->truesize;
head->data_len += skb->len;
head->len += skb->len;
+ refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
__skb_header_release(skb);
}
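
The one-line fix charges the appended fragment's truesize to the owning socket, so the head's eventual destructor releases exactly what was charged. A toy model of that balance, assuming simplified skb/sock structs:

#include <stdio.h>

struct sock_model { int wmem_alloc; };
struct skb_model  { int len, truesize; struct sock_model *sk; };

/* When a fragment is merged under a GSO head, its truesize must also
 * be charged to the socket's write-memory counter; otherwise freeing
 * the head releases more than was ever accounted.
 */
static void gso_append(struct skb_model *head, struct skb_model *frag)
{
	head->len      += frag->len;
	head->truesize += frag->truesize;
	head->sk->wmem_alloc += frag->truesize;   /* the added line */
}

int main(void)
{
	struct sock_model sk = { .wmem_alloc = 0 };
	struct skb_model head = { .len = 100, .truesize = 1000, .sk = &sk };
	struct skb_model frag = { .len = 50,  .truesize = 600,  .sk = &sk };

	sk.wmem_alloc += head.truesize;   /* charge at allocation */
	gso_append(&head, &frag);         /* merges frag, charges 600 more */
	sk.wmem_alloc -= head.truesize;   /* destructor of the merged head */
	printf("balance: %d\n", sk.wmem_alloc);   /* 0: no underflow */
	return 0;
}
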
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index c0817f7a8964..a8c4c33377bc 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -53,7 +53,7 @@
int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \
void *arg) { \
int error = 0; \
- enum sctp_event event_type; union sctp_subtype subtype; \
+ enum sctp_event_type event_type; union sctp_subtype subtype; \
enum sctp_state state; \
struct sctp_endpoint *ep; \
\
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 4a4fd1971255..f4ac6c592e13 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2462,6 +2462,9 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
asoc->c.sinit_max_instreams, gfp))
goto clean_up;
+ /* Update frag_point when stream_interleave may get changed. */
+ sctp_assoc_update_frag_point(asoc);
+
if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
goto clean_up;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 85d393090238..1d143bc3f73d 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -52,7 +52,7 @@
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
-static int sctp_cmd_interpreter(enum sctp_event event_type,
+static int sctp_cmd_interpreter(enum sctp_event_type event_type,
union sctp_subtype subtype,
enum sctp_state state,
struct sctp_endpoint *ep,
@@ -61,7 +61,7 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
enum sctp_disposition status,
struct sctp_cmd_seq *commands,
gfp_t gfp);
-static int sctp_side_effects(enum sctp_event event_type,
+static int sctp_side_effects(enum sctp_event_type event_type,
union sctp_subtype subtype,
enum sctp_state state,
struct sctp_endpoint *ep,
@@ -623,7 +623,7 @@ static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
struct sctp_association *asoc,
- enum sctp_event event_type,
+ enum sctp_event_type event_type,
union sctp_subtype subtype,
struct sctp_chunk *chunk,
unsigned int error)
@@ -1162,7 +1162,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc)
* If you want to understand all of lksctp, this is a
* good place to start.
*/
-int sctp_do_sm(struct net *net, enum sctp_event event_type,
+int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
union sctp_subtype subtype, enum sctp_state state,
struct sctp_endpoint *ep, struct sctp_association *asoc,
void *event_arg, gfp_t gfp)
@@ -1199,7 +1199,7 @@ int sctp_do_sm(struct net *net, enum sctp_event event_type,
/*****************************************************************
* This is the master state function side effect processing function.
*****************************************************************/
-static int sctp_side_effects(enum sctp_event event_type,
+static int sctp_side_effects(enum sctp_event_type event_type,
union sctp_subtype subtype,
enum sctp_state state,
struct sctp_endpoint *ep,
@@ -1285,7 +1285,7 @@ bail:
********************************************************************/
/* This is the side-effect interpreter. */
-static int sctp_cmd_interpreter(enum sctp_event event_type,
+static int sctp_cmd_interpreter(enum sctp_event_type event_type,
union sctp_subtype subtype,
enum sctp_state state,
struct sctp_endpoint *ep,
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 691d9dc620e3..d239b94aa48c 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -79,7 +79,7 @@ static const struct sctp_sm_table_entry bug = {
const struct sctp_sm_table_entry *sctp_sm_lookup_event(
struct net *net,
- enum sctp_event event_type,
+ enum sctp_event_type event_type,
enum sctp_state state,
union sctp_subtype event_subtype)
{
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index bf618d1b41fd..f93c3cf9e567 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2230,7 +2230,7 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (sp->recvrcvinfo)
sctp_ulpevent_read_rcvinfo(event, msg);
/* Check if we allow SCTP_SNDRCVINFO. */
- if (sp->subscribe.sctp_data_io_event)
+ if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_DATA_IO_EVENT))
sctp_ulpevent_read_sndrcvinfo(event, msg);
err = copied;
@@ -2304,22 +2304,33 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
unsigned int optlen)
{
+ struct sctp_event_subscribe subscribe;
+ __u8 *sn_type = (__u8 *)&subscribe;
+ struct sctp_sock *sp = sctp_sk(sk);
struct sctp_association *asoc;
- struct sctp_ulpevent *event;
+ int i;
if (optlen > sizeof(struct sctp_event_subscribe))
return -EINVAL;
- if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
+
+ if (copy_from_user(&subscribe, optval, optlen))
return -EFAULT;
+ for (i = 0; i < optlen; i++)
+ sctp_ulpevent_type_set(&sp->subscribe, SCTP_SN_TYPE_BASE + i,
+ sn_type[i]);
+
+ list_for_each_entry(asoc, &sp->ep->asocs, asocs)
+ asoc->subscribe = sctp_sk(sk)->subscribe;
+
/* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
* if there is no data to be sent or retransmit, the stack will
* immediately send up this notification.
*/
- if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
- &sctp_sk(sk)->subscribe)) {
- asoc = sctp_id2assoc(sk, 0);
+ if (sctp_ulpevent_type_enabled(sp->subscribe, SCTP_SENDER_DRY_EVENT)) {
+ struct sctp_ulpevent *event;
+ asoc = sctp_id2assoc(sk, 0);
if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
event = sctp_ulpevent_make_sender_dry_event(asoc,
GFP_USER | __GFP_NOWARN);
@@ -3324,8 +3335,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
__u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) :
sizeof(struct sctp_data_chunk);
- min_len = sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT,
- datasize);
+ min_len = sctp_min_frag_point(sp, datasize);
max_len = SCTP_MAX_CHUNK_LEN - datasize;
if (val < min_len || val > max_len)
@@ -4261,6 +4271,57 @@ static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
return 0;
}
+static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
+ unsigned int optlen)
+{
+ struct sctp_association *asoc;
+ struct sctp_ulpevent *event;
+ struct sctp_event param;
+ int retval = 0;
+
+ if (optlen < sizeof(param)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ optlen = sizeof(param);
+ if (copy_from_user(&param, optval, optlen)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (param.se_type < SCTP_SN_TYPE_BASE ||
+ param.se_type > SCTP_SN_TYPE_MAX) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ asoc = sctp_id2assoc(sk, param.se_assoc_id);
+ if (!asoc) {
+ sctp_ulpevent_type_set(&sctp_sk(sk)->subscribe,
+ param.se_type, param.se_on);
+ goto out;
+ }
+
+ sctp_ulpevent_type_set(&asoc->subscribe, param.se_type, param.se_on);
+
+ if (param.se_type == SCTP_SENDER_DRY_EVENT && param.se_on) {
+ if (sctp_outq_is_empty(&asoc->outqueue)) {
+ event = sctp_ulpevent_make_sender_dry_event(asoc,
+ GFP_USER | __GFP_NOWARN);
+ if (!event) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
+ asoc->stream.si->enqueue_event(&asoc->ulpq, event);
+ }
+ }
+
+out:
+ return retval;
+}
+
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@@ -4458,6 +4519,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_REUSE_PORT:
retval = sctp_setsockopt_reuse_port(sk, optval, optlen);
break;
+ case SCTP_EVENT:
+ retval = sctp_setsockopt_event(sk, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -4706,7 +4770,7 @@ static int sctp_init_sock(struct sock *sk)
/* Initialize default event subscriptions. By default, all the
* options are off.
*/
- memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe));
+ sp->subscribe = 0;
/* Default Peer Address Parameters. These defaults can
* be modified via SCTP_PEER_ADDR_PARAMS
@@ -5251,14 +5315,24 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
int __user *optlen)
{
+ struct sctp_event_subscribe subscribe;
+ __u8 *sn_type = (__u8 *)&subscribe;
+ int i;
+
if (len == 0)
return -EINVAL;
if (len > sizeof(struct sctp_event_subscribe))
len = sizeof(struct sctp_event_subscribe);
if (put_user(len, optlen))
return -EFAULT;
- if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
+
+ for (i = 0; i < len; i++)
+ sn_type[i] = sctp_ulpevent_type_enabled(sctp_sk(sk)->subscribe,
+ SCTP_SN_TYPE_BASE + i);
+
+ if (copy_to_user(optval, &subscribe, len))
return -EFAULT;
+
return 0;
}
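
sctp_ulpevent_type_set()/sctp_ulpevent_type_enabled() treat the subscription as a per-event bitmask in a __u16, replacing the old struct of per-event bytes, so copying it to an association is a single assignment. A user-space sketch of the two helpers under assumed names:

#include <stdio.h>

#define SN_TYPE_BASE 1   /* stand-in for SCTP_SN_TYPE_BASE */

/* One bit per event type in an unsigned short. */
static void type_set(unsigned short *subscribe, int type, int on)
{
	if (on)
		*subscribe |= 1u << (type - SN_TYPE_BASE);
	else
		*subscribe &= ~(1u << (type - SN_TYPE_BASE));
}

static int type_enabled(unsigned short subscribe, int type)
{
	return !!(subscribe & (1u << (type - SN_TYPE_BASE)));
}

int main(void)
{
	unsigned short subscribe = 0;

	type_set(&subscribe, 3, 1);
	printf("%d %d\n", type_enabled(subscribe, 3),
	       type_enabled(subscribe, 4));   /* 1 0 */
	return 0;
}
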
@@ -7393,6 +7467,37 @@ static int sctp_getsockopt_reuse_port(struct sock *sk, int len,
return 0;
}
+static int sctp_getsockopt_event(struct sock *sk, int len, char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_association *asoc;
+ struct sctp_event param;
+ __u16 subscribe;
+
+ if (len < sizeof(param))
+ return -EINVAL;
+
+ len = sizeof(param);
+ if (copy_from_user(&param, optval, len))
+ return -EFAULT;
+
+ if (param.se_type < SCTP_SN_TYPE_BASE ||
+ param.se_type > SCTP_SN_TYPE_MAX)
+ return -EINVAL;
+
+ asoc = sctp_id2assoc(sk, param.se_assoc_id);
+ subscribe = asoc ? asoc->subscribe : sctp_sk(sk)->subscribe;
+ param.se_on = sctp_ulpevent_type_enabled(subscribe, param.se_type);
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+
+ if (copy_to_user(optval, &param, len))
+ return -EFAULT;
+
+ return 0;
+}
+
static int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -7591,6 +7696,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_REUSE_PORT:
retval = sctp_getsockopt_reuse_port(sk, len, optval, optlen);
break;
+ case SCTP_EVENT:
+ retval = sctp_getsockopt_event(sk, len, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -7628,8 +7736,10 @@ static struct sctp_bind_bucket *sctp_bucket_create(
static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
- bool reuse = (sk->sk_reuse || sctp_sk(sk)->reuse);
+ struct sctp_sock *sp = sctp_sk(sk);
+ bool reuse = (sk->sk_reuse || sp->reuse);
struct sctp_bind_hashbucket *head; /* hash list */
+ kuid_t uid = sock_i_uid(sk);
struct sctp_bind_bucket *pp;
unsigned short snum;
int ret;
@@ -7705,7 +7815,10 @@ pp_found:
pr_debug("%s: found a possible match\n", __func__);
- if (pp->fastreuse && reuse && sk->sk_state != SCTP_SS_LISTENING)
+ if ((pp->fastreuse && reuse &&
+ sk->sk_state != SCTP_SS_LISTENING) ||
+ (pp->fastreuseport && sk->sk_reuseport &&
+ uid_eq(pp->fastuid, uid)))
goto success;
/* Run through the list of sockets bound to the port
@@ -7719,16 +7832,18 @@ pp_found:
* in an endpoint.
*/
sk_for_each_bound(sk2, &pp->owner) {
- struct sctp_endpoint *ep2;
- ep2 = sctp_sk(sk2)->ep;
+ struct sctp_sock *sp2 = sctp_sk(sk2);
+ struct sctp_endpoint *ep2 = sp2->ep;
if (sk == sk2 ||
- (reuse && (sk2->sk_reuse || sctp_sk(sk2)->reuse) &&
- sk2->sk_state != SCTP_SS_LISTENING))
+ (reuse && (sk2->sk_reuse || sp2->reuse) &&
+ sk2->sk_state != SCTP_SS_LISTENING) ||
+ (sk->sk_reuseport && sk2->sk_reuseport &&
+ uid_eq(uid, sock_i_uid(sk2))))
continue;
- if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
- sctp_sk(sk2), sctp_sk(sk))) {
+ if (sctp_bind_addr_conflict(&ep2->base.bind_addr,
+ addr, sp2, sp)) {
ret = (long)sk2;
goto fail_unlock;
}
@@ -7751,19 +7866,32 @@ pp_not_found:
pp->fastreuse = 1;
else
pp->fastreuse = 0;
- } else if (pp->fastreuse &&
- (!reuse || sk->sk_state == SCTP_SS_LISTENING))
- pp->fastreuse = 0;
+
+ if (sk->sk_reuseport) {
+ pp->fastreuseport = 1;
+ pp->fastuid = uid;
+ } else {
+ pp->fastreuseport = 0;
+ }
+ } else {
+ if (pp->fastreuse &&
+ (!reuse || sk->sk_state == SCTP_SS_LISTENING))
+ pp->fastreuse = 0;
+
+ if (pp->fastreuseport &&
+ (!sk->sk_reuseport || !uid_eq(pp->fastuid, uid)))
+ pp->fastreuseport = 0;
+ }
/* We are set, so fill up all the data in the hash table
* entry, tie the socket list information with the rest of the
* sockets FIXME: Blurry, NPI (ipg).
*/
success:
- if (!sctp_sk(sk)->bind_hash) {
+ if (!sp->bind_hash) {
inet_sk(sk)->inet_num = snum;
sk_add_bind_node(sk, &pp->owner);
- sctp_sk(sk)->bind_hash = pp;
+ sp->bind_hash = pp;
}
ret = 0;
@@ -7836,8 +7964,7 @@ static int sctp_listen_start(struct sock *sk, int backlog)
}
sk->sk_max_ack_backlog = backlog;
- sctp_hash_endpoint(ep);
- return 0;
+ return sctp_hash_endpoint(ep);
}
/*
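
The fastreuseport/fastuid fields cache whether every socket already bound to the port is a same-uid SO_REUSEPORT socket, letting a matching bind skip the per-socket conflict scan. A small user-space sketch of that cache, under invented struct names:

#include <stdbool.h>
#include <stdio.h>

struct port_bucket {
	bool fastreuseport;
	unsigned int fastuid;
	int owners;
};

/* Fast path: valid cache, matching uid, SO_REUSEPORT set. */
static bool fast_path_ok(const struct port_bucket *pp,
			 bool sk_reuseport, unsigned int uid)
{
	return pp->owners && pp->fastreuseport &&
	       sk_reuseport && pp->fastuid == uid;
}

static void bind_one(struct port_bucket *pp, bool sk_reuseport,
		     unsigned int uid)
{
	if (!pp->owners) {                 /* first socket on the port */
		pp->fastreuseport = sk_reuseport;
		pp->fastuid = uid;
	} else if (pp->fastreuseport &&
		   (!sk_reuseport || pp->fastuid != uid)) {
		pp->fastreuseport = false; /* cache no longer valid */
	}
	pp->owners++;
}

int main(void)
{
	struct port_bucket pp = { 0 };

	bind_one(&pp, true, 1000);
	printf("same uid fast path: %d\n", fast_path_ok(&pp, true, 1000));
	printf("other uid fast path: %d\n", fast_path_ok(&pp, true, 1001));
	return 0;
}
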
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index 0a78cdf86463..a6bf21579466 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -140,7 +140,7 @@ static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_ulpevent *cevent;
- struct sk_buff *pos;
+ struct sk_buff *pos, *loc;
pos = skb_peek_tail(&ulpq->reasm);
if (!pos) {
@@ -166,23 +166,30 @@ static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
return;
}
+ loc = NULL;
skb_queue_walk(&ulpq->reasm, pos) {
cevent = sctp_skb2event(pos);
if (event->stream < cevent->stream ||
(event->stream == cevent->stream &&
- MID_lt(event->mid, cevent->mid)))
+ MID_lt(event->mid, cevent->mid))) {
+ loc = pos;
break;
-
+ }
if (event->stream == cevent->stream &&
event->mid == cevent->mid &&
!(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
(event->msg_flags & SCTP_DATA_FIRST_FRAG ||
- event->fsn < cevent->fsn))
+ event->fsn < cevent->fsn)) {
+ loc = pos;
break;
+ }
}
- __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
+ if (!loc)
+ __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
+ else
+ __skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
}
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
@@ -383,7 +390,7 @@ static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
{
struct sctp_ulpevent *cevent;
- struct sk_buff *pos;
+ struct sk_buff *pos, *loc;
pos = skb_peek_tail(&ulpq->lobby);
if (!pos) {
@@ -403,18 +410,25 @@ static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
return;
}
+ loc = NULL;
skb_queue_walk(&ulpq->lobby, pos) {
cevent = (struct sctp_ulpevent *)pos->cb;
- if (cevent->stream > event->stream)
+ if (cevent->stream > event->stream) {
+ loc = pos;
break;
-
+ }
if (cevent->stream == event->stream &&
- MID_lt(event->mid, cevent->mid))
+ MID_lt(event->mid, cevent->mid)) {
+ loc = pos;
break;
+ }
}
- __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
+ if (!loc)
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ else
+ __skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
}
static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
@@ -489,7 +503,7 @@ static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
sk_incoming_cpu_update(sk);
}
- if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
+ if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
goto out_free;
if (skb_list)
@@ -980,17 +994,19 @@ static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
struct sock *sk = ulpq->asoc->base.sk;
struct sctp_ulpevent *ev = NULL;
- if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
- &sctp_sk(sk)->subscribe))
+ if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
+ SCTP_PARTIAL_DELIVERY_EVENT))
return;
ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
sid, mid, flags, gfp);
if (ev) {
+ struct sctp_sock *sp = sctp_sk(sk);
+
__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
- if (!sctp_sk(sk)->data_ready_signalled) {
- sctp_sk(sk)->data_ready_signalled = 1;
+ if (!sp->data_ready_signalled) {
+ sp->data_ready_signalled = 1;
sk->sk_data_ready(sk);
}
}
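
The stream_interleave fixes replace a use of the walk cursor after the loop ends (where it no longer points at a list element) with an explicit loc that stays NULL when the walk falls off the end. A generic singly-linked-list sketch of the same pattern:

#include <stdio.h>

struct node { int key; struct node *next; };

/* Remember the first element that should come after the new one
 * ("loc"); if the walk finishes without finding one, loc stays NULL
 * and the insert becomes a tail append, instead of dereferencing a
 * stale cursor as the old skb_queue_walk() code effectively did.
 */
static void insert_sorted(struct node **head, struct node *n)
{
	struct node **link = head;
	struct node *loc = NULL;   /* first element sorting after n */

	for (struct node *pos = *head; pos; pos = pos->next) {
		if (n->key < pos->key) {
			loc = pos;
			break;
		}
		link = &pos->next;
	}
	n->next = loc;   /* loc == NULL: append at the tail */
	*link = n;
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 5, NULL }, c = { 3, NULL };
	struct node *head = NULL;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);
	insert_sorted(&head, &c);
	for (struct node *p = head; p; p = p->next)
		printf("%d ", p->key);
	printf("\n");               /* 1 3 5 */
	return 0;
}
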
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 331cc734e3db..5dde92101743 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -219,7 +219,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
sk_incoming_cpu_update(sk);
}
/* Check if the user wishes to receive this event. */
- if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
+ if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
goto out_free;
/* If we are in partial delivery mode, post to the lobby until
@@ -1129,16 +1129,16 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
struct sctp_ulpevent *ev = NULL;
- struct sock *sk;
struct sctp_sock *sp;
+ struct sock *sk;
if (!ulpq->pd_mode)
return;
sk = ulpq->asoc->base.sk;
sp = sctp_sk(sk);
- if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
- &sctp_sk(sk)->subscribe))
+ if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
+ SCTP_PARTIAL_DELIVERY_EVENT))
ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
SCTP_PARTIAL_DELIVERY_ABORTED,
0, 0, 0, gfp);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5fbaf1901571..c4da4a78d369 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -147,8 +147,14 @@ static int smc_release(struct socket *sock)
sk->sk_shutdown |= SHUTDOWN_MASK;
}
if (smc->clcsock) {
+ if (smc->use_fallback && sk->sk_state == SMC_LISTEN) {
+ /* wake up clcsock accept */
+ rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+ }
+ mutex_lock(&smc->clcsock_release_lock);
sock_release(smc->clcsock);
smc->clcsock = NULL;
+ mutex_unlock(&smc->clcsock_release_lock);
}
if (smc->use_fallback) {
if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
@@ -205,6 +211,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
spin_lock_init(&smc->conn.send_lock);
sk->sk_prot->hash(sk);
sk_refcnt_debug_inc(sk);
+ mutex_init(&smc->clcsock_release_lock);
return sk;
}
@@ -301,14 +308,17 @@ static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
-/* register a new rmb, optionally send confirm_rkey msg to register with peer */
+/* register a new rmb, send confirm_rkey msg to register with peer */
static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc,
bool conf_rkey)
{
- /* register memory region for new rmb */
- if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
- rmb_desc->regerr = 1;
- return -EFAULT;
+ if (!rmb_desc->wr_reg) {
+ /* register memory region for new rmb */
+ if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
+ rmb_desc->regerr = 1;
+ return -EFAULT;
+ }
+ rmb_desc->wr_reg = 1;
}
if (!conf_rkey)
return 0;
@@ -337,8 +347,8 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
struct smc_clc_msg_decline dclc;
rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
- SMC_CLC_DECLINE);
- return rc;
+ SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
+ return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
}
if (link->llc_confirm_rc)
@@ -365,8 +375,8 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc)
struct smc_clc_msg_decline dclc;
rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
- SMC_CLC_DECLINE);
- return rc;
+ SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
+ return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
}
/* send add link reject message, only one link supported for now */
@@ -535,7 +545,8 @@ static int smc_connect_clc(struct smc_sock *smc, int smc_type,
if (rc)
return rc;
/* receive SMC Accept CLC message */
- return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT);
+ return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT,
+ CLC_WAIT_TIME);
}
/* setup for RDMA connection of client */
@@ -583,8 +594,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
local_contact);
} else {
- if (!smc->conn.rmb_desc->reused &&
- smc_reg_rmb(link, smc->conn.rmb_desc, true))
+ if (smc_reg_rmb(link, smc->conn.rmb_desc, true))
return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
local_contact);
}
@@ -821,7 +831,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
struct socket *new_clcsock = NULL;
struct sock *lsk = &lsmc->sk;
struct sock *new_sk;
- int rc;
+ int rc = -EINVAL;
release_sock(lsk);
new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
@@ -834,7 +844,10 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
}
*new_smc = smc_sk(new_sk);
- rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+ mutex_lock(&lsmc->clcsock_release_lock);
+ if (lsmc->clcsock)
+ rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+ mutex_unlock(&lsmc->clcsock_release_lock);
lock_sock(lsk);
if (rc < 0)
lsk->sk_err = -rc;
@@ -968,8 +981,8 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
struct smc_clc_msg_decline dclc;
rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
- SMC_CLC_DECLINE);
- return rc;
+ SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
+ return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
}
if (link->llc_confirm_resp_rc)
@@ -989,8 +1002,8 @@ static int smc_serv_conf_first_link(struct smc_sock *smc)
struct smc_clc_msg_decline dclc;
rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
- SMC_CLC_DECLINE);
- return rc;
+ SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
+ return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
}
smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);
@@ -1145,10 +1158,8 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
if (local_contact != SMC_FIRST_CONTACT) {
- if (!new_smc->conn.rmb_desc->reused) {
- if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
- return SMC_CLC_DECL_ERR_REGRMB;
- }
+ if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
+ return SMC_CLC_DECL_ERR_REGRMB;
}
smc_rmb_sync_sg_for_device(&new_smc->conn);
@@ -1184,7 +1195,6 @@ static int smc_listen_rdma_finish(struct smc_sock *new_smc,
return 0;
decline:
- mutex_unlock(&smc_create_lgr_pending);
smc_listen_decline(new_smc, reason_code, local_contact);
return reason_code;
}
@@ -1225,7 +1235,7 @@ static void smc_listen_work(struct work_struct *work)
*/
pclc = (struct smc_clc_msg_proposal *)&buf;
reason_code = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
- SMC_CLC_PROPOSAL);
+ SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
if (reason_code) {
smc_listen_decline(new_smc, reason_code, 0);
return;
@@ -1275,7 +1285,7 @@ static void smc_listen_work(struct work_struct *work)
/* receive SMC Confirm CLC message */
reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
- SMC_CLC_CONFIRM);
+ SMC_CLC_CONFIRM, CLC_WAIT_TIME);
if (reason_code) {
mutex_unlock(&smc_create_lgr_pending);
smc_listen_decline(new_smc, reason_code, local_contact);
@@ -1284,8 +1294,10 @@ static void smc_listen_work(struct work_struct *work)
/* finish worker */
if (!ism_supported) {
- if (smc_listen_rdma_finish(new_smc, &cclc, local_contact))
+ if (smc_listen_rdma_finish(new_smc, &cclc, local_contact)) {
+ mutex_unlock(&smc_create_lgr_pending);
return;
+ }
}
smc_conn_save_peer_info(new_smc, &cclc);
mutex_unlock(&smc_create_lgr_pending);
@@ -1357,7 +1369,6 @@ static int smc_listen(struct socket *sock, int backlog)
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = SMC_LISTEN;
- INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
sock_hold(sk); /* sock_hold in tcp_listen_worker */
if (!schedule_work(&smc->tcp_listen_work))
sock_put(sk);
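
clcsock_release_lock closes a race between smc_release() freeing clcsock and the accept worker dereferencing it. A minimal pthread sketch of the pattern, with an int pointer standing in for the socket and -22 for the patch's -EINVAL default:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *clcsock;             /* stands in for smc->clcsock */

/* Release path: free and NULL the shared pointer under the lock. */
static void release_path(void)
{
	pthread_mutex_lock(&lock);
	free(clcsock);
	clcsock = NULL;
	pthread_mutex_unlock(&lock);
}

/* Accept path: re-check for NULL under the same lock before use. */
static int accept_path(void)
{
	int rc = -22;            /* -EINVAL, as in the patch */

	pthread_mutex_lock(&lock);
	if (clcsock)
		rc = *clcsock;   /* would call kernel_accept() here */
	pthread_mutex_unlock(&lock);
	return rc;
}

int main(void)
{
	clcsock = malloc(sizeof(*clcsock));
	*clcsock = 0;
	printf("before release: %d\n", accept_path());
	release_path();
	printf("after release: %d\n", accept_path());
	return 0;
}
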
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 08786ace6010..5721416d0605 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -219,6 +219,10 @@ struct smc_sock { /* smc sock container */
* started, waiting for unsent
* data to be sent
*/
+ struct mutex clcsock_release_lock;
+ /* protects clcsock of a listen
+ * socket
+ */
};
static inline struct smc_sock *smc_sk(const struct sock *sk)
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 89c3a8c7859a..776e9dfc915d 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -265,7 +265,7 @@ out:
* clcsock error, -EINTR, -ECONNRESET, -EPROTO otherwise.
*/
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
- u8 expected_type)
+ u8 expected_type, unsigned long timeout)
{
long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
struct sock *clc_sk = smc->clcsock->sk;
@@ -285,7 +285,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
* sizeof(struct smc_clc_msg_hdr)
*/
krflags = MSG_PEEK | MSG_WAITALL;
- smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
+ clc_sk->sk_rcvtimeo = timeout;
iov_iter_kvec(&msg.msg_iter, READ, &vec, 1,
sizeof(struct smc_clc_msg_hdr));
len = sock_recvmsg(smc->clcsock, &msg, krflags);
@@ -297,7 +297,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
if (clc_sk->sk_err) {
reason_code = -clc_sk->sk_err;
- smc->sk.sk_err = clc_sk->sk_err;
+ if (clc_sk->sk_err == EAGAIN &&
+ expected_type == SMC_CLC_DECLINE)
+ clc_sk->sk_err = 0; /* reset for fallback usage */
+ else
+ smc->sk.sk_err = clc_sk->sk_err;
goto out;
}
if (!len) { /* peer has performed orderly shutdown */
@@ -306,7 +310,8 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
goto out;
}
if (len < 0) {
- smc->sk.sk_err = -len;
+ if (len != -EAGAIN || expected_type != SMC_CLC_DECLINE)
+ smc->sk.sk_err = -len;
reason_code = len;
goto out;
}
@@ -346,7 +351,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
out:
- smc->clcsock->sk->sk_rcvtimeo = rcvtimeo;
+ clc_sk->sk_rcvtimeo = rcvtimeo;
return reason_code;
}
@@ -374,10 +379,8 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
sizeof(struct smc_clc_msg_decline));
if (len < sizeof(struct smc_clc_msg_decline))
- smc->sk.sk_err = EPROTO;
- if (len < 0)
- smc->sk.sk_err = -len;
- return sock_error(&smc->sk);
+ len = -EPROTO;
+ return len > 0 ? 0 : len;
}
/* send CLC PROPOSAL message across internal TCP socket */
@@ -536,7 +539,6 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
struct smc_link *link;
struct msghdr msg;
struct kvec vec;
- int rc = 0;
int len;
memset(&aclc, 0, sizeof(aclc));
@@ -589,13 +591,8 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
vec.iov_len = ntohs(aclc.hdr.length);
len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1,
ntohs(aclc.hdr.length));
- if (len < ntohs(aclc.hdr.length)) {
- if (len >= 0)
- new_smc->sk.sk_err = EPROTO;
- else
- new_smc->sk.sk_err = new_smc->clcsock->sk->sk_err;
- rc = sock_error(&new_smc->sk);
- }
+ if (len < ntohs(aclc.hdr.length))
+ len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err;
- return rc;
+ return len > 0 ? 0 : len;
}
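
The CLC send paths now map their result to a plain return code instead of poking sk_err. A tiny model of the convention used on the accept side (-71 standing in for -EPROTO):

#include <stdio.h>

/* A short positive write becomes -EPROTO, a negative result passes
 * through, and a full-length result collapses to 0 for the caller.
 */
static int send_rc(int len, int expected)
{
	if (len >= 0 && len < expected)
		len = -71;            /* -EPROTO */
	return len > 0 ? 0 : len;
}

int main(void)
{
	printf("%d %d %d\n",
	       send_rc(32, 32),       /* 0: full message sent */
	       send_rc(10, 32),       /* -71: truncated */
	       send_rc(-32, 32));     /* errno passed through */
	return 0;
}
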
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 18da89b681c2..24658e8c0de4 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -27,6 +27,7 @@
#define SMC_TYPE_D 1 /* SMC-D only */
#define SMC_TYPE_B 3 /* SMC-R and SMC-D */
#define CLC_WAIT_TIME (6 * HZ) /* max. wait time on clcsock */
+#define CLC_WAIT_TIME_SHORT HZ /* short wait time on clcsock */
#define SMC_CLC_DECL_MEM 0x01010000 /* insufficient memory resources */
#define SMC_CLC_DECL_TIMEOUT_CL 0x02010000 /* timeout w4 QP confirm link */
#define SMC_CLC_DECL_TIMEOUT_AL 0x02020000 /* timeout w4 QP add link */
@@ -182,7 +183,7 @@ struct smcd_dev;
int smc_clc_prfx_match(struct socket *clcsock,
struct smc_clc_msg_proposal_prefix *prop);
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
- u8 expected_type);
+ u8 expected_type, unsigned long timeout);
int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info);
int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
struct smc_ib_device *smcibdev, u8 ibport, u8 gid[],
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 1c9fa7f0261a..35c1cdc93e1c 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -149,6 +149,8 @@ static int smc_link_send_delete(struct smc_link *lnk)
return -ENOTCONN;
}
+static void smc_lgr_free(struct smc_link_group *lgr);
+
static void smc_lgr_free_work(struct work_struct *work)
{
struct smc_link_group *lgr = container_of(to_delayed_work(work),
@@ -171,8 +173,11 @@ free:
spin_unlock_bh(&smc_lgr_list.lock);
if (!lgr->is_smcd && !lgr->terminating) {
+ struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+
/* try to send del link msg, on error free lgr immediately */
- if (!smc_link_send_delete(&lgr->lnk[SMC_SINGLE_LINK])) {
+ if (lnk->state == SMC_LNK_ACTIVE &&
+ !smc_link_send_delete(lnk)) {
/* reschedule in case we never receive a response */
smc_lgr_schedule_free_work(lgr);
return;
@@ -295,8 +300,13 @@ static void smc_buf_unuse(struct smc_connection *conn,
conn->sndbuf_desc->used = 0;
if (conn->rmb_desc) {
if (!conn->rmb_desc->regerr) {
- conn->rmb_desc->reused = 1;
conn->rmb_desc->used = 0;
+ if (!lgr->is_smcd) {
+ /* unregister rmb with peer */
+ smc_llc_do_delete_rkey(
+ &lgr->lnk[SMC_SINGLE_LINK],
+ conn->rmb_desc);
+ }
} else {
/* buf registration failed, reuse not possible */
write_lock_bh(&lgr->rmbs_lock);
@@ -410,7 +420,7 @@ static void smc_lgr_free_bufs(struct smc_link_group *lgr)
}
/* remove a link group */
-void smc_lgr_free(struct smc_link_group *lgr)
+static void smc_lgr_free(struct smc_link_group *lgr)
{
smc_lgr_free_bufs(lgr);
if (lgr->is_smcd)
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index cf98f4d6093e..b00287989a3d 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -109,6 +109,9 @@ struct smc_link {
int llc_testlink_time; /* testlink interval */
struct completion llc_confirm_rkey; /* wait 4 rx of cnf rkey */
int llc_confirm_rkey_rc; /* rc from cnf rkey msg */
+ struct completion llc_delete_rkey; /* wait 4 rx of del rkey */
+ int llc_delete_rkey_rc; /* rc from del rkey msg */
+ struct mutex llc_delete_rkey_mutex; /* serialize usage */
};
/* For now we just allow one parallel link per link group. The SMC protocol
@@ -127,7 +130,7 @@ struct smc_buf_desc {
struct page *pages;
int len; /* length of buffer */
u32 used; /* currently used / unused */
- u8 reused : 1; /* new created / reused */
+ u8 wr_reg : 1; /* mem region registered */
u8 regerr : 1; /* err during registration */
union {
struct { /* SMC-R */
@@ -243,7 +246,6 @@ struct smc_sock;
struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;
-void smc_lgr_free(struct smc_link_group *lgr);
void smc_lgr_forget(struct smc_link_group *lgr);
void smc_lgr_terminate(struct smc_link_group *lgr);
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 9c916c709ca7..a6d3623d06f4 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -238,6 +238,29 @@ static int smc_llc_send_confirm_rkey(struct smc_link *link,
return rc;
}
+/* send LLC delete rkey request */
+static int smc_llc_send_delete_rkey(struct smc_link *link,
+ struct smc_buf_desc *rmb_desc)
+{
+ struct smc_llc_msg_delete_rkey *rkeyllc;
+ struct smc_wr_tx_pend_priv *pend;
+ struct smc_wr_buf *wr_buf;
+ int rc;
+
+ rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+ if (rc)
+ return rc;
+ rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
+ memset(rkeyllc, 0, sizeof(*rkeyllc));
+ rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
+ rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey);
+ rkeyllc->num_rkeys = 1;
+ rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+ /* send llc message */
+ rc = smc_wr_tx_send(link, pend);
+ return rc;
+}
+
/* prepare an add link message */
static void smc_llc_prep_add_link(struct smc_llc_msg_add_link *addllc,
struct smc_link *link, u8 mac[], u8 gid[],
@@ -509,7 +532,9 @@ static void smc_llc_rx_delete_rkey(struct smc_link *link,
int i, max;
if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
- /* unused as long as we don't send this type of msg */
+ link->llc_delete_rkey_rc = llc->hd.flags &
+ SMC_LLC_FLAG_RKEY_NEG;
+ complete(&link->llc_delete_rkey);
} else {
max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
for (i = 0; i < max; i++) {
@@ -610,6 +635,8 @@ int smc_llc_link_init(struct smc_link *link)
init_completion(&link->llc_add);
init_completion(&link->llc_add_resp);
init_completion(&link->llc_confirm_rkey);
+ init_completion(&link->llc_delete_rkey);
+ mutex_init(&link->llc_delete_rkey_mutex);
init_completion(&link->llc_testlink_resp);
INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
return 0;
@@ -650,8 +677,11 @@ int smc_llc_do_confirm_rkey(struct smc_link *link,
{
int rc;
+ /* protected by mutex smc_create_lgr_pending */
reinit_completion(&link->llc_confirm_rkey);
- smc_llc_send_confirm_rkey(link, rmb_desc);
+ rc = smc_llc_send_confirm_rkey(link, rmb_desc);
+ if (rc)
+ return rc;
/* receive CONFIRM RKEY response from server over RoCE fabric */
rc = wait_for_completion_interruptible_timeout(&link->llc_confirm_rkey,
SMC_LLC_WAIT_TIME);
@@ -660,6 +690,29 @@ int smc_llc_do_confirm_rkey(struct smc_link *link,
return 0;
}
+/* unregister an rtoken at the remote peer */
+int smc_llc_do_delete_rkey(struct smc_link *link,
+ struct smc_buf_desc *rmb_desc)
+{
+ int rc;
+
+ mutex_lock(&link->llc_delete_rkey_mutex);
+ reinit_completion(&link->llc_delete_rkey);
+ rc = smc_llc_send_delete_rkey(link, rmb_desc);
+ if (rc)
+ goto out;
+ /* receive DELETE RKEY response from server over RoCE fabric */
+ rc = wait_for_completion_interruptible_timeout(&link->llc_delete_rkey,
+ SMC_LLC_WAIT_TIME);
+ if (rc <= 0 || link->llc_delete_rkey_rc)
+ rc = -EFAULT;
+ else
+ rc = 0;
+out:
+ mutex_unlock(&link->llc_delete_rkey_mutex);
+ return rc;
+}
+
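
smc_llc_do_delete_rkey() follows the kernel completion idiom: serialize on a mutex, re-arm the completion, send, then sleep until the response handler completes it. A pthread sketch using a condition variable in place of struct completion; the timeout handling is omitted for brevity:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t serial = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t cmpl_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cmpl = PTHREAD_COND_INITIALIZER;
static bool done;
static int  resp_rc;

/* Response handler: record the rc and signal the completion. */
static void *rx_side(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&cmpl_lock);
	resp_rc = 0;              /* peer accepted the delete */
	done = true;
	pthread_cond_signal(&cmpl);
	pthread_mutex_unlock(&cmpl_lock);
	return NULL;
}

static int do_delete_rkey(void)
{
	pthread_t rx;
	int rc;

	pthread_mutex_lock(&serial);              /* llc_delete_rkey_mutex */
	done = false;                             /* reinit_completion()   */
	pthread_create(&rx, NULL, rx_side, NULL); /* "send" the request    */

	pthread_mutex_lock(&cmpl_lock);
	while (!done)                             /* wait_for_completion() */
		pthread_cond_wait(&cmpl, &cmpl_lock);
	rc = resp_rc ? -14 : 0;                   /* -EFAULT on NAK        */
	pthread_mutex_unlock(&cmpl_lock);

	pthread_join(rx, NULL);
	pthread_mutex_unlock(&serial);
	return rc;
}

int main(void)
{
	printf("delete rkey rc: %d\n", do_delete_rkey());
	return 0;
}
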
/***************************** init, exit, misc ******************************/
static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h
index 9e2ff088e301..461c0c3ef76e 100644
--- a/net/smc/smc_llc.h
+++ b/net/smc/smc_llc.h
@@ -49,6 +49,8 @@ void smc_llc_link_inactive(struct smc_link *link);
void smc_llc_link_clear(struct smc_link *link);
int smc_llc_do_confirm_rkey(struct smc_link *link,
struct smc_buf_desc *rmb_desc);
+int smc_llc_do_delete_rkey(struct smc_link *link,
+ struct smc_buf_desc *rmb_desc);
int smc_llc_init(void) __init;
#endif /* SMC_LLC_H */
diff --git a/net/socket.c b/net/socket.c
index 334fcc617ef2..e89884e2197b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2341,8 +2341,9 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct user_msghdr __user *, msg,
* Linux recvmmsg interface
*/
-int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
- unsigned int flags, struct timespec64 *timeout)
+static int do_recvmmsg(int fd, struct mmsghdr __user *mmsg,
+ unsigned int vlen, unsigned int flags,
+ struct timespec64 *timeout)
{
int fput_needed, err, datagrams;
struct socket *sock;
@@ -2451,25 +2452,32 @@ out_put:
return datagrams;
}
-static int do_sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
- unsigned int vlen, unsigned int flags,
- struct __kernel_timespec __user *timeout)
+int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
+ unsigned int vlen, unsigned int flags,
+ struct __kernel_timespec __user *timeout,
+ struct old_timespec32 __user *timeout32)
{
int datagrams;
struct timespec64 timeout_sys;
- if (flags & MSG_CMSG_COMPAT)
- return -EINVAL;
-
- if (!timeout)
- return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
+ if (timeout && get_timespec64(&timeout_sys, timeout))
+ return -EFAULT;
- if (get_timespec64(&timeout_sys, timeout))
+ if (timeout32 && get_old_timespec32(&timeout_sys, timeout32))
return -EFAULT;
- datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys);
+ if (!timeout && !timeout32)
+ return do_recvmmsg(fd, mmsg, vlen, flags, NULL);
+
+ datagrams = do_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys);
- if (datagrams > 0 && put_timespec64(&timeout_sys, timeout))
+ if (datagrams <= 0)
+ return datagrams;
+
+ if (timeout && put_timespec64(&timeout_sys, timeout))
+ datagrams = -EFAULT;
+
+ if (timeout32 && put_old_timespec32(&timeout_sys, timeout32))
datagrams = -EFAULT;
return datagrams;
@@ -2479,8 +2487,23 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags,
struct __kernel_timespec __user *, timeout)
{
- return do_sys_recvmmsg(fd, mmsg, vlen, flags, timeout);
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+
+ return __sys_recvmmsg(fd, mmsg, vlen, flags, timeout, NULL);
+}
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+SYSCALL_DEFINE5(recvmmsg_time32, int, fd, struct mmsghdr __user *, mmsg,
+ unsigned int, vlen, unsigned int, flags,
+ struct old_timespec32 __user *, timeout)
+{
+ if (flags & MSG_CMSG_COMPAT)
+ return -EINVAL;
+
+ return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL, timeout);
}
+#endif
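
__sys_recvmmsg() now accepts either a 64-bit or a legacy 32-bit user timeout and normalizes both to timespec64 before the common path runs. A hedged user-space model of that normalization (copy_from_user checks elided):

#include <stdint.h>
#include <stdio.h>

struct timespec64     { int64_t tv_sec; long tv_nsec; };
struct old_timespec32 { int32_t tv_sec; int32_t tv_nsec; };

/* At most one of the two timeout pointers is set; both are widened
 * to timespec64 so the common receive path handles a single type.
 */
static void get_timeout(const struct timespec64 *t64,
			const struct old_timespec32 *t32,
			struct timespec64 *out, int *has_timeout)
{
	*has_timeout = 1;
	if (t64) {
		*out = *t64;
	} else if (t32) {
		out->tv_sec = t32->tv_sec;
		out->tv_nsec = t32->tv_nsec;
	} else {
		*has_timeout = 0;
	}
}

int main(void)
{
	struct old_timespec32 legacy = { 5, 0 };
	struct timespec64 ts;
	int has;

	get_timeout(NULL, &legacy, &ts, &has);
	printf("has=%d sec=%lld\n", has, (long long)ts.tv_sec);
	return 0;
}
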
#ifdef __ARCH_WANT_SYS_SOCKETCALL
/* Argument list sizes for sys_socketcall */
@@ -2600,8 +2623,15 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
a[2], true);
break;
case SYS_RECVMMSG:
- err = do_sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2],
- a[3], (struct __kernel_timespec __user *)a[4]);
+ if (IS_ENABLED(CONFIG_64BIT) || !IS_ENABLED(CONFIG_64BIT_TIME))
+ err = __sys_recvmmsg(a0, (struct mmsghdr __user *)a1,
+ a[2], a[3],
+ (struct __kernel_timespec __user *)a[4],
+ NULL);
+ else
+ err = __sys_recvmmsg(a0, (struct mmsghdr __user *)a1,
+ a[2], a[3], NULL,
+ (struct old_timespec32 __user *)a[4]);
break;
case SYS_ACCEPT4:
err = __sys_accept4(a0, (struct sockaddr __user *)a1,
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 5d3f252659f1..ba765473d1f0 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1791,6 +1791,7 @@ priv_release_snd_buf(struct rpc_rqst *rqstp)
for (i=0; i < rqstp->rq_enc_pages_num; i++)
__free_page(rqstp->rq_enc_pages[i]);
kfree(rqstp->rq_enc_pages);
+ rqstp->rq_release_snd_buf = NULL;
}
static int
@@ -1799,6 +1800,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
int first, last, i;
+ if (rqstp->rq_release_snd_buf)
+ rqstp->rq_release_snd_buf(rqstp);
+
if (snd_buf->page_len == 0) {
rqstp->rq_enc_pages_num = 0;
return 0;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index ae3b8145da35..24cbddc44c88 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1915,6 +1915,13 @@ call_connect_status(struct rpc_task *task)
struct rpc_clnt *clnt = task->tk_client;
int status = task->tk_status;
+ /* Check if the task was already transmitted */
+ if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
+ xprt_end_transmit(task);
+ task->tk_action = call_transmit_status;
+ return;
+ }
+
dprint_status(task);
trace_rpc_connect_status(task);
@@ -1945,6 +1952,7 @@ call_connect_status(struct rpc_task *task)
/* retry with existing socket, after a delay */
rpc_delay(task, 3*HZ);
/* fall through */
+ case -ENOTCONN:
case -EAGAIN:
/* Check for timeouts before looping back to call_bind */
case -ETIMEDOUT:
@@ -2302,6 +2310,7 @@ out_retry:
task->tk_status = 0;
/* Note: rpc_verify_header() may have freed the RPC slot */
if (task->tk_rqstp == req) {
+ xdr_free_bvec(&req->rq_rcv_buf);
req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_conditional_disconnect(req->rq_xprt,
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 9062967575c4..7e55cfc69697 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -175,7 +175,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
return -1;
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
!skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev);
+ netdev_rx_csum_fault(skb->dev, skb);
return 0;
no_checksum:
if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 86bea4520c4d..73547d17d3c6 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -67,7 +67,6 @@
*/
static void xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
-static void xprt_connect_status(struct rpc_task *task);
static void xprt_destroy(struct rpc_xprt *xprt);
static DEFINE_SPINLOCK(xprt_list_lock);
@@ -680,7 +679,9 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
/* Try to schedule an autoclose RPC call */
if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
queue_work(xprtiod_workqueue, &xprt->task_cleanup);
- xprt_wake_pending_tasks(xprt, -EAGAIN);
+ else if (xprt->snd_task)
+ rpc_wake_up_queued_task_set_status(&xprt->pending,
+ xprt->snd_task, -ENOTCONN);
spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
@@ -820,46 +821,25 @@ void xprt_connect(struct rpc_task *task)
if (!xprt_connected(xprt)) {
task->tk_timeout = task->tk_rqstp->rq_timeout;
task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
- rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
+ rpc_sleep_on(&xprt->pending, task, NULL);
if (test_bit(XPRT_CLOSING, &xprt->state))
return;
if (xprt_test_and_set_connecting(xprt))
return;
- xprt->stat.connect_start = jiffies;
- xprt->ops->connect(xprt, task);
+ /* Race breaker */
+ if (!xprt_connected(xprt)) {
+ xprt->stat.connect_start = jiffies;
+ xprt->ops->connect(xprt, task);
+ } else {
+ xprt_clear_connecting(xprt);
+ task->tk_status = 0;
+ rpc_wake_up_queued_task(&xprt->pending, task);
+ }
}
xprt_release_write(xprt, task);
}
-static void xprt_connect_status(struct rpc_task *task)
-{
- switch (task->tk_status) {
- case 0:
- dprintk("RPC: %5u xprt_connect_status: connection established\n",
- task->tk_pid);
- break;
- case -ECONNREFUSED:
- case -ECONNRESET:
- case -ECONNABORTED:
- case -ENETUNREACH:
- case -EHOSTUNREACH:
- case -EPIPE:
- case -EAGAIN:
- dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
- break;
- case -ETIMEDOUT:
- dprintk("RPC: %5u xprt_connect_status: connect attempt timed "
- "out\n", task->tk_pid);
- break;
- default:
- dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
- "server %s\n", task->tk_pid, -task->tk_status,
- task->tk_rqstp->rq_xprt->servername);
- task->tk_status = -EIO;
- }
-}
-
enum xprt_xid_rb_cmp {
XID_RB_EQUAL,
XID_RB_LEFT,
@@ -1623,6 +1603,8 @@ xprt_request_init(struct rpc_task *task)
req->rq_snd_buf.buflen = 0;
req->rq_rcv_buf.len = 0;
req->rq_rcv_buf.buflen = 0;
+ req->rq_snd_buf.bvec = NULL;
+ req->rq_rcv_buf.bvec = NULL;
req->rq_release_snd_buf = NULL;
xprt_reset_majortimeo(req);
dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index ae77c71c1f64..f0b3700cec95 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -330,18 +330,16 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
{
size_t i,n;
- if (!(buf->flags & XDRBUF_SPARSE_PAGES))
+ if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
return want;
- if (want > buf->page_len)
- want = buf->page_len;
n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
for (i = 0; i < n; i++) {
if (buf->pages[i])
continue;
buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
if (!buf->pages[i]) {
- buf->page_len = (i * PAGE_SIZE) - buf->page_base;
- return buf->page_len;
+ i *= PAGE_SIZE;
+ return i > buf->page_base ? i - buf->page_base : 0;
}
}
return want;
@@ -378,8 +376,8 @@ static ssize_t
xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
size_t count)
{
- struct kvec kvec = { 0 };
- return xs_read_kvec(sock, msg, flags | MSG_TRUNC, &kvec, count, 0);
+ iov_iter_discard(&msg->msg_iter, READ, count);
+ return sock_recvmsg(sock, msg, flags);
}
static ssize_t
@@ -398,16 +396,17 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
goto out;
if (ret != want)
- goto eagain;
+ goto out;
seek = 0;
} else {
seek -= buf->head[0].iov_len;
offset += buf->head[0].iov_len;
}
- if (seek < buf->page_len) {
- want = xs_alloc_sparse_pages(buf,
- min_t(size_t, count - offset, buf->page_len),
- GFP_NOWAIT);
+
+ want = xs_alloc_sparse_pages(buf,
+ min_t(size_t, count - offset, buf->page_len),
+ GFP_NOWAIT);
+ if (seek < want) {
ret = xs_read_bvec(sock, msg, flags, buf->bvec,
xdr_buf_pagecount(buf),
want + buf->page_base,
@@ -418,12 +417,13 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
goto out;
if (ret != want)
- goto eagain;
+ goto out;
seek = 0;
} else {
- seek -= buf->page_len;
- offset += buf->page_len;
+ seek -= want;
+ offset += want;
}
+
if (seek < buf->tail[0].iov_len) {
want = min_t(size_t, count - offset, buf->tail[0].iov_len);
ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
@@ -433,17 +433,13 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
goto out;
if (ret != want)
- goto eagain;
+ goto out;
} else
offset += buf->tail[0].iov_len;
ret = -EMSGSIZE;
- msg->msg_flags |= MSG_TRUNC;
out:
*read = offset - seek_init;
return ret;
-eagain:
- ret = -EAGAIN;
- goto out;
sock_err:
offset += seek;
goto out;
@@ -486,19 +482,20 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
if (transport->recv.offset == transport->recv.len) {
if (xs_read_stream_request_done(transport))
msg->msg_flags |= MSG_EOR;
- return transport->recv.copied;
+ return read;
}
switch (ret) {
+ default:
+ break;
+ case -EFAULT:
case -EMSGSIZE:
- return transport->recv.copied;
+ msg->msg_flags |= MSG_TRUNC;
+ return read;
case 0:
return -ESHUTDOWN;
- default:
- if (ret < 0)
- return ret;
}
- return -EAGAIN;
+ return ret < 0 ? ret : read;
}
static size_t
@@ -537,7 +534,7 @@ xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
ret = xs_read_stream_request(transport, msg, flags, req);
if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
- xprt_complete_bc_request(req, ret);
+ xprt_complete_bc_request(req, transport->recv.copied);
return ret;
}
@@ -570,7 +567,7 @@ xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
spin_lock(&xprt->queue_lock);
if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
- xprt_complete_rqst(req->rq_task, ret);
+ xprt_complete_rqst(req->rq_task, transport->recv.copied);
xprt_unpin_rqst(req);
out:
spin_unlock(&xprt->queue_lock);
@@ -591,10 +588,8 @@ xs_read_stream(struct sock_xprt *transport, int flags)
if (ret <= 0)
goto out_err;
transport->recv.offset = ret;
- if (ret != want) {
- ret = -EAGAIN;
- goto out_err;
- }
+ if (transport->recv.offset != want)
+ return transport->recv.offset;
transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
RPC_FRAGMENT_SIZE_MASK;
transport->recv.offset -= sizeof(transport->recv.fraghdr);
@@ -602,6 +597,9 @@ xs_read_stream(struct sock_xprt *transport, int flags)
}
switch (be32_to_cpu(transport->recv.calldir)) {
+ default:
+ msg.msg_flags |= MSG_TRUNC;
+ break;
case RPC_CALL:
ret = xs_read_stream_call(transport, &msg, flags);
break;
@@ -616,6 +614,9 @@ xs_read_stream(struct sock_xprt *transport, int flags)
goto out_err;
read += ret;
if (transport->recv.offset < transport->recv.len) {
+ if (!(msg.msg_flags & MSG_TRUNC))
+ return read;
+ msg.msg_flags = 0;
ret = xs_read_discard(transport->sock, &msg, flags,
transport->recv.len - transport->recv.offset);
if (ret <= 0)
@@ -623,7 +624,7 @@ xs_read_stream(struct sock_xprt *transport, int flags)
transport->recv.offset += ret;
read += ret;
if (transport->recv.offset != transport->recv.len)
- return -EAGAIN;
+ return read;
}
if (xs_read_stream_request_done(transport)) {
trace_xs_stream_read_request(transport);
@@ -633,13 +634,7 @@ xs_read_stream(struct sock_xprt *transport, int flags)
transport->recv.len = 0;
return read;
out_err:
- switch (ret) {
- case 0:
- case -ESHUTDOWN:
- xprt_force_disconnect(&transport->xprt);
- return -ESHUTDOWN;
- }
- return ret;
+ return ret != 0 ? ret : -ESHUTDOWN;
}
static void xs_stream_data_receive(struct sock_xprt *transport)
@@ -648,12 +643,12 @@ static void xs_stream_data_receive(struct sock_xprt *transport)
ssize_t ret = 0;
mutex_lock(&transport->recv_mutex);
+ clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
if (transport->sock == NULL)
goto out;
- clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
for (;;) {
ret = xs_read_stream(transport, MSG_DONTWAIT);
- if (ret <= 0)
+ if (ret < 0)
break;
read += ret;
cond_resched();
@@ -1222,6 +1217,8 @@ static void xs_reset_transport(struct sock_xprt *transport)
trace_rpc_socket_close(xprt, sock);
sock_release(sock);
+
+ xprt_disconnect_done(xprt);
}
/**
@@ -1242,8 +1239,6 @@ static void xs_close(struct rpc_xprt *xprt)
xs_reset_transport(transport);
xprt->reestablish_timeout = 0;
-
- xprt_disconnect_done(xprt);
}
static void xs_inject_disconnect(struct rpc_xprt *xprt)
@@ -1345,10 +1340,10 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
int err;
mutex_lock(&transport->recv_mutex);
+ clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
sk = transport->inet;
if (sk == NULL)
goto out;
- clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
for (;;) {
skb = skb_recv_udp(sk, 0, 1, &err);
if (skb == NULL)
@@ -1494,8 +1489,6 @@ static void xs_tcp_state_change(struct sock *sk)
&transport->sock_state))
xprt_clear_connecting(xprt);
clear_bit(XPRT_CLOSING, &xprt->state);
- if (sk->sk_err)
- xprt_wake_pending_tasks(xprt, -sk->sk_err);
/* Trigger the socket release */
xs_tcp_force_close(xprt);
}
@@ -2097,8 +2090,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
trace_rpc_socket_connect(xprt, sock, 0);
status = 0;
out:
- xprt_unlock_connect(xprt, transport);
xprt_clear_connecting(xprt);
+ xprt_unlock_connect(xprt, transport);
xprt_wake_pending_tasks(xprt, status);
}
@@ -2334,8 +2327,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
}
status = -EAGAIN;
out:
- xprt_unlock_connect(xprt, transport);
xprt_clear_connecting(xprt);
+ xprt_unlock_connect(xprt, transport);
xprt_wake_pending_tasks(xprt, status);
}
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 74b9d916a58b..5df9d1138ac9 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -353,34 +353,35 @@ static size_t switchdev_obj_size(const struct switchdev_obj *obj)
return 0;
}
-static int __switchdev_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
+ struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
{
- const struct switchdev_ops *ops = dev->switchdev_ops;
- struct net_device *lower_dev;
- struct list_head *iter;
- int err = -EOPNOTSUPP;
-
- if (ops && ops->switchdev_port_obj_add)
- return ops->switchdev_port_obj_add(dev, obj, trans);
+ int rc;
+ int err;
- /* Switch device port(s) may be stacked under
- * bond/team/vlan dev, so recurse down to add object on
- * each port.
- */
+ struct switchdev_notifier_port_obj_info obj_info = {
+ .obj = obj,
+ .trans = trans,
+ .handled = false,
+ };
- netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = __switchdev_port_obj_add(lower_dev, obj, trans);
- if (err)
- break;
+ rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
+ err = notifier_to_errno(rc);
+ if (err) {
+ WARN_ON(!obj_info.handled);
+ return err;
}
-
- return err;
+ if (!obj_info.handled)
+ return -EOPNOTSUPP;
+ return 0;
}
static int switchdev_port_obj_add_now(struct net_device *dev,
- const struct switchdev_obj *obj)
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
{
struct switchdev_trans trans;
int err;
@@ -397,7 +398,8 @@ static int switchdev_port_obj_add_now(struct net_device *dev,
*/
trans.ph_prepare = true;
- err = __switchdev_port_obj_add(dev, obj, &trans);
+ err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
+ dev, obj, &trans, extack);
if (err) {
/* Prepare phase failed: abort the transaction. Any
* resources reserved in the prepare phase are
@@ -416,7 +418,8 @@ static int switchdev_port_obj_add_now(struct net_device *dev,
*/
trans.ph_prepare = false;
- err = __switchdev_port_obj_add(dev, obj, &trans);
+ err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
+ dev, obj, &trans, extack);
WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
switchdev_trans_items_warn_destroy(dev, &trans);
@@ -429,7 +432,7 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev,
const struct switchdev_obj *obj = data;
int err;
- err = switchdev_port_obj_add_now(dev, obj);
+ err = switchdev_port_obj_add_now(dev, obj, NULL);
if (err && err != -EOPNOTSUPP)
netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
err, obj->id);
@@ -459,38 +462,21 @@ static int switchdev_port_obj_add_defer(struct net_device *dev,
* in case SWITCHDEV_F_DEFER flag is not set.
*/
int switchdev_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj)
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
{
if (obj->flags & SWITCHDEV_F_DEFER)
return switchdev_port_obj_add_defer(dev, obj);
ASSERT_RTNL();
- return switchdev_port_obj_add_now(dev, obj);
+ return switchdev_port_obj_add_now(dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
static int switchdev_port_obj_del_now(struct net_device *dev,
const struct switchdev_obj *obj)
{
- const struct switchdev_ops *ops = dev->switchdev_ops;
- struct net_device *lower_dev;
- struct list_head *iter;
- int err = -EOPNOTSUPP;
-
- if (ops && ops->switchdev_port_obj_del)
- return ops->switchdev_port_obj_del(dev, obj);
-
- /* Switch device port(s) may be stacked under
- * bond/team/vlan dev, so recurse down to delete object on
- * each port.
- */
-
- netdev_for_each_lower_dev(dev, lower_dev, iter) {
- err = switchdev_port_obj_del_now(lower_dev, obj);
- if (err)
- break;
- }
-
- return err;
+ return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
+ dev, obj, NULL, NULL);
}
static void switchdev_port_obj_del_deferred(struct net_device *dev,
@@ -535,6 +521,7 @@ int switchdev_port_obj_del(struct net_device *dev,
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
+static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
/**
* register_switchdev_notifier - Register notifier
@@ -572,10 +559,38 @@ int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
struct switchdev_notifier_info *info)
{
info->dev = dev;
+ info->extack = NULL;
return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
+int register_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+ struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
+
+ return blocking_notifier_chain_register(chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
+
+int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
+{
+ struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
+
+ return blocking_notifier_chain_unregister(chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
+
+int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
+ struct switchdev_notifier_info *info,
+ struct netlink_ext_ack *extack)
+{
+ info->dev = dev;
+ info->extack = extack;
+ return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
+ val, info);
+}
+EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
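[Editor's note: a hedged sketch of how a driver might hook the new blocking chain; the demo_* names are hypothetical, the switchdev calls are the ones added above and switchdev_notifier_info_to_dev() from the existing switchdev API.]
static int demo_switchdev_blocking_event(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
	case SWITCHDEV_PORT_OBJ_DEL:
		/* dispatch on dev to the handle_port_obj helpers below */
		return NOTIFY_DONE;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block demo_switchdev_blocking_nb = {
	.notifier_call = demo_switchdev_blocking_event,
};

/* in the driver's init path:
 * err = register_switchdev_blocking_notifier(&demo_switchdev_blocking_nb);
 */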
+
bool switchdev_port_same_parent_id(struct net_device *a,
struct net_device *b)
{
@@ -595,3 +610,109 @@ bool switchdev_port_same_parent_id(struct net_device *a,
return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
}
EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);
+
+static int __switchdev_handle_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*add_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack))
+{
+ struct netlink_ext_ack *extack;
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int err = -EOPNOTSUPP;
+
+ extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
+
+ if (check_cb(dev)) {
+ /* This flag is only checked if the return value is success. */
+ port_obj_info->handled = true;
+ return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
+ extack);
+ }
+
+ /* Switch ports might be stacked under e.g. a LAG. Ignore the
+ * unsupported devices; another driver might be able to handle them. But
+ * propagate any hard errors to the callers.
+ *
+ * If the driver does its own bookkeeping of stacked ports, it's not
+ * necessary to go through this helper.
+ */
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
+ check_cb, add_cb);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ return err;
+}
+
+int switchdev_handle_port_obj_add(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*add_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack))
+{
+ int err;
+
+ err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
+ add_cb);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
+
+static int __switchdev_handle_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*del_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj))
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+ int err = -EOPNOTSUPP;
+
+ if (check_cb(dev)) {
+ /* This flag is only checked if the return value is success. */
+ port_obj_info->handled = true;
+ return del_cb(dev, port_obj_info->obj);
+ }
+
+ /* Switch ports might be stacked under e.g. a LAG. Ignore the
+ * unsupported devices; another driver might be able to handle them. But
+ * propagate any hard errors to the callers.
+ *
+ * If the driver does its own bookkeeping of stacked ports, it's not
+ * necessary to go through this helper.
+ */
+ netdev_for_each_lower_dev(dev, lower_dev, iter) {
+ err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
+ check_cb, del_cb);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ return err;
+}
+
+int switchdev_handle_port_obj_del(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ int (*del_cb)(struct net_device *dev,
+ const struct switchdev_obj *obj))
+{
+ int err;
+
+ err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
+ del_cb);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
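[Editor's note: a sketch (hypothetical demo_* driver names) of dispatching a port-object notification through the helper exported above: check_cb tells the helper which lower devices belong to this driver, add_cb performs the actual offload.]
static bool demo_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &demo_netdev_ops;	/* hypothetical ops */
}

static int demo_port_obj_add(struct net_device *dev,
			     const struct switchdev_obj *obj,
			     struct switchdev_trans *trans,
			     struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;	/* a real driver would offload obj here */
}

static int demo_handle_port_obj(struct net_device *dev,
				struct switchdev_notifier_port_obj_info *info)
{
	return switchdev_handle_port_obj_add(dev, info,
					     demo_port_dev_check,
					     demo_port_obj_add);
}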
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index aca168f2abb1..c86aba0282af 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -9,7 +9,9 @@ tipc-y += addr.o bcast.o bearer.o \
core.o link.o discover.o msg.o \
name_distr.o subscr.o monitor.o name_table.o net.o \
netlink.o netlink_compat.o node.o socket.o eth_media.o \
- topsrv.o socket.o group.o
+ topsrv.o socket.o group.o trace.o
+
+CFLAGS_trace.o += -I$(src)
tipc-$(CONFIG_TIPC_MEDIA_UDP) += udp_media.o
tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index e65c3a8551e4..fb2c0d8f359f 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -43,6 +43,7 @@
#include "bcast.h"
#include "netlink.h"
#include "udp_media.h"
+#include "trace.h"
#define MAX_ADDR_STR 60
@@ -99,7 +100,7 @@ static struct tipc_media *media_find_id(u8 type)
/**
* tipc_media_addr_printf - record media address in print buffer
*/
-void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
+int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
{
char addr_str[MAX_ADDR_STR];
struct tipc_media *m;
@@ -114,9 +115,10 @@ void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
ret = scnprintf(buf, len, "UNKNOWN(%u)", a->media_id);
for (i = 0; i < sizeof(a->value); i++)
- ret += scnprintf(buf - ret, len + ret,
- "-%02x", a->value[i]);
+ ret += scnprintf(buf + ret, len - ret,
+ "-%x", a->value[i]);
}
+ return ret;
}
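[Editor's note: the hunk above fixes inverted pointer/length arithmetic (buf - ret, len + ret) that wrote before the buffer and overstated the remaining space. A minimal sketch of the append idiom the fix restores — scnprintf() never writes more than the length it is given and returns the number of characters actually stored:]
static int demo_fill(char *buf, int len)
{
	int ret;
	int i;

	ret = scnprintf(buf, len, "hdr");
	for (i = 0; i < 4; i++)
		ret += scnprintf(buf + ret, len - ret, "-%02x", i);
	return ret;	/* total characters written, always < len */
}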
/**
@@ -607,6 +609,7 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
if (!b)
return NOTIFY_DONE;
+ trace_tipc_l2_device_event(dev, b, evt);
switch (evt) {
case NETDEV_CHANGE:
if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 394290cbbb1d..7f4c569594a5 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -207,7 +207,7 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
int tipc_media_set_priority(const char *name, u32 new_value);
int tipc_media_set_window(const char *name, u32 new_value);
-void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
+int tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
struct nlattr *attrs[]);
void tipc_disable_l2_media(struct tipc_bearer *b);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 836727e363c4..2792a3cae682 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -43,6 +43,7 @@
#include "discover.h"
#include "netlink.h"
#include "monitor.h"
+#include "trace.h"
#include <linux/pkt_sched.h>
@@ -105,7 +106,7 @@ struct tipc_stats {
* @transmitq: queue for sent, non-acked messages
* @backlogq: queue for messages waiting to be sent
* @snt_nxt: next sequence number to use for outbound messages
- * @last_retransmitted: sequence number of most recently retransmitted message
+ * @prev_from: sequence number of the most recent retransmission request
* @stale_cnt: counter for number of identical retransmit attempts
* @stale_limit: time when repeated identical retransmits must force link reset
* @ackers: # of peers that needs to ack each packet before it can be released
@@ -163,7 +164,7 @@ struct tipc_link {
u16 limit;
} backlog[5];
u16 snd_nxt;
- u16 last_retransm;
+ u16 prev_from;
u16 window;
u16 stale_cnt;
unsigned long stale_limit;
@@ -186,9 +187,6 @@ struct tipc_link {
u16 acked;
struct tipc_link *bc_rcvlink;
struct tipc_link *bc_sndlink;
- unsigned long prev_retr;
- u16 prev_from;
- u16 prev_to;
u8 nack_state;
bool bc_peer_is_up;
@@ -210,7 +208,7 @@ enum {
BC_NACK_SND_SUPPRESS,
};
-#define TIPC_BC_RETR_LIMIT 10 /* [ms] */
+#define TIPC_BC_RETR_LIM msecs_to_jiffies(10) /* [ms] */
/*
* Interval between NACKs when packets arrive out of order
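[Editor's note: TIPC_BC_RETR_LIM is now a jiffies value stamped into each outgoing broadcast skb's nxt_retr field (added to tipc_skb_cb in msg.h below), replacing the per-link window tracking that link_bc_retr_eval() used to do. A minimal sketch of the per-packet gate, assuming nxt_retr holds an absolute jiffies deadline:]
static bool demo_may_retransmit(struct sk_buff *skb)
{
	if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
		return false;			/* retransmitted too recently */
	TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
	return true;				/* ok to copy and queue it */
}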
@@ -359,9 +357,11 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
rcv_l->bc_peer_is_up = true;
rcv_l->state = LINK_ESTABLISHED;
tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
+ trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
tipc_link_reset(rcv_l);
rcv_l->state = LINK_RESET;
if (!snd_l->ackers) {
+ trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
tipc_link_reset(snd_l);
snd_l->state = LINK_RESET;
__skb_queue_purge(xmitq);
@@ -525,6 +525,7 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
l = *link;
strcpy(l->name, tipc_bclink_name);
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
tipc_link_reset(l);
l->state = LINK_RESET;
l->ackers = 0;
@@ -549,6 +550,7 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
int rc = 0;
+ int old_state = l->state;
switch (l->state) {
case LINK_RESETTING:
@@ -695,10 +697,12 @@ int tipc_link_fsm_evt(struct tipc_link *l, int evt)
default:
pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
}
+ trace_tipc_link_fsm(l->name, old_state, l->state, evt);
return rc;
illegal_evt:
pr_err("Illegal FSM event %x in state %x on link %s\n",
evt, l->state, l->name);
+ trace_tipc_link_fsm(l->name, old_state, l->state, evt);
return rc;
}
@@ -743,6 +747,18 @@ static void link_profile_stats(struct tipc_link *l)
l->stats.msg_length_profile[6]++;
}
+/**
+ * tipc_link_too_silent - check if link is "too silent"
+ * @l: tipc link to be checked
+ *
+ * Returns true if the link 'silent_intv_cnt' is about to reach the
+ * 'abort_limit' value, otherwise false
+ */
+bool tipc_link_too_silent(struct tipc_link *l)
+{
+ return (l->silent_intv_cnt + 2 > l->abort_limit);
+}
+
/* tipc_link_timeout - perform periodic task as instructed from node timeout
*/
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
@@ -756,6 +772,8 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
u16 bc_acked = l->bc_rcvlink->acked;
struct tipc_mon_state *mstate = &l->mon_state;
+ trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
+ trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
switch (l->state) {
case LINK_ESTABLISHED:
case LINK_SYNCHING:
@@ -818,6 +836,7 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
skb_queue_tail(&l->wakeupq, skb);
l->stats.link_congs++;
+ trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
return -ELINKCONG;
}
@@ -948,6 +967,10 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
}
__skb_dequeue(list);
__skb_queue_tail(transmq, skb);
+ /* next retransmit attempt */
+ if (link_is_bc_sndlink(l))
+ TIPC_SKB_CB(skb)->nxt_retr =
+ jiffies + TIPC_BC_RETR_LIM;
__skb_queue_tail(xmitq, _skb);
TIPC_SKB_CB(skb)->ackers = l->ackers;
l->rcv_unacked = 0;
@@ -995,6 +1018,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
hdr = buf_msg(skb);
l->backlog[msg_importance(hdr)].len--;
__skb_queue_tail(&l->transmq, skb);
+ /* next retransmit attempt */
+ if (link_is_bc_sndlink(l))
+ TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
+
__skb_queue_tail(xmitq, _skb);
TIPC_SKB_CB(skb)->ackers = l->ackers;
msg_set_seqno(hdr, seqno);
@@ -1036,14 +1063,20 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
if (!skb)
return 0;
+ if (less(to, from))
+ return 0;
+ trace_tipc_link_retrans(r, from, to, &l->transmq);
/* Detect repeated retransmit failures on same packet */
- if (r->last_retransm != buf_seqno(skb)) {
- r->last_retransm = buf_seqno(skb);
+ if (r->prev_from != from) {
+ r->prev_from = from;
r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
r->stale_cnt = 0;
} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
link_retransmit_failure(l, skb);
+ trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
+ trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
+ trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
if (link_is_bc_sndlink(l))
return TIPC_LINK_DOWN_EVT;
return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
@@ -1055,6 +1088,11 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
continue;
if (more(msg_seqno(hdr), to))
break;
+ if (link_is_bc_sndlink(l)) {
+ if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
+ continue;
+ TIPC_SKB_CB(skb)->nxt_retr = jiffies + TIPC_BC_RETR_LIM;
+ }
_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
if (!_skb)
return 0;
@@ -1398,6 +1436,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
l->stats.sent_nacks++;
skb->priority = TC_PRIO_CONTROL;
__skb_queue_tail(xmitq, skb);
+ trace_tipc_proto_build(skb, false, l->name);
}
void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
@@ -1561,6 +1600,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
char *if_name;
int rc = 0;
+ trace_tipc_proto_rcv(skb, false, l->name);
if (tipc_link_is_blocked(l) || !xmitq)
goto exit;
@@ -1571,8 +1611,11 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
hdr = buf_msg(skb);
data = msg_data(hdr);
- if (!tipc_link_validate_msg(l, hdr))
+ if (!tipc_link_validate_msg(l, hdr)) {
+ trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
+ trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
goto exit;
+ }
switch (mtyp) {
case RESET_MSG:
@@ -1737,42 +1780,6 @@ void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
l->rcv_nxt = peers_snd_nxt;
}
-/* link_bc_retr_eval() - check if the indicated range can be retransmitted now
- * - Adjust permitted range if there is overlap with previous retransmission
- */
-static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
-{
- unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);
-
- if (less(*to, *from))
- return false;
-
- /* New retransmission request */
- if ((elapsed > TIPC_BC_RETR_LIMIT) ||
- less(*to, l->prev_from) || more(*from, l->prev_to)) {
- l->prev_from = *from;
- l->prev_to = *to;
- l->prev_retr = jiffies;
- return true;
- }
-
- /* Inside range of previous retransmit */
- if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
- return false;
-
- /* Fully or partially outside previous range => exclude overlap */
- if (less(*from, l->prev_from)) {
- *to = l->prev_from - 1;
- l->prev_from = *from;
- }
- if (more(*to, l->prev_to)) {
- *from = l->prev_to + 1;
- l->prev_to = *to;
- }
- l->prev_retr = jiffies;
- return true;
-}
-
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
*/
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
@@ -1803,8 +1810,7 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
if (more(peers_snd_nxt, l->rcv_nxt + l->window))
return rc;
- if (link_bc_retr_eval(snd_l, &from, &to))
- rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
+ rc = tipc_link_retrans(snd_l, l, from, to, xmitq);
l->snd_nxt = peers_snd_nxt;
if (link_bc_rcv_gap(l))
@@ -1852,6 +1858,7 @@ void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
if (!more(acked, l->acked))
return;
+ trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq);
/* Skip over packets peer has already acked */
skb_queue_walk(&snd_l->transmq, skb) {
if (more(buf_seqno(skb), l->acked))
@@ -2255,3 +2262,122 @@ void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
l->abort_limit = limit;
}
+
+char *tipc_link_name_ext(struct tipc_link *l, char *buf)
+{
+ if (!l)
+ scnprintf(buf, TIPC_MAX_LINK_NAME, "null");
+ else if (link_is_bc_sndlink(l))
+ scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender");
+ else if (link_is_bc_rcvlink(l))
+ scnprintf(buf, TIPC_MAX_LINK_NAME,
+ "broadcast-receiver, peer %x", l->addr);
+ else
+ memcpy(buf, l->name, TIPC_MAX_LINK_NAME);
+
+ return buf;
+}
+
+/**
+ * tipc_link_dump - dump TIPC link data
+ * @l: tipc link to be dumped
+ * @dqueues: bitmask selecting which link queues to dump:
+ * - TIPC_DUMP_NONE: don't dump link queues
+ * - TIPC_DUMP_TRANSMQ: dump link transmq queue
+ * - TIPC_DUMP_BACKLOGQ: dump link backlog queue
+ * - TIPC_DUMP_DEFERDQ: dump link deferred queue
+ * - TIPC_DUMP_INPUTQ: dump link input queue
+ * - TIPC_DUMP_WAKEUP: dump link wakeup queue
+ * - TIPC_DUMP_ALL: dump all the link queues above
+ * @buf: returned buffer holding the formatted dump data
+ */
+int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
+{
+ int i = 0;
+ size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
+ struct sk_buff_head *list;
+ struct sk_buff *hskb, *tskb;
+ u32 len;
+
+ if (!l) {
+ i += scnprintf(buf, sz, "link data: (null)\n");
+ return i;
+ }
+
+ i += scnprintf(buf, sz, "link data: %x", l->addr);
+ i += scnprintf(buf + i, sz - i, " %x", l->state);
+ i += scnprintf(buf + i, sz - i, " %u", l->in_session);
+ i += scnprintf(buf + i, sz - i, " %u", l->session);
+ i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
+ i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
+ i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
+ i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
+ i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
+ i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
+ i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", l->prev_from);
+ i += scnprintf(buf + i, sz - i, " %u", l->stale_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", l->acked);
+
+ list = &l->transmq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ list = &l->deferdq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ list = &l->backlogq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ list = l->inputq;
+ len = skb_queue_len(list);
+ hskb = skb_peek(list);
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
+ (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
+ (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
+
+ if (dqueues & TIPC_DUMP_TRANSMQ) {
+ i += scnprintf(buf + i, sz - i, "transmq: ");
+ i += tipc_list_dump(&l->transmq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_BACKLOGQ) {
+ i += scnprintf(buf + i, sz - i,
+ "backlogq: <%u %u %u %u %u>, ",
+ l->backlog[TIPC_LOW_IMPORTANCE].len,
+ l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
+ l->backlog[TIPC_HIGH_IMPORTANCE].len,
+ l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
+ l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
+ i += tipc_list_dump(&l->backlogq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_DEFERDQ) {
+ i += scnprintf(buf + i, sz - i, "deferdq: ");
+ i += tipc_list_dump(&l->deferdq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_INPUTQ) {
+ i += scnprintf(buf + i, sz - i, "inputq: ");
+ i += tipc_list_dump(l->inputq, false, buf + i);
+ }
+ if (dqueues & TIPC_DUMP_WAKEUP) {
+ i += scnprintf(buf + i, sz - i, "wakeup: ");
+ i += tipc_list_dump(&l->wakeupq, false, buf + i);
+ }
+
+ return i;
+}
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 90488c538a4e..8439e0ee53a8 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -109,6 +109,7 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
u16 tipc_link_acked(struct tipc_link *l);
u32 tipc_link_id(struct tipc_link *l);
char *tipc_link_name(struct tipc_link *l);
+char *tipc_link_name_ext(struct tipc_link *l, char *buf);
u32 tipc_link_state(struct tipc_link *l);
char tipc_link_plane(struct tipc_link *l);
int tipc_link_prio(struct tipc_link *l);
@@ -147,4 +148,5 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
struct sk_buff_head *xmitq);
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *xmitq);
+bool tipc_link_too_silent(struct tipc_link *l);
#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index a2879e6ec5b6..a0924956bb61 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -105,6 +105,7 @@ struct tipc_skb_cb {
u32 bytes_read;
u32 orig_member;
struct sk_buff *tail;
+ unsigned long nxt_retr;
bool validated;
u16 chain_imp;
u16 ackers;
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 6376467e78f8..21f6ccc89401 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -951,8 +951,11 @@ static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
u32 node;
struct nlattr *con[TIPC_NLA_CON_MAX + 1];
- nla_parse_nested(con, TIPC_NLA_CON_MAX,
- sock[TIPC_NLA_SOCK_CON], NULL, NULL);
+ err = nla_parse_nested(con, TIPC_NLA_CON_MAX,
+ sock[TIPC_NLA_SOCK_CON], NULL, NULL);
+
+ if (err)
+ return err;
node = nla_get_u32(con[TIPC_NLA_CON_NODE]);
tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>",
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2afc4f8c37a7..db2a6c3e0be9 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -43,6 +43,7 @@
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
+#include "trace.h"
#define INVALID_NODE_SIG 0x10000
#define NODE_CLEANUP_AFTER 300000
@@ -432,6 +433,7 @@ static struct tipc_node *tipc_node_create(struct net *net, u32 addr,
break;
}
list_add_tail_rcu(&n->list, &temp_node->list);
+ trace_tipc_node_create(n, true, " ");
exit:
spin_unlock_bh(&tn->node_list_lock);
return n;
@@ -459,6 +461,7 @@ static void tipc_node_delete_from_list(struct tipc_node *node)
static void tipc_node_delete(struct tipc_node *node)
{
+ trace_tipc_node_delete(node, true, " ");
tipc_node_delete_from_list(node);
del_timer_sync(&node->timer);
@@ -584,12 +587,15 @@ static void tipc_node_clear_links(struct tipc_node *node)
/* tipc_node_cleanup - delete nodes that do not
* have active links for NODE_CLEANUP_AFTER time
*/
-static int tipc_node_cleanup(struct tipc_node *peer)
+static bool tipc_node_cleanup(struct tipc_node *peer)
{
struct tipc_net *tn = tipc_net(peer->net);
bool deleted = false;
- spin_lock_bh(&tn->node_list_lock);
+ /* If the lock is held by tipc_node_stop() the node will be deleted anyway */
+ if (!spin_trylock_bh(&tn->node_list_lock))
+ return false;
+
tipc_node_write_lock(peer);
if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
@@ -613,6 +619,7 @@ static void tipc_node_timeout(struct timer_list *t)
int bearer_id;
int rc = 0;
+ trace_tipc_node_timeout(n, false, " ");
if (!node_is_up(n) && tipc_node_cleanup(n)) {
/* Remove the timer's reference */
tipc_node_put(n);
@@ -621,6 +628,12 @@ static void tipc_node_timeout(struct timer_list *t)
__skb_queue_head_init(&xmitq);
+ /* Initialize the node keepalive interval to a larger value (10 seconds);
+ * it will then be recalculated from the lowest link tolerance
+ */
+ tipc_node_read_lock(n);
+ n->keepalive_intv = 10000;
+ tipc_node_read_unlock(n);
for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
tipc_node_read_lock(n);
le = &n->links[bearer_id];
@@ -672,6 +685,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
pr_debug("Established link <%s> on network plane %c\n",
tipc_link_name(nl), tipc_link_plane(nl));
+ trace_tipc_node_link_up(n, true, " ");
/* Ensure that a STATE message goes first */
tipc_link_build_state_msg(nl, xmitq);
@@ -774,6 +788,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
if (tipc_link_peer_is_down(l))
tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
tipc_link_fsm_evt(l, LINK_RESET_EVT);
tipc_link_reset(l);
tipc_link_build_reset_msg(l, xmitq);
@@ -791,6 +806,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
tipc_link_reset(l);
tipc_link_fsm_evt(l, LINK_RESET_EVT);
tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
@@ -823,6 +839,7 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
/* Defuse pending tipc_node_link_up() */
tipc_link_fsm_evt(l, LINK_RESET_EVT);
}
+ trace_tipc_node_link_down(n, true, "node link down or deleted!");
tipc_node_write_unlock(n);
if (delete)
tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
@@ -1012,6 +1029,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
*respond = false;
goto exit;
}
+ trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
tipc_link_reset(l);
tipc_link_fsm_evt(l, LINK_RESET_EVT);
if (n->state == NODE_FAILINGOVER)
@@ -1051,6 +1069,7 @@ static void tipc_node_reset_links(struct tipc_node *n)
pr_warn("Resetting all links to %x\n", n->addr);
+ trace_tipc_node_reset_links(n, true, " ");
for (i = 0; i < MAX_BEARERS; i++) {
tipc_node_link_down(n, i, false);
}
@@ -1226,11 +1245,13 @@ static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
pr_err("Unknown node fsm state %x\n", state);
break;
}
+ trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
n->state = state;
return;
illegal_evt:
pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
+ trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}
static void node_lost_contact(struct tipc_node *n,
@@ -1244,6 +1265,7 @@ static void node_lost_contact(struct tipc_node *n,
pr_debug("Lost contact with %x\n", n->addr);
n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
+ trace_tipc_node_lost_contact(n, true, " ");
/* Clean up broadcast state */
tipc_bcast_remove_peer(n->net, n->bc_entry.link);
@@ -1540,6 +1562,10 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
if (!skb_queue_empty(&be->inputq1))
tipc_node_mcast_rcv(n);
+ /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
+ if (!skb_queue_empty(&n->bc_entry.namedq))
+ tipc_named_rcv(net, &n->bc_entry.namedq);
+
/* If reassembly or retransmission failure => reset all links to peer */
if (rc & TIPC_LINK_DOWN_EVT)
tipc_node_reset_links(n);
@@ -1568,6 +1594,10 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
struct tipc_media_addr *maddr;
int pb_id;
+ if (trace_tipc_node_check_state_enabled()) {
+ trace_tipc_skb_dump(skb, false, "skb for node state check");
+ trace_tipc_node_check_state(n, true, " ");
+ }
l = n->links[bearer_id].link;
if (!l)
return false;
@@ -1585,8 +1615,11 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
}
}
- if (!tipc_link_validate_msg(l, hdr))
+ if (!tipc_link_validate_msg(l, hdr)) {
+ trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
+ trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
return false;
+ }
/* Check and update node accessibility if applicable */
if (state == SELF_UP_PEER_COMING) {
@@ -1616,6 +1649,8 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
syncpt = oseqno + exp_pkts - 1;
if (pl && tipc_link_is_up(pl)) {
__tipc_node_link_down(n, &pb_id, xmitq, &maddr);
+ trace_tipc_node_link_down(n, true,
+ "node link down <- failover!");
tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
tipc_link_inputq(l));
}
@@ -2422,3 +2457,65 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
return skb->len;
}
+
+u32 tipc_node_get_addr(struct tipc_node *node)
+{
+ return (node) ? node->addr : 0;
+}
+
+/**
+ * tipc_node_dump - dump TIPC node data
+ * @n: tipc node to be dumped
+ * @more: dump more?
+ * - false: dump only tipc node data
+ * - true: dump node link data as well
+ * @buf: returned buffer holding the formatted dump data
+ */
+int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
+{
+ int i = 0;
+ size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
+
+ if (!n) {
+ i += scnprintf(buf, sz, "node data: (null)\n");
+ return i;
+ }
+
+ i += scnprintf(buf, sz, "node data: %x", n->addr);
+ i += scnprintf(buf + i, sz - i, " %x", n->state);
+ i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
+ i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
+ i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
+ i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
+ i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
+ i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", n->working_links);
+ i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
+ i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
+
+ if (!more)
+ return i;
+
+ i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
+ i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
+ i += scnprintf(buf + i, sz - i, " media: ");
+ i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
+ i += scnprintf(buf + i, sz - i, "\n");
+ i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
+ i += scnprintf(buf + i, sz - i, " inputq: ");
+ i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
+
+ i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
+ i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
+ i += scnprintf(buf + i, sz - i, " media: ");
+ i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
+ i += scnprintf(buf + i, sz - i, "\n");
+ i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
+ i += scnprintf(buf + i, sz - i, " inputq: ");
+ i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
+
+ i += scnprintf(buf + i, sz - i, "bclink:\n ");
+ i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
+
+ return i;
+}
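[Editor's note: a hedged usage sketch for the dump helper above; NODE_LMIN is the buffer size node.c takes from trace.h (introduced later in this diff), so a caller without queue dumps might do:]
static void demo_dump_node(struct tipc_node *n)
{
	char buf[NODE_LMIN];

	tipc_node_dump(n, false, buf);	/* node data only, no link data */
	pr_info("%s", buf);
}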
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 03f5efb62cfb..4f59a30e989a 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -65,6 +65,7 @@ enum {
void tipc_node_stop(struct net *net);
bool tipc_node_get_id(struct net *net, u32 addr, u8 *id);
+u32 tipc_node_get_addr(struct tipc_node *node);
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
struct tipc_bearer *bearer,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b57b1be7252b..1217c90a363b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -46,6 +46,7 @@
#include "bcast.h"
#include "netlink.h"
#include "group.h"
+#include "trace.h"
#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
@@ -233,6 +234,7 @@ static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
*/
static void tsk_advance_rx_queue(struct sock *sk)
{
+ trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
@@ -247,6 +249,7 @@ static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
if (!tipc_msg_reverse(onode, &skb, err))
return;
+ trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
dnode = msg_destnode(buf_msg(skb));
selector = msg_origport(buf_msg(skb));
tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
@@ -482,6 +485,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
tsk_set_unreliable(tsk, true);
}
+ trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
return 0;
}
@@ -571,6 +575,7 @@ static int tipc_release(struct socket *sock)
tsk = tipc_sk(sk);
lock_sock(sk);
+ trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
sk->sk_shutdown = SHUTDOWN_MASK;
tipc_sk_leave(tsk);
@@ -718,6 +723,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
__poll_t revents = 0;
sock_poll_wait(file, sock, wait);
+ trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");
if (sk->sk_shutdown & RCV_SHUTDOWN)
revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
@@ -804,9 +810,12 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
/* Send message if build was successful */
- if (unlikely(rc == dlen))
+ if (unlikely(rc == dlen)) {
+ trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
+ TIPC_DUMP_SK_SNDQ, " ");
rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
&tsk->cong_link_cnt);
+ }
tipc_nlist_purge(&dsts);
@@ -880,7 +889,6 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
int blks = tsk_blocks(GROUP_H_SIZE + dlen);
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_group *grp = tsk->group;
struct net *net = sock_net(sk);
struct tipc_member *mb = NULL;
u32 node, port;
@@ -894,7 +902,9 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
/* Block or return if destination link or member is congested */
rc = tipc_wait_for_cond(sock, &timeout,
!tipc_dest_find(&tsk->cong_links, node, 0) &&
- !tipc_group_cong(grp, node, port, blks, &mb));
+ tsk->group &&
+ !tipc_group_cong(tsk->group, node, port, blks,
+ &mb));
if (unlikely(rc))
return rc;
@@ -924,7 +934,6 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
struct tipc_sock *tsk = tipc_sk(sk);
struct list_head *cong_links = &tsk->cong_links;
int blks = tsk_blocks(GROUP_H_SIZE + dlen);
- struct tipc_group *grp = tsk->group;
struct tipc_msg *hdr = &tsk->phdr;
struct tipc_member *first = NULL;
struct tipc_member *mbr = NULL;
@@ -941,9 +950,10 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
type = msg_nametype(hdr);
inst = dest->addr.name.name.instance;
scope = msg_lookup_scope(hdr);
- exclude = tipc_group_exclude(grp);
while (++lookups < 4) {
+ exclude = tipc_group_exclude(tsk->group);
+
first = NULL;
/* Look for a non-congested destination member, if any */
@@ -952,7 +962,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
&dstcnt, exclude, false))
return -EHOSTUNREACH;
tipc_dest_pop(&dsts, &node, &port);
- cong = tipc_group_cong(grp, node, port, blks, &mbr);
+ cong = tipc_group_cong(tsk->group, node, port, blks,
+ &mbr);
if (!cong)
break;
if (mbr == first)
@@ -971,7 +982,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
/* Block or return if destination link or member is congested */
rc = tipc_wait_for_cond(sock, &timeout,
!tipc_dest_find(cong_links, node, 0) &&
- !tipc_group_cong(grp, node, port,
+ tsk->group &&
+ !tipc_group_cong(tsk->group, node, port,
blks, &mbr));
if (unlikely(rc))
return rc;
@@ -1006,8 +1018,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_group *grp = tsk->group;
- struct tipc_nlist *dsts = tipc_group_dests(grp);
+ struct tipc_nlist *dsts;
struct tipc_mc_method *method = &tsk->mc_method;
bool ack = method->mandatory && method->rcast;
int blks = tsk_blocks(MCAST_H_SIZE + dlen);
@@ -1016,15 +1027,17 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
struct sk_buff_head pkts;
int rc = -EHOSTUNREACH;
- if (!dsts->local && !dsts->remote)
- return -EHOSTUNREACH;
-
/* Block or return if any destination link or member is congested */
- rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt &&
- !tipc_group_bc_cong(grp, blks));
+ rc = tipc_wait_for_cond(sock, &timeout,
+ !tsk->cong_link_cnt && tsk->group &&
+ !tipc_group_bc_cong(tsk->group, blks));
if (unlikely(rc))
return rc;
+ dsts = tipc_group_dests(tsk->group);
+ if (!dsts->local && !dsts->remote)
+ return -EHOSTUNREACH;
+
/* Complete message header */
if (dest) {
msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
@@ -1036,7 +1049,7 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
msg_set_hdr_sz(hdr, GROUP_H_SIZE);
msg_set_destport(hdr, 0);
msg_set_destnode(hdr, 0);
- msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(grp));
+ msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
/* Avoid getting stuck with repeated forced replicasts */
msg_set_grp_bc_ack_req(hdr, ack);
@@ -1208,8 +1221,10 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
bool conn_cong;
/* Ignore if connection cannot be validated: */
- if (!tsk_peer_msg(tsk, hdr))
+ if (!tsk_peer_msg(tsk, hdr)) {
+ trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
goto exit;
+ }
if (unlikely(msg_errcode(hdr))) {
tipc_set_sk_state(sk, TIPC_DISCONNECTING);
@@ -1377,6 +1392,7 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue)))
return -ENOMEM;
+ trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
if (unlikely(rc == -ELINKCONG)) {
tipc_dest_push(clinks, dnode, 0);
@@ -1454,6 +1470,8 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
if (unlikely(rc != send))
break;
+ trace_tipc_sk_sendstream(sk, skb_peek(&pkts),
+ TIPC_DUMP_SK_SNDQ, " ");
rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
if (unlikely(rc == -ELINKCONG)) {
tsk->cong_link_cnt = 1;
@@ -2128,6 +2146,7 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
struct sk_buff_head inputq;
int limit, err = TIPC_OK;
+ trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
TIPC_SKB_CB(skb)->bytes_read = 0;
__skb_queue_head_init(&inputq);
__skb_queue_tail(&inputq, skb);
@@ -2147,17 +2166,25 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
(!grp && msg_in_group(hdr)))
err = TIPC_ERR_NO_PORT;
else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
+ trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
+ "err_overload2!");
atomic_inc(&sk->sk_drops);
err = TIPC_ERR_OVERLOAD;
}
if (unlikely(err)) {
- tipc_skb_reject(net, err, skb, xmitq);
+ if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
+ trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
+ "@filter_rcv!");
+ __skb_queue_tail(xmitq, skb);
+ }
err = TIPC_OK;
continue;
}
__skb_queue_tail(&sk->sk_receive_queue, skb);
skb_set_owner_r(skb, sk);
+ trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
+ "rcvq >90% allocated!");
sk->sk_data_ready(sk);
}
}
@@ -2223,14 +2250,21 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
if (!sk->sk_backlog.len)
atomic_set(dcnt, 0);
lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
- if (likely(!sk_add_backlog(sk, skb, lim)))
+ if (likely(!sk_add_backlog(sk, skb, lim))) {
+ trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
+ "bklg & rcvq >90% allocated!");
continue;
+ }
+ trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
/* Overload => reject message back to sender */
onode = tipc_own_addr(sock_net(sk));
atomic_inc(&sk->sk_drops);
- if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+ if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
+ trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
+ "@sk_enqueue!");
__skb_queue_tail(xmitq, skb);
+ }
break;
}
}
@@ -2279,6 +2313,8 @@ void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
/* Prepare for message rejection */
if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
continue;
+
+ trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
xmit:
dnode = msg_destnode(buf_msg(skb));
tipc_node_xmit_skb(net, skb, dnode, dport);
@@ -2552,6 +2588,7 @@ static int tipc_shutdown(struct socket *sock, int how)
lock_sock(sk);
+ trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
sk->sk_shutdown = SEND_SHUTDOWN;
@@ -2724,11 +2761,15 @@ void tipc_sk_reinit(struct net *net)
rhashtable_walk_start(&iter);
while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
- spin_lock_bh(&tsk->sk.sk_lock.slock);
+ sock_hold(&tsk->sk);
+ rhashtable_walk_stop(&iter);
+ lock_sock(&tsk->sk);
msg = &tsk->phdr;
msg_set_prevnode(msg, tipc_own_addr(net));
msg_set_orignode(msg, tipc_own_addr(net));
- spin_unlock_bh(&tsk->sk.sk_lock.slock);
+ release_sock(&tsk->sk);
+ rhashtable_walk_start(&iter);
+ sock_put(&tsk->sk);
}
rhashtable_walk_stop(&iter);
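[Editor's note: the hunk above replaces a raw sk_lock.slock spin lock with lock_sock(), which may sleep, so the RCU-protected table walk must be paused around it while a reference keeps the socket alive. The general pattern, sketched as a self-contained walker:]
static void demo_walk_socks(struct rhashtable *tbl)
{
	struct rhashtable_iter iter;
	struct tipc_sock *tsk;

	rhashtable_walk_enter(tbl, &iter);
	rhashtable_walk_start(&iter);
	while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
		sock_hold(&tsk->sk);
		rhashtable_walk_stop(&iter);	/* leave the RCU section */
		lock_sock(&tsk->sk);		/* may sleep */
		/* ... mutate socket state safely here ... */
		release_sock(&tsk->sk);
		rhashtable_walk_start(&iter);	/* resume the walk */
		sock_put(&tsk->sk);
	}
	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}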
@@ -3564,3 +3605,187 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
+
+/**
+ * tipc_sk_filtering - check if a socket should be traced
+ * @sk: the socket to be examined
+ * @sysctl_tipc_sk_filter[]: the socket tuple for filtering,
+ * (portid, sock type, name type, name lower, name upper)
+ *
+ * Returns true if the socket matches the filter tuple
+ * (value 0 = 'any') or when there is no tuple set (all = 0),
+ * otherwise false
+ */
+bool tipc_sk_filtering(struct sock *sk)
+{
+ struct tipc_sock *tsk;
+ struct publication *p;
+ u32 _port, _sktype, _type, _lower, _upper;
+ u32 type = 0, lower = 0, upper = 0;
+
+ if (!sk)
+ return true;
+
+ tsk = tipc_sk(sk);
+
+ _port = sysctl_tipc_sk_filter[0];
+ _sktype = sysctl_tipc_sk_filter[1];
+ _type = sysctl_tipc_sk_filter[2];
+ _lower = sysctl_tipc_sk_filter[3];
+ _upper = sysctl_tipc_sk_filter[4];
+
+ if (!_port && !_sktype && !_type && !_lower && !_upper)
+ return true;
+
+ if (_port)
+ return (_port == tsk->portid);
+
+ if (_sktype && _sktype != sk->sk_type)
+ return false;
+
+ if (tsk->published) {
+ p = list_first_entry_or_null(&tsk->publications,
+ struct publication, binding_sock);
+ if (p) {
+ type = p->type;
+ lower = p->lower;
+ upper = p->upper;
+ }
+ }
+
+ if (!tipc_sk_type_connectionless(sk)) {
+ type = tsk->conn_type;
+ lower = tsk->conn_instance;
+ upper = tsk->conn_instance;
+ }
+
+ if ((_type && _type != type) || (_lower && _lower != lower) ||
+ (_upper && _upper != upper))
+ return false;
+
+ return true;
+}
+
+u32 tipc_sock_get_portid(struct sock *sk)
+{
+ return (sk) ? (tipc_sk(sk))->portid : 0;
+}
+
+/**
+ * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded;
+ * both the rcv and backlog queues are considered
+ * @sk: tipc sk to be checked
+ * @skb: tipc msg to be checked
+ *
+ * Returns true if the socket rx queue allocation is > 90%, otherwise false
+ */
+
+bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
+{
+ atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
+ unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
+ unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
+
+ return (qsize > lim * 90 / 100);
+}
+
+/**
+ * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded;
+ * only the rcv queue is considered
+ * @sk: tipc sk to be checked
+ * @skb: tipc msg to be checked
+ *
+ * Returns true if the socket rx queue allocation is > 90%, otherwise false
+ */
+
+bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
+{
+ unsigned int lim = rcvbuf_limit(sk, skb);
+ unsigned int qsize = sk_rmem_alloc_get(sk);
+
+ return (qsize > lim * 90 / 100);
+}
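[Editor's note: both helpers share the same 90% predicate; as a self-contained worked example, with a 2 MiB limit the threshold is 2097152 * 90 / 100 = 1887436 bytes, i.e. roughly 1.8 MiB of queued memory before the trace fires.]
static bool demo_over_90_percent(unsigned int qsize, unsigned int lim)
{
	return qsize > lim * 90 / 100;	/* true above nine tenths of lim */
}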
+
+/**
+ * tipc_sk_dump - dump TIPC socket
+ * @sk: tipc sk to be dumped
+ * @dqueues: bitmask selecting which socket queues to dump:
+ * - TIPC_DUMP_NONE: don't dump socket queues
+ * - TIPC_DUMP_SK_SNDQ: dump socket send queue
+ * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
+ * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
+ * - TIPC_DUMP_ALL: dump all the socket queues above
+ * @buf: returned buffer holding the formatted dump data
+ */
+int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
+{
+ int i = 0;
+ size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
+ struct tipc_sock *tsk;
+ struct publication *p;
+ bool tsk_connected;
+
+ if (!sk) {
+ i += scnprintf(buf, sz, "sk data: (null)\n");
+ return i;
+ }
+
+ tsk = tipc_sk(sk);
+ tsk_connected = !tipc_sk_type_connectionless(sk);
+
+ i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
+ i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
+ i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
+ i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
+ i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
+ if (tsk_connected) {
+ i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
+ i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
+ i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
+ }
+ i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
+ if (tsk->published) {
+ p = list_first_entry_or_null(&tsk->publications,
+ struct publication, binding_sock);
+ i += scnprintf(buf + i, sz - i, " %u", (p) ? p->type : 0);
+ i += scnprintf(buf + i, sz - i, " %u", (p) ? p->lower : 0);
+ i += scnprintf(buf + i, sz - i, " %u", (p) ? p->upper : 0);
+ }
+ i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
+ i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
+ i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
+ i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
+ i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
+ i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
+ i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
+ i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
+ i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
+ i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len);
+
+ if (dqueues & TIPC_DUMP_SK_SNDQ) {
+ i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
+ i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
+ }
+
+ if (dqueues & TIPC_DUMP_SK_RCVQ) {
+ i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
+ i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
+ }
+
+ if (dqueues & TIPC_DUMP_SK_BKLGQ) {
+ i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
+ i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
+ if (sk->sk_backlog.tail != sk->sk_backlog.head) {
+ i += scnprintf(buf + i, sz - i, " tail ");
+ i += tipc_skb_dump(sk->sk_backlog.tail, false,
+ buf + i);
+ }
+ }
+
+ return i;
+}
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index 5e575f205afe..235b9679acee 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -71,4 +71,8 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
int tipc_dump_start(struct netlink_callback *cb);
int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
int tipc_dump_done(struct netlink_callback *cb);
+u32 tipc_sock_get_portid(struct sock *sk);
+bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb);
+bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb);
+
#endif
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 1a779b1e8510..3481e4906bd6 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -34,6 +34,7 @@
*/
#include "core.h"
+#include "trace.h"
#include <linux/sysctl.h>
@@ -54,6 +55,13 @@ static struct ctl_table tipc_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "sk_filter",
+ .data = &sysctl_tipc_sk_filter,
+ .maxlen = sizeof(sysctl_tipc_sk_filter),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
{}
};
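[Editor's note: with the table entry above registered under net/tipc, the five-field filter tuple is writable from userspace. Judging by tipc_sk_filtering() in socket.c, writing all zeros — e.g. echo "0 0 0 0 0" > /proc/sys/net/tipc/sk_filter — traces every socket, while a non-zero first field restricts tracing to the socket with that port id; the exact proc path assumes the standard sysctl layout.]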
diff --git a/net/tipc/trace.c b/net/tipc/trace.c
new file mode 100644
index 000000000000..964823841efe
--- /dev/null
+++ b/net/tipc/trace.c
@@ -0,0 +1,206 @@
+/*
+ * net/tipc/trace.c: TIPC tracepoints code
+ *
+ * Copyright (c) 2018, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+/**
+ * socket tuples for filtering in socket traces:
+ * (portid, sock type, name type, name lower, name upper)
+ */
+unsigned long sysctl_tipc_sk_filter[5] __read_mostly = {0, };
+
+/**
+ * tipc_skb_dump - dump TIPC skb data
+ * @skb: skb to be dumped
+ * @more: dump more?
+ * - false: dump only tipc msg data
+ * - true: dump kernel-related skb data and tipc cb[] array as well
+ * @buf: returned buffer holding the formatted dump data
+ */
+int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf)
+{
+ int i = 0;
+ size_t sz = (more) ? SKB_LMAX : SKB_LMIN;
+ struct tipc_msg *hdr;
+ struct tipc_skb_cb *skbcb;
+
+ if (!skb) {
+ i += scnprintf(buf, sz, "msg: (null)\n");
+ return i;
+ }
+
+ hdr = buf_msg(skb);
+ skbcb = TIPC_SKB_CB(skb);
+
+ /* tipc msg data section */
+ i += scnprintf(buf, sz, "msg: %u", msg_user(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_type(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_hdr_sz(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_data_sz(hdr));
+ i += scnprintf(buf + i, sz - i, " %x", msg_orignode(hdr));
+ i += scnprintf(buf + i, sz - i, " %x", msg_destnode(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_seqno(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_ack(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_bcast_ack(hdr));
+ switch (msg_user(hdr)) {
+ case LINK_PROTOCOL:
+ i += scnprintf(buf + i, sz - i, " %c", msg_net_plane(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_probe(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_peer_stopping(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_session(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_next_sent(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_seq_gap(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_bc_snd_nxt(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_bc_gap(hdr));
+ break;
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ case CONN_MANAGER:
+ case SOCK_WAKEUP:
+ i += scnprintf(buf + i, sz - i, " | %u", msg_origport(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_destport(hdr));
+ switch (msg_type(hdr)) {
+ case TIPC_NAMED_MSG:
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nametype(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nameinst(hdr));
+ break;
+ case TIPC_MCAST_MSG:
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nametype(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_namelower(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_nameupper(hdr));
+ break;
+ default:
+ break;
+ }
+ i += scnprintf(buf + i, sz - i, " | %u",
+ msg_src_droppable(hdr));
+ i += scnprintf(buf + i, sz - i, " %u",
+ msg_dest_droppable(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_errcode(hdr));
+ i += scnprintf(buf + i, sz - i, " %u", msg_reroute_cnt(hdr));
+ break;
+ default:
+ /* need more? */
+ break;
+ }
+
+ i += scnprintf(buf + i, sz - i, "\n");
+ if (!more)
+ return i;
+
+ /* kernel-related skb data section */
+ i += scnprintf(buf + i, sz - i, "skb: %s",
+ (skb->dev) ? skb->dev->name : "n/a");
+ i += scnprintf(buf + i, sz - i, " %u", skb->len);
+ i += scnprintf(buf + i, sz - i, " %u", skb->data_len);
+ i += scnprintf(buf + i, sz - i, " %u", skb->hdr_len);
+ i += scnprintf(buf + i, sz - i, " %u", skb->truesize);
+ i += scnprintf(buf + i, sz - i, " %u", skb_cloned(skb));
+ i += scnprintf(buf + i, sz - i, " %p", skb->sk);
+ i += scnprintf(buf + i, sz - i, " %u", skb_shinfo(skb)->nr_frags);
+ i += scnprintf(buf + i, sz - i, " %llx",
+ ktime_to_ms(skb_get_ktime(skb)));
+ i += scnprintf(buf + i, sz - i, " %llx\n",
+ ktime_to_ms(skb_hwtstamps(skb)->hwtstamp));
+
+ /* tipc skb cb[] data section */
+ i += scnprintf(buf + i, sz - i, "cb[]: %u", skbcb->bytes_read);
+ i += scnprintf(buf + i, sz - i, " %u", skbcb->orig_member);
+ i += scnprintf(buf + i, sz - i, " %u",
+ jiffies_to_msecs(skbcb->nxt_retr));
+ i += scnprintf(buf + i, sz - i, " %u", skbcb->validated);
+ i += scnprintf(buf + i, sz - i, " %u", skbcb->chain_imp);
+ i += scnprintf(buf + i, sz - i, " %u\n", skbcb->ackers);
+
+ return i;
+}
+
+/**
+ * tipc_list_dump - dump TIPC skb list/queue
+ * @list: list of skbs to be dumped
+ * @more: dump more?
+ * - false: dump only the head & tail skbs
+ * - true: dump the first & last 5 skbs
+ * @buf: buffer in which the formatted dump data is returned
+ */
+int tipc_list_dump(struct sk_buff_head *list, bool more, char *buf)
+{
+ int i = 0;
+ size_t sz = (more) ? LIST_LMAX : LIST_LMIN;
+ u32 count, len;
+ struct sk_buff *hskb, *tskb, *skb, *tmp;
+
+ if (!list) {
+ i += scnprintf(buf, sz, "(null)\n");
+ return i;
+ }
+
+ len = skb_queue_len(list);
+ i += scnprintf(buf, sz, "len = %d\n", len);
+
+ if (!len)
+ return i;
+
+ if (!more) {
+ hskb = skb_peek(list);
+ i += scnprintf(buf + i, sz - i, " head ");
+ i += tipc_skb_dump(hskb, false, buf + i);
+ if (len > 1) {
+ tskb = skb_peek_tail(list);
+ i += scnprintf(buf + i, sz - i, " tail ");
+ i += tipc_skb_dump(tskb, false, buf + i);
+ }
+ } else {
+ count = 0;
+ skb_queue_walk_safe(list, skb, tmp) {
+ count++;
+ if (count == 6)
+ i += scnprintf(buf + i, sz - i, " .\n .\n");
+ if (count > 5 && count <= len - 5)
+ continue;
+ i += scnprintf(buf + i, sz - i, " #%d ", count);
+ i += tipc_skb_dump(skb, false, buf + i);
+ }
+ }
+ return i;
+}
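
A minimal sketch of how a caller might drive the dump helpers above (the call site is hypothetical; the buffer size uses the SKB_LMAX limit defined in trace.h below):

    char buf[SKB_LMAX];    /* worst case, i.e. more == true */

    /* format one skb, including kernel skb fields and the TIPC cb[] */
    tipc_skb_dump(skb, true, buf);
    pr_info("%s", buf);
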
diff --git a/net/tipc/trace.h b/net/tipc/trace.h
new file mode 100644
index 000000000000..4d8e00483afc
--- /dev/null
+++ b/net/tipc/trace.h
@@ -0,0 +1,431 @@
+/*
+ * net/tipc/trace.h: TIPC tracepoints
+ *
+ * Copyright (c) 2018, Ericsson AB
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tipc
+
+#if !defined(_TIPC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TIPC_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "core.h"
+#include "link.h"
+#include "socket.h"
+#include "node.h"
+
+#define SKB_LMIN (100)
+#define SKB_LMAX (SKB_LMIN * 2)
+#define LIST_LMIN (SKB_LMIN * 3)
+#define LIST_LMAX (SKB_LMIN * 11)
+#define SK_LMIN (SKB_LMIN * 2)
+#define SK_LMAX (SKB_LMIN * 11)
+#define LINK_LMIN (SKB_LMIN)
+#define LINK_LMAX (SKB_LMIN * 16)
+#define NODE_LMIN (SKB_LMIN)
+#define NODE_LMAX (SKB_LMIN * 11)
+
+#ifndef __TIPC_TRACE_ENUM
+#define __TIPC_TRACE_ENUM
+enum {
+ TIPC_DUMP_NONE = 0,
+
+ TIPC_DUMP_TRANSMQ = 1,
+ TIPC_DUMP_BACKLOGQ = (1 << 1),
+ TIPC_DUMP_DEFERDQ = (1 << 2),
+ TIPC_DUMP_INPUTQ = (1 << 3),
+ TIPC_DUMP_WAKEUP = (1 << 4),
+
+ TIPC_DUMP_SK_SNDQ = (1 << 8),
+ TIPC_DUMP_SK_RCVQ = (1 << 9),
+ TIPC_DUMP_SK_BKLGQ = (1 << 10),
+ TIPC_DUMP_ALL = 0xffffu
+};
+#endif
+
+/* Link & Node FSM states: */
+#define state_sym(val) \
+ __print_symbolic(val, \
+ {(0xe), "ESTABLISHED" },\
+ {(0xe << 4), "ESTABLISHING" },\
+ {(0x1 << 8), "RESET" },\
+ {(0x2 << 12), "RESETTING" },\
+ {(0xd << 16), "PEER_RESET" },\
+ {(0xf << 20), "FAILINGOVER" },\
+ {(0xc << 24), "SYNCHING" },\
+ {(0xdd), "SELF_DOWN_PEER_DOWN" },\
+ {(0xaa), "SELF_UP_PEER_UP" },\
+ {(0xd1), "SELF_DOWN_PEER_LEAVING" },\
+ {(0xac), "SELF_UP_PEER_COMING" },\
+ {(0xca), "SELF_COMING_PEER_UP" },\
+ {(0x1d), "SELF_LEAVING_PEER_DOWN" },\
+ {(0xf0), "FAILINGOVER" },\
+ {(0xcc), "SYNCHING" })
+
+/* Link & Node FSM events: */
+#define evt_sym(val) \
+ __print_symbolic(val, \
+ {(0xec1ab1e), "ESTABLISH_EVT" },\
+ {(0x9eed0e), "PEER_RESET_EVT" },\
+ {(0xfa110e), "FAILURE_EVT" },\
+ {(0x10ca1d0e), "RESET_EVT" },\
+ {(0xfa110bee), "FAILOVER_BEGIN_EVT" },\
+ {(0xfa110ede), "FAILOVER_END_EVT" },\
+ {(0xc1ccbee), "SYNCH_BEGIN_EVT" },\
+ {(0xc1ccede), "SYNCH_END_EVT" },\
+ {(0xece), "SELF_ESTABL_CONTACT_EVT" },\
+ {(0x1ce), "SELF_LOST_CONTACT_EVT" },\
+ {(0x9ece), "PEER_ESTABL_CONTACT_EVT" },\
+ {(0x91ce), "PEER_LOST_CONTACT_EVT" },\
+ {(0xfbe), "FAILOVER_BEGIN_EVT" },\
+ {(0xfee), "FAILOVER_END_EVT" },\
+ {(0xcbe), "SYNCH_BEGIN_EVT" },\
+ {(0xcee), "SYNCH_END_EVT" })
+
+/* Bearer, net device events: */
+#define dev_evt_sym(val) \
+ __print_symbolic(val, \
+ {(NETDEV_CHANGE), "NETDEV_CHANGE" },\
+ {(NETDEV_GOING_DOWN), "NETDEV_GOING_DOWN" },\
+ {(NETDEV_UP), "NETDEV_UP" },\
+ {(NETDEV_CHANGEMTU), "NETDEV_CHANGEMTU" },\
+ {(NETDEV_CHANGEADDR), "NETDEV_CHANGEADDR" },\
+ {(NETDEV_UNREGISTER), "NETDEV_UNREGISTER" },\
+ {(NETDEV_CHANGENAME), "NETDEV_CHANGENAME" })
+
+extern unsigned long sysctl_tipc_sk_filter[5] __read_mostly;
+
+int tipc_skb_dump(struct sk_buff *skb, bool more, char *buf);
+int tipc_list_dump(struct sk_buff_head *list, bool more, char *buf);
+int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf);
+int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf);
+int tipc_node_dump(struct tipc_node *n, bool more, char *buf);
+bool tipc_sk_filtering(struct sock *sk);
+
+DECLARE_EVENT_CLASS(tipc_skb_class,
+
+ TP_PROTO(struct sk_buff *skb, bool more, const char *header),
+
+ TP_ARGS(skb, more, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __dynamic_array(char, buf, (more) ? SKB_LMAX : SKB_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header, header);
+ tipc_skb_dump(skb, more, __get_str(buf));
+ ),
+
+ TP_printk("%s\n%s", __get_str(header), __get_str(buf))
+)
+
+#define DEFINE_SKB_EVENT(name) \
+DEFINE_EVENT(tipc_skb_class, name, \
+ TP_PROTO(struct sk_buff *skb, bool more, const char *header), \
+ TP_ARGS(skb, more, header))
+DEFINE_SKB_EVENT(tipc_skb_dump);
+DEFINE_SKB_EVENT(tipc_proto_build);
+DEFINE_SKB_EVENT(tipc_proto_rcv);
+
+DECLARE_EVENT_CLASS(tipc_list_class,
+
+ TP_PROTO(struct sk_buff_head *list, bool more, const char *header),
+
+ TP_ARGS(list, more, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __dynamic_array(char, buf, (more) ? LIST_LMAX : LIST_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header, header);
+ tipc_list_dump(list, more, __get_str(buf));
+ ),
+
+ TP_printk("%s\n%s", __get_str(header), __get_str(buf))
+);
+
+#define DEFINE_LIST_EVENT(name) \
+DEFINE_EVENT(tipc_list_class, name, \
+ TP_PROTO(struct sk_buff_head *list, bool more, const char *header), \
+ TP_ARGS(list, more, header))
+DEFINE_LIST_EVENT(tipc_list_dump);
+
+DECLARE_EVENT_CLASS(tipc_sk_class,
+
+ TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues,
+ const char *header),
+
+ TP_ARGS(sk, skb, dqueues, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __field(u32, portid)
+ __dynamic_array(char, buf, (dqueues) ? SK_LMAX : SK_LMIN)
+ __dynamic_array(char, skb_buf, (skb) ? SKB_LMIN : 1)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header, header);
+ __entry->portid = tipc_sock_get_portid(sk);
+ tipc_sk_dump(sk, dqueues, __get_str(buf));
+ if (skb)
+ tipc_skb_dump(skb, false, __get_str(skb_buf));
+ else
+ *(__get_str(skb_buf)) = '\0';
+ ),
+
+ TP_printk("<%u> %s\n%s%s", __entry->portid, __get_str(header),
+ __get_str(skb_buf), __get_str(buf))
+);
+
+#define DEFINE_SK_EVENT_FILTER(name) \
+DEFINE_EVENT_CONDITION(tipc_sk_class, name, \
+ TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
+ const char *header), \
+ TP_ARGS(sk, skb, dqueues, header), \
+ TP_CONDITION(tipc_sk_filtering(sk)))
+DEFINE_SK_EVENT_FILTER(tipc_sk_dump);
+DEFINE_SK_EVENT_FILTER(tipc_sk_create);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendmcast);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendmsg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_sendstream);
+DEFINE_SK_EVENT_FILTER(tipc_sk_poll);
+DEFINE_SK_EVENT_FILTER(tipc_sk_filter_rcv);
+DEFINE_SK_EVENT_FILTER(tipc_sk_advance_rx);
+DEFINE_SK_EVENT_FILTER(tipc_sk_rej_msg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_drop_msg);
+DEFINE_SK_EVENT_FILTER(tipc_sk_release);
+DEFINE_SK_EVENT_FILTER(tipc_sk_shutdown);
+
+#define DEFINE_SK_EVENT_FILTER_COND(name, cond) \
+DEFINE_EVENT_CONDITION(tipc_sk_class, name, \
+ TP_PROTO(struct sock *sk, struct sk_buff *skb, u16 dqueues, \
+ const char *header), \
+ TP_ARGS(sk, skb, dqueues, header), \
+ TP_CONDITION(tipc_sk_filtering(sk) && (cond)))
+DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit1, tipc_sk_overlimit1(sk, skb));
+DEFINE_SK_EVENT_FILTER_COND(tipc_sk_overlimit2, tipc_sk_overlimit2(sk, skb));
+
+DECLARE_EVENT_CLASS(tipc_link_class,
+
+ TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header),
+
+ TP_ARGS(l, dqueues, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __array(char, name, TIPC_MAX_LINK_NAME)
+ __dynamic_array(char, buf, (dqueues) ? LINK_LMAX : LINK_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header, header);
+ tipc_link_name_ext(l, __entry->name);
+ tipc_link_dump(l, dqueues, __get_str(buf));
+ ),
+
+ TP_printk("<%s> %s\n%s", __entry->name, __get_str(header),
+ __get_str(buf))
+);
+
+#define DEFINE_LINK_EVENT(name) \
+DEFINE_EVENT(tipc_link_class, name, \
+ TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header), \
+ TP_ARGS(l, dqueues, header))
+DEFINE_LINK_EVENT(tipc_link_dump);
+DEFINE_LINK_EVENT(tipc_link_conges);
+DEFINE_LINK_EVENT(tipc_link_timeout);
+DEFINE_LINK_EVENT(tipc_link_reset);
+
+#define DEFINE_LINK_EVENT_COND(name, cond) \
+DEFINE_EVENT_CONDITION(tipc_link_class, name, \
+ TP_PROTO(struct tipc_link *l, u16 dqueues, const char *header), \
+ TP_ARGS(l, dqueues, header), \
+ TP_CONDITION(cond))
+DEFINE_LINK_EVENT_COND(tipc_link_too_silent, tipc_link_too_silent(l));
+
+DECLARE_EVENT_CLASS(tipc_link_transmq_class,
+
+ TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
+
+ TP_ARGS(r, f, t, tq),
+
+ TP_STRUCT__entry(
+ __array(char, name, TIPC_MAX_LINK_NAME)
+ __field(u16, from)
+ __field(u16, to)
+ __field(u32, len)
+ __field(u16, fseqno)
+ __field(u16, lseqno)
+ ),
+
+ TP_fast_assign(
+ tipc_link_name_ext(r, __entry->name);
+ __entry->from = f;
+ __entry->to = t;
+ __entry->len = skb_queue_len(tq);
+ __entry->fseqno = msg_seqno(buf_msg(skb_peek(tq)));
+ __entry->lseqno = msg_seqno(buf_msg(skb_peek_tail(tq)));
+ ),
+
+ TP_printk("<%s> retrans req: [%u-%u] transmq: %u [%u-%u]\n",
+ __entry->name, __entry->from, __entry->to,
+ __entry->len, __entry->fseqno, __entry->lseqno)
+);
+
+DEFINE_EVENT(tipc_link_transmq_class, tipc_link_retrans,
+ TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
+ TP_ARGS(r, f, t, tq)
+);
+
+DEFINE_EVENT_PRINT(tipc_link_transmq_class, tipc_link_bc_ack,
+ TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
+ TP_ARGS(r, f, t, tq),
+ TP_printk("<%s> acked: [%u-%u] transmq: %u [%u-%u]\n",
+ __entry->name, __entry->from, __entry->to,
+ __entry->len, __entry->fseqno, __entry->lseqno)
+);
+
+DECLARE_EVENT_CLASS(tipc_node_class,
+
+ TP_PROTO(struct tipc_node *n, bool more, const char *header),
+
+ TP_ARGS(n, more, header),
+
+ TP_STRUCT__entry(
+ __string(header, header)
+ __field(u32, addr)
+ __dynamic_array(char, buf, (more) ? NODE_LMAX : NODE_LMIN)
+ ),
+
+ TP_fast_assign(
+ __assign_str(header, header);
+ __entry->addr = tipc_node_get_addr(n);
+ tipc_node_dump(n, more, __get_str(buf));
+ ),
+
+ TP_printk("<%x> %s\n%s", __entry->addr, __get_str(header),
+ __get_str(buf))
+);
+
+#define DEFINE_NODE_EVENT(name) \
+DEFINE_EVENT(tipc_node_class, name, \
+ TP_PROTO(struct tipc_node *n, bool more, const char *header), \
+ TP_ARGS(n, more, header))
+DEFINE_NODE_EVENT(tipc_node_dump);
+DEFINE_NODE_EVENT(tipc_node_create);
+DEFINE_NODE_EVENT(tipc_node_delete);
+DEFINE_NODE_EVENT(tipc_node_lost_contact);
+DEFINE_NODE_EVENT(tipc_node_timeout);
+DEFINE_NODE_EVENT(tipc_node_link_up);
+DEFINE_NODE_EVENT(tipc_node_link_down);
+DEFINE_NODE_EVENT(tipc_node_reset_links);
+DEFINE_NODE_EVENT(tipc_node_check_state);
+
+DECLARE_EVENT_CLASS(tipc_fsm_class,
+
+ TP_PROTO(const char *name, u32 os, u32 ns, int evt),
+
+ TP_ARGS(name, os, ns, evt),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(u32, os)
+ __field(u32, ns)
+ __field(u32, evt)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->os = os;
+ __entry->ns = ns;
+ __entry->evt = evt;
+ ),
+
+ TP_printk("<%s> %s--(%s)->%s\n", __get_str(name),
+ state_sym(__entry->os), evt_sym(__entry->evt),
+ state_sym(__entry->ns))
+);
+
+#define DEFINE_FSM_EVENT(fsm_name) \
+DEFINE_EVENT(tipc_fsm_class, fsm_name, \
+ TP_PROTO(const char *name, u32 os, u32 ns, int evt), \
+ TP_ARGS(name, os, ns, evt))
+DEFINE_FSM_EVENT(tipc_link_fsm);
+DEFINE_FSM_EVENT(tipc_node_fsm);
+
+TRACE_EVENT(tipc_l2_device_event,
+
+ TP_PROTO(struct net_device *dev, struct tipc_bearer *b,
+ unsigned long evt),
+
+ TP_ARGS(dev, b, evt),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev->name)
+ __string(b_name, b->name)
+ __field(unsigned long, evt)
+ __field(u8, b_up)
+ __field(u8, carrier)
+ __field(u8, oper)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev->name);
+ __assign_str(b_name, b->name);
+ __entry->evt = evt;
+ __entry->b_up = test_bit(0, &b->up);
+ __entry->carrier = netif_carrier_ok(dev);
+ __entry->oper = netif_oper_up(dev);
+ ),
+
+ TP_printk("%s on: <%s>/<%s> oper: %s carrier: %s bearer: %s\n",
+ dev_evt_sym(__entry->evt), __get_str(dev_name),
+ __get_str(b_name), (__entry->oper) ? "up" : "down",
+ (__entry->carrier) ? "ok" : "notok",
+ (__entry->b_up) ? "up" : "down")
+);
+
+#endif /* _TIPC_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
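
Each class above expands into trace_<event>() calls at the real call sites (link.c, node.c, socket.c in this series). A hedged sketch of two invocations; tracepoints compile to no-ops until enabled through tracefs, typically under /sys/kernel/debug/tracing/events/tipc/:

    /* sketch only; arguments follow the TP_PROTO of each class */
    trace_tipc_link_timeout(l, TIPC_DUMP_NONE, "link timeout!");
    trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_SK_SNDQ, "msg rejected!");

The state and event codes decoded by state_sym()/evt_sym() are hex mnemonics of the names they stand for (0xec1ab1e for ESTABLISH_EVT, 0xfa110e for FAILURE_EVT), so the raw values stay readable even without symbolic printing.
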
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 10dc59ce9c82..4d85d71f16e2 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -245,10 +245,8 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
}
err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
- if (err) {
- kfree_skb(_skb);
+ if (err)
goto out;
- }
}
err = 0;
out:
@@ -681,6 +679,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
if (err)
goto err;
+ if (remote.proto != local.proto) {
+ err = -EINVAL;
+ goto err;
+ }
+
/* Checking remote ip address */
rmcast = tipc_udp_is_mcast_addr(&remote);
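
Two independent fixes are folded into udp_media.c here. The first hunk removes a kfree_skb() that duplicated a free already performed inside tipc_udp_xmit() on failure (an assumption drawn from the removed error path: the callee consumes the skb), turning a double free into a single one:

    err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr);
    if (err)
        goto out;    /* _skb was already consumed by tipc_udp_xmit() */

The second hunk rejects enabling a UDP bearer whose local and remote addresses use different IP protocol versions, failing early with -EINVAL instead of misbehaving later.
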
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 311cec8e533d..78cb4a584080 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -55,8 +55,10 @@ enum {
static struct proto *saved_tcpv6_prot;
static DEFINE_MUTEX(tcpv6_prot_mutex);
+static struct proto *saved_tcpv4_prot;
+static DEFINE_MUTEX(tcpv4_prot_mutex);
static LIST_HEAD(device_list);
-static DEFINE_MUTEX(device_mutex);
+static DEFINE_SPINLOCK(device_spinlock);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static struct proto_ops tls_sw_proto_ops;
@@ -538,11 +540,14 @@ static struct tls_context *create_ctx(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tls_context *ctx;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
if (!ctx)
return NULL;
icsk->icsk_ulp_data = ctx;
+ ctx->setsockopt = sk->sk_prot->setsockopt;
+ ctx->getsockopt = sk->sk_prot->getsockopt;
+ ctx->sk_proto_close = sk->sk_prot->close;
return ctx;
}
@@ -552,7 +557,7 @@ static int tls_hw_prot(struct sock *sk)
struct tls_device *dev;
int rc = 0;
- mutex_lock(&device_mutex);
+ spin_lock_bh(&device_spinlock);
list_for_each_entry(dev, &device_list, dev_list) {
if (dev->feature && dev->feature(dev)) {
ctx = create_ctx(sk);
@@ -570,7 +575,7 @@ static int tls_hw_prot(struct sock *sk)
}
}
out:
- mutex_unlock(&device_mutex);
+ spin_unlock_bh(&device_spinlock);
return rc;
}
@@ -579,12 +584,17 @@ static void tls_hw_unhash(struct sock *sk)
struct tls_context *ctx = tls_get_ctx(sk);
struct tls_device *dev;
- mutex_lock(&device_mutex);
+ spin_lock_bh(&device_spinlock);
list_for_each_entry(dev, &device_list, dev_list) {
- if (dev->unhash)
+ if (dev->unhash) {
+ kref_get(&dev->kref);
+ spin_unlock_bh(&device_spinlock);
dev->unhash(dev, sk);
+ kref_put(&dev->kref, dev->release);
+ spin_lock_bh(&device_spinlock);
+ }
}
- mutex_unlock(&device_mutex);
+ spin_unlock_bh(&device_spinlock);
ctx->unhash(sk);
}
@@ -595,12 +605,17 @@ static int tls_hw_hash(struct sock *sk)
int err;
err = ctx->hash(sk);
- mutex_lock(&device_mutex);
+ spin_lock_bh(&device_spinlock);
list_for_each_entry(dev, &device_list, dev_list) {
- if (dev->hash)
+ if (dev->hash) {
+ kref_get(&dev->kref);
+ spin_unlock_bh(&device_spinlock);
err |= dev->hash(dev, sk);
+ kref_put(&dev->kref, dev->release);
+ spin_lock_bh(&device_spinlock);
+ }
}
- mutex_unlock(&device_mutex);
+ spin_unlock_bh(&device_spinlock);
if (err)
tls_hw_unhash(sk);
@@ -675,9 +690,6 @@ static int tls_init(struct sock *sk)
rc = -ENOMEM;
goto out;
}
- ctx->setsockopt = sk->sk_prot->setsockopt;
- ctx->getsockopt = sk->sk_prot->getsockopt;
- ctx->sk_proto_close = sk->sk_prot->close;
/* Build IPv6 TLS whenever the address of tcpv6 _prot changes */
if (ip_ver == TLSV6 &&
@@ -690,6 +702,16 @@ static int tls_init(struct sock *sk)
mutex_unlock(&tcpv6_prot_mutex);
}
+ if (ip_ver == TLSV4 &&
+ unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
+ mutex_lock(&tcpv4_prot_mutex);
+ if (likely(sk->sk_prot != saved_tcpv4_prot)) {
+ build_protos(tls_prots[TLSV4], sk->sk_prot);
+ smp_store_release(&saved_tcpv4_prot, sk->sk_prot);
+ }
+ mutex_unlock(&tcpv4_prot_mutex);
+ }
+
ctx->tx_conf = TLS_BASE;
ctx->rx_conf = TLS_BASE;
update_sk_prot(sk, ctx);
@@ -699,17 +721,17 @@ out:
void tls_register_device(struct tls_device *device)
{
- mutex_lock(&device_mutex);
+ spin_lock_bh(&device_spinlock);
list_add_tail(&device->dev_list, &device_list);
- mutex_unlock(&device_mutex);
+ spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_register_device);
void tls_unregister_device(struct tls_device *device)
{
- mutex_lock(&device_mutex);
+ spin_lock_bh(&device_spinlock);
list_del(&device->dev_list);
- mutex_unlock(&device_mutex);
+ spin_unlock_bh(&device_spinlock);
}
EXPORT_SYMBOL(tls_unregister_device);
@@ -721,8 +743,6 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
static int __init tls_register(void)
{
- build_protos(tls_prots[TLSV4], &tcp_prot);
-
tls_sw_proto_ops = inet_stream_ops;
tls_sw_proto_ops.splice_read = tls_sw_splice_read;
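
The new TLSv4 block mirrors the existing TLSv6 one: the protocol ops are now built lazily, the first time a socket with a given base proto enables the ULP, rather than once at module load (hence the deleted build_protos() call in tls_register()). The shape is classic double-checked locking, with smp_load_acquire()/smp_store_release() pairing up so that a reader observing the published pointer also observes the fully built tables. Distilled, with generic identifiers rather than the ones in the patch:

    if (unlikely(sk->sk_prot != smp_load_acquire(&saved_prot))) {
        mutex_lock(&prot_mutex);
        if (likely(sk->sk_prot != saved_prot)) {
            build_protos(prots, sk->sk_prot);
            /* publish only after prots is fully initialized */
            smp_store_release(&saved_prot, sk->sk_prot);
        }
        mutex_unlock(&prot_mutex);
    }
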
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 7b1af8b59cd2..11cdc8f7db63 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -686,16 +686,24 @@ static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk,
struct sk_psock *psock;
struct sock *sk_redir;
struct tls_rec *rec;
+ bool enospc, policy;
int err = 0, send;
- bool enospc;
+ u32 delta = 0;
+ policy = !(flags & MSG_SENDPAGE_NOPOLICY);
psock = sk_psock_get(sk);
- if (!psock)
+ if (!psock || !policy)
return tls_push_record(sk, flags, record_type);
more_data:
enospc = sk_msg_full(msg);
- if (psock->eval == __SK_NONE)
+ if (psock->eval == __SK_NONE) {
+ delta = msg->sg.size;
psock->eval = sk_psock_msg_verdict(sk, psock, msg);
+ if (delta < msg->sg.size)
+ delta -= msg->sg.size;
+ else
+ delta = 0;
+ }
if (msg->cork_bytes && msg->cork_bytes > msg->sg.size &&
!enospc && !full_record) {
err = -ENOSPC;
@@ -743,7 +751,7 @@ more_data:
msg->apply_bytes -= send;
if (msg->sg.size == 0)
tls_free_open_rec(sk);
- *copied -= send;
+ *copied -= (send + delta);
err = -EACCES;
}
@@ -935,10 +943,12 @@ fallback_to_reg_send:
tls_ctx->tx.overhead_size);
}
- ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_pl,
- try_to_copy);
- if (ret < 0)
- goto trim_sgl;
+ if (try_to_copy) {
+ ret = sk_msg_memcopy_from_iter(sk, &msg->msg_iter,
+ msg_pl, try_to_copy);
+ if (ret < 0)
+ goto trim_sgl;
+ }
/* Open records defined only if successfully copied, otherwise
* we would trim the sg but not reset the open record frags.
@@ -1010,8 +1020,8 @@ send_end:
return copied ? copied : ret;
}
-int tls_sw_sendpage(struct sock *sk, struct page *page,
- int offset, size_t size, int flags)
+int tls_sw_do_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
{
long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -1026,15 +1036,7 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
int ret = 0;
bool eor;
- if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
- MSG_SENDPAGE_NOTLAST))
- return -ENOTSUPP;
-
- /* No MSG_EOR from splice, only look at MSG_MORE */
eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
-
- lock_sock(sk);
-
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
/* Wait till there is any pending write on socket */
@@ -1138,10 +1140,34 @@ wait_for_memory:
}
sendpage_end:
ret = sk_stream_error(sk, flags, ret);
- release_sock(sk);
return copied ? copied : ret;
}
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+{
+ if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
+ return -ENOTSUPP;
+
+ return tls_sw_do_sendpage(sk, page, offset, size, flags);
+}
+
+int tls_sw_sendpage(struct sock *sk, struct page *page,
+ int offset, size_t size, int flags)
+{
+ int ret;
+
+ if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
+ return -ENOTSUPP;
+
+ lock_sock(sk);
+ ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
+ release_sock(sk);
+ return ret;
+}
+
static struct sk_buff *tls_wait_data(struct sock *sk, struct sk_psock *psock,
int flags, long timeo, int *err)
{
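
Splitting tls_sw_sendpage() follows the usual locked/unlocked wrapper pattern: the shared body assumes the socket lock is held, tls_sw_sendpage_locked() is for callers that already hold it (and may pass MSG_SENDPAGE_NOPOLICY to bypass the BPF verdict), while the plain variant takes and releases the lock itself. Hypothetical call sites:

    /* caller already under lock_sock(sk) */
    ret = tls_sw_sendpage_locked(sk, page, offset, size,
                                 flags | MSG_SENDPAGE_NOPOLICY);

    /* ordinary caller; locking handled internally */
    ret = tls_sw_sendpage(sk, page, offset, size, flags);
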
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ab27a2872935..43a1dec08825 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -107,6 +107,7 @@
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
+#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
@@ -504,9 +505,13 @@ out:
static int __vsock_bind_stream(struct vsock_sock *vsk,
struct sockaddr_vm *addr)
{
- static u32 port = LAST_RESERVED_PORT + 1;
+ static u32 port = 0;
struct sockaddr_vm new_addr;
+ if (!port)
+ port = LAST_RESERVED_PORT + 1 +
+ prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);
+
vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);
if (addr->svm_port == VMADDR_PORT_ANY) {
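
The autobind starting port is now randomized on first use instead of always being LAST_RESERVED_PORT + 1, which makes local stream ports harder to predict. Since prandom_u32_max(n) returns a value in [0, n - 1], the computed start is

    port = LAST_RESERVED_PORT + 1 + [0, U32_MAX - LAST_RESERVED_PORT - 1]
         = a value in [LAST_RESERVED_PORT + 1, U32_MAX]

so it can never fall inside the reserved range.
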
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index cb332adb84cd..c361ce782412 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -264,6 +264,31 @@ vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
}
static int
+vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
+ struct sockaddr_vm *dst,
+ enum vmci_transport_packet_type type,
+ u64 size,
+ u64 mode,
+ struct vmci_transport_waiting_info *wait,
+ u16 proto,
+ struct vmci_handle handle)
+{
+ struct vmci_transport_packet *pkt;
+ int err;
+
+ pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
+ mode, wait, proto, handle,
+ true);
+ kfree(pkt);
+
+ return err;
+}
+
+static int
vmci_transport_send_control_pkt(struct sock *sk,
enum vmci_transport_packet_type type,
u64 size,
@@ -272,9 +297,7 @@ vmci_transport_send_control_pkt(struct sock *sk,
u16 proto,
struct vmci_handle handle)
{
- struct vmci_transport_packet *pkt;
struct vsock_sock *vsk;
- int err;
vsk = vsock_sk(sk);
@@ -284,17 +307,10 @@ vmci_transport_send_control_pkt(struct sock *sk,
if (!vsock_addr_bound(&vsk->remote_addr))
return -EINVAL;
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return -ENOMEM;
-
- err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
- &vsk->remote_addr, type, size,
- mode, wait, proto, handle,
- true);
- kfree(pkt);
-
- return err;
+ return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
+ &vsk->remote_addr,
+ type, size, mode,
+ wait, proto, handle);
}
static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
@@ -312,12 +328,29 @@ static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
static int vmci_transport_send_reset(struct sock *sk,
struct vmci_transport_packet *pkt)
{
+ struct sockaddr_vm *dst_ptr;
+ struct sockaddr_vm dst;
+ struct vsock_sock *vsk;
+
if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
return 0;
- return vmci_transport_send_control_pkt(sk,
- VMCI_TRANSPORT_PACKET_TYPE_RST,
- 0, 0, NULL, VSOCK_PROTO_INVALID,
- VMCI_INVALID_HANDLE);
+
+ vsk = vsock_sk(sk);
+
+ if (!vsock_addr_bound(&vsk->local_addr))
+ return -EINVAL;
+
+ if (vsock_addr_bound(&vsk->remote_addr)) {
+ dst_ptr = &vsk->remote_addr;
+ } else {
+ vsock_addr_init(&dst, pkt->dg.src.context,
+ pkt->src_port);
+ dst_ptr = &dst;
+ }
+ return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
+ VMCI_TRANSPORT_PACKET_TYPE_RST,
+ 0, 0, NULL, VSOCK_PROTO_INVALID,
+ VMCI_INVALID_HANDLE);
}
static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
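
The refactoring makes the allocate-and-send sequence reusable without a socket, and lets vmci_transport_send_reset() reply even when the socket's remote address was never bound: in that case the destination is reconstructed from the offending packet's source. A sketch of that reply path, mirroring the code above:

    /* peer known only from the incoming packet */
    struct sockaddr_vm dst;

    vsock_addr_init(&dst, pkt->dg.src.context, pkt->src_port);
    err = vmci_transport_alloc_send_control_pkt(&vsk->local_addr, &dst,
                                                VMCI_TRANSPORT_PACKET_TYPE_RST,
                                                0, 0, NULL, VSOCK_PROTO_INVALID,
                                                VMCI_INVALID_HANDLE);
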
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 1d84f91bbfb0..72a224ce8e0a 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o ocb.o
+cfg80211-y += pmsr.o
cfg80211-$(CONFIG_OF) += of.o
cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 2db713d18f71..7dc1bbd0888f 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -6,6 +6,7 @@
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2018 Intel Corporation
*/
#include <linux/export.h>
@@ -747,6 +748,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
case NL80211_CHAN_WIDTH_20:
if (!ht_cap->ht_supported)
return false;
+ /* fall through */
case NL80211_CHAN_WIDTH_20_NOHT:
prohibited_flags |= IEEE80211_CHAN_NO_20MHZ;
width = 20;
@@ -769,6 +771,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
return false;
+ /* fall through */
case NL80211_CHAN_WIDTH_80:
if (!vht_cap->vht_supported)
return false;
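
The added /* fall through */ comments mark the case fall-throughs as intentional; compilers of this era treat such comments as suppressions for -Wimplicit-fallthrough (later kernels switched to the fallthrough pseudo-keyword). Schematically:

    switch (width) {
    case NL80211_CHAN_WIDTH_20:
        if (!ht_cap->ht_supported)
            return false;
        /* fall through: 20 MHz HT also obeys the NOHT restrictions */
    case NL80211_CHAN_WIDTH_20_NOHT:
        prohibited_flags |= IEEE80211_CHAN_NO_20MHZ;
        break;
    }
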
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 5bd01058b9e6..623dfe5e211c 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -4,6 +4,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -190,11 +191,25 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
return err;
}
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+ if (!wdev->netdev)
+ continue;
+ nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE);
+ }
+ nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY);
+
wiphy_net_set(&rdev->wiphy, net);
err = device_rename(&rdev->wiphy.dev, dev_name(&rdev->wiphy.dev));
WARN_ON(err);
+ nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
+ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+ if (!wdev->netdev)
+ continue;
+ nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
+ }
+
return 0;
}
@@ -664,6 +679,34 @@ int wiphy_register(struct wiphy *wiphy)
return -EINVAL;
#endif
+ if (WARN_ON(wiphy->pmsr_capa && !wiphy->pmsr_capa->ftm.supported))
+ return -EINVAL;
+
+ if (wiphy->pmsr_capa && wiphy->pmsr_capa->ftm.supported) {
+ if (WARN_ON(!wiphy->pmsr_capa->ftm.asap &&
+ !wiphy->pmsr_capa->ftm.non_asap))
+ return -EINVAL;
+ if (WARN_ON(!wiphy->pmsr_capa->ftm.preambles ||
+ !wiphy->pmsr_capa->ftm.bandwidths))
+ return -EINVAL;
+ if (WARN_ON(wiphy->pmsr_capa->ftm.preambles &
+ ~(BIT(NL80211_PREAMBLE_LEGACY) |
+ BIT(NL80211_PREAMBLE_HT) |
+ BIT(NL80211_PREAMBLE_VHT) |
+ BIT(NL80211_PREAMBLE_DMG))))
+ return -EINVAL;
+ if (WARN_ON(wiphy->pmsr_capa->ftm.bandwidths &
+ ~(BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160) |
+ BIT(NL80211_CHAN_WIDTH_5) |
+ BIT(NL80211_CHAN_WIDTH_10))))
+ return -EINVAL;
+ }
+
/*
* if a wiphy has unsupported modes for regulatory channel enforcement,
* opt-out of enforcement checking
@@ -1087,6 +1130,8 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
ASSERT_RTNL();
ASSERT_WDEV_LOCK(wdev);
+ cfg80211_pmsr_wdev_down(wdev);
+
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
__cfg80211_leave_ibss(rdev, dev, true);
@@ -1174,6 +1219,9 @@ void cfg80211_init_wdev(struct cfg80211_registered_device *rdev,
spin_lock_init(&wdev->event_lock);
INIT_LIST_HEAD(&wdev->mgmt_registrations);
spin_lock_init(&wdev->mgmt_registrations_lock);
+ INIT_LIST_HEAD(&wdev->pmsr_list);
+ spin_lock_init(&wdev->pmsr_lock);
+ INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk);
/*
* We get here also when the interface changes network namespaces,
diff --git a/net/wireless/core.h b/net/wireless/core.h
index c61dbba8bf47..c5d6f3418601 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -3,6 +3,7 @@
* Wireless configuration interface internals.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2018 Intel Corporation
*/
#ifndef __NET_WIRELESS_CORE_H
#define __NET_WIRELESS_CORE_H
@@ -530,4 +531,8 @@ void cfg80211_stop_nan(struct cfg80211_registered_device *rdev,
void cfg80211_cqm_config_free(struct wireless_dev *wdev);
+void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid);
+void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev);
+void cfg80211_pmsr_free_wk(struct work_struct *work);
+
#endif /* __NET_WIRELESS_CORE_H */
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index 6beab0cfcb99..55214fe925b2 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -75,7 +75,7 @@ static void *lib80211_ccmp_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+ priv->tfm = crypto_alloc_cipher("aes", 0, 0);
if (IS_ERR(priv->tfm)) {
priv->tfm = NULL;
goto fail;
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index b5e235573c8a..35f06563207d 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -99,7 +99,7 @@ static void *lib80211_tkip_init(int key_idx)
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, 0);
if (IS_ERR(priv->tx_tfm_arc4)) {
priv->tx_tfm_arc4 = NULL;
goto fail;
@@ -111,7 +111,7 @@ static void *lib80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_cipher("arc4", 0, 0);
if (IS_ERR(priv->rx_tfm_arc4)) {
priv->rx_tfm_arc4 = NULL;
goto fail;
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c
index 6015f6b542a6..20c1ad63ad44 100644
--- a/net/wireless/lib80211_crypt_wep.c
+++ b/net/wireless/lib80211_crypt_wep.c
@@ -48,13 +48,13 @@ static void *lib80211_wep_init(int keyidx)
goto fail;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_cipher("arc4", 0, 0);
if (IS_ERR(priv->tx_tfm)) {
priv->tx_tfm = NULL;
goto fail;
}
- priv->rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_cipher("arc4", 0, 0);
if (IS_ERR(priv->rx_tfm)) {
priv->rx_tfm = NULL;
goto fail;
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 12b3edf70a7b..1615e503f8e3 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -272,11 +272,11 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
p1 = (u8*)(ht_capa);
p2 = (u8*)(ht_capa_mask);
- for (i = 0; i<sizeof(*ht_capa); i++)
+ for (i = 0; i < sizeof(*ht_capa); i++)
p1[i] &= p2[i];
}
-/* Do a logical ht_capa &= ht_capa_mask. */
+/* Do a logical vht_capa &= vht_capa_mask. */
void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
const struct ieee80211_vht_cap *vht_capa_mask)
{
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 744b5851bbf9..5e49492d5911 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -240,7 +240,63 @@ nl80211_ftm_responder_policy[NL80211_FTM_RESP_ATTR_MAX + 1] = {
.len = U8_MAX },
};
-static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+static const struct nla_policy
+nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = {
+ [NL80211_PMSR_FTM_REQ_ATTR_ASAP] = { .type = NLA_FLAG },
+ [NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE] = { .type = NLA_U32 },
+ [NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP] =
+ NLA_POLICY_MAX(NLA_U8, 15),
+ [NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD] = { .type = NLA_U16 },
+ [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] =
+ NLA_POLICY_MAX(NLA_U8, 15),
+ [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] =
+ NLA_POLICY_MAX(NLA_U8, 15),
+ [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 },
+ [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG },
+ [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG },
+};
+
+static const struct nla_policy
+nl80211_pmsr_req_data_policy[NL80211_PMSR_TYPE_MAX + 1] = {
+ [NL80211_PMSR_TYPE_FTM] =
+ NLA_POLICY_NESTED(NL80211_PMSR_FTM_REQ_ATTR_MAX,
+ nl80211_pmsr_ftm_req_attr_policy),
+};
+
+static const struct nla_policy
+nl80211_pmsr_req_attr_policy[NL80211_PMSR_REQ_ATTR_MAX + 1] = {
+ [NL80211_PMSR_REQ_ATTR_DATA] =
+ NLA_POLICY_NESTED(NL80211_PMSR_TYPE_MAX,
+ nl80211_pmsr_req_data_policy),
+ [NL80211_PMSR_REQ_ATTR_GET_AP_TSF] = { .type = NLA_FLAG },
+};
+
+static const struct nla_policy
+nl80211_psmr_peer_attr_policy[NL80211_PMSR_PEER_ATTR_MAX + 1] = {
+ [NL80211_PMSR_PEER_ATTR_ADDR] = NLA_POLICY_ETH_ADDR,
+ /*
+ * we could specify this again to be the top-level policy,
+ * but that would open us up to recursion problems ...
+ */
+ [NL80211_PMSR_PEER_ATTR_CHAN] = { .type = NLA_NESTED },
+ [NL80211_PMSR_PEER_ATTR_REQ] =
+ NLA_POLICY_NESTED(NL80211_PMSR_REQ_ATTR_MAX,
+ nl80211_pmsr_req_attr_policy),
+ [NL80211_PMSR_PEER_ATTR_RESP] = { .type = NLA_REJECT },
+};
+
+static const struct nla_policy
+nl80211_pmsr_attr_policy[NL80211_PMSR_ATTR_MAX + 1] = {
+ [NL80211_PMSR_ATTR_MAX_PEERS] = { .type = NLA_REJECT },
+ [NL80211_PMSR_ATTR_REPORT_AP_TSF] = { .type = NLA_REJECT },
+ [NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR] = { .type = NLA_REJECT },
+ [NL80211_PMSR_ATTR_TYPE_CAPA] = { .type = NLA_REJECT },
+ [NL80211_PMSR_ATTR_PEERS] =
+ NLA_POLICY_NESTED_ARRAY(NL80211_PMSR_PEER_ATTR_MAX,
+ nl80211_psmr_peer_attr_policy),
+};
+
+const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING,
.len = 20-1 },
@@ -497,6 +553,10 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
.type = NLA_NESTED,
.validation_data = nl80211_ftm_responder_policy,
},
+ [NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1),
+ [NL80211_ATTR_PEER_MEASUREMENTS] =
+ NLA_POLICY_NESTED(NL80211_PMSR_FTM_REQ_ATTR_MAX,
+ nl80211_pmsr_attr_policy),
};
/* policy for the key attributes */
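
The peer-measurement attributes lean on nested policy validation: each NLA_POLICY_NESTED() entry hands the core netlink validator an inner policy, which is why pmsr.c (added later in this diff) can re-parse the nests with a NULL policy and a "no validation needed" comment. The generic shape, with illustrative names rather than real nl80211 attributes:

    static const struct nla_policy inner_policy[INNER_ATTR_MAX + 1] = {
        [INNER_ATTR_FLAG]  = { .type = NLA_FLAG },
        [INNER_ATTR_LEVEL] = NLA_POLICY_MAX(NLA_U8, 15),
    };

    static const struct nla_policy outer_policy[OUTER_ATTR_MAX + 1] = {
        [OUTER_ATTR_NEST] =
            NLA_POLICY_NESTED(INNER_ATTR_MAX, inner_policy),
    };
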
@@ -637,9 +697,9 @@ nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = {
[NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 },
};
-static int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
- struct cfg80211_registered_device **rdev,
- struct wireless_dev **wdev)
+int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
+ struct cfg80211_registered_device **rdev,
+ struct wireless_dev **wdev)
{
int err;
@@ -684,8 +744,8 @@ static int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
}
/* message building helper */
-static inline void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
- int flags, u8 cmd)
+void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
+ int flags, u8 cmd)
{
/* since there is no private header just add the generic one */
return genlmsg_put(skb, portid, seq, &nl80211_fam, flags, cmd);
@@ -1615,6 +1675,91 @@ static int nl80211_add_commands_unsplit(struct cfg80211_registered_device *rdev,
return -ENOBUFS;
}
+static int
+nl80211_send_pmsr_ftm_capa(const struct cfg80211_pmsr_capabilities *cap,
+ struct sk_buff *msg)
+{
+ struct nlattr *ftm;
+
+ if (!cap->ftm.supported)
+ return 0;
+
+ ftm = nla_nest_start(msg, NL80211_PMSR_TYPE_FTM);
+ if (!ftm)
+ return -ENOBUFS;
+
+ if (cap->ftm.asap && nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_ASAP))
+ return -ENOBUFS;
+ if (cap->ftm.non_asap &&
+ nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP))
+ return -ENOBUFS;
+ if (cap->ftm.request_lci &&
+ nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI))
+ return -ENOBUFS;
+ if (cap->ftm.request_civicloc &&
+ nla_put_flag(msg, NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC))
+ return -ENOBUFS;
+ if (nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES,
+ cap->ftm.preambles))
+ return -ENOBUFS;
+ if (nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS,
+ cap->ftm.bandwidths))
+ return -ENOBUFS;
+ if (cap->ftm.max_bursts_exponent >= 0 &&
+ nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT,
+ cap->ftm.max_bursts_exponent))
+ return -ENOBUFS;
+ if (cap->ftm.max_ftms_per_burst &&
+ nla_put_u32(msg, NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST,
+ cap->ftm.max_ftms_per_burst))
+ return -ENOBUFS;
+
+ nla_nest_end(msg, ftm);
+ return 0;
+}
+
+static int nl80211_send_pmsr_capa(struct cfg80211_registered_device *rdev,
+ struct sk_buff *msg)
+{
+ const struct cfg80211_pmsr_capabilities *cap = rdev->wiphy.pmsr_capa;
+ struct nlattr *pmsr, *caps;
+
+ if (!cap)
+ return 0;
+
+ /*
+ * we don't need to clean up anything here since the caller
+ * will genlmsg_cancel() if we fail
+ */
+
+ pmsr = nla_nest_start(msg, NL80211_ATTR_PEER_MEASUREMENTS);
+ if (!pmsr)
+ return -ENOBUFS;
+
+ if (nla_put_u32(msg, NL80211_PMSR_ATTR_MAX_PEERS, cap->max_peers))
+ return -ENOBUFS;
+
+ if (cap->report_ap_tsf &&
+ nla_put_flag(msg, NL80211_PMSR_ATTR_REPORT_AP_TSF))
+ return -ENOBUFS;
+
+ if (cap->randomize_mac_addr &&
+ nla_put_flag(msg, NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR))
+ return -ENOBUFS;
+
+ caps = nla_nest_start(msg, NL80211_PMSR_ATTR_TYPE_CAPA);
+ if (!caps)
+ return -ENOBUFS;
+
+ if (nl80211_send_pmsr_ftm_capa(cap, msg))
+ return -ENOBUFS;
+
+ nla_nest_end(msg, caps);
+ nla_nest_end(msg, pmsr);
+
+ return 0;
+}
+
struct nl80211_dump_wiphy_state {
s64 filter_wiphy;
long start;
@@ -1706,6 +1851,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
state->split_start++;
if (state->split)
break;
+ /* fall through */
case 1:
if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
sizeof(u32) * rdev->wiphy.n_cipher_suites,
@@ -1752,6 +1898,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
state->split_start++;
if (state->split)
break;
+ /* fall through */
case 2:
if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
rdev->wiphy.interface_modes))
@@ -1759,6 +1906,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
state->split_start++;
if (state->split)
break;
+ /* fall through */
case 3:
nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
if (!nl_bands)
@@ -1784,6 +1932,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
state->chan_start++;
if (state->split)
break;
+ /* fall through */
default:
/* add frequencies */
nl_freqs = nla_nest_start(
@@ -1837,6 +1986,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
state->split_start++;
if (state->split)
break;
+ /* fall through */
case 4:
nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS);
if (!nl_cmds)
@@ -1863,6 +2013,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
state->split_start++;
if (state->split)
break;
+ /* fall through */
case 5:
if (rdev->ops->remain_on_channel &&
(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
@@ -1880,6 +2031,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
state->split_start++;
if (state->split)
break;
+ /* fall through */
case 6:
#ifdef CONFIG_PM
if (nl80211_send_wowlan(msg, rdev, state->split))
@@ -1890,6 +2042,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
#else
state->split_start++;
#endif
+ /* fall through */
case 7:
if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
rdev->wiphy.software_iftypes))
@@ -1902,6 +2055,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
state->split_start++;
if (state->split)
break;
+ /* fall through */
case 8:
if ((rdev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
@@ -2118,6 +2272,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
goto nla_put_failure;
}
+ state->split_start++;
+ break;
+ case 14:
+ if (nl80211_send_pmsr_capa(rdev, msg))
+ goto nla_put_failure;
+
/* done */
state->split_start = 0;
break;
@@ -2318,9 +2478,9 @@ static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
wdev->iftype == NL80211_IFTYPE_P2P_GO;
}
-static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
- struct genl_info *info,
- struct cfg80211_chan_def *chandef)
+int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ struct genl_info *info,
+ struct cfg80211_chan_def *chandef)
{
struct netlink_ext_ack *extack = info->extack;
struct nlattr **attrs = info->attrs;
@@ -2794,12 +2954,6 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static inline u64 wdev_id(struct wireless_dev *wdev)
-{
- return (u64)wdev->identifier |
- ((u64)wiphy_to_rdev(wdev->wiphy)->wiphy_idx << 32);
-}
-
static int nl80211_send_chandef(struct sk_buff *msg,
const struct cfg80211_chan_def *chandef)
{
@@ -2832,14 +2986,15 @@ static int nl80211_send_chandef(struct sk_buff *msg,
static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev, bool removal)
+ struct wireless_dev *wdev,
+ enum nl80211_commands cmd)
{
struct net_device *dev = wdev->netdev;
- u8 cmd = NL80211_CMD_NEW_INTERFACE;
void *hdr;
- if (removal)
- cmd = NL80211_CMD_DEL_INTERFACE;
+ WARN_ON(cmd != NL80211_CMD_NEW_INTERFACE &&
+ cmd != NL80211_CMD_DEL_INTERFACE &&
+ cmd != NL80211_CMD_SET_INTERFACE);
hdr = nl80211hdr_put(msg, portid, seq, flags, cmd);
if (!hdr)
@@ -2987,7 +3142,8 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
}
if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- rdev, wdev, false) < 0) {
+ rdev, wdev,
+ NL80211_CMD_NEW_INTERFACE) < 0) {
goto out;
}
if_idx++;
@@ -3017,7 +3173,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
- rdev, wdev, false) < 0) {
+ rdev, wdev, NL80211_CMD_NEW_INTERFACE) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
@@ -3207,6 +3363,12 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
if (!err && params.use_4addr != -1)
dev->ieee80211_ptr->use_4addr = params.use_4addr;
+ if (change && !err) {
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ nl80211_notify_iface(rdev, wdev, NL80211_CMD_SET_INTERFACE);
+ }
+
return err;
}
@@ -3298,7 +3460,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
}
if (nl80211_send_iface(msg, info->snd_portid, info->snd_seq, 0,
- rdev, wdev, false) < 0) {
+ rdev, wdev, NL80211_CMD_NEW_INTERFACE) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
@@ -4521,8 +4683,7 @@ static int parse_station_flags(struct genl_info *info,
return 0;
}
-static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
- int attr)
+bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info, int attr)
{
struct nlattr *rate;
u32 bitrate;
@@ -4731,6 +4892,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid,
PUT_SINFO(LOCAL_PM, local_pm, u32);
PUT_SINFO(PEER_PM, peer_pm, u32);
PUT_SINFO(NONPEER_PM, nonpeer_pm, u32);
+ PUT_SINFO(CONNECTED_TO_GATE, connected_to_gate, u8);
if (sinfo->filled & BIT_ULL(NL80211_STA_INFO_BSS_PARAM)) {
bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
@@ -6122,7 +6284,9 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
nla_put_u16(msg, NL80211_MESHCONF_AWAKE_WINDOW,
cur_params.dot11MeshAwakeWindowDuration) ||
nla_put_u32(msg, NL80211_MESHCONF_PLINK_TIMEOUT,
- cur_params.plink_timeout))
+ cur_params.plink_timeout) ||
+ nla_put_u8(msg, NL80211_MESHCONF_CONNECTED_TO_GATE,
+ cur_params.dot11MeshConnectedToMeshGate))
goto nla_put_failure;
nla_nest_end(msg, pinfoattr);
genlmsg_end(msg, hdr);
@@ -6179,6 +6343,7 @@ nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
NL80211_MESH_POWER_MAX),
[NL80211_MESHCONF_AWAKE_WINDOW] = { .type = NLA_U16 },
[NL80211_MESHCONF_PLINK_TIMEOUT] = { .type = NLA_U32 },
+ [NL80211_MESHCONF_CONNECTED_TO_GATE] = NLA_POLICY_RANGE(NLA_U8, 0, 1),
};
static const struct nla_policy
@@ -6290,6 +6455,9 @@ do { \
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, mask,
NL80211_MESHCONF_RSSI_THRESHOLD,
nla_get_s32);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConnectedToMeshGate, mask,
+ NL80211_MESHCONF_CONNECTED_TO_GATE,
+ nla_get_u8);
/*
* Check HT operation mode based on
* IEEE 802.11-2016 9.4.2.57 HT Operation element.
@@ -6855,8 +7023,8 @@ static int parse_bss_select(struct nlattr *nla, struct wiphy *wiphy,
return 0;
}
-static int nl80211_parse_random_mac(struct nlattr **attrs,
- u8 *mac_addr, u8 *mac_addr_mask)
+int nl80211_parse_random_mac(struct nlattr **attrs,
+ u8 *mac_addr, u8 *mac_addr_mask)
{
int i;
@@ -7822,6 +7990,60 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
return err;
}
+static int nl80211_notify_radar_detection(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_chan_def chandef;
+ enum nl80211_dfs_regions dfs_region;
+ int err;
+
+ dfs_region = reg_get_dfs_region(wiphy);
+ if (dfs_region == NL80211_DFS_UNSET) {
+ GENL_SET_ERR_MSG(info,
+ "DFS Region is not set. Unexpected Radar indication");
+ return -EINVAL;
+ }
+
+ err = nl80211_parse_chandef(rdev, info, &chandef);
+ if (err) {
+ GENL_SET_ERR_MSG(info, "Unable to extract chandef info");
+ return err;
+ }
+
+ err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype);
+ if (err < 0) {
+ GENL_SET_ERR_MSG(info, "chandef is invalid");
+ return err;
+ }
+
+ if (err == 0) {
+ GENL_SET_ERR_MSG(info,
+ "Unexpected Radar indication for chandef/iftype");
+ return -EINVAL;
+ }
+
+ /* If the kernel has already detected radar on this channel, do not
+ * process this notification again; just return success.
+ */
+ if (chandef.chan->dfs_state == NL80211_DFS_UNAVAILABLE)
+ return 0;
+
+ cfg80211_set_dfs_state(wiphy, &chandef, NL80211_DFS_UNAVAILABLE);
+
+ cfg80211_sched_dfs_chan_update(rdev);
+
+ memcpy(&rdev->radar_chandef, &chandef, sizeof(chandef));
+
+ /* Propagate this notification to other radios as well */
+ queue_work(cfg80211_wq, &rdev->propagate_radar_detect_wk);
+
+ return 0;
+}
+
static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -7870,6 +8092,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
}
memset(&params, 0, sizeof(params));
+ params.beacon_csa.ftm_responder = -1;
if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
!info->attrs[NL80211_ATTR_CH_SWITCH_COUNT])
@@ -8929,8 +9152,10 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_CONTROL_PORT_OVER_NL80211]) {
int r = validate_pae_over_nl80211(rdev, info);
- if (r < 0)
+ if (r < 0) {
+ kzfree(connkeys);
return r;
+ }
ibss.control_port_over_nl80211 = true;
}
@@ -13898,6 +14123,22 @@ static const struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_PEER_MEASUREMENT_START,
+ .doit = nl80211_pmsr_start,
+ .policy = nl80211_policy,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_NOTIFY_RADAR,
+ .doit = nl80211_notify_radar_detection,
+ .policy = nl80211_policy,
+ .flags = GENL_UNS_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_family nl80211_fam __ro_after_init = {
@@ -13945,15 +14186,11 @@ void nl80211_notify_iface(struct cfg80211_registered_device *rdev,
{
struct sk_buff *msg;
- WARN_ON(cmd != NL80211_CMD_NEW_INTERFACE &&
- cmd != NL80211_CMD_DEL_INTERFACE);
-
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
- if (nl80211_send_iface(msg, 0, 0, 0, rdev, wdev,
- cmd == NL80211_CMD_DEL_INTERFACE) < 0) {
+ if (nl80211_send_iface(msg, 0, 0, 0, rdev, wdev, cmd) < 0) {
nlmsg_free(msg);
return;
}
@@ -14572,7 +14809,8 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
}
void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
- const u8* ie, u8 ie_len, gfp_t gfp)
+ const u8 *ie, u8 ie_len,
+ int sig_dbm, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
@@ -14598,7 +14836,9 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
(ie_len && ie &&
- nla_put(msg, NL80211_ATTR_IE, ie_len , ie)))
+ nla_put(msg, NL80211_ATTR_IE, ie_len, ie)) ||
+ (sig_dbm &&
+ nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -15881,6 +16121,8 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
} else if (wdev->conn_owner_nlportid == notify->portid) {
schedule_work(&wdev->disconnect_wk);
}
+
+ cfg80211_release_pmsr(wdev, notify->portid);
}
spin_lock_bh(&rdev->beacon_registrations_lock);
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 79e47fe60c35..531c82dcba6b 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -1,4 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright (C) 2018 Intel Corporation
+ */
#ifndef __NET_WIRELESS_NL80211_H
#define __NET_WIRELESS_NL80211_H
@@ -6,6 +10,30 @@
int nl80211_init(void);
void nl80211_exit(void);
+
+extern const struct nla_policy nl80211_policy[NUM_NL80211_ATTR];
+
+void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
+ int flags, u8 cmd);
+bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
+ int attr);
+
+static inline u64 wdev_id(struct wireless_dev *wdev)
+{
+ return (u64)wdev->identifier |
+ ((u64)wiphy_to_rdev(wdev->wiphy)->wiphy_idx << 32);
+}
+
+int nl80211_prepare_wdev_dump(struct netlink_callback *cb,
+ struct cfg80211_registered_device **rdev,
+ struct wireless_dev **wdev);
+
+int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
+ struct genl_info *info,
+ struct cfg80211_chan_def *chandef);
+int nl80211_parse_random_mac(struct nlattr **attrs,
+ u8 *mac_addr, u8 *mac_addr_mask);
+
void nl80211_notify_wiphy(struct cfg80211_registered_device *rdev,
enum nl80211_commands cmd);
void nl80211_notify_iface(struct cfg80211_registered_device *rdev,
@@ -95,4 +123,8 @@ void nl80211_send_ap_stopped(struct wireless_dev *wdev);
void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);
+/* peer measurement */
+int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info);
+int nl80211_pmsr_dump_results(struct sk_buff *skb, struct netlink_callback *cb);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
new file mode 100644
index 000000000000..de9286703280
--- /dev/null
+++ b/net/wireless/pmsr.c
@@ -0,0 +1,590 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Intel Corporation
+ */
+#ifndef __PMSR_H
+#define __PMSR_H
+#include <net/cfg80211.h>
+#include "core.h"
+#include "nl80211.h"
+#include "rdev-ops.h"
+
+static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
+ struct nlattr *ftmreq,
+ struct cfg80211_pmsr_request_peer *out,
+ struct genl_info *info)
+{
+ const struct cfg80211_pmsr_capabilities *capa = rdev->wiphy.pmsr_capa;
+ struct nlattr *tb[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1];
+ u32 preamble = NL80211_PREAMBLE_DMG; /* only optional in DMG */
+
+ /* validate existing data */
+ if (!(rdev->wiphy.pmsr_capa->ftm.bandwidths & BIT(out->chandef.width))) {
+ NL_SET_ERR_MSG(info->extack, "FTM: unsupported bandwidth");
+ return -EINVAL;
+ }
+
+ /* no validation needed - was already done via nested policy */
+ nla_parse_nested(tb, NL80211_PMSR_FTM_REQ_ATTR_MAX, ftmreq, NULL, NULL);
+
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE])
+ preamble = nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]);
+
+ /* set up values - struct is 0-initialized */
+ out->ftm.requested = true;
+
+ switch (out->chandef.chan->band) {
+ case NL80211_BAND_60GHZ:
+ /* optional */
+ break;
+ default:
+ if (!tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE]) {
+ NL_SET_ERR_MSG(info->extack,
+ "FTM: must specify preamble");
+ return -EINVAL;
+ }
+ }
+
+ if (!(capa->ftm.preambles & BIT(preamble))) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE],
+ "FTM: invalid preamble");
+ return -EINVAL;
+ }
+
+ out->ftm.preamble = preamble;
+
+ out->ftm.burst_period = 0;
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
+ out->ftm.burst_period =
+ nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
+
+ out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
+ if (out->ftm.asap && !capa->ftm.asap) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP],
+ "FTM: ASAP mode not supported");
+ return -EINVAL;
+ }
+
+ if (!out->ftm.asap && !capa->ftm.non_asap) {
+ NL_SET_ERR_MSG(info->extack,
+ "FTM: non-ASAP mode not supported");
+ return -EINVAL;
+ }
+
+ out->ftm.num_bursts_exp = 0;
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
+ out->ftm.num_bursts_exp =
+ nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
+
+ if (capa->ftm.max_bursts_exponent >= 0 &&
+ out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP],
+ "FTM: max NUM_BURSTS_EXP must be set lower than the device limit");
+ return -EINVAL;
+ }
+
+ out->ftm.burst_duration = 15;
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
+ out->ftm.burst_duration =
+ nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
+
+ out->ftm.ftms_per_burst = 0;
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
+ out->ftm.ftms_per_burst =
+ nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST]);
+
+ if (capa->ftm.max_ftms_per_burst &&
+ (out->ftm.ftms_per_burst > capa->ftm.max_ftms_per_burst ||
+ out->ftm.ftms_per_burst == 0)) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST],
+ "FTM: FTMs per burst must be set lower than the device limit but non-zero");
+ return -EINVAL;
+ }
+
+ out->ftm.ftmr_retries = 3;
+ if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
+ out->ftm.ftmr_retries =
+ nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
+
+ out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
+ if (out->ftm.request_lci && !capa->ftm.request_lci) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI],
+ "FTM: LCI request not supported");
+ }
+
+ out->ftm.request_civicloc =
+ !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC];
+ if (out->ftm.request_civicloc && !capa->ftm.request_civicloc) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC],
+ "FTM: civic location request not supported");
+ }
+
+ return 0;
+}
+
+static int pmsr_parse_peer(struct cfg80211_registered_device *rdev,
+ struct nlattr *peer,
+ struct cfg80211_pmsr_request_peer *out,
+ struct genl_info *info)
+{
+ struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1];
+ struct nlattr *req[NL80211_PMSR_REQ_ATTR_MAX + 1];
+ struct nlattr *treq;
+ int err, rem;
+
+ /* no validation needed - was already done via nested policy */
+ nla_parse_nested(tb, NL80211_PMSR_PEER_ATTR_MAX, peer, NULL, NULL);
+
+ if (!tb[NL80211_PMSR_PEER_ATTR_ADDR] ||
+ !tb[NL80211_PMSR_PEER_ATTR_CHAN] ||
+ !tb[NL80211_PMSR_PEER_ATTR_REQ]) {
+ NL_SET_ERR_MSG_ATTR(info->extack, peer,
+ "insufficient peer data");
+ return -EINVAL;
+ }
+
+ memcpy(out->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN);
+
+ /* reuse info->attrs */
+ memset(info->attrs, 0, sizeof(*info->attrs) * (NL80211_ATTR_MAX + 1));
+ /* need to validate here, we don't want to have validation recursion */
+ err = nla_parse_nested(info->attrs, NL80211_ATTR_MAX,
+ tb[NL80211_PMSR_PEER_ATTR_CHAN],
+ nl80211_policy, info->extack);
+ if (err)
+ return err;
+
+ err = nl80211_parse_chandef(rdev, info, &out->chandef);
+ if (err)
+ return err;
+
+ /* no validation needed - was already done via nested policy */
+ nla_parse_nested(req, NL80211_PMSR_REQ_ATTR_MAX,
+ tb[NL80211_PMSR_PEER_ATTR_REQ],
+ NULL, NULL);
+
+ if (!req[NL80211_PMSR_REQ_ATTR_DATA]) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[NL80211_PMSR_PEER_ATTR_REQ],
+ "missing request type/data");
+ return -EINVAL;
+ }
+
+ if (req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF])
+ out->report_ap_tsf = true;
+
+ if (out->report_ap_tsf && !rdev->wiphy.pmsr_capa->report_ap_tsf) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ req[NL80211_PMSR_REQ_ATTR_GET_AP_TSF],
+ "reporting AP TSF is not supported");
+ return -EINVAL;
+ }
+
+ nla_for_each_nested(treq, req[NL80211_PMSR_REQ_ATTR_DATA], rem) {
+ switch (nla_type(treq)) {
+ case NL80211_PMSR_TYPE_FTM:
+ err = pmsr_parse_ftm(rdev, treq, out, info);
+ break;
+ default:
+ NL_SET_ERR_MSG_ATTR(info->extack, treq,
+ "unsupported measurement type");
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info)
+{
+ struct nlattr *reqattr = info->attrs[NL80211_ATTR_PEER_MEASUREMENTS];
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev = info->user_ptr[1];
+ struct cfg80211_pmsr_request *req;
+ struct nlattr *peers, *peer;
+ int count, rem, err, idx;
+
+ if (!rdev->wiphy.pmsr_capa)
+ return -EOPNOTSUPP;
+
+ if (!reqattr)
+ return -EINVAL;
+
+ peers = nla_find(nla_data(reqattr), nla_len(reqattr),
+ NL80211_PMSR_ATTR_PEERS);
+ if (!peers)
+ return -EINVAL;
+
+ count = 0;
+ nla_for_each_nested(peer, peers, rem) {
+ count++;
+
+ if (count > rdev->wiphy.pmsr_capa->max_peers) {
+ NL_SET_ERR_MSG_ATTR(info->extack, peer,
+ "Too many peers used");
+ return -EINVAL;
+ }
+ }
+
+ req = kzalloc(struct_size(req, peers, count), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ if (info->attrs[NL80211_ATTR_TIMEOUT])
+ req->timeout = nla_get_u32(info->attrs[NL80211_ATTR_TIMEOUT]);
+
+ if (info->attrs[NL80211_ATTR_MAC]) {
+ if (!rdev->wiphy.pmsr_capa->randomize_mac_addr) {
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ info->attrs[NL80211_ATTR_MAC],
+ "device cannot randomize MAC address");
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ err = nl80211_parse_random_mac(info->attrs, req->mac_addr,
+ req->mac_addr_mask);
+ if (err)
+ goto out_err;
+ } else {
+ /* no NL80211_ATTR_MAC in this branch - dereferencing the
+ * attribute here would be a NULL pointer dereference; fall
+ * back to the wdev's own address instead
+ */
+ memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN);
+ memset(req->mac_addr_mask, 0xff, ETH_ALEN);
+ }
+
+ idx = 0;
+ nla_for_each_nested(peer, peers, rem) {
+ /* NB: this reuses info->attrs, but we no longer need it */
+ err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info);
+ if (err)
+ goto out_err;
+ idx++;
+ }
+
+ req->n_peers = count;
+ req->cookie = cfg80211_assign_cookie(rdev);
+ /* needed for the unicast replies and by cfg80211_release_pmsr() */
+ req->nl_portid = info->snd_portid;
+
+ err = rdev_start_pmsr(rdev, wdev, req);
+ if (err)
+ goto out_err;
+
+ list_add_tail(&req->list, &wdev->pmsr_list);
+
+ nl_set_extack_cookie_u64(info->extack, req->cookie);
+ return 0;
+out_err:
+ kfree(req);
+ return err;
+}
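
For orientation, here is roughly how a user-space client would build the nested layout that pmsr_parse_peer() unpacks. This is a sketch against the libnl-3 API and is not part of the patch: nl80211_id, ifindex and peer_mac are assumed to come from the usual genetlink resolution, nest/put return values go unchecked, and a complete client would look more like iw's FTM support.

    /* sketch: build the request parsed by pmsr_parse_peer() above */
    #include <netlink/genl/genl.h>
    #include <netlink/msg.h>
    #include <linux/nl80211.h>

    static struct nl_msg *build_ftm_request(int nl80211_id, int ifindex,
                                            const unsigned char peer_mac[6])
    {
            struct nlattr *pmsr, *peers, *peer, *req, *chan, *data, *ftm;
            struct nl_msg *msg = nlmsg_alloc();

            if (!msg)
                    return NULL;

            genlmsg_put(msg, 0, 0, nl80211_id, 0, 0,
                        NL80211_CMD_PEER_MEASUREMENT_START, 0);
            nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

            pmsr = nla_nest_start(msg, NL80211_ATTR_PEER_MEASUREMENTS);
            peers = nla_nest_start(msg, NL80211_PMSR_ATTR_PEERS);
            peer = nla_nest_start(msg, 1);

            nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, 6, peer_mac);

            /* re-parsed above via nl80211_policy + nl80211_parse_chandef() */
            chan = nla_nest_start(msg, NL80211_PMSR_PEER_ATTR_CHAN);
            nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, 5180);
            nla_nest_end(msg, chan);

            req = nla_nest_start(msg, NL80211_PMSR_PEER_ATTR_REQ);
            data = nla_nest_start(msg, NL80211_PMSR_REQ_ATTR_DATA);
            ftm = nla_nest_start(msg, NL80211_PMSR_TYPE_FTM);
            /* mandatory outside 60 GHz, see pmsr_parse_ftm() */
            nla_put_u32(msg, NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE,
                        NL80211_PREAMBLE_HT);
            nla_put_flag(msg, NL80211_PMSR_FTM_REQ_ATTR_ASAP);
            nla_nest_end(msg, ftm);
            nla_nest_end(msg, data);
            nla_nest_end(msg, req);

            nla_nest_end(msg, peer);
            nla_nest_end(msg, peers);
            nla_nest_end(msg, pmsr);

            return msg;
    }
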
+
+void cfg80211_pmsr_complete(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ gfp_t gfp)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+
+ trace_cfg80211_pmsr_complete(wdev->wiphy, wdev, req->cookie);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ goto free_request;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0,
+ NL80211_CMD_PEER_MEASUREMENT_COMPLETE);
+ if (!hdr)
+ goto free_msg;
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+ NL80211_ATTR_PAD))
+ goto free_msg;
+
+ if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
+ NL80211_ATTR_PAD))
+ goto free_msg;
+
+ genlmsg_end(msg, hdr);
+ genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
+ goto free_request;
+free_msg:
+ nlmsg_free(msg);
+free_request:
+ spin_lock_bh(&wdev->pmsr_lock);
+ list_del(&req->list);
+ spin_unlock_bh(&wdev->pmsr_lock);
+ kfree(req);
+}
+EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
+
+static int nl80211_pmsr_send_ftm_res(struct sk_buff *msg,
+ struct cfg80211_pmsr_result *res)
+{
+ if (res->status == NL80211_PMSR_STATUS_FAILURE) {
+ if (nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON,
+ res->ftm.failure_reason))
+ goto error;
+
+ if (res->ftm.failure_reason ==
+ NL80211_PMSR_FTM_FAILURE_PEER_BUSY &&
+ res->ftm.busy_retry_time &&
+ nla_put_u32(msg, NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME,
+ res->ftm.busy_retry_time))
+ goto error;
+
+ return 0;
+ }
+
+#define PUT(tp, attr, val) \
+ do { \
+ if (nla_put_##tp(msg, \
+ NL80211_PMSR_FTM_RESP_ATTR_##attr, \
+ res->ftm.val)) \
+ goto error; \
+ } while (0)
+
+#define PUTOPT(tp, attr, val) \
+ do { \
+ if (res->ftm.val##_valid) \
+ PUT(tp, attr, val); \
+ } while (0)
+
+#define PUT_U64(attr, val) \
+ do { \
+ if (nla_put_u64_64bit(msg, \
+ NL80211_PMSR_FTM_RESP_ATTR_##attr,\
+ res->ftm.val, \
+ NL80211_PMSR_FTM_RESP_ATTR_PAD)) \
+ goto error; \
+ } while (0)
+
+#define PUTOPT_U64(attr, val) \
+ do { \
+ if (res->ftm.val##_valid) \
+ PUT_U64(attr, val); \
+ } while (0)
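
To make the helper macros concrete: a single invocation such as PUTOPT(s32, RSSI_AVG, rssi_avg) below expands (modulo whitespace) to the following; the token pasting is what keeps the attribute name and the result field in lockstep from one argument list.

    do {
            if (res->ftm.rssi_avg_valid)
                    do {
                            if (nla_put_s32(msg,
                                            NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG,
                                            res->ftm.rssi_avg))
                                    goto error;
                    } while (0);
    } while (0);
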
+
+ if (res->ftm.burst_index >= 0)
+ PUT(u32, BURST_INDEX, burst_index);
+ PUTOPT(u32, NUM_FTMR_ATTEMPTS, num_ftmr_attempts);
+ PUTOPT(u32, NUM_FTMR_SUCCESSES, num_ftmr_successes);
+ PUT(u8, NUM_BURSTS_EXP, num_bursts_exp);
+ PUT(u8, BURST_DURATION, burst_duration);
+ PUT(u8, FTMS_PER_BURST, ftms_per_burst);
+ PUTOPT(s32, RSSI_AVG, rssi_avg);
+ PUTOPT(s32, RSSI_SPREAD, rssi_spread);
+ if (res->ftm.tx_rate_valid &&
+ !nl80211_put_sta_rate(msg, &res->ftm.tx_rate,
+ NL80211_PMSR_FTM_RESP_ATTR_TX_RATE))
+ goto error;
+ if (res->ftm.rx_rate_valid &&
+ !nl80211_put_sta_rate(msg, &res->ftm.rx_rate,
+ NL80211_PMSR_FTM_RESP_ATTR_RX_RATE))
+ goto error;
+ PUTOPT_U64(RTT_AVG, rtt_avg);
+ PUTOPT_U64(RTT_VARIANCE, rtt_variance);
+ PUTOPT_U64(RTT_SPREAD, rtt_spread);
+ PUTOPT_U64(DIST_AVG, dist_avg);
+ PUTOPT_U64(DIST_VARIANCE, dist_variance);
+ PUTOPT_U64(DIST_SPREAD, dist_spread);
+ if (res->ftm.lci && res->ftm.lci_len &&
+ nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_LCI,
+ res->ftm.lci_len, res->ftm.lci))
+ goto error;
+ if (res->ftm.civicloc && res->ftm.civicloc_len &&
+ nla_put(msg, NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC,
+ res->ftm.civicloc_len, res->ftm.civicloc))
+ goto error;
+#undef PUT
+#undef PUTOPT
+#undef PUT_U64
+#undef PUTOPT_U64
+
+ return 0;
+error:
+ return -ENOSPC;
+}
+
+static int nl80211_pmsr_send_result(struct sk_buff *msg,
+ struct cfg80211_pmsr_result *res)
+{
+ struct nlattr *pmsr, *peers, *peer, *resp, *data, *typedata;
+
+ pmsr = nla_nest_start(msg, NL80211_ATTR_PEER_MEASUREMENTS);
+ if (!pmsr)
+ goto error;
+
+ peers = nla_nest_start(msg, NL80211_PMSR_ATTR_PEERS);
+ if (!peers)
+ goto error;
+
+ peer = nla_nest_start(msg, 1);
+ if (!peer)
+ goto error;
+
+ if (nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, ETH_ALEN, res->addr))
+ goto error;
+
+ resp = nla_nest_start(msg, NL80211_PMSR_PEER_ATTR_RESP);
+ if (!resp)
+ goto error;
+
+ if (nla_put_u32(msg, NL80211_PMSR_RESP_ATTR_STATUS, res->status) ||
+ nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_HOST_TIME,
+ res->host_time, NL80211_PMSR_RESP_ATTR_PAD))
+ goto error;
+
+ if (res->ap_tsf_valid &&
+ nla_put_u64_64bit(msg, NL80211_PMSR_RESP_ATTR_AP_TSF,
+ res->ap_tsf, NL80211_PMSR_RESP_ATTR_PAD))
+ goto error;
+
+ if (res->final && nla_put_flag(msg, NL80211_PMSR_RESP_ATTR_FINAL))
+ goto error;
+
+ data = nla_nest_start(msg, NL80211_PMSR_RESP_ATTR_DATA);
+ if (!data)
+ goto error;
+
+ typedata = nla_nest_start(msg, res->type);
+ if (!typedata)
+ goto error;
+
+ switch (res->type) {
+ case NL80211_PMSR_TYPE_FTM:
+ if (nl80211_pmsr_send_ftm_res(msg, res))
+ goto error;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ nla_nest_end(msg, typedata);
+ nla_nest_end(msg, data);
+ nla_nest_end(msg, resp);
+ nla_nest_end(msg, peer);
+ nla_nest_end(msg, peers);
+ nla_nest_end(msg, pmsr);
+
+ return 0;
+error:
+ return -ENOSPC;
+}
+
+void cfg80211_pmsr_report(struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *req,
+ struct cfg80211_pmsr_result *result,
+ gfp_t gfp)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+
+ trace_cfg80211_pmsr_report(wdev->wiphy, wdev, req->cookie,
+ result->addr);
+
+ /*
+ * Currently, the only variable-length items are the LCI and
+ * civic location data, both of which are reasonably short, so
+ * the default message size suffices for this allocation.
+ */
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_PEER_MEASUREMENT_RESULT);
+ if (!hdr)
+ goto free;
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
+ NL80211_ATTR_PAD))
+ goto free;
+
+ if (nla_put_u64_64bit(msg, NL80211_ATTR_COOKIE, req->cookie,
+ NL80211_ATTR_PAD))
+ goto free;
+
+ err = nl80211_pmsr_send_result(msg, result);
+ if (err) {
+ pr_err_ratelimited("peer measurement result: message didn't fit!\n");
+ goto free;
+ }
+
+ genlmsg_end(msg, hdr);
+ genlmsg_unicast(wiphy_net(wdev->wiphy), msg, req->nl_portid);
+ return;
+free:
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL_GPL(cfg80211_pmsr_report);
+
+void cfg80211_pmsr_free_wk(struct work_struct *work)
+{
+ struct wireless_dev *wdev = container_of(work, struct wireless_dev,
+ pmsr_free_wk);
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct cfg80211_pmsr_request *req, *tmp;
+ LIST_HEAD(free_list);
+
+ spin_lock_bh(&wdev->pmsr_lock);
+ list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) {
+ if (req->nl_portid)
+ continue;
+ list_move_tail(&req->list, &free_list);
+ }
+ spin_unlock_bh(&wdev->pmsr_lock);
+
+ list_for_each_entry_safe(req, tmp, &free_list, list) {
+ wdev_lock(wdev);
+ rdev_abort_pmsr(rdev, wdev, req);
+ wdev_unlock(wdev);
+
+ kfree(req);
+ }
+}
+
+void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
+{
+ struct cfg80211_pmsr_request *req;
+ bool found = false;
+
+ spin_lock_bh(&wdev->pmsr_lock);
+ list_for_each_entry(req, &wdev->pmsr_list, list) {
+ found = true;
+ req->nl_portid = 0;
+ }
+ spin_unlock_bh(&wdev->pmsr_lock);
+
+ if (found)
+ schedule_work(&wdev->pmsr_free_wk);
+ flush_work(&wdev->pmsr_free_wk);
+ WARN_ON(!list_empty(&wdev->pmsr_list));
+}
+
+void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid)
+{
+ struct cfg80211_pmsr_request *req;
+
+ spin_lock_bh(&wdev->pmsr_lock);
+ list_for_each_entry(req, &wdev->pmsr_list, list) {
+ if (req->nl_portid == portid) {
+ req->nl_portid = 0;
+ schedule_work(&wdev->pmsr_free_wk);
+ }
+ }
+ spin_unlock_bh(&wdev->pmsr_lock);
+}
+
+#endif /* __PMSR_H */
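
The new file gives drivers two completion hooks, cfg80211_pmsr_report() and cfg80211_pmsr_complete(), whose signatures appear above. A hypothetical driver's deferred-work handler might use them as follows; this is a sketch only, the mydrv_* names and the work-struct plumbing are assumptions, not part of this patch:

    /* hypothetical driver bookkeeping - not part of this patch */
    struct mydrv_pmsr_ctx {
            struct work_struct work;
            struct wireless_dev *wdev;
            struct cfg80211_pmsr_request *req;
    };

    static void mydrv_pmsr_work(struct work_struct *work)
    {
            struct mydrv_pmsr_ctx *ctx =
                    container_of(work, struct mydrv_pmsr_ctx, work);
            struct cfg80211_pmsr_result res = {};
            int i;

            for (i = 0; i < ctx->req->n_peers; i++) {
                    memcpy(res.addr, ctx->req->peers[i].addr, ETH_ALEN);
                    res.type = NL80211_PMSR_TYPE_FTM;
                    res.host_time = ktime_get_boot_ns(); /* clock choice assumed */
                    res.final = true;

                    /* report a failure result for every peer */
                    res.status = NL80211_PMSR_STATUS_FAILURE;
                    res.ftm.failure_reason = NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
                    res.ftm.busy_retry_time = 10;
                    res.ftm.burst_index = -1; /* suppresses the optional attr */

                    cfg80211_pmsr_report(ctx->wdev, ctx->req, &res, GFP_KERNEL);
            }

            /* unlinks the request from wdev->pmsr_list and frees it */
            cfg80211_pmsr_complete(ctx->wdev, ctx->req, GFP_KERNEL);
    }
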
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 51380b5c32f2..5cb48d135fab 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1247,4 +1247,29 @@ rdev_get_ftm_responder_stats(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int
+rdev_start_pmsr(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *request)
+{
+ int ret = -EOPNOTSUPP;
+
+ trace_rdev_start_pmsr(&rdev->wiphy, wdev, request->cookie);
+ if (rdev->ops->start_pmsr)
+ ret = rdev->ops->start_pmsr(&rdev->wiphy, wdev, request);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
+static inline void
+rdev_abort_pmsr(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ struct cfg80211_pmsr_request *request)
+{
+ trace_rdev_abort_pmsr(&rdev->wiphy, wdev, request->cookie);
+ if (rdev->ops->abort_pmsr)
+ rdev->ops->abort_pmsr(&rdev->wiphy, wdev, request);
+ trace_rdev_return_void(&rdev->wiphy);
+}
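
These wrappers only fire once a driver opts in. A driver would advertise support roughly as below; a sketch with illustrative values, the mydrv_* symbols are hypothetical, but the field names come straight from the checks in pmsr.c above:

    /* sketch: advertising FTM initiator support (values illustrative) */
    static const struct cfg80211_pmsr_capabilities mydrv_pmsr_capa = {
            .max_peers = 10,
            .report_ap_tsf = true,
            .randomize_mac_addr = true,
            .ftm = {
                    .bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
                                  BIT(NL80211_CHAN_WIDTH_80),
                    .preambles = BIT(NL80211_PREAMBLE_HT) |
                                 BIT(NL80211_PREAMBLE_VHT),
                    .max_bursts_exponent = -1,  /* negative: no limit */
                    .max_ftms_per_burst = 0,    /* 0: no limit, see check above */
                    .asap = true,
                    .non_asap = true,
                    .request_lci = true,
                    .request_civicloc = true,
            },
    };

    static const struct cfg80211_ops mydrv_cfg_ops = {
            /* ... */
            .start_pmsr = mydrv_start_pmsr,
            .abort_pmsr = mydrv_abort_pmsr,
    };

    /* at wiphy setup time: */
    wiphy->pmsr_capa = &mydrv_pmsr_capa;
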
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index d0e7472dd9fd..5123667f4569 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1183,7 +1183,7 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
switch (ftype) {
case CFG80211_BSS_FTYPE_BEACON:
ies->from_beacon = true;
- /* fall through to assign */
+ /* fall through */
case CFG80211_BSS_FTYPE_UNKNOWN:
rcu_assign_pointer(tmp.pub.beacon_ies, ies);
break;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index d536b07582f8..f741d8376a46 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -642,11 +642,15 @@ static bool cfg80211_is_all_idle(void)
* All devices must be idle as otherwise if you are actively
* scanning some new beacon hints could be learned and would
* count as new regulatory hints.
+ * Also, if any other beaconing interface is still active, we
+ * must not issue a disconnect hint or reset state such as the
+ * channel DFS state, etc.
*/
list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
wdev_lock(wdev);
- if (wdev->conn || wdev->current_bss)
+ if (wdev->conn || wdev->current_bss ||
+ cfg80211_beaconing_iface_active(wdev))
is_all_idle = false;
wdev_unlock(wdev);
}
@@ -1171,6 +1175,8 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
rdev->wiphy.ht_capa_mod_mask);
+ cfg80211_oper_and_vht_capa(&connect->vht_capa_mask,
+ rdev->wiphy.vht_capa_mod_mask);
if (connkeys && connkeys->def >= 0) {
int idx;
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index c6a9446b4e6b..44b2ce1bb13a 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -361,6 +361,24 @@ DECLARE_EVENT_CLASS(wiphy_wdev_evt,
TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
);
+DECLARE_EVENT_CLASS(wiphy_wdev_cookie_evt,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie),
+ TP_ARGS(wiphy, wdev, cookie),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ WDEV_ENTRY
+ __field(u64, cookie)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ WDEV_ASSIGN;
+ __entry->cookie = cookie;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie: %lld",
+ WIPHY_PR_ARG, WDEV_PR_ARG,
+ (unsigned long long)__entry->cookie)
+);
+
DEFINE_EVENT(wiphy_wdev_evt, rdev_return_wdev,
TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
TP_ARGS(wiphy, wdev)
@@ -770,9 +788,9 @@ DEFINE_EVENT(wiphy_netdev_mac_evt, rdev_set_wds_peer,
);
TRACE_EVENT(rdev_dump_station,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx,
u8 *mac),
- TP_ARGS(wiphy, netdev, idx, mac),
+ TP_ARGS(wiphy, netdev, _idx, mac),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
@@ -783,7 +801,7 @@ TRACE_EVENT(rdev_dump_station,
WIPHY_ASSIGN;
NETDEV_ASSIGN;
MAC_ASSIGN(sta_mac, mac);
- __entry->idx = idx;
+ __entry->idx = _idx;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: " MAC_PR_FMT ", idx: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(sta_mac),
@@ -847,9 +865,9 @@ DEFINE_EVENT(mpath_evt, rdev_get_mpath,
);
TRACE_EVENT(rdev_dump_mpath,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx,
u8 *dst, u8 *next_hop),
- TP_ARGS(wiphy, netdev, idx, dst, next_hop),
+ TP_ARGS(wiphy, netdev, _idx, dst, next_hop),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
@@ -862,7 +880,7 @@ TRACE_EVENT(rdev_dump_mpath,
NETDEV_ASSIGN;
MAC_ASSIGN(dst, dst);
MAC_ASSIGN(next_hop, next_hop);
- __entry->idx = idx;
+ __entry->idx = _idx;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: "
MAC_PR_FMT ", next hop: " MAC_PR_FMT,
@@ -892,9 +910,9 @@ TRACE_EVENT(rdev_get_mpp,
);
TRACE_EVENT(rdev_dump_mpp,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx,
u8 *dst, u8 *mpp),
- TP_ARGS(wiphy, netdev, idx, mpp, dst),
+ TP_ARGS(wiphy, netdev, _idx, mpp, dst),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
@@ -907,7 +925,7 @@ TRACE_EVENT(rdev_dump_mpp,
NETDEV_ASSIGN;
MAC_ASSIGN(dst, dst);
MAC_ASSIGN(mpp, mpp);
- __entry->idx = idx;
+ __entry->idx = _idx;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d, destination: "
MAC_PR_FMT ", mpp: " MAC_PR_FMT,
@@ -1673,8 +1691,8 @@ TRACE_EVENT(rdev_tdls_mgmt,
);
TRACE_EVENT(rdev_dump_survey,
- TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int idx),
- TP_ARGS(wiphy, netdev, idx),
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, int _idx),
+ TP_ARGS(wiphy, netdev, _idx),
TP_STRUCT__entry(
WIPHY_ENTRY
NETDEV_ENTRY
@@ -1683,7 +1701,7 @@ TRACE_EVENT(rdev_dump_survey,
TP_fast_assign(
WIPHY_ASSIGN;
NETDEV_ASSIGN;
- __entry->idx = idx;
+ __entry->idx = _idx;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", index: %d",
WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->idx)
@@ -2502,6 +2520,16 @@ TRACE_EVENT(rdev_get_ftm_responder_stats,
__entry->out_of_window)
);
+DEFINE_EVENT(wiphy_wdev_cookie_evt, rdev_start_pmsr,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie),
+ TP_ARGS(wiphy, wdev, cookie)
+);
+
+DEFINE_EVENT(wiphy_wdev_cookie_evt, rdev_abort_pmsr,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie),
+ TP_ARGS(wiphy, wdev, cookie)
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
@@ -3294,6 +3322,46 @@ TRACE_EVENT(cfg80211_stop_iface,
TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT,
WIPHY_PR_ARG, WDEV_PR_ARG)
);
+
+TRACE_EVENT(cfg80211_pmsr_report,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+ u64 cookie, const u8 *addr),
+ TP_ARGS(wiphy, wdev, cookie, addr),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ WDEV_ENTRY
+ __field(u64, cookie)
+ MAC_ENTRY(addr)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ WDEV_ASSIGN;
+ __entry->cookie = cookie;
+ MAC_ASSIGN(addr, addr);
+ ),
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie:%lld, " MAC_PR_FMT,
+ WIPHY_PR_ARG, WDEV_PR_ARG,
+ (unsigned long long)__entry->cookie,
+ MAC_PR_ARG(addr))
+);
+
+TRACE_EVENT(cfg80211_pmsr_complete,
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie),
+ TP_ARGS(wiphy, wdev, cookie),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ WDEV_ENTRY
+ __field(u64, cookie)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ WDEV_ASSIGN;
+ __entry->cookie = cookie;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", cookie:%lld",
+ WIPHY_PR_ARG, WDEV_PR_ARG,
+ (unsigned long long)__entry->cookie)
+);
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/util.c b/net/wireless/util.c
index ef14d80ca03e..cd48cdd582c0 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1421,6 +1421,8 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
ies[pos + ext],
ext == 2))
pos = skip_ie(ies, ielen, pos);
+ else
+ break;
}
} else {
pos = skip_ie(ies, ielen, pos);
@@ -2013,33 +2015,32 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
case IEEE80211_VHT_CHANWIDTH_160MHZ:
if (supp_width == 0 &&
(ext_nss_bw == 1 || ext_nss_bw == 2))
- return DIV_ROUND_UP(max_vht_nss, 2);
+ return max_vht_nss / 2;
if (supp_width == 0 &&
ext_nss_bw == 3)
- return DIV_ROUND_UP(3 * max_vht_nss, 4);
+ return (3 * max_vht_nss) / 4;
if (supp_width == 1 &&
ext_nss_bw == 3)
return 2 * max_vht_nss;
break;
case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
- if (supp_width == 0 &&
- (ext_nss_bw == 1 || ext_nss_bw == 2))
+ if (supp_width == 0 && ext_nss_bw == 1)
return 0; /* not possible */
if (supp_width == 0 &&
ext_nss_bw == 2)
- return DIV_ROUND_UP(max_vht_nss, 2);
+ return max_vht_nss / 2;
if (supp_width == 0 &&
ext_nss_bw == 3)
- return DIV_ROUND_UP(3 * max_vht_nss, 4);
+ return (3 * max_vht_nss) / 4;
if (supp_width == 1 &&
ext_nss_bw == 0)
return 0; /* not possible */
if (supp_width == 1 &&
ext_nss_bw == 1)
- return DIV_ROUND_UP(max_vht_nss, 2);
+ return max_vht_nss / 2;
if (supp_width == 1 &&
ext_nss_bw == 2)
- return DIV_ROUND_UP(3 * max_vht_nss, 4);
+ return (3 * max_vht_nss) / 4;
break;
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index d49aa79b7997..5121729b8b63 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb,
}
len = *skb->data;
- needed = 1 + (len >> 4) + (len & 0x0f);
+ needed = 1 + ((len >> 4) + (len & 0x0f) + 1) / 2;
if (!pskb_may_pull(skb, needed)) {
/* packet is too short to hold the addresses it claims
@@ -288,7 +288,7 @@ static struct sock *x25_find_listener(struct x25_address *addr,
sk_for_each(s, &x25_list)
if ((!strcmp(addr->x25_addr,
x25_sk(s)->source_addr.x25_addr) ||
- !strcmp(addr->x25_addr,
+ !strcmp(x25_sk(s)->source_addr.x25_addr,
null_x25_address.x25_addr)) &&
s->sk_state == TCP_LISTEN) {
/*
@@ -688,11 +688,15 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
- len = strlen(addr->sx25_addr.x25_addr);
- for (i = 0; i < len; i++) {
- if (!isdigit(addr->sx25_addr.x25_addr[i])) {
- rc = -EINVAL;
- goto out;
+ /* check for the null_x25_address */
+ if (strcmp(addr->sx25_addr.x25_addr, null_x25_address.x25_addr)) {
+
+ len = strlen(addr->sx25_addr.x25_addr);
+ for (i = 0; i < len; i++) {
+ if (!isdigit(addr->sx25_addr.x25_addr[i])) {
+ rc = -EINVAL;
+ goto out;
+ }
}
}
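
The length fix in the first hunk of this file reflects the BCD packing of X.121 addresses: the leading byte carries two 4-bit digit counts, and the digits themselves follow packed two per byte, so the old per-digit-per-byte count demanded more data than a valid packet contains. A stand-alone illustration (hypothetical helper name):

    #include <stdio.h>

    /* hypothetical stand-alone mirror of the corrected computation */
    static unsigned int x25_addr_block_len(unsigned char len_byte)
    {
            unsigned int called  = len_byte >> 4;   /* called-address digits  */
            unsigned int calling = len_byte & 0x0f; /* calling-address digits */

            /* one length byte, then BCD digits packed two per byte */
            return 1 + (called + calling + 1) / 2;
    }

    int main(void)
    {
            /* 5 called + 4 calling digits: the old code demanded
             * 1 + 5 + 4 = 10 bytes, but only 1 + ceil(9 / 2) = 6
             * are actually on the wire.
             */
            printf("%u\n", x25_addr_block_len(0x54)); /* prints 6 */
            return 0;
    }
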
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 3c12cae32001..afb26221d8a8 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -142,6 +142,15 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
sk->sk_state_change(sk);
break;
}
+ case X25_CALL_REQUEST:
+ /* call collision */
+ x25->causediag.cause = 0x01;
+ x25->causediag.diagnostic = 0x48;
+
+ x25_write_internal(sk, X25_CLEAR_REQUEST);
+ x25_disconnect(sk, EISCONN, 0x01, 0x48);
+ break;
+
case X25_CLEAR_REQUEST:
if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
goto out_clear;
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 07156f43d295..a03268454a27 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -366,6 +366,7 @@ static int xsk_release(struct socket *sock)
xskq_destroy(xs->rx);
xskq_destroy(xs->tx);
+ xdp_put_umem(xs->umem);
sock_orphan(sk);
sock->sk = NULL;
@@ -713,18 +714,6 @@ static const struct proto_ops xsk_proto_ops = {
.sendpage = sock_no_sendpage,
};
-static void xsk_destruct(struct sock *sk)
-{
- struct xdp_sock *xs = xdp_sk(sk);
-
- if (!sock_flag(sk, SOCK_DEAD))
- return;
-
- xdp_put_umem(xs->umem);
-
- sk_refcnt_debug_dec(sk);
-}
-
static int xsk_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
@@ -751,9 +740,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
sk->sk_family = PF_XDP;
- sk->sk_destruct = xsk_destruct;
- sk_refcnt_debug_inc(sk);
-
sock_set_flag(sk, SOCK_RCU_FREE);
xs = xdp_sk(sk);
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 140270a13d54..5d43aaa17027 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -5,6 +5,7 @@ config XFRM
bool
depends on NET
select GRO_CELLS
+ select SKB_EXTENSIONS
config XFRM_OFFLOAD
bool
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 144c137886b1..b8736f56e7f7 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -32,6 +32,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
struct softnet_data *sd;
netdev_features_t esp_features = features;
struct xfrm_offload *xo = xfrm_offload(skb);
+ struct sec_path *sp;
if (!xo)
return skb;
@@ -39,7 +40,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
if (!(features & NETIF_F_HW_ESP))
esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
- x = skb->sp->xvec[skb->sp->len - 1];
+ sp = skb_sec_path(skb);
+ x = sp->xvec[sp->len - 1];
if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
return skb;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 684c0bc01e2c..b3b613660d44 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -38,8 +38,6 @@ struct xfrm_trans_cb {
#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))
-static struct kmem_cache *secpath_cachep __ro_after_init;
-
static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];
@@ -111,56 +109,24 @@ static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
return ret;
}
-void __secpath_destroy(struct sec_path *sp)
+struct sec_path *secpath_set(struct sk_buff *skb)
{
- int i;
- for (i = 0; i < sp->len; i++)
- xfrm_state_put(sp->xvec[i]);
- kmem_cache_free(secpath_cachep, sp);
-}
-EXPORT_SYMBOL(__secpath_destroy);
+ struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
-struct sec_path *secpath_dup(struct sec_path *src)
-{
- struct sec_path *sp;
-
- sp = kmem_cache_alloc(secpath_cachep, GFP_ATOMIC);
+ sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
if (!sp)
return NULL;
- sp->len = 0;
- sp->olen = 0;
+ if (tmp) /* reused existing one (was COW'd if needed) */
+ return sp;
+ /* allocated new secpath */
memset(sp->ovec, 0, sizeof(sp->ovec));
+ sp->olen = 0;
+ sp->len = 0;
- if (src) {
- int i;
-
- memcpy(sp, src, sizeof(*sp));
- for (i = 0; i < sp->len; i++)
- xfrm_state_hold(sp->xvec[i]);
- }
- refcount_set(&sp->refcnt, 1);
return sp;
}
-EXPORT_SYMBOL(secpath_dup);
-
-int secpath_set(struct sk_buff *skb)
-{
- struct sec_path *sp;
-
- /* Allocate new secpath or COW existing one. */
- if (!skb->sp || refcount_read(&skb->sp->refcnt) != 1) {
- sp = secpath_dup(skb->sp);
- if (!sp)
- return -ENOMEM;
-
- if (skb->sp)
- secpath_put(skb->sp);
- skb->sp = sp;
- }
- return 0;
-}
EXPORT_SYMBOL(secpath_set);
/* Fetch spi and seq from ipsec header */
@@ -236,6 +202,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
bool xfrm_gro = false;
bool crypto_done = false;
struct xfrm_offload *xo = xfrm_offload(skb);
+ struct sec_path *sp;
if (encap_type < 0) {
x = xfrm_input_state(skb);
@@ -312,8 +279,8 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
break;
}
- err = secpath_set(skb);
- if (err) {
+ sp = secpath_set(skb);
+ if (!sp) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
goto drop;
}
@@ -328,7 +295,9 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
daddr = (xfrm_address_t *)(skb_network_header(skb) +
XFRM_SPI_SKB_CB(skb)->daddroff);
do {
- if (skb->sp->len == XFRM_MAX_DEPTH) {
+ sp = skb_sec_path(skb);
+
+ if (sp->len == XFRM_MAX_DEPTH) {
secpath_reset(skb);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
goto drop;
@@ -344,7 +313,13 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
skb->mark = xfrm_smark_get(skb->mark, x);
- skb->sp->xvec[skb->sp->len++] = x;
+ sp->xvec[sp->len++] = x;
+
+ skb_dst_force(skb);
+ if (!skb_dst(skb)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
+ goto drop;
+ }
lock:
spin_lock(&x->lock);
@@ -385,7 +360,6 @@ lock:
XFRM_SKB_CB(skb)->seq.input.low = seq;
XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
- skb_dst_force(skb);
dev_hold(skb->dev);
if (crypto_done)
@@ -468,8 +442,9 @@ resume:
nf_reset(skb);
if (decaps) {
- if (skb->sp)
- skb->sp->olen = 0;
+ sp = skb_sec_path(skb);
+ if (sp)
+ sp->olen = 0;
skb_dst_drop(skb);
gro_cells_receive(&gro_cells, skb);
return 0;
@@ -480,8 +455,9 @@ resume:
err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async);
if (xfrm_gro) {
- if (skb->sp)
- skb->sp->olen = 0;
+ sp = skb_sec_path(skb);
+ if (sp)
+ sp->olen = 0;
skb_dst_drop(skb);
gro_cells_receive(&gro_cells, skb);
return err;
@@ -546,11 +522,6 @@ void __init xfrm_input_init(void)
if (err)
gro_cells.cells = NULL;
- secpath_cachep = kmem_cache_create("secpath_cache",
- sizeof(struct sec_path),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
- NULL);
-
for_each_possible_cpu(i) {
struct xfrm_trans_tasklet *trans;
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index d679fa0f44b3..6be8c7df15bb 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -251,7 +251,7 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
struct xfrm_if *xi;
bool xnet;
- if (err && !skb->sp)
+ if (err && !secpath_exists(skb))
return 0;
x = xfrm_input_state(skb);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 4ae87c5ce2e3..9333153bafda 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -102,6 +102,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
skb_dst_force(skb);
if (!skb_dst(skb)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+ err = -EHOSTUNREACH;
goto error_nolock;
}
@@ -218,19 +219,16 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
if (xfrm_dev_offload_ok(skb, x)) {
struct sec_path *sp;
- sp = secpath_dup(skb->sp);
+ sp = secpath_set(skb);
if (!sp) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
kfree_skb(skb);
return -ENOMEM;
}
- if (skb->sp)
- secpath_put(skb->sp);
- skb->sp = sp;
skb->encapsulation = 1;
sp->olen++;
- sp->xvec[skb->sp->len++] = x;
+ sp->xvec[sp->len++] = x;
xfrm_state_hold(x);
if (skb_is_gso(skb)) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 119a427d9b2b..934492bad8e0 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -26,6 +26,7 @@
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
+#include <linux/rhashtable.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
@@ -45,6 +46,99 @@ struct xfrm_flo {
u8 flags;
};
+/* policies with prefix lengths shorter than these are stored in lists, not trees. */
+#define INEXACT_PREFIXLEN_IPV4 16
+#define INEXACT_PREFIXLEN_IPV6 48
+
+struct xfrm_pol_inexact_node {
+ struct rb_node node;
+ union {
+ xfrm_address_t addr;
+ struct rcu_head rcu;
+ };
+ u8 prefixlen;
+
+ struct rb_root root;
+
+ /* the policies matching this node; the list may be empty */
+ struct hlist_head hhead;
+};
+
+/* xfrm inexact policy search tree:
+ * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
+ * |
+ * +---- root_d: sorted by daddr:prefix
+ * | |
+ * | xfrm_pol_inexact_node
+ * | |
+ * | +- root: sorted by saddr/prefix
+ * | | |
+ * | | xfrm_pol_inexact_node
+ * | | |
+ * | | + root: unused
+ * | | |
+ * | | + hhead: saddr:daddr policies
+ * | |
+ * | +- coarse policies and all any:daddr policies
+ * |
+ * +---- root_s: sorted by saddr:prefix
+ * | |
+ * | xfrm_pol_inexact_node
+ * | |
+ * | + root: unused
+ * | |
+ * | + hhead: saddr:any policies
+ * |
+ * +---- coarse policies and all any:any policies
+ *
+ * Lookups return four candidate lists:
+ * 1. any:any list from top-level xfrm_pol_inexact_bin
+ * 2. any:daddr list from daddr tree
+ * 3. saddr:daddr list from 2nd level daddr tree
+ * 4. saddr:any list from saddr tree
+ *
+ * This result set then needs to be searched for the policy with
+ * the lowest priority. If two results have same prio, youngest one wins.
+ */
+
+struct xfrm_pol_inexact_key {
+ possible_net_t net;
+ u32 if_id;
+ u16 family;
+ u8 dir, type;
+};
+
+struct xfrm_pol_inexact_bin {
+ struct xfrm_pol_inexact_key k;
+ struct rhash_head head;
+ /* list containing '*:*' policies */
+ struct hlist_head hhead;
+
+ seqcount_t count;
+ /* tree sorted by daddr/prefix */
+ struct rb_root root_d;
+
+ /* tree sorted by saddr/prefix */
+ struct rb_root root_s;
+
+ /* slow path below */
+ struct list_head inexact_bins;
+ struct rcu_head rcu;
+};
+
+enum xfrm_pol_inexact_candidate_type {
+ XFRM_POL_CAND_BOTH,
+ XFRM_POL_CAND_SADDR,
+ XFRM_POL_CAND_DADDR,
+ XFRM_POL_CAND_ANY,
+
+ XFRM_POL_CAND_MAX,
+};
+
+struct xfrm_pol_inexact_candidates {
+ struct hlist_head *res[XFRM_POL_CAND_MAX];
+};
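
Tying the diagram and the enum together: which class a policy falls into depends only on whether each address side of its selector is wildcarded, where "wildcard" also covers prefixes shorter than INEXACT_PREFIXLEN_IPV4/IPV6 (see xfrm_pol_inexact_addr_use_any_list() below). A minimal sketch of that dispatch, mirroring what xfrm_policy_inexact_alloc_chain() does later in this patch:

    /* sketch only - the real dispatch is xfrm_policy_inexact_alloc_chain() */
    static enum xfrm_pol_inexact_candidate_type
    pol_storage_class(bool saddr_any, bool daddr_any)
    {
            if (saddr_any && daddr_any)
                    return XFRM_POL_CAND_ANY;   /* bin->hhead: any:any          */
            if (daddr_any)
                    return XFRM_POL_CAND_SADDR; /* root_s node hhead: saddr:any */
            if (saddr_any)
                    return XFRM_POL_CAND_DADDR; /* root_d node hhead: any:daddr */
            return XFRM_POL_CAND_BOTH;          /* root_d subtree: saddr:daddr  */
    }
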
+
static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
@@ -55,6 +149,9 @@ static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
static struct kmem_cache *xfrm_dst_cache __ro_after_init;
static __read_mostly seqcount_t xfrm_policy_hash_generation;
+static struct rhashtable xfrm_policy_inexact_table;
+static const struct rhashtable_params xfrm_pol_inexact_params;
+
static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
@@ -64,6 +161,25 @@ static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
int dir);
+static struct xfrm_pol_inexact_bin *
+xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
+ u32 if_id);
+
+static struct xfrm_pol_inexact_bin *
+xfrm_policy_inexact_lookup_rcu(struct net *net,
+ u8 type, u16 family, u8 dir, u32 if_id);
+static struct xfrm_policy *
+xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
+ bool excl);
+static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
+ struct xfrm_policy *policy);
+
+static bool
+xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
+ struct xfrm_pol_inexact_bin *b,
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr);
+
static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
return refcount_inc_not_zero(&policy->refcnt);
@@ -269,6 +385,7 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
if (policy) {
write_pnet(&policy->xp_net, net);
INIT_LIST_HEAD(&policy->walk.all);
+ INIT_HLIST_NODE(&policy->bydst_inexact_list);
INIT_HLIST_NODE(&policy->bydst);
INIT_HLIST_NODE(&policy->byidx);
rwlock_init(&policy->lock);
@@ -365,7 +482,7 @@ static struct hlist_head *policy_hash_bysel(struct net *net,
hash = __sel_hash(sel, family, hmask, dbits, sbits);
if (hash == hmask + 1)
- return &net->xfrm.policy_inexact[dir];
+ return NULL;
return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
@@ -563,6 +680,533 @@ static void xfrm_hash_resize(struct work_struct *work)
mutex_unlock(&hash_resize_mutex);
}
+static void xfrm_hash_reset_inexact_table(struct net *net)
+{
+ struct xfrm_pol_inexact_bin *b;
+
+ lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
+
+ list_for_each_entry(b, &net->xfrm.inexact_bins, inexact_bins)
+ INIT_HLIST_HEAD(&b->hhead);
+}
+
+/* Make sure *pol can be inserted into fastbin.
+ * Useful to check that later insert requests will be successful
+ * (provided xfrm_policy_lock is held throughout).
+ */
+static struct xfrm_pol_inexact_bin *
+xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
+{
+ struct xfrm_pol_inexact_bin *bin, *prev;
+ struct xfrm_pol_inexact_key k = {
+ .family = pol->family,
+ .type = pol->type,
+ .dir = dir,
+ .if_id = pol->if_id,
+ };
+ struct net *net = xp_net(pol);
+
+ lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
+
+ write_pnet(&k.net, net);
+ bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
+ xfrm_pol_inexact_params);
+ if (bin)
+ return bin;
+
+ bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
+ if (!bin)
+ return NULL;
+
+ bin->k = k;
+ INIT_HLIST_HEAD(&bin->hhead);
+ bin->root_d = RB_ROOT;
+ bin->root_s = RB_ROOT;
+ seqcount_init(&bin->count);
+
+ prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
+ &bin->k, &bin->head,
+ xfrm_pol_inexact_params);
+ if (!prev) {
+ list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
+ return bin;
+ }
+
+ kfree(bin);
+
+ return IS_ERR(prev) ? NULL : prev;
+}
+
+static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
+ int family, u8 prefixlen)
+{
+ if (xfrm_addr_any(addr, family))
+ return true;
+
+ if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
+ return true;
+
+ if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
+ return true;
+
+ return false;
+}
+
+static bool
+xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
+{
+ const xfrm_address_t *addr;
+ bool saddr_any, daddr_any;
+ u8 prefixlen;
+
+ addr = &policy->selector.saddr;
+ prefixlen = policy->selector.prefixlen_s;
+
+ saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
+ policy->family,
+ prefixlen);
+ addr = &policy->selector.daddr;
+ prefixlen = policy->selector.prefixlen_d;
+ daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
+ policy->family,
+ prefixlen);
+ return saddr_any && daddr_any;
+}
+
+static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
+ const xfrm_address_t *addr, u8 prefixlen)
+{
+ node->addr = *addr;
+ node->prefixlen = prefixlen;
+}
+
+static struct xfrm_pol_inexact_node *
+xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
+{
+ struct xfrm_pol_inexact_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_ATOMIC);
+ if (node)
+ xfrm_pol_inexact_node_init(node, addr, prefixlen);
+
+ return node;
+}
+
+static int xfrm_policy_addr_delta(const xfrm_address_t *a,
+ const xfrm_address_t *b,
+ u8 prefixlen, u16 family)
+{
+ unsigned int pdw, pbi;
+ int delta = 0;
+
+ switch (family) {
+ case AF_INET:
+ if (sizeof(long) == 4 && prefixlen == 0)
+ return ntohl(a->a4) - ntohl(b->a4);
+ return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
+ (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
+ case AF_INET6:
+ pdw = prefixlen >> 5;
+ pbi = prefixlen & 0x1f;
+
+ if (pdw) {
+ delta = memcmp(a->a6, b->a6, pdw << 2);
+ if (delta)
+ return delta;
+ }
+ if (pbi) {
+ u32 mask = ~0u << (32 - pbi);
+
+ delta = (ntohl(a->a6[pdw]) & mask) -
+ (ntohl(b->a6[pdw]) & mask);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return delta;
+}
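
xfrm_policy_addr_delta() gives the rb-trees their ordering: two addresses compare equal iff they agree on the first prefixlen bits. A stand-alone IPv4 illustration (hypothetical userspace helper mirroring the masked branch above):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* hypothetical mirror of the IPv4 masked comparison */
    static int addr_delta_v4(uint32_t a_be, uint32_t b_be, unsigned int plen)
    {
            uint32_t mask = plen ? ~0u << (32 - plen) : 0;

            return (int)((ntohl(a_be) & mask) - (ntohl(b_be) & mask));
    }

    int main(void)
    {
            uint32_t a = htonl(0x0a000001); /* 10.0.0.1   */
            uint32_t b = htonl(0x0a0000ff); /* 10.0.0.255 */

            printf("%d\n", addr_delta_v4(a, b, 24)); /* 0: same /24 node */
            printf("%d\n", addr_delta_v4(a, b, 32)); /* <0: a sorts left */
            return 0;
    }
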
+
+static void xfrm_policy_inexact_list_reinsert(struct net *net,
+ struct xfrm_pol_inexact_node *n,
+ u16 family)
+{
+ unsigned int matched_s, matched_d;
+ struct hlist_node *newpos = NULL;
+ struct xfrm_policy *policy, *p;
+
+ matched_s = 0;
+ matched_d = 0;
+
+ list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+ bool matches_s, matches_d;
+
+ if (!policy->bydst_reinsert)
+ continue;
+
+ WARN_ON_ONCE(policy->family != family);
+
+ policy->bydst_reinsert = false;
+ hlist_for_each_entry(p, &n->hhead, bydst) {
+ if (policy->priority >= p->priority)
+ newpos = &p->bydst;
+ else
+ break;
+ }
+
+ if (newpos)
+ hlist_add_behind(&policy->bydst, newpos);
+ else
+ hlist_add_head(&policy->bydst, &n->hhead);
+
+ /* paranoia checks follow.
+ * Check that the reinserted policy matches at least
+ * saddr or daddr for current node prefix.
+ *
+ * Matching both is fine, matching saddr in one policy
+ * (but not daddr) and then matching only daddr in another
+ * is a bug.
+ */
+ matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
+ &n->addr,
+ n->prefixlen,
+ family) == 0;
+ matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
+ &n->addr,
+ n->prefixlen,
+ family) == 0;
+ if (matches_s && matches_d)
+ continue;
+
+ WARN_ON_ONCE(!matches_s && !matches_d);
+ if (matches_s)
+ matched_s++;
+ if (matches_d)
+ matched_d++;
+ WARN_ON_ONCE(matched_s && matched_d);
+ }
+}
+
+static void xfrm_policy_inexact_node_reinsert(struct net *net,
+ struct xfrm_pol_inexact_node *n,
+ struct rb_root *new,
+ u16 family)
+{
+ struct rb_node **p, *parent = NULL;
+ struct xfrm_pol_inexact_node *node;
+
+ /* we should not have another subtree here */
+ WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
+
+ p = &new->rb_node;
+ while (*p) {
+ u8 prefixlen;
+ int delta;
+
+ parent = *p;
+ node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
+
+ prefixlen = min(node->prefixlen, n->prefixlen);
+
+ delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
+ prefixlen, family);
+ if (delta < 0) {
+ p = &parent->rb_left;
+ } else if (delta > 0) {
+ p = &parent->rb_right;
+ } else {
+ struct xfrm_policy *tmp;
+
+ hlist_for_each_entry(tmp, &node->hhead, bydst)
+ tmp->bydst_reinsert = true;
+ hlist_for_each_entry(tmp, &n->hhead, bydst)
+ tmp->bydst_reinsert = true;
+
+ INIT_HLIST_HEAD(&node->hhead);
+ xfrm_policy_inexact_list_reinsert(net, node, family);
+
+ if (node->prefixlen == n->prefixlen) {
+ kfree_rcu(n, rcu);
+ return;
+ }
+
+ rb_erase(*p, new);
+ kfree_rcu(n, rcu);
+ n = node;
+ n->prefixlen = prefixlen;
+ *p = new->rb_node;
+ parent = NULL;
+ }
+ }
+
+ rb_link_node_rcu(&n->node, parent, p);
+ rb_insert_color(&n->node, new);
+}
+
+/* merge nodes v and n */
+static void xfrm_policy_inexact_node_merge(struct net *net,
+ struct xfrm_pol_inexact_node *v,
+ struct xfrm_pol_inexact_node *n,
+ u16 family)
+{
+ struct xfrm_pol_inexact_node *node;
+ struct xfrm_policy *tmp;
+ struct rb_node *rnode;
+
+ /* To-be-merged node v has a subtree.
+ *
+ * Dismantle it and insert its nodes to n->root.
+ */
+ while ((rnode = rb_first(&v->root)) != NULL) {
+ node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
+ rb_erase(&node->node, &v->root);
+ xfrm_policy_inexact_node_reinsert(net, node, &n->root,
+ family);
+ }
+
+ hlist_for_each_entry(tmp, &v->hhead, bydst)
+ tmp->bydst_reinsert = true;
+ hlist_for_each_entry(tmp, &n->hhead, bydst)
+ tmp->bydst_reinsert = true;
+
+ INIT_HLIST_HEAD(&n->hhead);
+ xfrm_policy_inexact_list_reinsert(net, n, family);
+}
+
+static struct xfrm_pol_inexact_node *
+xfrm_policy_inexact_insert_node(struct net *net,
+ struct rb_root *root,
+ xfrm_address_t *addr,
+ u16 family, u8 prefixlen, u8 dir)
+{
+ struct xfrm_pol_inexact_node *cached = NULL;
+ struct rb_node **p, *parent = NULL;
+ struct xfrm_pol_inexact_node *node;
+
+ p = &root->rb_node;
+ while (*p) {
+ int delta;
+
+ parent = *p;
+ node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
+
+ delta = xfrm_policy_addr_delta(addr, &node->addr,
+ node->prefixlen,
+ family);
+ if (delta == 0 && prefixlen >= node->prefixlen) {
+ WARN_ON_ONCE(cached); /* ipsec policies got lost */
+ return node;
+ }
+
+ if (delta < 0)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+
+ if (prefixlen < node->prefixlen) {
+ delta = xfrm_policy_addr_delta(addr, &node->addr,
+ prefixlen,
+ family);
+ if (delta)
+ continue;
+
+ /* This node is a subnet of the new prefix. It needs
+ * to be removed and re-inserted with the smaller
+ * prefix and all nodes that are now also covered
+ * by the reduced prefixlen.
+ */
+ rb_erase(&node->node, root);
+
+ if (!cached) {
+ xfrm_pol_inexact_node_init(node, addr,
+ prefixlen);
+ cached = node;
+ } else {
+ /* This node also falls within the new
+ * prefixlen. Merge the to-be-reinserted
+ * node and this one.
+ */
+ xfrm_policy_inexact_node_merge(net, node,
+ cached, family);
+ kfree_rcu(node, rcu);
+ }
+
+ /* restart */
+ p = &root->rb_node;
+ parent = NULL;
+ }
+ }
+
+ node = cached;
+ if (!node) {
+ node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
+ if (!node)
+ return NULL;
+ }
+
+ rb_link_node_rcu(&node->node, parent, p);
+ rb_insert_color(&node->node, root);
+
+ return node;
+}
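
A worked example of the narrowing branch above: if the tree already holds a node for 10.1.2.0/28 and a policy for 10.1.2.0/24 is inserted, the first pass finds 24 < 28 with a zero delta under the shorter prefix, so the /28 node is unlinked, re-initialized in place as the /24 node and remembered in 'cached', and the walk restarts from the root. Any other node now covered by /24 is then folded in through xfrm_policy_inexact_node_merge(), which flags every policy on the merged lists for re-insertion in priority order.
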
+
+static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
+{
+ struct xfrm_pol_inexact_node *node;
+ struct rb_node *rn = rb_first(r);
+
+ while (rn) {
+ node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
+
+ xfrm_policy_inexact_gc_tree(&node->root, rm);
+ rn = rb_next(rn);
+
+ if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
+ WARN_ON_ONCE(rm);
+ continue;
+ }
+
+ rb_erase(&node->node, r);
+ kfree_rcu(node, rcu);
+ }
+}
+
+static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
+{
+ write_seqcount_begin(&b->count);
+ xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
+ xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
+ write_seqcount_end(&b->count);
+
+ if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
+ !hlist_empty(&b->hhead)) {
+ WARN_ON_ONCE(net_exit);
+ return;
+ }
+
+ if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
+ xfrm_pol_inexact_params) == 0) {
+ list_del(&b->inexact_bins);
+ kfree_rcu(b, rcu);
+ }
+}
+
+static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
+{
+ struct net *net = read_pnet(&b->k.net);
+
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+ __xfrm_policy_inexact_prune_bin(b, false);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+}
+
+static void __xfrm_policy_inexact_flush(struct net *net)
+{
+ struct xfrm_pol_inexact_bin *bin, *t;
+
+ lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
+
+ list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
+ __xfrm_policy_inexact_prune_bin(bin, false);
+}
+
+static struct hlist_head *
+xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
+ struct xfrm_policy *policy, u8 dir)
+{
+ struct xfrm_pol_inexact_node *n;
+ struct net *net;
+
+ net = xp_net(policy);
+ lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
+
+ if (xfrm_policy_inexact_insert_use_any_list(policy))
+ return &bin->hhead;
+
+ if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
+ policy->family,
+ policy->selector.prefixlen_d)) {
+ write_seqcount_begin(&bin->count);
+ n = xfrm_policy_inexact_insert_node(net,
+ &bin->root_s,
+ &policy->selector.saddr,
+ policy->family,
+ policy->selector.prefixlen_s,
+ dir);
+ write_seqcount_end(&bin->count);
+ if (!n)
+ return NULL;
+
+ return &n->hhead;
+ }
+
+ /* daddr is fixed */
+ write_seqcount_begin(&bin->count);
+ n = xfrm_policy_inexact_insert_node(net,
+ &bin->root_d,
+ &policy->selector.daddr,
+ policy->family,
+ policy->selector.prefixlen_d, dir);
+ write_seqcount_end(&bin->count);
+ if (!n)
+ return NULL;
+
+ /* saddr is wildcard */
+ if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
+ policy->family,
+ policy->selector.prefixlen_s))
+ return &n->hhead;
+
+ write_seqcount_begin(&bin->count);
+ n = xfrm_policy_inexact_insert_node(net,
+ &n->root,
+ &policy->selector.saddr,
+ policy->family,
+ policy->selector.prefixlen_s, dir);
+ write_seqcount_end(&bin->count);
+ if (!n)
+ return NULL;
+
+ return &n->hhead;
+}
+
+static struct xfrm_policy *
+xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
+{
+ struct xfrm_pol_inexact_bin *bin;
+ struct xfrm_policy *delpol;
+ struct hlist_head *chain;
+ struct net *net;
+
+ bin = xfrm_policy_inexact_alloc_bin(policy, dir);
+ if (!bin)
+ return ERR_PTR(-ENOMEM);
+
+ net = xp_net(policy);
+ lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
+
+ chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
+ if (!chain) {
+ __xfrm_policy_inexact_prune_bin(bin, false);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ delpol = xfrm_policy_insert_list(chain, policy, excl);
+ if (delpol && excl) {
+ __xfrm_policy_inexact_prune_bin(bin, false);
+ return ERR_PTR(-EEXIST);
+ }
+
+ chain = &net->xfrm.policy_inexact[dir];
+ xfrm_policy_insert_inexact_list(chain, policy);
+
+ if (delpol)
+ __xfrm_policy_inexact_prune_bin(bin, false);
+
+ return delpol;
+}
+
static void xfrm_hash_rebuild(struct work_struct *work)
{
struct net *net = container_of(work, struct net,
@@ -592,7 +1236,50 @@ static void xfrm_hash_rebuild(struct work_struct *work)
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+ /* make sure that we can insert the inexact policies again before
+ * we start with destructive action.
+ */
+ list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
+ struct xfrm_pol_inexact_bin *bin;
+ u8 dbits, sbits;
+
+ dir = xfrm_policy_id2dir(policy->index);
+ if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
+ continue;
+
+ if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
+ if (policy->family == AF_INET) {
+ dbits = rbits4;
+ sbits = lbits4;
+ } else {
+ dbits = rbits6;
+ sbits = lbits6;
+ }
+ } else {
+ if (policy->family == AF_INET) {
+ dbits = lbits4;
+ sbits = rbits4;
+ } else {
+ dbits = lbits6;
+ sbits = rbits6;
+ }
+ }
+
+ if (policy->selector.prefixlen_d < dbits ||
+ policy->selector.prefixlen_s < sbits)
+ continue;
+
+ bin = xfrm_policy_inexact_alloc_bin(policy, dir);
+ if (!bin)
+ goto out_unlock;
+
+ if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
+ goto out_unlock;
+ }
+
/* reset the bydst and inexact table in all directions */
+ xfrm_hash_reset_inexact_table(net);
+
for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
hmask = net->xfrm.policy_bydst[dir].hmask;
@@ -616,15 +1303,23 @@ static void xfrm_hash_rebuild(struct work_struct *work)
/* re-insert all policies by order of creation */
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
- if (policy->walk.dead ||
- xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
+ if (policy->walk.dead)
+ continue;
+ dir = xfrm_policy_id2dir(policy->index);
+ if (dir >= XFRM_POLICY_MAX) {
/* skip socket policies */
continue;
}
newpos = NULL;
chain = policy_hash_bysel(net, &policy->selector,
- policy->family,
- xfrm_policy_id2dir(policy->index));
+ policy->family, dir);
+ if (!chain) {
+ void *p = xfrm_policy_inexact_insert(policy, dir, 0);
+
+ WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
+ continue;
+ }
+
hlist_for_each_entry(pol, chain, bydst) {
if (policy->priority >= pol->priority)
newpos = &pol->bydst;
@@ -637,6 +1332,8 @@ static void xfrm_hash_rebuild(struct work_struct *work)
hlist_add_head_rcu(&policy->bydst, chain);
}
+out_unlock:
+ __xfrm_policy_inexact_flush(net);
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
mutex_unlock(&hash_resize_mutex);
@@ -740,18 +1437,97 @@ static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
return false;
}
-int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
+static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
- struct net *net = xp_net(policy);
- struct xfrm_policy *pol;
- struct xfrm_policy *delpol;
- struct hlist_head *chain;
- struct hlist_node *newpos;
+ const struct xfrm_pol_inexact_key *k = data;
+ u32 a = k->type << 24 | k->dir << 16 | k->family;
+
+ return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
+ seed);
+}
+
+static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
+{
+ const struct xfrm_pol_inexact_bin *b = data;
+
+ return xfrm_pol_bin_key(&b->k, 0, seed);
+}
+
+static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const struct xfrm_pol_inexact_key *key = arg->key;
+ const struct xfrm_pol_inexact_bin *b = ptr;
+ int ret;
+
+ if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
+ return -1;
+
+ ret = b->k.dir ^ key->dir;
+ if (ret)
+ return ret;
+
+ ret = b->k.type ^ key->type;
+ if (ret)
+ return ret;
+
+ ret = b->k.family ^ key->family;
+ if (ret)
+ return ret;
+
+ return b->k.if_id ^ key->if_id;
+}
+
+static const struct rhashtable_params xfrm_pol_inexact_params = {
+ .head_offset = offsetof(struct xfrm_pol_inexact_bin, head),
+ .hashfn = xfrm_pol_bin_key,
+ .obj_hashfn = xfrm_pol_bin_obj,
+ .obj_cmpfn = xfrm_pol_bin_cmp,
+ .automatic_shrinking = true,
+};
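
A note on the comparator above: rhashtable's obj_cmpfn only distinguishes zero from nonzero, so the XOR of each key field serves as a cheap inequality test; the sign or magnitude of the return value carries no meaning (the -1 for a netns mismatch is just another nonzero result).
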
+
+static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
+ struct xfrm_policy *policy)
+{
+ struct xfrm_policy *pol, *delpol = NULL;
+ struct hlist_node *newpos = NULL;
+ int i = 0;
+
+ hlist_for_each_entry(pol, chain, bydst_inexact_list) {
+ if (pol->type == policy->type &&
+ pol->if_id == policy->if_id &&
+ !selector_cmp(&pol->selector, &policy->selector) &&
+ xfrm_policy_mark_match(policy, pol) &&
+ xfrm_sec_ctx_match(pol->security, policy->security) &&
+ !WARN_ON(delpol)) {
+ delpol = pol;
+ if (policy->priority > pol->priority)
+ continue;
+ } else if (policy->priority >= pol->priority) {
+ newpos = &pol->bydst_inexact_list;
+ continue;
+ }
+ if (delpol)
+ break;
+ }
+
+ if (newpos)
+ hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
+ else
+ hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
+
+ hlist_for_each_entry(pol, chain, bydst_inexact_list) {
+ pol->pos = i;
+ i++;
+ }
+}
+
+static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
+ struct xfrm_policy *policy,
+ bool excl)
+{
+ struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
- spin_lock_bh(&net->xfrm.xfrm_policy_lock);
- chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
- delpol = NULL;
- newpos = NULL;
hlist_for_each_entry(pol, chain, bydst) {
if (pol->type == policy->type &&
pol->if_id == policy->if_id &&
@@ -759,24 +1535,45 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
xfrm_policy_mark_match(policy, pol) &&
xfrm_sec_ctx_match(pol->security, policy->security) &&
!WARN_ON(delpol)) {
- if (excl) {
- spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
- return -EEXIST;
- }
+ if (excl)
+ return ERR_PTR(-EEXIST);
delpol = pol;
if (policy->priority > pol->priority)
continue;
} else if (policy->priority >= pol->priority) {
- newpos = &pol->bydst;
+ newpos = pol;
continue;
}
if (delpol)
break;
}
+
if (newpos)
- hlist_add_behind_rcu(&policy->bydst, newpos);
+ hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
else
hlist_add_head_rcu(&policy->bydst, chain);
+
+ return delpol;
+}
+
+int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
+{
+ struct net *net = xp_net(policy);
+ struct xfrm_policy *delpol;
+ struct hlist_head *chain;
+
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+ chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
+ if (chain)
+ delpol = xfrm_policy_insert_list(chain, policy, excl);
+ else
+ delpol = xfrm_policy_inexact_insert(policy, dir, excl);
+
+ if (IS_ERR(delpol)) {
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ return PTR_ERR(delpol);
+ }
+
__xfrm_policy_link(policy, dir);
/* After previous checking, family can either be AF_INET or AF_INET6 */
@@ -806,43 +1603,96 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
}
EXPORT_SYMBOL(xfrm_policy_insert);
+static struct xfrm_policy *
+__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
+ u8 type, int dir,
+ struct xfrm_selector *sel,
+ struct xfrm_sec_ctx *ctx)
+{
+ struct xfrm_policy *pol;
+
+ if (!chain)
+ return NULL;
+
+ hlist_for_each_entry(pol, chain, bydst) {
+ if (pol->type == type &&
+ pol->if_id == if_id &&
+ (mark & pol->mark.m) == pol->mark.v &&
+ !selector_cmp(sel, &pol->selector) &&
+ xfrm_sec_ctx_match(ctx, pol->security))
+ return pol;
+ }
+
+ return NULL;
+}
+
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err)
{
- struct xfrm_policy *pol, *ret;
+ struct xfrm_pol_inexact_bin *bin = NULL;
+ struct xfrm_policy *pol, *ret = NULL;
struct hlist_head *chain;
*err = 0;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
chain = policy_hash_bysel(net, sel, sel->family, dir);
- ret = NULL;
- hlist_for_each_entry(pol, chain, bydst) {
- if (pol->type == type &&
- pol->if_id == if_id &&
- (mark & pol->mark.m) == pol->mark.v &&
- !selector_cmp(sel, &pol->selector) &&
- xfrm_sec_ctx_match(ctx, pol->security)) {
- xfrm_pol_hold(pol);
- if (delete) {
- *err = security_xfrm_policy_delete(
- pol->security);
- if (*err) {
- spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
- return pol;
- }
- __xfrm_policy_unlink(pol, dir);
+ if (!chain) {
+ struct xfrm_pol_inexact_candidates cand;
+ int i;
+
+ bin = xfrm_policy_inexact_lookup(net, type,
+ sel->family, dir, if_id);
+ if (!bin) {
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ return NULL;
+ }
+
+ if (!xfrm_policy_find_inexact_candidates(&cand, bin,
+ &sel->saddr,
+ &sel->daddr)) {
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ return NULL;
+ }
+
+ pol = NULL;
+ for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
+ struct xfrm_policy *tmp;
+
+ tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
+ if_id, type, dir,
+ sel, ctx);
+ if (!tmp)
+ continue;
+
+ if (!pol || tmp->pos < pol->pos)
+ pol = tmp;
+ }
+ } else {
+ pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
+ sel, ctx);
+ }
+
+ if (pol) {
+ xfrm_pol_hold(pol);
+ if (delete) {
+ *err = security_xfrm_policy_delete(pol->security);
+ if (*err) {
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ return pol;
}
- ret = pol;
- break;
+ __xfrm_policy_unlink(pol, dir);
}
+ ret = pol;
}
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
if (ret && delete)
xfrm_policy_kill(ret);
+ if (bin && delete)
+ xfrm_policy_inexact_prune_bin(bin);
return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
@@ -892,36 +1742,19 @@ EXPORT_SYMBOL(xfrm_policy_byid);
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
- int dir, err = 0;
+ struct xfrm_policy *pol;
+ int err = 0;
- for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
- struct xfrm_policy *pol;
- int i;
+ list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+ if (pol->walk.dead ||
+ xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
+ pol->type != type)
+ continue;
- hlist_for_each_entry(pol,
- &net->xfrm.policy_inexact[dir], bydst) {
- if (pol->type != type)
- continue;
- err = security_xfrm_policy_delete(pol->security);
- if (err) {
- xfrm_audit_policy_delete(pol, 0, task_valid);
- return err;
- }
- }
- for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
- hlist_for_each_entry(pol,
- net->xfrm.policy_bydst[dir].table + i,
- bydst) {
- if (pol->type != type)
- continue;
- err = security_xfrm_policy_delete(
- pol->security);
- if (err) {
- xfrm_audit_policy_delete(pol, 0,
- task_valid);
- return err;
- }
- }
+ err = security_xfrm_policy_delete(pol->security);
+ if (err) {
+ xfrm_audit_policy_delete(pol, 0, task_valid);
+ return err;
}
}
return err;
@@ -937,6 +1770,7 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
int dir, err = 0, cnt = 0;
+ struct xfrm_policy *pol;
spin_lock_bh(&net->xfrm.xfrm_policy_lock);
@@ -944,48 +1778,25 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
if (err)
goto out;
- for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
- struct xfrm_policy *pol;
- int i;
-
- again1:
- hlist_for_each_entry(pol,
- &net->xfrm.policy_inexact[dir], bydst) {
- if (pol->type != type)
- continue;
- __xfrm_policy_unlink(pol, dir);
- spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
- cnt++;
-
- xfrm_audit_policy_delete(pol, 1, task_valid);
-
- xfrm_policy_kill(pol);
-
- spin_lock_bh(&net->xfrm.xfrm_policy_lock);
- goto again1;
- }
-
- for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
- again2:
- hlist_for_each_entry(pol,
- net->xfrm.policy_bydst[dir].table + i,
- bydst) {
- if (pol->type != type)
- continue;
- __xfrm_policy_unlink(pol, dir);
- spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
- cnt++;
-
- xfrm_audit_policy_delete(pol, 1, task_valid);
- xfrm_policy_kill(pol);
-
- spin_lock_bh(&net->xfrm.xfrm_policy_lock);
- goto again2;
- }
- }
+again:
+ list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+ dir = xfrm_policy_id2dir(pol->index);
+ if (pol->walk.dead ||
+ dir >= XFRM_POLICY_MAX ||
+ pol->type != type)
+ continue;
+ __xfrm_policy_unlink(pol, dir);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+ cnt++;
+ xfrm_audit_policy_delete(pol, 1, task_valid);
+ xfrm_policy_kill(pol);
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+ goto again;
}
- if (!cnt)
+ if (cnt)
+ __xfrm_policy_inexact_flush(net);
+ else
err = -ESRCH;
out:
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
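The rewritten flush above cannot call xfrm_policy_kill() while holding xfrm_policy_lock, so it unlinks one entry, drops the lock, kills the entry, and restarts the walk from the head, using walk.dead to skip entries already being torn down. A minimal sketch of that unlock-and-restart idiom, with a hypothetical entry type and entry_kill() standing in for the xfrm structures; only the locking shape mirrors the patch:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct entry {
    	struct list_head node;
    	bool dead;
    };

    static void entry_kill(struct entry *e)
    {
    	kfree(e);			/* stand-in for audit + xfrm_policy_kill() */
    }

    static void flush_all(spinlock_t *lock, struct list_head *head)
    {
    	struct entry *e;

    again:
    	spin_lock_bh(lock);
    	list_for_each_entry(e, head, node) {
    		if (e->dead)
    			continue;		/* being torn down elsewhere */
    		list_del_init(&e->node);	/* unlink while still locked */
    		spin_unlock_bh(lock);		/* the kill needs the lock dropped */
    		entry_kill(e);
    		goto again;			/* cursor is stale after relock: restart */
    	}
    	spin_unlock_bh(lock);
    }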
@@ -1084,21 +1895,188 @@ static int xfrm_policy_match(const struct xfrm_policy *pol,
if (match)
ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
dir);
-
return ret;
}
+static struct xfrm_pol_inexact_node *
+xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
+ seqcount_t *count,
+ const xfrm_address_t *addr, u16 family)
+{
+ const struct rb_node *parent;
+ int seq;
+
+again:
+ seq = read_seqcount_begin(count);
+
+ parent = rcu_dereference_raw(r->rb_node);
+ while (parent) {
+ struct xfrm_pol_inexact_node *node;
+ int delta;
+
+ node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
+
+ delta = xfrm_policy_addr_delta(addr, &node->addr,
+ node->prefixlen, family);
+ if (delta < 0) {
+ parent = rcu_dereference_raw(parent->rb_left);
+ continue;
+ } else if (delta > 0) {
+ parent = rcu_dereference_raw(parent->rb_right);
+ continue;
+ }
+
+ return node;
+ }
+
+ if (read_seqcount_retry(count, seq))
+ goto again;
+
+ return NULL;
+}
+
+static bool
+xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
+ struct xfrm_pol_inexact_bin *b,
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr)
+{
+ struct xfrm_pol_inexact_node *n;
+ u16 family;
+
+ if (!b)
+ return false;
+
+ family = b->k.family;
+ memset(cand, 0, sizeof(*cand));
+ cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
+
+ n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
+ family);
+ if (n) {
+ cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
+ n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
+ family);
+ if (n)
+ cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
+ }
+
+ n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
+ family);
+ if (n)
+ cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
+
+ return true;
+}
+
+static struct xfrm_pol_inexact_bin *
+xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
+ u8 dir, u32 if_id)
+{
+ struct xfrm_pol_inexact_key k = {
+ .family = family,
+ .type = type,
+ .dir = dir,
+ .if_id = if_id,
+ };
+
+ write_pnet(&k.net, net);
+
+ return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
+ xfrm_pol_inexact_params);
+}
+
+static struct xfrm_pol_inexact_bin *
+xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
+ u8 dir, u32 if_id)
+{
+ struct xfrm_pol_inexact_bin *bin;
+
+ lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
+
+ rcu_read_lock();
+ bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
+ rcu_read_unlock();
+
+ return bin;
+}
+
+static struct xfrm_policy *
+__xfrm_policy_eval_candidates(struct hlist_head *chain,
+ struct xfrm_policy *prefer,
+ const struct flowi *fl,
+ u8 type, u16 family, int dir, u32 if_id)
+{
+ u32 priority = prefer ? prefer->priority : ~0u;
+ struct xfrm_policy *pol;
+
+ if (!chain)
+ return NULL;
+
+ hlist_for_each_entry_rcu(pol, chain, bydst) {
+ int err;
+
+ if (pol->priority > priority)
+ break;
+
+ err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
+ if (err) {
+ if (err != -ESRCH)
+ return ERR_PTR(err);
+
+ continue;
+ }
+
+ if (prefer) {
+ /* Matches; is it older than *prefer? */
+ if (pol->priority == priority &&
+ prefer->pos < pol->pos)
+ return prefer;
+ }
+
+ return pol;
+ }
+
+ return NULL;
+}
+
+static struct xfrm_policy *
+xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
+ struct xfrm_policy *prefer,
+ const struct flowi *fl,
+ u8 type, u16 family, int dir, u32 if_id)
+{
+ struct xfrm_policy *tmp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
+ tmp = __xfrm_policy_eval_candidates(cand->res[i],
+ prefer,
+ fl, type, family, dir,
+ if_id);
+ if (!tmp)
+ continue;
+
+ if (IS_ERR(tmp))
+ return tmp;
+ prefer = tmp;
+ }
+
+ return prefer;
+}
+
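xfrm_policy_lookup_inexact_addr() above pairs an RCU-safe rbtree descent with a seqcount: if a concurrent writer rebalanced the tree (bumping b->count), the unlocked walk may have followed a stale branch, so a failed read_seqcount_retry() restarts the search. The same reader pattern in isolation, with a hypothetical seqcount_t and data fields rather than the xfrm tree:

    #include <linux/seqlock.h>

    static seqcount_t data_seq;
    static int data_a, data_b;

    /* Sketch of the lockless read-retry idiom used by the lookup above. */
    static void read_pair(int *a, int *b)
    {
    	unsigned int seq;

    	do {
    		seq = read_seqcount_begin(&data_seq);
    		*a = data_a;		/* may race with a writer ...       */
    		*b = data_b;
    	} while (read_seqcount_retry(&data_seq, seq));	/* ... so retry */
    }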
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
const struct flowi *fl,
u16 family, u8 dir,
u32 if_id)
{
- int err;
- struct xfrm_policy *pol, *ret;
+ struct xfrm_pol_inexact_candidates cand;
const xfrm_address_t *daddr, *saddr;
+ struct xfrm_pol_inexact_bin *bin;
+ struct xfrm_policy *pol, *ret;
struct hlist_head *chain;
unsigned int sequence;
- u32 priority;
+ int err;
daddr = xfrm_flowi_daddr(fl, family);
saddr = xfrm_flowi_saddr(fl, family);
@@ -1112,7 +2090,6 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
chain = policy_hash_direct(net, daddr, saddr, family, dir);
} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
- priority = ~0U;
ret = NULL;
hlist_for_each_entry_rcu(pol, chain, bydst) {
err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
@@ -1125,29 +2102,23 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
}
} else {
ret = pol;
- priority = ret->priority;
break;
}
}
- chain = &net->xfrm.policy_inexact[dir];
- hlist_for_each_entry_rcu(pol, chain, bydst) {
- if ((pol->priority >= priority) && ret)
- break;
+ bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
+ if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
+ daddr))
+ goto skip_inexact;
- err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
- if (err) {
- if (err == -ESRCH)
- continue;
- else {
- ret = ERR_PTR(err);
- goto fail;
- }
- } else {
- ret = pol;
- break;
- }
+ pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
+ family, dir, if_id);
+ if (pol) {
+ ret = pol;
+ if (IS_ERR(pol))
+ goto fail;
}
+skip_inexact:
if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
goto retry;
@@ -1239,6 +2210,7 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
/* Socket policies are not hashed. */
if (!hlist_unhashed(&pol->bydst)) {
hlist_del_rcu(&pol->bydst);
+ hlist_del_init(&pol->bydst_inexact_list);
hlist_del(&pol->byidx);
}
@@ -1811,7 +2783,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
pq->timeout = pq->timeout << 1;
if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
xfrm_pol_hold(pol);
- goto out;
+ goto out;
}
dst_release(dst);
@@ -2225,11 +3197,12 @@ EXPORT_SYMBOL(xfrm_lookup_route);
static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
+ struct sec_path *sp = skb_sec_path(skb);
struct xfrm_state *x;
- if (!skb->sp || idx < 0 || idx >= skb->sp->len)
+ if (!sp || idx < 0 || idx >= sp->len)
return 0;
- x = skb->sp->xvec[idx];
+ x = sp->xvec[idx];
if (!x->type->reject)
return 0;
return x->type->reject(x, skb, fl);
@@ -2329,6 +3302,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
struct flowi fl;
int xerr_idx = -1;
const struct xfrm_if_cb *ifcb;
+ struct sec_path *sp;
struct xfrm_if *xi;
u32 if_id = 0;
@@ -2353,11 +3327,12 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
nf_nat_decode_session(skb, &fl, family);
/* First, check used SA against their selectors. */
- if (skb->sp) {
+ sp = skb_sec_path(skb);
+ if (sp) {
int i;
- for (i = skb->sp->len-1; i >= 0; i--) {
- struct xfrm_state *x = skb->sp->xvec[i];
+ for (i = sp->len - 1; i >= 0; i--) {
+ struct xfrm_state *x = sp->xvec[i];
if (!xfrm_selector_match(&x->sel, &fl, family)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
return 0;
@@ -2384,7 +3359,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
}
if (!pol) {
- if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
+ if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
xfrm_secpath_reject(xerr_idx, skb, &fl);
XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
return 0;
@@ -2413,7 +3388,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
#endif
if (pol->action == XFRM_POLICY_ALLOW) {
- struct sec_path *sp;
static struct sec_path dummy;
struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
@@ -2421,7 +3395,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
int ti = 0;
int i, k;
- if ((sp = skb->sp) == NULL)
+ sp = skb_sec_path(skb);
+ if (!sp)
sp = &dummy;
for (pi = 0; pi < npols; pi++) {
@@ -2816,13 +3791,17 @@ static void xfrm_statistics_fini(struct net *net)
static int __net_init xfrm_policy_init(struct net *net)
{
unsigned int hmask, sz;
- int dir;
+ int dir, err;
- if (net_eq(net, &init_net))
+ if (net_eq(net, &init_net)) {
xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
sizeof(struct xfrm_dst),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
+ err = rhashtable_init(&xfrm_policy_inexact_table,
+ &xfrm_pol_inexact_params);
+ BUG_ON(err);
+ }
hmask = 8 - 1;
sz = (hmask+1) * sizeof(struct hlist_head);
@@ -2857,6 +3836,7 @@ static int __net_init xfrm_policy_init(struct net *net)
seqlock_init(&net->xfrm.policy_hthresh.lock);
INIT_LIST_HEAD(&net->xfrm.policy_all);
+ INIT_LIST_HEAD(&net->xfrm.inexact_bins);
INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
return 0;
@@ -2875,6 +3855,7 @@ out_byidx:
static void xfrm_policy_fini(struct net *net)
{
+ struct xfrm_pol_inexact_bin *b, *t;
unsigned int sz;
int dir;
@@ -2900,6 +3881,11 @@ static void xfrm_policy_fini(struct net *net)
sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
xfrm_hash_free(net->xfrm.policy_byidx, sz);
+
+ spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+ list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
+ __xfrm_policy_inexact_prune_bin(b, true);
+ spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
static int __net_init xfrm_net_init(struct net *net)
@@ -3065,7 +4051,7 @@ static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *
}
}
chain = &net->xfrm.policy_inexact[dir];
- hlist_for_each_entry(pol, chain, bydst) {
+ hlist_for_each_entry(pol, chain, bydst_inexact_list) {
if ((pol->priority >= priority) && ret)
break;
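With this split, xfrm_policy_insert_list() reports three outcomes through a single pointer: NULL (nothing to replace), a policy to delete, or ERR_PTR(-EEXIST) when an exclusive insert collides. The ERR_PTR encoding reduced to a minimal sketch; lookup_item() and struct item are hypothetical, the err.h helpers are the real kernel API:

    #include <linux/err.h>
    #include <linux/types.h>

    struct item;
    struct item *lookup_item(int key);	/* hypothetical existing lookup */

    /* Sketch: fold "no match", "match" and a hard error into one pointer. */
    static struct item *find_for_insert(int key, bool excl)
    {
    	struct item *it = lookup_item(key);

    	if (it && excl)
    		return ERR_PTR(-EEXIST);	/* error encoded in the pointer */
    	return it;				/* may be NULL: nothing to replace */
    }

    /* Caller, shaped like xfrm_policy_insert():
     *	it = find_for_insert(key, excl);
     *	if (IS_ERR(it))
     *		return PTR_ERR(it);
     */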
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index dc4a9f1fb941..23c92891758a 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -426,6 +426,12 @@ static void xfrm_put_mode(struct xfrm_mode *mode)
module_put(mode->owner);
}
+void xfrm_state_free(struct xfrm_state *x)
+{
+ kmem_cache_free(xfrm_state_cache, x);
+}
+EXPORT_SYMBOL(xfrm_state_free);
+
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
tasklet_hrtimer_cancel(&x->mtimer);
@@ -452,7 +458,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x)
}
xfrm_dev_state_free(x);
security_xfrm_state_free(x);
- kmem_cache_free(xfrm_state_cache, x);
+ xfrm_state_free(x);
}
static void xfrm_state_gc_task(struct work_struct *work)
@@ -788,7 +794,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
spin_lock_bh(&net->xfrm.xfrm_state_lock);
si->sadcnt = net->xfrm.state_num;
- si->sadhcnt = net->xfrm.state_hmask;
+ si->sadhcnt = net->xfrm.state_hmask + 1;
si->sadhmcnt = xfrm_state_hashmax;
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
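The xfrm_sad_getinfo() fix reflects the kernel convention that power-of-two hash tables store the mask, not the size: state_hmask is nbuckets - 1, so the bucket count reported to userspace is hmask + 1. A one-function sketch of the convention:

    /* Sketch: power-of-two hash tables keep the mask, not the bucket count. */
    static unsigned int bucket_of(unsigned int hash, unsigned int hmask)
    {
    	return hash & hmask;		/* cheap "hash % (hmask + 1)" */
    }
    /* With hmask = 8 - 1 there are 8 buckets, so sadhcnt must report hmask + 1. */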
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c9a84e22f5d5..277c1c46fe94 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2288,13 +2288,13 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
}
- kfree(x);
+ xfrm_state_free(x);
kfree(xp);
return 0;
free_state:
- kfree(x);
+ xfrm_state_free(x);
nomem:
return err;
}
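xfrm_add_acquire() previously kfree()d a state that had been allocated from xfrm_state_cache; slab objects must go back to the cache they came from, which is why xfrm_state_free() is exported above and used here. The pairing rule in miniature, with a hypothetical cache and object type:

    #include <linux/slab.h>

    struct obj { int v; };

    static struct kmem_cache *obj_cache;	/* kmem_cache_create()d at init */

    static struct obj *obj_alloc(void)
    {
    	return kmem_cache_zalloc(obj_cache, GFP_KERNEL);
    }

    static void obj_free(struct obj *o)
    {
    	kmem_cache_free(obj_cache, o);	/* not kfree(): return to the cache */
    }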
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index be0a961450bc..35444f4a846b 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -208,12 +208,20 @@ endif
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
+BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
+ $(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
+ readelf -S ./llvm_btf_verify.o | grep BTF; \
+ /bin/rm -f ./llvm_btf_verify.o)
+ifneq ($(BTF_LLVM_PROBE),)
+ EXTRA_CFLAGS += -g
+else
ifneq ($(and $(BTF_LLC_PROBE),$(BTF_PAHOLE_PROBE),$(BTF_OBJCOPY_PROBE)),)
EXTRA_CFLAGS += -g
LLC_FLAGS += -mattr=dwarfris
DWARF2BTF = y
endif
+endif
# Trick to allow make to be run from this directory
all:
diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c
index e6d7e0fe155b..eae7b635343d 100644
--- a/samples/bpf/bpf_load.c
+++ b/samples/bpf/bpf_load.c
@@ -54,6 +54,25 @@ static int populate_prog_array(const char *event, int prog_fd)
return 0;
}
+static int write_kprobe_events(const char *val)
+{
+ int fd, ret, flags;
+
+ if (val == NULL)
+ return -1;
+ else if (val[0] == '\0')
+ flags = O_WRONLY | O_TRUNC;
+ else
+ flags = O_WRONLY | O_APPEND;
+
+ fd = open("/sys/kernel/debug/tracing/kprobe_events", flags);
+
+ ret = write(fd, val, strlen(val));
+ close(fd);
+
+ return ret;
+}
+
static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
{
bool is_socket = strncmp(event, "socket", 6) == 0;
@@ -165,10 +184,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
#ifdef __x86_64__
if (strncmp(event, "sys_", 4) == 0) {
- snprintf(buf, sizeof(buf),
- "echo '%c:__x64_%s __x64_%s' >> /sys/kernel/debug/tracing/kprobe_events",
- is_kprobe ? 'p' : 'r', event, event);
- err = system(buf);
+ snprintf(buf, sizeof(buf), "%c:__x64_%s __x64_%s",
+ is_kprobe ? 'p' : 'r', event, event);
+ err = write_kprobe_events(buf);
if (err >= 0) {
need_normal_check = false;
event_prefix = "__x64_";
@@ -176,10 +194,9 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
}
#endif
if (need_normal_check) {
- snprintf(buf, sizeof(buf),
- "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
- is_kprobe ? 'p' : 'r', event, event);
- err = system(buf);
+ snprintf(buf, sizeof(buf), "%c:%s %s",
+ is_kprobe ? 'p' : 'r', event, event);
+ err = write_kprobe_events(buf);
if (err < 0) {
printf("failed to create kprobe '%s' error '%s'\n",
event, strerror(errno));
@@ -284,8 +301,8 @@ static int load_maps(struct bpf_map_data *maps, int nr_maps,
numa_node);
}
if (map_fd[i] < 0) {
- printf("failed to create a map: %d %s\n",
- errno, strerror(errno));
+ printf("failed to create map %d (%s): %d %s\n",
+ i, maps[i].name, errno, strerror(errno));
return 1;
}
maps[i].fd = map_fd[i];
@@ -519,7 +536,7 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
return 1;
/* clear all kprobes */
- i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");
+ i = write_kprobe_events("");
/* scan over all elf sections to get license and map info */
for (i = 1; i < ehdr.e_shnum; i++) {
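write_kprobe_events() replaces the shell round-trip through system("echo ... >> kprobe_events") with a direct write: an empty string truncates the file (clearing all probes), anything else is appended. A hedged stand-alone version of the same helper, adding the open() failure check the sample omits; the tracefs path is the one the sample uses:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    static int write_kprobe_events(const char *val)
    {
    	int fd, ret, flags;

    	if (!val)
    		return -1;
    	flags = val[0] ? (O_WRONLY | O_APPEND)	/* append one probe      */
    		       : (O_WRONLY | O_TRUNC);	/* "" clears all probes  */
    	fd = open("/sys/kernel/debug/tracing/kprobe_events", flags);
    	if (fd < 0)
    		return -1;
    	ret = write(fd, val, strlen(val));
    	close(fd);
    	return ret;
    }

    /* Usage, as in load_and_attach(): write_kprobe_events("p:sys_open sys_open");
     * and write_kprobe_events("") to clear everything first. */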
diff --git a/samples/bpf/xdp1_user.c b/samples/bpf/xdp1_user.c
index b02c531510ed..0a197f86ac43 100644
--- a/samples/bpf/xdp1_user.c
+++ b/samples/bpf/xdp1_user.c
@@ -15,6 +15,7 @@
#include <unistd.h>
#include <libgen.h>
#include <sys/resource.h>
+#include <net/if.h>
#include "bpf_util.h"
#include "bpf/bpf.h"
@@ -34,26 +35,24 @@ static void int_exit(int sig)
static void poll_stats(int map_fd, int interval)
{
unsigned int nr_cpus = bpf_num_possible_cpus();
- const unsigned int nr_keys = 256;
- __u64 values[nr_cpus], prev[nr_keys][nr_cpus];
- __u32 key;
+ __u64 values[nr_cpus], prev[UINT8_MAX] = { 0 };
int i;
- memset(prev, 0, sizeof(prev));
-
while (1) {
+ __u32 key = UINT32_MAX;
+
sleep(interval);
- for (key = 0; key < nr_keys; key++) {
+ while (bpf_map_get_next_key(map_fd, &key, &key) != -1) {
__u64 sum = 0;
assert(bpf_map_lookup_elem(map_fd, &key, values) == 0);
for (i = 0; i < nr_cpus; i++)
- sum += (values[i] - prev[key][i]);
- if (sum)
+ sum += values[i];
+ if (sum > prev[key])
printf("proto %u: %10llu pkt/s\n",
- key, sum / interval);
- memcpy(prev[key], values, sizeof(values));
+ key, (sum - prev[key]) / interval);
+ prev[key] = sum;
}
}
}
@@ -61,7 +60,7 @@ static void poll_stats(int map_fd, int interval)
static void usage(const char *prog)
{
fprintf(stderr,
- "usage: %s [OPTS] IFINDEX\n\n"
+ "usage: %s [OPTS] IFACE\n\n"
"OPTS:\n"
" -S use skb-mode\n"
" -N enforce native mode\n",
@@ -104,7 +103,11 @@ int main(int argc, char **argv)
return 1;
}
- ifindex = strtoul(argv[optind], NULL, 0);
+ ifindex = if_nametoindex(argv[1]);
+ if (!ifindex) {
+ perror("if_nametoindex");
+ return 1;
+ }
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
prog_load_attr.file = filename;
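The rewritten poll_stats() no longer assumes 256 dense keys; it visits only populated entries. Passing a key that is not in the map (here UINT32_MAX) makes bpf_map_get_next_key() return the first key, and reusing one variable for both arguments steps through the map until the call fails at the end. The iteration shape on its own, assuming a libbpf build where the header lives at bpf/bpf.h as in these samples:

    #include <stdint.h>
    #include <linux/types.h>
    #include <bpf/bpf.h>

    /* Sketch: visit every key of a BPF map, in map order. */
    static void for_each_key(int map_fd)
    {
    	__u32 key = UINT32_MAX;	/* absent key => iteration starts at the front */

    	while (bpf_map_get_next_key(map_fd, &key, &key) == 0) {
    		/* key now holds the next live entry; look it up here */
    	}
    }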
diff --git a/samples/v4l/v4l2-pci-skeleton.c b/samples/v4l/v4l2-pci-skeleton.c
index f520e3aef9c6..27ec30952cfa 100644
--- a/samples/v4l/v4l2-pci-skeleton.c
+++ b/samples/v4l/v4l2-pci-skeleton.c
@@ -80,13 +80,13 @@ struct skeleton {
};
struct skel_buffer {
- struct vb2_buffer vb;
+ struct vb2_v4l2_buffer vb;
struct list_head list;
};
-static inline struct skel_buffer *to_skel_buffer(struct vb2_buffer *vb2)
+static inline struct skel_buffer *to_skel_buffer(struct vb2_v4l2_buffer *vbuf)
{
- return container_of(vb2, struct skel_buffer, vb);
+ return container_of(vbuf, struct skel_buffer, vb);
}
static const struct pci_device_id skeleton_pci_tbl[] = {
@@ -212,8 +212,9 @@ static int buffer_prepare(struct vb2_buffer *vb)
*/
static void buffer_queue(struct vb2_buffer *vb)
{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct skeleton *skel = vb2_get_drv_priv(vb->vb2_queue);
- struct skel_buffer *buf = to_skel_buffer(vb);
+ struct skel_buffer *buf = to_skel_buffer(vbuf);
unsigned long flags;
spin_lock_irqsave(&skel->qlock, flags);
@@ -232,7 +233,7 @@ static void return_all_buffers(struct skeleton *skel,
spin_lock_irqsave(&skel->qlock, flags);
list_for_each_entry_safe(buf, node, &skel->buf_list, list) {
- vb2_buffer_done(&buf->vb, state);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
list_del(&buf->list);
}
spin_unlock_irqrestore(&skel->qlock, flags);
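The skeleton now embeds struct vb2_v4l2_buffer (which itself embeds struct vb2_buffer as vb2_buf), so driver state is recovered from either layer with container_of(). The recovery pattern reduced to plain, runnable C; the outer/inner names are hypothetical and the macro is spelled out rather than taken from kernel.h:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    struct inner { int id; };
    struct outer {
    	struct inner in;	/* embedded, like vb inside skel_buffer */
    	int extra;
    };

    /* Sketch of the to_skel_buffer() recovery above. */
    static struct outer *to_outer(struct inner *p)
    {
    	return container_of(p, struct outer, in);
    }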
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index bb015551c2d9..3d09844405c9 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -115,9 +115,7 @@ __cc-option = $(call try-run,\
# Do not attempt to build with gcc plugins during cc-option tests.
# (And this uses delayed resolution so the flags will be up to date.)
-# In addition, do not include the asm macros which are built later.
-CC_OPTION_FILTERED = $(GCC_PLUGINS_CFLAGS) $(ASM_MACRO_FLAGS)
-CC_OPTION_CFLAGS = $(filter-out $(CC_OPTION_FILTERED),$(KBUILD_CFLAGS))
+CC_OPTION_CFLAGS = $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
# cc-option
# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index a8e7ba9f73e8..6a6be9f440cf 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -236,10 +236,8 @@ ifdef CONFIG_GCOV_KERNEL
objtool_args += --no-unreachable
endif
ifdef CONFIG_RETPOLINE
-ifneq ($(RETPOLINE_CFLAGS),)
objtool_args += --retpoline
endif
-endif
ifdef CONFIG_MODVERSIONS
diff --git a/scripts/Makefile.gcc-plugins b/scripts/Makefile.gcc-plugins
index 46c5c6809806..048179d8c07f 100644
--- a/scripts/Makefile.gcc-plugins
+++ b/scripts/Makefile.gcc-plugins
@@ -36,6 +36,12 @@ ifdef CONFIG_GCC_PLUGIN_STACKLEAK
endif
export DISABLE_STACKLEAK_PLUGIN
+gcc-plugin-$(CONFIG_GCC_PLUGIN_ARM_SSP_PER_TASK) += arm_ssp_per_task_plugin.so
+ifdef CONFIG_GCC_PLUGIN_ARM_SSP_PER_TASK
+ DISABLE_ARM_SSP_PER_TASK_PLUGIN += -fplugin-arg-arm_ssp_per_task_plugin-disable
+endif
+export DISABLE_ARM_SSP_PER_TASK_PLUGIN
+
# All the plugin CFLAGS are collected here in case a build target needs to
# filter them out of the KBUILD_CFLAGS.
GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index c883ec55654f..377f373db6c0 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -573,6 +573,27 @@ foreach my $entry (@mode_permission_funcs) {
}
$mode_perms_search = "(?:${mode_perms_search})";
+our %deprecated_apis = (
+ "synchronize_rcu_bh" => "synchronize_rcu",
+ "synchronize_rcu_bh_expedited" => "synchronize_rcu_expedited",
+ "call_rcu_bh" => "call_rcu",
+ "rcu_barrier_bh" => "rcu_barrier",
+ "synchronize_sched" => "synchronize_rcu",
+ "synchronize_sched_expedited" => "synchronize_rcu_expedited",
+ "call_rcu_sched" => "call_rcu",
+ "rcu_barrier_sched" => "rcu_barrier",
+ "get_state_synchronize_sched" => "get_state_synchronize_rcu",
+ "cond_synchronize_sched" => "cond_synchronize_rcu",
+);
+
# Create a search pattern for all these strings to speed up a loop below
+our $deprecated_apis_search = "";
+foreach my $entry (keys %deprecated_apis) {
+ $deprecated_apis_search .= '|' if ($deprecated_apis_search ne "");
+ $deprecated_apis_search .= $entry;
+}
+$deprecated_apis_search = "(?:${deprecated_apis_search})";
+
our $mode_perms_world_writable = qr{
S_IWUGO |
S_IWOTH |
@@ -6368,6 +6389,20 @@ sub process {
"please use device_initcall() or more appropriate function instead of __initcall() (see include/linux/init.h)\n" . $herecurr);
}
+# check for spin_is_locked(), suggest lockdep instead
+ if ($line =~ /\bspin_is_locked\(/) {
+ WARN("USE_LOCKDEP",
+ "Where possible, use lockdep_assert_held instead of assertions based on spin_is_locked\n" . $herecurr);
+ }
+
+# check for deprecated apis
+ if ($line =~ /\b($deprecated_apis_search)\b\s*\(/) {
+ my $deprecated_api = $1;
+ my $new_api = $deprecated_apis{$deprecated_api};
+ WARN("DEPRECATED_API",
+ "Deprecated use of '$deprecated_api', prefer '$new_api' instead\n" . $herecurr);
+ }
+
# check for various structs that are normally const (ops, kgdb, device_tree)
# and avoid what seem like struct definitions 'struct foo {'
if ($line !~ /\bconst\b/ &&
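The new table encodes the RCU flavor consolidation: the _bh and _sched grace-period APIs collapsed into the plain RCU ones once a single grace period covered all contexts. What checkpatch now flags and what it suggests, shown for call_rcu_bh in a small kernel-C sketch (struct foo is hypothetical):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
    	struct rcu_head rcu;
    };

    static void foo_reclaim(struct rcu_head *head)
    {
    	kfree(container_of(head, struct foo, rcu));
    }

    static void foo_release(struct foo *f)
    {
    	/* old:	call_rcu_bh(&f->rcu, foo_reclaim);   <- DEPRECATED_API warning */
    	call_rcu(&f->rcu, foo_reclaim);		/* suggested replacement */
    }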
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index 8081b6cf67d2..34414c6efad6 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -47,8 +47,8 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
$xs = "[0-9a-f ]"; # hex character or space
$funcre = qr/^$x* <(.*)>:$/;
if ($arch eq 'aarch64') {
- #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp,#-80]!
- $re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o;
+ #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
+ $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
} elsif ($arch eq 'arm') {
#c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64
$re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
diff --git a/scripts/checksyscalls.sh b/scripts/checksyscalls.sh
index ee3dfb5be6cd..cf931003395f 100755
--- a/scripts/checksyscalls.sh
+++ b/scripts/checksyscalls.sh
@@ -150,6 +150,7 @@ cat << EOF
#define __IGNORE_uselib
#define __IGNORE__sysctl
#define __IGNORE_arch_prctl
+#define __IGNORE_nfsservctl
/* ... including the "new" 32-bit uid syscalls */
#define __IGNORE_lchown32
diff --git a/scripts/coccinelle/api/drm-get-put.cocci b/scripts/coccinelle/api/drm-get-put.cocci
deleted file mode 100644
index 3a09c97ad87d..000000000000
--- a/scripts/coccinelle/api/drm-get-put.cocci
+++ /dev/null
@@ -1,78 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-///
-/// Use drm_*_get() and drm_*_put() helpers instead of drm_*_reference() and
-/// drm_*_unreference() helpers.
-///
-// Confidence: High
-// Copyright: (C) 2017 NVIDIA Corporation
-// Options: --no-includes --include-headers
-//
-
-virtual patch
-virtual report
-
-@depends on patch@
-expression object;
-@@
-
-(
-- drm_connector_reference(object)
-+ drm_connector_get(object)
-|
-- drm_connector_unreference(object)
-+ drm_connector_put(object)
-|
-- drm_framebuffer_reference(object)
-+ drm_framebuffer_get(object)
-|
-- drm_framebuffer_unreference(object)
-+ drm_framebuffer_put(object)
-|
-- drm_gem_object_reference(object)
-+ drm_gem_object_get(object)
-|
-- drm_gem_object_unreference(object)
-+ drm_gem_object_put(object)
-|
-- __drm_gem_object_unreference(object)
-+ __drm_gem_object_put(object)
-|
-- drm_gem_object_unreference_unlocked(object)
-+ drm_gem_object_put_unlocked(object)
-|
-- drm_dev_unref(object)
-+ drm_dev_put(object)
-)
-
-@r depends on report@
-expression object;
-position p;
-@@
-
-(
-drm_connector_unreference@p(object)
-|
-drm_connector_reference@p(object)
-|
-drm_framebuffer_unreference@p(object)
-|
-drm_framebuffer_reference@p(object)
-|
-drm_gem_object_unreference@p(object)
-|
-drm_gem_object_reference@p(object)
-|
-__drm_gem_object_unreference(object)
-|
-drm_gem_object_unreference_unlocked(object)
-|
-drm_dev_unref@p(object)
-)
-
-@script:python depends on report@
-object << r.object;
-p << r.p;
-@@
-
-msg="WARNING: use get/put helpers to reference and dereference %s" % (object)
-coccilib.report.print_report(p[0], msg)
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
index 0d5c799688f0..d45f7f36b859 100644
--- a/scripts/gcc-plugins/Kconfig
+++ b/scripts/gcc-plugins/Kconfig
@@ -190,4 +190,8 @@ config STACKLEAK_RUNTIME_DISABLE
runtime to control kernel stack erasing for kernels built with
CONFIG_GCC_PLUGIN_STACKLEAK.
+config GCC_PLUGIN_ARM_SSP_PER_TASK
+ bool
+ depends on GCC_PLUGINS && ARM
+
endif
diff --git a/scripts/gcc-plugins/arm_ssp_per_task_plugin.c b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
new file mode 100644
index 000000000000..de70b8470971
--- /dev/null
+++ b/scripts/gcc-plugins/arm_ssp_per_task_plugin.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "gcc-common.h"
+
+__visible int plugin_is_GPL_compatible;
+
+static unsigned int sp_mask, canary_offset;
+
+static unsigned int arm_pertask_ssp_rtl_execute(void)
+{
+ rtx_insn *insn;
+
+ for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
+ const char *sym;
+ rtx body;
+ rtx masked_sp;
+
+ /*
+ * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard
+ */
+ if (!INSN_P(insn))
+ continue;
+ body = PATTERN(insn);
+ if (GET_CODE(body) != SET ||
+ GET_CODE(SET_SRC(body)) != SYMBOL_REF)
+ continue;
+ sym = XSTR(SET_SRC(body), 0);
+ if (strcmp(sym, "__stack_chk_guard"))
+ continue;
+
+ /*
+ * Replace the source of the SET insn with an expression that
+ * produces the address of the copy of the stack canary value
+ * stored in struct thread_info
+ */
+ masked_sp = gen_reg_rtx(Pmode);
+
+ emit_insn_before(gen_rtx_SET(masked_sp,
+ gen_rtx_AND(Pmode,
+ stack_pointer_rtx,
+ GEN_INT(sp_mask))),
+ insn);
+
+ SET_SRC(body) = gen_rtx_PLUS(Pmode, masked_sp,
+ GEN_INT(canary_offset));
+ }
+ return 0;
+}
+
+#define PASS_NAME arm_pertask_ssp_rtl
+
+#define NO_GATE
+#include "gcc-generate-rtl-pass.h"
+
+__visible int plugin_init(struct plugin_name_args *plugin_info,
+ struct plugin_gcc_version *version)
+{
+ const char * const plugin_name = plugin_info->base_name;
+ const int argc = plugin_info->argc;
+ const struct plugin_argument *argv = plugin_info->argv;
+ int tso = 0;
+ int i;
+
+ if (!plugin_default_version_check(version, &gcc_version)) {
+ error(G_("incompatible gcc/plugin versions"));
+ return 1;
+ }
+
+ for (i = 0; i < argc; ++i) {
+ if (!strcmp(argv[i].key, "disable"))
+ return 0;
+
+ /* all remaining options require a value */
+ if (!argv[i].value) {
+ error(G_("no value supplied for option '-fplugin-arg-%s-%s'"),
+ plugin_name, argv[i].key);
+ return 1;
+ }
+
+ if (!strcmp(argv[i].key, "tso")) {
+ tso = atoi(argv[i].value);
+ continue;
+ }
+
+ if (!strcmp(argv[i].key, "offset")) {
+ canary_offset = atoi(argv[i].value);
+ continue;
+ }
+ error(G_("unknown option '-fplugin-arg-%s-%s'"),
+ plugin_name, argv[i].key);
+ return 1;
+ }
+
+ /* create the mask that produces the base of the stack */
+ sp_mask = ~((1U << (12 + tso)) - 1);
+
+ PASS_INFO(arm_pertask_ssp_rtl, "expand", 1, PASS_POS_INSERT_AFTER);
+
+ register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP,
+ NULL, &arm_pertask_ssp_rtl_pass_info);
+
+ return 0;
+}
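The RTL pass rewrites each load of __stack_chk_guard into (sp & sp_mask) + canary_offset: masking the stack pointer with ~(THREAD_SIZE - 1) yields the base of the current stack, where struct thread_info (and with it the per-task canary copy) lives. The address computation the emitted RTL performs, expressed as a C sketch with the values configured by the plugin arguments:

    /* Sketch: what the substituted RTL computes for each canary load. */
    unsigned long per_task_canary_addr(unsigned long sp,
    				   unsigned int tso,		/* "tso" plugin arg    */
    				   unsigned int canary_offset)	/* "offset" plugin arg */
    {
    	unsigned long sp_mask = ~((1UL << (12 + tso)) - 1);	/* stack base mask */

    	return (sp & sp_mask) + canary_offset;	/* canary copy in thread_info */
    }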
diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
index 2f48da98b5d4..dbd37460c573 100644
--- a/scripts/gcc-plugins/stackleak_plugin.c
+++ b/scripts/gcc-plugins/stackleak_plugin.c
@@ -363,10 +363,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
PASS_POS_INSERT_BEFORE);
/*
- * The stackleak_cleanup pass should be executed after the
- * "reload" pass, when the stack frame size is final.
+ * The stackleak_cleanup pass should be executed before the "*free_cfg"
+ * pass. It's the moment when the stack frame size is already final,
+ * function prologues and epilogues are generated, and the
+ * machine-dependent code transformations are not done.
*/
- PASS_INFO(stackleak_cleanup, "reload", 1, PASS_POS_INSERT_AFTER);
+ PASS_INFO(stackleak_cleanup, "*free_cfg", 1, PASS_POS_INSERT_BEFORE);
if (!plugin_default_version_check(version, &gcc_version)) {
error(G_("incompatible gcc/plugin versions"));
diff --git a/scripts/mod/Makefile b/scripts/mod/Makefile
index a5b4af47987a..42c5d50f2bcc 100644
--- a/scripts/mod/Makefile
+++ b/scripts/mod/Makefile
@@ -4,8 +4,6 @@ OBJECT_FILES_NON_STANDARD := y
hostprogs-y := modpost mk_elfconfig
always := $(hostprogs-y) empty.o
-CFLAGS_REMOVE_empty.o := $(ASM_MACRO_FLAGS)
-
modpost-objs := modpost.o file2alias.o sumversion.o
devicetable-offsets-file := devicetable-offsets.h
diff --git a/scripts/spdxcheck.py b/scripts/spdxcheck.py
index 5056fb3b897d..e559c6294c39 100755
--- a/scripts/spdxcheck.py
+++ b/scripts/spdxcheck.py
@@ -168,6 +168,7 @@ class id_parser(object):
self.curline = 0
try:
for line in fd:
+ line = line.decode(locale.getpreferredencoding(False), errors='ignore')
self.curline += 1
if self.curline > maxlines:
break
@@ -249,12 +250,13 @@ if __name__ == '__main__':
try:
if len(args.path) and args.path[0] == '-':
- parser.parse_lines(sys.stdin, args.maxlines, '-')
+ stdin = os.fdopen(sys.stdin.fileno(), 'rb')
+ parser.parse_lines(stdin, args.maxlines, '-')
else:
if args.path:
for p in args.path:
if os.path.isfile(p):
- parser.parse_lines(open(p), args.maxlines, p)
+ parser.parse_lines(open(p, 'rb'), args.maxlines, p)
elif os.path.isdir(p):
scan_git_subtree(repo.head.reference.commit.tree, p)
else:
diff --git a/scripts/unifdef.c b/scripts/unifdef.c
index 7493c0ee51cc..db00e3e30a59 100644
--- a/scripts/unifdef.c
+++ b/scripts/unifdef.c
@@ -395,7 +395,7 @@ usage(void)
* When we have processed a group that starts off with a known-false
* #if/#elif sequence (which has therefore been deleted) followed by a
* #elif that we don't understand and therefore must keep, we edit the
- * latter into a #if to keep the nesting correct. We use strncpy() to
+ * latter into a #if to keep the nesting correct. We use memcpy() to
* overwrite the 4-byte token "elif" with "if  " without a '\0' byte.
*
* When we find a true #elif in a group, the following block will
@@ -450,7 +450,7 @@ static void Idrop (void) { Fdrop(); ignoreon(); }
static void Itrue (void) { Ftrue(); ignoreon(); }
static void Ifalse(void) { Ffalse(); ignoreon(); }
/* modify this line */
-static void Mpass (void) { strncpy(keyword, "if  ", 4); Pelif(); }
+static void Mpass (void) { memcpy(keyword, "if  ", 4); Pelif(); }
static void Mtrue (void) { keywordedit("else"); state(IS_TRUE_MIDDLE); }
static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); }
static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); }
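The unifdef change is deliberate: the 4 bytes "if  " (no terminator) overwrite the 4-byte token "elif" in place, which is exactly memcpy() semantics; strncpy() also works but newer compilers warn (-Wstringop-truncation) because no NUL is copied. The edit in isolation, as a runnable demo:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	char keyword[] = "elif defined(FOO)";

    	memcpy(keyword, "if  ", 4);	/* overwrite the token, keep the rest */
    	puts(keyword);			/* prints: if   defined(FOO) */
    	return 0;
    }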
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 8963203319ea..3f80a684c232 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -15,7 +15,7 @@
#include <linux/ctype.h>
#include <linux/security.h>
#include <linux/vmalloc.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
index 136f2a047836..af03d98c7552 100644
--- a/security/apparmor/crypto.c
+++ b/security/apparmor/crypto.c
@@ -112,7 +112,7 @@ static int __init init_profile_hash(void)
if (!apparmor_initialized)
return 0;
- tfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(tfm)) {
int error = PTR_ERR(tfm);
AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
diff --git a/security/commoncap.c b/security/commoncap.c
index 18a4fdf6f6eb..232db019f051 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -9,7 +9,6 @@
#include <linux/capability.h>
#include <linux/audit.h>
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lsm_hooks.h>
diff --git a/security/inode.c b/security/inode.c
index 8dd9ca8848e4..b7772a9b315e 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -13,7 +13,8 @@
*/
/* #define DEBUG */
-#include <linux/module.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
@@ -341,7 +342,4 @@ static int __init securityfs_init(void)
#endif
return 0;
}
-
core_initcall(securityfs_init);
-MODULE_LICENSE("GPL");
-
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 8c25f949ebdb..43e2dc3a60d0 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -15,7 +15,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/crypto.h>
#include <linux/xattr.h>
#include <linux/evm.h>
@@ -97,8 +97,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo)
mutex_lock(&mutex);
if (*tfm)
goto out;
- *tfm = crypto_alloc_shash(algo, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_NOLOAD);
+ *tfm = crypto_alloc_shash(algo, 0, CRYPTO_NOLOAD);
if (IS_ERR(*tfm)) {
rc = PTR_ERR(*tfm);
pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 7f3f54d89a6e..5ecaa3d6fe0b 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -16,7 +16,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/crypto.h>
#include <linux/audit.h>
#include <linux/xattr.h>
@@ -592,6 +592,3 @@ error:
}
late_initcall(init_evm);
-
-MODULE_DESCRIPTION("Extended Verification Module");
-MODULE_LICENSE("GPL");
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
index 46408b9e62e8..7faf98c20373 100644
--- a/security/integrity/evm/evm_posix_acl.c
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -9,7 +9,6 @@
* the Free Software Foundation, version 2 of the License.
*/
-#include <linux/module.h>
#include <linux/xattr.h>
#include <linux/evm.h>
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index 77de71b7794c..015aea8fdf1e 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -17,7 +17,7 @@
#include <linux/audit.h>
#include <linux/uaccess.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/mutex.h>
#include "evm.h"
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 1ea05da2323d..88f04b3380d4 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -16,7 +16,7 @@
* using an rbtree.
*/
#include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/file.h>
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 99dd1d53fc35..c7505fb122d4 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -12,7 +12,6 @@
* Implements must_appraise_or_measure, collect_measurement,
* appraise_measurement, store_measurement and store_template.
*/
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fs.h>
@@ -336,7 +335,7 @@ void ima_audit_measurement(struct integrity_iint_cache *iint,
audit_log_untrustedstring(ab, filename);
audit_log_format(ab, " hash=\"%s:%s\"", algo_name, hash);
- audit_log_task_info(ab, current);
+ audit_log_task_info(ab);
audit_log_end(ab);
iint->flags |= IMA_AUDITED;
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index deec1804a00a..2e11e750a067 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2 of the License.
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/xattr.h>
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 3183cc23d0f8..0af792833f42 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -20,7 +20,7 @@
#include <linux/fcntl.h>
#include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 59d834219cd6..6bb42a9c5e47 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -17,7 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 1b88d58e1325..616a88f95b92 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -1,4 +1,6 @@
/*
+ * Integrity Measurement Architecture
+ *
* Copyright (C) 2005,2006,2007,2008 IBM Corporation
*
* Authors:
@@ -560,6 +562,3 @@ static int __init init_ima(void)
}
late_initcall(init_ima); /* Start IMA after the TPM is available */
-
-MODULE_DESCRIPTION("Integrity Measurement Architecture");
-MODULE_LICENSE("GPL");
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index 8c9499867c91..fcf5b2729063 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -10,7 +10,7 @@
* - initialize default measure policy rules
*
*/
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/security.h>
@@ -580,9 +580,9 @@ void ima_update_policy(void)
ima_update_policy_flag();
}
+/* Keep the enumeration in sync with the policy_tokens! */
enum {
- Opt_err = -1,
- Opt_measure = 1, Opt_dont_measure,
+ Opt_measure, Opt_dont_measure,
Opt_appraise, Opt_dont_appraise,
Opt_audit, Opt_hash, Opt_dont_hash,
Opt_obj_user, Opt_obj_role, Opt_obj_type,
@@ -592,10 +592,10 @@ enum {
Opt_uid_gt, Opt_euid_gt, Opt_fowner_gt,
Opt_uid_lt, Opt_euid_lt, Opt_fowner_lt,
Opt_appraise_type, Opt_permit_directio,
- Opt_pcr
+ Opt_pcr, Opt_err
};
-static match_table_t policy_tokens = {
+static const match_table_t policy_tokens = {
{Opt_measure, "measure"},
{Opt_dont_measure, "dont_measure"},
{Opt_appraise, "appraise"},
@@ -1103,7 +1103,7 @@ void ima_policy_stop(struct seq_file *m, void *v)
{
}
-#define pt(token) policy_tokens[token + Opt_err].pattern
+#define pt(token) policy_tokens[token].pattern
#define mt(token) mask_tokens[token]
/*
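Dropping Opt_err = -1 works because match_token() returns whatever enum value is stored in the table entry it matched, while the pt() macro indexes policy_tokens directly, so the enum must mirror the table order; hence the new warning comment. The shape of the parser, using the real linux/parser.h API with hypothetical tokens:

    #include <linux/parser.h>

    enum { Opt_alpha, Opt_beta, Opt_err };	/* must mirror the table below */

    static const match_table_t tokens = {
    	{Opt_alpha, "alpha=%d"},
    	{Opt_beta,  "beta"},
    	{Opt_err,   NULL}			/* sentinel: unknown option */
    };

    /* match_token(p, tokens, args) returns Opt_alpha/Opt_beta/Opt_err, and
     * tokens[tok].pattern (the pt() trick above) maps a token back to its
     * pattern only while the enum and the table stay in lockstep. */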
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index b186819bd5aa..0e41dc1df1d4 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -21,7 +21,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include "ima.h"
diff --git a/security/keys/encrypted-keys/ecryptfs_format.c b/security/keys/encrypted-keys/ecryptfs_format.c
index 6daa3b6ff9ed..efac03047919 100644
--- a/security/keys/encrypted-keys/ecryptfs_format.c
+++ b/security/keys/encrypted-keys/ecryptfs_format.c
@@ -15,7 +15,8 @@
* the Free Software Foundation, version 2 of the License.
*/
-#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/string.h>
#include "ecryptfs_format.h"
u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok)
@@ -77,5 +78,3 @@ int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok,
return 0;
}
EXPORT_SYMBOL(ecryptfs_fill_auth_tok);
-
-MODULE_LICENSE("GPL");
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index d92cbf9687c3..a3891ae9fa0f 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -342,7 +342,7 @@ static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
struct crypto_shash *tfm;
int err;
- tfm = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_shash(hmac_alg, 0, 0);
if (IS_ERR(tfm)) {
pr_err("encrypted_key: can't alloc %s transform: %ld\n",
hmac_alg, PTR_ERR(tfm));
@@ -984,7 +984,7 @@ static int __init init_encrypted(void)
{
int ret;
- hash_tfm = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+ hash_tfm = crypto_alloc_shash(hash_alg, 0, 0);
if (IS_ERR(hash_tfm)) {
pr_err("encrypted_key: can't allocate %s transform: %ld\n",
hash_alg, PTR_ERR(hash_tfm));
diff --git a/security/keys/encrypted-keys/masterkey_trusted.c b/security/keys/encrypted-keys/masterkey_trusted.c
index cbf0bc127a73..dc3d18cae642 100644
--- a/security/keys/encrypted-keys/masterkey_trusted.c
+++ b/security/keys/encrypted-keys/masterkey_trusted.c
@@ -15,7 +15,6 @@
*/
#include <linux/uaccess.h>
-#include <linux/module.h>
#include <linux/err.h>
#include <keys/trusted-type.h>
#include <keys/encrypted-type.h>
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 7207e6094dc1..634e96b380e8 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -9,7 +9,6 @@
* 2 of the Licence, or (at your option) any later version.
*/
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <keys/keyring-type.h>
diff --git a/security/keys/key.c b/security/keys/key.c
index d97c9394b5dd..44a80d6741a1 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 18619690ce77..e8093d025966 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c
index 783978842f13..70e65a2ff207 100644
--- a/security/keys/keyctl_pkey.c
+++ b/security/keys/keyctl_pkey.c
@@ -25,7 +25,7 @@ static void keyctl_pkey_params_free(struct kernel_pkey_params *params)
}
enum {
- Opt_err = -1,
+ Opt_err,
Opt_enc, /* "enc=<encoding>" eg. "enc=oaep" */
Opt_hash, /* "hash=<digest-name>" eg. "hash=sha1" */
};
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 41bcf57e96f2..eadebb92986a 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/security/keys/permission.c b/security/keys/permission.c
index f68dc04d614e..06df9d5e7572 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/security.h>
#include "internal.h"
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 5af2934965d8..d2b802072693 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index d5b25e535d3a..8b8994920620 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 114f7408feee..301f0e300dbd 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -11,7 +11,7 @@
* See Documentation/security/keys/request-key.rst
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/err.h>
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 424e1d90412e..87ea2f54dedc 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -11,7 +11,6 @@
* See Documentation/security/keys/request-key.rst
*/
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/seq_file.h>
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index ff6789365a12..4d98f4f87236 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -711,7 +711,7 @@ static int key_unseal(struct trusted_key_payload *p,
}
enum {
- Opt_err = -1,
+ Opt_err,
Opt_new, Opt_load, Opt_update,
Opt_keyhandle, Opt_keyauth, Opt_blobauth,
Opt_pcrinfo, Opt_pcrlock, Opt_migratable,
@@ -1199,14 +1199,14 @@ static int __init trusted_shash_alloc(void)
{
int ret;
- hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+ hmacalg = crypto_alloc_shash(hmac_alg, 0, 0);
if (IS_ERR(hmacalg)) {
pr_info("trusted_key: could not allocate crypto %s\n",
hmac_alg);
return PTR_ERR(hmacalg);
}
- hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+ hashalg = crypto_alloc_shash(hash_alg, 0, 0);
if (IS_ERR(hashalg)) {
pr_info("trusted_key: could not allocate crypto %s\n",
hash_alg);
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 9f558bedba23..5666fe0352f7 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -9,7 +9,7 @@
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
diff --git a/security/security.c b/security/security.c
index 04d173eb93f6..d670136dda2c 100644
--- a/security/security.c
+++ b/security/security.c
@@ -17,7 +17,7 @@
#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/dcache.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lsm_hooks.h>
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index a67459eb62d5..0f27db6d94a9 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2934,7 +2934,7 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
return rc;
/* Allow all mounts performed by the kernel */
- if (flags & MS_KERNMOUNT)
+ if (flags & (MS_KERNMOUNT | MS_SUBMOUNT))
return 0;
ad.type = LSM_AUDIT_DATA_DENTRY;
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 23e762d529fa..ba8eedf42b90 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -81,7 +81,7 @@ enum {
};
#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
-extern char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX];
+extern const char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX];
/*
* type_datum properties
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 74b951f55608..9cec81209617 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -80,6 +80,9 @@ static const struct nlmsg_perm nlmsg_route_perms[] =
{ RTM_NEWSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ },
{ RTM_GETSTATS, NETLINK_ROUTE_SOCKET__NLMSG_READ },
{ RTM_NEWCACHEREPORT, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETCHAIN, NETLINK_ROUTE_SOCKET__NLMSG_READ },
};
static const struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -158,7 +161,11 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
switch (sclass) {
case SECCLASS_NETLINK_ROUTE_SOCKET:
- /* RTM_MAX always point to RTM_SETxxxx, ie RTM_NEWxxx + 3 */
+ /* RTM_MAX always points to RTM_SETxxxx, ie RTM_NEWxxx + 3.
+ * If the BUILD_BUG_ON() below fails you must update the
+ * structures at the top of this file with the new mappings
+ * before updating the BUILD_BUG_ON() macro!
+ */
BUILD_BUG_ON(RTM_MAX != (RTM_NEWCHAIN + 3));
err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
sizeof(nlmsg_route_perms));
@@ -170,6 +177,10 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
break;
case SECCLASS_NETLINK_XFRM_SOCKET:
+ /* If the BUILD_BUG_ON() below fails you must update the
+ * structures at the top of this file with the new mappings
+ * before updating the BUILD_BUG_ON() macro!
+ */
BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING);
err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
sizeof(nlmsg_xfrm_perms));
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index b7efa2296969..5e05f5b902d7 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -440,16 +440,17 @@ int mls_setup_user_range(struct policydb *p,
/*
* Convert the MLS fields in the security context
- * structure `c' from the values specified in the
- * policy `oldp' to the values specified in the policy `newp'.
+ * structure `oldc' from the values specified in the
+ * policy `oldp' to the values specified in the policy `newp',
+ * storing the resulting context in `newc'.
*/
int mls_convert_context(struct policydb *oldp,
struct policydb *newp,
- struct context *c)
+ struct context *oldc,
+ struct context *newc)
{
struct level_datum *levdatum;
struct cat_datum *catdatum;
- struct ebitmap bitmap;
struct ebitmap_node *node;
int l, i;
@@ -459,28 +460,25 @@ int mls_convert_context(struct policydb *oldp,
for (l = 0; l < 2; l++) {
levdatum = hashtab_search(newp->p_levels.table,
sym_name(oldp, SYM_LEVELS,
- c->range.level[l].sens - 1));
+ oldc->range.level[l].sens - 1));
if (!levdatum)
return -EINVAL;
- c->range.level[l].sens = levdatum->level->sens;
+ newc->range.level[l].sens = levdatum->level->sens;
- ebitmap_init(&bitmap);
- ebitmap_for_each_positive_bit(&c->range.level[l].cat, node, i) {
+ ebitmap_for_each_positive_bit(&oldc->range.level[l].cat,
+ node, i) {
int rc;
catdatum = hashtab_search(newp->p_cats.table,
sym_name(oldp, SYM_CATS, i));
if (!catdatum)
return -EINVAL;
- rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1);
+ rc = ebitmap_set_bit(&newc->range.level[l].cat,
+ catdatum->value - 1, 1);
if (rc)
return rc;
-
- cond_resched();
}
- ebitmap_destroy(&c->range.level[l].cat);
- c->range.level[l].cat = bitmap;
}
return 0;
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h
index 67093647576d..7954b1e60b64 100644
--- a/security/selinux/ss/mls.h
+++ b/security/selinux/ss/mls.h
@@ -46,7 +46,8 @@ int mls_range_set(struct context *context, struct mls_range *range);
int mls_convert_context(struct policydb *oldp,
struct policydb *newp,
- struct context *context);
+ struct context *oldc,
+ struct context *newc);
int mls_compute_sid(struct policydb *p,
struct context *scontext,
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index f4eadd3f7350..a50d625e7946 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -909,13 +909,21 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
if (!c->context[0].user) {
pr_err("SELinux: SID %s was never defined.\n",
c->u.name);
+ sidtab_destroy(s);
+ goto out;
+ }
+ if (c->sid[0] == SECSID_NULL || c->sid[0] > SECINITSID_NUM) {
+ pr_err("SELinux: Initial SID %s out of range.\n",
+ c->u.name);
+ sidtab_destroy(s);
goto out;
}
- rc = sidtab_insert(s, c->sid[0], &c->context[0]);
+ rc = sidtab_set_initial(s, c->sid[0], &c->context[0]);
if (rc) {
pr_err("SELinux: unable to load initial SID %s.\n",
c->u.name);
+ sidtab_destroy(s);
goto out;
}
}
@@ -2108,6 +2116,7 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
{
int i, j, rc;
u32 nel, len;
+ __be64 prefixbuf[1];
__le32 buf[3];
struct ocontext *l, *c;
u32 nodebuf[8];
@@ -2217,21 +2226,30 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
goto out;
break;
}
- case OCON_IBPKEY:
- rc = next_entry(nodebuf, fp, sizeof(u32) * 4);
+ case OCON_IBPKEY: {
+ u32 pkey_lo, pkey_hi;
+
+ rc = next_entry(prefixbuf, fp, sizeof(u64));
+ if (rc)
+ goto out;
+
+ /* we need to have subnet_prefix in CPU order */
+ c->u.ibpkey.subnet_prefix = be64_to_cpu(prefixbuf[0]);
+
+ rc = next_entry(buf, fp, sizeof(u32) * 2);
if (rc)
goto out;
- c->u.ibpkey.subnet_prefix = be64_to_cpu(*((__be64 *)nodebuf));
+ pkey_lo = le32_to_cpu(buf[0]);
+ pkey_hi = le32_to_cpu(buf[1]);
- if (nodebuf[2] > 0xffff ||
- nodebuf[3] > 0xffff) {
+ if (pkey_lo > U16_MAX || pkey_hi > U16_MAX) {
rc = -EINVAL;
goto out;
}
- c->u.ibpkey.low_pkey = le32_to_cpu(nodebuf[2]);
- c->u.ibpkey.high_pkey = le32_to_cpu(nodebuf[3]);
+ c->u.ibpkey.low_pkey = pkey_lo;
+ c->u.ibpkey.high_pkey = pkey_hi;
rc = context_read_and_validate(&c->context[0],
p,
@@ -2239,7 +2257,10 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
if (rc)
goto out;
break;
- case OCON_IBENDPORT:
+ }
+ case OCON_IBENDPORT: {
+ u32 port;
+
rc = next_entry(buf, fp, sizeof(u32) * 2);
if (rc)
goto out;
@@ -2249,12 +2270,13 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
if (rc)
goto out;
- if (buf[1] > 0xff || buf[1] == 0) {
+ port = le32_to_cpu(buf[1]);
+ if (port > U8_MAX || port == 0) {
rc = -EINVAL;
goto out;
}
- c->u.ibendport.port = le32_to_cpu(buf[1]);
+ c->u.ibendport.port = port;
rc = context_read_and_validate(&c->context[0],
p,
@@ -2262,7 +2284,8 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
if (rc)
goto out;
break;
- }
+ } /* end case */
+ } /* end switch */
}
}
rc = 0;
@@ -3105,6 +3128,7 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
{
unsigned int i, j, rc;
size_t nel, len;
+ __be64 prefixbuf[1];
__le32 buf[3];
u32 nodebuf[8];
struct ocontext *c;
@@ -3192,12 +3216,17 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
return rc;
break;
case OCON_IBPKEY:
- *((__be64 *)nodebuf) = cpu_to_be64(c->u.ibpkey.subnet_prefix);
+ /* subnet_prefix is in CPU order */
+ prefixbuf[0] = cpu_to_be64(c->u.ibpkey.subnet_prefix);
- nodebuf[2] = cpu_to_le32(c->u.ibpkey.low_pkey);
- nodebuf[3] = cpu_to_le32(c->u.ibpkey.high_pkey);
+ rc = put_entry(prefixbuf, sizeof(u64), 1, fp);
+ if (rc)
+ return rc;
- rc = put_entry(nodebuf, sizeof(u32), 4, fp);
+ buf[0] = cpu_to_le32(c->u.ibpkey.low_pkey);
+ buf[1] = cpu_to_le32(c->u.ibpkey.high_pkey);
+
+ rc = put_entry(buf, sizeof(u32), 2, fp);
if (rc)
return rc;
rc = context_write(p, &c->context[0], fp);
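
The OCON_IBPKEY hunks above stop aliasing a u32 scratch array as a __be64 and instead move the 64-bit subnet prefix and the two 32-bit pkeys through separate, properly typed buffers, with the pkey range checks done on CPU-order values. A rough userspace equivalent of the corrected read path, assuming glibc's endian.h helpers in place of the kernel's be64_to_cpu()/le32_to_cpu() (the record layout shown is only what the diff implies):

#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct ibpkey { uint64_t subnet_prefix; uint16_t low_pkey, high_pkey; };

/* Parse one on-disk record: big-endian u64 prefix, two little-endian u32s. */
static int parse_ibpkey(const unsigned char *rec, struct ibpkey *out)
{
    uint64_t be_prefix;
    uint32_t le_lo, le_hi;

    memcpy(&be_prefix, rec, sizeof(be_prefix));   /* no type punning */
    memcpy(&le_lo, rec + 8, sizeof(le_lo));
    memcpy(&le_hi, rec + 12, sizeof(le_hi));

    uint32_t lo = le32toh(le_lo), hi = le32toh(le_hi);
    if (lo > UINT16_MAX || hi > UINT16_MAX)
        return -1;                  /* kernel returns -EINVAL here */

    out->subnet_prefix = be64toh(be_prefix);  /* keep prefix in CPU order */
    out->low_pkey = (uint16_t)lo;
    out->high_pkey = (uint16_t)hi;
    return 0;
}

int main(void)
{
    unsigned char rec[16] = {0};
    rec[7] = 0x2a;                  /* prefix = 42 (big-endian) */
    rec[8] = 0x01;                  /* low_pkey = 1 (little-endian) */
    rec[12] = 0xff; rec[13] = 0xff; /* high_pkey = 0xffff */

    struct ibpkey pk;
    if (parse_ibpkey(rec, &pk) == 0)
        printf("prefix=%llu pkeys=%u..%u\n",
               (unsigned long long)pk.subnet_prefix, pk.low_pkey, pk.high_pkey);
    return 0;
}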
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 12e414394530..dd44126c8d14 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -71,7 +71,7 @@
#include "audit.h"
/* Policy capability names */
-char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX] = {
+const char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX] = {
"network_peer_controls",
"open_perms",
"extended_socket_class",
@@ -776,7 +776,7 @@ static int security_compute_validatetrans(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
if (!user)
tclass = unmap_class(&state->ss->map, orig_tclass);
@@ -876,7 +876,7 @@ int security_bounded_transition(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
rc = -EINVAL;
old_context = sidtab_search(sidtab, old_sid);
@@ -1034,7 +1034,7 @@ void security_compute_xperms_decision(struct selinux_state *state,
goto allow;
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
@@ -1123,7 +1123,7 @@ void security_compute_av(struct selinux_state *state,
goto allow;
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
@@ -1177,7 +1177,7 @@ void security_compute_av_user(struct selinux_state *state,
goto allow;
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
@@ -1315,7 +1315,7 @@ static int security_sid_to_context_core(struct selinux_state *state,
}
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
if (force)
context = sidtab_search_force(sidtab, sid);
else
@@ -1483,7 +1483,7 @@ static int security_context_to_sid_core(struct selinux_state *state,
}
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
rc = string_to_context_struct(policydb, sidtab, scontext2,
&context, def_sid);
if (rc == -EINVAL && force) {
@@ -1668,7 +1668,7 @@ static int security_compute_sid(struct selinux_state *state,
}
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
scontext = sidtab_search(sidtab, ssid);
if (!scontext) {
@@ -1880,19 +1880,6 @@ int security_change_sid(struct selinux_state *state,
out_sid, false);
}
-/* Clone the SID into the new SID table. */
-static int clone_sid(u32 sid,
- struct context *context,
- void *arg)
-{
- struct sidtab *s = arg;
-
- if (sid > SECINITSID_NUM)
- return sidtab_insert(s, sid, context);
- else
- return 0;
-}
-
static inline int convert_context_handle_invalid_context(
struct selinux_state *state,
struct context *context)
@@ -1920,101 +1907,84 @@ struct convert_context_args {
/*
* Convert the values in the security context
- * structure `c' from the values specified
+ * structure `oldc' from the values specified
* in the policy `p->oldp' to the values specified
- * in the policy `p->newp'. Verify that the
- * context is valid under the new policy.
+ * in the policy `p->newp', storing the new context
+ * in `newc'. Verify that the context is valid
+ * under the new policy.
*/
-static int convert_context(u32 key,
- struct context *c,
- void *p)
+static int convert_context(struct context *oldc, struct context *newc, void *p)
{
struct convert_context_args *args;
- struct context oldc;
struct ocontext *oc;
- struct mls_range *range;
struct role_datum *role;
struct type_datum *typdatum;
struct user_datum *usrdatum;
char *s;
u32 len;
- int rc = 0;
-
- if (key <= SECINITSID_NUM)
- goto out;
+ int rc;
args = p;
- if (c->str) {
- struct context ctx;
-
- rc = -ENOMEM;
- s = kstrdup(c->str, GFP_KERNEL);
+ if (oldc->str) {
+ s = kstrdup(oldc->str, GFP_KERNEL);
if (!s)
- goto out;
+ return -ENOMEM;
rc = string_to_context_struct(args->newp, NULL, s,
- &ctx, SECSID_NULL);
- kfree(s);
- if (!rc) {
- pr_info("SELinux: Context %s became valid (mapped).\n",
- c->str);
- /* Replace string with mapped representation. */
- kfree(c->str);
- memcpy(c, &ctx, sizeof(*c));
- goto out;
- } else if (rc == -EINVAL) {
+ newc, SECSID_NULL);
+ if (rc == -EINVAL) {
/* Retain string representation for later mapping. */
- rc = 0;
- goto out;
- } else {
+ context_init(newc);
+ newc->str = s;
+ newc->len = oldc->len;
+ return 0;
+ }
+ kfree(s);
+ if (rc) {
/* Other error condition, e.g. ENOMEM. */
pr_err("SELinux: Unable to map context %s, rc = %d.\n",
- c->str, -rc);
- goto out;
+ oldc->str, -rc);
+ return rc;
}
+ pr_info("SELinux: Context %s became valid (mapped).\n",
+ oldc->str);
+ return 0;
}
- rc = context_cpy(&oldc, c);
- if (rc)
- goto out;
+ context_init(newc);
/* Convert the user. */
rc = -EINVAL;
usrdatum = hashtab_search(args->newp->p_users.table,
- sym_name(args->oldp, SYM_USERS, c->user - 1));
+ sym_name(args->oldp,
+ SYM_USERS, oldc->user - 1));
if (!usrdatum)
goto bad;
- c->user = usrdatum->value;
+ newc->user = usrdatum->value;
/* Convert the role. */
rc = -EINVAL;
role = hashtab_search(args->newp->p_roles.table,
- sym_name(args->oldp, SYM_ROLES, c->role - 1));
+ sym_name(args->oldp, SYM_ROLES, oldc->role - 1));
if (!role)
goto bad;
- c->role = role->value;
+ newc->role = role->value;
/* Convert the type. */
rc = -EINVAL;
typdatum = hashtab_search(args->newp->p_types.table,
- sym_name(args->oldp, SYM_TYPES, c->type - 1));
+ sym_name(args->oldp,
+ SYM_TYPES, oldc->type - 1));
if (!typdatum)
goto bad;
- c->type = typdatum->value;
+ newc->type = typdatum->value;
/* Convert the MLS fields if dealing with MLS policies */
if (args->oldp->mls_enabled && args->newp->mls_enabled) {
- rc = mls_convert_context(args->oldp, args->newp, c);
+ rc = mls_convert_context(args->oldp, args->newp, oldc, newc);
if (rc)
goto bad;
- } else if (args->oldp->mls_enabled && !args->newp->mls_enabled) {
- /*
- * Switching between MLS and non-MLS policy:
- * free any storage used by the MLS fields in the
- * context for all existing entries in the sidtab.
- */
- mls_context_destroy(c);
} else if (!args->oldp->mls_enabled && args->newp->mls_enabled) {
/*
* Switching between non-MLS and MLS policy:
@@ -2032,38 +2002,30 @@ static int convert_context(u32 key,
" the initial SIDs list\n");
goto bad;
}
- range = &oc->context[0].range;
- rc = mls_range_set(c, range);
+ rc = mls_range_set(newc, &oc->context[0].range);
if (rc)
goto bad;
}
/* Check the validity of the new context. */
- if (!policydb_context_isvalid(args->newp, c)) {
- rc = convert_context_handle_invalid_context(args->state,
- &oldc);
+ if (!policydb_context_isvalid(args->newp, newc)) {
+ rc = convert_context_handle_invalid_context(args->state, oldc);
if (rc)
goto bad;
}
- context_destroy(&oldc);
-
- rc = 0;
-out:
- return rc;
+ return 0;
bad:
/* Map old representation to string and save it. */
- rc = context_struct_to_string(args->oldp, &oldc, &s, &len);
+ rc = context_struct_to_string(args->oldp, oldc, &s, &len);
if (rc)
return rc;
- context_destroy(&oldc);
- context_destroy(c);
- c->str = s;
- c->len = len;
+ context_destroy(newc);
+ newc->str = s;
+ newc->len = len;
pr_info("SELinux: Context %s became invalid (unmapped).\n",
- c->str);
- rc = 0;
- goto out;
+ newc->str);
+ return 0;
}
static void security_load_policycaps(struct selinux_state *state)
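
After this rewrite, convert_context() no longer mutates a sidtab entry in place: it reads oldc and fills a freshly initialized newc, keeping a string form when the context cannot be mapped under the new policy. A simplified sketch of that out-of-place contract, with a toy lookup standing in for the policy symbol tables (string contexts only; all names are illustrative):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct ctx { char *str; int user; };   /* toy stand-in for struct context */

/* Toy mapping: returns the new user id, or -1 if the name is unknown. */
static int lookup_user(const char *name)
{
    return strcmp(name, "system_u") == 0 ? 1 : -1;
}

/* Out-of-place conversion: oldc is read-only, newc is built from scratch.
 * An unmappable context keeps its string form instead of failing the load. */
static int convert_ctx(const struct ctx *oldc, struct ctx *newc)
{
    int user = lookup_user(oldc->str);

    memset(newc, 0, sizeof(*newc));      /* context_init() analogue */
    if (user < 0) {
        newc->str = strdup(oldc->str);   /* "became invalid (unmapped)" */
        return newc->str ? 0 : -1;
    }
    newc->user = user;
    return 0;
}

int main(void)
{
    struct ctx oldc = { .str = "system_u" }, newc;

    if (convert_ctx(&oldc, &newc) == 0)
        printf("user=%d str=%s\n", newc.user, newc.str ? newc.str : "(mapped)");
    free(newc.str);
    return 0;
}

Because the old entry is never touched, a failed conversion leaves the running sidtab fully intact, which is what lets the error path in sidtab_convert() simply abandon the target table.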
@@ -2103,11 +2065,11 @@ static int security_preserve_bools(struct selinux_state *state,
int security_load_policy(struct selinux_state *state, void *data, size_t len)
{
struct policydb *policydb;
- struct sidtab *sidtab;
+ struct sidtab *oldsidtab, *newsidtab;
struct policydb *oldpolicydb, *newpolicydb;
- struct sidtab oldsidtab, newsidtab;
struct selinux_mapping *oldmapping;
struct selinux_map newmap;
+ struct sidtab_convert_params convert_params;
struct convert_context_args args;
u32 seqno;
int rc = 0;
@@ -2121,27 +2083,37 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
newpolicydb = oldpolicydb + 1;
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+
+ newsidtab = kmalloc(sizeof(*newsidtab), GFP_KERNEL);
+ if (!newsidtab) {
+ rc = -ENOMEM;
+ goto out;
+ }
if (!state->initialized) {
rc = policydb_read(policydb, fp);
- if (rc)
+ if (rc) {
+ kfree(newsidtab);
goto out;
+ }
policydb->len = len;
rc = selinux_set_mapping(policydb, secclass_map,
&state->ss->map);
if (rc) {
+ kfree(newsidtab);
policydb_destroy(policydb);
goto out;
}
- rc = policydb_load_isids(policydb, sidtab);
+ rc = policydb_load_isids(policydb, newsidtab);
if (rc) {
+ kfree(newsidtab);
policydb_destroy(policydb);
goto out;
}
+ state->ss->sidtab = newsidtab;
security_load_policycaps(state);
state->initialized = 1;
seqno = ++state->ss->latest_granting;
@@ -2154,13 +2126,11 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
goto out;
}
-#if 0
- sidtab_hash_eval(sidtab, "sids");
-#endif
-
rc = policydb_read(newpolicydb, fp);
- if (rc)
+ if (rc) {
+ kfree(newsidtab);
goto out;
+ }
newpolicydb->len = len;
/* If switching between different policy types, log MLS status */
@@ -2169,10 +2139,11 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
else if (!policydb->mls_enabled && newpolicydb->mls_enabled)
pr_info("SELinux: Enabling MLS support...\n");
- rc = policydb_load_isids(newpolicydb, &newsidtab);
+ rc = policydb_load_isids(newpolicydb, newsidtab);
if (rc) {
pr_err("SELinux: unable to load the initial SIDs\n");
policydb_destroy(newpolicydb);
+ kfree(newsidtab);
goto out;
}
@@ -2186,12 +2157,7 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
goto err;
}
- /* Clone the SID table. */
- sidtab_shutdown(sidtab);
-
- rc = sidtab_map(sidtab, clone_sid, &newsidtab);
- if (rc)
- goto err;
+ oldsidtab = state->ss->sidtab;
/*
* Convert the internal representations of contexts
@@ -2200,7 +2166,12 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
args.state = state;
args.oldp = policydb;
args.newp = newpolicydb;
- rc = sidtab_map(&newsidtab, convert_context, &args);
+
+ convert_params.func = convert_context;
+ convert_params.args = &args;
+ convert_params.target = newsidtab;
+
+ rc = sidtab_convert(oldsidtab, &convert_params);
if (rc) {
pr_err("SELinux: unable to convert the internal"
" representation of contexts in the new SID"
@@ -2210,12 +2181,11 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
/* Save the old policydb and SID table to free later. */
memcpy(oldpolicydb, policydb, sizeof(*policydb));
- sidtab_set(&oldsidtab, sidtab);
/* Install the new policydb and SID table. */
write_lock_irq(&state->ss->policy_rwlock);
memcpy(policydb, newpolicydb, sizeof(*policydb));
- sidtab_set(sidtab, &newsidtab);
+ state->ss->sidtab = newsidtab;
security_load_policycaps(state);
oldmapping = state->ss->map.mapping;
state->ss->map.mapping = newmap.mapping;
@@ -2225,7 +2195,8 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
/* Free the old policydb and SID table. */
policydb_destroy(oldpolicydb);
- sidtab_destroy(&oldsidtab);
+ sidtab_destroy(oldsidtab);
+ kfree(oldsidtab);
kfree(oldmapping);
avc_ss_reset(state->avc, seqno);
@@ -2239,7 +2210,8 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len)
err:
kfree(newmap.mapping);
- sidtab_destroy(&newsidtab);
+ sidtab_destroy(newsidtab);
+ kfree(newsidtab);
policydb_destroy(newpolicydb);
out:
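
security_load_policy() now keeps the old sidtab alive behind a pointer while the new one is populated, then swaps the pointer under the write lock and frees the old table only afterwards; the services.h hunk below (struct sidtab * instead of an embedded struct) is what makes that single-store install possible. A compact sketch of the publish-then-free pattern, with a pthread rwlock standing in for policy_rwlock (struct names are placeholders):

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

struct table { int version; };

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static struct table *current_tab;

static void swap_in(struct table *newtab)
{
    struct table *old;

    pthread_rwlock_wrlock(&lock);   /* all readers drained here */
    old = current_tab;
    current_tab = newtab;           /* single pointer store = the "install" */
    pthread_rwlock_unlock(&lock);

    free(old);                      /* safe: no reader can still see it */
}

int main(void)
{
    current_tab = calloc(1, sizeof(*current_tab));
    struct table *next = calloc(1, sizeof(*next));
    if (!current_tab || !next)
        return 1;
    next->version = 2;

    swap_in(next);

    pthread_rwlock_rdlock(&lock);
    printf("active version: %d\n", current_tab->version);
    pthread_rwlock_unlock(&lock);
    free(current_tab);
    return 0;
}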
@@ -2276,7 +2248,7 @@ int security_port_sid(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
c = policydb->ocontexts[OCON_PORT];
while (c) {
@@ -2322,7 +2294,7 @@ int security_ib_pkey_sid(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
c = policydb->ocontexts[OCON_IBPKEY];
while (c) {
@@ -2368,7 +2340,7 @@ int security_ib_endport_sid(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
c = policydb->ocontexts[OCON_IBENDPORT];
while (c) {
@@ -2414,7 +2386,7 @@ int security_netif_sid(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
c = policydb->ocontexts[OCON_NETIF];
while (c) {
@@ -2479,7 +2451,7 @@ int security_node_sid(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
switch (domain) {
case AF_INET: {
@@ -2579,7 +2551,7 @@ int security_get_user_sids(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
context_init(&usercon);
@@ -2681,7 +2653,7 @@ static inline int __security_genfs_sid(struct selinux_state *state,
u32 *sid)
{
struct policydb *policydb = &state->ss->policydb;
- struct sidtab *sidtab = &state->ss->sidtab;
+ struct sidtab *sidtab = state->ss->sidtab;
int len;
u16 sclass;
struct genfs *genfs;
@@ -2767,7 +2739,7 @@ int security_fs_use(struct selinux_state *state, struct super_block *sb)
read_lock(&state->ss->policy_rwlock);
policydb = &state->ss->policydb;
- sidtab = &state->ss->sidtab;
+ sidtab = state->ss->sidtab;
c = policydb->ocontexts[OCON_FSUSE];
while (c) {
@@ -2973,7 +2945,7 @@ int security_sid_mls_copy(struct selinux_state *state,
u32 sid, u32 mls_sid, u32 *new_sid)
{
struct policydb *policydb = &state->ss->policydb;
- struct sidtab *sidtab = &state->ss->sidtab;
+ struct sidtab *sidtab = state->ss->sidtab;
struct context *context1;
struct context *context2;
struct context newcon;
@@ -3064,7 +3036,7 @@ int security_net_peersid_resolve(struct selinux_state *state,
u32 *peer_sid)
{
struct policydb *policydb = &state->ss->policydb;
- struct sidtab *sidtab = &state->ss->sidtab;
+ struct sidtab *sidtab = state->ss->sidtab;
int rc;
struct context *nlbl_ctx;
struct context *xfrm_ctx;
@@ -3425,7 +3397,7 @@ int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule,
goto out;
}
- ctxt = sidtab_search(&state->ss->sidtab, sid);
+ ctxt = sidtab_search(state->ss->sidtab, sid);
if (unlikely(!ctxt)) {
WARN_ONCE(1, "selinux_audit_rule_match: unrecognized SID %d\n",
sid);
@@ -3588,7 +3560,7 @@ int security_netlbl_secattr_to_sid(struct selinux_state *state,
u32 *sid)
{
struct policydb *policydb = &state->ss->policydb;
- struct sidtab *sidtab = &state->ss->sidtab;
+ struct sidtab *sidtab = state->ss->sidtab;
int rc;
struct context *ctx;
struct context ctx_new;
@@ -3666,7 +3638,7 @@ int security_netlbl_sid_to_secattr(struct selinux_state *state,
read_lock(&state->ss->policy_rwlock);
rc = -ENOENT;
- ctx = sidtab_search(&state->ss->sidtab, sid);
+ ctx = sidtab_search(state->ss->sidtab, sid);
if (ctx == NULL)
goto out;
diff --git a/security/selinux/ss/services.h b/security/selinux/ss/services.h
index 24c7bdcc8075..9a36de860368 100644
--- a/security/selinux/ss/services.h
+++ b/security/selinux/ss/services.h
@@ -24,7 +24,7 @@ struct selinux_map {
};
struct selinux_ss {
- struct sidtab sidtab;
+ struct sidtab *sidtab;
struct policydb policydb;
rwlock_t policy_rwlock;
u32 latest_granting;
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index fd75a12fa8fc..e63a90ff2728 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -2,108 +2,164 @@
/*
* Implementation of the SID table type.
*
- * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
+ * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
+ *
+ * Copyright (C) 2018 Red Hat, Inc.
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
-#include <linux/errno.h>
+#include <linux/atomic.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
-#define SIDTAB_HASH(sid) \
-(sid & SIDTAB_HASH_MASK)
-
int sidtab_init(struct sidtab *s)
{
- int i;
-
- s->htable = kmalloc_array(SIDTAB_SIZE, sizeof(*s->htable), GFP_ATOMIC);
- if (!s->htable)
- return -ENOMEM;
- for (i = 0; i < SIDTAB_SIZE; i++)
- s->htable[i] = NULL;
- s->nel = 0;
- s->next_sid = 1;
- s->shutdown = 0;
+ u32 i;
+
+ memset(s->roots, 0, sizeof(s->roots));
+
+ for (i = 0; i < SIDTAB_RCACHE_SIZE; i++)
+ atomic_set(&s->rcache[i], -1);
+
+ for (i = 0; i < SECINITSID_NUM; i++)
+ s->isids[i].set = 0;
+
+ atomic_set(&s->count, 0);
+
+ s->convert = NULL;
+
spin_lock_init(&s->lock);
return 0;
}
-int sidtab_insert(struct sidtab *s, u32 sid, struct context *context)
+int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
- int hvalue;
- struct sidtab_node *prev, *cur, *newnode;
-
- if (!s)
- return -ENOMEM;
-
- hvalue = SIDTAB_HASH(sid);
- prev = NULL;
- cur = s->htable[hvalue];
- while (cur && sid > cur->sid) {
- prev = cur;
- cur = cur->next;
- }
+ struct sidtab_isid_entry *entry;
+ int rc;
- if (cur && sid == cur->sid)
- return -EEXIST;
+ if (sid == 0 || sid > SECINITSID_NUM)
+ return -EINVAL;
- newnode = kmalloc(sizeof(*newnode), GFP_ATOMIC);
- if (!newnode)
- return -ENOMEM;
+ entry = &s->isids[sid - 1];
- newnode->sid = sid;
- if (context_cpy(&newnode->context, context)) {
- kfree(newnode);
- return -ENOMEM;
- }
+ rc = context_cpy(&entry->context, context);
+ if (rc)
+ return rc;
- if (prev) {
- newnode->next = prev->next;
- wmb();
- prev->next = newnode;
- } else {
- newnode->next = s->htable[hvalue];
- wmb();
- s->htable[hvalue] = newnode;
+ entry->set = 1;
+ return 0;
+}
+
+static u32 sidtab_level_from_count(u32 count)
+{
+ u32 capacity = SIDTAB_LEAF_ENTRIES;
+ u32 level = 0;
+
+ while (count > capacity) {
+ capacity <<= SIDTAB_INNER_SHIFT;
+ ++level;
}
+ return level;
+}
- s->nel++;
- if (sid >= s->next_sid)
- s->next_sid = sid + 1;
+static int sidtab_alloc_roots(struct sidtab *s, u32 level)
+{
+ u32 l;
+
+ if (!s->roots[0].ptr_leaf) {
+ s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!s->roots[0].ptr_leaf)
+ return -ENOMEM;
+ }
+ for (l = 1; l <= level; ++l)
+ if (!s->roots[l].ptr_inner) {
+ s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!s->roots[l].ptr_inner)
+ return -ENOMEM;
+ s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
+ }
return 0;
}
-static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
+static struct context *sidtab_do_lookup(struct sidtab *s, u32 index, int alloc)
{
- int hvalue;
- struct sidtab_node *cur;
+ union sidtab_entry_inner *entry;
+ u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;
+
+ /* find the level of the subtree we need */
+ level = sidtab_level_from_count(index + 1);
+ capacity_shift = level * SIDTAB_INNER_SHIFT;
- if (!s)
+ /* allocate roots if needed */
+ if (alloc && sidtab_alloc_roots(s, level) != 0)
return NULL;
- hvalue = SIDTAB_HASH(sid);
- cur = s->htable[hvalue];
- while (cur && sid > cur->sid)
- cur = cur->next;
-
- if (force && cur && sid == cur->sid && cur->context.len)
- return &cur->context;
-
- if (!cur || sid != cur->sid || cur->context.len) {
- /* Remap invalid SIDs to the unlabeled SID. */
- sid = SECINITSID_UNLABELED;
- hvalue = SIDTAB_HASH(sid);
- cur = s->htable[hvalue];
- while (cur && sid > cur->sid)
- cur = cur->next;
- if (!cur || sid != cur->sid)
+ /* lookup inside the subtree */
+ entry = &s->roots[level];
+ while (level != 0) {
+ capacity_shift -= SIDTAB_INNER_SHIFT;
+ --level;
+
+ entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
+ leaf_index &= ((u32)1 << capacity_shift) - 1;
+
+ if (!entry->ptr_inner) {
+ if (alloc)
+ entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!entry->ptr_inner)
+ return NULL;
+ }
+ }
+ if (!entry->ptr_leaf) {
+ if (alloc)
+ entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_ATOMIC);
+ if (!entry->ptr_leaf)
return NULL;
}
+ return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES].context;
+}
+
+static struct context *sidtab_lookup(struct sidtab *s, u32 index)
+{
+ u32 count = (u32)atomic_read(&s->count);
+
+ if (index >= count)
+ return NULL;
+
+ /* read entries after reading count */
+ smp_rmb();
+
+ return sidtab_do_lookup(s, index, 0);
+}
+
+static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
+{
+ return s->isids[sid - 1].set ? &s->isids[sid - 1].context : NULL;
+}
+
+static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
+{
+ struct context *context;
+
+ if (sid != 0) {
+ if (sid > SECINITSID_NUM)
+ context = sidtab_lookup(s, sid - (SECINITSID_NUM + 1));
+ else
+ context = sidtab_lookup_initial(s, sid);
+ if (context && (!context->len || force))
+ return context;
+ }
- return &cur->context;
+ return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}
struct context *sidtab_search(struct sidtab *s, u32 sid)
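
sidtab_do_lookup() above walks a radix tree whose depth grows with the entry count: each inner level multiplies capacity by 2^SIDTAB_INNER_SHIFT, and an index is consumed shift-by-shift on the way down. The index arithmetic can be seen in isolation in this sketch, which just prints the per-level slot an index decomposes into (constants chosen small for readability; the kernel derives them from PAGE_SIZE):

#include <stdio.h>
#include <stdint.h>

#define LEAF_ENTRIES 4          /* kernel: PAGE_SIZE / leaf entry size */
#define INNER_SHIFT  2          /* kernel: SIDTAB_INNER_SHIFT */

/* Smallest tree depth whose capacity covers `count` entries. */
static uint32_t level_from_count(uint32_t count)
{
    uint32_t capacity = LEAF_ENTRIES, level = 0;

    while (count > capacity) {
        capacity <<= INNER_SHIFT;
        ++level;
    }
    return level;
}

int main(void)
{
    uint32_t index = 27;
    uint32_t leaf_index = index / LEAF_ENTRIES;
    uint32_t level = level_from_count(index + 1);
    uint32_t shift = level * INNER_SHIFT;

    printf("index %u needs %u inner level(s)\n", index, level);
    while (level != 0) {
        shift -= INNER_SHIFT;
        --level;
        printf("  inner slot %u\n", leaf_index >> shift);
        leaf_index &= ((uint32_t)1 << shift) - 1;
    }
    printf("  leaf slot %u\n", index % LEAF_ENTRIES);
    return 0;
}

Note the asymmetry with the old hash table: a SID is now just an index, so lookup cost is O(depth) with no chains, and entries are never moved once placed.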
@@ -116,191 +172,324 @@ struct context *sidtab_search_force(struct sidtab *s, u32 sid)
return sidtab_search_core(s, sid, 1);
}
-int sidtab_map(struct sidtab *s,
- int (*apply) (u32 sid,
- struct context *context,
- void *args),
- void *args)
+static int sidtab_find_context(union sidtab_entry_inner entry,
+ u32 *pos, u32 count, u32 level,
+ struct context *context, u32 *index)
{
- int i, rc = 0;
- struct sidtab_node *cur;
-
- if (!s)
- goto out;
+ int rc;
+ u32 i;
+
+ if (level != 0) {
+ struct sidtab_node_inner *node = entry.ptr_inner;
+
+ i = 0;
+ while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
+ rc = sidtab_find_context(node->entries[i],
+ pos, count, level - 1,
+ context, index);
+ if (rc == 0)
+ return 0;
+ i++;
+ }
+ } else {
+ struct sidtab_node_leaf *node = entry.ptr_leaf;
- for (i = 0; i < SIDTAB_SIZE; i++) {
- cur = s->htable[i];
- while (cur) {
- rc = apply(cur->sid, &cur->context, args);
- if (rc)
- goto out;
- cur = cur->next;
+ i = 0;
+ while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
+ if (context_cmp(&node->entries[i].context, context)) {
+ *index = *pos;
+ return 0;
+ }
+ (*pos)++;
+ i++;
}
}
-out:
- return rc;
+ return -ENOENT;
}
-static void sidtab_update_cache(struct sidtab *s, struct sidtab_node *n, int loc)
+static void sidtab_rcache_update(struct sidtab *s, u32 index, u32 pos)
{
- BUG_ON(loc >= SIDTAB_CACHE_LEN);
-
- while (loc > 0) {
- s->cache[loc] = s->cache[loc - 1];
- loc--;
+ while (pos > 0) {
+ atomic_set(&s->rcache[pos], atomic_read(&s->rcache[pos - 1]));
+ --pos;
}
- s->cache[0] = n;
+ atomic_set(&s->rcache[0], (int)index);
}
-static inline u32 sidtab_search_context(struct sidtab *s,
- struct context *context)
+static void sidtab_rcache_push(struct sidtab *s, u32 index)
{
- int i;
- struct sidtab_node *cur;
-
- for (i = 0; i < SIDTAB_SIZE; i++) {
- cur = s->htable[i];
- while (cur) {
- if (context_cmp(&cur->context, context)) {
- sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1);
- return cur->sid;
- }
- cur = cur->next;
- }
- }
- return 0;
+ sidtab_rcache_update(s, index, SIDTAB_RCACHE_SIZE - 1);
}
-static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context)
+static int sidtab_rcache_search(struct sidtab *s, struct context *context,
+ u32 *index)
{
- int i;
- struct sidtab_node *node;
+ u32 i;
- for (i = 0; i < SIDTAB_CACHE_LEN; i++) {
- node = s->cache[i];
- if (unlikely(!node))
+ for (i = 0; i < SIDTAB_RCACHE_SIZE; i++) {
+ int v = atomic_read(&s->rcache[i]);
+
+ if (v < 0)
+ continue;
+
+ if (context_cmp(sidtab_do_lookup(s, (u32)v, 0), context)) {
+ sidtab_rcache_update(s, (u32)v, i);
+ *index = (u32)v;
return 0;
- if (context_cmp(&node->context, context)) {
- sidtab_update_cache(s, node, i);
- return node->sid;
}
}
- return 0;
+ return -ENOENT;
}
-int sidtab_context_to_sid(struct sidtab *s,
- struct context *context,
- u32 *out_sid)
+static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
+ u32 *index)
{
- u32 sid;
- int ret = 0;
unsigned long flags;
+ u32 count = (u32)atomic_read(&s->count);
+ u32 count_locked, level, pos;
+ struct sidtab_convert_params *convert;
+ struct context *dst, *dst_convert;
+ int rc;
+
+ rc = sidtab_rcache_search(s, context, index);
+ if (rc == 0)
+ return 0;
+
+ level = sidtab_level_from_count(count);
+
+ /* read entries after reading count */
+ smp_rmb();
+
+ pos = 0;
+ rc = sidtab_find_context(s->roots[level], &pos, count, level,
+ context, index);
+ if (rc == 0) {
+ sidtab_rcache_push(s, *index);
+ return 0;
+ }
- *out_sid = SECSID_NULL;
+ /* lock-free search failed: lock, re-search, and insert if not found */
+ spin_lock_irqsave(&s->lock, flags);
- sid = sidtab_search_cache(s, context);
- if (!sid)
- sid = sidtab_search_context(s, context);
- if (!sid) {
- spin_lock_irqsave(&s->lock, flags);
- /* Rescan now that we hold the lock. */
- sid = sidtab_search_context(s, context);
- if (sid)
- goto unlock_out;
- /* No SID exists for the context. Allocate a new one. */
- if (s->next_sid == UINT_MAX || s->shutdown) {
- ret = -ENOMEM;
- goto unlock_out;
+ convert = s->convert;
+ count_locked = (u32)atomic_read(&s->count);
+ level = sidtab_level_from_count(count_locked);
+
+ /* if count has changed before we acquired the lock, then catch up */
+ while (count < count_locked) {
+ if (context_cmp(sidtab_do_lookup(s, count, 0), context)) {
+ sidtab_rcache_push(s, count);
+ *index = count;
+ rc = 0;
+ goto out_unlock;
}
- sid = s->next_sid++;
- if (context->len)
- pr_info("SELinux: Context %s is not valid (left unmapped).\n",
- context->str);
- ret = sidtab_insert(s, sid, context);
- if (ret)
- s->next_sid--;
-unlock_out:
- spin_unlock_irqrestore(&s->lock, flags);
+ ++count;
}
- if (ret)
- return ret;
+ /* insert context into new entry */
+ rc = -ENOMEM;
+ dst = sidtab_do_lookup(s, count, 1);
+ if (!dst)
+ goto out_unlock;
+
+ rc = context_cpy(dst, context);
+ if (rc)
+ goto out_unlock;
+
+ /*
+ * if we are building a new sidtab, we need to convert the context
+ * and insert it there as well
+ */
+ if (convert) {
+ rc = -ENOMEM;
+ dst_convert = sidtab_do_lookup(convert->target, count, 1);
+ if (!dst_convert) {
+ context_destroy(dst);
+ goto out_unlock;
+ }
- *out_sid = sid;
- return 0;
+ rc = convert->func(context, dst_convert, convert->args);
+ if (rc) {
+ context_destroy(dst);
+ goto out_unlock;
+ }
+
+ /* at this point we know the insert won't fail */
+ atomic_set(&convert->target->count, count + 1);
+ }
+
+ if (context->len)
+ pr_info("SELinux: Context %s is not valid (left unmapped).\n",
+ context->str);
+
+ sidtab_rcache_push(s, count);
+ *index = count;
+
+ /* write entries before writing new count */
+ smp_wmb();
+
+ atomic_set(&s->count, count + 1);
+
+ rc = 0;
+out_unlock:
+ spin_unlock_irqrestore(&s->lock, flags);
+ return rc;
}
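
sidtab_reverse_lookup() above searches without the lock first, then takes the spinlock, catches up on anything inserted in between, and only then appends; the new entry is published to lock-free readers by writing `count` after smp_wmb(), matched by the smp_rmb() in sidtab_lookup(). A userspace sketch of that publish-by-count idiom using C11 atomics, where release/acquire on the counter plays the role of the kernel barriers:

#include <stdatomic.h>
#include <stdio.h>

#define CAP 64

static int slots[CAP];
static atomic_uint count;        /* publication point for readers */

/* Reader: may run concurrently with the writer, lock-free. */
static int lookup(int value, unsigned *index)
{
    unsigned n = atomic_load_explicit(&count, memory_order_acquire);

    for (unsigned i = 0; i < n; i++)   /* entries below n are fully written */
        if (slots[i] == value) {
            *index = i;
            return 0;
        }
    return -1;
}

/* Writer: in the kernel this runs under s->lock; serialized here by caller. */
static int insert(int value)
{
    unsigned n = atomic_load_explicit(&count, memory_order_relaxed);

    if (n == CAP)
        return -1;
    slots[n] = value;                            /* write the entry first... */
    atomic_store_explicit(&count, n + 1,
                          memory_order_release); /* ...then publish it */
    return 0;
}

int main(void)
{
    unsigned idx;

    insert(42);
    if (lookup(42, &idx) == 0)
        printf("found at index %u\n", idx);
    return 0;
}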
-void sidtab_hash_eval(struct sidtab *h, char *tag)
+int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid)
{
- int i, chain_len, slots_used, max_chain_len;
- struct sidtab_node *cur;
-
- slots_used = 0;
- max_chain_len = 0;
- for (i = 0; i < SIDTAB_SIZE; i++) {
- cur = h->htable[i];
- if (cur) {
- slots_used++;
- chain_len = 0;
- while (cur) {
- chain_len++;
- cur = cur->next;
- }
+ int rc;
+ u32 i;
+
+ for (i = 0; i < SECINITSID_NUM; i++) {
+ struct sidtab_isid_entry *entry = &s->isids[i];
- if (chain_len > max_chain_len)
- max_chain_len = chain_len;
+ if (entry->set && context_cmp(context, &entry->context)) {
+ *sid = i + 1;
+ return 0;
}
}
- pr_debug("%s: %d entries and %d/%d buckets used, longest "
- "chain length %d\n", tag, h->nel, slots_used, SIDTAB_SIZE,
- max_chain_len);
+ rc = sidtab_reverse_lookup(s, context, sid);
+ if (rc)
+ return rc;
+ *sid += SECINITSID_NUM + 1;
+ return 0;
}
-void sidtab_destroy(struct sidtab *s)
+static int sidtab_convert_tree(union sidtab_entry_inner *edst,
+ union sidtab_entry_inner *esrc,
+ u32 *pos, u32 count, u32 level,
+ struct sidtab_convert_params *convert)
{
- int i;
- struct sidtab_node *cur, *temp;
-
- if (!s)
- return;
-
- for (i = 0; i < SIDTAB_SIZE; i++) {
- cur = s->htable[i];
- while (cur) {
- temp = cur;
- cur = cur->next;
- context_destroy(&temp->context);
- kfree(temp);
+ int rc;
+ u32 i;
+
+ if (level != 0) {
+ if (!edst->ptr_inner) {
+ edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_KERNEL);
+ if (!edst->ptr_inner)
+ return -ENOMEM;
+ }
+ i = 0;
+ while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
+ rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
+ &esrc->ptr_inner->entries[i],
+ pos, count, level - 1,
+ convert);
+ if (rc)
+ return rc;
+ i++;
+ }
+ } else {
+ if (!edst->ptr_leaf) {
+ edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
+ GFP_KERNEL);
+ if (!edst->ptr_leaf)
+ return -ENOMEM;
+ }
+ i = 0;
+ while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
+ rc = convert->func(&esrc->ptr_leaf->entries[i].context,
+ &edst->ptr_leaf->entries[i].context,
+ convert->args);
+ if (rc)
+ return rc;
+ (*pos)++;
+ i++;
}
- s->htable[i] = NULL;
+ cond_resched();
}
- kfree(s->htable);
- s->htable = NULL;
- s->nel = 0;
- s->next_sid = 1;
+ return 0;
}
-void sidtab_set(struct sidtab *dst, struct sidtab *src)
+int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
unsigned long flags;
- int i;
-
- spin_lock_irqsave(&src->lock, flags);
- dst->htable = src->htable;
- dst->nel = src->nel;
- dst->next_sid = src->next_sid;
- dst->shutdown = 0;
- for (i = 0; i < SIDTAB_CACHE_LEN; i++)
- dst->cache[i] = NULL;
- spin_unlock_irqrestore(&src->lock, flags);
+ u32 count, level, pos;
+ int rc;
+
+ spin_lock_irqsave(&s->lock, flags);
+
+ /* concurrent policy loads are not allowed */
+ if (s->convert) {
+ spin_unlock_irqrestore(&s->lock, flags);
+ return -EBUSY;
+ }
+
+ count = (u32)atomic_read(&s->count);
+ level = sidtab_level_from_count(count);
+
+ /* allocate last leaf in the new sidtab (to avoid race with
+ * live convert)
+ */
+ rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
+ if (rc) {
+ spin_unlock_irqrestore(&s->lock, flags);
+ return rc;
+ }
+
+ /* set count in case no new entries are added during conversion */
+ atomic_set(&params->target->count, count);
+
+ /* enable live convert of new entries */
+ s->convert = params;
+
+ /* we can safely do the rest of the conversion outside the lock */
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ pr_info("SELinux: Converting %u SID table entries...\n", count);
+
+ /* convert all entries not covered by live convert */
+ pos = 0;
+ rc = sidtab_convert_tree(&params->target->roots[level],
+ &s->roots[level], &pos, count, level, params);
+ if (rc) {
+ /* we need to keep the old table - disable live convert */
+ spin_lock_irqsave(&s->lock, flags);
+ s->convert = NULL;
+ spin_unlock_irqrestore(&s->lock, flags);
+ }
+ return rc;
}
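
sidtab_convert() sets s->convert under the lock before doing the bulk tree walk outside it, so the two halves never race: the walk handles only the `count` entries that existed at the start, while any SID allocated mid-conversion is converted into the target by the inserter itself (the convert branch in sidtab_reverse_lookup() above). A stripped-down, single-threaded sketch of that handoff, with the lock left implicit and all names illustrative:

#include <stdio.h>

#define CAP 16

struct convert_params {
    int (*func)(int oldv, int *newv);
    int *target;                 /* new table being built */
};

static int table[CAP];
static unsigned count;
static struct convert_params *convert;   /* non-NULL while a load runs */

static int double_it(int oldv, int *newv) { *newv = oldv * 2; return 0; }

/* Insert during a policy load: mirror the entry into the new table too,
 * so the bulk pass never has to revisit indexes >= its starting count. */
static int insert(int value)
{
    if (count == CAP)
        return -1;
    table[count] = value;
    if (convert && convert->func(value, &convert->target[count]) != 0)
        return -1;
    ++count;
    return 0;
}

int main(void)
{
    int newtab[CAP];
    struct convert_params params = { double_it, newtab };
    unsigned start, i;

    insert(3);
    start = count;               /* entries 0..start-1 need the bulk pass */
    convert = &params;           /* enable live convert for later inserts */

    insert(5);                   /* "live" insert: converted on the spot */
    for (i = 0; i < start; i++)  /* bulk pass over pre-existing entries */
        double_it(table[i], &newtab[i]);
    convert = NULL;

    printf("new table: %d %d\n", newtab[0], newtab[1]);  /* 6 10 */
    return 0;
}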
-void sidtab_shutdown(struct sidtab *s)
+static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
- unsigned long flags;
+ u32 i;
- spin_lock_irqsave(&s->lock, flags);
- s->shutdown = 1;
- spin_unlock_irqrestore(&s->lock, flags);
+ if (level != 0) {
+ struct sidtab_node_inner *node = entry.ptr_inner;
+
+ if (!node)
+ return;
+
+ for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
+ sidtab_destroy_tree(node->entries[i], level - 1);
+ kfree(node);
+ } else {
+ struct sidtab_node_leaf *node = entry.ptr_leaf;
+
+ if (!node)
+ return;
+
+ for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
+ context_destroy(&node->entries[i].context);
+ kfree(node);
+ }
+}
+
+void sidtab_destroy(struct sidtab *s)
+{
+ u32 i, level;
+
+ for (i = 0; i < SECINITSID_NUM; i++)
+ if (s->isids[i].set)
+ context_destroy(&s->isids[i].context);
+
+ level = SIDTAB_MAX_LEVEL;
+ while (level && !s->roots[level].ptr_inner)
+ --level;
+
+ sidtab_destroy_tree(s->roots[level], level);
}
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
index a1a1d2617b6f..bbd5c0d1f3bd 100644
--- a/security/selinux/ss/sidtab.h
+++ b/security/selinux/ss/sidtab.h
@@ -1,56 +1,96 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * A security identifier table (sidtab) is a hash table
+ * A security identifier table (sidtab) is a lookup table
* of security context structures indexed by SID value.
*
- * Author : Stephen Smalley, <sds@tycho.nsa.gov>
+ * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
+ * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
+ *
+ * Copyright (C) 2018 Red Hat, Inc.
*/
#ifndef _SS_SIDTAB_H_
#define _SS_SIDTAB_H_
+#include <linux/spinlock_types.h>
+#include <linux/log2.h>
+
#include "context.h"
-struct sidtab_node {
- u32 sid; /* security identifier */
- struct context context; /* security context structure */
- struct sidtab_node *next;
+struct sidtab_entry_leaf {
+ struct context context;
+};
+
+struct sidtab_node_inner;
+struct sidtab_node_leaf;
+
+union sidtab_entry_inner {
+ struct sidtab_node_inner *ptr_inner;
+ struct sidtab_node_leaf *ptr_leaf;
+};
+
+/* align node size to page boundary */
+#define SIDTAB_NODE_ALLOC_SHIFT PAGE_SHIFT
+#define SIDTAB_NODE_ALLOC_SIZE PAGE_SIZE
+
+#define size_to_shift(size) ((size) == 1 ? 1 : (const_ilog2((size) - 1) + 1))
+
+#define SIDTAB_INNER_SHIFT \
+ (SIDTAB_NODE_ALLOC_SHIFT - size_to_shift(sizeof(union sidtab_entry_inner)))
+#define SIDTAB_INNER_ENTRIES ((size_t)1 << SIDTAB_INNER_SHIFT)
+#define SIDTAB_LEAF_ENTRIES \
+ (SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry_leaf))
+
+#define SIDTAB_MAX_BITS 31 /* limited to INT_MAX due to atomic_t range */
+#define SIDTAB_MAX (((u32)1 << SIDTAB_MAX_BITS) - 1)
+/* ensure enough tree levels for SIDTAB_MAX entries */
+#define SIDTAB_MAX_LEVEL \
+ DIV_ROUND_UP(SIDTAB_MAX_BITS - size_to_shift(SIDTAB_LEAF_ENTRIES), \
+ SIDTAB_INNER_SHIFT)
+
+struct sidtab_node_leaf {
+ struct sidtab_entry_leaf entries[SIDTAB_LEAF_ENTRIES];
};
-#define SIDTAB_HASH_BITS 7
-#define SIDTAB_HASH_BUCKETS (1 << SIDTAB_HASH_BITS)
-#define SIDTAB_HASH_MASK (SIDTAB_HASH_BUCKETS-1)
+struct sidtab_node_inner {
+ union sidtab_entry_inner entries[SIDTAB_INNER_ENTRIES];
+};
-#define SIDTAB_SIZE SIDTAB_HASH_BUCKETS
+struct sidtab_isid_entry {
+ int set;
+ struct context context;
+};
+
+struct sidtab_convert_params {
+ int (*func)(struct context *oldc, struct context *newc, void *args);
+ void *args;
+ struct sidtab *target;
+};
+
+#define SIDTAB_RCACHE_SIZE 3
struct sidtab {
- struct sidtab_node **htable;
- unsigned int nel; /* number of elements */
- unsigned int next_sid; /* next SID to allocate */
- unsigned char shutdown;
-#define SIDTAB_CACHE_LEN 3
- struct sidtab_node *cache[SIDTAB_CACHE_LEN];
+ union sidtab_entry_inner roots[SIDTAB_MAX_LEVEL + 1];
+ atomic_t count;
+ struct sidtab_convert_params *convert;
spinlock_t lock;
+
+ /* reverse lookup cache */
+ atomic_t rcache[SIDTAB_RCACHE_SIZE];
+
+ /* index == SID - 1 (no entry for SECSID_NULL) */
+ struct sidtab_isid_entry isids[SECINITSID_NUM];
};
int sidtab_init(struct sidtab *s);
-int sidtab_insert(struct sidtab *s, u32 sid, struct context *context);
+int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context);
struct context *sidtab_search(struct sidtab *s, u32 sid);
struct context *sidtab_search_force(struct sidtab *s, u32 sid);
-int sidtab_map(struct sidtab *s,
- int (*apply) (u32 sid,
- struct context *context,
- void *args),
- void *args);
+int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);
-int sidtab_context_to_sid(struct sidtab *s,
- struct context *context,
- u32 *sid);
+int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);
-void sidtab_hash_eval(struct sidtab *h, char *tag);
void sidtab_destroy(struct sidtab *s);
-void sidtab_set(struct sidtab *dst, struct sidtab *src);
-void sidtab_shutdown(struct sidtab *s);
#endif /* _SS_SIDTAB_H_ */
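
With the constants above, every node is exactly one page, so a leaf holds PAGE_SIZE/sizeof(struct sidtab_entry_leaf) contexts and each inner level multiplies capacity by 2^SIDTAB_INNER_SHIFT, up to SIDTAB_MAX_LEVEL. This throwaway program reproduces the arithmetic for a 4 KiB page; the leaf entry size is a guess for illustration, since the real one depends on struct context:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096u
#define PAGE_SHIFT 12u

static unsigned size_to_shift(unsigned size)
{
    unsigned shift = 0;
    while ((1u << shift) < size)   /* ceil(log2(size)) */
        ++shift;
    return shift;
}

int main(void)
{
    unsigned ptr_size = sizeof(void *);          /* union sidtab_entry_inner */
    unsigned leaf_entry_size = 64;               /* illustrative guess */
    unsigned inner_shift = PAGE_SHIFT - size_to_shift(ptr_size);
    uint64_t capacity = PAGE_SIZE / leaf_entry_size;   /* leaf entries */

    for (unsigned level = 0; level <= 3; level++) {
        printf("level %u: capacity %llu entries\n",
               level, (unsigned long long)capacity);
        capacity <<= inner_shift;                /* one more inner level */
    }
    return 0;
}

Even with these guessed sizes, two inner levels already cover millions of SIDs, which is why SIDTAB_MAX_LEVEL stays small.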
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 91dc3783ed94..bd7d18bdb147 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -230,7 +230,7 @@ static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
u32 *sid, int ckall)
{
u32 sid_session = SECSID_NULL;
- struct sec_path *sp = skb->sp;
+ struct sec_path *sp = skb_sec_path(skb);
if (sp) {
int i;
@@ -408,7 +408,7 @@ int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
struct common_audit_data *ad)
{
int i;
- struct sec_path *sp = skb->sp;
+ struct sec_path *sp = skb_sec_path(skb);
u32 peer_sid = SECINITSID_UNLABELED;
if (sp) {
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index d3d9d9f1edb0..badffc8271c8 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -106,7 +106,7 @@ void tomoyo_convert_time(time64_t time64, struct tomoyo_time *stamp)
* @string: String representation for permissions in foo/bar/buz format.
* @keyword: Keyword to find from @string/
*
- * Returns ture if @keyword was found in @string, false otherwise.
+ * Returns true if @keyword was found in @string, false otherwise.
*
* This function assumes that strncmp(w1, w2, strlen(w1)) != 0 if w1 != w2.
*/
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c
index 1eddf8fa188f..8797d42e2b76 100644
--- a/sound/aoa/fabrics/layout.c
+++ b/sound/aoa/fabrics/layout.c
@@ -776,7 +776,7 @@ static int check_codec(struct aoa_codec *codec,
struct codec_connection *cc;
/* if the codec has a 'codec' node, we require a reference */
- if (codec->node && (strcmp(codec->node->name, "codec") == 0)) {
+ if (of_node_name_eq(codec->node, "codec")) {
snprintf(propname, sizeof(propname),
"platform-%s-codec-ref", codec->name);
ref = of_get_property(ldev->sound, propname, NULL);
@@ -1008,8 +1008,8 @@ static int aoa_fabric_layout_probe(struct soundbus_dev *sdev)
return -ENODEV;
/* by breaking out we keep a reference */
- while ((sound = of_get_next_child(sdev->ofdev.dev.of_node, sound))) {
- if (sound->type && strcasecmp(sound->type, "soundchip") == 0)
+ for_each_child_of_node(sdev->ofdev.dev.of_node, sound) {
+ if (of_node_is_type(sound, "soundchip"))
break;
}
if (!sound)
diff --git a/sound/aoa/soundbus/core.c b/sound/aoa/soundbus/core.c
index 70bcaa7f93dd..065d3a55725e 100644
--- a/sound/aoa/soundbus/core.c
+++ b/sound/aoa/soundbus/core.c
@@ -74,11 +74,11 @@ static int soundbus_uevent(struct device *dev, struct kobj_uevent_env *env)
of = &soundbus_dev->ofdev;
/* stuff we want to pass to /sbin/hotplug */
- retval = add_uevent_var(env, "OF_NAME=%s", of->dev.of_node->name);
+ retval = add_uevent_var(env, "OF_NAME=%pOFn", of->dev.of_node);
if (retval)
return retval;
- retval = add_uevent_var(env, "OF_TYPE=%s", of->dev.of_node->type);
+ retval = add_uevent_var(env, "OF_TYPE=%s", of_node_get_device_type(of->dev.of_node));
if (retval)
return retval;
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index bd7c5029fc59..c3f57a3fb1a5 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -154,7 +154,7 @@ static int i2sbus_add_dev(struct macio_dev *macio,
struct device_node *np)
{
struct i2sbus_dev *dev;
- struct device_node *child = NULL, *sound = NULL;
+ struct device_node *child, *sound = NULL;
struct resource *r;
int i, layout = 0, rlen, ok = force;
char node_name[6];
@@ -177,8 +177,8 @@ static int i2sbus_add_dev(struct macio_dev *macio,
return 0;
i = 0;
- while ((child = of_get_next_child(np, child))) {
- if (strcmp(child->name, "sound") == 0) {
+ for_each_child_of_node(np, child) {
+ if (of_node_name_eq(child, "sound")) {
i++;
sound = child;
}
diff --git a/sound/aoa/soundbus/sysfs.c b/sound/aoa/soundbus/sysfs.c
index 81da020bddef..a2d55e15afbb 100644
--- a/sound/aoa/soundbus/sysfs.c
+++ b/sound/aoa/soundbus/sysfs.c
@@ -1,18 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/stat.h>
/* FIX UP */
#include "soundbus.h"
-#define soundbus_config_of_attr(field, format_string) \
-static ssize_t \
-field##_show (struct device *dev, struct device_attribute *attr, \
- char *buf) \
-{ \
- struct soundbus_dev *mdev = to_soundbus_device (dev); \
- return sprintf (buf, format_string, mdev->ofdev.dev.of_node->field); \
-}
-
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -25,17 +17,33 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
strcat(buf, "\n");
length = strlen(buf);
} else {
- length = sprintf(buf, "of:N%sT%s\n",
- of->dev.of_node->name, of->dev.of_node->type);
+ length = sprintf(buf, "of:N%pOFn%c%s\n",
+ of->dev.of_node, 'T',
+ of_node_get_device_type(of->dev.of_node));
}
return length;
}
static DEVICE_ATTR_RO(modalias);
-soundbus_config_of_attr (name, "%s\n");
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct soundbus_dev *sdev = to_soundbus_device(dev);
+ struct platform_device *of = &sdev->ofdev;
+
+ return sprintf(buf, "%pOFn\n", of->dev.of_node);
+}
static DEVICE_ATTR_RO(name);
-soundbus_config_of_attr (type, "%s\n");
+
+static ssize_t type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct soundbus_dev *sdev = to_soundbus_device(dev);
+ struct platform_device *of = &sdev->ofdev;
+
+ return sprintf(buf, "%s\n", of_node_get_device_type(of->dev.of_node));
+}
static DEVICE_ATTR_RO(type);
struct attribute *soundbus_dev_attrs[] = {
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 26b5e245b074..a5b09e75e787 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -171,7 +171,8 @@ static int snd_compr_free(struct inode *inode, struct file *f)
}
data->stream.ops->free(&data->stream);
- kfree(data->stream.runtime->buffer);
+ if (!data->stream.runtime->dma_buffer_p)
+ kfree(data->stream.runtime->buffer);
kfree(data->stream.runtime);
kfree(data);
return 0;
@@ -505,7 +506,7 @@ static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
struct snd_compr_params *params)
{
unsigned int buffer_size;
- void *buffer;
+ void *buffer = NULL;
buffer_size = params->buffer.fragment_size * params->buffer.fragments;
if (stream->ops->copy) {
@@ -514,7 +515,18 @@ static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
* the data from core
*/
} else {
- buffer = kmalloc(buffer_size, GFP_KERNEL);
+ if (stream->runtime->dma_buffer_p) {
+
+ if (buffer_size > stream->runtime->dma_buffer_p->bytes)
+ dev_err(&stream->device->dev,
+ "Not enough DMA buffer");
+ else
+ buffer = stream->runtime->dma_buffer_p->area;
+
+ } else {
+ buffer = kmalloc(buffer_size, GFP_KERNEL);
+ }
+
if (!buffer)
return -ENOMEM;
}
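
The snd_compr_allocate_buffer() change prefers a DMA buffer preallocated by the driver and only falls back to kmalloc(), with the matching free path in snd_compr_free() skipping kfree() for driver-owned memory. The selection logic reduces to the sketch below (userspace stand-ins; dma_area/dma_bytes are placeholder names for the fields behind dma_buffer_p):

#include <stdlib.h>
#include <stdio.h>

struct stream {
    void  *dma_area;    /* pre-allocated by the "driver", may be NULL */
    size_t dma_bytes;
    void  *buffer;
    int    buffer_is_dma;
};

static int allocate_buffer(struct stream *s, size_t size)
{
    if (s->dma_area) {
        if (size > s->dma_bytes)
            return -1;              /* "Not enough DMA buffer" */
        s->buffer = s->dma_area;    /* borrow, don't own */
        s->buffer_is_dma = 1;
        return 0;
    }
    s->buffer = malloc(size);       /* kmalloc() fallback */
    s->buffer_is_dma = 0;
    return s->buffer ? 0 : -1;
}

static void free_buffer(struct stream *s)
{
    if (!s->buffer_is_dma)          /* mirror of the snd_compr_free() check */
        free(s->buffer);
    s->buffer = NULL;
}

int main(void)
{
    struct stream s = { .dma_area = NULL };

    if (allocate_buffer(&s, 4096) == 0)
        printf("heap-allocated fallback buffer in use\n");
    free_buffer(&s);
    return 0;
}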
diff --git a/sound/core/control.c b/sound/core/control.c
index 9aa15bfc7936..fad7db402443 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -348,67 +348,100 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
return 0;
}
-/**
- * snd_ctl_add - add the control instance to the card
- * @card: the card instance
- * @kcontrol: the control instance to add
- *
- * Adds the control instance created via snd_ctl_new() or
- * snd_ctl_new1() to the given card. Assigns also an unique
- * numid used for fast search.
- *
- * It frees automatically the control which cannot be added.
- *
- * Return: Zero if successful, or a negative error code on failure.
- *
- */
-int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
+enum snd_ctl_add_mode {
+ CTL_ADD_EXCLUSIVE, CTL_REPLACE, CTL_ADD_ON_REPLACE,
+};
+
+/* add/replace a new kcontrol object; call with card->controls_rwsem locked */
+static int __snd_ctl_add_replace(struct snd_card *card,
+ struct snd_kcontrol *kcontrol,
+ enum snd_ctl_add_mode mode)
{
struct snd_ctl_elem_id id;
unsigned int idx;
unsigned int count;
- int err = -EINVAL;
+ struct snd_kcontrol *old;
+ int err;
- if (! kcontrol)
- return err;
- if (snd_BUG_ON(!card || !kcontrol->info))
- goto error;
id = kcontrol->id;
if (id.index > UINT_MAX - kcontrol->count)
- goto error;
+ return -EINVAL;
- down_write(&card->controls_rwsem);
- if (snd_ctl_find_id(card, &id)) {
- up_write(&card->controls_rwsem);
- dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
- id.iface,
- id.device,
- id.subdevice,
- id.name,
- id.index);
- err = -EBUSY;
- goto error;
- }
- if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
- up_write(&card->controls_rwsem);
- err = -ENOMEM;
- goto error;
+ old = snd_ctl_find_id(card, &id);
+ if (!old) {
+ if (mode == CTL_REPLACE)
+ return -EINVAL;
+ } else {
+ if (mode == CTL_ADD_EXCLUSIVE) {
+ dev_err(card->dev,
+ "control %i:%i:%i:%s:%i is already present\n",
+ id.iface, id.device, id.subdevice, id.name,
+ id.index);
+ return -EBUSY;
+ }
+
+ err = snd_ctl_remove(card, old);
+ if (err < 0)
+ return err;
}
+
+ if (snd_ctl_find_hole(card, kcontrol->count) < 0)
+ return -ENOMEM;
+
list_add_tail(&kcontrol->list, &card->controls);
card->controls_count += kcontrol->count;
kcontrol->id.numid = card->last_numid + 1;
card->last_numid += kcontrol->count;
+
id = kcontrol->id;
count = kcontrol->count;
- up_write(&card->controls_rwsem);
for (idx = 0; idx < count; idx++, id.index++, id.numid++)
snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
+
+ return 0;
+}
+
+static int snd_ctl_add_replace(struct snd_card *card,
+ struct snd_kcontrol *kcontrol,
+ enum snd_ctl_add_mode mode)
+{
+ int err = -EINVAL;
+
+ if (! kcontrol)
+ return err;
+ if (snd_BUG_ON(!card || !kcontrol->info))
+ goto error;
+
+ down_write(&card->controls_rwsem);
+ err = __snd_ctl_add_replace(card, kcontrol, mode);
+ up_write(&card->controls_rwsem);
+ if (err < 0)
+ goto error;
return 0;
error:
snd_ctl_free_one(kcontrol);
return err;
}
+
+/**
+ * snd_ctl_add - add the control instance to the card
+ * @card: the card instance
+ * @kcontrol: the control instance to add
+ *
+ * Adds the control instance created via snd_ctl_new() or
+ * snd_ctl_new1() to the given card. Assigns also an unique
+ * numid used for fast search.
+ *
+ * It frees automatically the control which cannot be added.
+ *
+ * Return: Zero if successful, or a negative error code on failure.
+ *
+ */
+int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
+{
+ return snd_ctl_add_replace(card, kcontrol, CTL_ADD_EXCLUSIVE);
+}
EXPORT_SYMBOL(snd_ctl_add);
/**
@@ -428,53 +461,8 @@ EXPORT_SYMBOL(snd_ctl_add);
int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
bool add_on_replace)
{
- struct snd_ctl_elem_id id;
- unsigned int count;
- unsigned int idx;
- struct snd_kcontrol *old;
- int ret;
-
- if (!kcontrol)
- return -EINVAL;
- if (snd_BUG_ON(!card || !kcontrol->info)) {
- ret = -EINVAL;
- goto error;
- }
- id = kcontrol->id;
- down_write(&card->controls_rwsem);
- old = snd_ctl_find_id(card, &id);
- if (!old) {
- if (add_on_replace)
- goto add;
- up_write(&card->controls_rwsem);
- ret = -EINVAL;
- goto error;
- }
- ret = snd_ctl_remove(card, old);
- if (ret < 0) {
- up_write(&card->controls_rwsem);
- goto error;
- }
-add:
- if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
- up_write(&card->controls_rwsem);
- ret = -ENOMEM;
- goto error;
- }
- list_add_tail(&kcontrol->list, &card->controls);
- card->controls_count += kcontrol->count;
- kcontrol->id.numid = card->last_numid + 1;
- card->last_numid += kcontrol->count;
- id = kcontrol->id;
- count = kcontrol->count;
- up_write(&card->controls_rwsem);
- for (idx = 0; idx < count; idx++, id.index++, id.numid++)
- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
- return 0;
-
-error:
- snd_ctl_free_one(kcontrol);
- return ret;
+ return snd_ctl_add_replace(card, kcontrol,
+ add_on_replace ? CTL_ADD_ON_REPLACE : CTL_REPLACE);
}
EXPORT_SYMBOL(snd_ctl_replace);
@@ -1361,9 +1349,12 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
kctl->tlv.c = snd_ctl_elem_user_tlv;
/* This function manage to free the instance on failure. */
- err = snd_ctl_add(card, kctl);
- if (err < 0)
- return err;
+ down_write(&card->controls_rwsem);
+ err = __snd_ctl_add_replace(card, kctl, CTL_ADD_EXCLUSIVE);
+ if (err < 0) {
+ snd_ctl_free_one(kctl);
+ goto unlock;
+ }
offset = snd_ctl_get_ioff(kctl, &info->id);
snd_ctl_build_ioff(&info->id, kctl, offset);
/*
@@ -1374,10 +1365,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
* which locks the element.
*/
- down_write(&card->controls_rwsem);
card->user_ctl_count++;
- up_write(&card->controls_rwsem);
+ unlock:
+ up_write(&card->controls_rwsem);
return 0;
}
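
The control.c rework folds snd_ctl_add() and snd_ctl_replace() into one helper parameterized by an add mode, so the find/remove/insert sequence runs exactly once under a single controls_rwsem write section, and snd_ctl_elem_add() can call the locked core directly. The decision table is small enough to sketch on its own (toy registry; the enum mirrors the one added above):

#include <stdio.h>

enum add_mode { ADD_EXCLUSIVE, REPLACE, ADD_ON_REPLACE };

static const char *registry[8];

static int add_replace(int id, const char *name, enum add_mode mode)
{
    const char *old = registry[id];

    if (!old) {
        if (mode == REPLACE)
            return -1;       /* nothing to replace: -EINVAL */
    } else {
        if (mode == ADD_EXCLUSIVE)
            return -2;       /* already present: -EBUSY */
        registry[id] = NULL; /* snd_ctl_remove() analogue */
    }
    registry[id] = name;     /* common insert path */
    return 0;
}

int main(void)
{
    printf("%d\n", add_replace(3, "Master", ADD_EXCLUSIVE)); /* 0  */
    printf("%d\n", add_replace(3, "Master", ADD_EXCLUSIVE)); /* -2 */
    printf("%d\n", add_replace(3, "PCM", ADD_ON_REPLACE));   /* 0  */
    printf("%d\n", add_replace(4, "Mic", REPLACE));          /* -1 */
    return 0;
}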
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index fdb9b92fc8d6..01b9d62eef14 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -25,6 +25,7 @@
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/pcm.h>
@@ -129,6 +130,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
return -EFAULT;
if (stream < 0 || stream > 1)
return -EINVAL;
+ stream = array_index_nospec(stream, 2);
if (get_user(subdevice, &info->subdevice))
return -EFAULT;
mutex_lock(&register_mutex);
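
The pcm.c hunk clamps `stream` with array_index_nospec() after the bounds check, so a mispredicted branch cannot speculatively index pcm->streams[] with an out-of-range value. The masking arithmetic can be reproduced in userspace; the sketch below shows only the shape of the mitigation and relies on arithmetic right shift of a negative value, as the kernel's generic helper does, so treat it as illustration rather than guaranteed hardening:

#include <stdio.h>
#include <stdint.h>

/* All-ones when index < size, all-zeros otherwise (index, size < 2^63).
 * Same arithmetic as the kernel's generic array_index_mask_nospec(). */
static inline uint64_t index_mask(uint64_t index, uint64_t size)
{
    return (uint64_t)(~(int64_t)(index | (size - 1 - index)) >> 63);
}

int main(void)
{
    const char *streams[2] = { "playback", "capture" };
    uint64_t stream = 1;                 /* pretend this came from userspace */

    if (stream > 1)
        return 1;                        /* the architectural bounds check */
    stream &= index_mask(stream, 2);     /* speculation-safe clamp to 0..1 */
    printf("%s\n", streams[stream]);
    return 0;
}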
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 66c90f486af9..818dff1de545 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -36,6 +36,7 @@
#include <sound/timer.h>
#include <sound/minors.h>
#include <linux/uio.h>
+#include <linux/delay.h>
#include "pcm_local.h"
@@ -91,12 +92,12 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
* and this may lead to a deadlock when the code path takes read sem
* twice (e.g. one in snd_pcm_action_nonatomic() and another in
* snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to
- * spin until it gets the lock.
+ * sleep until all the readers are completed without blocking by writer.
*/
-static inline void down_write_nonblock(struct rw_semaphore *lock)
+static inline void down_write_nonfifo(struct rw_semaphore *lock)
{
while (!down_write_trylock(lock))
- cond_resched();
+ msleep(1);
}
#define PCM_LOCK_DEFAULT 0
@@ -1967,7 +1968,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
res = -ENOMEM;
goto _nolock;
}
- down_write_nonblock(&snd_pcm_link_rwsem);
+ down_write_nonfifo(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
substream->runtime->status->state != substream1->runtime->status->state ||
@@ -2014,7 +2015,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
struct snd_pcm_substream *s;
int res = 0;
- down_write_nonblock(&snd_pcm_link_rwsem);
+ down_write_nonfifo(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (!snd_pcm_stream_linked(substream)) {
res = -EALREADY;
@@ -2369,7 +2370,8 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
static void pcm_release_private(struct snd_pcm_substream *substream)
{
- snd_pcm_unlink(substream);
+ if (snd_pcm_stream_linked(substream))
+ snd_pcm_unlink(substream);
}
void snd_pcm_release_substream(struct snd_pcm_substream *substream)
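
down_write_nonfifo() above deliberately keeps retrying trylock and sleeping instead of queueing as a writer, so arriving readers are never blocked behind a waiting writer and the nested-read-lock deadlock described in the comment cannot form, at the cost of possible writer starvation. The pthread equivalent is a trylock-and-sleep loop:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Acquire the write lock without ever joining the waiter queue:
 * readers that arrive while we sleep still get in immediately.
 * Trade-off: the writer can starve under constant read traffic. */
static void down_write_nonfifo(pthread_rwlock_t *lock)
{
    while (pthread_rwlock_trywrlock(lock) != 0)
        usleep(1000);               /* msleep(1) analogue */
}

int main(void)
{
    pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

    down_write_nonfifo(&lock);
    printf("writer acquired without blocking readers\n");
    pthread_rwlock_unlock(&lock);
    return 0;
}

Switching the retry from cond_resched() to msleep(1) also keeps the loop from monopolizing a CPU while readers hold the semaphore.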
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 8a146b039276..052e00590259 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -41,6 +41,7 @@ config SND_OXFW
* Mackie(Loud) U.420/U.420d
* TASCAM FireOne
* Stanton Controllers & Systems 1 Deck/Mixer
+ * APOGEE duet FireWire
To compile this driver as a module, choose M here: the module
will be called snd-oxfw.
@@ -161,5 +162,6 @@ config SND_FIREFACE
help
Say Y here to include support for RME fireface series.
* Fireface 400
+ * Fireface 800
endif # SND_FIREWIRE
diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
index 54cdd4ffa9ce..ac20acf48fc6 100644
--- a/sound/firewire/amdtp-stream-trace.h
+++ b/sound/firewire/amdtp-stream-trace.h
@@ -131,7 +131,7 @@ TRACE_EVENT(in_packet_without_header,
__entry->index = index;
),
TP_printk(
- "%02u %04u %04x %04x %02d %03u %3u %3u %02u %01u %02u",
+ "%02u %04u %04x %04x %02d %03u %02u %03u %02u %01u %02u",
__entry->second,
__entry->cycle,
__entry->src,
@@ -169,7 +169,7 @@ TRACE_EVENT(out_packet_without_header,
__entry->dest = fw_parent_device(s->unit)->node_id;
__entry->payload_quadlets = payload_length / 4;
__entry->data_blocks = data_blocks,
- __entry->data_blocks = s->data_block_counter,
+ __entry->data_block_counter = s->data_block_counter,
__entry->packet_index = s->packet_index;
__entry->irq = !!in_interrupt();
__entry->index = index;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 9be76c808fcc..3ada55ed5381 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -654,15 +654,17 @@ end:
}
static int handle_in_packet_without_header(struct amdtp_stream *s,
- unsigned int payload_quadlets, unsigned int cycle,
+ unsigned int payload_length, unsigned int cycle,
unsigned int index)
{
__be32 *buffer;
+ unsigned int payload_quadlets;
unsigned int data_blocks;
struct snd_pcm_substream *pcm;
unsigned int pcm_frames;
buffer = s->buffer.packets[s->packet_index].buffer;
+ payload_quadlets = payload_length / 4;
data_blocks = payload_quadlets / s->data_block_quadlets;
trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
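
The fix above makes the parameter's unit explicit: the caller passes payload_length in bytes, and the quadlet count is derived exactly once inside the function. A two-liner captures the discipline (assumed constant: a quadlet is 4 bytes, per IEEE 1394):

#include <stdio.h>

#define QUADLET_BYTES 4   /* IEEE 1394 quadlet */

/* Take bytes at the boundary; convert to quadlets once, internally. */
static unsigned int data_blocks_from_payload(unsigned int payload_length,
                                             unsigned int data_block_quadlets)
{
    unsigned int payload_quadlets = payload_length / QUADLET_BYTES;

    return payload_quadlets / data_block_quadlets;
}

int main(void)
{
    /* 192 bytes of payload, 8 quadlets per data block -> 6 blocks */
    printf("%u\n", data_blocks_from_payload(192, 8));
    return 0;
}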
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 672d13488454..d91874275d2c 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -408,7 +408,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
/* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */
SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal),
/* Apogee Electronics, Ensemble */
- SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal),
+ SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal),
/* ESI, Quatafire610 */
SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal),
/* AcousticReality, eARMasterOne */
diff --git a/sound/firewire/fireface/Makefile b/sound/firewire/fireface/Makefile
index 8f807284ba54..79a7d6d99d72 100644
--- a/sound/firewire/fireface/Makefile
+++ b/sound/firewire/fireface/Makefile
@@ -1,3 +1,4 @@
snd-fireface-objs := ff.o ff-transaction.o ff-midi.o ff-proc.o amdtp-ff.o \
- ff-stream.o ff-pcm.o ff-hwdep.o ff-protocol-ff400.o
+ ff-stream.o ff-pcm.o ff-hwdep.o ff-protocol-ff400.o \
+ ff-protocol-ff800.o
obj-$(CONFIG_SND_FIREFACE) += snd-fireface.o
diff --git a/sound/firewire/fireface/ff-pcm.c b/sound/firewire/fireface/ff-pcm.c
index bf47f9ec8703..d0bc96b20a65 100644
--- a/sound/firewire/fireface/ff-pcm.c
+++ b/sound/firewire/fireface/ff-pcm.c
@@ -8,11 +8,6 @@
#include "ff.h"
-static inline unsigned int get_multiplier_mode_with_index(unsigned int index)
-{
- return ((int)index - 1) / 2;
-}
-
static int hw_rule_rate(struct snd_pcm_hw_params *params,
struct snd_pcm_hw_rule *rule)
{
@@ -24,10 +19,16 @@ static int hw_rule_rate(struct snd_pcm_hw_params *params,
struct snd_interval t = {
.min = UINT_MAX, .max = 0, .integer = 1
};
- unsigned int i, mode;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(amdtp_rate_table); i++) {
- mode = get_multiplier_mode_with_index(i);
+ enum snd_ff_stream_mode mode;
+ int err;
+
+ err = snd_ff_stream_get_multiplier_mode(i, &mode);
+ if (err < 0)
+ continue;
+
if (!snd_interval_test(c, pcm_channels[mode]))
continue;
@@ -49,10 +50,16 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
struct snd_interval t = {
.min = UINT_MAX, .max = 0, .integer = 1
};
- unsigned int i, mode;
+ unsigned int i;
for (i = 0; i < ARRAY_SIZE(amdtp_rate_table); i++) {
- mode = get_multiplier_mode_with_index(i);
+ enum snd_ff_stream_mode mode;
+ int err;
+
+ err = snd_ff_stream_get_multiplier_mode(i, &mode);
+ if (err < 0)
+ continue;
+
if (!snd_interval_test(r, amdtp_rate_table[i]))
continue;
@@ -66,7 +73,6 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
static void limit_channels_and_rates(struct snd_pcm_hardware *hw,
const unsigned int *pcm_channels)
{
- unsigned int mode;
unsigned int rate, channels;
int i;
@@ -76,7 +82,12 @@ static void limit_channels_and_rates(struct snd_pcm_hardware *hw,
hw->rate_max = 0;
for (i = 0; i < ARRAY_SIZE(amdtp_rate_table); i++) {
- mode = get_multiplier_mode_with_index(i);
+ enum snd_ff_stream_mode mode;
+ int err;
+
+ err = snd_ff_stream_get_multiplier_mode(i, &mode);
+ if (err < 0)
+ continue;
channels = pcm_channels[mode];
if (pcm_channels[mode] == 0)
@@ -141,7 +152,7 @@ static int pcm_open(struct snd_pcm_substream *substream)
if (err < 0)
goto release_lock;
- err = ff->spec->protocol->get_clock(ff, &rate, &src);
+ err = snd_ff_transaction_get_clock(ff, &rate, &src);
if (err < 0)
goto release_lock;
diff --git a/sound/firewire/fireface/ff-proc.c b/sound/firewire/fireface/ff-proc.c
index 40ccbfd8ef89..a0c550dabe9a 100644
--- a/sound/firewire/fireface/ff-proc.c
+++ b/sound/firewire/fireface/ff-proc.c
@@ -12,16 +12,205 @@ static void proc_dump_clock_config(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_ff *ff = entry->private_data;
+ __le32 reg;
+ u32 data;
+ unsigned int rate;
+ const char *src;
+ int err;
- ff->spec->protocol->dump_clock_config(ff, buffer);
+ err = snd_fw_transaction(ff->unit, TCODE_READ_BLOCK_REQUEST,
+ SND_FF_REG_CLOCK_CONFIG, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return;
+
+ data = le32_to_cpu(reg);
+
+ snd_iprintf(buffer, "Output S/PDIF format: %s (Emphasis: %s)\n",
+ (data & 0x20) ? "Professional" : "Consumer",
+ (data & 0x40) ? "on" : "off");
+
+ snd_iprintf(buffer, "Optical output interface format: %s\n",
+ ((data >> 8) & 0x01) ? "S/PDIF" : "ADAT");
+
+ snd_iprintf(buffer, "Word output single speed: %s\n",
+ ((data >> 8) & 0x20) ? "on" : "off");
+
+ snd_iprintf(buffer, "S/PDIF input interface: %s\n",
+ ((data >> 8) & 0x02) ? "Optical" : "Coaxial");
+
+ switch ((data >> 1) & 0x03) {
+ case 0x01:
+ rate = 32000;
+ break;
+ case 0x00:
+ rate = 44100;
+ break;
+ case 0x03:
+ rate = 48000;
+ break;
+ case 0x02:
+ default:
+ return;
+ }
+
+ if (data & 0x08)
+ rate *= 2;
+ else if (data & 0x10)
+ rate *= 4;
+
+ snd_iprintf(buffer, "Sampling rate: %d\n", rate);
+
+ if (data & 0x01) {
+ src = "Internal";
+ } else {
+ switch ((data >> 10) & 0x07) {
+ case 0x00:
+ src = "ADAT1";
+ break;
+ case 0x01:
+ src = "ADAT2";
+ break;
+ case 0x03:
+ src = "S/PDIF";
+ break;
+ case 0x04:
+ src = "Word";
+ break;
+ case 0x05:
+ src = "LTC";
+ break;
+ default:
+ return;
+ }
+ }
+
+ snd_iprintf(buffer, "Sync to clock source: %s\n", src);
}
static void proc_dump_sync_status(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct snd_ff *ff = entry->private_data;
+ __le32 reg;
+ u32 data;
+ int err;
+
+ err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
+ SND_FF_REG_SYNC_STATUS, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return;
+
+ data = le32_to_cpu(reg);
+
+ snd_iprintf(buffer, "External source detection:\n");
+
+ snd_iprintf(buffer, "Word Clock:");
+ if ((data >> 24) & 0x20) {
+ if ((data >> 24) & 0x40)
+ snd_iprintf(buffer, "sync\n");
+ else
+ snd_iprintf(buffer, "lock\n");
+ } else {
+ snd_iprintf(buffer, "none\n");
+ }
+
+ snd_iprintf(buffer, "S/PDIF:");
+ if ((data >> 16) & 0x10) {
+ if ((data >> 16) & 0x04)
+ snd_iprintf(buffer, "sync\n");
+ else
+ snd_iprintf(buffer, "lock\n");
+ } else {
+ snd_iprintf(buffer, "none\n");
+ }
+
+ snd_iprintf(buffer, "ADAT1:");
+ if ((data >> 8) & 0x04) {
+ if ((data >> 8) & 0x10)
+ snd_iprintf(buffer, "sync\n");
+ else
+ snd_iprintf(buffer, "lock\n");
+ } else {
+ snd_iprintf(buffer, "none\n");
+ }
+
+ snd_iprintf(buffer, "ADAT2:");
+ if ((data >> 8) & 0x08) {
+ if ((data >> 8) & 0x20)
+ snd_iprintf(buffer, "sync\n");
+ else
+ snd_iprintf(buffer, "lock\n");
+ } else {
+ snd_iprintf(buffer, "none\n");
+ }
+
+ snd_iprintf(buffer, "\nUsed external source:\n");
+
+ if (((data >> 22) & 0x07) == 0x07) {
+ snd_iprintf(buffer, "None\n");
+ } else {
+ switch ((data >> 22) & 0x07) {
+ case 0x00:
+ snd_iprintf(buffer, "ADAT1:");
+ break;
+ case 0x01:
+ snd_iprintf(buffer, "ADAT2:");
+ break;
+ case 0x03:
+ snd_iprintf(buffer, "S/PDIF:");
+ break;
+ case 0x04:
+ snd_iprintf(buffer, "Word:");
+ break;
+ case 0x07:
+ snd_iprintf(buffer, "Nothing:");
+ break;
+ case 0x02:
+ case 0x05:
+ case 0x06:
+ default:
+ snd_iprintf(buffer, "unknown:");
+ break;
+ }
+
+ if ((data >> 25) & 0x07) {
+ switch ((data >> 25) & 0x07) {
+ case 0x01:
+ snd_iprintf(buffer, "32000\n");
+ break;
+ case 0x02:
+ snd_iprintf(buffer, "44100\n");
+ break;
+ case 0x03:
+ snd_iprintf(buffer, "48000\n");
+ break;
+ case 0x04:
+ snd_iprintf(buffer, "64000\n");
+ break;
+ case 0x05:
+ snd_iprintf(buffer, "88200\n");
+ break;
+ case 0x06:
+ snd_iprintf(buffer, "96000\n");
+ break;
+ case 0x07:
+ snd_iprintf(buffer, "128000\n");
+ break;
+ case 0x08:
+ snd_iprintf(buffer, "176400\n");
+ break;
+ case 0x09:
+ snd_iprintf(buffer, "192000\n");
+ break;
+ case 0x00:
+ snd_iprintf(buffer, "unknown\n");
+ break;
+ }
+ }
+ }
- ff->spec->protocol->dump_sync_status(ff, buffer);
+ snd_iprintf(buffer, "Multiplied:");
+ snd_iprintf(buffer, "%d\n", (data & 0x3ff) * 250);
}
static void add_node(struct snd_ff *ff, struct snd_info_entry *root,
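For reference, the sampling-rate decoding used by proc_dump_clock_config() above can be condensed into a small base-rate table plus the two multiplier flags. A sketch under the bit layout shown in the hunk (selector 0x02 treated as invalid; names are illustrative, not the driver's):

        static int ff_clock_rate(u32 data, unsigned int *rate)
        {
                static const unsigned int base_rates[4] = {
                        [0x00] = 44100, [0x01] = 32000, [0x03] = 48000,
                };
                unsigned int sel = (data >> 1) & 0x03;

                if (base_rates[sel] == 0)
                        return -1;      /* 0x02: reserved/unknown */

                *rate = base_rates[sel];
                if (data & 0x08)
                        *rate *= 2;     /* double speed */
                else if (data & 0x10)
                        *rate *= 4;     /* quadruple speed */
                return 0;
        }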
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
index 64c3cb0fb926..2280fab9b3c7 100644
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
@@ -14,85 +14,60 @@
#define FF400_ISOC_COMM_START 0x000080100508ull
#define FF400_TX_PACKET_FORMAT 0x00008010050cull
#define FF400_ISOC_COMM_STOP 0x000080100510ull
-#define FF400_SYNC_STATUS 0x0000801c0000ull
-#define FF400_FETCH_PCM_FRAMES 0x0000801c0000ull /* For block request. */
-#define FF400_CLOCK_CONFIG 0x0000801c0004ull
-#define FF400_MIDI_HIGH_ADDR 0x0000801003f4ull
-#define FF400_MIDI_RX_PORT_0 0x000080180000ull
-#define FF400_MIDI_RX_PORT_1 0x000080190000ull
-
-static int ff400_get_clock(struct snd_ff *ff, unsigned int *rate,
- enum snd_ff_clock_src *src)
+/*
+ * Fireface 400 manages the isochronous channel number in a 3-bit field.
+ * Therefore, channels 0 through 7 can be allocated.
+ */
+static int keep_resources(struct snd_ff *ff, unsigned int rate)
{
- __le32 reg;
- u32 data;
+ enum snd_ff_stream_mode mode;
+ int i;
int err;
- err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
- FF400_SYNC_STATUS, &reg, sizeof(reg), 0);
+ // Check whether the given value is supported or not.
+ for (i = 0; i < CIP_SFC_COUNT; i++) {
+ if (amdtp_rate_table[i] == rate)
+ break;
+ }
+ if (i >= CIP_SFC_COUNT)
+ return -EINVAL;
+
+ err = snd_ff_stream_get_multiplier_mode(i, &mode);
if (err < 0)
return err;
- data = le32_to_cpu(reg);
- /* Calculate sampling rate. */
- switch ((data >> 1) & 0x03) {
- case 0x01:
- *rate = 32000;
- break;
- case 0x00:
- *rate = 44100;
- break;
- case 0x03:
- *rate = 48000;
- break;
- case 0x02:
- default:
- return -EIO;
- }
-
- if (data & 0x08)
- *rate *= 2;
- else if (data & 0x10)
- *rate *= 4;
+ /* Keep resources for in-stream. */
+ ff->tx_resources.channels_mask = 0x00000000000000ffuLL;
+ err = fw_iso_resources_allocate(&ff->tx_resources,
+ amdtp_stream_get_max_payload(&ff->tx_stream),
+ fw_parent_device(ff->unit)->max_speed);
+ if (err < 0)
+ return err;
- /* Calculate source of clock. */
- if (data & 0x01) {
- *src = SND_FF_CLOCK_SRC_INTERNAL;
- } else {
- /* TODO: 0x00, 0x01, 0x02, 0x06, 0x07? */
- switch ((data >> 10) & 0x07) {
- case 0x03:
- *src = SND_FF_CLOCK_SRC_SPDIF;
- break;
- case 0x04:
- *src = SND_FF_CLOCK_SRC_WORD;
- break;
- case 0x05:
- *src = SND_FF_CLOCK_SRC_LTC;
- break;
- case 0x00:
- default:
- *src = SND_FF_CLOCK_SRC_ADAT;
- break;
- }
- }
+ /* Keep resources for out-stream. */
+ err = amdtp_ff_set_parameters(&ff->rx_stream, rate,
+ ff->spec->pcm_playback_channels[mode]);
+ if (err < 0)
+ return err;
+ ff->rx_resources.channels_mask = 0x00000000000000ffuLL;
+ err = fw_iso_resources_allocate(&ff->rx_resources,
+ amdtp_stream_get_max_payload(&ff->rx_stream),
+ fw_parent_device(ff->unit)->max_speed);
+ if (err < 0)
+ fw_iso_resources_free(&ff->tx_resources);
- return 0;
+ return err;
}
static int ff400_begin_session(struct snd_ff *ff, unsigned int rate)
{
__le32 reg;
- int i, err;
+ int err;
- /* Check whether the given value is supported or not. */
- for (i = 0; i < CIP_SFC_COUNT; i++) {
- if (amdtp_rate_table[i] == rate)
- break;
- }
- if (i == CIP_SFC_COUNT)
- return -EINVAL;
+ err = keep_resources(ff, rate);
+ if (err < 0)
+ return err;
/* Set the number of data blocks transferred in a second. */
reg = cpu_to_le32(rate);
@@ -142,233 +117,45 @@ static void ff400_finish_session(struct snd_ff *ff)
FF400_ISOC_COMM_STOP, &reg, sizeof(reg), 0);
}
-static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
+static void ff400_handle_midi_msg(struct snd_ff *ff, __le32 *buf, size_t length)
{
- __le32 *reg;
int i;
- int err;
- reg = kcalloc(18, sizeof(__le32), GFP_KERNEL);
- if (reg == NULL)
- return -ENOMEM;
+ for (i = 0; i < length / 4; i++) {
+ u32 quad = le32_to_cpu(buf[i]);
+ u8 byte;
+ unsigned int index;
+ struct snd_rawmidi_substream *substream;
- if (enable) {
+ /* Message in first port. */
/*
- * Each quadlet is corresponding to data channels in a data
- * blocks in reverse order. Precisely, quadlets for available
- * data channels should be enabled. Here, I take second best
- * to fetch PCM frames from all of data channels regardless of
- * stf.
+ * This value may represent the index of this unit when the same
+ * units are on the same IEEE 1394 bus. This driver doesn't use
+ * it.
*/
- for (i = 0; i < 18; ++i)
- reg[i] = cpu_to_le32(0x00000001);
- }
-
- err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
- FF400_FETCH_PCM_FRAMES, reg,
- sizeof(__le32) * 18, 0);
- kfree(reg);
- return err;
-}
-
-static void ff400_dump_sync_status(struct snd_ff *ff,
- struct snd_info_buffer *buffer)
-{
- __le32 reg;
- u32 data;
- int err;
-
- err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
- FF400_SYNC_STATUS, &reg, sizeof(reg), 0);
- if (err < 0)
- return;
-
- data = le32_to_cpu(reg);
-
- snd_iprintf(buffer, "External source detection:\n");
-
- snd_iprintf(buffer, "Word Clock:");
- if ((data >> 24) & 0x20) {
- if ((data >> 24) & 0x40)
- snd_iprintf(buffer, "sync\n");
- else
- snd_iprintf(buffer, "lock\n");
- } else {
- snd_iprintf(buffer, "none\n");
- }
-
- snd_iprintf(buffer, "S/PDIF:");
- if ((data >> 16) & 0x10) {
- if ((data >> 16) & 0x04)
- snd_iprintf(buffer, "sync\n");
- else
- snd_iprintf(buffer, "lock\n");
- } else {
- snd_iprintf(buffer, "none\n");
- }
-
- snd_iprintf(buffer, "ADAT:");
- if ((data >> 8) & 0x04) {
- if ((data >> 8) & 0x10)
- snd_iprintf(buffer, "sync\n");
- else
- snd_iprintf(buffer, "lock\n");
- } else {
- snd_iprintf(buffer, "none\n");
- }
-
- snd_iprintf(buffer, "\nUsed external source:\n");
-
- if (((data >> 22) & 0x07) == 0x07) {
- snd_iprintf(buffer, "None\n");
- } else {
- switch ((data >> 22) & 0x07) {
- case 0x00:
- snd_iprintf(buffer, "ADAT:");
- break;
- case 0x03:
- snd_iprintf(buffer, "S/PDIF:");
- break;
- case 0x04:
- snd_iprintf(buffer, "Word:");
- break;
- case 0x07:
- snd_iprintf(buffer, "Nothing:");
- break;
- case 0x01:
- case 0x02:
- case 0x05:
- case 0x06:
- default:
- snd_iprintf(buffer, "unknown:");
- break;
- }
-
- if ((data >> 25) & 0x07) {
- switch ((data >> 25) & 0x07) {
- case 0x01:
- snd_iprintf(buffer, "32000\n");
- break;
- case 0x02:
- snd_iprintf(buffer, "44100\n");
- break;
- case 0x03:
- snd_iprintf(buffer, "48000\n");
- break;
- case 0x04:
- snd_iprintf(buffer, "64000\n");
- break;
- case 0x05:
- snd_iprintf(buffer, "88200\n");
- break;
- case 0x06:
- snd_iprintf(buffer, "96000\n");
- break;
- case 0x07:
- snd_iprintf(buffer, "128000\n");
- break;
- case 0x08:
- snd_iprintf(buffer, "176400\n");
- break;
- case 0x09:
- snd_iprintf(buffer, "192000\n");
- break;
- case 0x00:
- snd_iprintf(buffer, "unknown\n");
- break;
+ index = (quad >> 8) & 0xff;
+ if (index > 0) {
+ substream = READ_ONCE(ff->tx_midi_substreams[0]);
+ if (substream != NULL) {
+ byte = quad & 0xff;
+ snd_rawmidi_receive(substream, &byte, 1);
}
}
- }
-
- snd_iprintf(buffer, "Multiplied:");
- snd_iprintf(buffer, "%d\n", (data & 0x3ff) * 250);
-}
-static void ff400_dump_clock_config(struct snd_ff *ff,
- struct snd_info_buffer *buffer)
-{
- __le32 reg;
- u32 data;
- unsigned int rate;
- const char *src;
- int err;
-
- err = snd_fw_transaction(ff->unit, TCODE_READ_BLOCK_REQUEST,
- FF400_CLOCK_CONFIG, &reg, sizeof(reg), 0);
- if (err < 0)
- return;
-
- data = le32_to_cpu(reg);
-
- snd_iprintf(buffer, "Output S/PDIF format: %s (Emphasis: %s)\n",
- (data & 0x20) ? "Professional" : "Consumer",
- (data & 0x40) ? "on" : "off");
-
- snd_iprintf(buffer, "Optical output interface format: %s\n",
- ((data >> 8) & 0x01) ? "S/PDIF" : "ADAT");
-
- snd_iprintf(buffer, "Word output single speed: %s\n",
- ((data >> 8) & 0x20) ? "on" : "off");
-
- snd_iprintf(buffer, "S/PDIF input interface: %s\n",
- ((data >> 8) & 0x02) ? "Optical" : "Coaxial");
-
- switch ((data >> 1) & 0x03) {
- case 0x01:
- rate = 32000;
- break;
- case 0x00:
- rate = 44100;
- break;
- case 0x03:
- rate = 48000;
- break;
- case 0x02:
- default:
- return;
- }
-
- if (data & 0x08)
- rate *= 2;
- else if (data & 0x10)
- rate *= 4;
-
- snd_iprintf(buffer, "Sampling rate: %d\n", rate);
-
- if (data & 0x01) {
- src = "Internal";
- } else {
- switch ((data >> 10) & 0x07) {
- case 0x00:
- src = "ADAT";
- break;
- case 0x03:
- src = "S/PDIF";
- break;
- case 0x04:
- src = "Word";
- break;
- case 0x05:
- src = "LTC";
- break;
- default:
- return;
+ /* Message in second port. */
+ index = (quad >> 24) & 0xff;
+ if (index > 0) {
+ substream = READ_ONCE(ff->tx_midi_substreams[1]);
+ if (substream != NULL) {
+ byte = (quad >> 16) & 0xff;
+ snd_rawmidi_receive(substream, &byte, 1);
+ }
}
}
-
- snd_iprintf(buffer, "Sync to clock source: %s\n", src);
}
const struct snd_ff_protocol snd_ff_protocol_ff400 = {
- .get_clock = ff400_get_clock,
+ .handle_midi_msg = ff400_handle_midi_msg,
.begin_session = ff400_begin_session,
.finish_session = ff400_finish_session,
- .switch_fetching_mode = ff400_switch_fetching_mode,
-
- .dump_sync_status = ff400_dump_sync_status,
- .dump_clock_config = ff400_dump_clock_config,
-
- .midi_high_addr_reg = FF400_MIDI_HIGH_ADDR,
- .midi_rx_port_0_reg = FF400_MIDI_RX_PORT_0,
- .midi_rx_port_1_reg = FF400_MIDI_RX_PORT_1,
};
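The comment moved into keep_resources() above explains the 0xff channel mask: the FF400 registers keep the isochronous channel number in a 3-bit field, so only channels 0-7 are addressable. The general form of that mask, as a sketch (hypothetical helper, not part of the patch):

        /* mask of usable isochronous channels for an n-bit channel field */
        static u64 isoc_channels_mask(unsigned int field_bits)
        {
                return (1ULL << (1U << field_bits)) - 1;  /* 3 bits -> 0xff */
        }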
diff --git a/sound/firewire/fireface/ff-protocol-ff800.c b/sound/firewire/fireface/ff-protocol-ff800.c
new file mode 100644
index 000000000000..2acbf6039770
--- /dev/null
+++ b/sound/firewire/fireface/ff-protocol-ff800.c
@@ -0,0 +1,143 @@
+/*
+ * ff-protocol-ff800.c - a part of driver for RME Fireface series
+ *
+ * Copyright (c) 2018 Takashi Sakamoto
+ *
+ * Licensed under the terms of the GNU General Public License, version 2.
+ */
+
+#include <linux/delay.h>
+
+#include "ff.h"
+
+#define FF800_STF 0x0000fc88f000
+#define FF800_RX_PACKET_FORMAT 0x0000fc88f004
+#define FF800_ALLOC_TX_STREAM 0x0000fc88f008
+#define FF800_ISOC_COMM_START 0x0000fc88f00c
+#define FF800_TX_S800_FLAG 0x00000800
+#define FF800_ISOC_COMM_STOP 0x0000fc88f010
+
+#define FF800_TX_PACKET_ISOC_CH 0x0000801c0008
+
+static int allocate_rx_resources(struct snd_ff *ff)
+{
+ u32 data;
+ __le32 reg;
+ int err;
+
+ // Controllers should allocate isochronous resources for rx stream.
+ err = fw_iso_resources_allocate(&ff->rx_resources,
+ amdtp_stream_get_max_payload(&ff->rx_stream),
+ fw_parent_device(ff->unit)->max_speed);
+ if (err < 0)
+ return err;
+
+ // Set isochronous channel and the number of quadlets of rx packets.
+ data = ff->rx_stream.data_block_quadlets << 3;
+ data = (data << 8) | ff->rx_resources.channel;
+ reg = cpu_to_le32(data);
+ return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF800_RX_PACKET_FORMAT, &reg, sizeof(reg), 0);
+}
+
+static int allocate_tx_resources(struct snd_ff *ff)
+{
+ __le32 reg;
+ unsigned int count;
+ unsigned int tx_isoc_channel;
+ int err;
+
+ reg = cpu_to_le32(ff->tx_stream.data_block_quadlets);
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF800_ALLOC_TX_STREAM, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
+ // Wait till the format of tx packet is available.
+ count = 0;
+ while (count++ < 10) {
+ u32 data;
+ err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
+ FF800_TX_PACKET_ISOC_CH, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
+ data = le32_to_cpu(reg);
+ if (data != 0xffffffff) {
+ tx_isoc_channel = data;
+ break;
+ }
+
+ msleep(50);
+ }
+ if (count >= 10)
+ return -ETIMEDOUT;
+
+ // NOTE: this is a makeshift to start the OHCI 1394 IR context in the
+ // channel. Note that 'struct fw_iso_resources.allocated' stays false,
+ // so the channel is not deallocated at stop.
+ ff->tx_resources.channel = tx_isoc_channel;
+
+ return 0;
+}
+
+static int ff800_begin_session(struct snd_ff *ff, unsigned int rate)
+{
+ __le32 reg;
+ int err;
+
+ reg = cpu_to_le32(rate);
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF800_STF, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+
+ // If isochronous communication starts immediately after this write, the
+ // change of STF has no effect and the communication keeps running at the
+ // former STF. Sleep for a while to let the change settle.
+ msleep(100);
+
+ err = allocate_rx_resources(ff);
+ if (err < 0)
+ return err;
+
+ err = allocate_tx_resources(ff);
+ if (err < 0)
+ return err;
+
+ reg = cpu_to_le32(0x80000000);
+ reg |= cpu_to_le32(ff->tx_stream.data_block_quadlets);
+ if (fw_parent_device(ff->unit)->max_speed == SCODE_800)
+ reg |= cpu_to_le32(FF800_TX_S800_FLAG);
+ return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF800_ISOC_COMM_START, &reg, sizeof(reg), 0);
+}
+
+static void ff800_finish_session(struct snd_ff *ff)
+{
+ __le32 reg;
+
+ reg = cpu_to_le32(0x80000000);
+ snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
+ FF800_ISOC_COMM_STOP, &reg, sizeof(reg), 0);
+}
+
+static void ff800_handle_midi_msg(struct snd_ff *ff, __le32 *buf, size_t length)
+{
+ int i;
+
+ for (i = 0; i < length / 4; i++) {
+ u8 byte = le32_to_cpu(buf[i]) & 0xff;
+ struct snd_rawmidi_substream *substream;
+
+ substream = READ_ONCE(ff->tx_midi_substreams[0]);
+ if (substream)
+ snd_rawmidi_receive(substream, &byte, 1);
+ }
+}
+
+const struct snd_ff_protocol snd_ff_protocol_ff800 = {
+ .handle_midi_msg = ff800_handle_midi_msg,
+ .begin_session = ff800_begin_session,
+ .finish_session = ff800_finish_session,
+};
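In allocate_rx_resources() above, the quadlet written to FF800_RX_PACKET_FORMAT packs two values: the isochronous channel in the low byte, and the data-block size shifted above it. A sketch of the packing exactly as the hunk performs it (the meaning of the extra << 3 is the hardware's and is inferred, not documented):

        static u32 ff800_rx_packet_format(unsigned int data_block_quadlets,
                                          unsigned int isoc_channel)
        {
                u32 data = data_block_quadlets << 3;    /* size field, pre-shifted */

                return (data << 8) | (isoc_channel & 0xff);
        }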
diff --git a/sound/firewire/fireface/ff-stream.c b/sound/firewire/fireface/ff-stream.c
index 78880922120e..a490e4553721 100644
--- a/sound/firewire/fireface/ff-stream.c
+++ b/sound/firewire/fireface/ff-stream.c
@@ -10,73 +10,71 @@
#define CALLBACK_TIMEOUT_MS 200
-static int get_rate_mode(unsigned int rate, unsigned int *mode)
+int snd_ff_stream_get_multiplier_mode(enum cip_sfc sfc,
+ enum snd_ff_stream_mode *mode)
{
- int i;
-
- for (i = 0; i < CIP_SFC_COUNT; i++) {
- if (amdtp_rate_table[i] == rate)
- break;
- }
-
- if (i == CIP_SFC_COUNT)
+ static const enum snd_ff_stream_mode modes[] = {
+ [CIP_SFC_32000] = SND_FF_STREAM_MODE_LOW,
+ [CIP_SFC_44100] = SND_FF_STREAM_MODE_LOW,
+ [CIP_SFC_48000] = SND_FF_STREAM_MODE_LOW,
+ [CIP_SFC_88200] = SND_FF_STREAM_MODE_MID,
+ [CIP_SFC_96000] = SND_FF_STREAM_MODE_MID,
+ [CIP_SFC_176400] = SND_FF_STREAM_MODE_HIGH,
+ [CIP_SFC_192000] = SND_FF_STREAM_MODE_HIGH,
+ };
+
+ if (sfc >= CIP_SFC_COUNT)
return -EINVAL;
- *mode = ((int)i - 1) / 2;
+ *mode = modes[sfc];
return 0;
}
-/*
- * Fireface 400 manages isochronous channel number in 3 bit field. Therefore,
- * we can allocate between 0 and 7 channel.
- */
-static int keep_resources(struct snd_ff *ff, unsigned int rate)
+static void release_resources(struct snd_ff *ff)
{
- int mode;
- int err;
-
- err = get_rate_mode(rate, &mode);
- if (err < 0)
- return err;
+ fw_iso_resources_free(&ff->tx_resources);
+ fw_iso_resources_free(&ff->rx_resources);
+}
- /* Keep resources for in-stream. */
- err = amdtp_ff_set_parameters(&ff->tx_stream, rate,
- ff->spec->pcm_capture_channels[mode]);
- if (err < 0)
- return err;
- ff->tx_resources.channels_mask = 0x00000000000000ffuLL;
- err = fw_iso_resources_allocate(&ff->tx_resources,
- amdtp_stream_get_max_payload(&ff->tx_stream),
- fw_parent_device(ff->unit)->max_speed);
- if (err < 0)
- return err;
+static int switch_fetching_mode(struct snd_ff *ff, bool enable)
+{
+ unsigned int count;
+ __le32 *reg;
+ int i;
+ int err;
- /* Keep resources for out-stream. */
- err = amdtp_ff_set_parameters(&ff->rx_stream, rate,
- ff->spec->pcm_playback_channels[mode]);
- if (err < 0)
- return err;
- ff->rx_resources.channels_mask = 0x00000000000000ffuLL;
- err = fw_iso_resources_allocate(&ff->rx_resources,
- amdtp_stream_get_max_payload(&ff->rx_stream),
- fw_parent_device(ff->unit)->max_speed);
- if (err < 0)
- fw_iso_resources_free(&ff->tx_resources);
+ count = 0;
+ for (i = 0; i < SND_FF_STREAM_MODE_COUNT; ++i)
+ count = max(count, ff->spec->pcm_playback_channels[i]);
+
+ reg = kcalloc(count, sizeof(__le32), GFP_KERNEL);
+ if (!reg)
+ return -ENOMEM;
+
+ if (!enable) {
+ /*
+ * Each quadlet is corresponding to data channels in a data
+ * blocks in reverse order. Precisely, quadlets for available
+ * data channels should be enabled. Here, I take second best
+ * to fetch PCM frames from all of data channels regardless of
+ * stf.
+ */
+ for (i = 0; i < count; ++i)
+ reg[i] = cpu_to_le32(0x00000001);
+ }
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
+ SND_FF_REG_FETCH_PCM_FRAMES, reg,
+ sizeof(__le32) * count, 0);
+ kfree(reg);
return err;
}
-static void release_resources(struct snd_ff *ff)
-{
- fw_iso_resources_free(&ff->tx_resources);
- fw_iso_resources_free(&ff->rx_resources);
-}
-
static inline void finish_session(struct snd_ff *ff)
{
ff->spec->protocol->finish_session(ff);
- ff->spec->protocol->switch_fetching_mode(ff, false);
+ switch_fetching_mode(ff, false);
}
static int init_stream(struct snd_ff *ff, enum amdtp_stream_direction dir)
@@ -149,7 +147,7 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
if (ff->substreams_counter == 0)
return 0;
- err = ff->spec->protocol->get_clock(ff, &curr_rate, &src);
+ err = snd_ff_transaction_get_clock(ff, &curr_rate, &src);
if (err < 0)
return err;
if (curr_rate != rate ||
@@ -168,9 +166,29 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
* packets. Then, the device transfers packets.
*/
if (!amdtp_stream_running(&ff->rx_stream)) {
- err = keep_resources(ff, rate);
+ enum snd_ff_stream_mode mode;
+ int i;
+
+ for (i = 0; i < CIP_SFC_COUNT; ++i) {
+ if (amdtp_rate_table[i] == rate)
+ break;
+ }
+ if (i >= CIP_SFC_COUNT)
+ return -EINVAL;
+
+ err = snd_ff_stream_get_multiplier_mode(i, &mode);
if (err < 0)
- goto error;
+ return err;
+
+ err = amdtp_ff_set_parameters(&ff->tx_stream, rate,
+ ff->spec->pcm_capture_channels[mode]);
+ if (err < 0)
+ return err;
+
+ err = amdtp_ff_set_parameters(&ff->rx_stream, rate,
+ ff->spec->pcm_playback_channels[mode]);
+ if (err < 0)
+ return err;
err = ff->spec->protocol->begin_session(ff, rate);
if (err < 0)
@@ -188,7 +206,7 @@ int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
goto error;
}
- err = ff->spec->protocol->switch_fetching_mode(ff, true);
+ err = switch_fetching_mode(ff, true);
if (err < 0)
goto error;
}
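The rate-to-mode lookup above is open-coded in both ff-pcm.c and snd_ff_stream_start_duplex(): scan amdtp_rate_table for the nominal rate, then feed the index (a CIP SFC value) to the new helper. If the pattern were factored out, it could look like this sketch (a hypothetical helper, not part of the patch):

        static int rate_to_stream_mode(unsigned int rate,
                                       enum snd_ff_stream_mode *mode)
        {
                int i;

                for (i = 0; i < CIP_SFC_COUNT; ++i) {
                        if (amdtp_rate_table[i] == rate)
                                return snd_ff_stream_get_multiplier_mode(i, mode);
                }
                return -EINVAL;
        }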
diff --git a/sound/firewire/fireface/ff-transaction.c b/sound/firewire/fireface/ff-transaction.c
index 332b29f8ed75..5f4ddfd55403 100644
--- a/sound/firewire/fireface/ff-transaction.c
+++ b/sound/firewire/fireface/ff-transaction.c
@@ -8,6 +8,72 @@
#include "ff.h"
+#define SND_FF_REG_MIDI_RX_PORT_0 0x000080180000ull
+#define SND_FF_REG_MIDI_RX_PORT_1 0x000080190000ull
+
+int snd_ff_transaction_get_clock(struct snd_ff *ff, unsigned int *rate,
+ enum snd_ff_clock_src *src)
+{
+ __le32 reg;
+ u32 data;
+ int err;
+
+ err = snd_fw_transaction(ff->unit, TCODE_READ_QUADLET_REQUEST,
+ SND_FF_REG_CLOCK_CONFIG, &reg, sizeof(reg), 0);
+ if (err < 0)
+ return err;
+ data = le32_to_cpu(reg);
+
+ /* Calculate sampling rate. */
+ switch ((data >> 1) & 0x03) {
+ case 0x01:
+ *rate = 32000;
+ break;
+ case 0x00:
+ *rate = 44100;
+ break;
+ case 0x03:
+ *rate = 48000;
+ break;
+ case 0x02:
+ default:
+ return -EIO;
+ }
+
+ if (data & 0x08)
+ *rate *= 2;
+ else if (data & 0x10)
+ *rate *= 4;
+
+ /* Calculate source of clock. */
+ if (data & 0x01) {
+ *src = SND_FF_CLOCK_SRC_INTERNAL;
+ } else {
+ /* TODO: 0x02, 0x06, 0x07? */
+ switch ((data >> 10) & 0x07) {
+ case 0x00:
+ *src = SND_FF_CLOCK_SRC_ADAT1;
+ break;
+ case 0x01:
+ *src = SND_FF_CLOCK_SRC_ADAT2;
+ break;
+ case 0x03:
+ *src = SND_FF_CLOCK_SRC_SPDIF;
+ break;
+ case 0x04:
+ *src = SND_FF_CLOCK_SRC_WORD;
+ break;
+ case 0x05:
+ *src = SND_FF_CLOCK_SRC_LTC;
+ break;
+ default:
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
static void finish_transmit_midi_msg(struct snd_ff *ff, unsigned int port,
int rcode)
{
@@ -90,10 +156,10 @@ static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
fill_midi_buf(ff, port, i, buf[i]);
if (port == 0) {
- addr = ff->spec->protocol->midi_rx_port_0_reg;
+ addr = SND_FF_REG_MIDI_RX_PORT_0;
callback = finish_transmit_midi0_msg;
} else {
- addr = ff->spec->protocol->midi_rx_port_1_reg;
+ addr = SND_FF_REG_MIDI_RX_PORT_1;
callback = finish_transmit_midi1_msg;
}
@@ -140,42 +206,10 @@ static void handle_midi_msg(struct fw_card *card, struct fw_request *request,
{
struct snd_ff *ff = callback_data;
__le32 *buf = data;
- u32 quad;
- u8 byte;
- unsigned int index;
- struct snd_rawmidi_substream *substream;
- int i;
fw_send_response(card, request, RCODE_COMPLETE);
- for (i = 0; i < length / 4; i++) {
- quad = le32_to_cpu(buf[i]);
-
- /* Message in first port. */
- /*
- * This value may represent the index of this unit when the same
- * units are on the same IEEE 1394 bus. This driver doesn't use
- * it.
- */
- index = (quad >> 8) & 0xff;
- if (index > 0) {
- substream = READ_ONCE(ff->tx_midi_substreams[0]);
- if (substream != NULL) {
- byte = quad & 0xff;
- snd_rawmidi_receive(substream, &byte, 1);
- }
- }
-
- /* Message in second port. */
- index = (quad >> 24) & 0xff;
- if (index > 0) {
- substream = READ_ONCE(ff->tx_midi_substreams[1]);
- if (substream != NULL) {
- byte = (quad >> 16) & 0xff;
- snd_rawmidi_receive(substream, &byte, 1);
- }
- }
- }
+ ff->spec->protocol->handle_midi_msg(ff, buf, length);
}
static int allocate_own_address(struct snd_ff *ff, int i)
@@ -203,36 +237,33 @@ static int allocate_own_address(struct snd_ff *ff, int i)
}
/*
- * The configuration to start asynchronous transactions for MIDI messages is in
- * 0x'0000'8010'051c. This register includes the other options, thus this driver
- * doesn't touch it and leaves the decision to userspace. The userspace MUST add
- * 0x04000000 to write transactions to the register to receive any MIDI
- * messages.
- *
- * Here, I just describe MIDI-related offsets of the register, in little-endian
- * order.
- *
* Controllers are allowed to register higher 4 bytes of address to receive
- * the transactions. The register is 0x'0000'8010'03f4. On the other hand, the
- * controllers are not allowed to register lower 4 bytes of the address. They
- * are forced to select from 4 options by writing corresponding bits to
- * 0x'0000'8010'051c.
+ * the transactions. Different models have different registers for this purpose;
+ * e.g. 0x'0000'8010'03f4 for Fireface 400.
+ * The controllers are not allowed to register lower 4 bytes of the address.
+ * They are forced to select one of 4 options for that part of the address by
+ * writing corresponding bits to 0x'0000'8010'051c.
+ *
+ * The 3rd-6th bits of this register are flags to indicate the lower 4 bytes of
+ * the address to which the device transfers the transactions. In short:
+ * - 0x20: 0x'....'....'0000'0180
+ * - 0x10: 0x'....'....'0000'0100
+ * - 0x08: 0x'....'....'0000'0080
+ * - 0x04: 0x'....'....'0000'0000
*
- * The 3rd-6th bits in MSB of this register are used to indicate lower 4 bytes
- * of address to which the device transferrs the transactions.
- * - 6th: 0x'....'....'0000'0180
- * - 5th: 0x'....'....'0000'0100
- * - 4th: 0x'....'....'0000'0080
- * - 3rd: 0x'....'....'0000'0000
+ * This driver configures 0x'....'....'0000'0000 to receive MIDI messages from
+ * units. The 3rd bit of the register should be configured; however, this
+ * driver delegates the task to userspace applications due to a restriction
+ * that this register is write-only and the other bits have their own effects.
*
- * This driver configure 0x'....'....'0000'0000 for units to receive MIDI
- * messages. 3rd bit of the register should be configured, however this driver
- * deligates this task to user space applications due to a restriction that
- * this register is write-only and the other bits have own effects.
+ * Unlike Fireface 800, Fireface 400 cancels transferring asynchronous
+ * transactions when the 1st or 2nd bit of the register is set. These two bits
+ * have the same effect.
+ * - 0x02, 0x01: cancel transferring
*
- * The 1st and 2nd bits in LSB of this register are used to cancel transferring
- * asynchronous transactions. These two bits have the same effect.
- * - 1st/2nd: cancel transferring
+ * On the other hand, these bits have no effect on Fireface 800. This model
+ * cancels asynchronous transactions when the higher 4 bytes of the address are
+ * overwritten with zero.
*/
int snd_ff_transaction_reregister(struct snd_ff *ff)
{
@@ -247,7 +278,7 @@ int snd_ff_transaction_reregister(struct snd_ff *ff)
addr = (fw_card->node_id << 16) | (ff->async_handler.offset >> 32);
reg = cpu_to_le32(addr);
return snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- ff->spec->protocol->midi_high_addr_reg,
+ ff->spec->midi_high_addr,
&reg, sizeof(reg), 0);
}
@@ -288,7 +319,7 @@ void snd_ff_transaction_unregister(struct snd_ff *ff)
/* Release higher 4 bytes of address. */
reg = cpu_to_le32(0x00000000);
snd_fw_transaction(ff->unit, TCODE_WRITE_QUADLET_REQUEST,
- ff->spec->protocol->midi_high_addr_reg,
+ ff->spec->midi_high_addr,
&reg, sizeof(reg), 0);
fw_core_remove_address_handler(&ff->async_handler);
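The quadlet written in snd_ff_transaction_reregister() above combines the local node ID with the upper half of the 48-bit address of the registered handler, so the unit knows where to post its MIDI write transactions. As a sketch (illustrative helper; mirrors the computation in the hunk):

        static u32 midi_high_addr_quadlet(unsigned int node_id, u64 handler_offset)
        {
                return (node_id << 16) | (u32)(handler_offset >> 32);
        }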
diff --git a/sound/firewire/fireface/ff.c b/sound/firewire/fireface/ff.c
index 3f61cfeace69..36575f4159d1 100644
--- a/sound/firewire/fireface/ff.c
+++ b/sound/firewire/fireface/ff.c
@@ -145,6 +145,16 @@ static void snd_ff_remove(struct fw_unit *unit)
fw_unit_put(ff->unit);
}
+static const struct snd_ff_spec spec_ff800 = {
+ .name = "Fireface800",
+ .pcm_capture_channels = {28, 20, 12},
+ .pcm_playback_channels = {28, 20, 12},
+ .midi_in_ports = 1,
+ .midi_out_ports = 1,
+ .protocol = &snd_ff_protocol_ff800,
+ .midi_high_addr = 0x000200000320ull,
+};
+
static const struct snd_ff_spec spec_ff400 = {
.name = "Fireface400",
.pcm_capture_channels = {18, 14, 10},
@@ -152,9 +162,22 @@ static const struct snd_ff_spec spec_ff400 = {
.midi_in_ports = 2,
.midi_out_ports = 2,
.protocol = &snd_ff_protocol_ff400,
+ .midi_high_addr = 0x0000801003f4ull,
};
static const struct ieee1394_device_id snd_ff_id_table[] = {
+ /* Fireface 800 */
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION |
+ IEEE1394_MATCH_MODEL_ID,
+ .vendor_id = OUI_RME,
+ .specifier_id = OUI_RME,
+ .version = 0x000001,
+ .model_id = 0x101800,
+ .driver_data = (kernel_ulong_t)&spec_ff800,
+ },
/* Fireface 400 */
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
@@ -162,7 +185,7 @@ static const struct ieee1394_device_id snd_ff_id_table[] = {
IEEE1394_MATCH_VERSION |
IEEE1394_MATCH_MODEL_ID,
.vendor_id = OUI_RME,
- .specifier_id = 0x000a35,
+ .specifier_id = OUI_RME,
.version = 0x000002,
.model_id = 0x101800,
.driver_data = (kernel_ulong_t)&spec_ff400,
diff --git a/sound/firewire/fireface/ff.h b/sound/firewire/fireface/ff.h
index 64df44beb950..7dfc7745a914 100644
--- a/sound/firewire/fireface/ff.h
+++ b/sound/firewire/fireface/ff.h
@@ -31,23 +31,34 @@
#include "../amdtp-stream.h"
#include "../iso-resources.h"
-#define SND_FF_STREAM_MODES 3
-
#define SND_FF_MAXIMIM_MIDI_QUADS 9
#define SND_FF_IN_MIDI_PORTS 2
#define SND_FF_OUT_MIDI_PORTS 2
+#define SND_FF_REG_SYNC_STATUS 0x0000801c0000ull
+/* For block write request. */
+#define SND_FF_REG_FETCH_PCM_FRAMES 0x0000801c0000ull
+#define SND_FF_REG_CLOCK_CONFIG 0x0000801c0004ull
+
+enum snd_ff_stream_mode {
+ SND_FF_STREAM_MODE_LOW = 0,
+ SND_FF_STREAM_MODE_MID,
+ SND_FF_STREAM_MODE_HIGH,
+ SND_FF_STREAM_MODE_COUNT,
+};
+
struct snd_ff_protocol;
struct snd_ff_spec {
const char *const name;
- const unsigned int pcm_capture_channels[SND_FF_STREAM_MODES];
- const unsigned int pcm_playback_channels[SND_FF_STREAM_MODES];
+ const unsigned int pcm_capture_channels[SND_FF_STREAM_MODE_COUNT];
+ const unsigned int pcm_playback_channels[SND_FF_STREAM_MODE_COUNT];
unsigned int midi_in_ports;
unsigned int midi_out_ports;
const struct snd_ff_protocol *protocol;
+ u64 midi_high_addr;
};
struct snd_ff {
@@ -89,31 +100,24 @@ struct snd_ff {
enum snd_ff_clock_src {
SND_FF_CLOCK_SRC_INTERNAL,
SND_FF_CLOCK_SRC_SPDIF,
- SND_FF_CLOCK_SRC_ADAT,
+ SND_FF_CLOCK_SRC_ADAT1,
+ SND_FF_CLOCK_SRC_ADAT2,
SND_FF_CLOCK_SRC_WORD,
SND_FF_CLOCK_SRC_LTC,
- /* TODO: perhaps ADAT2 and TCO exists. */
+ /* TODO: perhaps TCO exists. */
};
struct snd_ff_protocol {
- int (*get_clock)(struct snd_ff *ff, unsigned int *rate,
- enum snd_ff_clock_src *src);
+ void (*handle_midi_msg)(struct snd_ff *ff, __le32 *buf, size_t length);
int (*begin_session)(struct snd_ff *ff, unsigned int rate);
void (*finish_session)(struct snd_ff *ff);
- int (*switch_fetching_mode)(struct snd_ff *ff, bool enable);
-
- void (*dump_sync_status)(struct snd_ff *ff,
- struct snd_info_buffer *buffer);
- void (*dump_clock_config)(struct snd_ff *ff,
- struct snd_info_buffer *buffer);
-
- u64 midi_high_addr_reg;
- u64 midi_rx_port_0_reg;
- u64 midi_rx_port_1_reg;
};
+extern const struct snd_ff_protocol snd_ff_protocol_ff800;
extern const struct snd_ff_protocol snd_ff_protocol_ff400;
+int snd_ff_transaction_get_clock(struct snd_ff *ff, unsigned int *rate,
+ enum snd_ff_clock_src *src);
int snd_ff_transaction_register(struct snd_ff *ff);
int snd_ff_transaction_reregister(struct snd_ff *ff);
void snd_ff_transaction_unregister(struct snd_ff *ff);
@@ -125,6 +129,8 @@ int amdtp_ff_add_pcm_hw_constraints(struct amdtp_stream *s,
int amdtp_ff_init(struct amdtp_stream *s, struct fw_unit *unit,
enum amdtp_stream_direction dir);
+int snd_ff_stream_get_multiplier_mode(enum cip_sfc sfc,
+ enum snd_ff_stream_mode *mode);
int snd_ff_stream_init_duplex(struct snd_ff *ff);
void snd_ff_stream_destroy_duplex(struct snd_ff *ff);
int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate);
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index afb78d90384b..3d27f3378d5d 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -20,6 +20,7 @@
#define VENDOR_LACIE 0x00d04b
#define VENDOR_TASCAM 0x00022e
#define OUI_STANTON 0x001260
+#define OUI_APOGEE 0x0003db
#define MODEL_SATELLITE 0x00200f
@@ -397,6 +398,13 @@ static const struct ieee1394_device_id oxfw_id_table[] = {
.vendor_id = OUI_STANTON,
.model_id = 0x002000,
},
+ // APOGEE, duet FireWire
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_MODEL_ID,
+ .vendor_id = OUI_APOGEE,
+ .model_id = 0x01dddd,
+ },
{ }
};
MODULE_DEVICE_TABLE(ieee1394, oxfw_id_table);
diff --git a/sound/firewire/tascam/amdtp-tascam.c b/sound/firewire/tascam/amdtp-tascam.c
index ab482423c165..a52d1f76c610 100644
--- a/sound/firewire/tascam/amdtp-tascam.c
+++ b/sound/firewire/tascam/amdtp-tascam.c
@@ -117,6 +117,55 @@ int amdtp_tscm_add_pcm_hw_constraints(struct amdtp_stream *s,
return amdtp_stream_add_pcm_hw_constraints(s, runtime);
}
+static void read_status_messages(struct amdtp_stream *s,
+ __be32 *buffer, unsigned int data_blocks)
+{
+ struct snd_tscm *tscm = container_of(s, struct snd_tscm, tx_stream);
+ bool used = READ_ONCE(tscm->hwdep->used);
+ int i;
+
+ for (i = 0; i < data_blocks; i++) {
+ unsigned int index;
+ __be32 before;
+ __be32 after;
+
+ index = be32_to_cpu(buffer[0]) % SNDRV_FIREWIRE_TASCAM_STATE_COUNT;
+ before = tscm->state[index];
+ after = buffer[s->data_block_quadlets - 1];
+
+ if (used && index > 4 && index < 16) {
+ __be32 mask;
+
+ if (index == 5)
+ mask = cpu_to_be32(~0x0000ffff);
+ else if (index == 6)
+ mask = cpu_to_be32(~0x0000ffff);
+ else if (index == 8)
+ mask = cpu_to_be32(~0x000f0f00);
+ else
+ mask = cpu_to_be32(~0x00000000);
+
+ if ((before ^ after) & mask) {
+ struct snd_firewire_tascam_change *entry =
+ &tscm->queue[tscm->push_pos];
+
+ spin_lock_irq(&tscm->lock);
+ entry->index = index;
+ entry->before = before;
+ entry->after = after;
+ if (++tscm->push_pos >= SND_TSCM_QUEUE_COUNT)
+ tscm->push_pos = 0;
+ spin_unlock_irq(&tscm->lock);
+
+ wake_up(&tscm->hwdep_wait);
+ }
+ }
+
+ tscm->state[index] = after;
+ buffer += s->data_block_quadlets;
+ }
+}
+
static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
__be32 *buffer,
unsigned int data_blocks,
@@ -128,7 +177,7 @@ static unsigned int process_tx_data_blocks(struct amdtp_stream *s,
if (data_blocks > 0 && pcm)
read_pcm_s32(s, pcm, buffer, data_blocks);
- /* A place holder for control messages. */
+ read_status_messages(s, buffer, data_blocks);
return data_blocks;
}
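read_status_messages() above compares each cached status quadlet against the incoming one, masking out bits that are expected to flip on every packet (the per-index masks in the hunk, presumably fast-changing fields such as meters). The core test, shown host-endian for clarity (a sketch, not driver code):

        static bool state_changed(u32 before, u32 after, u32 ignore_bits)
        {
                return ((before ^ after) & ~ignore_bits) != 0;
        }

        /* e.g. state_changed(before, after, 0x0000ffff) for indices 5 and 6 */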
diff --git a/sound/firewire/tascam/tascam-hwdep.c b/sound/firewire/tascam/tascam-hwdep.c
index 4e4c1e9020e8..0414abf5daa8 100644
--- a/sound/firewire/tascam/tascam-hwdep.c
+++ b/sound/firewire/tascam/tascam-hwdep.c
@@ -16,18 +16,93 @@
#include "tascam.h"
+static long tscm_hwdep_read_locked(struct snd_tscm *tscm, char __user *buf,
+ long count, loff_t *offset)
+{
+ struct snd_firewire_event_lock_status event = {
+ .type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+ };
+
+ event.status = (tscm->dev_lock_count > 0);
+ tscm->dev_lock_changed = false;
+ count = min_t(long, count, sizeof(event));
+
+ spin_unlock_irq(&tscm->lock);
+
+ if (copy_to_user(buf, &event, count))
+ return -EFAULT;
+
+ return count;
+}
+
+static long tscm_hwdep_read_queue(struct snd_tscm *tscm, char __user *buf,
+ long remained, loff_t *offset)
+{
+ char __user *pos = buf;
+ unsigned int type = SNDRV_FIREWIRE_EVENT_TASCAM_CONTROL;
+ struct snd_firewire_tascam_change *entries = tscm->queue;
+ long count;
+
+ // The buffer must have room for at least one control event.
+ if (remained < sizeof(type) + sizeof(*entries)) {
+ spin_unlock_irq(&tscm->lock);
+ return -EINVAL;
+ }
+
+ // Copy the type field later.
+ count = sizeof(type);
+ remained -= sizeof(type);
+ pos += sizeof(type);
+
+ while (true) {
+ unsigned int head_pos;
+ unsigned int tail_pos;
+ unsigned int length;
+
+ if (tscm->pull_pos == tscm->push_pos)
+ break;
+ else if (tscm->pull_pos < tscm->push_pos)
+ tail_pos = tscm->push_pos;
+ else
+ tail_pos = SND_TSCM_QUEUE_COUNT;
+ head_pos = tscm->pull_pos;
+
+ length = (tail_pos - head_pos) * sizeof(*entries);
+ if (remained < length)
+ length = rounddown(remained, sizeof(*entries));
+ if (length == 0)
+ break;
+
+ spin_unlock_irq(&tscm->lock);
+ if (copy_to_user(pos, &entries[head_pos], length))
+ return -EFAULT;
+
+ spin_lock_irq(&tscm->lock);
+
+ tscm->pull_pos = tail_pos % SND_TSCM_QUEUE_COUNT;
+
+ count += length;
+ remained -= length;
+ pos += length;
+ }
+
+ spin_unlock_irq(&tscm->lock);
+
+ if (copy_to_user(buf, &type, sizeof(type)))
+ return -EFAULT;
+
+ return count;
+}
+
static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
loff_t *offset)
{
struct snd_tscm *tscm = hwdep->private_data;
DEFINE_WAIT(wait);
- union snd_firewire_event event = {
- .lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
- };
spin_lock_irq(&tscm->lock);
- while (!tscm->dev_lock_changed) {
+ while (!tscm->dev_lock_changed && tscm->push_pos == tscm->pull_pos) {
prepare_to_wait(&tscm->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock_irq(&tscm->lock);
schedule();
@@ -37,15 +112,15 @@ static long hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
spin_lock_irq(&tscm->lock);
}
- event.lock_status.status = (tscm->dev_lock_count > 0);
- tscm->dev_lock_changed = false;
-
- spin_unlock_irq(&tscm->lock);
-
- count = min_t(long, count, sizeof(event.lock_status));
-
- if (copy_to_user(buf, &event, count))
- return -EFAULT;
+ // NOTE: the acquired lock is released by the callee.
+ if (tscm->dev_lock_changed) {
+ count = tscm_hwdep_read_locked(tscm, buf, count, offset);
+ } else if (tscm->push_pos != tscm->pull_pos) {
+ count = tscm_hwdep_read_queue(tscm, buf, count, offset);
+ } else {
+ spin_unlock_irq(&tscm->lock);
+ count = 0;
+ }
return count;
}
@@ -59,7 +134,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
poll_wait(file, &tscm->hwdep_wait, wait);
spin_lock_irq(&tscm->lock);
- if (tscm->dev_lock_changed)
+ if (tscm->dev_lock_changed || tscm->push_pos != tscm->pull_pos)
events = EPOLLIN | EPOLLRDNORM;
else
events = 0;
@@ -123,6 +198,14 @@ static int hwdep_unlock(struct snd_tscm *tscm)
return err;
}
+static int tscm_hwdep_state(struct snd_tscm *tscm, void __user *arg)
+{
+ if (copy_to_user(arg, tscm->state, sizeof(tscm->state)))
+ return -EFAULT;
+
+ return 0;
+}
+
static int hwdep_release(struct snd_hwdep *hwdep, struct file *file)
{
struct snd_tscm *tscm = hwdep->private_data;
@@ -147,6 +230,8 @@ static int hwdep_ioctl(struct snd_hwdep *hwdep, struct file *file,
return hwdep_lock(tscm);
case SNDRV_FIREWIRE_IOCTL_UNLOCK:
return hwdep_unlock(tscm);
+ case SNDRV_FIREWIRE_IOCTL_TASCAM_STATE:
+ return tscm_hwdep_state(tscm, (void __user *)arg);
default:
return -ENOIOCTLCMD;
}
@@ -185,5 +270,7 @@ int snd_tscm_create_hwdep_device(struct snd_tscm *tscm)
hwdep->private_data = tscm;
hwdep->exclusive = true;
+ tscm->hwdep = hwdep;
+
return err;
}
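A rough userspace sketch of draining the new control-event queue through the hwdep character device (the device path and buffer size are assumptions; error handling trimmed):

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>
        #include <sound/firewire.h>

        int main(void)
        {
                char buf[4096];
                int fd = open("/dev/snd/hwC0D0", O_RDONLY);  /* hypothetical card */
                ssize_t len = read(fd, buf, sizeof(buf));    /* blocks until events */

                if (len >= (ssize_t)sizeof(unsigned int) &&
                    *(unsigned int *)buf == SNDRV_FIREWIRE_EVENT_TASCAM_CONTROL) {
                        struct snd_firewire_tascam_change *c =
                                        (void *)(buf + sizeof(unsigned int));
                        int i, count = (len - sizeof(unsigned int)) / sizeof(*c);

                        for (i = 0; i < count; i++)  /* before/after are big-endian */
                                printf("index %u: %08x -> %08x\n",
                                       c[i].index, c[i].before, c[i].after);
                }
                close(fd);
                return 0;
        }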
diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h
index a5bd167eb5d9..6a411ee0dcf1 100644
--- a/sound/firewire/tascam/tascam.h
+++ b/sound/firewire/tascam/tascam.h
@@ -62,6 +62,8 @@ struct snd_fw_async_midi_port {
int consume_bytes;
};
+#define SND_TSCM_QUEUE_COUNT 16
+
struct snd_tscm {
struct snd_card *card;
struct fw_unit *unit;
@@ -89,6 +91,13 @@ struct snd_tscm {
/* For MIDI message outgoing transactions. */
struct snd_fw_async_midi_port out_ports[TSCM_MIDI_OUT_PORT_MAX];
+
+ // A cache of status information in tx isoc packets.
+ __be32 state[SNDRV_FIREWIRE_TASCAM_STATE_COUNT];
+ struct snd_hwdep *hwdep;
+ struct snd_firewire_tascam_change queue[SND_TSCM_QUEUE_COUNT];
+ unsigned int pull_pos;
+ unsigned int push_pos;
};
#define TSCM_ADDR_BASE 0xffff00000000ull
diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
index 714a51721a31..012305177f68 100644
--- a/sound/hda/hdac_bus.c
+++ b/sound/hda/hdac_bus.c
@@ -9,8 +9,6 @@
#include <sound/hdaudio.h>
#include "trace.h"
-static void process_unsol_events(struct work_struct *work);
-
static const struct hdac_bus_ops default_ops = {
.command = snd_hdac_bus_send_cmd,
.get_response = snd_hdac_bus_get_response,
@@ -37,7 +35,7 @@ int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
bus->io_ops = io_ops;
INIT_LIST_HEAD(&bus->stream_list);
INIT_LIST_HEAD(&bus->codec_list);
- INIT_WORK(&bus->unsol_work, process_unsol_events);
+ INIT_WORK(&bus->unsol_work, snd_hdac_bus_process_unsol_events);
spin_lock_init(&bus->reg_lock);
mutex_init(&bus->cmd_mutex);
bus->irq = -1;
@@ -148,7 +146,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_bus_queue_event);
/*
* process queued unsolicited events
*/
-static void process_unsol_events(struct work_struct *work)
+void snd_hdac_bus_process_unsol_events(struct work_struct *work)
{
struct hdac_bus *bus = container_of(work, struct hdac_bus, unsol_work);
struct hdac_device *codec;
@@ -171,6 +169,7 @@ static void process_unsol_events(struct work_struct *work)
drv->unsol_event(codec, res);
}
}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_process_unsol_events);
/**
* snd_hdac_bus_add_device - Add a codec to bus
diff --git a/sound/hda/hdac_component.c b/sound/hda/hdac_component.c
index 6e46a9c73aed..a6d37b9d6413 100644
--- a/sound/hda/hdac_component.c
+++ b/sound/hda/hdac_component.c
@@ -54,41 +54,44 @@ EXPORT_SYMBOL_GPL(snd_hdac_set_codec_wakeup);
/**
* snd_hdac_display_power - Power up / down the power refcount
* @bus: HDA core bus
+ * @idx: HDA codec address, pass HDA_CODEC_IDX_CONTROLLER for controller
* @enable: power up or down
*
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with graphics driver.
+ * This function is used by either HD-audio controller or codec driver that
+ * needs the interaction with graphics driver.
*
- * This function manages a refcount and calls the get_power() and
+ * This function updates the power status, and calls the get_power() and
* put_power() ops accordingly, toggling the codec wakeup, too.
- *
- * Returns zero for success or a negative error code.
*/
-int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+void snd_hdac_display_power(struct hdac_bus *bus, unsigned int idx, bool enable)
{
struct drm_audio_component *acomp = bus->audio_component;
- if (!acomp || !acomp->ops)
- return -ENODEV;
-
dev_dbg(bus->dev, "display power %s\n",
enable ? "enable" : "disable");
+ if (enable)
+ set_bit(idx, &bus->display_power_status);
+ else
+ clear_bit(idx, &bus->display_power_status);
- if (enable) {
- if (!bus->drm_power_refcount++) {
+ if (!acomp || !acomp->ops)
+ return;
+
+ if (bus->display_power_status) {
+ if (!bus->display_power_active) {
if (acomp->ops->get_power)
acomp->ops->get_power(acomp->dev);
snd_hdac_set_codec_wakeup(bus, true);
snd_hdac_set_codec_wakeup(bus, false);
+ bus->display_power_active = true;
}
} else {
- WARN_ON(!bus->drm_power_refcount);
- if (!--bus->drm_power_refcount)
+ if (bus->display_power_active) {
if (acomp->ops->put_power)
acomp->ops->put_power(acomp->dev);
+ bus->display_power_active = false;
+ }
}
-
- return 0;
}
EXPORT_SYMBOL_GPL(snd_hdac_display_power);
@@ -321,10 +324,12 @@ int snd_hdac_acomp_exit(struct hdac_bus *bus)
if (!acomp)
return 0;
- WARN_ON(bus->drm_power_refcount);
- if (bus->drm_power_refcount > 0 && acomp->ops)
+ if (WARN_ON(bus->display_power_active) && acomp->ops)
acomp->ops->put_power(acomp->dev);
+ bus->display_power_active = false;
+ bus->display_power_status = 0;
+
component_master_del(dev, &hdac_component_master_ops);
bus->audio_component = NULL;
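The refactoring above replaces a fragile refcount with one ownership bit per requester (a codec address, or HDA_CODEC_IDX_CONTROLLER), so unbalanced enable/disable calls from a single owner can no longer wedge the power state. A host-side toy model of the bookkeeping:

        static unsigned long power_status;   /* one bit per requester */
        static bool power_active;            /* whether get_power() was called */

        static void display_power(unsigned int idx, bool enable)
        {
                if (enable)
                        power_status |= 1UL << idx;
                else
                        power_status &= ~(1UL << idx);

                if (power_status && !power_active)
                        power_active = true;     /* would call get_power() */
                else if (!power_status && power_active)
                        power_active = false;    /* would call put_power() */
        }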
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index dbf02a3a8d2f..95b073ee4b32 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -622,23 +622,6 @@ int snd_hdac_power_down_pm(struct hdac_device *codec)
EXPORT_SYMBOL_GPL(snd_hdac_power_down_pm);
#endif
-/**
- * snd_hdac_link_power - Enable/disable the link power for a codec
- * @codec: the codec object
- * @bool: enable or disable the link power
- */
-int snd_hdac_link_power(struct hdac_device *codec, bool enable)
-{
- if (!codec->link_power_control)
- return 0;
-
- if (codec->bus->ops->link_power)
- return codec->bus->ops->link_power(codec->bus, enable);
- else
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(snd_hdac_link_power);
-
/* codec vendor labels */
struct hda_vendor_id {
unsigned int id;
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 32453f81b95a..3a5008837576 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -1531,7 +1531,6 @@ static int snd_wss_playback_open(struct snd_pcm_substream *substream)
if (err < 0) {
if (chip->release_dma)
chip->release_dma(chip, chip->dma_private_data, chip->dma1);
- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
return err;
}
chip->playback_substream = substream;
@@ -1572,7 +1571,6 @@ static int snd_wss_capture_open(struct snd_pcm_substream *substream)
if (err < 0) {
if (chip->release_dma)
chip->release_dma(chip, chip->dma_private_data, chip->dma2);
- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
return err;
}
chip->capture_substream = substream;
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index f4459d1a9d67..27b468f057dd 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -824,7 +824,7 @@ static int snd_ac97_put_spsa(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
int reg = kcontrol->private_value & 0xff;
- int shift = (kcontrol->private_value >> 8) & 0xff;
+ int shift = (kcontrol->private_value >> 8) & 0x0f;
int mask = (kcontrol->private_value >> 16) & 0xff;
// int invert = (kcontrol->private_value >> 24) & 0xff;
unsigned short value, old, new;
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index a31fe1550903..aad74e809797 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -1183,7 +1183,7 @@ static int snd_card_asihpi_capture_prepare(struct snd_pcm_substream *substream)
static u64 snd_card_asihpi_capture_formats(struct snd_card_asihpi *asihpi,
u32 h_stream)
{
- struct hpi_format hpi_format;
+ struct hpi_format hpi_format;
u16 format;
u16 err;
u32 h_control;
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 6ebe817801ea..1f25e6d029d8 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -36,6 +36,7 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/tlv.h>
@@ -1026,6 +1027,8 @@ static int snd_emu10k1_ipcm_poke(struct snd_emu10k1 *emu,
if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
return -EINVAL;
+ ipcm->substream = array_index_nospec(ipcm->substream,
+ EMU10K1_FX8010_PCM_COUNT);
if (ipcm->channels > 32)
return -EINVAL;
pcm = &emu->fx8010.pcm[ipcm->substream];
@@ -1072,6 +1075,8 @@ static int snd_emu10k1_ipcm_peek(struct snd_emu10k1 *emu,
if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
return -EINVAL;
+ ipcm->substream = array_index_nospec(ipcm->substream,
+ EMU10K1_FX8010_PCM_COUNT);
pcm = &emu->fx8010.pcm[ipcm->substream];
mutex_lock(&emu->fx8010.lock);
spin_lock_irq(&emu->reg_lock);
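The two hunks above apply the usual Spectre-v1 hardening: after the bounds check, array_index_nospec() clamps the index so it cannot be used out of range under speculative execution. The generic pattern, as a sketch:

        #include <linux/nospec.h>

        static u32 read_entry(const u32 *table, size_t nr, size_t idx)
        {
                if (idx >= nr)
                        return 0;
                idx = array_index_nospec(idx, nr);  /* no OOB speculation */
                return table[idx];
        }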
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 4235907b7858..0d38c006e182 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -226,6 +226,68 @@ config SND_HDA_POWER_SAVE_DEFAULT
The default time-out value in seconds for HD-audio automatic
power-save mode. 0 means to disable the power-save mode.
+if SND_HDA_INTEL
+
+# The options below should not be enabled by distributions or
+# users. They are selected by Intel/Skylake or SOF drivers when they
+# register for a PCI ID which is also handled by the HDAudio legacy
+# driver. When this option is selected and the DSP is detected based on
+# the PCI class/subclass/prog-if, the probe of the HDAudio legacy
+# driver aborts. This mechanism removes the need for distributions to use
+# blacklists. It can be bypassed with module parameters should the
+# Intel/Skylake or SOF drivers fail to handle a specific platform.
+
+config SND_HDA_INTEL_DSP_DETECTION_SKL
+ bool
+ help
+ This option is selected by SOF or SST drivers, not users or distros.
+ It enables DSP detection based on PCI class information for
+ Skylake machines.
+
+config SND_HDA_INTEL_DSP_DETECTION_APL
+ bool
+ help
+ This option is selected by SOF or SST drivers, not users or distros.
+ It enables DSP detection based on PCI class information for
+ Broxton/ApolloLake machines.
+
+config SND_HDA_INTEL_DSP_DETECTION_KBL
+ bool
+ help
+ This option is selected by SOF or SST drivers, not users or distros.
+ It enables DSP detection based on PCI class information for
+ KabyLake machines.
+
+config SND_HDA_INTEL_DSP_DETECTION_GLK
+ bool
+ help
+ This option is selected by SOF or SST drivers, not users or distros.
+ It enables DSP detection based on PCI class information for
+ GeminiLake machines.
+
+config SND_HDA_INTEL_DSP_DETECTION_CNL
+ bool
+ help
+ This option is selected by SOF or SST drivers, not users or distros.
+ It enables DSP detection based on PCI class information for
+ CannonLake machines.
+
+config SND_HDA_INTEL_DSP_DETECTION_CFL
+ bool
+ help
+ This option is selected by SOF or SST drivers, not users or distros.
+ It enables DSP detection based on PCI class information for
+ CoffeeLake machines.
+
+config SND_HDA_INTEL_DSP_DETECTION_ICL
+ bool
+ help
+ This option is selected by SOF or SST drivers, not users or distros.
+ It enables DSP detection based on PCI class information for
+ IceLake machines.
+
+endif ## SND_HDA_INTEL
+
endif
endmenu
diff --git a/sound/pci/hda/dell_wmi_helper.c b/sound/pci/hda/dell_wmi_helper.c
deleted file mode 100644
index bbd6c87a4ed6..000000000000
--- a/sound/pci/hda/dell_wmi_helper.c
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Helper functions for Dell Mic Mute LED control;
- * to be included from codec driver
- */
-
-#if IS_ENABLED(CONFIG_DELL_LAPTOP)
-#include <linux/dell-led.h>
-
-static int (*dell_micmute_led_set_func)(int);
-
-static void dell_micmute_update(struct hda_codec *codec)
-{
- struct hda_gen_spec *spec = codec->spec;
-
- dell_micmute_led_set_func(spec->micmute_led.led_value);
-}
-
-static void alc_fixup_dell_wmi(struct hda_codec *codec,
- const struct hda_fixup *fix, int action)
-{
- bool removefunc = false;
-
- if (action == HDA_FIXUP_ACT_PROBE) {
- if (!dell_micmute_led_set_func)
- dell_micmute_led_set_func = symbol_request(dell_micmute_led_set);
- if (!dell_micmute_led_set_func) {
- codec_warn(codec, "Failed to find dell wmi symbol dell_micmute_led_set\n");
- return;
- }
-
- removefunc = (dell_micmute_led_set_func(false) < 0) ||
- (snd_hda_gen_add_micmute_led(codec,
- dell_micmute_update) < 0);
- }
-
- if (dell_micmute_led_set_func && (action == HDA_FIXUP_ACT_FREE || removefunc)) {
- symbol_put(dell_micmute_led_set);
- dell_micmute_led_set_func = NULL;
- }
-}
-
-#else /* CONFIG_DELL_LAPTOP */
-static void alc_fixup_dell_wmi(struct hda_codec *codec,
- const struct hda_fixup *fix, int action)
-{
-}
-
-#endif /* CONFIG_DELL_LAPTOP */
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 0957813939e5..9f8d59e7e89f 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -36,6 +36,7 @@
#include "hda_beep.h"
#include "hda_jack.h"
#include <sound/hda_hwdep.h>
+#include <sound/hda_component.h>
#define codec_in_pm(codec) snd_hdac_is_in_pm(&codec->core)
#define hda_codec_is_power_on(codec) snd_hdac_is_power_on(&codec->core)
@@ -799,6 +800,13 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec)
static unsigned int hda_set_power_state(struct hda_codec *codec,
unsigned int power_state);
+/* enable/disable display power per codec */
+static void codec_display_power(struct hda_codec *codec, bool enable)
+{
+ if (codec->display_power_control)
+ snd_hdac_display_power(&codec->bus->core, codec->addr, enable);
+}
+
/* also called from hda_bind.c */
void snd_hda_codec_register(struct hda_codec *codec)
{
@@ -806,7 +814,7 @@ void snd_hda_codec_register(struct hda_codec *codec)
return;
if (device_is_registered(hda_codec_dev(codec))) {
snd_hda_register_beep_device(codec);
- snd_hdac_link_power(&codec->core, true);
+ codec_display_power(codec, true);
pm_runtime_enable(hda_codec_dev(codec));
/* it was powered up in snd_hda_codec_new(), now all done */
snd_hda_power_down(codec);
@@ -834,7 +842,7 @@ static int snd_hda_codec_dev_free(struct snd_device *device)
codec->in_freeing = 1;
snd_hdac_device_unregister(&codec->core);
- snd_hdac_link_power(&codec->core, false);
+ codec_display_power(codec, false);
put_device(hda_codec_dev(codec));
return 0;
}
@@ -2926,7 +2934,7 @@ static int hda_codec_runtime_suspend(struct device *dev)
(codec_has_clkstop(codec) && codec_has_epss(codec) &&
(state & AC_PWRST_CLK_STOP_OK)))
snd_hdac_codec_link_down(&codec->core);
- snd_hdac_link_power(&codec->core, false);
+ codec_display_power(codec, false);
return 0;
}
@@ -2934,7 +2942,7 @@ static int hda_codec_runtime_resume(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
- snd_hdac_link_power(&codec->core, true);
+ codec_display_power(codec, true);
snd_hdac_codec_link_up(&codec->core);
hda_call_codec_resume(codec);
pm_runtime_mark_last_busy(dev);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index fe2506672a72..532e081f8b8a 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -989,20 +989,9 @@ static int azx_get_response(struct hdac_bus *bus, unsigned int addr,
return azx_rirb_get_response(bus, addr, res);
}
-static int azx_link_power(struct hdac_bus *bus, bool enable)
-{
- struct azx *chip = bus_to_azx(bus);
-
- if (chip->ops->link_power)
- return chip->ops->link_power(chip, enable);
- else
- return -EINVAL;
-}
-
static const struct hdac_bus_ops bus_core_ops = {
.command = azx_send_cmd,
.get_response = azx_get_response,
- .link_power = azx_link_power,
};
#ifdef CONFIG_SND_HDA_DSP_LOADER
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index c95097bb5a0c..e0c3fcbaa028 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -37,7 +37,7 @@
#else
#define AZX_DCAPS_I915_COMPONENT 0 /* NOP */
#endif
-/* 14 unused */
+#define AZX_DCAPS_INTEL_SHARED (1 << 14) /* shared with ASoC */
#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
/* 17 unused */
@@ -50,11 +50,7 @@
/* 24 unused */
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
-#ifdef CONFIG_SND_HDA_I915
-#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
-#else
-#define AZX_DCAPS_I915_POWERWELL 0 /* NOP */
-#endif
+/* 27 unused */
#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 276150f29cda..4095cd7c56c6 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -29,6 +29,7 @@
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/module.h>
+#include <linux/leds.h>
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/tlv.h>
@@ -4035,6 +4036,36 @@ int snd_hda_gen_add_micmute_led(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_gen_add_micmute_led);
+#if IS_REACHABLE(CONFIG_LEDS_TRIGGER_AUDIO)
+static void call_ledtrig_micmute(struct hda_codec *codec)
+{
+ struct hda_gen_spec *spec = codec->spec;
+
+ ledtrig_audio_set(LED_AUDIO_MICMUTE,
+ spec->micmute_led.led_value ? LED_ON : LED_OFF);
+}
+#endif
+
+/**
+ * snd_hda_gen_fixup_micmute_led - A fixup for mic-mute LED trigger
+ *
+ * Pass this function to the quirk entry if another driver supports the
+ * audio mic-mute LED trigger. It will then bind the mixer capture-switch
+ * changes to the LED.
+ *
+ * Note that this fixup has to be called after any other fixup that sets
+ * cap_sync_hook; otherwise the chaining won't work.
+ */
+void snd_hda_gen_fixup_micmute_led(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+#if IS_REACHABLE(CONFIG_LEDS_TRIGGER_AUDIO)
+ if (action == HDA_FIXUP_ACT_PROBE)
+ snd_hda_gen_add_micmute_led(codec, call_ledtrig_micmute);
+#endif
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_fixup_micmute_led);
+
/*
* parse digital I/Os and set up NIDs in BIOS auto-parse mode
*/
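For context, wiring the new fixup into a codec driver is expected to look like an ordinary fixup-table entry; the enum name below is a placeholder (patch_realtek.c later in this diff renames its Dell entry to ALC255_FIXUP_MIC_MUTE_LED for exactly this purpose):
	/* illustrative entry; only snd_hda_gen_fixup_micmute_led is real */
	[ALC2XX_FIXUP_MIC_MUTE_LED] = {
		.type = HDA_FIXUP_FUNC,
		.v.func = snd_hda_gen_fixup_micmute_led,
	},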
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 10123664fa61..78d77042b05a 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -357,5 +357,7 @@ int snd_hda_gen_fix_pin_power(struct hda_codec *codec, hda_nid_t pin);
int snd_hda_gen_add_micmute_led(struct hda_codec *codec,
void (*hook)(struct hda_codec *));
+void snd_hda_gen_fixup_micmute_led(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action);
#endif /* __SOUND_HDA_GENERIC_H */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index d8eb2b5f51ae..e42cc2230977 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -172,6 +172,9 @@ module_param_array(beep_mode, bool, NULL, 0444);
MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode "
"(0=off, 1=on) (default=1).");
#endif
+static int skl_pci_binding;
+module_param_named(pci_binding, skl_pci_binding, int, 0444);
MODULE_PARM_DESC(pci_binding, "PCI binding (0=auto, 1=only legacy, 2=only asoc)");
#ifdef CONFIG_PM
static int param_set_xint(const char *val, const struct kernel_param *kp);
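As a usage note, this should make snd_hda_intel.pci_binding=1 on the kernel command line (or modprobe snd-hda-intel pci_binding=1) force the legacy driver onto DSP-capable platforms, and pci_binding=2 leave them to the SKL+ ASoC driver; the SND_SKL_PCI_BIND_* constants consumed in azx_probe() further down correspond to the 0/1/2 values.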
@@ -310,31 +313,28 @@ enum {
#define AZX_DCAPS_INTEL_HASWELL \
(/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_COUNT_LPIB_DELAY |\
AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
- AZX_DCAPS_I915_POWERWELL | AZX_DCAPS_SNOOP_TYPE(SCH))
+ AZX_DCAPS_SNOOP_TYPE(SCH))
/* Broadwell HDMI can't use position buffer reliably, force to use LPIB */
#define AZX_DCAPS_INTEL_BROADWELL \
(/*AZX_DCAPS_ALIGN_BUFSIZE |*/ AZX_DCAPS_POSFIX_LPIB |\
AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_COMPONENT |\
- AZX_DCAPS_I915_POWERWELL | AZX_DCAPS_SNOOP_TYPE(SCH))
+ AZX_DCAPS_SNOOP_TYPE(SCH))
#define AZX_DCAPS_INTEL_BAYTRAIL \
- (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT |\
- AZX_DCAPS_I915_POWERWELL)
+ (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_I915_COMPONENT)
#define AZX_DCAPS_INTEL_BRASWELL \
(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
- AZX_DCAPS_I915_COMPONENT | AZX_DCAPS_I915_POWERWELL)
+ AZX_DCAPS_I915_COMPONENT)
#define AZX_DCAPS_INTEL_SKYLAKE \
(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
- AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
- AZX_DCAPS_I915_POWERWELL)
+ AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT)
#define AZX_DCAPS_INTEL_BROXTON \
(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
- AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
- AZX_DCAPS_I915_POWERWELL)
+ AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT)
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
@@ -360,6 +360,7 @@ enum {
AZX_DCAPS_NO_64BIT |\
AZX_DCAPS_4K_BDLE_BOUNDARY | AZX_DCAPS_SNOOP_OFF)
+#define AZX_DCAPS_INTEL_DSP_DETECTION(conf) (IS_ENABLED(CONFIG_SND_HDA_INTEL_DSP_DETECTION_##conf) ? AZX_DCAPS_INTEL_SHARED : 0)
/*
* vga_switcheroo support
*/
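To make the helper concrete, the macro folds down to either the shared-capability flag or nothing, depending on the per-platform Kconfig switch:
	/* AZX_DCAPS_INTEL_DSP_DETECTION(SKL) expands to */
	(IS_ENABLED(CONFIG_SND_HDA_INTEL_DSP_DETECTION_SKL) ?
		AZX_DCAPS_INTEL_SHARED : 0)
	/* so a PCI-ID entry below only carries the "shared with ASoC" bit
	 * when the matching config option is enabled */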
@@ -591,8 +592,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
struct pci_dev *pci = chip->pci;
u32 val;
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
- snd_hdac_set_codec_wakeup(bus, true);
+ snd_hdac_set_codec_wakeup(bus, true);
if (chip->driver_type == AZX_DRIVER_SKL) {
pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
@@ -604,8 +604,8 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
val = val | INTEL_HDA_CGCTL_MISCBDCGE;
pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
}
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
- snd_hdac_set_codec_wakeup(bus, false);
+
+ snd_hdac_set_codec_wakeup(bus, false);
/* reduce dma latency to avoid noise */
if (IS_BXT(pci))
@@ -667,13 +667,8 @@ static int azx_position_check(struct azx *chip, struct azx_dev *azx_dev)
return 0;
}
-/* Enable/disable i915 display power for the link */
-static int azx_intel_link_power(struct azx *chip, bool enable)
-{
- struct hdac_bus *bus = azx_bus(chip);
-
- return snd_hdac_display_power(bus, enable);
-}
+#define display_power(chip, enable) \
+ snd_hdac_display_power(azx_bus(chip), HDA_CODEC_IDX_CONTROLLER, enable)
/*
* Check whether the current DMA position is acceptable for updating
@@ -930,35 +925,75 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
mutex_unlock(&card_list_lock);
return 0;
}
-#else
-#define azx_add_card_list(chip) /* NOP */
-#define azx_del_card_list(chip) /* NOP */
-#endif /* CONFIG_PM */
-#ifdef CONFIG_PM_SLEEP
/*
* power management
*/
-static int azx_suspend(struct device *dev)
+static bool azx_is_pm_ready(struct snd_card *card)
{
- struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
struct hda_intel *hda;
- struct hdac_bus *bus;
if (!card)
- return 0;
-
+ return false;
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
if (chip->disabled || hda->init_failed || !chip->running)
+ return false;
+ return true;
+}
+
+static void __azx_runtime_suspend(struct azx *chip)
+{
+ azx_stop_chip(chip);
+ azx_enter_link_reset(chip);
+ azx_clear_irq_pending(chip);
+ display_power(chip, false);
+}
+
+static void __azx_runtime_resume(struct azx *chip)
+{
+ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+ struct hdac_bus *bus = azx_bus(chip);
+ struct hda_codec *codec;
+ int status;
+
+ display_power(chip, true);
+ if (hda->need_i915_power)
+ snd_hdac_i915_set_bclk(bus);
+
+ /* Read STATESTS before controller reset */
+ status = azx_readw(chip, STATESTS);
+
+ azx_init_pci(chip);
+ hda_intel_init_chip(chip, true);
+
+ if (status) {
+ list_for_each_codec(codec, &chip->bus)
+ if (status & (1 << codec->addr))
+ schedule_delayed_work(&codec->jackpoll_work,
+ codec->jackpoll_interval);
+ }
+
+ /* power down again for link-controlled chips */
+ if (!hda->need_i915_power)
+ display_power(chip, false);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int azx_suspend(struct device *dev)
+{
+ struct snd_card *card = dev_get_drvdata(dev);
+ struct azx *chip;
+ struct hdac_bus *bus;
+
+ if (!azx_is_pm_ready(card))
return 0;
+ chip = card->private_data;
bus = azx_bus(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- azx_clear_irq_pending(chip);
- azx_stop_chip(chip);
- azx_enter_link_reset(chip);
+ __azx_runtime_suspend(chip);
if (bus->irq >= 0) {
free_irq(bus->irq, chip);
bus->irq = -1;
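The helpers above become the single path for power transitions: azx_suspend()/azx_resume() and azx_runtime_suspend()/azx_runtime_resume() in the following hunks all call azx_is_pm_ready() for the readiness checks and __azx_runtime_suspend()/__azx_runtime_resume() for the chip stop/restart and display-power handling, rather than open-coding the same sequence four times.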
@@ -966,9 +1001,6 @@ static int azx_suspend(struct device *dev)
if (chip->msi)
pci_disable_msi(chip->pci);
- if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
- && hda->need_i915_power)
- snd_hdac_display_power(bus, false);
trace_azx_suspend(chip);
return 0;
@@ -976,41 +1008,19 @@ static int azx_suspend(struct device *dev)
static int azx_resume(struct device *dev)
{
- struct pci_dev *pci = to_pci_dev(dev);
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
- struct hda_intel *hda;
- struct hdac_bus *bus;
- if (!card)
+ if (!azx_is_pm_ready(card))
return 0;
chip = card->private_data;
- hda = container_of(chip, struct hda_intel, chip);
- bus = azx_bus(chip);
- if (chip->disabled || hda->init_failed || !chip->running)
- return 0;
-
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- snd_hdac_display_power(bus, true);
- if (hda->need_i915_power)
- snd_hdac_i915_set_bclk(bus);
- }
-
if (chip->msi)
- if (pci_enable_msi(pci) < 0)
+ if (pci_enable_msi(chip->pci) < 0)
chip->msi = 0;
if (azx_acquire_irq(chip, 1) < 0)
return -EIO;
- azx_init_pci(chip);
-
- hda_intel_init_chip(chip, true);
-
- /* power down again for link-controlled chips */
- if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
- !hda->need_i915_power)
- snd_hdac_display_power(bus, false);
-
+ __azx_runtime_resume(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
trace_azx_resume(chip);
@@ -1045,21 +1055,14 @@ static int azx_thaw_noirq(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
static int azx_runtime_suspend(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
- struct hda_intel *hda;
- if (!card)
+ if (!azx_is_pm_ready(card))
return 0;
-
chip = card->private_data;
- hda = container_of(chip, struct hda_intel, chip);
- if (chip->disabled || hda->init_failed)
- return 0;
-
if (!azx_has_pm_runtime(chip))
return 0;
@@ -1067,13 +1070,7 @@ static int azx_runtime_suspend(struct device *dev)
azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
STATESTS_INT_MASK);
- azx_stop_chip(chip);
- azx_enter_link_reset(chip);
- azx_clear_irq_pending(chip);
- if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
- && hda->need_i915_power)
- snd_hdac_display_power(azx_bus(chip), false);
-
+ __azx_runtime_suspend(chip);
trace_azx_runtime_suspend(chip);
return 0;
}
@@ -1082,51 +1079,18 @@ static int azx_runtime_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip;
- struct hda_intel *hda;
- struct hdac_bus *bus;
- struct hda_codec *codec;
- int status;
- if (!card)
+ if (!azx_is_pm_ready(card))
return 0;
-
chip = card->private_data;
- hda = container_of(chip, struct hda_intel, chip);
- bus = azx_bus(chip);
- if (chip->disabled || hda->init_failed)
- return 0;
-
if (!azx_has_pm_runtime(chip))
return 0;
-
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- snd_hdac_display_power(bus, true);
- if (hda->need_i915_power)
- snd_hdac_i915_set_bclk(bus);
- }
-
- /* Read STATESTS before controller reset */
- status = azx_readw(chip, STATESTS);
-
- azx_init_pci(chip);
- hda_intel_init_chip(chip, true);
-
- if (status) {
- list_for_each_codec(codec, &chip->bus)
- if (status & (1 << codec->addr))
- schedule_delayed_work(&codec->jackpoll_work,
- codec->jackpoll_interval);
- }
+ __azx_runtime_resume(chip);
/* disable controller Wake Up event */
azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
~STATESTS_INT_MASK);
- /* power down again for link-controlled chips */
- if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL) &&
- !hda->need_i915_power)
- snd_hdac_display_power(bus, false);
-
trace_azx_runtime_resume(chip);
return 0;
}
@@ -1167,6 +1131,8 @@ static const struct dev_pm_ops azx_pm = {
#define AZX_PM_OPS &azx_pm
#else
+#define azx_add_card_list(chip) /* NOP */
+#define azx_del_card_list(chip) /* NOP */
#define AZX_PM_OPS NULL
#endif /* CONFIG_PM */
@@ -1374,11 +1340,8 @@ static int azx_free(struct azx *chip)
#ifdef CONFIG_SND_HDA_PATCH_LOADER
release_firmware(chip->fw);
#endif
+ display_power(chip, false);
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- if (hda->need_i915_power)
- snd_hdac_display_power(bus, false);
- }
if (chip->driver_caps & AZX_DCAPS_I915_COMPONENT)
snd_hdac_i915_exit(bus);
kfree(hda);
@@ -1935,8 +1898,7 @@ static int azx_first_init(struct azx *chip)
/* initialize chip */
azx_init_pci(chip);
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
- snd_hdac_i915_set_bclk(bus);
+ snd_hdac_i915_set_bclk(bus);
hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
@@ -2078,7 +2040,6 @@ static const struct hda_controller_ops pci_hda_ops = {
.disable_msi_reset_irq = disable_msi_reset_irq,
.pcm_mmap_prepare = pcm_mmap_prepare,
.position_check = azx_position_check,
- .link_power = azx_intel_link_power,
};
static int azx_probe(struct pci_dev *pci,
@@ -2091,6 +2052,28 @@ static int azx_probe(struct pci_dev *pci,
bool schedule_probe;
int err;
+ /* check if this driver can be used on SKL+ Intel platforms */
+ if (pci_id->driver_data & AZX_DCAPS_INTEL_SHARED) {
+ switch (skl_pci_binding) {
+ case SND_SKL_PCI_BIND_AUTO:
+ if (pci->class != 0x040300) {
+ dev_info(&pci->dev, "The DSP is enabled on this platform, aborting probe\n");
+ return -ENODEV;
+ }
+ dev_info(&pci->dev, "No DSP detected, continuing HDaudio legacy probe\n");
+ break;
+ case SND_SKL_PCI_BIND_LEGACY:
+ dev_info(&pci->dev, "Module parameter forced binding with HDaudio legacy, bypassed detection logic\n");
+ break;
+ case SND_SKL_PCI_BIND_ASOC:
+ dev_info(&pci->dev, "Module parameter forced binding with SKL+ ASoC driver, aborting probe\n");
+ return -ENODEV;
+ default:
+ dev_err(&pci->dev, "invalid value for skl_pci_binding module parameter, ignored\n");
+ break;
+ }
+ }
+
if (dev >= SNDRV_CARDS)
return -ENODEV;
if (!enable[dev]) {
@@ -2169,6 +2152,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1849, 0xc892, "Asrock B85M-ITX", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1849, 0x0397, "Asrock N68C-S UCC", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
@@ -2243,10 +2228,13 @@ static int azx_probe_continue(struct azx *chip)
goto out_free;
} else {
/* don't bother any longer */
- chip->driver_caps &=
- ~(AZX_DCAPS_I915_COMPONENT | AZX_DCAPS_I915_POWERWELL);
+ chip->driver_caps &= ~AZX_DCAPS_I915_COMPONENT;
}
}
+
+ /* HSW/BDW controllers need this power */
+ if (CONTROLLER_IN_GPU(pci))
+ hda->need_i915_power = 1;
}
/* Request display power well for the HDA controller or codec. For
@@ -2254,18 +2242,7 @@ static int azx_probe_continue(struct azx *chip)
* this power. For other platforms, like Baytrail/Braswell, only the
* display codec needs the power and it can be released after probe.
*/
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- /* HSW/BDW controllers need this power */
- if (CONTROLLER_IN_GPU(pci))
- hda->need_i915_power = 1;
-
- err = snd_hdac_display_power(bus, true);
- if (err < 0) {
- dev_err(chip->card->dev,
- "Cannot turn on display power on i915\n");
- goto i915_power_fail;
- }
- }
+ display_power(chip, true);
err = azx_first_init(chip);
if (err < 0)
@@ -2313,11 +2290,8 @@ static int azx_probe_continue(struct azx *chip)
pm_runtime_put_autosuspend(&pci->dev);
out_free:
- if ((chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
- && !hda->need_i915_power)
- snd_hdac_display_power(bus, false);
-
-i915_power_fail:
+ if (err < 0 || !hda->need_i915_power)
+ display_power(chip, false);
if (err < 0)
hda->init_failed = 1;
complete_all(&hda->probe_wait);
@@ -2406,34 +2380,48 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE },
/* Sunrise Point-LP */
{ PCI_DEVICE(0x8086, 0x9d70),
- .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE |
+ AZX_DCAPS_INTEL_DSP_DETECTION(SKL)
+ },
/* Kabylake */
{ PCI_DEVICE(0x8086, 0xa171),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE },
/* Kabylake-LP */
{ PCI_DEVICE(0x8086, 0x9d71),
- .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE |
+ AZX_DCAPS_INTEL_DSP_DETECTION(KBL)
+ },
/* Kabylake-H */
{ PCI_DEVICE(0x8086, 0xa2f0),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE },
/* Coffeelake */
{ PCI_DEVICE(0x8086, 0xa348),
- .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE |
+ AZX_DCAPS_INTEL_DSP_DETECTION(CFL)
+ },
/* Cannonlake */
{ PCI_DEVICE(0x8086, 0x9dc8),
- .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE |
+ AZX_DCAPS_INTEL_DSP_DETECTION(CNL)
+ },
/* Icelake */
{ PCI_DEVICE(0x8086, 0x34c8),
- .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE |
+ AZX_DCAPS_INTEL_DSP_DETECTION(ICL)
+ },
/* Broxton-P(Apollolake) */
{ PCI_DEVICE(0x8086, 0x5a98),
- .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON |
+ AZX_DCAPS_INTEL_DSP_DETECTION(APL)
+ },
/* Broxton-T */
{ PCI_DEVICE(0x8086, 0x1a98),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
/* Gemini-Lake */
{ PCI_DEVICE(0x8086, 0x3198),
- .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON |
+ AZX_DCAPS_INTEL_DSP_DETECTION(GLK)
+ },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0a0c),
.driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
@@ -2496,6 +2484,10 @@ static const struct pci_device_id azx_ids[] = {
/* AMD Hudson */
{ PCI_DEVICE(0x1022, 0x780d),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ /* AMD Stoney */
+ { PCI_DEVICE(0x1022, 0x157a),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+ AZX_DCAPS_PM_RUNTIME },
/* AMD Raven */
{ PCI_DEVICE(0x1022, 0x15e3),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index c499727920e6..74b46952fc98 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -339,9 +339,15 @@ void snd_hda_jack_report_sync(struct hda_codec *codec)
if (jack->nid) {
if (!jack->jack || jack->block_report)
continue;
- state = get_jack_plug_state(jack->pin_sense);
- snd_jack_report(jack->jack,
- state ? jack->type : 0);
+ state = jack->button_state;
+ if (get_jack_plug_state(jack->pin_sense))
+ state |= jack->type;
+ snd_jack_report(jack->jack, state);
+ if (jack->button_state) {
+ snd_jack_report(jack->jack,
+ state & ~jack->button_state);
+ jack->button_state = 0; /* button released */
+ }
}
}
EXPORT_SYMBOL_GPL(snd_hda_jack_report_sync);
@@ -379,15 +385,19 @@ static void hda_free_jack_priv(struct snd_jack *jack)
* @nid: pin NID to assign
* @name: string name for the jack
* @phantom_jack: flag to deal as a phantom jack
+ * @type: jack type bits to be reported; pass 0 to guess from the pin config
+ * @keymap: optional jack/key mapping table
*
* This assigns a jack-detection kctl to the given pin. The kcontrol
* will have the given name and index.
*/
int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
- const char *name, bool phantom_jack)
+ const char *name, bool phantom_jack,
+ int type, const struct hda_jack_keymap *keymap)
{
struct hda_jack_tbl *jack;
- int err, state, type;
+ const struct hda_jack_keymap *map;
+ int err, state, buttons;
jack = snd_hda_jack_tbl_new(codec, nid);
if (!jack)
@@ -395,16 +405,30 @@ int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
if (jack->jack)
return 0; /* already created */
- type = get_input_jack_type(codec, nid);
- err = snd_jack_new(codec->card, name, type,
+ if (!type)
+ type = get_input_jack_type(codec, nid);
+
+ buttons = 0;
+ if (keymap) {
+ for (map = keymap; map->type; map++)
+ buttons |= map->type;
+ }
+
+ err = snd_jack_new(codec->card, name, type | buttons,
&jack->jack, true, phantom_jack);
if (err < 0)
return err;
jack->phantom_jack = !!phantom_jack;
jack->type = type;
+ jack->button_state = 0;
jack->jack->private_data = jack;
jack->jack->private_free = hda_free_jack_priv;
+ if (keymap) {
+ for (map = keymap; map->type; map++)
+ snd_jack_set_key(jack->jack, map->type, map->key);
+ }
+
state = snd_hda_jack_detect(codec, nid);
snd_jack_report(jack->jack, state ? jack->type : 0);
@@ -437,7 +461,7 @@ static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
if (phantom_jack)
/* Example final name: "Internal Mic Phantom Jack" */
strncat(name, " Phantom", sizeof(name) - strlen(name) - 1);
- err = snd_hda_jack_add_kctl(codec, nid, name, phantom_jack);
+ err = snd_hda_jack_add_kctl(codec, nid, name, phantom_jack, 0, NULL);
if (err < 0)
return err;
@@ -508,19 +532,25 @@ int snd_hda_jack_add_kctls(struct hda_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_hda_jack_add_kctls);
-static void call_jack_callback(struct hda_codec *codec,
+static void call_jack_callback(struct hda_codec *codec, unsigned int res,
struct hda_jack_tbl *jack)
{
struct hda_jack_callback *cb;
- for (cb = jack->callback; cb; cb = cb->next)
+ for (cb = jack->callback; cb; cb = cb->next) {
+ cb->jack = jack;
+ cb->unsol_res = res;
cb->func(codec, cb);
+ }
if (jack->gated_jack) {
struct hda_jack_tbl *gated =
snd_hda_jack_tbl_get(codec, jack->gated_jack);
if (gated) {
- for (cb = gated->callback; cb; cb = cb->next)
+ for (cb = gated->callback; cb; cb = cb->next) {
+ cb->jack = gated;
+ cb->unsol_res = res;
cb->func(codec, cb);
+ }
}
}
}
@@ -540,7 +570,7 @@ void snd_hda_jack_unsol_event(struct hda_codec *codec, unsigned int res)
return;
event->jack_dirty = 1;
- call_jack_callback(codec, event);
+ call_jack_callback(codec, res, event);
snd_hda_jack_report_sync(codec);
}
EXPORT_SYMBOL_GPL(snd_hda_jack_unsol_event);
@@ -566,7 +596,7 @@ void snd_hda_jack_poll_all(struct hda_codec *codec)
if (old_sense == get_jack_plug_state(jack->pin_sense))
continue;
changes = 1;
- call_jack_callback(codec, jack);
+ call_jack_callback(codec, 0, jack);
}
if (changes)
snd_hda_jack_report_sync(codec);
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index e9814c0168ea..1d713201c160 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -13,6 +13,7 @@
#define __SOUND_HDA_JACK_H
#include <linux/err.h>
+#include <sound/jack.h>
struct auto_pin_cfg;
struct hda_jack_tbl;
@@ -24,6 +25,8 @@ struct hda_jack_callback {
hda_nid_t nid;
hda_jack_callback_fn func;
unsigned int private_data; /* arbitrary data */
+ unsigned int unsol_res; /* unsolicited event bits */
+ struct hda_jack_tbl *jack; /* associated jack entry */
struct hda_jack_callback *next;
};
@@ -40,9 +43,15 @@ struct hda_jack_tbl {
hda_nid_t gating_jack; /* valid when gating jack plugged */
hda_nid_t gated_jack; /* gated is dependent on this jack */
int type;
+ int button_state;
struct snd_jack *jack;
};
+struct hda_jack_keymap {
+ enum snd_jack_types type;
+ int key;
+};
+
struct hda_jack_tbl *
snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid);
struct hda_jack_tbl *
@@ -82,7 +91,8 @@ static inline bool snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid)
bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid);
int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
- const char *name, bool phantom_jack);
+ const char *name, bool phantom_jack,
+ int type, const struct hda_jack_keymap *keymap);
int snd_hda_jack_add_kctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg);
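The new @type/@keymap arguments get their first concrete user in patch_realtek.c below: alc_fixup_headset_jack() registers a "Headset Jack" kctl with SND_JACK_HEADSET plus a four-entry hda_jack_keymap (BTN_0..BTN_3 mapped to play/pause, voice command and volume keys), and its unsol callback stores the decoded buttons in jack->button_state, which snd_hda_jack_report_sync() reports and then immediately re-reports without the button bits to emulate the release.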
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index dd7d4242d6d2..83befd8d43e8 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -31,6 +31,7 @@
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/time.h>
+#include <linux/string.h>
#include <sound/core.h>
#include <sound/initval.h>
@@ -344,6 +345,8 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
int err;
unsigned short gcap;
int irq_id = platform_get_irq(pdev, 0);
+ const char *sname;
+ struct device_node *root;
err = hda_tegra_init_chip(chip, pdev);
if (err)
@@ -401,8 +404,23 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
return -ENODEV;
}
+ /* driver name */
strcpy(card->driver, "tegra-hda");
- strcpy(card->shortname, "tegra-hda");
+
+ root = of_find_node_by_path("/");
+ sname = of_get_property(root, "compatible", NULL);
+ of_node_put(root);
+ if (!sname) {
+ dev_err(card->dev,
+ "failed to get compatible property from root node\n");
+ return -ENODEV;
+ }
+ /* shortname for card */
+ if (strlen(sname) >= sizeof(card->shortname))
+ dev_info(card->dev, "truncating shortname for card\n");
+ snprintf(card->shortname, sizeof(card->shortname), "%s", sname);
+
+ /* longname for card */
snprintf(card->longname, sizeof(card->longname),
"%s at 0x%lx irq %i",
card->shortname, bus->addr, bus->irq);
@@ -513,7 +531,7 @@ static void hda_tegra_probe_work(struct work_struct *work)
goto out_free;
/* create codec instances */
- err = azx_probe_codecs(chip, 0);
+ err = azx_probe_codecs(chip, 8);
if (err < 0)
goto out_free;
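Passing 8 instead of 0 here widens the range of codec addresses probed beyond the driver default, presumably so the Tegra bus can reach the HDMI/DP codecs on newer parts; the matching Tegra186/Tegra194 codec IDs are added to patch_hdmi.c later in this diff.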
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0a567634e5fa..e5bdbc245682 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -1081,6 +1081,18 @@ enum {
QUIRK_AE5,
};
+#ifdef CONFIG_PCI
+#define ca0132_quirk(spec) ((spec)->quirk)
+#define ca0132_use_pci_mmio(spec) ((spec)->use_pci_mmio)
+#define ca0132_use_alt_functions(spec) ((spec)->use_alt_functions)
+#define ca0132_use_alt_controls(spec) ((spec)->use_alt_controls)
+#else
+#define ca0132_quirk(spec) ({ (void)(spec); QUIRK_NONE; })
+#define ca0132_use_alt_functions(spec) ({ (void)(spec); false; })
+#define ca0132_use_pci_mmio(spec) ({ (void)(spec); false; })
+#define ca0132_use_alt_controls(spec) ({ (void)(spec); false; })
+#endif
+
static const struct hda_pintbl alienware_pincfgs[] = {
{ 0x0b, 0x90170110 }, /* Builtin Speaker */
{ 0x0c, 0x411111f0 }, /* N/A */
@@ -3101,7 +3113,7 @@ static void dspload_post_setup(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
codec_dbg(codec, "---- dspload_post_setup ------\n");
- if (!spec->use_alt_functions) {
+ if (!ca0132_use_alt_functions(spec)) {
/*set DSP speaker to 2.0 configuration*/
chipio_write(codec, XRAM_XRAM_INST_OFFSET(0x18), 0x08080080);
chipio_write(codec, XRAM_XRAM_INST_OFFSET(0x19), 0x3f800000);
@@ -3333,7 +3345,7 @@ static void ca0132_gpio_init(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
case QUIRK_AE5:
snd_hda_codec_write(codec, 0x01, 0, 0x793, 0x00);
@@ -3344,6 +3356,8 @@ static void ca0132_gpio_init(struct hda_codec *codec)
snd_hda_codec_write(codec, 0x01, 0, 0x793, 0x00);
snd_hda_codec_write(codec, 0x01, 0, 0x794, 0x5B);
break;
+ default:
+ break;
}
}
@@ -3353,7 +3367,7 @@ static void ca0132_gpio_setup(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
snd_hda_codec_write(codec, 0x01, 0,
AC_VERB_SET_GPIO_DIRECTION, 0x07);
@@ -3372,6 +3386,8 @@ static void ca0132_gpio_setup(struct hda_codec *codec)
snd_hda_codec_write(codec, 0x01, 0,
AC_VERB_SET_GPIO_DATA, 0x0C);
break;
+ default:
+ break;
}
}
@@ -4172,7 +4188,7 @@ static void ca0132_alt_select_out_quirk_handler(struct hda_codec *codec)
switch (spec->cur_out_type) {
case SPEAKER_OUT:
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
ca0113_mmio_gpio_set(codec, 7, false);
ca0113_mmio_gpio_set(codec, 4, true);
@@ -4203,10 +4219,12 @@ static void ca0132_alt_select_out_quirk_handler(struct hda_codec *codec)
chipio_set_control_param(codec, 0x0d, 0xa4);
chipio_write(codec, 0x18b03c, 0x00000012);
break;
+ default:
+ break;
}
break;
case HEADPHONE_OUT:
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
ca0113_mmio_gpio_set(codec, 7, true);
ca0113_mmio_gpio_set(codec, 4, true);
@@ -4238,10 +4256,12 @@ static void ca0132_alt_select_out_quirk_handler(struct hda_codec *codec)
chipio_set_control_param(codec, 0x0d, 0xa1);
chipio_write(codec, 0x18b03c, 0x00000012);
break;
+ default:
+ break;
}
break;
case SURROUND_OUT:
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
ca0113_mmio_gpio_set(codec, 7, false);
ca0113_mmio_gpio_set(codec, 4, true);
@@ -4272,6 +4292,8 @@ static void ca0132_alt_select_out_quirk_handler(struct hda_codec *codec)
chipio_set_control_param(codec, 0x0d, 0xa4);
chipio_write(codec, 0x18b03c, 0x00000012);
break;
+ default:
+ break;
}
break;
}
@@ -4446,7 +4468,7 @@ static void ca0132_unsol_hp_delayed(struct work_struct *work)
to_delayed_work(work), struct ca0132_spec, unsol_hp_work);
struct hda_jack_tbl *jack;
- if (spec->use_alt_functions)
+ if (ca0132_use_alt_functions(spec))
ca0132_alt_select_out(spec->codec);
else
ca0132_select_out(spec->codec);
@@ -4530,14 +4552,14 @@ static int ca0132_alt_set_vipsource(struct hda_codec *codec, int val)
chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);
- if (spec->quirk == QUIRK_R3DI)
+ if (ca0132_quirk(spec) == QUIRK_R3DI)
chipio_set_conn_rate(codec, 0x0F, SR_96_000);
if (spec->in_enum_val == REAR_LINE_IN)
tmp = FLOAT_ZERO;
else {
- if (spec->quirk == QUIRK_SBZ)
+ if (ca0132_quirk(spec) == QUIRK_SBZ)
tmp = FLOAT_THREE;
else
tmp = FLOAT_ONE;
@@ -4549,7 +4571,7 @@ static int ca0132_alt_set_vipsource(struct hda_codec *codec, int val)
codec_dbg(codec, "%s: on.", __func__);
chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_16_000);
chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_16_000);
- if (spec->quirk == QUIRK_R3DI)
+ if (ca0132_quirk(spec) == QUIRK_R3DI)
chipio_set_conn_rate(codec, 0x0F, SR_16_000);
if (spec->effects_switch[VOICE_FOCUS - EFFECT_START_NID])
@@ -4645,7 +4667,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
switch (spec->cur_mic_type) {
case REAR_MIC:
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
case QUIRK_R3D:
ca0113_mmio_gpio_set(codec, 0, false);
@@ -4669,14 +4691,14 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);
- if (spec->quirk == QUIRK_R3DI)
+ if (ca0132_quirk(spec) == QUIRK_R3DI)
chipio_set_conn_rate(codec, 0x0F, SR_96_000);
dspio_set_uint_param(codec, 0x80, 0x00, tmp);
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
chipio_write(codec, 0x18B098, 0x0000000C);
chipio_write(codec, 0x18B09C, 0x0000000C);
@@ -4689,12 +4711,14 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
chipio_write(codec, 0x18B098, 0x0000000C);
chipio_write(codec, 0x18B09C, 0x0000004C);
break;
+ default:
+ break;
}
ca0132_alt_mic_boost_set(codec, spec->mic_boost_enum_val);
break;
case REAR_LINE_IN:
ca0132_mic_boost_set(codec, 0);
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
case QUIRK_R3D:
ca0113_mmio_gpio_set(codec, 0, false);
@@ -4705,28 +4729,32 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
case QUIRK_AE5:
ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00);
break;
+ default:
+ break;
}
chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);
- if (spec->quirk == QUIRK_R3DI)
+ if (ca0132_quirk(spec) == QUIRK_R3DI)
chipio_set_conn_rate(codec, 0x0F, SR_96_000);
tmp = FLOAT_ZERO;
dspio_set_uint_param(codec, 0x80, 0x00, tmp);
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
case QUIRK_AE5:
chipio_write(codec, 0x18B098, 0x00000000);
chipio_write(codec, 0x18B09C, 0x00000000);
break;
+ default:
+ break;
}
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
break;
case FRONT_MIC:
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
case QUIRK_R3D:
ca0113_mmio_gpio_set(codec, 0, true);
@@ -4748,7 +4776,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);
- if (spec->quirk == QUIRK_R3DI)
+ if (ca0132_quirk(spec) == QUIRK_R3DI)
chipio_set_conn_rate(codec, 0x0F, SR_96_000);
dspio_set_uint_param(codec, 0x80, 0x00, tmp);
@@ -4756,7 +4784,7 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
chipio_write(codec, 0x18B098, 0x0000000C);
chipio_write(codec, 0x18B09C, 0x000000CC);
@@ -4765,6 +4793,8 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
chipio_write(codec, 0x18B098, 0x0000000C);
chipio_write(codec, 0x18B09C, 0x0000004C);
break;
+ default:
+ break;
}
ca0132_alt_mic_boost_set(codec, spec->mic_boost_enum_val);
break;
@@ -4859,7 +4889,7 @@ static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
val = 0;
/* If Voice Focus on SBZ, set to two channel. */
- if ((nid == VOICE_FOCUS) && (spec->use_pci_mmio)
+ if ((nid == VOICE_FOCUS) && ca0132_use_pci_mmio(spec)
&& (spec->cur_mic_type != REAR_LINE_IN)) {
if (spec->effects_switch[CRYSTAL_VOICE -
EFFECT_START_NID]) {
@@ -4878,7 +4908,7 @@ static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
* For SBZ noise reduction, there's an extra command
* to module ID 0x47. No clue why.
*/
- if ((nid == NOISE_REDUCTION) && (spec->use_pci_mmio)
+ if ((nid == NOISE_REDUCTION) && ca0132_use_pci_mmio(spec)
&& (spec->cur_mic_type != REAR_LINE_IN)) {
if (spec->effects_switch[CRYSTAL_VOICE -
EFFECT_START_NID]) {
@@ -4894,7 +4924,7 @@ static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
}
/* If rear line in disable effects. */
- if (spec->use_alt_functions &&
+ if (ca0132_use_alt_functions(spec) &&
spec->in_enum_val == REAR_LINE_IN)
val = 0;
}
@@ -4924,7 +4954,7 @@ static int ca0132_pe_switch_set(struct hda_codec *codec)
codec_dbg(codec, "ca0132_pe_switch_set: val=%ld\n",
spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID]);
- if (spec->use_alt_functions)
+ if (ca0132_use_alt_functions(spec))
ca0132_alt_select_out(codec);
i = OUT_EFFECT_START_NID - EFFECT_START_NID;
@@ -4984,7 +5014,7 @@ static int ca0132_cvoice_switch_set(struct hda_codec *codec)
/* set correct vipsource */
oldval = stop_mic1(codec);
- if (spec->use_alt_functions)
+ if (ca0132_use_alt_functions(spec))
ret |= ca0132_alt_set_vipsource(codec, 1);
else
ret |= ca0132_set_vipsource(codec, 1);
@@ -5053,7 +5083,7 @@ static int ca0132_vnode_switch_set(struct snd_kcontrol *kcontrol,
auto_jack =
spec->vnode_lswitch[VNID_HP_ASEL - VNODE_START_NID];
if (!auto_jack) {
- if (spec->use_alt_functions)
+ if (ca0132_use_alt_functions(spec))
ca0132_alt_select_out(codec);
else
ca0132_select_out(codec);
@@ -5070,7 +5100,7 @@ static int ca0132_vnode_switch_set(struct snd_kcontrol *kcontrol,
}
if (nid == VNID_HP_ASEL) {
- if (spec->use_alt_functions)
+ if (ca0132_use_alt_functions(spec))
ca0132_alt_select_out(codec);
else
ca0132_select_out(codec);
@@ -5784,7 +5814,7 @@ static int ca0132_switch_put(struct snd_kcontrol *kcontrol,
/* mic boost */
if (nid == spec->input_pins[0]) {
spec->cur_mic_boost = *valp;
- if (spec->use_alt_functions) {
+ if (ca0132_use_alt_functions(spec)) {
if (spec->in_enum_val != REAR_LINE_IN)
changed = ca0132_mic_boost_set(codec, *valp);
} else {
@@ -6080,7 +6110,7 @@ static int add_fx_switch(struct hda_codec *codec, hda_nid_t nid,
/* If using alt_controls, add FX: prefix. But, don't add FX:
* prefix to OutFX or InFX enable controls.
*/
- if ((spec->use_alt_controls) && (nid <= IN_EFFECT_END_NID))
+ if (ca0132_use_alt_controls(spec) && (nid <= IN_EFFECT_END_NID))
sprintf(namestr, "FX: %s %s Switch", pfx, dirstr[dir]);
else
sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]);
@@ -6357,7 +6387,7 @@ static int ca0132_build_controls(struct hda_codec *codec)
return err;
}
/* Setup vmaster with surround slaves for desktop ca0132 devices */
- if (spec->use_alt_functions) {
+ if (ca0132_use_alt_functions(spec)) {
snd_hda_set_vmaster_tlv(codec, spec->dacs[0], HDA_OUTPUT,
spec->tlv);
snd_hda_add_vmaster(codec, "Master Playback Volume",
@@ -6377,7 +6407,7 @@ static int ca0132_build_controls(struct hda_codec *codec)
num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT;
for (i = 0; i < num_fx; i++) {
/* Desktop cards break if Echo Cancellation is used. */
- if (spec->use_pci_mmio) {
+ if (ca0132_use_pci_mmio(spec)) {
if (i == (ECHO_CANCELLATION - IN_EFFECT_START_NID +
OUT_EFFECTS_COUNT))
continue;
@@ -6394,7 +6424,7 @@ static int ca0132_build_controls(struct hda_codec *codec)
* EQ presets, and Smart Volume presets. Also, change names to add FX
* prefix, and change PlayEnhancement and CrystalVoice to match.
*/
- if (spec->use_alt_controls) {
+ if (ca0132_use_alt_controls(spec)) {
err = ca0132_alt_add_svm_enum(codec);
if (err < 0)
return err;
@@ -6448,7 +6478,7 @@ static int ca0132_build_controls(struct hda_codec *codec)
* to select the new outputs and inputs, plus add the new mic boost
* setting control.
*/
- if (spec->use_alt_functions) {
+ if (ca0132_use_alt_functions(spec)) {
err = ca0132_alt_add_output_enum(codec);
if (err < 0)
return err;
@@ -6459,14 +6489,14 @@ static int ca0132_build_controls(struct hda_codec *codec)
* ZxR only has microphone input, there is no front panel
* header on the card, and aux-in is handled by the DBPro board.
*/
- if (spec->quirk != QUIRK_ZXR) {
+ if (ca0132_quirk(spec) != QUIRK_ZXR) {
err = ca0132_alt_add_input_enum(codec);
if (err < 0)
return err;
}
}
- if (spec->quirk == QUIRK_AE5) {
+ if (ca0132_quirk(spec) == QUIRK_AE5) {
err = ae5_add_headphone_gain_enum(codec);
if (err < 0)
return err;
@@ -6475,7 +6505,7 @@ static int ca0132_build_controls(struct hda_codec *codec)
return err;
}
- if (spec->quirk == QUIRK_ZXR) {
+ if (ca0132_quirk(spec) == QUIRK_ZXR) {
err = zxr_add_headphone_gain_switch(codec);
if (err < 0)
return err;
@@ -6505,7 +6535,7 @@ static int ca0132_build_controls(struct hda_codec *codec)
return err;
}
- if (spec->use_alt_functions)
+ if (ca0132_use_alt_functions(spec))
ca0132_alt_add_chmap_ctls(codec);
return 0;
@@ -6583,7 +6613,7 @@ static int ca0132_build_pcms(struct hda_codec *codec)
info = snd_hda_codec_pcm_new(codec, "CA0132 Analog");
if (!info)
return -ENOMEM;
- if (spec->use_alt_functions) {
+ if (ca0132_use_alt_functions(spec)) {
info->own_chmap = true;
info->stream[SNDRV_PCM_STREAM_PLAYBACK].chmap
= ca0132_alt_chmaps;
@@ -6597,7 +6627,7 @@ static int ca0132_build_pcms(struct hda_codec *codec)
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];
/* With the DSP enabled, desktops don't use this ADC. */
- if (!spec->use_alt_functions) {
+ if (!ca0132_use_alt_functions(spec)) {
info = snd_hda_codec_pcm_new(codec, "CA0132 Analog Mic-In2");
if (!info)
return -ENOMEM;
@@ -6795,7 +6825,7 @@ static void ca0132_init_dmic(struct hda_codec *codec)
* Bit 6: set to select Data2, clear for Data1
* Bit 7: set to enable DMic, clear for AMic
*/
- if (spec->quirk == QUIRK_ALIENWARE_M17XR4)
+ if (ca0132_quirk(spec) == QUIRK_ALIENWARE_M17XR4)
val = 0x33;
else
val = 0x23;
@@ -6877,7 +6907,7 @@ static void ca0132_alt_init_analog_mics(struct hda_codec *codec)
/* Mic 1 Setup */
chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);
- if (spec->quirk == QUIRK_R3DI) {
+ if (ca0132_quirk(spec) == QUIRK_R3DI) {
chipio_set_conn_rate(codec, 0x0F, SR_96_000);
tmp = FLOAT_ONE;
} else
@@ -6887,7 +6917,7 @@ static void ca0132_alt_init_analog_mics(struct hda_codec *codec)
/* Mic 2 setup (not present on desktop cards) */
chipio_set_conn_rate(codec, MEM_CONNID_MICIN2, SR_96_000);
chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2, SR_96_000);
- if (spec->quirk == QUIRK_R3DI)
+ if (ca0132_quirk(spec) == QUIRK_R3DI)
chipio_set_conn_rate(codec, 0x0F, SR_96_000);
tmp = FLOAT_ZERO;
dspio_set_uint_param(codec, 0x80, 0x01, tmp);
@@ -6949,7 +6979,7 @@ static void sbz_chipio_startup_data(struct hda_codec *codec)
chipio_set_stream_channels(codec, 0x0C, 6);
chipio_set_stream_control(codec, 0x0C, 1);
/* No clue what these control */
- if (spec->quirk == QUIRK_SBZ) {
+ if (ca0132_quirk(spec) == QUIRK_SBZ) {
chipio_write_no_mutex(codec, 0x190030, 0x0001e0c0);
chipio_write_no_mutex(codec, 0x190034, 0x0001e1c1);
chipio_write_no_mutex(codec, 0x190038, 0x0001e4c2);
@@ -6962,7 +6992,7 @@ static void sbz_chipio_startup_data(struct hda_codec *codec)
chipio_write_no_mutex(codec, 0x190054, 0x0001edc9);
chipio_write_no_mutex(codec, 0x190058, 0x0001eaca);
chipio_write_no_mutex(codec, 0x19005c, 0x0001ebcb);
- } else if (spec->quirk == QUIRK_ZXR) {
+ } else if (ca0132_quirk(spec) == QUIRK_ZXR) {
chipio_write_no_mutex(codec, 0x190038, 0x000140c2);
chipio_write_no_mutex(codec, 0x19003c, 0x000141c3);
chipio_write_no_mutex(codec, 0x190040, 0x000150c4);
@@ -6992,7 +7022,7 @@ static void ca0132_alt_dsp_scp_startup(struct hda_codec *codec)
* why this is, but multiple tests have confirmed it.
*/
for (i = 0; i < 2; i++) {
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
case QUIRK_AE5:
tmp = 0x00000003;
@@ -7021,6 +7051,8 @@ static void ca0132_alt_dsp_scp_startup(struct hda_codec *codec)
tmp = 0x00000000;
dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
break;
+ default:
+ break;
}
msleep(100);
}
@@ -7043,7 +7075,7 @@ static void ca0132_alt_dsp_initial_mic_setup(struct hda_codec *codec)
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
chipio_write(codec, 0x18b098, 0x0000000c);
chipio_write(codec, 0x18b09C, 0x0000000c);
@@ -7052,6 +7084,8 @@ static void ca0132_alt_dsp_initial_mic_setup(struct hda_codec *codec)
chipio_write(codec, 0x18b098, 0x0000000c);
chipio_write(codec, 0x18b09c, 0x0000004c);
break;
+ default:
+ break;
}
}
@@ -7273,7 +7307,7 @@ static void r3d_setup_defaults(struct hda_codec *codec)
/* Set speaker source? */
dspio_set_uint_param(codec, 0x32, 0x00, tmp);
- if (spec->quirk == QUIRK_R3DI)
+ if (ca0132_quirk(spec) == QUIRK_R3DI)
r3di_gpio_dsp_status_set(codec, R3DI_DSP_DOWNLOADED);
/* Setup effect defaults */
@@ -7420,7 +7454,7 @@ static void ca0132_init_flags(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- if (spec->use_alt_functions) {
+ if (ca0132_use_alt_functions(spec)) {
chipio_set_control_flag(codec, CONTROL_FLAG_DSP_96KHZ, 1);
chipio_set_control_flag(codec, CONTROL_FLAG_DAC_96KHZ, 1);
chipio_set_control_flag(codec, CONTROL_FLAG_ADC_B_96KHZ, 1);
@@ -7453,7 +7487,7 @@ static void ca0132_init_params(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- if (spec->use_alt_functions) {
+ if (ca0132_use_alt_functions(spec)) {
chipio_set_conn_rate(codec, MEM_CONNID_WUH, SR_48_000);
chipio_set_conn_rate(codec, 0x0B, SR_48_000);
chipio_set_control_param(codec, CONTROL_PARAM_SPDIF1_SOURCE, 0);
@@ -7490,7 +7524,7 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
* can use the default firmware, but I'll leave the option in case
* it needs it again.
*/
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
case QUIRK_R3D:
case QUIRK_AE5:
@@ -7564,7 +7598,7 @@ static void ca0132_download_dsp(struct hda_codec *codec)
}
/* For codecs using alt functions, this is already done earlier */
- if (spec->dsp_state == DSP_DOWNLOADED && (!spec->use_alt_functions))
+ if (spec->dsp_state == DSP_DOWNLOADED && !ca0132_use_alt_functions(spec))
ca0132_set_dsp_msr(codec, true);
}
@@ -7601,7 +7635,7 @@ static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
{
struct ca0132_spec *spec = codec->spec;
- if (spec->use_alt_functions)
+ if (ca0132_use_alt_functions(spec))
ca0132_alt_select_in(codec);
else
ca0132_select_mic(codec);
@@ -7616,7 +7650,7 @@ static void ca0132_init_unsol(struct hda_codec *codec)
snd_hda_jack_detect_enable_callback(codec, UNSOL_TAG_DSP,
ca0132_process_dsp_response);
/* Front headphone jack detection */
- if (spec->use_alt_functions)
+ if (ca0132_use_alt_functions(spec))
snd_hda_jack_detect_enable_callback(codec,
spec->unsol_tag_front_hp, hp_callback);
}
@@ -7706,7 +7740,7 @@ static void ca0132_init_chip(struct hda_codec *codec)
mutex_init(&spec->chipio_mutex);
spec->cur_out_type = SPEAKER_OUT;
- if (!spec->use_alt_functions)
+ if (!ca0132_use_alt_functions(spec))
spec->cur_mic_type = DIGITAL_MIC;
else
spec->cur_mic_type = REAR_MIC;
@@ -7732,7 +7766,7 @@ static void ca0132_init_chip(struct hda_codec *codec)
* Sets defaults for the effect slider controls, only for alternative
* ca0132 codecs. Also sets x-bass crossover frequency to 80hz.
*/
- if (spec->use_alt_controls) {
+ if (ca0132_use_alt_controls(spec)) {
spec->xbass_xover_freq = 8;
for (i = 0; i < EFFECT_LEVEL_SLIDERS; i++)
spec->fx_ctl_val[i] = effect_slider_defaults[i];
@@ -7747,7 +7781,7 @@ static void ca0132_init_chip(struct hda_codec *codec)
* the daughter board. So, there is no input enum control, and we need
* to make sure that spec->in_enum_val is set properly.
*/
- if (spec->quirk == QUIRK_ZXR)
+ if (ca0132_quirk(spec) == QUIRK_ZXR)
spec->in_enum_val = REAR_MIC;
#ifdef ENABLE_TUNING_CONTROLS
@@ -8088,27 +8122,27 @@ static void ca0132_mmio_init(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- if (spec->quirk == QUIRK_AE5)
+ if (ca0132_quirk(spec) == QUIRK_AE5)
writel(0x00000001, spec->mem_base + 0x400);
else
writel(0x00000000, spec->mem_base + 0x400);
- if (spec->quirk == QUIRK_AE5)
+ if (ca0132_quirk(spec) == QUIRK_AE5)
writel(0x00000001, spec->mem_base + 0x408);
else
writel(0x00000000, spec->mem_base + 0x408);
- if (spec->quirk == QUIRK_AE5)
+ if (ca0132_quirk(spec) == QUIRK_AE5)
writel(0x00000001, spec->mem_base + 0x40c);
else
writel(0x00000000, spec->mem_base + 0x40C);
- if (spec->quirk == QUIRK_ZXR)
+ if (ca0132_quirk(spec) == QUIRK_ZXR)
writel(0x00880640, spec->mem_base + 0x01C);
else
writel(0x00880680, spec->mem_base + 0x01C);
- if (spec->quirk == QUIRK_AE5)
+ if (ca0132_quirk(spec) == QUIRK_AE5)
writel(0x00000080, spec->mem_base + 0xC0C);
else
writel(0x00000083, spec->mem_base + 0xC0C);
@@ -8116,7 +8150,7 @@ static void ca0132_mmio_init(struct hda_codec *codec)
writel(0x00000030, spec->mem_base + 0xC00);
writel(0x00000000, spec->mem_base + 0xC04);
- if (spec->quirk == QUIRK_AE5)
+ if (ca0132_quirk(spec) == QUIRK_AE5)
writel(0x00000000, spec->mem_base + 0xC0C);
else
writel(0x00000003, spec->mem_base + 0xC0C);
@@ -8125,7 +8159,7 @@ static void ca0132_mmio_init(struct hda_codec *codec)
writel(0x00000003, spec->mem_base + 0xC0C);
writel(0x00000003, spec->mem_base + 0xC0C);
- if (spec->quirk == QUIRK_AE5)
+ if (ca0132_quirk(spec) == QUIRK_AE5)
writel(0x00000001, spec->mem_base + 0xC08);
else
writel(0x000000C1, spec->mem_base + 0xC08);
@@ -8136,7 +8170,7 @@ static void ca0132_mmio_init(struct hda_codec *codec)
writel(0x000000C1, spec->mem_base + 0xC08);
writel(0x00000080, spec->mem_base + 0xC04);
- if (spec->quirk == QUIRK_AE5) {
+ if (ca0132_quirk(spec) == QUIRK_AE5) {
writel(0x00000000, spec->mem_base + 0x42c);
writel(0x00000000, spec->mem_base + 0x46c);
writel(0x00000000, spec->mem_base + 0x4ac);
@@ -8211,7 +8245,7 @@ static void ca0132_alt_init(struct hda_codec *codec)
ca0132_alt_vol_setup(codec);
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
codec_dbg(codec, "SBZ alt_init");
ca0132_gpio_init(codec);
@@ -8248,6 +8282,8 @@ static void ca0132_alt_init(struct hda_codec *codec)
snd_hda_sequence_write(codec, spec->chip_init_verbs);
snd_hda_sequence_write(codec, spec->desktop_init_verbs);
break;
+ default:
+ break;
}
}
@@ -8274,7 +8310,7 @@ static int ca0132_init(struct hda_codec *codec)
spec->dsp_reload = true;
spec->dsp_state = DSP_DOWNLOAD_INIT;
} else {
- if (spec->quirk == QUIRK_SBZ)
+ if (ca0132_quirk(spec) == QUIRK_SBZ)
sbz_dsp_startup_check(codec);
return 0;
}
@@ -8284,12 +8320,12 @@ static int ca0132_init(struct hda_codec *codec)
spec->dsp_state = DSP_DOWNLOAD_INIT;
spec->curr_chip_addx = INVALID_CHIP_ADDRESS;
- if (spec->use_pci_mmio)
+ if (ca0132_use_pci_mmio(spec))
ca0132_mmio_init(codec);
snd_hda_power_up_pm(codec);
- if (spec->quirk == QUIRK_AE5)
+ if (ca0132_quirk(spec) == QUIRK_AE5)
ae5_register_set(codec);
ca0132_init_unsol(codec);
@@ -8298,14 +8334,14 @@ static int ca0132_init(struct hda_codec *codec)
snd_hda_sequence_write(codec, spec->base_init_verbs);
- if (spec->use_alt_functions)
+ if (ca0132_use_alt_functions(spec))
ca0132_alt_init(codec);
ca0132_download_dsp(codec);
ca0132_refresh_widget_caps(codec);
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_R3DI:
case QUIRK_R3D:
r3d_setup_defaults(codec);
@@ -8334,7 +8370,7 @@ static int ca0132_init(struct hda_codec *codec)
init_input(codec, cfg->dig_in_pin, spec->dig_in);
- if (!spec->use_alt_functions) {
+ if (!ca0132_use_alt_functions(spec)) {
snd_hda_sequence_write(codec, spec->chip_init_verbs);
snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
VENDOR_CHIPIO_PARAM_EX_ID_SET, 0x0D);
@@ -8342,11 +8378,11 @@ static int ca0132_init(struct hda_codec *codec)
VENDOR_CHIPIO_PARAM_EX_VALUE_SET, 0x20);
}
- if (spec->quirk == QUIRK_SBZ)
+ if (ca0132_quirk(spec) == QUIRK_SBZ)
ca0132_gpio_setup(codec);
snd_hda_sequence_write(codec, spec->spec_init_verbs);
- if (spec->use_alt_functions) {
+ if (ca0132_use_alt_functions(spec)) {
ca0132_alt_select_out(codec);
ca0132_alt_select_in(codec);
} else {
@@ -8391,7 +8427,7 @@ static void ca0132_free(struct hda_codec *codec)
cancel_delayed_work_sync(&spec->unsol_hp_work);
snd_hda_power_up(codec);
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
sbz_exit_chip(codec);
break;
@@ -8407,13 +8443,15 @@ static void ca0132_free(struct hda_codec *codec)
case QUIRK_R3DI:
r3di_gpio_shutdown(codec);
break;
+ default:
+ break;
}
snd_hda_sequence_write(codec, spec->base_exit_verbs);
ca0132_exit_chip(codec);
snd_hda_power_down(codec);
- if (spec->mem_base)
+ if (IS_ENABLED(CONFIG_PCI) && spec->mem_base)
pci_iounmap(codec->bus->pci, spec->mem_base);
kfree(spec->spec_init_verbs);
kfree(codec->spec);
@@ -8461,12 +8499,12 @@ static void ca0132_config(struct hda_codec *codec)
spec->multiout.dac_nids = spec->dacs;
spec->multiout.num_dacs = 3;
- if (!spec->use_alt_functions)
+ if (!ca0132_use_alt_functions(spec))
spec->multiout.max_channels = 2;
else
spec->multiout.max_channels = 6;
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_ALIENWARE:
codec_dbg(codec, "%s: QUIRK_ALIENWARE applied.\n", __func__);
snd_hda_apply_pincfgs(codec, alienware_pincfgs);
@@ -8491,9 +8529,11 @@ static void ca0132_config(struct hda_codec *codec)
codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__);
snd_hda_apply_pincfgs(codec, ae5_pincfgs);
break;
+ default:
+ break;
}
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_ALIENWARE:
spec->num_outputs = 2;
spec->out_pins[0] = 0x0b; /* speaker out */
@@ -8654,7 +8694,7 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
* Since desktop cards use pci_mmio, this can be used to determine
* whether or not to use these verbs instead of a separate bool.
*/
- if (spec->use_pci_mmio)
+ if (ca0132_use_pci_mmio(spec))
spec->desktop_init_verbs = ca0132_init_verbs1;
spec->spec_init_verbs = kcalloc(NUM_SPEC_VERBS,
sizeof(struct hda_verb),
@@ -8729,11 +8769,10 @@ static int patch_ca0132(struct hda_codec *codec)
spec->quirk = quirk->value;
else
spec->quirk = QUIRK_NONE;
-
- if (spec->quirk == QUIRK_SBZ)
+ if (ca0132_quirk(spec) == QUIRK_SBZ)
sbz_detect_quirk(codec);
- if (spec->quirk == QUIRK_ZXR_DBPRO)
+ if (ca0132_quirk(spec) == QUIRK_ZXR_DBPRO)
codec->patch_ops = dbpro_patch_ops;
else
codec->patch_ops = ca0132_patch_ops;
@@ -8746,7 +8785,7 @@ static int patch_ca0132(struct hda_codec *codec)
spec->num_mixers = 1;
/* Set which mixers each quirk uses. */
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
spec->mixers[0] = desktop_mixer;
snd_hda_codec_set_name(codec, "Sound Blaster Z");
@@ -8775,7 +8814,7 @@ static int patch_ca0132(struct hda_codec *codec)
}
/* Setup whether or not to use alt functions/controls/pci_mmio */
- switch (spec->quirk) {
+ switch (ca0132_quirk(spec)) {
case QUIRK_SBZ:
case QUIRK_R3D:
case QUIRK_AE5:
@@ -8796,6 +8835,7 @@ static int patch_ca0132(struct hda_codec *codec)
break;
}
+#ifdef CONFIG_PCI
if (spec->use_pci_mmio) {
spec->mem_base = pci_iomap(codec->bus->pci, 2, 0xC20);
if (spec->mem_base == NULL) {
@@ -8803,6 +8843,7 @@ static int patch_ca0132(struct hda_codec *codec)
spec->quirk = QUIRK_NONE;
}
}
+#endif
spec->base_init_verbs = ca0132_base_init_verbs;
spec->base_exit_verbs = ca0132_base_exit_verbs;
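The CONFIG_PCI stubs introduced near the top of this file use GCC statement expressions so they still behave as plain expressions; a sketch of why the quirk switches keep compiling either way:
	/* without CONFIG_PCI, ca0132_quirk(spec) is the constant QUIRK_NONE,
	 * so this collapses to the "default:" arm and the compiler can drop
	 * the PCI-only branches as dead code */
	switch (ca0132_quirk(spec)) {
	case QUIRK_SBZ:
		/* PCI/MMIO-specific handling */
		break;
	default:
		break;
	}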
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 950e02e71766..51cc6589443f 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -923,6 +923,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 67099cbb6be2..46f88dc7b7e8 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2142,7 +2142,7 @@ static int generic_hdmi_build_jack(struct hda_codec *codec, int pcm_idx)
strncat(hdmi_str, " Phantom",
sizeof(hdmi_str) - strlen(hdmi_str) - 1);
ret = snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str,
- phantom_jack);
+ phantom_jack, 0, NULL);
if (ret < 0)
return ret;
jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
@@ -2616,11 +2616,7 @@ static int intel_hsw_common_init(struct hda_codec *codec, hda_nid_t vendor_nid)
intel_haswell_enable_all_pins(codec, true);
intel_haswell_fixup_enable_dp12(codec);
- /* For Haswell/Broadwell, the controller is also in the power well and
- * can cover the codec power request, and so need not set this flag.
- */
- if (!is_haswell(codec) && !is_broadwell(codec))
- codec->core.link_power_control = 1;
+ codec->display_power_control = 1;
codec->patch_ops.set_power_state = haswell_set_power_state;
codec->depop_delay = 0;
@@ -2656,7 +2652,7 @@ static int patch_i915_byt_hdmi(struct hda_codec *codec)
/* For Valleyview/Cherryview, only the display codec is in the display
* power well and can use link_power ops to request/release the power.
*/
- codec->core.link_power_control = 1;
+ codec->display_power_control = 1;
codec->depop_delay = 0;
codec->auto_runtime_pm = 1;
@@ -3834,6 +3830,10 @@ HDA_CODEC_ENTRY(0x10de0020, "Tegra30 HDMI", patch_tegra_hdmi),
HDA_CODEC_ENTRY(0x10de0022, "Tegra114 HDMI", patch_tegra_hdmi),
HDA_CODEC_ENTRY(0x10de0028, "Tegra124 HDMI", patch_tegra_hdmi),
HDA_CODEC_ENTRY(0x10de0029, "Tegra210 HDMI/DP", patch_tegra_hdmi),
+HDA_CODEC_ENTRY(0x10de002d, "Tegra186 HDMI/DP0", patch_tegra_hdmi),
+HDA_CODEC_ENTRY(0x10de002e, "Tegra186 HDMI/DP1", patch_tegra_hdmi),
+HDA_CODEC_ENTRY(0x10de002f, "Tegra194 HDMI/DP2", patch_tegra_hdmi),
+HDA_CODEC_ENTRY(0x10de0030, "Tegra194 HDMI/DP3", patch_tegra_hdmi),
HDA_CODEC_ENTRY(0x10de0040, "GPU 40 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0041, "GPU 41 HDMI/DP", patch_nvhdmi),
HDA_CODEC_ENTRY(0x10de0042, "GPU 42 HDMI/DP", patch_nvhdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 970bc44a378b..a4f4a9dd488d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -388,6 +388,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0285:
case 0x10ec0298:
case 0x10ec0289:
+ case 0x10ec0300:
alc_update_coef_idx(codec, 0x10, 1<<9, 0);
break;
case 0x10ec0275:
@@ -2830,6 +2831,7 @@ enum {
ALC269_TYPE_ALC215,
ALC269_TYPE_ALC225,
ALC269_TYPE_ALC294,
+ ALC269_TYPE_ALC300,
ALC269_TYPE_ALC700,
};
@@ -2864,6 +2866,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
case ALC269_TYPE_ALC215:
case ALC269_TYPE_ALC225:
case ALC269_TYPE_ALC294:
+ case ALC269_TYPE_ALC300:
case ALC269_TYPE_ALC700:
ssids = alc269_ssids;
break;
@@ -4985,9 +4988,18 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
{ 0x19, 0x21a11010 }, /* dock mic */
{ }
};
+ /* Assure the speaker pin to be coupled with DAC NID 0x03; otherwise
+ * the speaker output becomes too low by some reason on Thinkpads with
+ * ALC298 codec
+ */
+ static hda_nid_t preferred_pairs[] = {
+ 0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
+ 0
+ };
struct alc_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->gen.preferred_dacs = preferred_pairs;
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
snd_hda_apply_pincfgs(codec, pincfgs);
} else if (action == HDA_FIXUP_ACT_INIT) {
@@ -5358,6 +5370,76 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
spec->gen.preferred_dacs = preferred_pairs;
}
+/* The DAC on NID 0x03 introduces click/pop noise on headphones, so invalidate it */
+static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ snd_hda_override_wcaps(codec, 0x03, 0);
+}
+
+static const struct hda_jack_keymap alc_headset_btn_keymap[] = {
+ { SND_JACK_BTN_0, KEY_PLAYPAUSE },
+ { SND_JACK_BTN_1, KEY_VOICECOMMAND },
+ { SND_JACK_BTN_2, KEY_VOLUMEUP },
+ { SND_JACK_BTN_3, KEY_VOLUMEDOWN },
+ {}
+};
+
+static void alc_headset_btn_callback(struct hda_codec *codec,
+ struct hda_jack_callback *jack)
+{
+ int report = 0;
+
+ if (jack->unsol_res & (7 << 13))
+ report |= SND_JACK_BTN_0;
+
+ if (jack->unsol_res & (1 << 16 | 3 << 8))
+ report |= SND_JACK_BTN_1;
+
+ /* Volume up key */
+ if (jack->unsol_res & (7 << 23))
+ report |= SND_JACK_BTN_2;
+
+ /* Volume down key */
+ if (jack->unsol_res & (7 << 10))
+ report |= SND_JACK_BTN_3;
+
+ jack->jack->button_state = report;
+}
+
+static void alc_fixup_headset_jack(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_jack_detect_enable_callback(codec, 0x55,
+ alc_headset_btn_callback);
+ snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
+ SND_JACK_HEADSET, alc_headset_btn_keymap);
+ break;
+ case HDA_FIXUP_ACT_INIT:
+ switch (codec->core.vendor_id) {
+ case 0x10ec0225:
+ case 0x10ec0295:
+ case 0x10ec0299:
+ alc_write_coef_idx(codec, 0x48, 0xd011);
+ alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
+ alc_update_coef_idx(codec, 0x44, 0x007f << 8, 0x0045 << 8);
+ break;
+ case 0x10ec0236:
+ case 0x10ec0256:
+ alc_write_coef_idx(codec, 0x48, 0xd011);
+ alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
+ break;
+ }
+ break;
+ }
+}
+
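As a worked example of the unsolicited-response decoding above (a sketch only; the helper name is hypothetical and not part of the patch): a response of 0x2000 sets a bit inside the 7<<13 field, so only SND_JACK_BTN_0 is reported, which the keymap translates to KEY_PLAYPAUSE.

    /* illustrative decode, mirroring alc_headset_btn_callback() */
    static int demo_decode_unsol(unsigned int unsol_res)
    {
    	int report = 0;

    	if (unsol_res & (7 << 13))		/* play/pause */
    		report |= SND_JACK_BTN_0;
    	if (unsol_res & (1 << 16 | 3 << 8))	/* voice command */
    		report |= SND_JACK_BTN_1;
    	if (unsol_res & (7 << 23))		/* volume up */
    		report |= SND_JACK_BTN_2;
    	if (unsol_res & (7 << 10))		/* volume down */
    		report |= SND_JACK_BTN_3;
    	return report;	/* demo_decode_unsol(0x2000) == SND_JACK_BTN_0 */
    }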
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"
@@ -5368,9 +5450,6 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
hda_fixup_thinkpad_acpi(codec, fix, action);
}
-/* for dell wmi mic mute led */
-#include "dell_wmi_helper.c"
-
/* for alc295_fixup_hp_top_speakers */
#include "hp_x360_helper.c"
@@ -5448,7 +5527,7 @@ enum {
ALC292_FIXUP_TPT440_DOCK,
ALC292_FIXUP_TPT440,
ALC283_FIXUP_HEADSET_MIC,
- ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
+ ALC255_FIXUP_MIC_MUTE_LED,
ALC282_FIXUP_ASPIRE_V5_PINS,
ALC280_FIXUP_HP_GPIO4,
ALC286_FIXUP_HP_GPIO_LED,
@@ -5493,8 +5572,16 @@ enum {
ALC298_FIXUP_TPT470_DOCK,
ALC255_FIXUP_DUMMY_LINEOUT_VERB,
ALC255_FIXUP_DELL_HEADSET_MIC,
+ ALC256_FIXUP_HUAWEI_MBXP_PINS,
ALC295_FIXUP_HP_X360,
ALC221_FIXUP_HP_HEADSET_MIC,
+ ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+ ALC295_FIXUP_HP_AUTO_MUTE,
+ ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
+ ALC294_FIXUP_ASUS_MIC,
+ ALC294_FIXUP_ASUS_HEADSET_MIC,
+ ALC294_FIXUP_ASUS_SPK,
+ ALC225_FIXUP_HEADSET_JACK,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -5659,6 +5746,8 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC269_FIXUP_HP_MUTE_LED_MIC3] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_hp_mute_led_mic3,
+ .chained = true,
+ .chain_id = ALC295_FIXUP_HP_AUTO_MUTE
},
[ALC269_FIXUP_HP_GPIO_LED] = {
.type = HDA_FIXUP_FUNC,
@@ -5740,7 +5829,7 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_mode,
.chained = true,
- .chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED
+ .chain_id = ALC255_FIXUP_MIC_MUTE_LED
},
[ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC] = {
.type = HDA_FIXUP_FUNC,
@@ -5764,6 +5853,24 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MIC
},
+ [ALC256_FIXUP_HUAWEI_MBXP_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ {0x12, 0x90a60130},
+ {0x13, 0x40000000},
+ {0x14, 0x90170110},
+ {0x18, 0x411111f0},
+ {0x19, 0x04a11040},
+ {0x1a, 0x411111f0},
+ {0x1b, 0x90170112},
+ {0x1d, 0x40759a05},
+ {0x1e, 0x411111f0},
+ {0x21, 0x04211020},
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC255_FIXUP_MIC_MUTE_LED
+ },
[ALC269_FIXUP_ASUS_X101_FUNC] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc269_fixup_x101_headset_mic,
@@ -5966,7 +6073,7 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_mode_alc255,
.chained = true,
- .chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED
+ .chain_id = ALC255_FIXUP_MIC_MUTE_LED
},
[ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC] = {
.type = HDA_FIXUP_FUNC,
@@ -6001,9 +6108,9 @@ static const struct hda_fixup alc269_fixups[] = {
{ },
},
},
- [ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED] = {
+ [ALC255_FIXUP_MIC_MUTE_LED] = {
.type = HDA_FIXUP_FUNC,
- .v.func = alc_fixup_dell_wmi,
+ .v.func = snd_hda_gen_fixup_micmute_led,
},
[ALC282_FIXUP_ASPIRE_V5_PINS] = {
.type = HDA_FIXUP_PINS,
@@ -6062,7 +6169,7 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_mode_dell_alc288,
.chained = true,
- .chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED
+ .chain_id = ALC255_FIXUP_MIC_MUTE_LED
},
[ALC288_FIXUP_DELL1_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_PINS,
@@ -6362,6 +6469,59 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MIC
},
+ [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_invalidate_dacs,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
+ },
+ [ALC295_FIXUP_HP_AUTO_MUTE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_auto_mute_via_amp,
+ },
+ [ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC
+ },
+ [ALC294_FIXUP_ASUS_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x13, 0x90a60160 }, /* use as internal mic */
+ { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
+ [ALC294_FIXUP_ASUS_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
+ [ALC294_FIXUP_ASUS_SPK] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Set EAPD high */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x40 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x8800 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+ },
+ [ALC225_FIXUP_HEADSET_JACK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_jack,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6376,7 +6536,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+ SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -6490,6 +6654,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6500,6 +6665,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -6532,6 +6698,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -6595,6 +6762,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ SND_PCI_QUIRK(0x19e5, 0x3200, "Huawei MBX", ALC255_FIXUP_MIC_MUTE_LED),
+ SND_PCI_QUIRK(0x19e5, 0x3201, "Huawei MBX", ALC255_FIXUP_MIC_MUTE_LED),
+ SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MBXP", ALC256_FIXUP_HUAWEI_MBXP_PINS),
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
#if 0
@@ -6720,7 +6890,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "alc255-dell2"},
{.id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc293-dell1"},
{.id = ALC283_FIXUP_HEADSET_MIC, .name = "alc283-headset"},
- {.id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, .name = "alc255-dell-mute"},
+ {.id = ALC255_FIXUP_MIC_MUTE_LED, .name = "alc255-dell-mute"},
{.id = ALC282_FIXUP_ASPIRE_V5_PINS, .name = "aspire-v5"},
{.id = ALC280_FIXUP_HP_GPIO4, .name = "hp-gpio4"},
{.id = ALC286_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
@@ -6759,6 +6929,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
{.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
{.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
+ {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-sense-combo"},
{}
};
#define ALC225_STANDARD_PINS \
@@ -7034,6 +7205,15 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60130},
{0x19, 0x03a11020},
{0x21, 0x0321101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+ {0x12, 0x90a60130},
+ {0x14, 0x90170110},
+ {0x19, 0x04a11040},
+ {0x21, 0x04211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},
+ {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60120},
{0x14, 0x90170110},
@@ -7097,6 +7277,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC292_STANDARD_PINS,
{0x13, 0x90a60140}),
+ SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_MIC,
+ {0x14, 0x90170110},
+ {0x1b, 0x90a70130},
+ {0x21, 0x04211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},
+ {0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC295_STANDARD_PINS,
{0x17, 0x21014020},
@@ -7169,6 +7357,37 @@ static void alc269_fill_coef(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x4, 0, 1<<11);
}
+static void alc294_hp_init(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ int i, val;
+
+ if (!hp_pin)
+ return;
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ msleep(100);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ alc_update_coef_idx(codec, 0x6f, 0x000f, 0); /* Set HP depop to manual mode */
+ alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
+
+ /* Wait for depop procedure finish */
+ val = alc_read_coefex_idx(codec, 0x58, 0x01);
+ for (i = 0; i < 20 && val & 0x0080; i++) {
+ msleep(50);
+ val = alc_read_coefex_idx(codec, 0x58, 0x01);
+ }
+ /* Set HP depop to auto mode */
+ alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b);
+ msleep(50);
+}
+
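The depop wait above is a bounded poll: bit 7 of coef 0x58, index 0x01 stays set while the depop procedure runs, and the loop gives up after 20 iterations of 50 ms, i.e. roughly one second in the worst case. A minimal refactor of the same wait (the helper name is hypothetical, not in the patch) could read:

    static bool alc294_wait_depop_done(struct hda_codec *codec)
    {
    	int i;

    	for (i = 0; i < 20; i++) {
    		if (!(alc_read_coefex_idx(codec, 0x58, 0x01) & 0x0080))
    			return true;	/* depop finished */
    		msleep(50);
    	}
    	return false;	/* ~1 s elapsed; caller proceeds anyway */
    }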
/*
*/
static int patch_alc269(struct hda_codec *codec)
@@ -7294,6 +7513,11 @@ static int patch_alc269(struct hda_codec *codec)
spec->codec_variant = ALC269_TYPE_ALC294;
spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
+ alc294_hp_init(codec);
+ break;
+ case 0x10ec0300:
+ spec->codec_variant = ALC269_TYPE_ALC300;
+ spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
break;
case 0x10ec0700:
case 0x10ec0701:
@@ -7301,6 +7525,7 @@ static int patch_alc269(struct hda_codec *codec)
spec->codec_variant = ALC269_TYPE_ALC700;
spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
+ alc294_hp_init(codec);
break;
}
@@ -8405,6 +8630,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269),
HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 568575b72f2f..4089feb8c68e 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -3,12 +3,11 @@
* to be included from codec driver
*/
-#if IS_ENABLED(CONFIG_THINKPAD_ACPI)
+#if IS_ENABLED(CONFIG_THINKPAD_ACPI) && IS_REACHABLE(CONFIG_LEDS_TRIGGER_AUDIO)
#include <linux/acpi.h>
-#include <linux/thinkpad_acpi.h>
+#include <linux/leds.h>
-static int (*led_set_func)(int, bool);
static void (*old_vmaster_hook)(void *, int);
static bool is_thinkpad(struct hda_codec *codec)
@@ -23,50 +22,20 @@ static void update_tpacpi_mute_led(void *private_data, int enabled)
if (old_vmaster_hook)
old_vmaster_hook(private_data, enabled);
- if (led_set_func)
- led_set_func(TPACPI_LED_MUTE, !enabled);
-}
-
-static void update_tpacpi_micmute(struct hda_codec *codec)
-{
- struct hda_gen_spec *spec = codec->spec;
-
- led_set_func(TPACPI_LED_MICMUTE, spec->micmute_led.led_value);
+ ledtrig_audio_set(LED_AUDIO_MUTE, enabled ? LED_OFF : LED_ON);
}
static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct hda_gen_spec *spec = codec->spec;
- bool removefunc = false;
if (action == HDA_FIXUP_ACT_PROBE) {
if (!is_thinkpad(codec))
return;
- if (!led_set_func)
- led_set_func = symbol_request(tpacpi_led_set);
- if (!led_set_func) {
- codec_warn(codec,
- "Failed to find thinkpad-acpi symbol tpacpi_led_set\n");
- return;
- }
-
- removefunc = true;
- if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
- old_vmaster_hook = spec->vmaster_mute.hook;
- spec->vmaster_mute.hook = update_tpacpi_mute_led;
- removefunc = false;
- }
- if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0 &&
- !snd_hda_gen_add_micmute_led(codec,
- update_tpacpi_micmute))
- removefunc = false;
- }
-
- if (led_set_func && (action == HDA_FIXUP_ACT_FREE || removefunc)) {
- symbol_put(tpacpi_led_set);
- led_set_func = NULL;
- old_vmaster_hook = NULL;
+ old_vmaster_hook = spec->vmaster_mute.hook;
+ spec->vmaster_mute.hook = update_tpacpi_mute_led;
+ snd_hda_gen_fixup_micmute_led(codec, fix, action);
}
}
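The conversion above replaces the symbol_request() dance against thinkpad-acpi with the generic audio LED trigger, so the codec driver no longer needs to know which platform driver owns the LED. Any consumer just flips the trigger state; a minimal sketch, assuming CONFIG_LEDS_TRIGGER_AUDIO is reachable as in the hunk (the function name is illustrative):

    #include <linux/leds.h>

    static void example_set_audio_leds(bool muted, bool mic_muted)
    {
    	/* every LED bound to the "audio-mute" / "audio-micmute"
    	 * triggers follows these calls
    	 */
    	ledtrig_audio_set(LED_AUDIO_MUTE, muted ? LED_ON : LED_OFF);
    	ledtrig_audio_set(LED_AUDIO_MICMUTE, mic_muted ? LED_ON : LED_OFF);
    }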
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 1bff4b1b39cd..ba99ff0e93e0 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -30,6 +30,7 @@
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -4092,15 +4093,16 @@ static int snd_hdsp_channel_info(struct snd_pcm_substream *substream,
struct snd_pcm_channel_info *info)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
- int mapped_channel;
+ unsigned int channel = info->channel;
- if (snd_BUG_ON(info->channel >= hdsp->max_channels))
+ if (snd_BUG_ON(channel >= hdsp->max_channels))
return -EINVAL;
+ channel = array_index_nospec(channel, hdsp->max_channels);
- if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
+ if (hdsp->channel_map[channel] < 0)
return -EINVAL;
- info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
+ info->offset = hdsp->channel_map[channel] * HDSP_CHANNEL_BUFFER_BYTES;
info->first = 0;
info->step = 32;
return 0;
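The hdsp change is the standard Spectre-v1 hardening pattern: the user-controlled index is bounds-checked and then clamped with array_index_nospec() before it indexes the channel map, so a mispredicted branch cannot speculatively read out of bounds. The shape of the pattern in isolation (a sketch; names are illustrative):

    #include <linux/nospec.h>

    static int lookup(const int *map, size_t len, unsigned int idx)
    {
    	if (idx >= len)				/* architectural bounds check */
    		return -EINVAL;
    	idx = array_index_nospec(idx, len);	/* clamp under speculation too */
    	return map[idx];
    }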
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 48dd44f8e914..d692e4070167 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -908,7 +908,7 @@ static void detect_byte_swap(struct snd_pmac *chip)
/* it seems that Keylargo can't byte-swap */
for (mio = chip->node->parent; mio; mio = mio->parent) {
- if (strcmp(mio->name, "mac-io") == 0) {
+ if (of_node_name_eq(mio, "mac-io")) {
if (of_device_is_compatible(mio, "Keylargo"))
chip->can_byte_swap = 0;
break;
@@ -1313,7 +1313,7 @@ int snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
} else if (chip->is_pbook_G3) {
struct device_node* mio;
for (mio = chip->node->parent; mio; mio = mio->parent) {
- if (strcmp(mio->name, "mac-io") == 0) {
+ if (of_node_name_eq(mio, "mac-io")) {
struct resource r;
if (of_address_to_resource(mio, 0, &r) == 0)
chip->macio_base =
diff --git a/sound/ppc/tumbler.c b/sound/ppc/tumbler.c
index 0779a2912237..6d7ffffcce95 100644
--- a/sound/ppc/tumbler.c
+++ b/sound/ppc/tumbler.c
@@ -1365,8 +1365,8 @@ int snd_pmac_tumbler_init(struct snd_pmac *chip)
mix->anded_reset = 0;
mix->reset_on_sleep = 1;
- for (np = chip->node->child; np; np = np->sibling) {
- if (!strcmp(np->name, "sound")) {
+ for_each_child_of_node(chip->node, np) {
+ if (of_node_name_eq(np, "sound")) {
if (of_get_property(np, "has-anded-reset", NULL))
mix->anded_reset = 1;
if (of_get_property(np, "layout-id", NULL))
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 1cf11cf51e1d..6592a422a047 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -46,13 +46,11 @@ source "sound/soc/atmel/Kconfig"
source "sound/soc/au1x/Kconfig"
source "sound/soc/bcm/Kconfig"
source "sound/soc/cirrus/Kconfig"
-source "sound/soc/davinci/Kconfig"
source "sound/soc/dwc/Kconfig"
source "sound/soc/fsl/Kconfig"
source "sound/soc/hisilicon/Kconfig"
source "sound/soc/jz4740/Kconfig"
source "sound/soc/nuc900/Kconfig"
-source "sound/soc/omap/Kconfig"
source "sound/soc/kirkwood/Kconfig"
source "sound/soc/img/Kconfig"
source "sound/soc/intel/Kconfig"
@@ -70,9 +68,11 @@ source "sound/soc/sti/Kconfig"
source "sound/soc/stm/Kconfig"
source "sound/soc/sunxi/Kconfig"
source "sound/soc/tegra/Kconfig"
+source "sound/soc/ti/Kconfig"
source "sound/soc/txx9/Kconfig"
source "sound/soc/uniphier/Kconfig"
source "sound/soc/ux500/Kconfig"
+source "sound/soc/xilinx/Kconfig"
source "sound/soc/xtensa/Kconfig"
source "sound/soc/zte/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 62a5f87c3cfc..48c48c1c893c 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -30,7 +30,6 @@ obj-$(CONFIG_SND_SOC) += atmel/
obj-$(CONFIG_SND_SOC) += au1x/
obj-$(CONFIG_SND_SOC) += bcm/
obj-$(CONFIG_SND_SOC) += cirrus/
-obj-$(CONFIG_SND_SOC) += davinci/
obj-$(CONFIG_SND_SOC) += dwc/
obj-$(CONFIG_SND_SOC) += fsl/
obj-$(CONFIG_SND_SOC) += hisilicon/
@@ -41,7 +40,6 @@ obj-$(CONFIG_SND_SOC) += mediatek/
obj-$(CONFIG_SND_SOC) += meson/
obj-$(CONFIG_SND_SOC) += mxs/
obj-$(CONFIG_SND_SOC) += nuc900/
-obj-$(CONFIG_SND_SOC) += omap/
obj-$(CONFIG_SND_SOC) += kirkwood/
obj-$(CONFIG_SND_SOC) += pxa/
obj-$(CONFIG_SND_SOC) += qcom/
@@ -54,8 +52,10 @@ obj-$(CONFIG_SND_SOC) += sti/
obj-$(CONFIG_SND_SOC) += stm/
obj-$(CONFIG_SND_SOC) += sunxi/
obj-$(CONFIG_SND_SOC) += tegra/
+obj-$(CONFIG_SND_SOC) += ti/
obj-$(CONFIG_SND_SOC) += txx9/
obj-$(CONFIG_SND_SOC) += uniphier/
obj-$(CONFIG_SND_SOC) += ux500/
+obj-$(CONFIG_SND_SOC) += xilinx/
obj-$(CONFIG_SND_SOC) += xtensa/
obj-$(CONFIG_SND_SOC) += zte/
diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
index 58c1dcb4d255..33ebec990c2f 100644
--- a/sound/soc/amd/Kconfig
+++ b/sound/soc/amd/Kconfig
@@ -19,3 +19,9 @@ config SND_SOC_AMD_CZ_RT5645_MACH
depends on SND_SOC_AMD_ACP && I2C
help
This option enables machine driver for rt5645.
+
+config SND_SOC_AMD_ACP3x
+ tristate "AMD Audio Coprocessor-v3.x support"
+ depends on X86 && PCI
+ help
+ This option enables ACP v3.x I2S support on AMD platforms.
diff --git a/sound/soc/amd/Makefile b/sound/soc/amd/Makefile
index 79b0622fa5d3..8e1c571c3161 100644
--- a/sound/soc/amd/Makefile
+++ b/sound/soc/amd/Makefile
@@ -5,3 +5,4 @@ snd-soc-acp-rt5645-mach-objs := acp-rt5645.o
obj-$(CONFIG_SND_SOC_AMD_ACP) += acp_audio_dma.o
obj-$(CONFIG_SND_SOC_AMD_CZ_DA7219MX98357_MACH) += snd-soc-acp-da7219mx98357-mach.o
obj-$(CONFIG_SND_SOC_AMD_CZ_RT5645_MACH) += snd-soc-acp-rt5645-mach.o
+obj-$(CONFIG_SND_SOC_AMD_ACP3x) += raven/
diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c
index 3f813ea5210a..a5daad973ce5 100644
--- a/sound/soc/amd/acp-da7219-max98357a.c
+++ b/sound/soc/amd/acp-da7219-max98357a.c
@@ -403,7 +403,7 @@ static struct regulator_config acp_da7219_cfg = {
static struct regulator_ops acp_da7219_ops = {
};
-static struct regulator_desc acp_da7219_desc = {
+static const struct regulator_desc acp_da7219_desc = {
.name = "reg-fixed-1.8V",
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index cdebab2f8ce5..f4011bebc7ec 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -303,11 +303,10 @@ static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio, u32 size,
}
/* Create page table entries in ACP SRAM for the allocated memory */
-static void acp_pte_config(void __iomem *acp_mmio, struct page *pg,
+static void acp_pte_config(void __iomem *acp_mmio, dma_addr_t addr,
u16 num_of_pages, u32 pte_offset)
{
u16 page_idx;
- u64 addr;
u32 low;
u32 high;
u32 offset;
@@ -317,7 +316,6 @@ static void acp_pte_config(void __iomem *acp_mmio, struct page *pg,
/* Load the low address of page int ACP SRAM through SRBM */
acp_reg_write((offset + (page_idx * 8)),
acp_mmio, mmACP_SRBM_Targ_Idx_Addr);
- addr = page_to_phys(pg);
low = lower_32_bits(addr);
high = upper_32_bits(addr);
@@ -333,7 +331,7 @@ static void acp_pte_config(void __iomem *acp_mmio, struct page *pg,
acp_reg_write(high, acp_mmio, mmACP_SRBM_Targ_Idx_Data);
/* Move to next physically contiguous page */
- pg++;
+ addr += PAGE_SIZE;
}
}
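This acp_pte_config() rework matters because page_to_phys() yields a CPU physical address, which is not necessarily what the ACP's DMA engine must be programmed with once an IOMMU or similar translation sits in between. Using the dma_addr_t from the preallocated buffer, and stepping it by PAGE_SIZE, keeps the SRAM page table in the device's address space. Roughly (a sketch; write_pte() is a hypothetical stand-in for the register writes in the hunk):

    dma_addr_t addr = substream->dma_buffer.addr;	/* device-visible address */
    u16 i;

    for (i = 0; i < num_of_pages; i++) {
    	write_pte(i, lower_32_bits(addr), upper_32_bits(addr));
    	addr += PAGE_SIZE;	/* buffer is contiguous in DMA space */
    }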
@@ -343,7 +341,7 @@ static void config_acp_dma(void __iomem *acp_mmio,
{
u16 ch_acp_sysmem, ch_acp_i2s;
- acp_pte_config(acp_mmio, rtd->pg, rtd->num_of_pages,
+ acp_pte_config(acp_mmio, rtd->dma_addr, rtd->num_of_pages,
rtd->pte_offset);
if (rtd->direction == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -850,7 +848,6 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
int status;
uint64_t size;
u32 val = 0;
- struct page *pg;
struct snd_pcm_runtime *runtime;
struct audio_substream_data *rtd;
struct snd_soc_pcm_runtime *prtd = substream->private_data;
@@ -986,16 +983,14 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
return status;
memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
- pg = virt_to_page(substream->dma_buffer.area);
- if (pg) {
+ if (substream->dma_buffer.area) {
acp_set_sram_bank_state(rtd->acp_mmio, 0, true);
/* Save for runtime private data */
- rtd->pg = pg;
+ rtd->dma_addr = substream->dma_buffer.addr;
rtd->order = get_order(size);
/* Fill the page table entries in ACP SRAM */
- rtd->pg = pg;
rtd->size = size;
rtd->num_of_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
rtd->direction = substream->stream;
@@ -1151,18 +1146,21 @@ static int acp_dma_new(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd,
DRV_NAME);
struct audio_drv_data *adata = dev_get_drvdata(component->dev);
+ struct device *parent = component->dev->parent;
switch (adata->asic_type) {
case CHIP_STONEY:
ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
SNDRV_DMA_TYPE_DEV,
- NULL, ST_MIN_BUFFER,
+ parent,
+ ST_MIN_BUFFER,
ST_MAX_BUFFER);
break;
default:
ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
SNDRV_DMA_TYPE_DEV,
- NULL, MIN_BUFFER,
+ parent,
+ MIN_BUFFER,
MAX_BUFFER);
break;
}
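Passing the component's parent instead of NULL matters for the same reason: the preallocated buffer is now mapped against the device that actually performs the DMA (the ACP's parent PCI device), so dma_buffer.addr is valid for it. The call shape, as used above (only the local variable name is illustrative):

    struct device *dma_dev = component->dev->parent;	/* the PCI parent does the DMA */

    ret = snd_pcm_lib_preallocate_pages_for_all(rtd->pcm, SNDRV_DMA_TYPE_DEV,
    					    dma_dev, MIN_BUFFER, MAX_BUFFER);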
diff --git a/sound/soc/amd/acp.h b/sound/soc/amd/acp.h
index dbbb1a85638d..e5ab6c6040a6 100644
--- a/sound/soc/amd/acp.h
+++ b/sound/soc/amd/acp.h
@@ -123,7 +123,7 @@ enum acp_dma_priority_level {
};
struct audio_substream_data {
- struct page *pg;
+ dma_addr_t dma_addr;
unsigned int order;
u16 num_of_pages;
u16 i2s_instance;
diff --git a/sound/soc/amd/raven/Makefile b/sound/soc/amd/raven/Makefile
new file mode 100644
index 000000000000..108d1acf189b
--- /dev/null
+++ b/sound/soc/amd/raven/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0+
+# Raven Ridge platform Support
+snd-pci-acp3x-objs := pci-acp3x.o
+snd-acp3x-pcm-dma-objs := acp3x-pcm-dma.o
+obj-$(CONFIG_SND_SOC_AMD_ACP3x) += snd-pci-acp3x.o
+obj-$(CONFIG_SND_SOC_AMD_ACP3x) += snd-acp3x-pcm-dma.o
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
new file mode 100644
index 000000000000..022a8912c8a2
--- /dev/null
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// AMD ALSA SoC PCM Driver
+//
+//Copyright 2016 Advanced Micro Devices, Inc.
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "acp3x.h"
+
+#define DRV_NAME "acp3x-i2s-audio"
+
+struct i2s_dev_data {
+ bool tdm_mode;
+ unsigned int i2s_irq;
+ u32 tdm_fmt;
+ void __iomem *acp3x_base;
+ struct snd_pcm_substream *play_stream;
+ struct snd_pcm_substream *capture_stream;
+};
+
+struct i2s_stream_instance {
+ u16 num_pages;
+ u16 channels;
+ u32 xfer_resolution;
+ struct page *pg;
+ void __iomem *acp3x_base;
+};
+
+static const struct snd_pcm_hardware acp3x_pcm_hardware_playback = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .rate_min = 8000,
+ .rate_max = 96000,
+ .buffer_bytes_max = PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE,
+ .period_bytes_min = PLAYBACK_MIN_PERIOD_SIZE,
+ .period_bytes_max = PLAYBACK_MAX_PERIOD_SIZE,
+ .periods_min = PLAYBACK_MIN_NUM_PERIODS,
+ .periods_max = PLAYBACK_MAX_NUM_PERIODS,
+};
+
+static const struct snd_pcm_hardware acp3x_pcm_hardware_capture = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ .buffer_bytes_max = CAPTURE_MAX_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE,
+ .period_bytes_min = CAPTURE_MIN_PERIOD_SIZE,
+ .period_bytes_max = CAPTURE_MAX_PERIOD_SIZE,
+ .periods_min = CAPTURE_MIN_NUM_PERIODS,
+ .periods_max = CAPTURE_MAX_NUM_PERIODS,
+};
+
+static int acp3x_power_on(void __iomem *acp3x_base, bool on)
+{
+ u16 val, mask;
+ u32 timeout;
+
+ if (on) {
+ val = 1;
+ mask = ACP3x_POWER_ON;
+ } else {
+ val = 0;
+ mask = ACP3x_POWER_OFF;
+ }
+
+ rv_writel(val, acp3x_base + mmACP_PGFSM_CONTROL);
+ timeout = 0;
+ while (true) {
+ val = rv_readl(acp3x_base + mmACP_PGFSM_STATUS);
+ if ((val & ACP3x_POWER_OFF_IN_PROGRESS) == mask)
+ break;
+ if (timeout > 100) {
+ pr_err("ACP3x power state change failure\n");
+ return -ENODEV;
+ }
+ timeout++;
+ cpu_relax();
+ }
+ return 0;
+}
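The status check above is easy to misread: ACP3x_POWER_OFF_IN_PROGRESS (0x03) doubles as the AND mask, so the loop is really comparing the two low status bits against 0x00 (powered on) or 0x02 (powered off). A hypothetical predicate making that explicit, using only the defines from acp3x.h:

    /* PGFSM status, low two bits (per acp3x.h):
     *   0x0 on, 0x1 powering on, 0x2 off, 0x3 powering off
     */
    static bool acp3x_power_settled(void __iomem *base, bool on)
    {
    	u32 val = rv_readl(base + mmACP_PGFSM_STATUS);

    	return (val & ACP3x_POWER_OFF_IN_PROGRESS) ==
    	       (on ? ACP3x_POWER_ON : ACP3x_POWER_OFF);
    }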
+
+static int acp3x_reset(void __iomem *acp3x_base)
+{
+ u32 val, timeout;
+
+ rv_writel(1, acp3x_base + mmACP_SOFT_RESET);
+ timeout = 0;
+ while (true) {
+ val = rv_readl(acp3x_base + mmACP_SOFT_RESET);
+ if ((val & ACP3x_SOFT_RESET__SoftResetAudDone_MASK) ||
+ timeout > 100) {
+ if (val & ACP3x_SOFT_RESET__SoftResetAudDone_MASK)
+ break;
+ return -ENODEV;
+ }
+ timeout++;
+ cpu_relax();
+ }
+
+ rv_writel(0, acp3x_base + mmACP_SOFT_RESET);
+ timeout = 0;
+ while (true) {
+ val = rv_readl(acp3x_base + mmACP_SOFT_RESET);
+ if (!val || timeout > 100) {
+ if (!val)
+ break;
+ return -ENODEV;
+ }
+ timeout++;
+ cpu_relax();
+ }
+ return 0;
+}
+
+static int acp3x_init(void __iomem *acp3x_base)
+{
+ int ret;
+
+ /* power on */
+ ret = acp3x_power_on(acp3x_base, true);
+ if (ret) {
+ pr_err("ACP3x power on failed\n");
+ return ret;
+ }
+ /* Reset */
+ ret = acp3x_reset(acp3x_base);
+ if (ret) {
+ pr_err("ACP3x reset failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static int acp3x_deinit(void __iomem *acp3x_base)
+{
+ int ret;
+
+ /* Reset */
+ ret = acp3x_reset(acp3x_base);
+ if (ret) {
+ pr_err("ACP3x reset failed\n");
+ return ret;
+ }
+ /* power off */
+ ret = acp3x_power_on(acp3x_base, false);
+ if (ret) {
+ pr_err("ACP3x power off failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static irqreturn_t i2s_irq_handler(int irq, void *dev_id)
+{
+ u16 play_flag, cap_flag;
+ u32 val;
+ struct i2s_dev_data *rv_i2s_data = dev_id;
+
+ if (!rv_i2s_data)
+ return IRQ_NONE;
+
+ play_flag = 0;
+ cap_flag = 0;
+ val = rv_readl(rv_i2s_data->acp3x_base + mmACP_EXTERNAL_INTR_STAT);
+ if ((val & BIT(BT_TX_THRESHOLD)) && rv_i2s_data->play_stream) {
+ rv_writel(BIT(BT_TX_THRESHOLD), rv_i2s_data->acp3x_base +
+ mmACP_EXTERNAL_INTR_STAT);
+ snd_pcm_period_elapsed(rv_i2s_data->play_stream);
+ play_flag = 1;
+ }
+
+ if ((val & BIT(BT_RX_THRESHOLD)) && rv_i2s_data->capture_stream) {
+ rv_writel(BIT(BT_RX_THRESHOLD), rv_i2s_data->acp3x_base +
+ mmACP_EXTERNAL_INTR_STAT);
+ snd_pcm_period_elapsed(rv_i2s_data->capture_stream);
+ cap_flag = 1;
+ }
+
+ if (play_flag | cap_flag)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
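Note how the handler acknowledges each source: it writes the same bit back into mmACP_EXTERNAL_INTR_STAT, i.e. the status register apparently behaves as write-1-to-clear, and IRQ_HANDLED is returned only when one of the two stream bits was actually pending. A hypothetical ack helper for the same pattern:

    static void acp3x_ack_intr(void __iomem *acp3x_base, u32 bit)
    {
    	/* STAT is write-1-to-clear: writing the bit drops it */
    	rv_writel(bit, acp3x_base + mmACP_EXTERNAL_INTR_STAT);
    }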
+
+static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
+{
+ u16 page_idx;
+ u64 addr;
+ u32 low, high, val, acp_fifo_addr;
+ struct page *pg = rtd->pg;
+
+ /* 8 scratch registers used to map one 64 bit address */
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+ val = 0;
+ else
+ val = rtd->num_pages * 8;
+
+ /* Group Enable */
+ rv_writel(ACP_SRAM_PTE_OFFSET | BIT(31), rtd->acp3x_base +
+ mmACPAXI2AXI_ATU_BASE_ADDR_GRP_1);
+ rv_writel(PAGE_SIZE_4K_ENABLE, rtd->acp3x_base +
+ mmACPAXI2AXI_ATU_PAGE_SIZE_GRP_1);
+
+ for (page_idx = 0; page_idx < rtd->num_pages; page_idx++) {
+ /* Load the low address of page int ACP SRAM through SRBM */
+ addr = page_to_phys(pg);
+ low = lower_32_bits(addr);
+ high = upper_32_bits(addr);
+
+ rv_writel(low, rtd->acp3x_base + mmACP_SCRATCH_REG_0 + val);
+ high |= BIT(31);
+ rv_writel(high, rtd->acp3x_base + mmACP_SCRATCH_REG_0 + val
+ + 4);
+ /* Move to next physically contiguous page */
+ val += 8;
+ pg++;
+ }
+
+ if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ /* Config ringbuffer */
+ rv_writel(MEM_WINDOW_START, rtd->acp3x_base +
+ mmACP_BT_TX_RINGBUFADDR);
+ rv_writel(MAX_BUFFER, rtd->acp3x_base +
+ mmACP_BT_TX_RINGBUFSIZE);
+ rv_writel(DMA_SIZE, rtd->acp3x_base + mmACP_BT_TX_DMA_SIZE);
+
+ /* Config audio fifo */
+ acp_fifo_addr = ACP_SRAM_PTE_OFFSET + (rtd->num_pages * 8)
+ + PLAYBACK_FIFO_ADDR_OFFSET;
+ rv_writel(acp_fifo_addr, rtd->acp3x_base +
+ mmACP_BT_TX_FIFOADDR);
+ rv_writel(FIFO_SIZE, rtd->acp3x_base + mmACP_BT_TX_FIFOSIZE);
+ } else {
+ /* Config ringbuffer */
+ rv_writel(MEM_WINDOW_START + MAX_BUFFER, rtd->acp3x_base +
+ mmACP_BT_RX_RINGBUFADDR);
+ rv_writel(MAX_BUFFER, rtd->acp3x_base +
+ mmACP_BT_RX_RINGBUFSIZE);
+ rv_writel(DMA_SIZE, rtd->acp3x_base + mmACP_BT_RX_DMA_SIZE);
+
+ /* Config audio fifo */
+ acp_fifo_addr = ACP_SRAM_PTE_OFFSET +
+ (rtd->num_pages * 8) + CAPTURE_FIFO_ADDR_OFFSET;
+ rv_writel(acp_fifo_addr, rtd->acp3x_base +
+ mmACP_BT_RX_FIFOADDR);
+ rv_writel(FIFO_SIZE, rtd->acp3x_base + mmACP_BT_RX_FIFOSIZE);
+ }
+
+ /* Enable watermark/period interrupt to host */
+ rv_writel(BIT(BT_TX_THRESHOLD) | BIT(BT_RX_THRESHOLD),
+ rtd->acp3x_base + mmACP_EXTERNAL_INTR_CNTL);
+}
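The mapping scheme in config_acp3x_dma() packs one 64-bit page address into two consecutive 32-bit scratch registers, so each page costs 8 bytes of register space; playback entries start at mmACP_SCRATCH_REG_0 and capture entries start right after the playback table (num_pages * 8). The offsets, spelled out (an illustrative helper, not in the patch):

    /* byte offset of page n's PTE within the scratch block */
    static u32 acp3x_pte_offset(bool capture, u16 num_pages, u16 n)
    {
    	u32 base = capture ? num_pages * 8 : 0;

    	return base + n * 8;	/* +0: low 32 bits, +4: high 32 bits */
    }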
+
+static int acp3x_dma_open(struct snd_pcm_substream *substream)
+{
+ int ret = 0;
+
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *prtd = substream->private_data;
+ struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
+ DRV_NAME);
+ struct i2s_dev_data *adata = dev_get_drvdata(component->dev);
+
+ struct i2s_stream_instance *i2s_data = kzalloc(sizeof(*i2s_data),
+ GFP_KERNEL);
+ if (!i2s_data)
+ return -ENOMEM;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ runtime->hw = acp3x_pcm_hardware_playback;
+ else
+ runtime->hw = acp3x_pcm_hardware_capture;
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0) {
+ dev_err(component->dev, "set integer constraint failed\n");
+ kfree(i2s_data);
+ return ret;
+ }
+
+ if (!adata->play_stream && !adata->capture_stream)
+ rv_writel(1, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ adata->play_stream = substream;
+ else
+ adata->capture_stream = substream;
+
+ i2s_data->acp3x_base = adata->acp3x_base;
+ runtime->private_data = i2s_data;
+ return 0;
+}
+
+static int acp3x_dma_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ int status;
+ u64 size;
+ struct page *pg;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct i2s_stream_instance *rtd = runtime->private_data;
+
+ if (!rtd)
+ return -EINVAL;
+
+ size = params_buffer_bytes(params);
+ status = snd_pcm_lib_malloc_pages(substream, size);
+ if (status < 0)
+ return status;
+
+ memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
+ pg = virt_to_page(substream->dma_buffer.area);
+ if (pg) {
+ rtd->pg = pg;
+ rtd->num_pages = (PAGE_ALIGN(size) >> PAGE_SHIFT);
+ config_acp3x_dma(rtd, substream->stream);
+ status = 0;
+ } else {
+ status = -ENOMEM;
+ }
+ return status;
+}
+
+static snd_pcm_uframes_t acp3x_dma_pointer(struct snd_pcm_substream *substream)
+{
+ u32 pos = 0;
+ struct i2s_stream_instance *rtd = substream->runtime->private_data;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ pos = rv_readl(rtd->acp3x_base +
+ mmACP_BT_TX_LINKPOSITIONCNTR);
+ else
+ pos = rv_readl(rtd->acp3x_base +
+ mmACP_BT_RX_LINKPOSITIONCNTR);
+
+ if (pos >= MAX_BUFFER)
+ pos = 0;
+
+ return bytes_to_frames(substream->runtime, pos);
+}
+
+static int acp3x_dma_new(struct snd_soc_pcm_runtime *rtd)
+{
+ return snd_pcm_lib_preallocate_pages_for_all(rtd->pcm,
+ SNDRV_DMA_TYPE_DEV,
+ NULL, MIN_BUFFER,
+ MAX_BUFFER);
+}
+
+static int acp3x_dma_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_pages(substream);
+}
+
+static int acp3x_dma_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ return snd_pcm_lib_default_mmap(substream, vma);
+}
+
+static int acp3x_dma_close(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *prtd = substream->private_data;
+ struct i2s_stream_instance *rtd = substream->runtime->private_data;
+ struct snd_soc_component *component = snd_soc_rtdcom_lookup(prtd,
+ DRV_NAME);
+ struct i2s_dev_data *adata = dev_get_drvdata(component->dev);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ adata->play_stream = NULL;
+ else
+ adata->capture_stream = NULL;
+
+ /* Disable the ACP interrupt when the current stream is being
+ * closed and no other stream is active.
+ */
+ if (!adata->play_stream && !adata->capture_stream)
+ rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
+ kfree(rtd);
+ return 0;
+}
+
+static struct snd_pcm_ops acp3x_dma_ops = {
+ .open = acp3x_dma_open,
+ .close = acp3x_dma_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = acp3x_dma_hw_params,
+ .hw_free = acp3x_dma_hw_free,
+ .pointer = acp3x_dma_pointer,
+ .mmap = acp3x_dma_mmap,
+};
+
+static int acp3x_dai_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
+{
+ struct i2s_dev_data *adata = snd_soc_dai_get_drvdata(cpu_dai);
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ adata->tdm_mode = false;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ adata->tdm_mode = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int acp3x_dai_set_tdm_slot(struct snd_soc_dai *cpu_dai, u32 tx_mask,
+ u32 rx_mask, int slots, int slot_width)
+{
+ u32 val = 0;
+ u16 slot_len;
+
+ struct i2s_dev_data *adata = snd_soc_dai_get_drvdata(cpu_dai);
+
+ switch (slot_width) {
+ case SLOT_WIDTH_8:
+ slot_len = 8;
+ break;
+ case SLOT_WIDTH_16:
+ slot_len = 16;
+ break;
+ case SLOT_WIDTH_24:
+ slot_len = 24;
+ break;
+ case SLOT_WIDTH_32:
+ slot_len = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = rv_readl(adata->acp3x_base + mmACP_BTTDM_ITER);
+ rv_writel((val | 0x2), adata->acp3x_base + mmACP_BTTDM_ITER);
+ val = rv_readl(adata->acp3x_base + mmACP_BTTDM_IRER);
+ rv_writel((val | 0x2), adata->acp3x_base + mmACP_BTTDM_IRER);
+
+ val = (FRM_LEN | (slots << 15) | (slot_len << 18));
+ rv_writel(val, adata->acp3x_base + mmACP_BTTDM_TXFRMT);
+ rv_writel(val, adata->acp3x_base + mmACP_BTTDM_RXFRMT);
+
+ adata->tdm_fmt = val;
+ return 0;
+}
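The frame-format word written to mmACP_BTTDM_TXFRMT/RXFRMT above is FRM_LEN | (slots << 15) | (slot_len << 18), with slot_len = 0 encoding 32-bit slots. A worked example, assuming 2 slots of 16 bits (constants are from the defines in acp3x.h):

    u32 val = FRM_LEN | (2 << 15) | (16 << 18);
    /* = 0x100 | 0x10000 | 0x400000 = 0x410100 */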
+
+static int acp3x_dai_i2s_hwparams(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ u32 val = 0;
+ struct i2s_stream_instance *rtd = substream->runtime->private_data;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_U8:
+ case SNDRV_PCM_FORMAT_S8:
+ rtd->xfer_resolution = 0x0;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ rtd->xfer_resolution = 0x02;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ rtd->xfer_resolution = 0x04;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ rtd->xfer_resolution = 0x05;
+ break;
+ default:
+ return -EINVAL;
+ }
+ val = rv_readl(rtd->acp3x_base + mmACP_BTTDM_ITER);
+ val = val | (rtd->xfer_resolution << 3);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ rv_writel(val, rtd->acp3x_base + mmACP_BTTDM_ITER);
+ else
+ rv_writel(val, rtd->acp3x_base + mmACP_BTTDM_IRER);
+
+ return 0;
+}
+
+static int acp3x_dai_i2s_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ int ret = 0;
+ struct i2s_stream_instance *rtd = substream->runtime->private_data;
+ u32 val, period_bytes;
+
+ period_bytes = frames_to_bytes(substream->runtime,
+ substream->runtime->period_size);
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ rv_writel(period_bytes, rtd->acp3x_base +
+ mmACP_BT_TX_INTR_WATERMARK_SIZE);
+ val = rv_readl(rtd->acp3x_base + mmACP_BTTDM_ITER);
+ val = val | BIT(0);
+ rv_writel(val, rtd->acp3x_base + mmACP_BTTDM_ITER);
+ } else {
+ rv_writel(period_bytes, rtd->acp3x_base +
+ mmACP_BT_RX_INTR_WATERMARK_SIZE);
+ val = rv_readl(rtd->acp3x_base + mmACP_BTTDM_IRER);
+ val = val | BIT(0);
+ rv_writel(val, rtd->acp3x_base + mmACP_BTTDM_IRER);
+ }
+ rv_writel(1, rtd->acp3x_base + mmACP_BTTDM_IER);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ val = rv_readl(rtd->acp3x_base + mmACP_BTTDM_ITER);
+ val = val & ~BIT(0);
+ rv_writel(val, rtd->acp3x_base + mmACP_BTTDM_ITER);
+ } else {
+ val = rv_readl(rtd->acp3x_base + mmACP_BTTDM_IRER);
+ val = val & ~BIT(0);
+ rv_writel(val, rtd->acp3x_base + mmACP_BTTDM_IRER);
+ }
+ rv_writel(0, rtd->acp3x_base + mmACP_BTTDM_IER);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+struct snd_soc_dai_ops acp3x_dai_i2s_ops = {
+ .hw_params = acp3x_dai_i2s_hwparams,
+ .trigger = acp3x_dai_i2s_trigger,
+ .set_fmt = acp3x_dai_i2s_set_fmt,
+ .set_tdm_slot = acp3x_dai_set_tdm_slot,
+};
+
+static struct snd_soc_dai_driver acp3x_i2s_dai_driver = {
+ .playback = {
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+
+ .rate_min = 8000,
+ .rate_max = 96000,
+ },
+ .capture = {
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 |
+ SNDRV_PCM_FMTBIT_U8 |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ .channels_min = 2,
+ .channels_max = 2,
+ .rate_min = 8000,
+ .rate_max = 48000,
+ },
+ .ops = &acp3x_dai_i2s_ops,
+};
+
+static const struct snd_soc_component_driver acp3x_i2s_component = {
+ .name = DRV_NAME,
+ .ops = &acp3x_dma_ops,
+ .pcm_new = acp3x_dma_new,
+};
+
+static int acp3x_audio_probe(struct platform_device *pdev)
+{
+ int status;
+ struct resource *res;
+ struct i2s_dev_data *adata;
+ unsigned int irqflags;
+
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "platform_data not retrieved\n");
+ return -ENODEV;
+ }
+ irqflags = *((unsigned int *)(pdev->dev.platform_data));
+
+ adata = devm_kzalloc(&pdev->dev, sizeof(struct i2s_dev_data),
+ GFP_KERNEL);
+ if (!adata)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "IORESOURCE_MEM FAILED\n");
+ return -ENODEV;
+ }
+
+ adata->acp3x_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!adata->acp3x_base)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "IORESOURCE_IRQ FAILED\n");
+ return -ENODEV;
+ }
+
+ adata->i2s_irq = res->start;
+ adata->play_stream = NULL;
+ adata->capture_stream = NULL;
+
+ dev_set_drvdata(&pdev->dev, adata);
+ /* Initialize ACP */
+ status = acp3x_init(adata->acp3x_base);
+ if (status)
+ return -ENODEV;
+ status = devm_snd_soc_register_component(&pdev->dev,
+ &acp3x_i2s_component,
+ &acp3x_i2s_dai_driver, 1);
+ if (status) {
+ dev_err(&pdev->dev, "Failed to register ACP I2S DAI\n");
+ goto dev_err;
+ }
+ status = devm_request_irq(&pdev->dev, adata->i2s_irq, i2s_irq_handler,
+ irqflags, "ACP3x_I2S_IRQ", adata);
+ if (status) {
+ dev_err(&pdev->dev, "ACP3x I2S IRQ request failed\n");
+ goto dev_err;
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 10000);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ return 0;
+dev_err:
+ status = acp3x_deinit(adata->acp3x_base);
+ if (status)
+ dev_err(&pdev->dev, "ACP de-init failed\n");
+ else
+ dev_info(&pdev->dev, "ACP de-initialized\n");
+ /* Ignore the device status and return the probe error */
+ return -ENODEV;
+}
+
+static int acp3x_audio_remove(struct platform_device *pdev)
+{
+ int ret;
+ struct i2s_dev_data *adata = dev_get_drvdata(&pdev->dev);
+
+ ret = acp3x_deinit(adata->acp3x_base);
+ if (ret)
+ dev_err(&pdev->dev, "ACP de-init failed\n");
+ else
+ dev_info(&pdev->dev, "ACP de-initialized\n");
+
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int acp3x_resume(struct device *dev)
+{
+ int status;
+ u32 val;
+ struct i2s_dev_data *adata = dev_get_drvdata(dev);
+
+ status = acp3x_init(adata->acp3x_base);
+ if (status)
+ return -ENODEV;
+
+ if (adata->play_stream && adata->play_stream->runtime) {
+ struct i2s_stream_instance *rtd =
+ adata->play_stream->runtime->private_data;
+ config_acp3x_dma(rtd, SNDRV_PCM_STREAM_PLAYBACK);
+ rv_writel((rtd->xfer_resolution << 3),
+ rtd->acp3x_base + mmACP_BTTDM_ITER);
+ if (adata->tdm_mode) {
+ rv_writel(adata->tdm_fmt, adata->acp3x_base +
+ mmACP_BTTDM_TXFRMT);
+ val = rv_readl(adata->acp3x_base + mmACP_BTTDM_ITER);
+ rv_writel((val | 0x2), adata->acp3x_base +
+ mmACP_BTTDM_ITER);
+ }
+ }
+
+ if (adata->capture_stream && adata->capture_stream->runtime) {
+ struct i2s_stream_instance *rtd =
+ adata->capture_stream->runtime->private_data;
+ config_acp3x_dma(rtd, SNDRV_PCM_STREAM_CAPTURE);
+ rv_writel((rtd->xfer_resolution << 3),
+ rtd->acp3x_base + mmACP_BTTDM_IRER);
+ if (adata->tdm_mode) {
+ rv_writel(adata->tdm_fmt, adata->acp3x_base +
+ mmACP_BTTDM_RXFRMT);
+ val = rv_readl(adata->acp3x_base + mmACP_BTTDM_IRER);
+ rv_writel((val | 0x2), adata->acp3x_base +
+ mmACP_BTTDM_IRER);
+ }
+ }
+
+ rv_writel(1, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
+ return 0;
+}
+
+static int acp3x_pcm_runtime_suspend(struct device *dev)
+{
+ int status;
+ struct i2s_dev_data *adata = dev_get_drvdata(dev);
+
+ status = acp3x_deinit(adata->acp3x_base);
+ if (status)
+ dev_err(dev, "ACP de-init failed\n");
+ else
+ dev_info(dev, "ACP de-initialized\n");
+
+ rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
+
+ return 0;
+}
+
+static int acp3x_pcm_runtime_resume(struct device *dev)
+{
+ int status;
+ struct i2s_dev_data *adata = dev_get_drvdata(dev);
+
+ status = acp3x_init(adata->acp3x_base);
+ if (status)
+ return -ENODEV;
+ rv_writel(1, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
+ return 0;
+}
+
+static const struct dev_pm_ops acp3x_pm_ops = {
+ .runtime_suspend = acp3x_pcm_runtime_suspend,
+ .runtime_resume = acp3x_pcm_runtime_resume,
+ .resume = acp3x_resume,
+};
+
+static struct platform_driver acp3x_dma_driver = {
+ .probe = acp3x_audio_probe,
+ .remove = acp3x_audio_remove,
+ .driver = {
+ .name = "acp3x_rv_i2s",
+ .pm = &acp3x_pm_ops,
+ },
+};
+
+module_platform_driver(acp3x_dma_driver);
+
+MODULE_AUTHOR("Maruthi.Bayyavarapu@amd.com");
+MODULE_AUTHOR("Vijendar.Mukunda@amd.com");
+MODULE_DESCRIPTION("AMD ACP 3.x PCM Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/amd/raven/acp3x.h b/sound/soc/amd/raven/acp3x.h
new file mode 100644
index 000000000000..4f2cadd90a87
--- /dev/null
+++ b/sound/soc/amd/raven/acp3x.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ALSA SoC PCM Driver
+ *
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ */
+
+#include "chip_offset_byte.h"
+
+#define ACP3x_PHY_BASE_ADDRESS 0x1240000
+#define ACP3x_I2S_MODE 0
+#define ACP3x_REG_START 0x1240000
+#define ACP3x_REG_END 0x1250200
+#define I2S_MODE 0x04
+#define BT_TX_THRESHOLD 26
+#define BT_RX_THRESHOLD 25
+#define ACP3x_POWER_ON 0x00
+#define ACP3x_POWER_ON_IN_PROGRESS 0x01
+#define ACP3x_POWER_OFF 0x02
+#define ACP3x_POWER_OFF_IN_PROGRESS 0x03
+#define ACP3x_SOFT_RESET__SoftResetAudDone_MASK 0x00010001
+
+#define ACP_SRAM_PTE_OFFSET 0x02050000
+#define PAGE_SIZE_4K_ENABLE 0x2
+#define MEM_WINDOW_START 0x4000000
+#define PLAYBACK_FIFO_ADDR_OFFSET 0x400
+#define CAPTURE_FIFO_ADDR_OFFSET 0x500
+
+#define PLAYBACK_MIN_NUM_PERIODS 2
+#define PLAYBACK_MAX_NUM_PERIODS 8
+#define PLAYBACK_MAX_PERIOD_SIZE 16384
+#define PLAYBACK_MIN_PERIOD_SIZE 4096
+#define CAPTURE_MIN_NUM_PERIODS 2
+#define CAPTURE_MAX_NUM_PERIODS 8
+#define CAPTURE_MAX_PERIOD_SIZE 16384
+#define CAPTURE_MIN_PERIOD_SIZE 4096
+
+#define MAX_BUFFER (PLAYBACK_MAX_PERIOD_SIZE * PLAYBACK_MAX_NUM_PERIODS)
+#define MIN_BUFFER MAX_BUFFER
+#define FIFO_SIZE 0x100
+#define DMA_SIZE 0x40
+#define FRM_LEN 0x100
+
+#define SLOT_WIDTH_8 0x08
+#define SLOT_WIDTH_16 0x10
+#define SLOT_WIDTH_24 0x18
+#define SLOT_WIDTH_32 0x20
+
+
+static inline u32 rv_readl(void __iomem *base_addr)
+{
+ return readl(base_addr - ACP3x_PHY_BASE_ADDRESS);
+}
+
+static inline void rv_writel(u32 val, void __iomem *base_addr)
+{
+ writel(val, base_addr - ACP3x_PHY_BASE_ADDRESS);
+}
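The rv_readl()/rv_writel() helpers exist because every mmACP_* constant in chip_offset_byte.h encodes the register's full physical address rather than an offset. Callers pass acp3x_base + mmACP_FOO, and the helper subtracts ACP3x_PHY_BASE_ADDRESS (0x1240000) again, so the access lands at the right offset into the ioremapped window. For example (arithmetic only, using the defines above):

    void __iomem *base = adata->acp3x_base;	/* ioremap of 0x1240000 */

    /* base + 0x1241000 - 0x1240000 == base + 0x1000 */
    u32 v = rv_readl(base + mmACP_SOFT_RESET);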
diff --git a/sound/soc/amd/raven/chip_offset_byte.h b/sound/soc/amd/raven/chip_offset_byte.h
new file mode 100644
index 000000000000..9c1fac58fb2a
--- /dev/null
+++ b/sound/soc/amd/raven/chip_offset_byte.h
@@ -0,0 +1,639 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * AMD ACP 3.0 Register Documentation
+ *
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ */
+
+#ifndef _acp_ip_OFFSET_HEADER
+#define _acp_ip_OFFSET_HEADER
+// Registers from ACP_DMA block
+
+#define mmACP_DMA_CNTL_0 0x1240000
+#define mmACP_DMA_CNTL_1 0x1240004
+#define mmACP_DMA_CNTL_2 0x1240008
+#define mmACP_DMA_CNTL_3 0x124000C
+#define mmACP_DMA_CNTL_4 0x1240010
+#define mmACP_DMA_CNTL_5 0x1240014
+#define mmACP_DMA_CNTL_6 0x1240018
+#define mmACP_DMA_CNTL_7 0x124001C
+#define mmACP_DMA_DSCR_STRT_IDX_0 0x1240020
+#define mmACP_DMA_DSCR_STRT_IDX_1 0x1240024
+#define mmACP_DMA_DSCR_STRT_IDX_2 0x1240028
+#define mmACP_DMA_DSCR_STRT_IDX_3 0x124002C
+#define mmACP_DMA_DSCR_STRT_IDX_4 0x1240030
+#define mmACP_DMA_DSCR_STRT_IDX_5 0x1240034
+#define mmACP_DMA_DSCR_STRT_IDX_6 0x1240038
+#define mmACP_DMA_DSCR_STRT_IDX_7 0x124003C
+#define mmACP_DMA_DSCR_CNT_0 0x1240040
+#define mmACP_DMA_DSCR_CNT_1 0x1240044
+#define mmACP_DMA_DSCR_CNT_2 0x1240048
+#define mmACP_DMA_DSCR_CNT_3 0x124004C
+#define mmACP_DMA_DSCR_CNT_4 0x1240050
+#define mmACP_DMA_DSCR_CNT_5 0x1240054
+#define mmACP_DMA_DSCR_CNT_6 0x1240058
+#define mmACP_DMA_DSCR_CNT_7 0x124005C
+#define mmACP_DMA_PRIO_0 0x1240060
+#define mmACP_DMA_PRIO_1 0x1240064
+#define mmACP_DMA_PRIO_2 0x1240068
+#define mmACP_DMA_PRIO_3 0x124006C
+#define mmACP_DMA_PRIO_4 0x1240070
+#define mmACP_DMA_PRIO_5 0x1240074
+#define mmACP_DMA_PRIO_6 0x1240078
+#define mmACP_DMA_PRIO_7 0x124007C
+#define mmACP_DMA_CUR_DSCR_0 0x1240080
+#define mmACP_DMA_CUR_DSCR_1 0x1240084
+#define mmACP_DMA_CUR_DSCR_2 0x1240088
+#define mmACP_DMA_CUR_DSCR_3 0x124008C
+#define mmACP_DMA_CUR_DSCR_4 0x1240090
+#define mmACP_DMA_CUR_DSCR_5 0x1240094
+#define mmACP_DMA_CUR_DSCR_6 0x1240098
+#define mmACP_DMA_CUR_DSCR_7 0x124009C
+#define mmACP_DMA_CUR_TRANS_CNT_0 0x12400A0
+#define mmACP_DMA_CUR_TRANS_CNT_1 0x12400A4
+#define mmACP_DMA_CUR_TRANS_CNT_2 0x12400A8
+#define mmACP_DMA_CUR_TRANS_CNT_3 0x12400AC
+#define mmACP_DMA_CUR_TRANS_CNT_4 0x12400B0
+#define mmACP_DMA_CUR_TRANS_CNT_5 0x12400B4
+#define mmACP_DMA_CUR_TRANS_CNT_6 0x12400B8
+#define mmACP_DMA_CUR_TRANS_CNT_7 0x12400BC
+#define mmACP_DMA_ERR_STS_0 0x12400C0
+#define mmACP_DMA_ERR_STS_1 0x12400C4
+#define mmACP_DMA_ERR_STS_2 0x12400C8
+#define mmACP_DMA_ERR_STS_3 0x12400CC
+#define mmACP_DMA_ERR_STS_4 0x12400D0
+#define mmACP_DMA_ERR_STS_5 0x12400D4
+#define mmACP_DMA_ERR_STS_6 0x12400D8
+#define mmACP_DMA_ERR_STS_7 0x12400DC
+#define mmACP_DMA_DESC_BASE_ADDR 0x12400E0
+#define mmACP_DMA_DESC_MAX_NUM_DSCR 0x12400E4
+#define mmACP_DMA_CH_STS 0x12400E8
+#define mmACP_DMA_CH_GROUP 0x12400EC
+#define mmACP_DMA_CH_RST_STS 0x12400F0
+
+
+// Registers from ACP_AXI2AXIATU block
+
+#define mmACPAXI2AXI_ATU_PAGE_SIZE_GRP_1 0x1240C00
+#define mmACPAXI2AXI_ATU_BASE_ADDR_GRP_1 0x1240C04
+#define mmACPAXI2AXI_ATU_PAGE_SIZE_GRP_2 0x1240C08
+#define mmACPAXI2AXI_ATU_BASE_ADDR_GRP_2 0x1240C0C
+#define mmACPAXI2AXI_ATU_PAGE_SIZE_GRP_3 0x1240C10
+#define mmACPAXI2AXI_ATU_BASE_ADDR_GRP_3 0x1240C14
+#define mmACPAXI2AXI_ATU_PAGE_SIZE_GRP_4 0x1240C18
+#define mmACPAXI2AXI_ATU_BASE_ADDR_GRP_4 0x1240C1C
+#define mmACPAXI2AXI_ATU_PAGE_SIZE_GRP_5 0x1240C20
+#define mmACPAXI2AXI_ATU_BASE_ADDR_GRP_5 0x1240C24
+#define mmACPAXI2AXI_ATU_PAGE_SIZE_GRP_6 0x1240C28
+#define mmACPAXI2AXI_ATU_BASE_ADDR_GRP_6 0x1240C2C
+#define mmACPAXI2AXI_ATU_PAGE_SIZE_GRP_7 0x1240C30
+#define mmACPAXI2AXI_ATU_BASE_ADDR_GRP_7 0x1240C34
+#define mmACPAXI2AXI_ATU_PAGE_SIZE_GRP_8 0x1240C38
+#define mmACPAXI2AXI_ATU_BASE_ADDR_GRP_8 0x1240C3C
+#define mmACPAXI2AXI_ATU_CTRL 0x1240C40
+
+
+// Registers from ACP_CLKRST block
+
+#define mmACP_SOFT_RESET 0x1241000
+#define mmACP_CONTROL 0x1241004
+#define mmACP_STATUS 0x1241008
+#define mmACP_DSP0_OCD_HALT_ON_RST 0x124100C
+#define mmACP_DYNAMIC_CG_MASTER_CONTROL 0x1241010
+
+
+// Registers from ACP_MISC block
+
+#define mmACP_EXTERNAL_INTR_ENB 0x1241800
+#define mmACP_EXTERNAL_INTR_CNTL 0x1241804
+#define mmACP_EXTERNAL_INTR_STAT 0x1241808
+#define mmACP_DSP0_INTR_CNTL 0x124180C
+#define mmACP_DSP0_INTR_STAT 0x1241810
+#define mmACP_DSP_SW_INTR_CNTL 0x1241814
+#define mmACP_DSP_SW_INTR_STAT 0x1241818
+#define mmACP_SW_INTR_TRIG 0x124181C
+#define mmACP_SMU_MAILBOX 0x1241820
+#define mmDSP_INTERRUPT_ROUTING_CTRL 0x1241824
+#define mmACP_DSP0_WATCHDOG_TIMER_CNTL 0x1241828
+#define mmACP_DSP0_EXT_TIMER1_CNTL 0x124182C
+#define mmACP_DSP0_EXT_TIMER2_CNTL 0x1241830
+#define mmACP_DSP0_EXT_TIMER3_CNTL 0x1241834
+#define mmACP_DSP0_EXT_TIMER4_CNTL 0x1241838
+#define mmACP_DSP0_EXT_TIMER5_CNTL 0x124183C
+#define mmACP_DSP0_EXT_TIMER6_CNTL 0x1241840
+#define mmACP_DSP0_EXT_TIMER1_CURR_VALUE 0x1241844
+#define mmACP_DSP0_EXT_TIMER2_CURR_VALUE 0x1241848
+#define mmACP_DSP0_EXT_TIMER3_CURR_VALUE 0x124184C
+#define mmACP_DSP0_EXT_TIMER4_CURR_VALUE 0x1241850
+#define mmACP_DSP0_EXT_TIMER5_CURR_VALUE 0x1241854
+#define mmACP_DSP0_EXT_TIMER6_CURR_VALUE 0x1241858
+#define mmACP_FW_STATUS 0x124185C
+#define mmACP_TIMER 0x1241874
+#define mmACP_TIMER_CNTL 0x1241878
+#define mmACP_PGMEM_CTRL 0x12418C0
+#define mmACP_ERROR_STATUS 0x12418C4
+#define mmACP_SW_I2S_ERROR_REASON 0x12418C8
+#define mmACP_MEM_PG_STS 0x12418CC
+
+
+// Registers from ACP_PGFSM block
+
+#define mmACP_I2S_PIN_CONFIG 0x1241400
+#define mmACP_PAD_PULLUP_PULLDOWN_CTRL 0x1241404
+#define mmACP_PAD_DRIVE_STRENGTH_CTRL 0x1241408
+#define mmACP_SW_PAD_KEEPER_EN 0x124140C
+#define mmACP_SW_WAKE_EN 0x1241410
+#define mmACP_I2S_WAKE_EN 0x1241414
+#define mmACP_PME_EN 0x1241418
+#define mmACP_PGFSM_CONTROL 0x124141C
+#define mmACP_PGFSM_STATUS 0x1241420
+
+
+// Registers from ACP_SCRATCH block
+
+#define mmACP_SCRATCH_REG_0 0x1250000
+#define mmACP_SCRATCH_REG_1 0x1250004
+#define mmACP_SCRATCH_REG_2 0x1250008
+#define mmACP_SCRATCH_REG_3 0x125000C
+#define mmACP_SCRATCH_REG_4 0x1250010
+#define mmACP_SCRATCH_REG_5 0x1250014
+#define mmACP_SCRATCH_REG_6 0x1250018
+#define mmACP_SCRATCH_REG_7 0x125001C
+#define mmACP_SCRATCH_REG_8 0x1250020
+#define mmACP_SCRATCH_REG_9 0x1250024
+#define mmACP_SCRATCH_REG_10 0x1250028
+#define mmACP_SCRATCH_REG_11 0x125002C
+#define mmACP_SCRATCH_REG_12 0x1250030
+#define mmACP_SCRATCH_REG_13 0x1250034
+#define mmACP_SCRATCH_REG_14 0x1250038
+#define mmACP_SCRATCH_REG_15 0x125003C
+#define mmACP_SCRATCH_REG_16 0x1250040
+#define mmACP_SCRATCH_REG_17 0x1250044
+#define mmACP_SCRATCH_REG_18 0x1250048
+#define mmACP_SCRATCH_REG_19 0x125004C
+#define mmACP_SCRATCH_REG_20 0x1250050
+#define mmACP_SCRATCH_REG_21 0x1250054
+#define mmACP_SCRATCH_REG_22 0x1250058
+#define mmACP_SCRATCH_REG_23 0x125005C
+#define mmACP_SCRATCH_REG_24 0x1250060
+#define mmACP_SCRATCH_REG_25 0x1250064
+#define mmACP_SCRATCH_REG_26 0x1250068
+#define mmACP_SCRATCH_REG_27 0x125006C
+#define mmACP_SCRATCH_REG_28 0x1250070
+#define mmACP_SCRATCH_REG_29 0x1250074
+#define mmACP_SCRATCH_REG_30 0x1250078
+#define mmACP_SCRATCH_REG_31 0x125007C
+#define mmACP_SCRATCH_REG_32 0x1250080
+#define mmACP_SCRATCH_REG_33 0x1250084
+#define mmACP_SCRATCH_REG_34 0x1250088
+#define mmACP_SCRATCH_REG_35 0x125008C
+#define mmACP_SCRATCH_REG_36 0x1250090
+#define mmACP_SCRATCH_REG_37 0x1250094
+#define mmACP_SCRATCH_REG_38 0x1250098
+#define mmACP_SCRATCH_REG_39 0x125009C
+#define mmACP_SCRATCH_REG_40 0x12500A0
+#define mmACP_SCRATCH_REG_41 0x12500A4
+#define mmACP_SCRATCH_REG_42 0x12500A8
+#define mmACP_SCRATCH_REG_43 0x12500AC
+#define mmACP_SCRATCH_REG_44 0x12500B0
+#define mmACP_SCRATCH_REG_45 0x12500B4
+#define mmACP_SCRATCH_REG_46 0x12500B8
+#define mmACP_SCRATCH_REG_47 0x12500BC
+#define mmACP_SCRATCH_REG_48 0x12500C0
+#define mmACP_SCRATCH_REG_49 0x12500C4
+#define mmACP_SCRATCH_REG_50 0x12500C8
+#define mmACP_SCRATCH_REG_51 0x12500CC
+#define mmACP_SCRATCH_REG_52 0x12500D0
+#define mmACP_SCRATCH_REG_53 0x12500D4
+#define mmACP_SCRATCH_REG_54 0x12500D8
+#define mmACP_SCRATCH_REG_55 0x12500DC
+#define mmACP_SCRATCH_REG_56 0x12500E0
+#define mmACP_SCRATCH_REG_57 0x12500E4
+#define mmACP_SCRATCH_REG_58 0x12500E8
+#define mmACP_SCRATCH_REG_59 0x12500EC
+#define mmACP_SCRATCH_REG_60 0x12500F0
+#define mmACP_SCRATCH_REG_61 0x12500F4
+#define mmACP_SCRATCH_REG_62 0x12500F8
+#define mmACP_SCRATCH_REG_63 0x12500FC
+#define mmACP_SCRATCH_REG_64 0x1250100
+#define mmACP_SCRATCH_REG_65 0x1250104
+#define mmACP_SCRATCH_REG_66 0x1250108
+#define mmACP_SCRATCH_REG_67 0x125010C
+#define mmACP_SCRATCH_REG_68 0x1250110
+#define mmACP_SCRATCH_REG_69 0x1250114
+#define mmACP_SCRATCH_REG_70 0x1250118
+#define mmACP_SCRATCH_REG_71 0x125011C
+#define mmACP_SCRATCH_REG_72 0x1250120
+#define mmACP_SCRATCH_REG_73 0x1250124
+#define mmACP_SCRATCH_REG_74 0x1250128
+#define mmACP_SCRATCH_REG_75 0x125012C
+#define mmACP_SCRATCH_REG_76 0x1250130
+#define mmACP_SCRATCH_REG_77 0x1250134
+#define mmACP_SCRATCH_REG_78 0x1250138
+#define mmACP_SCRATCH_REG_79 0x125013C
+#define mmACP_SCRATCH_REG_80 0x1250140
+#define mmACP_SCRATCH_REG_81 0x1250144
+#define mmACP_SCRATCH_REG_82 0x1250148
+#define mmACP_SCRATCH_REG_83 0x125014C
+#define mmACP_SCRATCH_REG_84 0x1250150
+#define mmACP_SCRATCH_REG_85 0x1250154
+#define mmACP_SCRATCH_REG_86 0x1250158
+#define mmACP_SCRATCH_REG_87 0x125015C
+#define mmACP_SCRATCH_REG_88 0x1250160
+#define mmACP_SCRATCH_REG_89 0x1250164
+#define mmACP_SCRATCH_REG_90 0x1250168
+#define mmACP_SCRATCH_REG_91 0x125016C
+#define mmACP_SCRATCH_REG_92 0x1250170
+#define mmACP_SCRATCH_REG_93 0x1250174
+#define mmACP_SCRATCH_REG_94 0x1250178
+#define mmACP_SCRATCH_REG_95 0x125017C
+#define mmACP_SCRATCH_REG_96 0x1250180
+#define mmACP_SCRATCH_REG_97 0x1250184
+#define mmACP_SCRATCH_REG_98 0x1250188
+#define mmACP_SCRATCH_REG_99 0x125018C
+#define mmACP_SCRATCH_REG_100 0x1250190
+#define mmACP_SCRATCH_REG_101 0x1250194
+#define mmACP_SCRATCH_REG_102 0x1250198
+#define mmACP_SCRATCH_REG_103 0x125019C
+#define mmACP_SCRATCH_REG_104 0x12501A0
+#define mmACP_SCRATCH_REG_105 0x12501A4
+#define mmACP_SCRATCH_REG_106 0x12501A8
+#define mmACP_SCRATCH_REG_107 0x12501AC
+#define mmACP_SCRATCH_REG_108 0x12501B0
+#define mmACP_SCRATCH_REG_109 0x12501B4
+#define mmACP_SCRATCH_REG_110 0x12501B8
+#define mmACP_SCRATCH_REG_111 0x12501BC
+#define mmACP_SCRATCH_REG_112 0x12501C0
+#define mmACP_SCRATCH_REG_113 0x12501C4
+#define mmACP_SCRATCH_REG_114 0x12501C8
+#define mmACP_SCRATCH_REG_115 0x12501CC
+#define mmACP_SCRATCH_REG_116 0x12501D0
+#define mmACP_SCRATCH_REG_117 0x12501D4
+#define mmACP_SCRATCH_REG_118 0x12501D8
+#define mmACP_SCRATCH_REG_119 0x12501DC
+#define mmACP_SCRATCH_REG_120 0x12501E0
+#define mmACP_SCRATCH_REG_121 0x12501E4
+#define mmACP_SCRATCH_REG_122 0x12501E8
+#define mmACP_SCRATCH_REG_123 0x12501EC
+#define mmACP_SCRATCH_REG_124 0x12501F0
+#define mmACP_SCRATCH_REG_125 0x12501F4
+#define mmACP_SCRATCH_REG_126 0x12501F8
+#define mmACP_SCRATCH_REG_127 0x12501FC
+#define mmACP_SCRATCH_REG_128 0x1250200
+
+
+// Registers from ACP_SW_ACLK block
+
+#define mmSW_CORB_Base_Address 0x1243200
+#define mmSW_CORB_Write_Pointer 0x1243204
+#define mmSW_CORB_Read_Pointer 0x1243208
+#define mmSW_CORB_Control 0x124320C
+#define mmSW_CORB_Size 0x1243214
+#define mmSW_RIRB_Base_Address 0x1243218
+#define mmSW_RIRB_Write_Pointer 0x124321C
+#define mmSW_RIRB_Response_Interrupt_Count 0x1243220
+#define mmSW_RIRB_Control 0x1243224
+#define mmSW_RIRB_Size 0x1243228
+#define mmSW_RIRB_FIFO_MIN_THDL 0x124322C
+#define mmSW_imm_cmd_UPPER_WORD 0x1243230
+#define mmSW_imm_cmd_LOWER_QWORD 0x1243234
+#define mmSW_imm_resp_UPPER_WORD 0x1243238
+#define mmSW_imm_resp_LOWER_QWORD 0x124323C
+#define mmSW_imm_cmd_sts 0x1243240
+#define mmSW_BRA_BASE_ADDRESS 0x1243244
+#define mmSW_BRA_TRANSFER_SIZE 0x1243248
+#define mmSW_BRA_DMA_BUSY 0x124324C
+#define mmSW_BRA_RESP 0x1243250
+#define mmSW_BRA_RESP_FRAME_ADDR 0x1243254
+#define mmSW_BRA_CURRENT_TRANSFER_SIZE 0x1243258
+#define mmSW_STATE_CHANGE_STATUS_0TO7 0x124325C
+#define mmSW_STATE_CHANGE_STATUS_8TO11 0x1243260
+#define mmSW_STATE_CHANGE_STATUS_MASK_0to7 0x1243264
+#define mmSW_STATE_CHANGE_STATUS_MASK_8to11 0x1243268
+#define mmSW_CLK_FREQUENCY_CTRL 0x124326C
+#define mmSW_ERROR_INTR_MASK 0x1243270
+#define mmSW_PHY_TEST_MODE_DATA_OFF 0x1243274
+
+
+// Registers from ACP_SW_SWCLK block
+
+#define mmACP_SW_EN 0x1243000
+#define mmACP_SW_EN_STATUS 0x1243004
+#define mmACP_SW_FRAMESIZE 0x1243008
+#define mmACP_SW_SSP_Counter 0x124300C
+#define mmACP_SW_Audio_TX_EN 0x1243010
+#define mmACP_SW_Audio_TX_EN_STATUS 0x1243014
+#define mmACP_SW_Audio_TX_Frame_Format 0x1243018
+#define mmACP_SW_Audio_TX_SampleInterval 0x124301C
+#define mmACP_SW_Audio_TX_Hctrl_DP0 0x1243020
+#define mmACP_SW_Audio_TX_Hctrl_DP1 0x1243024
+#define mmACP_SW_Audio_TX_Hctrl_DP2 0x1243028
+#define mmACP_SW_Audio_TX_Hctrl_DP3 0x124302C
+#define mmACP_SW_Audio_TX_offset_DP0 0x1243030
+#define mmACP_SW_Audio_TX_offset_DP1 0x1243034
+#define mmACP_SW_Audio_TX_offset_DP2 0x1243038
+#define mmACP_SW_Audio_TX_offset_DP3 0x124303C
+#define mmACP_SW_Audio_TX_Channel_Enable_DP0 0x1243040
+#define mmACP_SW_Audio_TX_Channel_Enable_DP1 0x1243044
+#define mmACP_SW_Audio_TX_Channel_Enable_DP2 0x1243048
+#define mmACP_SW_Audio_TX_Channel_Enable_DP3 0x124304C
+#define mmACP_SW_BT_TX_EN 0x1243050
+#define mmACP_SW_BT_TX_EN_STATUS 0x1243054
+#define mmACP_SW_BT_TX_Frame_Format 0x1243058
+#define mmACP_SW_BT_TX_SampleInterval 0x124305C
+#define mmACP_SW_BT_TX_Hctrl 0x1243060
+#define mmACP_SW_BT_TX_offset 0x1243064
+#define mmACP_SW_BT_TX_Channel_Enable_DP0 0x1243068
+#define mmACP_SW_Headset_TX_EN 0x124306C
+#define mmACP_SW_Headset_TX_EN_STATUS 0x1243070
+#define mmACP_SW_Headset_TX_Frame_Format 0x1243074
+#define mmACP_SW_Headset_TX_SampleInterval 0x1243078
+#define mmACP_SW_Headset_TX_Hctrl 0x124307C
+#define mmACP_SW_Headset_TX_offset 0x1243080
+#define mmACP_SW_Headset_TX_Channel_Enable_DP0 0x1243084
+#define mmACP_SW_Audio_RX_EN 0x1243088
+#define mmACP_SW_Audio_RX_EN_STATUS 0x124308C
+#define mmACP_SW_Audio_RX_Frame_Format 0x1243090
+#define mmACP_SW_Audio_RX_SampleInterval 0x1243094
+#define mmACP_SW_Audio_RX_Hctrl_DP0 0x1243098
+#define mmACP_SW_Audio_RX_Hctrl_DP1 0x124309C
+#define mmACP_SW_Audio_RX_Hctrl_DP2 0x1243100
+#define mmACP_SW_Audio_RX_Hctrl_DP3 0x1243104
+#define mmACP_SW_Audio_RX_offset_DP0 0x1243108
+#define mmACP_SW_Audio_RX_offset_DP1 0x124310C
+#define mmACP_SW_Audio_RX_offset_DP2 0x1243110
+#define mmACP_SW_Audio_RX_offset_DP3 0x1243114
+#define mmACP_SW_Audio_RX_Channel_Enable_DP0 0x1243118
+#define mmACP_SW_Audio_RX_Channel_Enable_DP1 0x124311C
+#define mmACP_SW_Audio_RX_Channel_Enable_DP2 0x1243120
+#define mmACP_SW_Audio_RX_Channel_Enable_DP3 0x1243124
+#define mmACP_SW_BT_RX_EN 0x1243128
+#define mmACP_SW_BT_RX_EN_STATUS 0x124312C
+#define mmACP_SW_BT_RX_Frame_Format 0x1243130
+#define mmACP_SW_BT_RX_SampleInterval 0x1243134
+#define mmACP_SW_BT_RX_Hctrl 0x1243138
+#define mmACP_SW_BT_RX_offset 0x124313C
+#define mmACP_SW_BT_RX_Channel_Enable_DP0 0x1243140
+#define mmACP_SW_Headset_RX_EN 0x1243144
+#define mmACP_SW_Headset_RX_EN_STATUS 0x1243148
+#define mmACP_SW_Headset_RX_Frame_Format 0x124314C
+#define mmACP_SW_Headset_RX_SampleInterval 0x1243150
+#define mmACP_SW_Headset_RX_Hctrl 0x1243154
+#define mmACP_SW_Headset_RX_offset 0x1243158
+#define mmACP_SW_Headset_RX_Channel_Enable_DP0 0x124315C
+#define mmACP_SW_BPT_PORT_EN 0x1243160
+#define mmACP_SW_BPT_PORT_EN_STATUS 0x1243164
+#define mmACP_SW_BPT_PORT_Frame_Format 0x1243168
+#define mmACP_SW_BPT_PORT_SampleInterval 0x124316C
+#define mmACP_SW_BPT_PORT_Hctrl 0x1243170
+#define mmACP_SW_BPT_PORT_offset 0x1243174
+#define mmACP_SW_BPT_PORT_Channel_Enable 0x1243178
+#define mmACP_SW_BPT_PORT_First_byte_addr 0x124317C
+#define mmACP_SW_CLK_RESUME_CTRL 0x1243180
+#define mmACP_SW_CLK_RESUME_Delay_Cntr 0x1243184
+#define mmACP_SW_BUS_RESET_CTRL 0x1243188
+#define mmACP_SW_PRBS_ERR_STATUS 0x124318C
+
+
+// Registers from ACP_AUDIO_BUFFERS block
+
+#define mmACP_I2S_RX_RINGBUFADDR 0x1242000
+#define mmACP_I2S_RX_RINGBUFSIZE 0x1242004
+#define mmACP_I2S_RX_LINKPOSITIONCNTR 0x1242008
+#define mmACP_I2S_RX_FIFOADDR 0x124200C
+#define mmACP_I2S_RX_FIFOSIZE 0x1242010
+#define mmACP_I2S_RX_DMA_SIZE 0x1242014
+#define mmACP_I2S_RX_LINEARPOSITIONCNTR_HIGH 0x1242018
+#define mmACP_I2S_RX_LINEARPOSITIONCNTR_LOW 0x124201C
+#define mmACP_I2S_RX_INTR_WATERMARK_SIZE 0x1242020
+#define mmACP_I2S_TX_RINGBUFADDR 0x1242024
+#define mmACP_I2S_TX_RINGBUFSIZE 0x1242028
+#define mmACP_I2S_TX_LINKPOSITIONCNTR 0x124202C
+#define mmACP_I2S_TX_FIFOADDR 0x1242030
+#define mmACP_I2S_TX_FIFOSIZE 0x1242034
+#define mmACP_I2S_TX_DMA_SIZE 0x1242038
+#define mmACP_I2S_TX_LINEARPOSITIONCNTR_HIGH 0x124203C
+#define mmACP_I2S_TX_LINEARPOSITIONCNTR_LOW 0x1242040
+#define mmACP_I2S_TX_INTR_WATERMARK_SIZE 0x1242044
+#define mmACP_BT_RX_RINGBUFADDR 0x1242048
+#define mmACP_BT_RX_RINGBUFSIZE 0x124204C
+#define mmACP_BT_RX_LINKPOSITIONCNTR 0x1242050
+#define mmACP_BT_RX_FIFOADDR 0x1242054
+#define mmACP_BT_RX_FIFOSIZE 0x1242058
+#define mmACP_BT_RX_DMA_SIZE 0x124205C
+#define mmACP_BT_RX_LINEARPOSITIONCNTR_HIGH 0x1242060
+#define mmACP_BT_RX_LINEARPOSITIONCNTR_LOW 0x1242064
+#define mmACP_BT_RX_INTR_WATERMARK_SIZE 0x1242068
+#define mmACP_BT_TX_RINGBUFADDR 0x124206C
+#define mmACP_BT_TX_RINGBUFSIZE 0x1242070
+#define mmACP_BT_TX_LINKPOSITIONCNTR 0x1242074
+#define mmACP_BT_TX_FIFOADDR 0x1242078
+#define mmACP_BT_TX_FIFOSIZE 0x124207C
+#define mmACP_BT_TX_DMA_SIZE 0x1242080
+#define mmACP_BT_TX_LINEARPOSITIONCNTR_HIGH 0x1242084
+#define mmACP_BT_TX_LINEARPOSITIONCNTR_LOW 0x1242088
+#define mmACP_BT_TX_INTR_WATERMARK_SIZE 0x124208C
+#define mmACP_HS_RX_RINGBUFADDR 0x1242090
+#define mmACP_HS_RX_RINGBUFSIZE 0x1242094
+#define mmACP_HS_RX_LINKPOSITIONCNTR 0x1242098
+#define mmACP_HS_RX_FIFOADDR 0x124209C
+#define mmACP_HS_RX_FIFOSIZE 0x12420A0
+#define mmACP_HS_RX_DMA_SIZE 0x12420A4
+#define mmACP_HS_RX_LINEARPOSITIONCNTR_HIGH 0x12420A8
+#define mmACP_HS_RX_LINEARPOSITIONCNTR_LOW 0x12420AC
+#define mmACP_HS_RX_INTR_WATERMARK_SIZE 0x12420B0
+#define mmACP_HS_TX_RINGBUFADDR 0x12420B4
+#define mmACP_HS_TX_RINGBUFSIZE 0x12420B8
+#define mmACP_HS_TX_LINKPOSITIONCNTR 0x12420BC
+#define mmACP_HS_TX_FIFOADDR 0x12420C0
+#define mmACP_HS_TX_FIFOSIZE 0x12420C4
+#define mmACP_HS_TX_DMA_SIZE 0x12420C8
+#define mmACP_HS_TX_LINEARPOSITIONCNTR_HIGH 0x12420CC
+#define mmACP_HS_TX_LINEARPOSITIONCNTR_LOW 0x12420D0
+#define mmACP_HS_TX_INTR_WATERMARK_SIZE 0x12420D4
+
+
+// Registers from ACP_I2S_TDM block
+
+#define mmACP_I2STDM_IER 0x1242400
+#define mmACP_I2STDM_IRER 0x1242404
+#define mmACP_I2STDM_RXFRMT 0x1242408
+#define mmACP_I2STDM_ITER 0x124240C
+#define mmACP_I2STDM_TXFRMT 0x1242410
+
+
+// Registers from ACP_BT_TDM block
+
+#define mmACP_BTTDM_IER 0x1242800
+#define mmACP_BTTDM_IRER 0x1242804
+#define mmACP_BTTDM_RXFRMT 0x1242808
+#define mmACP_BTTDM_ITER 0x124280C
+#define mmACP_BTTDM_TXFRMT 0x1242810
+
+
+// Registers from AZALIA_IP block
+
+#define mmAudio_Az_Global_Capabilities 0x1200000
+#define mmAudio_Az_Minor_Version 0x1200002
+#define mmAudio_Az_Major_Version 0x1200003
+#define mmAudio_Az_Output_Payload_Capability 0x1200004
+#define mmAudio_Az_Input_Payload_Capability 0x1200006
+#define mmAudio_Az_Global_Control 0x1200008
+#define mmAudio_Az_Wake_Enable 0x120000C
+#define mmAudio_Az_State_Change_Status 0x120000E
+#define mmAudio_Az_Global_Status 0x1200010
+#define mmAudio_Az_Linked_List_Capability_Header 0x1200014
+#define mmAudio_Az_Output_Stream_Payload_Capability 0x1200018
+#define mmAudio_Az_Input_Stream_Payload_Capability 0x120001A
+#define mmAudio_Az_Interrupt_Control 0x1200020
+#define mmAudio_Az_Interrupt_Status 0x1200024
+#define mmAudio_Az_Wall_Clock_Counter 0x1200030
+#define mmAudio_Az_Stream_Synchronization 0x1200038
+#define mmAudio_Az_CORB_Lower_Base_Address 0x1200040
+#define mmAudio_Az_CORB_Upper_Base_Address 0x1200044
+#define mmAudio_Az_CORB_Write_Pointer 0x1200048
+#define mmAudio_Az_CORB_Read_Pointer 0x120004A
+#define mmAudio_Az_CORB_Control 0x120004C
+#define mmAudio_Az_CORB_Status 0x120004D
+#define mmAudio_Az_CORB_Size 0x120004E
+#define mmAudio_Az_RIRB_Lower_Base_Address 0x1200050
+#define mmAudio_Az_RIRB_Upper_Base_Address 0x1200054
+#define mmAudio_Az_RIRB_Write_Pointer 0x1200058
+#define mmAudio_Az_RIRB_Response_Interrupt_Count 0x120005A
+#define mmAudio_Az_RIRB_Control 0x120005C
+#define mmAudio_Az_RIRB_Status 0x120005D
+#define mmAudio_Az_RIRB_Size 0x120005E
+#define mmAudio_Az_Immediate_Command_Output_Interface 0x1200060
+#define mmAudio_Az_Immediate_Response_Input_Interface 0x1200064
+#define mmAudio_Az_Immediate_Command_Status 0x1200068
+#define mmAudio_Az_DPLBASE 0x1200070
+#define mmAudio_Az_DPUBASE 0x1200074
+#define mmAudio_Az_Input_SD0CTL_and_STS 0x1200080
+#define mmAudio_Az_Input_SD0LPIB 0x1200084
+#define mmAudio_Az_Input_SD0CBL 0x1200088
+#define mmAudio_Az_Input_SD0LVI 0x120008C
+#define mmAudio_Az_Input_SD0FIFOS 0x1200090
+#define mmAudio_Az_Input_SD0FMT 0x1200092
+#define mmAudio_Az_Input_SD0BDPL 0x1200098
+#define mmAudio_Az_Input_SD0BDPU 0x120009C
+#define mmAudio_Az_Input_SD1CTL_and_STS 0x12000A0
+#define mmAudio_Az_Input_SD1LPIB 0x12000A4
+#define mmAudio_Az_Input_SD1CBL 0x12000A8
+#define mmAudio_Az_Input_SD1LVI 0x12000AC
+#define mmAudio_Az_Input_SD1FIFOS 0x12000B0
+#define mmAudio_Az_Input_SD1FMT 0x12000B2
+#define mmAudio_Az_Input_SD1BDPL 0x12000B8
+#define mmAudio_Az_Input_SD1BDPU 0x12000BC
+#define mmAudio_Az_Input_SD2CTL_and_STS 0x12000C0
+#define mmAudio_Az_Input_SD2LPIB 0x12000C4
+#define mmAudio_Az_Input_SD2CBL 0x12000C8
+#define mmAudio_Az_Input_SD2LVI 0x12000CC
+#define mmAudio_Az_Input_SD2FIFOS 0x12000D0
+#define mmAudio_Az_Input_SD2FMT 0x12000D2
+#define mmAudio_Az_Input_SD2BDPL 0x12000D8
+#define mmAudio_Az_Input_SD2BDPU 0x12000DC
+#define mmAudio_Az_Input_SD3CTL_and_STS 0x12000E0
+#define mmAudio_Az_Input_SD3LPIB 0x12000E4
+#define mmAudio_Az_Input_SD3CBL 0x12000E8
+#define mmAudio_Az_Input_SD3LVI 0x12000EC
+#define mmAudio_Az_Input_SD3FIFOS 0x12000F0
+#define mmAudio_Az_Input_SD3FMT 0x12000F2
+#define mmAudio_Az_Input_SD3BDPL 0x12000F8
+#define mmAudio_Az_Input_SD3BDPU 0x12000FC
+#define mmAudio_Az_Output_SD0CTL_and_STS 0x1200100
+#define mmAudio_Az_Output_SD0LPIB 0x1200104
+#define mmAudio_Az_Output_SD0CBL 0x1200108
+#define mmAudio_Az_Output_SD0LVI 0x120010C
+#define mmAudio_Az_Output_SD0FIFOS 0x1200110
+#define mmAudio_Az_Output_SD0FMT 0x1200112
+#define mmAudio_Az_Output_SD0BDPL 0x1200118
+#define mmAudio_Az_Output_SD0BDPU 0x120011C
+#define mmAudio_Az_Output_SD1CTL_and_STS 0x1200120
+#define mmAudio_Az_Output_SD1LPIB 0x1200124
+#define mmAudio_Az_Output_SD1CBL 0x1200128
+#define mmAudio_Az_Output_SD1LVI 0x120012C
+#define mmAudio_Az_Output_SD1FIFOS 0x1200130
+#define mmAudio_Az_Output_SD1FMT 0x1200132
+#define mmAudio_Az_Output_SD1BDPL 0x1200138
+#define mmAudio_Az_Output_SD1BDPU 0x120013C
+#define mmAudio_Az_Output_SD2CTL_and_STS 0x1200140
+#define mmAudio_Az_Output_SD2LPIB 0x1200144
+#define mmAudio_Az_Output_SD2CBL 0x1200148
+#define mmAudio_Az_Output_SD2LVI 0x120014C
+#define mmAudio_Az_Output_SD2FIFOS 0x1200150
+#define mmAudio_Az_Output_SD2FMT 0x1200152
+#define mmAudio_Az_Output_SD2BDPL 0x1200158
+#define mmAudio_Az_Output_SD2BDPU 0x120015C
+#define mmAudio_Az_Output_SD3CTL_and_STS 0x1200160
+#define mmAudio_Az_Output_SD3LPIB 0x1200164
+#define mmAudio_Az_Output_SD3CBL 0x1200168
+#define mmAudio_Az_Output_SD3LVI 0x120016C
+#define mmAudio_Az_Output_SD3FIFOS 0x1200170
+#define mmAudio_Az_Output_SD3FMT 0x1200172
+#define mmAudio_Az_Output_SD3BDPL 0x1200178
+#define mmAudio_Az_Output_SD3BDPU 0x120017C
+#define mmAudioAZ_Misc_Control_Register_1 0x1200180
+#define mmAudioAZ_Misc_Control_Register_2 0x1200182
+#define mmAudioAZ_Misc_Control_Register_3 0x1200183
+#define mmAudio_AZ_Multiple_Links_Capability_Header 0x1200200
+#define mmAudio_AZ_Multiple_Links_Capability_Declaration 0x1200204
+#define mmAudio_AZ_Link0_Capabilities 0x1200240
+#define mmAudio_AZ_Link0_Control 0x1200244
+#define mmAudio_AZ_Link0_Output_Stream_ID 0x1200248
+#define mmAudio_AZ_Link0_SDI_Identifier 0x120024C
+#define mmAudio_AZ_Link0_Per_Stream_Overhead 0x1200250
+#define mmAudio_AZ_Link0_Wall_Frame_Counter 0x1200258
+#define mmAudio_AZ_Link0_Output_Payload_Capability_L 0x1200260
+#define mmAudio_AZ_Link0_Output_Payload_Capability_U 0x1200264
+#define mmAudio_AZ_Link0_Input_Payload_Capability_L 0x1200270
+#define mmAudio_AZ_Link0_Input_Payload_Capability_U 0x1200274
+#define mmAudio_Az_Input_SD0LICBA 0x1202084
+#define mmAudio_Az_Input_SD1LICBA 0x12020A4
+#define mmAudio_Az_Input_SD2LICBA 0x12020C4
+#define mmAudio_Az_Input_SD3LICBA 0x12020E4
+#define mmAudio_Az_Output_SD0LICBA 0x1202104
+#define mmAudio_Az_Output_SD1LICBA 0x1202124
+#define mmAudio_Az_Output_SD2LICBA 0x1202144
+#define mmAudio_Az_Output_SD3LICBA 0x1202164
+#define mmAUDIO_AZ_POWER_MANAGEMENT_CONTROL 0x1204000
+#define mmAUDIO_AZ_IOC_SOFTRST_CONTROL 0x1204004
+#define mmAUDIO_AZ_IOC_CLKGATE_CONTROL 0x1204008
+
+
+// Registers from ACP_AZALIA block
+
+#define mmACP_AZ_PAGE0_LBASE_ADDR 0x1243800
+#define mmACP_AZ_PAGE0_UBASE_ADDR 0x1243804
+#define mmACP_AZ_PAGE0_PGEN_SIZE 0x1243808
+#define mmACP_AZ_PAGE0_OFFSET 0x124380C
+#define mmACP_AZ_PAGE1_LBASE_ADDR 0x1243810
+#define mmACP_AZ_PAGE1_UBASE_ADDR 0x1243814
+#define mmACP_AZ_PAGE1_PGEN_SIZE 0x1243818
+#define mmACP_AZ_PAGE1_OFFSET 0x124381C
+#define mmACP_AZ_PAGE2_LBASE_ADDR 0x1243820
+#define mmACP_AZ_PAGE2_UBASE_ADDR 0x1243824
+#define mmACP_AZ_PAGE2_PGEN_SIZE 0x1243828
+#define mmACP_AZ_PAGE2_OFFSET 0x124382C
+#define mmACP_AZ_PAGE3_LBASE_ADDR 0x1243830
+#define mmACP_AZ_PAGE3_UBASE_ADDR 0x1243834
+#define mmACP_AZ_PAGE3_PGEN_SIZE 0x1243838
+#define mmACP_AZ_PAGE3_OFFSET 0x124383C
+#define mmACP_AZ_PAGE4_LBASE_ADDR 0x1243840
+#define mmACP_AZ_PAGE4_UBASE_ADDR 0x1243844
+#define mmACP_AZ_PAGE4_PGEN_SIZE 0x1243848
+#define mmACP_AZ_PAGE4_OFFSET 0x124384C
+#define mmACP_AZ_PAGE5_LBASE_ADDR 0x1243850
+#define mmACP_AZ_PAGE5_UBASE_ADDR 0x1243854
+#define mmACP_AZ_PAGE5_PGEN_SIZE 0x1243858
+#define mmACP_AZ_PAGE5_OFFSET 0x124385C
+#define mmACP_AZ_PAGE6_LBASE_ADDR 0x1243860
+#define mmACP_AZ_PAGE6_UBASE_ADDR 0x1243864
+#define mmACP_AZ_PAGE6_PGEN_SIZE 0x1243868
+#define mmACP_AZ_PAGE6_OFFSET 0x124386C
+#define mmACP_AZ_PAGE7_LBASE_ADDR 0x1243870
+#define mmACP_AZ_PAGE7_UBASE_ADDR 0x1243874
+#define mmACP_AZ_PAGE7_PGEN_SIZE 0x1243878
+#define mmACP_AZ_PAGE7_OFFSET 0x124387C
+
+
+#endif
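
The mm* constants above are byte offsets into the ACP MMIO aperture that the PCI driver below maps from BAR 0. A minimal sketch of the access pattern, assuming rv_readl()/rv_writel() are the MMIO wrappers declared in acp3x.h, with BIT(0) standing in for a hypothetical enable bit:

	void __iomem *base = adata->acp3x_base;	/* mapped in snd_acp3x_probe() */
	u32 val;

	val = rv_readl(base + mmACP_EXTERNAL_INTR_ENB);
	rv_writel(val | BIT(0), base + mmACP_EXTERNAL_INTR_ENB);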
diff --git a/sound/soc/amd/raven/pci-acp3x.c b/sound/soc/amd/raven/pci-acp3x.c
new file mode 100644
index 000000000000..facec2472b34
--- /dev/null
+++ b/sound/soc/amd/raven/pci-acp3x.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// AMD ACP PCI Driver
+//
+// Copyright 2016 Advanced Micro Devices, Inc.
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+
+#include "acp3x.h"
+
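+/*
+ * Per-device state: the BAR 0 MMIO mapping, the detected audio mode,
+ * and the child platform device spawned for that mode.
+ */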
+struct acp3x_dev_data {
+ void __iomem *acp3x_base;
+ bool acp3x_audio_mode;
+ struct resource *res;
+ struct platform_device *pdev;
+};
+
+static int snd_acp3x_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ int ret;
+ u32 addr, val;
+ struct acp3x_dev_data *adata;
+ struct platform_device_info pdevinfo;
+ unsigned int irqflags;
+
+ if (pci_enable_device(pci)) {
+ dev_err(&pci->dev, "pci_enable_device failed\n");
+ return -ENODEV;
+ }
+
+ ret = pci_request_regions(pci, "AMD ACP3x audio");
+ if (ret < 0) {
+ dev_err(&pci->dev, "pci_request_regions failed\n");
+ goto disable_pci;
+ }
+
+ adata = devm_kzalloc(&pci->dev, sizeof(struct acp3x_dev_data),
+ GFP_KERNEL);
+ if (!adata) {
+ ret = -ENOMEM;
+ goto release_regions;
+ }
+
+ /* Check for MSI interrupt support */
+ ret = pci_enable_msi(pci);
+ if (ret)
+ /* MSI not available: fall back to a shared legacy interrupt */
+ irqflags = IRQF_SHARED;
+ else
+ /* MSI enabled: the vector is exclusive to this device */
+ irqflags = 0;
+
+ addr = pci_resource_start(pci, 0);
+ adata->acp3x_base = ioremap(addr, pci_resource_len(pci, 0));
+ if (!adata->acp3x_base) {
+ ret = -ENOMEM;
+ goto release_regions;
+ }
+ pci_set_master(pci);
+ pci_set_drvdata(pci, adata);
+
+ val = rv_readl(adata->acp3x_base + mmACP_I2S_PIN_CONFIG);
+ switch (val) {
+ case I2S_MODE:
+ adata->res = devm_kzalloc(&pci->dev,
+ sizeof(struct resource) * 2,
+ GFP_KERNEL);
+ if (!adata->res) {
+ ret = -ENOMEM;
+ goto unmap_mmio;
+ }
+
+ adata->res[0].name = "acp3x_i2s_iomem";
+ adata->res[0].flags = IORESOURCE_MEM;
+ adata->res[0].start = addr;
+ adata->res[0].end = addr + (ACP3x_REG_END - ACP3x_REG_START);
+
+ adata->res[1].name = "acp3x_i2s_irq";
+ adata->res[1].flags = IORESOURCE_IRQ;
+ adata->res[1].start = pci->irq;
+ adata->res[1].end = pci->irq;
+
+ adata->acp3x_audio_mode = ACP3x_I2S_MODE;
+
+ memset(&pdevinfo, 0, sizeof(pdevinfo));
+ pdevinfo.name = "acp3x_rv_i2s";
+ pdevinfo.id = 0;
+ pdevinfo.parent = &pci->dev;
+ pdevinfo.num_res = 2;
+ pdevinfo.res = adata->res;
+ pdevinfo.data = &irqflags;
+ pdevinfo.size_data = sizeof(irqflags);
+
+ adata->pdev = platform_device_register_full(&pdevinfo);
+ if (IS_ERR(adata->pdev)) {
+ dev_err(&pci->dev, "cannot register %s device\n",
+ pdevinfo.name);
+ ret = PTR_ERR(adata->pdev);
+ goto unmap_mmio;
+ }
+ break;
+ default:
+ dev_err(&pci->dev, "Invalid ACP audio mode : %d\n", val);
+ ret = -ENODEV;
+ goto unmap_mmio;
+ }
+ return 0;
+
+unmap_mmio:
+ pci_disable_msi(pci);
+ iounmap(adata->acp3x_base);
+release_regions:
+ pci_release_regions(pci);
+disable_pci:
+ pci_disable_device(pci);
+
+ return ret;
+}
+
+static void snd_acp3x_remove(struct pci_dev *pci)
+{
+ struct acp3x_dev_data *adata = pci_get_drvdata(pci);
+
+ platform_device_unregister(adata->pdev);
+ iounmap(adata->acp3x_base);
+
+ pci_disable_msi(pci);
+ pci_release_regions(pci);
+ pci_disable_device(pci);
+}
+
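+/*
+ * Match the ACP function (device 0x15e2) only when its PCI class is
+ * multimedia "other"; the class/class_mask filter keeps the driver off
+ * any other function exposed under the same ID.
+ */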
+static const struct pci_device_id snd_acp3x_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x15e2),
+ .class = PCI_CLASS_MULTIMEDIA_OTHER << 8,
+ .class_mask = 0xffffff },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, snd_acp3x_ids);
+
+static struct pci_driver acp3x_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = snd_acp3x_ids,
+ .probe = snd_acp3x_probe,
+ .remove = snd_acp3x_remove,
+};
+
+module_pci_driver(acp3x_driver);
+
+MODULE_AUTHOR("Maruthi.Bayyavarapu@amd.com");
+MODULE_DESCRIPTION("AMD ACP3x PCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 9cc4f1848c9b..62bdb7e333b8 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -35,6 +35,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_ADAU7002
select SND_SOC_ADS117X
select SND_SOC_AK4104 if SPI_MASTER
+ select SND_SOC_AK4118 if I2C
select SND_SOC_AK4458 if I2C
select SND_SOC_AK4535 if I2C
select SND_SOC_AK4554
@@ -392,6 +393,11 @@ config SND_SOC_AK4104
tristate "AKM AK4104 CODEC"
depends on SPI_MASTER
+config SND_SOC_AK4118
+ tristate "AKM AK4118 CODEC"
+ depends on I2C
+ select REGMAP_I2C
+
config SND_SOC_AK4458
tristate "AKM AK4458 CODEC"
depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 8ffab8c8dbfa..66f55d185620 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -27,6 +27,7 @@ snd-soc-adav801-objs := adav801.o
snd-soc-adav803-objs := adav803.o
snd-soc-ads117x-objs := ads117x.o
snd-soc-ak4104-objs := ak4104.o
+snd-soc-ak4118-objs := ak4118.o
snd-soc-ak4458-objs := ak4458.o
snd-soc-ak4535-objs := ak4535.o
snd-soc-ak4554-objs := ak4554.o
@@ -290,6 +291,7 @@ obj-$(CONFIG_SND_SOC_ADAV801) += snd-soc-adav801.o
obj-$(CONFIG_SND_SOC_ADAV803) += snd-soc-adav803.o
obj-$(CONFIG_SND_SOC_ADS117X) += snd-soc-ads117x.o
obj-$(CONFIG_SND_SOC_AK4104) += snd-soc-ak4104.o
+obj-$(CONFIG_SND_SOC_AK4118) += snd-soc-ak4118.o
obj-$(CONFIG_SND_SOC_AK4458) += snd-soc-ak4458.o
obj-$(CONFIG_SND_SOC_AK4535) += snd-soc-ak4535.o
obj-$(CONFIG_SND_SOC_AK4554) += snd-soc-ak4554.o
diff --git a/sound/soc/codecs/ak4104.c b/sound/soc/codecs/ak4104.c
index 32bc545c19cf..6dec8a65eafc 100644
--- a/sound/soc/codecs/ak4104.c
+++ b/sound/soc/codecs/ak4104.c
@@ -13,7 +13,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
#include <sound/asoundef.h>
#include <sound/core.h>
@@ -268,8 +268,8 @@ static const struct regmap_config ak4104_regmap = {
static int ak4104_spi_probe(struct spi_device *spi)
{
- struct device_node *np = spi->dev.of_node;
struct ak4104_private *ak4104;
+ struct gpio_desc *reset_gpiod;
unsigned int val;
int ret;
@@ -297,19 +297,11 @@ static int ak4104_spi_probe(struct spi_device *spi)
return ret;
}
- if (np) {
- enum of_gpio_flags flags;
- int gpio = of_get_named_gpio_flags(np, "reset-gpio", 0, &flags);
-
- if (gpio_is_valid(gpio)) {
- ret = devm_gpio_request_one(&spi->dev, gpio,
- flags & OF_GPIO_ACTIVE_LOW ?
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
- "ak4104 reset");
- if (ret < 0)
- return ret;
- }
- }
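+ /*
+ * devm_gpiod_get_optional() returns NULL when no reset GPIO is
+ * described and the gpiod core already honours DT polarity flags,
+ * so only probe deferral needs to be propagated here.
+ */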
+ reset_gpiod = devm_gpiod_get_optional(&spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpiod) &&
+ PTR_ERR(reset_gpiod) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
/* read the 'reserved' register - according to the datasheet, it
* should contain 0x5b. Not a good way to verify the presence of
diff --git a/sound/soc/codecs/ak4118.c b/sound/soc/codecs/ak4118.c
new file mode 100644
index 000000000000..238ab29f2bf4
--- /dev/null
+++ b/sound/soc/codecs/ak4118.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ak4118.c -- Asahi Kasei ALSA Soc Audio driver
+ *
+ * Copyright 2018 DEVIALET
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <sound/asoundef.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#define AK4118_REG_CLK_PWR_CTL 0x00
+#define AK4118_REG_FORMAT_CTL 0x01
+#define AK4118_REG_IO_CTL0 0x02
+#define AK4118_REG_IO_CTL1 0x03
+#define AK4118_REG_INT0_MASK 0x04
+#define AK4118_REG_INT1_MASK 0x05
+#define AK4118_REG_RCV_STATUS0 0x06
+#define AK4118_REG_RCV_STATUS1 0x07
+#define AK4118_REG_RXCHAN_STATUS0 0x08
+#define AK4118_REG_RXCHAN_STATUS1 0x09
+#define AK4118_REG_RXCHAN_STATUS2 0x0a
+#define AK4118_REG_RXCHAN_STATUS3 0x0b
+#define AK4118_REG_RXCHAN_STATUS4 0x0c
+#define AK4118_REG_TXCHAN_STATUS0 0x0d
+#define AK4118_REG_TXCHAN_STATUS1 0x0e
+#define AK4118_REG_TXCHAN_STATUS2 0x0f
+#define AK4118_REG_TXCHAN_STATUS3 0x10
+#define AK4118_REG_TXCHAN_STATUS4 0x11
+#define AK4118_REG_BURST_PREAMB_PC0 0x12
+#define AK4118_REG_BURST_PREAMB_PC1 0x13
+#define AK4118_REG_BURST_PREAMB_PD0 0x14
+#define AK4118_REG_BURST_PREAMB_PD1 0x15
+#define AK4118_REG_QSUB_CTL 0x16
+#define AK4118_REG_QSUB_TRACK 0x17
+#define AK4118_REG_QSUB_INDEX 0x18
+#define AK4118_REG_QSUB_MIN 0x19
+#define AK4118_REG_QSUB_SEC 0x1a
+#define AK4118_REG_QSUB_FRAME 0x1b
+#define AK4118_REG_QSUB_ZERO 0x1c
+#define AK4118_REG_QSUB_ABS_MIN 0x1d
+#define AK4118_REG_QSUB_ABS_SEC 0x1e
+#define AK4118_REG_QSUB_ABS_FRAME 0x1f
+#define AK4118_REG_GPE 0x20
+#define AK4118_REG_GPDR 0x21
+#define AK4118_REG_GPSCR 0x22
+#define AK4118_REG_GPLR 0x23
+#define AK4118_REG_DAT_MASK_DTS 0x24
+#define AK4118_REG_RX_DETECT 0x25
+#define AK4118_REG_STC_DAT_DETECT 0x26
+#define AK4118_REG_RXCHAN_STATUS5 0x27
+#define AK4118_REG_TXCHAN_STATUS5 0x28
+#define AK4118_REG_MAX 0x29
+
+#define AK4118_REG_FORMAT_CTL_DIF0 (1 << 4)
+#define AK4118_REG_FORMAT_CTL_DIF1 (1 << 5)
+#define AK4118_REG_FORMAT_CTL_DIF2 (1 << 6)
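+
+/*
+ * DIF2:0 select the digital interface format; ak4118_set_dai_fmt_master()
+ * and ak4118_set_dai_fmt_slave() below pick the encoding for each DAI
+ * format and clocking mode.
+ */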
+
+struct ak4118_priv {
+ struct regmap *regmap;
+ struct gpio_desc *reset;
+ struct gpio_desc *irq;
+ struct snd_soc_component *component;
+};
+
+static const struct reg_default ak4118_reg_defaults[] = {
+ {AK4118_REG_CLK_PWR_CTL, 0x43},
+ {AK4118_REG_FORMAT_CTL, 0x6a},
+ {AK4118_REG_IO_CTL0, 0x88},
+ {AK4118_REG_IO_CTL1, 0x48},
+ {AK4118_REG_INT0_MASK, 0xee},
+ {AK4118_REG_INT1_MASK, 0xb5},
+ {AK4118_REG_RCV_STATUS0, 0x00},
+ {AK4118_REG_RCV_STATUS1, 0x10},
+ {AK4118_REG_TXCHAN_STATUS0, 0x00},
+ {AK4118_REG_TXCHAN_STATUS1, 0x00},
+ {AK4118_REG_TXCHAN_STATUS2, 0x00},
+ {AK4118_REG_TXCHAN_STATUS3, 0x00},
+ {AK4118_REG_TXCHAN_STATUS4, 0x00},
+ {AK4118_REG_GPE, 0x77},
+ {AK4118_REG_GPDR, 0x00},
+ {AK4118_REG_GPSCR, 0x00},
+ {AK4118_REG_GPLR, 0x00},
+ {AK4118_REG_DAT_MASK_DTS, 0x3f},
+ {AK4118_REG_RX_DETECT, 0x00},
+ {AK4118_REG_STC_DAT_DETECT, 0x00},
+ {AK4118_REG_TXCHAN_STATUS5, 0x00},
+};
+
+static const char * const ak4118_input_select_txt[] = {
+ "RX0", "RX1", "RX2", "RX3", "RX4", "RX5", "RX6", "RX7",
+};
+static SOC_ENUM_SINGLE_DECL(ak4118_insel_enum, AK4118_REG_IO_CTL1, 0x0,
+ ak4118_input_select_txt);
+
+static const struct snd_kcontrol_new ak4118_input_mux_controls =
+ SOC_DAPM_ENUM("Input Select", ak4118_insel_enum);
+
+static const char * const ak4118_iec958_fs_txt[] = {
+ "44100", "48000", "32000", "22050", "11025", "24000", "16000", "88200",
+ "8000", "96000", "64000", "176400", "192000",
+};
+
+static const int ak4118_iec958_fs_val[] = {
+ 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xE,
+};
+
+static SOC_VALUE_ENUM_SINGLE_DECL(ak4118_iec958_fs_enum, AK4118_REG_RCV_STATUS1,
+ 0x4, 0x4, ak4118_iec958_fs_txt,
+ ak4118_iec958_fs_val);
+
+static struct snd_kcontrol_new ak4118_iec958_controls[] = {
+ SOC_SINGLE("IEC958 Parity Errors", AK4118_REG_RCV_STATUS0, 0, 1, 0),
+ SOC_SINGLE("IEC958 No Audio", AK4118_REG_RCV_STATUS0, 1, 1, 0),
+ SOC_SINGLE("IEC958 PLL Lock", AK4118_REG_RCV_STATUS0, 4, 1, 1),
+ SOC_SINGLE("IEC958 Non PCM", AK4118_REG_RCV_STATUS0, 6, 1, 0),
+ SOC_ENUM("IEC958 Sampling Freq", ak4118_iec958_fs_enum),
+};
+
+static const struct snd_soc_dapm_widget ak4118_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("INRX0"),
+ SND_SOC_DAPM_INPUT("INRX1"),
+ SND_SOC_DAPM_INPUT("INRX2"),
+ SND_SOC_DAPM_INPUT("INRX3"),
+ SND_SOC_DAPM_INPUT("INRX4"),
+ SND_SOC_DAPM_INPUT("INRX5"),
+ SND_SOC_DAPM_INPUT("INRX6"),
+ SND_SOC_DAPM_INPUT("INRX7"),
+ SND_SOC_DAPM_MUX("Input Mux", SND_SOC_NOPM, 0, 0,
+ &ak4118_input_mux_controls),
+};
+
+static const struct snd_soc_dapm_route ak4118_dapm_routes[] = {
+ {"Input Mux", "RX0", "INRX0"},
+ {"Input Mux", "RX1", "INRX1"},
+ {"Input Mux", "RX2", "INRX2"},
+ {"Input Mux", "RX3", "INRX3"},
+ {"Input Mux", "RX4", "INRX4"},
+ {"Input Mux", "RX5", "INRX5"},
+ {"Input Mux", "RX6", "INRX6"},
+ {"Input Mux", "RX7", "INRX7"},
+};
+
+
+static int ak4118_set_dai_fmt_master(struct ak4118_priv *ak4118,
+ unsigned int format)
+{
+ int dif;
+
+ switch (format & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ dif = AK4118_REG_FORMAT_CTL_DIF0 | AK4118_REG_FORMAT_CTL_DIF2;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ dif = AK4118_REG_FORMAT_CTL_DIF0 | AK4118_REG_FORMAT_CTL_DIF1;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ dif = AK4118_REG_FORMAT_CTL_DIF2;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return dif;
+}
+
+static int ak4118_set_dai_fmt_slave(struct ak4118_priv *ak4118,
+ unsigned int format)
+{
+ int dif;
+
+ switch (format & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ dif = AK4118_REG_FORMAT_CTL_DIF0 | AK4118_REG_FORMAT_CTL_DIF1 |
+ AK4118_REG_FORMAT_CTL_DIF2;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ dif = AK4118_REG_FORMAT_CTL_DIF1 | AK4118_REG_FORMAT_CTL_DIF2;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return dif;
+}
+
+static int ak4118_set_dai_fmt(struct snd_soc_dai *dai,
+ unsigned int format)
+{
+ struct snd_soc_component *component = dai->component;
+ struct ak4118_priv *ak4118 = snd_soc_component_get_drvdata(component);
+ int dif;
+ int ret = 0;
+
+ switch (format & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ /* component is master */
+ dif = ak4118_set_dai_fmt_master(ak4118, format);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ /* component is slave */
+ dif = ak4118_set_dai_fmt_slave(ak4118, format);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ goto exit;
+ }
+
+ /* format not supported */
+ if (dif < 0) {
+ ret = dif;
+ goto exit;
+ }
+
+ ret = regmap_update_bits(ak4118->regmap, AK4118_REG_FORMAT_CTL,
+ AK4118_REG_FORMAT_CTL_DIF0 |
+ AK4118_REG_FORMAT_CTL_DIF1 |
+ AK4118_REG_FORMAT_CTL_DIF2, dif);
+
+exit:
+ return ret;
+}
+
+static int ak4118_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ return 0;
+}
+
+static const struct snd_soc_dai_ops ak4118_dai_ops = {
+ .hw_params = ak4118_hw_params,
+ .set_fmt = ak4118_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver ak4118_dai = {
+ .name = "ak4118-hifi",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S24_LE
+ },
+ .ops = &ak4118_dai_ops,
+};
+
+static irqreturn_t ak4118_irq_handler(int irq, void *data)
+{
+ struct ak4118_priv *ak4118 = data;
+ struct snd_soc_component *component = ak4118->component;
+ struct snd_kcontrol_new *kctl_new;
+ struct snd_kcontrol *kctl;
+ struct snd_ctl_elem_id *id;
+ unsigned int i;
+
+ if (!component)
+ return IRQ_NONE;
+
+ for (i = 0; i < ARRAY_SIZE(ak4118_iec958_controls); i++) {
+ kctl_new = &ak4118_iec958_controls[i];
+ kctl = snd_soc_card_get_kcontrol(component->card,
+ kctl_new->name);
+ if (!kctl)
+ continue;
+ id = &kctl->id;
+ snd_ctl_notify(component->card->snd_card,
+ SNDRV_CTL_EVENT_MASK_VALUE, id);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ak4118_probe(struct snd_soc_component *component)
+{
+ struct ak4118_priv *ak4118 = snd_soc_component_get_drvdata(component);
+ int ret = 0;
+
+ ak4118->component = component;
+
+ /* release reset */
+ gpiod_set_value(ak4118->reset, 0);
+
+ /* unmask all int1 sources */
+ ret = regmap_write(ak4118->regmap, AK4118_REG_INT1_MASK, 0x00);
+ if (ret < 0) {
+ dev_err(component->dev,
+ "failed to write regmap 0x%x 0x%x: %d\n",
+ AK4118_REG_INT1_MASK, 0x00, ret);
+ return ret;
+ }
+
+ /* rx detect enable on all channels */
+ ret = regmap_write(ak4118->regmap, AK4118_REG_RX_DETECT, 0xff);
+ if (ret < 0) {
+ dev_err(component->dev,
+ "failed to write regmap 0x%x 0x%x: %d\n",
+ AK4118_REG_RX_DETECT, 0xff, ret);
+ return ret;
+ }
+
+ ret = snd_soc_add_component_controls(component, ak4118_iec958_controls,
+ ARRAY_SIZE(ak4118_iec958_controls));
+ if (ret) {
+ dev_err(component->dev,
+ "failed to add component kcontrols: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ak4118_remove(struct snd_soc_component *component)
+{
+ struct ak4118_priv *ak4118 = snd_soc_component_get_drvdata(component);
+
+ /* hold reset */
+ gpiod_set_value(ak4118->reset, 1);
+}
+
+static const struct snd_soc_component_driver soc_component_drv_ak4118 = {
+ .probe = ak4118_probe,
+ .remove = ak4118_remove,
+ .dapm_widgets = ak4118_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak4118_dapm_widgets),
+ .dapm_routes = ak4118_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ak4118_dapm_routes),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct regmap_config ak4118_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .reg_defaults = ak4118_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(ak4118_reg_defaults),
+
+ .cache_type = REGCACHE_NONE,
+ .max_register = AK4118_REG_MAX - 1,
+};
+
+static int ak4118_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct ak4118_priv *ak4118;
+ int ret;
+
+ ak4118 = devm_kzalloc(&i2c->dev, sizeof(struct ak4118_priv),
+ GFP_KERNEL);
+ if (ak4118 == NULL)
+ return -ENOMEM;
+
+ ak4118->regmap = devm_regmap_init_i2c(i2c, &ak4118_regmap);
+ if (IS_ERR(ak4118->regmap))
+ return PTR_ERR(ak4118->regmap);
+
+ i2c_set_clientdata(i2c, ak4118);
+
+ ak4118->reset = devm_gpiod_get(&i2c->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ak4118->reset)) {
+ ret = PTR_ERR(ak4118->reset);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&i2c->dev, "Failed to get reset: %d\n", ret);
+ return ret;
+ }
+
+ ak4118->irq = devm_gpiod_get(&i2c->dev, "irq", GPIOD_IN);
+ if (IS_ERR(ak4118->irq)) {
+ ret = PTR_ERR(ak4118->irq);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&i2c->dev, "Failed to get IRQ: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_request_threaded_irq(&i2c->dev, gpiod_to_irq(ak4118->irq),
+ NULL, ak4118_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "ak4118-irq", ak4118);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Fail to request_irq: %d\n", ret);
+ return ret;
+ }
+
+ return snd_soc_register_component(&i2c->dev, &soc_component_drv_ak4118,
+ &ak4118_dai, 1);
+}
+
+static int ak4118_i2c_remove(struct i2c_client *i2c)
+{
+ snd_soc_unregister_component(&i2c->dev);
+ return 0;
+}
+
+static const struct of_device_id ak4118_of_match[] = {
+ { .compatible = "asahi-kasei,ak4118", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ak4118_of_match);
+
+static const struct i2c_device_id ak4118_id_table[] = {
+ { "ak4118", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ak4118_id_table);
+
+static struct i2c_driver ak4118_i2c_driver = {
+ .driver = {
+ .name = "ak4118",
+ .of_match_table = of_match_ptr(ak4118_of_match),
+ },
+ .id_table = ak4118_id_table,
+ .probe = ak4118_i2c_probe,
+ .remove = ak4118_i2c_remove,
+};
+
+module_i2c_driver(ak4118_i2c_driver);
+
+MODULE_DESCRIPTION("Asahi Kasei AK4118 ALSA SoC driver");
+MODULE_AUTHOR("Adrien Charruel <adrien.charruel@devialet.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c
index 299ada4dfaa0..70d4c89bd6fc 100644
--- a/sound/soc/codecs/ak4458.c
+++ b/sound/soc/codecs/ak4458.c
@@ -456,7 +456,7 @@ static int ak4458_startup(struct snd_pcm_substream *substream,
return ret;
}
-static struct snd_soc_dai_ops ak4458_dai_ops = {
+static const struct snd_soc_dai_ops ak4458_dai_ops = {
.startup = ak4458_startup,
.hw_params = ak4458_hw_params,
.set_fmt = ak4458_set_dai_fmt,
diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
index 448bb90c9c8e..8179512129d3 100644
--- a/sound/soc/codecs/ak5558.c
+++ b/sound/soc/codecs/ak5558.c
@@ -130,16 +130,12 @@ static int ak5558_hw_params(struct snd_pcm_substream *substream,
u8 bits;
int pcm_width = max(params_physical_width(params), ak5558->slot_width);
- /* set master/slave audio interface */
- bits = snd_soc_component_read32(component, AK5558_02_CONTROL1);
- bits &= ~AK5558_BITS;
-
switch (pcm_width) {
case 16:
- bits |= AK5558_DIF_24BIT_MODE;
+ bits = AK5558_DIF_24BIT_MODE;
break;
case 32:
- bits |= AK5558_DIF_32BIT_MODE;
+ bits = AK5558_DIF_32BIT_MODE;
break;
default:
return -EINVAL;
@@ -168,18 +164,15 @@ static int ak5558_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
}
/* set master/slave audio interface */
- format = snd_soc_component_read32(component, AK5558_02_CONTROL1);
- format &= ~AK5558_DIF;
-
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
- format |= AK5558_DIF_I2S_MODE;
+ format = AK5558_DIF_I2S_MODE;
break;
case SND_SOC_DAIFMT_LEFT_J:
- format |= AK5558_DIF_MSB_MODE;
+ format = AK5558_DIF_MSB_MODE;
break;
case SND_SOC_DAIFMT_DSP_B:
- format |= AK5558_DIF_MSB_MODE;
+ format = AK5558_DIF_MSB_MODE;
break;
default:
return -EINVAL;
@@ -246,7 +239,7 @@ static int ak5558_startup(struct snd_pcm_substream *substream,
&ak5558_rate_constraints);
}
-static struct snd_soc_dai_ops ak5558_dai_ops = {
+static const struct snd_soc_dai_ops ak5558_dai_ops = {
.startup = ak5558_startup,
.hw_params = ak5558_hw_params,
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 3c266eeb89bf..33d74f163bd7 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -29,8 +29,8 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
/*
* The codec isn't really big-endian or little-endian, since the I2S
@@ -658,8 +658,8 @@ static const struct regmap_config cs4270_regmap = {
static int cs4270_i2c_probe(struct i2c_client *i2c_client,
const struct i2c_device_id *id)
{
- struct device_node *np = i2c_client->dev.of_node;
struct cs4270_private *cs4270;
+ struct gpio_desc *reset_gpiod;
unsigned int val;
int ret, i;
@@ -678,20 +678,11 @@ static int cs4270_i2c_probe(struct i2c_client *i2c_client,
if (ret < 0)
return ret;
- /* See if we have a way to bring the codec out of reset */
- if (np) {
- enum of_gpio_flags flags;
- int gpio = of_get_named_gpio_flags(np, "reset-gpio", 0, &flags);
-
- if (gpio_is_valid(gpio)) {
- ret = devm_gpio_request_one(&i2c_client->dev, gpio,
- flags & OF_GPIO_ACTIVE_LOW ?
- GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
- "cs4270 reset");
- if (ret < 0)
- return ret;
- }
- }
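+ /* As in ak4104: NULL means no reset GPIO is described; only probe
+ * deferral needs to be propagated.
+ */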
+ reset_gpiod = devm_gpiod_get_optional(&i2c_client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpiod) &&
+ PTR_ERR(reset_gpiod) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
cs4270->regmap = devm_regmap_init_i2c(i2c_client, &cs4270_regmap);
if (IS_ERR(cs4270->regmap))
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index 71322e0410ee..da921da50ef0 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -30,9 +30,39 @@
#include <sound/soc.h>
#include <sound/soc-dapm.h>
+#define MAX_MODESWITCH_DELAY 70
+static unsigned int modeswitch_delay;
+module_param(modeswitch_delay, uint, 0644);
+
+static unsigned int wakeup_delay;
+module_param(wakeup_delay, uint, 0644);
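+
+/*
+ * When set, these module parameters override the corresponding DT
+ * properties; modeswitch_delay is additionally clamped to
+ * MAX_MODESWITCH_DELAY in dmic_component_probe().
+ */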
+
struct dmic {
struct gpio_desc *gpio_en;
int wakeup_delay;
+ /* Delay after DMIC mode switch */
+ int modeswitch_delay;
+};
+
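+/* Stop-path trigger: wait out the configured mode-switch settling time. */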
+static int dmic_daiops_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct dmic *dmic = snd_soc_component_get_drvdata(component);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_STOP:
+ if (dmic->modeswitch_delay)
+ mdelay(dmic->modeswitch_delay);
+
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops dmic_dai_ops = {
+ .trigger = dmic_daiops_trigger,
};
static int dmic_aif_event(struct snd_soc_dapm_widget *w,
@@ -68,6 +98,7 @@ static struct snd_soc_dai_driver dmic_dai = {
| SNDRV_PCM_FMTBIT_S24_LE
| SNDRV_PCM_FMTBIT_S16_LE,
},
+ .ops = &dmic_dai_ops,
};
static int dmic_component_probe(struct snd_soc_component *component)
@@ -85,6 +116,15 @@ static int dmic_component_probe(struct snd_soc_component *component)
device_property_read_u32(component->dev, "wakeup-delay-ms",
&dmic->wakeup_delay);
+ device_property_read_u32(component->dev, "modeswitch-delay-ms",
+ &dmic->modeswitch_delay);
+ if (wakeup_delay)
+ dmic->wakeup_delay = wakeup_delay;
+ if (modeswitch_delay)
+ dmic->modeswitch_delay = modeswitch_delay;
+
+ if (dmic->modeswitch_delay > MAX_MODESWITCH_DELAY)
+ dmic->modeswitch_delay = MAX_MODESWITCH_DELAY;
snd_soc_component_set_drvdata(component, dmic);
diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
index 2aaa83028e55..ffecdaaa8cf2 100644
--- a/sound/soc/codecs/hdac_hda.c
+++ b/sound/soc/codecs/hdac_hda.c
@@ -46,7 +46,7 @@ static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt,
struct snd_soc_dai *dai);
-static struct snd_soc_dai_ops hdac_hda_dai_ops = {
+static const struct snd_soc_dai_ops hdac_hda_dai_ops = {
.startup = hdac_hda_dai_open,
.shutdown = hdac_hda_dai_close,
.prepare = hdac_hda_dai_prepare,
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 4e9854889a95..3ab2949c1dfa 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -121,8 +121,16 @@ struct hdac_hdmi_dai_port_map {
struct hdac_hdmi_cvt *cvt;
};
+/*
+ * Pin-to-port mapping table: each entry holds a pin NID, and its 1-based
+ * index is the corresponding port number (e.g. on ICL, pin 0x6 is the
+ * second entry, so it maps to port 2).
+ */
+static const int icl_pin2port_map[] = {0x4, 0x6, 0x8, 0xa, 0xb};
+
struct hdac_hdmi_drv_data {
unsigned int vendor_nid;
+ const int *port_map; /* pin to port mapping table */
+ int port_num;
};
struct hdac_hdmi_priv {
@@ -1329,11 +1337,12 @@ static int hdac_hdmi_add_pin(struct hdac_device *hdev, hda_nid_t nid)
return 0;
}
-#define INTEL_VENDOR_NID 0x08
-#define INTEL_GLK_VENDOR_NID 0x0b
+#define INTEL_VENDOR_NID_0x2 0x02
+#define INTEL_VENDOR_NID_0x8 0x08
+#define INTEL_VENDOR_NID_0xb 0x0b
#define INTEL_GET_VENDOR_VERB 0xf81
#define INTEL_SET_VENDOR_VERB 0x781
-#define INTEL_EN_DP12 0x02 /* enable DP 1.2 features */
+#define INTEL_EN_DP12 0x02 /* enable DP 1.2 features */
#define INTEL_EN_ALL_PIN_CVTS 0x01 /* enable 2nd & 3rd pins and converters */
static void hdac_hdmi_skl_enable_all_pins(struct hdac_device *hdev)
@@ -1538,7 +1547,26 @@ free_widgets:
static int hdac_hdmi_pin2port(void *aptr, int pin)
{
- return pin - 4; /* map NID 0x05 -> port #1 */
+ struct hdac_device *hdev = aptr;
+ struct hdac_hdmi_priv *hdmi = hdev_to_hdmi_priv(hdev);
+ const int *map = hdmi->drv_data->port_map;
+ int i;
+
+ if (!hdmi->drv_data->port_num)
+ return pin - 4; /* map NID 0x05 -> port #1 */
+
+ /*
+ * Look up the pin number in the mapping table and return its
+ * 1-based index, which is the port number.
+ */
+ for (i = 0; i < hdmi->drv_data->port_num; i++) {
+ if (pin == map[i])
+ return i + 1;
+ }
+
+ /* return -1 if the pin is not described in the mapping table */
+ dev_err(&hdev->dev, "Can't find the port for pin %d\n", pin);
+ return -1;
}
static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
@@ -1549,9 +1577,18 @@ static void hdac_hdmi_eld_notify_cb(void *aptr, int port, int pipe)
struct hdac_hdmi_port *hport = NULL;
struct snd_soc_component *component = hdmi->component;
int i;
-
- /* Don't know how this mapping is derived */
- hda_nid_t pin_nid = port + 0x04;
+ hda_nid_t pin_nid;
+
+ if (!hdmi->drv_data->port_num) {
+ /* for legacy platforms */
+ pin_nid = port + 0x04;
+ } else if (port > 0 && port <= hdmi->drv_data->port_num) {
+ /* get pin number from the pin2port mapping table */
+ pin_nid = hdmi->drv_data->port_map[port - 1];
+ } else {
+ dev_err(&hdev->dev, "Can't find the pin for port %d\n", port);
+ return;
+ }
dev_dbg(&hdev->dev, "%s: for pin:%d port=%d\n", __func__,
pin_nid, pipe);
@@ -1973,12 +2010,18 @@ static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdev, int pcm_idx)
return port->eld.info.spk_alloc;
}
+static struct hdac_hdmi_drv_data intel_icl_drv_data = {
+ .vendor_nid = INTEL_VENDOR_NID_0x2,
+ .port_map = icl_pin2port_map,
+ .port_num = ARRAY_SIZE(icl_pin2port_map),
+};
+
static struct hdac_hdmi_drv_data intel_glk_drv_data = {
- .vendor_nid = INTEL_GLK_VENDOR_NID,
+ .vendor_nid = INTEL_VENDOR_NID_0xb,
};
static struct hdac_hdmi_drv_data intel_drv_data = {
- .vendor_nid = INTEL_VENDOR_NID,
+ .vendor_nid = INTEL_VENDOR_NID_0x8,
};
static int hdac_hdmi_dev_probe(struct hdac_device *hdev)
@@ -2031,13 +2074,7 @@ static int hdac_hdmi_dev_probe(struct hdac_device *hdev)
* Turned off in the runtime_suspend during the first explicit
* pm_runtime_suspend call.
*/
- ret = snd_hdac_display_power(hdev->bus, true);
- if (ret < 0) {
- dev_err(&hdev->dev,
- "Cannot turn on display power on i915 err: %d\n",
- ret);
- return ret;
- }
+ snd_hdac_display_power(hdev->bus, hdev->addr, true);
ret = hdac_hdmi_parse_and_map_nid(hdev, &hdmi_dais, &num_dais);
if (ret < 0) {
@@ -2065,6 +2102,8 @@ static int hdac_hdmi_dev_remove(struct hdac_device *hdev)
struct hdac_hdmi_port *port, *port_next;
int i;
+ snd_hdac_display_power(hdev->bus, hdev->addr, false);
+
list_for_each_entry_safe(pcm, pcm_next, &hdmi->pcm_list, head) {
pcm->cvt = NULL;
if (list_empty(&pcm->port_list))
@@ -2170,7 +2209,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
struct hdac_device *hdev = dev_to_hdac_dev(dev);
struct hdac_bus *bus = hdev->bus;
struct hdac_ext_link *hlink = NULL;
- int err;
dev_dbg(dev, "Enter: %s\n", __func__);
@@ -2187,11 +2225,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
*/
snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
AC_PWRST_D3);
- err = snd_hdac_display_power(bus, false);
- if (err < 0) {
- dev_err(dev, "Cannot turn on display power on i915\n");
- return err;
- }
hlink = snd_hdac_ext_bus_get_link(bus, dev_name(dev));
if (!hlink) {
@@ -2201,6 +2234,8 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
snd_hdac_ext_bus_link_put(bus, hlink);
+ snd_hdac_display_power(bus, hdev->addr, false);
+
return 0;
}
@@ -2209,7 +2244,6 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
struct hdac_device *hdev = dev_to_hdac_dev(dev);
struct hdac_bus *bus = hdev->bus;
struct hdac_ext_link *hlink = NULL;
- int err;
dev_dbg(dev, "Enter: %s\n", __func__);
@@ -2225,11 +2259,7 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
snd_hdac_ext_bus_link_get(bus, hlink);
- err = snd_hdac_display_power(bus, true);
- if (err < 0) {
- dev_err(dev, "Cannot turn on display power on i915\n");
- return err;
- }
+ snd_hdac_display_power(bus, hdev->addr, true);
hdac_hdmi_skl_enable_all_pins(hdev);
hdac_hdmi_skl_enable_dp12(hdev);
@@ -2259,6 +2289,8 @@ static const struct hda_device_id hdmi_list[] = {
&intel_glk_drv_data),
HDA_CODEC_EXT_ENTRY(0x8086280d, 0x100000, "Geminilake HDMI",
&intel_glk_drv_data),
+ HDA_CODEC_EXT_ENTRY(0x8086280f, 0x100000, "Icelake HDMI",
+ &intel_icl_drv_data),
{}
};
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index a09d01318f79..9c8616a7b61c 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -724,14 +724,39 @@ static struct snd_soc_dai_driver max98373_dai[] = {
}
};
+static void max98373_reset(struct max98373_priv *max98373, struct device *dev)
+{
+ int ret, reg, count;
+
+ /* Software Reset */
+ ret = regmap_update_bits(max98373->regmap,
+ MAX98373_R2000_SW_RESET,
+ MAX98373_SOFT_RESET,
+ MAX98373_SOFT_RESET);
+ if (ret)
+ dev_err(dev, "Reset command failed. (ret:%d)\n", ret);
+
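+ /* Poll the revision ID up to three times to confirm the part is back */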
+ count = 0;
+ while (count < 3) {
+ usleep_range(10000, 11000);
+ /* Software Reset Verification */
+ ret = regmap_read(max98373->regmap,
+ MAX98373_R21FF_REV_ID, &reg);
+ if (!ret) {
+ dev_info(dev, "Reset completed (retry:%d)\n", count);
+ return;
+ }
+ count++;
+ }
+ dev_err(dev, "Reset failed. (ret:%d)\n", ret);
+}
+
static int max98373_probe(struct snd_soc_component *component)
{
struct max98373_priv *max98373 = snd_soc_component_get_drvdata(component);
/* Software Reset */
- regmap_write(max98373->regmap,
- MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
- usleep_range(10000, 11000);
+ max98373_reset(max98373, component->dev);
/* IV default slot configuration */
regmap_write(max98373->regmap,
@@ -818,9 +843,7 @@ static int max98373_resume(struct device *dev)
{
struct max98373_priv *max98373 = dev_get_drvdata(dev);
- regmap_write(max98373->regmap,
- MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
- usleep_range(10000, 11000);
+ max98373_reset(max98373, dev);
regcache_cache_only(max98373->regmap, false);
regcache_sync(max98373->regmap);
return 0;
diff --git a/sound/soc/codecs/max9867.c b/sound/soc/codecs/max9867.c
index 4ea3287162ad..8600c5439e1e 100644
--- a/sound/soc/codecs/max9867.c
+++ b/sound/soc/codecs/max9867.c
@@ -1,12 +1,10 @@
-/*
- * max9867.c -- max9867 ALSA SoC Audio driver
- *
- * Copyright 2013-15 Maxim Integrated Products
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// MAX9867 ALSA SoC codec driver
+//
+// Copyright 2013-2015 Maxim Integrated Products
+// Copyright 2018 Ladislav Michl <ladis@linux-mips.org>
+//
#include <linux/delay.h>
#include <linux/i2c.h>
@@ -23,254 +21,237 @@ static const char *const max9867_spmode[] = {
"Stereo Single", "Mono Single",
"Stereo Single Fast", "Mono Single Fast"
};
-static const char *const max9867_sidetone_text[] = {
- "None", "Left", "Right", "LeftRight", "LeftRightDiv2",
-};
static const char *const max9867_filter_text[] = {"IIR", "FIR"};
static SOC_ENUM_SINGLE_DECL(max9867_filter, MAX9867_CODECFLTR, 7,
max9867_filter_text);
static SOC_ENUM_SINGLE_DECL(max9867_spkmode, MAX9867_MODECONFIG, 0,
max9867_spmode);
-static SOC_ENUM_SINGLE_DECL(max9867_sidetone, MAX9867_DACGAIN, 6,
- max9867_sidetone_text);
-static DECLARE_TLV_DB_SCALE(max9860_capture_tlv, -600, 200, 0);
-static DECLARE_TLV_DB_SCALE(max9860_mic_tlv, 2000, 100, 1);
-static DECLARE_TLV_DB_SCALE(max9860_adc_left_tlv, -1200, 100, 1);
-static DECLARE_TLV_DB_SCALE(max9860_adc_right_tlv, -1200, 100, 1);
-static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(max98088_micboost_tlv,
- 0, 1, TLV_DB_SCALE_ITEM(0, 2000, 0),
- 2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(max9867_master_tlv,
+ 0, 2, TLV_DB_SCALE_ITEM(-8600, 200, 1),
+ 3, 17, TLV_DB_SCALE_ITEM(-7800, 400, 0),
+ 18, 25, TLV_DB_SCALE_ITEM(-2000, 200, 0),
+ 26, 34, TLV_DB_SCALE_ITEM( -500, 100, 0),
+ 35, 40, TLV_DB_SCALE_ITEM( 350, 50, 0),
+);
+static DECLARE_TLV_DB_SCALE(max9867_mic_tlv, 0, 100, 0);
+static DECLARE_TLV_DB_SCALE(max9867_line_tlv, -600, 200, 0);
+static DECLARE_TLV_DB_SCALE(max9867_adc_tlv, -1200, 100, 0);
+static DECLARE_TLV_DB_SCALE(max9867_dac_tlv, -1500, 100, 0);
+static DECLARE_TLV_DB_SCALE(max9867_dacboost_tlv, 0, 600, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(max9867_micboost_tlv,
+ 0, 2, TLV_DB_SCALE_ITEM(-2000, 2000, 1),
+ 3, 3, TLV_DB_SCALE_ITEM(3000, 0, 0),
);
static const struct snd_kcontrol_new max9867_snd_controls[] = {
- SOC_DOUBLE_R("Master Playback Volume", MAX9867_LEFTVOL,
- MAX9867_RIGHTVOL, 0, 63, 1),
- SOC_DOUBLE_R_TLV("Capture Volume", MAX9867_LEFTMICGAIN,
- MAX9867_RIGHTMICGAIN,
- 0, 15, 1, max9860_capture_tlv),
- SOC_DOUBLE_R_TLV("Mic Volume", MAX9867_LEFTMICGAIN,
- MAX9867_RIGHTMICGAIN, 0, 31, 1, max9860_mic_tlv),
- SOC_DOUBLE_R_TLV("Mic Boost Volume", MAX9867_LEFTMICGAIN,
- MAX9867_RIGHTMICGAIN, 5, 3, 0, max98088_micboost_tlv),
- SOC_ENUM("Digital Sidetone Src", max9867_sidetone),
- SOC_SINGLE("Sidetone Volume", MAX9867_DACGAIN, 0, 31, 1),
- SOC_SINGLE("DAC Volume", MAX9867_DACLEVEL, 4, 3, 0),
- SOC_SINGLE("DAC Attenuation", MAX9867_DACLEVEL, 0, 15, 1),
- SOC_SINGLE_TLV("ADC Left Volume", MAX9867_ADCLEVEL,
- 4, 15, 1, max9860_adc_left_tlv),
- SOC_SINGLE_TLV("ADC Right Volume", MAX9867_ADCLEVEL,
- 0, 15, 1, max9860_adc_right_tlv),
+ SOC_DOUBLE_R_TLV("Master Playback Volume", MAX9867_LEFTVOL,
+ MAX9867_RIGHTVOL, 0, 41, 1, max9867_master_tlv),
+ SOC_DOUBLE_R_TLV("Line Capture Volume", MAX9867_LEFTLINELVL,
+ MAX9867_RIGHTLINELVL, 0, 15, 1, max9867_line_tlv),
+ SOC_DOUBLE_R_TLV("Mic Capture Volume", MAX9867_LEFTMICGAIN,
+ MAX9867_RIGHTMICGAIN, 0, 20, 1, max9867_mic_tlv),
+ SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", MAX9867_LEFTMICGAIN,
+ MAX9867_RIGHTMICGAIN, 5, 4, 0, max9867_micboost_tlv),
+ SOC_SINGLE("Digital Sidetone Volume", MAX9867_SIDETONE, 0, 31, 1),
+ SOC_SINGLE_TLV("Digital Playback Volume", MAX9867_DACLEVEL, 0, 15, 1,
+ max9867_dac_tlv),
+ SOC_SINGLE_TLV("Digital Boost Playback Volume", MAX9867_DACLEVEL, 4, 3, 0,
+ max9867_dacboost_tlv),
+ SOC_DOUBLE_TLV("Digital Capture Volume", MAX9867_ADCLEVEL, 0, 4, 15, 1,
+ max9867_adc_tlv),
SOC_ENUM("Speaker Mode", max9867_spkmode),
SOC_SINGLE("Volume Smoothing Switch", MAX9867_MODECONFIG, 6, 1, 0),
- SOC_SINGLE("ZCD Switch", MAX9867_MODECONFIG, 5, 1, 0),
+ SOC_SINGLE("Line ZC Switch", MAX9867_MODECONFIG, 5, 1, 0),
SOC_ENUM("DSP Filter", max9867_filter),
};
-static const char *const max9867_mux[] = {"None", "Mic", "Line", "Mic_Line"};
+/* Input mixer */
+static const struct snd_kcontrol_new max9867_input_mixer_controls[] = {
+ SOC_DAPM_DOUBLE("Line Capture Switch", MAX9867_INPUTCONFIG, 7, 5, 1, 0),
+ SOC_DAPM_DOUBLE("Mic Capture Switch", MAX9867_INPUTCONFIG, 6, 4, 1, 0),
+};
+
+/* Output mixer */
+static const struct snd_kcontrol_new max9867_output_mixer_controls[] = {
+ SOC_DAPM_DOUBLE_R("Line Bypass Switch",
+ MAX9867_LEFTLINELVL, MAX9867_RIGHTLINELVL, 6, 1, 1),
+};
-static SOC_ENUM_SINGLE_DECL(max9867_mux_enum,
- MAX9867_INPUTCONFIG, MAX9867_INPUT_SHIFT,
- max9867_mux);
+/* Sidetone mixer */
+static const struct snd_kcontrol_new max9867_sidetone_mixer_controls[] = {
+ SOC_DAPM_DOUBLE("Sidetone Switch", MAX9867_SIDETONE, 6, 7, 1, 0),
+};
-static const struct snd_kcontrol_new max9867_dapm_mux_controls =
- SOC_DAPM_ENUM("Route", max9867_mux_enum);
+/* Line out switch */
+static const struct snd_kcontrol_new max9867_line_out_control =
+ SOC_DAPM_DOUBLE_R("Switch",
+ MAX9867_LEFTVOL, MAX9867_RIGHTVOL, 6, 1, 1);
-static const struct snd_kcontrol_new max9867_left_dapm_control =
- SOC_DAPM_SINGLE("Switch", MAX9867_PWRMAN, 6, 1, 0);
-static const struct snd_kcontrol_new max9867_right_dapm_control =
- SOC_DAPM_SINGLE("Switch", MAX9867_PWRMAN, 5, 1, 0);
-static const struct snd_kcontrol_new max9867_line_dapm_control =
- SOC_DAPM_SINGLE("Switch", MAX9867_LEFTLINELVL, 6, 1, 1);
static const struct snd_soc_dapm_widget max9867_dapm_widgets[] = {
- SND_SOC_DAPM_AIF_IN("DAI_OUT", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
- SND_SOC_DAPM_DAC("Left DAC", NULL, MAX9867_PWRMAN, 3, 0),
- SND_SOC_DAPM_DAC("Right DAC", NULL, MAX9867_PWRMAN, 2, 0),
- SND_SOC_DAPM_MIXER("Output Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_OUTPUT("HPOUT"),
-
- SND_SOC_DAPM_AIF_IN("DAI_IN", "HiFi Capture", 0, SND_SOC_NOPM, 0, 0),
- SND_SOC_DAPM_ADC("Left ADC", "HiFi Capture", MAX9867_PWRMAN, 1, 0),
- SND_SOC_DAPM_ADC("Right ADC", "HiFi Capture", MAX9867_PWRMAN, 0, 0),
- SND_SOC_DAPM_MUX("Input Mux", SND_SOC_NOPM, 0, 0,
- &max9867_dapm_mux_controls),
-
- SND_SOC_DAPM_MIXER("Input Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
- SND_SOC_DAPM_SWITCH("Left Line", MAX9867_LEFTLINELVL, 6, 1,
- &max9867_left_dapm_control),
- SND_SOC_DAPM_SWITCH("Right Line", MAX9867_RIGTHLINELVL, 6, 1,
- &max9867_right_dapm_control),
- SND_SOC_DAPM_SWITCH("Line Mixer", SND_SOC_NOPM, 0, 0,
- &max9867_line_dapm_control),
- SND_SOC_DAPM_INPUT("LINE_IN"),
+ SND_SOC_DAPM_INPUT("MICL"),
+ SND_SOC_DAPM_INPUT("MICR"),
+ SND_SOC_DAPM_INPUT("LINL"),
+ SND_SOC_DAPM_INPUT("LINR"),
+
+ SND_SOC_DAPM_PGA("Left Line Input", MAX9867_PWRMAN, 6, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Line Input", MAX9867_PWRMAN, 5, 0, NULL, 0),
+ SND_SOC_DAPM_MIXER_NAMED_CTL("Input Mixer", SND_SOC_NOPM, 0, 0,
+ max9867_input_mixer_controls,
+ ARRAY_SIZE(max9867_input_mixer_controls)),
+ SND_SOC_DAPM_ADC("ADCL", "HiFi Capture", MAX9867_PWRMAN, 1, 0),
+ SND_SOC_DAPM_ADC("ADCR", "HiFi Capture", MAX9867_PWRMAN, 0, 0),
+
+ SND_SOC_DAPM_MIXER("Digital", SND_SOC_NOPM, 0, 0,
+ max9867_sidetone_mixer_controls,
+ ARRAY_SIZE(max9867_sidetone_mixer_controls)),
+ SND_SOC_DAPM_MIXER_NAMED_CTL("Output Mixer", SND_SOC_NOPM, 0, 0,
+ max9867_output_mixer_controls,
+ ARRAY_SIZE(max9867_output_mixer_controls)),
+ SND_SOC_DAPM_DAC("DACL", "HiFi Playback", MAX9867_PWRMAN, 3, 0),
+ SND_SOC_DAPM_DAC("DACR", "HiFi Playback", MAX9867_PWRMAN, 2, 0),
+ SND_SOC_DAPM_SWITCH("Master Playback", SND_SOC_NOPM, 0, 0,
+ &max9867_line_out_control),
+ SND_SOC_DAPM_OUTPUT("LOUT"),
+ SND_SOC_DAPM_OUTPUT("ROUT"),
};
static const struct snd_soc_dapm_route max9867_audio_map[] = {
- {"Left DAC", NULL, "DAI_OUT"},
- {"Right DAC", NULL, "DAI_OUT"},
- {"Output Mixer", NULL, "Left DAC"},
- {"Output Mixer", NULL, "Right DAC"},
- {"HPOUT", NULL, "Output Mixer"},
-
- {"Left ADC", NULL, "DAI_IN"},
- {"Right ADC", NULL, "DAI_IN"},
- {"Input Mixer", NULL, "Left ADC"},
- {"Input Mixer", NULL, "Right ADC"},
- {"Input Mux", "Line", "Input Mixer"},
- {"Input Mux", "Mic", "Input Mixer"},
- {"Input Mux", "Mic_Line", "Input Mixer"},
- {"Right Line", "Switch", "Input Mux"},
- {"Left Line", "Switch", "Input Mux"},
- {"LINE_IN", NULL, "Left Line"},
- {"LINE_IN", NULL, "Right Line"},
+ {"Left Line Input", NULL, "LINL"},
+ {"Right Line Input", NULL, "LINR"},
+ {"Input Mixer", "Mic Capture Switch", "MICL"},
+ {"Input Mixer", "Mic Capture Switch", "MICR"},
+ {"Input Mixer", "Line Capture Switch", "Left Line Input"},
+ {"Input Mixer", "Line Capture Switch", "Right Line Input"},
+ {"ADCL", NULL, "Input Mixer"},
+ {"ADCR", NULL, "Input Mixer"},
+
+ {"Digital", "Sidetone Switch", "ADCL"},
+ {"Digital", "Sidetone Switch", "ADCR"},
+ {"DACL", NULL, "Digital"},
+ {"DACR", NULL, "Digital"},
+
+ {"Output Mixer", "Line Bypass Switch", "Left Line Input"},
+ {"Output Mixer", "Line Bypass Switch", "Right Line Input"},
+ {"Output Mixer", NULL, "DACL"},
+ {"Output Mixer", NULL, "DACR"},
+ {"Master Playback", "Switch", "Output Mixer"},
+ {"LOUT", NULL, "Master Playback"},
+ {"ROUT", NULL, "Master Playback"},
+};
+
+static const unsigned int max9867_rates_44k1[] = {
+ 11025, 22050, 44100,
+};
+
+static const struct snd_pcm_hw_constraint_list max9867_constraints_44k1 = {
+ .list = max9867_rates_44k1,
+ .count = ARRAY_SIZE(max9867_rates_44k1),
};
-enum rates {
- pcm_rate_8, pcm_rate_16, pcm_rate_24,
- pcm_rate_32, pcm_rate_44,
- pcm_rate_48, max_pcm_rate,
+static const unsigned int max9867_rates_48k[] = {
+ 8000, 16000, 32000, 48000,
};
-static const struct ni_div_rates {
- u32 mclk;
- u16 ni[max_pcm_rate];
-} ni_div[] = {
- {11289600, {0x116A, 0x22D4, 0x343F, 0x45A9, 0x6000, 0x687D} },
- {12000000, {0x1062, 0x20C5, 0x3127, 0x4189, 0x5A51, 0x624E} },
- {12288000, {0x1000, 0x2000, 0x3000, 0x4000, 0x5833, 0x6000} },
- {13000000, {0x0F20, 0x1E3F, 0x2D5F, 0x3C7F, 0x535F, 0x5ABE} },
- {19200000, {0x0A3D, 0x147B, 0x1EB8, 0x28F6, 0x3873, 0x3D71} },
- {24000000, {0x1062, 0x20C5, 0x1893, 0x4189, 0x5A51, 0x624E} },
- {26000000, {0x0F20, 0x1E3F, 0x16AF, 0x3C7F, 0x535F, 0x5ABE} },
- {27000000, {0x0E90, 0x1D21, 0x15D8, 0x3A41, 0x5048, 0x5762} },
+static const struct snd_pcm_hw_constraint_list max9867_constraints_48k = {
+ .list = max9867_rates_48k,
+ .count = ARRAY_SIZE(max9867_rates_48k),
};
-static inline int get_ni_value(int mclk, int rate)
+struct max9867_priv {
+ struct regmap *regmap;
+ const struct snd_pcm_hw_constraint_list *constraints;
+ unsigned int sysclk, pclk;
+ bool master, dsp_a;
+};
+
+static int max9867_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
{
- int i, ret = 0;
+ struct max9867_priv *max9867 =
+ snd_soc_component_get_drvdata(dai->component);
- /* find the closest rate index*/
- for (i = 0; i < ARRAY_SIZE(ni_div); i++) {
- if (ni_div[i].mclk >= mclk)
- break;
- }
- if (i == ARRAY_SIZE(ni_div))
- return -EINVAL;
+ if (max9867->constraints)
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, max9867->constraints);
- switch (rate) {
- case 8000:
- return ni_div[i].ni[pcm_rate_8];
- case 16000:
- return ni_div[i].ni[pcm_rate_16];
- case 32000:
- return ni_div[i].ni[pcm_rate_32];
- case 44100:
- return ni_div[i].ni[pcm_rate_44];
- case 48000:
- return ni_div[i].ni[pcm_rate_48];
- default:
- pr_err("%s wrong rate %d\n", __func__, rate);
- ret = -EINVAL;
- }
- return ret;
+ return 0;
}
static int max9867_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
+ int value;
+ unsigned long int rate, ratio;
struct snd_soc_component *component = dai->component;
struct max9867_priv *max9867 = snd_soc_component_get_drvdata(component);
- unsigned int ni_h, ni_l;
- int value;
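+	/* NI divider: 65536 * 96 * LRCLK / PCLK, rounded to nearest (assumed PLL formula, matching the computation below) */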
+ unsigned int ni = DIV_ROUND_CLOSEST_ULL(96ULL * 0x10000 * params_rate(params),
+ max9867->pclk);
- value = get_ni_value(max9867->sysclk, params_rate(params));
- if (value < 0)
- return value;
-
- ni_h = (0xFF00 & value) >> 8;
- ni_l = 0x00FF & value;
/* set up the ni value */
regmap_update_bits(max9867->regmap, MAX9867_AUDIOCLKHIGH,
- MAX9867_NI_HIGH_MASK, ni_h);
+ MAX9867_NI_HIGH_MASK, (0xFF00 & ni) >> 8);
regmap_update_bits(max9867->regmap, MAX9867_AUDIOCLKLOW,
- MAX9867_NI_LOW_MASK, ni_l);
- if (!max9867->master) {
- /*
- * digital pll locks on to any externally supplied LRCLK signal
- * and also enable rapid lock mode.
- */
- regmap_update_bits(max9867->regmap, MAX9867_AUDIOCLKLOW,
- MAX9867_RAPID_LOCK, MAX9867_RAPID_LOCK);
- regmap_update_bits(max9867->regmap, MAX9867_AUDIOCLKHIGH,
- MAX9867_PLL, MAX9867_PLL);
- } else {
- unsigned long int bclk_rate, pclk_bclk_ratio;
- int bclk_value;
-
- bclk_rate = params_rate(params) * 2 * params_width(params);
- pclk_bclk_ratio = max9867->pclk/bclk_rate;
- switch (params_width(params)) {
- case 8:
- case 16:
- switch (pclk_bclk_ratio) {
- case 2:
- bclk_value = MAX9867_IFC1B_PCLK_2;
- break;
- case 4:
- bclk_value = MAX9867_IFC1B_PCLK_4;
- break;
+ MAX9867_NI_LOW_MASK, 0x00FF & ni);
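+	/* Master mode: 24/32-bit and DSP-A use the fixed 48x/64x BCLK rates; 8/16-bit picks a PCLK divider */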
+ if (max9867->master) {
+ if (max9867->dsp_a) {
+ value = MAX9867_IFC1B_48X;
+ } else {
+ rate = params_rate(params) * 2 * params_width(params);
+ ratio = max9867->pclk / rate;
+ switch (params_width(params)) {
case 8:
- bclk_value = MAX9867_IFC1B_PCLK_8;
- break;
case 16:
- bclk_value = MAX9867_IFC1B_PCLK_16;
+ switch (ratio) {
+ case 2:
+ value = MAX9867_IFC1B_PCLK_2;
+ break;
+ case 4:
+ value = MAX9867_IFC1B_PCLK_4;
+ break;
+ case 8:
+ value = MAX9867_IFC1B_PCLK_8;
+ break;
+ case 16:
+ value = MAX9867_IFC1B_PCLK_16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 24:
+ value = MAX9867_IFC1B_48X;
+ break;
+ case 32:
+ value = MAX9867_IFC1B_64X;
break;
default:
- dev_err(component->dev,
- "unsupported sampling rate\n");
return -EINVAL;
}
- break;
- case 24:
- bclk_value = MAX9867_IFC1B_24BIT;
- break;
- case 32:
- bclk_value = MAX9867_IFC1B_32BIT;
- break;
- default:
- dev_err(component->dev, "unsupported sampling rate\n");
- return -EINVAL;
}
regmap_update_bits(max9867->regmap, MAX9867_IFC1B,
- MAX9867_IFC1B_BCLK_MASK, bclk_value);
+ MAX9867_IFC1B_BCLK_MASK, value);
+ } else {
+ /*
+	 * The digital PLL locks on to any externally supplied LRCLK signal
+	 * and also enables rapid lock mode.
+ */
+ regmap_update_bits(max9867->regmap, MAX9867_AUDIOCLKLOW,
+ MAX9867_RAPID_LOCK, MAX9867_RAPID_LOCK);
+ regmap_update_bits(max9867->regmap, MAX9867_AUDIOCLKHIGH,
+ MAX9867_PLL, MAX9867_PLL);
}
return 0;
}
-static int max9867_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_component *component = dai->component;
- struct max9867_priv *max9867 = snd_soc_component_get_drvdata(component);
-
- regmap_update_bits(max9867->regmap, MAX9867_PWRMAN,
- MAX9867_SHTDOWN_MASK, MAX9867_SHTDOWN_MASK);
- return 0;
-}
-
static int max9867_mute(struct snd_soc_dai *dai, int mute)
{
struct snd_soc_component *component = dai->component;
struct max9867_priv *max9867 = snd_soc_component_get_drvdata(component);
- if (mute)
- regmap_update_bits(max9867->regmap, MAX9867_DACLEVEL,
- MAX9867_DAC_MUTE_MASK, MAX9867_DAC_MUTE_MASK);
- else
- regmap_update_bits(max9867->regmap, MAX9867_DACLEVEL,
- MAX9867_DAC_MUTE_MASK, 0);
- return 0;
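+	/* DACLEVEL bit 6 is the DAC soft-mute bit */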
+ return regmap_update_bits(max9867->regmap, MAX9867_DACLEVEL,
+ 1 << 6, !!mute << 6);
}
static int max9867_set_dai_sysclk(struct snd_soc_dai *codec_dai,
@@ -283,21 +264,29 @@ static int max9867_set_dai_sysclk(struct snd_soc_dai *codec_dai,
	/* Set the prescaler based on the master clock frequency */
if (freq >= 10000000 && freq <= 20000000) {
value |= MAX9867_PSCLK_10_20;
- max9867->pclk = freq;
+ max9867->pclk = freq;
} else if (freq >= 20000000 && freq <= 40000000) {
value |= MAX9867_PSCLK_20_40;
- max9867->pclk = freq/2;
+ max9867->pclk = freq / 2;
} else if (freq >= 40000000 && freq <= 60000000) {
value |= MAX9867_PSCLK_40_60;
- max9867->pclk = freq/4;
+ max9867->pclk = freq / 4;
} else {
dev_err(component->dev,
"Invalid clock frequency %uHz (required 10-60MHz)\n",
freq);
return -EINVAL;
}
- value = value << MAX9867_PSCLK_SHIFT;
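+	/* Constrain runtime rates to the family (48k or 44.1k) the MCLK divides exactly */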
+ if (freq % 48000 == 0)
+ max9867->constraints = &max9867_constraints_48k;
+ else if (freq % 44100 == 0)
+ max9867->constraints = &max9867_constraints_44k1;
+ else
+ dev_warn(component->dev,
+ "Unable to set exact rate with %uHz clock frequency\n",
+ freq);
max9867->sysclk = freq;
+ value = value << MAX9867_PSCLK_SHIFT;
/* exact integer mode is not supported */
value &= ~MAX9867_FREQ_MASK;
regmap_update_bits(max9867->regmap, MAX9867_SYSCLK,
@@ -310,16 +299,17 @@ static int max9867_dai_set_fmt(struct snd_soc_dai *codec_dai,
{
struct snd_soc_component *component = codec_dai->component;
struct max9867_priv *max9867 = snd_soc_component_get_drvdata(component);
- u8 iface1A = 0, iface1B = 0;
+ u8 iface1A, iface1B;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
- max9867->master = 1;
- iface1A |= MAX9867_MASTER;
+ max9867->master = true;
+ iface1A = MAX9867_MASTER;
+ iface1B = MAX9867_IFC1B_48X;
break;
case SND_SOC_DAIFMT_CBS_CFS:
- max9867->master = 0;
- iface1A &= ~MAX9867_MASTER;
+ max9867->master = false;
+ iface1A = iface1B = 0;
break;
default:
return -EINVAL;
@@ -327,9 +317,11 @@ static int max9867_dai_set_fmt(struct snd_soc_dai *codec_dai,
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
+ max9867->dsp_a = false;
iface1A |= MAX9867_I2S_DLY;
break;
case SND_SOC_DAIFMT_DSP_A:
+ max9867->dsp_a = true;
iface1A |= MAX9867_TDM_MODE | MAX9867_SDOUT_HIZ;
break;
default:
@@ -355,21 +347,18 @@ static int max9867_dai_set_fmt(struct snd_soc_dai *codec_dai,
regmap_write(max9867->regmap, MAX9867_IFC1A, iface1A);
regmap_write(max9867->regmap, MAX9867_IFC1B, iface1B);
+
return 0;
}
static const struct snd_soc_dai_ops max9867_dai_ops = {
- .set_fmt = max9867_dai_set_fmt,
.set_sysclk = max9867_set_dai_sysclk,
- .prepare = max9867_prepare,
+ .set_fmt = max9867_dai_set_fmt,
.digital_mute = max9867_mute,
- .hw_params = max9867_dai_hw_params,
+ .startup = max9867_startup,
+ .hw_params = max9867_dai_hw_params,
};
-#define MAX9867_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
- SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
-#define MAX9867_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
-
static struct snd_soc_dai_driver max9867_dai[] = {
{
.name = "max9867-aif1",
@@ -377,42 +366,74 @@ static struct snd_soc_dai_driver max9867_dai[] = {
.stream_name = "HiFi Playback",
.channels_min = 2,
.channels_max = 2,
- .rates = MAX9867_RATES,
- .formats = MAX9867_FORMATS,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
.stream_name = "HiFi Capture",
.channels_min = 2,
.channels_max = 2,
- .rates = MAX9867_RATES,
- .formats = MAX9867_FORMATS,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.ops = &max9867_dai_ops,
.symmetric_rates = 1,
}
};
-#ifdef CONFIG_PM_SLEEP
-static int max9867_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int max9867_suspend(struct snd_soc_component *component)
{
- struct max9867_priv *max9867 = dev_get_drvdata(dev);
+ snd_soc_component_force_bias_level(component, SND_SOC_BIAS_OFF);
- /* Drop down to power saving mode when system is suspended */
- regmap_update_bits(max9867->regmap, MAX9867_PWRMAN,
- MAX9867_SHTDOWN_MASK, ~MAX9867_SHTDOWN_MASK);
return 0;
}
-static int max9867_resume(struct device *dev)
+static int max9867_resume(struct snd_soc_component *component)
{
- struct max9867_priv *max9867 = dev_get_drvdata(dev);
+ snd_soc_component_force_bias_level(component, SND_SOC_BIAS_STANDBY);
- regmap_update_bits(max9867->regmap, MAX9867_PWRMAN,
- MAX9867_SHTDOWN_MASK, MAX9867_SHTDOWN_MASK);
return 0;
}
+#else
+#define max9867_suspend NULL
+#define max9867_resume NULL
#endif
+static int max9867_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ int err;
+ struct max9867_priv *max9867 = snd_soc_component_get_drvdata(component);
+
+ switch (level) {
+ case SND_SOC_BIAS_STANDBY:
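+		/* Coming out of BIAS_OFF: restore the register cache before releasing shutdown */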
+ if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) {
+ err = regcache_sync(max9867->regmap);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(max9867->regmap, MAX9867_PWRMAN,
+ MAX9867_SHTDOWN, MAX9867_SHTDOWN);
+ if (err)
+ return err;
+ }
+ break;
+ case SND_SOC_BIAS_OFF:
+ err = regmap_update_bits(max9867->regmap, MAX9867_PWRMAN,
+ MAX9867_SHTDOWN, 0);
+ if (err)
+ return err;
+
+ regcache_mark_dirty(max9867->regmap);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_component_driver max9867_component = {
.controls = max9867_snd_controls,
.num_controls = ARRAY_SIZE(max9867_snd_controls),
@@ -420,6 +441,9 @@ static const struct snd_soc_component_driver max9867_component = {
.num_dapm_routes = ARRAY_SIZE(max9867_audio_map),
.dapm_widgets = max9867_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(max9867_dapm_widgets),
+ .suspend = max9867_suspend,
+ .resume = max9867_resume,
+ .set_bias_level = max9867_set_bias_level,
.idle_bias_on = 1,
.use_pmdown_time = 1,
.endianness = 1,
@@ -450,8 +474,8 @@ static const struct reg_default max9867_reg[] = {
{ 0x0B, 0x00 },
{ 0x0C, 0x00 },
{ 0x0D, 0x00 },
- { 0x0E, 0x00 },
- { 0x0F, 0x00 },
+ { 0x0E, 0x40 },
+ { 0x0F, 0x40 },
{ 0x10, 0x00 },
{ 0x11, 0x00 },
{ 0x12, 0x00 },
@@ -476,10 +500,9 @@ static int max9867_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max9867_priv *max9867;
- int ret = 0, reg;
+ int ret, reg;
- max9867 = devm_kzalloc(&i2c->dev,
- sizeof(*max9867), GFP_KERNEL);
+ max9867 = devm_kzalloc(&i2c->dev, sizeof(*max9867), GFP_KERNEL);
if (!max9867)
return -ENOMEM;
@@ -490,8 +513,7 @@ static int max9867_i2c_probe(struct i2c_client *i2c,
dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
return ret;
}
- ret = regmap_read(max9867->regmap,
- MAX9867_REVISION, &reg);
+ ret = regmap_read(max9867->regmap, MAX9867_REVISION, &reg);
if (ret < 0) {
dev_err(&i2c->dev, "Failed to read: %d\n", ret);
return ret;
@@ -499,10 +521,8 @@ static int max9867_i2c_probe(struct i2c_client *i2c,
dev_info(&i2c->dev, "device revision: %x\n", reg);
ret = devm_snd_soc_register_component(&i2c->dev, &max9867_component,
max9867_dai, ARRAY_SIZE(max9867_dai));
- if (ret < 0) {
+ if (ret < 0)
dev_err(&i2c->dev, "Failed to register component: %d\n", ret);
- return ret;
- }
return ret;
}
@@ -518,15 +538,10 @@ static const struct of_device_id max9867_of_match[] = {
};
MODULE_DEVICE_TABLE(of, max9867_of_match);
-static const struct dev_pm_ops max9867_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(max9867_suspend, max9867_resume)
-};
-
static struct i2c_driver max9867_i2c_driver = {
.driver = {
.name = "max9867",
.of_match_table = of_match_ptr(max9867_of_match),
- .pm = &max9867_pm_ops,
},
.probe = max9867_i2c_probe,
.id_table = max9867_i2c_id,
@@ -534,6 +549,6 @@ static struct i2c_driver max9867_i2c_driver = {
module_i2c_driver(max9867_i2c_driver);
-MODULE_AUTHOR("anish kumar <yesanishhere@gmail.com>");
-MODULE_DESCRIPTION("ALSA SoC MAX9867 driver");
+MODULE_AUTHOR("Ladislav Michl <ladis@linux-mips.org>");
+MODULE_DESCRIPTION("ASoC MAX9867 driver");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max9867.h b/sound/soc/codecs/max9867.h
index 55cd9976ff47..2277798291a1 100644
--- a/sound/soc/codecs/max9867.h
+++ b/sound/soc/codecs/max9867.h
@@ -26,13 +26,11 @@
#define MAX9867_PSCLK_10_20 0x1
#define MAX9867_PSCLK_20_40 0x2
#define MAX9867_PSCLK_40_60 0x3
-#define MAX9867_AUDIOCLKHIGH 0x06
-#define MAX9867_NI_HIGH_WIDTH 0x7
-#define MAX9867_NI_HIGH_MASK 0x7F
-#define MAX9867_NI_LOW_MASK 0x7F
-#define MAX9867_NI_LOW_SHIFT 0x1
-#define MAX9867_PLL (1<<7)
-#define MAX9867_AUDIOCLKLOW 0x07
+#define MAX9867_AUDIOCLKHIGH 0x06
+#define MAX9867_NI_HIGH_MASK 0x7F
+#define MAX9867_NI_LOW_MASK 0xFE
+#define MAX9867_PLL (1<<7)
+#define MAX9867_AUDIOCLKLOW 0x07
#define MAX9867_RAPID_LOCK 0x01
#define MAX9867_IFC1A 0x08
#define MAX9867_MASTER (1<<7)
@@ -43,40 +41,29 @@
#define MAX9867_BCI_MODE (1<<5)
#define MAX9867_IFC1B 0x09
#define MAX9867_IFC1B_BCLK_MASK 7
-#define MAX9867_IFC1B_32BIT 0x01
-#define MAX9867_IFC1B_24BIT 0x02
-#define MAX9867_IFC1B_PCLK_2 4
-#define MAX9867_IFC1B_PCLK_4 5
-#define MAX9867_IFC1B_PCLK_8 6
-#define MAX9867_IFC1B_PCLK_16 7
+#define MAX9867_IFC1B_64X 0x01
+#define MAX9867_IFC1B_48X 0x02
+#define MAX9867_IFC1B_PCLK_2 0x04
+#define MAX9867_IFC1B_PCLK_4 0x05
+#define MAX9867_IFC1B_PCLK_8 0x06
+#define MAX9867_IFC1B_PCLK_16 0x07
#define MAX9867_CODECFLTR 0x0a
-#define MAX9867_DACGAIN 0x0b
+#define MAX9867_SIDETONE 0x0b
#define MAX9867_DACLEVEL 0x0c
-#define MAX9867_DAC_MUTE_SHIFT 0x6
-#define MAX9867_DAC_MUTE_WIDTH 0x1
-#define MAX9867_DAC_MUTE_MASK (0x1<<MAX9867_DAC_MUTE_SHIFT)
#define MAX9867_ADCLEVEL 0x0d
#define MAX9867_LEFTLINELVL 0x0e
-#define MAX9867_RIGTHLINELVL 0x0f
+#define MAX9867_RIGHTLINELVL 0x0f
#define MAX9867_LEFTVOL 0x10
#define MAX9867_RIGHTVOL 0x11
#define MAX9867_LEFTMICGAIN 0x12
#define MAX9867_RIGHTMICGAIN 0x13
#define MAX9867_INPUTCONFIG 0x14
-#define MAX9867_INPUT_SHIFT 0x6
#define MAX9867_MICCONFIG 0x15
#define MAX9867_MODECONFIG 0x16
#define MAX9867_PWRMAN 0x17
-#define MAX9867_SHTDOWN_MASK (1<<7)
+#define MAX9867_SHTDOWN 0x80
#define MAX9867_REVISION 0xff
#define MAX9867_CACHEREGNUM 10
-/* codec private data */
-struct max9867_priv {
- struct regmap *regmap;
- unsigned int sysclk;
- unsigned int pclk;
- unsigned int master;
-};
#endif
diff --git a/sound/soc/codecs/nau8540.c b/sound/soc/codecs/nau8540.c
index e3c8cd17daf2..4dd1a609756b 100644
--- a/sound/soc/codecs/nau8540.c
+++ b/sound/soc/codecs/nau8540.c
@@ -585,7 +585,7 @@ static int nau8540_calc_fll_param(unsigned int fll_in,
fvco_max = 0;
fvco_sel = ARRAY_SIZE(mclk_src_scaling);
for (i = 0; i < ARRAY_SIZE(mclk_src_scaling); i++) {
- fvco = 256 * fs * 2 * mclk_src_scaling[i].param;
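+		/* Force 64-bit math: 256 * fs * 2 * param can overflow 32 bits at high rates */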
+ fvco = 256ULL * fs * 2 * mclk_src_scaling[i].param;
if (fvco > NAU_FVCO_MIN && fvco < NAU_FVCO_MAX &&
fvco_max < fvco) {
fvco_max = fvco;
diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c
index 622ce947f134..c6152a044416 100644
--- a/sound/soc/codecs/nau8822.c
+++ b/sound/soc/codecs/nau8822.c
@@ -1,18 +1,14 @@
-/*
- * nau8822.c -- NAU8822 ALSA Soc Audio Codec driver
- *
- * Copyright 2017 Nuvoton Technology Corp.
- *
- * Author: David Lin <ctlin0@nuvoton.com>
- * Co-author: John Hsu <kchsu0@nuvoton.com>
- * Co-author: Seven Li <wtli@nuvoton.com>
- *
- * Based on WM8974.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// nau8822.c -- NAU8822 ALSA SoC Audio driver
+//
+// Copyright 2017 Nuvoton Technology Corp.
+//
+// Author: David Lin <ctlin0@nuvoton.com>
+// Co-author: John Hsu <kchsu0@nuvoton.com>
+// Co-author: Seven Li <wtli@nuvoton.com>
+//
+// Based on WM8974.c
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/sound/soc/codecs/nau8822.h b/sound/soc/codecs/nau8822.h
index aa79c969cd44..9c552983a293 100644
--- a/sound/soc/codecs/nau8822.h
+++ b/sound/soc/codecs/nau8822.h
@@ -1,13 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * nau8822.h -- NAU8822 Soc Audio Codec driver
+ * nau8822.h -- NAU8822 ALSA SoC Audio driver
+ *
+ * Copyright 2017 Nuvoton Technology Corp.
*
* Author: David Lin <ctlin0@nuvoton.com>
* Co-author: John Hsu <kchsu0@nuvoton.com>
* Co-author: Seven Li <wtli@nuvoton.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __NAU8822_H__
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index b9fed99d8b5e..7bbcbf5f05c8 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -424,10 +424,8 @@ static u32 nau8825_xtalk_sidetone(u32 sig_org, u32 sig_cros)
{
u32 gain, sidetone;
- if (unlikely(sig_org == 0) || unlikely(sig_cros == 0)) {
- WARN_ON(1);
+ if (WARN_ON(sig_org == 0 || sig_cros == 0))
return 0;
- }
sig_org = nau8825_intlog10_dec3(sig_org);
sig_cros = nau8825_intlog10_dec3(sig_cros);
diff --git a/sound/soc/codecs/pcm186x.h b/sound/soc/codecs/pcm186x.h
index 2c6ba55bf394..bb3f0c42a1cd 100644
--- a/sound/soc/codecs/pcm186x.h
+++ b/sound/soc/codecs/pcm186x.h
@@ -139,7 +139,7 @@ enum pcm186x_type {
#define PCM186X_MAX_REGISTER PCM186X_CURR_TRIM_CTRL
/* PCM186X_PAGE */
-#define PCM186X_RESET 0xff
+#define PCM186X_RESET 0xfe
/* PCM186X_ADCX_INPUT_SEL_X */
#define PCM186X_ADC_INPUT_SEL_POL BIT(7)
diff --git a/sound/soc/codecs/pcm3060.c b/sound/soc/codecs/pcm3060.c
index 494d9d662be8..6714aa8d9026 100644
--- a/sound/soc/codecs/pcm3060.c
+++ b/sound/soc/codecs/pcm3060.c
@@ -198,23 +198,25 @@ static const struct snd_kcontrol_new pcm3060_dapm_controls[] = {
};
static const struct snd_soc_dapm_widget pcm3060_dapm_widgets[] = {
- SND_SOC_DAPM_OUTPUT("OUTL+"),
- SND_SOC_DAPM_OUTPUT("OUTR+"),
- SND_SOC_DAPM_OUTPUT("OUTL-"),
- SND_SOC_DAPM_OUTPUT("OUTR-"),
+ SND_SOC_DAPM_DAC("DAC", "Playback", PCM3060_REG64,
+ PCM3060_REG_SHIFT_DAPSV, 1),
+
+ SND_SOC_DAPM_OUTPUT("OUTL"),
+ SND_SOC_DAPM_OUTPUT("OUTR"),
SND_SOC_DAPM_INPUT("INL"),
SND_SOC_DAPM_INPUT("INR"),
+
+ SND_SOC_DAPM_ADC("ADC", "Capture", PCM3060_REG64,
+ PCM3060_REG_SHIFT_ADPSV, 1),
};
static const struct snd_soc_dapm_route pcm3060_dapm_map[] = {
- { "OUTL+", NULL, "Playback" },
- { "OUTR+", NULL, "Playback" },
- { "OUTL-", NULL, "Playback" },
- { "OUTR-", NULL, "Playback" },
+ { "OUTL", NULL, "DAC" },
+ { "OUTR", NULL, "DAC" },
- { "Capture", NULL, "INL" },
- { "Capture", NULL, "INR" },
+ { "ADC", NULL, "INL" },
+ { "ADC", NULL, "INR" },
};
/* soc component */
@@ -274,9 +276,23 @@ EXPORT_SYMBOL(pcm3060_regmap);
/* device */
+static void pcm3060_parse_dt(const struct device_node *np,
+ struct pcm3060_priv *priv)
+{
+ priv->out_se = of_property_read_bool(np, "ti,out-single-ended");
+}
+
int pcm3060_probe(struct device *dev)
{
int rc;
+ struct pcm3060_priv *priv = dev_get_drvdata(dev);
+
+ if (dev->of_node)
+ pcm3060_parse_dt(dev->of_node, priv);
+
+ if (priv->out_se)
+ regmap_update_bits(priv->regmap, PCM3060_REG64,
+ PCM3060_REG_SE, PCM3060_REG_SE);
rc = devm_snd_soc_register_component(dev, &pcm3060_soc_comp_driver,
pcm3060_dai,
diff --git a/sound/soc/codecs/pcm3060.h b/sound/soc/codecs/pcm3060.h
index fd89a68aa8a7..6a027b4a845d 100644
--- a/sound/soc/codecs/pcm3060.h
+++ b/sound/soc/codecs/pcm3060.h
@@ -25,6 +25,7 @@ struct pcm3060_priv_dai {
struct pcm3060_priv {
struct regmap *regmap;
struct pcm3060_priv_dai dai[PCM3060_DAI_IDS_NUM];
+ u8 out_se: 1;
};
int pcm3060_probe(struct device *dev);
@@ -36,7 +37,9 @@ int pcm3060_remove(struct device *dev);
#define PCM3060_REG_MRST 0x80
#define PCM3060_REG_SRST 0x40
#define PCM3060_REG_ADPSV 0x20
+#define PCM3060_REG_SHIFT_ADPSV 0x05
#define PCM3060_REG_DAPSV 0x10
+#define PCM3060_REG_SHIFT_DAPSV 0x04
#define PCM3060_REG_SE 0x01
#define PCM3060_REG65 0x41
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index 52cc950c9fd1..08d3fe192e65 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -133,10 +133,6 @@ static const struct snd_kcontrol_new pcm3168a_snd_controls[] = {
SOC_DOUBLE("DAC2 Invert Switch", PCM3168A_DAC_INV, 2, 3, 1, 0),
SOC_DOUBLE("DAC3 Invert Switch", PCM3168A_DAC_INV, 4, 5, 1, 0),
SOC_DOUBLE("DAC4 Invert Switch", PCM3168A_DAC_INV, 6, 7, 1, 0),
- SOC_DOUBLE_STS("DAC1 Zero Flag", PCM3168A_DAC_ZERO, 0, 1, 1, 0),
- SOC_DOUBLE_STS("DAC2 Zero Flag", PCM3168A_DAC_ZERO, 2, 3, 1, 0),
- SOC_DOUBLE_STS("DAC3 Zero Flag", PCM3168A_DAC_ZERO, 4, 5, 1, 0),
- SOC_DOUBLE_STS("DAC4 Zero Flag", PCM3168A_DAC_ZERO, 6, 7, 1, 0),
SOC_ENUM("DAC Volume Control Type", pcm3168a_dac_volume_type),
SOC_ENUM("DAC Volume Rate Multiplier", pcm3168a_dac_att_mult),
SOC_ENUM("DAC De-Emphasis", pcm3168a_dac_demp),
@@ -176,9 +172,6 @@ static const struct snd_kcontrol_new pcm3168a_snd_controls[] = {
SOC_DOUBLE("ADC1 Mute Switch", PCM3168A_ADC_MUTE, 0, 1, 1, 0),
SOC_DOUBLE("ADC2 Mute Switch", PCM3168A_ADC_MUTE, 2, 3, 1, 0),
SOC_DOUBLE("ADC3 Mute Switch", PCM3168A_ADC_MUTE, 4, 5, 1, 0),
- SOC_DOUBLE_STS("ADC1 Overflow Flag", PCM3168A_ADC_OV, 0, 1, 1, 0),
- SOC_DOUBLE_STS("ADC2 Overflow Flag", PCM3168A_ADC_OV, 2, 3, 1, 0),
- SOC_DOUBLE_STS("ADC3 Overflow Flag", PCM3168A_ADC_OV, 4, 5, 1, 0),
SOC_ENUM("ADC Volume Control Type", pcm3168a_adc_volume_type),
SOC_ENUM("ADC Volume Rate Multiplier", pcm3168a_adc_att_mult),
SOC_ENUM("ADC Overflow Flag Polarity", pcm3168a_adc_ov_pol),
@@ -504,6 +497,10 @@ static int pcm3168a_startup(struct snd_pcm_substream *substream,
unsigned int fmt;
unsigned int sample_min;
unsigned int channel_max;
+ unsigned int channel_maxs[] = {
+ 6, /* rx */
+ 8 /* tx */
+ };
if (tx)
fmt = pcm3168a->dac_fmt;
@@ -528,18 +525,9 @@ static int pcm3168a_startup(struct snd_pcm_substream *substream,
channel_max = 2;
break;
case PCM3168A_FMT_LEFT_J:
- sample_min = 24;
- if (tx)
- channel_max = 8;
- else
- channel_max = 6;
- break;
case PCM3168A_FMT_I2S:
sample_min = 24;
- if (tx)
- channel_max = 8;
- else
- channel_max = 6;
+ channel_max = channel_maxs[tx];
break;
default:
sample_min = 24;
@@ -770,15 +758,22 @@ err_clk:
}
EXPORT_SYMBOL_GPL(pcm3168a_probe);
-void pcm3168a_remove(struct device *dev)
+static void pcm3168a_disable(struct device *dev)
{
struct pcm3168a_priv *pcm3168a = dev_get_drvdata(dev);
- pm_runtime_disable(dev);
regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
- pcm3168a->supplies);
+ pcm3168a->supplies);
clk_disable_unprepare(pcm3168a->scki);
}
+
+void pcm3168a_remove(struct device *dev)
+{
+ pm_runtime_disable(dev);
+#ifndef CONFIG_PM
+ pcm3168a_disable(dev);
+#endif
+}
EXPORT_SYMBOL_GPL(pcm3168a_remove);
#ifdef CONFIG_PM
@@ -833,10 +828,7 @@ static int pcm3168a_rt_suspend(struct device *dev)
regcache_cache_only(pcm3168a->regmap, true);
- regulator_bulk_disable(ARRAY_SIZE(pcm3168a->supplies),
- pcm3168a->supplies);
-
- clk_disable_unprepare(pcm3168a->scki);
+ pcm3168a_disable(dev);
return 0;
}
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index f0f2d4fd3769..6cb1653be804 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -53,6 +53,8 @@ struct pcm512x_priv {
unsigned long overclock_pll;
unsigned long overclock_dac;
unsigned long overclock_dsp;
+ int mute;
+ struct mutex mutex;
};
/*
@@ -384,6 +386,61 @@ static const struct soc_enum pcm512x_veds =
SOC_ENUM_SINGLE(PCM512x_DIGITAL_MUTE_2, PCM512x_VEDS_SHIFT, 4,
pcm512x_ramp_step_text);
+static int pcm512x_update_mute(struct pcm512x_priv *pcm512x)
+{
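+	/* mute bits: 0x1 = DAI digital_mute, 0x2 = right switch, 0x4 = left switch */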
+ return regmap_update_bits(
+ pcm512x->regmap, PCM512x_MUTE, PCM512x_RQML | PCM512x_RQMR,
+ (!!(pcm512x->mute & 0x5) << PCM512x_RQML_SHIFT)
+ | (!!(pcm512x->mute & 0x3) << PCM512x_RQMR_SHIFT));
+}
+
+static int pcm512x_digital_playback_switch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct pcm512x_priv *pcm512x = snd_soc_component_get_drvdata(component);
+
+ mutex_lock(&pcm512x->mutex);
+ ucontrol->value.integer.value[0] = !(pcm512x->mute & 0x4);
+ ucontrol->value.integer.value[1] = !(pcm512x->mute & 0x2);
+ mutex_unlock(&pcm512x->mutex);
+
+ return 0;
+}
+
+static int pcm512x_digital_playback_switch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ struct pcm512x_priv *pcm512x = snd_soc_component_get_drvdata(component);
+ int ret, changed = 0;
+
+ mutex_lock(&pcm512x->mutex);
+
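+	/* A control value of 1 means unmuted, so equality with the shifted mute bit means the state must flip */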
+ if ((pcm512x->mute & 0x4) == (ucontrol->value.integer.value[0] << 2)) {
+ pcm512x->mute ^= 0x4;
+ changed = 1;
+ }
+ if ((pcm512x->mute & 0x2) == (ucontrol->value.integer.value[1] << 1)) {
+ pcm512x->mute ^= 0x2;
+ changed = 1;
+ }
+
+ if (changed) {
+ ret = pcm512x_update_mute(pcm512x);
+ if (ret != 0) {
+ dev_err(component->dev,
+ "Failed to update digital mute: %d\n", ret);
+ mutex_unlock(&pcm512x->mutex);
+ return ret;
+ }
+ }
+
+ mutex_unlock(&pcm512x->mutex);
+
+ return changed;
+}
+
static const struct snd_kcontrol_new pcm512x_controls[] = {
SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
@@ -391,8 +448,15 @@ SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
-SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
- PCM512x_RQMR_SHIFT, 1, 1),
+{
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Digital Playback Switch",
+ .index = 0,
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = snd_ctl_boolean_stereo_info,
+ .get = pcm512x_digital_playback_switch_get,
+ .put = pcm512x_digital_playback_switch_put
+},
SOC_SINGLE("Deemphasis Switch", PCM512x_DSP, PCM512x_DEMP_SHIFT, 1, 1),
SOC_ENUM("DSP Program", pcm512x_dsp_program),
@@ -1319,10 +1383,61 @@ static int pcm512x_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return 0;
}
+static int pcm512x_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_component *component = dai->component;
+ struct pcm512x_priv *pcm512x = snd_soc_component_get_drvdata(component);
+ int ret;
+ unsigned int mute_det;
+
+ mutex_lock(&pcm512x->mutex);
+
+ if (mute) {
+ pcm512x->mute |= 0x1;
+ ret = regmap_update_bits(pcm512x->regmap, PCM512x_MUTE,
+ PCM512x_RQML | PCM512x_RQMR,
+ PCM512x_RQML | PCM512x_RQMR);
+ if (ret != 0) {
+ dev_err(component->dev,
+ "Failed to set digital mute: %d\n", ret);
+ mutex_unlock(&pcm512x->mutex);
+ return ret;
+ }
+
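+		/* Poll the analogue mute detect (up to 10ms) until the mute has taken effect */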
+ regmap_read_poll_timeout(pcm512x->regmap,
+ PCM512x_ANALOG_MUTE_DET,
+ mute_det, (mute_det & 0x3) == 0,
+ 200, 10000);
+ } else {
+ pcm512x->mute &= ~0x1;
+ ret = pcm512x_update_mute(pcm512x);
+ if (ret != 0) {
+ dev_err(component->dev,
+ "Failed to update digital mute: %d\n", ret);
+ mutex_unlock(&pcm512x->mutex);
+ return ret;
+ }
+
+ regmap_read_poll_timeout(pcm512x->regmap,
+ PCM512x_ANALOG_MUTE_DET,
+ mute_det,
+ (mute_det & 0x3)
+ == ((~pcm512x->mute >> 1) & 0x3),
+ 200, 10000);
+ }
+
+ mutex_unlock(&pcm512x->mutex);
+
+ return 0;
+}
+
static const struct snd_soc_dai_ops pcm512x_dai_ops = {
.startup = pcm512x_dai_startup,
.hw_params = pcm512x_hw_params,
.set_fmt = pcm512x_set_fmt,
+ .digital_mute = pcm512x_digital_mute,
};
static struct snd_soc_dai_driver pcm512x_dai = {
@@ -1388,6 +1503,8 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
if (!pcm512x)
return -ENOMEM;
+ mutex_init(&pcm512x->mutex);
+
dev_set_drvdata(dev, pcm512x);
pcm512x->regmap = regmap;
diff --git a/sound/soc/codecs/pcm512x.h b/sound/soc/codecs/pcm512x.h
index d70d9c0c2088..9dda8693498e 100644
--- a/sound/soc/codecs/pcm512x.h
+++ b/sound/soc/codecs/pcm512x.h
@@ -112,7 +112,9 @@
#define PCM512x_RQST_SHIFT 4
/* Page 0, Register 3 - mute */
+#define PCM512x_RQMR (1 << 0)
#define PCM512x_RQMR_SHIFT 0
+#define PCM512x_RQML (1 << 4)
#define PCM512x_RQML_SHIFT 4
/* Page 0, Register 4 - PLL */
diff --git a/sound/soc/codecs/rt5660.c b/sound/soc/codecs/rt5660.c
index 27f7445b2432..e74b2e8cd423 100644
--- a/sound/soc/codecs/rt5660.c
+++ b/sound/soc/codecs/rt5660.c
@@ -1246,6 +1246,7 @@ MODULE_DEVICE_TABLE(of, rt5660_of_match);
static const struct acpi_device_id rt5660_acpi_match[] = {
{ "10EC5660", 0 },
+ { "10EC3277", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, rt5660_acpi_match);
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index 7eb2cbd39d6e..da6647015708 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/acpi.h>
+#include <linux/regulator/consumer.h>
#include <linux/workqueue.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -33,6 +34,9 @@
#define RT5663_DEVICE_ID_2 0x6451
#define RT5663_DEVICE_ID_1 0x6406
+#define RT5663_POWER_ON_DELAY_MS 300
+#define RT5663_SUPPLY_CURRENT_UA 500000
+
enum {
CODEC_VER_1,
CODEC_VER_0,
@@ -48,6 +52,11 @@ struct impedance_mapping_table {
unsigned int dc_offset_r_manual_mic;
};
+static const char *const rt5663_supply_names[] = {
+ "avdd",
+ "cpvdd",
+};
+
struct rt5663_priv {
struct snd_soc_component *component;
struct rt5663_platform_data pdata;
@@ -56,6 +65,7 @@ struct rt5663_priv {
struct snd_soc_jack *hs_jack;
struct timer_list btn_check_timer;
struct impedance_mapping_table *imp_table;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(rt5663_supply_names)];
int codec_ver;
int sysclk;
@@ -3483,7 +3493,7 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
{
struct rt5663_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct rt5663_priv *rt5663;
- int ret;
+ int ret, i;
unsigned int val;
struct regmap *regmap;
@@ -3500,12 +3510,44 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
else
rt5663_parse_dp(rt5663, &i2c->dev);
+ for (i = 0; i < ARRAY_SIZE(rt5663->supplies); i++)
+ rt5663->supplies[i].supply = rt5663_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&i2c->dev,
+ ARRAY_SIZE(rt5663->supplies),
+ rt5663->supplies);
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ /* Set load for regulator. */
+ for (i = 0; i < ARRAY_SIZE(rt5663->supplies); i++) {
+ ret = regulator_set_load(rt5663->supplies[i].consumer,
+ RT5663_SUPPLY_CURRENT_UA);
+ if (ret < 0) {
+ dev_err(&i2c->dev,
+ "Failed to set regulator load on %s, ret: %d\n",
+ rt5663->supplies[i].supply, ret);
+ return ret;
+ }
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(rt5663->supplies),
+ rt5663->supplies);
+
+ if (ret) {
+ dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+ msleep(RT5663_POWER_ON_DELAY_MS);
+
regmap = devm_regmap_init_i2c(i2c, &temp_regmap);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
dev_err(&i2c->dev, "Failed to allocate temp register map: %d\n",
ret);
- return ret;
+ goto err_enable;
}
ret = regmap_read(regmap, RT5663_VENDOR_ID_2, &val);
@@ -3530,14 +3572,15 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
dev_err(&i2c->dev,
"Device with ID register %#x is not rt5663\n",
val);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_enable;
}
if (IS_ERR(rt5663->regmap)) {
ret = PTR_ERR(rt5663->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
- return ret;
+ goto err_enable;
}
/* reset and calibrate */
@@ -3635,20 +3678,32 @@ static int rt5663_i2c_probe(struct i2c_client *i2c,
ret = request_irq(i2c->irq, rt5663_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
| IRQF_ONESHOT, "rt5663", rt5663);
- if (ret)
+ if (ret) {
			dev_err(&i2c->dev, "%s Failed to request IRQ: %d\n",
__func__, ret);
+ goto err_enable;
+ }
}
ret = devm_snd_soc_register_component(&i2c->dev,
&soc_component_dev_rt5663,
rt5663_dai, ARRAY_SIZE(rt5663_dai));
- if (ret) {
- if (i2c->irq)
- free_irq(i2c->irq, rt5663);
- }
+ if (ret)
+ goto err_enable;
+ return 0;
+
+	/*
+	 * Errors after enabling the regulators must goto err_enable
+	 * so the regulators are disabled again on failure.
+	 */
+err_enable:
+ if (i2c->irq)
+ free_irq(i2c->irq, rt5663);
+
+ regulator_bulk_disable(ARRAY_SIZE(rt5663->supplies), rt5663->supplies);
return ret;
}
@@ -3659,6 +3714,8 @@ static int rt5663_i2c_remove(struct i2c_client *i2c)
if (i2c->irq)
free_irq(i2c->irq, rt5663);
+ regulator_bulk_disable(ARRAY_SIZE(rt5663->supplies), rt5663->supplies);
+
return 0;
}
diff --git a/sound/soc/codecs/simple-amplifier.c b/sound/soc/codecs/simple-amplifier.c
index 85524acf3e9c..c07e8a80b4b7 100644
--- a/sound/soc/codecs/simple-amplifier.c
+++ b/sound/soc/codecs/simple-amplifier.c
@@ -19,6 +19,7 @@
#include <linux/gpio/consumer.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <sound/soc.h>
#define DRV_NAME "simple-amplifier"
@@ -58,11 +59,14 @@ static const struct snd_soc_dapm_widget simple_amp_dapm_widgets[] = {
(SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD)),
SND_SOC_DAPM_OUTPUT("OUTL"),
SND_SOC_DAPM_OUTPUT("OUTR"),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("VCC", 20, 0),
};
static const struct snd_soc_dapm_route simple_amp_dapm_routes[] = {
{ "DRV", NULL, "INL" },
{ "DRV", NULL, "INR" },
+ { "OUTL", NULL, "VCC" },
+ { "OUTR", NULL, "VCC" },
{ "OUTL", NULL, "DRV" },
{ "OUTR", NULL, "DRV" },
};
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
index 36aebdb8f55c..aaba39295079 100644
--- a/sound/soc/codecs/tas6424.c
+++ b/sound/soc/codecs/tas6424.c
@@ -378,7 +378,7 @@ static struct snd_soc_component_driver soc_codec_dev_tas6424 = {
.non_legacy_dai_naming = 1,
};
-static struct snd_soc_dai_ops tas6424_speaker_dai_ops = {
+static const struct snd_soc_dai_ops tas6424_speaker_dai_ops = {
.hw_params = tas6424_hw_params,
.set_fmt = tas6424_set_dai_fmt,
.set_tdm_slot = tas6424_set_dai_tdm_slot,
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index 608ad49ad978..c6048d95c6d3 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -1095,7 +1095,7 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
if (freq/i > 20000000) {
dev_err(aic31xx->dev, "%s: Too high mclk frequency %u\n",
__func__, freq);
- return -EINVAL;
+ return -EINVAL;
}
aic31xx->p_div = i;
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 6a271e6e6b8f..6aa0edf8c5ef 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1260,6 +1260,16 @@ static int aic3x_set_dai_fmt(struct snd_soc_dai *codec_dai,
aic3x->master = 0;
iface_areg &= ~(BIT_CLK_MASTER | WORD_CLK_MASTER);
break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ aic3x->master = 1;
+ iface_areg |= BIT_CLK_MASTER;
+ iface_areg &= ~WORD_CLK_MASTER;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ aic3x->master = 1;
+ iface_areg |= WORD_CLK_MASTER;
+ iface_areg &= ~BIT_CLK_MASTER;
+ break;
default:
return -EINVAL;
}
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index a957eaeb7bc1..32907b1e20cf 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -394,7 +394,7 @@ static int dac33_hard_power(struct snd_soc_component *component, int power)
if (ret != 0) {
dev_err(component->dev,
"Failed to enable supplies: %d\n", ret);
- goto exit;
+ goto exit;
}
if (dac33->power_gpio >= 0)
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 61294c787f27..409bed30a4e4 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -60,7 +60,7 @@ static int wm8998_asrc_ev(struct snd_soc_dapm_widget *w,
dev_warn(component->dev,
"Unsupported ASRC rate1 (%s)\n",
arizona_sample_rate_val_to_name(val));
- return -EINVAL;
+ return -EINVAL;
}
break;
default:
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index ccdf088461b7..54c306707c02 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -325,8 +325,7 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
if (wm9705->mfd_pdata) {
wm9705->ac97 = wm9705->mfd_pdata->ac97;
regmap = wm9705->mfd_pdata->regmap;
- } else {
-#ifdef CONFIG_SND_SOC_AC97_BUS
+ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
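+		/* IS_ENABLED() keeps this branch visible to the compiler, replacing the old #ifdef */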
wm9705->ac97 = snd_soc_new_ac97_component(component, WM9705_VENDOR_ID,
WM9705_VENDOR_ID_MASK);
if (IS_ERR(wm9705->ac97)) {
@@ -339,7 +338,8 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
snd_soc_free_ac97_component(wm9705->ac97);
return PTR_ERR(regmap);
}
-#endif
+ } else {
+ return -ENXIO;
}
snd_soc_component_set_drvdata(component, wm9705->ac97);
@@ -350,14 +350,12 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
static void wm9705_soc_remove(struct snd_soc_component *component)
{
-#ifdef CONFIG_SND_SOC_AC97_BUS
struct wm9705_priv *wm9705 = snd_soc_component_get_drvdata(component);
- if (!wm9705->mfd_pdata) {
+ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9705->mfd_pdata) {
snd_soc_component_exit_regmap(component);
snd_soc_free_ac97_component(wm9705->ac97);
}
-#endif
}
static const struct snd_soc_component_driver soc_component_dev_wm9705 = {
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index e873baa9e778..01949eaba4fd 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -642,8 +642,7 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
if (wm9712->mfd_pdata) {
wm9712->ac97 = wm9712->mfd_pdata->ac97;
regmap = wm9712->mfd_pdata->regmap;
- } else {
-#ifdef CONFIG_SND_SOC_AC97_BUS
+ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
int ret;
wm9712->ac97 = snd_soc_new_ac97_component(component, WM9712_VENDOR_ID,
@@ -660,7 +659,8 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
snd_soc_free_ac97_component(wm9712->ac97);
return PTR_ERR(regmap);
}
-#endif
+ } else {
+ return -ENXIO;
}
snd_soc_component_init_regmap(component, regmap);
@@ -673,14 +673,12 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
static void wm9712_soc_remove(struct snd_soc_component *component)
{
-#ifdef CONFIG_SND_SOC_AC97_BUS
struct wm9712_priv *wm9712 = snd_soc_component_get_drvdata(component);
- if (!wm9712->mfd_pdata) {
+ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9712->mfd_pdata) {
snd_soc_component_exit_regmap(component);
snd_soc_free_ac97_component(wm9712->ac97);
}
-#endif
}
static const struct snd_soc_component_driver soc_component_dev_wm9712 = {
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 643863bb32e0..5a2fdf4f69bf 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -1214,8 +1214,7 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
if (wm9713->mfd_pdata) {
wm9713->ac97 = wm9713->mfd_pdata->ac97;
regmap = wm9713->mfd_pdata->regmap;
- } else {
-#ifdef CONFIG_SND_SOC_AC97_BUS
+ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
wm9713->ac97 = snd_soc_new_ac97_component(component, WM9713_VENDOR_ID,
WM9713_VENDOR_ID_MASK);
if (IS_ERR(wm9713->ac97))
@@ -1225,7 +1224,8 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
snd_soc_free_ac97_component(wm9713->ac97);
return PTR_ERR(regmap);
}
-#endif
+ } else {
+ return -ENXIO;
}
snd_soc_component_init_regmap(component, regmap);
@@ -1238,14 +1238,12 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
static void wm9713_soc_remove(struct snd_soc_component *component)
{
-#ifdef CONFIG_SND_SOC_AC97_BUS
struct wm9713_priv *wm9713 = snd_soc_component_get_drvdata(component);
- if (!wm9713->mfd_pdata) {
+ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9713->mfd_pdata) {
snd_soc_component_exit_regmap(component);
snd_soc_free_ac97_component(wm9713->ac97);
}
-#endif
}
static const struct snd_soc_component_driver soc_component_dev_wm9713 = {
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index a53dc174bbf0..1dd291cebe67 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -765,38 +765,41 @@ static unsigned int wm_adsp_region_to_reg(struct wm_adsp_region const *mem,
static void wm_adsp2_show_fw_status(struct wm_adsp *dsp)
{
- u16 scratch[4];
+ unsigned int scratch[4];
+ unsigned int addr = dsp->base + ADSP2_SCRATCH0;
+ unsigned int i;
int ret;
- ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2_SCRATCH0,
- scratch, sizeof(scratch));
- if (ret) {
- adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
- return;
+ for (i = 0; i < ARRAY_SIZE(scratch); ++i) {
+ ret = regmap_read(dsp->regmap, addr + i, &scratch[i]);
+ if (ret) {
+ adsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
+ return;
+ }
}
adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
- be16_to_cpu(scratch[0]),
- be16_to_cpu(scratch[1]),
- be16_to_cpu(scratch[2]),
- be16_to_cpu(scratch[3]));
+ scratch[0], scratch[1], scratch[2], scratch[3]);
}
static void wm_adsp2v2_show_fw_status(struct wm_adsp *dsp)
{
- u32 scratch[2];
+ unsigned int scratch[2];
int ret;
- ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
- scratch, sizeof(scratch));
-
+ ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
+ &scratch[0]);
if (ret) {
- adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
+ adsp_err(dsp, "Failed to read SCRATCH0_1: %d\n", ret);
return;
}
- scratch[0] = be32_to_cpu(scratch[0]);
- scratch[1] = be32_to_cpu(scratch[1]);
+ ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH2_3,
+ &scratch[1]);
+ if (ret) {
+ adsp_err(dsp, "Failed to read SCRATCH2_3: %d\n", ret);
+ return;
+ }
adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
scratch[0] & 0xFFFF,
@@ -2416,7 +2419,7 @@ static int wm_adsp_create_name(struct wm_adsp *dsp)
return 0;
}
-int wm_adsp1_init(struct wm_adsp *dsp)
+static int wm_adsp_common_init(struct wm_adsp *dsp)
{
int ret;
@@ -2425,11 +2428,17 @@ int wm_adsp1_init(struct wm_adsp *dsp)
return ret;
INIT_LIST_HEAD(&dsp->alg_regions);
+ INIT_LIST_HEAD(&dsp->ctl_list);
mutex_init(&dsp->pwr_lock);
return 0;
}
+
+int wm_adsp1_init(struct wm_adsp *dsp)
+{
+ return wm_adsp_common_init(dsp);
+}
EXPORT_SYMBOL_GPL(wm_adsp1_init);
int wm_adsp1_event(struct snd_soc_dapm_widget *w,
@@ -2914,7 +2923,7 @@ int wm_adsp2_init(struct wm_adsp *dsp)
{
int ret;
- ret = wm_adsp_create_name(dsp);
+ ret = wm_adsp_common_init(dsp);
if (ret)
return ret;
@@ -2936,12 +2945,8 @@ int wm_adsp2_init(struct wm_adsp *dsp)
break;
}
- INIT_LIST_HEAD(&dsp->alg_regions);
- INIT_LIST_HEAD(&dsp->ctl_list);
INIT_WORK(&dsp->boot_work, wm_adsp2_boot_work);
- mutex_init(&dsp->pwr_lock);
-
return 0;
}
EXPORT_SYMBOL_GPL(wm_adsp2_init);
diff --git a/sound/soc/davinci/Kconfig b/sound/soc/davinci/Kconfig
deleted file mode 100644
index 778faff28e0e..000000000000
--- a/sound/soc/davinci/Kconfig
+++ /dev/null
@@ -1,106 +0,0 @@
-config SND_DAVINCI_SOC
- tristate
- depends on ARCH_DAVINCI
- select SND_EDMA_SOC
-
-config SND_EDMA_SOC
- tristate "SoC Audio for Texas Instruments chips using eDMA"
- depends on TI_EDMA
- select SND_SOC_GENERIC_DMAENGINE_PCM
- help
- Say Y or M here if you want audio support for TI SoC which uses eDMA.
- The following line of SoCs are supported by this platform driver:
- - daVinci devices
- - AM335x
- - AM437x/AM438x
- - DRA7xx family
-
-config SND_DAVINCI_SOC_I2S
- tristate "DaVinci Multichannel Buffered Serial Port (McBSP) support"
- depends on SND_EDMA_SOC
- help
- Say Y or M here if you want to have support for McBSP IP found in
- Texas Instruments DaVinci DA850 SoCs.
-
-config SND_DAVINCI_SOC_MCASP
- tristate "Multichannel Audio Serial Port (McASP) support"
- depends on SND_SDMA_SOC || SND_EDMA_SOC
- help
- Say Y or M here if you want to have support for McASP IP found in
- various Texas Instruments SoCs like:
- - daVinci devices
- - Sitara line of SoCs (AM335x, AM438x, etc)
- - DRA7x devices
-
-config SND_DAVINCI_SOC_VCIF
- tristate
-
-config SND_DAVINCI_SOC_GENERIC_EVM
- tristate
- select SND_SOC_TLV320AIC3X
- select SND_DAVINCI_SOC_MCASP
-
-config SND_AM33XX_SOC_EVM
- tristate "SoC Audio for the AM33XX chip based boards"
- depends on SND_EDMA_SOC && SOC_AM33XX && I2C
- select SND_DAVINCI_SOC_GENERIC_EVM
- help
- Say Y or M if you want to add support for SoC audio on AM33XX
- boards using McASP and TLV320AIC3X codec. For example AM335X-EVM,
- AM335X-EVMSK, and BeagelBone with AudioCape boards have this
- setup.
-
-config SND_DAVINCI_SOC_EVM
- tristate "SoC Audio support for DaVinci DM6446, DM355 or DM365 EVM"
- depends on SND_EDMA_SOC && I2C
- depends on MACH_DAVINCI_EVM || MACH_DAVINCI_DM355_EVM || MACH_DAVINCI_DM365_EVM
- select SND_DAVINCI_SOC_GENERIC_EVM
- help
- Say Y if you want to add support for SoC audio on TI
- DaVinci DM6446, DM355 or DM365 EVM platforms.
-
-choice
- prompt "DM365 codec select"
- depends on SND_DAVINCI_SOC_EVM
- depends on MACH_DAVINCI_DM365_EVM
-
-config SND_DM365_AIC3X_CODEC
- tristate "Audio Codec - AIC3101"
- help
- Say Y if you want to add support for AIC3101 audio codec
-
-config SND_DM365_VOICE_CODEC
- tristate "Voice Codec - CQ93VC"
- select MFD_DAVINCI_VOICECODEC
- select SND_DAVINCI_SOC_VCIF
- select SND_SOC_CQ0093VC
- help
- Say Y if you want to add support for SoC On-chip voice codec
-endchoice
-
-config SND_DM6467_SOC_EVM
- tristate "SoC Audio support for DaVinci DM6467 EVM"
- depends on SND_EDMA_SOC && MACH_DAVINCI_DM6467_EVM && I2C
- select SND_DAVINCI_SOC_GENERIC_EVM
- select SND_SOC_SPDIF
-
- help
- Say Y if you want to add support for SoC audio on TI
-
-config SND_DA830_SOC_EVM
- tristate "SoC Audio support for DA830/OMAP-L137 EVM"
- depends on SND_EDMA_SOC && MACH_DAVINCI_DA830_EVM && I2C
- select SND_DAVINCI_SOC_GENERIC_EVM
-
- help
- Say Y if you want to add support for SoC audio on TI
- DA830/OMAP-L137 EVM
-
-config SND_DA850_SOC_EVM
- tristate "SoC Audio support for DA850/OMAP-L138 EVM"
- depends on SND_EDMA_SOC && MACH_DAVINCI_DA850_EVM && I2C
- select SND_DAVINCI_SOC_GENERIC_EVM
- help
- Say Y if you want to add support for SoC audio on TI
- DA850/OMAP-L138 EVM
-
diff --git a/sound/soc/davinci/Makefile b/sound/soc/davinci/Makefile
deleted file mode 100644
index 23c6592eb31a..000000000000
--- a/sound/soc/davinci/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# DAVINCI Platform Support
-snd-soc-edma-objs := edma-pcm.o
-snd-soc-davinci-i2s-objs := davinci-i2s.o
-snd-soc-davinci-mcasp-objs:= davinci-mcasp.o
-snd-soc-davinci-vcif-objs:= davinci-vcif.o
-
-obj-$(CONFIG_SND_EDMA_SOC) += snd-soc-edma.o
-obj-$(CONFIG_SND_DAVINCI_SOC_I2S) += snd-soc-davinci-i2s.o
-obj-$(CONFIG_SND_DAVINCI_SOC_MCASP) += snd-soc-davinci-mcasp.o
-obj-$(CONFIG_SND_DAVINCI_SOC_VCIF) += snd-soc-davinci-vcif.o
-
-# Generic DAVINCI/AM33xx Machine Support
-snd-soc-evm-objs := davinci-evm.o
-
-obj-$(CONFIG_SND_DAVINCI_SOC_GENERIC_EVM) += snd-soc-evm.o
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 6ec19fb4a934..2e75b5bc5f1d 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -221,7 +221,7 @@ config SND_SOC_PHYCORE_AC97
config SND_SOC_EUKREA_TLV320
tristate "Eukrea TLV320"
- depends on ARCH_MXC && I2C
+ depends on ARCH_MXC && !ARM64 && I2C
select SND_SOC_TLV320AIC23_I2C
select SND_SOC_IMX_AUDMUX
select SND_SOC_IMX_SSI
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 44433b20435c..81f2fe2c6d23 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -571,17 +571,17 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
}
/* Common settings for corresponding Freescale CPU DAI driver */
- if (strstr(cpu_np->name, "ssi")) {
+ if (of_node_name_eq(cpu_np, "ssi")) {
/* Only SSI needs to configure AUDMUX */
ret = fsl_asoc_card_audmux_init(np, priv);
if (ret) {
dev_err(&pdev->dev, "failed to init audmux\n");
goto asrc_fail;
}
- } else if (strstr(cpu_np->name, "esai")) {
+ } else if (of_node_name_eq(cpu_np, "esai")) {
priv->cpu_priv.sysclk_id[1] = ESAI_HCKT_EXTAL;
priv->cpu_priv.sysclk_id[0] = ESAI_HCKR_EXTAL;
- } else if (strstr(cpu_np->name, "sai")) {
+ } else if (of_node_name_eq(cpu_np, "sai")) {
priv->cpu_priv.sysclk_id[1] = FSL_SAI_CLK_MAST1;
priv->cpu_priv.sysclk_id[0] = FSL_SAI_CLK_MAST1;
}
diff --git a/sound/soc/fsl/fsl_ssi_dbg.c b/sound/soc/fsl/fsl_ssi_dbg.c
index 1255dfe19eef..6f6294149476 100644
--- a/sound/soc/fsl/fsl_ssi_dbg.c
+++ b/sound/soc/fsl/fsl_ssi_dbg.c
@@ -124,17 +124,7 @@ static int fsl_ssi_stats_show(struct seq_file *s, void *unused)
return 0;
}
-static int fsl_ssi_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, fsl_ssi_stats_show, inode->i_private);
-}
-
-static const struct file_operations fsl_ssi_stats_ops = {
- .open = fsl_ssi_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(fsl_ssi_stats);
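+/* Expands to the fsl_ssi_stats_open()/fsl_ssi_stats_fops wrappers around fsl_ssi_stats_show() */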
int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg, struct device *dev)
{
@@ -144,7 +134,7 @@ int fsl_ssi_debugfs_create(struct fsl_ssi_dbg *ssi_dbg, struct device *dev)
ssi_dbg->dbg_stats = debugfs_create_file("stats", 0444,
ssi_dbg->dbg_dir, ssi_dbg,
- &fsl_ssi_stats_ops);
+ &fsl_ssi_stats_fops);
if (!ssi_dbg->dbg_stats) {
debugfs_remove(ssi_dbg->dbg_dir);
return -ENOMEM;
diff --git a/sound/soc/generic/Kconfig b/sound/soc/generic/Kconfig
index c954be0a0f96..92c2cf06f40a 100644
--- a/sound/soc/generic/Kconfig
+++ b/sound/soc/generic/Kconfig
@@ -6,6 +6,7 @@ config SND_SIMPLE_CARD
select SND_SIMPLE_CARD_UTILS
help
This option enables generic simple sound card support
+	  It also supports DPCM for multi-CPU, single-codec systems.
config SND_SIMPLE_SCU_CARD
tristate "ASoC Simple SCU sound card support"
@@ -20,8 +21,9 @@ config SND_AUDIO_GRAPH_CARD
depends on OF
select SND_SIMPLE_CARD_UTILS
help
- This option enables generic simple simple sound card support
+ This option enables generic simple sound card support
with OF-graph DT bindings.
+	  It also supports DPCM for multi-CPU, single-codec systems.
config SND_AUDIO_GRAPH_SCU_CARD
tristate "ASoC Audio Graph SCU sound card support"
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 25c819e402e1..0d6144560a1e 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -23,19 +23,29 @@
struct graph_card_data {
struct snd_soc_card snd_card;
struct graph_dai_props {
- struct asoc_simple_dai cpu_dai;
- struct asoc_simple_dai codec_dai;
+ struct asoc_simple_dai *cpu_dai;
+ struct asoc_simple_dai *codec_dai;
struct snd_soc_dai_link_component codecs; /* single codec */
struct snd_soc_dai_link_component platform;
+ struct asoc_simple_card_data adata;
+ struct snd_soc_codec_conf *codec_conf;
unsigned int mclk_fs;
} *dai_props;
- unsigned int mclk_fs;
struct asoc_simple_jack hp_jack;
struct asoc_simple_jack mic_jack;
struct snd_soc_dai_link *dai_link;
+ struct asoc_simple_dai *dais;
+ struct snd_soc_codec_conf *codec_conf;
struct gpio_desc *pa_gpio;
};
+#define graph_priv_to_card(priv) (&(priv)->snd_card)
+#define graph_priv_to_props(priv, i) ((priv)->dai_props + (i))
+#define graph_priv_to_dev(priv) (graph_priv_to_card(priv)->dev)
+#define graph_priv_to_link(priv, i) (graph_priv_to_card(priv)->dai_link + (i))
+
+#define PREFIX "audio-graph-card,"
+
static int asoc_graph_card_outdrv_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
@@ -63,11 +73,6 @@ static const struct snd_soc_dapm_widget asoc_graph_card_dapm_widgets[] = {
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
};
-#define graph_priv_to_card(priv) (&(priv)->snd_card)
-#define graph_priv_to_props(priv, i) ((priv)->dai_props + (i))
-#define graph_priv_to_dev(priv) (graph_priv_to_card(priv)->dev)
-#define graph_priv_to_link(priv, i) (graph_priv_to_card(priv)->dai_link + (i))
-
static int asoc_graph_card_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -75,13 +80,13 @@ static int asoc_graph_card_startup(struct snd_pcm_substream *substream)
struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
int ret;
- ret = asoc_simple_card_clk_enable(&dai_props->cpu_dai);
+ ret = asoc_simple_card_clk_enable(dai_props->cpu_dai);
if (ret)
return ret;
- ret = asoc_simple_card_clk_enable(&dai_props->codec_dai);
+ ret = asoc_simple_card_clk_enable(dai_props->codec_dai);
if (ret)
- asoc_simple_card_clk_disable(&dai_props->cpu_dai);
+ asoc_simple_card_clk_disable(dai_props->cpu_dai);
return ret;
}
@@ -92,9 +97,9 @@ static void asoc_graph_card_shutdown(struct snd_pcm_substream *substream)
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
- asoc_simple_card_clk_disable(&dai_props->cpu_dai);
+ asoc_simple_card_clk_disable(dai_props->cpu_dai);
- asoc_simple_card_clk_disable(&dai_props->codec_dai);
+ asoc_simple_card_clk_disable(dai_props->codec_dai);
}
static int asoc_graph_card_hw_params(struct snd_pcm_substream *substream,
@@ -108,9 +113,7 @@ static int asoc_graph_card_hw_params(struct snd_pcm_substream *substream,
unsigned int mclk, mclk_fs = 0;
int ret = 0;
- if (priv->mclk_fs)
- mclk_fs = priv->mclk_fs;
- else if (dai_props->mclk_fs)
+ if (dai_props->mclk_fs)
mclk_fs = dai_props->mclk_fs;
if (mclk_fs) {
@@ -139,86 +142,238 @@ static const struct snd_soc_ops asoc_graph_card_ops = {
static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
{
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *codec = rtd->codec_dai;
- struct snd_soc_dai *cpu = rtd->cpu_dai;
- struct graph_dai_props *dai_props =
- graph_priv_to_props(priv, rtd->num);
- int ret;
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
+ int ret = 0;
- ret = asoc_simple_card_init_dai(codec, &dai_props->codec_dai);
+ ret = asoc_simple_card_init_dai(rtd->codec_dai,
+ dai_props->codec_dai);
if (ret < 0)
return ret;
- ret = asoc_simple_card_init_dai(cpu, &dai_props->cpu_dai);
+ ret = asoc_simple_card_init_dai(rtd->cpu_dai,
+ dai_props->cpu_dai);
if (ret < 0)
return ret;
return 0;
}
-static int asoc_graph_card_dai_link_of(struct device_node *cpu_port,
- struct graph_card_data *priv,
- int idx)
+static int asoc_graph_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
+
+ asoc_simple_card_convert_fixup(&dai_props->adata, params);
+
+ return 0;
+}
+
+static int asoc_graph_card_dai_link_of_dpcm(struct device_node *top,
+ struct device_node *cpu_ep,
+ struct device_node *codec_ep,
+ struct graph_card_data *priv,
+ int *dai_idx, int link_idx,
+ int *conf_idx, int is_cpu)
{
struct device *dev = graph_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, idx);
- struct graph_dai_props *dai_props = graph_priv_to_props(priv, idx);
- struct asoc_simple_dai *cpu_dai = &dai_props->cpu_dai;
- struct asoc_simple_dai *codec_dai = &dai_props->codec_dai;
- struct device_node *cpu_ep = of_get_next_child(cpu_port, NULL);
- struct device_node *codec_ep = of_graph_get_remote_endpoint(cpu_ep);
- struct device_node *rcpu_ep = of_graph_get_remote_endpoint(codec_ep);
+ struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, link_idx);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, link_idx);
+ struct device_node *ep = is_cpu ? cpu_ep : codec_ep;
+ struct device_node *port = of_get_parent(ep);
+ struct device_node *ports = of_get_parent(port);
+ struct device_node *node = of_graph_get_port_parent(ep);
+ struct asoc_simple_dai *dai;
+ struct snd_soc_dai_link_component *codecs = dai_link->codecs;
int ret;
- if (rcpu_ep != cpu_ep) {
- dev_err(dev, "remote-endpoint mismatch (%s/%s/%s)\n",
- cpu_ep->name, codec_ep->name, rcpu_ep->name);
- ret = -EINVAL;
- goto dai_link_of_err;
+ dev_dbg(dev, "link_of DPCM (for %s)\n", is_cpu ? "CPU" : "Codec");
+
+ of_property_read_u32(top, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(ports, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(port, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(ep, "mclk-fs", &dai_props->mclk_fs);
+
+ asoc_simple_card_parse_convert(dev, top, NULL, &dai_props->adata);
+ asoc_simple_card_parse_convert(dev, node, PREFIX, &dai_props->adata);
+ asoc_simple_card_parse_convert(dev, ports, NULL, &dai_props->adata);
+ asoc_simple_card_parse_convert(dev, port, NULL, &dai_props->adata);
+ asoc_simple_card_parse_convert(dev, ep, NULL, &dai_props->adata);
+
+ of_node_put(ports);
+ of_node_put(port);
+
+ if (is_cpu) {
+
+ /* BE is dummy */
+ codecs->of_node = NULL;
+ codecs->dai_name = "snd-soc-dummy-dai";
+ codecs->name = "snd-soc-dummy";
+
+ /* FE settings */
+ dai_link->dynamic = 1;
+ dai_link->dpcm_merged_format = 1;
+
+ dai =
+ dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
+
+ ret = asoc_simple_card_parse_graph_cpu(ep, dai_link);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_card_parse_clk_cpu(dev, ep, dai_link, dai);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "fe.%s",
+ dai_link->cpu_dai_name);
+ if (ret < 0)
+ return ret;
+
+ /* card->num_links includes Codec */
+ asoc_simple_card_canonicalize_cpu(dai_link,
+ of_graph_get_endpoint_count(dai_link->cpu_of_node) == 1);
+ } else {
+ struct snd_soc_codec_conf *cconf;
+
+ /* FE is dummy */
+ dai_link->cpu_of_node = NULL;
+ dai_link->cpu_dai_name = "snd-soc-dummy-dai";
+ dai_link->cpu_name = "snd-soc-dummy";
+
+ /* BE settings */
+ dai_link->no_pcm = 1;
+ dai_link->be_hw_params_fixup = asoc_graph_card_be_hw_params_fixup;
+
+ dai =
+ dai_props->codec_dai = &priv->dais[(*dai_idx)++];
+
+ cconf =
+ dai_props->codec_conf = &priv->codec_conf[(*conf_idx)++];
+
+ ret = asoc_simple_card_parse_graph_codec(ep, dai_link);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_parse_clk_codec(dev, ep, dai_link, dai);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "be.%s",
+ codecs->dai_name);
+ if (ret < 0)
+ return ret;
+
+ /* check "prefix" from top node */
+ snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
+ "prefix");
+ snd_soc_of_parse_node_prefix(node, cconf, codecs->of_node,
+ PREFIX "prefix");
+ snd_soc_of_parse_node_prefix(ports, cconf, codecs->of_node,
+ "prefix");
+ snd_soc_of_parse_node_prefix(port, cconf, codecs->of_node,
+ "prefix");
}
+ ret = asoc_simple_card_of_parse_tdm(ep, dai);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_card_canonicalize_dailink(dai_link);
+ if (ret < 0)
+ return ret;
+
ret = asoc_simple_card_parse_daifmt(dev, cpu_ep, codec_ep,
NULL, &dai_link->dai_fmt);
if (ret < 0)
- goto dai_link_of_err;
+ return ret;
+
+ dai_link->dpcm_playback = 1;
+ dai_link->dpcm_capture = 1;
+ dai_link->ops = &asoc_graph_card_ops;
+ dai_link->init = asoc_graph_card_dai_init;
+
+ return 0;
+}
+
+static int asoc_graph_card_dai_link_of(struct device_node *top,
+ struct device_node *cpu_ep,
+ struct device_node *codec_ep,
+ struct graph_card_data *priv,
+ int *dai_idx, int link_idx)
+{
+ struct device *dev = graph_priv_to_dev(priv);
+ struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, link_idx);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, link_idx);
+ struct device_node *cpu_port = of_get_parent(cpu_ep);
+ struct device_node *codec_port = of_get_parent(codec_ep);
+ struct device_node *cpu_ports = of_get_parent(cpu_port);
+ struct device_node *codec_ports = of_get_parent(codec_port);
+ struct asoc_simple_dai *cpu_dai;
+ struct asoc_simple_dai *codec_dai;
+ int ret;
+
+ dev_dbg(dev, "link_of\n");
+
+ cpu_dai =
+ dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
+ codec_dai =
+ dai_props->codec_dai = &priv->dais[(*dai_idx)++];
- of_property_read_u32(cpu_ep, "mclk-fs", &dai_props->mclk_fs);
- of_property_read_u32(codec_ep, "mclk-fs", &dai_props->mclk_fs);
+ /* Factor to mclk, used in hw_params() */
+ of_property_read_u32(top, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(cpu_ports, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(codec_ports, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(cpu_port, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(codec_port, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(cpu_ep, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(codec_ep, "mclk-fs", &dai_props->mclk_fs);
+ of_node_put(cpu_port);
+ of_node_put(cpu_ports);
+ of_node_put(codec_port);
+ of_node_put(codec_ports);
+
+ ret = asoc_simple_card_parse_daifmt(dev, cpu_ep, codec_ep,
+ NULL, &dai_link->dai_fmt);
+ if (ret < 0)
+ return ret;
ret = asoc_simple_card_parse_graph_cpu(cpu_ep, dai_link);
if (ret < 0)
- goto dai_link_of_err;
+ return ret;
ret = asoc_simple_card_parse_graph_codec(codec_ep, dai_link);
if (ret < 0)
- goto dai_link_of_err;
+ return ret;
ret = asoc_simple_card_of_parse_tdm(cpu_ep, cpu_dai);
if (ret < 0)
- goto dai_link_of_err;
+ return ret;
ret = asoc_simple_card_of_parse_tdm(codec_ep, codec_dai);
if (ret < 0)
- goto dai_link_of_err;
+ return ret;
ret = asoc_simple_card_parse_clk_cpu(dev, cpu_ep, dai_link, cpu_dai);
if (ret < 0)
- goto dai_link_of_err;
+ return ret;
ret = asoc_simple_card_parse_clk_codec(dev, codec_ep, dai_link, codec_dai);
if (ret < 0)
- goto dai_link_of_err;
+ return ret;
ret = asoc_simple_card_canonicalize_dailink(dai_link);
if (ret < 0)
- goto dai_link_of_err;
+ return ret;
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"%s-%s",
dai_link->cpu_dai_name,
dai_link->codecs->dai_name);
if (ret < 0)
- goto dai_link_of_err;
+ return ret;
dai_link->ops = &asoc_graph_card_ops;
dai_link->init = asoc_graph_card_dai_init;
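Aside: the chained of_property_read_u32() calls above work because the helper leaves its output untouched unless the property is present, so reading top, then ports, then port, then endpoint yields most-specific-node-wins precedence with no explicit if/else. A runnable sketch of the idiom, with read_u32() as a hypothetical stand-in for the OF helper:

    #include <stdio.h>

    /* stand-in for of_property_read_u32(): writes *out only on success */
    static int read_u32(int present, unsigned int val, unsigned int *out)
    {
        if (!present)
            return -1;
        *out = val;
        return 0;
    }

    int main(void)
    {
        unsigned int mclk_fs = 0;

        read_u32(1, 256, &mclk_fs); /* top:  mclk-fs = 256      */
        read_u32(0,   0, &mclk_fs); /* port: absent, untouched  */
        read_u32(1, 512, &mclk_fs); /* ep:   mclk-fs = 512 wins */
        printf("%u\n", mclk_fs);    /* prints 512               */
        return 0;
    }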
@@ -226,12 +381,7 @@ static int asoc_graph_card_dai_link_of(struct device_node *cpu_port,
asoc_simple_card_canonicalize_cpu(dai_link,
of_graph_get_endpoint_count(dai_link->cpu_of_node) == 1);
-dai_link_of_err:
- of_node_put(cpu_ep);
- of_node_put(rcpu_ep);
- of_node_put(codec_ep);
-
- return ret;
+ return 0;
}
static int asoc_graph_card_parse_of(struct graph_card_data *priv)
@@ -239,44 +389,173 @@ static int asoc_graph_card_parse_of(struct graph_card_data *priv)
struct of_phandle_iterator it;
struct device *dev = graph_priv_to_dev(priv);
struct snd_soc_card *card = graph_priv_to_card(priv);
- struct device_node *node = dev->of_node;
- int rc, idx = 0;
- int ret;
+ struct device_node *top = dev->of_node;
+ struct device_node *node = top;
+ struct device_node *cpu_port;
+ struct device_node *cpu_ep = NULL;
+ struct device_node *codec_ep = NULL;
+ struct device_node *codec_port = NULL;
+ struct device_node *codec_port_old = NULL;
+ int rc, ret;
+ int link_idx, dai_idx, conf_idx;
+ int cpu;
ret = asoc_simple_card_of_parse_widgets(card, NULL);
if (ret < 0)
return ret;
- ret = asoc_simple_card_of_parse_routing(card, NULL, 1);
+ ret = asoc_simple_card_of_parse_routing(card, NULL);
if (ret < 0)
return ret;
- /* Factor to mclk, used in hw_params() */
- of_property_read_u32(node, "mclk-fs", &priv->mclk_fs);
-
- of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
- ret = asoc_graph_card_dai_link_of(it.node, priv, idx++);
- if (ret < 0) {
- of_node_put(it.node);
-
- return ret;
+ link_idx = 0;
+ dai_idx = 0;
+ conf_idx = 0;
+ codec_port_old = NULL;
+ for (cpu = 1; cpu >= 0; cpu--) {
+ /*
+ * Detect all CPUs first, then detect all Codecs.
+ *
+ * In the normal sound case, all DAIs are detected
+ * as "CPU-Codec".
+ *
+ * In the DPCM sound case,
+ * all CPUs are detected as "CPU-dummy", and
+ * all Codecs are detected as "dummy-Codec".
+ * To avoid random sub-device numbering,
+ * detect "dummy-Codec" last.
+ */
+ of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
+ cpu_port = it.node;
+ cpu_ep = NULL;
+ while (1) {
+ cpu_ep = of_get_next_child(cpu_port, cpu_ep);
+ if (!cpu_ep)
+ break;
+
+ codec_ep = of_graph_get_remote_endpoint(cpu_ep);
+ codec_port = of_get_parent(codec_ep);
+
+ of_node_put(codec_ep);
+ of_node_put(codec_port);
+
+ dev_dbg(dev, "%pOFf <-> %pOFf\n", cpu_ep, codec_ep);
+
+ if (of_get_child_count(codec_port) > 1) {
+ /*
+ * for DPCM sound
+ */
+ if (!cpu) {
+ if (codec_port_old == codec_port)
+ continue;
+ codec_port_old = codec_port;
+ }
+ ret = asoc_graph_card_dai_link_of_dpcm(
+ top, cpu_ep, codec_ep, priv,
+ &dai_idx, link_idx++,
+ &conf_idx, cpu);
+ } else if (cpu) {
+ /*
+ * for Normal sound
+ */
+ ret = asoc_graph_card_dai_link_of(
+ top, cpu_ep, codec_ep, priv,
+ &dai_idx, link_idx++);
+ }
+ if (ret < 0)
+ return ret;
+ }
}
}
return asoc_simple_card_parse_card_name(card, NULL);
}
-static int asoc_graph_get_dais_count(struct device *dev)
+static void asoc_graph_get_dais_count(struct device *dev,
+ int *link_num,
+ int *dais_num,
+ int *ccnf_num)
{
struct of_phandle_iterator it;
struct device_node *node = dev->of_node;
- int count = 0;
+ struct device_node *cpu_port;
+ struct device_node *cpu_ep;
+ struct device_node *codec_ep;
+ struct device_node *codec_port;
+ struct device_node *codec_port_old;
+ struct device_node *codec_port_old2;
int rc;
- of_for_each_phandle(&it, rc, node, "dais", NULL, 0)
- count++;
-
- return count;
+ /*
+ * link_num : number of links.
+ * CPU-Codec / CPU-dummy / dummy-Codec
+ * dais_num : number of DAIs
+ * ccnf_num : number of codec_conf
+ * (one per "dummy-Codec" link)
+ *
+ * ex1)
+ * CPU0 --- Codec0 link : 5
+ * CPU1 --- Codec1 dais : 7
+ * CPU2 -/ ccnf : 1
+ * CPU3 --- Codec2
+ *
+ * => 5 links = 2xCPU-Codec + 2xCPU-dummy + 1xdummy-Codec
+ * => 7 DAIs = 4xCPU + 3xCodec
+ * => 1 ccnf = 1xdummy-Codec
+ *
+ * ex2)
+ * CPU0 --- Codec0 link : 5
+ * CPU1 --- Codec1 dais : 6
+ * CPU2 -/ ccnf : 1
+ * CPU3 -/
+ *
+ * => 5 links = 1xCPU-Codec + 3xCPU-dummy + 1xdummy-Codec
+ * => 6 DAIs = 4xCPU + 2xCodec
+ * => 1 ccnf = 1xdummy-Codec
+ *
+ * ex3)
+ * CPU0 --- Codec0 link : 6
+ * CPU1 -/ dais : 6
+ * CPU2 --- Codec1 ccnf : 2
+ * CPU3 -/
+ *
+ * => 6 links = 0xCPU-Codec + 4xCPU-dummy + 2xdummy-Codec
+ * => 6 DAIs = 4xCPU + 2xCodec
+ * => 2 ccnf = 2xdummy-Codec
+ */
+ codec_port_old = NULL;
+ codec_port_old2 = NULL;
+ of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
+ cpu_port = it.node;
+ cpu_ep = NULL;
+ while (1) {
+ cpu_ep = of_get_next_child(cpu_port, cpu_ep);
+ if (!cpu_ep)
+ break;
+
+ codec_ep = of_graph_get_remote_endpoint(cpu_ep);
+ codec_port = of_get_parent(codec_ep);
+
+ of_node_put(codec_ep);
+ of_node_put(codec_port);
+
+ (*link_num)++;
+ (*dais_num)++;
+
+ if (codec_port_old == codec_port) {
+ if (codec_port_old2 != codec_port_old) {
+ (*link_num)++;
+ (*ccnf_num)++;
+ }
+
+ codec_port_old2 = codec_port_old;
+ continue;
+ }
+
+ (*dais_num)++;
+ codec_port_old = codec_port;
+ }
+ }
}
static int asoc_graph_soc_card_probe(struct snd_soc_card *card)
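Aside: the ex1..ex3 figures in the counting comment above can be checked mechanically. This sketch re-implements the codec_port_old/codec_port_old2 walk over a toy topology: each array element names the codec port a CPU endpoint connects to, and a repeated value models a shared codec port (which, as in the kernel loop, is only recognized when the sharing endpoints are consecutive):

    #include <stdio.h>

    static void count(const int *codec_port, int n)
    {
        int link = 0, dais = 0, ccnf = 0;
        int old = -1, old2 = -1;

        for (int i = 0; i < n; i++) {
            link++;                 /* CPU-Codec or CPU-dummy link */
            dais++;                 /* CPU side DAI */
            if (codec_port[i] == old) {
                if (old2 != old) {
                    link++;         /* one dummy-Codec link */
                    ccnf++;         /* and one codec_conf   */
                }
                old2 = old;
                continue;
            }
            dais++;                 /* Codec side DAI */
            old = codec_port[i];
        }
        printf("link:%d dais:%d ccnf:%d\n", link, dais, ccnf);
    }

    int main(void)
    {
        int ex1[] = { 0, 1, 1, 2 }; /* link:5 dais:7 ccnf:1 */
        int ex2[] = { 0, 1, 1, 1 }; /* link:5 dais:6 ccnf:1 */
        int ex3[] = { 0, 0, 1, 1 }; /* link:6 dais:6 ccnf:2 */

        count(ex1, 4);
        count(ex2, 4);
        count(ex3, 4);
        return 0;
    }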
@@ -300,22 +579,27 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
struct graph_card_data *priv;
struct snd_soc_dai_link *dai_link;
struct graph_dai_props *dai_props;
+ struct asoc_simple_dai *dais;
struct device *dev = &pdev->dev;
struct snd_soc_card *card;
- int num, ret, i;
+ struct snd_soc_codec_conf *cconf;
+ int lnum = 0, dnum = 0, cnum = 0;
+ int ret, i;
/* Allocate the private data and the DAI link array */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- num = asoc_graph_get_dais_count(dev);
- if (num == 0)
+ asoc_graph_get_dais_count(dev, &lnum, &dnum, &cnum);
+ if (!lnum || !dnum)
return -EINVAL;
- dai_props = devm_kcalloc(dev, num, sizeof(*dai_props), GFP_KERNEL);
- dai_link = devm_kcalloc(dev, num, sizeof(*dai_link), GFP_KERNEL);
- if (!dai_props || !dai_link)
+ dai_props = devm_kcalloc(dev, lnum, sizeof(*dai_props), GFP_KERNEL);
+ dai_link = devm_kcalloc(dev, lnum, sizeof(*dai_link), GFP_KERNEL);
+ dais = devm_kcalloc(dev, dnum, sizeof(*dais), GFP_KERNEL);
+ cconf = devm_kcalloc(dev, cnum, sizeof(*cconf), GFP_KERNEL);
+ if (!dai_props || !dai_link || !dais)
return -ENOMEM;
/*
@@ -324,7 +608,7 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
* see
* soc-core.c :: snd_soc_init_multicodec()
*/
- for (i = 0; i < num; i++) {
+ for (i = 0; i < lnum; i++) {
dai_link[i].codecs = &dai_props[i].codecs;
dai_link[i].num_codecs = 1;
dai_link[i].platform = &dai_props[i].platform;
@@ -339,16 +623,20 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
priv->dai_props = dai_props;
priv->dai_link = dai_link;
+ priv->dais = dais;
+ priv->codec_conf = cconf;
/* Init snd_soc_card */
card = graph_priv_to_card(priv);
- card->owner = THIS_MODULE;
- card->dev = dev;
- card->dai_link = dai_link;
- card->num_links = num;
- card->dapm_widgets = asoc_graph_card_dapm_widgets;
- card->num_dapm_widgets = ARRAY_SIZE(asoc_graph_card_dapm_widgets);
- card->probe = asoc_graph_soc_card_probe;
+ card->owner = THIS_MODULE;
+ card->dev = dev;
+ card->dai_link = dai_link;
+ card->num_links = lnum;
+ card->dapm_widgets = asoc_graph_card_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(asoc_graph_card_dapm_widgets);
+ card->probe = asoc_graph_soc_card_probe;
+ card->codec_conf = cconf;
+ card->num_configs = cnum;
ret = asoc_graph_card_parse_of(priv);
if (ret < 0) {
@@ -379,6 +667,7 @@ static int asoc_graph_card_remove(struct platform_device *pdev)
static const struct of_device_id asoc_graph_of_match[] = {
{ .compatible = "audio-graph-card", },
+ { .compatible = "audio-graph-scu-card", },
{},
};
MODULE_DEVICE_TABLE(of, asoc_graph_of_match);
diff --git a/sound/soc/generic/audio-graph-scu-card.c b/sound/soc/generic/audio-graph-scu-card.c
index b83bb31021a9..e1b192ea147b 100644
--- a/sound/soc/generic/audio-graph-scu-card.c
+++ b/sound/soc/generic/audio-graph-scu-card.c
@@ -24,14 +24,18 @@
struct graph_card_data {
struct snd_soc_card snd_card;
- struct snd_soc_codec_conf codec_conf;
struct graph_dai_props {
- struct asoc_simple_dai dai;
+ struct asoc_simple_dai *cpu_dai;
+ struct asoc_simple_dai *codec_dai;
struct snd_soc_dai_link_component codecs;
struct snd_soc_dai_link_component platform;
+ struct asoc_simple_card_data adata;
+ struct snd_soc_codec_conf *codec_conf;
} *dai_props;
struct snd_soc_dai_link *dai_link;
+ struct asoc_simple_dai *dais;
struct asoc_simple_card_data adata;
+ struct snd_soc_codec_conf *codec_conf;
};
#define graph_priv_to_card(priv) (&(priv)->snd_card)
@@ -39,13 +43,24 @@ struct graph_card_data {
#define graph_priv_to_dev(priv) (graph_priv_to_card(priv)->dev)
#define graph_priv_to_link(priv, i) (graph_priv_to_card(priv)->dai_link + (i))
+#define PREFIX "audio-graph-card,"
+
static int asoc_graph_card_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
+ int ret = 0;
+
+ ret = asoc_simple_card_clk_enable(dai_props->cpu_dai);
+ if (ret)
+ return ret;
- return asoc_simple_card_clk_enable(&dai_props->dai);
+ ret = asoc_simple_card_clk_enable(dai_props->codec_dai);
+ if (ret)
+ asoc_simple_card_clk_disable(dai_props->cpu_dai);
+
+ return ret;
}
static void asoc_graph_card_shutdown(struct snd_pcm_substream *substream)
@@ -54,7 +69,9 @@ static void asoc_graph_card_shutdown(struct snd_pcm_substream *substream)
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
- asoc_simple_card_clk_disable(&dai_props->dai);
+ asoc_simple_card_clk_disable(dai_props->cpu_dai);
+
+ asoc_simple_card_clk_disable(dai_props->codec_dai);
}
static const struct snd_soc_ops asoc_graph_card_ops = {
@@ -65,39 +82,49 @@ static const struct snd_soc_ops asoc_graph_card_ops = {
static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
{
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai;
- struct snd_soc_dai_link *dai_link;
- struct graph_dai_props *dai_props;
- int num = rtd->num;
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
+ int ret = 0;
- dai_link = graph_priv_to_link(priv, num);
- dai_props = graph_priv_to_props(priv, num);
- dai = dai_link->dynamic ?
- rtd->cpu_dai :
- rtd->codec_dai;
+ ret = asoc_simple_card_init_dai(rtd->codec_dai,
+ dai_props->codec_dai);
+ if (ret < 0)
+ return ret;
- return asoc_simple_card_init_dai(dai, &dai_props->dai);
+ ret = asoc_simple_card_init_dai(rtd->cpu_dai,
+ dai_props->cpu_dai);
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
static int asoc_graph_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
+
+ asoc_simple_card_convert_fixup(&dai_props->adata, params);
+ /* overwrite with top-level adata if it exists */
asoc_simple_card_convert_fixup(&priv->adata, params);
return 0;
}
-static int asoc_graph_card_dai_link_of(struct device_node *ep,
+static int asoc_graph_card_dai_link_of(struct device_node *cpu_ep,
+ struct device_node *codec_ep,
struct graph_card_data *priv,
- unsigned int daifmt,
- int idx, int is_fe)
+ int *dai_idx, int link_idx,
+ int *conf_idx, int is_fe)
{
struct device *dev = graph_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, idx);
- struct graph_dai_props *dai_props = graph_priv_to_props(priv, idx);
+ struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, link_idx);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, link_idx);
struct snd_soc_card *card = graph_priv_to_card(priv);
+ struct device_node *ep = is_fe ? cpu_ep : codec_ep;
+ struct device_node *node = of_graph_get_port_parent(ep);
+ struct asoc_simple_dai *dai;
int ret;
if (is_fe) {
@@ -113,11 +140,14 @@ static int asoc_graph_card_dai_link_of(struct device_node *ep,
dai_link->dynamic = 1;
dai_link->dpcm_merged_format = 1;
+ dai =
+ dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
+
ret = asoc_simple_card_parse_graph_cpu(ep, dai_link);
if (ret)
return ret;
- ret = asoc_simple_card_parse_clk_cpu(dev, ep, dai_link, &dai_props->dai);
+ ret = asoc_simple_card_parse_clk_cpu(dev, ep, dai_link, dai);
if (ret < 0)
return ret;
@@ -131,6 +161,8 @@ static int asoc_graph_card_dai_link_of(struct device_node *ep,
asoc_simple_card_canonicalize_cpu(dai_link,
of_graph_get_endpoint_count(dai_link->cpu_of_node) == 1);
} else {
+ struct snd_soc_codec_conf *cconf;
+
/* FE is dummy */
dai_link->cpu_of_node = NULL;
dai_link->cpu_dai_name = "snd-soc-dummy-dai";
@@ -140,11 +172,17 @@ static int asoc_graph_card_dai_link_of(struct device_node *ep,
dai_link->no_pcm = 1;
dai_link->be_hw_params_fixup = asoc_graph_card_be_hw_params_fixup;
+ dai =
+ dai_props->codec_dai = &priv->dais[(*dai_idx)++];
+
+ cconf =
+ dai_props->codec_conf = &priv->codec_conf[(*conf_idx)++];
+
ret = asoc_simple_card_parse_graph_codec(ep, dai_link);
if (ret < 0)
return ret;
- ret = asoc_simple_card_parse_clk_codec(dev, ep, dai_link, &dai_props->dai);
+ ret = asoc_simple_card_parse_clk_codec(dev, ep, dai_link, dai);
if (ret < 0)
return ret;
@@ -154,13 +192,20 @@ static int asoc_graph_card_dai_link_of(struct device_node *ep,
if (ret < 0)
return ret;
- snd_soc_of_parse_audio_prefix(card,
- &priv->codec_conf,
+ /* check "prefix" from top node */
+ snd_soc_of_parse_audio_prefix(card, cconf,
dai_link->codecs->of_node,
"prefix");
+ /* check "prefix" from each node if top doesn't have */
+ if (!cconf->of_node)
+ snd_soc_of_parse_node_prefix(node, cconf,
+ dai_link->codecs->of_node,
+ PREFIX "prefix");
}
- ret = asoc_simple_card_of_parse_tdm(ep, &dai_props->dai);
+ asoc_simple_card_parse_convert(dev, node, PREFIX, &dai_props->adata);
+
+ ret = asoc_simple_card_of_parse_tdm(ep, dai);
if (ret)
return ret;
@@ -168,7 +213,11 @@ static int asoc_graph_card_dai_link_of(struct device_node *ep,
if (ret < 0)
return ret;
- dai_link->dai_fmt = daifmt;
+ ret = asoc_simple_card_parse_daifmt(dev, cpu_ep, codec_ep,
+ NULL, &dai_link->dai_fmt);
+ if (ret < 0)
+ return ret;
+
dai_link->dpcm_playback = 1;
dai_link->dpcm_capture = 1;
dai_link->ops = &asoc_graph_card_ops;
@@ -186,11 +235,9 @@ static int asoc_graph_card_parse_of(struct graph_card_data *priv)
struct device_node *cpu_port;
struct device_node *cpu_ep;
struct device_node *codec_ep;
- struct device_node *rcpu_ep;
struct device_node *codec_port;
struct device_node *codec_port_old;
- unsigned int daifmt = 0;
- int dai_idx, ret;
+ int dai_idx, link_idx, conf_idx, ret;
int rc, codec;
if (!node)
@@ -201,47 +248,20 @@ static int asoc_graph_card_parse_of(struct graph_card_data *priv)
* see simple-card
*/
- ret = asoc_simple_card_of_parse_routing(card, NULL, 0);
+ ret = asoc_simple_card_of_parse_routing(card, NULL);
if (ret < 0)
return ret;
- asoc_simple_card_parse_convert(dev, NULL, &priv->adata);
+ asoc_simple_card_parse_convert(dev, node, NULL, &priv->adata);
/*
* it supports multi CPU, single CODEC only here
* see asoc_graph_get_dais_count
*/
- /* find 1st codec */
- of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
- cpu_port = it.node;
- cpu_ep = of_get_next_child(cpu_port, NULL);
- codec_ep = of_graph_get_remote_endpoint(cpu_ep);
- rcpu_ep = of_graph_get_remote_endpoint(codec_ep);
-
- of_node_put(cpu_ep);
- of_node_put(codec_ep);
- of_node_put(rcpu_ep);
-
- if (!codec_ep)
- continue;
-
- if (rcpu_ep != cpu_ep) {
- dev_err(dev, "remote-endpoint missmatch (%s/%s/%s)\n",
- cpu_ep->name, codec_ep->name, rcpu_ep->name);
- ret = -EINVAL;
- goto parse_of_err;
- }
-
- ret = asoc_simple_card_parse_daifmt(dev, cpu_ep, codec_ep,
- NULL, &daifmt);
- if (ret < 0) {
- of_node_put(cpu_port);
- goto parse_of_err;
- }
- }
-
+ link_idx = 0;
dai_idx = 0;
+ conf_idx = 0;
codec_port_old = NULL;
for (codec = 0; codec < 2; codec++) {
/*
@@ -257,31 +277,23 @@ static int asoc_graph_card_parse_of(struct graph_card_data *priv)
of_node_put(cpu_ep);
of_node_put(codec_ep);
+ of_node_put(cpu_port);
of_node_put(codec_port);
+ it.node = NULL;
if (codec) {
- if (!codec_port)
- continue;
-
if (codec_port_old == codec_port)
continue;
codec_port_old = codec_port;
-
- /* Back-End (= Codec) */
- ret = asoc_graph_card_dai_link_of(codec_ep, priv, daifmt, dai_idx++, 0);
- if (ret < 0) {
- of_node_put(cpu_port);
- goto parse_of_err;
- }
- } else {
- /* Front-End (= CPU) */
- ret = asoc_graph_card_dai_link_of(cpu_ep, priv, daifmt, dai_idx++, 1);
- if (ret < 0) {
- of_node_put(cpu_port);
- goto parse_of_err;
- }
}
+
+ ret = asoc_graph_card_dai_link_of(cpu_ep, codec_ep,
+ priv, &dai_idx,
+ link_idx++, &conf_idx,
+ !codec);
+ if (ret < 0)
+ goto parse_of_err;
}
}
@@ -289,13 +301,24 @@ static int asoc_graph_card_parse_of(struct graph_card_data *priv)
if (ret)
goto parse_of_err;
+ if ((card->num_links != link_idx) ||
+ (card->num_configs != conf_idx)) {
+ dev_err(dev, "dai_link or codec_config wrong (%d/%d, %d/%d)\n",
+ card->num_links, link_idx, card->num_configs, conf_idx);
+ ret = -EINVAL;
+ goto parse_of_err;
+ }
+
ret = 0;
parse_of_err:
return ret;
}
-static int asoc_graph_get_dais_count(struct device *dev)
+static void asoc_graph_get_dais_count(struct device *dev,
+ int *link_num,
+ int *dais_num,
+ int *ccnf_num)
{
struct of_phandle_iterator it;
struct device_node *node = dev->of_node;
@@ -304,10 +327,48 @@ static int asoc_graph_get_dais_count(struct device *dev)
struct device_node *codec_ep;
struct device_node *codec_port;
struct device_node *codec_port_old;
- int count = 0;
+ struct device_node *codec_port_old2;
int rc;
+ /*
+ * link_num : number of links.
+ * CPU-Codec / CPU-dummy / dummy-Codec
+ * dais_num : number of DAIs
+ * ccnf_num : number of codec_conf
+ * (one per dummy-Codec link)
+ *
+ * ex1)
+ * CPU0 --- Codec0 link : 5
+ * CPU1 --- Codec1 dais : 7
+ * CPU2 -/ ccnf : 1
+ * CPU3 --- Codec2
+ *
+ * => 5 links = 2xCPU-Codec + 2xCPU-dummy + 1xdummy-Codec
+ * => 7 DAIs = 4xCPU + 3xCodec
+ * => 1 ccnf = 1xdummy-Codec
+ *
+ * ex2)
+ * CPU0 --- Codec0 link : 5
+ * CPU1 --- Codec1 dais : 6
+ * CPU2 -/ ccnf : 1
+ * CPU3 -/
+ *
+ * => 5 links = 1xCPU-Codec + 3xCPU-dummy + 1xdummy-Codec
+ * => 6 DAIs = 4xCPU + 2xCodec
+ * => 1 ccnf = 1xdummy-Codec
+ *
+ * ex3)
+ * CPU0 --- Codec0 link : 6
+ * CPU1 -/ dais : 6
+ * CPU2 --- Codec1 ccnf : 2
+ * CPU3 -/
+ *
+ * => 6 links = 0xCPU-Codec + 4xCPU-dummy + 2xdummy-Codec
+ * => 6 DAIs = 4xCPU + 2xCodec
+ * => 2 ccnf = 2xdummy-Codec
+ */
codec_port_old = NULL;
+ codec_port_old2 = NULL;
of_for_each_phandle(&it, rc, node, "dais", NULL, 0) {
cpu_port = it.node;
cpu_ep = of_get_next_child(cpu_port, NULL);
@@ -318,20 +379,22 @@ static int asoc_graph_get_dais_count(struct device *dev)
of_node_put(codec_ep);
of_node_put(codec_port);
- if (cpu_ep)
- count++;
+ (*link_num)++;
+ (*dais_num)++;
- if (!codec_port)
- continue;
+ if (codec_port_old == codec_port) {
+ if (codec_port_old2 != codec_port_old) {
+ (*link_num)++;
+ (*ccnf_num)++;
+ }
- if (codec_port_old == codec_port)
+ codec_port_old2 = codec_port_old;
continue;
+ }
- count++;
+ (*dais_num)++;
codec_port_old = codec_port;
}
-
- return count;
}
static int asoc_graph_card_probe(struct platform_device *pdev)
@@ -339,22 +402,27 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
struct graph_card_data *priv;
struct snd_soc_dai_link *dai_link;
struct graph_dai_props *dai_props;
+ struct asoc_simple_dai *dais;
struct device *dev = &pdev->dev;
struct snd_soc_card *card;
- int num, ret, i;
+ struct snd_soc_codec_conf *cconf;
+ int lnum = 0, dnum = 0, cnum = 0;
+ int ret, i;
/* Allocate the private data and the DAI link array */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- num = asoc_graph_get_dais_count(dev);
- if (num == 0)
+ asoc_graph_get_dais_count(dev, &lnum, &dnum, &cnum);
+ if (!lnum || !dnum)
return -EINVAL;
- dai_props = devm_kcalloc(dev, num, sizeof(*dai_props), GFP_KERNEL);
- dai_link = devm_kcalloc(dev, num, sizeof(*dai_link), GFP_KERNEL);
- if (!dai_props || !dai_link)
+ dai_props = devm_kcalloc(dev, lnum, sizeof(*dai_props), GFP_KERNEL);
+ dai_link = devm_kcalloc(dev, lnum, sizeof(*dai_link), GFP_KERNEL);
+ dais = devm_kcalloc(dev, dnum, sizeof(*dais), GFP_KERNEL);
+ cconf = devm_kcalloc(dev, cnum, sizeof(*cconf), GFP_KERNEL);
+ if (!dai_props || !dai_link || !dais)
return -ENOMEM;
/*
@@ -363,7 +431,7 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
* see
* soc-core.c :: snd_soc_init_multicodec()
*/
- for (i = 0; i < num; i++) {
+ for (i = 0; i < lnum; i++) {
dai_link[i].codecs = &dai_props[i].codecs;
dai_link[i].num_codecs = 1;
dai_link[i].platform = &dai_props[i].platform;
@@ -371,15 +439,17 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
priv->dai_props = dai_props;
priv->dai_link = dai_link;
+ priv->dais = dais;
+ priv->codec_conf = cconf;
/* Init snd_soc_card */
card = graph_priv_to_card(priv);
card->owner = THIS_MODULE;
card->dev = dev;
card->dai_link = priv->dai_link;
- card->num_links = num;
- card->codec_conf = &priv->codec_conf;
- card->num_configs = 1;
+ card->num_links = lnum;
+ card->codec_conf = cconf;
+ card->num_configs = cnum;
ret = asoc_graph_card_parse_of(priv);
if (ret < 0) {
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index f34cc6cddfa2..b807a47515eb 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -32,10 +32,11 @@ void asoc_simple_card_convert_fixup(struct asoc_simple_card_data *data,
}
EXPORT_SYMBOL_GPL(asoc_simple_card_convert_fixup);
-void asoc_simple_card_parse_convert(struct device *dev, char *prefix,
+void asoc_simple_card_parse_convert(struct device *dev,
+ struct device_node *np,
+ char *prefix,
struct asoc_simple_card_data *data)
{
- struct device_node *np = dev->of_node;
char prop[128];
if (!prefix)
@@ -151,21 +152,19 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
}
EXPORT_SYMBOL_GPL(asoc_simple_card_parse_card_name);
-static void asoc_simple_card_clk_register(struct asoc_simple_dai *dai,
- struct clk *clk)
-{
- dai->clk = clk;
-}
-
int asoc_simple_card_clk_enable(struct asoc_simple_dai *dai)
{
- return clk_prepare_enable(dai->clk);
+ if (dai)
+ return clk_prepare_enable(dai->clk);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(asoc_simple_card_clk_enable);
void asoc_simple_card_clk_disable(struct asoc_simple_dai *dai)
{
- clk_disable_unprepare(dai->clk);
+ if (dai)
+ clk_disable_unprepare(dai->clk);
}
EXPORT_SYMBOL_GPL(asoc_simple_card_clk_disable);
@@ -200,7 +199,7 @@ int asoc_simple_card_parse_clk(struct device *dev,
if (!IS_ERR(clk)) {
simple_dai->sysclk = clk_get_rate(clk);
- asoc_simple_card_clk_register(simple_dai, clk);
+ simple_dai->clk = clk;
} else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
simple_dai->sysclk = val;
} else {
@@ -272,13 +271,24 @@ static int asoc_simple_card_get_dai_id(struct device_node *ep)
{
struct device_node *node;
struct device_node *endpoint;
+ struct of_endpoint info;
int i, id;
int ret;
+ /* use driver-specified DAI ID if it exists */
ret = snd_soc_get_dai_id(ep);
if (ret != -ENOTSUPP)
return ret;
+ /* use endpoint/port reg if it exists */
+ ret = of_graph_parse_endpoint(ep, &info);
+ if (ret == 0) {
+ if (info.id)
+ return info.id;
+ if (info.port)
+ return info.port;
+ }
+
node = of_graph_get_port_parent(ep);
/*
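Aside: the hunk above inserts two fallbacks ahead of the positional counting below: the endpoint's own "reg" (info.id), then the enclosing port's "reg" (info.port), with zero treated as unspecified. A sketch of the resulting priority chain — struct ep and example_get_dai_id() are hypothetical stand-ins, not kernel API:

    #include <stdio.h>

    #define DRV_NOTSUPP -524    /* models -ENOTSUPP from snd_soc_get_dai_id() */

    struct ep { int driver_id; int ep_reg; int port_reg; int position; };

    static int example_get_dai_id(const struct ep *e)
    {
        if (e->driver_id != DRV_NOTSUPP)    /* driver-specified ID   */
            return e->driver_id;
        if (e->ep_reg)                      /* endpoint reg property */
            return e->ep_reg;
        if (e->port_reg)                    /* port reg property     */
            return e->port_reg;
        return e->position;                 /* fall back to counting */
    }

    int main(void)
    {
        struct ep e = { DRV_NOTSUPP, 0, 2, 5 };

        printf("%d\n", example_get_dai_id(&e)); /* 2: port reg wins here */
        return 0;
    }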
@@ -348,6 +358,9 @@ int asoc_simple_card_init_dai(struct snd_soc_dai *dai,
{
int ret;
+ if (!simple_dai)
+ return 0;
+
if (simple_dai->sysclk) {
ret = snd_soc_dai_set_sysclk(dai, 0, simple_dai->sysclk,
simple_dai->clk_direction);
@@ -415,8 +428,7 @@ int asoc_simple_card_clean_reference(struct snd_soc_card *card)
EXPORT_SYMBOL_GPL(asoc_simple_card_clean_reference);
int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
- char *prefix,
- int optional)
+ char *prefix)
{
struct device_node *node = card->dev->of_node;
char prop[128];
@@ -426,11 +438,8 @@ int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
snprintf(prop, sizeof(prop), "%s%s", prefix, "routing");
- if (!of_property_read_bool(node, prop)) {
- if (optional)
- return 0;
- return -EINVAL;
- }
+ if (!of_property_read_bool(node, prop))
+ return 0;
return snd_soc_of_parse_audio_routing(card, prop);
}
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 5a3f59aa4ba5..37e001cf9cd1 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -18,16 +18,19 @@
struct simple_card_data {
struct snd_soc_card snd_card;
struct simple_dai_props {
- struct asoc_simple_dai cpu_dai;
- struct asoc_simple_dai codec_dai;
+ struct asoc_simple_dai *cpu_dai;
+ struct asoc_simple_dai *codec_dai;
struct snd_soc_dai_link_component codecs; /* single codec */
struct snd_soc_dai_link_component platform;
+ struct asoc_simple_card_data adata;
+ struct snd_soc_codec_conf *codec_conf;
unsigned int mclk_fs;
} *dai_props;
- unsigned int mclk_fs;
struct asoc_simple_jack hp_jack;
struct asoc_simple_jack mic_jack;
struct snd_soc_dai_link *dai_link;
+ struct asoc_simple_dai *dais;
+ struct snd_soc_codec_conf *codec_conf;
};
#define simple_priv_to_card(priv) (&(priv)->snd_card)
@@ -47,13 +50,13 @@ static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
simple_priv_to_props(priv, rtd->num);
int ret;
- ret = asoc_simple_card_clk_enable(&dai_props->cpu_dai);
+ ret = asoc_simple_card_clk_enable(dai_props->cpu_dai);
if (ret)
return ret;
- ret = asoc_simple_card_clk_enable(&dai_props->codec_dai);
+ ret = asoc_simple_card_clk_enable(dai_props->codec_dai);
if (ret)
- asoc_simple_card_clk_disable(&dai_props->cpu_dai);
+ asoc_simple_card_clk_disable(dai_props->cpu_dai);
return ret;
}
@@ -65,14 +68,17 @@ static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
- asoc_simple_card_clk_disable(&dai_props->cpu_dai);
+ asoc_simple_card_clk_disable(dai_props->cpu_dai);
- asoc_simple_card_clk_disable(&dai_props->codec_dai);
+ asoc_simple_card_clk_disable(dai_props->codec_dai);
}
static int asoc_simple_set_clk_rate(struct asoc_simple_dai *simple_dai,
unsigned long rate)
{
+ if (!simple_dai)
+ return 0;
+
if (!simple_dai->clk)
return 0;
@@ -94,19 +100,17 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
unsigned int mclk, mclk_fs = 0;
int ret = 0;
- if (priv->mclk_fs)
- mclk_fs = priv->mclk_fs;
- else if (dai_props->mclk_fs)
+ if (dai_props->mclk_fs)
mclk_fs = dai_props->mclk_fs;
if (mclk_fs) {
mclk = params_rate(params) * mclk_fs;
- ret = asoc_simple_set_clk_rate(&dai_props->codec_dai, mclk);
+ ret = asoc_simple_set_clk_rate(dai_props->codec_dai, mclk);
if (ret < 0)
return ret;
- ret = asoc_simple_set_clk_rate(&dai_props->cpu_dai, mclk);
+ ret = asoc_simple_set_clk_rate(dai_props->cpu_dai, mclk);
if (ret < 0)
return ret;
@@ -134,33 +138,169 @@ static const struct snd_soc_ops asoc_simple_card_ops = {
static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
{
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *codec = rtd->codec_dai;
- struct snd_soc_dai *cpu = rtd->cpu_dai;
- struct simple_dai_props *dai_props =
- simple_priv_to_props(priv, rtd->num);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->num);
int ret;
- ret = asoc_simple_card_init_dai(codec, &dai_props->codec_dai);
+ ret = asoc_simple_card_init_dai(rtd->codec_dai,
+ dai_props->codec_dai);
if (ret < 0)
return ret;
- ret = asoc_simple_card_init_dai(cpu, &dai_props->cpu_dai);
+ ret = asoc_simple_card_init_dai(rtd->cpu_dai,
+ dai_props->cpu_dai);
if (ret < 0)
return ret;
return 0;
}
-static int asoc_simple_card_dai_link_of(struct device_node *node,
+static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->num);
+
+ asoc_simple_card_convert_fixup(&dai_props->adata, params);
+
+ return 0;
+}
+
+static int asoc_simple_card_dai_link_of_dpcm(struct device_node *top,
+ struct device_node *node,
+ struct device_node *np,
+ struct device_node *codec,
+ struct simple_card_data *priv,
+ int *dai_idx, int link_idx,
+ int *conf_idx, int is_fe,
+ bool is_top_level_node)
+{
+ struct device *dev = simple_priv_to_dev(priv);
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, link_idx);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, link_idx);
+ struct asoc_simple_dai *dai;
+ struct snd_soc_dai_link_component *codecs = dai_link->codecs;
+
+ char prop[128];
+ char *prefix = "";
+ int ret;
+
+ /* For single DAI link & old style of DT node */
+ if (is_top_level_node)
+ prefix = PREFIX;
+
+ if (is_fe) {
+ int is_single_links = 0;
+
+ /* BE is dummy */
+ codecs->of_node = NULL;
+ codecs->dai_name = "snd-soc-dummy-dai";
+ codecs->name = "snd-soc-dummy";
+
+ /* FE settings */
+ dai_link->dynamic = 1;
+ dai_link->dpcm_merged_format = 1;
+
+ dai =
+ dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
+
+ ret = asoc_simple_card_parse_cpu(np, dai_link, DAI, CELL,
+ &is_single_links);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_card_parse_clk_cpu(dev, np, dai_link, dai);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "fe.%s",
+ dai_link->cpu_dai_name);
+ if (ret < 0)
+ return ret;
+
+ asoc_simple_card_canonicalize_cpu(dai_link, is_single_links);
+ } else {
+ struct snd_soc_codec_conf *cconf;
+
+ /* FE is dummy */
+ dai_link->cpu_of_node = NULL;
+ dai_link->cpu_dai_name = "snd-soc-dummy-dai";
+ dai_link->cpu_name = "snd-soc-dummy";
+
+ /* BE settings */
+ dai_link->no_pcm = 1;
+ dai_link->be_hw_params_fixup = asoc_simple_card_be_hw_params_fixup;
+
+ dai =
+ dai_props->codec_dai = &priv->dais[(*dai_idx)++];
+
+ cconf =
+ dai_props->codec_conf = &priv->codec_conf[(*conf_idx)++];
+
+ ret = asoc_simple_card_parse_codec(np, dai_link, DAI, CELL);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_parse_clk_codec(dev, np, dai_link, dai);
+ if (ret < 0)
+ return ret;
+
+ ret = asoc_simple_card_set_dailink_name(dev, dai_link,
+ "be.%s",
+ codecs->dai_name);
+ if (ret < 0)
+ return ret;
+
+ /* check "prefix" from top node */
+ snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node,
+ PREFIX "prefix");
+ snd_soc_of_parse_node_prefix(node, cconf, codecs->of_node,
+ "prefix");
+ snd_soc_of_parse_node_prefix(np, cconf, codecs->of_node,
+ "prefix");
+ }
+
+ asoc_simple_card_parse_convert(dev, top, PREFIX, &dai_props->adata);
+ asoc_simple_card_parse_convert(dev, node, prefix, &dai_props->adata);
+ asoc_simple_card_parse_convert(dev, np, NULL, &dai_props->adata);
+
+ ret = asoc_simple_card_of_parse_tdm(np, dai);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_card_canonicalize_dailink(dai_link);
+ if (ret < 0)
+ return ret;
+
+ snprintf(prop, sizeof(prop), "%smclk-fs", prefix);
+ of_property_read_u32(top, PREFIX "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(node, prop, &dai_props->mclk_fs);
+ of_property_read_u32(np, prop, &dai_props->mclk_fs);
+
+ ret = asoc_simple_card_parse_daifmt(dev, node, codec,
+ prefix, &dai_link->dai_fmt);
+ if (ret < 0)
+ return ret;
+
+ dai_link->dpcm_playback = 1;
+ dai_link->dpcm_capture = 1;
+ dai_link->ops = &asoc_simple_card_ops;
+ dai_link->init = asoc_simple_card_dai_init;
+
+ return 0;
+}
+
+static int asoc_simple_card_dai_link_of(struct device_node *top,
+ struct device_node *node,
struct simple_card_data *priv,
- int idx,
+ int *dai_idx, int link_idx,
bool is_top_level_node)
{
struct device *dev = simple_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, idx);
- struct asoc_simple_dai *cpu_dai = &dai_props->cpu_dai;
- struct asoc_simple_dai *codec_dai = &dai_props->codec_dai;
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, link_idx);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, link_idx);
+ struct asoc_simple_dai *cpu_dai;
+ struct asoc_simple_dai *codec_dai;
struct device_node *cpu = NULL;
struct device_node *plat = NULL;
struct device_node *codec = NULL;
@@ -193,12 +333,21 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
goto dai_link_of_err;
}
+ cpu_dai =
+ dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
+ codec_dai =
+ dai_props->codec_dai = &priv->dais[(*dai_idx)++];
+
ret = asoc_simple_card_parse_daifmt(dev, node, codec,
prefix, &dai_link->dai_fmt);
if (ret < 0)
goto dai_link_of_err;
- of_property_read_u32(node, "mclk-fs", &dai_props->mclk_fs);
+ snprintf(prop, sizeof(prop), "%smclk-fs", prefix);
+ of_property_read_u32(top, PREFIX "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(node, prop, &dai_props->mclk_fs);
+ of_property_read_u32(cpu, prop, &dai_props->mclk_fs);
+ of_property_read_u32(codec, prop, &dai_props->mclk_fs);
ret = asoc_simple_card_parse_cpu(cpu, dai_link,
DAI, CELL, &single_cpu);
@@ -286,61 +435,148 @@ static int asoc_simple_card_parse_aux_devs(struct device_node *node,
static int asoc_simple_card_parse_of(struct simple_card_data *priv)
{
struct device *dev = simple_priv_to_dev(priv);
+ struct device_node *top = dev->of_node;
struct snd_soc_card *card = simple_priv_to_card(priv);
- struct device_node *dai_link;
- struct device_node *node = dev->of_node;
- int ret;
-
- if (!node)
+ struct device_node *node;
+ struct device_node *np;
+ struct device_node *codec;
+ bool is_fe;
+ int ret, loop;
+ int dai_idx, link_idx, conf_idx;
+
+ if (!top)
return -EINVAL;
- dai_link = of_get_child_by_name(node, PREFIX "dai-link");
-
ret = asoc_simple_card_of_parse_widgets(card, PREFIX);
if (ret < 0)
- goto card_parse_end;
+ return ret;
- ret = asoc_simple_card_of_parse_routing(card, PREFIX, 1);
+ ret = asoc_simple_card_of_parse_routing(card, PREFIX);
if (ret < 0)
- goto card_parse_end;
-
- /* Factor to mclk, used in hw_params() */
- of_property_read_u32(node, PREFIX "mclk-fs", &priv->mclk_fs);
+ return ret;
/* Single/Muti DAI link(s) & New style of DT node */
- if (dai_link) {
- struct device_node *np = NULL;
- int i = 0;
-
- for_each_child_of_node(node, np) {
- dev_dbg(dev, "\tlink %d:\n", i);
- ret = asoc_simple_card_dai_link_of(np, priv,
- i, false);
- if (ret < 0) {
- of_node_put(np);
- goto card_parse_end;
+ loop = 1;
+ link_idx = 0;
+ dai_idx = 0;
+ conf_idx = 0;
+ node = of_get_child_by_name(top, PREFIX "dai-link");
+ if (!node) {
+ node = dev->of_node;
+ loop = 0;
+ }
+
+ do {
+ /* DPCM */
+ if (of_get_child_count(node) > 2) {
+ for_each_child_of_node(node, np) {
+ codec = of_get_child_by_name(node,
+ loop ? "codec" :
+ PREFIX "codec");
+ if (!codec)
+ return -ENODEV;
+
+ is_fe = (np != codec);
+
+ ret = asoc_simple_card_dai_link_of_dpcm(
+ top, node, np, codec, priv,
+ &dai_idx, link_idx++, &conf_idx,
+ is_fe, !loop);
}
- i++;
+ } else {
+ ret = asoc_simple_card_dai_link_of(
+ top, node, priv,
+ &dai_idx, link_idx++, !loop);
}
- } else {
- /* For single DAI link & old style of DT node */
- ret = asoc_simple_card_dai_link_of(node, priv, 0, true);
if (ret < 0)
- goto card_parse_end;
- }
+ return ret;
+
+ node = of_get_next_child(top, node);
+ } while (loop && node);
ret = asoc_simple_card_parse_card_name(card, PREFIX);
if (ret < 0)
- goto card_parse_end;
-
- ret = asoc_simple_card_parse_aux_devs(node, priv);
+ return ret;
-card_parse_end:
- of_node_put(dai_link);
+ ret = asoc_simple_card_parse_aux_devs(top, priv);
return ret;
}
+static void asoc_simple_card_get_dais_count(struct device *dev,
+ int *link_num,
+ int *dais_num,
+ int *ccnf_num)
+{
+ struct device_node *top = dev->of_node;
+ struct device_node *node;
+ int loop;
+ int num;
+
+ /*
+ * link_num : number of links.
+ * CPU-Codec / CPU-dummy / dummy-Codec
+ * dais_num : number of DAIs
+ * ccnf_num : number of codec_conf
+ * (one per "dummy-Codec" link)
+ *
+ * ex1)
+ * CPU0 --- Codec0 link : 5
+ * CPU1 --- Codec1 dais : 7
+ * CPU2 -/ ccnf : 1
+ * CPU3 --- Codec2
+ *
+ * => 5 links = 2xCPU-Codec + 2xCPU-dummy + 1xdummy-Codec
+ * => 7 DAIs = 4xCPU + 3xCodec
+ * => 1 ccnf = 1xdummy-Codec
+ *
+ * ex2)
+ * CPU0 --- Codec0 link : 5
+ * CPU1 --- Codec1 dais : 6
+ * CPU2 -/ ccnf : 1
+ * CPU3 -/
+ *
+ * => 5 links = 1xCPU-Codec + 3xCPU-dummy + 1xdummy-Codec
+ * => 6 DAIs = 4xCPU + 2xCodec
+ * => 1 ccnf = 1xdummy-Codec
+ *
+ * ex3)
+ * CPU0 --- Codec0 link : 6
+ * CPU1 -/ dais : 6
+ * CPU2 --- Codec1 ccnf : 2
+ * CPU3 -/
+ *
+ * => 6 links = 0xCPU-Codec + 4xCPU-dummy + 2xdummy-Codec
+ * => 6 DAIs = 4xCPU + 2xCodec
+ * => 2 ccnf = 2xdummy-Codec
+ */
+ if (!top) {
+ (*link_num) = 1;
+ (*dais_num) = 2;
+ (*ccnf_num) = 0;
+ return;
+ }
+
+ loop = 1;
+ node = of_get_child_by_name(top, PREFIX "dai-link");
+ if (!node) {
+ node = top;
+ loop = 0;
+ }
+
+ do {
+ num = of_get_child_count(node);
+ (*dais_num) += num;
+ if (num > 2) {
+ (*link_num) += num;
+ (*ccnf_num)++;
+ } else {
+ (*link_num)++;
+ }
+ node = of_get_next_child(top, node);
+ } while (loop && node);
+}
+
static int asoc_simple_soc_card_probe(struct snd_soc_card *card)
{
struct simple_card_data *priv = snd_soc_card_get_drvdata(card);
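Aside: unlike the graph variants, simple-card's counter above works from child counts: a dai-link node whose children are { cpu0, cpu1, cpu2, codec } has more than two children, so it is treated as DPCM (one FE link per CPU plus one BE link, one DAI per child, one codec_conf), while a two-child node stays a plain CPU-Codec pair. A sketch reproducing ex2 from the comment:

    #include <stdio.h>

    static void count_node(int children, int *link, int *dais, int *ccnf)
    {
        *dais += children;
        if (children > 2) {     /* multi CPU: DPCM */
            *link += children;
            (*ccnf)++;
        } else {                /* plain CPU-Codec pair */
            (*link)++;
        }
    }

    int main(void)
    {
        int link = 0, dais = 0, ccnf = 0;

        count_node(2, &link, &dais, &ccnf); /* CPU0 --- Codec0    */
        count_node(4, &link, &dais, &ccnf); /* CPU1..3 --- Codec1 */
        printf("link:%d dais:%d ccnf:%d\n",
               link, dais, ccnf);           /* link:5 dais:6 ccnf:1 */
        return 0;
    }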
@@ -362,25 +598,28 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
struct simple_card_data *priv;
struct snd_soc_dai_link *dai_link;
struct simple_dai_props *dai_props;
+ struct asoc_simple_dai *dais;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct snd_soc_card *card;
- int num, ret, i;
-
- /* Get the number of DAI links */
- if (np && of_get_child_by_name(np, PREFIX "dai-link"))
- num = of_get_child_count(np);
- else
- num = 1;
+ struct snd_soc_codec_conf *cconf;
+ int lnum = 0, dnum = 0, cnum = 0;
+ int ret, i;
/* Allocate the private data and the DAI link array */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- dai_props = devm_kcalloc(dev, num, sizeof(*dai_props), GFP_KERNEL);
- dai_link = devm_kcalloc(dev, num, sizeof(*dai_link), GFP_KERNEL);
- if (!dai_props || !dai_link)
+ asoc_simple_card_get_dais_count(dev, &lnum, &dnum, &cnum);
+ if (!lnum || !dnum)
+ return -EINVAL;
+
+ dai_props = devm_kcalloc(dev, lnum, sizeof(*dai_props), GFP_KERNEL);
+ dai_link = devm_kcalloc(dev, lnum, sizeof(*dai_link), GFP_KERNEL);
+ dais = devm_kcalloc(dev, dnum, sizeof(*dais), GFP_KERNEL);
+ cconf = devm_kcalloc(dev, cnum, sizeof(*cconf), GFP_KERNEL);
+ if (!dai_props || !dai_link || !dais)
return -ENOMEM;
/*
@@ -389,7 +628,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
* see
* soc-core.c :: snd_soc_init_multicodec()
*/
- for (i = 0; i < num; i++) {
+ for (i = 0; i < lnum; i++) {
dai_link[i].codecs = &dai_props[i].codecs;
dai_link[i].num_codecs = 1;
dai_link[i].platform = &dai_props[i].platform;
@@ -397,13 +636,17 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
priv->dai_props = dai_props;
priv->dai_link = dai_link;
+ priv->dais = dais;
+ priv->codec_conf = cconf;
/* Init snd_soc_card */
card = simple_priv_to_card(priv);
card->owner = THIS_MODULE;
card->dev = dev;
card->dai_link = priv->dai_link;
- card->num_links = num;
+ card->num_links = lnum;
+ card->codec_conf = cconf;
+ card->num_configs = cnum;
card->probe = asoc_simple_soc_card_probe;
if (np && of_device_is_available(np)) {
@@ -419,6 +662,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
struct asoc_simple_card_info *cinfo;
struct snd_soc_dai_link_component *codecs;
struct snd_soc_dai_link_component *platform;
+ int dai_idx = 0;
cinfo = dev->platform_data;
if (!cinfo) {
@@ -435,6 +679,9 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
return -EINVAL;
}
+ dai_props->cpu_dai = &priv->dais[dai_idx++];
+ dai_props->codec_dai = &priv->dais[dai_idx++];
+
codecs = dai_link->codecs;
codecs->name = cinfo->codec;
codecs->dai_name = cinfo->codec_dai.name;
@@ -448,10 +695,10 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
dai_link->cpu_dai_name = cinfo->cpu_dai.name;
dai_link->dai_fmt = cinfo->daifmt;
dai_link->init = asoc_simple_card_dai_init;
- memcpy(&priv->dai_props->cpu_dai, &cinfo->cpu_dai,
- sizeof(priv->dai_props->cpu_dai));
- memcpy(&priv->dai_props->codec_dai, &cinfo->codec_dai,
- sizeof(priv->dai_props->codec_dai));
+ memcpy(priv->dai_props->cpu_dai, &cinfo->cpu_dai,
+ sizeof(*priv->dai_props->cpu_dai));
+ memcpy(priv->dai_props->codec_dai, &cinfo->codec_dai,
+ sizeof(*priv->dai_props->codec_dai));
}
snd_soc_card_set_drvdata(card, priv);
@@ -476,6 +723,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
static const struct of_device_id asoc_simple_of_match[] = {
{ .compatible = "simple-audio-card", },
+ { .compatible = "simple-scu-audio-card", },
{},
};
MODULE_DEVICE_TABLE(of, asoc_simple_of_match);
diff --git a/sound/soc/generic/simple-scu-card.c b/sound/soc/generic/simple-scu-card.c
index 85b46f0eae0f..9d7299d536a8 100644
--- a/sound/soc/generic/simple-scu-card.c
+++ b/sound/soc/generic/simple-scu-card.c
@@ -21,14 +21,18 @@
struct simple_card_data {
struct snd_soc_card snd_card;
- struct snd_soc_codec_conf codec_conf;
struct simple_dai_props {
- struct asoc_simple_dai dai;
+ struct asoc_simple_dai *cpu_dai;
+ struct asoc_simple_dai *codec_dai;
struct snd_soc_dai_link_component codecs;
struct snd_soc_dai_link_component platform;
+ struct asoc_simple_card_data adata;
+ struct snd_soc_codec_conf *codec_conf;
} *dai_props;
struct snd_soc_dai_link *dai_link;
+ struct asoc_simple_dai *dais;
struct asoc_simple_card_data adata;
+ struct snd_soc_codec_conf *codec_conf;
};
#define simple_priv_to_card(priv) (&(priv)->snd_card)
@@ -46,8 +50,17 @@ static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
+ int ret;
+
+ ret = asoc_simple_card_clk_enable(dai_props->cpu_dai);
+ if (ret)
+ return ret;
+
+ ret = asoc_simple_card_clk_enable(dai_props->codec_dai);
+ if (ret)
+ asoc_simple_card_clk_disable(dai_props->cpu_dai);
- return asoc_simple_card_clk_enable(&dai_props->dai);
+ return ret;
}
static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
@@ -57,7 +70,9 @@ static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
- asoc_simple_card_clk_disable(&dai_props->dai);
+ asoc_simple_card_clk_disable(dai_props->cpu_dai);
+
+ asoc_simple_card_clk_disable(dai_props->codec_dai);
}
static const struct snd_soc_ops asoc_simple_card_ops = {
@@ -67,42 +82,57 @@ static const struct snd_soc_ops asoc_simple_card_ops = {
static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
{
- struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct snd_soc_dai *dai;
- struct snd_soc_dai_link *dai_link;
- struct simple_dai_props *dai_props;
- int num = rtd->num;
+ struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->num);
+ int ret;
+
+ ret = asoc_simple_card_init_dai(rtd->codec_dai,
+ dai_props->codec_dai);
+ if (ret < 0)
+ return ret;
- dai_link = simple_priv_to_link(priv, num);
- dai_props = simple_priv_to_props(priv, num);
- dai = dai_link->dynamic ?
- rtd->cpu_dai :
- rtd->codec_dai;
+ ret = asoc_simple_card_init_dai(rtd->cpu_dai,
+ dai_props->cpu_dai);
+ if (ret < 0)
+ return ret;
- return asoc_simple_card_init_dai(dai, &dai_props->dai);
+ return 0;
}
static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_pcm_hw_params *params)
{
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, rtd->num);
+
+ asoc_simple_card_convert_fixup(&dai_props->adata, params);
+ /* overwrite with top-level adata if it exists */
asoc_simple_card_convert_fixup(&priv->adata, params);
return 0;
}
-static int asoc_simple_card_dai_link_of(struct device_node *np,
+static int asoc_simple_card_dai_link_of(struct device_node *link,
+ struct device_node *np,
+ struct device_node *codec,
struct simple_card_data *priv,
- unsigned int daifmt,
- int idx, bool is_fe)
+ int *dai_idx, int link_idx,
+ int *conf_idx, int is_fe,
+ bool is_top_level_node)
{
struct device *dev = simple_priv_to_dev(priv);
- struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
- struct simple_dai_props *dai_props = simple_priv_to_props(priv, idx);
+ struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, link_idx);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, link_idx);
struct snd_soc_card *card = simple_priv_to_card(priv);
+ struct asoc_simple_dai *dai;
+ char *prefix = "";
int ret;
+ /* For single DAI link & old style of DT node */
+ if (is_top_level_node)
+ prefix = PREFIX;
+
if (is_fe) {
int is_single_links = 0;
struct snd_soc_dai_link_component *codecs;
@@ -117,12 +147,15 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
dai_link->dynamic = 1;
dai_link->dpcm_merged_format = 1;
+ dai =
+ dai_props->cpu_dai = &priv->dais[(*dai_idx)++];
+
ret = asoc_simple_card_parse_cpu(np, dai_link, DAI, CELL,
&is_single_links);
if (ret)
return ret;
- ret = asoc_simple_card_parse_clk_cpu(dev, np, dai_link, &dai_props->dai);
+ ret = asoc_simple_card_parse_clk_cpu(dev, np, dai_link, dai);
if (ret < 0)
return ret;
@@ -134,6 +167,8 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
asoc_simple_card_canonicalize_cpu(dai_link, is_single_links);
} else {
+ struct snd_soc_codec_conf *cconf;
+
/* FE is dummy */
dai_link->cpu_of_node = NULL;
dai_link->cpu_dai_name = "snd-soc-dummy-dai";
@@ -143,11 +178,17 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
dai_link->no_pcm = 1;
dai_link->be_hw_params_fixup = asoc_simple_card_be_hw_params_fixup;
+ dai =
+ dai_props->codec_dai = &priv->dais[(*dai_idx)++];
+
+ cconf =
+ dai_props->codec_conf = &priv->codec_conf[(*conf_idx)++];
+
ret = asoc_simple_card_parse_codec(np, dai_link, DAI, CELL);
if (ret < 0)
return ret;
- ret = asoc_simple_card_parse_clk_codec(dev, np, dai_link, &dai_props->dai);
+ ret = asoc_simple_card_parse_clk_codec(dev, np, dai_link, dai);
if (ret < 0)
return ret;
@@ -157,13 +198,20 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
if (ret < 0)
return ret;
- snd_soc_of_parse_audio_prefix(card,
- &priv->codec_conf,
+ /* check "prefix" from top node */
+ snd_soc_of_parse_audio_prefix(card, cconf,
dai_link->codecs->of_node,
PREFIX "prefix");
+ /* check "prefix" from each node if top doesn't have */
+ if (!cconf->of_node)
+ snd_soc_of_parse_node_prefix(np, cconf,
+ dai_link->codecs->of_node,
+ "prefix");
}
- ret = asoc_simple_card_of_parse_tdm(np, &dai_props->dai);
+ asoc_simple_card_parse_convert(dev, link, prefix, &dai_props->adata);
+
+ ret = asoc_simple_card_of_parse_tdm(np, dai);
if (ret)
return ret;
@@ -171,7 +219,11 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
if (ret < 0)
return ret;
- dai_link->dai_fmt = daifmt;
+ ret = asoc_simple_card_parse_daifmt(dev, link, codec,
+ prefix, &dai_link->dai_fmt);
+ if (ret < 0)
+ return ret;
+
dai_link->dpcm_playback = 1;
dai_link->dpcm_capture = 1;
dai_link->ops = &asoc_simple_card_ops;
@@ -184,52 +236,136 @@ static int asoc_simple_card_parse_of(struct simple_card_data *priv)
{
struct device *dev = simple_priv_to_dev(priv);
+ struct device_node *top = dev->of_node;
+ struct device_node *node;
struct device_node *np;
+ struct device_node *codec;
struct snd_soc_card *card = simple_priv_to_card(priv);
- struct device_node *node = dev->of_node;
- unsigned int daifmt = 0;
bool is_fe;
- int ret, i;
+ int ret, loop;
+ int dai_idx, link_idx, conf_idx;
- if (!node)
+ if (!top)
return -EINVAL;
ret = asoc_simple_card_of_parse_widgets(card, PREFIX);
if (ret < 0)
return ret;
- ret = asoc_simple_card_of_parse_routing(card, PREFIX, 0);
+ ret = asoc_simple_card_of_parse_routing(card, PREFIX);
if (ret < 0)
return ret;
- asoc_simple_card_parse_convert(dev, PREFIX, &priv->adata);
+ asoc_simple_card_parse_convert(dev, top, PREFIX, &priv->adata);
- /* find 1st codec */
- np = of_get_child_by_name(node, PREFIX "codec");
- if (!np)
- return -ENODEV;
+ loop = 1;
+ link_idx = 0;
+ dai_idx = 0;
+ conf_idx = 0;
+ node = of_get_child_by_name(top, PREFIX "dai-link");
+ if (!node) {
+ node = dev->of_node;
+ loop = 0;
+ }
+
+ do {
+ codec = of_get_child_by_name(node,
+ loop ? "codec" : PREFIX "codec");
+ if (!codec)
+ return -ENODEV;
+
+ for_each_child_of_node(node, np) {
+ is_fe = (np != codec);
+
+ ret = asoc_simple_card_dai_link_of(node, np, codec, priv,
+ &dai_idx, link_idx++,
+ &conf_idx,
+ is_fe, !loop);
+ if (ret < 0)
+ return ret;
+ }
+ node = of_get_next_child(top, node);
+ } while (loop && node);
- ret = asoc_simple_card_parse_daifmt(dev, node, np, PREFIX, &daifmt);
+ ret = asoc_simple_card_parse_card_name(card, PREFIX);
if (ret < 0)
return ret;
- i = 0;
- for_each_child_of_node(node, np) {
- is_fe = false;
- if (strcmp(np->name, PREFIX "cpu") == 0)
- is_fe = true;
+ return 0;
+}
- ret = asoc_simple_card_dai_link_of(np, priv, daifmt, i, is_fe);
- if (ret < 0)
- return ret;
- i++;
+static void asoc_simple_card_get_dais_count(struct device *dev,
+ int *link_num,
+ int *dais_num,
+ int *ccnf_num)
+{
+ struct device_node *top = dev->of_node;
+ struct device_node *node;
+ int loop;
+ int num;
+
+ /*
+ * link_num : number of links.
+ * CPU-Codec / CPU-dummy / dummy-Codec
+ * dais_num : number of DAIs
+ * ccnf_num : number of codec_conf
+ * same number for "dummy-Codec"
+ *
+ * ex1)
+ * CPU0 --- Codec0 link : 5
+ * CPU1 --- Codec1 dais : 7
+ * CPU2 -/ ccnf : 1
+ * CPU3 --- Codec2
+ *
+ * => 5 links = 2xCPU-Codec + 2xCPU-dummy + 1xdummy-Codec
+ * => 7 DAIs = 4xCPU + 3xCodec
+ * => 1 ccnf = 1xdummy-Codec
+ *
+ * ex2)
+ * CPU0 --- Codec0 link : 5
+ * CPU1 --- Codec1 dais : 6
+ * CPU2 -/ ccnf : 1
+ * CPU3 -/
+ *
+ * => 5 links = 1xCPU-Codec + 3xCPU-dummy + 1xdummy-Codec
+ * => 6 DAIs = 4xCPU + 2xCodec
+ * => 1 ccnf = 1xdummy-Codec
+ *
+ * ex3)
+ * CPU0 --- Codec0 link : 6
+ * CPU1 -/ dais : 6
+ * CPU2 --- Codec1 ccnf : 2
+ * CPU3 -/
+ *
+ * => 6 links = 0xCPU-Codec + 4xCPU-dummy + 2xdummy-Codec
+ * => 6 DAIs = 4xCPU + 2xCodec
+ * => 2 ccnf = 2xdummy-Codec
+ */
+ if (!top) {
+ (*link_num) = 1;
+ (*dais_num) = 2;
+ (*ccnf_num) = 0;
+ return;
}
- ret = asoc_simple_card_parse_card_name(card, PREFIX);
- if (ret < 0)
- return ret;
+ loop = 1;
+ node = of_get_child_by_name(top, PREFIX "dai-link");
+ if (!node) {
+ node = top;
+ loop = 0;
+ }
- return 0;
+ do {
+ num = of_get_child_count(node);
+ (*dais_num) += num;
+ if (num > 2) {
+ (*link_num) += num;
+ (*ccnf_num)++;
+ } else {
+ (*link_num)++;
+ }
+ node = of_get_next_child(top, node);
+ } while (loop && node);
}
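The counting rule above is compact enough to misread, so here is a standalone C model (hypothetical, not kernel code) that replays it against ex1-ex3: a link node with more than two children becomes a DPCM group, contributing one link per child plus one codec_conf, while any other node is a single CPU-Codec link.

#include <stdio.h>

static void count(const int *children, int n_nodes,
		  int *links, int *dais, int *ccnf)
{
	*links = *dais = *ccnf = 0;
	for (int i = 0; i < n_nodes; i++) {
		int num = children[i];	/* CPU subnodes + 1 codec subnode */

		*dais += num;
		if (num > 2) {		/* DPCM: one link per child + 1 codec_conf */
			*links += num;
			(*ccnf)++;
		} else {		/* plain CPU-Codec link */
			(*links)++;
		}
	}
}

int main(void)
{
	const int ex1[] = { 2, 3, 2 };	/* ex1) above */
	const int ex2[] = { 2, 4 };	/* ex2) */
	const int ex3[] = { 3, 3 };	/* ex3) */
	int l, d, c;

	count(ex1, 3, &l, &d, &c);
	printf("ex1: link=%d dais=%d ccnf=%d\n", l, d, c);	/* 5 7 1 */
	count(ex2, 2, &l, &d, &c);
	printf("ex2: link=%d dais=%d ccnf=%d\n", l, d, c);	/* 5 6 1 */
	count(ex3, 2, &l, &d, &c);
	printf("ex3: link=%d dais=%d ccnf=%d\n", l, d, c);	/* 6 6 2 */
	return 0;
}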
static int asoc_simple_card_probe(struct platform_device *pdev)
@@ -237,21 +373,27 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
struct simple_card_data *priv;
struct snd_soc_dai_link *dai_link;
struct simple_dai_props *dai_props;
+ struct asoc_simple_dai *dais;
struct snd_soc_card *card;
+ struct snd_soc_codec_conf *cconf;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- int num, ret, i;
+ int ret, i;
+ int lnum = 0, dnum = 0, cnum = 0;
/* Allocate the private data */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- num = of_get_child_count(np);
+ asoc_simple_card_get_dais_count(dev, &lnum, &dnum, &cnum);
+ if (!lnum || !dnum)
+ return -EINVAL;
- dai_props = devm_kcalloc(dev, num, sizeof(*dai_props), GFP_KERNEL);
- dai_link = devm_kcalloc(dev, num, sizeof(*dai_link), GFP_KERNEL);
- if (!dai_props || !dai_link)
+ dai_props = devm_kcalloc(dev, lnum, sizeof(*dai_props), GFP_KERNEL);
+ dai_link = devm_kcalloc(dev, lnum, sizeof(*dai_link), GFP_KERNEL);
+ dais = devm_kcalloc(dev, dnum, sizeof(*dais), GFP_KERNEL);
+ cconf = devm_kcalloc(dev, cnum, sizeof(*cconf), GFP_KERNEL);
+ if (!dai_props || !dai_link || !dais || !cconf)
return -ENOMEM;
/*
@@ -260,7 +402,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
* see
* soc-core.c :: snd_soc_init_multicodec()
*/
- for (i = 0; i < num; i++) {
+ for (i = 0; i < lnum; i++) {
dai_link[i].codecs = &dai_props[i].codecs;
dai_link[i].num_codecs = 1;
dai_link[i].platform = &dai_props[i].platform;
@@ -268,15 +410,17 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
priv->dai_props = dai_props;
priv->dai_link = dai_link;
+ priv->dais = dais;
+ priv->codec_conf = cconf;
/* Init snd_soc_card */
card = simple_priv_to_card(priv);
card->owner = THIS_MODULE;
card->dev = dev;
card->dai_link = priv->dai_link;
- card->num_links = num;
- card->codec_conf = &priv->codec_conf;
- card->num_configs = 1;
+ card->num_links = lnum;
+ card->codec_conf = cconf;
+ card->num_configs = cnum;
ret = asoc_simple_card_parse_of(priv);
if (ret < 0) {
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 0caa1f4eb94d..2fd1b61e8331 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -101,22 +101,107 @@ config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
codec, then enable this option by saying Y or m. This is a
recommended option.
+config SND_SOC_INTEL_SKYLAKE
+ tristate "All Skylake/SST Platforms"
+ depends on PCI && ACPI
+ select SND_SOC_INTEL_SKL
+ select SND_SOC_INTEL_APL
+ select SND_SOC_INTEL_KBL
+ select SND_SOC_INTEL_GLK
+ select SND_SOC_INTEL_CNL
+ select SND_SOC_INTEL_CFL
+ help
+ This is a backwards-compatible option to select all devices
+ supported by the Intel SST/Skylake driver. This option is no
+ longer recommended and will be deprecated when the SOF
+ driver is introduced. Distributions should explicitly
+ select which platform uses this driver.
+
+config SND_SOC_INTEL_SKL
+ tristate "Skylake Platforms"
+ depends on PCI && ACPI
+ select SND_SOC_INTEL_SKYLAKE_FAMILY
+ help
+ If you have an Intel Skylake platform with the DSP enabled
+ in the BIOS then enable this option by saying Y or m.
+
+config SND_SOC_INTEL_APL
+ tristate "Broxton/ApolloLake Platforms"
+ depends on PCI && ACPI
+ select SND_SOC_INTEL_SKYLAKE_FAMILY
+ help
+ If you have an Intel Broxton/ApolloLake platform with the DSP
+ enabled in the BIOS then enable this option by saying Y or m.
+
+config SND_SOC_INTEL_KBL
+ tristate "Kabylake Platforms"
+ depends on PCI && ACPI
+ select SND_SOC_INTEL_SKYLAKE_FAMILY
+ help
+ If you have an Intel Kabylake platform with the DSP
+ enabled in the BIOS then enable this option by saying Y or m.
+
+config SND_SOC_INTEL_GLK
+ tristate "GeminiLake Platforms"
+ depends on PCI && ACPI
+ select SND_SOC_INTEL_SKYLAKE_FAMILY
+ help
+ If you have an Intel GeminiLake platform with the DSP
+ enabled in the BIOS then enable this option by saying Y or m.
+
+config SND_SOC_INTEL_CNL
+ tristate "CannonLake/WhiskyLake Platforms"
+ depends on PCI && ACPI
+ select SND_SOC_INTEL_SKYLAKE_FAMILY
+ help
+ If you have an Intel CNL/WHL platform with the DSP
+ enabled in the BIOS then enable this option by saying Y or m.
+
+config SND_SOC_INTEL_CFL
+ tristate "CoffeeLake Platforms"
+ depends on PCI && ACPI
+ select SND_SOC_INTEL_SKYLAKE_FAMILY
+ help
+ If you have an Intel CoffeeLake platform with the DSP
+ enabled in the BIOS then enable this option by saying Y or m.
+
+config SND_SOC_INTEL_SKYLAKE_FAMILY
+ tristate
+ select SND_SOC_INTEL_SKYLAKE_COMMON
+
+if SND_SOC_INTEL_SKYLAKE_FAMILY
+
config SND_SOC_INTEL_SKYLAKE_SSP_CLK
tristate
-config SND_SOC_INTEL_SKYLAKE
- tristate "SKL/BXT/KBL/GLK/CNL... Platforms"
- depends on PCI && ACPI
+config SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
+ bool "HDAudio codec support"
+ help
+ If you have an Intel Skylake/Broxton/ApolloLake/KabyLake/
+ GeminiLake or CannonLake platform with an HDAudio codec
+ then enable this option by saying Y.
+
+config SND_SOC_INTEL_SKYLAKE_COMMON
+ tristate
select SND_HDA_EXT_CORE
select SND_HDA_DSP_LOADER
select SND_SOC_TOPOLOGY
select SND_SOC_INTEL_SST
+ select SND_SOC_HDAC_HDA if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
+ select SND_HDA_INTEL_DSP_DETECTION_SKL if SND_SOC_INTEL_SKL
+ select SND_HDA_INTEL_DSP_DETECTION_APL if SND_SOC_INTEL_APL
+ select SND_HDA_INTEL_DSP_DETECTION_KBL if SND_SOC_INTEL_KBL
+ select SND_HDA_INTEL_DSP_DETECTION_GLK if SND_SOC_INTEL_GLK
+ select SND_HDA_INTEL_DSP_DETECTION_CNL if SND_SOC_INTEL_CNL
+ select SND_HDA_INTEL_DSP_DETECTION_CFL if SND_SOC_INTEL_CFL
select SND_SOC_ACPI_INTEL_MATCH
help
If you have an Intel Skylake/Broxton/ApolloLake/KabyLake/
GeminiLake or CannonLake platform with the DSP enabled in the BIOS
then enable this option by saying Y or m.
+endif ## SND_SOC_INTEL_SKYLAKE_FAMILY
+
config SND_SOC_ACPI_INTEL_MATCH
tristate
select SND_SOC_ACPI if ACPI
diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c
index c90b04cc071d..ac542535b9d5 100644
--- a/sound/soc/intel/atom/sst/sst_acpi.c
+++ b/sound/soc/intel/atom/sst/sst_acpi.c
@@ -341,6 +341,10 @@ static int sst_acpi_probe(struct platform_device *pdev)
byt_rvp_platform_data.res_info = &bytcr_res_info;
}
+ /* update machine parameters */
+ mach->mach_params.acpi_ipc_irq_index =
+ pdata->res_info->acpi_ipc_irq_index;
+
plat_dev = platform_device_register_data(dev, pdata->platform, -1,
NULL, 0);
if (IS_ERR(plat_dev)) {
diff --git a/sound/soc/intel/atom/sst/sst_loader.c b/sound/soc/intel/atom/sst/sst_loader.c
index 27413ebae956..b8c456753f01 100644
--- a/sound/soc/intel/atom/sst/sst_loader.c
+++ b/sound/soc/intel/atom/sst/sst_loader.c
@@ -354,14 +354,14 @@ static int sst_request_fw(struct intel_sst_drv *sst)
const struct firmware *fw;
retval = request_firmware(&fw, sst->firmware_name, sst->dev);
- if (fw == NULL) {
- dev_err(sst->dev, "fw is returning as null\n");
- return -EINVAL;
- }
if (retval) {
dev_err(sst->dev, "request fw failed %d\n", retval);
return retval;
}
+ if (fw == NULL) {
+ dev_err(sst->dev, "fw is returning as null\n");
+ return -EINVAL;
+ }
mutex_lock(&sst->sst_lock);
retval = sst_cache_and_parse_fw(sst, fw);
mutex_unlock(&sst->sst_lock);
diff --git a/sound/soc/intel/atom/sst/sst_pvt.c b/sound/soc/intel/atom/sst/sst_pvt.c
index af93244b4868..00a37a09dc9b 100644
--- a/sound/soc/intel/atom/sst/sst_pvt.c
+++ b/sound/soc/intel/atom/sst/sst_pvt.c
@@ -166,11 +166,11 @@ int sst_create_ipc_msg(struct ipc_post **arg, bool large)
{
struct ipc_post *msg;
- msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC);
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
if (large) {
- msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
+ msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_KERNEL);
if (!msg->mailbox_data) {
kfree(msg);
return -ENOMEM;
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index 73ca1350aa31..0a7e40d06395 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -172,7 +172,7 @@ config SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH
endif ## SND_SST_ATOM_HIFI2_PLATFORM
-if SND_SOC_INTEL_SKYLAKE
+if SND_SOC_INTEL_SKL
config SND_SOC_INTEL_SKL_RT286_MACH
tristate "SKL with RT286 I2S mode"
@@ -212,6 +212,10 @@ config SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH
Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
+endif ## SND_SOC_INTEL_SKL
+
+if SND_SOC_INTEL_APL
+
config SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH
tristate "Broxton with DA7219 and MAX98357A in I2S Mode"
depends on MFD_INTEL_LPSS && I2C && ACPI
@@ -239,6 +243,10 @@ config SND_SOC_INTEL_BXT_RT298_MACH
Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
+endif ## SND_SOC_INTEL_APL
+
+if SND_SOC_INTEL_KBL
+
config SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH
tristate "KBL with RT5663 and MAX98927 in I2S Mode"
depends on MFD_INTEL_LPSS && I2C && ACPI
@@ -293,15 +301,19 @@ config SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH
Say Y if you have such a device.
If unsure select "N".
-config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
- tristate "SKL/KBL/BXT/APL with HDA Codecs"
+config SND_SOC_INTEL_KBL_RT5660_MACH
+ tristate "KBL with RT5660 in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_RT5660
select SND_SOC_HDAC_HDMI
- select SND_SOC_HDAC_HDA
help
- This adds support for ASoC machine driver for Intel platforms
- SKL/KBL/BXT/APL with iDisp, HDA audio codecs.
- Say Y or m if you have such a device. This is a recommended option.
- If unsure select "N".
+ This adds support for the ASoC Onboard Codec I2S machine driver. This
+ will create an ALSA sound card for the RT5660 I2S audio codec.
+ Say Y if you have such a device.
+
+endif ## SND_SOC_INTEL_KBL
+
+if SND_SOC_INTEL_GLK
config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
tristate "GLK with RT5682 and MAX98357A in I2S Mode"
@@ -317,6 +329,20 @@ config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
Say Y if you have such a device.
If unsure select "N".
-endif ## SND_SOC_INTEL_SKYLAKE
+endif ## SND_SOC_INTEL_GLK
+
+if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
+
+config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
+ tristate "SKL/KBL/BXT/APL with HDA Codecs"
+ select SND_SOC_HDAC_HDMI
+ # SND_SOC_HDAC_HDA is already selected
+ help
+ This adds support for ASoC machine driver for Intel platforms
+ SKL/KBL/BXT/APL with iDisp, HDA audio codecs.
+ Say Y or m if you have such a device. This is a recommended option.
+ If unsure select "N".
+
+endif ## SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
endif ## SND_SOC_INTEL_MACH
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 5381e27df9cc..bf072ea299b7 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -20,6 +20,7 @@ snd-soc-kbl_da7219_max98357a-objs := kbl_da7219_max98357a.o
snd-soc-kbl_da7219_max98927-objs := kbl_da7219_max98927.o
snd-soc-kbl_rt5663_max98927-objs := kbl_rt5663_max98927.o
snd-soc-kbl_rt5663_rt5514_max98927-objs := kbl_rt5663_rt5514_max98927.o
+snd-soc-kbl_rt5660-objs := kbl_rt5660.o
snd-soc-skl_rt286-objs := skl_rt286.o
snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o
snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH) += snd-soc-kbl_da7219_max9
obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH) += snd-soc-kbl_da7219_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH) += snd-soc-kbl_rt5663_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH) += snd-soc-kbl_rt5663_rt5514_max98927.o
+obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5660_MACH) += snd-soc-kbl_rt5660.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_RT286_MACH) += snd-soc-skl_rt286.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH) += snd-skl_nau88l25_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH) += snd-soc-skl_nau88l25_ssm4567.o
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 8587bd3d1cc1..a22366ce33c4 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -29,7 +29,6 @@
#include <linux/input.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
-#include <asm/platform_sst_audio.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -674,6 +673,33 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF2 |
BYT_RT5640_MCLK_EN),
},
+ { /* Point of View Mobii TAB-P1005W-232 (V2.0) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "POV"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "I102A"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
+ {
+ /* Prowise PT301 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Prowise"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PT301"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
@@ -1152,10 +1178,7 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
* (will be overridden if DMI quirk is detected)
*/
if (is_valleyview()) {
- struct sst_platform_info *p_info = mach->pdata;
- const struct sst_res_info *res_info = p_info->res_info;
-
- if (res_info->acpi_ipc_irq_index == 0)
+ if (mach->mach_params.acpi_ipc_irq_index == 0)
is_bytcr = true;
}
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index c44298130720..e528995668b7 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -32,7 +32,6 @@
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
-#include <asm/platform_sst_audio.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -920,10 +919,7 @@ static int snd_byt_rt5651_mc_probe(struct platform_device *pdev)
* (will be overridden if DMI quirk is detected)
*/
if (x86_match_cpu(baytrail_cpu_ids)) {
- struct sst_platform_info *p_info = mach->pdata;
- const struct sst_res_info *res_info = p_info->res_info;
-
- if (res_info->acpi_ipc_irq_index == 0)
+ if (mach->mach_params.acpi_ipc_irq_index == 0)
is_bytcr = true;
}
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index db6976f4ddaa..08a5152e635a 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -19,6 +19,7 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
+#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -35,6 +36,8 @@
#define CHT_PLAT_CLK_3_HZ 19200000
#define CHT_CODEC_DAI "HiFi"
+#define QUIRK_PMC_PLT_CLK_0 0x01
+
struct cht_mc_private {
struct clk *mclk;
struct snd_soc_jack jack;
@@ -385,11 +388,43 @@ static struct snd_soc_card snd_soc_card_cht = {
.num_controls = ARRAY_SIZE(cht_mc_controls),
};
+static const struct dmi_system_id cht_max98090_quirk_table[] = {
+ {
+ /* Clapper model Chromebook */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Clapper"),
+ },
+ .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
+ },
+ {
+ /* Gnawty model Chromebook (Acer Chromebook CB3-111) */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Gnawty"),
+ },
+ .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
+ },
+ {
+ /* Swanky model Chromebook (Toshiba Chromebook 2) */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Swanky"),
+ },
+ .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
+ },
+ {}
+};
+
static int snd_cht_mc_probe(struct platform_device *pdev)
{
+ const struct dmi_system_id *dmi_id;
struct device *dev = &pdev->dev;
int ret_val = 0;
struct cht_mc_private *drv;
+ const char *mclk_name;
+ int quirks = 0;
+
+ dmi_id = dmi_first_match(cht_max98090_quirk_table);
+ if (dmi_id)
+ quirks = (unsigned long)dmi_id->driver_data;
drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
@@ -411,11 +446,16 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
snd_soc_card_cht.dev = &pdev->dev;
snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
- drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+ if (quirks & QUIRK_PMC_PLT_CLK_0)
+ mclk_name = "pmc_plt_clk_0";
+ else
+ mclk_name = "pmc_plt_clk_3";
+
+ drv->mclk = devm_clk_get(&pdev->dev, mclk_name);
if (IS_ERR(drv->mclk)) {
dev_err(&pdev->dev,
- "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
- PTR_ERR(drv->mclk));
+ "Failed to get MCLK from %s: %ld\n",
+ mclk_name, PTR_ERR(drv->mclk));
return PTR_ERR(drv->mclk);
}
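The probe change above keys the MCLK name off a DMI quirk table. A small userspace analogue of the selection logic follows; the product names and flag mirror the table in the diff, and pick_mclk() stands in for dmi_first_match() plus the clock-name branch:

#include <stdio.h>
#include <string.h>

#define QUIRK_PMC_PLT_CLK_0 0x01

struct quirk { const char *product; unsigned long flags; };

static const struct quirk table[] = {
	{ "Clapper", QUIRK_PMC_PLT_CLK_0 },
	{ "Gnawty",  QUIRK_PMC_PLT_CLK_0 },
	{ "Swanky",  QUIRK_PMC_PLT_CLK_0 },
	{ NULL, 0 },
};

static const char *pick_mclk(const char *product)
{
	unsigned long quirks = 0;
	const struct quirk *q;

	for (q = table; q->product; q++)
		if (!strcmp(q->product, product)) {	/* first match wins */
			quirks = q->flags;
			break;
		}
	return (quirks & QUIRK_PMC_PLT_CLK_0) ? "pmc_plt_clk_0"
					      : "pmc_plt_clk_3";
}

int main(void)
{
	printf("%s\n", pick_mclk("Swanky"));	/* pmc_plt_clk_0 */
	printf("%s\n", pick_mclk("Other"));	/* pmc_plt_clk_3, the default */
	return 0;
}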
diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
index f5a5ea6a093c..250a356a0cbf 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
@@ -27,7 +27,6 @@
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
-#include <asm/platform_sst_audio.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -585,10 +584,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
* (will be overridden if DMI quirk is detected)
*/
if (is_valleyview()) {
- struct sst_platform_info *p_info = mach->pdata;
- const struct sst_res_info *res_info = p_info->res_info;
-
- if (res_info->acpi_ipc_irq_index == 0)
+ if (mach->mach_params.acpi_ipc_irq_index == 0)
is_bytcr = true;
}
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index 51f0d45d6f8f..9de64f447e7b 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -403,7 +403,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
const char *i2c_name;
int i;
- drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_ATOMIC);
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
if (!drv)
return -ENOMEM;
diff --git a/sound/soc/intel/boards/glk_rt5682_max98357a.c b/sound/soc/intel/boards/glk_rt5682_max98357a.c
index c4b94e2617c5..c74c4f17316f 100644
--- a/sound/soc/intel/boards/glk_rt5682_max98357a.c
+++ b/sound/soc/intel/boards/glk_rt5682_max98357a.c
@@ -603,7 +603,7 @@ static int geminilake_audio_probe(struct platform_device *pdev)
{
struct glk_card_private *ctx;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
index 3fa1c3ca6d37..723a4935ed76 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
@@ -262,9 +262,9 @@ static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
jack = &ctx->kabylake_headset;
snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
- snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
da7219_aad_jack_det(component, &ctx->kabylake_headset);
@@ -441,7 +441,7 @@ static int kabylake_refcap_startup(struct snd_pcm_substream *substream)
}
-static struct snd_soc_ops skylaye_refcap_ops = {
+static struct snd_soc_ops skylake_refcap_ops = {
.startup = kabylake_refcap_startup,
};
@@ -525,7 +525,7 @@ static struct snd_soc_dai_link kabylake_dais[] = {
.dpcm_capture = 1,
.nonatomic = 1,
.dynamic = 1,
- .ops = &skylaye_refcap_ops,
+ .ops = &skylake_refcap_ops,
},
[KBL_DPCM_AUDIO_DMIC_CP] = {
.name = "Kbl Audio DMIC cap",
@@ -736,7 +736,7 @@ static struct snd_soc_dai_link kabylake_max98927_dais[] = {
.dpcm_capture = 1,
.nonatomic = 1,
.dynamic = 1,
- .ops = &skylaye_refcap_ops,
+ .ops = &skylake_refcap_ops,
},
[KBL_DPCM_AUDIO_DMIC_CP] = {
.name = "Kbl Audio DMIC cap",
@@ -935,7 +935,7 @@ static int kabylake_audio_probe(struct platform_device *pdev)
{
struct kbl_codec_private *ctx;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
diff --git a/sound/soc/intel/boards/kbl_rt5660.c b/sound/soc/intel/boards/kbl_rt5660.c
new file mode 100644
index 000000000000..3255e0029276
--- /dev/null
+++ b/sound/soc/intel/boards/kbl_rt5660.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018-19 Canonical Corporation.
+
+/*
+ * Intel Kabylake I2S Machine Driver with RT5660 Codec
+ *
+ * Modified from:
+ * Intel Kabylake I2S Machine driver supporting MAXIM98357a and
+ * DA7219 codecs
+ * Also referred to:
+ * Intel Broadwell I2S Machine driver supporting RT5677 codec
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/acpi.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include "../../codecs/hdac_hdmi.h"
+#include "../../codecs/rt5660.h"
+
+#define KBL_RT5660_CODEC_DAI "rt5660-aif1"
+#define DUAL_CHANNEL 2
+
+static struct snd_soc_card *kabylake_audio_card;
+static struct snd_soc_jack skylake_hdmi[3];
+static struct snd_soc_jack lineout_jack;
+static struct snd_soc_jack mic_jack;
+
+struct kbl_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+struct kbl_codec_private {
+ struct gpio_desc *gpio_lo_mute;
+ struct list_head hdmi_pcm_list;
+};
+
+enum {
+ KBL_DPCM_AUDIO_PB = 0,
+ KBL_DPCM_AUDIO_CP,
+ KBL_DPCM_AUDIO_HDMI1_PB,
+ KBL_DPCM_AUDIO_HDMI2_PB,
+ KBL_DPCM_AUDIO_HDMI3_PB,
+};
+
+#define GPIO_LINEOUT_MUTE_INDEX 0
+#define GPIO_LINEOUT_DET_INDEX 3
+#define GPIO_LINEIN_DET_INDEX 4
+
+static const struct acpi_gpio_params lineout_mute_gpio = { GPIO_LINEOUT_MUTE_INDEX, 0, true };
+static const struct acpi_gpio_params lineout_det_gpio = { GPIO_LINEOUT_DET_INDEX, 0, false };
+static const struct acpi_gpio_params mic_det_gpio = { GPIO_LINEIN_DET_INDEX, 0, false };
+
+static const struct acpi_gpio_mapping acpi_rt5660_gpios[] = {
+ { "lineout-mute-gpios", &lineout_mute_gpio, 1 },
+ { "lineout-det-gpios", &lineout_det_gpio, 1 },
+ { "mic-det-gpios", &mic_det_gpio, 1 },
+ { NULL },
+};
+
+static struct snd_soc_jack_pin lineout_jack_pin = {
+ .pin = "Line Out",
+ .mask = SND_JACK_LINEOUT,
+};
+
+static struct snd_soc_jack_pin mic_jack_pin = {
+ .pin = "Line In",
+ .mask = SND_JACK_MICROPHONE,
+};
+
+static struct snd_soc_jack_gpio lineout_jack_gpio = {
+ .name = "lineout-det",
+ .report = SND_JACK_LINEOUT,
+ .debounce_time = 200,
+};
+
+static struct snd_soc_jack_gpio mic_jack_gpio = {
+ .name = "mic-det",
+ .report = SND_JACK_MICROPHONE,
+ .debounce_time = 200,
+};
+
+static int kabylake_5660_event_lineout(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct kbl_codec_private *priv = snd_soc_card_get_drvdata(dapm->card);
+
+ gpiod_set_value_cansleep(priv->gpio_lo_mute,
+ !(SND_SOC_DAPM_EVENT_ON(event)));
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new kabylake_rt5660_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Line In"),
+ SOC_DAPM_PIN_SWITCH("Line Out"),
+};
+
+static const struct snd_soc_dapm_widget kabylake_rt5660_widgets[] = {
+ SND_SOC_DAPM_MIC("Line In", NULL),
+ SND_SOC_DAPM_LINE("Line Out", kabylake_5660_event_lineout),
+};
+
+static const struct snd_soc_dapm_route kabylake_rt5660_map[] = {
+ /* other jacks */
+ {"IN1P", NULL, "Line In"},
+ {"IN2P", NULL, "Line In"},
+ {"Line Out", NULL, "LOUTR"},
+ {"Line Out", NULL, "LOUTL"},
+
+ /* CODEC BE connections */
+ { "AIF1 Playback", NULL, "ssp0 Tx"},
+ { "ssp0 Tx", NULL, "codec0_out"},
+
+ { "codec0_in", NULL, "ssp0 Rx" },
+ { "ssp0 Rx", NULL, "AIF1 Capture" },
+
+ { "hifi1", NULL, "iDisp1 Tx"},
+ { "iDisp1 Tx", NULL, "iDisp1_out"},
+ { "hifi2", NULL, "iDisp2 Tx"},
+ { "iDisp2 Tx", NULL, "iDisp2_out"},
+ { "hifi3", NULL, "iDisp3 Tx"},
+ { "iDisp3 Tx", NULL, "iDisp3_out"},
+};
+
+static int kabylake_ssp0_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+ /* The ADSP will convert the FE rate to 48k, stereo */
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = DUAL_CHANNEL;
+
+ /* set SSP0 to 24 bit */
+ snd_mask_none(fmt);
+ snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
+
+ return 0;
+}
+
+static int kabylake_rt5660_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+
+ ret = devm_acpi_dev_add_driver_gpios(component->dev, acpi_rt5660_gpios);
+ if (ret)
+ dev_warn(component->dev, "Failed to add driver gpios\n");
+
+ /* Request the rt5660 GPIO for lineout mute control; return on failure */
+ ctx->gpio_lo_mute = devm_gpiod_get(component->dev, "lineout-mute",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->gpio_lo_mute)) {
+ dev_err(component->dev, "Can't find GPIO_MUTE# gpio\n");
+ return PTR_ERR(ctx->gpio_lo_mute);
+ }
+
+ /* Create and initialize the lineout jack; it is optional, so don't return on failure */
+ ret = snd_soc_card_jack_new(rtd->card, "Lineout Jack",
+ SND_JACK_LINEOUT, &lineout_jack,
+ &lineout_jack_pin, 1);
+ if (ret)
+ dev_warn(component->dev, "Can't create Lineout jack\n");
+ else {
+ lineout_jack_gpio.gpiod_dev = component->dev;
+ ret = snd_soc_jack_add_gpios(&lineout_jack, 1,
+ &lineout_jack_gpio);
+ if (ret)
+ dev_warn(component->dev, "Can't add Lineout jack gpio\n");
+ }
+
+ /* Create and initialize the mic jack; it is optional, so don't return on failure */
+ ret = snd_soc_card_jack_new(rtd->card, "Mic Jack",
+ SND_JACK_MICROPHONE, &mic_jack,
+ &mic_jack_pin, 1);
+ if (ret)
+ dev_warn(component->dev, "Can't create mic jack\n");
+ else {
+ mic_jack_gpio.gpiod_dev = component->dev;
+ ret = snd_soc_jack_add_gpios(&mic_jack, 1, &mic_jack_gpio);
+ if (ret)
+ dev_warn(component->dev, "Can't add mic jack gpio\n");
+ }
+
+ /* Enable these DAPM pins in advance to reduce pop noise when recording via line-in */
+ snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
+ snd_soc_dapm_force_enable_pin(dapm, "BST1");
+ snd_soc_dapm_force_enable_pin(dapm, "BST2");
+
+ return 0;
+}
+
+static int kabylake_hdmi_init(struct snd_soc_pcm_runtime *rtd, int device)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct kbl_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = device;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int kabylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI1_PB);
+}
+
+static int kabylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI2_PB);
+}
+
+static int kabylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI3_PB);
+}
+
+static int kabylake_rt5660_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ ret = snd_soc_dai_set_sysclk(codec_dai,
+ RT5660_SCLK_S_PLL1, params_rate(params) * 512,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(rtd->dev, "snd_soc_dai_set_sysclk err = %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ RT5660_PLL1_S_BCLK,
+ params_rate(params) * 50,
+ params_rate(params) * 512);
+ if (ret < 0)
+ dev_err(codec_dai->dev, "can't set codec pll: %d\n", ret);
+
+ return ret;
+}
+
+static struct snd_soc_ops kabylake_rt5660_ops = {
+ .hw_params = kabylake_rt5660_hw_params,
+};
+
+static const unsigned int rates[] = {
+ 48000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static const unsigned int channels[] = {
+ DUAL_CHANNEL,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static int kbl_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /*
+ * On this platform, the PCM device supports:
+ * 48 kHz
+ * stereo
+ * 16-bit audio
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+
+ return 0;
+}
+
+static const struct snd_soc_ops kabylake_rt5660_fe_ops = {
+ .startup = kbl_fe_startup,
+};
+
+/* kabylake digital audio interface glue - connects rt5660 codec <--> CPU */
+static struct snd_soc_dai_link kabylake_rt5660_dais[] = {
+ /* Front End DAI links */
+ [KBL_DPCM_AUDIO_PB] = {
+ .name = "Kbl Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &kabylake_rt5660_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_CP] = {
+ .name = "Kbl Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &kabylake_rt5660_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Kbl HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Kbl HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI3_PB] = {
+ .name = "Kbl HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP0 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codec_name = "i2c-10EC3277:00",
+ .codec_dai_name = KBL_RT5660_CODEC_DAI,
+ .init = kabylake_rt5660_codec_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp0_fixup,
+ .ops = &kabylake_rt5660_ops,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 1,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = kabylake_hdmi1_init,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 2,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi2_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 3,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi3_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+#define NAME_SIZE 32
+static int kabylake_card_late_probe(struct snd_soc_card *card)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(card);
+ struct kbl_hdmi_pcm *pcm;
+ struct snd_soc_component *component = NULL;
+ int err, i = 0;
+ char jack_name[NAME_SIZE];
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ component = pcm->codec_dai->component;
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ err = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &skylake_hdmi[i],
+ NULL, 0);
+
+ if (err)
+ return err;
+
+ err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &skylake_hdmi[i]);
+ if (err < 0)
+ return err;
+
+ i++;
+
+ }
+
+ if (!component)
+ return -EINVAL;
+
+ return hdac_hdmi_jack_port_init(component, &card->dapm);
+}
+
+/* kabylake audio machine driver for rt5660 */
+static struct snd_soc_card kabylake_audio_card_rt5660 = {
+ .name = "kblrt5660",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_rt5660_dais,
+ .num_links = ARRAY_SIZE(kabylake_rt5660_dais),
+ .controls = kabylake_rt5660_controls,
+ .num_controls = ARRAY_SIZE(kabylake_rt5660_controls),
+ .dapm_widgets = kabylake_rt5660_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_rt5660_widgets),
+ .dapm_routes = kabylake_rt5660_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_rt5660_map),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+static int kabylake_audio_probe(struct platform_device *pdev)
+{
+ struct kbl_codec_private *ctx;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ kabylake_audio_card =
+ (struct snd_soc_card *)pdev->id_entry->driver_data;
+
+ kabylake_audio_card->dev = &pdev->dev;
+ snd_soc_card_set_drvdata(kabylake_audio_card, ctx);
+ return devm_snd_soc_register_card(&pdev->dev, kabylake_audio_card);
+}
+
+static const struct platform_device_id kbl_board_ids[] = {
+ {
+ .name = "kbl_rt5660",
+ .driver_data =
+ (kernel_ulong_t)&kabylake_audio_card_rt5660,
+ },
+ { }
+};
+
+static struct platform_driver kabylake_audio = {
+ .probe = kabylake_audio_probe,
+ .driver = {
+ .name = "kbl_rt5660",
+ .pm = &snd_soc_pm_ops,
+ },
+ .id_table = kbl_board_ids,
+};
+
+module_platform_driver(kabylake_audio);
+
+/* Module information */
+MODULE_DESCRIPTION("Audio Machine driver-RT5660 in I2S mode");
+MODULE_AUTHOR("Hui Wang <hui.wang@canonical.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kbl_rt5660");
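Worked numbers for the clocking in kabylake_rt5660_hw_params() above, assuming the 48 kHz rate that the front-end constraint pins down: PLL1 is fed from BCLK at fs x 50 and multiplied up to the fs x 512 sysclk. A throwaway check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int fs = 48000;	/* the only rate the FE constraint allows */
	unsigned int bclk = fs * 50;	/* PLL1 input (RT5660_PLL1_S_BCLK) */
	unsigned int sysclk = fs * 512;	/* PLL1 output (RT5660_SCLK_S_PLL1) */

	printf("bclk   = %u Hz\n", bclk);	/* 2400000 */
	printf("sysclk = %u Hz\n", sysclk);	/* 24576000 */
	printf("ratio  = %.2f\n", (double)sysclk / bclk);	/* 10.24 */
	return 0;
}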
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 99e1320c485f..d71475200b08 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -25,9 +25,9 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include "../../codecs/rt5663.h"
#include "../../codecs/hdac_hdmi.h"
-#include "../skylake/skl.h"
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -586,7 +586,7 @@ static int kabylake_refcap_startup(struct snd_pcm_substream *substream)
&constraints_16000);
}
-static struct snd_soc_ops skylaye_refcap_ops = {
+static struct snd_soc_ops skylake_refcap_ops = {
.startup = kabylake_refcap_startup,
};
@@ -655,7 +655,7 @@ static struct snd_soc_dai_link kabylake_dais[] = {
.dpcm_capture = 1,
.nonatomic = 1,
.dynamic = 1,
- .ops = &skylaye_refcap_ops,
+ .ops = &skylake_refcap_ops,
},
[KBL_DPCM_AUDIO_DMIC_CP] = {
.name = "Kbl Audio DMIC cap",
@@ -969,7 +969,7 @@ static struct snd_soc_card kabylake_audio_card_rt5663 = {
static int kabylake_audio_probe(struct platform_device *pdev)
{
struct kbl_rt5663_private *ctx;
- struct skl_machine_pdata *pdata;
+ struct snd_soc_acpi_mach *mach;
int ret;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
@@ -984,9 +984,9 @@ static int kabylake_audio_probe(struct platform_device *pdev)
kabylake_audio_card->dev = &pdev->dev;
snd_soc_card_set_drvdata(kabylake_audio_card, ctx);
- pdata = dev_get_drvdata(&pdev->dev);
- if (pdata)
- dmic_constraints = pdata->dmic_num == 2 ?
+ mach = pdev->dev.platform_data;
+ if (mach)
+ dmic_constraints = mach->mach_params.dmic_num == 2 ?
&constraints_dmic_2ch : &constraints_dmic_channels;
ctx->mclk = devm_clk_get(&pdev->dev, "ssp1_mclk");
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index a737c915d46a..7044d8c2b187 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -26,10 +26,10 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include "../../codecs/rt5514.h"
#include "../../codecs/rt5663.h"
#include "../../codecs/hdac_hdmi.h"
-#include "../skylake/skl.h"
#define KBL_REALTEK_CODEC_DAI "rt5663-aif"
#define KBL_REALTEK_DMIC_CODEC_DAI "rt5514-aif1"
@@ -648,7 +648,7 @@ static struct snd_soc_card kabylake_audio_card = {
static int kabylake_audio_probe(struct platform_device *pdev)
{
struct kbl_codec_private *ctx;
- struct skl_machine_pdata *pdata;
+ struct snd_soc_acpi_mach *mach;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -659,9 +659,9 @@ static int kabylake_audio_probe(struct platform_device *pdev)
kabylake_audio_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&kabylake_audio_card, ctx);
- pdata = dev_get_drvdata(&pdev->dev);
- if (pdata)
- dmic_constraints = pdata->dmic_num == 2 ?
+ mach = pdev->dev.platform_data;
+ if (mach)
+ dmic_constraints = mach->mach_params.dmic_num == 2 ?
&constraints_dmic_2ch : &constraints_dmic_channels;
return devm_snd_soc_register_card(&pdev->dev, &kabylake_audio_card);
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
index b415dd4c85f5..b9a21e64ead2 100644
--- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -12,8 +12,8 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include "../../codecs/hdac_hdmi.h"
-#include "../skylake/skl.h"
#include "skl_hda_dsp_common.h"
static const struct snd_soc_dapm_widget skl_hda_widgets[] = {
@@ -101,17 +101,17 @@ static struct snd_soc_card hda_soc_card = {
#define IDISP_ROUTE_COUNT (IDISP_DAI_COUNT * 2)
#define IDISP_CODEC_MASK 0x4
-static int skl_hda_fill_card_info(struct skl_machine_pdata *pdata)
+static int skl_hda_fill_card_info(struct snd_soc_acpi_mach_params *mach_params)
{
struct snd_soc_card *card = &hda_soc_card;
struct snd_soc_dai_link *dai_link;
u32 codec_count, codec_mask;
int i, num_links, num_route;
- codec_mask = pdata->codec_mask;
+ codec_mask = mach_params->codec_mask;
codec_count = hweight_long(codec_mask);
- if (codec_count == 1 && pdata->codec_mask & IDISP_CODEC_MASK) {
+ if (codec_count == 1 && codec_mask & IDISP_CODEC_MASK) {
num_links = IDISP_DAI_COUNT;
num_route = IDISP_ROUTE_COUNT;
} else if (codec_count == 2 && codec_mask & IDISP_CODEC_MASK) {
@@ -127,30 +127,30 @@ static int skl_hda_fill_card_info(struct skl_machine_pdata *pdata)
card->num_dapm_routes = num_route;
for_each_card_prelinks(card, i, dai_link)
- dai_link->platform_name = pdata->platform;
+ dai_link->platform_name = mach_params->platform;
return 0;
}
static int skl_hda_audio_probe(struct platform_device *pdev)
{
- struct skl_machine_pdata *pdata;
+ struct snd_soc_acpi_mach *mach;
struct skl_hda_private *ctx;
int ret;
dev_dbg(&pdev->dev, "%s: entry\n", __func__);
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_ATOMIC);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
- pdata = dev_get_drvdata(&pdev->dev);
- if (!pdata)
+ mach = pdev->dev.platform_data;
+ if (!mach)
return -EINVAL;
- ret = skl_hda_fill_card_info(pdata);
+ ret = skl_hda_fill_card_info(&mach->mach_params);
if (ret < 0) {
dev_err(&pdev->dev, "Unsupported HDAudio/iDisp configuration found\n");
return ret;
@@ -158,7 +158,7 @@ static int skl_hda_audio_probe(struct platform_device *pdev)
ctx->pcm_count = hda_soc_card.num_links;
ctx->dai_index = 1; /* hdmi codec dai name starts from index 1 */
- ctx->platform_name = pdata->platform;
+ ctx->platform_name = mach->mach_params.platform;
hda_soc_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&hda_soc_card, ctx);
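skl_hda_fill_card_info() above sizes the card from the codec mask alone. A userspace model of that decision follows; the full-set link count is elided by the hunk, so FULL_SET_LINKS below is a placeholder, not the driver's real value:

#include <stdio.h>

#define IDISP_CODEC_MASK 0x4
#define IDISP_DAI_COUNT  3
#define FULL_SET_LINKS   7	/* assumption, not taken from the diff */

static int pick_num_links(unsigned int codec_mask)
{
	int codec_count = __builtin_popcount(codec_mask);

	if (codec_count == 1 && (codec_mask & IDISP_CODEC_MASK))
		return IDISP_DAI_COUNT;		/* iDisp-only card */
	if (codec_count == 2 && (codec_mask & IDISP_CODEC_MASK))
		return FULL_SET_LINKS;		/* iDisp + one HDA codec */
	return -1;				/* unsupported configuration */
}

int main(void)
{
	printf("%d\n", pick_num_links(0x4));	/* 3 */
	printf("%d\n", pick_num_links(0x5));	/* 7 */
	printf("%d\n", pick_num_links(0x3));	/* -1: no iDisp codec */
	return 0;
}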
diff --git a/sound/soc/intel/boards/skl_nau88l25_max98357a.c b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
index d31482b8c9bb..0922106bd323 100644
--- a/sound/soc/intel/boards/skl_nau88l25_max98357a.c
+++ b/sound/soc/intel/boards/skl_nau88l25_max98357a.c
@@ -21,9 +21,9 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include "../../codecs/nau8825.h"
#include "../../codecs/hdac_hdmi.h"
-#include "../skylake/skl.h"
#define SKL_NUVOTON_CODEC_DAI "nau8825-hifi"
#define SKL_MAXIM_CODEC_DAI "HiFi"
@@ -400,7 +400,7 @@ static int skylake_refcap_startup(struct snd_pcm_substream *substream)
&constraints_16000);
}
-static const struct snd_soc_ops skylaye_refcap_ops = {
+static const struct snd_soc_ops skylake_refcap_ops = {
.startup = skylake_refcap_startup,
};
@@ -447,7 +447,7 @@ static struct snd_soc_dai_link skylake_dais[] = {
.dpcm_capture = 1,
.nonatomic = 1,
.dynamic = 1,
- .ops = &skylaye_refcap_ops,
+ .ops = &skylake_refcap_ops,
},
[SKL_DPCM_AUDIO_DMIC_CP] = {
.name = "Skl Audio DMIC cap",
@@ -641,7 +641,7 @@ static struct snd_soc_card skylake_audio_card = {
static int skylake_audio_probe(struct platform_device *pdev)
{
struct skl_nau8825_private *ctx;
- struct skl_machine_pdata *pdata;
+ struct snd_soc_acpi_mach *mach;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -652,9 +652,9 @@ static int skylake_audio_probe(struct platform_device *pdev)
skylake_audio_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&skylake_audio_card, ctx);
- pdata = dev_get_drvdata(&pdev->dev);
- if (pdata)
- dmic_constraints = pdata->dmic_num == 2 ?
+ mach = pdev->dev.platform_data;
+ if (mach)
+ dmic_constraints = mach->mach_params.dmic_num == 2 ?
&constraints_dmic_2ch : &constraints_dmic_channels;
return devm_snd_soc_register_card(&pdev->dev, &skylake_audio_card);
diff --git a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
index e877bb60beb1..8433c521d39f 100644
--- a/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
+++ b/sound/soc/intel/boards/skl_nau88l25_ssm4567.c
@@ -23,11 +23,11 @@
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
+#include <sound/soc-acpi.h>
#include <sound/jack.h>
#include <sound/pcm_params.h>
#include "../../codecs/nau8825.h"
#include "../../codecs/hdac_hdmi.h"
-#include "../skylake/skl.h"
#define SKL_NUVOTON_CODEC_DAI "nau8825-hifi"
#define SKL_SSM_CODEC_DAI "ssm4567-hifi"
@@ -449,7 +449,7 @@ static int skylake_refcap_startup(struct snd_pcm_substream *substream)
&constraints_16000);
}
-static const struct snd_soc_ops skylaye_refcap_ops = {
+static const struct snd_soc_ops skylake_refcap_ops = {
.startup = skylake_refcap_startup,
};
@@ -496,7 +496,7 @@ static struct snd_soc_dai_link skylake_dais[] = {
.dpcm_capture = 1,
.nonatomic = 1,
.dynamic = 1,
- .ops = &skylaye_refcap_ops,
+ .ops = &skylake_refcap_ops,
},
[SKL_DPCM_AUDIO_DMIC_CP] = {
.name = "Skl Audio DMIC cap",
@@ -694,7 +694,7 @@ static struct snd_soc_card skylake_audio_card = {
static int skylake_audio_probe(struct platform_device *pdev)
{
struct skl_nau88125_private *ctx;
- struct skl_machine_pdata *pdata;
+ struct snd_soc_acpi_mach *mach;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -705,9 +705,9 @@ static int skylake_audio_probe(struct platform_device *pdev)
skylake_audio_card.dev = &pdev->dev;
snd_soc_card_set_drvdata(&skylake_audio_card, ctx);
- pdata = dev_get_drvdata(&pdev->dev);
- if (pdata)
- dmic_constraints = pdata->dmic_num == 2 ?
+ mach = pdev->dev.platform_data;
+ if (mach)
+ dmic_constraints = mach->mach_params.dmic_num == 2 ?
&constraints_dmic_2ch : &constraints_dmic_channels;
return devm_snd_soc_register_card(&pdev->dev, &skylake_audio_card);
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index c1f50a079d34..56c81e20b5bf 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -7,7 +7,7 @@ snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-m
soc-acpi-intel-hsw-bdw-match.o \
soc-acpi-intel-skl-match.o soc-acpi-intel-kbl-match.o \
soc-acpi-intel-bxt-match.o soc-acpi-intel-glk-match.o \
- soc-acpi-intel-cnl-match.o \
+ soc-acpi-intel-cnl-match.o soc-acpi-intel-icl-match.o \
soc-acpi-intel-hda-match.o
obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
diff --git a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
index f39386e540d3..61dedc103b19 100644
--- a/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-bxt-match.c
@@ -6,9 +6,41 @@
*
*/
+#include <linux/dmi.h>
#include <sound/soc-acpi.h>
#include <sound/soc-acpi-intel-match.h>
+enum {
+ APL_RVP,
+};
+
+static const struct dmi_system_id apl_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+ DMI_MATCH(DMI_BOARD_NAME, "Apollolake RVP1A"),
+ },
+ .driver_data = (void *)(APL_RVP),
+ },
+ {}
+};
+
+static struct snd_soc_acpi_mach *apl_quirk(void *arg)
+{
+ struct snd_soc_acpi_mach *mach = arg;
+ const struct dmi_system_id *dmi_id;
+ unsigned long apl_machine_id;
+
+ dmi_id = dmi_first_match(apl_table);
+ if (dmi_id) {
+ apl_machine_id = (unsigned long)dmi_id->driver_data;
+ if (apl_machine_id == APL_RVP)
+ return NULL;
+ }
+
+ return mach;
+}
+
static struct snd_soc_acpi_codecs bxt_codecs = {
.num_codecs = 1,
.codecs = {"MX98357A"}
@@ -19,6 +51,9 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = {
.id = "INT343A",
.drv_name = "bxt_alc298s_i2s",
.fw_filename = "intel/dsp_fw_bxtn.bin",
+ .sof_fw_filename = "intel/sof-apl.ri",
+ .sof_tplg_filename = "intel/sof-apl-rt298.tplg",
+ .asoc_plat_name = "0000:00:0e.0",
},
{
.id = "DLGS7219",
@@ -47,6 +82,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[] = {
{
.id = "INT34C3",
.drv_name = "bxt_tdf8532",
+ .machine_quirk = apl_quirk,
.sof_fw_filename = "intel/sof-apl.ri",
.sof_tplg_filename = "intel/sof-apl-tdf8532.tplg",
.asoc_plat_name = "0000:00:0e.0",
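apl_quirk() above relies on the machine_quirk contract: the callback returns the mach to keep the entry, or NULL to make enumeration skip it, so the INT34C3 machine is dropped on Apollo Lake RVP boards. A hypothetical model with the DMI check stubbed out:

#include <stdio.h>

struct mach {
	const char *drv_name;
	struct mach *(*machine_quirk)(struct mach *mach);
};

static int dmi_says_apl_rvp;	/* stand-in for dmi_first_match() */

static struct mach *apl_quirk(struct mach *mach)
{
	return dmi_says_apl_rvp ? NULL : mach;
}

int main(void)
{
	struct mach m = { "bxt_tdf8532", apl_quirk };

	dmi_says_apl_rvp = 1;
	printf("%s\n", m.machine_quirk(&m) ? "bind" : "skip");	/* skip */
	dmi_says_apl_rvp = 0;
	printf("%s\n", m.machine_quirk(&m) ? "bind" : "skip");	/* bind */
	return 0;
}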
diff --git a/sound/soc/intel/common/soc-acpi-intel-icl-match.c b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
new file mode 100644
index 000000000000..33b441dca4d3
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-icl-match.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * soc-acpi-intel-icl-match.c - tables and support for ICL ACPI enumeration.
+ *
+ * Copyright (c) 2018, Intel Corporation.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include "../skylake/skl.h"
+
+static struct skl_machine_pdata icl_pdata = {
+ .use_tplg_pcm = true,
+};
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[] = {
+ {
+ .id = "INT34C2",
+ .drv_name = "icl_rt274",
+ .fw_filename = "intel/dsp_fw_icl.bin",
+ .pdata = &icl_pdata,
+ .sof_fw_filename = "intel/sof-icl.ri",
+ .sof_tplg_filename = "intel/sof-icl-rt274.tplg",
+ .asoc_plat_name = "0000:00:1f.3",
+ },
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_icl_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
index a317b7790fce..e6fa6f470526 100644
--- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
@@ -96,6 +96,16 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
.quirk_data = &kbl_7219_98927_codecs,
.pdata = &skl_dmic_data
},
+ {
+ .id = "10EC5660",
+ .drv_name = "kbl_rt5660",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ },
+ {
+ .id = "10EC3277",
+ .drv_name = "kbl_rt5660",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_kbl_machines);
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index 8bfb8b0fa3d5..b0e6fb93eaf8 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -247,6 +247,14 @@ static const struct skl_dsp_ops dsp_ops[] = {
.init_fw = cnl_sst_init_fw,
.cleanup = cnl_sst_dsp_cleanup
},
+ {
+ .id = 0xa348,
+ .num_cores = 4,
+ .loader_ops = bxt_get_loader_ops,
+ .init = cnl_sst_dsp_init,
+ .init_fw = cnl_sst_init_fw,
+ .cleanup = cnl_sst_dsp_cleanup
+ },
};
const struct skl_dsp_ops *skl_get_dsp_ops(int pci_id)
diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
index 01a050cf8775..5d125a3df527 100644
--- a/sound/soc/intel/skylake/skl-nhlt.c
+++ b/sound/soc/intel/skylake/skl-nhlt.c
@@ -180,6 +180,9 @@ int skl_get_dmic_geo(struct skl *skl)
unsigned int dmic_geo = 0;
u8 j;
+ if (!nhlt)
+ return 0;
+
epnt = (struct nhlt_endpoint *)nhlt->desc;
for (j = 0; j < nhlt->endpoint_count; j++) {
diff --git a/sound/soc/intel/skylake/skl-sst-ipc.c b/sound/soc/intel/skylake/skl-sst-ipc.c
index 5234fafb758a..9f3ce73593ae 100644
--- a/sound/soc/intel/skylake/skl-sst-ipc.c
+++ b/sound/soc/intel/skylake/skl-sst-ipc.c
@@ -249,6 +249,8 @@ enum skl_ipc_glb_reply {
IPC_GLB_REPLY_INVALID_CONFIG_DATA_LEN = 121,
IPC_GLB_REPLY_GATEWAY_NOT_INITIALIZED = 140,
IPC_GLB_REPLY_GATEWAY_NOT_EXIST = 141,
+ IPC_GLB_REPLY_SCLK_ALREADY_RUNNING = 150,
+ IPC_GLB_REPLY_MCLK_ALREADY_RUNNING = 151,
IPC_GLB_REPLY_PPL_NOT_INITIALIZED = 160,
IPC_GLB_REPLY_PPL_NOT_EXIST = 161,
@@ -392,18 +394,47 @@ int skl_ipc_process_notification(struct sst_generic_ipc *ipc,
return 0;
}
-static int skl_ipc_set_reply_error_code(u32 reply)
+struct skl_ipc_err_map {
+ const char *msg;
+ enum skl_ipc_glb_reply reply;
+ int err;
+};
+
+static struct skl_ipc_err_map skl_err_map[] = {
+ {"DSP out of memory", IPC_GLB_REPLY_OUT_OF_MEMORY, -ENOMEM},
+ {"DSP busy", IPC_GLB_REPLY_BUSY, -EBUSY},
+ {"SCLK already running", IPC_GLB_REPLY_SCLK_ALREADY_RUNNING,
+ IPC_GLB_REPLY_SCLK_ALREADY_RUNNING},
+ {"MCLK already running", IPC_GLB_REPLY_MCLK_ALREADY_RUNNING,
+ IPC_GLB_REPLY_MCLK_ALREADY_RUNNING},
+};
+
+static int skl_ipc_set_reply_error_code(struct sst_generic_ipc *ipc, u32 reply)
{
- switch (reply) {
- case IPC_GLB_REPLY_OUT_OF_MEMORY:
- return -ENOMEM;
+ int i;
- case IPC_GLB_REPLY_BUSY:
- return -EBUSY;
+ for (i = 0; i < ARRAY_SIZE(skl_err_map); i++) {
+ if (skl_err_map[i].reply == reply)
+ break;
+ }
- default:
+ if (i == ARRAY_SIZE(skl_err_map)) {
+ dev_err(ipc->dev, "ipc FW reply: %d FW Error Code: %u\n",
+ reply,
+ ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp));
return -EINVAL;
}
+
+ if (skl_err_map[i].err < 0)
+ dev_err(ipc->dev, "ipc FW reply: %s FW Error Code: %u\n",
+ skl_err_map[i].msg,
+ ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp));
+ else
+ dev_info(ipc->dev, "ipc FW reply: %s FW Error Code: %u\n",
+ skl_err_map[i].msg,
+ ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp));
+
+ return skl_err_map[i].err;
}
void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
@@ -441,10 +472,7 @@ void skl_ipc_process_reply(struct sst_generic_ipc *ipc,
}
} else {
- msg->errno = skl_ipc_set_reply_error_code(reply);
- dev_err(ipc->dev, "ipc FW reply: reply=%d\n", reply);
- dev_err(ipc->dev, "FW Error Code: %u\n",
- ipc->dsp->fw_ops.get_fw_errcode(ipc->dsp));
+ msg->errno = skl_ipc_set_reply_error_code(ipc, reply);
switch (IPC_GLB_NOTIFY_MSG_TYPE(header.primary)) {
case IPC_GLB_LOAD_MULTIPLE_MODS:
case IPC_GLB_LOAD_LIBRARY:
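The rewrite above trades the switch statement for a table lookup and encodes severity in the sign of the mapped value: negative entries are genuine errors (logged via dev_err), while non-negative entries such as the two clock-already-running replies are merely informational (dev_info) and are returned unchanged so callers can tolerate them. A standalone sketch of the same pattern follows; the OUT_OF_MEMORY reply value is hypothetical since the hunk does not show it:

    #include <errno.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct err_map { const char *msg; unsigned int reply; int err; };

    static const struct err_map map[] = {
        { "DSP out of memory",    2,   -ENOMEM }, /* reply value hypothetical */
        { "SCLK already running", 150, 150 },     /* non-fatal: keep reply code */
        { "MCLK already running", 151, 151 },
    };

    static int decode_reply(unsigned int reply)
    {
        size_t i;

        for (i = 0; i < ARRAY_SIZE(map); i++)
            if (map[i].reply == reply)
                break;

        if (i == ARRAY_SIZE(map)) {
            fprintf(stderr, "unknown reply %u\n", reply);
            return -EINVAL;
        }

        /* sign of .err picks the log level, mirroring dev_err vs dev_info */
        fprintf(map[i].err < 0 ? stderr : stdout, "%s\n", map[i].msg);
        return map[i].err;
    }

    int main(void)
    {
        printf("ret=%d\n", decode_reply(150)); /* info path, returns 150 */
        printf("ret=%d\n", decode_reply(999)); /* unknown, returns -EINVAL */
        return 0;
    }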
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 29225623b4b4..60c94836bf5b 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -37,7 +37,12 @@
#include "skl.h"
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
#include "../../../soc/codecs/hdac_hda.h"
+#endif
+static int skl_pci_binding;
+module_param_named(pci_binding, skl_pci_binding, int, 0444);
+MODULE_PARM_DESC(pci_binding, "PCI binding (0=auto, 1=only legacy, 2=only asoc)");
/*
* initialize the PCI registers
@@ -309,7 +314,7 @@ static int skl_suspend(struct device *dev)
struct pci_dev *pci = to_pci_dev(dev);
struct hdac_bus *bus = pci_get_drvdata(pci);
struct skl *skl = bus_to_skl(bus);
- int ret = 0;
+ int ret;
/*
* Do not suspend if streams which are marked ignore suspend are
@@ -331,14 +336,10 @@ static int skl_suspend(struct device *dev)
skl->skl_sst->fw_loaded = false;
}
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
- ret = snd_hdac_display_power(bus, false);
- if (ret < 0)
- dev_err(bus->dev,
- "Cannot turn OFF display power on i915\n");
- }
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
- return ret;
+ return 0;
}
static int skl_resume(struct device *dev)
@@ -350,14 +351,8 @@ static int skl_resume(struct device *dev)
int ret;
/* Turned OFF in HDMI codec driver after codec reconfiguration */
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
- ret = snd_hdac_display_power(bus, true);
- if (ret < 0) {
- dev_err(bus->dev,
- "Cannot turn on display power on i915\n");
- return ret;
- }
- }
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
/*
* resume only when we are not in suspend active, otherwise need to
@@ -515,7 +510,7 @@ static int skl_find_machine(struct skl *skl, void *driver_data)
if (pdata) {
skl->use_tplg_pcm = pdata->use_tplg_pcm;
- pdata->dmic_num = skl_get_dmic_geo(skl);
+ mach->mach_params.dmic_num = skl_get_dmic_geo(skl);
}
return 0;
@@ -525,7 +520,6 @@ static int skl_machine_device_register(struct skl *skl)
{
struct snd_soc_acpi_mach *mach = skl->mach;
struct hdac_bus *bus = skl_to_bus(skl);
- struct skl_machine_pdata *pdata;
struct platform_device *pdev;
int ret;
@@ -535,6 +529,16 @@ static int skl_machine_device_register(struct skl *skl)
return -EIO;
}
+ mach->mach_params.platform = dev_name(bus->dev);
+ mach->mach_params.codec_mask = bus->codec_mask;
+
+ ret = platform_device_add_data(pdev, (const void *)mach, sizeof(*mach));
+ if (ret) {
+ dev_err(bus->dev, "failed to add machine device platform data\n");
+ platform_device_put(pdev);
+ return ret;
+ }
+
ret = platform_device_add(pdev);
if (ret) {
dev_err(bus->dev, "failed to add machine device\n");
@@ -542,12 +546,6 @@ static int skl_machine_device_register(struct skl *skl)
return -EIO;
}
- if (mach->pdata) {
- pdata = (struct skl_machine_pdata *)mach->pdata;
- pdata->platform = dev_name(bus->dev);
- pdata->codec_mask = bus->codec_mask;
- dev_set_drvdata(&pdev->dev, mach->pdata);
- }
skl->i2s_dev = pdev;
@@ -658,6 +656,8 @@ static void skl_clock_device_unregister(struct skl *skl)
platform_device_unregister(skl->clk_dev);
}
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
+
#define IDISP_INTEL_VENDOR_ID 0x80860000
/*
@@ -676,6 +676,8 @@ static void load_codec_module(struct hda_codec *codec)
#endif
}
+#endif /* CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC */
+
/*
* Probe the given codec address
*/
@@ -685,9 +687,11 @@ static int probe_codec(struct hdac_bus *bus, int addr)
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
unsigned int res = -1;
struct skl *skl = bus_to_skl(bus);
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
struct hdac_hda_priv *hda_codec;
- struct hdac_device *hdev;
int err;
+#endif
+ struct hdac_device *hdev;
mutex_lock(&bus->cmd_mutex);
snd_hdac_bus_send_cmd(bus, cmd);
@@ -697,6 +701,7 @@ static int probe_codec(struct hdac_bus *bus, int addr)
return -EIO;
dev_dbg(bus->dev, "codec #%d probed OK: %x\n", addr, res);
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
hda_codec = devm_kzalloc(&skl->pci->dev, sizeof(*hda_codec),
GFP_KERNEL);
if (!hda_codec)
@@ -715,6 +720,13 @@ static int probe_codec(struct hdac_bus *bus, int addr)
load_codec_module(&hda_codec->codec);
}
return 0;
+#else
+ hdev = devm_kzalloc(&skl->pci->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ return snd_hdac_ext_bus_device_init(bus, addr, hdev);
+#endif /* CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC */
}
/* Codec initialization */
@@ -767,11 +779,9 @@ static int skl_i915_init(struct hdac_bus *bus)
if (err < 0)
return err;
- err = snd_hdac_display_power(bus, true);
- if (err < 0)
- dev_err(bus->dev, "Cannot turn on display power on i915\n");
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
- return err;
+ return 0;
}
static void skl_probe_work(struct work_struct *work)
@@ -807,21 +817,10 @@ static void skl_probe_work(struct work_struct *work)
return;
}
- if (bus->ppcap) {
- err = skl_machine_device_register(skl);
- if (err < 0) {
- dev_err(bus->dev, "machine register failed: %d\n", err);
- goto out_err;
- }
- }
-
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
- err = snd_hdac_display_power(bus, false);
- if (err < 0) {
- dev_err(bus->dev, "Cannot turn off display power on i915\n");
- skl_machine_device_unregister(skl);
- return;
- }
+ err = skl_machine_device_register(skl);
+ if (err < 0) {
+ dev_err(bus->dev, "machine register failed: %d\n", err);
+ goto out_err;
}
/*
@@ -830,6 +829,9 @@ static void skl_probe_work(struct work_struct *work)
list_for_each_entry(hlink, &bus->hlink_list, list)
snd_hdac_ext_bus_link_put(bus, hlink);
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
+
/* configure PM */
pm_runtime_put_noidle(bus->dev);
pm_runtime_allow(bus->dev);
@@ -839,7 +841,7 @@ static void skl_probe_work(struct work_struct *work)
out_err:
if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
- err = snd_hdac_display_power(bus, false);
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
}
/*
@@ -870,7 +872,7 @@ static int skl_create(struct pci_dev *pci,
hbus = skl_to_hbus(skl);
bus = skl_to_bus(skl);
-#if IS_ENABLED(CONFIG_SND_SOC_HDAC_HDA)
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
ext_ops = snd_soc_hdac_hda_get_ops();
#endif
snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, ext_ops);
@@ -912,6 +914,12 @@ static int skl_first_init(struct hdac_bus *bus)
snd_hdac_bus_parse_capabilities(bus);
+ /* check if PPCAP exists */
+ if (!bus->ppcap) {
+ dev_err(bus->dev, "bus ppcap not set, HDaudio or DSP not present?\n");
+ return -ENODEV;
+ }
+
if (skl_acquire_irq(bus, 0) < 0)
return -EBUSY;
@@ -921,23 +929,25 @@ static int skl_first_init(struct hdac_bus *bus)
gcap = snd_hdac_chip_readw(bus, GCAP);
dev_dbg(bus->dev, "chipset global capabilities = 0x%x\n", gcap);
- /* allow 64bit DMA address if supported by H/W */
- if (!dma_set_mask(bus->dev, DMA_BIT_MASK(64))) {
- dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(64));
- } else {
- dma_set_mask(bus->dev, DMA_BIT_MASK(32));
- dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(32));
- }
-
/* read number of streams from GCAP register */
cp_streams = (gcap >> 8) & 0x0f;
pb_streams = (gcap >> 12) & 0x0f;
- if (!pb_streams && !cp_streams)
+ if (!pb_streams && !cp_streams) {
+ dev_err(bus->dev, "no streams found in GCAP definitions?\n");
return -EIO;
+ }
bus->num_streams = cp_streams + pb_streams;
+ /* allow 64bit DMA address if supported by H/W */
+ if (!dma_set_mask(bus->dev, DMA_BIT_MASK(64))) {
+ dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(64));
+ } else {
+ dma_set_mask(bus->dev, DMA_BIT_MASK(32));
+ dma_set_coherent_mask(bus->dev, DMA_BIT_MASK(32));
+ }
+
/* initialize streams */
snd_hdac_ext_stream_init_all
(bus, 0, cp_streams, SNDRV_PCM_STREAM_CAPTURE);
@@ -962,6 +972,36 @@ static int skl_probe(struct pci_dev *pci,
struct hdac_bus *bus = NULL;
int err;
+ switch (skl_pci_binding) {
+ case SND_SKL_PCI_BIND_AUTO:
+ /*
+ * detect DSP by checking class/subclass/prog-id information
+ * class=04 subclass 03 prog-if 00: no DSP, use legacy driver
+ * class=04 subclass 01 prog-if 00: DSP is present
+ * (and may be required e.g. for DMIC or SSP support)
+ * class=04 subclass 03 prog-if 80: use DSP or legacy mode
+ */
+ if (pci->class == 0x040300) {
+ dev_info(&pci->dev, "The DSP is not enabled on this platform, aborting probe\n");
+ return -ENODEV;
+ }
+ if (pci->class != 0x040100 && pci->class != 0x040380) {
+ dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, aborting probe\n", pci->class);
+ return -ENODEV;
+ }
+ dev_info(&pci->dev, "DSP detected with PCI class/subclass/prog-if info 0x%06x\n", pci->class);
+ break;
+ case SND_SKL_PCI_BIND_LEGACY:
+ dev_info(&pci->dev, "Module parameter forced binding with HDaudio legacy, aborting probe\n");
+ return -ENODEV;
+ case SND_SKL_PCI_BIND_ASOC:
+ dev_info(&pci->dev, "Module parameter forced binding with SKL driver, bypassed detection logic\n");
+ break;
+ default:
+ dev_err(&pci->dev, "invalid value for skl_pci_binding module parameter, ignored\n");
+ break;
+ }
+
/* we use ext core ops, so provide NULL for ops here */
err = skl_create(pci, NULL, &skl);
if (err < 0)
@@ -970,8 +1010,10 @@ static int skl_probe(struct pci_dev *pci,
bus = skl_to_bus(skl);
err = skl_first_init(bus);
- if (err < 0)
+ if (err < 0) {
+ dev_err(bus->dev, "skl_first_init failed with err: %d\n", err);
goto out_free;
+ }
skl->pci_id = pci->device;
@@ -980,37 +1022,48 @@ static int skl_probe(struct pci_dev *pci,
skl->nhlt = skl_nhlt_init(bus->dev);
if (skl->nhlt == NULL) {
+#if !IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
+ dev_err(bus->dev, "no nhlt info found\n");
err = -ENODEV;
goto out_free;
- }
-
- err = skl_nhlt_create_sysfs(skl);
- if (err < 0)
- goto out_nhlt_free;
+#else
+ dev_warn(bus->dev, "no nhlt info found, continuing to try to enable HDaudio codec\n");
+#endif
+ } else {
- skl_nhlt_update_topology_bin(skl);
+ err = skl_nhlt_create_sysfs(skl);
+ if (err < 0) {
+ dev_err(bus->dev, "skl_nhlt_create_sysfs failed with err: %d\n", err);
+ goto out_nhlt_free;
+ }
- pci_set_drvdata(skl->pci, bus);
+ skl_nhlt_update_topology_bin(skl);
- /* check if dsp is there */
- if (bus->ppcap) {
/* create device for dsp clk */
err = skl_clock_device_register(skl);
- if (err < 0)
+ if (err < 0) {
+ dev_err(bus->dev, "skl_clock_device_register failed with err: %d\n", err);
goto out_clk_free;
+ }
+ }
- err = skl_find_machine(skl, (void *)pci_id->driver_data);
- if (err < 0)
- goto out_nhlt_free;
+ pci_set_drvdata(skl->pci, bus);
- err = skl_init_dsp(skl);
- if (err < 0) {
- dev_dbg(bus->dev, "error failed to register dsp\n");
- goto out_nhlt_free;
- }
- skl->skl_sst->enable_miscbdcge = skl_enable_miscbdcge;
- skl->skl_sst->clock_power_gating = skl_clock_power_gating;
+
+ err = skl_find_machine(skl, (void *)pci_id->driver_data);
+ if (err < 0) {
+ dev_err(bus->dev, "skl_find_machine failed with err: %d\n", err);
+ goto out_nhlt_free;
+ }
+
+ err = skl_init_dsp(skl);
+ if (err < 0) {
+ dev_dbg(bus->dev, "error failed to register dsp\n");
+ goto out_nhlt_free;
}
+ skl->skl_sst->enable_miscbdcge = skl_enable_miscbdcge;
+ skl->skl_sst->clock_power_gating = skl_clock_power_gating;
+
if (bus->mlcap)
snd_hdac_ext_bus_get_ml_capabilities(bus);
@@ -1018,8 +1071,10 @@ static int skl_probe(struct pci_dev *pci,
/* create device for soc dmic */
err = skl_dmic_device_register(skl);
- if (err < 0)
+ if (err < 0) {
+ dev_err(bus->dev, "skl_dmic_device_register failed with err: %d\n", err);
goto out_dsp_free;
+ }
schedule_work(&skl->probe_work);
@@ -1087,21 +1142,36 @@ static void skl_remove(struct pci_dev *pci)
/* PCI IDs */
static const struct pci_device_id skl_ids[] = {
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKL)
/* Sunrise Point-LP */
{ PCI_DEVICE(0x8086, 0x9d70),
.driver_data = (unsigned long)&snd_soc_acpi_intel_skl_machines},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_APL)
/* BXT-P */
{ PCI_DEVICE(0x8086, 0x5a98),
.driver_data = (unsigned long)&snd_soc_acpi_intel_bxt_machines},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_KBL)
/* KBL */
{ PCI_DEVICE(0x8086, 0x9D71),
.driver_data = (unsigned long)&snd_soc_acpi_intel_kbl_machines},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_GLK)
/* GLK */
{ PCI_DEVICE(0x8086, 0x3198),
.driver_data = (unsigned long)&snd_soc_acpi_intel_glk_machines},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL)
/* CNL */
{ PCI_DEVICE(0x8086, 0x9dc8),
.driver_data = (unsigned long)&snd_soc_acpi_intel_cnl_machines},
+#endif
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CFL)
+ /* CFL */
+ { PCI_DEVICE(0x8086, 0xa348),
+ .driver_data = (unsigned long)&snd_soc_acpi_intel_cnl_machines},
+#endif
{ 0, }
};
MODULE_DEVICE_TABLE(pci, skl_ids);
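A note on the magic numbers in the new SND_SKL_PCI_BIND_AUTO branch: the PCI core packs dev->class as (base class << 16) | (subclass << 8) | prog-if, so multimedia (04) / audio device (01) / prog-if 00 reads back as 0x040100. A small sketch of the same decision table, with paraphrased strings rather than the driver's messages:

    #include <stdio.h>

    /* pci_dev->class = (base class << 16) | (subclass << 8) | prog-if */
    static const char *classify(unsigned int class)
    {
        switch (class) {
        case 0x040300: return "HDaudio controller without DSP: legacy driver";
        case 0x040100: return "DSP present: ASoC/SKL driver required";
        case 0x040380: return "dual-mode: DSP or legacy both usable";
        default:       return "unknown audio class";
        }
    }

    int main(void)
    {
        unsigned int class = (0x04 << 16) | (0x03 << 8) | 0x80;

        printf("0x%06x -> %s\n", class, classify(class)); /* 0x040380 */
        return 0;
    }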
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 8d48cd7c56c8..85f8bb6687dc 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -119,10 +119,7 @@ struct skl_dma_params {
};
struct skl_machine_pdata {
- u32 dmic_num;
bool use_tplg_pcm; /* use dais and dai links from topology */
- const char *platform;
- u32 codec_mask;
};
struct skl_dsp_ops {
diff --git a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
index 192f4d7b37b6..bff7d71d0742 100644
--- a/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
+++ b/sound/soc/mediatek/mt6797/mt6797-afe-pcm.c
@@ -828,7 +828,7 @@ static int mt6797_afe_pcm_dev_probe(struct platform_device *pdev)
/* request irq */
irq_id = platform_get_irq(pdev, 0);
if (!irq_id) {
- dev_err(dev, "%s no irq found\n", dev->of_node->name);
+ dev_err(dev, "%pOFn no irq found\n", dev->of_node);
return -ENXIO;
}
ret = devm_request_irq(dev, irq_id, mt6797_afe_irq_handler,
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index c0b6697503fd..166aed28330d 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -1092,7 +1092,7 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
irq_id = platform_get_irq(pdev, 0);
if (irq_id <= 0) {
- dev_err(afe->dev, "np %s no irq\n", afe->dev->of_node->name);
+ dev_err(afe->dev, "np %pOFn no irq\n", afe->dev->of_node);
return irq_id < 0 ? irq_id : -ENXIO;
}
ret = devm_request_irq(afe->dev, irq_id, mt8173_afe_irq_handler,
diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
index 8b8426ed2363..8779fe23671d 100644
--- a/sound/soc/meson/Kconfig
+++ b/sound/soc/meson/Kconfig
@@ -54,6 +54,7 @@ config SND_MESON_AXG_SOUND_CARD
imply SND_MESON_AXG_TDMIN
imply SND_MESON_AXG_TDMOUT
imply SND_MESON_AXG_SPDIFOUT
+ imply SND_MESON_AXG_SPDIFIN
imply SND_MESON_AXG_PDM
help
Select Y or M to add support for the AXG SoC sound card
@@ -67,6 +68,13 @@ config SND_MESON_AXG_SPDIFOUT
Select Y or M to add support for SPDIF output serializer embedded
in the Amlogic AXG SoC family
+config SND_MESON_AXG_SPDIFIN
+ tristate "Amlogic AXG SPDIF Input Support"
+ imply SND_SOC_SPDIF
+ help
+ Select Y or M to add support for SPDIF input embedded
+ in the Amlogic AXG SoC family
+
config SND_MESON_AXG_PDM
tristate "Amlogic AXG PDM Input Support"
imply SND_SOC_DMIC
@@ -74,5 +82,4 @@ config SND_MESON_AXG_PDM
help
Select Y or M to add support for PDM input embedded
in the Amlogic AXG SoC family
-
endmenu
diff --git a/sound/soc/meson/Makefile b/sound/soc/meson/Makefile
index 4cd25104029d..b45dfb9e2f88 100644
--- a/sound/soc/meson/Makefile
+++ b/sound/soc/meson/Makefile
@@ -8,6 +8,7 @@ snd-soc-meson-axg-tdm-interface-objs := axg-tdm-interface.o
snd-soc-meson-axg-tdmin-objs := axg-tdmin.o
snd-soc-meson-axg-tdmout-objs := axg-tdmout.o
snd-soc-meson-axg-sound-card-objs := axg-card.o
+snd-soc-meson-axg-spdifin-objs := axg-spdifin.o
snd-soc-meson-axg-spdifout-objs := axg-spdifout.o
snd-soc-meson-axg-pdm-objs := axg-pdm.o
@@ -19,5 +20,6 @@ obj-$(CONFIG_SND_MESON_AXG_TDM_INTERFACE) += snd-soc-meson-axg-tdm-interface.o
obj-$(CONFIG_SND_MESON_AXG_TDMIN) += snd-soc-meson-axg-tdmin.o
obj-$(CONFIG_SND_MESON_AXG_TDMOUT) += snd-soc-meson-axg-tdmout.o
obj-$(CONFIG_SND_MESON_AXG_SOUND_CARD) += snd-soc-meson-axg-sound-card.o
+obj-$(CONFIG_SND_MESON_AXG_SPDIFIN) += snd-soc-meson-axg-spdifin.o
obj-$(CONFIG_SND_MESON_AXG_SPDIFOUT) += snd-soc-meson-axg-spdifout.o
obj-$(CONFIG_SND_MESON_AXG_PDM) += snd-soc-meson-axg-pdm.o
diff --git a/sound/soc/meson/axg-fifo.h b/sound/soc/meson/axg-fifo.h
index cb6c4013ca33..d9f516cfbeda 100644
--- a/sound/soc/meson/axg-fifo.h
+++ b/sound/soc/meson/axg-fifo.h
@@ -25,7 +25,8 @@ struct snd_soc_pcm_runtime;
SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S20_LE | \
SNDRV_PCM_FMTBIT_S24_LE | \
- SNDRV_PCM_FMTBIT_S32_LE)
+ SNDRV_PCM_FMTBIT_S32_LE | \
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
#define AXG_FIFO_BURST 8
#define AXG_FIFO_MIN_CNT 64
diff --git a/sound/soc/meson/axg-spdifin.c b/sound/soc/meson/axg-spdifin.c
new file mode 100644
index 000000000000..01b2035fa841
--- /dev/null
+++ b/sound/soc/meson/axg-spdifin.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/pcm_params.h>
+
+#define SPDIFIN_CTRL0 0x00
+#define SPDIFIN_CTRL0_EN BIT(31)
+#define SPDIFIN_CTRL0_RST_OUT BIT(29)
+#define SPDIFIN_CTRL0_RST_IN BIT(28)
+#define SPDIFIN_CTRL0_WIDTH_SEL BIT(24)
+#define SPDIFIN_CTRL0_STATUS_CH_SHIFT 11
+#define SPDIFIN_CTRL0_STATUS_SEL GENMASK(10, 8)
+#define SPDIFIN_CTRL0_SRC_SEL GENMASK(5, 4)
+#define SPDIFIN_CTRL0_CHK_VALID BIT(3)
+#define SPDIFIN_CTRL1 0x04
+#define SPDIFIN_CTRL1_BASE_TIMER GENMASK(19, 0)
+#define SPDIFIN_CTRL1_IRQ_MASK GENMASK(27, 20)
+#define SPDIFIN_CTRL2 0x08
+#define SPDIFIN_THRES_PER_REG 3
+#define SPDIFIN_THRES_WIDTH 10
+#define SPDIFIN_CTRL3 0x0c
+#define SPDIFIN_CTRL4 0x10
+#define SPDIFIN_TIMER_PER_REG 4
+#define SPDIFIN_TIMER_WIDTH 8
+#define SPDIFIN_CTRL5 0x14
+#define SPDIFIN_CTRL6 0x18
+#define SPDIFIN_STAT0 0x1c
+#define SPDIFIN_STAT0_MODE GENMASK(30, 28)
+#define SPDIFIN_STAT0_MAXW GENMASK(17, 8)
+#define SPDIFIN_STAT0_IRQ GENMASK(7, 0)
+#define SPDIFIN_IRQ_MODE_CHANGED BIT(2)
+#define SPDIFIN_STAT1 0x20
+#define SPDIFIN_STAT2 0x24
+#define SPDIFIN_MUTE_VAL 0x28
+
+#define SPDIFIN_MODE_NUM 7
+
+struct axg_spdifin_cfg {
+ const unsigned int *mode_rates;
+ unsigned int ref_rate;
+};
+
+struct axg_spdifin {
+ const struct axg_spdifin_cfg *conf;
+ struct regmap *map;
+ struct clk *refclk;
+ struct clk *pclk;
+};
+
+/*
+ * TODO:
+ * It would have been nice to check the actual rate against the sample rate
+ * requested in hw_params(). Unfortunately, I was not able to make the mode
+ * detection and IRQ work reliably:
+ *
+ * 1. IRQs are generated on mode change only, so there is no notification
+ * on transition between no signal and mode 0 (32kHz).
+ * 2. Mode detection very often has glitches, and may detect the
+ * lowest or the highest mode before zeroing in on the actual mode.
+ *
+ * This makes calling snd_pcm_stop() difficult to get right. Even notifying
+ * the kcontrol would be very unreliable at this point.
+ * Let's keep things simple until the magic spell that makes this work is
+ * found.
+ */
+
+static unsigned int axg_spdifin_get_rate(struct axg_spdifin *priv)
+{
+ unsigned int stat, mode, rate = 0;
+
+ regmap_read(priv->map, SPDIFIN_STAT0, &stat);
+ mode = FIELD_GET(SPDIFIN_STAT0_MODE, stat);
+
+ /*
+ * If max width is zero, we are not capturing anything.
+ * Also, sometimes, when the capture is on but there is no data,
+ * mode is SPDIFIN_MODE_NUM, but not always ...
+ */
+ if (FIELD_GET(SPDIFIN_STAT0_MAXW, stat) &&
+ mode < SPDIFIN_MODE_NUM)
+ rate = priv->conf->mode_rates[mode];
+
+ return rate;
+}
+
+static int axg_spdifin_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+
+ /* Assert both resets */
+ regmap_update_bits(priv->map, SPDIFIN_CTRL0,
+ SPDIFIN_CTRL0_RST_OUT |
+ SPDIFIN_CTRL0_RST_IN,
+ 0);
+
+ /* Release the output reset before the input reset */
+ regmap_update_bits(priv->map, SPDIFIN_CTRL0,
+ SPDIFIN_CTRL0_RST_OUT, SPDIFIN_CTRL0_RST_OUT);
+ regmap_update_bits(priv->map, SPDIFIN_CTRL0,
+ SPDIFIN_CTRL0_RST_IN, SPDIFIN_CTRL0_RST_IN);
+
+ return 0;
+}
+
+static int axg_spdifin_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = clk_prepare_enable(priv->refclk);
+ if (ret) {
+ dev_err(dai->dev,
+ "failed to enable spdifin reference clock\n");
+ return ret;
+ }
+
+ regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN,
+ SPDIFIN_CTRL0_EN);
+
+ return 0;
+}
+
+static void axg_spdifin_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+
+ regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0);
+ clk_disable_unprepare(priv->refclk);
+}
+
+static void axg_spdifin_write_mode_param(struct regmap *map, int mode,
+ unsigned int val,
+ unsigned int num_per_reg,
+ unsigned int base_reg,
+ unsigned int width)
+{
+ uint64_t offset = mode;
+ unsigned int reg, shift, rem;
+
+ rem = do_div(offset, num_per_reg);
+
+ reg = offset * regmap_get_reg_stride(map) + base_reg;
+ shift = width * (num_per_reg - 1 - rem);
+
+ regmap_update_bits(map, reg, GENMASK(width - 1, 0) << shift,
+ val << shift);
+}
+
+static void axg_spdifin_write_timer(struct regmap *map, int mode,
+ unsigned int val)
+{
+ axg_spdifin_write_mode_param(map, mode, val, SPDIFIN_TIMER_PER_REG,
+ SPDIFIN_CTRL4, SPDIFIN_TIMER_WIDTH);
+}
+
+static void axg_spdifin_write_threshold(struct regmap *map, int mode,
+ unsigned int val)
+{
+ axg_spdifin_write_mode_param(map, mode, val, SPDIFIN_THRES_PER_REG,
+ SPDIFIN_CTRL2, SPDIFIN_THRES_WIDTH);
+}
+
+static unsigned int axg_spdifin_mode_timer(struct axg_spdifin *priv,
+ int mode,
+ unsigned int rate)
+{
+ /*
+ * Number of periods of the reference clock during one period of the
+ * input signal
+ */
+ return rate / (128 * priv->conf->mode_rates[mode]);
+}
+
+static int axg_spdifin_sample_mode_config(struct snd_soc_dai *dai,
+ struct axg_spdifin *priv)
+{
+ unsigned int rate, t_next;
+ int ret, i = SPDIFIN_MODE_NUM - 1;
+
+ /* Set spdif input reference clock */
+ ret = clk_set_rate(priv->refclk, priv->conf->ref_rate);
+ if (ret) {
+ dev_err(dai->dev, "reference clock rate set failed\n");
+ return ret;
+ }
+
+ /*
+ * The rate actually set might be slightly different, get
+ * the actual rate for the following mode calculation
+ */
+ rate = clk_get_rate(priv->refclk);
+
+ /* HW will update mode every 1ms */
+ regmap_update_bits(priv->map, SPDIFIN_CTRL1,
+ SPDIFIN_CTRL1_BASE_TIMER,
+ FIELD_PREP(SPDIFIN_CTRL1_BASE_TIMER, rate / 1000));
+
+ /* Threshold based on the minimum width between two edges */
+ regmap_update_bits(priv->map, SPDIFIN_CTRL0,
+ SPDIFIN_CTRL0_WIDTH_SEL, SPDIFIN_CTRL0_WIDTH_SEL);
+
+ /* Calculate the last timer which has no threshold */
+ t_next = axg_spdifin_mode_timer(priv, i, rate);
+ axg_spdifin_write_timer(priv->map, i, t_next);
+
+ do {
+ unsigned int t;
+
+ i -= 1;
+
+ /* Calculate the timer */
+ t = axg_spdifin_mode_timer(priv, i, rate);
+
+ /* Set the timer value */
+ axg_spdifin_write_timer(priv->map, i, t);
+
+ /* Set the threshold value */
+ axg_spdifin_write_threshold(priv->map, i, t + t_next);
+
+ /* Save the current timer for the next threshold calculation */
+ t_next = t;
+
+ } while (i > 0);
+
+ return 0;
+}
+
+static int axg_spdifin_dai_probe(struct snd_soc_dai *dai)
+{
+ struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret) {
+ dev_err(dai->dev, "failed to enable pclk\n");
+ return ret;
+ }
+
+ ret = axg_spdifin_sample_mode_config(dai, priv);
+ if (ret) {
+ dev_err(dai->dev, "mode configuration failed\n");
+ clk_disable_unprepare(priv->pclk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axg_spdifin_dai_remove(struct snd_soc_dai *dai)
+{
+ struct axg_spdifin *priv = snd_soc_dai_get_drvdata(dai);
+
+ clk_disable_unprepare(priv->pclk);
+ return 0;
+}
+
+static const struct snd_soc_dai_ops axg_spdifin_ops = {
+ .prepare = axg_spdifin_prepare,
+ .startup = axg_spdifin_startup,
+ .shutdown = axg_spdifin_shutdown,
+};
+
+static int axg_spdifin_iec958_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+ uinfo->count = 1;
+
+ return 0;
+}
+
+static int axg_spdifin_get_status_mask(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int i;
+
+ for (i = 0; i < 24; i++)
+ ucontrol->value.iec958.status[i] = 0xff;
+
+ return 0;
+}
+
+static int axg_spdifin_get_status(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *c = snd_kcontrol_chip(kcontrol);
+ struct axg_spdifin *priv = snd_soc_component_get_drvdata(c);
+ int i, j;
+
+ for (i = 0; i < 6; i++) {
+ unsigned int val;
+
+ regmap_update_bits(priv->map, SPDIFIN_CTRL0,
+ SPDIFIN_CTRL0_STATUS_SEL,
+ FIELD_PREP(SPDIFIN_CTRL0_STATUS_SEL, i));
+
+ regmap_read(priv->map, SPDIFIN_STAT1, &val);
+
+ for (j = 0; j < 4; j++) {
+ unsigned int offset = i * 4 + j;
+
+ ucontrol->value.iec958.status[offset] =
+ (val >> (j * 8)) & 0xff;
+ }
+ }
+
+ return 0;
+}
+
+#define AXG_SPDIFIN_IEC958_MASK \
+ { \
+ .access = SNDRV_CTL_ELEM_ACCESS_READ, \
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \
+ .name = SNDRV_CTL_NAME_IEC958("", CAPTURE, MASK), \
+ .info = axg_spdifin_iec958_info, \
+ .get = axg_spdifin_get_status_mask, \
+ }
+
+#define AXG_SPDIFIN_IEC958_STATUS \
+ { \
+ .access = (SNDRV_CTL_ELEM_ACCESS_READ | \
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE), \
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \
+ .name = SNDRV_CTL_NAME_IEC958("", CAPTURE, NONE), \
+ .info = axg_spdifin_iec958_info, \
+ .get = axg_spdifin_get_status, \
+ }
+
+static const char * const spdifin_chsts_src_texts[] = {
+ "A", "B",
+};
+
+static SOC_ENUM_SINGLE_DECL(axg_spdifin_chsts_src_enum, SPDIFIN_CTRL0,
+ SPDIFIN_CTRL0_STATUS_CH_SHIFT,
+ spdifin_chsts_src_texts);
+
+static int axg_spdifin_rate_lock_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 192000;
+
+ return 0;
+}
+
+static int axg_spdifin_rate_lock_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *c = snd_kcontrol_chip(kcontrol);
+ struct axg_spdifin *priv = snd_soc_component_get_drvdata(c);
+
+ ucontrol->value.integer.value[0] = axg_spdifin_get_rate(priv);
+
+ return 0;
+}
+
+#define AXG_SPDIFIN_LOCK_RATE(xname) \
+ { \
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \
+ .access = (SNDRV_CTL_ELEM_ACCESS_READ | \
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE), \
+ .get = axg_spdifin_rate_lock_get, \
+ .info = axg_spdifin_rate_lock_info, \
+ .name = xname, \
+ }
+
+static const struct snd_kcontrol_new axg_spdifin_controls[] = {
+ AXG_SPDIFIN_LOCK_RATE("Capture Rate Lock"),
+ SOC_DOUBLE("Capture Switch", SPDIFIN_CTRL0, 7, 6, 1, 1),
+ SOC_ENUM(SNDRV_CTL_NAME_IEC958("", CAPTURE, NONE) "Src",
+ axg_spdifin_chsts_src_enum),
+ AXG_SPDIFIN_IEC958_MASK,
+ AXG_SPDIFIN_IEC958_STATUS,
+};
+
+static const struct snd_soc_component_driver axg_spdifin_component_drv = {
+ .controls = axg_spdifin_controls,
+ .num_controls = ARRAY_SIZE(axg_spdifin_controls),
+};
+
+static const struct regmap_config axg_spdifin_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = SPDIFIN_MUTE_VAL,
+};
+
+static const unsigned int axg_spdifin_mode_rates[SPDIFIN_MODE_NUM] = {
+ 32000, 44100, 48000, 88200, 96000, 176400, 192000,
+};
+
+static const struct axg_spdifin_cfg axg_cfg = {
+ .mode_rates = axg_spdifin_mode_rates,
+ .ref_rate = 333333333,
+};
+
+static const struct of_device_id axg_spdifin_of_match[] = {
+ {
+ .compatible = "amlogic,axg-spdifin",
+ .data = &axg_cfg,
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, axg_spdifin_of_match);
+
+static struct snd_soc_dai_driver *
+axg_spdifin_get_dai_drv(struct device *dev, struct axg_spdifin *priv)
+{
+ struct snd_soc_dai_driver *drv;
+ int i;
+
+ drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return ERR_PTR(-ENOMEM);
+
+ drv->name = "SPDIF Input";
+ drv->ops = &axg_spdifin_ops;
+ drv->probe = axg_spdifin_dai_probe;
+ drv->remove = axg_spdifin_dai_remove;
+ drv->capture.stream_name = "Capture";
+ drv->capture.channels_min = 1;
+ drv->capture.channels_max = 2;
+ drv->capture.formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE;
+
+ for (i = 0; i < SPDIFIN_MODE_NUM; i++) {
+ unsigned int rb =
+ snd_pcm_rate_to_rate_bit(priv->conf->mode_rates[i]);
+
+ if (rb == SNDRV_PCM_RATE_KNOT)
+ return ERR_PTR(-EINVAL);
+
+ drv->capture.rates |= rb;
+ }
+
+ return drv;
+}
+
+static int axg_spdifin_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct axg_spdifin *priv;
+ struct snd_soc_dai_driver *dai_drv;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->conf = of_device_get_match_data(dev);
+ if (!priv->conf) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ priv->map = devm_regmap_init_mmio(dev, regs, &axg_spdifin_regmap_cfg);
+ if (IS_ERR(priv->map)) {
+ dev_err(dev, "failed to init regmap: %ld\n",
+ PTR_ERR(priv->map));
+ return PTR_ERR(priv->map);
+ }
+
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ ret = PTR_ERR(priv->pclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get pclk: %d\n", ret);
+ return ret;
+ }
+
+ priv->refclk = devm_clk_get(dev, "refclk");
+ if (IS_ERR(priv->refclk)) {
+ ret = PTR_ERR(priv->refclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get mclk: %d\n", ret);
+ return ret;
+ }
+
+ dai_drv = axg_spdifin_get_dai_drv(dev, priv);
+ if (IS_ERR(dai_drv)) {
+ dev_err(dev, "failed to get dai driver: %ld\n",
+ PTR_ERR(dai_drv));
+ return PTR_ERR(dai_drv);
+ }
+
+ return devm_snd_soc_register_component(dev, &axg_spdifin_component_drv,
+ dai_drv, 1);
+}
+
+static struct platform_driver axg_spdifin_pdrv = {
+ .probe = axg_spdifin_probe,
+ .driver = {
+ .name = "axg-spdifin",
+ .of_match_table = axg_spdifin_of_match,
+ },
+};
+module_platform_driver(axg_spdifin_pdrv);
+
+MODULE_DESCRIPTION("Amlogic AXG SPDIF Input driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/axg-toddr.c b/sound/soc/meson/axg-toddr.c
index c2c9bb312586..0e9ca3882ae5 100644
--- a/sound/soc/meson/axg-toddr.c
+++ b/sound/soc/meson/axg-toddr.c
@@ -25,6 +25,8 @@
#define CTRL0_TODDR_LSB_POS_MASK GENMASK(7, 3)
#define CTRL0_TODDR_LSB_POS(x) ((x) << 3)
+#define TODDR_MSB_POS 31
+
static int axg_toddr_pcm_new(struct snd_soc_pcm_runtime *rtd,
struct snd_soc_dai *dai)
{
@@ -36,14 +38,7 @@ static int axg_toddr_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct axg_fifo *fifo = snd_soc_dai_get_drvdata(dai);
- unsigned int type, width, msb = 31;
-
- /*
- * NOTE:
- * Almost all backend will place the MSB at bit 31, except SPDIF Input
- * which will put it at index 28. When adding support for the SPDIF
- * Input, we'll need to find which type of backend we are connected to.
- */
+ unsigned int type, width;
switch (params_physical_width(params)) {
case 8:
@@ -66,8 +61,8 @@ static int axg_toddr_dai_hw_params(struct snd_pcm_substream *substream,
CTRL0_TODDR_MSB_POS_MASK |
CTRL0_TODDR_LSB_POS_MASK,
CTRL0_TODDR_TYPE(type) |
- CTRL0_TODDR_MSB_POS(msb) |
- CTRL0_TODDR_LSB_POS(msb - (width - 1)));
+ CTRL0_TODDR_MSB_POS(TODDR_MSB_POS) |
+ CTRL0_TODDR_LSB_POS(TODDR_MSB_POS - (width - 1)));
return 0;
}
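With the NOTE gone, the FIFO now pins the MSB at bit 31 for every backend and derives the LSB slot from the sample width, exactly as the hunk computes CTRL0_TODDR_LSB_POS(TODDR_MSB_POS - (width - 1)). A tiny worked example of the resulting positions, for illustrative widths such a backend might supply:

    #include <stdio.h>

    #define TODDR_MSB_POS 31 /* from the hunk above */

    int main(void)
    {
        const int widths[] = { 8, 16, 24, 32 };
        size_t i;

        for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
            printf("width %2d: msb=%d lsb=%d\n", widths[i],
                   TODDR_MSB_POS, TODDR_MSB_POS - (widths[i] - 1));
        return 0;
    }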
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
deleted file mode 100644
index 6dccea6fdaeb..000000000000
--- a/sound/soc/omap/Kconfig
+++ /dev/null
@@ -1,129 +0,0 @@
-config SND_OMAP_SOC
- tristate "SoC Audio for Texas Instruments OMAP chips (deprecated)"
- depends on (ARCH_OMAP && DMA_OMAP) || (ARM && COMPILE_TEST)
- select SND_SDMA_SOC
-
-config SND_SDMA_SOC
- tristate "SoC Audio for Texas Instruments chips using sDMA"
- depends on DMA_OMAP || COMPILE_TEST
- select SND_SOC_GENERIC_DMAENGINE_PCM
-
-config SND_OMAP_SOC_DMIC
- tristate
-
-config SND_OMAP_SOC_MCBSP
- tristate
-
-config SND_OMAP_SOC_MCPDM
- tristate
-
-config SND_OMAP_SOC_HDMI_AUDIO
- tristate "HDMI audio support for OMAP4+ based SoCs"
- depends on SND_SDMA_SOC
- help
- For HDMI audio to work OMAPDSS HDMI support should be
- enabled.
- The hdmi audio driver implements cpu-dai component using the
- callbacks provided by OMAPDSS and registers the component
- under DSS HDMI device. Omap-pcm is registered for platform
- component also under DSS HDMI device. Dummy codec is used as
- as codec component. The hdmi audio driver implements also
- the card and registers it under its own platform device.
- The device for the driver is registered by OMAPDSS hdmi
- driver.
-
-config SND_OMAP_SOC_N810
- tristate "SoC Audio support for Nokia N810"
- depends on SND_SDMA_SOC && MACH_NOKIA_N810 && I2C
- select SND_OMAP_SOC_MCBSP
- select SND_SOC_TLV320AIC3X
- help
- Say Y if you want to add support for SoC audio on Nokia N810.
-
-config SND_OMAP_SOC_RX51
- tristate "SoC Audio support for Nokia N900 (RX-51)"
- depends on SND_SDMA_SOC && ARM && I2C
- select SND_OMAP_SOC_MCBSP
- select SND_SOC_TLV320AIC3X
- select SND_SOC_TPA6130A2
- depends on GPIOLIB
- help
- Say Y if you want to add support for SoC audio on Nokia N900
- cellphone.
-
-config SND_OMAP_SOC_AMS_DELTA
- tristate "SoC Audio support for Amstrad E3 (Delta) videophone"
- depends on SND_SDMA_SOC && MACH_AMS_DELTA && TTY
- select SND_OMAP_SOC_MCBSP
- select SND_SOC_CX20442
- help
- Say Y if you want to add support for SoC audio device connected to
- a handset and a speakerphone found on Amstrad E3 (Delta) videophone.
-
- Note that in order to get those devices fully supported, you have to
- build the kernel with standard serial port driver included and
- configured for at least 4 ports. Then, from userspace, you must load
- a line discipline #19 on the modem (ttyS3) serial line. The simplest
- way to achieve this is to install util-linux-ng and use the included
- ldattach utility. This can be started automatically from udev,
- a simple rule like this one should do the trick (it does for me):
- ACTION=="add", KERNEL=="controlC0", \
- RUN+="/usr/sbin/ldattach 19 /dev/ttyS3"
-
-config SND_OMAP_SOC_OSK5912
- tristate "SoC Audio support for omap osk5912"
- depends on SND_SDMA_SOC && MACH_OMAP_OSK && I2C
- select SND_OMAP_SOC_MCBSP
- select SND_SOC_TLV320AIC23_I2C
- help
- Say Y if you want to add support for SoC audio on osk5912.
-
-config SND_OMAP_SOC_AM3517EVM
- tristate "SoC Audio support for OMAP3517 / AM3517 EVM"
- depends on SND_SDMA_SOC && MACH_OMAP3517EVM && I2C
- select SND_OMAP_SOC_MCBSP
- select SND_SOC_TLV320AIC23_I2C
- help
- Say Y if you want to add support for SoC audio on the OMAP3517 / AM3517
- EVM.
-
-config SND_OMAP_SOC_OMAP_TWL4030
- tristate "SoC Audio support for TI SoC based boards with twl4030 codec"
- depends on TWL4030_CORE && SND_SDMA_SOC
- select SND_OMAP_SOC_MCBSP
- select SND_SOC_TWL4030
- help
- Say Y if you want to add support for SoC audio on TI SoC based boards
- using twl4030 as c codec. This driver currently supports:
- - Beagleboard or Devkit8000
- - Gumstix Overo or CompuLab CM-T35/CM-T3730
- - IGEP v2
- - OMAP3EVM
- - SDP3430
- - Zoom2
-
-config SND_OMAP_SOC_OMAP_ABE_TWL6040
- tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec"
- depends on TWL6040_CORE && SND_SDMA_SOC && COMMON_CLK
- depends on ARCH_OMAP4 || (SOC_OMAP5 && MFD_PALMAS) || COMPILE_TEST
- select SND_OMAP_SOC_DMIC
- select SND_OMAP_SOC_MCPDM
- select SND_SOC_TWL6040
- select SND_SOC_DMIC
- select COMMON_CLK_PALMAS if (SOC_OMAP5 && MFD_PALMAS)
- select CLK_TWL6040
- help
- Say Y if you want to add support for SoC audio on OMAP boards using
- ABE and twl6040 codec. This driver currently supports:
- - SDP4430/Blaze boards
- - PandaBoard (4430)
- - PandaBoardES (4460)
- - omap5-uevm (5432)
-
-config SND_OMAP_SOC_OMAP3_PANDORA
- tristate "SoC Audio support for OMAP3 Pandora"
- depends on TWL4030_CORE && SND_SDMA_SOC && MACH_OMAP3_PANDORA
- select SND_OMAP_SOC_MCBSP
- select SND_SOC_TWL4030
- help
- Say Y if you want to add support for SoC audio on the OMAP3 Pandora.
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
deleted file mode 100644
index 53eba3413485..000000000000
--- a/sound/soc/omap/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# OMAP Platform Support
-snd-soc-sdma-objs := sdma-pcm.o
-snd-soc-omap-dmic-objs := omap-dmic.o
-snd-soc-omap-mcbsp-objs := omap-mcbsp.o mcbsp.o
-snd-soc-omap-mcpdm-objs := omap-mcpdm.o
-snd-soc-omap-hdmi-audio-objs := omap-hdmi-audio.o
-
-obj-$(CONFIG_SND_SDMA_SOC) += snd-soc-sdma.o
-obj-$(CONFIG_SND_OMAP_SOC_DMIC) += snd-soc-omap-dmic.o
-obj-$(CONFIG_SND_OMAP_SOC_MCBSP) += snd-soc-omap-mcbsp.o
-obj-$(CONFIG_SND_OMAP_SOC_MCPDM) += snd-soc-omap-mcpdm.o
-obj-$(CONFIG_SND_OMAP_SOC_HDMI_AUDIO) += snd-soc-omap-hdmi-audio.o
-
-# OMAP Machine Support
-snd-soc-n810-objs := n810.o
-snd-soc-rx51-objs := rx51.o
-snd-soc-ams-delta-objs := ams-delta.o
-snd-soc-osk5912-objs := osk5912.o
-snd-soc-am3517evm-objs := am3517evm.o
-snd-soc-omap-abe-twl6040-objs := omap-abe-twl6040.o
-snd-soc-omap-twl4030-objs := omap-twl4030.o
-snd-soc-omap3pandora-objs := omap3pandora.o
-
-obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
-obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o
-obj-$(CONFIG_SND_OMAP_SOC_AMS_DELTA) += snd-soc-ams-delta.o
-obj-$(CONFIG_SND_OMAP_SOC_OSK5912) += snd-soc-osk5912.o
-obj-$(CONFIG_SND_OMAP_SOC_AM3517EVM) += snd-soc-am3517evm.o
-obj-$(CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040) += snd-soc-omap-abe-twl6040.o
-obj-$(CONFIG_SND_OMAP_SOC_OMAP_TWL4030) += snd-soc-omap-twl4030.o
-obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
diff --git a/sound/soc/omap/am3517evm.c b/sound/soc/omap/am3517evm.c
deleted file mode 100644
index d5651026ec10..000000000000
--- a/sound/soc/omap/am3517evm.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * am3517evm.c -- ALSA SoC support for OMAP3517 / AM3517 EVM
- *
- * Author: Anuj Aggarwal <anuj.aggarwal@ti.com>
- *
- * Based on sound/soc/omap/beagle.c by Steve Sakoman
- *
- * Copyright (C) 2009 Texas Instruments Incorporated
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
- * whether express or implied; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-
-#include <asm/mach-types.h>
-#include <linux/platform_data/asoc-ti-mcbsp.h>
-
-#include "omap-mcbsp.h"
-
-#include "../codecs/tlv320aic23.h"
-
-#define CODEC_CLOCK 12000000
-
-static int am3517evm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- int ret;
-
- /* Set the codec system clock for DAC and ADC */
- ret = snd_soc_dai_set_sysclk(codec_dai, 0,
- CODEC_CLOCK, SND_SOC_CLOCK_IN);
- if (ret < 0)
- printk(KERN_ERR "can't set codec system clock\n");
-
- return ret;
-}
-
-static const struct snd_soc_ops am3517evm_ops = {
- .hw_params = am3517evm_hw_params,
-};
-
-/* am3517evm machine dapm widgets */
-static const struct snd_soc_dapm_widget tlv320aic23_dapm_widgets[] = {
- SND_SOC_DAPM_HP("Line Out", NULL),
- SND_SOC_DAPM_LINE("Line In", NULL),
- SND_SOC_DAPM_MIC("Mic In", NULL),
-};
-
-static const struct snd_soc_dapm_route audio_map[] = {
- /* Line Out connected to LLOUT, RLOUT */
- {"Line Out", NULL, "LOUT"},
- {"Line Out", NULL, "ROUT"},
-
- {"LLINEIN", NULL, "Line In"},
- {"RLINEIN", NULL, "Line In"},
-
- {"MICIN", NULL, "Mic In"},
-};
-
-/* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link am3517evm_dai = {
- .name = "TLV320AIC23",
- .stream_name = "AIC23",
- .cpu_dai_name = "omap-mcbsp.1",
- .codec_dai_name = "tlv320aic23-hifi",
- .platform_name = "omap-mcbsp.1",
- .codec_name = "tlv320aic23-codec.2-001a",
- .dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM,
- .ops = &am3517evm_ops,
-};
-
-/* Audio machine driver */
-static struct snd_soc_card snd_soc_am3517evm = {
- .name = "am3517evm",
- .owner = THIS_MODULE,
- .dai_link = &am3517evm_dai,
- .num_links = 1,
-
- .dapm_widgets = tlv320aic23_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(tlv320aic23_dapm_widgets),
- .dapm_routes = audio_map,
- .num_dapm_routes = ARRAY_SIZE(audio_map),
-};
-
-static struct platform_device *am3517evm_snd_device;
-
-static int __init am3517evm_soc_init(void)
-{
- int ret;
-
- if (!machine_is_omap3517evm())
- return -ENODEV;
- pr_info("OMAP3517 / AM3517 EVM SoC init\n");
-
- am3517evm_snd_device = platform_device_alloc("soc-audio", -1);
- if (!am3517evm_snd_device) {
- printk(KERN_ERR "Platform device allocation failed\n");
- return -ENOMEM;
- }
-
- platform_set_drvdata(am3517evm_snd_device, &snd_soc_am3517evm);
-
- ret = platform_device_add(am3517evm_snd_device);
- if (ret)
- goto err1;
-
- return 0;
-
-err1:
- printk(KERN_ERR "Unable to add platform device\n");
- platform_device_put(am3517evm_snd_device);
-
- return ret;
-}
-
-static void __exit am3517evm_soc_exit(void)
-{
- platform_device_unregister(am3517evm_snd_device);
-}
-
-module_init(am3517evm_soc_init);
-module_exit(am3517evm_soc_exit);
-
-MODULE_AUTHOR("Anuj Aggarwal <anuj.aggarwal@ti.com>");
-MODULE_DESCRIPTION("ALSA SoC OMAP3517 / AM3517 EVM");
-MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
deleted file mode 100644
index 79d4dc785e5c..000000000000
--- a/sound/soc/omap/mcbsp.c
+++ /dev/null
@@ -1,1104 +0,0 @@
-/*
- * sound/soc/omap/mcbsp.c
- *
- * Copyright (C) 2004 Nokia Corporation
- * Author: Samuel Ortiz <samuel.ortiz@nokia.com>
- *
- * Contact: Jarkko Nikula <jarkko.nikula@bitmer.com>
- * Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Multichannel mode not supported.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/pm_runtime.h>
-
-#include <linux/platform_data/asoc-ti-mcbsp.h>
-
-#include "mcbsp.h"
-
-static void omap_mcbsp_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
-{
- void __iomem *addr = mcbsp->io_base + reg * mcbsp->pdata->reg_step;
-
- if (mcbsp->pdata->reg_size == 2) {
- ((u16 *)mcbsp->reg_cache)[reg] = (u16)val;
- writew_relaxed((u16)val, addr);
- } else {
- ((u32 *)mcbsp->reg_cache)[reg] = val;
- writel_relaxed(val, addr);
- }
-}
-
-static int omap_mcbsp_read(struct omap_mcbsp *mcbsp, u16 reg, bool from_cache)
-{
- void __iomem *addr = mcbsp->io_base + reg * mcbsp->pdata->reg_step;
-
- if (mcbsp->pdata->reg_size == 2) {
- return !from_cache ? readw_relaxed(addr) :
- ((u16 *)mcbsp->reg_cache)[reg];
- } else {
- return !from_cache ? readl_relaxed(addr) :
- ((u32 *)mcbsp->reg_cache)[reg];
- }
-}
-
-static void omap_mcbsp_st_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
-{
- writel_relaxed(val, mcbsp->st_data->io_base_st + reg);
-}
-
-static int omap_mcbsp_st_read(struct omap_mcbsp *mcbsp, u16 reg)
-{
- return readl_relaxed(mcbsp->st_data->io_base_st + reg);
-}
-
-#define MCBSP_READ(mcbsp, reg) \
- omap_mcbsp_read(mcbsp, OMAP_MCBSP_REG_##reg, 0)
-#define MCBSP_WRITE(mcbsp, reg, val) \
- omap_mcbsp_write(mcbsp, OMAP_MCBSP_REG_##reg, val)
-#define MCBSP_READ_CACHE(mcbsp, reg) \
- omap_mcbsp_read(mcbsp, OMAP_MCBSP_REG_##reg, 1)
-
-#define MCBSP_ST_READ(mcbsp, reg) \
- omap_mcbsp_st_read(mcbsp, OMAP_ST_REG_##reg)
-#define MCBSP_ST_WRITE(mcbsp, reg, val) \
- omap_mcbsp_st_write(mcbsp, OMAP_ST_REG_##reg, val)
-
-static void omap_mcbsp_dump_reg(struct omap_mcbsp *mcbsp)
-{
- dev_dbg(mcbsp->dev, "**** McBSP%d regs ****\n", mcbsp->id);
- dev_dbg(mcbsp->dev, "DRR2: 0x%04x\n",
- MCBSP_READ(mcbsp, DRR2));
- dev_dbg(mcbsp->dev, "DRR1: 0x%04x\n",
- MCBSP_READ(mcbsp, DRR1));
- dev_dbg(mcbsp->dev, "DXR2: 0x%04x\n",
- MCBSP_READ(mcbsp, DXR2));
- dev_dbg(mcbsp->dev, "DXR1: 0x%04x\n",
- MCBSP_READ(mcbsp, DXR1));
- dev_dbg(mcbsp->dev, "SPCR2: 0x%04x\n",
- MCBSP_READ(mcbsp, SPCR2));
- dev_dbg(mcbsp->dev, "SPCR1: 0x%04x\n",
- MCBSP_READ(mcbsp, SPCR1));
- dev_dbg(mcbsp->dev, "RCR2: 0x%04x\n",
- MCBSP_READ(mcbsp, RCR2));
- dev_dbg(mcbsp->dev, "RCR1: 0x%04x\n",
- MCBSP_READ(mcbsp, RCR1));
- dev_dbg(mcbsp->dev, "XCR2: 0x%04x\n",
- MCBSP_READ(mcbsp, XCR2));
- dev_dbg(mcbsp->dev, "XCR1: 0x%04x\n",
- MCBSP_READ(mcbsp, XCR1));
- dev_dbg(mcbsp->dev, "SRGR2: 0x%04x\n",
- MCBSP_READ(mcbsp, SRGR2));
- dev_dbg(mcbsp->dev, "SRGR1: 0x%04x\n",
- MCBSP_READ(mcbsp, SRGR1));
- dev_dbg(mcbsp->dev, "PCR0: 0x%04x\n",
- MCBSP_READ(mcbsp, PCR0));
- dev_dbg(mcbsp->dev, "***********************\n");
-}
-
-static irqreturn_t omap_mcbsp_irq_handler(int irq, void *dev_id)
-{
- struct omap_mcbsp *mcbsp = dev_id;
- u16 irqst;
-
- irqst = MCBSP_READ(mcbsp, IRQST);
- dev_dbg(mcbsp->dev, "IRQ callback : 0x%x\n", irqst);
-
- if (irqst & RSYNCERREN)
- dev_err(mcbsp->dev, "RX Frame Sync Error!\n");
- if (irqst & RFSREN)
- dev_dbg(mcbsp->dev, "RX Frame Sync\n");
- if (irqst & REOFEN)
- dev_dbg(mcbsp->dev, "RX End Of Frame\n");
- if (irqst & RRDYEN)
- dev_dbg(mcbsp->dev, "RX Buffer Threshold Reached\n");
- if (irqst & RUNDFLEN)
- dev_err(mcbsp->dev, "RX Buffer Underflow!\n");
- if (irqst & ROVFLEN)
- dev_err(mcbsp->dev, "RX Buffer Overflow!\n");
-
- if (irqst & XSYNCERREN)
- dev_err(mcbsp->dev, "TX Frame Sync Error!\n");
- if (irqst & XFSXEN)
- dev_dbg(mcbsp->dev, "TX Frame Sync\n");
- if (irqst & XEOFEN)
- dev_dbg(mcbsp->dev, "TX End Of Frame\n");
- if (irqst & XRDYEN)
- dev_dbg(mcbsp->dev, "TX Buffer threshold Reached\n");
- if (irqst & XUNDFLEN)
- dev_err(mcbsp->dev, "TX Buffer Underflow!\n");
- if (irqst & XOVFLEN)
- dev_err(mcbsp->dev, "TX Buffer Overflow!\n");
- if (irqst & XEMPTYEOFEN)
- dev_dbg(mcbsp->dev, "TX Buffer empty at end of frame\n");
-
- MCBSP_WRITE(mcbsp, IRQST, irqst);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id)
-{
- struct omap_mcbsp *mcbsp_tx = dev_id;
- u16 irqst_spcr2;
-
- irqst_spcr2 = MCBSP_READ(mcbsp_tx, SPCR2);
- dev_dbg(mcbsp_tx->dev, "TX IRQ callback : 0x%x\n", irqst_spcr2);
-
- if (irqst_spcr2 & XSYNC_ERR) {
- dev_err(mcbsp_tx->dev, "TX Frame Sync Error! : 0x%x\n",
- irqst_spcr2);
- /* Writing zero to XSYNC_ERR clears the IRQ */
- MCBSP_WRITE(mcbsp_tx, SPCR2, MCBSP_READ_CACHE(mcbsp_tx, SPCR2));
- }
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *dev_id)
-{
- struct omap_mcbsp *mcbsp_rx = dev_id;
- u16 irqst_spcr1;
-
- irqst_spcr1 = MCBSP_READ(mcbsp_rx, SPCR1);
- dev_dbg(mcbsp_rx->dev, "RX IRQ callback : 0x%x\n", irqst_spcr1);
-
- if (irqst_spcr1 & RSYNC_ERR) {
- dev_err(mcbsp_rx->dev, "RX Frame Sync Error! : 0x%x\n",
- irqst_spcr1);
- /* Writing zero to RSYNC_ERR clears the IRQ */
- MCBSP_WRITE(mcbsp_rx, SPCR1, MCBSP_READ_CACHE(mcbsp_rx, SPCR1));
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * omap_mcbsp_config simply write a config to the
- * appropriate McBSP.
- * You either call this function or set the McBSP registers
- * by yourself before calling omap_mcbsp_start().
- */
-void omap_mcbsp_config(struct omap_mcbsp *mcbsp,
- const struct omap_mcbsp_reg_cfg *config)
-{
- dev_dbg(mcbsp->dev, "Configuring McBSP%d phys_base: 0x%08lx\n",
- mcbsp->id, mcbsp->phys_base);
-
- /* We write the given config */
- MCBSP_WRITE(mcbsp, SPCR2, config->spcr2);
- MCBSP_WRITE(mcbsp, SPCR1, config->spcr1);
- MCBSP_WRITE(mcbsp, RCR2, config->rcr2);
- MCBSP_WRITE(mcbsp, RCR1, config->rcr1);
- MCBSP_WRITE(mcbsp, XCR2, config->xcr2);
- MCBSP_WRITE(mcbsp, XCR1, config->xcr1);
- MCBSP_WRITE(mcbsp, SRGR2, config->srgr2);
- MCBSP_WRITE(mcbsp, SRGR1, config->srgr1);
- MCBSP_WRITE(mcbsp, MCR2, config->mcr2);
- MCBSP_WRITE(mcbsp, MCR1, config->mcr1);
- MCBSP_WRITE(mcbsp, PCR0, config->pcr0);
- if (mcbsp->pdata->has_ccr) {
- MCBSP_WRITE(mcbsp, XCCR, config->xccr);
- MCBSP_WRITE(mcbsp, RCCR, config->rccr);
- }
- /* Enable wakeup behavior */
- if (mcbsp->pdata->has_wakeup)
- MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN);
-
- /* Enable TX/RX sync error interrupts by default */
- if (mcbsp->irq)
- MCBSP_WRITE(mcbsp, IRQEN, RSYNCERREN | XSYNCERREN |
- RUNDFLEN | ROVFLEN | XUNDFLEN | XOVFLEN);
-}
-
-/**
- * omap_mcbsp_dma_reg_params - returns the address of mcbsp data register
- * @id - mcbsp id
- * @stream - indicates the direction of data flow (rx or tx)
- *
- * Returns the address of mcbsp data transmit register or data receive register
- * to be used by DMA for transferring/receiving data based on the value of
- * @stream for the requested mcbsp given by @id
- */
-static int omap_mcbsp_dma_reg_params(struct omap_mcbsp *mcbsp,
- unsigned int stream)
-{
- int data_reg;
-
- if (mcbsp->pdata->reg_size == 2) {
- if (stream)
- data_reg = OMAP_MCBSP_REG_DRR1;
- else
- data_reg = OMAP_MCBSP_REG_DXR1;
- } else {
- if (stream)
- data_reg = OMAP_MCBSP_REG_DRR;
- else
- data_reg = OMAP_MCBSP_REG_DXR;
- }
-
- return mcbsp->phys_dma_base + data_reg * mcbsp->pdata->reg_step;
-}
-
-static void omap_st_on(struct omap_mcbsp *mcbsp)
-{
- unsigned int w;
-
- if (mcbsp->pdata->force_ick_on)
- mcbsp->pdata->force_ick_on(mcbsp->st_data->mcbsp_iclk, true);
-
- /* Disable Sidetone clock auto-gating for normal operation */
- w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
- MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w & ~(ST_AUTOIDLE));
-
- /* Enable McBSP Sidetone */
- w = MCBSP_READ(mcbsp, SSELCR);
- MCBSP_WRITE(mcbsp, SSELCR, w | SIDETONEEN);
-
- /* Enable Sidetone from Sidetone Core */
- w = MCBSP_ST_READ(mcbsp, SSELCR);
- MCBSP_ST_WRITE(mcbsp, SSELCR, w | ST_SIDETONEEN);
-}
-
-static void omap_st_off(struct omap_mcbsp *mcbsp)
-{
- unsigned int w;
-
- w = MCBSP_ST_READ(mcbsp, SSELCR);
- MCBSP_ST_WRITE(mcbsp, SSELCR, w & ~(ST_SIDETONEEN));
-
- w = MCBSP_READ(mcbsp, SSELCR);
- MCBSP_WRITE(mcbsp, SSELCR, w & ~(SIDETONEEN));
-
- /* Enable Sidetone clock auto-gating to reduce power consumption */
- w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
- MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w | ST_AUTOIDLE);
-
- if (mcbsp->pdata->force_ick_on)
- mcbsp->pdata->force_ick_on(mcbsp->st_data->mcbsp_iclk, false);
-}
-
-static void omap_st_fir_write(struct omap_mcbsp *mcbsp, s16 *fir)
-{
- u16 val, i;
-
- val = MCBSP_ST_READ(mcbsp, SSELCR);
-
- if (val & ST_COEFFWREN)
- MCBSP_ST_WRITE(mcbsp, SSELCR, val & ~(ST_COEFFWREN));
-
- MCBSP_ST_WRITE(mcbsp, SSELCR, val | ST_COEFFWREN);
-
- for (i = 0; i < 128; i++)
- MCBSP_ST_WRITE(mcbsp, SFIRCR, fir[i]);
-
- i = 0;
-
- val = MCBSP_ST_READ(mcbsp, SSELCR);
- while (!(val & ST_COEFFWRDONE) && (++i < 1000))
- val = MCBSP_ST_READ(mcbsp, SSELCR);
-
- MCBSP_ST_WRITE(mcbsp, SSELCR, val & ~(ST_COEFFWREN));
-
- if (i == 1000)
- dev_err(mcbsp->dev, "McBSP FIR load error!\n");
-}
-
-static void omap_st_chgain(struct omap_mcbsp *mcbsp)
-{
- u16 w;
- struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
-
- w = MCBSP_ST_READ(mcbsp, SSELCR);
-
- MCBSP_ST_WRITE(mcbsp, SGAINCR, ST_CH0GAIN(st_data->ch0gain) | \
- ST_CH1GAIN(st_data->ch1gain));
-}
-
-int omap_st_set_chgain(struct omap_mcbsp *mcbsp, int channel, s16 chgain)
-{
- struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
- int ret = 0;
-
- if (!st_data)
- return -ENOENT;
-
- spin_lock_irq(&mcbsp->lock);
- if (channel == 0)
- st_data->ch0gain = chgain;
- else if (channel == 1)
- st_data->ch1gain = chgain;
- else
- ret = -EINVAL;
-
- if (st_data->enabled)
- omap_st_chgain(mcbsp);
- spin_unlock_irq(&mcbsp->lock);
-
- return ret;
-}
-
-int omap_st_get_chgain(struct omap_mcbsp *mcbsp, int channel, s16 *chgain)
-{
- struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
- int ret = 0;
-
- if (!st_data)
- return -ENOENT;
-
- spin_lock_irq(&mcbsp->lock);
- if (channel == 0)
- *chgain = st_data->ch0gain;
- else if (channel == 1)
- *chgain = st_data->ch1gain;
- else
- ret = -EINVAL;
- spin_unlock_irq(&mcbsp->lock);
-
- return ret;
-}
-
-static int omap_st_start(struct omap_mcbsp *mcbsp)
-{
- struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
-
- if (st_data->enabled && !st_data->running) {
- omap_st_fir_write(mcbsp, st_data->taps);
- omap_st_chgain(mcbsp);
-
- if (!mcbsp->free) {
- omap_st_on(mcbsp);
- st_data->running = 1;
- }
- }
-
- return 0;
-}
-
-int omap_st_enable(struct omap_mcbsp *mcbsp)
-{
- struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
-
- if (!st_data)
- return -ENODEV;
-
- spin_lock_irq(&mcbsp->lock);
- st_data->enabled = 1;
- omap_st_start(mcbsp);
- spin_unlock_irq(&mcbsp->lock);
-
- return 0;
-}
-
-static int omap_st_stop(struct omap_mcbsp *mcbsp)
-{
- struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
-
- if (st_data->running) {
- if (!mcbsp->free) {
- omap_st_off(mcbsp);
- st_data->running = 0;
- }
- }
-
- return 0;
-}
-
-int omap_st_disable(struct omap_mcbsp *mcbsp)
-{
- struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
- int ret = 0;
-
- if (!st_data)
- return -ENODEV;
-
- spin_lock_irq(&mcbsp->lock);
- omap_st_stop(mcbsp);
- st_data->enabled = 0;
- spin_unlock_irq(&mcbsp->lock);
-
- return ret;
-}
-
-int omap_st_is_enabled(struct omap_mcbsp *mcbsp)
-{
- struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
-
- if (!st_data)
- return -ENODEV;
-
- return st_data->enabled;
-}
-
-/*
- * omap_mcbsp_set_rx_threshold configures the transmit threshold in words.
- * The threshold parameter is 1 based, and it is converted (threshold - 1)
- * for the THRSH2 register.
- */
-void omap_mcbsp_set_tx_threshold(struct omap_mcbsp *mcbsp, u16 threshold)
-{
- if (mcbsp->pdata->buffer_size == 0)
- return;
-
- if (threshold && threshold <= mcbsp->max_tx_thres)
- MCBSP_WRITE(mcbsp, THRSH2, threshold - 1);
-}
-
-/*
- * omap_mcbsp_set_rx_threshold configures the receive threshold in words.
- * The threshold parameter is 1 based, and it is converted (threshold - 1)
- * for the THRSH1 register.
- */
-void omap_mcbsp_set_rx_threshold(struct omap_mcbsp *mcbsp, u16 threshold)
-{
- if (mcbsp->pdata->buffer_size == 0)
- return;
-
- if (threshold && threshold <= mcbsp->max_rx_thres)
- MCBSP_WRITE(mcbsp, THRSH1, threshold - 1);
-}
-
-/*
- * omap_mcbsp_get_tx_delay returns the number of used slots in the McBSP FIFO
- */
-u16 omap_mcbsp_get_tx_delay(struct omap_mcbsp *mcbsp)
-{
- u16 buffstat;
-
- if (mcbsp->pdata->buffer_size == 0)
- return 0;
-
- /* Returns the number of free locations in the buffer */
- buffstat = MCBSP_READ(mcbsp, XBUFFSTAT);
-
- /* Number of slots are different in McBSP ports */
- return mcbsp->pdata->buffer_size - buffstat;
-}
-
-/*
- * omap_mcbsp_get_rx_delay returns the number of free slots in the McBSP FIFO
- * to reach the threshold value (when the DMA will be triggered to read it)
- */
-u16 omap_mcbsp_get_rx_delay(struct omap_mcbsp *mcbsp)
-{
- u16 buffstat, threshold;
-
- if (mcbsp->pdata->buffer_size == 0)
- return 0;
-
- /* Returns the number of used locations in the buffer */
- buffstat = MCBSP_READ(mcbsp, RBUFFSTAT);
- /* RX threshold */
- threshold = MCBSP_READ(mcbsp, THRSH1);
-
- /* Return the number of location till we reach the threshold limit */
- if (threshold <= buffstat)
- return 0;
- else
- return threshold - buffstat;
-}
-
-int omap_mcbsp_request(struct omap_mcbsp *mcbsp)
-{
- void *reg_cache;
- int err;
-
- reg_cache = kzalloc(mcbsp->reg_cache_size, GFP_KERNEL);
- if (!reg_cache) {
- return -ENOMEM;
- }
-
- spin_lock(&mcbsp->lock);
- if (!mcbsp->free) {
- dev_err(mcbsp->dev, "McBSP%d is currently in use\n",
- mcbsp->id);
- err = -EBUSY;
- goto err_kfree;
- }
-
- mcbsp->free = false;
- mcbsp->reg_cache = reg_cache;
- spin_unlock(&mcbsp->lock);
-
- if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->request)
- mcbsp->pdata->ops->request(mcbsp->id - 1);
-
- /*
- * Make sure that transmitter, receiver and sample-rate generator are
- * not running before activating IRQs.
- */
- MCBSP_WRITE(mcbsp, SPCR1, 0);
- MCBSP_WRITE(mcbsp, SPCR2, 0);
-
- if (mcbsp->irq) {
- err = request_irq(mcbsp->irq, omap_mcbsp_irq_handler, 0,
- "McBSP", (void *)mcbsp);
- if (err != 0) {
- dev_err(mcbsp->dev, "Unable to request IRQ\n");
- goto err_clk_disable;
- }
- } else {
- err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler, 0,
- "McBSP TX", (void *)mcbsp);
- if (err != 0) {
- dev_err(mcbsp->dev, "Unable to request TX IRQ\n");
- goto err_clk_disable;
- }
-
- err = request_irq(mcbsp->rx_irq, omap_mcbsp_rx_irq_handler, 0,
- "McBSP RX", (void *)mcbsp);
- if (err != 0) {
- dev_err(mcbsp->dev, "Unable to request RX IRQ\n");
- goto err_free_irq;
- }
- }
-
- return 0;
-err_free_irq:
- free_irq(mcbsp->tx_irq, (void *)mcbsp);
-err_clk_disable:
- if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free)
- mcbsp->pdata->ops->free(mcbsp->id - 1);
-
- /* Disable wakeup behavior */
- if (mcbsp->pdata->has_wakeup)
- MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
-
- spin_lock(&mcbsp->lock);
- mcbsp->free = true;
- mcbsp->reg_cache = NULL;
-err_kfree:
- spin_unlock(&mcbsp->lock);
- kfree(reg_cache);
-
- return err;
-}
-
-void omap_mcbsp_free(struct omap_mcbsp *mcbsp)
-{
- void *reg_cache;
-
- if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free)
- mcbsp->pdata->ops->free(mcbsp->id - 1);
-
- /* Disable wakeup behavior */
- if (mcbsp->pdata->has_wakeup)
- MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
-
- /* Disable interrupt requests */
- if (mcbsp->irq)
- MCBSP_WRITE(mcbsp, IRQEN, 0);
-
- if (mcbsp->irq) {
- free_irq(mcbsp->irq, (void *)mcbsp);
- } else {
- free_irq(mcbsp->rx_irq, (void *)mcbsp);
- free_irq(mcbsp->tx_irq, (void *)mcbsp);
- }
-
- reg_cache = mcbsp->reg_cache;
-
- /*
- * Select the CLKS source from the internal source unconditionally before
- * marking the McBSP port as free.
- * If the external clock source via the MCBSP_CLKS pin has been selected,
- * the system will refuse to enter idle unless the CLKS pin source is
- * reset back to the internal source.
- */
- if (!mcbsp_omap1())
- omap2_mcbsp_set_clks_src(mcbsp, MCBSP_CLKS_PRCM_SRC);
-
- spin_lock(&mcbsp->lock);
- if (mcbsp->free)
- dev_err(mcbsp->dev, "McBSP%d was not reserved\n", mcbsp->id);
- else
- mcbsp->free = true;
- mcbsp->reg_cache = NULL;
- spin_unlock(&mcbsp->lock);
-
- kfree(reg_cache);
-}
-
-/*
- * Here we start the McBSP by enabling the transmitter, the receiver or both.
- * If no transmitter or receiver is active prior to calling, then the
- * sample-rate generator and frame sync are started.
- */
-void omap_mcbsp_start(struct omap_mcbsp *mcbsp, int tx, int rx)
-{
- int enable_srg = 0;
- u16 w;
-
- if (mcbsp->st_data)
- omap_st_start(mcbsp);
-
- /* Only enable SRG, if McBSP is master */
- w = MCBSP_READ_CACHE(mcbsp, PCR0);
- if (w & (FSXM | FSRM | CLKXM | CLKRM))
- enable_srg = !((MCBSP_READ_CACHE(mcbsp, SPCR2) |
- MCBSP_READ_CACHE(mcbsp, SPCR1)) & 1);
-
- if (enable_srg) {
- /* Start the sample generator */
- w = MCBSP_READ_CACHE(mcbsp, SPCR2);
- MCBSP_WRITE(mcbsp, SPCR2, w | (1 << 6));
- }
-
- /* Enable transmitter and receiver */
- tx &= 1;
- w = MCBSP_READ_CACHE(mcbsp, SPCR2);
- MCBSP_WRITE(mcbsp, SPCR2, w | tx);
-
- rx &= 1;
- w = MCBSP_READ_CACHE(mcbsp, SPCR1);
- MCBSP_WRITE(mcbsp, SPCR1, w | rx);
-
- /*
- * Worst case: CLKSRG*2 = 8000khz: (1/8000) * 2 * 2 usec
- * REVISIT: 100us may give enough time for two CLKSRG cycles, however
- * due to some unknown PM-related, clock-gating etc. reason it
- * is now set to 500us.
- */
- udelay(500);
-
- if (enable_srg) {
- /* Start frame sync */
- w = MCBSP_READ_CACHE(mcbsp, SPCR2);
- MCBSP_WRITE(mcbsp, SPCR2, w | (1 << 7));
- }
-
- if (mcbsp->pdata->has_ccr) {
- /* Release the transmitter and receiver */
- w = MCBSP_READ_CACHE(mcbsp, XCCR);
- w &= ~(tx ? XDISABLE : 0);
- MCBSP_WRITE(mcbsp, XCCR, w);
- w = MCBSP_READ_CACHE(mcbsp, RCCR);
- w &= ~(rx ? RDISABLE : 0);
- MCBSP_WRITE(mcbsp, RCCR, w);
- }
-
- /* Dump McBSP Regs */
- omap_mcbsp_dump_reg(mcbsp);
-}
-
-void omap_mcbsp_stop(struct omap_mcbsp *mcbsp, int tx, int rx)
-{
- int idle;
- u16 w;
-
- /* Reset transmitter */
- tx &= 1;
- if (mcbsp->pdata->has_ccr) {
- w = MCBSP_READ_CACHE(mcbsp, XCCR);
- w |= (tx ? XDISABLE : 0);
- MCBSP_WRITE(mcbsp, XCCR, w);
- }
- w = MCBSP_READ_CACHE(mcbsp, SPCR2);
- MCBSP_WRITE(mcbsp, SPCR2, w & ~tx);
-
- /* Reset receiver */
- rx &= 1;
- if (mcbsp->pdata->has_ccr) {
- w = MCBSP_READ_CACHE(mcbsp, RCCR);
- w |= (rx ? RDISABLE : 0);
- MCBSP_WRITE(mcbsp, RCCR, w);
- }
- w = MCBSP_READ_CACHE(mcbsp, SPCR1);
- MCBSP_WRITE(mcbsp, SPCR1, w & ~rx);
-
- idle = !((MCBSP_READ_CACHE(mcbsp, SPCR2) |
- MCBSP_READ_CACHE(mcbsp, SPCR1)) & 1);
-
- if (idle) {
- /* Reset the sample rate generator */
- w = MCBSP_READ_CACHE(mcbsp, SPCR2);
- MCBSP_WRITE(mcbsp, SPCR2, w & ~(1 << 6));
- }
-
- if (mcbsp->st_data)
- omap_st_stop(mcbsp);
-}
-
-int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
-{
- struct clk *fck_src;
- const char *src;
- int r;
-
- if (fck_src_id == MCBSP_CLKS_PAD_SRC)
- src = "pad_fck";
- else if (fck_src_id == MCBSP_CLKS_PRCM_SRC)
- src = "prcm_fck";
- else
- return -EINVAL;
-
- fck_src = clk_get(mcbsp->dev, src);
- if (IS_ERR(fck_src)) {
- dev_err(mcbsp->dev, "CLKS: could not clk_get() %s\n", src);
- return -EINVAL;
- }
-
- pm_runtime_put_sync(mcbsp->dev);
-
- r = clk_set_parent(mcbsp->fclk, fck_src);
- if (r) {
- dev_err(mcbsp->dev, "CLKS: could not clk_set_parent() to %s\n",
- src);
- clk_put(fck_src);
- return r;
- }
-
- pm_runtime_get_sync(mcbsp->dev);
-
- clk_put(fck_src);
-
- return 0;
-
-}
-
-#define max_thres(m) (mcbsp->pdata->buffer_size)
-#define valid_threshold(m, val) ((val) <= max_thres(m))
-#define THRESHOLD_PROP_BUILDER(prop) \
-static ssize_t prop##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
- \
- return sprintf(buf, "%u\n", mcbsp->prop); \
-} \
- \
-static ssize_t prop##_store(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t size) \
-{ \
- struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
- unsigned long val; \
- int status; \
- \
- status = kstrtoul(buf, 0, &val); \
- if (status) \
- return status; \
- \
- if (!valid_threshold(mcbsp, val)) \
- return -EDOM; \
- \
- mcbsp->prop = val; \
- return size; \
-} \
- \
-static DEVICE_ATTR(prop, 0644, prop##_show, prop##_store);
-
-THRESHOLD_PROP_BUILDER(max_tx_thres);
-THRESHOLD_PROP_BUILDER(max_rx_thres);
-
-static const char *dma_op_modes[] = {
- "element", "threshold",
-};
-
-static ssize_t dma_op_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
- int dma_op_mode, i = 0;
- ssize_t len = 0;
- const char * const *s;
-
- dma_op_mode = mcbsp->dma_op_mode;
-
- for (s = &dma_op_modes[i]; i < ARRAY_SIZE(dma_op_modes); s++, i++) {
- if (dma_op_mode == i)
- len += sprintf(buf + len, "[%s] ", *s);
- else
- len += sprintf(buf + len, "%s ", *s);
- }
- len += sprintf(buf + len, "\n");
-
- return len;
-}
-
-static ssize_t dma_op_mode_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
- int i;
-
- i = sysfs_match_string(dma_op_modes, buf);
- if (i < 0)
- return i;
-
- spin_lock_irq(&mcbsp->lock);
- if (!mcbsp->free) {
- size = -EBUSY;
- goto unlock;
- }
- mcbsp->dma_op_mode = i;
-
-unlock:
- spin_unlock_irq(&mcbsp->lock);
-
- return size;
-}
-
-static DEVICE_ATTR_RW(dma_op_mode);
-
-static const struct attribute *additional_attrs[] = {
- &dev_attr_max_tx_thres.attr,
- &dev_attr_max_rx_thres.attr,
- &dev_attr_dma_op_mode.attr,
- NULL,
-};
-
-static const struct attribute_group additional_attr_group = {
- .attrs = (struct attribute **)additional_attrs,
-};
-
-static ssize_t st_taps_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
- struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
- ssize_t status = 0;
- int i;
-
- spin_lock_irq(&mcbsp->lock);
- for (i = 0; i < st_data->nr_taps; i++)
- status += sprintf(&buf[status], (i ? ", %d" : "%d"),
- st_data->taps[i]);
- if (i)
- status += sprintf(&buf[status], "\n");
- spin_unlock_irq(&mcbsp->lock);
-
- return status;
-}
-
-static ssize_t st_taps_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
- struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
- int val, tmp, status, i = 0;
-
- spin_lock_irq(&mcbsp->lock);
- memset(st_data->taps, 0, sizeof(st_data->taps));
- st_data->nr_taps = 0;
-
- do {
- status = sscanf(buf, "%d%n", &val, &tmp);
- if (status <= 0) {
- size = -EINVAL;
- goto out;
- }
- if (val < -32768 || val > 32767) {
- size = -EINVAL;
- goto out;
- }
- st_data->taps[i++] = val;
- buf += tmp;
- if (*buf != ',')
- break;
- buf++;
- } while (1);
-
- st_data->nr_taps = i;
-
-out:
- spin_unlock_irq(&mcbsp->lock);
-
- return size;
-}
-
-static DEVICE_ATTR_RW(st_taps);
-
-static const struct attribute *sidetone_attrs[] = {
- &dev_attr_st_taps.attr,
- NULL,
-};
-
-static const struct attribute_group sidetone_attr_group = {
- .attrs = (struct attribute **)sidetone_attrs,
-};
-
-static int omap_st_add(struct omap_mcbsp *mcbsp, struct resource *res)
-{
- struct omap_mcbsp_st_data *st_data;
- int err;
-
- st_data = devm_kzalloc(mcbsp->dev, sizeof(*mcbsp->st_data), GFP_KERNEL);
- if (!st_data)
- return -ENOMEM;
-
- st_data->mcbsp_iclk = clk_get(mcbsp->dev, "ick");
- if (IS_ERR(st_data->mcbsp_iclk)) {
- dev_warn(mcbsp->dev,
- "Failed to get ick, sidetone might be broken\n");
- st_data->mcbsp_iclk = NULL;
- }
-
- st_data->io_base_st = devm_ioremap(mcbsp->dev, res->start,
- resource_size(res));
- if (!st_data->io_base_st)
- return -ENOMEM;
-
- err = sysfs_create_group(&mcbsp->dev->kobj, &sidetone_attr_group);
- if (err)
- return err;
-
- mcbsp->st_data = st_data;
- return 0;
-}
-
-/*
- * McBSP1 and McBSP3 are directly mapped on 1610 and 1510.
- * 730 has only 2 McBSPs, and both of them are MPU peripherals.
- */
-int omap_mcbsp_init(struct platform_device *pdev)
-{
- struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
- struct resource *res;
- int ret = 0;
-
- spin_lock_init(&mcbsp->lock);
- mcbsp->free = true;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
- if (!res)
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- mcbsp->io_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mcbsp->io_base))
- return PTR_ERR(mcbsp->io_base);
-
- mcbsp->phys_base = res->start;
- mcbsp->reg_cache_size = resource_size(res);
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
- if (!res)
- mcbsp->phys_dma_base = mcbsp->phys_base;
- else
- mcbsp->phys_dma_base = res->start;
-
- /*
- * OMAP1 and OMAP2 use two interrupt lines: TX and RX.
- * OMAP2430 and OMAP3 SoCs have a combined IRQ line as well.
- * OMAP4 and newer SoCs only have the combined IRQ line.
- * Use the combined IRQ if available, since it gives better debugging
- * possibilities.
- */
- mcbsp->irq = platform_get_irq_byname(pdev, "common");
- if (mcbsp->irq == -ENXIO) {
- mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
-
- if (mcbsp->tx_irq == -ENXIO) {
- mcbsp->irq = platform_get_irq(pdev, 0);
- mcbsp->tx_irq = 0;
- } else {
- mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
- mcbsp->irq = 0;
- }
- }
-
- if (!pdev->dev.of_node) {
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
- if (!res) {
- dev_err(&pdev->dev, "invalid tx DMA channel\n");
- return -ENODEV;
- }
- mcbsp->dma_req[0] = res->start;
- mcbsp->dma_data[0].filter_data = &mcbsp->dma_req[0];
-
- res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
- if (!res) {
- dev_err(&pdev->dev, "invalid rx DMA channel\n");
- return -ENODEV;
- }
- mcbsp->dma_req[1] = res->start;
- mcbsp->dma_data[1].filter_data = &mcbsp->dma_req[1];
- } else {
- mcbsp->dma_data[0].filter_data = "tx";
- mcbsp->dma_data[1].filter_data = "rx";
- }
-
- mcbsp->dma_data[0].addr = omap_mcbsp_dma_reg_params(mcbsp, 0);
- mcbsp->dma_data[0].maxburst = 4;
-
- mcbsp->dma_data[1].addr = omap_mcbsp_dma_reg_params(mcbsp, 1);
- mcbsp->dma_data[1].maxburst = 4;
-
- mcbsp->fclk = clk_get(&pdev->dev, "fck");
- if (IS_ERR(mcbsp->fclk)) {
- ret = PTR_ERR(mcbsp->fclk);
- dev_err(mcbsp->dev, "unable to get fck: %d\n", ret);
- return ret;
- }
-
- mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT;
- if (mcbsp->pdata->buffer_size) {
- /*
- * Initially configure the maximum thresholds to a safe value.
- * The McBSP FIFO usage with these values should not go under
- * 16 locations.
- * If the whole FIFO without safety buffer is used, then there
- * is a possibility that the DMA will not be able to push the
- * new data in time, causing channel shifts at runtime.
- */
- mcbsp->max_tx_thres = max_thres(mcbsp) - 0x10;
- mcbsp->max_rx_thres = max_thres(mcbsp) - 0x10;
-
- ret = sysfs_create_group(&mcbsp->dev->kobj,
- &additional_attr_group);
- if (ret) {
- dev_err(mcbsp->dev,
- "Unable to create additional controls\n");
- goto err_thres;
- }
- } else {
- mcbsp->max_tx_thres = -EINVAL;
- mcbsp->max_rx_thres = -EINVAL;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sidetone");
- if (res) {
- ret = omap_st_add(mcbsp, res);
- if (ret) {
- dev_err(mcbsp->dev,
- "Unable to create sidetone controls\n");
- goto err_st;
- }
- }
-
- return 0;
-
-err_st:
- if (mcbsp->pdata->buffer_size)
- sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group);
-err_thres:
- clk_put(mcbsp->fclk);
- return ret;
-}
-
-void omap_mcbsp_cleanup(struct omap_mcbsp *mcbsp)
-{
- if (mcbsp->pdata->buffer_size)
- sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group);
-
- if (mcbsp->st_data) {
- sysfs_remove_group(&mcbsp->dev->kobj, &sidetone_attr_group);
- clk_put(mcbsp->st_data->mcbsp_iclk);
- }
-}
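
For reference, the THRESHOLD_PROP_BUILDER macro deleted above stamps out a matched sysfs show/store pair plus a DEVICE_ATTR for each threshold property. A minimal sketch of the same pattern expanded by hand (the "demo" names and the limit field are illustrative stand-ins, not part of the driver):

#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical per-device state; stands in for struct omap_mcbsp. */
struct demo_data {
	unsigned int max_thres;	/* the exported property */
	unsigned int limit;	/* upper bound, cf. max_thres(mcbsp) */
};

static ssize_t max_thres_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct demo_data *d = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", d->max_thres);
}

static ssize_t max_thres_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	struct demo_data *d = dev_get_drvdata(dev);
	unsigned long val;
	int status;

	status = kstrtoul(buf, 0, &val);
	if (status)
		return status;

	if (val > d->limit)
		return -EDOM;	/* out of range, as in the driver */

	d->max_thres = val;
	return size;
}
static DEVICE_ATTR_RW(max_thres);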
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 943b44de1464..67159a6b90a8 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -79,7 +79,7 @@ config SND_PXA2XX_SOC_TOSA
tristate "SoC AC97 Audio support for Tosa"
depends on SND_PXA2XX_SOC && MACH_TOSA
depends on MFD_TC6393XB
- depends on !AC97_BUS
+ depends on AC97_BUS=n
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -89,7 +89,7 @@ config SND_PXA2XX_SOC_TOSA
config SND_PXA2XX_SOC_E740
tristate "SoC AC97 Audio support for e740"
depends on SND_PXA2XX_SOC && MACH_E740
- depends on !AC97_BUS
+ depends on AC97_BUS=n
select SND_SOC_WM9705
select SND_PXA2XX_SOC_AC97
help
@@ -99,7 +99,7 @@ config SND_PXA2XX_SOC_E740
config SND_PXA2XX_SOC_E750
tristate "SoC AC97 Audio support for e750"
depends on SND_PXA2XX_SOC && MACH_E750
- depends on !AC97_BUS
+ depends on AC97_BUS=n
select SND_SOC_WM9705
select SND_PXA2XX_SOC_AC97
help
@@ -109,7 +109,7 @@ config SND_PXA2XX_SOC_E750
config SND_PXA2XX_SOC_E800
tristate "SoC AC97 Audio support for e800"
depends on SND_PXA2XX_SOC && MACH_E800
- depends on !AC97_BUS
+ depends on AC97_BUS=n
select SND_SOC_WM9712
select SND_PXA2XX_SOC_AC97
help
@@ -120,7 +120,7 @@ config SND_PXA2XX_SOC_EM_X270
tristate "SoC Audio support for CompuLab EM-x270, eXeda and CM-X300"
depends on SND_PXA2XX_SOC && (MACH_EM_X270 || MACH_EXEDA || \
MACH_CM_X300)
- depends on !AC97_BUS
+ depends on AC97_BUS=n
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -131,7 +131,7 @@ config SND_PXA2XX_SOC_PALM27X
bool "SoC Audio support for Palm T|X, T5, E2 and LifeDrive"
depends on SND_PXA2XX_SOC && (MACH_PALMLD || MACH_PALMTX || \
MACH_PALMT5 || MACH_PALMTE2)
- depends on !AC97_BUS
+ depends on AC97_BUS=n
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -161,7 +161,7 @@ config SND_SOC_TTC_DKB
config SND_SOC_ZYLONITE
tristate "SoC Audio support for Marvell Zylonite"
depends on SND_PXA2XX_SOC && MACH_ZYLONITE
- depends on !AC97_BUS
+ depends on AC97_BUS=n
select SND_PXA2XX_SOC_AC97
select SND_PXA_SOC_SSP
select SND_SOC_WM9713
@@ -169,16 +169,6 @@ config SND_SOC_ZYLONITE
Say Y if you want to add support for SoC audio on the
Marvell Zylonite reference platform.
-config SND_SOC_RAUMFELD
- tristate "SoC Audio support Raumfeld audio adapter"
- depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR)
- depends on I2C && SPI_MASTER
- select SND_PXA_SOC_SSP
- select SND_SOC_CS4270
- select SND_SOC_AK4104
- help
- Say Y if you want to add support for SoC audio on Raumfeld devices
-
config SND_PXA2XX_SOC_HX4700
tristate "SoC Audio support for HP iPAQ hx4700"
depends on SND_PXA2XX_SOC && MACH_H4700 && I2C
@@ -201,7 +191,7 @@ config SND_PXA2XX_SOC_MAGICIAN
config SND_PXA2XX_SOC_MIOA701
tristate "SoC Audio support for MIO A701"
depends on SND_PXA2XX_SOC && MACH_MIOA701
- depends on !AC97_BUS
+ depends on AC97_BUS=n
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9713
help
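
A note on the recurring Kconfig change above (background, not part of the patch itself): in Kconfig's tristate logic n=0, m=1, y=2 and !x evaluates to (2 - x), so "depends on !AC97_BUS" is still satisfiable when AC97_BUS=m, whereas "depends on AC97_BUS=n" only passes when the symbol is fully disabled. A toy C model of that negation:

/* Toy model of Kconfig tristate negation; illustrative only. */
enum tristate { N = 0, M = 1, Y = 2 };

static enum tristate tri_not(enum tristate x)
{
	return (enum tristate)(2 - x);	/* !n = y, !m = m, !y = n */
}

/*
 * "depends on !AC97_BUS" still passes for AC97_BUS=m (tri_not(M) == M),
 * so these drivers could be enabled alongside a modular AC97 bus;
 * "depends on AC97_BUS=n" requires the symbol to be off entirely.
 */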
diff --git a/sound/soc/pxa/Makefile b/sound/soc/pxa/Makefile
index 5b265662f04f..0ab2a9dcb720 100644
--- a/sound/soc/pxa/Makefile
+++ b/sound/soc/pxa/Makefile
@@ -49,6 +49,5 @@ obj-$(CONFIG_SND_PXA2XX_SOC_MIOA701) += snd-soc-mioa701.o
obj-$(CONFIG_SND_PXA2XX_SOC_Z2) += snd-soc-z2.o
obj-$(CONFIG_SND_SOC_ZYLONITE) += snd-soc-zylonite.o
obj-$(CONFIG_SND_PXA2XX_SOC_IMOTE2) += snd-soc-imote2.o
-obj-$(CONFIG_SND_SOC_RAUMFELD) += snd-soc-raumfeld.o
obj-$(CONFIG_SND_MMP_SOC_BROWNSTONE) += snd-soc-brownstone.o
obj-$(CONFIG_SND_SOC_TTC_DKB) += snd-soc-ttc-dkb.o
diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
deleted file mode 100644
index 111a907c4eb9..000000000000
--- a/sound/soc/pxa/raumfeld.c
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * raumfeld_audio.c -- SoC audio for Raumfeld audio devices
- *
- * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
- *
- * based on code from:
- *
- * Wolfson Microelectronics PLC.
- * Openedhand Ltd.
- * Liam Girdwood <lrg@slimlogic.co.uk>
- * Richard Purdie <richard@openedhand.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-
-#include <asm/mach-types.h>
-
-#include "pxa-ssp.h"
-
-#define GPIO_SPDIF_RESET (38)
-#define GPIO_MCLK_RESET (111)
-#define GPIO_CODEC_RESET (120)
-
-static struct i2c_client *max9486_client;
-static struct i2c_board_info max9486_hwmon_info = {
- I2C_BOARD_INFO("max9485", 0x63),
-};
-
-#define MAX9485_MCLK_FREQ_112896 0x22
-#define MAX9485_MCLK_FREQ_122880 0x23
-#define MAX9485_MCLK_FREQ_225792 0x32
-#define MAX9485_MCLK_FREQ_245760 0x33
-
-static void set_max9485_clk(char clk)
-{
- i2c_master_send(max9486_client, &clk, 1);
-}
-
-static void raumfeld_enable_audio(bool en)
-{
- if (en) {
- gpio_set_value(GPIO_MCLK_RESET, 1);
-
- /* wait some time to let the clocks become stable */
- msleep(100);
-
- gpio_set_value(GPIO_SPDIF_RESET, 1);
- gpio_set_value(GPIO_CODEC_RESET, 1);
- } else {
- gpio_set_value(GPIO_MCLK_RESET, 0);
- gpio_set_value(GPIO_SPDIF_RESET, 0);
- gpio_set_value(GPIO_CODEC_RESET, 0);
- }
-}
-
-/* CS4270 */
-static int raumfeld_cs4270_startup(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
-
- /* set freq to 0 to enable all possible codec sample rates */
- return snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0);
-}
-
-static void raumfeld_cs4270_shutdown(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
-
- /* set freq to 0 to enable all possible codec sample rates */
- snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0);
-}
-
-static int raumfeld_cs4270_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- unsigned int clk = 0;
- int ret = 0;
-
- switch (params_rate(params)) {
- case 44100:
- set_max9485_clk(MAX9485_MCLK_FREQ_112896);
- clk = 11289600;
- break;
- case 48000:
- set_max9485_clk(MAX9485_MCLK_FREQ_122880);
- clk = 12288000;
- break;
- case 88200:
- set_max9485_clk(MAX9485_MCLK_FREQ_225792);
- clk = 22579200;
- break;
- case 96000:
- set_max9485_clk(MAX9485_MCLK_FREQ_245760);
- clk = 24576000;
- break;
- default:
- return -EINVAL;
- }
-
- ret = snd_soc_dai_set_sysclk(codec_dai, 0, clk, 0);
- if (ret < 0)
- return ret;
-
- /* setup the CPU DAI */
- ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, clk);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_clkdiv(cpu_dai, PXA_SSP_DIV_SCR, 4);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_EXT, clk, 1);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static const struct snd_soc_ops raumfeld_cs4270_ops = {
- .startup = raumfeld_cs4270_startup,
- .shutdown = raumfeld_cs4270_shutdown,
- .hw_params = raumfeld_cs4270_hw_params,
-};
-
-static int raumfeld_analog_suspend(struct snd_soc_card *card)
-{
- raumfeld_enable_audio(false);
- return 0;
-}
-
-static int raumfeld_analog_resume(struct snd_soc_card *card)
-{
- raumfeld_enable_audio(true);
- return 0;
-}
-
-/* AK4104 */
-
-static int raumfeld_ak4104_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0, clk = 0;
-
- switch (params_rate(params)) {
- case 44100:
- set_max9485_clk(MAX9485_MCLK_FREQ_112896);
- clk = 11289600;
- break;
- case 48000:
- set_max9485_clk(MAX9485_MCLK_FREQ_122880);
- clk = 12288000;
- break;
- case 88200:
- set_max9485_clk(MAX9485_MCLK_FREQ_225792);
- clk = 22579200;
- break;
- case 96000:
- set_max9485_clk(MAX9485_MCLK_FREQ_245760);
- clk = 24576000;
- break;
- default:
- return -EINVAL;
- }
-
- /* setup the CPU DAI */
- ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, clk);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_clkdiv(cpu_dai, PXA_SSP_DIV_SCR, 4);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_EXT, clk, 1);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static struct snd_soc_ops raumfeld_ak4104_ops = {
- .hw_params = raumfeld_ak4104_hw_params,
-};
-
-#define DAI_LINK_CS4270 \
-{ \
- .name = "CS4270", \
- .stream_name = "CS4270", \
- .cpu_dai_name = "pxa-ssp-dai.0", \
- .platform_name = "pxa-pcm-audio", \
- .codec_dai_name = "cs4270-hifi", \
- .codec_name = "cs4270.0-0048", \
- .dai_fmt = SND_SOC_DAIFMT_I2S | \
- SND_SOC_DAIFMT_NB_NF | \
- SND_SOC_DAIFMT_CBS_CFS, \
- .ops = &raumfeld_cs4270_ops, \
-}
-
-#define DAI_LINK_AK4104 \
-{ \
- .name = "ak4104", \
- .stream_name = "Playback", \
- .cpu_dai_name = "pxa-ssp-dai.1", \
- .codec_dai_name = "ak4104-hifi", \
- .platform_name = "pxa-pcm-audio", \
- .dai_fmt = SND_SOC_DAIFMT_I2S | \
- SND_SOC_DAIFMT_NB_NF | \
- SND_SOC_DAIFMT_CBS_CFS, \
- .ops = &raumfeld_ak4104_ops, \
- .codec_name = "spi0.0", \
-}
-
-static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] = {
- DAI_LINK_CS4270,
- DAI_LINK_AK4104,
-};
-
-static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] = {
- DAI_LINK_CS4270,
-};
-
-static struct snd_soc_card snd_soc_raumfeld_connector = {
- .name = "Raumfeld Connector",
- .owner = THIS_MODULE,
- .dai_link = snd_soc_raumfeld_connector_dai,
- .num_links = ARRAY_SIZE(snd_soc_raumfeld_connector_dai),
- .suspend_post = raumfeld_analog_suspend,
- .resume_pre = raumfeld_analog_resume,
-};
-
-static struct snd_soc_card snd_soc_raumfeld_speaker = {
- .name = "Raumfeld Speaker",
- .owner = THIS_MODULE,
- .dai_link = snd_soc_raumfeld_speaker_dai,
- .num_links = ARRAY_SIZE(snd_soc_raumfeld_speaker_dai),
- .suspend_post = raumfeld_analog_suspend,
- .resume_pre = raumfeld_analog_resume,
-};
-
-static struct platform_device *raumfeld_audio_device;
-
-static int __init raumfeld_audio_init(void)
-{
- int ret;
-
- if (!machine_is_raumfeld_speaker() &&
- !machine_is_raumfeld_connector())
- return 0;
-
- max9486_client = i2c_new_device(i2c_get_adapter(0),
- &max9486_hwmon_info);
-
- if (!max9486_client)
- return -ENOMEM;
-
- set_max9485_clk(MAX9485_MCLK_FREQ_122880);
-
- /* Register analog device */
- raumfeld_audio_device = platform_device_alloc("soc-audio", 0);
- if (!raumfeld_audio_device)
- return -ENOMEM;
-
- if (machine_is_raumfeld_speaker())
- platform_set_drvdata(raumfeld_audio_device,
- &snd_soc_raumfeld_speaker);
-
- if (machine_is_raumfeld_connector())
- platform_set_drvdata(raumfeld_audio_device,
- &snd_soc_raumfeld_connector);
-
- ret = platform_device_add(raumfeld_audio_device);
- if (ret < 0) {
- platform_device_put(raumfeld_audio_device);
- return ret;
- }
-
- raumfeld_enable_audio(true);
- return 0;
-}
-
-static void __exit raumfeld_audio_exit(void)
-{
- raumfeld_enable_audio(false);
-
- platform_device_unregister(raumfeld_audio_device);
-
- i2c_unregister_device(max9486_client);
-
- gpio_free(GPIO_MCLK_RESET);
- gpio_free(GPIO_CODEC_RESET);
- gpio_free(GPIO_SPDIF_RESET);
-}
-
-module_init(raumfeld_audio_init);
-module_exit(raumfeld_audio_exit);
-
-/* Module information */
-MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
-MODULE_DESCRIPTION("Raumfeld audio SoC");
-MODULE_LICENSE("GPL");
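
The hw_params callbacks in the driver deleted above select the MAX9485 register value and then derive the sysclk as 256 x fs for every supported rate. A compact restatement of that mapping (an illustrative helper, not the original code):

/* Each supported rate uses MCLK = 256 * fs. */
static unsigned int raumfeld_mclk_for_rate(unsigned int rate)
{
	switch (rate) {
	case 44100:	return 11289600;	/* 256 * 44100 */
	case 48000:	return 12288000;	/* 256 * 48000 */
	case 88200:	return 22579200;	/* 256 * 88200 */
	case 96000:	return 24576000;	/* 256 * 96000 */
	default:	return 0;		/* unsupported rate */
	}
}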
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 2a4c912d1e48..804ae0d93058 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -66,6 +66,7 @@ config SND_SOC_QDSP6_ASM
tristate
config SND_SOC_QDSP6_ASM_DAI
+ select SND_SOC_COMPRESS
tristate
config SND_SOC_QDSP6
@@ -100,6 +101,7 @@ config SND_SOC_SDM845
depends on QCOM_APR
select SND_SOC_QDSP6
select SND_SOC_QCOM_COMMON
+ select SND_SOC_RT5663
help
To add support for audio on Qualcomm Technologies Inc.
SDM845 SoC-based systems.
diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
index eb1b9da05dd4..4715527054e5 100644
--- a/sound/soc/qcom/common.c
+++ b/sound/soc/qcom/common.c
@@ -13,6 +13,7 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
struct device_node *cpu = NULL;
struct device *dev = card->dev;
struct snd_soc_dai_link *link;
+ struct of_phandle_args args;
int ret, num_links;
ret = snd_soc_of_parse_card_name(card, "model");
@@ -47,12 +48,14 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
goto err;
}
- link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
- if (!link->cpu_of_node) {
+ ret = of_parse_phandle_with_args(cpu, "sound-dai",
+ "#sound-dai-cells", 0, &args);
+ if (ret) {
dev_err(card->dev, "error getting cpu phandle\n");
- ret = -EINVAL;
goto err;
}
+ link->cpu_of_node = args.np;
+ link->id = args.args[0];
ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
if (ret) {
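
The common.c hunk above switches from of_parse_phandle() to of_parse_phandle_with_args() so the DAI cell of the "sound-dai" property is captured along with the node and stored as link->id. A hedged sketch of that lookup for a one-cell specifier (demo_get_dai is an illustrative name; the extra args_count guard is not in the hunk):

#include <linux/of.h>

static int demo_get_dai(struct device_node *np,
			struct device_node **dai_np, int *dai_id)
{
	struct of_phandle_args args;
	int ret;

	ret = of_parse_phandle_with_args(np, "sound-dai",
					 "#sound-dai-cells", 0, &args);
	if (ret)
		return ret;

	if (args.args_count < 1) {	/* expect one cell: the DAI id */
		of_node_put(args.np);
		return -EINVAL;
	}

	*dai_np = args.np;	/* caller must of_node_put() this */
	*dai_id = args.args[0];
	return 0;
}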
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index d07271ea4c45..028bce671cbc 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -91,7 +91,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream)
if (ret) {
dev_err(soc_runtime->dev,
"error writing to rdmactl reg: %d\n", ret);
- return ret;
+ return ret;
}
data->dma_ch = dma_ch;
diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
index 60ff4a2d3577..dc645ba4d8d0 100644
--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
+++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
@@ -341,6 +341,7 @@ static int q6afe_dai_prepare(struct snd_pcm_substream *substream,
switch (dai->id) {
case HDMI_RX:
+ case DISPLAY_PORT_RX:
q6afe_hdmi_port_prepare(dai_data->port[dai->id],
&dai_data->port_config[dai->id].hdmi);
break;
@@ -445,6 +446,7 @@ static int q6afe_mi2s_set_sysclk(struct snd_soc_dai *dai,
static const struct snd_soc_dapm_route q6afe_dapm_routes[] = {
{"HDMI Playback", NULL, "HDMI_RX"},
+ {"Display Port Playback", NULL, "DISPLAY_PORT_RX"},
{"Slimbus1 Playback", NULL, "SLIMBUS_1_RX"},
{"Slimbus2 Playback", NULL, "SLIMBUS_2_RX"},
{"Slimbus3 Playback", NULL, "SLIMBUS_3_RX"},
@@ -561,13 +563,13 @@ static const struct snd_soc_dapm_route q6afe_dapm_routes[] = {
{"QUAT_MI2S_TX", NULL, "Quaternary MI2S Capture"},
};
-static struct snd_soc_dai_ops q6hdmi_ops = {
+static const struct snd_soc_dai_ops q6hdmi_ops = {
.prepare = q6afe_dai_prepare,
.hw_params = q6hdmi_hw_params,
.shutdown = q6afe_dai_shutdown,
};
-static struct snd_soc_dai_ops q6i2s_ops = {
+static const struct snd_soc_dai_ops q6i2s_ops = {
.prepare = q6afe_dai_prepare,
.hw_params = q6i2s_hw_params,
.set_fmt = q6i2s_set_fmt,
@@ -575,14 +577,14 @@ static struct snd_soc_dai_ops q6i2s_ops = {
.set_sysclk = q6afe_mi2s_set_sysclk,
};
-static struct snd_soc_dai_ops q6slim_ops = {
+static const struct snd_soc_dai_ops q6slim_ops = {
.prepare = q6afe_dai_prepare,
.hw_params = q6slim_hw_params,
.shutdown = q6afe_dai_shutdown,
.set_channel_map = q6slim_set_channel_map,
};
-static struct snd_soc_dai_ops q6tdm_ops = {
+static const struct snd_soc_dai_ops q6tdm_ops = {
.prepare = q6afe_dai_prepare,
.shutdown = q6afe_dai_shutdown,
.set_sysclk = q6afe_mi2s_set_sysclk,
@@ -1090,6 +1092,25 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
Q6AFE_TDM_CAP_DAI("Quinary", 5, QUINARY_TDM_TX_5),
Q6AFE_TDM_CAP_DAI("Quinary", 6, QUINARY_TDM_TX_6),
Q6AFE_TDM_CAP_DAI("Quinary", 7, QUINARY_TDM_TX_7),
+ {
+ .playback = {
+ .stream_name = "Display Port Playback",
+ .rates = SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 2,
+ .channels_max = 8,
+ .rate_max = 192000,
+ .rate_min = 48000,
+ },
+ .ops = &q6hdmi_ops,
+ .id = DISPLAY_PORT_RX,
+ .name = "DISPLAY_PORT",
+ .probe = msm_dai_q6_dai_probe,
+ .remove = msm_dai_q6_dai_remove,
+ },
};
static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
@@ -1112,205 +1133,206 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
}
static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
- SND_SOC_DAPM_AIF_OUT("HDMI_RX", "HDMI Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_2_TX", "Slimbus2 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_3_TX", "Slimbus3 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_5_TX", "Slimbus5 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SLIMBUS_6_TX", "Slimbus6 Capture", 0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback",
+ SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1",
+ SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
"Secondary MI2S Playback SD1",
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback",
+ SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture",
+ SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_0", "Primary TDM0 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_1", "Primary TDM1 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_2", "Primary TDM2 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_3", "Primary TDM3 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_4", "Primary TDM4 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_5", "Primary TDM5 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_6", "Primary TDM6 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_7", "Primary TDM7 Playback",
+ SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_0", "Primary TDM0 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_1", "Primary TDM1 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_2", "Primary TDM2 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_3", "Primary TDM3 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_4", "Primary TDM4 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_5", "Primary TDM5 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_6", "Primary TDM6 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_7", "Primary TDM7 Capture",
+ SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_0", "Secondary TDM0 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_1", "Secondary TDM1 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_2", "Secondary TDM2 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_3", "Secondary TDM3 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_4", "Secondary TDM4 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_5", "Secondary TDM5 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_6", "Secondary TDM6 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_7", "Secondary TDM7 Playback",
+ SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_0", "Secondary TDM0 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_1", "Secondary TDM1 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_2", "Secondary TDM2 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_3", "Secondary TDM3 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_4", "Secondary TDM4 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_5", "Secondary TDM5 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_6", "Secondary TDM6 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_7", "Secondary TDM7 Capture",
+ SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_0", "Tertiary TDM0 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_1", "Tertiary TDM1 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_2", "Tertiary TDM2 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_3", "Tertiary TDM3 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_4", "Tertiary TDM4 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_5", "Tertiary TDM5 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_6", "Tertiary TDM6 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_7", "Tertiary TDM7 Playback",
+ SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_0", "Tertiary TDM0 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_1", "Tertiary TDM1 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_2", "Tertiary TDM2 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_3", "Tertiary TDM3 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_4", "Tertiary TDM4 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_5", "Tertiary TDM5 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_6", "Tertiary TDM6 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_7", "Tertiary TDM7 Capture",
+ SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_0", "Quaternary TDM0 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_1", "Quaternary TDM1 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_2", "Quaternary TDM2 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_3", "Quaternary TDM3 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_4", "Quaternary TDM4 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_5", "Quaternary TDM5 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_6", "Quaternary TDM6 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_7", "Quaternary TDM7 Playback",
+ SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_0", "Quaternary TDM0 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_1", "Quaternary TDM1 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_2", "Quaternary TDM2 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_3", "Quaternary TDM3 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_4", "Quaternary TDM4 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_5", "Quaternary TDM5 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_6", "Quaternary TDM6 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_7", "Quaternary TDM7 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_0", "Quinary TDM0 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_1", "Quinary TDM1 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_2", "Quinary TDM2 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_3", "Quinary TDM3 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_4", "Quinary TDM4 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_5", "Quinary TDM5 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_6", "Quinary TDM6 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_7", "Quinary TDM7 Playback",
+ SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_0", "Quinary TDM0 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_1", "Quinary TDM1 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_2", "Quinary TDM2 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_3", "Quinary TDM3 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_4", "Quinary TDM4 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_5", "Quinary TDM5 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_6", "Quinary TDM6 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
0, 0, 0, 0),
- SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_7", "Quinary TDM7 Capture",
+ SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
0, 0, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, 0, 0, 0),
};
static const struct snd_soc_component_driver q6afe_dai_component = {
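
In the widget table rewrite above, the backend ports swap DAPM direction: *_RX (playback) ports become SND_SOC_DAPM_AIF_IN widgets and *_TX (capture) ports become AIF_OUT, and the per-widget stream names move to NULL, with the stream association carried by the explicit routes instead (e.g. {"HDMI Playback", NULL, "HDMI_RX"}). A minimal sketch of the resulting pattern, with illustrative "DEMO" names:

static const struct snd_soc_dapm_widget demo_widgets[] = {
	SND_SOC_DAPM_AIF_IN("DEMO_RX", NULL, 0, 0, 0, 0),	/* playback port */
	SND_SOC_DAPM_AIF_OUT("DEMO_TX", NULL, 0, 0, 0, 0),	/* capture port */
};

static const struct snd_soc_dapm_route demo_routes[] = {
	/* route entries are { sink, control, source } */
	{ "DEMO Playback", NULL, "DEMO_RX" },
	{ "DEMO_TX", NULL, "DEMO Capture" },
};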
diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
index 000775b4bba8..e0945f7a58c8 100644
--- a/sound/soc/qcom/qdsp6/q6afe.c
+++ b/sound/soc/qcom/qdsp6/q6afe.c
@@ -49,14 +49,14 @@
#define AFE_PORT_I2S_SD1 0x2
#define AFE_PORT_I2S_SD2 0x3
#define AFE_PORT_I2S_SD3 0x4
-#define AFE_PORT_I2S_SD0_MASK BIT(0x1)
-#define AFE_PORT_I2S_SD1_MASK BIT(0x2)
-#define AFE_PORT_I2S_SD2_MASK BIT(0x3)
-#define AFE_PORT_I2S_SD3_MASK BIT(0x4)
-#define AFE_PORT_I2S_SD0_1_MASK GENMASK(2, 1)
-#define AFE_PORT_I2S_SD2_3_MASK GENMASK(4, 3)
-#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(3, 1)
-#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(4, 1)
+#define AFE_PORT_I2S_SD0_MASK BIT(0x0)
+#define AFE_PORT_I2S_SD1_MASK BIT(0x1)
+#define AFE_PORT_I2S_SD2_MASK BIT(0x2)
+#define AFE_PORT_I2S_SD3_MASK BIT(0x3)
+#define AFE_PORT_I2S_SD0_1_MASK GENMASK(1, 0)
+#define AFE_PORT_I2S_SD2_3_MASK GENMASK(3, 2)
+#define AFE_PORT_I2S_SD0_1_2_MASK GENMASK(2, 0)
+#define AFE_PORT_I2S_SD0_1_2_3_MASK GENMASK(3, 0)
#define AFE_PORT_I2S_QUAD01 0x5
#define AFE_PORT_I2S_QUAD23 0x6
#define AFE_PORT_I2S_6CHS 0x7
@@ -71,6 +71,7 @@
/* Port IDs */
#define AFE_API_VERSION_HDMI_CONFIG 0x1
#define AFE_PORT_ID_MULTICHAN_HDMI_RX 0x100E
+#define AFE_PORT_ID_HDMI_OVER_DP_RX 0x6020
#define AFE_API_VERSION_SLIMBUS_CONFIG 0x1
/* Clock set API version */
@@ -704,6 +705,8 @@ static struct afe_port_map port_maps[AFE_PORT_MAX] = {
QUINARY_TDM_RX_7, 1, 1},
[QUINARY_TDM_TX_7] = { AFE_PORT_ID_QUINARY_TDM_TX_7,
QUINARY_TDM_TX_7, 0, 1},
+ [DISPLAY_PORT_RX] = { AFE_PORT_ID_HDMI_OVER_DP_RX,
+ DISPLAY_PORT_RX, 1, 1},
};
static void q6afe_port_free(struct kref *ref)
@@ -1384,6 +1387,7 @@ struct q6afe_port *q6afe_port_get_from_id(struct device *dev, int id)
switch (port_id) {
case AFE_PORT_ID_MULTICHAN_HDMI_RX:
+ case AFE_PORT_ID_HDMI_OVER_DP_RX:
cfg_type = AFE_PARAM_ID_HDMI_CONFIG;
break;
case AFE_PORT_ID_SLIMBUS_MULTI_CHAN_0_TX:
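
The mask fix above corrects an off-by-one in the serial-data line masks: BIT(n) from <linux/bits.h> is 1UL << n, so data line 0 needs BIT(0) rather than BIT(0x1), and GENMASK(hi, lo) covers bits lo through hi inclusive. Shown on illustrative DEMO_ copies of the defines:

#include <linux/bits.h>

#define DEMO_SD0_MASK		BIT(0)		/* 0x1: line 0 only */
#define DEMO_SD0_1_MASK		GENMASK(1, 0)	/* 0x3: lines 0 and 1 */
#define DEMO_SD0_1_2_3_MASK	GENMASK(3, 0)	/* 0xf: lines 0 to 3 */

/* The old BIT(0x1)..GENMASK(4, 1) values selected lines 1-4 instead. */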
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index a16c71c03058..5b986b74dd36 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -10,6 +10,8 @@
#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/pcm.h>
+#include <linux/spinlock.h>
+#include <sound/compress_driver.h>
#include <asm/dma.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
@@ -30,6 +32,15 @@
#define CAPTURE_MIN_PERIOD_SIZE 320
#define SID_MASK_DEFAULT 0xF
+/* Default values used if user space does not set them */
+#define COMPR_PLAYBACK_MIN_FRAGMENT_SIZE (8 * 1024)
+#define COMPR_PLAYBACK_MAX_FRAGMENT_SIZE (128 * 1024)
+#define COMPR_PLAYBACK_MIN_NUM_FRAGMENTS (4)
+#define COMPR_PLAYBACK_MAX_NUM_FRAGMENTS (16 * 4)
+#define Q6ASM_DAI_TX_RX 0
+#define Q6ASM_DAI_TX 1
+#define Q6ASM_DAI_RX 2
+
enum stream_state {
Q6ASM_STREAM_IDLE = 0,
Q6ASM_STREAM_STOPPED,
@@ -38,11 +49,18 @@ enum stream_state {
struct q6asm_dai_rtd {
struct snd_pcm_substream *substream;
+ struct snd_compr_stream *cstream;
+ struct snd_compr_params codec_param;
+ struct snd_dma_buffer dma_buffer;
+ spinlock_t lock;
phys_addr_t phys;
unsigned int pcm_size;
unsigned int pcm_count;
unsigned int pcm_irq_pos; /* IRQ position */
unsigned int periods;
+ unsigned int bytes_sent;
+ unsigned int bytes_received;
+ unsigned int copied_total;
uint16_t bits_per_sample;
uint16_t source; /* Encoding source bit mask */
struct audio_client *audio_client;
@@ -122,7 +140,6 @@ static struct snd_pcm_hardware q6asm_dai_hardware_playback = {
.rate_max = 48000, \
}, \
.name = "MultiMedia"#num, \
- .probe = fe_dai_probe, \
.id = MSM_FRONTEND_DAI_MULTIMEDIA##num, \
}
@@ -138,6 +155,21 @@ static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
.mask = 0,
};
+static const struct snd_compr_codec_caps q6asm_compr_caps = {
+ .num_descriptors = 1,
+ .descriptor[0].max_ch = 2,
+ .descriptor[0].sample_rates = { 8000, 11025, 12000, 16000, 22050,
+ 24000, 32000, 44100, 48000, 88200,
+ 96000, 176400, 192000 },
+ .descriptor[0].num_sample_rates = 13,
+ .descriptor[0].bit_rate[0] = 320,
+ .descriptor[0].bit_rate[1] = 128,
+ .descriptor[0].num_bitrates = 2,
+ .descriptor[0].profiles = 0,
+ .descriptor[0].modes = SND_AUDIOCHANMODE_MP3_STEREO,
+ .descriptor[0].formats = 0,
+};
+
static void event_handler(uint32_t opcode, uint32_t token,
uint32_t *payload, void *priv)
{
@@ -461,6 +493,306 @@ static struct snd_pcm_ops q6asm_dai_ops = {
.mmap = q6asm_dai_mmap,
};
+static void compress_event_handler(uint32_t opcode, uint32_t token,
+ uint32_t *payload, void *priv)
+{
+ struct q6asm_dai_rtd *prtd = priv;
+ struct snd_compr_stream *substream = prtd->cstream;
+ unsigned long flags;
+ uint64_t avail;
+
+ switch (opcode) {
+ case ASM_CLIENT_EVENT_CMD_RUN_DONE:
+ spin_lock_irqsave(&prtd->lock, flags);
+ if (!prtd->bytes_sent) {
+ q6asm_write_async(prtd->audio_client, prtd->pcm_count,
+ 0, 0, NO_TIMESTAMP);
+ prtd->bytes_sent += prtd->pcm_count;
+ }
+
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ break;
+
+ case ASM_CLIENT_EVENT_CMD_EOS_DONE:
+ prtd->state = Q6ASM_STREAM_STOPPED;
+ break;
+
+ case ASM_CLIENT_EVENT_DATA_WRITE_DONE:
+ spin_lock_irqsave(&prtd->lock, flags);
+
+ prtd->copied_total += prtd->pcm_count;
+ snd_compr_fragment_elapsed(substream);
+
+ if (prtd->state != Q6ASM_STREAM_RUNNING) {
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ break;
+ }
+
+ avail = prtd->bytes_received - prtd->bytes_sent;
+
+ if (avail >= prtd->pcm_count) {
+ q6asm_write_async(prtd->audio_client,
+ prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+ prtd->bytes_sent += prtd->pcm_count;
+ }
+
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int q6asm_dai_compr_open(struct snd_compr_stream *stream)
+{
+ struct snd_soc_pcm_runtime *rtd = stream->private_data;
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct q6asm_dai_data *pdata;
+ struct device *dev = c->dev;
+ struct q6asm_dai_rtd *prtd;
+ int stream_id, size, ret;
+
+ stream_id = cpu_dai->driver->id;
+ pdata = snd_soc_component_get_drvdata(c);
+ if (!pdata) {
+ dev_err(dev, "Drv data not found ..\n");
+ return -EINVAL;
+ }
+
+ prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+ if (!prtd)
+ return -ENOMEM;
+
+ prtd->cstream = stream;
+ prtd->audio_client = q6asm_audio_client_alloc(dev,
+ (q6asm_cb)compress_event_handler,
+ prtd, stream_id, LEGACY_PCM_MODE);
+ if (!prtd->audio_client) {
+ dev_err(dev, "Could not allocate memory\n");
+ kfree(prtd);
+ return -ENOMEM;
+ }
+
+ size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE *
+ COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size,
+ &prtd->dma_buffer);
+ if (ret) {
+ dev_err(dev, "Cannot allocate buffer(s)\n");
+ return ret;
+ }
+
+ if (pdata->sid < 0)
+ prtd->phys = prtd->dma_buffer.addr;
+ else
+ prtd->phys = prtd->dma_buffer.addr | (pdata->sid << 32);
+
+ snd_compr_set_runtime_buffer(stream, &prtd->dma_buffer);
+ spin_lock_init(&prtd->lock);
+ runtime->private_data = prtd;
+
+ return 0;
+}
+
+static int q6asm_dai_compr_free(struct snd_compr_stream *stream)
+{
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = stream->private_data;
+
+ if (prtd->audio_client) {
+ if (prtd->state)
+ q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+
+ snd_dma_free_pages(&prtd->dma_buffer);
+ q6asm_unmap_memory_regions(stream->direction,
+ prtd->audio_client);
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ }
+ q6routing_stream_close(rtd->dai_link->id, stream->direction);
+ kfree(prtd);
+
+ return 0;
+}
+
+static int q6asm_dai_compr_set_params(struct snd_compr_stream *stream,
+ struct snd_compr_params *params)
+{
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = stream->private_data;
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ int dir = stream->direction;
+ struct q6asm_dai_data *pdata;
+ struct device *dev = c->dev;
+ int ret;
+
+ if (!prtd || !prtd->audio_client) {
+ dev_err(dev, "private data null or audio client freed\n");
+ return -EINVAL;
+ }
+
+ memcpy(&prtd->codec_param, params, sizeof(*params));
+
+ pdata = snd_soc_component_get_drvdata(c);
+ if (!pdata)
+ return -EINVAL;
+
+ prtd->periods = runtime->fragments;
+ prtd->pcm_count = runtime->fragment_size;
+ prtd->pcm_size = runtime->fragments * runtime->fragment_size;
+ prtd->bits_per_sample = 16;
+ if (dir == SND_COMPRESS_PLAYBACK) {
+ ret = q6asm_open_write(prtd->audio_client, params->codec.id,
+ prtd->bits_per_sample);
+
+ if (ret < 0) {
+ dev_err(dev, "q6asm_open_write failed\n");
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+ return ret;
+ }
+ }
+
+ prtd->session_id = q6asm_get_session_id(prtd->audio_client);
+ ret = q6routing_stream_open(rtd->dai_link->id, LEGACY_PCM_MODE,
+ prtd->session_id, dir);
+ if (ret) {
+ dev_err(dev, "Stream reg failed ret:%d\n", ret);
+ return ret;
+ }
+
+ ret = q6asm_map_memory_regions(dir, prtd->audio_client, prtd->phys,
+ (prtd->pcm_size / prtd->periods),
+ prtd->periods);
+
+ if (ret < 0) {
+ dev_err(dev, "Buffer Mapping failed ret:%d\n", ret);
+ return -ENOMEM;
+ }
+
+ prtd->state = Q6ASM_STREAM_RUNNING;
+
+ return 0;
+}
+
+static int q6asm_dai_compr_trigger(struct snd_compr_stream *stream, int cmd)
+{
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ prtd->state = Q6ASM_STREAM_STOPPED;
+ ret = q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int q6asm_dai_compr_pointer(struct snd_compr_stream *stream,
+ struct snd_compr_tstamp *tstamp)
+{
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&prtd->lock, flags);
+
+ tstamp->copied_total = prtd->copied_total;
+ tstamp->byte_offset = prtd->copied_total % prtd->pcm_size;
+
+ spin_unlock_irqrestore(&prtd->lock, flags);
+
+ return 0;
+}
+
+static int q6asm_dai_compr_ack(struct snd_compr_stream *stream,
+ size_t count)
+{
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&prtd->lock, flags);
+ prtd->bytes_received += count;
+ spin_unlock_irqrestore(&prtd->lock, flags);
+
+ return count;
+}
+
+static int q6asm_dai_compr_mmap(struct snd_compr_stream *stream,
+ struct vm_area_struct *vma)
+{
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct q6asm_dai_rtd *prtd = runtime->private_data;
+ struct snd_soc_pcm_runtime *rtd = stream->private_data;
+ struct snd_soc_component *c = snd_soc_rtdcom_lookup(rtd, DRV_NAME);
+ struct device *dev = c->dev;
+
+ return dma_mmap_coherent(dev, vma,
+ prtd->dma_buffer.area, prtd->dma_buffer.addr,
+ prtd->dma_buffer.bytes);
+}
+
+static int q6asm_dai_compr_get_caps(struct snd_compr_stream *stream,
+ struct snd_compr_caps *caps)
+{
+ caps->direction = SND_COMPRESS_PLAYBACK;
+ caps->min_fragment_size = COMPR_PLAYBACK_MIN_FRAGMENT_SIZE;
+ caps->max_fragment_size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE;
+ caps->min_fragments = COMPR_PLAYBACK_MIN_NUM_FRAGMENTS;
+ caps->max_fragments = COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
+ caps->num_codecs = 1;
+ caps->codecs[0] = SND_AUDIOCODEC_MP3;
+
+ return 0;
+}
+
+static int q6asm_dai_compr_get_codec_caps(struct snd_compr_stream *stream,
+ struct snd_compr_codec_caps *codec)
+{
+ switch (codec->codec) {
+ case SND_AUDIOCODEC_MP3:
+ *codec = q6asm_compr_caps;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct snd_compr_ops q6asm_dai_compr_ops = {
+ .open = q6asm_dai_compr_open,
+ .free = q6asm_dai_compr_free,
+ .set_params = q6asm_dai_compr_set_params,
+ .pointer = q6asm_dai_compr_pointer,
+ .trigger = q6asm_dai_compr_trigger,
+ .get_caps = q6asm_dai_compr_get_caps,
+ .get_codec_caps = q6asm_dai_compr_get_codec_caps,
+ .mmap = q6asm_dai_compr_mmap,
+ .ack = q6asm_dai_compr_ack,
+};
+
static int q6asm_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
{
struct snd_pcm_substream *psubstream, *csubstream;
@@ -511,44 +843,12 @@ static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
}
}
-static const struct snd_soc_dapm_route afe_pcm_routes[] = {
- {"MM_DL1", NULL, "MultiMedia1 Playback" },
- {"MM_DL2", NULL, "MultiMedia2 Playback" },
- {"MM_DL3", NULL, "MultiMedia3 Playback" },
- {"MM_DL4", NULL, "MultiMedia4 Playback" },
- {"MM_DL5", NULL, "MultiMedia5 Playback" },
- {"MM_DL6", NULL, "MultiMedia6 Playback" },
- {"MM_DL7", NULL, "MultiMedia7 Playback" },
- {"MM_DL7", NULL, "MultiMedia8 Playback" },
- {"MultiMedia1 Capture", NULL, "MM_UL1"},
- {"MultiMedia2 Capture", NULL, "MM_UL2"},
- {"MultiMedia3 Capture", NULL, "MM_UL3"},
- {"MultiMedia4 Capture", NULL, "MM_UL4"},
- {"MultiMedia5 Capture", NULL, "MM_UL5"},
- {"MultiMedia6 Capture", NULL, "MM_UL6"},
- {"MultiMedia7 Capture", NULL, "MM_UL7"},
- {"MultiMedia8 Capture", NULL, "MM_UL8"},
-
-};
-
-static int fe_dai_probe(struct snd_soc_dai *dai)
-{
- struct snd_soc_dapm_context *dapm;
-
- dapm = snd_soc_component_get_dapm(dai->component);
- snd_soc_dapm_add_routes(dapm, afe_pcm_routes,
- ARRAY_SIZE(afe_pcm_routes));
-
- return 0;
-}
-
-
static const struct snd_soc_component_driver q6asm_fe_dai_component = {
.name = DRV_NAME,
.ops = &q6asm_dai_ops,
.pcm_new = q6asm_dai_pcm_new,
.pcm_free = q6asm_dai_pcm_free,
-
+ .compr_ops = &q6asm_dai_compr_ops,
};
static struct snd_soc_dai_driver q6asm_fe_dais[] = {
@@ -562,6 +862,41 @@ static struct snd_soc_dai_driver q6asm_fe_dais[] = {
Q6ASM_FEDAI_DRIVER(8),
};
+static int of_q6asm_parse_dai_data(struct device *dev,
+ struct q6asm_dai_data *pdata)
+{
+ struct snd_soc_dai_driver *dai_drv;
+ struct snd_soc_pcm_stream empty_stream;
+ struct device_node *node;
+ int ret, id, dir;
+
+ memset(&empty_stream, 0, sizeof(empty_stream));
+
+ for_each_child_of_node(dev->of_node, node) {
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret || id >= MAX_SESSIONS || id < 0) {
+ dev_err(dev, "valid dai id not found: %d\n", ret);
+ continue;
+ }
+
+ dai_drv = &q6asm_fe_dais[id];
+
+ ret = of_property_read_u32(node, "direction", &dir);
+ if (ret)
+ continue;
+
+ if (dir == Q6ASM_DAI_RX)
+ dai_drv->capture = empty_stream;
+ else if (dir == Q6ASM_DAI_TX)
+ dai_drv->playback = empty_stream;
+
+ if (of_property_read_bool(node, "is-compress-dai"))
+ dai_drv->compress_new = snd_soc_new_compress;
+ }
+
+ return 0;
+}
+
static int q6asm_dai_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -582,6 +917,8 @@ static int q6asm_dai_probe(struct platform_device *pdev)
dev_set_drvdata(dev, pdata);
+ of_q6asm_parse_dai_data(dev, pdata);
+
return devm_snd_soc_register_component(dev, &q6asm_fe_dai_component,
q6asm_fe_dais,
ARRAY_SIZE(q6asm_fe_dais));
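
The compress callbacks above boil down to ring-buffer bookkeeping: the ack callback accumulates what userspace queued into bytes_received, each DSP completion advances copied_total, and the pointer callback derives the ring offset as copied_total modulo the buffer size. A minimal standalone sketch of that accounting (plain C illustration; the field names mirror the driver, nothing here is kernel API):

#include <assert.h>
#include <stdio.h>

struct ring_state {
	unsigned long pcm_size;       /* total ring size in bytes */
	unsigned long bytes_received; /* queued by userspace (ack) */
	unsigned long copied_total;   /* consumed by the DSP */
};

/* userspace wrote 'count' more bytes into the ring */
static void ring_ack(struct ring_state *r, unsigned long count)
{
	r->bytes_received += count;
}

/* the DSP finished a fragment of 'count' bytes */
static void ring_consume(struct ring_state *r, unsigned long count)
{
	r->copied_total += count;
	assert(r->copied_total <= r->bytes_received);
}

/* same derivation as q6asm_dai_compr_pointer() */
static unsigned long ring_byte_offset(const struct ring_state *r)
{
	return r->copied_total % r->pcm_size;
}

int main(void)
{
	struct ring_state r = { .pcm_size = 4 * 65536 };

	ring_ack(&r, 6 * 65536);     /* app queues six fragments */
	ring_consume(&r, 5 * 65536); /* DSP consumed five of them */
	printf("offset %lu of %lu\n", ring_byte_offset(&r), r.pcm_size);
	return 0;
}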
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
index e1cfa846a1dc..4f85cb19a309 100644
--- a/sound/soc/qcom/qdsp6/q6asm.c
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -12,6 +12,7 @@
#include <linux/kref.h>
#include <linux/of.h>
#include <uapi/sound/asound.h>
+#include <uapi/sound/compress_params.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mm.h>
@@ -36,6 +37,7 @@
#define ASM_PARAM_ID_ENCDEC_ENC_CFG_BLK_V2 0x00010DA3
#define ASM_SESSION_CMD_RUN_V2 0x00010DAA
#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2 0x00010DA5
+#define ASM_MEDIA_FMT_MP3 0x00010BE9
#define ASM_DATA_CMD_WRITE_V2 0x00010DAB
#define ASM_DATA_CMD_READ_V2 0x00010DAC
#define ASM_SESSION_CMD_SUSPEND 0x00010DEC
@@ -868,6 +870,9 @@ int q6asm_open_write(struct audio_client *ac, uint32_t format,
open->postprocopo_id = ASM_NULL_POPP_TOPOLOGY;
switch (format) {
+ case SND_AUDIOCODEC_MP3:
+ open->dec_fmt_id = ASM_MEDIA_FMT_MP3;
+ break;
case FORMAT_LINEAR_PCM:
open->dec_fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
break;
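
The q6asm_open_write() hunk above extends a plain format-to-DSP-id switch. A hedged standalone sketch of the same mapping (the two ASM_MEDIA_FMT_* values are copied from the diff; the enum values and the helper name dsp_fmt_id are made up for illustration, not the real uapi numbers):

#include <stdint.h>
#include <stdio.h>

#define ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2 0x00010DA5
#define ASM_MEDIA_FMT_MP3                  0x00010BE9

enum { FORMAT_LINEAR_PCM = 0, SND_AUDIOCODEC_MP3 = 1 }; /* illustrative */

/* translate an ALSA-side format id to the DSP media format id */
static int dsp_fmt_id(uint32_t format, uint32_t *fmt_id)
{
	switch (format) {
	case SND_AUDIOCODEC_MP3:
		*fmt_id = ASM_MEDIA_FMT_MP3;
		return 0;
	case FORMAT_LINEAR_PCM:
		*fmt_id = ASM_MEDIA_FMT_MULTI_CHANNEL_PCM_V2;
		return 0;
	default:
		return -1; /* mirrors the driver's -EINVAL path */
	}
}

int main(void)
{
	uint32_t id;

	if (!dsp_fmt_id(SND_AUDIOCODEC_MP3, &id))
		printf("MP3 -> 0x%08X\n", id);
	return 0;
}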
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index c6b51571be94..ddcd9978cf57 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -453,6 +453,9 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
Q6ROUTING_RX_MIXERS(HDMI_RX) };
+static const struct snd_kcontrol_new display_port_mixer_controls[] = {
+ Q6ROUTING_RX_MIXERS(DISPLAY_PORT_RX) };
+
static const struct snd_kcontrol_new primary_mi2s_rx_mixer_controls[] = {
Q6ROUTING_RX_MIXERS(PRIMARY_MI2S_RX) };
@@ -655,6 +658,10 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
hdmi_mixer_controls,
ARRAY_SIZE(hdmi_mixer_controls)),
+ SND_SOC_DAPM_MIXER("DISPLAY_PORT_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
+ display_port_mixer_controls,
+ ARRAY_SIZE(display_port_mixer_controls)),
+
SND_SOC_DAPM_MIXER("SLIMBUS_0_RX Audio Mixer", SND_SOC_NOPM, 0, 0,
slimbus_rx_mixer_controls,
ARRAY_SIZE(slimbus_rx_mixer_controls)),
@@ -833,6 +840,8 @@ static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
static const struct snd_soc_dapm_route intercon[] = {
Q6ROUTING_RX_DAPM_ROUTE("HDMI Mixer", "HDMI_RX"),
+ Q6ROUTING_RX_DAPM_ROUTE("DISPLAY_PORT_RX Audio Mixer",
+ "DISPLAY_PORT_RX"),
Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_0_RX Audio Mixer", "SLIMBUS_0_RX"),
Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_1_RX Audio Mixer", "SLIMBUS_1_RX"),
Q6ROUTING_RX_DAPM_ROUTE("SLIMBUS_2_RX Audio Mixer", "SLIMBUS_2_RX"),
@@ -909,6 +918,25 @@ static const struct snd_soc_dapm_route intercon[] = {
{"MM_UL6", NULL, "MultiMedia6 Mixer"},
{"MM_UL7", NULL, "MultiMedia7 Mixer"},
{"MM_UL8", NULL, "MultiMedia8 Mixer"},
+
+ {"MM_DL1", NULL, "MultiMedia1 Playback" },
+ {"MM_DL2", NULL, "MultiMedia2 Playback" },
+ {"MM_DL3", NULL, "MultiMedia3 Playback" },
+ {"MM_DL4", NULL, "MultiMedia4 Playback" },
+ {"MM_DL5", NULL, "MultiMedia5 Playback" },
+ {"MM_DL6", NULL, "MultiMedia6 Playback" },
+ {"MM_DL7", NULL, "MultiMedia7 Playback" },
+ {"MM_DL8", NULL, "MultiMedia8 Playback" },
+
+ {"MultiMedia1 Capture", NULL, "MM_UL1"},
+ {"MultiMedia2 Capture", NULL, "MM_UL2"},
+ {"MultiMedia3 Capture", NULL, "MM_UL3"},
+ {"MultiMedia4 Capture", NULL, "MM_UL4"},
+ {"MultiMedia5 Capture", NULL, "MM_UL5"},
+ {"MultiMedia6 Capture", NULL, "MM_UL6"},
+ {"MultiMedia7 Capture", NULL, "MM_UL7"},
+ {"MultiMedia8 Capture", NULL, "MM_UL8"},
+
};
static int routing_hw_params(struct snd_pcm_substream *substream,
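
The playback/capture routes added to intercon[] above are fully regular: MM_DLn is fed by "MultiMedia{n} Playback" and "MultiMedia{n} Capture" is fed by MM_ULn, for n = 1..8. The old per-DAI table this replaces (removed earlier in this patch) listed MM_DL7 twice instead of MM_DL8; generating the rows mechanically makes that class of typo easy to spot. A throwaway generator sketch, not driver code:

#include <stdio.h>

int main(void)
{
	char sink[16], source[32];
	int n;

	/* print the DAPM route triples {sink, control, source} */
	for (n = 1; n <= 8; n++) {
		snprintf(sink, sizeof(sink), "MM_DL%d", n);
		snprintf(source, sizeof(source), "MultiMedia%d Playback", n);
		printf("{\"%s\", NULL, \"%s\"},\n", sink, source);
	}
	return 0;
}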
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 9effbecc571f..1db8ef668223 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -6,18 +6,31 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
+#include <sound/jack.h>
+#include <sound/soc.h>
+#include <uapi/linux/input-event-codes.h>
#include "common.h"
#include "qdsp6/q6afe.h"
+#include "../codecs/rt5663.h"
#define DEFAULT_SAMPLE_RATE_48K 48000
#define DEFAULT_MCLK_RATE 24576000
-#define DEFAULT_BCLK_RATE 12288000
+#define TDM_BCLK_RATE 6144000
+#define MI2S_BCLK_RATE 1536000
+#define LEFT_SPK_TDM_TX_MASK 0x30
+#define RIGHT_SPK_TDM_TX_MASK 0xC0
+#define SPK_TDM_RX_MASK 0x03
+#define NUM_TDM_SLOTS 8
struct sdm845_snd_data {
+ struct snd_soc_jack jack;
+ bool jack_setup;
struct snd_soc_card *card;
uint32_t pri_mi2s_clk_count;
+ uint32_t sec_mi2s_clk_count;
uint32_t quat_tdm_clk_count;
};
@@ -28,12 +41,12 @@ static int sdm845_tdm_snd_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int ret = 0;
+ int ret = 0, j;
int channels, slot_width;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- slot_width = 32;
+ slot_width = 16;
break;
default:
dev_err(rtd->dev, "%s: invalid param format 0x%x\n",
@@ -75,6 +88,35 @@ static int sdm845_tdm_snd_hw_params(struct snd_pcm_substream *substream,
goto end;
}
}
+
+ for (j = 0; j < rtd->num_codecs; j++) {
+ struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
+
+ if (!strcmp(codec_dai->component->name_prefix, "Left")) {
+ ret = snd_soc_dai_set_tdm_slot(
+ codec_dai, LEFT_SPK_TDM_TX_MASK,
+ SPK_TDM_RX_MASK, NUM_TDM_SLOTS,
+ slot_width);
+ if (ret < 0) {
+ dev_err(rtd->dev,
+ "DEV0 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+
+ if (!strcmp(codec_dai->component->name_prefix, "Right")) {
+ ret = snd_soc_dai_set_tdm_slot(
+ codec_dai, RIGHT_SPK_TDM_TX_MASK,
+ SPK_TDM_RX_MASK, NUM_TDM_SLOTS,
+ slot_width);
+ if (ret < 0) {
+ dev_err(rtd->dev,
+ "DEV1 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ }
+
end:
return ret;
}
@@ -84,9 +126,27 @@ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
int ret = 0;
switch (cpu_dai->id) {
+ case PRIMARY_MI2S_RX:
+ case PRIMARY_MI2S_TX:
+ /*
+ * Use ASRC for internal clocks, as the PLL rate isn't a
+ * multiple of the BCLK.
+ */
+ rt5663_sel_asrc_clk_src(
+ codec_dai->component,
+ RT5663_DA_STEREO_FILTER | RT5663_AD_STEREO_FILTER,
+ RT5663_CLK_SEL_I2S1_ASRC);
+ ret = snd_soc_dai_set_sysclk(
+ codec_dai, RT5663_SCLK_S_MCLK, DEFAULT_MCLK_RATE,
+ SND_SOC_CLOCK_IN);
+ if (ret < 0)
+ dev_err(rtd->dev,
+ "snd_soc_dai_set_sysclk err = %d\n", ret);
+ break;
case QUATERNARY_TDM_RX_0:
case QUATERNARY_TDM_TX_0:
ret = sdm845_tdm_snd_hw_params(substream, params);
@@ -98,24 +158,87 @@ static int sdm845_snd_hw_params(struct snd_pcm_substream *substream,
return ret;
}
+static int sdm845_dai_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_component *component;
+ struct snd_soc_dai_link *dai_link = rtd->dai_link;
+ struct snd_soc_card *card = rtd->card;
+ struct sdm845_snd_data *pdata = snd_soc_card_get_drvdata(card);
+ int i, rval;
+
+ if (!pdata->jack_setup) {
+ struct snd_jack *jack;
+
+ rval = snd_soc_card_jack_new(card, "Headset Jack",
+ SND_JACK_HEADSET |
+ SND_JACK_HEADPHONE |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3,
+ &pdata->jack, NULL, 0);
+
+ if (rval < 0) {
+ dev_err(card->dev, "Unable to add Headset Jack\n");
+ return rval;
+ }
+
+ jack = pdata->jack.jack;
+
+ snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+ pdata->jack_setup = true;
+ }
+
+ for (i = 0 ; i < dai_link->num_codecs; i++) {
+ struct snd_soc_dai *dai = rtd->codec_dais[i];
+
+ component = dai->component;
+ rval = snd_soc_component_set_jack(
+ component, &pdata->jack, NULL);
+ if (rval != 0 && rval != -ENOTSUPP) {
+ dev_warn(card->dev, "Failed to set jack: %d\n", rval);
+ return rval;
+ }
+ }
+
+ return 0;
+}
+
static int sdm845_snd_startup(struct snd_pcm_substream *substream)
{
unsigned int fmt = SND_SOC_DAIFMT_CBS_CFS;
+ unsigned int codec_dai_fmt = SND_SOC_DAIFMT_CBS_CFS;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
struct sdm845_snd_data *data = snd_soc_card_get_drvdata(card);
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int j;
+ int ret;
switch (cpu_dai->id) {
case PRIMARY_MI2S_RX:
case PRIMARY_MI2S_TX:
+ codec_dai_fmt |= SND_SOC_DAIFMT_NB_NF;
if (++(data->pri_mi2s_clk_count) == 1) {
snd_soc_dai_set_sysclk(cpu_dai,
Q6AFE_LPASS_CLK_ID_MCLK_1,
DEFAULT_MCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
snd_soc_dai_set_sysclk(cpu_dai,
Q6AFE_LPASS_CLK_ID_PRI_MI2S_IBIT,
- DEFAULT_BCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
+ MI2S_BCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
+ }
+ snd_soc_dai_set_fmt(cpu_dai, fmt);
+ snd_soc_dai_set_fmt(codec_dai, codec_dai_fmt);
+ break;
+
+ case SECONDARY_MI2S_TX:
+ if (++(data->sec_mi2s_clk_count) == 1) {
+ snd_soc_dai_set_sysclk(cpu_dai,
+ Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT,
+ MI2S_BCLK_RATE, SNDRV_PCM_STREAM_CAPTURE);
}
snd_soc_dai_set_fmt(cpu_dai, fmt);
break;
@@ -125,7 +248,35 @@ static int sdm845_snd_startup(struct snd_pcm_substream *substream)
if (++(data->quat_tdm_clk_count) == 1) {
snd_soc_dai_set_sysclk(cpu_dai,
Q6AFE_LPASS_CLK_ID_QUAD_TDM_IBIT,
- DEFAULT_BCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
+ TDM_BCLK_RATE, SNDRV_PCM_STREAM_PLAYBACK);
+ }
+
+ codec_dai_fmt |= SND_SOC_DAIFMT_IB_NF | SND_SOC_DAIFMT_DSP_B;
+
+ for (j = 0; j < rtd->num_codecs; j++) {
+ codec_dai = rtd->codec_dais[j];
+
+ if (!strcmp(codec_dai->component->name_prefix,
+ "Left")) {
+ ret = snd_soc_dai_set_fmt(
+ codec_dai, codec_dai_fmt);
+ if (ret < 0) {
+ dev_err(rtd->dev,
+ "Left TDM fmt err:%d\n", ret);
+ return ret;
+ }
+ }
+
+ if (!strcmp(codec_dai->component->name_prefix,
+ "Right")) {
+ ret = snd_soc_dai_set_fmt(
+ codec_dai, codec_dai_fmt);
+ if (ret < 0) {
+ dev_err(rtd->dev,
+ "Right TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
}
break;
@@ -156,6 +307,14 @@ static void sdm845_snd_shutdown(struct snd_pcm_substream *substream)
};
break;
+ case SECONDARY_MI2S_TX:
+ if (--(data->sec_mi2s_clk_count) == 0) {
+ snd_soc_dai_set_sysclk(cpu_dai,
+ Q6AFE_LPASS_CLK_ID_SEC_MI2S_IBIT,
+ 0, SNDRV_PCM_STREAM_CAPTURE);
+ }
+ break;
+
case QUATERNARY_TDM_RX_0:
case QUATERNARY_TDM_TX_0:
if (--(data->quat_tdm_clk_count) == 0) {
@@ -171,7 +330,7 @@ static void sdm845_snd_shutdown(struct snd_pcm_substream *substream)
}
}
-static struct snd_soc_ops sdm845_be_ops = {
+static const struct snd_soc_ops sdm845_be_ops = {
.hw_params = sdm845_snd_hw_params,
.startup = sdm845_snd_startup,
.shutdown = sdm845_snd_shutdown,
@@ -193,7 +352,15 @@ static int sdm845_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
return 0;
}
-static void sdm845_add_be_ops(struct snd_soc_card *card)
+static const struct snd_soc_dapm_widget sdm845_snd_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Left Spk", NULL),
+ SND_SOC_DAPM_SPK("Right Spk", NULL),
+ SND_SOC_DAPM_MIC("Int Mic", NULL),
+};
+
+static void sdm845_add_ops(struct snd_soc_card *card)
{
struct snd_soc_dai_link *link;
int i;
@@ -203,6 +370,7 @@ static void sdm845_add_be_ops(struct snd_soc_card *card)
link->ops = &sdm845_be_ops;
link->be_hw_params_fixup = sdm845_be_hw_params_fixup;
}
+ link->init = sdm845_dai_init;
}
}
@@ -224,6 +392,8 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev)
goto data_alloc_fail;
}
+ card->dapm_widgets = sdm845_snd_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(sdm845_snd_widgets);
card->dev = dev;
dev_set_drvdata(dev, card);
ret = qcom_snd_parse_of(card);
@@ -235,7 +405,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev)
data->card = card;
snd_soc_card_set_drvdata(card, data);
- sdm845_add_be_ops(card);
+ sdm845_add_ops(card);
ret = snd_soc_register_card(card);
if (ret) {
dev_err(dev, "Sound card registration failed\n");
diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
index 9e7b5fa4cf59..4ac78d7a4b2d 100644
--- a/sound/soc/rockchip/rockchip_pcm.c
+++ b/sound/soc/rockchip/rockchip_pcm.c
@@ -33,6 +33,7 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {
static const struct snd_dmaengine_pcm_config rk_dmaengine_pcm_config = {
.pcm_hardware = &snd_rockchip_hardware,
+ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
.prealloc_buffer_size = 32 * 1024,
};
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 28327dd2c6cb..e821ccc70f47 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -249,28 +249,8 @@ int rsnd_adg_set_src_timesel_gen2(struct rsnd_mod *src_mod,
out = out << shift;
mask = 0x0f1f << shift;
- switch (id / 2) {
- case 0:
- rsnd_mod_bset(adg_mod, SRCIN_TIMSEL0, mask, in);
- rsnd_mod_bset(adg_mod, SRCOUT_TIMSEL0, mask, out);
- break;
- case 1:
- rsnd_mod_bset(adg_mod, SRCIN_TIMSEL1, mask, in);
- rsnd_mod_bset(adg_mod, SRCOUT_TIMSEL1, mask, out);
- break;
- case 2:
- rsnd_mod_bset(adg_mod, SRCIN_TIMSEL2, mask, in);
- rsnd_mod_bset(adg_mod, SRCOUT_TIMSEL2, mask, out);
- break;
- case 3:
- rsnd_mod_bset(adg_mod, SRCIN_TIMSEL3, mask, in);
- rsnd_mod_bset(adg_mod, SRCOUT_TIMSEL3, mask, out);
- break;
- case 4:
- rsnd_mod_bset(adg_mod, SRCIN_TIMSEL4, mask, in);
- rsnd_mod_bset(adg_mod, SRCOUT_TIMSEL4, mask, out);
- break;
- }
+ rsnd_mod_bset(adg_mod, SRCIN_TIMSEL(id / 2), mask, in);
+ rsnd_mod_bset(adg_mod, SRCOUT_TIMSEL(id / 2), mask, out);
if (en)
rsnd_mod_bset(adg_mod, DIV_EN, en, en);
@@ -299,17 +279,7 @@ static void rsnd_adg_set_ssi_clk(struct rsnd_mod *ssi_mod, u32 val)
if (id == 8)
return;
- switch (id / 4) {
- case 0:
- rsnd_mod_bset(adg_mod, AUDIO_CLK_SEL0, mask, val);
- break;
- case 1:
- rsnd_mod_bset(adg_mod, AUDIO_CLK_SEL1, mask, val);
- break;
- case 2:
- rsnd_mod_bset(adg_mod, AUDIO_CLK_SEL2, mask, val);
- break;
- }
+ rsnd_mod_bset(adg_mod, AUDIO_CLK_SEL(id / 4), mask, val);
dev_dbg(dev, "AUDIO_CLK_SEL is 0x%x\n", val);
}
@@ -613,7 +583,7 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
return -ENOMEM;
ret = rsnd_mod_init(priv, &adg->mod, &adg_ops,
- NULL, NULL, 0, 0);
+ NULL, 0, 0);
if (ret)
return ret;
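
Both adg.c hunks collapse a switch over register names into an indexed macro such as SRCIN_TIMSEL(i); this only works because the enum entries in rsnd.h stay consecutive (which is also why the SSI_BUSIF* enumerators are regrouped by register later in this series). A standalone sketch of the invariant, assuming a 5-entry register bank:

#include <stdio.h>

/* consecutive enumerators let an index replace a five-way switch */
enum reg {
	SRCIN_TIMSEL0, SRCIN_TIMSEL1, SRCIN_TIMSEL2,
	SRCIN_TIMSEL3, SRCIN_TIMSEL4,
};
#define SRCIN_TIMSEL(i) ((enum reg)(SRCIN_TIMSEL0 + (i)))

static void mod_bset(enum reg r, unsigned int mask, unsigned int val)
{
	printf("bset reg=%d mask=%#x val=%#x\n", r, mask, val);
}

int main(void)
{
	int id;

	for (id = 0; id < 10; id += 2) /* module ids map to register pairs */
		mod_bset(SRCIN_TIMSEL(id / 2), 0x0f1f, 0);
	return 0;
}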
diff --git a/sound/soc/sh/rcar/cmd.c b/sound/soc/sh/rcar/cmd.c
index cc191cd5fb82..e6bb6a9a0684 100644
--- a/sound/soc/sh/rcar/cmd.c
+++ b/sound/soc/sh/rcar/cmd.c
@@ -116,10 +116,11 @@ static int rsnd_cmd_stop(struct rsnd_mod *mod,
}
static struct rsnd_mod_ops rsnd_cmd_ops = {
- .name = CMD_NAME,
- .init = rsnd_cmd_init,
- .start = rsnd_cmd_start,
- .stop = rsnd_cmd_stop,
+ .name = CMD_NAME,
+ .init = rsnd_cmd_init,
+ .start = rsnd_cmd_start,
+ .stop = rsnd_cmd_stop,
+ .get_status = rsnd_mod_get_status,
};
static struct rsnd_mod *rsnd_cmd_mod_get(struct rsnd_priv *priv, int id)
@@ -162,7 +163,7 @@ int rsnd_cmd_probe(struct rsnd_priv *priv)
for_each_rsnd_cmd(cmd, priv, i) {
ret = rsnd_mod_init(priv, rsnd_mod_get(cmd),
&rsnd_cmd_ops, NULL,
- rsnd_mod_get_status, RSND_MOD_CMD, i);
+ RSND_MOD_CMD, i);
if (ret)
return ret;
}
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f930f51b686f..59e250cc2e9d 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -123,8 +123,8 @@ void rsnd_mod_make_sure(struct rsnd_mod *mod, enum rsnd_mod_type type)
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
- dev_warn(dev, "%s[%d] is not your expected module\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
+ dev_warn(dev, "%s is not your expected module\n",
+ rsnd_mod_name(mod));
}
}
@@ -137,20 +137,69 @@ struct dma_chan *rsnd_mod_dma_req(struct rsnd_dai_stream *io,
return mod->ops->dma_req(io, mod);
}
-u32 *rsnd_mod_get_status(struct rsnd_dai_stream *io,
- struct rsnd_mod *mod,
+#define MOD_NAME_NUM 5
+#define MOD_NAME_SIZE 16
+char *rsnd_mod_name(struct rsnd_mod *mod)
+{
+ static char names[MOD_NAME_NUM][MOD_NAME_SIZE];
+ static int num;
+ char *name = names[num];
+
+ num++;
+ if (num >= MOD_NAME_NUM)
+ num = 0;
+
+ /*
+ * Use the same small pool of buffers to avoid pointless memory
+ * allocation. Thus, the name returned by rsnd_mod_name() should
+ * be used immediately; don't keep the pointer.
+ */
+ if ((mod)->ops->id_sub) {
+ snprintf(name, MOD_NAME_SIZE, "%s[%d%d]",
+ mod->ops->name,
+ rsnd_mod_id(mod),
+ rsnd_mod_id_sub(mod));
+ } else {
+ snprintf(name, MOD_NAME_SIZE, "%s[%d]",
+ mod->ops->name,
+ rsnd_mod_id(mod));
+ }
+
+ return name;
+}
+
+u32 *rsnd_mod_get_status(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
enum rsnd_mod_type type)
{
return &mod->status;
}
+int rsnd_mod_id_raw(struct rsnd_mod *mod)
+{
+ return mod->id;
+}
+
+int rsnd_mod_id(struct rsnd_mod *mod)
+{
+ if ((mod)->ops->id)
+ return (mod)->ops->id(mod);
+
+ return rsnd_mod_id_raw(mod);
+}
+
+int rsnd_mod_id_sub(struct rsnd_mod *mod)
+{
+ if ((mod)->ops->id_sub)
+ return (mod)->ops->id_sub(mod);
+
+ return 0;
+}
+
int rsnd_mod_init(struct rsnd_priv *priv,
struct rsnd_mod *mod,
struct rsnd_mod_ops *ops,
struct clk *clk,
- u32* (*get_status)(struct rsnd_dai_stream *io,
- struct rsnd_mod *mod,
- enum rsnd_mod_type type),
enum rsnd_mod_type type,
int id)
{
@@ -164,7 +213,6 @@ int rsnd_mod_init(struct rsnd_priv *priv,
mod->type = type;
mod->clk = clk;
mod->priv = priv;
- mod->get_status = get_status;
return ret;
}
@@ -228,7 +276,20 @@ int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
struct rsnd_mod *ctu_mod = rsnd_io_to_mod_ctu(io);
if (ctu_mod) {
- u32 converted_chan = rsnd_ctu_converted_channel(ctu_mod);
+ u32 converted_chan = rsnd_io_converted_chan(io);
+
+ /*
+ * !! Note !!
+ *
+ * converted_chan is used either for CTU or for
+ * TDM Split mode; the user shouldn't combine CTU
+ * with TDM Split mode.
+ */
+ if (rsnd_runtime_is_tdm_split(io)) {
+ struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));
+
+ dev_err(dev, "CTU and TDM Split should be used\n");
+ }
if (converted_chan)
return converted_chan;
@@ -246,7 +307,7 @@ int rsnd_runtime_channel_for_ssi_with_params(struct rsnd_dai_stream *io,
rsnd_runtime_channel_original_with_params(io, params);
/* Use Multi SSI */
- if (rsnd_runtime_is_ssi_multi(io))
+ if (rsnd_runtime_is_multi_ssi(io))
chan /= rsnd_rdai_ssi_lane_get(rdai);
/* TDM Extend Mode needs 8ch */
@@ -256,7 +317,7 @@ int rsnd_runtime_channel_for_ssi_with_params(struct rsnd_dai_stream *io,
return chan;
}
-int rsnd_runtime_is_ssi_multi(struct rsnd_dai_stream *io)
+int rsnd_runtime_is_multi_ssi(struct rsnd_dai_stream *io)
{
struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
int lane = rsnd_rdai_ssi_lane_get(rdai);
@@ -267,11 +328,16 @@ int rsnd_runtime_is_ssi_multi(struct rsnd_dai_stream *io)
return (chan > 2) && (lane > 1);
}
-int rsnd_runtime_is_ssi_tdm(struct rsnd_dai_stream *io)
+int rsnd_runtime_is_tdm(struct rsnd_dai_stream *io)
{
return rsnd_runtime_channel_for_ssi(io) >= 6;
}
+int rsnd_runtime_is_tdm_split(struct rsnd_dai_stream *io)
+{
+ return !!rsnd_flags_has(io, RSND_STREAM_TDM_SPLIT);
+}
+
/*
* ADINR function
*/
@@ -472,20 +538,19 @@ static int rsnd_status_update(u32 *status,
enum rsnd_mod_type *types = rsnd_mod_sequence[is_play]; \
for_each_rsnd_mod_arrays(i, mod, io, types, RSND_MOD_MAX) { \
int tmp = 0; \
- u32 *status = mod->get_status(io, mod, types[i]); \
+ u32 *status = mod->ops->get_status(mod, io, types[i]); \
int func_call = rsnd_status_update(status, \
__rsnd_mod_shift_##fn, \
__rsnd_mod_add_##fn, \
__rsnd_mod_call_##fn); \
- rsnd_dbg_dai_call(dev, "%s[%d]\t0x%08x %s\n", \
- rsnd_mod_name(mod), rsnd_mod_id(mod), *status, \
+ rsnd_dbg_dai_call(dev, "%s\t0x%08x %s\n", \
+ rsnd_mod_name(mod), *status, \
(func_call && (mod)->ops->fn) ? #fn : ""); \
if (func_call && (mod)->ops->fn) \
tmp = (mod)->ops->fn(mod, io, param); \
if (tmp && (tmp != -EPROBE_DEFER)) \
- dev_err(dev, "%s[%d] : %s error %d\n", \
- rsnd_mod_name(mod), rsnd_mod_id(mod), \
- #fn, tmp); \
+ dev_err(dev, "%s : %s error %d\n", \
+ rsnd_mod_name(mod), #fn, tmp); \
ret |= tmp; \
} \
ret; \
@@ -512,8 +577,8 @@ int rsnd_dai_connect(struct rsnd_mod *mod,
io->mod[type] = mod;
- dev_dbg(dev, "%s[%d] is connected to io (%s)\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod),
+ dev_dbg(dev, "%s is connected to io (%s)\n",
+ rsnd_mod_name(mod),
rsnd_io_is_play(io) ? "Playback" : "Capture");
return 0;
@@ -750,6 +815,7 @@ static int rsnd_soc_set_dai_tdm_slot(struct snd_soc_dai *dai,
switch (slots) {
case 2:
+ /* TDM Split Mode */
case 6:
case 8:
/* TDM Extend Mode */
@@ -965,6 +1031,82 @@ static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
.prepare = rsnd_soc_dai_prepare,
};
+static void rsnd_parse_connect_simple(struct rsnd_priv *priv,
+ struct device_node *dai_np,
+ int dai_i, int is_play)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_dai *rdai = rsnd_rdai_get(priv, dai_i);
+ struct rsnd_dai_stream *io = is_play ?
+ &rdai->playback :
+ &rdai->capture;
+ struct device_node *ssiu_np = rsnd_ssiu_of_node(priv);
+ struct device_node *np;
+ int i, j;
+
+ if (!ssiu_np)
+ return;
+
+ if (!rsnd_io_to_mod_ssi(io))
+ return;
+
+ /*
+ * This driver assumes TDM Split mode
+ * if the DT includes an ssiu node
+ */
+ for (i = 0;; i++) {
+ struct device_node *node = is_play ?
+ of_parse_phandle(dai_np, "playback", i) :
+ of_parse_phandle(dai_np, "capture", i);
+
+ if (!node)
+ break;
+
+ j = 0;
+ for_each_child_of_node(ssiu_np, np) {
+ if (np == node) {
+ rsnd_flags_set(io, RSND_STREAM_TDM_SPLIT);
+ dev_dbg(dev, "%s is part of TDM Split\n", io->name);
+ }
+ j++;
+ }
+
+ }
+}
+
+static void rsnd_parse_connect_graph(struct rsnd_priv *priv,
+ struct rsnd_dai_stream *io,
+ struct device_node *endpoint)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct device_node *remote_port = of_graph_get_remote_port(endpoint);
+ struct device_node *remote_node = of_graph_get_remote_port_parent(endpoint);
+
+ if (!rsnd_io_to_mod_ssi(io))
+ return;
+
+ /* HDMI0 */
+ if (strstr(remote_node->full_name, "hdmi@fead0000")) {
+ rsnd_flags_set(io, RSND_STREAM_HDMI0);
+ dev_dbg(dev, "%s connected to HDMI0\n", io->name);
+ }
+
+ /* HDMI1 */
+ if (strstr(remote_node->full_name, "hdmi@feae0000")) {
+ rsnd_flags_set(io, RSND_STREAM_HDMI1);
+ dev_dbg(dev, "%s connected to HDMI1\n", io->name);
+ }
+
+ /*
+ * This driver assumes TDM Split mode
+ * if the remote node has multiple endpoints
+ */
+ if (of_get_child_count(remote_port) > 1) {
+ rsnd_flags_set(io, RSND_STREAM_TDM_SPLIT);
+ dev_dbg(dev, "%s is part of TDM Split\n", io->name);
+ }
+}
+
void rsnd_parse_connect_common(struct rsnd_dai *rdai,
struct rsnd_mod* (*mod_get)(struct rsnd_priv *priv, int id),
struct device_node *node,
@@ -1051,24 +1193,24 @@ static void __rsnd_dai_probe(struct rsnd_priv *priv,
drv->name = rdai->name;
drv->ops = &rsnd_soc_dai_ops;
- snprintf(rdai->playback.name, RSND_DAI_NAME_SIZE,
+ snprintf(io_playback->name, RSND_DAI_NAME_SIZE,
"DAI%d Playback", dai_i);
drv->playback.rates = RSND_RATES;
drv->playback.formats = RSND_FMTS;
drv->playback.channels_min = 2;
drv->playback.channels_max = 8;
- drv->playback.stream_name = rdai->playback.name;
+ drv->playback.stream_name = io_playback->name;
- snprintf(rdai->capture.name, RSND_DAI_NAME_SIZE,
+ snprintf(io_capture->name, RSND_DAI_NAME_SIZE,
"DAI%d Capture", dai_i);
drv->capture.rates = RSND_RATES;
drv->capture.formats = RSND_FMTS;
drv->capture.channels_min = 2;
drv->capture.channels_max = 8;
- drv->capture.stream_name = rdai->capture.name;
+ drv->capture.stream_name = io_capture->name;
- rdai->playback.rdai = rdai;
- rdai->capture.rdai = rdai;
+ io_playback->rdai = rdai;
+ io_capture->rdai = rdai;
rsnd_rdai_channels_set(rdai, 2); /* default 2ch */
rsnd_rdai_ssi_lane_set(rdai, 1); /* default 1lane */
rsnd_rdai_width_set(rdai, 32); /* default 32bit width */
@@ -1081,6 +1223,7 @@ static void __rsnd_dai_probe(struct rsnd_priv *priv,
break;
rsnd_parse_connect_ssi(rdai, playback, capture);
+ rsnd_parse_connect_ssiu(rdai, playback, capture);
rsnd_parse_connect_src(rdai, playback, capture);
rsnd_parse_connect_ctu(rdai, playback, capture);
rsnd_parse_connect_mix(rdai, playback, capture);
@@ -1137,12 +1280,23 @@ static int rsnd_dai_probe(struct rsnd_priv *priv)
if (is_graph) {
for_each_endpoint_of_node(dai_node, dai_np) {
__rsnd_dai_probe(priv, dai_np, dai_i);
- rsnd_ssi_parse_hdmi_connection(priv, dai_np, dai_i);
+ if (rsnd_is_gen3(priv)) {
+ struct rsnd_dai *rdai = rsnd_rdai_get(priv, dai_i);
+
+ rsnd_parse_connect_graph(priv, &rdai->playback, dai_np);
+ rsnd_parse_connect_graph(priv, &rdai->capture, dai_np);
+ }
dai_i++;
}
} else {
- for_each_child_of_node(dai_node, dai_np)
- __rsnd_dai_probe(priv, dai_np, dai_i++);
+ for_each_child_of_node(dai_node, dai_np) {
+ __rsnd_dai_probe(priv, dai_np, dai_i);
+ if (rsnd_is_gen3(priv)) {
+ rsnd_parse_connect_simple(priv, dai_np, dai_i, 1);
+ rsnd_parse_connect_simple(priv, dai_np, dai_i, 0);
+ }
+ dai_i++;
+ }
}
return 0;
@@ -1157,8 +1311,40 @@ static int rsnd_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+ struct snd_soc_pcm_runtime *fe = substream->private_data;
int ret;
+ /*
+ * rsnd assumes that it is used under DPCM if the user wants
+ * channel / rate conversion. Then rsnd is the FE,
+ * and this function is called *after* the BE settings.
+ * This means each BE already has fixed-up hw_params.
+ * see
+ * dpcm_fe_dai_hw_params()
+ * dpcm_be_dai_hw_params()
+ */
+ io->converted_rate = 0;
+ io->converted_chan = 0;
+ if (fe->dai_link->dynamic) {
+ struct rsnd_priv *priv = rsnd_io_to_priv(io);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct snd_soc_dpcm *dpcm;
+ struct snd_pcm_hw_params *be_params;
+ int stream = substream->stream;
+
+ for_each_dpcm_be(fe, stream, dpcm) {
+ be_params = &dpcm->hw_params;
+ if (params_channels(hw_params) != params_channels(be_params))
+ io->converted_chan = params_channels(be_params);
+ if (params_rate(hw_params) != params_rate(be_params))
+ io->converted_rate = params_rate(be_params);
+ }
+ if (io->converted_chan)
+ dev_dbg(dev, "convert channels = %d\n", io->converted_chan);
+ if (io->converted_rate)
+ dev_dbg(dev, "convert rate = %d\n", io->converted_rate);
+ }
+
ret = rsnd_dai_call(hw_params, io, substream, hw_params);
if (ret)
return ret;
@@ -1339,6 +1525,18 @@ int rsnd_kctrl_new(struct rsnd_mod *mod,
};
int ret;
+ /*
+ * 1) Avoid duplicate registration (ex. the MIXer case)
+ * 2) Re-register if the card was rebound
+ */
+ list_for_each_entry(kctrl, &card->controls, list) {
+ struct rsnd_kctrl_cfg *c = kctrl->private_data;
+
+ if (strcmp(kctrl->id.name, name) == 0 &&
+ c->mod == mod)
+ return 0;
+ }
+
if (size > RSND_MAX_CHANNELS)
return -EINVAL;
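
rsnd_mod_name() above formats into a small round-robin pool of static buffers so that a handful of results can coexist inside one debug printf. A minimal userspace sketch of that pattern (illustration only; like the kernel version it is unsynchronized, so callers must consume the string immediately and never cache the pointer):

#include <stdio.h>

#define MOD_NAME_NUM  5
#define MOD_NAME_SIZE 16

static char *mod_name(const char *base, int id)
{
	static char names[MOD_NAME_NUM][MOD_NAME_SIZE];
	static int num;
	char *name = names[num];

	num = (num + 1) % MOD_NAME_NUM; /* rotate to the next buffer */
	snprintf(name, MOD_NAME_SIZE, "%s[%d]", base, id);
	return name;
}

int main(void)
{
	/* two live results from one pool, as in the "%s -> %s" debug prints */
	printf("%s -> %s\n", mod_name("src", 0), mod_name("ssiu", 30));
	return 0;
}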
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
index ad702377a6c3..8cb06dab234e 100644
--- a/sound/soc/sh/rcar/ctu.c
+++ b/sound/soc/sh/rcar/ctu.c
@@ -72,10 +72,7 @@
struct rsnd_ctu {
struct rsnd_mod mod;
struct rsnd_kctrl_cfg_m pass;
- struct rsnd_kctrl_cfg_m sv0;
- struct rsnd_kctrl_cfg_m sv1;
- struct rsnd_kctrl_cfg_m sv2;
- struct rsnd_kctrl_cfg_m sv3;
+ struct rsnd_kctrl_cfg_m sv[4];
struct rsnd_kctrl_cfg_s reset;
int channels;
u32 flags;
@@ -107,13 +104,6 @@ static void rsnd_ctu_halt(struct rsnd_mod *mod)
rsnd_mod_write(mod, CTU_SWRSR, 0);
}
-int rsnd_ctu_converted_channel(struct rsnd_mod *mod)
-{
- struct rsnd_ctu *ctu = rsnd_mod_to_ctu(mod);
-
- return ctu->channels;
-}
-
static int rsnd_ctu_probe_(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
@@ -127,7 +117,7 @@ static void rsnd_ctu_value_init(struct rsnd_dai_stream *io,
struct rsnd_ctu *ctu = rsnd_mod_to_ctu(mod);
u32 cpmdr = 0;
u32 scmdr = 0;
- int i;
+ int i, j;
for (i = 0; i < RSND_MAX_CHANNELS; i++) {
u32 val = rsnd_kctrl_valm(ctu->pass, i);
@@ -146,45 +136,13 @@ static void rsnd_ctu_value_init(struct rsnd_dai_stream *io,
rsnd_mod_write(mod, CTU_SCMDR, scmdr);
- if (scmdr > 0) {
- rsnd_mod_write(mod, CTU_SV00R, rsnd_kctrl_valm(ctu->sv0, 0));
- rsnd_mod_write(mod, CTU_SV01R, rsnd_kctrl_valm(ctu->sv0, 1));
- rsnd_mod_write(mod, CTU_SV02R, rsnd_kctrl_valm(ctu->sv0, 2));
- rsnd_mod_write(mod, CTU_SV03R, rsnd_kctrl_valm(ctu->sv0, 3));
- rsnd_mod_write(mod, CTU_SV04R, rsnd_kctrl_valm(ctu->sv0, 4));
- rsnd_mod_write(mod, CTU_SV05R, rsnd_kctrl_valm(ctu->sv0, 5));
- rsnd_mod_write(mod, CTU_SV06R, rsnd_kctrl_valm(ctu->sv0, 6));
- rsnd_mod_write(mod, CTU_SV07R, rsnd_kctrl_valm(ctu->sv0, 7));
- }
- if (scmdr > 1) {
- rsnd_mod_write(mod, CTU_SV10R, rsnd_kctrl_valm(ctu->sv1, 0));
- rsnd_mod_write(mod, CTU_SV11R, rsnd_kctrl_valm(ctu->sv1, 1));
- rsnd_mod_write(mod, CTU_SV12R, rsnd_kctrl_valm(ctu->sv1, 2));
- rsnd_mod_write(mod, CTU_SV13R, rsnd_kctrl_valm(ctu->sv1, 3));
- rsnd_mod_write(mod, CTU_SV14R, rsnd_kctrl_valm(ctu->sv1, 4));
- rsnd_mod_write(mod, CTU_SV15R, rsnd_kctrl_valm(ctu->sv1, 5));
- rsnd_mod_write(mod, CTU_SV16R, rsnd_kctrl_valm(ctu->sv1, 6));
- rsnd_mod_write(mod, CTU_SV17R, rsnd_kctrl_valm(ctu->sv1, 7));
- }
- if (scmdr > 2) {
- rsnd_mod_write(mod, CTU_SV20R, rsnd_kctrl_valm(ctu->sv2, 0));
- rsnd_mod_write(mod, CTU_SV21R, rsnd_kctrl_valm(ctu->sv2, 1));
- rsnd_mod_write(mod, CTU_SV22R, rsnd_kctrl_valm(ctu->sv2, 2));
- rsnd_mod_write(mod, CTU_SV23R, rsnd_kctrl_valm(ctu->sv2, 3));
- rsnd_mod_write(mod, CTU_SV24R, rsnd_kctrl_valm(ctu->sv2, 4));
- rsnd_mod_write(mod, CTU_SV25R, rsnd_kctrl_valm(ctu->sv2, 5));
- rsnd_mod_write(mod, CTU_SV26R, rsnd_kctrl_valm(ctu->sv2, 6));
- rsnd_mod_write(mod, CTU_SV27R, rsnd_kctrl_valm(ctu->sv2, 7));
- }
- if (scmdr > 3) {
- rsnd_mod_write(mod, CTU_SV30R, rsnd_kctrl_valm(ctu->sv3, 0));
- rsnd_mod_write(mod, CTU_SV31R, rsnd_kctrl_valm(ctu->sv3, 1));
- rsnd_mod_write(mod, CTU_SV32R, rsnd_kctrl_valm(ctu->sv3, 2));
- rsnd_mod_write(mod, CTU_SV33R, rsnd_kctrl_valm(ctu->sv3, 3));
- rsnd_mod_write(mod, CTU_SV34R, rsnd_kctrl_valm(ctu->sv3, 4));
- rsnd_mod_write(mod, CTU_SV35R, rsnd_kctrl_valm(ctu->sv3, 5));
- rsnd_mod_write(mod, CTU_SV36R, rsnd_kctrl_valm(ctu->sv3, 6));
- rsnd_mod_write(mod, CTU_SV37R, rsnd_kctrl_valm(ctu->sv3, 7));
+ for (i = 0; i < 4; i++) {
+ if (i >= scmdr)
+ break;
+
+ for (j = 0; j < RSND_MAX_CHANNELS; j++)
+ rsnd_mod_write(mod, CTU_SVxxR(i, j), rsnd_kctrl_valm(ctu->sv[i], j));
}
rsnd_mod_write(mod, CTU_CTUIR, 0);
@@ -201,10 +159,10 @@ static void rsnd_ctu_value_reset(struct rsnd_dai_stream *io,
for (i = 0; i < RSND_MAX_CHANNELS; i++) {
rsnd_kctrl_valm(ctu->pass, i) = 0;
- rsnd_kctrl_valm(ctu->sv0, i) = 0;
- rsnd_kctrl_valm(ctu->sv1, i) = 0;
- rsnd_kctrl_valm(ctu->sv2, i) = 0;
- rsnd_kctrl_valm(ctu->sv3, i) = 0;
+ rsnd_kctrl_valm(ctu->sv[0], i) = 0;
+ rsnd_kctrl_valm(ctu->sv[1], i) = 0;
+ rsnd_kctrl_valm(ctu->sv[2], i) = 0;
+ rsnd_kctrl_valm(ctu->sv[3], i) = 0;
}
rsnd_kctrl_vals(ctu->reset) = 0;
}
@@ -233,43 +191,6 @@ static int rsnd_ctu_quit(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_ctu_hw_params(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *fe_params)
-{
- struct rsnd_ctu *ctu = rsnd_mod_to_ctu(mod);
- struct snd_soc_pcm_runtime *fe = substream->private_data;
-
- /*
- * CTU assumes that it is used under DPCM if user want to use
- * channel transfer. Then, CTU should be FE.
- * And then, this function will be called *after* BE settings.
- * this means, each BE already has fixuped hw_params.
- * see
- * dpcm_fe_dai_hw_params()
- * dpcm_be_dai_hw_params()
- */
- ctu->channels = 0;
- if (fe->dai_link->dynamic) {
- struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
- struct device *dev = rsnd_priv_to_dev(priv);
- struct snd_soc_dpcm *dpcm;
- struct snd_pcm_hw_params *be_params;
- int stream = substream->stream;
-
- for_each_dpcm_be(fe, stream, dpcm) {
- be_params = &dpcm->hw_params;
- if (params_channels(fe_params) != params_channels(be_params))
- ctu->channels = params_channels(be_params);
- }
-
- dev_dbg(dev, "CTU convert channels %d\n", ctu->channels);
- }
-
- return 0;
-}
-
static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct snd_soc_pcm_runtime *rtd)
@@ -291,7 +212,7 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
ret = rsnd_kctrl_new_m(mod, io, rtd, "CTU SV0",
rsnd_kctrl_accept_anytime,
NULL,
- &ctu->sv0, RSND_MAX_CHANNELS,
+ &ctu->sv[0], RSND_MAX_CHANNELS,
0x00FFFFFF);
if (ret < 0)
return ret;
@@ -300,7 +221,7 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
ret = rsnd_kctrl_new_m(mod, io, rtd, "CTU SV1",
rsnd_kctrl_accept_anytime,
NULL,
- &ctu->sv1, RSND_MAX_CHANNELS,
+ &ctu->sv[1], RSND_MAX_CHANNELS,
0x00FFFFFF);
if (ret < 0)
return ret;
@@ -309,7 +230,7 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
ret = rsnd_kctrl_new_m(mod, io, rtd, "CTU SV2",
rsnd_kctrl_accept_anytime,
NULL,
- &ctu->sv2, RSND_MAX_CHANNELS,
+ &ctu->sv[2], RSND_MAX_CHANNELS,
0x00FFFFFF);
if (ret < 0)
return ret;
@@ -318,7 +239,7 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
ret = rsnd_kctrl_new_m(mod, io, rtd, "CTU SV3",
rsnd_kctrl_accept_anytime,
NULL,
- &ctu->sv3, RSND_MAX_CHANNELS,
+ &ctu->sv[3], RSND_MAX_CHANNELS,
0x00FFFFFF);
if (ret < 0)
return ret;
@@ -334,13 +255,34 @@ static int rsnd_ctu_pcm_new(struct rsnd_mod *mod,
return ret;
}
+static int rsnd_ctu_id(struct rsnd_mod *mod)
+{
+ /*
+ * ctu00: -> 0, ctu01: -> 0, ctu02: -> 0, ctu03: -> 0
+ * ctu10: -> 1, ctu11: -> 1, ctu12: -> 1, ctu13: -> 1
+ */
+ return mod->id / 4;
+}
+
+static int rsnd_ctu_id_sub(struct rsnd_mod *mod)
+{
+ /*
+ * ctu00: -> 0, ctu01: -> 1, ctu02: -> 2, ctu03: -> 3
+ * ctu10: -> 0, ctu11: -> 1, ctu12: -> 2, ctu13: -> 3
+ */
+ return mod->id % 4;
+}
+
static struct rsnd_mod_ops rsnd_ctu_ops = {
.name = CTU_NAME,
.probe = rsnd_ctu_probe_,
.init = rsnd_ctu_init,
.quit = rsnd_ctu_quit,
- .hw_params = rsnd_ctu_hw_params,
.pcm_new = rsnd_ctu_pcm_new,
+ .get_status = rsnd_mod_get_status,
+ .id = rsnd_ctu_id,
+ .id_sub = rsnd_ctu_id_sub,
+ .id_cmd = rsnd_mod_id_raw,
};
struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id)
@@ -404,7 +346,7 @@ int rsnd_ctu_probe(struct rsnd_priv *priv)
}
ret = rsnd_mod_init(priv, rsnd_mod_get(ctu), &rsnd_ctu_ops,
- clk, rsnd_mod_get_status, RSND_MOD_CTU, i);
+ clk, RSND_MOD_CTU, i);
if (ret) {
of_node_put(np);
goto rsnd_ctu_probe_done;
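
Replacing the sv0..sv3 fields with sv[4] pairs naturally with CTU_SVxxR(i, j) = CTU_SV00R + i*8 + j, which flattens the 4x8 scale-value register grid so that 32 hand-written register writes become a two-level loop. A sketch with a register base of 0, assuming the driver's 8-channel maximum:

#include <stdio.h>

#define RSND_MAX_CHANNELS 8
#define CTU_SVxxR(i, j) (((i) * 8) + (j)) /* grid -> consecutive regs */

int main(void)
{
	unsigned int sv[4][RSND_MAX_CHANNELS] = { { 0 } };
	int scmdr = 2; /* only the first scmdr banks are programmed */
	int i, j;

	for (i = 0; i < 4 && i < scmdr; i++)
		for (j = 0; j < RSND_MAX_CHANNELS; j++)
			printf("write reg %2d = %#x\n",
			       CTU_SVxxR(i, j), sv[i][j]);
	return 0;
}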
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 6d1947515dc8..0324a5c39619 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -174,8 +174,8 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- dev_dbg(dev, "%s[%d] %pad -> %pad\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod),
+ dev_dbg(dev, "%s %pad -> %pad\n",
+ rsnd_mod_name(mod),
&cfg.src_addr, &cfg.dst_addr);
ret = dmaengine_slave_config(dmaen->chan, &cfg);
@@ -218,7 +218,7 @@ struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
int i = 0;
for_each_child_of_node(of_node, np) {
- if (i == rsnd_mod_id(mod) && (!chan))
+ if (i == rsnd_mod_id_raw(mod) && (!chan))
chan = of_dma_request_slave_channel(np, name);
i++;
}
@@ -289,12 +289,13 @@ static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
}
static struct rsnd_mod_ops rsnd_dmaen_ops = {
- .name = "audmac",
- .prepare = rsnd_dmaen_prepare,
- .cleanup = rsnd_dmaen_cleanup,
- .start = rsnd_dmaen_start,
- .stop = rsnd_dmaen_stop,
- .pointer= rsnd_dmaen_pointer,
+ .name = "audmac",
+ .prepare = rsnd_dmaen_prepare,
+ .cleanup = rsnd_dmaen_cleanup,
+ .start = rsnd_dmaen_start,
+ .stop = rsnd_dmaen_stop,
+ .pointer = rsnd_dmaen_pointer,
+ .get_status = rsnd_mod_get_status,
};
/*
@@ -343,14 +344,16 @@ static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
struct rsnd_mod *mod)
{
struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
+ struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
struct rsnd_mod *src = rsnd_io_to_mod_src(io);
struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
const u8 *entry = NULL;
int id = 255;
int size = 0;
- if (mod == ssi) {
- int busif = rsnd_ssi_get_busif(io);
+ if ((mod == ssi) ||
+ (mod == ssiu)) {
+ int busif = rsnd_mod_id_sub(ssiu);
entry = gen2_id_table_ssiu;
size = ARRAY_SIZE(gen2_id_table_ssiu);
@@ -368,8 +371,7 @@ static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
if ((!entry) || (size <= id)) {
struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));
- dev_err(dev, "unknown connection (%s[%d])\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
+ dev_err(dev, "unknown connection (%s)\n", rsnd_mod_name(mod));
/* use non-prohibited SRS number as error */
return 0x00; /* SSI00 */
@@ -477,10 +479,11 @@ static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
}
static struct rsnd_mod_ops rsnd_dmapp_ops = {
- .name = "audmac-pp",
- .start = rsnd_dmapp_start,
- .stop = rsnd_dmapp_stop,
- .quit = rsnd_dmapp_stop,
+ .name = "audmac-pp",
+ .start = rsnd_dmapp_start,
+ .stop = rsnd_dmapp_stop,
+ .quit = rsnd_dmapp_stop,
+ .get_status = rsnd_mod_get_status,
};
/*
@@ -529,13 +532,14 @@ rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
struct device *dev = rsnd_priv_to_dev(priv);
phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
- int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod);
+ int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod) ||
+ !!(rsnd_io_to_mod_ssiu(io) == mod);
int use_src = !!rsnd_io_to_mod_src(io);
int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
!!rsnd_io_to_mod_mix(io) ||
!!rsnd_io_to_mod_ctu(io);
int id = rsnd_mod_id(mod);
- int busif = rsnd_ssi_get_busif(io);
+ int busif = rsnd_mod_id_sub(rsnd_io_to_mod_ssiu(io));
struct dma_addr {
dma_addr_t out_addr;
dma_addr_t in_addr;
@@ -619,7 +623,7 @@ static void rsnd_dma_of_path(struct rsnd_mod *this,
struct rsnd_mod **mod_from,
struct rsnd_mod **mod_to)
{
- struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
+ struct rsnd_mod *ssi;
struct rsnd_mod *src = rsnd_io_to_mod_src(io);
struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
@@ -630,6 +634,28 @@ static void rsnd_dma_of_path(struct rsnd_mod *this,
struct device *dev = rsnd_priv_to_dev(priv);
int nr, i, idx;
+ /*
+ * It should use "rcar_sound,ssiu" on DT.
+ * But we need to keep compatibility with old versions.
+ *
+ * If it has "rcar_sound,ssiu", it will be used.
+ * If not, "rcar_sound,ssi" will be used.
+ * see
+ * rsnd_ssiu_dma_req()
+ * rsnd_ssi_dma_req()
+ */
+ if (rsnd_ssiu_of_node(priv)) {
+ struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
+
+ /* use SSIU */
+ ssi = ssiu;
+ if (this == rsnd_io_to_mod_ssi(io))
+ this = ssiu;
+ } else {
+ /* keep compatibility, use SSI */
+ ssi = rsnd_io_to_mod_ssi(io);
+ }
+
if (!ssi)
return;
@@ -690,12 +716,10 @@ static void rsnd_dma_of_path(struct rsnd_mod *this,
*mod_to = mod[1];
}
- dev_dbg(dev, "module connection (this is %s[%d])\n",
- rsnd_mod_name(this), rsnd_mod_id(this));
+ dev_dbg(dev, "module connection (this is %s)\n", rsnd_mod_name(this));
for (i = 0; i <= idx; i++) {
- dev_dbg(dev, " %s[%d]%s\n",
+ dev_dbg(dev, " %s%s\n",
rsnd_mod_name(mod[i] ? mod[i] : &mem),
- rsnd_mod_id (mod[i] ? mod[i] : &mem),
(mod[i] == *mod_from) ? " from" :
(mod[i] == *mod_to) ? " to" : "");
}
@@ -756,16 +780,14 @@ static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
*dma_mod = rsnd_mod_get(dma);
ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
- rsnd_mod_get_status, type, dma_id);
+ type, dma_id);
if (ret < 0)
return ret;
- dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
- rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
+ dev_dbg(dev, "%s %s -> %s\n",
+ rsnd_mod_name(*dma_mod),
rsnd_mod_name(mod_from ? mod_from : &mem),
- rsnd_mod_id (mod_from ? mod_from : &mem),
- rsnd_mod_name(mod_to ? mod_to : &mem),
- rsnd_mod_id (mod_to ? mod_to : &mem));
+ rsnd_mod_name(mod_to ? mod_to : &mem));
ret = attach(io, dma, mod_from, mod_to);
if (ret < 0)
@@ -823,5 +845,5 @@ int rsnd_dma_probe(struct rsnd_priv *priv)
priv->dma = dmac;
/* dummy mem mod for debug */
- return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, NULL, 0, 0);
+ return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, 0, 0);
}
diff --git a/sound/soc/sh/rcar/dvc.c b/sound/soc/sh/rcar/dvc.c
index 2b16e0ce6bc5..8d91c0eb0880 100644
--- a/sound/soc/sh/rcar/dvc.c
+++ b/sound/soc/sh/rcar/dvc.c
@@ -40,11 +40,8 @@ struct rsnd_dvc {
struct rsnd_kctrl_cfg_s ren; /* Ramp Enable */
struct rsnd_kctrl_cfg_s rup; /* Ramp Rate Up */
struct rsnd_kctrl_cfg_s rdown; /* Ramp Rate Down */
- u32 flags;
};
-#define KCTRL_INITIALIZED (1 << 0)
-
#define rsnd_dvc_get(priv, id) ((struct rsnd_dvc *)(priv->dvc) + id)
#define rsnd_dvc_nr(priv) ((priv)->dvc_nr)
@@ -89,14 +86,8 @@ static void rsnd_dvc_volume_parameter(struct rsnd_dai_stream *io,
val[i] = rsnd_kctrl_valm(dvc->volume, i);
/* Enable Digital Volume */
- rsnd_mod_write(mod, DVC_VOL0R, val[0]);
- rsnd_mod_write(mod, DVC_VOL1R, val[1]);
- rsnd_mod_write(mod, DVC_VOL2R, val[2]);
- rsnd_mod_write(mod, DVC_VOL3R, val[3]);
- rsnd_mod_write(mod, DVC_VOL4R, val[4]);
- rsnd_mod_write(mod, DVC_VOL5R, val[5]);
- rsnd_mod_write(mod, DVC_VOL6R, val[6]);
- rsnd_mod_write(mod, DVC_VOL7R, val[7]);
+ for (i = 0; i < RSND_MAX_CHANNELS; i++)
+ rsnd_mod_write(mod, DVC_VOLxR(i), val[i]);
}
static void rsnd_dvc_volume_init(struct rsnd_dai_stream *io,
@@ -227,9 +218,6 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
int channels = rsnd_rdai_channels_get(rdai);
int ret;
- if (rsnd_flags_has(dvc, KCTRL_INITIALIZED))
- return 0;
-
/* Volume */
ret = rsnd_kctrl_new_m(mod, io, rtd,
is_play ?
@@ -285,8 +273,6 @@ static int rsnd_dvc_pcm_new(struct rsnd_mod *mod,
if (ret < 0)
return ret;
- rsnd_flags_set(dvc, KCTRL_INITIALIZED);
-
return 0;
}
@@ -306,6 +292,7 @@ static struct rsnd_mod_ops rsnd_dvc_ops = {
.init = rsnd_dvc_init,
.quit = rsnd_dvc_quit,
.pcm_new = rsnd_dvc_pcm_new,
+ .get_status = rsnd_mod_get_status,
};
struct rsnd_mod *rsnd_dvc_mod_get(struct rsnd_priv *priv, int id)
@@ -365,7 +352,7 @@ int rsnd_dvc_probe(struct rsnd_priv *priv)
}
ret = rsnd_mod_init(priv, rsnd_mod_get(dvc), &rsnd_dvc_ops,
- clk, rsnd_mod_get_status, RSND_MOD_DVC, i);
+ clk, RSND_MOD_DVC, i);
if (ret) {
of_node_put(np);
goto rsnd_dvc_probe_done;
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 1f7881cc16b2..7cda60188f41 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -26,8 +26,8 @@ struct rsnd_gen {
struct regmap *regmap[RSND_BASE_MAX];
/* RSND_REG_MAX base */
- struct regmap_field *regs[RSND_REG_MAX];
- const char *reg_name[RSND_REG_MAX];
+ struct regmap_field *regs[REG_MAX];
+ const char *reg_name[REG_MAX];
};
#define rsnd_priv_to_gen(p) ((struct rsnd_gen *)(p)->gen)
@@ -49,11 +49,11 @@ struct rsnd_regmap_field_conf {
}
/* single address mapping */
#define RSND_GEN_S_REG(id, offset) \
- RSND_REG_SET(RSND_REG_##id, offset, 0, #id)
+ RSND_REG_SET(id, offset, 0, #id)
/* multi address mapping */
#define RSND_GEN_M_REG(id, offset, _id_offset) \
- RSND_REG_SET(RSND_REG_##id, offset, _id_offset, #id)
+ RSND_REG_SET(id, offset, _id_offset, #id)
/*
* basic function
@@ -71,9 +71,17 @@ static int rsnd_is_accessible_reg(struct rsnd_priv *priv,
return 1;
}
-u32 rsnd_read(struct rsnd_priv *priv,
- struct rsnd_mod *mod, enum rsnd_reg reg)
+static int rsnd_mod_id_cmd(struct rsnd_mod *mod)
{
+ if (mod->ops->id_cmd)
+ return mod->ops->id_cmd(mod);
+
+ return rsnd_mod_id(mod);
+}
+
+u32 rsnd_mod_read(struct rsnd_mod *mod, enum rsnd_reg reg)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
u32 val;
@@ -81,35 +89,36 @@ u32 rsnd_read(struct rsnd_priv *priv,
if (!rsnd_is_accessible_reg(priv, gen, reg))
return 0;
- regmap_fields_read(gen->regs[reg], rsnd_mod_id(mod), &val);
+ regmap_fields_read(gen->regs[reg], rsnd_mod_id_cmd(mod), &val);
- dev_dbg(dev, "r %s[%d] - %-18s (%4d) : %08x\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod),
+ dev_dbg(dev, "r %s - %-18s (%4d) : %08x\n",
+ rsnd_mod_name(mod),
rsnd_reg_name(gen, reg), reg, val);
return val;
}
-void rsnd_write(struct rsnd_priv *priv,
- struct rsnd_mod *mod,
- enum rsnd_reg reg, u32 data)
+void rsnd_mod_write(struct rsnd_mod *mod,
+ enum rsnd_reg reg, u32 data)
{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
if (!rsnd_is_accessible_reg(priv, gen, reg))
return;
- regmap_fields_force_write(gen->regs[reg], rsnd_mod_id(mod), data);
+ regmap_fields_force_write(gen->regs[reg], rsnd_mod_id_cmd(mod), data);
- dev_dbg(dev, "w %s[%d] - %-18s (%4d) : %08x\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod),
+ dev_dbg(dev, "w %s - %-18s (%4d) : %08x\n",
+ rsnd_mod_name(mod),
rsnd_reg_name(gen, reg), reg, data);
}
-void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
- enum rsnd_reg reg, u32 mask, u32 data)
+void rsnd_mod_bset(struct rsnd_mod *mod,
+ enum rsnd_reg reg, u32 mask, u32 data)
{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct device *dev = rsnd_priv_to_dev(priv);
struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
@@ -117,10 +126,10 @@ void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
return;
regmap_fields_force_update_bits(gen->regs[reg],
- rsnd_mod_id(mod), mask, data);
+ rsnd_mod_id_cmd(mod), mask, data);
- dev_dbg(dev, "b %s[%d] - %-18s (%4d) : %08x/%08x\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod),
+ dev_dbg(dev, "b %s - %-18s (%4d) : %08x/%08x\n",
+ rsnd_mod_name(mod),
rsnd_reg_name(gen, reg), reg, data, mask);
}
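
rsnd_mod_read/write/bset now resolve the regmap field index through rsnd_mod_id_cmd(), an optional per-module override that falls back to rsnd_mod_id(), which itself falls back to the raw id. A standalone sketch of that fallback chain through an ops table (illustrative types, mirroring the CTU case where .id divides by 4 and .id_cmd keeps the raw id):

#include <stdio.h>

struct mod;
struct mod_ops {
	const char *name;
	int (*id)(struct mod *m);     /* optional override */
	int (*id_cmd)(struct mod *m); /* optional override */
};
struct mod { int id; struct mod_ops *ops; };

static int mod_id_raw(struct mod *m) { return m->id; }

static int mod_id(struct mod *m)
{
	return m->ops->id ? m->ops->id(m) : mod_id_raw(m);
}

static int mod_id_cmd(struct mod *m)
{
	return m->ops->id_cmd ? m->ops->id_cmd(m) : mod_id(m);
}

static int ctu_id(struct mod *m) { return m->id / 4; } /* ctu13 -> 1 */

int main(void)
{
	struct mod_ops ctu_ops = { .name = "ctu", .id = ctu_id,
				   .id_cmd = mod_id_raw };
	struct mod ctu = { .id = 7, .ops = &ctu_ops };

	printf("%s: id=%d id_cmd=%d\n", ctu_ops.name,
	       mod_id(&ctu), mod_id_cmd(&ctu));
	return 0;
}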
diff --git a/sound/soc/sh/rcar/mix.c b/sound/soc/sh/rcar/mix.c
index 8e3b57eaa708..a3e0370f5704 100644
--- a/sound/soc/sh/rcar/mix.c
+++ b/sound/soc/sh/rcar/mix.c
@@ -256,6 +256,7 @@ static struct rsnd_mod_ops rsnd_mix_ops = {
.init = rsnd_mix_init,
.quit = rsnd_mix_quit,
.pcm_new = rsnd_mix_pcm_new,
+ .get_status = rsnd_mod_get_status,
};
struct rsnd_mod *rsnd_mix_mod_get(struct rsnd_priv *priv, int id)
@@ -315,7 +316,7 @@ int rsnd_mix_probe(struct rsnd_priv *priv)
}
ret = rsnd_mod_init(priv, rsnd_mod_get(mix), &rsnd_mix_ops,
- clk, rsnd_mod_get_status, RSND_MOD_MIX, i);
+ clk, RSND_MOD_MIX, i);
if (ret) {
of_node_put(np);
goto rsnd_mix_probe_done;
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 4464d1d0a042..605e4b934982 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -42,165 +42,175 @@
*/
enum rsnd_reg {
/* SCU (MIX/CTU/DVC) */
- RSND_REG_SRC_I_BUSIF_MODE,
- RSND_REG_SRC_O_BUSIF_MODE,
- RSND_REG_SRC_ROUTE_MODE0,
- RSND_REG_SRC_SWRSR,
- RSND_REG_SRC_SRCIR,
- RSND_REG_SRC_ADINR,
- RSND_REG_SRC_IFSCR,
- RSND_REG_SRC_IFSVR,
- RSND_REG_SRC_SRCCR,
- RSND_REG_SRC_CTRL,
- RSND_REG_SRC_BSDSR,
- RSND_REG_SRC_BSISR,
- RSND_REG_SRC_INT_ENABLE0,
- RSND_REG_SRC_BUSIF_DALIGN,
- RSND_REG_SRCIN_TIMSEL0,
- RSND_REG_SRCIN_TIMSEL1,
- RSND_REG_SRCIN_TIMSEL2,
- RSND_REG_SRCIN_TIMSEL3,
- RSND_REG_SRCIN_TIMSEL4,
- RSND_REG_SRCOUT_TIMSEL0,
- RSND_REG_SRCOUT_TIMSEL1,
- RSND_REG_SRCOUT_TIMSEL2,
- RSND_REG_SRCOUT_TIMSEL3,
- RSND_REG_SRCOUT_TIMSEL4,
- RSND_REG_SCU_SYS_STATUS0,
- RSND_REG_SCU_SYS_STATUS1,
- RSND_REG_SCU_SYS_INT_EN0,
- RSND_REG_SCU_SYS_INT_EN1,
- RSND_REG_CMD_CTRL,
- RSND_REG_CMD_BUSIF_MODE,
- RSND_REG_CMD_BUSIF_DALIGN,
- RSND_REG_CMD_ROUTE_SLCT,
- RSND_REG_CMDOUT_TIMSEL,
- RSND_REG_CTU_SWRSR,
- RSND_REG_CTU_CTUIR,
- RSND_REG_CTU_ADINR,
- RSND_REG_CTU_CPMDR,
- RSND_REG_CTU_SCMDR,
- RSND_REG_CTU_SV00R,
- RSND_REG_CTU_SV01R,
- RSND_REG_CTU_SV02R,
- RSND_REG_CTU_SV03R,
- RSND_REG_CTU_SV04R,
- RSND_REG_CTU_SV05R,
- RSND_REG_CTU_SV06R,
- RSND_REG_CTU_SV07R,
- RSND_REG_CTU_SV10R,
- RSND_REG_CTU_SV11R,
- RSND_REG_CTU_SV12R,
- RSND_REG_CTU_SV13R,
- RSND_REG_CTU_SV14R,
- RSND_REG_CTU_SV15R,
- RSND_REG_CTU_SV16R,
- RSND_REG_CTU_SV17R,
- RSND_REG_CTU_SV20R,
- RSND_REG_CTU_SV21R,
- RSND_REG_CTU_SV22R,
- RSND_REG_CTU_SV23R,
- RSND_REG_CTU_SV24R,
- RSND_REG_CTU_SV25R,
- RSND_REG_CTU_SV26R,
- RSND_REG_CTU_SV27R,
- RSND_REG_CTU_SV30R,
- RSND_REG_CTU_SV31R,
- RSND_REG_CTU_SV32R,
- RSND_REG_CTU_SV33R,
- RSND_REG_CTU_SV34R,
- RSND_REG_CTU_SV35R,
- RSND_REG_CTU_SV36R,
- RSND_REG_CTU_SV37R,
- RSND_REG_MIX_SWRSR,
- RSND_REG_MIX_MIXIR,
- RSND_REG_MIX_ADINR,
- RSND_REG_MIX_MIXMR,
- RSND_REG_MIX_MVPDR,
- RSND_REG_MIX_MDBAR,
- RSND_REG_MIX_MDBBR,
- RSND_REG_MIX_MDBCR,
- RSND_REG_MIX_MDBDR,
- RSND_REG_MIX_MDBER,
- RSND_REG_DVC_SWRSR,
- RSND_REG_DVC_DVUIR,
- RSND_REG_DVC_ADINR,
- RSND_REG_DVC_DVUCR,
- RSND_REG_DVC_ZCMCR,
- RSND_REG_DVC_VOL0R,
- RSND_REG_DVC_VOL1R,
- RSND_REG_DVC_VOL2R,
- RSND_REG_DVC_VOL3R,
- RSND_REG_DVC_VOL4R,
- RSND_REG_DVC_VOL5R,
- RSND_REG_DVC_VOL6R,
- RSND_REG_DVC_VOL7R,
- RSND_REG_DVC_DVUER,
- RSND_REG_DVC_VRCTR,
- RSND_REG_DVC_VRPDR,
- RSND_REG_DVC_VRDBR,
+ SRC_I_BUSIF_MODE,
+ SRC_O_BUSIF_MODE,
+ SRC_ROUTE_MODE0,
+ SRC_SWRSR,
+ SRC_SRCIR,
+ SRC_ADINR,
+ SRC_IFSCR,
+ SRC_IFSVR,
+ SRC_SRCCR,
+ SRC_CTRL,
+ SRC_BSDSR,
+ SRC_BSISR,
+ SRC_INT_ENABLE0,
+ SRC_BUSIF_DALIGN,
+ SRCIN_TIMSEL0,
+ SRCIN_TIMSEL1,
+ SRCIN_TIMSEL2,
+ SRCIN_TIMSEL3,
+ SRCIN_TIMSEL4,
+ SRCOUT_TIMSEL0,
+ SRCOUT_TIMSEL1,
+ SRCOUT_TIMSEL2,
+ SRCOUT_TIMSEL3,
+ SRCOUT_TIMSEL4,
+ SCU_SYS_STATUS0,
+ SCU_SYS_STATUS1,
+ SCU_SYS_INT_EN0,
+ SCU_SYS_INT_EN1,
+ CMD_CTRL,
+ CMD_BUSIF_MODE,
+ CMD_BUSIF_DALIGN,
+ CMD_ROUTE_SLCT,
+ CMDOUT_TIMSEL,
+ CTU_SWRSR,
+ CTU_CTUIR,
+ CTU_ADINR,
+ CTU_CPMDR,
+ CTU_SCMDR,
+ CTU_SV00R,
+ CTU_SV01R,
+ CTU_SV02R,
+ CTU_SV03R,
+ CTU_SV04R,
+ CTU_SV05R,
+ CTU_SV06R,
+ CTU_SV07R,
+ CTU_SV10R,
+ CTU_SV11R,
+ CTU_SV12R,
+ CTU_SV13R,
+ CTU_SV14R,
+ CTU_SV15R,
+ CTU_SV16R,
+ CTU_SV17R,
+ CTU_SV20R,
+ CTU_SV21R,
+ CTU_SV22R,
+ CTU_SV23R,
+ CTU_SV24R,
+ CTU_SV25R,
+ CTU_SV26R,
+ CTU_SV27R,
+ CTU_SV30R,
+ CTU_SV31R,
+ CTU_SV32R,
+ CTU_SV33R,
+ CTU_SV34R,
+ CTU_SV35R,
+ CTU_SV36R,
+ CTU_SV37R,
+ MIX_SWRSR,
+ MIX_MIXIR,
+ MIX_ADINR,
+ MIX_MIXMR,
+ MIX_MVPDR,
+ MIX_MDBAR,
+ MIX_MDBBR,
+ MIX_MDBCR,
+ MIX_MDBDR,
+ MIX_MDBER,
+ DVC_SWRSR,
+ DVC_DVUIR,
+ DVC_ADINR,
+ DVC_DVUCR,
+ DVC_ZCMCR,
+ DVC_VOL0R,
+ DVC_VOL1R,
+ DVC_VOL2R,
+ DVC_VOL3R,
+ DVC_VOL4R,
+ DVC_VOL5R,
+ DVC_VOL6R,
+ DVC_VOL7R,
+ DVC_DVUER,
+ DVC_VRCTR,
+ DVC_VRPDR,
+ DVC_VRDBR,
/* ADG */
- RSND_REG_BRRA,
- RSND_REG_BRRB,
- RSND_REG_BRGCKR,
- RSND_REG_DIV_EN,
- RSND_REG_AUDIO_CLK_SEL0,
- RSND_REG_AUDIO_CLK_SEL1,
- RSND_REG_AUDIO_CLK_SEL2,
+ BRRA,
+ BRRB,
+ BRGCKR,
+ DIV_EN,
+ AUDIO_CLK_SEL0,
+ AUDIO_CLK_SEL1,
+ AUDIO_CLK_SEL2,
/* SSIU */
- RSND_REG_SSI_MODE,
- RSND_REG_SSI_MODE0,
- RSND_REG_SSI_MODE1,
- RSND_REG_SSI_MODE2,
- RSND_REG_SSI_CONTROL,
- RSND_REG_SSI_CTRL,
- RSND_REG_SSI_BUSIF0_MODE,
- RSND_REG_SSI_BUSIF0_ADINR,
- RSND_REG_SSI_BUSIF0_DALIGN,
- RSND_REG_SSI_BUSIF1_MODE,
- RSND_REG_SSI_BUSIF1_ADINR,
- RSND_REG_SSI_BUSIF1_DALIGN,
- RSND_REG_SSI_BUSIF2_MODE,
- RSND_REG_SSI_BUSIF2_ADINR,
- RSND_REG_SSI_BUSIF2_DALIGN,
- RSND_REG_SSI_BUSIF3_MODE,
- RSND_REG_SSI_BUSIF3_ADINR,
- RSND_REG_SSI_BUSIF3_DALIGN,
- RSND_REG_SSI_BUSIF4_MODE,
- RSND_REG_SSI_BUSIF4_ADINR,
- RSND_REG_SSI_BUSIF4_DALIGN,
- RSND_REG_SSI_BUSIF5_MODE,
- RSND_REG_SSI_BUSIF5_ADINR,
- RSND_REG_SSI_BUSIF5_DALIGN,
- RSND_REG_SSI_BUSIF6_MODE,
- RSND_REG_SSI_BUSIF6_ADINR,
- RSND_REG_SSI_BUSIF6_DALIGN,
- RSND_REG_SSI_BUSIF7_MODE,
- RSND_REG_SSI_BUSIF7_ADINR,
- RSND_REG_SSI_BUSIF7_DALIGN,
- RSND_REG_SSI_INT_ENABLE,
- RSND_REG_SSI_SYS_STATUS0,
- RSND_REG_SSI_SYS_STATUS1,
- RSND_REG_SSI_SYS_STATUS2,
- RSND_REG_SSI_SYS_STATUS3,
- RSND_REG_SSI_SYS_STATUS4,
- RSND_REG_SSI_SYS_STATUS5,
- RSND_REG_SSI_SYS_STATUS6,
- RSND_REG_SSI_SYS_STATUS7,
- RSND_REG_HDMI0_SEL,
- RSND_REG_HDMI1_SEL,
+ SSI_MODE,
+ SSI_MODE0,
+ SSI_MODE1,
+ SSI_MODE2,
+ SSI_CONTROL,
+ SSI_CTRL,
+ SSI_BUSIF0_MODE,
+ SSI_BUSIF1_MODE,
+ SSI_BUSIF2_MODE,
+ SSI_BUSIF3_MODE,
+ SSI_BUSIF4_MODE,
+ SSI_BUSIF5_MODE,
+ SSI_BUSIF6_MODE,
+ SSI_BUSIF7_MODE,
+ SSI_BUSIF0_ADINR,
+ SSI_BUSIF1_ADINR,
+ SSI_BUSIF2_ADINR,
+ SSI_BUSIF3_ADINR,
+ SSI_BUSIF4_ADINR,
+ SSI_BUSIF5_ADINR,
+ SSI_BUSIF6_ADINR,
+ SSI_BUSIF7_ADINR,
+ SSI_BUSIF0_DALIGN,
+ SSI_BUSIF1_DALIGN,
+ SSI_BUSIF2_DALIGN,
+ SSI_BUSIF3_DALIGN,
+ SSI_BUSIF4_DALIGN,
+ SSI_BUSIF5_DALIGN,
+ SSI_BUSIF6_DALIGN,
+ SSI_BUSIF7_DALIGN,
+ SSI_INT_ENABLE,
+ SSI_SYS_STATUS0,
+ SSI_SYS_STATUS1,
+ SSI_SYS_STATUS2,
+ SSI_SYS_STATUS3,
+ SSI_SYS_STATUS4,
+ SSI_SYS_STATUS5,
+ SSI_SYS_STATUS6,
+ SSI_SYS_STATUS7,
+ HDMI0_SEL,
+ HDMI1_SEL,
/* SSI */
- RSND_REG_SSICR,
- RSND_REG_SSISR,
- RSND_REG_SSITDR,
- RSND_REG_SSIRDR,
- RSND_REG_SSIWSR,
+ SSICR,
+ SSISR,
+ SSITDR,
+ SSIRDR,
+ SSIWSR,
- RSND_REG_MAX,
+ REG_MAX,
};
+#define SRCIN_TIMSEL(i) (SRCIN_TIMSEL0 + (i))
+#define SRCOUT_TIMSEL(i) (SRCOUT_TIMSEL0 + (i))
+#define CTU_SVxxR(i, j) (CTU_SV00R + ((i) * 8) + (j))
+#define DVC_VOLxR(i) (DVC_VOL0R + (i))
+#define AUDIO_CLK_SEL(i) (AUDIO_CLK_SEL0 + (i))
+#define SSI_BUSIF_MODE(i) (SSI_BUSIF0_MODE + (i))
+#define SSI_BUSIF_ADINR(i) (SSI_BUSIF0_ADINR + (i))
+#define SSI_BUSIF_DALIGN(i) (SSI_BUSIF0_DALIGN + (i))
+#define SSI_SYS_STATUS(i) (SSI_SYS_STATUS0 + (i))
+
struct rsnd_priv;
struct rsnd_mod;
@@ -210,20 +220,9 @@ struct rsnd_dai_stream;
/*
* R-Car basic functions
*/
-#define rsnd_mod_read(m, r) \
- rsnd_read(rsnd_mod_to_priv(m), m, RSND_REG_##r)
-#define rsnd_mod_write(m, r, d) \
- rsnd_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d)
-#define rsnd_mod_bset(m, r, s, d) \
- rsnd_bset(rsnd_mod_to_priv(m), m, RSND_REG_##r, s, d)
-
-u32 rsnd_read(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg);
-void rsnd_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
- enum rsnd_reg reg, u32 data);
-void rsnd_force_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
- enum rsnd_reg reg, u32 data);
-void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
- u32 mask, u32 data);
+u32 rsnd_mod_read(struct rsnd_mod *mod, enum rsnd_reg reg);
+void rsnd_mod_write(struct rsnd_mod *mod, enum rsnd_reg reg, u32 data);
+void rsnd_mod_bset(struct rsnd_mod *mod, enum rsnd_reg reg, u32 mask, u32 data);
u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod);
@@ -301,6 +300,12 @@ struct rsnd_mod_ops {
int (*cleanup)(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv);
+ u32 *(*get_status)(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ enum rsnd_mod_type type);
+ int (*id)(struct rsnd_mod *mod);
+ int (*id_sub)(struct rsnd_mod *mod);
+ int (*id_cmd)(struct rsnd_mod *mod);
};
struct rsnd_dai_stream;
@@ -310,9 +315,6 @@ struct rsnd_mod {
struct rsnd_mod_ops *ops;
struct rsnd_priv *priv;
struct clk *clk;
- u32 *(*get_status)(struct rsnd_dai_stream *io,
- struct rsnd_mod *mod,
- enum rsnd_mod_type type);
u32 status;
};
/*
@@ -375,8 +377,6 @@ struct rsnd_mod {
#define __rsnd_mod_call_pointer 0
#define rsnd_mod_to_priv(mod) ((mod)->priv)
-#define rsnd_mod_name(mod) ((mod)->ops->name)
-#define rsnd_mod_id(mod) ((mod)->id)
#define rsnd_mod_power_on(mod) clk_enable((mod)->clk)
#define rsnd_mod_power_off(mod) clk_disable((mod)->clk)
#define rsnd_mod_get(ip) (&(ip)->mod)
@@ -385,9 +385,6 @@ int rsnd_mod_init(struct rsnd_priv *priv,
struct rsnd_mod *mod,
struct rsnd_mod_ops *ops,
struct clk *clk,
- u32* (*get_status)(struct rsnd_dai_stream *io,
- struct rsnd_mod *mod,
- enum rsnd_mod_type type),
enum rsnd_mod_type type,
int id);
void rsnd_mod_quit(struct rsnd_mod *mod);
@@ -396,9 +393,13 @@ struct dma_chan *rsnd_mod_dma_req(struct rsnd_dai_stream *io,
void rsnd_mod_interrupt(struct rsnd_mod *mod,
void (*callback)(struct rsnd_mod *mod,
struct rsnd_dai_stream *io));
-u32 *rsnd_mod_get_status(struct rsnd_dai_stream *io,
- struct rsnd_mod *mod,
+u32 *rsnd_mod_get_status(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
enum rsnd_mod_type type);
+int rsnd_mod_id(struct rsnd_mod *mod);
+int rsnd_mod_id_raw(struct rsnd_mod *mod);
+int rsnd_mod_id_sub(struct rsnd_mod *mod);
+char *rsnd_mod_name(struct rsnd_mod *mod);
struct rsnd_mod *rsnd_mod_next(int *iterator,
struct rsnd_dai_stream *io,
enum rsnd_mod_type *array,
@@ -430,8 +431,9 @@ int rsnd_runtime_channel_after_ctu_with_params(struct rsnd_dai_stream *io,
rsnd_runtime_channel_for_ssi_with_params(io, NULL)
int rsnd_runtime_channel_for_ssi_with_params(struct rsnd_dai_stream *io,
struct snd_pcm_hw_params *params);
-int rsnd_runtime_is_ssi_multi(struct rsnd_dai_stream *io);
-int rsnd_runtime_is_ssi_tdm(struct rsnd_dai_stream *io);
+int rsnd_runtime_is_multi_ssi(struct rsnd_dai_stream *io);
+int rsnd_runtime_is_tdm(struct rsnd_dai_stream *io);
+int rsnd_runtime_is_tdm_split(struct rsnd_dai_stream *io);
/*
* DT
@@ -440,6 +442,7 @@ int rsnd_runtime_is_ssi_tdm(struct rsnd_dai_stream *io);
of_get_child_by_name(rsnd_priv_to_dev(priv)->of_node, node)
#define RSND_NODE_DAI "rcar_sound,dai"
#define RSND_NODE_SSI "rcar_sound,ssi"
+#define RSND_NODE_SSIU "rcar_sound,ssiu"
#define RSND_NODE_SRC "rcar_sound,src"
#define RSND_NODE_CTU "rcar_sound,ctu"
#define RSND_NODE_MIX "rcar_sound,mix"
@@ -456,8 +459,17 @@ struct rsnd_dai_stream {
struct rsnd_mod *dma;
struct rsnd_dai *rdai;
struct device *dmac_dev; /* for IPMMU */
+ u32 converted_rate; /* converted sampling rate */
+ int converted_chan; /* converted channels */
u32 parent_ssi_status;
+ u32 flags;
};
+
+/* flags */
+#define RSND_STREAM_HDMI0 (1 << 0) /* for HDMI0 */
+#define RSND_STREAM_HDMI1 (1 << 1) /* for HDMI1 */
+#define RSND_STREAM_TDM_SPLIT (1 << 2) /* for TDM split mode */
+
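A short illustration of how the relocated flags are meant to be used now that HDMI routing is a property of the stream rather than of the SSI; both call sites below are hypothetical:

    /* while parsing the DT endpoint: */
    rsnd_flags_set(io, RSND_STREAM_HDMI0);

    /* later, in the SSIU setup path: */
    if (rsnd_flags_has(io, RSND_STREAM_HDMI0))
        rsnd_mod_write(mod, HDMI0_SEL, val);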
#define rsnd_io_to_mod(io, i) ((i) < RSND_MOD_MAX ? (io)->mod[(i)] : NULL)
#define rsnd_io_to_mod_ssi(io) rsnd_io_to_mod((io), RSND_MOD_SSI)
#define rsnd_io_to_mod_ssiu(io) rsnd_io_to_mod((io), RSND_MOD_SSIU)
@@ -472,6 +484,8 @@ struct rsnd_dai_stream {
#define rsnd_io_is_play(io) (&rsnd_io_to_rdai(io)->playback == io)
#define rsnd_io_to_runtime(io) ((io)->substream ? \
(io)->substream->runtime : NULL)
+#define rsnd_io_converted_rate(io) ((io)->converted_rate)
+#define rsnd_io_converted_chan(io) ((io)->converted_chan)
int rsnd_io_is_working(struct rsnd_dai_stream *io);
struct rsnd_dai {
@@ -712,18 +726,9 @@ extern const char * const volume_ramp_rate[];
int rsnd_ssi_probe(struct rsnd_priv *priv);
void rsnd_ssi_remove(struct rsnd_priv *priv);
struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
-int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
int rsnd_ssi_use_busif(struct rsnd_dai_stream *io);
-int rsnd_ssi_get_busif(struct rsnd_dai_stream *io);
u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io);
-#define RSND_SSI_HDMI_PORT0 0xf0
-#define RSND_SSI_HDMI_PORT1 0xf1
-int rsnd_ssi_hdmi_port(struct rsnd_dai_stream *io);
-void rsnd_ssi_parse_hdmi_connection(struct rsnd_priv *priv,
- struct device_node *endpoint,
- int dai_i);
-
#define rsnd_ssi_is_pin_sharing(io) \
__rsnd_ssi_is_pin_sharing(rsnd_io_to_mod_ssi(io))
int __rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
@@ -742,6 +747,10 @@ int rsnd_ssiu_attach(struct rsnd_dai_stream *io,
struct rsnd_mod *mod);
int rsnd_ssiu_probe(struct rsnd_priv *priv);
void rsnd_ssiu_remove(struct rsnd_priv *priv);
+void rsnd_parse_connect_ssiu(struct rsnd_dai *rdai,
+ struct device_node *playback,
+ struct device_node *capture);
+#define rsnd_ssiu_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_SSIU)
/*
* R-Car SRC
@@ -767,7 +776,6 @@ unsigned int rsnd_src_get_rate(struct rsnd_priv *priv,
*/
int rsnd_ctu_probe(struct rsnd_priv *priv);
void rsnd_ctu_remove(struct rsnd_priv *priv);
-int rsnd_ctu_converted_channel(struct rsnd_mod *mod);
struct rsnd_mod *rsnd_ctu_mod_get(struct rsnd_priv *priv, int id);
#define rsnd_ctu_of_node(priv) rsnd_parse_of_node(priv, RSND_NODE_CTU)
#define rsnd_parse_connect_ctu(rdai, playback, capture) \
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index cd38a43b976f..50348a2c9203 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -25,7 +25,6 @@ struct rsnd_src {
struct rsnd_mod *dma;
struct rsnd_kctrl_cfg_s sen; /* sync convert enable */
struct rsnd_kctrl_cfg_s sync; /* sync convert */
- u32 convert_rate; /* sampling rate convert */
int irq;
};
@@ -89,12 +88,12 @@ static u32 rsnd_src_convert_rate(struct rsnd_dai_stream *io,
return 0;
if (!rsnd_src_sync_is_enabled(mod))
- return src->convert_rate;
+ return rsnd_io_converted_rate(io);
convert_rate = src->sync.val;
if (!convert_rate)
- convert_rate = src->convert_rate;
+ convert_rate = rsnd_io_converted_rate(io);
if (!convert_rate)
convert_rate = runtime->rate;
@@ -135,40 +134,6 @@ unsigned int rsnd_src_get_rate(struct rsnd_priv *priv,
return rate;
}
-static int rsnd_src_hw_params(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *fe_params)
-{
- struct rsnd_src *src = rsnd_mod_to_src(mod);
- struct snd_soc_pcm_runtime *fe = substream->private_data;
-
- /*
- * SRC assumes that it is used under DPCM if user want to use
- * sampling rate convert. Then, SRC should be FE.
- * And then, this function will be called *after* BE settings.
- * this means, each BE already has fixuped hw_params.
- * see
- * dpcm_fe_dai_hw_params()
- * dpcm_be_dai_hw_params()
- */
- src->convert_rate = 0;
- if (fe->dai_link->dynamic) {
- int stream = substream->stream;
- struct snd_soc_dpcm *dpcm;
- struct snd_pcm_hw_params *be_params;
-
- for_each_dpcm_be(fe, stream, dpcm) {
- be_params = &dpcm->hw_params;
-
- if (params_rate(fe_params) != params_rate(be_params))
- src->convert_rate = params_rate(be_params);
- }
- }
-
- return 0;
-}
-
static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
struct rsnd_mod *mod)
{
@@ -349,9 +314,8 @@ static bool rsnd_src_error_occurred(struct rsnd_mod *mod)
status0 = rsnd_mod_read(mod, SCU_SYS_STATUS0);
status1 = rsnd_mod_read(mod, SCU_SYS_STATUS1);
if ((status0 & val0) || (status1 & val1)) {
- rsnd_dbg_irq_status(dev, "%s[%d] err status : 0x%08x, 0x%08x\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod),
- status0, status1);
+ rsnd_dbg_irq_status(dev, "%s err status : 0x%08x, 0x%08x\n",
+ rsnd_mod_name(mod), status0, status1);
ret = true;
}
@@ -527,16 +491,16 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod,
}
static struct rsnd_mod_ops rsnd_src_ops = {
- .name = SRC_NAME,
- .dma_req = rsnd_src_dma_req,
- .probe = rsnd_src_probe_,
- .init = rsnd_src_init,
- .quit = rsnd_src_quit,
- .start = rsnd_src_start,
- .stop = rsnd_src_stop,
- .irq = rsnd_src_irq,
- .hw_params = rsnd_src_hw_params,
- .pcm_new = rsnd_src_pcm_new,
+ .name = SRC_NAME,
+ .dma_req = rsnd_src_dma_req,
+ .probe = rsnd_src_probe_,
+ .init = rsnd_src_init,
+ .quit = rsnd_src_quit,
+ .start = rsnd_src_start,
+ .stop = rsnd_src_stop,
+ .irq = rsnd_src_irq,
+ .pcm_new = rsnd_src_pcm_new,
+ .get_status = rsnd_mod_get_status,
};
struct rsnd_mod *rsnd_src_mod_get(struct rsnd_priv *priv, int id)
@@ -605,8 +569,7 @@ int rsnd_src_probe(struct rsnd_priv *priv)
}
ret = rsnd_mod_init(priv, rsnd_mod_get(src),
- &rsnd_src_ops, clk, rsnd_mod_get_status,
- RSND_MOD_SRC, i);
+ &rsnd_src_ops, clk, RSND_MOD_SRC, i);
if (ret) {
of_node_put(np);
goto rsnd_src_probe_done;
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index fcb4df23248c..45ef295743ec 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -99,9 +99,7 @@ struct rsnd_ssi {
/* flags */
#define RSND_SSI_CLK_PIN_SHARE (1 << 0)
#define RSND_SSI_NO_BUSIF (1 << 1) /* SSI+DMA without BUSIF */
-#define RSND_SSI_HDMI0 (1 << 2) /* for HDMI0 */
-#define RSND_SSI_HDMI1 (1 << 3) /* for HDMI1 */
-#define RSND_SSI_PROBED (1 << 4)
+#define RSND_SSI_PROBED (1 << 2)
#define for_each_rsnd_ssi(pos, priv, i) \
for (i = 0; \
@@ -119,19 +117,7 @@ struct rsnd_ssi {
(rsnd_ssi_run_mods(io) & (1 << rsnd_mod_id(mod)))
#define rsnd_ssi_can_output_clk(mod) (!__rsnd_ssi_is_pin_sharing(mod))
-int rsnd_ssi_hdmi_port(struct rsnd_dai_stream *io)
-{
- struct rsnd_mod *mod = rsnd_io_to_mod_ssi(io);
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-
- if (rsnd_flags_has(ssi, RSND_SSI_HDMI0))
- return RSND_SSI_HDMI_PORT0;
-
- if (rsnd_flags_has(ssi, RSND_SSI_HDMI1))
- return RSND_SSI_HDMI_PORT1;
-
- return 0;
-}
+static int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
int rsnd_ssi_use_busif(struct rsnd_dai_stream *io)
{
@@ -150,11 +136,6 @@ int rsnd_ssi_use_busif(struct rsnd_dai_stream *io)
return use_busif;
}
-int rsnd_ssi_get_busif(struct rsnd_dai_stream *io)
-{
- return 0; /* BUSIF0 only for now */
-}
-
static void rsnd_ssi_status_clear(struct rsnd_mod *mod)
{
rsnd_mod_write(mod, SSISR, 0);
@@ -181,8 +162,7 @@ static void rsnd_ssi_status_check(struct rsnd_mod *mod,
udelay(5);
}
- dev_warn(dev, "%s[%d] status check failed\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
+ dev_warn(dev, "%s status check failed\n", rsnd_mod_name(mod));
}
static u32 rsnd_ssi_multi_slaves(struct rsnd_dai_stream *io)
@@ -224,7 +204,7 @@ static u32 rsnd_ssi_run_mods(struct rsnd_dai_stream *io)
u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io)
{
- if (rsnd_runtime_is_ssi_multi(io))
+ if (rsnd_runtime_is_multi_ssi(io))
return rsnd_ssi_multi_slaves(io);
return 0;
@@ -306,7 +286,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
if (rsnd_ssi_is_multi_slave(mod, io))
return 0;
- if (ssi->rate) {
+ if (ssi->usrcnt > 1) {
if (ssi->rate != rate) {
dev_err(dev, "SSI parent/child should use same rate\n");
return -EINVAL;
@@ -320,6 +300,9 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
return 0;
}
+ if (rsnd_runtime_is_tdm_split(io))
+ chan = rsnd_io_converted_chan(io);
+
main_rate = rsnd_ssi_clk_query(rdai, rate, chan, &idx);
if (!main_rate) {
dev_err(dev, "unsupported clock rate\n");
@@ -346,9 +329,8 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
ssi->rate = rate;
ssi->chan = chan;
- dev_dbg(dev, "%s[%d] outputs %u Hz\n",
- rsnd_mod_name(mod),
- rsnd_mod_id(mod), rate);
+ dev_dbg(dev, "%s outputs %d chan %u Hz\n",
+ rsnd_mod_name(mod), chan, rate);
return 0;
}
@@ -379,14 +361,23 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
struct rsnd_dai_stream *io)
{
struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
+ struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+ struct device *dev = rsnd_priv_to_dev(priv);
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
u32 cr_own = ssi->cr_own;
u32 cr_mode = ssi->cr_mode;
u32 wsr = ssi->wsr;
- int is_tdm;
+ int width;
+ int is_tdm, is_tdm_split;
+
+ is_tdm = rsnd_runtime_is_tdm(io);
+ is_tdm_split = rsnd_runtime_is_tdm_split(io);
- is_tdm = rsnd_runtime_is_ssi_tdm(io);
+ if (is_tdm)
+ dev_dbg(dev, "TDM mode\n");
+ if (is_tdm_split)
+ dev_dbg(dev, "TDM Split mode\n");
cr_own |= FORCE | rsnd_rdai_width_to_swl(rdai);
@@ -405,7 +396,7 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
* rsnd_ssiu_init_gen2()
*/
wsr = ssi->wsr;
- if (is_tdm) {
+ if (is_tdm || is_tdm_split) {
wsr |= WS_MODE;
cr_own |= CHNL_8;
}
@@ -421,7 +412,18 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
cr_own |= TRMD;
cr_own &= ~DWL_MASK;
- switch (snd_pcm_format_width(runtime->format)) {
+ width = snd_pcm_format_width(runtime->format);
+ if (is_tdm_split) {
+ /*
+	 * The SWL and DWL bits in SSICR should be fixed to the
+	 * 32-bit setting in TDM split mode.
+ * see datasheet
+ * Operation :: TDM Format Split Function (TDM Split Mode)
+ */
+ width = 32;
+ }
+
+ switch (width) {
case 8:
cr_own |= DWL_8;
break;
@@ -431,6 +433,9 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
case 24:
cr_own |= DWL_24;
break;
+ case 32:
+ cr_own |= DWL_32;
+ break;
}
if (rsnd_ssi_is_dma_mode(mod)) {
@@ -494,8 +499,7 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod,
return 0;
if (!ssi->usrcnt) {
- dev_err(dev, "%s[%d] usrcnt error\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
+ dev_err(dev, "%s usrcnt error\n", rsnd_mod_name(mod));
return -EIO;
}
@@ -654,8 +658,8 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
/* DMA only */
if (is_dma && (status & (UIRQ | OIRQ))) {
- rsnd_dbg_irq_status(dev, "%s[%d] err status : 0x%08x\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod), status);
+ rsnd_dbg_irq_status(dev, "%s err status : 0x%08x\n",
+ rsnd_mod_name(mod), status);
stop = true;
}
@@ -681,6 +685,41 @@ static irqreturn_t rsnd_ssi_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
+static u32 *rsnd_ssi_get_status(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ enum rsnd_mod_type type)
+{
+ /*
+	 * SSIP (= SSI parent) needs special handling, otherwise the
+	 * 2nd SSI might not start. see also rsnd_mod_call()
+	 *
+	 * We can't keep the parent SSI status on the SSI itself, because
+	 * we don't know how many SSIs request the parent SSI. Thus, it
+	 * is located in "io" now.
+ * ex) trouble case
+ * Playback: SSI0
+ * Capture : SSI1 (needs SSI0)
+ *
+ * 1) start Capture -> SSI0/SSI1 are started.
+ * 2) start Playback -> SSI0 doesn't work, because it is already
+ * marked as "started" on 1)
+ *
+	 * OTOH, using each mod's own status is good for the MUX case:
+	 * it doesn't need to start again on the 2nd start.
+ * ex)
+ * IO-0: SRC0 -> CTU1 -+-> MUX -> DVC -> SSIU -> SSI0
+ * |
+ * IO-1: SRC1 -> CTU2 -+
+ *
+ * 1) start IO-0 -> start SSI0
+ * 2) start IO-1 -> SSI0 doesn't need to start, because it is
+ * already started on 1)
+ */
+ if (type == RSND_MOD_SSIP)
+ return &io->parent_ssi_status;
+
+ return rsnd_mod_get_status(mod, io, type);
+}
+
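A hedged sketch of why the returned pointer matters: callers account start/stop through it, so the parent-SSI case shares one counter per stream while everything else shares one per mod. RSND_MOD_STATUS_STARTED is an invented name; the real bookkeeping is in rsnd_mod_call():

    u32 *status = rsnd_ssi_get_status(mod, io, type);

    if (!(*status & RSND_MOD_STATUS_STARTED)) {  /* hypothetical flag */
        mod->ops->start(mod, io, priv);
        *status |= RSND_MOD_STATUS_STARTED;
    }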
/*
* SSI PIO
*/
@@ -730,7 +769,7 @@ static int rsnd_ssi_common_probe(struct rsnd_mod *mod,
{
struct device *dev = rsnd_priv_to_dev(priv);
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- int ret;
+ int ret = 0;
/*
* SSIP/SSIU/IRQ are not needed on
@@ -744,10 +783,6 @@ static int rsnd_ssi_common_probe(struct rsnd_mod *mod,
* see rsnd_ssi_pcm_new()
*/
- ret = rsnd_ssiu_attach(io, mod);
- if (ret < 0)
- return ret;
-
/*
* SSI might be called again as PIO fallback
	 * It is easier to handle the IRQ request/free manually
@@ -876,18 +911,19 @@ static int rsnd_ssi_prepare(struct rsnd_mod *mod,
}
static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
- .name = SSI_NAME,
- .probe = rsnd_ssi_common_probe,
- .remove = rsnd_ssi_common_remove,
- .init = rsnd_ssi_pio_init,
- .quit = rsnd_ssi_quit,
- .start = rsnd_ssi_start,
- .stop = rsnd_ssi_stop,
- .irq = rsnd_ssi_irq,
- .pointer = rsnd_ssi_pio_pointer,
- .pcm_new = rsnd_ssi_pcm_new,
- .hw_params = rsnd_ssi_hw_params,
- .prepare = rsnd_ssi_prepare,
+ .name = SSI_NAME,
+ .probe = rsnd_ssi_common_probe,
+ .remove = rsnd_ssi_common_remove,
+ .init = rsnd_ssi_pio_init,
+ .quit = rsnd_ssi_quit,
+ .start = rsnd_ssi_start,
+ .stop = rsnd_ssi_stop,
+ .irq = rsnd_ssi_irq,
+ .pointer = rsnd_ssi_pio_pointer,
+ .pcm_new = rsnd_ssi_pcm_new,
+ .hw_params = rsnd_ssi_hw_params,
+ .prepare = rsnd_ssi_prepare,
+ .get_status = rsnd_ssi_get_status,
};
static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
@@ -928,8 +964,7 @@ static int rsnd_ssi_fallback(struct rsnd_mod *mod,
*/
mod->ops = &rsnd_ssi_pio_ops;
- dev_info(dev, "%s[%d] fallback to PIO mode\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
+ dev_info(dev, "%s fallback to PIO mode\n", rsnd_mod_name(mod));
return 0;
}
@@ -941,6 +976,17 @@ static struct dma_chan *rsnd_ssi_dma_req(struct rsnd_dai_stream *io,
int is_play = rsnd_io_is_play(io);
char *name;
+ /*
+	 * It should use the "rcar_sound,ssiu" node from DT,
+	 * but we need to keep compatibility with old DT versions.
+	 *
+	 * If "rcar_sound,ssiu" exists, it will be used.
+	 * If not, "rcar_sound,ssi" will be used.
+ * see
+ * rsnd_ssiu_dma_req()
+ * rsnd_dma_of_path()
+ */
+
if (rsnd_ssi_use_busif(io))
name = is_play ? "rxu" : "txu";
else
@@ -951,27 +997,27 @@ static struct dma_chan *rsnd_ssi_dma_req(struct rsnd_dai_stream *io,
}
static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
- .name = SSI_NAME,
- .dma_req = rsnd_ssi_dma_req,
- .probe = rsnd_ssi_dma_probe,
- .remove = rsnd_ssi_common_remove,
- .init = rsnd_ssi_init,
- .quit = rsnd_ssi_quit,
- .start = rsnd_ssi_start,
- .stop = rsnd_ssi_stop,
- .irq = rsnd_ssi_irq,
- .pcm_new = rsnd_ssi_pcm_new,
- .fallback = rsnd_ssi_fallback,
- .hw_params = rsnd_ssi_hw_params,
- .prepare = rsnd_ssi_prepare,
+ .name = SSI_NAME,
+ .dma_req = rsnd_ssi_dma_req,
+ .probe = rsnd_ssi_dma_probe,
+ .remove = rsnd_ssi_common_remove,
+ .init = rsnd_ssi_init,
+ .quit = rsnd_ssi_quit,
+ .start = rsnd_ssi_start,
+ .stop = rsnd_ssi_stop,
+ .irq = rsnd_ssi_irq,
+ .pcm_new = rsnd_ssi_pcm_new,
+ .fallback = rsnd_ssi_fallback,
+ .hw_params = rsnd_ssi_hw_params,
+ .prepare = rsnd_ssi_prepare,
+ .get_status = rsnd_ssi_get_status,
};
-int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
+static int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
{
return mod->ops == &rsnd_ssi_dma_ops;
}
-
/*
* ssi mod function
*/
@@ -1027,54 +1073,6 @@ void rsnd_parse_connect_ssi(struct rsnd_dai *rdai,
of_node_put(node);
}
-static void __rsnd_ssi_parse_hdmi_connection(struct rsnd_priv *priv,
- struct rsnd_dai_stream *io,
- struct device_node *remote_ep)
-{
- struct device *dev = rsnd_priv_to_dev(priv);
- struct rsnd_mod *mod = rsnd_io_to_mod_ssi(io);
- struct rsnd_ssi *ssi;
- struct device_node *remote_node = of_graph_get_port_parent(remote_ep);
-
- /* support Gen3 only */
- if (!rsnd_is_gen3(priv))
- return;
-
- if (!mod)
- return;
-
- ssi = rsnd_mod_to_ssi(mod);
-
- /* HDMI0 */
- if (strstr(remote_node->full_name, "hdmi@fead0000")) {
- rsnd_flags_set(ssi, RSND_SSI_HDMI0);
- dev_dbg(dev, "%s[%d] connected to HDMI0\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
- }
-
- /* HDMI1 */
- if (strstr(remote_node->full_name, "hdmi@feae0000")) {
- rsnd_flags_set(ssi, RSND_SSI_HDMI1);
- dev_dbg(dev, "%s[%d] connected to HDMI1\n",
- rsnd_mod_name(mod), rsnd_mod_id(mod));
- }
-}
-
-void rsnd_ssi_parse_hdmi_connection(struct rsnd_priv *priv,
- struct device_node *endpoint,
- int dai_i)
-{
- struct rsnd_dai *rdai = rsnd_rdai_get(priv, dai_i);
- struct device_node *remote_ep;
-
- remote_ep = of_graph_get_remote_endpoint(endpoint);
- if (!remote_ep)
- return;
-
- __rsnd_ssi_parse_hdmi_connection(priv, &rdai->playback, remote_ep);
- __rsnd_ssi_parse_hdmi_connection(priv, &rdai->capture, remote_ep);
-}
-
struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id)
{
if (WARN_ON(id < 0 || id >= rsnd_ssi_nr(priv)))
@@ -1091,41 +1089,6 @@ int __rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod)
return !!(rsnd_flags_has(rsnd_mod_to_ssi(mod), RSND_SSI_CLK_PIN_SHARE));
}
-static u32 *rsnd_ssi_get_status(struct rsnd_dai_stream *io,
- struct rsnd_mod *mod,
- enum rsnd_mod_type type)
-{
- /*
- * SSIP (= SSI parent) needs to be special, otherwise,
- * 2nd SSI might doesn't start. see also rsnd_mod_call()
- *
- * We can't include parent SSI status on SSI, because we don't know
- * how many SSI requests parent SSI. Thus, it is localed on "io" now.
- * ex) trouble case
- * Playback: SSI0
- * Capture : SSI1 (needs SSI0)
- *
- * 1) start Capture -> SSI0/SSI1 are started.
- * 2) start Playback -> SSI0 doesn't work, because it is already
- * marked as "started" on 1)
- *
- * OTOH, using each mod's status is good for MUX case.
- * It doesn't need to start in 2nd start
- * ex)
- * IO-0: SRC0 -> CTU1 -+-> MUX -> DVC -> SSIU -> SSI0
- * |
- * IO-1: SRC1 -> CTU2 -+
- *
- * 1) start IO-0 -> start SSI0
- * 2) start IO-1 -> SSI0 doesn't need to start, because it is
- * already started on 1)
- */
- if (type == RSND_MOD_SSIP)
- return &io->parent_ssi_status;
-
- return rsnd_mod_get_status(io, mod, type);
-}
-
int rsnd_ssi_probe(struct rsnd_priv *priv)
{
struct device_node *node;
@@ -1192,7 +1155,7 @@ int rsnd_ssi_probe(struct rsnd_priv *priv)
ops = &rsnd_ssi_dma_ops;
ret = rsnd_mod_init(priv, rsnd_mod_get(ssi), ops, clk,
- rsnd_ssi_get_status, RSND_MOD_SSI, i);
+ RSND_MOD_SSI, i);
if (ret) {
of_node_put(np);
goto rsnd_ssi_probe_done;
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 39b67643b5dc..c5934adcfd01 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -12,8 +12,14 @@ struct rsnd_ssiu {
struct rsnd_mod mod;
u32 busif_status[8]; /* for BUSIF0 - BUSIF7 */
unsigned int usrcnt;
+ int id;
+ int id_sub;
};
+/* SSI_MODE */
+#define TDM_EXT (1 << 0)
+#define TDM_SPLIT (1 << 8)
+
#define rsnd_ssiu_nr(priv) ((priv)->ssiu_nr)
#define rsnd_mod_to_ssiu(_mod) container_of((_mod), struct rsnd_ssiu, mod)
#define for_each_rsnd_ssiu(pos, priv, i) \
@@ -22,6 +28,33 @@ struct rsnd_ssiu {
((pos) = ((struct rsnd_ssiu *)(priv)->ssiu + i)); \
i++)
+/*
+ * SSI Gen2 Gen3
+ * 0 BUSIF0-3 BUSIF0-7
+ * 1 BUSIF0-3 BUSIF0-7
+ * 2 BUSIF0-3 BUSIF0-7
+ * 3 BUSIF0 BUSIF0-7
+ * 4 BUSIF0 BUSIF0-7
+ * 5 BUSIF0 BUSIF0
+ * 6 BUSIF0 BUSIF0
+ * 7 BUSIF0 BUSIF0
+ * 8 BUSIF0 BUSIF0
+ * 9 BUSIF0-3 BUSIF0-7
+ * total 22 52
+ */
+static const int gen2_id[] = { 0, 4, 8, 12, 13, 14, 15, 16, 17, 18 };
+static const int gen3_id[] = { 0, 8, 16, 24, 32, 40, 41, 42, 43, 44 };
+
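Worked example of the table and arrays above: on Gen3, linear SSIU index 10 falls between gen3_id[1] = 8 and gen3_id[2] = 16, so it decodes to SSI1/BUSIF2. The snippet below mirrors the scan rsnd_ssiu_probe() performs later in this patch:

    int j, id = 0, sub = 0, linear = 10;

    for (j = 0; j < ARRAY_SIZE(gen3_id); j++) {
        if (gen3_id[j] > linear)
            break;
        id = j;
        sub = linear - gen3_id[id];
    }
    /* result: id == 1 (SSI1), sub == 2 (BUSIF2) */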
+static u32 *rsnd_ssiu_get_status(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ enum rsnd_mod_type type)
+{
+ struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
+ int busif = rsnd_mod_id_sub(mod);
+
+ return &ssiu->busif_status[busif];
+}
+
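Because each SSIU mod now owns one BUSIF status word, streams on different BUSIFs of the same SSI are accounted independently. A sketch with illustrative mods and streams:

    u32 *st0 = rsnd_ssiu_get_status(busif0_mod, io_a, RSND_MOD_SSIU);
    u32 *st1 = rsnd_ssiu_get_status(busif1_mod, io_b, RSND_MOD_SSIU);
    /* st0 != st1: starting io_a cannot mark io_b's BUSIF as started */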
static int rsnd_ssiu_init(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
@@ -32,6 +65,7 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
int id = rsnd_mod_id(mod);
u32 mask1, val1;
u32 mask2, val2;
+ int i;
/* clear status */
switch (id) {
@@ -40,16 +74,12 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
case 2:
case 3:
case 4:
- rsnd_mod_write(mod, SSI_SYS_STATUS0, 0xf << (id * 4));
- rsnd_mod_write(mod, SSI_SYS_STATUS2, 0xf << (id * 4));
- rsnd_mod_write(mod, SSI_SYS_STATUS4, 0xf << (id * 4));
- rsnd_mod_write(mod, SSI_SYS_STATUS6, 0xf << (id * 4));
+ for (i = 0; i < 4; i++)
+ rsnd_mod_write(mod, SSI_SYS_STATUS(i * 2), 0xf << (id * 4));
break;
case 9:
- rsnd_mod_write(mod, SSI_SYS_STATUS1, 0xf << 4);
- rsnd_mod_write(mod, SSI_SYS_STATUS3, 0xf << 4);
- rsnd_mod_write(mod, SSI_SYS_STATUS5, 0xf << 4);
- rsnd_mod_write(mod, SSI_SYS_STATUS7, 0xf << 4);
+ for (i = 0; i < 4; i++)
+			rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << 4);
break;
}
@@ -115,8 +145,9 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
}
static struct rsnd_mod_ops rsnd_ssiu_ops_gen1 = {
- .name = SSIU_NAME,
- .init = rsnd_ssiu_init,
+ .name = SSIU_NAME,
+ .init = rsnd_ssiu_init,
+ .get_status = rsnd_ssiu_get_status,
};
static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
@@ -124,7 +155,8 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
- int hdmi = rsnd_ssi_hdmi_port(io);
+ u32 has_hdmi0 = rsnd_flags_has(io, RSND_STREAM_HDMI0);
+ u32 has_hdmi1 = rsnd_flags_has(io, RSND_STREAM_HDMI1);
int ret;
u32 mode = 0;
@@ -134,20 +166,21 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
ssiu->usrcnt++;
- if (rsnd_runtime_is_ssi_tdm(io)) {
- /*
- * TDM Extend Mode
- * see
- * rsnd_ssi_config_init()
- */
- mode = 0x1;
- }
+ /*
+ * TDM Extend/Split Mode
+ * see
+ * rsnd_ssi_config_init()
+ */
+ if (rsnd_runtime_is_tdm(io))
+ mode = TDM_EXT;
+ else if (rsnd_runtime_is_tdm_split(io))
+ mode = TDM_SPLIT;
rsnd_mod_write(mod, SSI_MODE, mode);
if (rsnd_ssi_use_busif(io)) {
int id = rsnd_mod_id(mod);
- int busif = rsnd_ssi_get_busif(io);
+ int busif = rsnd_mod_id_sub(mod);
/*
* FIXME
@@ -162,46 +195,18 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
id, busif);
}
-#define RSND_WRITE_BUSIF(i) \
- rsnd_mod_write(mod, SSI_BUSIF##i##_ADINR, \
- rsnd_get_adinr_bit(mod, io) | \
- (rsnd_io_is_play(io) ? \
- rsnd_runtime_channel_after_ctu(io) : \
- rsnd_runtime_channel_original(io))); \
- rsnd_mod_write(mod, SSI_BUSIF##i##_MODE, \
- rsnd_get_busif_shift(io, mod) | 1); \
- rsnd_mod_write(mod, SSI_BUSIF##i##_DALIGN, \
- rsnd_get_dalign(mod, io))
-
- switch (busif) {
- case 0:
- RSND_WRITE_BUSIF(0);
- break;
- case 1:
- RSND_WRITE_BUSIF(1);
- break;
- case 2:
- RSND_WRITE_BUSIF(2);
- break;
- case 3:
- RSND_WRITE_BUSIF(3);
- break;
- case 4:
- RSND_WRITE_BUSIF(4);
- break;
- case 5:
- RSND_WRITE_BUSIF(5);
- break;
- case 6:
- RSND_WRITE_BUSIF(6);
- break;
- case 7:
- RSND_WRITE_BUSIF(7);
- break;
- }
+ rsnd_mod_write(mod, SSI_BUSIF_ADINR(busif),
+ rsnd_get_adinr_bit(mod, io) |
+ (rsnd_io_is_play(io) ?
+ rsnd_runtime_channel_after_ctu(io) :
+ rsnd_runtime_channel_original(io)));
+ rsnd_mod_write(mod, SSI_BUSIF_MODE(busif),
+ rsnd_get_busif_shift(io, mod) | 1);
+ rsnd_mod_write(mod, SSI_BUSIF_DALIGN(busif),
+ rsnd_get_dalign(mod, io));
}
- if (hdmi) {
+ if (has_hdmi0 || has_hdmi1) {
enum rsnd_mod_type rsnd_ssi_array[] = {
RSND_MOD_SSIM1,
RSND_MOD_SSIM2,
@@ -227,14 +232,10 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
rsnd_mod_id(pos) << shift;
}
- switch (hdmi) {
- case RSND_SSI_HDMI_PORT0:
+ if (has_hdmi0)
rsnd_mod_write(mod, HDMI0_SEL, val);
- break;
- case RSND_SSI_HDMI_PORT1:
+ if (has_hdmi1)
rsnd_mod_write(mod, HDMI1_SEL, val);
- break;
- }
}
return 0;
@@ -244,7 +245,7 @@ static int rsnd_ssiu_start_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
- int busif = rsnd_ssi_get_busif(io);
+ int busif = rsnd_mod_id_sub(mod);
if (!rsnd_ssi_use_busif(io))
return 0;
@@ -262,7 +263,7 @@ static int rsnd_ssiu_stop_gen2(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
- int busif = rsnd_ssi_get_busif(io);
+ int busif = rsnd_mod_id_sub(mod);
if (!rsnd_ssi_use_busif(io))
return 0;
@@ -278,11 +279,53 @@ static int rsnd_ssiu_stop_gen2(struct rsnd_mod *mod,
return 0;
}
+static int rsnd_ssiu_id(struct rsnd_mod *mod)
+{
+ struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
+
+ /* see rsnd_ssiu_probe() */
+ return ssiu->id;
+}
+
+static int rsnd_ssiu_id_sub(struct rsnd_mod *mod)
+{
+ struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
+
+ /* see rsnd_ssiu_probe() */
+ return ssiu->id_sub;
+}
+
+static struct dma_chan *rsnd_ssiu_dma_req(struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ int is_play = rsnd_io_is_play(io);
+ char *name;
+
+ /*
+	 * It should use the "rcar_sound,ssiu" node from DT,
+	 * but we need to keep compatibility with old DT versions.
+	 *
+	 * If "rcar_sound,ssiu" exists, it will be used.
+	 * If not, "rcar_sound,ssi" will be used.
+ * see
+ * rsnd_ssi_dma_req()
+ * rsnd_dma_of_path()
+ */
+
+ name = is_play ? "rx" : "tx";
+
+ return rsnd_dma_request_channel(rsnd_ssiu_of_node(priv),
+ mod, name);
+}
+
static struct rsnd_mod_ops rsnd_ssiu_ops_gen2 = {
- .name = SSIU_NAME,
- .init = rsnd_ssiu_init_gen2,
- .start = rsnd_ssiu_start_gen2,
- .stop = rsnd_ssiu_stop_gen2,
+ .name = SSIU_NAME,
+ .dma_req = rsnd_ssiu_dma_req,
+ .init = rsnd_ssiu_init_gen2,
+ .start = rsnd_ssiu_start_gen2,
+ .stop = rsnd_ssiu_stop_gen2,
+ .get_status = rsnd_ssiu_get_status,
};
static struct rsnd_mod *rsnd_ssiu_mod_get(struct rsnd_priv *priv, int id)
@@ -293,36 +336,85 @@ static struct rsnd_mod *rsnd_ssiu_mod_get(struct rsnd_priv *priv, int id)
return rsnd_mod_get((struct rsnd_ssiu *)(priv->ssiu) + id);
}
-int rsnd_ssiu_attach(struct rsnd_dai_stream *io,
- struct rsnd_mod *ssi_mod)
+static void rsnd_parse_connect_ssiu_compatible(struct rsnd_priv *priv,
+ struct rsnd_dai_stream *io)
{
- struct rsnd_priv *priv = rsnd_io_to_priv(io);
- struct rsnd_mod *mod = rsnd_ssiu_mod_get(priv, rsnd_mod_id(ssi_mod));
+ struct rsnd_mod *ssi_mod = rsnd_io_to_mod_ssi(io);
+ struct rsnd_mod *mod;
+ struct rsnd_ssiu *ssiu;
+ int i;
- rsnd_mod_confirm_ssi(ssi_mod);
+ if (!ssi_mod)
+ return;
- return rsnd_dai_connect(mod, io, mod->type);
+ /* select BUSIF0 */
+ for_each_rsnd_ssiu(ssiu, priv, i) {
+ mod = rsnd_mod_get(ssiu);
+
+ if ((rsnd_mod_id(ssi_mod) == rsnd_mod_id(mod)) &&
+ (rsnd_mod_id_sub(mod) == 0)) {
+ rsnd_dai_connect(mod, io, mod->type);
+ return;
+ }
+ }
}
-static u32 *rsnd_ssiu_get_status(struct rsnd_dai_stream *io,
- struct rsnd_mod *mod,
- enum rsnd_mod_type type)
+void rsnd_parse_connect_ssiu(struct rsnd_dai *rdai,
+ struct device_node *playback,
+ struct device_node *capture)
{
- struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
- int busif = rsnd_ssi_get_busif(io);
+ struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+ struct device_node *node = rsnd_ssiu_of_node(priv);
+ struct device_node *np;
+ struct rsnd_mod *mod;
+ struct rsnd_dai_stream *io_p = &rdai->playback;
+ struct rsnd_dai_stream *io_c = &rdai->capture;
+ int i;
- return &ssiu->busif_status[busif];
+	/* use rcar_sound,ssiu if it exists */
+ if (node) {
+ i = 0;
+ for_each_child_of_node(node, np) {
+ mod = rsnd_ssiu_mod_get(priv, i);
+ if (np == playback)
+ rsnd_dai_connect(mod, io_p, mod->type);
+ if (np == capture)
+ rsnd_dai_connect(mod, io_c, mod->type);
+ i++;
+ }
+
+ of_node_put(node);
+ }
+
+ /* Keep DT compatibility */
+ if (!rsnd_io_to_mod_ssiu(io_p))
+ rsnd_parse_connect_ssiu_compatible(priv, io_p);
+ if (!rsnd_io_to_mod_ssiu(io_c))
+ rsnd_parse_connect_ssiu_compatible(priv, io_c);
}
int rsnd_ssiu_probe(struct rsnd_priv *priv)
{
struct device *dev = rsnd_priv_to_dev(priv);
+ struct device_node *node;
struct rsnd_ssiu *ssiu;
struct rsnd_mod_ops *ops;
+ const int *list = NULL;
int i, nr, ret;
- /* same number to SSI */
- nr = priv->ssi_nr;
+ /*
+ * Keep DT compatibility.
+	 * If it has "rcar_sound,ssiu", use it.
+	 * If not, use "rcar_sound,ssi".
+ * see
+ * rsnd_ssiu_bufsif_to_id()
+ */
+ node = rsnd_ssiu_of_node(priv);
+ if (node)
+ nr = of_get_child_count(node);
+ else
+ nr = priv->ssi_nr;
+
ssiu = devm_kcalloc(dev, nr, sizeof(*ssiu), GFP_KERNEL);
if (!ssiu)
return -ENOMEM;
@@ -335,10 +427,46 @@ int rsnd_ssiu_probe(struct rsnd_priv *priv)
else
ops = &rsnd_ssiu_ops_gen2;
+ /* Keep compatibility */
+ nr = 0;
+ if ((node) &&
+ (ops == &rsnd_ssiu_ops_gen2)) {
+ ops->id = rsnd_ssiu_id;
+ ops->id_sub = rsnd_ssiu_id_sub;
+
+ if (rsnd_is_gen2(priv)) {
+ list = gen2_id;
+ nr = ARRAY_SIZE(gen2_id);
+ } else if (rsnd_is_gen3(priv)) {
+ list = gen3_id;
+ nr = ARRAY_SIZE(gen3_id);
+ } else {
+ dev_err(dev, "unknown SSIU\n");
+ return -ENODEV;
+ }
+ }
+
for_each_rsnd_ssiu(ssiu, priv, i) {
+ if (node) {
+ int j;
+
+ /*
+ * see
+ * rsnd_ssiu_get_id()
+ * rsnd_ssiu_get_id_sub()
+ */
+ for (j = 0; j < nr; j++) {
+ if (list[j] > i)
+ break;
+ ssiu->id = j;
+ ssiu->id_sub = i - list[ssiu->id];
+ }
+ } else {
+ ssiu->id = i;
+ }
+
ret = rsnd_mod_init(priv, rsnd_mod_get(ssiu),
- ops, NULL, rsnd_ssiu_get_status,
- RSND_MOD_SSIU, i);
+ ops, NULL, RSND_MOD_SSIU, i);
if (ret)
return ret;
}
diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
index b8e72b52db30..4fb29f0e561e 100644
--- a/sound/soc/soc-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -10,11 +10,17 @@ struct snd_soc_acpi_mach *
snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
{
struct snd_soc_acpi_mach *mach;
+ struct snd_soc_acpi_mach *mach_alt;
for (mach = machines; mach->id[0]; mach++) {
if (acpi_dev_present(mach->id, NULL, -1)) {
- if (mach->machine_quirk)
- mach = mach->machine_quirk(mach);
+ if (mach->machine_quirk) {
+ mach_alt = mach->machine_quirk(mach);
+ if (!mach_alt)
+				continue; /* not a full match, ignore */
+ mach = mach_alt;
+ }
+
return mach;
}
}
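The hunk above lets a machine_quirk veto a partial ACPI match instead of being forced to accept it. A hedged sketch of such a quirk; the companion device ID is invented:

    static struct snd_soc_acpi_mach *example_quirk(void *arg)
    {
        struct snd_soc_acpi_mach *mach = arg;

        /* require a second ACPI device before accepting this match */
        if (!acpi_dev_present("EXMP0001", NULL, -1))
            return NULL;  /* not a full match: the core tries the next entry */

        return mach;
    }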
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 6ddcf12bc030..0462b3ec977a 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1467,7 +1467,7 @@ static int soc_link_dai_pcm_new(struct snd_soc_dai **dais, int num_dais,
for (i = 0; i < num_dais; ++i) {
struct snd_soc_dai_driver *drv = dais[i]->driver;
- if (!rtd->dai_link->no_pcm && drv->pcm_new)
+ if (drv->pcm_new)
ret = drv->pcm_new(rtd, dais[i]);
if (ret < 0) {
dev_err(dais[i]->dev,
@@ -2131,6 +2131,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
}
card->instantiated = 1;
+ dapm_mark_endpoints_dirty(card);
snd_soc_dapm_sync(&card->dapm);
mutex_unlock(&card->mutex);
mutex_unlock(&client_mutex);
@@ -3484,12 +3485,11 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np,
}
EXPORT_SYMBOL_GPL(snd_soc_of_parse_tdm_slot);
-void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
- struct snd_soc_codec_conf *codec_conf,
- struct device_node *of_node,
- const char *propname)
+void snd_soc_of_parse_node_prefix(struct device_node *np,
+ struct snd_soc_codec_conf *codec_conf,
+ struct device_node *of_node,
+ const char *propname)
{
- struct device_node *np = card->dev->of_node;
const char *str;
int ret;
@@ -3502,7 +3502,7 @@ void snd_soc_of_parse_audio_prefix(struct snd_soc_card *card,
codec_conf->of_node = of_node;
codec_conf->name_prefix = str;
}
-EXPORT_SYMBOL_GPL(snd_soc_of_parse_audio_prefix);
+EXPORT_SYMBOL_GPL(snd_soc_of_parse_node_prefix);
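With the node now an explicit argument, a card driver can read a name prefix from any DT node instead of only the card's own. A usage sketch; the property and node names are illustrative:

    /* old behaviour, recovered by passing the card's node: */
    snd_soc_of_parse_node_prefix(card->dev->of_node, codec_conf,
                                 codec_np, "prefix");

    /* but a sub-node works just as well now: */
    snd_soc_of_parse_node_prefix(link_np, codec_conf, codec_np,
                                 "mycard,prefix");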
int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname)
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index d597eba61992..bcb35cae2a2c 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -74,14 +74,14 @@ static int stm32_sai_sync_conf_provider(struct stm32_sai_data *sai, int synco)
return ret;
}
- dev_dbg(&sai->pdev->dev, "Set %s%s as synchro provider\n",
- sai->pdev->dev.of_node->name,
+ dev_dbg(&sai->pdev->dev, "Set %pOFn%s as synchro provider\n",
+ sai->pdev->dev.of_node,
synco == STM_SAI_SYNC_OUT_A ? "A" : "B");
prev_synco = FIELD_GET(SAI_GCR_SYNCOUT_MASK, readl_relaxed(sai->base));
if (prev_synco != STM_SAI_SYNC_OUT_NONE && synco != prev_synco) {
- dev_err(&sai->pdev->dev, "%s%s already set as sync provider\n",
- sai->pdev->dev.of_node->name,
+ dev_err(&sai->pdev->dev, "%pOFn%s already set as sync provider\n",
+ sai->pdev->dev.of_node,
prev_synco == STM_SAI_SYNC_OUT_A ? "A" : "B");
clk_disable_unprepare(sai->pclk);
return -EINVAL;
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index ea05cc91aa05..d4825700b63f 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -336,8 +336,7 @@ static int stm32_sai_mclk_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct stm32_sai_mclk_data *mclk = to_mclk_data(hw);
struct stm32_sai_sub_data *sai = mclk->sai_data;
- unsigned int div;
- int ret;
+ int div, ret;
div = stm32_sai_get_clk_div(sai, parent_rate, rate);
if (div < 0)
@@ -390,7 +389,7 @@ static int stm32_sai_add_mclk_provider(struct stm32_sai_sub_data *sai)
char *mclk_name, *p, *s = (char *)pname;
int ret, i = 0;
- mclk = devm_kzalloc(dev, sizeof(mclk), GFP_KERNEL);
+ mclk = devm_kzalloc(dev, sizeof(*mclk), GFP_KERNEL);
if (!mclk)
return -ENOMEM;
diff --git a/sound/soc/sunxi/Kconfig b/sound/soc/sunxi/Kconfig
index 66aad0d3f9c7..8134c3c94229 100644
--- a/sound/soc/sunxi/Kconfig
+++ b/sound/soc/sunxi/Kconfig
@@ -31,7 +31,7 @@ config SND_SUN8I_CODEC_ANALOG
config SND_SUN50I_CODEC_ANALOG
tristate "Allwinner sun50i Codec Analog Controls Support"
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
- select SND_SUNXI_ADDA_PR_REGMAP
+ select SND_SUN8I_ADDA_PR_REGMAP
help
Say Y or M if you want to add support for the analog controls for
the codec embedded in Allwinner A64 SoC.
diff --git a/sound/soc/sunxi/sun50i-codec-analog.c b/sound/soc/sunxi/sun50i-codec-analog.c
index 8f5f999df631..df1fed0aa001 100644
--- a/sound/soc/sunxi/sun50i-codec-analog.c
+++ b/sound/soc/sunxi/sun50i-codec-analog.c
@@ -274,6 +274,7 @@ static const struct snd_soc_dapm_widget sun50i_a64_codec_widgets[] = {
* stream widgets at the card level.
*/
+ SND_SOC_DAPM_REGULATOR_SUPPLY("hpvcc", 0, 0),
SND_SOC_DAPM_MUX("Headphone Source Playback Route",
SND_SOC_NOPM, 0, 0, sun50i_codec_hp_src),
SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUN50I_ADDA_HP_CTRL,
@@ -361,6 +362,7 @@ static const struct snd_soc_dapm_route sun50i_a64_codec_routes[] = {
{ "Headphone Source Playback Route", "Mixer", "Left Mixer" },
{ "Headphone Source Playback Route", "Mixer", "Right Mixer" },
{ "Headphone Amp", NULL, "Headphone Source Playback Route" },
+ { "Headphone Amp", NULL, "hpvcc" },
{ "HP", NULL, "Headphone Amp" },
/* Microphone Routes */
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index 522a72fde78d..92c5de026c43 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -481,7 +481,11 @@ static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
{ "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
"AIF1 Slot 0 Right"},
- /* ADC routes */
+ /* ADC Routes */
+ { "AIF1 Slot 0 Right ADC", NULL, "ADC" },
+ { "AIF1 Slot 0 Left ADC", NULL, "ADC" },
+
+ /* ADC Mixer Routes */
{ "Left Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch",
"AIF1 Slot 0 Left ADC" },
{ "Right Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch",
@@ -605,16 +609,10 @@ err_pm_disable:
static int sun8i_codec_remove(struct platform_device *pdev)
{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
- struct sun8i_codec *scodec = snd_soc_card_get_drvdata(card);
-
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
sun8i_codec_runtime_suspend(&pdev->dev);
- clk_disable_unprepare(scodec->clk_module);
- clk_disable_unprepare(scodec->clk_bus);
-
return 0;
}
diff --git a/sound/soc/ti/Kconfig b/sound/soc/ti/Kconfig
new file mode 100644
index 000000000000..4bf3c15d4e51
--- /dev/null
+++ b/sound/soc/ti/Kconfig
@@ -0,0 +1,209 @@
+menu "Audio support for Texas Instruments SoCs"
+depends on DMA_OMAP || TI_EDMA || COMPILE_TEST
+
+config SND_SOC_TI_EDMA_PCM
+ tristate
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+
+config SND_SOC_TI_SDMA_PCM
+ tristate
+ select SND_SOC_GENERIC_DMAENGINE_PCM
+
+comment "Texas Instruments DAI support for:"
+config SND_SOC_DAVINCI_ASP
+ tristate "daVinci Audio Serial Port (ASP) or McBSP suport"
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ select SND_SOC_TI_EDMA_PCM
+ help
+ Say Y or M here if you want audio support via daVinci ASP or McBSP.
+ The driver only implements the ASP support which is a subset of
+ daVinci McBSP (w/o the multichannel support).
+
+config SND_SOC_DAVINCI_MCASP
+ tristate "Multichannel Audio Serial Port (McASP) support"
+ select SND_SOC_TI_EDMA_PCM if TI_EDMA
+ select SND_SOC_TI_SDMA_PCM if DMA_OMAP
+ help
+ Say Y or M here if you want to have support for McASP IP found in
+ various Texas Instruments SoCs like:
+ - daVinci devices
+ - Sitara line of SoCs (AM335x, AM438x, etc)
+ - DRA7x devices
+ - Keystone devices
+
+config SND_SOC_DAVINCI_VCIF
+ tristate "daVinci Voice Interface (VCIF) suport"
+ depends on ARCH_DAVINCI || COMPILE_TEST
+ select SND_SOC_TI_EDMA_PCM
+ help
+ Say Y or M here if you want audio support via daVinci VCIF.
+
+config SND_SOC_OMAP_DMIC
+ tristate "Digital Microphone Module (DMIC) support"
+ depends on ARCH_OMAP4 || SOC_OMAP5 || COMPILE_TEST
+ select SND_SOC_TI_SDMA_PCM
+ help
+ Say Y or M here if you want to have support for DMIC IP found in
+ OMAP4 and OMAP5.
+
+config SND_SOC_OMAP_MCBSP
+ tristate "Multichannel Buffered Serial Port (McBSP) support"
+ depends on ARCH_OMAP || ARCH_OMAP1 || COMPILE_TEST
+ select SND_SOC_TI_SDMA_PCM
+ help
+ Say Y or M here if you want to have support for McBSP IP found in
+ Texas Instruments OMAP1/2/3/4/5 SoCs.
+
+config SND_SOC_OMAP_MCPDM
+ tristate "Multichannel PDM Controller (McPDM) support"
+ depends on ARCH_OMAP4 || SOC_OMAP5 || COMPILE_TEST
+ select SND_SOC_TI_SDMA_PCM
+ help
+ Say Y or M here if you want to have support for McPDM IP found in
+ OMAP4 and OMAP5.
+
+comment "Audio support for boards with Texas Instruments SoCs"
+config SND_SOC_NOKIA_N810
+ tristate "SoC Audio support for Nokia N810"
+ depends on MACH_NOKIA_N810 && I2C
+ select SND_SOC_OMAP_MCBSP
+ select SND_SOC_TLV320AIC3X
+ help
+ Say Y or M if you want to add support for SoC audio on Nokia N810.
+
+config SND_SOC_NOKIA_RX51
+ tristate "SoC Audio support for Nokia RX-51"
+ depends on ARCH_OMAP3 && I2C && GPIOLIB
+ select SND_SOC_OMAP_MCBSP
+ select SND_SOC_TLV320AIC3X
+ select SND_SOC_TPA6130A2
+ help
+	  Say Y or M if you want to add support for SoC audio on Nokia RX-51
+	  hardware, also known as the Nokia N900.
+
+config SND_SOC_OMAP3_PANDORA
+ tristate "SoC Audio support for OMAP3 Pandora"
+ depends on ARCH_OMAP3
+ depends on TWL4030_CORE
+ select SND_SOC_OMAP_MCBSP
+ select SND_SOC_TWL4030
+ help
+ Say Y or M if you want to add support for SoC audio on the OMAP3 Pandora.
+
+config SND_SOC_OMAP3_TWL4030
+ tristate "SoC Audio support for OMAP3 based boards with twl4030 codec"
+ depends on ARCH_OMAP3 || COMPILE_TEST
+ depends on TWL4030_CORE
+ select SND_SOC_OMAP_MCBSP
+ select SND_SOC_TWL4030
+ help
+ Say Y or M if you want to add support for SoC audio on OMAP3 based
+ boards using twl4030 as codec. This driver currently supports:
+ - Beagleboard or Devkit8000
+ - Gumstix Overo or CompuLab CM-T35/CM-T3730
+ - IGEP v2
+ - OMAP3EVM
+ - SDP3430
+ - Zoom2
+
+config SND_SOC_OMAP_ABE_TWL6040
+ tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec"
+ depends on TWL6040_CORE && COMMON_CLK
+ depends on ARCH_OMAP4 || (SOC_OMAP5 && MFD_PALMAS) || COMPILE_TEST
+ select SND_SOC_OMAP_DMIC
+ select SND_SOC_OMAP_MCPDM
+ select SND_SOC_TWL6040
+ help
+ Say Y or M if you want to add support for SoC audio on OMAP boards
+ using ABE and twl6040 codec. This driver currently supports:
+ - SDP4430/Blaze boards
+ - PandaBoard (4430)
+ - PandaBoardES (4460)
+ - OMAP5 uEVM
+
+config SND_SOC_OMAP_AMS_DELTA
+ tristate "SoC Audio support for Amstrad E3 (Delta) videophone"
+ depends on MACH_AMS_DELTA && TTY
+ select SND_SOC_OMAP_MCBSP
+ select SND_SOC_CX20442
+ help
+	  Say Y or M if you want to add support for the SoC audio device
+	  connected to a handset and a speakerphone found on the Amstrad E3
+	  (Delta) videophone.
+
+	  Note that in order to get those devices fully supported, you have to
+	  build the kernel with the standard serial port driver included and
+	  configured for at least 4 ports. Then, from userspace, you must load
+	  line discipline #19 on the modem (ttyS3) serial line. The simplest
+ way to achieve this is to install util-linux-ng and use the included
+ ldattach utility. This can be started automatically from udev,
+ a simple rule like this one should do the trick (it does for me):
+ ACTION=="add", KERNEL=="controlC0", \
+ RUN+="/usr/sbin/ldattach 19 /dev/ttyS3"
+
+config SND_SOC_OMAP_HDMI
+ tristate "OMAP4/5 HDMI audio support"
+ depends on OMAP4_DSS_HDMI || OMAP5_DSS_HDMI || COMPILE_TEST
+ select SND_SOC_TI_SDMA_PCM
+ help
+	  For HDMI audio to work, OMAPDSS HDMI support must be
+	  enabled.
+	  The HDMI audio driver implements a cpu-dai component using the
+	  callbacks provided by OMAPDSS and registers the component
+	  under the DSS HDMI device. Omap-pcm is registered as the
+	  platform component, also under the DSS HDMI device, and a
+	  dummy codec is used as the codec component. The HDMI audio
+	  driver also implements the card and registers it under its
+	  own platform device. The device for the driver is registered
+	  by the OMAPDSS HDMI driver.
+
+config SND_SOC_OMAP_OSK5912
+ tristate "SoC Audio support for omap osk5912"
+ depends on MACH_OMAP_OSK && I2C
+ select SND_SOC_OMAP_MCBSP
+ select SND_SOC_TLV320AIC23_I2C
+ help
+ Say Y or M if you want to add support for SoC audio on osk5912.
+
+config SND_SOC_DAVINCI_EVM
+ tristate "SoC Audio support for DaVinci EVMs"
+ depends on ARCH_DAVINCI && I2C
+ select SND_SOC_DAVINCI_ASP if MACH_DAVINCI_DM355_EVM
+ select SND_SOC_DAVINCI_ASP if SND_SOC_DM365_AIC3X_CODEC
+ select SND_SOC_DAVINCI_VCIF if SND_SOC_DM365_VOICE_CODEC
+ select SND_SOC_DAVINCI_ASP if MACH_DAVINCI_EVM # DM6446
+ select SND_SOC_DAVINCI_MCASP if MACH_DAVINCI_DM6467_EVM
+ select SND_SOC_SPDIF if MACH_DAVINCI_DM6467_EVM
+ select SND_SOC_DAVINCI_MCASP if MACH_DAVINCI_DA830_EVM
+ select SND_SOC_DAVINCI_MCASP if MACH_DAVINCI_DA850_EVM
+ select SND_SOC_TLV320AIC3X
+ help
+ Say Y if you want to add support for SoC audio on the following TI
+ DaVinci EVM platforms:
+ - DM355
+ - DM365
+ - DM6446
+	  - DM6467
+	  - DA830
+	  - DA850
+
+choice
+ prompt "DM365 codec select"
+ depends on SND_SOC_DAVINCI_EVM
+ depends on MACH_DAVINCI_DM365_EVM
+
+config SND_SOC_DM365_AIC3X_CODEC
+ bool "Audio Codec - AIC3101"
+ help
+	  Say Y if you want to add support for the AIC3101 audio codec.
+
+config SND_SOC_DM365_VOICE_CODEC
+ bool "Voice Codec - CQ93VC"
+ select MFD_DAVINCI_VOICECODEC
+ select SND_SOC_CQ0093VC
+ help
+	  Say Y if you want to add support for the SoC on-chip voice codec.
+endchoice
+
+endmenu
+
diff --git a/sound/soc/ti/Makefile b/sound/soc/ti/Makefile
new file mode 100644
index 000000000000..08c44d56ef3e
--- /dev/null
+++ b/sound/soc/ti/Makefile
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Platform drivers
+snd-soc-ti-edma-objs := edma-pcm.o
+snd-soc-ti-sdma-objs := sdma-pcm.o
+
+obj-$(CONFIG_SND_SOC_TI_EDMA_PCM) += snd-soc-ti-edma.o
+obj-$(CONFIG_SND_SOC_TI_SDMA_PCM) += snd-soc-ti-sdma.o
+
+# CPU DAI drivers
+snd-soc-davinci-asp-objs := davinci-i2s.o
+snd-soc-davinci-mcasp-objs := davinci-mcasp.o
+snd-soc-davinci-vcif-objs := davinci-vcif.o
+snd-soc-omap-dmic-objs := omap-dmic.o
+snd-soc-omap-mcbsp-objs := omap-mcbsp.o omap-mcbsp-st.o
+snd-soc-omap-mcpdm-objs := omap-mcpdm.o
+
+obj-$(CONFIG_SND_SOC_DAVINCI_ASP) += snd-soc-davinci-asp.o
+obj-$(CONFIG_SND_SOC_DAVINCI_MCASP) += snd-soc-davinci-mcasp.o
+obj-$(CONFIG_SND_SOC_DAVINCI_VCIF) += snd-soc-davinci-vcif.o
+obj-$(CONFIG_SND_SOC_OMAP_DMIC) += snd-soc-omap-dmic.o
+obj-$(CONFIG_SND_SOC_OMAP_MCBSP) += snd-soc-omap-mcbsp.o
+obj-$(CONFIG_SND_SOC_OMAP_MCPDM) += snd-soc-omap-mcpdm.o
+
+# Machine drivers
+snd-soc-davinci-evm-objs := davinci-evm.o
+snd-soc-n810-objs := n810.o
+snd-soc-rx51-objs := rx51.o
+snd-soc-omap3pandora-objs := omap3pandora.o
+snd-soc-omap-twl4030-objs := omap-twl4030.o
+snd-soc-omap-abe-twl6040-objs := omap-abe-twl6040.o
+snd-soc-ams-delta-objs := ams-delta.o
+snd-soc-omap-hdmi-objs := omap-hdmi.o
+snd-soc-osk5912-objs := osk5912.o
+
+obj-$(CONFIG_SND_SOC_DAVINCI_EVM) += snd-soc-davinci-evm.o
+obj-$(CONFIG_SND_SOC_NOKIA_N810) += snd-soc-n810.o
+obj-$(CONFIG_SND_SOC_NOKIA_RX51) += snd-soc-rx51.o
+obj-$(CONFIG_SND_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
+obj-$(CONFIG_SND_SOC_OMAP3_TWL4030) += snd-soc-omap-twl4030.o
+obj-$(CONFIG_SND_SOC_OMAP_ABE_TWL6040) += snd-soc-omap-abe-twl6040.o
+obj-$(CONFIG_SND_SOC_OMAP_AMS_DELTA) += snd-soc-ams-delta.o
+obj-$(CONFIG_SND_SOC_OMAP_HDMI) += snd-soc-omap-hdmi.o
+obj-$(CONFIG_SND_SOC_OMAP_OSK5912) += snd-soc-osk5912.o
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/ti/ams-delta.c
index 4dce494dfbd3..4dce494dfbd3 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/ti/ams-delta.c
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/ti/davinci-evm.c
index 7a369e0f2093..4869d6311510 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/ti/davinci-evm.c
@@ -170,7 +170,7 @@ static struct snd_soc_dai_link dm355_evm_dai = {
};
static struct snd_soc_dai_link dm365_evm_dai = {
-#ifdef CONFIG_SND_DM365_AIC3X_CODEC
+#ifdef CONFIG_SND_SOC_DM365_AIC3X_CODEC
.name = "TLV320AIC3X",
.stream_name = "AIC3X",
.cpu_dai_name = "davinci-mcbsp",
@@ -181,7 +181,7 @@ static struct snd_soc_dai_link dm365_evm_dai = {
.ops = &evm_ops,
.dai_fmt = SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_CBM_CFM |
SND_SOC_DAIFMT_IB_NF,
-#elif defined(CONFIG_SND_DM365_VOICE_CODEC)
+#elif defined(CONFIG_SND_SOC_DM365_VOICE_CODEC)
.name = "Voice Codec - CQ93VC",
.stream_name = "CQ93",
.cpu_dai_name = "davinci-vcif",
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/ti/davinci-i2s.c
index a3206e65e5e5..a3206e65e5e5 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/ti/davinci-i2s.c
diff --git a/sound/soc/davinci/davinci-i2s.h b/sound/soc/ti/davinci-i2s.h
index 48dac3e2521a..48dac3e2521a 100644
--- a/sound/soc/davinci/davinci-i2s.h
+++ b/sound/soc/ti/davinci-i2s.h
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/ti/davinci-mcasp.c
index 267aee776b2d..eeda6d5565bc 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/ti/davinci-mcasp.c
@@ -28,6 +28,7 @@
#include <linux/of_device.h>
#include <linux/platform_data/davinci_asp.h>
#include <linux/math64.h>
+#include <linux/bitmap.h>
#include <sound/asoundef.h>
#include <sound/core.h>
@@ -38,7 +39,7 @@
#include <sound/dmaengine_pcm.h>
#include "edma-pcm.h"
-#include "../omap/sdma-pcm.h"
+#include "sdma-pcm.h"
#include "davinci-mcasp.h"
#define MCASP_MAX_AFIFO_DEPTH 64
@@ -84,6 +85,7 @@ struct davinci_mcasp {
u32 tdm_mask[2];
int slot_width;
u8 op_mode;
+ u8 dismod;
u8 num_serializer;
u8 *serial_dir;
u8 version;
@@ -95,6 +97,8 @@ struct davinci_mcasp {
int sysclk_freq;
bool bclk_master;
+ unsigned long pdir; /* Pin direction bitfield */
+
/* McASP FIFO related */
u8 txnumevt;
u8 rxnumevt;
@@ -169,6 +173,30 @@ static bool mcasp_is_synchronous(struct davinci_mcasp *mcasp)
return !(aclkxctl & TX_ASYNC) && rxfmctl & AFSRE;
}
+static inline void mcasp_set_clk_pdir(struct davinci_mcasp *mcasp, bool enable)
+{
+ u32 bit = PIN_BIT_AMUTE;
+
+ for_each_set_bit_from(bit, &mcasp->pdir, PIN_BIT_AFSR + 1) {
+ if (enable)
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit));
+ else
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit));
+ }
+}
+
+static inline void mcasp_set_axr_pdir(struct davinci_mcasp *mcasp, bool enable)
+{
+ u32 bit;
+
+	for_each_set_bit(bit, &mcasp->pdir, PIN_BIT_AMUTE) {
+ if (enable)
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit));
+ else
+ mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, BIT(bit));
+ }
+}
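The two helpers above commit the cached directions in two phases: clock and frame-sync pins (bits PIN_BIT_AMUTE..PIN_BIT_AFSR) when the clocks start, and data pins (the AXRn bits below PIN_BIT_AMUTE) only once the serializers are active. A hedged sketch of the resulting call pattern:

    set_bit(PIN_BIT_AHCLKX, &mcasp->pdir);  /* MCLK output */
    set_bit(PIN_BIT_AXR(0), &mcasp->pdir);  /* serializer 0 transmits */

    mcasp_set_clk_pdir(mcasp, true);  /* drives AHCLKX; AXR0 untouched */
    /* ... clocks running, XDATA cleared ... */
    mcasp_set_axr_pdir(mcasp, true);  /* now drive AXR0 */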
+
static void mcasp_start_rx(struct davinci_mcasp *mcasp)
{
if (mcasp->rxnumevt) { /* enable FIFO */
@@ -192,6 +220,7 @@ static void mcasp_start_rx(struct davinci_mcasp *mcasp)
}
/* Activate serializer(s) */
+ mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSERCLR);
/* Release RX state machine */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
@@ -219,7 +248,10 @@ static void mcasp_start_tx(struct davinci_mcasp *mcasp)
/* Start clocks */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
+ mcasp_set_clk_pdir(mcasp, true);
+
/* Activate serializer(s) */
+ mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSERCLR);
/* wait for XDATA to be cleared */
@@ -228,6 +260,8 @@ static void mcasp_start_tx(struct davinci_mcasp *mcasp)
(cnt < 100000))
cnt++;
+ mcasp_set_axr_pdir(mcasp, true);
+
/* Release TX state machine */
mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSMRST);
/* Release Frame Sync generator */
@@ -258,8 +292,10 @@ static void mcasp_stop_rx(struct davinci_mcasp *mcasp)
* In synchronous mode stop the TX clocks if no other stream is
* running
*/
- if (mcasp_is_synchronous(mcasp) && !mcasp->streams)
+ if (mcasp_is_synchronous(mcasp) && !mcasp->streams) {
+ mcasp_set_clk_pdir(mcasp, false);
mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, 0);
+ }
mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, 0);
mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
@@ -285,6 +321,9 @@ static void mcasp_stop_tx(struct davinci_mcasp *mcasp)
*/
if (mcasp_is_synchronous(mcasp) && mcasp->streams)
val = TXHCLKRST | TXCLKRST | TXFSRST;
+ else
+ mcasp_set_clk_pdir(mcasp, false);
+
mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, val);
mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
@@ -294,6 +333,8 @@ static void mcasp_stop_tx(struct davinci_mcasp *mcasp)
mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
}
+
+ mcasp_set_axr_pdir(mcasp, false);
}
static void davinci_mcasp_stop(struct davinci_mcasp *mcasp, int stream)
@@ -444,8 +485,13 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
- mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, ACLKX | ACLKR);
- mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, AFSX | AFSR);
+ /* BCLK */
+ set_bit(PIN_BIT_ACLKX, &mcasp->pdir);
+ set_bit(PIN_BIT_ACLKR, &mcasp->pdir);
+ /* Frame Sync */
+ set_bit(PIN_BIT_AFSX, &mcasp->pdir);
+ set_bit(PIN_BIT_AFSR, &mcasp->pdir);
+
mcasp->bclk_master = 1;
break;
case SND_SOC_DAIFMT_CBS_CFM:
@@ -456,8 +502,13 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
- mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, ACLKX | ACLKR);
- mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, AFSX | AFSR);
+ /* BCLK */
+ set_bit(PIN_BIT_ACLKX, &mcasp->pdir);
+ set_bit(PIN_BIT_ACLKR, &mcasp->pdir);
+ /* Frame Sync */
+ clear_bit(PIN_BIT_AFSX, &mcasp->pdir);
+ clear_bit(PIN_BIT_AFSR, &mcasp->pdir);
+
mcasp->bclk_master = 1;
break;
case SND_SOC_DAIFMT_CBM_CFS:
@@ -468,8 +519,13 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
- mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, ACLKX | ACLKR);
- mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, AFSX | AFSR);
+ /* BCLK */
+ clear_bit(PIN_BIT_ACLKX, &mcasp->pdir);
+ clear_bit(PIN_BIT_ACLKR, &mcasp->pdir);
+ /* Frame Sync */
+ set_bit(PIN_BIT_AFSX, &mcasp->pdir);
+ set_bit(PIN_BIT_AFSR, &mcasp->pdir);
+
mcasp->bclk_master = 0;
break;
case SND_SOC_DAIFMT_CBM_CFM:
@@ -480,8 +536,13 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
- mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG,
- ACLKX | AFSX | ACLKR | AHCLKR | AFSR);
+ /* BCLK */
+ clear_bit(PIN_BIT_ACLKX, &mcasp->pdir);
+ clear_bit(PIN_BIT_ACLKR, &mcasp->pdir);
+ /* Frame Sync */
+ clear_bit(PIN_BIT_AFSX, &mcasp->pdir);
+ clear_bit(PIN_BIT_AFSR, &mcasp->pdir);
+
mcasp->bclk_master = 0;
break;
default:
@@ -596,11 +657,11 @@ static int davinci_mcasp_set_sysclk(struct snd_soc_dai *dai, int clk_id,
if (dir == SND_SOC_CLOCK_OUT) {
mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXE);
mcasp_set_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, AHCLKRE);
- mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, AHCLKX);
+ set_bit(PIN_BIT_AHCLKX, &mcasp->pdir);
} else {
mcasp_clr_bits(mcasp, DAVINCI_MCASP_AHCLKXCTL_REG, AHCLKXE);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_AHCLKRCTL_REG, AHCLKRE);
- mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, AHCLKX);
+ clear_bit(PIN_BIT_AHCLKX, &mcasp->pdir);
}
mcasp->sysclk_freq = freq;
@@ -773,17 +834,23 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
mcasp->serial_dir[i]);
if (mcasp->serial_dir[i] == TX_MODE &&
tx_ser < max_active_serializers) {
- mcasp_set_bits(mcasp, DAVINCI_MCASP_PDIR_REG, AXR(i));
mcasp_mod_bits(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
- DISMOD_LOW, DISMOD_MASK);
+ mcasp->dismod, DISMOD_MASK);
+ set_bit(PIN_BIT_AXR(i), &mcasp->pdir);
tx_ser++;
} else if (mcasp->serial_dir[i] == RX_MODE &&
rx_ser < max_active_serializers) {
- mcasp_clr_bits(mcasp, DAVINCI_MCASP_PDIR_REG, AXR(i));
+ clear_bit(PIN_BIT_AXR(i), &mcasp->pdir);
rx_ser++;
} else if (mcasp->serial_dir[i] == INACTIVE_MODE) {
mcasp_mod_bits(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
SRMOD_INACTIVE, SRMOD_MASK);
+ clear_bit(PIN_BIT_AXR(i), &mcasp->pdir);
+ } else if (mcasp->serial_dir[i] == TX_MODE) {
+ /* Unused TX pins, clear PDIR */
+ mcasp_mod_bits(mcasp, DAVINCI_MCASP_XRSRCTL_REG(i),
+ mcasp->dismod, DISMOD_MASK);
+ clear_bit(PIN_BIT_AXR(i), &mcasp->pdir);
}
}
@@ -1645,6 +1712,7 @@ static struct davinci_mcasp_pdata *davinci_mcasp_set_pdata_from_of(
if (pdev->dev.platform_data) {
pdata = pdev->dev.platform_data;
+ pdata->dismod = DISMOD_LOW;
return pdata;
} else if (match) {
pdata = devm_kmemdup(&pdev->dev, match->data, sizeof(*pdata),
@@ -1734,6 +1802,18 @@ static struct davinci_mcasp_pdata *davinci_mcasp_set_pdata_from_of(
if (ret >= 0)
pdata->sram_size_capture = val;
+ ret = of_property_read_u32(np, "dismod", &val);
+ if (ret >= 0) {
+ if (val == 0 || val == 2 || val == 3) {
+ pdata->dismod = DISMOD_VAL(val);
+ } else {
+ dev_warn(&pdev->dev, "Invalid dismod value: %u\n", val);
+ pdata->dismod = DISMOD_LOW;
+ }
+ } else {
+ pdata->dismod = DISMOD_LOW;
+ }
+
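A hedged DT example of the property parsed above (the node is hypothetical): val 1 is rejected because the 01 DISMOD encoding is reserved in the McASP register layout, so only 0 (3-state), 2 (low) and 3 (high) pass through DISMOD_VAL():

    /* board DTS sketch:
     *     &mcasp0 {
     *             ...
     *             dismod = <3>;   // drive inactive TX pins high
     *     };
     * omitting the property keeps the DISMOD_LOW default.
     */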
return pdata;
nodata:
@@ -1909,6 +1989,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
mcasp->version = pdata->version;
mcasp->txnumevt = pdata->txnumevt;
mcasp->rxnumevt = pdata->rxnumevt;
+ mcasp->dismod = pdata->dismod;
mcasp->dev = &pdev->dev;
@@ -2068,9 +2149,9 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
ret = davinci_mcasp_get_dma_type(mcasp);
switch (ret) {
case PCM_EDMA:
-#if IS_BUILTIN(CONFIG_SND_EDMA_SOC) || \
- (IS_MODULE(CONFIG_SND_DAVINCI_SOC_MCASP) && \
- IS_MODULE(CONFIG_SND_EDMA_SOC))
+#if IS_BUILTIN(CONFIG_SND_SOC_TI_EDMA_PCM) || \
+ (IS_MODULE(CONFIG_SND_SOC_DAVINCI_MCASP) && \
+ IS_MODULE(CONFIG_SND_SOC_TI_EDMA_PCM))
ret = edma_pcm_platform_register(&pdev->dev);
#else
dev_err(&pdev->dev, "Missing SND_EDMA_SOC\n");
@@ -2079,9 +2160,9 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
#endif
break;
case PCM_SDMA:
-#if IS_BUILTIN(CONFIG_SND_SDMA_SOC) || \
- (IS_MODULE(CONFIG_SND_DAVINCI_SOC_MCASP) && \
- IS_MODULE(CONFIG_SND_SDMA_SOC))
+#if IS_BUILTIN(CONFIG_SND_SOC_TI_SDMA_PCM) || \
+ (IS_MODULE(CONFIG_SND_SOC_DAVINCI_MCASP) && \
+ IS_MODULE(CONFIG_SND_SOC_TI_SDMA_PCM))
ret = sdma_pcm_platform_register(&pdev->dev, NULL, NULL);
#else
dev_err(&pdev->dev, "Missing SND_SDMA_SOC\n");
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/ti/davinci-mcasp.h
index afddc8010c54..5e4060d8fe56 100644
--- a/sound/soc/davinci/davinci-mcasp.h
+++ b/sound/soc/ti/davinci-mcasp.h
@@ -108,27 +108,18 @@
/*
* DAVINCI_MCASP_PFUNC_REG - Pin Function / GPIO Enable Register Bits
- */
-#define AXR(n) (1<<n)
-#define PFUNC_AMUTE BIT(25)
-#define ACLKX BIT(26)
-#define AHCLKX BIT(27)
-#define AFSX BIT(28)
-#define ACLKR BIT(29)
-#define AHCLKR BIT(30)
-#define AFSR BIT(31)
-
-/*
* DAVINCI_MCASP_PDIR_REG - Pin Direction Register Bits
+ * DAVINCI_MCASP_PDOUT_REG - Pin output in GPIO mode
+ * DAVINCI_MCASP_PDSET_REG - Pin input in GPIO mode
*/
-#define AXR(n) (1<<n)
-#define PDIR_AMUTE BIT(25)
-#define ACLKX BIT(26)
-#define AHCLKX BIT(27)
-#define AFSX BIT(28)
-#define ACLKR BIT(29)
-#define AHCLKR BIT(30)
-#define AFSR BIT(31)
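+/*
+ * Plain bit numbers (not masks), so they can be used directly with
+ * set_bit()/clear_bit() on the driver's pdir shadow field.
+ */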
+#define PIN_BIT_AXR(n) (n)
+#define PIN_BIT_AMUTE 25
+#define PIN_BIT_ACLKX 26
+#define PIN_BIT_AHCLKX 27
+#define PIN_BIT_AFSX 28
+#define PIN_BIT_ACLKR 29
+#define PIN_BIT_AHCLKR 30
+#define PIN_BIT_AFSR 31
/*
* DAVINCI_MCASP_TXDITCTL_REG - Transmit DIT Control Register Bits
@@ -218,6 +209,7 @@
#define DISMOD_3STATE (0x0)
#define DISMOD_LOW (0x2 << 2)
#define DISMOD_HIGH (0x3 << 2)
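+/* Build a DISMOD field value directly from the DT-provided number (0, 2 or 3) */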
+#define DISMOD_VAL(x) ((x) << 2)
#define DISMOD_MASK DISMOD_HIGH
#define TXSTATE BIT(4)
#define RXSTATE BIT(5)
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/ti/davinci-vcif.c
index 5415b72393fa..5415b72393fa 100644
--- a/sound/soc/davinci/davinci-vcif.c
+++ b/sound/soc/ti/davinci-vcif.c
diff --git a/sound/soc/davinci/edma-pcm.c b/sound/soc/ti/edma-pcm.c
index 59e588abe54b..59e588abe54b 100644
--- a/sound/soc/davinci/edma-pcm.c
+++ b/sound/soc/ti/edma-pcm.c
diff --git a/sound/soc/davinci/edma-pcm.h b/sound/soc/ti/edma-pcm.h
index b0957744851c..8058bdb0f032 100644
--- a/sound/soc/davinci/edma-pcm.h
+++ b/sound/soc/ti/edma-pcm.h
@@ -20,13 +20,13 @@
#ifndef __EDMA_PCM_H__
#define __EDMA_PCM_H__
-#if IS_ENABLED(CONFIG_SND_EDMA_SOC)
+#if IS_ENABLED(CONFIG_SND_SOC_TI_EDMA_PCM)
int edma_pcm_platform_register(struct device *dev);
#else
static inline int edma_pcm_platform_register(struct device *dev)
{
return 0;
}
-#endif /* CONFIG_SND_EDMA_SOC */
+#endif /* CONFIG_SND_SOC_TI_EDMA_PCM */
#endif /* __EDMA_PCM_H__ */
diff --git a/sound/soc/omap/n810.c b/sound/soc/ti/n810.c
index 9cfefe44a75f..9cfefe44a75f 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/ti/n810.c
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/ti/omap-abe-twl6040.c
index d5ae9eb8c756..fed45b41f9d3 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/ti/omap-abe-twl6040.c
@@ -36,6 +36,8 @@
#include "../codecs/twl6040.h"
struct abe_twl6040 {
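+ /* Moved from file-scope statics into per-device data (see the removed omap_abe_card/dai_links below) */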
+ struct snd_soc_card card;
+ struct snd_soc_dai_link dai_links[2];
int jack_detection; /* board can detect jack events */
int mclk_freq; /* MCLK frequency speed for twl6040 */
};
@@ -208,40 +210,10 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
ARRAY_SIZE(dmic_audio_map));
}
-/* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
- {
- .name = "TWL6040",
- .stream_name = "TWL6040",
- .codec_dai_name = "twl6040-legacy",
- .codec_name = "twl6040-codec",
- .init = omap_abe_twl6040_init,
- .ops = &omap_abe_ops,
- },
- {
- .name = "DMIC",
- .stream_name = "DMIC Capture",
- .codec_dai_name = "dmic-hifi",
- .codec_name = "dmic-codec",
- .init = omap_abe_dmic_init,
- .ops = &omap_abe_dmic_ops,
- },
-};
-
-/* Audio machine driver */
-static struct snd_soc_card omap_abe_card = {
- .owner = THIS_MODULE,
-
- .dapm_widgets = twl6040_dapm_widgets,
- .num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets),
- .dapm_routes = audio_map,
- .num_dapm_routes = ARRAY_SIZE(audio_map),
-};
-
static int omap_abe_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct snd_soc_card *card = &omap_abe_card;
+ struct snd_soc_card *card;
struct device_node *dai_node;
struct abe_twl6040 *priv;
int num_links = 0;
@@ -252,12 +224,18 @@ static int omap_abe_probe(struct platform_device *pdev)
return -ENODEV;
}
- card->dev = &pdev->dev;
-
priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
if (priv == NULL)
return -ENOMEM;
+ card = &priv->card;
+ card->dev = &pdev->dev;
+ card->owner = THIS_MODULE;
+ card->dapm_widgets = twl6040_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets);
+ card->dapm_routes = audio_map;
+ card->num_dapm_routes = ARRAY_SIZE(audio_map);
+
if (snd_soc_of_parse_card_name(card, "ti,model")) {
dev_err(&pdev->dev, "Card name is not provided\n");
return -ENODEV;
@@ -274,14 +252,27 @@ static int omap_abe_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "McPDM node is not provided\n");
return -EINVAL;
}
- abe_twl6040_dai_links[0].cpu_of_node = dai_node;
- abe_twl6040_dai_links[0].platform_of_node = dai_node;
+
+ priv->dai_links[0].name = "TWL6040";
+ priv->dai_links[0].stream_name = "TWL6040";
+ priv->dai_links[0].cpu_of_node = dai_node;
+ priv->dai_links[0].platform_of_node = dai_node;
+ priv->dai_links[0].codec_dai_name = "twl6040-legacy";
+ priv->dai_links[0].codec_name = "twl6040-codec";
+ priv->dai_links[0].init = omap_abe_twl6040_init;
+ priv->dai_links[0].ops = &omap_abe_ops;
dai_node = of_parse_phandle(node, "ti,dmic", 0);
if (dai_node) {
num_links = 2;
- abe_twl6040_dai_links[1].cpu_of_node = dai_node;
- abe_twl6040_dai_links[1].platform_of_node = dai_node;
+ priv->dai_links[1].name = "DMIC";
+ priv->dai_links[1].stream_name = "DMIC Capture";
+ priv->dai_links[1].cpu_of_node = dai_node;
+ priv->dai_links[1].platform_of_node = dai_node;
+ priv->dai_links[1].codec_dai_name = "dmic-hifi";
+ priv->dai_links[1].codec_name = "dmic-codec";
+ priv->dai_links[1].init = omap_abe_dmic_init;
+ priv->dai_links[1].ops = &omap_abe_dmic_ops;
} else {
num_links = 1;
}
@@ -300,7 +291,7 @@ static int omap_abe_probe(struct platform_device *pdev)
return -ENODEV;
}
- card->dai_link = abe_twl6040_dai_links;
+ card->dai_link = priv->dai_links;
card->num_links = num_links;
snd_soc_card_set_drvdata(card, priv);
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/ti/omap-dmic.c
index fe966272bd0c..cba9645b6487 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/ti/omap-dmic.c
@@ -48,6 +48,8 @@ struct omap_dmic {
struct device *dev;
void __iomem *io_base;
struct clk *fclk;
+ struct pm_qos_request pm_qos_req;
+ int latency;
int fclk_freq;
int out_freq;
int clk_div;
@@ -124,6 +126,8 @@ static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,
mutex_lock(&dmic->mutex);
+ if (pm_qos_request_active(&dmic->pm_qos_req))
+ pm_qos_remove_request(&dmic->pm_qos_req);
+
if (!dai->active)
dmic->active = 0;
@@ -228,6 +232,8 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream,
/* packet size is threshold * channels */
dma_data = snd_soc_dai_get_dma_data(dai, substream);
dma_data->maxburst = dmic->threshold * channels;
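+ /*
+ * FIFO headroom above the threshold, expressed as the time (in usec)
+ * the DMA has to service a request before the FIFO overruns.
+ */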
+ dmic->latency = (OMAP_DMIC_THRES_MAX - dmic->threshold) * USEC_PER_SEC /
+ params_rate(params);
return 0;
}
@@ -238,6 +244,9 @@ static int omap_dmic_dai_prepare(struct snd_pcm_substream *substream,
struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
u32 ctrl;
+ /* Cap CPU DMA latency while capturing so deep idle cannot starve the FIFO */
+ if (pm_qos_request_active(&dmic->pm_qos_req))
+ pm_qos_update_request(&dmic->pm_qos_req, dmic->latency);
+ else if (dmic->latency)
+ pm_qos_add_request(&dmic->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ dmic->latency);
+
/* Configure uplink threshold */
omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold);
diff --git a/sound/soc/omap/omap-dmic.h b/sound/soc/ti/omap-dmic.h
index 231e728bff0e..231e728bff0e 100644
--- a/sound/soc/omap/omap-dmic.h
+++ b/sound/soc/ti/omap-dmic.h
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/ti/omap-hdmi.c
index 673a9eb153b2..673a9eb153b2 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/ti/omap-hdmi.c
diff --git a/sound/soc/omap/mcbsp.h b/sound/soc/ti/omap-mcbsp-priv.h
index 46ae1269a698..7865cda4bf0a 100644
--- a/sound/soc/omap/mcbsp.h
+++ b/sound/soc/ti/omap-mcbsp-priv.h
@@ -1,28 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * sound/soc/omap/mcbsp.h
- *
* OMAP Multi-Channel Buffered Serial Port
*
* Contact: Jarkko Nikula <jarkko.nikula@bitmer.com>
* Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
-#ifndef __ASOC_MCBSP_H
-#define __ASOC_MCBSP_H
+
+#ifndef __OMAP_MCBSP_PRIV_H__
+#define __OMAP_MCBSP_PRIV_H__
+
+#include <linux/platform_data/asoc-ti-mcbsp.h>
#ifdef CONFIG_ARCH_OMAP1
#define mcbsp_omap1() 1
@@ -30,8 +17,6 @@
#define mcbsp_omap1() 0
#endif
-#include <sound/dmaengine_pcm.h>
-
/* McBSP register numbers. Register address offset = num * reg_step */
enum {
/* Common registers */
@@ -85,15 +70,6 @@ enum {
OMAP_MCBSP_REG_SSELCR,
};
-/* OMAP3 sidetone control registers */
-#define OMAP_ST_REG_REV 0x00
-#define OMAP_ST_REG_SYSCONFIG 0x10
-#define OMAP_ST_REG_IRQSTATUS 0x18
-#define OMAP_ST_REG_IRQENABLE 0x1C
-#define OMAP_ST_REG_SGAINCR 0x24
-#define OMAP_ST_REG_SFIRCR 0x28
-#define OMAP_ST_REG_SSELCR 0x2C
-
/************************** McBSP SPCR1 bit definitions ***********************/
#define RRST BIT(0)
#define RRDY BIT(1)
@@ -202,24 +178,6 @@ enum {
#define SIDLEMODE(value) (((value) & 0x3) << 3)
#define CLOCKACTIVITY(value) (((value) & 0x3) << 8)
-/********************** McBSP SSELCR bit definitions ***********************/
-#define SIDETONEEN BIT(10)
-
-/********************** McBSP Sidetone SYSCONFIG bit definitions ***********/
-#define ST_AUTOIDLE BIT(0)
-
-/********************** McBSP Sidetone SGAINCR bit definitions *************/
-#define ST_CH0GAIN(value) ((value) & 0xffff) /* Bits 0:15 */
-#define ST_CH1GAIN(value) (((value) & 0xffff) << 16) /* Bits 16:31 */
-
-/********************** McBSP Sidetone SFIRCR bit definitions **************/
-#define ST_FIRCOEFF(value) ((value) & 0xffff) /* Bits 0:15 */
-
-/********************** McBSP Sidetone SSELCR bit definitions **************/
-#define ST_SIDETONEEN BIT(0)
-#define ST_COEFFWREN BIT(1)
-#define ST_COEFFWRDONE BIT(2)
-
/********************** McBSP DMA operating modes **************************/
#define MCBSP_DMA_MODE_ELEMENT 0
#define MCBSP_DMA_MODE_THRESHOLD 1
@@ -278,16 +236,7 @@ struct omap_mcbsp_reg_cfg {
u16 rccr;
};
-struct omap_mcbsp_st_data {
- void __iomem *io_base_st;
- struct clk *mcbsp_iclk;
- bool running;
- bool enabled;
- s16 taps[128]; /* Sidetone filter coefficients */
- int nr_taps; /* Number of filter coefficients in use */
- s16 ch0gain;
- s16 ch1gain;
-};
+struct omap_mcbsp_st_data;
struct omap_mcbsp {
struct device *dev;
@@ -330,29 +279,46 @@ struct omap_mcbsp {
struct pm_qos_request pm_qos_req;
};
-void omap_mcbsp_config(struct omap_mcbsp *mcbsp,
- const struct omap_mcbsp_reg_cfg *config);
-void omap_mcbsp_set_tx_threshold(struct omap_mcbsp *mcbsp, u16 threshold);
-void omap_mcbsp_set_rx_threshold(struct omap_mcbsp *mcbsp, u16 threshold);
-u16 omap_mcbsp_get_tx_delay(struct omap_mcbsp *mcbsp);
-u16 omap_mcbsp_get_rx_delay(struct omap_mcbsp *mcbsp);
-int omap_mcbsp_get_dma_op_mode(struct omap_mcbsp *mcbsp);
-int omap_mcbsp_request(struct omap_mcbsp *mcbsp);
-void omap_mcbsp_free(struct omap_mcbsp *mcbsp);
-void omap_mcbsp_start(struct omap_mcbsp *mcbsp, int tx, int rx);
-void omap_mcbsp_stop(struct omap_mcbsp *mcbsp, int tx, int rx);
-
-/* McBSP functional clock source changing function */
-int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id);
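+/*
+ * Register accessors: each write is mirrored into reg_cache so that
+ * MCBSP_READ_CACHE() can return the last programmed value without
+ * touching the (possibly idled) hardware.
+ */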
+static inline void omap_mcbsp_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
+{
+ void __iomem *addr = mcbsp->io_base + reg * mcbsp->pdata->reg_step;
+
+ if (mcbsp->pdata->reg_size == 2) {
+ ((u16 *)mcbsp->reg_cache)[reg] = (u16)val;
+ writew_relaxed((u16)val, addr);
+ } else {
+ ((u32 *)mcbsp->reg_cache)[reg] = val;
+ writel_relaxed(val, addr);
+ }
+}
+
+static inline int omap_mcbsp_read(struct omap_mcbsp *mcbsp, u16 reg,
+ bool from_cache)
+{
+ void __iomem *addr = mcbsp->io_base + reg * mcbsp->pdata->reg_step;
+
+ if (mcbsp->pdata->reg_size == 2) {
+ return !from_cache ? readw_relaxed(addr) :
+ ((u16 *)mcbsp->reg_cache)[reg];
+ } else {
+ return !from_cache ? readl_relaxed(addr) :
+ ((u32 *)mcbsp->reg_cache)[reg];
+ }
+}
+
+#define MCBSP_READ(mcbsp, reg) \
+ omap_mcbsp_read(mcbsp, OMAP_MCBSP_REG_##reg, 0)
+#define MCBSP_WRITE(mcbsp, reg, val) \
+ omap_mcbsp_write(mcbsp, OMAP_MCBSP_REG_##reg, val)
+#define MCBSP_READ_CACHE(mcbsp, reg) \
+ omap_mcbsp_read(mcbsp, OMAP_MCBSP_REG_##reg, 1)
+
/* Sidetone specific API */
-int omap_st_set_chgain(struct omap_mcbsp *mcbsp, int channel, s16 chgain);
-int omap_st_get_chgain(struct omap_mcbsp *mcbsp, int channel, s16 *chgain);
-int omap_st_enable(struct omap_mcbsp *mcbsp);
-int omap_st_disable(struct omap_mcbsp *mcbsp);
-int omap_st_is_enabled(struct omap_mcbsp *mcbsp);
+int omap_mcbsp_st_init(struct platform_device *pdev);
+void omap_mcbsp_st_cleanup(struct platform_device *pdev);
-int omap_mcbsp_init(struct platform_device *pdev);
-void omap_mcbsp_cleanup(struct omap_mcbsp *mcbsp);
+int omap_mcbsp_st_start(struct omap_mcbsp *mcbsp);
+int omap_mcbsp_st_stop(struct omap_mcbsp *mcbsp);
-#endif /* __ASOC_MCBSP_H */
+#endif /* __OMAP_MCBSP_PRIV_H__ */
diff --git a/sound/soc/ti/omap-mcbsp-st.c b/sound/soc/ti/omap-mcbsp-st.c
new file mode 100644
index 000000000000..1a3fe854e856
--- /dev/null
+++ b/sound/soc/ti/omap-mcbsp-st.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * McBSP Sidetone support
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Samuel Ortiz <samuel.ortiz@nokia.com>
+ *
+ * Contact: Jarkko Nikula <jarkko.nikula@bitmer.com>
+ * Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+
+#include "omap-mcbsp.h"
+#include "omap-mcbsp-priv.h"
+
+/* OMAP3 sidetone control registers */
+#define OMAP_ST_REG_REV 0x00
+#define OMAP_ST_REG_SYSCONFIG 0x10
+#define OMAP_ST_REG_IRQSTATUS 0x18
+#define OMAP_ST_REG_IRQENABLE 0x1C
+#define OMAP_ST_REG_SGAINCR 0x24
+#define OMAP_ST_REG_SFIRCR 0x28
+#define OMAP_ST_REG_SSELCR 0x2C
+
+/********************** McBSP SSELCR bit definitions ***********************/
+#define SIDETONEEN BIT(10)
+
+/********************** McBSP Sidetone SYSCONFIG bit definitions ***********/
+#define ST_AUTOIDLE BIT(0)
+
+/********************** McBSP Sidetone SGAINCR bit definitions *************/
+#define ST_CH0GAIN(value) ((value) & 0xffff) /* Bits 0:15 */
+#define ST_CH1GAIN(value) (((value) & 0xffff) << 16) /* Bits 16:31 */
+
+/********************** McBSP Sidetone SFIRCR bit definitions **************/
+#define ST_FIRCOEFF(value) ((value) & 0xffff) /* Bits 0:15 */
+
+/********************** McBSP Sidetone SSELCR bit definitions **************/
+#define ST_SIDETONEEN BIT(0)
+#define ST_COEFFWREN BIT(1)
+#define ST_COEFFWRDONE BIT(2)
+
+struct omap_mcbsp_st_data {
+ void __iomem *io_base_st;
+ struct clk *mcbsp_iclk;
+ bool running;
+ bool enabled;
+ s16 taps[128]; /* Sidetone filter coefficients */
+ int nr_taps; /* Number of filter coefficients in use */
+ s16 ch0gain;
+ s16 ch1gain;
+};
+
+static void omap_mcbsp_st_write(struct omap_mcbsp *mcbsp, u16 reg, u32 val)
+{
+ writel_relaxed(val, mcbsp->st_data->io_base_st + reg);
+}
+
+static int omap_mcbsp_st_read(struct omap_mcbsp *mcbsp, u16 reg)
+{
+ return readl_relaxed(mcbsp->st_data->io_base_st + reg);
+}
+
+#define MCBSP_ST_READ(mcbsp, reg) omap_mcbsp_st_read(mcbsp, OMAP_ST_REG_##reg)
+#define MCBSP_ST_WRITE(mcbsp, reg, val) \
+ omap_mcbsp_st_write(mcbsp, OMAP_ST_REG_##reg, val)
+
+static void omap_mcbsp_st_on(struct omap_mcbsp *mcbsp)
+{
+ unsigned int w;
+
+ if (mcbsp->pdata->force_ick_on)
+ mcbsp->pdata->force_ick_on(mcbsp->st_data->mcbsp_iclk, true);
+
+ /* Disable Sidetone clock auto-gating for normal operation */
+ w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
+ MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w & ~(ST_AUTOIDLE));
+
+ /* Enable McBSP Sidetone */
+ w = MCBSP_READ(mcbsp, SSELCR);
+ MCBSP_WRITE(mcbsp, SSELCR, w | SIDETONEEN);
+
+ /* Enable Sidetone from Sidetone Core */
+ w = MCBSP_ST_READ(mcbsp, SSELCR);
+ MCBSP_ST_WRITE(mcbsp, SSELCR, w | ST_SIDETONEEN);
+}
+
+static void omap_mcbsp_st_off(struct omap_mcbsp *mcbsp)
+{
+ unsigned int w;
+
+ w = MCBSP_ST_READ(mcbsp, SSELCR);
+ MCBSP_ST_WRITE(mcbsp, SSELCR, w & ~(ST_SIDETONEEN));
+
+ w = MCBSP_READ(mcbsp, SSELCR);
+ MCBSP_WRITE(mcbsp, SSELCR, w & ~(SIDETONEEN));
+
+ /* Enable Sidetone clock auto-gating to reduce power consumption */
+ w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
+ MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w | ST_AUTOIDLE);
+
+ if (mcbsp->pdata->force_ick_on)
+ mcbsp->pdata->force_ick_on(mcbsp->st_data->mcbsp_iclk, false);
+}
+
+static void omap_mcbsp_st_fir_write(struct omap_mcbsp *mcbsp, s16 *fir)
+{
+ u16 val, i;
+
+ val = MCBSP_ST_READ(mcbsp, SSELCR);
+
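+ /* The coefficient write enable must see a 0 -> 1 transition, so clear it first if it was left set */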
+ if (val & ST_COEFFWREN)
+ MCBSP_ST_WRITE(mcbsp, SSELCR, val & ~(ST_COEFFWREN));
+
+ MCBSP_ST_WRITE(mcbsp, SSELCR, val | ST_COEFFWREN);
+
+ for (i = 0; i < 128; i++)
+ MCBSP_ST_WRITE(mcbsp, SFIRCR, fir[i]);
+
+ i = 0;
+
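+ /* Poll for COEFFWRDONE, bounded to 1000 reads so a stuck sidetone block cannot hang the CPU */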
+ val = MCBSP_ST_READ(mcbsp, SSELCR);
+ while (!(val & ST_COEFFWRDONE) && (++i < 1000))
+ val = MCBSP_ST_READ(mcbsp, SSELCR);
+
+ MCBSP_ST_WRITE(mcbsp, SSELCR, val & ~(ST_COEFFWREN));
+
+ if (i == 1000)
+ dev_err(mcbsp->dev, "McBSP FIR load error!\n");
+}
+
+static void omap_mcbsp_st_chgain(struct omap_mcbsp *mcbsp)
+{
+ u16 w;
+ struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
+
+ w = MCBSP_ST_READ(mcbsp, SSELCR);
+
+ MCBSP_ST_WRITE(mcbsp, SGAINCR, ST_CH0GAIN(st_data->ch0gain) |
+ ST_CH1GAIN(st_data->ch1gain));
+}
+
+static int omap_mcbsp_st_set_chgain(struct omap_mcbsp *mcbsp, int channel,
+ s16 chgain)
+{
+ struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
+ int ret = 0;
+
+ if (!st_data)
+ return -ENOENT;
+
+ spin_lock_irq(&mcbsp->lock);
+ if (channel == 0)
+ st_data->ch0gain = chgain;
+ else if (channel == 1)
+ st_data->ch1gain = chgain;
+ else
+ ret = -EINVAL;
+
+ if (st_data->enabled)
+ omap_mcbsp_st_chgain(mcbsp);
+ spin_unlock_irq(&mcbsp->lock);
+
+ return ret;
+}
+
+static int omap_mcbsp_st_get_chgain(struct omap_mcbsp *mcbsp, int channel,
+ s16 *chgain)
+{
+ struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
+ int ret = 0;
+
+ if (!st_data)
+ return -ENOENT;
+
+ spin_lock_irq(&mcbsp->lock);
+ if (channel == 0)
+ *chgain = st_data->ch0gain;
+ else if (channel == 1)
+ *chgain = st_data->ch1gain;
+ else
+ ret = -EINVAL;
+ spin_unlock_irq(&mcbsp->lock);
+
+ return ret;
+}
+
+static int omap_mcbsp_st_enable(struct omap_mcbsp *mcbsp)
+{
+ struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
+
+ if (!st_data)
+ return -ENODEV;
+
+ spin_lock_irq(&mcbsp->lock);
+ st_data->enabled = 1;
+ omap_mcbsp_st_start(mcbsp);
+ spin_unlock_irq(&mcbsp->lock);
+
+ return 0;
+}
+
+static int omap_mcbsp_st_disable(struct omap_mcbsp *mcbsp)
+{
+ struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
+ int ret = 0;
+
+ if (!st_data)
+ return -ENODEV;
+
+ spin_lock_irq(&mcbsp->lock);
+ omap_mcbsp_st_stop(mcbsp);
+ st_data->enabled = 0;
+ spin_unlock_irq(&mcbsp->lock);
+
+ return ret;
+}
+
+static int omap_mcbsp_st_is_enabled(struct omap_mcbsp *mcbsp)
+{
+ struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
+
+ if (!st_data)
+ return -ENODEV;
+
+ return st_data->enabled;
+}
+
+static ssize_t st_taps_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+ struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
+ ssize_t status = 0;
+ int i;
+
+ spin_lock_irq(&mcbsp->lock);
+ for (i = 0; i < st_data->nr_taps; i++)
+ status += sprintf(&buf[status], (i ? ", %d" : "%d"),
+ st_data->taps[i]);
+ if (i)
+ status += sprintf(&buf[status], "\n");
+ spin_unlock_irq(&mcbsp->lock);
+
+ return status;
+}
+
+static ssize_t st_taps_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+ struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
+ int val, tmp, status, i = 0;
+
+ spin_lock_irq(&mcbsp->lock);
+ memset(st_data->taps, 0, sizeof(st_data->taps));
+ st_data->nr_taps = 0;
+
+ do {
+ status = sscanf(buf, "%d%n", &val, &tmp);
+ if (status <= 0) {
+ size = -EINVAL;
+ goto out;
+ }
+ if (val < -32768 || val > 32767) {
+ size = -EINVAL;
+ goto out;
+ }
+ /* Reject input with more taps than the 128-entry filter can hold */
+ if (i == ARRAY_SIZE(st_data->taps)) {
+ size = -EINVAL;
+ goto out;
+ }
+ st_data->taps[i++] = val;
+ buf += tmp;
+ if (*buf != ',')
+ break;
+ buf++;
+ } while (1);
+
+ st_data->nr_taps = i;
+
+out:
+ spin_unlock_irq(&mcbsp->lock);
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(st_taps);
+
+static const struct attribute *sidetone_attrs[] = {
+ &dev_attr_st_taps.attr,
+ NULL,
+};
+
+static const struct attribute_group sidetone_attr_group = {
+ .attrs = (struct attribute **)sidetone_attrs,
+};
+
+int omap_mcbsp_st_start(struct omap_mcbsp *mcbsp)
+{
+ struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
+
+ if (st_data->enabled && !st_data->running) {
+ omap_mcbsp_st_fir_write(mcbsp, st_data->taps);
+ omap_mcbsp_st_chgain(mcbsp);
+
+ if (!mcbsp->free) {
+ omap_mcbsp_st_on(mcbsp);
+ st_data->running = 1;
+ }
+ }
+
+ return 0;
+}
+
+int omap_mcbsp_st_stop(struct omap_mcbsp *mcbsp)
+{
+ struct omap_mcbsp_st_data *st_data = mcbsp->st_data;
+
+ if (st_data->running) {
+ if (!mcbsp->free) {
+ omap_mcbsp_st_off(mcbsp);
+ st_data->running = 0;
+ }
+ }
+
+ return 0;
+}
+
+int omap_mcbsp_st_init(struct platform_device *pdev)
+{
+ struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
+ struct omap_mcbsp_st_data *st_data;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sidetone");
+ if (!res)
+ return 0;
+
+ st_data = devm_kzalloc(mcbsp->dev, sizeof(*mcbsp->st_data), GFP_KERNEL);
+ if (!st_data)
+ return -ENOMEM;
+
+ st_data->mcbsp_iclk = clk_get(mcbsp->dev, "ick");
+ if (IS_ERR(st_data->mcbsp_iclk)) {
+ dev_warn(mcbsp->dev,
+ "Failed to get ick, sidetone might be broken\n");
+ st_data->mcbsp_iclk = NULL;
+ }
+
+ st_data->io_base_st = devm_ioremap(mcbsp->dev, res->start,
+ resource_size(res));
+ if (!st_data->io_base_st)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(&mcbsp->dev->kobj, &sidetone_attr_group);
+ if (ret)
+ return ret;
+
+ mcbsp->st_data = st_data;
+
+ return 0;
+}
+
+void omap_mcbsp_st_cleanup(struct platform_device *pdev)
+{
+ struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
+
+ if (mcbsp->st_data) {
+ sysfs_remove_group(&mcbsp->dev->kobj, &sidetone_attr_group);
+ clk_put(mcbsp->st_data->mcbsp_iclk);
+ }
+}
+
+static int omap_mcbsp_st_info_volsw(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int max = mc->max;
+ int min = mc->min;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = min;
+ uinfo->value.integer.max = max;
+ return 0;
+}
+
+#define OMAP_MCBSP_ST_CHANNEL_VOLUME(channel) \
+static int \
+omap_mcbsp_set_st_ch##channel##_volume(struct snd_kcontrol *kc, \
+ struct snd_ctl_elem_value *uc) \
+{ \
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kc); \
+ struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai); \
+ struct soc_mixer_control *mc = \
+ (struct soc_mixer_control *)kc->private_value; \
+ int max = mc->max; \
+ int min = mc->min; \
+ int val = uc->value.integer.value[0]; \
+ \
+ if (val < min || val > max) \
+ return -EINVAL; \
+ \
+ /* OMAP McBSP implementation uses index values 0..4 */ \
+ return omap_mcbsp_st_set_chgain(mcbsp, channel, val); \
+} \
+ \
+static int \
+omap_mcbsp_get_st_ch##channel##_volume(struct snd_kcontrol *kc, \
+ struct snd_ctl_elem_value *uc) \
+{ \
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kc); \
+ struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai); \
+ s16 chgain; \
+ \
+ if (omap_mcbsp_st_get_chgain(mcbsp, channel, &chgain)) \
+ return -EAGAIN; \
+ \
+ uc->value.integer.value[0] = chgain; \
+ return 0; \
+}
+
+OMAP_MCBSP_ST_CHANNEL_VOLUME(0)
+OMAP_MCBSP_ST_CHANNEL_VOLUME(1)
+
+static int omap_mcbsp_st_put_mode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
+ u8 value = ucontrol->value.integer.value[0];
+
+ if (value == omap_mcbsp_st_is_enabled(mcbsp))
+ return 0;
+
+ if (value)
+ omap_mcbsp_st_enable(mcbsp);
+ else
+ omap_mcbsp_st_disable(mcbsp);
+
+ return 1;
+}
+
+static int omap_mcbsp_st_get_mode(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
+
+ ucontrol->value.integer.value[0] = omap_mcbsp_st_is_enabled(mcbsp);
+ return 0;
+}
+
+#define OMAP_MCBSP_SOC_SINGLE_S16_EXT(xname, xmin, xmax, \
+ xhandler_get, xhandler_put) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = omap_mcbsp_st_info_volsw, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.min = xmin, .max = xmax} }
+
+#define OMAP_MCBSP_ST_CONTROLS(port) \
+static const struct snd_kcontrol_new omap_mcbsp##port##_st_controls[] = { \
+SOC_SINGLE_EXT("McBSP" #port " Sidetone Switch", 1, 0, 1, 0, \
+ omap_mcbsp_st_get_mode, omap_mcbsp_st_put_mode), \
+OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP" #port " Sidetone Channel 0 Volume", \
+ -32768, 32767, \
+ omap_mcbsp_get_st_ch0_volume, \
+ omap_mcbsp_set_st_ch0_volume), \
+OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP" #port " Sidetone Channel 1 Volume", \
+ -32768, 32767, \
+ omap_mcbsp_get_st_ch1_volume, \
+ omap_mcbsp_set_st_ch1_volume), \
+}
+
+OMAP_MCBSP_ST_CONTROLS(2);
+OMAP_MCBSP_ST_CONTROLS(3);
+
+int omap_mcbsp_st_add_controls(struct snd_soc_pcm_runtime *rtd, int port_id)
+{
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
+
+ if (!mcbsp->st_data) {
+ dev_warn(mcbsp->dev, "No sidetone data for port\n");
+ return 0;
+ }
+
+ switch (port_id) {
+ case 2: /* McBSP 2 */
+ return snd_soc_add_dai_controls(cpu_dai,
+ omap_mcbsp2_st_controls,
+ ARRAY_SIZE(omap_mcbsp2_st_controls));
+ case 3: /* McBSP 3 */
+ return snd_soc_add_dai_controls(cpu_dai,
+ omap_mcbsp3_st_controls,
+ ARRAY_SIZE(omap_mcbsp3_st_controls));
+ default:
+ dev_err(mcbsp->dev, "Port %d not supported\n", port_id);
+ break;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(omap_mcbsp_st_add_controls);
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c
index d0ebb6b9bfac..a395598f1f20 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/ti/omap-mcbsp.c
@@ -35,21 +35,12 @@
#include <sound/soc.h>
#include <sound/dmaengine_pcm.h>
-#include <linux/platform_data/asoc-ti-mcbsp.h>
-#include "mcbsp.h"
+#include "omap-mcbsp-priv.h"
#include "omap-mcbsp.h"
#include "sdma-pcm.h"
#define OMAP_MCBSP_RATES (SNDRV_PCM_RATE_8000_96000)
-#define OMAP_MCBSP_SOC_SINGLE_S16_EXT(xname, xmin, xmax, \
- xhandler_get, xhandler_put) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
- .info = omap_mcbsp_st_info_volsw, \
- .get = xhandler_get, .put = xhandler_put, \
- .private_value = (unsigned long) &(struct soc_mixer_control) \
- {.min = xmin, .max = xmax} }
-
enum {
OMAP_MCBSP_WORD_8 = 0,
OMAP_MCBSP_WORD_12,
@@ -59,6 +50,699 @@ enum {
OMAP_MCBSP_WORD_32,
};
+static void omap_mcbsp_dump_reg(struct omap_mcbsp *mcbsp)
+{
+ dev_dbg(mcbsp->dev, "**** McBSP%d regs ****\n", mcbsp->id);
+ dev_dbg(mcbsp->dev, "DRR2: 0x%04x\n", MCBSP_READ(mcbsp, DRR2));
+ dev_dbg(mcbsp->dev, "DRR1: 0x%04x\n", MCBSP_READ(mcbsp, DRR1));
+ dev_dbg(mcbsp->dev, "DXR2: 0x%04x\n", MCBSP_READ(mcbsp, DXR2));
+ dev_dbg(mcbsp->dev, "DXR1: 0x%04x\n", MCBSP_READ(mcbsp, DXR1));
+ dev_dbg(mcbsp->dev, "SPCR2: 0x%04x\n", MCBSP_READ(mcbsp, SPCR2));
+ dev_dbg(mcbsp->dev, "SPCR1: 0x%04x\n", MCBSP_READ(mcbsp, SPCR1));
+ dev_dbg(mcbsp->dev, "RCR2: 0x%04x\n", MCBSP_READ(mcbsp, RCR2));
+ dev_dbg(mcbsp->dev, "RCR1: 0x%04x\n", MCBSP_READ(mcbsp, RCR1));
+ dev_dbg(mcbsp->dev, "XCR2: 0x%04x\n", MCBSP_READ(mcbsp, XCR2));
+ dev_dbg(mcbsp->dev, "XCR1: 0x%04x\n", MCBSP_READ(mcbsp, XCR1));
+ dev_dbg(mcbsp->dev, "SRGR2: 0x%04x\n", MCBSP_READ(mcbsp, SRGR2));
+ dev_dbg(mcbsp->dev, "SRGR1: 0x%04x\n", MCBSP_READ(mcbsp, SRGR1));
+ dev_dbg(mcbsp->dev, "PCR0: 0x%04x\n", MCBSP_READ(mcbsp, PCR0));
+ dev_dbg(mcbsp->dev, "***********************\n");
+}
+
+static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
+{
+ struct clk *fck_src;
+ const char *src;
+ int r;
+
+ if (fck_src_id == MCBSP_CLKS_PAD_SRC)
+ src = "pad_fck";
+ else if (fck_src_id == MCBSP_CLKS_PRCM_SRC)
+ src = "prcm_fck";
+ else
+ return -EINVAL;
+
+ fck_src = clk_get(mcbsp->dev, src);
+ if (IS_ERR(fck_src)) {
+ dev_err(mcbsp->dev, "CLKS: could not clk_get() %s\n", src);
+ return -EINVAL;
+ }
+
+ pm_runtime_put_sync(mcbsp->dev);
+
+ r = clk_set_parent(mcbsp->fclk, fck_src);
+ if (r) {
+ dev_err(mcbsp->dev, "CLKS: could not clk_set_parent() to %s\n",
+ src);
+ clk_put(fck_src);
+ return r;
+ }
+
+ pm_runtime_get_sync(mcbsp->dev);
+
+ clk_put(fck_src);
+
+ return 0;
+}
+
+static irqreturn_t omap_mcbsp_irq_handler(int irq, void *data)
+{
+ struct omap_mcbsp *mcbsp = data;
+ u16 irqst;
+
+ irqst = MCBSP_READ(mcbsp, IRQST);
+ dev_dbg(mcbsp->dev, "IRQ callback : 0x%x\n", irqst);
+
+ if (irqst & RSYNCERREN)
+ dev_err(mcbsp->dev, "RX Frame Sync Error!\n");
+ if (irqst & RFSREN)
+ dev_dbg(mcbsp->dev, "RX Frame Sync\n");
+ if (irqst & REOFEN)
+ dev_dbg(mcbsp->dev, "RX End Of Frame\n");
+ if (irqst & RRDYEN)
+ dev_dbg(mcbsp->dev, "RX Buffer Threshold Reached\n");
+ if (irqst & RUNDFLEN)
+ dev_err(mcbsp->dev, "RX Buffer Underflow!\n");
+ if (irqst & ROVFLEN)
+ dev_err(mcbsp->dev, "RX Buffer Overflow!\n");
+
+ if (irqst & XSYNCERREN)
+ dev_err(mcbsp->dev, "TX Frame Sync Error!\n");
+ if (irqst & XFSXEN)
+ dev_dbg(mcbsp->dev, "TX Frame Sync\n");
+ if (irqst & XEOFEN)
+ dev_dbg(mcbsp->dev, "TX End Of Frame\n");
+ if (irqst & XRDYEN)
+ dev_dbg(mcbsp->dev, "TX Buffer threshold Reached\n");
+ if (irqst & XUNDFLEN)
+ dev_err(mcbsp->dev, "TX Buffer Underflow!\n");
+ if (irqst & XOVFLEN)
+ dev_err(mcbsp->dev, "TX Buffer Overflow!\n");
+ if (irqst & XEMPTYEOFEN)
+ dev_dbg(mcbsp->dev, "TX Buffer empty at end of frame\n");
+
+ MCBSP_WRITE(mcbsp, IRQST, irqst);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *data)
+{
+ struct omap_mcbsp *mcbsp = data;
+ u16 irqst_spcr2;
+
+ irqst_spcr2 = MCBSP_READ(mcbsp, SPCR2);
+ dev_dbg(mcbsp->dev, "TX IRQ callback : 0x%x\n", irqst_spcr2);
+
+ if (irqst_spcr2 & XSYNC_ERR) {
+ dev_err(mcbsp->dev, "TX Frame Sync Error! : 0x%x\n",
+ irqst_spcr2);
+ /* Writing zero to XSYNC_ERR clears the IRQ */
+ MCBSP_WRITE(mcbsp, SPCR2, MCBSP_READ_CACHE(mcbsp, SPCR2));
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t omap_mcbsp_rx_irq_handler(int irq, void *data)
+{
+ struct omap_mcbsp *mcbsp = data;
+ u16 irqst_spcr1;
+
+ irqst_spcr1 = MCBSP_READ(mcbsp, SPCR1);
+ dev_dbg(mcbsp->dev, "RX IRQ callback : 0x%x\n", irqst_spcr1);
+
+ if (irqst_spcr1 & RSYNC_ERR) {
+ dev_err(mcbsp->dev, "RX Frame Sync Error! : 0x%x\n",
+ irqst_spcr1);
+ /* Writing zero to RSYNC_ERR clears the IRQ */
+ MCBSP_WRITE(mcbsp, SPCR1, MCBSP_READ_CACHE(mcbsp, SPCR1));
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * omap_mcbsp_config simply writes a config to the
+ * appropriate McBSP.
+ * Either call this function or set the McBSP registers
+ * yourself before calling omap_mcbsp_start().
+ */
+static void omap_mcbsp_config(struct omap_mcbsp *mcbsp,
+ const struct omap_mcbsp_reg_cfg *config)
+{
+ dev_dbg(mcbsp->dev, "Configuring McBSP%d phys_base: 0x%08lx\n",
+ mcbsp->id, mcbsp->phys_base);
+
+ /* We write the given config */
+ MCBSP_WRITE(mcbsp, SPCR2, config->spcr2);
+ MCBSP_WRITE(mcbsp, SPCR1, config->spcr1);
+ MCBSP_WRITE(mcbsp, RCR2, config->rcr2);
+ MCBSP_WRITE(mcbsp, RCR1, config->rcr1);
+ MCBSP_WRITE(mcbsp, XCR2, config->xcr2);
+ MCBSP_WRITE(mcbsp, XCR1, config->xcr1);
+ MCBSP_WRITE(mcbsp, SRGR2, config->srgr2);
+ MCBSP_WRITE(mcbsp, SRGR1, config->srgr1);
+ MCBSP_WRITE(mcbsp, MCR2, config->mcr2);
+ MCBSP_WRITE(mcbsp, MCR1, config->mcr1);
+ MCBSP_WRITE(mcbsp, PCR0, config->pcr0);
+ if (mcbsp->pdata->has_ccr) {
+ MCBSP_WRITE(mcbsp, XCCR, config->xccr);
+ MCBSP_WRITE(mcbsp, RCCR, config->rccr);
+ }
+ /* Enable wakeup behavior */
+ if (mcbsp->pdata->has_wakeup)
+ MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN);
+
+ /* Enable TX/RX sync error interrupts by default */
+ if (mcbsp->irq)
+ MCBSP_WRITE(mcbsp, IRQEN, RSYNCERREN | XSYNCERREN |
+ RUNDFLEN | ROVFLEN | XUNDFLEN | XOVFLEN);
+}
+
+/**
+ * omap_mcbsp_dma_reg_params - returns the address of mcbsp data register
+ * @mcbsp: omap_mcbsp struct for the McBSP instance
+ * @stream: Stream direction (playback/capture)
+ *
+ * Returns the address of mcbsp data transmit register or data receive register
+ * to be used by DMA for transferring/receiving data
+ */
+static int omap_mcbsp_dma_reg_params(struct omap_mcbsp *mcbsp,
+ unsigned int stream)
+{
+ int data_reg;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (mcbsp->pdata->reg_size == 2)
+ data_reg = OMAP_MCBSP_REG_DXR1;
+ else
+ data_reg = OMAP_MCBSP_REG_DXR;
+ } else {
+ if (mcbsp->pdata->reg_size == 2)
+ data_reg = OMAP_MCBSP_REG_DRR1;
+ else
+ data_reg = OMAP_MCBSP_REG_DRR;
+ }
+
+ return mcbsp->phys_dma_base + data_reg * mcbsp->pdata->reg_step;
+}
+
+/*
+ * omap_mcbsp_set_tx_threshold configures the transmit threshold in words.
+ * The threshold parameter is 1-based, and it is converted (threshold - 1)
+ * for the THRSH2 register.
+ */
+static void omap_mcbsp_set_tx_threshold(struct omap_mcbsp *mcbsp, u16 threshold)
+{
+ if (threshold && threshold <= mcbsp->max_tx_thres)
+ MCBSP_WRITE(mcbsp, THRSH2, threshold - 1);
+}
+
+/*
+ * omap_mcbsp_set_rx_threshold configures the receive threshold in words.
+ * The threshold parameter is 1-based, and it is converted (threshold - 1)
+ * for the THRSH1 register.
+ */
+static void omap_mcbsp_set_rx_threshold(struct omap_mcbsp *mcbsp, u16 threshold)
+{
+ if (threshold && threshold <= mcbsp->max_rx_thres)
+ MCBSP_WRITE(mcbsp, THRSH1, threshold - 1);
+}
+
+/*
+ * omap_mcbsp_get_tx_delay returns the number of used slots in the McBSP FIFO
+ */
+static u16 omap_mcbsp_get_tx_delay(struct omap_mcbsp *mcbsp)
+{
+ u16 buffstat;
+
+ /* Returns the number of free locations in the buffer */
+ buffstat = MCBSP_READ(mcbsp, XBUFFSTAT);
+
+ /* Number of slots are different in McBSP ports */
+ return mcbsp->pdata->buffer_size - buffstat;
+}
+
+/*
+ * omap_mcbsp_get_rx_delay returns the number of words the McBSP FIFO still
+ * needs to reach the threshold (at which point the DMA is triggered to read it)
+ */
+static u16 omap_mcbsp_get_rx_delay(struct omap_mcbsp *mcbsp)
+{
+ u16 buffstat, threshold;
+
+ /* Returns the number of used locations in the buffer */
+ buffstat = MCBSP_READ(mcbsp, RBUFFSTAT);
+ /* RX threshold */
+ threshold = MCBSP_READ(mcbsp, THRSH1);
+
+ /* Return the number of locations left until the threshold is reached */
+ if (threshold <= buffstat)
+ return 0;
+ else
+ return threshold - buffstat;
+}
+
+static int omap_mcbsp_request(struct omap_mcbsp *mcbsp)
+{
+ void *reg_cache;
+ int err;
+
+ reg_cache = kzalloc(mcbsp->reg_cache_size, GFP_KERNEL);
+ if (!reg_cache)
+ return -ENOMEM;
+
+ spin_lock(&mcbsp->lock);
+ if (!mcbsp->free) {
+ dev_err(mcbsp->dev, "McBSP%d is currently in use\n", mcbsp->id);
+ err = -EBUSY;
+ goto err_kfree;
+ }
+
+ mcbsp->free = false;
+ mcbsp->reg_cache = reg_cache;
+ spin_unlock(&mcbsp->lock);
+
+ if (mcbsp->pdata->ops && mcbsp->pdata->ops->request)
+ mcbsp->pdata->ops->request(mcbsp->id - 1);
+
+ /*
+ * Make sure that transmitter, receiver and sample-rate generator are
+ * not running before activating IRQs.
+ */
+ MCBSP_WRITE(mcbsp, SPCR1, 0);
+ MCBSP_WRITE(mcbsp, SPCR2, 0);
+
+ if (mcbsp->irq) {
+ err = request_irq(mcbsp->irq, omap_mcbsp_irq_handler, 0,
+ "McBSP", (void *)mcbsp);
+ if (err != 0) {
+ dev_err(mcbsp->dev, "Unable to request IRQ\n");
+ goto err_clk_disable;
+ }
+ } else {
+ err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler, 0,
+ "McBSP TX", (void *)mcbsp);
+ if (err != 0) {
+ dev_err(mcbsp->dev, "Unable to request TX IRQ\n");
+ goto err_clk_disable;
+ }
+
+ err = request_irq(mcbsp->rx_irq, omap_mcbsp_rx_irq_handler, 0,
+ "McBSP RX", (void *)mcbsp);
+ if (err != 0) {
+ dev_err(mcbsp->dev, "Unable to request RX IRQ\n");
+ goto err_free_irq;
+ }
+ }
+
+ return 0;
+err_free_irq:
+ free_irq(mcbsp->tx_irq, (void *)mcbsp);
+err_clk_disable:
+ if (mcbsp->pdata->ops && mcbsp->pdata->ops->free)
+ mcbsp->pdata->ops->free(mcbsp->id - 1);
+
+ /* Disable wakeup behavior */
+ if (mcbsp->pdata->has_wakeup)
+ MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
+
+ spin_lock(&mcbsp->lock);
+ mcbsp->free = true;
+ mcbsp->reg_cache = NULL;
+err_kfree:
+ spin_unlock(&mcbsp->lock);
+ kfree(reg_cache);
+
+ return err;
+}
+
+static void omap_mcbsp_free(struct omap_mcbsp *mcbsp)
+{
+ void *reg_cache;
+
+ if (mcbsp->pdata->ops && mcbsp->pdata->ops->free)
+ mcbsp->pdata->ops->free(mcbsp->id - 1);
+
+ /* Disable wakeup behavior */
+ if (mcbsp->pdata->has_wakeup)
+ MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
+
+ /* Disable interrupt requests */
+ if (mcbsp->irq)
+ MCBSP_WRITE(mcbsp, IRQEN, 0);
+
+ if (mcbsp->irq) {
+ free_irq(mcbsp->irq, (void *)mcbsp);
+ } else {
+ free_irq(mcbsp->rx_irq, (void *)mcbsp);
+ free_irq(mcbsp->tx_irq, (void *)mcbsp);
+ }
+
+ reg_cache = mcbsp->reg_cache;
+
+ /*
+ * Select CLKS source from internal source unconditionally before
+ * marking the McBSP port as free.
+ * If the external clock source via MCBSP_CLKS pin has been selected the
+ * system will refuse to enter idle if the CLKS pin source is not reset
+ * back to internal source.
+ */
+ if (!mcbsp_omap1())
+ omap2_mcbsp_set_clks_src(mcbsp, MCBSP_CLKS_PRCM_SRC);
+
+ spin_lock(&mcbsp->lock);
+ if (mcbsp->free)
+ dev_err(mcbsp->dev, "McBSP%d was not reserved\n", mcbsp->id);
+ else
+ mcbsp->free = true;
+ mcbsp->reg_cache = NULL;
+ spin_unlock(&mcbsp->lock);
+
+ kfree(reg_cache);
+}
+
+/*
+ * Here we start the McBSP, by enabling transmitter, receiver or both.
+ * If no transmitter or receiver is active prior calling, then sample-rate
+ * generator and frame sync are started.
+ */
+static void omap_mcbsp_start(struct omap_mcbsp *mcbsp, int stream)
+{
+ int tx = (stream == SNDRV_PCM_STREAM_PLAYBACK);
+ int rx = !tx;
+ int enable_srg = 0;
+ u16 w;
+
+ if (mcbsp->st_data)
+ omap_mcbsp_st_start(mcbsp);
+
+ /* Only enable SRG, if McBSP is master */
+ w = MCBSP_READ_CACHE(mcbsp, PCR0);
+ if (w & (FSXM | FSRM | CLKXM | CLKRM))
+ enable_srg = !((MCBSP_READ_CACHE(mcbsp, SPCR2) |
+ MCBSP_READ_CACHE(mcbsp, SPCR1)) & 1);
+
+ if (enable_srg) {
+ /* Start the sample generator */
+ w = MCBSP_READ_CACHE(mcbsp, SPCR2);
+ MCBSP_WRITE(mcbsp, SPCR2, w | (1 << 6));
+ }
+
+ /* Enable transmitter and receiver */
+ tx &= 1;
+ w = MCBSP_READ_CACHE(mcbsp, SPCR2);
+ MCBSP_WRITE(mcbsp, SPCR2, w | tx);
+
+ rx &= 1;
+ w = MCBSP_READ_CACHE(mcbsp, SPCR1);
+ MCBSP_WRITE(mcbsp, SPCR1, w | rx);
+
+ /*
+ * Worst case: CLKSRG*2 = 8000 kHz: (1/8000) * 2 * 2 usec
+ * REVISIT: 100us should give enough time for two CLKSRG cycles,
+ * however due to some unknown PM/clock-gating related reason it
+ * is now at 500us.
+ */
+ udelay(500);
+
+ if (enable_srg) {
+ /* Start frame sync */
+ w = MCBSP_READ_CACHE(mcbsp, SPCR2);
+ MCBSP_WRITE(mcbsp, SPCR2, w | (1 << 7));
+ }
+
+ if (mcbsp->pdata->has_ccr) {
+ /* Release the transmitter and receiver */
+ w = MCBSP_READ_CACHE(mcbsp, XCCR);
+ w &= ~(tx ? XDISABLE : 0);
+ MCBSP_WRITE(mcbsp, XCCR, w);
+ w = MCBSP_READ_CACHE(mcbsp, RCCR);
+ w &= ~(rx ? RDISABLE : 0);
+ MCBSP_WRITE(mcbsp, RCCR, w);
+ }
+
+ /* Dump McBSP Regs */
+ omap_mcbsp_dump_reg(mcbsp);
+}
+
+static void omap_mcbsp_stop(struct omap_mcbsp *mcbsp, int stream)
+{
+ int tx = (stream == SNDRV_PCM_STREAM_PLAYBACK);
+ int rx = !tx;
+ int idle;
+ u16 w;
+
+ /* Reset transmitter */
+ tx &= 1;
+ if (mcbsp->pdata->has_ccr) {
+ w = MCBSP_READ_CACHE(mcbsp, XCCR);
+ w |= (tx ? XDISABLE : 0);
+ MCBSP_WRITE(mcbsp, XCCR, w);
+ }
+ w = MCBSP_READ_CACHE(mcbsp, SPCR2);
+ MCBSP_WRITE(mcbsp, SPCR2, w & ~tx);
+
+ /* Reset receiver */
+ rx &= 1;
+ if (mcbsp->pdata->has_ccr) {
+ w = MCBSP_READ_CACHE(mcbsp, RCCR);
+ w |= (rx ? RDISABLE : 0);
+ MCBSP_WRITE(mcbsp, RCCR, w);
+ }
+ w = MCBSP_READ_CACHE(mcbsp, SPCR1);
+ MCBSP_WRITE(mcbsp, SPCR1, w & ~rx);
+
+ idle = !((MCBSP_READ_CACHE(mcbsp, SPCR2) |
+ MCBSP_READ_CACHE(mcbsp, SPCR1)) & 1);
+
+ if (idle) {
+ /* Reset the sample rate generator */
+ w = MCBSP_READ_CACHE(mcbsp, SPCR2);
+ MCBSP_WRITE(mcbsp, SPCR2, w & ~(1 << 6));
+ }
+
+ if (mcbsp->st_data)
+ omap_mcbsp_st_stop(mcbsp);
+}
+
+#define max_thres(m) (mcbsp->pdata->buffer_size)
+#define valid_threshold(m, val) ((val) <= max_thres(m))
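+/* Generate identical show/store sysfs handlers for max_tx_thres and max_rx_thres */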
+#define THRESHOLD_PROP_BUILDER(prop) \
+static ssize_t prop##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
+ \
+ return sprintf(buf, "%u\n", mcbsp->prop); \
+} \
+ \
+static ssize_t prop##_store(struct device *dev, \
+ struct device_attribute *attr, \
+ const char *buf, size_t size) \
+{ \
+ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev); \
+ unsigned long val; \
+ int status; \
+ \
+ status = kstrtoul(buf, 0, &val); \
+ if (status) \
+ return status; \
+ \
+ if (!valid_threshold(mcbsp, val)) \
+ return -EDOM; \
+ \
+ mcbsp->prop = val; \
+ return size; \
+} \
+ \
+static DEVICE_ATTR(prop, 0644, prop##_show, prop##_store)
+
+THRESHOLD_PROP_BUILDER(max_tx_thres);
+THRESHOLD_PROP_BUILDER(max_rx_thres);
+
+static const char * const dma_op_modes[] = {
+ "element", "threshold",
+};
+
+static ssize_t dma_op_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+ int dma_op_mode = mcbsp->dma_op_mode;
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dma_op_modes); i++) {
+ if (dma_op_mode == i)
+ len += sprintf(buf + len, "[%s] ", dma_op_modes[i]);
+ else
+ len += sprintf(buf + len, "%s ", dma_op_modes[i]);
+ }
+ len += sprintf(buf + len, "\n");
+
+ return len;
+}
+
+static ssize_t dma_op_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
+{
+ struct omap_mcbsp *mcbsp = dev_get_drvdata(dev);
+ int i;
+
+ i = sysfs_match_string(dma_op_modes, buf);
+ if (i < 0)
+ return i;
+
+ spin_lock_irq(&mcbsp->lock);
+ if (!mcbsp->free) {
+ size = -EBUSY;
+ goto unlock;
+ }
+ mcbsp->dma_op_mode = i;
+
+unlock:
+ spin_unlock_irq(&mcbsp->lock);
+
+ return size;
+}
+
+static DEVICE_ATTR_RW(dma_op_mode);
+
+static const struct attribute *additional_attrs[] = {
+ &dev_attr_max_tx_thres.attr,
+ &dev_attr_max_rx_thres.attr,
+ &dev_attr_dma_op_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group additional_attr_group = {
+ .attrs = (struct attribute **)additional_attrs,
+};
+
+/*
+ * McBSP1 and McBSP3 are directly mapped on 1610 and 1510.
+ * 730 has only 2 McBSP, and both of them are MPU peripherals.
+ */
+static int omap_mcbsp_init(struct platform_device *pdev)
+{
+ struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);
+ struct resource *res;
+ int ret = 0;
+
+ spin_lock_init(&mcbsp->lock);
+ mcbsp->free = true;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ mcbsp->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcbsp->io_base))
+ return PTR_ERR(mcbsp->io_base);
+
+ mcbsp->phys_base = res->start;
+ mcbsp->reg_cache_size = resource_size(res);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
+ if (!res)
+ mcbsp->phys_dma_base = mcbsp->phys_base;
+ else
+ mcbsp->phys_dma_base = res->start;
+
+ /*
+ * OMAP1 and OMAP2 use two interrupt lines: TX and RX.
+ * OMAP2430 and OMAP3 SoCs have a combined IRQ line as well.
+ * OMAP4 and newer SoCs only have the combined IRQ line.
+ * Use the combined IRQ if available since it gives better debugging
+ * possibilities.
+ */
+ mcbsp->irq = platform_get_irq_byname(pdev, "common");
+ if (mcbsp->irq == -ENXIO) {
+ mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
+
+ if (mcbsp->tx_irq == -ENXIO) {
+ mcbsp->irq = platform_get_irq(pdev, 0);
+ mcbsp->tx_irq = 0;
+ } else {
+ mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
+ mcbsp->irq = 0;
+ }
+ }
+
+ if (!pdev->dev.of_node) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
+ if (!res) {
+ dev_err(&pdev->dev, "invalid tx DMA channel\n");
+ return -ENODEV;
+ }
+ mcbsp->dma_req[0] = res->start;
+ mcbsp->dma_data[0].filter_data = &mcbsp->dma_req[0];
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
+ if (!res) {
+ dev_err(&pdev->dev, "invalid rx DMA channel\n");
+ return -ENODEV;
+ }
+ mcbsp->dma_req[1] = res->start;
+ mcbsp->dma_data[1].filter_data = &mcbsp->dma_req[1];
+ } else {
+ mcbsp->dma_data[0].filter_data = "tx";
+ mcbsp->dma_data[1].filter_data = "rx";
+ }
+
+ mcbsp->dma_data[0].addr = omap_mcbsp_dma_reg_params(mcbsp,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ mcbsp->dma_data[1].addr = omap_mcbsp_dma_reg_params(mcbsp,
+ SNDRV_PCM_STREAM_CAPTURE);
+
+ mcbsp->fclk = clk_get(&pdev->dev, "fck");
+ if (IS_ERR(mcbsp->fclk)) {
+ ret = PTR_ERR(mcbsp->fclk);
+ dev_err(mcbsp->dev, "unable to get fck: %d\n", ret);
+ return ret;
+ }
+
+ mcbsp->dma_op_mode = MCBSP_DMA_MODE_ELEMENT;
+ if (mcbsp->pdata->buffer_size) {
+ /*
+ * Initially configure the maximum thresholds to a safe value.
+ * The McBSP FIFO usage with these values should not go under
+ * 16 locations.
+ * If the whole FIFO is used without a safety buffer, then there
+ * is a possibility that the DMA will not be able to push the
+ * new data in time, causing channel shifts at runtime.
+ */
+ mcbsp->max_tx_thres = max_thres(mcbsp) - 0x10;
+ mcbsp->max_rx_thres = max_thres(mcbsp) - 0x10;
+
+ ret = sysfs_create_group(&mcbsp->dev->kobj,
+ &additional_attr_group);
+ if (ret) {
+ dev_err(mcbsp->dev,
+ "Unable to create additional controls\n");
+ goto err_thres;
+ }
+ }
+
+ ret = omap_mcbsp_st_init(pdev);
+ if (ret)
+ goto err_st;
+
+ return 0;
+
+err_st:
+ if (mcbsp->pdata->buffer_size)
+ sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group);
+err_thres:
+ clk_put(mcbsp->fclk);
+ return ret;
+}
+
/*
* Stream DMA parameters. DMA request line and port address are set runtime
* since they are different between OMAP1 and later OMAPs
@@ -71,6 +755,10 @@ static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream,
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
int words;
+ /* No need to proceed further if the McBSP has no FIFO */
+ if (mcbsp->pdata->buffer_size == 0)
+ return;
+
/*
* Configure McBSP threshold based on either:
* packet_size, when the sDMA is in packet mode, or based on the
@@ -201,27 +889,26 @@ static int omap_mcbsp_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *cpu_dai)
{
struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
- int err = 0, play = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
mcbsp->active++;
- omap_mcbsp_start(mcbsp, play, !play);
+ omap_mcbsp_start(mcbsp, substream->stream);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- omap_mcbsp_stop(mcbsp, play, !play);
+ omap_mcbsp_stop(mcbsp, substream->stream);
mcbsp->active--;
break;
default:
- err = -EINVAL;
+ return -EINVAL;
}
- return err;
+ return 0;
}
static snd_pcm_sframes_t omap_mcbsp_dai_delay(
@@ -234,6 +921,10 @@ static snd_pcm_sframes_t omap_mcbsp_dai_delay(
u16 fifo_use;
snd_pcm_sframes_t delay;
+ /* No need to proceed further if the McBSP has no FIFO */
+ if (mcbsp->pdata->buffer_size == 0)
+ return 0;
+
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
fifo_use = omap_mcbsp_get_tx_delay(mcbsp);
else
@@ -308,9 +999,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
pkt_size = channels;
}
- latency = ((((buffer_size - pkt_size) / channels) * 1000)
- / (params->rate_num / params->rate_den));
-
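+ /*
+ * Example: 1024-word FIFO, 256-word packets, 2 channels at 48 kHz:
+ * (1024 - 256) / 2 * 1000000 / 48000 = 8000 usec of headroom.
+ */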
+ latency = (buffer_size - pkt_size) / channels;
+ latency = latency * USEC_PER_SEC /
+ (params->rate_num / params->rate_den);
mcbsp->latency[substream->stream] = latency;
omap_mcbsp_set_threshold(substream, pkt_size);
@@ -649,132 +1340,6 @@ static const struct snd_soc_component_driver omap_mcbsp_component = {
.name = "omap-mcbsp",
};
-static int omap_mcbsp_st_info_volsw(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- int max = mc->max;
- int min = mc->min;
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = 1;
- uinfo->value.integer.min = min;
- uinfo->value.integer.max = max;
- return 0;
-}
-
-#define OMAP_MCBSP_ST_CHANNEL_VOLUME(channel) \
-static int \
-omap_mcbsp_set_st_ch##channel##_volume(struct snd_kcontrol *kc, \
- struct snd_ctl_elem_value *uc) \
-{ \
- struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kc); \
- struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai); \
- struct soc_mixer_control *mc = \
- (struct soc_mixer_control *)kc->private_value; \
- int max = mc->max; \
- int min = mc->min; \
- int val = uc->value.integer.value[0]; \
- \
- if (val < min || val > max) \
- return -EINVAL; \
- \
- /* OMAP McBSP implementation uses index values 0..4 */ \
- return omap_st_set_chgain(mcbsp, channel, val); \
-} \
- \
-static int \
-omap_mcbsp_get_st_ch##channel##_volume(struct snd_kcontrol *kc, \
- struct snd_ctl_elem_value *uc) \
-{ \
- struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kc); \
- struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai); \
- s16 chgain; \
- \
- if (omap_st_get_chgain(mcbsp, channel, &chgain)) \
- return -EAGAIN; \
- \
- uc->value.integer.value[0] = chgain; \
- return 0; \
-}
-
-OMAP_MCBSP_ST_CHANNEL_VOLUME(0)
-OMAP_MCBSP_ST_CHANNEL_VOLUME(1)
-
-static int omap_mcbsp_st_put_mode(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
- struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
- u8 value = ucontrol->value.integer.value[0];
-
- if (value == omap_st_is_enabled(mcbsp))
- return 0;
-
- if (value)
- omap_st_enable(mcbsp);
- else
- omap_st_disable(mcbsp);
-
- return 1;
-}
-
-static int omap_mcbsp_st_get_mode(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
- struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
-
- ucontrol->value.integer.value[0] = omap_st_is_enabled(mcbsp);
- return 0;
-}
-
-#define OMAP_MCBSP_ST_CONTROLS(port) \
-static const struct snd_kcontrol_new omap_mcbsp##port##_st_controls[] = { \
-SOC_SINGLE_EXT("McBSP" #port " Sidetone Switch", 1, 0, 1, 0, \
- omap_mcbsp_st_get_mode, omap_mcbsp_st_put_mode), \
-OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP" #port " Sidetone Channel 0 Volume", \
- -32768, 32767, \
- omap_mcbsp_get_st_ch0_volume, \
- omap_mcbsp_set_st_ch0_volume), \
-OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP" #port " Sidetone Channel 1 Volume", \
- -32768, 32767, \
- omap_mcbsp_get_st_ch1_volume, \
- omap_mcbsp_set_st_ch1_volume), \
-}
-
-OMAP_MCBSP_ST_CONTROLS(2);
-OMAP_MCBSP_ST_CONTROLS(3);
-
-int omap_mcbsp_st_add_controls(struct snd_soc_pcm_runtime *rtd, int port_id)
-{
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
-
- if (!mcbsp->st_data) {
- dev_warn(mcbsp->dev, "No sidetone data for port\n");
- return 0;
- }
-
- switch (port_id) {
- case 2: /* McBSP 2 */
- return snd_soc_add_dai_controls(cpu_dai,
- omap_mcbsp2_st_controls,
- ARRAY_SIZE(omap_mcbsp2_st_controls));
- case 3: /* McBSP 3 */
- return snd_soc_add_dai_controls(cpu_dai,
- omap_mcbsp3_st_controls,
- ARRAY_SIZE(omap_mcbsp3_st_controls));
- default:
- dev_err(mcbsp->dev, "Port %d not supported\n", port_id);
- break;
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(omap_mcbsp_st_add_controls);
-
static struct omap_mcbsp_platform_data omap2420_pdata = {
.reg_step = 4,
.reg_size = 2,
@@ -862,6 +1427,11 @@ static int asoc_mcbsp_probe(struct platform_device *pdev)
if (ret)
return ret;
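+ /* Ports with 16-bit registers (reg_size == 2, e.g. OMAP2420) cannot carry 32-bit samples */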
+ if (mcbsp->pdata->reg_size == 2) {
+ omap_mcbsp_dai.playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ omap_mcbsp_dai.capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ }
+
ret = devm_snd_soc_register_component(&pdev->dev,
&omap_mcbsp_component,
&omap_mcbsp_dai, 1);
@@ -881,7 +1451,10 @@ static int asoc_mcbsp_remove(struct platform_device *pdev)
if (pm_qos_request_active(&mcbsp->pm_qos_req))
pm_qos_remove_request(&mcbsp->pm_qos_req);
- omap_mcbsp_cleanup(mcbsp);
+ if (mcbsp->pdata->buffer_size)
+ sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group);
+
+ omap_mcbsp_st_cleanup(pdev);
clk_put(mcbsp->fclk);
diff --git a/sound/soc/omap/omap-mcbsp.h b/sound/soc/ti/omap-mcbsp.h
index 2e3369c27be3..7911d24898c9 100644
--- a/sound/soc/omap/omap-mcbsp.h
+++ b/sound/soc/ti/omap-mcbsp.h
@@ -22,8 +22,10 @@
*
*/
-#ifndef __OMAP_I2S_H__
-#define __OMAP_I2S_H__
+#ifndef __OMAP_MCBSP_H__
+#define __OMAP_MCBSP_H__
+
+#include <sound/dmaengine_pcm.h>
/* Source clocks for McBSP sample rate generator */
enum omap_mcbsp_clksrg_clk {
@@ -41,4 +43,4 @@ enum omap_mcbsp_div {
int omap_mcbsp_st_add_controls(struct snd_soc_pcm_runtime *rtd, int port_id);
-#endif
+#endif /* __OMAP_MCBSP_H__ */
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/ti/omap-mcpdm.c
index 4c1be36c2207..7d5bdc5a2890 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/ti/omap-mcpdm.c
@@ -54,6 +54,8 @@ struct omap_mcpdm {
unsigned long phys_base;
void __iomem *io_base;
int irq;
+ struct pm_qos_request pm_qos_req;
+ int latency[2];
struct mutex mutex;
@@ -277,6 +279,9 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+ int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+ int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
mutex_lock(&mcpdm->mutex);
@@ -289,6 +294,14 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
}
}
+ if (mcpdm->latency[stream2])
+ pm_qos_update_request(&mcpdm->pm_qos_req,
+ mcpdm->latency[stream2]);
+ else if (mcpdm->latency[stream1])
+ pm_qos_remove_request(&mcpdm->pm_qos_req);
+
+ mcpdm->latency[stream1] = 0;
+
mutex_unlock(&mcpdm->mutex);
}
@@ -300,7 +313,7 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
int stream = substream->stream;
struct snd_dmaengine_dai_dma_data *dma_data;
u32 threshold;
- int channels;
+ int channels, latency;
int link_mask = 0;
channels = params_channels(params);
@@ -344,14 +357,25 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
dma_data->maxburst =
(MCPDM_DN_THRES_MAX - threshold) * channels;
+ latency = threshold;
} else {
/* If playback is not running assume a stereo stream to come */
if (!mcpdm->config[!stream].link_mask)
mcpdm->config[!stream].link_mask = (0x3 << 3);
dma_data->maxburst = threshold * channels;
+ latency = (MCPDM_DN_THRES_MAX - threshold);
}
+ /*
+ * The DMA must service a request within this latency (in usec)
+ * to avoid an under/overflow.
+ */
+ mcpdm->latency[stream] = latency * USEC_PER_SEC / params_rate(params);
+
+ if (!mcpdm->latency[stream])
+ mcpdm->latency[stream] = 10;
+
/* Check if we need to restart McPDM with this stream */
if (mcpdm->config[stream].link_mask &&
mcpdm->config[stream].link_mask != link_mask)
@@ -366,6 +390,20 @@ static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+ struct pm_qos_request *pm_qos_req = &mcpdm->pm_qos_req;
+ int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+ int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ int latency = mcpdm->latency[stream2];
+
+	/* Prevent the OMAP hardware from entering off mode between FIFO fills */
+ if (!latency || mcpdm->latency[stream1] < latency)
+ latency = mcpdm->latency[stream1];
+
+ if (pm_qos_request_active(pm_qos_req))
+ pm_qos_update_request(pm_qos_req, latency);
+ else if (latency)
+ pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);
if (!omap_mcpdm_active(mcpdm)) {
omap_mcpdm_start(mcpdm);
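The prepare/shutdown/remove hooks above follow the usual PM QoS request life cycle: add the request once a latency constraint exists, update it in place as streams reconfigure, and remove it on teardown. A minimal sketch of the pattern, using the same kernel API as this tree::

    static void set_cpu_latency_bound(struct pm_qos_request *req, s32 us)
    {
    	if (pm_qos_request_active(req))
    		pm_qos_update_request(req, us);	/* tighten/relax in place */
    	else if (us)
    		pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, us);
    }

    /* ...and on teardown: */
    /* if (pm_qos_request_active(req)) pm_qos_remove_request(req); */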
@@ -427,6 +465,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
free_irq(mcpdm->irq, (void *)mcpdm);
pm_runtime_disable(mcpdm->dev);
+ if (pm_qos_request_active(&mcpdm->pm_qos_req))
+ pm_qos_remove_request(&mcpdm->pm_qos_req);
+
return 0;
}
diff --git a/sound/soc/omap/omap-mcpdm.h b/sound/soc/ti/omap-mcpdm.h
index de8cf26595b1..de8cf26595b1 100644
--- a/sound/soc/omap/omap-mcpdm.h
+++ b/sound/soc/ti/omap-mcpdm.h
diff --git a/sound/soc/omap/omap-twl4030.c b/sound/soc/ti/omap-twl4030.c
index cccc316743fa..cccc316743fa 100644
--- a/sound/soc/omap/omap-twl4030.c
+++ b/sound/soc/ti/omap-twl4030.c
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/ti/omap3pandora.c
index 4e3de712159c..4e3de712159c 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/ti/omap3pandora.c
diff --git a/sound/soc/omap/osk5912.c b/sound/soc/ti/osk5912.c
index e4096779ca05..e4096779ca05 100644
--- a/sound/soc/omap/osk5912.c
+++ b/sound/soc/ti/osk5912.c
diff --git a/sound/soc/omap/rx51.c b/sound/soc/ti/rx51.c
index 57448bd5ad77..57448bd5ad77 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/ti/rx51.c
diff --git a/sound/soc/omap/sdma-pcm.c b/sound/soc/ti/sdma-pcm.c
index 21a9c2499d48..21a9c2499d48 100644
--- a/sound/soc/omap/sdma-pcm.c
+++ b/sound/soc/ti/sdma-pcm.c
diff --git a/sound/soc/omap/sdma-pcm.h b/sound/soc/ti/sdma-pcm.h
index 34a7f90b2587..cb0627c8dd34 100644
--- a/sound/soc/omap/sdma-pcm.h
+++ b/sound/soc/ti/sdma-pcm.h
@@ -7,7 +7,7 @@
#ifndef __SDMA_PCM_H__
#define __SDMA_PCM_H__
-#if IS_ENABLED(CONFIG_SND_SDMA_SOC)
+#if IS_ENABLED(CONFIG_SND_SOC_TI_SDMA_PCM)
int sdma_pcm_platform_register(struct device *dev,
char *txdmachan, char *rxdmachan);
#else
@@ -16,6 +16,6 @@ static inline int sdma_pcm_platform_register(struct device *dev,
{
return -ENODEV;
}
-#endif /* CONFIG_SND_SDMA_SOC */
+#endif /* CONFIG_SND_SOC_TI_SDMA_PCM */
#endif /* __SDMA_PCM_H__ */
diff --git a/sound/soc/xilinx/Kconfig b/sound/soc/xilinx/Kconfig
new file mode 100644
index 000000000000..25e287feb58c
--- /dev/null
+++ b/sound/soc/xilinx/Kconfig
@@ -0,0 +1,8 @@
+config SND_SOC_XILINX_I2S
+	tristate "Audio support for the Xilinx I2S"
+ help
+	  Select this option to enable Xilinx I2S Audio. This enables
+	  I2S playback and capture using the Xilinx soft IP. In transmitter
+	  mode, the IP receives audio in AES format, extracts the PCM
+	  payload and sends out PCM data. In receiver mode, the IP receives
+	  PCM audio, encapsulates it in AES format and sends out AES data.
diff --git a/sound/soc/xilinx/Makefile b/sound/soc/xilinx/Makefile
new file mode 100644
index 000000000000..6c1209b9ee75
--- /dev/null
+++ b/sound/soc/xilinx/Makefile
@@ -0,0 +1,2 @@
+snd-soc-xlnx-i2s-objs := xlnx_i2s.o
+obj-$(CONFIG_SND_SOC_XILINX_I2S) += snd-soc-xlnx-i2s.o
diff --git a/sound/soc/xilinx/xlnx_i2s.c b/sound/soc/xilinx/xlnx_i2s.c
new file mode 100644
index 000000000000..d4ae9eff41ce
--- /dev/null
+++ b/sound/soc/xilinx/xlnx_i2s.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ASoC I2S audio support
+ *
+ * Copyright (C) 2018 Xilinx, Inc.
+ *
+ * Author: Praveen Vuppala <praveenv@xilinx.com>
+ * Author: Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#define DRV_NAME "xlnx_i2s"
+
+#define I2S_CORE_CTRL_OFFSET 0x08
+#define I2S_I2STIM_OFFSET 0x20
+#define I2S_CH0_OFFSET 0x30
+#define I2S_I2STIM_VALID_MASK GENMASK(7, 0)
+
+static int xlnx_i2s_set_sclkout_div(struct snd_soc_dai *cpu_dai,
+ int div_id, int div)
+{
+ void __iomem *base = snd_soc_dai_get_drvdata(cpu_dai);
+
+ if (!div || (div & ~I2S_I2STIM_VALID_MASK))
+ return -EINVAL;
+
+ writel(div, base + I2S_I2STIM_OFFSET);
+
+ return 0;
+}
+
+static int xlnx_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *i2s_dai)
+{
+ u32 reg_off, chan_id;
+ void __iomem *base = snd_soc_dai_get_drvdata(i2s_dai);
+
+ chan_id = params_channels(params) / 2;
+
+ while (chan_id > 0) {
+ reg_off = I2S_CH0_OFFSET + ((chan_id - 1) * 4);
+ writel(chan_id, base + reg_off);
+ chan_id--;
+ }
+
+ return 0;
+}
+
+static int xlnx_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *i2s_dai)
+{
+ void __iomem *base = snd_soc_dai_get_drvdata(i2s_dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ writel(1, base + I2S_CORE_CTRL_OFFSET);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ writel(0, base + I2S_CORE_CTRL_OFFSET);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops xlnx_i2s_dai_ops = {
+ .trigger = xlnx_i2s_trigger,
+ .set_clkdiv = xlnx_i2s_set_sclkout_div,
+ .hw_params = xlnx_i2s_hw_params
+};
+
+static const struct snd_soc_component_driver xlnx_i2s_component = {
+ .name = DRV_NAME,
+};
+
+static const struct of_device_id xlnx_i2s_of_match[] = {
+ { .compatible = "xlnx,i2s-transmitter-1.0", },
+ { .compatible = "xlnx,i2s-receiver-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xlnx_i2s_of_match);
+
+static int xlnx_i2s_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ void __iomem *base;
+ struct snd_soc_dai_driver *dai_drv;
+ int ret;
+ u32 ch, format, data_width;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+
+ dai_drv = devm_kzalloc(&pdev->dev, sizeof(*dai_drv), GFP_KERNEL);
+ if (!dai_drv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ ret = of_property_read_u32(node, "xlnx,num-channels", &ch);
+ if (ret < 0) {
+ dev_err(dev, "cannot get supported channels\n");
+ return ret;
+ }
+ ch = ch * 2;
+
+ ret = of_property_read_u32(node, "xlnx,dwidth", &data_width);
+ if (ret < 0) {
+ dev_err(dev, "cannot get data width\n");
+ return ret;
+ }
+ switch (data_width) {
+ case 16:
+ format = SNDRV_PCM_FMTBIT_S16_LE;
+ break;
+ case 24:
+ format = SNDRV_PCM_FMTBIT_S24_LE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (of_device_is_compatible(node, "xlnx,i2s-transmitter-1.0")) {
+ dai_drv->name = "xlnx_i2s_playback";
+ dai_drv->playback.stream_name = "Playback";
+ dai_drv->playback.formats = format;
+ dai_drv->playback.channels_min = ch;
+ dai_drv->playback.channels_max = ch;
+ dai_drv->playback.rates = SNDRV_PCM_RATE_8000_192000;
+ dai_drv->ops = &xlnx_i2s_dai_ops;
+ } else if (of_device_is_compatible(node, "xlnx,i2s-receiver-1.0")) {
+ dai_drv->name = "xlnx_i2s_capture";
+ dai_drv->capture.stream_name = "Capture";
+ dai_drv->capture.formats = format;
+ dai_drv->capture.channels_min = ch;
+ dai_drv->capture.channels_max = ch;
+ dai_drv->capture.rates = SNDRV_PCM_RATE_8000_192000;
+ dai_drv->ops = &xlnx_i2s_dai_ops;
+ } else {
+ return -ENODEV;
+ }
+
+ dev_set_drvdata(&pdev->dev, base);
+
+ ret = devm_snd_soc_register_component(&pdev->dev, &xlnx_i2s_component,
+ dai_drv, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "i2s component registration failed\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "%s DAI registered\n", dai_drv->name);
+
+ return ret;
+}
+
+static struct platform_driver xlnx_i2s_aud_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = xlnx_i2s_of_match,
+ },
+ .probe = xlnx_i2s_probe,
+};
+
+module_platform_driver(xlnx_i2s_aud_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Praveen Vuppala <praveenv@xilinx.com>");
+MODULE_AUTHOR("Maruthi Srinivas Bayyavarapu <maruthis@xilinx.com>");
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index e73c962590eb..883678ee971c 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -1146,10 +1146,8 @@ static int snd_cs4231_playback_open(struct snd_pcm_substream *substream)
runtime->hw = snd_cs4231_playback;
err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
- if (err < 0) {
- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+ if (err < 0)
return err;
- }
chip->playback_substream = substream;
chip->p_periods_sent = 0;
snd_pcm_set_sync(substream);
@@ -1167,10 +1165,8 @@ static int snd_cs4231_capture_open(struct snd_pcm_substream *substream)
runtime->hw = snd_cs4231_capture;
err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
- if (err < 0) {
- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+ if (err < 0)
return err;
- }
chip->capture_substream = substream;
chip->c_periods_sent = 0;
snd_pcm_set_sync(substream);
@@ -2075,12 +2071,12 @@ static int cs4231_ebus_probe(struct platform_device *op)
static int cs4231_probe(struct platform_device *op)
{
#ifdef EBUS_SUPPORT
- if (!strcmp(op->dev.of_node->parent->name, "ebus"))
+ if (of_node_name_eq(op->dev.of_node->parent, "ebus"))
return cs4231_ebus_probe(op);
#endif
#ifdef SBUS_SUPPORT
- if (!strcmp(op->dev.of_node->parent->name, "sbus") ||
- !strcmp(op->dev.of_node->parent->name, "sbi"))
+ if (of_node_name_eq(op->dev.of_node->parent, "sbus") ||
+ of_node_name_eq(op->dev.of_node->parent, "sbi"))
return cs4231_sbus_probe(op);
#endif
return -ENODEV;
diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
index e557946718a9..d9fcae071b47 100644
--- a/sound/synth/emux/emux_hwdep.c
+++ b/sound/synth/emux/emux_hwdep.c
@@ -22,9 +22,9 @@
#include <sound/core.h>
#include <sound/hwdep.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
#include "emux_voice.h"
-
#define TMP_CLIENT_ID 0x1001
/*
@@ -66,13 +66,16 @@ snd_emux_hwdep_misc_mode(struct snd_emux *emu, void __user *arg)
return -EFAULT;
if (info.mode < 0 || info.mode >= EMUX_MD_END)
return -EINVAL;
+ info.mode = array_index_nospec(info.mode, EMUX_MD_END);
if (info.port < 0) {
for (i = 0; i < emu->num_ports; i++)
emu->portptrs[i]->ctrls[info.mode] = info.value;
} else {
- if (info.port < emu->num_ports)
+ if (info.port < emu->num_ports) {
+ info.port = array_index_nospec(info.port, emu->num_ports);
emu->portptrs[info.port]->ctrls[info.mode] = info.value;
+ }
}
return 0;
}
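The emux hunk above is the standard Spectre-v1 hardening pattern: bounds-check the untrusted index, then clamp it with array_index_nospec() so a speculatively executed out-of-bounds load cannot leak data even when the branch mispredicts. A minimal sketch of the pattern, with a hypothetical array and bound::

    #include <linux/nospec.h>

    int read_entry(const int *table, unsigned int idx, unsigned int size)
    {
    	if (idx >= size)
    		return -EINVAL;
    	/* clamp idx under speculation as well as architecturally */
    	idx = array_index_nospec(idx, size);
    	return table[idx];
    }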
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 2bfe4e80a6b9..a105947eaf55 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -682,9 +682,12 @@ static int usb_audio_probe(struct usb_interface *intf,
__error:
if (chip) {
+		/* chip->active lives inside the chip->card object, so
+		 * decrement it before that memory is possibly freed.
+		 */
+ atomic_dec(&chip->active);
if (!chip->num_interfaces)
snd_card_free(chip->card);
- atomic_dec(&chip->active);
}
mutex_unlock(&register_mutex);
return err;
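The reordering matters because snd_card_free() may release the memory that contains chip->active, so decrementing afterwards touches freed memory. In outline::

    /* Wrong (the old order): card memory may already be freed here */
    snd_card_free(chip->card);
    atomic_dec(&chip->active);		/* potential use-after-free */

    /* Right (the new order): drop the reference first */
    atomic_dec(&chip->active);
    if (!chip->num_interfaces)
    	snd_card_free(chip->card);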
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 849953e5775c..37fc0447c071 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3382,5 +3382,15 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.ifnum = QUIRK_NO_INTERFACE
}
},
+/* Dell WD19 Dock */
+{
+ USB_DEVICE(0x0bda, 0x402e),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .vendor_name = "Dell",
+ .product_name = "WD19 Dock",
+ .profile_name = "Dell-WD15-Dock",
+ .ifnum = QUIRK_NO_INTERFACE
+ }
+},
#undef USB_DEVICE_VENDOR_SPEC
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 8a945ece9869..96340f23f86d 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -19,6 +19,7 @@
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/midi.h>
+#include <linux/bits.h>
#include <sound/control.h>
#include <sound/core.h>
@@ -668,15 +669,133 @@ static int snd_usb_cm106_boot_quirk(struct usb_device *dev)
}
/*
- * C-Media CM6206 is based on CM106 with two additional
- * registers that are not documented in the data sheet.
- * Values here are chosen based on sniffing USB traffic
- * under Windows.
+ * CM6206 registers from the CM6206 datasheet rev 2.1
*/
+#define CM6206_REG0_DMA_MASTER BIT(15)
+#define CM6206_REG0_SPDIFO_RATE_48K (2 << 12)
+#define CM6206_REG0_SPDIFO_RATE_96K (7 << 12)
+/* Bits 4 through 11 are the S/PDIF category code */
+#define CM6206_REG0_SPDIFO_CAT_CODE_GENERAL (0 << 4)
+#define CM6206_REG0_SPDIFO_EMPHASIS_CD BIT(3)
+#define CM6206_REG0_SPDIFO_COPYRIGHT_NA BIT(2)
+#define CM6206_REG0_SPDIFO_NON_AUDIO BIT(1)
+#define CM6206_REG0_SPDIFO_PRO_FORMAT BIT(0)
+
+#define CM6206_REG1_TEST_SEL_CLK BIT(14)
+#define CM6206_REG1_PLLBIN_EN BIT(13)
+#define CM6206_REG1_SOFT_MUTE_EN BIT(12)
+#define CM6206_REG1_GPIO4_OUT BIT(11)
+#define CM6206_REG1_GPIO4_OE BIT(10)
+#define CM6206_REG1_GPIO3_OUT BIT(9)
+#define CM6206_REG1_GPIO3_OE BIT(8)
+#define CM6206_REG1_GPIO2_OUT BIT(7)
+#define CM6206_REG1_GPIO2_OE BIT(6)
+#define CM6206_REG1_GPIO1_OUT BIT(5)
+#define CM6206_REG1_GPIO1_OE BIT(4)
+#define CM6206_REG1_SPDIFO_INVALID BIT(3)
+#define CM6206_REG1_SPDIF_LOOP_EN BIT(2)
+#define CM6206_REG1_SPDIFO_DIS BIT(1)
+#define CM6206_REG1_SPDIFI_MIX BIT(0)
+
+#define CM6206_REG2_DRIVER_ON BIT(15)
+#define CM6206_REG2_HEADP_SEL_SIDE_CHANNELS (0 << 13)
+#define CM6206_REG2_HEADP_SEL_SURROUND_CHANNELS (1 << 13)
+#define CM6206_REG2_HEADP_SEL_CENTER_SUBW (2 << 13)
+#define CM6206_REG2_HEADP_SEL_FRONT_CHANNELS (3 << 13)
+#define CM6206_REG2_MUTE_HEADPHONE_RIGHT BIT(12)
+#define CM6206_REG2_MUTE_HEADPHONE_LEFT BIT(11)
+#define CM6206_REG2_MUTE_REAR_SURROUND_RIGHT BIT(10)
+#define CM6206_REG2_MUTE_REAR_SURROUND_LEFT BIT(9)
+#define CM6206_REG2_MUTE_SIDE_SURROUND_RIGHT BIT(8)
+#define CM6206_REG2_MUTE_SIDE_SURROUND_LEFT BIT(7)
+#define CM6206_REG2_MUTE_SUBWOOFER BIT(6)
+#define CM6206_REG2_MUTE_CENTER BIT(5)
+#define CM6206_REG2_MUTE_RIGHT_FRONT		BIT(4)
+#define CM6206_REG2_MUTE_LEFT_FRONT		BIT(3)
+#define CM6206_REG2_EN_BTL BIT(2)
+#define CM6206_REG2_MCUCLKSEL_1_5_MHZ (0)
+#define CM6206_REG2_MCUCLKSEL_3_MHZ (1)
+#define CM6206_REG2_MCUCLKSEL_6_MHZ (2)
+#define CM6206_REG2_MCUCLKSEL_12_MHZ (3)
+
+/* Bits 11..13 set the sensitivity of the FLY tuner volume control VP/VD signal */
+#define CM6206_REG3_FLYSPEED_DEFAULT (2 << 11)
+#define CM6206_REG3_VRAP25EN BIT(10)
+#define CM6206_REG3_MSEL1 BIT(9)
+#define CM6206_REG3_SPDIFI_RATE_44_1K	(0 << 7)
+#define CM6206_REG3_SPDIFI_RATE_48K	(2 << 7)
+#define CM6206_REG3_SPDIFI_RATE_32K	(3 << 7)
+#define CM6206_REG3_PINSEL BIT(6)
+#define CM6206_REG3_FOE BIT(5)
+#define CM6206_REG3_ROE BIT(4)
+#define CM6206_REG3_CBOE BIT(3)
+#define CM6206_REG3_LOSE BIT(2)
+#define CM6206_REG3_HPOE BIT(1)
+#define CM6206_REG3_SPDIFI_CANREC BIT(0)
+
+#define CM6206_REG5_DA_RSTN BIT(13)
+#define CM6206_REG5_AD_RSTN BIT(12)
+#define CM6206_REG5_SPDIFO_AD2SPDO	BIT(11)
+#define CM6206_REG5_SPDIFO_SEL_FRONT (0 << 9)
+#define CM6206_REG5_SPDIFO_SEL_SIDE_SUR (1 << 9)
+#define CM6206_REG5_SPDIFO_SEL_CEN_LFE (2 << 9)
+#define CM6206_REG5_SPDIFO_SEL_REAR_SUR (3 << 9)
+#define CM6206_REG5_CODECM BIT(8)
+#define CM6206_REG5_EN_HPF BIT(7)
+#define CM6206_REG5_T_SEL_DSDA4 BIT(6)
+#define CM6206_REG5_T_SEL_DSDA3 BIT(5)
+#define CM6206_REG5_T_SEL_DSDA2 BIT(4)
+#define CM6206_REG5_T_SEL_DSDA1 BIT(3)
+#define CM6206_REG5_T_SEL_DSDAD_NORMAL 0
+#define CM6206_REG5_T_SEL_DSDAD_FRONT 4
+#define CM6206_REG5_T_SEL_DSDAD_S_SURROUND 5
+#define CM6206_REG5_T_SEL_DSDAD_CEN_LFE 6
+#define CM6206_REG5_T_SEL_DSDAD_R_SURROUND 7
+
static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
{
int err = 0, reg;
- int val[] = {0x2004, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000};
+ int val[] = {
+ /*
+ * Values here are chosen based on sniffing USB traffic
+ * under Windows.
+ *
+	 * REG0: S/PDIF output sample rate 48 kHz, copyright not asserted
+ */
+ CM6206_REG0_SPDIFO_RATE_48K |
+ CM6206_REG0_SPDIFO_COPYRIGHT_NA,
+ /*
+ * REG1: PLL binary search enable, soft mute enable.
+ */
+ CM6206_REG1_PLLBIN_EN |
+		CM6206_REG1_SOFT_MUTE_EN,
+ /*
+ * REG2: enable output drivers,
+ * select front channels to the headphone output,
+ * then mute the headphone channels, run the MCU
+ * at 1.5 MHz.
+ */
+ CM6206_REG2_DRIVER_ON |
+ CM6206_REG2_HEADP_SEL_FRONT_CHANNELS |
+ CM6206_REG2_MUTE_HEADPHONE_RIGHT |
+ CM6206_REG2_MUTE_HEADPHONE_LEFT,
+ /*
+	 * REG3: default flyspeed, set 2.5 V mic bias,
+	 * enable all line out ports and enable S/PDIF
+ */
+ CM6206_REG3_FLYSPEED_DEFAULT |
+ CM6206_REG3_VRAP25EN |
+ CM6206_REG3_FOE |
+ CM6206_REG3_ROE |
+ CM6206_REG3_CBOE |
+ CM6206_REG3_LOSE |
+ CM6206_REG3_HPOE |
+ CM6206_REG3_SPDIFI_CANREC,
+ /* REG4 is just a bunch of GPIO lines */
+ 0x0000,
+ /* REG5: de-assert AD/DA reset signals */
+ CM6206_REG5_DA_RSTN |
+ CM6206_REG5_AD_RSTN };
for (reg = 0; reg < ARRAY_SIZE(val); reg++) {
err = snd_usb_cm106_write_int_reg(dev, reg, val[reg]);
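For cross-checking, the words composed from the macros above reproduce the magic numbers the old code carried (0x2004, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000). A compile-time check that could be dropped into the quirk function, assuming the macro values shown::

    BUILD_BUG_ON((CM6206_REG0_SPDIFO_RATE_48K |
    	      CM6206_REG0_SPDIFO_COPYRIGHT_NA) != 0x2004);
    BUILD_BUG_ON((CM6206_REG2_DRIVER_ON |
    	      CM6206_REG2_HEADP_SEL_FRONT_CHANNELS |
    	      CM6206_REG2_MUTE_HEADPHONE_RIGHT |
    	      CM6206_REG2_MUTE_HEADPHONE_LEFT) != 0xf800);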
@@ -1373,6 +1492,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
+ case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index 83d76c345940..00c92eb854ce 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -1648,7 +1648,7 @@ static int had_create_jack(struct snd_intelhad *ctx,
* PM callbacks
*/
-static int hdmi_lpe_audio_runtime_suspend(struct device *dev)
+static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
{
struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
int port;
@@ -1664,23 +1664,8 @@ static int hdmi_lpe_audio_runtime_suspend(struct device *dev)
}
}
- return 0;
-}
-
-static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
-{
- struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
- int err;
+ snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
- err = hdmi_lpe_audio_runtime_suspend(dev);
- if (!err)
- snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
- return err;
-}
-
-static int hdmi_lpe_audio_runtime_resume(struct device *dev)
-{
- pm_runtime_mark_last_busy(dev);
return 0;
}
@@ -1688,8 +1673,10 @@ static int __maybe_unused hdmi_lpe_audio_resume(struct device *dev)
{
struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
- hdmi_lpe_audio_runtime_resume(dev);
+ pm_runtime_mark_last_busy(dev);
+
snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D0);
+
return 0;
}
@@ -1877,7 +1864,6 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
dev_dbg(&pdev->dev, "%s: handle pending notification\n", __func__);
for_each_port(card_ctx, port) {
@@ -1908,8 +1894,6 @@ static int hdmi_lpe_audio_remove(struct platform_device *pdev)
static const struct dev_pm_ops hdmi_lpe_audio_pm = {
SET_SYSTEM_SLEEP_PM_OPS(hdmi_lpe_audio_suspend, hdmi_lpe_audio_resume)
- SET_RUNTIME_PM_OPS(hdmi_lpe_audio_runtime_suspend,
- hdmi_lpe_audio_runtime_resume, NULL)
};
static struct platform_driver hdmi_lpe_audio_driver = {
diff --git a/sound/xen/Kconfig b/sound/xen/Kconfig
index 4f1fceea82d2..e4d7beb4df1c 100644
--- a/sound/xen/Kconfig
+++ b/sound/xen/Kconfig
@@ -5,6 +5,7 @@ config SND_XEN_FRONTEND
depends on XEN
select SND_PCM
select XEN_XENBUS_FRONTEND
+ select XEN_FRONT_PGDIR_SHBUF
help
Choose this option if you want to enable a para-virtualized
frontend sound driver for Xen guest OSes.
diff --git a/sound/xen/Makefile b/sound/xen/Makefile
index 1e6470ecc2f2..24031775b715 100644
--- a/sound/xen/Makefile
+++ b/sound/xen/Makefile
@@ -3,7 +3,6 @@
snd_xen_front-objs := xen_snd_front.o \
xen_snd_front_cfg.o \
xen_snd_front_evtchnl.o \
- xen_snd_front_shbuf.o \
xen_snd_front_alsa.o
obj-$(CONFIG_SND_XEN_FRONTEND) += snd_xen_front.o
diff --git a/sound/xen/xen_snd_front.c b/sound/xen/xen_snd_front.c
index b089b13b5160..a9e5c2cd7698 100644
--- a/sound/xen/xen_snd_front.c
+++ b/sound/xen/xen_snd_front.c
@@ -16,12 +16,12 @@
#include <xen/xen.h>
#include <xen/xenbus.h>
+#include <xen/xen-front-pgdir-shbuf.h>
#include <xen/interface/io/sndif.h>
#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_evtchnl.h"
-#include "xen_snd_front_shbuf.h"
static struct xensnd_req *
be_stream_prepare_req(struct xen_snd_front_evtchnl *evtchnl, u8 operation)
@@ -82,7 +82,7 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
}
int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
- struct xen_snd_front_shbuf *sh_buf,
+ struct xen_front_pgdir_shbuf *shbuf,
u8 format, unsigned int channels,
unsigned int rate, u32 buffer_sz,
u32 period_sz)
@@ -99,7 +99,8 @@ int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
req->op.open.pcm_rate = rate;
req->op.open.buffer_sz = buffer_sz;
req->op.open.period_sz = period_sz;
- req->op.open.gref_directory = xen_snd_front_shbuf_get_dir_start(sh_buf);
+ req->op.open.gref_directory =
+ xen_front_pgdir_shbuf_get_dir_start(shbuf);
mutex_unlock(&evtchnl->ring_io_lock);
ret = be_stream_do_io(evtchnl);
diff --git a/sound/xen/xen_snd_front.h b/sound/xen/xen_snd_front.h
index a2ea2463bcc5..05611f113b94 100644
--- a/sound/xen/xen_snd_front.h
+++ b/sound/xen/xen_snd_front.h
@@ -16,7 +16,7 @@
struct xen_snd_front_card_info;
struct xen_snd_front_evtchnl;
struct xen_snd_front_evtchnl_pair;
-struct xen_snd_front_shbuf;
+struct xen_front_pgdir_shbuf;
struct xensnd_query_hw_param;
struct xen_snd_front_info {
@@ -35,7 +35,7 @@ int xen_snd_front_stream_query_hw_param(struct xen_snd_front_evtchnl *evtchnl,
struct xensnd_query_hw_param *hw_param_resp);
int xen_snd_front_stream_prepare(struct xen_snd_front_evtchnl *evtchnl,
- struct xen_snd_front_shbuf *sh_buf,
+ struct xen_front_pgdir_shbuf *shbuf,
u8 format, unsigned int channels,
unsigned int rate, u32 buffer_sz,
u32 period_sz);
diff --git a/sound/xen/xen_snd_front_alsa.c b/sound/xen/xen_snd_front_alsa.c
index 2cbd9679aca1..a7f413cb704d 100644
--- a/sound/xen/xen_snd_front_alsa.c
+++ b/sound/xen/xen_snd_front_alsa.c
@@ -15,17 +15,24 @@
#include <sound/pcm_params.h>
#include <xen/xenbus.h>
+#include <xen/xen-front-pgdir-shbuf.h>
#include "xen_snd_front.h"
#include "xen_snd_front_alsa.h"
#include "xen_snd_front_cfg.h"
#include "xen_snd_front_evtchnl.h"
-#include "xen_snd_front_shbuf.h"
struct xen_snd_front_pcm_stream_info {
struct xen_snd_front_info *front_info;
struct xen_snd_front_evtchnl_pair *evt_pair;
- struct xen_snd_front_shbuf sh_buf;
+
+ /* This is the shared buffer with its backing storage. */
+ struct xen_front_pgdir_shbuf shbuf;
+ u8 *buffer;
+ size_t buffer_sz;
+ int num_pages;
+ struct page **pages;
+
int index;
bool is_open;
@@ -214,12 +221,20 @@ static void stream_clear(struct xen_snd_front_pcm_stream_info *stream)
stream->out_frames = 0;
atomic_set(&stream->hw_ptr, 0);
xen_snd_front_evtchnl_pair_clear(stream->evt_pair);
- xen_snd_front_shbuf_clear(&stream->sh_buf);
+ memset(&stream->shbuf, 0, sizeof(stream->shbuf));
+ stream->buffer = NULL;
+ stream->buffer_sz = 0;
+ stream->pages = NULL;
+ stream->num_pages = 0;
}
static void stream_free(struct xen_snd_front_pcm_stream_info *stream)
{
- xen_snd_front_shbuf_free(&stream->sh_buf);
+ xen_front_pgdir_shbuf_unmap(&stream->shbuf);
+ xen_front_pgdir_shbuf_free(&stream->shbuf);
+ if (stream->buffer)
+ free_pages_exact(stream->buffer, stream->buffer_sz);
+ kfree(stream->pages);
stream_clear(stream);
}
@@ -421,10 +436,34 @@ static int alsa_close(struct snd_pcm_substream *substream)
return 0;
}
+static int shbuf_setup_backstore(struct xen_snd_front_pcm_stream_info *stream,
+ size_t buffer_sz)
+{
+ int i;
+
+	stream->buffer_sz = buffer_sz;
+	stream->buffer = alloc_pages_exact(stream->buffer_sz, GFP_KERNEL);
+	if (!stream->buffer)
+		return -ENOMEM;
+
+ stream->num_pages = DIV_ROUND_UP(stream->buffer_sz, PAGE_SIZE);
+ stream->pages = kcalloc(stream->num_pages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!stream->pages)
+ return -ENOMEM;
+
+ for (i = 0; i < stream->num_pages; i++)
+ stream->pages[i] = virt_to_page(stream->buffer + i * PAGE_SIZE);
+
+ return 0;
+}
+
static int alsa_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
+ struct xen_snd_front_info *front_info = stream->front_info;
+ struct xen_front_pgdir_shbuf_cfg buf_cfg;
int ret;
/*
@@ -432,19 +471,32 @@ static int alsa_hw_params(struct snd_pcm_substream *substream,
* so free the previously allocated shared buffer if any.
*/
stream_free(stream);
+ ret = shbuf_setup_backstore(stream, params_buffer_bytes(params));
+ if (ret < 0)
+ goto fail;
- ret = xen_snd_front_shbuf_alloc(stream->front_info->xb_dev,
- &stream->sh_buf,
- params_buffer_bytes(params));
- if (ret < 0) {
- stream_free(stream);
- dev_err(&stream->front_info->xb_dev->dev,
- "Failed to allocate buffers for stream with index %d\n",
- stream->index);
- return ret;
- }
+ memset(&buf_cfg, 0, sizeof(buf_cfg));
+ buf_cfg.xb_dev = front_info->xb_dev;
+ buf_cfg.pgdir = &stream->shbuf;
+ buf_cfg.num_pages = stream->num_pages;
+ buf_cfg.pages = stream->pages;
+
+ ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
+ if (ret < 0)
+ goto fail;
+
+ ret = xen_front_pgdir_shbuf_map(&stream->shbuf);
+ if (ret < 0)
+ goto fail;
return 0;
+
+fail:
+ stream_free(stream);
+ dev_err(&front_info->xb_dev->dev,
+ "Failed to allocate buffers for stream with index %d\n",
+ stream->index);
+ return ret;
}
static int alsa_hw_free(struct snd_pcm_substream *substream)
@@ -476,7 +528,7 @@ static int alsa_prepare(struct snd_pcm_substream *substream)
sndif_format = ret;
ret = xen_snd_front_stream_prepare(&stream->evt_pair->req,
- &stream->sh_buf,
+ &stream->shbuf,
sndif_format,
runtime->channels,
runtime->rate,
@@ -556,10 +608,10 @@ static int alsa_pb_copy_user(struct snd_pcm_substream *substream,
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
- if (unlikely(pos + count > stream->sh_buf.buffer_sz))
+ if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
- if (copy_from_user(stream->sh_buf.buffer + pos, src, count))
+ if (copy_from_user(stream->buffer + pos, src, count))
return -EFAULT;
return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
@@ -571,10 +623,10 @@ static int alsa_pb_copy_kernel(struct snd_pcm_substream *substream,
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
- if (unlikely(pos + count > stream->sh_buf.buffer_sz))
+ if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
- memcpy(stream->sh_buf.buffer + pos, src, count);
+ memcpy(stream->buffer + pos, src, count);
return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
}
@@ -586,14 +638,14 @@ static int alsa_cap_copy_user(struct snd_pcm_substream *substream,
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
int ret;
- if (unlikely(pos + count > stream->sh_buf.buffer_sz))
+ if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
ret = xen_snd_front_stream_read(&stream->evt_pair->req, pos, count);
if (ret < 0)
return ret;
- return copy_to_user(dst, stream->sh_buf.buffer + pos, count) ?
+ return copy_to_user(dst, stream->buffer + pos, count) ?
-EFAULT : 0;
}
@@ -604,14 +656,14 @@ static int alsa_cap_copy_kernel(struct snd_pcm_substream *substream,
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
int ret;
- if (unlikely(pos + count > stream->sh_buf.buffer_sz))
+ if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
ret = xen_snd_front_stream_read(&stream->evt_pair->req, pos, count);
if (ret < 0)
return ret;
- memcpy(dst, stream->sh_buf.buffer + pos, count);
+ memcpy(dst, stream->buffer + pos, count);
return 0;
}
@@ -622,10 +674,10 @@ static int alsa_pb_fill_silence(struct snd_pcm_substream *substream,
{
struct xen_snd_front_pcm_stream_info *stream = stream_get(substream);
- if (unlikely(pos + count > stream->sh_buf.buffer_sz))
+ if (unlikely(pos + count > stream->buffer_sz))
return -EINVAL;
- memset(stream->sh_buf.buffer + pos, 0, count);
+ memset(stream->buffer + pos, 0, count);
return xen_snd_front_stream_write(&stream->evt_pair->req, pos, count);
}
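The alsa_hw_params() conversion above captures the whole life cycle of the new helper: allocate page-backed storage, describe it in a xen_front_pgdir_shbuf_cfg, then allocate and map the shared buffer. Condensed, with error handling elided::

    struct xen_front_pgdir_shbuf_cfg cfg = {
    	.xb_dev    = front_info->xb_dev,
    	.pgdir     = &stream->shbuf,
    	.num_pages = stream->num_pages,
    	.pages     = stream->pages,
    };

    ret = xen_front_pgdir_shbuf_alloc(&cfg);	/* grants + page directory */
    if (!ret)
    	ret = xen_front_pgdir_shbuf_map(&stream->shbuf);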
diff --git a/sound/xen/xen_snd_front_shbuf.c b/sound/xen/xen_snd_front_shbuf.c
deleted file mode 100644
index 07ac176a41ba..000000000000
--- a/sound/xen/xen_snd_front_shbuf.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR MIT
-
-/*
- * Xen para-virtual sound device
- *
- * Copyright (C) 2016-2018 EPAM Systems Inc.
- *
- * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
- */
-
-#include <linux/kernel.h>
-#include <xen/xen.h>
-#include <xen/xenbus.h>
-
-#include "xen_snd_front_shbuf.h"
-
-grant_ref_t xen_snd_front_shbuf_get_dir_start(struct xen_snd_front_shbuf *buf)
-{
- if (!buf->grefs)
- return GRANT_INVALID_REF;
-
- return buf->grefs[0];
-}
-
-void xen_snd_front_shbuf_clear(struct xen_snd_front_shbuf *buf)
-{
- memset(buf, 0, sizeof(*buf));
-}
-
-void xen_snd_front_shbuf_free(struct xen_snd_front_shbuf *buf)
-{
- int i;
-
- if (buf->grefs) {
- for (i = 0; i < buf->num_grefs; i++)
- if (buf->grefs[i] != GRANT_INVALID_REF)
- gnttab_end_foreign_access(buf->grefs[i],
- 0, 0UL);
- kfree(buf->grefs);
- }
- kfree(buf->directory);
- free_pages_exact(buf->buffer, buf->buffer_sz);
- xen_snd_front_shbuf_clear(buf);
-}
-
-/*
- * number of grant references a page can hold with respect to the
- * xensnd_page_directory header
- */
-#define XENSND_NUM_GREFS_PER_PAGE ((XEN_PAGE_SIZE - \
- offsetof(struct xensnd_page_directory, gref)) / \
- sizeof(grant_ref_t))
-
-static void fill_page_dir(struct xen_snd_front_shbuf *buf,
- int num_pages_dir)
-{
- struct xensnd_page_directory *page_dir;
- unsigned char *ptr;
- int i, cur_gref, grefs_left, to_copy;
-
- ptr = buf->directory;
- grefs_left = buf->num_grefs - num_pages_dir;
- /*
- * skip grant references at the beginning, they are for pages granted
- * for the page directory itself
- */
- cur_gref = num_pages_dir;
- for (i = 0; i < num_pages_dir; i++) {
- page_dir = (struct xensnd_page_directory *)ptr;
- if (grefs_left <= XENSND_NUM_GREFS_PER_PAGE) {
- to_copy = grefs_left;
- page_dir->gref_dir_next_page = GRANT_INVALID_REF;
- } else {
- to_copy = XENSND_NUM_GREFS_PER_PAGE;
- page_dir->gref_dir_next_page = buf->grefs[i + 1];
- }
-
- memcpy(&page_dir->gref, &buf->grefs[cur_gref],
- to_copy * sizeof(grant_ref_t));
-
- ptr += XEN_PAGE_SIZE;
- grefs_left -= to_copy;
- cur_gref += to_copy;
- }
-}
-
-static int grant_references(struct xenbus_device *xb_dev,
- struct xen_snd_front_shbuf *buf,
- int num_pages_dir, int num_pages_buffer,
- int num_grefs)
-{
- grant_ref_t priv_gref_head;
- unsigned long frame;
- int ret, i, j, cur_ref;
- int otherend_id;
-
- ret = gnttab_alloc_grant_references(num_grefs, &priv_gref_head);
- if (ret)
- return ret;
-
- buf->num_grefs = num_grefs;
- otherend_id = xb_dev->otherend_id;
- j = 0;
-
- for (i = 0; i < num_pages_dir; i++) {
- cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
- if (cur_ref < 0) {
- ret = cur_ref;
- goto fail;
- }
-
- frame = xen_page_to_gfn(virt_to_page(buf->directory +
- XEN_PAGE_SIZE * i));
- gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
- buf->grefs[j++] = cur_ref;
- }
-
- for (i = 0; i < num_pages_buffer; i++) {
- cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
- if (cur_ref < 0) {
- ret = cur_ref;
- goto fail;
- }
-
- frame = xen_page_to_gfn(virt_to_page(buf->buffer +
- XEN_PAGE_SIZE * i));
- gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
- buf->grefs[j++] = cur_ref;
- }
-
- gnttab_free_grant_references(priv_gref_head);
- fill_page_dir(buf, num_pages_dir);
- return 0;
-
-fail:
- gnttab_free_grant_references(priv_gref_head);
- return ret;
-}
-
-static int alloc_int_buffers(struct xen_snd_front_shbuf *buf,
- int num_pages_dir, int num_pages_buffer,
- int num_grefs)
-{
- buf->grefs = kcalloc(num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
- if (!buf->grefs)
- return -ENOMEM;
-
- buf->directory = kcalloc(num_pages_dir, XEN_PAGE_SIZE, GFP_KERNEL);
- if (!buf->directory)
- goto fail;
-
- buf->buffer_sz = num_pages_buffer * XEN_PAGE_SIZE;
- buf->buffer = alloc_pages_exact(buf->buffer_sz, GFP_KERNEL);
- if (!buf->buffer)
- goto fail;
-
- return 0;
-
-fail:
- kfree(buf->grefs);
- buf->grefs = NULL;
- kfree(buf->directory);
- buf->directory = NULL;
- return -ENOMEM;
-}
-
-int xen_snd_front_shbuf_alloc(struct xenbus_device *xb_dev,
- struct xen_snd_front_shbuf *buf,
- unsigned int buffer_sz)
-{
- int num_pages_buffer, num_pages_dir, num_grefs;
- int ret;
-
- xen_snd_front_shbuf_clear(buf);
-
- num_pages_buffer = DIV_ROUND_UP(buffer_sz, XEN_PAGE_SIZE);
- /* number of pages the page directory consumes itself */
- num_pages_dir = DIV_ROUND_UP(num_pages_buffer,
- XENSND_NUM_GREFS_PER_PAGE);
- num_grefs = num_pages_buffer + num_pages_dir;
-
- ret = alloc_int_buffers(buf, num_pages_dir,
- num_pages_buffer, num_grefs);
- if (ret < 0)
- return ret;
-
- ret = grant_references(xb_dev, buf, num_pages_dir, num_pages_buffer,
- num_grefs);
- if (ret < 0)
- return ret;
-
- fill_page_dir(buf, num_pages_dir);
- return 0;
-}
diff --git a/sound/xen/xen_snd_front_shbuf.h b/sound/xen/xen_snd_front_shbuf.h
deleted file mode 100644
index d28e97c47b2c..000000000000
--- a/sound/xen/xen_snd_front_shbuf.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-
-/*
- * Xen para-virtual sound device
- *
- * Copyright (C) 2016-2018 EPAM Systems Inc.
- *
- * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
- */
-
-#ifndef __XEN_SND_FRONT_SHBUF_H
-#define __XEN_SND_FRONT_SHBUF_H
-
-#include <xen/grant_table.h>
-
-#include "xen_snd_front_evtchnl.h"
-
-struct xen_snd_front_shbuf {
- int num_grefs;
- grant_ref_t *grefs;
- u8 *directory;
- u8 *buffer;
- size_t buffer_sz;
-};
-
-grant_ref_t xen_snd_front_shbuf_get_dir_start(struct xen_snd_front_shbuf *buf);
-
-int xen_snd_front_shbuf_alloc(struct xenbus_device *xb_dev,
- struct xen_snd_front_shbuf *buf,
- unsigned int buffer_sz);
-
-void xen_snd_front_shbuf_clear(struct xen_snd_front_shbuf *buf);
-
-void xen_snd_front_shbuf_free(struct xen_snd_front_shbuf *buf);
-
-#endif /* __XEN_SND_FRONT_SHBUF_H */
diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
index 9e52c86ccbd3..ff91192407d1 100644
--- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -46,6 +46,7 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_TRAP,
PERF_REG_POWERPC_DAR,
PERF_REG_POWERPC_DSISR,
+ PERF_REG_POWERPC_SIER,
PERF_REG_POWERPC_MAX,
};
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 89a048c2faec..28c4a502b419 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -331,6 +331,8 @@
#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
+#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */
+#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
diff --git a/tools/arch/x86/include/uapi/asm/prctl.h b/tools/arch/x86/include/uapi/asm/prctl.h
new file mode 100644
index 000000000000..5a6aac9fa41f
--- /dev/null
+++ b/tools/arch/x86/include/uapi/asm/prctl.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_X86_PRCTL_H
+#define _ASM_X86_PRCTL_H
+
+#define ARCH_SET_GS 0x1001
+#define ARCH_SET_FS 0x1002
+#define ARCH_GET_FS 0x1003
+#define ARCH_GET_GS 0x1004
+
+#define ARCH_GET_CPUID 0x1011
+#define ARCH_SET_CPUID 0x1012
+
+#define ARCH_MAP_VDSO_X32 0x2001
+#define ARCH_MAP_VDSO_32 0x2002
+#define ARCH_MAP_VDSO_64 0x2003
+
+#endif /* _ASM_X86_PRCTL_H */
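These constants are consumed through the x86 arch_prctl(2) syscall; a small user-space sketch (glibc exposes no wrapper, so it goes through syscall(2))::

    #include <asm/prctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
    	unsigned long fs_base;

    	if (syscall(SYS_arch_prctl, ARCH_GET_FS, &fs_base) == 0)
    		printf("FS base: %#lx\n", fs_base);
    	return 0;
    }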
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index edbe81534c6d..d07ccf8a23f7 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -137,4 +137,10 @@ EXAMPLES
SEE ALSO
========
- **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-prog**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index f55a2daed59b..64b001b4f777 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -42,7 +42,8 @@ MAP COMMANDS
| | **percpu_array** | **stack_trace** | **cgroup_array** | **lru_hash**
| | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps**
| | **devmap** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
-| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage** }
+| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
+| | **queue** | **stack** }
DESCRIPTION
===========
@@ -127,6 +128,10 @@ OPTIONS
-f, --bpffs
Show file names of pinned maps.
+ -n, --nomount
+ Do not automatically attempt to mount any virtual file system
+ (such as tracefs or BPF virtual file system) when necessary.
+
EXAMPLES
========
**# bpftool map show**
@@ -169,6 +174,67 @@ The following three commands are equivalent:
| **# bpftool map pin id 10 /sys/fs/bpf/map**
| **# bpftool map del pinned /sys/fs/bpf/map key 13 00 07 00**
+Note that map update can also be used in order to change the program references
+held by a program array map. This can be used, for example, to change the
+programs used for tail-call jumps at runtime, without having to reload the
+entry-point program. Below is an example of this use case: we load a program
+that defines a prog array map and whose main function contains a tail call
+to other programs that can be used either to "process" packets or to "debug"
+processing. Note that the prog array map MUST be pinned into the BPF virtual
+file system for the map update to work successfully, as the kernel flushes prog
+array maps when they have no more references from user space (and the update
+would be lost as soon as bpftool exits).
+
+|
+| **# bpftool prog loadall tail_calls.o /sys/fs/bpf/foo type xdp**
+| **# bpftool prog --bpffs**
+
+::
+
+ 545: xdp name main_func tag 674b4b5597193dc3 gpl
+ loaded_at 2018-12-12T15:02:58+0000 uid 0
+ xlated 240B jited 257B memlock 4096B map_ids 294
+ pinned /sys/fs/bpf/foo/xdp
+ 546: xdp name bpf_func_process tag e369a529024751fc gpl
+ loaded_at 2018-12-12T15:02:58+0000 uid 0
+ xlated 200B jited 164B memlock 4096B
+ pinned /sys/fs/bpf/foo/process
+ 547: xdp name bpf_func_debug tag 0b597868bc7f0976 gpl
+ loaded_at 2018-12-12T15:02:58+0000 uid 0
+ xlated 200B jited 164B memlock 4096B
+ pinned /sys/fs/bpf/foo/debug
+
+**# bpftool map**
+
+::
+
+ 294: prog_array name jmp_table flags 0x0
+ key 4B value 4B max_entries 1 memlock 4096B
+ owner_prog_type xdp owner jited
+
+|
+| **# bpftool map pin id 294 /sys/fs/bpf/bar**
+| **# bpftool map dump pinned /sys/fs/bpf/bar**
+
+::
+
+ Found 0 elements
+
+|
+| **# bpftool map update pinned /sys/fs/bpf/bar key 0 0 0 0 value pinned /sys/fs/bpf/foo/debug**
+| **# bpftool map dump pinned /sys/fs/bpf/bar**
+
+::
+
+ key: 00 00 00 00 value: 22 02 00 00
+ Found 1 element
+
SEE ALSO
========
- **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-prog**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-perf**\ (8)
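For reference, a minimal source for the tail_calls.o used in the example above might look like this (C for clang/BPF; the bpf_helpers.h path is the samples/bpf convention of this era, an assumption on our part)::

    #include <linux/bpf.h>
    #include "bpf_helpers.h"	/* provides SEC() and bpf_tail_call() */

    struct bpf_map_def SEC("maps") jmp_table = {
    	.type        = BPF_MAP_TYPE_PROG_ARRAY,
    	.key_size    = sizeof(__u32),
    	.value_size  = sizeof(__u32),
    	.max_entries = 1,
    };

    SEC("xdp") int main_func(struct xdp_md *ctx)
    {
    	bpf_tail_call(ctx, &jmp_table, 0);
    	return XDP_PASS;	/* reached only when slot 0 is empty */
    }

    char _license[] SEC("license") = "GPL";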
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index 408ec30d8872..ed87c9b619ad 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -136,4 +136,10 @@ EXAMPLES
SEE ALSO
========
- **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-prog**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
index e3eb0eab7641..f4c5e5538bb8 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
@@ -78,4 +78,10 @@ EXAMPLES
SEE ALSO
========
- **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-prog**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-net**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index ac4e904b10fb..58c8369b77dd 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -15,18 +15,20 @@ SYNOPSIS
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
*COMMANDS* :=
- { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** | **help** }
+ { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load**
+ | **loadall** | **help** }
MAP COMMANDS
=============
| **bpftool** **prog { show | list }** [*PROG*]
-| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}]
-| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}]
+| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual** | **linum**}]
+| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes** | **linum**}]
| **bpftool** **prog pin** *PROG* *FILE*
-| **bpftool** **prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
-| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* *MAP*
-| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* *MAP*
+| **bpftool** **prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
+| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
+| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
+| **bpftool** **prog tracelog**
| **bpftool** **prog help**
|
| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
@@ -39,7 +41,9 @@ MAP COMMANDS
| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6**
| }
-| *ATTACH_TYPE* := { **msg_verdict** | **skb_verdict** | **skb_parse** }
+| *ATTACH_TYPE* := {
+| **msg_verdict** | **skb_verdict** | **skb_parse** | **flow_dissector**
+| }
DESCRIPTION
@@ -52,7 +56,7 @@ DESCRIPTION
Output will start with program ID followed by program type and
zero or more named attributes (depending on kernel version).
- **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** }]
+ **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** | **linum** }]
Dump eBPF instructions of the program from the kernel. By
default, eBPF will be disassembled and printed to standard
output in human-readable format. In this case, **opcodes**
@@ -65,13 +69,23 @@ DESCRIPTION
built instead, and eBPF instructions will be presented with
CFG in DOT format, on standard output.
- **bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** }]
+ If the prog has line_info available, the source line will
+ be displayed by default. If **linum** is specified,
+ the filename, line number and line column will also be
+ displayed on top of the source line.
+
+ **bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** | **linum** }]
Dump jited image (host machine code) of the program.
If *FILE* is specified image will be written to a file,
otherwise it will be disassembled and printed to stdout.
**opcodes** controls if raw opcodes will be printed.
+ If the prog has line_info available, the source line will
+ be displayed by default. If **linum** is specified,
+ the filename, line number and line column will also be
+ displayed on top of the source line.
+
**bpftool prog pin** *PROG* *FILE*
Pin program *PROG* as *FILE*.
@@ -79,8 +93,11 @@ DESCRIPTION
contain a dot character ('.'), which is reserved for future
extensions of *bpffs*.
- **bpftool prog load** *OBJ* *FILE* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*]
- Load bpf program from binary *OBJ* and pin as *FILE*.
+ **bpftool prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*]
+ Load bpf program(s) from binary *OBJ* and pin as *PATH*.
+ **bpftool prog load** pins only the first program from the
+ *OBJ* as *PATH*. **bpftool prog loadall** pins all programs
+ from the *OBJ* under *PATH* directory.
**type** is optional, if not specified program type will be
inferred from section names.
By default bpftool will create new maps as declared in the ELF
@@ -92,18 +109,32 @@ DESCRIPTION
use, referring to it by **id** or through a **pinned** file.
If **dev** *NAME* is specified program will be loaded onto
given networking device (offload).
+ Optional **pinmaps** argument can be provided to pin all
+ maps under *MAP_DIR* directory.
- Note: *FILE* must be located in *bpffs* mount. It must not
+ Note: *PATH* must be located in *bpffs* mount. It must not
contain a dot character ('.'), which is reserved for future
extensions of *bpffs*.
- **bpftool prog attach** *PROG* *ATTACH_TYPE* *MAP*
- Attach bpf program *PROG* (with type specified by *ATTACH_TYPE*)
- to the map *MAP*.
-
- **bpftool prog detach** *PROG* *ATTACH_TYPE* *MAP*
- Detach bpf program *PROG* (with type specified by *ATTACH_TYPE*)
- from the map *MAP*.
+ **bpftool prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
+ Attach bpf program *PROG* (with type specified by
+ *ATTACH_TYPE*). Most *ATTACH_TYPEs* require a *MAP*
+ parameter, with the exception of *flow_dissector* which is
+		attached to the current networking name space.
+
+ **bpftool prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
+ Detach bpf program *PROG* (with type specified by
+ *ATTACH_TYPE*). Most *ATTACH_TYPEs* require a *MAP*
+ parameter, with the exception of *flow_dissector* which is
+ detached from the current networking name space.
+
+ **bpftool prog tracelog**
+ Dump the trace pipe of the system to the console (stdout).
+ Hit <Ctrl+C> to stop printing. BPF programs can write to this
+ trace pipe at runtime with the **bpf_trace_printk()** helper.
+ This should be used only for debugging purposes. For
+ streaming data from BPF programs to user space, one can use
+ perf events (see also **bpftool-map**\ (8)).
**bpftool prog help**
Print short help message.
@@ -124,86 +155,108 @@ OPTIONS
Generate human-readable JSON output. Implies **-j**.
-f, --bpffs
- Show file names of pinned programs.
+ When showing BPF programs, show file names of pinned
+ programs.
+
+ -m, --mapcompat
+ Allow loading maps with unknown map definitions.
+
+ -n, --nomount
+ Do not automatically attempt to mount any virtual file system
+ (such as tracefs or BPF virtual file system) when necessary.
EXAMPLES
========
**# bpftool prog show**
+
::
- 10: xdp name some_prog tag 005a3d2123620c8b gpl
- loaded_at Sep 29/20:11 uid 0
- xlated 528B jited 370B memlock 4096B map_ids 10
+ 10: xdp name some_prog tag 005a3d2123620c8b gpl
+ loaded_at 2017-09-29T20:11:00+0000 uid 0
+ xlated 528B jited 370B memlock 4096B map_ids 10
**# bpftool --json --pretty prog show**
::
- {
- "programs": [{
- "id": 10,
- "type": "xdp",
- "tag": "005a3d2123620c8b",
- "gpl_compatible": true,
- "loaded_at": "Sep 29/20:11",
- "uid": 0,
- "bytes_xlated": 528,
- "jited": true,
- "bytes_jited": 370,
- "bytes_memlock": 4096,
- "map_ids": [10
- ]
- }
- ]
- }
+ [{
+ "id": 10,
+ "type": "xdp",
+ "tag": "005a3d2123620c8b",
+ "gpl_compatible": true,
+ "loaded_at": 1506715860,
+ "uid": 0,
+ "bytes_xlated": 528,
+ "jited": true,
+ "bytes_jited": 370,
+ "bytes_memlock": 4096,
+ "map_ids": [10
+ ]
+ }
+ ]
|
| **# bpftool prog dump xlated id 10 file /tmp/t**
| **# ls -l /tmp/t**
-| -rw------- 1 root root 560 Jul 22 01:42 /tmp/t
-**# bpftool prog dum jited tag 005a3d2123620c8b**
+::
+
+ -rw------- 1 root root 560 Jul 22 01:42 /tmp/t
+
+**# bpftool prog dump jited tag 005a3d2123620c8b**
::
- push %rbp
- mov %rsp,%rbp
- sub $0x228,%rsp
- sub $0x28,%rbp
- mov %rbx,0x0(%rbp)
+ 0: push %rbp
+ 1: mov %rsp,%rbp
+ 2: sub $0x228,%rsp
+ 3: sub $0x28,%rbp
+ 4: mov %rbx,0x0(%rbp)
|
| **# mount -t bpf none /sys/fs/bpf/**
| **# bpftool prog pin id 10 /sys/fs/bpf/prog**
| **# bpftool prog load ./my_prog.o /sys/fs/bpf/prog2**
| **# ls -l /sys/fs/bpf/**
-| -rw------- 1 root root 0 Jul 22 01:43 prog
-| -rw------- 1 root root 0 Jul 22 01:44 prog2
-**# bpftool prog dum jited pinned /sys/fs/bpf/prog opcodes**
+::
+
+ -rw------- 1 root root 0 Jul 22 01:43 prog
+ -rw------- 1 root root 0 Jul 22 01:44 prog2
+
+**# bpftool prog dump jited pinned /sys/fs/bpf/prog opcodes**
::
- push %rbp
- 55
- mov %rsp,%rbp
- 48 89 e5
- sub $0x228,%rsp
- 48 81 ec 28 02 00 00
- sub $0x28,%rbp
- 48 83 ed 28
- mov %rbx,0x0(%rbp)
- 48 89 5d 00
+ 0: push %rbp
+ 55
+ 1: mov %rsp,%rbp
+ 48 89 e5
+ 4: sub $0x228,%rsp
+ 48 81 ec 28 02 00 00
+ b: sub $0x28,%rbp
+ 48 83 ed 28
+ f: mov %rbx,0x0(%rbp)
+ 48 89 5d 00
|
| **# bpftool prog load xdp1_kern.o /sys/fs/bpf/xdp1 type xdp map name rxcnt id 7**
| **# bpftool prog show pinned /sys/fs/bpf/xdp1**
-| 9: xdp name xdp_prog1 tag 539ec6ce11b52f98 gpl
-| loaded_at 2018-06-25T16:17:31-0700 uid 0
-| xlated 488B jited 336B memlock 4096B map_ids 7
-| **# rm /sys/fs/bpf/xdp1**
-|
+
+::
+
+ 9: xdp name xdp_prog1 tag 539ec6ce11b52f98 gpl
+ loaded_at 2018-06-25T16:17:31-0700 uid 0
+ xlated 488B jited 336B memlock 4096B map_ids 7
+
+**# rm /sys/fs/bpf/xdp1**
SEE ALSO
========
- **bpftool**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8)
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 04cd4f92ab89..e1677e81ed59 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -60,8 +60,17 @@ OPTIONS
-m, --mapcompat
Allow loading maps with unknown map definitions.
+ -n, --nomount
+ Do not automatically attempt to mount any virtual file system
+ (such as tracefs or BPF virtual file system) when necessary.
+
SEE ALSO
========
- **bpftool-map**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
- **bpftool-perf**\ (8), **bpftool-net**\ (8)
+ **bpf**\ (2),
+ **bpf-helpers**\ (7),
+ **bpftool-prog**\ (8),
+ **bpftool-map**\ (8),
+ **bpftool-cgroup**\ (8),
+ **bpftool-net**\ (8),
+ **bpftool-perf**\ (8)
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index dac7eff4c7e5..492f0f24e2d3 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -35,8 +35,6 @@ $(LIBBPF)-clean:
prefix ?= /usr/local
bash_compdir ?= /usr/share/bash-completion/completions
-CC = gcc
-
CFLAGS += -O2
CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow -Wno-missing-field-initializers
CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
@@ -53,7 +51,7 @@ ifneq ($(EXTRA_LDFLAGS),)
LDFLAGS += $(EXTRA_LDFLAGS)
endif
-LIBS = -lelf -lbfd -lopcodes $(LIBBPF)
+LIBS = -lelf $(LIBBPF)
INSTALL ?= install
RM ?= rm -f
@@ -90,7 +88,16 @@ include $(wildcard $(OUTPUT)*.d)
all: $(OUTPUT)bpftool
-SRCS = $(wildcard *.c)
+BFD_SRCS = jit_disasm.c
+
+SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c))
+
+ifeq ($(feature-libbfd),1)
+CFLAGS += -DHAVE_LIBBFD_SUPPORT
+SRCS += $(BFD_SRCS)
+LIBS += -lbfd -lopcodes
+endif
+
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 3f78e6404589..e4e4fab1b8c7 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -1,37 +1,8 @@
# bpftool(8) bash completion -*- shell-script -*-
#
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
# Copyright (C) 2017-2018 Netronome Systems, Inc.
#
-# This software is dual licensed under the GNU General License
-# Version 2, June 1991 as shown in the file COPYING in the top-level
-# directory of this source tree or the BSD 2-Clause License provided
-# below. You have the option to license this software under the
-# complete terms of either license.
-#
-# The BSD 2-Clause License:
-#
-# Redistribution and use in source and binary forms, with or
-# without modification, are permitted provided that the following
-# conditions are met:
-#
-# 1. Redistributions of source code must retain the above
-# copyright notice, this list of conditions and the following
-# disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials
-# provided with the distribution.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
-# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
-# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-#
# Author: Quentin Monnet <quentin.monnet@netronome.com>
# Takes a list of words in argument; each one of them is added to COMPREPLY if
@@ -191,7 +162,7 @@ _bpftool()
# Deal with simplest keywords
case $prev in
- help|hex|opcodes|visual)
+ help|hex|opcodes|visual|linum)
return 0
;;
tag)
@@ -243,16 +214,20 @@ _bpftool()
# Completion depends on object and command in use
case $object in
prog)
- if [[ $command != "load" ]]; then
- case $prev in
- id)
- _bpftool_get_prog_ids
- return 0
- ;;
- esac
- fi
+ # Complete id, only for subcommands that use prog (but no map) ids
+ case $command in
+ show|list|dump|pin)
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ return 0
+ ;;
+ esac
+ ;;
+ esac
local PROG_TYPE='id pinned tag'
+ local MAP_TYPE='id pinned'
case $command in
show|list)
[[ $prev != "$command" ]] && return 0
@@ -274,10 +249,10 @@ _bpftool()
*)
_bpftool_once_attr 'file'
if _bpftool_search_list 'xlated'; then
- COMPREPLY+=( $( compgen -W 'opcodes visual' -- \
+ COMPREPLY+=( $( compgen -W 'opcodes visual linum' -- \
"$cur" ) )
else
- COMPREPLY+=( $( compgen -W 'opcodes' -- \
+ COMPREPLY+=( $( compgen -W 'opcodes linum' -- \
"$cur" ) )
fi
return 0
@@ -293,23 +268,45 @@ _bpftool()
return 0
;;
attach|detach)
- if [[ ${#words[@]} == 7 ]]; then
- COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
- return 0
- fi
-
- if [[ ${#words[@]} == 6 ]]; then
- COMPREPLY=( $( compgen -W "msg_verdict skb_verdict skb_parse" -- "$cur" ) )
- return 0
- fi
-
- if [[ $prev == "$command" ]]; then
- COMPREPLY=( $( compgen -W "id pinned" -- "$cur" ) )
- return 0
- fi
- return 0
+ case $cword in
+ 3)
+ COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ 4)
+ case $prev in
+ id)
+ _bpftool_get_prog_ids
+ ;;
+ pinned)
+ _filedir
+ ;;
+ esac
+ return 0
+ ;;
+ 5)
+ COMPREPLY=( $( compgen -W 'msg_verdict skb_verdict \
+ skb_parse flow_dissector' -- "$cur" ) )
+ return 0
+ ;;
+ 6)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ 7)
+ case $prev in
+ id)
+ _bpftool_get_map_ids
+ ;;
+ pinned)
+ _filedir
+ ;;
+ esac
+ return 0
+ ;;
+ esac
;;
- load)
+ load|loadall)
local obj
if [[ ${#words[@]} -lt 6 ]]; then
@@ -338,7 +335,16 @@ _bpftool()
case $prev in
type)
- COMPREPLY=( $( compgen -W "socket kprobe kretprobe classifier action tracepoint raw_tracepoint xdp perf_event cgroup/skb cgroup/sock cgroup/dev lwt_in lwt_out lwt_xmit lwt_seg6local sockops sk_skb sk_msg lirc_mode2 cgroup/bind4 cgroup/bind6 cgroup/connect4 cgroup/connect6 cgroup/sendmsg4 cgroup/sendmsg6 cgroup/post_bind4 cgroup/post_bind6" -- \
+ COMPREPLY=( $( compgen -W "socket kprobe \
+ kretprobe classifier flow_dissector \
+ action tracepoint raw_tracepoint \
+ xdp perf_event cgroup/skb cgroup/sock \
+ cgroup/dev lwt_in lwt_out lwt_xmit \
+ lwt_seg6local sockops sk_skb sk_msg \
+ lirc_mode2 cgroup/bind4 cgroup/bind6 \
+ cgroup/connect4 cgroup/connect6 \
+ cgroup/sendmsg4 cgroup/sendmsg6 \
+ cgroup/post_bind4 cgroup/post_bind6" -- \
"$cur" ) )
return 0
;;
@@ -346,7 +352,7 @@ _bpftool()
_bpftool_get_map_ids
return 0
;;
- pinned)
+ pinned|pinmaps)
_filedir
return 0
;;
@@ -358,14 +364,18 @@ _bpftool()
COMPREPLY=( $( compgen -W "map" -- "$cur" ) )
_bpftool_once_attr 'type'
_bpftool_once_attr 'dev'
+ _bpftool_once_attr 'pinmaps'
return 0
;;
esac
;;
+ tracelog)
+ return 0
+ ;;
*)
[[ $prev == $object ]] && \
COMPREPLY=( $( compgen -W 'dump help pin attach detach load \
- show list' -- "$cur" ) )
+ show list tracelog' -- "$cur" ) )
;;
esac
;;
@@ -400,7 +410,7 @@ _bpftool()
lru_percpu_hash lpm_trie array_of_maps \
hash_of_maps devmap sockmap cpumap xskmap \
sockhash cgroup_storage reuseport_sockarray \
- percpu_cgroup_storage' -- \
+ percpu_cgroup_storage queue stack' -- \
"$cur" ) )
return 0
;;
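
Note: the completion changes above track keywords added elsewhere in this series: linum for the dump commands, tracelog as a prog subcommand, loadall/pinmaps for loading, and the queue/stack map types. As an illustrative invocation (the program id is hypothetical), the new keyword goes at the end of a dump command:

    bpftool prog dump xlated id 10 linum
    bpftool prog dump jited id 10 opcodes linum
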
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 55bc512a1831..3f0629edbca5 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
#include <ctype.h>
@@ -32,7 +32,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw,
}
static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
- const void *data)
+ __u8 bit_offset, const void *data)
{
int actual_type_id;
@@ -40,7 +40,7 @@ static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
if (actual_type_id < 0)
return actual_type_id;
- return btf_dumper_do_type(d, actual_type_id, 0, data);
+ return btf_dumper_do_type(d, actual_type_id, bit_offset, data);
}
static void btf_dumper_enum(const void *data, json_writer_t *jw)
@@ -73,20 +73,17 @@ static int btf_dumper_array(const struct btf_dumper *d, __u32 type_id,
return ret;
}
-static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
+static void btf_dumper_bitfield(__u32 nr_bits, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
{
int left_shift_bits, right_shift_bits;
- int nr_bits = BTF_INT_BITS(int_type);
- int total_bits_offset;
int bytes_to_copy;
int bits_to_copy;
__u64 print_num;
- total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
- data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
- bit_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
+ data += BITS_ROUNDDOWN_BYTES(bit_offset);
+ bit_offset = BITS_PER_BYTE_MASKED(bit_offset);
bits_to_copy = bit_offset + nr_bits;
bytes_to_copy = BITS_ROUNDUP_BYTES(bits_to_copy);
@@ -109,6 +106,22 @@ static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
jsonw_printf(jw, "%llu", print_num);
}
+
+static void btf_dumper_int_bits(__u32 int_type, __u8 bit_offset,
+ const void *data, json_writer_t *jw,
+ bool is_plain_text)
+{
+ int nr_bits = BTF_INT_BITS(int_type);
+ int total_bits_offset;
+
+ /* bits_offset is at most 7.
+ * BTF_INT_OFFSET() cannot exceed 64 bits.
+ */
+ total_bits_offset = bit_offset + BTF_INT_OFFSET(int_type);
+ btf_dumper_bitfield(nr_bits, total_bits_offset, data, jw,
+ is_plain_text);
+}
+
static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
const void *data, json_writer_t *jw,
bool is_plain_text)
@@ -180,6 +193,7 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
const struct btf_type *t;
struct btf_member *m;
const void *data_off;
+ int kind_flag;
int ret = 0;
int i, vlen;
@@ -187,18 +201,32 @@ static int btf_dumper_struct(const struct btf_dumper *d, __u32 type_id,
if (!t)
return -EINVAL;
+ kind_flag = BTF_INFO_KFLAG(t->info);
vlen = BTF_INFO_VLEN(t->info);
jsonw_start_object(d->jw);
m = (struct btf_member *)(t + 1);
for (i = 0; i < vlen; i++) {
- data_off = data + BITS_ROUNDDOWN_BYTES(m[i].offset);
+ __u32 bit_offset = m[i].offset;
+ __u32 bitfield_size = 0;
+
+ if (kind_flag) {
+ bitfield_size = BTF_MEMBER_BITFIELD_SIZE(bit_offset);
+ bit_offset = BTF_MEMBER_BIT_OFFSET(bit_offset);
+ }
+
jsonw_name(d->jw, btf__name_by_offset(d->btf, m[i].name_off));
- ret = btf_dumper_do_type(d, m[i].type,
- BITS_PER_BYTE_MASKED(m[i].offset),
- data_off);
- if (ret)
- break;
+ if (bitfield_size) {
+ btf_dumper_bitfield(bitfield_size, bit_offset,
+ data, d->jw, d->is_plain_text);
+ } else {
+ data_off = data + BITS_ROUNDDOWN_BYTES(bit_offset);
+ ret = btf_dumper_do_type(d, m[i].type,
+ BITS_PER_BYTE_MASKED(bit_offset),
+ data_off);
+ if (ret)
+ break;
+ }
}
jsonw_end_object(d->jw);
@@ -237,7 +265,7 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
- return btf_dumper_modifier(d, type_id, data);
+ return btf_dumper_modifier(d, type_id, bit_offset, data);
default:
jsonw_printf(d->jw, "(unsupported-kind");
return -EINVAL;
@@ -249,3 +277,206 @@ int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
{
return btf_dumper_do_type(d, type_id, 0, data);
}
+
+#define BTF_PRINT_ARG(...) \
+ do { \
+ pos += snprintf(func_sig + pos, size - pos, \
+ __VA_ARGS__); \
+ if (pos >= size) \
+ return -1; \
+ } while (0)
+#define BTF_PRINT_TYPE(type) \
+ do { \
+ pos = __btf_dumper_type_only(btf, type, func_sig, \
+ pos, size); \
+ if (pos == -1) \
+ return -1; \
+ } while (0)
+
+static int btf_dump_func(const struct btf *btf, char *func_sig,
+ const struct btf_type *func_proto,
+ const struct btf_type *func, int pos, int size);
+
+static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
+ char *func_sig, int pos, int size)
+{
+ const struct btf_type *proto_type;
+ const struct btf_array *array;
+ const struct btf_type *t;
+
+ if (!type_id) {
+ BTF_PRINT_ARG("void ");
+ return pos;
+ }
+
+ t = btf__type_by_id(btf, type_id);
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_TYPEDEF:
+ BTF_PRINT_ARG("%s ", btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_STRUCT:
+ BTF_PRINT_ARG("struct %s ",
+ btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_UNION:
+ BTF_PRINT_ARG("union %s ",
+ btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_ENUM:
+ BTF_PRINT_ARG("enum %s ",
+ btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_ARRAY:
+ array = (struct btf_array *)(t + 1);
+ BTF_PRINT_TYPE(array->type);
+ BTF_PRINT_ARG("[%d]", array->nelems);
+ break;
+ case BTF_KIND_PTR:
+ BTF_PRINT_TYPE(t->type);
+ BTF_PRINT_ARG("* ");
+ break;
+ case BTF_KIND_FWD:
+ BTF_PRINT_ARG("%s %s ",
+ BTF_INFO_KFLAG(t->info) ? "union" : "struct",
+ btf__name_by_offset(btf, t->name_off));
+ break;
+ case BTF_KIND_VOLATILE:
+ BTF_PRINT_ARG("volatile ");
+ BTF_PRINT_TYPE(t->type);
+ break;
+ case BTF_KIND_CONST:
+ BTF_PRINT_ARG("const ");
+ BTF_PRINT_TYPE(t->type);
+ break;
+ case BTF_KIND_RESTRICT:
+ BTF_PRINT_ARG("restrict ");
+ BTF_PRINT_TYPE(t->type);
+ break;
+ case BTF_KIND_FUNC_PROTO:
+ pos = btf_dump_func(btf, func_sig, t, NULL, pos, size);
+ if (pos == -1)
+ return -1;
+ break;
+ case BTF_KIND_FUNC:
+ proto_type = btf__type_by_id(btf, t->type);
+ pos = btf_dump_func(btf, func_sig, proto_type, t, pos, size);
+ if (pos == -1)
+ return -1;
+ break;
+ case BTF_KIND_UNKN:
+ default:
+ return -1;
+ }
+
+ return pos;
+}
+
+static int btf_dump_func(const struct btf *btf, char *func_sig,
+ const struct btf_type *func_proto,
+ const struct btf_type *func, int pos, int size)
+{
+ int i, vlen;
+
+ BTF_PRINT_TYPE(func_proto->type);
+ if (func)
+ BTF_PRINT_ARG("%s(", btf__name_by_offset(btf, func->name_off));
+ else
+ BTF_PRINT_ARG("(");
+ vlen = BTF_INFO_VLEN(func_proto->info);
+ for (i = 0; i < vlen; i++) {
+ struct btf_param *arg = &((struct btf_param *)(func_proto + 1))[i];
+
+ if (i)
+ BTF_PRINT_ARG(", ");
+ if (arg->type) {
+ BTF_PRINT_TYPE(arg->type);
+ BTF_PRINT_ARG("%s",
+ btf__name_by_offset(btf, arg->name_off));
+ } else {
+ BTF_PRINT_ARG("...");
+ }
+ }
+ BTF_PRINT_ARG(")");
+
+ return pos;
+}
+
+void btf_dumper_type_only(const struct btf *btf, __u32 type_id, char *func_sig,
+ int size)
+{
+ int err;
+
+ func_sig[0] = '\0';
+ if (!btf)
+ return;
+
+ err = __btf_dumper_type_only(btf, type_id, func_sig, 0, size);
+ if (err < 0)
+ func_sig[0] = '\0';
+}
+
+static const char *ltrim(const char *s)
+{
+ while (isspace(*s))
+ s++;
+
+ return s;
+}
+
+void btf_dump_linfo_plain(const struct btf *btf,
+ const struct bpf_line_info *linfo,
+ const char *prefix, bool linum)
+{
+ const char *line = btf__name_by_offset(btf, linfo->line_off);
+
+ if (!line)
+ return;
+ line = ltrim(line);
+
+ if (!prefix)
+ prefix = "";
+
+ if (linum) {
+ const char *file = btf__name_by_offset(btf, linfo->file_name_off);
+
+		/* Be more forgiving on the file name: the linum
+		 * option is expected to provide more info than the
+		 * already available src line.
+		 */
+ if (!file)
+ file = "";
+
+ printf("%s%s [file:%s line_num:%u line_col:%u]\n",
+ prefix, line, file,
+ BPF_LINE_INFO_LINE_NUM(linfo->line_col),
+ BPF_LINE_INFO_LINE_COL(linfo->line_col));
+ } else {
+ printf("%s%s\n", prefix, line);
+ }
+}
+
+void btf_dump_linfo_json(const struct btf *btf,
+ const struct bpf_line_info *linfo, bool linum)
+{
+ const char *line = btf__name_by_offset(btf, linfo->line_off);
+
+ if (line)
+ jsonw_string_field(json_wtr, "src", ltrim(line));
+
+ if (linum) {
+ const char *file = btf__name_by_offset(btf, linfo->file_name_off);
+
+ if (file)
+ jsonw_string_field(json_wtr, "file", file);
+
+ if (BPF_LINE_INFO_LINE_NUM(linfo->line_col))
+ jsonw_int_field(json_wtr, "line_num",
+ BPF_LINE_INFO_LINE_NUM(linfo->line_col));
+
+ if (BPF_LINE_INFO_LINE_COL(linfo->line_col))
+ jsonw_int_field(json_wtr, "line_col",
+ BPF_LINE_INFO_LINE_COL(linfo->line_col));
+ }
+}
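
Note: btf_dumper_bitfield() above generalizes the old int-only bit dumping so that struct members encoded with the BTF kind_flag can be printed: when kind_flag is set, a member's offset field packs both a bitfield size (BTF_MEMBER_BITFIELD_SIZE) and a bit offset (BTF_MEMBER_BIT_OFFSET). A minimal sketch of the kind of C type this covers (names and widths are illustrative only):

    struct bitfields {
            unsigned int a : 3;     /* bitfield_size 3, bit_offset 0 */
            unsigned int b : 5;     /* bitfield_size 5, bit_offset 3 */
            unsigned int rest;      /* not a bitfield: still goes through btf_dumper_do_type() */
    };
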
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
index f30b3a4a840b..31f0db41513f 100644
--- a/tools/bpf/bpftool/cfg.c
+++ b/tools/bpf/bpftool/cfg.c
@@ -1,39 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/list.h>
#include <stdlib.h>
diff --git a/tools/bpf/bpftool/cfg.h b/tools/bpf/bpftool/cfg.h
index 2cc9bd990b13..e144257ea6d2 100644
--- a/tools/bpf/bpftool/cfg.h
+++ b/tools/bpf/bpftool/cfg.h
@@ -1,39 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __BPF_TOOL_CFG_H
#define __BPF_TOOL_CFG_H
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index ee7a9765c6b3..4b5c8da2a7c0 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2017 Facebook
// Author: Roman Gushchin <guro@fb.com>
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 25af85304ebe..897483457bf0 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <ctype.h>
#include <errno.h>
@@ -46,8 +16,8 @@
#include <linux/magic.h>
#include <net/if.h>
#include <sys/mount.h>
+#include <sys/resource.h>
#include <sys/stat.h>
-#include <sys/types.h>
#include <sys/vfs.h>
#include <bpf.h>
@@ -58,7 +28,7 @@
#define BPF_FS_MAGIC 0xcafe4a11
#endif
-void p_err(const char *fmt, ...)
+void __printf(1, 2) p_err(const char *fmt, ...)
{
va_list ap;
@@ -76,7 +46,7 @@ void p_err(const char *fmt, ...)
va_end(ap);
}
-void p_info(const char *fmt, ...)
+void __printf(1, 2) p_info(const char *fmt, ...)
{
va_list ap;
@@ -99,7 +69,15 @@ static bool is_bpffs(char *path)
return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
}
-static int mnt_bpffs(const char *target, char *buff, size_t bufflen)
+void set_max_rlimit(void)
+{
+ struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
+}
+
+static int
+mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
{
bool bind_done = false;
@@ -121,25 +99,40 @@ static int mnt_bpffs(const char *target, char *buff, size_t bufflen)
bind_done = true;
}
- if (mount("bpf", target, "bpf", 0, "mode=0700")) {
- snprintf(buff, bufflen, "mount -t bpf bpf %s failed: %s",
- target, strerror(errno));
+ if (mount(type, target, type, 0, "mode=0700")) {
+ snprintf(buff, bufflen, "mount -t %s %s %s failed: %s",
+ type, type, target, strerror(errno));
return -1;
}
return 0;
}
-int open_obj_pinned(char *path)
+int mount_tracefs(const char *target)
+{
+ char err_str[ERR_MAX_LEN];
+ int err;
+
+ err = mnt_fs(target, "tracefs", err_str, ERR_MAX_LEN);
+ if (err) {
+ err_str[ERR_MAX_LEN - 1] = '\0';
+ p_err("can't mount tracefs: %s", err_str);
+ }
+
+ return err;
+}
+
+int open_obj_pinned(char *path, bool quiet)
{
int fd;
fd = bpf_obj_get(path);
if (fd < 0) {
- p_err("bpf obj get (%s): %s", path,
- errno == EACCES && !is_bpffs(dirname(path)) ?
- "directory not in bpf file system (bpffs)" :
- strerror(errno));
+ if (!quiet)
+ p_err("bpf obj get (%s): %s", path,
+ errno == EACCES && !is_bpffs(dirname(path)) ?
+ "directory not in bpf file system (bpffs)" :
+ strerror(errno));
return -1;
}
@@ -151,7 +144,7 @@ int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
enum bpf_obj_type type;
int fd;
- fd = open_obj_pinned(path);
+ fd = open_obj_pinned(path, false);
if (fd < 0)
return -1;
@@ -169,34 +162,29 @@ int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
return fd;
}
-int do_pin_fd(int fd, const char *name)
+int mount_bpffs_for_pin(const char *name)
{
char err_str[ERR_MAX_LEN];
char *file;
char *dir;
int err = 0;
- err = bpf_obj_pin(fd, name);
- if (!err)
- goto out;
-
file = malloc(strlen(name) + 1);
strcpy(file, name);
dir = dirname(file);
- if (errno != EPERM || is_bpffs(dir)) {
- p_err("can't pin the object (%s): %s", name, strerror(errno));
+ if (is_bpffs(dir))
+ /* nothing to do if already mounted */
+ goto out_free;
+
+ if (block_mount) {
+ p_err("no BPF file system found, not mounting it due to --nomount option");
+ err = -1;
goto out_free;
}
- /* Attempt to mount bpffs, then retry pinning. */
- err = mnt_bpffs(dir, err_str, ERR_MAX_LEN);
- if (!err) {
- err = bpf_obj_pin(fd, name);
- if (err)
- p_err("can't pin the object (%s): %s", name,
- strerror(errno));
- } else {
+ err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
+ if (err) {
err_str[ERR_MAX_LEN - 1] = '\0';
p_err("can't mount BPF file system to pin the object (%s): %s",
name, err_str);
@@ -204,10 +192,20 @@ int do_pin_fd(int fd, const char *name)
out_free:
free(file);
-out:
return err;
}
+int do_pin_fd(int fd, const char *name)
+{
+ int err;
+
+ err = mount_bpffs_for_pin(name);
+ if (err)
+ return err;
+
+ return bpf_obj_pin(fd, name);
+}
+
int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
{
unsigned int id;
@@ -268,7 +266,7 @@ int get_fd_type(int fd)
char buf[512];
ssize_t n;
- snprintf(path, sizeof(path), "/proc/%d/fd/%d", getpid(), fd);
+ snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
n = readlink(path, buf, sizeof(buf));
if (n < 0) {
@@ -296,7 +294,7 @@ char *get_fdinfo(int fd, const char *key)
ssize_t n;
FILE *fdi;
- snprintf(path, sizeof(path), "/proc/%d/fdinfo/%d", getpid(), fd);
+ snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
fdi = fopen(path, "r");
if (!fdi) {
@@ -304,7 +302,7 @@ char *get_fdinfo(int fd, const char *key)
return NULL;
}
- while ((n = getline(&line, &line_n, fdi))) {
+ while ((n = getline(&line, &line_n, fdi)) > 0) {
char *value;
int len;
@@ -384,7 +382,7 @@ int build_pinned_obj_table(struct pinned_obj_table *tab,
while ((ftse = fts_read(fts))) {
if (!(ftse->fts_info & FTS_F))
continue;
- fd = open_obj_pinned(ftse->fts_path);
+ fd = open_obj_pinned(ftse->fts_path, true);
if (fd < 0)
continue;
@@ -597,7 +595,7 @@ void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
if (!ifindex)
return;
- printf(" dev ");
+ printf(" offloaded_to ");
if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
printf("%s", name);
else
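
Note: mnt_bpffs() is generalized above into mnt_fs() so the same helper can mount tracefs for the new tracelog command, and the pinning path is split so mount_bpffs_for_pin() consults the global block_mount flag set by the new --nomount option (see main.c below). A hedged example of the resulting behavior, with a hypothetical program id and pin path:

    # Succeeds, mounting bpffs on the parent directory if needed:
    bpftool prog pin id 7 /sys/fs/bpf/prog7
    # Fails early instead of mounting when --nomount is given:
    bpftool --nomount prog pin id 7 /sys/fs/bpf/prog7
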
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index c75ffd9ce2bb..3ef3093560ba 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Based on:
*
@@ -19,29 +20,21 @@
#include <string.h>
#include <bfd.h>
#include <dis-asm.h>
-#include <sys/types.h>
#include <sys/stat.h>
#include <limits.h>
+#include <libbpf.h>
#include "json_writer.h"
#include "main.h"
static void get_exec_path(char *tpath, size_t size)
{
+ const char *path = "/proc/self/exe";
ssize_t len;
- char *path;
-
- snprintf(tpath, size, "/proc/%d/exe", (int) getpid());
- tpath[size - 1] = 0;
-
- path = strdup(tpath);
- assert(path);
len = readlink(path, tpath, size - 1);
assert(len > 0);
tpath[len] = 0;
-
- free(path);
}
static int oper_count;
@@ -77,10 +70,16 @@ static int fprintf_json(void *out, const char *fmt, ...)
}
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
- const char *arch, const char *disassembler_options)
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum)
{
+ const struct bpf_line_info *linfo = NULL;
disassembler_ftype disassemble;
struct disassemble_info info;
+ unsigned int nr_skip = 0;
int count, i, pc = 0;
char tpath[PATH_MAX];
bfd *bfdf;
@@ -109,7 +108,7 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
if (inf) {
bfdf->arch_info = inf;
} else {
- p_err("No libfd support for %s", arch);
+ p_err("No libbfd support for %s", arch);
return;
}
}
@@ -136,12 +135,26 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
if (json_output)
jsonw_start_array(json_wtr);
do {
+ if (prog_linfo) {
+ linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo,
+ func_ksym + pc,
+ func_idx,
+ nr_skip);
+ if (linfo)
+ nr_skip++;
+ }
+
if (json_output) {
jsonw_start_object(json_wtr);
oper_count = 0;
+ if (linfo)
+ btf_dump_linfo_json(btf, linfo, linum);
jsonw_name(json_wtr, "pc");
jsonw_printf(json_wtr, "\"0x%x\"", pc);
} else {
+ if (linfo)
+ btf_dump_linfo_plain(btf, linfo, "; ",
+ linum);
printf("%4x:\t", pc);
}
@@ -183,3 +196,9 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
bfd_close(bfdf);
}
+
+int disasm_init(void)
+{
+ bfd_init();
+ return 0;
+}
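
Note: disasm_print_insn() now looks up a bpf_line_info record for each jited address via bpf_prog_linfo__lfind_addr_func() and emits it before the instruction, and bfd initialization moves out of main() into disasm_init() so it only runs when a jited dump is requested. Assuming a program with line info and the linum keyword, plain output would interleave source lines roughly as follows (ksym, file name and source text are hypothetical, derived from the printf formats above; x86 disassembly shown for illustration):

    bpf_prog_5e34ca27fe7b9d87_foo:
    ; if (data + 4 > data_end) [file:sample.c line_num:7 line_col:6]
       0:	push   %rbp
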
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index c6eef76322ae..bff7ee026680 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Simple streaming JSON writer
*
@@ -19,6 +20,7 @@
#include <malloc.h>
#include <inttypes.h>
#include <stdint.h>
+#include <linux/compiler.h>
#include "json_writer.h"
@@ -156,7 +158,8 @@ void jsonw_name(json_writer_t *self, const char *name)
putc(' ', self->out);
}
-void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
+void __printf(2, 0)
+jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
{
jsonw_eor(self);
putc('"', self->out);
@@ -164,7 +167,7 @@ void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap)
putc('"', self->out);
}
-void jsonw_printf(json_writer_t *self, const char *fmt, ...)
+void __printf(2, 3) jsonw_printf(json_writer_t *self, const char *fmt, ...)
{
va_list ap;
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
index 0fa2fb1b6351..c1ab51aed99c 100644
--- a/tools/bpf/bpftool/json_writer.h
+++ b/tools/bpf/bpftool/json_writer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/*
* Simple streaming JSON writer
*
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 75a3296dc0bc..f44a1c2c4ea0 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -1,37 +1,6 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <bfd.h>
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
@@ -55,6 +24,7 @@ json_writer_t *json_wtr;
bool pretty_output;
bool json_output;
bool show_pinned;
+bool block_mount;
int bpf_flags;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
@@ -344,6 +314,7 @@ int main(int argc, char **argv)
{ "version", no_argument, NULL, 'V' },
{ "bpffs", no_argument, NULL, 'f' },
{ "mapcompat", no_argument, NULL, 'm' },
+ { "nomount", no_argument, NULL, 'n' },
{ 0 }
};
int opt, ret;
@@ -352,13 +323,14 @@ int main(int argc, char **argv)
pretty_output = false;
json_output = false;
show_pinned = false;
+ block_mount = false;
bin_name = argv[0];
hash_init(prog_table.table);
hash_init(map_table.table);
opterr = 0;
- while ((opt = getopt_long(argc, argv, "Vhpjfm",
+ while ((opt = getopt_long(argc, argv, "Vhpjfmn",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
@@ -385,6 +357,9 @@ int main(int argc, char **argv)
case 'm':
bpf_flags = MAPS_RELAX_COMPAT;
break;
+ case 'n':
+ block_mount = true;
+ break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
@@ -399,8 +374,6 @@ int main(int argc, char **argv)
if (argc < 0)
usage();
- bfd_init();
-
ret = cmd_select(cmds, argc, argv, do_help);
if (json_output)
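
Note: the option table and getopt string above gain -n as the short form of --nomount; for illustration (id and pin path hypothetical):

    bpftool -n prog pin id 7 /sys/fs/bpf/prog7    # same as --nomount
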
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 28322ace2856..052c91d4dc55 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef __BPF_TOOL_H
#define __BPF_TOOL_H
@@ -74,10 +44,37 @@
#define HELP_SPEC_PROGRAM \
"PROG := { id PROG_ID | pinned FILE | tag PROG_TAG }"
#define HELP_SPEC_OPTIONS \
- "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} | {-m|--mapcompat}"
+ "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} |\n" \
+ "\t {-m|--mapcompat} | {-n|--nomount} }"
#define HELP_SPEC_MAP \
"MAP := { id MAP_ID | pinned FILE }"
+static const char * const prog_type_name[] = {
+ [BPF_PROG_TYPE_UNSPEC] = "unspec",
+ [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
+ [BPF_PROG_TYPE_KPROBE] = "kprobe",
+ [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
+ [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
+ [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
+ [BPF_PROG_TYPE_XDP] = "xdp",
+ [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
+ [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
+ [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
+ [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
+ [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
+ [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
+ [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
+ [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
+ [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
+ [BPF_PROG_TYPE_SK_MSG] = "sk_msg",
+ [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
+ [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
+ [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
+ [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
+ [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
+ [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
+};
+
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
BPF_OBJ_PROG,
@@ -89,6 +86,7 @@ extern const char *bin_name;
extern json_writer_t *json_wtr;
extern bool json_output;
extern bool show_pinned;
+extern bool block_mount;
extern int bpf_flags;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;
@@ -100,6 +98,10 @@ bool is_prefix(const char *pfx, const char *str);
void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
void usage(void) __noreturn;
+void set_max_rlimit(void);
+
+int mount_tracefs(const char *target);
+
struct pinned_obj_table {
DECLARE_HASHTABLE(table, 16);
};
@@ -110,6 +112,9 @@ struct pinned_obj {
struct hlist_node hash;
};
+struct btf;
+struct bpf_line_info;
+
int build_pinned_obj_table(struct pinned_obj_table *table,
enum bpf_obj_type type);
void delete_pinned_obj_table(struct pinned_obj_table *tab);
@@ -127,8 +132,9 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv,
int get_fd_type(int fd);
const char *get_fd_type_name(enum bpf_obj_type type);
char *get_fdinfo(int fd, const char *key);
-int open_obj_pinned(char *path);
+int open_obj_pinned(char *path, bool quiet);
int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
+int mount_bpffs_for_pin(const char *name);
int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
int do_pin_fd(int fd, const char *name);
@@ -138,14 +144,38 @@ int do_event_pipe(int argc, char **argv);
int do_cgroup(int argc, char **arg);
int do_perf(int argc, char **arg);
int do_net(int argc, char **arg);
+int do_tracelog(int argc, char **arg);
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
int map_parse_fd(int *argc, char ***argv);
int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
+struct bpf_prog_linfo;
+#ifdef HAVE_LIBBFD_SUPPORT
+void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum);
+int disasm_init(void);
+#else
+static inline
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
- const char *arch, const char *disassembler_options);
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum)
+{
+}
+static inline int disasm_init(void)
+{
+ p_err("No libbfd support");
+ return -1;
+}
+#endif
void print_data_json(uint8_t *data, size_t len);
void print_hex_data_json(uint8_t *data, size_t len);
@@ -170,6 +200,14 @@ struct btf_dumper {
*/
int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
const void *data);
+void btf_dumper_type_only(const struct btf *btf, __u32 func_type_id,
+ char *func_only, int size);
+
+void btf_dump_linfo_plain(const struct btf *btf,
+ const struct bpf_line_info *linfo,
+ const char *prefix, bool linum);
+void btf_dump_linfo_json(const struct btf *btf,
+ const struct bpf_line_info *linfo, bool linum);
struct nlattr;
struct ifinfomsg;
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 7bf38f0e152e..2037e3dc864b 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <assert.h>
#include <errno.h>
@@ -52,28 +22,30 @@
#include "main.h"
static const char * const map_type_name[] = {
- [BPF_MAP_TYPE_UNSPEC] = "unspec",
- [BPF_MAP_TYPE_HASH] = "hash",
- [BPF_MAP_TYPE_ARRAY] = "array",
- [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
- [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
- [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
- [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
- [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
- [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
- [BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
- [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
- [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
- [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
- [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
- [BPF_MAP_TYPE_DEVMAP] = "devmap",
- [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
- [BPF_MAP_TYPE_CPUMAP] = "cpumap",
- [BPF_MAP_TYPE_XSKMAP] = "xskmap",
- [BPF_MAP_TYPE_SOCKHASH] = "sockhash",
- [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
- [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
+ [BPF_MAP_TYPE_UNSPEC] = "unspec",
+ [BPF_MAP_TYPE_HASH] = "hash",
+ [BPF_MAP_TYPE_ARRAY] = "array",
+ [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
+ [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
+ [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
+ [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
+ [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
+ [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
+ [BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
+ [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
+ [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
+ [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
+ [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
+ [BPF_MAP_TYPE_DEVMAP] = "devmap",
+ [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
+ [BPF_MAP_TYPE_CPUMAP] = "cpumap",
+ [BPF_MAP_TYPE_XSKMAP] = "xskmap",
+ [BPF_MAP_TYPE_SOCKHASH] = "sockhash",
+ [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
+ [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
[BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
+ [BPF_MAP_TYPE_QUEUE] = "queue",
+ [BPF_MAP_TYPE_STACK] = "stack",
};
static bool map_is_per_cpu(__u32 type)
@@ -215,70 +187,6 @@ err_end_obj:
return ret;
}
-static int get_btf(struct bpf_map_info *map_info, struct btf **btf)
-{
- struct bpf_btf_info btf_info = { 0 };
- __u32 len = sizeof(btf_info);
- __u32 last_size;
- int btf_fd;
- void *ptr;
- int err;
-
- err = 0;
- *btf = NULL;
- btf_fd = bpf_btf_get_fd_by_id(map_info->btf_id);
- if (btf_fd < 0)
- return 0;
-
- /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
- * let's start with a sane default - 4KiB here - and resize it only if
- * bpf_obj_get_info_by_fd() needs a bigger buffer.
- */
- btf_info.btf_size = 4096;
- last_size = btf_info.btf_size;
- ptr = malloc(last_size);
- if (!ptr) {
- err = -ENOMEM;
- goto exit_free;
- }
-
- bzero(ptr, last_size);
- btf_info.btf = ptr_to_u64(ptr);
- err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
-
- if (!err && btf_info.btf_size > last_size) {
- void *temp_ptr;
-
- last_size = btf_info.btf_size;
- temp_ptr = realloc(ptr, last_size);
- if (!temp_ptr) {
- err = -ENOMEM;
- goto exit_free;
- }
- ptr = temp_ptr;
- bzero(ptr, last_size);
- btf_info.btf = ptr_to_u64(ptr);
- err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
- }
-
- if (err || btf_info.btf_size > last_size) {
- err = errno;
- goto exit_free;
- }
-
- *btf = btf__new((__u8 *)btf_info.btf, btf_info.btf_size, NULL);
- if (IS_ERR(*btf)) {
- err = PTR_ERR(*btf);
- *btf = NULL;
- }
-
-exit_free:
- close(btf_fd);
- free(ptr);
-
- return err;
-}
-
static json_writer_t *get_btf_writer(void)
{
json_writer_t *jw = jsonw_new(stdout);
@@ -383,7 +291,10 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
printf(single_line ? " " : "\n");
printf("value:%c", break_names ? '\n' : ' ');
- fprint_hex(stdout, value, info->value_size, " ");
+ if (value)
+ fprint_hex(stdout, value, info->value_size, " ");
+ else
+ printf("<no entry>");
printf("\n");
} else {
@@ -398,8 +309,11 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
for (i = 0; i < n; i++) {
printf("value (CPU %02d):%c",
i, info->value_size > 16 ? '\n' : ' ');
- fprint_hex(stdout, value + i * step,
- info->value_size, " ");
+ if (value)
+ fprint_hex(stdout, value + i * step,
+ info->value_size, " ");
+ else
+ printf("<no entry>");
printf("\n");
}
}
@@ -543,7 +457,6 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
char *memlock;
memlock = get_fdinfo(fd, "memlock");
- close(fd);
jsonw_start_object(json_wtr);
@@ -570,6 +483,30 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
free(memlock);
+ if (info->type == BPF_MAP_TYPE_PROG_ARRAY) {
+ char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
+ char *owner_jited = get_fdinfo(fd, "owner_jited");
+
+ if (owner_prog_type) {
+ unsigned int prog_type = atoi(owner_prog_type);
+
+ if (prog_type < ARRAY_SIZE(prog_type_name))
+ jsonw_string_field(json_wtr, "owner_prog_type",
+ prog_type_name[prog_type]);
+ else
+ jsonw_uint_field(json_wtr, "owner_prog_type",
+ prog_type);
+ }
+ if (atoi(owner_jited))
+ jsonw_bool_field(json_wtr, "owner_jited", true);
+ else
+ jsonw_bool_field(json_wtr, "owner_jited", false);
+
+ free(owner_prog_type);
+ free(owner_jited);
+ }
+ close(fd);
+
if (!hash_empty(map_table.table)) {
struct pinned_obj *obj;
@@ -592,7 +529,6 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
char *memlock;
memlock = get_fdinfo(fd, "memlock");
- close(fd);
printf("%u: ", info->id);
if (info->type < ARRAY_SIZE(map_type_name))
@@ -613,6 +549,30 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
printf(" memlock %sB", memlock);
free(memlock);
+ if (info->type == BPF_MAP_TYPE_PROG_ARRAY) {
+ char *owner_prog_type = get_fdinfo(fd, "owner_prog_type");
+ char *owner_jited = get_fdinfo(fd, "owner_jited");
+
+ printf("\n\t");
+ if (owner_prog_type) {
+ unsigned int prog_type = atoi(owner_prog_type);
+
+ if (prog_type < ARRAY_SIZE(prog_type_name))
+ printf("owner_prog_type %s ",
+ prog_type_name[prog_type]);
+ else
+ printf("owner_prog_type %d ", prog_type);
+ }
+ if (atoi(owner_jited))
+ printf("owner jited");
+ else
+ printf("owner not jited");
+
+ free(owner_prog_type);
+ free(owner_jited);
+ }
+ close(fd);
+
printf("\n");
if (!hash_empty(map_table.table)) {
struct pinned_obj *obj;
@@ -731,7 +691,11 @@ static int dump_map_elem(int fd, void *key, void *value,
jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
jsonw_end_object(json_wtr);
} else {
- print_entry_error(map_info, key, strerror(lookup_errno));
+ if (errno == ENOENT)
+ print_entry_plain(map_info, key, NULL);
+ else
+ print_entry_error(map_info, key,
+ strerror(lookup_errno));
}
return 0;
@@ -765,7 +729,7 @@ static int do_dump(int argc, char **argv)
prev_key = NULL;
- err = get_btf(&info, &btf);
+ err = btf__get_from_id(info.btf_id, &btf);
if (err) {
p_err("failed to get btf");
goto exit_free;
@@ -909,7 +873,7 @@ static int do_lookup(int argc, char **argv)
}
/* here means bpf_map_lookup_elem() succeeded */
- err = get_btf(&info, &btf);
+ err = btf__get_from_id(info.btf_id, &btf);
if (err) {
p_err("failed to get btf");
goto exit_free;
@@ -1140,6 +1104,8 @@ static int do_create(int argc, char **argv)
return -1;
}
+ set_max_rlimit();
+
fd = bpf_create_map_xattr(&attr);
if (fd < 0) {
p_err("map create failed: %s", strerror(errno));
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index bdaf4062e26e..0507dfaf7a8f 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
/* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index d441bb7035ca..db0e7de49d49 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2018 Facebook
#define _GNU_SOURCE
diff --git a/tools/bpf/bpftool/netlink_dumper.c b/tools/bpf/bpftool/netlink_dumper.c
index 4e9f4531269f..550a0f537eed 100644
--- a/tools/bpf/bpftool/netlink_dumper.c
+++ b/tools/bpf/bpftool/netlink_dumper.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2018 Facebook
#include <stdlib.h>
diff --git a/tools/bpf/bpftool/netlink_dumper.h b/tools/bpf/bpftool/netlink_dumper.h
index e3516b586a34..774af6c62ef5 100644
--- a/tools/bpf/bpftool/netlink_dumper.h
+++ b/tools/bpf/bpftool/netlink_dumper.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
// Copyright (C) 2018 Facebook
#ifndef _NETLINK_DUMPER_H_
diff --git a/tools/bpf/bpftool/perf.c b/tools/bpf/bpftool/perf.c
index b76b77dcfd1f..f2a545e667c4 100644
--- a/tools/bpf/bpftool/perf.c
+++ b/tools/bpf/bpftool/perf.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2018 Facebook
// Author: Yonghong Song <yhs@fb.com>
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 5302ee282409..2d1bb7d6ff51 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017-2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#define _GNU_SOURCE
#include <errno.h>
@@ -47,44 +17,22 @@
#include <linux/err.h>
#include <bpf.h>
+#include <btf.h>
#include <libbpf.h>
#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"
-static const char * const prog_type_name[] = {
- [BPF_PROG_TYPE_UNSPEC] = "unspec",
- [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
- [BPF_PROG_TYPE_KPROBE] = "kprobe",
- [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
- [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
- [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
- [BPF_PROG_TYPE_XDP] = "xdp",
- [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
- [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
- [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
- [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
- [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
- [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
- [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
- [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
- [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
- [BPF_PROG_TYPE_SK_MSG] = "sk_msg",
- [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
- [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
- [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
- [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
-};
-
static const char * const attach_type_strings[] = {
[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
[BPF_SK_MSG_VERDICT] = "msg_verdict",
+ [BPF_FLOW_DISSECTOR] = "flow_dissector",
[__MAX_BPF_ATTACH_TYPE] = NULL,
};
-enum bpf_attach_type parse_attach_type(const char *str)
+static enum bpf_attach_type parse_attach_type(const char *str)
{
enum bpf_attach_type type;
@@ -357,10 +305,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
if (!hash_empty(prog_table.table)) {
struct pinned_obj *obj;
- printf("\n");
hash_for_each_possible(prog_table.table, obj, hash, info->id) {
if (obj->id == info->id)
- printf("\tpinned %s\n", obj->path);
+ printf("\n\tpinned %s", obj->path);
}
}
@@ -446,6 +393,10 @@ static int do_show(int argc, char **argv)
static int do_dump(int argc, char **argv)
{
+ unsigned int finfo_rec_size, linfo_rec_size, jited_linfo_rec_size;
+ void *func_info = NULL, *linfo = NULL, *jited_linfo = NULL;
+ unsigned int nr_finfo, nr_linfo = 0, nr_jited_linfo = 0;
+ struct bpf_prog_linfo *prog_linfo = NULL;
unsigned long *func_ksyms = NULL;
struct bpf_prog_info info = {};
unsigned int *func_lens = NULL;
@@ -454,11 +405,14 @@ static int do_dump(int argc, char **argv)
unsigned int nr_func_lens;
struct dump_data dd = {};
__u32 len = sizeof(info);
+ struct btf *btf = NULL;
unsigned int buf_size;
char *filepath = NULL;
bool opcodes = false;
bool visual = false;
+ char func_sig[1024];
unsigned char *buf;
+ bool linum = false;
__u32 *member_len;
__u64 *member_ptr;
ssize_t n;
@@ -466,6 +420,9 @@ static int do_dump(int argc, char **argv)
int fd;
if (is_prefix(*argv, "jited")) {
+ if (disasm_init())
+ return -1;
+
member_len = &info.jited_prog_len;
member_ptr = &info.jited_prog_insns;
} else if (is_prefix(*argv, "xlated")) {
@@ -499,6 +456,9 @@ static int do_dump(int argc, char **argv)
} else if (is_prefix(*argv, "visual")) {
visual = true;
NEXT_ARG();
+ } else if (is_prefix(*argv, "linum")) {
+ linum = true;
+ NEXT_ARG();
}
if (argc) {
@@ -547,6 +507,43 @@ static int do_dump(int argc, char **argv)
}
}
+ nr_finfo = info.nr_func_info;
+ finfo_rec_size = info.func_info_rec_size;
+ if (nr_finfo && finfo_rec_size) {
+ func_info = malloc(nr_finfo * finfo_rec_size);
+ if (!func_info) {
+ p_err("mem alloc failed");
+ close(fd);
+ goto err_free;
+ }
+ }
+
+ linfo_rec_size = info.line_info_rec_size;
+ if (info.nr_line_info && linfo_rec_size && info.btf_id) {
+ nr_linfo = info.nr_line_info;
+ linfo = malloc(nr_linfo * linfo_rec_size);
+ if (!linfo) {
+ p_err("mem alloc failed");
+ close(fd);
+ goto err_free;
+ }
+ }
+
+ jited_linfo_rec_size = info.jited_line_info_rec_size;
+ if (info.nr_jited_line_info &&
+ jited_linfo_rec_size &&
+ info.nr_jited_ksyms &&
+ info.nr_jited_func_lens &&
+ info.btf_id) {
+ nr_jited_linfo = info.nr_jited_line_info;
+ jited_linfo = malloc(nr_jited_linfo * jited_linfo_rec_size);
+ if (!jited_linfo) {
+ p_err("mem alloc failed");
+ close(fd);
+ goto err_free;
+ }
+ }
+
memset(&info, 0, sizeof(info));
*member_ptr = ptr_to_u64(buf);
@@ -555,6 +552,15 @@ static int do_dump(int argc, char **argv)
info.nr_jited_ksyms = nr_func_ksyms;
info.jited_func_lens = ptr_to_u64(func_lens);
info.nr_jited_func_lens = nr_func_lens;
+ info.nr_func_info = nr_finfo;
+ info.func_info_rec_size = finfo_rec_size;
+ info.func_info = ptr_to_u64(func_info);
+ info.nr_line_info = nr_linfo;
+ info.line_info_rec_size = linfo_rec_size;
+ info.line_info = ptr_to_u64(linfo);
+ info.nr_jited_line_info = nr_jited_linfo;
+ info.jited_line_info_rec_size = jited_linfo_rec_size;
+ info.jited_line_info = ptr_to_u64(jited_linfo);
err = bpf_obj_get_info_by_fd(fd, &info, &len);
close(fd);
@@ -578,6 +584,42 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
+ if (info.nr_func_info != nr_finfo) {
+ p_err("incorrect nr_func_info %d vs. expected %d",
+ info.nr_func_info, nr_finfo);
+ goto err_free;
+ }
+
+ if (info.func_info_rec_size != finfo_rec_size) {
+ p_err("incorrect func_info_rec_size %d vs. expected %d",
+ info.func_info_rec_size, finfo_rec_size);
+ goto err_free;
+ }
+
+ if (linfo && info.nr_line_info != nr_linfo) {
+ p_err("incorrect nr_line_info %u vs. expected %u",
+ info.nr_line_info, nr_linfo);
+ goto err_free;
+ }
+
+ if (info.line_info_rec_size != linfo_rec_size) {
+ p_err("incorrect line_info_rec_size %u vs. expected %u",
+ info.line_info_rec_size, linfo_rec_size);
+ goto err_free;
+ }
+
+ if (jited_linfo && info.nr_jited_line_info != nr_jited_linfo) {
+ p_err("incorrect nr_jited_line_info %u vs. expected %u",
+ info.nr_jited_line_info, nr_jited_linfo);
+ goto err_free;
+ }
+
+ if (info.jited_line_info_rec_size != jited_linfo_rec_size) {
+ p_err("incorrect jited_line_info_rec_size %u vs. expected %u",
+ info.jited_line_info_rec_size, jited_linfo_rec_size);
+ goto err_free;
+ }
+
if ((member_len == &info.jited_prog_len &&
info.jited_prog_insns == 0) ||
(member_len == &info.xlated_prog_len &&
@@ -586,6 +628,17 @@ static int do_dump(int argc, char **argv)
goto err_free;
}
+ if (info.btf_id && btf__get_from_id(info.btf_id, &btf)) {
+ p_err("failed to get btf");
+ goto err_free;
+ }
+
+ if (nr_linfo) {
+ prog_linfo = bpf_prog_linfo__new(&info);
+ if (!prog_linfo)
+ p_info("error in processing bpf_line_info. continue without it.");
+ }
+
if (filepath) {
fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
if (fd < 0) {
@@ -618,6 +671,7 @@ static int do_dump(int argc, char **argv)
if (info.nr_jited_func_lens && info.jited_func_lens) {
struct kernel_sym *sym = NULL;
+ struct bpf_func_info *record;
char sym_name[SYM_MAX_NAME];
unsigned char *img = buf;
__u64 *ksyms = NULL;
@@ -644,17 +698,33 @@ static int do_dump(int argc, char **argv)
strcpy(sym_name, "unknown");
}
+ if (func_info) {
+ record = func_info + i * finfo_rec_size;
+ btf_dumper_type_only(btf, record->type_id,
+ func_sig,
+ sizeof(func_sig));
+ }
+
if (json_output) {
jsonw_start_object(json_wtr);
+ if (func_info && func_sig[0] != '\0') {
+ jsonw_name(json_wtr, "proto");
+ jsonw_string(json_wtr, func_sig);
+ }
jsonw_name(json_wtr, "name");
jsonw_string(json_wtr, sym_name);
jsonw_name(json_wtr, "insns");
} else {
+ if (func_info && func_sig[0] != '\0')
+ printf("%s:\n", func_sig);
printf("%s:\n", sym_name);
}
- disasm_print_insn(img, lens[i], opcodes, name,
- disasm_opt);
+ disasm_print_insn(img, lens[i], opcodes,
+ name, disasm_opt, btf,
+ prog_linfo, ksyms[i], i,
+ linum);
+
img += lens[i];
if (json_output)
@@ -667,7 +737,7 @@ static int do_dump(int argc, char **argv)
jsonw_end_array(json_wtr);
} else {
disasm_print_insn(buf, *member_len, opcodes, name,
- disasm_opt);
+ disasm_opt, btf, NULL, 0, 0, false);
}
} else if (visual) {
if (json_output)
@@ -678,23 +748,37 @@ static int do_dump(int argc, char **argv)
kernel_syms_load(&dd);
dd.nr_jited_ksyms = info.nr_jited_ksyms;
dd.jited_ksyms = (__u64 *) info.jited_ksyms;
+ dd.btf = btf;
+ dd.func_info = func_info;
+ dd.finfo_rec_size = finfo_rec_size;
+ dd.prog_linfo = prog_linfo;
if (json_output)
- dump_xlated_json(&dd, buf, *member_len, opcodes);
+ dump_xlated_json(&dd, buf, *member_len, opcodes,
+ linum);
else
- dump_xlated_plain(&dd, buf, *member_len, opcodes);
+ dump_xlated_plain(&dd, buf, *member_len, opcodes,
+ linum);
kernel_syms_destroy(&dd);
}
free(buf);
free(func_ksyms);
free(func_lens);
+ free(func_info);
+ free(linfo);
+ free(jited_linfo);
+ bpf_prog_linfo__free(prog_linfo);
return 0;
err_free:
free(buf);
free(func_ksyms);
free(func_lens);
+ free(func_info);
+ free(linfo);
+ free(jited_linfo);
+ bpf_prog_linfo__free(prog_linfo);
return -1;
}
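
Note: with func_info and line_info now requested from the kernel, do_dump() can print a reconstructed function prototype (via btf_dumper_type_only()) ahead of each function and interleave source lines. Illustrative plain xlated output, with all names hypothetical:

    int xdp_prog(struct xdp_md * ctx):
    ; return XDP_PASS;
       0: (b7) r0 = 2
       1: (95) exit
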
@@ -714,37 +798,56 @@ struct map_replace {
char *name;
};
-int map_replace_compar(const void *p1, const void *p2)
+static int map_replace_compar(const void *p1, const void *p2)
{
const struct map_replace *a = p1, *b = p2;
return a->idx - b->idx;
}
-static int do_attach(int argc, char **argv)
+static int parse_attach_detach_args(int argc, char **argv, int *progfd,
+ enum bpf_attach_type *attach_type,
+ int *mapfd)
{
- enum bpf_attach_type attach_type;
- int err, mapfd, progfd;
-
- if (!REQ_ARGS(5)) {
- p_err("too few parameters for map attach");
+ if (!REQ_ARGS(3))
return -EINVAL;
- }
- progfd = prog_parse_fd(&argc, &argv);
- if (progfd < 0)
- return progfd;
+ *progfd = prog_parse_fd(&argc, &argv);
+ if (*progfd < 0)
+ return *progfd;
- attach_type = parse_attach_type(*argv);
- if (attach_type == __MAX_BPF_ATTACH_TYPE) {
- p_err("invalid attach type");
+ *attach_type = parse_attach_type(*argv);
+ if (*attach_type == __MAX_BPF_ATTACH_TYPE) {
+ p_err("invalid attach/detach type");
return -EINVAL;
}
+
+ if (*attach_type == BPF_FLOW_DISSECTOR) {
+ *mapfd = -1;
+ return 0;
+ }
+
NEXT_ARG();
+ if (!REQ_ARGS(2))
+ return -EINVAL;
+
+ *mapfd = map_parse_fd(&argc, &argv);
+ if (*mapfd < 0)
+ return *mapfd;
+
+ return 0;
+}
- mapfd = map_parse_fd(&argc, &argv);
- if (mapfd < 0)
- return mapfd;
+static int do_attach(int argc, char **argv)
+{
+ enum bpf_attach_type attach_type;
+ int err, progfd;
+ int mapfd;
+
+ err = parse_attach_detach_args(argc, argv,
+ &progfd, &attach_type, &mapfd);
+ if (err)
+ return err;
err = bpf_prog_attach(progfd, mapfd, attach_type, 0);
if (err) {
@@ -760,27 +863,13 @@ static int do_attach(int argc, char **argv)
static int do_detach(int argc, char **argv)
{
enum bpf_attach_type attach_type;
- int err, mapfd, progfd;
-
- if (!REQ_ARGS(5)) {
- p_err("too few parameters for map detach");
- return -EINVAL;
- }
+ int err, progfd;
+ int mapfd;
- progfd = prog_parse_fd(&argc, &argv);
- if (progfd < 0)
- return progfd;
-
- attach_type = parse_attach_type(*argv);
- if (attach_type == __MAX_BPF_ATTACH_TYPE) {
- p_err("invalid attach type");
- return -EINVAL;
- }
- NEXT_ARG();
-
- mapfd = map_parse_fd(&argc, &argv);
- if (mapfd < 0)
- return mapfd;
+ err = parse_attach_detach_args(argc, argv,
+ &progfd, &attach_type, &mapfd);
+ if (err)
+ return err;
err = bpf_prog_detach2(progfd, mapfd, attach_type);
if (err) {
@@ -792,15 +881,17 @@ static int do_detach(int argc, char **argv)
jsonw_null(json_wtr);
return 0;
}
-static int do_load(int argc, char **argv)
+
+static int load_with_options(int argc, char **argv, bool first_prog_only)
{
enum bpf_attach_type expected_attach_type;
struct bpf_object_open_attr attr = {
.prog_type = BPF_PROG_TYPE_UNSPEC,
};
struct map_replace *map_replace = NULL;
+ struct bpf_program *prog = NULL, *pos;
unsigned int old_map_fds = 0;
- struct bpf_program *prog;
+ const char *pinmaps = NULL;
struct bpf_object *obj;
struct bpf_map *map;
const char *pinfile;
@@ -845,6 +936,7 @@ static int do_load(int argc, char **argv)
}
NEXT_ARG();
} else if (is_prefix(*argv, "map")) {
+ void *new_map_replace;
char *endptr, *name;
int fd;
@@ -878,12 +970,15 @@ static int do_load(int argc, char **argv)
if (fd < 0)
goto err_free_reuse_maps;
- map_replace = reallocarray(map_replace, old_map_fds + 1,
- sizeof(*map_replace));
- if (!map_replace) {
+ new_map_replace = reallocarray(map_replace,
+ old_map_fds + 1,
+ sizeof(*map_replace));
+ if (!new_map_replace) {
p_err("mem alloc failed");
goto err_free_reuse_maps;
}
+ map_replace = new_map_replace;
+
map_replace[old_map_fds].idx = idx;
map_replace[old_map_fds].name = name;
map_replace[old_map_fds].fd = fd;
@@ -905,6 +1000,13 @@ static int do_load(int argc, char **argv)
goto err_free_reuse_maps;
}
NEXT_ARG();
+ } else if (is_prefix(*argv, "pinmaps")) {
+ NEXT_ARG();
+
+ if (!REQ_ARGS(1))
+ goto err_free_reuse_maps;
+
+ pinmaps = GET_ARG();
} else {
p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
*argv);
@@ -918,26 +1020,25 @@ static int do_load(int argc, char **argv)
goto err_free_reuse_maps;
}
- prog = bpf_program__next(NULL, obj);
- if (!prog) {
- p_err("object file doesn't contain any bpf program");
- goto err_close_obj;
- }
+ bpf_object__for_each_program(pos, obj) {
+ enum bpf_prog_type prog_type = attr.prog_type;
- bpf_program__set_ifindex(prog, ifindex);
- if (attr.prog_type == BPF_PROG_TYPE_UNSPEC) {
- const char *sec_name = bpf_program__title(prog, false);
+ if (attr.prog_type == BPF_PROG_TYPE_UNSPEC) {
+ const char *sec_name = bpf_program__title(pos, false);
- err = libbpf_prog_type_by_name(sec_name, &attr.prog_type,
- &expected_attach_type);
- if (err < 0) {
- p_err("failed to guess program type based on section name %s\n",
- sec_name);
- goto err_close_obj;
+ err = libbpf_prog_type_by_name(sec_name, &prog_type,
+ &expected_attach_type);
+ if (err < 0) {
+ p_err("failed to guess program type based on section name %s\n",
+ sec_name);
+ goto err_close_obj;
+ }
}
+
+ bpf_program__set_ifindex(pos, ifindex);
+ bpf_program__set_type(pos, prog_type);
+ bpf_program__set_expected_attach_type(pos, expected_attach_type);
}
- bpf_program__set_type(prog, attr.prog_type);
- bpf_program__set_expected_attach_type(prog, expected_attach_type);
qsort(map_replace, old_map_fds, sizeof(*map_replace),
map_replace_compar);
@@ -995,15 +1096,47 @@ static int do_load(int argc, char **argv)
goto err_close_obj;
}
+ set_max_rlimit();
+
err = bpf_object__load(obj);
if (err) {
p_err("failed to load object file");
goto err_close_obj;
}
- if (do_pin_fd(bpf_program__fd(prog), pinfile))
+ err = mount_bpffs_for_pin(pinfile);
+ if (err)
goto err_close_obj;
+ if (first_prog_only) {
+ prog = bpf_program__next(NULL, obj);
+ if (!prog) {
+ p_err("object file doesn't contain any bpf program");
+ goto err_close_obj;
+ }
+
+ err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
+ if (err) {
+ p_err("failed to pin program %s",
+ bpf_program__title(prog, false));
+ goto err_close_obj;
+ }
+ } else {
+ err = bpf_object__pin_programs(obj, pinfile);
+ if (err) {
+ p_err("failed to pin all programs");
+ goto err_close_obj;
+ }
+ }
+
+ if (pinmaps) {
+ err = bpf_object__pin_maps(obj, pinmaps);
+ if (err) {
+ p_err("failed to pin all maps");
+ goto err_unpin;
+ }
+ }
+
if (json_output)
jsonw_null(json_wtr);
@@ -1014,6 +1147,11 @@ static int do_load(int argc, char **argv)
return 0;
+err_unpin:
+ if (first_prog_only)
+ unlink(pinfile);
+ else
+ bpf_object__unpin_programs(obj, pinfile);
err_close_obj:
bpf_object__close(obj);
err_free_reuse_maps:
@@ -1023,6 +1161,16 @@ err_free_reuse_maps:
return -1;
}
+static int do_load(int argc, char **argv)
+{
+ return load_with_options(argc, argv, true);
+}
+
+static int do_loadall(int argc, char **argv)
+{
+ return load_with_options(argc, argv, false);
+}
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -1032,13 +1180,16 @@ static int do_help(int argc, char **argv)
fprintf(stderr,
"Usage: %s %s { show | list } [PROG]\n"
- " %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n"
- " %s %s dump jited PROG [{ file FILE | opcodes }]\n"
+ " %s %s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
+ " %s %s dump jited PROG [{ file FILE | opcodes | linum }]\n"
" %s %s pin PROG FILE\n"
- " %s %s load OBJ FILE [type TYPE] [dev NAME] \\\n"
- " [map { idx IDX | name NAME } MAP]\n"
- " %s %s attach PROG ATTACH_TYPE MAP\n"
- " %s %s detach PROG ATTACH_TYPE MAP\n"
+ " %s %s { load | loadall } OBJ PATH \\\n"
+ " [type TYPE] [dev NAME] \\\n"
+ " [map { idx IDX | name NAME } MAP]\\\n"
+ " [pinmaps MAP_DIR]\n"
+ " %s %s attach PROG ATTACH_TYPE [MAP]\n"
+ " %s %s detach PROG ATTACH_TYPE [MAP]\n"
+ " %s %s tracelog\n"
" %s %s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
@@ -1047,15 +1198,17 @@ static int do_help(int argc, char **argv)
" tracepoint | raw_tracepoint | xdp | perf_event | cgroup/skb |\n"
" cgroup/sock | cgroup/dev | lwt_in | lwt_out | lwt_xmit |\n"
" lwt_seg6local | sockops | sk_skb | sk_msg | lirc_mode2 |\n"
+ " sk_reuseport | flow_dissector |\n"
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
" cgroup/sendmsg4 | cgroup/sendmsg6 }\n"
- " ATTACH_TYPE := { msg_verdict | skb_verdict | skb_parse }\n"
+ " ATTACH_TYPE := { msg_verdict | skb_verdict | skb_parse |\n"
+ " flow_dissector }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2]);
+ bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]);
return 0;
}
@@ -1067,8 +1220,10 @@ static const struct cmd cmds[] = {
{ "dump", do_dump },
{ "pin", do_pin },
{ "load", do_load },
+ { "loadall", do_loadall },
{ "attach", do_attach },
{ "detach", do_detach },
+ { "tracelog", do_tracelog },
{ 0 }
};
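The reworked load path above reduces to a short libbpf sequence: open the object, load it, then pin either the first program (load) or every program (loadall), plus the maps when pinmaps is given. Below is a minimal user-space sketch of that flow, assuming only the libbpf calls visible in this patch (bpf_object__pin_programs, bpf_object__pin_maps) plus the long-standing open/load/close API; the object name and pin paths are placeholders:

    /* A minimal sketch of what "bpftool prog loadall OBJ PATH pinmaps DIR"
     * does through libbpf directly; error handling is abbreviated and the
     * object/pin paths are placeholders.
     */
    #include <libbpf.h>	/* tools/lib/bpf; built with -I tools/lib/bpf */

    int load_and_pin_all(void)
    {
            struct bpf_object *obj;

            obj = bpf_object__open("prog.o");
            if (libbpf_get_error(obj))
                    return -1;

            if (bpf_object__load(obj))
                    goto err;

            /* one pinned file per program under this directory */
            if (bpf_object__pin_programs(obj, "/sys/fs/bpf/myprogs"))
                    goto err;

            /* the "pinmaps" option does the same for maps */
            if (bpf_object__pin_maps(obj, "/sys/fs/bpf/mymaps"))
                    goto err;

            bpf_object__close(obj);
            return 0;
    err:
            bpf_object__close(obj);
            return -1;
    }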
diff --git a/tools/bpf/bpftool/tracelog.c b/tools/bpf/bpftool/tracelog.c
new file mode 100644
index 000000000000..e80a5c79b38f
--- /dev/null
+++ b/tools/bpf/bpftool/tracelog.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (c) 2015-2017 Daniel Borkmann */
+/* Copyright (c) 2018 Netronome Systems, Inc. */
+
+#include <errno.h>
+#include <limits.h>
+#include <signal.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <linux/magic.h>
+#include <sys/fcntl.h>
+#include <sys/vfs.h>
+
+#include "main.h"
+
+#ifndef TRACEFS_MAGIC
+# define TRACEFS_MAGIC 0x74726163
+#endif
+
+#define _textify(x) #x
+#define textify(x) _textify(x)
+
+FILE *trace_pipe_fd;
+char *buff;
+
+static int validate_tracefs_mnt(const char *mnt, unsigned long magic)
+{
+ struct statfs st_fs;
+
+ if (statfs(mnt, &st_fs) < 0)
+ return -ENOENT;
+ if ((unsigned long)st_fs.f_type != magic)
+ return -ENOENT;
+
+ return 0;
+}
+
+static bool
+find_tracefs_mnt_single(unsigned long magic, char *mnt, const char *mntpt)
+{
+ size_t src_len;
+
+ if (validate_tracefs_mnt(mntpt, magic))
+ return false;
+
+ src_len = strlen(mntpt);
+ if (src_len + 1 >= PATH_MAX) {
+ p_err("tracefs mount point name too long");
+ return false;
+ }
+
+ strcpy(mnt, mntpt);
+ return true;
+}
+
+static bool get_tracefs_pipe(char *mnt)
+{
+ static const char * const known_mnts[] = {
+ "/sys/kernel/debug/tracing",
+ "/sys/kernel/tracing",
+ "/tracing",
+ "/trace",
+ };
+ const char *pipe_name = "/trace_pipe";
+ const char *fstype = "tracefs";
+ char type[100], format[32];
+ const char * const *ptr;
+ bool found = false;
+ FILE *fp;
+
+ for (ptr = known_mnts; ptr < known_mnts + ARRAY_SIZE(known_mnts); ptr++)
+ if (find_tracefs_mnt_single(TRACEFS_MAGIC, mnt, *ptr))
+ goto exit_found;
+
+ fp = fopen("/proc/mounts", "r");
+ if (!fp)
+ return false;
+
+ /* Allow room for NULL terminating byte and pipe file name */
+ snprintf(format, sizeof(format), "%%*s %%%zds %%99s %%*s %%*d %%*d\\n",
+ PATH_MAX - strlen(pipe_name) - 1);
+ while (fscanf(fp, format, mnt, type) == 2)
+ if (strcmp(type, fstype) == 0) {
+ found = true;
+ break;
+ }
+ fclose(fp);
+
+ /* The string from fscanf() might be truncated, check mnt is valid */
+ if (found && validate_tracefs_mnt(mnt, TRACEFS_MAGIC))
+ goto exit_found;
+
+ if (block_mount)
+ return false;
+
+ p_info("could not find tracefs, attempting to mount it now");
+ /* Most of the time, tracefs is automatically mounted by debugfs at
+ * /sys/kernel/debug/tracing when we try to access it. If we could not
+ * find it, it is likely that debugfs is not mounted. Let's give one
+ * attempt at mounting just tracefs at /sys/kernel/tracing.
+ */
+ strcpy(mnt, known_mnts[1]);
+ if (mount_tracefs(mnt))
+ return false;
+
+exit_found:
+ strcat(mnt, pipe_name);
+ return true;
+}
+
+static void exit_tracelog(int signum)
+{
+ fclose(trace_pipe_fd);
+ free(buff);
+
+ if (json_output) {
+ jsonw_end_array(json_wtr);
+ jsonw_destroy(&json_wtr);
+ }
+
+ exit(0);
+}
+
+int do_tracelog(int argc, char **argv)
+{
+ const struct sigaction act = {
+ .sa_handler = exit_tracelog
+ };
+ char trace_pipe[PATH_MAX];
+ size_t buff_len = 0;
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+
+ if (!get_tracefs_pipe(trace_pipe))
+ return -1;
+
+ trace_pipe_fd = fopen(trace_pipe, "r");
+ if (!trace_pipe_fd) {
+ p_err("could not open trace pipe: %s", strerror(errno));
+ return -1;
+ }
+
+ sigaction(SIGHUP, &act, NULL);
+ sigaction(SIGINT, &act, NULL);
+ sigaction(SIGTERM, &act, NULL);
+ while (1) {
+ ssize_t ret;
+
+ ret = getline(&buff, &buff_len, trace_pipe_fd);
+ if (ret <= 0) {
+ p_err("failed to read content from trace pipe: %s",
+ strerror(errno));
+ break;
+ }
+ if (json_output)
+ jsonw_string(json_wtr, buff);
+ else
+ printf("%s", buff);
+ }
+
+ fclose(trace_pipe_fd);
+ free(buff);
+ return -1;
+}
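get_tracefs_pipe() identifies tracefs by its filesystem magic rather than trusting a path alone, falling back to /proc/mounts and, as a last resort, mounting it. A standalone sketch of the statfs()-magic technique it relies on; the mount point probed here is only the first candidate from known_mnts, and TRACEFS_MAGIC gets the same fallback definition as in tracelog.c:

    #include <stdio.h>
    #include <sys/vfs.h>

    #ifndef TRACEFS_MAGIC
    # define TRACEFS_MAGIC 0x74726163
    #endif

    int main(void)
    {
            struct statfs st;

            if (statfs("/sys/kernel/debug/tracing", &st) < 0)
                    return 1;
            printf("tracefs %smounted here\n",
                   (unsigned long)st.f_type == TRACEFS_MAGIC ? "" : "not ");
            return 0;
    }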
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 3284759df98a..7073dbe1ff27 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -1,39 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#define _GNU_SOURCE
#include <stdarg.h>
@@ -41,6 +7,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
+#include <libbpf.h>
#include "disasm.h"
#include "json_writer.h"
@@ -114,7 +81,7 @@ struct kernel_sym *kernel_syms_search(struct dump_data *dd,
sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
}
-static void print_insn(void *private_data, const char *fmt, ...)
+static void __printf(2, 3) print_insn(void *private_data, const char *fmt, ...)
{
va_list args;
@@ -123,7 +90,7 @@ static void print_insn(void *private_data, const char *fmt, ...)
va_end(args);
}
-static void
+static void __printf(2, 3)
print_insn_for_graph(void *private_data, const char *fmt, ...)
{
char buf[64], *p;
@@ -154,7 +121,8 @@ print_insn_for_graph(void *private_data, const char *fmt, ...)
printf("%s", buf);
}
-static void print_insn_json(void *private_data, const char *fmt, ...)
+static void __printf(2, 3)
+print_insn_json(void *private_data, const char *fmt, ...)
{
unsigned int l = strlen(fmt);
char chomped_fmt[l];
@@ -234,19 +202,25 @@ static const char *print_imm(void *private_data,
}
void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
- bool opcodes)
+ bool opcodes, bool linum)
{
+ const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn_json,
.cb_call = print_call,
.cb_imm = print_imm,
.private_data = dd,
};
+ struct bpf_func_info *record;
struct bpf_insn *insn = buf;
+ struct btf *btf = dd->btf;
bool double_insn = false;
+ unsigned int nr_skip = 0;
+ char func_sig[1024];
unsigned int i;
jsonw_start_array(json_wtr);
+ record = dd->func_info;
for (i = 0; i < len / sizeof(*insn); i++) {
if (double_insn) {
double_insn = false;
@@ -255,6 +229,30 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
jsonw_start_object(json_wtr);
+
+ if (btf && record) {
+ if (record->insn_off == i) {
+ btf_dumper_type_only(btf, record->type_id,
+ func_sig,
+ sizeof(func_sig));
+ if (func_sig[0] != '\0') {
+ jsonw_name(json_wtr, "proto");
+ jsonw_string(json_wtr, func_sig);
+ }
+ record = (void *)record + dd->finfo_rec_size;
+ }
+ }
+
+ if (prog_linfo) {
+ const struct bpf_line_info *linfo;
+
+ linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
+ if (linfo) {
+ btf_dump_linfo_json(btf, linfo, linum);
+ nr_skip++;
+ }
+ }
+
jsonw_name(json_wtr, "disasm");
print_bpf_insn(&cbs, insn + i, true);
@@ -289,24 +287,52 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
}
void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
- bool opcodes)
+ bool opcodes, bool linum)
{
+ const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn,
.cb_call = print_call,
.cb_imm = print_imm,
.private_data = dd,
};
+ struct bpf_func_info *record;
struct bpf_insn *insn = buf;
+ struct btf *btf = dd->btf;
+ unsigned int nr_skip = 0;
bool double_insn = false;
+ char func_sig[1024];
unsigned int i;
+ record = dd->func_info;
for (i = 0; i < len / sizeof(*insn); i++) {
if (double_insn) {
double_insn = false;
continue;
}
+ if (btf && record) {
+ if (record->insn_off == i) {
+ btf_dumper_type_only(btf, record->type_id,
+ func_sig,
+ sizeof(func_sig));
+ if (func_sig[0] != '\0')
+ printf("%s:\n", func_sig);
+ record = (void *)record + dd->finfo_rec_size;
+ }
+ }
+
+ if (prog_linfo) {
+ const struct bpf_line_info *linfo;
+
+ linfo = bpf_prog_linfo__lfind(prog_linfo, i, nr_skip);
+ if (linfo) {
+ btf_dump_linfo_plain(btf, linfo, "; ",
+ linum);
+ nr_skip++;
+ }
+ }
+
double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
printf("% 4d: ", i);
diff --git a/tools/bpf/bpftool/xlated_dumper.h b/tools/bpf/bpftool/xlated_dumper.h
index 33d86e2b369b..54847e174273 100644
--- a/tools/bpf/bpftool/xlated_dumper.h
+++ b/tools/bpf/bpftool/xlated_dumper.h
@@ -1,45 +1,13 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/*
- * Copyright (C) 2018 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * 1. Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __BPF_TOOL_XLATED_DUMPER_H
#define __BPF_TOOL_XLATED_DUMPER_H
#define SYM_MAX_NAME 256
+struct bpf_prog_linfo;
+
struct kernel_sym {
unsigned long address;
char name[SYM_MAX_NAME];
@@ -51,6 +19,10 @@ struct dump_data {
__u32 sym_count;
__u64 *jited_ksyms;
__u32 nr_jited_ksyms;
+ struct btf *btf;
+ void *func_info;
+ __u32 finfo_rec_size;
+ const struct bpf_prog_linfo *prog_linfo;
char scratch_buff[SYM_MAX_NAME + 8];
};
@@ -58,9 +30,9 @@ void kernel_syms_load(struct dump_data *dd);
void kernel_syms_destroy(struct dump_data *dd);
struct kernel_sym *kernel_syms_search(struct dump_data *dd, unsigned long key);
void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
- bool opcodes);
+ bool opcodes, bool linum);
void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
- bool opcodes);
+ bool opcodes, bool linum);
void dump_xlated_for_graph(struct dump_data *dd, void *buf, void *buf_end,
unsigned int start_index);
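The new dump_data fields follow the kernel's extensible-record convention: func_info is walked with the kernel-reported finfo_rec_size stride rather than sizeof(struct bpf_func_info), so the tool keeps working if later kernels append fields. A hedged sketch of that walk (walk_func_info is hypothetical, not part of the patch; it assumes the struct dump_data above and the uapi struct bpf_func_info added later in this series):

    static void walk_func_info(struct dump_data *dd, unsigned int nr_insns,
                               unsigned int nr_finfo)
    {
            void *cur = dd->func_info;
            unsigned int i, seen = 0;

            for (i = 0; cur && seen < nr_finfo && i < nr_insns; i++) {
                    struct bpf_func_info *fi = cur;

                    if (fi->insn_off != i)
                            continue;
                    /* fi->type_id names this function's signature in dd->btf */
                    cur += dd->finfo_rec_size;	/* kernel-reported stride */
                    seen++;
            }
    }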
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index f216b2f5c3d7..d47b8f73e2e7 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -31,8 +31,10 @@ FEATURE_TESTS_BASIC := \
backtrace \
dwarf \
dwarf_getlocations \
+ eventfd \
fortify-source \
sync-compare-and-swap \
+ get_current_dir_name \
glibc \
gtk2 \
gtk2-infobar \
@@ -68,7 +70,8 @@ FEATURE_TESTS_BASIC := \
sched_getcpu \
sdt \
setns \
- libopencsd
+ libopencsd \
+ libaio
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
# of all feature tests
@@ -114,7 +117,8 @@ FEATURE_DISPLAY ?= \
zlib \
lzma \
get_cpuid \
- bpf
+ bpf \
+ libaio
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 0516259be70f..2dbcc0d00f52 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -5,8 +5,10 @@ FILES= \
test-bionic.bin \
test-dwarf.bin \
test-dwarf_getlocations.bin \
+ test-eventfd.bin \
test-fortify-source.bin \
test-sync-compare-and-swap.bin \
+ test-get_current_dir_name.bin \
test-glibc.bin \
test-gtk2.bin \
test-gtk2-infobar.bin \
@@ -53,12 +55,14 @@ FILES= \
test-sdt.bin \
test-cxx.bin \
test-jvmti.bin \
+ test-jvmti-cmlr.bin \
test-sched_getcpu.bin \
test-setns.bin \
test-libopencsd.bin \
test-clang.bin \
test-llvm.bin \
- test-llvm-version.bin
+ test-llvm-version.bin \
+ test-libaio.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
@@ -101,6 +105,12 @@ $(OUTPUT)test-bionic.bin:
$(OUTPUT)test-libelf.bin:
$(BUILD) -lelf
+$(OUTPUT)test-eventfd.bin:
+ $(BUILD)
+
+$(OUTPUT)test-get_current_dir_name.bin:
+ $(BUILD)
+
$(OUTPUT)test-glibc.bin:
$(BUILD)
@@ -259,6 +269,9 @@ $(OUTPUT)test-cxx.bin:
$(OUTPUT)test-jvmti.bin:
$(BUILD)
+$(OUTPUT)test-jvmti-cmlr.bin:
+ $(BUILD)
+
$(OUTPUT)test-llvm.bin:
$(BUILDXX) -std=gnu++11 \
-I$(shell $(LLVM_CONFIG) --includedir) \
@@ -285,6 +298,9 @@ $(OUTPUT)test-clang.bin:
-include $(OUTPUT)*.d
+$(OUTPUT)test-libaio.bin:
+ $(BUILD) -lrt
+
###############################
clean:
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 8dc20a61341f..20cdaa4fc112 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -34,6 +34,10 @@
# include "test-libelf-mmap.c"
#undef main
+#define main main_test_get_current_dir_name
+# include "test-get_current_dir_name.c"
+#undef main
+
#define main main_test_glibc
# include "test-glibc.c"
#undef main
@@ -46,6 +50,10 @@
# include "test-dwarf_getlocations.c"
#undef main
+#define main main_test_eventfd
+# include "test-eventfd.c"
+#undef main
+
#define main main_test_libelf_getphdrnum
# include "test-libelf-getphdrnum.c"
#undef main
@@ -166,6 +174,10 @@
# include "test-libopencsd.c"
#undef main
+#define main main_test_libaio
+# include "test-libaio.c"
+#undef main
+
int main(int argc, char *argv[])
{
main_test_libpython();
@@ -174,9 +186,11 @@ int main(int argc, char *argv[])
main_test_hello();
main_test_libelf();
main_test_libelf_mmap();
+ main_test_get_current_dir_name();
main_test_glibc();
main_test_dwarf();
main_test_dwarf_getlocations();
+ main_test_eventfd();
main_test_libelf_getphdrnum();
main_test_libelf_gelf_getnote();
main_test_libelf_getshdrstrndx();
@@ -204,6 +218,7 @@ int main(int argc, char *argv[])
main_test_sdt();
main_test_setns();
main_test_libopencsd();
+ main_test_libaio();
return 0;
}
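test-all.c can absorb every probe into one binary because each included file's main() is renamed by the preprocessor before inclusion; a successful compile-and-link of the aggregate then proves all features in a single pass. A self-contained toy illustrating the trick (probe_a/probe_b are stand-ins for real single-feature test files that would normally be pulled in with #include):

    #include <stdio.h>

    #define main main_test_probe_a
    int main(void) { return 0; }	/* imagine: # include "test-probe-a.c" */
    #undef main

    #define main main_test_probe_b
    int main(void) { return 0; }	/* imagine: # include "test-probe-b.c" */
    #undef main

    int main(void)
    {
            main_test_probe_a();
            main_test_probe_b();
            printf("all probes linked\n");
            return 0;
    }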
diff --git a/tools/build/feature/test-eventfd.c b/tools/build/feature/test-eventfd.c
new file mode 100644
index 000000000000..f4de7ef00ccb
--- /dev/null
+++ b/tools/build/feature/test-eventfd.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+
+#include <sys/eventfd.h>
+
+int main(void)
+{
+ return eventfd(0, EFD_NONBLOCK);
+}
diff --git a/tools/build/feature/test-get_current_dir_name.c b/tools/build/feature/test-get_current_dir_name.c
new file mode 100644
index 000000000000..573000f93212
--- /dev/null
+++ b/tools/build/feature/test-get_current_dir_name.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <stdlib.h>
+
+int main(void)
+{
+ free(get_current_dir_name());
+ return 0;
+}
diff --git a/tools/build/feature/test-jvmti-cmlr.c b/tools/build/feature/test-jvmti-cmlr.c
new file mode 100644
index 000000000000..c27b5b71a0f6
--- /dev/null
+++ b/tools/build/feature/test-jvmti-cmlr.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <jvmti.h>
+#include <jvmticmlr.h>
+
+int main(void)
+{
+ jvmtiCompiledMethodLoadInlineRecord rec __attribute__((unused));
+ jvmtiCompiledMethodLoadRecordHeader hdr __attribute__((unused));
+ PCStackInfo p __attribute__((unused));
+ return 0;
+}
diff --git a/tools/build/feature/test-jvmti.c b/tools/build/feature/test-jvmti.c
index 5cf31192f204..799916d2e3e3 100644
--- a/tools/build/feature/test-jvmti.c
+++ b/tools/build/feature/test-jvmti.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <jvmti.h>
-#include <jvmticmlr.h>
int main(void)
{
diff --git a/tools/build/feature/test-libaio.c b/tools/build/feature/test-libaio.c
new file mode 100644
index 000000000000..932133c9a265
--- /dev/null
+++ b/tools/build/feature/test-libaio.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <aio.h>
+
+int main(void)
+{
+ struct aiocb aiocb;
+
+ aiocb.aio_fildes = 0;
+ aiocb.aio_offset = 0;
+ aiocb.aio_buf = 0;
+ aiocb.aio_nbytes = 0;
+ aiocb.aio_reqprio = 0;
+ aiocb.aio_sigevent.sigev_notify = 1 /*SIGEV_NONE*/;
+
+ return (int)aio_return(&aiocb);
+}
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
index 5ff1246e6194..d68eb4fb40cc 100644
--- a/tools/build/feature/test-libopencsd.c
+++ b/tools/build/feature/test-libopencsd.c
@@ -1,6 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
#include <opencsd/c_api/opencsd_c_api.h>
+/*
+ * Check that the OpenCSD library version is sufficient to provide the
+ * required features
+ */
+#define OCSD_MIN_VER ((0 << 16) | (10 << 8) | (0))
+#if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
+#error "OpenCSD >= 0.10.0 is required"
+#endif
+
int main(void)
{
(void)ocsd_get_version();
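The version gate works because (major << 16) | (minor << 8) | patch makes version ordering match integer ordering for components under 256. A quick worked example of the packing, with PACK_VER as a local stand-in for the header's encoding:

    #include <stdio.h>

    #define PACK_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))

    int main(void)
    {
            /* 0.10.0 -> 0xa00; anything below, e.g. 0.9.3 -> 0x903,
             * fails the compile-time check above */
            printf("0.10.0 -> %#x\n", PACK_VER(0, 10, 0));
            printf("0.9.3  -> %#x\n", PACK_VER(0, 9, 3));
            return 0;
    }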
diff --git a/tools/crypto/getstat.c b/tools/crypto/getstat.c
index 24115173a483..9e8ff76420fa 100644
--- a/tools/crypto/getstat.c
+++ b/tools/crypto/getstat.c
@@ -152,86 +152,86 @@ static int get_stat(const char *drivername)
if (tb[CRYPTOCFGA_STAT_HASH]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_HASH];
- struct crypto_stat *rhash =
- (struct crypto_stat *)RTA_DATA(rta);
- printf("%s\tHash\n\tHash: %u bytes: %llu\n\tErrors: %u\n",
+ struct crypto_stat_hash *rhash =
+ (struct crypto_stat_hash *)RTA_DATA(rta);
+ printf("%s\tHash\n\tHash: %llu bytes: %llu\n\tErrors: %llu\n",
drivername,
rhash->stat_hash_cnt, rhash->stat_hash_tlen,
- rhash->stat_hash_err_cnt);
+ rhash->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_COMPRESS]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_COMPRESS];
- struct crypto_stat *rblk =
- (struct crypto_stat *)RTA_DATA(rta);
- printf("%s\tCompress\n\tCompress: %u bytes: %llu\n\tDecompress: %u bytes: %llu\n\tErrors: %u\n",
+ struct crypto_stat_compress *rblk =
+ (struct crypto_stat_compress *)RTA_DATA(rta);
+ printf("%s\tCompress\n\tCompress: %llu bytes: %llu\n\tDecompress: %llu bytes: %llu\n\tErrors: %llu\n",
drivername,
rblk->stat_compress_cnt, rblk->stat_compress_tlen,
rblk->stat_decompress_cnt, rblk->stat_decompress_tlen,
- rblk->stat_compress_err_cnt);
+ rblk->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_ACOMP]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_ACOMP];
- struct crypto_stat *rcomp =
- (struct crypto_stat *)RTA_DATA(rta);
- printf("%s\tACompress\n\tCompress: %u bytes: %llu\n\tDecompress: %u bytes: %llu\n\tErrors: %u\n",
+ struct crypto_stat_compress *rcomp =
+ (struct crypto_stat_compress *)RTA_DATA(rta);
+ printf("%s\tACompress\n\tCompress: %llu bytes: %llu\n\tDecompress: %llu bytes: %llu\n\tErrors: %llu\n",
drivername,
rcomp->stat_compress_cnt, rcomp->stat_compress_tlen,
rcomp->stat_decompress_cnt, rcomp->stat_decompress_tlen,
- rcomp->stat_compress_err_cnt);
+ rcomp->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_AEAD]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_AEAD];
- struct crypto_stat *raead =
- (struct crypto_stat *)RTA_DATA(rta);
- printf("%s\tAEAD\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
+ struct crypto_stat_aead *raead =
+ (struct crypto_stat_aead *)RTA_DATA(rta);
+ printf("%s\tAEAD\n\tEncrypt: %llu bytes: %llu\n\tDecrypt: %llu bytes: %llu\n\tErrors: %llu\n",
drivername,
raead->stat_encrypt_cnt, raead->stat_encrypt_tlen,
raead->stat_decrypt_cnt, raead->stat_decrypt_tlen,
- raead->stat_aead_err_cnt);
+ raead->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_BLKCIPHER]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_BLKCIPHER];
- struct crypto_stat *rblk =
- (struct crypto_stat *)RTA_DATA(rta);
- printf("%s\tCipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
+ struct crypto_stat_cipher *rblk =
+ (struct crypto_stat_cipher *)RTA_DATA(rta);
+ printf("%s\tCipher\n\tEncrypt: %llu bytes: %llu\n\tDecrypt: %llu bytes: %llu\n\tErrors: %llu\n",
drivername,
rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
- rblk->stat_cipher_err_cnt);
+ rblk->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_AKCIPHER]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_AKCIPHER];
- struct crypto_stat *rblk =
- (struct crypto_stat *)RTA_DATA(rta);
- printf("%s\tAkcipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tSign: %u\n\tVerify: %u\n\tErrors: %u\n",
+ struct crypto_stat_akcipher *rblk =
+ (struct crypto_stat_akcipher *)RTA_DATA(rta);
+ printf("%s\tAkcipher\n\tEncrypt: %llu bytes: %llu\n\tDecrypt: %llu bytes: %llu\n\tSign: %llu\n\tVerify: %llu\n\tErrors: %llu\n",
drivername,
rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
rblk->stat_sign_cnt, rblk->stat_verify_cnt,
- rblk->stat_akcipher_err_cnt);
+ rblk->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_CIPHER]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_CIPHER];
- struct crypto_stat *rblk =
- (struct crypto_stat *)RTA_DATA(rta);
- printf("%s\tcipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
+ struct crypto_stat_cipher *rblk =
+ (struct crypto_stat_cipher *)RTA_DATA(rta);
+ printf("%s\tcipher\n\tEncrypt: %llu bytes: %llu\n\tDecrypt: %llu bytes: %llu\n\tErrors: %llu\n",
drivername,
rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
- rblk->stat_cipher_err_cnt);
+ rblk->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_RNG]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_RNG];
- struct crypto_stat *rrng =
- (struct crypto_stat *)RTA_DATA(rta);
- printf("%s\tRNG\n\tSeed: %u\n\tGenerate: %u bytes: %llu\n\tErrors: %u\n",
+ struct crypto_stat_rng *rrng =
+ (struct crypto_stat_rng *)RTA_DATA(rta);
+ printf("%s\tRNG\n\tSeed: %llu\n\tGenerate: %llu bytes: %llu\n\tErrors: %llu\n",
drivername,
rrng->stat_seed_cnt,
rrng->stat_generate_cnt, rrng->stat_generate_tlen,
- rrng->stat_rng_err_cnt);
+ rrng->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_KPP]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_KPP];
- struct crypto_stat *rkpp =
- (struct crypto_stat *)RTA_DATA(rta);
- printf("%s\tKPP\n\tSetsecret: %u\n\tGenerate public key: %u\n\tCompute_shared_secret: %u\n\tErrors: %u\n",
+ struct crypto_stat_kpp *rkpp =
+ (struct crypto_stat_kpp *)RTA_DATA(rta);
+ printf("%s\tKPP\n\tSetsecret: %llu\n\tGenerate public key: %llu\n\tCompute_shared_secret: %llu\n\tErrors: %llu\n",
drivername,
rkpp->stat_setsecret_cnt,
rkpp->stat_generate_public_key_cnt,
rkpp->stat_compute_shared_secret_cnt,
- rkpp->stat_kpp_err_cnt);
+ rkpp->stat_err_cnt);
} else {
fprintf(stderr, "%s is of an unknown algorithm\n", drivername);
}
diff --git a/tools/include/linux/err.h b/tools/include/linux/err.h
index 094649667bae..2f5a12b88a86 100644
--- a/tools/include/linux/err.h
+++ b/tools/include/linux/err.h
@@ -59,4 +59,17 @@ static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
else
return 0;
}
+
+/**
+ * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
+ * @ptr: The pointer to cast.
+ *
+ * Explicitly cast an error-valued pointer to another pointer type in such a
+ * way as to make it clear that's what's going on.
+ */
+static inline void * __must_check ERR_CAST(__force const void *ptr)
+{
+ /* cast away the const */
+ return (void *) ptr;
+}
#endif /* _LINUX_ERR_H */
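ERR_CAST()'s typical use is propagating an ERR_PTR-encoded errno across pointer types without a bare cast obscuring the intent. A hypothetical sketch (struct foo/bar and get_bar() are stand-ins):

    #include <linux/err.h>

    struct foo;
    struct bar;

    extern struct bar *get_bar(void);

    static struct foo *get_foo(void)
    {
            struct bar *b = get_bar();

            if (IS_ERR(b))
                    return ERR_CAST(b);	/* still IS_ERR(), same errno */
            /* ... construct a foo from b ... */
            return (struct foo *)b;		/* placeholder */
    }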
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index 6935ef94e77a..857d9e22826e 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -116,6 +116,6 @@ int scnprintf(char * buf, size_t size, const char * fmt, ...);
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define current_gfp_context(k) 0
-#define synchronize_sched()
+#define synchronize_rcu()
#endif
diff --git a/tools/include/uapi/asm-generic/ioctls.h b/tools/include/uapi/asm-generic/ioctls.h
index 040651735662..cdc9f4ca8c27 100644
--- a/tools/include/uapi/asm-generic/ioctls.h
+++ b/tools/include/uapi/asm-generic/ioctls.h
@@ -79,6 +79,8 @@
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */
+#define TIOCGISO7816 _IOR('T', 0x42, struct serial_iso7816)
+#define TIOCSISO7816 _IOWR('T', 0x43, struct serial_iso7816)
#define FIONCLEX 0x5450
#define FIOCLEX 0x5451
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 538546edbfbd..c7f3321fbe43 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -760,8 +760,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_ftruncate __NR3264_ftruncate
#define __NR_lseek __NR3264_lseek
#define __NR_sendfile __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_newfstatat __NR3264_fstatat
#define __NR_fstat __NR3264_fstat
+#endif
#define __NR_mmap __NR3264_mmap
#define __NR_fadvise64 __NR3264_fadvise64
#ifdef __NR3264_stat
@@ -776,8 +778,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_ftruncate64 __NR3264_ftruncate
#define __NR_llseek __NR3264_lseek
#define __NR_sendfile64 __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR_fstatat64 __NR3264_fstatat
#define __NR_fstat64 __NR3264_fstat
+#endif
#define __NR_mmap2 __NR3264_mmap
#define __NR_fadvise64_64 __NR3264_fadvise64
#ifdef __NR3264_stat
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 7f5634ce8e88..a4446f452040 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -529,6 +529,28 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
+/*
+ * Once upon a time we supposed that writes through the GGTT would be
+ * immediately in physical memory (once flushed out of the CPU path). However,
+ * on a few different processors and chipsets, this is not necessarily the case
+ * as the writes appear to be buffered internally. Thus a read of the backing
+ * storage (physical memory) via a different path (with different physical tags
+ * to the indirect write via the GGTT) will see stale values from before
+ * the GGTT write. Inside the kernel, we can for the most part keep track of
+ * the different read/write domains in use (e.g. set-domain), but the assumption
+ * of coherency is baked into the ABI, hence reporting its true state in this
+ * parameter.
+ *
+ * Reports true when writes via mmap_gtt are immediately visible following an
+ * lfence to flush the WCB.
+ *
+ * Reports false when writes via mmap_gtt are indeterminately delayed in an
+ * internal buffer and are _not_ immediately visible to third parties accessing
+ * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
+ * communications channel when this reports false is strongly discouraged.
+ */
+#define I915_PARAM_MMAP_GTT_COHERENT 52
+
typedef struct drm_i915_getparam {
__s32 param;
/*
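Userspace would query the new parameter through the usual i915 getparam ioctl. A minimal sketch, not a definitive implementation; the device node path is an assumption:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    int main(void)
    {
            struct drm_i915_getparam gp = {
                    .param = I915_PARAM_MMAP_GTT_COHERENT,
            };
            int coherent = 0;
            int fd = open("/dev/dri/card0", O_RDWR);

            if (fd < 0)
                    return 1;
            gp.value = &coherent;
            if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
                    printf("mmap_gtt coherent: %d\n", coherent);
            close(fd);
            return 0;
    }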
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 852dc17ab47a..91c43884f295 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -133,6 +133,14 @@ enum bpf_map_type {
BPF_MAP_TYPE_STACK,
};
+/* Note that tracing related programs such as
+ * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT}
+ * are not subject to a stable API since kernel internal data
+ * structures can change from release to release and may
+ * therefore break existing tracing BPF programs. Tracing BPF
+ * programs correspond to /a/ specific kernel which is to be
+ * analyzed, and not /a/ specific kernel /and/ all future ones.
+ */
enum bpf_prog_type {
BPF_PROG_TYPE_UNSPEC,
BPF_PROG_TYPE_SOCKET_FILTER,
@@ -232,6 +240,20 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
+/* If BPF_F_ANY_ALIGNMENT is used in the BPF_PROG_LOAD command, the
+ * verifier will allow any alignment whatsoever. On platforms
+ * with strict alignment requirements for loads and stores (such
+ * as sparc and mips) the verifier validates that all loads and
+ * stores provably follow this requirement. This flag turns that
+ * checking and enforcement off.
+ *
+ * It is mostly used for testing when we want to validate the
+ * context and memory access aspects of the verifier, but because
+ * of an unaligned access the alignment check would trigger before
+ * the one we are interested in.
+ */
+#define BPF_F_ANY_ALIGNMENT (1U << 1)
+
/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
@@ -257,9 +279,6 @@ enum bpf_attach_type {
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE (1U << 2)
-/* flags for BPF_PROG_QUERY */
-#define BPF_F_QUERY_EFFECTIVE (1U << 0)
-
#define BPF_OBJ_NAME_LEN 16U
/* Flags for accessing BPF object */
@@ -269,6 +288,12 @@ enum bpf_attach_type {
/* Flag for stack_map, store build_id+offset instead of pointer */
#define BPF_F_STACK_BUILD_ID (1U << 5)
+/* Zero-initialize hash function seed. This should only be used for testing. */
+#define BPF_F_ZERO_SEED (1U << 6)
+
+/* flags for BPF_PROG_QUERY */
+#define BPF_F_QUERY_EFFECTIVE (1U << 0)
+
enum bpf_stack_build_id_status {
/* user space need an empty entry to identify end of a trace */
BPF_STACK_BUILD_ID_EMPTY = 0,
@@ -326,7 +351,7 @@ union bpf_attr {
__u32 log_level; /* verbosity level of verifier */
__u32 log_size; /* size of user buffer */
__aligned_u64 log_buf; /* user supplied buffer */
- __u32 kern_version; /* checked when prog_type=kprobe */
+ __u32 kern_version; /* not used */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_ifindex; /* ifindex of netdev to prep for */
@@ -335,6 +360,13 @@ union bpf_attr {
* (context accesses, allowed helpers, etc).
*/
__u32 expected_attach_type;
+ __u32 prog_btf_fd; /* fd pointing to BTF type data */
+ __u32 func_info_rec_size; /* userspace bpf_func_info size */
+ __aligned_u64 func_info; /* func info */
+ __u32 func_info_cnt; /* number of bpf_func_info records */
+ __u32 line_info_rec_size; /* userspace bpf_line_info size */
+ __aligned_u64 line_info; /* line info */
+ __u32 line_info_cnt; /* number of bpf_line_info records */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -353,8 +385,11 @@ union bpf_attr {
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
__u32 prog_fd;
__u32 retval;
- __u32 data_size_in;
- __u32 data_size_out;
+ __u32 data_size_in; /* input: len of data_in */
+ __u32 data_size_out; /* input/output: len of data_out
+ * returns ENOSPC if data_out
+ * is too small.
+ */
__aligned_u64 data_in;
__aligned_u64 data_out;
__u32 repeat;
@@ -475,18 +510,6 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_pop_elem(struct bpf_map *map, void *value)
- * Description
- * Pop an element from *map*.
- * Return
- * 0 on success, or a negative error in case of failure.
- *
- * int bpf_map_peek_elem(struct bpf_map *map, void *value)
- * Description
- * Get an element from *map* without removing it.
- * Return
- * 0 on success, or a negative error in case of failure.
- *
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
@@ -1910,9 +1933,9 @@ union bpf_attr {
* is set to metric from route (IPv4/IPv6 only), and ifindex
* is set to the device index of the nexthop from the FIB lookup.
*
- * *plen* argument is the size of the passed in struct.
- * *flags* argument can be a combination of one or more of the
- * following values:
+ * *plen* argument is the size of the passed in struct.
+ * *flags* argument can be a combination of one or more of the
+ * following values:
*
* **BPF_FIB_LOOKUP_DIRECT**
* Do a direct table lookup vs full lookup using FIB
@@ -1921,9 +1944,9 @@ union bpf_attr {
* Perform lookup from an egress perspective (default is
* ingress).
*
- * *ctx* is either **struct xdp_md** for XDP programs or
- * **struct sk_buff** tc cls_act programs.
- * Return
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** tc cls_act programs.
+ * Return
* * < 0 if any input argument is invalid
* * 0 on success (packet is forwarded, nexthop neighbor exists)
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
@@ -2068,8 +2091,8 @@ union bpf_attr {
* translated to a keycode using the rc keymap, and reported as
* an input key down event. After a period a key up event is
* generated. This period can be extended by calling either
- * **bpf_rc_keydown** () again with the same values, or calling
- * **bpf_rc_repeat** ().
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
*
* Some protocols include a toggle bit, in case the button was
* released and pressed again between consecutive scancodes.
@@ -2152,29 +2175,30 @@ union bpf_attr {
* The *flags* meaning is specific for each map type,
* and has to be 0 for cgroup local storage.
*
- * Depending on the bpf program type, a local storage area
- * can be shared between multiple instances of the bpf program,
+ * Depending on the BPF program type, a local storage area
+ * can be shared between multiple instances of the BPF program,
* running simultaneously.
*
* A user should care about the synchronization by himself.
- * For example, by using the BPF_STX_XADD instruction to alter
+ * For example, by using the **BPF_STX_XADD** instruction to alter
* the shared data.
* Return
- * Pointer to the local storage area.
+ * A pointer to the local storage area.
*
* int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
* Description
- * Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY map
- * It checks the selected sk is matching the incoming
- * request in the skb.
+ * Select a **SO_REUSEPORT** socket from a
+ * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
+ * It checks the selected socket is matching the incoming
+ * request in the socket buffer.
* Return
* 0 on success, or a negative error in case of failure.
*
- * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
- * and if non-NULL, released via **bpf_sk_release**\ ().
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
*
* The *ctx* should point to the context of the program, such as
* the skb or socket (depending on the hook in use). This is used
@@ -2187,12 +2211,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
- * If the *netns* is zero, then the socket lookup table in the
- * netns associated with the *ctx* will be used. For the TC hooks,
- * this in the netns of the device in the skb. For socket hooks,
- * this in the netns of the socket. If *netns* is non-zero, then
- * it specifies the ID of the netns relative to the netns
- * associated with the *ctx*.
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2200,13 +2226,15 @@ union bpf_attr {
* This helper is available only if the kernel was compiled with
* **CONFIG_NET** configuration option.
* Return
- * Pointer to *struct bpf_sock*, or NULL in case of failure.
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from **reuse->socks**\ [] using the hash of the tuple.
*
- * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for UDP socket matching *tuple*, optionally in a child
* network namespace *netns*. The return value must be checked,
- * and if non-NULL, released via **bpf_sk_release**\ ().
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
*
* The *ctx* should point to the context of the program, such as
* the skb or socket (depending on the hook in use). This is used
@@ -2219,12 +2247,14 @@ union bpf_attr {
* **sizeof**\ (*tuple*\ **->ipv6**)
* Look for an IPv6 socket.
*
- * If the *netns* is zero, then the socket lookup table in the
- * netns associated with the *ctx* will be used. For the TC hooks,
- * this in the netns of the device in the skb. For socket hooks,
- * this in the netns of the socket. If *netns* is non-zero, then
- * it specifies the ID of the netns relative to the netns
- * associated with the *ctx*.
+ * If the *netns* is a negative signed 32-bit integer, then the
+ * socket lookup table in the netns associated with the *ctx*
+ * will be used. For the TC hooks, this is the netns of the device
+ * in the skb. For socket hooks, this is the netns of the socket.
+ * If *netns* is any other signed 32-bit value greater than or
+ * equal to zero then it specifies the ID of the netns relative to
+ * the netns associated with the *ctx*. *netns* values beyond the
+ * range of 32-bit integers are reserved for future use.
*
* All values for *flags* are reserved for future usage, and must
* be left at zero.
@@ -2232,31 +2262,71 @@ union bpf_attr {
* This helper is available only if the kernel was compiled with
* **CONFIG_NET** configuration option.
* Return
- * Pointer to *struct bpf_sock*, or NULL in case of failure.
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from **reuse->socks**\ [] using the hash of the tuple.
*
- * int bpf_sk_release(struct bpf_sock *sk)
+ * int bpf_sk_release(struct bpf_sock *sock)
* Description
- * Release the reference held by *sock*. *sock* must be a non-NULL
- * pointer that was returned from bpf_sk_lookup_xxx\ ().
+ * Release the reference held by *sock*. *sock* must be a
+ * non-**NULL** pointer that was returned from
+ * **bpf_sk_lookup_xxx**\ ().
* Return
* 0 on success, or a negative error in case of failure.
*
+ * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+ * Description
+ * Pop an element from *map*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+ * Description
+ * Get an element from *map* without removing it.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
* int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags)
* Description
- * For socket policies, insert *len* bytes into msg at offset
+ * For socket policies, insert *len* bytes into *msg* at offset
* *start*.
*
* If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
- * *msg* it may want to insert metadata or options into the msg.
+ * *msg* it may want to insert metadata or options into the *msg*.
* This can later be read and used by any of the lower layer BPF
* hooks.
*
* This helper may fail if under memory pressure (a malloc
* fails) in these cases BPF programs will get an appropriate
* error and BPF programs will need to handle them.
+ * Return
+ * 0 on success, or a negative error in case of failure.
*
+ * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags)
+ * Description
+ * Will remove *pop* bytes from a *msg* starting at byte *start*.
+ * This may result in **ENOMEM** errors in certain situations if
+ * an allocation and copy are required due to a full ring buffer.
+ * However, the helper will try to avoid doing the allocation
+ * if possible. Other errors can occur if the input parameters are
+ * invalid, either because the *start* byte is not a valid part of
+ * the *msg* payload and/or because the *pop* value is too large.
* Return
* 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
+ * Description
+ * This helper is used in programs implementing IR decoding, to
+ * report a successfully decoded pointer movement.
+ *
+ * The *ctx* should point to the lirc sample as passed into
+ * the program.
+ *
+ * This helper is only available if the kernel was compiled with
+ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+ * "**y**".
+ * Return
+ * 0
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2349,7 +2419,9 @@ union bpf_attr {
FN(map_push_elem), \
FN(map_pop_elem), \
FN(map_peek_elem), \
- FN(msg_push_data),
+ FN(msg_push_data), \
+ FN(msg_pop_data), \
+ FN(rc_pointer_rel),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2405,6 +2477,9 @@ enum bpf_func_id {
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
+/* Current network namespace */
+#define BPF_F_CURRENT_NETNS (-1L)
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
@@ -2422,6 +2497,12 @@ enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_SEG6_INLINE
};
+#define __bpf_md_ptr(type, name) \
+union { \
+ type name; \
+ __u64 :64; \
+} __attribute__((aligned(8)))
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -2456,7 +2537,9 @@ struct __sk_buff {
/* ... here. */
__u32 data_meta;
- struct bpf_flow_keys *flow_keys;
+ __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
+ __u64 tstamp;
+ __u32 wire_len;
};
struct bpf_tunnel_key {
@@ -2572,8 +2655,8 @@ enum sk_action {
* be added to the end of this structure
*/
struct sk_msg_md {
- void *data;
- void *data_end;
+ __bpf_md_ptr(void *, data);
+ __bpf_md_ptr(void *, data_end);
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
@@ -2582,6 +2665,7 @@ struct sk_msg_md {
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
+ __u32 size; /* Total size of sk_msg */
};
struct sk_reuseport_md {
@@ -2589,8 +2673,9 @@ struct sk_reuseport_md {
* Start of directly accessible data. It begins from
* the tcp/udp header.
*/
- void *data;
- void *data_end; /* End of directly accessible data */
+ __bpf_md_ptr(void *, data);
+ /* End of directly accessible data */
+ __bpf_md_ptr(void *, data_end);
/*
* Total length of packet (starting from the tcp/udp header).
* Note that the directly accessible bytes (data_end - data)
@@ -2631,6 +2716,18 @@ struct bpf_prog_info {
__u32 nr_jited_func_lens;
__aligned_u64 jited_ksyms;
__aligned_u64 jited_func_lens;
+ __u32 btf_id;
+ __u32 func_info_rec_size;
+ __aligned_u64 func_info;
+ __u32 nr_func_info;
+ __u32 nr_line_info;
+ __aligned_u64 line_info;
+ __aligned_u64 jited_line_info;
+ __u32 nr_jited_line_info;
+ __u32 line_info_rec_size;
+ __u32 jited_line_info_rec_size;
+ __u32 nr_prog_tags;
+ __aligned_u64 prog_tags;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -2942,4 +3039,19 @@ struct bpf_flow_keys {
};
};
+struct bpf_func_info {
+ __u32 insn_off;
+ __u32 type_id;
+};
+
+#define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10)
+#define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff)
+
+struct bpf_line_info {
+ __u32 insn_off;
+ __u32 file_name_off;
+ __u32 line_off;
+ __u32 line_col;
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
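bpf_line_info packs the source line and column into the single line_col word that the two macros above split: upper 22 bits for the line, low 10 bits for the column. A worked example, with the macros duplicated locally so the snippet stands alone:

    #include <stdio.h>
    #include <linux/types.h>

    #define BPF_LINE_INFO_LINE_NUM(line_col)	((line_col) >> 10)
    #define BPF_LINE_INFO_LINE_COL(line_col)	((line_col) & 0x3ff)

    int main(void)
    {
            /* line 57, column 9 -> (57 << 10) | 9 = 58377 */
            __u32 line_col = (57 << 10) | 9;

            printf("line %u col %u\n",
                   BPF_LINE_INFO_LINE_NUM(line_col),
                   BPF_LINE_INFO_LINE_COL(line_col));
            return 0;
    }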
diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h
index 972265f32871..7b7475ef2f17 100644
--- a/tools/include/uapi/linux/btf.h
+++ b/tools/include/uapi/linux/btf.h
@@ -34,13 +34,16 @@ struct btf_type {
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
* bits 24-27: kind (e.g. int, ptr, array...etc)
- * bits 28-31: unused
+ * bits 28-30: unused
+ * bit 31: kind_flag, currently used by
+ * struct, union and fwd
*/
__u32 info;
/* "size" is used by INT, ENUM, STRUCT and UNION.
* "size" tells the size of the type it is describing.
*
- * "type" is used by PTR, TYPEDEF, VOLATILE, CONST and RESTRICT.
+ * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
+ * FUNC and FUNC_PROTO.
* "type" is a type_id referring to another type.
*/
union {
@@ -51,6 +54,7 @@ struct btf_type {
#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
+#define BTF_INFO_KFLAG(info) ((info) >> 31)
#define BTF_KIND_UNKN 0 /* Unknown */
#define BTF_KIND_INT 1 /* Integer */
@@ -64,8 +68,10 @@ struct btf_type {
#define BTF_KIND_VOLATILE 9 /* Volatile */
#define BTF_KIND_CONST 10 /* Const */
#define BTF_KIND_RESTRICT 11 /* Restrict */
-#define BTF_KIND_MAX 11
-#define NR_BTF_KINDS 12
+#define BTF_KIND_FUNC 12 /* Function */
+#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
+#define BTF_KIND_MAX 13
+#define NR_BTF_KINDS 14
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
@@ -107,7 +113,29 @@ struct btf_array {
struct btf_member {
__u32 name_off;
__u32 type;
- __u32 offset; /* offset in bits */
+ /* If the type info kind_flag is set, the btf_member offset
+ * contains both member bitfield size and bit offset. The
+ * bitfield size is set for bitfield members. If the type
+ * info kind_flag is not set, the offset contains only bit
+ * offset.
+ */
+ __u32 offset;
+};
+
+/* If the struct/union type info kind_flag is set, the
+ * following two macros are used to access bitfield_size
+ * and bit_offset from btf_member.offset.
+ */
+#define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24)
+#define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff)
+
+/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param".
+ * The exact number of btf_param is stored in the vlen (of the
+ * info in "struct btf_type").
+ */
+struct btf_param {
+ __u32 name_off;
+ __u32 type;
};
#endif /* _UAPI__LINUX_BTF_H__ */
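With kind_flag set, btf_member.offset carries the member's bitfield size in its top byte and its bit offset in the low 24 bits. A worked example of taking that encoding apart (macros duplicated from the header for self-containment):

    #include <stdio.h>

    #define BTF_MEMBER_BITFIELD_SIZE(val)	((val) >> 24)
    #define BTF_MEMBER_BIT_OFFSET(val)	((val) & 0xffffff)

    int main(void)
    {
            /* a 3-bit bitfield at bit offset 68 -> (3 << 24) | 68 */
            unsigned int offset = (3u << 24) | 68;

            printf("bitfield_size=%u bit_offset=%u\n",
                   BTF_MEMBER_BITFIELD_SIZE(offset),
                   BTF_MEMBER_BIT_OFFSET(offset));
            return 0;
    }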
diff --git a/tools/include/uapi/linux/fadvise.h b/tools/include/uapi/linux/fadvise.h
new file mode 100644
index 000000000000..0862b87434c2
--- /dev/null
+++ b/tools/include/uapi/linux/fadvise.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef FADVISE_H_INCLUDED
+#define FADVISE_H_INCLUDED
+
+#define POSIX_FADV_NORMAL 0 /* No further special treatment. */
+#define POSIX_FADV_RANDOM 1 /* Expect random page references. */
+#define POSIX_FADV_SEQUENTIAL 2 /* Expect sequential page references. */
+#define POSIX_FADV_WILLNEED 3 /* Will need these pages. */
+
+/*
+ * The advise values for POSIX_FADV_DONTNEED and POSIX_ADV_NOREUSE
+ * for s390-64 differ from the values for the rest of the world.
+ */
+#if defined(__s390x__)
+#define POSIX_FADV_DONTNEED 6 /* Don't need these pages. */
+#define POSIX_FADV_NOREUSE 7 /* Data will be accessed once. */
+#else
+#define POSIX_FADV_DONTNEED 4 /* Don't need these pages. */
+#define POSIX_FADV_NOREUSE 5 /* Data will be accessed once. */
+#endif
+
+#endif /* FADVISE_H_INCLUDED */
diff --git a/tools/include/uapi/linux/netlink.h b/tools/include/uapi/linux/netlink.h
index 486ed1f0c0bc..0a4d73317759 100644
--- a/tools/include/uapi/linux/netlink.h
+++ b/tools/include/uapi/linux/netlink.h
@@ -155,7 +155,7 @@ enum nlmsgerr_attrs {
#define NETLINK_LIST_MEMBERSHIPS 9
#define NETLINK_CAP_ACK 10
#define NETLINK_EXT_ACK 11
-#define NETLINK_DUMP_STRICT_CHK 12
+#define NETLINK_GET_STRICT_CHK 12
struct nl_pktinfo {
__u32 group;
diff --git a/tools/include/uapi/linux/pkt_cls.h b/tools/include/uapi/linux/pkt_cls.h
new file mode 100644
index 000000000000..401d0c1e612d
--- /dev/null
+++ b/tools/include/uapi/linux/pkt_cls.h
@@ -0,0 +1,612 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __LINUX_PKT_CLS_H
+#define __LINUX_PKT_CLS_H
+
+#include <linux/types.h>
+#include <linux/pkt_sched.h>
+
+#define TC_COOKIE_MAX_SIZE 16
+
+/* Action attributes */
+enum {
+ TCA_ACT_UNSPEC,
+ TCA_ACT_KIND,
+ TCA_ACT_OPTIONS,
+ TCA_ACT_INDEX,
+ TCA_ACT_STATS,
+ TCA_ACT_PAD,
+ TCA_ACT_COOKIE,
+ __TCA_ACT_MAX
+};
+
+#define TCA_ACT_MAX __TCA_ACT_MAX
+#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
+#define TCA_ACT_MAX_PRIO 32
+#define TCA_ACT_BIND 1
+#define TCA_ACT_NOBIND 0
+#define TCA_ACT_UNBIND 1
+#define TCA_ACT_NOUNBIND 0
+#define TCA_ACT_REPLACE 1
+#define TCA_ACT_NOREPLACE 0
+
+#define TC_ACT_UNSPEC (-1)
+#define TC_ACT_OK 0
+#define TC_ACT_RECLASSIFY 1
+#define TC_ACT_SHOT 2
+#define TC_ACT_PIPE 3
+#define TC_ACT_STOLEN 4
+#define TC_ACT_QUEUED 5
+#define TC_ACT_REPEAT 6
+#define TC_ACT_REDIRECT 7
+#define TC_ACT_TRAP 8 /* For hw path, this means "trap to cpu"
+ * and don't further process the frame
+ * in hardware. For sw path, this is
+ * equivalent to TC_ACT_STOLEN - drop
+ * the skb and act like everything
+ * is alright.
+ */
+#define TC_ACT_VALUE_MAX TC_ACT_TRAP
+
+/* There is a special kind of action called "extended actions",
+ * which needs a value parameter. These have a local opcode located in
+ * the highest nibble, starting from 1. The rest of the bits
+ * are used to carry the value. These two parts together make
+ * a combined opcode.
+ */
+#define __TC_ACT_EXT_SHIFT 28
+#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT)
+#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1)
+#define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK))
+#define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode)
+
+#define TC_ACT_JUMP __TC_ACT_EXT(1)
+#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
+#define TC_ACT_EXT_OPCODE_MAX TC_ACT_GOTO_CHAIN
+
+/* Action type identifiers */
+enum {
+ TCA_ID_UNSPEC=0,
+ TCA_ID_POLICE=1,
+ /* other actions go here */
+ __TCA_ID_MAX=255
+};
+
+#define TCA_ID_MAX __TCA_ID_MAX
+
+struct tc_police {
+ __u32 index;
+ int action;
+#define TC_POLICE_UNSPEC TC_ACT_UNSPEC
+#define TC_POLICE_OK TC_ACT_OK
+#define TC_POLICE_RECLASSIFY TC_ACT_RECLASSIFY
+#define TC_POLICE_SHOT TC_ACT_SHOT
+#define TC_POLICE_PIPE TC_ACT_PIPE
+
+ __u32 limit;
+ __u32 burst;
+ __u32 mtu;
+ struct tc_ratespec rate;
+ struct tc_ratespec peakrate;
+ int refcnt;
+ int bindcnt;
+ __u32 capab;
+};
+
+struct tcf_t {
+ __u64 install;
+ __u64 lastuse;
+ __u64 expires;
+ __u64 firstuse;
+};
+
+struct tc_cnt {
+ int refcnt;
+ int bindcnt;
+};
+
+#define tc_gen \
+ __u32 index; \
+ __u32 capab; \
+ int action; \
+ int refcnt; \
+ int bindcnt
+
+enum {
+ TCA_POLICE_UNSPEC,
+ TCA_POLICE_TBF,
+ TCA_POLICE_RATE,
+ TCA_POLICE_PEAKRATE,
+ TCA_POLICE_AVRATE,
+ TCA_POLICE_RESULT,
+ TCA_POLICE_TM,
+ TCA_POLICE_PAD,
+ __TCA_POLICE_MAX
+#define TCA_POLICE_RESULT TCA_POLICE_RESULT
+};
+
+#define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1)
+
+/* tca flags definitions */
+#define TCA_CLS_FLAGS_SKIP_HW (1 << 0) /* don't offload filter to HW */
+#define TCA_CLS_FLAGS_SKIP_SW (1 << 1) /* don't use filter in SW */
+#define TCA_CLS_FLAGS_IN_HW (1 << 2) /* filter is offloaded to HW */
+#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */
+#define TCA_CLS_FLAGS_VERBOSE (1 << 4) /* verbose logging */
+
+/* U32 filters */
+
+#define TC_U32_HTID(h) ((h)&0xFFF00000)
+#define TC_U32_USERHTID(h) (TC_U32_HTID(h)>>20)
+#define TC_U32_HASH(h) (((h)>>12)&0xFF)
+#define TC_U32_NODE(h) ((h)&0xFFF)
+#define TC_U32_KEY(h) ((h)&0xFFFFF)
+#define TC_U32_UNSPEC 0
+#define TC_U32_ROOT (0xFFF00000)
+
+enum {
+ TCA_U32_UNSPEC,
+ TCA_U32_CLASSID,
+ TCA_U32_HASH,
+ TCA_U32_LINK,
+ TCA_U32_DIVISOR,
+ TCA_U32_SEL,
+ TCA_U32_POLICE,
+ TCA_U32_ACT,
+ TCA_U32_INDEV,
+ TCA_U32_PCNT,
+ TCA_U32_MARK,
+ TCA_U32_FLAGS,
+ TCA_U32_PAD,
+ __TCA_U32_MAX
+};
+
+#define TCA_U32_MAX (__TCA_U32_MAX - 1)
+
+struct tc_u32_key {
+ __be32 mask;
+ __be32 val;
+ int off;
+ int offmask;
+};
+
+struct tc_u32_sel {
+ unsigned char flags;
+ unsigned char offshift;
+ unsigned char nkeys;
+
+ __be16 offmask;
+ __u16 off;
+ short offoff;
+
+ short hoff;
+ __be32 hmask;
+ struct tc_u32_key keys[0];
+};
+
+struct tc_u32_mark {
+ __u32 val;
+ __u32 mask;
+ __u32 success;
+};
+
+struct tc_u32_pcnt {
+ __u64 rcnt;
+ __u64 rhit;
+ __u64 kcnts[0];
+};
+
+/* Flags */
+
+#define TC_U32_TERMINAL 1
+#define TC_U32_OFFSET 2
+#define TC_U32_VAROFFSET 4
+#define TC_U32_EAT 8
+
+#define TC_U32_MAXDEPTH 8
+
+
+/* RSVP filter */
+
+enum {
+ TCA_RSVP_UNSPEC,
+ TCA_RSVP_CLASSID,
+ TCA_RSVP_DST,
+ TCA_RSVP_SRC,
+ TCA_RSVP_PINFO,
+ TCA_RSVP_POLICE,
+ TCA_RSVP_ACT,
+ __TCA_RSVP_MAX
+};
+
+#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1)
+
+struct tc_rsvp_gpi {
+ __u32 key;
+ __u32 mask;
+ int offset;
+};
+
+struct tc_rsvp_pinfo {
+ struct tc_rsvp_gpi dpi;
+ struct tc_rsvp_gpi spi;
+ __u8 protocol;
+ __u8 tunnelid;
+ __u8 tunnelhdr;
+ __u8 pad;
+};
+
+/* ROUTE filter */
+
+enum {
+ TCA_ROUTE4_UNSPEC,
+ TCA_ROUTE4_CLASSID,
+ TCA_ROUTE4_TO,
+ TCA_ROUTE4_FROM,
+ TCA_ROUTE4_IIF,
+ TCA_ROUTE4_POLICE,
+ TCA_ROUTE4_ACT,
+ __TCA_ROUTE4_MAX
+};
+
+#define TCA_ROUTE4_MAX (__TCA_ROUTE4_MAX - 1)
+
+
+/* FW filter */
+
+enum {
+ TCA_FW_UNSPEC,
+ TCA_FW_CLASSID,
+ TCA_FW_POLICE,
+ TCA_FW_INDEV, /* used by CONFIG_NET_CLS_IND */
+ TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */
+ TCA_FW_MASK,
+ __TCA_FW_MAX
+};
+
+#define TCA_FW_MAX (__TCA_FW_MAX - 1)
+
+/* TC index filter */
+
+enum {
+ TCA_TCINDEX_UNSPEC,
+ TCA_TCINDEX_HASH,
+ TCA_TCINDEX_MASK,
+ TCA_TCINDEX_SHIFT,
+ TCA_TCINDEX_FALL_THROUGH,
+ TCA_TCINDEX_CLASSID,
+ TCA_TCINDEX_POLICE,
+ TCA_TCINDEX_ACT,
+ __TCA_TCINDEX_MAX
+};
+
+#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1)
+
+/* Flow filter */
+
+enum {
+ FLOW_KEY_SRC,
+ FLOW_KEY_DST,
+ FLOW_KEY_PROTO,
+ FLOW_KEY_PROTO_SRC,
+ FLOW_KEY_PROTO_DST,
+ FLOW_KEY_IIF,
+ FLOW_KEY_PRIORITY,
+ FLOW_KEY_MARK,
+ FLOW_KEY_NFCT,
+ FLOW_KEY_NFCT_SRC,
+ FLOW_KEY_NFCT_DST,
+ FLOW_KEY_NFCT_PROTO_SRC,
+ FLOW_KEY_NFCT_PROTO_DST,
+ FLOW_KEY_RTCLASSID,
+ FLOW_KEY_SKUID,
+ FLOW_KEY_SKGID,
+ FLOW_KEY_VLAN_TAG,
+ FLOW_KEY_RXHASH,
+ __FLOW_KEY_MAX,
+};
+
+#define FLOW_KEY_MAX (__FLOW_KEY_MAX - 1)
+
+enum {
+ FLOW_MODE_MAP,
+ FLOW_MODE_HASH,
+};
+
+enum {
+ TCA_FLOW_UNSPEC,
+ TCA_FLOW_KEYS,
+ TCA_FLOW_MODE,
+ TCA_FLOW_BASECLASS,
+ TCA_FLOW_RSHIFT,
+ TCA_FLOW_ADDEND,
+ TCA_FLOW_MASK,
+ TCA_FLOW_XOR,
+ TCA_FLOW_DIVISOR,
+ TCA_FLOW_ACT,
+ TCA_FLOW_POLICE,
+ TCA_FLOW_EMATCHES,
+ TCA_FLOW_PERTURB,
+ __TCA_FLOW_MAX
+};
+
+#define TCA_FLOW_MAX (__TCA_FLOW_MAX - 1)
+
+/* Basic filter */
+
+enum {
+ TCA_BASIC_UNSPEC,
+ TCA_BASIC_CLASSID,
+ TCA_BASIC_EMATCHES,
+ TCA_BASIC_ACT,
+ TCA_BASIC_POLICE,
+ __TCA_BASIC_MAX
+};
+
+#define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1)
+
+
+/* Cgroup classifier */
+
+enum {
+ TCA_CGROUP_UNSPEC,
+ TCA_CGROUP_ACT,
+ TCA_CGROUP_POLICE,
+ TCA_CGROUP_EMATCHES,
+ __TCA_CGROUP_MAX,
+};
+
+#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1)
+
+/* BPF classifier */
+
+#define TCA_BPF_FLAG_ACT_DIRECT (1 << 0)
+
+enum {
+ TCA_BPF_UNSPEC,
+ TCA_BPF_ACT,
+ TCA_BPF_POLICE,
+ TCA_BPF_CLASSID,
+ TCA_BPF_OPS_LEN,
+ TCA_BPF_OPS,
+ TCA_BPF_FD,
+ TCA_BPF_NAME,
+ TCA_BPF_FLAGS,
+ TCA_BPF_FLAGS_GEN,
+ TCA_BPF_TAG,
+ TCA_BPF_ID,
+ __TCA_BPF_MAX,
+};
+
+#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
+
+/* Flower classifier */
+
+enum {
+ TCA_FLOWER_UNSPEC,
+ TCA_FLOWER_CLASSID,
+ TCA_FLOWER_INDEV,
+ TCA_FLOWER_ACT,
+ TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
+ TCA_FLOWER_KEY_IP_PROTO, /* u8 */
+ TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_TCP_SRC, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST, /* be16 */
+
+ TCA_FLOWER_FLAGS,
+ TCA_FLOWER_KEY_VLAN_ID, /* be16 */
+ TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
+
+ TCA_FLOWER_KEY_ENC_KEY_ID, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */
+ TCA_FLOWER_KEY_ENC_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
+
+ TCA_FLOWER_KEY_TCP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST_MASK, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST_MASK, /* be16 */
+ TCA_FLOWER_KEY_SCTP_SRC_MASK, /* be16 */
+ TCA_FLOWER_KEY_SCTP_DST_MASK, /* be16 */
+
+ TCA_FLOWER_KEY_SCTP_SRC, /* be16 */
+ TCA_FLOWER_KEY_SCTP_DST, /* be16 */
+
+ TCA_FLOWER_KEY_ENC_UDP_SRC_PORT, /* be16 */
+ TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK, /* be16 */
+ TCA_FLOWER_KEY_ENC_UDP_DST_PORT, /* be16 */
+ TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK, /* be16 */
+
+ TCA_FLOWER_KEY_FLAGS, /* be32 */
+ TCA_FLOWER_KEY_FLAGS_MASK, /* be32 */
+
+ TCA_FLOWER_KEY_ICMPV4_CODE, /* u8 */
+ TCA_FLOWER_KEY_ICMPV4_CODE_MASK,/* u8 */
+ TCA_FLOWER_KEY_ICMPV4_TYPE, /* u8 */
+ TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,/* u8 */
+ TCA_FLOWER_KEY_ICMPV6_CODE, /* u8 */
+ TCA_FLOWER_KEY_ICMPV6_CODE_MASK,/* u8 */
+ TCA_FLOWER_KEY_ICMPV6_TYPE, /* u8 */
+ TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */
+
+ TCA_FLOWER_KEY_ARP_SIP, /* be32 */
+ TCA_FLOWER_KEY_ARP_SIP_MASK, /* be32 */
+ TCA_FLOWER_KEY_ARP_TIP, /* be32 */
+ TCA_FLOWER_KEY_ARP_TIP_MASK, /* be32 */
+ TCA_FLOWER_KEY_ARP_OP, /* u8 */
+ TCA_FLOWER_KEY_ARP_OP_MASK, /* u8 */
+ TCA_FLOWER_KEY_ARP_SHA, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ARP_SHA_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ARP_THA, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ARP_THA_MASK, /* ETH_ALEN */
+
+ TCA_FLOWER_KEY_MPLS_TTL, /* u8 - 8 bits */
+ TCA_FLOWER_KEY_MPLS_BOS, /* u8 - 1 bit */
+ TCA_FLOWER_KEY_MPLS_TC, /* u8 - 3 bits */
+ TCA_FLOWER_KEY_MPLS_LABEL, /* be32 - 20 bits */
+
+ TCA_FLOWER_KEY_TCP_FLAGS, /* be16 */
+ TCA_FLOWER_KEY_TCP_FLAGS_MASK, /* be16 */
+
+ TCA_FLOWER_KEY_IP_TOS, /* u8 */
+ TCA_FLOWER_KEY_IP_TOS_MASK, /* u8 */
+ TCA_FLOWER_KEY_IP_TTL, /* u8 */
+ TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */
+
+ TCA_FLOWER_KEY_CVLAN_ID, /* be16 */
+ TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */
+
+ TCA_FLOWER_KEY_ENC_IP_TOS, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */
+
+ TCA_FLOWER_KEY_ENC_OPTS,
+ TCA_FLOWER_KEY_ENC_OPTS_MASK,
+
+ TCA_FLOWER_IN_HW_COUNT,
+
+ __TCA_FLOWER_MAX,
+};
+
+#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
+
+enum {
+ TCA_FLOWER_KEY_ENC_OPTS_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested
+ * TCA_FLOWER_KEY_ENC_OPT_GENEVE_
+ * attributes
+ */
+ __TCA_FLOWER_KEY_ENC_OPTS_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1)
+
+enum {
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, /* u16 */
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
+
+ __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \
+ (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
+
+enum {
+ TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
+ TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
+};
+
+/* Match-all classifier */
+
+enum {
+ TCA_MATCHALL_UNSPEC,
+ TCA_MATCHALL_CLASSID,
+ TCA_MATCHALL_ACT,
+ TCA_MATCHALL_FLAGS,
+ __TCA_MATCHALL_MAX,
+};
+
+#define TCA_MATCHALL_MAX (__TCA_MATCHALL_MAX - 1)
+
+/* Extended Matches */
+
+struct tcf_ematch_tree_hdr {
+ __u16 nmatches;
+ __u16 progid;
+};
+
+enum {
+ TCA_EMATCH_TREE_UNSPEC,
+ TCA_EMATCH_TREE_HDR,
+ TCA_EMATCH_TREE_LIST,
+ __TCA_EMATCH_TREE_MAX
+};
+#define TCA_EMATCH_TREE_MAX (__TCA_EMATCH_TREE_MAX - 1)
+
+struct tcf_ematch_hdr {
+ __u16 matchid;
+ __u16 kind;
+ __u16 flags;
+ __u16 pad; /* currently unused */
+};
+
+/* 0 1
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+ * +-----------------------+-+-+---+
+ * | Unused |S|I| R |
+ * +-----------------------+-+-+---+
+ *
+ * R(2) ::= relation to next ematch
+ * where: 0 0 END (last ematch)
+ * 0 1 AND
+ * 1 0 OR
+ * 1 1 Unused (invalid)
+ * I(1) ::= invert result
+ * S(1) ::= simple payload
+ */
+#define TCF_EM_REL_END 0
+#define TCF_EM_REL_AND (1<<0)
+#define TCF_EM_REL_OR (1<<1)
+#define TCF_EM_INVERT (1<<2)
+#define TCF_EM_SIMPLE (1<<3)
+
+#define TCF_EM_REL_MASK 3
+#define TCF_EM_REL_VALID(v) (((v) & TCF_EM_REL_MASK) != TCF_EM_REL_MASK)
+
+enum {
+ TCF_LAYER_LINK,
+ TCF_LAYER_NETWORK,
+ TCF_LAYER_TRANSPORT,
+ __TCF_LAYER_MAX
+};
+#define TCF_LAYER_MAX (__TCF_LAYER_MAX - 1)
+
+/* Ematch type assignments
+ * 1..32767 Reserved for ematches inside kernel tree
+ * 32768..65535 Free to use, not reliable
+ */
+#define TCF_EM_CONTAINER 0
+#define TCF_EM_CMP 1
+#define TCF_EM_NBYTE 2
+#define TCF_EM_U32 3
+#define TCF_EM_META 4
+#define TCF_EM_TEXT 5
+#define TCF_EM_VLAN 6
+#define TCF_EM_CANID 7
+#define TCF_EM_IPSET 8
+#define TCF_EM_IPT 9
+#define TCF_EM_MAX 9
+
+enum {
+ TCF_EM_PROG_TC
+};
+
+enum {
+ TCF_EM_OPND_EQ,
+ TCF_EM_OPND_GT,
+ TCF_EM_OPND_LT
+};
+
+#endif
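A sketch of how the extended-action opcode macros above compose: the high nibble carries the opcode and the low 28 bits carry the value (here a goto-chain target). The function name is illustrative only:

#include <linux/pkt_cls.h>

/* Split a combined extended-action verdict into opcode and value. */
static int handle_verdict(__u32 combined)
{
	if (TC_ACT_EXT_CMP(combined, TC_ACT_GOTO_CHAIN)) {
		__u32 chain = combined & TC_ACT_EXT_VAL_MASK;
		/* ... restart classification on this chain index ... */
		return (int)chain;
	}
	/* otherwise a plain verdict such as TC_ACT_OK or TC_ACT_SHOT */
	return (int)combined;
}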
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index c0d7ea0bf5b6..b17201edfa09 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -212,6 +212,7 @@ struct prctl_mm_map {
#define PR_SET_SPECULATION_CTRL 53
/* Speculation control variants */
# define PR_SPEC_STORE_BYPASS 0
+# define PR_SPEC_INDIRECT_BRANCH 1
/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
# define PR_SPEC_NOT_AFFECTED 0
# define PR_SPEC_PRCTL (1UL << 0)
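A minimal sketch of the new PR_SPEC_INDIRECT_BRANCH variant in use, assuming headers that also define PR_GET_SPECULATION_CTRL and PR_SPEC_DISABLE (both present in the full prctl.h, though not shown in this hunk):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Query the current indirect-branch speculation state ... */
	long state = prctl(PR_GET_SPECULATION_CTRL,
			   PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);

	printf("state: 0x%lx\n", state);

	/* ... and force the mitigation on (speculation disabled). */
	return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		     PR_SPEC_DISABLE, 0, 0);
}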
diff --git a/tools/include/uapi/linux/tc_act/tc_bpf.h b/tools/include/uapi/linux/tc_act/tc_bpf.h
new file mode 100644
index 000000000000..6e89a5df49a4
--- /dev/null
+++ b/tools/include/uapi/linux/tc_act/tc_bpf.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TC_BPF_H
+#define __LINUX_TC_BPF_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_BPF 13
+
+struct tc_act_bpf {
+ tc_gen;
+};
+
+enum {
+ TCA_ACT_BPF_UNSPEC,
+ TCA_ACT_BPF_TM,
+ TCA_ACT_BPF_PARMS,
+ TCA_ACT_BPF_OPS_LEN,
+ TCA_ACT_BPF_OPS,
+ TCA_ACT_BPF_FD,
+ TCA_ACT_BPF_NAME,
+ TCA_ACT_BPF_PAD,
+ TCA_ACT_BPF_TAG,
+ TCA_ACT_BPF_ID,
+ __TCA_ACT_BPF_MAX,
+};
+#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
+
+#endif
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 195ba486640f..2ed395b817cb 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python3
#
# top-like utility for displaying kvm statistics
#
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 7bc31c905018..197b40f5b5c6 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 425b480bda75..34d9c3619c96 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -66,7 +66,7 @@ ifndef VERBOSE
endif
FEATURE_USER = .libbpf
-FEATURE_TESTS = libelf libelf-mmap bpf reallocarray
+FEATURE_TESTS = libelf libelf-mmap bpf reallocarray cxx
FEATURE_DISPLAY = libelf bpf
INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
@@ -145,14 +145,26 @@ include $(srctree)/tools/build/Makefile.include
BPF_IN := $(OUTPUT)libbpf-in.o
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
+VERSION_SCRIPT := libbpf.map
+
+GLOBAL_SYM_COUNT = $(shell readelf -s $(BPF_IN) | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
+VERSIONED_SYM_COUNT = $(shell readelf -s $(OUTPUT)libbpf.so | \
+ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
CMD_TARGETS = $(LIB_FILE)
+CXX_TEST_TARGET = $(OUTPUT)test_libbpf
+
+ifeq ($(feature-cxx), 1)
+ CMD_TARGETS += $(CXX_TEST_TARGET)
+endif
+
TARGETS = $(CMD_TARGETS)
all: fixdep all_cmd
-all_cmd: $(CMD_TARGETS)
+all_cmd: $(CMD_TARGETS) check
$(BPF_IN): force elfdep bpfdep
@(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
@@ -170,11 +182,27 @@ $(BPF_IN): force elfdep bpfdep
$(Q)$(MAKE) $(build)=libbpf
$(OUTPUT)libbpf.so: $(BPF_IN)
- $(QUIET_LINK)$(CC) --shared $^ -o $@
+ $(QUIET_LINK)$(CC) --shared -Wl,--version-script=$(VERSION_SCRIPT) \
+ $^ -o $@
$(OUTPUT)libbpf.a: $(BPF_IN)
$(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
+$(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a
+ $(QUIET_LINK)$(CXX) $^ -lelf -o $@
+
+check: check_abi
+
+check_abi: $(OUTPUT)libbpf.so
+ @if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \
+ echo "Warning: Num of global symbols in $(BPF_IN)" \
+ "($(GLOBAL_SYM_COUNT)) does NOT match with num of" \
+ "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \
+ "Please make sure all LIBBPF_API symbols are" \
+ "versioned in $(VERSION_SCRIPT)." >&2; \
+ exit 1; \
+ fi
+
define do_install
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
@@ -201,8 +229,8 @@ config-clean:
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
clean:
- $(call QUIET_CLEAN, libbpf) $(RM) *.o *~ $(TARGETS) *.a *.so .*.d .*.cmd \
- $(RM) LIBBPF-CFLAGS
+ $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
+ *.o *~ *.a *.so .*.d .*.cmd LIBBPF-CFLAGS
$(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst
new file mode 100644
index 000000000000..056f38310722
--- /dev/null
+++ b/tools/lib/bpf/README.rst
@@ -0,0 +1,141 @@
+.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+libbpf API naming convention
+============================
+
+The libbpf API provides access to a few logically separated groups of
+functions and types. Every group has its own naming convention,
+described here. It's recommended to follow these conventions whenever a
+new function or type is added, to keep the libbpf API clean and consistent.
+
+All types and functions provided by libbpf API should have one of the
+following prefixes: ``bpf_``, ``btf_``, ``libbpf_``.
+
+System call wrappers
+--------------------
+
+System call wrappers are simple wrappers for commands supported by the
+sys_bpf system call. These wrappers should go in the ``bpf.h`` header
+file and map one-to-one to the corresponding commands.
+
+For example, ``bpf_map_lookup_elem`` wraps the ``BPF_MAP_LOOKUP_ELEM``
+command of sys_bpf, ``bpf_prog_attach`` wraps ``BPF_PROG_ATTACH``, etc.
+
+Objects
+-------
+
+Another class of types and functions provided by the libbpf API is
+"objects" and functions to work with them. Objects are high-level
+abstractions such as a BPF program or a BPF map. They're represented by
+corresponding structures such as ``struct bpf_object``,
+``struct bpf_program``, ``struct bpf_map``, etc.
+
+Structures are forward-declared, and access to their fields should be
+provided via corresponding getters and setters rather than directly.
+
+These objects are associated with the corresponding parts of the ELF
+object that contains compiled BPF programs.
+
+For example, ``struct bpf_object`` represents the ELF object itself,
+created from an ELF file or from a buffer, ``struct bpf_program``
+represents a program in the ELF object and ``struct bpf_map`` is a map.
+
+Functions that work with an object have names built from the object
+name, a double underscore and a part that describes the function's purpose.
+
+For example, ``bpf_object__open`` consists of the name of the
+corresponding object, ``bpf_object``, a double underscore and ``open``,
+which defines the purpose of the function: open an ELF file and create a
+``bpf_object`` from it.
+
+Another example: ``bpf_program__load`` is named for the corresponding
+object, ``bpf_program``, separated from the other part of the name
+by a double underscore.
+
+All objects and corresponding functions other than BTF-related ones
+should go in ``libbpf.h``. BTF types and functions should go in ``btf.h``.
+
+Auxiliary functions
+-------------------
+
+Auxiliary functions and types that don't fit well in any of the
+categories described above should have the ``libbpf_`` prefix, e.g.
+``libbpf_get_error`` or ``libbpf_prog_type_by_name``.
+
+libbpf ABI
+==========
+
+libbpf can be linked statically or used as a DSO. To avoid possible
+conflicts with other libraries an application is linked with, all
+non-static libbpf symbols should have one of the prefixes mentioned in
+the API documentation above. See the API naming convention to choose the
+right name for a new symbol.
+
+Symbol visibility
+-----------------
+
+libbpf follows the model in which all global symbols have "hidden"
+visibility by default, and to make a symbol visible it has to be
+explicitly attributed with the ``LIBBPF_API`` macro. For example:
+
+.. code-block:: c
+
+ LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
+
+This prevents accidentally exporting a symbol that is not supposed to
+be part of the ABI, which, in turn, improves both the libbpf developer
+and user experience.
+
+ABI versioning
+---------------
+
+To make future ABI extensions possible, the libbpf ABI is versioned.
+Versioning is implemented by the ``libbpf.map`` version script, which is
+passed to the linker.
+
+The version name is the ``LIBBPF_`` prefix plus a three-component
+numeric version, starting from ``0.0.1``.
+
+Every time the ABI is changed, e.g. because a new symbol is added or
+the semantics of an existing symbol change, the ABI version should be bumped.
+
+For example, if the current state of ``libbpf.map`` is:
+
+.. code-block:: none
+
+ LIBBPF_0.0.1 {
+ global:
+ bpf_func_a;
+ bpf_func_b;
+ local:
+ *;
+ };
+
+and a new symbol ``bpf_func_c`` is being introduced, then
+``libbpf.map`` should be changed like this:
+
+.. code-block:: none
+
+ LIBBPF_0.0.1 {
+ global:
+ bpf_func_a;
+ bpf_func_b;
+ local:
+ *;
+ };
+ LIBBPF_0.0.2 {
+ global:
+ bpf_func_c;
+ } LIBBPF_0.0.1;
+
+where the new version ``LIBBPF_0.0.2`` depends on the previous
+``LIBBPF_0.0.1``.
+
+The format of the version script and ways to handle ABI changes,
+including incompatible ones, are described in detail in [1].
+
+Links
+=====
+
+[1] https://www.akkadia.org/drepper/dsohowto.pdf
+ (Chapter 3. Maintaining APIs and ABIs).
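A short sketch of the naming conventions above in use; the object file path and the ``<bpf/...>`` include locations are illustrative assumptions, not mandated by the README:

#include <bpf/libbpf.h>
#include <bpf/bpf.h>

int load_example(void)
{
	/* "object__verb": open an ELF file and create a bpf_object */
	struct bpf_object *obj = bpf_object__open("prog.o");

	if (libbpf_get_error(obj))	/* auxiliary libbpf_ helper */
		return -1;
	if (bpf_object__load(obj))	/* load all programs and maps */
		return -1;
	/* syscall wrappers such as bpf_map_lookup_elem() map one-to-one
	 * to BPF_MAP_LOOKUP_ELEM and friends
	 */
	return 0;
}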
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 03f9bcc4ef50..3caaa3428774 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -173,9 +173,35 @@ int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
-1);
}
+static void *
+alloc_zero_tailing_info(const void *orecord, __u32 cnt,
+ __u32 actual_rec_size, __u32 expected_rec_size)
+{
+ __u64 info_len = actual_rec_size * cnt;
+ void *info, *nrecord;
+ int i;
+
+ info = malloc(info_len);
+ if (!info)
+ return NULL;
+
+ /* zero out bytes kernel does not understand */
+ nrecord = info;
+ for (i = 0; i < cnt; i++) {
+ memcpy(nrecord, orecord, expected_rec_size);
+ memset(nrecord + expected_rec_size, 0,
+ actual_rec_size - expected_rec_size);
+ orecord += actual_rec_size;
+ nrecord += actual_rec_size;
+ }
+
+ return info;
+}
+
int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz)
{
+ void *finfo = NULL, *linfo = NULL;
union bpf_attr attr;
__u32 name_len;
int fd;
@@ -196,19 +222,72 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
attr.log_level = 0;
attr.kern_version = load_attr->kern_version;
attr.prog_ifindex = load_attr->prog_ifindex;
+ attr.prog_btf_fd = load_attr->prog_btf_fd;
+ attr.func_info_rec_size = load_attr->func_info_rec_size;
+ attr.func_info_cnt = load_attr->func_info_cnt;
+ attr.func_info = ptr_to_u64(load_attr->func_info);
+ attr.line_info_rec_size = load_attr->line_info_rec_size;
+ attr.line_info_cnt = load_attr->line_info_cnt;
+ attr.line_info = ptr_to_u64(load_attr->line_info);
memcpy(attr.prog_name, load_attr->name,
min(name_len, BPF_OBJ_NAME_LEN - 1));
fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
- if (fd >= 0 || !log_buf || !log_buf_sz)
+ if (fd >= 0)
return fd;
+ /* After bpf_prog_load, the kernel may modify certain attributes
+ * to give user space a hint of how to deal with the loading failure.
+ * Check to see whether we can make some changes and load again.
+ */
+ while (errno == E2BIG && (!finfo || !linfo)) {
+ if (!finfo && attr.func_info_cnt &&
+ attr.func_info_rec_size < load_attr->func_info_rec_size) {
+ /* try with corrected func info records */
+ finfo = alloc_zero_tailing_info(load_attr->func_info,
+ load_attr->func_info_cnt,
+ load_attr->func_info_rec_size,
+ attr.func_info_rec_size);
+ if (!finfo)
+ goto done;
+
+ attr.func_info = ptr_to_u64(finfo);
+ attr.func_info_rec_size = load_attr->func_info_rec_size;
+ } else if (!linfo && attr.line_info_cnt &&
+ attr.line_info_rec_size <
+ load_attr->line_info_rec_size) {
+ linfo = alloc_zero_tailing_info(load_attr->line_info,
+ load_attr->line_info_cnt,
+ load_attr->line_info_rec_size,
+ attr.line_info_rec_size);
+ if (!linfo)
+ goto done;
+
+ attr.line_info = ptr_to_u64(linfo);
+ attr.line_info_rec_size = load_attr->line_info_rec_size;
+ } else {
+ break;
+ }
+
+ fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+
+ if (fd >= 0)
+ goto done;
+ }
+
+ if (!log_buf || !log_buf_sz)
+ goto done;
+
/* Try again with log */
attr.log_buf = ptr_to_u64(log_buf);
attr.log_size = log_buf_sz;
attr.log_level = 1;
log_buf[0] = 0;
- return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+ fd = sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
+done:
+ free(finfo);
+ free(linfo);
+ return fd;
}
int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
@@ -231,9 +310,9 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
}
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
- size_t insns_cnt, int strict_alignment,
- const char *license, __u32 kern_version,
- char *log_buf, size_t log_buf_sz, int log_level)
+ size_t insns_cnt, __u32 prog_flags, const char *license,
+ __u32 kern_version, char *log_buf, size_t log_buf_sz,
+ int log_level)
{
union bpf_attr attr;
@@ -247,7 +326,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
attr.log_level = log_level;
log_buf[0] = 0;
attr.kern_version = kern_version;
- attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;
+ attr.prog_flags = prog_flags;
return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
@@ -415,6 +494,29 @@ int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
return ret;
}
+int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
+{
+ union bpf_attr attr;
+ int ret;
+
+ if (!test_attr->data_out && test_attr->data_size_out > 0)
+ return -EINVAL;
+
+ bzero(&attr, sizeof(attr));
+ attr.test.prog_fd = test_attr->prog_fd;
+ attr.test.data_in = ptr_to_u64(test_attr->data_in);
+ attr.test.data_out = ptr_to_u64(test_attr->data_out);
+ attr.test.data_size_in = test_attr->data_size_in;
+ attr.test.data_size_out = test_attr->data_size_out;
+ attr.test.repeat = test_attr->repeat;
+
+ ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
+ test_attr->data_size_out = attr.test.data_size_out;
+ test_attr->retval = attr.test.retval;
+ test_attr->duration = attr.test.duration;
+ return ret;
+}
+
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
{
union bpf_attr attr;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 26a51538213c..8f09de482839 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -27,6 +27,10 @@
#include <stdbool.h>
#include <stddef.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#ifndef LIBBPF_API
#define LIBBPF_API __attribute__((visibility("default")))
#endif
@@ -74,6 +78,13 @@ struct bpf_load_program_attr {
const char *license;
__u32 kern_version;
__u32 prog_ifindex;
+ __u32 prog_btf_fd;
+ __u32 func_info_rec_size;
+ const void *func_info;
+ __u32 func_info_cnt;
+ __u32 line_info_rec_size;
+ const void *line_info;
+ __u32 line_info_cnt;
};
/* Flags to direct loading requirements */
@@ -90,7 +101,7 @@ LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
char *log_buf, size_t log_buf_sz);
LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
const struct bpf_insn *insns,
- size_t insns_cnt, int strict_alignment,
+ size_t insns_cnt, __u32 prog_flags,
const char *license, __u32 kern_version,
char *log_buf, size_t log_buf_sz,
int log_level);
@@ -110,6 +121,25 @@ LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
enum bpf_attach_type type);
+
+struct bpf_prog_test_run_attr {
+ int prog_fd;
+ int repeat;
+ const void *data_in;
+ __u32 data_size_in;
+ void *data_out; /* optional */
+ __u32 data_size_out; /* in: max length of data_out
+ * out: length of data_out */
+ __u32 retval; /* out: return code of the BPF program */
+ __u32 duration; /* out: average per repetition in ns */
+};
+
+LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr);
+
+/*
+ * bpf_prog_test_run does not check that data_out is large enough. Consider
+ * using bpf_prog_test_run_xattr instead.
+ */
LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data,
__u32 size, void *data_out, __u32 *size_out,
__u32 *retval, __u32 *duration);
@@ -128,4 +158,9 @@ LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf,
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
__u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
__u64 *probe_offset, __u64 *probe_addr);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
#endif /* __LIBBPF_BPF_H */
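A minimal sketch of the new length-checked test-run wrapper, assuming prog_fd refers to an already-loaded BPF program; the helper name is illustrative:

#include <bpf/bpf.h>
#include <string.h>

static int run_once(int prog_fd, const void *pkt, __u32 pkt_len,
		    void *out, __u32 out_cap)
{
	struct bpf_prog_test_run_attr tattr;
	int err;

	memset(&tattr, 0, sizeof(tattr));
	tattr.prog_fd = prog_fd;
	tattr.repeat = 1;
	tattr.data_in = pkt;
	tattr.data_size_in = pkt_len;
	tattr.data_out = out;		/* optional output buffer */
	tattr.data_size_out = out_cap;	/* in: capacity, out: bytes written */

	err = bpf_prog_test_run_xattr(&tattr);
	/* on success, tattr.retval and tattr.duration are filled in */
	return err ? err : (int)tattr.retval;
}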
diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c
new file mode 100644
index 000000000000..6978314ea7f6
--- /dev/null
+++ b/tools/lib/bpf/bpf_prog_linfo.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2018 Facebook */
+
+#include <string.h>
+#include <stdlib.h>
+#include <linux/err.h>
+#include <linux/bpf.h>
+#include "libbpf.h"
+
+#ifndef min
+#define min(x, y) ((x) < (y) ? (x) : (y))
+#endif
+
+struct bpf_prog_linfo {
+ void *raw_linfo;
+ void *raw_jited_linfo;
+ __u32 *nr_jited_linfo_per_func;
+ __u32 *jited_linfo_func_idx;
+ __u32 nr_linfo;
+ __u32 nr_jited_func;
+ __u32 rec_size;
+ __u32 jited_rec_size;
+};
+
+static int dissect_jited_func(struct bpf_prog_linfo *prog_linfo,
+ const __u64 *ksym_func, const __u32 *ksym_len)
+{
+ __u32 nr_jited_func, nr_linfo;
+ const void *raw_jited_linfo;
+ const __u64 *jited_linfo;
+ __u64 last_jited_linfo;
+ /*
+ * Index to raw_jited_linfo:
+ * i: Index for searching the next ksym_func
+ * prev_i: Index to the last found ksym_func
+ */
+ __u32 i, prev_i;
+ __u32 f; /* Index to ksym_func */
+
+ raw_jited_linfo = prog_linfo->raw_jited_linfo;
+ jited_linfo = raw_jited_linfo;
+ if (ksym_func[0] != *jited_linfo)
+ goto errout;
+
+ prog_linfo->jited_linfo_func_idx[0] = 0;
+ nr_jited_func = prog_linfo->nr_jited_func;
+ nr_linfo = prog_linfo->nr_linfo;
+
+ for (prev_i = 0, i = 1, f = 1;
+ i < nr_linfo && f < nr_jited_func;
+ i++) {
+ raw_jited_linfo += prog_linfo->jited_rec_size;
+ last_jited_linfo = *jited_linfo;
+ jited_linfo = raw_jited_linfo;
+
+ if (ksym_func[f] == *jited_linfo) {
+ prog_linfo->jited_linfo_func_idx[f] = i;
+
+ /* Sanity check */
+ if (last_jited_linfo - ksym_func[f - 1] + 1 >
+ ksym_len[f - 1])
+ goto errout;
+
+ prog_linfo->nr_jited_linfo_per_func[f - 1] =
+ i - prev_i;
+ prev_i = i;
+
+ /*
+ * The ksym_func[f] is found in jited_linfo.
+ * Look for the next one.
+ */
+ f++;
+ } else if (*jited_linfo <= last_jited_linfo) {
+ /* Ensure the addr is increasing _within_ a func */
+ goto errout;
+ }
+ }
+
+ if (f != nr_jited_func)
+ goto errout;
+
+ prog_linfo->nr_jited_linfo_per_func[nr_jited_func - 1] =
+ nr_linfo - prev_i;
+
+ return 0;
+
+errout:
+ return -EINVAL;
+}
+
+void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo)
+{
+ if (!prog_linfo)
+ return;
+
+ free(prog_linfo->raw_linfo);
+ free(prog_linfo->raw_jited_linfo);
+ free(prog_linfo->nr_jited_linfo_per_func);
+ free(prog_linfo->jited_linfo_func_idx);
+ free(prog_linfo);
+}
+
+struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
+{
+ struct bpf_prog_linfo *prog_linfo;
+ __u32 nr_linfo, nr_jited_func;
+
+ nr_linfo = info->nr_line_info;
+
+ if (!nr_linfo)
+ return NULL;
+
+ /*
+ * The min size that bpf_prog_linfo has to access for
+ * searching purposes.
+ */
+ if (info->line_info_rec_size <
+ offsetof(struct bpf_line_info, file_name_off))
+ return NULL;
+
+ prog_linfo = calloc(1, sizeof(*prog_linfo));
+ if (!prog_linfo)
+ return NULL;
+
+ /* Copy xlated line_info */
+ prog_linfo->nr_linfo = nr_linfo;
+ prog_linfo->rec_size = info->line_info_rec_size;
+ prog_linfo->raw_linfo = malloc(nr_linfo * prog_linfo->rec_size);
+ if (!prog_linfo->raw_linfo)
+ goto err_free;
+ memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info,
+ nr_linfo * prog_linfo->rec_size);
+
+ nr_jited_func = info->nr_jited_ksyms;
+ if (!nr_jited_func ||
+ !info->jited_line_info ||
+ info->nr_jited_line_info != nr_linfo ||
+ info->jited_line_info_rec_size < sizeof(__u64) ||
+ info->nr_jited_func_lens != nr_jited_func ||
+ !info->jited_ksyms ||
+ !info->jited_func_lens)
+ /* Not enough info to provide jited_line_info */
+ return prog_linfo;
+
+ /* Copy jited_line_info */
+ prog_linfo->nr_jited_func = nr_jited_func;
+ prog_linfo->jited_rec_size = info->jited_line_info_rec_size;
+ prog_linfo->raw_jited_linfo = malloc(nr_linfo *
+ prog_linfo->jited_rec_size);
+ if (!prog_linfo->raw_jited_linfo)
+ goto err_free;
+ memcpy(prog_linfo->raw_jited_linfo,
+ (void *)(long)info->jited_line_info,
+ nr_linfo * prog_linfo->jited_rec_size);
+
+ /* Number of jited_line_info per jited func */
+ prog_linfo->nr_jited_linfo_per_func = malloc(nr_jited_func *
+ sizeof(__u32));
+ if (!prog_linfo->nr_jited_linfo_per_func)
+ goto err_free;
+
+ /*
+ * For each jited func, the start idx into the
+ * "linfo" and "jited_linfo" arrays.
+ */
+ prog_linfo->jited_linfo_func_idx = malloc(nr_jited_func *
+ sizeof(__u32));
+ if (!prog_linfo->jited_linfo_func_idx)
+ goto err_free;
+
+ if (dissect_jited_func(prog_linfo,
+ (__u64 *)(long)info->jited_ksyms,
+ (__u32 *)(long)info->jited_func_lens))
+ goto err_free;
+
+ return prog_linfo;
+
+err_free:
+ bpf_prog_linfo__free(prog_linfo);
+ return NULL;
+}
+
+const struct bpf_line_info *
+bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
+ __u64 addr, __u32 func_idx, __u32 nr_skip)
+{
+ __u32 jited_rec_size, rec_size, nr_linfo, start, i;
+ const void *raw_jited_linfo, *raw_linfo;
+ const __u64 *jited_linfo;
+
+ if (func_idx >= prog_linfo->nr_jited_func)
+ return NULL;
+
+ nr_linfo = prog_linfo->nr_jited_linfo_per_func[func_idx];
+ if (nr_skip >= nr_linfo)
+ return NULL;
+
+ start = prog_linfo->jited_linfo_func_idx[func_idx] + nr_skip;
+ jited_rec_size = prog_linfo->jited_rec_size;
+ raw_jited_linfo = prog_linfo->raw_jited_linfo +
+ (start * jited_rec_size);
+ jited_linfo = raw_jited_linfo;
+ if (addr < *jited_linfo)
+ return NULL;
+
+ nr_linfo -= nr_skip;
+ rec_size = prog_linfo->rec_size;
+ raw_linfo = prog_linfo->raw_linfo + (start * rec_size);
+ for (i = 0; i < nr_linfo; i++) {
+ if (addr < *jited_linfo)
+ break;
+
+ raw_linfo += rec_size;
+ raw_jited_linfo += jited_rec_size;
+ jited_linfo = raw_jited_linfo;
+ }
+
+ return raw_linfo - rec_size;
+}
+
+const struct bpf_line_info *
+bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
+ __u32 insn_off, __u32 nr_skip)
+{
+ const struct bpf_line_info *linfo;
+ __u32 rec_size, nr_linfo, i;
+ const void *raw_linfo;
+
+ nr_linfo = prog_linfo->nr_linfo;
+ if (nr_skip >= nr_linfo)
+ return NULL;
+
+ rec_size = prog_linfo->rec_size;
+ raw_linfo = prog_linfo->raw_linfo + (nr_skip * rec_size);
+ linfo = raw_linfo;
+ if (insn_off < linfo->insn_off)
+ return NULL;
+
+ nr_linfo -= nr_skip;
+ for (i = 0; i < nr_linfo; i++) {
+ if (insn_off < linfo->insn_off)
+ break;
+
+ raw_linfo += rec_size;
+ linfo = raw_linfo;
+ }
+
+ return raw_linfo - rec_size;
+}
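A sketch of the intended use of this index, assuming "info" was filled in by bpf_obj_get_info_by_fd() with the line_info arrays populated, and assuming the ``<bpf/...>`` install paths for the libbpf headers:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

/* Print "file:line" for the line_info record covering insn_off. */
static void show_line(const struct bpf_prog_info *info,
		      const struct btf *btf, __u32 insn_off)
{
	struct bpf_prog_linfo *idx = bpf_prog_linfo__new(info);
	const struct bpf_line_info *linfo;

	if (!idx)
		return;
	/* last record whose insn_off is <= the requested offset */
	linfo = bpf_prog_linfo__lfind(idx, insn_off, 0);
	if (linfo)
		printf("%s:%u\n",
		       btf__name_by_offset(btf, linfo->file_name_off),
		       BPF_LINE_INFO_LINE_NUM(linfo->line_col));
	bpf_prog_linfo__free(idx);
}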
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 449591aa9900..d682d3b8f7b9 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -37,6 +37,48 @@ struct btf {
int fd;
};
+struct btf_ext_info {
+ /*
+ * info points to a deep copy of the individual info section
+ * (e.g. func_info and line_info) from the .BTF.ext.
+ * It does not include the __u32 rec_size.
+ */
+ void *info;
+ __u32 rec_size;
+ __u32 len;
+};
+
+struct btf_ext {
+ struct btf_ext_info func_info;
+ struct btf_ext_info line_info;
+};
+
+struct btf_ext_info_sec {
+ __u32 sec_name_off;
+ __u32 num_info;
+ /* Followed by num_info * record_size number of bytes */
+ __u8 data[0];
+};
+
+/* The minimum bpf_func_info checked by the loader */
+struct bpf_func_info_min {
+ __u32 insn_off;
+ __u32 type_id;
+};
+
+/* The minimum bpf_line_info checked by the loader */
+struct bpf_line_info_min {
+ __u32 insn_off;
+ __u32 file_name_off;
+ __u32 line_off;
+ __u32 line_col;
+};
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64) (unsigned long) ptr;
+}
+
static int btf_add_type(struct btf *btf, struct btf_type *t)
{
if (btf->types_size - btf->nr_types < 2) {
@@ -165,6 +207,10 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
case BTF_KIND_ENUM:
next_type += vlen * sizeof(struct btf_enum);
break;
+ case BTF_KIND_FUNC_PROTO:
+ next_type += vlen * sizeof(struct btf_param);
+ break;
+ case BTF_KIND_FUNC:
case BTF_KIND_TYPEDEF:
case BTF_KIND_PTR:
case BTF_KIND_FWD:
@@ -393,3 +439,350 @@ const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
else
return NULL;
}
+
+int btf__get_from_id(__u32 id, struct btf **btf)
+{
+ struct bpf_btf_info btf_info = { 0 };
+ __u32 len = sizeof(btf_info);
+ __u32 last_size;
+ int btf_fd;
+ void *ptr;
+ int err;
+
+ err = 0;
+ *btf = NULL;
+ btf_fd = bpf_btf_get_fd_by_id(id);
+ if (btf_fd < 0)
+ return 0;
+
+ /* we won't know btf_size until we call bpf_obj_get_info_by_fd(), so
+ * let's start with a sane default - 4KiB here - and resize it only if
+ * bpf_obj_get_info_by_fd() needs a bigger buffer.
+ */
+ btf_info.btf_size = 4096;
+ last_size = btf_info.btf_size;
+ ptr = malloc(last_size);
+ if (!ptr) {
+ err = -ENOMEM;
+ goto exit_free;
+ }
+
+ bzero(ptr, last_size);
+ btf_info.btf = ptr_to_u64(ptr);
+ err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+
+ if (!err && btf_info.btf_size > last_size) {
+ void *temp_ptr;
+
+ last_size = btf_info.btf_size;
+ temp_ptr = realloc(ptr, last_size);
+ if (!temp_ptr) {
+ err = -ENOMEM;
+ goto exit_free;
+ }
+ ptr = temp_ptr;
+ bzero(ptr, last_size);
+ btf_info.btf = ptr_to_u64(ptr);
+ err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+ }
+
+ if (err || btf_info.btf_size > last_size) {
+ err = errno;
+ goto exit_free;
+ }
+
+ *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size, NULL);
+ if (IS_ERR(*btf)) {
+ err = PTR_ERR(*btf);
+ *btf = NULL;
+ }
+
+exit_free:
+ close(btf_fd);
+ free(ptr);
+
+ return err;
+}
+
+struct btf_ext_sec_copy_param {
+ __u32 off;
+ __u32 len;
+ __u32 min_rec_size;
+ struct btf_ext_info *ext_info;
+ const char *desc;
+};
+
+static int btf_ext_copy_info(struct btf_ext *btf_ext,
+ __u8 *data, __u32 data_size,
+ struct btf_ext_sec_copy_param *ext_sec,
+ btf_print_fn_t err_log)
+{
+ const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
+ const struct btf_ext_info_sec *sinfo;
+ struct btf_ext_info *ext_info;
+ __u32 info_left, record_size;
+ /* The start of the info sec (including the __u32 record_size). */
+ const void *info;
+
+ /* data and data_size do not include btf_ext_header from now on */
+ data = data + hdr->hdr_len;
+ data_size -= hdr->hdr_len;
+
+ if (ext_sec->off & 0x03) {
+ elog(".BTF.ext %s section is not aligned to 4 bytes\n",
+ ext_sec->desc);
+ return -EINVAL;
+ }
+
+ if (data_size < ext_sec->off ||
+ ext_sec->len > data_size - ext_sec->off) {
+ elog("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
+ ext_sec->desc, ext_sec->off, ext_sec->len);
+ return -EINVAL;
+ }
+
+ info = data + ext_sec->off;
+ info_left = ext_sec->len;
+
+ /* At least a record size */
+ if (info_left < sizeof(__u32)) {
+ elog(".BTF.ext %s record size not found\n", ext_sec->desc);
+ return -EINVAL;
+ }
+
+ /* The record size needs to meet the minimum standard */
+ record_size = *(__u32 *)info;
+ if (record_size < ext_sec->min_rec_size ||
+ record_size & 0x03) {
+ elog("%s section in .BTF.ext has invalid record size %u\n",
+ ext_sec->desc, record_size);
+ return -EINVAL;
+ }
+
+ sinfo = info + sizeof(__u32);
+ info_left -= sizeof(__u32);
+
+ /* If no records, return failure now so .BTF.ext won't be used. */
+ if (!info_left) {
+ elog("%s section in .BTF.ext has no records", ext_sec->desc);
+ return -EINVAL;
+ }
+
+ while (info_left) {
+ unsigned int sec_hdrlen = sizeof(struct btf_ext_info_sec);
+ __u64 total_record_size;
+ __u32 num_records;
+
+ if (info_left < sec_hdrlen) {
+ elog("%s section header is not found in .BTF.ext\n",
+ ext_sec->desc);
+ return -EINVAL;
+ }
+
+ num_records = sinfo->num_info;
+ if (num_records == 0) {
+ elog("%s section has incorrect num_records in .BTF.ext\n",
+ ext_sec->desc);
+ return -EINVAL;
+ }
+
+ total_record_size = sec_hdrlen +
+ (__u64)num_records * record_size;
+ if (info_left < total_record_size) {
+ elog("%s section has incorrect num_records in .BTF.ext\n",
+ ext_sec->desc);
+ return -EINVAL;
+ }
+
+ info_left -= total_record_size;
+ sinfo = (void *)sinfo + total_record_size;
+ }
+
+ ext_info = ext_sec->ext_info;
+ ext_info->len = ext_sec->len - sizeof(__u32);
+ ext_info->rec_size = record_size;
+ ext_info->info = malloc(ext_info->len);
+ if (!ext_info->info)
+ return -ENOMEM;
+ memcpy(ext_info->info, info + sizeof(__u32), ext_info->len);
+
+ return 0;
+}
+
+static int btf_ext_copy_func_info(struct btf_ext *btf_ext,
+ __u8 *data, __u32 data_size,
+ btf_print_fn_t err_log)
+{
+ const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
+ struct btf_ext_sec_copy_param param = {
+ .off = hdr->func_info_off,
+ .len = hdr->func_info_len,
+ .min_rec_size = sizeof(struct bpf_func_info_min),
+ .ext_info = &btf_ext->func_info,
+ .desc = "func_info"
+ };
+
+ return btf_ext_copy_info(btf_ext, data, data_size, &param, err_log);
+}
+
+static int btf_ext_copy_line_info(struct btf_ext *btf_ext,
+ __u8 *data, __u32 data_size,
+ btf_print_fn_t err_log)
+{
+ const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
+ struct btf_ext_sec_copy_param param = {
+ .off = hdr->line_info_off,
+ .len = hdr->line_info_len,
+ .min_rec_size = sizeof(struct bpf_line_info_min),
+ .ext_info = &btf_ext->line_info,
+ .desc = "line_info",
+ };
+
+ return btf_ext_copy_info(btf_ext, data, data_size, &param, err_log);
+}
+
+static int btf_ext_parse_hdr(__u8 *data, __u32 data_size,
+ btf_print_fn_t err_log)
+{
+ const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
+
+ if (data_size < offsetof(struct btf_ext_header, func_info_off) ||
+ data_size < hdr->hdr_len) {
+ elog("BTF.ext header not found");
+ return -EINVAL;
+ }
+
+ if (hdr->magic != BTF_MAGIC) {
+ elog("Invalid BTF.ext magic:%x\n", hdr->magic);
+ return -EINVAL;
+ }
+
+ if (hdr->version != BTF_VERSION) {
+ elog("Unsupported BTF.ext version:%u\n", hdr->version);
+ return -ENOTSUP;
+ }
+
+ if (hdr->flags) {
+ elog("Unsupported BTF.ext flags:%x\n", hdr->flags);
+ return -ENOTSUP;
+ }
+
+ if (data_size == hdr->hdr_len) {
+ elog("BTF.ext has no data\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void btf_ext__free(struct btf_ext *btf_ext)
+{
+ if (!btf_ext)
+ return;
+
+ free(btf_ext->func_info.info);
+ free(btf_ext->line_info.info);
+ free(btf_ext);
+}
+
+struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
+{
+ struct btf_ext *btf_ext;
+ int err;
+
+ err = btf_ext_parse_hdr(data, size, err_log);
+ if (err)
+ return ERR_PTR(err);
+
+ btf_ext = calloc(1, sizeof(struct btf_ext));
+ if (!btf_ext)
+ return ERR_PTR(-ENOMEM);
+
+ err = btf_ext_copy_func_info(btf_ext, data, size, err_log);
+ if (err) {
+ btf_ext__free(btf_ext);
+ return ERR_PTR(err);
+ }
+
+ err = btf_ext_copy_line_info(btf_ext, data, size, err_log);
+ if (err) {
+ btf_ext__free(btf_ext);
+ return ERR_PTR(err);
+ }
+
+ return btf_ext;
+}
+
+static int btf_ext_reloc_info(const struct btf *btf,
+ const struct btf_ext_info *ext_info,
+ const char *sec_name, __u32 insns_cnt,
+ void **info, __u32 *cnt)
+{
+ __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
+ __u32 i, record_size, existing_len, records_len;
+ struct btf_ext_info_sec *sinfo;
+ const char *info_sec_name;
+ __u64 remain_len;
+ void *data;
+
+ record_size = ext_info->rec_size;
+ sinfo = ext_info->info;
+ remain_len = ext_info->len;
+ while (remain_len > 0) {
+ records_len = sinfo->num_info * record_size;
+ info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
+ if (strcmp(info_sec_name, sec_name)) {
+ remain_len -= sec_hdrlen + records_len;
+ sinfo = (void *)sinfo + sec_hdrlen + records_len;
+ continue;
+ }
+
+ existing_len = (*cnt) * record_size;
+ data = realloc(*info, existing_len + records_len);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(data + existing_len, sinfo->data, records_len);
+ /* adjust insn_off only; the rest of the data will be
+ * passed as-is to the kernel.
+ */
+ for (i = 0; i < sinfo->num_info; i++) {
+ __u32 *insn_off;
+
+ insn_off = data + existing_len + (i * record_size);
+ *insn_off = *insn_off / sizeof(struct bpf_insn) +
+ insns_cnt;
+ }
+ *info = data;
+ *cnt += sinfo->num_info;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **func_info, __u32 *cnt)
+{
+ return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
+ insns_cnt, func_info, cnt);
+}
+
+int btf_ext__reloc_line_info(const struct btf *btf, const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **line_info, __u32 *cnt)
+{
+ return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
+ insns_cnt, line_info, cnt);
+}
+
+__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
+{
+ return btf_ext->func_info.rec_size;
+}
+
+__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
+{
+ return btf_ext->line_info.rec_size;
+}
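A minimal sketch of btf__get_from_id() in use, assuming "id" came from a kernel object such as bpf_prog_info.btf_id; the helper name is illustrative, and lifetime handling is elided:

#include <bpf/btf.h>

static const char *name_for(__u32 id, __u32 name_off)
{
	struct btf *btf = NULL;

	/* returns 0 on success, but may also return 0 with *btf left
	 * NULL when the id cannot be resolved to an fd, hence both checks
	 */
	if (btf__get_from_id(id, &btf) || !btf)
		return NULL;
	/* real code would keep btf around and btf__free() it later */
	return btf__name_by_offset(btf, name_off);
}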
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index b77e7080f7e7..b0610dcdae6b 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -6,15 +6,55 @@
#include <linux/types.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#ifndef LIBBPF_API
#define LIBBPF_API __attribute__((visibility("default")))
#endif
#define BTF_ELF_SEC ".BTF"
+#define BTF_EXT_ELF_SEC ".BTF.ext"
struct btf;
+struct btf_ext;
struct btf_type;
+/*
+ * The .BTF.ext ELF section layout defined as
+ * struct btf_ext_header
+ * func_info subsection
+ *
+ * The func_info subsection layout:
+ * record size for struct bpf_func_info in the func_info subsection
+ * struct btf_sec_func_info for section #1
+ * a list of bpf_func_info records for section #1
+ * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
+ * but may not be identical
+ * struct btf_sec_func_info for section #2
+ * a list of bpf_func_info records for section #2
+ * ......
+ *
+ * Note that the bpf_func_info record size in .BTF.ext may not
+ * be the same as the one defined in include/uapi/linux/bpf.h.
+ * The loader should ensure that record_size meets the minimum
+ * requirement and pass the record as-is to the kernel. The
+ * kernel will handle the func_info properly based on its contents.
+ */
+struct btf_ext_header {
+ __u16 magic;
+ __u8 version;
+ __u8 flags;
+ __u32 hdr_len;
+
+ /* All offsets are in bytes relative to the end of this header */
+ __u32 func_info_off;
+ __u32 func_info_len;
+ __u32 line_info_off;
+ __u32 line_info_len;
+};
+
typedef int (*btf_print_fn_t)(const char *, ...)
__attribute__((format(printf, 1, 2)));
@@ -28,5 +68,23 @@ LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__fd(const struct btf *btf);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
+LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
+
+struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log);
+void btf_ext__free(struct btf_ext *btf_ext);
+int btf_ext__reloc_func_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **func_info, __u32 *func_info_len);
+int btf_ext__reloc_line_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **line_info, __u32 *cnt);
+__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
+__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
#endif /* __LIBBPF_BTF_H */
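A sketch of the layout described in the btf_ext_header comment above: locating the func_info subsection inside raw .BTF.ext section bytes. Validation of magic/version (as btf_ext_parse_hdr() does) is deliberately omitted, and the helper name is illustrative:

#include <bpf/btf.h>

/* Given raw .BTF.ext section bytes, return a pointer to the start of
 * the func_info subsection (its leading __u32 is the record size).
 */
static const void *func_info_subsec(const void *data)
{
	const struct btf_ext_header *hdr = data;

	/* offsets are relative to the end of the header */
	return data + hdr->hdr_len + hdr->func_info_off;
}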
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index d6e62e90e8d4..169e347c76f6 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -9,7 +9,9 @@
* Copyright (C) 2017 Nicira, Inc.
*/
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
@@ -24,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
+#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
@@ -114,6 +117,11 @@ void libbpf_set_print(libbpf_print_fn_t warn,
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
+struct bpf_capabilities {
+ /* v4.14: kernel support for program & map names. */
+ __u32 name:1;
+};
+
/*
* bpf_prog should be a better name but it has been used in
* linux/filter.h.
@@ -124,6 +132,10 @@ struct bpf_program {
char *name;
int prog_ifindex;
char *section_name;
+ /* section_name with / replaced by _; makes recursive pinning
+ * in bpf_object__pin_programs easier
+ */
+ char *pin_name;
struct bpf_insn *insns;
size_t insns_cnt, main_prog_cnt;
enum bpf_prog_type type;
@@ -152,6 +164,16 @@ struct bpf_program {
bpf_program_clear_priv_t clear_priv;
enum bpf_attach_type expected_attach_type;
+ int btf_fd;
+ void *func_info;
+ __u32 func_info_rec_size;
+ __u32 func_info_cnt;
+
+ struct bpf_capabilities *caps;
+
+ void *line_info;
+ __u32 line_info_rec_size;
+ __u32 line_info_cnt;
};
struct bpf_map {
@@ -159,6 +181,7 @@ struct bpf_map {
char *name;
size_t offset;
int map_ifindex;
+ int inner_map_fd;
struct bpf_map_def def;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
@@ -208,10 +231,13 @@ struct bpf_object {
struct list_head list;
struct btf *btf;
+ struct btf_ext *btf_ext;
void *priv;
bpf_object_clear_priv_t clear_priv;
+ struct bpf_capabilities caps;
+
char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)
@@ -237,6 +263,10 @@ void bpf_program__unload(struct bpf_program *prog)
prog->instances.nr = -1;
zfree(&prog->instances.fds);
+
+ zclose(prog->btf_fd);
+ zfree(&prog->func_info);
+ zfree(&prog->line_info);
}
static void bpf_program__exit(struct bpf_program *prog)
@@ -253,6 +283,7 @@ static void bpf_program__exit(struct bpf_program *prog)
bpf_program__unload(prog);
zfree(&prog->name);
zfree(&prog->section_name);
+ zfree(&prog->pin_name);
zfree(&prog->insns);
zfree(&prog->reloc_desc);
@@ -261,6 +292,17 @@ static void bpf_program__exit(struct bpf_program *prog)
prog->idx = -1;
}
+static char *__bpf_program__pin_name(struct bpf_program *prog)
+{
+ char *name, *p;
+
+ name = p = strdup(prog->section_name);
+ while ((p = strchr(p, '/')))
+ *p = '_';
+
+ return name;
+}
+
static int
bpf_program__init(void *data, size_t size, char *section_name, int idx,
struct bpf_program *prog)
@@ -279,6 +321,13 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
goto errout;
}
+ prog->pin_name = __bpf_program__pin_name(prog);
+ if (!prog->pin_name) {
+ pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
+ idx, section_name);
+ goto errout;
+ }
+
prog->insns = malloc(size);
if (!prog->insns) {
pr_warning("failed to alloc insns for prog under section %s\n",
@@ -291,7 +340,8 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
prog->idx = idx;
prog->instances.fds = NULL;
prog->instances.nr = -1;
- prog->type = BPF_PROG_TYPE_KPROBE;
+ prog->type = BPF_PROG_TYPE_UNSPEC;
+ prog->btf_fd = -1;
return 0;
errout:
@@ -310,6 +360,7 @@ bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
if (err)
return err;
+ prog.caps = &obj->caps;
progs = obj->programs;
nr_progs = obj->nr_programs;
@@ -562,6 +613,14 @@ static int compare_bpf_map(const void *_a, const void *_b)
return a->offset - b->offset;
}
+static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
+{
+ if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
+ type == BPF_MAP_TYPE_HASH_OF_MAPS)
+ return true;
+ return false;
+}
+
static int
bpf_object__init_maps(struct bpf_object *obj, int flags)
{
@@ -625,13 +684,15 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
}
obj->nr_maps = nr_maps;
- /*
- * fill all fd with -1 so won't close incorrect
- * fd (fd=0 is stdin) when failure (zclose won't close
- * negative fd)).
- */
- for (i = 0; i < nr_maps; i++)
+ for (i = 0; i < nr_maps; i++) {
+ /*
+ * fill all fd with -1 so won't close incorrect
+ * fd (fd=0 is stdin) when failure (zclose won't close
+ * negative fd)).
+ */
obj->maps[i].fd = -1;
+ obj->maps[i].inner_map_fd = -1;
+ }
/*
* Fill obj->maps using data in "maps" section.
@@ -723,6 +784,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
{
Elf *elf = obj->efile.elf;
GElf_Ehdr *ep = &obj->efile.ehdr;
+ Elf_Data *btf_ext_data = NULL;
Elf_Scn *scn = NULL;
int idx = 0, err = 0;
@@ -784,6 +846,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
BTF_ELF_SEC, PTR_ERR(obj->btf));
obj->btf = NULL;
}
+ } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
+ btf_ext_data = data;
} else if (sh.sh_type == SHT_SYMTAB) {
if (obj->efile.symbols) {
pr_warning("bpf: multiple SYMTAB in %s\n",
@@ -845,6 +909,22 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
pr_warning("Corrupted ELF file: index of strtab invalid\n");
return LIBBPF_ERRNO__FORMAT;
}
+ if (btf_ext_data) {
+ if (!obj->btf) {
+ pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
+ BTF_EXT_ELF_SEC, BTF_ELF_SEC);
+ } else {
+ obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
+ btf_ext_data->d_size,
+ __pr_debug);
+ if (IS_ERR(obj->btf_ext)) {
+ pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
+ BTF_EXT_ELF_SEC,
+ PTR_ERR(obj->btf_ext));
+ obj->btf_ext = NULL;
+ }
+ }
+ }
if (obj->efile.maps_shndx >= 0) {
err = bpf_object__init_maps(obj, flags);
if (err)
@@ -1095,6 +1175,52 @@ err_free_new_name:
}
static int
+bpf_object__probe_name(struct bpf_object *obj)
+{
+ struct bpf_load_program_attr attr;
+ char *cp, errmsg[STRERR_BUFSIZE];
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int ret;
+
+ /* make sure basic loading works */
+
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.insns = insns;
+ attr.insns_cnt = ARRAY_SIZE(insns);
+ attr.license = "GPL";
+
+ ret = bpf_load_program_xattr(&attr, NULL, 0);
+ if (ret < 0) {
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
+ __func__, cp, errno);
+ return -errno;
+ }
+ close(ret);
+
+ /* now try the same program, but with the name */
+
+ attr.name = "test";
+ ret = bpf_load_program_xattr(&attr, NULL, 0);
+ if (ret >= 0) {
+ obj->caps.name = 1;
+ close(ret);
+ }
+
+ return 0;
+}
+
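+/* probe the kernel for optional features so loading can degrade gracefully */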
+static int
+bpf_object__probe_caps(struct bpf_object *obj)
+{
+ return bpf_object__probe_name(obj);
+}
+
+static int
bpf_object__create_maps(struct bpf_object *obj)
{
struct bpf_create_map_attr create_attr = {};
@@ -1113,7 +1239,8 @@ bpf_object__create_maps(struct bpf_object *obj)
continue;
}
- create_attr.name = map->name;
+ if (obj->caps.name)
+ create_attr.name = map->name;
create_attr.map_ifindex = map->map_ifindex;
create_attr.map_type = def->type;
create_attr.map_flags = def->map_flags;
@@ -1123,6 +1250,9 @@ bpf_object__create_maps(struct bpf_object *obj)
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
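+ /* map-in-map types need the fd of a template inner map at create time */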
+ if (bpf_map_type__is_map_in_map(def->type) &&
+ map->inner_map_fd >= 0)
+ create_attr.inner_map_fd = map->inner_map_fd;
if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
create_attr.btf_fd = btf__fd(obj->btf);
@@ -1161,12 +1291,89 @@ bpf_object__create_maps(struct bpf_object *obj)
}
static int
+check_btf_ext_reloc_err(struct bpf_program *prog, int err,
+ void *btf_prog_info, const char *info_name)
+{
+ if (err != -ENOENT) {
+ pr_warning("Error in loading %s for sec %s.\n",
+ info_name, prog->section_name);
+ return err;
+ }
+
+ /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
+
+ if (btf_prog_info) {
+ /*
+ * Some info has already been found, but the last
+ * btf_ext reloc failed. We must error out.
+ */
+ pr_warning("Error in relocating %s for sec %s.\n",
+ info_name, prog->section_name);
+ return err;
+ }
+
+ /*
+ * Loading the very first info failed. Ignore
+ * the rest.
+ */
+ pr_warning("Cannot find %s for main program sec %s. Ignoring all %s.\n",
+ info_name, prog->section_name, info_name);
+ return 0;
+}
+
+static int
+bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
+ const char *section_name, __u32 insn_offset)
+{
+ int err;
+
+ if (!insn_offset || prog->func_info) {
+ /*
+ * !insn_offset => main program
+ *
+ * For sub prog, the main program's func_info has to
+ * be loaded first (i.e. prog->func_info != NULL)
+ */
+ err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
+ section_name, insn_offset,
+ &prog->func_info,
+ &prog->func_info_cnt);
+ if (err)
+ return check_btf_ext_reloc_err(prog, err,
+ prog->func_info,
+ "bpf_func_info");
+
+ prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
+ }
+
+ if (!insn_offset || prog->line_info) {
+ err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
+ section_name, insn_offset,
+ &prog->line_info,
+ &prog->line_info_cnt);
+ if (err)
+ return check_btf_ext_reloc_err(prog, err,
+ prog->line_info,
+ "bpf_line_info");
+
+ prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
+ }
+
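+ /* only the main program (insn_offset == 0) records the BTF fd */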
+ if (!insn_offset)
+ prog->btf_fd = btf__fd(obj->btf);
+
+ return 0;
+}
+
+static int
bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
struct reloc_desc *relo)
{
struct bpf_insn *insn, *new_insn;
struct bpf_program *text;
size_t new_cnt;
+ int err;
if (relo->type != RELO_CALL)
return -LIBBPF_ERRNO__RELOC;
@@ -1189,6 +1396,15 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
pr_warning("oom in prog realloc\n");
return -ENOMEM;
}
+
+ if (obj->btf_ext) {
+ err = bpf_program_reloc_btf_ext(prog, obj,
+ text->section_name,
+ prog->insns_cnt);
+ if (err)
+ return err;
+ }
+
memcpy(new_insn + prog->insns_cnt, text->insns,
text->insns_cnt * sizeof(*insn));
prog->insns = new_insn;
@@ -1208,7 +1424,17 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
int i, err;
- if (!prog || !prog->reloc_desc)
+ if (!prog)
+ return 0;
+
+ if (obj->btf_ext) {
+ err = bpf_program_reloc_btf_ext(prog, obj,
+ prog->section_name, 0);
+ if (err)
+ return err;
+ }
+
+ if (!prog->reloc_desc)
return 0;
for (i = 0; i < prog->nr_reloc; i++) {
@@ -1296,9 +1522,8 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
}
static int
-load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
- const char *name, struct bpf_insn *insns, int insns_cnt,
- char *license, __u32 kern_version, int *pfd, int prog_ifindex)
+load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
+ char *license, __u32 kern_version, int *pfd)
{
struct bpf_load_program_attr load_attr;
char *cp, errmsg[STRERR_BUFSIZE];
@@ -1306,15 +1531,22 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
int ret;
memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
- load_attr.prog_type = type;
- load_attr.expected_attach_type = expected_attach_type;
- load_attr.name = name;
+ load_attr.prog_type = prog->type;
+ load_attr.expected_attach_type = prog->expected_attach_type;
+ if (prog->caps->name)
+ load_attr.name = prog->name;
load_attr.insns = insns;
load_attr.insns_cnt = insns_cnt;
load_attr.license = license;
load_attr.kern_version = kern_version;
- load_attr.prog_ifindex = prog_ifindex;
-
+ load_attr.prog_ifindex = prog->prog_ifindex;
+ load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
+ load_attr.func_info = prog->func_info;
+ load_attr.func_info_rec_size = prog->func_info_rec_size;
+ load_attr.func_info_cnt = prog->func_info_cnt;
+ load_attr.line_info = prog->line_info;
+ load_attr.line_info_rec_size = prog->line_info_rec_size;
+ load_attr.line_info_cnt = prog->line_info_cnt;
if (!load_attr.insns || !load_attr.insns_cnt)
return -EINVAL;
@@ -1394,10 +1626,8 @@ bpf_program__load(struct bpf_program *prog,
pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
prog->section_name, prog->instances.nr);
}
- err = load_program(prog->type, prog->expected_attach_type,
- prog->name, prog->insns, prog->insns_cnt,
- license, kern_version, &fd,
- prog->prog_ifindex);
+ err = load_program(prog, prog->insns, prog->insns_cnt,
+ license, kern_version, &fd);
if (!err)
prog->instances.fds[0] = fd;
goto out;
@@ -1425,11 +1655,9 @@ bpf_program__load(struct bpf_program *prog,
continue;
}
- err = load_program(prog->type, prog->expected_attach_type,
- prog->name, result.new_insn_ptr,
+ err = load_program(prog, result.new_insn_ptr,
result.new_insn_cnt,
- license, kern_version, &fd,
- prog->prog_ifindex);
+ license, kern_version, &fd);
if (err) {
pr_warning("Loading the %dth instance of program '%s' failed\n",
@@ -1495,12 +1723,12 @@ static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
case BPF_PROG_TYPE_LIRC_MODE2:
case BPF_PROG_TYPE_SK_REUSEPORT:
case BPF_PROG_TYPE_FLOW_DISSECTOR:
- return false;
case BPF_PROG_TYPE_UNSPEC:
- case BPF_PROG_TYPE_KPROBE:
case BPF_PROG_TYPE_TRACEPOINT:
- case BPF_PROG_TYPE_PERF_EVENT:
case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ case BPF_PROG_TYPE_PERF_EVENT:
+ return false;
+ case BPF_PROG_TYPE_KPROBE:
default:
return true;
}
@@ -1627,6 +1855,7 @@ int bpf_object__load(struct bpf_object *obj)
obj->loaded = true;
+ CHECK_ERR(bpf_object__probe_caps(obj), err, out);
CHECK_ERR(bpf_object__create_maps(obj), err, out);
CHECK_ERR(bpf_object__relocate(obj), err, out);
CHECK_ERR(bpf_object__load_progs(obj), err, out);
@@ -1699,6 +1928,34 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
return 0;
}
+int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
+ int instance)
+{
+ int err;
+
+ err = check_path(path);
+ if (err)
+ return err;
+
+ if (prog == NULL) {
+ pr_warning("invalid program pointer\n");
+ return -EINVAL;
+ }
+
+ if (instance < 0 || instance >= prog->instances.nr) {
+ pr_warning("invalid prog instance %d of prog %s (max %d)\n",
+ instance, prog->section_name, prog->instances.nr);
+ return -EINVAL;
+ }
+
+ err = unlink(path);
+ if (err != 0)
+ return -errno;
+ pr_debug("unpinned program '%s'\n", path);
+
+ return 0;
+}
+
static int make_dir(const char *path)
{
char *cp, errmsg[STRERR_BUFSIZE];
@@ -1733,6 +1990,11 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
return -EINVAL;
}
+ if (prog->instances.nr == 1) {
+ /* don't create subdirs when pinning single instance */
+ return bpf_program__pin_instance(prog, path, 0);
+ }
+
err = make_dir(path);
if (err)
return err;
@@ -1742,16 +2004,83 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
int len;
len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
+ if (len < 0) {
+ err = -EINVAL;
+ goto err_unpin;
+ } else if (len >= PATH_MAX) {
+ err = -ENAMETOOLONG;
+ goto err_unpin;
+ }
+
+ err = bpf_program__pin_instance(prog, buf, i);
+ if (err)
+ goto err_unpin;
+ }
+
+ return 0;
+
+err_unpin:
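+ /* roll back the instances pinned before the failure, newest first */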
+ for (i = i - 1; i >= 0; i--) {
+ char buf[PATH_MAX];
+ int len;
+
+ len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
+ if (len < 0)
+ continue;
+ else if (len >= PATH_MAX)
+ continue;
+
+ bpf_program__unpin_instance(prog, buf, i);
+ }
+
+ rmdir(path);
+
+ return err;
+}
+
+int bpf_program__unpin(struct bpf_program *prog, const char *path)
+{
+ int i, err;
+
+ err = check_path(path);
+ if (err)
+ return err;
+
+ if (prog == NULL) {
+ pr_warning("invalid program pointer\n");
+ return -EINVAL;
+ }
+
+ if (prog->instances.nr <= 0) {
+ pr_warning("no instances of prog %s to pin\n",
+ prog->section_name);
+ return -EINVAL;
+ }
+
+ if (prog->instances.nr == 1) {
+ /* don't expect subdirs when unpinning single instance */
+ return bpf_program__unpin_instance(prog, path, 0);
+ }
+
+ for (i = 0; i < prog->instances.nr; i++) {
+ char buf[PATH_MAX];
+ int len;
+
+ len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
if (len < 0)
return -EINVAL;
else if (len >= PATH_MAX)
return -ENAMETOOLONG;
- err = bpf_program__pin_instance(prog, buf, i);
+ err = bpf_program__unpin_instance(prog, buf, i);
if (err)
return err;
}
+ err = rmdir(path);
+ if (err)
+ return -errno;
+
return 0;
}
@@ -1776,12 +2105,33 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
}
pr_debug("pinned map '%s'\n", path);
+
return 0;
}
-int bpf_object__pin(struct bpf_object *obj, const char *path)
+int bpf_map__unpin(struct bpf_map *map, const char *path)
+{
+ int err;
+
+ err = check_path(path);
+ if (err)
+ return err;
+
+ if (map == NULL) {
+ pr_warning("invalid map pointer\n");
+ return -EINVAL;
+ }
+
+ err = unlink(path);
+ if (err != 0)
+ return -errno;
+ pr_debug("unpinned map '%s'\n", path);
+
+ return 0;
+}
+
+int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
- struct bpf_program *prog;
struct bpf_map *map;
int err;
@@ -1803,28 +2153,142 @@ int bpf_object__pin(struct bpf_object *obj, const char *path)
len = snprintf(buf, PATH_MAX, "%s/%s", path,
bpf_map__name(map));
+ if (len < 0) {
+ err = -EINVAL;
+ goto err_unpin_maps;
+ } else if (len >= PATH_MAX) {
+ err = -ENAMETOOLONG;
+ goto err_unpin_maps;
+ }
+
+ err = bpf_map__pin(map, buf);
+ if (err)
+ goto err_unpin_maps;
+ }
+
+ return 0;
+
+err_unpin_maps:
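+ /* roll back the maps pinned before the failure */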
+ while ((map = bpf_map__prev(map, obj))) {
+ char buf[PATH_MAX];
+ int len;
+
+ len = snprintf(buf, PATH_MAX, "%s/%s", path,
+ bpf_map__name(map));
+ if (len < 0)
+ continue;
+ else if (len >= PATH_MAX)
+ continue;
+
+ bpf_map__unpin(map, buf);
+ }
+
+ return err;
+}
+
+int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
+{
+ struct bpf_map *map;
+ int err;
+
+ if (!obj)
+ return -ENOENT;
+
+ bpf_map__for_each(map, obj) {
+ char buf[PATH_MAX];
+ int len;
+
+ len = snprintf(buf, PATH_MAX, "%s/%s", path,
+ bpf_map__name(map));
if (len < 0)
return -EINVAL;
else if (len >= PATH_MAX)
return -ENAMETOOLONG;
- err = bpf_map__pin(map, buf);
+ err = bpf_map__unpin(map, buf);
if (err)
return err;
}
+ return 0;
+}
+
+int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
+{
+ struct bpf_program *prog;
+ int err;
+
+ if (!obj)
+ return -ENOENT;
+
+ if (!obj->loaded) {
+ pr_warning("object not yet loaded; load it first\n");
+ return -ENOENT;
+ }
+
+ err = make_dir(path);
+ if (err)
+ return err;
+
bpf_object__for_each_program(prog, obj) {
char buf[PATH_MAX];
int len;
len = snprintf(buf, PATH_MAX, "%s/%s", path,
- prog->section_name);
+ prog->pin_name);
+ if (len < 0) {
+ err = -EINVAL;
+ goto err_unpin_programs;
+ } else if (len >= PATH_MAX) {
+ err = -ENAMETOOLONG;
+ goto err_unpin_programs;
+ }
+
+ err = bpf_program__pin(prog, buf);
+ if (err)
+ goto err_unpin_programs;
+ }
+
+ return 0;
+
+err_unpin_programs:
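+ /* roll back the programs pinned before the failure */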
+ while ((prog = bpf_program__prev(prog, obj))) {
+ char buf[PATH_MAX];
+ int len;
+
+ len = snprintf(buf, PATH_MAX, "%s/%s", path,
+ prog->pin_name);
+ if (len < 0)
+ continue;
+ else if (len >= PATH_MAX)
+ continue;
+
+ bpf_program__unpin(prog, buf);
+ }
+
+ return err;
+}
+
+int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
+{
+ struct bpf_program *prog;
+ int err;
+
+ if (!obj)
+ return -ENOENT;
+
+ bpf_object__for_each_program(prog, obj) {
+ char buf[PATH_MAX];
+ int len;
+
+ len = snprintf(buf, PATH_MAX, "%s/%s", path,
+ prog->pin_name);
if (len < 0)
return -EINVAL;
else if (len >= PATH_MAX)
return -ENAMETOOLONG;
- err = bpf_program__pin(prog, buf);
+ err = bpf_program__unpin(prog, buf);
if (err)
return err;
}
@@ -1832,6 +2296,23 @@ int bpf_object__pin(struct bpf_object *obj, const char *path)
return 0;
}
+int bpf_object__pin(struct bpf_object *obj, const char *path)
+{
+ int err;
+
+ err = bpf_object__pin_maps(obj, path);
+ if (err)
+ return err;
+
+ err = bpf_object__pin_programs(obj, path);
+ if (err) {
+ bpf_object__unpin_maps(obj, path);
+ return err;
+ }
+
+ return 0;
+}
+
void bpf_object__close(struct bpf_object *obj)
{
size_t i;
@@ -1845,6 +2326,7 @@ void bpf_object__close(struct bpf_object *obj)
bpf_object__elf_finish(obj);
bpf_object__unload(obj);
btf__free(obj->btf);
+ btf_ext__free(obj->btf_ext);
for (i = 0; i < obj->nr_maps; i++) {
zfree(&obj->maps[i].name);
@@ -1918,23 +2400,26 @@ void *bpf_object__priv(struct bpf_object *obj)
}
static struct bpf_program *
-__bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
+__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
{
- size_t idx;
+ size_t nr_programs = obj->nr_programs;
+ ssize_t idx;
- if (!obj->programs)
+ if (!nr_programs)
return NULL;
- /* First handler */
- if (prev == NULL)
- return &obj->programs[0];
- if (prev->obj != obj) {
+ if (!p)
+ /* Iterate from the beginning */
+ return forward ? &obj->programs[0] :
+ &obj->programs[nr_programs - 1];
+
+ if (p->obj != obj) {
pr_warning("error: program handler doesn't match object\n");
return NULL;
}
- idx = (prev - obj->programs) + 1;
- if (idx >= obj->nr_programs)
+ idx = (p - obj->programs) + (forward ? 1 : -1);
+ if (idx >= obj->nr_programs || idx < 0)
return NULL;
return &obj->programs[idx];
}
@@ -1945,7 +2430,19 @@ bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
struct bpf_program *prog = prev;
do {
- prog = __bpf_program__next(prog, obj);
+ prog = __bpf_program__iter(prog, obj, true);
+ } while (prog && bpf_program__is_function_storage(prog, obj));
+
+ return prog;
+}
+
+struct bpf_program *
+bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
+{
+ struct bpf_program *prog = next;
+
+ do {
+ prog = __bpf_program__iter(prog, obj, false);
} while (prog && bpf_program__is_function_storage(prog, obj));
return prog;
@@ -2272,10 +2769,24 @@ void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
map->map_ifindex = ifindex;
}
-struct bpf_map *
-bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
+int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
- size_t idx;
+ if (!bpf_map_type__is_map_in_map(map->def.type)) {
+ pr_warning("error: unsupported map type\n");
+ return -EINVAL;
+ }
+ if (map->inner_map_fd != -1) {
+ pr_warning("error: inner_map_fd already specified\n");
+ return -EINVAL;
+ }
+ map->inner_map_fd = fd;
+ return 0;
+}
+
+static struct bpf_map *
+__bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
+{
+ ssize_t idx;
struct bpf_map *s, *e;
if (!obj || !obj->maps)
@@ -2284,22 +2795,40 @@ bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
s = obj->maps;
e = obj->maps + obj->nr_maps;
- if (prev == NULL)
- return s;
-
- if ((prev < s) || (prev >= e)) {
+ if ((m < s) || (m >= e)) {
pr_warning("error in %s: map handler doesn't belong to object\n",
__func__);
return NULL;
}
- idx = (prev - obj->maps) + 1;
- if (idx >= obj->nr_maps)
+ idx = (m - obj->maps) + i;
+ if (idx >= obj->nr_maps || idx < 0)
return NULL;
return &obj->maps[idx];
}
struct bpf_map *
+bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
+{
+ if (prev == NULL)
+ return obj->maps;
+
+ return __bpf_map__iter(prev, obj, 1);
+}
+
+struct bpf_map *
+bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
+{
+ if (next == NULL) {
+ if (!obj->nr_maps)
+ return NULL;
+ return obj->maps + obj->nr_maps - 1;
+ }
+
+ return __bpf_map__iter(next, obj, -1);
+}
+
+struct bpf_map *
bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
{
struct bpf_map *pos;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 1f3468dad8b2..5f68d7b75215 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -16,6 +16,10 @@
#include <sys/types.h> // for size_t
#include <linux/bpf.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
#ifndef LIBBPF_API
#define LIBBPF_API __attribute__((visibility("default")))
#endif
@@ -71,6 +75,13 @@ struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
LIBBPF_API struct bpf_object *bpf_object__open_buffer(void *obj_buf,
size_t obj_buf_sz,
const char *name);
+LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);
+LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
+ const char *path);
+LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
+ const char *path);
+LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
+ const char *path);
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
LIBBPF_API void bpf_object__close(struct bpf_object *object);
@@ -112,6 +123,9 @@ LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog,
(pos) != NULL; \
(pos) = bpf_program__next((pos), (obj)))
+LIBBPF_API struct bpf_program *bpf_program__prev(struct bpf_program *prog,
+ struct bpf_object *obj);
+
typedef void (*bpf_program_clear_priv_t)(struct bpf_program *,
void *);
@@ -131,7 +145,11 @@ LIBBPF_API int bpf_program__fd(struct bpf_program *prog);
LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
const char *path,
int instance);
+LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog,
+ const char *path,
+ int instance);
LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);
+LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path);
LIBBPF_API void bpf_program__unload(struct bpf_program *prog);
struct bpf_insn;
@@ -260,6 +278,9 @@ bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
(pos) != NULL; \
(pos) = bpf_map__next((pos), (obj)))
+LIBBPF_API struct bpf_map *
+bpf_map__prev(struct bpf_map *map, struct bpf_object *obj);
+
LIBBPF_API int bpf_map__fd(struct bpf_map *map);
LIBBPF_API const struct bpf_map_def *bpf_map__def(struct bpf_map *map);
LIBBPF_API const char *bpf_map__name(struct bpf_map *map);
@@ -274,6 +295,9 @@ LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
LIBBPF_API bool bpf_map__is_offload_neutral(struct bpf_map *map);
LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
+LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
+
+LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
LIBBPF_API long libbpf_get_error(const void *ptr);
@@ -317,4 +341,22 @@ int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie);
int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie);
+
+struct bpf_prog_linfo;
+struct bpf_prog_info;
+
+LIBBPF_API void bpf_prog_linfo__free(struct bpf_prog_linfo *prog_linfo);
+LIBBPF_API struct bpf_prog_linfo *
+bpf_prog_linfo__new(const struct bpf_prog_info *info);
+LIBBPF_API const struct bpf_line_info *
+bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
+ __u64 addr, __u32 func_idx, __u32 nr_skip);
+LIBBPF_API const struct bpf_line_info *
+bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
+ __u32 insn_off, __u32 nr_skip);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
#endif /* __LIBBPF_LIBBPF_H */
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
new file mode 100644
index 000000000000..cd02cd4e2cc3
--- /dev/null
+++ b/tools/lib/bpf/libbpf.map
@@ -0,0 +1,126 @@
+LIBBPF_0.0.1 {
+ global:
+ bpf_btf_get_fd_by_id;
+ bpf_create_map;
+ bpf_create_map_in_map;
+ bpf_create_map_in_map_node;
+ bpf_create_map_name;
+ bpf_create_map_node;
+ bpf_create_map_xattr;
+ bpf_load_btf;
+ bpf_load_program;
+ bpf_load_program_xattr;
+ bpf_map__btf_key_type_id;
+ bpf_map__btf_value_type_id;
+ bpf_map__def;
+ bpf_map__fd;
+ bpf_map__is_offload_neutral;
+ bpf_map__name;
+ bpf_map__next;
+ bpf_map__pin;
+ bpf_map__prev;
+ bpf_map__priv;
+ bpf_map__reuse_fd;
+ bpf_map__set_ifindex;
+ bpf_map__set_inner_map_fd;
+ bpf_map__set_priv;
+ bpf_map__unpin;
+ bpf_map_delete_elem;
+ bpf_map_get_fd_by_id;
+ bpf_map_get_next_id;
+ bpf_map_get_next_key;
+ bpf_map_lookup_and_delete_elem;
+ bpf_map_lookup_elem;
+ bpf_map_update_elem;
+ bpf_obj_get;
+ bpf_obj_get_info_by_fd;
+ bpf_obj_pin;
+ bpf_object__btf_fd;
+ bpf_object__close;
+ bpf_object__find_map_by_name;
+ bpf_object__find_map_by_offset;
+ bpf_object__find_program_by_title;
+ bpf_object__kversion;
+ bpf_object__load;
+ bpf_object__name;
+ bpf_object__next;
+ bpf_object__open;
+ bpf_object__open_buffer;
+ bpf_object__open_xattr;
+ bpf_object__pin;
+ bpf_object__pin_maps;
+ bpf_object__pin_programs;
+ bpf_object__priv;
+ bpf_object__set_priv;
+ bpf_object__unload;
+ bpf_object__unpin_maps;
+ bpf_object__unpin_programs;
+ bpf_perf_event_read_simple;
+ bpf_prog_attach;
+ bpf_prog_detach;
+ bpf_prog_detach2;
+ bpf_prog_get_fd_by_id;
+ bpf_prog_get_next_id;
+ bpf_prog_linfo__free;
+ bpf_prog_linfo__lfind;
+ bpf_prog_linfo__lfind_addr_func;
+ bpf_prog_linfo__new;
+ bpf_prog_load;
+ bpf_prog_load_xattr;
+ bpf_prog_query;
+ bpf_prog_test_run;
+ bpf_prog_test_run_xattr;
+ bpf_program__fd;
+ bpf_program__is_kprobe;
+ bpf_program__is_perf_event;
+ bpf_program__is_raw_tracepoint;
+ bpf_program__is_sched_act;
+ bpf_program__is_sched_cls;
+ bpf_program__is_socket_filter;
+ bpf_program__is_tracepoint;
+ bpf_program__is_xdp;
+ bpf_program__load;
+ bpf_program__next;
+ bpf_program__nth_fd;
+ bpf_program__pin;
+ bpf_program__pin_instance;
+ bpf_program__prev;
+ bpf_program__priv;
+ bpf_program__set_expected_attach_type;
+ bpf_program__set_ifindex;
+ bpf_program__set_kprobe;
+ bpf_program__set_perf_event;
+ bpf_program__set_prep;
+ bpf_program__set_priv;
+ bpf_program__set_raw_tracepoint;
+ bpf_program__set_sched_act;
+ bpf_program__set_sched_cls;
+ bpf_program__set_socket_filter;
+ bpf_program__set_tracepoint;
+ bpf_program__set_type;
+ bpf_program__set_xdp;
+ bpf_program__title;
+ bpf_program__unload;
+ bpf_program__unpin;
+ bpf_program__unpin_instance;
+ bpf_raw_tracepoint_open;
+ bpf_set_link_xdp_fd;
+ bpf_task_fd_query;
+ bpf_verify_program;
+ btf__fd;
+ btf__find_by_name;
+ btf__free;
+ btf__get_from_id;
+ btf__name_by_offset;
+ btf__new;
+ btf__resolve_size;
+ btf__resolve_type;
+ btf__type_by_id;
+ libbpf_attach_type_by_name;
+ libbpf_get_error;
+ libbpf_prog_type_by_name;
+ libbpf_set_print;
+ libbpf_strerror;
+ local:
+ *;
+};
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
index d83b17f8435c..4343e40588c6 100644
--- a/tools/lib/bpf/libbpf_errno.c
+++ b/tools/lib/bpf/libbpf_errno.c
@@ -7,6 +7,7 @@
* Copyright (C) 2017 Nicira, Inc.
*/
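+/* use the XSI-compliant strerror_r(), not the GNU-specific one */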
+#undef _GNU_SOURCE
#include <stdio.h>
#include <string.h>
diff --git a/tools/lib/bpf/test_libbpf.cpp b/tools/lib/bpf/test_libbpf.cpp
new file mode 100644
index 000000000000..abf3fc25c9fa
--- /dev/null
+++ b/tools/lib/bpf/test_libbpf.cpp
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#include "libbpf.h"
+#include "bpf.h"
+#include "btf.h"
+
+/* do nothing, just make sure we can link successfully */
+
+int main(int argc, char *argv[])
+{
+ /* libbpf.h */
+ libbpf_set_print(NULL, NULL, NULL);
+
+ /* bpf.h */
+ bpf_prog_get_fd_by_id(0);
+
+ /* btf.h */
+ btf__new(NULL, 0, NULL);
+}
diff --git a/tools/lib/lockdep/include/liblockdep/common.h b/tools/lib/lockdep/include/liblockdep/common.h
index 8862da80995a..d640a9761f09 100644
--- a/tools/lib/lockdep/include/liblockdep/common.h
+++ b/tools/lib/lockdep/include/liblockdep/common.h
@@ -44,6 +44,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip);
void lock_release(struct lockdep_map *lock, int nested,
unsigned long ip);
+void lockdep_reset_lock(struct lockdep_map *lock);
extern void debug_check_no_locks_freed(const void *from, unsigned long len);
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index a80ac39f966e..2073d4e1f2f0 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -54,6 +54,7 @@ static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *l
static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock)
{
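+ /* let lockdep forget this lock before it is destroyed */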
+ lockdep_reset_lock(&lock->dep_map);
return pthread_mutex_destroy(&lock->mutex);
}
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
index a96c3bf0fef1..365762e3a1ea 100644
--- a/tools/lib/lockdep/include/liblockdep/rwlock.h
+++ b/tools/lib/lockdep/include/liblockdep/rwlock.h
@@ -60,10 +60,10 @@ static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_
return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
}
-static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
+static inline int liblockdep_pthread_rwlock_trywrlock(liblockdep_pthread_rwlock_t *lock)
{
lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0;
+ return pthread_rwlock_trywrlock(&lock->rwlock) == 0 ? 1 : 0;
}
static inline int liblockdep_rwlock_destroy(liblockdep_pthread_rwlock_t *lock)
@@ -79,7 +79,7 @@ static inline int liblockdep_rwlock_destroy(liblockdep_pthread_rwlock_t *lock)
#define pthread_rwlock_unlock liblockdep_pthread_rwlock_unlock
#define pthread_rwlock_wrlock liblockdep_pthread_rwlock_wrlock
#define pthread_rwlock_tryrdlock liblockdep_pthread_rwlock_tryrdlock
-#define pthread_rwlock_trywlock liblockdep_pthread_rwlock_trywlock
+#define pthread_rwlock_trywrlock liblockdep_pthread_rwlock_trywrlock
#define pthread_rwlock_destroy liblockdep_rwlock_destroy
#endif
diff --git a/tools/lib/lockdep/lockdep.c b/tools/lib/lockdep/lockdep.c
index 6002fcf2f9bc..348a9d0fb766 100644
--- a/tools/lib/lockdep/lockdep.c
+++ b/tools/lib/lockdep/lockdep.c
@@ -15,6 +15,11 @@ u32 prandom_u32(void)
abort();
}
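+/* stub for lockdep's irq-trace reporting; like prandom_u32() above, it must never be called */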
+void print_irqtrace_events(struct task_struct *curr)
+{
+ abort();
+}
+
static struct new_utsname *init_utsname(void)
{
static struct new_utsname n = (struct new_utsname) {
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
index 2e570a188f16..c8fbd0306960 100755
--- a/tools/lib/lockdep/run_tests.sh
+++ b/tools/lib/lockdep/run_tests.sh
@@ -1,32 +1,47 @@
#! /bin/bash
# SPDX-License-Identifier: GPL-2.0
-make &> /dev/null
+if ! make >/dev/null; then
+ echo "Building liblockdep failed."
+ echo "FAILED!"
+ exit 1
+fi
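+
+# each test is compiled, run under a timeout, and its output piped to
+# the matching tests/<testname>.sh script, which checks for the
+# expected lockdep warning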
-for i in `ls tests/*.c`; do
+find tests -name '*.c' | sort | while read -r i; do
testname=$(basename "$i" .c)
- gcc -o tests/$testname -pthread $i liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &> /dev/null
echo -ne "$testname... "
- if [ $(timeout 1 ./tests/$testname 2>&1 | wc -l) -gt 0 ]; then
+ if gcc -o "tests/$testname" -pthread "$i" liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &&
+ timeout 1 "tests/$testname" 2>&1 | "tests/${testname}.sh"; then
echo "PASSED!"
else
echo "FAILED!"
fi
- if [ -f "tests/$testname" ]; then
- rm tests/$testname
- fi
+ rm -f "tests/$testname"
done
-for i in `ls tests/*.c`; do
+find tests -name '*.c' | sort | while read -r i; do
testname=$(basename "$i" .c)
- gcc -o tests/$testname -pthread -Iinclude $i &> /dev/null
echo -ne "(PRELOAD) $testname... "
- if [ $(timeout 1 ./lockdep ./tests/$testname 2>&1 | wc -l) -gt 0 ]; then
+ if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
+ timeout 1 ./lockdep "tests/$testname" 2>&1 |
+ "tests/${testname}.sh"; then
echo "PASSED!"
else
echo "FAILED!"
fi
- if [ -f "tests/$testname" ]; then
- rm tests/$testname
+ rm -f "tests/$testname"
+done
+
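+# run each test under Valgrind too; fail on any invalid access,
+# uninitialised read, or mismatched free that Valgrind reports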
+find tests -name '*.c' | sort | while read -r i; do
+ testname=$(basename "$i" .c)
+ echo -ne "(PRELOAD + Valgrind) $testname... "
+ if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
+ { timeout 10 valgrind --read-var-info=yes ./lockdep "./tests/$testname" >& "tests/${testname}.vg.out"; true; } &&
+ "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
+ ! grep -Eq '(^==[0-9]*== (Invalid |Uninitialised ))|Mismatched free|Source and destination overlap| UME ' "tests/${testname}.vg.out"; then
+ echo "PASSED!"
+ else
+ echo "FAILED!"
fi
+ rm -f "tests/$testname"
done
diff --git a/tools/lib/lockdep/tests/AA.sh b/tools/lib/lockdep/tests/AA.sh
new file mode 100644
index 000000000000..f39b32865074
--- /dev/null
+++ b/tools/lib/lockdep/tests/AA.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -q 'WARNING: possible recursive locking detected'
diff --git a/tools/lib/lockdep/tests/ABA.sh b/tools/lib/lockdep/tests/ABA.sh
new file mode 100644
index 000000000000..f39b32865074
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABA.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -q 'WARNING: possible recursive locking detected'
diff --git a/tools/lib/lockdep/tests/ABBA.c b/tools/lib/lockdep/tests/ABBA.c
index 1460afd33d71..623313f54720 100644
--- a/tools/lib/lockdep/tests/ABBA.c
+++ b/tools/lib/lockdep/tests/ABBA.c
@@ -11,4 +11,7 @@ void main(void)
LOCK_UNLOCK_2(a, b);
LOCK_UNLOCK_2(b, a);
+
+ pthread_mutex_destroy(&b);
+ pthread_mutex_destroy(&a);
}
diff --git a/tools/lib/lockdep/tests/ABBA.sh b/tools/lib/lockdep/tests/ABBA.sh
new file mode 100644
index 000000000000..fc31c607a5a8
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABBA.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABBA_2threads.sh b/tools/lib/lockdep/tests/ABBA_2threads.sh
new file mode 100644
index 000000000000..fc31c607a5a8
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABBA_2threads.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABBCCA.c b/tools/lib/lockdep/tests/ABBCCA.c
index a54c1b2af118..48446129d496 100644
--- a/tools/lib/lockdep/tests/ABBCCA.c
+++ b/tools/lib/lockdep/tests/ABBCCA.c
@@ -13,4 +13,8 @@ void main(void)
LOCK_UNLOCK_2(a, b);
LOCK_UNLOCK_2(b, c);
LOCK_UNLOCK_2(c, a);
+
+ pthread_mutex_destroy(&c);
+ pthread_mutex_destroy(&b);
+ pthread_mutex_destroy(&a);
}
diff --git a/tools/lib/lockdep/tests/ABBCCA.sh b/tools/lib/lockdep/tests/ABBCCA.sh
new file mode 100644
index 000000000000..fc31c607a5a8
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABBCCA.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABBCCDDA.c b/tools/lib/lockdep/tests/ABBCCDDA.c
index aa5d194e8869..3570bf7b3804 100644
--- a/tools/lib/lockdep/tests/ABBCCDDA.c
+++ b/tools/lib/lockdep/tests/ABBCCDDA.c
@@ -15,4 +15,9 @@ void main(void)
LOCK_UNLOCK_2(b, c);
LOCK_UNLOCK_2(c, d);
LOCK_UNLOCK_2(d, a);
+
+ pthread_mutex_destroy(&d);
+ pthread_mutex_destroy(&c);
+ pthread_mutex_destroy(&b);
+ pthread_mutex_destroy(&a);
}
diff --git a/tools/lib/lockdep/tests/ABBCCDDA.sh b/tools/lib/lockdep/tests/ABBCCDDA.sh
new file mode 100644
index 000000000000..fc31c607a5a8
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABBCCDDA.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABCABC.c b/tools/lib/lockdep/tests/ABCABC.c
index b54a08e60416..a1c4659894cd 100644
--- a/tools/lib/lockdep/tests/ABCABC.c
+++ b/tools/lib/lockdep/tests/ABCABC.c
@@ -13,4 +13,8 @@ void main(void)
LOCK_UNLOCK_2(a, b);
LOCK_UNLOCK_2(c, a);
LOCK_UNLOCK_2(b, c);
+
+ pthread_mutex_destroy(&c);
+ pthread_mutex_destroy(&b);
+ pthread_mutex_destroy(&a);
}
diff --git a/tools/lib/lockdep/tests/ABCABC.sh b/tools/lib/lockdep/tests/ABCABC.sh
new file mode 100644
index 000000000000..fc31c607a5a8
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABCABC.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABCDBCDA.c b/tools/lib/lockdep/tests/ABCDBCDA.c
index a56742250d86..335af1c90ab5 100644
--- a/tools/lib/lockdep/tests/ABCDBCDA.c
+++ b/tools/lib/lockdep/tests/ABCDBCDA.c
@@ -15,4 +15,9 @@ void main(void)
LOCK_UNLOCK_2(c, d);
LOCK_UNLOCK_2(b, c);
LOCK_UNLOCK_2(d, a);
+
+ pthread_mutex_destroy(&d);
+ pthread_mutex_destroy(&c);
+ pthread_mutex_destroy(&b);
+ pthread_mutex_destroy(&a);
}
diff --git a/tools/lib/lockdep/tests/ABCDBCDA.sh b/tools/lib/lockdep/tests/ABCDBCDA.sh
new file mode 100644
index 000000000000..fc31c607a5a8
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABCDBCDA.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABCDBDDA.c b/tools/lib/lockdep/tests/ABCDBDDA.c
index 238a3353f3c3..3c5972863049 100644
--- a/tools/lib/lockdep/tests/ABCDBDDA.c
+++ b/tools/lib/lockdep/tests/ABCDBDDA.c
@@ -15,4 +15,9 @@ void main(void)
LOCK_UNLOCK_2(c, d);
LOCK_UNLOCK_2(b, d);
LOCK_UNLOCK_2(d, a);
+
+ pthread_mutex_destroy(&d);
+ pthread_mutex_destroy(&c);
+ pthread_mutex_destroy(&b);
+ pthread_mutex_destroy(&a);
}
diff --git a/tools/lib/lockdep/tests/ABCDBDDA.sh b/tools/lib/lockdep/tests/ABCDBDDA.sh
new file mode 100644
index 000000000000..fc31c607a5a8
--- /dev/null
+++ b/tools/lib/lockdep/tests/ABCDBDDA.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/WW.sh b/tools/lib/lockdep/tests/WW.sh
new file mode 100644
index 000000000000..f39b32865074
--- /dev/null
+++ b/tools/lib/lockdep/tests/WW.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -q 'WARNING: possible recursive locking detected'
diff --git a/tools/lib/lockdep/tests/unlock_balance.c b/tools/lib/lockdep/tests/unlock_balance.c
index 34cf32f689de..dba25064b50a 100644
--- a/tools/lib/lockdep/tests/unlock_balance.c
+++ b/tools/lib/lockdep/tests/unlock_balance.c
@@ -10,4 +10,6 @@ void main(void)
pthread_mutex_lock(&a);
pthread_mutex_unlock(&a);
pthread_mutex_unlock(&a);
+
+ pthread_mutex_destroy(&a);
}
diff --git a/tools/lib/lockdep/tests/unlock_balance.sh b/tools/lib/lockdep/tests/unlock_balance.sh
new file mode 100644
index 000000000000..c6e3952303fe
--- /dev/null
+++ b/tools/lib/lockdep/tests/unlock_balance.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+grep -q 'WARNING: bad unlock balance detected'
diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
index 95563b8e1ad7..ed61fb3a46c0 100644
--- a/tools/lib/subcmd/Makefile
+++ b/tools/lib/subcmd/Makefile
@@ -36,8 +36,6 @@ endif
CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
CFLAGS += -I$(srctree)/tools/include/
-CFLAGS += -I$(srctree)/include/uapi
-CFLAGS += -I$(srctree)/include
SUBCMD_IN := $(OUTPUT)libsubcmd-in.o
diff --git a/tools/lib/subcmd/parse-options.h b/tools/lib/subcmd/parse-options.h
index 6ca2a8bfe716..af9def589863 100644
--- a/tools/lib/subcmd/parse-options.h
+++ b/tools/lib/subcmd/parse-options.h
@@ -71,7 +71,7 @@ typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
*
* `argh`::
* token to explain the kind of argument this option wants. Keep it
- * homogenous across the repository.
+ * homogeneous across the repository.
*
* `help`::
* the short help associated to what the option does.
@@ -80,7 +80,7 @@ typedef int parse_opt_cb(const struct option *, const char *arg, int unset);
*
* `flags`::
* mask of parse_opt_option_flags.
- * PARSE_OPT_OPTARG: says that the argument is optionnal (not for BOOLEANs)
+ * PARSE_OPT_OPTARG: says that the argument is optional (not for BOOLEANs)
* PARSE_OPT_NOARG: says that this option takes no argument, for CALLBACKs
* PARSE_OPT_NONEG: says that this option cannot be negated
* PARSE_OPT_HIDDEN this option is skipped in the default usage, showed in
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 0b4e833088a4..941761d9923d 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -25,6 +25,7 @@ endef
$(call allow-override,CC,$(CROSS_COMPILE)gcc)
$(call allow-override,AR,$(CROSS_COMPILE)ar)
$(call allow-override,NM,$(CROSS_COMPILE)nm)
+$(call allow-override,PKG_CONFIG,pkg-config)
EXT = -std=gnu99
INSTALL = install
@@ -47,6 +48,8 @@ prefix ?= /usr/local
libdir = $(prefix)/$(libdir_relative)
man_dir = $(prefix)/share/man
man_dir_SQ = '$(subst ','\'',$(man_dir))'
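+# default to the first directory in pkg-config's own search path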
+pkgconfig_dir ?= $(word 1,$(shell $(PKG_CONFIG) \
+ --variable pc_path pkg-config | tr ":" " "))
export man_dir man_dir_SQ INSTALL
export DESTDIR DESTDIR_SQ
@@ -174,7 +177,7 @@ $(TE_IN): force
$(Q)$(MAKE) $(build)=libtraceevent
$(OUTPUT)libtraceevent.so.$(EVENT_PARSE_VERSION): $(TE_IN)
- $(QUIET_LINK)$(CC) --shared $^ -Wl,-soname,libtraceevent.so.$(EP_VERSION) -o $@
+ $(QUIET_LINK)$(CC) --shared $(LDFLAGS) $^ -Wl,-soname,libtraceevent.so.$(EP_VERSION) -o $@
@ln -sf $(@F) $(OUTPUT)libtraceevent.so
@ln -sf $(@F) $(OUTPUT)libtraceevent.so.$(EP_VERSION)
@@ -193,7 +196,7 @@ $(PLUGINS_IN): force
$(Q)$(MAKE) $(build)=$(plugin_obj)
$(OUTPUT)%.so: $(OUTPUT)%-in.o
- $(QUIET_LINK)$(CC) $(CFLAGS) -shared -nostartfiles -o $@ $^
+ $(QUIET_LINK)$(CC) $(CFLAGS) -shared $(LDFLAGS) -nostartfiles -o $@ $^
define make_version.h
(echo '/* This file is automatically generated. Do not modify. */'; \
@@ -270,7 +273,19 @@ define do_generate_dynamic_list_file
fi
endef
-install_lib: all_cmd install_plugins
+PKG_CONFIG_FILE = libtraceevent.pc
+define do_install_pkgconfig_file
+ if [ -n "${pkgconfig_dir}" ]; then \
+ cp -f ${PKG_CONFIG_FILE}.template ${PKG_CONFIG_FILE}; \
+ sed -i "s|INSTALL_PREFIX|${1}|g" ${PKG_CONFIG_FILE}; \
+ sed -i "s|LIB_VERSION|${EVENT_PARSE_VERSION}|g" ${PKG_CONFIG_FILE}; \
+ $(call do_install,$(PKG_CONFIG_FILE),$(pkgconfig_dir),644); \
+ else \
+ (echo Failed to locate pkg-config directory) 1>&2; \
+ fi
+endef
+
+install_lib: all_cmd install_plugins install_headers install_pkgconfig
$(call QUIET_INSTALL, $(LIB_TARGET)) \
$(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIB_INSTALL) $(DESTDIR)$(libdir_SQ)
@@ -279,18 +294,24 @@ install_plugins: $(PLUGINS)
$(call QUIET_INSTALL, trace_plugins) \
$(call do_install_plugins, $(PLUGINS))
+install_pkgconfig:
+ $(call QUIET_INSTALL, $(PKG_CONFIG_FILE)) \
+ $(call do_install_pkgconfig_file,$(prefix))
+
install_headers:
$(call QUIET_INSTALL, headers) \
$(call do_install,event-parse.h,$(prefix)/include/traceevent,644); \
$(call do_install,event-utils.h,$(prefix)/include/traceevent,644); \
+ $(call do_install,trace-seq.h,$(prefix)/include/traceevent,644); \
$(call do_install,kbuffer.h,$(prefix)/include/traceevent,644)
install: install_lib
clean:
$(call QUIET_CLEAN, libtraceevent) \
- $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd \
- $(RM) TRACEEVENT-CFLAGS tags TAGS
+ $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd; \
+ $(RM) TRACEEVENT-CFLAGS tags TAGS; \
+ $(RM) $(PKG_CONFIG_FILE)
PHONY += force plugins
force:
diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c
index 61f7149085ee..8b31c0e00ba3 100644
--- a/tools/lib/traceevent/event-parse-api.c
+++ b/tools/lib/traceevent/event-parse-api.c
@@ -15,7 +15,7 @@
* This returns pointer to the first element of the events array
* If @tep is NULL, NULL is returned.
*/
-struct tep_event_format *tep_get_first_event(struct tep_handle *tep)
+struct tep_event *tep_get_first_event(struct tep_handle *tep)
{
if (tep && tep->events)
return tep->events[0];
@@ -51,7 +51,7 @@ void tep_set_flag(struct tep_handle *tep, int flag)
tep->flags |= flag;
}
-unsigned short __tep_data2host2(struct tep_handle *pevent, unsigned short data)
+unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data)
{
unsigned short swap;
@@ -64,7 +64,7 @@ unsigned short __tep_data2host2(struct tep_handle *pevent, unsigned short data)
return swap;
}
-unsigned int __tep_data2host4(struct tep_handle *pevent, unsigned int data)
+unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data)
{
unsigned int swap;
@@ -80,7 +80,7 @@ unsigned int __tep_data2host4(struct tep_handle *pevent, unsigned int data)
}
unsigned long long
-__tep_data2host8(struct tep_handle *pevent, unsigned long long data)
+tep_data2host8(struct tep_handle *pevent, unsigned long long data)
{
unsigned long long swap;
diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h
index b9bddde577f8..9a092dd4a86d 100644
--- a/tools/lib/traceevent/event-parse-local.h
+++ b/tools/lib/traceevent/event-parse-local.h
@@ -50,9 +50,9 @@ struct tep_handle {
unsigned int printk_count;
- struct tep_event_format **events;
+ struct tep_event **events;
int nr_events;
- struct tep_event_format **sort_events;
+ struct tep_event **sort_events;
enum tep_event_sort_type last_type;
int type_offset;
@@ -84,9 +84,16 @@ struct tep_handle {
struct tep_function_handler *func_handlers;
/* cache */
- struct tep_event_format *last_event;
+ struct tep_event *last_event;
char *trace_clock;
};
+void tep_free_event(struct tep_event *event);
+void tep_free_format_field(struct tep_format_field *field);
+
+unsigned short tep_data2host2(struct tep_handle *pevent, unsigned short data);
+unsigned int tep_data2host4(struct tep_handle *pevent, unsigned int data);
+unsigned long long tep_data2host8(struct tep_handle *pevent, unsigned long long data);
+
#endif /* _PARSE_EVENTS_INT_H */
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 3692f29fee46..69a96e39f0ab 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -96,7 +96,7 @@ struct tep_function_handler {
static unsigned long long
process_defined_func(struct trace_seq *s, void *data, int size,
- struct tep_event_format *event, struct tep_print_arg *arg);
+ struct tep_event *event, struct tep_print_arg *arg);
static void free_func_handle(struct tep_function_handler *func);
@@ -739,16 +739,16 @@ void tep_print_printk(struct tep_handle *pevent)
}
}
-static struct tep_event_format *alloc_event(void)
+static struct tep_event *alloc_event(void)
{
- return calloc(1, sizeof(struct tep_event_format));
+ return calloc(1, sizeof(struct tep_event));
}
-static int add_event(struct tep_handle *pevent, struct tep_event_format *event)
+static int add_event(struct tep_handle *pevent, struct tep_event *event)
{
int i;
- struct tep_event_format **events = realloc(pevent->events, sizeof(event) *
- (pevent->nr_events + 1));
+ struct tep_event **events = realloc(pevent->events, sizeof(event) *
+ (pevent->nr_events + 1));
if (!events)
return -1;
@@ -1145,7 +1145,7 @@ static enum tep_event_type read_token(char **tok)
}
/**
- * tep_read_token - access to utilites to use the pevent parser
+ * tep_read_token - access to utilities to use the pevent parser
* @tok: The token to return
*
* This will parse tokens from the string given by
@@ -1355,7 +1355,7 @@ static unsigned int type_size(const char *name)
return 0;
}
-static int event_read_fields(struct tep_event_format *event, struct tep_format_field **fields)
+static int event_read_fields(struct tep_event *event, struct tep_format_field **fields)
{
struct tep_format_field *field = NULL;
enum tep_event_type type;
@@ -1642,7 +1642,7 @@ fail_expect:
return -1;
}
-static int event_read_format(struct tep_event_format *event)
+static int event_read_format(struct tep_event *event)
{
char *token;
int ret;
@@ -1675,11 +1675,11 @@ static int event_read_format(struct tep_event_format *event)
}
static enum tep_event_type
-process_arg_token(struct tep_event_format *event, struct tep_print_arg *arg,
+process_arg_token(struct tep_event *event, struct tep_print_arg *arg,
char **tok, enum tep_event_type type);
static enum tep_event_type
-process_arg(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
+process_arg(struct tep_event *event, struct tep_print_arg *arg, char **tok)
{
enum tep_event_type type;
char *token;
@@ -1691,14 +1691,14 @@ process_arg(struct tep_event_format *event, struct tep_print_arg *arg, char **to
}
static enum tep_event_type
-process_op(struct tep_event_format *event, struct tep_print_arg *arg, char **tok);
+process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok);
/*
* For __print_symbolic() and __print_flags, we need to completely
* evaluate the first argument, which defines what to print next.
*/
static enum tep_event_type
-process_field_arg(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
+process_field_arg(struct tep_event *event, struct tep_print_arg *arg, char **tok)
{
enum tep_event_type type;
@@ -1712,7 +1712,7 @@ process_field_arg(struct tep_event_format *event, struct tep_print_arg *arg, cha
}
static enum tep_event_type
-process_cond(struct tep_event_format *event, struct tep_print_arg *top, char **tok)
+process_cond(struct tep_event *event, struct tep_print_arg *top, char **tok)
{
struct tep_print_arg *arg, *left, *right;
enum tep_event_type type;
@@ -1768,7 +1768,7 @@ out_free:
}
static enum tep_event_type
-process_array(struct tep_event_format *event, struct tep_print_arg *top, char **tok)
+process_array(struct tep_event *event, struct tep_print_arg *top, char **tok)
{
struct tep_print_arg *arg;
enum tep_event_type type;
@@ -1870,7 +1870,7 @@ static int set_op_prio(struct tep_print_arg *arg)
/* Note, *tok does not get freed, but will most likely be saved */
static enum tep_event_type
-process_op(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
+process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok)
{
struct tep_print_arg *left, *right = NULL;
enum tep_event_type type;
@@ -2071,7 +2071,7 @@ out_free:
}
static enum tep_event_type
-process_entry(struct tep_event_format *event __maybe_unused, struct tep_print_arg *arg,
+process_entry(struct tep_event *event __maybe_unused, struct tep_print_arg *arg,
char **tok)
{
enum tep_event_type type;
@@ -2110,7 +2110,7 @@ process_entry(struct tep_event_format *event __maybe_unused, struct tep_print_ar
return TEP_EVENT_ERROR;
}
-static int alloc_and_process_delim(struct tep_event_format *event, char *next_token,
+static int alloc_and_process_delim(struct tep_event *event, char *next_token,
struct tep_print_arg **print_arg)
{
struct tep_print_arg *field;
@@ -2445,7 +2445,7 @@ static char *arg_eval (struct tep_print_arg *arg)
}
static enum tep_event_type
-process_fields(struct tep_event_format *event, struct tep_print_flag_sym **list, char **tok)
+process_fields(struct tep_event *event, struct tep_print_flag_sym **list, char **tok)
{
enum tep_event_type type;
struct tep_print_arg *arg = NULL;
@@ -2526,7 +2526,7 @@ out_free:
}
static enum tep_event_type
-process_flags(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
+process_flags(struct tep_event *event, struct tep_print_arg *arg, char **tok)
{
struct tep_print_arg *field;
enum tep_event_type type;
@@ -2579,7 +2579,7 @@ out_free:
}
static enum tep_event_type
-process_symbols(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
+process_symbols(struct tep_event *event, struct tep_print_arg *arg, char **tok)
{
struct tep_print_arg *field;
enum tep_event_type type;
@@ -2618,7 +2618,7 @@ out_free:
}
static enum tep_event_type
-process_hex_common(struct tep_event_format *event, struct tep_print_arg *arg,
+process_hex_common(struct tep_event *event, struct tep_print_arg *arg,
char **tok, enum tep_print_arg_type type)
{
memset(arg, 0, sizeof(*arg));
@@ -2641,20 +2641,20 @@ out:
}
static enum tep_event_type
-process_hex(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
+process_hex(struct tep_event *event, struct tep_print_arg *arg, char **tok)
{
return process_hex_common(event, arg, tok, TEP_PRINT_HEX);
}
static enum tep_event_type
-process_hex_str(struct tep_event_format *event, struct tep_print_arg *arg,
+process_hex_str(struct tep_event *event, struct tep_print_arg *arg,
char **tok)
{
return process_hex_common(event, arg, tok, TEP_PRINT_HEX_STR);
}
static enum tep_event_type
-process_int_array(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
+process_int_array(struct tep_event *event, struct tep_print_arg *arg, char **tok)
{
memset(arg, 0, sizeof(*arg));
arg->type = TEP_PRINT_INT_ARRAY;
@@ -2682,7 +2682,7 @@ out:
}
static enum tep_event_type
-process_dynamic_array(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
+process_dynamic_array(struct tep_event *event, struct tep_print_arg *arg, char **tok)
{
struct tep_format_field *field;
enum tep_event_type type;
@@ -2746,7 +2746,7 @@ process_dynamic_array(struct tep_event_format *event, struct tep_print_arg *arg,
}
static enum tep_event_type
-process_dynamic_array_len(struct tep_event_format *event, struct tep_print_arg *arg,
+process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg,
char **tok)
{
struct tep_format_field *field;
@@ -2782,7 +2782,7 @@ process_dynamic_array_len(struct tep_event_format *event, struct tep_print_arg *
}
static enum tep_event_type
-process_paren(struct tep_event_format *event, struct tep_print_arg *arg, char **tok)
+process_paren(struct tep_event *event, struct tep_print_arg *arg, char **tok)
{
struct tep_print_arg *item_arg;
enum tep_event_type type;
@@ -2845,7 +2845,7 @@ process_paren(struct tep_event_format *event, struct tep_print_arg *arg, char **
static enum tep_event_type
-process_str(struct tep_event_format *event __maybe_unused, struct tep_print_arg *arg,
+process_str(struct tep_event *event __maybe_unused, struct tep_print_arg *arg,
char **tok)
{
enum tep_event_type type;
@@ -2874,7 +2874,7 @@ process_str(struct tep_event_format *event __maybe_unused, struct tep_print_arg
}
static enum tep_event_type
-process_bitmask(struct tep_event_format *event __maybe_unused, struct tep_print_arg *arg,
+process_bitmask(struct tep_event *event __maybe_unused, struct tep_print_arg *arg,
char **tok)
{
enum tep_event_type type;
@@ -2935,7 +2935,7 @@ static void remove_func_handler(struct tep_handle *pevent, char *func_name)
}
static enum tep_event_type
-process_func_handler(struct tep_event_format *event, struct tep_function_handler *func,
+process_func_handler(struct tep_event *event, struct tep_function_handler *func,
struct tep_print_arg *arg, char **tok)
{
struct tep_print_arg **next_arg;
@@ -2993,7 +2993,7 @@ err:
}
static enum tep_event_type
-process_function(struct tep_event_format *event, struct tep_print_arg *arg,
+process_function(struct tep_event *event, struct tep_print_arg *arg,
char *token, char **tok)
{
struct tep_function_handler *func;
@@ -3049,7 +3049,7 @@ process_function(struct tep_event_format *event, struct tep_print_arg *arg,
}
static enum tep_event_type
-process_arg_token(struct tep_event_format *event, struct tep_print_arg *arg,
+process_arg_token(struct tep_event *event, struct tep_print_arg *arg,
char **tok, enum tep_event_type type)
{
char *token;
@@ -3137,7 +3137,7 @@ process_arg_token(struct tep_event_format *event, struct tep_print_arg *arg,
return type;
}
-static int event_read_print_args(struct tep_event_format *event, struct tep_print_arg **list)
+static int event_read_print_args(struct tep_event *event, struct tep_print_arg **list)
{
enum tep_event_type type = TEP_EVENT_ERROR;
struct tep_print_arg *arg;
@@ -3195,7 +3195,7 @@ static int event_read_print_args(struct tep_event_format *event, struct tep_prin
return args;
}
-static int event_read_print(struct tep_event_format *event)
+static int event_read_print(struct tep_event *event)
{
enum tep_event_type type;
char *token;
@@ -3258,10 +3258,10 @@ static int event_read_print(struct tep_event_format *event)
* @name: the name of the common field to return
*
* Returns a common field from the event by the given @name.
- * This only searchs the common fields and not all field.
+ * This only searches the common fields and not all fields.
*/
struct tep_format_field *
-tep_find_common_field(struct tep_event_format *event, const char *name)
+tep_find_common_field(struct tep_event *event, const char *name)
{
struct tep_format_field *format;
@@ -3283,7 +3283,7 @@ tep_find_common_field(struct tep_event_format *event, const char *name)
* This does not search common fields.
*/
struct tep_format_field *
-tep_find_field(struct tep_event_format *event, const char *name)
+tep_find_field(struct tep_event *event, const char *name)
{
struct tep_format_field *format;
@@ -3302,11 +3302,11 @@ tep_find_field(struct tep_event_format *event, const char *name)
* @name: the name of the field
*
* Returns a field by the given @name.
- * This searchs the common field names first, then
+ * This searches the common field names first, then
* the non-common ones if a common one was not found.
*/
struct tep_format_field *
-tep_find_any_field(struct tep_event_format *event, const char *name)
+tep_find_any_field(struct tep_event *event, const char *name)
{
struct tep_format_field *format;
@@ -3328,15 +3328,18 @@ tep_find_any_field(struct tep_event_format *event, const char *name)
unsigned long long tep_read_number(struct tep_handle *pevent,
const void *ptr, int size)
{
+ unsigned long long val;
+
switch (size) {
case 1:
return *(unsigned char *)ptr;
case 2:
- return tep_data2host2(pevent, ptr);
+ return tep_data2host2(pevent, *(unsigned short *)ptr);
case 4:
- return tep_data2host4(pevent, ptr);
+ return tep_data2host4(pevent, *(unsigned int *)ptr);
case 8:
- return tep_data2host8(pevent, ptr);
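+ /* copy via memcpy() to avoid an unaligned 8-byte load */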
+ memcpy(&val, (ptr), sizeof(unsigned long long));
+ return tep_data2host8(pevent, val);
default:
/* BUG! */
return 0;
@@ -3375,7 +3378,7 @@ int tep_read_number_field(struct tep_format_field *field, const void *data,
static int get_common_info(struct tep_handle *pevent,
const char *type, int *offset, int *size)
{
- struct tep_event_format *event;
+ struct tep_event *event;
struct tep_format_field *field;
/*
@@ -3462,11 +3465,11 @@ static int events_id_cmp(const void *a, const void *b);
*
* Returns an event that has a given @id.
*/
-struct tep_event_format *tep_find_event(struct tep_handle *pevent, int id)
+struct tep_event *tep_find_event(struct tep_handle *pevent, int id)
{
- struct tep_event_format **eventptr;
- struct tep_event_format key;
- struct tep_event_format *pkey = &key;
+ struct tep_event **eventptr;
+ struct tep_event key;
+ struct tep_event *pkey = &key;
/* Check cache first */
if (pevent->last_event && pevent->last_event->id == id)
@@ -3494,11 +3497,11 @@ struct tep_event_format *tep_find_event(struct tep_handle *pevent, int id)
* This returns an event with a given @name and under the system
* @sys. If @sys is NULL the first event with @name is returned.
*/
-struct tep_event_format *
+struct tep_event *
tep_find_event_by_name(struct tep_handle *pevent,
const char *sys, const char *name)
{
- struct tep_event_format *event;
+ struct tep_event *event = NULL;
int i;
if (pevent->last_event &&
@@ -3523,7 +3526,7 @@ tep_find_event_by_name(struct tep_handle *pevent,
}
static unsigned long long
-eval_num_arg(void *data, int size, struct tep_event_format *event, struct tep_print_arg *arg)
+eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg *arg)
{
struct tep_handle *pevent = event->pevent;
unsigned long long val = 0;
@@ -3838,7 +3841,7 @@ static void print_bitmask_to_seq(struct tep_handle *pevent,
/*
* data points to a bit mask of size bytes.
* In the kernel, this is an array of long words, thus
- * endianess is very important.
+ * endianness is very important.
*/
if (pevent->file_bigendian)
index = size - (len + 1);
@@ -3863,7 +3866,7 @@ static void print_bitmask_to_seq(struct tep_handle *pevent,
}
static void print_str_arg(struct trace_seq *s, void *data, int size,
- struct tep_event_format *event, const char *format,
+ struct tep_event *event, const char *format,
int len_arg, struct tep_print_arg *arg)
{
struct tep_handle *pevent = event->pevent;
@@ -4062,7 +4065,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
f = tep_find_any_field(event, arg->string.string);
arg->string.offset = f->offset;
}
- str_offset = tep_data2host4(pevent, data + arg->string.offset);
+ str_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->string.offset));
str_offset &= 0xffff;
print_str_to_seq(s, format, len_arg, ((char *)data) + str_offset);
break;
@@ -4080,7 +4083,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
f = tep_find_any_field(event, arg->bitmask.bitmask);
arg->bitmask.offset = f->offset;
}
- bitmask_offset = tep_data2host4(pevent, data + arg->bitmask.offset);
+ bitmask_offset = tep_data2host4(pevent, *(unsigned int *)(data + arg->bitmask.offset));
bitmask_size = bitmask_offset >> 16;
bitmask_offset &= 0xffff;
print_bitmask_to_seq(pevent, s, format, len_arg,
@@ -4118,7 +4121,7 @@ out_warning_field:
static unsigned long long
process_defined_func(struct trace_seq *s, void *data, int size,
- struct tep_event_format *event, struct tep_print_arg *arg)
+ struct tep_event *event, struct tep_print_arg *arg)
{
struct tep_function_handler *func_handle = arg->func.func;
struct func_params *param;
@@ -4213,7 +4216,7 @@ static void free_args(struct tep_print_arg *args)
}
}
-static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, struct tep_event_format *event)
+static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, struct tep_event *event)
{
struct tep_handle *pevent = event->pevent;
struct tep_format_field *field, *ip_field;
@@ -4221,7 +4224,7 @@ static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, s
unsigned long long ip, val;
char *ptr;
void *bptr;
- int vsize;
+ int vsize = 0;
field = pevent->bprint_buf_field;
ip_field = pevent->bprint_ip_field;
@@ -4390,7 +4393,7 @@ out_free:
static char *
get_bprint_format(void *data, int size __maybe_unused,
- struct tep_event_format *event)
+ struct tep_event *event)
{
struct tep_handle *pevent = event->pevent;
unsigned long long addr;
@@ -4425,7 +4428,7 @@ get_bprint_format(void *data, int size __maybe_unused,
}
static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
- struct tep_event_format *event, struct tep_print_arg *arg)
+ struct tep_event *event, struct tep_print_arg *arg)
{
unsigned char *buf;
const char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x";
@@ -4578,7 +4581,7 @@ static void print_ip6_addr(struct trace_seq *s, char i, unsigned char *buf)
* %pISpc print an IP address based on sockaddr; p adds port.
*/
static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
- void *data, int size, struct tep_event_format *event,
+ void *data, int size, struct tep_event *event,
struct tep_print_arg *arg)
{
unsigned char *buf;
@@ -4615,7 +4618,7 @@ static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
}
static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
- void *data, int size, struct tep_event_format *event,
+ void *data, int size, struct tep_event *event,
struct tep_print_arg *arg)
{
char have_c = 0;
@@ -4665,7 +4668,7 @@ static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
}
static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
- void *data, int size, struct tep_event_format *event,
+ void *data, int size, struct tep_event *event,
struct tep_print_arg *arg)
{
char have_c = 0, have_p = 0;
@@ -4747,7 +4750,7 @@ static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
}
static int print_ip_arg(struct trace_seq *s, const char *ptr,
- void *data, int size, struct tep_event_format *event,
+ void *data, int size, struct tep_event *event,
struct tep_print_arg *arg)
{
char i = *ptr; /* 'i' or 'I' */
@@ -4854,7 +4857,7 @@ void tep_print_field(struct trace_seq *s, void *data,
}
void tep_print_fields(struct trace_seq *s, void *data,
- int size __maybe_unused, struct tep_event_format *event)
+ int size __maybe_unused, struct tep_event *event)
{
struct tep_format_field *field;
@@ -4866,7 +4869,7 @@ void tep_print_fields(struct trace_seq *s, void *data,
}
}
-static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event_format *event)
+static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event *event)
{
struct tep_handle *pevent = event->pevent;
struct tep_print_fmt *print_fmt = &event->print_fmt;
@@ -4881,7 +4884,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
char format[32];
int show_func;
int len_as_arg;
- int len_arg;
+ int len_arg = 0;
int len;
int ls;
@@ -4970,6 +4973,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
if (arg->type == TEP_PRINT_BSTRING) {
trace_seq_puts(s, arg->string.string);
+ arg = arg->next;
break;
}
@@ -5146,8 +5150,8 @@ void tep_data_lat_fmt(struct tep_handle *pevent,
static int migrate_disable_exists;
unsigned int lat_flags;
unsigned int pc;
- int lock_depth;
- int migrate_disable;
+ int lock_depth = 0;
+ int migrate_disable = 0;
int hardirq;
int softirq;
void *data = record->data;
@@ -5229,7 +5233,7 @@ int tep_data_type(struct tep_handle *pevent, struct tep_record *rec)
*
* This returns the event from a given @type.
*/
-struct tep_event_format *tep_data_event_from_type(struct tep_handle *pevent, int type)
+struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type)
{
return tep_find_event(pevent, type);
}
@@ -5313,9 +5317,9 @@ pid_from_cmdlist(struct tep_handle *pevent, const char *comm, struct cmdline *ne
* This returns the cmdline structure that holds a pid for a given
* comm, or NULL if none found. As there may be more than one pid for
* a given comm, the result of this call can be passed back into
- * a recurring call in the @next paramater, and then it will find the
+ * a recurring call in the @next parameter, and then it will find the
* next pid.
- * Also, it does a linear seach, so it may be slow.
+ * Also, it does a linear search, so it may be slow.
*/
struct cmdline *tep_data_pid_from_comm(struct tep_handle *pevent, const char *comm,
struct cmdline *next)
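The @next parameter makes this a cursor-style API; a hedged sketch of the
recurring-call loop the comment above describes (assumes a tep handle whose
cmdline list is already populated):

        struct cmdline *cmd = NULL;

        while ((cmd = tep_data_pid_from_comm(tep, "bash", cmd)) != NULL)
                printf("bash pid: %d\n", tep_cmdline_pid(tep, cmd));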
@@ -5387,7 +5391,7 @@ int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline)
* This parses the raw @data using the given @event information and
* writes the print format into the trace_seq.
*/
-void tep_event_info(struct trace_seq *s, struct tep_event_format *event,
+void tep_event_info(struct trace_seq *s, struct tep_event *event,
struct tep_record *record)
{
int print_pretty = 1;
@@ -5409,7 +5413,7 @@ void tep_event_info(struct trace_seq *s, struct tep_event_format *event,
static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
{
- if (!use_trace_clock)
+ if (!trace_clock || !use_trace_clock)
return true;
if (!strcmp(trace_clock, "local") || !strcmp(trace_clock, "global")
@@ -5428,7 +5432,7 @@ static bool is_timestamp_in_us(char *trace_clock, bool use_trace_clock)
* Returns the associated event for a given record, or NULL if none
* is found.
*/
-struct tep_event_format *
+struct tep_event *
tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
{
int type;
@@ -5453,7 +5457,7 @@ tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record)
* Writes the tasks comm, pid and CPU to @s.
*/
void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
- struct tep_event_format *event,
+ struct tep_event *event,
struct tep_record *record)
{
void *data = record->data;
@@ -5481,7 +5485,7 @@ void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
* Writes the timestamp of the record into @s.
*/
void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
- struct tep_event_format *event,
+ struct tep_event *event,
struct tep_record *record,
bool use_trace_clock)
{
@@ -5531,7 +5535,7 @@ void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
* Writes the parsing of the record's data to @s.
*/
void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
- struct tep_event_format *event,
+ struct tep_event *event,
struct tep_record *record)
{
static const char *spaces = " "; /* 20 spaces */
@@ -5550,7 +5554,7 @@ void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
struct tep_record *record, bool use_trace_clock)
{
- struct tep_event_format *event;
+ struct tep_event *event;
event = tep_find_event_by_record(pevent, record);
if (!event) {
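For orientation, a minimal consumer-side sketch of this entry point (the
trace_seq helpers come from the library's trace-seq.h; error handling elided):

        struct trace_seq seq;

        trace_seq_init(&seq);
        tep_print_event(tep, &seq, record, /* use_trace_clock */ true);
        trace_seq_do_printf(&seq);
        trace_seq_destroy(&seq);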
@@ -5572,8 +5576,8 @@ void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
static int events_id_cmp(const void *a, const void *b)
{
- struct tep_event_format * const * ea = a;
- struct tep_event_format * const * eb = b;
+ struct tep_event * const * ea = a;
+ struct tep_event * const * eb = b;
if ((*ea)->id < (*eb)->id)
return -1;
@@ -5586,8 +5590,8 @@ static int events_id_cmp(const void *a, const void *b)
static int events_name_cmp(const void *a, const void *b)
{
- struct tep_event_format * const * ea = a;
- struct tep_event_format * const * eb = b;
+ struct tep_event * const * ea = a;
+ struct tep_event * const * eb = b;
int res;
res = strcmp((*ea)->name, (*eb)->name);
@@ -5603,8 +5607,8 @@ static int events_name_cmp(const void *a, const void *b)
static int events_system_cmp(const void *a, const void *b)
{
- struct tep_event_format * const * ea = a;
- struct tep_event_format * const * eb = b;
+ struct tep_event * const * ea = a;
+ struct tep_event * const * eb = b;
int res;
res = strcmp((*ea)->system, (*eb)->system);
@@ -5618,9 +5622,9 @@ static int events_system_cmp(const void *a, const void *b)
return events_id_cmp(a, b);
}
-struct tep_event_format **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type sort_type)
+struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type sort_type)
{
- struct tep_event_format **events;
+ struct tep_event **events;
int (*sort)(const void *a, const void *b);
events = pevent->sort_events;
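A hedged sketch of consuming the sorted list; the returned array is assumed to
be NULL-terminated like the tep_event_*fields() accessors, and the sort type
comes from the TEP_EVENT_SORT_* enum:

        struct tep_event **events;
        int i;

        events = tep_list_events(tep, TEP_EVENT_SORT_NAME);
        for (i = 0; events && events[i]; i++)
                printf("%s/%s\n", events[i]->system, events[i]->name);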
@@ -5703,7 +5707,7 @@ get_event_fields(const char *type, const char *name,
* Returns an allocated array of fields. The last item in the array is NULL.
* The array must be freed with free().
*/
-struct tep_format_field **tep_event_common_fields(struct tep_event_format *event)
+struct tep_format_field **tep_event_common_fields(struct tep_event *event)
{
return get_event_fields("common", event->name,
event->format.nr_common,
@@ -5717,7 +5721,7 @@ struct tep_format_field **tep_event_common_fields(struct tep_event_format *event
* Returns an allocated array of fields. The last item in the array is NULL.
* The array must be freed with free().
*/
-struct tep_format_field **tep_event_fields(struct tep_event_format *event)
+struct tep_format_field **tep_event_fields(struct tep_event *event)
{
return get_event_fields("event", event->name,
event->format.nr_fields,
@@ -5959,7 +5963,7 @@ int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long si
return 0;
}
-static int event_matches(struct tep_event_format *event,
+static int event_matches(struct tep_event *event,
int id, const char *sys_name,
const char *event_name)
{
@@ -5982,7 +5986,7 @@ static void free_handler(struct event_handler *handle)
free(handle);
}
-static int find_event_handle(struct tep_handle *pevent, struct tep_event_format *event)
+static int find_event_handle(struct tep_handle *pevent, struct tep_event *event)
{
struct event_handler *handle, **next;
@@ -6023,11 +6027,11 @@ static int find_event_handle(struct tep_handle *pevent, struct tep_event_format
*
* /sys/kernel/debug/tracing/events/.../.../format
*/
-enum tep_errno __tep_parse_format(struct tep_event_format **eventp,
+enum tep_errno __tep_parse_format(struct tep_event **eventp,
struct tep_handle *pevent, const char *buf,
unsigned long size, const char *sys)
{
- struct tep_event_format *event;
+ struct tep_event *event;
int ret;
init_input_buf(buf, size);
@@ -6132,12 +6136,12 @@ enum tep_errno __tep_parse_format(struct tep_event_format **eventp,
static enum tep_errno
__parse_event(struct tep_handle *pevent,
- struct tep_event_format **eventp,
+ struct tep_event **eventp,
const char *buf, unsigned long size,
const char *sys)
{
int ret = __tep_parse_format(eventp, pevent, buf, size, sys);
- struct tep_event_format *event = *eventp;
+ struct tep_event *event = *eventp;
if (event == NULL)
return ret;
@@ -6154,7 +6158,7 @@ __parse_event(struct tep_handle *pevent,
return 0;
event_add_failed:
- tep_free_format(event);
+ tep_free_event(event);
return ret;
}
@@ -6174,7 +6178,7 @@ event_add_failed:
* /sys/kernel/debug/tracing/events/.../.../format
*/
enum tep_errno tep_parse_format(struct tep_handle *pevent,
- struct tep_event_format **eventp,
+ struct tep_event **eventp,
const char *buf,
unsigned long size, const char *sys)
{
@@ -6198,7 +6202,7 @@ enum tep_errno tep_parse_format(struct tep_handle *pevent,
enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
unsigned long size, const char *sys)
{
- struct tep_event_format *event = NULL;
+ struct tep_event *event = NULL;
return __parse_event(pevent, &event, buf, size, sys);
}
@@ -6235,7 +6239,7 @@ int get_field_val(struct trace_seq *s, struct tep_format_field *field,
*
* On failure, it returns NULL.
*/
-void *tep_get_field_raw(struct trace_seq *s, struct tep_event_format *event,
+void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event,
const char *name, struct tep_record *record,
int *len, int err)
{
@@ -6282,7 +6286,7 @@ void *tep_get_field_raw(struct trace_seq *s, struct tep_event_format *event,
*
* Returns 0 on success, -1 if the field is not found.
*/
-int tep_get_field_val(struct trace_seq *s, struct tep_event_format *event,
+int tep_get_field_val(struct trace_seq *s, struct tep_event *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err)
{
@@ -6307,7 +6311,7 @@ int tep_get_field_val(struct trace_seq *s, struct tep_event_format *event,
*
* Returns 0 on success, -1 if the field is not found.
*/
-int tep_get_common_field_val(struct trace_seq *s, struct tep_event_format *event,
+int tep_get_common_field_val(struct trace_seq *s, struct tep_event *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err)
{
@@ -6332,7 +6336,7 @@ int tep_get_common_field_val(struct trace_seq *s, struct tep_event_format *event
*
* Returns 0 on success, -1 if the field is not found.
*/
-int tep_get_any_field_val(struct trace_seq *s, struct tep_event_format *event,
+int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err)
{
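A short sketch contrasting the getters; the field names are illustrative (a
sched_switch-style event), and passing a NULL trace_seq with err == 0 simply
suppresses error reporting:

        unsigned long long pid, prio;

        if (!tep_get_common_field_val(NULL, event, "common_pid", record, &pid, 0) &&
            !tep_get_field_val(NULL, event, "prev_prio", record, &prio, 0))
                printf("pid=%llu prev_prio=%llu\n", pid, prio);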
@@ -6358,7 +6362,7 @@ int tep_get_any_field_val(struct trace_seq *s, struct tep_event_format *event,
* Returns: 0 on success, -1 if the field is not found, or 1 if the buffer is full.
*/
int tep_print_num_field(struct trace_seq *s, const char *fmt,
- struct tep_event_format *event, const char *name,
+ struct tep_event *event, const char *name,
struct tep_record *record, int err)
{
struct tep_format_field *field = tep_find_field(event, name);
@@ -6390,7 +6394,7 @@ int tep_print_num_field(struct trace_seq *s, const char *fmt,
* Returns: 0 on success, -1 if the field is not found, or 1 if the buffer is full.
*/
int tep_print_func_field(struct trace_seq *s, const char *fmt,
- struct tep_event_format *event, const char *name,
+ struct tep_event *event, const char *name,
struct tep_record *record, int err)
{
struct tep_format_field *field = tep_find_field(event, name);
@@ -6550,11 +6554,11 @@ int tep_unregister_print_function(struct tep_handle *pevent,
return -1;
}
-static struct tep_event_format *search_event(struct tep_handle *pevent, int id,
- const char *sys_name,
- const char *event_name)
+static struct tep_event *search_event(struct tep_handle *pevent, int id,
+ const char *sys_name,
+ const char *event_name)
{
- struct tep_event_format *event;
+ struct tep_event *event;
if (id >= 0) {
/* search by id */
@@ -6594,7 +6598,7 @@ int tep_register_event_handler(struct tep_handle *pevent, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context)
{
- struct tep_event_format *event;
+ struct tep_event *event;
struct event_handler *handle;
event = search_event(pevent, id, sys_name, event_name);
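A hedged sketch of the registration flow this implements, mirroring the plugin
handlers further down; an id of -1 requests the lookup by system and event
name, as search_event() above shows:

        static int my_switch_handler(struct trace_seq *s, struct tep_record *record,
                                     struct tep_event *event, void *context)
        {
                tep_print_num_field(s, "next_pid=%lld", event, "next_pid", record, 1);
                return 0;
        }

        /* ... */
        tep_register_event_handler(tep, -1, "sched", "sched_switch",
                                   my_switch_handler, NULL);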
@@ -6678,7 +6682,7 @@ int tep_unregister_event_handler(struct tep_handle *pevent, int id,
const char *sys_name, const char *event_name,
tep_event_handler_func func, void *context)
{
- struct tep_event_format *event;
+ struct tep_event *event;
struct event_handler *handle;
struct event_handler **next;
@@ -6730,6 +6734,13 @@ void tep_ref(struct tep_handle *pevent)
pevent->ref_count++;
}
+int tep_get_ref(struct tep_handle *tep)
+{
+ if (tep)
+ return tep->ref_count;
+ return 0;
+}
+
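The new accessor lets callers observe the count without exposing the handle's
internals; a minimal sketch, assuming tep_alloc() starts the count at 1:

        struct tep_handle *tep = tep_alloc();

        tep_ref(tep);                           /* count is now 2 */
        printf("refs: %d\n", tep_get_ref(tep));
        tep_unref(tep);                         /* back to 1 */
        tep_free(tep);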
void tep_free_format_field(struct tep_format_field *field)
{
free(field->type);
@@ -6756,7 +6767,7 @@ static void free_formats(struct tep_format *format)
free_format_fields(format->fields);
}
-void tep_free_format(struct tep_event_format *event)
+void tep_free_event(struct tep_event *event)
{
free(event->name);
free(event->system);
@@ -6842,7 +6853,7 @@ void tep_free(struct tep_handle *pevent)
}
for (i = 0; i < pevent->nr_events; i++)
- tep_free_format(pevent->events[i]);
+ tep_free_event(pevent->events[i]);
while (pevent->handlers) {
handle = pevent->handlers;
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 16bf4c890b6f..35d37087d3c5 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -57,11 +57,11 @@ struct tep_record {
/* ----------------------- tep ----------------------- */
struct tep_handle;
-struct tep_event_format;
+struct tep_event;
typedef int (*tep_event_handler_func)(struct trace_seq *s,
struct tep_record *record,
- struct tep_event_format *event,
+ struct tep_event *event,
void *context);
typedef int (*tep_plugin_load_func)(struct tep_handle *pevent);
@@ -143,7 +143,7 @@ enum tep_format_flags {
struct tep_format_field {
struct tep_format_field *next;
- struct tep_event_format *event;
+ struct tep_event *event;
char *type;
char *name;
char *alias;
@@ -277,7 +277,7 @@ struct tep_print_fmt {
struct tep_print_arg *args;
};
-struct tep_event_format {
+struct tep_event {
struct tep_handle *pevent;
char *name;
int id;
@@ -409,20 +409,6 @@ void tep_print_plugins(struct trace_seq *s,
typedef char *(tep_func_resolver_t)(void *priv,
unsigned long long *addrp, char **modp);
void tep_set_flag(struct tep_handle *tep, int flag);
-unsigned short __tep_data2host2(struct tep_handle *pevent, unsigned short data);
-unsigned int __tep_data2host4(struct tep_handle *pevent, unsigned int data);
-unsigned long long
-__tep_data2host8(struct tep_handle *pevent, unsigned long long data);
-
-#define tep_data2host2(pevent, ptr) __tep_data2host2(pevent, *(unsigned short *)(ptr))
-#define tep_data2host4(pevent, ptr) __tep_data2host4(pevent, *(unsigned int *)(ptr))
-#define tep_data2host8(pevent, ptr) \
-({ \
- unsigned long long __val; \
- \
- memcpy(&__val, (ptr), sizeof(unsigned long long)); \
- __tep_data2host8(pevent, __val); \
-})
static inline int tep_host_bigendian(void)
{
@@ -454,14 +440,14 @@ int tep_register_print_string(struct tep_handle *pevent, const char *fmt,
int tep_pid_is_registered(struct tep_handle *pevent, int pid);
void tep_print_event_task(struct tep_handle *pevent, struct trace_seq *s,
- struct tep_event_format *event,
+ struct tep_event *event,
struct tep_record *record);
void tep_print_event_time(struct tep_handle *pevent, struct trace_seq *s,
- struct tep_event_format *event,
+ struct tep_event *event,
struct tep_record *record,
bool use_trace_clock);
void tep_print_event_data(struct tep_handle *pevent, struct trace_seq *s,
- struct tep_event_format *event,
+ struct tep_event *event,
struct tep_record *record);
void tep_print_event(struct tep_handle *pevent, struct trace_seq *s,
struct tep_record *record, bool use_trace_clock);
@@ -472,32 +458,30 @@ int tep_parse_header_page(struct tep_handle *pevent, char *buf, unsigned long si
enum tep_errno tep_parse_event(struct tep_handle *pevent, const char *buf,
unsigned long size, const char *sys);
enum tep_errno tep_parse_format(struct tep_handle *pevent,
- struct tep_event_format **eventp,
+ struct tep_event **eventp,
const char *buf,
unsigned long size, const char *sys);
-void tep_free_format(struct tep_event_format *event);
-void tep_free_format_field(struct tep_format_field *field);
-void *tep_get_field_raw(struct trace_seq *s, struct tep_event_format *event,
+void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event,
const char *name, struct tep_record *record,
int *len, int err);
-int tep_get_field_val(struct trace_seq *s, struct tep_event_format *event,
+int tep_get_field_val(struct trace_seq *s, struct tep_event *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err);
-int tep_get_common_field_val(struct trace_seq *s, struct tep_event_format *event,
+int tep_get_common_field_val(struct trace_seq *s, struct tep_event *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err);
-int tep_get_any_field_val(struct trace_seq *s, struct tep_event_format *event,
+int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event,
const char *name, struct tep_record *record,
unsigned long long *val, int err);
int tep_print_num_field(struct trace_seq *s, const char *fmt,
- struct tep_event_format *event, const char *name,
+ struct tep_event *event, const char *name,
struct tep_record *record, int err);
int tep_print_func_field(struct trace_seq *s, const char *fmt,
- struct tep_event_format *event, const char *name,
+ struct tep_event *event, const char *name,
struct tep_record *record, int err);
int tep_register_event_handler(struct tep_handle *pevent, int id,
@@ -513,9 +497,9 @@ int tep_register_print_function(struct tep_handle *pevent,
int tep_unregister_print_function(struct tep_handle *pevent,
tep_func_handler func, char *name);
-struct tep_format_field *tep_find_common_field(struct tep_event_format *event, const char *name);
-struct tep_format_field *tep_find_field(struct tep_event_format *event, const char *name);
-struct tep_format_field *tep_find_any_field(struct tep_event_format *event, const char *name);
+struct tep_format_field *tep_find_common_field(struct tep_event *event, const char *name);
+struct tep_format_field *tep_find_field(struct tep_event *event, const char *name);
+struct tep_format_field *tep_find_any_field(struct tep_event *event, const char *name);
const char *tep_find_function(struct tep_handle *pevent, unsigned long long addr);
unsigned long long
@@ -524,19 +508,19 @@ unsigned long long tep_read_number(struct tep_handle *pevent, const void *ptr, i
int tep_read_number_field(struct tep_format_field *field, const void *data,
unsigned long long *value);
-struct tep_event_format *tep_get_first_event(struct tep_handle *tep);
+struct tep_event *tep_get_first_event(struct tep_handle *tep);
int tep_get_events_count(struct tep_handle *tep);
-struct tep_event_format *tep_find_event(struct tep_handle *pevent, int id);
+struct tep_event *tep_find_event(struct tep_handle *pevent, int id);
-struct tep_event_format *
+struct tep_event *
tep_find_event_by_name(struct tep_handle *pevent, const char *sys, const char *name);
-struct tep_event_format *
+struct tep_event *
tep_find_event_by_record(struct tep_handle *pevent, struct tep_record *record);
void tep_data_lat_fmt(struct tep_handle *pevent,
struct trace_seq *s, struct tep_record *record);
int tep_data_type(struct tep_handle *pevent, struct tep_record *rec);
-struct tep_event_format *tep_data_event_from_type(struct tep_handle *pevent, int type);
+struct tep_event *tep_data_event_from_type(struct tep_handle *pevent, int type);
int tep_data_pid(struct tep_handle *pevent, struct tep_record *rec);
int tep_data_preempt_count(struct tep_handle *pevent, struct tep_record *rec);
int tep_data_flags(struct tep_handle *pevent, struct tep_record *rec);
@@ -549,15 +533,15 @@ int tep_cmdline_pid(struct tep_handle *pevent, struct cmdline *cmdline);
void tep_print_field(struct trace_seq *s, void *data,
struct tep_format_field *field);
void tep_print_fields(struct trace_seq *s, void *data,
- int size __maybe_unused, struct tep_event_format *event);
-void tep_event_info(struct trace_seq *s, struct tep_event_format *event,
- struct tep_record *record);
+ int size __maybe_unused, struct tep_event *event);
+void tep_event_info(struct trace_seq *s, struct tep_event *event,
+ struct tep_record *record);
int tep_strerror(struct tep_handle *pevent, enum tep_errno errnum,
- char *buf, size_t buflen);
+ char *buf, size_t buflen);
-struct tep_event_format **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type);
-struct tep_format_field **tep_event_common_fields(struct tep_event_format *event);
-struct tep_format_field **tep_event_fields(struct tep_event_format *event);
+struct tep_event **tep_list_events(struct tep_handle *pevent, enum tep_event_sort_type);
+struct tep_format_field **tep_event_common_fields(struct tep_event *event);
+struct tep_format_field **tep_event_fields(struct tep_event *event);
enum tep_endian {
TEP_LITTLE_ENDIAN = 0,
@@ -581,6 +565,7 @@ struct tep_handle *tep_alloc(void);
void tep_free(struct tep_handle *pevent);
void tep_ref(struct tep_handle *pevent);
void tep_unref(struct tep_handle *pevent);
+int tep_get_ref(struct tep_handle *tep);
/* access to the internal parser */
void tep_buffer_init(const char *buf, unsigned long long size);
@@ -712,7 +697,7 @@ struct tep_filter_arg {
struct tep_filter_type {
int event_id;
- struct tep_event_format *event;
+ struct tep_event *event;
struct tep_filter_arg *filter;
};
diff --git a/tools/lib/traceevent/libtraceevent.pc.template b/tools/lib/traceevent/libtraceevent.pc.template
new file mode 100644
index 000000000000..42e4d6cb6b9e
--- /dev/null
+++ b/tools/lib/traceevent/libtraceevent.pc.template
@@ -0,0 +1,10 @@
+prefix=INSTALL_PREFIX
+libdir=${prefix}/lib64
+includedir=${prefix}/include/traceevent
+
+Name: libtraceevent
+URL: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+Description: Linux kernel trace event library
+Version: LIB_VERSION
+Cflags: -I${includedir}
+Libs: -L${libdir} -ltraceevent
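With this template installed (INSTALL_PREFIX and LIB_VERSION are presumably
substituted at build time), consumers can pick up the right flags with
`cc demo.c $(pkg-config --cflags --libs libtraceevent)` instead of hard-coding
-I and -L paths.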
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index ed87cb56713d..cb5ce66dab6e 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -27,7 +27,7 @@ static struct tep_format_field cpu = {
struct event_list {
struct event_list *next;
- struct tep_event_format *event;
+ struct tep_event *event;
};
static void show_error(char *error_buf, const char *fmt, ...)
@@ -229,7 +229,7 @@ static void free_arg(struct tep_filter_arg *arg)
}
static int add_event(struct event_list **events,
- struct tep_event_format *event)
+ struct tep_event *event)
{
struct event_list *list;
@@ -243,7 +243,7 @@ static int add_event(struct event_list **events,
return 0;
}
-static int event_match(struct tep_event_format *event,
+static int event_match(struct tep_event *event,
regex_t *sreg, regex_t *ereg)
{
if (sreg) {
@@ -259,7 +259,7 @@ static enum tep_errno
find_event(struct tep_handle *pevent, struct event_list **events,
char *sys_name, char *event_name)
{
- struct tep_event_format *event;
+ struct tep_event *event;
regex_t ereg;
regex_t sreg;
int match = 0;
@@ -334,7 +334,7 @@ static void free_events(struct event_list *events)
}
static enum tep_errno
-create_arg_item(struct tep_event_format *event, const char *token,
+create_arg_item(struct tep_event *event, const char *token,
enum tep_event_type type, struct tep_filter_arg **parg, char *error_str)
{
struct tep_format_field *field;
@@ -940,7 +940,7 @@ static int collapse_tree(struct tep_filter_arg *arg,
}
static enum tep_errno
-process_filter(struct tep_event_format *event, struct tep_filter_arg **parg,
+process_filter(struct tep_event *event, struct tep_filter_arg **parg,
char *error_str, int not)
{
enum tep_event_type type;
@@ -1180,7 +1180,7 @@ process_filter(struct tep_event_format *event, struct tep_filter_arg **parg,
}
static enum tep_errno
-process_event(struct tep_event_format *event, const char *filter_str,
+process_event(struct tep_event *event, const char *filter_str,
struct tep_filter_arg **parg, char *error_str)
{
int ret;
@@ -1205,7 +1205,7 @@ process_event(struct tep_event_format *event, const char *filter_str,
}
static enum tep_errno
-filter_event(struct tep_event_filter *filter, struct tep_event_format *event,
+filter_event(struct tep_event_filter *filter, struct tep_event *event,
const char *filter_str, char *error_str)
{
struct tep_filter_type *filter_type;
@@ -1457,7 +1457,7 @@ static int copy_filter_type(struct tep_event_filter *filter,
struct tep_filter_type *filter_type)
{
struct tep_filter_arg *arg;
- struct tep_event_format *event;
+ struct tep_event *event;
const char *sys;
const char *name;
char *str;
@@ -1539,7 +1539,7 @@ int tep_update_trivial(struct tep_event_filter *dest, struct tep_event_filter *s
{
struct tep_handle *src_pevent;
struct tep_handle *dest_pevent;
- struct tep_event_format *event;
+ struct tep_event *event;
struct tep_filter_type *filter_type;
struct tep_filter_arg *arg;
char *str;
@@ -1683,11 +1683,11 @@ int tep_filter_event_has_trivial(struct tep_event_filter *filter,
}
}
-static int test_filter(struct tep_event_format *event, struct tep_filter_arg *arg,
+static int test_filter(struct tep_event *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err);
static const char *
-get_comm(struct tep_event_format *event, struct tep_record *record)
+get_comm(struct tep_event *event, struct tep_record *record)
{
const char *comm;
int pid;
@@ -1698,7 +1698,7 @@ get_comm(struct tep_event_format *event, struct tep_record *record)
}
static unsigned long long
-get_value(struct tep_event_format *event,
+get_value(struct tep_event *event,
struct tep_format_field *field, struct tep_record *record)
{
unsigned long long val;
@@ -1734,11 +1734,11 @@ get_value(struct tep_event_format *event,
}
static unsigned long long
-get_arg_value(struct tep_event_format *event, struct tep_filter_arg *arg,
+get_arg_value(struct tep_event *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err);
static unsigned long long
-get_exp_value(struct tep_event_format *event, struct tep_filter_arg *arg,
+get_exp_value(struct tep_event *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
unsigned long long lval, rval;
@@ -1793,7 +1793,7 @@ get_exp_value(struct tep_event_format *event, struct tep_filter_arg *arg,
}
static unsigned long long
-get_arg_value(struct tep_event_format *event, struct tep_filter_arg *arg,
+get_arg_value(struct tep_event *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
switch (arg->type) {
@@ -1817,7 +1817,7 @@ get_arg_value(struct tep_event_format *event, struct tep_filter_arg *arg,
return 0;
}
-static int test_num(struct tep_event_format *event, struct tep_filter_arg *arg,
+static int test_num(struct tep_event *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
unsigned long long lval, rval;
@@ -1860,7 +1860,7 @@ static int test_num(struct tep_event_format *event, struct tep_filter_arg *arg,
static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *record)
{
- struct tep_event_format *event;
+ struct tep_event *event;
struct tep_handle *pevent;
unsigned long long addr;
const char *val = NULL;
@@ -1908,7 +1908,7 @@ static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *
return val;
}
-static int test_str(struct tep_event_format *event, struct tep_filter_arg *arg,
+static int test_str(struct tep_event *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
const char *val;
@@ -1939,7 +1939,7 @@ static int test_str(struct tep_event_format *event, struct tep_filter_arg *arg,
}
}
-static int test_op(struct tep_event_format *event, struct tep_filter_arg *arg,
+static int test_op(struct tep_event *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
switch (arg->op.type) {
@@ -1961,7 +1961,7 @@ static int test_op(struct tep_event_format *event, struct tep_filter_arg *arg,
}
}
-static int test_filter(struct tep_event_format *event, struct tep_filter_arg *arg,
+static int test_filter(struct tep_event *event, struct tep_filter_arg *arg,
struct tep_record *record, enum tep_errno *err)
{
if (*err) {
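For orientation, a hedged sketch of the public filter flow these internals
serve; tep_filter_alloc(), tep_filter_add_filter_str() and tep_filter_match()
are assumed from event-parse.h, with TEP_ERRNO__FILTER_MATCH signalling a hit:

        struct tep_event_filter *filter = tep_filter_alloc(tep);

        tep_filter_add_filter_str(filter, "sched/sched_switch:next_prio < 100");
        if (tep_filter_match(filter, record) == TEP_ERRNO__FILTER_MATCH)
                printf("record matched\n");
        tep_filter_free(filter);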
diff --git a/tools/lib/traceevent/plugin_function.c b/tools/lib/traceevent/plugin_function.c
index 528acc75d81a..a73eca34a8f9 100644
--- a/tools/lib/traceevent/plugin_function.c
+++ b/tools/lib/traceevent/plugin_function.c
@@ -124,7 +124,7 @@ static int add_and_get_index(const char *parent, const char *child, int cpu)
}
static int function_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
struct tep_handle *pevent = event->pevent;
unsigned long long function;
diff --git a/tools/lib/traceevent/plugin_hrtimer.c b/tools/lib/traceevent/plugin_hrtimer.c
index 9aa05b4ca811..5db5e401275f 100644
--- a/tools/lib/traceevent/plugin_hrtimer.c
+++ b/tools/lib/traceevent/plugin_hrtimer.c
@@ -27,7 +27,7 @@
static int timer_expire_handler(struct trace_seq *s,
struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
trace_seq_printf(s, "hrtimer=");
@@ -47,7 +47,7 @@ static int timer_expire_handler(struct trace_seq *s,
static int timer_start_handler(struct trace_seq *s,
struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
trace_seq_printf(s, "hrtimer=");
diff --git a/tools/lib/traceevent/plugin_kmem.c b/tools/lib/traceevent/plugin_kmem.c
index 1beb4eaddfdf..0e3c601f9ed1 100644
--- a/tools/lib/traceevent/plugin_kmem.c
+++ b/tools/lib/traceevent/plugin_kmem.c
@@ -25,7 +25,7 @@
#include "trace-seq.h"
static int call_site_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
struct tep_format_field *field;
unsigned long long val, addr;
diff --git a/tools/lib/traceevent/plugin_kvm.c b/tools/lib/traceevent/plugin_kvm.c
index d13c22846fa9..754050eea467 100644
--- a/tools/lib/traceevent/plugin_kvm.c
+++ b/tools/lib/traceevent/plugin_kvm.c
@@ -249,7 +249,7 @@ static const char *find_exit_reason(unsigned isa, int val)
}
static int print_exit_reason(struct trace_seq *s, struct tep_record *record,
- struct tep_event_format *event, const char *field)
+ struct tep_event *event, const char *field)
{
unsigned long long isa;
unsigned long long val;
@@ -270,7 +270,7 @@ static int print_exit_reason(struct trace_seq *s, struct tep_record *record,
}
static int kvm_exit_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
unsigned long long info1 = 0, info2 = 0;
@@ -293,7 +293,7 @@ static int kvm_exit_handler(struct trace_seq *s, struct tep_record *record,
static int kvm_emulate_insn_handler(struct trace_seq *s,
struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
unsigned long long rip, csbase, len, flags, failed;
int llen;
@@ -332,7 +332,7 @@ static int kvm_emulate_insn_handler(struct trace_seq *s,
static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
if (print_exit_reason(s, record, event, "exit_code") < 0)
return -1;
@@ -346,7 +346,7 @@ static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct tep_reco
}
static int kvm_nested_vmexit_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
tep_print_num_field(s, "rip %llx ", event, "rip", record, 1);
@@ -372,7 +372,7 @@ union kvm_mmu_page_role {
};
static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
unsigned long long val;
static const char *access_str[] = {
@@ -387,7 +387,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
/*
* We can only use the structure if file is of the same
- * endianess.
+ * endianness.
*/
if (tep_is_file_bigendian(event->pevent) ==
tep_is_host_bigendian(event->pevent)) {
@@ -419,7 +419,7 @@ static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
static int kvm_mmu_get_page_handler(struct trace_seq *s,
struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
unsigned long long val;
diff --git a/tools/lib/traceevent/plugin_mac80211.c b/tools/lib/traceevent/plugin_mac80211.c
index da3855e7b86f..e38b9477aad2 100644
--- a/tools/lib/traceevent/plugin_mac80211.c
+++ b/tools/lib/traceevent/plugin_mac80211.c
@@ -26,7 +26,7 @@
#define INDENT 65
-static void print_string(struct trace_seq *s, struct tep_event_format *event,
+static void print_string(struct trace_seq *s, struct tep_event *event,
const char *name, const void *data)
{
struct tep_format_field *f = tep_find_field(event, name);
@@ -60,7 +60,7 @@ static void print_string(struct trace_seq *s, struct tep_event_format *event,
static int drv_bss_info_changed(struct trace_seq *s,
struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
void *data = record->data;
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
index 77882272672f..834c9e378ff8 100644
--- a/tools/lib/traceevent/plugin_sched_switch.c
+++ b/tools/lib/traceevent/plugin_sched_switch.c
@@ -67,7 +67,7 @@ static void write_and_save_comm(struct tep_format_field *field,
static int sched_wakeup_handler(struct trace_seq *s,
struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
struct tep_format_field *field;
unsigned long long val;
@@ -96,7 +96,7 @@ static int sched_wakeup_handler(struct trace_seq *s,
static int sched_switch_handler(struct trace_seq *s,
struct tep_record *record,
- struct tep_event_format *event, void *context)
+ struct tep_event *event, void *context)
{
struct tep_format_field *field;
unsigned long long val;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 6dbb9fae0f9d..b8f3cca8e58b 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -31,6 +31,8 @@
#include "elf.h"
#include "warn.h"
+#define MAX_NAME_LEN 128
+
struct section *find_section_by_name(struct elf *elf, const char *name)
{
struct section *sec;
@@ -298,6 +300,8 @@ static int read_symbols(struct elf *elf)
/* Create parent/child links for any cold subfunctions */
list_for_each_entry(sec, &elf->sections, list) {
list_for_each_entry(sym, &sec->symbol_list, list) {
+ char pname[MAX_NAME_LEN + 1];
+ size_t pnamelen;
if (sym->type != STT_FUNC)
continue;
sym->pfunc = sym->cfunc = sym;
@@ -305,14 +309,21 @@ static int read_symbols(struct elf *elf)
if (!coldstr)
continue;
- coldstr[0] = '\0';
- pfunc = find_symbol_by_name(elf, sym->name);
- coldstr[0] = '.';
+ pnamelen = coldstr - sym->name;
+ if (pnamelen > MAX_NAME_LEN) {
+ WARN("%s(): parent function name exceeds maximum length of %d characters",
+ sym->name, MAX_NAME_LEN);
+ return -1;
+ }
+
+ strncpy(pname, sym->name, pnamelen);
+ pname[pnamelen] = '\0';
+ pfunc = find_symbol_by_name(elf, pname);
if (!pfunc) {
WARN("%s(): can't find parent function",
sym->name);
- goto err;
+ return -1;
}
sym->pfunc = pfunc;
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index 34750fc32714..0921a3c67381 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -58,6 +58,9 @@ SUBSYSTEM
'futex'::
Futex stressing benchmarks.
+'epoll'::
+ Eventpoll (epoll) stressing benchmarks.
+
'all'::
All benchmark subsystems.
@@ -203,6 +206,13 @@ Suite for evaluating requeue calls.
*lock-pi*::
Suite for evaluating futex lock_pi calls.
+SUITES FOR 'epoll'
+~~~~~~~~~~~~~~~~~~
+*wait*::
+Suite for evaluating concurrent epoll_wait calls.
+
+*ctl*::
+Suite for evaluating multiple epoll_ctl calls.
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index 32f4a898e3f2..4ac7775fbc11 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -199,6 +199,12 @@ colors.*::
Colors for headers in the output of a sub-commands (top, report).
Default values are 'white', 'blue'.
+core.*::
+ core.proc-map-timeout::
+ Sets a timeout (in milliseconds) for parsing /proc/<pid>/maps files.
+ Can be overridden by the --proc-map-timeout option on supported
+ subcommands. The default timeout is 500ms.
+
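In ~/.perfconfig syntax the new knob would look like this (value illustrative):

        [core]
                proc-map-timeout = 1000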
tui.*, gtk.*::
Subcommands that can be configured here are 'top', 'report' and 'annotate'.
These values are booleans, for example:
@@ -515,6 +521,38 @@ diff.*::
Possible values are 'delta', 'delta-abs', 'ratio' and
'wdiff'. Default is 'delta'.
+trace.*::
+ trace.add_events::
+ Allows adding a set of events on top of those specified by the
+ user, or using them as the default if none were specified.
+ The initial use case is to add augmented_raw_syscalls.o to
+ activate the 'perf trace' logic that looks for syscall
+ pointer contents after the normal tracepoint payload.
+
+ trace.args_alignment::
+ Number of columns to align the argument list, default is 70,
+ use 40 for the strace default, and zero for no alignment.
+
+ trace.no_inherit::
+ Do not follow child threads.
+
+ trace.show_arg_names::
+ Should syscall argument names be printed? If not then trace.show_zeros
+ will be set.
+
+ trace.show_duration::
+ Show syscall duration.
+
+ trace.show_prefix::
+ If set to 'yes', show common string prefixes in tables. The default
+ is to remove the common prefix in things like "MAP_SHARED", showing just "SHARED".
+
+ trace.show_timestamp::
+ Show syscall start timestamp.
+
+ trace.show_zeros::
+ Do not suppress syscall arguments that are equal to zero.
+
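Put together in ~/.perfconfig form (values illustrative):

        [trace]
                args_alignment = 40
                show_zeros = yes
                show_prefix = no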
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 667c14e56031..138fb6e94b3c 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -172,7 +172,7 @@ like cycles and instructions and some software events.
Other PMUs and global measurements are normally root only.
Some event qualifiers, such as "any", are also root only.
-This can be overriden by setting the kernel.perf_event_paranoid
+This can be overridden by setting the kernel.perf_event_paranoid
sysctl to -1, which allows non root to use these events.
For accessing trace point events perf needs to have read access to
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 246dee081efd..d232b13ea713 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -435,6 +435,11 @@ Specify vmlinux path which has debuginfo.
--buildid-all::
Record build-id of all DSOs regardless whether it's actually hit or not.
+--aio[=n]::
+Use <n> control blocks in asynchronous (Posix AIO) trace writing mode (default: 1, max: 4).
+Asynchronous mode is supported only when linking the perf tool with a libc
+library that provides an implementation of the Posix AIO API.
+
--all-kernel::
Configure all used events to run in kernel space.
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 474a4941f65d..1a27bfe05039 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -126,6 +126,14 @@ OPTIONS
And default sort keys are changed to comm, dso_from, symbol_from, dso_to
and symbol_to, see '--branch-stack'.
+ When the sort key symbol is specified, columns "IPC" and "IPC Coverage"
+ are enabled automatically. Column "IPC" reports the average IPC per function
+ and column "IPC coverage" reports the percentage of instructions with
+ sampled IPC in this function. IPC means Instructions Per Cycle. If it's low,
+ it indicates there may be a performance bottleneck when the function is
+ executed, such as a memory access bottleneck. If a function has high overhead
+ and low IPC, it's worth further analyzing it to optimize its performance.
+
If the --mem-mode option is used, the following sort keys are also available
(incompatible with --branch-stack):
symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline.
@@ -244,7 +252,7 @@ OPTIONS
Usually more convenient to use --branch-history for this.
value can be:
- - percent: diplay overhead percent (default)
+ - percent: display overhead percent (default)
- period: display event period
- count: display event count
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index a2b37ce48094..9e4def08d569 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -117,7 +117,7 @@ OPTIONS
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
- brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc.
+ brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc, srccode.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index b10a90b6a718..4bc2085e5197 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -50,7 +50,7 @@ report::
/sys/bus/event_source/devices/<pmu>/format/*
Note that the last two syntaxes support prefix and glob matching in
- the PMU name to simplify creation of events accross multiple instances
+ the PMU name to simplify creation of events across multiple instances
of the same type of PMU in large systems (e.g. memory controller PMUs).
Multiple PMU instances are typical for uncore PMUs, so the prefix
'uncore_' is also ignored when performing this match.
@@ -277,7 +277,7 @@ echo 0 > /proc/sys/kernel/nmi_watchdog
for best results. Otherwise the bottlenecks may be inconsistent
on workload with changing phases.
-This enables --metric-only, unless overriden with --no-metric-only.
+This enables --metric-only, unless overridden with --no-metric-only.
To interpret the results it is usually needed to know on which
CPUs the workload runs on. If needed the CPUs can be forced using
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 808b664343c9..44d89fb9c788 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -70,6 +70,9 @@ Default is to monitor all CPUS.
--ignore-vmlinux::
Ignore vmlinux files.
+--kallsyms=<file>::
+ kallsyms pathname
+
-m <pages>::
--mmap-pages=<pages>::
Number of mmap data pages (must be a power of two) or size
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index e113450503d2..631e687be4eb 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -205,6 +205,12 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
because the file may be huge. A time out is needed in such cases.
This option sets the time out limit. The default value is 500 ms.
+--sort-events::
+ Sort batches of events; use this when noticing out-of-order events that
+ may happen, for instance, when a thread gets migrated to a different CPU
+ while processing a syscall.
+
+
PAGEFAULTS
----------
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index e30d20fb482d..07c1857c3d7a 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -294,11 +294,21 @@ ifndef NO_BIONIC
$(call feature_check,bionic)
ifeq ($(feature-bionic), 1)
BIONIC := 1
+ CFLAGS += -DLACKS_SIGQUEUE_PROTOTYPE
+ CFLAGS += -DLACKS_OPEN_MEMSTREAM_PROTOTYPE
EXTLIBS := $(filter-out -lrt,$(EXTLIBS))
EXTLIBS := $(filter-out -lpthread,$(EXTLIBS))
endif
endif
+ifeq ($(feature-eventfd), 1)
+ CFLAGS += -DHAVE_EVENTFD
+endif
+
+ifeq ($(feature-get_current_dir_name), 1)
+ CFLAGS += -DHAVE_GET_CURRENT_DIR_NAME
+endif
+
ifdef NO_LIBELF
NO_DWARF := 1
NO_DEMANGLE := 1
@@ -357,6 +367,12 @@ ifeq ($(feature-glibc), 1)
CFLAGS += -DHAVE_GLIBC_SUPPORT
endif
+ifeq ($(feature-libaio), 1)
+ ifndef NO_AIO
+ CFLAGS += -DHAVE_AIO_SUPPORT
+ endif
+endif
+
ifdef NO_DWARF
NO_LIBDW_DWARF_UNWIND := 1
endif
@@ -580,7 +596,7 @@ endif
ifndef NO_LIBCRYPTO
ifneq ($(feature-libcrypto), 1)
- msg := $(warning No libcrypto.h found, disables jitted code injection, please install libssl-devel or libssl-dev);
+ msg := $(warning No libcrypto.h found, disables jitted code injection, please install openssl-devel or libssl-dev);
NO_LIBCRYPTO := 1
else
CFLAGS += -DHAVE_LIBCRYPTO_SUPPORT
@@ -847,6 +863,13 @@ ifndef NO_JVMTI
$(call feature_check,jvmti)
ifeq ($(feature-jvmti), 1)
$(call detected_var,JDIR)
+ ifndef NO_JVMTI_CMLR
+ FEATURE_CHECK_CFLAGS-jvmti-cmlr := $(FEATURE_CHECK_CFLAGS-jvmti)
+ $(call feature_check,jvmti-cmlr)
+ ifeq ($(feature-jvmti-cmlr), 1)
+ CFLAGS += -DHAVE_JVMTI_CMLR
+ endif
+ endif # NO_JVMTI_CMLR
else
$(warning No openjdk development package found, please install JDK package, e.g. openjdk-8-jdk, java-1.8.0-openjdk-devel)
NO_JVMTI := 1
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index d95655489f7e..bd23e3f30895 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -95,11 +95,19 @@ include ../scripts/utilities.mak
#
# Define NO_JVMTI if you do not want jvmti agent built
#
+# Define NO_JVMTI_CMLR (debug only) if you do not want to process CMLR
+# data for java source lines.
+#
# Define LIBCLANGLLVM if you DO want builtin clang and llvm support.
# When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
# llvm-config is not in $PATH.
-
+#
# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding.
+#
+# Define NO_AIO if you do not want support of Posix AIO based trace
+# streaming for record mode. Currently Posix AIO trace streaming is
+# supported only when linking with glibc.
+#
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -388,6 +396,7 @@ SHELL = $(SHELL_PATH)
linux_uapi_dir := $(srctree)/tools/include/uapi/linux
asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/
+x86_arch_asm_uapi_dir := $(srctree)/tools/arch/x86/include/uapi/asm/
beauty_outdir := $(OUTPUT)trace/beauty/generated
beauty_ioctl_outdir := $(beauty_outdir)/ioctl
@@ -401,6 +410,12 @@ _dummy := $(shell [ -d '$(beauty_ioctl_outdir)' ] || mkdir -p '$(beauty_ioctl_ou
$(drm_ioctl_array): $(drm_hdr_dir)/drm.h $(drm_hdr_dir)/i915_drm.h $(drm_ioctl_tbl)
$(Q)$(SHELL) '$(drm_ioctl_tbl)' $(drm_hdr_dir) > $@
+fadvise_advice_array := $(beauty_outdir)/fadvise_advice_array.c
+fadvise_advice_tbl := $(srctree)/tools/perf/trace/beauty/fadvise.sh
+
+$(fadvise_advice_array): $(linux_uapi_dir)/in.h $(fadvise_advice_tbl)
+ $(Q)$(SHELL) '$(fadvise_advice_tbl)' $(linux_uapi_dir) > $@
+
pkey_alloc_access_rights_array := $(beauty_outdir)/pkey_alloc_access_rights_array.c
asm_generic_hdr_dir := $(srctree)/tools/include/uapi/asm-generic/
pkey_alloc_access_rights_tbl := $(srctree)/tools/perf/trace/beauty/pkey_alloc_access_rights.sh
@@ -466,7 +481,7 @@ $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_t
mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c
mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh
-$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(arch_asm_uapi_dir)/mman.h $(mmap_flags_tbl)
+$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
$(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
mount_flags_array := $(beauty_outdir)/mount_flags_array.c
@@ -482,6 +497,18 @@ prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh
$(prctl_option_array): $(prctl_hdr_dir)/prctl.h $(prctl_option_tbl)
$(Q)$(SHELL) '$(prctl_option_tbl)' $(prctl_hdr_dir) > $@
+x86_arch_prctl_code_array := $(beauty_outdir)/x86_arch_prctl_code_array.c
+x86_arch_prctl_code_tbl := $(srctree)/tools/perf/trace/beauty/x86_arch_prctl.sh
+
+$(x86_arch_prctl_code_array): $(x86_arch_asm_uapi_dir)/prctl.h $(x86_arch_prctl_code_tbl)
+ $(Q)$(SHELL) '$(x86_arch_prctl_code_tbl)' $(x86_arch_asm_uapi_dir) > $@
+
+rename_flags_array := $(beauty_outdir)/rename_flags_array.c
+rename_flags_tbl := $(srctree)/tools/perf/trace/beauty/rename_flags.sh
+
+$(rename_flags_array): $(linux_uapi_dir)/fs.h $(rename_flags_tbl)
+ $(Q)$(SHELL) '$(rename_flags_tbl)' $(linux_uapi_dir) > $@
+
arch_errno_name_array := $(beauty_outdir)/arch_errno_name_array.c
arch_errno_hdr_dir := $(srctree)/tools
arch_errno_tbl := $(srctree)/tools/perf/trace/beauty/arch_errno_names.sh
@@ -584,6 +611,7 @@ __build-dir = $(subst $(OUTPUT),,$(dir $@))
build-dir = $(if $(__build-dir),$(__build-dir),.)
prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioctl_array) \
+ $(fadvise_advice_array) \
$(pkey_alloc_access_rights_array) \
$(sndrv_pcm_ioctl_array) \
$(sndrv_ctl_ioctl_array) \
@@ -596,6 +624,8 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
$(mount_flags_array) \
$(perf_ioctl_array) \
$(prctl_option_array) \
+ $(x86_arch_prctl_code_array) \
+ $(rename_flags_array) \
$(arch_errno_name_array)
$(OUTPUT)%.o: %.c prepare FORCE
@@ -639,7 +669,7 @@ $(LIBJVMTI_IN): FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=jvmti obj=jvmti
$(OUTPUT)$(LIBJVMTI): $(LIBJVMTI_IN)
- $(QUIET_LINK)$(CC) -shared -Wl,-soname -Wl,$(LIBJVMTI) -o $@ $<
+ $(QUIET_LINK)$(CC) $(LDFLAGS) -shared -Wl,-soname -Wl,$(LIBJVMTI) -o $@ $<
endif
$(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
@@ -879,6 +909,7 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)util/intel-pt-decoder/inat-tables.c \
$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
$(OUTPUT)pmu-events/pmu-events.c \
+ $(OUTPUT)$(fadvise_advice_array) \
$(OUTPUT)$(madvise_behavior_array) \
$(OUTPUT)$(mmap_flags_array) \
$(OUTPUT)$(mount_flags_array) \
@@ -892,6 +923,8 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)$(vhost_virtio_ioctl_array) \
$(OUTPUT)$(perf_ioctl_array) \
$(OUTPUT)$(prctl_option_array) \
+ $(OUTPUT)$(x86_arch_prctl_code_array) \
+ $(OUTPUT)$(rename_flags_array) \
$(OUTPUT)$(arch_errno_name_array)
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
diff --git a/tools/perf/arch/arc/annotate/instructions.c b/tools/perf/arch/arc/annotate/instructions.c
new file mode 100644
index 000000000000..2f00e995c7e3
--- /dev/null
+++ b/tools/perf/arch/arc/annotate/instructions.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+
+static int arc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
+{
+ arch->initialized = true;
+ arch->objdump.comment_char = ';';
+ return 0;
+}
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 82657c01a3b8..f3824ca7c20b 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -5,6 +5,13 @@
#include "../util/util.h"
#include "../util/debug.h"
+const char *const arc_triplets[] = {
+ "arc-linux-",
+ "arc-snps-linux-uclibc-",
+ "arc-snps-linux-gnu-",
+ NULL
+};
+
const char *const arm_triplets[] = {
"arm-eabi-",
"arm-linux-androideabi-",
@@ -147,7 +154,9 @@ static int perf_env__lookup_binutils_path(struct perf_env *env,
zfree(&buf);
}
- if (!strcmp(arch, "arm"))
+ if (!strcmp(arch, "arc"))
+ path_list = arc_triplets;
+ else if (!strcmp(arch, "arm"))
path_list = arm_triplets;
else if (!strcmp(arch, "arm64"))
path_list = arm64_triplets;
@@ -200,3 +209,13 @@ int perf_env__lookup_objdump(struct perf_env *env, const char **path)
return perf_env__lookup_binutils_path(env, "objdump", path);
}
+
+/*
+ * Some architectures have a single address space for kernel and user addresses,
+ * which makes it possible to determine if an address is in kernel space or user
+ * space.
+ */
+bool perf_env__single_address_space(struct perf_env *env)
+{
+ return strcmp(perf_env__arch(env), "sparc");
+}
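A hypothetical caller sketch: on a single-address-space architecture the
sample address alone classifies kernel versus user space, so the cpumode can
be derived without the misc flags (kernel_start is assumed to be known):

        if (perf_env__single_address_space(env))
                cpumode = addr >= kernel_start ? PERF_RECORD_MISC_KERNEL
                                               : PERF_RECORD_MISC_USER;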
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index 2167001b18c5..c298a446d1f6 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -5,5 +5,6 @@
#include "../util/env.h"
int perf_env__lookup_objdump(struct perf_env *env, const char **path);
+bool perf_env__single_address_space(struct perf_env *env);
#endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h
index 00e37b106913..1076393e6f43 100644
--- a/tools/perf/arch/powerpc/include/perf_regs.h
+++ b/tools/perf/arch/powerpc/include/perf_regs.h
@@ -62,7 +62,8 @@ static const char *reg_names[] = {
[PERF_REG_POWERPC_SOFTE] = "softe",
[PERF_REG_POWERPC_TRAP] = "trap",
[PERF_REG_POWERPC_DAR] = "dar",
- [PERF_REG_POWERPC_DSISR] = "dsisr"
+ [PERF_REG_POWERPC_DSISR] = "dsisr",
+ [PERF_REG_POWERPC_SIER] = "sier"
};
static inline const char *perf_reg_name(int id)
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c
index ec50939b0418..07fcd977d93e 100644
--- a/tools/perf/arch/powerpc/util/perf_regs.c
+++ b/tools/perf/arch/powerpc/util/perf_regs.c
@@ -52,6 +52,7 @@ const struct sample_reg sample_reg_masks[] = {
SMPL_REG(trap, PERF_REG_POWERPC_TRAP),
SMPL_REG(dar, PERF_REG_POWERPC_DAR),
SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR),
+ SMPL_REG(sier, PERF_REG_POWERPC_SIER),
SMPL_REG_END
};
diff --git a/tools/perf/arch/x86/tests/insn-x86.c b/tools/perf/arch/x86/tests/insn-x86.c
index a5d24ae5810d..c3e5f4ab0d3e 100644
--- a/tools/perf/arch/x86/tests/insn-x86.c
+++ b/tools/perf/arch/x86/tests/insn-x86.c
@@ -170,7 +170,7 @@ static int test_data_set(struct test_data *dat_set, int x86_64)
*
* If the test passes %0 is returned, otherwise %-1 is returned. Use the
* verbose (-v) option to see all the instructions and whether or not they
- * decoded successfuly.
+ * decoded successfully.
*/
int test__insn_x86(struct test *test __maybe_unused, int subtest __maybe_unused)
{
diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
index fb0d71afee8b..af9a9f2600be 100644
--- a/tools/perf/arch/x86/util/header.c
+++ b/tools/perf/arch/x86/util/header.c
@@ -4,6 +4,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <regex.h>
#include "../../util/header.h"
@@ -70,9 +71,72 @@ get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
char *buf = malloc(128);
- if (buf && __get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
+ if (buf && __get_cpuid(buf, 128, "%s-%u-%X-%X$") < 0) {
free(buf);
return NULL;
}
return buf;
}
+
+/* Full CPUID format for x86 is vendor-family-model-stepping */
+static bool is_full_cpuid(const char *id)
+{
+ const char *tmp = id;
+ int count = 0;
+
+ while ((tmp = strchr(tmp, '-')) != NULL) {
+ count++;
+ tmp++;
+ }
+
+ if (count == 3)
+ return true;
+
+ return false;
+}
+
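+/*
+ * For illustration (hypothetical cpuid values): a mapfile entry that
+ * omits the stepping, e.g. "GenuineIntel-6-55", matches a full runtime
+ * cpuid such as "GenuineIntel-6-55-4", since the stepping is dropped
+ * from the comparison; a full mapfile entry, e.g. "GenuineIntel-6-55-4",
+ * also requires the runtime cpuid to carry all four fields.
+ */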
+int strcmp_cpuid_str(const char *mapcpuid, const char *id)
+{
+ regex_t re;
+ regmatch_t pmatch[1];
+ int match;
+ bool full_mapcpuid = is_full_cpuid(mapcpuid);
+ bool full_cpuid = is_full_cpuid(id);
+
+ /*
+ * Full CPUID format is required to identify a platform.
+ * Error out if the cpuid string is incomplete.
+ */
+ if (full_mapcpuid && !full_cpuid) {
+ pr_info("Invalid CPUID %s. Full CPUID is required, "
+ "vendor-family-model-stepping\n", id);
+ return 1;
+ }
+
+ if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
+ /* Warn when unable to compile the match pattern. */
+ pr_info("Invalid regular expression %s\n", mapcpuid);
+ return 1;
+ }
+
+ match = !regexec(&re, id, 1, pmatch, 0);
+ regfree(&re);
+ if (match) {
+ size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
+ size_t cpuid_len;
+
+ /* If the full CPUID format isn't required,
+ * ignore the stepping.
+ */
+ if (!full_mapcpuid && full_cpuid)
+ cpuid_len = strrchr(id, '-') - id;
+ else
+ cpuid_len = strlen(id);
+
+ /* Verify the entire string matched. */
+ if (match_len == cpuid_len)
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index db0ba8caf5a2..ba8ecaf52200 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -524,10 +524,21 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
struct perf_evsel *evsel)
{
int err;
+ char c;
if (!evsel)
return 0;
+ /*
+ * If supported, force pass-through config term (pt=1) even if user
+ * sets pt=0, which avoids senseless kernel errors.
+ */
+ if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
+ !(evsel->attr.config & 1)) {
+ pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
+ evsel->attr.config |= 1;
+ }
+
err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
"cyc_thresh", "caps/psb_cyc",
evsel->attr.config);
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index b32409a0e546..081353d7b095 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -156,7 +156,7 @@ int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
if (strstr(cpuid, "Intel")) {
kvm->exit_reasons = vmx_exit_reasons;
kvm->exit_reasons_isa = "VMX";
- } else if (strstr(cpuid, "AMD")) {
+ } else if (strstr(cpuid, "AMD") || strstr(cpuid, "Hygon")) {
kvm->exit_reasons = svm_exit_reasons;
kvm->exit_reasons_isa = "SVM";
} else
diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build
index eafce1a130a1..e4e321b6f883 100644
--- a/tools/perf/bench/Build
+++ b/tools/perf/bench/Build
@@ -7,6 +7,9 @@ perf-y += futex-wake-parallel.o
perf-y += futex-requeue.o
perf-y += futex-lock-pi.o
+perf-y += epoll-wait.o
+perf-y += epoll-ctl.o
+
perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-lib.o
perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index 6c9fcd757f31..fddb3ced9db6 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -38,6 +38,9 @@ int bench_futex_requeue(int argc, const char **argv);
/* pi futexes */
int bench_futex_lock_pi(int argc, const char **argv);
+int bench_epoll_wait(int argc, const char **argv);
+int bench_epoll_ctl(int argc, const char **argv);
+
#define BENCH_FORMAT_DEFAULT_STR "default"
#define BENCH_FORMAT_DEFAULT 0
#define BENCH_FORMAT_SIMPLE_STR "simple"
@@ -48,4 +51,15 @@ int bench_futex_lock_pi(int argc, const char **argv);
extern int bench_format;
extern unsigned int bench_repeat;
+#ifndef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
+#include <pthread.h>
+#include <linux/compiler.h>
+static inline int pthread_attr_setaffinity_np(pthread_attr_t *attr __maybe_unused,
+ size_t cpusetsize __maybe_unused,
+ cpu_set_t *cpuset __maybe_unused)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
new file mode 100644
index 000000000000..0c0a6e824934
--- /dev/null
+++ b/tools/perf/bench/epoll-ctl.c
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Davidlohr Bueso.
+ *
+ * Benchmark the various operations allowed for epoll_ctl(2).
+ * The idea is to concurrently stress a single epoll instance.
+ */
+#ifdef HAVE_EVENTFD
+/* For the CLR_() macros */
+#include <string.h>
+#include <pthread.h>
+
+#include <errno.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
+
+#include "../util/stat.h"
+#include <subcmd/parse-options.h>
+#include "bench.h"
+#include "cpumap.h"
+
+#include <err.h>
+
+#define printinfo(fmt, arg...) \
+ do { if (__verbose) printf(fmt, ## arg); } while (0)
+
+static unsigned int nthreads = 0;
+static unsigned int nsecs = 8;
+struct timeval start, end, runtime;
+static bool done, __verbose, randomize;
+
+/*
+ * epoll related shared variables.
+ */
+
+/* Maximum number of nesting allowed inside epoll sets */
+#define EPOLL_MAXNESTS 4
+
+enum {
+ OP_EPOLL_ADD,
+ OP_EPOLL_MOD,
+ OP_EPOLL_DEL,
+ EPOLL_NR_OPS,
+};
+
+static int epollfd;
+static int *epollfdp;
+static bool noaffinity;
+static unsigned int nested = 0;
+
+/* amount of fds to monitor, per thread */
+static unsigned int nfds = 64;
+
+static pthread_mutex_t thread_lock;
+static unsigned int threads_starting;
+static struct stats all_stats[EPOLL_NR_OPS];
+static pthread_cond_t thread_parent, thread_worker;
+
+struct worker {
+ int tid;
+ pthread_t thread;
+ unsigned long ops[EPOLL_NR_OPS];
+ int *fdmap;
+};
+
+static const struct option options[] = {
+ OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
+ OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
+ OPT_UINTEGER('f', "nfds", &nfds, "Specify amount of file descriptors to monitor for each thread"),
+ OPT_BOOLEAN( 'n', "noaffinity", &noaffinity, "Disables CPU affinity"),
+ OPT_UINTEGER( 'N', "nested", &nested, "Nesting level epoll hierarchy (default is 0, no nesting)"),
+ OPT_BOOLEAN( 'R', "randomize", &randomize, "Perform random operations on random fds"),
+ OPT_BOOLEAN( 'v', "verbose", &__verbose, "Verbose mode"),
+ OPT_END()
+};
+
+static const char * const bench_epoll_ctl_usage[] = {
+ "perf bench epoll ctl <options>",
+ NULL
+};
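+
+/*
+ * Illustrative invocations (option values are arbitrary):
+ *
+ *   perf bench epoll ctl                 # defaults: one thread per CPU, 64 fds each
+ *   perf bench epoll ctl -t 8 -f 32 -R   # 8 threads, 32 fds, random ops on random fds
+ */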
+
+static void toggle_done(int sig __maybe_unused,
+ siginfo_t *info __maybe_unused,
+ void *uc __maybe_unused)
+{
+ /* inform all threads that we're done for the day */
+ done = true;
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &runtime);
+}
+
+static void nest_epollfd(void)
+{
+ unsigned int i;
+ struct epoll_event ev;
+
+ if (nested > EPOLL_MAXNESTS)
+ nested = EPOLL_MAXNESTS;
+ printinfo("Nesting level(s): %d\n", nested);
+
+ epollfdp = calloc(nested, sizeof(int));
+ if (!epollfdp)
+ err(EXIT_FAILURE, "calloc");
+
+ for (i = 0; i < nested; i++) {
+ epollfdp[i] = epoll_create(1);
+ if (epollfdp[i] < 0)
+ err(EXIT_FAILURE, "epoll_create");
+ }
+
+ ev.events = EPOLLHUP; /* anything */
+ ev.data.u64 = i; /* any number */
+
+ for (i = nested - 1; i; i--) {
+ if (epoll_ctl(epollfdp[i - 1], EPOLL_CTL_ADD,
+ epollfdp[i], &ev) < 0)
+ err(EXIT_FAILURE, "epoll_ctl");
+ }
+
+ if (epoll_ctl(epollfd, EPOLL_CTL_ADD, *epollfdp, &ev) < 0)
+ err(EXIT_FAILURE, "epoll_ctl");
+}
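+
+/*
+ * With --nested=3 (illustrative), the chain built above hangs off the
+ * main instance:
+ *
+ *   epollfd -> epollfdp[0] -> epollfdp[1] -> epollfdp[2]
+ */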
+
+static inline void do_epoll_op(struct worker *w, int op, int fd)
+{
+ int error;
+ struct epoll_event ev;
+
+ ev.events = EPOLLIN;
+ ev.data.u64 = fd;
+
+ switch (op) {
+ case OP_EPOLL_ADD:
+ error = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
+ break;
+ case OP_EPOLL_MOD:
+ ev.events = EPOLLOUT;
+ error = epoll_ctl(epollfd, EPOLL_CTL_MOD, fd, &ev);
+ break;
+ case OP_EPOLL_DEL:
+ error = epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, NULL);
+ break;
+ default:
+ error = 1;
+ break;
+ }
+
+ if (!error)
+ w->ops[op]++;
+}
+
+static inline void do_random_epoll_op(struct worker *w)
+{
+ unsigned long rnd1 = random(), rnd2 = random();
+ int op, fd;
+
+ fd = w->fdmap[rnd1 % nfds];
+ op = rnd2 % EPOLL_NR_OPS;
+
+ do_epoll_op(w, op, fd);
+}
+
+static void *workerfn(void *arg)
+{
+ unsigned int i;
+ struct worker *w = (struct worker *) arg;
+ struct timespec ts = { .tv_sec = 0,
+ .tv_nsec = 250 };
+
+ pthread_mutex_lock(&thread_lock);
+ threads_starting--;
+ if (!threads_starting)
+ pthread_cond_signal(&thread_parent);
+ pthread_cond_wait(&thread_worker, &thread_lock);
+ pthread_mutex_unlock(&thread_lock);
+
+ /* Let 'em loose */
+ do {
+ /* random */
+ if (randomize) {
+ do_random_epoll_op(w);
+ } else {
+ for (i = 0; i < nfds; i++) {
+ do_epoll_op(w, OP_EPOLL_ADD, w->fdmap[i]);
+ do_epoll_op(w, OP_EPOLL_MOD, w->fdmap[i]);
+ do_epoll_op(w, OP_EPOLL_DEL, w->fdmap[i]);
+ }
+ }
+
+ nanosleep(&ts, NULL);
+ } while (!done);
+
+ return NULL;
+}
+
+static void init_fdmaps(struct worker *w, int pct)
+{
+ unsigned int i;
+ int inc;
+ struct epoll_event ev;
+
+ if (!pct)
+ return;
+
+ inc = 100/pct;
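+ /* e.g. pct = 50 gives inc = 2: every second fd gets pre-registered */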
+ for (i = 0; i < nfds; i+=inc) {
+ ev.data.fd = w->fdmap[i];
+ ev.events = EPOLLIN;
+
+ if (epoll_ctl(epollfd, EPOLL_CTL_ADD, w->fdmap[i], &ev) < 0)
+ err(EXIT_FAILURE, "epoll_ct");
+ }
+}
+
+static int do_threads(struct worker *worker, struct cpu_map *cpu)
+{
+ pthread_attr_t thread_attr, *attrp = NULL;
+ cpu_set_t cpuset;
+ unsigned int i, j;
+ int ret;
+
+ if (!noaffinity)
+ pthread_attr_init(&thread_attr);
+
+ for (i = 0; i < nthreads; i++) {
+ struct worker *w = &worker[i];
+
+ w->tid = i;
+ w->fdmap = calloc(nfds, sizeof(int));
+ if (!w->fdmap)
+ return 1;
+
+ for (j = 0; j < nfds; j++) {
+ w->fdmap[j] = eventfd(0, EFD_NONBLOCK);
+ if (w->fdmap[j] < 0)
+ err(EXIT_FAILURE, "eventfd");
+ }
+
+ /*
+ * Let's add 50% of the fdmap to the epoll instance, and
+ * do it before any threads are started; otherwise the
+ * initial mod and del ops would be biased toward failure.
+ */
+ if (randomize)
+ init_fdmaps(w, 50);
+
+ if (!noaffinity) {
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+
+ ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+
+ attrp = &thread_attr;
+ }
+
+ ret = pthread_create(&w->thread, attrp, workerfn,
+ (void *)(struct worker *) w);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_create");
+ }
+
+ if (!noaffinity)
+ pthread_attr_destroy(&thread_attr);
+
+ return ret;
+}
+
+static void print_summary(void)
+{
+ int i;
+ unsigned long avg[EPOLL_NR_OPS];
+ double stddev[EPOLL_NR_OPS];
+
+ for (i = 0; i < EPOLL_NR_OPS; i++) {
+ avg[i] = avg_stats(&all_stats[i]);
+ stddev[i] = stddev_stats(&all_stats[i]);
+ }
+
+ printf("\nAveraged %ld ADD operations (+- %.2f%%)\n",
+ avg[OP_EPOLL_ADD], rel_stddev_stats(stddev[OP_EPOLL_ADD],
+ avg[OP_EPOLL_ADD]));
+ printf("Averaged %ld MOD operations (+- %.2f%%)\n",
+ avg[OP_EPOLL_MOD], rel_stddev_stats(stddev[OP_EPOLL_MOD],
+ avg[OP_EPOLL_MOD]));
+ printf("Averaged %ld DEL operations (+- %.2f%%)\n",
+ avg[OP_EPOLL_DEL], rel_stddev_stats(stddev[OP_EPOLL_DEL],
+ avg[OP_EPOLL_DEL]));
+}
+
+int bench_epoll_ctl(int argc, const char **argv)
+{
+ int j, ret = 0;
+ struct sigaction act;
+ struct worker *worker = NULL;
+ struct cpu_map *cpu;
+ struct rlimit rl, prevrl;
+ unsigned int i;
+
+ argc = parse_options(argc, argv, options, bench_epoll_ctl_usage, 0);
+ if (argc) {
+ usage_with_options(bench_epoll_ctl_usage, options);
+ exit(EXIT_FAILURE);
+ }
+
+ sigfillset(&act.sa_mask);
+ act.sa_sigaction = toggle_done;
+ sigaction(SIGINT, &act, NULL);
+
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ goto errmem;
+
+ /* a single, main epoll instance */
+ epollfd = epoll_create(1);
+ if (epollfd < 0)
+ err(EXIT_FAILURE, "epoll_create");
+
+ /*
+ * Deal with nested epolls, if any.
+ */
+ if (nested)
+ nest_epollfd();
+
+ /* default to the number of CPUs */
+ if (!nthreads)
+ nthreads = cpu->nr;
+
+ worker = calloc(nthreads, sizeof(*worker));
+ if (!worker)
+ goto errmem;
+
+ if (getrlimit(RLIMIT_NOFILE, &prevrl))
+ err(EXIT_FAILURE, "getrlimit");
+ rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
+ printinfo("Setting RLIMIT_NOFILE rlimit from %" PRIu64 " to: %" PRIu64 "\n",
+ (uint64_t)prevrl.rlim_max, (uint64_t)rl.rlim_max);
+ if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
+ err(EXIT_FAILURE, "setrlimit");
+
+ printf("Run summary [PID %d]: %d threads doing epoll_ctl ops "
+ "%d file-descriptors for %d secs.\n\n",
+ getpid(), nthreads, nfds, nsecs);
+
+ for (i = 0; i < EPOLL_NR_OPS; i++)
+ init_stats(&all_stats[i]);
+
+ pthread_mutex_init(&thread_lock, NULL);
+ pthread_cond_init(&thread_parent, NULL);
+ pthread_cond_init(&thread_worker, NULL);
+
+ threads_starting = nthreads;
+
+ gettimeofday(&start, NULL);
+
+ do_threads(worker, cpu);
+
+ pthread_mutex_lock(&thread_lock);
+ while (threads_starting)
+ pthread_cond_wait(&thread_parent, &thread_lock);
+ pthread_cond_broadcast(&thread_worker);
+ pthread_mutex_unlock(&thread_lock);
+
+ sleep(nsecs);
+ toggle_done(0, NULL, NULL);
+ printinfo("main thread: toggling done\n");
+
+ for (i = 0; i < nthreads; i++) {
+ ret = pthread_join(worker[i].thread, NULL);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_join");
+ }
+
+ /* cleanup & report results */
+ pthread_cond_destroy(&thread_parent);
+ pthread_cond_destroy(&thread_worker);
+ pthread_mutex_destroy(&thread_lock);
+
+ for (i = 0; i < nthreads; i++) {
+ unsigned long t[EPOLL_NR_OPS];
+
+ for (j = 0; j < EPOLL_NR_OPS; j++) {
+ t[j] = worker[i].ops[j];
+ update_stats(&all_stats[j], t[j]);
+ }
+
+ if (nfds == 1)
+ printf("[thread %2d] fdmap: %p [ add: %04ld; mod: %04ld; del: %04lds ops ]\n",
+ worker[i].tid, &worker[i].fdmap[0],
+ t[OP_EPOLL_ADD], t[OP_EPOLL_MOD], t[OP_EPOLL_DEL]);
+ else
+ printf("[thread %2d] fdmap: %p ... %p [ add: %04ld ops; mod: %04ld ops; del: %04ld ops ]\n",
+ worker[i].tid, &worker[i].fdmap[0],
+ &worker[i].fdmap[nfds-1],
+ t[OP_EPOLL_ADD], t[OP_EPOLL_MOD], t[OP_EPOLL_DEL]);
+ }
+
+ print_summary();
+
+ close(epollfd);
+ return ret;
+errmem:
+ err(EXIT_FAILURE, "calloc");
+}
+#endif // HAVE_EVENTFD
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
new file mode 100644
index 000000000000..5a11534e96a0
--- /dev/null
+++ b/tools/perf/bench/epoll-wait.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifdef HAVE_EVENTFD
+/*
+ * Copyright (C) 2018 Davidlohr Bueso.
+ *
+ * This program benchmarks concurrent epoll_wait(2) monitoring multiple
+ * file descriptors under one or two load balancing models. The first,
+ * and default, is the single/combined queueing (which refers to a single
+ * epoll instance for N worker threads):
+ *
+ * |---> [worker A]
+ * |---> [worker B]
+ * [combined queue] .---> [worker C]
+ * |---> [worker D]
+ * |---> [worker E]
+ *
+ * The second model, enabled via the --multiq option, uses multiple
+ * queueing (which refers to one epoll instance per worker). For example,
+ * short-lived TCP connections in a high-throughput httpd server will
+ * distribute the accept()'ing connections across CPUs. In this case each
+ * worker does a limited amount of processing.
+ *
+ * [queue A] ---> [worker]
+ * [queue B] ---> [worker]
+ * [queue C] ---> [worker]
+ * [queue D] ---> [worker]
+ * [queue E] ---> [worker]
+ *
+ * Naturally, the single queue will enforce more concurrency on the epoll
+ * instance, and can therefore scale poorly compared to multiple queues.
+ * However, this is raw benchmark data and must be taken with a grain
+ * of salt when choosing how to make use of sys_epoll.
+ *
+ * Each thread has a number of private, nonblocking file descriptors,
+ * referred to as fdmap. A writer thread will constantly be writing to
+ * the fdmaps of all threads, minimizing each thread's chances of
+ * epoll_wait not finding any ready read events and blocking as this
+ * is not what we want to stress. The size of the fdmap can be adjusted
+ * by the user; enlarging the value will increase the chances of
+ * epoll_wait(2) blocking as the linear writer thread will take "longer",
+ * at least at a high level.
+ *
+ * Note that because fds are private to each thread, this workload does
+ * not stress scenarios where multiple tasks are awoken per ready IO; i.e.:
+ * EPOLLEXCLUSIVE semantics.
+ *
+ * The end result/metric is throughput: number of ops/second where an
+ * operation consists of:
+ *
+ * epoll_wait(2) + [others]
+ *
+ * ... where [others] is the cost of re-adding the fd (EPOLLET),
+ * or rearming it (EPOLLONESHOT).
+ *
+ *
+ * The purpose of this program is to be useful for measuring
+ * kernel-related changes to sys_epoll, not for comparing different
+ * IO polling methods. Hence everything is very ad hoc and
+ * outputs raw microbenchmark numbers. Also, this uses eventfd, where
+ * similar tools tend to use pipes or sockets, but the result is the same.
+ */
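+
+/*
+ * Illustrative invocations (option values are arbitrary):
+ *
+ *   perf bench epoll wait                  # combined queue, level-triggered, blocking
+ *   perf bench epoll wait -m -t 4 -f 128   # one epoll instance per each of 4 threads
+ *   perf bench epoll wait -E -R            # edge-triggered, randomized writer
+ */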
+
+/* For the CLR_() macros */
+#include <string.h>
+#include <pthread.h>
+
+#include <errno.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/epoll.h>
+#include <sys/eventfd.h>
+#include <sys/types.h>
+
+#include "../util/stat.h"
+#include <subcmd/parse-options.h>
+#include "bench.h"
+#include "cpumap.h"
+
+#include <err.h>
+
+#define printinfo(fmt, arg...) \
+ do { if (__verbose) { printf(fmt, ## arg); fflush(stdout); } } while (0)
+
+static unsigned int nthreads = 0;
+static unsigned int nsecs = 8;
+struct timeval start, end, runtime;
+static bool wdone, done, __verbose, randomize, nonblocking;
+
+/*
+ * epoll related shared variables.
+ */
+
+/* Maximum number of nesting allowed inside epoll sets */
+#define EPOLL_MAXNESTS 4
+
+static int epollfd;
+static int *epollfdp;
+static bool noaffinity;
+static unsigned int nested = 0;
+static bool et; /* edge-trigger */
+static bool oneshot;
+static bool multiq; /* use an epoll instance per thread */
+
+/* amount of fds to monitor, per thread */
+static unsigned int nfds = 64;
+
+static pthread_mutex_t thread_lock;
+static unsigned int threads_starting;
+static struct stats throughput_stats;
+static pthread_cond_t thread_parent, thread_worker;
+
+struct worker {
+ int tid;
+ int epollfd; /* for --multiq */
+ pthread_t thread;
+ unsigned long ops;
+ int *fdmap;
+};
+
+static const struct option options[] = {
+ /* general benchmark options */
+ OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
+ OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
+ OPT_UINTEGER('f', "nfds", &nfds, "Specify amount of file descriptors to monitor for each thread"),
+ OPT_BOOLEAN( 'n', "noaffinity", &noaffinity, "Disables CPU affinity"),
+ OPT_BOOLEAN('R', "randomize", &randomize, "Enable random write behaviour (default is lineal)"),
+ OPT_BOOLEAN( 'v', "verbose", &__verbose, "Verbose mode"),
+
+ /* epoll specific options */
+ OPT_BOOLEAN( 'm', "multiq", &multiq, "Use multiple epoll instances (one per thread)"),
+ OPT_BOOLEAN( 'B', "nonblocking", &nonblocking, "Nonblocking epoll_wait(2) behaviour"),
+ OPT_UINTEGER( 'N', "nested", &nested, "Nesting level epoll hierarchy (default is 0, no nesting)"),
+ OPT_BOOLEAN( 'S', "oneshot", &oneshot, "Use EPOLLONESHOT semantics"),
+ OPT_BOOLEAN( 'E', "edge", &et, "Use Edge-triggered interface (default is LT)"),
+
+ OPT_END()
+};
+
+static const char * const bench_epoll_wait_usage[] = {
+ "perf bench epoll wait <options>",
+ NULL
+};
+
+
+/*
+ * Arrange the N elements of ARRAY in random order.
+ * Only effective if N is much smaller than RAND_MAX;
+ * if this may not be the case, use a better random
+ * number generator. -- Ben Pfaff.
+ */
+static void shuffle(void *array, size_t n, size_t size)
+{
+ char *carray = array;
+ void *aux;
+ size_t i;
+
+ if (n <= 1)
+ return;
+
+ aux = calloc(1, size);
+ if (!aux)
+ err(EXIT_FAILURE, "calloc");
+
+ for (i = 1; i < n; ++i) {
+ size_t j = i + rand() / (RAND_MAX / (n - i) + 1);
+ j *= size;
+
+ memcpy(aux, &carray[j], size);
+ memcpy(&carray[j], &carray[i*size], size);
+ memcpy(&carray[i*size], aux, size);
+ }
+
+ free(aux);
+}
+
+
+static void *workerfn(void *arg)
+{
+ int fd, ret, r;
+ struct worker *w = (struct worker *) arg;
+ unsigned long ops = w->ops;
+ struct epoll_event ev;
+ uint64_t val;
+ int to = nonblocking? 0 : -1;
+ int efd = multiq ? w->epollfd : epollfd;
+
+ pthread_mutex_lock(&thread_lock);
+ threads_starting--;
+ if (!threads_starting)
+ pthread_cond_signal(&thread_parent);
+ pthread_cond_wait(&thread_worker, &thread_lock);
+ pthread_mutex_unlock(&thread_lock);
+
+ do {
+ /*
+ * Block indefinitely waiting for the IN event.
+ * In order to stress the epoll_wait(2) syscall,
+ * call it one event at a time, instead of using
+ * a larger batch (max) limit.
+ */
+ do {
+ ret = epoll_wait(efd, &ev, 1, to);
+ } while (ret < 0 && errno == EINTR);
+ if (ret < 0)
+ err(EXIT_FAILURE, "epoll_wait");
+
+ fd = ev.data.fd;
+
+ do {
+ r = read(fd, &val, sizeof(val));
+ } while (!done && (r < 0 && errno == EAGAIN));
+
+ if (et) {
+ ev.events = EPOLLIN | EPOLLET;
+ ret = epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev);
+ }
+
+ if (oneshot) {
+ /* rearm the file descriptor with a new event mask */
+ ev.events |= EPOLLIN | EPOLLONESHOT;
+ ret = epoll_ctl(efd, EPOLL_CTL_MOD, fd, &ev);
+ }
+
+ ops++;
+ } while (!done);
+
+ if (multiq)
+ close(w->epollfd);
+
+ w->ops = ops;
+ return NULL;
+}
+
+static void nest_epollfd(struct worker *w)
+{
+ unsigned int i;
+ struct epoll_event ev;
+ int efd = multiq ? w->epollfd : epollfd;
+
+ if (nested > EPOLL_MAXNESTS)
+ nested = EPOLL_MAXNESTS;
+
+ epollfdp = calloc(nested, sizeof(*epollfdp));
+ if (!epollfdp)
+ err(EXIT_FAILURE, "calloc");
+
+ for (i = 0; i < nested; i++) {
+ epollfdp[i] = epoll_create(1);
+ if (epollfdp[i] < 0)
+ err(EXIT_FAILURE, "epoll_create");
+ }
+
+ ev.events = EPOLLHUP; /* anything */
+ ev.data.u64 = i; /* any number */
+
+ for (i = nested - 1; i; i--) {
+ if (epoll_ctl(epollfdp[i - 1], EPOLL_CTL_ADD,
+ epollfdp[i], &ev) < 0)
+ err(EXIT_FAILURE, "epoll_ctl");
+ }
+
+ if (epoll_ctl(efd, EPOLL_CTL_ADD, *epollfdp, &ev) < 0)
+ err(EXIT_FAILURE, "epoll_ctl");
+}
+
+static void toggle_done(int sig __maybe_unused,
+ siginfo_t *info __maybe_unused,
+ void *uc __maybe_unused)
+{
+ /* inform all threads that we're done for the day */
+ done = true;
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &runtime);
+}
+
+static void print_summary(void)
+{
+ unsigned long avg = avg_stats(&throughput_stats);
+ double stddev = stddev_stats(&throughput_stats);
+
+ printf("\nAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
+ avg, rel_stddev_stats(stddev, avg),
+ (int) runtime.tv_sec);
+}
+
+static int do_threads(struct worker *worker, struct cpu_map *cpu)
+{
+ pthread_attr_t thread_attr, *attrp = NULL;
+ cpu_set_t cpuset;
+ unsigned int i, j;
+ int ret, events = EPOLLIN;
+
+ if (oneshot)
+ events |= EPOLLONESHOT;
+ if (et)
+ events |= EPOLLET;
+
+ printinfo("starting worker/consumer %sthreads%s\n",
+ noaffinity ? "":"CPU affinity ",
+ nonblocking ? " (nonblocking)":"");
+ if (!noaffinity)
+ pthread_attr_init(&thread_attr);
+
+ for (i = 0; i < nthreads; i++) {
+ struct worker *w = &worker[i];
+
+ if (multiq) {
+ w->epollfd = epoll_create(1);
+ if (w->epollfd < 0)
+ err(EXIT_FAILURE, "epoll_create");
+
+ if (nested)
+ nest_epollfd(w);
+ }
+
+ w->tid = i;
+ w->fdmap = calloc(nfds, sizeof(int));
+ if (!w->fdmap)
+ return 1;
+
+ for (j = 0; j < nfds; j++) {
+ int efd = multiq ? w->epollfd : epollfd;
+ struct epoll_event ev;
+
+ w->fdmap[j] = eventfd(0, EFD_NONBLOCK);
+ if (w->fdmap[j] < 0)
+ err(EXIT_FAILURE, "eventfd");
+
+ ev.data.fd = w->fdmap[j];
+ ev.events = events;
+
+ ret = epoll_ctl(efd, EPOLL_CTL_ADD,
+ w->fdmap[j], &ev);
+ if (ret < 0)
+ err(EXIT_FAILURE, "epoll_ctl");
+ }
+
+ if (!noaffinity) {
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+
+ ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+
+ attrp = &thread_attr;
+ }
+
+ ret = pthread_create(&w->thread, attrp, workerfn,
+ (void *)(struct worker *) w);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_create");
+ }
+
+ if (!noaffinity)
+ pthread_attr_destroy(&thread_attr);
+
+ return ret;
+}
+
+static void *writerfn(void *p)
+{
+ struct worker *worker = p;
+ size_t i, j, iter;
+ const uint64_t val = 1;
+ ssize_t sz;
+ struct timespec ts = { .tv_sec = 0,
+ .tv_nsec = 500 };
+
+ printinfo("starting writer-thread: doing %s writes ...\n",
+ randomize? "random":"lineal");
+
+ for (iter = 0; !wdone; iter++) {
+ if (randomize) {
+ shuffle((void *)worker, nthreads, sizeof(*worker));
+ }
+
+ for (i = 0; i < nthreads; i++) {
+ struct worker *w = &worker[i];
+
+ if (randomize) {
+ shuffle((void *)w->fdmap, nfds, sizeof(int));
+ }
+
+ for (j = 0; j < nfds; j++) {
+ do {
+ sz = write(w->fdmap[j], &val, sizeof(val));
+ } while (!wdone && (sz < 0 && errno == EAGAIN));
+ }
+ }
+
+ nanosleep(&ts, NULL);
+ }
+
+ printinfo("exiting writer-thread (total full-loops: %zd)\n", iter);
+ return NULL;
+}
+
+static int cmpworker(const void *p1, const void *p2)
+{
+ struct worker *w1 = (struct worker *) p1;
+ struct worker *w2 = (struct worker *) p2;
+ return w1->tid > w2->tid;
+}
+
+int bench_epoll_wait(int argc, const char **argv)
+{
+ int ret = 0;
+ struct sigaction act;
+ unsigned int i;
+ struct worker *worker = NULL;
+ struct cpu_map *cpu;
+ pthread_t wthread;
+ struct rlimit rl, prevrl;
+
+ argc = parse_options(argc, argv, options, bench_epoll_wait_usage, 0);
+ if (argc) {
+ usage_with_options(bench_epoll_wait_usage, options);
+ exit(EXIT_FAILURE);
+ }
+
+ sigfillset(&act.sa_mask);
+ act.sa_sigaction = toggle_done;
+ sigaction(SIGINT, &act, NULL);
+
+ cpu = cpu_map__new(NULL);
+ if (!cpu)
+ goto errmem;
+
+ /* a single, main epoll instance */
+ if (!multiq) {
+ epollfd = epoll_create(1);
+ if (epollfd < 0)
+ err(EXIT_FAILURE, "epoll_create");
+
+ /*
+ * Deal with nested epolls, if any.
+ */
+ if (nested)
+ nest_epollfd(NULL);
+ }
+
+ printinfo("Using %s queue model\n", multiq ? "multi" : "single");
+ printinfo("Nesting level(s): %d\n", nested);
+
+ /* default to the number of CPUs and leave one for the writer pthread */
+ if (!nthreads)
+ nthreads = cpu->nr - 1;
+
+ worker = calloc(nthreads, sizeof(*worker));
+ if (!worker) {
+ goto errmem;
+ }
+
+ if (getrlimit(RLIMIT_NOFILE, &prevrl))
+ err(EXIT_FAILURE, "getrlimit");
+ rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
+ printinfo("Setting RLIMIT_NOFILE rlimit from %" PRIu64 " to: %" PRIu64 "\n",
+ (uint64_t)prevrl.rlim_max, (uint64_t)rl.rlim_max);
+ if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
+ err(EXIT_FAILURE, "setrlimit");
+
+ printf("Run summary [PID %d]: %d threads monitoring%s on "
+ "%d file-descriptors for %d secs.\n\n",
+ getpid(), nthreads, oneshot ? " (EPOLLONESHOT semantics)": "", nfds, nsecs);
+
+ init_stats(&throughput_stats);
+ pthread_mutex_init(&thread_lock, NULL);
+ pthread_cond_init(&thread_parent, NULL);
+ pthread_cond_init(&thread_worker, NULL);
+
+ threads_starting = nthreads;
+
+ gettimeofday(&start, NULL);
+
+ do_threads(worker, cpu);
+
+ pthread_mutex_lock(&thread_lock);
+ while (threads_starting)
+ pthread_cond_wait(&thread_parent, &thread_lock);
+ pthread_cond_broadcast(&thread_worker);
+ pthread_mutex_unlock(&thread_lock);
+
+ /*
+ * At this point the workers should be blocked waiting for read events
+ * to become ready. Launch the writer which will constantly be writing
+ * to each thread's fdmap.
+ */
+ ret = pthread_create(&wthread, NULL, writerfn,
+ (void *)(struct worker *) worker);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_create");
+
+ sleep(nsecs);
+ toggle_done(0, NULL, NULL);
+ printinfo("main thread: toggling done\n");
+
+ sleep(1); /* meh */
+ wdone = true;
+ ret = pthread_join(wthread, NULL);
+ if (ret)
+ err(EXIT_FAILURE, "pthread_join");
+
+ /* cleanup & report results */
+ pthread_cond_destroy(&thread_parent);
+ pthread_cond_destroy(&thread_worker);
+ pthread_mutex_destroy(&thread_lock);
+
+ /* sort the array back before reporting */
+ if (randomize)
+ qsort(worker, nthreads, sizeof(struct worker), cmpworker);
+
+ for (i = 0; i < nthreads; i++) {
+ unsigned long t = worker[i].ops/runtime.tv_sec;
+
+ update_stats(&throughput_stats, t);
+
+ if (nfds == 1)
+ printf("[thread %2d] fdmap: %p [ %04ld ops/sec ]\n",
+ worker[i].tid, &worker[i].fdmap[0], t);
+ else
+ printf("[thread %2d] fdmap: %p ... %p [ %04ld ops/sec ]\n",
+ worker[i].tid, &worker[i].fdmap[0],
+ &worker[i].fdmap[nfds-1], t);
+ }
+
+ print_summary();
+
+ close(epollfd);
+ return ret;
+errmem:
+ err(EXIT_FAILURE, "calloc");
+}
+#endif // HAVE_EVENTFD
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
index db4853f209c7..31b53cc7d5bc 100644
--- a/tools/perf/bench/futex.h
+++ b/tools/perf/bench/futex.h
@@ -86,16 +86,4 @@ futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wak
return futex(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2,
val, opflags);
}
-
-#ifndef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
-#include <pthread.h>
-#include <linux/compiler.h>
-static inline int pthread_attr_setaffinity_np(pthread_attr_t *attr __maybe_unused,
- size_t cpusetsize __maybe_unused,
- cpu_set_t *cpuset __maybe_unused)
-{
- return 0;
-}
-#endif
-
#endif /* _FUTEX_H */
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index 17a6bcd01aa6..334c77ffc1d9 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -14,6 +14,7 @@
* mem ... memory access performance
* numa ... NUMA scheduling and MM performance
* futex ... Futex performance
+ * epoll ... Event poll performance
*/
#include "perf.h"
#include "util/util.h"
@@ -67,6 +68,15 @@ static struct bench futex_benchmarks[] = {
{ NULL, NULL, NULL }
};
+#ifdef HAVE_EVENTFD
+static struct bench epoll_benchmarks[] = {
+ { "wait", "Benchmark epoll concurrent epoll_waits", bench_epoll_wait },
+ { "ctl", "Benchmark epoll concurrent epoll_ctls", bench_epoll_ctl },
+ { "all", "Run all futex benchmarks", NULL },
+ { NULL, NULL, NULL }
+};
+#endif // HAVE_EVENTFD
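+
+/* e.g. "perf bench epoll all" runs both the wait and ctl benchmarks */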
+
struct collection {
const char *name;
const char *summary;
@@ -80,6 +90,9 @@ static struct collection collections[] = {
{ "numa", "NUMA scheduling and MM benchmarks", numa_benchmarks },
#endif
{"futex", "Futex stressing benchmarks", futex_benchmarks },
+#ifdef HAVE_EVENTFD
+ {"epoll", "Epoll stressing benchmarks", epoll_benchmarks },
+#endif
{ "all", "All benchmarks", NULL },
{ NULL, NULL, NULL }
};
diff --git a/tools/perf/builtin-config.c b/tools/perf/builtin-config.c
index 514f70f95b57..d76f831f94c7 100644
--- a/tools/perf/builtin-config.c
+++ b/tools/perf/builtin-config.c
@@ -196,6 +196,7 @@ int cmd_config(int argc, const char **argv)
pr_err("Error: takes no arguments\n");
parse_options_usage(config_usage, config_options, "l", 1);
} else {
+do_action_list:
if (show_config(set) < 0) {
pr_err("Nothing configured, "
"please check your %s \n", config_filename);
@@ -204,10 +205,8 @@ int cmd_config(int argc, const char **argv)
}
break;
default:
- if (!argc) {
- usage_with_options(config_usage, config_options);
- break;
- }
+ if (!argc)
+ goto do_action_list;
for (i = 0; argv[i]; i++) {
char *var, *value;
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 1c41b4eaf73c..3d29d0524a89 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -189,7 +189,7 @@ static void add_man_viewer(const char *name)
while (*p)
p = &((*p)->next);
*p = zalloc(sizeof(**p) + len + 1);
- strncpy((*p)->name, name, len);
+ strcpy((*p)->name, name);
}
static int supported_man_viewer(const char *name, size_t len)
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 2b1ef704169f..3d4cbc4e87c7 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -1364,7 +1364,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
"show events other than"
" HLT (x86 only) or Wait state (s390 only)"
" that take longer than duration usecs"),
- OPT_UINTEGER(0, "proc-map-timeout", &kvm->opts.proc_map_timeout,
+ OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_END()
};
@@ -1394,7 +1394,6 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
kvm->opts.target.uses_mmap = false;
kvm->opts.target.uid_str = NULL;
kvm->opts.target.uid = UINT_MAX;
- kvm->opts.proc_map_timeout = 500;
symbol__init(NULL);
disable_buildid_cache();
@@ -1453,8 +1452,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
perf_session__set_id_hdr_size(kvm->session);
ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
- kvm->evlist->threads, false,
- kvm->opts.proc_map_timeout, 1);
+ kvm->evlist->threads, false, 1);
err = kvm_live_open_events(kvm);
if (err)
goto out;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 488779bc4c8d..882285fb9f64 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -124,6 +124,210 @@ static int record__write(struct record *rec, struct perf_mmap *map __maybe_unuse
return 0;
}
+#ifdef HAVE_AIO_SUPPORT
+static int record__aio_write(struct aiocb *cblock, int trace_fd,
+ void *buf, size_t size, off_t off)
+{
+ int rc;
+
+ cblock->aio_fildes = trace_fd;
+ cblock->aio_buf = buf;
+ cblock->aio_nbytes = size;
+ cblock->aio_offset = off;
+ cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
+
+ do {
+ rc = aio_write(cblock);
+ if (rc == 0) {
+ break;
+ } else if (errno != EAGAIN) {
+ cblock->aio_fildes = -1;
+ pr_err("failed to queue perf data, error: %m\n");
+ break;
+ }
+ } while (1);
+
+ return rc;
+}
+
+static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
+{
+ void *rem_buf;
+ off_t rem_off;
+ size_t rem_size;
+ int rc, aio_errno;
+ ssize_t aio_ret, written;
+
+ aio_errno = aio_error(cblock);
+ if (aio_errno == EINPROGRESS)
+ return 0;
+
+ written = aio_ret = aio_return(cblock);
+ if (aio_ret < 0) {
+ if (aio_errno != EINTR)
+ pr_err("failed to write perf data, error: %m\n");
+ written = 0;
+ }
+
+ rem_size = cblock->aio_nbytes - written;
+
+ if (rem_size == 0) {
+ cblock->aio_fildes = -1;
+ /*
+ * md->refcount is incremented in perf_mmap__push() for
+ * every enqueued aio write request so decrement it because
+ * the request is now complete.
+ */
+ perf_mmap__put(md);
+ rc = 1;
+ } else {
+ /*
+ * aio write request may require a restart with the
+ * remainder if the kernel didn't write the whole
+ * chunk at once.
+ */
+ rem_off = cblock->aio_offset + written;
+ rem_buf = (void *)(cblock->aio_buf + written);
+ record__aio_write(cblock, cblock->aio_fildes,
+ rem_buf, rem_size, rem_off);
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int record__aio_sync(struct perf_mmap *md, bool sync_all)
+{
+ struct aiocb **aiocb = md->aio.aiocb;
+ struct aiocb *cblocks = md->aio.cblocks;
+ struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
+ int i, do_suspend;
+
+ do {
+ do_suspend = 0;
+ for (i = 0; i < md->aio.nr_cblocks; ++i) {
+ if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
+ if (sync_all)
+ aiocb[i] = NULL;
+ else
+ return i;
+ } else {
+ /*
+ * A started aio write is not complete yet,
+ * so it has to be waited for before the
+ * next allocation.
+ */
+ aiocb[i] = &cblocks[i];
+ do_suspend = 1;
+ }
+ }
+ if (!do_suspend)
+ return -1;
+
+ while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
+ if (!(errno == EAGAIN || errno == EINTR))
+ pr_err("failed to sync perf data, error: %m\n");
+ }
+ } while (1);
+}
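+
+/*
+ * Callers use this in two modes (see record__mmap_read_evlist() below):
+ * record__aio_sync(map, false) returns the index of a free control block
+ * to reuse for the next aio write, while record__aio_sync(map, true)
+ * waits until all in-flight requests have completed.
+ */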
+
+static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
+{
+ struct record *rec = to;
+ int ret, trace_fd = rec->session->data->file.fd;
+
+ rec->samples++;
+
+ ret = record__aio_write(cblock, trace_fd, bf, size, off);
+ if (!ret) {
+ rec->bytes_written += size;
+ if (switch_output_size(rec))
+ trigger_hit(&switch_output_trigger);
+ }
+
+ return ret;
+}
+
+static off_t record__aio_get_pos(int trace_fd)
+{
+ return lseek(trace_fd, 0, SEEK_CUR);
+}
+
+static void record__aio_set_pos(int trace_fd, off_t pos)
+{
+ lseek(trace_fd, pos, SEEK_SET);
+}
+
+static void record__aio_mmap_read_sync(struct record *rec)
+{
+ int i;
+ struct perf_evlist *evlist = rec->evlist;
+ struct perf_mmap *maps = evlist->mmap;
+
+ if (!rec->opts.nr_cblocks)
+ return;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ struct perf_mmap *map = &maps[i];
+
+ if (map->base)
+ record__aio_sync(map, true);
+ }
+}
+
+static int nr_cblocks_default = 1;
+static int nr_cblocks_max = 4;
+
+static int record__aio_parse(const struct option *opt,
+ const char *str,
+ int unset)
+{
+ struct record_opts *opts = (struct record_opts *)opt->value;
+
+ if (unset) {
+ opts->nr_cblocks = 0;
+ } else {
+ if (str)
+ opts->nr_cblocks = strtol(str, NULL, 0);
+ if (!opts->nr_cblocks)
+ opts->nr_cblocks = nr_cblocks_default;
+ }
+
+ return 0;
+}
+#else /* HAVE_AIO_SUPPORT */
+static int nr_cblocks_max = 0;
+
+static int record__aio_sync(struct perf_mmap *md __maybe_unused, bool sync_all __maybe_unused)
+{
+ return -1;
+}
+
+static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
+ void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
+{
+ return -1;
+}
+
+static off_t record__aio_get_pos(int trace_fd __maybe_unused)
+{
+ return -1;
+}
+
+static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
+{
+}
+
+static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
+{
+}
+#endif
+
+static int record__aio_enabled(struct record *rec)
+{
+ return rec->opts.nr_cblocks > 0;
+}
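+
+/*
+ * Illustrative usage (values are arbitrary): asynchronous trace writing
+ * is enabled with e.g. "perf record --aio=2 ..." or the "record.aio"
+ * config variable; nr_cblocks is clamped to nr_cblocks_max (4).
+ */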
+
static int process_synthesized_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -329,7 +533,7 @@ static int record__mmap_evlist(struct record *rec,
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
- opts->auxtrace_snapshot_mode) < 0) {
+ opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
@@ -525,6 +729,8 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
int i;
int rc = 0;
struct perf_mmap *maps;
+ int trace_fd = rec->data.file.fd;
+ off_t off;
if (!evlist)
return 0;
@@ -536,13 +742,30 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
return 0;
+ if (record__aio_enabled(rec))
+ off = record__aio_get_pos(trace_fd);
+
for (i = 0; i < evlist->nr_mmaps; i++) {
struct perf_mmap *map = &maps[i];
if (map->base) {
- if (perf_mmap__push(map, rec, record__pushfn) != 0) {
- rc = -1;
- goto out;
+ if (!record__aio_enabled(rec)) {
+ if (perf_mmap__push(map, rec, record__pushfn) != 0) {
+ rc = -1;
+ goto out;
+ }
+ } else {
+ int idx;
+ /*
+ * Call record__aio_sync() to wait till map->data buffer
+ * becomes available after previous aio write request.
+ */
+ idx = record__aio_sync(map, false);
+ if (perf_mmap__aio_push(map, rec, idx, record__aio_pushfn, &off) != 0) {
+ record__aio_set_pos(trace_fd, off);
+ rc = -1;
+ goto out;
+ }
}
}
@@ -553,6 +776,9 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
}
}
+ if (record__aio_enabled(rec))
+ record__aio_set_pos(trace_fd, off);
+
/*
* Mark the round finished in case we wrote
* at least one event.
@@ -641,8 +867,7 @@ static int record__synthesize_workload(struct record *rec, bool tail)
err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
process_synthesized_event,
&rec->session->machines.host,
- rec->opts.sample_address,
- rec->opts.proc_map_timeout);
+ rec->opts.sample_address);
thread_map__put(thread_map);
return err;
}
@@ -658,6 +883,8 @@ record__switch_output(struct record *rec, bool at_exit)
/* Same Size: "2015122520103046"*/
char timestamp[] = "InvalidTimestamp";
+ record__aio_mmap_read_sync(rec);
+
record__synthesize(rec, true);
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, true);
@@ -857,7 +1084,7 @@ static int record__synthesize(struct record *rec, bool tail)
err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
process_synthesized_event, opts->sample_address,
- opts->proc_map_timeout, 1);
+ 1);
out:
return err;
}
@@ -1168,6 +1395,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
record__synthesize_workload(rec, true);
out_child:
+ record__aio_mmap_read_sync(rec);
+
if (forks) {
int exit_status;
@@ -1301,6 +1530,13 @@ static int perf_record_config(const char *var, const char *value, void *cb)
var = "call-graph.record-mode";
return perf_default_config(var, value, cb);
}
+#ifdef HAVE_AIO_SUPPORT
+ if (!strcmp(var, "record.aio")) {
+ rec->opts.nr_cblocks = strtol(value, NULL, 0);
+ if (!rec->opts.nr_cblocks)
+ rec->opts.nr_cblocks = nr_cblocks_default;
+ }
+#endif
return 0;
}
@@ -1546,7 +1782,6 @@ static struct record record = {
.uses_mmap = true,
.default_per_cpu = true,
},
- .proc_map_timeout = 500,
},
.tool = {
.sample = process_sample_event,
@@ -1676,7 +1911,7 @@ static struct option __record_options[] = {
parse_clockid),
OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
"opts", "AUX area tracing Snapshot Mode", ""),
- OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
+ OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
"Record namespaces events"),
@@ -1706,6 +1941,11 @@ static struct option __record_options[] = {
"signal"),
OPT_BOOLEAN(0, "dry-run", &dry_run,
"Parse options then exit"),
+#ifdef HAVE_AIO_SUPPORT
+ OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
+ &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
+ record__aio_parse),
+#endif
OPT_END()
};
@@ -1898,6 +2138,11 @@ int cmd_record(int argc, const char **argv)
goto out;
}
+ if (rec->opts.nr_cblocks > nr_cblocks_max)
+ rec->opts.nr_cblocks = nr_cblocks_max;
+ if (verbose > 0)
+ pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
+
err = __cmd_record(&record, argc, argv);
out:
perf_evlist__delete(rec->evlist);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 257c9c18cb7e..4958095be4fc 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -85,6 +85,7 @@ struct report {
int socket_filter;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
struct branch_type_stat brtype_stat;
+ bool symbol_ipc;
};
static int report__config(const char *var, const char *value, void *cb)
@@ -129,7 +130,7 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
struct mem_info *mi;
struct branch_info *bi;
- if (!ui__has_annotation())
+ if (!ui__has_annotation() && !rep->symbol_ipc)
return 0;
hist__account_cycles(sample->branch_stack, al, sample,
@@ -174,7 +175,7 @@ static int hist_iter__branch_callback(struct hist_entry_iter *iter,
struct perf_evsel *evsel = iter->evsel;
int err;
- if (!ui__has_annotation())
+ if (!ui__has_annotation() && !rep->symbol_ipc)
return 0;
hist__account_cycles(sample->branch_stack, al, sample,
@@ -1133,6 +1134,7 @@ int cmd_report(int argc, const char **argv)
.mode = PERF_DATA_MODE_READ,
};
int ret = hists__init();
+ char sort_tmp[128];
if (ret < 0)
return ret;
@@ -1284,6 +1286,24 @@ repeat:
else
use_browser = 0;
+ if (sort_order && strstr(sort_order, "ipc")) {
+ parse_options_usage(report_usage, options, "s", 1);
+ goto error;
+ }
+
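+ /*
+ * When sorting by symbol, transparently append an IPC column: e.g. a
+ * "-s symbol" sort key becomes "symbol,ipc_lbr" in branch mode and
+ * "symbol,ipc_null" otherwise.
+ */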
+ if (sort_order && strstr(sort_order, "symbol")) {
+ if (sort__mode == SORT_MODE__BRANCH) {
+ snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
+ sort_order, "ipc_lbr");
+ report.symbol_ipc = true;
+ } else {
+ snprintf(sort_tmp, sizeof(sort_tmp), "%s,%s",
+ sort_order, "ipc_null");
+ }
+
+ sort_order = sort_tmp;
+ }
+
if (setup_sorting(session->evlist) < 0) {
if (sort_order)
parse_options_usage(report_usage, options, "s", 1);
@@ -1311,7 +1331,7 @@ repeat:
* so don't allocate extra space that won't be used in the stdio
* implementation.
*/
- if (ui__has_annotation()) {
+ if (ui__has_annotation() || report.symbol_ipc) {
ret = symbol__annotation_init();
if (ret < 0)
goto error;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index b5bc85bd0bbe..3728b50e52e2 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -96,6 +96,7 @@ enum perf_output_field {
PERF_OUTPUT_UREGS = 1U << 27,
PERF_OUTPUT_METRIC = 1U << 28,
PERF_OUTPUT_MISC = 1U << 29,
+ PERF_OUTPUT_SRCCODE = 1U << 30,
};
struct output_option {
@@ -132,6 +133,7 @@ struct output_option {
{.str = "phys_addr", .field = PERF_OUTPUT_PHYS_ADDR},
{.str = "metric", .field = PERF_OUTPUT_METRIC},
{.str = "misc", .field = PERF_OUTPUT_MISC},
+ {.str = "srccode", .field = PERF_OUTPUT_SRCCODE},
};
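+
+/* e.g. "perf script -F +srccode" (illustrative) interleaves source lines with samples */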
enum {
@@ -424,7 +426,7 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
pr_err("Display of DSO requested but no address to convert.\n");
return -EINVAL;
}
- if (PRINT_FIELD(SRCLINE) && !PRINT_FIELD(IP)) {
+ if ((PRINT_FIELD(SRCLINE) || PRINT_FIELD(SRCCODE)) && !PRINT_FIELD(IP)) {
pr_err("Display of source line number requested but sample IP is not\n"
"selected. Hence, no address to lookup the source line number.\n");
return -EINVAL;
@@ -566,44 +568,40 @@ out:
return 0;
}
-static int perf_sample__fprintf_iregs(struct perf_sample *sample,
- struct perf_event_attr *attr, FILE *fp)
+static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask,
+ FILE *fp)
{
- struct regs_dump *regs = &sample->intr_regs;
- uint64_t mask = attr->sample_regs_intr;
unsigned i = 0, r;
int printed = 0;
- if (!regs)
+ if (!regs || !regs->regs)
return 0;
+ printed += fprintf(fp, " ABI:%" PRIu64 " ", regs->abi);
+
for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs->regs[i++];
printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val);
}
+ fprintf(fp, "\n");
+
return printed;
}
-static int perf_sample__fprintf_uregs(struct perf_sample *sample,
+static int perf_sample__fprintf_iregs(struct perf_sample *sample,
struct perf_event_attr *attr, FILE *fp)
{
- struct regs_dump *regs = &sample->user_regs;
- uint64_t mask = attr->sample_regs_user;
- unsigned i = 0, r;
- int printed = 0;
-
- if (!regs || !regs->regs)
- return 0;
-
- printed += fprintf(fp, " ABI:%" PRIu64 " ", regs->abi);
-
- for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
- u64 val = regs->regs[i++];
- printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val);
- }
+ return perf_sample__fprintf_regs(&sample->intr_regs,
+ attr->sample_regs_intr, fp);
+}
- return printed;
+static int perf_sample__fprintf_uregs(struct perf_sample *sample,
+ struct perf_event_attr *attr, FILE *fp)
+{
+ return perf_sample__fprintf_regs(&sample->user_regs,
+ attr->sample_regs_user, fp);
}
static int perf_sample__fprintf_start(struct perf_sample *sample,
@@ -728,8 +726,8 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
if (PRINT_FIELD(DSO)) {
memset(&alf, 0, sizeof(alf));
memset(&alt, 0, sizeof(alt));
- thread__find_map(thread, sample->cpumode, from, &alf);
- thread__find_map(thread, sample->cpumode, to, &alt);
+ thread__find_map_fb(thread, sample->cpumode, from, &alf);
+ thread__find_map_fb(thread, sample->cpumode, to, &alt);
}
printed += fprintf(fp, " 0x%"PRIx64, from);
@@ -775,8 +773,8 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
from = br->entries[i].from;
to = br->entries[i].to;
- thread__find_symbol(thread, sample->cpumode, from, &alf);
- thread__find_symbol(thread, sample->cpumode, to, &alt);
+ thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
+ thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
if (PRINT_FIELD(DSO)) {
@@ -820,11 +818,11 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
from = br->entries[i].from;
to = br->entries[i].to;
- if (thread__find_map(thread, sample->cpumode, from, &alf) &&
+ if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
!alf.map->dso->adjust_symbols)
from = map__map_ip(alf.map, from);
- if (thread__find_map(thread, sample->cpumode, to, &alt) &&
+ if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
!alt.map->dso->adjust_symbols)
to = map__map_ip(alt.map, to);
@@ -911,6 +909,22 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
return len;
}
+static int print_srccode(struct thread *thread, u8 cpumode, uint64_t addr)
+{
+ struct addr_location al;
+ int ret = 0;
+
+ memset(&al, 0, sizeof(al));
+ thread__find_map(thread, cpumode, addr, &al);
+ if (!al.map)
+ return 0;
+ ret = map__fprintf_srccode(al.map, al.addr, stdout,
+ &thread->srccode_state);
+ if (ret)
+ ret += printf("\n");
+ return ret;
+}
+
static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
struct perf_insn *x, u8 *inbuf, int len,
int insn, FILE *fp, int *total_cycles)
@@ -1002,6 +1016,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
x.cpumode, x.cpu, &lastsym, attr, fp);
printed += ip__fprintf_jump(br->entries[nr - 1].from, &br->entries[nr - 1],
&x, buffer, len, 0, fp, &total_cycles);
+ if (PRINT_FIELD(SRCCODE))
+ printed += print_srccode(thread, x.cpumode, br->entries[nr - 1].from);
}
/* Print all blocks */
@@ -1031,12 +1047,16 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
if (ip == end) {
printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp,
&total_cycles);
+ if (PRINT_FIELD(SRCCODE))
+ printed += print_srccode(thread, x.cpumode, ip);
break;
} else {
printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip,
dump_insn(&x, ip, buffer + off, len - off, &ilen));
if (ilen == 0)
break;
+ if (PRINT_FIELD(SRCCODE))
+ print_srccode(thread, x.cpumode, ip);
insn++;
}
}
@@ -1067,6 +1087,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", sample->ip,
dump_insn(&x, sample->ip, buffer, len, NULL));
+ if (PRINT_FIELD(SRCCODE))
+ print_srccode(thread, x.cpumode, sample->ip);
goto out;
}
for (off = 0; off <= end - start; off += ilen) {
@@ -1074,6 +1096,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
dump_insn(&x, start + off, buffer + off, len - off, &ilen));
if (ilen == 0)
break;
+ if (PRINT_FIELD(SRCCODE))
+ print_srccode(thread, x.cpumode, start + off);
}
out:
return printed;
@@ -1256,7 +1280,16 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp);
printed += perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
- return printed + fprintf(fp, "\n");
+ printed += fprintf(fp, "\n");
+ if (PRINT_FIELD(SRCCODE)) {
+ int ret = map__fprintf_srccode(al->map, al->addr, stdout,
+ &thread->srccode_state);
+ if (ret) {
+ printed += ret;
+ printed += printf("\n");
+ }
+ }
+ return printed;
}
static struct {
@@ -1796,6 +1829,12 @@ static void process_event(struct perf_script *script,
fprintf(fp, "%16" PRIx64, sample->phys_addr);
fprintf(fp, "\n");
+ if (PRINT_FIELD(SRCCODE)) {
+ if (map__fprintf_srccode(al->map, al->addr, stdout,
+ &thread->srccode_state))
+ printf("\n");
+ }
+
if (PRINT_FIELD(METRIC))
perf_sample__fprint_metric(script, thread, evsel, sample, fp);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index a635abfa77b6..1410d66192f7 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -709,7 +709,7 @@ static int parse_metric_groups(const struct option *opt,
return metricgroup__parse_groups(opt, str, &stat_config.metric_events);
}
-static const struct option stat_options[] = {
+static struct option stat_options[] = {
OPT_BOOLEAN('T', "transaction", &transaction_run,
"hardware transaction statistics"),
OPT_CALLBACK('e', "event", &evsel_list, "event",
@@ -1599,6 +1599,12 @@ int cmd_stat(int argc, const char **argv)
return -ENOMEM;
parse_events__shrink_config_terms();
+
+ /* String-parsing callback-based options would segfault when negated */
+ set_option_flag(stat_options, 'e', "event", PARSE_OPT_NONEG);
+ set_option_flag(stat_options, 'M', "metrics", PARSE_OPT_NONEG);
+ set_option_flag(stat_options, 'G', "cgroup", PARSE_OPT_NONEG);
+
argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
(const char **) stat_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index a827919c6263..775b99833e51 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -43,6 +43,10 @@
#include "util/data.h"
#include "util/debug.h"
+#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
+FILE *open_memstream(char **ptr, size_t *sizeloc);
+#endif
+
#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index aa0c73e57924..fe3ecfb2e64b 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -46,6 +46,7 @@
#include "arch/common.h"
#include "util/debug.h"
+#include "util/ordered-events.h"
#include <assert.h>
#include <elf.h>
@@ -272,8 +273,6 @@ static void perf_top__print_sym_table(struct perf_top *top)
perf_top__header_snprintf(top, bf, sizeof(bf));
printf("%s\n", bf);
- perf_top__reset_sample_counters(top);
-
printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
if (!top->record_opts.overwrite &&
@@ -553,8 +552,6 @@ static void perf_top__sort_new_samples(void *arg)
struct perf_evsel *evsel = t->sym_evsel;
struct hists *hists;
- perf_top__reset_sample_counters(t);
-
if (t->evlist->selected != NULL)
t->sym_evsel = t->evlist->selected;
@@ -571,6 +568,15 @@ static void perf_top__sort_new_samples(void *arg)
hists__collapse_resort(hists, NULL);
perf_evsel__output_resort(evsel, NULL);
+
+ if (t->lost || t->drop)
+ pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C)\n");
+}
+
+static void stop_top(void)
+{
+ session_done = 1;
+ done = 1;
}
static void *display_thread_tui(void *arg)
@@ -595,7 +601,7 @@ static void *display_thread_tui(void *arg)
/*
* Initialize the uid_filter_str, in the future the TUI will allow
- * Zooming in/out UIDs. For now juse use whatever the user passed
+ * Zooming in/out UIDs. For now just use whatever the user passed
* via --uid.
*/
evlist__for_each_entry(top->evlist, pos) {
@@ -609,13 +615,13 @@ static void *display_thread_tui(void *arg)
!top->record_opts.overwrite,
&top->annotation_opts);
- done = 1;
+ stop_top();
return NULL;
}
static void display_sig(int sig __maybe_unused)
{
- done = 1;
+ stop_top();
}
static void display_setup_sig(void)
@@ -668,7 +674,7 @@ repeat:
if (perf_top__handle_keypress(top, c))
goto repeat;
- done = 1;
+ stop_top();
}
}
@@ -800,78 +806,61 @@ static void perf_event__process_sample(struct perf_tool *tool,
addr_location__put(&al);
}
+static void
+perf_top__process_lost(struct perf_top *top, union perf_event *event,
+ struct perf_evsel *evsel)
+{
+ struct hists *hists = evsel__hists(evsel);
+
+ top->lost += event->lost.lost;
+ top->lost_total += event->lost.lost;
+ hists->stats.total_lost += event->lost.lost;
+}
+
+static void
+perf_top__process_lost_samples(struct perf_top *top,
+ union perf_event *event,
+ struct perf_evsel *evsel)
+{
+ struct hists *hists = evsel__hists(evsel);
+
+ top->lost += event->lost_samples.lost;
+ top->lost_total += event->lost_samples.lost;
+ hists->stats.total_lost_samples += event->lost_samples.lost;
+}
+
+static u64 last_timestamp;
+
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
struct record_opts *opts = &top->record_opts;
struct perf_evlist *evlist = top->evlist;
- struct perf_sample sample;
- struct perf_evsel *evsel;
struct perf_mmap *md;
- struct perf_session *session = top->session;
union perf_event *event;
- struct machine *machine;
- int ret;
md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
if (perf_mmap__read_init(md) < 0)
return;
while ((event = perf_mmap__read_event(md)) != NULL) {
- ret = perf_evlist__parse_sample(evlist, event, &sample);
- if (ret) {
- pr_err("Can't parse sample, err = %d\n", ret);
- goto next_event;
- }
+ int ret;
- evsel = perf_evlist__id2evsel(session->evlist, sample.id);
- assert(evsel != NULL);
-
- if (event->header.type == PERF_RECORD_SAMPLE)
- ++top->samples;
-
- switch (sample.cpumode) {
- case PERF_RECORD_MISC_USER:
- ++top->us_samples;
- if (top->hide_user_symbols)
- goto next_event;
- machine = &session->machines.host;
- break;
- case PERF_RECORD_MISC_KERNEL:
- ++top->kernel_samples;
- if (top->hide_kernel_symbols)
- goto next_event;
- machine = &session->machines.host;
- break;
- case PERF_RECORD_MISC_GUEST_KERNEL:
- ++top->guest_kernel_samples;
- machine = perf_session__find_machine(session,
- sample.pid);
- break;
- case PERF_RECORD_MISC_GUEST_USER:
- ++top->guest_us_samples;
- /*
- * TODO: we don't process guest user from host side
- * except simple counting.
- */
- goto next_event;
- default:
- if (event->header.type == PERF_RECORD_SAMPLE)
- goto next_event;
- machine = &session->machines.host;
+ ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
+ if (ret && ret != -1)
break;
- }
+ ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0);
+ if (ret)
+ break;
- if (event->header.type == PERF_RECORD_SAMPLE) {
- perf_event__process_sample(&top->tool, event, evsel,
- &sample, machine);
- } else if (event->header.type < PERF_RECORD_MAX) {
- hists__inc_nr_events(evsel__hists(evsel), event->header.type);
- machine__process_event(machine, event, &sample);
- } else
- ++session->evlist->stats.nr_unknown_events;
-next_event:
perf_mmap__consume(md);
+
+ if (top->qe.rotate) {
+ pthread_mutex_lock(&top->qe.mutex);
+ top->qe.rotate = false;
+ pthread_cond_signal(&top->qe.cond);
+ pthread_mutex_unlock(&top->qe.mutex);
+ }
}
perf_mmap__read_done(md);
@@ -881,10 +870,8 @@ static void perf_top__mmap_read(struct perf_top *top)
{
bool overwrite = top->record_opts.overwrite;
struct perf_evlist *evlist = top->evlist;
- unsigned long long start, end;
int i;
- start = rdclock();
if (overwrite)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
@@ -895,13 +882,6 @@ static void perf_top__mmap_read(struct perf_top *top)
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
}
- end = rdclock();
-
- if ((end - start) > (unsigned long long)top->delay_secs * NSEC_PER_SEC)
- ui__warning("Too slow to read ring buffer.\n"
- "Please try increasing the period (-c) or\n"
- "decreasing the freq (-F) or\n"
- "limiting the number of CPUs (-C)\n");
}
/*
@@ -1063,6 +1043,150 @@ static int callchain_param__setup_sample_type(struct callchain_param *callchain)
return 0;
}
+static struct ordered_events *rotate_queues(struct perf_top *top)
+{
+ struct ordered_events *in = top->qe.in;
+
+ if (top->qe.in == &top->qe.data[1])
+ top->qe.in = &top->qe.data[0];
+ else
+ top->qe.in = &top->qe.data[1];
+
+ return in;
+}
+
+static void *process_thread(void *arg)
+{
+ struct perf_top *top = arg;
+
+ while (!done) {
+ struct ordered_events *out, *in = top->qe.in;
+
+ if (!in->nr_events) {
+ usleep(100);
+ continue;
+ }
+
+ out = rotate_queues(top);
+
+ pthread_mutex_lock(&top->qe.mutex);
+ top->qe.rotate = true;
+ pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
+ pthread_mutex_unlock(&top->qe.mutex);
+
+ if (ordered_events__flush(out, OE_FLUSH__TOP))
+ pr_err("failed to process events\n");
+ }
+
+ return NULL;
+}
+
+/*
+ * Drop samples that lag more than 'top->delay_secs' seconds behind
+ * the newest timestamp seen.
+ */
+static int should_drop(struct ordered_event *qevent, struct perf_top *top)
+{
+ union perf_event *event = qevent->event;
+ u64 delay_timestamp;
+
+ if (event->header.type != PERF_RECORD_SAMPLE)
+ return false;
+
+ delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
+ return delay_timestamp < last_timestamp;
+}
+
+static int deliver_event(struct ordered_events *qe,
+ struct ordered_event *qevent)
+{
+ struct perf_top *top = qe->data;
+ struct perf_evlist *evlist = top->evlist;
+ struct perf_session *session = top->session;
+ union perf_event *event = qevent->event;
+ struct perf_sample sample;
+ struct perf_evsel *evsel;
+ struct machine *machine;
+ int ret = -1;
+
+ if (should_drop(qevent, top)) {
+ top->drop++;
+ top->drop_total++;
+ return 0;
+ }
+
+ ret = perf_evlist__parse_sample(evlist, event, &sample);
+ if (ret) {
+ pr_err("Can't parse sample, err = %d\n", ret);
+ goto next_event;
+ }
+
+ evsel = perf_evlist__id2evsel(session->evlist, sample.id);
+ assert(evsel != NULL);
+
+ if (event->header.type == PERF_RECORD_SAMPLE)
+ ++top->samples;
+
+ switch (sample.cpumode) {
+ case PERF_RECORD_MISC_USER:
+ ++top->us_samples;
+ if (top->hide_user_symbols)
+ goto next_event;
+ machine = &session->machines.host;
+ break;
+ case PERF_RECORD_MISC_KERNEL:
+ ++top->kernel_samples;
+ if (top->hide_kernel_symbols)
+ goto next_event;
+ machine = &session->machines.host;
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ ++top->guest_kernel_samples;
+ machine = perf_session__find_machine(session,
+ sample.pid);
+ break;
+ case PERF_RECORD_MISC_GUEST_USER:
+ ++top->guest_us_samples;
+ /*
+ * TODO: we don't process guest user from host side
+ * except simple counting.
+ */
+ goto next_event;
+ default:
+ if (event->header.type == PERF_RECORD_SAMPLE)
+ goto next_event;
+ machine = &session->machines.host;
+ break;
+ }
+
+ if (event->header.type == PERF_RECORD_SAMPLE) {
+ perf_event__process_sample(&top->tool, event, evsel,
+ &sample, machine);
+ } else if (event->header.type == PERF_RECORD_LOST) {
+ perf_top__process_lost(top, event, evsel);
+ } else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
+ perf_top__process_lost_samples(top, event, evsel);
+ } else if (event->header.type < PERF_RECORD_MAX) {
+ hists__inc_nr_events(evsel__hists(evsel), event->header.type);
+ machine__process_event(machine, event, &sample);
+ } else
+ ++session->evlist->stats.nr_unknown_events;
+
+ ret = 0;
+next_event:
+ return ret;
+}
+
+static void init_process_thread(struct perf_top *top)
+{
+ ordered_events__init(&top->qe.data[0], deliver_event, top);
+ ordered_events__init(&top->qe.data[1], deliver_event, top);
+ ordered_events__set_copy_on_queue(&top->qe.data[0], true);
+ ordered_events__set_copy_on_queue(&top->qe.data[1], true);
+ top->qe.in = &top->qe.data[0];
+ pthread_mutex_init(&top->qe.mutex, NULL);
+ pthread_cond_init(&top->qe.cond, NULL);
+}
+
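Taken together, rotate_queues(), process_thread() and the rotate handshake in perf_top__mmap_read_idx() above implement a double-buffered handoff: the reader appends to qe.in while the process thread flushes the other ordered_events queue, and the queue swap is acknowledged over qe.mutex/qe.cond so the reader never appends to a queue being flushed. A minimal standalone sketch of the same pattern (names hypothetical, not perf code):

#include <pthread.h>
#include <stdbool.h>

struct ping_pong {
	int data[2];			/* stand-ins for the two queues */
	int *in;			/* queue currently accepting events */
	bool rotate;
	pthread_mutex_t mutex;
	pthread_cond_t cond;
};

/* Processing side: swap queues, then wait for the reader to ack. */
static int *swap_and_wait(struct ping_pong *pp)
{
	int *out = pp->in;

	pp->in = (pp->in == &pp->data[0]) ? &pp->data[1] : &pp->data[0];

	pthread_mutex_lock(&pp->mutex);
	pp->rotate = true;
	pthread_cond_wait(&pp->cond, &pp->mutex);
	pthread_mutex_unlock(&pp->mutex);
	return out;		/* safe to flush: reader saw the new 'in' */
}

/* Reader side, run after consuming each event. */
static void ack_rotation(struct ping_pong *pp)
{
	if (pp->rotate) {
		pthread_mutex_lock(&pp->mutex);
		pp->rotate = false;
		pthread_cond_signal(&pp->cond);
		pthread_mutex_unlock(&pp->mutex);
	}
}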
static int __cmd_top(struct perf_top *top)
{
char msg[512];
@@ -1070,7 +1194,7 @@ static int __cmd_top(struct perf_top *top)
struct perf_evsel_config_term *err_term;
struct perf_evlist *evlist = top->evlist;
struct record_opts *opts = &top->record_opts;
- pthread_t thread;
+ pthread_t thread, thread_process;
int ret;
top->session = perf_session__new(NULL, false, NULL);
@@ -1094,9 +1218,10 @@ static int __cmd_top(struct perf_top *top)
if (top->nr_threads_synthesize > 1)
perf_set_multithreaded();
+ init_process_thread(top);
+
machine__synthesize_threads(&top->session->machines.host, &opts->target,
top->evlist->threads, false,
- opts->proc_map_timeout,
top->nr_threads_synthesize);
if (top->nr_threads_synthesize > 1)
@@ -1135,10 +1260,15 @@ static int __cmd_top(struct perf_top *top)
perf_evlist__enable(top->evlist);
ret = -1;
+ if (pthread_create(&thread_process, NULL, process_thread, top)) {
+ ui__error("Could not create process thread.\n");
+ goto out_delete;
+ }
+
if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
display_thread), top)) {
ui__error("Could not create display thread.\n");
- goto out_delete;
+ goto out_join_thread;
}
if (top->realtime_prio) {
@@ -1173,6 +1303,9 @@ static int __cmd_top(struct perf_top *top)
ret = 0;
out_join:
pthread_join(thread, NULL);
+out_join_thread:
+ pthread_cond_signal(&top->qe.cond);
+ pthread_join(thread_process, NULL);
out_delete:
perf_session__delete(top->session);
top->session = NULL;
@@ -1256,7 +1389,6 @@ int cmd_top(int argc, const char **argv)
.target = {
.uses_mmap = true,
},
- .proc_map_timeout = 500,
/*
* FIXME: This will lose PERF_RECORD_MMAP and other metadata
* when we pause, fix that and reenable. Probably using a
@@ -1265,6 +1397,7 @@ int cmd_top(int argc, const char **argv)
* stays in overwrite mode. -acme
* */
.overwrite = 0,
+ .sample_time = true,
},
.max_stack = sysctl__max_stack(),
.annotation_opts = annotation__default_options,
@@ -1289,6 +1422,8 @@ int cmd_top(int argc, const char **argv)
"file", "vmlinux pathname"),
OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
"don't load vmlinux even if found"),
+ OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
+ "file", "kallsyms pathname"),
OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
"hide kernel symbols"),
OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
@@ -1367,7 +1502,7 @@ int cmd_top(int argc, const char **argv)
OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
"width[,width...]",
"don't try to adjust column width, use these fixed values"),
- OPT_UINTEGER(0, "proc-map-timeout", &opts->proc_map_timeout,
+ OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
"branch any", "sample any taken branches",
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 835619476370..ebde59e61133 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -18,9 +18,11 @@
#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
+#include <bpf/bpf.h>
#include "builtin.h"
#include "util/cgroup.h"
#include "util/color.h"
+#include "util/config.h"
#include "util/debug.h"
#include "util/env.h"
#include "util/event.h"
@@ -75,6 +77,7 @@ struct trace {
struct {
int max;
struct syscall *table;
+ struct bpf_map *map;
struct {
struct perf_evsel *sys_enter,
*sys_exit,
@@ -99,6 +102,7 @@ struct trace {
struct {
size_t nr;
pid_t *entries;
+ struct bpf_map *map;
} filter_pids;
double duration_filter;
double runtime_ms;
@@ -108,6 +112,7 @@ struct trace {
} stats;
unsigned int max_stack;
unsigned int min_stack;
+ bool sort_events;
bool raw_augmented_syscalls;
bool not_ev_qualifier;
bool live;
@@ -122,9 +127,19 @@ struct trace {
bool show_tool_stats;
bool trace_syscalls;
bool kernel_syscallchains;
+ s16 args_alignment;
+ bool show_tstamp;
+ bool show_duration;
+ bool show_zeros;
+ bool show_arg_names;
+ bool show_string_prefix;
bool force;
bool vfs_getname;
int trace_pgfaults;
+ struct {
+ struct ordered_events data;
+ u64 last;
+ } oe;
};
struct tp_field {
@@ -256,7 +271,8 @@ static int perf_evsel__init_syscall_tp(struct perf_evsel *evsel)
struct syscall_tp *sc = evsel->priv = malloc(sizeof(struct syscall_tp));
if (evsel->priv != NULL) {
- if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr"))
+ if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
+ perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
goto out_delete;
return 0;
}
@@ -345,21 +361,25 @@ out_delete:
({ struct syscall_tp *fields = evsel->priv; \
fields->name.pointer(&fields->name, sample); })
-size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, int val)
+size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
int idx = val - sa->offset;
- if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL)
- return scnprintf(bf, size, intfmt, val);
+ if (idx < 0 || idx >= sa->nr_entries || sa->entries[idx] == NULL) {
+ size_t printed = scnprintf(bf, size, intfmt, val);
+ if (show_prefix)
+ printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sa->prefix);
+ return printed;
+ }
- return scnprintf(bf, size, "%s", sa->entries[idx]);
+ return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}
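A standalone mimic (simplified, not the perf structs) of the show_prefix behaviour above: known values render as prefix plus name, unknown ones fall back to the integer with a "PREFIX???" hint when prefixes were requested.

#include <stdio.h>

static const char *whence_names[] = { "SET", "CUR", "END" };

static int whence_snprintf(char *bf, size_t size, int val, int show_prefix)
{
	if (val >= 0 && val < 3)
		return snprintf(bf, size, "%s%s",
				show_prefix ? "SEEK_" : "", whence_names[val]);
	if (show_prefix)
		return snprintf(bf, size, "%d /* SEEK_??? */", val);
	return snprintf(bf, size, "%d", val);
}

int main(void)
{
	char bf[64];

	whence_snprintf(bf, sizeof(bf), 1, 1);
	printf("%s\n", bf);		/* SEEK_CUR */
	whence_snprintf(bf, sizeof(bf), 9, 1);
	printf("%s\n", bf);		/* 9 with a SEEK_??? hint */
	return 0;
}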
static size_t __syscall_arg__scnprintf_strarray(char *bf, size_t size,
const char *intfmt,
struct syscall_arg *arg)
{
- return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->val);
+ return strarray__scnprintf(arg->parm, bf, size, intfmt, arg->show_string_prefix, arg->val);
}
static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
@@ -370,34 +390,32 @@ static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
#define SCA_STRARRAY syscall_arg__scnprintf_strarray
-struct strarrays {
- int nr_entries;
- struct strarray **entries;
-};
-
-#define DEFINE_STRARRAYS(array) struct strarrays strarrays__##array = { \
- .nr_entries = ARRAY_SIZE(array), \
- .entries = array, \
-}
-
-size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
- struct syscall_arg *arg)
+size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val)
{
- struct strarrays *sas = arg->parm;
+ size_t printed;
int i;
for (i = 0; i < sas->nr_entries; ++i) {
struct strarray *sa = sas->entries[i];
- int idx = arg->val - sa->offset;
+ int idx = val - sa->offset;
if (idx >= 0 && idx < sa->nr_entries) {
if (sa->entries[idx] == NULL)
break;
- return scnprintf(bf, size, "%s", sa->entries[idx]);
+ return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", sa->entries[idx]);
}
}
- return scnprintf(bf, size, "%d", arg->val);
+ printed = scnprintf(bf, size, intfmt, val);
+ if (show_prefix)
+ printed += scnprintf(bf + printed, size - printed, " /* %s??? */", sas->entries[0]->prefix);
+ return printed;
+}
+
+size_t syscall_arg__scnprintf_strarrays(char *bf, size_t size,
+ struct syscall_arg *arg)
+{
+ return strarrays__scnprintf(arg->parm, bf, size, "%d", arg->show_string_prefix, arg->val);
}
#ifndef AT_FDCWD
@@ -408,9 +426,10 @@ static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
struct syscall_arg *arg)
{
int fd = arg->val;
+ const char *prefix = "AT_FD";
if (fd == AT_FDCWD)
- return scnprintf(bf, size, "CWD");
+ return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "CWD");
return syscall_arg__scnprintf_fd(bf, size, arg);
}
@@ -427,6 +446,13 @@ size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg
return scnprintf(bf, size, "%#lx", arg->val);
}
+size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg)
+{
+ if (arg->val == 0)
+ return scnprintf(bf, size, "NULL");
+ return syscall_arg__scnprintf_hex(bf, size, arg);
+}
+
size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg)
{
return scnprintf(bf, size, "%d", arg->val);
@@ -441,13 +467,13 @@ static const char *bpf_cmd[] = {
"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
"MAP_GET_NEXT_KEY", "PROG_LOAD",
};
-static DEFINE_STRARRAY(bpf_cmd);
+static DEFINE_STRARRAY(bpf_cmd, "BPF_");
static const char *epoll_ctl_ops[] = { "ADD", "DEL", "MOD", };
-static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, 1);
+static DEFINE_STRARRAY_OFFSET(epoll_ctl_ops, "EPOLL_CTL_", 1);
static const char *itimers[] = { "REAL", "VIRTUAL", "PROF", };
-static DEFINE_STRARRAY(itimers);
+static DEFINE_STRARRAY(itimers, "ITIMER_");
static const char *keyctl_options[] = {
"GET_KEYRING_ID", "JOIN_SESSION_KEYRING", "UPDATE", "REVOKE", "CHOWN",
@@ -456,7 +482,7 @@ static const char *keyctl_options[] = {
"ASSUME_AUTHORITY", "GET_SECURITY", "SESSION_TO_PARENT", "REJECT",
"INSTANTIATE_IOV", "INVALIDATE", "GET_PERSISTENT",
};
-static DEFINE_STRARRAY(keyctl_options);
+static DEFINE_STRARRAY(keyctl_options, "KEYCTL_");
static const char *whences[] = { "SET", "CUR", "END",
#ifdef SEEK_DATA
@@ -466,7 +492,7 @@ static const char *whences[] = { "SET", "CUR", "END",
"HOLE",
#endif
};
-static DEFINE_STRARRAY(whences);
+static DEFINE_STRARRAY(whences, "SEEK_");
static const char *fcntl_cmds[] = {
"DUPFD", "GETFD", "SETFD", "GETFL", "SETFL", "GETLK", "SETLK",
@@ -474,7 +500,7 @@ static const char *fcntl_cmds[] = {
"SETLK64", "SETLKW64", "SETOWN_EX", "GETOWN_EX",
"GETOWNER_UIDS",
};
-static DEFINE_STRARRAY(fcntl_cmds);
+static DEFINE_STRARRAY(fcntl_cmds, "F_");
static const char *fcntl_linux_specific_cmds[] = {
"SETLEASE", "GETLEASE", "NOTIFY", [5] = "CANCELLK", "DUPFD_CLOEXEC",
@@ -482,7 +508,7 @@ static const char *fcntl_linux_specific_cmds[] = {
"GET_RW_HINT", "SET_RW_HINT", "GET_FILE_RW_HINT", "SET_FILE_RW_HINT",
};
-static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, F_LINUX_SPECIFIC_BASE);
+static DEFINE_STRARRAY_OFFSET(fcntl_linux_specific_cmds, "F_", F_LINUX_SPECIFIC_BASE);
static struct strarray *fcntl_cmds_arrays[] = {
&strarray__fcntl_cmds,
@@ -496,29 +522,31 @@ static const char *rlimit_resources[] = {
"MEMLOCK", "AS", "LOCKS", "SIGPENDING", "MSGQUEUE", "NICE", "RTPRIO",
"RTTIME",
};
-static DEFINE_STRARRAY(rlimit_resources);
+static DEFINE_STRARRAY(rlimit_resources, "RLIMIT_");
static const char *sighow[] = { "BLOCK", "UNBLOCK", "SETMASK", };
-static DEFINE_STRARRAY(sighow);
+static DEFINE_STRARRAY(sighow, "SIG_");
static const char *clockid[] = {
"REALTIME", "MONOTONIC", "PROCESS_CPUTIME_ID", "THREAD_CPUTIME_ID",
"MONOTONIC_RAW", "REALTIME_COARSE", "MONOTONIC_COARSE", "BOOTTIME",
"REALTIME_ALARM", "BOOTTIME_ALARM", "SGI_CYCLE", "TAI"
};
-static DEFINE_STRARRAY(clockid);
+static DEFINE_STRARRAY(clockid, "CLOCK_");
static size_t syscall_arg__scnprintf_access_mode(char *bf, size_t size,
struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *suffix = "_OK";
size_t printed = 0;
int mode = arg->val;
if (mode == F_OK) /* 0 */
- return scnprintf(bf, size, "F");
+ return scnprintf(bf, size, "F%s", show_prefix ? suffix : "");
#define P_MODE(n) \
if (mode & n##_OK) { \
- printed += scnprintf(bf + printed, size - printed, "%s", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", #n, show_prefix ? suffix : ""); \
mode &= ~n##_OK; \
}
@@ -543,11 +571,13 @@ static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "O_";
int printed = 0, flags = arg->val;
#define P_FLAG(n) \
if (flags & O_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~O_##n; \
}
@@ -573,11 +603,13 @@ static size_t syscall_arg__scnprintf_pipe_flags(char *bf, size_t size,
static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "GRND_";
int printed = 0, flags = arg->val;
#define P_FLAG(n) \
if (flags & GRND_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~GRND_##n; \
}
@@ -632,12 +664,15 @@ static struct syscall_fmt {
} syscall_fmts[] = {
{ .name = "access",
.arg = { [1] = { .scnprintf = SCA_ACCMODE, /* mode */ }, }, },
+ { .name = "arch_prctl",
+ .arg = { [0] = { .scnprintf = SCA_X86_ARCH_PRCTL_CODE, /* code */ },
+ [1] = { .scnprintf = SCA_PTR, /* arg2 */ }, }, },
{ .name = "bind",
.arg = { [1] = { .scnprintf = SCA_SOCKADDR, /* umyaddr */ }, }, },
{ .name = "bpf",
.arg = { [0] = STRARRAY(cmd, bpf_cmd), }, },
{ .name = "brk", .hexret = true,
- .arg = { [0] = { .scnprintf = SCA_HEX, /* brk */ }, }, },
+ .arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
{ .name = "clock_gettime",
.arg = { [0] = STRARRAY(clk_id, clockid), }, },
{ .name = "clone", .errpid = true, .nr_args = 5,
@@ -715,18 +750,14 @@ static struct syscall_fmt {
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
{ .name = "mknodat",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* fd */ }, }, },
- { .name = "mlock",
- .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ }, }, },
- { .name = "mlockall",
- .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ }, }, },
{ .name = "mmap", .hexret = true,
/* The standard mmap maps to old_mmap on s390x */
#if defined(__s390x__)
.alias = "old_mmap",
#endif
- .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ },
- [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
- [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ }, }, },
+ .arg = { [2] = { .scnprintf = SCA_MMAP_PROT, /* prot */ },
+ [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ },
+ [5] = { .scnprintf = SCA_HEX, /* offset */ }, }, },
{ .name = "mount",
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
[3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
@@ -737,13 +768,7 @@ static struct syscall_fmt {
{ .name = "mq_unlink",
.arg = { [0] = { .scnprintf = SCA_FILENAME, /* u_name */ }, }, },
{ .name = "mremap", .hexret = true,
- .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ },
- [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ },
- [4] = { .scnprintf = SCA_HEX, /* new_addr */ }, }, },
- { .name = "munlock",
- .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ }, }, },
- { .name = "munmap",
- .arg = { [0] = { .scnprintf = SCA_HEX, /* addr */ }, }, },
+ .arg = { [3] = { .scnprintf = SCA_MREMAP_FLAGS, /* flags */ }, }, },
{ .name = "name_to_handle_at",
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
{ .name = "newfstatat",
@@ -772,7 +797,7 @@ static struct syscall_fmt {
[3] = { .scnprintf = SCA_INT, /* pkey */ }, }, },
{ .name = "poll", .timeout = true, },
{ .name = "ppoll", .timeout = true, },
- { .name = "prctl", .alias = "arch_prctl",
+ { .name = "prctl",
.arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
[1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
[2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
@@ -790,7 +815,12 @@ static struct syscall_fmt {
{ .name = "recvmsg",
.arg = { [2] = { .scnprintf = SCA_MSG_FLAGS, /* flags */ }, }, },
{ .name = "renameat",
- .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
+ .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
+ [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ }, }, },
+ { .name = "renameat2",
+ .arg = { [0] = { .scnprintf = SCA_FDAT, /* olddirfd */ },
+ [2] = { .scnprintf = SCA_FDAT, /* newdirfd */ },
+ [4] = { .scnprintf = SCA_RENAMEAT2_FLAGS, /* flags */ }, }, },
{ .name = "rt_sigaction",
.arg = { [0] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
{ .name = "rt_sigprocmask",
@@ -883,7 +913,7 @@ static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
* args_size: sum of the sizes of the syscall arguments, anything after that is augmented stuff: pathname for openat, etc.
*/
struct syscall {
- struct tep_event_format *tp_format;
+ struct tep_event *tp_format;
int nr_args;
int args_size;
bool is_exit;
@@ -894,6 +924,10 @@ struct syscall {
struct syscall_arg_fmt *arg_fmt;
};
+struct bpf_map_syscall_entry {
+ bool enabled;
+};
+
/*
* We need to have this 'calculated' boolean because in some cases we really
* don't know what is the duration of a syscall, for instance, when we start
@@ -1125,7 +1159,7 @@ static size_t syscall_arg__scnprintf_augmented_string(struct syscall_arg *arg, c
{
struct augmented_arg *augmented_arg = arg->augmented.args;
- return scnprintf(bf, size, "%.*s", augmented_arg->size, augmented_arg->value);
+ return scnprintf(bf, size, "\"%.*s\"", augmented_arg->size, augmented_arg->value);
}
static size_t syscall_arg__scnprintf_filename(char *bf, size_t size,
@@ -1194,8 +1228,12 @@ static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread
static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread,
u64 duration, bool duration_calculated, u64 tstamp, FILE *fp)
{
- size_t printed = trace__fprintf_tstamp(trace, tstamp, fp);
- printed += fprintf_duration(duration, duration_calculated, fp);
+ size_t printed = 0;
+
+ if (trace->show_tstamp)
+ printed = trace__fprintf_tstamp(trace, tstamp, fp);
+ if (trace->show_duration)
+ printed += fprintf_duration(duration, duration_calculated, fp);
return printed + trace__fprintf_comm_tid(trace, thread, fp);
}
@@ -1262,7 +1300,7 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
evlist->threads, trace__tool_process, false,
- trace->opts.proc_map_timeout, 1);
+ 1);
out:
if (err)
symbol__exit();
@@ -1314,8 +1352,8 @@ static int syscall__set_arg_fmts(struct syscall *sc)
strcmp(field->name, "path") == 0 ||
strcmp(field->name, "pathname") == 0))
sc->arg_fmt[idx].scnprintf = SCA_FILENAME;
- else if (field->flags & TEP_FIELD_IS_POINTER)
- sc->arg_fmt[idx].scnprintf = syscall_arg__scnprintf_hex;
+ else if ((field->flags & TEP_FIELD_IS_POINTER) || strstr(field->name, "addr"))
+ sc->arg_fmt[idx].scnprintf = SCA_PTR;
else if (strcmp(field->type, "pid_t") == 0)
sc->arg_fmt[idx].scnprintf = SCA_PID;
else if (strcmp(field->type, "umode_t") == 0)
@@ -1548,6 +1586,7 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
.mask = 0,
.trace = trace,
.thread = thread,
+ .show_string_prefix = trace->show_string_prefix,
};
struct thread_trace *ttrace = thread__priv(thread);
@@ -1579,6 +1618,7 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
* strarray for it.
*/
if (val == 0 &&
+ !trace->show_zeros &&
!(sc->arg_fmt &&
(sc->arg_fmt[arg.idx].show_zero ||
sc->arg_fmt[arg.idx].scnprintf == SCA_STRARRAY ||
@@ -1586,8 +1626,11 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
sc->arg_fmt[arg.idx].parm))
continue;
- printed += scnprintf(bf + printed, size - printed,
- "%s%s: ", printed ? ", " : "", field->name);
+ printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
+
+ if (trace->show_arg_names)
+ printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
+
printed += syscall__scnprintf_val(sc, bf + printed, size - printed, &arg, val);
}
} else if (IS_ERR(sc->tp_format)) {
@@ -1700,7 +1743,7 @@ static int trace__printf_interrupted_entry(struct trace *trace)
return 0;
printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->output);
- printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
+ printed += fprintf(trace->output, ")%-*s ...\n", trace->args_alignment, ttrace->entry_str);
ttrace->entry_pending = false;
++trace->nr_events_printed;
@@ -1757,7 +1800,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
{
char *msg;
void *args;
- size_t printed = 0;
+ int printed = 0;
struct thread *thread;
int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
int augmented_args_size = 0;
@@ -1806,8 +1849,13 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
if (sc->is_exit) {
if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
+ int alignment = 0;
+
trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
- fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
+ printed = fprintf(trace->output, "%s)", ttrace->entry_str);
+ if (trace->args_alignment > printed)
+ alignment = trace->args_alignment - printed;
+ fprintf(trace->output, "%*s= ?\n", alignment, " ");
}
} else {
ttrace->entry_pending = true;
@@ -1902,7 +1950,8 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
u64 duration = 0;
bool duration_calculated = false;
struct thread *thread;
- int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0;
+ int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1, callchain_ret = 0, printed = 0;
+ int alignment = trace->args_alignment;
struct syscall *sc = trace__syscall_info(trace, evsel, id);
struct thread_trace *ttrace;
@@ -1950,28 +1999,37 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace->output);
if (ttrace->entry_pending) {
- fprintf(trace->output, "%-70s", ttrace->entry_str);
+ printed = fprintf(trace->output, "%s", ttrace->entry_str);
} else {
fprintf(trace->output, " ... [");
color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
fprintf(trace->output, "]: %s()", sc->name);
}
+ printed++; /* the closing ')' */
+
+ if (alignment > printed)
+ alignment -= printed;
+ else
+ alignment = 0;
+
+ fprintf(trace->output, ")%*s= ", alignment, " ");
+
if (sc->fmt == NULL) {
if (ret < 0)
goto errno_print;
signed_print:
- fprintf(trace->output, ") = %ld", ret);
+ fprintf(trace->output, "%ld", ret);
} else if (ret < 0) {
errno_print: {
char bf[STRERR_BUFSIZE];
const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
*e = errno_to_name(evsel, -ret);
- fprintf(trace->output, ") = -1 %s %s", e, emsg);
+ fprintf(trace->output, "-1 %s (%s)", e, emsg);
}
} else if (ret == 0 && sc->fmt->timeout)
- fprintf(trace->output, ") = 0 Timeout");
+ fprintf(trace->output, "0 (Timeout)");
else if (ttrace->ret_scnprintf) {
char bf[1024];
struct syscall_arg arg = {
@@ -1981,14 +2039,14 @@ errno_print: {
};
ttrace->ret_scnprintf(bf, sizeof(bf), &arg);
ttrace->ret_scnprintf = NULL;
- fprintf(trace->output, ") = %s", bf);
+ fprintf(trace->output, "%s", bf);
} else if (sc->fmt->hexret)
- fprintf(trace->output, ") = %#lx", ret);
+ fprintf(trace->output, "%#lx", ret);
else if (sc->fmt->errpid) {
struct thread *child = machine__find_thread(trace->host, ret, ret);
if (child != NULL) {
- fprintf(trace->output, ") = %ld", ret);
+ fprintf(trace->output, "%ld", ret);
if (child->comm_set)
fprintf(trace->output, " (%s)", thread__comm_str(child));
thread__put(child);
@@ -2169,7 +2227,7 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
trace__printf_interrupted_entry(trace);
trace__fprintf_tstamp(trace, sample->time, trace->output);
- if (trace->trace_syscalls)
+ if (trace->trace_syscalls && trace->show_duration)
fprintf(trace->output, "( ): ");
if (thread)
@@ -2540,7 +2598,7 @@ out_delete_sys_enter:
goto out;
}
-static int trace__set_ev_qualifier_filter(struct trace *trace)
+static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
{
int err = -1;
struct perf_evsel *sys_exit;
@@ -2565,9 +2623,93 @@ out_enomem:
goto out;
}
+#ifdef HAVE_LIBBPF_SUPPORT
+static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
+{
+ int fd = bpf_map__fd(trace->syscalls.map);
+ struct bpf_map_syscall_entry value = {
+ .enabled = !trace->not_ev_qualifier,
+ };
+ int err = 0;
+ size_t i;
+
+ for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
+ int key = trace->ev_qualifier_ids.entries[i];
+
+ err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
+{
+ int fd = bpf_map__fd(trace->syscalls.map);
+ struct bpf_map_syscall_entry value = {
+ .enabled = enabled,
+ };
+ int err = 0, key;
+
+ for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
+ err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int trace__init_syscalls_bpf_map(struct trace *trace)
+{
+ bool enabled = true;
+
+ if (trace->ev_qualifier_ids.nr)
+ enabled = trace->not_ev_qualifier;
+
+ return __trace__init_syscalls_bpf_map(trace, enabled);
+}
+#else
+static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
+{
+ return 0;
+}
+
+static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
+{
+ return 0;
+}
+#endif // HAVE_LIBBPF_SUPPORT
+
+static int trace__set_ev_qualifier_filter(struct trace *trace)
+{
+ if (trace->syscalls.map)
+ return trace__set_ev_qualifier_bpf_filter(trace);
+ return trace__set_ev_qualifier_tp_filter(trace);
+}
+
+static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused,
+ size_t npids __maybe_unused, pid_t *pids __maybe_unused)
+{
+ int err = 0;
+#ifdef HAVE_LIBBPF_SUPPORT
+ bool value = true;
+ int map_fd = bpf_map__fd(map);
+ size_t i;
+
+ for (i = 0; i < npids; ++i) {
+ err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
+ if (err)
+ break;
+ }
+#endif
+ return err;
+}
+
static int trace__set_filter_loop_pids(struct trace *trace)
{
- unsigned int nr = 1;
+ unsigned int nr = 1;
+ int err;
pid_t pids[32] = {
getpid(),
};
@@ -2586,7 +2728,92 @@ static int trace__set_filter_loop_pids(struct trace *trace)
thread = parent;
}
- return perf_evlist__set_filter_pids(trace->evlist, nr, pids);
+ err = perf_evlist__set_tp_filter_pids(trace->evlist, nr, pids);
+ if (!err && trace->filter_pids.map)
+ err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
+
+ return err;
+}
+
+static int trace__set_filter_pids(struct trace *trace)
+{
+ int err = 0;
+ /*
+ * Better not use !target__has_task() here because we need to cover the
+ * case where no threads were specified in the command line, but a
+ * workload was, and in that case we will fill in the thread_map when
+ * we fork the workload in perf_evlist__prepare_workload.
+ */
+ if (trace->filter_pids.nr > 0) {
+ err = perf_evlist__set_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
+ trace->filter_pids.entries);
+ if (!err && trace->filter_pids.map) {
+ err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
+ trace->filter_pids.entries);
+ }
+ } else if (thread_map__pid(trace->evlist->threads, 0) == -1) {
+ err = trace__set_filter_loop_pids(trace);
+ }
+
+ return err;
+}
+
+static int __trace__deliver_event(struct trace *trace, union perf_event *event)
+{
+ struct perf_evlist *evlist = trace->evlist;
+ struct perf_sample sample;
+ int err;
+
+ err = perf_evlist__parse_sample(evlist, event, &sample);
+ if (err)
+ fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
+ else
+ trace__handle_event(trace, event, &sample);
+
+ return 0;
+}
+
+static int __trace__flush_events(struct trace *trace)
+{
+ u64 first = ordered_events__first_time(&trace->oe.data);
+ u64 flush = trace->oe.last - NSEC_PER_SEC;
+
+ /* Is there something to flush? */
+ if (first && first < flush)
+ return ordered_events__flush_time(&trace->oe.data, flush);
+
+ return 0;
+}
+
+static int trace__flush_events(struct trace *trace)
+{
+ return !trace->sort_events ? 0 : __trace__flush_events(trace);
+}
+
+static int trace__deliver_event(struct trace *trace, union perf_event *event)
+{
+ int err;
+
+ if (!trace->sort_events)
+ return __trace__deliver_event(trace, event);
+
+ err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
+ if (err && err != -1)
+ return err;
+
+ err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
+ if (err)
+ return err;
+
+ return trace__flush_events(trace);
+}
+
+static int ordered_events__deliver_event(struct ordered_events *oe,
+ struct ordered_event *event)
+{
+ struct trace *trace = container_of(oe, struct trace, oe.data);
+
+ return __trace__deliver_event(trace, event->event);
}
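A small sketch of the one-second reorder window __trace__flush_events() implements above: only queued events at least NSEC_PER_SEC older than the newest timestamp seen are delivered, which bounds reordering without buffering the whole session (timestamps illustrative).

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t last  = 12ULL * NSEC_PER_SEC;	/* newest event queued */
	uint64_t first = 10ULL * NSEC_PER_SEC;	/* oldest event queued */
	uint64_t flush = last - NSEC_PER_SEC;

	if (first && first < flush)
		printf("flush events up to t=%" PRIu64 " ns\n", flush);
	return 0;
}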
static int trace__run(struct trace *trace, int argc, const char **argv)
@@ -2600,11 +2827,13 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
trace->live = true;
- if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
- goto out_error_raw_syscalls;
+ if (!trace->raw_augmented_syscalls) {
+ if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
+ goto out_error_raw_syscalls;
- if (trace->trace_syscalls)
- trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
+ if (trace->trace_syscalls)
+ trace->vfs_getname = perf_evlist__add_vfs_getname(evlist);
+ }
if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
@@ -2695,27 +2924,22 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
goto out_error_open;
}
- /*
- * Better not use !target__has_task() here because we need to cover the
- * case where no threads were specified in the command line, but a
- * workload was, and in that case we will fill in the thread_map when
- * we fork the workload in perf_evlist__prepare_workload.
- */
- if (trace->filter_pids.nr > 0)
- err = perf_evlist__set_filter_pids(evlist, trace->filter_pids.nr, trace->filter_pids.entries);
- else if (thread_map__pid(evlist->threads, 0) == -1)
- err = trace__set_filter_loop_pids(trace);
-
+ err = trace__set_filter_pids(trace);
if (err < 0)
goto out_error_mem;
+ if (trace->syscalls.map)
+ trace__init_syscalls_bpf_map(trace);
+
if (trace->ev_qualifier_ids.nr > 0) {
err = trace__set_ev_qualifier_filter(trace);
if (err < 0)
goto out_errno;
- pr_debug("event qualifier tracepoint filter: %s\n",
- trace->syscalls.events.sys_exit->filter);
+ if (trace->syscalls.events.sys_exit) {
+ pr_debug("event qualifier tracepoint filter: %s\n",
+ trace->syscalls.events.sys_exit->filter);
+ }
}
err = perf_evlist__apply_filters(evlist, &evsel);
@@ -2745,7 +2969,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
* Now that we already used evsel->attr to ask the kernel to setup the
* events, lets reuse evsel->attr.sample_max_stack as the limit in
* trace__resolve_callchain(), allowing per-event max-stack settings
- * to override an explicitely set --max-stack global setting.
+ * to override an explicitly set --max-stack global setting.
*/
evlist__for_each_entry(evlist, evsel) {
if (evsel__has_callchain(evsel) &&
@@ -2764,18 +2988,12 @@ again:
continue;
while ((event = perf_mmap__read_event(md)) != NULL) {
- struct perf_sample sample;
-
++trace->nr_events;
- err = perf_evlist__parse_sample(evlist, event, &sample);
- if (err) {
- fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
- goto next_event;
- }
+ err = trace__deliver_event(trace, event);
+ if (err)
+ goto out_disable;
- trace__handle_event(trace, event, &sample);
-next_event:
perf_mmap__consume(md);
if (interrupted)
@@ -2797,6 +3015,9 @@ next_event:
draining = true;
goto again;
+ } else {
+ if (trace__flush_events(trace))
+ goto out_disable;
}
} else {
goto again;
@@ -2807,6 +3028,9 @@ out_disable:
perf_evlist__disable(evlist);
+ if (trace->sort_events)
+ ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
+
if (!err) {
if (trace->summary)
trace__fprintf_thread_summary(trace, trace->output);
@@ -3104,8 +3328,8 @@ static int trace__set_duration(const struct option *opt, const char *str,
return 0;
}
-static int trace__set_filter_pids(const struct option *opt, const char *str,
- int unset __maybe_unused)
+static int trace__set_filter_pids_from_option(const struct option *opt, const char *str,
+ int unset __maybe_unused)
{
int ret = -1;
size_t i;
@@ -3315,6 +3539,68 @@ static int trace__parse_cgroups(const struct option *opt, const char *str, int u
return 0;
}
+static struct bpf_map *bpf__find_map_by_name(const char *name)
+{
+ struct bpf_object *obj, *tmp;
+
+ bpf_object__for_each_safe(obj, tmp) {
+ struct bpf_map *map = bpf_object__find_map_by_name(obj, name);
+ if (map)
+ return map;
+
+ }
+
+ return NULL;
+}
+
+static void trace__set_bpf_map_filtered_pids(struct trace *trace)
+{
+ trace->filter_pids.map = bpf__find_map_by_name("pids_filtered");
+}
+
+static void trace__set_bpf_map_syscalls(struct trace *trace)
+{
+ trace->syscalls.map = bpf__find_map_by_name("syscalls");
+}
+
+static int trace__config(const char *var, const char *value, void *arg)
+{
+ struct trace *trace = arg;
+ int err = 0;
+
+ if (!strcmp(var, "trace.add_events")) {
+ struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
+ "event selector. use 'perf list' to list available events",
+ parse_events_option);
+ err = parse_events_option(&o, value, 0);
+ } else if (!strcmp(var, "trace.show_timestamp")) {
+ trace->show_tstamp = perf_config_bool(var, value);
+ } else if (!strcmp(var, "trace.show_duration")) {
+ trace->show_duration = perf_config_bool(var, value);
+ } else if (!strcmp(var, "trace.show_arg_names")) {
+ trace->show_arg_names = perf_config_bool(var, value);
+ if (!trace->show_arg_names)
+ trace->show_zeros = true;
+ } else if (!strcmp(var, "trace.show_zeros")) {
+ bool new_show_zeros = perf_config_bool(var, value);
+ if (!trace->show_arg_names && !new_show_zeros) {
+ pr_warning("trace.show_zeros has to be set when trace.show_arg_names=no\n");
+ goto out;
+ }
+ trace->show_zeros = new_show_zeros;
+ } else if (!strcmp(var, "trace.show_prefix")) {
+ trace->show_string_prefix = perf_config_bool(var, value);
+ } else if (!strcmp(var, "trace.no_inherit")) {
+ trace->opts.no_inherit = perf_config_bool(var, value);
+ } else if (!strcmp(var, "trace.args_alignment")) {
+ int args_alignment = 0;
+ if (perf_config_int(&args_alignment, var, value) == 0)
+ trace->args_alignment = args_alignment;
+ }
+out:
+ return err;
+}
+
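An illustrative ~/.perfconfig fragment (values are examples, not defaults) exercising the 'trace.' variables handled by trace__config() above; note the code above forces show_zeros on when show_arg_names is disabled, so the pairing below is consistent:

[trace]
	show_timestamp = no
	show_duration = no
	show_arg_names = no
	show_zeros = yes
	show_prefix = yes
	no_inherit = yes
	args_alignment = 40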
int cmd_trace(int argc, const char **argv)
{
const char *trace_usage[] = {
@@ -3337,10 +3623,13 @@ int cmd_trace(int argc, const char **argv)
.user_interval = ULLONG_MAX,
.no_buffering = true,
.mmap_pages = UINT_MAX,
- .proc_map_timeout = 500,
},
.output = stderr,
.show_comm = true,
+ .show_tstamp = true,
+ .show_duration = true,
+ .show_arg_names = true,
+ .args_alignment = 70,
.trace_syscalls = false,
.kernel_syscallchains = false,
.max_stack = UINT_MAX,
@@ -3363,7 +3652,7 @@ int cmd_trace(int argc, const char **argv)
OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
"trace events on existing thread id"),
OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
- "pids to filter (by the kernel)", trace__set_filter_pids),
+ "pids to filter (by the kernel)", trace__set_filter_pids_from_option),
OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
"system-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
@@ -3406,9 +3695,11 @@ int cmd_trace(int argc, const char **argv)
"Set the maximum stack depth when parsing the callchain, "
"anything beyond the specified depth will be ignored. "
"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
+ OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
+ "Sort batch of events before processing, use if getting out of order events"),
OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
"print the PERF_RECORD_SAMPLE PERF_SAMPLE_ info, for debugging"),
- OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
+ OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
"per thread proc mmap processing timeout in ms"),
OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
trace__parse_cgroups),
@@ -3436,6 +3727,10 @@ int cmd_trace(int argc, const char **argv)
goto out;
}
+ err = perf_config(trace__config, &trace);
+ if (err)
+ goto out;
+
argc = parse_options_subcommand(argc, argv, trace_options, trace_subcommands,
trace_usage, PARSE_OPT_STOP_AT_NON_OPTION);
@@ -3451,8 +3746,11 @@ int cmd_trace(int argc, const char **argv)
goto out;
}
- if (evsel)
+ if (evsel) {
trace.syscalls.events.augmented = evsel;
+ trace__set_bpf_map_filtered_pids(&trace);
+ trace__set_bpf_map_syscalls(&trace);
+ }
err = bpf__setup_stdout(trace.evlist);
if (err) {
@@ -3497,6 +3795,11 @@ int cmd_trace(int argc, const char **argv)
}
}
+ if (trace.sort_events) {
+ ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
+ ordered_events__set_copy_on_queue(&trace.oe.data, true);
+ }
+
/*
* If we are augmenting syscalls, then combine what we put in the
* __augmented_syscalls__ BPF map with what is in the
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 9531f7bd7d9b..8e811ea0cf85 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -4,6 +4,7 @@
HEADERS='
include/uapi/drm/drm.h
include/uapi/drm/i915_drm.h
+include/uapi/linux/fadvise.h
include/uapi/linux/fcntl.h
include/uapi/linux/fs.h
include/uapi/linux/kcmp.h
@@ -21,6 +22,7 @@ include/uapi/linux/hw_breakpoint.h
arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/required-features.h
arch/x86/include/asm/cpufeatures.h
+arch/x86/include/uapi/asm/prctl.h
arch/arm/include/uapi/asm/perf_regs.h
arch/arm64/include/uapi/asm/perf_regs.h
arch/powerpc/include/uapi/asm/perf_regs.h
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
index 90a19336310b..53c233370fae 100644
--- a/tools/perf/examples/bpf/augmented_raw_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -14,8 +14,8 @@
* code that will combine entry/exit in a strace like way.
*/
-#include <stdio.h>
-#include <linux/socket.h>
+#include <unistd.h>
+#include <pid_filter.h>
/* bpf-output associated map */
struct bpf_map SEC("maps") __augmented_syscalls__ = {
@@ -25,6 +25,17 @@ struct bpf_map SEC("maps") __augmented_syscalls__ = {
.max_entries = __NR_CPUS__,
};
+struct syscall {
+ bool enabled;
+};
+
+struct bpf_map SEC("maps") syscalls = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(struct syscall),
+ .max_entries = 512,
+};
+
struct syscall_enter_args {
unsigned long long common_tp_fields;
long syscall_nr;
@@ -44,8 +55,11 @@ struct augmented_filename {
};
#define SYS_OPEN 2
+#define SYS_ACCESS 21
#define SYS_OPENAT 257
+pid_filter(pids_filtered);
+
SEC("raw_syscalls:sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
@@ -53,10 +67,18 @@ int sys_enter(struct syscall_enter_args *args)
struct syscall_enter_args args;
struct augmented_filename filename;
} augmented_args;
+ struct syscall *syscall;
unsigned int len = sizeof(augmented_args);
const void *filename_arg = NULL;
+ if (pid_filter__has(&pids_filtered, getpid()))
+ return 0;
+
probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
+
+ syscall = bpf_map_lookup_elem(&syscalls, &augmented_args.args.syscall_nr);
+ if (syscall == NULL || !syscall->enabled)
+ return 0;
/*
* Yonghong and Edward Cree sayz:
*
@@ -98,6 +120,7 @@ int sys_enter(struct syscall_enter_args *args)
* after the ctx memory access to prevent their down stream merging.
*/
switch (augmented_args.args.syscall_nr) {
+ case SYS_ACCESS:
case SYS_OPEN: filename_arg = (const void *)args->args[0];
__asm__ __volatile__("": : :"memory");
break;
@@ -125,7 +148,19 @@ int sys_enter(struct syscall_enter_args *args)
SEC("raw_syscalls:sys_exit")
int sys_exit(struct syscall_exit_args *args)
{
- return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */
+ struct syscall_exit_args exit_args;
+ struct syscall *syscall;
+
+ if (pid_filter__has(&pids_filtered, getpid()))
+ return 0;
+
+ probe_read(&exit_args, sizeof(exit_args), args);
+
+ syscall = bpf_map_lookup_elem(&syscalls, &exit_args.syscall_nr);
+ if (syscall == NULL || !syscall->enabled)
+ return 0;
+
+ return 1;
}
license(GPL);
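As a usage sketch (path and workload illustrative), this augmenter is meant to be passed to 'perf trace' as a BPF event source, at which point the syscalls and pids_filtered maps get wired up by trace__set_bpf_map_syscalls()/trace__set_bpf_map_filtered_pids() above:

	perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c sleep 1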
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
index 52b6d87fe822..e667577207dc 100644
--- a/tools/perf/include/bpf/bpf.h
+++ b/tools/perf/include/bpf/bpf.h
@@ -18,6 +18,25 @@ struct bpf_map {
unsigned int numa_node;
};
+/*
+ * FIXME: this should receive .max_entries as a parameter, as careful
+ * tuning of these limits is needed to avoid hitting limits that
+ * prevent other BPF constructs, such as tracepoint handlers,
+ * from getting installed, with cryptic messages from libbpf, etc.
+ * For the current need, 'perf trace --filter-pids', 64 should
+ * be good enough, but this surely needs to be revisited.
+ */
+#define pid_map(name, value_type) \
+struct bpf_map SEC("maps") name = { \
+ .type = BPF_MAP_TYPE_HASH, \
+ .key_size = sizeof(pid_t), \
+ .value_size = sizeof(value_type), \
+ .max_entries = 64, \
+}
+
+static int (*bpf_map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem;
+static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem;
+
#define SEC(NAME) __attribute__((section(NAME), used))
#define probe(function, vars) \
@@ -36,4 +55,6 @@ int _version SEC("version") = LINUX_VERSION_CODE;
static int (*probe_read)(void *dst, int size, const void *unsafe_addr) = (void *)BPF_FUNC_probe_read;
static int (*probe_read_str)(void *dst, int size, const void *unsafe_addr) = (void *)BPF_FUNC_probe_read_str;
+static int (*perf_event_output)(void *, struct bpf_map *, int, void *, unsigned long) = (void *)BPF_FUNC_perf_event_output;
+
#endif /* _PERF_BPF_H */
diff --git a/tools/perf/include/bpf/pid_filter.h b/tools/perf/include/bpf/pid_filter.h
new file mode 100644
index 000000000000..6e61c4bdf548
--- /dev/null
+++ b/tools/perf/include/bpf/pid_filter.h
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: LGPL-2.1
+
+#ifndef _PERF_BPF_PID_FILTER_
+#define _PERF_BPF_PID_FILTER_
+
+#include <bpf.h>
+
+#define pid_filter(name) pid_map(name, bool)
+
+static int pid_filter__add(struct bpf_map *pids, pid_t pid)
+{
+ bool value = true;
+ return bpf_map_update_elem(pids, &pid, &value, BPF_NOEXIST);
+}
+
+static bool pid_filter__has(struct bpf_map *pids, pid_t pid)
+{
+ return bpf_map_lookup_elem(pids, &pid) != NULL;
+}
+
+#endif // _PERF_BPF_PID_FILTER_
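For reference, a use such as pid_filter(pids_filtered) -- with pid_filter(name) defined above as pid_map(name, bool) -- expands to roughly the following (a sketch of the macro expansion, not additional code):

struct bpf_map SEC("maps") pids_filtered = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(pid_t),
	.value_size = sizeof(bool),
	.max_entries = 64,
};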
diff --git a/tools/perf/include/bpf/stdio.h b/tools/perf/include/bpf/stdio.h
index 2899cb7bfed8..316af5b2ff35 100644
--- a/tools/perf/include/bpf/stdio.h
+++ b/tools/perf/include/bpf/stdio.h
@@ -9,9 +9,6 @@ struct bpf_map SEC("maps") __bpf_stdout__ = {
.max_entries = __NR_CPUS__,
};
-static int (*perf_event_output)(void *, struct bpf_map *, int, void *, unsigned long) =
- (void *)BPF_FUNC_perf_event_output;
-
#define puts(from) \
({ const int __len = sizeof(from); \
char __from[__len] = from; \
diff --git a/tools/perf/include/bpf/unistd.h b/tools/perf/include/bpf/unistd.h
new file mode 100644
index 000000000000..ca7877f9a976
--- /dev/null
+++ b/tools/perf/include/bpf/unistd.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: LGPL-2.1
+
+#include <bpf.h>
+
+static int (*bpf_get_current_pid_tgid)(void) = (void *)BPF_FUNC_get_current_pid_tgid;
+
+static pid_t getpid(void)
+{
+ return bpf_get_current_pid_tgid();
+}
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index 6add3e982614..aea7b1fe85aa 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -6,7 +6,9 @@
#include <stdlib.h>
#include <err.h>
#include <jvmti.h>
+#ifdef HAVE_JVMTI_CMLR
#include <jvmticmlr.h>
+#endif
#include <limits.h>
#include "jvmti_agent.h"
@@ -27,6 +29,7 @@ static void print_error(jvmtiEnv *jvmti, const char *msg, jvmtiError ret)
}
}
+#ifdef HAVE_JVMTI_CMLR
static jvmtiError
do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
jvmti_line_info_t *tab, jint *nr)
@@ -125,6 +128,15 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
*nr_lines = lines_total;
return JVMTI_ERROR_NONE;
}
+#else /* HAVE_JVMTI_CMLR */
+
+static jvmtiError
+get_line_numbers(jvmtiEnv *jvmti __maybe_unused, const void *compile_info __maybe_unused,
+ jvmti_line_info_t **tab __maybe_unused, int *nr_lines __maybe_unused)
+{
+ return JVMTI_ERROR_NONE;
+}
+#endif /* HAVE_JVMTI_CMLR */
static void
copy_class_filename(const char * class_sign, const char * file_name, char * result, size_t max_length)
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 0ed4a34c74c4..388c6dd128b8 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -82,7 +82,7 @@ struct record_opts {
bool use_clockid;
clockid_t clockid;
u64 clockid_res_ns;
- unsigned int proc_map_timeout;
+ int nr_cblocks;
};
struct option;
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
index bba3152ec54a..0b080b0352d8 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
@@ -433,7 +433,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
@@ -445,7 +445,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
index 97c5d0784c6c..999cf3066363 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
@@ -317,7 +317,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"EventCode": "0x87",
"Counter": "0,1,2,3",
"UMask": "0x1",
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
index bf243fe2a0ec..4ad425312bdc 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
@@ -439,7 +439,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -451,7 +451,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
index 920c89da9111..0d04bf9db000 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
@@ -322,7 +322,7 @@
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"Counter": "0,1,2,3",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
index bf0c51272068..141b1080429d 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
@@ -439,7 +439,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"CounterHTOff": "0,1,2,3"
},
@@ -451,7 +451,7 @@
"PEBS": "1",
"Counter": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
"L1_Hit_Indication": "1",
"CounterHTOff": "0,1,2,3"
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
index 920c89da9111..0d04bf9db000 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
@@ -322,7 +322,7 @@
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
"Counter": "0,1,2,3",
"EventName": "ILD_STALL.LCP",
- "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
+ "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"SampleAfterValue": "2000003",
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
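
The new cascadelakex file below repeats the guidance found on other architectures: the OFFCORE_REQUESTS.DEMAND_DATA_RD description says to pair it with OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD to determine average uncore latency, since the outstanding event accumulates SQ occupancy per cycle while the other counts requests. A minimal sketch of that ratio, illustrative rather than part of the patch, using the raw encodings 0x60/0x01 (config 0x0160) and 0xB0/0x01 (config 0x01b0):

/* Estimate average demand-data-read latency in the uncore as
 *   OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD,
 * reading both counters as one perf event group. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

static int open_raw(unsigned long long config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = config;
	attr.disabled = group_fd == -1;	/* only the group leader starts disabled */
	attr.exclude_kernel = 1;
	attr.read_format = PERF_FORMAT_GROUP;
	return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

int main(void)
{
	struct { unsigned long long nr, occupancy, requests; } vals;
	int lead, fd;

	lead = open_raw(0x0160, -1);	/* occupancy: UMask 0x1, EventCode 0x60 */
	fd = open_raw(0x01b0, lead);	/* requests:  UMask 0x1, EventCode 0xB0 */
	if (lead < 0 || fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(lead, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	/* ... run the workload of interest here ... */
	ioctl(lead, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

	if (read(lead, &vals, sizeof(vals)) > 0 && vals.requests)
		printf("avg demand read latency: %.1f cycles\n",
		       (double)vals.occupancy / vals.requests);
	return 0;
}
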
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
new file mode 100644
index 000000000000..143077c2caf4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
@@ -0,0 +1,10172 @@
+[
+ {
+ "EventCode": "0x24",
+ "UMask": "0x21",
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x22",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x24",
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x27",
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x38",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0x3f",
+ "BriefDescription": "All requests that miss L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "All requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xc1",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xc2",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xc4",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xd8",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe1",
+ "BriefDescription": "Demand Data Read requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe2",
+ "BriefDescription": "RFO requests to L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe4",
+ "BriefDescription": "L2 code requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xe7",
+ "BriefDescription": "Demand requests to L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xf8",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x24",
+ "UMask": "0xff",
+ "BriefDescription": "All L2 requests",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "All L2 requests.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "UMask": "0x41",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Counter": "0,1,2,3",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "Errata": "SKL057",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x2E",
+ "UMask": "0x4f",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Counter": "0,1,2,3",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "Errata": "SKL057",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "L1D miss outstandings duration in cycles",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "AnyThread": "1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x48",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x51",
+ "UMask": "0x1",
+ "BriefDescription": "L1D data line replacements",
+ "Counter": "0,1,2,3",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x2",
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "CounterMask": "1",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x8",
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x1",
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x2",
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x4",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x8",
+ "BriefDescription": "Demand and prefetch data reads",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x80",
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB2",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x11",
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x12",
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "SampleAfterValue": "100003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x21",
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x41",
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x42",
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x81",
+ "BriefDescription": "All retired load instructions.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD0",
+ "UMask": "0x82",
+ "BriefDescription": "All retired store instructions.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "SampleAfterValue": "2000003",
+ "L1_Hit_Indication": "1",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x1",
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x2",
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache. ",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x8",
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x10",
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x20",
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache. ",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x40",
+ "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready. ",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD1",
+ "UMask": "0x80",
+ "BriefDescription": "Retired load instructions with local Intel\u00ae Optane\u2122 DC persistent memory as the data source where the data request missed all caches. Precise event.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "ELLC": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
+ "PublicDescription": "Counts retired load instructions with local Intel\u00ae Optane\u2122 DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x1",
+ "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x2",
+ "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD2",
+ "UMask": "0x8",
+ "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x1",
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x2",
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x4",
+ "BriefDescription": "Retired load instructions whose data sources was remote HITM",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x8",
+ "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Data_LA": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD3",
+ "UMask": "0x10",
+ "BriefDescription": "Retired load instructions with remote Intel\u00ae Optane\u2122 DC persistent memory as the data source where the data request missed all caches. Precise event.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "ELLC": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
+ "PublicDescription": "Counts retired load instructions with remote Intel\u00ae Optane\u2122 DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xD4",
+ "UMask": "0x4",
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Data_LA": "1",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xF0",
+ "UMask": "0x40",
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF1",
+ "UMask": "0x1f",
+ "BriefDescription": "L2 cache lines filling L2",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "UMask": "0x1",
+ "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "UMask": "0x2",
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "UMask": "0x4",
+ "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
+ "Deprecated": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_OUT.USELESS_PREF",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF2",
+ "UMask": "0x4",
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xF4",
+ "UMask": "0x10",
+ "BriefDescription": "Number of cache line split locks sent to uncore.",
+ "Counter": "0,1,2,3",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
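Although these entries are deprecated in favor of the corresponding OCR.* events, each EventName/MSRValue pair still describes a complete programming of the offcore-response facility, and the same count can typically be requested through perf's raw event syntax on Intel CPUs whose core PMU exposes an offcore_rsp config field. A hedged usage sketch (the PMU name "cpu" and the presence of offcore_rsp are assumptions about the target system, not something this file guarantees):

    # Build a perf raw-event string for an offcore-response event; the
    # result can be passed to e.g. `perf stat -e <string> -a sleep 1`.
    # Assumes a core PMU named "cpu" exposing the offcore_rsp field.
    def perf_event_string(msr_value: int, event: int = 0xB7, umask: int = 0x1) -> str:
        return f"cpu/event={event:#x},umask={umask:#x},offcore_rsp={msr_value:#x}/"

    # OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP, from the entry above:
    print(perf_event_string(0x3F803C0004))
    # -> cpu/event=0xb7,umask=0x1,offcore_rsp=0x3f803c0004/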
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
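Each entry lists two event codes ("0xB7, 0xBB") and two MSR indexes ("0x1a6,0x1a7") because the core provides two independent offcore-response facilities, so two different response masks can be counted at the same time. By the usual Intel pairing (background knowledge, not something this file spells out), event 0xB7 programs MSR 0x1A6 and event 0xBB programs MSR 0x1A7:

    # Conventional pairing of offcore-response event codes with their
    # dedicated MSRs (a background assumption, not taken from this file).
    OFFCORE_RSP_MSR = {
        0xB7: 0x1A6,  # MSR_OFFCORE_RSP_0
        0xBB: 0x1A7,  # MSR_OFFCORE_RSP_1
    }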
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response events can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify the attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
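The MSRValue in each of these entries is a bit-wise composition rather than an opaque constant: the low bits carry the request type (0x0120 for the ALL_PF_RFO entries above, 0x0491 for the ALL_DATA_RD entries below), the bits around 0x3C0000 carry the supplier/L3 line state, and the bits in 0x3F80000000 carry the snoop response. A minimal C sketch of that decomposition follows; the field masks are read off the values in this table, so treat them as an illustration inferred from this file, not as the complete architectural layout of MSR_OFFCORE_RSP_0/1.

#include <stdint.h>
#include <stdio.h>

/*
 * Masks inferred from the MSRValues in this file, e.g. ALL_PF_RFO = 0x0120,
 * L3_HIT (any line state) = 0x3C0000, ANY_SNOOP = 0x3F80000000.
 */
#define OCR_REQUEST(v)   ((v) & 0xFFFFULL)         /* request type bits */
#define OCR_SUPPLIER(v)  ((v) & 0x3E0000ULL)       /* SUPPLIER_NONE, L3_HIT_M/E/S/F */
#define OCR_SNOOP(v)     ((v) & 0x3F80000000ULL)   /* SNOOP_NONE ... ANY_SNOOP */

int main(void)
{
	/* MSRValue from the OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP entry above. */
	uint64_t v = 0x3F803C0120ULL;

	printf("request  = 0x%04llx\n", (unsigned long long)OCR_REQUEST(v));
	printf("supplier = 0x%06llx\n", (unsigned long long)OCR_SUPPLIER(v));
	printf("snoop    = 0x%010llx\n", (unsigned long long)OCR_SNOOP(v));
	return 0;
}

Run against the value above, this prints request 0x0120, supplier 0x3c0000 and snoop 0x3f80000000, matching the three name components of the event.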
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
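At the perf interface, an entry like the one above maps directly onto the raw event encoding: EventCode 0xB7 with UMask 0x1 selects OFFCORE_RESPONSE_0, MSRIndex 0x1a6 names MSR_OFFCORE_RSP_0, and the MSRValue travels in the auxiliary config1 field, which the Linux x86 PMU driver writes to that MSR. A minimal sketch, assuming a Linux host; the value 0x3F803C0491 is taken from the OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP entry above, and error handling is kept to the bare minimum.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01B7;           /* umask 0x01 << 8 | event 0xB7 */
	attr.config1 = 0x3F803C0491ULL; /* MSRValue from the entry above */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0 /* this thread */,
		     -1 /* any cpu */, -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement would run here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("offcore responses: %lld\n", count);
	close(fd);
	return 0;
}

This is also why only one offcore response event per dedicated MSR can be active at a time: two events demanding different MSR_OFFCORE_RSP_0 values cannot be scheduled on the PMU together, which is what the PublicDescription text in these entries alludes to.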
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0200200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0400200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0800200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1000200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00800207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01000207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02000207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04000207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08000207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10000207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F800207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00800407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01000407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02000407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04000407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08000407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10000407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F800407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00800807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01000807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02000807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04000807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08000807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10000807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F800807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00801007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01001007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02001007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04001007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08001007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10001007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F801007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00802007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01002007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02002007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04002007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08002007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10002007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F802007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00803C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F803C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000018000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x0000010122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.ANY_RESPONSE",
+ "Deprecated": "1",
+ "MSRValue": "0x00000107F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08007C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100408000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080408000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0100400122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0080400122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01004007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00804007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80408000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F80400122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F804007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
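Note on the deprecated OFFCORE_RESPONSE aliases above: each entry pairs an EventName of the form request=X:response=Y with an MSRValue that perf programs into the MSR pair 0x1a6/0x1a7. Reading the table, the low 16 bits of MSRValue select the request type(s), and the bits above bit 16 carry the response (supplier plus snoop) attributes. The Python sketch below decodes a value on that basis; the request-bit names are inferred from the MSRValue/EventName pairs in this file, not taken from an authoritative decoder, so treat them as assumptions.

    # Request-type bits (low 16 bits of MSRValue), inferred from the
    # ANY_RESPONSE rows in this file, e.g. PF_L3_RFO -> 0x0100 -> bit 8.
    REQUEST_BITS = {
        0: "DEMAND_DATA_RD", 1: "DEMAND_RFO", 2: "DEMAND_CODE_RD",
        4: "PF_L2_DATA_RD", 5: "PF_L2_RFO", 7: "PF_L3_DATA_RD",
        8: "PF_L3_RFO", 10: "PF_L1D_AND_SW", 15: "OTHER",
    }

    def decode(msr_value):
        """Split one MSRValue into (request type names, raw response mask)."""
        requests = [name for bit, name in sorted(REQUEST_BITS.items())
                    if msr_value & (1 << bit)]
        response = msr_value >> 16  # supplier/snoop attribute bits
        return requests, response

    reqs, resp = decode(0x01003C0100)  # OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED
    print(reqs, hex(resp))             # ['PF_L3_RFO'] 0x1003c

For example, 0x01003C0100 splits into request bit 8 (PF_L3_RFO) and response mask 0x1003C, matching the request=PF_L3_RFO:response=L3_HIT.NO_SNOOP_NEEDED naming above.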
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
new file mode 100644
index 000000000000..36c903faed0b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -0,0 +1,164 @@
+[
+ {
+ "BriefDescription": "Instructions Per Cycle (per logical thread)",
+ "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline",
+ "MetricName": "UPI"
+ },
+ {
+ "BriefDescription": "Rough estimate of the fraction of fetched cache line bytes that were likely consumed by program instructions",
+ "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ((UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1) )",
+ "MetricGroup": "Frontend",
+ "MetricName": "IFetch_Line_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
+ "MetricGroup": "DSB;Frontend_Bandwidth",
+ "MetricName": "DSB_Coverage"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per logical thread)",
+ "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
+ "MetricGroup": "Pipeline;Summary",
+ "MetricName": "CPI"
+ },
+ {
+ "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Summary",
+ "MetricName": "CLKS"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots",
+ "MetricExpr": "4*(( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "SLOTS"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary",
+ "MetricName": "Instructions"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles)",
+ "MetricGroup": "SMT",
+ "MetricName": "CoreIPC"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Pipeline;Ports_Utilization",
+ "MetricName": "ILP"
+ },
+ {
+ "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
+ "MetricExpr": "2* (( RS_EVENTS.EMPTY_CYCLES - ICACHE_16B.IFDATA_STALL - ICACHE_64B.IFTAG_STALL ) / RS_EVENTS.EMPTY_END)",
+ "MetricGroup": "Unknown_Branches",
+ "MetricName": "BAClear_Cost"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any thread is active on the physical core",
+ "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "SMT",
+ "MetricName": "CORE_CLKS"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+ "MetricGroup": "Memory_Bound;Memory_Lat",
+ "MetricName": "Load_Miss_Real_Latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand loads when there is at least 1 such miss)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (( L1D_PEND_MISS.PENDING_CYCLES_ANY / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES)",
+ "MetricGroup": "Memory_Bound;Memory_BW",
+ "MetricName": "MLP"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles) )",
+ "MetricGroup": "TLB",
+ "MetricName": "Page_Walks_Utilization"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "MetricGroup": "Summary",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 / duration_time",
+ "MetricGroup": "FLOPS;Summary",
+ "MetricName": "GFLOPs"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative to nominal frequency",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware threads were active",
+ "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
+ "MetricGroup": "SMT;Summary",
+ "MetricName": "SMT_2T_Utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC:u / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Summary",
+ "MetricName": "Kernel_Utilization"
+ },
+ {
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency"
+ }
+]
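Each entry above is a named arithmetic expression over event names: perf's metric engine collects the events referenced by MetricExpr and then evaluates the expression. Below is a minimal sketch of that evaluation for the IPC, CPI, and SLOTS entries, using made-up counter values (not measurements) and assuming SMT is off so the '#SMT_on' conditional takes its 'else cycles' branch, where 'cycles' corresponds to CPU_CLK_UNHALTED.THREAD.

    # Illustrative counts only; on real hardware these come from the PMU.
    counts = {
        "INST_RETIRED.ANY": 1_200_000_000,
        "CPU_CLK_UNHALTED.THREAD": 1_000_000_000,
    }

    ipc = counts["INST_RETIRED.ANY"] / counts["CPU_CLK_UNHALTED.THREAD"]
    cpi = 1 / ipc                                  # the CPI entry is 1 / IPC
    slots = 4 * counts["CPU_CLK_UNHALTED.THREAD"]  # SLOTS with SMT off: 4 * cycles
    print("IPC=%.2f CPI=%.2f SLOTS=%d" % (ipc, cpi, slots))
    # IPC=1.20 CPI=0.83 SLOTS=4000000000

On a perf build that includes these files, a metric can be requested by its MetricName, e.g. 'perf stat -M IPC -- <workload>'.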
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
new file mode 100644
index 000000000000..91b38de138f2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
@@ -0,0 +1,85 @@
+[
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x1",
+ "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x2",
+ "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x4",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT14 RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x8",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x10",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x20",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 8 calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x40",
+ "BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 8 calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC7",
+ "UMask": "0x80",
+ "BriefDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 16 calculations per element.",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCA",
+ "UMask": "0x1e",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "Counter": "0,1,2,3",
+ "EventName": "FP_ASSIST.ANY",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle, the event increments by 1.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
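The FP_ARITH_INST_RETIRED umasks above count retired instructions, and each BriefDescription states how many computation operations a single instruction represents (1 for scalar, 2/4/8/16 for the packed widths). That is the weighting the GFLOPs metric in clx-metrics.json applies, although that expression stops at the 256-bit events. The sketch below applies the same weighting in Python, extended to the 512-bit umasks on the assumption that their stated 8 and 16 operations per instruction apply; the counts are illustrative placeholders, and note the caveat above that DPP and FM(N)ADD/SUB instructions count twice.

    # Operations per retired instruction, taken from the BriefDescriptions.
    FLOPS_PER_INST = {
        "FP_ARITH_INST_RETIRED.SCALAR_SINGLE": 1,
        "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE": 1,
        "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE": 2,
        "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE": 4,
        "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 4,
        "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE": 8,
        "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE": 8,
        "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE": 16,
    }

    def gflops(counts, seconds):
        """Weighted floating-point operations per second, in GFLOP/s."""
        flops = sum(FLOPS_PER_INST[event] * n for event, n in counts.items())
        return flops / 1e9 / seconds

    # 5e9 256-bit packed-double instructions in 2 s, 4 ops each -> 10 GFLOP/s
    print(gflops({"FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 5_000_000_000}, 2.0))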
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
new file mode 100644
index 000000000000..954e64574ee2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
@@ -0,0 +1,482 @@
+[
+ {
+ "EventCode": "0x79",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MITE_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x4",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.DSB_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x8",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x18",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x18",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x20",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x24",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "CounterMask": "1",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x24",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "UMask": "0x30",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ.MS_SWITCHES",
+ "CounterMask": "1",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x80",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
+ "PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x83",
+ "UMask": "0x1",
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x83",
+ "UMask": "0x2",
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x83",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "Counter": "0,1,2,3",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "CounterMask": "2",
+ "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "CounterMask": "3",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "CounterMask": "4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x9C",
+ "UMask": "0x1",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "Counter": "0,1,2,3",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAB",
+ "UMask": "0x2",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "Counter": "0,1,2,3",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x400406",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x200206",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
+ "MSRIndex": "0x3F7",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x400206",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "PEBS": "1",
+ "MSRValue": "0x15",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss. ",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "PEBS": "1",
+ "MSRValue": "0x14",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "PEBS": "1",
+ "MSRValue": "0x13",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "PEBS": "1",
+ "MSRValue": "0x12",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
+ "PEBS": "1",
+ "MSRValue": "0x11",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. ",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x300206",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
+ "MSRIndex": "0x3F7",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x100206",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x420006",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x410006",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x408006",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x404006",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x402006",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x401006",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC6",
+ "UMask": "0x1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "PEBS": "1",
+ "MSRValue": "0x400806",
+ "Counter": "0,1,2,3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ }
+]
\ No newline at end of file
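
Decoding note (a sketch, assuming the MSR_PEBS_FRONTEND (0x3F7) layout from the Intel SDM: bits 7:0 sub-event select, bits 19:8 IDQ bubble length in cycles, bits 22:20 bubble width): MSRValue 0x400406 above decodes to width 4, length 4, sub-event 0x06, matching FRONTEND_RETIRED.LATENCY_GE_4 (no uops for 4 cycles), while 0x100206 decodes to width 1, length 2, matching LATENCY_GE_2_BUBBLES_GE_1. Sampling one of these needs only the event name; TakenAlone=1 means perf should schedule the event on its own rather than in a group:

    # profile long front-end stalls (./workload is a placeholder)
    perf record -e frontend_retired.latency_ge_8 -- ./workload
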
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
new file mode 100644
index 000000000000..dfee92596379
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
@@ -0,0 +1,9909 @@
+[
+ {
+ "EventCode": "0x54",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_CAPACITY",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x54",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x1",
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x2",
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x4",
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x8",
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC4",
+ "PublicDescription": "RTM region detected inside HLE.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5d",
+ "UMask": "0x10",
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
+ "Counter": "0,1,2,3",
+ "EventName": "TX_EXEC.MISC5",
+ "PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x10",
+ "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "CounterMask": "6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x60",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x6",
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "CounterMask": "6",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB0",
+ "UMask": "0x10",
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x2",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "Errata": "SKL089",
+ "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times an HLE execution started.",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times an HLE execution successfully committed",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Number of times HLE commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one). ",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PublicDescription": "Number of times HLE abort was triggered.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MEM",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_TIMER",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC8",
+ "UMask": "0x80",
+ "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "Counter": "0,1,2,3",
+ "EventName": "HLE_RETIRED.ABORTED_EVENTS",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC9",
+ "UMask": "0x1",
+ "BriefDescription": "Number of times an RTM execution started.",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC9",
+ "UMask": "0x2",
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Number of times RTM commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC9",
+ "UMask": "0x4",
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one). ",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PublicDescription": "Number of times RTM abort was triggered.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC9",
+ "UMask": "0x8",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC9",
+ "UMask": "0x10",
+ "BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_TIMER",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC9",
+ "UMask": "0x20",
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC9",
+ "UMask": "0x40",
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC9",
+ "UMask": "0x80",
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "Counter": "0,1,2,3",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "PEBS": "2",
+ "MSRValue": "0x200",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "101",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "PEBS": "2",
+ "MSRValue": "0x100",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "503",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "PEBS": "2",
+ "MSRValue": "0x80",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "1009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "PEBS": "2",
+ "MSRValue": "0x40",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "2003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "PEBS": "2",
+ "MSRValue": "0x20",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "PEBS": "2",
+ "MSRValue": "0x10",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "20011",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "PEBS": "2",
+ "MSRValue": "0x8",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "50021",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xCD",
+ "UMask": "0x1",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "PEBS": "2",
+ "MSRValue": "0x4",
+ "Counter": "0,1,2,3",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "TakenAlone": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0084000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0104000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0204000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0404000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0804000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1004000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F84000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x0090000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x0110000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x0210000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0410000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0810000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x1010000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F90000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00840007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F840007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00900007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x01100007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x02100007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x04100007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x08100007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x10100007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3F900007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "MSRValue": "0x00BC0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "MSRValue": "0x013C0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
+ "MSRValue": "0x023C0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x043C0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x083C0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "MSRValue": "0x103C0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
+ "MSRValue": "0x3FBC0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B808000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B800122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x0604000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x063B8007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
+ "MSRValue": "0x06040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC08000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC00122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
+ "MSRValue": "0x103FC007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00001",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_DATA_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00002",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_RFO:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00004",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=DEMAND_CODE_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00010",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_DATA_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00020",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L2_RFO:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00080",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_DATA_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00100",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L3_RFO:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00400",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=PF_L1D_AND_SW:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC08000",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=OTHER:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00490",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_DATA_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00120",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_PF_RFO:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00491",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_DATA_RD:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC00122",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_RFO:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "\nThis event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
+ "MSRValue": "0x083FC007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OFFCORE_RESPONSE:request=ALL_READS:response=L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0084000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0104000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0204000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0404000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0804000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x1004000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x3F84000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0090000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0110000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0210000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0410000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0810000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x1010000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x3F90000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x00BC000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x013C000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x023C000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x043C000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x083C000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x103C000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x3FBC000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0084000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0104000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0204000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0404000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0804000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x1004000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x3F84000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0090000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0110000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0210000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0410000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0810000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x1010000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x3F90000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x00BC000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x013C000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x023C000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x043C000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x083C000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x103C000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x3FBC000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0084000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0104000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0204000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0404000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0804000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x1004000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x3F84000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0090000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0110000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0210000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0410000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0810000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x1010000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x3F90000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x00BC000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD TBD",
+ "MSRValue": "0x013C000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x023C000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD TBD",
+ "MSRValue": "0x043C000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD TBD",
+ "MSRValue": "0x083C000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD TBD",
+ "MSRValue": "0x103C000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD TBD",
+ "MSRValue": "0x3FBC000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0084000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0104000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0204000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0404000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0804000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x1004000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x3F84000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0090000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0110000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0210000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0410000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0810000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x1010000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x3F90000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x00BC000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x013C000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x023C000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x043C000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x083C000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x103C000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x3FBC000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0084000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0104000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0204000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0404000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0804000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x1004000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x3F84000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0090000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0110000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0210000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0410000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0810000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x1010000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x3F90000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x00BC000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x013C000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x023C000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x043C000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x083C000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x103C000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x3FBC000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0084000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0104000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0204000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0404000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0804000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x1004000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x3F84000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0090000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0110000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0210000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0410000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0810000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x1010000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x3F90000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x00BC000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x013C000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x023C000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x043C000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x083C000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x103C000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x3FBC000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0084000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0104000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0204000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0404000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0804000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x1004000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x3F84000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0090000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0110000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0210000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0410000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0810000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x1010000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x3F90000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x00BC000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x013C000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x023C000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x043C000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x083C000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x103C000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x3FBC000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0084000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0104000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0204000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0404000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0804000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x1004000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x3F84000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0090000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0110000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0210000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0410000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0810000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x1010000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x3F90000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x00BC000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x013C000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x023C000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x043C000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x083C000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x103C000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x3FBC000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0084008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0104008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0204008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0404008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0804008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x1004008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x3F84008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0090008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0110008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0210008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0410008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0810008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x1010008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x3F90008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x00BC008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD TBD",
+ "MSRValue": "0x013C008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x023C008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD TBD",
+ "MSRValue": "0x043C008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD TBD",
+ "MSRValue": "0x083C008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD TBD",
+ "MSRValue": "0x103C008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD TBD",
+ "MSRValue": "0x3FBC008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0084000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0104000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0204000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0404000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0804000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1004000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F84000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0090000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0110000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0210000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0410000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0810000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1010000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F90000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x00BC000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x013C000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x023C000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x043C000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x083C000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x103C000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0084000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0104000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0204000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0404000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0804000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1004000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F84000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0090000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0110000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0210000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0410000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0810000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1010000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F90000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x00BC000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x013C000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x023C000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x043C000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x083C000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x103C000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0084000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0104000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0204000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0404000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0804000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1004000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F84000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0090000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0110000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0210000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0410000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0810000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1010000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F90000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x00BC000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x013C000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x023C000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x043C000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x083C000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x103C000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0084000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0104000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0204000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0404000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0804000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1004000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F84000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0090000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0110000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0210000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0410000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0810000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1010000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F90000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x00BC000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x013C000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x023C000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x043C000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x083C000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x103C000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x00840007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x01040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x02040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x04040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x08040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x10040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F840007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x00900007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x01100007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x02100007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x04100007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x08100007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x10100007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F900007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x00BC0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x013C0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x023C0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x043C0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x083C0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x103C0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3FBC0007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x063B800001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0604000001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x063B800002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0604000002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x063B800004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0604000004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x063B800010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0604000010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x063B800020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0604000020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x063B800080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0604000080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x063B800100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0604000100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x063B800400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0604000400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x063B808000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0604008000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B800122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0604000122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x063B8007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x06040007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x103FC00001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x103FC00002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x103FC00004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x103FC00010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x103FC00020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x103FC00080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x103FC00100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x103FC00400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x103FC08000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC00122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x103FC007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x083FC00001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x083FC00002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x083FC00004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x083FC00010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x083FC00020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x083FC00080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x083FC00100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x083FC00400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x083FC08000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC00122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x083FC007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ }
+]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
new file mode 100644
index 000000000000..73e27c48bd6e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
@@ -0,0 +1,8908 @@
+[
+ {
+ "EventCode": "0x09",
+ "UMask": "0x1",
+ "Counter": "0,1,2,3",
+ "EventName": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "UMask": "0x7",
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX code, SSE, AVX 128-bit, and low-current AVX 256-bit code.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "UMask": "0x18",
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 1. This includes high-current AVX 256-bit instructions as well as low-current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "UMask": "0x20",
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in the Skylake Server microarchitecture). This includes high-current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x28",
+ "UMask": "0x40",
+ "BriefDescription": "Core cycles the core was throttled due to a pending power level request.",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_POWER.THROTTLE",
+ "PublicDescription": "Core cycles the out-of-order engine was throttled due to a pending power level request.",
+ "SampleAfterValue": "200003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x1",
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x2",
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x4",
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x32",
+ "UMask": "0x8",
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "Counter": "0,1,2,3",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCB",
+ "UMask": "0x1",
+ "BriefDescription": "Number of hardware interrupts received by the processor.",
+ "Counter": "0,1,2,3",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "PublicDescription": "Counts the number of hardware interrupts received by the processor.",
+ "SampleAfterValue": "203",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x1",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x2",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x4",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x8",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x10",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x20",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xEF",
+ "UMask": "0x40",
+ "Counter": "0,1,2,3",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xFE",
+ "UMask": "0x2",
+ "BriefDescription": "Counts the number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
+ "Counter": "0,1,2,3",
+ "EventName": "IDI_MISC.WB_UPGRADE",
+ "PublicDescription": "Counts the number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xFE",
+ "UMask": "0x4",
+ "BriefDescription": "Counts the number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
+ "Counter": "0,1,2,3",
+ "EventName": "IDI_MISC.WB_DOWNGRADE",
+ "PublicDescription": "Counts the number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0080020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0100020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0200020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0400020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0800020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x1000020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x3F80020001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0080040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0100040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0200040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0400040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0800040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x1000040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x3F80040001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0080080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0100080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0200080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0400080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0800080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x1000080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x3F80080001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0080100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0100100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0200100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0400100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0800100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x1000100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and a predefined mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x3F80100001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0080200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0100200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x0200200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0400200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0800200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x1000200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x3F80200001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x00803C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x01003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x02003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x04003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x08003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x10003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD TBD",
+ "MSRValue": "0x3F803C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0080020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0100020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0200020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0400020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0800020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x1000020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x3F80020002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0080040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0100040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0200040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0400040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0800040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x1000040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x3F80040002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0080080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0100080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0200080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0400080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0800080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x1000080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x3F80080002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0080100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0100100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0200100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0400100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0800100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x1000100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x3F80100002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0080200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0100200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x0200200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0400200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0800200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x1000200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x3F80200002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x00803C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x01003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x02003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x04003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x08003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x10003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "MSRValue": "0x3F803C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0080020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0100020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0200020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0400020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0800020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x1000020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x3F80020004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0080040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0100040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0200040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0400040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0800040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x1000040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x3F80040004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0080080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0100080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0200080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0400080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0800080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x1000080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x3F80080004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0080100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0100100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0200100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0400100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0800100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x1000100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x3F80100004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0080200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0100200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x0200200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0400200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0800200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x1000200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x3F80200004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x00803C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD TBD",
+ "MSRValue": "0x01003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x02003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD TBD",
+ "MSRValue": "0x04003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD TBD",
+ "MSRValue": "0x08003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD TBD",
+ "MSRValue": "0x10003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD TBD",
+ "MSRValue": "0x3F803C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0080020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0100020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0200020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0400020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0800020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x1000020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x3F80020010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0080040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0100040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0200040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0400040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0800040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x1000040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x3F80040010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0080080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0100080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0200080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0400080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0800080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x1000080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x3F80080010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0080100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0100100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0200100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0400100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0800100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x1000100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x3F80100010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0080200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0100200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x0200200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0400200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0800200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x1000200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x3F80200010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x00803C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x01003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x02003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x04003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x08003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x10003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "MSRValue": "0x3F803C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0080020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0100020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0200020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0400020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0800020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x1000020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x3F80020020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0080040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0100040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0200040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0400040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0800040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x1000040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x3F80040020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0080080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0100080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0200080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0400080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0800080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x1000080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x3F80080020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0080100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0100100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0200100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0400100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0800100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x1000100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x3F80100020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0080200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0100200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x0200200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0400200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0800200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x1000200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x3F80200020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x00803C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x01003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x02003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x04003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x08003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x10003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "MSRValue": "0x3F803C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0080020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0100020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0200020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0400020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0800020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x1000020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x3F80020080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0080040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0100040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0200040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0400040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0800040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x1000040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x3F80040080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0080080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0100080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0200080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0400080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0800080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x1000080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x3F80080080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0080100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0100100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0200100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0400100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0800100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x1000100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x3F80100080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0080200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0100200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x0200200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0400200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0800200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x1000200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x3F80200080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x00803C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x01003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x02003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x04003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x08003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x10003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "MSRValue": "0x3F803C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
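(Editor's note: the PublicDescription repeated above applies to every OCR.* entry in this family: the event-select/counter pair, event code 0xB7 or 0xBB, selects an offcore-response slot, and the MSRValue is written to the matching dedicated MSR, 0x1A6 or 0x1A7, to describe the transaction. As a hedged illustration, assuming a Linux box whose perf build picks up this event list, an entry such as OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP above could be counted either by its symbolic name or by spelling out the raw encoding, where the cpu PMU's offcore_rsp format field carries the MSRValue:

  # By symbolic name, if this JSON is installed for the running CPU:
  perf stat -e OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP -a -- sleep 1

  # Equivalent raw form: event 0xB7, umask 0x1, MSRValue 0x3F803C0080:
  perf stat -e cpu/event=0xb7,umask=0x1,offcore_rsp=0x3F803C0080/ -a -- sleep 1

Exact event availability depends on the CPU model; the MSRValue is taken verbatim from the entry above.)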
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0080020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0100020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0200020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0400020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0800020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x1000020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x3F80020100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0080040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0100040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0200040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0400040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0800040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x1000040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x3F80040100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0080080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0100080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0200080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0400080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0800080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x1000080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x3F80080100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0080100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0100100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0200100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0400100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0800100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x1000100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x3F80100100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0080200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0100200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x0200200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0400200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0800200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x1000200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x3F80200100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x00803C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x01003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x02003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x04003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x08003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x10003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "MSRValue": "0x3F803C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
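(Editor's note: for programmatic use, these fields map directly onto struct perf_event_attr: UMask and EventCode combine into attr.config, and MSRValue goes into attr.config1, which the kernel programs into the offcore-response MSR. A minimal sketch follows, assuming Linux perf_event_open; OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP with MSRValue 0x3F803C0100 from the entry above is used purely as an example, not as a recommended measurement.

  #include <linux/perf_event.h>
  #include <sys/ioctl.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <string.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
  	struct perf_event_attr attr;
  	uint64_t count;
  	int fd;

  	memset(&attr, 0, sizeof(attr));
  	attr.size = sizeof(attr);
  	attr.type = PERF_TYPE_RAW;
  	attr.config = 0x01B7;           /* umask 0x1 in bits 8-15, event 0xB7 in bits 0-7 */
  	attr.config1 = 0x3F803C0100ULL; /* MSRValue for the dedicated offcore-response MSR */
  	attr.disabled = 1;
  	attr.exclude_kernel = 1;

  	/* Measure this thread on any CPU; perf_event_open has no glibc wrapper. */
  	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
  	if (fd < 0) {
  		perror("perf_event_open");
  		return 1;
  	}

  	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
  	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
  	/* ... workload under measurement ... */
  	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

  	if (read(fd, &count, sizeof(count)) == sizeof(count))
  		printf("offcore responses: %llu\n", (unsigned long long)count);
  	close(fd);
  	return 0;
  }

Since only a limited number of offcore-response slots exist per core, scheduling two such events with different MSRValues may force the kernel to multiplex them.)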
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0080020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0100020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0200020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0400020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0800020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x1000020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x3F80020400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0080040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0100040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0200040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0400040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0800040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x1000040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x3F80040400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0080080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0100080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0200080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0400080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0800080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x1000080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x3F80080400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0080100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0100100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0200100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0400100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0800100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x1000100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x3F80100400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0080200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0100200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x0200200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0400200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0800200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x1000200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x3F80200400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x00803C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x01003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x02003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x04003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x08003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x10003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRValue": "0x3F803C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
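+
The three dotted components of each EventName map onto three bit fields of the MSRValue, and the field encodings can be read directly off this table: the low 16 bits select the request type (0x400, bit 10, for the PF_L1D_AND_SW group above; the OCR.OTHER group below uses 0x8000, bit 15), bits 17-21 select the supplier / L3-hit state (SUPPLIER_NONE 0x020000, L3_HIT_M 0x040000, L3_HIT_E 0x080000, L3_HIT_S 0x100000, L3_HIT_F 0x200000, and L3_HIT = all four hit states = 0x3C0000), and bits 31-37 select the snoop response (SNOOP_NONE at bit 31 up through ANY_SNOOP = 0x3F80000000). A small sketch that rebuilds one listed value from those fields; the macro names are shorthand inferred from this table, not official definitions:

    #include <stdio.h>
    #include <stdint.h>

    /* Field encodings inferred from the MSRValue column above. */
    #define REQ_PF_L1D_AND_SW   (1ULL << 10)    /* 0x0000000400 */
    #define SUP_L3_HIT_M        (1ULL << 18)    /* 0x0000040000 */
    #define SUP_L3_HIT_ANY      (0xFULL << 18)  /* 0x00003C0000 */
    #define SNP_ANY_SNOOP       (0x7FULL << 31) /* 0x3F80000000 */

    int main(void)
    {
            /* Reproduces OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP above. */
            uint64_t msr = REQ_PF_L1D_AND_SW | SUP_L3_HIT_M | SNP_ANY_SNOOP;

            printf("0x%010llX\n", (unsigned long long)msr); /* 0x3F80040400 */
            return 0;
    }

Every other MSRValue in these groups is the same request bit combined with a different supplier/snoop selection, which is why the entries differ only in that one field.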
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0080028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0100028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0200028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0400028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0800028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x1000028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x3F80028000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0080048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0100048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0200048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0400048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0800048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x1000048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x3F80048000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0080088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0100088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0200088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0400088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0800088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x1000088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x3F80088000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0080108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0100108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0200108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0400108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0800108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x1000108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x3F80108000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0080208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0100208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x0200208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0400208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0800208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x1000208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x3F80208000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x00803C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD TBD",
+ "MSRValue": "0x01003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x02003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD TBD",
+ "MSRValue": "0x04003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD TBD",
+ "MSRValue": "0x08003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD TBD",
+ "MSRValue": "0x10003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD TBD",
+ "MSRValue": "0x3F803C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
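+
The OCR.ALL_PF_DATA_RD group that follows keeps the supplier and snoop fields exactly as above but widens the request field to 0x490, i.e. three prefetch-data-read request bits ORed together. Bit 10 (PF_L1D_AND_SW) is named earlier in this file; reading bits 4 and 7 as the L2 and L3 prefetcher data reads is an inference from the event name, not something this file states. A one-line check of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Bits 4, 7 and 10; only bit 10 (PF_L1D_AND_SW) is named in
             * this file -- the other two names are assumptions drawn from
             * the ALL_PF_DATA_RD event name. */
            uint64_t all_pf_data_rd = (1ULL << 4) | (1ULL << 7) | (1ULL << 10);

            printf("0x%03llX\n", (unsigned long long)all_pf_data_rd); /* 0x490 */
            return 0;
    }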
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80020490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80040490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80080490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80100490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80200490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x00803C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x02003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x08003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
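The aggregate masks are plain unions of the narrower ones, so the broadest ALL_PF_DATA_RD entry above can be rebuilt from its parts. A quick consistency check in the same vein as the sketch above, using the masks listed in the entries here:

# ANY_SNOOP and L3_HIT are unions of the individual bits (values as
# listed in this file); together with the request mask they reproduce
# the MSRValue of OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP above.
ANY_SNOOP = 0x3F80000000
L3_HIT = 0x040000 | 0x080000 | 0x100000 | 0x200000  # == 0x3C0000, M|E|S|F
ALL_PF_DATA_RD = 0x0490
assert ANY_SNOOP | L3_HIT | ALL_PF_DATA_RD == 0x3F803C0490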
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80020120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80040120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80080120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80100120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80200120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x00803C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x02003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x08003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
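Comparing request masks across groups shows that the ALL_* variants simply add the demand bits to the prefetch-only masks: ALL_DATA_RD (0x...0491, next group) is ALL_PF_DATA_RD (0x...0490) plus bit 0, and ALL_RFO (0x...0122, further below) is ALL_PF_RFO (0x...0120) plus bit 1. The bit positions are inferred from the MSRValues in this file; the demand-read/demand-RFO reading of those bits follows Intel's documentation:

# Inferred from the MSRValues in this file; bit meanings per Intel docs.
assert 0x0491 ^ 0x0490 == 1 << 0  # ALL_DATA_RD = ALL_PF_DATA_RD + demand data read
assert 0x0122 ^ 0x0120 == 1 << 1  # ALL_RFO     = ALL_PF_RFO     + demand RFO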
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80020491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80040491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80080491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80100491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80200491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x00803C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x02003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x08003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
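A consumer of these entries does not program the MSRs by hand: on Intel PMUs the perf tool exposes the dedicated MSR through its offcore_rsp format term (carried in config1), so an entry can be requested either by its EventName once these files are built into perf, or as a raw event spec. A small sketch, assuming that format term is present on the target PMU (the helper to_perf_spec is illustrative):

def to_perf_spec(entry):
    # Either event code of the pair works; its MSR partner is implied.
    event = entry["EventCode"].split(",")[0].strip()
    return "cpu/event=%s,umask=%s,offcore_rsp=%s/" % (
        event, entry["UMask"], entry["MSRValue"])

entry = {"EventCode": "0xB7, 0xBB", "UMask": "0x1", "MSRValue": "0x3F803C0491"}
print(to_perf_spec(entry))  # cpu/event=0xB7,umask=0x1,offcore_rsp=0x3F803C0491/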
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80020122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSRs, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80040122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80080122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80100122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0080200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x0200200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0400200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0800200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x1000200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80200122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x00803C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x02003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x08003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x00800207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x01000207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x02000207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x04000207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x08000207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x10000207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F800207F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x00800407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x01000407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x02000407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x04000407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x08000407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x10000407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F800407F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x00800807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x01000807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x02000807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x04000807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x08000807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x10000807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F800807F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x00801007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x01001007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x02001007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x04001007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x08001007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x10001007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F801007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x00802007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x01002007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x02002007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x04002007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x08002007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x10002007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F802007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x00803C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x01003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x02003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x04003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x08003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x10003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD TBD",
+ "MSRValue": "0x3F803C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads that have any response type.",
+ "MSRValue": "0x0000010001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and for which sibling core snoops are not needed, as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads",
+ "MSRValue": "0x08007C0001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
+ "MSRValue": "0x0000010002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and for which sibling core snoops are not needed, as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "MSRValue": "0x08007C0002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads that have any response type.",
+ "MSRValue": "0x0000010004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and for which sibling core snoops are not needed, as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads",
+ "MSRValue": "0x08007C0004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.",
+ "MSRValue": "0x0000010010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and for which sibling core snoops are not needed, as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "MSRValue": "0x08007C0010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.",
+ "MSRValue": "0x0000010020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and for which sibling core snoops are not needed, as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "MSRValue": "0x08007C0020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
+ "MSRValue": "0x0000010080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and for which sibling core snoops are not needed, as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRValue": "0x08007C0080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
+ "MSRValue": "0x0000010100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and for which sibling core snoops are not needed, as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "MSRValue": "0x08007C0100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type.",
+ "MSRValue": "0x0000010400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and for which sibling core snoops are not needed, as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefined mask bit values in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "MSRValue": "0x08007C0400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests have any response type.",
+ "MSRValue": "0x0000018000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests",
+ "MSRValue": "0x08007C8000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x0000010122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C0122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD have any response type.",
+ "MSRValue": "0x00000107F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
+ "MSRValue": "0x01003C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD",
+ "MSRValue": "0x08007C07F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0100400001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x0080400001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0100400002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x0080400002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0100400004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x0080400004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0100400010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x0080400010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0100400020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x0080400020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0100400080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x0080400080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0100400100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x0080400100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0100400400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x0080400400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0100408000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x0080408000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100400490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0080400490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100400120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0080400120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100400491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0080400491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0100400122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x0080400122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x01004007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x00804007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts demand data reads TBD",
+ "MSRValue": "0x3F80400001",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand data writes (RFOs) TBD",
+ "MSRValue": "0x3F80400002",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all demand code reads TBD",
+ "MSRValue": "0x3F80400004",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "MSRValue": "0x3F80400010",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "MSRValue": "0x3F80400020",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "MSRValue": "0x3F80400080",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "MSRValue": "0x3F80400100",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRValue": "0x3F80400400",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "Counts any other requests TBD",
+ "MSRValue": "0x3F80408000",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80400490",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80400120",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80400491",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F80400122",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "Offcore": "1",
+ "EventCode": "0xB7, 0xBB",
+ "UMask": "0x1",
+ "BriefDescription": "TBD TBD",
+ "MSRValue": "0x3F804007F7",
+ "Counter": "0,1,2,3",
+ "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3"
+ }
+] \ No newline at end of file
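The OCR.* entries above all follow one encoding scheme: "EventCode"/"UMask" select one of the two offcore-response event slots (0xB7 or 0xBB), and "MSRValue" is the request/supplier/snoop bit mask written to the dedicated MSR named by "MSRIndex" (0x1a6 or 0x1a7). On Linux, the perf subsystem programs that mask through the raw event's config1 field. Below is a minimal C sketch, not part of the patch, of counting one of these events directly via perf_event_open(2); the choice of OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP and the assumption that config1 carries the offcore response mask for raw x86 events are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper: glibc provides no perf_event_open() symbol. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	/* EventCode 0xB7, UMask 0x1: the first offcore-response slot. */
	attr.config = (0x1ULL << 8) | 0xb7;
	/* MSRValue for OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP, taken
	 * from the JSON above; the kernel writes it to MSR 0x1a6
	 * (assumption: config1 maps to the offcore response MSR). */
	attr.config1 = 0x3F804007F7ULL;
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload to be measured here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore responses: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

Once these JSON files are built into the perf tool, the same event should also be selectable by name (perf stat -e OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP) or in raw syntax (cpu/event=0xb7,umask=0x1,offcore_rsp=0x3F804007F7/).
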
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
new file mode 100644
index 000000000000..5b7df05f900c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
@@ -0,0 +1,969 @@
+[
+ {
+ "EventCode": "0x00",
+ "UMask": "0x1",
+ "BriefDescription": "Instructions retired from execution.",
+ "Counter": "Fixed counter 0",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 0"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x2",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "Fixed counter 1",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 1"
+ },
+ {
+ "EventCode": "0x00",
+ "UMask": "0x3",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "Counter": "Fixed counter 2",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "Fixed counter 2"
+ },
+ {
+ "EventCode": "0x03",
+ "UMask": "0x2",
+ "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded .",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:a. preceding store conflicts with the load (incomplete overlap),b. store forwarding is impossible due to u-arch limitations,c. preceding lock RMW operations are not forwarded,d. store has the no-forward bit set (uncacheable/page-split/masked stores),e. all-blocking stores are used (mostly, fences and port I/O), and others.The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x03",
+ "UMask": "0x8",
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x07",
+ "UMask": "0x1",
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "Counter": "0,1,2,3",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0D",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
+ "Counter": "0,1,2,3",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x1",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x2",
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x0E",
+ "UMask": "0x20",
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x14",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "Counter": "0,1,2,3",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0x3C",
+ "UMask": "0x0",
+ "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
+ "CounterMask": "1",
+ "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "25003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "25003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "AnyThread": "1",
+ "SampleAfterValue": "25003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x1",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "25003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "25003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x3C",
+ "UMask": "0x2",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "Counter": "0,1,2,3",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "25003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4C",
+ "UMask": "0x1",
+ "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "Counter": "0,1,2,3",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x59",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
+ "Counter": "0,1,2,3",
+ "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+ "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "Invert": "1",
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "CounterMask": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x5E",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "Counter": "0,1,2,3",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x87",
+ "UMask": "0x1",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "Counter": "0,1,2,3",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x20",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA1",
+ "UMask": "0x80",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xa2",
+ "UMask": "0x1",
+ "BriefDescription": "Resource-related stall cycles",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "Counts resource-related stall cycles.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA2",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "Counter": "0,1,2,3",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x4",
+ "BriefDescription": "Total execution stalls.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x5",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "CounterMask": "5",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "CounterMask": "8",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0xc",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "CounterMask": "12",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "CounterMask": "16",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA3",
+ "UMask": "0x14",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "CounterMask": "20",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xA6",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA6",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA6",
+ "UMask": "0x4",
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA6",
+ "UMask": "0x8",
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA6",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "Counter": "0,1,2,3",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA6",
+ "UMask": "0x40",
+ "BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
+ "Counter": "0,1,2,3",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "MSRValue": "0x00",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "CounterMask": "4",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xA8",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "Counter": "0,1,2,3",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "CounterMask": "1",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "CounterMask": "4",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "CounterMask": "3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "CounterMask": "2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x1",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Number of uops executed on the core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Number of uops executed from any thread.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "CounterMask": "4",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "CounterMask": "3",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "CounterMask": "2",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "CounterMask": "1",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xB1",
+ "UMask": "0x10",
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x0",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Counter": "0,1,2,3",
+ "EventName": "INST_RETIRED.ANY_P",
+ "Errata": "SKL091, SKL044",
+ "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC0",
+ "UMask": "0x1",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "PEBS": "2",
+ "Counter": "1",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "Errata": "SKL091, SKL044",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "1"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xC0",
+ "UMask": "0x1",
+ "BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
+ "PEBS": "2",
+ "Counter": "0,2,3",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "CounterMask": "10",
+ "Errata": "SKL091, SKL044",
+ "PublicDescription": "Number of cycles using an always true condition applied to PEBS instructions retired event. (inst_ret< 16)",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,2,3"
+ },
+ {
+ "EventCode": "0xC1",
+ "UMask": "0x3f",
+ "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "Counter": "0,1,2,3",
+ "EventName": "OTHER_ASSISTS.ANY",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "CounterMask": "10",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "Invert": "1",
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "CounterMask": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC2",
+ "UMask": "0x2",
+ "BriefDescription": "Retirement slots used.",
+ "Counter": "0,1,2,3",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "Counts the retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "UMask": "0x1",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "CounterMask": "1",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC3",
+ "UMask": "0x4",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "Counter": "0,1,2,3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x0",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "Errata": "SKL091",
+ "PublicDescription": "Counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x1",
+ "BriefDescription": "Conditional branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "Errata": "SKL091",
+ "PublicDescription": "This event counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x2",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "Errata": "SKL091",
+ "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x4",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "Errata": "SKL091",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x8",
+ "BriefDescription": "Return instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "Errata": "SKL091",
+ "PublicDescription": "This event counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x10",
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "Errata": "SKL091",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x20",
+ "BriefDescription": "Taken branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "Errata": "SKL091",
+ "PublicDescription": "This event counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC4",
+ "UMask": "0x40",
+ "BriefDescription": "Far branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "Errata": "SKL091",
+ "PublicDescription": "This event counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x0",
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x1",
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x2",
+ "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "PublicDescription": "Counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x4",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "PEBS": "2",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3"
+ },
+ {
+ "EventCode": "0xC5",
+ "UMask": "0x20",
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "PEBS": "1",
+ "Counter": "0,1,2,3",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "SampleAfterValue": "400009",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCC",
+ "UMask": "0x20",
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "Counter": "0,1,2,3",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xCC",
+ "UMask": "0x40",
+ "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "Counter": "0,1,2,3",
+ "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xE6",
+ "UMask": "0x1",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "Counter": "0,1,2,3",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
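For readers following along: perf's jevents build step compiles these JSON tables into the tool, after which each EventName above is accepted (case-insensitively) as a symbolic event. A minimal Python sketch of how the new port-dispatch events could be turned into a perf stat invocation; the local file path and the sleep workload are hypothetical:

import json

# Hypothetical local copy of the pipeline.json file added above.
with open("pipeline.json") as f:
    events = json.load(f)

# Collect the per-port dispatch events (UOPS_DISPATCHED_PORT.PORT_*).
ports = sorted(e["EventName"].lower() for e in events
               if e["EventName"].startswith("UOPS_DISPATCHED_PORT."))

# perf accepts the lowercase names once the tables are built in, e.g.
#   perf stat -e uops_dispatched_port.port_2,... -- sleep 1
print("perf stat -e " + ",".join(ports) + " -- sleep 1")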
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
new file mode 100644
index 000000000000..22df833fe032
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
@@ -0,0 +1,117 @@
+[
+ {
+ "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "LLC_MISSES.MEM_READ",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x4",
+ "EventName": "LLC_MISSES.MEM_WRITE",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xC",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Memory controller clock ticks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode+C37",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x85",
+ "EventName": "UNC_M_POWER_CHANNEL_PPD",
+ "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
+ "MetricName": "power_channel_ppd %",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles Memory is in self refresh power mode",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x43",
+ "EventName": "UNC_M_POWER_SELF_REFRESH",
+ "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
+ "MetricName": "power_self_refresh %",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pre-charges due to page misses",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pre-charge for reads",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pre-charge for writes",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth read (MB/sec). Derived from unc_m_pmm_rpq_inserts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_BANDWIDTH.READ",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625E-5MB/sec",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth write (MB/sec). Derived from unc_m_pmm_wpq_inserts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE7",
+ "EventName": "UNC_M_PMM_BANDWIDTH.WRITE",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625E-5MB/sec",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth total (MB/sec). Derived from unc_m_pmm_rpq_inserts",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_BANDWIDTH.TOTAL",
+ "MetricExpr": "UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS",
+ "MetricName": "UNC_M_PMM_BANDWIDTH.TOTAL",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625E-5MB/sec",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Intel Optane DC persistent memory read latency (ns). Derived from unc_m_pmm_rpq_occupancy.all",
+ "Counter": "0,1,2,3",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_READ_LATENCY",
+ "MetricExpr": "UNC_M_PMM_RPQ_OCCUPANCY.ALL / UNC_M_PMM_RPQ_INSERTS / UNC_M_CLOCKTICKS",
+ "MetricName": "UNC_M_PMM_READ_LATENCY",
+ "PerPkg": "1",
+ "ScaleUnit": "6000000000ns",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ }
+]
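The ScaleUnit fields are what turn raw counts into readable units: a CAS read or write moves one 64-byte cache line, and the PMM bandwidth events are scaled by 6.103515625E-5 MB, which is exactly 64 bytes expressed in megabytes (64 / 2**20). A small sketch of that arithmetic, assuming counter deltas and an elapsed time are already in hand:

def dram_read_bw_mb_s(cas_rd: int, seconds: float) -> float:
    # LLC_MISSES.MEM_READ has ScaleUnit "64Bytes": one CAS read is one
    # 64-byte cache-line transfer.
    return cas_rd * 64 / seconds / 1e6

def pmm_read_bw_mb_s(rpq_inserts: int, seconds: float) -> float:
    # UNC_M_PMM_BANDWIDTH.READ is scaled by 6.103515625e-5 MB, i.e.
    # 64 / 2**20: each RPQ insert is again a 64-byte transfer.
    return rpq_inserts * 6.103515625e-5 / seconds

# Hypothetical deltas over a one-second window:
print(dram_read_bw_mb_s(10_000_000, 1.0))  # 640.0 (MB/s)
print(pmm_read_bw_mb_s(1_000_000, 1.0))    # ~61.0 (MB/s)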
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
new file mode 100644
index 000000000000..cab355872dff
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
@@ -0,0 +1,255 @@
+[
+ {
+ "BriefDescription": "Uncore cache clock ticks",
+ "Counter": "0,1,2,3",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.UNCACHEABLE",
+ "Filter": "config1=0x40e33",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.MMIO_READ",
+ "Filter": "config1=0x40040e33",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.MMIO_WRITE",
+ "Filter": "config1=0x40041e33",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_FULL",
+ "Filter": "config1=0x41833",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
+ "Filter": "config1=0x41a33",
+ "PerPkg": "1",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "UMask": "0x03",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from local home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from remote home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x02",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x0C",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from local home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from remote home agent",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
+ "Counter": "0,1,2,3",
+ "EventCode": "0x2",
+ "EventName": "UPI_DATA_BANDWIDTH_TX",
+ "PerPkg": "1",
+ "ScaleUnit": "7.11E-06Bytes",
+ "UMask": "0x0F",
+ "Unit": "UPI LL"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_READ",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "MetricName": "LLC_MISSES.PCIE_READ",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_WRITE",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "MetricName": "LLC_MISSES.PCIE_WRITE",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "MetricName": "LLC_MISSES.PCIE_WRITE",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x01",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "MetricName": "LLC_MISSES.PCIE_READ",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
+ "Counter": "0,1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x04",
+ "Unit": "IIO"
+ }
+]
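The PCIe bandwidth events follow the same pattern but are spread across four port parts: the MetricExpr sums PART0..PART3, and ScaleUnit "4Bytes" converts transactions to bytes. An illustrative sketch under those assumptions:

def pcie_read_bw_mb_s(parts, seconds: float) -> float:
    # Mirrors LLC_MISSES.PCIE_READ's MetricExpr: sum the four
    # UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PARTn counts, 4 bytes each.
    return sum(parts) * 4 / seconds / 1e6

# Hypothetical per-part deltas over one second:
print(pcie_read_bw_mb_s([2_500_000, 0, 0, 0], 1.0))  # 10.0 (MB/s)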
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
new file mode 100644
index 000000000000..579733168e23
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
@@ -0,0 +1,285 @@
+[
+ {
+ "EventCode": "0x08",
+ "UMask": "0x1",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x2",
+ "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x4",
+ "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x8",
+ "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0xe",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x10",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a load.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x08",
+ "UMask": "0x20",
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x1",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x2",
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x4",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x8",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0xe",
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x10",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "CounterMask": "1",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x49",
+ "UMask": "0x20",
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "Counter": "0,1,2,3",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x4F",
+ "UMask": "0x10",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
+ "Counter": "0,1,2,3",
+ "EventName": "EPT.WALK_PENDING",
+ "PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
+ "SampleAfterValue": "2000003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x1",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x2",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x4",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x8",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0xe",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x10",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x10",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
+ "MSRValue": "0x00",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "CounterMask": "1",
+ "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0x85",
+ "UMask": "0x20",
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xAE",
+ "UMask": "0x1",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "Counter": "0,1,2,3",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBD",
+ "UMask": "0x1",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "Counter": "0,1,2,3",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ },
+ {
+ "EventCode": "0xBD",
+ "UMask": "0x20",
+ "BriefDescription": "STLB flush attempts",
+ "Counter": "0,1,2,3",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "SampleAfterValue": "100007",
+ "CounterHTOff": "0,1,2,3,4,5,6,7"
+ }
+] \ No newline at end of file
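A note on the WALK_PENDING/WALK_ACTIVE pairs above: WALK_PENDING adds one per cycle for each busy page-miss handler, and WALK_ACTIVE applies CounterMask 1 to turn that into cycles with at least one walk in flight. The file itself defines no latency metric, but a common back-of-the-envelope derivation (illustrative only, and inexact when walks overlap across PMHs) is:

def avg_walk_cycles(walk_pending: int, walk_completed: int) -> float:
    # Busy-PMH cycles divided by completed walks approximates the
    # average duration of a demand-load page walk.
    return walk_pending / walk_completed if walk_completed else 0.0

def walk_busy_fraction(walk_pending: int, cycles: int) -> float:
    # Share of cycles spent with the equivalent of one PMH busy.
    return walk_pending / cycles if cycles else 0.0

print(avg_walk_cycles(2_000_000, 50_000))            # 40.0 cycles/walk
print(walk_busy_fraction(2_000_000, 1_000_000_000))  # 0.002 (0.2%)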
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/cache.json b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
index f723e8f7bb09..ee22e4a5e30d 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
@@ -31,7 +31,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
@@ -42,7 +42,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
index 8a597e45ed84..34a519d9bfa0 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
@@ -778,7 +778,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
index 88ba5994b994..e434ec723001 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
@@ -121,7 +121,7 @@
"EventName": "OFFCORE_RESPONSE.ANY_PF_L2.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -187,7 +187,7 @@
"EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -253,7 +253,7 @@
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -319,7 +319,7 @@
"EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -385,7 +385,7 @@
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -451,7 +451,7 @@
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -539,7 +539,7 @@
"EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -605,7 +605,7 @@
"EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -682,7 +682,7 @@
"EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -748,7 +748,7 @@
"EventName": "OFFCORE_RESPONSE.UC_CODE_READS.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -869,7 +869,7 @@
"EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -935,7 +935,7 @@
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -1067,7 +1067,7 @@
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -1133,7 +1133,7 @@
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
@@ -1199,7 +1199,7 @@
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0. ",
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0. ",
"Offcore": "1"
},
{
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 7e3cce3bcf3b..e05c2c8458fc 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -31,4 +31,5 @@ GenuineIntel-6-2A,v15,sandybridge,core
GenuineIntel-6-2C,v2,westmereep-dp,core
GenuineIntel-6-25,v2,westmereep-sp,core
GenuineIntel-6-2F,v2,westmereex,core
-GenuineIntel-6-55,v1,skylakex,core
+GenuineIntel-6-55-[01234],v1,skylakex,core
+GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
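The two new rows split model 0x55 by stepping: steppings 0-4 stay skylakex, steppings 5 and up become cascadelakex. The mapfile model column is matched against the CPUID identifier string as a regular expression, which is what makes the bracketed stepping classes work; a minimal standalone sketch of that idea (illustrative only, not perf's actual matcher):

	#include <regex.h>
	#include <stdio.h>

	int main(void)
	{
		/* The two stepping-qualified rows added above. */
		const char *rows[2][2] = {
			{ "GenuineIntel-6-55-[01234]",       "skylakex"     },
			{ "GenuineIntel-6-55-[56789ABCDEF]", "cascadelakex" },
		};
		const char *cpuid = "GenuineIntel-6-55-7"; /* hypothetical stepping-7 part */
		int i;

		for (i = 0; i < 2; i++) {
			regex_t re;

			if (regcomp(&re, rows[i][0], REG_EXTENDED | REG_NOSUB))
				continue;
			if (regexec(&re, cpuid, 0, NULL, 0) == 0)
				printf("%s -> %s\n", cpuid, rows[i][1]);
			regfree(&re);
		}
		return 0; /* prints: GenuineIntel-6-55-7 -> cascadelakex */
	}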
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
index bef73c499f83..16b04a20bc12 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
@@ -31,7 +31,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-split load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x41",
@@ -42,7 +42,7 @@
},
{
"PEBS": "1",
- "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "PublicDescription": "This event counts line-split store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"EventCode": "0xD0",
"Counter": "0,1,2,3",
"UMask": "0x42",
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
index 8a597e45ed84..34a519d9bfa0 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
@@ -778,7 +778,7 @@
"CounterHTOff": "0,1,2,3,4,5,6,7"
},
{
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
"EventCode": "0x03",
"Counter": "0,1,2,3",
"UMask": "0x2",
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 36c903faed0b..71e9737f4614 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -73,7 +73,7 @@
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index 36c903faed0b..71e9737f4614 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -73,7 +73,7 @@
},
{
"BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS_PS + MEM_LOAD_RETIRED.FB_HIT_PS )",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
"MetricGroup": "Memory_Bound;Memory_Lat",
"MetricName": "Load_Miss_Real_Latency"
},
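The _PS (precise) aliases dropped here are presumably absent from the skylake/skylakex event lists, which left the metric unresolvable; the plain event names carry the same counts. As a worked example with illustrative numbers (not measured data): if L1D_PEND_MISS.PENDING = 1,000,000 (miss-outstanding cycles), MEM_LOAD_RETIRED.L1_MISS = 40,000 and MEM_LOAD_RETIRED.FB_HIT = 10,000, then

	Load_Miss_Real_Latency = 1,000,000 / (40,000 + 10,000) = 20 cycles

per L1 data-cache miss demand load.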
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
index de6e70e552e2..adb42c72f5c8 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
@@ -428,7 +428,7 @@
"EventCode": "0x5C",
"EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
"PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to it's home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This reponse will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
+ "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to it's home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This response will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
"UMask": "0x10",
"Unit": "CHA"
},
@@ -967,7 +967,7 @@
"EventCode": "0x57",
"EventName": "UNC_M2M_PREFCAM_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) recieves a prefetch request and inserts it into its outstanding prefetch queue. Explanatory Side Note: the prefect queue is made from CAM: Content Addressable Memory",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) receives a prefetch request and inserts it into its outstanding prefetch queue. Explanatory Side Note: the prefect queue is made from CAM: Content Addressable Memory",
"Unit": "M2M"
},
{
@@ -1041,7 +1041,7 @@
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
"PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot0 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot0 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x1",
"Unit": "UPI LL"
},
@@ -1051,17 +1051,17 @@
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
"PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot1 RxQ buffer (Receive Queue) and passed directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot1 RxQ buffer (Receive Queue) and passed directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x2",
"Unit": "UPI LL"
},
{
- "BriefDescription": "FLITs received which bypassed the Slot0 Recieve Buffer",
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
"Counter": "0,1,2,3",
"EventCode": "0x31",
"EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
"PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) whcih bypassed the slot2 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot2 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
"UMask": "0x4",
"Unit": "UPI LL"
},
diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
index 05dfe11c2f9e..d8426547219b 100644
--- a/tools/perf/tests/attr.c
+++ b/tools/perf/tests/attr.c
@@ -182,7 +182,7 @@ int test__attr(struct test *test __maybe_unused, int subtest __maybe_unused)
char path_perf[PATH_MAX];
char path_dir[PATH_MAX];
- /* First try developement tree tests. */
+ /* First try development tree tests. */
if (!lstat("./tests", &st))
return run_dir("./tests", "./perf");
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index ff9b60b99f52..44090a9a19f3 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -116,7 +116,7 @@ class Event(dict):
if not self.has_key(t) or not other.has_key(t):
continue
if not data_equal(self[t], other[t]):
- log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
+ log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record
index 37940665f736..efd0157b9d22 100644
--- a/tools/perf/tests/attr/base-record
+++ b/tools/perf/tests/attr/base-record
@@ -9,7 +9,7 @@ size=112
config=0
sample_period=*
sample_type=263
-read_format=0
+read_format=0|4
disabled=1
inherit=1
pinned=0
diff --git a/tools/perf/tests/bp_signal.c b/tools/perf/tests/bp_signal.c
index a467615c5a0e..910e25e64188 100644
--- a/tools/perf/tests/bp_signal.c
+++ b/tools/perf/tests/bp_signal.c
@@ -291,12 +291,20 @@ int test__bp_signal(struct test *test __maybe_unused, int subtest __maybe_unused
bool test__bp_signal_is_supported(void)
{
-/*
- * The powerpc so far does not have support to even create
- * instruction breakpoint using the perf event interface.
- * Once it's there we can release this.
- */
-#if defined(__powerpc__) || defined(__s390x__)
+ /*
+ * PowerPC and S390 do not support creation of instruction
+ * breakpoints using the perf_event interface.
+ *
+ * ARM requires explicit rounding down of the instruction
+ * pointer in Thumb mode, and then requires the single-step
+ * to be handled explicitly in the overflow handler to avoid
+ * stepping into the SIGIO handler and getting stuck on the
+ * breakpointed instruction.
+ *
+ * Just disable the test for these architectures until these
+ * issues are resolved.
+ */
+#if defined(__powerpc__) || defined(__s390x__) || defined(__arm__)
return false;
#else
return true;
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 12c09e0ece71..9852b5d624a5 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -424,6 +424,9 @@ static const char *shell_test__description(char *description, size_t size,
if (!fp)
return NULL;
+	/* Skip the shebang line, stopping at EOF in case there is no newline */
+	while (!feof(fp) && fgetc(fp) != '\n');
+
description = fgets(description, size, fp);
fclose(fp);
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 6b049f3f5cf4..dbf2c69944d2 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -599,7 +599,7 @@ static int do_test_code_reading(bool try_kcore)
}
ret = perf_event__synthesize_thread_map(NULL, threads,
- perf_event__process, machine, false, 500);
+ perf_event__process, machine, false);
if (ret < 0) {
pr_debug("perf_event__synthesize_thread_map failed\n");
goto out_err;
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 2f008067d989..7c8d2e422401 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -34,7 +34,7 @@ static int init_live_machine(struct machine *machine)
pid_t pid = getpid();
return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
- mmap_handler, machine, true, 500);
+ mmap_handler, machine, true);
}
/*
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index b1af2499a3c9..5ede9b561d32 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -132,7 +132,7 @@ static int synth_all(struct machine *machine)
{
return perf_event__synthesize_threads(NULL,
perf_event__process,
- machine, 0, 500, 1);
+ machine, 0, 1);
}
static int synth_process(struct machine *machine)
@@ -144,7 +144,7 @@ static int synth_process(struct machine *machine)
err = perf_event__synthesize_thread_map(NULL, map,
perf_event__process,
- machine, 0, 500);
+ machine, 0);
thread_map__put(map);
return err;
diff --git a/tools/perf/tests/perf-record.c b/tools/perf/tests/perf-record.c
index 34394cc05077..07f6bd8ed719 100644
--- a/tools/perf/tests/perf-record.c
+++ b/tools/perf/tests/perf-record.c
@@ -58,6 +58,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
char *bname, *mmap_filename;
u64 prev_time = 0;
bool found_cmd_mmap = false,
+ found_coreutils_mmap = false,
found_libc_mmap = false,
found_vdso_mmap = false,
found_ld_mmap = false;
@@ -254,6 +255,8 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
if (bname != NULL) {
if (!found_cmd_mmap)
found_cmd_mmap = !strcmp(bname + 1, cmd);
+ if (!found_coreutils_mmap)
+ found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
if (!found_libc_mmap)
found_libc_mmap = !strncmp(bname + 1, "libc", 4);
if (!found_ld_mmap)
@@ -292,7 +295,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
}
found_exit:
- if (nr_events[PERF_RECORD_COMM] > 1) {
+ if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
++errs;
}
@@ -302,7 +305,7 @@ found_exit:
++errs;
}
- if (!found_cmd_mmap) {
+ if (!found_cmd_mmap && !found_coreutils_mmap) {
pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
++errs;
}
diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh
index 9b7635184dc2..46e076e3c537 100755
--- a/tools/perf/tests/shell/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/probe_vfs_getname.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
# Add vfs_getname probe to get syscall args filenames
#
# Arnaldo Carvalho de Melo <acme@kernel.org>, 2017
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index cab7b0aea6ea..61c9f8fc6fa1 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
# probe libc's inet_pton & backtrace it with ping
# Installs a probe on libc's inet_pton function, that will use uprobes,
diff --git a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
index ba29535b8580..9b073e7fa88c 100755
--- a/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/record+script_probe_vfs_getname.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
# Use vfs_getname probe to get syscall args filenames
# Uses the 'perf test shell' library to add probe:vfs_getname to the system
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 4ce276efe6b4..50109f27ca07 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -1,3 +1,4 @@
+#!/bin/sh
# Check open filename arg using perf trace + vfs_getname
# Uses the 'perf test shell' library to add probe:vfs_getname to the system
diff --git a/tools/perf/trace/beauty/Build b/tools/perf/trace/beauty/Build
index 304313073242..637365099b7d 100644
--- a/tools/perf/trace/beauty/Build
+++ b/tools/perf/trace/beauty/Build
@@ -7,7 +7,9 @@ endif
libperf-y += kcmp.o
libperf-y += mount_flags.o
libperf-y += pkey_alloc.o
+libperf-y += arch_prctl.o
libperf-y += prctl.o
+libperf-y += renameat.o
libperf-y += sockaddr.o
libperf-y += socket.o
libperf-y += statx.o
diff --git a/tools/perf/trace/beauty/arch_prctl.c b/tools/perf/trace/beauty/arch_prctl.c
new file mode 100644
index 000000000000..fe022ca67e60
--- /dev/null
+++ b/tools/perf/trace/beauty/arch_prctl.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * trace/beauty/arch_prctl.c
+ *
+ * Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+
+#include "trace/beauty/beauty.h"
+#include <linux/kernel.h>
+
+#include "trace/beauty/generated/x86_arch_prctl_code_array.c"
+
+static DEFINE_STRARRAY_OFFSET(x86_arch_prctl_codes_1, "ARCH_", x86_arch_prctl_codes_1_offset);
+static DEFINE_STRARRAY_OFFSET(x86_arch_prctl_codes_2, "ARCH_", x86_arch_prctl_codes_2_offset);
+
+static struct strarray *x86_arch_prctl_codes[] = {
+ &strarray__x86_arch_prctl_codes_1,
+ &strarray__x86_arch_prctl_codes_2,
+};
+
+static DEFINE_STRARRAYS(x86_arch_prctl_codes);
+
+static size_t x86_arch_prctl__scnprintf_code(int option, char *bf, size_t size, bool show_prefix)
+{
+ return strarrays__scnprintf(&strarrays__x86_arch_prctl_codes, bf, size, "%#x", show_prefix, option);
+}
+
+size_t syscall_arg__scnprintf_x86_arch_prctl_code(char *bf, size_t size, struct syscall_arg *arg)
+{
+ unsigned long code = arg->val;
+
+ return x86_arch_prctl__scnprintf_code(code, bf, size, arg->show_string_prefix);
+}
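The included x86_arch_prctl_code_array.c is generated from the x86 asm/prctl.h UAPI header; there are two tables because the ARCH_* codes sit in two disjoint ranges. A trimmed, hypothetical sketch of what the first generated table should look like (the code values come from asm/prctl.h; the exact generated layout may differ):

	#define x86_arch_prctl_codes_1_offset 0x1001
	static const char *x86_arch_prctl_codes_1[] = {
		[0x1001 - x86_arch_prctl_codes_1_offset] = "SET_GS",
		[0x1002 - x86_arch_prctl_codes_1_offset] = "SET_FS",
		[0x1003 - x86_arch_prctl_codes_1_offset] = "GET_FS",
		[0x1004 - x86_arch_prctl_codes_1_offset] = "GET_GS",
	};

With .offset set, the strarray lookup subtracts the offset before indexing entries[], so ARCH_SET_FS (0x1002) resolves to "SET_FS" and renders as "ARCH_SET_FS" when the common prefix is shown.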
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index 039c29039b2c..83c5b202e00e 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -9,26 +9,41 @@
struct strarray {
int offset;
int nr_entries;
+ const char *prefix;
const char **entries;
};
-#define DEFINE_STRARRAY(array) struct strarray strarray__##array = { \
+#define DEFINE_STRARRAY(array, _prefix) struct strarray strarray__##array = { \
.nr_entries = ARRAY_SIZE(array), \
.entries = array, \
+ .prefix = _prefix, \
}
-#define DEFINE_STRARRAY_OFFSET(array, off) struct strarray strarray__##array = { \
+#define DEFINE_STRARRAY_OFFSET(array, _prefix, off) struct strarray strarray__##array = { \
.offset = off, \
.nr_entries = ARRAY_SIZE(array), \
.entries = array, \
+ .prefix = _prefix, \
}
-size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, int val);
-size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, unsigned long flags);
+size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, bool show_prefix, int val);
+size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, bool show_prefix, unsigned long flags);
struct trace;
struct thread;
+struct strarrays {
+ int nr_entries;
+ struct strarray **entries;
+};
+
+#define DEFINE_STRARRAYS(array) struct strarrays strarrays__##array = { \
+ .nr_entries = ARRAY_SIZE(array), \
+ .entries = array, \
+}
+
+size_t strarrays__scnprintf(struct strarrays *sas, char *bf, size_t size, const char *intfmt, bool show_prefix, int val);
+
size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size);
extern struct strarray strarray__socket_families;
@@ -66,6 +81,7 @@ struct augmented_arg {
* @parm: private area, may be an strarray, for instance
* @idx: syscall arg idx (is this the first?)
* @mask: a syscall arg may mask another arg, see syscall_arg__scnprintf_futex_op
+ * @show_string_prefix: When there is a common prefix in a string table, show it or not
*/
struct syscall_arg {
@@ -80,6 +96,7 @@ struct syscall_arg {
void *parm;
u8 idx;
u8 mask;
+ bool show_string_prefix;
};
unsigned long syscall_arg__val(struct syscall_arg *arg, u8 idx);
@@ -93,6 +110,9 @@ size_t syscall_arg__scnprintf_fd(char *bf, size_t size, struct syscall_arg *arg)
size_t syscall_arg__scnprintf_hex(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_HEX syscall_arg__scnprintf_hex
+size_t syscall_arg__scnprintf_ptr(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_PTR syscall_arg__scnprintf_ptr
+
size_t syscall_arg__scnprintf_int(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_INT syscall_arg__scnprintf_int
@@ -135,6 +155,9 @@ size_t syscall_arg__scnprintf_pkey_alloc_access_rights(char *bf, size_t size, st
size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
+size_t syscall_arg__scnprintf_x86_arch_prctl_code(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_X86_ARCH_PRCTL_CODE syscall_arg__scnprintf_x86_arch_prctl_code
+
size_t syscall_arg__scnprintf_prctl_option(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_PRCTL_OPTION syscall_arg__scnprintf_prctl_option
@@ -144,6 +167,9 @@ size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_a
size_t syscall_arg__scnprintf_prctl_arg3(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_PRCTL_ARG3 syscall_arg__scnprintf_prctl_arg3
+size_t syscall_arg__scnprintf_renameat2_flags(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_RENAMEAT2_FLAGS syscall_arg__scnprintf_renameat2_flags
+
size_t syscall_arg__scnprintf_sockaddr(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_SOCKADDR syscall_arg__scnprintf_sockaddr
@@ -156,7 +182,7 @@ size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_
size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_STATX_MASK syscall_arg__scnprintf_statx_mask
-size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size);
+size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix);
void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg,
size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg));
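What this header change does overall: every beautifier gains a show_prefix/show_string_prefix bool so a string table can carry its common prefix ("KCMP_", "MAP_", ...) once, instead of each formatter hardcoding or dropping it. A self-contained sketch of the assumed lookup semantics (sketch_scnprintf mirrors DEFINE_STRARRAY/strarray__scnprintf; it does not reuse the real ones):

	#include <stdio.h>

	struct strarray {
		int nr_entries;
		const char *prefix;
		const char **entries;
	};

	/* Same shape as strarray__scnprintf: fall back to intfmt for unknown values. */
	static int sketch_scnprintf(const struct strarray *sa, char *bf, size_t size,
				    const char *intfmt, int show_prefix, int val)
	{
		if (val >= 0 && val < sa->nr_entries && sa->entries[val])
			return snprintf(bf, size, "%s%s",
					show_prefix ? sa->prefix : "", sa->entries[val]);
		return snprintf(bf, size, intfmt, val);
	}

	int main(void)
	{
		static const char *kcmp_types[] = { "FILE", "VM", "FILES", "FS", "SIGHAND", "IO", "SYSVSEM", };
		struct strarray sa = {
			.nr_entries = sizeof(kcmp_types) / sizeof(kcmp_types[0]),
			.prefix	    = "KCMP_",
			.entries    = kcmp_types,
		};
		char bf[64];

		sketch_scnprintf(&sa, bf, sizeof(bf), "%d", 1, 1);
		printf("%s\n", bf);	/* KCMP_VM */
		sketch_scnprintf(&sa, bf, sizeof(bf), "%d", 0, 1);
		printf("%s\n", bf);	/* VM */
		return 0;
	}

With the prefix suppressed (the default, keeping today's compact output), the clone.c hunk below still prints "VM|FS"; with it shown, the same flags render as "CLONE_VM|CLONE_FS".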
diff --git a/tools/perf/trace/beauty/clone.c b/tools/perf/trace/beauty/clone.c
index 010406500c30..6eb9a6636171 100644
--- a/tools/perf/trace/beauty/clone.c
+++ b/tools/perf/trace/beauty/clone.c
@@ -10,13 +10,14 @@
#include <sys/types.h>
#include <uapi/linux/sched.h>
-static size_t clone__scnprintf_flags(unsigned long flags, char *bf, size_t size)
+static size_t clone__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
+ const char *prefix = "CLONE_";
int printed = 0;
#define P_FLAG(n) \
if (flags & CLONE_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~CLONE_##n; \
}
@@ -70,5 +71,5 @@ size_t syscall_arg__scnprintf_clone_flags(char *bf, size_t size, struct syscall_
if (!(flags & CLONE_SETTLS))
arg->mask |= SCC_TLS;
- return clone__scnprintf_flags(flags, bf, size);
+ return clone__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
diff --git a/tools/perf/trace/beauty/eventfd.c b/tools/perf/trace/beauty/eventfd.c
index db5b9b492113..4bab106213c6 100644
--- a/tools/perf/trace/beauty/eventfd.c
+++ b/tools/perf/trace/beauty/eventfd.c
@@ -13,13 +13,15 @@
static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size, struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "EFD_";
int printed = 0, flags = arg->val;
if (flags == 0)
return scnprintf(bf, size, "NONE");
#define P_FLAG(n) \
if (flags & EFD_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~EFD_##n; \
}
diff --git a/tools/perf/trace/beauty/fadvise.sh b/tools/perf/trace/beauty/fadvise.sh
new file mode 100755
index 000000000000..b15ae3875167
--- /dev/null
+++ b/tools/perf/trace/beauty/fadvise.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
+
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
+
+printf "static const char *fadvise_advices[] = {\n"
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+POSIX_FADV_(\w+)[[:space:]]+([[:digit:]]+)[[:space:]]+.*'
+
+egrep $regex ${header_dir}/fadvise.h | \
+ sed -r "s/$regex/\2 \1/g" | \
+ sort | xargs printf "\t[%s] = \"%s\",\n" | \
+ grep -v "[6].*DONTNEED" | grep -v "[7].*NOREUSE"
+printf "};\n"
+
+# XXX Fix this properly:
+
+# The grep 6/7 DONTNEED/NOREUSE are a hack to filter out the s/390 oddity. See
+# tools/include/uapi/linux/fadvise.h for details.
+
+# Probably fix this when generating the string tables per arch so that we can
+# reliably process on arch FOO a perf.data file collected by 'perf trace
+# record' on arch BAR, e.g. collect on s/390 and process on x86.
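On a typical architecture (the s/390 variants being the exception the grep hack above filters out), the generated table should come out roughly as follows (illustrative; the indices are the POSIX_FADV_* values):

	static const char *fadvise_advices[] = {
		[0] = "NORMAL",
		[1] = "RANDOM",
		[2] = "SEQUENTIAL",
		[3] = "WILLNEED",
		[4] = "DONTNEED",
		[5] = "NOREUSE",
	};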
diff --git a/tools/perf/trace/beauty/fcntl.c b/tools/perf/trace/beauty/fcntl.c
index e6de31674e24..56ef83b3d130 100644
--- a/tools/perf/trace/beauty/fcntl.c
+++ b/tools/perf/trace/beauty/fcntl.c
@@ -9,27 +9,28 @@
#include <linux/kernel.h>
#include <uapi/linux/fcntl.h>
-static size_t fcntl__scnprintf_getfd(unsigned long val, char *bf, size_t size)
+static size_t fcntl__scnprintf_getfd(unsigned long val, char *bf, size_t size, bool show_prefix)
{
- return scnprintf(bf, size, "%s", val ? "CLOEXEC" : "0");
+ return val ? scnprintf(bf, size, "%s", "0") :
+ scnprintf(bf, size, "%s%s", show_prefix ? "FD_" : "", "CLOEXEC");
}
static size_t syscall_arg__scnprintf_fcntl_getfd(char *bf, size_t size, struct syscall_arg *arg)
{
- return fcntl__scnprintf_getfd(arg->val, bf, size);
+ return fcntl__scnprintf_getfd(arg->val, bf, size, arg->show_string_prefix);
}
-static size_t fcntl__scnprintf_getlease(unsigned long val, char *bf, size_t size)
+static size_t fcntl__scnprintf_getlease(unsigned long val, char *bf, size_t size, bool show_prefix)
{
static const char *fcntl_setlease[] = { "RDLCK", "WRLCK", "UNLCK", };
- static DEFINE_STRARRAY(fcntl_setlease);
+ static DEFINE_STRARRAY(fcntl_setlease, "F_");
- return strarray__scnprintf(&strarray__fcntl_setlease, bf, size, "%x", val);
+ return strarray__scnprintf(&strarray__fcntl_setlease, bf, size, "%x", show_prefix, val);
}
static size_t syscall_arg__scnprintf_fcntl_getlease(char *bf, size_t size, struct syscall_arg *arg)
{
- return fcntl__scnprintf_getlease(arg->val, bf, size);
+ return fcntl__scnprintf_getlease(arg->val, bf, size, arg->show_string_prefix);
}
size_t syscall_arg__scnprintf_fcntl_cmd(char *bf, size_t size, struct syscall_arg *arg)
@@ -68,22 +69,23 @@ out:
size_t syscall_arg__scnprintf_fcntl_arg(char *bf, size_t size, struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
int cmd = syscall_arg__val(arg, 1);
if (cmd == F_DUPFD)
return syscall_arg__scnprintf_fd(bf, size, arg);
if (cmd == F_SETFD)
- return fcntl__scnprintf_getfd(arg->val, bf, size);
+ return fcntl__scnprintf_getfd(arg->val, bf, size, show_prefix);
if (cmd == F_SETFL)
- return open__scnprintf_flags(arg->val, bf, size);
+ return open__scnprintf_flags(arg->val, bf, size, show_prefix);
if (cmd == F_SETOWN)
return syscall_arg__scnprintf_pid(bf, size, arg);
if (cmd == F_SETLEASE)
- return fcntl__scnprintf_getlease(arg->val, bf, size);
+ return fcntl__scnprintf_getlease(arg->val, bf, size, show_prefix);
/*
* We still don't grab the contents of pointers on entry or exit,
* so just print them as hex numbers
diff --git a/tools/perf/trace/beauty/flock.c b/tools/perf/trace/beauty/flock.c
index cf02ae5f0ba6..c14274edd6d9 100644
--- a/tools/perf/trace/beauty/flock.c
+++ b/tools/perf/trace/beauty/flock.c
@@ -22,13 +22,15 @@
size_t syscall_arg__scnprintf_flock(char *bf, size_t size, struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "LOCK_";
int printed = 0, op = arg->val;
if (op == 0)
return scnprintf(bf, size, "NONE");
#define P_CMD(cmd) \
if ((op & LOCK_##cmd) == LOCK_##cmd) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #cmd); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #cmd); \
op &= ~LOCK_##cmd; \
}
diff --git a/tools/perf/trace/beauty/futex_op.c b/tools/perf/trace/beauty/futex_op.c
index 1136bde56406..00365156782b 100644
--- a/tools/perf/trace/beauty/futex_op.c
+++ b/tools/perf/trace/beauty/futex_op.c
@@ -19,6 +19,8 @@
static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "FUTEX_";
enum syscall_futex_args {
SCF_UADDR = (1 << 0),
SCF_OP = (1 << 1),
@@ -32,7 +34,7 @@ static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct sysc
size_t printed = 0;
switch (cmd) {
-#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
+#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", #n);
P_FUTEX_OP(WAIT); arg->mask |= SCF_VAL3|SCF_UADDR2; break;
P_FUTEX_OP(WAKE); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
P_FUTEX_OP(FD); arg->mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
@@ -50,10 +52,10 @@ static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct sysc
}
if (op & FUTEX_PRIVATE_FLAG)
- printed += scnprintf(bf + printed, size - printed, "|PRIV");
+ printed += scnprintf(bf + printed, size - printed, "|%s%s", show_prefix ? prefix : "", "PRIVATE_FLAG");
if (op & FUTEX_CLOCK_REALTIME)
- printed += scnprintf(bf + printed, size - printed, "|CLKRT");
+ printed += scnprintf(bf + printed, size - printed, "|%s%s", show_prefix ? prefix : "", "CLOCK_REALTIME");
return printed;
}
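With the PRIV and CLKRT abbreviations replaced by the real flag suffixes, a (hypothetical) op of FUTEX_WAIT|FUTEX_PRIVATE_FLAG|FUTEX_CLOCK_REALTIME now renders as:

	WAIT|PRIVATE_FLAG|CLOCK_REALTIME			(prefix suppressed)
	FUTEX_WAIT|FUTEX_PRIVATE_FLAG|FUTEX_CLOCK_REALTIME	(prefix shown)

so the prefixed form can be pasted straight back into code or a grep.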
diff --git a/tools/perf/trace/beauty/futex_val3.c b/tools/perf/trace/beauty/futex_val3.c
index 138b7d588a70..9114f7620571 100644
--- a/tools/perf/trace/beauty/futex_val3.c
+++ b/tools/perf/trace/beauty/futex_val3.c
@@ -7,10 +7,11 @@
static size_t syscall_arg__scnprintf_futex_val3(char *bf, size_t size, struct syscall_arg *arg)
{
+ const char *prefix = "FUTEX_BITSET_";
unsigned int bitset = arg->val;
if (bitset == FUTEX_BITSET_MATCH_ANY)
- return scnprintf(bf, size, "MATCH_ANY");
+ return scnprintf(bf, size, "%s%s", arg->show_string_prefix ? prefix : "", "MATCH_ANY");
return scnprintf(bf, size, "%#xd", bitset);
}
diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
index 5d2a7fd8d407..9efeb6a936c2 100644
--- a/tools/perf/trace/beauty/ioctl.c
+++ b/tools/perf/trace/beauty/ioctl.c
@@ -31,11 +31,12 @@ static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
"TCSETSW2", "TCSETSF2", "TIOCGRS48", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
"TIOCGDEV", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG", "TIOCVHANGUP", "TIOCGPKT",
"TIOCGPTLCK", [_IOC_NR(TIOCGEXCL)] = "TIOCGEXCL", "TIOCGPTPEER",
+ "TIOCGISO7816", "TIOCSISO7816",
[_IOC_NR(FIONCLEX)] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
"TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
"TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
"TIOCMIWAIT", "TIOCGICOUNT", };
- static DEFINE_STRARRAY(ioctl_tty_cmd);
+ static DEFINE_STRARRAY(ioctl_tty_cmd, "");
if (nr < strarray__ioctl_tty_cmd.nr_entries && strarray__ioctl_tty_cmd.entries[nr] != NULL)
return scnprintf(bf, size, "%s", strarray__ioctl_tty_cmd.entries[nr]);
@@ -46,7 +47,7 @@ static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
static size_t ioctl__scnprintf_drm_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/drm_ioctl_array.c"
- static DEFINE_STRARRAY(drm_ioctl_cmds);
+ static DEFINE_STRARRAY(drm_ioctl_cmds, "");
if (nr < strarray__drm_ioctl_cmds.nr_entries && strarray__drm_ioctl_cmds.entries[nr] != NULL)
return scnprintf(bf, size, "DRM_%s", strarray__drm_ioctl_cmds.entries[nr]);
@@ -57,7 +58,7 @@ static size_t ioctl__scnprintf_drm_cmd(int nr, int dir, char *bf, size_t size)
static size_t ioctl__scnprintf_sndrv_pcm_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/sndrv_pcm_ioctl_array.c"
- static DEFINE_STRARRAY(sndrv_pcm_ioctl_cmds);
+ static DEFINE_STRARRAY(sndrv_pcm_ioctl_cmds, "");
if (nr < strarray__sndrv_pcm_ioctl_cmds.nr_entries && strarray__sndrv_pcm_ioctl_cmds.entries[nr] != NULL)
return scnprintf(bf, size, "SNDRV_PCM_%s", strarray__sndrv_pcm_ioctl_cmds.entries[nr]);
@@ -68,7 +69,7 @@ static size_t ioctl__scnprintf_sndrv_pcm_cmd(int nr, int dir, char *bf, size_t s
static size_t ioctl__scnprintf_sndrv_ctl_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/sndrv_ctl_ioctl_array.c"
- static DEFINE_STRARRAY(sndrv_ctl_ioctl_cmds);
+ static DEFINE_STRARRAY(sndrv_ctl_ioctl_cmds, "");
if (nr < strarray__sndrv_ctl_ioctl_cmds.nr_entries && strarray__sndrv_ctl_ioctl_cmds.entries[nr] != NULL)
return scnprintf(bf, size, "SNDRV_CTL_%s", strarray__sndrv_ctl_ioctl_cmds.entries[nr]);
@@ -79,7 +80,7 @@ static size_t ioctl__scnprintf_sndrv_ctl_cmd(int nr, int dir, char *bf, size_t s
static size_t ioctl__scnprintf_kvm_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/kvm_ioctl_array.c"
- static DEFINE_STRARRAY(kvm_ioctl_cmds);
+ static DEFINE_STRARRAY(kvm_ioctl_cmds, "");
if (nr < strarray__kvm_ioctl_cmds.nr_entries && strarray__kvm_ioctl_cmds.entries[nr] != NULL)
return scnprintf(bf, size, "KVM_%s", strarray__kvm_ioctl_cmds.entries[nr]);
@@ -90,8 +91,8 @@ static size_t ioctl__scnprintf_kvm_cmd(int nr, int dir, char *bf, size_t size)
static size_t ioctl__scnprintf_vhost_virtio_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/vhost_virtio_ioctl_array.c"
- static DEFINE_STRARRAY(vhost_virtio_ioctl_cmds);
- static DEFINE_STRARRAY(vhost_virtio_ioctl_read_cmds);
+ static DEFINE_STRARRAY(vhost_virtio_ioctl_cmds, "");
+ static DEFINE_STRARRAY(vhost_virtio_ioctl_read_cmds, "");
struct strarray *s = (dir & _IOC_READ) ? &strarray__vhost_virtio_ioctl_read_cmds : &strarray__vhost_virtio_ioctl_cmds;
if (nr < s->nr_entries && s->entries[nr] != NULL)
@@ -103,7 +104,7 @@ static size_t ioctl__scnprintf_vhost_virtio_cmd(int nr, int dir, char *bf, size_
static size_t ioctl__scnprintf_perf_cmd(int nr, int dir, char *bf, size_t size)
{
#include "trace/beauty/generated/ioctl/perf_ioctl_array.c"
- static DEFINE_STRARRAY(perf_ioctl_cmds);
+ static DEFINE_STRARRAY(perf_ioctl_cmds, "");
if (nr < strarray__perf_ioctl_cmds.nr_entries && strarray__perf_ioctl_cmds.entries[nr] != NULL)
return scnprintf(bf, size, "PERF_%s", strarray__perf_ioctl_cmds.entries[nr]);
@@ -111,8 +112,9 @@ static size_t ioctl__scnprintf_perf_cmd(int nr, int dir, char *bf, size_t size)
return scnprintf(bf, size, "(%#x, %#x, %#x)", 0xAE, nr, dir);
}
-static size_t ioctl__scnprintf_cmd(unsigned long cmd, char *bf, size_t size)
+static size_t ioctl__scnprintf_cmd(unsigned long cmd, char *bf, size_t size, bool show_prefix)
{
+ const char *prefix = "_IOC_";
int dir = _IOC_DIR(cmd),
type = _IOC_TYPE(cmd),
nr = _IOC_NR(cmd),
@@ -142,12 +144,14 @@ static size_t ioctl__scnprintf_cmd(unsigned long cmd, char *bf, size_t size)
printed += scnprintf(bf + printed, size - printed, "%c", '(');
if (dir == _IOC_NONE) {
- printed += scnprintf(bf + printed, size - printed, "%s", "NONE");
+ printed += scnprintf(bf + printed, size - printed, "%s%s", show_prefix ? prefix : "", "NONE");
} else {
if (dir & _IOC_READ)
- printed += scnprintf(bf + printed, size - printed, "%s", "READ");
- if (dir & _IOC_WRITE)
- printed += scnprintf(bf + printed, size - printed, "%s%s", dir & _IOC_READ ? "|" : "", "WRITE");
+ printed += scnprintf(bf + printed, size - printed, "%s%s", show_prefix ? prefix : "", "READ");
+ if (dir & _IOC_WRITE) {
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", dir & _IOC_READ ? "|" : "",
+ show_prefix ? prefix : "", "WRITE");
+ }
}
return printed + scnprintf(bf + printed, size - printed, ", %#x, %#x, %#x)", type, nr, sz);
@@ -157,5 +161,5 @@ size_t syscall_arg__scnprintf_ioctl_cmd(char *bf, size_t size, struct syscall_ar
{
unsigned long cmd = arg->val;
- return ioctl__scnprintf_cmd(cmd, bf, size);
+ return ioctl__scnprintf_cmd(cmd, bf, size, arg->show_string_prefix);
}
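Every cmd that misses the known tables ends up in this generic decode, which splits the value using the standard asm-generic layout: dir in bits 31:30, size in bits 29:16, type in bits 15:8, nr in bits 7:0. Worked example for TIOCGPTN = _IOR('T', 0x30, unsigned int) = 0x80045430:

	dir  =  0x80045430 >> 30           = 2    (_IOC_READ)
	size = (0x80045430 >> 16) & 0x3fff = 4    (sizeof(unsigned int))
	type = (0x80045430 >>  8) & 0xff   = 0x54 ('T')
	nr   =  0x80045430        & 0xff   = 0x30

(TIOCGPTN itself is in the tty table above, so it prints by name; the decomposition is still computed first, and with the prefix shown the dir reads "_IOC_READ" rather than "READ".)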
diff --git a/tools/perf/trace/beauty/kcmp.c b/tools/perf/trace/beauty/kcmp.c
index b276a274f203..9351f84390a1 100644
--- a/tools/perf/trace/beauty/kcmp.c
+++ b/tools/perf/trace/beauty/kcmp.c
@@ -26,10 +26,10 @@ size_t syscall_arg__scnprintf_kcmp_idx(char *bf, size_t size, struct syscall_arg
return pid__scnprintf_fd(arg->trace, pid, fd, bf, size);
}
-static size_t kcmp__scnprintf_type(int type, char *bf, size_t size)
+static size_t kcmp__scnprintf_type(int type, char *bf, size_t size, bool show_prefix)
{
- static DEFINE_STRARRAY(kcmp_types);
- return strarray__scnprintf(&strarray__kcmp_types, bf, size, "%d", type);
+ static DEFINE_STRARRAY(kcmp_types, "KCMP_");
+ return strarray__scnprintf(&strarray__kcmp_types, bf, size, "%d", show_prefix, type);
}
size_t syscall_arg__scnprintf_kcmp_type(char *bf, size_t size, struct syscall_arg *arg)
@@ -39,5 +39,5 @@ size_t syscall_arg__scnprintf_kcmp_type(char *bf, size_t size, struct syscall_ar
if (type != KCMP_FILE)
arg->mask |= (1 << 3) | (1 << 4); /* Ignore idx1 and idx2 */
- return kcmp__scnprintf_type(type, bf, size);
+ return kcmp__scnprintf_type(type, bf, size, arg->show_string_prefix);
}
diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c
index c534bd96ef5c..eb31089790e3 100644
--- a/tools/perf/trace/beauty/mmap.c
+++ b/tools/perf/trace/beauty/mmap.c
@@ -5,18 +5,20 @@
static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
struct syscall_arg *arg)
{
+ const char *prot_prefix = "PROT_";
int printed = 0, prot = arg->val;
+ bool show_prefix = arg->show_string_prefix;
if (prot == PROT_NONE)
- return scnprintf(bf, size, "NONE");
+ return scnprintf(bf, size, "%sNONE", show_prefix ? prot_prefix : "");
#define P_MMAP_PROT(n) \
if (prot & PROT_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+		printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prot_prefix : "", #n); \
prot &= ~PROT_##n; \
}
- P_MMAP_PROT(EXEC);
P_MMAP_PROT(READ);
+ P_MMAP_PROT(EXEC);
P_MMAP_PROT(WRITE);
P_MMAP_PROT(SEM);
P_MMAP_PROT(GROWSDOWN);
@@ -31,12 +33,12 @@ static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
-static size_t mmap__scnprintf_flags(unsigned long flags, char *bf, size_t size)
+static size_t mmap__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
#include "trace/beauty/generated/mmap_flags_array.c"
- static DEFINE_STRARRAY(mmap_flags);
+ static DEFINE_STRARRAY(mmap_flags, "MAP_");
- return strarray__scnprintf_flags(&strarray__mmap_flags, bf, size, flags);
+ return strarray__scnprintf_flags(&strarray__mmap_flags, bf, size, show_prefix, flags);
}
static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
@@ -47,7 +49,7 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
if (flags & MAP_ANONYMOUS)
arg->mask |= (1 << 4) | (1 << 5); /* Mask 4th ('fd') and 5th ('offset') args, ignored */
- return mmap__scnprintf_flags(flags, bf, size);
+ return mmap__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
@@ -55,11 +57,13 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
+ const char *flags_prefix = "MREMAP_";
+ bool show_prefix = arg->show_string_prefix;
int printed = 0, flags = arg->val;
#define P_MREMAP_FLAG(n) \
if (flags & MREMAP_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? flags_prefix : "", #n); \
flags &= ~MREMAP_##n; \
}
@@ -78,7 +82,7 @@ static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
static size_t madvise__scnprintf_behavior(int behavior, char *bf, size_t size)
{
#include "trace/beauty/generated/madvise_behavior_array.c"
- static DEFINE_STRARRAY(madvise_advices);
+ static DEFINE_STRARRAY(madvise_advices, "MADV_");
if (behavior < strarray__madvise_advices.nr_entries && strarray__madvise_advices.entries[behavior] != NULL)
return scnprintf(bf, size, "MADV_%s", strarray__madvise_advices.entries[behavior]);
diff --git a/tools/perf/trace/beauty/mmap_flags.sh b/tools/perf/trace/beauty/mmap_flags.sh
index 22c3fdca8975..32bac9c0d694 100755
--- a/tools/perf/trace/beauty/mmap_flags.sh
+++ b/tools/perf/trace/beauty/mmap_flags.sh
@@ -20,12 +20,12 @@ egrep -q $regex ${arch_mman} && \
(egrep $regex ${arch_mman} | \
sed -r "s/$regex/\2 \1/g" | \
xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
-egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman} &&
+([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman}) &&
(egrep $regex ${header_dir}/mman-common.h | \
egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
sed -r "s/$regex/\2 \1/g" | \
xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
-egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.h>.*' ${arch_mman} &&
+([ ! -f ${arch_mman} ] || egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.h>.*' ${arch_mman}) &&
(egrep $regex ${header_dir}/mman.h | \
sed -r "s/$regex/\2 \1/g" | \
xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
diff --git a/tools/perf/trace/beauty/mode_t.c b/tools/perf/trace/beauty/mode_t.c
index 6879d36d3004..29a8fadfb7f9 100644
--- a/tools/perf/trace/beauty/mode_t.c
+++ b/tools/perf/trace/beauty/mode_t.c
@@ -22,11 +22,13 @@
static size_t syscall_arg__scnprintf_mode_t(char *bf, size_t size, struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "S_";
int printed = 0, mode = arg->val;
#define P_MODE(n) \
if ((mode & S_##n) == S_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
mode &= ~S_##n; \
}
diff --git a/tools/perf/trace/beauty/mount_flags.c b/tools/perf/trace/beauty/mount_flags.c
index 712935c6620a..10fb14cfaf8f 100644
--- a/tools/perf/trace/beauty/mount_flags.c
+++ b/tools/perf/trace/beauty/mount_flags.c
@@ -11,12 +11,12 @@
#include <linux/log2.h>
#include <sys/mount.h>
-static size_t mount__scnprintf_flags(unsigned long flags, char *bf, size_t size)
+static size_t mount__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
#include "trace/beauty/generated/mount_flags_array.c"
- static DEFINE_STRARRAY(mount_flags);
+ static DEFINE_STRARRAY(mount_flags, "MS_");
- return strarray__scnprintf_flags(&strarray__mount_flags, bf, size, flags);
+ return strarray__scnprintf_flags(&strarray__mount_flags, bf, size, show_prefix, flags);
}
unsigned long syscall_arg__mask_val_mount_flags(struct syscall_arg *arg __maybe_unused, unsigned long flags)
@@ -39,5 +39,5 @@ size_t syscall_arg__scnprintf_mount_flags(char *bf, size_t size, struct syscall_
{
unsigned long flags = arg->val;
- return mount__scnprintf_flags(flags, bf, size);
+ return mount__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c
index 1b9d6306d274..d66c66315987 100644
--- a/tools/perf/trace/beauty/msg_flags.c
+++ b/tools/perf/trace/beauty/msg_flags.c
@@ -21,13 +21,15 @@
static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "MSG_";
int printed = 0, flags = arg->val;
if (flags == 0)
return scnprintf(bf, size, "NONE");
#define P_MSG_FLAG(n) \
if (flags & MSG_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~MSG_##n; \
}
diff --git a/tools/perf/trace/beauty/open_flags.c b/tools/perf/trace/beauty/open_flags.c
index cc673fec9184..78f6566ef110 100644
--- a/tools/perf/trace/beauty/open_flags.c
+++ b/tools/perf/trace/beauty/open_flags.c
@@ -22,15 +22,18 @@
#undef O_LARGEFILE
#define O_LARGEFILE 00100000
-size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size)
+size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
{
+ const char *prefix = "O_";
int printed = 0;
+ if ((flags & O_ACCMODE) == O_RDONLY)
+ printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", "RDONLY");
if (flags == 0)
- return scnprintf(bf, size, "RDONLY");
+ return printed;
#define P_FLAG(n) \
if (flags & O_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~O_##n; \
}
@@ -57,7 +60,7 @@ size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size)
#endif
#ifdef O_DSYNC
if ((flags & O_SYNC) == O_SYNC)
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", "SYNC");
else {
P_FLAG(DSYNC);
}
@@ -81,5 +84,5 @@ size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size, struct syscall_a
if (!(flags & O_CREAT))
arg->mask |= 1 << (arg->idx + 1); /* Mask the mode parm */
- return open__scnprintf_flags(flags, bf, size);
+ return open__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
}
diff --git a/tools/perf/trace/beauty/perf_event_open.c b/tools/perf/trace/beauty/perf_event_open.c
index 981185c1974b..11d47dbe63bd 100644
--- a/tools/perf/trace/beauty/perf_event_open.c
+++ b/tools/perf/trace/beauty/perf_event_open.c
@@ -18,6 +18,8 @@
static size_t syscall_arg__scnprintf_perf_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "PERF_";
int printed = 0, flags = arg->val;
if (flags == 0)
@@ -25,7 +27,7 @@ static size_t syscall_arg__scnprintf_perf_flags(char *bf, size_t size,
#define P_FLAG(n) \
if (flags & PERF_FLAG_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~PERF_FLAG_##n; \
}
diff --git a/tools/perf/trace/beauty/pkey_alloc.c b/tools/perf/trace/beauty/pkey_alloc.c
index 1b8ed4cac815..f9596ed02cc4 100644
--- a/tools/perf/trace/beauty/pkey_alloc.c
+++ b/tools/perf/trace/beauty/pkey_alloc.c
@@ -9,14 +9,14 @@
#include <linux/kernel.h>
#include <linux/log2.h>
-size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, unsigned long flags)
+size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, bool show_prefix, unsigned long flags)
{
int i, printed = 0;
if (flags == 0) {
const char *s = sa->entries[0];
if (s)
- return scnprintf(bf, size, "%s", s);
+ return scnprintf(bf, size, "%s%s", show_prefix ? sa->prefix : "", s);
return scnprintf(bf, size, "%d", 0);
}
@@ -30,7 +30,7 @@ size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, uns
printed += scnprintf(bf + printed, size - printed, "|");
if (sa->entries[i] != NULL)
- printed += scnprintf(bf + printed, size - printed, "%s", sa->entries[i]);
+ printed += scnprintf(bf + printed, size - printed, "%s%s", show_prefix ? sa->prefix : "", sa->entries[i]);
else
printed += scnprintf(bf + printed, size - printed, "0x%#", bit);
}
@@ -38,17 +38,17 @@ size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, uns
return printed;
}
-static size_t pkey_alloc__scnprintf_access_rights(int access_rights, char *bf, size_t size)
+static size_t pkey_alloc__scnprintf_access_rights(int access_rights, char *bf, size_t size, bool show_prefix)
{
#include "trace/beauty/generated/pkey_alloc_access_rights_array.c"
- static DEFINE_STRARRAY(pkey_alloc_access_rights);
+ static DEFINE_STRARRAY(pkey_alloc_access_rights, "PKEY_");
- return strarray__scnprintf_flags(&strarray__pkey_alloc_access_rights, bf, size, access_rights);
+ return strarray__scnprintf_flags(&strarray__pkey_alloc_access_rights, bf, size, show_prefix, access_rights);
}
size_t syscall_arg__scnprintf_pkey_alloc_access_rights(char *bf, size_t size, struct syscall_arg *arg)
{
unsigned long cmd = arg->val;
- return pkey_alloc__scnprintf_access_rights(cmd, bf, size);
+ return pkey_alloc__scnprintf_access_rights(cmd, bf, size, arg->show_string_prefix);
}
diff --git a/tools/perf/trace/beauty/prctl.c b/tools/perf/trace/beauty/prctl.c
index be7a5d395975..ba2179abed00 100644
--- a/tools/perf/trace/beauty/prctl.c
+++ b/tools/perf/trace/beauty/prctl.c
@@ -11,16 +11,16 @@
#include "trace/beauty/generated/prctl_option_array.c"
-static size_t prctl__scnprintf_option(int option, char *bf, size_t size)
+static size_t prctl__scnprintf_option(int option, char *bf, size_t size, bool show_prefix)
{
- static DEFINE_STRARRAY(prctl_options);
- return strarray__scnprintf(&strarray__prctl_options, bf, size, "%d", option);
+ static DEFINE_STRARRAY(prctl_options, "PR_");
+ return strarray__scnprintf(&strarray__prctl_options, bf, size, "%d", show_prefix, option);
}
-static size_t prctl__scnprintf_set_mm(int option, char *bf, size_t size)
+static size_t prctl__scnprintf_set_mm(int option, char *bf, size_t size, bool show_prefix)
{
- static DEFINE_STRARRAY(prctl_set_mm_options);
- return strarray__scnprintf(&strarray__prctl_set_mm_options, bf, size, "%d", option);
+ static DEFINE_STRARRAY(prctl_set_mm_options, "PR_SET_MM_");
+ return strarray__scnprintf(&strarray__prctl_set_mm_options, bf, size, "%d", show_prefix, option);
}
size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_arg *arg)
@@ -28,7 +28,7 @@ size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_a
int option = syscall_arg__val(arg, 0);
if (option == PR_SET_MM)
- return prctl__scnprintf_set_mm(arg->val, bf, size);
+ return prctl__scnprintf_set_mm(arg->val, bf, size, arg->show_string_prefix);
/*
* We still don't grab the contents of pointers on entry or exit,
* so just print them as hex numbers
@@ -77,5 +77,5 @@ size_t syscall_arg__scnprintf_prctl_option(char *bf, size_t size, struct syscall
if (option < ARRAY_SIZE(masks))
arg->mask |= masks[option];
- return prctl__scnprintf_option(option, bf, size);
+ return prctl__scnprintf_option(option, bf, size, arg->show_string_prefix);
}
diff --git a/tools/perf/trace/beauty/rename_flags.sh b/tools/perf/trace/beauty/rename_flags.sh
new file mode 100755
index 000000000000..54c87c782ab2
--- /dev/null
+++ b/tools/perf/trace/beauty/rename_flags.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+# Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+# SPDX-License-Identifier: LGPL-2.1
+
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
+
+fs_header=${header_dir}/fs.h
+
+printf "static const char *rename_flags[] = {\n"
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+RENAME_([[:alnum:]_]+)[[:space:]]+\(1[[:space:]]*<<[[:space:]]*([[:xdigit:]]+)[[:space:]]*\)[[:space:]]*.*'
+egrep -q $regex ${fs_header} && \
+(egrep $regex ${fs_header} | \
+ sed -r "s/$regex/\2 \1/g" | \
+ xargs printf "\t[%d + 1] = \"%s\",\n")
+printf "};\n"
diff --git a/tools/perf/trace/beauty/renameat.c b/tools/perf/trace/beauty/renameat.c
new file mode 100644
index 000000000000..6dab340cc506
--- /dev/null
+++ b/tools/perf/trace/beauty/renameat.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: LGPL-2.1
+// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+
+#include "trace/beauty/beauty.h"
+#include <uapi/linux/fs.h>
+
+static size_t renameat2__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool show_prefix)
+{
+#include "trace/beauty/generated/rename_flags_array.c"
+ static DEFINE_STRARRAY(rename_flags, "RENAME_");
+
+ return strarray__scnprintf_flags(&strarray__rename_flags, bf, size, show_prefix, flags);
+}
+
+size_t syscall_arg__scnprintf_renameat2_flags(char *bf, size_t size, struct syscall_arg *arg)
+{
+ unsigned long flags = arg->val;
+ return renameat2__scnprintf_flags(flags, bf, size, arg->show_string_prefix);
+}
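So a hypothetical renameat2 call with flags == RENAME_NOREPLACE|RENAME_WHITEOUT (0x5) is rendered by strarray__scnprintf_flags as:

	NOREPLACE|WHITEOUT			(prefix suppressed)
	RENAME_NOREPLACE|RENAME_WHITEOUT	(prefix shown)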
diff --git a/tools/perf/trace/beauty/sched_policy.c b/tools/perf/trace/beauty/sched_policy.c
index 48f2b5c9aa3e..68aa59eeed8d 100644
--- a/tools/perf/trace/beauty/sched_policy.c
+++ b/tools/perf/trace/beauty/sched_policy.c
@@ -17,6 +17,8 @@
static size_t syscall_arg__scnprintf_sched_policy(char *bf, size_t size,
struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "SCHED_";
const char *policies[] = {
"NORMAL", "FIFO", "RR", "BATCH", "ISO", "IDLE", "DEADLINE",
};
@@ -26,13 +28,13 @@ static size_t syscall_arg__scnprintf_sched_policy(char *bf, size_t size,
policy &= SCHED_POLICY_MASK;
if (policy <= SCHED_DEADLINE)
- printed = scnprintf(bf, size, "%s", policies[policy]);
+ printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", policies[policy]);
else
printed = scnprintf(bf, size, "%#x", policy);
#define P_POLICY_FLAG(n) \
if (flags & SCHED_##n) { \
- printed += scnprintf(bf + printed, size - printed, "|%s", #n); \
+ printed += scnprintf(bf + printed, size - printed, "|%s%s", show_prefix ? prefix : "", #n); \
flags &= ~SCHED_##n; \
}
diff --git a/tools/perf/trace/beauty/seccomp.c b/tools/perf/trace/beauty/seccomp.c
index e36156b19c70..4600c28a3cfe 100644
--- a/tools/perf/trace/beauty/seccomp.c
+++ b/tools/perf/trace/beauty/seccomp.c
@@ -8,11 +8,13 @@
static size_t syscall_arg__scnprintf_seccomp_op(char *bf, size_t size, struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "SECOMP_SET_MODE_";
int op = arg->val;
size_t printed = 0;
switch (op) {
-#define P_SECCOMP_SET_MODE_OP(n) case SECCOMP_SET_MODE_##n: printed = scnprintf(bf, size, #n); break
+#define P_SECCOMP_SET_MODE_OP(n) case SECCOMP_SET_MODE_##n: printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", #n); break
P_SECCOMP_SET_MODE_OP(STRICT);
P_SECCOMP_SET_MODE_OP(FILTER);
#undef P_SECCOMP_SET_MODE_OP
@@ -31,11 +33,13 @@ static size_t syscall_arg__scnprintf_seccomp_op(char *bf, size_t size, struct sy
static size_t syscall_arg__scnprintf_seccomp_flags(char *bf, size_t size,
struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "SECOMP_FILTER_FLAG_";
int printed = 0, flags = arg->val;
#define P_FLAG(n) \
if (flags & SECCOMP_FILTER_FLAG_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~SECCOMP_FILTER_FLAG_##n; \
}
diff --git a/tools/perf/trace/beauty/signum.c b/tools/perf/trace/beauty/signum.c
index 587fec545b8a..21220c56500a 100644
--- a/tools/perf/trace/beauty/signum.c
+++ b/tools/perf/trace/beauty/signum.c
@@ -3,10 +3,12 @@
static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "SIG";
int sig = arg->val;
switch (sig) {
-#define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, #n)
+#define P_SIGNUM(n) case SIG##n: return scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", #n)
P_SIGNUM(HUP);
P_SIGNUM(INT);
P_SIGNUM(QUIT);
diff --git a/tools/perf/trace/beauty/sockaddr.c b/tools/perf/trace/beauty/sockaddr.c
index 9410ad230f10..173c8f760763 100644
--- a/tools/perf/trace/beauty/sockaddr.c
+++ b/tools/perf/trace/beauty/sockaddr.c
@@ -15,7 +15,7 @@ static const char *socket_families[] = {
"BLUETOOTH", "IUCV", "RXRPC", "ISDN", "PHONET", "IEEE802154", "CAIF",
"ALG", "NFC", "VSOCK",
};
-DEFINE_STRARRAY(socket_families);
+DEFINE_STRARRAY(socket_families, "PF_");
static size_t af_inet__scnprintf(struct sockaddr *sa, char *bf, size_t size)
{
@@ -58,7 +58,7 @@ static size_t syscall_arg__scnprintf_augmented_sockaddr(struct syscall_arg *arg,
char family[32];
size_t printed;
- strarray__scnprintf(&strarray__socket_families, family, sizeof(family), "%d", sa->sa_family);
+ strarray__scnprintf(&strarray__socket_families, family, sizeof(family), "%d", arg->show_string_prefix, sa->sa_family);
printed = scnprintf(bf, size, "{ .family: %s", family);
if (sa->sa_family < ARRAY_SIZE(af_scnprintfs) && af_scnprintfs[sa->sa_family])
diff --git a/tools/perf/trace/beauty/socket.c b/tools/perf/trace/beauty/socket.c
index d971a2596417..f23a3dda2902 100644
--- a/tools/perf/trace/beauty/socket.c
+++ b/tools/perf/trace/beauty/socket.c
@@ -9,12 +9,12 @@
#include <sys/types.h>
#include <sys/socket.h>
-static size_t socket__scnprintf_ipproto(int protocol, char *bf, size_t size)
+static size_t socket__scnprintf_ipproto(int protocol, char *bf, size_t size, bool show_prefix)
{
#include "trace/beauty/generated/socket_ipproto_array.c"
- static DEFINE_STRARRAY(socket_ipproto);
+ static DEFINE_STRARRAY(socket_ipproto, "IPPROTO_");
- return strarray__scnprintf(&strarray__socket_ipproto, bf, size, "%d", protocol);
+ return strarray__scnprintf(&strarray__socket_ipproto, bf, size, "%d", show_prefix, protocol);
}
size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct syscall_arg *arg)
@@ -22,7 +22,7 @@ size_t syscall_arg__scnprintf_socket_protocol(char *bf, size_t size, struct sysc
int domain = syscall_arg__val(arg, 0);
if (domain == AF_INET || domain == AF_INET6)
- return socket__scnprintf_ipproto(arg->val, bf, size);
+ return socket__scnprintf_ipproto(arg->val, bf, size, arg->show_string_prefix);
return syscall_arg__scnprintf_int(bf, size, arg);
}
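The dispatch above only interprets the third socket() argument as an IP protocol for the AF_INET/AF_INET6 domains; anything else falls back to a plain integer. A reduced standalone sketch of that rule (the abbreviated table is illustrative):

#include <stdio.h>
#include <sys/socket.h>

static const char *ipproto[] = { [6] = "TCP", [17] = "UDP", };

static void print_protocol(int domain, int protocol)
{
	if ((domain == AF_INET || domain == AF_INET6) &&
	    protocol >= 0 &&
	    (size_t)protocol < sizeof(ipproto) / sizeof(ipproto[0]) &&
	    ipproto[protocol])
		printf("IPPROTO_%s\n", ipproto[protocol]);
	else
		printf("%d\n", protocol);
}

int main(void)
{
	print_protocol(AF_INET, 6);	/* IPPROTO_TCP */
	print_protocol(AF_UNIX, 6);	/* 6 */
	return 0;
}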
diff --git a/tools/perf/trace/beauty/socket_type.c b/tools/perf/trace/beauty/socket_type.c
index a63a9a332aa0..bed8d5761ca8 100644
--- a/tools/perf/trace/beauty/socket_type.c
+++ b/tools/perf/trace/beauty/socket_type.c
@@ -20,6 +20,8 @@
static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size, struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "SOCK_";
size_t printed;
int type = arg->val,
flags = type & ~SOCK_TYPE_MASK;
@@ -29,7 +31,7 @@ static size_t syscall_arg__scnprintf_socket_type(char *bf, size_t size, struct s
* Can't use a strarray: MIPS may override the SOCK_* values for ABI reasons.
*/
switch (type) {
-#define P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, #n); break;
+#define P_SK_TYPE(n) case SOCK_##n: printed = scnprintf(bf, size, "%s%s", show_prefix ? prefix : "", #n); break;
P_SK_TYPE(STREAM);
P_SK_TYPE(DGRAM);
P_SK_TYPE(RAW);
diff --git a/tools/perf/trace/beauty/statx.c b/tools/perf/trace/beauty/statx.c
index 630f2760dd66..811cc0eeb2d5 100644
--- a/tools/perf/trace/beauty/statx.c
+++ b/tools/perf/trace/beauty/statx.c
@@ -13,13 +13,15 @@
size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "AT_";
int printed = 0, flags = arg->val;
if (flags == 0)
- return scnprintf(bf, size, "SYNC_AS_STAT");
+ return scnprintf(bf, size, "%s%s", show_prefix ? "AT_STATX_" : "", "SYNC_AS_STAT");
#define P_FLAG(n) \
if (flags & AT_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~AT_##n; \
}
@@ -41,11 +43,13 @@ size_t syscall_arg__scnprintf_statx_flags(char *bf, size_t size, struct syscall_
size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "STATX_";
int printed = 0, flags = arg->val;
#define P_FLAG(n) \
if (flags & STATX_##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
flags &= ~STATX_##n; \
}
diff --git a/tools/perf/trace/beauty/waitid_options.c b/tools/perf/trace/beauty/waitid_options.c
index 42ff58ad613b..6897fab40dcc 100644
--- a/tools/perf/trace/beauty/waitid_options.c
+++ b/tools/perf/trace/beauty/waitid_options.c
@@ -5,11 +5,13 @@
static size_t syscall_arg__scnprintf_waitid_options(char *bf, size_t size,
struct syscall_arg *arg)
{
+ bool show_prefix = arg->show_string_prefix;
+ const char *prefix = "W";
int printed = 0, options = arg->val;
#define P_OPTION(n) \
if (options & W##n) { \
- printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : #n); \
options &= ~W##n; \
}
diff --git a/tools/perf/trace/beauty/x86_arch_prctl.sh b/tools/perf/trace/beauty/x86_arch_prctl.sh
new file mode 100755
index 000000000000..7372d3cab959
--- /dev/null
+++ b/tools/perf/trace/beauty/x86_arch_prctl.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+# Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+# SPDX-License-Identifier: LGPL-2.1
+
+[ $# -eq 1 ] && x86_header_dir=$1 || x86_header_dir=tools/arch/x86/include/uapi/asm/
+
+prctl_arch_header=${x86_header_dir}/prctl.h
+
+print_range () {
+ local idx=$1
+ local prefix=$2
+ local first_entry=$3
+
+ printf "#define x86_arch_prctl_codes_%d_offset %s\n" $idx $first_entry
+ printf "static const char *x86_arch_prctl_codes_%d[] = {\n" $idx
+ regex=`printf '^[[:space:]]*#[[:space:]]*define[[:space:]]+ARCH_([[:alnum:]_]+)[[:space:]]+(%s[[:xdigit:]]+).*' ${prefix}`
+ fmt="\t[%#x - ${first_entry}]= \"%s\",\n"
+ egrep -q $regex ${prctl_arch_header} && \
+ (egrep $regex ${prctl_arch_header} | \
+ sed -r "s/$regex/\2 \1/g" | \
+ xargs printf "$fmt")
+ printf "};\n\n"
+}
+
+print_range 1 0x1 0x1001
+print_range 2 0x2 0x2001
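For reference, assuming the usual x86 uapi prctl.h defines (ARCH_SET_GS 0x1001 through ARCH_GET_GS 0x1004; the values are assumed here, the generated file depends on the header actually parsed), the first print_range call would emit roughly:

#define x86_arch_prctl_codes_1_offset 0x1001
static const char *x86_arch_prctl_codes_1[] = {
	[0x1001 - 0x1001]= "SET_GS",
	[0x1002 - 0x1001]= "SET_FS",
	[0x1003 - 0x1001]= "GET_FS",
	[0x1004 - 0x1001]= "GET_GS",
};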
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index a96f62ca984a..ffac1d54a3d4 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -2219,10 +2219,21 @@ static int hists_browser__scnprintf_title(struct hist_browser *browser, char *bf
if (!is_report_browser(hbt)) {
struct perf_top *top = hbt->arg;
+ printed += scnprintf(bf + printed, size - printed,
+ " lost: %" PRIu64 "/%" PRIu64,
+ top->lost, top->lost_total);
+
+ printed += scnprintf(bf + printed, size - printed,
+ " drop: %" PRIu64 "/%" PRIu64,
+ top->drop, top->drop_total);
+
if (top->zero)
printed += scnprintf(bf + printed, size - printed, " [z]");
+
+ perf_top__reset_sample_counters(top);
}
+
return printed;
}
diff --git a/tools/perf/ui/tui/helpline.c b/tools/perf/ui/tui/helpline.c
index 4ca799aadb4e..93d6b7240285 100644
--- a/tools/perf/ui/tui/helpline.c
+++ b/tools/perf/ui/tui/helpline.c
@@ -24,7 +24,7 @@ static void tui_helpline__push(const char *msg)
SLsmg_set_color(0);
SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
SLsmg_refresh();
- strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
+ strlcpy(ui_helpline__current, msg, sz);
}
static int tui_helpline__show(const char *format, va_list ap)
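The strncpy() to strlcpy() conversions in this series all close the same hazard: strncpy() leaves the destination unterminated when the source fills it. A standalone sketch, with xstrlcpy standing in for the strlcpy helper carried in the tools tree:

#include <stdio.h>
#include <string.h>

static size_t xstrlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t copy = len >= size ? size - 1 : len;

		memcpy(dst, src, copy);
		dst[copy] = '\0';	/* always NUL-terminated */
	}
	return len;
}

int main(void)
{
	char small[4];

	strncpy(small, "helpline", sizeof(small));	/* writes no NUL */
	xstrlcpy(small, "helpline", sizeof(small));	/* "hel" + NUL */
	printf("%s\n", small);
	return 0;
}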
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index ecd9f9ceda77..af72be7f5b3b 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -10,6 +10,7 @@ libperf-y += evlist.o
libperf-y += evsel.o
libperf-y += evsel_fprintf.o
libperf-y += find_bit.o
+libperf-y += get_current_dir_name.o
libperf-y += kallsyms.o
libperf-y += levenshtein.o
libperf-y += llvm-utils.o
@@ -76,6 +77,7 @@ libperf-y += stat-shadow.o
libperf-y += stat-display.o
libperf-y += record.o
libperf-y += srcline.o
+libperf-y += srccode.o
libperf-y += data.o
libperf-y += tsc.o
libperf-y += cloexec.o
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 6936daf89ddd..ac9805e0bc76 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -134,6 +134,7 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i
return 0;
}
+#include "arch/arc/annotate/instructions.c"
#include "arch/arm/annotate/instructions.c"
#include "arch/arm64/annotate/instructions.c"
#include "arch/x86/annotate/instructions.c"
@@ -143,6 +144,10 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i
static struct arch architectures[] = {
{
+ .name = "arc",
+ .init = arc__annotate_init,
+ },
+ {
.name = "arm",
.init = arm__annotate_init,
},
@@ -1000,6 +1005,7 @@ static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64
static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch)
{
unsigned n_insn;
+ unsigned int cover_insn = 0;
u64 offset;
n_insn = annotation__count_insn(notes, start, end);
@@ -1013,21 +1019,34 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64
for (offset = start; offset <= end; offset++) {
struct annotation_line *al = notes->offsets[offset];
- if (al)
+ if (al && al->ipc == 0.0) {
al->ipc = ipc;
+ cover_insn++;
+ }
+ }
+
+ if (cover_insn) {
+ notes->hit_cycles += ch->cycles;
+ notes->hit_insn += n_insn * ch->num;
+ notes->cover_insn += cover_insn;
}
}
}
void annotation__compute_ipc(struct annotation *notes, size_t size)
{
- u64 offset;
+ s64 offset;
if (!notes->src || !notes->src->cycles_hist)
return;
+ notes->total_insn = annotation__count_insn(notes, 0, size - 1);
+ notes->hit_cycles = 0;
+ notes->hit_insn = 0;
+ notes->cover_insn = 0;
+
pthread_mutex_lock(&notes->lock);
- for (offset = 0; offset < size; ++offset) {
+ for (offset = size - 1; offset >= 0; --offset) {
struct cyc_hist *ch;
ch = &notes->src->cycles_hist[offset];
@@ -1758,7 +1777,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
while (!feof(file)) {
/*
* The source code line number (lineno) needs to be kept in
- * accross calls to symbol__parse_objdump_line(), so that it
+ * across calls to symbol__parse_objdump_line(), so that it
* can associate it with the instructions till the next one.
* See disasm_line__new() and struct disasm_line::line_nr.
*/
@@ -2563,6 +2582,22 @@ call_like:
disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset);
}
+static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
+{
+ double ipc = 0.0, coverage = 0.0;
+
+ if (notes->hit_cycles)
+ ipc = notes->hit_insn / ((double)notes->hit_cycles);
+
+ if (notes->total_insn) {
+ coverage = notes->cover_insn * 100.0 /
+ ((double)notes->total_insn);
+ }
+
+ scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
+ ipc, coverage);
+}
+
static void __annotation_line__write(struct annotation_line *al, struct annotation *notes,
bool first_line, bool current_entry, bool change_color, int width,
void *obj, unsigned int percent_type,
@@ -2658,6 +2693,11 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
"Cycle(min/max)");
}
+
+ if (show_title && !*al->line) {
+ ipc_coverage_string(bf, sizeof(bf), notes);
+ obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
+ }
}
obj__printf(obj, " ");
@@ -2763,6 +2803,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
notes->nr_events = nr_pcnt;
annotation__update_column_widths(notes);
+ sym->annotate2 = true;
return 0;
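The two statistics added to struct annotation reduce to simple ratios: average IPC is hit_insn / hit_cycles over the sampled ranges, and coverage is the share of the symbol's instructions that ever fell inside a sampled range. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long hit_insn = 1800, hit_cycles = 1200;	/* made up */
	unsigned int cover_insn = 45, total_insn = 60;		/* made up */
	double ipc = hit_insn / (double)hit_cycles;
	double coverage = cover_insn * 100.0 / total_insn;

	/* prints "(Average IPC: 1.50, IPC Coverage: 75.0%)" */
	printf("(Average IPC: %.2f, IPC Coverage: %.1f%%)\n", ipc, coverage);
	return 0;
}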
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 5399ba2321bb..fb6463730ba4 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -64,6 +64,7 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2);
#define ANNOTATION__IPC_WIDTH 6
#define ANNOTATION__CYCLES_WIDTH 6
#define ANNOTATION__MINMAX_CYCLES_WIDTH 19
+#define ANNOTATION__AVG_IPC_WIDTH 36
struct annotation_options {
bool hide_src_code,
@@ -262,6 +263,10 @@ struct annotation {
pthread_mutex_t lock;
u64 max_coverage;
u64 start;
+ u64 hit_cycles;
+ u64 hit_insn;
+ unsigned int total_insn;
+ unsigned int cover_insn;
struct annotation_options *options;
struct annotation_line **offsets;
int nr_events;
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 72d5ba2479bf..f69961c4a4f3 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -1983,17 +1983,14 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
- struct symbol *first_sym = dso__first_symbol(dso);
- struct symbol *last_sym = dso__last_symbol(dso);
-
- if (!first_sym || !last_sym) {
- pr_err("Failed to determine filter for %s\nNo symbols found.\n",
+ if (dso__data_file_size(dso, NULL)) {
+ pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
filt->filename);
return -EINVAL;
}
- filt->addr = first_sym->start;
- filt->size = last_sym->end - first_sym->start;
+ filt->addr = 0;
+ filt->size = dso->data.file_size;
return 0;
}
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index f9ae1a993806..2f3eb6d293ee 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -99,7 +99,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
if (err)
return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
} else
- pr_debug("bpf: successfull builtin compilation\n");
+ pr_debug("bpf: successful builtin compilation\n");
obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
@@ -1603,7 +1603,7 @@ struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const cha
op = bpf_map__add_newop(map, NULL);
if (IS_ERR(op))
- return ERR_PTR(PTR_ERR(op));
+ return ERR_CAST(op);
op->op_type = BPF_MAP_OP_SET_EVSEL;
op->v.evsel = evsel;
}
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 5ac157056cdf..1ea8f898f1a1 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -14,6 +14,7 @@
#include "util.h"
#include "cache.h"
#include <subcmd/exec-cmd.h>
+#include "util/event.h" /* proc_map_timeout */
#include "util/hist.h" /* perf_hist_config */
#include "util/llvm-utils.h" /* perf_llvm_config */
#include "config.h"
@@ -419,6 +420,9 @@ static int perf_buildid_config(const char *var, const char *value)
static int perf_default_core_config(const char *var __maybe_unused,
const char *value __maybe_unused)
{
+ if (!strcmp(var, "core.proc-map-timeout"))
+ proc_map_timeout = strtoul(value, NULL, 10);
+
/* Add other config variables here. */
return 0;
}
@@ -811,14 +815,14 @@ int config_error_nonbool(const char *var)
void set_buildid_dir(const char *dir)
{
if (dir)
- scnprintf(buildid_dir, MAXPATHLEN-1, "%s", dir);
+ scnprintf(buildid_dir, MAXPATHLEN, "%s", dir);
/* default to $HOME/.debug */
if (buildid_dir[0] == '\0') {
char *home = getenv("HOME");
if (home) {
- snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s",
+ snprintf(buildid_dir, MAXPATHLEN, "%s/%s",
home, DEBUG_CACHE_DIR);
} else {
strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1);
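The new core.proc-map-timeout handler follows the usual perf_config dispatch shape: 'var' arrives dotted, 'value' as a string, so a ~/.perfconfig stanza of '[core]' with 'proc-map-timeout = 500' (section and key taken from the strcmp above) lands here. A minimal sketch of that dispatch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned int proc_map_timeout = 500;	/* default, in ms */

static int core_config(const char *var, const char *value)
{
	if (!strcmp(var, "core.proc-map-timeout"))
		proc_map_timeout = strtoul(value, NULL, 10);
	return 0;
}

int main(void)
{
	core_config("core.proc-map-timeout", "250");
	printf("%u\n", proc_map_timeout);	/* 250 */
	return 0;
}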
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 938def6d0bb9..8c155575c6c5 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -36,7 +36,6 @@
struct cs_etm_decoder {
void *data;
void (*packet_printer)(const char *msg);
- bool trace_on;
dcd_tree_handle_t dcd_tree;
cs_etm_mem_cb_type mem_access;
ocsd_datapath_resp_t prev_return;
@@ -116,6 +115,19 @@ int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder,
return 1;
}
+static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
+ ocsd_etmv3_cfg *config)
+{
+ config->reg_idr = params->etmv3.reg_idr;
+ config->reg_ctrl = params->etmv3.reg_ctrl;
+ config->reg_ccer = params->etmv3.reg_ccer;
+ config->reg_trc_id = params->etmv3.reg_trc_id;
+ config->arch_ver = ARCH_V7;
+ config->core_prof = profile_CortexA;
+
+ return 0;
+}
+
static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
ocsd_etmv4_cfg *config)
{
@@ -237,10 +249,19 @@ cs_etm_decoder__create_etm_packet_printer(struct cs_etm_trace_params *t_params,
struct cs_etm_decoder *decoder)
{
const char *decoder_name;
+ ocsd_etmv3_cfg config_etmv3;
ocsd_etmv4_cfg trace_config_etmv4;
void *trace_config;
switch (t_params->protocol) {
+ case CS_ETM_PROTO_ETMV3:
+ case CS_ETM_PROTO_PTM:
+ cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
+ decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
+ OCSD_BUILTIN_DCD_ETMV3 :
+ OCSD_BUILTIN_DCD_PTM;
+ trace_config = &config_etmv3;
+ break;
case CS_ETM_PROTO_ETMV4i:
cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
@@ -263,11 +284,12 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder)
decoder->tail = 0;
decoder->packet_count = 0;
for (i = 0; i < MAX_BUFFER; i++) {
+ decoder->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
decoder->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
+ decoder->packet_buffer[i].instr_count = 0;
decoder->packet_buffer[i].last_instr_taken_branch = false;
- decoder->packet_buffer[i].exc = false;
- decoder->packet_buffer[i].exc_ret = false;
+ decoder->packet_buffer[i].last_instr_size = 0;
decoder->packet_buffer[i].cpu = INT_MIN;
}
}
@@ -294,11 +316,13 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
decoder->packet_count++;
decoder->packet_buffer[et].sample_type = sample_type;
- decoder->packet_buffer[et].exc = false;
- decoder->packet_buffer[et].exc_ret = false;
+ decoder->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
decoder->packet_buffer[et].cpu = *((int *)inode->priv);
decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
+ decoder->packet_buffer[et].instr_count = 0;
+ decoder->packet_buffer[et].last_instr_taken_branch = false;
+ decoder->packet_buffer[et].last_instr_size = 0;
if (decoder->packet_count == MAX_BUFFER - 1)
return OCSD_RESP_WAIT;
@@ -321,8 +345,28 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
packet = &decoder->packet_buffer[decoder->tail];
+ switch (elem->isa) {
+ case ocsd_isa_aarch64:
+ packet->isa = CS_ETM_ISA_A64;
+ break;
+ case ocsd_isa_arm:
+ packet->isa = CS_ETM_ISA_A32;
+ break;
+ case ocsd_isa_thumb2:
+ packet->isa = CS_ETM_ISA_T32;
+ break;
+ case ocsd_isa_tee:
+ case ocsd_isa_jazelle:
+ case ocsd_isa_custom:
+ case ocsd_isa_unknown:
+ default:
+ packet->isa = CS_ETM_ISA_UNKNOWN;
+ }
+
packet->start_addr = elem->st_addr;
packet->end_addr = elem->en_addr;
+ packet->instr_count = elem->num_instr_range;
+
switch (elem->last_i_type) {
case OCSD_INSTR_BR:
case OCSD_INSTR_BR_INDIRECT:
@@ -336,15 +380,33 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder,
break;
}
+ packet->last_instr_size = elem->last_instr_sz;
+
return ret;
}
static ocsd_datapath_resp_t
-cs_etm_decoder__buffer_trace_on(struct cs_etm_decoder *decoder,
- const uint8_t trace_chan_id)
+cs_etm_decoder__buffer_discontinuity(struct cs_etm_decoder *decoder,
+ const uint8_t trace_chan_id)
{
return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
- CS_ETM_TRACE_ON);
+ CS_ETM_DISCONTINUITY);
+}
+
+static ocsd_datapath_resp_t
+cs_etm_decoder__buffer_exception(struct cs_etm_decoder *decoder,
+ const uint8_t trace_chan_id)
+{
+ return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+ CS_ETM_EXCEPTION);
+}
+
+static ocsd_datapath_resp_t
+cs_etm_decoder__buffer_exception_ret(struct cs_etm_decoder *decoder,
+ const uint8_t trace_chan_id)
+{
+ return cs_etm_decoder__buffer_packet(decoder, trace_chan_id,
+ CS_ETM_EXCEPTION_RET);
}
static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
@@ -359,26 +421,25 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
switch (elem->elem_type) {
case OCSD_GEN_TRC_ELEM_UNKNOWN:
break;
+ case OCSD_GEN_TRC_ELEM_EO_TRACE:
case OCSD_GEN_TRC_ELEM_NO_SYNC:
- decoder->trace_on = false;
- break;
case OCSD_GEN_TRC_ELEM_TRACE_ON:
- resp = cs_etm_decoder__buffer_trace_on(decoder,
- trace_chan_id);
- decoder->trace_on = true;
+ resp = cs_etm_decoder__buffer_discontinuity(decoder,
+ trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
resp = cs_etm_decoder__buffer_range(decoder, elem,
trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION:
- decoder->packet_buffer[decoder->tail].exc = true;
+ resp = cs_etm_decoder__buffer_exception(decoder,
+ trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
- decoder->packet_buffer[decoder->tail].exc_ret = true;
+ resp = cs_etm_decoder__buffer_exception_ret(decoder,
+ trace_chan_id);
break;
case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
- case OCSD_GEN_TRC_ELEM_EO_TRACE:
case OCSD_GEN_TRC_ELEM_ADDR_NACC:
case OCSD_GEN_TRC_ELEM_TIMESTAMP:
case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
@@ -398,11 +459,20 @@ static int cs_etm_decoder__create_etm_packet_decoder(
struct cs_etm_decoder *decoder)
{
const char *decoder_name;
+ ocsd_etmv3_cfg config_etmv3;
ocsd_etmv4_cfg trace_config_etmv4;
void *trace_config;
u8 csid;
switch (t_params->protocol) {
+ case CS_ETM_PROTO_ETMV3:
+ case CS_ETM_PROTO_PTM:
+ cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
+ decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
+ OCSD_BUILTIN_DCD_ETMV3 :
+ OCSD_BUILTIN_DCD_PTM;
+ trace_config = &config_etmv3;
+ break;
case CS_ETM_PROTO_ETMV4i:
cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
index 612b5755f742..a6407d41598f 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h
@@ -23,18 +23,28 @@ struct cs_etm_buffer {
};
enum cs_etm_sample_type {
- CS_ETM_EMPTY = 0,
- CS_ETM_RANGE = 1 << 0,
- CS_ETM_TRACE_ON = 1 << 1,
+ CS_ETM_EMPTY,
+ CS_ETM_RANGE,
+ CS_ETM_DISCONTINUITY,
+ CS_ETM_EXCEPTION,
+ CS_ETM_EXCEPTION_RET,
+};
+
+enum cs_etm_isa {
+ CS_ETM_ISA_UNKNOWN,
+ CS_ETM_ISA_A64,
+ CS_ETM_ISA_A32,
+ CS_ETM_ISA_T32,
};
struct cs_etm_packet {
enum cs_etm_sample_type sample_type;
+ enum cs_etm_isa isa;
u64 start_addr;
u64 end_addr;
+ u32 instr_count;
u8 last_instr_taken_branch;
- u8 exc;
- u8 exc_ret;
+ u8 last_instr_size;
int cpu;
};
@@ -43,6 +53,13 @@ struct cs_etm_queue;
typedef u32 (*cs_etm_mem_cb_type)(struct cs_etm_queue *, u64,
size_t, u8 *);
+struct cs_etmv3_trace_params {
+ u32 reg_ctrl;
+ u32 reg_trc_id;
+ u32 reg_ccer;
+ u32 reg_idr;
+};
+
struct cs_etmv4_trace_params {
u32 reg_idr0;
u32 reg_idr1;
@@ -55,6 +72,7 @@ struct cs_etmv4_trace_params {
struct cs_etm_trace_params {
int protocol;
union {
+ struct cs_etmv3_trace_params etmv3;
struct cs_etmv4_trace_params etmv4;
};
};
@@ -78,6 +96,7 @@ enum {
CS_ETM_PROTO_ETMV3 = 1,
CS_ETM_PROTO_ETMV4i,
CS_ETM_PROTO_ETMV4d,
+ CS_ETM_PROTO_PTM,
};
enum {
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 73430b73570d..27a374ddf661 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -31,14 +31,6 @@
#define MAX_TIMESTAMP (~0ULL)
-/*
- * A64 instructions are always 4 bytes
- *
- * Only A64 is supported, so can use this constant for converting between
- * addresses and instruction counts, calculting offsets etc
- */
-#define A64_INSTR_SIZE 4
-
struct cs_etm_auxtrace {
struct auxtrace auxtrace;
struct auxtrace_queues queues;
@@ -91,6 +83,19 @@ static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
pid_t tid, u64 time_);
+/* PTMs ETMIDR [11:8] set to b0011 */
+#define ETMIDR_PTM_VERSION 0x00000300
+
+static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
+{
+ etmidr &= ETMIDR_PTM_VERSION;
+
+ if (etmidr == ETMIDR_PTM_VERSION)
+ return CS_ETM_PROTO_PTM;
+
+ return CS_ETM_PROTO_ETMV3;
+}
+
static void cs_etm__packet_dump(const char *pkt_string)
{
const char *color = PERF_COLOR_BLUE;
@@ -122,15 +127,31 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
/* Use metadata to fill in trace parameters for trace decoder */
t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
for (i = 0; i < etm->num_cpu; i++) {
- t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
- t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
- t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
- t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
- t_params[i].etmv4.reg_configr =
+ if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
+ u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
+
+ t_params[i].protocol =
+ cs_etm__get_v7_protocol_version(etmidr);
+ t_params[i].etmv3.reg_ctrl =
+ etm->metadata[i][CS_ETM_ETMCR];
+ t_params[i].etmv3.reg_trc_id =
+ etm->metadata[i][CS_ETM_ETMTRACEIDR];
+ } else if (etm->metadata[i][CS_ETM_MAGIC] ==
+ __perf_cs_etmv4_magic) {
+ t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[i].etmv4.reg_idr0 =
+ etm->metadata[i][CS_ETMV4_TRCIDR0];
+ t_params[i].etmv4.reg_idr1 =
+ etm->metadata[i][CS_ETMV4_TRCIDR1];
+ t_params[i].etmv4.reg_idr2 =
+ etm->metadata[i][CS_ETMV4_TRCIDR2];
+ t_params[i].etmv4.reg_idr8 =
+ etm->metadata[i][CS_ETMV4_TRCIDR8];
+ t_params[i].etmv4.reg_configr =
etm->metadata[i][CS_ETMV4_TRCCONFIGR];
- t_params[i].etmv4.reg_traceidr =
+ t_params[i].etmv4.reg_traceidr =
etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
+ }
}
/* Set decoder parameters to simply print the trace packets */
@@ -360,15 +381,31 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
goto out_free;
for (i = 0; i < etm->num_cpu; i++) {
- t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
- t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
- t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
- t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
- t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
- t_params[i].etmv4.reg_configr =
+ if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
+ u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];
+
+ t_params[i].protocol =
+ cs_etm__get_v7_protocol_version(etmidr);
+ t_params[i].etmv3.reg_ctrl =
+ etm->metadata[i][CS_ETM_ETMCR];
+ t_params[i].etmv3.reg_trc_id =
+ etm->metadata[i][CS_ETM_ETMTRACEIDR];
+ } else if (etm->metadata[i][CS_ETM_MAGIC] ==
+ __perf_cs_etmv4_magic) {
+ t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
+ t_params[i].etmv4.reg_idr0 =
+ etm->metadata[i][CS_ETMV4_TRCIDR0];
+ t_params[i].etmv4.reg_idr1 =
+ etm->metadata[i][CS_ETMV4_TRCIDR1];
+ t_params[i].etmv4.reg_idr2 =
+ etm->metadata[i][CS_ETMV4_TRCIDR2];
+ t_params[i].etmv4.reg_idr8 =
+ etm->metadata[i][CS_ETMV4_TRCIDR8];
+ t_params[i].etmv4.reg_configr =
etm->metadata[i][CS_ETMV4_TRCCONFIGR];
- t_params[i].etmv4.reg_traceidr =
+ t_params[i].etmv4.reg_traceidr =
etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
+ }
}
/* Set decoder parameters to simply print the trace packets */
@@ -510,53 +547,54 @@ static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
etmq->last_branch_rb->nr = 0;
}
-static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
-{
- /* Returns 0 for the CS_ETM_TRACE_ON packet */
- if (packet->sample_type == CS_ETM_TRACE_ON)
- return 0;
+static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
+ u64 addr) {
+ u8 instrBytes[2];
+ cs_etm__mem_access(etmq, addr, ARRAY_SIZE(instrBytes), instrBytes);
/*
- * The packet records the execution range with an exclusive end address
- *
- * A64 instructions are constant size, so the last executed
- * instruction is A64_INSTR_SIZE before the end address
- * Will need to do instruction level decode for T32 instructions as
- * they can be variable size (not yet supported).
+ * T32 instruction size is indicated by bits[15:11] of the first
+ * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
+ * denote a 32-bit instruction.
*/
- return packet->end_addr - A64_INSTR_SIZE;
+ return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
}
static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
- /* Returns 0 for the CS_ETM_TRACE_ON packet */
- if (packet->sample_type == CS_ETM_TRACE_ON)
+ /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
+ if (packet->sample_type == CS_ETM_DISCONTINUITY)
return 0;
return packet->start_addr;
}
-static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet)
+static inline
+u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
{
- /*
- * Only A64 instructions are currently supported, so can get
- * instruction count by dividing.
- * Will need to do instruction level decode for T32 instructions as
- * they can be variable size (not yet supported).
- */
- return (packet->end_addr - packet->start_addr) / A64_INSTR_SIZE;
+ /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
+ if (packet->sample_type == CS_ETM_DISCONTINUITY)
+ return 0;
+
+ return packet->end_addr - packet->last_instr_size;
}
-static inline u64 cs_etm__instr_addr(const struct cs_etm_packet *packet,
+static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
+ const struct cs_etm_packet *packet,
u64 offset)
{
- /*
- * Only A64 instructions are currently supported, so can get
- * instruction address by muliplying.
- * Will need to do instruction level decode for T32 instructions as
- * they can be variable size (not yet supported).
- */
- return packet->start_addr + offset * A64_INSTR_SIZE;
+ if (packet->isa == CS_ETM_ISA_T32) {
+ u64 addr = packet->start_addr;
+
+ while (offset > 0) {
+ addr += cs_etm__t32_instr_size(etmq, addr);
+ offset--;
+ }
+ return addr;
+ }
+
+ /* Assume a 4 byte instruction size (A32/A64) */
+ return packet->start_addr + offset * 4;
}
static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
@@ -888,9 +926,8 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
struct cs_etm_auxtrace *etm = etmq->etm;
struct cs_etm_packet *tmp;
int ret;
- u64 instrs_executed;
+ u64 instrs_executed = etmq->packet->instr_count;
- instrs_executed = cs_etm__instr_count(etmq->packet);
etmq->period_instructions += instrs_executed;
/*
@@ -920,7 +957,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
* executed, but PC has not advanced to next instruction)
*/
u64 offset = (instrs_executed - instrs_over - 1);
- u64 addr = cs_etm__instr_addr(etmq->packet, offset);
+ u64 addr = cs_etm__instr_addr(etmq, etmq->packet, offset);
ret = cs_etm__synth_instruction_sample(
etmq, addr, etm->instructions_sample_period);
@@ -935,7 +972,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
bool generate_sample = false;
/* Generate sample for tracing on packet */
- if (etmq->prev_packet->sample_type == CS_ETM_TRACE_ON)
+ if (etmq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
generate_sample = true;
/* Generate sample for branch taken packet */
@@ -963,6 +1000,25 @@ static int cs_etm__sample(struct cs_etm_queue *etmq)
return 0;
}
+static int cs_etm__exception(struct cs_etm_queue *etmq)
+{
+ /*
+ * When an exception packet is inserted, force
+ * 'prev_packet->last_instr_taken_branch' to true regardless of whether
+ * the last instruction in the previous range packet was a taken branch.
+ * This ensures a branch sample is generated for the instruction range
+ * executed before the exception traps to the kernel or before the
+ * exception returns.
+ *
+ * The exception packet carries dummy address values, so don't swap
+ * PACKET with PREV_PACKET; this keeps PREV_PACKET usable for
+ * generating instruction and branch samples.
+ */
+ if (etmq->prev_packet->sample_type == CS_ETM_RANGE)
+ etmq->prev_packet->last_instr_taken_branch = true;
+
+ return 0;
+}
+
static int cs_etm__flush(struct cs_etm_queue *etmq)
{
int err = 0;
@@ -1005,7 +1061,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq)
}
swap_packet:
- if (etmq->etm->synth_opts.last_branch) {
+ if (etm->sample_branches || etm->synth_opts.last_branch) {
/*
* Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
* the next incoming packet.
@@ -1018,6 +1074,39 @@ swap_packet:
return err;
}
+static int cs_etm__end_block(struct cs_etm_queue *etmq)
+{
+ int err;
+
+ /*
+ * No new packet is coming and 'etmq->packet' still contains the stale
+ * packet set during the previous packet swap, so skip generating a
+ * branch sample to avoid using that stale packet.
+ *
+ * Instead, only flush the branch stack and generate a last branch
+ * event for the branches left in the circular buffer at the end of
+ * the trace.
+ */
+ if (etmq->etm->synth_opts.last_branch &&
+ etmq->prev_packet->sample_type == CS_ETM_RANGE) {
+ /*
+ * Use the address of the end of the last reported execution
+ * range.
+ */
+ u64 addr = cs_etm__last_executed_instr(etmq->prev_packet);
+
+ err = cs_etm__synth_instruction_sample(
+ etmq, addr,
+ etmq->period_instructions);
+ if (err)
+ return err;
+
+ etmq->period_instructions = 0;
+ }
+
+ return 0;
+}
+
static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
struct cs_etm_auxtrace *etm = etmq->etm;
@@ -1078,7 +1167,16 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
*/
cs_etm__sample(etmq);
break;
- case CS_ETM_TRACE_ON:
+ case CS_ETM_EXCEPTION:
+ case CS_ETM_EXCEPTION_RET:
+ /*
+ * If an exception packet comes in,
+ * make sure the previous instruction
+ * range packet is handled properly.
+ */
+ cs_etm__exception(etmq);
+ break;
+ case CS_ETM_DISCONTINUITY:
/*
* Discontinuity in trace, flush
* previous branch stack
@@ -1100,7 +1198,7 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
if (err == 0)
/* Flush any remaining branch stack entries */
- err = cs_etm__flush(etmq);
+ err = cs_etm__end_block(etmq);
}
return err;
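The T32 handling above rests on one architectural rule: bits[15:11] of an instruction's first halfword say whether it is 16 or 32 bits wide, which is why cs_etm__instr_addr() must walk forward from start_addr instead of multiplying. A standalone check of the decode rule (the opcodes are ordinary Thumb encodings):

#include <stdio.h>
#include <stdint.h>

/* 0b11101, 0b11110 and 0b11111 in bits[15:11] denote a 32-bit instruction. */
static int t32_instr_size(uint16_t first_halfword)
{
	return ((first_halfword & 0xF800) >= 0xE800) ? 4 : 2;
}

int main(void)
{
	/* 0x4770 is "bx lr" (16-bit); 0xF000 opens a 32-bit BL encoding. */
	printf("%d %d\n", t32_instr_size(0x4770), t32_instr_size(0xF000));
	return 0;	/* prints "2 4" */
}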
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index bbed90e5d9bb..62c8cf622607 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -295,7 +295,7 @@ static int decompress_kmodule(struct dso *dso, const char *name,
unlink(tmpbuf);
if (pathname && (fd >= 0))
- strncpy(pathname, tmpbuf, len);
+ strlcpy(pathname, tmpbuf, len);
return fd;
}
@@ -894,7 +894,7 @@ static ssize_t cached_read(struct dso *dso, struct machine *machine,
return r;
}
-static int data_file_size(struct dso *dso, struct machine *machine)
+int dso__data_file_size(struct dso *dso, struct machine *machine)
{
int ret = 0;
struct stat st;
@@ -943,7 +943,7 @@ out:
*/
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
- if (data_file_size(dso, machine))
+ if (dso__data_file_size(dso, machine))
return -1;
/* For now just estimate dso data size is close to file size */
@@ -953,7 +953,7 @@ off_t dso__data_size(struct dso *dso, struct machine *machine)
static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size)
{
- if (data_file_size(dso, machine))
+ if (dso__data_file_size(dso, machine))
return -1;
/* Check the offset sanity. */
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index c5380500bed4..8c8a7abe809d 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -322,6 +322,7 @@ int dso__data_get_fd(struct dso *dso, struct machine *machine);
void dso__data_put_fd(struct dso *dso);
void dso__data_close(struct dso *dso);
+int dso__data_file_size(struct dso *dso, struct machine *machine);
off_t dso__data_size(struct dso *dso, struct machine *machine);
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
u64 offset, u8 *data, ssize_t size);
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 59f38c7693f8..4c23779e271a 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -166,7 +166,7 @@ const char *perf_env__arch(struct perf_env *env)
struct utsname uts;
char *arch_name;
- if (!env) { /* Assume local operation */
+ if (!env || !env->arch) { /* Assume local operation */
if (uname(&uts) < 0)
return NULL;
arch_name = uts.machine;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index e9c108a6b1c3..937a5a4f71cc 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -25,6 +25,8 @@
#include "asm/bug.h"
#include "stat.h"
+#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
+
static const char *perf_event__names[] = {
[0] = "TOTAL",
[PERF_RECORD_MMAP] = "MMAP",
@@ -72,6 +74,8 @@ static const char *perf_ns__names[] = {
[CGROUP_NS_INDEX] = "cgroup",
};
+unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
+
const char *perf_event__name(unsigned int id)
{
if (id >= ARRAY_SIZE(perf_event__names))
@@ -323,8 +327,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
pid_t pid, pid_t tgid,
perf_event__handler_t process,
struct machine *machine,
- bool mmap_data,
- unsigned int proc_map_timeout)
+ bool mmap_data)
{
char filename[PATH_MAX];
FILE *fp;
@@ -521,8 +524,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
perf_event__handler_t process,
struct perf_tool *tool,
struct machine *machine,
- bool mmap_data,
- unsigned int proc_map_timeout)
+ bool mmap_data)
{
char filename[PATH_MAX];
DIR *tasks;
@@ -548,8 +550,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
*/
if (pid == tgid &&
perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
- process, machine, mmap_data,
- proc_map_timeout))
+ process, machine, mmap_data))
return -1;
return 0;
@@ -598,7 +599,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
if (_pid == pid) {
/* process the parent's maps too */
rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
- process, machine, mmap_data, proc_map_timeout);
+ process, machine, mmap_data);
if (rc)
break;
}
@@ -612,8 +613,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
struct thread_map *threads,
perf_event__handler_t process,
struct machine *machine,
- bool mmap_data,
- unsigned int proc_map_timeout)
+ bool mmap_data)
{
union perf_event *comm_event, *mmap_event, *fork_event;
union perf_event *namespaces_event;
@@ -643,7 +643,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
fork_event, namespaces_event,
thread_map__pid(threads, thread), 0,
process, tool, machine,
- mmap_data, proc_map_timeout)) {
+ mmap_data)) {
err = -1;
break;
}
@@ -669,7 +669,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
fork_event, namespaces_event,
comm_event->comm.pid, 0,
process, tool, machine,
- mmap_data, proc_map_timeout)) {
+ mmap_data)) {
err = -1;
break;
}
@@ -690,7 +690,6 @@ static int __perf_event__synthesize_threads(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine,
bool mmap_data,
- unsigned int proc_map_timeout,
struct dirent **dirent,
int start,
int num)
@@ -734,8 +733,7 @@ static int __perf_event__synthesize_threads(struct perf_tool *tool,
*/
__event__synthesize_thread(comm_event, mmap_event, fork_event,
namespaces_event, pid, 1, process,
- tool, machine, mmap_data,
- proc_map_timeout);
+ tool, machine, mmap_data);
}
err = 0;
@@ -755,7 +753,6 @@ struct synthesize_threads_arg {
perf_event__handler_t process;
struct machine *machine;
bool mmap_data;
- unsigned int proc_map_timeout;
struct dirent **dirent;
int num;
int start;
@@ -767,7 +764,7 @@ static void *synthesize_threads_worker(void *arg)
__perf_event__synthesize_threads(args->tool, args->process,
args->machine, args->mmap_data,
- args->proc_map_timeout, args->dirent,
+ args->dirent,
args->start, args->num);
return NULL;
}
@@ -776,7 +773,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine,
bool mmap_data,
- unsigned int proc_map_timeout,
unsigned int nr_threads_synthesize)
{
struct synthesize_threads_arg *args = NULL;
@@ -806,7 +802,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
if (thread_nr <= 1) {
err = __perf_event__synthesize_threads(tool, process,
machine, mmap_data,
- proc_map_timeout,
dirent, base, n);
goto free_dirent;
}
@@ -828,7 +823,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
args[i].process = process;
args[i].machine = machine;
args[i].mmap_data = mmap_data;
- args[i].proc_map_timeout = proc_map_timeout;
args[i].dirent = dirent;
}
for (i = 0; i < m; i++) {
@@ -1577,6 +1571,24 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
return al->map;
}
+/*
+ * For branch stacks or branch samples, the sample cpumode might not be correct
+ * because it applies only to the sample 'ip' and not necessarily to 'addr' or
+ * branch stack addresses. If possible, use a fallback to deal with those cases.
+ */
+struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
+ struct addr_location *al)
+{
+ struct map *map = thread__find_map(thread, cpumode, addr, al);
+ struct machine *machine = thread->mg->machine;
+ u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
+
+ if (map || addr_cpumode == cpumode)
+ return map;
+
+ return thread__find_map(thread, addr_cpumode, addr, al);
+}
+
struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
u64 addr, struct addr_location *al)
{
@@ -1586,6 +1598,15 @@ struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
return al->sym;
}
+struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
+ u64 addr, struct addr_location *al)
+{
+ al->sym = NULL;
+ if (thread__find_map_fb(thread, cpumode, addr, al))
+ al->sym = map__find_symbol(al->map, al->addr);
+ return al->sym;
+}
+
/*
* Callers need to drop the reference to al->thread, obtained in
* machine__findnew_thread()
@@ -1679,7 +1700,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr)
void thread__resolve(struct thread *thread, struct addr_location *al,
struct perf_sample *sample)
{
- thread__find_map(thread, sample->cpumode, sample->addr, al);
+ thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
al->cpu = sample->cpu;
al->sym = NULL;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index bfa60bcafbde..eb95f3384958 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -669,8 +669,7 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool,
int perf_event__synthesize_thread_map(struct perf_tool *tool,
struct thread_map *threads,
perf_event__handler_t process,
- struct machine *machine, bool mmap_data,
- unsigned int proc_map_timeout);
+ struct machine *machine, bool mmap_data);
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
struct thread_map *threads,
perf_event__handler_t process,
@@ -682,7 +681,6 @@ int perf_event__synthesize_cpu_map(struct perf_tool *tool,
int perf_event__synthesize_threads(struct perf_tool *tool,
perf_event__handler_t process,
struct machine *machine, bool mmap_data,
- unsigned int proc_map_timeout,
unsigned int nr_threads_synthesize);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
@@ -797,8 +795,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
pid_t pid, pid_t tgid,
perf_event__handler_t process,
struct machine *machine,
- bool mmap_data,
- unsigned int proc_map_timeout);
+ bool mmap_data);
int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
perf_event__handler_t process,
@@ -829,5 +826,6 @@ int perf_event_paranoid(void);
extern int sysctl_perf_event_max_stack;
extern int sysctl_perf_event_max_contexts_per_stack;
+extern unsigned int proc_map_timeout;
#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 668d2a9ef0f4..8c902276d4b4 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -34,6 +34,10 @@
#include <linux/log2.h>
#include <linux/err.h>
+#ifdef LACKS_SIGQUEUE_PROTOTYPE
+int sigqueue(pid_t pid, int sig, const union sigval value);
+#endif
+
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
@@ -1018,7 +1022,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite)
+ bool auxtrace_overwrite, int nr_cblocks)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
@@ -1028,7 +1032,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp;
+ struct mmap_params mp = { .nr_cblocks = nr_cblocks };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1060,7 +1064,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, 0, false);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false, 0);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1176,7 +1180,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **e
return err;
}
-int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
+int perf_evlist__set_tp_filter(struct perf_evlist *evlist, const char *filter)
{
struct perf_evsel *evsel;
int err = 0;
@@ -1193,7 +1197,7 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
return err;
}
-int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
+int perf_evlist__set_tp_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
{
char *filter;
int ret = -1;
@@ -1214,15 +1218,15 @@ int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t
}
}
- ret = perf_evlist__set_filter(evlist, filter);
+ ret = perf_evlist__set_tp_filter(evlist, filter);
out_free:
free(filter);
return ret;
}
-int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
+int perf_evlist__set_tp_filter_pid(struct perf_evlist *evlist, pid_t pid)
{
- return perf_evlist__set_filter_pids(evlist, 1, &pid);
+ return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
}
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 9919eed6d15b..868294491194 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -98,9 +98,9 @@ void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
#define perf_evlist__reset_sample_bit(evlist, bit) \
__perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)
-int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
-int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid);
-int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);
+int perf_evlist__set_tp_filter(struct perf_evlist *evlist, const char *filter);
+int perf_evlist__set_tp_filter_pid(struct perf_evlist *evlist, pid_t pid);
+int perf_evlist__set_tp_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
@@ -162,7 +162,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite);
+ bool auxtrace_overwrite, int nr_cblocks);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index d37bb1566cd9..dbc0466db368 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1092,7 +1092,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->exclude_user = 1;
}
- if (evsel->own_cpus)
+ if (evsel->own_cpus || evsel->unit)
evsel->attr.read_format |= PERF_FORMAT_ID;
/*
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3147ca76c6fc..82a289ce8b0c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -106,7 +106,7 @@ struct perf_evsel {
char *name;
double scale;
const char *unit;
- struct tep_event_format *tp_format;
+ struct tep_event *tp_format;
off_t id_offset;
struct perf_stat_evsel *stats;
void *priv;
@@ -216,7 +216,7 @@ static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *
struct perf_evsel *perf_evsel__new_cycles(bool precise);
-struct tep_event_format *event_format__new(const char *sys, const char *name);
+struct tep_event *event_format__new(const char *sys, const char *name);
void perf_evsel__init(struct perf_evsel *evsel,
struct perf_event_attr *attr, int idx);
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c
index 0d0a4c6f368b..95ea147f9e18 100644
--- a/tools/perf/util/evsel_fprintf.c
+++ b/tools/perf/util/evsel_fprintf.c
@@ -173,6 +173,7 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
if (!print_oneline)
printed += fprintf(fp, "\n");
+ /* Add srccode here too? */
if (symbol_conf.bt_stop_list &&
node->sym &&
strlist__has_entry(symbol_conf.bt_stop_list,
diff --git a/tools/perf/util/get_current_dir_name.c b/tools/perf/util/get_current_dir_name.c
new file mode 100644
index 000000000000..267aa609a582
--- /dev/null
+++ b/tools/perf/util/get_current_dir_name.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+//
+#ifndef HAVE_GET_CURRENT_DIR_NAME
+#include "util.h"
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* Android's 'bionic' library, for one, doesn't have this */
+
+char *get_current_dir_name(void)
+{
+ char pwd[PATH_MAX];
+
+ return getcwd(pwd, sizeof(pwd)) == NULL ? NULL : strdup(pwd);
+}
+#endif // HAVE_GET_CURRENT_DIR_NAME
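A self-contained version of the same fallback, for reference; the caller owns the strdup()ed buffer and must free it:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static char *fallback_get_current_dir_name(void)
{
	char pwd[PATH_MAX];

	return getcwd(pwd, sizeof(pwd)) == NULL ? NULL : strdup(pwd);
}

int main(void)
{
	char *cwd = fallback_get_current_dir_name();

	if (cwd) {
		printf("%s\n", cwd);
		free(cwd);
	}
	return 0;
}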
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 4fd45be95a43..dec6d218c31c 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -988,6 +988,45 @@ static int write_group_desc(struct feat_fd *ff,
}
/*
+ * Return the CPU id as a raw string.
+ *
+ * Each architecture should provide a more precise id string that
+ * can be used to match the architecture's "mapfile".
+ */
+char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
+{
+ return NULL;
+}
+
+/* Return zero when the cpuid from the mapfile.csv matches the
+ * cpuid string generated on this platform.
+ * Otherwise return non-zero.
+ */
+int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
+{
+ regex_t re;
+ regmatch_t pmatch[1];
+ int match;
+
+ if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
+ /* Warn about being unable to compile a matcher for this string. */
+ pr_info("Invalid regular expression %s\n", mapcpuid);
+ return 1;
+ }
+
+ match = !regexec(&re, cpuid, 1, pmatch, 0);
+ regfree(&re);
+ if (match) {
+ size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
+
+ /* Verify the entire string matched. */
+ if (match_len == strlen(cpuid))
+ return 0;
+ }
+ return 1;
+}
+
+/*
* default get_cpuid(): nothing gets recorded
* actual implementation must be in arch/$(SRCARCH)/util/header.c
*/
@@ -2659,6 +2698,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
struct perf_header *header = &session->header;
int fd = perf_data__fd(session->data);
struct stat st;
+ time_t stctime;
int ret, bit;
hd.fp = fp;
@@ -2668,7 +2708,8 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
if (ret == -1)
return -1;
- fprintf(fp, "# captured on : %s", ctime(&st.st_ctime));
+ stctime = st.st_ctime;
+ fprintf(fp, "# captured on : %s", ctime(&stctime));
fprintf(fp, "# header version : %u\n", header->version);
fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
@@ -2759,7 +2800,7 @@ static int perf_header__adds_write(struct perf_header *header,
lseek(fd, sec_start, SEEK_SET);
/*
* may write more than needed due to dropped feature, but
- * this is okay, reader will skip the mising entries
+ * this is okay, reader will skip the missing entries
*/
err = do_write(&ff, feat_sec, sec_size);
if (err < 0)
@@ -3229,7 +3270,7 @@ static int read_attr(int fd, struct perf_header *ph,
static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
struct tep_handle *pevent)
{
- struct tep_event_format *event;
+ struct tep_event *event;
char bf[128];
/* already prepared */
@@ -3544,7 +3585,7 @@ perf_event__synthesize_event_update_unit(struct perf_tool *tool,
if (ev == NULL)
return -ENOMEM;
- strncpy(ev->data, evsel->unit, size);
+ strlcpy(ev->data, evsel->unit, size + 1);
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
@@ -3583,7 +3624,7 @@ perf_event__synthesize_event_update_name(struct perf_tool *tool,
if (ev == NULL)
return -ENOMEM;
- strncpy(ev->data, evsel->name, len);
+ strlcpy(ev->data, evsel->name, len + 1);
err = process(tool, (union perf_event*) ev, NULL, NULL);
free(ev);
return err;
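The weak default strcmp_cpuid_str() treats the mapfile cpuid as an extended regex and accepts only a match covering the entire platform cpuid. A standalone sketch (the example strings are merely in the style of x86 mapfile entries):

#include <regex.h>
#include <stdio.h>
#include <string.h>

static int cpuid_matches(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t m[1];
	int rc = 1;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0)
		return 1;
	if (!regexec(&re, cpuid, 1, m, 0) &&
	    (size_t)(m[0].rm_eo - m[0].rm_so) == strlen(cpuid))
		rc = 0;	/* the whole cpuid matched */
	regfree(&re);
	return rc;
}

int main(void)
{
	printf("%d\n", cpuid_matches("GenuineIntel-6-[45]E",
				     "GenuineIntel-6-4E"));
	return 0;	/* prints 0: match */
}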
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 828cb9794c76..8aad8330e392 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1160,7 +1160,7 @@ void hist_entry__delete(struct hist_entry *he)
/*
* If this is not the last column, then we need to pad it according to the
- * pre-calculated max lenght for this column, otherwise don't bother adding
+ * pre-calculated max length for this column, otherwise don't bother adding
* spaces because that would break viewing this with, for instance, 'less',
* that would show tons of trailing spaces when a long C++ demangled method
* names is sampled.
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 3badd7f1e1b8..664b5eda8d51 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -62,6 +62,7 @@ enum hist_column {
HISTC_TRACE,
HISTC_SYM_SIZE,
HISTC_DSO_SIZE,
+ HISTC_SYMBOL_IPC,
HISTC_NR_COLS, /* Last entry */
};
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index a1863000e972..bf249552a9b0 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -38,7 +38,7 @@ struct jit_buf_desc {
uint64_t sample_type;
size_t bufsize;
FILE *in;
- bool needs_bswap; /* handles cross-endianess */
+ bool needs_bswap; /* handles cross-endianness */
bool use_arch_timestamp;
void *debug_data;
void *unwinding_data;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 8f36ce813bc5..6fcb3bce0442 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -137,7 +137,7 @@ struct machine *machine__new_kallsyms(void)
struct machine *machine = machine__new_host();
/*
* FIXME:
- * 1) We should switch to machine__load_kallsyms(), i.e. not explicitely
+ * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
* ask for not using the kcore parsing code, once this one is fixed
* to create a map per module.
*/
@@ -2493,15 +2493,13 @@ int machines__for_each_thread(struct machines *machines,
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
struct target *target, struct thread_map *threads,
perf_event__handler_t process, bool data_mmap,
- unsigned int proc_map_timeout,
unsigned int nr_threads_synthesize)
{
if (target__has_task(target))
- return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
+ return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
else if (target__has_cpu(target))
return perf_event__synthesize_threads(tool, process,
machine, data_mmap,
- proc_map_timeout,
nr_threads_synthesize);
/* command specified */
return 0;
@@ -2592,6 +2590,33 @@ int machine__get_kernel_start(struct machine *machine)
return err;
}
+u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
+{
+ u8 addr_cpumode = cpumode;
+ bool kernel_ip;
+
+ if (!machine->single_address_space)
+ goto out;
+
+ kernel_ip = machine__kernel_ip(machine, addr);
+ switch (cpumode) {
+ case PERF_RECORD_MISC_KERNEL:
+ case PERF_RECORD_MISC_USER:
+ addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
+ PERF_RECORD_MISC_USER;
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ case PERF_RECORD_MISC_GUEST_USER:
+ addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
+ PERF_RECORD_MISC_GUEST_USER;
+ break;
+ default:
+ break;
+ }
+out:
+ return addr_cpumode;
+}
+
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
return dsos__findnew(&machine->dsos, filename);
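machine__addr_cpumode() and the thread__find_map_fb() fallback earlier in the patch are two halves of one fix: on single-address-space machines the address itself, not the sample cpumode, decides kernel versus user. A reduced sketch with made-up constants:

#include <stdio.h>

#define MISC_KERNEL 1	/* stand-ins for PERF_RECORD_MISC_* */
#define MISC_USER   2

static int addr_cpumode(unsigned long long addr, int cpumode,
			int single_address_space)
{
	/* illustrative kernel_start; the real one comes from the machine */
	unsigned long long kernel_start = 0xffff800000000000ULL;

	if (!single_address_space)
		return cpumode;
	return addr >= kernel_start ? MISC_KERNEL : MISC_USER;
}

int main(void)
{
	/* a kernel address overrides a USER sample cpumode: prints 1 */
	printf("%d\n", addr_cpumode(0xffffffff81000000ULL, MISC_USER, 1));
	return 0;
}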
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index d856b85862e2..a5d1da60f751 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -42,6 +42,7 @@ struct machine {
u16 id_hdr_size;
bool comm_exec;
bool kptr_restrict_warned;
+ bool single_address_space;
char *root_dir;
char *mmap_name;
struct threads threads[THREADS__TABLE_SIZE];
@@ -99,6 +100,8 @@ static inline bool machine__kernel_ip(struct machine *machine, u64 ip)
return ip >= kernel_start;
}
+u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr);
+
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
pid_t tid);
struct comm *machine__thread_exec_comm(struct machine *machine,
@@ -247,17 +250,14 @@ int machines__for_each_thread(struct machines *machines,
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
struct target *target, struct thread_map *threads,
perf_event__handler_t process, bool data_mmap,
- unsigned int proc_map_timeout,
unsigned int nr_threads_synthesize);
static inline
int machine__synthesize_threads(struct machine *machine, struct target *target,
struct thread_map *threads, bool data_mmap,
- unsigned int proc_map_timeout,
unsigned int nr_threads_synthesize)
{
return __machine__synthesize_threads(machine, NULL, target, threads,
perf_event__process, data_mmap,
- proc_map_timeout,
nr_threads_synthesize);
}
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 354e54550d2b..6751301a755c 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -19,8 +19,10 @@
#include "srcline.h"
#include "namespaces.h"
#include "unwind.h"
+#include "srccode.h"
static void __maps__insert(struct maps *maps, struct map *map);
+static void __maps__insert_name(struct maps *maps, struct map *map);
static inline int is_anon_memory(const char *filename, u32 flags)
{
@@ -420,6 +422,54 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
return ret;
}
+int map__fprintf_srccode(struct map *map, u64 addr,
+ FILE *fp,
+ struct srccode_state *state)
+{
+ char *srcfile;
+ int ret = 0;
+ unsigned line;
+ int len;
+ char *srccode;
+
+ if (!map || !map->dso)
+ return 0;
+ srcfile = get_srcline_split(map->dso,
+ map__rip_2objdump(map, addr),
+ &line);
+ if (!srcfile)
+ return 0;
+
+ /* Avoid redundant printing */
+ if (state &&
+ state->srcfile &&
+ !strcmp(state->srcfile, srcfile) &&
+ state->line == line) {
+ free(srcfile);
+ return 0;
+ }
+
+ srccode = find_sourceline(srcfile, line, &len);
+ if (!srccode)
+ goto out_free_line;
+
+ ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
+ state->srcfile = srcfile;
+ state->line = line;
+ return ret;
+
+out_free_line:
+ free(srcfile);
+ return ret;
+}
+
+void srccode_state_free(struct srccode_state *state)
+{
+ zfree(&state->srcfile);
+ state->line = 0;
+}
+
/**
* map__rip_2objdump - convert symbol start address to objdump address.
* @map: memory map
@@ -496,6 +546,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
static void maps__init(struct maps *maps)
{
maps->entries = RB_ROOT;
+ maps->names = RB_ROOT;
init_rwsem(&maps->lock);
}
@@ -664,6 +715,7 @@ size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
__maps__insert(&mg->maps, map);
+ __maps__insert_name(&mg->maps, map);
map->groups = mg;
}
@@ -824,10 +876,34 @@ static void __maps__insert(struct maps *maps, struct map *map)
map__get(map);
}
+static void __maps__insert_name(struct maps *maps, struct map *map)
+{
+ struct rb_node **p = &maps->names.rb_node;
+ struct rb_node *parent = NULL;
+ struct map *m;
+ int rc;
+
+ while (*p != NULL) {
+ parent = *p;
+ m = rb_entry(parent, struct map, rb_node_name);
+ rc = strcmp(m->dso->short_name, map->dso->short_name);
+ if (rc < 0)
+ p = &(*p)->rb_left;
+ else if (rc > 0)
+ p = &(*p)->rb_right;
+ else
+ return;
+ }
+ rb_link_node(&map->rb_node_name, parent, p);
+ rb_insert_color(&map->rb_node_name, &maps->names);
+ map__get(map);
+}
+
void maps__insert(struct maps *maps, struct map *map)
{
down_write(&maps->lock);
__maps__insert(maps, map);
+ __maps__insert_name(maps, map);
up_write(&maps->lock);
}
@@ -846,19 +922,18 @@ void maps__remove(struct maps *maps, struct map *map)
struct map *maps__find(struct maps *maps, u64 ip)
{
- struct rb_node **p, *parent = NULL;
+ struct rb_node *p;
struct map *m;
down_read(&maps->lock);
- p = &maps->entries.rb_node;
- while (*p != NULL) {
- parent = *p;
- m = rb_entry(parent, struct map, rb_node);
+ p = maps->entries.rb_node;
+ while (p != NULL) {
+ m = rb_entry(p, struct map, rb_node);
if (ip < m->start)
- p = &(*p)->rb_left;
+ p = p->rb_left;
else if (ip >= m->end)
- p = &(*p)->rb_right;
+ p = p->rb_right;
else
goto out;
}
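The name-keyed rbtree added above gives maps a second index alongside the address-sorted one, so lookups by DSO short name no longer need a linear walk. A standalone sketch of the same search shape (simplified node type, no locking), matching the comparator direction used by __maps__insert_name():

	struct name_node {
		struct name_node *left, *right;
		const char *key;	/* dso->short_name in the real tree */
	};

	static struct name_node *lookup_by_name(struct name_node *node,
						const char *name)
	{
		while (node) {
			int rc = strcmp(node->key, name);

			if (rc < 0)
				node = node->left;
			else if (rc > 0)
				node = node->right;
			else
				return node;	/* found */
		}
		return NULL;
	}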
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index e0f327b51e66..09282aa45c80 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -25,6 +25,7 @@ struct map {
struct rb_node rb_node;
struct list_head node;
};
+ struct rb_node rb_node_name;
u64 start;
u64 end;
bool erange_warned;
@@ -57,6 +58,7 @@ struct kmap {
struct maps {
struct rb_root entries;
+ struct rb_root names;
struct rw_semaphore lock;
};
@@ -172,6 +174,22 @@ char *map__srcline(struct map *map, u64 addr, struct symbol *sym);
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
FILE *fp);
+struct srccode_state {
+ char *srcfile;
+ unsigned line;
+};
+
+static inline void srccode_state_init(struct srccode_state *state)
+{
+ state->srcfile = NULL;
+ state->line = 0;
+}
+
+void srccode_state_free(struct srccode_state *state);
+
+int map__fprintf_srccode(struct map *map, u64 addr,
+ FILE *fp, struct srccode_state *state);
+
int map__load(struct map *map);
struct symbol *map__find_symbol(struct map *map, u64 addr);
struct symbol *map__find_symbol_by_name(struct map *map, const char *name);
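Tying the new declarations together: a consumer keeps one srccode_state per output stream so consecutive samples on the same source line print it only once. A minimal usage sketch (hypothetical caller; the real users arrive with the 'perf script' srccode output):

	static void print_with_source(struct map *map, const u64 *addrs,
				      int n, FILE *fp)
	{
		struct srccode_state state;
		int i;

		srccode_state_init(&state);
		for (i = 0; i < n; i++)
			map__fprintf_srccode(map, addrs[i], fp, &state);
		srccode_state_free(&state);	/* frees state.srcfile */
	}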
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index cdb95b3a1213..8fc39311a30d 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -153,8 +153,158 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
{
}
+#ifdef HAVE_AIO_SUPPORT
+static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
+{
+ int delta_max, i, prio;
+
+ map->aio.nr_cblocks = mp->nr_cblocks;
+ if (map->aio.nr_cblocks) {
+ map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
+ if (!map->aio.aiocb) {
+ pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
+ return -1;
+ }
+ map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
+ if (!map->aio.cblocks) {
+ pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
+ return -1;
+ }
+ map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
+ if (!map->aio.data) {
+ pr_debug2("failed to allocate data buffer, error %m\n");
+ return -1;
+ }
+ delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
+ for (i = 0; i < map->aio.nr_cblocks; ++i) {
+ map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
+ if (!map->aio.data[i]) {
+ pr_debug2("failed to allocate data buffer area, error %m\n");
+ return -1;
+ }
+ /*
+ * Use a cblock.aio_fildes value different from -1
+ * to denote a started aio write operation on the
+ * cblock, so an explicit record__aio_sync() call is
+ * required before the cblock may be reused.
+ */
+ map->aio.cblocks[i].aio_fildes = -1;
+ /*
+ * Allocate cblocks with decreasing priority deltas to get
+ * faster aio write system calls: queued requests are kept
+ * in separate per-prio queues, so adding a new request only
+ * iterates through a shorter per-prio list. Blocks with
+ * indexes higher than _SC_AIO_PRIO_DELTA_MAX go with
+ * priority 0.
+ */
+ prio = delta_max - i;
+ map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
+ }
+ }
+
+ return 0;
+}
+
+static void perf_mmap__aio_munmap(struct perf_mmap *map)
+{
+ int i;
+
+ for (i = 0; i < map->aio.nr_cblocks; ++i)
+ zfree(&map->aio.data[i]);
+ if (map->aio.data)
+ zfree(&map->aio.data);
+ zfree(&map->aio.cblocks);
+ zfree(&map->aio.aiocb);
+}
+
+int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
+ int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
+ off_t *off)
+{
+ u64 head = perf_mmap__read_head(md);
+ unsigned char *data = md->base + page_size;
+ unsigned long size, size0 = 0;
+ void *buf;
+ int rc = 0;
+
+ rc = perf_mmap__read_init(md);
+ if (rc < 0)
+ return (rc == -EAGAIN) ? 0 : -1;
+
+ /*
+ * md->base data is copied into the md->data[idx] buffer to
+ * release space in the kernel buffer as fast as possible,
+ * through perf_mmap__consume() below.
+ *
+ * That lets the kernel proceed with storing more profiling
+ * data into the kernel buffer sooner than the other per-cpu
+ * kernel buffers would otherwise be handled.
+ *
+ * Copying is done in two steps in case the chunk of profiling
+ * data crosses the upper bound of the kernel buffer. In this
+ * case we first move the part of the data from md->start up
+ * to the upper bound, and then the remainder from the
+ * beginning of the kernel buffer up to the end of the data
+ * chunk.
+ */
+
+ size = md->end - md->start;
+
+ if ((md->start & md->mask) + size != (md->end & md->mask)) {
+ buf = &data[md->start & md->mask];
+ size = md->mask + 1 - (md->start & md->mask);
+ md->start += size;
+ memcpy(md->aio.data[idx], buf, size);
+ size0 = size;
+ }
+
+ buf = &data[md->start & md->mask];
+ size = md->end - md->start;
+ md->start += size;
+ memcpy(md->aio.data[idx] + size0, buf, size);
+
+ /*
+ * Increment md->refcount to guard md->data[idx] buffer
+ * from premature deallocation because md object can be
+ * released earlier than aio write request started
+ * on mmap->data[idx] is complete.
+ *
+ * perf_mmap__put() is done at record__aio_complete()
+ * after started request completion.
+ */
+ perf_mmap__get(md);
+
+ md->prev = head;
+ perf_mmap__consume(md);
+
+ rc = push(to, &md->aio.cblocks[idx], md->aio.data[idx], size0 + size, *off);
+ if (!rc) {
+ *off += size0 + size;
+ } else {
+ /*
+ * Decrement md->refcount back if aio write
+ * operation failed to start.
+ */
+ perf_mmap__put(md);
+ }
+
+ return rc;
+}
+#else
+static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
+ struct mmap_params *mp __maybe_unused)
+{
+ return 0;
+}
+
+static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused)
+{
+}
+#endif
+
void perf_mmap__munmap(struct perf_mmap *map)
{
+ perf_mmap__aio_munmap(map);
if (map->base != NULL) {
munmap(map->base, perf_mmap__mmap_len(map));
map->base = NULL;
@@ -197,7 +347,7 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
&mp->auxtrace_mp, map->base, fd))
return -1;
- return 0;
+ return perf_mmap__aio_mmap(map, mp);
}
static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
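perf_mmap__aio_push() stays I/O-agnostic by delegating the write to the push() callback, which owns the file offset and marks the cblock busy. One plausible callback shape built on POSIX aio_write() (assumed names; the real one is the record-side push function in builtin-record.c):

	#include <aio.h>

	struct output { int fd; };	/* stand-in for the perf.data handle */

	static int example_push(void *to, struct aiocb *cblock,
				void *buf, size_t size, off_t off)
	{
		struct output *out = to;

		cblock->aio_fildes = out->fd;	/* != -1 marks cblock in flight */
		cblock->aio_buf    = buf;
		cblock->aio_nbytes = size;
		cblock->aio_offset = off;

		/* Queue the write; completion is reaped later with
		 * aio_error()/aio_return(), then aio_fildes goes back to -1. */
		return aio_write(cblock) ? -1 : 0;
	}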
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index cc5e2d6d17a9..aeb6942fdb00 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -6,9 +6,13 @@
#include <linux/types.h>
#include <linux/ring_buffer.h>
#include <stdbool.h>
+#ifdef HAVE_AIO_SUPPORT
+#include <aio.h>
+#endif
#include "auxtrace.h"
#include "event.h"
+struct aiocb;
/**
* struct perf_mmap - perf's ring buffer mmap details
*
@@ -26,6 +30,14 @@ struct perf_mmap {
bool overwrite;
struct auxtrace_mmap auxtrace_mmap;
char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
+#ifdef HAVE_AIO_SUPPORT
+ struct {
+ void **data;
+ struct aiocb *cblocks;
+ struct aiocb **aiocb;
+ int nr_cblocks;
+ } aio;
+#endif
};
/*
@@ -57,7 +69,7 @@ enum bkw_mmap_state {
};
struct mmap_params {
- int prot, mask;
+ int prot, mask, nr_cblocks;
struct auxtrace_mmap_params auxtrace_mp;
};
@@ -85,6 +97,18 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map);
int perf_mmap__push(struct perf_mmap *md, void *to,
int push(struct perf_mmap *map, void *to, void *buf, size_t size));
+#ifdef HAVE_AIO_SUPPORT
+int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx,
+ int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
+ off_t *off);
+#else
+static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused, int idx __maybe_unused,
+ int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
+ off_t *off __maybe_unused)
+{
+ return 0;
+}
+#endif
size_t perf_mmap__mmap_len(struct perf_mmap *map);
diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
index cf8bd123cf73..aed170bd4384 100644
--- a/tools/perf/util/namespaces.c
+++ b/tools/perf/util/namespaces.c
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <string.h>
#include <unistd.h>
+#include <asm/bug.h>
struct namespaces *namespaces__new(struct namespaces_event *event)
{
@@ -186,6 +187,7 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
char curpath[PATH_MAX];
int oldns = -1;
int newns = -1;
+ char *oldcwd = NULL;
if (nc == NULL)
return;
@@ -199,9 +201,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
if (snprintf(curpath, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX)
return;
+ oldcwd = get_current_dir_name();
+ if (!oldcwd)
+ return;
+
oldns = open(curpath, O_RDONLY);
if (oldns < 0)
- return;
+ goto errout;
newns = open(nsi->mntns_path, O_RDONLY);
if (newns < 0)
@@ -210,11 +216,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
if (setns(newns, CLONE_NEWNS) < 0)
goto errout;
+ nc->oldcwd = oldcwd;
nc->oldns = oldns;
nc->newns = newns;
return;
errout:
+ free(oldcwd);
if (oldns > -1)
close(oldns);
if (newns > -1)
@@ -223,11 +231,16 @@ errout:
void nsinfo__mountns_exit(struct nscookie *nc)
{
- if (nc == NULL || nc->oldns == -1 || nc->newns == -1)
+ if (nc == NULL || nc->oldns == -1 || nc->newns == -1 || !nc->oldcwd)
return;
setns(nc->oldns, CLONE_NEWNS);
+ if (nc->oldcwd) {
+ WARN_ON_ONCE(chdir(nc->oldcwd));
+ zfree(&nc->oldcwd);
+ }
+
if (nc->oldns > -1) {
close(nc->oldns);
nc->oldns = -1;
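The cookie grows an oldcwd field because setns(CLONE_NEWNS) switches the thread's root and working directory along with the mount namespace; without the chdir() on exit, relative paths would silently keep resolving inside the target namespace. The enter/exit pairing, compressed into one usage sketch mirroring the functions above:

	static void with_target_mountns(struct nsinfo *nsi)
	{
		struct nscookie nc;

		nsinfo__mountns_enter(nsi, &nc);  /* saves cwd, setns(newns) */
		/* ... open DSOs/build-id files inside the namespace ... */
		nsinfo__mountns_exit(&nc);        /* setns(oldns), chdir(oldcwd) */
	}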
diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h
index cae1a9a39722..d5f46c09ea31 100644
--- a/tools/perf/util/namespaces.h
+++ b/tools/perf/util/namespaces.h
@@ -38,6 +38,7 @@ struct nsinfo {
struct nscookie {
int oldns;
int newns;
+ char *oldcwd;
};
int nsinfo__init(struct nsinfo *nsi);
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
index 1904e7f6ec84..897589507d97 100644
--- a/tools/perf/util/ordered-events.c
+++ b/tools/perf/util/ordered-events.c
@@ -219,13 +219,12 @@ int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
return 0;
}
-static int __ordered_events__flush(struct ordered_events *oe)
+static int do_flush(struct ordered_events *oe, bool show_progress)
{
struct list_head *head = &oe->events;
struct ordered_event *tmp, *iter;
u64 limit = oe->next_flush;
u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
- bool show_progress = limit == ULLONG_MAX;
struct ui_progress prog;
int ret;
@@ -263,7 +262,8 @@ static int __ordered_events__flush(struct ordered_events *oe)
return 0;
}
-int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
+static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
+ u64 timestamp)
{
static const char * const str[] = {
"NONE",
@@ -272,12 +272,16 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
"HALF ",
};
int err;
+ bool show_progress = false;
if (oe->nr_events == 0)
return 0;
switch (how) {
case OE_FLUSH__FINAL:
+ show_progress = true;
+ __fallthrough;
+ case OE_FLUSH__TOP:
oe->next_flush = ULLONG_MAX;
break;
@@ -298,6 +302,11 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
break;
}
+ case OE_FLUSH__TIME:
+ oe->next_flush = timestamp;
+ show_progress = false;
+ break;
+
case OE_FLUSH__ROUND:
case OE_FLUSH__NONE:
default:
@@ -308,7 +317,7 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
str[how], oe->nr_events);
pr_oe_time(oe->max_timestamp, "max_timestamp\n");
- err = __ordered_events__flush(oe);
+ err = do_flush(oe, show_progress);
if (!err) {
if (how == OE_FLUSH__ROUND)
@@ -324,7 +333,29 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
return err;
}
-void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
+int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
+{
+ return __ordered_events__flush(oe, how, 0);
+}
+
+int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
+{
+ return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
+}
+
+u64 ordered_events__first_time(struct ordered_events *oe)
+{
+ struct ordered_event *event;
+
+ if (list_empty(&oe->events))
+ return 0;
+
+ event = list_first_entry(&oe->events, struct ordered_event, list);
+ return event->timestamp;
+}
+
+void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
+ void *data)
{
INIT_LIST_HEAD(&oe->events);
INIT_LIST_HEAD(&oe->cache);
@@ -332,6 +363,7 @@ void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t d
oe->max_alloc_size = (u64) -1;
oe->cur_alloc_size = 0;
oe->deliver = deliver;
+ oe->data = data;
}
static void
@@ -375,5 +407,5 @@ void ordered_events__reinit(struct ordered_events *oe)
+ void *old_data = oe->data;
+
ordered_events__free(oe);
memset(oe, '\0', sizeof(*oe));
- ordered_events__init(oe, old_deliver);
+ ordered_events__init(oe, old_deliver, old_data);
}
diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h
index 1338d5c345dc..0920fb0ec6cc 100644
--- a/tools/perf/util/ordered-events.h
+++ b/tools/perf/util/ordered-events.h
@@ -18,6 +18,8 @@ enum oe_flush {
OE_FLUSH__FINAL,
OE_FLUSH__ROUND,
OE_FLUSH__HALF,
+ OE_FLUSH__TOP,
+ OE_FLUSH__TIME,
};
struct ordered_events;
@@ -47,15 +49,19 @@ struct ordered_events {
enum oe_flush last_flush_type;
u32 nr_unordered_events;
bool copy_on_queue;
+ void *data;
};
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
u64 timestamp, u64 file_offset);
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event);
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how);
-void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver);
+int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp);
+void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
+ void *data);
void ordered_events__free(struct ordered_events *oe);
void ordered_events__reinit(struct ordered_events *oe);
+u64 ordered_events__first_time(struct ordered_events *oe);
static inline
void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size)
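OE_FLUSH__TIME turns flushing into a bounded operation: deliver everything queued up to a caller-chosen timestamp and keep the rest. A sketch of the polling pattern this enables (hypothetical driver; perf top's queue thread is the intended user):

	static int flush_up_to(struct ordered_events *oe, u64 safe_ts)
	{
		u64 first = ordered_events__first_time(oe);

		/* Nothing queued, or nothing old enough to be safe yet. */
		if (!first || first > safe_ts)
			return 0;

		return ordered_events__flush_time(oe, safe_ts);
	}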
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 59be3466d64d..920e1e6551dd 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -2462,7 +2462,7 @@ restart:
if (!name_only && strlen(syms->alias))
snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
else
- strncpy(name, syms->symbol, MAX_NAME_LEN);
+ strlcpy(name, syms->symbol, MAX_NAME_LEN);
evt_list[evt_i] = strdup(name);
if (evt_list[evt_i] == NULL)
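The strncpy() → strlcpy() swaps in this series are not cosmetic: strncpy() leaves the destination without a terminating NUL whenever the source is at least as long as the buffer, while strlcpy() always terminates. A tiny illustration (assuming the tools tree's strlcpy fallback from <linux/string.h>):

	char name[8];

	/* strncpy: strlen(src) >= sizeof(name), so no '\0' is written
	 * and a later printf("%s", name) reads past the buffer. */
	strncpy(name, "0123456789", sizeof(name));

	/* strlcpy: always terminates, storing "0123456". */
	strlcpy(name, "0123456789", sizeof(name));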
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 7e49baad304d..11a234740632 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -145,7 +145,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
int fd, ret = -1;
char path[PATH_MAX];
- snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -175,7 +175,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
ssize_t sret;
int fd;
- snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -205,7 +205,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name)
char path[PATH_MAX];
int fd;
- snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -223,7 +223,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
char path[PATH_MAX];
int fd;
- snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -655,45 +655,6 @@ static int is_arm_pmu_core(const char *name)
return 0;
}
-/*
- * Return the CPU id as a raw string.
- *
- * Each architecture should provide a more precise id string that
- * can be use to match the architecture's "mapfile".
- */
-char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
-{
- return NULL;
-}
-
-/* Return zero when the cpuid from the mapfile.csv matches the
- * cpuid string generated on this platform.
- * Otherwise return non-zero.
- */
-int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
-{
- regex_t re;
- regmatch_t pmatch[1];
- int match;
-
- if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
- /* Warn unable to generate match particular string. */
- pr_info("Invalid regular expression %s\n", mapcpuid);
- return 1;
- }
-
- match = !regexec(&re, cpuid, 1, pmatch, 0);
- regfree(&re);
- if (match) {
- size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
-
- /* Verify the entire string matched. */
- if (match_len == strlen(cpuid))
- return 0;
- }
- return 1;
-}
-
static char *perf_pmu__getcpuid(struct perf_pmu *pmu)
{
char *cpuid;
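The snprintf() → scnprintf() conversions matter when the return value feeds later offset arithmetic: snprintf() reports the length the string *would* have had, which can exceed the buffer, while scnprintf() reports what was actually stored. A hedged sketch ('dir' and 'name' stand in for the alias directory and event name used above):

	static int build_scale_path(char *path, size_t sz,
				    const char *dir, const char *name)
	{
		/* Safe to use the return for appending at path + n:
		 * scnprintf() never returns more than sz - 1. */
		return scnprintf(path, sz, "%s/%s.scale", dir, name);
	}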
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index e86f8be89157..18a59fba97ff 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -692,7 +692,7 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
return ret;
for (i = 0; i < ntevs && ret >= 0; i++) {
- /* point.address is the addres of point.symbol + point.offset */
+ /* point.address is the address of point.symbol + point.offset */
tevs[i].point.address -= stext;
tevs[i].point.module = strdup(exec);
if (!tevs[i].point.module) {
@@ -3062,7 +3062,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev,
/*
* Give it a '0x' leading symbol name.
* In __add_probe_trace_events, a NULL symbol is interpreted as
- * invalud.
+ * invalid.
*/
if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0)
goto errout;
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index aac7817d9e14..0b1195cad0e5 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -424,7 +424,7 @@ static int probe_cache__open(struct probe_cache *pcache, const char *target,
if (target && build_id_cache__cached(target)) {
/* This is a cached buildid */
- strncpy(sbuildid, target, SBUILD_ID_SIZE);
+ strlcpy(sbuildid, target, SBUILD_ID_SIZE);
dir_name = build_id_cache__linkname(sbuildid, NULL, 0);
goto found;
}
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 50150dfc0cdf..47628e85c5eb 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -386,7 +386,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
struct tep_format_field *field;
if (!evsel->tp_format) {
- struct tep_event_format *tp_format;
+ struct tep_event *tp_format;
tp_format = trace_event__tp_format_id(evsel->attr.config);
if (!tp_format)
@@ -1240,7 +1240,7 @@ static struct {
static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
PyObject *args, PyObject *kwargs)
{
- struct tep_event_format *tp_format;
+ struct tep_event *tp_format;
static char *kwlist[] = { "sys", "name", NULL };
char *sys = NULL;
char *name = NULL;
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index a2eeebbfb25f..68b2570304ec 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -506,7 +506,7 @@ static int s390_cpumsf_samples(struct s390_cpumsf_queue *sfq, u64 *ts)
aux_ts = get_trailer_time(buf);
if (!aux_ts) {
pr_err("[%#08" PRIx64 "] Invalid AUX trailer entry TOD clock base\n",
- sfq->buffer->data_offset);
+ (s64)sfq->buffer->data_offset);
aux_ts = ~0ULL;
goto out;
}
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 89cb887648f9..b93f36b887b5 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -189,7 +189,7 @@ static void define_flag_field(const char *ev_name,
LEAVE;
}
-static void define_event_symbols(struct tep_event_format *event,
+static void define_event_symbols(struct tep_event *event,
const char *ev_name,
struct tep_print_arg *args)
{
@@ -338,7 +338,7 @@ static void perl_process_tracepoint(struct perf_sample *sample,
struct addr_location *al)
{
struct thread *thread = al->thread;
- struct tep_event_format *event = evsel->tp_format;
+ struct tep_event *event = evsel->tp_format;
struct tep_format_field *field;
static char handler[256];
unsigned long long val;
@@ -537,7 +537,7 @@ static int perl_stop_script(void)
static int perl_generate_script(struct tep_handle *pevent, const char *outfile)
{
- struct tep_event_format *event = NULL;
+ struct tep_event *event = NULL;
struct tep_format_field *f;
char fname[PATH_MAX];
int not_first, count;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 69aa93d4ee99..87ef16a1b17e 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -264,7 +264,7 @@ static void define_field(enum tep_print_arg_type field_type,
Py_DECREF(t);
}
-static void define_event_symbols(struct tep_event_format *event,
+static void define_event_symbols(struct tep_event *event,
const char *ev_name,
struct tep_print_arg *args)
{
@@ -332,7 +332,7 @@ static void define_event_symbols(struct tep_event_format *event,
define_event_symbols(event, ev_name, args->next);
}
-static PyObject *get_field_numeric_entry(struct tep_event_format *event,
+static PyObject *get_field_numeric_entry(struct tep_event *event,
struct tep_format_field *field, void *data)
{
bool is_array = field->flags & TEP_FIELD_IS_ARRAY;
@@ -494,14 +494,14 @@ static PyObject *python_process_brstack(struct perf_sample *sample,
pydict_set_item_string_decref(pyelem, "cycles",
PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
- thread__find_map(thread, sample->cpumode,
- br->entries[i].from, &al);
+ thread__find_map_fb(thread, sample->cpumode,
+ br->entries[i].from, &al);
dsoname = get_dsoname(al.map);
pydict_set_item_string_decref(pyelem, "from_dsoname",
_PyUnicode_FromString(dsoname));
- thread__find_map(thread, sample->cpumode,
- br->entries[i].to, &al);
+ thread__find_map_fb(thread, sample->cpumode,
+ br->entries[i].to, &al);
dsoname = get_dsoname(al.map);
pydict_set_item_string_decref(pyelem, "to_dsoname",
_PyUnicode_FromString(dsoname));
@@ -576,14 +576,14 @@ static PyObject *python_process_brstacksym(struct perf_sample *sample,
if (!pyelem)
Py_FatalError("couldn't create Python dictionary");
- thread__find_symbol(thread, sample->cpumode,
- br->entries[i].from, &al);
+ thread__find_symbol_fb(thread, sample->cpumode,
+ br->entries[i].from, &al);
get_symoff(al.sym, &al, true, bf, sizeof(bf));
pydict_set_item_string_decref(pyelem, "from",
_PyUnicode_FromString(bf));
- thread__find_symbol(thread, sample->cpumode,
- br->entries[i].to, &al);
+ thread__find_symbol_fb(thread, sample->cpumode,
+ br->entries[i].to, &al);
get_symoff(al.sym, &al, true, bf, sizeof(bf));
pydict_set_item_string_decref(pyelem, "to",
_PyUnicode_FromString(bf));
@@ -790,7 +790,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
struct perf_evsel *evsel,
struct addr_location *al)
{
- struct tep_event_format *event = evsel->tp_format;
+ struct tep_event *event = evsel->tp_format;
PyObject *handler, *context, *t, *obj = NULL, *callchain;
PyObject *dict = NULL, *all_entries_dict = NULL;
static char handler_name[256];
@@ -1590,7 +1590,7 @@ static int python_stop_script(void)
static int python_generate_script(struct tep_handle *pevent, const char *outfile)
{
- struct tep_event_format *event = NULL;
+ struct tep_event *event = NULL;
struct tep_format_field *f;
char fname[PATH_MAX];
int not_first, count;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 7d2c8ce6cfad..78a067777144 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -24,6 +24,7 @@
#include "thread.h"
#include "thread-stack.h"
#include "stat.h"
+#include "arch/common.h"
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
@@ -125,7 +126,8 @@ struct perf_session *perf_session__new(struct perf_data *data,
session->tool = tool;
INIT_LIST_HEAD(&session->auxtrace_index);
machines__init(&session->machines);
- ordered_events__init(&session->ordered_events, ordered_events__deliver_event);
+ ordered_events__init(&session->ordered_events,
+ ordered_events__deliver_event, NULL);
if (data) {
if (perf_data__open(data))
@@ -150,6 +152,9 @@ struct perf_session *perf_session__new(struct perf_data *data,
session->machines.host.env = &perf_env;
}
+ session->machines.host.single_address_space =
+ perf_env__single_address_space(session->machines.host.env);
+
if (!data || perf_data__is_write(data)) {
/*
* In O_RDONLY mode this will be performed when reading the
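machine->single_address_space is derived from the recorded environment rather than the host, so a perf.data file from sparc (separate kernel/user address spaces) keeps its semantics when analyzed elsewhere. A hedged sketch of what perf_env__single_address_space() presumably boils down to (assumed implementation, not shown in this hunk):

	/* Every supported arch except sparc maps kernel and user into
	 * one address space, so an arch-name test is enough. */
	bool perf_env__single_address_space(struct perf_env *env)
	{
		return strcmp(perf_env__arch(env), "sparc");
	}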
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index f96c005b3c41..6c1a83768eb0 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -13,6 +13,7 @@
#include "strlist.h"
#include <traceevent/event-parse.h>
#include "mem-events.h"
+#include "annotate.h"
#include <linux/kernel.h>
regex_t parent_regex;
@@ -36,7 +37,7 @@ enum sort_mode sort__mode = SORT_MODE__NORMAL;
* -t, --field-separator
*
* option, that uses a special separator character and don't pad with spaces,
- * replacing all occurances of this separator in symbol names (and other
+ * replacing all occurrences of this separator in symbol names (and other
* output) with a '.' character, that thus it's the only non valid separator.
*/
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
@@ -422,6 +423,64 @@ struct sort_entry sort_srcline_to = {
.se_width_idx = HISTC_SRCLINE_TO,
};
+static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ struct symbol *sym = he->ms.sym;
+ struct map *map = he->ms.map;
+ struct perf_evsel *evsel = hists_to_evsel(he->hists);
+ struct annotation *notes;
+ double ipc = 0.0, coverage = 0.0;
+ char tmp[64];
+
+ if (!sym)
+ return repsep_snprintf(bf, size, "%-*s", width, "-");
+
+ if (!sym->annotate2 && symbol__annotate2(sym, map, evsel,
+ &annotation__default_options, NULL) < 0) {
+ return 0;
+ }
+
+ notes = symbol__annotation(sym);
+
+ if (notes->hit_cycles)
+ ipc = notes->hit_insn / ((double)notes->hit_cycles);
+
+ if (notes->total_insn) {
+ coverage = notes->cover_insn * 100.0 /
+ ((double)notes->total_insn);
+ }
+
+ snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
+ return repsep_snprintf(bf, size, "%-*s", width, tmp);
+}
+
+struct sort_entry sort_sym_ipc = {
+ .se_header = "IPC [IPC Coverage]",
+ .se_cmp = sort__sym_cmp,
+ .se_snprintf = hist_entry__sym_ipc_snprintf,
+ .se_width_idx = HISTC_SYMBOL_IPC,
+};
+
+static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
+ __maybe_unused,
+ char *bf, size_t size,
+ unsigned int width)
+{
+ char tmp[64];
+
+ snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
+ return repsep_snprintf(bf, size, "%-*s", width, tmp);
+}
+
+struct sort_entry sort_sym_ipc_null = {
+ .se_header = "IPC [IPC Coverage]",
+ .se_cmp = sort__sym_cmp,
+ .se_snprintf = hist_entry__sym_ipc_null_snprintf,
+ .se_width_idx = HISTC_SYMBOL_IPC,
+};
+
/* --sort srcfile */
static char no_srcfile[1];
@@ -1574,6 +1633,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
+ DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
};
#undef DIM
@@ -1591,6 +1651,7 @@ static struct sort_dimension bstack_sort_dimensions[] = {
DIM(SORT_CYCLES, "cycles", sort_cycles),
DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
+ DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
};
#undef DIM
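For a concrete reading of the new column: IPC is hit_insn over hit_cycles, and coverage is the share of the symbol's instructions that were ever sampled, so hypothetical counts of hit_insn = 1500, hit_cycles = 1000, cover_insn = 45, total_insn = 100 render as "1.50 [ 45.0%]":

	/* Made-up annotation counts, same math as the snprintf above. */
	double ipc      = 1500 / (double)1000;	/* 1.50 */
	double coverage = 45 * 100.0 / 100;	/* 45.0 (percent) */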
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index a97cf8e6be86..130fe37fe2df 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -229,6 +229,7 @@ enum sort_type {
SORT_SYM_SIZE,
SORT_DSO_SIZE,
SORT_CGROUP_ID,
+ SORT_SYM_IPC_NULL,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@@ -242,6 +243,7 @@ enum sort_type {
SORT_CYCLES,
SORT_SRCLINE_FROM,
SORT_SRCLINE_TO,
+ SORT_SYM_IPC,
/* memory mode specific sort keys */
__SORT_MEMORY_MODE,
diff --git a/tools/perf/util/srccode.c b/tools/perf/util/srccode.c
new file mode 100644
index 000000000000..fcc8630f6dff
--- /dev/null
+++ b/tools/perf/util/srccode.c
@@ -0,0 +1,186 @@
+/*
+ * Manage printing of source lines
+ * Copyright (c) 2017, Intel Corporation.
+ * Author: Andi Kleen
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include "linux/list.h"
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <assert.h>
+#include <string.h>
+#include "srccode.h"
+#include "debug.h"
+#include "util.h"
+
+#define MAXSRCCACHE (32*1024*1024)
+#define MAXSRCFILES 64
+#define SRC_HTAB_SZ 64
+
+struct srcfile {
+ struct hlist_node hash_nd;
+ struct list_head nd;
+ char *fn;
+ char **lines;
+ char *map;
+ unsigned numlines;
+ size_t maplen;
+};
+
+static struct hlist_head srcfile_htab[SRC_HTAB_SZ];
+static LIST_HEAD(srcfile_list);
+static long map_total_sz;
+static int num_srcfiles;
+
+static unsigned shash(unsigned char *s)
+{
+ unsigned h = 0;
+ while (*s)
+ h = 65599 * h + *s++;
+ return h ^ (h >> 16);
+}
+
+static int countlines(char *map, int maplen)
+{
+ int numl;
+ char *end = map + maplen;
+ char *p = map;
+
+ if (maplen == 0)
+ return 0;
+ numl = 0;
+ while (p < end && (p = memchr(p, '\n', end - p)) != NULL) {
+ numl++;
+ p++;
+ }
+ if (p < end)
+ numl++;
+ return numl;
+}
+
+static void fill_lines(char **lines, int maxline, char *map, int maplen)
+{
+ int l;
+ char *end = map + maplen;
+ char *p = map;
+
+ if (maplen == 0 || maxline == 0)
+ return;
+ l = 0;
+ lines[l++] = map;
+ while (p < end && (p = memchr(p, '\n', end - p)) != NULL) {
+ if (l >= maxline)
+ return;
+ lines[l++] = ++p;
+ }
+ if (p < end)
+ lines[l] = p;
+}
+
+static void free_srcfile(struct srcfile *sf)
+{
+ list_del(&sf->nd);
+ hlist_del(&sf->hash_nd);
+ map_total_sz -= sf->maplen;
+ munmap(sf->map, sf->maplen);
+ free(sf->lines);
+ free(sf->fn);
+ free(sf);
+ num_srcfiles--;
+}
+
+static struct srcfile *find_srcfile(char *fn)
+{
+ struct stat st;
+ struct srcfile *h;
+ int fd;
+ unsigned long sz;
+ unsigned hval = shash((unsigned char *)fn) % SRC_HTAB_SZ;
+
+ hlist_for_each_entry (h, &srcfile_htab[hval], hash_nd) {
+ if (!strcmp(fn, h->fn)) {
+ /* Move to front */
+ list_del(&h->nd);
+ list_add(&h->nd, &srcfile_list);
+ return h;
+ }
+ }
+
+ /* Prune from the LRU tail while over the limits and entries remain */
+ while ((num_srcfiles > MAXSRCFILES || map_total_sz > MAXSRCCACHE) &&
+ srcfile_list.next != &srcfile_list) {
+ assert(!list_empty(&srcfile_list));
+ h = list_entry(srcfile_list.prev, struct srcfile, nd);
+ free_srcfile(h);
+ }
+
+ fd = open(fn, O_RDONLY);
+ if (fd < 0 || fstat(fd, &st) < 0) {
+ pr_debug("cannot open source file %s\n", fn);
+ return NULL;
+ }
+
+ h = malloc(sizeof(struct srcfile));
+ if (!h)
+ return NULL;
+
+ h->fn = strdup(fn);
+ if (!h->fn)
+ goto out_h;
+
+ h->maplen = st.st_size;
+ sz = (h->maplen + page_size - 1) & ~(page_size - 1);
+ h->map = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0);
+ close(fd);
+ if (h->map == MAP_FAILED) {
+ pr_debug("cannot mmap source file %s\n", fn);
+ goto out_fn;
+ }
+ h->numlines = countlines(h->map, h->maplen);
+ h->lines = calloc(h->numlines, sizeof(char *));
+ if (!h->lines)
+ goto out_map;
+ fill_lines(h->lines, h->numlines, h->map, h->maplen);
+ list_add(&h->nd, &srcfile_list);
+ hlist_add_head(&h->hash_nd, &srcfile_htab[hval]);
+ map_total_sz += h->maplen;
+ num_srcfiles++;
+ return h;
+
+out_map:
+ munmap(h->map, sz);
+out_fn:
+ free(h->fn);
+out_h:
+ free(h);
+ return NULL;
+}
+
+/* Result is not 0 terminated */
+char *find_sourceline(char *fn, unsigned line, int *lenp)
+{
+ char *l, *p;
+ struct srcfile *sf = find_srcfile(fn);
+ if (!sf)
+ return NULL;
+ line--;
+ if (line >= sf->numlines)
+ return NULL;
+ l = sf->lines[line];
+ if (!l)
+ return NULL;
+ p = memchr(l, '\n', sf->map + sf->maplen - l);
+ *lenp = p - l;
+ return l;
+}
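find_srcfile() keeps an LRU cache of mmap()ed sources, bounded by MAXSRCFILES (64 files) and MAXSRCCACHE (32 MB), so repeated line lookups in hot files cost a hash probe plus an array index. Usage sketch for the one exported entry point (note the result is deliberately not NUL-terminated):

	static void show_line(char *fn, unsigned int line)
	{
		int len;
		char *src = find_sourceline(fn, line, &len);

		if (src)
			printf("%u: %.*s\n", line, len, src);
	}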
diff --git a/tools/perf/util/srccode.h b/tools/perf/util/srccode.h
new file mode 100644
index 000000000000..e500a746d5f1
--- /dev/null
+++ b/tools/perf/util/srccode.h
@@ -0,0 +1,7 @@
+#ifndef SRCCODE_H
+#define SRCCODE_H 1
+
+/* Result is not 0 terminated */
+char *find_sourceline(char *fn, unsigned line, int *lenp);
+
+#endif
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index e767c4a9d4d2..dc86597d0cc4 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -548,6 +548,34 @@ out:
return srcline;
}
+/* Returns the filename and fills in the line number in *line */
+char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line)
+{
+ char *file = NULL;
+ const char *dso_name;
+
+ if (!dso->has_srcline)
+ goto out;
+
+ dso_name = dso__name(dso);
+ if (dso_name == NULL)
+ goto out;
+
+ if (!addr2line(dso_name, addr, &file, line, dso, true, NULL, NULL))
+ goto out;
+
+ dso->a2l_fails = 0;
+ return file;
+
+out:
+ if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) {
+ dso->has_srcline = 0;
+ dso__free_a2l(dso);
+ }
+
+ return NULL;
+}
+
void free_srcline(char *srcline)
{
if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0)
diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h
index b2bb5502fd62..5762212dc342 100644
--- a/tools/perf/util/srcline.h
+++ b/tools/perf/util/srcline.h
@@ -16,6 +16,7 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
bool show_sym, bool show_addr, bool unwind_inlines,
u64 ip);
void free_srcline(char *srcline);
+char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line);
/* insert the srcline into the DSO, which will take ownership */
void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline);
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index e7b4c44ebb62..665ee374fc01 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -59,6 +59,15 @@ static void print_noise(struct perf_stat_config *config,
print_noise_pct(config, stddev_stats(&ps->res_stats[0]), avg);
}
+static void print_cgroup(struct perf_stat_config *config, struct perf_evsel *evsel)
+{
+ if (nr_cgroups) {
+ const char *cgrp_name = evsel->cgrp ? evsel->cgrp->name : "";
+ fprintf(config->output, "%s%s", config->csv_sep, cgrp_name);
+ }
+}
+
+
static void aggr_printout(struct perf_stat_config *config,
struct perf_evsel *evsel, int id, int nr)
{
@@ -336,8 +345,7 @@ static void abs_printout(struct perf_stat_config *config,
fprintf(output, "%-*s", config->csv_output ? 0 : 25, perf_evsel__name(evsel));
- if (evsel->cgrp)
- fprintf(output, "%s%s", config->csv_sep, evsel->cgrp->name);
+ print_cgroup(config, evsel);
}
static bool is_mixed_hw_group(struct perf_evsel *counter)
@@ -431,9 +439,7 @@ static void printout(struct perf_stat_config *config, int id, int nr,
config->csv_output ? 0 : -25,
perf_evsel__name(counter));
- if (counter->cgrp)
- fprintf(config->output, "%s%s",
- config->csv_sep, counter->cgrp->name);
+ print_cgroup(config, counter);
if (!config->csv_output)
pm(config, &os, NULL, NULL, "", 0);
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 8ad32763cfff..3c22c58b3e90 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -209,12 +209,12 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
int cpu, struct runtime_stat *st)
{
int ctx = evsel_context(counter);
+ u64 count_ns = count;
count *= counter->scale;
- if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
- perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
- update_runtime_stat(st, STAT_NSECS, 0, cpu, count);
+ if (perf_evsel__is_clock(counter))
+ update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index 1cbada2dc6be..f735ee038713 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -334,7 +334,7 @@ static char *cpu_model(void)
if (file) {
while (fgets(buf, 255, file)) {
if (strstr(buf, "model name")) {
- strncpy(cpu_m, &buf[13], 255);
+ strlcpy(cpu_m, &buf[13], 255);
break;
}
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index d188b7588152..01f2c7385e38 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1537,17 +1537,6 @@ int dso__load(struct dso *dso, struct map *map)
dso->adjust_symbols = 0;
if (perfmap) {
- struct stat st;
-
- if (lstat(map_path, &st) < 0)
- goto out;
-
- if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
- pr_warning("File %s not owned by current user or root, "
- "ignoring it (use -f to override).\n", map_path);
- goto out;
- }
-
ret = dso__load_perf_map(map_path, dso);
dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
DSO_BINARY_TYPE__NOT_FOUND;
@@ -1680,11 +1669,22 @@ struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
{
struct maps *maps = &mg->maps;
struct map *map;
+ struct rb_node *node;
down_read(&maps->lock);
- for (map = maps__first(maps); map; map = map__next(map)) {
- if (map->dso && strcmp(map->dso->short_name, name) == 0)
+ for (node = maps->names.rb_node; node; ) {
+ int rc;
+
+ map = rb_entry(node, struct map, rb_node_name);
+
+ rc = strcmp(map->dso->short_name, name);
+ if (rc < 0)
+ node = node->rb_left;
+ else if (rc > 0)
+ node = node->rb_right;
+ else
goto out_unlock;
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index d026d215bdc6..14d9d438e7e2 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -63,6 +63,7 @@ struct symbol {
u8 ignore:1;
u8 inlined:1;
u8 arch_sym;
+ bool annotate2;
char name[0];
};
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 3d9ed7d0e281..c83372329f89 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -64,6 +64,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
RB_CLEAR_NODE(&thread->rb_node);
/* Thread holds first ref to nsdata. */
thread->nsinfo = nsinfo__new(pid);
+ srccode_state_init(&thread->srccode_state);
}
return thread;
@@ -103,6 +104,7 @@ void thread__delete(struct thread *thread)
unwind__finish_access(thread);
nsinfo__zput(thread->nsinfo);
+ srccode_state_free(&thread->srccode_state);
exit_rwsem(&thread->namespaces_lock);
exit_rwsem(&thread->comm_lock);
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 30e2b4c165fe..712dd48cc0ca 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -8,6 +8,7 @@
#include <unistd.h>
#include <sys/types.h>
#include "symbol.h"
+#include "map.h"
#include <strlist.h>
#include <intlist.h>
#include "rwsem.h"
@@ -38,6 +39,7 @@ struct thread {
void *priv;
struct thread_stack *ts;
struct nsinfo *nsinfo;
+ struct srccode_state srccode_state;
#ifdef HAVE_LIBUNWIND_SUPPORT
void *addr_space;
struct unwind_libunwind_ops *unwind_libunwind_ops;
@@ -96,9 +98,13 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al);
+struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
+ struct addr_location *al);
struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
u64 addr, struct addr_location *al);
+struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
+ u64 addr, struct addr_location *al);
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
struct addr_location *al);
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index 8e517def925b..4c8da8c4435f 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -46,8 +46,9 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
samples_per_sec;
ret = SNPRINTF(bf, size,
" PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
- " exact: %4.1f%% [", samples_per_sec,
- ksamples_percent, esamples_percent);
+ " exact: %4.1f%% lost: %" PRIu64 "/%" PRIu64 " drop: %" PRIu64 "/%" PRIu64 " [",
+ samples_per_sec, ksamples_percent, esamples_percent,
+ top->lost, top->lost_total, top->drop, top->drop_total);
} else {
float us_samples_per_sec = top->us_samples / top->delay_secs;
float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
@@ -106,6 +107,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
top->evlist->cpus->nr > 1 ? "s" : "");
}
+ perf_top__reset_sample_counters(top);
return ret;
}
@@ -113,5 +115,5 @@ void perf_top__reset_sample_counters(struct perf_top *top)
{
top->samples = top->us_samples = top->kernel_samples =
top->exact_samples = top->guest_kernel_samples =
- top->guest_us_samples = 0;
+ top->guest_us_samples = top->lost = top->drop = 0;
}
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 9add1f72ce95..19f95eaf75c8 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -22,7 +22,7 @@ struct perf_top {
* Symbols will be added here in perf_event__process_sample and will
* get out after decayed.
*/
- u64 samples;
+ u64 samples, lost, lost_total, drop, drop_total;
u64 kernel_samples, us_samples;
u64 exact_samples;
u64 guest_us_samples, guest_kernel_samples;
@@ -40,6 +40,14 @@ struct perf_top {
const char *sym_filter;
float min_percent;
unsigned int nr_threads_synthesize;
+
+ struct {
+ struct ordered_events *in;
+ struct ordered_events data[2];
+ bool rotate;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ } qe;
};
#define CONSOLE_CLEAR ""
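The new qe member prepares double buffering of ordered events in perf top: the reader thread fills one of qe.data[0]/[1] while the display side drains the other, with rotate plus the mutex/cond pair coordinating the swap. A schematic of the hand-off (assumed logic; the actual wiring lands in builtin-top.c):

	/* Reader side: request a rotation and wait until the processing
	 * thread has swapped qe.in to the other ordered_events buffer. */
	static void rotate_queues(struct perf_top *top)
	{
		pthread_mutex_lock(&top->qe.mutex);
		top->qe.rotate = true;
		pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
		pthread_mutex_unlock(&top->qe.mutex);
	}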
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 32e558a65af3..ad74be1f0e42 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -33,7 +33,7 @@ static int get_common_field(struct scripting_context *context,
int *offset, int *size, const char *type)
{
struct tep_handle *pevent = context->pevent;
- struct tep_event_format *event;
+ struct tep_event *event;
struct tep_format_field *field;
if (!*size) {
@@ -95,7 +95,7 @@ int common_pc(struct scripting_context *context)
}
unsigned long long
-raw_field_value(struct tep_event_format *event, const char *name, void *data)
+raw_field_value(struct tep_event *event, const char *name, void *data)
{
struct tep_format_field *field;
unsigned long long val;
@@ -109,12 +109,12 @@ raw_field_value(struct tep_event_format *event, const char *name, void *data)
return val;
}
-unsigned long long read_size(struct tep_event_format *event, void *ptr, int size)
+unsigned long long read_size(struct tep_event *event, void *ptr, int size)
{
return tep_read_number(event->pevent, ptr, size);
}
-void event_format__fprintf(struct tep_event_format *event,
+void event_format__fprintf(struct tep_event *event,
int cpu, void *data, int size, FILE *fp)
{
struct tep_record record;
@@ -131,7 +131,7 @@ void event_format__fprintf(struct tep_event_format *event,
trace_seq_destroy(&s);
}
-void event_format__print(struct tep_event_format *event,
+void event_format__print(struct tep_event *event,
int cpu, void *data, int size)
{
return event_format__fprintf(event, cpu, data, size, stdout);
@@ -190,12 +190,12 @@ int parse_event_file(struct tep_handle *pevent,
return tep_parse_event(pevent, buf, size, sys);
}
-struct tep_event_format *trace_find_next_event(struct tep_handle *pevent,
- struct tep_event_format *event)
+struct tep_event *trace_find_next_event(struct tep_handle *pevent,
+ struct tep_event *event)
{
static int idx;
int events_count;
- struct tep_event_format *all_events;
+ struct tep_event *all_events;
all_events = tep_get_first_event(pevent);
events_count = tep_get_events_count(pevent);
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index 76f12c705ef9..efe2f58cff4e 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -102,7 +102,7 @@ static unsigned int read4(struct tep_handle *pevent)
if (do_read(&data, 4) < 0)
return 0;
- return __tep_data2host4(pevent, data);
+ return tep_read_number(pevent, &data, 4);
}
static unsigned long long read8(struct tep_handle *pevent)
@@ -111,7 +111,7 @@ static unsigned long long read8(struct tep_handle *pevent)
if (do_read(&data, 8) < 0)
return 0;
- return __tep_data2host8(pevent, data);
+ return tep_read_number(pevent, &data, 8);
}
static char *read_string(void)
diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c
index 95664b2f771e..cbe0dd758e3a 100644
--- a/tools/perf/util/trace-event.c
+++ b/tools/perf/util/trace-event.c
@@ -72,12 +72,12 @@ void trace_event__cleanup(struct trace_event *t)
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
-static struct tep_event_format*
+static struct tep_event*
tp_format(const char *sys, const char *name)
{
char *tp_dir = get_events_file(sys);
struct tep_handle *pevent = tevent.pevent;
- struct tep_event_format *event = NULL;
+ struct tep_event *event = NULL;
char path[PATH_MAX];
size_t size;
char *data;
@@ -102,7 +102,7 @@ tp_format(const char *sys, const char *name)
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
-struct tep_event_format*
+struct tep_event*
trace_event__tp_format(const char *sys, const char *name)
{
if (!tevent_initialized && trace_event__init2())
@@ -111,7 +111,7 @@ trace_event__tp_format(const char *sys, const char *name)
return tp_format(sys, name);
}
-struct tep_event_format *trace_event__tp_format_id(int id)
+struct tep_event *trace_event__tp_format_id(int id)
{
if (!tevent_initialized && trace_event__init2())
return ERR_PTR(-ENOMEM);
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index f024d73bfc40..d9b0a942090a 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -22,17 +22,17 @@ int trace_event__init(struct trace_event *t);
void trace_event__cleanup(struct trace_event *t);
int trace_event__register_resolver(struct machine *machine,
tep_func_resolver_t *func);
-struct tep_event_format*
+struct tep_event*
trace_event__tp_format(const char *sys, const char *name);
-struct tep_event_format *trace_event__tp_format_id(int id);
+struct tep_event *trace_event__tp_format_id(int id);
int bigendian(void);
-void event_format__fprintf(struct tep_event_format *event,
+void event_format__fprintf(struct tep_event *event,
int cpu, void *data, int size, FILE *fp);
-void event_format__print(struct tep_event_format *event,
+void event_format__print(struct tep_event *event,
int cpu, void *data, int size);
int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size);
@@ -40,7 +40,7 @@ int parse_event_file(struct tep_handle *pevent,
char *buf, unsigned long size, char *sys);
unsigned long long
-raw_field_value(struct tep_event_format *event, const char *name, void *data);
+raw_field_value(struct tep_event *event, const char *name, void *data);
void parse_proc_kallsyms(struct tep_handle *pevent, char *file, unsigned int size);
void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size);
@@ -48,9 +48,9 @@ void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int siz
ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe);
-struct tep_event_format *trace_find_next_event(struct tep_handle *pevent,
- struct tep_event_format *event);
-unsigned long long read_size(struct tep_event_format *event, void *ptr, int size);
+struct tep_event *trace_find_next_event(struct tep_handle *pevent,
+ struct tep_event *event);
+unsigned long long read_size(struct tep_event *event, void *ptr, int size);
unsigned long long eval_flag(const char *flag);
int read_tracing_data(int fd, struct list_head *pattrs);
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 14508ee7707a..ece040b799f6 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -59,6 +59,10 @@ int fetch_kernel_version(unsigned int *puint,
const char *perf_tip(const char *dirpath);
+#ifndef HAVE_GET_CURRENT_DIR_NAME
+char *get_current_dir_name(void);
+#endif
+
#ifndef HAVE_SCHED_GETCPU_SUPPORT
int sched_getcpu(void);
#endif
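get_current_dir_name() is a GNU extension, hence the feature guard. One plausible fallback, roughly what the companion compat object would provide (sketch, assuming PATH_MAX suffices):

	#ifndef HAVE_GET_CURRENT_DIR_NAME
	#include <limits.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	char *get_current_dir_name(void)
	{
		char pwd[PATH_MAX];

		return getcwd(pwd, sizeof(pwd)) == NULL ? NULL : strdup(pwd);
	}
	#endif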
diff --git a/tools/power/acpi/tools/acpidump/apmain.c b/tools/power/acpi/tools/acpidump/apmain.c
index db213171f8d9..2d9b94b631cb 100644
--- a/tools/power/acpi/tools/acpidump/apmain.c
+++ b/tools/power/acpi/tools/acpidump/apmain.c
@@ -106,7 +106,7 @@ static int ap_insert_action(char *argument, u32 to_be_done)
current_action++;
if (current_action > AP_MAX_ACTIONS) {
- fprintf(stderr, "Too many table options (max %u)\n",
+ fprintf(stderr, "Too many table options (max %d)\n",
AP_MAX_ACTIONS);
return (-1);
}
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index db66a952c173..fd8765af19bb 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -89,6 +89,7 @@ endif
localedir ?= /usr/share/locale
docdir ?= /usr/share/doc/packages/cpupower
confdir ?= /etc/
+bash_completion_dir ?= /usr/share/bash-completion/completions
# Toolchain: what tools do we use, and what options do they need:
@@ -96,7 +97,8 @@ CP = cp -fpR
INSTALL = /usr/bin/install -c
INSTALL_PROGRAM = ${INSTALL}
INSTALL_DATA = ${INSTALL} -m 644
-INSTALL_SCRIPT = ${INSTALL_PROGRAM}
+# bash completion scripts get sourced, so they should be rw only (not executable).
+INSTALL_SCRIPT = ${INSTALL} -m 644
# If you are running a cross compiler, you may want to set this
# to something more interesting, like "arm-linux-". If you want
@@ -288,6 +290,8 @@ install-lib:
install-tools:
$(INSTALL) -d $(DESTDIR)${bindir}
$(INSTALL_PROGRAM) $(OUTPUT)cpupower $(DESTDIR)${bindir}
+ $(INSTALL) -d $(DESTDIR)${bash_completion_dir}
+ $(INSTALL_SCRIPT) cpupower-completion.sh '$(DESTDIR)${bash_completion_dir}/cpupower'
install-man:
$(INSTALL_DATA) -D man/cpupower.1 $(DESTDIR)${mandir}/man1/cpupower.1
diff --git a/tools/power/cpupower/cpupower-completion.sh b/tools/power/cpupower/cpupower-completion.sh
new file mode 100644
index 000000000000..e10839cfcfc1
--- /dev/null
+++ b/tools/power/cpupower/cpupower-completion.sh
@@ -0,0 +1,128 @@
+# -*- shell-script -*-
+# bash completion script for cpupower
+# Taken from git.git's completion script.
+
+_cpupower_commands="frequency-info frequency-set idle-info idle-set set info monitor"
+
+_frequency_info ()
+{
+ local flags="-f -w -l -d -p -g -a -s -y -o -m -n --freq --hwfreq --hwlimits --driver --policy --governors --related-cpus --affected-cpus --stats --latency --proc --human --no-rounding"
+ local prev="${COMP_WORDS[COMP_CWORD-1]}"
+ local cur="${COMP_WORDS[COMP_CWORD]}"
+ case "$prev" in
+ frequency-info) COMPREPLY=($(compgen -W "$flags" -- "$cur")) ;;
+ esac
+}
+
+_frequency_set ()
+{
+ local flags="-f -g --freq --governor -d --min -u --max -r --related"
+ local prev="${COMP_WORDS[COMP_CWORD-1]}"
+ local cur="${COMP_WORDS[COMP_CWORD]}"
+ case "$prev" in
+ -f| --freq | -d | --min | -u | --max)
+ if [ -d /sys/devices/system/cpu/cpufreq/ ] ; then
+ COMPREPLY=($(compgen -W '$(cat $(ls -d /sys/devices/system/cpu/cpufreq/policy* | head -1)/scaling_available_frequencies)' -- "$cur"))
+ fi ;;
+ -g| --governor)
+ if [ -d /sys/devices/system/cpu/cpufreq/ ] ; then
+ COMPREPLY=($(compgen -W '$(cat $(ls -d /sys/devices/system/cpu/cpufreq/policy* | head -1)/scaling_available_governors)' -- "$cur"))
+ fi;;
+ frequency-set) COMPREPLY=($(compgen -W "$flags" -- "$cur")) ;;
+ esac
+}
+
+_idle_info()
+{
+ local flags="-f --silent"
+ local prev="${COMP_WORDS[COMP_CWORD-1]}"
+ local cur="${COMP_WORDS[COMP_CWORD]}"
+ case "$prev" in
+ idle-info) COMPREPLY=($(compgen -W "$flags" -- "$cur")) ;;
+ esac
+}
+
+_idle_set()
+{
+ local flags="-d --disable -e --enable -D --disable-by-latency -E --enable-all"
+ local prev="${COMP_WORDS[COMP_CWORD-1]}"
+ local cur="${COMP_WORDS[COMP_CWORD]}"
+ case "$prev" in
+ idle-set) COMPREPLY=($(compgen -W "$flags" -- "$cur")) ;;
+ esac
+}
+
+_set()
+{
+ local flags="--perf-bias -b"
+ local prev="${COMP_WORDS[COMP_CWORD-1]}"
+ local cur="${COMP_WORDS[COMP_CWORD]}"
+ case "$prev" in
+ set) COMPREPLY=($(compgen -W "$flags" -- "$cur")) ;;
+ esac
+}
+
+_monitor()
+{
+ local flags="-l -m -i -c -v"
+ local prev="${COMP_WORDS[COMP_CWORD-1]}"
+ local cur="${COMP_WORDS[COMP_CWORD]}"
+ case "$prev" in
+ monitor) COMPREPLY=($(compgen -W "$flags" -- "$cur")) ;;
+ esac
+}
+
+_taskset()
+{
+ local prev_to_prev="${COMP_WORDS[COMP_CWORD-2]}"
+ local prev="${COMP_WORDS[COMP_CWORD-1]}"
+ local cur="${COMP_WORDS[COMP_CWORD]}"
+ case "$prev_to_prev" in
+ -c|--cpu) COMPREPLY=($(compgen -W "$_cpupower_commands" -- "$cur")) ;;
+ esac
+ case "$prev" in
+ frequency-info) _frequency_info ;;
+ frequency-set) _frequency_set ;;
+ idle-info) _idle_info ;;
+ idle-set) _idle_set ;;
+ set) _set ;;
+ monitor) _monitor ;;
+ esac
+
+}
+
+_cpupower ()
+{
+ local i
+ local c=1
+ local command
+
+ while test $c -lt $COMP_CWORD; do
+ if test $c == 1; then
+ command="${COMP_WORDS[c]}"
+ fi
+ c=$((c+1))
+ done
+
+ # Complete name of subcommand if the user has not finished typing it yet.
+ if test $c -eq $COMP_CWORD -a -z "$command"; then
+ COMPREPLY=($(compgen -W "help -v --version -c --cpu $_cpupower_commands" -- "${COMP_WORDS[COMP_CWORD]}"))
+ return
+ fi
+
+ # Complete arguments to subcommands.
+ case "$command" in
+ -v|--version) return ;;
+ -c|--cpu) _taskset ;;
+ help) COMPREPLY=($(compgen -W "$_cpupower_commands" -- "${COMP_WORDS[COMP_CWORD]}")) ;;
+ frequency-info) _frequency_info ;;
+ frequency-set) _frequency_set ;;
+ idle-info) _idle_info ;;
+ idle-set) _idle_set ;;
+ set) _set ;;
+ monitor) _monitor ;;
+ esac
+}
+
+complete -o bashdefault -o default -F _cpupower cpupower 2>/dev/null \
+ || complete -o default -F _cpupower cpupower
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
index 84e2b648e622..2fa3c5757bcb 100755
--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
@@ -585,9 +585,9 @@ current_max_cpu = 0
read_trace_data(filename)
-clear_trace_file()
-# Free the memory
if interval:
+ clear_trace_file()
+ # Free the memory
free_trace_buffer()
if graph_data_present == False:
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 328f62e6ea02..9327c0ddc3a5 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1,6 +1,6 @@
/*
* turbostat -- show CPU frequency and C-state residency
- * on modern Intel turbo-capable processors.
+ * on modern Intel and AMD processors.
*
* Copyright (c) 2013 Intel Corporation.
* Len Brown <len.brown@intel.com>
@@ -71,6 +71,8 @@ unsigned int do_irtl_snb;
unsigned int do_irtl_hsw;
unsigned int units = 1000000; /* MHz etc */
unsigned int genuine_intel;
+unsigned int authentic_amd;
+unsigned int max_level, max_extended_level;
unsigned int has_invariant_tsc;
unsigned int do_nhm_platform_info;
unsigned int no_MSR_MISC_PWR_MGMT;
@@ -1667,30 +1669,51 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
void get_apic_id(struct thread_data *t)
{
- unsigned int eax, ebx, ecx, edx, max_level;
+ unsigned int eax, ebx, ecx, edx;
- eax = ebx = ecx = edx = 0;
+ if (DO_BIC(BIC_APIC)) {
+ eax = ebx = ecx = edx = 0;
+ __cpuid(1, eax, ebx, ecx, edx);
- if (!genuine_intel)
+ t->apic_id = (ebx >> 24) & 0xff;
+ }
+
+ if (!DO_BIC(BIC_X2APIC))
return;
- __cpuid(0, max_level, ebx, ecx, edx);
+ if (authentic_amd) {
+ unsigned int topology_extensions;
- __cpuid(1, eax, ebx, ecx, edx);
- t->apic_id = (ebx >> 24) & 0xf;
+ if (max_extended_level < 0x8000001e)
+ return;
- if (max_level < 0xb)
+ eax = ebx = ecx = edx = 0;
+ __cpuid(0x80000001, eax, ebx, ecx, edx);
+ topology_extensions = ecx & (1 << 22);
+
+ if (topology_extensions == 0)
+ return;
+
+ eax = ebx = ecx = edx = 0;
+ __cpuid(0x8000001e, eax, ebx, ecx, edx);
+
+ t->x2apic_id = eax;
return;
+ }
- if (!DO_BIC(BIC_X2APIC))
+ if (!genuine_intel)
+ return;
+
+ if (max_level < 0xb)
return;
ecx = 0;
__cpuid(0xb, eax, ebx, ecx, edx);
t->x2apic_id = edx;
- if (debug && (t->apic_id != t->x2apic_id))
- fprintf(outf, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
+ if (debug && (t->apic_id != (t->x2apic_id & 0xff)))
+ fprintf(outf, "cpu%d: BIOS BUG: apic 0x%x x2apic 0x%x\n",
+ t->cpu_id, t->apic_id, t->x2apic_id);
}
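
The reworked get_apic_id() above keeps the legacy 8-bit APIC ID (CPUID leaf 1, EBX[31:24], now masked with 0xff instead of the old 0xf) separate from the full x2APIC ID, which comes from leaf 0xb on Intel and from leaf 0x8000001e on AMD when the topology-extensions bit (CPUID 0x80000001, ECX bit 22) is set. A userspace sketch of the same probing order, using GCC's <cpuid.h> and skipping the max-level checks the real code performs:

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* legacy 8-bit initial APIC ID: CPUID(1), EBX bits 31:24 */
		__cpuid(1, eax, ebx, ecx, edx);
		printf("apic id:   0x%x\n", (ebx >> 24) & 0xff);

		/* Intel x2APIC ID: CPUID(0xb) with ECX=0 returns it in EDX */
		__cpuid_count(0xb, 0, eax, ebx, ecx, edx);
		printf("x2apic id: 0x%x\n", edx);

		/* AMD instead gates CPUID(0x8000001e).EAX on the topology-
		 * extensions bit, CPUID(0x80000001).ECX bit 22.
		 */
		return 0;
	}
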
/*
@@ -1953,11 +1976,12 @@ done:
#define PCL_7S 11 /* PC7 Shrink */
#define PCL__8 12 /* PC8 */
#define PCL__9 13 /* PC9 */
-#define PCLUNL 14 /* Unlimited */
+#define PCL_10 14 /* PC10 */
+#define PCLUNL 15 /* Unlimited */
int pkg_cstate_limit = PCLUKN;
char *pkg_cstate_limit_strings[] = { "reserved", "unknown", "pc0", "pc1", "pc2",
- "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "unlimited"};
+ "pc3", "pc4", "pc6", "pc6n", "pc6r", "pc7", "pc7s", "pc8", "pc9", "pc10", "unlimited"};
int nhm_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__3, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
int snb_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCL__7, PCL_7S, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
@@ -1965,7 +1989,7 @@ int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S,
int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7};
int amt_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
-int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
+int glm_pkg_cstate_limits[16] = {PCLUNL, PCL__1, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9, PCL_10, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
int skx_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV};
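
The new glm_pkg_cstate_limits[] table above plugs into the same two-step decode as its neighbours: the low bits of MSR_PKG_CST_CONFIG_CONTROL index a per-model table of PCL_* symbols, and the symbol in turn indexes pkg_cstate_limit_strings[]. A compilable sketch of that decode with a simplified symbol set (the 0xf mask and the enum subset are illustrative, not the exact turbostat definitions):

	#include <stdio.h>

	/* simplified subset of the PCL_* symbols and their strings */
	enum { PCL__1, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8, PCL__9,
	       PCL_10, PCLUNL, PCLRSV };

	static const char *pcl_strings[] = {
		"pc1", "pc3", "pc6", "pc7", "pc7s", "pc8", "pc9",
		"pc10", "unlimited", "reserved",
	};

	/* Goldmont-style mapping: raw MSR field -> PCL_* symbol */
	static const int glm_limits[16] = {
		PCLUNL, PCL__1, PCL__3, PCL__6, PCL__7, PCL_7S, PCL__8,
		PCL__9, PCL_10, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV,
		PCLRSV, PCLRSV,
	};

	int main(void)
	{
		unsigned long long msr = 0x8;	/* stand-in for the MSR read */

		printf("pkg cstate limit: %s\n",
		       pcl_strings[glm_limits[msr & 0xf]]);	/* "pc10" */
		return 0;
	}
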
@@ -3113,13 +3137,8 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
bclk = discover_bclk(family, model);
switch (model) {
- case INTEL_FAM6_NEHALEM_EP: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
- case 0x1F: /* Core i7 and i5 Processor - Nehalem */
- case INTEL_FAM6_WESTMERE: /* Westmere Client - Clarkdale, Arrandale */
- case INTEL_FAM6_WESTMERE_EP: /* Westmere EP - Gulftown */
case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */
- case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */
pkg_cstate_limits = nhm_pkg_cstate_limits;
break;
case INTEL_FAM6_SANDYBRIDGE: /* SNB */
@@ -3131,16 +3150,11 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
break;
case INTEL_FAM6_HASWELL_CORE: /* HSW */
case INTEL_FAM6_HASWELL_X: /* HSX */
- case INTEL_FAM6_HASWELL_ULT: /* HSW */
case INTEL_FAM6_HASWELL_GT3E: /* HSW */
case INTEL_FAM6_BROADWELL_CORE: /* BDW */
case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
case INTEL_FAM6_BROADWELL_X: /* BDX */
- case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
- case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
- case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
- case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
pkg_cstate_limits = hsw_pkg_cstate_limits;
has_misc_feature_control = 1;
@@ -3159,13 +3173,12 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
no_MSR_MISC_PWR_MGMT = 1;
break;
case INTEL_FAM6_XEON_PHI_KNL: /* PHI */
- case INTEL_FAM6_XEON_PHI_KNM:
pkg_cstate_limits = phi_pkg_cstate_limits;
break;
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
- pkg_cstate_limits = bxt_pkg_cstate_limits;
+ pkg_cstate_limits = glm_pkg_cstate_limits;
break;
default:
return 0;
@@ -3220,7 +3233,6 @@ int is_bdx(unsigned int family, unsigned int model)
switch (model) {
case INTEL_FAM6_BROADWELL_X:
- case INTEL_FAM6_BROADWELL_XEON_D:
return 1;
}
return 0;
@@ -3246,9 +3258,7 @@ int has_turbo_ratio_limit(unsigned int family, unsigned int model)
switch (model) {
/* Nehalem compatible, but do not include turbo-ratio limit support */
case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */
- case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */
case INTEL_FAM6_XEON_PHI_KNL: /* PHI - Knights Landing (different MSR definition) */
- case INTEL_FAM6_XEON_PHI_KNM:
return 0;
default:
return 1;
@@ -3303,7 +3313,6 @@ int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
switch (model) {
case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */
- case INTEL_FAM6_XEON_PHI_KNM:
return 1;
default:
return 0;
@@ -3337,21 +3346,15 @@ int has_config_tdp(unsigned int family, unsigned int model)
case INTEL_FAM6_IVYBRIDGE: /* IVB */
case INTEL_FAM6_HASWELL_CORE: /* HSW */
case INTEL_FAM6_HASWELL_X: /* HSX */
- case INTEL_FAM6_HASWELL_ULT: /* HSW */
case INTEL_FAM6_HASWELL_GT3E: /* HSW */
case INTEL_FAM6_BROADWELL_CORE: /* BDW */
case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
case INTEL_FAM6_BROADWELL_X: /* BDX */
- case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
- case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
- case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
- case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
case INTEL_FAM6_SKYLAKE_X: /* SKX */
case INTEL_FAM6_XEON_PHI_KNL: /* Knights Landing */
- case INTEL_FAM6_XEON_PHI_KNM:
return 1;
default:
return 0;
@@ -3744,9 +3747,7 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units)
switch (model) {
case INTEL_FAM6_HASWELL_X: /* HSX */
case INTEL_FAM6_BROADWELL_X: /* BDX */
- case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
- case INTEL_FAM6_XEON_PHI_KNM:
return (rapl_dram_energy_units = 15.3 / 1000000);
default:
return (rapl_energy_units);
@@ -3775,7 +3776,6 @@ void rapl_probe(unsigned int family, unsigned int model)
case INTEL_FAM6_SANDYBRIDGE:
case INTEL_FAM6_IVYBRIDGE:
case INTEL_FAM6_HASWELL_CORE: /* HSW */
- case INTEL_FAM6_HASWELL_ULT: /* HSW */
case INTEL_FAM6_HASWELL_GT3E: /* HSW */
case INTEL_FAM6_BROADWELL_CORE: /* BDW */
case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
@@ -3799,9 +3799,6 @@ void rapl_probe(unsigned int family, unsigned int model)
BIC_PRESENT(BIC_PkgWatt);
break;
case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
- case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
- case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
- case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
BIC_PRESENT(BIC_PKG__);
@@ -3820,10 +3817,8 @@ void rapl_probe(unsigned int family, unsigned int model)
break;
case INTEL_FAM6_HASWELL_X: /* HSX */
case INTEL_FAM6_BROADWELL_X: /* BDX */
- case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
case INTEL_FAM6_SKYLAKE_X: /* SKX */
case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
- case INTEL_FAM6_XEON_PHI_KNM:
do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
BIC_PRESENT(BIC_PKG__);
BIC_PRESENT(BIC_RAM__);
@@ -3916,7 +3911,6 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model)
switch (model) {
case INTEL_FAM6_HASWELL_CORE: /* HSW */
- case INTEL_FAM6_HASWELL_ULT: /* HSW */
case INTEL_FAM6_HASWELL_GT3E: /* HSW */
do_gfx_perf_limit_reasons = 1;
case INTEL_FAM6_HASWELL_X: /* HSX */
@@ -4128,16 +4122,11 @@ int has_snb_msrs(unsigned int family, unsigned int model)
case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */
case INTEL_FAM6_HASWELL_CORE: /* HSW */
case INTEL_FAM6_HASWELL_X: /* HSW */
- case INTEL_FAM6_HASWELL_ULT: /* HSW */
case INTEL_FAM6_HASWELL_GT3E: /* HSW */
case INTEL_FAM6_BROADWELL_CORE: /* BDW */
case INTEL_FAM6_BROADWELL_GT3E: /* BDW */
case INTEL_FAM6_BROADWELL_X: /* BDX */
- case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
- case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
- case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
- case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
case INTEL_FAM6_SKYLAKE_X: /* SKX */
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
@@ -4166,12 +4155,9 @@ int has_hsw_msrs(unsigned int family, unsigned int model)
return 0;
switch (model) {
- case INTEL_FAM6_HASWELL_ULT: /* HSW */
+ case INTEL_FAM6_HASWELL_CORE:
case INTEL_FAM6_BROADWELL_CORE: /* BDW */
case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
- case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
- case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
- case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
case INTEL_FAM6_ATOM_GOLDMONT: /* BXT */
case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
@@ -4195,9 +4181,6 @@ int has_skl_msrs(unsigned int family, unsigned int model)
switch (model) {
case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
- case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
- case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
- case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */
return 1;
}
@@ -4222,7 +4205,6 @@ int is_knl(unsigned int family, unsigned int model)
return 0;
switch (model) {
case INTEL_FAM6_XEON_PHI_KNL: /* KNL */
- case INTEL_FAM6_XEON_PHI_KNM:
return 1;
}
return 0;
@@ -4436,18 +4418,56 @@ void decode_c6_demotion_policy_msr(void)
base_cpu, msr, msr & (1 << 0) ? "EN" : "DIS");
}
+/*
+ * Models that behave identically for turbostat's purposes are mapped
+ * to one canonical model here, so the probe tables need list only one.
+ */
+unsigned int intel_model_duplicates(unsigned int model)
+{
+
+ switch(model) {
+ case INTEL_FAM6_NEHALEM_EP: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
+ case INTEL_FAM6_NEHALEM: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
+ case 0x1F: /* Core i7 and i5 Processor - Nehalem */
+ case INTEL_FAM6_WESTMERE: /* Westmere Client - Clarkdale, Arrandale */
+ case INTEL_FAM6_WESTMERE_EP: /* Westmere EP - Gulftown */
+ return INTEL_FAM6_NEHALEM;
+
+ case INTEL_FAM6_NEHALEM_EX: /* Nehalem-EX Xeon - Beckton */
+ case INTEL_FAM6_WESTMERE_EX: /* Westmere-EX Xeon - Eagleton */
+ return INTEL_FAM6_NEHALEM_EX;
+
+ case INTEL_FAM6_XEON_PHI_KNM:
+ return INTEL_FAM6_XEON_PHI_KNL;
+
+ case INTEL_FAM6_HASWELL_ULT:
+ return INTEL_FAM6_HASWELL_CORE;
+
+ case INTEL_FAM6_BROADWELL_X:
+ case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */
+ return INTEL_FAM6_BROADWELL_X;
+
+ case INTEL_FAM6_SKYLAKE_MOBILE:
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
+ return INTEL_FAM6_SKYLAKE_MOBILE;
+ }
+ return model;
+}
void process_cpuid()
{
- unsigned int eax, ebx, ecx, edx, max_level, max_extended_level;
- unsigned int fms, family, model, stepping;
+ unsigned int eax, ebx, ecx, edx;
+ unsigned int fms, family, model, stepping, ecx_flags, edx_flags;
unsigned int has_turbo;
eax = ebx = ecx = edx = 0;
__cpuid(0, max_level, ebx, ecx, edx);
- if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
+ if (ebx == 0x756e6547 && ecx == 0x6c65746e && edx == 0x49656e69)
genuine_intel = 1;
+ else if (ebx == 0x68747541 && ecx == 0x444d4163 && edx == 0x69746e65)
+ authentic_amd = 1;
if (!quiet)
fprintf(outf, "CPUID(0): %.4s%.4s%.4s ",
@@ -4461,25 +4481,8 @@ void process_cpuid()
family += (fms >> 20) & 0xff;
if (family >= 6)
model += ((fms >> 16) & 0xf) << 4;
-
- if (!quiet) {
- fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
- max_level, family, model, stepping, family, model, stepping);
- fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
- ecx & (1 << 0) ? "SSE3" : "-",
- ecx & (1 << 3) ? "MONITOR" : "-",
- ecx & (1 << 6) ? "SMX" : "-",
- ecx & (1 << 7) ? "EIST" : "-",
- ecx & (1 << 8) ? "TM2" : "-",
- edx & (1 << 4) ? "TSC" : "-",
- edx & (1 << 5) ? "MSR" : "-",
- edx & (1 << 22) ? "ACPI-TM" : "-",
- edx & (1 << 28) ? "HT" : "-",
- edx & (1 << 29) ? "TM" : "-");
- }
-
- if (!(edx & (1 << 5)))
- errx(1, "CPUID: no MSR");
+ ecx_flags = ecx;
+ edx_flags = edx;
/*
* check max extended function levels of CPUID.
@@ -4489,6 +4492,27 @@ void process_cpuid()
ebx = ecx = edx = 0;
__cpuid(0x80000000, max_extended_level, ebx, ecx, edx);
+ if (!quiet) {
+ fprintf(outf, "0x%x CPUID levels; 0x%x xlevels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
+ max_level, max_extended_level, family, model, stepping, family, model, stepping);
+ fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
+ ecx_flags & (1 << 0) ? "SSE3" : "-",
+ ecx_flags & (1 << 3) ? "MONITOR" : "-",
+ ecx_flags & (1 << 6) ? "SMX" : "-",
+ ecx_flags & (1 << 7) ? "EIST" : "-",
+ ecx_flags & (1 << 8) ? "TM2" : "-",
+ edx_flags & (1 << 4) ? "TSC" : "-",
+ edx_flags & (1 << 5) ? "MSR" : "-",
+ edx_flags & (1 << 22) ? "ACPI-TM" : "-",
+ edx_flags & (1 << 28) ? "HT" : "-",
+ edx_flags & (1 << 29) ? "TM" : "-");
+ }
+ if (genuine_intel)
+ model = intel_model_duplicates(model);
+
+ if (!(edx_flags & (1 << 5)))
+ errx(1, "CPUID: no MSR");
+
if (max_extended_level >= 0x80000007) {
/*
@@ -4576,9 +4600,6 @@ void process_cpuid()
if (crystal_hz == 0)
switch(model) {
case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */
- case INTEL_FAM6_SKYLAKE_DESKTOP: /* SKL */
- case INTEL_FAM6_KABYLAKE_MOBILE: /* KBL */
- case INTEL_FAM6_KABYLAKE_DESKTOP: /* KBL */
crystal_hz = 24000000; /* 24.0 MHz */
break;
case INTEL_FAM6_ATOM_GOLDMONT_X: /* DNV */
@@ -4860,6 +4881,8 @@ void topology_probe()
return;
for (i = 0; i <= topo.max_cpu_num; ++i) {
+ if (cpu_is_not_present(i))
+ continue;
fprintf(outf,
"cpu %d pkg %d node %d lnode %d core %d thread %d\n",
i, cpus[i].physical_package_id,
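
For reference, the GenuineIntel/AuthenticAMD split introduced in process_cpuid() above compares the CPUID(0) registers against the vendor string, whose 12 bytes are packed into EBX, EDX, ECX in that order (hence the reordered comparison in the hunk). A standalone sketch of the same check:

	#include <stdio.h>
	#include <string.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int max_level, ebx, ecx, edx;
		char vendor[13];

		__cpuid(0, max_level, ebx, ecx, edx);

		/* the vendor string occupies EBX, EDX, ECX, in that order */
		memcpy(vendor + 0, &ebx, 4);
		memcpy(vendor + 4, &edx, 4);
		memcpy(vendor + 8, &ecx, 4);
		vendor[12] = '\0';

		if (!strcmp(vendor, "GenuineIntel"))		/* ebx 0x756e6547 */
			puts("genuine_intel");
		else if (!strcmp(vendor, "AuthenticAMD"))	/* ebx 0x68747541 */
			puts("authentic_amd");
		return 0;
	}
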
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 01ec04bf91b5..6c16ac36d482 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -15,6 +15,7 @@
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/libnvdimm.h>
+#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -215,6 +216,8 @@ struct nfit_test {
static struct workqueue_struct *nfit_wq;
+static struct gen_pool *nfit_pool;
+
static struct nfit_test *to_nfit_test(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1132,6 +1135,9 @@ static void release_nfit_res(void *data)
list_del(&nfit_res->list);
spin_unlock(&nfit_test_lock);
+ if (resource_size(&nfit_res->res) >= DIMM_SIZE)
+ gen_pool_free(nfit_pool, nfit_res->res.start,
+ resource_size(&nfit_res->res));
vfree(nfit_res->buf);
kfree(nfit_res);
}
@@ -1144,7 +1150,7 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
GFP_KERNEL);
int rc;
- if (!buf || !nfit_res)
+ if (!buf || !nfit_res || !*dma)
goto err;
rc = devm_add_action(dev, release_nfit_res, nfit_res);
if (rc)
@@ -1164,6 +1170,8 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
return nfit_res->buf;
err:
+ if (*dma && size >= DIMM_SIZE)
+ gen_pool_free(nfit_pool, *dma, size);
if (buf)
vfree(buf);
kfree(nfit_res);
@@ -1172,9 +1180,16 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
{
+ struct genpool_data_align data = {
+ .align = SZ_128M,
+ };
void *buf = vmalloc(size);
- *dma = (unsigned long) buf;
+ if (size >= DIMM_SIZE)
+ *dma = gen_pool_alloc_algo(nfit_pool, size,
+ gen_pool_first_fit_align, &data);
+ else
+ *dma = (unsigned long) buf;
return __test_alloc(t, size, dma, buf);
}
@@ -2839,6 +2854,17 @@ static __init int nfit_test_init(void)
goto err_register;
}
+ nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
+ if (!nfit_pool) {
+ rc = -ENOMEM;
+ goto err_register;
+ }
+
+ if (gen_pool_add(nfit_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
+ rc = -ENOMEM;
+ goto err_register;
+ }
+
for (i = 0; i < NUM_NFITS; i++) {
struct nfit_test *nfit_test;
struct platform_device *pdev;
@@ -2894,6 +2920,9 @@ static __init int nfit_test_init(void)
return 0;
err_register:
+ if (nfit_pool)
+ gen_pool_destroy(nfit_pool);
+
destroy_workqueue(nfit_wq);
for (i = 0; i < NUM_NFITS; i++)
if (instances[i])
@@ -2917,6 +2946,8 @@ static __exit void nfit_test_exit(void)
platform_driver_unregister(&nfit_test_driver);
nfit_test_teardown();
+ gen_pool_destroy(nfit_pool);
+
for (i = 0; i < NUM_NFITS; i++)
put_device(&instances[i]->pdev.dev);
class_destroy(nfit_test_dimm);
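
The nfit changes above carve DIMM-sized test buffers out of a genalloc pool so their fake DMA addresses stay 128M-aligned within a fixed [4G, 8G) window, with release_nfit_res() and the error path returning them. A condensed kernel-side sketch of that pool lifecycle (same calls as the patch; not buildable outside the kernel tree):

	#include <linux/genalloc.h>
	#include <linux/errno.h>
	#include <linux/log2.h>
	#include <linux/sizes.h>
	#include <linux/numa.h>

	static struct gen_pool *pool;

	static int pool_setup(void)
	{
		/* 4M minimum allocation granularity */
		pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
		if (!pool)
			return -ENOMEM;

		/* hand the pool the [4G, 8G) window to carve from */
		if (gen_pool_add(pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
			gen_pool_destroy(pool);
			return -ENOMEM;
		}
		return 0;
	}

	static unsigned long pool_alloc_aligned(size_t size)
	{
		struct genpool_data_align data = { .align = SZ_128M };

		/* first-fit allocation constrained to 128M alignment;
		 * returns 0 on failure, which is why __test_alloc() now
		 * rejects !*dma
		 */
		return gen_pool_alloc_algo(pool, size,
					   gen_pool_first_fit_align, &data);
	}
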
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index acf1afa01c5b..397d6b612502 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -7,6 +7,7 @@ LDLIBS+= -lpthread -lurcu
TARGETS = main idr-test multiorder xarray
CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
+ regression4.o \
tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
ifndef SHIFT
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
index 77a44c54998f..7a22d6e3732e 100644
--- a/tools/testing/radix-tree/main.c
+++ b/tools/testing/radix-tree/main.c
@@ -308,6 +308,7 @@ int main(int argc, char **argv)
regression1_test();
regression2_test();
regression3_test();
+ regression4_test();
iteration_test(0, 10 + 90 * long_run);
iteration_test(7, 10 + 90 * long_run);
single_thread_tests(long_run);
diff --git a/tools/testing/radix-tree/regression.h b/tools/testing/radix-tree/regression.h
index 3c8a1584e9ee..135145af18b7 100644
--- a/tools/testing/radix-tree/regression.h
+++ b/tools/testing/radix-tree/regression.h
@@ -5,5 +5,6 @@
void regression1_test(void);
void regression2_test(void);
void regression3_test(void);
+void regression4_test(void);
#endif
diff --git a/tools/testing/radix-tree/regression4.c b/tools/testing/radix-tree/regression4.c
new file mode 100644
index 000000000000..cf4e5aba6b08
--- /dev/null
+++ b/tools/testing/radix-tree/regression4.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/radix-tree.h>
+#include <linux/rcupdate.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include "regression.h"
+
+static pthread_barrier_t worker_barrier;
+static int obj0, obj1;
+static RADIX_TREE(mt_tree, GFP_KERNEL);
+
+static void *reader_fn(void *arg)
+{
+ int i;
+ void *entry;
+
+ rcu_register_thread();
+ pthread_barrier_wait(&worker_barrier);
+
+ for (i = 0; i < 1000000; i++) {
+ rcu_read_lock();
+ entry = radix_tree_lookup(&mt_tree, 0);
+ rcu_read_unlock();
+ if (entry != &obj0) {
+ printf("iteration %d bad entry = %p\n", i, entry);
+ abort();
+ }
+ }
+
+ rcu_unregister_thread();
+
+ return NULL;
+}
+
+static void *writer_fn(void *arg)
+{
+ int i;
+
+ rcu_register_thread();
+ pthread_barrier_wait(&worker_barrier);
+
+ for (i = 0; i < 1000000; i++) {
+ radix_tree_insert(&mt_tree, 1, &obj1);
+ radix_tree_delete(&mt_tree, 1);
+ }
+
+ rcu_unregister_thread();
+
+ return NULL;
+}
+
+void regression4_test(void)
+{
+ pthread_t reader, writer;
+
+ printv(1, "regression test 4 starting\n");
+
+ radix_tree_insert(&mt_tree, 0, &obj0);
+ pthread_barrier_init(&worker_barrier, NULL, 2);
+
+ if (pthread_create(&reader, NULL, reader_fn, NULL) ||
+ pthread_create(&writer, NULL, writer_fn, NULL)) {
+ perror("pthread_create");
+ exit(1);
+ }
+
+ if (pthread_join(reader, NULL) || pthread_join(writer, NULL)) {
+ perror("pthread_join");
+ exit(1);
+ }
+
+ printv(1, "regression test 4 passed\n");
+}
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index f1fe492c8e17..24b9934fb269 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -24,6 +24,8 @@ TARGETS += memory-hotplug
TARGETS += mount
TARGETS += mqueue
TARGETS += net
+TARGETS += netfilter
+TARGETS += networking/timestamping
TARGETS += nsfs
TARGETS += powerpc
TARGETS += proc
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
index d9a725478375..72c25a3cb658 100644
--- a/tools/testing/selftests/android/Makefile
+++ b/tools/testing/selftests/android/Makefile
@@ -6,7 +6,7 @@ TEST_PROGS := run.sh
include ../lib.mk
-all: khdr
+all:
@for DIR in $(SUBDIRS); do \
BUILD_TARGET=$(OUTPUT)/$$DIR; \
mkdir $$BUILD_TARGET -p; \
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 1b799e30c06d..4a9785043a39 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -27,3 +27,4 @@ test_flow_dissector
flow_dissector_load
test_netcnt
test_section_names
+test_tcpnotify_user
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index e39dfb4e7970..73aa6d8f4a2f 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -24,12 +24,13 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \
test_sock test_btf test_sockmap test_lirc_mode2_user get_cgroup_id_user \
test_socket_cookie test_cgroup_storage test_select_reuseport test_section_names \
- test_netcnt
+ test_netcnt test_tcpnotify_user
TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \
test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \
sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \
test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \
+ test_tcpnotify_kern.o \
sample_map_ret0.o test_tcpbpf_kern.o test_stacktrace_build_id.o \
sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o test_adjust_tail.o \
test_btf_haskv.o test_btf_nokv.o test_sockmap_kern.o test_tunnel_kern.o \
@@ -37,7 +38,8 @@ TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test
test_lwt_seg6local.o sendmsg4_prog.o sendmsg6_prog.o test_lirc_mode2_kern.o \
get_cgroup_id_kern.o socket_cookie_prog.o test_select_reuseport_kern.o \
test_skb_cgroup_id_kern.o bpf_flow.o netcnt_prog.o \
- test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o
+ test_sk_lookup_kern.o test_xdp_vlan.o test_queue_map.o test_stack_map.o \
+ xdp_dummy.o test_map_in_map.o
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -74,6 +76,7 @@ $(OUTPUT)/test_sock_addr: cgroup_helpers.c
$(OUTPUT)/test_socket_cookie: cgroup_helpers.c
$(OUTPUT)/test_sockmap: cgroup_helpers.c
$(OUTPUT)/test_tcpbpf_user: cgroup_helpers.c
+$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c
$(OUTPUT)/test_progs: trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
@@ -124,7 +127,14 @@ $(OUTPUT)/test_stack_map.o: test_queue_stack_map.h
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
+BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
+ $(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
+ readelf -S ./llvm_btf_verify.o | grep BTF; \
+ /bin/rm -f ./llvm_btf_verify.o)
+ifneq ($(BTF_LLVM_PROBE),)
+ CLANG_FLAGS += -g
+else
ifneq ($(BTF_LLC_PROBE),)
ifneq ($(BTF_PAHOLE_PROBE),)
ifneq ($(BTF_OBJCOPY_PROBE),)
@@ -134,6 +144,17 @@ ifneq ($(BTF_OBJCOPY_PROBE),)
endif
endif
endif
+endif
+
+# Have one program compiled without "-target bpf" to test whether libbpf loads
+# it successfully
+$(OUTPUT)/test_xdp.o: test_xdp.c
+ $(CLANG) $(CLANG_FLAGS) \
+ -O2 -emit-llvm -c $< -o - | \
+ $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
+ifeq ($(DWARF2BTF),y)
+ $(BTF_PAHOLE) -J $@
+endif
$(OUTPUT)/%.o: %.c
$(CLANG) $(CLANG_FLAGS) \
diff --git a/tools/testing/selftests/bpf/bpf_flow.c b/tools/testing/selftests/bpf/bpf_flow.c
index 107350a7821d..284660f5aa95 100644
--- a/tools/testing/selftests/bpf/bpf_flow.c
+++ b/tools/testing/selftests/bpf/bpf_flow.c
@@ -70,18 +70,18 @@ static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb,
{
void *data_end = (void *)(long)skb->data_end;
void *data = (void *)(long)skb->data;
- __u16 nhoff = skb->flow_keys->nhoff;
+ __u16 thoff = skb->flow_keys->thoff;
__u8 *hdr;
/* Verifies this variable offset does not overflow */
- if (nhoff > (USHRT_MAX - hdr_size))
+ if (thoff > (USHRT_MAX - hdr_size))
return NULL;
- hdr = data + nhoff;
+ hdr = data + thoff;
if (hdr + hdr_size <= data_end)
return hdr;
- if (bpf_skb_load_bytes(skb, nhoff, buffer, hdr_size))
+ if (bpf_skb_load_bytes(skb, thoff, buffer, hdr_size))
return NULL;
return buffer;
@@ -116,7 +116,7 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
return BPF_DROP;
}
-SEC("dissect")
+SEC("flow_dissector")
int _dissect(struct __sk_buff *skb)
{
if (!skb->vlan_present)
@@ -158,13 +158,13 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
/* Only inspect standard GRE packets with version 0 */
return BPF_OK;
- keys->nhoff += sizeof(*gre); /* Step over GRE Flags and Proto */
+ keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */
if (GRE_IS_CSUM(gre->flags))
- keys->nhoff += 4; /* Step over chksum and Padding */
+ keys->thoff += 4; /* Step over chksum and Padding */
if (GRE_IS_KEY(gre->flags))
- keys->nhoff += 4; /* Step over key */
+ keys->thoff += 4; /* Step over key */
if (GRE_IS_SEQ(gre->flags))
- keys->nhoff += 4; /* Step over sequence number */
+ keys->thoff += 4; /* Step over sequence number */
keys->is_encap = true;
@@ -174,7 +174,7 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
if (!eth)
return BPF_DROP;
- keys->nhoff += sizeof(*eth);
+ keys->thoff += sizeof(*eth);
return parse_eth_proto(skb, eth->h_proto);
} else {
@@ -191,7 +191,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
if ((__u8 *)tcp + (tcp->doff << 2) > data_end)
return BPF_DROP;
- keys->thoff = keys->nhoff;
keys->sport = tcp->source;
keys->dport = tcp->dest;
return BPF_OK;
@@ -201,7 +200,6 @@ static __always_inline int parse_ip_proto(struct __sk_buff *skb, __u8 proto)
if (!udp)
return BPF_DROP;
- keys->thoff = keys->nhoff;
keys->sport = udp->source;
keys->dport = udp->dest;
return BPF_OK;
@@ -252,8 +250,8 @@ PROG(IP)(struct __sk_buff *skb)
keys->ipv4_src = iph->saddr;
keys->ipv4_dst = iph->daddr;
- keys->nhoff += iph->ihl << 2;
- if (data + keys->nhoff > data_end)
+ keys->thoff += iph->ihl << 2;
+ if (data + keys->thoff > data_end)
return BPF_DROP;
if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) {
@@ -285,7 +283,7 @@ PROG(IPV6)(struct __sk_buff *skb)
keys->addr_proto = ETH_P_IPV6;
memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr));
- keys->nhoff += sizeof(struct ipv6hdr);
+ keys->thoff += sizeof(struct ipv6hdr);
return parse_ipv6_proto(skb, ip6h->nexthdr);
}
@@ -301,7 +299,7 @@ PROG(IPV6OP)(struct __sk_buff *skb)
/* hlen is in 8-octets and does not include the first 8 bytes
* of the header
*/
- skb->flow_keys->nhoff += (1 + ip6h->hdrlen) << 3;
+ skb->flow_keys->thoff += (1 + ip6h->hdrlen) << 3;
return parse_ipv6_proto(skb, ip6h->nexthdr);
}
@@ -315,7 +313,7 @@ PROG(IPV6FR)(struct __sk_buff *skb)
if (!fragh)
return BPF_DROP;
- keys->nhoff += sizeof(*fragh);
+ keys->thoff += sizeof(*fragh);
keys->is_frag = true;
if (!(fragh->frag_off & bpf_htons(IP6_OFFSET)))
keys->is_first_frag = true;
@@ -341,7 +339,7 @@ PROG(VLAN)(struct __sk_buff *skb)
__be16 proto;
/* Peek back to see if single or double-tagging */
- if (bpf_skb_load_bytes(skb, keys->nhoff - sizeof(proto), &proto,
+ if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
sizeof(proto)))
return BPF_DROP;
@@ -354,14 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
return BPF_DROP;
- keys->nhoff += sizeof(*vlan);
+ keys->thoff += sizeof(*vlan);
}
vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
if (!vlan)
return BPF_DROP;
- keys->nhoff += sizeof(*vlan);
+ keys->thoff += sizeof(*vlan);
/* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
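
The bpf_flow.c rename above makes flow_keys->thoff the single running parse offset: each header parser steps thoff past its own header before handing off to the next, so the old copy of nhoff into thoff at the transport layer disappears. A schematic of that bookkeeping convention (plain C, types pared down):

	#include <stdio.h>

	struct flow_keys { unsigned short thoff; };

	static void parse_ipv4(struct flow_keys *keys, unsigned int ihl)
	{
		keys->thoff += ihl << 2;	/* step over the IPv4 header */
	}

	static void parse_gre(struct flow_keys *keys, int has_csum, int has_key)
	{
		keys->thoff += 4;		/* GRE flags + proto */
		if (has_csum)
			keys->thoff += 4;	/* checksum + padding */
		if (has_key)
			keys->thoff += 4;	/* key */
	}

	int main(void)
	{
		struct flow_keys keys = { 0 };

		parse_ipv4(&keys, 5);		/* 20-byte header */
		parse_gre(&keys, 1, 0);
		/* the transport parser now reads directly at keys.thoff */
		printf("thoff = %u\n", keys.thoff);	/* 28 */
		return 0;
	}
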
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 686e57ce40f4..6c77cf7bedce 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -113,6 +113,8 @@ static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) =
(void *) BPF_FUNC_msg_pull_data;
static int (*bpf_msg_push_data)(void *ctx, int start, int end, int flags) =
(void *) BPF_FUNC_msg_push_data;
+static int (*bpf_msg_pop_data)(void *ctx, int start, int cut, int flags) =
+ (void *) BPF_FUNC_msg_pop_data;
static int (*bpf_bind)(void *ctx, void *addr, int addr_len) =
(void *) BPF_FUNC_bind;
static int (*bpf_xdp_adjust_tail)(void *ctx, int offset) =
@@ -154,12 +156,12 @@ static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
(void *) BPF_FUNC_skb_ancestor_cgroup_id;
static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
struct bpf_sock_tuple *tuple,
- int size, unsigned int netns_id,
+ int size, unsigned long long netns_id,
unsigned long long flags) =
(void *) BPF_FUNC_sk_lookup_tcp;
static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
struct bpf_sock_tuple *tuple,
- int size, unsigned int netns_id,
+ int size, unsigned long long netns_id,
unsigned long long flags) =
(void *) BPF_FUNC_sk_lookup_udp;
static int (*bpf_sk_release)(struct bpf_sock *sk) =
@@ -168,6 +170,8 @@ static int (*bpf_skb_vlan_push)(void *ctx, __be16 vlan_proto, __u16 vlan_tci) =
(void *) BPF_FUNC_skb_vlan_push;
static int (*bpf_skb_vlan_pop)(void *ctx) =
(void *) BPF_FUNC_skb_vlan_pop;
+static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
+ (void *) BPF_FUNC_rc_pointer_rel;
/* llvm builtin functions that eBPF C program may use to
* emit BPF_LD_ABS and BPF_LD_IND instructions
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 7f90d3645af8..37f947ec44ed 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -22,3 +22,4 @@ CONFIG_NET_CLS_FLOWER=m
CONFIG_LWTUNNEL=y
CONFIG_BPF_STREAM_PARSER=y
CONFIG_XDP_SOCKETS=y
+CONFIG_FTRACE_SYSCALLS=y
diff --git a/tools/testing/selftests/bpf/connect4_prog.c b/tools/testing/selftests/bpf/connect4_prog.c
index 5a88a681d2ab..1fd244d35ba9 100644
--- a/tools/testing/selftests/bpf/connect4_prog.c
+++ b/tools/testing/selftests/bpf/connect4_prog.c
@@ -21,23 +21,50 @@ int _version SEC("version") = 1;
SEC("cgroup/connect4")
int connect_v4_prog(struct bpf_sock_addr *ctx)
{
+ struct bpf_sock_tuple tuple = {};
struct sockaddr_in sa;
+ struct bpf_sock *sk;
+
+ /* Verify that new destination is available. */
+ memset(&tuple.ipv4.saddr, 0, sizeof(tuple.ipv4.saddr));
+ memset(&tuple.ipv4.sport, 0, sizeof(tuple.ipv4.sport));
+
+ tuple.ipv4.daddr = bpf_htonl(DST_REWRITE_IP4);
+ tuple.ipv4.dport = bpf_htons(DST_REWRITE_PORT4);
+
+ if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
+ return 0;
+ else if (ctx->type == SOCK_STREAM)
+ sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
+ BPF_F_CURRENT_NETNS, 0);
+ else
+ sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv4),
+ BPF_F_CURRENT_NETNS, 0);
+
+ if (!sk)
+ return 0;
+
+ if (sk->src_ip4 != tuple.ipv4.daddr ||
+ sk->src_port != DST_REWRITE_PORT4) {
+ bpf_sk_release(sk);
+ return 0;
+ }
+
+ bpf_sk_release(sk);
/* Rewrite destination. */
ctx->user_ip4 = bpf_htonl(DST_REWRITE_IP4);
ctx->user_port = bpf_htons(DST_REWRITE_PORT4);
- if (ctx->type == SOCK_DGRAM || ctx->type == SOCK_STREAM) {
- ///* Rewrite source. */
- memset(&sa, 0, sizeof(sa));
+ /* Rewrite source. */
+ memset(&sa, 0, sizeof(sa));
- sa.sin_family = AF_INET;
- sa.sin_port = bpf_htons(0);
- sa.sin_addr.s_addr = bpf_htonl(SRC_REWRITE_IP4);
+ sa.sin_family = AF_INET;
+ sa.sin_port = bpf_htons(0);
+ sa.sin_addr.s_addr = bpf_htonl(SRC_REWRITE_IP4);
- if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
- return 0;
- }
+ if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
+ return 0;
return 1;
}
diff --git a/tools/testing/selftests/bpf/connect6_prog.c b/tools/testing/selftests/bpf/connect6_prog.c
index 8ea3f7d12dee..26397ab7b3c7 100644
--- a/tools/testing/selftests/bpf/connect6_prog.c
+++ b/tools/testing/selftests/bpf/connect6_prog.c
@@ -29,7 +29,43 @@ int _version SEC("version") = 1;
SEC("cgroup/connect6")
int connect_v6_prog(struct bpf_sock_addr *ctx)
{
+ struct bpf_sock_tuple tuple = {};
struct sockaddr_in6 sa;
+ struct bpf_sock *sk;
+
+ /* Verify that new destination is available. */
+ memset(&tuple.ipv6.saddr, 0, sizeof(tuple.ipv6.saddr));
+ memset(&tuple.ipv6.sport, 0, sizeof(tuple.ipv6.sport));
+
+ tuple.ipv6.daddr[0] = bpf_htonl(DST_REWRITE_IP6_0);
+ tuple.ipv6.daddr[1] = bpf_htonl(DST_REWRITE_IP6_1);
+ tuple.ipv6.daddr[2] = bpf_htonl(DST_REWRITE_IP6_2);
+ tuple.ipv6.daddr[3] = bpf_htonl(DST_REWRITE_IP6_3);
+
+ tuple.ipv6.dport = bpf_htons(DST_REWRITE_PORT6);
+
+ if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
+ return 0;
+ else if (ctx->type == SOCK_STREAM)
+ sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv6),
+ BPF_F_CURRENT_NETNS, 0);
+ else
+ sk = bpf_sk_lookup_udp(ctx, &tuple, sizeof(tuple.ipv6),
+ BPF_F_CURRENT_NETNS, 0);
+
+ if (!sk)
+ return 0;
+
+ if (sk->src_ip6[0] != tuple.ipv6.daddr[0] ||
+ sk->src_ip6[1] != tuple.ipv6.daddr[1] ||
+ sk->src_ip6[2] != tuple.ipv6.daddr[2] ||
+ sk->src_ip6[3] != tuple.ipv6.daddr[3] ||
+ sk->src_port != DST_REWRITE_PORT6) {
+ bpf_sk_release(sk);
+ return 0;
+ }
+
+ bpf_sk_release(sk);
/* Rewrite destination. */
ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
@@ -39,21 +75,19 @@ int connect_v6_prog(struct bpf_sock_addr *ctx)
ctx->user_port = bpf_htons(DST_REWRITE_PORT6);
- if (ctx->type == SOCK_DGRAM || ctx->type == SOCK_STREAM) {
- /* Rewrite source. */
- memset(&sa, 0, sizeof(sa));
+ /* Rewrite source. */
+ memset(&sa, 0, sizeof(sa));
- sa.sin6_family = AF_INET6;
- sa.sin6_port = bpf_htons(0);
+ sa.sin6_family = AF_INET6;
+ sa.sin6_port = bpf_htons(0);
- sa.sin6_addr.s6_addr32[0] = bpf_htonl(SRC_REWRITE_IP6_0);
- sa.sin6_addr.s6_addr32[1] = bpf_htonl(SRC_REWRITE_IP6_1);
- sa.sin6_addr.s6_addr32[2] = bpf_htonl(SRC_REWRITE_IP6_2);
- sa.sin6_addr.s6_addr32[3] = bpf_htonl(SRC_REWRITE_IP6_3);
+ sa.sin6_addr.s6_addr32[0] = bpf_htonl(SRC_REWRITE_IP6_0);
+ sa.sin6_addr.s6_addr32[1] = bpf_htonl(SRC_REWRITE_IP6_1);
+ sa.sin6_addr.s6_addr32[2] = bpf_htonl(SRC_REWRITE_IP6_2);
+ sa.sin6_addr.s6_addr32[3] = bpf_htonl(SRC_REWRITE_IP6_3);
- if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
- return 0;
- }
+ if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0)
+ return 0;
return 1;
}
diff --git a/tools/testing/selftests/bpf/netcnt_prog.c b/tools/testing/selftests/bpf/netcnt_prog.c
index 1198abca1360..9f741e69cebe 100644
--- a/tools/testing/selftests/bpf/netcnt_prog.c
+++ b/tools/testing/selftests/bpf/netcnt_prog.c
@@ -16,12 +16,18 @@ struct bpf_map_def SEC("maps") percpu_netcnt = {
.value_size = sizeof(struct percpu_net_cnt),
};
+BPF_ANNOTATE_KV_PAIR(percpu_netcnt, struct bpf_cgroup_storage_key,
+ struct percpu_net_cnt);
+
struct bpf_map_def SEC("maps") netcnt = {
.type = BPF_MAP_TYPE_CGROUP_STORAGE,
.key_size = sizeof(struct bpf_cgroup_storage_key),
.value_size = sizeof(struct net_cnt),
};
+BPF_ANNOTATE_KV_PAIR(netcnt, struct bpf_cgroup_storage_key,
+ struct net_cnt);
+
SEC("cgroup/skb")
int bpf_nextcnt(struct __sk_buff *skb)
{
diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
index 5f377ec53f2f..3c789d03b629 100644
--- a/tools/testing/selftests/bpf/test_align.c
+++ b/tools/testing/selftests/bpf/test_align.c
@@ -620,8 +620,8 @@ static int do_test_single(struct bpf_align_test *test)
prog_len = probe_filter_length(prog);
fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
- prog, prog_len, 1, "GPL", 0,
- bpf_vlog, sizeof(bpf_vlog), 2);
+ prog, prog_len, BPF_F_STRICT_ALIGNMENT,
+ "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2);
if (fd_prog < 0 && test->result != REJECT) {
printf("Failed to load program.\n");
printf("%s", bpf_vlog);
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index f42b3396d622..8bcd38010582 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -5,6 +5,8 @@
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/kernel.h>
+#include <linux/filter.h>
+#include <linux/unistd.h>
#include <bpf/bpf.h>
#include <sys/resource.h>
#include <libelf.h>
@@ -22,6 +24,9 @@
#include "bpf_rlimit.h"
#include "bpf_util.h"
+#define MAX_INSNS 512
+#define MAX_SUBPROGS 16
+
static uint32_t pass_cnt;
static uint32_t error_cnt;
static uint32_t skip_cnt;
@@ -60,8 +65,8 @@ static int __base_pr(const char *format, ...)
return err;
}
-#define BTF_INFO_ENC(kind, root, vlen) \
- ((!!(root) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
+#define BTF_INFO_ENC(kind, kind_flag, vlen) \
+ ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) \
(name), (info), (size_or_type)
@@ -81,28 +86,44 @@ static int __base_pr(const char *format, ...)
#define BTF_MEMBER_ENC(name, type, bits_offset) \
(name), (type), (bits_offset)
#define BTF_ENUM_ENC(name, val) (name), (val)
+#define BTF_MEMBER_OFFSET(bitfield_size, bits_offset) \
+ ((bitfield_size) << 24 | (bits_offset))
#define BTF_TYPEDEF_ENC(name, type) \
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), type)
-#define BTF_PTR_ENC(name, type) \
- BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), type)
+#define BTF_PTR_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), type)
+
+#define BTF_CONST_ENC(type) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), type)
+
+#define BTF_FUNC_PROTO_ENC(ret_type, nargs) \
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, nargs), ret_type)
+
+#define BTF_FUNC_PROTO_ARG_ENC(name, type) \
+ (name), (type)
+
+#define BTF_FUNC_ENC(name, func_proto) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), func_proto)
#define BTF_END_RAW 0xdeadbeef
#define NAME_TBD 0xdeadb33f
-#define MAX_NR_RAW_TYPES 1024
+#define MAX_NR_RAW_U32 1024
#define BTF_LOG_BUF_SIZE 65535
static struct args {
unsigned int raw_test_num;
unsigned int file_test_num;
unsigned int get_info_test_num;
+ unsigned int info_raw_test_num;
bool raw_test;
bool file_test;
bool get_info_test;
bool pprint_test;
bool always_log;
+ bool info_raw_test;
} args;
static char btf_log_buf[BTF_LOG_BUF_SIZE];
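
The BTF_INFO_ENC() rewrite above only renames the top bit from root to kind_flag; the packing is unchanged: kind_flag in bit 31, the kind starting at bit 24, and vlen in the low 16 bits (BTF_MAX_VLEN is 0xffff). A minimal encode/decode sketch mirroring that layout (the 0x0f kind mask matches this era's uapi):

	#include <stdio.h>

	/* kind_flag in bit 31, kind at bit 24, vlen in the low 16 bits */
	#define INFO_ENC(kind, kind_flag, vlen) \
		(((unsigned int)!!(kind_flag) << 31) | ((kind) << 24) | \
		 ((vlen) & 0xffff))

	int main(void)
	{
		unsigned int info = INFO_ENC(4 /* BTF_KIND_STRUCT */, 1, 3);

		printf("kind_flag=%u kind=%u vlen=%u\n",
		       info >> 31, (info >> 24) & 0x0f, info & 0xffff);
		return 0;
	}
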
@@ -118,7 +139,7 @@ struct btf_raw_test {
const char *str_sec;
const char *map_name;
const char *err_str;
- __u32 raw_types[MAX_NR_RAW_TYPES];
+ __u32 raw_types[MAX_NR_RAW_U32];
__u32 str_sec_size;
enum bpf_map_type map_type;
__u32 key_size;
@@ -137,6 +158,9 @@ struct btf_raw_test {
int str_len_delta;
};
+#define BTF_STR_SEC(str) \
+ .str_sec = str, .str_sec_size = sizeof(str)
+
static struct btf_raw_test raw_tests[] = {
/* enum E {
* E0,
@@ -432,11 +456,11 @@ static struct btf_raw_test raw_tests[] = {
/* const void* */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
/* typedef const void * const_void_ptr */
- BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
- /* struct A { */ /* [4] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */
+ /* struct A { */ /* [5] */
BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)),
/* const_void_ptr m; */
- BTF_MEMBER_ENC(NAME_TBD, 3, 0),
+ BTF_MEMBER_ENC(NAME_TBD, 4, 0),
/* } */
BTF_END_RAW,
},
@@ -494,10 +518,10 @@ static struct btf_raw_test raw_tests[] = {
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
/* const void* */ /* [3] */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
- /* typedef const void * const_void_ptr */ /* [4] */
- BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
- /* const_void_ptr[4] */ /* [5] */
- BTF_TYPE_ARRAY_ENC(3, 1, 4),
+ /* typedef const void * const_void_ptr */
+ BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [4] */
+ /* const_void_ptr[4] */
+ BTF_TYPE_ARRAY_ENC(4, 1, 4), /* [5] */
BTF_END_RAW,
},
.str_sec = "\0const_void_ptr",
@@ -1293,6 +1317,367 @@ static struct btf_raw_test raw_tests[] = {
},
{
+ .descr = "typedef (invalid name, name_off = 0)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(0, 1), /* [2] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0__int",
+ .str_sec_size = sizeof("\0__int"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "typedef_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "typedef (invalid name, invalid identifier)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [2] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0__!int",
+ .str_sec_size = sizeof("\0__!int"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "typedef_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "ptr type (invalid name, name_off <> 0)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0__int",
+ .str_sec_size = sizeof("\0__int"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "ptr_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "volatile type (invalid name, name_off <> 0)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 1), /* [2] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0__int",
+ .str_sec_size = sizeof("\0__int"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "volatile_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "const type (invalid name, name_off <> 0)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1), /* [2] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0__int",
+ .str_sec_size = sizeof("\0__int"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "const_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "restrict type (invalid name, name_off <> 0)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1), /* [2] */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), 2), /* [3] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0__int",
+ .str_sec_size = sizeof("\0__int"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "restrict_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "fwd type (invalid name, name_off = 0)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0__skb",
+ .str_sec_size = sizeof("\0__skb"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "fwd_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "fwd type (invalid name, invalid identifier)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0), /* [2] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0__!skb",
+ .str_sec_size = sizeof("\0__!skb"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "fwd_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "array type (invalid name, name_off <> 0)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), /* [2] */
+ BTF_ARRAY_ENC(1, 1, 4),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0__skb",
+ .str_sec_size = sizeof("\0__skb"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "array_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "struct type (name_off = 0)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0,
+ BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A",
+ .str_sec_size = sizeof("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct type (invalid name, invalid identifier)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A!\0B",
+ .str_sec_size = sizeof("\0A!\0B"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "struct member (name_off = 0)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0,
+ BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A",
+ .str_sec_size = sizeof("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "struct member (invalid name, invalid identifier)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0B*",
+ .str_sec_size = sizeof("\0A\0B*"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "enum type (name_off = 0)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0,
+ BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+ sizeof(int)), /* [2] */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A\0B",
+ .str_sec_size = sizeof("\0A\0B"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "enum_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "enum type (invalid name, invalid identifier)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+ sizeof(int)), /* [2] */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A!\0B",
+ .str_sec_size = sizeof("\0A!\0B"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "enum_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "enum member (invalid name, name_off = 0)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0,
+ BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+ sizeof(int)), /* [2] */
+ BTF_ENUM_ENC(0, 0),
+ BTF_END_RAW,
+ },
+ .str_sec = "",
+ .str_sec_size = sizeof(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "enum_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "enum member (invalid name, invalid identifier)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0,
+ BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+ sizeof(int)), /* [2] */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0A!",
+ .str_sec_size = sizeof("\0A!"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "enum_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+{
.descr = "arraymap invalid btf key (a bit field)",
.raw_types = {
/* int */ /* [1] */
@@ -1374,6 +1759,954 @@ static struct btf_raw_test raw_tests[] = {
.map_create_err = true,
},
+{
+ .descr = "func proto (int (*)(int, unsigned int))",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* int (*)(int, unsigned int) */
+ BTF_FUNC_PROTO_ENC(1, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(0, 1),
+ BTF_FUNC_PROTO_ARG_ENC(0, 2),
+ BTF_END_RAW,
+ },
+ .str_sec = "",
+ .str_sec_size = sizeof(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "func proto (vararg)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(int, unsigned int, ...) */
+ BTF_FUNC_PROTO_ENC(0, 3), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(0, 1),
+ BTF_FUNC_PROTO_ARG_ENC(0, 2),
+ BTF_FUNC_PROTO_ARG_ENC(0, 0),
+ BTF_END_RAW,
+ },
+ .str_sec = "",
+ .str_sec_size = sizeof(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "func proto (vararg with name)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(int a, unsigned int b, ... c) */
+ BTF_FUNC_PROTO_ENC(0, 3), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 0),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a\0b\0c",
+ .str_sec_size = sizeof("\0a\0b\0c"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid arg#3",
+},
+
+{
+ .descr = "func proto (arg after vararg)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(int a, ..., unsigned int b) */
+ BTF_FUNC_PROTO_ENC(0, 3), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(0, 0),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a\0b",
+ .str_sec_size = sizeof("\0a\0b"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid arg#2",
+},
+
+{
+ .descr = "func proto (CONST=>TYPEDEF=>PTR=>FUNC_PROTO)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* typedef void (*func_ptr)(int, unsigned int) */
+ BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [3] */
+ /* const func_ptr */
+ BTF_CONST_ENC(3), /* [4] */
+ BTF_PTR_ENC(6), /* [5] */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [6] */
+ BTF_FUNC_PROTO_ARG_ENC(0, 1),
+ BTF_FUNC_PROTO_ARG_ENC(0, 2),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0func_ptr",
+ .str_sec_size = sizeof("\0func_ptr"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [4] */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [5] */
+ BTF_FUNC_PROTO_ARG_ENC(0, 1),
+ BTF_FUNC_PROTO_ARG_ENC(0, 2),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0func_typedef",
+ .str_sec_size = sizeof("\0func_typedef"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid type_id",
+},
+
+{
+ .descr = "func proto (btf_resolve(arg))",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* void (*)(const void *) */
+ BTF_FUNC_PROTO_ENC(0, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(0, 3),
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_PTR_ENC(0), /* [4] */
+ BTF_END_RAW,
+ },
+ .str_sec = "",
+ .str_sec_size = sizeof(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "func proto (Not all arg has name)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(int, unsigned int b) */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(0, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0b",
+ .str_sec_size = sizeof("\0b"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "func proto (Bad arg name_off)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(int a, unsigned int <bad_name_off>) */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(0xffffffff, 2),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a",
+ .str_sec_size = sizeof("\0a"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid arg#2",
+},
+
+{
+ .descr = "func proto (Bad arg name)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(int a, unsigned int !!!) */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a\0!!!",
+ .str_sec_size = sizeof("\0a\0!!!"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid arg#2",
+},
+
+{
+ .descr = "func proto (Invalid return type)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* <bad_ret_type> (*)(int, unsigned int) */
+ BTF_FUNC_PROTO_ENC(100, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(0, 1),
+ BTF_FUNC_PROTO_ARG_ENC(0, 2),
+ BTF_END_RAW,
+ },
+ .str_sec = "",
+ .str_sec_size = sizeof(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid return type",
+},
+
+{
+ .descr = "func proto (with func name)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void func_proto(int, unsigned int) */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 2), 0), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(0, 1),
+ BTF_FUNC_PROTO_ARG_ENC(0, 2),
+ BTF_END_RAW,
+ },
+ .str_sec = "\0func_proto",
+ .str_sec_size = sizeof("\0func_proto"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "func proto (const void arg)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(const void) */
+ BTF_FUNC_PROTO_ENC(0, 1), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(0, 4),
+ BTF_CONST_ENC(0), /* [4] */
+ BTF_END_RAW,
+ },
+ .str_sec = "",
+ .str_sec_size = sizeof(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid arg#1",
+},
+
+{
+ .descr = "func (void func(int a, unsigned int b))",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(int a, unsigned int b) */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ /* void func(int a, unsigned int b) */
+ BTF_FUNC_ENC(NAME_TBD, 3), /* [4] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a\0b\0func",
+ .str_sec_size = sizeof("\0a\0b\0func"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "func (No func name)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(int a, unsigned int b) */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ /* void <no_name>(int a, unsigned int b) */
+ BTF_FUNC_ENC(0, 3), /* [4] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a\0b",
+ .str_sec_size = sizeof("\0a\0b"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "func (Invalid func name)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(int a, unsigned int b) */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ /* void !!!(int a, unsigned int b) */
+ BTF_FUNC_ENC(NAME_TBD, 3), /* [4] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a\0b\0!!!",
+ .str_sec_size = sizeof("\0a\0b\0!!!"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid name",
+},
+
+{
+ .descr = "func (Some arg has no name)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(int a, unsigned int) */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(0, 2),
+ /* void func(int a, unsigned int) */
+ BTF_FUNC_ENC(NAME_TBD, 3), /* [4] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a\0func",
+ .str_sec_size = sizeof("\0a\0func"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid arg#2",
+},
+
+{
+ .descr = "func (Non zero vlen)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */
+ /* void (*)(int a, unsigned int b) */
+ BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ /* void func(int a, unsigned int b) */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 2), 3), /* [4] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a\0b\0func",
+ .str_sec_size = sizeof("\0a\0b\0func"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "vlen != 0",
+},
+
+{
+ .descr = "func (Not referring to FUNC_PROTO)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_ENC(NAME_TBD, 1), /* [2] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0func",
+ .str_sec_size = sizeof("\0func"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid type_id",
+},
+
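+/* kind_flag checks: the flag is accepted only on struct, union and fwd
+ * types, so setting it on any other kind must fail the load with
+ * "Invalid btf_info kind_flag".
+ */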
+{
+ .descr = "invalid int kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_INT, 1, 0), 4), /* [2] */
+ BTF_INT_ENC(0, 0, 32),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "int_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+
+{
+ .descr = "invalid ptr kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 1, 0), 1), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "ptr_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+
+{
+ .descr = "invalid array kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 1, 0), 0), /* [2] */
+ BTF_ARRAY_ENC(1, 1, 1),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "array_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+
+{
+ .descr = "invalid enum kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 1, 1), 4), /* [2] */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "enum_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+
+{
+ .descr = "valid fwd kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_FWD, 1, 0), 0), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "fwd_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "invalid typedef kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(NAME_TBD,
+ BTF_INFO_ENC(BTF_KIND_TYPEDEF, 1, 0), 1), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "typedef_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+
+{
+ .descr = "invalid volatile kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 1, 0), 1), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "volatile_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+
+{
+ .descr = "invalid const kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 1, 0), 1), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "const_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+
+{
+ .descr = "invalid restrict kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_RESTRICT, 1, 0), 1), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "restrict_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+
+{
+ .descr = "invalid func kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 0), 0), /* [2] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_FUNC, 1, 0), 2), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+
+{
+ .descr = "invalid func_proto kind_flag",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 1, 0), 0), /* [2] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC(""),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "func_proto_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid btf_info kind_flag",
+},
+
+{
+ .descr = "valid struct, kind_flag, bitfield_size = 0",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 8), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(0, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(0, 32)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "valid struct, kind_flag, int member, bitfield_size != 0",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 4)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "valid union, kind_flag, int member, bitfield_size != 0",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(4, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "union_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "valid struct, kind_flag, enum member, bitfield_size != 0",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4),/* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 4)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B\0C"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "valid union, kind_flag, enum member, bitfield_size != 0",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(4, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B\0C"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "union_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "valid struct, kind_flag, typedef member, bitfield_size != 0",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4),/* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 4, BTF_MEMBER_OFFSET(4, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 5, BTF_MEMBER_OFFSET(4, 4)),
+ BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B\0C\0D\0E"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "valid union, kind_flag, typedef member, bitfield_size != 0",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [2] */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 4), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 4, BTF_MEMBER_OFFSET(4, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 5, BTF_MEMBER_OFFSET(4, 0)),
+ BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [4] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [5] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B\0C\0D\0E"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "union_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+},
+
+{
+ .descr = "invalid struct, kind_flag, bitfield_size greater than struct size",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 20)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Member exceeds struct_size",
+},
+
+{
+ .descr = "invalid struct, kind_flag, bitfield base_type int not regular",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 20, 4), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(20, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(20, 20)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid member base type",
+},
+
+{
+ .descr = "invalid struct, kind_flag, base_type int not regular",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 12, 4), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 4), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(8, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(8, 8)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid member base type",
+},
+
+{
+ .descr = "invalid union, kind_flag, bitfield_size greater than struct size",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 1, 2), 2), /* [2] */
+ BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(8, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 1, BTF_MEMBER_OFFSET(20, 0)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "union_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Member exceeds struct_size",
+},
+
+{
+ .descr = "invalid struct, kind_flag, int member, bitfield_size = 0, wrong byte alignment",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 12), /* [3] */
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 2, BTF_MEMBER_OFFSET(0, 36)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid member offset",
+},
+
+{
+ .descr = "invalid struct, kind_flag, enum member, bitfield_size = 0, wrong byte alignment",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [2] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4), /* [3] */
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 2), 12), /* [4] */
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(0, 0)),
+ BTF_MEMBER_ENC(NAME_TBD, 3, BTF_MEMBER_OFFSET(0, 36)),
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0A\0B\0C"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "struct_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 4,
+ .btf_load_err = true,
+ .err_str = "Invalid member offset",
+},
+
}; /* struct btf_raw_test raw_tests[] */
static const char *get_next_str(const char *start, const char *end)
@@ -1381,11 +2714,11 @@ static const char *get_next_str(const char *start, const char *end)
return start < end - 1 ? start + 1 : NULL;
}
-static int get_type_sec_size(const __u32 *raw_types)
+static int get_raw_sec_size(const __u32 *raw_types)
{
int i;
- for (i = MAX_NR_RAW_TYPES - 1;
+ for (i = MAX_NR_RAW_U32 - 1;
i >= 0 && raw_types[i] != BTF_END_RAW;
i--)
;
@@ -1397,7 +2730,8 @@ static void *btf_raw_create(const struct btf_header *hdr,
const __u32 *raw_types,
const char *str,
unsigned int str_sec_size,
- unsigned int *btf_size)
+ unsigned int *btf_size,
+ const char **ret_next_str)
{
const char *next_str = str, *end_str = str + str_sec_size;
unsigned int size_needed, offset;
@@ -1406,7 +2740,7 @@ static void *btf_raw_create(const struct btf_header *hdr,
uint32_t *ret_types;
void *raw_btf;
- type_sec_size = get_type_sec_size(raw_types);
+ type_sec_size = get_raw_sec_size(raw_types);
if (CHECK(type_sec_size < 0, "Cannot get nr_raw_types"))
return NULL;
@@ -1445,6 +2779,8 @@ static void *btf_raw_create(const struct btf_header *hdr,
ret_hdr->str_len = str_sec_size;
*btf_size = size_needed;
+ if (ret_next_str)
+ *ret_next_str = next_str;
return raw_btf;
}
@@ -1464,7 +2800,7 @@ static int do_test_raw(unsigned int test_num)
test->raw_types,
test->str_sec,
test->str_sec_size,
- &raw_btf_size);
+ &raw_btf_size, NULL);
if (!raw_btf)
return -1;
@@ -1541,7 +2877,7 @@ static int test_raw(void)
struct btf_get_info_test {
const char *descr;
const char *str_sec;
- __u32 raw_types[MAX_NR_RAW_TYPES];
+ __u32 raw_types[MAX_NR_RAW_U32];
__u32 str_sec_size;
int btf_size_delta;
int (*special_test)(unsigned int test_num);
@@ -1621,7 +2957,7 @@ static int test_big_btf_info(unsigned int test_num)
test->raw_types,
test->str_sec,
test->str_sec_size,
- &raw_btf_size);
+ &raw_btf_size, NULL);
if (!raw_btf)
return -1;
@@ -1705,7 +3041,7 @@ static int test_btf_id(unsigned int test_num)
test->raw_types,
test->str_sec,
test->str_sec_size,
- &raw_btf_size);
+ &raw_btf_size, NULL);
if (!raw_btf)
return -1;
@@ -1843,7 +3179,7 @@ static int do_test_get_info(unsigned int test_num)
test->raw_types,
test->str_sec,
test->str_sec_size,
- &raw_btf_size);
+ &raw_btf_size, NULL);
if (!raw_btf)
return -1;
@@ -1940,13 +3276,13 @@ static struct btf_file_test file_tests[] = {
},
};
-static int file_has_btf_elf(const char *fn)
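+/* Scans fn's ELF sections: returns 1 if a .BTF section is found, 0 if
+ * not (-1 on error), and sets *has_btf_ext when a .BTF.ext section is
+ * also present so the caller can skip the func_info checks without it.
+ */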
+static int file_has_btf_elf(const char *fn, bool *has_btf_ext)
{
Elf_Scn *scn = NULL;
GElf_Ehdr ehdr;
+ int ret = 0;
int elf_fd;
Elf *elf;
- int ret;
if (CHECK(elf_version(EV_CURRENT) == EV_NONE,
"elf_version(EV_CURRENT) == EV_NONE"))
@@ -1978,14 +3314,12 @@ static int file_has_btf_elf(const char *fn)
}
sh_name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
- if (!strcmp(sh_name, BTF_ELF_SEC)) {
+ if (!strcmp(sh_name, BTF_ELF_SEC))
ret = 1;
- goto done;
- }
+ if (!strcmp(sh_name, BTF_EXT_ELF_SEC))
+ *has_btf_ext = true;
}
- ret = 0;
-
done:
close(elf_fd);
elf_end(elf);
@@ -1995,15 +3329,24 @@ done:
static int do_test_file(unsigned int test_num)
{
const struct btf_file_test *test = &file_tests[test_num - 1];
+ const char *expected_fnames[] = {"_dummy_tracepoint",
+ "test_long_fname_1",
+ "test_long_fname_2"};
+ struct bpf_prog_info info = {};
struct bpf_object *obj = NULL;
+ struct bpf_func_info *finfo;
struct bpf_program *prog;
+ __u32 info_len, rec_size;
+ bool has_btf_ext = false;
+ struct btf *btf = NULL;
+ void *func_info = NULL;
struct bpf_map *map;
- int err;
+ int i, err, prog_fd;
fprintf(stderr, "BTF libbpf test[%u] (%s): ", test_num,
test->file);
- err = file_has_btf_elf(test->file);
+ err = file_has_btf_elf(test->file, &has_btf_ext);
if (err == -1)
return err;
@@ -2031,6 +3374,7 @@ static int do_test_file(unsigned int test_num)
err = bpf_object__load(obj);
if (CHECK(err < 0, "bpf_object__load: %d", err))
goto done;
+ prog_fd = bpf_program__fd(prog);
map = bpf_object__find_map_by_name(obj, "btf_map");
if (CHECK(!map, "btf_map not found")) {
@@ -2045,9 +3389,100 @@ static int do_test_file(unsigned int test_num)
test->btf_kv_notfound))
goto done;
+ if (!has_btf_ext)
+ goto skip;
+
+ /* get necessary program info */
+ info_len = sizeof(struct bpf_prog_info);
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+
+ if (CHECK(err == -1, "invalid get info (1st) errno:%d", errno)) {
+ fprintf(stderr, "%s\n", btf_log_buf);
+ err = -1;
+ goto done;
+ }
+ if (CHECK(info.nr_func_info != 3,
+ "incorrect info.nr_func_info (1st) %d",
+ info.nr_func_info)) {
+ err = -1;
+ goto done;
+ }
+ rec_size = info.func_info_rec_size;
+ if (CHECK(rec_size != sizeof(struct bpf_func_info),
+ "incorrect info.func_info_rec_size (1st) %d\n", rec_size)) {
+ err = -1;
+ goto done;
+ }
+
+ func_info = malloc(info.nr_func_info * rec_size);
+ if (CHECK(!func_info, "out of memory")) {
+ err = -1;
+ goto done;
+ }
+
+ /* reset info to only retrieve func_info related data */
+ memset(&info, 0, sizeof(info));
+ info.nr_func_info = 3;
+ info.func_info_rec_size = rec_size;
+ info.func_info = ptr_to_u64(func_info);
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+
+ if (CHECK(err == -1, "invalid get info (2nd) errno:%d", errno)) {
+ fprintf(stderr, "%s\n", btf_log_buf);
+ err = -1;
+ goto done;
+ }
+ if (CHECK(info.nr_func_info != 3,
+ "incorrect info.nr_func_info (2nd) %d",
+ info.nr_func_info)) {
+ err = -1;
+ goto done;
+ }
+ if (CHECK(info.func_info_rec_size != rec_size,
+ "incorrect info.func_info_rec_size (2nd) %d",
+ info.func_info_rec_size)) {
+ err = -1;
+ goto done;
+ }
+
+ err = btf__get_from_id(info.btf_id, &btf);
+ if (CHECK(err, "cannot get btf from kernel, err: %d", err))
+ goto done;
+
+ /* check three functions */
+ finfo = func_info;
+ for (i = 0; i < 3; i++) {
+ const struct btf_type *t;
+ const char *fname;
+
+ t = btf__type_by_id(btf, finfo->type_id);
+ if (CHECK(!t, "btf__type_by_id failure: id %u",
+ finfo->type_id)) {
+ err = -1;
+ goto done;
+ }
+
+ fname = btf__name_by_offset(btf, t->name_off);
+ err = strcmp(fname, expected_fnames[i]);
+ /* for the second and third functions in the .text section,
+ * the compiler may order them either way.
+ */
+ if (i && err)
+ err = strcmp(fname, expected_fnames[3 - i]);
+ if (CHECK(err, "incorrect fname %s", fname ? : "")) {
+ err = -1;
+ goto done;
+ }
+
+ finfo = (void *)finfo + rec_size;
+ }
+
+skip:
fprintf(stderr, "OK");
done:
+ free(func_info);
bpf_object__close(obj);
return err;
}
@@ -2093,7 +3528,8 @@ struct pprint_mapv {
} aenum;
};
-static struct btf_raw_test pprint_test_template = {
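+/* pprint_test_template becomes an array so the same pretty-print checks
+ * can run over several BTF encodings of struct pprint_mapv (with and
+ * without kind_flag).
+ */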
+static struct btf_raw_test pprint_test_template[] = {
+{
.raw_types = {
 /* unsigned char */ /* [1] */
BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
@@ -2143,13 +3579,140 @@ static struct btf_raw_test pprint_test_template = {
BTF_MEMBER_ENC(NAME_TBD, 15, 192), /* aenum */
BTF_END_RAW,
},
- .str_sec = "\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum",
- .str_sec_size = sizeof("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"),
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"),
+ .key_size = sizeof(unsigned int),
+ .value_size = sizeof(struct pprint_mapv),
+ .key_type_id = 3, /* unsigned int */
+ .value_type_id = 16, /* struct pprint_mapv */
+ .max_entries = 128 * 1024,
+},
+
+{
+ /* this template has the same layout as the
+ * first .raw_types definition, but the struct type
+ * is encoded with kind_flag set.
+ */
+ .raw_types = {
+ /* unsigned char */ /* [1] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
+ /* unsigned short */ /* [2] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
+ /* unsigned int */ /* [3] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
+ /* int */ /* [4] */
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* unsigned long long */ /* [5] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8),
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [6] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [7] */
+ /* uint8_t[8] */ /* [8] */
+ BTF_TYPE_ARRAY_ENC(9, 1, 8),
+ /* typedef unsigned char uint8_t */ /* [9] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 1),
+ /* typedef unsigned short uint16_t */ /* [10] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 2),
+ /* typedef unsigned int uint32_t */ /* [11] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 3),
+ /* typedef int int32_t */ /* [12] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 4),
+ /* typedef unsigned long long uint64_t *//* [13] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 5),
+ /* union (anon) */ /* [14] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 0, 2), 8),
+ BTF_MEMBER_ENC(NAME_TBD, 13, 0),/* uint64_t ui64; */
+ BTF_MEMBER_ENC(NAME_TBD, 8, 0), /* uint8_t ui8a[8]; */
+ /* enum (anon) */ /* [15] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 4), 4),
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_ENUM_ENC(NAME_TBD, 1),
+ BTF_ENUM_ENC(NAME_TBD, 2),
+ BTF_ENUM_ENC(NAME_TBD, 3),
+ /* struct pprint_mapv */ /* [16] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32),
+ BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
+ BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
+ BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
+ BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 96)), /* unused_bits2a */
+ BTF_MEMBER_ENC(NAME_TBD, 7, BTF_MEMBER_OFFSET(28, 98)), /* bits28 */
+ BTF_MEMBER_ENC(NAME_TBD, 6, BTF_MEMBER_OFFSET(2, 126)), /* unused_bits2b */
+ BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
+ BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum"),
+ .key_size = sizeof(unsigned int),
+ .value_size = sizeof(struct pprint_mapv),
+ .key_type_id = 3, /* unsigned int */
+ .value_type_id = 16, /* struct pprint_mapv */
+ .max_entries = 128 * 1024,
+},
+
+{
+ /* this template has the same layout as the
+ * first .raw_types definition. The struct type is
+ * encoded with kind_flag set, the bitfield members
+ * are wrapped in typedef/const/volatile, and the
+ * bitfields use both int and enum base types.
+ */
+ .raw_types = {
+ /* unsigned char */ /* [1] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 8, 1),
+ /* unsigned short */ /* [2] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 16, 2),
+ /* unsigned int */ /* [3] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),
+ /* int */ /* [4] */
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),
+ /* unsigned long long */ /* [5] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8),
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [6] */
+ BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [7] */
+ /* uint8_t[8] */ /* [8] */
+ BTF_TYPE_ARRAY_ENC(9, 1, 8),
+ /* typedef unsigned char uint8_t */ /* [9] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 1),
+ /* typedef unsigned short uint16_t */ /* [10] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 2),
+ /* typedef unsigned int uint32_t */ /* [11] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 3),
+ /* typedef int int32_t */ /* [12] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 4),
+ /* typedef unsigned long long uint64_t *//* [13] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 5),
+ /* union (anon) */ /* [14] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_UNION, 0, 2), 8),
+ BTF_MEMBER_ENC(NAME_TBD, 13, 0),/* uint64_t ui64; */
+ BTF_MEMBER_ENC(NAME_TBD, 8, 0), /* uint8_t ui8a[8]; */
+ /* enum (anon) */ /* [15] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 4), 4),
+ BTF_ENUM_ENC(NAME_TBD, 0),
+ BTF_ENUM_ENC(NAME_TBD, 1),
+ BTF_ENUM_ENC(NAME_TBD, 2),
+ BTF_ENUM_ENC(NAME_TBD, 3),
+ /* struct pprint_mapv */ /* [16] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 1, 8), 32),
+ BTF_MEMBER_ENC(NAME_TBD, 11, BTF_MEMBER_OFFSET(0, 0)), /* uint32_t ui32 */
+ BTF_MEMBER_ENC(NAME_TBD, 10, BTF_MEMBER_OFFSET(0, 32)), /* uint16_t ui16 */
+ BTF_MEMBER_ENC(NAME_TBD, 12, BTF_MEMBER_OFFSET(0, 64)), /* int32_t si32 */
+ BTF_MEMBER_ENC(NAME_TBD, 17, BTF_MEMBER_OFFSET(2, 96)), /* unused_bits2a */
+ BTF_MEMBER_ENC(NAME_TBD, 7, BTF_MEMBER_OFFSET(28, 98)), /* bits28 */
+ BTF_MEMBER_ENC(NAME_TBD, 19, BTF_MEMBER_OFFSET(2, 126)),/* unused_bits2b */
+ BTF_MEMBER_ENC(0, 14, BTF_MEMBER_OFFSET(0, 128)), /* union (anon) */
+ BTF_MEMBER_ENC(NAME_TBD, 15, BTF_MEMBER_OFFSET(0, 192)), /* aenum */
+ /* typedef unsigned int ___int */ /* [17] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 18),
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 6), /* [18] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 15), /* [19] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0unsigned char\0unsigned short\0unsigned int\0int\0unsigned long long\0uint8_t\0uint16_t\0uint32_t\0int32_t\0uint64_t\0ui64\0ui8a\0ENUM_ZERO\0ENUM_ONE\0ENUM_TWO\0ENUM_THREE\0pprint_mapv\0ui32\0ui16\0si32\0unused_bits2a\0bits28\0unused_bits2b\0aenum\0___int"),
.key_size = sizeof(unsigned int),
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
.max_entries = 128 * 1024,
+},
+
};
static struct btf_pprint_test_meta {
@@ -2252,9 +3815,9 @@ static int check_line(const char *expected_line, int nexpected_line,
}
-static int do_test_pprint(void)
+static int do_test_pprint(int test_num)
{
- const struct btf_raw_test *test = &pprint_test_template;
+ const struct btf_raw_test *test = &pprint_test_template[test_num];
struct bpf_create_map_attr create_attr = {};
bool ordered_map, lossless_map, percpu_map;
int err, ret, num_cpus, rounded_value_size;
@@ -2270,10 +3833,10 @@ static int do_test_pprint(void)
uint8_t *raw_btf;
ssize_t nread;
- fprintf(stderr, "%s......", test->descr);
+ fprintf(stderr, "%s(#%d)......", test->descr, test_num);
raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
test->str_sec, test->str_sec_size,
- &raw_btf_size);
+ &raw_btf_size, NULL);
if (!raw_btf)
return -1;
@@ -2463,30 +4026,940 @@ static int test_pprint(void)
unsigned int i;
int err = 0;
+ /* test various maps with the first test template */
for (i = 0; i < ARRAY_SIZE(pprint_tests_meta); i++) {
- pprint_test_template.descr = pprint_tests_meta[i].descr;
- pprint_test_template.map_type = pprint_tests_meta[i].map_type;
- pprint_test_template.map_name = pprint_tests_meta[i].map_name;
- pprint_test_template.ordered_map = pprint_tests_meta[i].ordered_map;
- pprint_test_template.lossless_map = pprint_tests_meta[i].lossless_map;
- pprint_test_template.percpu_map = pprint_tests_meta[i].percpu_map;
+ pprint_test_template[0].descr = pprint_tests_meta[i].descr;
+ pprint_test_template[0].map_type = pprint_tests_meta[i].map_type;
+ pprint_test_template[0].map_name = pprint_tests_meta[i].map_name;
+ pprint_test_template[0].ordered_map = pprint_tests_meta[i].ordered_map;
+ pprint_test_template[0].lossless_map = pprint_tests_meta[i].lossless_map;
+ pprint_test_template[0].percpu_map = pprint_tests_meta[i].percpu_map;
+
+ err |= count_result(do_test_pprint(0));
+ }
+
+ /* test the remaining templates with the first map */
+ for (i = 1; i < ARRAY_SIZE(pprint_test_template); i++) {
+ pprint_test_template[i].descr = pprint_tests_meta[0].descr;
+ pprint_test_template[i].map_type = pprint_tests_meta[0].map_type;
+ pprint_test_template[i].map_name = pprint_tests_meta[0].map_name;
+ pprint_test_template[i].ordered_map = pprint_tests_meta[0].ordered_map;
+ pprint_test_template[i].lossless_map = pprint_tests_meta[0].lossless_map;
+ pprint_test_template[i].percpu_map = pprint_tests_meta[0].percpu_map;
+ err |= count_result(do_test_pprint(i));
+ }
+
+ return err;
+}
+
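+/* Emits the four u32s of one struct bpf_line_info record; the last u32
+ * packs the line number in its upper bits and the column in the low 10.
+ */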
+#define BPF_LINE_INFO_ENC(insn_off, file_off, line_off, line_num, line_col) \
+ (insn_off), (file_off), (line_off), ((line_num) << 10 | ((line_col) & 0x3ff))
- err |= count_result(do_test_pprint());
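+/* Template for BPF_PROG_LOAD tests that exercise func_info and line_info;
+ * the variable-length raw_types and line_info arrays are terminated by
+ * BTF_END_RAW.
+ */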
+static struct prog_info_raw_test {
+ const char *descr;
+ const char *str_sec;
+ const char *err_str;
+ __u32 raw_types[MAX_NR_RAW_U32];
+ __u32 str_sec_size;
+ struct bpf_insn insns[MAX_INSNS];
+ __u32 prog_type;
+ __u32 func_info[MAX_SUBPROGS][2];
+ __u32 func_info_rec_size;
+ __u32 func_info_cnt;
+ __u32 line_info[MAX_NR_RAW_U32];
+ __u32 line_info_rec_size;
+ __u32 nr_jited_ksyms;
+ bool expected_prog_load_failure;
+} info_raw_tests[] = {
+{
+ .descr = "func_type (main func + one sub)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */
+ BTF_FUNC_PROTO_ENC(1, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_FUNC_PROTO_ENC(1, 2), /* [4] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */
+ BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB",
+ .str_sec_size = sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"),
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info = { {0, 5}, {3, 6} },
+ .func_info_rec_size = 8,
+ .func_info_cnt = 2,
+ .line_info = { BTF_END_RAW },
+},
+
+{
+ .descr = "func_type (Incorrect func_info_rec_size)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */
+ BTF_FUNC_PROTO_ENC(1, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_FUNC_PROTO_ENC(1, 2), /* [4] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */
+ BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB",
+ .str_sec_size = sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"),
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info = { {0, 5}, {3, 6} },
+ .func_info_rec_size = 4,
+ .func_info_cnt = 2,
+ .line_info = { BTF_END_RAW },
+ .expected_prog_load_failure = true,
+},
+
+{
+ .descr = "func_type (Incorrect func_info_cnt)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */
+ BTF_FUNC_PROTO_ENC(1, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_FUNC_PROTO_ENC(1, 2), /* [4] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */
+ BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB",
+ .str_sec_size = sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"),
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info = { {0, 5}, {3, 6} },
+ .func_info_rec_size = 8,
+ .func_info_cnt = 1,
+ .line_info = { BTF_END_RAW },
+ .expected_prog_load_failure = true,
+},
+
+{
+ .descr = "func_type (Incorrect bpf_func_info.insn_off)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4), /* [2] */
+ BTF_FUNC_PROTO_ENC(1, 2), /* [3] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_FUNC_PROTO_ENC(1, 2), /* [4] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 2),
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 3), /* [5] */
+ BTF_FUNC_ENC(NAME_TBD, 4), /* [6] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB",
+ .str_sec_size = sizeof("\0int\0unsigned int\0a\0b\0c\0d\0funcA\0funcB"),
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info = { {0, 5}, {2, 6} },
+ .func_info_rec_size = 8,
+ .func_info_cnt = 2,
+ .line_info = { BTF_END_RAW },
+ .expected_prog_load_failure = true,
+},
+
+{
+ .descr = "line_info (No subprog)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+},
+
+{
+ .descr = "line_info (No subprog. insn_off >= prog->len)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
+ BPF_LINE_INFO_ENC(4, 0, 0, 5, 6),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .err_str = "line_info[4].insn_off",
+ .expected_prog_load_failure = true,
+},
+
+{
+ .descr = "line_info (Zero bpf insn code)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 64, 8), /* [2] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 2), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0unsigned long\0u64\0u64 a=1;\0return a;"),
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(1, 0, 0, 2, 9),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 1,
+ .err_str = "Invalid insn code at line_info[1]",
+ .expected_prog_load_failure = true,
+},
+
+{
+ .descr = "line_info (No subprog. zero tailing line_info",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), 0,
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), 0,
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), 0,
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), 0,
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info) + sizeof(__u32),
+ .nr_jited_ksyms = 1,
+},
+
+{
+ .descr = "line_info (No subprog. nonzero tailing line_info)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), 0,
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), 0,
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), 0,
+ BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), 1,
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info) + sizeof(__u32),
+ .nr_jited_ksyms = 1,
+ .err_str = "nonzero tailing record in line_info",
+ .expected_prog_load_failure = true,
+},
+
+{
+ .descr = "line_info (subprog)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+},
+
+{
+ .descr = "line_info (subprog + func_info)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_FUNC_PROTO_ENC(1, 1), /* [2] */
+ BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */
+ BTF_FUNC_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 2,
+ .func_info_rec_size = 8,
+ .func_info = { {0, 4}, {5, 3} },
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+},
+
+{
+ .descr = "line_info (subprog. missing 1st func line info)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .err_str = "missing bpf_line_info for func#0",
+ .expected_prog_load_failure = true,
+},
+
+{
+ .descr = "line_info (subprog. missing 2nd func line info)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .err_str = "missing bpf_line_info for func#1",
+ .expected_prog_load_failure = true,
+},
+
+{
+ .descr = "line_info (subprog. unordered insn offset)",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_CALL_REL(1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .func_info_cnt = 0,
+ .line_info = {
+ BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+ BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 2, 9),
+ BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
+ BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
+ BTF_END_RAW,
+ },
+ .line_info_rec_size = sizeof(struct bpf_line_info),
+ .nr_jited_ksyms = 2,
+ .err_str = "Invalid line_info[2].insn_off",
+ .expected_prog_load_failure = true,
+},
+
+};
+
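+/* The insns[] templates zero-fill their unused slots, so scan backwards
+ * for the last non-zero instruction to recover the program length.
+ */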
+static size_t probe_prog_length(const struct bpf_insn *fp)
+{
+ size_t len;
+
+ for (len = MAX_INSNS - 1; len > 0; --len)
+ if (fp[len].code != 0 || fp[len].imm != 0)
+ break;
+ return len + 1;
+}
+
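+/* Returns a malloc()'d copy of the raw u32 section in which each NAME_TBD
+ * placeholder is replaced with the offset of the next string in str;
+ * the caller owns the returned buffer.
+ */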
+static __u32 *patch_name_tbd(const __u32 *raw_u32,
+ const char *str, __u32 str_off,
+ unsigned int str_sec_size,
+ unsigned int *ret_size)
+{
+ int i, raw_u32_size = get_raw_sec_size(raw_u32);
+ const char *end_str = str + str_sec_size;
+ const char *next_str = str + str_off;
+ __u32 *new_u32 = NULL;
+
+ if (raw_u32_size == -1)
+ return ERR_PTR(-EINVAL);
+
+ if (!raw_u32_size) {
+ *ret_size = 0;
+ return NULL;
+ }
+
+ new_u32 = malloc(raw_u32_size);
+ if (!new_u32)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < raw_u32_size / sizeof(raw_u32[0]); i++) {
+ if (raw_u32[i] == NAME_TBD) {
+ next_str = get_next_str(next_str, end_str);
+ if (CHECK(!next_str, "Error in getting next_str\n")) {
+ free(new_u32);
+ return ERR_PTR(-EINVAL);
+ }
+ new_u32[i] = next_str - str;
+ next_str += strlen(next_str);
+ } else {
+ new_u32[i] = raw_u32[i];
+ }
+ }
+
+ *ret_size = raw_u32_size;
+ return new_u32;
+}
+
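+/* Reads func_info back in two bpf_obj_get_info_by_fd() calls: the first
+ * probes nr_func_info and the record size, the second fills a buffer
+ * sized from them so each type_id can be compared with the test's
+ * expectations.
+ */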
+static int test_get_finfo(const struct prog_info_raw_test *test,
+ int prog_fd)
+{
+ struct bpf_prog_info info = {};
+ struct bpf_func_info *finfo;
+ __u32 info_len, rec_size, i;
+ void *func_info = NULL;
+ int err;
+
+ /* get necessary lens */
+ info_len = sizeof(struct bpf_prog_info);
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (CHECK(err == -1, "invalid get info (1st) errno:%d", errno)) {
+ fprintf(stderr, "%s\n", btf_log_buf);
+ return -1;
+ }
+ if (CHECK(info.nr_func_info != test->func_info_cnt,
+ "incorrect info.nr_func_info (1st) %d",
+ info.nr_func_info)) {
+ return -1;
+ }
+
+ rec_size = info.func_info_rec_size;
+ if (CHECK(rec_size != sizeof(struct bpf_func_info),
+ "incorrect info.func_info_rec_size (1st) %d", rec_size)) {
+ return -1;
+ }
+
+ if (!info.nr_func_info)
+ return 0;
+
+ func_info = malloc(info.nr_func_info * rec_size);
+ if (CHECK(!func_info, "out of memory"))
+ return -1;
+
+ /* reset info to only retrieve func_info related data */
+ memset(&info, 0, sizeof(info));
+ info.nr_func_info = test->func_info_cnt;
+ info.func_info_rec_size = rec_size;
+ info.func_info = ptr_to_u64(func_info);
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (CHECK(err == -1, "invalid get info (2nd) errno:%d", errno)) {
+ fprintf(stderr, "%s\n", btf_log_buf);
+ err = -1;
+ goto done;
+ }
+ if (CHECK(info.nr_func_info != test->func_info_cnt,
+ "incorrect info.nr_func_info (2nd) %d",
+ info.nr_func_info)) {
+ err = -1;
+ goto done;
+ }
+ if (CHECK(info.func_info_rec_size != rec_size,
+ "incorrect info.func_info_rec_size (2nd) %d",
+ info.func_info_rec_size)) {
+ err = -1;
+ goto done;
+ }
+
+ finfo = func_info;
+ for (i = 0; i < test->func_info_cnt; i++) {
+ if (CHECK(finfo->type_id != test->func_info[i][1],
+ "incorrect func_type %u expected %u",
+ finfo->type_id, test->func_info[i][1])) {
+ err = -1;
+ goto done;
+ }
+ finfo = (void *)finfo + rec_size;
+ }
+
+ err = 0;
+
+done:
+ free(func_info);
+ return err;
+}
+
+static int test_get_linfo(const struct prog_info_raw_test *test,
+ const void *patched_linfo,
+ __u32 cnt, int prog_fd)
+{
+ __u32 i, info_len, nr_jited_ksyms, nr_jited_func_lens;
+ __u64 *jited_linfo = NULL, *jited_ksyms = NULL;
+ __u32 rec_size, jited_rec_size, jited_cnt;
+ struct bpf_line_info *linfo = NULL;
+ __u32 cur_func_len, ksyms_found;
+ struct bpf_prog_info info = {};
+ __u32 *jited_func_lens = NULL;
+ __u64 cur_func_ksyms;
+ int err;
+
+ jited_cnt = cnt;
+ rec_size = sizeof(*linfo);
+ jited_rec_size = sizeof(*jited_linfo);
+ if (test->nr_jited_ksyms)
+ nr_jited_ksyms = test->nr_jited_ksyms;
+ else
+ nr_jited_ksyms = test->func_info_cnt;
+ nr_jited_func_lens = nr_jited_ksyms;
+
+ info_len = sizeof(struct bpf_prog_info);
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+ if (CHECK(err == -1, "err:%d errno:%d", err, errno)) {
+ err = -1;
+ goto done;
+ }
+
+ if (!info.jited_prog_len) {
+ /* prog is not jited */
+ jited_cnt = 0;
+ nr_jited_ksyms = 1;
+ nr_jited_func_lens = 1;
+ }
+
+ if (CHECK(info.nr_line_info != cnt ||
+ info.nr_jited_line_info != jited_cnt ||
+ info.nr_jited_ksyms != nr_jited_ksyms ||
+ info.nr_jited_func_lens != nr_jited_func_lens ||
+ (!info.nr_line_info && info.nr_jited_line_info),
+ "info: nr_line_info:%u(expected:%u) nr_jited_line_info:%u(expected:%u) nr_jited_ksyms:%u(expected:%u) nr_jited_func_lens:%u(expected:%u)",
+ info.nr_line_info, cnt,
+ info.nr_jited_line_info, jited_cnt,
+ info.nr_jited_ksyms, nr_jited_ksyms,
+ info.nr_jited_func_lens, nr_jited_func_lens)) {
+ err = -1;
+ goto done;
+ }
+
+ if (CHECK(info.line_info_rec_size != sizeof(struct bpf_line_info) ||
+ info.jited_line_info_rec_size != sizeof(__u64),
+ "info: line_info_rec_size:%u(userspace expected:%u) jited_line_info_rec_size:%u(userspace expected:%u)",
+ info.line_info_rec_size, rec_size,
+ info.jited_line_info_rec_size, jited_rec_size)) {
+ err = -1;
+ goto done;
+ }
+
+ if (!cnt)
+ return 0;
+
+ rec_size = info.line_info_rec_size;
+ jited_rec_size = info.jited_line_info_rec_size;
+
+ memset(&info, 0, sizeof(info));
+
+ linfo = calloc(cnt, rec_size);
+ if (CHECK(!linfo, "!linfo")) {
+ err = -1;
+ goto done;
+ }
+ info.nr_line_info = cnt;
+ info.line_info_rec_size = rec_size;
+ info.line_info = ptr_to_u64(linfo);
+
+ if (jited_cnt) {
+ jited_linfo = calloc(jited_cnt, jited_rec_size);
+ jited_ksyms = calloc(nr_jited_ksyms, sizeof(*jited_ksyms));
+ jited_func_lens = calloc(nr_jited_func_lens,
+ sizeof(*jited_func_lens));
+ if (CHECK(!jited_linfo || !jited_ksyms || !jited_func_lens,
+ "jited_linfo:%p jited_ksyms:%p jited_func_lens:%p",
+ jited_linfo, jited_ksyms, jited_func_lens)) {
+ err = -1;
+ goto done;
+ }
+
+ info.nr_jited_line_info = jited_cnt;
+ info.jited_line_info_rec_size = jited_rec_size;
+ info.jited_line_info = ptr_to_u64(jited_linfo);
+ info.nr_jited_ksyms = nr_jited_ksyms;
+ info.jited_ksyms = ptr_to_u64(jited_ksyms);
+ info.nr_jited_func_lens = nr_jited_func_lens;
+ info.jited_func_lens = ptr_to_u64(jited_func_lens);
+ }
+
+ err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+
+ /*
+ * Only recheck the info.*line_info* fields.
+ * Other fields are not the concern of this test.
+ */
+ if (CHECK(err == -1 ||
+ info.nr_line_info != cnt ||
+ (jited_cnt && !info.jited_line_info) ||
+ info.nr_jited_line_info != jited_cnt ||
+ info.line_info_rec_size != rec_size ||
+ info.jited_line_info_rec_size != jited_rec_size,
+ "err:%d errno:%d info: nr_line_info:%u(expected:%u) nr_jited_line_info:%u(expected:%u) line_info_rec_size:%u(expected:%u) jited_linfo_rec_size:%u(expected:%u) line_info:%p jited_line_info:%p",
+ err, errno,
+ info.nr_line_info, cnt,
+ info.nr_jited_line_info, jited_cnt,
+ info.line_info_rec_size, rec_size,
+ info.jited_line_info_rec_size, jited_rec_size,
+ (void *)(long)info.line_info,
+ (void *)(long)info.jited_line_info)) {
+ err = -1;
+ goto done;
+ }
+
+ CHECK(linfo[0].insn_off, "linfo[0].insn_off:%u",
+ linfo[0].insn_off);
+ for (i = 1; i < cnt; i++) {
+ const struct bpf_line_info *expected_linfo;
+
+ expected_linfo = patched_linfo + (i * test->line_info_rec_size);
+ if (CHECK(linfo[i].insn_off <= linfo[i - 1].insn_off,
+ "linfo[%u].insn_off:%u <= linfo[%u].insn_off:%u",
+ i, linfo[i].insn_off,
+ i - 1, linfo[i - 1].insn_off)) {
+ err = -1;
+ goto done;
+ }
+ if (CHECK(linfo[i].file_name_off != expected_linfo->file_name_off ||
+ linfo[i].line_off != expected_linfo->line_off ||
+ linfo[i].line_col != expected_linfo->line_col,
+ "linfo[%u] (%u, %u, %u) != (%u, %u, %u)", i,
+ linfo[i].file_name_off,
+ linfo[i].line_off,
+ linfo[i].line_col,
+ expected_linfo->file_name_off,
+ expected_linfo->line_off,
+ expected_linfo->line_col)) {
+ err = -1;
+ goto done;
+ }
+ }
+
+ if (!jited_cnt) {
+ fprintf(stderr, "not jited. skipping jited_line_info check. ");
+ err = 0;
+ goto done;
+ }
+
+ if (CHECK(jited_linfo[0] != jited_ksyms[0],
+ "jited_linfo[0]:%lx != jited_ksyms[0]:%lx",
+ (long)(jited_linfo[0]), (long)(jited_ksyms[0]))) {
+ err = -1;
+ goto done;
+ }
+
+ ksyms_found = 1;
+ cur_func_len = jited_func_lens[0];
+ cur_func_ksyms = jited_ksyms[0];
+ for (i = 1; i < jited_cnt; i++) {
+ if (ksyms_found < nr_jited_ksyms &&
+ jited_linfo[i] == jited_ksyms[ksyms_found]) {
+ cur_func_ksyms = jited_ksyms[ksyms_found];
+ cur_func_len = jited_func_lens[ksyms_found];
+ ksyms_found++;
+ continue;
+ }
+
+ if (CHECK(jited_linfo[i] <= jited_linfo[i - 1],
+ "jited_linfo[%u]:%lx <= jited_linfo[%u]:%lx",
+ i, (long)jited_linfo[i],
+ i - 1, (long)(jited_linfo[i - 1]))) {
+ err = -1;
+ goto done;
+ }
+
+ if (CHECK(jited_linfo[i] - cur_func_ksyms > cur_func_len,
+ "jited_linfo[%u]:%lx - %lx > %u",
+ i, (long)jited_linfo[i], (long)cur_func_ksyms,
+ cur_func_len)) {
+ err = -1;
+ goto done;
+ }
+ }
+
+ if (CHECK(ksyms_found != nr_jited_ksyms,
+ "ksyms_found:%u != nr_jited_ksyms:%u",
+ ksyms_found, nr_jited_ksyms)) {
+ err = -1;
+ goto done;
+ }
+
+ err = 0;
+
+done:
+ free(linfo);
+ free(jited_linfo);
+ free(jited_ksyms);
+ free(jited_func_lens);
+ return err;
+}
+
+static int do_test_info_raw(unsigned int test_num)
+{
+ const struct prog_info_raw_test *test = &info_raw_tests[test_num - 1];
+ unsigned int raw_btf_size, linfo_str_off, linfo_size;
+ int btf_fd = -1, prog_fd = -1, err = 0;
+ void *raw_btf, *patched_linfo = NULL;
+ const char *ret_next_str;
+ union bpf_attr attr = {};
+
+ fprintf(stderr, "BTF prog info raw test[%u] (%s): ", test_num, test->descr);
+ raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
+ test->str_sec, test->str_sec_size,
+ &raw_btf_size, &ret_next_str);
+
+ if (!raw_btf)
+ return -1;
+
+ *btf_log_buf = '\0';
+ btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
+ btf_log_buf, BTF_LOG_BUF_SIZE,
+ args.always_log);
+ free(raw_btf);
+
+ if (CHECK(btf_fd == -1, "invalid btf_fd errno:%d", errno)) {
+ err = -1;
+ goto done;
+ }
+
+ if (*btf_log_buf && args.always_log)
+ fprintf(stderr, "\n%s", btf_log_buf);
+ *btf_log_buf = '\0';
+
+ linfo_str_off = ret_next_str - test->str_sec;
+ patched_linfo = patch_name_tbd(test->line_info,
+ test->str_sec, linfo_str_off,
+ test->str_sec_size, &linfo_size);
+ if (IS_ERR(patched_linfo)) {
+ fprintf(stderr, "error in creating raw bpf_line_info");
+ err = -1;
+ goto done;
+ }
+
+ attr.prog_type = test->prog_type;
+ attr.insns = ptr_to_u64(test->insns);
+ attr.insn_cnt = probe_prog_length(test->insns);
+ attr.license = ptr_to_u64("GPL");
+ attr.prog_btf_fd = btf_fd;
+ attr.func_info_rec_size = test->func_info_rec_size;
+ attr.func_info_cnt = test->func_info_cnt;
+ attr.func_info = ptr_to_u64(test->func_info);
+ attr.log_buf = ptr_to_u64(btf_log_buf);
+ attr.log_size = BTF_LOG_BUF_SIZE;
+ attr.log_level = 1;
+ if (linfo_size) {
+ attr.line_info_rec_size = test->line_info_rec_size;
+ attr.line_info = ptr_to_u64(patched_linfo);
+ attr.line_info_cnt = linfo_size / attr.line_info_rec_size;
+ }
+
+ prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+ err = ((prog_fd == -1) != test->expected_prog_load_failure);
+ if (CHECK(err, "prog_fd:%d expected_prog_load_failure:%u errno:%d",
+ prog_fd, test->expected_prog_load_failure, errno) ||
+ CHECK(test->err_str && !strstr(btf_log_buf, test->err_str),
+ "expected err_str:%s", test->err_str)) {
+ err = -1;
+ goto done;
}
+ if (prog_fd == -1)
+ goto done;
+
+ err = test_get_finfo(test, prog_fd);
+ if (err)
+ goto done;
+
+ err = test_get_linfo(test, patched_linfo, attr.line_info_cnt, prog_fd);
+ if (err)
+ goto done;
+
+done:
+ if (!err)
+ fprintf(stderr, "OK");
+
+ if (*btf_log_buf && (err || args.always_log))
+ fprintf(stderr, "\n%s", btf_log_buf);
+
+ if (btf_fd != -1)
+ close(btf_fd);
+ if (prog_fd != -1)
+ close(prog_fd);
+
+ if (!IS_ERR(patched_linfo))
+ free(patched_linfo);
+
+ return err;
+}
+
+static int test_info_raw(void)
+{
+ unsigned int i;
+ int err = 0;
+
+ if (args.info_raw_test_num)
+ return count_result(do_test_info_raw(args.info_raw_test_num));
+
+ for (i = 1; i <= ARRAY_SIZE(info_raw_tests); i++)
+ err |= count_result(do_test_info_raw(i));
+
return err;
}
static void usage(const char *cmd)
{
- fprintf(stderr, "Usage: %s [-l] [[-r test_num (1 - %zu)] | [-g test_num (1 - %zu)] | [-f test_num (1 - %zu)] | [-p]]\n",
+ fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n"
+ "\t[-g btf_get_info_test_num (1 - %zu)] |\n"
+ "\t[-f btf_file_test_num (1 - %zu)] |\n"
+ "\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n"
+ "\t[-p (pretty print test)]]\n",
cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests),
- ARRAY_SIZE(file_tests));
+ ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests));
}
static int parse_args(int argc, char **argv)
{
- const char *optstr = "lpf:r:g:";
+ const char *optstr = "lpk:f:r:g:";
int opt;
while ((opt = getopt(argc, argv, optstr)) != -1) {
@@ -2509,6 +4982,10 @@ static int parse_args(int argc, char **argv)
case 'p':
args.pprint_test = true;
break;
+ case 'k':
+ args.info_raw_test_num = atoi(optarg);
+ args.info_raw_test = true;
+ break;
case 'h':
usage(argv[0]);
exit(0);
@@ -2542,6 +5019,14 @@ static int parse_args(int argc, char **argv)
return -1;
}
+ if (args.info_raw_test_num &&
+ (args.info_raw_test_num < 1 ||
+ args.info_raw_test_num > ARRAY_SIZE(info_raw_tests))) {
+ fprintf(stderr, "BTF prog info raw test number must be [1 - %zu]\n",
+ ARRAY_SIZE(info_raw_tests));
+ return -1;
+ }
+
return 0;
}
@@ -2574,13 +5059,17 @@ int main(int argc, char **argv)
if (args.pprint_test)
err |= test_pprint();
+ if (args.info_raw_test)
+ err |= test_info_raw();
+
if (args.raw_test || args.get_info_test || args.file_test ||
- args.pprint_test)
+ args.pprint_test || args.info_raw_test)
goto done;
err |= test_raw();
err |= test_get_info();
err |= test_file();
+ err |= test_info_raw();
done:
print_summary();
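For reference, the tests above exercise the two-call bpf_obj_get_info_by_fd() protocol: a first call with a zeroed struct bpf_prog_info reports nr_line_info and line_info_rec_size, and a second call with user buffers filled in retrieves the records themselves. A minimal sketch of that pattern (not part of the patch; assumes <bpf/bpf.h>, a valid prog_fd, and trims the error reporting the tests do):

#include <stdlib.h>
#include <string.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

static struct bpf_line_info *fetch_line_info(int prog_fd, __u32 *cnt)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	struct bpf_line_info *linfo;
	__u32 rec_size;

	/* 1st call: learn how many records the kernel has and their size */
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len))
		return NULL;
	if (!info.nr_line_info)
		return NULL;

	*cnt = info.nr_line_info;
	rec_size = info.line_info_rec_size;
	linfo = calloc(*cnt, rec_size);
	if (!linfo)
		return NULL;

	/* 2nd call: only the line_info related fields are set */
	memset(&info, 0, sizeof(info));
	info.nr_line_info = *cnt;
	info.line_info_rec_size = rec_size;
	info.line_info = (__u64)(unsigned long)linfo;
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
		free(linfo);
		return NULL;
	}

	return linfo;
}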
diff --git a/tools/testing/selftests/bpf/test_btf_haskv.c b/tools/testing/selftests/bpf/test_btf_haskv.c
index b21b876f475d..e5c79fe0ffdb 100644
--- a/tools/testing/selftests/bpf/test_btf_haskv.c
+++ b/tools/testing/selftests/bpf/test_btf_haskv.c
@@ -24,8 +24,8 @@ struct dummy_tracepoint_args {
struct sock *sock;
};
-SEC("dummy_tracepoint")
-int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
+__attribute__((noinline))
+static int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
@@ -42,4 +42,16 @@ int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
return 0;
}
+__attribute__((noinline))
+static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+{
+ return test_long_fname_2(arg);
+}
+
+SEC("dummy_tracepoint")
+int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
+{
+ return test_long_fname_1(arg);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_btf_nokv.c b/tools/testing/selftests/bpf/test_btf_nokv.c
index 0ed8e088eebf..434188c37774 100644
--- a/tools/testing/selftests/bpf/test_btf_nokv.c
+++ b/tools/testing/selftests/bpf/test_btf_nokv.c
@@ -22,8 +22,8 @@ struct dummy_tracepoint_args {
struct sock *sock;
};
-SEC("dummy_tracepoint")
-int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
+__attribute__((noinline))
+static int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
@@ -40,4 +40,16 @@ int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
return 0;
}
+__attribute__((noinline))
+static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+{
+ return test_long_fname_2(arg);
+}
+
+SEC("dummy_tracepoint")
+int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
+{
+ return test_long_fname_1(arg);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_flow_dissector.sh b/tools/testing/selftests/bpf/test_flow_dissector.sh
index c0fb073b5eab..d23d4da66b83 100755
--- a/tools/testing/selftests/bpf/test_flow_dissector.sh
+++ b/tools/testing/selftests/bpf/test_flow_dissector.sh
@@ -59,7 +59,7 @@ else
fi
# Attach BPF program
-./flow_dissector_load -p bpf_flow.o -s dissect
+./flow_dissector_load -p bpf_flow.o -s flow_dissector
# Setup
tc qdisc add dev lo ingress
diff --git a/tools/testing/selftests/bpf/test_libbpf.sh b/tools/testing/selftests/bpf/test_libbpf.sh
index 156d89f1edcc..2989b2e2d856 100755
--- a/tools/testing/selftests/bpf/test_libbpf.sh
+++ b/tools/testing/selftests/bpf/test_libbpf.sh
@@ -33,17 +33,11 @@ trap exit_handler 0 2 3 6 9
libbpf_open_file test_l4lb.o
-# TODO: fix libbpf to load noinline functions
-# [warning] libbpf: incorrect bpf_call opcode
-#libbpf_open_file test_l4lb_noinline.o
+# Load a program with BPF-to-BPF calls
+libbpf_open_file test_l4lb_noinline.o
-# TODO: fix test_xdp_meta.c to load with libbpf
-# [warning] libbpf: test_xdp_meta.o doesn't provide kernel version
-#libbpf_open_file test_xdp_meta.o
-
-# TODO: fix libbpf to handle .eh_frame
-# [warning] libbpf: relocation failed: no section(10)
-#libbpf_open_file ../../../../samples/bpf/tracex3_kern.o
+# Load a program compiled without the "-target bpf" flag
+libbpf_open_file test_xdp.o
# Success
exit 0
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh
index 677686198df3..ec4e15948e40 100755
--- a/tools/testing/selftests/bpf/test_lirc_mode2.sh
+++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh
@@ -21,13 +21,14 @@ do
if grep -q DRV_NAME=rc-loopback $i/uevent
then
LIRCDEV=$(grep DEVNAME= $i/lirc*/uevent | sed sQDEVNAME=Q/dev/Q)
+ INPUTDEV=$(grep DEVNAME= $i/input*/event*/uevent | sed sQDEVNAME=Q/dev/Q)
fi
done
if [ -n $LIRCDEV ];
then
TYPE=lirc_mode2
- ./test_lirc_mode2_user $LIRCDEV
+ ./test_lirc_mode2_user $LIRCDEV $INPUTDEV
ret=$?
if [ $ret -ne 0 ]; then
echo -e ${RED}"FAIL: $TYPE"${NC}
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c b/tools/testing/selftests/bpf/test_lirc_mode2_kern.c
index ba26855563a5..4147130cc3b7 100644
--- a/tools/testing/selftests/bpf/test_lirc_mode2_kern.c
+++ b/tools/testing/selftests/bpf/test_lirc_mode2_kern.c
@@ -15,6 +15,9 @@ int bpf_decoder(unsigned int *sample)
if (duration & 0x10000)
bpf_rc_keydown(sample, 0x40, duration & 0xffff, 0);
+ if (duration & 0x20000)
+ bpf_rc_pointer_rel(sample, (duration >> 8) & 0xff,
+ duration & 0xff);
}
return 0;
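The new branch gives the raw-IR tests a way to trigger bpf_rc_pointer_rel(): bit 17 of the sample selects the relative-pointer path, with the X delta in bits 15:8 and the Y delta in bits 7:0. For the sample 0x20101 that test_lirc_mode2_user.c (below) writes, the arithmetic works out as (just the decode, not part of the patch):

	unsigned int duration = 0x20101;
	int rel_x = (duration >> 8) & 0xff;	/* = 0x01 */
	int rel_y = duration & 0xff;		/* = 0x01 */

which is why the user-space side waits for an EV_REL/REL_Y input event with value 1.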
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_user.c b/tools/testing/selftests/bpf/test_lirc_mode2_user.c
index d470d63c33db..fb5fd6841ef3 100644
--- a/tools/testing/selftests/bpf/test_lirc_mode2_user.c
+++ b/tools/testing/selftests/bpf/test_lirc_mode2_user.c
@@ -29,6 +29,7 @@
#include <linux/bpf.h>
#include <linux/lirc.h>
+#include <linux/input.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
@@ -47,12 +48,13 @@
int main(int argc, char **argv)
{
struct bpf_object *obj;
- int ret, lircfd, progfd, mode;
- int testir = 0x1dead;
+ int ret, lircfd, progfd, inputfd;
+ int testir1 = 0x1dead;
+ int testir2 = 0x20101;
u32 prog_ids[10], prog_flags[10], prog_cnt;
- if (argc != 2) {
- printf("Usage: %s /dev/lircN\n", argv[0]);
+ if (argc != 3) {
+ printf("Usage: %s /dev/lircN /dev/input/eventM\n", argv[0]);
return 2;
}
@@ -76,9 +78,9 @@ int main(int argc, char **argv)
return 1;
}
- mode = LIRC_MODE_SCANCODE;
- if (ioctl(lircfd, LIRC_SET_REC_MODE, &mode)) {
- printf("failed to set rec mode: %m\n");
+ inputfd = open(argv[2], O_RDONLY | O_NONBLOCK);
+ if (inputfd == -1) {
+ printf("failed to open input device %s: %m\n", argv[1]);
return 1;
}
@@ -102,29 +104,54 @@ int main(int argc, char **argv)
}
/* Write raw IR */
- ret = write(lircfd, &testir, sizeof(testir));
- if (ret != sizeof(testir)) {
+ ret = write(lircfd, &testir1, sizeof(testir1));
+ if (ret != sizeof(testir1)) {
printf("Failed to send test IR message: %m\n");
return 1;
}
- struct pollfd pfd = { .fd = lircfd, .events = POLLIN };
- struct lirc_scancode lsc;
+ struct pollfd pfd = { .fd = inputfd, .events = POLLIN };
+ struct input_event event;
- poll(&pfd, 1, 100);
+ for (;;) {
+ poll(&pfd, 1, 100);
- /* Read decoded IR */
- ret = read(lircfd, &lsc, sizeof(lsc));
- if (ret != sizeof(lsc)) {
- printf("Failed to read decoded IR: %m\n");
- return 1;
+ /* Read decoded IR */
+ ret = read(inputfd, &event, sizeof(event));
+ if (ret != sizeof(event)) {
+ printf("Failed to read decoded IR: %m\n");
+ return 1;
+ }
+
+ if (event.type == EV_MSC && event.code == MSC_SCAN &&
+ event.value == 0xdead) {
+ break;
+ }
}
- if (lsc.scancode != 0xdead || lsc.rc_proto != 64) {
- printf("Incorrect scancode decoded\n");
+ /* Write raw IR */
+ ret = write(lircfd, &testir2, sizeof(testir2));
+ if (ret != sizeof(testir2)) {
+ printf("Failed to send test IR message: %m\n");
return 1;
}
+ for (;;) {
+ poll(&pfd, 1, 100);
+
+ /* Read decoded IR */
+ ret = read(inputfd, &event, sizeof(event));
+ if (ret != sizeof(event)) {
+ printf("Failed to read decoded IR: %m\n");
+ return 1;
+ }
+
+ if (event.type == EV_REL && event.code == REL_Y &&
+ event.value == 1) {
+ break;
+ }
+ }
+
prog_cnt = 10;
ret = bpf_prog_query(lircfd, BPF_LIRC_MODE2, 0, prog_flags, prog_ids,
&prog_cnt);
diff --git a/tools/testing/selftests/bpf/test_map_in_map.c b/tools/testing/selftests/bpf/test_map_in_map.c
new file mode 100644
index 000000000000..ce923e67e08e
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_map_in_map.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018 Facebook */
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <linux/types.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") mim_array = {
+ .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
+ .key_size = sizeof(int),
+ /* must be sizeof(__u32) for map in map */
+ .value_size = sizeof(__u32),
+ .max_entries = 1,
+ .map_flags = 0,
+};
+
+struct bpf_map_def SEC("maps") mim_hash = {
+ .type = BPF_MAP_TYPE_HASH_OF_MAPS,
+ .key_size = sizeof(int),
+ /* must be sizeof(__u32) for map in map */
+ .value_size = sizeof(__u32),
+ .max_entries = 1,
+ .map_flags = 0,
+};
+
+SEC("xdp_mimtest")
+int xdp_mimtest0(struct xdp_md *ctx)
+{
+ int value = 123;
+ int key = 0;
+ void *map;
+
+ map = bpf_map_lookup_elem(&mim_array, &key);
+ if (!map)
+ return XDP_DROP;
+
+ bpf_map_update_elem(map, &key, &value, 0);
+
+ map = bpf_map_lookup_elem(&mim_hash, &key);
+ if (!map)
+ return XDP_DROP;
+
+ bpf_map_update_elem(map, &key, &value, 0);
+
+ return XDP_PASS;
+}
+
+int _version SEC("version") = 1;
+char _license[] SEC("license") = "GPL";
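The __u32 value size is required because, from user space, an element of an ARRAY_OF_MAPS or HASH_OF_MAPS map is written as the inner map's file descriptor, which the kernel resolves to a map reference at update time. A condensed sketch of the wiring that the test_maps.c hunk below performs in full (error handling omitted; "map" is the struct bpf_map * for mim_array):

	int inner_fd, mim_fd, key = 0;

	inner_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int),
				  sizeof(int), 2, 0);

	/* before load: tell the verifier the inner map's shape */
	bpf_map__set_inner_map_fd(map, inner_fd);
	/* ... bpf_object__load(obj), then mim_fd = bpf_map__fd(map) ... */

	/* after load: store the inner map itself, passed by fd */
	bpf_map_update_elem(mim_fd, &key, &inner_fd, 0);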
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 4db2116e52be..9c79ee017df3 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -258,24 +258,36 @@ static void test_hashmap_percpu(int task, void *data)
close(fd);
}
-static void test_hashmap_walk(int task, void *data)
+static int helper_fill_hashmap(int max_entries)
{
- int fd, i, max_entries = 1000;
- long long key, value, next_key;
- bool next_key_valid = true;
+ int i, fd, ret;
+ long long key, value;
fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
max_entries, map_flags);
- if (fd < 0) {
- printf("Failed to create hashmap '%s'!\n", strerror(errno));
- exit(1);
- }
+ CHECK(fd < 0,
+ "failed to create hashmap",
+ "err: %s, flags: 0x%x\n", strerror(errno), map_flags);
for (i = 0; i < max_entries; i++) {
key = i; value = key;
- assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
+ ret = bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST);
+ CHECK(ret != 0,
+ "can't update hashmap",
+ "err: %s\n", strerror(ret));
}
+ return fd;
+}
+
+static void test_hashmap_walk(int task, void *data)
+{
+ int fd, i, max_entries = 1000;
+ long long key, value, next_key;
+ bool next_key_valid = true;
+
+ fd = helper_fill_hashmap(max_entries);
+
for (i = 0; bpf_map_get_next_key(fd, !i ? NULL : &key,
&next_key) == 0; i++) {
key = next_key;
@@ -306,6 +318,39 @@ static void test_hashmap_walk(int task, void *data)
close(fd);
}
+static void test_hashmap_zero_seed(void)
+{
+ int i, first, second, old_flags;
+ long long key, next_first, next_second;
+
+ old_flags = map_flags;
+ map_flags |= BPF_F_ZERO_SEED;
+
+ first = helper_fill_hashmap(3);
+ second = helper_fill_hashmap(3);
+
+ for (i = 0; ; i++) {
+ void *key_ptr = !i ? NULL : &key;
+
+ if (bpf_map_get_next_key(first, key_ptr, &next_first) != 0)
+ break;
+
+ CHECK(bpf_map_get_next_key(second, key_ptr, &next_second) != 0,
+ "next_key for second map must succeed",
+ "key_ptr: %p", key_ptr);
+ CHECK(next_first != next_second,
+ "keys must match",
+ "i: %d first: %lld second: %lld\n", i,
+ next_first, next_second);
+
+ key = next_first;
+ }
+
+ map_flags = old_flags;
+ close(first);
+ close(second);
+}
+
static void test_arraymap(int task, void *data)
{
int key, next_key, fd;
@@ -1080,6 +1125,94 @@ out_sockmap:
exit(1);
}
+#define MAPINMAP_PROG "./test_map_in_map.o"
+static void test_map_in_map(void)
+{
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct bpf_map *map;
+ int mim_fd, fd, err;
+ int pos = 0;
+
+ obj = bpf_object__open(MAPINMAP_PROG);
+
+ fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int), sizeof(int),
+ 2, 0);
+ if (fd < 0) {
+ printf("Failed to create hashmap '%s'!\n", strerror(errno));
+ exit(1);
+ }
+
+ map = bpf_object__find_map_by_name(obj, "mim_array");
+ if (IS_ERR(map)) {
+ printf("Failed to load array of maps from test prog\n");
+ goto out_map_in_map;
+ }
+ err = bpf_map__set_inner_map_fd(map, fd);
+ if (err) {
+ printf("Failed to set inner_map_fd for array of maps\n");
+ goto out_map_in_map;
+ }
+
+ map = bpf_object__find_map_by_name(obj, "mim_hash");
+ if (IS_ERR(map)) {
+ printf("Failed to load hash of maps from test prog\n");
+ goto out_map_in_map;
+ }
+ err = bpf_map__set_inner_map_fd(map, fd);
+ if (err) {
+ printf("Failed to set inner_map_fd for hash of maps\n");
+ goto out_map_in_map;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ bpf_program__set_xdp(prog);
+ }
+ bpf_object__load(obj);
+
+ map = bpf_object__find_map_by_name(obj, "mim_array");
+ if (IS_ERR(map)) {
+ printf("Failed to load array of maps from test prog\n");
+ goto out_map_in_map;
+ }
+ mim_fd = bpf_map__fd(map);
+ if (mim_fd < 0) {
+ printf("Failed to get descriptor for array of maps\n");
+ goto out_map_in_map;
+ }
+
+ err = bpf_map_update_elem(mim_fd, &pos, &fd, 0);
+ if (err) {
+ printf("Failed to update array of maps\n");
+ goto out_map_in_map;
+ }
+
+ map = bpf_object__find_map_by_name(obj, "mim_hash");
+ if (IS_ERR(map)) {
+ printf("Failed to load hash of maps from test prog\n");
+ goto out_map_in_map;
+ }
+ mim_fd = bpf_map__fd(map);
+ if (mim_fd < 0) {
+ printf("Failed to get descriptor for hash of maps\n");
+ goto out_map_in_map;
+ }
+
+ err = bpf_map_update_elem(mim_fd, &pos, &fd, 0);
+ if (err) {
+ printf("Failed to update hash of maps\n");
+ goto out_map_in_map;
+ }
+
+ close(fd);
+ bpf_object__close(obj);
+ return;
+
+out_map_in_map:
+ close(fd);
+ exit(1);
+}
+
#define MAP_SIZE (32 * 1024)
static void test_map_large(void)
@@ -1534,6 +1667,7 @@ static void run_all_tests(void)
test_hashmap(0, NULL);
test_hashmap_percpu(0, NULL);
test_hashmap_walk(0, NULL);
+ test_hashmap_zero_seed();
test_arraymap(0, NULL);
test_arraymap_percpu(0, NULL);
@@ -1554,6 +1688,8 @@ static void run_all_tests(void)
test_queuemap(0, NULL);
test_stackmap(0, NULL);
+
+ test_map_in_map();
}
int main(void)
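BPF_F_ZERO_SEED zeroes the per-map hash seed that is otherwise chosen at random, so two identically-populated hash maps bucket, and therefore iterate, in the same order; that is the property test_hashmap_zero_seed asserts above. Creating such a map is just the extra flag (a sketch; the flag is meant for tests and benchmarks, since it drops the collision-randomization hardening):

	int fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
				sizeof(long long), 3, BPF_F_ZERO_SEED);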
diff --git a/tools/testing/selftests/bpf/test_netcnt.c b/tools/testing/selftests/bpf/test_netcnt.c
index 7887df693399..44ed7f29f8ab 100644
--- a/tools/testing/selftests/bpf/test_netcnt.c
+++ b/tools/testing/selftests/bpf/test_netcnt.c
@@ -81,7 +81,10 @@ int main(int argc, char **argv)
goto err;
}
- assert(system("ping localhost -6 -c 10000 -f -q > /dev/null") == 0);
+ if (system("which ping6 &>/dev/null") == 0)
+ assert(!system("ping6 localhost -c 10000 -f -q > /dev/null"));
+ else
+ assert(!system("ping -6 localhost -c 10000 -f -q > /dev/null"));
if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL,
&prog_cnt)) {
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 2d3c04f45530..126fc624290d 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -51,10 +51,10 @@ static struct {
struct iphdr iph;
struct tcphdr tcp;
} __packed pkt_v4 = {
- .eth.h_proto = bpf_htons(ETH_P_IP),
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = 6,
- .iph.tot_len = bpf_htons(MAGIC_BYTES),
+ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
};
@@ -64,13 +64,13 @@ static struct {
struct ipv6hdr iph;
struct tcphdr tcp;
} __packed pkt_v6 = {
- .eth.h_proto = bpf_htons(ETH_P_IPV6),
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.iph.nexthdr = 6,
- .iph.payload_len = bpf_htons(MAGIC_BYTES),
+ .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
};
-#define CHECK(condition, tag, format...) ({ \
+#define _CHECK(condition, tag, duration, format...) ({ \
int __ret = !!(condition); \
if (__ret) { \
error_cnt++; \
@@ -83,6 +83,11 @@ static struct {
__ret; \
})
+#define CHECK(condition, tag, format...) \
+ _CHECK(condition, tag, duration, format)
+#define CHECK_ATTR(condition, tag, format...) \
+ _CHECK(condition, tag, tattr.duration, format)
+
static int bpf_find_map(const char *test, struct bpf_object *obj,
const char *name)
{
@@ -124,6 +129,53 @@ static void test_pkt_access(void)
bpf_object__close(obj);
}
+static void test_prog_run_xattr(void)
+{
+ const char *file = "./test_pkt_access.o";
+ struct bpf_object *obj;
+ char buf[10];
+ int err;
+ struct bpf_prog_test_run_attr tattr = {
+ .repeat = 1,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .data_out = buf,
+ .data_size_out = 5,
+ };
+
+ err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
+ &tattr.prog_fd);
+ if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
+ return;
+
+ memset(buf, 0, sizeof(buf));
+
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
+ "err %d errno %d retval %d\n", err, errno, tattr.retval);
+
+ CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
+ "incorrect output size, want %lu have %u\n",
+ sizeof(pkt_v4), tattr.data_size_out);
+
+ CHECK_ATTR(buf[5] != 0, "overflow",
+ "BPF_PROG_TEST_RUN ignored size hint\n");
+
+ tattr.data_out = NULL;
+ tattr.data_size_out = 0;
+ errno = 0;
+
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err || errno || tattr.retval, "run_no_output",
+ "err %d errno %d retval %d\n", err, errno, tattr.retval);
+
+ tattr.data_size_out = 1;
+ err = bpf_prog_test_run_xattr(&tattr);
+ CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);
+
+ bpf_object__close(obj);
+}
+
static void test_xdp(void)
{
struct vip key4 = {.protocol = 6, .family = AF_INET};
@@ -524,7 +576,7 @@ static void test_bpf_obj_id(void)
load_time < now - 60 || load_time > now + 60 ||
prog_infos[i].created_by_uid != my_uid ||
prog_infos[i].nr_map_ids != 1 ||
- *(int *)prog_infos[i].map_ids != map_infos[i].id ||
+ *(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
strcmp((char *)prog_infos[i].name, expected_prog_name),
"get-prog-info(fd)",
"err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
@@ -539,7 +591,7 @@ static void test_bpf_obj_id(void)
load_time, now,
prog_infos[i].created_by_uid, my_uid,
prog_infos[i].nr_map_ids, 1,
- *(int *)prog_infos[i].map_ids, map_infos[i].id,
+ *(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
prog_infos[i].name, expected_prog_name))
goto done;
}
@@ -585,7 +637,7 @@ static void test_bpf_obj_id(void)
bzero(&prog_info, sizeof(prog_info));
info_len = sizeof(prog_info);
- saved_map_id = *(int *)(prog_infos[i].map_ids);
+ saved_map_id = *(int *)((long)prog_infos[i].map_ids);
prog_info.map_ids = prog_infos[i].map_ids;
prog_info.nr_map_ids = 2;
err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
@@ -593,12 +645,12 @@ static void test_bpf_obj_id(void)
prog_infos[i].xlated_prog_insns = 0;
CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
memcmp(&prog_info, &prog_infos[i], info_len) ||
- *(int *)prog_info.map_ids != saved_map_id,
+ *(int *)(long)prog_info.map_ids != saved_map_id,
"get-prog-info(next_id->fd)",
"err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
err, errno, info_len, sizeof(struct bpf_prog_info),
memcmp(&prog_info, &prog_infos[i], info_len),
- *(int *)prog_info.map_ids, saved_map_id);
+ *(int *)(long)prog_info.map_ids, saved_map_id);
close(prog_fd);
}
CHECK(nr_id_found != nr_iters,
@@ -1703,7 +1755,7 @@ static void test_reference_tracking()
const char *file = "./test_sk_lookup_kern.o";
struct bpf_object *obj;
struct bpf_program *prog;
- __u32 duration;
+ __u32 duration = 0;
int err = 0;
obj = bpf_object__open(file);
@@ -1837,6 +1889,7 @@ int main(void)
jit_enabled = is_jit_enabled();
test_pkt_access();
+ test_prog_run_xattr();
test_xdp();
test_xdp_adjust_tail();
test_l4lb_all();
diff --git a/tools/testing/selftests/bpf/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/test_sk_lookup_kern.c
index b745bdc08c2b..e21cd736c196 100644
--- a/tools/testing/selftests/bpf/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/test_sk_lookup_kern.c
@@ -72,7 +72,7 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb)
return TC_ACT_SHOT;
tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
- sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, 0, 0);
+ sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
if (sk)
bpf_sk_release(sk);
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
@@ -84,7 +84,7 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb)
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
- sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk)
bpf_sk_release(sk);
return 0;
@@ -97,7 +97,7 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb)
struct bpf_sock *sk;
__u32 family = 0;
- sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk) {
bpf_sk_release(sk);
family = sk->family;
@@ -112,7 +112,7 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb)
struct bpf_sock *sk;
__u32 family;
- sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
if (sk) {
sk += 1;
bpf_sk_release(sk);
@@ -127,7 +127,7 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
struct bpf_sock *sk;
__u32 family;
- sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
sk += 1;
if (sk)
bpf_sk_release(sk);
@@ -139,7 +139,7 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
- bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
return 0;
}
@@ -149,7 +149,7 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb)
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
- sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
bpf_sk_release(sk);
bpf_sk_release(sk);
return 0;
@@ -161,7 +161,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb)
struct bpf_sock_tuple tuple = {};
struct bpf_sock *sk;
- sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
bpf_sk_release(sk);
return 0;
}
@@ -169,7 +169,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb)
void lookup_no_release(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
- bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+ bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
SEC("fail_no_release_subcall")
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index aeeb76a54d63..73b7493d4120 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -574,24 +574,44 @@ static int bind4_prog_load(const struct sock_addr_test *test)
/* if (sk.family == AF_INET && */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock_addr, family)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 16),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 24),
/* (sk.type == SOCK_DGRAM || sk.type == SOCK_STREAM) && */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock_addr, type)),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_DGRAM, 1),
BPF_JMP_A(1),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_STREAM, 12),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_STREAM, 20),
/* 1st_byte_of_user_ip4 == expected && */
BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock_addr, user_ip4)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[0], 10),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[0], 18),
+
+ /* 2nd_byte_of_user_ip4 == expected && */
+ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock_addr, user_ip4) + 1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[1], 16),
+
+ /* 3rd_byte_of_user_ip4 == expected && */
+ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock_addr, user_ip4) + 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[2], 14),
+
+ /* 4th_byte_of_user_ip4 == expected && */
+ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock_addr, user_ip4) + 3),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[3], 12),
/* 1st_half_of_user_ip4 == expected && */
BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_6,
offsetof(struct bpf_sock_addr, user_ip4)),
- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[0], 8),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[0], 10),
+
+ /* 2nd_half_of_user_ip4 == expected && */
+ BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_6,
+ offsetof(struct bpf_sock_addr, user_ip4) + 2),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[1], 8),
/* whole_user_ip4 == expected) { */
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
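The offset changes above are mechanical: each check is two instructions (a BPF_LDX_MEM plus a BPF_JMP_IMM), and a conditional jump's offset is counted in instructions from the one that follows it. The four new checks (three byte loads and the second half-load) add eight instructions, so every jump that spans all of them grows by 8 (16 to 24, 12 to 20, 10 to 18), while the first half-load check is followed only by the new second half-load check and grows by 2 (8 to 10).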
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 622ade0a0957..e85a771f607b 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -79,6 +79,8 @@ int txmsg_start;
int txmsg_end;
int txmsg_start_push;
int txmsg_end_push;
+int txmsg_start_pop;
+int txmsg_pop;
int txmsg_ingress;
int txmsg_skb;
int ktls;
@@ -104,6 +106,8 @@ static const struct option long_options[] = {
{"txmsg_end", required_argument, NULL, 'e'},
{"txmsg_start_push", required_argument, NULL, 'p'},
{"txmsg_end_push", required_argument, NULL, 'q'},
+ {"txmsg_start_pop", required_argument, NULL, 'w'},
+ {"txmsg_pop", required_argument, NULL, 'x'},
{"txmsg_ingress", no_argument, &txmsg_ingress, 1 },
{"txmsg_skb", no_argument, &txmsg_skb, 1 },
{"ktls", no_argument, &ktls, 1 },
@@ -473,13 +477,27 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
clock_gettime(CLOCK_MONOTONIC, &s->end);
} else {
int slct, recvp = 0, recv, max_fd = fd;
+ float total_bytes, txmsg_pop_total;
int fd_flags = O_NONBLOCK;
struct timeval timeout;
- float total_bytes;
fd_set w;
fcntl(fd, fd_flags);
+ /* Account for popped bytes: each apply iteration calls the
+ * msg_pop_data helper, so scale the expected pop total by the
+ * number of apply iterations. Note the user of the tool can
+ * create cases where no data is ever sent by combining
+ * pop/push/pull/apply; for example, txmsg_apply 1 with
+ * txmsg_pop 1 applies 1B at a time but each iteration then
+ * pops that 1B, so nothing is sent. This is really only
+ * useful for testing edge cases in code paths.
+ */
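+ /* Worked example with illustrative numbers: iov_count=1,
+ * iov_length=10, cnt=1, txmsg_apply=2 and txmsg_pop=1 give
+ * 10/2 = 5 apply iterations, each popping 1B, so the expected
+ * total_bytes is 10 - 5 = 5.
+ */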
total_bytes = (float)iov_count * (float)iov_length * (float)cnt;
+ txmsg_pop_total = txmsg_pop;
+ if (txmsg_apply)
+ txmsg_pop_total *= (total_bytes / txmsg_apply);
+ total_bytes -= txmsg_pop_total;
err = clock_gettime(CLOCK_MONOTONIC, &s->start);
if (err < 0)
perror("recv start time: ");
@@ -488,7 +506,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
timeout.tv_sec = 0;
timeout.tv_usec = 300000;
} else {
- timeout.tv_sec = 1;
+ timeout.tv_sec = 3;
timeout.tv_usec = 0;
}
@@ -503,7 +521,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
goto out_errno;
} else if (!slct) {
if (opt->verbose)
- fprintf(stderr, "unexpected timeout\n");
+ fprintf(stderr, "unexpected timeout: recved %zu/%f pop_total %f\n", s->bytes_recvd, total_bytes, txmsg_pop_total);
errno = -EIO;
clock_gettime(CLOCK_MONOTONIC, &s->end);
goto out_errno;
@@ -619,7 +637,7 @@ static int sendmsg_test(struct sockmap_options *opt)
iov_count = 1;
err = msg_loop(rx_fd, iov_count, iov_buf,
cnt, &s, false, opt);
- if (err && opt->verbose)
+ if (opt->verbose)
fprintf(stderr,
"msg_loop_rx: iov_count %i iov_buf %i cnt %i err %i\n",
iov_count, iov_buf, cnt, err);
@@ -931,6 +949,39 @@ run:
}
}
+ if (txmsg_start_pop) {
+ i = 4;
+ err = bpf_map_update_elem(map_fd[5],
+ &i, &txmsg_start_pop, BPF_ANY);
+ if (err) {
+ fprintf(stderr,
+ "ERROR: bpf_map_update_elem %i@%i (txmsg_start_pop): %d (%s)\n",
+ txmsg_start_pop, i, err, strerror(errno));
+ goto out;
+ }
+ } else {
+ i = 4;
+ bpf_map_update_elem(map_fd[5],
+ &i, &txmsg_start_pop, BPF_ANY);
+ }
+
+ if (txmsg_pop) {
+ i = 5;
+ err = bpf_map_update_elem(map_fd[5],
+ &i, &txmsg_pop, BPF_ANY);
+ if (err) {
+ fprintf(stderr,
+ "ERROR: bpf_map_update_elem %i@%i (txmsg_pop): %d (%s)\n",
+ txmsg_pop, i, err, strerror(errno));
+ goto out;
+ }
+ } else {
+ i = 5;
+ bpf_map_update_elem(map_fd[5],
+ &i, &txmsg_pop, BPF_ANY);
+
+ }
+
if (txmsg_ingress) {
int in = BPF_F_INGRESS;
@@ -1082,6 +1133,11 @@ static void test_options(char *options)
snprintf(tstr, OPTSTRING, "end %d,", txmsg_end);
strncat(options, tstr, OPTSTRING);
}
+ if (txmsg_start_pop) {
+ snprintf(tstr, OPTSTRING, "pop (%d,%d),",
+ txmsg_start_pop, txmsg_start_pop + txmsg_pop);
+ strncat(options, tstr, OPTSTRING);
+ }
if (txmsg_ingress)
strncat(options, "ingress,", OPTSTRING);
if (txmsg_skb)
@@ -1264,6 +1320,7 @@ static int test_mixed(int cgrp)
txmsg_apply = txmsg_cork = 0;
txmsg_start = txmsg_end = 0;
txmsg_start_push = txmsg_end_push = 0;
+ txmsg_start_pop = txmsg_pop = 0;
/* Test small and large iov_count values with pass/redir/apply/cork */
txmsg_pass = 1;
@@ -1383,6 +1440,19 @@ static int test_start_end(int cgrp)
txmsg_end = 2;
txmsg_start_push = 1;
txmsg_end_push = 2;
+ txmsg_start_pop = 1;
+ txmsg_pop = 1;
+ err = test_txmsg(cgrp);
+ if (err)
+ goto out;
+
+ /* Cut a byte of pushed data but leave remaining in place */
+ txmsg_start = 1;
+ txmsg_end = 2;
+ txmsg_start_push = 1;
+ txmsg_end_push = 3;
+ txmsg_start_pop = 1;
+ txmsg_pop = 1;
err = test_txmsg(cgrp);
if (err)
goto out;
@@ -1393,6 +1463,9 @@ static int test_start_end(int cgrp)
opt.iov_length = 100;
txmsg_cork = 1600;
+ txmsg_start_pop = 0;
+ txmsg_pop = 0;
+
for (i = 99; i <= 1600; i += 500) {
txmsg_start = 0;
txmsg_end = i;
@@ -1403,6 +1476,17 @@ static int test_start_end(int cgrp)
goto out;
}
+ /* Test pop data in middle of cork */
+ for (i = 99; i <= 1600; i += 500) {
+ txmsg_start_pop = 10;
+ txmsg_pop = i;
+ err = test_exec(cgrp, &opt);
+ if (err)
+ goto out;
+ }
+ txmsg_start_pop = 0;
+ txmsg_pop = 0;
+
/* Test start/end with cork but pull data in middle */
for (i = 199; i <= 1600; i += 500) {
txmsg_start = 100;
@@ -1423,6 +1507,15 @@ static int test_start_end(int cgrp)
if (err)
goto out;
+ /* Test pop with cork pulling last sg entry */
+ txmsg_start_pop = 1500;
+ txmsg_pop = 1600;
+ err = test_exec(cgrp, &opt);
+ if (err)
+ goto out;
+ txmsg_start_pop = 0;
+ txmsg_pop = 0;
+
/* Test start/end pull of single byte in last page */
txmsg_start = 1111;
txmsg_end = 1112;
@@ -1432,6 +1525,13 @@ static int test_start_end(int cgrp)
if (err)
goto out;
+ /* Test pop of single byte in last page */
+ txmsg_start_pop = 1111;
+ txmsg_pop = 1112;
+ err = test_exec(cgrp, &opt);
+ if (err)
+ goto out;
+
/* Test start/end with end < start */
txmsg_start = 1111;
txmsg_end = 0;
@@ -1456,7 +1556,20 @@ static int test_start_end(int cgrp)
txmsg_start_push = 1601;
txmsg_end_push = 1600;
err = test_exec(cgrp, &opt);
+ if (err)
+ goto out;
+
+ /* Test pop with start > data */
+ txmsg_start_pop = 1601;
+ txmsg_pop = 1;
+ err = test_exec(cgrp, &opt);
+ if (err)
+ goto out;
+ /* Test pop with pop range > data */
+ txmsg_start_pop = 1599;
+ txmsg_pop = 10;
+ err = test_exec(cgrp, &opt);
out:
txmsg_start = 0;
txmsg_end = 0;
@@ -1641,6 +1754,12 @@ int main(int argc, char **argv)
case 'q':
txmsg_end_push = atoi(optarg);
break;
+ case 'w':
+ txmsg_start_pop = atoi(optarg);
+ break;
+ case 'x':
+ txmsg_pop = atoi(optarg);
+ break;
case 'a':
txmsg_apply = atoi(optarg);
break;
diff --git a/tools/testing/selftests/bpf/test_sockmap_kern.h b/tools/testing/selftests/bpf/test_sockmap_kern.h
index 14b8bbac004f..e7639f66a941 100644
--- a/tools/testing/selftests/bpf/test_sockmap_kern.h
+++ b/tools/testing/selftests/bpf/test_sockmap_kern.h
@@ -74,7 +74,7 @@ struct bpf_map_def SEC("maps") sock_bytes = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(int),
- .max_entries = 4
+ .max_entries = 6
};
struct bpf_map_def SEC("maps") sock_redir_flags = {
@@ -181,8 +181,8 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
SEC("sk_msg1")
int bpf_prog4(struct sk_msg_md *msg)
{
- int *bytes, zero = 0, one = 1, two = 2, three = 3;
- int *start, *end, *start_push, *end_push;
+ int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
+ int *start, *end, *start_push, *end_push, *start_pop, *pop;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -198,15 +198,19 @@ int bpf_prog4(struct sk_msg_md *msg)
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
if (start_push && end_push)
bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
+ pop = bpf_map_lookup_elem(&sock_bytes, &five);
+ if (start_pop && pop)
+ bpf_msg_pop_data(msg, *start_pop, *pop, 0);
return SK_PASS;
}
SEC("sk_msg2")
int bpf_prog5(struct sk_msg_md *msg)
{
- int zero = 0, one = 1, two = 2, three = 3;
- int *start, *end, *start_push, *end_push;
- int *bytes, len1, len2 = 0, len3;
+ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
+ int *start, *end, *start_push, *end_push, *start_pop, *pop;
+ int *bytes, len1, len2 = 0, len3, len4;
int err1 = -1, err2 = -1;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
@@ -247,6 +251,20 @@ int bpf_prog5(struct sk_msg_md *msg)
bpf_printk("sk_msg2: length push_update %i->%i\n",
len2 ? len2 : len1, len3);
}
+ start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
+ pop = bpf_map_lookup_elem(&sock_bytes, &five);
+ if (start_pop && pop) {
+ int err;
+
+ bpf_printk("sk_msg2: pop(%i@%i)\n",
+ *start_pop, *pop);
+ err = bpf_msg_pop_data(msg, *start_pop, *pop, 0);
+ if (err)
+ bpf_printk("sk_msg2: pop_data err %i\n", err);
+ len4 = (__u64)msg->data_end - (__u64)msg->data;
+ bpf_printk("sk_msg2: length pop_data %i->%i\n",
+ len1 ? len1 : 0, len4);
+ }
bpf_printk("sk_msg2: data length %i err1 %i err2 %i\n",
len1, err1, err2);
@@ -256,8 +274,8 @@ int bpf_prog5(struct sk_msg_md *msg)
SEC("sk_msg3")
int bpf_prog6(struct sk_msg_md *msg)
{
- int *bytes, *start, *end, *start_push, *end_push, *f;
- int zero = 0, one = 1, two = 2, three = 3, key = 0;
+ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
+ int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
__u64 flags = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
@@ -277,6 +295,11 @@ int bpf_prog6(struct sk_msg_md *msg)
if (start_push && end_push)
bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
+ pop = bpf_map_lookup_elem(&sock_bytes, &five);
+ if (start_pop && pop)
+ bpf_msg_pop_data(msg, *start_pop, *pop, 0);
+
f = bpf_map_lookup_elem(&sock_redir_flags, &zero);
if (f && *f) {
key = 2;
@@ -292,8 +315,9 @@ int bpf_prog6(struct sk_msg_md *msg)
SEC("sk_msg4")
int bpf_prog7(struct sk_msg_md *msg)
{
- int zero = 0, one = 1, two = 2, three = 3, len1, len2 = 0, len3;
- int *bytes, *start, *end, *start_push, *end_push, *f;
+ int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
+ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
+ int len1, len2 = 0, len3, len4;
int err1 = 0, err2 = 0, key = 0;
__u64 flags = 0;
@@ -335,6 +359,22 @@ int bpf_prog7(struct sk_msg_md *msg)
len2 ? len2 : len1, len3);
}
+ start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
+ pop = bpf_map_lookup_elem(&sock_bytes, &five);
+ if (start_pop && pop) {
+ int err;
+
+ bpf_printk("sk_msg4: pop(%i@%i)\n",
+ *start_pop, *pop);
+ err = bpf_msg_pop_data(msg, *start_pop, *pop, 0);
+ if (err)
+ bpf_printk("sk_msg4: pop_data err %i\n", err);
+ len4 = (__u64)msg->data_end - (__u64)msg->data;
+ bpf_printk("sk_msg4: length pop_data %i->%i\n",
+ len1 ? len1 : 0, len4);
+ }
+
f = bpf_map_lookup_elem(&sock_redir_flags, &zero);
if (f && *f) {
key = 2;
@@ -389,8 +429,8 @@ int bpf_prog9(struct sk_msg_md *msg)
SEC("sk_msg7")
int bpf_prog10(struct sk_msg_md *msg)
{
- int *bytes, *start, *end, *start_push, *end_push;
- int zero = 0, one = 1, two = 2, three = 3;
+ int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
+ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
@@ -406,7 +446,11 @@ int bpf_prog10(struct sk_msg_md *msg)
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
if (start_push && end_push)
bpf_msg_push_data(msg, *start_push, *end_push, 0);
-
+ start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
+ pop = bpf_map_lookup_elem(&sock_bytes, &five);
+ if (start_pop && pop)
+ bpf_msg_pop_data(msg, *start_pop, *pop, 0);
+ bpf_printk("return sk drop\n");
return SK_DROP;
}
diff --git a/tools/testing/selftests/bpf/test_tcpnotify.h b/tools/testing/selftests/bpf/test_tcpnotify.h
new file mode 100644
index 000000000000..8b6cea030bfc
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpnotify.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#ifndef _TEST_TCPBPF_H
+#define _TEST_TCPBPF_H
+
+struct tcpnotify_globals {
+ __u32 total_retrans;
+ __u32 ncalls;
+};
+
+struct tcp_notifier {
+ __u8 type;
+ __u8 subtype;
+ __u8 source;
+ __u8 hash;
+};
+
+#define TESTPORT 12877
+#endif
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/test_tcpnotify_kern.c
new file mode 100644
index 000000000000..edbca203ce2d
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpnotify_kern.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/tcp.h>
+#include <netinet/in.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+#include "test_tcpnotify.h"
+
+struct bpf_map_def SEC("maps") global_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(struct tcpnotify_globals),
+ .max_entries = 4,
+};
+
+struct bpf_map_def SEC("maps") perf_event_map = {
+ .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(__u32),
+ .max_entries = 2,
+};
+
+int _version SEC("version") = 1;
+
+SEC("sockops")
+int bpf_testcb(struct bpf_sock_ops *skops)
+{
+ int rv = -1;
+ int op;
+
+ op = (int) skops->op;
+
+ if (bpf_ntohl(skops->remote_port) != TESTPORT) {
+ skops->reply = -1;
+ return 0;
+ }
+
+ switch (op) {
+ case BPF_SOCK_OPS_TIMEOUT_INIT:
+ case BPF_SOCK_OPS_RWND_INIT:
+ case BPF_SOCK_OPS_NEEDS_ECN:
+ case BPF_SOCK_OPS_BASE_RTT:
+ case BPF_SOCK_OPS_RTO_CB:
+ rv = 1;
+ break;
+
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ case BPF_SOCK_OPS_TCP_LISTEN_CB:
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ bpf_sock_ops_cb_flags_set(skops, (BPF_SOCK_OPS_RETRANS_CB_FLAG|
+ BPF_SOCK_OPS_RTO_CB_FLAG));
+ rv = 1;
+ break;
+ case BPF_SOCK_OPS_RETRANS_CB: {
+ __u32 key = 0;
+ struct tcpnotify_globals g, *gp;
+ struct tcp_notifier msg = {
+ .type = 0xde,
+ .subtype = 0xad,
+ .source = 0xbe,
+ .hash = 0xef,
+ };
+
+ rv = 1;
+
+ /* Update results */
+ gp = bpf_map_lookup_elem(&global_map, &key);
+ if (!gp)
+ break;
+ g = *gp;
+ g.total_retrans = skops->total_retrans;
+ g.ncalls++;
+ bpf_map_update_elem(&global_map, &key, &g,
+ BPF_ANY);
+ bpf_perf_event_output(skops, &perf_event_map,
+ BPF_F_CURRENT_CPU,
+ &msg, sizeof(msg));
+ }
+ break;
+ default:
+ rv = -1;
+ }
+ skops->reply = rv;
+ return 1;
+}
+char _license[] SEC("license") = "GPL";
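The RETRANS_CB branch above can only deliver its tcp_notifier if user space has installed a perf fd in perf_event_map; since bpf_perf_event_output() with BPF_F_CURRENT_CPU indexes the array by the running CPU, the fd has to be opened for the CPU the callback runs on (the test pins itself to CPU 0 for this reason). The handshake, condensed from setup_bpf_perf_event() in the next file (headers as in that file; error handling omitted):

	struct perf_event_attr attr = {
		.sample_type = PERF_SAMPLE_RAW,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
	};
	int key = 0;
	int pmu_fd = syscall(__NR_perf_event_open, &attr,
			     -1 /* pid: any */, 0 /* cpu 0 */,
			     -1 /* group */, 0);

	bpf_map_update_elem(map_fd, &key, &pmu_fd, BPF_ANY);
	ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);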
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
new file mode 100644
index 000000000000..ff3c4522aed6
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <pthread.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <asm/types.h>
+#include <sys/syscall.h>
+#include <errno.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <sys/socket.h>
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <sys/ioctl.h>
+#include <linux/rtnetlink.h>
+#include <signal.h>
+#include <linux/perf_event.h>
+
+#include "bpf_rlimit.h"
+#include "bpf_util.h"
+#include "cgroup_helpers.h"
+
+#include "test_tcpnotify.h"
+#include "trace_helpers.h"
+
+#define SOCKET_BUFFER_SIZE (getpagesize() < 8192L ? getpagesize() : 8192L)
+
+pthread_t tid;
+int rx_callbacks;
+
+static int dummyfn(void *data, int size)
+{
+ struct tcp_notifier *t = data;
+
+ if (t->type != 0xde || t->subtype != 0xad ||
+ t->source != 0xbe || t->hash != 0xef)
+ return 1;
+ rx_callbacks++;
+ return 0;
+}
+
+void tcp_notifier_poller(int fd)
+{
+ while (1)
+ perf_event_poller(fd, dummyfn);
+}
+
+static void *poller_thread(void *arg)
+{
+ int fd = *(int *)arg;
+
+ tcp_notifier_poller(fd);
+ return arg;
+}
+
+int verify_result(const struct tcpnotify_globals *result)
+{
+ return (result->ncalls > 0 && result->ncalls == rx_callbacks ? 0 : 1);
+}
+
+static int bpf_find_map(const char *test, struct bpf_object *obj,
+ const char *name)
+{
+ struct bpf_map *map;
+
+ map = bpf_object__find_map_by_name(obj, name);
+ if (!map) {
+ printf("%s:FAIL:map '%s' not found\n", test, name);
+ return -1;
+ }
+ return bpf_map__fd(map);
+}
+
+static int setup_bpf_perf_event(int mapfd)
+{
+ struct perf_event_attr attr = {
+ .sample_type = PERF_SAMPLE_RAW,
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_BPF_OUTPUT,
+ };
+ int key = 0;
+ int pmu_fd;
+
+ pmu_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
+ if (pmu_fd < 0)
+ return pmu_fd;
+ bpf_map_update_elem(mapfd, &key, &pmu_fd, BPF_ANY);
+
+ ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
+ return pmu_fd;
+}
+
+int main(int argc, char **argv)
+{
+ const char *file = "test_tcpnotify_kern.o";
+ int prog_fd, map_fd, perf_event_fd;
+ struct tcpnotify_globals g = {0};
+ const char *cg_path = "/foo";
+ int error = EXIT_FAILURE;
+ struct bpf_object *obj;
+ int cg_fd = -1;
+ __u32 key = 0;
+ int rv;
+ char test_script[80];
+ int pmu_fd;
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(0, &cpuset);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
+
+ if (setup_cgroup_environment())
+ goto err;
+
+ cg_fd = create_and_get_cgroup(cg_path);
+ if (!cg_fd)
+ goto err;
+
+ if (join_cgroup(cg_path))
+ goto err;
+
+ if (bpf_prog_load(file, BPF_PROG_TYPE_SOCK_OPS, &obj, &prog_fd)) {
+ printf("FAILED: load_bpf_file failed for: %s\n", file);
+ goto err;
+ }
+
+ rv = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_SOCK_OPS, 0);
+ if (rv) {
+ printf("FAILED: bpf_prog_attach: %d (%s)\n",
+ error, strerror(errno));
+ goto err;
+ }
+
+ perf_event_fd = bpf_find_map(__func__, obj, "perf_event_map");
+ if (perf_event_fd < 0)
+ goto err;
+
+ map_fd = bpf_find_map(__func__, obj, "global_map");
+ if (map_fd < 0)
+ goto err;
+
+ pmu_fd = setup_bpf_perf_event(perf_event_fd);
+ if (pmu_fd < 0 || perf_event_mmap(pmu_fd) < 0)
+ goto err;
+
+ pthread_create(&tid, NULL, poller_thread, (void *)&pmu_fd);
+
+ sprintf(test_script,
+ "/usr/sbin/iptables -A INPUT -p tcp --dport %d -j DROP",
+ TESTPORT);
+ system(test_script);
+
+ sprintf(test_script,
+ "/usr/bin/nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
+ TESTPORT);
+ system(test_script);
+
+ sprintf(test_script,
+ "/usr/sbin/iptables -D INPUT -p tcp --dport %d -j DROP",
+ TESTPORT);
+ system(test_script);
+
+ rv = bpf_map_lookup_elem(map_fd, &key, &g);
+ if (rv != 0) {
+ printf("FAILED: bpf_map_lookup_elem returns %d\n", rv);
+ goto err;
+ }
+
+ sleep(10);
+
+ if (verify_result(&g)) {
+ printf("FAILED: Wrong stats Expected %d calls, got %d\n",
+ g.ncalls, rx_callbacks);
+ goto err;
+ }
+
+ printf("PASSED!\n");
+ error = 0;
+err:
+ bpf_prog_detach(cg_fd, BPF_CGROUP_SOCK_OPS);
+ close(cg_fd);
+ cleanup_cgroup_environment();
+ return error;
+}
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 6f61df62f690..33f7d38849b8 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -49,6 +49,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 13
+#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -76,7 +77,7 @@ struct bpf_test {
int fixup_percpu_cgroup_storage[MAX_FIXUPS];
const char *errstr;
const char *errstr_unpriv;
- uint32_t retval, retval_unpriv;
+ uint32_t retval, retval_unpriv, insn_processed;
enum {
UNDEF,
ACCEPT,
@@ -86,6 +87,14 @@ struct bpf_test {
uint8_t flags;
__u8 data[TEST_DATA_LEN];
void (*fill_helper)(struct bpf_test *self);
+ uint8_t runs;
+ struct {
+ uint32_t retval, retval_unpriv;
+ union {
+ __u8 data[TEST_DATA_LEN];
+ __u64 data64[TEST_DATA_LEN / 8];
+ };
+ } retvals[MAX_TEST_RUNS];
};
/* Note we want this to be 64 bit aligned so that the end of our array is
@@ -721,8 +730,18 @@ static struct bpf_test tests[] = {
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
BPF_EXIT_INSN(),
},
- .result = REJECT,
- .errstr = "unknown opcode c4",
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "arsh32 on imm 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
+ BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = -16069393,
},
{
"arsh32 on reg",
@@ -732,8 +751,19 @@ static struct bpf_test tests[] = {
BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
- .result = REJECT,
- .errstr = "unknown opcode cc",
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
+ "arsh32 on reg 2",
+ .insns = {
+ BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
+ BPF_MOV64_IMM(BPF_REG_1, 15),
+ BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 43724,
},
{
"arsh64 on imm",
@@ -980,15 +1010,45 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
/* mess up with R1 pointer on stack */
BPF_ST_MEM(BPF_B, BPF_REG_10, -7, 0x23),
- /* fill back into R0 should fail */
+ /* fill back into R0 is fine for priv.
+ * R0 now becomes SCALAR_VALUE.
+ */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ /* Load from R0 should fail. */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
- .errstr = "corrupted spill",
+ .errstr = "R0 invalid mem access 'inv",
.result = REJECT,
},
{
+ "check corrupted spill/fill, LSB",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_H, BPF_REG_10, -8, 0xcafe),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ },
+ {
+ "check corrupted spill/fill, MSB",
+ .insns = {
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x12345678),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr_unpriv = "attempt to corrupt spilled",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = POINTER_VALUE,
+ },
+ {
"invalid src register in STX",
.insns = {
BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
@@ -1792,10 +1852,20 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
- "invalid 64B read of family in SK_MSG",
+ "valid access size in SK_MSG",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct sk_msg_md, size)),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SK_MSG,
+ },
+ {
+ "invalid 64B read of size in SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, family)),
+ offsetof(struct sk_msg_md, size)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
@@ -1806,10 +1876,10 @@ static struct bpf_test tests[] = {
"invalid read past end of SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
- offsetof(struct sk_msg_md, local_port) + 4),
+ offsetof(struct sk_msg_md, size) + 4),
BPF_EXIT_INSN(),
},
- .errstr = "R0 !read_ok",
+ .errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
@@ -1823,6 +1893,7 @@ static struct bpf_test tests[] = {
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"direct packet read for SK_MSG",
@@ -2026,29 +2097,27 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
},
{
- "check skb->hash byte load not permitted 1",
+ "check skb->hash byte load permitted 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 1),
BPF_EXIT_INSN(),
},
- .errstr = "invalid bpf_context access",
- .result = REJECT,
+ .result = ACCEPT,
},
{
- "check skb->hash byte load not permitted 2",
+ "check skb->hash byte load permitted 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, hash) + 2),
BPF_EXIT_INSN(),
},
- .errstr = "invalid bpf_context access",
- .result = REJECT,
+ .result = ACCEPT,
},
{
- "check skb->hash byte load not permitted 3",
+ "check skb->hash byte load permitted 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -2060,8 +2129,7 @@ static struct bpf_test tests[] = {
#endif
BPF_EXIT_INSN(),
},
- .errstr = "invalid bpf_context access",
- .result = REJECT,
+ .result = ACCEPT,
},
{
"check cb access: byte, wrong type",
@@ -2173,7 +2241,7 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
},
{
- "check skb->hash half load not permitted",
+ "check skb->hash half load permitted 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
#if __BYTE_ORDER == __LITTLE_ENDIAN
@@ -2185,10 +2253,43 @@ static struct bpf_test tests[] = {
#endif
BPF_EXIT_INSN(),
},
+ .result = ACCEPT,
+ },
+ {
+ "check skb->hash half load not permitted, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#endif
+ BPF_EXIT_INSN(),
+ },
.errstr = "invalid bpf_context access",
.result = REJECT,
},
{
+ "check skb->hash half load not permitted, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 3),
+#else
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, hash) + 1),
+#endif
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
"check cb access: half, wrong type",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -2418,6 +2519,10 @@ static struct bpf_test tests[] = {
offsetof(struct __sk_buff, tc_index)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tstamp)),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "",
@@ -2904,6 +3009,19 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
},
{
+ "alu32: mov u32 const",
+ .insns = {
+ BPF_MOV32_IMM(BPF_REG_7, 0),
+ BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
+ BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0,
+ },
+ {
"unpriv: partial copy of pointer",
.insns = {
BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
@@ -3249,6 +3367,7 @@ static struct bpf_test tests[] = {
.result = REJECT,
.errstr = "R0 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"raw_stack: skb_load_bytes, spilled regs corruption 2",
@@ -3279,6 +3398,7 @@ static struct bpf_test tests[] = {
.result = REJECT,
.errstr = "R3 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"raw_stack: skb_load_bytes, spilled regs + data",
@@ -3778,6 +3898,7 @@ static struct bpf_test tests[] = {
.errstr = "R2 invalid mem access 'inv'",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"direct packet access: test16 (arith on data_end)",
@@ -3880,6 +4001,7 @@ static struct bpf_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"direct packet access: test21 (x += pkt_ptr, 2)",
@@ -3905,6 +4027,7 @@ static struct bpf_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"direct packet access: test22 (x += pkt_ptr, 3)",
@@ -3935,6 +4058,7 @@ static struct bpf_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"direct packet access: test23 (x += pkt_ptr, 4)",
@@ -3961,6 +4085,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"direct packet access: test24 (x += pkt_ptr, 5)",
@@ -3986,6 +4111,7 @@ static struct bpf_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"direct packet access: test25 (marking on <, good access)",
@@ -5117,6 +5243,7 @@ static struct bpf_test tests[] = {
.result = REJECT,
.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid cgroup storage access 5",
@@ -5233,6 +5360,7 @@ static struct bpf_test tests[] = {
.result = REJECT,
.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid per-cpu cgroup storage access 5",
@@ -5270,6 +5398,31 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
+ "write tstamp from CGROUP_SKB",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "invalid bpf_context access off=152 size=8",
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
+ "read tstamp from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, tstamp)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+ },
+ {
"multiple registers share map_lookup_elem result",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 10),
@@ -7149,6 +7302,7 @@ static struct bpf_test tests[] = {
.errstr = "invalid mem access 'inv'",
.result = REJECT,
.result_unpriv = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"map element value illegal alu op, 5",
@@ -7171,6 +7325,7 @@ static struct bpf_test tests[] = {
.fixup_map_hash_48b = { 3 },
.errstr = "R0 invalid mem access 'inv'",
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"map element value is preserved across register spilling",
@@ -7664,6 +7819,7 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.retval = 0 /* csum_diff of 64-byte packet */,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
@@ -8576,7 +8732,7 @@ static struct bpf_test tests[] = {
BPF_JMP_IMM(BPF_JA, 0, 0, -7),
},
.fixup_map_hash_8b = { 4 },
- .errstr = "R0 invalid mem access 'inv'",
+ .errstr = "unbounded min value",
.result = REJECT,
},
{
@@ -9626,6 +9782,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data' > pkt_end, bad access 1",
@@ -9663,6 +9820,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_end > pkt_data', good access",
@@ -9701,6 +9859,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_end > pkt_data', bad access 2",
@@ -9719,6 +9878,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data' < pkt_end, good access",
@@ -9757,6 +9917,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data' < pkt_end, bad access 2",
@@ -9775,6 +9936,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_end < pkt_data', good access",
@@ -9792,6 +9954,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_end < pkt_data', bad access 1",
@@ -9829,6 +9992,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data' >= pkt_end, good access",
@@ -9865,6 +10029,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data' >= pkt_end, bad access 2",
@@ -9902,6 +10067,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_end >= pkt_data', bad access 1",
@@ -9940,6 +10106,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data' <= pkt_end, good access",
@@ -9958,6 +10125,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data' <= pkt_end, bad access 1",
@@ -9996,6 +10164,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_end <= pkt_data', good access",
@@ -10032,6 +10201,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_end <= pkt_data', bad access 2",
@@ -10068,6 +10238,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' > pkt_data, bad access 1",
@@ -10105,6 +10276,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data > pkt_meta', good access",
@@ -10143,6 +10315,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data > pkt_meta', bad access 2",
@@ -10161,6 +10334,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' < pkt_data, good access",
@@ -10199,6 +10373,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' < pkt_data, bad access 2",
@@ -10217,6 +10392,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data < pkt_meta', good access",
@@ -10234,6 +10410,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data < pkt_meta', bad access 1",
@@ -10271,6 +10448,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' >= pkt_data, good access",
@@ -10307,6 +10485,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
@@ -10344,6 +10523,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data >= pkt_meta', bad access 1",
@@ -10382,6 +10562,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' <= pkt_data, good access",
@@ -10400,6 +10581,7 @@ static struct bpf_test tests[] = {
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
@@ -10438,6 +10620,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data <= pkt_meta', good access",
@@ -10474,6 +10657,7 @@ static struct bpf_test tests[] = {
.errstr = "R1 offset is outside of the packet",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"XDP pkt read, pkt_data <= pkt_meta', bad access 2",
@@ -10547,7 +10731,7 @@ static struct bpf_test tests[] = {
"check deducing bounds from const, 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
- BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+ BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
@@ -10578,6 +10762,7 @@ static struct bpf_test tests[] = {
},
.result = REJECT,
.errstr = "dereference of modified ctx ptr",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"check deducing bounds from const, 8",
@@ -10591,6 +10776,7 @@ static struct bpf_test tests[] = {
},
.result = REJECT,
.errstr = "dereference of modified ctx ptr",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"check deducing bounds from const, 9",
@@ -11065,6 +11251,7 @@ static struct bpf_test tests[] = {
.result = REJECT,
.errstr = "R6 invalid mem access 'inv'",
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: two calls with args",
@@ -11930,6 +12117,7 @@ static struct bpf_test tests[] = {
.fixup_map_hash_8b = { 12, 22 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
@@ -12073,6 +12261,7 @@ static struct bpf_test tests[] = {
.fixup_map_hash_8b = { 12, 22 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=8 off=2 size=8",
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: two calls that receive map_value_ptr_or_null via arg. test1",
@@ -12244,6 +12433,7 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.retval = POINTER_VALUE,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 2",
@@ -12275,6 +12465,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "invalid access to packet",
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 3",
@@ -12310,6 +12501,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 4",
@@ -12344,6 +12536,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 5",
@@ -12377,6 +12570,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "same insn cannot be used with different",
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 6",
@@ -12412,6 +12606,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R4 invalid mem access",
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 7",
@@ -12446,6 +12641,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "R4 invalid mem access",
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 8",
@@ -12486,6 +12682,7 @@ static struct bpf_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: pkt_ptr spill into caller stack 9",
@@ -12527,6 +12724,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "invalid access to packet",
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"calls: caller stack init to zero or map_value_or_null",
@@ -12892,6 +13090,7 @@ static struct bpf_test tests[] = {
.result = REJECT,
.errstr = "BPF_XADD stores into R2 pkt is not allowed",
.prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"xadd/w check whether src/dst got mangled, 1",
@@ -13378,6 +13577,7 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "Unreleased reference",
.result = REJECT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"reference tracking: alloc, check, free in both subbranches",
@@ -13406,6 +13606,7 @@ static struct bpf_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"reference tracking in call: free reference in subprog",
@@ -13496,6 +13697,28 @@ static struct bpf_test tests[] = {
.result = ACCEPT,
},
{
+ "allocated_stack",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_REG(BPF_MOV, BPF_REG_7, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, -8),
+ BPF_STX_MEM(BPF_B, BPF_REG_10, BPF_REG_7, -9),
+ BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_10, -9),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .result_unpriv = ACCEPT,
+ .insn_processed = 15,
+ },
+ {
"reference tracking in call: free reference in subprog and outside",
.insns = {
BPF_SK_LOOKUP,
@@ -13896,6 +14119,275 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
+ {
+ "calls: ctx read at start of subprog",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+ BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ },
+ {
+ "check wire_len is not readable by sockets",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check wire_len is readable by tc classifier",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ },
+ {
+ "check wire_len is not writable by tc classifier",
+ .insns = {
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, wire_len)),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "invalid bpf_context access",
+ .errstr_unpriv = "R1 leaks addr",
+ .result = REJECT,
+ },
+ {
+ "calls: cross frame pruning",
+ .insns = {
+ /* r8 = !!random();
+ * call pruner()
+ * if (r8)
+ * do something bad;
+ */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_IMM(BPF_REG_8, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_8, 1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+ .errstr = "!read_ok",
+ .result = REJECT,
+ },
+ {
+ "jset: functional",
+ .insns = {
+ /* r0 = 0 */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* prep for direct packet access via r2 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+
+ /* reg, bit 63 or bit 0 set, taken */
+ BPF_LD_IMM64(BPF_REG_8, 0x8000000000000001),
+ BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_EXIT_INSN(),
+
+ /* reg, bit 62, not taken */
+ BPF_LD_IMM64(BPF_REG_8, 0x4000000000000000),
+ BPF_JMP_REG(BPF_JSET, BPF_REG_7, BPF_REG_8, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_EXIT_INSN(),
+
+ /* imm, any bit set, taken */
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, -1, 1),
+ BPF_EXIT_INSN(),
+
+ /* imm, bit 31 set, taken */
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
+ BPF_EXIT_INSN(),
+
+ /* all good - return r0 == 2 */
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .runs = 7,
+ .retvals = {
+ { .retval = 2,
+ .data64 = { (1ULL << 63) | (1U << 31) | (1U << 0), }
+ },
+ { .retval = 2,
+ .data64 = { (1ULL << 63) | (1U << 31), }
+ },
+ { .retval = 2,
+ .data64 = { (1ULL << 31) | (1U << 0), }
+ },
+ { .retval = 2,
+ .data64 = { (__u32)-1, }
+ },
+ { .retval = 2,
+ .data64 = { ~0x4000000000000000ULL, }
+ },
+ { .retval = 0,
+ .data64 = { 0, }
+ },
+ { .retval = 0,
+ .data64 = { ~0ULL, }
+ },
+ },
+ },
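+	/* Each retvals[] entry above seeds the first eight bytes of packet
+	 * data, which the program loads into r7, so every run exercises a
+	 * different JSET operand pattern.
+	 */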
+ {
+ "jset: sign-extend",
+ .insns = {
+ /* r0 = 0 */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* prep for direct packet access via r2 */
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_2, 0),
+
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x80000000, 1),
+ BPF_EXIT_INSN(),
+
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 2,
+ .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
+ },
+ {
+ "jset: known const compare",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .retval_unpriv = 1,
+ .result_unpriv = ACCEPT,
+ .retval = 1,
+ .result = ACCEPT,
+ },
+ {
+ "jset: known const compare bad",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+ },
+ {
+ "jset: unknown const compare taken",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+ },
+ {
+ "jset: unknown const compare not taken",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "!read_ok",
+ .result_unpriv = REJECT,
+ .errstr = "!read_ok",
+ .result = REJECT,
+ },
+ {
+ "jset: half-known const compare",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_IMM(BPF_OR, BPF_REG_0, 2),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 3, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ },
+ {
+ "jset: range",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+ BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xff),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0xf0, 3),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSET, BPF_REG_1, 0x10, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0x10, 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .result_unpriv = ACCEPT,
+ .result = ACCEPT,
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)
@@ -13921,7 +14413,7 @@ static int create_map(uint32_t type, uint32_t size_key,
return fd;
}
-static int create_prog_dummy1(enum bpf_map_type prog_type)
+static int create_prog_dummy1(enum bpf_prog_type prog_type)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_0, 42),
@@ -13932,7 +14424,7 @@ static int create_prog_dummy1(enum bpf_map_type prog_type)
ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
-static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
+static int create_prog_dummy2(enum bpf_prog_type prog_type, int mfd, int idx)
{
struct bpf_insn prog[] = {
BPF_MOV64_IMM(BPF_REG_3, idx),
@@ -13947,7 +14439,7 @@ static int create_prog_dummy2(enum bpf_map_type prog_type, int mfd, int idx)
ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
}
-static int create_prog_array(enum bpf_map_type prog_type, uint32_t max_elem,
+static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
int p1key)
{
int p2key = 1;
@@ -14018,7 +14510,7 @@ static int create_cgroup_storage(bool percpu)
static char bpf_vlog[UINT_MAX >> 8];
-static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
+static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
struct bpf_insn *prog, int *map_fds)
{
int *fixup_map_hash_8b = test->fixup_map_hash_8b;
@@ -14147,7 +14639,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
do {
prog[*fixup_map_stacktrace].imm = map_fds[12];
fixup_map_stacktrace++;
- } while (fixup_map_stacktrace);
+ } while (*fixup_map_stacktrace);
}
}
@@ -14178,16 +14670,43 @@ out:
return ret;
}
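+
+/* Execute the program once through bpf_prog_test_run() and compare its
+ * return value: admin capabilities are temporarily re-acquired for
+ * unprivileged tests, kernels lacking BPF_PROG_TEST_RUN (ENOTSUPP/EPERM)
+ * are tolerated, and the comparison is skipped when the expected value is
+ * the POINTER_VALUE sentinel, since actual pointer values are not stable.
+ */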
+static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
+ void *data, size_t size_data)
+{
+ __u8 tmp[TEST_DATA_LEN << 2];
+ __u32 size_tmp = sizeof(tmp);
+ uint32_t retval;
+ int err;
+
+ if (unpriv)
+ set_admin(true);
+ err = bpf_prog_test_run(fd_prog, 1, data, size_data,
+ tmp, &size_tmp, &retval, NULL);
+ if (unpriv)
+ set_admin(false);
+ if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
+ printf("Unexpected bpf_prog_test_run error ");
+ return err;
+ }
+ if (!err && retval != expected_val &&
+ expected_val != POINTER_VALUE) {
+ printf("FAIL retval %d != %d ", retval, expected_val);
+ return 1;
+ }
+
+ return 0;
+}
+
static void do_test_single(struct bpf_test *test, bool unpriv,
int *passes, int *errors)
{
- int fd_prog, expected_ret, reject_from_alignment;
+ int fd_prog, expected_ret, alignment_prevented_execution;
int prog_len, prog_type = test->prog_type;
struct bpf_insn *prog = test->insns;
+ int run_errs, run_successes;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
- uint32_t expected_val;
- uint32_t retval;
+ __u32 pflags;
int i, err;
for (i = 0; i < MAX_NR_MAPS; i++)
@@ -14198,69 +14717,105 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
do_test_fixup(test, prog_type, prog, map_fds);
prog_len = probe_filter_length(prog);
- fd_prog = bpf_verify_program(prog_type, prog, prog_len,
- test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
+ pflags = 0;
+ if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
+ pflags |= BPF_F_STRICT_ALIGNMENT;
+ if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
+ pflags |= BPF_F_ANY_ALIGNMENT;
+ fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
"GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
expected_ret = unpriv && test->result_unpriv != UNDEF ?
test->result_unpriv : test->result;
expected_err = unpriv && test->errstr_unpriv ?
test->errstr_unpriv : test->errstr;
- expected_val = unpriv && test->retval_unpriv ?
- test->retval_unpriv : test->retval;
-
- reject_from_alignment = fd_prog < 0 &&
- (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
- strstr(bpf_vlog, "Unknown alignment.");
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (reject_from_alignment) {
- printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
- strerror(errno));
- goto fail_log;
- }
-#endif
+
+ alignment_prevented_execution = 0;
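+	/* On architectures without efficient unaligned access, tests flagged
+	 * F_NEEDS_EFFICIENT_UNALIGNED_ACCESS are still loaded (courtesy of
+	 * BPF_F_ANY_ALIGNMENT above) but are not executed; a note is printed
+	 * with the result instead.
+	 */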
+
if (expected_ret == ACCEPT) {
- if (fd_prog < 0 && !reject_from_alignment) {
+ if (fd_prog < 0) {
printf("FAIL\nFailed to load prog '%s'!\n",
strerror(errno));
goto fail_log;
}
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (fd_prog >= 0 &&
+ (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
+ alignment_prevented_execution = 1;
+#endif
} else {
if (fd_prog >= 0) {
printf("FAIL\nUnexpected success to load!\n");
goto fail_log;
}
- if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
+ if (!strstr(bpf_vlog, expected_err)) {
printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
expected_err, bpf_vlog);
goto fail_log;
}
}
- if (fd_prog >= 0) {
- __u8 tmp[TEST_DATA_LEN << 2];
- __u32 size_tmp = sizeof(tmp);
-
- if (unpriv)
- set_admin(true);
- err = bpf_prog_test_run(fd_prog, 1, test->data,
- sizeof(test->data), tmp, &size_tmp,
- &retval, NULL);
- if (unpriv)
- set_admin(false);
- if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
- printf("Unexpected bpf_prog_test_run error\n");
+ if (test->insn_processed) {
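+		/* The verifier log ends with a "processed N insns" summary;
+		 * parse N and compare it with the expected count.
+		 */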
+ uint32_t insn_processed;
+ char *proc;
+
+ proc = strstr(bpf_vlog, "processed ");
+ insn_processed = atoi(proc + 10);
+ if (test->insn_processed != insn_processed) {
+ printf("FAIL\nUnexpected insn_processed %u vs %u\n",
+ insn_processed, test->insn_processed);
goto fail_log;
}
- if (!err && retval != expected_val &&
- expected_val != POINTER_VALUE) {
- printf("FAIL retval %d != %d\n", retval, expected_val);
- goto fail_log;
+ }
+
+ run_errs = 0;
+ run_successes = 0;
+ if (!alignment_prevented_execution && fd_prog >= 0) {
+ uint32_t expected_val;
+ int i;
+
+ if (!test->runs) {
+ expected_val = unpriv && test->retval_unpriv ?
+ test->retval_unpriv : test->retval;
+
+ err = do_prog_test_run(fd_prog, unpriv, expected_val,
+ test->data, sizeof(test->data));
+ if (err)
+ run_errs++;
+ else
+ run_successes++;
}
+
+ for (i = 0; i < test->runs; i++) {
+ if (unpriv && test->retvals[i].retval_unpriv)
+ expected_val = test->retvals[i].retval_unpriv;
+ else
+ expected_val = test->retvals[i].retval;
+
+ err = do_prog_test_run(fd_prog, unpriv, expected_val,
+ test->retvals[i].data,
+ sizeof(test->retvals[i].data));
+ if (err) {
+ printf("(run %d/%d) ", i + 1, test->runs);
+ run_errs++;
+ } else {
+ run_successes++;
+ }
+ }
+ }
+
+ if (!run_errs) {
+ (*passes)++;
+ if (run_successes > 1)
+ printf("%d cases ", run_successes);
+ printf("OK");
+ if (alignment_prevented_execution)
+ printf(" (NOTE: not executed due to unknown alignment)");
+ printf("\n");
+ } else {
+ printf("\n");
+ goto fail_log;
}
- (*passes)++;
- printf("OK%s\n", reject_from_alignment ?
- " (NOTE: reject due to unknown alignment)" : "");
close_fds:
close(fd_prog);
for (i = 0; i < MAX_NR_MAPS; i++)
diff --git a/tools/testing/selftests/bpf/xdp_dummy.c b/tools/testing/selftests/bpf/xdp_dummy.c
new file mode 100644
index 000000000000..43b0ef1001ed
--- /dev/null
+++ b/tools/testing/selftests/bpf/xdp_dummy.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define KBUILD_MODNAME "xdp_dummy"
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
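+/* Trivial XDP program that passes every packet; presumably a stand-in used
+ * by other selftests that just need a valid program to attach.
+ */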
+SEC("xdp_dummy")
+int xdp_dummy_prog(struct xdp_md *ctx)
+{
+ return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/drivers/net/mlxsw/extack.sh b/tools/testing/selftests/drivers/net/mlxsw/extack.sh
new file mode 100755
index 000000000000..d72d8488a3b2
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/extack.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test operations that we expect to report extended ack.
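+#
+# The checks below use "2>&1 >/dev/null" to swap stderr onto the pipe, so
+# that grep sees only the error output, and then grep for "mlxsw_spectrum"
+# to verify that the failure carried an extack message from the driver.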
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ netdev_pre_up_test
+ vxlan_vlan_add_test
+ port_vlan_add_test
+"
+NUM_NETIFS=2
+source $lib_dir/lib.sh
+
+setup_prepare()
+{
+ swp1=${NETIFS[p1]}
+ swp2=${NETIFS[p2]}
+
+ ip link set dev $swp1 up
+ ip link set dev $swp2 up
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip link set dev $swp2 down
+ ip link set dev $swp1 down
+}
+
+netdev_pre_up_test()
+{
+ RET=0
+
+ ip link add name br1 up type bridge vlan_filtering 0 mcast_snooping 0
+ ip link add name vx1 up type vxlan id 1000 \
+ local 192.0.2.17 remote 192.0.2.18 \
+ dstport 4789 nolearning noudpcsum tos inherit ttl 100
+
+ ip link set dev vx1 master br1
+ check_err $?
+
+ ip link set dev $swp1 master br1
+ check_err $?
+
+ ip link add name br2 up type bridge vlan_filtering 0 mcast_snooping 0
+ ip link add name vx2 up type vxlan id 2000 \
+ local 192.0.2.17 remote 192.0.2.18 \
+ dstport 4789 nolearning noudpcsum tos inherit ttl 100
+
+ ip link set dev vx2 master br2
+ check_err $?
+
+ ip link set dev $swp2 master br2
+ check_err $?
+
+ # Unsupported configuration: mlxsw demands that all offloaded VXLAN
+ # devices have the same TTL.
+ ip link set dev vx2 down
+ ip link set dev vx2 type vxlan ttl 200
+
+ ip link set dev vx2 up &>/dev/null
+ check_fail $?
+
+ ip link set dev vx2 up 2>&1 >/dev/null | grep -q mlxsw_spectrum
+ check_err $?
+
+ log_test "extack - NETDEV_PRE_UP"
+
+ ip link del dev vx2
+ ip link del dev br2
+
+ ip link del dev vx1
+ ip link del dev br1
+}
+
+vxlan_vlan_add_test()
+{
+ RET=0
+
+ ip link add name br1 up type bridge vlan_filtering 1 mcast_snooping 0
+
+ # Unsupported configuration: mlxsw demands VXLAN with "noudpcsum".
+ ip link add name vx1 up type vxlan id 1000 \
+ local 192.0.2.17 remote 192.0.2.18 \
+ dstport 4789 tos inherit ttl 100
+
+ ip link set dev vx1 master br1
+ check_err $?
+
+ bridge vlan add dev vx1 vid 1
+ check_err $?
+
+ ip link set dev $swp1 master br1
+ check_err $?
+
+ bridge vlan add dev vx1 vid 1 pvid untagged 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $?
+
+ log_test "extack - map VLAN at VXLAN device"
+
+ ip link del dev vx1
+ ip link del dev br1
+}
+
+port_vlan_add_test()
+{
+ RET=0
+
+ ip link add name br1 up type bridge vlan_filtering 1 mcast_snooping 0
+
+ # Unsupported configuration: mlxsw demands VXLAN with "noudpcsum".
+ ip link add name vx1 up type vxlan id 1000 \
+ local 192.0.2.17 remote 192.0.2.18 \
+ dstport 4789 tos inherit ttl 100
+
+ ip link set dev $swp1 master br1
+ check_err $?
+
+ bridge vlan del dev $swp1 vid 1
+
+ ip link set dev vx1 master br1
+ check_err $?
+
+ bridge vlan add dev $swp1 vid 1 pvid untagged 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $?
+
+ log_test "extack - map VLAN at port"
+
+ ip link del dev vx1
+ ip link del dev br1
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/one_armed_router.sh b/tools/testing/selftests/drivers/net/mlxsw/one_armed_router.sh
new file mode 100755
index 000000000000..f02d83e94576
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/one_armed_router.sh
@@ -0,0 +1,259 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test a "one-armed router" [1] scenario. Packets forwarded between H1 and H2
+# should be forwarded by the ASIC, but also trapped so that ICMP redirect
+# packets can potentially be generated.
+#
+# 1. https://en.wikipedia.org/wiki/One-armed_router
+#
+# +---------------------------------+
+# | H1 (vrf) |
+# | + $h1 |
+# | | 192.0.2.1/24 |
+# | | 2001:db8:1::1/64 |
+# | | |
+# | | default via 192.0.2.2 |
+# | | default via 2001:db8:1::2 |
+# +----|----------------------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | SW | |
+# | +--|--------------------------------------------------------------------+ |
+# | | + $swp1 BR0 (802.1d) | |
+# | | | |
+# | | 192.0.2.2/24 | |
+# | | 2001:db8:1::2/64 | |
+# | | 198.51.100.2/24 | |
+# | | 2001:db8:2::2/64 | |
+# | | | |
+# | | + $swp2 | |
+# | +--|--------------------------------------------------------------------+ |
+# | | |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|----------------------------+
+# | | default via 198.51.100.2 |
+# | | default via 2001:db8:2::2 |
+# | | |
+# | | 2001:db8:2::1/64 |
+# | | 198.51.100.1/24 |
+# | + $h2 |
+# | H2 (vrf) |
+# +---------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="ping_ipv4 ping_ipv6 fwd_mark_ipv4 fwd_mark_ipv6"
+NUM_NETIFS=4
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/24 2001:db8:1::1/64
+
+ ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+ ip -6 route add default vrf v$h1 nexthop via 2001:db8:1::2
+}
+
+h1_destroy()
+{
+ ip -6 route del default vrf v$h1 nexthop via 2001:db8:1::2
+ ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+ simple_if_fini $h1 192.0.2.1/24 2001:db8:1::1/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 198.51.100.1/24 2001:db8:2::1/64
+
+ ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+ ip -6 route add default vrf v$h2 nexthop via 2001:db8:2::2
+}
+
+h2_destroy()
+{
+ ip -6 route del default vrf v$h2 nexthop via 2001:db8:2::2
+ ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+ simple_if_fini $h2 198.51.100.1/24 2001:db8:2::1/64
+}
+
+switch_create()
+{
+ ip link add name br0 type bridge mcast_snooping 0
+ ip link set dev br0 up
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp1 up
+ ip link set dev $swp2 master br0
+ ip link set dev $swp2 up
+
+ tc qdisc add dev $swp1 clsact
+ tc qdisc add dev $swp2 clsact
+
+ __addr_add_del br0 add 192.0.2.2/24 2001:db8:1::2/64
+ __addr_add_del br0 add 198.51.100.2/24 2001:db8:2::2/64
+}
+
+switch_destroy()
+{
+ __addr_add_del br0 del 198.51.100.2/24 2001:db8:2::2/64
+ __addr_add_del br0 del 192.0.2.2/24 2001:db8:1::2/64
+
+ tc qdisc del dev $swp2 clsact
+ tc qdisc del dev $swp1 clsact
+
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ ip link set dev br0 down
+ ip link del dev br0
+}
+
+ping_ipv4()
+{
+ ping_test $h1 198.51.100.1 ": h1->h2"
+}
+
+ping_ipv6()
+{
+ ping6_test $h1 2001:db8:2::1 ": h1->h2"
+}
+
+fwd_mark_ipv4()
+{
+ # Transmit packets from H1 to H2 and make sure they are trapped at
+ # swp1 due to loopback error, but only forwarded by the ASIC through
+ # swp2
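+	#
+	# Filter 101 at $swp1 ingress (skip_hw) counts the copies trapped to
+	# the CPU, filter 101 at $swp2 egress (skip_hw) counts packets
+	# forwarded in software (expected to be none), and filter 102 at
+	# $swp2 egress (skip_sw) counts packets forwarded by the ASIC.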
+
+ tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 flower \
+ skip_hw dst_ip 198.51.100.1 ip_proto udp dst_port 52768 \
+ action pass
+
+ tc filter add dev $swp2 egress protocol ip pref 1 handle 101 flower \
+ skip_hw dst_ip 198.51.100.1 ip_proto udp dst_port 52768 \
+ action pass
+
+ tc filter add dev $swp2 egress protocol ip pref 2 handle 102 flower \
+ skip_sw dst_ip 198.51.100.1 ip_proto udp dst_port 52768 \
+ action pass
+
+ ip vrf exec v$h1 $MZ $h1 -c 10 -d 100msec -p 64 -A 192.0.2.1 \
+ -B 198.51.100.1 -t udp dp=52768,sp=42768 -q
+
+ RET=0
+
+ tc_check_packets "dev $swp1 ingress" 101 10
+ check_err $?
+
+ log_test "fwd mark: trapping IPv4 packets due to LBERROR"
+
+ RET=0
+
+ tc_check_packets "dev $swp2 egress" 101 0
+ check_err $?
+
+ log_test "fwd mark: forwarding IPv4 packets in software"
+
+ RET=0
+
+ tc_check_packets "dev $swp2 egress" 102 10
+ check_err $?
+
+ log_test "fwd mark: forwarding IPv4 packets in hardware"
+
+ tc filter del dev $swp2 egress protocol ip pref 2 handle 102 flower
+ tc filter del dev $swp2 egress protocol ip pref 1 handle 101 flower
+ tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower
+}
+
+fwd_mark_ipv6()
+{
+ tc filter add dev $swp1 ingress protocol ipv6 pref 1 handle 101 flower \
+ skip_hw dst_ip 2001:db8:2::1 ip_proto udp dst_port 52768 \
+ action pass
+
+ tc filter add dev $swp2 egress protocol ipv6 pref 1 handle 101 flower \
+ skip_hw dst_ip 2001:db8:2::1 ip_proto udp dst_port 52768 \
+ action pass
+
+ tc filter add dev $swp2 egress protocol ipv6 pref 2 handle 102 flower \
+ skip_sw dst_ip 2001:db8:2::1 ip_proto udp dst_port 52768 \
+ action pass
+
+ ip vrf exec v$h1 $MZ $h1 -6 -c 10 -d 100msec -p 64 -A 2001:db8:1::1 \
+ -B 2001:db8:2::1 -t udp dp=52768,sp=42768 -q
+
+ RET=0
+
+ tc_check_packets "dev $swp1 ingress" 101 10
+ check_err $?
+
+ log_test "fwd mark: trapping IPv6 packets due to LBERROR"
+
+ RET=0
+
+ tc_check_packets "dev $swp2 egress" 101 0
+ check_err $?
+
+ log_test "fwd mark: forwarding IPv6 packets in software"
+
+ RET=0
+
+ tc_check_packets "dev $swp2 egress" 102 10
+ check_err $?
+
+ log_test "fwd mark: forwarding IPv6 packets in hardware"
+
+ tc filter del dev $swp2 egress protocol ipv6 pref 2 handle 102 flower
+ tc filter del dev $swp2 egress protocol ipv6 pref 1 handle 101 flower
+ tc filter del dev $swp1 ingress protocol ipv6 pref 1 handle 101 flower
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+ forwarding_enable
+
+ sysctl_set net.ipv4.conf.all.accept_redirects 0
+ sysctl_set net.ipv6.conf.all.accept_redirects 0
+
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ sysctl_restore net.ipv6.conf.all.accept_redirects
+ sysctl_restore net.ipv4.conf.all.accept_redirects
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
new file mode 100755
index 000000000000..94fdbf215c14
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh
@@ -0,0 +1,565 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test various interface configuration scenarios. Observe that configurations
+# deemed valid by mlxsw succeed, that invalid configurations fail, and that no
+# traces are produced. To prevent the test from passing when traces are
+# produced, the user can set the 'kernel.panic_on_warn' and
+# 'kernel.panic_on_oops' sysctls in their environment.
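+#
+# Several checks use "2>&1 >/dev/null" to swap stderr onto the pipe and
+# grep for "mlxsw_spectrum", verifying that a rejected configuration
+# carried an extack message from the driver.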
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+ rif_set_addr_test
+ rif_inherit_bridge_addr_test
+ rif_non_inherit_bridge_addr_test
+ vlan_interface_deletion_test
+ bridge_deletion_test
+ bridge_vlan_flags_test
+ vlan_1_test
+ lag_bridge_upper_test
+ duplicate_vlans_test
+ vlan_rif_refcount_test
+ subport_rif_refcount_test
+ vlan_dev_deletion_test
+ lag_unlink_slaves_test
+ lag_dev_deletion_test
+ vlan_interface_uppers_test
+ devlink_reload_test
+"
+NUM_NETIFS=2
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+
+setup_prepare()
+{
+ swp1=${NETIFS[p1]}
+ swp2=${NETIFS[p2]}
+
+ ip link set dev $swp1 up
+ ip link set dev $swp2 up
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip link set dev $swp2 down
+ ip link set dev $swp1 down
+}
+
+rif_set_addr_test()
+{
+ local swp1_mac=$(mac_get $swp1)
+ local swp2_mac=$(mac_get $swp2)
+
+ RET=0
+
+ # $swp1 and $swp2 likely got their IPv6 local addresses already, but
+ # here we need to test the transition to RIF.
+ ip addr flush dev $swp1
+ ip addr flush dev $swp2
+ sleep .1
+
+ ip addr add dev $swp1 192.0.2.1/28
+ check_err $?
+
+ ip link set dev $swp1 addr 00:11:22:33:44:55
+ check_err $?
+
+ # IP address enablement should be rejected if the MAC address prefix
+ # doesn't match other RIFs.
+ ip addr add dev $swp2 192.0.2.2/28 &>/dev/null
+ check_fail $? "IP address addition passed for a device with a wrong MAC"
+ ip addr add dev $swp2 192.0.2.2/28 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "no extack for IP address addition"
+
+ ip link set dev $swp2 addr 00:11:22:33:44:66
+ check_err $?
+ ip addr add dev $swp2 192.0.2.2/28 &>/dev/null
+ check_err $?
+
+ # Change of MAC address of a RIF should be forbidden if the new MAC
+ # doesn't share the prefix with other MAC addresses.
+ ip link set dev $swp2 addr 00:11:22:33:00:66 &>/dev/null
+ check_fail $? "change of MAC address passed for a wrong MAC"
+ ip link set dev $swp2 addr 00:11:22:33:00:66 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "no extack for MAC address change"
+
+ log_test "RIF - bad MAC change"
+
+ ip addr del dev $swp2 192.0.2.2/28
+ ip addr del dev $swp1 192.0.2.1/28
+
+ ip link set dev $swp2 addr $swp2_mac
+ ip link set dev $swp1 addr $swp1_mac
+}
+
+rif_inherit_bridge_addr_test()
+{
+ RET=0
+
+ # Create first RIF
+ ip addr add dev $swp1 192.0.2.1/28
+ check_err $?
+
+ # Create a FID RIF
+ ip link add name br1 up type bridge vlan_filtering 0
+ ip link set dev $swp2 master br1
+ ip addr add dev br1 192.0.2.17/28
+ check_err $?
+
+ # Prepare a device with a low MAC address
+ ip link add name d up type dummy
+ ip link set dev d addr 00:11:22:33:44:55
+
+ # Attach the device to br1. That prompts bridge address change, which
+ # should be vetoed, thus preventing the attachment.
+ ip link set dev d master br1 &>/dev/null
+ check_fail $? "Device with low MAC was permitted to attach a bridge with RIF"
+ ip link set dev d master br1 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "no extack for bridge attach rejection"
+
+ ip link set dev $swp2 addr 00:11:22:33:44:55 &>/dev/null
+ check_fail $? "Changing swp2's MAC address permitted"
+ ip link set dev $swp2 addr 00:11:22:33:44:55 2>&1 >/dev/null \
+ | grep -q mlxsw_spectrum
+ check_err $? "no extack for bridge port MAC address change rejection"
+
+ log_test "RIF - attach port with bad MAC to bridge"
+
+ ip link del dev d
+ ip link del dev br1
+ ip addr del dev $swp1 192.0.2.1/28
+}
+
+rif_non_inherit_bridge_addr_test()
+{
+ local swp2_mac=$(mac_get $swp2)
+
+ RET=0
+
+ # Create first RIF
+ ip addr add dev $swp1 192.0.2.1/28
+ check_err $?
+
+ # Create a FID RIF
+ ip link add name br1 up type bridge vlan_filtering 0
+ ip link set dev br1 addr $swp2_mac
+ ip link set dev $swp2 master br1
+ ip addr add dev br1 192.0.2.17/28
+ check_err $?
+
+ # Prepare a device with a low MAC address
+ ip link add name d up type dummy
+ ip link set dev d addr 00:11:22:33:44:55
+
+ # Attach the device to br1. Since the bridge address was set, it should
+ # work.
+ ip link set dev d master br1 &>/dev/null
+ check_err $? "Could not attach a device with low MAC to a bridge with RIF"
+
+ # Port MAC address change should be allowed for a bridge with set MAC.
+ ip link set dev $swp2 addr 00:11:22:33:44:55
+ check_err $? "Changing swp2's MAC address not permitted"
+
+ log_test "RIF - attach port with bad MAC to bridge with set MAC"
+
+ ip link set dev $swp2 addr $swp2_mac
+ ip link del dev d
+ ip link del dev br1
+ ip addr del dev $swp1 192.0.2.1/28
+}
+
+vlan_interface_deletion_test()
+{
+ # Test that when a VLAN interface is deleted, its associated router
+ # interface (RIF) is correctly deleted and not leaked. See commit
+ # c360867ec46a ("mlxsw: spectrum: Delete RIF when VLAN device is
+ # removed") for more details
+ RET=0
+
+ ip link add name br0 type bridge vlan_filtering 1
+ ip link set dev $swp1 master br0
+
+ ip link add link br0 name br0.10 type vlan id 10
+ ip -6 address add 2001:db8:1::1/64 dev br0.10
+ ip link del dev br0.10
+
+ # If we leaked the previous RIF, then this should produce a trace
+ ip link add link br0 name br0.20 type vlan id 20
+ ip -6 address add 2001:db8:1::1/64 dev br0.20
+ ip link del dev br0.20
+
+ log_test "vlan interface deletion"
+
+ ip link del dev br0
+}
+
+bridge_deletion_test()
+{
+ # Test that when a bridge with VLAN interfaces is deleted, we correctly
+ # delete the associated RIFs. See commit 602b74eda813 ("mlxsw:
+ # spectrum_switchdev: Do not leak RIFs when removing bridge") for more
+ # details
+ RET=0
+
+ ip link add name br0 type bridge vlan_filtering 1
+ ip link set dev $swp1 master br0
+ ip -6 address add 2001:db8::1/64 dev br0
+
+ ip link add link br0 name br0.10 type vlan id 10
+ ip -6 address add 2001:db8:1::1/64 dev br0.10
+
+ ip link add link br0 name br0.20 type vlan id 20
+ ip -6 address add 2001:db8:2::1/64 dev br0.20
+
+ ip link del dev br0
+
+ # If we leaked previous RIFs, then this should produce a trace
+ ip -6 address add 2001:db8:1::1/64 dev $swp1
+ ip -6 address del 2001:db8:1::1/64 dev $swp1
+
+ log_test "bridge deletion"
+}
+
+bridge_vlan_flags_test()
+{
+ # Test that when bridge VLAN flags are toggled, we do not take
+ # unnecessary references on related structs. See commit 9e25826ffc94
+ # ("mlxsw: spectrum_switchdev: Fix port_vlan refcounting") for more
+ # details
+ RET=0
+
+ ip link add name br0 type bridge vlan_filtering 1
+ ip link set dev $swp1 master br0
+
+ bridge vlan add vid 10 dev $swp1 pvid untagged
+ bridge vlan add vid 10 dev $swp1 untagged
+ bridge vlan add vid 10 dev $swp1 pvid
+ bridge vlan add vid 10 dev $swp1
+ ip link del dev br0
+
+ # If we did not handle references correctly, then this should produce a
+ # trace
+ devlink dev reload "$DEVLINK_DEV"
+
+ # Allow netdevices to be re-created following the reload
+ sleep 20
+
+ log_test "bridge vlan flags"
+}
+
+vlan_1_test()
+{
+ # Test that VLAN 1 can be configured over mlxsw ports. In the past it
+ # was used internally for untagged traffic. See commit 47bf9df2e820
+ # ("mlxsw: spectrum: Forbid creation of VLAN 1 over port/LAG") for more
+ # details
+ RET=0
+
+ ip link add link $swp1 name $swp1.1 type vlan id 1
+ check_err $? "did not manage to create vlan 1 when should"
+
+ log_test "vlan 1"
+
+ ip link del dev $swp1.1
+}
+
+lag_bridge_upper_test()
+{
+ # Test that ports cannot be enslaved to LAG devices that have uppers
+ # and that failure is handled gracefully. See commit b3529af6bb0d
+ # ("spectrum: Reference count VLAN entries") for more details
+ RET=0
+
+ ip link add name bond1 type bond mode 802.3ad
+
+ ip link add name br0 type bridge vlan_filtering 1
+ ip link set dev bond1 master br0
+
+ ip link set dev $swp1 down
+ ip link set dev $swp1 master bond1 &> /dev/null
+ check_fail $? "managed to enslave port to lag when should not"
+
+ # This might generate a trace, if we did not handle the failure
+ # correctly
+ ip -6 address add 2001:db8:1::1/64 dev $swp1
+ ip -6 address del 2001:db8:1::1/64 dev $swp1
+
+ log_test "lag with bridge upper"
+
+ ip link del dev br0
+ ip link del dev bond1
+}
+
+duplicate_vlans_test()
+{
+	# Test that on a given port a VLAN is only used once, either as a
+	# VLAN in a VLAN-aware bridge or as a VLAN device
+ RET=0
+
+ ip link add name br0 type bridge vlan_filtering 1
+ ip link set dev $swp1 master br0
+ bridge vlan add vid 10 dev $swp1
+
+ ip link add link $swp1 name $swp1.10 type vlan id 10 &> /dev/null
+ check_fail $? "managed to create vlan device when should not"
+
+ bridge vlan del vid 10 dev $swp1
+ ip link add link $swp1 name $swp1.10 type vlan id 10
+ check_err $? "did not manage to create vlan device when should"
+ bridge vlan add vid 10 dev $swp1 &> /dev/null
+ check_fail $? "managed to add bridge vlan when should not"
+
+ log_test "duplicate vlans"
+
+ ip link del dev $swp1.10
+ ip link del dev br0
+}
+
+vlan_rif_refcount_test()
+{
+	# Test that RIFs representing VLAN interfaces are not affected by
+	# ports that are members of the VLAN. We use the offload indication
+	# on routes configured on the RIF to understand if it was created /
+	# destroyed
+ RET=0
+
+ ip link add name br0 type bridge vlan_filtering 1
+ ip link set dev $swp1 master br0
+
+ ip link set dev $swp1 up
+ ip link set dev br0 up
+
+ ip link add link br0 name br0.10 up type vlan id 10
+ ip -6 address add 2001:db8:1::1/64 dev br0.10
+
+ ip -6 route get fibmatch 2001:db8:1::2 dev br0.10 | grep -q offload
+ check_err $? "vlan rif was not created before adding port to vlan"
+
+ bridge vlan add vid 10 dev $swp1
+ ip -6 route get fibmatch 2001:db8:1::2 dev br0.10 | grep -q offload
+ check_err $? "vlan rif was destroyed after adding port to vlan"
+
+ bridge vlan del vid 10 dev $swp1
+ ip -6 route get fibmatch 2001:db8:1::2 dev br0.10 | grep -q offload
+ check_err $? "vlan rif was destroyed after removing port from vlan"
+
+ ip link set dev $swp1 nomaster
+ ip -6 route get fibmatch 2001:db8:1::2 dev br0.10 | grep -q offload
+ check_fail $? "vlan rif was not destroyed after unlinking port from bridge"
+
+ log_test "vlan rif refcount"
+
+ ip link del dev br0.10
+ ip link set dev $swp1 down
+ ip link del dev br0
+}
+
+subport_rif_refcount_test()
+{
+ # Test that RIFs representing upper devices of physical ports are
+	# reference counted correctly and destroyed when they should be. We
+	# use the offload indication on routes configured on the RIF to
+	# understand if it was created / destroyed
+ RET=0
+
+ ip link add name bond1 type bond mode 802.3ad
+ ip link set dev $swp1 down
+ ip link set dev $swp2 down
+ ip link set dev $swp1 master bond1
+ ip link set dev $swp2 master bond1
+
+ ip link set dev bond1 up
+ ip link add link bond1 name bond1.10 up type vlan id 10
+ ip -6 address add 2001:db8:1::1/64 dev bond1
+ ip -6 address add 2001:db8:2::1/64 dev bond1.10
+
+ ip -6 route get fibmatch 2001:db8:1::2 dev bond1 | grep -q offload
+ check_err $? "subport rif was not created on lag device"
+ ip -6 route get fibmatch 2001:db8:2::2 dev bond1.10 | grep -q offload
+ check_err $? "subport rif was not created on vlan device"
+
+ ip link set dev $swp1 nomaster
+ ip -6 route get fibmatch 2001:db8:1::2 dev bond1 | grep -q offload
+ check_err $? "subport rif of lag device was destroyed when should not"
+ ip -6 route get fibmatch 2001:db8:2::2 dev bond1.10 | grep -q offload
+ check_err $? "subport rif of vlan device was destroyed when should not"
+
+ ip link set dev $swp2 nomaster
+ ip -6 route get fibmatch 2001:db8:1::2 dev bond1 | grep -q offload
+ check_fail $? "subport rif of lag device was not destroyed when should"
+ ip -6 route get fibmatch 2001:db8:2::2 dev bond1.10 | grep -q offload
+ check_fail $? "subport rif of vlan device was not destroyed when should"
+
+ log_test "subport rif refcount"
+
+ ip link del dev bond1.10
+ ip link del dev bond1
+}
+
+vlan_dev_deletion_test()
+{
+ # Test that VLAN devices are correctly deleted / unlinked when
+ # enslaved to a bridge
+ RET=0
+
+ ip link add name br10 type bridge
+ ip link add name br20 type bridge
+ ip link add name br30 type bridge
+ ip link add link $swp1 name $swp1.10 type vlan id 10
+ ip link add link $swp1 name $swp1.20 type vlan id 20
+ ip link add link $swp1 name $swp1.30 type vlan id 30
+ ip link set dev $swp1.10 master br10
+ ip link set dev $swp1.20 master br20
+ ip link set dev $swp1.30 master br30
+
+ # If we did not handle the situation correctly, then these operations
+ # might produce a trace
+ ip link set dev $swp1.30 nomaster
+ ip link del dev $swp1.20
+ # Deletion via ioctl uses different code paths from netlink
+ vconfig rem $swp1.10 &> /dev/null
+
+ log_test "vlan device deletion"
+
+ ip link del dev $swp1.30
+ ip link del dev br30
+ ip link del dev br20
+ ip link del dev br10
+}
+
+lag_create()
+{
+ ip link add name bond1 type bond mode 802.3ad
+ ip link set dev $swp1 down
+ ip link set dev $swp2 down
+ ip link set dev $swp1 master bond1
+ ip link set dev $swp2 master bond1
+
+ ip link add link bond1 name bond1.10 type vlan id 10
+ ip link add link bond1 name bond1.20 type vlan id 20
+
+ ip link add name br0 type bridge vlan_filtering 1
+ ip link set dev bond1 master br0
+
+ ip link add name br10 type bridge
+ ip link set dev bond1.10 master br10
+
+ ip link add name br20 type bridge
+ ip link set dev bond1.20 master br20
+}
+
+lag_unlink_slaves_test()
+{
+ # Test that ports are correctly unlinked from their LAG master when
+ # the LAG and its VLAN uppers are enslaved to bridges
+ RET=0
+
+ lag_create
+
+ ip link set dev $swp1 nomaster
+ check_err $? "lag slave $swp1 was not unlinked from master"
+ ip link set dev $swp2 nomaster
+ check_err $? "lag slave $swp2 was not unlinked from master"
+
+ # Try to configure corresponding VLANs as router interfaces
+ ip -6 address add 2001:db8:1::1/64 dev $swp1
+ check_err $? "failed to configure ip address on $swp1"
+
+ ip link add link $swp1 name $swp1.10 type vlan id 10
+ ip -6 address add 2001:db8:10::1/64 dev $swp1.10
+ check_err $? "failed to configure ip address on $swp1.10"
+
+ ip link add link $swp1 name $swp1.20 type vlan id 20
+ ip -6 address add 2001:db8:20::1/64 dev $swp1.20
+ check_err $? "failed to configure ip address on $swp1.20"
+
+ log_test "lag slaves unlinking"
+
+ ip link del dev $swp1.20
+ ip link del dev $swp1.10
+ ip address flush dev $swp1
+
+ ip link del dev br20
+ ip link del dev br10
+ ip link del dev br0
+ ip link del dev bond1
+}
+
+lag_dev_deletion_test()
+{
+ # Test that a LAG device is correctly deleted when the LAG and its
+ # VLAN uppers are enslaved to bridges
+ RET=0
+
+ lag_create
+
+ ip link del dev bond1
+
+ log_test "lag device deletion"
+
+ ip link del dev br20
+ ip link del dev br10
+ ip link del dev br0
+}
+
+vlan_interface_uppers_test()
+{
+ # Test that uppers of a VLAN interface are correctly sanitized
+ RET=0
+
+ ip link add name br0 type bridge vlan_filtering 1
+ ip link set dev $swp1 master br0
+
+ ip link add link br0 name br0.10 type vlan id 10
+ ip link add link br0.10 name macvlan0 \
+ type macvlan mode private &> /dev/null
+ check_fail $? "managed to create a macvlan when should not"
+
+ ip -6 address add 2001:db8:1::1/64 dev br0.10
+ ip link add link br0.10 name macvlan0 type macvlan mode private
+ check_err $? "did not manage to create a macvlan when should"
+
+ ip link del dev macvlan0
+
+ ip link add name vrf-test type vrf table 10
+ ip link set dev br0.10 master vrf-test
+ check_err $? "did not manage to enslave vlan interface to vrf"
+ ip link del dev vrf-test
+
+ ip link add name br-test type bridge
+ ip link set dev br0.10 master br-test &> /dev/null
+ check_fail $? "managed to enslave vlan interface to bridge when should not"
+ ip link del dev br-test
+
+ log_test "vlan interface uppers"
+
+ ip link del dev br0
+}
+
+devlink_reload_test()
+{
+ # Test that after executing all the above configuration tests, a
+ # devlink reload can be performed without errors
+ RET=0
+
+ devlink dev reload "$DEVLINK_DEV"
+ check_err $? "devlink reload failed"
+
+ log_test "devlink reload - last test"
+
+ sleep 20
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
index 3b75180f455d..b41d6256b2d0 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh
@@ -8,7 +8,8 @@
lib_dir=$(dirname $0)/../../../../net/forwarding
ALL_TESTS="single_mask_test identical_filters_test two_masks_test \
- multiple_masks_test ctcam_edge_cases_test"
+ multiple_masks_test ctcam_edge_cases_test delta_simple_test \
+ bloom_simple_test bloom_complex_test bloom_delta_test"
NUM_NETIFS=2
source $lib_dir/tc_common.sh
source $lib_dir/lib.sh
@@ -142,7 +143,7 @@ two_masks_test()
tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
$tcflags dst_ip 192.0.2.2 action drop
tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
- $tcflags dst_ip 192.0.0.0/16 action drop
+ $tcflags dst_ip 192.0.0.0/8 action drop
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip -q
@@ -235,7 +236,7 @@ ctcam_two_atcam_masks_test()
$tcflags dst_ip 192.0.2.2 action drop
# Filter goes into A-TCAM
tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
- $tcflags dst_ip 192.0.2.0/24 action drop
+ $tcflags dst_ip 192.0.0.0/16 action drop
$MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
-t ip -q
@@ -324,6 +325,258 @@ ctcam_edge_cases_test()
ctcam_no_atcam_masks_test
}
+tp_record()
+{
+ local tracepoint=$1
+ local cmd=$2
+
+ perf record -q -e $tracepoint $cmd
+ return $?
+}
+
+tp_check_hits()
+{
+ local tracepoint=$1
+ local count=$2
+
+ perf_output=`perf script -F trace:event,trace`
+ hits=`echo "$perf_output" | grep "$tracepoint:" | wc -l`
+ if [[ "$count" -ne "$hits" ]]; then
+ return 1
+ fi
+ return 0
+}
+
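+# Example usage of the two helpers above (illustrative; it mirrors the
+# calls made in delta_simple_test() below):
+#
+#	tp_record "objagg:*" "tc filter add dev $h2 ingress ... flower ..."
+#	tp_check_hits "objagg:objagg_obj_root_create" 1
+#	check_err $? "eRP was not created"
+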
+delta_simple_test()
+{
+ # The first filter will create an eRP and the second filter will fit
+ # into the first eRP with a delta. Then remove the first rule and
+ # check that the eRP stays (referenced by the second filter).
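+ #
+ # Concretely, the /24 mask of the first filter and the /32 mask of the
+ # second are close enough to be encoded as a delta of a single eRP.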
+
+ RET=0
+
+ if [[ "$tcflags" != "skip_sw" ]]; then
+ return 0
+ fi
+
+ tp_record "objagg:*" "tc filter add dev $h2 ingress protocol ip \
+ pref 1 handle 101 flower $tcflags dst_ip 192.0.0.0/24 \
+ action drop"
+ tp_check_hits "objagg:objagg_obj_root_create" 1
+ check_err $? "eRP was not created"
+
+ tp_record "objagg:*" "tc filter add dev $h2 ingress protocol ip \
+ pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \
+ action drop"
+ tp_check_hits "objagg:objagg_obj_root_create" 0
+ check_err $? "eRP was incorrectly created"
+ tp_check_hits "objagg:objagg_obj_parent_assign" 1
+ check_err $? "delta was not created"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_fail $? "Matched a wrong filter"
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Did not match on correct filter"
+
+ tp_record "objagg:*" "tc filter del dev $h2 ingress protocol ip \
+ pref 1 handle 101 flower"
+ tp_check_hits "objagg:objagg_obj_root_destroy" 0
+ check_err $? "eRP was incorrectly destroyed"
+ tp_check_hits "objagg:objagg_obj_parent_unassign" 0
+ check_err $? "delta was incorrectly destroyed"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 2
+ check_err $? "Did not match on correct filter after the first was removed"
+
+ tp_record "objagg:*" "tc filter del dev $h2 ingress protocol ip \
+ pref 2 handle 102 flower"
+ tp_check_hits "objagg:objagg_obj_parent_unassign" 1
+ check_err $? "delta was not destroyed"
+ tp_check_hits "objagg:objagg_obj_root_destroy" 1
+ check_err $? "eRP was not destroyed"
+
+ log_test "delta simple test ($tcflags)"
+}
+
+bloom_simple_test()
+{
+ # The Bloom filter requires that the eRP table is used. This test
+ # verifies that the Bloom filter does not harm the correctness of
+ # ACLs. First, make sure that the eRP table is used, then set rule
+ # patterns which are distant enough to result in a lookup being
+ # skipped after consulting the Bloom filter. Although some eRP
+ # lookups are skipped, the correct filter should still be hit.
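+ #
+ # Concretely, the /32 and /8 masks used below are distant enough that
+ # some eRP lookups should be skipped once the Bloom filter is active.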
+
+ RET=0
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ $tcflags dst_ip 192.0.2.2 action drop
+ tc filter add dev $h2 ingress protocol ip pref 5 handle 104 flower \
+ $tcflags dst_ip 198.51.100.2 action drop
+ tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
+ $tcflags dst_ip 192.0.0.0/8 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 101 1
+ check_err $? "Two filters - did not match highest priority"
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 198.51.100.1 -B 198.51.100.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 104 1
+ check_err $? "Single filter - did not match"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_err $? "Low prio filter - did not match"
+
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 198.0.0.0/8 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 198.51.100.1 -B 198.51.100.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Two filters - did not match highest priority after add"
+
+ tc filter del dev $h2 ingress protocol ip pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+ tc filter del dev $h2 ingress protocol ip pref 5 handle 104 flower
+
+ log_test "bloom simple test ($tcflags)"
+}
+
+bloom_complex_test()
+{
+ # Bloom filter index computation is affected by the region ID, the
+ # eRP ID and the region key size. In order to exercise those parts
+ # of the Bloom filter code, use a series of regions, each with a
+ # different key size, and send a packet that should hit all of them.
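+ #
+ # The chains below use regions of up to 2, 2-4, 4-8 and 8-12 key
+ # blocks, so one packet traversing all of them exercises several
+ # region / key size combinations.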
+ local index
+
+ RET=0
+ NUM_CHAINS=4
+ BASE_INDEX=100
+
+ # Create chain with up to 2 key blocks (ip_proto only)
+ tc chain add dev $h2 ingress chain 1 protocol ip flower \
+ ip_proto tcp &> /dev/null
+ # Create chain with 2-4 key blocks (ip_proto, src MAC)
+ tc chain add dev $h2 ingress chain 2 protocol ip flower \
+ ip_proto tcp \
+ src_mac 00:00:00:00:00:00/FF:FF:FF:FF:FF:FF &> /dev/null
+ # Create chain with 4-8 key blocks (ip_proto, src & dst MAC, IPv4 dest)
+ tc chain add dev $h2 ingress chain 3 protocol ip flower \
+ ip_proto tcp \
+ dst_mac 00:00:00:00:00:00/FF:FF:FF:FF:FF:FF \
+ src_mac 00:00:00:00:00:00/FF:FF:FF:FF:FF:FF \
+ dst_ip 0.0.0.0/32 &> /dev/null
+ # Default chain contains all fields and therefore is 8-12 key blocks
+ tc chain add dev $h2 ingress chain 4
+
+ # We need at least 2 rules in every region to have the eRP table
+ # active, so create a dummy rule per chain using a different pattern
+ for i in $(eval echo {0..$NUM_CHAINS}); do
+ index=$((BASE_INDEX - 1 - i))
+ tc filter add dev $h2 ingress chain $i protocol ip \
+ pref 2 handle $index flower \
+ $tcflags ip_proto tcp action drop
+ done
+
+ # Add rules to test Bloom filter, each in a different chain
+ index=$BASE_INDEX
+ tc filter add dev $h2 ingress protocol ip \
+ pref 1 handle $((++index)) flower \
+ $tcflags dst_ip 192.0.0.0/16 action goto chain 1
+ tc filter add dev $h2 ingress chain 1 protocol ip \
+ pref 1 handle $((++index)) flower \
+ $tcflags action goto chain 2
+ tc filter add dev $h2 ingress chain 2 protocol ip \
+ pref 1 handle $((++index)) flower \
+ $tcflags src_mac $h1mac action goto chain 3
+ tc filter add dev $h2 ingress chain 3 protocol ip \
+ pref 1 handle $((++index)) flower \
+ $tcflags dst_ip 192.0.0.0/8 action goto chain 4
+ tc filter add dev $h2 ingress chain 4 protocol ip \
+ pref 1 handle $((++index)) flower \
+ $tcflags src_ip 192.0.2.0/24 action drop
+
+ # Send a packet that is supposed to hit all chains
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
+ -t ip -q
+
+ for i in $(eval echo {0..$NUM_CHAINS}); do
+ index=$((BASE_INDEX + i + 1))
+ tc_check_packets "dev $h2 ingress" $index 1
+ check_err $? "Did not match chain $i"
+ done
+
+ # Rules cleanup
+ for i in $(eval echo {$NUM_CHAINS..0}); do
+ index=$((BASE_INDEX - i - 1))
+ tc filter del dev $h2 ingress chain $i \
+ pref 2 handle $index flower
+ index=$((BASE_INDEX + i + 1))
+ tc filter del dev $h2 ingress chain $i \
+ pref 1 handle $index flower
+ done
+
+ # Chains cleanup
+ for i in $(eval echo {$NUM_CHAINS..1}); do
+ tc chain del dev $h2 ingress chain $i
+ done
+
+ log_test "bloom complex test ($tcflags)"
+}
+
+bloom_delta_test()
+{
+ # When multiple masks are used, the eRP table is activated. When
+ # masks are close enough (delta), they reside in the same eRP
+ # table. This test verifies that the eRP table is correctly
+ # allocated and used under the delta condition and that the Bloom
+ # filter is still functional with deltas.
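+ #
+ # The /16 and /24 masks below are assumed to be close enough to be
+ # encoded as a delta within a single eRP.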
+
+ RET=0
+
+ tc filter add dev $h2 ingress protocol ip pref 3 handle 103 flower \
+ $tcflags dst_ip 192.1.0.0/16 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.1.2.1 -B 192.1.2.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 103 1
+ check_err $? "Single filter - did not match"
+
+ tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
+ $tcflags dst_ip 192.2.1.0/24 action drop
+
+ $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.2.1.1 -B 192.2.1.2 \
+ -t ip -q
+
+ tc_check_packets "dev $h2 ingress" 102 1
+ check_err $? "Delta filters - did not match second filter"
+
+ tc filter del dev $h2 ingress protocol ip pref 3 handle 103 flower
+ tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
+
+ log_test "bloom delta test ($tcflags)"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
new file mode 100755
index 000000000000..dcf9f4e913e0
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan.sh
@@ -0,0 +1,1103 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test various aspects of VxLAN offloading which are specific to mlxsw, such
+# as sanitization of invalid configurations and offload indication.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="sanitization_test offload_indication_test \
+ sanitization_vlan_aware_test offload_indication_vlan_aware_test"
+NUM_NETIFS=2
+source $lib_dir/lib.sh
+
+setup_prepare()
+{
+ swp1=${NETIFS[p1]}
+ swp2=${NETIFS[p2]}
+
+ ip link set dev $swp1 up
+ ip link set dev $swp2 up
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ip link set dev $swp2 down
+ ip link set dev $swp1 down
+}
+
+sanitization_single_dev_test_pass()
+{
+ ip link set dev $swp1 master br0
+ check_err $?
+ ip link set dev vxlan0 master br0
+ check_err $?
+
+ ip link set dev $swp1 nomaster
+
+ ip link set dev $swp1 master br0
+ check_err $?
+}
+
+sanitization_single_dev_test_fail()
+{
+ ip link set dev $swp1 master br0
+ check_err $?
+ ip link set dev vxlan0 master br0 &> /dev/null
+ check_fail $?
+
+ ip link set dev $swp1 nomaster
+
+ ip link set dev vxlan0 master br0
+ check_err $?
+ ip link set dev $swp1 master br0 &> /dev/null
+ check_fail $?
+}
+
+sanitization_single_dev_valid_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ sanitization_single_dev_test_pass
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device - valid configuration"
+}
+
+sanitization_single_dev_vlan_aware_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0 vlan_filtering 1
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ sanitization_single_dev_test_pass
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device with a vlan-aware bridge"
+}
+
+sanitization_single_dev_mcast_enabled_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ sanitization_single_dev_test_fail
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device with a multicast enabled bridge"
+}
+
+sanitization_single_dev_mcast_group_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789 \
+ dev $swp2 group 239.0.0.1
+
+ sanitization_single_dev_test_fail
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device with a multicast group"
+}
+
+sanitization_single_dev_no_local_ip_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit dstport 4789
+
+ sanitization_single_dev_test_fail
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device with no local ip"
+}
+
+sanitization_single_dev_local_ipv6_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 2001:db8::1 dstport 4789
+
+ sanitization_single_dev_test_fail
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device with local ipv6 address"
+}
+
+sanitization_single_dev_learning_enabled_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 learning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ sanitization_single_dev_test_pass
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device with learning enabled"
+}
+
+sanitization_single_dev_local_interface_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789 dev $swp2
+
+ sanitization_single_dev_test_fail
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device with local interface"
+}
+
+sanitization_single_dev_port_range_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789 \
+ srcport 4000 5000
+
+ sanitization_single_dev_test_fail
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device with udp source port range"
+}
+
+sanitization_single_dev_tos_static_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos 20 local 198.51.100.1 dstport 4789
+
+ sanitization_single_dev_test_fail
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device with static tos"
+}
+
+sanitization_single_dev_ttl_inherit_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl inherit tos inherit local 198.51.100.1 dstport 4789
+
+ sanitization_single_dev_test_fail
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device with inherit ttl"
+}
+
+sanitization_single_dev_udp_checksum_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning udpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ sanitization_single_dev_test_fail
+
+ ip link del dev vxlan0
+ ip link del dev br0
+
+ log_test "vxlan device with udp checksum"
+}
+
+sanitization_single_dev_test()
+{
+ # These tests make sure that we correctly sanitize VxLAN device
+ # configurations we do not support
+ sanitization_single_dev_valid_test
+ sanitization_single_dev_vlan_aware_test
+ sanitization_single_dev_mcast_enabled_test
+ sanitization_single_dev_mcast_group_test
+ sanitization_single_dev_no_local_ip_test
+ sanitization_single_dev_local_ipv6_test
+ sanitization_single_dev_learning_enabled_test
+ sanitization_single_dev_local_interface_test
+ sanitization_single_dev_port_range_test
+ sanitization_single_dev_tos_static_test
+ sanitization_single_dev_ttl_inherit_test
+ sanitization_single_dev_udp_checksum_test
+}
+
+sanitization_multi_devs_test_pass()
+{
+ ip link set dev $swp1 master br0
+ check_err $?
+ ip link set dev vxlan0 master br0
+ check_err $?
+ ip link set dev $swp2 master br1
+ check_err $?
+ ip link set dev vxlan1 master br1
+ check_err $?
+
+ ip link set dev $swp2 nomaster
+ ip link set dev $swp1 nomaster
+
+ ip link set dev $swp1 master br0
+ check_err $?
+ ip link set dev $swp2 master br1
+ check_err $?
+}
+
+sanitization_multi_devs_test_fail()
+{
+ ip link set dev $swp1 master br0
+ check_err $?
+ ip link set dev vxlan0 master br0
+ check_err $?
+ ip link set dev $swp2 master br1
+ check_err $?
+ ip link set dev vxlan1 master br1 &> /dev/null
+ check_fail $?
+
+ ip link set dev $swp2 nomaster
+ ip link set dev $swp1 nomaster
+
+ ip link set dev vxlan1 master br1
+ check_err $?
+ ip link set dev $swp1 master br0
+ check_err $?
+ ip link set dev $swp2 master br1 &> /dev/null
+ check_fail $?
+}
+
+sanitization_multi_devs_valid_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+ ip link add dev br1 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+ ip link add name vxlan1 up type vxlan id 20 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ sanitization_multi_devs_test_pass
+
+ ip link del dev vxlan1
+ ip link del dev vxlan0
+ ip link del dev br1
+ ip link del dev br0
+
+ log_test "multiple vxlan devices - valid configuration"
+}
+
+sanitization_multi_devs_ttl_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+ ip link add dev br1 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+ ip link add name vxlan1 up type vxlan id 20 nolearning noudpcsum \
+ ttl 40 tos inherit local 198.51.100.1 dstport 4789
+
+ sanitization_multi_devs_test_fail
+
+ ip link del dev vxlan1
+ ip link del dev vxlan0
+ ip link del dev br1
+ ip link del dev br0
+
+ log_test "multiple vxlan devices with different ttl"
+}
+
+sanitization_multi_devs_udp_dstport_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+ ip link add dev br1 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+ ip link add name vxlan1 up type vxlan id 20 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 5789
+
+ sanitization_multi_devs_test_fail
+
+ ip link del dev vxlan1
+ ip link del dev vxlan0
+ ip link del dev br1
+ ip link del dev br0
+
+ log_test "multiple vxlan devices with different udp destination port"
+}
+
+sanitization_multi_devs_local_ip_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0
+ ip link add dev br1 type bridge mcast_snooping 0
+
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+ ip link add name vxlan1 up type vxlan id 20 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.2 dstport 4789
+
+ sanitization_multi_devs_test_fail
+
+ ip link del dev vxlan1
+ ip link del dev vxlan0
+ ip link del dev br1
+ ip link del dev br0
+
+ log_test "multiple vxlan devices with different local ip"
+}
+
+sanitization_multi_devs_test()
+{
+ # The device has a single VTEP, which means all the VxLAN devices
+ # we offload must share certain properties such as source IP and
+ # UDP destination port. These tests make sure that we forbid
+ # configurations that violate this limitation
+ sanitization_multi_devs_valid_test
+ sanitization_multi_devs_ttl_test
+ sanitization_multi_devs_udp_dstport_test
+ sanitization_multi_devs_local_ip_test
+}
+
+sanitization_test()
+{
+ sanitization_single_dev_test
+ sanitization_multi_devs_test
+}
+
+offload_indication_setup_create()
+{
+ # Create a simple setup with two bridges, each with a VxLAN device
+ # and one local port
+ ip link add name br0 up type bridge mcast_snooping 0
+ ip link add name br1 up type bridge mcast_snooping 0
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 master br1
+
+ ip address add 198.51.100.1/32 dev lo
+
+ ip link add name vxlan0 up master br0 type vxlan id 10 nolearning \
+ noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+ ip link add name vxlan1 up master br1 type vxlan id 20 nolearning \
+ noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+}
+
+offload_indication_setup_destroy()
+{
+ ip link del dev vxlan1
+ ip link del dev vxlan0
+
+ ip address del 198.51.100.1/32 dev lo
+
+ ip link set dev $swp2 nomaster
+ ip link set dev $swp1 nomaster
+
+ ip link del dev br1
+ ip link del dev br0
+}
+
+offload_indication_fdb_flood_test()
+{
+ RET=0
+
+ bridge fdb append 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.2
+
+ bridge fdb show brport vxlan0 | grep 00:00:00:00:00:00 \
+ | grep -q offload
+ check_err $?
+
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self
+
+ log_test "vxlan flood entry offload indication"
+}
+
+offload_indication_fdb_bridge_test()
+{
+ RET=0
+
+ bridge fdb add de:ad:be:ef:13:37 dev vxlan0 self master static \
+ dst 198.51.100.2
+
+ bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
+ | grep -q offload
+ check_err $?
+ bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
+ | grep -q offload
+ check_err $?
+
+ log_test "vxlan entry offload indication - initial state"
+
+ # Remove FDB entry from the bridge driver and check that corresponding
+ # entry in the VxLAN driver is not marked as offloaded
+ RET=0
+
+ bridge fdb del de:ad:be:ef:13:37 dev vxlan0 master
+ bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
+ | grep -q offload
+ check_fail $?
+
+ log_test "vxlan entry offload indication - after removal from bridge"
+
+ # Add the FDB entry back to the bridge driver and make sure it is
+ # marked as offloaded in both drivers
+ RET=0
+
+ bridge fdb add de:ad:be:ef:13:37 dev vxlan0 master static
+ bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
+ | grep -q offload
+ check_err $?
+ bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
+ | grep -q offload
+ check_err $?
+
+ log_test "vxlan entry offload indication - after re-add to bridge"
+
+ # Remove FDB entry from the VxLAN driver and check that corresponding
+ # entry in the bridge driver is not marked as offloaded
+ RET=0
+
+ bridge fdb del de:ad:be:ef:13:37 dev vxlan0 self
+ bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
+ | grep -q offload
+ check_fail $?
+
+ log_test "vxlan entry offload indication - after removal from vxlan"
+
+ # Add the FDB entry back to the VxLAN driver and make sure it is
+ # marked as offloaded in both drivers
+ RET=0
+
+ bridge fdb add de:ad:be:ef:13:37 dev vxlan0 self dst 198.51.100.2
+ bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep self \
+ | grep -q offload
+ check_err $?
+ bridge fdb show brport vxlan0 | grep de:ad:be:ef:13:37 | grep -v self \
+ | grep -q offload
+ check_err $?
+
+ log_test "vxlan entry offload indication - after re-add to vxlan"
+
+ bridge fdb del de:ad:be:ef:13:37 dev vxlan0 self master
+}
+
+offload_indication_fdb_test()
+{
+ offload_indication_fdb_flood_test
+ offload_indication_fdb_bridge_test
+}
+
+offload_indication_decap_route_test()
+{
+ RET=0
+
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ ip link set dev vxlan0 down
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ ip link set dev vxlan1 down
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_fail $?
+
+ log_test "vxlan decap route - vxlan device down"
+
+ RET=0
+
+ ip link set dev vxlan1 up
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ ip link set dev vxlan0 up
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ log_test "vxlan decap route - vxlan device up"
+
+ RET=0
+
+ ip address delete 198.51.100.1/32 dev lo
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_fail $?
+
+ ip address add 198.51.100.1/32 dev lo
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ log_test "vxlan decap route - add local route"
+
+ RET=0
+
+ ip link set dev $swp1 nomaster
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ ip link set dev $swp2 nomaster
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_fail $?
+
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 master br1
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ log_test "vxlan decap route - local ports enslavement"
+
+ RET=0
+
+ ip link del dev br0
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ ip link del dev br1
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_fail $?
+
+ log_test "vxlan decap route - bridge device deletion"
+
+ RET=0
+
+ ip link add name br0 up type bridge mcast_snooping 0
+ ip link add name br1 up type bridge mcast_snooping 0
+ ip link set dev $swp1 master br0
+ ip link set dev $swp2 master br1
+ ip link set dev vxlan0 master br0
+ ip link set dev vxlan1 master br1
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ ip link del dev vxlan0
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ ip link del dev vxlan1
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_fail $?
+
+ log_test "vxlan decap route - vxlan device deletion"
+
+ ip link add name vxlan0 up master br0 type vxlan id 10 nolearning \
+ noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+ ip link add name vxlan1 up master br1 type vxlan id 20 nolearning \
+ noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+}
+
+check_fdb_offloaded()
+{
+ local mac=00:11:22:33:44:55
+ local zmac=00:00:00:00:00:00
+
+ bridge fdb show dev vxlan0 | grep $mac | grep self | grep -q offload
+ check_err $?
+ bridge fdb show dev vxlan0 | grep $mac | grep master | grep -q offload
+ check_err $?
+
+ bridge fdb show dev vxlan0 | grep $zmac | grep self | grep -q offload
+ check_err $?
+}
+
+check_vxlan_fdb_not_offloaded()
+{
+ local mac=00:11:22:33:44:55
+ local zmac=00:00:00:00:00:00
+
+ bridge fdb show dev vxlan0 | grep $mac | grep -q self
+ check_err $?
+ bridge fdb show dev vxlan0 | grep $mac | grep self | grep -q offload
+ check_fail $?
+
+ bridge fdb show dev vxlan0 | grep $zmac | grep -q self
+ check_err $?
+ bridge fdb show dev vxlan0 | grep $zmac | grep self | grep -q offload
+ check_fail $?
+}
+
+check_bridge_fdb_not_offloaded()
+{
+ local mac=00:11:22:33:44:55
+ local zmac=00:00:00:00:00:00
+
+ bridge fdb show dev vxlan0 | grep $mac | grep -q master
+ check_err $?
+ bridge fdb show dev vxlan0 | grep $mac | grep master | grep -q offload
+ check_fail $?
+}
+
+__offload_indication_join_vxlan_first()
+{
+ local vid=$1; shift
+
+ local mac=00:11:22:33:44:55
+ local zmac=00:00:00:00:00:00
+
+ bridge fdb append $zmac dev vxlan0 self dst 198.51.100.2
+
+ ip link set dev vxlan0 master br0
+ bridge fdb add dev vxlan0 $mac self master static dst 198.51.100.2
+
+ RET=0
+ check_vxlan_fdb_not_offloaded
+ ip link set dev $swp1 master br0
+ sleep .1
+ check_fdb_offloaded
+ log_test "offload indication - attach vxlan first"
+
+ RET=0
+ ip link set dev vxlan0 down
+ check_vxlan_fdb_not_offloaded
+ check_bridge_fdb_not_offloaded
+ log_test "offload indication - set vxlan down"
+
+ RET=0
+ ip link set dev vxlan0 up
+ sleep .1
+ check_fdb_offloaded
+ log_test "offload indication - set vxlan up"
+
+ if [[ ! -z $vid ]]; then
+ RET=0
+ bridge vlan del dev vxlan0 vid $vid
+ check_vxlan_fdb_not_offloaded
+ check_bridge_fdb_not_offloaded
+ log_test "offload indication - delete VLAN"
+
+ RET=0
+ bridge vlan add dev vxlan0 vid $vid
+ check_vxlan_fdb_not_offloaded
+ check_bridge_fdb_not_offloaded
+ log_test "offload indication - add tagged VLAN"
+
+ RET=0
+ bridge vlan add dev vxlan0 vid $vid pvid untagged
+ sleep .1
+ check_fdb_offloaded
+ log_test "offload indication - add pvid/untagged VLAN"
+ fi
+
+ RET=0
+ ip link set dev $swp1 nomaster
+ check_vxlan_fdb_not_offloaded
+ log_test "offload indication - detach port"
+}
+
+offload_indication_join_vxlan_first()
+{
+ ip link add dev br0 up type bridge mcast_snooping 0
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ __offload_indication_join_vxlan_first
+
+ ip link del dev vxlan0
+ ip link del dev br0
+}
+
+__offload_indication_join_vxlan_last()
+{
+ local zmac=00:00:00:00:00:00
+
+ RET=0
+
+ bridge fdb append $zmac dev vxlan0 self dst 198.51.100.2
+
+ ip link set dev $swp1 master br0
+
+ bridge fdb show dev vxlan0 | grep $zmac | grep self | grep -q offload
+ check_fail $?
+
+ ip link set dev vxlan0 master br0
+
+ bridge fdb show dev vxlan0 | grep $zmac | grep self | grep -q offload
+ check_err $?
+
+ log_test "offload indication - attach vxlan last"
+}
+
+offload_indication_join_vxlan_last()
+{
+ ip link add dev br0 up type bridge mcast_snooping 0
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ __offload_indication_join_vxlan_last
+
+ ip link del dev vxlan0
+ ip link del dev br0
+}
+
+offload_indication_test()
+{
+ offload_indication_setup_create
+ offload_indication_fdb_test
+ offload_indication_decap_route_test
+ offload_indication_setup_destroy
+
+ log_info "offload indication - replay & cleanup"
+ offload_indication_join_vxlan_first
+ offload_indication_join_vxlan_last
+}
+
+sanitization_vlan_aware_test()
+{
+ RET=0
+
+ ip link add dev br0 type bridge mcast_snooping 0 vlan_filtering 1
+
+ ip link add name vxlan10 up master br0 type vxlan id 10 nolearning \
+ noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ ip link add name vxlan20 up master br0 type vxlan id 20 nolearning \
+ noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ # Test that when each VNI is mapped to a different VLAN we can enslave
+ # a port to the bridge
+ bridge vlan add vid 10 dev vxlan10 pvid untagged
+ bridge vlan add vid 20 dev vxlan20 pvid untagged
+
+ ip link set dev $swp1 master br0
+ check_err $?
+
+ log_test "vlan-aware - enslavement to vlan-aware bridge"
+
+ # Try to map both VNIs to the same VLAN and make sure configuration
+ # fails
+ RET=0
+
+ bridge vlan add vid 10 dev vxlan20 pvid untagged &> /dev/null
+ check_fail $?
+
+ log_test "vlan-aware - two vnis mapped to the same vlan"
+
+ # Test that enslavement of a port to a bridge fails when two VNIs
+ # are mapped to the same VLAN
+ RET=0
+
+ ip link set dev $swp1 nomaster
+
+ bridge vlan del vid 20 dev vxlan20 pvid untagged
+ bridge vlan add vid 10 dev vxlan20 pvid untagged
+
+ ip link set dev $swp1 master br0 &> /dev/null
+ check_fail $?
+
+ log_test "vlan-aware - failed enslavement to vlan-aware bridge"
+
+ ip link del dev vxlan20
+ ip link del dev vxlan10
+ ip link del dev br0
+}
+
+offload_indication_vlan_aware_setup_create()
+{
+ # Create a simple setup with two VxLAN devices and a single VLAN-aware
+ # bridge
+ ip link add name br0 up type bridge mcast_snooping 0 vlan_filtering 1 \
+ vlan_default_pvid 0
+
+ ip link set dev $swp1 master br0
+
+ bridge vlan add vid 10 dev $swp1
+ bridge vlan add vid 20 dev $swp1
+
+ ip address add 198.51.100.1/32 dev lo
+
+ ip link add name vxlan10 up master br0 type vxlan id 10 nolearning \
+ noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+ ip link add name vxlan20 up master br0 type vxlan id 20 nolearning \
+ noudpcsum ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ bridge vlan add vid 10 dev vxlan10 pvid untagged
+ bridge vlan add vid 20 dev vxlan20 pvid untagged
+}
+
+offload_indication_vlan_aware_setup_destroy()
+{
+ bridge vlan del vid 20 dev vxlan20
+ bridge vlan del vid 10 dev vxlan10
+
+ ip link del dev vxlan20
+ ip link del dev vxlan10
+
+ ip address del 198.51.100.1/32 dev lo
+
+ bridge vlan del vid 20 dev $swp1
+ bridge vlan del vid 10 dev $swp1
+
+ ip link set dev $swp1 nomaster
+
+ ip link del dev br0
+}
+
+offload_indication_vlan_aware_fdb_test()
+{
+ RET=0
+
+ log_info "vxlan entry offload indication - vlan-aware"
+
+ bridge fdb add de:ad:be:ef:13:37 dev vxlan10 self master static \
+ dst 198.51.100.2 vlan 10
+
+ bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep self \
+ | grep -q offload
+ check_err $?
+ bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep -v self \
+ | grep -q offload
+ check_err $?
+
+ log_test "vxlan entry offload indication - initial state"
+
+ # Remove FDB entry from the bridge driver and check that corresponding
+ # entry in the VxLAN driver is not marked as offloaded
+ RET=0
+
+ bridge fdb del de:ad:be:ef:13:37 dev vxlan10 master vlan 10
+ bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep self \
+ | grep -q offload
+ check_fail $?
+
+ log_test "vxlan entry offload indication - after removal from bridge"
+
+ # Add the FDB entry back to the bridge driver and make sure it is
+ # marked as offloaded in both drivers
+ RET=0
+
+ bridge fdb add de:ad:be:ef:13:37 dev vxlan10 master static vlan 10
+ bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep self \
+ | grep -q offload
+ check_err $?
+ bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep -v self \
+ | grep -q offload
+ check_err $?
+
+ log_test "vxlan entry offload indication - after re-add to bridge"
+
+ # Remove FDB entry from the VxLAN driver and check that corresponding
+ # entry in the bridge driver is not marked as offloaded
+ RET=0
+
+ bridge fdb del de:ad:be:ef:13:37 dev vxlan10 self
+ bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep -v self \
+ | grep -q offload
+ check_fail $?
+
+ log_test "vxlan entry offload indication - after removal from vxlan"
+
+ # Add the FDB entry back to the VxLAN driver and make sure it is
+ # marked as offloaded in both drivers
+ RET=0
+
+ bridge fdb add de:ad:be:ef:13:37 dev vxlan10 self dst 198.51.100.2
+ bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep self \
+ | grep -q offload
+ check_err $?
+ bridge fdb show brport vxlan10 | grep de:ad:be:ef:13:37 | grep -v self \
+ | grep -q offload
+ check_err $?
+
+ log_test "vxlan entry offload indication - after re-add to vxlan"
+
+ bridge fdb del de:ad:be:ef:13:37 dev vxlan10 self master vlan 10
+}
+
+offload_indication_vlan_aware_decap_route_test()
+{
+ RET=0
+
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ # Toggle PVID flag on one VxLAN device and make sure route is still
+ # marked as offloaded
+ bridge vlan add vid 10 dev vxlan10 untagged
+
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ # Toggle PVID flag on second VxLAN device and make sure route is no
+ # longer marked as offloaded
+ bridge vlan add vid 20 dev vxlan20 untagged
+
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_fail $?
+
+ # Toggle PVID flag back and make sure route is marked as offloaded
+ bridge vlan add vid 10 dev vxlan10 pvid untagged
+ bridge vlan add vid 20 dev vxlan20 pvid untagged
+
+ ip route show table local | grep 198.51.100.1 | grep -q offload
+ check_err $?
+
+ log_test "vxlan decap route - vni map/unmap"
+}
+
+offload_indication_vlan_aware_join_vxlan_first()
+{
+ ip link add dev br0 up type bridge mcast_snooping 0 \
+ vlan_filtering 1 vlan_default_pvid 1
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ __offload_indication_join_vxlan_first 1
+
+ ip link del dev vxlan0
+ ip link del dev br0
+}
+
+offload_indication_vlan_aware_join_vxlan_last()
+{
+ ip link add dev br0 up type bridge mcast_snooping 0 \
+ vlan_filtering 1 vlan_default_pvid 1
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ __offload_indication_join_vxlan_last
+
+ ip link del dev vxlan0
+ ip link del dev br0
+}
+
+offload_indication_vlan_aware_l3vni_test()
+{
+ local zmac=00:00:00:00:00:00
+
+ RET=0
+
+ sysctl_set net.ipv6.conf.default.disable_ipv6 1
+ ip link add dev br0 up type bridge mcast_snooping 0 \
+ vlan_filtering 1 vlan_default_pvid 0
+ ip link add name vxlan0 up type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ ip link set dev $swp1 master br0
+
+ # The test will use the offload indication on the FDB entry to
+ # understand if the tunnel is offloaded or not
+ bridge fdb append $zmac dev vxlan0 self dst 192.0.2.1
+
+ ip link set dev vxlan0 master br0
+ bridge vlan add dev vxlan0 vid 10 pvid untagged
+
+ # No local port or router port is a member of the VLAN, so the
+ # tunnel should not be offloaded
+ bridge fdb show brport vxlan0 | grep $zmac | grep self \
+ | grep -q offload
+ check_fail $? "vxlan tunnel offloaded when should not"
+
+ # Configure a VLAN interface and make sure tunnel is offloaded
+ ip link add link br0 name br10 up type vlan id 10
+ sysctl_set net.ipv6.conf.br10.disable_ipv6 0
+ ip -6 address add 2001:db8:1::1/64 dev br10
+ bridge fdb show brport vxlan0 | grep $zmac | grep self \
+ | grep -q offload
+ check_err $? "vxlan tunnel not offloaded when should"
+
+ # Unlink the VXLAN device, make sure tunnel is no longer offloaded,
+ # then add it back to the bridge and make sure it is offloaded
+ ip link set dev vxlan0 nomaster
+ bridge fdb show brport vxlan0 | grep $zmac | grep self \
+ | grep -q offload
+ check_fail $? "vxlan tunnel offloaded after unlinked from bridge"
+
+ ip link set dev vxlan0 master br0
+ bridge fdb show brport vxlan0 | grep $zmac | grep self \
+ | grep -q offload
+ check_fail $? "vxlan tunnel offloaded despite no matching vid"
+
+ bridge vlan add dev vxlan0 vid 10 pvid untagged
+ bridge fdb show brport vxlan0 | grep $zmac | grep self \
+ | grep -q offload
+ check_err $? "vxlan tunnel not offloaded after adding vid"
+
+ log_test "vxlan - l3 vni"
+
+ ip link del dev vxlan0
+ ip link del dev br0
+ sysctl_restore net.ipv6.conf.default.disable_ipv6
+}
+
+offload_indication_vlan_aware_test()
+{
+ offload_indication_vlan_aware_setup_create
+ offload_indication_vlan_aware_fdb_test
+ offload_indication_vlan_aware_decap_route_test
+ offload_indication_vlan_aware_setup_destroy
+
+ log_info "offload indication - replay & cleanup - vlan aware"
+ offload_indication_vlan_aware_join_vxlan_first
+ offload_indication_vlan_aware_join_vxlan_last
+ offload_indication_vlan_aware_l3vni_test
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
new file mode 100755
index 000000000000..fedcb7b35af9
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
@@ -0,0 +1,309 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test VxLAN flooding. The device stores flood records in a singly linked list
+# where each record stores up to three IPv4 addresses of remote VTEPs. The test
+# verifies that packets are correctly flooded in various cases such as deletion
+# of a record in the middle of the list.
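+#
+# With the 12 remote VTEPs used below (LSBs 2..13), the records are expected
+# to hold {.2 .3 .4}, {.5 .6 .7}, {.8 .9 .10} and {.11 .12 .13}, in that
+# order.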
+#
+# +--------------------+
+# | H1 (vrf) |
+# | + $h1 |
+# | | 203.0.113.1/24|
+# +----|---------------+
+# |
+# +----|----------------------------------------------------------------------+
+# | SW | |
+# | +--|--------------------------------------------------------------------+ |
+# | | + $swp1 BR0 (802.1d) | |
+# | | | |
+# | | + vxlan0 (vxlan) | |
+# | | local 198.51.100.1 | |
+# | | remote 198.51.100.{2..13} | |
+# | | id 10 dstport 4789 | |
+# | +-----------------------------------------------------------------------+ |
+# | |
+# | 198.51.100.0/24 via 192.0.2.2 |
+# | |
+# | + $rp1 |
+# | | 192.0.2.1/24 |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | R2 (vrf) |
+# | + $rp2 |
+# | 192.0.2.2/24 |
+# | |
+# +-------------------------------------------------------------+
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="flooding_test"
+NUM_NETIFS=4
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 203.0.113.1/24
+}
+
+h1_destroy()
+{
+ simple_if_fini $h1 203.0.113.1/24
+}
+
+switch_create()
+{
+ # Make sure the bridge uses the MAC address of the local port and
+ # not that of the VxLAN device
+ ip link add dev br0 type bridge mcast_snooping 0
+ ip link set dev br0 address $(mac_get $swp1)
+
+ ip link add name vxlan0 type vxlan id 10 nolearning noudpcsum \
+ ttl 20 tos inherit local 198.51.100.1 dstport 4789
+
+ ip address add 198.51.100.1/32 dev lo
+
+ ip link set dev $swp1 master br0
+ ip link set dev vxlan0 master br0
+
+ ip link set dev br0 up
+ ip link set dev $swp1 up
+ ip link set dev vxlan0 up
+}
+
+switch_destroy()
+{
+ ip link set dev vxlan0 down
+ ip link set dev $swp1 down
+ ip link set dev br0 down
+
+ ip link set dev vxlan0 nomaster
+ ip link set dev $swp1 nomaster
+
+ ip address del 198.51.100.1/32 dev lo
+
+ ip link del dev vxlan0
+
+ ip link del dev br0
+}
+
+router1_create()
+{
+ # This router is in the default VRF, where the VxLAN device is
+ # performing the L3 lookup
+ ip link set dev $rp1 up
+ ip address add 192.0.2.1/24 dev $rp1
+ ip route add 198.51.100.0/24 via 192.0.2.2
+}
+
+router1_destroy()
+{
+ ip route del 198.51.100.0/24 via 192.0.2.2
+ ip address del 192.0.2.1/24 dev $rp1
+ ip link set dev $rp1 down
+}
+
+router2_create()
+{
+ # This router is not in the default VRF, so use simple_if_init()
+ simple_if_init $rp2 192.0.2.2/24
+}
+
+router2_destroy()
+{
+ simple_if_fini $rp2 192.0.2.2/24
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ rp1=${NETIFS[p3]}
+ rp2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+
+ switch_create
+
+ router1_create
+ router2_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ router2_destroy
+ router1_destroy
+
+ switch_destroy
+
+ h1_destroy
+
+ vrf_cleanup
+}
+
+flooding_remotes_add()
+{
+ local num_remotes=$1
+ local lsb
+ local i
+
+ for i in $(eval echo {1..$num_remotes}); do
+ lsb=$((i + 1))
+
+ bridge fdb append 00:00:00:00:00:00 dev vxlan0 self \
+ dst 198.51.100.$lsb
+ done
+}
+
+flooding_filters_add()
+{
+ local num_remotes=$1
+ local lsb
+ local i
+
+ tc qdisc add dev $rp2 clsact
+
+ for i in $(eval echo {1..$num_remotes}); do
+ lsb=$((i + 1))
+
+ tc filter add dev $rp2 ingress protocol ip pref $i handle $i \
+ flower ip_proto udp dst_ip 198.51.100.$lsb \
+ dst_port 4789 skip_sw action drop
+ done
+}
+
+flooding_filters_del()
+{
+ local num_remotes=$1
+ local i
+
+ for i in $(eval echo {1..$num_remotes}); do
+ tc filter del dev $rp2 ingress protocol ip pref $i \
+ handle $i flower
+ done
+
+ tc qdisc del dev $rp2 clsact
+}
+
+flooding_check_packets()
+{
+ local packets=("$@")
+ local num_remotes=${#packets[@]}
+ local i
+
+ for i in $(eval echo {1..$num_remotes}); do
+ tc_check_packets "dev $rp2 ingress" $i ${packets[i - 1]}
+ check_err $? "remote $i - did not get expected number of packets"
+ done
+}
+
+flooding_test()
+{
+ # Use 12 remote VTEPs that will be stored in 4 records. The array
+ # 'packets' will store how many packets are expected to be received
+ # by each remote VTEP at each stage of the test
+ declare -a packets=(1 1 1 1 1 1 1 1 1 1 1 1)
+ local num_remotes=12
+
+ RET=0
+
+ # Add FDB entries for remote VTEPs and corresponding tc filters on the
+ # ingress of the nexthop router. These filters will count how many
+ # packets were flooded to each remote VTEP
+ flooding_remotes_add $num_remotes
+ flooding_filters_add $num_remotes
+
+ # Send one packet and make sure it is flooded to all the remote VTEPs
+ $MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+ flooding_check_packets "${packets[@]}"
+ log_test "flood after 1 packet"
+
+ # Delete the third record which corresponds to VTEPs with LSB 8..10
+ # and check that packet is flooded correctly when we remove a record
+ # from the middle of the list
+ RET=0
+
+ packets=(2 2 2 2 2 2 1 1 1 2 2 2)
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.8
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.9
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.10
+
+ $MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+ flooding_check_packets "${packets[@]}"
+ log_test "flood after 2 packets"
+
+ # Delete the first record and make sure the packet is flooded correctly
+ RET=0
+
+ packets=(2 2 2 3 3 3 1 1 1 3 3 3)
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.2
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.3
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.4
+
+ $MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+ flooding_check_packets "${packets[@]}"
+ log_test "flood after 3 packets"
+
+ # Delete the last record and make sure the packet is flooded correctly
+ RET=0
+
+ packets=(2 2 2 4 4 4 1 1 1 3 3 3)
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.11
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.12
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.13
+
+ $MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+ flooding_check_packets "${packets[@]}"
+ log_test "flood after 4 packets"
+
+ # Delete the last remaining record one entry at a time and make sure
+ # single entries are correctly removed
+ RET=0
+
+ packets=(2 2 2 4 5 5 1 1 1 3 3 3)
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.5
+
+ $MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+ flooding_check_packets "${packets[@]}"
+ log_test "flood after 5 packets"
+
+ RET=0
+
+ packets=(2 2 2 4 5 6 1 1 1 3 3 3)
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.6
+
+ $MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+ flooding_check_packets "${packets[@]}"
+ log_test "flood after 6 packets"
+
+ RET=0
+
+ packets=(2 2 2 4 5 6 1 1 1 3 3 3)
+ bridge fdb del 00:00:00:00:00:00 dev vxlan0 self dst 198.51.100.7
+
+ $MZ $h1 -q -p 64 -b de:ad:be:ef:13:37 -t ip -c 1
+ flooding_check_packets "${packets[@]}"
+ log_test "flood after 7 packets"
+
+ flooding_filters_del $num_remotes
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index ad1eeb14fda7..30996306cabc 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -19,6 +19,7 @@ TEST_GEN_FILES := \
TEST_PROGS := run.sh
top_srcdir = ../../../../..
+KSFT_KHDR_INSTALL := 1
include ../../lib.mk
$(TEST_GEN_FILES): $(HEADERS)
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 46648427d537..07f572a1bd3f 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -10,8 +10,6 @@ TEST_PROGS_EXTENDED := gpio-mockup-chardev
GPIODIR := $(realpath ../../../gpio)
GPIOOBJ := gpio-utils.o
-include ../lib.mk
-
all: $(TEST_PROGS_EXTENDED)
override define CLEAN
@@ -19,7 +17,9 @@ override define CLEAN
$(MAKE) -C $(GPIODIR) OUTPUT=$(GPIODIR)/ clean
endef
-$(TEST_PROGS_EXTENDED):| khdr
+KSFT_KHDR_INSTALL := 1
+include ../lib.mk
+
$(TEST_PROGS_EXTENDED): $(GPIODIR)/$(GPIOOBJ)
$(GPIODIR)/$(GPIOOBJ):
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 01a219229238..f9a0e9938480 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -1,6 +1,7 @@
all:
top_srcdir = ../../../..
+KSFT_KHDR_INSTALL := 1
UNAME_M := $(shell uname -m)
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
@@ -14,9 +15,12 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86_64 += x86_64/state_test
TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
+TEST_GEN_PROGS_x86_64 += x86_64/hyperv_cpuid
TEST_GEN_PROGS_x86_64 += dirty_log_test
+TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
+TEST_GEN_PROGS_aarch64 += clear_dirty_log_test
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))
@@ -44,7 +48,6 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
all: $(STATIC_LIBS)
$(TEST_GEN_PROGS): $(STATIC_LIBS)
-$(STATIC_LIBS):| khdr
cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
cscope:
diff --git a/tools/testing/selftests/kvm/clear_dirty_log_test.c b/tools/testing/selftests/kvm/clear_dirty_log_test.c
new file mode 100644
index 000000000000..749336937d37
--- /dev/null
+++ b/tools/testing/selftests/kvm/clear_dirty_log_test.c
@@ -0,0 +1,2 @@
+#define USE_CLEAR_DIRTY_LOG
+#include "dirty_log_test.c"
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index aeff95a91b15..4715cfba20dc 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -51,10 +51,17 @@ static uint64_t random_array[TEST_PAGES_PER_LOOP];
static uint64_t iteration;
/*
- * GPA offset of the testing memory slot. Must be bigger than
- * DEFAULT_GUEST_PHY_PAGES.
+ * Guest physical memory offset of the testing memory slot.
+ * This will be set to the topmost valid physical address minus
+ * the test memory size.
*/
-static uint64_t guest_test_mem = DEFAULT_GUEST_TEST_MEM;
+static uint64_t guest_test_phys_mem;
+
+/*
+ * Guest virtual memory offset of the testing memory slot.
+ * Must not conflict with identity mapped test code.
+ */
+static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
/*
* Continuously write to the first 8 bytes of a random pages within
@@ -66,7 +73,7 @@ static void guest_code(void)
while (true) {
for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
- uint64_t addr = guest_test_mem;
+ uint64_t addr = guest_test_virt_mem;
addr += (READ_ONCE(random_array[i]) % guest_num_pages)
* guest_page_size;
addr &= ~(host_page_size - 1);
@@ -209,12 +216,14 @@ static void vm_dirty_log_verify(unsigned long *bmap)
}
static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
- uint64_t extra_mem_pages, void *guest_code)
+ uint64_t extra_mem_pages, void *guest_code,
+ unsigned long type)
{
struct kvm_vm *vm;
uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
- vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+ vm = _vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages,
+ O_RDWR, type);
kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
#ifdef __x86_64__
vm_create_irqchip(vm);
@@ -224,13 +233,14 @@ static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
}
static void run_test(enum vm_guest_mode mode, unsigned long iterations,
- unsigned long interval, bool top_offset)
+ unsigned long interval, uint64_t phys_offset)
{
unsigned int guest_pa_bits, guest_page_shift;
pthread_t vcpu_thread;
struct kvm_vm *vm;
uint64_t max_gfn;
unsigned long *bmap;
+ unsigned long type = 0;
switch (mode) {
case VM_MODE_P52V48_4K:
@@ -241,6 +251,14 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
guest_pa_bits = 52;
guest_page_shift = 16;
break;
+ case VM_MODE_P48V48_4K:
+ guest_pa_bits = 48;
+ guest_page_shift = 12;
+ break;
+ case VM_MODE_P48V48_64K:
+ guest_pa_bits = 48;
+ guest_page_shift = 16;
+ break;
case VM_MODE_P40V48_4K:
guest_pa_bits = 40;
guest_page_shift = 12;
@@ -255,6 +273,19 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode));
+#ifdef __x86_64__
+ /*
+ * FIXME
+ * The x86_64 kvm selftests framework currently only supports a
+ * single PML4, which restricts the number of physical address
+ * bits we can test to 39.
+ */
+ guest_pa_bits = 39;
+#endif
+#ifdef __aarch64__
+ if (guest_pa_bits != 40)
+ type = KVM_VM_TYPE_ARM_IPA_SIZE(guest_pa_bits);
+#endif
max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
guest_page_size = (1ul << guest_page_shift);
/* 1G of guest page sized pages */
@@ -263,31 +294,41 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
!!((guest_num_pages * guest_page_size) % host_page_size);
- if (top_offset) {
- guest_test_mem = (max_gfn - guest_num_pages) * guest_page_size;
- guest_test_mem &= ~(host_page_size - 1);
+ if (!phys_offset) {
+ guest_test_phys_mem = (max_gfn - guest_num_pages) * guest_page_size;
+ guest_test_phys_mem &= ~(host_page_size - 1);
+ } else {
+ guest_test_phys_mem = phys_offset;
}
- DEBUG("guest test mem offset: 0x%lx\n", guest_test_mem);
+ DEBUG("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);
bmap = bitmap_alloc(host_num_pages);
host_bmap_track = bitmap_alloc(host_num_pages);
- vm = create_vm(mode, VCPU_ID, guest_num_pages, guest_code);
+ vm = create_vm(mode, VCPU_ID, guest_num_pages, guest_code, type);
+
+#ifdef USE_CLEAR_DIRTY_LOG
+ struct kvm_enable_cap cap = {};
+
+ cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT;
+ cap.args[0] = 1;
+ vm_enable_cap(vm, &cap);
+#endif
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- guest_test_mem,
+ guest_test_phys_mem,
TEST_MEM_SLOT_INDEX,
guest_num_pages,
KVM_MEM_LOG_DIRTY_PAGES);
- /* Do 1:1 mapping for the dirty track memory slot */
- virt_map(vm, guest_test_mem, guest_test_mem,
+ /* Do mapping for the dirty track memory slot */
+ virt_map(vm, guest_test_virt_mem, guest_test_phys_mem,
guest_num_pages * guest_page_size, 0);
/* Cache the HVA pointer of the region */
- host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_mem);
+ host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);
#ifdef __x86_64__
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
@@ -299,7 +340,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
/* Export the shared variables to the guest */
sync_global_to_guest(vm, host_page_size);
sync_global_to_guest(vm, guest_page_size);
- sync_global_to_guest(vm, guest_test_mem);
+ sync_global_to_guest(vm, guest_test_virt_mem);
sync_global_to_guest(vm, guest_num_pages);
/* Start the iterations */
@@ -316,6 +357,10 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
/* Give the vcpu thread some time to dirty some pages */
usleep(interval * 1000);
kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
+#ifdef USE_CLEAR_DIRTY_LOG
+ kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
+ DIV_ROUND_UP(host_num_pages, 64) * 64);
+#endif
vm_dirty_log_verify(bmap);
iteration++;
sync_global_to_guest(vm, iteration);
@@ -335,23 +380,16 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
kvm_vm_free(vm);
}
-static struct vm_guest_modes {
- enum vm_guest_mode mode;
+struct vm_guest_mode_params {
bool supported;
bool enabled;
-} vm_guest_modes[NUM_VM_MODES] = {
-#if defined(__x86_64__)
- { VM_MODE_P52V48_4K, 1, 1, },
- { VM_MODE_P52V48_64K, 0, 0, },
- { VM_MODE_P40V48_4K, 0, 0, },
- { VM_MODE_P40V48_64K, 0, 0, },
-#elif defined(__aarch64__)
- { VM_MODE_P52V48_4K, 0, 0, },
- { VM_MODE_P52V48_64K, 0, 0, },
- { VM_MODE_P40V48_4K, 1, 1, },
- { VM_MODE_P40V48_64K, 1, 1, },
-#endif
};
+struct vm_guest_mode_params vm_guest_mode_params[NUM_VM_MODES];
+
+#define vm_guest_mode_params_init(mode, supported, enabled) \
+({ \
+ vm_guest_mode_params[mode] = (struct vm_guest_mode_params){ supported, enabled }; \
+})
static void help(char *name)
{
@@ -359,25 +397,21 @@ static void help(char *name)
puts("");
printf("usage: %s [-h] [-i iterations] [-I interval] "
- "[-o offset] [-t] [-m mode]\n", name);
+ "[-p offset] [-m mode]\n", name);
puts("");
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
TEST_HOST_LOOP_N);
printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
TEST_HOST_LOOP_INTERVAL);
- printf(" -o: guest test memory offset (default: 0x%lx)\n",
- DEFAULT_GUEST_TEST_MEM);
- printf(" -t: map guest test memory at the top of the allowed "
- "physical address range\n");
+ printf(" -p: specify guest physical test memory offset\n"
+ " Warning: a low offset can conflict with the loaded test code.\n");
printf(" -m: specify the guest mode ID to test "
"(default: test all supported modes)\n"
" This option may be used multiple times.\n"
" Guest mode IDs:\n");
for (i = 0; i < NUM_VM_MODES; ++i) {
- printf(" %d: %s%s\n",
- vm_guest_modes[i].mode,
- vm_guest_mode_string(vm_guest_modes[i].mode),
- vm_guest_modes[i].supported ? " (supported)" : "");
+ printf(" %d: %s%s\n", i, vm_guest_mode_string(i),
+ vm_guest_mode_params[i].supported ? " (supported)" : "");
}
puts("");
exit(0);
@@ -388,11 +422,34 @@ int main(int argc, char *argv[])
unsigned long iterations = TEST_HOST_LOOP_N;
unsigned long interval = TEST_HOST_LOOP_INTERVAL;
bool mode_selected = false;
- bool top_offset = false;
- unsigned int mode;
+ uint64_t phys_offset = 0;
+ unsigned int mode, host_ipa_limit;
int opt, i;
- while ((opt = getopt(argc, argv, "hi:I:o:tm:")) != -1) {
+#ifdef USE_CLEAR_DIRTY_LOG
+ if (!kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT)) {
+ fprintf(stderr, "KVM_CLEAR_DIRTY_LOG not available, skipping tests\n");
+ exit(KSFT_SKIP);
+ }
+#endif
+
+#ifdef __x86_64__
+ vm_guest_mode_params_init(VM_MODE_P52V48_4K, true, true);
+#endif
+#ifdef __aarch64__
+ vm_guest_mode_params_init(VM_MODE_P40V48_4K, true, true);
+ vm_guest_mode_params_init(VM_MODE_P40V48_64K, true, true);
+
+ host_ipa_limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
+ if (host_ipa_limit >= 52)
+ vm_guest_mode_params_init(VM_MODE_P52V48_64K, true, true);
+ if (host_ipa_limit >= 48) {
+ vm_guest_mode_params_init(VM_MODE_P48V48_4K, true, true);
+ vm_guest_mode_params_init(VM_MODE_P48V48_64K, true, true);
+ }
+#endif
+
+ while ((opt = getopt(argc, argv, "hi:I:p:m:")) != -1) {
switch (opt) {
case 'i':
iterations = strtol(optarg, NULL, 10);
@@ -400,22 +457,19 @@ int main(int argc, char *argv[])
case 'I':
interval = strtol(optarg, NULL, 10);
break;
- case 'o':
- guest_test_mem = strtoull(optarg, NULL, 0);
- break;
- case 't':
- top_offset = true;
+ case 'p':
+ phys_offset = strtoull(optarg, NULL, 0);
break;
case 'm':
if (!mode_selected) {
for (i = 0; i < NUM_VM_MODES; ++i)
- vm_guest_modes[i].enabled = 0;
+ vm_guest_mode_params[i].enabled = false;
mode_selected = true;
}
mode = strtoul(optarg, NULL, 10);
TEST_ASSERT(mode < NUM_VM_MODES,
"Guest mode ID %d too big", mode);
- vm_guest_modes[mode].enabled = 1;
+ vm_guest_mode_params[mode].enabled = true;
break;
case 'h':
default:
@@ -426,8 +480,6 @@ int main(int argc, char *argv[])
TEST_ASSERT(iterations > 2, "Iterations must be greater than two");
TEST_ASSERT(interval > 0, "Interval must be greater than zero");
- TEST_ASSERT(!top_offset || guest_test_mem == DEFAULT_GUEST_TEST_MEM,
- "Cannot use both -o [offset] and -t at the same time");
DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
iterations, interval);
@@ -435,13 +487,12 @@ int main(int argc, char *argv[])
srandom(time(0));
for (i = 0; i < NUM_VM_MODES; ++i) {
- if (!vm_guest_modes[i].enabled)
+ if (!vm_guest_mode_params[i].enabled)
continue;
- TEST_ASSERT(vm_guest_modes[i].supported,
+ TEST_ASSERT(vm_guest_mode_params[i].supported,
"Guest mode ID %d (%s) not supported.",
- vm_guest_modes[i].mode,
- vm_guest_mode_string(vm_guest_modes[i].mode));
- run_test(vm_guest_modes[i].mode, iterations, interval, top_offset);
+ i, vm_guest_mode_string(i));
+ run_test(i, iterations, interval, phys_offset);
}
return 0;
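
For reference, a minimal C sketch (not from the patch itself) of the two calculations the hunks above rely on: the default placement of the test slot at the top of guest physical memory when no -p offset is given, and the rounding of the KVM_CLEAR_DIRTY_LOG page count up to a multiple of 64 to match the 64-bit granularity of the dirty bitmap. Variable names mirror run_test(); the helper functions themselves are hypothetical.

    #include <stdint.h>

    /* Top-of-memory placement, aligned down to a host page (run_test's
     * !phys_offset branch). */
    static uint64_t default_phys_offset(uint64_t max_gfn, uint64_t guest_num_pages,
                                        uint64_t guest_page_size, uint64_t host_page_size)
    {
            uint64_t gpa = (max_gfn - guest_num_pages) * guest_page_size;

            return gpa & ~(host_page_size - 1);
    }

    /* Page count passed to kvm_vm_clear_dirty_log(), rounded up to a
     * multiple of 64, matching DIV_ROUND_UP(host_num_pages, 64) * 64. */
    static uint32_t clear_range_pages(uint64_t host_num_pages)
    {
            return ((host_num_pages + 63) / 64) * 64;
    }
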
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index a4e59e3b4826..a84785b02557 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -36,6 +36,8 @@ typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
enum vm_guest_mode {
VM_MODE_P52V48_4K,
VM_MODE_P52V48_64K,
+ VM_MODE_P48V48_4K,
+ VM_MODE_P48V48_64K,
VM_MODE_P40V48_4K,
VM_MODE_P40V48_64K,
NUM_VM_MODES,
@@ -54,10 +56,14 @@ int kvm_check_cap(long cap);
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
+struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages,
+ int perm, unsigned long type);
void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp, int perm);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
+void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
+ uint64_t first_page, uint32_t num_pages);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
size_t len);
@@ -78,6 +84,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
void *arg);
+int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
+ void *arg);
void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot,
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index b6022e2f116e..e8c42506a09d 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -268,13 +268,20 @@ void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
switch (vm->mode) {
case VM_MODE_P52V48_4K:
- tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
- tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
- break;
+ TEST_ASSERT(false, "AArch64 does not support 4K sized pages "
+ "with 52-bit physical address ranges");
case VM_MODE_P52V48_64K:
tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
break;
+ case VM_MODE_P48V48_4K:
+ tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
+ tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
+ break;
+ case VM_MODE_P48V48_64K:
+ tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
+ tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
+ break;
case VM_MODE_P40V48_4K:
tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
@@ -305,7 +312,6 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
- fprintf(stream, "%*spstate: 0x%.16llx pc: 0x%.16llx\n",
- indent, "", pstate, pc);
-
+ fprintf(stream, "%*spstate: 0x%.16llx pc: 0x%.16llx\n",
+ indent, "", pstate, pc);
}
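
For reference, a sketch (not from the patch) of how the TG0 and IPS fields set in the switch above encode page size and physical address range in TCR_EL1; the values follow the ARMv8 encodings (TG0: 0 = 4K, 1 = 64K granule; IPS: 2 = 40 bits, 5 = 48 bits, 6 = 52 bits). tcr_granule_and_ips() is a hypothetical helper, not framework API.

    /* Assemble TCR_EL1.TG0 (bits 15:14) and TCR_EL1.IPS (bits 34:32). */
    static unsigned long tcr_granule_and_ips(int page_shift, int pa_bits)
    {
            unsigned long tcr = 0;

            tcr |= (page_shift == 16 ? 1ul : 0ul) << 14;    /* TG0 */

            switch (pa_bits) {
            case 40: tcr |= 2ul << 32; break;       /* IPS = 40 bits */
            case 48: tcr |= 5ul << 32; break;       /* IPS = 48 bits */
            case 52: tcr |= 6ul << 32; break;       /* IPS = 52 bits */
            }
            return tcr;
    }
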
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 1b41e71283d5..23022e9d32eb 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -85,13 +85,13 @@ int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
return ret;
}
-static void vm_open(struct kvm_vm *vm, int perm)
+static void vm_open(struct kvm_vm *vm, int perm, unsigned long type)
{
vm->kvm_fd = open(KVM_DEV_PATH, perm);
if (vm->kvm_fd < 0)
exit(KSFT_SKIP);
- vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, NULL);
+ vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type);
TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
"rc: %i errno: %i", vm->fd, errno);
}
@@ -99,9 +99,13 @@ static void vm_open(struct kvm_vm *vm, int perm)
const char * const vm_guest_mode_string[] = {
"PA-bits:52, VA-bits:48, 4K pages",
"PA-bits:52, VA-bits:48, 64K pages",
+ "PA-bits:48, VA-bits:48, 4K pages",
+ "PA-bits:48, VA-bits:48, 64K pages",
"PA-bits:40, VA-bits:48, 4K pages",
"PA-bits:40, VA-bits:48, 64K pages",
};
+_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
+ "Missing new mode strings?");
/*
* VM Create
@@ -122,7 +126,8 @@ const char * const vm_guest_mode_string[] = {
* descriptor to control the created VM is created with the permissions
* given by perm (e.g. O_RDWR).
*/
-struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
+struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages,
+ int perm, unsigned long type)
{
struct kvm_vm *vm;
int kvm_fd;
@@ -131,22 +136,38 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
TEST_ASSERT(vm != NULL, "Insufficient Memory");
vm->mode = mode;
- vm_open(vm, perm);
+ vm->type = type;
+ vm_open(vm, perm, type);
/* Setup mode specific traits. */
switch (vm->mode) {
case VM_MODE_P52V48_4K:
vm->pgtable_levels = 4;
+ vm->pa_bits = 52;
+ vm->va_bits = 48;
vm->page_size = 0x1000;
vm->page_shift = 12;
- vm->va_bits = 48;
break;
case VM_MODE_P52V48_64K:
vm->pgtable_levels = 3;
vm->pa_bits = 52;
+ vm->va_bits = 48;
vm->page_size = 0x10000;
vm->page_shift = 16;
+ break;
+ case VM_MODE_P48V48_4K:
+ vm->pgtable_levels = 4;
+ vm->pa_bits = 48;
+ vm->va_bits = 48;
+ vm->page_size = 0x1000;
+ vm->page_shift = 12;
+ break;
+ case VM_MODE_P48V48_64K:
+ vm->pgtable_levels = 3;
+ vm->pa_bits = 48;
vm->va_bits = 48;
+ vm->page_size = 0x10000;
+ vm->page_shift = 16;
break;
case VM_MODE_P40V48_4K:
vm->pgtable_levels = 4;
@@ -186,6 +207,11 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
return vm;
}
+struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
+{
+ return _vm_create(mode, phy_pages, perm, 0);
+}
+
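
The pgtable_levels values in the switch above are not arbitrary: with 8-byte descriptors, one page of page-table entries resolves page_shift - 3 bits, so the level count follows from va_bits and page_shift. A reference sketch (an observation about the table, not a framework function):

    /* levels_for(48, 12) == 4 (4K pages) and levels_for(48, 16) == 3
     * (64K pages), matching the cases above. */
    static unsigned int levels_for(unsigned int va_bits, unsigned int page_shift)
    {
            unsigned int bits_per_level = page_shift - 3;

            return (va_bits - page_shift + bits_per_level - 1) / bits_per_level;
    }
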
/*
* VM Restart
*
@@ -203,7 +229,7 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm)
{
struct userspace_mem_region *region;
- vm_open(vmp, perm);
+ vm_open(vmp, perm, vmp->type);
if (vmp->has_irqchip)
vm_create_irqchip(vmp);
@@ -231,6 +257,19 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
strerror(-ret));
}
+void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
+ uint64_t first_page, uint32_t num_pages)
+{
+ struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot,
+ .first_page = first_page,
+ .num_pages = num_pages };
+ int ret;
+
+ ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
+ TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
+ __func__, strerror(-ret));
+}
+
/*
* Userspace Memory Region Find
*
@@ -1270,14 +1309,24 @@ int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
unsigned long cmd, void *arg)
{
+ int ret;
+
+ ret = _vcpu_ioctl(vm, vcpuid, cmd, arg);
+ TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
+ cmd, ret, errno, strerror(errno));
+}
+
+int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
+ unsigned long cmd, void *arg)
+{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
ret = ioctl(vcpu->fd, cmd, arg);
- TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
- cmd, ret, errno, strerror(errno));
+
+ return ret;
}
/*
@@ -1422,7 +1471,7 @@ const char *exit_reason_str(unsigned int exit_reason)
*
* Within the VM specified by vm, locates a range of available physical
* pages at or above paddr_min. If found, the pages are marked as in use
- * and thier base address is returned. A TEST_ASSERT failure occurs if
+ * and their base address is returned. A TEST_ASSERT failure occurs if
* not enough pages are available at or above paddr_min.
*/
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
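
For reference, a sketch (not from the patch) of how the pieces added to kvm_util.c compose in a caller: create a VM with an explicit KVM_CREATE_VM type, opt in to KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, then fetch and clear one slot's dirty bitmap. dirty_log_round() is hypothetical; the functions it calls are the ones declared in kvm_util.h above.

    static void dirty_log_round(enum vm_guest_mode mode, unsigned long type,
                                int slot, void *bitmap, uint32_t num_pages)
    {
            struct kvm_vm *vm;
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT,
                    .args[0] = 1,
            };

            vm = _vm_create(mode, DEFAULT_GUEST_PHY_PAGES, O_RDWR, type);
            vm_enable_cap(vm, &cap);

            /* ... run the guest and let it dirty memory ... */

            kvm_vm_get_dirty_log(vm, slot, bitmap);
            kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
            kvm_vm_free(vm);
    }
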
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index 52701db0f253..4595e42c6e29 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -44,6 +44,7 @@ struct vcpu {
struct kvm_vm {
int mode;
+ unsigned long type;
int kvm_fd;
int fd;
unsigned int pgtable_levels;
diff --git a/tools/testing/selftests/kvm/lib/ucall.c b/tools/testing/selftests/kvm/lib/ucall.c
index 4777f9bb5194..a2ab38be2f47 100644
--- a/tools/testing/selftests/kvm/lib/ucall.c
+++ b/tools/testing/selftests/kvm/lib/ucall.c
@@ -34,7 +34,8 @@ void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
return;
if (type == UCALL_MMIO) {
- vm_paddr_t gpa, start, end, step;
+ vm_paddr_t gpa, start, end, step, offset;
+ unsigned bits;
bool ret;
if (arg) {
@@ -45,25 +46,30 @@ void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
}
/*
- * Find an address within the allowed virtual address space,
- * that does _not_ have a KVM memory region associated with it.
- * Identity mapping an address like this allows the guest to
+ * Find an address within the allowed physical and virtual address
+ * spaces, that does _not_ have a KVM memory region associated with
+ * it. Identity mapping an address like this allows the guest to
* access it, but as KVM doesn't know what to do with it, it
* will assume it's something userspace handles and exit with
* KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
- * Here we start with a guess that the addresses around two
- * thirds of the VA space are unmapped and then work both down
- * and up from there in 1/6 VA space sized steps.
+ * Here we start with a guess that the addresses around 5/8th
+ * of the allowed space are unmapped and then work both down and
+ * up from there in 1/16th allowed space sized steps.
+ *
+ * Note, we need to use VA-bits - 1 when calculating the allowed
+ * virtual address space for an identity mapping because the upper
+ * half of the virtual address space is the two's complement of the
+ * lower and won't match physical addresses.
*/
- start = 1ul << (vm->va_bits * 2 / 3);
- end = 1ul << vm->va_bits;
- step = 1ul << (vm->va_bits / 6);
- for (gpa = start; gpa >= 0; gpa -= step) {
- if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
+ bits = vm->va_bits - 1;
+ bits = vm->pa_bits < bits ? vm->pa_bits : bits;
+ end = 1ul << bits;
+ start = end * 5 / 8;
+ step = end / 16;
+ for (offset = 0; offset < end - start; offset += step) {
+ if (ucall_mmio_init(vm, start - offset))
return;
- }
- for (gpa = start + step; gpa < end; gpa += step) {
- if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
+ if (ucall_mmio_init(vm, start + offset))
return;
}
TEST_ASSERT(false, "Can't find a ucall mmio address");
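
For reference, a standalone sketch (not from the patch) of the probe order the rewritten loop produces: clamp the usable space to min(pa_bits, va_bits - 1), start at 5/8 of it, and fan out in 1/16-sized steps alternately below and above the start. try() stands in for ucall_mmio_init().

    #include <stdbool.h>
    #include <stdint.h>

    static bool find_free_identity_gpa(unsigned int va_bits, unsigned int pa_bits,
                                       bool (*try)(uint64_t))
    {
            unsigned int bits = va_bits - 1;   /* lower half of the VA space */
            uint64_t end, start, step, offset;

            if (pa_bits < bits)
                    bits = pa_bits;
            end = 1ul << bits;
            start = end * 5 / 8;
            step = end / 16;

            for (offset = 0; offset < end - start; offset += step)
                    if (try(start - offset) || try(start + offset))
                            return true;
            return false;
    }
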
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
index 92c2cfd1b182..ea3c73e8f4f6 100644
--- a/tools/testing/selftests/kvm/x86_64/evmcs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -113,8 +113,8 @@ int main(int argc, char *argv[])
for (stage = 1;; stage++) {
_vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s),\n",
- run->exit_reason,
+ "Stage %d: unexpected exit reason: %u (%s),\n",
+ stage, run->exit_reason,
exit_reason_str(run->exit_reason));
memset(&regs1, 0, sizeof(regs1));
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
new file mode 100644
index 000000000000..264425f75806
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for x86 KVM_CAP_HYPERV_CPUID
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+#define VCPU_ID 0
+
+static void guest_code(void)
+{
+}
+
+static void test_hv_cpuid(struct kvm_cpuid2 *hv_cpuid_entries,
+ int evmcs_enabled)
+{
+ int i;
+
+ if (!evmcs_enabled)
+ TEST_ASSERT(hv_cpuid_entries->nent == 6,
+ "KVM_GET_SUPPORTED_HV_CPUID should return 6 entries"
+ " when Enlightened VMCS is disabled (returned %d)",
+ hv_cpuid_entries->nent);
+ else
+ TEST_ASSERT(hv_cpuid_entries->nent == 7,
+ "KVM_GET_SUPPORTED_HV_CPUID should return 7 entries"
+ " when Enlightened VMCS is enabled (returned %d)",
+ hv_cpuid_entries->nent);
+
+ for (i = 0; i < hv_cpuid_entries->nent; i++) {
+ struct kvm_cpuid_entry2 *entry = &hv_cpuid_entries->entries[i];
+
+ TEST_ASSERT((entry->function >= 0x40000000) &&
+ (entry->function <= 0x4000000A),
+ "function %lx is our of supported range",
+ entry->function);
+
+ TEST_ASSERT(entry->index == 0,
+ ".index field should be zero");
+
+ TEST_ASSERT(entry->flags == 0,
+ ".flags field should be zero");
+
+ TEST_ASSERT(!entry->padding[0] && !entry->padding[1] &&
+ !entry->padding[2],
+ ".padding fields should be zero");
+
+ /*
+ * If needed for debug:
+ * fprintf(stdout,
+ * "CPUID%lx EAX=0x%lx EBX=0x%lx ECX=0x%lx EDX=0x%lx\n",
+ * entry->function, entry->eax, entry->ebx, entry->ecx,
+ * entry->edx);
+ */
+ }
+}
+
+void test_hv_cpuid_e2big(struct kvm_vm *vm)
+{
+ static struct kvm_cpuid2 cpuid = {.nent = 0};
+ int ret;
+
+ ret = _vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, &cpuid);
+
+ TEST_ASSERT(ret == -1 && errno == E2BIG,
+ "KVM_GET_SUPPORTED_HV_CPUID didn't fail with -E2BIG when"
+ " it should have: %d %d", ret, errno);
+}
+
+struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(struct kvm_vm *vm)
+{
+ int nent = 20; /* should be enough */
+ static struct kvm_cpuid2 *cpuid;
+ int ret;
+
+ cpuid = malloc(sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));
+
+ if (!cpuid) {
+ perror("malloc");
+ abort();
+ }
+
+ cpuid->nent = nent;
+
+ vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+
+ return cpuid;
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ int rv;
+ uint16_t evmcs_ver;
+ struct kvm_cpuid2 *hv_cpuid_entries;
+ struct kvm_enable_cap enable_evmcs_cap = {
+ .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+ .args[0] = (unsigned long)&evmcs_ver
+ };
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ rv = kvm_check_cap(KVM_CAP_HYPERV_CPUID);
+ if (!rv) {
+ fprintf(stderr,
+ "KVM_CAP_HYPERV_CPUID not supported, skip test\n");
+ exit(KSFT_SKIP);
+ }
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+ test_hv_cpuid_e2big(vm);
+
+ hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
+ if (!hv_cpuid_entries)
+ return 1;
+
+ test_hv_cpuid(hv_cpuid_entries, 0);
+
+ free(hv_cpuid_entries);
+
+ vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+
+ hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
+ if (!hv_cpuid_entries)
+ return 1;
+
+ test_hv_cpuid(hv_cpuid_entries, 1);
+
+ free(hv_cpuid_entries);
+
+ kvm_vm_free(vm);
+
+ return 0;
+}
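
The fixed nent = 20 above is a pragmatic guess; an alternative, sketched here for reference (hypothetical helper, not part of the patch), is to grow the buffer whenever the ioctl reports E2BIG, the same condition test_hv_cpuid_e2big() probes for. It relies on _vcpu_ioctl() returning the raw ioctl result instead of asserting.

    static struct kvm_cpuid2 *get_hv_cpuid_grow(struct kvm_vm *vm)
    {
            int nent = 4;
            struct kvm_cpuid2 *cpuid = NULL;

            for (;;) {
                    cpuid = realloc(cpuid, sizeof(*cpuid) +
                                    nent * sizeof(struct kvm_cpuid_entry2));
                    TEST_ASSERT(cpuid, "realloc failed");
                    cpuid->nent = nent;
                    if (!_vcpu_ioctl(vm, VCPU_ID, KVM_GET_SUPPORTED_HV_CPUID, cpuid))
                            return cpuid;
                    TEST_ASSERT(errno == E2BIG, "unexpected ioctl error: %d", errno);
                    nent *= 2;
            }
    }
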
diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 03da41f0f736..4b3f556265f1 100644
--- a/tools/testing/selftests/kvm/x86_64/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -152,8 +152,8 @@ int main(int argc, char *argv[])
for (stage = 1;; stage++) {
_vcpu_run(vm, VCPU_ID);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
- "Unexpected exit reason: %u (%s),\n",
- run->exit_reason,
+ "Stage %d: unexpected exit reason: %u (%s),\n",
+ stage, run->exit_reason,
exit_reason_str(run->exit_reason));
memset(&regs1, 0, sizeof(regs1));
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 0a8e75886224..8b0f16409ed7 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -16,18 +16,18 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
+ifdef KSFT_KHDR_INSTALL
top_srcdir ?= ../../../..
include $(top_srcdir)/scripts/subarch.include
ARCH ?= $(SUBARCH)
-all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
-
.PHONY: khdr
khdr:
make ARCH=$(ARCH) -C $(top_srcdir) headers_install
-ifdef KSFT_KHDR_INSTALL
-$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
+all: khdr $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
+else
+all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
endif
.ONESHELL:
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 8cf22b3c2563..6f81130605d7 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -3,6 +3,7 @@ socket
psock_fanout
psock_snd
psock_tpacket
+reuseport_addr_any
reuseport_bpf
reuseport_bpf_cpu
reuseport_bpf_numa
@@ -14,4 +15,5 @@ udpgso_bench_rx
udpgso_bench_tx
tcp_inq
tls
+txring_overwrite
ip_defrag
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 256d82d5fa87..f8f3e90700c0 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -4,13 +4,16 @@
CFLAGS = -Wall -Wl,--no-as-needed -O2 -g
CFLAGS += -I../../../../usr/include/
-TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh
+TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh \
+ rtnetlink.sh xfrm_policy.sh
TEST_PROGS += fib_tests.sh fib-onlink-tests.sh pmtu.sh udpgso.sh ip_defrag.sh
TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh
+TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh
+TEST_PROGS += test_vxlan_fdb_changelink.sh
TEST_PROGS_EXTENDED := in_netns.sh
TEST_GEN_FILES = socket
-TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy
-TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd
+TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
+TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx ip_defrag
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index cd3a2f1545b5..5821bdd98d20 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -14,3 +14,17 @@ CONFIG_IPV6_VTI=y
CONFIG_DUMMY=y
CONFIG_BRIDGE=y
CONFIG_VLAN_8021Q=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_NAT_IPV6=m
+CONFIG_NF_NAT_IPV4=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_CHAIN_NAT_IPV6=m
+CONFIG_NFT_CHAIN_NAT_IPV4=m
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 85d253546684..3f248d1f5b91 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -15,6 +15,8 @@ PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
PAUSE_ON_CLEANUP=${PAUSE_ON_CLEANUP:=no}
NETIF_TYPE=${NETIF_TYPE:=veth}
NETIF_CREATE=${NETIF_CREATE:=yes}
+MCD=${MCD:=smcrouted}
+MC_CLI=${MC_CLI:=smcroutectl}
relative_path="${BASH_SOURCE%/*}"
if [[ "$relative_path" == "${BASH_SOURCE}" ]]; then
@@ -104,7 +106,7 @@ create_netif_veth()
{
local i
- for i in $(eval echo {1..$NUM_NETIFS}); do
+ for ((i = 1; i <= NUM_NETIFS; ++i)); do
local j=$((i+1))
ip link show dev ${NETIFS[p$i]} &> /dev/null
@@ -135,7 +137,7 @@ if [[ "$NETIF_CREATE" = "yes" ]]; then
create_netif
fi
-for i in $(eval echo {1..$NUM_NETIFS}); do
+for ((i = 1; i <= NUM_NETIFS; ++i)); do
ip link show dev ${NETIFS[p$i]} &> /dev/null
if [[ $? -ne 0 ]]; then
echo "SKIP: could not find all required interfaces"
@@ -477,11 +479,24 @@ master_name_get()
ip -j link show dev $if_name | jq -r '.[]["master"]'
}
+link_stats_get()
+{
+ local if_name=$1; shift
+ local dir=$1; shift
+ local stat=$1; shift
+
+ ip -j -s link show dev $if_name \
+ | jq '.[]["stats64"]["'$dir'"]["'$stat'"]'
+}
+
link_stats_tx_packets_get()
{
- local if_name=$1
+ link_stats_get $1 tx packets
+}
- ip -j -s link show dev $if_name | jq '.[]["stats64"]["tx"]["packets"]'
+link_stats_rx_errors_get()
+{
+ link_stats_get $1 rx errors
}
tc_rule_stats_get()
@@ -783,6 +798,17 @@ multipath_eval()
log_info "Expected ratio $weights_ratio Measured ratio $packets_ratio"
}
+in_ns()
+{
+ local name=$1; shift
+
+ ip netns exec $name bash <<-EOF
+ NUM_NETIFS=0
+ source lib.sh
+ $(for a in "$@"; do printf "%q${IFS:0:1}" "$a"; done)
+ EOF
+}
+
##############################################################################
# Tests
@@ -790,10 +816,11 @@ ping_do()
{
local if_name=$1
local dip=$2
+ local args=$3
local vrf_name
vrf_name=$(master_name_get $if_name)
- ip vrf exec $vrf_name $PING $dip -c 10 -i 0.1 -w 2 &> /dev/null
+ ip vrf exec $vrf_name $PING $args $dip -c 10 -i 0.1 -w 2 &> /dev/null
}
ping_test()
@@ -802,17 +829,18 @@ ping_test()
ping_do $1 $2
check_err $?
- log_test "ping"
+ log_test "ping$3"
}
ping6_do()
{
local if_name=$1
local dip=$2
+ local args=$3
local vrf_name
vrf_name=$(master_name_get $if_name)
- ip vrf exec $vrf_name $PING6 $dip -c 10 -i 0.1 -w 2 &> /dev/null
+ ip vrf exec $vrf_name $PING6 $args $dip -c 10 -i 0.1 -w 2 &> /dev/null
}
ping6_test()
@@ -821,7 +849,7 @@ ping6_test()
ping6_do $1 $2
check_err $?
- log_test "ping6"
+ log_test "ping6$3"
}
learning_test()
diff --git a/tools/testing/selftests/net/forwarding/router_multicast.sh b/tools/testing/selftests/net/forwarding/router_multicast.sh
new file mode 100755
index 000000000000..109e6d785169
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_multicast.sh
@@ -0,0 +1,311 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +------------------+
+# | H1 (v$h1) |
+# | 2001:db8:1::2/64 |
+# | 198.51.100.2/28 |
+# | $h1 + |
+# +-------------|----+
+# |
+# +-------------|-------------------------------+
+# | SW1 | |
+# | $rp1 + |
+# | 198.51.100.1/28 |
+# | 2001:db8:1::1/64 |
+# | |
+# | 2001:db8:2::1/64 2001:db8:3::1/64 |
+# | 198.51.100.17/28 198.51.100.33/28 |
+# | $rp2 + $rp3 + |
+# +--------------|--------------------------|---+
+# | |
+# | |
+# +--------------|---+ +--------------|---+
+# | H2 (v$h2) | | | H3 (v$h3) | |
+# | $h2 + | | $h3 + |
+# | 198.51.100.18/28 | | 198.51.100.34/28 |
+# | 2001:db8:2::2/64 | | 2001:db8:3::2/64 |
+# +------------------+ +------------------+
+#
+
+ALL_TESTS="mcast_v4 mcast_v6"
+NUM_NETIFS=6
+source lib.sh
+source tc_common.sh
+
+require_command $MCD
+require_command $MC_CLI
+table_name=selftests
+
+h1_create()
+{
+ simple_if_init $h1 198.51.100.2/28 2001:db8:1::2/64
+
+ ip route add 198.51.100.16/28 vrf v$h1 nexthop via 198.51.100.1
+ ip route add 198.51.100.32/28 vrf v$h1 nexthop via 198.51.100.1
+
+ ip route add 2001:db8:2::/64 vrf v$h1 nexthop via 2001:db8:1::1
+ ip route add 2001:db8:3::/64 vrf v$h1 nexthop via 2001:db8:1::1
+}
+
+h1_destroy()
+{
+ ip route del 2001:db8:3::/64 vrf v$h1
+ ip route del 2001:db8:2::/64 vrf v$h1
+
+ ip route del 198.51.100.32/28 vrf v$h1
+ ip route del 198.51.100.16/28 vrf v$h1
+
+ simple_if_fini $h1 198.51.100.2/28 2001:db8:1::2/64
+}
+
+h2_create()
+{
+ simple_if_init $h2 198.51.100.18/28 2001:db8:2::2/64
+
+ ip route add 198.51.100.0/28 vrf v$h2 nexthop via 198.51.100.17
+ ip route add 198.51.100.32/28 vrf v$h2 nexthop via 198.51.100.17
+
+ ip route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:2::1
+ ip route add 2001:db8:3::/64 vrf v$h2 nexthop via 2001:db8:2::1
+
+ tc qdisc add dev $h2 ingress
+}
+
+h2_destroy()
+{
+ tc qdisc del dev $h2 ingress
+
+ ip route del 2001:db8:3::/64 vrf v$h2
+ ip route del 2001:db8:1::/64 vrf v$h2
+
+ ip route del 198.51.100.32/28 vrf v$h2
+ ip route del 198.51.100.0/28 vrf v$h2
+
+ simple_if_fini $h2 198.51.100.18/28 2001:db8:2::2/64
+}
+
+h3_create()
+{
+ simple_if_init $h3 198.51.100.34/28 2001:db8:3::2/64
+
+ ip route add 198.51.100.0/28 vrf v$h3 nexthop via 198.51.100.33
+ ip route add 198.51.100.16/28 vrf v$h3 nexthop via 198.51.100.33
+
+ ip route add 2001:db8:1::/64 vrf v$h3 nexthop via 2001:db8:3::1
+ ip route add 2001:db8:2::/64 vrf v$h3 nexthop via 2001:db8:3::1
+
+ tc qdisc add dev $h3 ingress
+}
+
+h3_destroy()
+{
+ tc qdisc del dev $h3 ingress
+
+ ip route del 2001:db8:2::/64 vrf v$h3
+ ip route del 2001:db8:1::/64 vrf v$h3
+
+ ip route del 198.51.100.16/28 vrf v$h3
+ ip route del 198.51.100.0/28 vrf v$h3
+
+ simple_if_fini $h3 198.51.100.34/28 2001:db8:3::2/64
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link set dev $rp2 up
+ ip link set dev $rp3 up
+
+ ip address add 198.51.100.1/28 dev $rp1
+ ip address add 198.51.100.17/28 dev $rp2
+ ip address add 198.51.100.33/28 dev $rp3
+
+ ip address add 2001:db8:1::1/64 dev $rp1
+ ip address add 2001:db8:2::1/64 dev $rp2
+ ip address add 2001:db8:3::1/64 dev $rp3
+}
+
+router_destroy()
+{
+ ip address del 2001:db8:3::1/64 dev $rp3
+ ip address del 2001:db8:2::1/64 dev $rp2
+ ip address del 2001:db8:1::1/64 dev $rp1
+
+ ip address del 198.51.100.33/28 dev $rp3
+ ip address del 198.51.100.17/28 dev $rp2
+ ip address del 198.51.100.1/28 dev $rp1
+
+ ip link set dev $rp3 down
+ ip link set dev $rp2 down
+ ip link set dev $rp1 down
+}
+
+start_mcd()
+{
+ SMCROUTEDIR="$(mktemp -d)"
+
+ for ((i = 1; i <= $NUM_NETIFS; ++i)); do
+ echo "phyint ${NETIFS[p$i]} enable" >> \
+ $SMCROUTEDIR/$table_name.conf
+ done
+
+ $MCD -N -I $table_name -f $SMCROUTEDIR/$table_name.conf \
+ -P $SMCROUTEDIR/$table_name.pid
+}
+
+kill_mcd()
+{
+ pkill $MCD
+ rm -rf $SMCROUTEDIR
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp3=${NETIFS[p5]}
+ h3=${NETIFS[p6]}
+
+ start_mcd
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ h3_create
+
+ router_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ router_destroy
+
+ h3_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+
+ kill_mcd
+}
+
+create_mcast_sg()
+{
+ local if_name=$1; shift
+ local s_addr=$1; shift
+ local mcast=$1; shift
+ local dest_ifs=${@}
+
+ $MC_CLI -I $table_name add $if_name $s_addr $mcast $dest_ifs
+}
+
+delete_mcast_sg()
+{
+ local if_name=$1; shift
+ local s_addr=$1; shift
+ local mcast=$1; shift
+ local dest_ifs=${@}
+
+ $MC_CLI -I $table_name remove $if_name $s_addr $mcast $dest_ifs
+}
+
+mcast_v4()
+{
+ # Add two interfaces to an MC group, send a packet to the MC group and
+ # verify packets are received on both. Then delete the route and verify
+ # packets are no longer received.
+
+ RET=0
+
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 122 flower \
+ dst_ip 225.1.2.3 action drop
+ tc filter add dev $h3 ingress protocol ip pref 1 handle 133 flower \
+ dst_ip 225.1.2.3 action drop
+
+ create_mcast_sg $rp1 198.51.100.2 225.1.2.3 $rp2 $rp3
+
+ # Send frames with the corresponding L2 destination address.
+ $MZ $h1 -c 5 -p 128 -t udp -a 00:11:22:33:44:55 -b 01:00:5e:01:02:03 \
+ -A 198.51.100.2 -B 225.1.2.3 -q
+
+ tc_check_packets "dev $h2 ingress" 122 5
+ check_err $? "Multicast not received on first host"
+ tc_check_packets "dev $h3 ingress" 133 5
+ check_err $? "Multicast not received on second host"
+
+ delete_mcast_sg $rp1 198.51.100.2 225.1.2.3 $rp2 $rp3
+
+ $MZ $h1 -c 5 -p 128 -t udp -a 00:11:22:33:44:55 -b 01:00:5e:01:02:03 \
+ -A 198.51.100.2 -B 225.1.2.3 -q
+
+ tc_check_packets "dev $h2 ingress" 122 5
+ check_err $? "Multicast received on host although deleted"
+ tc_check_packets "dev $h3 ingress" 133 5
+ check_err $? "Multicast received on second host although deleted"
+
+ tc filter del dev $h3 ingress protocol ip pref 1 handle 133 flower
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 122 flower
+
+ log_test "mcast IPv4"
+}
+
+mcast_v6()
+{
+ # Add two interfaces to an MC group, send a packet to the MC group and
+ # verify packets are received on both. Then delete the route and verify
+ # packets are no longer received.
+
+ RET=0
+
+ tc filter add dev $h2 ingress protocol ipv6 pref 1 handle 122 flower \
+ dst_ip ff0e::3 action drop
+ tc filter add dev $h3 ingress protocol ipv6 pref 1 handle 133 flower \
+ dst_ip ff0e::3 action drop
+
+ create_mcast_sg $rp1 2001:db8:1::2 ff0e::3 $rp2 $rp3
+
+ # Send frames with the corresponding L2 destination address.
+ $MZ $h1 -6 -c 5 -p 128 -t udp -a 00:11:22:33:44:55 \
+ -b 33:33:00:00:00:03 -A 2001:db8:1::2 -B ff0e::3 -q
+
+ tc_check_packets "dev $h2 ingress" 122 5
+ check_err $? "Multicast not received on first host"
+ tc_check_packets "dev $h3 ingress" 133 5
+ check_err $? "Multicast not received on second host"
+
+ delete_mcast_sg $rp1 2001:db8:1::2 ff0e::3 $rp2 $rp3
+
+ $MZ $h1 -6 -c 5 -p 128 -t udp -a 00:11:22:33:44:55 \
+ -b 33:33:00:00:00:03 -A 2001:db8:1::2 -B ff0e::3 -q
+
+ tc_check_packets "dev $h2 ingress" 122 5
+ check_err $? "Multicast received on first host although deleted"
+ tc_check_packets "dev $h3 ingress" 133 5
+ check_err $? "Multicast received on second host although deleted"
+
+ tc filter del dev $h3 ingress protocol ipv6 pref 1 handle 133 flower
+ tc filter del dev $h2 ingress protocol ipv6 pref 1 handle 122 flower
+
+ log_test "mcast IPv6"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/router_vid_1.sh b/tools/testing/selftests/net/forwarding/router_vid_1.sh
new file mode 100755
index 000000000000..a7306c7ac06d
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/router_vid_1.sh
@@ -0,0 +1,135 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="ping_ipv4 ping_ipv6"
+NUM_NETIFS=4
+source lib.sh
+
+h1_create()
+{
+ vrf_create "vrf-h1"
+ ip link set dev vrf-h1 up
+
+ ip link set dev $h1 up
+ vlan_create $h1 1 vrf-h1 192.0.2.2/24 2001:db8:1::2/64
+
+ ip route add 198.51.100.0/24 vrf vrf-h1 nexthop via 192.0.2.1
+ ip route add 2001:db8:2::/64 vrf vrf-h1 nexthop via 2001:db8:1::1
+}
+
+h1_destroy()
+{
+ ip route del 2001:db8:2::/64 vrf vrf-h1
+ ip route del 198.51.100.0/24 vrf vrf-h1
+
+ vlan_destroy $h1 1
+ ip link set dev $h1 down
+
+ ip link set dev vrf-h1 down
+ vrf_destroy "vrf-h1"
+}
+
+h2_create()
+{
+ vrf_create "vrf-h2"
+ ip link set dev vrf-h2 up
+
+ ip link set dev $h2 up
+ vlan_create $h2 1 vrf-h2 198.51.100.2/24 2001:db8:2::2/64
+
+ ip route add 192.0.2.0/24 vrf vrf-h2 nexthop via 198.51.100.1
+ ip route add 2001:db8:1::/64 vrf vrf-h2 nexthop via 2001:db8:2::1
+}
+
+h2_destroy()
+{
+ ip route del 2001:db8:1::/64 vrf vrf-h2
+ ip route del 192.0.2.0/24 vrf vrf-h2
+
+ vlan_destroy $h2 1
+ ip link set dev $h2 down
+
+ ip link set dev vrf-h2 down
+ vrf_destroy "vrf-h2"
+}
+
+router_create()
+{
+ ip link set dev $rp1 up
+ ip link add link $rp1 name $rp1.1 up type vlan id 1
+
+ ip address add 192.0.2.1/24 dev $rp1.1
+ ip address add 2001:db8:1::1/64 dev $rp1.1
+
+ ip link set dev $rp2 up
+ ip link add link $rp2 name $rp2.1 up type vlan id 1
+
+ ip address add 198.51.100.1/24 dev $rp2.1
+ ip address add 2001:db8:2::1/64 dev $rp2.1
+}
+
+router_destroy()
+{
+ ip address del 2001:db8:2::1/64 dev $rp2.1
+ ip address del 198.51.100.1/24 dev $rp2.1
+
+ ip link del dev $rp2.1
+ ip link set dev $rp2 down
+
+ ip address del 2001:db8:1::1/64 dev $rp1.1
+ ip address del 192.0.2.1/24 dev $rp1.1
+
+ ip link del dev $rp1.1
+ ip link set dev $rp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ rp1=${NETIFS[p2]}
+
+ rp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+
+ router_create
+
+ forwarding_enable
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ forwarding_restore
+
+ router_destroy
+
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1.1 198.51.100.2
+}
+
+ping_ipv6()
+{
+ ping6_test $h1.1 2001:db8:2::2
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
new file mode 100755
index 000000000000..56cef3b1c194
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
@@ -0,0 +1,786 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +--------------------+ +----------------------+
+# | H1 (vrf) | | H2 (vrf) |
+# | + $h1 | | + $h2 |
+# | | 192.0.2.1/28 | | | 192.0.2.2/28 |
+# +----|---------------+ +--|-------------------+
+# | |
+# +----|--------------------------------------------------|-------------------+
+# | SW | | |
+# | +--|--------------------------------------------------|-----------------+ |
+# | | + $swp1 BR1 (802.1d) + $swp2 | |
+# | | | |
+# | | + vx1 (vxlan) | |
+# | | local 192.0.2.17 | |
+# | | remote 192.0.2.34 192.0.2.50 | |
+# | | id 1000 dstport $VXPORT | |
+# | +-----------------------------------------------------------------------+ |
+# | |
+# | 192.0.2.32/28 via 192.0.2.18 |
+# | 192.0.2.48/28 via 192.0.2.18 |
+# | |
+# | + $rp1 |
+# | | 192.0.2.17/28 |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | VRP2 (vrf) |
+# | + $rp2 |
+# | 192.0.2.18/28 |
+# | | (maybe) HW
+# =============================================================================
+# | | (likely) SW
+# | + v1 (veth) + v3 (veth) |
+# | | 192.0.2.33/28 | 192.0.2.49/28 |
+# +----|---------------------------------------|----------------+
+# | |
+# +----|------------------------------+ +----|------------------------------+
+# | + v2 (veth) NS1 (netns) | | + v4 (veth) NS2 (netns) |
+# | 192.0.2.34/28 | | 192.0.2.50/28 |
+# | | | |
+# | 192.0.2.16/28 via 192.0.2.33 | | 192.0.2.16/28 via 192.0.2.49 |
+# | 192.0.2.50/32 via 192.0.2.33 | | 192.0.2.34/32 via 192.0.2.49 |
+# | | | |
+# | +-------------------------------+ | | +-------------------------------+ |
+# | | BR2 (802.1d) | | | | BR2 (802.1d) | |
+# | | + vx2 (vxlan) | | | | + vx2 (vxlan) | |
+# | | local 192.0.2.34 | | | | local 192.0.2.50 | |
+# | | remote 192.0.2.17 | | | | remote 192.0.2.17 | |
+# | | remote 192.0.2.50 | | | | remote 192.0.2.34 | |
+# | | id 1000 dstport $VXPORT | | | | id 1000 dstport $VXPORT | |
+# | | | | | | | |
+# | | + w1 (veth) | | | | + w1 (veth) | |
+# | +--|----------------------------+ | | +--|----------------------------+ |
+# | | | | | |
+# | +--|----------------------------+ | | +--|----------------------------+ |
+# | | | VW2 (vrf) | | | | | VW2 (vrf) | |
+# | | + w2 (veth) | | | | + w2 (veth) | |
+# | | 192.0.2.3/28 | | | | 192.0.2.4/28 | |
+# | +-------------------------------+ | | +-------------------------------+ |
+# +-----------------------------------+ +-----------------------------------+
+
+: ${VXPORT:=4789}
+export VXPORT
+
+: ${ALL_TESTS:="
+ ping_ipv4
+ test_flood
+ test_unicast
+ test_ttl
+ test_tos
+ test_ecn_encap
+ test_ecn_decap
+ reapply_config
+ ping_ipv4
+ test_flood
+ test_unicast
+ test_learning
+ "}
+
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1 192.0.2.1/28
+ tc qdisc add dev $h1 clsact
+}
+
+h1_destroy()
+{
+ tc qdisc del dev $h1 clsact
+ simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+ simple_if_init $h2 192.0.2.2/28
+ tc qdisc add dev $h2 clsact
+}
+
+h2_destroy()
+{
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2 192.0.2.2/28
+}
+
+rp1_set_addr()
+{
+ ip address add dev $rp1 192.0.2.17/28
+
+ ip route add 192.0.2.32/28 nexthop via 192.0.2.18
+ ip route add 192.0.2.48/28 nexthop via 192.0.2.18
+}
+
+rp1_unset_addr()
+{
+ ip route del 192.0.2.48/28 nexthop via 192.0.2.18
+ ip route del 192.0.2.32/28 nexthop via 192.0.2.18
+
+ ip address del dev $rp1 192.0.2.17/28
+}
+
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 0 mcast_snooping 0
+ # Make sure the bridge uses the MAC address of the local port and not
+ # that of the VxLAN's device.
+ ip link set dev br1 address $(mac_get $swp1)
+ ip link set dev br1 up
+
+ ip link set dev $rp1 up
+ rp1_set_addr
+
+ ip link add name vx1 type vxlan id 1000 \
+ local 192.0.2.17 dstport "$VXPORT" \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx1 up
+
+ ip link set dev vx1 master br1
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+
+ bridge fdb append dev vx1 00:00:00:00:00:00 dst 192.0.2.34 self
+ bridge fdb append dev vx1 00:00:00:00:00:00 dst 192.0.2.50 self
+}
+
+switch_destroy()
+{
+ rp1_unset_addr
+ ip link set dev $rp1 down
+
+ bridge fdb del dev vx1 00:00:00:00:00:00 dst 192.0.2.50 self
+ bridge fdb del dev vx1 00:00:00:00:00:00 dst 192.0.2.34 self
+
+ ip link set dev vx1 nomaster
+ ip link set dev vx1 down
+ ip link del dev vx1
+
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ ip link set dev br1 down
+ ip link del dev br1
+}
+
+vrp2_create()
+{
+ simple_if_init $rp2 192.0.2.18/28
+ __simple_if_init v1 v$rp2 192.0.2.33/28
+ __simple_if_init v3 v$rp2 192.0.2.49/28
+ tc qdisc add dev v1 clsact
+}
+
+vrp2_destroy()
+{
+ tc qdisc del dev v1 clsact
+ __simple_if_fini v3 192.0.2.49/28
+ __simple_if_fini v1 192.0.2.33/28
+ simple_if_fini $rp2 192.0.2.18/28
+}
+
+ns_init_common()
+{
+ local in_if=$1; shift
+ local in_addr=$1; shift
+ local other_in_addr=$1; shift
+ local nh_addr=$1; shift
+ local host_addr=$1; shift
+
+ ip link set dev $in_if up
+ ip address add dev $in_if $in_addr/28
+ tc qdisc add dev $in_if clsact
+
+ ip link add name br2 type bridge vlan_filtering 0
+ ip link set dev br2 up
+
+ ip link add name w1 type veth peer name w2
+
+ ip link set dev w1 master br2
+ ip link set dev w1 up
+
+ ip link add name vx2 type vxlan id 1000 local $in_addr dstport "$VXPORT"
+ ip link set dev vx2 up
+ bridge fdb append dev vx2 00:00:00:00:00:00 dst 192.0.2.17 self
+ bridge fdb append dev vx2 00:00:00:00:00:00 dst $other_in_addr self
+
+ ip link set dev vx2 master br2
+ tc qdisc add dev vx2 clsact
+
+ simple_if_init w2 $host_addr/28
+
+ ip route add 192.0.2.16/28 nexthop via $nh_addr
+ ip route add $other_in_addr/32 nexthop via $nh_addr
+}
+export -f ns_init_common
+
+ns1_create()
+{
+ ip netns add ns1
+ ip link set dev v2 netns ns1
+ in_ns ns1 \
+ ns_init_common v2 192.0.2.34 192.0.2.50 192.0.2.33 192.0.2.3
+}
+
+ns1_destroy()
+{
+ ip netns exec ns1 ip link set dev v2 netns 1
+ ip netns del ns1
+}
+
+ns2_create()
+{
+ ip netns add ns2
+ ip link set dev v4 netns ns2
+ in_ns ns2 \
+ ns_init_common v4 192.0.2.50 192.0.2.34 192.0.2.49 192.0.2.4
+}
+
+ns2_destroy()
+{
+ ip netns exec ns2 ip link set dev v4 netns 1
+ ip netns del ns2
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1=${NETIFS[p5]}
+ rp2=${NETIFS[p6]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ switch_create
+
+ ip link add name v1 type veth peer name v2
+ ip link add name v3 type veth peer name v4
+ vrp2_create
+ ns1_create
+ ns2_create
+
+ r1_mac=$(in_ns ns1 mac_get w2)
+ r2_mac=$(in_ns ns2 mac_get w2)
+ h2_mac=$(mac_get $h2)
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ns2_destroy
+ ns1_destroy
+ vrp2_destroy
+ ip link del dev v3
+ ip link del dev v1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+# For the first round of tests, vx1 is the first device to get attached to the
+# bridge, and at that point the local IP is already configured. Try the
+# other scenario of attaching the device to an already-offloaded bridge, and
+# only then attach the local IP.
+reapply_config()
+{
+ echo "Reapplying configuration"
+
+ bridge fdb del dev vx1 00:00:00:00:00:00 dst 192.0.2.50 self
+ bridge fdb del dev vx1 00:00:00:00:00:00 dst 192.0.2.34 self
+ rp1_unset_addr
+ ip link set dev vx1 nomaster
+ sleep 5
+
+ ip link set dev vx1 master br1
+ bridge fdb append dev vx1 00:00:00:00:00:00 dst 192.0.2.34 self
+ bridge fdb append dev vx1 00:00:00:00:00:00 dst 192.0.2.50 self
+ sleep 1
+ rp1_set_addr
+ sleep 5
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.2 ": local->local"
+ ping_test $h1 192.0.2.3 ": local->remote 1"
+ ping_test $h1 192.0.2.4 ": local->remote 2"
+}
+
+maybe_in_ns()
+{
+ echo ${1:+in_ns} $1
+}
+
+__flood_counter_add_del()
+{
+ local add_del=$1; shift
+ local dev=$1; shift
+ local ns=$1; shift
+
+ # Putting the ICMP capture both to HW and to SW will end up
+ # double-counting the packets that are trapped to slow path, such as for
+ # the unicast test. Adding either skip_hw or skip_sw fixes this problem,
+ # but with skip_hw, the flooded packets are not counted at all, because
+ # those are dropped due to MAC address mismatch; and skip_sw is a no-go
+ # for veth-based topologies.
+ #
+ # So try to install with skip_sw and fall back to skip_hw if that fails.
+
+ $(maybe_in_ns $ns) __icmp_capture_add_del \
+ $add_del 100 "" $dev skip_sw 2>/dev/null || \
+ $(maybe_in_ns $ns) __icmp_capture_add_del \
+ $add_del 100 "" $dev skip_hw
+}
+
+flood_counter_install()
+{
+ __flood_counter_add_del add "$@"
+}
+
+flood_counter_uninstall()
+{
+ __flood_counter_add_del del "$@"
+}
+
+flood_fetch_stat()
+{
+ local dev=$1; shift
+ local ns=$1; shift
+
+ $(maybe_in_ns $ns) tc_rule_stats_get $dev 100 ingress
+}
+
+flood_fetch_stats()
+{
+ local counters=("${@}")
+ local counter
+
+ for counter in "${counters[@]}"; do
+ flood_fetch_stat $counter
+ done
+}
+
+vxlan_flood_test()
+{
+ local mac=$1; shift
+ local dst=$1; shift
+ local -a expects=("${@}")
+
+ local -a counters=($h2 "vx2 ns1" "vx2 ns2")
+ local counter
+ local key
+
+ for counter in "${counters[@]}"; do
+ flood_counter_install $counter
+ done
+
+ local -a t0s=($(flood_fetch_stats "${counters[@]}"))
+ $MZ $h1 -c 10 -d 100msec -p 64 -b $mac -B $dst -t icmp -q
+ sleep 1
+ local -a t1s=($(flood_fetch_stats "${counters[@]}"))
+
+ for key in ${!t0s[@]}; do
+ local delta=$((t1s[$key] - t0s[$key]))
+ local expect=${expects[$key]}
+
+ ((expect == delta))
+ check_err $? "${counters[$key]}: Expected to capture $expect packets, got $delta."
+ done
+
+ for counter in "${counters[@]}"; do
+ flood_counter_uninstall $counter
+ done
+}
+
+__test_flood()
+{
+ local mac=$1; shift
+ local dst=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ vxlan_flood_test $mac $dst 10 10 10
+
+ log_test "VXLAN: $what"
+}
+
+test_flood()
+{
+ __test_flood de:ad:be:ef:13:37 192.0.2.100 "flood"
+}
+
+vxlan_fdb_add_del()
+{
+ local add_del=$1; shift
+ local mac=$1; shift
+ local dev=$1; shift
+ local dst=$1; shift
+
+ bridge fdb $add_del dev $dev $mac self static permanent \
+ ${dst:+dst} $dst 2>/dev/null
+ bridge fdb $add_del dev $dev $mac master static 2>/dev/null
+}
+
+__test_unicast()
+{
+ local mac=$1; shift
+ local dst=$1; shift
+ local hit_idx=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ local -a expects=(0 0 0)
+ expects[$hit_idx]=10
+
+ vxlan_flood_test $mac $dst "${expects[@]}"
+
+ log_test "VXLAN: $what"
+}
+
+test_unicast()
+{
+ local -a targets=("$h2_mac $h2"
+ "$r1_mac vx1 192.0.2.34"
+ "$r2_mac vx1 192.0.2.50")
+ local target
+
+ for target in "${targets[@]}"; do
+ vxlan_fdb_add_del add $target
+ done
+
+ __test_unicast $h2_mac 192.0.2.2 0 "local MAC unicast"
+ __test_unicast $r1_mac 192.0.2.3 1 "remote MAC 1 unicast"
+ __test_unicast $r2_mac 192.0.2.4 2 "remote MAC 2 unicast"
+
+ for target in "${targets[@]}"; do
+ vxlan_fdb_add_del del $target
+ done
+}
+
+vxlan_ping_test()
+{
+ local ping_dev=$1; shift
+ local ping_dip=$1; shift
+ local ping_args=$1; shift
+ local capture_dev=$1; shift
+ local capture_dir=$1; shift
+ local capture_pref=$1; shift
+ local expect=$1; shift
+
+ local t0=$(tc_rule_stats_get $capture_dev $capture_pref $capture_dir)
+ ping_do $ping_dev $ping_dip "$ping_args"
+ local t1=$(tc_rule_stats_get $capture_dev $capture_pref $capture_dir)
+ local delta=$((t1 - t0))
+
+ # Tolerate a couple stray extra packets.
+ ((expect <= delta && delta <= expect + 2))
+ check_err $? "$capture_dev: Expected to capture $expect packets, got $delta."
+}
+
+test_ttl()
+{
+ RET=0
+
+ tc filter add dev v1 egress pref 77 prot ip \
+ flower ip_ttl 99 action pass
+ vxlan_ping_test $h1 192.0.2.3 "" v1 egress 77 10
+ tc filter del dev v1 egress pref 77 prot ip
+
+ log_test "VXLAN: envelope TTL"
+}
+
+test_tos()
+{
+ RET=0
+
+ tc filter add dev v1 egress pref 77 prot ip \
+ flower ip_tos 0x40 action pass
+ vxlan_ping_test $h1 192.0.2.3 "-Q 0x40" v1 egress 77 10
+ vxlan_ping_test $h1 192.0.2.3 "-Q 0x30" v1 egress 77 0
+ tc filter del dev v1 egress pref 77 prot ip
+
+ log_test "VXLAN: envelope TOS inheritance"
+}
+
+__test_ecn_encap()
+{
+ local q=$1; shift
+ local tos=$1; shift
+
+ RET=0
+
+ tc filter add dev v1 egress pref 77 prot ip \
+ flower ip_tos $tos action pass
+ sleep 1
+ vxlan_ping_test $h1 192.0.2.3 "-Q $q" v1 egress 77 10
+ tc filter del dev v1 egress pref 77 prot ip
+
+ log_test "VXLAN: ECN encap: $q->$tos"
+}
+
+test_ecn_encap()
+{
+ # In accordance with INET_ECN_encapsulate()
+ __test_ecn_encap 0x00 0x00
+ __test_ecn_encap 0x01 0x01
+ __test_ecn_encap 0x02 0x02
+ __test_ecn_encap 0x03 0x02
+}
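
The four cases above encode a single rule, which per the test's expectations is: the outer ECN bits copy the inner ones, except that CE (0x3) becomes ECT(0) (0x2), so congestion marks are never propagated outward. A C restatement for reference (a reading of the table above, cf. INET_ECN_encapsulate(); not part of the patch):

    static unsigned int outer_ecn_for(unsigned int inner_tos)
    {
            unsigned int ecn = inner_tos & 0x3;

            return ecn == 0x3 ? 0x2 : ecn;
    }
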
+
+vxlan_encapped_ping_do()
+{
+ local count=$1; shift
+ local dev=$1; shift
+ local next_hop_mac=$1; shift
+ local dest_ip=$1; shift
+ local dest_mac=$1; shift
+ local inner_tos=$1; shift
+ local outer_tos=$1; shift
+
+ $MZ $dev -c $count -d 100msec -q \
+ -b $next_hop_mac -B $dest_ip \
+ -t udp tos=$outer_tos,sp=23456,dp=$VXPORT,p=$(:
+ )"08:"$( : VXLAN flags
+ )"00:00:00:"$( : VXLAN reserved
+ )"00:03:e8:"$( : VXLAN VNI
+ )"00:"$( : VXLAN reserved
+ )"$dest_mac:"$( : ETH daddr
+ )"$(mac_get w2):"$( : ETH saddr
+ )"08:00:"$( : ETH type
+ )"45:"$( : IP version + IHL
+ )"$inner_tos:"$( : IP TOS
+ )"00:54:"$( : IP total length
+ )"99:83:"$( : IP identification
+ )"40:00:"$( : IP flags + frag off
+ )"40:"$( : IP TTL
+ )"01:"$( : IP proto
+ )"00:00:"$( : IP header csum
+ )"c0:00:02:03:"$( : IP saddr: 192.0.2.3
+ )"c0:00:02:01:"$( : IP daddr: 192.0.2.1
+ )"08:"$( : ICMP type
+ )"00:"$( : ICMP code
+ )"8b:f2:"$( : ICMP csum
+ )"1f:6a:"$( : ICMP request identifier
+ )"00:01:"$( : ICMP request sequence number
+ )"4f:ff:c5:5b:00:00:00:00:"$( : ICMP payload
+ )"6d:74:0b:00:00:00:00:00:"$( :
+ )"10:11:12:13:14:15:16:17:"$( :
+ )"18:19:1a:1b:1c:1d:1e:1f:"$( :
+ )"20:21:22:23:24:25:26:27:"$( :
+ )"28:29:2a:2b:2c:2d:2e:2f:"$( :
+ )"30:31:32:33:34:35:36:37"
+}
+export -f vxlan_encapped_ping_do
+
+vxlan_encapped_ping_test()
+{
+ local ping_dev=$1; shift
+ local nh_dev=$1; shift
+ local ping_dip=$1; shift
+ local inner_tos=$1; shift
+ local outer_tos=$1; shift
+ local stat_get=$1; shift
+ local expect=$1; shift
+
+ local t0=$($stat_get)
+
+ in_ns ns1 \
+ vxlan_encapped_ping_do 10 $ping_dev $(mac_get $nh_dev) \
+ $ping_dip $(mac_get $h1) \
+ $inner_tos $outer_tos
+
+ local t1=$($stat_get)
+ local delta=$((t1 - t0))
+
+ # Tolerate a couple stray extra packets.
+ ((expect <= delta && delta <= expect + 2))
+ check_err $? "Expected to capture $expect packets, got $delta."
+}
+export -f vxlan_encapped_ping_test
+
+__test_ecn_decap()
+{
+ local orig_inner_tos=$1; shift
+ local orig_outer_tos=$1; shift
+ local decapped_tos=$1; shift
+
+ RET=0
+
+ tc filter add dev $h1 ingress pref 77 prot ip \
+ flower ip_tos $decapped_tos action pass
+ sleep 1
+ vxlan_encapped_ping_test v2 v1 192.0.2.17 \
+ $orig_inner_tos $orig_outer_tos \
+ "tc_rule_stats_get $h1 77 ingress" 10
+ tc filter del dev $h1 ingress pref 77
+
+ log_test "VXLAN: ECN decap: $orig_outer_tos/$orig_inner_tos->$decapped_tos"
+}
+
+test_ecn_decap_error()
+{
+ local orig_inner_tos=00
+ local orig_outer_tos=03
+
+ RET=0
+
+ vxlan_encapped_ping_test v2 v1 192.0.2.17 \
+ $orig_inner_tos $orig_outer_tos \
+ "link_stats_rx_errors_get vx1" 10
+
+ log_test "VXLAN: ECN decap: $orig_outer_tos/$orig_inner_tos->error"
+}
+
+test_ecn_decap()
+{
+ # In accordance with INET_ECN_decapsulate()
+ __test_ecn_decap 00 00 0x00
+ __test_ecn_decap 01 01 0x01
+ __test_ecn_decap 02 01 0x02
+ __test_ecn_decap 01 03 0x03
+ __test_ecn_decap 02 03 0x03
+ test_ecn_decap_error
+}
+
+test_learning()
+{
+ local mac=de:ad:be:ef:13:37
+ local dst=192.0.2.100
+
+ # Enable learning on the VxLAN device and set ageing time to 10 seconds
+ ip link set dev br1 type bridge ageing_time 1000
+ ip link set dev vx1 type vxlan ageing 10
+ ip link set dev vx1 type vxlan learning
+ reapply_config
+
+ # Check that flooding works
+ RET=0
+
+ vxlan_flood_test $mac $dst 10 10 10
+
+ log_test "VXLAN: flood before learning"
+
+ # Send a packet with source mac set to $mac from host w2 and check that
+ # a corresponding entry is created in VxLAN device vx1
+ RET=0
+
+ in_ns ns1 $MZ w2 -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff -B $dst \
+ -t icmp -q
+ sleep 1
+
+ bridge fdb show brport vx1 | grep $mac | grep -q self
+ check_err $?
+ bridge fdb show brport vx1 | grep $mac | grep -q -v self
+ check_err $?
+
+ log_test "VXLAN: show learned FDB entry"
+
+ # Repeat the first test and check that packets only reach host w2 in ns1
+ RET=0
+
+ vxlan_flood_test $mac $dst 0 10 0
+
+ log_test "VXLAN: learned FDB entry"
+
+ # Delete the learned FDB entry from the VxLAN and bridge devices and
+ # check that packets are flooded
+ RET=0
+
+ bridge fdb del dev vx1 $mac master self
+ sleep 1
+
+ vxlan_flood_test $mac $dst 10 10 10
+
+ log_test "VXLAN: deletion of learned FDB entry"
+
+ # Re-learn the first FDB entry and check that it is correctly aged-out
+ RET=0
+
+ in_ns ns1 $MZ w2 -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff -B $dst \
+ -t icmp -q
+ sleep 1
+
+ bridge fdb show brport vx1 | grep $mac | grep -q self
+ check_err $?
+ bridge fdb show brport vx1 | grep $mac | grep -q -v self
+ check_err $?
+
+ vxlan_flood_test $mac $dst 0 10 0
+
+ sleep 20
+
+ bridge fdb show brport vx1 | grep $mac | grep -q self
+ check_fail $?
+ bridge fdb show brport vx1 | grep $mac | grep -q -v self
+ check_fail $?
+
+ vxlan_flood_test $mac $dst 10 10 10
+
+ log_test "VXLAN: Ageing of learned FDB entry"
+
+ # Toggle learning on the bridge port and check that the bridge's FDB
+ # is populated only when it should be
+ RET=0
+
+ ip link set dev vx1 type bridge_slave learning off
+
+ in_ns ns1 $MZ w2 -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff -B $dst \
+ -t icmp -q
+ sleep 1
+
+ bridge fdb show brport vx1 | grep $mac | grep -q -v self
+ check_fail $?
+
+ ip link set dev vx1 type bridge_slave learning on
+
+ in_ns ns1 $MZ w2 -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff -B $dst \
+ -t icmp -q
+ sleep 1
+
+ bridge fdb show brport vx1 | grep $mac | grep -q -v self
+ check_err $?
+
+ log_test "VXLAN: learning toggling on bridge port"
+
+ # Restore previous settings
+ ip link set dev vx1 type vxlan nolearning
+ ip link set dev vx1 type vxlan ageing 300
+ ip link set dev br1 type bridge ageing_time 30000
+ reapply_config
+}
+
+test_all()
+{
+ echo "Running tests with UDP port $VXPORT"
+ tests_run
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+test_all
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_port_8472.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_port_8472.sh
new file mode 100755
index 000000000000..3bf3da69195f
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d_port_8472.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# A wrapper to run VXLAN tests with an unusual port number.
+
+VXPORT=8472
+ALL_TESTS="
+ ping_ipv4
+"
+source vxlan_bridge_1d.sh
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh
new file mode 100755
index 000000000000..a5789721ba92
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh
@@ -0,0 +1,860 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# +-----------------------+ +------------------------+
+# | H1 (vrf) | | H2 (vrf) |
+# | + $h1.10 | | + $h2.10 |
+# | | 192.0.2.1/28 | | | 192.0.2.2/28 |
+# | | | | | |
+# | | + $h1.20 | | | + $h2.20 |
+# | \ | 198.51.100.1/24 | | \ | 198.51.100.2/24 |
+# | \| | | \| |
+# | + $h1 | | + $h2 |
+# +----|------------------+ +----|-------------------+
+# | |
+# +----|--------------------------------------------------|-------------------+
+# | SW | | |
+# | +--|--------------------------------------------------|-----------------+ |
+# | | + $swp1 BR1 (802.1q) + $swp2 | |
+# | | vid 10 vid 10 | |
+# | | vid 20 vid 20 | |
+# | | | |
+# | | + vx10 (vxlan) + vx20 (vxlan) | |
+# | | local 192.0.2.17 local 192.0.2.17 | |
+# | | remote 192.0.2.34 192.0.2.50 remote 192.0.2.34 192.0.2.50 | |
+# | | id 1000 dstport $VXPORT id 2000 dstport $VXPORT | |
+# | | vid 10 pvid untagged vid 20 pvid untagged | |
+# | +-----------------------------------------------------------------------+ |
+# | |
+# | 192.0.2.32/28 via 192.0.2.18 |
+# | 192.0.2.48/28 via 192.0.2.18 |
+# | |
+# | + $rp1 |
+# | | 192.0.2.17/28 |
+# +----|----------------------------------------------------------------------+
+# |
+# +----|--------------------------------------------------------+
+# | | VRP2 (vrf) |
+# | + $rp2 |
+# | 192.0.2.18/28 |
+# | | (maybe) HW
+# =============================================================================
+# | | (likely) SW
+# | + v1 (veth) + v3 (veth) |
+# | | 192.0.2.33/28 | 192.0.2.49/28 |
+# +----|---------------------------------------|----------------+
+# | |
+# +----|------------------------------+ +----|------------------------------+
+# | + v2 (veth) NS1 (netns) | | + v4 (veth) NS2 (netns) |
+# | 192.0.2.34/28 | | 192.0.2.50/28 |
+# | | | |
+# | 192.0.2.16/28 via 192.0.2.33 | | 192.0.2.16/28 via 192.0.2.49 |
+# | 192.0.2.50/32 via 192.0.2.33 | | 192.0.2.34/32 via 192.0.2.49 |
+# | | | |
+# | +-------------------------------+ | | +-------------------------------+ |
+# | | BR2 (802.1q) | | | | BR2 (802.1q) | |
+# | | + vx10 (vxlan) | | | | + vx10 (vxlan) | |
+# | | local 192.0.2.34 | | | | local 192.0.2.50 | |
+# | | remote 192.0.2.17 | | | | remote 192.0.2.17 | |
+# | | remote 192.0.2.50 | | | | remote 192.0.2.34 | |
+# | | id 1000 dstport $VXPORT | | | | id 1000 dstport $VXPORT | |
+# | | vid 10 pvid untagged | | | | vid 10 pvid untagged | |
+# | | | | | | | |
+# | | + vx20 (vxlan) | | | | + vx20 (vxlan) | |
+# | | local 192.0.2.34 | | | | local 192.0.2.50 | |
+# | | remote 192.0.2.17 | | | | remote 192.0.2.17 | |
+# | | remote 192.0.2.50 | | | | remote 192.0.2.34 | |
+# | | id 2000 dstport $VXPORT | | | | id 2000 dstport $VXPORT | |
+# | | vid 20 pvid untagged | | | | vid 20 pvid untagged | |
+# | | | | | | | |
+# | | + w1 (veth) | | | | + w1 (veth) | |
+# | | | vid 10 | | | | | vid 10 | |
+# | | | vid 20 | | | | | vid 20 | |
+# | +--|----------------------------+ | | +--|----------------------------+ |
+# | | | | | |
+# | +--|----------------------------+ | | +--|----------------------------+ |
+# | | + w2 (veth) VW2 (vrf) | | | | + w2 (veth) VW2 (vrf) | |
+# | | |\ | | | | |\ | |
+# | | | + w2.10 | | | | | + w2.10 | |
+# | | | 192.0.2.3/28 | | | | | 192.0.2.4/28 | |
+# | | | | | | | | | |
+# | | + w2.20 | | | | + w2.20 | |
+# | | 198.51.100.3/24 | | | | 198.51.100.4/24 | |
+# | +-------------------------------+ | | +-------------------------------+ |
+# +-----------------------------------+ +-----------------------------------+
+
+: ${VXPORT:=4789}
+export VXPORT
+
+: ${ALL_TESTS:="
+ ping_ipv4
+ test_flood
+ test_unicast
+ reapply_config
+ ping_ipv4
+ test_flood
+ test_unicast
+ test_learning
+ test_pvid
+ "}
+
+NUM_NETIFS=6
+source lib.sh
+
+h1_create()
+{
+ simple_if_init $h1
+ tc qdisc add dev $h1 clsact
+ vlan_create $h1 10 v$h1 192.0.2.1/28
+ vlan_create $h1 20 v$h1 198.51.100.1/24
+}
+
+h1_destroy()
+{
+ vlan_destroy $h1 20
+ vlan_destroy $h1 10
+ tc qdisc del dev $h1 clsact
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2
+ tc qdisc add dev $h2 clsact
+ vlan_create $h2 10 v$h2 192.0.2.2/28
+ vlan_create $h2 20 v$h2 198.51.100.2/24
+}
+
+h2_destroy()
+{
+ vlan_destroy $h2 20
+ vlan_destroy $h2 10
+ tc qdisc del dev $h2 clsact
+ simple_if_fini $h2
+}
+
+rp1_set_addr()
+{
+ ip address add dev $rp1 192.0.2.17/28
+
+ ip route add 192.0.2.32/28 nexthop via 192.0.2.18
+ ip route add 192.0.2.48/28 nexthop via 192.0.2.18
+}
+
+rp1_unset_addr()
+{
+ ip route del 192.0.2.48/28 nexthop via 192.0.2.18
+ ip route del 192.0.2.32/28 nexthop via 192.0.2.18
+
+ ip address del dev $rp1 192.0.2.17/28
+}
+
+switch_create()
+{
+ ip link add name br1 type bridge vlan_filtering 1 vlan_default_pvid 0 \
+ mcast_snooping 0
+ # Make sure the bridge uses the MAC address of the local port and not
+ # that of the VxLAN device.
+ ip link set dev br1 address $(mac_get $swp1)
+ ip link set dev br1 up
+
+ ip link set dev $rp1 up
+ rp1_set_addr
+
+ ip link add name vx10 type vxlan id 1000 \
+ local 192.0.2.17 dstport "$VXPORT" \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx10 up
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 2000 \
+ local 192.0.2.17 dstport "$VXPORT" \
+ nolearning noudpcsum tos inherit ttl 100
+ ip link set dev vx20 up
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ ip link set dev $swp1 master br1
+ ip link set dev $swp1 up
+ bridge vlan add vid 10 dev $swp1
+ bridge vlan add vid 20 dev $swp1
+
+ ip link set dev $swp2 master br1
+ ip link set dev $swp2 up
+ bridge vlan add vid 10 dev $swp2
+ bridge vlan add vid 20 dev $swp2
+
+ bridge fdb append dev vx10 00:00:00:00:00:00 dst 192.0.2.34 self
+ bridge fdb append dev vx10 00:00:00:00:00:00 dst 192.0.2.50 self
+
+ bridge fdb append dev vx20 00:00:00:00:00:00 dst 192.0.2.34 self
+ bridge fdb append dev vx20 00:00:00:00:00:00 dst 192.0.2.50 self
+}
+
+switch_destroy()
+{
+ bridge fdb del dev vx20 00:00:00:00:00:00 dst 192.0.2.50 self
+ bridge fdb del dev vx20 00:00:00:00:00:00 dst 192.0.2.34 self
+
+ bridge fdb del dev vx10 00:00:00:00:00:00 dst 192.0.2.50 self
+ bridge fdb del dev vx10 00:00:00:00:00:00 dst 192.0.2.34 self
+
+ bridge vlan del vid 20 dev $swp2
+ bridge vlan del vid 10 dev $swp2
+ ip link set dev $swp2 down
+ ip link set dev $swp2 nomaster
+
+ bridge vlan del vid 20 dev $swp1
+ bridge vlan del vid 10 dev $swp1
+ ip link set dev $swp1 down
+ ip link set dev $swp1 nomaster
+
+ bridge vlan del vid 20 dev vx20
+ ip link set dev vx20 nomaster
+
+ ip link set dev vx20 down
+ ip link del dev vx20
+
+ bridge vlan del vid 10 dev vx10
+ ip link set dev vx10 nomaster
+
+ ip link set dev vx10 down
+ ip link del dev vx10
+
+ rp1_unset_addr
+ ip link set dev $rp1 down
+
+ ip link set dev br1 down
+ ip link del dev br1
+}
+
+vrp2_create()
+{
+ simple_if_init $rp2 192.0.2.18/28
+ __simple_if_init v1 v$rp2 192.0.2.33/28
+ __simple_if_init v3 v$rp2 192.0.2.49/28
+ tc qdisc add dev v1 clsact
+}
+
+vrp2_destroy()
+{
+ tc qdisc del dev v1 clsact
+ __simple_if_fini v3 192.0.2.49/28
+ __simple_if_fini v1 192.0.2.33/28
+ simple_if_fini $rp2 192.0.2.18/28
+}
+
+ns_init_common()
+{
+ local in_if=$1; shift
+ local in_addr=$1; shift
+ local other_in_addr=$1; shift
+ local nh_addr=$1; shift
+ local host_addr1=$1; shift
+ local host_addr2=$1; shift
+
+ ip link set dev $in_if up
+ ip address add dev $in_if $in_addr/28
+ tc qdisc add dev $in_if clsact
+
+ ip link add name br2 type bridge vlan_filtering 1 vlan_default_pvid 0
+ ip link set dev br2 up
+
+ ip link add name w1 type veth peer name w2
+
+ ip link set dev w1 master br2
+ ip link set dev w1 up
+
+ bridge vlan add vid 10 dev w1
+ bridge vlan add vid 20 dev w1
+
+ ip link add name vx10 type vxlan id 1000 local $in_addr \
+ dstport "$VXPORT"
+ ip link set dev vx10 up
+ bridge fdb append dev vx10 00:00:00:00:00:00 dst 192.0.2.17 self
+ bridge fdb append dev vx10 00:00:00:00:00:00 dst $other_in_addr self
+
+ ip link set dev vx10 master br2
+ tc qdisc add dev vx10 clsact
+
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link add name vx20 type vxlan id 2000 local $in_addr \
+ dstport "$VXPORT"
+ ip link set dev vx20 up
+ bridge fdb append dev vx20 00:00:00:00:00:00 dst 192.0.2.17 self
+ bridge fdb append dev vx20 00:00:00:00:00:00 dst $other_in_addr self
+
+ ip link set dev vx20 master br2
+ tc qdisc add dev vx20 clsact
+
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ simple_if_init w2
+ vlan_create w2 10 vw2 $host_addr1/28
+ vlan_create w2 20 vw2 $host_addr2/24
+
+ ip route add 192.0.2.16/28 nexthop via $nh_addr
+ ip route add $other_in_addr/32 nexthop via $nh_addr
+}
+export -f ns_init_common
+
+ns1_create()
+{
+ ip netns add ns1
+ ip link set dev v2 netns ns1
+ in_ns ns1 \
+ ns_init_common v2 192.0.2.34 192.0.2.50 192.0.2.33 192.0.2.3 \
+ 198.51.100.3
+}
+
+ns1_destroy()
+{
+ ip netns exec ns1 ip link set dev v2 netns 1
+ ip netns del ns1
+}
+
+ns2_create()
+{
+ ip netns add ns2
+ ip link set dev v4 netns ns2
+ in_ns ns2 \
+ ns_init_common v4 192.0.2.50 192.0.2.34 192.0.2.49 192.0.2.4 \
+ 198.51.100.4
+}
+
+ns2_destroy()
+{
+ ip netns exec ns2 ip link set dev v4 netns 1
+ ip netns del ns2
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ rp1=${NETIFS[p5]}
+ rp2=${NETIFS[p6]}
+
+ vrf_prepare
+ forwarding_enable
+
+ h1_create
+ h2_create
+ switch_create
+
+ ip link add name v1 type veth peer name v2
+ ip link add name v3 type veth peer name v4
+ vrp2_create
+ ns1_create
+ ns2_create
+
+ r1_mac=$(in_ns ns1 mac_get w2)
+ r2_mac=$(in_ns ns2 mac_get w2)
+ h2_mac=$(mac_get $h2)
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ ns2_destroy
+ ns1_destroy
+ vrp2_destroy
+ ip link del dev v3
+ ip link del dev v1
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ forwarding_restore
+ vrf_cleanup
+}
+
+# In the first round of tests, vx10 and vx20 were the first devices to be
+# attached to the bridge, and at that point the local IP was already
+# configured. Try the other scenario of attaching these devices to a bridge
+# that already has local ports as members, and only then assign the local IP.
+reapply_config()
+{
+ log_info "Reapplying configuration"
+
+ bridge fdb del dev vx20 00:00:00:00:00:00 dst 192.0.2.50 self
+ bridge fdb del dev vx20 00:00:00:00:00:00 dst 192.0.2.34 self
+
+ bridge fdb del dev vx10 00:00:00:00:00:00 dst 192.0.2.50 self
+ bridge fdb del dev vx10 00:00:00:00:00:00 dst 192.0.2.34 self
+
+ ip link set dev vx20 nomaster
+ ip link set dev vx10 nomaster
+
+ rp1_unset_addr
+ sleep 5
+
+ ip link set dev vx10 master br1
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ ip link set dev vx20 master br1
+ bridge vlan add vid 20 dev vx20 pvid untagged
+
+ bridge fdb append dev vx10 00:00:00:00:00:00 dst 192.0.2.34 self
+ bridge fdb append dev vx10 00:00:00:00:00:00 dst 192.0.2.50 self
+
+ bridge fdb append dev vx20 00:00:00:00:00:00 dst 192.0.2.34 self
+ bridge fdb append dev vx20 00:00:00:00:00:00 dst 192.0.2.50 self
+
+ rp1_set_addr
+ sleep 5
+}
+
+ping_ipv4()
+{
+ ping_test $h1.10 192.0.2.2 ": local->local vid 10"
+ ping_test $h1.20 198.51.100.2 ": local->local vid 20"
+ ping_test $h1.10 192.0.2.3 ": local->remote 1 vid 10"
+ ping_test $h1.10 192.0.2.4 ": local->remote 2 vid 10"
+ ping_test $h1.20 198.51.100.3 ": local->remote 1 vid 20"
+ ping_test $h1.20 198.51.100.4 ": local->remote 2 vid 20"
+}
+
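+# Expand to an "in_ns <ns>" command prefix when a namespace is given, and
+# to nothing otherwise, so callers can run the same command either locally
+# or inside that namespace.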
+maybe_in_ns()
+{
+ echo ${1:+in_ns} $1
+}
+
+__flood_counter_add_del()
+{
+ local add_del=$1; shift
+ local dev=$1; shift
+ local ns=$1; shift
+
+ # Putting the ICMP capture into both HW and SW would end up
+ # double-counting the packets that are trapped to the slow path, such as
+ # for the unicast test. Adding either skip_hw or skip_sw fixes this
+ # problem, but with skip_hw, the flooded packets are not counted at all,
+ # because those are dropped due to MAC address mismatch; and skip_sw is a
+ # no-go for veth-based topologies.
+ #
+ # So try to install with skip_sw and fall back to skip_hw if that fails.
+
+ $(maybe_in_ns $ns) __icmp_capture_add_del \
+ $add_del 100 "" $dev skip_sw 2>/dev/null || \
+ $(maybe_in_ns $ns) __icmp_capture_add_del \
+ $add_del 100 "" $dev skip_hw
+}
+
+flood_counter_install()
+{
+ __flood_counter_add_del add "$@"
+}
+
+flood_counter_uninstall()
+{
+ __flood_counter_add_del del "$@"
+}
+
+flood_fetch_stat()
+{
+ local dev=$1; shift
+ local ns=$1; shift
+
+ $(maybe_in_ns $ns) tc_rule_stats_get $dev 100 ingress
+}
+
+flood_fetch_stats()
+{
+ local counters=("${@}")
+ local counter
+
+ for counter in "${counters[@]}"; do
+ flood_fetch_stat $counter
+ done
+}
+
+vxlan_flood_test()
+{
+ local mac=$1; shift
+ local dst=$1; shift
+ local vid=$1; shift
+ local -a expects=("${@}")
+
+ local -a counters=($h2 "vx10 ns1" "vx20 ns1" "vx10 ns2" "vx20 ns2")
+ local counter
+ local key
+
+ # Packets reach the local host tagged, whereas they reach the VxLAN
+ # devices untagged. To be able to use the same filter for all counters,
+ # make sure the packets also reach the local host untagged.
+ bridge vlan add vid $vid dev $swp2 untagged
+ for counter in "${counters[@]}"; do
+ flood_counter_install $counter
+ done
+
+ local -a t0s=($(flood_fetch_stats "${counters[@]}"))
+ $MZ $h1 -Q $vid -c 10 -d 100msec -p 64 -b $mac -B $dst -t icmp -q
+ sleep 1
+ local -a t1s=($(flood_fetch_stats "${counters[@]}"))
+
+ for key in ${!t0s[@]}; do
+ local delta=$((t1s[$key] - t0s[$key]))
+ local expect=${expects[$key]}
+
+ ((expect == delta))
+ check_err $? "${counters[$key]}: Expected to capture $expect packets, got $delta."
+ done
+
+ for counter in "${counters[@]}"; do
+ flood_counter_uninstall $counter
+ done
+ bridge vlan add vid $vid dev $swp2
+}
+
+__test_flood()
+{
+ local mac=$1; shift
+ local dst=$1; shift
+ local vid=$1; shift
+ local what=$1; shift
+ local -a expects=("${@}")
+
+ RET=0
+
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ log_test "VXLAN: $what"
+}
+
+test_flood()
+{
+ __test_flood de:ad:be:ef:13:37 192.0.2.100 10 "flood vlan 10" \
+ 10 10 0 10 0
+ __test_flood ca:fe:be:ef:13:37 198.51.100.100 20 "flood vlan 20" \
+ 10 0 10 0 10
+}
+
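+# Add or delete a static FDB entry for $mac: once on the VXLAN device
+# itself (self, with an optional destination IP) and once in the bridge
+# FDB (master), scoped to VLAN $vid.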
+vxlan_fdb_add_del()
+{
+ local add_del=$1; shift
+ local vid=$1; shift
+ local mac=$1; shift
+ local dev=$1; shift
+ local dst=$1; shift
+
+ bridge fdb $add_del dev $dev $mac self static permanent \
+ ${dst:+dst} $dst 2>/dev/null
+ bridge fdb $add_del dev $dev $mac master static vlan $vid 2>/dev/null
+}
+
+__test_unicast()
+{
+ local mac=$1; shift
+ local dst=$1; shift
+ local hit_idx=$1; shift
+ local vid=$1; shift
+ local what=$1; shift
+
+ RET=0
+
+ local -a expects=(0 0 0 0 0)
+ expects[$hit_idx]=10
+
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ log_test "VXLAN: $what"
+}
+
+test_unicast()
+{
+ local -a targets=("$h2_mac $h2"
+ "$r1_mac vx10 192.0.2.34"
+ "$r2_mac vx10 192.0.2.50")
+ local target
+
+ log_info "unicast vlan 10"
+
+ for target in "${targets[@]}"; do
+ vxlan_fdb_add_del add 10 $target
+ done
+
+ __test_unicast $h2_mac 192.0.2.2 0 10 "local MAC unicast"
+ __test_unicast $r1_mac 192.0.2.3 1 10 "remote MAC 1 unicast"
+ __test_unicast $r2_mac 192.0.2.4 3 10 "remote MAC 2 unicast"
+
+ for target in "${targets[@]}"; do
+ vxlan_fdb_add_del del 10 $target
+ done
+
+ log_info "unicast vlan 20"
+
+ targets=("$h2_mac $h2" "$r1_mac vx20 192.0.2.34" \
+ "$r2_mac vx20 192.0.2.50")
+
+ for target in "${targets[@]}"; do
+ vxlan_fdb_add_del add 20 $target
+ done
+
+ __test_unicast $h2_mac 198.51.100.2 0 20 "local MAC unicast"
+ __test_unicast $r1_mac 198.51.100.3 2 20 "remote MAC 1 unicast"
+ __test_unicast $r2_mac 198.51.100.4 4 20 "remote MAC 2 unicast"
+
+ for target in "${targets[@]}"; do
+ vxlan_fdb_add_del del 20 $target
+ done
+}
+
+test_pvid()
+{
+ local -a expects=(0 0 0 0 0)
+ local mac=de:ad:be:ef:13:37
+ local dst=192.0.2.100
+ local vid=10
+
+ # Check that flooding works
+ RET=0
+
+ expects[0]=10; expects[1]=10; expects[3]=10
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ log_test "VXLAN: flood before pvid off"
+
+ # Toggle PVID off and test that flood to remote hosts does not work
+ RET=0
+
+ bridge vlan add vid 10 dev vx10
+
+ expects[0]=10; expects[1]=0; expects[3]=0
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ log_test "VXLAN: flood after pvid off"
+
+ # Toggle PVID on and test that flood to remote hosts does work
+ RET=0
+
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ expects[0]=10; expects[1]=10; expects[3]=10
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ log_test "VXLAN: flood after pvid on"
+
+ # Add a new VLAN and test that it does not affect flooding
+ RET=0
+
+ bridge vlan add vid 30 dev vx10
+
+ expects[0]=10; expects[1]=10; expects[3]=10
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ bridge vlan del vid 30 dev vx10
+
+ log_test "VXLAN: flood after vlan add"
+
+ # Remove currently mapped VLAN and test that flood to remote hosts does
+ # not work
+ RET=0
+
+ bridge vlan del vid 10 dev vx10
+
+ expects[0]=10; expects[1]=0; expects[3]=0
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ log_test "VXLAN: flood after vlan delete"
+
+ # Re-add the VLAN and test that flood to remote hosts does work
+ RET=0
+
+ bridge vlan add vid 10 dev vx10 pvid untagged
+
+ expects[0]=10; expects[1]=10; expects[3]=10
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ log_test "VXLAN: flood after vlan re-add"
+}
+
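+# Ping $ping_dip from $ping_dev, and use the tc rule at $capture_pref on
+# $capture_dev to check that roughly $expect packets were captured.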
+vxlan_ping_test()
+{
+ local ping_dev=$1; shift
+ local ping_dip=$1; shift
+ local ping_args=$1; shift
+ local capture_dev=$1; shift
+ local capture_dir=$1; shift
+ local capture_pref=$1; shift
+ local expect=$1; shift
+
+ local t0=$(tc_rule_stats_get $capture_dev $capture_pref $capture_dir)
+ ping_do $ping_dev $ping_dip "$ping_args"
+ local t1=$(tc_rule_stats_get $capture_dev $capture_pref $capture_dir)
+ local delta=$((t1 - t0))
+
+ # Tolerate a couple of stray extra packets.
+ ((expect <= delta && delta <= expect + 2))
+ check_err $? "$capture_dev: Expected to capture $expect packets, got $delta."
+}
+
+__test_learning()
+{
+ local -a expects=(0 0 0 0 0)
+ local mac=$1; shift
+ local dst=$1; shift
+ local vid=$1; shift
+ local idx1=$1; shift
+ local idx2=$1; shift
+ local vx=vx$vid
+
+ # Check that flooding works
+ RET=0
+
+ expects[0]=10; expects[$idx1]=10; expects[$idx2]=10
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ log_test "VXLAN: flood before learning"
+
+ # Send a packet with source mac set to $mac from host w2 and check that
+ # a corresponding entry is created in the VxLAN device
+ RET=0
+
+ in_ns ns1 $MZ w2 -Q $vid -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff \
+ -B $dst -t icmp -q
+ sleep 1
+
+ bridge fdb show brport $vx | grep $mac | grep -q self
+ check_err $?
+ bridge fdb show brport $vx | grep $mac | grep "vlan $vid" \
+ | grep -q -v self
+ check_err $?
+
+ log_test "VXLAN: show learned FDB entry"
+
+ # Repeat the first test and check that packets only reach host w2 in ns1
+ RET=0
+
+ expects[0]=0; expects[$idx1]=10; expects[$idx2]=0
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ log_test "VXLAN: learned FDB entry"
+
+ # Delete the learned FDB entry from the VxLAN and bridge devices and
+ # check that packets are flooded
+ RET=0
+
+ bridge fdb del dev $vx $mac master self vlan $vid
+ sleep 1
+
+ expects[0]=10; expects[$idx1]=10; expects[$idx2]=10
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ log_test "VXLAN: deletion of learned FDB entry"
+
+ # Re-learn the first FDB entry and check that it is correctly aged-out
+ RET=0
+
+ in_ns ns1 $MZ w2 -Q $vid -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff \
+ -B $dst -t icmp -q
+ sleep 1
+
+ bridge fdb show brport $vx | grep $mac | grep -q self
+ check_err $?
+ bridge fdb show brport $vx | grep $mac | grep "vlan $vid" \
+ | grep -q -v self
+ check_err $?
+
+ expects[0]=0; expects[$idx1]=10; expects[$idx2]=0
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ sleep 20
+
+ bridge fdb show brport $vx | grep $mac | grep -q self
+ check_fail $?
+ bridge fdb show brport $vx | grep $mac | grep "vlan $vid" \
+ | grep -q -v self
+ check_fail $?
+
+ expects[0]=10; expects[$idx1]=10; expects[$idx2]=10
+ vxlan_flood_test $mac $dst $vid "${expects[@]}"
+
+ log_test "VXLAN: Ageing of learned FDB entry"
+
+ # Toggle learning on the bridge port and check that the bridge's FDB
+ # is populated only when it should be
+ RET=0
+
+ ip link set dev $vx type bridge_slave learning off
+
+ in_ns ns1 $MZ w2 -Q $vid -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff \
+ -B $dst -t icmp -q
+ sleep 1
+
+ bridge fdb show brport $vx | grep $mac | grep "vlan $vid" \
+ | grep -q -v self
+ check_fail $?
+
+ ip link set dev $vx type bridge_slave learning on
+
+ in_ns ns1 $MZ w2 -Q $vid -c 1 -p 64 -a $mac -b ff:ff:ff:ff:ff:ff \
+ -B $dst -t icmp -q
+ sleep 1
+
+ bridge fdb show brport $vx | grep $mac | grep "vlan $vid" \
+ | grep -q -v self
+ check_err $?
+
+ log_test "VXLAN: learning toggling on bridge port"
+}
+
+test_learning()
+{
+ local mac=de:ad:be:ef:13:37
+ local dst=192.0.2.100
+ local vid=10
+
+ # Enable learning on the VxLAN devices and set ageing time to 10 seconds
+ ip link set dev br1 type bridge ageing_time 1000
+ ip link set dev vx10 type vxlan ageing 10
+ ip link set dev vx10 type vxlan learning
+ ip link set dev vx20 type vxlan ageing 10
+ ip link set dev vx20 type vxlan learning
+ reapply_config
+
+ log_info "learning vlan 10"
+
+ __test_learning $mac $dst $vid 1 3
+
+ log_info "learning vlan 20"
+
+ mac=ca:fe:be:ef:13:37
+ dst=198.51.100.100
+ vid=20
+
+ __test_learning $mac $dst $vid 2 4
+
+ # Restore previous settings
+ ip link set dev vx20 type vxlan nolearning
+ ip link set dev vx20 type vxlan ageing 300
+ ip link set dev vx10 type vxlan nolearning
+ ip link set dev vx10 type vxlan ageing 300
+ ip link set dev br1 type bridge ageing_time 30000
+ reapply_config
+}
+
+test_all()
+{
+ log_info "Running tests with UDP port $VXPORT"
+ tests_run
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+test_all
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_port_8472.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_port_8472.sh
new file mode 100755
index 000000000000..b1b2d1a3164f
--- /dev/null
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1q_port_8472.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# A wrapper to run VXLAN tests with an unusual port number.
+
+VXPORT=8472
+ALL_TESTS="
+ ping_ipv4
+"
+source vxlan_bridge_1q.sh
diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c
index 406cc70c571d..4b02933cab8a 100644
--- a/tools/testing/selftests/net/msg_zerocopy.c
+++ b/tools/testing/selftests/net/msg_zerocopy.c
@@ -651,12 +651,13 @@ static void do_flush_datagram(int fd, int type)
static void do_rx(int domain, int type, int protocol)
{
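+ /* Let the receiver run slightly longer than the sender, so that
+ * datagrams still in flight when the sender stops are drained
+ * rather than missed.
+ */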
+ const int cfg_receiver_wait_ms = 400;
uint64_t tstop;
int fd;
fd = do_setup_rx(domain, type, protocol);
- tstop = gettimeofday_ms() + cfg_runtime_ms;
+ tstop = gettimeofday_ms() + cfg_runtime_ms + cfg_receiver_wait_ms;
do {
if (type == SOCK_STREAM)
do_flush_tcp(fd);
diff --git a/tools/testing/selftests/net/msg_zerocopy.sh b/tools/testing/selftests/net/msg_zerocopy.sh
index c43c6debda06..825ffec85cea 100755
--- a/tools/testing/selftests/net/msg_zerocopy.sh
+++ b/tools/testing/selftests/net/msg_zerocopy.sh
@@ -25,6 +25,8 @@ readonly path_sysctl_mem="net.core.optmem_max"
if [[ "$#" -eq "0" ]]; then
$0 4 tcp -t 1
$0 6 tcp -t 1
+ $0 4 udp -t 1
+ $0 6 udp -t 1
echo "OK. All tests passed"
exit 0
fi
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index a369d616b390..e2c94e47707c 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -26,6 +26,47 @@
# - pmtu_ipv6
# Same as pmtu_ipv4, except for locked PMTU tests, using IPv6
#
+# - pmtu_ipv4_vxlan4_exception
+# Set up the same network topology as pmtu_ipv4, create a VXLAN tunnel
+# over IPv4 between A and B, routed via R1. On the link between R1 and B,
+# set an MTU lower than the VXLAN MTU and the MTU on the link between A and
+# R1. Send IPv4 packets, exceeding the MTU between R1 and B, over VXLAN
+# from A to B and check that the PMTU exception is created with the right
+# value on A
+#
+# - pmtu_ipv6_vxlan4_exception
+# Same as pmtu_ipv4_vxlan4_exception, but send IPv6 packets from A to B
+#
+# - pmtu_ipv4_vxlan6_exception
+# Same as pmtu_ipv4_vxlan4_exception, but use IPv6 transport from A to B
+#
+# - pmtu_ipv6_vxlan6_exception
+# Same as pmtu_ipv4_vxlan6_exception, but send IPv6 packets from A to B
+#
+# - pmtu_ipv4_geneve4_exception
+# Same as pmtu_ipv4_vxlan4_exception, but using a GENEVE tunnel instead of
+# VXLAN
+#
+# - pmtu_ipv6_geneve4_exception
+# Same as pmtu_ipv6_vxlan4_exception, but using a GENEVE tunnel instead of
+# VXLAN
+#
+# - pmtu_ipv4_geneve6_exception
+# Same as pmtu_ipv4_vxlan6_exception, but using a GENEVE tunnel instead of
+# VXLAN
+#
+# - pmtu_ipv6_geneve6_exception
+# Same as pmtu_ipv6_vxlan6_exception, but using a GENEVE tunnel instead of
+# VXLAN
+#
+# - pmtu_ipv{4,6}_fou{4,6}_exception
+# Same as pmtu_ipv4_vxlan4_exception, but using a direct IPv4/IPv6
+# encapsulation (FoU) over IPv4/IPv6, instead of VXLAN
+#
+# - pmtu_ipv{4,6}_gue{4,6}_exception
+# Same as pmtu_ipv4_vxlan4_exception, but using a generic UDP IPv4/IPv6
+# encapsulation (GUE) over IPv4/IPv6, instead of VXLAN
+#
# - pmtu_vti4_exception
# Set up vti tunnel on top of veth, with xfrm states and policies, in two
# namespaces with matching endpoints. Check that route exception is not
@@ -72,6 +113,22 @@ which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
tests="
pmtu_ipv4_exception ipv4: PMTU exceptions
pmtu_ipv6_exception ipv6: PMTU exceptions
+ pmtu_ipv4_vxlan4_exception IPv4 over vxlan4: PMTU exceptions
+ pmtu_ipv6_vxlan4_exception IPv6 over vxlan4: PMTU exceptions
+ pmtu_ipv4_vxlan6_exception IPv4 over vxlan6: PMTU exceptions
+ pmtu_ipv6_vxlan6_exception IPv6 over vxlan6: PMTU exceptions
+ pmtu_ipv4_geneve4_exception IPv4 over geneve4: PMTU exceptions
+ pmtu_ipv6_geneve4_exception IPv6 over geneve4: PMTU exceptions
+ pmtu_ipv4_geneve6_exception IPv4 over geneve6: PMTU exceptions
+ pmtu_ipv6_geneve6_exception IPv6 over geneve6: PMTU exceptions
+ pmtu_ipv4_fou4_exception IPv4 over fou4: PMTU exceptions
+ pmtu_ipv6_fou4_exception IPv6 over fou4: PMTU exceptions
+ pmtu_ipv4_fou6_exception IPv4 over fou6: PMTU exceptions
+ pmtu_ipv6_fou6_exception IPv6 over fou6: PMTU exceptions
+ pmtu_ipv4_gue4_exception IPv4 over gue4: PMTU exceptions
+ pmtu_ipv6_gue4_exception IPv6 over gue4: PMTU exceptions
+ pmtu_ipv4_gue6_exception IPv4 over gue6: PMTU exceptions
+ pmtu_ipv6_gue6_exception IPv6 over gue6: PMTU exceptions
pmtu_vti6_exception vti6: PMTU exceptions
pmtu_vti4_exception vti4: PMTU exceptions
pmtu_vti4_default_mtu vti4: default MTU assignment
@@ -95,8 +152,8 @@ ns_r2="ip netns exec ${NS_R2}"
# Addresses are:
# - IPv4: PREFIX4.SEGMENT.ID (/24)
# - IPv6: PREFIX6:SEGMENT::ID (/64)
-prefix4="192.168"
-prefix6="fd00"
+prefix4="10.0"
+prefix6="fc00"
a_r1=1
a_r2=2
b_r1=3
@@ -129,12 +186,12 @@ veth6_a_addr="fd00:1::a"
veth6_b_addr="fd00:1::b"
veth6_mask="64"
-vti4_a_addr="192.168.2.1"
-vti4_b_addr="192.168.2.2"
-vti4_mask="24"
-vti6_a_addr="fd00:2::a"
-vti6_b_addr="fd00:2::b"
-vti6_mask="64"
+tunnel4_a_addr="192.168.2.1"
+tunnel4_b_addr="192.168.2.2"
+tunnel4_mask="24"
+tunnel6_a_addr="fd00:2::a"
+tunnel6_b_addr="fd00:2::b"
+tunnel6_mask="64"
dummy6_0_addr="fc00:1000::0"
dummy6_1_addr="fc00:1001::0"
@@ -159,6 +216,89 @@ nsname() {
eval echo \$NS_$1
}
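+
+# Set up a FoU or GUE tunnel between namespaces A and B: ${1} selects the
+# outer IP family, ${2} the inner family (which picks ipip, sit or ip6tnl
+# as the tunnel type), and ${3} the encapsulation, fou or gue.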
+setup_fou_or_gue() {
+ outer="${1}"
+ inner="${2}"
+ encap="${3}"
+
+ if [ "${outer}" = "4" ]; then
+ modprobe fou || return 2
+ a_addr="${prefix4}.${a_r1}.1"
+ b_addr="${prefix4}.${b_r1}.1"
+ if [ "${inner}" = "4" ]; then
+ type="ipip"
+ ipproto="4"
+ else
+ type="sit"
+ ipproto="41"
+ fi
+ else
+ modprobe fou6 || return 2
+ a_addr="${prefix6}:${a_r1}::1"
+ b_addr="${prefix6}:${b_r1}::1"
+ if [ "${inner}" = "4" ]; then
+ type="ip6tnl"
+ mode="mode ipip6"
+ ipproto="4 -6"
+ else
+ type="ip6tnl"
+ mode="mode ip6ip6"
+ ipproto="41 -6"
+ fi
+ fi
+
+ ${ns_a} ip fou add port 5555 ipproto ${ipproto} || return 2
+ ${ns_a} ip link add ${encap}_a type ${type} ${mode} local ${a_addr} remote ${b_addr} encap ${encap} encap-sport auto encap-dport 5556 || return 2
+
+ ${ns_b} ip fou add port 5556 ipproto ${ipproto}
+ ${ns_b} ip link add ${encap}_b type ${type} ${mode} local ${b_addr} remote ${a_addr} encap ${encap} encap-sport auto encap-dport 5555
+
+ if [ "${inner}" = "4" ]; then
+ ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${encap}_a
+ ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${encap}_b
+ else
+ ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${encap}_a
+ ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${encap}_b
+ fi
+
+ ${ns_a} ip link set ${encap}_a up
+ ${ns_b} ip link set ${encap}_b up
+
+ sleep 1
+}
+
+setup_fou44() {
+ setup_fou_or_gue 4 4 fou
+}
+
+setup_fou46() {
+ setup_fou_or_gue 4 6 fou
+}
+
+setup_fou64() {
+ setup_fou_or_gue 6 4 fou
+}
+
+setup_fou66() {
+ setup_fou_or_gue 6 6 fou
+}
+
+setup_gue44() {
+ setup_fou_or_gue 4 4 gue
+}
+
+setup_gue46() {
+ setup_fou_or_gue 4 6 gue
+}
+
+setup_gue64() {
+ setup_fou_or_gue 6 4 gue
+}
+
+setup_gue66() {
+ setup_fou_or_gue 6 6 gue
+}
+
setup_namespaces() {
for n in ${NS_A} ${NS_B} ${NS_R1} ${NS_R2}; do
ip netns add ${n} || return 1
@@ -202,11 +342,57 @@ setup_vti() {
}
setup_vti4() {
- setup_vti 4 ${veth4_a_addr} ${veth4_b_addr} ${vti4_a_addr} ${vti4_b_addr} ${vti4_mask}
+ setup_vti 4 ${veth4_a_addr} ${veth4_b_addr} ${tunnel4_a_addr} ${tunnel4_b_addr} ${tunnel4_mask}
}
setup_vti6() {
- setup_vti 6 ${veth6_a_addr} ${veth6_b_addr} ${vti6_a_addr} ${vti6_b_addr} ${vti6_mask}
+ setup_vti 6 ${veth6_a_addr} ${veth6_b_addr} ${tunnel6_a_addr} ${tunnel6_b_addr} ${tunnel6_mask}
+}
+
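+# Set up a VXLAN or GENEVE tunnel between namespaces A and B. VXLAN is
+# given an explicit local address, TTL and destination port, while the
+# GENEVE endpoints are created without them.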
+setup_vxlan_or_geneve() {
+ type="${1}"
+ a_addr="${2}"
+ b_addr="${3}"
+ opts="${4}"
+
+ if [ "${type}" = "vxlan" ]; then
+ opts="${opts} ttl 64 dstport 4789"
+ opts_a="local ${a_addr}"
+ opts_b="local ${b_addr}"
+ else
+ opts_a=""
+ opts_b=""
+ fi
+
+ ${ns_a} ip link add ${type}_a type ${type} id 1 ${opts_a} remote ${b_addr} ${opts} || return 1
+ ${ns_b} ip link add ${type}_b type ${type} id 1 ${opts_b} remote ${a_addr} ${opts}
+
+ ${ns_a} ip addr add ${tunnel4_a_addr}/${tunnel4_mask} dev ${type}_a
+ ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b
+
+ ${ns_a} ip addr add ${tunnel6_a_addr}/${tunnel6_mask} dev ${type}_a
+ ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${type}_b
+
+ ${ns_a} ip link set ${type}_a up
+ ${ns_b} ip link set ${type}_b up
+
+ sleep 1
+}
+
+setup_geneve4() {
+ setup_vxlan_or_geneve geneve ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "df set"
+}
+
+setup_vxlan4() {
+ setup_vxlan_or_geneve vxlan ${prefix4}.${a_r1}.1 ${prefix4}.${b_r1}.1 "df set"
+}
+
+setup_geneve6() {
+ setup_vxlan_or_geneve geneve ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1
+}
+
+setup_vxlan6() {
+ setup_vxlan_or_geneve vxlan ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1
}
setup_xfrm() {
@@ -465,6 +651,161 @@ test_pmtu_ipv6_exception() {
test_pmtu_ipvX 6
}
+test_pmtu_ipvX_over_vxlanY_or_geneveY_exception() {
+ type=${1}
+ family=${2}
+ outer_family=${3}
+ ll_mtu=4000
+
+ if [ ${outer_family} -eq 4 ]; then
+ setup namespaces routing ${type}4 || return 2
+ # IPv4 header UDP header VXLAN/GENEVE header Ethernet header
+ exp_mtu=$((${ll_mtu} - 20 - 8 - 8 - 14))
+ else
+ setup namespaces routing ${type}6 || return 2
+ # IPv6 header UDP header VXLAN/GENEVE header Ethernet header
+ exp_mtu=$((${ll_mtu} - 40 - 8 - 8 - 14))
+ fi
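+ # For example, with ll_mtu=4000 and an IPv4 underlay, the expected
+ # PMTU is 4000 - 20 - 8 - 8 - 14 = 3950.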
+
+ trace "${ns_a}" ${type}_a "${ns_b}" ${type}_b \
+ "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B
+
+ if [ ${family} -eq 4 ]; then
+ ping=ping
+ dst=${tunnel4_b_addr}
+ else
+ ping=${ping6}
+ dst=${tunnel6_b_addr}
+ fi
+
+ # Create route exception by exceeding link layer MTU
+ mtu "${ns_a}" veth_A-R1 $((${ll_mtu} + 1000))
+ mtu "${ns_r1}" veth_R1-A $((${ll_mtu} + 1000))
+ mtu "${ns_b}" veth_B-R1 ${ll_mtu}
+ mtu "${ns_r1}" veth_R1-B ${ll_mtu}
+
+ mtu "${ns_a}" ${type}_a $((${ll_mtu} + 1000))
+ mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000))
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+
+ # Check that exception was created
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
+ check_pmtu_value ${exp_mtu} "${pmtu}" "exceeding link layer MTU on ${type} interface"
+}
+
+test_pmtu_ipv4_vxlan4_exception() {
+ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception vxlan 4 4
+}
+
+test_pmtu_ipv6_vxlan4_exception() {
+ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception vxlan 6 4
+}
+
+test_pmtu_ipv4_geneve4_exception() {
+ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception geneve 4 4
+}
+
+test_pmtu_ipv6_geneve4_exception() {
+ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception geneve 6 4
+}
+
+test_pmtu_ipv4_vxlan6_exception() {
+ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception vxlan 4 6
+}
+
+test_pmtu_ipv6_vxlan6_exception() {
+ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception vxlan 6 6
+}
+
+test_pmtu_ipv4_geneve6_exception() {
+ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception geneve 4 6
+}
+
+test_pmtu_ipv6_geneve6_exception() {
+ test_pmtu_ipvX_over_vxlanY_or_geneveY_exception geneve 6 6
+}
+
+test_pmtu_ipvX_over_fouY_or_gueY() {
+ inner_family=${1}
+ outer_family=${2}
+ encap=${3}
+ ll_mtu=4000
+
+ setup namespaces routing ${encap}${outer_family}${inner_family} || return 2
+ trace "${ns_a}" ${encap}_a "${ns_b}" ${encap}_b \
+ "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \
+ "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B
+
+ if [ ${inner_family} -eq 4 ]; then
+ ping=ping
+ dst=${tunnel4_b_addr}
+ else
+ ping=${ping6}
+ dst=${tunnel6_b_addr}
+ fi
+
+ if [ "${encap}" = "gue" ]; then
+ encap_overhead=4
+ else
+ encap_overhead=0
+ fi
+
+ if [ ${outer_family} -eq 4 ]; then
+ # IPv4 header UDP header
+ exp_mtu=$((${ll_mtu} - 20 - 8 - ${encap_overhead}))
+ else
+ # IPv6 header Option 4 UDP header
+ exp_mtu=$((${ll_mtu} - 40 - 8 - 8 - ${encap_overhead}))
+ fi
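+ # For example, with ll_mtu=4000, FoU over IPv4 expects a PMTU of
+ # 4000 - 20 - 8 = 3972, and GUE over IPv4 expects 4 bytes less, 3968.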
+
+ # Create route exception by exceeding link layer MTU
+ mtu "${ns_a}" veth_A-R1 $((${ll_mtu} + 1000))
+ mtu "${ns_r1}" veth_R1-A $((${ll_mtu} + 1000))
+ mtu "${ns_b}" veth_B-R1 ${ll_mtu}
+ mtu "${ns_r1}" veth_R1-B ${ll_mtu}
+
+ mtu "${ns_a}" ${encap}_a $((${ll_mtu} + 1000))
+ mtu "${ns_b}" ${encap}_b $((${ll_mtu} + 1000))
+ ${ns_a} ${ping} -q -M want -i 0.1 -w 2 -s $((${ll_mtu} + 500)) ${dst} > /dev/null
+
+ # Check that exception was created
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst})"
+ check_pmtu_value ${exp_mtu} "${pmtu}" "exceeding link layer MTU on ${encap} interface"
+}
+
+test_pmtu_ipv4_fou4_exception() {
+ test_pmtu_ipvX_over_fouY_or_gueY 4 4 fou
+}
+
+test_pmtu_ipv6_fou4_exception() {
+ test_pmtu_ipvX_over_fouY_or_gueY 6 4 fou
+}
+
+test_pmtu_ipv4_fou6_exception() {
+ test_pmtu_ipvX_over_fouY_or_gueY 4 6 fou
+}
+
+test_pmtu_ipv6_fou6_exception() {
+ test_pmtu_ipvX_over_fouY_or_gueY 6 6 fou
+}
+
+test_pmtu_ipv4_gue4_exception() {
+ test_pmtu_ipvX_over_fouY_or_gueY 4 4 gue
+}
+
+test_pmtu_ipv6_gue4_exception() {
+ test_pmtu_ipvX_over_fouY_or_gueY 6 4 gue
+}
+
+test_pmtu_ipv4_gue6_exception() {
+ test_pmtu_ipvX_over_fouY_or_gueY 4 6 gue
+}
+
+test_pmtu_ipv6_gue6_exception() {
+ test_pmtu_ipvX_over_fouY_or_gueY 6 6 gue
+}
+
test_pmtu_vti4_exception() {
setup namespaces veth vti4 xfrm4 || return 2
trace "${ns_a}" veth_a "${ns_b}" veth_b \
@@ -484,14 +825,14 @@ test_pmtu_vti4_exception() {
# Send DF packet without exceeding link layer MTU, check that no
# exception is created
- ${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${vti4_b_addr} > /dev/null
- pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})"
+ ${ns_a} ping -q -M want -i 0.1 -w 2 -s ${ping_payload} ${tunnel4_b_addr} > /dev/null
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
check_pmtu_value "" "${pmtu}" "sending packet smaller than PMTU (IP payload length ${esp_payload_rfc4106})" || return 1
# Now exceed link layer MTU by one byte, check that exception is created
# with the right PMTU value
- ${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${vti4_b_addr} > /dev/null
- pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti4_b_addr})"
+ ${ns_a} ping -q -M want -i 0.1 -w 2 -s $((ping_payload + 1)) ${tunnel4_b_addr} > /dev/null
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel4_b_addr})"
check_pmtu_value "${esp_payload_rfc4106}" "${pmtu}" "exceeding PMTU (IP payload length $((esp_payload_rfc4106 + 1)))"
}
@@ -506,20 +847,20 @@ test_pmtu_vti6_exception() {
mtu "${ns_b}" veth_b 4000
mtu "${ns_a}" vti6_a 5000
mtu "${ns_b}" vti6_b 5000
- ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
+ ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${tunnel6_b_addr} > /dev/null
# Check that exception was created
- pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
check_pmtu_value any "${pmtu}" "creating tunnel exceeding link layer MTU" || return 1
# Decrease tunnel MTU, check for PMTU decrease in route exception
mtu "${ns_a}" vti6_a 3000
- pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
check_pmtu_value "3000" "${pmtu}" "decreasing tunnel MTU" || fail=1
# Increase tunnel MTU, check for PMTU increase in route exception
mtu "${ns_a}" vti6_a 9000
- pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})"
+ pmtu="$(route_get_dst_pmtu_from_exception "${ns_a}" ${tunnel6_b_addr})"
check_pmtu_value "9000" "${pmtu}" "increasing tunnel MTU" || fail=1
return ${fail}
diff --git a/tools/testing/selftests/net/reuseport_addr_any.c b/tools/testing/selftests/net/reuseport_addr_any.c
new file mode 100644
index 000000000000..c6233935fed1
--- /dev/null
+++ b/tools/testing/selftests/net/reuseport_addr_any.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Test that sockets listening on a specific address are preferred
+ * over sockets listening on addr_any.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <error.h>
+#include <linux/dccp.h>
+#include <linux/in.h>
+#include <linux/unistd.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/epoll.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+static const char *IP4_ADDR = "127.0.0.1";
+static const char *IP6_ADDR = "::1";
+static const char *IP4_MAPPED6 = "::ffff:127.0.0.1";
+
+static const int PORT = 8888;
+
+static void build_rcv_fd(int family, int proto, int *rcv_fds, int count,
+ const char *addr_str)
+{
+ struct sockaddr_in addr4 = {0};
+ struct sockaddr_in6 addr6 = {0};
+ struct sockaddr *addr;
+ int opt, i, sz;
+
+ switch (family) {
+ case AF_INET:
+ addr4.sin_family = family;
+ if (!addr_str)
+ addr4.sin_addr.s_addr = htonl(INADDR_ANY);
+ else if (!inet_pton(family, addr_str, &addr4.sin_addr.s_addr))
+ error(1, errno, "inet_pton failed: %s", addr_str);
+ addr4.sin_port = htons(PORT);
+ sz = sizeof(addr4);
+ addr = (struct sockaddr *)&addr4;
+ break;
+ case AF_INET6:
+ addr6.sin6_family = AF_INET6;
+ if (!addr_str)
+ addr6.sin6_addr = in6addr_any;
+ else if (!inet_pton(family, addr_str, &addr6.sin6_addr))
+ error(1, errno, "inet_pton failed: %s", addr_str);
+ addr6.sin6_port = htons(PORT);
+ sz = sizeof(addr6);
+ addr = (struct sockaddr *)&addr6;
+ break;
+ default:
+ error(1, 0, "Unsupported family %d", family);
+ /* clang does not recognize error() above as terminating
+ * the program, so it complains that addr and sz are
+ * not initialized when this code path is taken. Silence it.
+ */
+ return;
+ }
+
+ for (i = 0; i < count; ++i) {
+ rcv_fds[i] = socket(family, proto, 0);
+ if (rcv_fds[i] < 0)
+ error(1, errno, "failed to create receive socket");
+
+ opt = 1;
+ if (setsockopt(rcv_fds[i], SOL_SOCKET, SO_REUSEPORT, &opt,
+ sizeof(opt)))
+ error(1, errno, "failed to set SO_REUSEPORT");
+
+ if (bind(rcv_fds[i], addr, sz))
+ error(1, errno, "failed to bind receive socket");
+
+ if (proto == SOCK_STREAM && listen(rcv_fds[i], 10))
+ error(1, errno, "tcp: failed to listen on receive port");
+ else if (proto == SOCK_DCCP) {
+ if (setsockopt(rcv_fds[i], SOL_DCCP,
+ DCCP_SOCKOPT_SERVICE,
+ &(int) {htonl(42)}, sizeof(int)))
+ error(1, errno, "failed to setsockopt");
+
+ if (listen(rcv_fds[i], 10))
+ error(1, errno, "dccp: failed to listen on receive port");
+ }
+ }
+}
+
+static int connect_and_send(int family, int proto)
+{
+ struct sockaddr_in saddr4 = {0};
+ struct sockaddr_in daddr4 = {0};
+ struct sockaddr_in6 saddr6 = {0};
+ struct sockaddr_in6 daddr6 = {0};
+ struct sockaddr *saddr, *daddr;
+ int fd, sz;
+
+ switch (family) {
+ case AF_INET:
+ saddr4.sin_family = AF_INET;
+ saddr4.sin_addr.s_addr = htonl(INADDR_ANY);
+ saddr4.sin_port = 0;
+
+ daddr4.sin_family = AF_INET;
+ if (!inet_pton(family, IP4_ADDR, &daddr4.sin_addr.s_addr))
+ error(1, errno, "inet_pton failed: %s", IP4_ADDR);
+ daddr4.sin_port = htons(PORT);
+
+ sz = sizeof(saddr4);
+ saddr = (struct sockaddr *)&saddr4;
+ daddr = (struct sockaddr *)&daddr4;
+ break;
+ case AF_INET6:
+ saddr6.sin6_family = AF_INET6;
+ saddr6.sin6_addr = in6addr_any;
+
+ daddr6.sin6_family = AF_INET6;
+ if (!inet_pton(family, IP6_ADDR, &daddr6.sin6_addr))
+ error(1, errno, "inet_pton failed: %s", IP6_ADDR);
+ daddr6.sin6_port = htons(PORT);
+
+ sz = sizeof(saddr6);
+ saddr = (struct sockaddr *)&saddr6;
+ daddr = (struct sockaddr *)&daddr6;
+ break;
+ default:
+ error(1, 0, "Unsupported family %d", family);
+ /* clang does not recognize error() above as terminating
+ * the program, so it complains that saddr, daddr, sz are
+ * not initialized when this code path is taken. Silence it.
+ */
+ return -1;
+ }
+
+ fd = socket(family, proto, 0);
+ if (fd < 0)
+ error(1, errno, "failed to create send socket");
+
+ if (proto == SOCK_DCCP &&
+ setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
+ &(int){htonl(42)}, sizeof(int)))
+ error(1, errno, "failed to setsockopt");
+
+ if (bind(fd, saddr, sz))
+ error(1, errno, "failed to bind send socket");
+
+ if (connect(fd, daddr, sz))
+ error(1, errno, "failed to connect send socket");
+
+ if (send(fd, "a", 1, 0) < 0)
+ error(1, errno, "failed to send message");
+
+ return fd;
+}
+
+static int receive_once(int epfd, int proto)
+{
+ struct epoll_event ev;
+ int i, fd;
+ char buf[8];
+
+ i = epoll_wait(epfd, &ev, 1, 3);
+ if (i < 0)
+ error(1, errno, "epoll_wait failed");
+
+ if (proto == SOCK_STREAM || proto == SOCK_DCCP) {
+ fd = accept(ev.data.fd, NULL, NULL);
+ if (fd < 0)
+ error(1, errno, "failed to accept");
+ i = recv(fd, buf, sizeof(buf), 0);
+ close(fd);
+ } else {
+ i = recv(ev.data.fd, buf, sizeof(buf), 0);
+ }
+
+ if (i < 0)
+ error(1, errno, "failed to recv");
+
+ return ev.data.fd;
+}
+
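+/* Register all receive sockets in a single epoll set, send one message
+ * and verify that it is delivered to the expected socket (fd).
+ */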
+static void test(int *rcv_fds, int count, int family, int proto, int fd)
+{
+ struct epoll_event ev;
+ int epfd, i, send_fd, recv_fd;
+
+ epfd = epoll_create(1);
+ if (epfd < 0)
+ error(1, errno, "failed to create epoll");
+
+ ev.events = EPOLLIN;
+ for (i = 0; i < count; ++i) {
+ ev.data.fd = rcv_fds[i];
+ if (epoll_ctl(epfd, EPOLL_CTL_ADD, rcv_fds[i], &ev))
+ error(1, errno, "failed to register sock epoll");
+ }
+
+ send_fd = connect_and_send(family, proto);
+
+ recv_fd = receive_once(epfd, proto);
+ if (recv_fd != fd)
+ error(1, 0, "received on an unexpected socket");
+
+ close(send_fd);
+ close(epfd);
+}
+
+static void run_one_test(int fam_send, int fam_rcv, int proto,
+ const char *addr_str)
+{
+ /* Below we test that a socket listening on a specific address
+ * is always selected in preference over a socket listening
+ * on addr_any. Bugs where this is not the case often result
+ * in sockets created first or last to get picked. So below
+ * we make sure that there are always addr_any sockets created
+ * before and after a specific socket is created.
+ */
+ int rcv_fds[10], i;
+
+ build_rcv_fd(AF_INET, proto, rcv_fds, 2, NULL);
+ build_rcv_fd(AF_INET6, proto, rcv_fds + 2, 2, NULL);
+ build_rcv_fd(fam_rcv, proto, rcv_fds + 4, 1, addr_str);
+ build_rcv_fd(AF_INET, proto, rcv_fds + 5, 2, NULL);
+ build_rcv_fd(AF_INET6, proto, rcv_fds + 7, 2, NULL);
+ test(rcv_fds, 9, fam_send, proto, rcv_fds[4]);
+ for (i = 0; i < 9; ++i)
+ close(rcv_fds[i]);
+ fprintf(stderr, "pass\n");
+}
+
+static void test_proto(int proto, const char *proto_str)
+{
+ if (proto == SOCK_DCCP) {
+ int test_fd;
+
+ test_fd = socket(AF_INET, proto, 0);
+ if (test_fd < 0) {
+ if (errno == ESOCKTNOSUPPORT) {
+ fprintf(stderr, "DCCP not supported: skipping DCCP tests\n");
+ return;
+ } else
+ error(1, errno, "failed to create a DCCP socket");
+ }
+ close(test_fd);
+ }
+
+ fprintf(stderr, "%s IPv4 ... ", proto_str);
+ run_one_test(AF_INET, AF_INET, proto, IP4_ADDR);
+
+ fprintf(stderr, "%s IPv6 ... ", proto_str);
+ run_one_test(AF_INET6, AF_INET6, proto, IP6_ADDR);
+
+ fprintf(stderr, "%s IPv4 mapped to IPv6 ... ", proto_str);
+ run_one_test(AF_INET, AF_INET6, proto, IP4_MAPPED6);
+}
+
+int main(void)
+{
+ test_proto(SOCK_DGRAM, "UDP");
+ test_proto(SOCK_STREAM, "TCP");
+ test_proto(SOCK_DCCP, "DCCP");
+
+ fprintf(stderr, "SUCCESS\n");
+ return 0;
+}
diff --git a/tools/testing/selftests/net/reuseport_addr_any.sh b/tools/testing/selftests/net/reuseport_addr_any.sh
new file mode 100755
index 000000000000..104592f62ad4
--- /dev/null
+++ b/tools/testing/selftests/net/reuseport_addr_any.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+./in_netns.sh ./reuseport_addr_any
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index e101af52d1d6..78fc593dfe40 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -205,6 +205,8 @@ kci_test_polrouting()
kci_test_route_get()
{
+ local hash_policy=$(sysctl -n net.ipv4.fib_multipath_hash_policy)
+
ret=0
ip route get 127.0.0.1 > /dev/null
@@ -223,6 +225,19 @@ kci_test_route_get()
check_err $?
ip route get 10.23.7.11 from 10.23.7.12 iif "$devdummy" > /dev/null
check_err $?
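+ # Multipath route lookups should succeed under both the L3 (0) and
+ # L4 (1) multipath hash policies.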
+ ip route add 10.23.8.0/24 \
+ nexthop via 10.23.7.13 dev "$devdummy" \
+ nexthop via 10.23.7.14 dev "$devdummy"
+ check_err $?
+ sysctl -wq net.ipv4.fib_multipath_hash_policy=0
+ ip route get 10.23.8.11 > /dev/null
+ check_err $?
+ sysctl -wq net.ipv4.fib_multipath_hash_policy=1
+ ip route get 10.23.8.11 > /dev/null
+ check_err $?
+ sysctl -wq net.ipv4.fib_multipath_hash_policy="$hash_policy"
+ ip route del 10.23.8.0/24
+ check_err $?
ip addr del dev "$devdummy" 10.23.7.11/24
check_err $?
@@ -955,6 +970,111 @@ kci_test_ip6erspan()
ip netns del "$testns"
}
+kci_test_fdb_get()
+{
+ IP="ip -netns testns"
+ BRIDGE="bridge -netns testns"
+ brdev="test-br0"
+ vxlandev="vxlan10"
+ test_mac=de:ad:be:ef:13:37
+ localip="10.0.2.2"
+ dstip="10.0.2.3"
+ ret=0
+
+ bridge fdb help 2>&1 |grep -q 'bridge fdb get'
+ if [ $? -ne 0 ];then
+ echo "SKIP: fdb get tests: iproute2 too old"
+ return $ksft_skip
+ fi
+
+ ip netns add testns
+ if [ $? -ne 0 ]; then
+ echo "SKIP fdb get tests: cannot add net namespace $testns"
+ return $ksft_skip
+ fi
+
+ $IP link add "$vxlandev" type vxlan id 10 local $localip \
+ dstport 4789 2>/dev/null
+ check_err $?
+ $IP link add name "$brdev" type bridge &>/dev/null
+ check_err $?
+ $IP link set dev "$vxlandev" master "$brdev" &>/dev/null
+ check_err $?
+ $BRIDGE fdb add $test_mac dev "$vxlandev" master &>/dev/null
+ check_err $?
+ $BRIDGE fdb add $test_mac dev "$vxlandev" dst $dstip self &>/dev/null
+ check_err $?
+
+ $BRIDGE fdb get $test_mac brport "$vxlandev" 2>/dev/null | grep -q "dev $vxlandev master $brdev"
+ check_err $?
+ $BRIDGE fdb get $test_mac br "$brdev" 2>/dev/null | grep -q "dev $vxlandev master $brdev"
+ check_err $?
+ $BRIDGE fdb get $test_mac dev "$vxlandev" self 2>/dev/null | grep -q "dev $vxlandev dst $dstip"
+ check_err $?
+
+ ip netns del testns &>/dev/null
+
+ if [ $ret -ne 0 ]; then
+ echo "FAIL: bridge fdb get"
+ return 1
+ fi
+
+ echo "PASS: bridge fdb get"
+}
+
+kci_test_neigh_get()
+{
+ dstmac=de:ad:be:ef:13:37
+ dstip=10.0.2.4
+ dstip6=dead::2
+ ret=0
+
+ ip neigh help 2>&1 |grep -q 'ip neigh get'
+ if [ $? -ne 0 ];then
+ echo "SKIP: fdb get tests: iproute2 too old"
+ return $ksft_skip
+ fi
+
+ # ipv4
+ ip neigh add $dstip lladdr $dstmac dev "$devdummy" > /dev/null
+ check_err $?
+ ip neigh get $dstip dev "$devdummy" 2> /dev/null | grep -q "$dstmac"
+ check_err $?
+ ip neigh del $dstip lladdr $dstmac dev "$devdummy" > /dev/null
+ check_err $?
+
+ # ipv4 proxy
+ ip neigh add proxy $dstip dev "$devdummy" > /dev/null
+ check_err $?
+ ip neigh get proxy $dstip dev "$devdummy" 2>/dev/null | grep -q "$dstip"
+ check_err $?
+ ip neigh del proxy $dstip dev "$devdummy" > /dev/null
+ check_err $?
+
+ # ipv6
+ ip neigh add $dstip6 lladdr $dstmac dev "$devdummy" > /dev/null
+ check_err $?
+ ip neigh get $dstip6 dev "$devdummy" 2> /dev/null | grep -q "$dstmac"
+ check_err $?
+ ip neigh del $dstip6 lladdr $dstmac dev "$devdummy" > /dev/null
+ check_err $?
+
+ # ipv6 proxy
+ ip neigh add proxy $dstip6 dev "$devdummy" > /dev/null
+ check_err $?
+ ip neigh get proxy $dstip6 dev "$devdummy" 2>/dev/null | grep -q "$dstip6"
+ check_err $?
+ ip neigh del proxy $dstip6 dev "$devdummy" > /dev/null
+ check_err $?
+
+ if [ $ret -ne 0 ];then
+ echo "FAIL: neigh get"
+ return 1
+ fi
+
+ echo "PASS: neigh get"
+}
+
kci_test_rtnl()
{
kci_add_dummy
@@ -979,6 +1099,8 @@ kci_test_rtnl()
kci_test_macsec
kci_test_ipsec
kci_test_ipsec_offload
+ kci_test_fdb_get
+ kci_test_neigh_get
kci_del_dummy
}
diff --git a/tools/testing/selftests/net/run_afpackettests b/tools/testing/selftests/net/run_afpackettests
index bea079edc278..2dc95fda7ef7 100755
--- a/tools/testing/selftests/net/run_afpackettests
+++ b/tools/testing/selftests/net/run_afpackettests
@@ -25,3 +25,13 @@ if [ $? -ne 0 ]; then
else
echo "[PASS]"
fi
+
+echo "--------------------"
+echo "running txring_overwrite test"
+echo "--------------------"
+./in_netns.sh ./txring_overwrite
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+else
+ echo "[PASS]"
+fi
diff --git a/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh b/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
new file mode 100755
index 000000000000..2d442cdab11e
--- /dev/null
+++ b/tools/testing/selftests/net/test_vxlan_fdb_changelink.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Check FDB default-remote handling across "ip link set".
+
+check_remotes()
+{
+ local what=$1; shift
+ local N=$(bridge fdb sh dev vx | grep 00:00:00:00:00:00 | wc -l)
+
+ echo -ne "expected two remotes after $what\t"
+ if [[ $N != 2 ]]; then
+ echo "[FAIL]"
+ EXIT_STATUS=1
+ else
+ echo "[ OK ]"
+ fi
+}
+
+ip link add name vx up type vxlan id 2000 dstport 4789
+bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.20 self permanent
+bridge fdb ap dev vx 00:00:00:00:00:00 dst 192.0.2.30 self permanent
+check_remotes "fdb append"
+
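+# Changing the default remote via changelink must neither flush nor
+# duplicate the appended all-zero entries; the count should stay at two.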
+ip link set dev vx type vxlan remote 192.0.2.30
+check_remotes "link set"
+
+ip link del dev vx
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/test_vxlan_under_vrf.sh b/tools/testing/selftests/net/test_vxlan_under_vrf.sh
new file mode 100755
index 000000000000..09f9ed92cbe4
--- /dev/null
+++ b/tools/testing/selftests/net/test_vxlan_under_vrf.sh
@@ -0,0 +1,129 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test is for checking VXLAN underlay in a non-default VRF.
+#
+# It simulates two hypervisors, each running a VM, using four network
+# namespaces: two for the HVs and two for the VMs.
+# A small VXLAN tunnel is made between the two hypervisors to put the two
+# VMs on the same virtual L2:
+#
+# +-------------------+ +-------------------+
+# | | | |
+# | vm-1 netns | | vm-2 netns |
+# | | | |
+# | +-------------+ | | +-------------+ |
+# | | veth-hv | | | | veth-hv | |
+# | | 10.0.0.1/24 | | | | 10.0.0.2/24 | |
+# | +-------------+ | | +-------------+ |
+# | . | | . |
+# +-------------------+ +-------------------+
+# . .
+# . .
+# . .
+# +-----------------------------------+ +------------------------------------+
+# | . | | . |
+# | +----------+ | | +----------+ |
+# | | veth-tap | | | | veth-tap | |
+# | +----+-----+ | | +----+-----+ |
+# | | | | | |
+# | +--+--+ +--------------+ | | +--------------+ +--+--+ |
+# | | br0 | | vrf-underlay | | | | vrf-underlay | | br0 | |
+# | +--+--+ +-------+------+ | | +------+-------+ +--+--+ |
+# | | | | | | | |
+# | +---+----+ +-------+-------+ | | +-------+-------+ +---+----+ |
+# | | vxlan0 |....| veth0 |.|...|.| veth0 |....| vxlan0 | |
+# | +--------+ | 172.16.0.1/24 | | | | 172.16.0.2/24 | +--------+ |
+# | +---------------+ | | +---------------+ |
+# | | | |
+# | hv-1 netns | | hv-2 netns |
+# | | | |
+# +-----------------------------------+ +------------------------------------+
+#
+# This tests both the connectivity between vm-1 and vm-2, and that the underlay
+# can be moved in and out of the vrf by unsetting and setting veth0's master.
+
+set -e
+
+cleanup() {
+ ip link del veth-hv-1 2>/dev/null || true
+ ip link del veth-tap 2>/dev/null || true
+
+ for ns in hv-1 hv-2 vm-1 vm-2; do
+ ip netns del $ns || true
+ done
+}
+
+# Clean start
+cleanup &> /dev/null
+
+[[ $1 == "clean" ]] && exit 0
+
+trap cleanup EXIT
+
+# Setup "Hypervisors" simulated with netns
+ip link add veth-hv-1 type veth peer name veth-hv-2
+setup-hv-networking() {
+ hv=$1
+
+ ip netns add hv-$hv
+ ip link set veth-hv-$hv netns hv-$hv
+ ip -netns hv-$hv link set veth-hv-$hv name veth0
+
+ ip -netns hv-$hv link add vrf-underlay type vrf table 1
+ ip -netns hv-$hv link set vrf-underlay up
+ ip -netns hv-$hv addr add 172.16.0.$hv/24 dev veth0
+ ip -netns hv-$hv link set veth0 up
+
+ ip -netns hv-$hv link add br0 type bridge
+ ip -netns hv-$hv link set br0 up
+
+ ip -netns hv-$hv link add vxlan0 type vxlan id 10 local 172.16.0.$hv dev veth0 dstport 4789
+ ip -netns hv-$hv link set vxlan0 master br0
+ ip -netns hv-$hv link set vxlan0 up
+}
+setup-hv-networking 1
+setup-hv-networking 2
+
+# Check connectivity between HVs by pinging hv-2 from hv-1
+echo -n "Checking HV connectivity "
+ip netns exec hv-1 ping -c 1 -W 1 172.16.0.2 &> /dev/null || (echo "[FAIL]"; false)
+echo "[ OK ]"
+
+# Setups a "VM" simulated by a netns an a veth pair
+setup-vm() {
+ id=$1
+
+ ip netns add vm-$id
+ ip link add veth-tap type veth peer name veth-hv
+
+ ip link set veth-tap netns hv-$id
+ ip -netns hv-$id link set veth-tap master br0
+ ip -netns hv-$id link set veth-tap up
+
+ ip link set veth-hv netns vm-$id
+ ip -netns vm-$id addr add 10.0.0.$id/24 dev veth-hv
+ ip -netns vm-$id link set veth-hv up
+}
+setup-vm 1
+setup-vm 2
+
+# Set up default VTEP FDB entries so BUM traffic (including ARP) reaches the peer
+bridge -netns hv-1 fdb add 00:00:00:00:00:00 dev vxlan0 dst 172.16.0.2 self permanent
+bridge -netns hv-2 fdb add 00:00:00:00:00:00 dev vxlan0 dst 172.16.0.1 self permanent
+
+echo -n "Check VM connectivity through VXLAN (underlay in the default VRF) "
+ip netns exec vm-1 ping -c 1 -W 1 10.0.0.2 &> /dev/null || (echo "[FAIL]"; false)
+echo "[ OK ]"
+
+# Move the underlay to a non-default VRF
+ip -netns hv-1 link set veth0 vrf vrf-underlay
+ip -netns hv-1 link set veth0 down
+ip -netns hv-1 link set veth0 up
+ip -netns hv-2 link set veth0 vrf vrf-underlay
+ip -netns hv-2 link set veth0 down
+ip -netns hv-2 link set veth0 up
+
+echo -n "Check VM connectivity through VXLAN (underlay in a VRF) "
+ip netns exec vm-1 ping -c 1 -W 1 10.0.0.2 &> /dev/null || (echo "[FAIL]"; false)
+echo "[ OK ]"
diff --git a/tools/testing/selftests/net/txring_overwrite.c b/tools/testing/selftests/net/txring_overwrite.c
new file mode 100644
index 000000000000..fd8b1c663c39
--- /dev/null
+++ b/tools/testing/selftests/net/txring_overwrite.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Verify that consecutive sends over packet tx_ring are mirrored
+ * with their original content intact.
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <assert.h>
+#include <error.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/filter.h>
+#include <linux/if_packet.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <netinet/udp.h>
+#include <poll.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+const int eth_off = TPACKET_HDRLEN - sizeof(struct sockaddr_ll);
+const int cfg_frame_size = 1000;
+
+static void build_packet(void *buffer, size_t blen, char payload_char)
+{
+ struct udphdr *udph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ size_t off = 0;
+
+ memset(buffer, 0, blen);
+
+ eth = buffer;
+ eth->h_proto = htons(ETH_P_IP);
+
+ off += sizeof(*eth);
+ iph = buffer + off;
+ iph->ttl = 8;
+ iph->ihl = 5;
+ iph->version = 4;
+ iph->saddr = htonl(INADDR_LOOPBACK);
+ iph->daddr = htonl(INADDR_LOOPBACK + 1);
+ iph->protocol = IPPROTO_UDP;
+ iph->tot_len = htons(blen - off);
+ iph->check = 0;
+
+ off += sizeof(*iph);
+ udph = buffer + off;
+ udph->dest = htons(8000);
+ udph->source = htons(8001);
+ udph->len = htons(blen - off);
+ udph->check = 0;
+
+ off += sizeof(*udph);
+ memset(buffer + off, payload_char, blen - off);
+}
+
+static int setup_rx(void)
+{
+ int fdr;
+
+ fdr = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_IP));
+ if (fdr == -1)
+ error(1, errno, "socket r");
+
+ return fdr;
+}
+
+static int setup_tx(char **ring)
+{
+ struct sockaddr_ll laddr = {};
+ struct tpacket_req req = {};
+ int fdt;
+
+ fdt = socket(PF_PACKET, SOCK_RAW, 0);
+ if (fdt == -1)
+ error(1, errno, "socket t");
+
+ laddr.sll_family = AF_PACKET;
+ laddr.sll_protocol = htons(0);
+ laddr.sll_ifindex = if_nametoindex("lo");
+ if (!laddr.sll_ifindex)
+ error(1, errno, "if_nametoindex");
+
+ if (bind(fdt, (void *)&laddr, sizeof(laddr)))
+ error(1, errno, "bind fdt");
+
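+	/* a single one-frame block: both sends in main() reuse this slot */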
+ req.tp_block_size = getpagesize();
+ req.tp_block_nr = 1;
+ req.tp_frame_size = getpagesize();
+ req.tp_frame_nr = 1;
+
+ if (setsockopt(fdt, SOL_PACKET, PACKET_TX_RING,
+ (void *)&req, sizeof(req)))
+ error(1, errno, "setsockopt ring");
+
+ *ring = mmap(0, req.tp_block_size * req.tp_block_nr,
+ PROT_READ | PROT_WRITE, MAP_SHARED, fdt, 0);
+ if (!*ring)
+ error(1, errno, "mmap");
+
+ return fdt;
+}
+
+static void send_pkt(int fdt, void *slot, char payload_char)
+{
+ struct tpacket_hdr *header = slot;
+ int ret;
+
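+	/* wait for the kernel to release the frame from the previous send */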
+ while (header->tp_status != TP_STATUS_AVAILABLE)
+ usleep(1000);
+
+ build_packet(slot + eth_off, cfg_frame_size, payload_char);
+
+ header->tp_len = cfg_frame_size;
+ header->tp_status = TP_STATUS_SEND_REQUEST;
+
+ ret = sendto(fdt, NULL, 0, 0, NULL, 0);
+ if (ret == -1)
+ error(1, errno, "kick tx");
+}
+
+static int read_verify_pkt(int fdr, char payload_char)
+{
+ char buf[100];
+ int ret;
+
+ ret = read(fdr, buf, sizeof(buf));
+ if (ret != sizeof(buf))
+ error(1, errno, "read");
+
+ if (buf[60] != payload_char) {
+ printf("wrong pattern: 0x%x != 0x%x\n", buf[60], payload_char);
+ return 1;
+ }
+
+ printf("read: %c (0x%x)\n", buf[60], buf[60]);
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ const char payload_patterns[] = "ab";
+ char *ring;
+ int fdr, fdt, ret = 0;
+
+ fdr = setup_rx();
+ fdt = setup_tx(&ring);
+
+ send_pkt(fdt, ring, payload_patterns[0]);
+ send_pkt(fdt, ring, payload_patterns[1]);
+
+ ret |= read_verify_pkt(fdr, payload_patterns[0]);
+ ret |= read_verify_pkt(fdr, payload_patterns[1]);
+
+ if (close(fdt))
+ error(1, errno, "close t");
+ if (close(fdr))
+ error(1, errno, "close r");
+
+ return ret;
+}
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
new file mode 100755
index 000000000000..aeac53a99aeb
--- /dev/null
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -0,0 +1,182 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run a series of udpgro functional tests.
+
+readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+
+cleanup() {
+ local -r jobs="$(jobs -p)"
+ local -r ns="$(ip netns list|grep $PEER_NS)"
+
+ [ -n "${jobs}" ] && kill -1 ${jobs} 2>/dev/null
+ [ -n "$ns" ] && ip netns del $ns 2>/dev/null
+}
+trap cleanup EXIT
+
+cfg_veth() {
+ ip netns add "${PEER_NS}"
+ ip -netns "${PEER_NS}" link set lo up
+ ip link add type veth
+ ip link set dev veth0 up
+ ip addr add dev veth0 192.168.1.2/24
+ ip addr add dev veth0 2001:db8::2/64 nodad
+
+ ip link set dev veth1 netns "${PEER_NS}"
+ ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24
+ ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
+ ip -netns "${PEER_NS}" link set dev veth1 up
+ ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+}
+
+run_one() {
+ # use 'rx' as separator between sender args and receiver args
+ local -r all="$@"
+ local -r tx_args=${all%rx*}
+ local -r rx_args=${all#*rx}
+
+ cfg_veth
+
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+ echo "ok" || \
+ echo "failed" &
+
+ # Hack: let bg programs complete the startup
+ sleep 0.1
+ ./udpgso_bench_tx ${tx_args}
+ wait $(jobs -p)
+}
+
+run_test() {
+ local -r args=$@
+
+ printf " %-40s" "$1"
+ ./in_netns.sh $0 __subprocess $2 rx -G -r $3
+}
+
+run_one_nat() {
+ # use 'rx' as separator between sender args and receiver args
+ local addr1 addr2 pid family="" ipt_cmd=ip6tables
+ local -r all="$@"
+ local -r tx_args=${all%rx*}
+ local -r rx_args=${all#*rx}
+
+ if [[ ${tx_args} = *-4* ]]; then
+ ipt_cmd=iptables
+ family=-4
+ addr1=192.168.1.1
+ addr2=192.168.1.3/24
+ else
+ addr1=2001:db8::1
+ addr2="2001:db8::3/64 nodad"
+ fi
+
+ cfg_veth
+ ip -netns "${PEER_NS}" addr add dev veth1 ${addr2}
+
+	# fool the GRO engine by changing the destination address ...
+ ip netns exec "${PEER_NS}" $ipt_cmd -t nat -I PREROUTING -d ${addr1} -j DNAT --to-destination ${addr2%/*}
+
+ # ... so that GRO will match the UDP_GRO enabled socket, but packets
+ # will land on the 'plain' one
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
+ pid=$!
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${family} -b ${addr2%/*} ${rx_args} && \
+ echo "ok" || \
+ echo "failed"&
+
+ sleep 0.1
+ ./udpgso_bench_tx ${tx_args}
+ kill -INT $pid
+ wait $(jobs -p)
+}
+
+run_one_2sock() {
+ # use 'rx' as separator between sender args and receiver args
+ local -r all="$@"
+ local -r tx_args=${all%rx*}
+ local -r rx_args=${all#*rx}
+
+ cfg_veth
+
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -p 12345 &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} && \
+ echo "ok" || \
+ echo "failed" &
+
+ # Hack: let bg programs complete the startup
+ sleep 0.1
+ ./udpgso_bench_tx ${tx_args} -p 12345
+ sleep 0.1
+ # first UDP GSO socket should be closed at this point
+ ./udpgso_bench_tx ${tx_args}
+ wait $(jobs -p)
+}
+
+run_nat_test() {
+ local -r args=$@
+
+ printf " %-40s" "$1"
+ ./in_netns.sh $0 __subprocess_nat $2 rx -r $3
+}
+
+run_2sock_test() {
+ local -r args=$@
+
+ printf " %-40s" "$1"
+ ./in_netns.sh $0 __subprocess_2sock $2 rx -G -r $3
+}
+
+run_all() {
+ local -r core_args="-l 4"
+ local -r ipv4_args="${core_args} -4 -D 192.168.1.1"
+ local -r ipv6_args="${core_args} -6 -D 2001:db8::1"
+
+ echo "ipv4"
+ run_test "no GRO" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400"
+
+ # explicitly check we are not receiving UDP_SEGMENT cmsg (-S -1)
+ # when GRO does not take place
+ run_test "no GRO chk cmsg" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400 -S -1"
+
+ # the GSO packets are aggregated because:
+ # * veth schedule napi after each xmit
+ # * segmentation happens in BH context, veth napi poll is delayed after
+ # the transmission of the last segment
+ run_test "GRO" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720"
+ run_test "GRO chk cmsg" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
+ run_test "GRO with custom segment size" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720"
+ run_test "GRO with custom segment size cmsg" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720 -S 500"
+
+ run_nat_test "bad GRO lookup" "${ipv4_args} -M 1 -s 14720 -S 0" "-n 10 -l 1472"
+ run_2sock_test "multiple GRO socks" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
+
+ echo "ipv6"
+ run_test "no GRO" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400"
+ run_test "no GRO chk cmsg" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400 -S -1"
+ run_test "GRO" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520"
+ run_test "GRO chk cmsg" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520 -S 1452"
+ run_test "GRO with custom segment size" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520"
+ run_test "GRO with custom segment size cmsg" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520 -S 500"
+
+ run_nat_test "bad GRO lookup" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 10 -l 1452"
+ run_2sock_test "multiple GRO socks" "${ipv6_args} -M 1 -s 14520 -S 0 " "-n 1 -l 14520 -S 1452"
+}
+
+if [ ! -f ../bpf/xdp_dummy.o ]; then
+ echo "Missing xdp_dummy helper. Build bpf selftest first"
+ exit -1
+fi
+
+if [[ $# -eq 0 ]]; then
+ run_all
+elif [[ $1 == "__subprocess" ]]; then
+ shift
+ run_one $@
+elif [[ $1 == "__subprocess_nat" ]]; then
+ shift
+ run_one_nat $@
+elif [[ $1 == "__subprocess_2sock" ]]; then
+ shift
+ run_one_2sock $@
+fi
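
Editor's note: the sender/receiver argument split in run_one() and friends relies on bash prefix/suffix stripping around the literal 'rx' separator. A minimal standalone sketch:

    all="-l 4 -4 -D 192.168.1.1 -M 1 -s 14720 rx -4 -n 1 -l 14720"
    tx_args=${all%rx*}   # text before 'rx': "-l 4 -4 -D 192.168.1.1 -M 1 -s 14720 "
    rx_args=${all#*rx}   # text after 'rx':  " -4 -n 1 -l 14720"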
diff --git a/tools/testing/selftests/net/udpgro_bench.sh b/tools/testing/selftests/net/udpgro_bench.sh
new file mode 100755
index 000000000000..820bc50f6b68
--- /dev/null
+++ b/tools/testing/selftests/net/udpgro_bench.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Run a series of udpgro benchmarks
+
+readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
+
+cleanup() {
+ local -r jobs="$(jobs -p)"
+ local -r ns="$(ip netns list|grep $PEER_NS)"
+
+ [ -n "${jobs}" ] && kill -INT ${jobs} 2>/dev/null
+ [ -n "$ns" ] && ip netns del $ns 2>/dev/null
+}
+trap cleanup EXIT
+
+run_one() {
+ # use 'rx' as separator between sender args and receiver args
+ local -r all="$@"
+ local -r tx_args=${all%rx*}
+ local rx_args=${all#*rx}
+
+ [[ "${tx_args}" == *"-4"* ]] && rx_args="${rx_args} -4"
+
+ ip netns add "${PEER_NS}"
+ ip -netns "${PEER_NS}" link set lo up
+ ip link add type veth
+ ip link set dev veth0 up
+ ip addr add dev veth0 192.168.1.2/24
+ ip addr add dev veth0 2001:db8::2/64 nodad
+
+ ip link set dev veth1 netns "${PEER_NS}"
+ ip -netns "${PEER_NS}" addr add dev veth1 192.168.1.1/24
+ ip -netns "${PEER_NS}" addr add dev veth1 2001:db8::1/64 nodad
+ ip -netns "${PEER_NS}" link set dev veth1 up
+
+ ip -n "${PEER_NS}" link set veth1 xdp object ../bpf/xdp_dummy.o section xdp_dummy
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx ${rx_args} -r &
+ ip netns exec "${PEER_NS}" ./udpgso_bench_rx -t ${rx_args} -r &
+
+ # Hack: let bg programs complete the startup
+ sleep 0.1
+ ./udpgso_bench_tx ${tx_args}
+}
+
+run_in_netns() {
+ local -r args=$@
+
+ ./in_netns.sh $0 __subprocess ${args}
+}
+
+run_udp() {
+ local -r args=$@
+
+ echo "udp gso - over veth touching data"
+ run_in_netns ${args} -S 0 rx
+
+ echo "udp gso and gro - over veth touching data"
+ run_in_netns ${args} -S 0 rx -G
+}
+
+run_tcp() {
+ local -r args=$@
+
+ echo "tcp - over veth touching data"
+ run_in_netns ${args} -t rx
+}
+
+run_all() {
+ local -r core_args="-l 4"
+ local -r ipv4_args="${core_args} -4 -D 192.168.1.1"
+ local -r ipv6_args="${core_args} -6 -D 2001:db8::1"
+
+ echo "ipv4"
+ run_tcp "${ipv4_args}"
+ run_udp "${ipv4_args}"
+
+ echo "ipv6"
+ run_tcp "${ipv4_args}"
+ run_udp "${ipv6_args}"
+}
+
+if [ ! -f ../bpf/xdp_dummy.o ]; then
+ echo "Missing xdp_dummy helper. Build bpf selftest first"
+ exit -1
+fi
+
+if [[ $# -eq 0 ]]; then
+ run_all
+elif [[ $1 == "__subprocess" ]]; then
+ shift
+ run_one $@
+else
+ run_in_netns $@
+fi
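
Editor's note: both GRO scripts bail out unless the dummy XDP object from the bpf selftests is present. Assuming a kernel source checkout, it can be produced with something like:

    make -C tools/testing/selftests TARGETS=bpf
    ls tools/testing/selftests/bpf/xdp_dummy.o   # the object attached via 'ip link set ... xdp object'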
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
index 99e537ab5ad9..5670a9ffd8eb 100755
--- a/tools/testing/selftests/net/udpgso_bench.sh
+++ b/tools/testing/selftests/net/udpgso_bench.sh
@@ -34,7 +34,10 @@ run_udp() {
run_in_netns ${args}
echo "udp gso"
- run_in_netns ${args} -S
+ run_in_netns ${args} -S 0
+
+ echo "udp gso zerocopy"
+ run_in_netns ${args} -S 0 -z
}
run_tcp() {
diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
index 727cf67a3f75..0c960f673324 100644
--- a/tools/testing/selftests/net/udpgso_bench_rx.c
+++ b/tools/testing/selftests/net/udpgso_bench_rx.c
@@ -31,9 +31,21 @@
#include <sys/wait.h>
#include <unistd.h>
+#ifndef UDP_GRO
+#define UDP_GRO 104
+#endif
+
static int cfg_port = 8000;
static bool cfg_tcp;
static bool cfg_verify;
+static bool cfg_read_all;
+static bool cfg_gro_segment;
+static int cfg_family = PF_INET6;
+static int cfg_alen = sizeof(struct sockaddr_in6);
+static int cfg_expected_pkt_nr;
+static int cfg_expected_pkt_len;
+static int cfg_expected_gso_size;
+static struct sockaddr_storage cfg_bind_addr;
static bool interrupted;
static unsigned long packets, bytes;
@@ -44,6 +56,29 @@ static void sigint_handler(int signum)
interrupted = true;
}
+static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr)
+{
+ struct sockaddr_in6 *addr6 = (void *) sockaddr;
+ struct sockaddr_in *addr4 = (void *) sockaddr;
+
+ switch (domain) {
+ case PF_INET:
+ addr4->sin_family = AF_INET;
+ addr4->sin_port = htons(cfg_port);
+ if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1)
+ error(1, 0, "ipv4 parse error: %s", str_addr);
+ break;
+ case PF_INET6:
+ addr6->sin6_family = AF_INET6;
+ addr6->sin6_port = htons(cfg_port);
+ if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1)
+ error(1, 0, "ipv6 parse error: %s", str_addr);
+ break;
+ default:
+ error(1, 0, "illegal domain");
+ }
+}
+
static unsigned long gettimeofday_ms(void)
{
struct timeval tv;
@@ -63,6 +98,8 @@ static void do_poll(int fd)
do {
ret = poll(&pfd, 1, 10);
+ if (interrupted)
+ break;
if (ret == -1)
error(1, errno, "poll");
if (ret == 0)
@@ -70,15 +107,14 @@ static void do_poll(int fd)
if (pfd.revents != POLLIN)
error(1, errno, "poll: 0x%x expected 0x%x\n",
pfd.revents, POLLIN);
- } while (!ret && !interrupted);
+ } while (!ret);
}
static int do_socket(bool do_tcp)
{
- struct sockaddr_in6 addr = {0};
int fd, val;
- fd = socket(PF_INET6, cfg_tcp ? SOCK_STREAM : SOCK_DGRAM, 0);
+ fd = socket(cfg_family, cfg_tcp ? SOCK_STREAM : SOCK_DGRAM, 0);
if (fd == -1)
error(1, errno, "socket");
@@ -89,10 +125,7 @@ static int do_socket(bool do_tcp)
if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val)))
error(1, errno, "setsockopt reuseport");
- addr.sin6_family = PF_INET6;
- addr.sin6_port = htons(cfg_port);
- addr.sin6_addr = in6addr_any;
- if (bind(fd, (void *) &addr, sizeof(addr)))
+ if (bind(fd, (void *)&cfg_bind_addr, cfg_alen))
error(1, errno, "bind");
if (do_tcp) {
@@ -102,6 +135,8 @@ static int do_socket(bool do_tcp)
error(1, errno, "listen");
do_poll(accept_fd);
+ if (interrupted)
+ exit(0);
fd = accept(accept_fd, NULL, NULL);
if (fd == -1)
@@ -164,51 +199,123 @@ static void do_verify_udp(const char *data, int len)
}
}
+static int recv_msg(int fd, char *buf, int len, int *gso_size)
+{
+ char control[CMSG_SPACE(sizeof(uint16_t))] = {0};
+ struct msghdr msg = {0};
+ struct iovec iov = {0};
+ struct cmsghdr *cmsg;
+ uint16_t *gsosizeptr;
+ int ret;
+
+ iov.iov_base = buf;
+ iov.iov_len = len;
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ msg.msg_control = control;
+ msg.msg_controllen = sizeof(control);
+
+ *gso_size = -1;
+ ret = recvmsg(fd, &msg, MSG_TRUNC | MSG_DONTWAIT);
+ if (ret != -1) {
+ for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
+ cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+ if (cmsg->cmsg_level == SOL_UDP
+ && cmsg->cmsg_type == UDP_GRO) {
+ gsosizeptr = (uint16_t *) CMSG_DATA(cmsg);
+ *gso_size = *gsosizeptr;
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
/* Flush all outstanding datagrams. Verify first few bytes of each. */
static void do_flush_udp(int fd)
{
- static char rbuf[ETH_DATA_LEN];
- int ret, len, budget = 256;
+ static char rbuf[ETH_MAX_MTU];
+ int ret, len, gso_size, budget = 256;
- len = cfg_verify ? sizeof(rbuf) : 0;
+ len = cfg_read_all ? sizeof(rbuf) : 0;
while (budget--) {
/* MSG_TRUNC will make return value full datagram length */
- ret = recv(fd, rbuf, len, MSG_TRUNC | MSG_DONTWAIT);
+ if (!cfg_expected_gso_size)
+ ret = recv(fd, rbuf, len, MSG_TRUNC | MSG_DONTWAIT);
+ else
+ ret = recv_msg(fd, rbuf, len, &gso_size);
if (ret == -1 && errno == EAGAIN)
- return;
+ break;
if (ret == -1)
error(1, errno, "recv");
- if (len) {
+ if (cfg_expected_pkt_len && ret != cfg_expected_pkt_len)
+ error(1, 0, "recv: bad packet len, got %d,"
+ " expected %d\n", ret, cfg_expected_pkt_len);
+ if (len && cfg_verify) {
if (ret == 0)
error(1, errno, "recv: 0 byte datagram\n");
do_verify_udp(rbuf, ret);
}
+ if (cfg_expected_gso_size && cfg_expected_gso_size != gso_size)
+ error(1, 0, "recv: bad gso size, got %d, expected %d "
+ "(-1 == no gso cmsg))\n", gso_size,
+ cfg_expected_gso_size);
packets++;
bytes += ret;
+ if (cfg_expected_pkt_nr && packets >= cfg_expected_pkt_nr)
+ break;
}
}
static void usage(const char *filepath)
{
- error(1, 0, "Usage: %s [-tv] [-p port]", filepath);
+ error(1, 0, "Usage: %s [-Grtv] [-b addr] [-p port] [-l pktlen] [-n packetnr] [-S gsosize]", filepath);
}
static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "ptv")) != -1) {
+ /* bind to any by default */
+ setup_sockaddr(PF_INET6, "::", &cfg_bind_addr);
+ while ((c = getopt(argc, argv, "4b:Gl:n:p:rS:tv")) != -1) {
switch (c) {
+ case '4':
+ cfg_family = PF_INET;
+ cfg_alen = sizeof(struct sockaddr_in);
+ setup_sockaddr(PF_INET, "0.0.0.0", &cfg_bind_addr);
+ break;
+ case 'b':
+ setup_sockaddr(cfg_family, optarg, &cfg_bind_addr);
+ break;
+ case 'G':
+ cfg_gro_segment = true;
+ break;
+ case 'l':
+ cfg_expected_pkt_len = strtoul(optarg, NULL, 0);
+ break;
+ case 'n':
+ cfg_expected_pkt_nr = strtoul(optarg, NULL, 0);
+ break;
case 'p':
- cfg_port = htons(strtoul(optarg, NULL, 0));
+ cfg_port = strtoul(optarg, NULL, 0);
+ break;
+ case 'r':
+ cfg_read_all = true;
+ break;
+ case 'S':
+ cfg_expected_gso_size = strtol(optarg, NULL, 0);
break;
case 't':
cfg_tcp = true;
break;
case 'v':
cfg_verify = true;
+ cfg_read_all = true;
break;
}
}
@@ -223,12 +330,23 @@ static void parse_opts(int argc, char **argv)
static void do_recv(void)
{
unsigned long tnow, treport;
- int fd;
+ int fd, loop = 0;
fd = do_socket(cfg_tcp);
+ if (cfg_gro_segment && !cfg_tcp) {
+ int val = 1;
+ if (setsockopt(fd, IPPROTO_UDP, UDP_GRO, &val, sizeof(val)))
+ error(1, errno, "setsockopt UDP_GRO");
+ }
+
treport = gettimeofday_ms() + 1000;
do {
+		/* force termination after the second poll(); this copes both
+		 * with a sender slower than the receiver and with missing packets
+ */
+ if (cfg_expected_pkt_nr && loop++)
+ interrupted = true;
do_poll(fd);
if (cfg_tcp)
@@ -249,6 +367,10 @@ static void do_recv(void)
} while (!interrupted);
+ if (cfg_expected_pkt_nr && (packets != cfg_expected_pkt_nr))
+ error(1, 0, "wrong packet number! got %ld, expected %d\n",
+ packets, cfg_expected_pkt_nr);
+
if (close(fd))
error(1, errno, "close");
}
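
Editor's note: tying the new receiver flags to the GRO runs above, a hypothetical manual reproduction of the ipv4 "GRO chk cmsg" case (addresses and sizes copied from udpgro.sh; assumes the veth setup that script creates):

    # receiver: expect one 14720-byte datagram carrying a UDP_GRO cmsg of 1472
    ./udpgso_bench_rx -4 -G -r -n 1 -l 14720 -S 1472 &
    # sender: a single 14720-byte message, gso size left at the default mss (-S 0)
    ./udpgso_bench_tx -l 4 -4 -D 192.168.1.1 -M 1 -s 14720 -S 0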
diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c
index e821564053cf..4074538b5df5 100644
--- a/tools/testing/selftests/net/udpgso_bench_tx.c
+++ b/tools/testing/selftests/net/udpgso_bench_tx.c
@@ -52,6 +52,8 @@ static bool cfg_segment;
static bool cfg_sendmmsg;
static bool cfg_tcp;
static bool cfg_zerocopy;
+static int cfg_msg_nr;
+static uint16_t cfg_gso_size;
static socklen_t cfg_alen;
static struct sockaddr_storage cfg_dst_addr;
@@ -205,14 +207,14 @@ static void send_udp_segment_cmsg(struct cmsghdr *cm)
cm->cmsg_level = SOL_UDP;
cm->cmsg_type = UDP_SEGMENT;
- cm->cmsg_len = CMSG_LEN(sizeof(cfg_mss));
+ cm->cmsg_len = CMSG_LEN(sizeof(cfg_gso_size));
valp = (void *)CMSG_DATA(cm);
- *valp = cfg_mss;
+ *valp = cfg_gso_size;
}
static int send_udp_segment(int fd, char *data)
{
- char control[CMSG_SPACE(sizeof(cfg_mss))] = {0};
+ char control[CMSG_SPACE(sizeof(cfg_gso_size))] = {0};
struct msghdr msg = {0};
struct iovec iov = {0};
int ret;
@@ -241,7 +243,7 @@ static int send_udp_segment(int fd, char *data)
static void usage(const char *filepath)
{
- error(1, 0, "Usage: %s [-46cmStuz] [-C cpu] [-D dst ip] [-l secs] [-p port] [-s sendsize]",
+ error(1, 0, "Usage: %s [-46cmtuz] [-C cpu] [-D dst ip] [-l secs] [-m messagenr] [-p port] [-s sendsize] [-S gsosize]",
filepath);
}
@@ -250,7 +252,7 @@ static void parse_opts(int argc, char **argv)
int max_len, hdrlen;
int c;
- while ((c = getopt(argc, argv, "46cC:D:l:mp:s:Stuz")) != -1) {
+ while ((c = getopt(argc, argv, "46cC:D:l:mM:p:s:S:tuz")) != -1) {
switch (c) {
case '4':
if (cfg_family != PF_UNSPEC)
@@ -279,6 +281,9 @@ static void parse_opts(int argc, char **argv)
case 'm':
cfg_sendmmsg = true;
break;
+ case 'M':
+ cfg_msg_nr = strtoul(optarg, NULL, 10);
+ break;
case 'p':
cfg_port = strtoul(optarg, NULL, 0);
break;
@@ -286,6 +291,7 @@ static void parse_opts(int argc, char **argv)
cfg_payload_len = strtoul(optarg, NULL, 0);
break;
case 'S':
+ cfg_gso_size = strtoul(optarg, NULL, 0);
cfg_segment = true;
break;
case 't':
@@ -317,6 +323,8 @@ static void parse_opts(int argc, char **argv)
cfg_mss = ETH_DATA_LEN - hdrlen;
max_len = ETH_MAX_MTU - hdrlen;
+ if (!cfg_gso_size)
+ cfg_gso_size = cfg_mss;
if (cfg_payload_len > max_len)
error(1, 0, "payload length %u exceeds max %u",
@@ -392,10 +400,12 @@ int main(int argc, char **argv)
else
num_sends += send_udp(fd, buf[i]);
num_msgs++;
-
if (cfg_zerocopy && ((num_msgs & 0xF) == 0))
flush_zerocopy(fd);
+ if (cfg_msg_nr && num_msgs >= cfg_msg_nr)
+ break;
+
tnow = gettimeofday_ms();
if (tnow > treport) {
fprintf(stderr,
diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh
new file mode 100755
index 000000000000..8db35b99457c
--- /dev/null
+++ b/tools/testing/selftests/net/xfrm_policy.sh
@@ -0,0 +1,302 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Check xfrm policy resolution. Topology:
+#
+# 1.2 1.1 3.1 3.10 2.1 2.2
+# eth1 eth1 veth0 veth0 eth1 eth1
+# ns1 ---- ns3 ----- ns4 ---- ns2
+#
+# ns3 and ns4 are connected via ipsec tunnel.
+# pings from ns1 to ns2 (and vice versa) are supposed to work like this:
+# ns1: ping 10.0.2.2: passes via ipsec tunnel.
+# ns2: ping 10.0.1.2: passes via ipsec tunnel.
+
+# ns1: ping 10.0.2.253: passes via ipsec tunnel (direct policy)
+# ns2: ping 10.0.1.253: passes via ipsec tunnel (direct policy)
+#
+# ns1: ping 10.0.2.254: does NOT pass via ipsec tunnel (exception)
+# ns2: ping 10.0.1.254: does NOT pass via ipsec tunnel (exception)
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+policy_checks_ok=1
+
+KEY_SHA=0xdeadbeef1234567890abcdefabcdefabcdefabcd
+KEY_AES=0x0123456789abcdef0123456789012345
+SPI1=0x1
+SPI2=0x2
+
+do_esp() {
+ local ns=$1
+ local me=$2
+ local remote=$3
+ local lnet=$4
+ local rnet=$5
+ local spi_out=$6
+ local spi_in=$7
+
+ ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet
+ ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet
+
+ # to encrypt packets as they go out (includes forwarded packets that need encapsulation)
+ ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow
+ # to fwd decrypted packets after esp processing:
+ ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow
+}
+
+do_esp_policy_get_check() {
+ local ns=$1
+ local lnet=$2
+ local rnet=$3
+
+ ip -net $ns xfrm policy get src $lnet dst $rnet dir out > /dev/null
+ if [ $? -ne 0 ] && [ $policy_checks_ok -eq 1 ] ;then
+ policy_checks_ok=0
+ echo "FAIL: ip -net $ns xfrm policy get src $lnet dst $rnet dir out"
+ ret=1
+ fi
+
+ ip -net $ns xfrm policy get src $rnet dst $lnet dir fwd > /dev/null
+ if [ $? -ne 0 ] && [ $policy_checks_ok -eq 1 ] ;then
+ policy_checks_ok=0
+ echo "FAIL: ip -net $ns xfrm policy get src $rnet dst $lnet dir fwd"
+ ret=1
+ fi
+}
+
+do_exception() {
+ local ns=$1
+ local me=$2
+ local remote=$3
+ local encryptip=$4
+ local plain=$5
+
+ # network $plain passes without tunnel
+ ip -net $ns xfrm policy add dst $plain dir out priority 10 action allow
+
+	# direct policy for $encryptip: use the tunnel; priority 1 outranks the priority-10 bypass above (lower value wins)
+ ip -net $ns xfrm policy add dst $encryptip dir out tmpl src $me dst $remote proto esp mode tunnel priority 1 action allow
+}
+
+# policies that are not supposed to match any packets generated in this test.
+do_dummies4() {
+ local ns=$1
+
+ for i in $(seq 10 16);do
+ # dummy policy with wildcard src/dst.
+ echo netns exec $ns ip xfrm policy add src 0.0.0.0/0 dst 10.$i.99.0/30 dir out action block
+ echo netns exec $ns ip xfrm policy add src 10.$i.99.0/30 dst 0.0.0.0/0 dir out action block
+ for j in $(seq 32 64);do
+ echo netns exec $ns ip xfrm policy add src 10.$i.1.0/30 dst 10.$i.$j.0/30 dir out action block
+			# silly, as it encompasses the one above too, but it's allowed:
+ echo netns exec $ns ip xfrm policy add src 10.$i.1.0/29 dst 10.$i.$j.0/29 dir out action block
+			# and yet again, an even broader one.
+ echo netns exec $ns ip xfrm policy add src 10.$i.1.0/24 dst 10.$i.$j.0/24 dir out action block
+ echo netns exec $ns ip xfrm policy add src 10.$i.$j.0/24 dst 10.$i.1.0/24 dir fwd action block
+ done
+ done | ip -batch /dev/stdin
+}
+
+do_dummies6() {
+ local ns=$1
+
+ for i in $(seq 10 16);do
+ for j in $(seq 32 64);do
+ echo netns exec $ns ip xfrm policy add src dead:$i::/64 dst dead:$i:$j::/64 dir out action block
+ echo netns exec $ns ip xfrm policy add src dead:$i:$j::/64 dst dead:$i::/24 dir fwd action block
+ done
+ done | ip -batch /dev/stdin
+}
+
+check_ipt_policy_count()
+{
+ ns=$1
+
+ ip netns exec $ns iptables-save -c |grep policy | ( read c rest
+ ip netns exec $ns iptables -Z
+ if [ x"$c" = x'[0:0]' ]; then
+ exit 0
+ elif [ x"$c" = x ]; then
+ echo "ERROR: No counters"
+ ret=1
+ exit 111
+ else
+ exit 1
+ fi
+ )
+}
+
+check_xfrm() {
+ # 0: iptables -m policy rule count == 0
+ # 1: iptables -m policy rule count != 0
+ rval=$1
+ ip=$2
+ lret=0
+
+ ip netns exec ns1 ping -q -c 1 10.0.2.$ip > /dev/null
+
+ check_ipt_policy_count ns3
+ if [ $? -ne $rval ] ; then
+ lret=1
+ fi
+ check_ipt_policy_count ns4
+ if [ $? -ne $rval ] ; then
+ lret=1
+ fi
+
+ ip netns exec ns2 ping -q -c 1 10.0.1.$ip > /dev/null
+
+ check_ipt_policy_count ns3
+ if [ $? -ne $rval ] ; then
+ lret=1
+ fi
+ check_ipt_policy_count ns4
+ if [ $? -ne $rval ] ; then
+ lret=1
+ fi
+
+ return $lret
+}
+
+#check for needed privileges
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit $ksft_skip
+fi
+
+ip -Version 2>/dev/null >/dev/null
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without the ip tool"
+ exit $ksft_skip
+fi
+
+# needed to check if policy lookup got valid ipsec result
+iptables --version 2>/dev/null >/dev/null
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without iptables tool"
+ exit $ksft_skip
+fi
+
+for i in 1 2 3 4; do
+ ip netns add ns$i
+ ip -net ns$i link set lo up
+done
+
+DEV=veth0
+ip link add $DEV netns ns1 type veth peer name eth1 netns ns3
+ip link add $DEV netns ns2 type veth peer name eth1 netns ns4
+
+ip link add $DEV netns ns3 type veth peer name veth0 netns ns4
+
+DEV=veth0
+for i in 1 2; do
+ ip -net ns$i link set $DEV up
+ ip -net ns$i addr add 10.0.$i.2/24 dev $DEV
+ ip -net ns$i addr add dead:$i::2/64 dev $DEV
+
+ ip -net ns$i addr add 10.0.$i.253 dev $DEV
+ ip -net ns$i addr add 10.0.$i.254 dev $DEV
+ ip -net ns$i addr add dead:$i::fd dev $DEV
+ ip -net ns$i addr add dead:$i::fe dev $DEV
+done
+
+for i in 3 4; do
+ip -net ns$i link set eth1 up
+ip -net ns$i link set veth0 up
+done
+
+ip -net ns1 route add default via 10.0.1.1
+ip -net ns2 route add default via 10.0.2.1
+
+ip -net ns3 addr add 10.0.1.1/24 dev eth1
+ip -net ns3 addr add 10.0.3.1/24 dev veth0
+ip -net ns3 addr add 2001:1::1/64 dev eth1
+ip -net ns3 addr add 2001:3::1/64 dev veth0
+
+ip -net ns3 route add default via 10.0.3.10
+
+ip -net ns4 addr add 10.0.2.1/24 dev eth1
+ip -net ns4 addr add 10.0.3.10/24 dev veth0
+ip -net ns4 addr add 2001:2::1/64 dev eth1
+ip -net ns4 addr add 2001:3::10/64 dev veth0
+ip -net ns4 route add default via 10.0.3.1
+
+for j in 4 6; do
+ for i in 3 4;do
+ ip netns exec ns$i sysctl net.ipv$j.conf.eth1.forwarding=1 > /dev/null
+ ip netns exec ns$i sysctl net.ipv$j.conf.veth0.forwarding=1 > /dev/null
+ done
+done
+
+# abuse iptables rule counter to check if ping matches a policy
+ip netns exec ns3 iptables -p icmp -A FORWARD -m policy --dir out --pol ipsec
+ip netns exec ns4 iptables -p icmp -A FORWARD -m policy --dir out --pol ipsec
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not insert iptables rule"
+ for i in 1 2 3 4;do ip netns del ns$i;done
+ exit $ksft_skip
+fi
+
+# localip remoteip localnet remotenet
+do_esp ns3 10.0.3.1 10.0.3.10 10.0.1.0/24 10.0.2.0/24 $SPI1 $SPI2
+do_esp ns3 dead:3::1 dead:3::10 dead:1::/64 dead:2::/64 $SPI1 $SPI2
+do_esp ns4 10.0.3.10 10.0.3.1 10.0.2.0/24 10.0.1.0/24 $SPI2 $SPI1
+do_esp ns4 dead:3::10 dead:3::1 dead:2::/64 dead:1::/64 $SPI2 $SPI1
+
+do_dummies4 ns3
+do_dummies6 ns4
+
+do_esp_policy_get_check ns3 10.0.1.0/24 10.0.2.0/24
+do_esp_policy_get_check ns4 10.0.2.0/24 10.0.1.0/24
+do_esp_policy_get_check ns3 dead:1::/64 dead:2::/64
+do_esp_policy_get_check ns4 dead:2::/64 dead:1::/64
+
+# ping to .254 should use ipsec; the exceptions are not installed yet.
+check_xfrm 1 254
+if [ $? -ne 0 ]; then
+ echo "FAIL: expected ping to .254 to use ipsec tunnel"
+ ret=1
+else
+ echo "PASS: policy before exception matches"
+fi
+
+# install the exceptions
+# localip remoteip encryptdst plaindst
+do_exception ns3 10.0.3.1 10.0.3.10 10.0.2.253 10.0.2.240/28
+do_exception ns4 10.0.3.10 10.0.3.1 10.0.1.253 10.0.1.240/28
+
+do_exception ns3 dead:3::1 dead:3::10 dead:2::fd dead:2:f0::/96
+do_exception ns4 dead:3::10 dead:3::1 dead:1::fd dead:1:f0::/96
+
+# ping to .254 should now be excluded from the tunnel
+check_xfrm 0 254
+if [ $? -ne 0 ]; then
+ echo "FAIL: expected ping to .254 to fail"
+ ret=1
+else
+ echo "PASS: ping to .254 bypassed ipsec tunnel"
+fi
+
+# ping to .253 should still use ipsec due to the direct policy exception.
+check_xfrm 1 253
+if [ $? -ne 0 ]; then
+ echo "FAIL: expected ping to .253 to use ipsec tunnel"
+ ret=1
+else
+ echo "PASS: direct policy matches"
+fi
+
+# ping to .2 should use ipsec.
+check_xfrm 1 2
+if [ $? -ne 0 ]; then
+ echo "FAIL: expected ping to .2 to use ipsec tunnel"
+ ret=1
+else
+ echo "PASS: policy matches"
+fi
+
+for i in 1 2 3 4;do ip netns del ns$i;done
+
+exit $ret
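
Editor's note: the pass/fail decision hinges on the iptables policy-match counters. The same check can be done by hand, as a sketch:

    # show hits on the '-m policy --pol ipsec' FORWARD rule, then zero the counters
    ip netns exec ns3 iptables-save -c | grep policy
    ip netns exec ns3 iptables -Z
    # [0:0] after a ping means the packet bypassed IPsec; anything else means it matched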
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
new file mode 100644
index 000000000000..47ed6cef93fb
--- /dev/null
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for netfilter selftests
+
+TEST_PROGS := nft_trans_stress.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
new file mode 100644
index 000000000000..1017313e41a8
--- /dev/null
+++ b/tools/testing/selftests/netfilter/config
@@ -0,0 +1,2 @@
+CONFIG_NET_NS=y
+CONFIG_NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_trans_stress.sh b/tools/testing/selftests/netfilter/nft_trans_stress.sh
new file mode 100755
index 000000000000..f1affd12c4b1
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_trans_stress.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+#
+# This test is for stress-testing the nf_tables config plane path vs.
+# packet path processing: Make sure we never release rules that are
+# still visible to other CPUs.
+#
+# set -e
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+testns=testns1
+tables="foo bar baz quux"
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+tmp=$(mktemp)
+
+for table in $tables; do
+ echo add table inet "$table" >> "$tmp"
+ echo flush table inet "$table" >> "$tmp"
+
+ echo "add chain inet $table INPUT { type filter hook input priority 0; }" >> "$tmp"
+ echo "add chain inet $table OUTPUT { type filter hook output priority 0; }" >> "$tmp"
+ for c in $(seq 1 400); do
+ chain=$(printf "chain%03u" "$c")
+ echo "add chain inet $table $chain" >> "$tmp"
+ done
+
+ for c in $(seq 1 400); do
+ chain=$(printf "chain%03u" "$c")
+ for BASE in INPUT OUTPUT; do
+ echo "add rule inet $table $BASE counter jump $chain" >> "$tmp"
+ done
+ echo "add rule inet $table $chain counter return" >> "$tmp"
+ done
+done
+
+ip netns add "$testns"
+ip -netns "$testns" link set lo up
+
+lscpu | grep ^CPU\(s\): | ( read cpu cpunum ;
+cpunum=$((cpunum-1))
+for i in $(seq 0 $cpunum);do
+ mask=$(printf 0x%x $((1<<$i)))
+ ip netns exec "$testns" taskset $mask ping -4 127.0.0.1 -fq > /dev/null &
+ ip netns exec "$testns" taskset $mask ping -6 ::1 -fq > /dev/null &
+done)
+
+sleep 1
+
+for i in $(seq 1 10) ; do ip netns exec "$testns" nft -f "$tmp" & done
+
+for table in $tables;do
+ randsleep=$((RANDOM%10))
+ sleep $randsleep
+ ip netns exec "$testns" nft delete table inet $table 2>/dev/null
+done
+
+randsleep=$((RANDOM%10))
+sleep $randsleep
+
+pkill -9 ping
+
+wait
+
+rm -f "$tmp"
+ip netns del "$testns"
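
Editor's note: for reference, the first lines of the batch generated for table 'foo' look like this (derived directly from the loop above):

    add table inet foo
    flush table inet foo
    add chain inet foo INPUT { type filter hook input priority 0; }
    add chain inet foo OUTPUT { type filter hook output priority 0; }
    add chain inet foo chain001
    add chain inet foo chain002
    # ... 400 chains, then per-chain jump and return rules

Feeding ten copies of this concurrently through 'nft -f' while the pinned ping floods run is what stresses the commit/release path.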
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
index 14cfcf006936..9050eeea5f5f 100644
--- a/tools/testing/selftests/networking/timestamping/Makefile
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -1,12 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += -I../../../../../usr/include
-TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
+TEST_GEN_FILES := hwtstamp_config rxtimestamp timestamping txtimestamp
+TEST_PROGS := txtimestamp.sh
all: $(TEST_PROGS)
top_srcdir = ../../../../..
+KSFT_KHDR_INSTALL := 1
include ../../lib.mk
clean:
- rm -fr $(TEST_PROGS)
+ rm -fr $(TEST_GEN_FILES)
diff --git a/tools/testing/selftests/networking/timestamping/config b/tools/testing/selftests/networking/timestamping/config
new file mode 100644
index 000000000000..a13e3169b0a4
--- /dev/null
+++ b/tools/testing/selftests/networking/timestamping/config
@@ -0,0 +1,2 @@
+CONFIG_IFB=y
+CONFIG_NET_SCH_NETEM=y
diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c
index 81a98a240456..2e563d17cf0c 100644
--- a/tools/testing/selftests/networking/timestamping/txtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c
@@ -39,6 +39,7 @@
#include <inttypes.h>
#include <linux/errqueue.h>
#include <linux/if_ether.h>
+#include <linux/ipv6.h>
#include <linux/net_tstamp.h>
#include <netdb.h>
#include <net/if.h>
@@ -69,15 +70,67 @@ static int do_ipv4 = 1;
static int do_ipv6 = 1;
static int cfg_payload_len = 10;
static int cfg_poll_timeout = 100;
+static int cfg_delay_snd;
+static int cfg_delay_ack;
static bool cfg_show_payload;
static bool cfg_do_pktinfo;
static bool cfg_loop_nodata;
static bool cfg_no_delay;
+static bool cfg_use_cmsg;
+static bool cfg_use_pf_packet;
+static bool cfg_do_listen;
static uint16_t dest_port = 9000;
static struct sockaddr_in daddr;
static struct sockaddr_in6 daddr6;
-static struct timespec ts_prev;
+static struct timespec ts_usr;
+
+static int saved_tskey = -1;
+static int saved_tskey_type = -1;
+
+static bool test_failed;
+
+static int64_t timespec_to_us64(struct timespec *ts)
+{
+ return ts->tv_sec * 1000 * 1000 + ts->tv_nsec / 1000;
+}
+
+static void validate_key(int tskey, int tstype)
+{
+ int stepsize;
+
+ /* compare key for each subsequent request
+ * must only test for one type, the first one requested
+ */
+ if (saved_tskey == -1)
+ saved_tskey_type = tstype;
+ else if (saved_tskey_type != tstype)
+ return;
+
+ stepsize = cfg_proto == SOCK_STREAM ? cfg_payload_len : 1;
+ if (tskey != saved_tskey + stepsize) {
+ fprintf(stderr, "ERROR: key %d, expected %d\n",
+ tskey, saved_tskey + stepsize);
+ test_failed = true;
+ }
+
+ saved_tskey = tskey;
+}
+
+static void validate_timestamp(struct timespec *cur, int min_delay)
+{
+ int max_delay = min_delay + 500 /* processing time upper bound */;
+ int64_t cur64, start64;
+
+ cur64 = timespec_to_us64(cur);
+ start64 = timespec_to_us64(&ts_usr);
+
+ if (cur64 < start64 + min_delay || cur64 > start64 + max_delay) {
+ fprintf(stderr, "ERROR: delay %lu expected between %d and %d\n",
+ cur64 - start64, min_delay, max_delay);
+ test_failed = true;
+ }
+}
static void __print_timestamp(const char *name, struct timespec *cur,
uint32_t key, int payload_len)
@@ -89,32 +142,19 @@ static void __print_timestamp(const char *name, struct timespec *cur,
name, cur->tv_sec, cur->tv_nsec / 1000,
key, payload_len);
- if ((ts_prev.tv_sec | ts_prev.tv_nsec)) {
- int64_t cur_ms, prev_ms;
-
- cur_ms = (long) cur->tv_sec * 1000 * 1000;
- cur_ms += cur->tv_nsec / 1000;
-
- prev_ms = (long) ts_prev.tv_sec * 1000 * 1000;
- prev_ms += ts_prev.tv_nsec / 1000;
-
- fprintf(stderr, " (%+" PRId64 " us)", cur_ms - prev_ms);
- }
+ if (cur != &ts_usr)
+ fprintf(stderr, " (USR %+" PRId64 " us)",
+ timespec_to_us64(cur) - timespec_to_us64(&ts_usr));
- ts_prev = *cur;
fprintf(stderr, "\n");
}
static void print_timestamp_usr(void)
{
- struct timespec ts;
- struct timeval tv; /* avoid dependency on -lrt */
-
- gettimeofday(&tv, NULL);
- ts.tv_sec = tv.tv_sec;
- ts.tv_nsec = tv.tv_usec * 1000;
+ if (clock_gettime(CLOCK_REALTIME, &ts_usr))
+ error(1, errno, "clock_gettime");
- __print_timestamp(" USR", &ts, 0, 0);
+ __print_timestamp(" USR", &ts_usr, 0, 0);
}
static void print_timestamp(struct scm_timestamping *tss, int tstype,
@@ -122,15 +162,20 @@ static void print_timestamp(struct scm_timestamping *tss, int tstype,
{
const char *tsname;
+ validate_key(tskey, tstype);
+
switch (tstype) {
case SCM_TSTAMP_SCHED:
tsname = " ENQ";
+ validate_timestamp(&tss->ts[0], 0);
break;
case SCM_TSTAMP_SND:
tsname = " SND";
+ validate_timestamp(&tss->ts[0], cfg_delay_snd);
break;
case SCM_TSTAMP_ACK:
tsname = " ACK";
+ validate_timestamp(&tss->ts[0], cfg_delay_ack);
break;
default:
error(1, 0, "unknown timestamp type: %u",
@@ -194,7 +239,9 @@ static void __recv_errmsg_cmsg(struct msghdr *msg, int payload_len)
} else if ((cm->cmsg_level == SOL_IP &&
cm->cmsg_type == IP_RECVERR) ||
(cm->cmsg_level == SOL_IPV6 &&
- cm->cmsg_type == IPV6_RECVERR)) {
+ cm->cmsg_type == IPV6_RECVERR) ||
+ (cm->cmsg_level = SOL_PACKET &&
+ cm->cmsg_type == PACKET_TX_TIMESTAMP)) {
serr = (void *) CMSG_DATA(cm);
if (serr->ee_errno != ENOMSG ||
serr->ee_origin != SO_EE_ORIGIN_TIMESTAMPING) {
@@ -269,32 +316,124 @@ static int recv_errmsg(int fd)
return ret == -1;
}
-static void do_test(int family, unsigned int opt)
+static uint16_t get_ip_csum(const uint16_t *start, int num_words,
+ unsigned long sum)
+{
+ int i;
+
+ for (i = 0; i < num_words; i++)
+ sum += start[i];
+
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ return ~sum;
+}
+
+static uint16_t get_udp_csum(const struct udphdr *udph, int alen)
+{
+ unsigned long pseudo_sum, csum_len;
+ const void *csum_start = udph;
+
+ pseudo_sum = htons(IPPROTO_UDP);
+ pseudo_sum += udph->len;
+
+ /* checksum ip(v6) addresses + udp header + payload */
+ csum_start -= alen * 2;
+ csum_len = ntohs(udph->len) + alen * 2;
+
+ return get_ip_csum(csum_start, csum_len >> 1, pseudo_sum);
+}
+
+static int fill_header_ipv4(void *p)
+{
+ struct iphdr *iph = p;
+
+ memset(iph, 0, sizeof(*iph));
+
+ iph->ihl = 5;
+ iph->version = 4;
+ iph->ttl = 2;
+ iph->saddr = daddr.sin_addr.s_addr; /* set for udp csum calc */
+ iph->daddr = daddr.sin_addr.s_addr;
+ iph->protocol = IPPROTO_UDP;
+
+ /* kernel writes saddr, csum, len */
+
+ return sizeof(*iph);
+}
+
+static int fill_header_ipv6(void *p)
+{
+ struct ipv6hdr *ip6h = p;
+
+ memset(ip6h, 0, sizeof(*ip6h));
+
+ ip6h->version = 6;
+ ip6h->payload_len = htons(sizeof(struct udphdr) + cfg_payload_len);
+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->hop_limit = 64;
+
+ ip6h->saddr = daddr6.sin6_addr;
+ ip6h->daddr = daddr6.sin6_addr;
+
+ /* kernel does not write saddr in case of ipv6 */
+
+ return sizeof(*ip6h);
+}
+
+static void fill_header_udp(void *p, bool is_ipv4)
{
+ struct udphdr *udph = p;
+
+ udph->source = ntohs(dest_port + 1); /* spoof */
+ udph->dest = ntohs(dest_port);
+ udph->len = ntohs(sizeof(*udph) + cfg_payload_len);
+ udph->check = 0;
+
+ udph->check = get_udp_csum(udph, is_ipv4 ? sizeof(struct in_addr) :
+ sizeof(struct in6_addr));
+}
+
+static void do_test(int family, unsigned int report_opt)
+{
+ char control[CMSG_SPACE(sizeof(uint32_t))];
+ struct sockaddr_ll laddr;
+ unsigned int sock_opt;
+ struct cmsghdr *cmsg;
+ struct msghdr msg;
+ struct iovec iov;
char *buf;
int fd, i, val = 1, total_len;
- if (family == AF_INET6 && cfg_proto != SOCK_STREAM) {
- /* due to lack of checksum generation code */
- fprintf(stderr, "test: skipping datagram over IPv6\n");
- return;
- }
-
total_len = cfg_payload_len;
- if (cfg_proto == SOCK_RAW) {
+ if (cfg_use_pf_packet || cfg_proto == SOCK_RAW) {
total_len += sizeof(struct udphdr);
- if (cfg_ipproto == IPPROTO_RAW)
- total_len += sizeof(struct iphdr);
+ if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW)
+ if (family == PF_INET)
+ total_len += sizeof(struct iphdr);
+ else
+ total_len += sizeof(struct ipv6hdr);
+
+ /* special case, only rawv6_sendmsg:
+ * pass proto in sin6_port if not connected
+ * also see ANK comment in net/ipv4/raw.c
+ */
+ daddr6.sin6_port = htons(cfg_ipproto);
}
buf = malloc(total_len);
if (!buf)
error(1, 0, "malloc");
- fd = socket(family, cfg_proto, cfg_ipproto);
+ fd = socket(cfg_use_pf_packet ? PF_PACKET : family,
+ cfg_proto, cfg_ipproto);
if (fd < 0)
error(1, errno, "socket");
+ /* reset expected key on each new socket */
+ saved_tskey = -1;
+
if (cfg_proto == SOCK_STREAM) {
if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY,
(char*) &val, sizeof(val)))
@@ -321,54 +460,80 @@ static void do_test(int family, unsigned int opt)
}
}
- opt |= SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_OPT_CMSG |
- SOF_TIMESTAMPING_OPT_ID;
+ sock_opt = SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_OPT_CMSG |
+ SOF_TIMESTAMPING_OPT_ID;
+
+ if (!cfg_use_cmsg)
+ sock_opt |= report_opt;
+
if (cfg_loop_nodata)
- opt |= SOF_TIMESTAMPING_OPT_TSONLY;
+ sock_opt |= SOF_TIMESTAMPING_OPT_TSONLY;
if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING,
- (char *) &opt, sizeof(opt)))
+ (char *) &sock_opt, sizeof(sock_opt)))
error(1, 0, "setsockopt timestamping");
for (i = 0; i < cfg_num_pkts; i++) {
- memset(&ts_prev, 0, sizeof(ts_prev));
+ memset(&msg, 0, sizeof(msg));
memset(buf, 'a' + i, total_len);
- if (cfg_proto == SOCK_RAW) {
- struct udphdr *udph;
+ if (cfg_use_pf_packet || cfg_proto == SOCK_RAW) {
int off = 0;
- if (cfg_ipproto == IPPROTO_RAW) {
- struct iphdr *iph = (void *) buf;
-
- memset(iph, 0, sizeof(*iph));
- iph->ihl = 5;
- iph->version = 4;
- iph->ttl = 2;
- iph->daddr = daddr.sin_addr.s_addr;
- iph->protocol = IPPROTO_UDP;
- /* kernel writes saddr, csum, len */
-
- off = sizeof(*iph);
+ if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW) {
+ if (family == PF_INET)
+ off = fill_header_ipv4(buf);
+ else
+ off = fill_header_ipv6(buf);
}
- udph = (void *) buf + off;
- udph->source = ntohs(9000); /* random spoof */
- udph->dest = ntohs(dest_port);
- udph->len = ntohs(sizeof(*udph) + cfg_payload_len);
- udph->check = 0; /* not allowed for IPv6 */
+ fill_header_udp(buf + off, family == PF_INET);
}
print_timestamp_usr();
+
+ iov.iov_base = buf;
+ iov.iov_len = total_len;
+
if (cfg_proto != SOCK_STREAM) {
- if (family == PF_INET)
- val = sendto(fd, buf, total_len, 0, (void *) &daddr, sizeof(daddr));
- else
- val = sendto(fd, buf, total_len, 0, (void *) &daddr6, sizeof(daddr6));
- } else {
- val = send(fd, buf, cfg_payload_len, 0);
+ if (cfg_use_pf_packet) {
+ memset(&laddr, 0, sizeof(laddr));
+
+ laddr.sll_family = AF_PACKET;
+ laddr.sll_ifindex = 1;
+ laddr.sll_protocol = htons(family == AF_INET ? ETH_P_IP : ETH_P_IPV6);
+ laddr.sll_halen = ETH_ALEN;
+
+ msg.msg_name = (void *)&laddr;
+ msg.msg_namelen = sizeof(laddr);
+ } else if (family == PF_INET) {
+ msg.msg_name = (void *)&daddr;
+ msg.msg_namelen = sizeof(daddr);
+ } else {
+ msg.msg_name = (void *)&daddr6;
+ msg.msg_namelen = sizeof(daddr6);
+ }
+ }
+
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ if (cfg_use_cmsg) {
+ memset(control, 0, sizeof(control));
+
+ msg.msg_control = control;
+ msg.msg_controllen = sizeof(control);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SO_TIMESTAMPING;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(uint32_t));
+
+ *((uint32_t *) CMSG_DATA(cmsg)) = report_opt;
}
+
+ val = sendmsg(fd, &msg, 0);
if (val != total_len)
error(1, errno, "send");
@@ -385,7 +550,7 @@ static void do_test(int family, unsigned int opt)
error(1, errno, "close");
free(buf);
- usleep(400 * 1000);
+ usleep(100 * 1000);
}
static void __attribute__((noreturn)) usage(const char *filepath)
@@ -396,15 +561,20 @@ static void __attribute__((noreturn)) usage(const char *filepath)
" -6: only IPv6\n"
" -h: show this message\n"
" -c N: number of packets for each test\n"
+ " -C: use cmsg to set tstamp recording options\n"
" -D: no delay between packets\n"
" -F: poll() waits forever for an event\n"
" -I: request PKTINFO\n"
" -l N: send N bytes at a time\n"
+ " -L listen on hostname and port\n"
" -n: set no-payload option\n"
+ " -p N: connect to port N\n"
+ " -P: use PF_PACKET\n"
" -r: use raw\n"
" -R: use raw (IP_HDRINCL)\n"
- " -p N: connect to port N\n"
" -u: use udp\n"
+ " -v: validate SND delay (usec)\n"
+ " -V: validate ACK delay (usec)\n"
" -x: show payload (up to 70 bytes)\n",
filepath);
exit(1);
@@ -413,9 +583,9 @@ static void __attribute__((noreturn)) usage(const char *filepath)
static void parse_opt(int argc, char **argv)
{
int proto_count = 0;
- char c;
+ int c;
- while ((c = getopt(argc, argv, "46c:DFhIl:np:rRux")) != -1) {
+ while ((c = getopt(argc, argv, "46c:CDFhIl:Lnp:PrRuv:V:x")) != -1) {
switch (c) {
case '4':
do_ipv6 = 0;
@@ -426,6 +596,9 @@ static void parse_opt(int argc, char **argv)
case 'c':
cfg_num_pkts = strtoul(optarg, NULL, 10);
break;
+ case 'C':
+ cfg_use_cmsg = true;
+ break;
case 'D':
cfg_no_delay = true;
break;
@@ -435,9 +608,24 @@ static void parse_opt(int argc, char **argv)
case 'I':
cfg_do_pktinfo = true;
break;
+ case 'l':
+ cfg_payload_len = strtoul(optarg, NULL, 10);
+ break;
+ case 'L':
+ cfg_do_listen = true;
+ break;
case 'n':
cfg_loop_nodata = true;
break;
+ case 'p':
+ dest_port = strtoul(optarg, NULL, 10);
+ break;
+ case 'P':
+ proto_count++;
+ cfg_use_pf_packet = true;
+ cfg_proto = SOCK_DGRAM;
+ cfg_ipproto = 0;
+ break;
case 'r':
proto_count++;
cfg_proto = SOCK_RAW;
@@ -453,11 +641,11 @@ static void parse_opt(int argc, char **argv)
cfg_proto = SOCK_DGRAM;
cfg_ipproto = IPPROTO_UDP;
break;
- case 'l':
- cfg_payload_len = strtoul(optarg, NULL, 10);
+ case 'v':
+ cfg_delay_snd = strtoul(optarg, NULL, 10);
break;
- case 'p':
- dest_port = strtoul(optarg, NULL, 10);
+ case 'V':
+ cfg_delay_ack = strtoul(optarg, NULL, 10);
break;
case 'x':
cfg_show_payload = true;
@@ -475,7 +663,9 @@ static void parse_opt(int argc, char **argv)
if (!do_ipv4 && !do_ipv6)
error(1, 0, "pass -4 or -6, not both");
if (proto_count > 1)
- error(1, 0, "pass -r, -R or -u, not multiple");
+ error(1, 0, "pass -P, -r, -R or -u, not multiple");
+ if (cfg_do_pktinfo && cfg_use_pf_packet)
+ error(1, 0, "cannot ask for pktinfo over pf_packet");
if (optind != argc - 1)
error(1, 0, "missing required hostname argument");
@@ -483,10 +673,12 @@ static void parse_opt(int argc, char **argv)
static void resolve_hostname(const char *hostname)
{
+ struct addrinfo hints = { .ai_family = do_ipv4 ? AF_INET : AF_INET6 };
struct addrinfo *addrs, *cur;
int have_ipv4 = 0, have_ipv6 = 0;
- if (getaddrinfo(hostname, NULL, NULL, &addrs))
+retry:
+ if (getaddrinfo(hostname, NULL, &hints, &addrs))
error(1, errno, "getaddrinfo");
cur = addrs;
@@ -506,14 +698,41 @@ static void resolve_hostname(const char *hostname)
if (addrs)
freeaddrinfo(addrs);
+ if (do_ipv6 && hints.ai_family != AF_INET6) {
+ hints.ai_family = AF_INET6;
+ goto retry;
+ }
+
do_ipv4 &= have_ipv4;
do_ipv6 &= have_ipv6;
}
+static void do_listen(int family, void *addr, int alen)
+{
+ int fd, type;
+
+ type = cfg_proto == SOCK_RAW ? SOCK_DGRAM : cfg_proto;
+
+ fd = socket(family, type, 0);
+ if (fd == -1)
+ error(1, errno, "socket rx");
+
+ if (bind(fd, addr, alen))
+ error(1, errno, "bind rx");
+
+ if (type == SOCK_STREAM && listen(fd, 10))
+ error(1, errno, "listen rx");
+
+ /* leave fd open, will be closed on process exit.
+ * this enables connect() to succeed and avoids icmp replies
+ */
+}
+
static void do_main(int family)
{
- fprintf(stderr, "family: %s\n",
- family == PF_INET ? "INET" : "INET6");
+ fprintf(stderr, "family: %s %s\n",
+ family == PF_INET ? "INET" : "INET6",
+ cfg_use_pf_packet ? "(PF_PACKET)" : "");
fprintf(stderr, "test SND\n");
do_test(family, SOF_TIMESTAMPING_TX_SOFTWARE);
@@ -555,10 +774,17 @@ int main(int argc, char **argv)
fprintf(stderr, "server port: %u\n", dest_port);
fprintf(stderr, "\n");
- if (do_ipv4)
+ if (do_ipv4) {
+ if (cfg_do_listen)
+ do_listen(PF_INET, &daddr, sizeof(daddr));
do_main(PF_INET);
- if (do_ipv6)
+ }
+
+ if (do_ipv6) {
+ if (cfg_do_listen)
+ do_listen(PF_INET6, &daddr6, sizeof(daddr6));
do_main(PF_INET6);
+ }
- return 0;
+ return test_failed;
}
diff --git a/tools/testing/selftests/networking/timestamping/txtimestamp.sh b/tools/testing/selftests/networking/timestamping/txtimestamp.sh
new file mode 100755
index 000000000000..df0d86ca72b7
--- /dev/null
+++ b/tools/testing/selftests/networking/timestamping/txtimestamp.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Send packets with transmit timestamps over loopback with netem
+# Verify that timestamps correspond to netem delay
+
+set -e
+
+setup() {
+ # set 1ms delay on lo egress
+ tc qdisc add dev lo root netem delay 1ms
+
+ # set 2ms delay on ifb0 egress
+ modprobe ifb
+ ip link add ifb_netem0 type ifb
+ ip link set dev ifb_netem0 up
+ tc qdisc add dev ifb_netem0 root netem delay 2ms
+
+ # redirect lo ingress through ifb0 egress
+ tc qdisc add dev lo handle ffff: ingress
+ tc filter add dev lo parent ffff: \
+ u32 match mark 0 0xffff \
+ action mirred egress redirect dev ifb_netem0
+}
+
+run_test_v4v6() {
+ # SND will be delayed 1000us
+	# ACK will be delayed 6000us: the 1 + 2 ms one-way delay, traversed both ways
+ local -r args="$@ -v 1000 -V 6000"
+
+ ./txtimestamp ${args} -4 -L 127.0.0.1
+ ./txtimestamp ${args} -6 -L ::1
+}
+
+run_test_tcpudpraw() {
+ local -r args=$@
+
+ run_test_v4v6 ${args} # tcp
+ run_test_v4v6 ${args} -u # udp
+ run_test_v4v6 ${args} -r # raw
+ run_test_v4v6 ${args} -R # raw (IPPROTO_RAW)
+ run_test_v4v6 ${args} -P # pf_packet
+}
+
+run_test_all() {
+ run_test_tcpudpraw # setsockopt
+ run_test_tcpudpraw -C # cmsg
+ run_test_tcpudpraw -n # timestamp w/o data
+}
+
+if [[ "$(ip netns identify)" == "root" ]]; then
+ ../../net/in_netns.sh $0 $@
+else
+ setup
+ run_test_all
+ echo "OK. All tests passed"
+fi
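
Editor's note: the -v/-V bounds passed by run_test_v4v6 fall straight out of the qdisc setup. A sketch of the arithmetic, assuming negligible processing time:

    # one-way delay = 1ms (lo egress netem) + 2ms (ingress redirected via ifb_netem0)
    # SND timestamp: taken after lo egress only          -> expect ~1000us (-v 1000)
    # ACK timestamp: data out + ACK back, 3ms each way   -> expect ~6000us (-V 6000)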
diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h
index 49621822d7c3..ae43a614835d 100644
--- a/tools/testing/selftests/powerpc/include/utils.h
+++ b/tools/testing/selftests/powerpc/include/utils.h
@@ -82,6 +82,16 @@ do { \
} \
} while (0)
+#define SKIP_IF_MSG(x, msg) \
+do { \
+ if ((x)) { \
+ fprintf(stderr, \
+ "[SKIP] Test skipped on line %d: %s\n", \
+ __LINE__, msg); \
+ return MAGIC_SKIP_RETURN_VALUE; \
+ } \
+} while (0)
+
#define _str(s) #s
#define str(s) _str(s)
diff --git a/tools/testing/selftests/powerpc/ptrace/core-pkey.c b/tools/testing/selftests/powerpc/ptrace/core-pkey.c
index e23e2e199eb4..d5c64fee032d 100644
--- a/tools/testing/selftests/powerpc/ptrace/core-pkey.c
+++ b/tools/testing/selftests/powerpc/ptrace/core-pkey.c
@@ -352,10 +352,7 @@ static int write_core_pattern(const char *core_pattern)
FILE *f;
f = fopen(core_pattern_file, "w");
- if (!f) {
- perror("Error writing to core_pattern file");
- return TEST_FAIL;
- }
+ SKIP_IF_MSG(!f, "Try with root privileges");
ret = fwrite(core_pattern, 1, len, f);
fclose(f);
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
index 0b4ebcc2f485..ca29fafeed5d 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c
@@ -31,7 +31,7 @@ void gpr(void)
ASM_LOAD_GPR_IMMED(gpr_1)
ASM_LOAD_FPR_SINGLE_PRECISION(flt_1)
:
- : [gpr_1]"i"(GPR_1), [flt_1] "r" (&a)
+ : [gpr_1]"i"(GPR_1), [flt_1] "b" (&a)
: "memory", "r6", "r7", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15", "r16", "r17",
"r18", "r19", "r20", "r21", "r22", "r23", "r24",
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
index 59206b96e98a..a08a91594dbe 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c
@@ -59,8 +59,8 @@ trans:
"3: ;"
: [res] "=r" (result), [texasr] "=r" (texasr)
: [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2),
- [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a),
- [flt_2] "r" (&b), [cptr1] "r" (&cptr[1])
+ [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "b" (&a),
+ [flt_2] "b" (&b), [cptr1] "b" (&cptr[1])
: "memory", "r7", "r8", "r9", "r10",
"r11", "r12", "r13", "r14", "r15", "r16",
"r17", "r18", "r19", "r20", "r21", "r22",
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
index b3c061dc9512..f47174746231 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c
@@ -72,7 +72,7 @@ trans:
"3: ;"
: [res] "=r" (result), [texasr] "=r" (texasr)
- : [val] "r" (cptr[1]), [sprn_dscr]"i"(SPRN_DSCR),
+ : [sprn_dscr]"i"(SPRN_DSCR),
[sprn_tar]"i"(SPRN_TAR), [sprn_ppr]"i"(SPRN_PPR),
[sprn_texasr]"i"(SPRN_TEXASR), [tar_1]"i"(TAR_1),
[dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2), [dscr_2]"i"(DSCR_2),
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
index 277dade1b382..18a685bf6a09 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c
@@ -77,8 +77,7 @@ trans:
"3: ;"
: [res] "=r" (result), [texasr] "=r" (texasr)
- : [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
- [sprn_texasr] "i" (SPRN_TEXASR)
+ : [sprn_texasr] "i" (SPRN_TEXASR)
: "memory", "r0", "r1", "r3", "r4",
"r7", "r8", "r9", "r10", "r11"
);
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
index 51427a2465f6..ba04999254e3 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c
@@ -74,7 +74,7 @@ trans:
"3: ;"
: [tfhar] "=r" (tfhar), [res] "=r" (result),
- [texasr] "=r" (texasr), [cptr1] "=r" (cptr1)
+ [texasr] "=r" (texasr), [cptr1] "=b" (cptr1)
: [sprn_texasr] "i" (SPRN_TEXASR)
: "memory", "r0", "r8", "r31"
);
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
index 48b462f75023..f70023b25e6e 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c
@@ -65,7 +65,7 @@ trans:
: [sprn_dscr]"i"(SPRN_DSCR), [sprn_tar]"i"(SPRN_TAR),
[sprn_ppr]"i"(SPRN_PPR), [sprn_texasr]"i"(SPRN_TEXASR),
[tar_1]"i"(TAR_1), [dscr_1]"i"(DSCR_1), [tar_2]"i"(TAR_2),
- [dscr_2]"i"(DSCR_2), [cptr1] "r" (&cptr[1])
+ [dscr_2]"i"(DSCR_2), [cptr1] "b" (&cptr[1])
: "memory", "r0", "r1", "r3", "r4", "r5", "r6"
);
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
index 17c23cabac3e..dfba80058977 100644
--- a/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c
@@ -65,8 +65,7 @@ trans:
"3: ;"
: [res] "=r" (result), [texasr] "=r" (texasr)
- : [fp_load] "r" (fp_load), [fp_load_ckpt] "r" (fp_load_ckpt),
- [sprn_texasr] "i" (SPRN_TEXASR), [cptr1] "r" (&cptr[1])
+ : [sprn_texasr] "i" (SPRN_TEXASR), [cptr1] "b" (&cptr[1])
: "memory", "r0", "r1", "r3", "r4",
"r7", "r8", "r9", "r10", "r11"
);
diff --git a/tools/testing/selftests/powerpc/tm/.gitignore b/tools/testing/selftests/powerpc/tm/.gitignore
index c3ee8393dae8..208452a93e2c 100644
--- a/tools/testing/selftests/powerpc/tm/.gitignore
+++ b/tools/testing/selftests/powerpc/tm/.gitignore
@@ -11,6 +11,7 @@ tm-signal-context-chk-fpu
tm-signal-context-chk-gpr
tm-signal-context-chk-vmx
tm-signal-context-chk-vsx
+tm-signal-sigreturn-nt
tm-vmx-unavail
tm-unavailable
tm-trap
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 9fc2cf6fbc92..75a685359129 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -4,7 +4,7 @@ SIGNAL_CONTEXT_CHK_TESTS := tm-signal-context-chk-gpr tm-signal-context-chk-fpu
TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack \
tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
- $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn
+ $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn tm-signal-sigreturn-nt
top_srcdir = ../../../../..
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
new file mode 100644
index 000000000000..56fbf9f6bbf3
--- /dev/null
+++ b/tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018, Breno Leitao, Gustavo Romero, IBM Corp.
+ *
+ * A test case that raises a signal and starts a suspended transaction
+ * inside the signal handler.
+ *
+ * It returns from the signal handler with the CPU in suspended state, but
+ * without setting the user context MSR Transaction State (TS) fields.
+ */
+
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <signal.h>
+
+#include "utils.h"
+
+void trap_signal_handler(int signo, siginfo_t *si, void *uc)
+{
+ ucontext_t *ucp = (ucontext_t *) uc;
+
+ asm("tbegin.; tsuspend.;");
+
+	/* Skip 'trap' instruction if it succeeds */
+ ucp->uc_mcontext.regs->nip += 4;
+}
+
+int tm_signal_sigreturn_nt(void)
+{
+ struct sigaction trap_sa;
+
+ trap_sa.sa_flags = SA_SIGINFO;
+ trap_sa.sa_sigaction = trap_signal_handler;
+
+ sigaction(SIGTRAP, &trap_sa, NULL);
+
+ raise(SIGTRAP);
+
+ return EXIT_SUCCESS;
+}
+
+int main(int argc, char **argv)
+{
+	return test_harness(tm_signal_sigreturn_nt, "tm_signal_sigreturn_nt");
+}
+
diff --git a/tools/testing/selftests/proc/proc-self-map-files-002.c b/tools/testing/selftests/proc/proc-self-map-files-002.c
index 6f1f4a6e1ecb..85744425b08d 100644
--- a/tools/testing/selftests/proc/proc-self-map-files-002.c
+++ b/tools/testing/selftests/proc/proc-self-map-files-002.c
@@ -13,7 +13,7 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-/* Test readlink /proc/self/map_files/... with address 0. */
+/* Test readlink /proc/self/map_files/... with minimum address. */
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -47,6 +47,11 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
int main(void)
{
const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
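+	/*
+	 * On arm the lowest pages are typically not mappable from userspace
+	 * (see vm.mmap_min_addr), so a low but nonzero address is used there.
+	 */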
+#ifdef __arm__
+ unsigned long va = 2 * PAGE_SIZE;
+#else
+ unsigned long va = 0;
+#endif
void *p;
int fd;
unsigned long a, b;
@@ -55,7 +60,7 @@ int main(void)
if (fd == -1)
return 1;
- p = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
+ p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
if (p == MAP_FAILED) {
if (errno == EPERM)
return 2;
diff --git a/tools/testing/selftests/rcutorture/bin/kvm.sh b/tools/testing/selftests/rcutorture/bin/kvm.sh
index 5a7a62d76a50..19864f1cb27a 100755
--- a/tools/testing/selftests/rcutorture/bin/kvm.sh
+++ b/tools/testing/selftests/rcutorture/bin/kvm.sh
@@ -194,6 +194,14 @@ do
shift
done
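+# If an initrd is wanted (TORTURE_INITRD non-empty), make sure one exists,
+# aborting the run if mkinitrd.sh cannot create it.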
+if test -z "$TORTURE_INITRD" || tools/testing/selftests/rcutorture/bin/mkinitrd.sh
+then
+ :
+else
+ echo No initrd and unable to create one, aborting test >&2
+ exit 1
+fi
+
CONFIGFRAG=${KVM}/configs/${TORTURE_SUITE}; export CONFIGFRAG
if test -z "$configs"
diff --git a/tools/testing/selftests/rcutorture/bin/mkinitrd.sh b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
new file mode 100755
index 000000000000..da298394daa2
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/mkinitrd.sh
@@ -0,0 +1,136 @@
+#!/bin/bash
+#
+# Create an initrd directory if one does not already exist.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2013
+#
+# Author: Connor Shu <Connor.Shu@ibm.com>
+
+D=tools/testing/selftests/rcutorture
+
+# Prerequisite checks
+[ -z "$D" ] && echo >&2 "No argument supplied" && exit 1
+if [ ! -d "$D" ]; then
+ echo >&2 "$D does not exist: Malformed kernel source tree?"
+ exit 1
+fi
+if [ -s "$D/initrd/init" ]; then
+ echo "$D/initrd/init already exists, no need to create it"
+ exit 0
+fi
+
+T=${TMPDIR-/tmp}/mkinitrd.sh.$$
+trap 'rm -rf $T' 0 2
+mkdir $T
+
+cat > $T/init << '__EOF___'
+#!/bin/sh
+# Run in userspace a few milliseconds every second. This helps to
+# exercise the NO_HZ_FULL portions of RCU.
+while :
+do
+ q=
+ for i in \
+ a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
+ a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
+ a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
+ a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
+ a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a \
+ a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a a
+ do
+ q="$q $i"
+ done
+ sleep 1
+done
+__EOF___
+
+# Try using dracut to create initrd
+if command -v dracut >/dev/null 2>&1
+then
+ echo Creating $D/initrd using dracut.
+ # Filesystem creation
+ dracut --force --no-hostonly --no-hostonly-cmdline --module "base" $T/initramfs.img
+ cd $D
+ mkdir -p initrd
+ cd initrd
+ zcat $T/initramfs.img | cpio -id
+ cp $T/init init
+ chmod +x init
+ echo Done creating $D/initrd using dracut
+ exit 0
+fi
+
+# No dracut, so create a C-language initrd/init program and statically
+# link it. This results in a very small initrd, but might be a bit less
+# future-proof than dracut.
+echo "Could not find dracut, attempting C initrd"
+cd $D
+mkdir -p initrd
+cd initrd
+cat > init.c << '___EOF___'
+#ifndef NOLIBC
+#include <unistd.h>
+#include <sys/time.h>
+#endif
+
+volatile unsigned long delaycount;
+
+int main(int argc, char *argv[])
+{
+ int i;
+ struct timeval tv;
+ struct timeval tvb;
+
+ for (;;) {
+ sleep(1);
+ /* Need some userspace time. */
+ if (gettimeofday(&tvb, NULL))
+ continue;
+ do {
+ for (i = 0; i < 1000 * 100; i++)
+ delaycount = i * i;
+ if (gettimeofday(&tv, NULL))
+ break;
+ tv.tv_sec -= tvb.tv_sec;
+ if (tv.tv_sec > 1)
+ break;
+ tv.tv_usec += tv.tv_sec * 1000 * 1000;
+ tv.tv_usec -= tvb.tv_usec;
+ } while (tv.tv_usec < 1000);
+ }
+ return 0;
+}
+___EOF___
+
+# build using nolibc on supported archs (smaller executable) and fall
+# back to regular glibc on other ones.
+if echo -e "#if __x86_64__||__i386__||__i486__||__i586__||__i686__" \
+ "||__ARM_EABI__||__aarch64__\nyes\n#endif" \
+ | ${CROSS_COMPILE}gcc -E -nostdlib -xc - \
+ | grep -q '^yes'; then
+ # architecture supported by nolibc
+ ${CROSS_COMPILE}gcc -fno-asynchronous-unwind-tables -fno-ident \
+ -nostdlib -include ../bin/nolibc.h -lgcc -s -static -Os \
+ -o init init.c
+else
+ ${CROSS_COMPILE}gcc -s -static -Os -o init init.c
+fi
+
+rm init.c
+echo "Done creating a statically linked C-language initrd"
+
+exit 0
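+
+# (kvm.sh runs this script automatically before a test; it can also be run
+# standalone from the top of the kernel source tree, since $D above is a
+# tree-relative path.)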
diff --git a/tools/testing/selftests/rcutorture/bin/nolibc.h b/tools/testing/selftests/rcutorture/bin/nolibc.h
new file mode 100644
index 000000000000..f98f5b92d3eb
--- /dev/null
+++ b/tools/testing/selftests/rcutorture/bin/nolibc.h
@@ -0,0 +1,2197 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/* nolibc.h
+ * Copyright (C) 2017-2018 Willy Tarreau <w@1wt.eu>
+ */
+
+/* some archs (at least aarch64) don't expose the regular syscalls anymore by
+ * default, either because they have an "_at" replacement, or because there are
+ * more modern alternatives. For now we'd rather still use them.
+ */
+#define __ARCH_WANT_SYSCALL_NO_AT
+#define __ARCH_WANT_SYSCALL_NO_FLAGS
+#define __ARCH_WANT_SYSCALL_DEPRECATED
+
+#include <asm/unistd.h>
+#include <asm/ioctls.h>
+#include <asm/errno.h>
+#include <linux/fs.h>
+#include <linux/loop.h>
+
+#define NOLIBC
+
+/* Build a static executable this way :
+ * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
+ * -static -include nolibc.h -lgcc -o hello hello.c
+ *
+ * Useful calling convention table found here :
+ * http://man7.org/linux/man-pages/man2/syscall.2.html
+ *
+ * This doc is even better :
+ * https://w3challs.com/syscalls/
+ */
+
+
+/* this way it will be removed if unused */
+static int errno;
+
+#ifndef NOLIBC_IGNORE_ERRNO
+#define SET_ERRNO(v) do { errno = (v); } while (0)
+#else
+#define SET_ERRNO(v) do { } while (0)
+#endif
+
+/* errno codes all ensure that they will not conflict with a valid pointer
+ * because they all correspond to the highest addressable memory page.
+ */
+#define MAX_ERRNO 4095
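+
+/* Usage sketch: a raw syscall returning r in [-MAX_ERRNO, -1] signals an
+ * error, which the libc-compatible wrappers further down translate:
+ *
+ *	long r = sys_chdir("/nonexistent");
+ *	if (r < 0) {		// r == -ENOENT here
+ *		SET_ERRNO(-r);	// sets errno = ENOENT
+ *		r = -1;
+ *	}
+ */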
+
+/* Declare a few quite common macros and types that usually are in stdlib.h,
+ * stdint.h, ctype.h, unistd.h and a few other common locations.
+ */
+
+#define NULL ((void *)0)
+
+/* stdint types */
+typedef unsigned char uint8_t;
+typedef signed char int8_t;
+typedef unsigned short uint16_t;
+typedef signed short int16_t;
+typedef unsigned int uint32_t;
+typedef signed int int32_t;
+typedef unsigned long long uint64_t;
+typedef signed long long int64_t;
+typedef unsigned long size_t;
+typedef signed long ssize_t;
+typedef unsigned long uintptr_t;
+typedef signed long intptr_t;
+typedef signed long ptrdiff_t;
+
+/* for stat() */
+typedef unsigned int dev_t;
+typedef unsigned long ino_t;
+typedef unsigned int mode_t;
+typedef signed int pid_t;
+typedef unsigned int uid_t;
+typedef unsigned int gid_t;
+typedef unsigned long nlink_t;
+typedef signed long off_t;
+typedef signed long blksize_t;
+typedef signed long blkcnt_t;
+typedef signed long time_t;
+
+/* for poll() */
+struct pollfd {
+ int fd;
+ short int events;
+ short int revents;
+};
+
+/* for select() */
+struct timeval {
+ long tv_sec;
+ long tv_usec;
+};
+
+/* for pselect() */
+struct timespec {
+ long tv_sec;
+ long tv_nsec;
+};
+
+/* for gettimeofday() */
+struct timezone {
+ int tz_minuteswest;
+ int tz_dsttime;
+};
+
+/* for getdents64() */
+struct linux_dirent64 {
+ uint64_t d_ino;
+ int64_t d_off;
+ unsigned short d_reclen;
+ unsigned char d_type;
+ char d_name[];
+};
+
+/* commonly an fd_set represents 256 FDs */
+#define FD_SETSIZE 256
+typedef struct { uint32_t fd32[FD_SETSIZE/32]; } fd_set;
+
+/* needed by wait4() */
+struct rusage {
+ struct timeval ru_utime;
+ struct timeval ru_stime;
+ long ru_maxrss;
+ long ru_ixrss;
+ long ru_idrss;
+ long ru_isrss;
+ long ru_minflt;
+ long ru_majflt;
+ long ru_nswap;
+ long ru_inblock;
+ long ru_oublock;
+ long ru_msgsnd;
+ long ru_msgrcv;
+ long ru_nsignals;
+ long ru_nvcsw;
+ long ru_nivcsw;
+};
+
+/* stat flags (WARNING, octal here) */
+#define S_IFDIR 0040000
+#define S_IFCHR 0020000
+#define S_IFBLK 0060000
+#define S_IFREG 0100000
+#define S_IFIFO 0010000
+#define S_IFLNK 0120000
+#define S_IFSOCK 0140000
+#define S_IFMT 0170000
+
+#define S_ISDIR(mode) (((mode) & S_IFDIR) == S_IFDIR)
+#define S_ISCHR(mode) (((mode) & S_IFCHR) == S_IFCHR)
+#define S_ISBLK(mode) (((mode) & S_IFBLK) == S_IFBLK)
+#define S_ISREG(mode) (((mode) & S_IFREG) == S_IFREG)
+#define S_ISFIFO(mode) (((mode) & S_IFIFO) == S_IFIFO)
+#define S_ISLNK(mode) (((mode) & S_IFLNK) == S_IFLNK)
+#define S_ISSOCK(mode) (((mode) & S_IFSOCK) == S_IFSOCK)
+
+#define DT_UNKNOWN 0
+#define DT_FIFO 1
+#define DT_CHR 2
+#define DT_DIR 4
+#define DT_BLK 6
+#define DT_REG 8
+#define DT_LNK 10
+#define DT_SOCK 12
+
+/* all the *at functions */
+#ifndef AT_FDCWD
+#define AT_FDCWD -100
+#endif
+
+/* lseek */
+#define SEEK_SET 0
+#define SEEK_CUR 1
+#define SEEK_END 2
+
+/* reboot */
+#define LINUX_REBOOT_MAGIC1 0xfee1dead
+#define LINUX_REBOOT_MAGIC2 0x28121969
+#define LINUX_REBOOT_CMD_HALT 0xcdef0123
+#define LINUX_REBOOT_CMD_POWER_OFF 0x4321fedc
+#define LINUX_REBOOT_CMD_RESTART 0x01234567
+#define LINUX_REBOOT_CMD_SW_SUSPEND 0xd000fce2
+
+
+/* The format of the struct as returned by the libc to the application, which
+ * significantly differs from the format returned by the stat() syscall flavours.
+ */
+struct stat {
+ dev_t st_dev; /* ID of device containing file */
+ ino_t st_ino; /* inode number */
+ mode_t st_mode; /* protection */
+ nlink_t st_nlink; /* number of hard links */
+ uid_t st_uid; /* user ID of owner */
+ gid_t st_gid; /* group ID of owner */
+ dev_t st_rdev; /* device ID (if special file) */
+ off_t st_size; /* total size, in bytes */
+ blksize_t st_blksize; /* blocksize for file system I/O */
+ blkcnt_t st_blocks; /* number of 512B blocks allocated */
+ time_t st_atime; /* time of last access */
+ time_t st_mtime; /* time of last modification */
+ time_t st_ctime; /* time of last status change */
+};
+
+#define WEXITSTATUS(status) (((status) & 0xff00) >> 8)
+#define WIFEXITED(status) (((status) & 0x7f) == 0)
+
+
+/* Below comes the architecture-specific code. For each architecture, we have
+ * the syscall declarations and the _start code definition. This is the only
+ * global part. On all architectures the kernel puts everything in the stack
+ * before jumping to _start just above us, without any return address (_start
+ * is not a function but an entry point). So at the stack pointer we find argc.
+ * Then argv[] begins, and ends at the first NULL. Then we have envp which
+ * starts and ends with a NULL as well. So envp=argv+argc+1.
+ */
+
+#if defined(__x86_64__)
+/* Syscalls for x86_64 :
+ * - registers are 64-bit
+ * - syscall number is passed in rax
+ * - arguments are in rdi, rsi, rdx, r10, r8, r9 respectively
+ * - the system call is performed by calling the syscall instruction
+ * - syscall return comes in rax
+ * - rcx and r8..r11 may be clobbered, others are preserved.
+ * - the arguments are cast to long and assigned into the target registers
+ * which are then simply passed as registers to the asm code, so that we
+ * don't have to experience issues with register constraints.
+ * - the syscall number is always specified last in order to allow forcing
+ *   some registers before it (gcc refuses a %-register at the last position).
+ */
+
+#define my_syscall0(num) \
+({ \
+ long _ret; \
+ register long _num asm("rax") = (num); \
+ \
+ asm volatile ( \
+ "syscall\n" \
+ : "=a" (_ret) \
+ : "0"(_num) \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ long _ret; \
+ register long _num asm("rax") = (num); \
+ register long _arg1 asm("rdi") = (long)(arg1); \
+ \
+ asm volatile ( \
+ "syscall\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), \
+ "0"(_num) \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ long _ret; \
+ register long _num asm("rax") = (num); \
+ register long _arg1 asm("rdi") = (long)(arg1); \
+ register long _arg2 asm("rsi") = (long)(arg2); \
+ \
+ asm volatile ( \
+ "syscall\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), \
+ "0"(_num) \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ long _ret; \
+ register long _num asm("rax") = (num); \
+ register long _arg1 asm("rdi") = (long)(arg1); \
+ register long _arg2 asm("rsi") = (long)(arg2); \
+ register long _arg3 asm("rdx") = (long)(arg3); \
+ \
+ asm volatile ( \
+ "syscall\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
+ "0"(_num) \
+ : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ long _ret; \
+ register long _num asm("rax") = (num); \
+ register long _arg1 asm("rdi") = (long)(arg1); \
+ register long _arg2 asm("rsi") = (long)(arg2); \
+ register long _arg3 asm("rdx") = (long)(arg3); \
+ register long _arg4 asm("r10") = (long)(arg4); \
+ \
+ asm volatile ( \
+ "syscall\n" \
+ : "=a" (_ret), "=r"(_arg4) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "0"(_num) \
+ : "rcx", "r8", "r9", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ long _ret; \
+ register long _num asm("rax") = (num); \
+ register long _arg1 asm("rdi") = (long)(arg1); \
+ register long _arg2 asm("rsi") = (long)(arg2); \
+ register long _arg3 asm("rdx") = (long)(arg3); \
+ register long _arg4 asm("r10") = (long)(arg4); \
+ register long _arg5 asm("r8") = (long)(arg5); \
+ \
+ asm volatile ( \
+ "syscall\n" \
+ : "=a" (_ret), "=r"(_arg4), "=r"(_arg5) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "0"(_num) \
+ : "rcx", "r9", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ long _ret; \
+ register long _num asm("rax") = (num); \
+ register long _arg1 asm("rdi") = (long)(arg1); \
+ register long _arg2 asm("rsi") = (long)(arg2); \
+ register long _arg3 asm("rdx") = (long)(arg3); \
+ register long _arg4 asm("r10") = (long)(arg4); \
+ register long _arg5 asm("r8") = (long)(arg5); \
+ register long _arg6 asm("r9") = (long)(arg6); \
+ \
+ asm volatile ( \
+ "syscall\n" \
+ : "=a" (_ret), "=r"(_arg4), "=r"(_arg5) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_arg6), "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+/* startup code */
+asm(".section .text\n"
+ ".global _start\n"
+ "_start:\n"
+ "pop %rdi\n" // argc (first arg, %rdi)
+ "mov %rsp, %rsi\n" // argv[] (second arg, %rsi)
+ "lea 8(%rsi,%rdi,8),%rdx\n" // then a NULL then envp (third arg, %rdx)
+ "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned when
+ "sub $8, %rsp\n" // entering the callee
+ "call main\n" // main() returns the status code, we'll exit with it.
+ "movzb %al, %rdi\n" // retrieve exit code from 8 lower bits
+ "mov $60, %rax\n" // NR_exit == 60
+ "syscall\n" // really exit
+ "hlt\n" // ensure it does not return
+ "");
+
+/* fcntl / open */
+#define O_RDONLY 0
+#define O_WRONLY 1
+#define O_RDWR 2
+#define O_CREAT 0x40
+#define O_EXCL 0x80
+#define O_NOCTTY 0x100
+#define O_TRUNC 0x200
+#define O_APPEND 0x400
+#define O_NONBLOCK 0x800
+#define O_DIRECTORY 0x10000
+
+/* The struct returned by the stat() syscall, equivalent to stat64(). The
+ * syscall returns 116 bytes and stops in the middle of __unused.
+ */
+struct sys_stat_struct {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned long st_nlink;
+ unsigned int st_mode;
+ unsigned int st_uid;
+
+ unsigned int st_gid;
+ unsigned int __pad0;
+ unsigned long st_rdev;
+ long st_size;
+ long st_blksize;
+
+ long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ long __unused[3];
+};
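+
+/* Usage sketch: the macros above may also be invoked directly; e.g. writing
+ * to stderr on x86_64 (__NR_write comes from <asm/unistd.h>):
+ *
+ *	long ret = my_syscall3(__NR_write, 2, "hi\n", 3);
+ *
+ * As with all raw syscalls, ret is >= 0 on success and -errno on failure.
+ */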
+
+#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
+/* Syscalls for i386 :
+ * - mostly similar to x86_64
+ * - registers are 32-bit
+ * - syscall number is passed in eax
+ * - arguments are in ebx, ecx, edx, esi, edi, ebp respectively
+ * - all registers are preserved (except eax of course)
+ * - the system call is performed by calling int $0x80
+ * - syscall return comes in eax
+ * - the arguments are cast to long and assigned into the target registers
+ * which are then simply passed as registers to the asm code, so that we
+ * don't have to experience issues with register constraints.
+ * - the syscall number is always specified last in order to allow forcing
+ *   some registers before it (gcc refuses a %-register at the last position).
+ *
+ * Also, i386 supports the old_select syscall if newselect is not available
+ */
+#define __ARCH_WANT_SYS_OLD_SELECT
+
+#define my_syscall0(num) \
+({ \
+ long _ret; \
+ register long _num asm("eax") = (num); \
+ \
+ asm volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ long _ret; \
+ register long _num asm("eax") = (num); \
+ register long _arg1 asm("ebx") = (long)(arg1); \
+ \
+ asm volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ long _ret; \
+ register long _num asm("eax") = (num); \
+ register long _arg1 asm("ebx") = (long)(arg1); \
+ register long _arg2 asm("ecx") = (long)(arg2); \
+ \
+ asm volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ long _ret; \
+ register long _num asm("eax") = (num); \
+ register long _arg1 asm("ebx") = (long)(arg1); \
+ register long _arg2 asm("ecx") = (long)(arg2); \
+ register long _arg3 asm("edx") = (long)(arg3); \
+ \
+ asm volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ long _ret; \
+ register long _num asm("eax") = (num); \
+ register long _arg1 asm("ebx") = (long)(arg1); \
+ register long _arg2 asm("ecx") = (long)(arg2); \
+ register long _arg3 asm("edx") = (long)(arg3); \
+ register long _arg4 asm("esi") = (long)(arg4); \
+ \
+ asm volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ long _ret; \
+ register long _num asm("eax") = (num); \
+ register long _arg1 asm("ebx") = (long)(arg1); \
+ register long _arg2 asm("ecx") = (long)(arg2); \
+ register long _arg3 asm("edx") = (long)(arg3); \
+ register long _arg4 asm("esi") = (long)(arg4); \
+ register long _arg5 asm("edi") = (long)(arg5); \
+ \
+ asm volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+/* startup code */
+asm(".section .text\n"
+ ".global _start\n"
+ "_start:\n"
+ "pop %eax\n" // argc (first arg, %eax)
+ "mov %esp, %ebx\n" // argv[] (second arg, %ebx)
+ "lea 4(%ebx,%eax,4),%ecx\n" // then a NULL then envp (third arg, %ecx)
+ "and $-16, %esp\n" // x86 ABI : esp must be 16-byte aligned when
+ "push %ecx\n" // push all registers on the stack so that we
+ "push %ebx\n" // support both regparm and plain stack modes
+ "push %eax\n"
+ "call main\n" // main() returns the status code in %eax
+ "movzbl %al, %ebx\n" // retrieve exit code from lower 8 bits
+ "movl $1, %eax\n" // NR_exit == 1
+ "int $0x80\n" // exit now
+ "hlt\n" // ensure it does not
+ "");
+
+/* fcntl / open */
+#define O_RDONLY 0
+#define O_WRONLY 1
+#define O_RDWR 2
+#define O_CREAT 0x40
+#define O_EXCL 0x80
+#define O_NOCTTY 0x100
+#define O_TRUNC 0x200
+#define O_APPEND 0x400
+#define O_NONBLOCK 0x800
+#define O_DIRECTORY 0x10000
+
+/* The struct returned by the stat() syscall, 32-bit only; the syscall returns
+ * exactly 56 bytes (stops before the unused array).
+ */
+struct sys_stat_struct {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+
+ unsigned long st_rdev;
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused[2];
+};
+
+#elif defined(__ARM_EABI__)
+/* Syscalls for ARM in ARM or Thumb modes :
+ * - registers are 32-bit
+ * - stack is 8-byte aligned
+ * ( http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka4127.html)
+ * - syscall number is passed in r7
+ * - arguments are in r0, r1, r2, r3, r4, r5
+ * - the system call is performed by calling svc #0
+ * - syscall return comes in r0.
+ * - only lr is clobbered.
+ * - the arguments are cast to long and assigned into the target registers
+ * which are then simply passed as registers to the asm code, so that we
+ * don't have to experience issues with register constraints.
+ * - the syscall number is always specified last in order to allow forcing
+ *   some registers before it (gcc refuses a %-register at the last position).
+ *
+ * Also, ARM supports the old_select syscall if newselect is not available
+ */
+#define __ARCH_WANT_SYS_OLD_SELECT
+
+#define my_syscall0(num) \
+({ \
+ register long _num asm("r7") = (num); \
+ register long _arg1 asm("r0"); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _num asm("r7") = (num); \
+ register long _arg1 asm("r0") = (long)(arg1); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), \
+ "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num asm("r7") = (num); \
+ register long _arg1 asm("r0") = (long)(arg1); \
+ register long _arg2 asm("r1") = (long)(arg2); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), \
+ "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _num asm("r7") = (num); \
+ register long _arg1 asm("r0") = (long)(arg1); \
+ register long _arg2 asm("r1") = (long)(arg2); \
+ register long _arg3 asm("r2") = (long)(arg3); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
+ "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _num asm("r7") = (num); \
+ register long _arg1 asm("r0") = (long)(arg1); \
+ register long _arg2 asm("r1") = (long)(arg2); \
+ register long _arg3 asm("r2") = (long)(arg3); \
+ register long _arg4 asm("r3") = (long)(arg4); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num asm("r7") = (num); \
+ register long _arg1 asm("r0") = (long)(arg1); \
+ register long _arg2 asm("r1") = (long)(arg2); \
+ register long _arg3 asm("r2") = (long)(arg3); \
+ register long _arg4 asm("r3") = (long)(arg4); \
+ register long _arg5 asm("r4") = (long)(arg5); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r" (_arg1) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+/* startup code */
+asm(".section .text\n"
+ ".global _start\n"
+ "_start:\n"
+#if defined(__THUMBEB__) || defined(__THUMBEL__)
+ /* We enter here in 32-bit mode but if some previous functions were in
+ * 16-bit mode, the assembler cannot know, so we need to tell it we're in
+ * 32-bit now, then switch to 16-bit (is there a better way to do it than
+ * adding 1 by hand?) and tell the asm we're now in 16-bit mode so that
+ * it generates correct instructions. Note that we do not support thumb1.
+ */
+ ".code 32\n"
+ "add r0, pc, #1\n"
+ "bx r0\n"
+ ".code 16\n"
+#endif
+ "pop {%r0}\n" // argc was in the stack
+ "mov %r1, %sp\n" // argv = sp
+ "add %r2, %r1, %r0, lsl #2\n" // envp = argv + 4*argc ...
+ "add %r2, %r2, $4\n" // ... + 4
+ "and %r3, %r1, $-8\n" // AAPCS : sp must be 8-byte aligned in the
+ "mov %sp, %r3\n" // callee, an bl doesn't push (lr=pc)
+ "bl main\n" // main() returns the status code, we'll exit with it.
+ "and %r0, %r0, $0xff\n" // limit exit code to 8 bits
+ "movs r7, $1\n" // NR_exit == 1
+ "svc $0x00\n"
+ "");
+
+/* fcntl / open */
+#define O_RDONLY 0
+#define O_WRONLY 1
+#define O_RDWR 2
+#define O_CREAT 0x40
+#define O_EXCL 0x80
+#define O_NOCTTY 0x100
+#define O_TRUNC 0x200
+#define O_APPEND 0x400
+#define O_NONBLOCK 0x800
+#define O_DIRECTORY 0x4000
+
+/* The struct returned by the stat() syscall, 32-bit only; the syscall returns
+ * exactly 56 bytes (stops before the unused array). In big endian, the
+ * format differs as device numbers are returned as shorts only.
+ */
+struct sys_stat_struct {
+#if defined(__ARMEB__)
+ unsigned short st_dev;
+ unsigned short __pad1;
+#else
+ unsigned long st_dev;
+#endif
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+#if defined(__ARMEB__)
+ unsigned short st_rdev;
+ unsigned short __pad2;
+#else
+ unsigned long st_rdev;
+#endif
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused[2];
+};
+
+#elif defined(__aarch64__)
+/* Syscalls for AARCH64 :
+ * - registers are 64-bit
+ * - stack is 16-byte aligned
+ * - syscall number is passed in x8
+ * - arguments are in x0, x1, x2, x3, x4, x5
+ * - the system call is performed by calling svc 0
+ * - syscall return comes in x0.
+ * - the arguments are cast to long and assigned into the target registers
+ * which are then simply passed as registers to the asm code, so that we
+ * don't have to experience issues with register constraints.
+ *
+ * On aarch64, select() is not implemented so we have to use pselect6().
+ */
+#define __ARCH_WANT_SYS_PSELECT6
+
+#define my_syscall0(num) \
+({ \
+ register long _num asm("x8") = (num); \
+ register long _arg1 asm("x0"); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _num asm("x8") = (num); \
+ register long _arg1 asm("x0") = (long)(arg1); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num asm("x8") = (num); \
+ register long _arg1 asm("x0") = (long)(arg1); \
+ register long _arg2 asm("x1") = (long)(arg2); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _num asm("x8") = (num); \
+ register long _arg1 asm("x0") = (long)(arg1); \
+ register long _arg2 asm("x1") = (long)(arg2); \
+ register long _arg3 asm("x2") = (long)(arg3); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _num asm("x8") = (num); \
+ register long _arg1 asm("x0") = (long)(arg1); \
+ register long _arg2 asm("x1") = (long)(arg2); \
+ register long _arg3 asm("x2") = (long)(arg3); \
+ register long _arg4 asm("x3") = (long)(arg4); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num asm("x8") = (num); \
+ register long _arg1 asm("x0") = (long)(arg1); \
+ register long _arg2 asm("x1") = (long)(arg2); \
+ register long _arg3 asm("x2") = (long)(arg3); \
+ register long _arg4 asm("x3") = (long)(arg4); \
+ register long _arg5 asm("x4") = (long)(arg5); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r" (_arg1) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ register long _num asm("x8") = (num); \
+ register long _arg1 asm("x0") = (long)(arg1); \
+ register long _arg2 asm("x1") = (long)(arg2); \
+ register long _arg3 asm("x2") = (long)(arg3); \
+ register long _arg4 asm("x3") = (long)(arg4); \
+ register long _arg5 asm("x4") = (long)(arg5); \
+ register long _arg6 asm("x5") = (long)(arg6); \
+ \
+ asm volatile ( \
+ "svc #0\n" \
+ : "=r" (_arg1) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_arg6), "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+/* startup code */
+asm(".section .text\n"
+ ".global _start\n"
+ "_start:\n"
+ "ldr x0, [sp]\n" // argc (x0) was in the stack
+ "add x1, sp, 8\n" // argv (x1) = sp
+ "lsl x2, x0, 3\n" // envp (x2) = 8*argc ...
+ "add x2, x2, 8\n" // + 8 (skip null)
+ "add x2, x2, x1\n" // + argv
+ "and sp, x1, -16\n" // sp must be 16-byte aligned in the callee
+ "bl main\n" // main() returns the status code, we'll exit with it.
+ "and x0, x0, 0xff\n" // limit exit code to 8 bits
+ "mov x8, 93\n" // NR_exit == 93
+ "svc #0\n"
+ "");
+
+/* fcntl / open */
+#define O_RDONLY 0
+#define O_WRONLY 1
+#define O_RDWR 2
+#define O_CREAT 0x40
+#define O_EXCL 0x80
+#define O_NOCTTY 0x100
+#define O_TRUNC 0x200
+#define O_APPEND 0x400
+#define O_NONBLOCK 0x800
+#define O_DIRECTORY 0x4000
+
+/* The struct returned by the newfstatat() syscall. It differs slightly from
+ * the x86_64 stat struct in field ordering, so be careful.
+ */
+struct sys_stat_struct {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+
+ unsigned long st_rdev;
+ unsigned long __pad1;
+ long st_size;
+ int st_blksize;
+ int __pad2;
+
+ long st_blocks;
+ long st_atime;
+ unsigned long st_atime_nsec;
+ long st_mtime;
+
+ unsigned long st_mtime_nsec;
+ long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned int __unused[2];
+};
+
+#elif defined(__mips__) && defined(_ABIO32)
+/* Syscalls for MIPS ABI O32 :
+ * - WARNING! there's always a delay slot!
+ * - WARNING again, the syntax is different, registers take a '$' and numbers
+ * do not.
+ * - registers are 32-bit
+ * - stack is 8-byte aligned
+ * - syscall number is passed in v0 (starts at 0xfa0).
+ * - arguments are in a0, a1, a2, a3, then the stack. The caller needs to
+ * leave some room in the stack for the callee to save a0..a3 if needed.
+ * - Many registers are clobbered, in fact only a0..a2 and s0..s8 are
+ * preserved. See: https://www.linux-mips.org/wiki/Syscall as well as
+ * scall32-o32.S in the kernel sources.
+ * - the system call is performed by calling "syscall"
+ * - syscall return comes in v0, and register a3 needs to be checked to know
+ *   if an error occurred, in which case errno is in v0.
+ * - the arguments are cast to long and assigned into the target registers
+ * which are then simply passed as registers to the asm code, so that we
+ * don't have to experience issues with register constraints.
+ */
+
+#define my_syscall0(num) \
+({ \
+ register long _num asm("v0") = (num); \
+ register long _arg4 asm("a3"); \
+ \
+ asm volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "syscall\n" \
+ "addiu $sp, $sp, 32\n" \
+ : "=r"(_num), "=r"(_arg4) \
+ : "r"(_num) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _num asm("v0") = (num); \
+ register long _arg1 asm("a0") = (long)(arg1); \
+ register long _arg4 asm("a3"); \
+ \
+ asm volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "syscall\n" \
+ "addiu $sp, $sp, 32\n" \
+ : "=r"(_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num asm("v0") = (num); \
+ register long _arg1 asm("a0") = (long)(arg1); \
+ register long _arg2 asm("a1") = (long)(arg2); \
+ register long _arg4 asm("a3"); \
+ \
+ asm volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "syscall\n" \
+ "addiu $sp, $sp, 32\n" \
+ : "=r"(_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _num asm("v0") = (num); \
+ register long _arg1 asm("a0") = (long)(arg1); \
+ register long _arg2 asm("a1") = (long)(arg2); \
+ register long _arg3 asm("a2") = (long)(arg3); \
+ register long _arg4 asm("a3"); \
+ \
+ asm volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "syscall\n" \
+ "addiu $sp, $sp, 32\n" \
+ : "=r"(_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2), "r"(_arg3) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _num asm("v0") = (num); \
+ register long _arg1 asm("a0") = (long)(arg1); \
+ register long _arg2 asm("a1") = (long)(arg2); \
+ register long _arg3 asm("a2") = (long)(arg3); \
+ register long _arg4 asm("a3") = (long)(arg4); \
+ \
+ asm volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "syscall\n" \
+ "addiu $sp, $sp, 32\n" \
+ : "=r" (_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num asm("v0") = (num); \
+ register long _arg1 asm("a0") = (long)(arg1); \
+ register long _arg2 asm("a1") = (long)(arg2); \
+ register long _arg3 asm("a2") = (long)(arg3); \
+ register long _arg4 asm("a3") = (long)(arg4); \
+ register long _arg5 = (long)(arg5); \
+ \
+ asm volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "sw %7, 16($sp)\n" \
+ "syscall\n " \
+ "addiu $sp, $sp, 32\n" \
+ : "=r" (_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+/* startup code, note that it's called __start on MIPS */
+asm(".section .text\n"
+ ".set nomips16\n"
+ ".global __start\n"
+ ".set noreorder\n"
+ ".option pic0\n"
+ ".ent __start\n"
+ "__start:\n"
+ "lw $a0,($sp)\n" // argc was in the stack
+ "addiu $a1, $sp, 4\n" // argv = sp + 4
+ "sll $a2, $a0, 2\n" // a2 = argc * 4
+ "add $a2, $a2, $a1\n" // envp = argv + 4*argc ...
+ "addiu $a2, $a2, 4\n" // ... + 4
+ "li $t0, -8\n"
+ "and $sp, $sp, $t0\n" // sp must be 8-byte aligned
+ "addiu $sp,$sp,-16\n" // the callee expects to save a0..a3 there!
+ "jal main\n" // main() returns the status code, we'll exit with it.
+ "nop\n" // delayed slot
+ "and $a0, $v0, 0xff\n" // limit exit code to 8 bits
+ "li $v0, 4001\n" // NR_exit == 4001
+ "syscall\n"
+ ".end __start\n"
+ "");
+
+/* fcntl / open */
+#define O_RDONLY 0
+#define O_WRONLY 1
+#define O_RDWR 2
+#define O_APPEND 0x0008
+#define O_NONBLOCK 0x0080
+#define O_CREAT 0x0100
+#define O_TRUNC 0x0200
+#define O_EXCL 0x0400
+#define O_NOCTTY 0x0800
+#define O_DIRECTORY 0x10000
+
+/* The struct returned by the stat() syscall. 88 bytes are returned by the
+ * syscall.
+ */
+struct sys_stat_struct {
+ unsigned int st_dev;
+ long st_pad1[3];
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int st_rdev;
+ long st_pad2[2];
+ long st_size;
+ long st_pad3;
+ long st_atime;
+ long st_atime_nsec;
+ long st_mtime;
+ long st_mtime_nsec;
+ long st_ctime;
+ long st_ctime_nsec;
+ long st_blksize;
+ long st_blocks;
+ long st_pad4[14];
+};
+
+#endif
+
+
+/* Below are the C functions used to declare the raw syscalls. They try to be
+ * architecture-agnostic, and return either a success or -errno. Declaring them
+ * static will lead to them being inlined in most cases, but it's still possible
+ * to reference them by a pointer if needed.
+ */
+static __attribute__((unused))
+void *sys_brk(void *addr)
+{
+ return (void *)my_syscall1(__NR_brk, addr);
+}
+
+static __attribute__((noreturn,unused))
+void sys_exit(int status)
+{
+ my_syscall1(__NR_exit, status & 255);
+ while(1); // shut the "noreturn" warnings.
+}
+
+static __attribute__((unused))
+int sys_chdir(const char *path)
+{
+ return my_syscall1(__NR_chdir, path);
+}
+
+static __attribute__((unused))
+int sys_chmod(const char *path, mode_t mode)
+{
+#ifdef __NR_fchmodat
+ return my_syscall4(__NR_fchmodat, AT_FDCWD, path, mode, 0);
+#else
+ return my_syscall2(__NR_chmod, path, mode);
+#endif
+}
+
+static __attribute__((unused))
+int sys_chown(const char *path, uid_t owner, gid_t group)
+{
+#ifdef __NR_fchownat
+ return my_syscall5(__NR_fchownat, AT_FDCWD, path, owner, group, 0);
+#else
+ return my_syscall3(__NR_chown, path, owner, group);
+#endif
+}
+
+static __attribute__((unused))
+int sys_chroot(const char *path)
+{
+ return my_syscall1(__NR_chroot, path);
+}
+
+static __attribute__((unused))
+int sys_close(int fd)
+{
+ return my_syscall1(__NR_close, fd);
+}
+
+static __attribute__((unused))
+int sys_dup(int fd)
+{
+ return my_syscall1(__NR_dup, fd);
+}
+
+static __attribute__((unused))
+int sys_dup2(int old, int new)
+{
+ return my_syscall2(__NR_dup2, old, new);
+}
+
+static __attribute__((unused))
+int sys_execve(const char *filename, char *const argv[], char *const envp[])
+{
+ return my_syscall3(__NR_execve, filename, argv, envp);
+}
+
+static __attribute__((unused))
+pid_t sys_fork(void)
+{
+ return my_syscall0(__NR_fork);
+}
+
+static __attribute__((unused))
+int sys_fsync(int fd)
+{
+ return my_syscall1(__NR_fsync, fd);
+}
+
+static __attribute__((unused))
+int sys_getdents64(int fd, struct linux_dirent64 *dirp, int count)
+{
+ return my_syscall3(__NR_getdents64, fd, dirp, count);
+}
+
+static __attribute__((unused))
+pid_t sys_getpgrp(void)
+{
+ return my_syscall0(__NR_getpgrp);
+}
+
+static __attribute__((unused))
+pid_t sys_getpid(void)
+{
+ return my_syscall0(__NR_getpid);
+}
+
+static __attribute__((unused))
+int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ return my_syscall2(__NR_gettimeofday, tv, tz);
+}
+
+static __attribute__((unused))
+int sys_ioctl(int fd, unsigned long req, void *value)
+{
+ return my_syscall3(__NR_ioctl, fd, req, value);
+}
+
+static __attribute__((unused))
+int sys_kill(pid_t pid, int signal)
+{
+ return my_syscall2(__NR_kill, pid, signal);
+}
+
+static __attribute__((unused))
+int sys_link(const char *old, const char *new)
+{
+#ifdef __NR_linkat
+ return my_syscall5(__NR_linkat, AT_FDCWD, old, AT_FDCWD, new, 0);
+#else
+ return my_syscall2(__NR_link, old, new);
+#endif
+}
+
+static __attribute__((unused))
+off_t sys_lseek(int fd, off_t offset, int whence)
+{
+ return my_syscall3(__NR_lseek, fd, offset, whence);
+}
+
+static __attribute__((unused))
+int sys_mkdir(const char *path, mode_t mode)
+{
+#ifdef __NR_mkdirat
+ return my_syscall3(__NR_mkdirat, AT_FDCWD, path, mode);
+#else
+ return my_syscall2(__NR_mkdir, path, mode);
+#endif
+}
+
+static __attribute__((unused))
+long sys_mknod(const char *path, mode_t mode, dev_t dev)
+{
+#ifdef __NR_mknodat
+ return my_syscall4(__NR_mknodat, AT_FDCWD, path, mode, dev);
+#else
+ return my_syscall3(__NR_mknod, path, mode, dev);
+#endif
+}
+
+static __attribute__((unused))
+int sys_mount(const char *src, const char *tgt, const char *fst,
+ unsigned long flags, const void *data)
+{
+ return my_syscall5(__NR_mount, src, tgt, fst, flags, data);
+}
+
+static __attribute__((unused))
+int sys_open(const char *path, int flags, mode_t mode)
+{
+#ifdef __NR_openat
+ return my_syscall4(__NR_openat, AT_FDCWD, path, flags, mode);
+#else
+ return my_syscall3(__NR_open, path, flags, mode);
+#endif
+}
+
+static __attribute__((unused))
+int sys_pivot_root(const char *new, const char *old)
+{
+ return my_syscall2(__NR_pivot_root, new, old);
+}
+
+static __attribute__((unused))
+int sys_poll(struct pollfd *fds, int nfds, int timeout)
+{
+ return my_syscall3(__NR_poll, fds, nfds, timeout);
+}
+
+static __attribute__((unused))
+ssize_t sys_read(int fd, void *buf, size_t count)
+{
+ return my_syscall3(__NR_read, fd, buf, count);
+}
+
+static __attribute__((unused))
+ssize_t sys_reboot(int magic1, int magic2, int cmd, void *arg)
+{
+ return my_syscall4(__NR_reboot, magic1, magic2, cmd, arg);
+}
+
+static __attribute__((unused))
+int sys_sched_yield(void)
+{
+ return my_syscall0(__NR_sched_yield);
+}
+
+static __attribute__((unused))
+int sys_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
+{
+#if defined(__ARCH_WANT_SYS_OLD_SELECT) && !defined(__NR__newselect)
+ struct sel_arg_struct {
+ unsigned long n;
+ fd_set *r, *w, *e;
+ struct timeval *t;
+ } arg = { .n = nfds, .r = rfds, .w = wfds, .e = efds, .t = timeout };
+ return my_syscall1(__NR_select, &arg);
+#elif defined(__ARCH_WANT_SYS_PSELECT6) && defined(__NR_pselect6)
+ struct timespec t;
+
+ if (timeout) {
+ t.tv_sec = timeout->tv_sec;
+ t.tv_nsec = timeout->tv_usec * 1000;
+ }
+ return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
+#else
+#ifndef __NR__newselect
+#define __NR__newselect __NR_select
+#endif
+ return my_syscall5(__NR__newselect, nfds, rfds, wfds, efds, timeout);
+#endif
+}
+
+static __attribute__((unused))
+int sys_setpgid(pid_t pid, pid_t pgid)
+{
+ return my_syscall2(__NR_setpgid, pid, pgid);
+}
+
+static __attribute__((unused))
+pid_t sys_setsid(void)
+{
+ return my_syscall0(__NR_setsid);
+}
+
+static __attribute__((unused))
+int sys_stat(const char *path, struct stat *buf)
+{
+ struct sys_stat_struct stat;
+ long ret;
+
+#ifdef __NR_newfstatat
+ /* only solution for arm64 */
+ ret = my_syscall4(__NR_newfstatat, AT_FDCWD, path, &stat, 0);
+#else
+ ret = my_syscall2(__NR_stat, path, &stat);
+#endif
+ buf->st_dev = stat.st_dev;
+ buf->st_ino = stat.st_ino;
+ buf->st_mode = stat.st_mode;
+ buf->st_nlink = stat.st_nlink;
+ buf->st_uid = stat.st_uid;
+ buf->st_gid = stat.st_gid;
+ buf->st_rdev = stat.st_rdev;
+ buf->st_size = stat.st_size;
+ buf->st_blksize = stat.st_blksize;
+ buf->st_blocks = stat.st_blocks;
+ buf->st_atime = stat.st_atime;
+ buf->st_mtime = stat.st_mtime;
+ buf->st_ctime = stat.st_ctime;
+ return ret;
+}
+
+
+static __attribute__((unused))
+int sys_symlink(const char *old, const char *new)
+{
+#ifdef __NR_symlinkat
+ return my_syscall3(__NR_symlinkat, old, AT_FDCWD, new);
+#else
+ return my_syscall2(__NR_symlink, old, new);
+#endif
+}
+
+static __attribute__((unused))
+mode_t sys_umask(mode_t mode)
+{
+ return my_syscall1(__NR_umask, mode);
+}
+
+static __attribute__((unused))
+int sys_umount2(const char *path, int flags)
+{
+ return my_syscall2(__NR_umount2, path, flags);
+}
+
+static __attribute__((unused))
+int sys_unlink(const char *path)
+{
+#ifdef __NR_unlinkat
+ return my_syscall3(__NR_unlinkat, AT_FDCWD, path, 0);
+#else
+ return my_syscall1(__NR_unlink, path);
+#endif
+}
+
+static __attribute__((unused))
+pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage)
+{
+ return my_syscall4(__NR_wait4, pid, status, options, rusage);
+}
+
+static __attribute__((unused))
+pid_t sys_waitpid(pid_t pid, int *status, int options)
+{
+ return sys_wait4(pid, status, options, 0);
+}
+
+static __attribute__((unused))
+pid_t sys_wait(int *status)
+{
+ return sys_waitpid(-1, status, 0);
+}
+
+static __attribute__((unused))
+ssize_t sys_write(int fd, const void *buf, size_t count)
+{
+ return my_syscall3(__NR_write, fd, buf, count);
+}
+
+
+/* Below are the libc-compatible syscalls which return x or -1 and set errno.
+ * They rely on the functions above. Similarly they're marked static so that it
+ * is possible to assign pointers to them if needed.
+ */
+
+static __attribute__((unused))
+int brk(void *addr)
+{
+ void *ret = sys_brk(addr);
+
+ if (!ret) {
+ SET_ERRNO(ENOMEM);
+ return -1;
+ }
+ return 0;
+}
+
+static __attribute__((noreturn,unused))
+void exit(int status)
+{
+ sys_exit(status);
+}
+
+static __attribute__((unused))
+int chdir(const char *path)
+{
+ int ret = sys_chdir(path);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int chmod(const char *path, mode_t mode)
+{
+ int ret = sys_chmod(path, mode);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int chown(const char *path, uid_t owner, gid_t group)
+{
+ int ret = sys_chown(path, owner, group);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int chroot(const char *path)
+{
+ int ret = sys_chroot(path);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int close(int fd)
+{
+ int ret = sys_close(fd);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int dup2(int old, int new)
+{
+ int ret = sys_dup2(old, new);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int execve(const char *filename, char *const argv[], char *const envp[])
+{
+ int ret = sys_execve(filename, argv, envp);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+pid_t fork(void)
+{
+ pid_t ret = sys_fork();
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int fsync(int fd)
+{
+ int ret = sys_fsync(fd);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int getdents64(int fd, struct linux_dirent64 *dirp, int count)
+{
+ int ret = sys_getdents64(fd, dirp, count);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+pid_t getpgrp(void)
+{
+ pid_t ret = sys_getpgrp();
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+pid_t getpid(void)
+{
+ pid_t ret = sys_getpid();
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ int ret = sys_gettimeofday(tv, tz);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int ioctl(int fd, unsigned long req, void *value)
+{
+ int ret = sys_ioctl(fd, req, value);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int kill(pid_t pid, int signal)
+{
+ int ret = sys_kill(pid, signal);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int link(const char *old, const char *new)
+{
+ int ret = sys_link(old, new);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+off_t lseek(int fd, off_t offset, int whence)
+{
+ off_t ret = sys_lseek(fd, offset, whence);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int mkdir(const char *path, mode_t mode)
+{
+ int ret = sys_mkdir(path, mode);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int mknod(const char *path, mode_t mode, dev_t dev)
+{
+ int ret = sys_mknod(path, mode, dev);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int mount(const char *src, const char *tgt,
+ const char *fst, unsigned long flags,
+ const void *data)
+{
+ int ret = sys_mount(src, tgt, fst, flags, data);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int open(const char *path, int flags, mode_t mode)
+{
+ int ret = sys_open(path, flags, mode);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int pivot_root(const char *new, const char *old)
+{
+ int ret = sys_pivot_root(new, old);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int poll(struct pollfd *fds, int nfds, int timeout)
+{
+ int ret = sys_poll(fds, nfds, timeout);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+ssize_t read(int fd, void *buf, size_t count)
+{
+ ssize_t ret = sys_read(fd, buf, count);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int reboot(int cmd)
+{
+ int ret = sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, 0);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+void *sbrk(intptr_t inc)
+{
+ void *ret;
+
+ /* first call to find current end */
+ if ((ret = sys_brk(0)) && (sys_brk(ret + inc) == ret + inc))
+ return ret + inc;
+
+ SET_ERRNO(ENOMEM);
+ return (void *)-1;
+}
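+
+/* Usage sketch for sbrk() above: grow the heap by one page and use it as
+ * scratch space (memset() is provided further down in this file):
+ *
+ *	char *p = sbrk(4096);
+ *	if (p != (void *)-1)
+ *		memset(p, 0, 4096);
+ */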
+
+static __attribute__((unused))
+int sched_yield(void)
+{
+ int ret = sys_sched_yield();
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
+{
+ int ret = sys_select(nfds, rfds, wfds, efds, timeout);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int setpgid(pid_t pid, pid_t pgid)
+{
+ int ret = sys_setpgid(pid, pgid);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+pid_t setsid(void)
+{
+ pid_t ret = sys_setsid();
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+unsigned int sleep(unsigned int seconds)
+{
+ struct timeval my_timeval = { seconds, 0 };
+
+ if (sys_select(0, 0, 0, 0, &my_timeval) < 0)
+ return my_timeval.tv_sec + !!my_timeval.tv_usec;
+ else
+ return 0;
+}
+
+static __attribute__((unused))
+int stat(const char *path, struct stat *buf)
+{
+ int ret = sys_stat(path, buf);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int symlink(const char *old, const char *new)
+{
+ int ret = sys_symlink(old, new);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int tcsetpgrp(int fd, pid_t pid)
+{
+ return ioctl(fd, TIOCSPGRP, &pid);
+}
+
+static __attribute__((unused))
+mode_t umask(mode_t mode)
+{
+ return sys_umask(mode);
+}
+
+static __attribute__((unused))
+int umount2(const char *path, int flags)
+{
+ int ret = sys_umount2(path, flags);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+int unlink(const char *path)
+{
+ int ret = sys_unlink(path);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+pid_t wait4(pid_t pid, int *status, int options, struct rusage *rusage)
+{
+ pid_t ret = sys_wait4(pid, status, options, rusage);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+pid_t waitpid(pid_t pid, int *status, int options)
+{
+ pid_t ret = sys_waitpid(pid, status, options);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+pid_t wait(int *status)
+{
+ pid_t ret = sys_wait(status);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+ssize_t write(int fd, const void *buf, size_t count)
+{
+ ssize_t ret = sys_write(fd, buf, count);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+/* some size-optimized reimplementations of a few common str* and mem*
+ * functions. They're marked static, except memcpy() and raise(), which are
+ * used by libgcc on ARM and so are marked weak instead, in order not to cause
+ * an error when building a program made of multiple files (not recommended).
+ */
+
+static __attribute__((unused))
+void *memmove(void *dst, const void *src, size_t len)
+{
+ ssize_t pos = (dst <= src) ? -1 : (long)len;
+ void *ret = dst;
+
+ while (len--) {
+ pos += (dst <= src) ? 1 : -1;
+ ((char *)dst)[pos] = ((char *)src)[pos];
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+void *memset(void *dst, int b, size_t len)
+{
+ char *p = dst;
+
+ while (len--)
+ *(p++) = b;
+ return dst;
+}
+
+static __attribute__((unused))
+int memcmp(const void *s1, const void *s2, size_t n)
+{
+ size_t ofs = 0;
+ int c1 = 0;
+
+ while (ofs < n && !(c1 = ((unsigned char *)s1)[ofs] - ((unsigned char *)s2)[ofs])) {
+ ofs++;
+ }
+ return c1;
+}
+
+static __attribute__((unused))
+char *strcpy(char *dst, const char *src)
+{
+ char *ret = dst;
+
+ while ((*dst++ = *src++));
+ return ret;
+}
+
+static __attribute__((unused))
+char *strchr(const char *s, int c)
+{
+ while (*s) {
+ if (*s == (char)c)
+ return (char *)s;
+ s++;
+ }
+ return NULL;
+}
+
+static __attribute__((unused))
+char *strrchr(const char *s, int c)
+{
+ const char *ret = NULL;
+
+ while (*s) {
+ if (*s == (char)c)
+ ret = s;
+ s++;
+ }
+ return (char *)ret;
+}
+
+static __attribute__((unused))
+size_t nolibc_strlen(const char *str)
+{
+ size_t len;
+
+ for (len = 0; str[len]; len++);
+ return len;
+}
+
+#define strlen(str) ({ \
+ __builtin_constant_p((str)) ? \
+ __builtin_strlen((str)) : \
+ nolibc_strlen((str)); \
+})
+
+static __attribute__((unused))
+int isdigit(int c)
+{
+ return (unsigned int)(c - '0') <= 9;
+}
+
+static __attribute__((unused))
+long atol(const char *s)
+{
+ unsigned long ret = 0;
+ unsigned long d;
+ int neg = 0;
+
+ if (*s == '-') {
+ neg = 1;
+ s++;
+ }
+
+ while (1) {
+ d = (*s++) - '0';
+ if (d > 9)
+ break;
+ ret *= 10;
+ ret += d;
+ }
+
+ return neg ? -ret : ret;
+}
+
+static __attribute__((unused))
+int atoi(const char *s)
+{
+ return atol(s);
+}
+
+static __attribute__((unused))
+const char *ltoa(long in)
+{
+ /* large enough for -9223372036854775808 */
+ static char buffer[21];
+ char *pos = buffer + sizeof(buffer) - 1;
+ int neg = in < 0;
+ unsigned long n = neg ? -in : in;
+
+ *pos-- = '\0';
+ do {
+ *pos-- = '0' + n % 10;
+ n /= 10;
+ if (pos < buffer)
+ return pos + 1;
+ } while (n);
+
+ if (neg)
+ *pos-- = '-';
+ return pos + 1;
+}
+
+__attribute__((weak,unused))
+void *memcpy(void *dst, const void *src, size_t len)
+{
+ return memmove(dst, src, len);
+}
+
+/* needed by libgcc for divide by zero */
+__attribute__((weak,unused))
+int raise(int signal)
+{
+ return kill(getpid(), signal);
+}
+
+/* Here come a few helper functions */
+
+static __attribute__((unused))
+void FD_ZERO(fd_set *set)
+{
+ memset(set, 0, sizeof(*set));
+}
+
+static __attribute__((unused))
+void FD_SET(int fd, fd_set *set)
+{
+ if (fd < 0 || fd >= FD_SETSIZE)
+ return;
+ set->fd32[fd / 32] |= 1 << (fd & 31);
+}
+
+/* WARNING: this only supports the first 4096 majors and the first 256 minors */
+static __attribute__((unused))
+dev_t makedev(unsigned int major, unsigned int minor)
+{
+ return ((major & 0xfff) << 8) | (minor & 0xff);
+}
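
Every wrapper above follows the same shape: the raw sys_*() call returns
the kernel's negative errno on failure, and the wrapper converts that into
the libc convention of returning -1 with errno set. A minimal hosted sketch
of the pattern (sys_demo() and demo() are invented names; nolibc itself
assigns errno through its SET_ERRNO macro):

    #include <errno.h>
    #include <stdio.h>

    /* stand-in for a raw syscall that reports failure as -errno */
    static long sys_demo(void)
    {
            return -EACCES;
    }

    /* same shape as the nolibc wrappers: -errno in, (-1, errno set) out */
    static long demo(void)
    {
            long ret = sys_demo();

            if (ret < 0) {
                    errno = -ret;
                    ret = -1;
            }
            return ret;
    }

    int main(void)
    {
            if (demo() == -1)
                    printf("demo failed: errno=%d (EACCES=%d)\n", errno, EACCES);
            return 0;
    }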
diff --git a/tools/testing/selftests/rcutorture/doc/initrd.txt b/tools/testing/selftests/rcutorture/doc/initrd.txt
index 833f826d6ec2..933b4fd12327 100644
--- a/tools/testing/selftests/rcutorture/doc/initrd.txt
+++ b/tools/testing/selftests/rcutorture/doc/initrd.txt
@@ -1,9 +1,12 @@
-This document describes one way to create the initrd directory hierarchy
-in order to allow an initrd to be built into your kernel. The trick
-here is to steal the initrd file used on your Linux laptop, Ubuntu in
-this case. There are probably much better ways of doing this.
+The rcutorture scripting tools automatically create the needed initrd
+directory using dracut. Failing that, this tool will create an initrd
+containing a single statically linked binary named "init" that loops
+over a very long sleep() call. In both cases, this creation is done
+by tools/testing/selftests/rcutorture/bin/mkinitrd.sh.
-That said, here are the commands:
+However, if you are attempting to run rcutorture on a system that does
+not have dracut installed, and if you don't like the notion of static
+linking, you might wish to press an existing initrd into service:
------------------------------------------------------------------------
cd tools/testing/selftests/rcutorture
@@ -11,22 +14,7 @@ zcat /initrd.img > /tmp/initrd.img.zcat
mkdir initrd
cd initrd
cpio -id < /tmp/initrd.img.zcat
-------------------------------------------------------------------------
-
-Another way to create an initramfs image is using "dracut"[1], which is
-available on many distros, however the initramfs dracut generates is a cpio
-archive with another cpio archive in it, so an extra step is needed to create
-the initrd directory hierarchy.
-
-Here are the commands to create a initrd directory for rcutorture using
-dracut:
-
-------------------------------------------------------------------------
-dracut --no-hostonly --no-hostonly-cmdline --module "base bash shutdown" /tmp/initramfs.img
-cd tools/testing/selftests/rcutorture
-mkdir initrd
-cd initrd
-/usr/lib/dracut/skipcpio /tmp/initramfs.img | zcat | cpio -id < /tmp/initramfs.img
+# Manually verify that the initrd contains the needed binaries and libraries.
------------------------------------------------------------------------
Interestingly enough, if you are running rcutorture, you don't really
@@ -39,75 +27,12 @@ with 0755 mode.
------------------------------------------------------------------------
#!/bin/sh
-[ -d /dev ] || mkdir -m 0755 /dev
-[ -d /root ] || mkdir -m 0700 /root
-[ -d /sys ] || mkdir /sys
-[ -d /proc ] || mkdir /proc
-[ -d /tmp ] || mkdir /tmp
-mkdir -p /var/lock
-mount -t sysfs -o nodev,noexec,nosuid sysfs /sys
-mount -t proc -o nodev,noexec,nosuid proc /proc
-# Some things don't work properly without /etc/mtab.
-ln -sf /proc/mounts /etc/mtab
-
-# Note that this only becomes /dev on the real filesystem if udev's scripts
-# are used; which they will be, but it's worth pointing out
-if ! mount -t devtmpfs -o mode=0755 udev /dev; then
- echo "W: devtmpfs not available, falling back to tmpfs for /dev"
- mount -t tmpfs -o mode=0755 udev /dev
- [ -e /dev/console ] || mknod --mode=600 /dev/console c 5 1
- [ -e /dev/kmsg ] || mknod --mode=644 /dev/kmsg c 1 11
- [ -e /dev/null ] || mknod --mode=666 /dev/null c 1 3
-fi
-
-mkdir /dev/pts
-mount -t devpts -o noexec,nosuid,gid=5,mode=0620 devpts /dev/pts || true
-mount -t tmpfs -o "nosuid,size=20%,mode=0755" tmpfs /run
-mkdir /run/initramfs
-# compatibility symlink for the pre-oneiric locations
-ln -s /run/initramfs /dev/.initramfs
-
-# Export relevant variables
-export ROOT=
-export ROOTDELAY=
-export ROOTFLAGS=
-export ROOTFSTYPE=
-export IP=
-export BOOT=
-export BOOTIF=
-export UBIMTD=
-export break=
-export init=/sbin/init
-export quiet=n
-export readonly=y
-export rootmnt=/root
-export debug=
-export panic=
-export blacklist=
-export resume=
-export resume_offset=
-export recovery=
-
-for i in /sys/devices/system/cpu/cpu*/online
-do
- case $i in
- '/sys/devices/system/cpu/cpu0/online')
- ;;
- '/sys/devices/system/cpu/cpu*/online')
- ;;
- *)
- echo 1 > $i
- ;;
- esac
-done
-
while :
do
sleep 10
done
------------------------------------------------------------------------
-References:
-[1]: https://dracut.wiki.kernel.org/index.php/Main_Page
-[2]: http://blog.elastocloud.org/2015/06/rapid-linux-kernel-devtest-with-qemu.html
-[3]: https://www.centos.org/forums/viewtopic.php?t=51621
+This approach also allows most of the binaries and libraries in the
+initrd filesystem to be dispensed with, which can save significant
+space in rcutorture's "res" directory.
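
The statically linked "init" mentioned above needs nothing beyond an
infinite sleep loop. A sketch of such a binary (illustrative only; the
real one is generated by mkinitrd.sh):

    #include <unistd.h>

    int main(void)
    {
            /* park PID 1 forever, one very long sleep at a time */
            for (;;)
                    sleep(60 * 60 * 24);
            return 0;
    }

Built with something like "gcc -static -o init init.c" and placed at the
root of the initrd, this keeps PID 1 alive for the whole rcutorture run.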
diff --git a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
index 891ad13e95b2..d27285f8ee82 100644
--- a/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
+++ b/tools/testing/selftests/rcutorture/formal/srcu-cbmc/include/linux/types.h
@@ -131,8 +131,8 @@ struct hlist_node {
* weird ABI and we need to ask it explicitly.
*
* The alignment is required to guarantee that bits 0 and 1 of @next will be
- * clear under normal conditions -- as long as we use call_rcu(),
- * call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback.
+ * clear under normal conditions -- as long as we use call_rcu() or
+ * call_srcu() to queue callbacks.
*
* This guarantee is important for few reasons:
* - future call_rcu_lazy() will make use of lower bits in the pointer;
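
The alignment requirement described in that comment is what frees bits 0
and 1 of ->next for use as flags. A user-space sketch of the trick (struct
cb, CB_FLAG_LAZY and cb_next() are made-up names for illustration):

    #include <assert.h>
    #include <stdint.h>

    #define CB_FLAG_LAZY 0x1UL              /* hypothetical flag in bit 0 */

    struct cb {
            struct cb *next;
    } __attribute__((aligned(4)));

    /* strip the flag bits before following the pointer */
    static struct cb *cb_next(const struct cb *cb)
    {
            return (struct cb *)((uintptr_t)cb->next & ~3UL);
    }

    int main(void)
    {
            static struct cb a, b;

            assert(((uintptr_t)&b & 3) == 0); /* alignment clears bits 0-1 */
            a.next = (struct cb *)((uintptr_t)&b | CB_FLAG_LAZY);
            assert(cb_next(&a) == &b);
            return 0;
    }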
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index e1473234968d..c9a2abf8be1b 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -2731,9 +2731,14 @@ TEST(syscall_restart)
ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0));
ASSERT_EQ(true, WIFSTOPPED(status));
ASSERT_EQ(SIGSTOP, WSTOPSIG(status));
- /* Verify signal delivery came from parent now. */
ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info));
- EXPECT_EQ(getpid(), info.si_pid);
+ /*
+ * There is no siginfo on SIGSTOP any more, so we can't verify
+ * signal delivery came from parent now (getpid() == info.si_pid).
+ * https://lkml.kernel.org/r/CAGXu5jJaZAOzP1qFz66tYrtbuywqb+UN2SOA1VLHpCCOiYvYeg@mail.gmail.com
+ * At least verify the SIGSTOP via PTRACE_GETSIGINFO.
+ */
+ EXPECT_EQ(SIGSTOP, info.si_signo);
/* Restart nanosleep with SIGCONT, which triggers restart_syscall. */
ASSERT_EQ(0, kill(child_pid, SIGCONT));
diff --git a/tools/testing/selftests/tc-testing/.gitignore b/tools/testing/selftests/tc-testing/.gitignore
index 7a60b85e148f..c5cc160948b3 100644
--- a/tools/testing/selftests/tc-testing/.gitignore
+++ b/tools/testing/selftests/tc-testing/.gitignore
@@ -1,2 +1,5 @@
__pycache__/
*.pyc
+plugins/
+*.xml
+*.tap
diff --git a/tools/testing/selftests/tc-testing/TdcPlugin.py b/tools/testing/selftests/tc-testing/TdcPlugin.py
index 3ee9a6dacb52..1d9e279331eb 100644
--- a/tools/testing/selftests/tc-testing/TdcPlugin.py
+++ b/tools/testing/selftests/tc-testing/TdcPlugin.py
@@ -18,11 +18,12 @@ class TdcPlugin:
if self.args.verbose > 1:
print(' -- {}.post_suite'.format(self.sub_class))
- def pre_case(self, test_ordinal, testid):
+ def pre_case(self, test_ordinal, testid, test_name):
'''run commands before test_runner does one test'''
if self.args.verbose > 1:
print(' -- {}.pre_case'.format(self.sub_class))
self.args.testid = testid
+ self.args.test_name = test_name
self.args.test_ordinal = test_ordinal
def post_case(self):
diff --git a/tools/testing/selftests/tc-testing/TdcResults.py b/tools/testing/selftests/tc-testing/TdcResults.py
new file mode 100644
index 000000000000..1e4d95fdf8d0
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/TdcResults.py
@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+
+from enum import Enum
+
+class ResultState(Enum):
+ noresult = -1
+ skip = 0
+ success = 1
+ fail = 2
+
+class TestResult:
+ def __init__(self, test_id="", test_name=""):
+ self.test_id = test_id
+ self.test_name = test_name
+ self.result = ResultState.noresult
+ self.failmsg = ""
+ self.errormsg = ""
+ self.steps = []
+
+ def set_result(self, result):
+ if (isinstance(result, ResultState)):
+ self.result = result
+ return True
+ else:
+ raise TypeError('Unknown result type, must be type ResultState')
+
+ def get_result(self):
+ return self.result
+
+ def set_errormsg(self, errormsg):
+ self.errormsg = errormsg
+ return True
+
+ def append_errormsg(self, errormsg):
+ self.errormsg = '{}\n{}'.format(self.errormsg, errormsg)
+
+ def get_errormsg(self):
+ return self.errormsg
+
+ def set_failmsg(self, failmsg):
+ self.failmsg = failmsg
+ return True
+
+ def append_failmsg(self, failmsg):
+ self.failmsg = '{}\n{}'.format(self.failmsg, failmsg)
+
+ def get_failmsg(self):
+ return self.failmsg
+
+ def add_steps(self, newstep):
+ if type(newstep) == list:
+ self.steps.extend(newstep)
+ elif type(newstep) == str:
+ self.steps.append(newstep)
+ else:
+ raise TypeError('TdcResults.add_steps() requires a list or str')
+
+ def get_executed_steps(self):
+ return self.steps
+
+class TestSuiteReport():
+ def __init__(self):
+ # per-instance list so separate reports do not share results
+ self._testsuite = []
+
+ def add_resultdata(self, result_data):
+ if isinstance(result_data, TestResult):
+ self._testsuite.append(result_data)
+ return True
+
+ def count_tests(self):
+ return len(self._testsuite)
+
+ def count_failures(self):
+ return sum(1 for t in self._testsuite if t.result == ResultState.fail)
+
+ def count_skips(self):
+ return sum(1 for t in self._testsuite if t.result == ResultState.skip)
+
+ def find_result(self, test_id):
+ return next((tr for tr in self._testsuite if tr.test_id == test_id), None)
+
+ def update_result(self, result_data):
+ orig = self.find_result(result_data.test_id)
+ if orig != None:
+ idx = self._testsuite.index(orig)
+ self._testsuite[idx] = result_data
+ else:
+ self.add_resultdata(result_data)
+
+ def format_tap(self):
+ ftap = ""
+ ftap += '1..{}\n'.format(self.count_tests())
+ index = 1
+ for t in self._testsuite:
+ if t.result == ResultState.fail:
+ ftap += 'not '
+ ftap += 'ok {} {} - {}'.format(str(index), t.test_id, t.test_name)
+ if t.result == ResultState.skip or t.result == ResultState.noresult:
+ ftap += ' # skipped - {}\n'.format(t.errormsg)
+ elif t.result == ResultState.fail:
+ if len(t.steps) > 0:
+ ftap += '\tCommands executed in this test case:'
+ for step in t.steps:
+ ftap += '\n\t\t{}'.format(step)
+ ftap += '\n\t{}'.format(t.failmsg)
+ ftap += '\n'
+ index += 1
+ return ftap
+
+ def format_xunit(self):
+ from xml.sax.saxutils import escape
+ xunit = "<testsuites>\n"
+ xunit += '\t<testsuite tests=\"{}\" skips=\"{}\">\n'.format(self.count_tests(), self.count_skips())
+ for t in self._testsuite:
+ xunit += '\t\t<testcase classname=\"{}\" '.format(escape(t.test_id))
+ xunit += 'name=\"{}\">\n'.format(escape(t.test_name))
+ if t.failmsg:
+ xunit += '\t\t\t<failure>\n'
+ if len(t.steps) > 0:
+ xunit += 'Commands executed in this test case:\n'
+ for step in t.steps:
+ xunit += '\t{}\n'.format(escape(step))
+ xunit += 'FAILURE: {}\n'.format(escape(t.failmsg))
+ xunit += '\t\t\t</failure>\n'
+ if t.errormsg:
+ xunit += '\t\t\t<error>\n{}\n'.format(escape(t.errormsg))
+ xunit += '\t\t\t</error>\n'
+ if t.result == ResultState.skip:
+ xunit += '\t\t\t<skipped/>\n'
+ xunit += '\t\t</testcase>\n'
+ xunit += '\t</testsuite>\n'
+ xunit += '</testsuites>\n'
+ return xunit
diff --git a/tools/testing/selftests/tc-testing/bpf/Makefile b/tools/testing/selftests/tc-testing/bpf/Makefile
index dc92eb271d9a..be5a5e542804 100644
--- a/tools/testing/selftests/tc-testing/bpf/Makefile
+++ b/tools/testing/selftests/tc-testing/bpf/Makefile
@@ -4,6 +4,7 @@ APIDIR := ../../../../include/uapi
TEST_GEN_FILES = action.o
top_srcdir = ../../../../..
+KSFT_KHDR_INSTALL := 1
include ../../lib.mk
CLANG ?= clang
diff --git a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
index 477a7bd7d7fb..e00c798de0bb 100644
--- a/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
+++ b/tools/testing/selftests/tc-testing/plugin-lib/valgrindPlugin.py
@@ -11,6 +11,7 @@ from string import Template
import subprocess
import time
from TdcPlugin import TdcPlugin
+from TdcResults import *
from tdc_config import *
@@ -21,6 +22,7 @@ class SubPlugin(TdcPlugin):
def __init__(self):
self.sub_class = 'valgrind/SubPlugin'
self.tap = ''
+ self._tsr = TestSuiteReport()
super().__init__()
def pre_suite(self, testcount, testidlist):
@@ -34,10 +36,14 @@ class SubPlugin(TdcPlugin):
def post_suite(self, index):
'''run commands after test_runner goes into a test loop'''
super().post_suite(index)
- self._add_to_tap('\n|---\n')
if self.args.verbose > 1:
print('{}.post_suite'.format(self.sub_class))
- print('{}'.format(self.tap))
+ #print('{}'.format(self.tap))
+ for xx in range(index - 1, self.testcount):
+ res = TestResult('{}-mem'.format(self.testidlist[xx]), 'Test skipped')
+ res.set_result(ResultState.skip)
+ res.set_errormsg('Skipped because of prior setup/teardown failure')
+ self._add_results(res)
if self.args.verbose < 4:
subprocess.check_output('rm -f vgnd-*.log', shell=True)
@@ -128,8 +134,17 @@ class SubPlugin(TdcPlugin):
nle_num = int(nle_mo.group(1))
mem_results = ''
+ res = TestResult('{}-mem'.format(self.args.testid),
+ '{} memory leak check'.format(self.args.test_name))
if (def_num > 0) or (ind_num > 0) or (pos_num > 0) or (nle_num > 0):
mem_results += 'not '
+ res.set_result(ResultState.fail)
+ res.set_failmsg('Memory leak detected')
+ res.append_failmsg(content)
+ else:
+ res.set_result(ResultState.success)
+
+ self._add_results(res)
mem_results += 'ok {} - {}-mem # {}\n'.format(
self.args.test_ordinal, self.args.testid, 'memory leak check')
@@ -138,5 +153,8 @@ class SubPlugin(TdcPlugin):
print('{}'.format(content))
self._add_to_tap(content)
+ def _add_results(self, res):
+ self._tsr.add_resultdata(res)
+
def _add_to_tap(self, more_tap_output):
self.tap += more_tap_output
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index 7607ba3e3cbe..e6e4ce80a726 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -23,6 +23,7 @@ from tdc_config import *
from tdc_helper import *
import TdcPlugin
+from TdcResults import *
class PluginMgrTestFail(Exception):
@@ -60,10 +61,10 @@ class PluginMgr:
for pgn_inst in reversed(self.plugin_instances):
pgn_inst.post_suite(index)
- def call_pre_case(self, test_ordinal, testid):
+ def call_pre_case(self, test_ordinal, testid, test_name):
for pgn_inst in self.plugin_instances:
try:
- pgn_inst.pre_case(test_ordinal, testid)
+ pgn_inst.pre_case(test_ordinal, testid, test_name)
except Exception as ee:
print('exception {} in call to pre_case for {} plugin'.
format(ee, pgn_inst.__class__))
@@ -102,7 +103,6 @@ class PluginMgr:
self.argparser = argparse.ArgumentParser(
description='Linux TC unit tests')
-
def replace_keywords(cmd):
"""
For a given executable command, substitute any known
@@ -131,12 +131,16 @@ def exec_cmd(args, pm, stage, command):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=ENVIR)
- (rawout, serr) = proc.communicate()
- if proc.returncode != 0 and len(serr) > 0:
- foutput = serr.decode("utf-8", errors="ignore")
- else:
- foutput = rawout.decode("utf-8", errors="ignore")
+ try:
+ (rawout, serr) = proc.communicate(timeout=NAMES['TIMEOUT'])
+ if proc.returncode != 0 and len(serr) > 0:
+ foutput = serr.decode("utf-8", errors="ignore")
+ else:
+ foutput = rawout.decode("utf-8", errors="ignore")
+ except subprocess.TimeoutExpired:
+ foutput = "Command \"{}\" timed out\n".format(command)
+ proc.returncode = 255
proc.stdout.close()
proc.stderr.close()
@@ -183,6 +187,7 @@ def run_one_test(pm, args, index, tidx):
result = True
tresult = ""
tap = ""
+ res = TestResult(tidx['id'], tidx['name'])
if args.verbose > 0:
print("\t====================\n=====> ", end="")
print("Test " + tidx["id"] + ": " + tidx["name"])
@@ -190,7 +195,7 @@ def run_one_test(pm, args, index, tidx):
# populate NAMES with TESTID for this test
NAMES['TESTID'] = tidx['id']
- pm.call_pre_case(index, tidx['id'])
+ pm.call_pre_case(index, tidx['id'], tidx['name'])
prepare_env(args, pm, 'setup', "-----> prepare stage", tidx["setup"])
if (args.verbose > 0):
@@ -205,10 +210,11 @@ def run_one_test(pm, args, index, tidx):
pm.call_post_execute()
if (exit_code is None or exit_code != int(tidx["expExitCode"])):
- result = False
print("exit: {!r}".format(exit_code))
print("exit: {}".format(int(tidx["expExitCode"])))
#print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"])))
+ res.set_result(ResultState.fail)
+ res.set_failmsg('Command exited with {}, expected {}\n{}'.format(exit_code, tidx["expExitCode"], procout))
print(procout)
else:
if args.verbose > 0:
@@ -219,20 +225,15 @@ def run_one_test(pm, args, index, tidx):
if procout:
match_index = re.findall(match_pattern, procout)
if len(match_index) != int(tidx["matchCount"]):
- result = False
+ res.set_result(ResultState.fail)
+ res.set_failmsg('Could not match regex pattern. Verify command output:\n{}'.format(procout))
+ else:
+ res.set_result(ResultState.success)
elif int(tidx["matchCount"]) != 0:
- result = False
-
- if not result:
- tresult += 'not '
- tresult += 'ok {} - {} # {}\n'.format(str(index), tidx['id'], tidx['name'])
- tap += tresult
-
- if result == False:
- if procout:
- tap += procout
+ res.set_result(ResultState.fail)
+ res.set_failmsg('No output generated by verify command.')
else:
- tap += 'No output!\n'
+ res.set_result(ResultState.success)
prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout)
pm.call_post_case()
@@ -241,7 +242,7 @@ def run_one_test(pm, args, index, tidx):
# remove TESTID from NAMES
del(NAMES['TESTID'])
- return tap
+ return res
def test_runner(pm, args, filtered_tests):
"""
@@ -261,25 +262,15 @@ def test_runner(pm, args, filtered_tests):
emergency_exit = False
emergency_exit_message = ''
- if args.notap:
- if args.verbose:
- tap = 'notap requested: omitting test plan\n'
- else:
- tap = str(index) + ".." + str(tcount) + "\n"
+ tsr = TestSuiteReport()
+
try:
pm.call_pre_suite(tcount, [tidx['id'] for tidx in testlist])
except Exception as ee:
ex_type, ex, ex_tb = sys.exc_info()
print('Exception {} {} (caught in pre_suite).'.
format(ex_type, ex))
- # when the extra print statements are uncommented,
- # the traceback does not appear between them
- # (it appears way earlier in the tdc.py output)
- # so don't bother ...
- # print('--------------------(')
- # print('traceback')
traceback.print_tb(ex_tb)
- # print('--------------------)')
emergency_exit_message = 'EMERGENCY EXIT, call_pre_suite failed with exception {} {}\n'.format(ex_type, ex)
emergency_exit = True
stage = 'pre-SUITE'
@@ -295,15 +286,26 @@ def test_runner(pm, args, filtered_tests):
if args.verbose > 1:
print('Not executing test {} {} because DEV2 not defined'.
format(tidx['id'], tidx['name']))
+ res = TestResult(tidx['id'], tidx['name'])
+ res.set_result(ResultState.skip)
+ res.set_errormsg('Not executed because DEV2 is not defined')
+ tsr.add_resultdata(res)
continue
try:
badtest = tidx # in case it goes bad
- tap += run_one_test(pm, args, index, tidx)
+ res = run_one_test(pm, args, index, tidx)
+ tsr.add_resultdata(res)
except PluginMgrTestFail as pmtf:
ex_type, ex, ex_tb = sys.exc_info()
stage = pmtf.stage
message = pmtf.message
output = pmtf.output
+ res = TestResult(tidx['id'], tidx['name'])
+ res.set_result(ResultState.skip)
+ res.set_errormsg(pmtf.message)
+ res.set_failmsg(pmtf.output)
+ tsr.add_resultdata(res)
+ index += 1
print(message)
print('Exception {} {} (caught in test_runner, running test {} {} {} stage {})'.
format(ex_type, ex, index, tidx['id'], tidx['name'], stage))
@@ -322,16 +324,16 @@ def test_runner(pm, args, filtered_tests):
# if we failed in setup or teardown,
# fill in the remaining tests with ok-skipped
count = index
- if not args.notap:
- tap += 'about to flush the tap output if tests need to be skipped\n'
- if tcount + 1 != index:
- for tidx in testlist[index - 1:]:
- msg = 'skipped - previous {} failed'.format(stage)
- tap += 'ok {} - {} # {} {} {}\n'.format(
- count, tidx['id'], msg, index, badtest.get('id', '--Unknown--'))
- count += 1
- tap += 'done flushing skipped test tap output\n'
+ if tcount + 1 != count:
+ for tidx in testlist[count - 1:]:
+ res = TestResult(tidx['id'], tidx['name'])
+ res.set_result(ResultState.skip)
+ msg = 'skipped - previous {} failed {} {}'.format(stage,
+ index, badtest.get('id', '--Unknown--'))
+ res.set_errormsg(msg)
+ tsr.add_resultdata(res)
+ count += 1
if args.pause:
print('Want to pause\nPress enter to continue ...')
@@ -340,7 +342,7 @@ def test_runner(pm, args, filtered_tests):
pm.call_post_suite(index)
- return tap
+ return tsr
def has_blank_ids(idlist):
"""
@@ -381,6 +383,10 @@ def set_args(parser):
Set the command line arguments for tdc.
"""
parser.add_argument(
+ '--outfile', type=str,
+ help='Path to the file in which results should be saved. ' +
+ 'Default target is the current directory.')
+ parser.add_argument(
'-p', '--path', type=str,
help='The full path to the tc executable to use')
sg = parser.add_argument_group(
@@ -416,8 +422,9 @@ def set_args(parser):
'-v', '--verbose', action='count', default=0,
help='Show the commands that are being run')
parser.add_argument(
- '-N', '--notap', action='store_true',
- help='Suppress tap results for command under test')
+ '--format', default='tap', const='tap', nargs='?',
+ choices=['none', 'xunit', 'tap'],
+ help='Specify the format for test results. (Default: TAP)')
parser.add_argument('-d', '--device',
help='Execute the test case in flower category')
parser.add_argument(
@@ -438,6 +445,8 @@ def check_default_settings(args, remaining, pm):
NAMES['TC'] = args.path
if args.device != None:
NAMES['DEV2'] = args.device
+ if 'TIMEOUT' not in NAMES:
+ NAMES['TIMEOUT'] = None
if not os.path.isfile(NAMES['TC']):
print("The specified tc path " + NAMES['TC'] + " does not exist.")
exit(1)
@@ -632,12 +641,30 @@ def set_operation_mode(pm, args):
if len(alltests):
catresults = test_runner(pm, args, alltests)
+ if args.format == 'none':
+ print('Test results output suppression requested\n')
+ else:
+ print('\nAll test results: \n')
+ if args.format == 'xunit':
+ suffix = 'xml'
+ res = catresults.format_xunit()
+ elif args.format == 'tap':
+ suffix = 'tap'
+ res = catresults.format_tap()
+ print(res)
+ print('\n\n')
+ if not args.outfile:
+ fname = 'test-results.{}'.format(suffix)
+ else:
+ fname = args.outfile
+ with open(fname, 'w') as fh:
+ fh.write(res)
+ fh.close()
+ if os.getenv('SUDO_UID') is not None:
+ os.chown(fname, uid=int(os.getenv('SUDO_UID')),
+ gid=int(os.getenv('SUDO_GID')))
else:
- catresults = 'No tests found\n'
- if args.notap:
- print('Tap output suppression requested\n')
- else:
- print('All test results: \n\n{}'.format(catresults))
+ print('No tests found\n')
def main():
"""
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index d651bc1501bd..6d91e48c2625 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -15,6 +15,8 @@ NAMES = {
'DEV1': 'v0p1',
'DEV2': '',
'BATCH_FILE': './batch.txt',
+ # Length of time in seconds to wait before terminating a command
+ 'TIMEOUT': 12,
# Name of the namespace to use
'NS': 'tcut',
# Directory containing eBPF test programs
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 6e67e726e5a5..e13eb6cc8901 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -25,6 +25,7 @@ TEST_GEN_FILES += virtual_address_range
TEST_PROGS := run_vmtests
+KSFT_KHDR_INSTALL := 1
include ../lib.mk
$(OUTPUT)/userfaultfd: LDLIBS += -lpthread
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index fb22bccfbc8a..7ef45a4a3cba 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -23,6 +23,10 @@
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_ALIGN(x) ((x + PAGE_SIZE - 1) & PAGE_MASK)
+/* generic data direction definitions */
+#define READ 0
+#define WRITE 1
+
typedef unsigned long long phys_addr_t;
typedef unsigned long long dma_addr_t;
typedef size_t __kernel_size_t;
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 17cecc96f735..b07ac4614e1c 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -70,11 +70,9 @@ static void soft_timer_start(struct hrtimer *hrt, u64 ns)
HRTIMER_MODE_ABS);
}
-static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
+static void soft_timer_cancel(struct hrtimer *hrt)
{
hrtimer_cancel(hrt);
- if (work)
- cancel_work_sync(work);
}
static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -102,23 +100,6 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/*
- * Work function for handling the backup timer that we schedule when a vcpu is
- * no longer running, but had a timer programmed to fire in the future.
- */
-static void kvm_timer_inject_irq_work(struct work_struct *work)
-{
- struct kvm_vcpu *vcpu;
-
- vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-
- /*
- * If the vcpu is blocked we want to wake it up so that it will see
- * the timer has expired when entering the guest.
- */
- kvm_vcpu_wake_up(vcpu);
-}
-
static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
{
u64 cval, now;
@@ -188,7 +169,7 @@ static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
return HRTIMER_RESTART;
}
- schedule_work(&timer->expired);
+ kvm_vcpu_wake_up(vcpu);
return HRTIMER_NORESTART;
}
@@ -300,7 +281,7 @@ static void phys_timer_emulate(struct kvm_vcpu *vcpu)
* then we also don't need a soft timer.
*/
if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
- soft_timer_cancel(&timer->phys_timer, NULL);
+ soft_timer_cancel(&timer->phys_timer);
return;
}
@@ -426,7 +407,7 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
vtimer_restore_state(vcpu);
- soft_timer_cancel(&timer->bg_timer, &timer->expired);
+ soft_timer_cancel(&timer->bg_timer);
}
static void set_cntvoff(u64 cntvoff)
@@ -544,7 +525,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
* In any case, we re-schedule the hrtimer for the physical timer when
* coming back to the VCPU thread in kvm_timer_vcpu_load().
*/
- soft_timer_cancel(&timer->phys_timer, NULL);
+ soft_timer_cancel(&timer->phys_timer);
/*
* The kernel may decide to run userspace after calling vcpu_put, so
@@ -637,7 +618,6 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
vcpu_ptimer(vcpu)->cntvoff = 0;
- INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
timer->bg_timer.function = kvm_bg_timer_expire;
@@ -792,11 +772,8 @@ out_free_irq:
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- soft_timer_cancel(&timer->bg_timer, &timer->expired);
- soft_timer_cancel(&timer->phys_timer, NULL);
- kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
+ soft_timer_cancel(&timer->bg_timer);
}
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 23774970c9df..9e350fd34504 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_RWLOCK(kvm_vmid_lock);
+static DEFINE_SPINLOCK(kvm_vmid_lock);
static bool vgic_present;
@@ -484,7 +484,9 @@ void force_vm_exit(const cpumask_t *mask)
*/
static bool need_new_vmid_gen(struct kvm *kvm)
{
- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+ u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
+ smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
+ return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
}
/**
@@ -499,16 +501,11 @@ static void update_vttbr(struct kvm *kvm)
{
phys_addr_t pgd_phys;
u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
- bool new_gen;
- read_lock(&kvm_vmid_lock);
- new_gen = need_new_vmid_gen(kvm);
- read_unlock(&kvm_vmid_lock);
-
- if (!new_gen)
+ if (!need_new_vmid_gen(kvm))
return;
- write_lock(&kvm_vmid_lock);
+ spin_lock(&kvm_vmid_lock);
/*
* We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -516,7 +513,7 @@ static void update_vttbr(struct kvm *kvm)
* use the same vmid.
*/
if (!need_new_vmid_gen(kvm)) {
- write_unlock(&kvm_vmid_lock);
+ spin_unlock(&kvm_vmid_lock);
return;
}
@@ -539,7 +536,6 @@ static void update_vttbr(struct kvm *kvm)
kvm_call_hyp(__kvm_flush_vm_context);
}
- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;
kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
@@ -550,7 +546,10 @@ static void update_vttbr(struct kvm *kvm)
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
- write_unlock(&kvm_vmid_lock);
+ smp_wmb();
+ WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
+
+ spin_unlock(&kvm_vmid_lock);
}
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
@@ -674,8 +673,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_handle_mmio_return(vcpu, vcpu->run);
if (ret)
return ret;
- if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
- return 0;
}
if (run->immediate_exit)
@@ -1205,14 +1202,30 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
*/
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
- bool is_dirty = false;
+ bool flush = false;
+ int r;
+
+ mutex_lock(&kvm->slots_lock);
+
+ r = kvm_get_dirty_log_protect(kvm, log, &flush);
+
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+
+ mutex_unlock(&kvm->slots_lock);
+ return r;
+}
+
+int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
+{
+ bool flush = false;
int r;
mutex_lock(&kvm->slots_lock);
- r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+ r = kvm_clear_dirty_log_protect(kvm, log, &flush);
- if (is_dirty)
+ if (flush)
kvm_flush_remote_tlbs(kvm);
mutex_unlock(&kvm->slots_lock);
@@ -1640,8 +1653,10 @@ int kvm_arch_init(void *opaque)
return -ENODEV;
}
- if (!kvm_arch_check_sve_has_vhe()) {
- kvm_pr_unimpl("SVE system without VHE unsupported. Broken cpu?");
+ in_hyp_mode = is_kernel_in_hyp_mode();
+
+ if (!in_hyp_mode && kvm_arch_requires_vhe()) {
+ kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
return -ENODEV;
}
@@ -1657,8 +1672,6 @@ int kvm_arch_init(void *opaque)
if (err)
return err;
- in_hyp_mode = is_kernel_in_hyp_mode();
-
if (!in_hyp_mode) {
err = init_hyp_mode();
if (err)
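
The smp_wmb()/smp_rmb() pair added above is the usual publication pattern:
the writer updates the payload (kvm->arch.vmid and the vttbr), orders it
before publishing the generation, and the reader orders its generation
check before consuming the payload. A stand-alone C11 sketch of the same
shape (the names are invented, and the kernel barriers are not literally
C11 fences, but the ordering argument is identical):

    #include <stdatomic.h>

    static atomic_ulong vmid;      /* payload, e.g. kvm->arch.vmid */
    static atomic_ulong vmid_gen;  /* publication marker: generation count */

    static void publish(unsigned long new_vmid, unsigned long gen)
    {
            atomic_store_explicit(&vmid, new_vmid, memory_order_relaxed);
            atomic_thread_fence(memory_order_release);  /* ~ smp_wmb() */
            atomic_store_explicit(&vmid_gen, gen, memory_order_relaxed);
    }

    /* returns 0 when the generation is stale and a slow path must run */
    static int consume(unsigned long current_gen, unsigned long *out)
    {
            unsigned long gen =
                    atomic_load_explicit(&vmid_gen, memory_order_relaxed);

            atomic_thread_fence(memory_order_acquire);  /* ~ smp_rmb() */
            if (gen != current_gen)
                    return 0;
            /* seeing the generation implies seeing the matching vmid */
            *out = atomic_load_explicit(&vmid, memory_order_relaxed);
            return 1;
    }

    int main(void)
    {
            unsigned long v;

            publish(42, 1);
            return (consume(1, &v) && v == 42) ? 0 : 1;
    }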
diff --git a/virt/kvm/arm/hyp/vgic-v3-sr.c b/virt/kvm/arm/hyp/vgic-v3-sr.c
index 616e5a433ab0..9652c453480f 100644
--- a/virt/kvm/arm/hyp/vgic-v3-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v3-sr.c
@@ -1012,8 +1012,10 @@ int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
esr = kvm_vcpu_get_hsr(vcpu);
if (vcpu_mode_is_32bit(vcpu)) {
- if (!kvm_condition_valid(vcpu))
+ if (!kvm_condition_valid(vcpu)) {
+ __kvm_skip_instr(vcpu);
return 1;
+ }
sysreg = esr_cp15_to_sysreg(esr);
} else {
@@ -1123,6 +1125,8 @@ int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
rt = kvm_vcpu_sys_get_rt(vcpu);
fn(vcpu, vmcr, rt);
+ __kvm_skip_instr(vcpu);
+
return 1;
}
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index dac7ceb1a677..08443a15e6be 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -117,6 +117,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
}
+ /*
+ * The MMIO instruction is emulated and should not be re-executed
+ * in the guest.
+ */
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
return 0;
}
@@ -144,11 +150,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt;
- /*
- * The MMIO instruction is emulated and should not be re-executed
- * in the guest.
- */
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 0;
}
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 5eca48bdb1a6..3053bf2584f8 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -115,6 +115,25 @@ static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
put_page(virt_to_page(pmd));
}
+/**
+ * stage2_dissolve_pud() - clear and flush huge PUD entry
+ * @kvm: pointer to kvm structure.
+ * @addr: IPA
+ * @pudp: pud pointer for IPA
+ *
+ * Clears a huge PUD entry, flushing the 1st and 2nd stage TLBs for @addr,
+ * and marks all pages in the range dirty.
+ */
+static void stage2_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
+{
+ if (!stage2_pud_huge(kvm, *pudp))
+ return;
+
+ stage2_pud_clear(kvm, pudp);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ put_page(virt_to_page(pudp));
+}
+
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
int min, int max)
{
@@ -607,7 +626,7 @@ static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
addr = start;
do {
pte = pte_offset_kernel(pmd, addr);
- kvm_set_pte(pte, pfn_pte(pfn, prot));
+ kvm_set_pte(pte, kvm_pfn_pte(pfn, prot));
get_page(virt_to_page(pte));
pfn++;
} while (addr += PAGE_SIZE, addr != end);
@@ -1022,7 +1041,7 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
pmd_t *pmd;
pud = stage2_get_pud(kvm, cache, addr);
- if (!pud)
+ if (!pud || stage2_pud_huge(kvm, *pud))
return NULL;
if (stage2_pud_none(kvm, *pud)) {
@@ -1083,29 +1102,103 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
return 0;
}
-static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ phys_addr_t addr, const pud_t *new_pudp)
+{
+ pud_t *pudp, old_pud;
+
+ pudp = stage2_get_pud(kvm, cache, addr);
+ VM_BUG_ON(!pudp);
+
+ old_pud = *pudp;
+
+ /*
+ * A large number of vcpus faulting on the same stage 2 entry
+ * can lead to a refault due to the
+ * stage2_pud_clear()/tlb_flush(). Skip updating the page
+ * tables if there is no change.
+ */
+ if (pud_val(old_pud) == pud_val(*new_pudp))
+ return 0;
+
+ if (stage2_pud_present(kvm, old_pud)) {
+ stage2_pud_clear(kvm, pudp);
+ kvm_tlb_flush_vmid_ipa(kvm, addr);
+ } else {
+ get_page(virt_to_page(pudp));
+ }
+
+ kvm_set_pud(pudp, *new_pudp);
+ return 0;
+}
+
+/*
+ * stage2_get_leaf_entry - walk the stage2 VM page tables and return
+ * true if a valid and present leaf-entry is found. A pointer to the
+ * leaf-entry is returned in the appropriate level variable - pudpp,
+ * pmdpp, ptepp.
+ */
+static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
+ pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp)
{
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
- pmdp = stage2_get_pmd(kvm, NULL, addr);
+ *pudpp = NULL;
+ *pmdpp = NULL;
+ *ptepp = NULL;
+
+ pudp = stage2_get_pud(kvm, NULL, addr);
+ if (!pudp || stage2_pud_none(kvm, *pudp) || !stage2_pud_present(kvm, *pudp))
+ return false;
+
+ if (stage2_pud_huge(kvm, *pudp)) {
+ *pudpp = pudp;
+ return true;
+ }
+
+ pmdp = stage2_pmd_offset(kvm, pudp, addr);
if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
return false;
- if (pmd_thp_or_huge(*pmdp))
- return kvm_s2pmd_exec(pmdp);
+ if (pmd_thp_or_huge(*pmdp)) {
+ *pmdpp = pmdp;
+ return true;
+ }
ptep = pte_offset_kernel(pmdp, addr);
if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
return false;
- return kvm_s2pte_exec(ptep);
+ *ptepp = ptep;
+ return true;
+}
+
+static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+{
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+ bool found;
+
+ found = stage2_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep);
+ if (!found)
+ return false;
+
+ if (pudp)
+ return kvm_s2pud_exec(pudp);
+ else if (pmdp)
+ return kvm_s2pmd_exec(pmdp);
+ else
+ return kvm_s2pte_exec(ptep);
}
static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
phys_addr_t addr, const pte_t *new_pte,
unsigned long flags)
{
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte, old_pte;
bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
@@ -1114,7 +1207,31 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
VM_BUG_ON(logging_active && !cache);
/* Create stage-2 page table mapping - Levels 0 and 1 */
- pmd = stage2_get_pmd(kvm, cache, addr);
+ pud = stage2_get_pud(kvm, cache, addr);
+ if (!pud) {
+ /*
+ * Ignore calls from kvm_set_spte_hva for unallocated
+ * address ranges.
+ */
+ return 0;
+ }
+
+ /*
+ * While dirty page logging - dissolve huge PUD, then continue
+ * on to allocate page.
+ */
+ if (logging_active)
+ stage2_dissolve_pud(kvm, addr, pud);
+
+ if (stage2_pud_none(kvm, *pud)) {
+ if (!cache)
+ return 0; /* ignore calls from kvm_set_spte_hva */
+ pmd = mmu_memory_cache_alloc(cache);
+ stage2_pud_populate(kvm, pud, pmd);
+ get_page(virt_to_page(pud));
+ }
+
+ pmd = stage2_pmd_offset(kvm, pud, addr);
if (!pmd) {
/*
* Ignore calls from kvm_set_spte_hva for unallocated
@@ -1182,6 +1299,11 @@ static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
return stage2_ptep_test_and_clear_young((pte_t *)pmd);
}
+static int stage2_pudp_test_and_clear_young(pud_t *pud)
+{
+ return stage2_ptep_test_and_clear_young((pte_t *)pud);
+}
+
/**
* kvm_phys_addr_ioremap - map a device range to guest IPA
*
@@ -1202,7 +1324,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
pfn = __phys_to_pfn(pa);
for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
- pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+ pte_t pte = kvm_pfn_pte(pfn, PAGE_S2_DEVICE);
if (writable)
pte = kvm_s2pte_mkwrite(pte);
@@ -1234,7 +1356,7 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
struct page *page = pfn_to_page(pfn);
/*
- * PageTransCompoungMap() returns true for THP and
+ * PageTransCompoundMap() returns true for THP and
* hugetlbfs. Make sure the adjustment is done only for THP
* pages.
*/
@@ -1347,9 +1469,12 @@ static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
do {
next = stage2_pud_addr_end(kvm, addr, end);
if (!stage2_pud_none(kvm, *pud)) {
- /* TODO:PUD not supported, revisit later if supported */
- BUG_ON(stage2_pud_huge(kvm, *pud));
- stage2_wp_pmds(kvm, pud, addr, next);
+ if (stage2_pud_huge(kvm, *pud)) {
+ if (!kvm_s2pud_readonly(pud))
+ kvm_set_s2pud_readonly(pud);
+ } else {
+ stage2_wp_pmds(kvm, pud, addr, next);
+ }
}
} while (pud++, addr = next, addr != end);
}
@@ -1392,7 +1517,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
*
* Called to start logging dirty pages after memory region
* KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
- * all present PMD and PTEs are write protected in the memory region.
+ * all present PUD, PMD and PTEs are write protected in the memory region.
* Afterwards read of dirty page log can be called.
*
* Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
@@ -1470,12 +1595,70 @@ static void kvm_send_hwpoison_signal(unsigned long address,
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
}
+static bool fault_supports_stage2_pmd_mappings(struct kvm_memory_slot *memslot,
+ unsigned long hva)
+{
+ gpa_t gpa_start, gpa_end;
+ hva_t uaddr_start, uaddr_end;
+ size_t size;
+
+ size = memslot->npages * PAGE_SIZE;
+
+ gpa_start = memslot->base_gfn << PAGE_SHIFT;
+ gpa_end = gpa_start + size;
+
+ uaddr_start = memslot->userspace_addr;
+ uaddr_end = uaddr_start + size;
+
+ /*
+ * Pages belonging to memslots that don't have the same alignment
+ * within a PMD for userspace and IPA cannot be mapped with stage-2
+ * PMD entries, because we'll end up mapping the wrong pages.
+ *
+ * Consider a layout like the following:
+ *
+ * memslot->userspace_addr:
+ * +-----+--------------------+--------------------+---+
+ * |abcde|fgh  Stage-1 PMD    |    Stage-1 PMD   tv|xyz|
+ * +-----+--------------------+--------------------+---+
+ *
+ * memslot->base_gfn << PAGE_SHIFT:
+ * +---+--------------------+--------------------+-----+
+ * |abc|def  Stage-2 PMD    |    Stage-2 PMD     |tvxyz|
+ * +---+--------------------+--------------------+-----+
+ *
+ * If we create those stage-2 PMDs, we'll end up with this incorrect
+ * mapping:
+ * d -> f
+ * e -> g
+ * f -> h
+ */
+ if ((gpa_start & ~S2_PMD_MASK) != (uaddr_start & ~S2_PMD_MASK))
+ return false;
+
+ /*
+ * Next, let's make sure we're not trying to map anything not covered
+ * by the memslot. This means we have to prohibit PMD size mappings
+ * for the beginning and end of a non-PMD aligned and non-PMD sized
+ * memory slot (illustrated by the head and tail parts of the
+ * userspace view above containing pages 'abcde' and 'xyz',
+ * respectively).
+ *
+ * Note that it doesn't matter if we do the check using the
+ * userspace_addr or the base_gfn, as both are equally aligned (per
+ * the check above) and equally sized.
+ */
+ return (hva & S2_PMD_MASK) >= uaddr_start &&
+ (hva & S2_PMD_MASK) + S2_PMD_SIZE <= uaddr_end;
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status)
{
int ret;
- bool write_fault, exec_fault, writable, hugetlb = false, force_pte = false;
+ bool write_fault, writable, force_pte = false;
+ bool exec_fault, needs_exec;
unsigned long mmu_seq;
gfn_t gfn = fault_ipa >> PAGE_SHIFT;
struct kvm *kvm = vcpu->kvm;
@@ -1484,7 +1667,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_pfn_t pfn;
pgprot_t mem_type = PAGE_S2;
bool logging_active = memslot_is_logging(memslot);
- unsigned long flags = 0;
+ unsigned long vma_pagesize, flags = 0;
write_fault = kvm_is_write_fault(vcpu);
exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
@@ -1495,6 +1678,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
+ if (!fault_supports_stage2_pmd_mappings(memslot, hva))
+ force_pte = true;
+
+ if (logging_active)
+ force_pte = true;
+
/* Let's check if we will get back a huge page backed by hugetlbfs */
down_read(&current->mm->mmap_sem);
vma = find_vma_intersection(current->mm, hva, hva + 1);
@@ -1504,22 +1693,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
- if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
- hugetlb = true;
- gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
- } else {
- /*
- * Pages belonging to memslots that don't have the same
- * alignment for userspace and IPA cannot be mapped using
- * block descriptors even if the pages belong to a THP for
- * the process, because the stage-2 block descriptor will
- * cover more than a single THP and we loose atomicity for
- * unmapping, updates, and splits of the THP or other pages
- * in the stage-2 block range.
- */
- if ((memslot->userspace_addr & ~PMD_MASK) !=
- ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
- force_pte = true;
+ vma_pagesize = vma_kernel_pagesize(vma);
+ /*
+ * PUD level may not exist for a VM but PMD is guaranteed to
+ * exist.
+ */
+ if ((vma_pagesize == PMD_SIZE ||
+ (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) &&
+ !force_pte) {
+ gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
}
up_read(&current->mm->mmap_sem);
@@ -1558,7 +1740,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* should not be mapped with huge pages (it introduces churn
* and performance degradation), so force a pte mapping.
*/
- force_pte = true;
flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
/*
@@ -1573,50 +1754,69 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (mmu_notifier_retry(kvm, mmu_seq))
goto out_unlock;
- if (!hugetlb && !force_pte)
- hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
+ if (vma_pagesize == PAGE_SIZE && !force_pte) {
+ /*
+ * Only PMD_SIZE transparent hugepages(THP) are
+ * currently supported. This code will need to be
+ * updated to support other THP sizes.
+ */
+ if (transparent_hugepage_adjust(&pfn, &fault_ipa))
+ vma_pagesize = PMD_SIZE;
+ }
- if (hugetlb) {
- pmd_t new_pmd = pfn_pmd(pfn, mem_type);
- new_pmd = pmd_mkhuge(new_pmd);
- if (writable) {
- new_pmd = kvm_s2pmd_mkwrite(new_pmd);
- kvm_set_pfn_dirty(pfn);
- }
+ if (writable)
+ kvm_set_pfn_dirty(pfn);
- if (fault_status != FSC_PERM)
- clean_dcache_guest_page(pfn, PMD_SIZE);
+ if (fault_status != FSC_PERM)
+ clean_dcache_guest_page(pfn, vma_pagesize);
- if (exec_fault) {
+ if (exec_fault)
+ invalidate_icache_guest_page(pfn, vma_pagesize);
+
+ /*
+ * If we took an execution fault we have made the
+ * icache/dcache coherent above and should now let the s2
+ * mapping be executable.
+ *
+ * Write faults (!exec_fault && FSC_PERM) are orthogonal to
+ * execute permissions, and we preserve whatever we have.
+ */
+ needs_exec = exec_fault ||
+ (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
+
+ if (vma_pagesize == PUD_SIZE) {
+ pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
+
+ new_pud = kvm_pud_mkhuge(new_pud);
+ if (writable)
+ new_pud = kvm_s2pud_mkwrite(new_pud);
+
+ if (needs_exec)
+ new_pud = kvm_s2pud_mkexec(new_pud);
+
+ ret = stage2_set_pud_huge(kvm, memcache, fault_ipa, &new_pud);
+ } else if (vma_pagesize == PMD_SIZE) {
+ pmd_t new_pmd = kvm_pfn_pmd(pfn, mem_type);
+
+ new_pmd = kvm_pmd_mkhuge(new_pmd);
+
+ if (writable)
+ new_pmd = kvm_s2pmd_mkwrite(new_pmd);
+
+ if (needs_exec)
new_pmd = kvm_s2pmd_mkexec(new_pmd);
- invalidate_icache_guest_page(pfn, PMD_SIZE);
- } else if (fault_status == FSC_PERM) {
- /* Preserve execute if XN was already cleared */
- if (stage2_is_exec(kvm, fault_ipa))
- new_pmd = kvm_s2pmd_mkexec(new_pmd);
- }
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else {
- pte_t new_pte = pfn_pte(pfn, mem_type);
+ pte_t new_pte = kvm_pfn_pte(pfn, mem_type);
if (writable) {
new_pte = kvm_s2pte_mkwrite(new_pte);
- kvm_set_pfn_dirty(pfn);
mark_page_dirty(kvm, gfn);
}
- if (fault_status != FSC_PERM)
- clean_dcache_guest_page(pfn, PAGE_SIZE);
-
- if (exec_fault) {
+ if (needs_exec)
new_pte = kvm_s2pte_mkexec(new_pte);
- invalidate_icache_guest_page(pfn, PAGE_SIZE);
- } else if (fault_status == FSC_PERM) {
- /* Preserve execute if XN was already cleared */
- if (stage2_is_exec(kvm, fault_ipa))
- new_pte = kvm_s2pte_mkexec(new_pte);
- }
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
}
@@ -1637,6 +1837,7 @@ out_unlock:
*/
static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
kvm_pfn_t pfn;
@@ -1646,24 +1847,23 @@ static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
spin_lock(&vcpu->kvm->mmu_lock);
- pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
- if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ if (!stage2_get_leaf_entry(vcpu->kvm, fault_ipa, &pud, &pmd, &pte))
goto out;
- if (pmd_thp_or_huge(*pmd)) { /* THP, HugeTLB */
+ if (pud) { /* HugeTLB */
+ *pud = kvm_s2pud_mkyoung(*pud);
+ pfn = kvm_pud_pfn(*pud);
+ pfn_valid = true;
+ } else if (pmd) { /* THP, HugeTLB */
*pmd = pmd_mkyoung(*pmd);
pfn = pmd_pfn(*pmd);
pfn_valid = true;
- goto out;
+ } else {
+ *pte = pte_mkyoung(*pte); /* Just a page... */
+ pfn = pte_pfn(*pte);
+ pfn_valid = true;
}
- pte = pte_offset_kernel(pmd, fault_ipa);
- if (pte_none(*pte)) /* Nothing there either */
- goto out;
-
- *pte = pte_mkyoung(*pte); /* Just a page... */
- pfn = pte_pfn(*pte);
- pfn_valid = true;
out:
spin_unlock(&vcpu->kvm->mmu_lock);
if (pfn_valid)
@@ -1849,14 +2049,14 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
}
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
unsigned long end = hva + PAGE_SIZE;
kvm_pfn_t pfn = pte_pfn(pte);
pte_t stage2_pte;
if (!kvm->arch.pgd)
- return;
+ return 0;
trace_kvm_set_spte_hva(hva);
@@ -1865,48 +2065,46 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
* just like a translation fault and clean the cache to the PoC.
*/
clean_dcache_guest_page(pfn, PAGE_SIZE);
- stage2_pte = pfn_pte(pfn, PAGE_S2);
+ stage2_pte = kvm_pfn_pte(pfn, PAGE_S2);
handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+
+ return 0;
}
static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
- pmd = stage2_get_pmd(kvm, NULL, gpa);
- if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
+ if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
return 0;
- if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */
+ if (pud)
+ return stage2_pudp_test_and_clear_young(pud);
+ else if (pmd)
return stage2_pmdp_test_and_clear_young(pmd);
-
- pte = pte_offset_kernel(pmd, gpa);
- if (pte_none(*pte))
- return 0;
-
- return stage2_ptep_test_and_clear_young(pte);
+ else
+ return stage2_ptep_test_and_clear_young(pte);
}
static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
{
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
- WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
- pmd = stage2_get_pmd(kvm, NULL, gpa);
- if (!pmd || pmd_none(*pmd)) /* Nothing there */
+ WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
+ if (!stage2_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
return 0;
- if (pmd_thp_or_huge(*pmd)) /* THP, HugeTLB */
+ if (pud)
+ return kvm_s2pud_young(*pud);
+ else if (pmd)
return pmd_young(*pmd);
-
- pte = pte_offset_kernel(pmd, gpa);
- if (!pte_none(*pte)) /* Just a page... */
+ else
return pte_young(*pte);
-
- return 0;
}
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h
index 57b3edebbb40..3828beab93f2 100644
--- a/virt/kvm/arm/trace.h
+++ b/virt/kvm/arm/trace.h
@@ -26,25 +26,25 @@ TRACE_EVENT(kvm_entry,
);
TRACE_EVENT(kvm_exit,
- TP_PROTO(int idx, unsigned int exit_reason, unsigned long vcpu_pc),
- TP_ARGS(idx, exit_reason, vcpu_pc),
+ TP_PROTO(int ret, unsigned int esr_ec, unsigned long vcpu_pc),
+ TP_ARGS(ret, esr_ec, vcpu_pc),
TP_STRUCT__entry(
- __field( int, idx )
- __field( unsigned int, exit_reason )
+ __field( int, ret )
+ __field( unsigned int, esr_ec )
__field( unsigned long, vcpu_pc )
),
TP_fast_assign(
- __entry->idx = idx;
- __entry->exit_reason = exit_reason;
+ __entry->ret = ARM_EXCEPTION_CODE(ret);
+ __entry->esr_ec = ARM_EXCEPTION_IS_TRAP(ret) ? esr_ec : 0;
__entry->vcpu_pc = vcpu_pc;
),
TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
- __print_symbolic(__entry->idx, kvm_arm_exception_type),
- __entry->exit_reason,
- __print_symbolic(__entry->exit_reason, kvm_arm_exception_class),
+ __print_symbolic(__entry->ret, kvm_arm_exception_type),
+ __entry->esr_ec,
+ __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
__entry->vcpu_pc)
);
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index f56ff1cf52ec..ceeda7e04a4d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -313,36 +313,30 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
spin_lock_irqsave(&irq->irq_lock, flags);
- /*
- * If this virtual IRQ was written into a list register, we
- * have to make sure the CPU that runs the VCPU thread has
- * synced back the LR state to the struct vgic_irq.
- *
- * As long as the conditions below are true, we know the VCPU thread
- * may be on its way back from the guest (we kicked the VCPU thread in
- * vgic_change_active_prepare) and still has to sync back this IRQ,
- * so we release and re-acquire the spin_lock to let the other thread
- * sync back the IRQ.
- *
- * When accessing VGIC state from user space, requester_vcpu is
- * NULL, which is fine, because we guarantee that no VCPUs are running
- * when accessing VGIC state from user space so irq->vcpu->cpu is
- * always -1.
- */
- while (irq->vcpu && /* IRQ may have state in an LR somewhere */
- irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
- irq->vcpu->cpu != -1) /* VCPU thread is running */
- cond_resched_lock(&irq->irq_lock);
-
if (irq->hw) {
vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
} else {
u32 model = vcpu->kvm->arch.vgic.vgic_model;
+ u8 active_source;
irq->active = active;
+
+ /*
+ * The GICv2 architecture indicates that the source CPUID for
+ * an SGI should be provided during an EOI which implies that
+ * the active state is stored somewhere, but at the same time
+ * this state is not architecturally exposed anywhere and we
+ * have no way of knowing the right source.
+ *
+ * This may lead to a VCPU not being able to receive
+ * additional instances of a particular SGI after migration
+ * for a GICv2 VM on some GIC implementations. Oh well.
+ */
+ active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;
+
if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
active && vgic_irq_is_sgi(irq->intid))
- irq->active_source = requester_vcpu->vcpu_id;
+ irq->active_source = active_source;
}
if (irq->active)
@@ -368,14 +362,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
*/
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
- if (intid > VGIC_NR_PRIVATE_IRQS)
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+ intid > VGIC_NR_PRIVATE_IRQS)
kvm_arm_halt_guest(vcpu->kvm);
}
/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
- if (intid > VGIC_NR_PRIVATE_IRQS)
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+ intid > VGIC_NR_PRIVATE_IRQS)
kvm_arm_resume_guest(vcpu->kvm);
}
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 7cfdfbc910e0..870b1185173b 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -103,13 +103,13 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
{
/* SGIs and PPIs */
if (intid <= VGIC_MAX_PRIVATE) {
- intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
+ intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
return &vcpu->arch.vgic_cpu.private_irqs[intid];
}
/* SPIs */
- if (intid <= VGIC_MAX_SPI) {
- intid = array_index_nospec(intid, VGIC_MAX_SPI);
+ if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
+ intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
}
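The bound passed to array_index_nospec(index, size) is the number of array elements, i.e. valid indices are 0..size-1; the old code passed VGIC_MAX_PRIVATE (the largest valid private INTID) as the bound, so the last valid index was clamped to 0 under speculation. A minimal sketch of the correct pattern, using a hypothetical lookup table:

#include <linux/nospec.h>

static struct vgic_irq *irq_lookup(struct vgic_irq *table, u32 intid,
				   u32 nr_entries)
{
	if (intid >= nr_entries)
		return NULL;
	/* bound is the element count, not the largest valid index */
	intid = array_index_nospec(intid, nr_entries);
	return &table[intid];
}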
@@ -196,7 +196,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
*/
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+ lockdep_assert_held(&irq->irq_lock);
/* If the interrupt is active, it must stay on the current vcpu */
if (irq->active)
@@ -273,7 +273,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+ lockdep_assert_held(&vgic_cpu->ap_list_lock);
list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}
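The DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(...)) assertions are replaced because spin_is_locked() only reports that *someone* holds the lock (and degenerates on !CONFIG_SMP builds, where spinlocks compile away), whereas lockdep_assert_held() verifies that the current context holds it and compiles to nothing without CONFIG_LOCKDEP. A minimal sketch, with a hypothetical structure:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;
	int state;
};

/* Caller must hold dev->lock; checked (cheaply) under lockdep. */
static void update_state_locked(struct my_dev *dev, int state)
{
	lockdep_assert_held(&dev->lock);
	dev->state = state;
}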
@@ -311,7 +311,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
{
struct kvm_vcpu *vcpu;
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+ lockdep_assert_held(&irq->irq_lock);
retry:
vcpu = vgic_target_oracle(irq);
@@ -702,7 +702,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
struct vgic_irq *irq, int lr)
{
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+ lockdep_assert_held(&irq->irq_lock);
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_populate_lr(vcpu, irq, lr);
@@ -736,7 +736,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
*multi_sgi = false;
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+ lockdep_assert_held(&vgic_cpu->ap_list_lock);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
int w;
@@ -761,7 +761,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
bool multi_sgi;
u8 prio = 0xff;
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+ lockdep_assert_held(&vgic_cpu->ap_list_lock);
count = compute_ap_list_depth(vcpu, &multi_sgi);
if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
@@ -908,6 +908,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
struct vgic_irq *irq;
bool pending = false;
unsigned long flags;
+ struct vgic_vmcr vmcr;
if (!vcpu->kvm->arch.vgic.enabled)
return false;
@@ -915,11 +916,15 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
return true;
+ vgic_get_vmcr(vcpu, &vmcr);
+
spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
spin_lock(&irq->irq_lock);
- pending = irq_is_pending(irq) && irq->enabled;
+ pending = irq_is_pending(irq) && irq->enabled &&
+ !irq->active &&
+ irq->priority < vmcr.pmr;
spin_unlock(&irq->irq_lock);
if (pending)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 23c2519c5b32..110cbe3f74f8 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -82,7 +82,7 @@ static void async_pf_execute(struct work_struct *work)
might_sleep();
/*
- * This work is run asynchromously to the task which owns
+ * This work is run asynchronously to the task which owns
* mm and might be done in another context, so we must
* access remotely.
*/
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 3710342cf6ad..6855cce3e528 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -175,10 +175,14 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
{
struct kvm_coalesced_mmio_dev *dev, *tmp;
+ if (zone->pio != 1 && zone->pio != 0)
+ return -EINVAL;
+
mutex_lock(&kvm->slots_lock);
list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
- if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
+ if (zone->pio == dev->zone.pio &&
+ coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
kvm_io_bus_unregister_dev(kvm,
zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
kvm_iodevice_destructor(&dev->dev);
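With port I/O coalescing, a zone is identified by (pio, addr, size), so the unregister path must validate zone->pio and match it against the registered zone. A hedged userspace sketch of the register/unregister pairing (vm_fd and the port range are assumptions):

#include <err.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

void coalesce_ports(int vm_fd)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = 0xc000,		/* hypothetical port base */
		.size = 8,
		.pio  = 1,		/* port I/O rather than MMIO */
	};

	if (ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone) < 0)
		err(1, "KVM_REGISTER_COALESCED_MMIO");

	/* ... run the guest, drain the coalesced ring ... */

	/* must pass the same pio/addr/size triple to tear it down */
	if (ioctl(vm_fd, KVM_UNREGISTER_COALESCED_MMIO, &zone) < 0)
		err(1, "KVM_UNREGISTER_COALESCED_MMIO");
}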
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2679e476b6c3..cf7cc0554094 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -354,7 +354,10 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
- kvm_set_spte_hva(kvm, address, pte);
+
+ if (kvm_set_spte_hva(kvm, address, pte))
+ kvm_flush_remote_tlbs(kvm);
+
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
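The change above gives kvm_set_spte_hva() a return value: an architecture returns nonzero when it changed its stage-2/shadow mappings in a way that still needs a remote TLB flush, and the generic change_pte notifier performs the flush. The arm/arm64 version earlier in this diff returns 0 because its handler already does per-page TLB invalidation. A hedged sketch of the shape an arch implementation takes (the body is illustrative, not any particular architecture's code):

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.pgd)
		return 0;	/* no stage-2 tables yet, nothing to do */

	/* ... rewrite the stage-2 mapping that backs hva ... */

	return 1;		/* caller must kvm_flush_remote_tlbs() */
}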
@@ -1133,7 +1136,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
/**
- * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
- * are dirty write protect them for next write.
+ * kvm_get_dirty_log_protect - get a snapshot of dirty pages
+ * and reenable dirty page tracking for the corresponding pages.
* @kvm: pointer to kvm instance
* @log: slot id and address to which we copy the log
- * @is_dirty: flag set if any page is dirty
+ * @flush: true if TLB flush is needed by caller
@@ -1154,7 +1157,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
*
*/
int kvm_get_dirty_log_protect(struct kvm *kvm,
- struct kvm_dirty_log *log, bool *is_dirty)
+ struct kvm_dirty_log *log, bool *flush)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
@@ -1176,37 +1179,114 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
return -ENOENT;
n = kvm_dirty_bitmap_bytes(memslot);
+ *flush = false;
+ if (kvm->manual_dirty_log_protect) {
+ /*
+ * Unlike kvm_get_dirty_log, we always return false in *flush,
+ * because no flush is needed until KVM_CLEAR_DIRTY_LOG. There
+ * is some code duplication between this function and
+ * kvm_get_dirty_log, but hopefully all architecture
+ * transition to kvm_get_dirty_log_protect and kvm_get_dirty_log
+ * can be eliminated.
+ */
+ dirty_bitmap_buffer = dirty_bitmap;
+ } else {
+ dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
+ memset(dirty_bitmap_buffer, 0, n);
+
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i < n / sizeof(long); i++) {
+ unsigned long mask;
+ gfn_t offset;
+
+ if (!dirty_bitmap[i])
+ continue;
+
+ *flush = true;
+ mask = xchg(&dirty_bitmap[i], 0);
+ dirty_bitmap_buffer[i] = mask;
+
+ if (mask) {
+ offset = i * BITS_PER_LONG;
+ kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
+ offset, mask);
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+ }
+
+ if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
+ return -EFAULT;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
+/**
+ * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
+ * and reenable dirty page tracking for the corresponding pages.
+ * @kvm: pointer to kvm instance
+ * @log: slot id and address from which to fetch the bitmap of dirty pages
+ */
+int kvm_clear_dirty_log_protect(struct kvm *kvm,
+ struct kvm_clear_dirty_log *log, bool *flush)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ int as_id, id, n;
+ gfn_t offset;
+ unsigned long i;
+ unsigned long *dirty_bitmap;
+ unsigned long *dirty_bitmap_buffer;
+
+ as_id = log->slot >> 16;
+ id = (u16)log->slot;
+ if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
+ return -EINVAL;
+
+ if ((log->first_page & 63) || (log->num_pages & 63))
+ return -EINVAL;
+
+ slots = __kvm_memslots(kvm, as_id);
+ memslot = id_to_memslot(slots, id);
+
+ dirty_bitmap = memslot->dirty_bitmap;
+ if (!dirty_bitmap)
+ return -ENOENT;
+
+ n = kvm_dirty_bitmap_bytes(memslot);
+ *flush = false;
dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
- memset(dirty_bitmap_buffer, 0, n);
+ if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
+ return -EFAULT;
spin_lock(&kvm->mmu_lock);
- *is_dirty = false;
- for (i = 0; i < n / sizeof(long); i++) {
- unsigned long mask;
- gfn_t offset;
-
- if (!dirty_bitmap[i])
+ for (offset = log->first_page,
+ i = offset / BITS_PER_LONG, n = log->num_pages / BITS_PER_LONG; n--;
+ i++, offset += BITS_PER_LONG) {
+ unsigned long mask = *dirty_bitmap_buffer++;
+ atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
+ if (!mask)
continue;
- *is_dirty = true;
-
- mask = xchg(&dirty_bitmap[i], 0);
- dirty_bitmap_buffer[i] = mask;
+ mask &= atomic_long_fetch_andnot(mask, p);
+ /*
+ * mask contains the bits that really have been cleared. This
+ * never includes any bits beyond the length of the memslot (even
+ * if the length is not aligned to 64 pages), so it is not
+ * a problem if userspace sets them in log->dirty_bitmap.
+ */
if (mask) {
- offset = i * BITS_PER_LONG;
+ *flush = true;
kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
offset, mask);
}
}
-
spin_unlock(&kvm->mmu_lock);
- if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
- return -EFAULT;
+
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_get_dirty_log_protect);
+EXPORT_SYMBOL_GPL(kvm_clear_dirty_log_protect);
#endif
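The atomic_long_fetch_andnot(mask, p) call above returns the old value of *p while storing old & ~mask; and-ing the returned old value with mask therefore yields exactly the bits that were set and have just been cleared, which is what decides whether a flush is needed. The idiom in isolation:

#include <linux/atomic.h>

static unsigned long clear_and_report(atomic_long_t *word, unsigned long mask)
{
	unsigned long old = atomic_long_fetch_andnot(mask, word);

	return old & mask;	/* bits that were 1 and are now 0 */
}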
bool kvm_largepages_enabled(void)
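Taken together, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT splits "fetch the dirty bitmap" from "reset dirty tracking", so userspace can copy pages out before write protection is re-armed. A hedged sketch of the intended flow (vm_fd, the slot number, and the page counts are assumptions; first_page and num_pages must be multiples of 64):

#include <sys/ioctl.h>
#include <linux/kvm.h>

void dirty_log_round(int vm_fd, unsigned long *bitmap)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT,
		.args = { 1 },
	};
	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* really belongs in one-time setup */

	struct kvm_dirty_log get = {
		.slot = 0,
		.dirty_bitmap = bitmap,		/* caller-allocated */
	};
	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &get);	/* snapshot only, no reprotect */

	/* ... copy out the pages whose bits are set in bitmap ... */

	struct kvm_clear_dirty_log clear = {
		.slot = 0,
		.first_page = 0,
		.num_pages = 512,		/* 64-page-aligned range */
		.dirty_bitmap = bitmap,		/* bits to clear and re-protect */
	};
	ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
}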
@@ -1928,32 +2008,33 @@ static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
gfn_t nr_pages_avail;
+ int r = start_gfn <= end_gfn ? 0 : -EINVAL;
ghc->gpa = gpa;
ghc->generation = slots->generation;
ghc->len = len;
- ghc->memslot = __gfn_to_memslot(slots, start_gfn);
- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
- if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
+ ghc->hva = KVM_HVA_ERR_BAD;
+
+ /*
+ * If the requested region crosses two memslots, we still
+ * verify that the entire region is valid here.
+ */
+ while (!r && start_gfn <= end_gfn) {
+ ghc->memslot = __gfn_to_memslot(slots, start_gfn);
+ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
+ &nr_pages_avail);
+ if (kvm_is_error_hva(ghc->hva))
+ r = -EFAULT;
+ start_gfn += nr_pages_avail;
+ }
+
+ /* Use the slow path for cross page reads and writes. */
+ if (!r && nr_pages_needed == 1)
ghc->hva += offset;
- } else {
- /*
- * If the requested region crosses two memslots, we still
- * verify that the entire region is valid here.
- */
- while (start_gfn <= end_gfn) {
- nr_pages_avail = 0;
- ghc->memslot = __gfn_to_memslot(slots, start_gfn);
- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
- &nr_pages_avail);
- if (kvm_is_error_hva(ghc->hva))
- return -EFAULT;
- start_gfn += nr_pages_avail;
- }
- /* Use the slow path for cross page reads and writes. */
+ else
ghc->memslot = NULL;
- }
- return 0;
+
+ return r;
}
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
@@ -1965,7 +2046,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len)
+ void *data, unsigned int offset,
+ unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
int r;
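After the cache-init rework, ghc->memslot doubles as the fast-path marker: NULL means the region crossed a page or memslot boundary and every access must take the slow per-gfn path. A sketch of how the cached write helper consumes that state (roughly the shape of the real function, not its exact body):

int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);

	if (slots->generation != ghc->generation)	/* memslots changed */
		__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);

	if (!ghc->memslot)		/* cross-page/cross-slot: slow path */
		return kvm_write_guest(kvm, ghc->gpa + offset, data, len);

	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	if (__copy_to_user((void __user *)ghc->hva + offset, data, len))
		return -EFAULT;
	mark_page_dirty_in_slot(ghc->memslot, ghc->gpa >> PAGE_SHIFT);

	return 0;
}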
@@ -2948,6 +3030,10 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#endif
case KVM_CAP_IOEVENTFD_ANY_LENGTH:
case KVM_CAP_CHECK_EXTENSION_VM:
+ case KVM_CAP_ENABLE_CAP_VM:
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT:
+#endif
return 1;
#ifdef CONFIG_KVM_MMIO
case KVM_CAP_COALESCED_MMIO:
@@ -2971,6 +3057,28 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
return kvm_vm_ioctl_check_extension(kvm, arg);
}
+int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+ struct kvm_enable_cap *cap)
+{
+ return -EINVAL;
+}
+
+static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
+ struct kvm_enable_cap *cap)
+{
+ switch (cap->cap) {
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT:
+ if (cap->flags || (cap->args[0] & ~1))
+ return -EINVAL;
+ kvm->manual_dirty_log_protect = cap->args[0];
+ return 0;
+#endif
+ default:
+ return kvm_vm_ioctl_enable_cap(kvm, cap);
+ }
+}
+
static long kvm_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
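The __attribute__((weak)) definition above gives common code a default kvm_vm_ioctl_enable_cap() that rejects everything; an architecture that supports VM-level capabilities provides a strong definition of the same symbol and the linker prefers it. The pattern in miniature (hypothetical symbol name, definitions in separate translation units):

#include <linux/errno.h>

/* common.c: weak default, used unless someone overrides it */
int __attribute__((weak)) arch_handle_cap(int cap)
{
	return -EINVAL;
}

/* arch.c: strong definition replaces the weak one at link time */
int arch_handle_cap(int cap)
{
	return cap == 42 ? 0 : -EINVAL;
}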
@@ -2984,6 +3092,15 @@ static long kvm_vm_ioctl(struct file *filp,
case KVM_CREATE_VCPU:
r = kvm_vm_ioctl_create_vcpu(kvm, arg);
break;
+ case KVM_ENABLE_CAP: {
+ struct kvm_enable_cap cap;
+
+ r = -EFAULT;
+ if (copy_from_user(&cap, argp, sizeof(cap)))
+ goto out;
+ r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
+ break;
+ }
case KVM_SET_USER_MEMORY_REGION: {
struct kvm_userspace_memory_region kvm_userspace_mem;
@@ -3004,6 +3121,17 @@ static long kvm_vm_ioctl(struct file *filp,
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
break;
}
+#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
+ case KVM_CLEAR_DIRTY_LOG: {
+ struct kvm_clear_dirty_log log;
+
+ r = -EFAULT;
+ if (copy_from_user(&log, argp, sizeof(log)))
+ goto out;
+ r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
+ break;
+ }
+#endif
#ifdef CONFIG_KVM_MMIO
case KVM_REGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;